diff --git a/terraform/providers/google/go.mod b/terraform/providers/google/go.mod index 6369081db0..c4d0152736 100644 --- a/terraform/providers/google/go.mod +++ b/terraform/providers/google/go.mod @@ -1,18 +1,18 @@ module github.com/openshift/installer/terraform/providers/google -go 1.18 +go 1.20 -require github.com/hashicorp/terraform-provider-google v1.20.1-0.20230327171628-0dc3bde12208 // v4.59.0 +require github.com/hashicorp/terraform-provider-google v1.20.1-0.20230718215755-3edc574a3a8f // v4.74.0 require ( - bitbucket.org/creachadair/stringset v0.0.11 // indirect - cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/bigtable v1.17.0 // indirect - cloud.google.com/go/compute v1.18.0 // indirect + bitbucket.org/creachadair/stringset v0.0.8 // indirect + cloud.google.com/go v0.110.2 // indirect + cloud.google.com/go/bigtable v1.19.0 // indirect + cloud.google.com/go/compute v1.19.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.12.0 // indirect - cloud.google.com/go/longrunning v0.4.1 // indirect - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.34.0 // indirect + cloud.google.com/go/iam v1.1.0 // indirect + cloud.google.com/go/longrunning v0.5.0 // indirect + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.44.0 // indirect github.com/agext/levenshtein v1.2.2 // indirect github.com/apparentlymart/go-cidr v1.1.0 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect @@ -20,22 +20,22 @@ require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect - github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b // indirect + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dnaeon/go-vcr v1.0.1 // indirect - 
github.com/envoyproxy/go-control-plane v0.10.3 // indirect - github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect + github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect github.com/fatih/color v1.13.0 // indirect github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 // indirect github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 // indirect - github.com/golang/glog v1.0.0 // indirect + github.com/golang/glog v1.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 // indirect + github.com/google/s2a-go v0.1.4 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + github.com/googleapis/gax-go/v2 v2.11.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect @@ -52,6 +52,7 @@ require ( github.com/hashicorp/terraform-exec v0.17.3 // indirect github.com/hashicorp/terraform-json v0.14.0 // indirect github.com/hashicorp/terraform-plugin-framework v1.1.1 // indirect + github.com/hashicorp/terraform-plugin-framework-validators v0.9.0 // indirect github.com/hashicorp/terraform-plugin-go v0.14.3 // indirect github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect github.com/hashicorp/terraform-plugin-mux v0.8.0 // indirect @@ -76,34 +77,28 @@ require ( github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/zclconf/go-cty v1.11.0 // indirect go.opencensus.io v0.24.0 // indirect - 
golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/oauth2 v0.6.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/crypto v0.11.0 // indirect + golang.org/x/net v0.12.0 // indirect + golang.org/x/oauth2 v0.9.0 // indirect + golang.org/x/sys v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/api v0.114.0 // indirect + google.golang.org/api v0.130.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect - google.golang.org/grpc v1.53.0 // indirect - google.golang.org/protobuf v1.29.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529 // indirect + google.golang.org/grpc v1.56.1 // indirect + google.golang.org/protobuf v1.31.0 // indirect ) -// https://issues.redhat.com//browse/OCPBUGS-7699 -replace github.com/hashicorp/go-getter => github.com/hashicorp/go-getter v1.7.0 - // https://bugzilla.redhat.com/show_bug.cgi?id=2064702 -replace golang.org/x/crypto => golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 +replace golang.org/x/crypto => golang.org/x/crypto v0.11.0 // https://bugzilla.redhat.com/show_bug.cgi?id=2100495 -replace golang.org/x/text => golang.org/x/text v0.8.0 - -// https://issues.redhat.com/browse/OCPBUGS-5667 -replace github.com/Masterminds/goutils => github.com/Masterminds/goutils v1.1.1 - -// https://bugzilla.redhat.com/show_bug.cgi?id=2045880 -replace github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.12.1 +replace golang.org/x/text => golang.org/x/text v0.11.0 // 
https://issues.redhat.com/browse/OCPBUGS-6422 -replace golang.org/x/net => golang.org/x/net v0.8.0 +replace golang.org/x/net => golang.org/x/net v0.12.0 + +replace bitbucket.org/creachadair/stringset => bitbucket.org/creachadair/stringset v0.0.11 diff --git a/terraform/providers/google/go.sum b/terraform/providers/google/go.sum index 1adb1b2d8b..cd075515e9 100644 --- a/terraform/providers/google/go.sum +++ b/terraform/providers/google/go.sum @@ -2,57 +2,24 @@ bitbucket.org/creachadair/stringset v0.0.11 h1:6Sv4CCv14Wm+OipW4f3tWOb0SQVpBDLW0 bitbucket.org/creachadair/stringset v0.0.11/go.mod h1:wh0BHewFe+j0HrzWz7KcGbSNpFzWwnpmgPRlB57U5jU= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= 
-cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigtable v1.17.0 h1:8t48YTxxFsYKy+AWuHdoePgAr4J2gEtntbdWclbEbco= -cloud.google.com/go/bigtable v1.17.0/go.mod h1:wtf7lFV1Wa5ay6aKa/gv/T2Ci7J6qXpBX8Ofij2z5mo= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go v0.110.2 h1:sdFPBr6xG9/wkBbfhmUz/JmZC7X6LavQgcrVINrKiVA= +cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= +cloud.google.com/go/bigtable v1.19.0 h1:wiq9LT0kukfInzvy1joMDijCw/OD1UChpSbORXYn0LI= +cloud.google.com/go/bigtable v1.19.0/go.mod h1:xl5kPa8PTkJjdBxg6qdGH88464nNqmbISHSRU+D2yFE= +cloud.google.com/go/compute v1.19.3 h1:DcTwsFgGev/wV5+q8o2fzgcHOaac+DKGC91ZlvpsQds= +cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= -cloud.google.com/go/iam 
v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +cloud.google.com/go/iam v1.1.0 h1:67gSqaPukx7O8WLLHMa0PNs3EBGd2eE4d+psbO/CO94= +cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= +cloud.google.com/go/longrunning v0.5.0 h1:DK8BH0+hS+DIvc9a2TPnteUievsTCH4ORMAASSb7JcQ= +cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.34.0 h1:o7t+hPFv+Ax5O2vxzIH7dEtvlWA7JJOlOd7mWFvMa6s= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.34.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= 
+github.com/GoogleCloudPlatform/declarative-resource-client-library v1.44.0 h1:hASUAck0/5j84kejIHGJjipjUzFHiN5edNMobKwj2HA= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.44.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= @@ -72,49 +39,39 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b h1:ACGZRIr7HsgBKHsueQ1yM4WaVaXh21ynwqsF8M8tXhA= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -132,9 +89,6 @@ github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -142,27 +96,18 @@ github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -172,50 +117,34 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp 
v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 h1:5/4TSDzpDnHQ8rKEEQBjRlYx77mHOvXu08oGchxej7o= github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932/go.mod h1:cC6EdPbj/17GFCPDK39NRarlMI+kt+O60S12cNB5J9Y= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= -github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= 
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= @@ -239,8 +168,6 @@ github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH92oYYk= github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI= github.com/hashicorp/hcl/v2 v2.14.1 h1:x0BpjfZ+CYdbiz+8yZTQ+gdLO7IXvOut7Da+XJayx34= @@ -253,6 +180,8 @@ github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= github.com/hashicorp/terraform-plugin-framework v1.1.1 h1:PbnEKHsIU8KTTzoztHQGgjZUWx7Kk8uGtpGMMc1p+oI= github.com/hashicorp/terraform-plugin-framework v1.1.1/go.mod h1:DyZPxQA+4OKK5ELxFIIcqggcszqdWWUpTLPHAhS/tkY= +github.com/hashicorp/terraform-plugin-framework-validators v0.9.0 h1:LYz4bXh3t7bTEydXOmPDPupRRnA480B/9+jV8yZvxBA= +github.com/hashicorp/terraform-plugin-framework-validators v0.9.0/go.mod h1:+BVERsnfdlhYR2YkXMBtPnmn9UsL19U3qUtSZ+Y/5MY= github.com/hashicorp/terraform-plugin-go v0.14.3 h1:nlnJ1GXKdMwsC8g1Nh05tK2wsC3+3BL/DBBxFEki+j0= github.com/hashicorp/terraform-plugin-go 
v0.14.3/go.mod h1:7ees7DMZ263q8wQ6E4RdIdR6nHHJtrdt4ogX5lPkX1A= github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= @@ -261,30 +190,25 @@ github.com/hashicorp/terraform-plugin-mux v0.8.0 h1:WCTP66mZ+iIaIrCNJnjPEYnVjawT github.com/hashicorp/terraform-plugin-mux v0.8.0/go.mod h1:vdW0daEi8Kd4RFJmet5Ot+SIVB/B8SwQVJiYKQwdCy8= github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.0 h1:FtCLTiTcykdsURXPt/ku7fYXm3y19nbzbZcUxHx9RbI= github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.0/go.mod h1:80wf5oad1tW+oLnbXS4UTYmDCrl7BuN1Q+IA91X1a4Y= -github.com/hashicorp/terraform-provider-google v1.20.1-0.20230327171628-0dc3bde12208 h1:vcmq3h7Nn2QK86EVNLeqTRcdmPD7r3mMJLdDUbCWa0Y= -github.com/hashicorp/terraform-provider-google v1.20.1-0.20230327171628-0dc3bde12208/go.mod h1:Wn+5oEyIb0xoRzKqjLIZ8DE7bvH9WQ68mAb3G8RJrcE= +github.com/hashicorp/terraform-provider-google v1.20.1-0.20230718215755-3edc574a3a8f h1:2sKkIdnfifwt0AHVjDQ25meweYxCiD4vb/0bXITerSA= +github.com/hashicorp/terraform-provider-google v1.20.1-0.20230718215755-3edc574a3a8f/go.mod h1:U5Kc3m293wEkGpgbyrCsmVLQfty60myf2D+UslmqJBU= github.com/hashicorp/terraform-registry-address v0.1.0 h1:W6JkV9wbum+m516rCl5/NjKxCyTVaaUBbzYcMzBDO3U= github.com/hashicorp/terraform-registry-address v0.1.0/go.mod h1:EnyO2jYO6j29DTHbJcm00E5nQTFeTtyZH3H5ycydQ5A= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jhump/protoreflect v1.6.1 h1:4/2yi5LyDPP7nN+Hiird1SAJ6YoxUm13/oxHGRnbPd8= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= @@ -296,7 +220,6 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod 
h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= @@ -324,13 +247,10 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= @@ -338,9 +258,6 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB 
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -349,13 +266,12 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod 
h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= @@ -366,11 +282,8 @@ github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vb github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= @@ -378,170 +291,68 @@ github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uU github.com/zclconf/go-cty v1.11.0 h1:726SxLdi2SDnjY+BStqB9J1hNp4+2WlzyXLuimibIe0= github.com/zclconf/go-cty v1.11.0/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 h1:O8uGbHCqlTp2P6QJSLmCojM4mN6UemYv8K+dCnmHmu0= -golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp 
v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod 
h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs= 
+golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools 
v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= 
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -550,87 +361,35 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= 
-google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.130.0 h1:A50ujooa1h9iizvfzA4rrJr2B7uRmWexwbekQ2+5FPQ= +google.golang.org/api v0.130.0/go.mod h1:J/LCJMYSDFvAVREGCbrESb53n4++NMBDetSHGL5I5RY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529 h1:DEH99RbiLZhMxrpEJCZ0A+wdTe0EOgou/poSLx9vWf4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod 
h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -639,21 +398,17 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -661,18 +416,9 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod 
h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-individual.json index 3a9f008201..df63131eb5 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-individual.json +++ b/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-individual.json @@ -1,13 +1,13 @@ { - "bigquery": "1.46.0", + "bigquery": "1.51.2", "bigtable": "1.18.1", - "datastore": "1.10.0", + "datastore": "1.11.0", "errorreporting": "0.3.0", "firestore": "1.9.0", - "logging": "1.6.1", + "logging": "1.7.0", "profiler": "0.3.1", - "pubsub": "1.28.0", - "pubsublite": "1.6.0", - "spanner": "1.44.0", - "storage": "1.29.0" + "pubsub": "1.30.1", + "pubsublite": "1.8.0", + "spanner": "1.45.1", + "storage": "1.30.1" } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-submodules.json index 50a4d12bca..4e79b0916d 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-submodules.json +++ b/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest-submodules.json @@ -1,116 +1,123 @@ { - "accessapproval": "1.6.0", - "accesscontextmanager": "1.6.0", - "aiplatform": "1.34.0", - "analytics": "0.17.0", - "apigateway": "1.5.0", - "apigeeconnect": "1.5.0", - "apigeeregistry": "0.3.0", - "apikeys": "0.3.0", - "appengine": "1.6.0", - "area120": "0.7.0", - "artifactregistry": "1.11.0", - "asset": "1.11.1", - "assuredworkloads": "1.10.0", - "automl": "1.12.0", - "baremetalsolution": "0.5.0", - "batch": "0.7.0", - "beyondcorp": "0.4.0", - "billing": "1.12.0", - "binaryauthorization": "1.5.0", - 
"certificatemanager": "1.6.0", - "channel": "1.11.0", - "cloudbuild": "1.6.0", - "clouddms": "1.5.0", - "cloudtasks": "1.9.0", - "compute": "1.18.0", + "accessapproval": "1.6.1", + "accesscontextmanager": "1.7.1", + "advisorynotifications": "0.2.1", + "aiplatform": "1.40.1", + "alloydb": "1.0.1", + "analytics": "0.19.1", + "apigateway": "1.5.1", + "apigeeconnect": "1.5.1", + "apigeeregistry": "0.6.1", + "apikeys": "1.0.1", + "appengine": "1.7.3", + "area120": "0.7.2", + "artifactregistry": "1.13.1", + "asset": "1.13.1", + "assuredworkloads": "1.10.1", + "automl": "1.12.1", + "baremetalsolution": "1.0.1", + "batch": "1.0.1", + "beyondcorp": "0.5.1", + "billing": "1.13.1", + "binaryauthorization": "1.5.1", + "certificatemanager": "1.6.1", + "channel": "1.12.1", + "cloudbuild": "1.9.1", + "clouddms": "1.5.1", + "cloudtasks": "1.10.1", + "compute": "1.19.2", "compute/metadata": "0.2.3", - "contactcenterinsights": "1.6.0", - "container": "1.13.1", - "containeranalysis": "0.7.0", - "datacatalog": "1.12.0", - "dataflow": "0.8.0", - "dataform": "0.6.0", - "datafusion": "1.6.0", - "datalabeling": "0.7.0", - "dataplex": "1.5.2", - "dataproc": "1.12.0", - "dataqna": "0.7.0", - "datastream": "1.6.0", - "deploy": "1.6.0", - "dialogflow": "1.27.0", - "dlp": "1.9.0", - "documentai": "1.15.0", - "domains": "0.8.0", - "edgecontainer": "0.3.0", - "essentialcontacts": "1.5.0", - "eventarc": "1.10.0", - "filestore": "1.5.0", - "functions": "1.10.0", - "gaming": "1.9.0", - "gkebackup": "0.4.0", - "gkeconnect": "0.7.0", - "gkehub": "0.11.0", - "gkemulticloud": "0.5.0", - "grafeas": "0.2.0", - "gsuiteaddons": "1.5.0", - "iam": "0.10.0", - "iap": "1.6.0", - "ids": "1.3.0", - "iot": "1.5.0", - "kms": "1.8.0", - "language": "1.9.0", - "lifesciences": "0.8.0", - "longrunning": "0.4.1", - "managedidentities": "1.5.0", - "maps": "0.6.0", - "mediatranslation": "0.7.0", - "memcache": "1.9.0", - "metastore": "1.10.0", - "monitoring": "1.12.0", - "networkconnectivity": "1.10.0", - 
"networkmanagement": "1.6.0", - "networksecurity": "0.7.0", - "notebooks": "1.7.0", - "optimization": "1.3.1", - "orchestration": "1.6.0", - "orgpolicy": "1.10.0", - "osconfig": "1.11.0", - "oslogin": "1.9.0", - "phishingprotection": "0.7.0", - "policytroubleshooter": "1.5.0", - "privatecatalog": "0.7.0", - "recaptchaenterprise/v2": "2.6.0", - "recommendationengine": "0.7.0", - "recommender": "1.9.0", - "redis": "1.11.0", - "resourcemanager": "1.5.0", - "resourcesettings": "1.5.0", - "retail": "1.12.0", - "run": "0.8.0", - "scheduler": "1.8.0", - "secretmanager": "1.10.0", - "security": "1.12.0", - "securitycenter": "1.18.1", - "servicecontrol": "1.10.0", - "servicedirectory": "1.8.0", - "servicemanagement": "1.6.0", - "serviceusage": "1.5.0", - "shell": "1.6.0", - "speech": "1.14.1", - "storagetransfer": "1.7.0", - "talent": "1.5.0", - "texttospeech": "1.6.0", - "tpu": "1.5.0", - "trace": "1.8.0", - "translate": "1.5.0", - "video": "1.12.0", - "videointelligence": "1.10.0", - "vision/v2": "2.6.0", - "vmmigration": "1.5.0", - "vmwareengine": "0.2.2", - "vpcaccess": "1.6.0", - "webrisk": "1.8.0", - "websecurityscanner": "1.5.0", - "workflows": "1.10.0" + "confidentialcomputing": "0.2.1", + "contactcenterinsights": "1.7.1", + "container": "1.18.1", + "containeranalysis": "0.9.2", + "datacatalog": "1.13.1", + "dataflow": "0.8.1", + "dataform": "0.7.1", + "datafusion": "1.6.1", + "datalabeling": "0.7.1", + "dataplex": "1.7.1", + "dataproc": "2.0.0", + "dataqna": "0.7.1", + "datastream": "1.7.1", + "deploy": "1.8.1", + "dialogflow": "1.32.1", + "discoveryengine": "0.3.1", + "dlp": "1.9.1", + "documentai": "1.18.1", + "domains": "0.8.1", + "edgecontainer": "1.0.1", + "essentialcontacts": "1.5.1", + "eventarc": "1.11.1", + "filestore": "1.6.1", + "functions": "1.13.1", + "gaming": "1.9.1", + "gkebackup": "1.0.1", + "gkeconnect": "0.7.1", + "gkehub": "0.13.1", + "gkemulticloud": "0.5.1", + "grafeas": "0.2.1", + "gsuiteaddons": "1.5.1", + "iam": "1.0.1", + "iap": "1.7.3", + 
"ids": "1.3.1", + "iot": "1.6.1", + "kms": "1.10.2", + "language": "1.9.1", + "lifesciences": "0.8.1", + "longrunning": "0.4.2", + "managedidentities": "1.5.1", + "maps": "1.0.1", + "mediatranslation": "0.7.1", + "memcache": "1.9.1", + "metastore": "1.10.1", + "monitoring": "1.13.1", + "networkconnectivity": "1.11.1", + "networkmanagement": "1.6.1", + "networksecurity": "0.8.1", + "notebooks": "1.8.1", + "optimization": "1.3.2", + "orchestration": "1.6.1", + "orgpolicy": "1.10.1", + "osconfig": "1.11.1", + "oslogin": "1.9.1", + "phishingprotection": "0.7.1", + "policytroubleshooter": "1.6.1", + "privatecatalog": "0.8.1", + "recaptchaenterprise/v2": "2.7.1", + "recommendationengine": "0.7.1", + "recommender": "1.9.1", + "redis": "1.11.1", + "resourcemanager": "1.8.1", + "resourcesettings": "1.5.1", + "retail": "1.13.1", + "run": "1.0.1", + "scheduler": "1.9.1", + "secretmanager": "1.10.1", + "security": "1.14.1", + "securitycenter": "1.20.1", + "servicecontrol": "1.11.2", + "servicedirectory": "1.9.1", + "servicemanagement": "1.8.1", + "serviceusage": "1.6.1", + "shell": "1.6.1", + "speech": "1.15.1", + "storageinsights": "0.1.0", + "storagetransfer": "1.8.1", + "support": "0.1.0", + "talent": "1.5.2", + "texttospeech": "1.6.1", + "tpu": "1.5.1", + "trace": "1.9.1", + "translate": "1.7.1", + "video": "1.16.1", + "videointelligence": "1.10.1", + "vision/v2": "2.7.1", + "vmmigration": "1.6.1", + "vmwareengine": "0.3.1", + "vpcaccess": "1.6.1", + "webrisk": "1.8.1", + "websecurityscanner": "1.5.1", + "workflows": "1.10.1", + "workstations": "0.2.1" } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest.json b/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest.json index d077941155..b1cef4f2c0 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest.json +++ b/terraform/providers/google/vendor/cloud.google.com/go/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": 
"0.110.0" + ".": "0.110.2" } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/CHANGES.md b/terraform/providers/google/vendor/cloud.google.com/go/CHANGES.md index 34209ce0c1..0feae98e34 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/CHANGES.md +++ b/terraform/providers/google/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,19 @@ # Changes +## [0.110.2](https://github.com/googleapis/google-cloud-go/compare/v0.110.1...v0.110.2) (2023-05-08) + + +### Bug Fixes + +* **deps:** Update grpc to v1.55.0 ([#7885](https://github.com/googleapis/google-cloud-go/issues/7885)) ([9fc48a9](https://github.com/googleapis/google-cloud-go/commit/9fc48a921428c94c725ea90415d55ff0c177dd81)) + +## [0.110.1](https://github.com/googleapis/google-cloud-go/compare/v0.110.0...v0.110.1) (2023-05-03) + + +### Bug Fixes + +* **httpreplay:** Add ignore-header flag, fix tests ([#7865](https://github.com/googleapis/google-cloud-go/issues/7865)) ([1829706](https://github.com/googleapis/google-cloud-go/commit/1829706c5ade36cc786b2e6780fda5e7302f965b)) + ## [0.110.0](https://github.com/googleapis/google-cloud-go/compare/v0.109.0...v0.110.0) (2023-02-15) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/CHANGES.md b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/CHANGES.md index 60eac78f1e..bfd8b32510 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/CHANGES.md +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/CHANGES.md @@ -1,5 +1,35 @@ # Changes +## [1.19.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.18.1...bigtable/v1.19.0) (2023-07-06) + + +### Features + +* **bigtable:** Add change stream config to create and update table ([#8180](https://github.com/googleapis/google-cloud-go/issues/8180)) ([32897ce](https://github.com/googleapis/google-cloud-go/commit/32897cec9be7413fa09b403199980e782ae52107)) +* **bigtable:** Update all direct dependencies 
([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6)) +* **bigtable:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd)) + + +### Bug Fixes + +* **bigtable:** REST query UpdateMask bug ([df52820](https://github.com/googleapis/google-cloud-go/commit/df52820b0e7721954809a8aa8700b93c5662dc9b)) +* **bigtable:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef)) +* **bigtable:** Use fieldmask directly instead of field_mask genproto alias ([#8032](https://github.com/googleapis/google-cloud-go/issues/8032)) ([cae6cd6](https://github.com/googleapis/google-cloud-go/commit/cae6cd6d0e09e98157879fb03fb23f718f4d2bb3)) + +## [1.18.1](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.18.0...bigtable/v1.18.1) (2022-12-02) + + +### Bug Fixes + +* **bigtable:** downgrade some dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5)) + +## [1.18.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.17.0...bigtable/v1.18.0) (2022-11-10) + + +### Features + +* **bigtable:** Add support for request stats ([#6991](https://github.com/googleapis/google-cloud-go/issues/6991)) ([609421e](https://github.com/googleapis/google-cloud-go/commit/609421e87ff25971f3fc29e15dbcdaa7fba02d11)) + ## [1.17.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.16.0...bigtable/v1.17.0) (2022-11-03) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin.go index 920197d319..d62dea9288 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/admin.go @@ -40,8 +40,9 @@ import ( gtransport 
"google.golang.org/api/transport/grpc" btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" "google.golang.org/genproto/googleapis/rpc/status" - "google.golang.org/genproto/protobuf/field_mask" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/types/known/durationpb" + field_mask "google.golang.org/protobuf/types/known/fieldmaskpb" ) const adminAddr = "bigtableadmin.googleapis.com:443" @@ -211,6 +212,11 @@ func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) { return names, nil } +// ChangeStreamRetention indicates how long bigtable should retain change data. +// Minimum is 1 day. Maximum is 7. nil to not change the retention period. 0 to +// disable change stream retention. +type ChangeStreamRetention optional.Duration + // DeletionProtection indicates whether the table is protected against data loss // i.e. when set to protected, deleting the table, the column families in the table, // and the instance containing the table would be prohibited. @@ -233,13 +239,14 @@ type TableConf struct { Families map[string]GCPolicy // DeletionProtection can be none, protected or unprotected // set to protected to make the table protected against data loss - DeletionProtection DeletionProtection + DeletionProtection DeletionProtection + ChangeStreamRetention ChangeStreamRetention } // CreateTable creates a new table in the instance. // This method may return before the table's creation is complete. func (ac *AdminClient) CreateTable(ctx context.Context, table string) error { - return ac.CreateTableFromConf(ctx, &TableConf{TableID: table, DeletionProtection: None}) + return ac.CreateTableFromConf(ctx, &TableConf{TableID: table, ChangeStreamRetention: nil, DeletionProtection: None}) } // CreatePresplitTable creates a new table in the instance. @@ -248,7 +255,7 @@ func (ac *AdminClient) CreateTable(ctx context.Context, table string) error { // spanning the key ranges: [, s1), [s1, s2), [s2, ). 
// This method may return before the table's creation is complete. func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, splitKeys []string) error { - return ac.CreateTableFromConf(ctx, &TableConf{TableID: table, SplitKeys: splitKeys, DeletionProtection: None}) + return ac.CreateTableFromConf(ctx, &TableConf{TableID: table, SplitKeys: splitKeys, ChangeStreamRetention: nil, DeletionProtection: None}) } // CreateTableFromConf creates a new table in the instance from the given configuration. @@ -269,6 +276,10 @@ func (ac *AdminClient) CreateTableFromConf(ctx context.Context, conf *TableConf) } else if conf.DeletionProtection == Unprotected { tbl.DeletionProtection = false } + if conf.ChangeStreamRetention != nil && conf.ChangeStreamRetention.(time.Duration) != 0 { + tbl.ChangeStreamConfig = &btapb.ChangeStreamConfig{} + tbl.ChangeStreamConfig.RetentionPeriod = durationpb.New(conf.ChangeStreamRetention.(time.Duration)) + } if conf.Families != nil { tbl.ColumnFamilies = make(map[string]*btapb.ColumnFamily) for fam, policy := range conf.Families { @@ -307,12 +318,23 @@ type UpdateTableConf struct { tableID string // deletionProtection can be unset, true or false // set to true to make the table protected against data loss - deletionProtection DeletionProtection + deletionProtection DeletionProtection + changeStreamRetention ChangeStreamRetention +} + +// UpdateTableDisableChangeStream updates a table to disable change stream for table ID. +func (ac *AdminClient) UpdateTableDisableChangeStream(ctx context.Context, tableID string) error { + return ac.updateTableWithConf(ctx, &UpdateTableConf{tableID, None, time.Duration(0)}) +} + +// UpdateTableWithChangeStream updates a table to with the given table ID and change stream config. 
+func (ac *AdminClient) UpdateTableWithChangeStream(ctx context.Context, tableID string, changeStreamRetention ChangeStreamRetention) error { + return ac.updateTableWithConf(ctx, &UpdateTableConf{tableID, None, changeStreamRetention}) } // UpdateTableWithDeletionProtection updates a table with the given table ID and deletion protection parameter. func (ac *AdminClient) UpdateTableWithDeletionProtection(ctx context.Context, tableID string, deletionProtection DeletionProtection) error { - return ac.updateTableWithConf(ctx, &UpdateTableConf{tableID, deletionProtection}) + return ac.updateTableWithConf(ctx, &UpdateTableConf{tableID, deletionProtection, nil}) } // updateTableWithConf updates a table in the instance from the given configuration. @@ -323,30 +345,34 @@ func (ac *AdminClient) updateTableWithConf(ctx context.Context, conf *UpdateTabl return errors.New("TableID is required") } - if conf.deletionProtection == None { - return errors.New("deletion protection is required") - } - ctx = mergeOutgoingMetadata(ctx, ac.md) updateMask := &field_mask.FieldMask{ - Paths: []string{ - "deletion_protection", - }, - } - - deletionProtection := true - if conf.deletionProtection == Unprotected { - deletionProtection = false + Paths: []string{}, } prefix := ac.instancePrefix() req := &btapb.UpdateTableRequest{ Table: &btapb.Table{ - Name: prefix + "/tables/" + conf.tableID, - DeletionProtection: deletionProtection, + Name: prefix + "/tables/" + conf.tableID, }, UpdateMask: updateMask, } + + if conf.deletionProtection != None { + updateMask.Paths = append(updateMask.Paths, "deletion_protection") + req.Table.DeletionProtection = conf.deletionProtection != Unprotected + } + + if conf.changeStreamRetention != nil { + if conf.changeStreamRetention.(time.Duration) == time.Duration(0) { + updateMask.Paths = append(updateMask.Paths, "change_stream_config") + } else { + updateMask.Paths = append(updateMask.Paths, "change_stream_config.retention_period") + req.Table.ChangeStreamConfig = 
&btapb.ChangeStreamConfig{} + req.Table.ChangeStreamConfig.RetentionPeriod = durationpb.New(conf.changeStreamRetention.(time.Duration)) + } + } + lro, err := ac.tClient.UpdateTable(ctx, req) if err != nil { return fmt.Errorf("error from update: %w", err) @@ -394,7 +420,8 @@ type TableInfo struct { // DeletionProtection indicates whether the table is protected against data loss // DeletionProtection could be None depending on the table view // for example when using NAME_ONLY, the response does not contain DeletionProtection and the value should be None - DeletionProtection DeletionProtection + DeletionProtection DeletionProtection + ChangeStreamRetention ChangeStreamRetention } // FamilyInfo represents information about a column family. @@ -450,6 +477,10 @@ func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, } else { ti.DeletionProtection = Unprotected } + if res.ChangeStreamConfig != nil && res.ChangeStreamConfig.RetentionPeriod != nil { + ti.ChangeStreamRetention = res.ChangeStreamConfig.RetentionPeriod.AsDuration() + } + return ti, nil } diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/bigtable.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/bigtable.go index 307f8f43bc..24a3e20fa8 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/bigtable.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/bigtable.go @@ -37,6 +37,11 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + + // Install google-c2p resolver, which is required for direct path. + _ "google.golang.org/grpc/xds/googledirectpath" + // Install RLS load balancer policy, which is needed for gRPC RLS. 
+ _ "google.golang.org/grpc/balancer/rls" ) const prodAddr = "bigtable.googleapis.com:443" @@ -187,8 +192,9 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts AppProfileId: t.c.appProfile, Rows: arg.proto(), } + settings := makeReadSettings(req) for _, opt := range opts { - opt.set(req) + opt.set(&settings) } ctx, cancel := context.WithCancel(ctx) // for aborting the stream defer cancel() @@ -239,6 +245,13 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts } } } + + // Handle any incoming RequestStats. This should happen at most once. + if res.RequestStats != nil && settings.fullReadStatsFunc != nil { + stats := makeFullReadStats(res.RequestStats) + settings.fullReadStatsFunc(&stats) + } + if err := cr.Close(); err != nil { // No need to prepare for a retry, this is an unretryable error. return err @@ -453,9 +466,78 @@ func prefixSuccessor(prefix string) string { return string(ans) } +// ReadIterationStats captures information about the iteration of rows or cells over the course of +// a read, e.g. how many results were scanned in a read operation versus the results returned. +type ReadIterationStats struct { + // The cells returned as part of the request. + CellsReturnedCount int64 + + // The cells seen (scanned) as part of the request. This includes the count of cells returned, as + // captured below. + CellsSeenCount int64 + + // The rows returned as part of the request. + RowsReturnedCount int64 + + // The rows seen (scanned) as part of the request. This includes the count of rows returned, as + // captured below. + RowsSeenCount int64 +} + +// RequestLatencyStats provides a measurement of the latency of the request as it interacts with +// different systems over its lifetime, e.g. how long the request took to execute within a frontend +// server. 
+type RequestLatencyStats struct { + // The latency measured by the frontend server handling this request, from when the request was + // received, to when this value is sent back in the response. For more context on the component + // that is measuring this latency, see: https://cloud.google.com/bigtable/docs/overview + FrontendServerLatency time.Duration +} + +// FullReadStats captures all known information about a read. +type FullReadStats struct { + // Iteration stats describe how efficient the read is, e.g. comparing rows seen vs. rows + // returned or cells seen vs cells returned can provide an indication of read efficiency + // (the higher the ratio of seen to retuned the better). + ReadIterationStats ReadIterationStats + + // Request latency stats describe the time taken to complete a request, from the server + // side. + RequestLatencyStats RequestLatencyStats +} + +// Returns a FullReadStats populated from a RequestStats. This assumes the stats view is +// REQUEST_STATS_FULL. That is the only stats view currently supported. +func makeFullReadStats(reqStats *btpb.RequestStats) FullReadStats { + statsView := reqStats.GetFullReadStatsView() + readStats := statsView.ReadIterationStats + latencyStats := statsView.RequestLatencyStats + return FullReadStats{ + ReadIterationStats: ReadIterationStats{ + CellsReturnedCount: readStats.CellsReturnedCount, + CellsSeenCount: readStats.CellsSeenCount, + RowsReturnedCount: readStats.RowsReturnedCount, + RowsSeenCount: readStats.RowsSeenCount}, + RequestLatencyStats: RequestLatencyStats{ + FrontendServerLatency: latencyStats.FrontendServerLatency.AsDuration()}} +} + +// FullReadStatsFunc describes a callback that receives a FullReadStats for evaluation. +type FullReadStatsFunc func(*FullReadStats) + +// readSettings is a collection of objects that can be modified by ReadOption instances to apply settings. 
+type readSettings struct { + req *btpb.ReadRowsRequest + fullReadStatsFunc FullReadStatsFunc +} + +func makeReadSettings(req *btpb.ReadRowsRequest) readSettings { + return readSettings{req, nil} +} + // A ReadOption is an optional argument to ReadRows. type ReadOption interface { - set(req *btpb.ReadRowsRequest) + set(settings *readSettings) } // RowFilter returns a ReadOption that applies f to the contents of read rows. @@ -466,14 +548,27 @@ func RowFilter(f Filter) ReadOption { return rowFilter{f} } type rowFilter struct{ f Filter } -func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() } +func (rf rowFilter) set(settings *readSettings) { settings.req.Filter = rf.f.proto() } // LimitRows returns a ReadOption that will limit the number of rows to be read. func LimitRows(limit int64) ReadOption { return limitRows{limit} } type limitRows struct{ limit int64 } -func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit } +func (lr limitRows) set(settings *readSettings) { settings.req.RowsLimit = lr.limit } + +// WithFullReadStats returns a ReadOption that will request FullReadStats +// and invoke the given callback on the resulting FullReadStats. +func WithFullReadStats(f FullReadStatsFunc) ReadOption { return withFullReadStats{f} } + +type withFullReadStats struct { + f FullReadStatsFunc +} + +func (wrs withFullReadStats) set(settings *readSettings) { + settings.req.RequestStatsView = btpb.ReadRowsRequest_REQUEST_STATS_FULL + settings.fullReadStatsFunc = wrs.f +} // mutationsAreRetryable returns true if all mutations are idempotent // and therefore retryable. 
A mutation is idempotent iff all cell timestamps diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/bigtable_enablexds.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/bigtable_enablexds.go deleted file mode 100644 index 08a9d9b40d..0000000000 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/bigtable_enablexds.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2021 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build enablexds -// +build enablexds - -package bigtable - -import ( - // Install google-c2p resolver, which is required for direct path. - _ "google.golang.org/grpc/xds/googledirectpath" -) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/internal/version.go b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/internal/version.go index 6607c695d5..ac02a3ce12 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/bigtable/internal/version.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/bigtable/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.17.0" +const Version = "1.19.0" diff --git a/terraform/providers/google/vendor/cloud.google.com/go/compute/internal/version.go b/terraform/providers/google/vendor/cloud.google.com/go/compute/internal/version.go index ddddbd21f2..7513e24cc7 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/compute/internal/version.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.18.0" +const Version = "1.19.3" diff --git a/terraform/providers/google/vendor/cloud.google.com/go/go.work b/terraform/providers/google/vendor/cloud.google.com/go/go.work new file mode 100644 index 0000000000..ae8a14b957 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/go.work @@ -0,0 +1,149 @@ +go 1.18 + +use ( + . + ./accessapproval + ./accesscontextmanager + ./advisorynotifications + ./aiplatform + ./alloydb + ./analytics + ./apigateway + ./apigeeconnect + ./apigeeregistry + ./apikeys + ./appengine + ./area120 + ./artifactregistry + ./asset + ./assuredworkloads + ./automl + ./baremetalsolution + ./batch + ./beyondcorp + ./bigquery + ./bigtable + ./billing + ./binaryauthorization + ./certificatemanager + ./channel + ./cloudbuild + ./clouddms + ./cloudtasks + ./compute + ./compute/metadata + ./confidentialcomputing + ./contactcenterinsights + ./container + ./containeranalysis + ./datacatalog + ./dataflow + ./dataform + ./datafusion + ./datalabeling + ./dataplex + ./dataproc + ./dataqna + ./datastore + ./datastream + ./deploy + ./dialogflow + ./discoveryengine + ./dlp + ./documentai + ./domains + ./edgecontainer + ./errorreporting + ./essentialcontacts + ./eventarc + ./filestore + ./firestore + ./functions + ./gaming + ./gkebackup + ./gkeconnect + ./gkehub + ./gkemulticloud + ./grafeas + ./gsuiteaddons + ./iam + ./iap + ./ids + ./internal/actions + ./internal/aliasfix + ./internal/aliasgen + 
./internal/carver + ./internal/examples/fake + ./internal/examples/mock + ./internal/gapicgen + ./internal/generated/snippets + ./internal/gensnippets + ./internal/godocfx + ./internal/postprocessor + ./iot + ./kms + ./language + ./lifesciences + ./logging + ./longrunning + ./managedidentities + ./maps + ./mediatranslation + ./memcache + ./metastore + ./monitoring + ./networkconnectivity + ./networkmanagement + ./networksecurity + ./notebooks + ./optimization + ./orchestration + ./orgpolicy + ./osconfig + ./oslogin + ./phishingprotection + ./policytroubleshooter + ./privatecatalog + ./profiler + ./pubsub + ./pubsublite + ./recaptchaenterprise + ./recaptchaenterprise/v2 + ./recommendationengine + ./recommender + ./redis + ./resourcemanager + ./resourcesettings + ./retail + ./run + ./scheduler + ./secretmanager + ./security + ./securitycenter + ./servicecontrol + ./servicedirectory + ./servicemanagement + ./serviceusage + ./shell + ./spanner + ./speech + ./storage + ./storage/internal/benchmarks + ./storagetransfer + ./talent + ./texttospeech + ./tpu + ./trace + ./translate + ./video + ./videointelligence + ./vision + ./vision/v2 + ./vmmigration + ./vmwareengine + ./vpcaccess + ./webrisk + ./websecurityscanner + ./workflows + ./workstations +) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/go.work.sum b/terraform/providers/google/vendor/cloud.google.com/go/go.work.sum new file mode 100644 index 0000000000..24a40570f2 --- /dev/null +++ b/terraform/providers/google/vendor/cloud.google.com/go/go.work.sum @@ -0,0 +1,43 @@ +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/envoyproxy/go-control-plane 
v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= +github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/protoc-gen-validate v0.10.0 h1:oIfnZFdC0YhpNNEX+SuIqko4cqqVZeN9IGTrhZje83Y= +github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/grpc v1.52.0/go.mod 
h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= +modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= +modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI= +modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g= +modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= +modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA= +modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0= +modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= +modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= +modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= +modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0= +modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0= +modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= diff --git a/terraform/providers/google/vendor/cloud.google.com/go/iam/CHANGES.md b/terraform/providers/google/vendor/cloud.google.com/go/iam/CHANGES.md index 9d39f98060..d18a339ae1 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/terraform/providers/google/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,5 +1,34 @@ # Changes + +## [1.1.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.0.1...iam/v1.1.0) (2023-05-30) + + +### Features + +* 
**iam:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6)) + +## [1.0.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.0.0...iam/v1.0.1) (2023-05-08) + + +### Bug Fixes + +* **iam:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef)) + +## [1.0.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.13.0...iam/v1.0.0) (2023-04-04) + + +### Features + +* **iam:** Promote to GA ([#7627](https://github.com/googleapis/google-cloud-go/issues/7627)) ([b351906](https://github.com/googleapis/google-cloud-go/commit/b351906a10e17a02d7f7e2551bc1585fd9dc3742)) + +## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.12.0...iam/v0.13.0) (2023-03-15) + + +### Features + +* **iam:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd)) + ## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.11.0...iam/v0.12.0) (2023-02-17) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index 9ef7373d2c..fdcca8a52b 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.12 +// protoc-gen-go v1.30.0 +// protoc v4.23.1 // source: google/iam/v1/iam_policy.proto package iampb @@ -342,37 +342,37 @@ var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{ 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x9a, 0x01, 0x0a, 0x12, + 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, + 0x65, 
0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x9a, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, - 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, - 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, + 0x01, 0x2a, 0x22, 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, 0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x86, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, - 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, - 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x7f, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, + 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff --git a/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index 026c115c27..7c91cc59af 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.12 +// protoc-gen-go v1.30.0 +// protoc v4.23.1 // source: google/iam/v1/options.proto package iampb @@ -111,16 +111,16 @@ var file_google_iam_v1_options_proto_rawDesc = []byte{ 0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, - 0x42, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, - 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x7d, 0x0a, 0x11, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, + 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, + 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 
0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff --git a/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index 16bed436c6..8b82b0f58b 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.12 +// protoc-gen-go v1.30.0 +// protoc v4.23.1 // source: google/iam/v1/policy.proto package iampb @@ -214,7 +214,8 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // only if the expression evaluates to `true`. A condition can add constraints // based on attributes of the request, the resource, or both. To learn which // resources support conditions in their IAM policies, see the -// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). +// [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). // // **JSON example:** // @@ -237,7 +238,8 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // "condition": { // "title": "expirable access", // "description": "Does not grant access after Sep 2020", -// "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", +// "expression": "request.time < +// timestamp('2020-10-01T00:00:00.000Z')", // } // } // ], @@ -279,11 +281,11 @@ type Policy struct { // Any operation that affects conditional role bindings must specify version // `3`. 
This requirement applies to the following operations: // - // - Getting a policy that includes a conditional role binding - // - Adding a conditional role binding to a policy - // - Changing a conditional role binding in a policy - // - Removing any role binding, with or without a condition, from a policy - // that includes conditions + // * Getting a policy that includes a conditional role binding + // * Adding a conditional role binding to a policy + // * Changing a conditional role binding in a policy + // * Removing any role binding, with or without a condition, from a policy + // that includes conditions // // **Important:** If you use IAM Conditions, you must include the `etag` field // whenever you call `setIamPolicy`. If you omit this field, then IAM allows @@ -294,7 +296,8 @@ type Policy struct { // specify any valid version or leave the field unset. // // To learn which resources support conditions in their IAM policies, see the - // [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + // [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // Associates a list of `members`, or principals, with a `role`. Optionally, // may specify a `condition` that determines how and when the `bindings` are @@ -396,43 +399,47 @@ type Binding struct { // Specifies the principals requesting access for a Cloud Platform resource. // `members` can have the following values: // - // - `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. + // * `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. + // + // * `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. 
+ // + // * `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . + // + // + // * `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. // - // - `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. + // * `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. // - // - `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . + // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a user that has been recently deleted. For + // example, `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered user + // retains the role in the binding. // - // - `serviceAccount:{emailid}`: An email address that represents a service - // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a service account that has been recently + // deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains the + // role in the binding. // - // - `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. + // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a Google group that has been recently + // deleted. For example, `admins@example.com?uid=123456789012345678901`. 
If + // the group is recovered, this value reverts to `group:{emailid}` and the + // recovered group retains the role in the binding. // - // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a user that has been recently deleted. For - // example, `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered user - // retains the role in the binding. // - // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus - // unique identifier) representing a service account that has been recently - // deleted. For example, - // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account retains the - // role in the binding. + // * `domain:{domain}`: The G Suite domain (primary) that represents all the + // users of that domain. For example, `google.com` or `example.com`. // - // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a Google group that has been recently - // deleted. For example, `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` and the - // recovered group retains the role in the binding. // - // - `domain:{domain}`: The G Suite domain (primary) that represents all the - // users of that domain. For example, `google.com` or `example.com`. Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` // The condition that is associated with this binding. 
// @@ -640,7 +647,8 @@ type AuditLogConfig struct { LogType AuditLogConfig_LogType `protobuf:"varint,1,opt,name=log_type,json=logType,proto3,enum=google.iam.v1.AuditLogConfig_LogType" json:"log_type,omitempty"` // Specifies the identities that do not cause logging for this type of // permission. - // Follows the same format of [Binding.members][google.iam.v1.Binding.members]. + // Follows the same format of + // [Binding.members][google.iam.v1.Binding.members]. ExemptedMembers []string `protobuf:"bytes,2,rep,name=exempted_members,json=exemptedMembers,proto3" json:"exempted_members,omitempty"` } @@ -999,16 +1007,15 @@ var file_google_iam_v1_policy_proto_rawDesc = []byte{ 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, - 0x45, 0x10, 0x02, 0x42, 0x83, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, - 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x45, 0x10, 0x02, 0x42, 0x7c, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 
0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, + 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, + 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/cloud.google.com/go/longrunning/CHANGES.md b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/CHANGES.md index d4b44b09ac..9c793afa7c 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/longrunning/CHANGES.md +++ b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/CHANGES.md @@ -1,5 +1,19 @@ # Changes +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.4.2...longrunning/v0.5.0) (2023-05-30) + + +### Features + +* **longrunning:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6)) + +## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.4.1...longrunning/v0.4.2) (2023-05-08) + + +### Bug Fixes + +* **longrunning:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef)) + ## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.4.0...longrunning/v0.4.1) (2023-02-14) diff --git a/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/doc.go index 
58b74d9605..57b1bbf2cb 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/doc.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/doc.go @@ -17,8 +17,6 @@ // Package longrunning is an auto-generated package for the // Long Running Operations API. // -// NOTE: This package is in alpha. It is not stable, and is likely to change. -// // # General documentation // // For information about setting deadlines, reusing contexts, and more diff --git a/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go index 18f7147604..23ec5ee28d 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go +++ b/terraform/providers/google/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.12 +// protoc-gen-go v1.30.0 +// protoc v4.23.1 // source: google/longrunning/operations.proto package longrunningpb @@ -70,7 +70,6 @@ type Operation struct { // If `done` == `true`, exactly one of `error` or `response` is set. 
// // Types that are assignable to Result: - // // *Operation_Error // *Operation_Response Result isOperation_Result `protobuf_oneof:"result"` @@ -697,34 +696,34 @@ var file_google_longrunning_operations_proto_rawDesc = []byte{ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x2f, 0x76, 0x31, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x7d, 0xda, 0x41, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x6e, 0x73, 0x65, 0x22, 0x2b, 0xda, 0x41, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x7d, 0x12, 0x7f, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, - 0x12, 0x18, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x7e, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x70, 0x65, 0x72, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x27, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x12, 0x18, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2a, + 0x7d, 0x12, 0x7e, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, - 0x2a, 0x18, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x88, 0x01, 0x0a, 0x0f, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x72, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x27, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x2a, 0x18, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2a, + 0x7d, 0x12, 0x88, 0x01, 0x0a, 0x0f, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x31, 0x82, 
0xd3, 0xe4, 0x93, 0x02, - 0x24, 0x22, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x3a, 0x63, 0x61, 0x6e, 0x63, - 0x65, 0x6c, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5a, 0x0a, 0x0d, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x31, 0xda, 0x41, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x3a, 0x01, 0x2a, 0x22, 0x1f, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x3a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x5a, 0x0a, 0x0d, 0x57, 0x61, 0x69, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, @@ -739,17 +738,17 @@ var file_google_longrunning_operations_proto_rawDesc = []byte{ 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, - 0x66, 0x6f, 0x42, 0x97, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x66, 0x6f, 0x42, 0x9d, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x42, 0x0f, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 
0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, - 0x69, 0x6e, 0x67, 0x3b, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0xf8, - 0x01, 0x01, 0xaa, 0x02, 0x12, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, - 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0xca, 0x02, 0x12, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x5c, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x5a, 0x43, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x67, 0x65, 0x6e, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, + 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x12, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0xca, 0x02, 0x12, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/terraform/providers/google/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json index 7d1d51f0ea..d074fb234f 100644 --- a/terraform/providers/google/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json +++ b/terraform/providers/google/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json @@ -9,9 +9,15 @@ "accesscontextmanager": { "component": "accesscontextmanager" }, + "advisorynotifications": { + "component": "advisorynotifications" + }, "aiplatform": { "component": "aiplatform" }, + "alloydb": { + "component": "alloydb" + }, "analytics": { "component": "analytics" }, @@ -81,6 +87,9 @@ 
"compute/metadata": { "component": "compute/metadata" }, + "confidentialcomputing": { + "component": "confidentialcomputing" + }, "contactcenterinsights": { "component": "contactcenterinsights" }, @@ -123,6 +132,9 @@ "dialogflow": { "component": "dialogflow" }, + "discoveryengine": { + "component": "discoveryengine" + }, "dlp": { "component": "dlp" }, @@ -300,12 +312,18 @@ "speech": { "component": "speech" }, + "storageinsights": { + "component": "storageinsights" + }, "storagetransfer": { "component": "storagetransfer" }, "talent": { "component": "talent" }, + "support": { + "component": "support" + }, "texttospeech": { "component": "texttospeech" }, @@ -344,6 +362,9 @@ }, "workflows": { "component": "workflows" + }, + "workstations": { + "component": "workstations" } }, "plugins": ["sentence-case"] diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.go index caf3c57e58..80aa07f346 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.go @@ -85,7 +85,7 @@ func (v WorkloadComplianceRegimeEnum) Validate() error { // Empty enum is okay. 
return nil } - for _, s := range []string{"COMPLIANCE_REGIME_UNSPECIFIED", "IL4", "CJIS", "FEDRAMP_HIGH", "FEDRAMP_MODERATE", "US_REGIONAL_ACCESS"} { + for _, s := range []string{"COMPLIANCE_REGIME_UNSPECIFIED", "IL4", "CJIS", "FEDRAMP_HIGH", "FEDRAMP_MODERATE", "US_REGIONAL_ACCESS", "HIPAA", "EU_REGIONS_AND_SUPPORT", "CA_REGIONS_AND_SUPPORT", "ITAR", "AU_REGIONS_AND_US_SUPPORT", "ASSURED_WORKLOADS_FOR_PARTNERS"} { if string(v) == s { return nil } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.yaml index 7c8c1e8416..55a52794b0 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload.yaml @@ -98,7 +98,8 @@ components: x-dcl-go-type: WorkloadComplianceRegimeEnum description: 'Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, - FEDRAMP_MODERATE, US_REGIONAL_ACCESS' + FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, + ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS' x-kubernetes-immutable: true enum: - COMPLIANCE_REGIME_UNSPECIFIED @@ -107,6 +108,12 @@ components: - FEDRAMP_HIGH - FEDRAMP_MODERATE - US_REGIONAL_ACCESS + - HIPAA + - EU_REGIONS_AND_SUPPORT + - CA_REGIONS_AND_SUPPORT + - ITAR + - AU_REGIONS_AND_US_SUPPORT + - ASSURED_WORKLOADS_FOR_PARTNERS createTime: type: string format: date-time diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_schema.go index 0f4dc6de54..d14e40b7a5 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_schema.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_schema.go @@ -130,7 +130,7 @@ func DCLWorkloadSchema() *dcl.Schema { Type: "string", GoName: "ComplianceRegime", GoType: "WorkloadComplianceRegimeEnum", - Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS", + Description: "Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS", Immutable: true, Enum: []string{ "COMPLIANCE_REGIME_UNSPECIFIED", @@ -139,6 +139,12 @@ func DCLWorkloadSchema() *dcl.Schema { "FEDRAMP_HIGH", "FEDRAMP_MODERATE", "US_REGIONAL_ACCESS", + "HIPAA", + "EU_REGIONS_AND_SUPPORT", + "CA_REGIONS_AND_SUPPORT", + "ITAR", + "AU_REGIONS_AND_US_SUPPORT", + "ASSURED_WORKLOADS_FOR_PARTNERS", }, }, "createTime": &dcl.Property{ diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_yaml_embed.go index e1662d5795..8c0ce3a5ad 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads/workload_yaml_embed.go @@ -17,7 +17,7 @@ package assuredworkloads // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/assuredworkloads/workload.yaml -var YAML_workload = []byte("info:\n title: AssuredWorkloads/Workload\n description: The AssuredWorkloads Workload resource\n x-dcl-struct-name: Workload\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Workload\n parameters:\n - name: workload\n required: true\n description: A full instance of a Workload\n apply:\n description: The function used to apply information about a Workload\n parameters:\n - name: workload\n required: true\n description: A full instance of a Workload\n delete:\n description: The function 
used to delete a Workload\n parameters:\n - name: workload\n required: true\n description: A full instance of a Workload\n deleteAll:\n description: The function used to delete all Workload\n parameters:\n - name: organization\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Workload\n parameters:\n - name: organization\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Workload:\n title: Workload\n x-dcl-id: organizations/{{organization}}/locations/{{location}}/workloads/{{name}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: organization\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - displayName\n - complianceRegime\n - billingAccount\n - organization\n - location\n properties:\n billingAccount:\n type: string\n x-dcl-go-name: BillingAccount\n description: Required. Input only. The billing account used for the resources\n which are direct children of workload. This billing account is initially\n associated with the resources created as part of Workload creation. After\n the initial creation of these resources, the customer can change the assigned\n billing account. The resource name has the form `billingAccounts/{billing_account_id}`.\n For example, 'billingAccounts/012345-567890-ABCDEF`.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/BillingAccount\n field: name\n x-dcl-mutable-unreadable: true\n complianceRegime:\n type: string\n x-dcl-go-name: ComplianceRegime\n x-dcl-go-type: WorkloadComplianceRegimeEnum\n description: 'Required. Immutable. Compliance Regime associated with this\n workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH,\n FEDRAMP_MODERATE, US_REGIONAL_ACCESS'\n x-kubernetes-immutable: true\n enum:\n - COMPLIANCE_REGIME_UNSPECIFIED\n - IL4\n - CJIS\n - FEDRAMP_HIGH\n - FEDRAMP_MODERATE\n - US_REGIONAL_ACCESS\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Immutable. The Workload creation timestamp.\n x-kubernetes-immutable: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: 'Required. The user-assigned display name of the Workload.\n When present it must be between 4 to 30 characters. Allowed characters\n are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example:\n My Workload'\n kmsSettings:\n type: object\n x-dcl-go-name: KmsSettings\n x-dcl-go-type: WorkloadKmsSettings\n description: Input only. Settings used to create a CMEK crypto key. When\n set a project with a KMS CMEK key is provisioned. This field is mandatory\n for a subset of Compliance Regimes.\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n required:\n - nextRotationTime\n - rotationPeriod\n properties:\n nextRotationTime:\n type: string\n format: date-time\n x-dcl-go-name: NextRotationTime\n description: Required. Input only. Immutable. The time at which the\n Key Management Service will automatically create a new version of\n the crypto key and mark it as the primary.\n x-kubernetes-immutable: true\n rotationPeriod:\n type: string\n x-dcl-go-name: RotationPeriod\n description: Required. Input only. Immutable. will be advanced by this\n period when the Key Management Service automatically rotates a key.\n Must be at least 24 hours and at most 876,000 hours.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. 
Labels applied to the workload.\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Output only. The resource name of the workload.\n x-kubernetes-immutable: true\n x-dcl-server-generated-parameter: true\n organization:\n type: string\n x-dcl-go-name: Organization\n description: The organization for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Organization\n field: name\n parent: true\n provisionedResourcesParent:\n type: string\n x-dcl-go-name: ProvisionedResourcesParent\n description: 'Input only. The parent resource for the resources managed\n by this Assured Workload. May be either an organization or a folder. Must\n be the same or a child of the Workload parent. If not specified all resources\n are created under the Workload parent. Formats: folders/{folder_id}, organizations/{organization_id}'\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n resourceSettings:\n type: array\n x-dcl-go-name: ResourceSettings\n description: Input only. Resource properties that are used to customize\n workload resources. These properties (such as custom project id) will\n be used to create workload resources if possible. This field is optional.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkloadResourceSettings\n properties:\n resourceId:\n type: string\n x-dcl-go-name: ResourceId\n description: Resource identifier. For a project this represents project_number.\n If the project is already taken, the workload creation will fail.\n x-kubernetes-immutable: true\n resourceType:\n type: string\n x-dcl-go-name: ResourceType\n x-dcl-go-type: WorkloadResourceSettingsResourceTypeEnum\n description: 'Indicates the type of resource. 
This field should be\n specified to correspond the id to the right project type (CONSUMER_PROJECT\n or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED,\n CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER'\n x-kubernetes-immutable: true\n enum:\n - RESOURCE_TYPE_UNSPECIFIED\n - CONSUMER_PROJECT\n - ENCRYPTION_KEYS_PROJECT\n - KEYRING\n - CONSUMER_FOLDER\n x-dcl-mutable-unreadable: true\n resources:\n type: array\n x-dcl-go-name: Resources\n readOnly: true\n description: Output only. The resources associated with this workload. These\n resources will be created when creating the workload. If any of the projects\n already exist, the workload creation will fail. Always read only.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkloadResources\n properties:\n resourceId:\n type: integer\n format: int64\n x-dcl-go-name: ResourceId\n description: Resource identifier. For a project this represents project_number.\n x-kubernetes-immutable: true\n resourceType:\n type: string\n x-dcl-go-name: ResourceType\n x-dcl-go-type: WorkloadResourcesResourceTypeEnum\n description: 'Indicates the type of resource. 
Possible values: RESOURCE_TYPE_UNSPECIFIED,\n CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER'\n x-kubernetes-immutable: true\n enum:\n - RESOURCE_TYPE_UNSPECIFIED\n - CONSUMER_PROJECT\n - ENCRYPTION_KEYS_PROJECT\n - KEYRING\n - CONSUMER_FOLDER\n") +var YAML_workload = []byte("info:\n title: AssuredWorkloads/Workload\n description: The AssuredWorkloads Workload resource\n x-dcl-struct-name: Workload\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Workload\n parameters:\n - name: workload\n required: true\n description: A full instance of a Workload\n apply:\n description: The function used to apply information about a Workload\n parameters:\n - name: workload\n required: true\n description: A full instance of a Workload\n delete:\n description: The function used to delete a Workload\n parameters:\n - name: workload\n required: true\n description: A full instance of a Workload\n deleteAll:\n description: The function used to delete all Workload\n parameters:\n - name: organization\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Workload\n parameters:\n - name: organization\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Workload:\n title: Workload\n x-dcl-id: organizations/{{organization}}/locations/{{location}}/workloads/{{name}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: organization\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - displayName\n - complianceRegime\n - billingAccount\n - organization\n - location\n properties:\n billingAccount:\n type: string\n x-dcl-go-name: BillingAccount\n description: Required. Input only. 
The billing account used for the resources\n which are direct children of workload. This billing account is initially\n associated with the resources created as part of Workload creation. After\n the initial creation of these resources, the customer can change the assigned\n billing account. The resource name has the form `billingAccounts/{billing_account_id}`.\n For example, 'billingAccounts/012345-567890-ABCDEF`.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/BillingAccount\n field: name\n x-dcl-mutable-unreadable: true\n complianceRegime:\n type: string\n x-dcl-go-name: ComplianceRegime\n x-dcl-go-type: WorkloadComplianceRegimeEnum\n description: 'Required. Immutable. Compliance Regime associated with this\n workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH,\n FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT,\n ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS'\n x-kubernetes-immutable: true\n enum:\n - COMPLIANCE_REGIME_UNSPECIFIED\n - IL4\n - CJIS\n - FEDRAMP_HIGH\n - FEDRAMP_MODERATE\n - US_REGIONAL_ACCESS\n - HIPAA\n - EU_REGIONS_AND_SUPPORT\n - CA_REGIONS_AND_SUPPORT\n - ITAR\n - AU_REGIONS_AND_US_SUPPORT\n - ASSURED_WORKLOADS_FOR_PARTNERS\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Immutable. The Workload creation timestamp.\n x-kubernetes-immutable: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: 'Required. The user-assigned display name of the Workload.\n When present it must be between 4 to 30 characters. Allowed characters\n are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example:\n My Workload'\n kmsSettings:\n type: object\n x-dcl-go-name: KmsSettings\n x-dcl-go-type: WorkloadKmsSettings\n description: Input only. Settings used to create a CMEK crypto key. 
When\n set a project with a KMS CMEK key is provisioned. This field is mandatory\n for a subset of Compliance Regimes.\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n required:\n - nextRotationTime\n - rotationPeriod\n properties:\n nextRotationTime:\n type: string\n format: date-time\n x-dcl-go-name: NextRotationTime\n description: Required. Input only. Immutable. The time at which the\n Key Management Service will automatically create a new version of\n the crypto key and mark it as the primary.\n x-kubernetes-immutable: true\n rotationPeriod:\n type: string\n x-dcl-go-name: RotationPeriod\n description: Required. Input only. Immutable. will be advanced by this\n period when the Key Management Service automatically rotates a key.\n Must be at least 24 hours and at most 876,000 hours.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. Labels applied to the workload.\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Output only. The resource name of the workload.\n x-kubernetes-immutable: true\n x-dcl-server-generated-parameter: true\n organization:\n type: string\n x-dcl-go-name: Organization\n description: The organization for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Organization\n field: name\n parent: true\n provisionedResourcesParent:\n type: string\n x-dcl-go-name: ProvisionedResourcesParent\n description: 'Input only. The parent resource for the resources managed\n by this Assured Workload. May be either an organization or a folder. Must\n be the same or a child of the Workload parent. If not specified all resources\n are created under the Workload parent. 
Formats: folders/{folder_id}, organizations/{organization_id}'\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n resourceSettings:\n type: array\n x-dcl-go-name: ResourceSettings\n description: Input only. Resource properties that are used to customize\n workload resources. These properties (such as custom project id) will\n be used to create workload resources if possible. This field is optional.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkloadResourceSettings\n properties:\n resourceId:\n type: string\n x-dcl-go-name: ResourceId\n description: Resource identifier. For a project this represents project_number.\n If the project is already taken, the workload creation will fail.\n x-kubernetes-immutable: true\n resourceType:\n type: string\n x-dcl-go-name: ResourceType\n x-dcl-go-type: WorkloadResourceSettingsResourceTypeEnum\n description: 'Indicates the type of resource. This field should be\n specified to correspond the id to the right project type (CONSUMER_PROJECT\n or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED,\n CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER'\n x-kubernetes-immutable: true\n enum:\n - RESOURCE_TYPE_UNSPECIFIED\n - CONSUMER_PROJECT\n - ENCRYPTION_KEYS_PROJECT\n - KEYRING\n - CONSUMER_FOLDER\n x-dcl-mutable-unreadable: true\n resources:\n type: array\n x-dcl-go-name: Resources\n readOnly: true\n description: Output only. The resources associated with this workload. These\n resources will be created when creating the workload. If any of the projects\n already exist, the workload creation will fail. Always read only.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: WorkloadResources\n properties:\n resourceId:\n type: integer\n format: int64\n x-dcl-go-name: ResourceId\n description: Resource identifier. 
For a project this represents project_number.\n x-kubernetes-immutable: true\n resourceType:\n type: string\n x-dcl-go-name: ResourceType\n x-dcl-go-type: WorkloadResourcesResourceTypeEnum\n description: 'Indicates the type of resource. Possible values: RESOURCE_TYPE_UNSPECIFIED,\n CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER'\n x-kubernetes-immutable: true\n enum:\n - RESOURCE_TYPE_UNSPECIFIED\n - CONSUMER_PROJECT\n - ENCRYPTION_KEYS_PROJECT\n - KEYRING\n - CONSUMER_FOLDER\n") -// 9623 bytes -// MD5: dd68fa5affc7ba301aff1c33c5e65c87 +// 9941 bytes +// MD5: e84d640a769006efc0dd28ce22b54bf5 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/client.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/client.go new file mode 100644 index 0000000000..cab9e49b9e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/client.go @@ -0,0 +1,32 @@ +// Copyright 2023 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Package cloudbuildv2 defines operations in the declarative SDK. +package cloudbuildv2 + +import ( + "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +) + +// The Client is the base struct of all operations. 
This will receive the +// Get, Delete, List, and Apply operations on all resources. +type Client struct { + Config *dcl.Config +} + +// NewClient creates a client that retries all operations a few times each. +func NewClient(c *dcl.Config) *Client { + return &Client{ + Config: c, + } +} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection.go new file mode 100644 index 0000000000..53944ac935 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection.go @@ -0,0 +1,885 @@ +// Copyright 2023 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package cloudbuildv2 + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "time" + + "google.golang.org/api/googleapi" + "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +) + +type Connection struct { + Name *string `json:"name"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + GithubConfig *ConnectionGithubConfig `json:"githubConfig"` + GithubEnterpriseConfig *ConnectionGithubEnterpriseConfig `json:"githubEnterpriseConfig"` + GitlabConfig *ConnectionGitlabConfig `json:"gitlabConfig"` + InstallationState *ConnectionInstallationState `json:"installationState"` + Disabled *bool `json:"disabled"` + Reconciling *bool `json:"reconciling"` + Annotations map[string]string `json:"annotations"` + Etag *string `json:"etag"` + Project *string `json:"project"` + Location *string `json:"location"` +} + +func (r *Connection) String() string { + return dcl.SprintResource(r) +} + +// The enum ConnectionInstallationStateStageEnum. +type ConnectionInstallationStateStageEnum string + +// ConnectionInstallationStateStageEnumRef returns a *ConnectionInstallationStateStageEnum with the value of string s +// If the empty string is provided, nil is returned. +func ConnectionInstallationStateStageEnumRef(s string) *ConnectionInstallationStateStageEnum { + v := ConnectionInstallationStateStageEnum(s) + return &v +} + +func (v ConnectionInstallationStateStageEnum) Validate() error { + if string(v) == "" { + // Empty enum is okay. 
+ return nil + } + for _, s := range []string{"STAGE_UNSPECIFIED", "PENDING_CREATE_APP", "PENDING_USER_OAUTH", "PENDING_INSTALL_APP", "COMPLETE"} { + if string(v) == s { + return nil + } + } + return &dcl.EnumInvalidError{ + Enum: "ConnectionInstallationStateStageEnum", + Value: string(v), + Valid: []string{}, + } +} + +type ConnectionGithubConfig struct { + empty bool `json:"-"` + AuthorizerCredential *ConnectionGithubConfigAuthorizerCredential `json:"authorizerCredential"` + AppInstallationId *int64 `json:"appInstallationId"` +} + +type jsonConnectionGithubConfig ConnectionGithubConfig + +func (r *ConnectionGithubConfig) UnmarshalJSON(data []byte) error { + var res jsonConnectionGithubConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyConnectionGithubConfig + } else { + + r.AuthorizerCredential = res.AuthorizerCredential + + r.AppInstallationId = res.AppInstallationId + + } + return nil +} + +// This object is used to assert a desired state where this ConnectionGithubConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyConnectionGithubConfig *ConnectionGithubConfig = &ConnectionGithubConfig{empty: true} + +func (r *ConnectionGithubConfig) Empty() bool { + return r.empty +} + +func (r *ConnectionGithubConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ConnectionGithubConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ConnectionGithubConfigAuthorizerCredential struct { + empty bool `json:"-"` + OAuthTokenSecretVersion *string `json:"oauthTokenSecretVersion"` + Username *string `json:"username"` +} + +type jsonConnectionGithubConfigAuthorizerCredential ConnectionGithubConfigAuthorizerCredential + +func (r *ConnectionGithubConfigAuthorizerCredential) UnmarshalJSON(data []byte) error { + var res jsonConnectionGithubConfigAuthorizerCredential + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyConnectionGithubConfigAuthorizerCredential + } else { + + r.OAuthTokenSecretVersion = res.OAuthTokenSecretVersion + + r.Username = res.Username + + } + return nil +} + +// This object is used to assert a desired state where this ConnectionGithubConfigAuthorizerCredential is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyConnectionGithubConfigAuthorizerCredential *ConnectionGithubConfigAuthorizerCredential = &ConnectionGithubConfigAuthorizerCredential{empty: true} + +func (r *ConnectionGithubConfigAuthorizerCredential) Empty() bool { + return r.empty +} + +func (r *ConnectionGithubConfigAuthorizerCredential) String() string { + return dcl.SprintResource(r) +} + +func (r *ConnectionGithubConfigAuthorizerCredential) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ConnectionGithubEnterpriseConfig struct { + empty bool `json:"-"` + HostUri *string `json:"hostUri"` + AppId *int64 `json:"appId"` + AppSlug *string `json:"appSlug"` + PrivateKeySecretVersion *string `json:"privateKeySecretVersion"` + WebhookSecretSecretVersion *string `json:"webhookSecretSecretVersion"` + AppInstallationId *int64 `json:"appInstallationId"` + ServiceDirectoryConfig *ConnectionGithubEnterpriseConfigServiceDirectoryConfig `json:"serviceDirectoryConfig"` + SslCa *string `json:"sslCa"` +} + +type jsonConnectionGithubEnterpriseConfig ConnectionGithubEnterpriseConfig + +func (r *ConnectionGithubEnterpriseConfig) UnmarshalJSON(data []byte) error { + var res jsonConnectionGithubEnterpriseConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyConnectionGithubEnterpriseConfig + } else { + + r.HostUri = res.HostUri + + r.AppId = res.AppId + + r.AppSlug = res.AppSlug + + r.PrivateKeySecretVersion = res.PrivateKeySecretVersion + + r.WebhookSecretSecretVersion = res.WebhookSecretSecretVersion + + r.AppInstallationId = res.AppInstallationId + + r.ServiceDirectoryConfig = res.ServiceDirectoryConfig + + r.SslCa = res.SslCa + + } + return nil +} + +// This object is used to assert a desired state where this 
ConnectionGithubEnterpriseConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyConnectionGithubEnterpriseConfig *ConnectionGithubEnterpriseConfig = &ConnectionGithubEnterpriseConfig{empty: true} + +func (r *ConnectionGithubEnterpriseConfig) Empty() bool { + return r.empty +} + +func (r *ConnectionGithubEnterpriseConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ConnectionGithubEnterpriseConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ConnectionGithubEnterpriseConfigServiceDirectoryConfig struct { + empty bool `json:"-"` + Service *string `json:"service"` +} + +type jsonConnectionGithubEnterpriseConfigServiceDirectoryConfig ConnectionGithubEnterpriseConfigServiceDirectoryConfig + +func (r *ConnectionGithubEnterpriseConfigServiceDirectoryConfig) UnmarshalJSON(data []byte) error { + var res jsonConnectionGithubEnterpriseConfigServiceDirectoryConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyConnectionGithubEnterpriseConfigServiceDirectoryConfig + } else { + + r.Service = res.Service + + } + return nil +} + +// This object is used to assert a desired state where this ConnectionGithubEnterpriseConfigServiceDirectoryConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyConnectionGithubEnterpriseConfigServiceDirectoryConfig *ConnectionGithubEnterpriseConfigServiceDirectoryConfig = &ConnectionGithubEnterpriseConfigServiceDirectoryConfig{empty: true} + +func (r *ConnectionGithubEnterpriseConfigServiceDirectoryConfig) Empty() bool { + return r.empty +} + +func (r *ConnectionGithubEnterpriseConfigServiceDirectoryConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ConnectionGithubEnterpriseConfigServiceDirectoryConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ConnectionGitlabConfig struct { + empty bool `json:"-"` + HostUri *string `json:"hostUri"` + WebhookSecretSecretVersion *string `json:"webhookSecretSecretVersion"` + ReadAuthorizerCredential *ConnectionGitlabConfigReadAuthorizerCredential `json:"readAuthorizerCredential"` + AuthorizerCredential *ConnectionGitlabConfigAuthorizerCredential `json:"authorizerCredential"` + ServiceDirectoryConfig *ConnectionGitlabConfigServiceDirectoryConfig `json:"serviceDirectoryConfig"` + SslCa *string `json:"sslCa"` + ServerVersion *string `json:"serverVersion"` +} + +type jsonConnectionGitlabConfig ConnectionGitlabConfig + +func (r *ConnectionGitlabConfig) UnmarshalJSON(data []byte) error { + var res jsonConnectionGitlabConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyConnectionGitlabConfig + } else { + + r.HostUri = res.HostUri + + r.WebhookSecretSecretVersion = res.WebhookSecretSecretVersion + + r.ReadAuthorizerCredential = res.ReadAuthorizerCredential + + r.AuthorizerCredential = res.AuthorizerCredential + + r.ServiceDirectoryConfig = res.ServiceDirectoryConfig + + r.SslCa = res.SslCa + + r.ServerVersion = res.ServerVersion + + } + return nil +} + +// 
This object is used to assert a desired state where this ConnectionGitlabConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyConnectionGitlabConfig *ConnectionGitlabConfig = &ConnectionGitlabConfig{empty: true} + +func (r *ConnectionGitlabConfig) Empty() bool { + return r.empty +} + +func (r *ConnectionGitlabConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ConnectionGitlabConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ConnectionGitlabConfigReadAuthorizerCredential struct { + empty bool `json:"-"` + UserTokenSecretVersion *string `json:"userTokenSecretVersion"` + Username *string `json:"username"` +} + +type jsonConnectionGitlabConfigReadAuthorizerCredential ConnectionGitlabConfigReadAuthorizerCredential + +func (r *ConnectionGitlabConfigReadAuthorizerCredential) UnmarshalJSON(data []byte) error { + var res jsonConnectionGitlabConfigReadAuthorizerCredential + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyConnectionGitlabConfigReadAuthorizerCredential + } else { + + r.UserTokenSecretVersion = res.UserTokenSecretVersion + + r.Username = res.Username + + } + return nil +} + +// This object is used to assert a desired state where this ConnectionGitlabConfigReadAuthorizerCredential is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyConnectionGitlabConfigReadAuthorizerCredential *ConnectionGitlabConfigReadAuthorizerCredential = &ConnectionGitlabConfigReadAuthorizerCredential{empty: true} + +func (r *ConnectionGitlabConfigReadAuthorizerCredential) Empty() bool { + return r.empty +} + +func (r *ConnectionGitlabConfigReadAuthorizerCredential) String() string { + return dcl.SprintResource(r) +} + +func (r *ConnectionGitlabConfigReadAuthorizerCredential) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ConnectionGitlabConfigAuthorizerCredential struct { + empty bool `json:"-"` + UserTokenSecretVersion *string `json:"userTokenSecretVersion"` + Username *string `json:"username"` +} + +type jsonConnectionGitlabConfigAuthorizerCredential ConnectionGitlabConfigAuthorizerCredential + +func (r *ConnectionGitlabConfigAuthorizerCredential) UnmarshalJSON(data []byte) error { + var res jsonConnectionGitlabConfigAuthorizerCredential + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyConnectionGitlabConfigAuthorizerCredential + } else { + + r.UserTokenSecretVersion = res.UserTokenSecretVersion + + r.Username = res.Username + + } + return nil +} + +// This object is used to assert a desired state where this ConnectionGitlabConfigAuthorizerCredential is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyConnectionGitlabConfigAuthorizerCredential *ConnectionGitlabConfigAuthorizerCredential = &ConnectionGitlabConfigAuthorizerCredential{empty: true} + +func (r *ConnectionGitlabConfigAuthorizerCredential) Empty() bool { + return r.empty +} + +func (r *ConnectionGitlabConfigAuthorizerCredential) String() string { + return dcl.SprintResource(r) +} + +func (r *ConnectionGitlabConfigAuthorizerCredential) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ConnectionGitlabConfigServiceDirectoryConfig struct { + empty bool `json:"-"` + Service *string `json:"service"` +} + +type jsonConnectionGitlabConfigServiceDirectoryConfig ConnectionGitlabConfigServiceDirectoryConfig + +func (r *ConnectionGitlabConfigServiceDirectoryConfig) UnmarshalJSON(data []byte) error { + var res jsonConnectionGitlabConfigServiceDirectoryConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyConnectionGitlabConfigServiceDirectoryConfig + } else { + + r.Service = res.Service + + } + return nil +} + +// This object is used to assert a desired state where this ConnectionGitlabConfigServiceDirectoryConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyConnectionGitlabConfigServiceDirectoryConfig *ConnectionGitlabConfigServiceDirectoryConfig = &ConnectionGitlabConfigServiceDirectoryConfig{empty: true} + +func (r *ConnectionGitlabConfigServiceDirectoryConfig) Empty() bool { + return r.empty +} + +func (r *ConnectionGitlabConfigServiceDirectoryConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *ConnectionGitlabConfigServiceDirectoryConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type ConnectionInstallationState struct { + empty bool `json:"-"` + Stage *ConnectionInstallationStateStageEnum `json:"stage"` + Message *string `json:"message"` + ActionUri *string `json:"actionUri"` +} + +type jsonConnectionInstallationState ConnectionInstallationState + +func (r *ConnectionInstallationState) UnmarshalJSON(data []byte) error { + var res jsonConnectionInstallationState + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyConnectionInstallationState + } else { + + r.Stage = res.Stage + + r.Message = res.Message + + r.ActionUri = res.ActionUri + + } + return nil +} + +// This object is used to assert a desired state where this ConnectionInstallationState is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyConnectionInstallationState *ConnectionInstallationState = &ConnectionInstallationState{empty: true} + +func (r *ConnectionInstallationState) Empty() bool { + return r.empty +} + +func (r *ConnectionInstallationState) String() string { + return dcl.SprintResource(r) +} + +func (r *ConnectionInstallationState) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Connection) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "cloudbuildv2", + Type: "Connection", + Version: "cloudbuildv2", + } +} + +func (r *Connection) ID() (string, error) { + if err := extractConnectionFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "github_config": dcl.ValueOrEmptyString(nr.GithubConfig), + "github_enterprise_config": dcl.ValueOrEmptyString(nr.GithubEnterpriseConfig), + "gitlab_config": dcl.ValueOrEmptyString(nr.GitlabConfig), + "installation_state": dcl.ValueOrEmptyString(nr.InstallationState), + "disabled": dcl.ValueOrEmptyString(nr.Disabled), + "reconciling": dcl.ValueOrEmptyString(nr.Reconciling), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.Nprintf("projects/{{project}}/locations/{{location}}/connections/{{name}}", params), nil +} + +const ConnectionMaxPage = -1 + +type ConnectionList struct { + Items []*Connection + + nextToken string + + pageSize int32 + + 
resource *Connection +} + +func (l *ConnectionList) HasNext() bool { + return l.nextToken != "" +} + +func (l *ConnectionList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listConnection(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListConnection(ctx context.Context, project, location string) (*ConnectionList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListConnectionWithMaxResults(ctx, project, location, ConnectionMaxPage) + +} + +func (c *Client) ListConnectionWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*ConnectionList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &Connection{ + Project: &project, + Location: &location, + } + items, token, err := c.listConnection(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &ConnectionList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetConnection(ctx context.Context, r *Connection) (*Connection, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
+ extractConnectionFields(r) + + b, err := c.getConnectionRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalConnection(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeConnectionNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractConnectionFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteConnection(ctx context.Context, r *Connection) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Connection resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Connection...") + deleteOp := deleteConnectionOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllConnection deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllConnection(ctx context.Context, project, location string, filter func(*Connection) bool) error { + listObj, err := c.ListConnection(ctx, project, location) + if err != nil { + return err + } + + err = c.deleteAllConnection(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllConnection(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyConnection(ctx context.Context, rawDesired *Connection, opts ...dcl.ApplyOption) (*Connection, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Connection + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyConnectionHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyConnectionHelper(c *Client, ctx context.Context, rawDesired *Connection, opts ...dcl.ApplyOption) (*Connection, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyConnection...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractConnectionFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.connectionDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToConnectionDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []connectionApiOperation + if create { + ops = append(ops, &createConnectionOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyConnectionDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyConnectionDiff(c *Client, ctx context.Context, desired *Connection, rawDesired *Connection, ops []connectionApiOperation, opts ...dcl.ApplyOption) (*Connection, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetConnection(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createConnectionOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapConnection(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeConnectionNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeConnectionNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeConnectionDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractConnectionFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractConnectionFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffConnection(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection.yaml new file mode 100644 index 0000000000..6ba403978e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection.yaml @@ -0,0 +1,387 @@ +# Copyright 2023 Google LLC. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+info: + title: Cloudbuildv2/Connection + description: The Cloudbuildv2 Connection resource + x-dcl-struct-name: Connection + x-dcl-has-iam: false +paths: + get: + description: The function used to get information about a Connection + parameters: + - name: connection + required: true + description: A full instance of a Connection + apply: + description: The function used to apply information about a Connection + parameters: + - name: connection + required: true + description: A full instance of a Connection + delete: + description: The function used to delete a Connection + parameters: + - name: connection + required: true + description: A full instance of a Connection + deleteAll: + description: The function used to delete all Connection + parameters: + - name: project + required: true + schema: + type: string + - name: location + required: true + schema: + type: string + list: + description: The function used to list information about many Connection + parameters: + - name: project + required: true + schema: + type: string + - name: location + required: true + schema: + type: string +components: + schemas: + Connection: + title: Connection + x-dcl-id: projects/{{project}}/locations/{{location}}/connections/{{name}} + x-dcl-parent-container: project + x-dcl-has-create: true + x-dcl-has-iam: false + x-dcl-read-timeout: 0 + x-dcl-apply-timeout: 0 + x-dcl-delete-timeout: 0 + type: object + required: + - name + - project + - location + properties: + annotations: + type: object + additionalProperties: + type: string + x-dcl-go-name: Annotations + description: Allows clients to store small amounts of arbitrary data. + createTime: + type: string + format: date-time + x-dcl-go-name: CreateTime + readOnly: true + description: Output only. Server assigned timestamp for when the connection + was created. 
+ x-kubernetes-immutable: true + disabled: + type: boolean + x-dcl-go-name: Disabled + description: If disabled is set to true, functionality is disabled for this + connection. Repository based API methods and webhooks processing for repositories + in this connection will be disabled. + etag: + type: string + x-dcl-go-name: Etag + readOnly: true + description: This checksum is computed by the server based on the value + of other fields, and may be sent on update and delete requests to ensure + the client has an up-to-date value before proceeding. + x-kubernetes-immutable: true + githubConfig: + type: object + x-dcl-go-name: GithubConfig + x-dcl-go-type: ConnectionGithubConfig + description: Configuration for connections to github.com. + x-dcl-conflicts: + - githubEnterpriseConfig + - gitlabConfig + properties: + appInstallationId: + type: integer + format: int64 + x-dcl-go-name: AppInstallationId + description: GitHub App installation id. + authorizerCredential: + type: object + x-dcl-go-name: AuthorizerCredential + x-dcl-go-type: ConnectionGithubConfigAuthorizerCredential + description: OAuth credential of the account that authorized the Cloud + Build GitHub App. It is recommended to use a robot account instead + of a human user account. The OAuth token must be tied to the Cloud + Build GitHub App. + properties: + oauthTokenSecretVersion: + type: string + x-dcl-go-name: OAuthTokenSecretVersion + description: 'A SecretManager resource containing the OAuth token + that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.' + x-dcl-references: + - resource: Secretmanager/SecretVersion + field: selfLink + username: + type: string + x-dcl-go-name: Username + readOnly: true + description: Output only. The username associated to this token. 
+ githubEnterpriseConfig: + type: object + x-dcl-go-name: GithubEnterpriseConfig + x-dcl-go-type: ConnectionGithubEnterpriseConfig + description: Configuration for connections to an instance of GitHub Enterprise. + x-dcl-conflicts: + - githubConfig + - gitlabConfig + required: + - hostUri + properties: + appId: + type: integer + format: int64 + x-dcl-go-name: AppId + description: Id of the GitHub App created from the manifest. + appInstallationId: + type: integer + format: int64 + x-dcl-go-name: AppInstallationId + description: ID of the installation of the GitHub App. + appSlug: + type: string + x-dcl-go-name: AppSlug + description: The URL-friendly name of the GitHub App. + hostUri: + type: string + x-dcl-go-name: HostUri + description: Required. The URI of the GitHub Enterprise host this connection + is for. + privateKeySecretVersion: + type: string + x-dcl-go-name: PrivateKeySecretVersion + description: SecretManager resource containing the private key of the + GitHub App, formatted as `projects/*/secrets/*/versions/*`. + x-dcl-references: + - resource: Secretmanager/SecretVersion + field: selfLink + serviceDirectoryConfig: + type: object + x-dcl-go-name: ServiceDirectoryConfig + x-dcl-go-type: ConnectionGithubEnterpriseConfigServiceDirectoryConfig + description: Configuration for using Service Directory to privately + connect to a GitHub Enterprise server. This should only be set if + the GitHub Enterprise server is hosted on-premises and not reachable + by public internet. If this field is left empty, calls to the GitHub + Enterprise server will be made over the public internet. + required: + - service + properties: + service: + type: string + x-dcl-go-name: Service + description: 'Required. The Service Directory service name. Format: + projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.' 
+ x-dcl-references: + - resource: Servicedirectory/Service + field: selfLink + sslCa: + type: string + x-dcl-go-name: SslCa + description: SSL certificate to use for requests to GitHub Enterprise. + webhookSecretSecretVersion: + type: string + x-dcl-go-name: WebhookSecretSecretVersion + description: SecretManager resource containing the webhook secret of + the GitHub App, formatted as `projects/*/secrets/*/versions/*`. + x-dcl-references: + - resource: Secretmanager/SecretVersion + field: selfLink + gitlabConfig: + type: object + x-dcl-go-name: GitlabConfig + x-dcl-go-type: ConnectionGitlabConfig + description: Configuration for connections to gitlab.com or an instance + of GitLab Enterprise. + x-dcl-conflicts: + - githubConfig + - githubEnterpriseConfig + required: + - webhookSecretSecretVersion + - readAuthorizerCredential + - authorizerCredential + properties: + authorizerCredential: + type: object + x-dcl-go-name: AuthorizerCredential + x-dcl-go-type: ConnectionGitlabConfigAuthorizerCredential + description: Required. A GitLab personal access token with the `api` + scope access. + required: + - userTokenSecretVersion + properties: + userTokenSecretVersion: + type: string + x-dcl-go-name: UserTokenSecretVersion + description: 'Required. A SecretManager resource containing the + user token that authorizes the Cloud Build connection. Format: + `projects/*/secrets/*/versions/*`.' + x-dcl-references: + - resource: Secretmanager/SecretVersion + field: selfLink + username: + type: string + x-dcl-go-name: Username + readOnly: true + description: Output only. The username associated to this token. + hostUri: + type: string + x-dcl-go-name: HostUri + description: The URI of the GitLab Enterprise host this connection is + for. If not specified, the default value is https://gitlab.com. 
+ x-dcl-server-default: true + readAuthorizerCredential: + type: object + x-dcl-go-name: ReadAuthorizerCredential + x-dcl-go-type: ConnectionGitlabConfigReadAuthorizerCredential + description: Required. A GitLab personal access token with the minimum + `read_api` scope access. + required: + - userTokenSecretVersion + properties: + userTokenSecretVersion: + type: string + x-dcl-go-name: UserTokenSecretVersion + description: 'Required. A SecretManager resource containing the + user token that authorizes the Cloud Build connection. Format: + `projects/*/secrets/*/versions/*`.' + x-dcl-references: + - resource: Secretmanager/SecretVersion + field: selfLink + username: + type: string + x-dcl-go-name: Username + readOnly: true + description: Output only. The username associated to this token. + serverVersion: + type: string + x-dcl-go-name: ServerVersion + readOnly: true + description: Output only. Version of the GitLab Enterprise server running + on the `host_uri`. + serviceDirectoryConfig: + type: object + x-dcl-go-name: ServiceDirectoryConfig + x-dcl-go-type: ConnectionGitlabConfigServiceDirectoryConfig + description: Configuration for using Service Directory to privately + connect to a GitLab Enterprise server. This should only be set if + the GitLab Enterprise server is hosted on-premises and not reachable + by public internet. If this field is left empty, calls to the GitLab + Enterprise server will be made over the public internet. + required: + - service + properties: + service: + type: string + x-dcl-go-name: Service + description: 'Required. The Service Directory service name. Format: + projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.' + x-dcl-references: + - resource: Servicedirectory/Service + field: selfLink + sslCa: + type: string + x-dcl-go-name: SslCa + description: SSL certificate to use for requests to GitLab Enterprise. 
+ webhookSecretSecretVersion: + type: string + x-dcl-go-name: WebhookSecretSecretVersion + description: Required. Immutable. SecretManager resource containing + the webhook secret of a GitLab Enterprise project, formatted as `projects/*/secrets/*/versions/*`. + x-dcl-references: + - resource: Secretmanager/SecretVersion + field: selfLink + installationState: + type: object + x-dcl-go-name: InstallationState + x-dcl-go-type: ConnectionInstallationState + readOnly: true + description: Output only. Installation state of the Connection. + x-kubernetes-immutable: true + properties: + actionUri: + type: string + x-dcl-go-name: ActionUri + readOnly: true + description: Output only. Link to follow for next action. Empty string + if the installation is already complete. + x-kubernetes-immutable: true + message: + type: string + x-dcl-go-name: Message + readOnly: true + description: Output only. Message of what the user should do next to + continue the installation. Empty string if the installation is already + complete. + x-kubernetes-immutable: true + stage: + type: string + x-dcl-go-name: Stage + x-dcl-go-type: ConnectionInstallationStateStageEnum + readOnly: true + description: 'Output only. Current step of the installation process. + Possible values: STAGE_UNSPECIFIED, PENDING_CREATE_APP, PENDING_USER_OAUTH, + PENDING_INSTALL_APP, COMPLETE' + x-kubernetes-immutable: true + enum: + - STAGE_UNSPECIFIED + - PENDING_CREATE_APP + - PENDING_USER_OAUTH + - PENDING_INSTALL_APP + - COMPLETE + location: + type: string + x-dcl-go-name: Location + description: The location for the resource + x-kubernetes-immutable: true + name: + type: string + x-dcl-go-name: Name + description: Immutable. The resource name of the connection, in the format + `projects/{project}/locations/{location}/connections/{connection_id}`. 
+ x-kubernetes-immutable: true + project: + type: string + x-dcl-go-name: Project + description: The project for the resource + x-kubernetes-immutable: true + x-dcl-references: + - resource: Cloudresourcemanager/Project + field: name + parent: true + reconciling: + type: boolean + x-dcl-go-name: Reconciling + readOnly: true + description: Output only. Set to true when the connection is being set up + or updated in the background. + x-kubernetes-immutable: true + updateTime: + type: string + format: date-time + x-dcl-go-name: UpdateTime + readOnly: true + description: Output only. Server assigned timestamp for when the connection + was updated. + x-kubernetes-immutable: true diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection_internal.go new file mode 100644 index 0000000000..9a8f4e2c02 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection_internal.go @@ -0,0 +1,3979 @@ +// Copyright 2023 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package cloudbuildv2 + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations" +) + +func (r *Connection) validate() error { + + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"GithubConfig", "GithubEnterpriseConfig", "GitlabConfig"}, r.GithubConfig, r.GithubEnterpriseConfig, r.GitlabConfig); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Name, "Name"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.GithubConfig) { + if err := r.GithubConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.GithubEnterpriseConfig) { + if err := r.GithubEnterpriseConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.GitlabConfig) { + if err := r.GitlabConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.InstallationState) { + if err := r.InstallationState.validate(); err != nil { + return err + } + } + return nil +} +func (r *ConnectionGithubConfig) validate() error { + if !dcl.IsEmptyValueIndirect(r.AuthorizerCredential) { + if err := r.AuthorizerCredential.validate(); err != nil { + return err + } + } + return nil +} +func (r *ConnectionGithubConfigAuthorizerCredential) validate() error { + return nil +} +func (r *ConnectionGithubEnterpriseConfig) validate() error { + if err := dcl.Required(r, "hostUri"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.ServiceDirectoryConfig) { + if err := r.ServiceDirectoryConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ConnectionGithubEnterpriseConfigServiceDirectoryConfig) validate() error { + if err := 
dcl.Required(r, "service"); err != nil { + return err + } + return nil +} +func (r *ConnectionGitlabConfig) validate() error { + if err := dcl.Required(r, "webhookSecretSecretVersion"); err != nil { + return err + } + if err := dcl.Required(r, "readAuthorizerCredential"); err != nil { + return err + } + if err := dcl.Required(r, "authorizerCredential"); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.ReadAuthorizerCredential) { + if err := r.ReadAuthorizerCredential.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.AuthorizerCredential) { + if err := r.AuthorizerCredential.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ServiceDirectoryConfig) { + if err := r.ServiceDirectoryConfig.validate(); err != nil { + return err + } + } + return nil +} +func (r *ConnectionGitlabConfigReadAuthorizerCredential) validate() error { + if err := dcl.Required(r, "userTokenSecretVersion"); err != nil { + return err + } + return nil +} +func (r *ConnectionGitlabConfigAuthorizerCredential) validate() error { + if err := dcl.Required(r, "userTokenSecretVersion"); err != nil { + return err + } + return nil +} +func (r *ConnectionGitlabConfigServiceDirectoryConfig) validate() error { + if err := dcl.Required(r, "service"); err != nil { + return err + } + return nil +} +func (r *ConnectionInstallationState) validate() error { + return nil +} +func (r *Connection) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://cloudbuild.googleapis.com/v2/", params) +} + +func (r *Connection) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/connections/{{name}}", nr.basePath(), userBasePath, params), nil +} + +func (r *Connection) 
listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/connections", nr.basePath(), userBasePath, params), nil + +} + +func (r *Connection) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/connections?connectionId={{name}}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Connection) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/connections/{{name}}", nr.basePath(), userBasePath, params), nil +} + +// connectionApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type connectionApiOperation interface { + do(context.Context, *Connection, *Client) error +} + +// newUpdateConnectionUpdateConnectionRequest creates a request for an +// Connection resource's UpdateConnection update type by filling in the update +// fields based on the intended state of the resource. 
+func newUpdateConnectionUpdateConnectionRequest(ctx context.Context, f *Connection, c *Client) (map[string]interface{}, error) { + req := map[string]interface{}{} + res := f + _ = res + + if v, err := expandConnectionGithubConfig(c, f.GithubConfig, res); err != nil { + return nil, fmt.Errorf("error expanding GithubConfig into githubConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["githubConfig"] = v + } + if v, err := expandConnectionGithubEnterpriseConfig(c, f.GithubEnterpriseConfig, res); err != nil { + return nil, fmt.Errorf("error expanding GithubEnterpriseConfig into githubEnterpriseConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["githubEnterpriseConfig"] = v + } + if v, err := expandConnectionGitlabConfig(c, f.GitlabConfig, res); err != nil { + return nil, fmt.Errorf("error expanding GitlabConfig into gitlabConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["gitlabConfig"] = v + } + if v := f.Disabled; !dcl.IsEmptyValueIndirect(v) { + req["disabled"] = v + } + if v := f.Annotations; !dcl.IsEmptyValueIndirect(v) { + req["annotations"] = v + } + b, err := c.getConnectionRaw(ctx, f) + if err != nil { + return nil, err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + rawEtag, err := dcl.GetMapEntry( + m, + []string{"etag"}, + ) + if err != nil { + c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) + } else { + req["etag"] = rawEtag.(string) + } + req["name"] = fmt.Sprintf("projects/%s/locations/%s/connections/%s", *f.Project, *f.Location, *f.Name) + + return req, nil +} + +// marshalUpdateConnectionUpdateConnectionRequest converts the update into +// the final JSON request body. 
+func marshalUpdateConnectionUpdateConnectionRequest(c *Client, m map[string]interface{}) ([]byte, error) { + + return json.Marshal(m) +} + +type updateConnectionUpdateConnectionOperation struct { + // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. + // Usually it will be nil - this is to prevent us from accidentally depending on apply + // options, which should usually be unnecessary. + ApplyOptions []dcl.ApplyOption + FieldDiffs []*dcl.FieldDiff +} + +// do creates a request and sends it to the appropriate URL. In most operations, +// do will transcribe a subset of the resource into a request object and send a +// PUT request to a single URL. + +func (op *updateConnectionUpdateConnectionOperation) do(ctx context.Context, r *Connection, c *Client) error { + _, err := c.GetConnection(ctx, r) + if err != nil { + return err + } + + u, err := r.updateURL(c.Config.BasePath, "UpdateConnection") + if err != nil { + return err + } + mask := dcl.UpdateMask(op.FieldDiffs) + u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) + if err != nil { + return err + } + + req, err := newUpdateConnectionUpdateConnectionRequest(ctx, r, c) + if err != nil { + return err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) + body, err := marshalUpdateConnectionUpdateConnectionRequest(c, req) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) + if err != nil { + return err + } + + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") + + if err != nil { + return err + } + + return nil +} + +func (c *Client) listConnectionRaw(ctx context.Context, r *Connection, pageToken string, pageSize int32) ([]byte, error) { + u, err := 
r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != ConnectionMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listConnectionOperation struct { + Connections []map[string]interface{} `json:"connections"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listConnection(ctx context.Context, r *Connection, pageToken string, pageSize int32) ([]*Connection, string, error) { + b, err := c.listConnectionRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listConnectionOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Connection + for _, v := range m.Connections { + res, err := unmarshalMapConnection(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllConnection(ctx context.Context, f func(*Connection) bool, resources []*Connection) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
+ err := c.DeleteConnection(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteConnectionOperation struct{} + +func (op *deleteConnectionOperation) do(ctx context.Context, r *Connection, c *Client) error { + r, err := c.GetConnection(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Connection not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetConnection checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetConnection(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. 
The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createConnectionOperation struct { + response map[string]interface{} +} + +func (op *createConnectionOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createConnectionOperation) do(ctx context.Context, r *Connection, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. + var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetConnection(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getConnectionRaw(ctx context.Context, r *Connection) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) connectionDiffsForRawDesired(ctx context.Context, rawDesired *Connection, opts 
...dcl.ApplyOption) (initial, desired *Connection, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. + var fetchState *Connection + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Connection); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Connection, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetConnection(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Connection resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Connection resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Connection resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeConnectionDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Connection: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Connection: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractConnectionFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeConnectionInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Connection: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. 
+ desired, err = canonicalizeConnectionDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Connection: %v", desired) + + // 2.1: Comparison of initial and desired state. + diffs, err = diffConnection(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeConnectionInitialState(rawInitial, rawDesired *Connection) (*Connection, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + + if !dcl.IsZeroValue(rawInitial.GithubConfig) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.GithubEnterpriseConfig, rawInitial.GitlabConfig) { + rawInitial.GithubConfig = EmptyConnectionGithubConfig + } + } + + if !dcl.IsZeroValue(rawInitial.GithubEnterpriseConfig) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.GithubConfig, rawInitial.GitlabConfig) { + rawInitial.GithubEnterpriseConfig = EmptyConnectionGithubEnterpriseConfig + } + } + + if !dcl.IsZeroValue(rawInitial.GitlabConfig) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.GithubConfig, rawInitial.GithubEnterpriseConfig) { + rawInitial.GitlabConfig = EmptyConnectionGitlabConfig + } + } + + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeConnectionDesiredState(rawDesired, rawInitial *Connection, opts ...dcl.ApplyOption) (*Connection, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + rawDesired.GithubConfig = canonicalizeConnectionGithubConfig(rawDesired.GithubConfig, nil, opts...) 
+ rawDesired.GithubEnterpriseConfig = canonicalizeConnectionGithubEnterpriseConfig(rawDesired.GithubEnterpriseConfig, nil, opts...) + rawDesired.GitlabConfig = canonicalizeConnectionGitlabConfig(rawDesired.GitlabConfig, nil, opts...) + rawDesired.InstallationState = canonicalizeConnectionInstallationState(rawDesired.InstallationState, nil, opts...) + + return rawDesired, nil + } + canonicalDesired := &Connection{} + if dcl.NameToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + canonicalDesired.GithubConfig = canonicalizeConnectionGithubConfig(rawDesired.GithubConfig, rawInitial.GithubConfig, opts...) + canonicalDesired.GithubEnterpriseConfig = canonicalizeConnectionGithubEnterpriseConfig(rawDesired.GithubEnterpriseConfig, rawInitial.GithubEnterpriseConfig, opts...) + canonicalDesired.GitlabConfig = canonicalizeConnectionGitlabConfig(rawDesired.GitlabConfig, rawInitial.GitlabConfig, opts...) + if dcl.BoolCanonicalize(rawDesired.Disabled, rawInitial.Disabled) { + canonicalDesired.Disabled = rawInitial.Disabled + } else { + canonicalDesired.Disabled = rawDesired.Disabled + } + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + + if canonicalDesired.GithubConfig != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.GithubEnterpriseConfig, rawDesired.GitlabConfig) { + canonicalDesired.GithubConfig = EmptyConnectionGithubConfig + } + } + + if canonicalDesired.GithubEnterpriseConfig != nil { + // Check if anything else is set. + if dcl.AnySet(rawDesired.GithubConfig, rawDesired.GitlabConfig) { + canonicalDesired.GithubEnterpriseConfig = EmptyConnectionGithubEnterpriseConfig + } + } + + if canonicalDesired.GitlabConfig != nil { + // Check if anything else is set. 
+ if dcl.AnySet(rawDesired.GithubConfig, rawDesired.GithubEnterpriseConfig) { + canonicalDesired.GitlabConfig = EmptyConnectionGitlabConfig + } + } + + return canonicalDesired, nil +} + +func canonicalizeConnectionNewState(c *Client, rawNew, rawDesired *Connection) (*Connection, error) { + + rawNew.Name = rawDesired.Name + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.GithubConfig) && dcl.IsEmptyValueIndirect(rawDesired.GithubConfig) { + rawNew.GithubConfig = rawDesired.GithubConfig + } else { + rawNew.GithubConfig = canonicalizeNewConnectionGithubConfig(c, rawDesired.GithubConfig, rawNew.GithubConfig) + } + + if dcl.IsEmptyValueIndirect(rawNew.GithubEnterpriseConfig) && dcl.IsEmptyValueIndirect(rawDesired.GithubEnterpriseConfig) { + rawNew.GithubEnterpriseConfig = rawDesired.GithubEnterpriseConfig + } else { + rawNew.GithubEnterpriseConfig = canonicalizeNewConnectionGithubEnterpriseConfig(c, rawDesired.GithubEnterpriseConfig, rawNew.GithubEnterpriseConfig) + } + + if dcl.IsEmptyValueIndirect(rawNew.GitlabConfig) && dcl.IsEmptyValueIndirect(rawDesired.GitlabConfig) { + rawNew.GitlabConfig = rawDesired.GitlabConfig + } else { + rawNew.GitlabConfig = canonicalizeNewConnectionGitlabConfig(c, rawDesired.GitlabConfig, rawNew.GitlabConfig) + } + + if dcl.IsEmptyValueIndirect(rawNew.InstallationState) && dcl.IsEmptyValueIndirect(rawDesired.InstallationState) { + rawNew.InstallationState = rawDesired.InstallationState + } else { + rawNew.InstallationState = canonicalizeNewConnectionInstallationState(c, rawDesired.InstallationState, rawNew.InstallationState) + } + + if dcl.IsEmptyValueIndirect(rawNew.Disabled) && dcl.IsEmptyValueIndirect(rawDesired.Disabled) { + 
rawNew.Disabled = rawDesired.Disabled + } else { + if dcl.BoolCanonicalize(rawDesired.Disabled, rawNew.Disabled) { + rawNew.Disabled = rawDesired.Disabled + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Reconciling) && dcl.IsEmptyValueIndirect(rawDesired.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } else { + if dcl.BoolCanonicalize(rawDesired.Reconciling, rawNew.Reconciling) { + rawNew.Reconciling = rawDesired.Reconciling + } + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = rawDesired.Annotations + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + return rawNew, nil +} + +func canonicalizeConnectionGithubConfig(des, initial *ConnectionGithubConfig, opts ...dcl.ApplyOption) *ConnectionGithubConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ConnectionGithubConfig{} + + cDes.AuthorizerCredential = canonicalizeConnectionGithubConfigAuthorizerCredential(des.AuthorizerCredential, initial.AuthorizerCredential, opts...) + if dcl.IsZeroValue(des.AppInstallationId) || (dcl.IsEmptyValueIndirect(des.AppInstallationId) && dcl.IsEmptyValueIndirect(initial.AppInstallationId)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.AppInstallationId = initial.AppInstallationId + } else { + cDes.AppInstallationId = des.AppInstallationId + } + + return cDes +} + +func canonicalizeConnectionGithubConfigSlice(des, initial []ConnectionGithubConfig, opts ...dcl.ApplyOption) []ConnectionGithubConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ConnectionGithubConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeConnectionGithubConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ConnectionGithubConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeConnectionGithubConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewConnectionGithubConfig(c *Client, des, nw *ConnectionGithubConfig) *ConnectionGithubConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ConnectionGithubConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.AuthorizerCredential = canonicalizeNewConnectionGithubConfigAuthorizerCredential(c, des.AuthorizerCredential, nw.AuthorizerCredential) + + return nw +} + +func canonicalizeNewConnectionGithubConfigSet(c *Client, des, nw []ConnectionGithubConfig) []ConnectionGithubConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []ConnectionGithubConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareConnectionGithubConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewConnectionGithubConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewConnectionGithubConfigSlice(c *Client, des, nw []ConnectionGithubConfig) []ConnectionGithubConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ConnectionGithubConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewConnectionGithubConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeConnectionGithubConfigAuthorizerCredential(des, initial *ConnectionGithubConfigAuthorizerCredential, opts ...dcl.ApplyOption) *ConnectionGithubConfigAuthorizerCredential { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ConnectionGithubConfigAuthorizerCredential{} + + if dcl.IsZeroValue(des.OAuthTokenSecretVersion) || (dcl.IsEmptyValueIndirect(des.OAuthTokenSecretVersion) && dcl.IsEmptyValueIndirect(initial.OAuthTokenSecretVersion)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.OAuthTokenSecretVersion = initial.OAuthTokenSecretVersion + } else { + cDes.OAuthTokenSecretVersion = des.OAuthTokenSecretVersion + } + + return cDes +} + +func canonicalizeConnectionGithubConfigAuthorizerCredentialSlice(des, initial []ConnectionGithubConfigAuthorizerCredential, opts ...dcl.ApplyOption) []ConnectionGithubConfigAuthorizerCredential { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ConnectionGithubConfigAuthorizerCredential, 0, len(des)) + for _, d := range des { + cd := canonicalizeConnectionGithubConfigAuthorizerCredential(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ConnectionGithubConfigAuthorizerCredential, 0, len(des)) + for i, d := range des { + cd := canonicalizeConnectionGithubConfigAuthorizerCredential(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewConnectionGithubConfigAuthorizerCredential(c *Client, des, nw *ConnectionGithubConfigAuthorizerCredential) *ConnectionGithubConfigAuthorizerCredential { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ConnectionGithubConfigAuthorizerCredential while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Username, nw.Username) { + nw.Username = des.Username + } + + return nw +} + +func canonicalizeNewConnectionGithubConfigAuthorizerCredentialSet(c *Client, des, nw []ConnectionGithubConfigAuthorizerCredential) []ConnectionGithubConfigAuthorizerCredential { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []ConnectionGithubConfigAuthorizerCredential + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareConnectionGithubConfigAuthorizerCredentialNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewConnectionGithubConfigAuthorizerCredential(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewConnectionGithubConfigAuthorizerCredentialSlice(c *Client, des, nw []ConnectionGithubConfigAuthorizerCredential) []ConnectionGithubConfigAuthorizerCredential { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ConnectionGithubConfigAuthorizerCredential + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewConnectionGithubConfigAuthorizerCredential(c, &d, &n)) + } + + return items +} + +func canonicalizeConnectionGithubEnterpriseConfig(des, initial *ConnectionGithubEnterpriseConfig, opts ...dcl.ApplyOption) *ConnectionGithubEnterpriseConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ConnectionGithubEnterpriseConfig{} + + if dcl.StringCanonicalize(des.HostUri, initial.HostUri) || dcl.IsZeroValue(des.HostUri) { + cDes.HostUri = initial.HostUri + } else { + cDes.HostUri = des.HostUri + } + if dcl.IsZeroValue(des.AppId) || (dcl.IsEmptyValueIndirect(des.AppId) && dcl.IsEmptyValueIndirect(initial.AppId)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.AppId = initial.AppId + } else { + cDes.AppId = des.AppId + } + if dcl.StringCanonicalize(des.AppSlug, initial.AppSlug) || dcl.IsZeroValue(des.AppSlug) { + cDes.AppSlug = initial.AppSlug + } else { + cDes.AppSlug = des.AppSlug + } + if dcl.IsZeroValue(des.PrivateKeySecretVersion) || (dcl.IsEmptyValueIndirect(des.PrivateKeySecretVersion) && dcl.IsEmptyValueIndirect(initial.PrivateKeySecretVersion)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.PrivateKeySecretVersion = initial.PrivateKeySecretVersion + } else { + cDes.PrivateKeySecretVersion = des.PrivateKeySecretVersion + } + if dcl.IsZeroValue(des.WebhookSecretSecretVersion) || (dcl.IsEmptyValueIndirect(des.WebhookSecretSecretVersion) && dcl.IsEmptyValueIndirect(initial.WebhookSecretSecretVersion)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.WebhookSecretSecretVersion = initial.WebhookSecretSecretVersion + } else { + cDes.WebhookSecretSecretVersion = des.WebhookSecretSecretVersion + } + if dcl.IsZeroValue(des.AppInstallationId) || (dcl.IsEmptyValueIndirect(des.AppInstallationId) && dcl.IsEmptyValueIndirect(initial.AppInstallationId)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.AppInstallationId = initial.AppInstallationId + } else { + cDes.AppInstallationId = des.AppInstallationId + } + cDes.ServiceDirectoryConfig = canonicalizeConnectionGithubEnterpriseConfigServiceDirectoryConfig(des.ServiceDirectoryConfig, initial.ServiceDirectoryConfig, opts...) 
+ if dcl.StringCanonicalize(des.SslCa, initial.SslCa) || dcl.IsZeroValue(des.SslCa) { + cDes.SslCa = initial.SslCa + } else { + cDes.SslCa = des.SslCa + } + + return cDes +} + +func canonicalizeConnectionGithubEnterpriseConfigSlice(des, initial []ConnectionGithubEnterpriseConfig, opts ...dcl.ApplyOption) []ConnectionGithubEnterpriseConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ConnectionGithubEnterpriseConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeConnectionGithubEnterpriseConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ConnectionGithubEnterpriseConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeConnectionGithubEnterpriseConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewConnectionGithubEnterpriseConfig(c *Client, des, nw *ConnectionGithubEnterpriseConfig) *ConnectionGithubEnterpriseConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ConnectionGithubEnterpriseConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.HostUri, nw.HostUri) { + nw.HostUri = des.HostUri + } + if dcl.StringCanonicalize(des.AppSlug, nw.AppSlug) { + nw.AppSlug = des.AppSlug + } + nw.ServiceDirectoryConfig = canonicalizeNewConnectionGithubEnterpriseConfigServiceDirectoryConfig(c, des.ServiceDirectoryConfig, nw.ServiceDirectoryConfig) + if dcl.StringCanonicalize(des.SslCa, nw.SslCa) { + nw.SslCa = des.SslCa + } + + return nw +} + +func canonicalizeNewConnectionGithubEnterpriseConfigSet(c *Client, des, nw []ConnectionGithubEnterpriseConfig) []ConnectionGithubEnterpriseConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ConnectionGithubEnterpriseConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareConnectionGithubEnterpriseConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewConnectionGithubEnterpriseConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewConnectionGithubEnterpriseConfigSlice(c *Client, des, nw []ConnectionGithubEnterpriseConfig) []ConnectionGithubEnterpriseConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ConnectionGithubEnterpriseConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewConnectionGithubEnterpriseConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeConnectionGithubEnterpriseConfigServiceDirectoryConfig(des, initial *ConnectionGithubEnterpriseConfigServiceDirectoryConfig, opts ...dcl.ApplyOption) *ConnectionGithubEnterpriseConfigServiceDirectoryConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ConnectionGithubEnterpriseConfigServiceDirectoryConfig{} + + if dcl.IsZeroValue(des.Service) || (dcl.IsEmptyValueIndirect(des.Service) && dcl.IsEmptyValueIndirect(initial.Service)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Service = initial.Service + } else { + cDes.Service = des.Service + } + + return cDes +} + +func canonicalizeConnectionGithubEnterpriseConfigServiceDirectoryConfigSlice(des, initial []ConnectionGithubEnterpriseConfigServiceDirectoryConfig, opts ...dcl.ApplyOption) []ConnectionGithubEnterpriseConfigServiceDirectoryConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ConnectionGithubEnterpriseConfigServiceDirectoryConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeConnectionGithubEnterpriseConfigServiceDirectoryConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ConnectionGithubEnterpriseConfigServiceDirectoryConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeConnectionGithubEnterpriseConfigServiceDirectoryConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewConnectionGithubEnterpriseConfigServiceDirectoryConfig(c *Client, des, nw *ConnectionGithubEnterpriseConfigServiceDirectoryConfig) *ConnectionGithubEnterpriseConfigServiceDirectoryConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ConnectionGithubEnterpriseConfigServiceDirectoryConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewConnectionGithubEnterpriseConfigServiceDirectoryConfigSet(c *Client, des, nw []ConnectionGithubEnterpriseConfigServiceDirectoryConfig) []ConnectionGithubEnterpriseConfigServiceDirectoryConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ConnectionGithubEnterpriseConfigServiceDirectoryConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareConnectionGithubEnterpriseConfigServiceDirectoryConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewConnectionGithubEnterpriseConfigServiceDirectoryConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewConnectionGithubEnterpriseConfigServiceDirectoryConfigSlice(c *Client, des, nw []ConnectionGithubEnterpriseConfigServiceDirectoryConfig) []ConnectionGithubEnterpriseConfigServiceDirectoryConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ConnectionGithubEnterpriseConfigServiceDirectoryConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewConnectionGithubEnterpriseConfigServiceDirectoryConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeConnectionGitlabConfig(des, initial *ConnectionGitlabConfig, opts ...dcl.ApplyOption) *ConnectionGitlabConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ConnectionGitlabConfig{} + + if dcl.StringCanonicalize(des.HostUri, initial.HostUri) || dcl.IsZeroValue(des.HostUri) { + cDes.HostUri = initial.HostUri + } else { + cDes.HostUri = des.HostUri + } + if dcl.IsZeroValue(des.WebhookSecretSecretVersion) || (dcl.IsEmptyValueIndirect(des.WebhookSecretSecretVersion) && dcl.IsEmptyValueIndirect(initial.WebhookSecretSecretVersion)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.WebhookSecretSecretVersion = initial.WebhookSecretSecretVersion + } else { + cDes.WebhookSecretSecretVersion = des.WebhookSecretSecretVersion + } + cDes.ReadAuthorizerCredential = canonicalizeConnectionGitlabConfigReadAuthorizerCredential(des.ReadAuthorizerCredential, initial.ReadAuthorizerCredential, opts...) + cDes.AuthorizerCredential = canonicalizeConnectionGitlabConfigAuthorizerCredential(des.AuthorizerCredential, initial.AuthorizerCredential, opts...) + cDes.ServiceDirectoryConfig = canonicalizeConnectionGitlabConfigServiceDirectoryConfig(des.ServiceDirectoryConfig, initial.ServiceDirectoryConfig, opts...) 
+ if dcl.StringCanonicalize(des.SslCa, initial.SslCa) || dcl.IsZeroValue(des.SslCa) { + cDes.SslCa = initial.SslCa + } else { + cDes.SslCa = des.SslCa + } + + return cDes +} + +func canonicalizeConnectionGitlabConfigSlice(des, initial []ConnectionGitlabConfig, opts ...dcl.ApplyOption) []ConnectionGitlabConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ConnectionGitlabConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeConnectionGitlabConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ConnectionGitlabConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeConnectionGitlabConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewConnectionGitlabConfig(c *Client, des, nw *ConnectionGitlabConfig) *ConnectionGitlabConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ConnectionGitlabConfig while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.HostUri, nw.HostUri) { + nw.HostUri = des.HostUri + } + nw.ReadAuthorizerCredential = canonicalizeNewConnectionGitlabConfigReadAuthorizerCredential(c, des.ReadAuthorizerCredential, nw.ReadAuthorizerCredential) + nw.AuthorizerCredential = canonicalizeNewConnectionGitlabConfigAuthorizerCredential(c, des.AuthorizerCredential, nw.AuthorizerCredential) + nw.ServiceDirectoryConfig = canonicalizeNewConnectionGitlabConfigServiceDirectoryConfig(c, des.ServiceDirectoryConfig, nw.ServiceDirectoryConfig) + if dcl.StringCanonicalize(des.SslCa, nw.SslCa) { + nw.SslCa = des.SslCa + } + if dcl.StringCanonicalize(des.ServerVersion, nw.ServerVersion) { + nw.ServerVersion = des.ServerVersion + } + + return nw +} + +func canonicalizeNewConnectionGitlabConfigSet(c *Client, des, nw []ConnectionGitlabConfig) []ConnectionGitlabConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ConnectionGitlabConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareConnectionGitlabConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewConnectionGitlabConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewConnectionGitlabConfigSlice(c *Client, des, nw []ConnectionGitlabConfig) []ConnectionGitlabConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ConnectionGitlabConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewConnectionGitlabConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeConnectionGitlabConfigReadAuthorizerCredential(des, initial *ConnectionGitlabConfigReadAuthorizerCredential, opts ...dcl.ApplyOption) *ConnectionGitlabConfigReadAuthorizerCredential { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ConnectionGitlabConfigReadAuthorizerCredential{} + + if dcl.IsZeroValue(des.UserTokenSecretVersion) || (dcl.IsEmptyValueIndirect(des.UserTokenSecretVersion) && dcl.IsEmptyValueIndirect(initial.UserTokenSecretVersion)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.UserTokenSecretVersion = initial.UserTokenSecretVersion + } else { + cDes.UserTokenSecretVersion = des.UserTokenSecretVersion + } + + return cDes +} + +func canonicalizeConnectionGitlabConfigReadAuthorizerCredentialSlice(des, initial []ConnectionGitlabConfigReadAuthorizerCredential, opts ...dcl.ApplyOption) []ConnectionGitlabConfigReadAuthorizerCredential { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ConnectionGitlabConfigReadAuthorizerCredential, 0, len(des)) + for _, d := range des { + cd := canonicalizeConnectionGitlabConfigReadAuthorizerCredential(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ConnectionGitlabConfigReadAuthorizerCredential, 0, len(des)) + for i, d := range des { + cd := canonicalizeConnectionGitlabConfigReadAuthorizerCredential(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewConnectionGitlabConfigReadAuthorizerCredential(c *Client, des, nw *ConnectionGitlabConfigReadAuthorizerCredential) *ConnectionGitlabConfigReadAuthorizerCredential { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ConnectionGitlabConfigReadAuthorizerCredential while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Username, nw.Username) { + nw.Username = des.Username + } + + return nw +} + +func canonicalizeNewConnectionGitlabConfigReadAuthorizerCredentialSet(c *Client, des, nw []ConnectionGitlabConfigReadAuthorizerCredential) []ConnectionGitlabConfigReadAuthorizerCredential { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ConnectionGitlabConfigReadAuthorizerCredential + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareConnectionGitlabConfigReadAuthorizerCredentialNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewConnectionGitlabConfigReadAuthorizerCredential(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewConnectionGitlabConfigReadAuthorizerCredentialSlice(c *Client, des, nw []ConnectionGitlabConfigReadAuthorizerCredential) []ConnectionGitlabConfigReadAuthorizerCredential { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ConnectionGitlabConfigReadAuthorizerCredential + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewConnectionGitlabConfigReadAuthorizerCredential(c, &d, &n)) + } + + return items +} + +func canonicalizeConnectionGitlabConfigAuthorizerCredential(des, initial *ConnectionGitlabConfigAuthorizerCredential, opts ...dcl.ApplyOption) *ConnectionGitlabConfigAuthorizerCredential { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ConnectionGitlabConfigAuthorizerCredential{} + + if dcl.IsZeroValue(des.UserTokenSecretVersion) || (dcl.IsEmptyValueIndirect(des.UserTokenSecretVersion) && dcl.IsEmptyValueIndirect(initial.UserTokenSecretVersion)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.UserTokenSecretVersion = initial.UserTokenSecretVersion + } else { + cDes.UserTokenSecretVersion = des.UserTokenSecretVersion + } + + return cDes +} + +func canonicalizeConnectionGitlabConfigAuthorizerCredentialSlice(des, initial []ConnectionGitlabConfigAuthorizerCredential, opts ...dcl.ApplyOption) []ConnectionGitlabConfigAuthorizerCredential { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ConnectionGitlabConfigAuthorizerCredential, 0, len(des)) + for _, d := range des { + cd := canonicalizeConnectionGitlabConfigAuthorizerCredential(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ConnectionGitlabConfigAuthorizerCredential, 0, len(des)) + for i, d := range des { + cd := canonicalizeConnectionGitlabConfigAuthorizerCredential(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewConnectionGitlabConfigAuthorizerCredential(c *Client, des, nw *ConnectionGitlabConfigAuthorizerCredential) *ConnectionGitlabConfigAuthorizerCredential { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ConnectionGitlabConfigAuthorizerCredential while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Username, nw.Username) { + nw.Username = des.Username + } + + return nw +} + +func canonicalizeNewConnectionGitlabConfigAuthorizerCredentialSet(c *Client, des, nw []ConnectionGitlabConfigAuthorizerCredential) []ConnectionGitlabConfigAuthorizerCredential { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ConnectionGitlabConfigAuthorizerCredential + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareConnectionGitlabConfigAuthorizerCredentialNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewConnectionGitlabConfigAuthorizerCredential(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewConnectionGitlabConfigAuthorizerCredentialSlice(c *Client, des, nw []ConnectionGitlabConfigAuthorizerCredential) []ConnectionGitlabConfigAuthorizerCredential { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ConnectionGitlabConfigAuthorizerCredential + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewConnectionGitlabConfigAuthorizerCredential(c, &d, &n)) + } + + return items +} + +func canonicalizeConnectionGitlabConfigServiceDirectoryConfig(des, initial *ConnectionGitlabConfigServiceDirectoryConfig, opts ...dcl.ApplyOption) *ConnectionGitlabConfigServiceDirectoryConfig { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ConnectionGitlabConfigServiceDirectoryConfig{} + + if dcl.IsZeroValue(des.Service) || (dcl.IsEmptyValueIndirect(des.Service) && dcl.IsEmptyValueIndirect(initial.Service)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Service = initial.Service + } else { + cDes.Service = des.Service + } + + return cDes +} + +func canonicalizeConnectionGitlabConfigServiceDirectoryConfigSlice(des, initial []ConnectionGitlabConfigServiceDirectoryConfig, opts ...dcl.ApplyOption) []ConnectionGitlabConfigServiceDirectoryConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ConnectionGitlabConfigServiceDirectoryConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeConnectionGitlabConfigServiceDirectoryConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ConnectionGitlabConfigServiceDirectoryConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeConnectionGitlabConfigServiceDirectoryConfig(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewConnectionGitlabConfigServiceDirectoryConfig(c *Client, des, nw *ConnectionGitlabConfigServiceDirectoryConfig) *ConnectionGitlabConfigServiceDirectoryConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ConnectionGitlabConfigServiceDirectoryConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewConnectionGitlabConfigServiceDirectoryConfigSet(c *Client, des, nw []ConnectionGitlabConfigServiceDirectoryConfig) []ConnectionGitlabConfigServiceDirectoryConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ConnectionGitlabConfigServiceDirectoryConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareConnectionGitlabConfigServiceDirectoryConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewConnectionGitlabConfigServiceDirectoryConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewConnectionGitlabConfigServiceDirectoryConfigSlice(c *Client, des, nw []ConnectionGitlabConfigServiceDirectoryConfig) []ConnectionGitlabConfigServiceDirectoryConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []ConnectionGitlabConfigServiceDirectoryConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewConnectionGitlabConfigServiceDirectoryConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeConnectionInstallationState(des, initial *ConnectionInstallationState, opts ...dcl.ApplyOption) *ConnectionInstallationState { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &ConnectionInstallationState{} + + return cDes +} + +func canonicalizeConnectionInstallationStateSlice(des, initial []ConnectionInstallationState, opts ...dcl.ApplyOption) []ConnectionInstallationState { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]ConnectionInstallationState, 0, len(des)) + for _, d := range des { + cd := canonicalizeConnectionInstallationState(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]ConnectionInstallationState, 0, len(des)) + for i, d := range des { + cd := canonicalizeConnectionInstallationState(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewConnectionInstallationState(c *Client, des, nw *ConnectionInstallationState) *ConnectionInstallationState { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for ConnectionInstallationState while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Message, nw.Message) { + nw.Message = des.Message + } + if dcl.StringCanonicalize(des.ActionUri, nw.ActionUri) { + nw.ActionUri = des.ActionUri + } + + return nw +} + +func canonicalizeNewConnectionInstallationStateSet(c *Client, des, nw []ConnectionInstallationState) []ConnectionInstallationState { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []ConnectionInstallationState + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareConnectionInstallationStateNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewConnectionInstallationState(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewConnectionInstallationStateSlice(c *Client, des, nw []ConnectionInstallationState) []ConnectionInstallationState { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []ConnectionInstallationState + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewConnectionInstallationState(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. 
This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffConnection(c *Client, desired, actual *Connection, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.GithubConfig, actual.GithubConfig, dcl.DiffInfo{ObjectFunction: compareConnectionGithubConfigNewStyle, EmptyObject: EmptyConnectionGithubConfig, OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("GithubConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.GithubEnterpriseConfig, actual.GithubEnterpriseConfig, dcl.DiffInfo{ObjectFunction: compareConnectionGithubEnterpriseConfigNewStyle, EmptyObject: EmptyConnectionGithubEnterpriseConfig, OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("GithubEnterpriseConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.GitlabConfig, actual.GitlabConfig, dcl.DiffInfo{ObjectFunction: compareConnectionGitlabConfigNewStyle, EmptyObject: EmptyConnectionGitlabConfig, OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("GitlabConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.InstallationState, actual.InstallationState, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareConnectionInstallationStateNewStyle, EmptyObject: EmptyConnectionInstallationState, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("InstallationState")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Disabled, actual.Disabled, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("Disabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Reconciling, actual.Reconciling, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Reconciling")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareConnectionGithubConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ConnectionGithubConfig) + if !ok { + desiredNotPointer, ok := d.(ConnectionGithubConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGithubConfig or *ConnectionGithubConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ConnectionGithubConfig) + if !ok { + actualNotPointer, ok := a.(ConnectionGithubConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGithubConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AuthorizerCredential, actual.AuthorizerCredential, dcl.DiffInfo{ObjectFunction: compareConnectionGithubConfigAuthorizerCredentialNewStyle, EmptyObject: EmptyConnectionGithubConfigAuthorizerCredential, OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("AuthorizerCredential")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AppInstallationId, actual.AppInstallationId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("AppInstallationId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareConnectionGithubConfigAuthorizerCredentialNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ConnectionGithubConfigAuthorizerCredential) + if !ok { + desiredNotPointer, ok := d.(ConnectionGithubConfigAuthorizerCredential) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGithubConfigAuthorizerCredential or *ConnectionGithubConfigAuthorizerCredential", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ConnectionGithubConfigAuthorizerCredential) + if !ok { + actualNotPointer, ok := a.(ConnectionGithubConfigAuthorizerCredential) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGithubConfigAuthorizerCredential", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.OAuthTokenSecretVersion, actual.OAuthTokenSecretVersion, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("OauthTokenSecretVersion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Username, actual.Username, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Username")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareConnectionGithubEnterpriseConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ConnectionGithubEnterpriseConfig) + if !ok { + desiredNotPointer, ok := d.(ConnectionGithubEnterpriseConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGithubEnterpriseConfig or *ConnectionGithubEnterpriseConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ConnectionGithubEnterpriseConfig) + if !ok { + actualNotPointer, ok := a.(ConnectionGithubEnterpriseConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGithubEnterpriseConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.HostUri, actual.HostUri, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("HostUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AppId, actual.AppId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("AppId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AppSlug, actual.AppSlug, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("AppSlug")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.PrivateKeySecretVersion, actual.PrivateKeySecretVersion, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("PrivateKeySecretVersion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.WebhookSecretSecretVersion, actual.WebhookSecretSecretVersion, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("WebhookSecretSecretVersion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.AppInstallationId, actual.AppInstallationId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("AppInstallationId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceDirectoryConfig, actual.ServiceDirectoryConfig, dcl.DiffInfo{ObjectFunction: compareConnectionGithubEnterpriseConfigServiceDirectoryConfigNewStyle, EmptyObject: EmptyConnectionGithubEnterpriseConfigServiceDirectoryConfig, OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("ServiceDirectoryConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SslCa, actual.SslCa, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("SslCa")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareConnectionGithubEnterpriseConfigServiceDirectoryConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ConnectionGithubEnterpriseConfigServiceDirectoryConfig) + if !ok { + desiredNotPointer, ok := d.(ConnectionGithubEnterpriseConfigServiceDirectoryConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGithubEnterpriseConfigServiceDirectoryConfig or *ConnectionGithubEnterpriseConfigServiceDirectoryConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ConnectionGithubEnterpriseConfigServiceDirectoryConfig) + if !ok { + actualNotPointer, ok := a.(ConnectionGithubEnterpriseConfigServiceDirectoryConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGithubEnterpriseConfigServiceDirectoryConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Service, actual.Service, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("Service")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareConnectionGitlabConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ConnectionGitlabConfig) + if !ok { + desiredNotPointer, ok := d.(ConnectionGitlabConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGitlabConfig or *ConnectionGitlabConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ConnectionGitlabConfig) + if !ok { + actualNotPointer, ok := a.(ConnectionGitlabConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGitlabConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.HostUri, actual.HostUri, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("HostUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.WebhookSecretSecretVersion, actual.WebhookSecretSecretVersion, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("WebhookSecretSecretVersion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ReadAuthorizerCredential, actual.ReadAuthorizerCredential, dcl.DiffInfo{ObjectFunction: compareConnectionGitlabConfigReadAuthorizerCredentialNewStyle, EmptyObject: EmptyConnectionGitlabConfigReadAuthorizerCredential, OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("ReadAuthorizerCredential")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.AuthorizerCredential, actual.AuthorizerCredential, dcl.DiffInfo{ObjectFunction: compareConnectionGitlabConfigAuthorizerCredentialNewStyle, EmptyObject: EmptyConnectionGitlabConfigAuthorizerCredential, OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("AuthorizerCredential")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceDirectoryConfig, actual.ServiceDirectoryConfig, dcl.DiffInfo{ObjectFunction: compareConnectionGitlabConfigServiceDirectoryConfigNewStyle, EmptyObject: EmptyConnectionGitlabConfigServiceDirectoryConfig, OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("ServiceDirectoryConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SslCa, actual.SslCa, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("SslCa")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServerVersion, actual.ServerVersion, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServerVersion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareConnectionGitlabConfigReadAuthorizerCredentialNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ConnectionGitlabConfigReadAuthorizerCredential) + if !ok { + desiredNotPointer, ok := d.(ConnectionGitlabConfigReadAuthorizerCredential) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGitlabConfigReadAuthorizerCredential or *ConnectionGitlabConfigReadAuthorizerCredential", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ConnectionGitlabConfigReadAuthorizerCredential) + if !ok { + actualNotPointer, ok := a.(ConnectionGitlabConfigReadAuthorizerCredential) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGitlabConfigReadAuthorizerCredential", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.UserTokenSecretVersion, actual.UserTokenSecretVersion, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("UserTokenSecretVersion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Username, actual.Username, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Username")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareConnectionGitlabConfigAuthorizerCredentialNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ConnectionGitlabConfigAuthorizerCredential) + if !ok { + desiredNotPointer, ok := d.(ConnectionGitlabConfigAuthorizerCredential) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGitlabConfigAuthorizerCredential or *ConnectionGitlabConfigAuthorizerCredential", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ConnectionGitlabConfigAuthorizerCredential) + if !ok { + actualNotPointer, ok := a.(ConnectionGitlabConfigAuthorizerCredential) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGitlabConfigAuthorizerCredential", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.UserTokenSecretVersion, actual.UserTokenSecretVersion, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("UserTokenSecretVersion")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Username, actual.Username, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Username")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareConnectionGitlabConfigServiceDirectoryConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ConnectionGitlabConfigServiceDirectoryConfig) + if !ok { + desiredNotPointer, ok := d.(ConnectionGitlabConfigServiceDirectoryConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGitlabConfigServiceDirectoryConfig or *ConnectionGitlabConfigServiceDirectoryConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ConnectionGitlabConfigServiceDirectoryConfig) + if !ok { + actualNotPointer, ok := a.(ConnectionGitlabConfigServiceDirectoryConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionGitlabConfigServiceDirectoryConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Service, actual.Service, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateConnectionUpdateConnectionOperation")}, fn.AddNest("Service")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareConnectionInstallationStateNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*ConnectionInstallationState) + if !ok { + desiredNotPointer, ok := d.(ConnectionInstallationState) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionInstallationState or *ConnectionInstallationState", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*ConnectionInstallationState) + if !ok { + actualNotPointer, ok := a.(ConnectionInstallationState) + if !ok { + return nil, fmt.Errorf("obj %v is not a ConnectionInstallationState", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Stage, actual.Stage, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Stage")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Message, actual.Message, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Message")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ActionUri, actual.ActionUri, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ActionUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *Connection) urlNormalized() *Connection { + normalized := dcl.Copy(*r).(Connection) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *Connection) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateConnection" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/connections/{{name}}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Connection resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Connection) marshal(c *Client) ([]byte, error) { + m, err := expandConnection(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Connection: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalConnection decodes JSON responses into the Connection resource schema. +func unmarshalConnection(b []byte, c *Client, res *Connection) (*Connection, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapConnection(m, c, res) +} + +func unmarshalMapConnection(m map[string]interface{}, c *Client, res *Connection) (*Connection, error) { + + flattened := flattenConnection(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandConnection expands Connection into a JSON request object. 
+func expandConnection(c *Client, f *Connection) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v, err := expandConnectionGithubConfig(c, f.GithubConfig, res); err != nil { + return nil, fmt.Errorf("error expanding GithubConfig into githubConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["githubConfig"] = v + } + if v, err := expandConnectionGithubEnterpriseConfig(c, f.GithubEnterpriseConfig, res); err != nil { + return nil, fmt.Errorf("error expanding GithubEnterpriseConfig into githubEnterpriseConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["githubEnterpriseConfig"] = v + } + if v, err := expandConnectionGitlabConfig(c, f.GitlabConfig, res); err != nil { + return nil, fmt.Errorf("error expanding GitlabConfig into gitlabConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["gitlabConfig"] = v + } + if v := f.Disabled; dcl.ValueShouldBeSent(v) { + m["disabled"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + + return m, nil +} + +// flattenConnection flattens Connection from a JSON request object into the +// Connection type. 
+func flattenConnection(c *Client, i interface{}, res *Connection) *Connection { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Connection{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.GithubConfig = flattenConnectionGithubConfig(c, m["githubConfig"], res) + resultRes.GithubEnterpriseConfig = flattenConnectionGithubEnterpriseConfig(c, m["githubEnterpriseConfig"], res) + resultRes.GitlabConfig = flattenConnectionGitlabConfig(c, m["gitlabConfig"], res) + resultRes.InstallationState = flattenConnectionInstallationState(c, m["installationState"], res) + resultRes.Disabled = dcl.FlattenBool(m["disabled"]) + resultRes.Reconciling = dcl.FlattenBool(m["reconciling"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + + return resultRes +} + +// expandConnectionGithubConfigMap expands the contents of ConnectionGithubConfig into a JSON +// request object. +func expandConnectionGithubConfigMap(c *Client, f map[string]ConnectionGithubConfig, res *Connection) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandConnectionGithubConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandConnectionGithubConfigSlice expands the contents of ConnectionGithubConfig into a JSON +// request object. 
+func expandConnectionGithubConfigSlice(c *Client, f []ConnectionGithubConfig, res *Connection) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandConnectionGithubConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenConnectionGithubConfigMap flattens the contents of ConnectionGithubConfig from a JSON +// response object. +func flattenConnectionGithubConfigMap(c *Client, i interface{}, res *Connection) map[string]ConnectionGithubConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ConnectionGithubConfig{} + } + + if len(a) == 0 { + return map[string]ConnectionGithubConfig{} + } + + items := make(map[string]ConnectionGithubConfig) + for k, item := range a { + items[k] = *flattenConnectionGithubConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenConnectionGithubConfigSlice flattens the contents of ConnectionGithubConfig from a JSON +// response object. +func flattenConnectionGithubConfigSlice(c *Client, i interface{}, res *Connection) []ConnectionGithubConfig { + a, ok := i.([]interface{}) + if !ok { + return []ConnectionGithubConfig{} + } + + if len(a) == 0 { + return []ConnectionGithubConfig{} + } + + items := make([]ConnectionGithubConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenConnectionGithubConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandConnectionGithubConfig expands an instance of ConnectionGithubConfig into a JSON +// request object. 
+func expandConnectionGithubConfig(c *Client, f *ConnectionGithubConfig, res *Connection) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandConnectionGithubConfigAuthorizerCredential(c, f.AuthorizerCredential, res); err != nil { + return nil, fmt.Errorf("error expanding AuthorizerCredential into authorizerCredential: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["authorizerCredential"] = v + } + if v := f.AppInstallationId; !dcl.IsEmptyValueIndirect(v) { + m["appInstallationId"] = v + } + + return m, nil +} + +// flattenConnectionGithubConfig flattens an instance of ConnectionGithubConfig from a JSON +// response object. +func flattenConnectionGithubConfig(c *Client, i interface{}, res *Connection) *ConnectionGithubConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ConnectionGithubConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyConnectionGithubConfig + } + r.AuthorizerCredential = flattenConnectionGithubConfigAuthorizerCredential(c, m["authorizerCredential"], res) + r.AppInstallationId = dcl.FlattenInteger(m["appInstallationId"]) + + return r +} + +// expandConnectionGithubConfigAuthorizerCredentialMap expands the contents of ConnectionGithubConfigAuthorizerCredential into a JSON +// request object. +func expandConnectionGithubConfigAuthorizerCredentialMap(c *Client, f map[string]ConnectionGithubConfigAuthorizerCredential, res *Connection) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandConnectionGithubConfigAuthorizerCredential(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandConnectionGithubConfigAuthorizerCredentialSlice expands the contents of ConnectionGithubConfigAuthorizerCredential into a JSON +// request object. 
+func expandConnectionGithubConfigAuthorizerCredentialSlice(c *Client, f []ConnectionGithubConfigAuthorizerCredential, res *Connection) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandConnectionGithubConfigAuthorizerCredential(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenConnectionGithubConfigAuthorizerCredentialMap flattens the contents of ConnectionGithubConfigAuthorizerCredential from a JSON +// response object. +func flattenConnectionGithubConfigAuthorizerCredentialMap(c *Client, i interface{}, res *Connection) map[string]ConnectionGithubConfigAuthorizerCredential { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ConnectionGithubConfigAuthorizerCredential{} + } + + if len(a) == 0 { + return map[string]ConnectionGithubConfigAuthorizerCredential{} + } + + items := make(map[string]ConnectionGithubConfigAuthorizerCredential) + for k, item := range a { + items[k] = *flattenConnectionGithubConfigAuthorizerCredential(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenConnectionGithubConfigAuthorizerCredentialSlice flattens the contents of ConnectionGithubConfigAuthorizerCredential from a JSON +// response object. 
+func flattenConnectionGithubConfigAuthorizerCredentialSlice(c *Client, i interface{}, res *Connection) []ConnectionGithubConfigAuthorizerCredential { + a, ok := i.([]interface{}) + if !ok { + return []ConnectionGithubConfigAuthorizerCredential{} + } + + if len(a) == 0 { + return []ConnectionGithubConfigAuthorizerCredential{} + } + + items := make([]ConnectionGithubConfigAuthorizerCredential, 0, len(a)) + for _, item := range a { + items = append(items, *flattenConnectionGithubConfigAuthorizerCredential(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandConnectionGithubConfigAuthorizerCredential expands an instance of ConnectionGithubConfigAuthorizerCredential into a JSON +// request object. +func expandConnectionGithubConfigAuthorizerCredential(c *Client, f *ConnectionGithubConfigAuthorizerCredential, res *Connection) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.OAuthTokenSecretVersion; !dcl.IsEmptyValueIndirect(v) { + m["oauthTokenSecretVersion"] = v + } + + return m, nil +} + +// flattenConnectionGithubConfigAuthorizerCredential flattens an instance of ConnectionGithubConfigAuthorizerCredential from a JSON +// response object. +func flattenConnectionGithubConfigAuthorizerCredential(c *Client, i interface{}, res *Connection) *ConnectionGithubConfigAuthorizerCredential { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ConnectionGithubConfigAuthorizerCredential{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyConnectionGithubConfigAuthorizerCredential + } + r.OAuthTokenSecretVersion = dcl.FlattenString(m["oauthTokenSecretVersion"]) + r.Username = dcl.FlattenString(m["username"]) + + return r +} + +// expandConnectionGithubEnterpriseConfigMap expands the contents of ConnectionGithubEnterpriseConfig into a JSON +// request object. 
+func expandConnectionGithubEnterpriseConfigMap(c *Client, f map[string]ConnectionGithubEnterpriseConfig, res *Connection) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandConnectionGithubEnterpriseConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandConnectionGithubEnterpriseConfigSlice expands the contents of ConnectionGithubEnterpriseConfig into a JSON +// request object. +func expandConnectionGithubEnterpriseConfigSlice(c *Client, f []ConnectionGithubEnterpriseConfig, res *Connection) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandConnectionGithubEnterpriseConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenConnectionGithubEnterpriseConfigMap flattens the contents of ConnectionGithubEnterpriseConfig from a JSON +// response object. +func flattenConnectionGithubEnterpriseConfigMap(c *Client, i interface{}, res *Connection) map[string]ConnectionGithubEnterpriseConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ConnectionGithubEnterpriseConfig{} + } + + if len(a) == 0 { + return map[string]ConnectionGithubEnterpriseConfig{} + } + + items := make(map[string]ConnectionGithubEnterpriseConfig) + for k, item := range a { + items[k] = *flattenConnectionGithubEnterpriseConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenConnectionGithubEnterpriseConfigSlice flattens the contents of ConnectionGithubEnterpriseConfig from a JSON +// response object. 
+func flattenConnectionGithubEnterpriseConfigSlice(c *Client, i interface{}, res *Connection) []ConnectionGithubEnterpriseConfig { + a, ok := i.([]interface{}) + if !ok { + return []ConnectionGithubEnterpriseConfig{} + } + + if len(a) == 0 { + return []ConnectionGithubEnterpriseConfig{} + } + + items := make([]ConnectionGithubEnterpriseConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenConnectionGithubEnterpriseConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandConnectionGithubEnterpriseConfig expands an instance of ConnectionGithubEnterpriseConfig into a JSON +// request object. +func expandConnectionGithubEnterpriseConfig(c *Client, f *ConnectionGithubEnterpriseConfig, res *Connection) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.HostUri; !dcl.IsEmptyValueIndirect(v) { + m["hostUri"] = v + } + if v := f.AppId; !dcl.IsEmptyValueIndirect(v) { + m["appId"] = v + } + if v := f.AppSlug; !dcl.IsEmptyValueIndirect(v) { + m["appSlug"] = v + } + if v := f.PrivateKeySecretVersion; !dcl.IsEmptyValueIndirect(v) { + m["privateKeySecretVersion"] = v + } + if v := f.WebhookSecretSecretVersion; !dcl.IsEmptyValueIndirect(v) { + m["webhookSecretSecretVersion"] = v + } + if v := f.AppInstallationId; !dcl.IsEmptyValueIndirect(v) { + m["appInstallationId"] = v + } + if v, err := expandConnectionGithubEnterpriseConfigServiceDirectoryConfig(c, f.ServiceDirectoryConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ServiceDirectoryConfig into serviceDirectoryConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["serviceDirectoryConfig"] = v + } + if v := f.SslCa; !dcl.IsEmptyValueIndirect(v) { + m["sslCa"] = v + } + + return m, nil +} + +// flattenConnectionGithubEnterpriseConfig flattens an instance of ConnectionGithubEnterpriseConfig from a JSON +// response object. 
+func flattenConnectionGithubEnterpriseConfig(c *Client, i interface{}, res *Connection) *ConnectionGithubEnterpriseConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ConnectionGithubEnterpriseConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyConnectionGithubEnterpriseConfig + } + r.HostUri = dcl.FlattenString(m["hostUri"]) + r.AppId = dcl.FlattenInteger(m["appId"]) + r.AppSlug = dcl.FlattenString(m["appSlug"]) + r.PrivateKeySecretVersion = dcl.FlattenString(m["privateKeySecretVersion"]) + r.WebhookSecretSecretVersion = dcl.FlattenString(m["webhookSecretSecretVersion"]) + r.AppInstallationId = dcl.FlattenInteger(m["appInstallationId"]) + r.ServiceDirectoryConfig = flattenConnectionGithubEnterpriseConfigServiceDirectoryConfig(c, m["serviceDirectoryConfig"], res) + r.SslCa = dcl.FlattenString(m["sslCa"]) + + return r +} + +// expandConnectionGithubEnterpriseConfigServiceDirectoryConfigMap expands the contents of ConnectionGithubEnterpriseConfigServiceDirectoryConfig into a JSON +// request object. +func expandConnectionGithubEnterpriseConfigServiceDirectoryConfigMap(c *Client, f map[string]ConnectionGithubEnterpriseConfigServiceDirectoryConfig, res *Connection) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandConnectionGithubEnterpriseConfigServiceDirectoryConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandConnectionGithubEnterpriseConfigServiceDirectoryConfigSlice expands the contents of ConnectionGithubEnterpriseConfigServiceDirectoryConfig into a JSON +// request object. 
+func expandConnectionGithubEnterpriseConfigServiceDirectoryConfigSlice(c *Client, f []ConnectionGithubEnterpriseConfigServiceDirectoryConfig, res *Connection) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandConnectionGithubEnterpriseConfigServiceDirectoryConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenConnectionGithubEnterpriseConfigServiceDirectoryConfigMap flattens the contents of ConnectionGithubEnterpriseConfigServiceDirectoryConfig from a JSON +// response object. +func flattenConnectionGithubEnterpriseConfigServiceDirectoryConfigMap(c *Client, i interface{}, res *Connection) map[string]ConnectionGithubEnterpriseConfigServiceDirectoryConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ConnectionGithubEnterpriseConfigServiceDirectoryConfig{} + } + + if len(a) == 0 { + return map[string]ConnectionGithubEnterpriseConfigServiceDirectoryConfig{} + } + + items := make(map[string]ConnectionGithubEnterpriseConfigServiceDirectoryConfig) + for k, item := range a { + items[k] = *flattenConnectionGithubEnterpriseConfigServiceDirectoryConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenConnectionGithubEnterpriseConfigServiceDirectoryConfigSlice flattens the contents of ConnectionGithubEnterpriseConfigServiceDirectoryConfig from a JSON +// response object. 
+func flattenConnectionGithubEnterpriseConfigServiceDirectoryConfigSlice(c *Client, i interface{}, res *Connection) []ConnectionGithubEnterpriseConfigServiceDirectoryConfig { + a, ok := i.([]interface{}) + if !ok { + return []ConnectionGithubEnterpriseConfigServiceDirectoryConfig{} + } + + if len(a) == 0 { + return []ConnectionGithubEnterpriseConfigServiceDirectoryConfig{} + } + + items := make([]ConnectionGithubEnterpriseConfigServiceDirectoryConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenConnectionGithubEnterpriseConfigServiceDirectoryConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandConnectionGithubEnterpriseConfigServiceDirectoryConfig expands an instance of ConnectionGithubEnterpriseConfigServiceDirectoryConfig into a JSON +// request object. +func expandConnectionGithubEnterpriseConfigServiceDirectoryConfig(c *Client, f *ConnectionGithubEnterpriseConfigServiceDirectoryConfig, res *Connection) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Service; !dcl.IsEmptyValueIndirect(v) { + m["service"] = v + } + + return m, nil +} + +// flattenConnectionGithubEnterpriseConfigServiceDirectoryConfig flattens an instance of ConnectionGithubEnterpriseConfigServiceDirectoryConfig from a JSON +// response object. +func flattenConnectionGithubEnterpriseConfigServiceDirectoryConfig(c *Client, i interface{}, res *Connection) *ConnectionGithubEnterpriseConfigServiceDirectoryConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ConnectionGithubEnterpriseConfigServiceDirectoryConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyConnectionGithubEnterpriseConfigServiceDirectoryConfig + } + r.Service = dcl.FlattenString(m["service"]) + + return r +} + +// expandConnectionGitlabConfigMap expands the contents of ConnectionGitlabConfig into a JSON +// request object. 
+func expandConnectionGitlabConfigMap(c *Client, f map[string]ConnectionGitlabConfig, res *Connection) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandConnectionGitlabConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandConnectionGitlabConfigSlice expands the contents of ConnectionGitlabConfig into a JSON +// request object. +func expandConnectionGitlabConfigSlice(c *Client, f []ConnectionGitlabConfig, res *Connection) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandConnectionGitlabConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenConnectionGitlabConfigMap flattens the contents of ConnectionGitlabConfig from a JSON +// response object. +func flattenConnectionGitlabConfigMap(c *Client, i interface{}, res *Connection) map[string]ConnectionGitlabConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ConnectionGitlabConfig{} + } + + if len(a) == 0 { + return map[string]ConnectionGitlabConfig{} + } + + items := make(map[string]ConnectionGitlabConfig) + for k, item := range a { + items[k] = *flattenConnectionGitlabConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenConnectionGitlabConfigSlice flattens the contents of ConnectionGitlabConfig from a JSON +// response object. 
+func flattenConnectionGitlabConfigSlice(c *Client, i interface{}, res *Connection) []ConnectionGitlabConfig { + a, ok := i.([]interface{}) + if !ok { + return []ConnectionGitlabConfig{} + } + + if len(a) == 0 { + return []ConnectionGitlabConfig{} + } + + items := make([]ConnectionGitlabConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenConnectionGitlabConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandConnectionGitlabConfig expands an instance of ConnectionGitlabConfig into a JSON +// request object. +func expandConnectionGitlabConfig(c *Client, f *ConnectionGitlabConfig, res *Connection) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.HostUri; !dcl.IsEmptyValueIndirect(v) { + m["hostUri"] = v + } + if v := f.WebhookSecretSecretVersion; !dcl.IsEmptyValueIndirect(v) { + m["webhookSecretSecretVersion"] = v + } + if v, err := expandConnectionGitlabConfigReadAuthorizerCredential(c, f.ReadAuthorizerCredential, res); err != nil { + return nil, fmt.Errorf("error expanding ReadAuthorizerCredential into readAuthorizerCredential: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["readAuthorizerCredential"] = v + } + if v, err := expandConnectionGitlabConfigAuthorizerCredential(c, f.AuthorizerCredential, res); err != nil { + return nil, fmt.Errorf("error expanding AuthorizerCredential into authorizerCredential: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["authorizerCredential"] = v + } + if v, err := expandConnectionGitlabConfigServiceDirectoryConfig(c, f.ServiceDirectoryConfig, res); err != nil { + return nil, fmt.Errorf("error expanding ServiceDirectoryConfig into serviceDirectoryConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["serviceDirectoryConfig"] = v + } + if v := f.SslCa; !dcl.IsEmptyValueIndirect(v) { + m["sslCa"] = v + } + + return m, nil +} + +// flattenConnectionGitlabConfig 
flattens an instance of ConnectionGitlabConfig from a JSON +// response object. +func flattenConnectionGitlabConfig(c *Client, i interface{}, res *Connection) *ConnectionGitlabConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ConnectionGitlabConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyConnectionGitlabConfig + } + r.HostUri = dcl.FlattenString(m["hostUri"]) + r.WebhookSecretSecretVersion = dcl.FlattenString(m["webhookSecretSecretVersion"]) + r.ReadAuthorizerCredential = flattenConnectionGitlabConfigReadAuthorizerCredential(c, m["readAuthorizerCredential"], res) + r.AuthorizerCredential = flattenConnectionGitlabConfigAuthorizerCredential(c, m["authorizerCredential"], res) + r.ServiceDirectoryConfig = flattenConnectionGitlabConfigServiceDirectoryConfig(c, m["serviceDirectoryConfig"], res) + r.SslCa = dcl.FlattenString(m["sslCa"]) + r.ServerVersion = dcl.FlattenString(m["serverVersion"]) + + return r +} + +// expandConnectionGitlabConfigReadAuthorizerCredentialMap expands the contents of ConnectionGitlabConfigReadAuthorizerCredential into a JSON +// request object. +func expandConnectionGitlabConfigReadAuthorizerCredentialMap(c *Client, f map[string]ConnectionGitlabConfigReadAuthorizerCredential, res *Connection) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandConnectionGitlabConfigReadAuthorizerCredential(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandConnectionGitlabConfigReadAuthorizerCredentialSlice expands the contents of ConnectionGitlabConfigReadAuthorizerCredential into a JSON +// request object. 
+func expandConnectionGitlabConfigReadAuthorizerCredentialSlice(c *Client, f []ConnectionGitlabConfigReadAuthorizerCredential, res *Connection) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandConnectionGitlabConfigReadAuthorizerCredential(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenConnectionGitlabConfigReadAuthorizerCredentialMap flattens the contents of ConnectionGitlabConfigReadAuthorizerCredential from a JSON +// response object. +func flattenConnectionGitlabConfigReadAuthorizerCredentialMap(c *Client, i interface{}, res *Connection) map[string]ConnectionGitlabConfigReadAuthorizerCredential { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ConnectionGitlabConfigReadAuthorizerCredential{} + } + + if len(a) == 0 { + return map[string]ConnectionGitlabConfigReadAuthorizerCredential{} + } + + items := make(map[string]ConnectionGitlabConfigReadAuthorizerCredential) + for k, item := range a { + items[k] = *flattenConnectionGitlabConfigReadAuthorizerCredential(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenConnectionGitlabConfigReadAuthorizerCredentialSlice flattens the contents of ConnectionGitlabConfigReadAuthorizerCredential from a JSON +// response object. 
+func flattenConnectionGitlabConfigReadAuthorizerCredentialSlice(c *Client, i interface{}, res *Connection) []ConnectionGitlabConfigReadAuthorizerCredential { + a, ok := i.([]interface{}) + if !ok { + return []ConnectionGitlabConfigReadAuthorizerCredential{} + } + + if len(a) == 0 { + return []ConnectionGitlabConfigReadAuthorizerCredential{} + } + + items := make([]ConnectionGitlabConfigReadAuthorizerCredential, 0, len(a)) + for _, item := range a { + items = append(items, *flattenConnectionGitlabConfigReadAuthorizerCredential(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandConnectionGitlabConfigReadAuthorizerCredential expands an instance of ConnectionGitlabConfigReadAuthorizerCredential into a JSON +// request object. +func expandConnectionGitlabConfigReadAuthorizerCredential(c *Client, f *ConnectionGitlabConfigReadAuthorizerCredential, res *Connection) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.UserTokenSecretVersion; !dcl.IsEmptyValueIndirect(v) { + m["userTokenSecretVersion"] = v + } + + return m, nil +} + +// flattenConnectionGitlabConfigReadAuthorizerCredential flattens an instance of ConnectionGitlabConfigReadAuthorizerCredential from a JSON +// response object. +func flattenConnectionGitlabConfigReadAuthorizerCredential(c *Client, i interface{}, res *Connection) *ConnectionGitlabConfigReadAuthorizerCredential { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ConnectionGitlabConfigReadAuthorizerCredential{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyConnectionGitlabConfigReadAuthorizerCredential + } + r.UserTokenSecretVersion = dcl.FlattenString(m["userTokenSecretVersion"]) + r.Username = dcl.FlattenString(m["username"]) + + return r +} + +// expandConnectionGitlabConfigAuthorizerCredentialMap expands the contents of ConnectionGitlabConfigAuthorizerCredential into a JSON +// request object. 
+func expandConnectionGitlabConfigAuthorizerCredentialMap(c *Client, f map[string]ConnectionGitlabConfigAuthorizerCredential, res *Connection) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandConnectionGitlabConfigAuthorizerCredential(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandConnectionGitlabConfigAuthorizerCredentialSlice expands the contents of ConnectionGitlabConfigAuthorizerCredential into a JSON +// request object. +func expandConnectionGitlabConfigAuthorizerCredentialSlice(c *Client, f []ConnectionGitlabConfigAuthorizerCredential, res *Connection) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandConnectionGitlabConfigAuthorizerCredential(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenConnectionGitlabConfigAuthorizerCredentialMap flattens the contents of ConnectionGitlabConfigAuthorizerCredential from a JSON +// response object. +func flattenConnectionGitlabConfigAuthorizerCredentialMap(c *Client, i interface{}, res *Connection) map[string]ConnectionGitlabConfigAuthorizerCredential { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ConnectionGitlabConfigAuthorizerCredential{} + } + + if len(a) == 0 { + return map[string]ConnectionGitlabConfigAuthorizerCredential{} + } + + items := make(map[string]ConnectionGitlabConfigAuthorizerCredential) + for k, item := range a { + items[k] = *flattenConnectionGitlabConfigAuthorizerCredential(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenConnectionGitlabConfigAuthorizerCredentialSlice flattens the contents of ConnectionGitlabConfigAuthorizerCredential from a JSON +// response object. 
+func flattenConnectionGitlabConfigAuthorizerCredentialSlice(c *Client, i interface{}, res *Connection) []ConnectionGitlabConfigAuthorizerCredential { + a, ok := i.([]interface{}) + if !ok { + return []ConnectionGitlabConfigAuthorizerCredential{} + } + + if len(a) == 0 { + return []ConnectionGitlabConfigAuthorizerCredential{} + } + + items := make([]ConnectionGitlabConfigAuthorizerCredential, 0, len(a)) + for _, item := range a { + items = append(items, *flattenConnectionGitlabConfigAuthorizerCredential(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandConnectionGitlabConfigAuthorizerCredential expands an instance of ConnectionGitlabConfigAuthorizerCredential into a JSON +// request object. +func expandConnectionGitlabConfigAuthorizerCredential(c *Client, f *ConnectionGitlabConfigAuthorizerCredential, res *Connection) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.UserTokenSecretVersion; !dcl.IsEmptyValueIndirect(v) { + m["userTokenSecretVersion"] = v + } + + return m, nil +} + +// flattenConnectionGitlabConfigAuthorizerCredential flattens an instance of ConnectionGitlabConfigAuthorizerCredential from a JSON +// response object. +func flattenConnectionGitlabConfigAuthorizerCredential(c *Client, i interface{}, res *Connection) *ConnectionGitlabConfigAuthorizerCredential { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ConnectionGitlabConfigAuthorizerCredential{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyConnectionGitlabConfigAuthorizerCredential + } + r.UserTokenSecretVersion = dcl.FlattenString(m["userTokenSecretVersion"]) + r.Username = dcl.FlattenString(m["username"]) + + return r +} + +// expandConnectionGitlabConfigServiceDirectoryConfigMap expands the contents of ConnectionGitlabConfigServiceDirectoryConfig into a JSON +// request object. 
+func expandConnectionGitlabConfigServiceDirectoryConfigMap(c *Client, f map[string]ConnectionGitlabConfigServiceDirectoryConfig, res *Connection) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandConnectionGitlabConfigServiceDirectoryConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandConnectionGitlabConfigServiceDirectoryConfigSlice expands the contents of ConnectionGitlabConfigServiceDirectoryConfig into a JSON +// request object. +func expandConnectionGitlabConfigServiceDirectoryConfigSlice(c *Client, f []ConnectionGitlabConfigServiceDirectoryConfig, res *Connection) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandConnectionGitlabConfigServiceDirectoryConfig(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenConnectionGitlabConfigServiceDirectoryConfigMap flattens the contents of ConnectionGitlabConfigServiceDirectoryConfig from a JSON +// response object. 
+func flattenConnectionGitlabConfigServiceDirectoryConfigMap(c *Client, i interface{}, res *Connection) map[string]ConnectionGitlabConfigServiceDirectoryConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ConnectionGitlabConfigServiceDirectoryConfig{} + } + + if len(a) == 0 { + return map[string]ConnectionGitlabConfigServiceDirectoryConfig{} + } + + items := make(map[string]ConnectionGitlabConfigServiceDirectoryConfig) + for k, item := range a { + items[k] = *flattenConnectionGitlabConfigServiceDirectoryConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenConnectionGitlabConfigServiceDirectoryConfigSlice flattens the contents of ConnectionGitlabConfigServiceDirectoryConfig from a JSON +// response object. +func flattenConnectionGitlabConfigServiceDirectoryConfigSlice(c *Client, i interface{}, res *Connection) []ConnectionGitlabConfigServiceDirectoryConfig { + a, ok := i.([]interface{}) + if !ok { + return []ConnectionGitlabConfigServiceDirectoryConfig{} + } + + if len(a) == 0 { + return []ConnectionGitlabConfigServiceDirectoryConfig{} + } + + items := make([]ConnectionGitlabConfigServiceDirectoryConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenConnectionGitlabConfigServiceDirectoryConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandConnectionGitlabConfigServiceDirectoryConfig expands an instance of ConnectionGitlabConfigServiceDirectoryConfig into a JSON +// request object. 
+func expandConnectionGitlabConfigServiceDirectoryConfig(c *Client, f *ConnectionGitlabConfigServiceDirectoryConfig, res *Connection) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Service; !dcl.IsEmptyValueIndirect(v) { + m["service"] = v + } + + return m, nil +} + +// flattenConnectionGitlabConfigServiceDirectoryConfig flattens an instance of ConnectionGitlabConfigServiceDirectoryConfig from a JSON +// response object. +func flattenConnectionGitlabConfigServiceDirectoryConfig(c *Client, i interface{}, res *Connection) *ConnectionGitlabConfigServiceDirectoryConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ConnectionGitlabConfigServiceDirectoryConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyConnectionGitlabConfigServiceDirectoryConfig + } + r.Service = dcl.FlattenString(m["service"]) + + return r +} + +// expandConnectionInstallationStateMap expands the contents of ConnectionInstallationState into a JSON +// request object. +func expandConnectionInstallationStateMap(c *Client, f map[string]ConnectionInstallationState, res *Connection) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandConnectionInstallationState(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandConnectionInstallationStateSlice expands the contents of ConnectionInstallationState into a JSON +// request object. 
+func expandConnectionInstallationStateSlice(c *Client, f []ConnectionInstallationState, res *Connection) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandConnectionInstallationState(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenConnectionInstallationStateMap flattens the contents of ConnectionInstallationState from a JSON +// response object. +func flattenConnectionInstallationStateMap(c *Client, i interface{}, res *Connection) map[string]ConnectionInstallationState { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ConnectionInstallationState{} + } + + if len(a) == 0 { + return map[string]ConnectionInstallationState{} + } + + items := make(map[string]ConnectionInstallationState) + for k, item := range a { + items[k] = *flattenConnectionInstallationState(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenConnectionInstallationStateSlice flattens the contents of ConnectionInstallationState from a JSON +// response object. +func flattenConnectionInstallationStateSlice(c *Client, i interface{}, res *Connection) []ConnectionInstallationState { + a, ok := i.([]interface{}) + if !ok { + return []ConnectionInstallationState{} + } + + if len(a) == 0 { + return []ConnectionInstallationState{} + } + + items := make([]ConnectionInstallationState, 0, len(a)) + for _, item := range a { + items = append(items, *flattenConnectionInstallationState(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandConnectionInstallationState expands an instance of ConnectionInstallationState into a JSON +// request object. 
+func expandConnectionInstallationState(c *Client, f *ConnectionInstallationState, res *Connection) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + + return m, nil +} + +// flattenConnectionInstallationState flattens an instance of ConnectionInstallationState from a JSON +// response object. +func flattenConnectionInstallationState(c *Client, i interface{}, res *Connection) *ConnectionInstallationState { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &ConnectionInstallationState{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyConnectionInstallationState + } + r.Stage = flattenConnectionInstallationStateStageEnum(m["stage"]) + r.Message = dcl.FlattenString(m["message"]) + r.ActionUri = dcl.FlattenString(m["actionUri"]) + + return r +} + +// flattenConnectionInstallationStateStageEnumMap flattens the contents of ConnectionInstallationStateStageEnum from a JSON +// response object. +func flattenConnectionInstallationStateStageEnumMap(c *Client, i interface{}, res *Connection) map[string]ConnectionInstallationStateStageEnum { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]ConnectionInstallationStateStageEnum{} + } + + if len(a) == 0 { + return map[string]ConnectionInstallationStateStageEnum{} + } + + items := make(map[string]ConnectionInstallationStateStageEnum) + for k, item := range a { + items[k] = *flattenConnectionInstallationStateStageEnum(item.(interface{})) + } + + return items +} + +// flattenConnectionInstallationStateStageEnumSlice flattens the contents of ConnectionInstallationStateStageEnum from a JSON +// response object. 
+func flattenConnectionInstallationStateStageEnumSlice(c *Client, i interface{}, res *Connection) []ConnectionInstallationStateStageEnum { + a, ok := i.([]interface{}) + if !ok { + return []ConnectionInstallationStateStageEnum{} + } + + if len(a) == 0 { + return []ConnectionInstallationStateStageEnum{} + } + + items := make([]ConnectionInstallationStateStageEnum, 0, len(a)) + for _, item := range a { + items = append(items, *flattenConnectionInstallationStateStageEnum(item.(interface{}))) + } + + return items +} + +// flattenConnectionInstallationStateStageEnum asserts that an interface is a string, and returns a +// pointer to a *ConnectionInstallationStateStageEnum with the same value as that string. +func flattenConnectionInstallationStateStageEnum(i interface{}) *ConnectionInstallationStateStageEnum { + s, ok := i.(string) + if !ok { + return nil + } + + return ConnectionInstallationStateStageEnumRef(s) +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Connection) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalConnection(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type connectionDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. + RequiresRecreate bool + UpdateOp connectionApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToConnectionDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]connectionDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. 
+ for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []connectionDiff + // For each operation name, create a connectionDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := connectionDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToConnectionApiOperation(opName, fieldDiffs, opts...) + if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToConnectionApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (connectionApiOperation, error) { + switch opName { + + case "updateConnectionUpdateConnectionOperation": + return &updateConnectionUpdateConnectionOperation{FieldDiffs: fieldDiffs}, nil + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractConnectionFields(r *Connection) error { + vGithubConfig := r.GithubConfig + if vGithubConfig == nil { + // note: explicitly not the empty object. + vGithubConfig = &ConnectionGithubConfig{} + } + if err := extractConnectionGithubConfigFields(r, vGithubConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGithubConfig) { + r.GithubConfig = vGithubConfig + } + vGithubEnterpriseConfig := r.GithubEnterpriseConfig + if vGithubEnterpriseConfig == nil { + // note: explicitly not the empty object. 
+ vGithubEnterpriseConfig = &ConnectionGithubEnterpriseConfig{} + } + if err := extractConnectionGithubEnterpriseConfigFields(r, vGithubEnterpriseConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGithubEnterpriseConfig) { + r.GithubEnterpriseConfig = vGithubEnterpriseConfig + } + vGitlabConfig := r.GitlabConfig + if vGitlabConfig == nil { + // note: explicitly not the empty object. + vGitlabConfig = &ConnectionGitlabConfig{} + } + if err := extractConnectionGitlabConfigFields(r, vGitlabConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGitlabConfig) { + r.GitlabConfig = vGitlabConfig + } + vInstallationState := r.InstallationState + if vInstallationState == nil { + // note: explicitly not the empty object. + vInstallationState = &ConnectionInstallationState{} + } + if err := extractConnectionInstallationStateFields(r, vInstallationState); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vInstallationState) { + r.InstallationState = vInstallationState + } + return nil +} +func extractConnectionGithubConfigFields(r *Connection, o *ConnectionGithubConfig) error { + vAuthorizerCredential := o.AuthorizerCredential + if vAuthorizerCredential == nil { + // note: explicitly not the empty object. + vAuthorizerCredential = &ConnectionGithubConfigAuthorizerCredential{} + } + if err := extractConnectionGithubConfigAuthorizerCredentialFields(r, vAuthorizerCredential); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuthorizerCredential) { + o.AuthorizerCredential = vAuthorizerCredential + } + return nil +} +func extractConnectionGithubConfigAuthorizerCredentialFields(r *Connection, o *ConnectionGithubConfigAuthorizerCredential) error { + return nil +} +func extractConnectionGithubEnterpriseConfigFields(r *Connection, o *ConnectionGithubEnterpriseConfig) error { + vServiceDirectoryConfig := o.ServiceDirectoryConfig + if vServiceDirectoryConfig == nil { + // note: explicitly not the empty object. 
+ vServiceDirectoryConfig = &ConnectionGithubEnterpriseConfigServiceDirectoryConfig{} + } + if err := extractConnectionGithubEnterpriseConfigServiceDirectoryConfigFields(r, vServiceDirectoryConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vServiceDirectoryConfig) { + o.ServiceDirectoryConfig = vServiceDirectoryConfig + } + return nil +} +func extractConnectionGithubEnterpriseConfigServiceDirectoryConfigFields(r *Connection, o *ConnectionGithubEnterpriseConfigServiceDirectoryConfig) error { + return nil +} +func extractConnectionGitlabConfigFields(r *Connection, o *ConnectionGitlabConfig) error { + vReadAuthorizerCredential := o.ReadAuthorizerCredential + if vReadAuthorizerCredential == nil { + // note: explicitly not the empty object. + vReadAuthorizerCredential = &ConnectionGitlabConfigReadAuthorizerCredential{} + } + if err := extractConnectionGitlabConfigReadAuthorizerCredentialFields(r, vReadAuthorizerCredential); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vReadAuthorizerCredential) { + o.ReadAuthorizerCredential = vReadAuthorizerCredential + } + vAuthorizerCredential := o.AuthorizerCredential + if vAuthorizerCredential == nil { + // note: explicitly not the empty object. + vAuthorizerCredential = &ConnectionGitlabConfigAuthorizerCredential{} + } + if err := extractConnectionGitlabConfigAuthorizerCredentialFields(r, vAuthorizerCredential); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuthorizerCredential) { + o.AuthorizerCredential = vAuthorizerCredential + } + vServiceDirectoryConfig := o.ServiceDirectoryConfig + if vServiceDirectoryConfig == nil { + // note: explicitly not the empty object. 
+ vServiceDirectoryConfig = &ConnectionGitlabConfigServiceDirectoryConfig{} + } + if err := extractConnectionGitlabConfigServiceDirectoryConfigFields(r, vServiceDirectoryConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vServiceDirectoryConfig) { + o.ServiceDirectoryConfig = vServiceDirectoryConfig + } + return nil +} +func extractConnectionGitlabConfigReadAuthorizerCredentialFields(r *Connection, o *ConnectionGitlabConfigReadAuthorizerCredential) error { + return nil +} +func extractConnectionGitlabConfigAuthorizerCredentialFields(r *Connection, o *ConnectionGitlabConfigAuthorizerCredential) error { + return nil +} +func extractConnectionGitlabConfigServiceDirectoryConfigFields(r *Connection, o *ConnectionGitlabConfigServiceDirectoryConfig) error { + return nil +} +func extractConnectionInstallationStateFields(r *Connection, o *ConnectionInstallationState) error { + return nil +} + +func postReadExtractConnectionFields(r *Connection) error { + vGithubConfig := r.GithubConfig + if vGithubConfig == nil { + // note: explicitly not the empty object. + vGithubConfig = &ConnectionGithubConfig{} + } + if err := postReadExtractConnectionGithubConfigFields(r, vGithubConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGithubConfig) { + r.GithubConfig = vGithubConfig + } + vGithubEnterpriseConfig := r.GithubEnterpriseConfig + if vGithubEnterpriseConfig == nil { + // note: explicitly not the empty object. + vGithubEnterpriseConfig = &ConnectionGithubEnterpriseConfig{} + } + if err := postReadExtractConnectionGithubEnterpriseConfigFields(r, vGithubEnterpriseConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGithubEnterpriseConfig) { + r.GithubEnterpriseConfig = vGithubEnterpriseConfig + } + vGitlabConfig := r.GitlabConfig + if vGitlabConfig == nil { + // note: explicitly not the empty object. 
+ vGitlabConfig = &ConnectionGitlabConfig{} + } + if err := postReadExtractConnectionGitlabConfigFields(r, vGitlabConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGitlabConfig) { + r.GitlabConfig = vGitlabConfig + } + vInstallationState := r.InstallationState + if vInstallationState == nil { + // note: explicitly not the empty object. + vInstallationState = &ConnectionInstallationState{} + } + if err := postReadExtractConnectionInstallationStateFields(r, vInstallationState); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vInstallationState) { + r.InstallationState = vInstallationState + } + return nil +} +func postReadExtractConnectionGithubConfigFields(r *Connection, o *ConnectionGithubConfig) error { + vAuthorizerCredential := o.AuthorizerCredential + if vAuthorizerCredential == nil { + // note: explicitly not the empty object. + vAuthorizerCredential = &ConnectionGithubConfigAuthorizerCredential{} + } + if err := extractConnectionGithubConfigAuthorizerCredentialFields(r, vAuthorizerCredential); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuthorizerCredential) { + o.AuthorizerCredential = vAuthorizerCredential + } + return nil +} +func postReadExtractConnectionGithubConfigAuthorizerCredentialFields(r *Connection, o *ConnectionGithubConfigAuthorizerCredential) error { + return nil +} +func postReadExtractConnectionGithubEnterpriseConfigFields(r *Connection, o *ConnectionGithubEnterpriseConfig) error { + vServiceDirectoryConfig := o.ServiceDirectoryConfig + if vServiceDirectoryConfig == nil { + // note: explicitly not the empty object. 
+ vServiceDirectoryConfig = &ConnectionGithubEnterpriseConfigServiceDirectoryConfig{} + } + if err := extractConnectionGithubEnterpriseConfigServiceDirectoryConfigFields(r, vServiceDirectoryConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vServiceDirectoryConfig) { + o.ServiceDirectoryConfig = vServiceDirectoryConfig + } + return nil +} +func postReadExtractConnectionGithubEnterpriseConfigServiceDirectoryConfigFields(r *Connection, o *ConnectionGithubEnterpriseConfigServiceDirectoryConfig) error { + return nil +} +func postReadExtractConnectionGitlabConfigFields(r *Connection, o *ConnectionGitlabConfig) error { + vReadAuthorizerCredential := o.ReadAuthorizerCredential + if vReadAuthorizerCredential == nil { + // note: explicitly not the empty object. + vReadAuthorizerCredential = &ConnectionGitlabConfigReadAuthorizerCredential{} + } + if err := extractConnectionGitlabConfigReadAuthorizerCredentialFields(r, vReadAuthorizerCredential); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vReadAuthorizerCredential) { + o.ReadAuthorizerCredential = vReadAuthorizerCredential + } + vAuthorizerCredential := o.AuthorizerCredential + if vAuthorizerCredential == nil { + // note: explicitly not the empty object. + vAuthorizerCredential = &ConnectionGitlabConfigAuthorizerCredential{} + } + if err := extractConnectionGitlabConfigAuthorizerCredentialFields(r, vAuthorizerCredential); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vAuthorizerCredential) { + o.AuthorizerCredential = vAuthorizerCredential + } + vServiceDirectoryConfig := o.ServiceDirectoryConfig + if vServiceDirectoryConfig == nil { + // note: explicitly not the empty object. 
+ vServiceDirectoryConfig = &ConnectionGitlabConfigServiceDirectoryConfig{} + } + if err := extractConnectionGitlabConfigServiceDirectoryConfigFields(r, vServiceDirectoryConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vServiceDirectoryConfig) { + o.ServiceDirectoryConfig = vServiceDirectoryConfig + } + return nil +} +func postReadExtractConnectionGitlabConfigReadAuthorizerCredentialFields(r *Connection, o *ConnectionGitlabConfigReadAuthorizerCredential) error { + return nil +} +func postReadExtractConnectionGitlabConfigAuthorizerCredentialFields(r *Connection, o *ConnectionGitlabConfigAuthorizerCredential) error { + return nil +} +func postReadExtractConnectionGitlabConfigServiceDirectoryConfigFields(r *Connection, o *ConnectionGitlabConfigServiceDirectoryConfig) error { + return nil +} +func postReadExtractConnectionInstallationStateFields(r *Connection, o *ConnectionInstallationState) error { + return nil +} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection_schema.go new file mode 100644 index 0000000000..ed3e362dd7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection_schema.go @@ -0,0 +1,477 @@ +// Copyright 2023 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package cloudbuildv2 + +import ( + "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +) + +func DCLConnectionSchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "Cloudbuildv2/Connection", + Description: "The Cloudbuildv2 Connection resource", + StructName: "Connection", + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Connection", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "connection", + Required: true, + Description: "A full instance of a Connection", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Connection", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "connection", + Required: true, + Description: "A full instance of a Connection", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Connection", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "connection", + Required: true, + Description: "A full instance of a Connection", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Connection", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many 
Connection", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Connection": &dcl.Component{ + Title: "Connection", + ID: "projects/{{project}}/locations/{{location}}/connections/{{name}}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "project", + "location", + }, + Properties: map[string]*dcl.Property{ + "annotations": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Annotations", + Description: "Allows clients to store small amounts of arbitrary data.", + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. Server assigned timestamp for when the connection was created.", + Immutable: true, + }, + "disabled": &dcl.Property{ + Type: "boolean", + GoName: "Disabled", + Description: "If disabled is set to true, functionality is disabled for this connection. 
Repository based API methods and webhooks processing for repositories in this connection will be disabled.", + }, + "etag": &dcl.Property{ + Type: "string", + GoName: "Etag", + ReadOnly: true, + Description: "This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + Immutable: true, + }, + "githubConfig": &dcl.Property{ + Type: "object", + GoName: "GithubConfig", + GoType: "ConnectionGithubConfig", + Description: "Configuration for connections to github.com.", + Conflicts: []string{ + "githubEnterpriseConfig", + "gitlabConfig", + }, + Properties: map[string]*dcl.Property{ + "appInstallationId": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "AppInstallationId", + Description: "GitHub App installation id.", + }, + "authorizerCredential": &dcl.Property{ + Type: "object", + GoName: "AuthorizerCredential", + GoType: "ConnectionGithubConfigAuthorizerCredential", + Description: "OAuth credential of the account that authorized the Cloud Build GitHub App. It is recommended to use a robot account instead of a human user account. The OAuth token must be tied to the Cloud Build GitHub App.", + Properties: map[string]*dcl.Property{ + "oauthTokenSecretVersion": &dcl.Property{ + Type: "string", + GoName: "OAuthTokenSecretVersion", + Description: "A SecretManager resource containing the OAuth token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Secretmanager/SecretVersion", + Field: "selfLink", + }, + }, + }, + "username": &dcl.Property{ + Type: "string", + GoName: "Username", + ReadOnly: true, + Description: "Output only. 
The username associated to this token.", + }, + }, + }, + }, + }, + "githubEnterpriseConfig": &dcl.Property{ + Type: "object", + GoName: "GithubEnterpriseConfig", + GoType: "ConnectionGithubEnterpriseConfig", + Description: "Configuration for connections to an instance of GitHub Enterprise.", + Conflicts: []string{ + "githubConfig", + "gitlabConfig", + }, + Required: []string{ + "hostUri", + }, + Properties: map[string]*dcl.Property{ + "appId": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "AppId", + Description: "Id of the GitHub App created from the manifest.", + }, + "appInstallationId": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "AppInstallationId", + Description: "ID of the installation of the GitHub App.", + }, + "appSlug": &dcl.Property{ + Type: "string", + GoName: "AppSlug", + Description: "The URL-friendly name of the GitHub App.", + }, + "hostUri": &dcl.Property{ + Type: "string", + GoName: "HostUri", + Description: "Required. The URI of the GitHub Enterprise host this connection is for.", + }, + "privateKeySecretVersion": &dcl.Property{ + Type: "string", + GoName: "PrivateKeySecretVersion", + Description: "SecretManager resource containing the private key of the GitHub App, formatted as `projects/*/secrets/*/versions/*`.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Secretmanager/SecretVersion", + Field: "selfLink", + }, + }, + }, + "serviceDirectoryConfig": &dcl.Property{ + Type: "object", + GoName: "ServiceDirectoryConfig", + GoType: "ConnectionGithubEnterpriseConfigServiceDirectoryConfig", + Description: "Configuration for using Service Directory to privately connect to a GitHub Enterprise server. This should only be set if the GitHub Enterprise server is hosted on-premises and not reachable by public internet. 
If this field is left empty, calls to the GitHub Enterprise server will be made over the public internet.", + Required: []string{ + "service", + }, + Properties: map[string]*dcl.Property{ + "service": &dcl.Property{ + Type: "string", + GoName: "Service", + Description: "Required. The Service Directory service name. Format: projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Servicedirectory/Service", + Field: "selfLink", + }, + }, + }, + }, + }, + "sslCa": &dcl.Property{ + Type: "string", + GoName: "SslCa", + Description: "SSL certificate to use for requests to GitHub Enterprise.", + }, + "webhookSecretSecretVersion": &dcl.Property{ + Type: "string", + GoName: "WebhookSecretSecretVersion", + Description: "SecretManager resource containing the webhook secret of the GitHub App, formatted as `projects/*/secrets/*/versions/*`.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Secretmanager/SecretVersion", + Field: "selfLink", + }, + }, + }, + }, + }, + "gitlabConfig": &dcl.Property{ + Type: "object", + GoName: "GitlabConfig", + GoType: "ConnectionGitlabConfig", + Description: "Configuration for connections to gitlab.com or an instance of GitLab Enterprise.", + Conflicts: []string{ + "githubConfig", + "githubEnterpriseConfig", + }, + Required: []string{ + "webhookSecretSecretVersion", + "readAuthorizerCredential", + "authorizerCredential", + }, + Properties: map[string]*dcl.Property{ + "authorizerCredential": &dcl.Property{ + Type: "object", + GoName: "AuthorizerCredential", + GoType: "ConnectionGitlabConfigAuthorizerCredential", + Description: "Required. 
A GitLab personal access token with the `api` scope access.", + Required: []string{ + "userTokenSecretVersion", + }, + Properties: map[string]*dcl.Property{ + "userTokenSecretVersion": &dcl.Property{ + Type: "string", + GoName: "UserTokenSecretVersion", + Description: "Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Secretmanager/SecretVersion", + Field: "selfLink", + }, + }, + }, + "username": &dcl.Property{ + Type: "string", + GoName: "Username", + ReadOnly: true, + Description: "Output only. The username associated to this token.", + }, + }, + }, + "hostUri": &dcl.Property{ + Type: "string", + GoName: "HostUri", + Description: "The URI of the GitLab Enterprise host this connection is for. If not specified, the default value is https://gitlab.com.", + ServerDefault: true, + }, + "readAuthorizerCredential": &dcl.Property{ + Type: "object", + GoName: "ReadAuthorizerCredential", + GoType: "ConnectionGitlabConfigReadAuthorizerCredential", + Description: "Required. A GitLab personal access token with the minimum `read_api` scope access.", + Required: []string{ + "userTokenSecretVersion", + }, + Properties: map[string]*dcl.Property{ + "userTokenSecretVersion": &dcl.Property{ + Type: "string", + GoName: "UserTokenSecretVersion", + Description: "Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Secretmanager/SecretVersion", + Field: "selfLink", + }, + }, + }, + "username": &dcl.Property{ + Type: "string", + GoName: "Username", + ReadOnly: true, + Description: "Output only. 
The username associated to this token.", + }, + }, + }, + "serverVersion": &dcl.Property{ + Type: "string", + GoName: "ServerVersion", + ReadOnly: true, + Description: "Output only. Version of the GitLab Enterprise server running on the `host_uri`.", + }, + "serviceDirectoryConfig": &dcl.Property{ + Type: "object", + GoName: "ServiceDirectoryConfig", + GoType: "ConnectionGitlabConfigServiceDirectoryConfig", + Description: "Configuration for using Service Directory to privately connect to a GitLab Enterprise server. This should only be set if the GitLab Enterprise server is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the GitLab Enterprise server will be made over the public internet.", + Required: []string{ + "service", + }, + Properties: map[string]*dcl.Property{ + "service": &dcl.Property{ + Type: "string", + GoName: "Service", + Description: "Required. The Service Directory service name. Format: projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Servicedirectory/Service", + Field: "selfLink", + }, + }, + }, + }, + }, + "sslCa": &dcl.Property{ + Type: "string", + GoName: "SslCa", + Description: "SSL certificate to use for requests to GitLab Enterprise.", + }, + "webhookSecretSecretVersion": &dcl.Property{ + Type: "string", + GoName: "WebhookSecretSecretVersion", + Description: "Required. Immutable. SecretManager resource containing the webhook secret of a GitLab Enterprise project, formatted as `projects/*/secrets/*/versions/*`.", + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Secretmanager/SecretVersion", + Field: "selfLink", + }, + }, + }, + }, + }, + "installationState": &dcl.Property{ + Type: "object", + GoName: "InstallationState", + GoType: "ConnectionInstallationState", + ReadOnly: true, + Description: "Output only. 
Installation state of the Connection.", + Immutable: true, + Properties: map[string]*dcl.Property{ + "actionUri": &dcl.Property{ + Type: "string", + GoName: "ActionUri", + ReadOnly: true, + Description: "Output only. Link to follow for next action. Empty string if the installation is already complete.", + Immutable: true, + }, + "message": &dcl.Property{ + Type: "string", + GoName: "Message", + ReadOnly: true, + Description: "Output only. Message of what the user should do next to continue the installation. Empty string if the installation is already complete.", + Immutable: true, + }, + "stage": &dcl.Property{ + Type: "string", + GoName: "Stage", + GoType: "ConnectionInstallationStateStageEnum", + ReadOnly: true, + Description: "Output only. Current step of the installation process. Possible values: STAGE_UNSPECIFIED, PENDING_CREATE_APP, PENDING_USER_OAUTH, PENDING_INSTALL_APP, COMPLETE", + Immutable: true, + Enum: []string{ + "STAGE_UNSPECIFIED", + "PENDING_CREATE_APP", + "PENDING_USER_OAUTH", + "PENDING_INSTALL_APP", + "COMPLETE", + }, + }, + }, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "Immutable. The resource name of the connection, in the format `projects/{project}/locations/{location}/connections/{connection_id}`.", + Immutable: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + }, + "reconciling": &dcl.Property{ + Type: "boolean", + GoName: "Reconciling", + ReadOnly: true, + Description: "Output only. 
Set to true when the connection is being set up or updated in the background.", + Immutable: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. Server assigned timestamp for when the connection was updated.", + Immutable: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection_yaml_embed.go new file mode 100644 index 0000000000..fb91e75376 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/connection_yaml_embed.go @@ -0,0 +1,23 @@ +// Copyright 2023 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// GENERATED BY gen_go_data.go +// gen_go_data -package cloudbuildv2 -var YAML_connection blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/cloudbuildv2/connection.yaml + +package cloudbuildv2 + +// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/cloudbuildv2/connection.yaml +var YAML_connection = []byte("info:\n title: Cloudbuildv2/Connection\n description: The Cloudbuildv2 Connection resource\n x-dcl-struct-name: Connection\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Connection\n parameters:\n - name: connection\n required: true\n description: A full instance of a Connection\n apply:\n description: The function used to apply information about a Connection\n parameters:\n - name: connection\n required: true\n description: A full instance of a Connection\n delete:\n description: The function used to delete a Connection\n parameters:\n - name: connection\n required: true\n description: A full instance of a Connection\n deleteAll:\n description: The function used to delete all Connection\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Connection\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Connection:\n title: Connection\n x-dcl-id: projects/{{project}}/locations/{{location}}/connections/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n - location\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: Allows clients to store small amounts of arbitrary data.\n createTime:\n 
type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Server assigned timestamp for when the connection\n was created.\n x-kubernetes-immutable: true\n disabled:\n type: boolean\n x-dcl-go-name: Disabled\n description: If disabled is set to true, functionality is disabled for this\n connection. Repository based API methods and webhooks processing for repositories\n in this connection will be disabled.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: This checksum is computed by the server based on the value\n of other fields, and may be sent on update and delete requests to ensure\n the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n githubConfig:\n type: object\n x-dcl-go-name: GithubConfig\n x-dcl-go-type: ConnectionGithubConfig\n description: Configuration for connections to github.com.\n x-dcl-conflicts:\n - githubEnterpriseConfig\n - gitlabConfig\n properties:\n appInstallationId:\n type: integer\n format: int64\n x-dcl-go-name: AppInstallationId\n description: GitHub App installation id.\n authorizerCredential:\n type: object\n x-dcl-go-name: AuthorizerCredential\n x-dcl-go-type: ConnectionGithubConfigAuthorizerCredential\n description: OAuth credential of the account that authorized the Cloud\n Build GitHub App. It is recommended to use a robot account instead\n of a human user account. The OAuth token must be tied to the Cloud\n Build GitHub App.\n properties:\n oauthTokenSecretVersion:\n type: string\n x-dcl-go-name: OAuthTokenSecretVersion\n description: 'A SecretManager resource containing the OAuth token\n that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.'\n x-dcl-references:\n - resource: Secretmanager/SecretVersion\n field: selfLink\n username:\n type: string\n x-dcl-go-name: Username\n readOnly: true\n description: Output only. 
The username associated to this token.\n githubEnterpriseConfig:\n type: object\n x-dcl-go-name: GithubEnterpriseConfig\n x-dcl-go-type: ConnectionGithubEnterpriseConfig\n description: Configuration for connections to an instance of GitHub Enterprise.\n x-dcl-conflicts:\n - githubConfig\n - gitlabConfig\n required:\n - hostUri\n properties:\n appId:\n type: integer\n format: int64\n x-dcl-go-name: AppId\n description: Id of the GitHub App created from the manifest.\n appInstallationId:\n type: integer\n format: int64\n x-dcl-go-name: AppInstallationId\n description: ID of the installation of the GitHub App.\n appSlug:\n type: string\n x-dcl-go-name: AppSlug\n description: The URL-friendly name of the GitHub App.\n hostUri:\n type: string\n x-dcl-go-name: HostUri\n description: Required. The URI of the GitHub Enterprise host this connection\n is for.\n privateKeySecretVersion:\n type: string\n x-dcl-go-name: PrivateKeySecretVersion\n description: SecretManager resource containing the private key of the\n GitHub App, formatted as `projects/*/secrets/*/versions/*`.\n x-dcl-references:\n - resource: Secretmanager/SecretVersion\n field: selfLink\n serviceDirectoryConfig:\n type: object\n x-dcl-go-name: ServiceDirectoryConfig\n x-dcl-go-type: ConnectionGithubEnterpriseConfigServiceDirectoryConfig\n description: Configuration for using Service Directory to privately\n connect to a GitHub Enterprise server. This should only be set if\n the GitHub Enterprise server is hosted on-premises and not reachable\n by public internet. If this field is left empty, calls to the GitHub\n Enterprise server will be made over the public internet.\n required:\n - service\n properties:\n service:\n type: string\n x-dcl-go-name: Service\n description: 'Required. The Service Directory service name. 
Format:\n projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.'\n x-dcl-references:\n - resource: Servicedirectory/Service\n field: selfLink\n sslCa:\n type: string\n x-dcl-go-name: SslCa\n description: SSL certificate to use for requests to GitHub Enterprise.\n webhookSecretSecretVersion:\n type: string\n x-dcl-go-name: WebhookSecretSecretVersion\n description: SecretManager resource containing the webhook secret of\n the GitHub App, formatted as `projects/*/secrets/*/versions/*`.\n x-dcl-references:\n - resource: Secretmanager/SecretVersion\n field: selfLink\n gitlabConfig:\n type: object\n x-dcl-go-name: GitlabConfig\n x-dcl-go-type: ConnectionGitlabConfig\n description: Configuration for connections to gitlab.com or an instance\n of GitLab Enterprise.\n x-dcl-conflicts:\n - githubConfig\n - githubEnterpriseConfig\n required:\n - webhookSecretSecretVersion\n - readAuthorizerCredential\n - authorizerCredential\n properties:\n authorizerCredential:\n type: object\n x-dcl-go-name: AuthorizerCredential\n x-dcl-go-type: ConnectionGitlabConfigAuthorizerCredential\n description: Required. A GitLab personal access token with the `api`\n scope access.\n required:\n - userTokenSecretVersion\n properties:\n userTokenSecretVersion:\n type: string\n x-dcl-go-name: UserTokenSecretVersion\n description: 'Required. A SecretManager resource containing the\n user token that authorizes the Cloud Build connection. Format:\n `projects/*/secrets/*/versions/*`.'\n x-dcl-references:\n - resource: Secretmanager/SecretVersion\n field: selfLink\n username:\n type: string\n x-dcl-go-name: Username\n readOnly: true\n description: Output only. The username associated to this token.\n hostUri:\n type: string\n x-dcl-go-name: HostUri\n description: The URI of the GitLab Enterprise host this connection is\n for. 
If not specified, the default value is https://gitlab.com.\n x-dcl-server-default: true\n readAuthorizerCredential:\n type: object\n x-dcl-go-name: ReadAuthorizerCredential\n x-dcl-go-type: ConnectionGitlabConfigReadAuthorizerCredential\n description: Required. A GitLab personal access token with the minimum\n `read_api` scope access.\n required:\n - userTokenSecretVersion\n properties:\n userTokenSecretVersion:\n type: string\n x-dcl-go-name: UserTokenSecretVersion\n description: 'Required. A SecretManager resource containing the\n user token that authorizes the Cloud Build connection. Format:\n `projects/*/secrets/*/versions/*`.'\n x-dcl-references:\n - resource: Secretmanager/SecretVersion\n field: selfLink\n username:\n type: string\n x-dcl-go-name: Username\n readOnly: true\n description: Output only. The username associated to this token.\n serverVersion:\n type: string\n x-dcl-go-name: ServerVersion\n readOnly: true\n description: Output only. Version of the GitLab Enterprise server running\n on the `host_uri`.\n serviceDirectoryConfig:\n type: object\n x-dcl-go-name: ServiceDirectoryConfig\n x-dcl-go-type: ConnectionGitlabConfigServiceDirectoryConfig\n description: Configuration for using Service Directory to privately\n connect to a GitLab Enterprise server. This should only be set if\n the GitLab Enterprise server is hosted on-premises and not reachable\n by public internet. If this field is left empty, calls to the GitLab\n Enterprise server will be made over the public internet.\n required:\n - service\n properties:\n service:\n type: string\n x-dcl-go-name: Service\n description: 'Required. The Service Directory service name. 
Format:\n projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.'\n x-dcl-references:\n - resource: Servicedirectory/Service\n field: selfLink\n sslCa:\n type: string\n x-dcl-go-name: SslCa\n description: SSL certificate to use for requests to GitLab Enterprise.\n webhookSecretSecretVersion:\n type: string\n x-dcl-go-name: WebhookSecretSecretVersion\n description: Required. Immutable. SecretManager resource containing\n the webhook secret of a GitLab Enterprise project, formatted as `projects/*/secrets/*/versions/*`.\n x-dcl-references:\n - resource: Secretmanager/SecretVersion\n field: selfLink\n installationState:\n type: object\n x-dcl-go-name: InstallationState\n x-dcl-go-type: ConnectionInstallationState\n readOnly: true\n description: Output only. Installation state of the Connection.\n x-kubernetes-immutable: true\n properties:\n actionUri:\n type: string\n x-dcl-go-name: ActionUri\n readOnly: true\n description: Output only. Link to follow for next action. Empty string\n if the installation is already complete.\n x-kubernetes-immutable: true\n message:\n type: string\n x-dcl-go-name: Message\n readOnly: true\n description: Output only. Message of what the user should do next to\n continue the installation. Empty string if the installation is already\n complete.\n x-kubernetes-immutable: true\n stage:\n type: string\n x-dcl-go-name: Stage\n x-dcl-go-type: ConnectionInstallationStateStageEnum\n readOnly: true\n description: 'Output only. Current step of the installation process.\n Possible values: STAGE_UNSPECIFIED, PENDING_CREATE_APP, PENDING_USER_OAUTH,\n PENDING_INSTALL_APP, COMPLETE'\n x-kubernetes-immutable: true\n enum:\n - STAGE_UNSPECIFIED\n - PENDING_CREATE_APP\n - PENDING_USER_OAUTH\n - PENDING_INSTALL_APP\n - COMPLETE\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Immutable. 
The resource name of the connection, in the format\n `projects/{project}/locations/{location}/connections/{connection_id}`.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: Output only. Set to true when the connection is being set up\n or updated in the background.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. Server assigned timestamp for when the connection\n was updated.\n x-kubernetes-immutable: true\n") + +// 15422 bytes +// MD5: 8aeeed1345743eb8568528a5672af403 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository.go new file mode 100644 index 0000000000..02173626fc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository.go @@ -0,0 +1,379 @@ +// Copyright 2023 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +package cloudbuildv2 + +import ( + "context" + "fmt" + "time" + + "google.golang.org/api/googleapi" + "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +) + +type Repository struct { + Name *string `json:"name"` + RemoteUri *string `json:"remoteUri"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + Annotations map[string]string `json:"annotations"` + Etag *string `json:"etag"` + Project *string `json:"project"` + Location *string `json:"location"` + Connection *string `json:"connection"` +} + +func (r *Repository) String() string { + return dcl.SprintResource(r) +} + +// Describe returns a simple description of this resource to ensure that automated tools +// can identify it. +func (r *Repository) Describe() dcl.ServiceTypeVersion { + return dcl.ServiceTypeVersion{ + Service: "cloudbuildv2", + Type: "Repository", + Version: "cloudbuildv2", + } +} + +func (r *Repository) ID() (string, error) { + if err := extractRepositoryFields(r); err != nil { + return "", err + } + nr := r.urlNormalized() + params := map[string]interface{}{ + "name": dcl.ValueOrEmptyString(nr.Name), + "remote_uri": dcl.ValueOrEmptyString(nr.RemoteUri), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "annotations": dcl.ValueOrEmptyString(nr.Annotations), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "connection": dcl.ValueOrEmptyString(nr.Connection), + } + return dcl.Nprintf("projects/{{project}}/locations/{{location}}/connections/{{connection}}/repositories/{{name}}", params), nil +} + +const RepositoryMaxPage = -1 + +type RepositoryList struct { + Items []*Repository + + nextToken string + + pageSize int32 + + resource *Repository +} + +func (l *RepositoryList) 
HasNext() bool { + return l.nextToken != "" +} + +func (l *RepositoryList) Next(ctx context.Context, c *Client) error { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if !l.HasNext() { + return fmt.Errorf("no next page") + } + items, token, err := c.listRepository(ctx, l.resource, l.nextToken, l.pageSize) + if err != nil { + return err + } + l.Items = items + l.nextToken = token + return err +} + +func (c *Client) ListRepository(ctx context.Context, project, location, connection string) (*RepositoryList, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + return c.ListRepositoryWithMaxResults(ctx, project, location, connection, RepositoryMaxPage) + +} + +func (c *Client) ListRepositoryWithMaxResults(ctx context.Context, project, location, connection string, pageSize int32) (*RepositoryList, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // Create a resource object so that we can use proper url normalization methods. + r := &Repository{ + Project: &project, + Location: &location, + Connection: &connection, + } + items, token, err := c.listRepository(ctx, r, "", pageSize) + if err != nil { + return nil, err + } + return &RepositoryList{ + Items: items, + nextToken: token, + pageSize: pageSize, + resource: r, + }, nil +} + +func (c *Client) GetRepository(ctx context.Context, r *Repository) (*Repository, error) { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + // This is *purposefully* supressing errors. + // This function is used with url-normalized values + not URL normalized values. + // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
+ extractRepositoryFields(r) + + b, err := c.getRepositoryRaw(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + return nil, &googleapi.Error{ + Code: 404, + Message: err.Error(), + } + } + return nil, err + } + result, err := unmarshalRepository(b, c, r) + if err != nil { + return nil, err + } + result.Project = r.Project + result.Location = r.Location + result.Connection = r.Connection + result.Name = r.Name + + c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) + result, err = canonicalizeRepositoryNewState(c, result, r) + if err != nil { + return nil, err + } + if err := postReadExtractRepositoryFields(result); err != nil { + return result, err + } + c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) + + return result, nil +} + +func (c *Client) DeleteRepository(ctx context.Context, r *Repository) error { + ctx = dcl.ContextWithRequestID(ctx) + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + if r == nil { + return fmt.Errorf("Repository resource is nil") + } + c.Config.Logger.InfoWithContext(ctx, "Deleting Repository...") + deleteOp := deleteRepositoryOperation{} + return deleteOp.do(ctx, r, c) +} + +// DeleteAllRepository deletes all resources that the filter functions returns true on. 
+func (c *Client) DeleteAllRepository(ctx context.Context, project, location, connection string, filter func(*Repository) bool) error { + listObj, err := c.ListRepository(ctx, project, location, connection) + if err != nil { + return err + } + + err = c.deleteAllRepository(ctx, filter, listObj.Items) + if err != nil { + return err + } + for listObj.HasNext() { + err = listObj.Next(ctx, c) + if err != nil { + return nil + } + err = c.deleteAllRepository(ctx, filter, listObj.Items) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) ApplyRepository(ctx context.Context, rawDesired *Repository, opts ...dcl.ApplyOption) (*Repository, error) { + ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) + defer cancel() + + ctx = dcl.ContextWithRequestID(ctx) + var resultNewState *Repository + err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + newState, err := applyRepositoryHelper(c, ctx, rawDesired, opts...) + resultNewState = newState + if err != nil { + // If the error is 409, there is conflict in resource update. + // Here we want to apply changes based on latest state. + if dcl.IsConflictError(err) { + return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} + } + return nil, err + } + return nil, nil + }, c.Config.RetryProvider) + return resultNewState, err +} + +func applyRepositoryHelper(c *Client, ctx context.Context, rawDesired *Repository, opts ...dcl.ApplyOption) (*Repository, error) { + c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyRepository...") + c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) + + // 1.1: Validation of user-specified fields in desired state. + if err := rawDesired.validate(); err != nil { + return nil, err + } + + if err := extractRepositoryFields(rawDesired); err != nil { + return nil, err + } + + initial, desired, fieldDiffs, err := c.repositoryDiffsForRawDesired(ctx, rawDesired, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create a diff: %w", err) + } + + diffs, err := convertFieldDiffsToRepositoryDiffs(c.Config, fieldDiffs, opts) + if err != nil { + return nil, err + } + + // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). + + // 2.3: Lifecycle Directive Check + var create bool + lp := dcl.FetchLifecycleParams(opts) + if initial == nil { + if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} + } + create = true + } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), + } + } else { + for _, d := range diffs { + if d.RequiresRecreate { + return nil, dcl.ApplyInfeasibleError{ + Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), + } + } + if dcl.HasLifecycleParam(lp, dcl.BlockModification) { + return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} + } + } + } + + // 2.4 Imperative Request Planning + var ops []repositoryApiOperation + if create { + ops = append(ops, &createRepositoryOperation{}) + } else { + for _, d := range diffs { + ops = append(ops, d.UpdateOp) + } + } + c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) + + // 2.5 Request Actuation + for _, op := range ops { + c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) + if err := op.do(ctx, desired, c); err != nil { + c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) + return nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) + } + return applyRepositoryDiff(c, ctx, desired, rawDesired, ops, opts...) 
+} + +func applyRepositoryDiff(c *Client, ctx context.Context, desired *Repository, rawDesired *Repository, ops []repositoryApiOperation, opts ...dcl.ApplyOption) (*Repository, error) { + // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") + rawNew, err := c.GetRepository(ctx, desired) + if err != nil { + return nil, err + } + // Get additional values from the first response. + // These values should be merged into the newState above. + if len(ops) > 0 { + lastOp := ops[len(ops)-1] + if o, ok := lastOp.(*createRepositoryOperation); ok { + if r, hasR := o.FirstResponse(); hasR { + + c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") + + fullResp, err := unmarshalMapRepository(r, c, rawDesired) + if err != nil { + return nil, err + } + + rawNew, err = canonicalizeRepositoryNewState(c, rawNew, fullResp) + if err != nil { + return nil, err + } + } + } + } + + c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) + // 3.2b Canonicalization of raw new state using raw desired state + newState, err := canonicalizeRepositoryNewState(c, rawNew, rawDesired) + if err != nil { + return rawNew, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) + // 3.3 Comparison of the new state and raw desired state. + // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE + newDesired, err := canonicalizeRepositoryDesiredState(rawDesired, newState) + if err != nil { + return newState, err + } + + if err := postReadExtractRepositoryFields(newState); err != nil { + return newState, err + } + + // Need to ensure any transformations made here match acceptably in differ. 
+ if err := postReadExtractRepositoryFields(newDesired); err != nil { + return newState, err + } + + c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) + newDiffs, err := diffRepository(c, newDesired, newState) + if err != nil { + return newState, err + } + + if len(newDiffs) == 0 { + c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") + } else { + c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) + diffMessages := make([]string, len(newDiffs)) + for i, d := range newDiffs { + diffMessages[i] = fmt.Sprintf("%v", d) + } + return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} + } + c.Config.Logger.InfoWithContext(ctx, "Done Apply.") + return newState, nil +} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository.yaml new file mode 100644 index 0000000000..d634441f19 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository.yaml @@ -0,0 +1,152 @@ +# Copyright 2023 Google LLC. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+info: + title: Cloudbuildv2/Repository + description: The Cloudbuildv2 Repository resource + x-dcl-struct-name: Repository + x-dcl-has-iam: false +paths: + get: + description: The function used to get information about a Repository + parameters: + - name: repository + required: true + description: A full instance of a Repository + apply: + description: The function used to apply information about a Repository + parameters: + - name: repository + required: true + description: A full instance of a Repository + delete: + description: The function used to delete a Repository + parameters: + - name: repository + required: true + description: A full instance of a Repository + deleteAll: + description: The function used to delete all Repository + parameters: + - name: project + required: true + schema: + type: string + - name: location + required: true + schema: + type: string + - name: connection + required: true + schema: + type: string + list: + description: The function used to list information about many Repository + parameters: + - name: project + required: true + schema: + type: string + - name: location + required: true + schema: + type: string + - name: connection + required: true + schema: + type: string +components: + schemas: + Repository: + title: Repository + x-dcl-id: projects/{{project}}/locations/{{location}}/connections/{{connection}}/repositories/{{name}} + x-dcl-parent-container: project + x-dcl-has-create: true + x-dcl-has-iam: false + x-dcl-read-timeout: 0 + x-dcl-apply-timeout: 0 + x-dcl-delete-timeout: 0 + type: object + required: + - name + - remoteUri + - project + - location + - connection + properties: + annotations: + type: object + additionalProperties: + type: string + x-dcl-go-name: Annotations + description: Allows clients to store small amounts of arbitrary data. 
+ x-kubernetes-immutable: true + connection: + type: string + x-dcl-go-name: Connection + description: The connection for the resource + x-kubernetes-immutable: true + x-dcl-references: + - resource: Cloudbuildv2/Connection + field: name + parent: true + createTime: + type: string + format: date-time + x-dcl-go-name: CreateTime + readOnly: true + description: Output only. Server assigned timestamp for when the connection + was created. + x-kubernetes-immutable: true + etag: + type: string + x-dcl-go-name: Etag + readOnly: true + description: This checksum is computed by the server based on the value + of other fields, and may be sent on update and delete requests to ensure + the client has an up-to-date value before proceeding. + x-kubernetes-immutable: true + location: + type: string + x-dcl-go-name: Location + description: The location for the resource + x-kubernetes-immutable: true + x-dcl-extract-if-empty: true + name: + type: string + x-dcl-go-name: Name + description: Name of the repository. + x-kubernetes-immutable: true + project: + type: string + x-dcl-go-name: Project + description: The project for the resource + x-kubernetes-immutable: true + x-dcl-references: + - resource: Cloudresourcemanager/Project + field: name + parent: true + x-dcl-extract-if-empty: true + remoteUri: + type: string + x-dcl-go-name: RemoteUri + description: Required. Git Clone HTTPS URI. + x-kubernetes-immutable: true + updateTime: + type: string + format: date-time + x-dcl-go-name: UpdateTime + readOnly: true + description: Output only. Server assigned timestamp for when the connection + was updated. 
+ x-kubernetes-immutable: true diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository_internal.go new file mode 100644 index 0000000000..55fa43143e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository_internal.go @@ -0,0 +1,763 @@ +// Copyright 2023 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package cloudbuildv2 + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations" +) + +func (r *Repository) validate() error { + + if err := dcl.Required(r, "name"); err != nil { + return err + } + if err := dcl.Required(r, "remoteUri"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { + return err + } + if err := dcl.RequiredParameter(r.Connection, "Connection"); err != nil { + return err + } + return nil +} +func (r *Repository) basePath() string { + params := map[string]interface{}{} + return dcl.Nprintf("https://cloudbuild.googleapis.com/v2/", params) +} + +func (r *Repository) getURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "connection": dcl.ValueOrEmptyString(nr.Connection), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/connections/{{connection}}/repositories/{{name}}", nr.basePath(), userBasePath, params), nil +} + +func (r *Repository) listURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "connection": dcl.ValueOrEmptyString(nr.Connection), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/connections/{{connection}}/repositories", nr.basePath(), userBasePath, params), nil + +} + +func (r *Repository) createURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": 
dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "connection": dcl.ValueOrEmptyString(nr.Connection), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/connections/{{connection}}/repositories?repositoryId={{name}}", nr.basePath(), userBasePath, params), nil + +} + +func (r *Repository) deleteURL(userBasePath string) (string, error) { + nr := r.urlNormalized() + params := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "connection": dcl.ValueOrEmptyString(nr.Connection), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/connections/{{connection}}/repositories/{{name}}", nr.basePath(), userBasePath, params), nil +} + +// repositoryApiOperation represents a mutable operation in the underlying REST +// API such as Create, Update, or Delete. +type repositoryApiOperation interface { + do(context.Context, *Repository, *Client) error +} + +func (c *Client) listRepositoryRaw(ctx context.Context, r *Repository, pageToken string, pageSize int32) ([]byte, error) { + u, err := r.urlNormalized().listURL(c.Config.BasePath) + if err != nil { + return nil, err + } + + m := make(map[string]string) + if pageToken != "" { + m["pageToken"] = pageToken + } + + if pageSize != RepositoryMaxPage { + m["pageSize"] = fmt.Sprintf("%v", pageSize) + } + + u, err = dcl.AddQueryParams(u, m) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + return ioutil.ReadAll(resp.Response.Body) +} + +type listRepositoryOperation struct { + Repositories []map[string]interface{} `json:"repositories"` + Token string `json:"nextPageToken"` +} + +func (c *Client) listRepository(ctx context.Context, r *Repository, pageToken 
string, pageSize int32) ([]*Repository, string, error) { + b, err := c.listRepositoryRaw(ctx, r, pageToken, pageSize) + if err != nil { + return nil, "", err + } + + var m listRepositoryOperation + if err := json.Unmarshal(b, &m); err != nil { + return nil, "", err + } + + var l []*Repository + for _, v := range m.Repositories { + res, err := unmarshalMapRepository(v, c, r) + if err != nil { + return nil, m.Token, err + } + res.Project = r.Project + res.Location = r.Location + res.Connection = r.Connection + l = append(l, res) + } + + return l, m.Token, nil +} + +func (c *Client) deleteAllRepository(ctx context.Context, f func(*Repository) bool, resources []*Repository) error { + var errors []string + for _, res := range resources { + if f(res) { + // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. + err := c.DeleteRepository(ctx, res) + if err != nil { + errors = append(errors, err.Error()) + } + } + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, "\n")) + } else { + return nil + } +} + +type deleteRepositoryOperation struct{} + +func (op *deleteRepositoryOperation) do(ctx context.Context, r *Repository, c *Client) error { + r, err := c.GetRepository(ctx, r) + if err != nil { + if dcl.IsNotFound(err) { + c.Config.Logger.InfoWithContextf(ctx, "Repository not found, returning. Original error: %v", err) + return nil + } + c.Config.Logger.WarningWithContextf(ctx, "GetRepository checking for existence. error: %v", err) + return err + } + + u, err := r.deleteURL(c.Config.BasePath) + if err != nil { + return err + } + + // Delete should never have a body + body := &bytes.Buffer{} + resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) + if err != nil { + return err + } + + // wait for object to be deleted. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + return err + } + + // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. + // This is the reason we are adding retry to handle that case. + retriesRemaining := 10 + dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { + _, err := c.GetRepository(ctx, r) + if dcl.IsNotFound(err) { + return nil, nil + } + if retriesRemaining > 0 { + retriesRemaining-- + return &dcl.RetryDetails{}, dcl.OperationNotDone{} + } + return nil, dcl.NotDeletedError{ExistingResource: r} + }, c.Config.RetryProvider) + return nil +} + +// Create operations are similar to Update operations, although they do not have +// specific request objects. The Create request object is the json encoding of +// the resource, which is modified by res.marshal to form the base request body. +type createRepositoryOperation struct { + response map[string]interface{} +} + +func (op *createRepositoryOperation) FirstResponse() (map[string]interface{}, bool) { + return op.response, len(op.response) > 0 +} + +func (op *createRepositoryOperation) do(ctx context.Context, r *Repository, c *Client) error { + c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) + u, err := r.createURL(c.Config.BasePath) + if err != nil { + return err + } + + req, err := r.marshal(c) + if err != nil { + return err + } + resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) + if err != nil { + return err + } + // wait for object to be created. 
+ var o operations.StandardGCPOperation + if err := dcl.ParseResponse(resp.Response, &o); err != nil { + return err + } + if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { + c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) + return err + } + c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") + op.response, _ = o.FirstResponse() + + if _, err := c.GetRepository(ctx, r); err != nil { + c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) + return err + } + + return nil +} + +func (c *Client) getRepositoryRaw(ctx context.Context, r *Repository) ([]byte, error) { + + u, err := r.getURL(c.Config.BasePath) + if err != nil { + return nil, err + } + resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) + if err != nil { + return nil, err + } + defer resp.Response.Body.Close() + b, err := ioutil.ReadAll(resp.Response.Body) + if err != nil { + return nil, err + } + + return b, nil +} + +func (c *Client) repositoryDiffsForRawDesired(ctx context.Context, rawDesired *Repository, opts ...dcl.ApplyOption) (initial, desired *Repository, diffs []*dcl.FieldDiff, err error) { + c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") + // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
+ var fetchState *Repository + if sh := dcl.FetchStateHint(opts); sh != nil { + if r, ok := sh.(*Repository); !ok { + c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Repository, got %T", sh) + } else { + fetchState = r + } + } + if fetchState == nil { + fetchState = rawDesired + } + + // 1.2: Retrieval of raw initial state from API + rawInitial, err := c.GetRepository(ctx, fetchState) + if rawInitial == nil { + if !dcl.IsNotFound(err) { + c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Repository resource already exists: %s", err) + return nil, nil, nil, fmt.Errorf("failed to retrieve Repository resource: %v", err) + } + c.Config.Logger.InfoWithContext(ctx, "Found that Repository resource did not exist.") + // Perform canonicalization to pick up defaults. + desired, err = canonicalizeRepositoryDesiredState(rawDesired, rawInitial) + return nil, desired, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Repository: %v", rawInitial) + c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Repository: %v", rawDesired) + + // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. + if err := extractRepositoryFields(rawInitial); err != nil { + return nil, nil, nil, err + } + + // 1.3: Canonicalize raw initial state into initial state. + initial, err = canonicalizeRepositoryInitialState(rawInitial, rawDesired) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Repository: %v", initial) + + // 1.4: Canonicalize raw desired state into desired state. + desired, err = canonicalizeRepositoryDesiredState(rawDesired, rawInitial, opts...) + if err != nil { + return nil, nil, nil, err + } + c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Repository: %v", desired) + + // 2.1: Comparison of initial and desired state. 
+ diffs, err = diffRepository(c, desired, initial, opts...) + return initial, desired, diffs, err +} + +func canonicalizeRepositoryInitialState(rawInitial, rawDesired *Repository) (*Repository, error) { + // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. + return rawInitial, nil +} + +/* +* Canonicalizers +* +* These are responsible for converting either a user-specified config or a +* GCP API response to a standard format that can be used for difference checking. +* */ + +func canonicalizeRepositoryDesiredState(rawDesired, rawInitial *Repository, opts ...dcl.ApplyOption) (*Repository, error) { + + if rawInitial == nil { + // Since the initial state is empty, the desired state is all we have. + // We canonicalize the remaining nested objects with nil to pick up defaults. + + return rawDesired, nil + } + canonicalDesired := &Repository{} + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { + canonicalDesired.Name = rawInitial.Name + } else { + canonicalDesired.Name = rawDesired.Name + } + if dcl.StringCanonicalize(rawDesired.RemoteUri, rawInitial.RemoteUri) { + canonicalDesired.RemoteUri = rawInitial.RemoteUri + } else { + canonicalDesired.RemoteUri = rawDesired.RemoteUri + } + if dcl.IsZeroValue(rawDesired.Annotations) || (dcl.IsEmptyValueIndirect(rawDesired.Annotations) && dcl.IsEmptyValueIndirect(rawInitial.Annotations)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ canonicalDesired.Annotations = rawInitial.Annotations + } else { + canonicalDesired.Annotations = rawDesired.Annotations + } + if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { + canonicalDesired.Project = rawInitial.Project + } else { + canonicalDesired.Project = rawDesired.Project + } + if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { + canonicalDesired.Location = rawInitial.Location + } else { + canonicalDesired.Location = rawDesired.Location + } + if dcl.NameToSelfLink(rawDesired.Connection, rawInitial.Connection) { + canonicalDesired.Connection = rawInitial.Connection + } else { + canonicalDesired.Connection = rawDesired.Connection + } + return canonicalDesired, nil +} + +func canonicalizeRepositoryNewState(c *Client, rawNew, rawDesired *Repository) (*Repository, error) { + + if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { + rawNew.Name = rawDesired.Name + } else { + if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { + rawNew.Name = rawDesired.Name + } + } + + if dcl.IsEmptyValueIndirect(rawNew.RemoteUri) && dcl.IsEmptyValueIndirect(rawDesired.RemoteUri) { + rawNew.RemoteUri = rawDesired.RemoteUri + } else { + if dcl.StringCanonicalize(rawDesired.RemoteUri, rawNew.RemoteUri) { + rawNew.RemoteUri = rawDesired.RemoteUri + } + } + + if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { + rawNew.CreateTime = rawDesired.CreateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { + rawNew.UpdateTime = rawDesired.UpdateTime + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Annotations) && dcl.IsEmptyValueIndirect(rawDesired.Annotations) { + rawNew.Annotations = rawDesired.Annotations + } else { + } + + if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { + rawNew.Etag = rawDesired.Etag + } else { + if 
dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { + rawNew.Etag = rawDesired.Etag + } + } + + rawNew.Project = rawDesired.Project + + rawNew.Location = rawDesired.Location + + rawNew.Connection = rawDesired.Connection + + return rawNew, nil +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. +func diffRepository(c *Client, desired, actual *Repository, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.RemoteUri, actual.RemoteUri, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RemoteUri")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Connection, actual.Connection, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Connection")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. +func (r *Repository) urlNormalized() *Repository { + normalized := dcl.Copy(*r).(Repository) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.RemoteUri = dcl.SelfLinkToName(r.RemoteUri) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + normalized.Connection = dcl.SelfLinkToName(r.Connection) + return &normalized +} + +func (r *Repository) updateURL(userBasePath, updateName string) (string, error) { + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the Repository resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *Repository) marshal(c *Client) ([]byte, error) { + m, err := expandRepository(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling Repository: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalRepository decodes JSON responses into the Repository resource schema. +func unmarshalRepository(b []byte, c *Client, res *Repository) (*Repository, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapRepository(m, c, res) +} + +func unmarshalMapRepository(m map[string]interface{}, c *Client, res *Repository) (*Repository, error) { + + flattened := flattenRepository(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandRepository expands Repository into a JSON request object. 
+func expandRepository(c *Client, f *Repository) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.DeriveField("projects/%s/locations/%s/connections/%s/repositories/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Connection), dcl.SelfLinkToName(f.Name)); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.RemoteUri; dcl.ValueShouldBeSent(v) { + m["remoteUri"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Connection into connection: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["connection"] = v + } + + return m, nil +} + +// flattenRepository flattens Repository from a JSON request object into the +// Repository type. 
+func flattenRepository(c *Client, i interface{}, res *Repository) *Repository { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &Repository{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.RemoteUri = dcl.FlattenString(m["remoteUri"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Connection = dcl.FlattenString(m["connection"]) + + return resultRes +} + +// This function returns a matcher that checks whether a serialized resource matches this resource +// in its parameters (as defined by the fields in a Get, which definitionally define resource +// identity). This is useful in extracting the element from a List call. 
+func (r *Repository) matcher(c *Client) func([]byte) bool { + return func(b []byte) bool { + cr, err := unmarshalRepository(b, c, r) + if err != nil { + c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") + return false + } + nr := r.urlNormalized() + ncr := cr.urlNormalized() + c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) + + if nr.Project == nil && ncr.Project == nil { + c.Config.Logger.Info("Both Project fields null - considering equal.") + } else if nr.Project == nil || ncr.Project == nil { + c.Config.Logger.Info("Only one Project field is null - considering unequal.") + return false + } else if *nr.Project != *ncr.Project { + return false + } + if nr.Location == nil && ncr.Location == nil { + c.Config.Logger.Info("Both Location fields null - considering equal.") + } else if nr.Location == nil || ncr.Location == nil { + c.Config.Logger.Info("Only one Location field is null - considering unequal.") + return false + } else if *nr.Location != *ncr.Location { + return false + } + if nr.Connection == nil && ncr.Connection == nil { + c.Config.Logger.Info("Both Connection fields null - considering equal.") + } else if nr.Connection == nil || ncr.Connection == nil { + c.Config.Logger.Info("Only one Connection field is null - considering unequal.") + return false + } else if *nr.Connection != *ncr.Connection { + return false + } + if nr.Name == nil && ncr.Name == nil { + c.Config.Logger.Info("Both Name fields null - considering equal.") + } else if nr.Name == nil || ncr.Name == nil { + c.Config.Logger.Info("Only one Name field is null - considering unequal.") + return false + } else if *nr.Name != *ncr.Name { + return false + } + return true + } +} + +type repositoryDiff struct { + // The diff should include one or the other of RequiresRecreate or UpdateOp. 
+ RequiresRecreate bool + UpdateOp repositoryApiOperation + FieldName string // used for error logging +} + +func convertFieldDiffsToRepositoryDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]repositoryDiff, error) { + opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) + // Map each operation name to the field diffs associated with it. + for _, fd := range fds { + for _, ro := range fd.ResultingOperation { + if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { + fieldDiffs = append(fieldDiffs, fd) + opNamesToFieldDiffs[ro] = fieldDiffs + } else { + config.Logger.Infof("%s required due to diff: %v", ro, fd) + opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} + } + } + } + var diffs []repositoryDiff + // For each operation name, create a repositoryDiff which contains the operation. + for opName, fieldDiffs := range opNamesToFieldDiffs { + // Use the first field diff's field name for logging required recreate error. + diff := repositoryDiff{FieldName: fieldDiffs[0].FieldName} + if opName == "Recreate" { + diff.RequiresRecreate = true + } else { + apiOp, err := convertOpNameToRepositoryApiOperation(opName, fieldDiffs, opts...) 
+ if err != nil { + return diffs, err + } + diff.UpdateOp = apiOp + } + diffs = append(diffs, diff) + } + return diffs, nil +} + +func convertOpNameToRepositoryApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (repositoryApiOperation, error) { + switch opName { + + default: + return nil, fmt.Errorf("no such operation with name: %v", opName) + } +} + +func extractRepositoryFields(r *Repository) error { + vProject, err := dcl.ValueFromRegexOnField("Project", r.Project, r.Connection, "projects/([a-z0-9A-Z-]*)/locations/.*") + if err != nil { + return err + } + r.Project = vProject + vLocation, err := dcl.ValueFromRegexOnField("Location", r.Location, r.Connection, "projects/.*/locations/([a-z0-9A-Z-]*)/connections/.*") + if err != nil { + return err + } + r.Location = vLocation + return nil +} + +func postReadExtractRepositoryFields(r *Repository) error { + return nil +} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository_schema.go new file mode 100644 index 0000000000..7c7aae42c5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository_schema.go @@ -0,0 +1,212 @@ +// Copyright 2023 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +package cloudbuildv2 + +import ( + "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +) + +func DCLRepositorySchema() *dcl.Schema { + return &dcl.Schema{ + Info: &dcl.Info{ + Title: "Cloudbuildv2/Repository", + Description: "The Cloudbuildv2 Repository resource", + StructName: "Repository", + }, + Paths: &dcl.Paths{ + Get: &dcl.Path{ + Description: "The function used to get information about a Repository", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "repository", + Required: true, + Description: "A full instance of a Repository", + }, + }, + }, + Apply: &dcl.Path{ + Description: "The function used to apply information about a Repository", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "repository", + Required: true, + Description: "A full instance of a Repository", + }, + }, + }, + Delete: &dcl.Path{ + Description: "The function used to delete a Repository", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "repository", + Required: true, + Description: "A full instance of a Repository", + }, + }, + }, + DeleteAll: &dcl.Path{ + Description: "The function used to delete all Repository", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "connection", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + List: &dcl.Path{ + Description: "The function used to list information about many Repository", + Parameters: []dcl.PathParameters{ + dcl.PathParameters{ + Name: "project", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: 
"location", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + dcl.PathParameters{ + Name: "connection", + Required: true, + Schema: &dcl.PathParametersSchema{ + Type: "string", + }, + }, + }, + }, + }, + Components: &dcl.Components{ + Schemas: map[string]*dcl.Component{ + "Repository": &dcl.Component{ + Title: "Repository", + ID: "projects/{{project}}/locations/{{location}}/connections/{{connection}}/repositories/{{name}}", + ParentContainer: "project", + HasCreate: true, + SchemaProperty: dcl.Property{ + Type: "object", + Required: []string{ + "name", + "remoteUri", + "project", + "location", + "connection", + }, + Properties: map[string]*dcl.Property{ + "annotations": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Annotations", + Description: "Allows clients to store small amounts of arbitrary data.", + Immutable: true, + }, + "connection": &dcl.Property{ + Type: "string", + GoName: "Connection", + Description: "The connection for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudbuildv2/Connection", + Field: "name", + Parent: true, + }, + }, + }, + "createTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "CreateTime", + ReadOnly: true, + Description: "Output only. 
Server assigned timestamp for when the connection was created.", + Immutable: true, + }, + "etag": &dcl.Property{ + Type: "string", + GoName: "Etag", + ReadOnly: true, + Description: "This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + Immutable: true, + }, + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "The location for the resource", + Immutable: true, + ExtractIfEmpty: true, + }, + "name": &dcl.Property{ + Type: "string", + GoName: "Name", + Description: "Name of the repository.", + Immutable: true, + }, + "project": &dcl.Property{ + Type: "string", + GoName: "Project", + Description: "The project for the resource", + Immutable: true, + ResourceReferences: []*dcl.PropertyResourceReference{ + &dcl.PropertyResourceReference{ + Resource: "Cloudresourcemanager/Project", + Field: "name", + Parent: true, + }, + }, + ExtractIfEmpty: true, + }, + "remoteUri": &dcl.Property{ + Type: "string", + GoName: "RemoteUri", + Description: "Required. Git Clone HTTPS URI.", + Immutable: true, + }, + "updateTime": &dcl.Property{ + Type: "string", + Format: "date-time", + GoName: "UpdateTime", + ReadOnly: true, + Description: "Output only. 
Server assigned timestamp for when the connection was updated.", + Immutable: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository_yaml_embed.go new file mode 100644 index 0000000000..73c3366752 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2/repository_yaml_embed.go @@ -0,0 +1,23 @@ +// Copyright 2023 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// GENERATED BY gen_go_data.go +// gen_go_data -package cloudbuildv2 -var YAML_repository blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/cloudbuildv2/repository.yaml + +package cloudbuildv2 + +// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/cloudbuildv2/repository.yaml +var YAML_repository = []byte("info:\n title: Cloudbuildv2/Repository\n description: The Cloudbuildv2 Repository resource\n x-dcl-struct-name: Repository\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Repository\n parameters:\n - name: repository\n required: true\n description: A full instance of a Repository\n apply:\n description: The function used to apply information about a Repository\n parameters:\n - name: repository\n required: true\n description: A full instance of a Repository\n delete:\n description: The function used to delete a Repository\n parameters:\n - name: repository\n required: true\n description: A full instance of a Repository\n deleteAll:\n description: The function used to delete all Repository\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: connection\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Repository\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: connection\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Repository:\n title: Repository\n x-dcl-id: projects/{{project}}/locations/{{location}}/connections/{{connection}}/repositories/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - remoteUri\n - project\n - location\n - connection\n properties:\n 
annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: Allows clients to store small amounts of arbitrary data.\n x-kubernetes-immutable: true\n connection:\n type: string\n x-dcl-go-name: Connection\n description: The connection for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudbuildv2/Connection\n field: name\n parent: true\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Server assigned timestamp for when the connection\n was created.\n x-kubernetes-immutable: true\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: This checksum is computed by the server based on the value\n of other fields, and may be sent on update and delete requests to ensure\n the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n x-dcl-extract-if-empty: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the repository.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n x-dcl-extract-if-empty: true\n remoteUri:\n type: string\n x-dcl-go-name: RemoteUri\n description: Required. Git Clone HTTPS URI.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
Server assigned timestamp for when the connection\n was updated.\n x-kubernetes-immutable: true\n") + +// 4202 bytes +// MD5: 08536debc9812c7bb49387b3c2a0af8f diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.go index 5e791cbe95..1ad6efee33 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.go @@ -92,9 +92,11 @@ func (r *DeliveryPipelineSerialPipeline) HashCode() string { } type DeliveryPipelineSerialPipelineStages struct { - empty bool `json:"-"` - TargetId *string `json:"targetId"` - Profiles []string `json:"profiles"` + empty bool `json:"-"` + TargetId *string `json:"targetId"` + Profiles []string `json:"profiles"` + Strategy *DeliveryPipelineSerialPipelineStagesStrategy `json:"strategy"` + DeployParameters []DeliveryPipelineSerialPipelineStagesDeployParameters `json:"deployParameters"` } type jsonDeliveryPipelineSerialPipelineStages DeliveryPipelineSerialPipelineStages @@ -116,6 +118,10 @@ func (r *DeliveryPipelineSerialPipelineStages) UnmarshalJSON(data []byte) error r.Profiles = res.Profiles + r.Strategy = res.Strategy + + r.DeployParameters = res.DeployParameters + } return nil } @@ -140,10 +146,605 @@ func (r *DeliveryPipelineSerialPipelineStages) HashCode() string { return fmt.Sprintf("%x", hash) } +type DeliveryPipelineSerialPipelineStagesStrategy struct { + empty bool `json:"-"` + Standard *DeliveryPipelineSerialPipelineStagesStrategyStandard `json:"standard"` + Canary *DeliveryPipelineSerialPipelineStagesStrategyCanary `json:"canary"` 
+} + +type jsonDeliveryPipelineSerialPipelineStagesStrategy DeliveryPipelineSerialPipelineStagesStrategy + +func (r *DeliveryPipelineSerialPipelineStagesStrategy) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategy + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategy + } else { + + r.Standard = res.Standard + + r.Canary = res.Canary + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategy is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategy *DeliveryPipelineSerialPipelineStagesStrategy = &DeliveryPipelineSerialPipelineStagesStrategy{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategy) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategy) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategy) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyStandard struct { + empty bool `json:"-"` + Verify *bool `json:"verify"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyStandard DeliveryPipelineSerialPipelineStagesStrategyStandard + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyStandard + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, 
&m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } else { + + r.Verify = res.Verify + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyStandard is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard *DeliveryPipelineSerialPipelineStagesStrategyStandard = &DeliveryPipelineSerialPipelineStagesStrategyStandard{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanary struct { + empty bool `json:"-"` + RuntimeConfig *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig `json:"runtimeConfig"` + CanaryDeployment *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment `json:"canaryDeployment"` + CustomCanaryDeployment *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment `json:"customCanaryDeployment"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanary DeliveryPipelineSerialPipelineStagesStrategyCanary + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanary + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = 
*EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary + } else { + + r.RuntimeConfig = res.RuntimeConfig + + r.CanaryDeployment = res.CanaryDeployment + + r.CustomCanaryDeployment = res.CustomCanaryDeployment + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanary is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary *DeliveryPipelineSerialPipelineStagesStrategyCanary = &DeliveryPipelineSerialPipelineStagesStrategyCanary{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig struct { + empty bool `json:"-"` + Kubernetes *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes `json:"kubernetes"` + CloudRun *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun `json:"cloudRun"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = 
*EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + } else { + + r.Kubernetes = res.Kubernetes + + r.CloudRun = res.CloudRun + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes struct { + empty bool `json:"-"` + GatewayServiceMesh *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh `json:"gatewayServiceMesh"` + ServiceNetworking *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking `json:"serviceNetworking"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + if err := 
json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + } else { + + r.GatewayServiceMesh = res.GatewayServiceMesh + + r.ServiceNetworking = res.ServiceNetworking + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh struct { + empty bool `json:"-"` + HttpRoute *string `json:"httpRoute"` + Service *string `json:"service"` + Deployment *string `json:"deployment"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + +func (r 
*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + } else { + + r.HttpRoute = res.HttpRoute + + r.Service = res.Service + + r.Deployment = res.Deployment + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking struct { + empty bool `json:"-"` + Service *string 
`json:"service"` + Deployment *string `json:"deployment"` + DisablePodOverprovisioning *bool `json:"disablePodOverprovisioning"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + } else { + + r.Service = res.Service + + r.Deployment = res.Deployment + + r.DisablePodOverprovisioning = res.DisablePodOverprovisioning + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. 
+var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun struct { + empty bool `json:"-"` + AutomaticTrafficControl *bool `json:"automaticTrafficControl"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + } else { + + r.AutomaticTrafficControl = res.AutomaticTrafficControl + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment struct { + empty bool `json:"-"` + Percentages []int64 `json:"percentages"` + Verify *bool `json:"verify"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + } else { + + r.Percentages = res.Percentages + + r.Verify = res.Verify + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment struct { + empty bool `json:"-"` + PhaseConfigs []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs `json:"phaseConfigs"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + } else { + + r.PhaseConfigs = res.PhaseConfigs + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment is +// empty. 
Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs struct { + empty bool `json:"-"` + PhaseId *string `json:"phaseId"` + Percentage *int64 `json:"percentage"` + Profiles []string `json:"profiles"` + Verify *bool `json:"verify"` +} + +type jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + } else { + + r.PhaseId = res.PhaseId + + r.Percentage = res.Percentage + + r.Profiles = res.Profiles + + 
r.Verify = res.Verify + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + +type DeliveryPipelineSerialPipelineStagesDeployParameters struct { + empty bool `json:"-"` + Values map[string]string `json:"values"` + MatchTargetLabels map[string]string `json:"matchTargetLabels"` +} + +type jsonDeliveryPipelineSerialPipelineStagesDeployParameters DeliveryPipelineSerialPipelineStagesDeployParameters + +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineSerialPipelineStagesDeployParameters + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineSerialPipelineStagesDeployParameters + } else { + + r.Values = res.Values + + r.MatchTargetLabels = 
res.MatchTargetLabels + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineSerialPipelineStagesDeployParameters is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineSerialPipelineStagesDeployParameters *DeliveryPipelineSerialPipelineStagesDeployParameters = &DeliveryPipelineSerialPipelineStagesDeployParameters{empty: true} + +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + type DeliveryPipelineCondition struct { empty bool `json:"-"` PipelineReadyCondition *DeliveryPipelineConditionPipelineReadyCondition `json:"pipelineReadyCondition"` TargetsPresentCondition *DeliveryPipelineConditionTargetsPresentCondition `json:"targetsPresentCondition"` + TargetsTypeCondition *DeliveryPipelineConditionTargetsTypeCondition `json:"targetsTypeCondition"` } type jsonDeliveryPipelineCondition DeliveryPipelineCondition @@ -165,6 +766,8 @@ func (r *DeliveryPipelineCondition) UnmarshalJSON(data []byte) error { r.TargetsPresentCondition = res.TargetsPresentCondition + r.TargetsTypeCondition = res.TargetsTypeCondition + } return nil } @@ -290,6 +893,55 @@ func (r *DeliveryPipelineConditionTargetsPresentCondition) HashCode() string { return fmt.Sprintf("%x", hash) } +type DeliveryPipelineConditionTargetsTypeCondition struct { + empty bool `json:"-"` + Status *bool `json:"status"` + ErrorDetails *string `json:"errorDetails"` +} + +type 
jsonDeliveryPipelineConditionTargetsTypeCondition DeliveryPipelineConditionTargetsTypeCondition + +func (r *DeliveryPipelineConditionTargetsTypeCondition) UnmarshalJSON(data []byte) error { + var res jsonDeliveryPipelineConditionTargetsTypeCondition + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyDeliveryPipelineConditionTargetsTypeCondition + } else { + + r.Status = res.Status + + r.ErrorDetails = res.ErrorDetails + + } + return nil +} + +// This object is used to assert a desired state where this DeliveryPipelineConditionTargetsTypeCondition is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyDeliveryPipelineConditionTargetsTypeCondition *DeliveryPipelineConditionTargetsTypeCondition = &DeliveryPipelineConditionTargetsTypeCondition{empty: true} + +func (r *DeliveryPipelineConditionTargetsTypeCondition) Empty() bool { + return r.empty +} + +func (r *DeliveryPipelineConditionTargetsTypeCondition) String() string { + return dcl.SprintResource(r) +} + +func (r *DeliveryPipelineConditionTargetsTypeCondition) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + // Describe returns a simple description of this resource to ensure that automated tools // can identify it. 
func (r *DeliveryPipeline) Describe() dcl.ServiceTypeVersion { diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.yaml index cbfa8868d2..fa571e2177 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline.yaml @@ -138,6 +138,24 @@ components: format: date-time x-dcl-go-name: UpdateTime description: Last time the condition was updated. + targetsTypeCondition: + type: object + x-dcl-go-name: TargetsTypeCondition + x-dcl-go-type: DeliveryPipelineConditionTargetsTypeCondition + description: Details on the whether the targets enumerated in the pipeline + are of the same type. + properties: + errorDetails: + type: string + x-dcl-go-name: ErrorDetails + description: Human readable error message. + status: + type: boolean + x-dcl-go-name: Status + description: True if the targets are all a comparable type. For + example this is true if all targets are GKE clusters. This is + false if some targets are Cloud Run targets and others are GKE + clusters. createTime: type: string format: date-time @@ -206,6 +224,35 @@ components: type: object x-dcl-go-type: DeliveryPipelineSerialPipelineStages properties: + deployParameters: + type: array + x-dcl-go-name: DeployParameters + description: Optional. The deploy parameters to use for the target + in this stage. 
+ x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: object + x-dcl-go-type: DeliveryPipelineSerialPipelineStagesDeployParameters + required: + - values + properties: + matchTargetLabels: + type: object + additionalProperties: + type: string + x-dcl-go-name: MatchTargetLabels + description: Optional. Deploy parameters are applied to + targets with match labels. If unspecified, deploy parameters + are applied to all targets (including child targets of + a multi-target). + values: + type: object + additionalProperties: + type: string + x-dcl-go-name: Values + description: Required. Values are deploy parameters in key-value + pairs. profiles: type: array x-dcl-go-name: Profiles @@ -216,6 +263,212 @@ components: items: type: string x-dcl-go-type: string + strategy: + type: object + x-dcl-go-name: Strategy + x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategy + description: Optional. The strategy to use for a `Rollout` to + this stage. + properties: + canary: + type: object + x-dcl-go-name: Canary + x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanary + description: Canary deployment strategy provides progressive + percentage based deployments to a Target. + properties: + canaryDeployment: + type: object + x-dcl-go-name: CanaryDeployment + x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + description: Configures the progressive based deployment + for a Target. + x-dcl-conflicts: + - customCanaryDeployment + required: + - percentages + properties: + percentages: + type: array + x-dcl-go-name: Percentages + description: Required. The percentage based deployments + that will occur as a part of a `Rollout`. List is + expected in ascending order and each integer n is + 0 <= n < 100. 
+ x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: integer + format: int64 + x-dcl-go-type: int64 + verify: + type: boolean + x-dcl-go-name: Verify + description: Whether to run verify tests after each + percentage deployment. + customCanaryDeployment: + type: object + x-dcl-go-name: CustomCanaryDeployment + x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + description: Configures the progressive based deployment + for a Target, but allows customizing at the phase level + where a phase represents each of the percentage deployments. + x-dcl-conflicts: + - canaryDeployment + required: + - phaseConfigs + properties: + phaseConfigs: + type: array + x-dcl-go-name: PhaseConfigs + description: Required. Configuration for each phase + in the canary deployment in the order executed. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: object + x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + required: + - phaseId + - percentage + properties: + percentage: + type: integer + format: int64 + x-dcl-go-name: Percentage + description: Required. Percentage deployment + for the phase. + phaseId: + type: string + x-dcl-go-name: PhaseId + description: 'Required. The ID to assign to + the `Rollout` phase. This value must consist + of lower-case letters, numbers, and hyphens, + start with a letter and end with a letter + or a number, and have a max length of 63 characters. + In other words, it must match the following + regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.' + profiles: + type: array + x-dcl-go-name: Profiles + description: Skaffold profiles to use when rendering + the manifest for this phase. These are in + addition to the profiles list specified in + the `DeliveryPipeline` stage. 
+ x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string + verify: + type: boolean + x-dcl-go-name: Verify + description: Whether to run verify tests after + the deployment. + runtimeConfig: + type: object + x-dcl-go-name: RuntimeConfig + x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + description: Optional. Runtime specific configurations + for the deployment strategy. The runtime configuration + is used to determine how Cloud Deploy will split traffic + to enable a progressive deployment. + properties: + cloudRun: + type: object + x-dcl-go-name: CloudRun + x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + description: Cloud Run runtime configuration. + x-dcl-conflicts: + - kubernetes + properties: + automaticTrafficControl: + type: boolean + x-dcl-go-name: AutomaticTrafficControl + description: Whether Cloud Deploy should update + the traffic stanza in a Cloud Run Service on + the user's behalf to facilitate traffic splitting. + This is required to be true for CanaryDeployments, + but optional for CustomCanaryDeployments. + kubernetes: + type: object + x-dcl-go-name: Kubernetes + x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + description: Kubernetes runtime configuration. + x-dcl-conflicts: + - cloudRun + properties: + gatewayServiceMesh: + type: object + x-dcl-go-name: GatewayServiceMesh + x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + description: Kubernetes Gateway API service mesh + configuration. + x-dcl-conflicts: + - serviceNetworking + required: + - httpRoute + - service + - deployment + properties: + deployment: + type: string + x-dcl-go-name: Deployment + description: Required. Name of the Kubernetes + Deployment whose traffic is managed by the + specified HTTPRoute and Service. 
+ httpRoute: + type: string + x-dcl-go-name: HttpRoute + description: Required. Name of the Gateway + API HTTPRoute. + service: + type: string + x-dcl-go-name: Service + description: Required. Name of the Kubernetes + Service. + serviceNetworking: + type: object + x-dcl-go-name: ServiceNetworking + x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + description: Kubernetes Service networking configuration. + x-dcl-conflicts: + - gatewayServiceMesh + required: + - service + - deployment + properties: + deployment: + type: string + x-dcl-go-name: Deployment + description: Required. Name of the Kubernetes + Deployment whose traffic is managed by the + specified Service. + disablePodOverprovisioning: + type: boolean + x-dcl-go-name: DisablePodOverprovisioning + description: Optional. Whether to disable + Pod overprovisioning. If Pod overprovisioning + is disabled then Cloud Deploy will limit + the number of total Pods used for the deployment + strategy to the number of Pods the Deployment + has on the cluster. + service: + type: string + x-dcl-go-name: Service + description: Required. Name of the Kubernetes + Service. + standard: + type: object + x-dcl-go-name: Standard + x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyStandard + description: Standard deployment strategy executes a single + deploy and allows verifying the deployment. + properties: + verify: + type: boolean + x-dcl-go-name: Verify + description: Whether to verify a deployment. 
targetId: type: string x-dcl-go-name: TargetId diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_internal.go index 26828a985a..6ffe828baf 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_internal.go @@ -52,6 +52,131 @@ func (r *DeliveryPipelineSerialPipeline) validate() error { return nil } func (r *DeliveryPipelineSerialPipelineStages) validate() error { + if !dcl.IsEmptyValueIndirect(r.Strategy) { + if err := r.Strategy.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategy) validate() error { + if !dcl.IsEmptyValueIndirect(r.Standard) { + if err := r.Standard.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.Canary) { + if err := r.Canary.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyStandard) validate() error { + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanary) validate() error { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"CanaryDeployment", "CustomCanaryDeployment"}, r.CanaryDeployment, r.CustomCanaryDeployment); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.RuntimeConfig) { + if err := r.RuntimeConfig.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.CanaryDeployment) { + if err := r.CanaryDeployment.validate(); err != nil { + return err + } + } + if 
!dcl.IsEmptyValueIndirect(r.CustomCanaryDeployment) { + if err := r.CustomCanaryDeployment.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) validate() error { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Kubernetes", "CloudRun"}, r.Kubernetes, r.CloudRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.Kubernetes) { + if err := r.Kubernetes.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.CloudRun) { + if err := r.CloudRun.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) validate() error { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"GatewayServiceMesh", "ServiceNetworking"}, r.GatewayServiceMesh, r.ServiceNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(r.GatewayServiceMesh) { + if err := r.GatewayServiceMesh.validate(); err != nil { + return err + } + } + if !dcl.IsEmptyValueIndirect(r.ServiceNetworking) { + if err := r.ServiceNetworking.validate(); err != nil { + return err + } + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) validate() error { + if err := dcl.Required(r, "httpRoute"); err != nil { + return err + } + if err := dcl.Required(r, "service"); err != nil { + return err + } + if err := dcl.Required(r, "deployment"); err != nil { + return err + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) validate() error { + if err := dcl.Required(r, "service"); err != nil { + return err + } + if err := dcl.Required(r, "deployment"); err != nil { + return err + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) validate() error { + return nil +} +func (r 
*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) validate() error { + if err := dcl.Required(r, "percentages"); err != nil { + return err + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) validate() error { + if err := dcl.Required(r, "phaseConfigs"); err != nil { + return err + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) validate() error { + if err := dcl.Required(r, "phaseId"); err != nil { + return err + } + if err := dcl.Required(r, "percentage"); err != nil { + return err + } + return nil +} +func (r *DeliveryPipelineSerialPipelineStagesDeployParameters) validate() error { + if err := dcl.Required(r, "values"); err != nil { + return err + } return nil } func (r *DeliveryPipelineCondition) validate() error { @@ -65,6 +190,11 @@ func (r *DeliveryPipelineCondition) validate() error { return err } } + if !dcl.IsEmptyValueIndirect(r.TargetsTypeCondition) { + if err := r.TargetsTypeCondition.validate(); err != nil { + return err + } + } return nil } func (r *DeliveryPipelineConditionPipelineReadyCondition) validate() error { @@ -73,6 +203,9 @@ func (r *DeliveryPipelineConditionPipelineReadyCondition) validate() error { func (r *DeliveryPipelineConditionTargetsPresentCondition) validate() error { return nil } +func (r *DeliveryPipelineConditionTargetsTypeCondition) validate() error { + return nil +} func (r *DeliveryPipeline) basePath() string { params := map[string]interface{}{} return dcl.Nprintf("https://clouddeploy.googleapis.com/v1/", params) @@ -785,6 +918,8 @@ func canonicalizeDeliveryPipelineSerialPipelineStages(des, initial *DeliveryPipe } else { cDes.Profiles = des.Profiles } + cDes.Strategy = canonicalizeDeliveryPipelineSerialPipelineStagesStrategy(des.Strategy, initial.Strategy, opts...) 
+ cDes.DeployParameters = canonicalizeDeliveryPipelineSerialPipelineStagesDeployParametersSlice(des.DeployParameters, initial.DeployParameters, opts...) return cDes } @@ -837,6 +972,8 @@ func canonicalizeNewDeliveryPipelineSerialPipelineStages(c *Client, des, nw *Del if dcl.StringArrayCanonicalize(des.Profiles, nw.Profiles) { nw.Profiles = des.Profiles } + nw.Strategy = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategy(c, des.Strategy, nw.Strategy) + nw.DeployParameters = canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c, des.DeployParameters, nw.DeployParameters) return nw } @@ -887,7 +1024,7 @@ func canonicalizeNewDeliveryPipelineSerialPipelineStagesSlice(c *Client, des, nw return items } -func canonicalizeDeliveryPipelineCondition(des, initial *DeliveryPipelineCondition, opts ...dcl.ApplyOption) *DeliveryPipelineCondition { +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategy(des, initial *DeliveryPipelineSerialPipelineStagesStrategy, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategy { if des == nil { return initial } @@ -899,24 +1036,24 @@ func canonicalizeDeliveryPipelineCondition(des, initial *DeliveryPipelineConditi return des } - cDes := &DeliveryPipelineCondition{} + cDes := &DeliveryPipelineSerialPipelineStagesStrategy{} - cDes.PipelineReadyCondition = canonicalizeDeliveryPipelineConditionPipelineReadyCondition(des.PipelineReadyCondition, initial.PipelineReadyCondition, opts...) - cDes.TargetsPresentCondition = canonicalizeDeliveryPipelineConditionTargetsPresentCondition(des.TargetsPresentCondition, initial.TargetsPresentCondition, opts...) + cDes.Standard = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandard(des.Standard, initial.Standard, opts...) + cDes.Canary = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanary(des.Canary, initial.Canary, opts...) 
return cDes } -func canonicalizeDeliveryPipelineConditionSlice(des, initial []DeliveryPipelineCondition, opts ...dcl.ApplyOption) []DeliveryPipelineCondition { +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategy, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategy { if dcl.IsEmptyValueIndirect(des) { return initial } if len(des) != len(initial) { - items := make([]DeliveryPipelineCondition, 0, len(des)) + items := make([]DeliveryPipelineSerialPipelineStagesStrategy, 0, len(des)) for _, d := range des { - cd := canonicalizeDeliveryPipelineCondition(&d, nil, opts...) + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategy(&d, nil, opts...) if cd != nil { items = append(items, *cd) } @@ -924,9 +1061,9 @@ func canonicalizeDeliveryPipelineConditionSlice(des, initial []DeliveryPipelineC return items } - items := make([]DeliveryPipelineCondition, 0, len(des)) + items := make([]DeliveryPipelineSerialPipelineStagesStrategy, 0, len(des)) for i, d := range des { - cd := canonicalizeDeliveryPipelineCondition(&d, &initial[i], opts...) + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategy(&d, &initial[i], opts...) if cd != nil { items = append(items, *cd) } @@ -935,7 +1072,7 @@ func canonicalizeDeliveryPipelineConditionSlice(des, initial []DeliveryPipelineC } -func canonicalizeNewDeliveryPipelineCondition(c *Client, des, nw *DeliveryPipelineCondition) *DeliveryPipelineCondition { +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategy(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategy) *DeliveryPipelineSerialPipelineStagesStrategy { if des == nil { return nw @@ -943,35 +1080,35 @@ func canonicalizeNewDeliveryPipelineCondition(c *Client, des, nw *DeliveryPipeli if nw == nil { if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineCondition while comparing non-nil desired to nil actual. 
Returning desired object.") + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategy while comparing non-nil desired to nil actual. Returning desired object.") return des } return nil } - nw.PipelineReadyCondition = canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c, des.PipelineReadyCondition, nw.PipelineReadyCondition) - nw.TargetsPresentCondition = canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c, des.TargetsPresentCondition, nw.TargetsPresentCondition) + nw.Standard = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandard(c, des.Standard, nw.Standard) + nw.Canary = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanary(c, des.Canary, nw.Canary) return nw } -func canonicalizeNewDeliveryPipelineConditionSet(c *Client, des, nw []DeliveryPipelineCondition) []DeliveryPipelineCondition { +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategy) []DeliveryPipelineSerialPipelineStagesStrategy { if des == nil { return nw } // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DeliveryPipelineCondition + var items []DeliveryPipelineSerialPipelineStagesStrategy for _, d := range des { matchedIndex := -1 for i, n := range nw { - if diffs, _ := compareDeliveryPipelineConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { matchedIndex = i break } } if matchedIndex != -1 { - items = append(items, *canonicalizeNewDeliveryPipelineCondition(c, &d, &nw[matchedIndex])) + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategy(c, &d, &nw[matchedIndex])) nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
} } @@ -981,7 +1118,7 @@ func canonicalizeNewDeliveryPipelineConditionSet(c *Client, des, nw []DeliveryPi return items } -func canonicalizeNewDeliveryPipelineConditionSlice(c *Client, des, nw []DeliveryPipelineCondition) []DeliveryPipelineCondition { +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategy) []DeliveryPipelineSerialPipelineStagesStrategy { if des == nil { return nw } @@ -992,16 +1129,16 @@ func canonicalizeNewDeliveryPipelineConditionSlice(c *Client, des, nw []Delivery return nw } - var items []DeliveryPipelineCondition + var items []DeliveryPipelineSerialPipelineStagesStrategy for i, d := range des { n := nw[i] - items = append(items, *canonicalizeNewDeliveryPipelineCondition(c, &d, &n)) + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategy(c, &d, &n)) } return items } -func canonicalizeDeliveryPipelineConditionPipelineReadyCondition(des, initial *DeliveryPipelineConditionPipelineReadyCondition, opts ...dcl.ApplyOption) *DeliveryPipelineConditionPipelineReadyCondition { +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandard(des, initial *DeliveryPipelineSerialPipelineStagesStrategyStandard, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyStandard { if des == nil { return initial } @@ -1013,33 +1150,27 @@ func canonicalizeDeliveryPipelineConditionPipelineReadyCondition(des, initial *D return des } - cDes := &DeliveryPipelineConditionPipelineReadyCondition{} + cDes := &DeliveryPipelineSerialPipelineStagesStrategyStandard{} - if dcl.BoolCanonicalize(des.Status, initial.Status) || dcl.IsZeroValue(des.Status) { - cDes.Status = initial.Status - } else { - cDes.Status = des.Status - } - if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { - // Desired and initial values are equivalent, so set canonical desired value to initial 
value. - cDes.UpdateTime = initial.UpdateTime + if dcl.BoolCanonicalize(des.Verify, initial.Verify) || dcl.IsZeroValue(des.Verify) { + cDes.Verify = initial.Verify } else { - cDes.UpdateTime = des.UpdateTime + cDes.Verify = des.Verify } return cDes } -func canonicalizeDeliveryPipelineConditionPipelineReadyConditionSlice(des, initial []DeliveryPipelineConditionPipelineReadyCondition, opts ...dcl.ApplyOption) []DeliveryPipelineConditionPipelineReadyCondition { +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandardSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyStandard, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyStandard { if dcl.IsEmptyValueIndirect(des) { return initial } if len(des) != len(initial) { - items := make([]DeliveryPipelineConditionPipelineReadyCondition, 0, len(des)) + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandard, 0, len(des)) for _, d := range des { - cd := canonicalizeDeliveryPipelineConditionPipelineReadyCondition(&d, nil, opts...) + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandard(&d, nil, opts...) if cd != nil { items = append(items, *cd) } @@ -1047,9 +1178,9 @@ func canonicalizeDeliveryPipelineConditionPipelineReadyConditionSlice(des, initi return items } - items := make([]DeliveryPipelineConditionPipelineReadyCondition, 0, len(des)) + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandard, 0, len(des)) for i, d := range des { - cd := canonicalizeDeliveryPipelineConditionPipelineReadyCondition(&d, &initial[i], opts...) + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyStandard(&d, &initial[i], opts...) 
if cd != nil { items = append(items, *cd) } @@ -1058,7 +1189,7 @@ func canonicalizeDeliveryPipelineConditionPipelineReadyConditionSlice(des, initi } -func canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c *Client, des, nw *DeliveryPipelineConditionPipelineReadyCondition) *DeliveryPipelineConditionPipelineReadyCondition { +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandard(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyStandard) *DeliveryPipelineSerialPipelineStagesStrategyStandard { if des == nil { return nw @@ -1066,36 +1197,36 @@ func canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c *Client, d if nw == nil { if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineConditionPipelineReadyCondition while comparing non-nil desired to nil actual. Returning desired object.") + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyStandard while comparing non-nil desired to nil actual. Returning desired object.") return des } return nil } - if dcl.BoolCanonicalize(des.Status, nw.Status) { - nw.Status = des.Status + if dcl.BoolCanonicalize(des.Verify, nw.Verify) { + nw.Verify = des.Verify } return nw } -func canonicalizeNewDeliveryPipelineConditionPipelineReadyConditionSet(c *Client, des, nw []DeliveryPipelineConditionPipelineReadyCondition) []DeliveryPipelineConditionPipelineReadyCondition { +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandard) []DeliveryPipelineSerialPipelineStagesStrategyStandard { if des == nil { return nw } // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DeliveryPipelineConditionPipelineReadyCondition + var items []DeliveryPipelineSerialPipelineStagesStrategyStandard for _, d := range des { matchedIndex := -1 for i, n := range nw { - if diffs, _ := compareDeliveryPipelineConditionPipelineReadyConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyStandardNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { matchedIndex = i break } } if matchedIndex != -1 { - items = append(items, *canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c, &d, &nw[matchedIndex])) + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandard(c, &d, &nw[matchedIndex])) nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) } } @@ -1105,7 +1236,7 @@ func canonicalizeNewDeliveryPipelineConditionPipelineReadyConditionSet(c *Client return items } -func canonicalizeNewDeliveryPipelineConditionPipelineReadyConditionSlice(c *Client, des, nw []DeliveryPipelineConditionPipelineReadyCondition) []DeliveryPipelineConditionPipelineReadyCondition { +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandardSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyStandard) []DeliveryPipelineSerialPipelineStagesStrategyStandard { if des == nil { return nw } @@ -1116,16 +1247,16 @@ func canonicalizeNewDeliveryPipelineConditionPipelineReadyConditionSlice(c *Clie return nw } - var items []DeliveryPipelineConditionPipelineReadyCondition + var items []DeliveryPipelineSerialPipelineStagesStrategyStandard for i, d := range des { n := nw[i] - items = append(items, *canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c, &d, &n)) + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyStandard(c, &d, &n)) } return items } -func canonicalizeDeliveryPipelineConditionTargetsPresentCondition(des, initial *DeliveryPipelineConditionTargetsPresentCondition, opts 
...dcl.ApplyOption) *DeliveryPipelineConditionTargetsPresentCondition { +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanary(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanary, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanary { if des == nil { return initial } @@ -1133,42 +1264,49 @@ func canonicalizeDeliveryPipelineConditionTargetsPresentCondition(des, initial * return des } + if des.CanaryDeployment != nil || (initial != nil && initial.CanaryDeployment != nil) { + // Check if anything else is set. + if dcl.AnySet(des.CustomCanaryDeployment) { + des.CanaryDeployment = nil + if initial != nil { + initial.CanaryDeployment = nil + } + } + } + + if des.CustomCanaryDeployment != nil || (initial != nil && initial.CustomCanaryDeployment != nil) { + // Check if anything else is set. + if dcl.AnySet(des.CanaryDeployment) { + des.CustomCanaryDeployment = nil + if initial != nil { + initial.CustomCanaryDeployment = nil + } + } + } + if initial == nil { return des } - cDes := &DeliveryPipelineConditionTargetsPresentCondition{} + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanary{} - if dcl.BoolCanonicalize(des.Status, initial.Status) || dcl.IsZeroValue(des.Status) { - cDes.Status = initial.Status - } else { - cDes.Status = des.Status - } - if dcl.StringArrayCanonicalize(des.MissingTargets, initial.MissingTargets) { - cDes.MissingTargets = initial.MissingTargets - } else { - cDes.MissingTargets = des.MissingTargets - } - if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.UpdateTime = initial.UpdateTime - } else { - cDes.UpdateTime = des.UpdateTime - } + cDes.RuntimeConfig = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(des.RuntimeConfig, initial.RuntimeConfig, opts...) 
+ cDes.CanaryDeployment = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(des.CanaryDeployment, initial.CanaryDeployment, opts...) + cDes.CustomCanaryDeployment = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(des.CustomCanaryDeployment, initial.CustomCanaryDeployment, opts...) return cDes } -func canonicalizeDeliveryPipelineConditionTargetsPresentConditionSlice(des, initial []DeliveryPipelineConditionTargetsPresentCondition, opts ...dcl.ApplyOption) []DeliveryPipelineConditionTargetsPresentCondition { +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanarySlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanary, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanary { if dcl.IsEmptyValueIndirect(des) { return initial } if len(des) != len(initial) { - items := make([]DeliveryPipelineConditionTargetsPresentCondition, 0, len(des)) + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanary, 0, len(des)) for _, d := range des { - cd := canonicalizeDeliveryPipelineConditionTargetsPresentCondition(&d, nil, opts...) + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanary(&d, nil, opts...) if cd != nil { items = append(items, *cd) } @@ -1176,9 +1314,9 @@ func canonicalizeDeliveryPipelineConditionTargetsPresentConditionSlice(des, init return items } - items := make([]DeliveryPipelineConditionTargetsPresentCondition, 0, len(des)) + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanary, 0, len(des)) for i, d := range des { - cd := canonicalizeDeliveryPipelineConditionTargetsPresentCondition(&d, &initial[i], opts...) + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanary(&d, &initial[i], opts...) 
if cd != nil { items = append(items, *cd) } @@ -1187,7 +1325,7 @@ func canonicalizeDeliveryPipelineConditionTargetsPresentConditionSlice(des, init } -func canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c *Client, des, nw *DeliveryPipelineConditionTargetsPresentCondition) *DeliveryPipelineConditionTargetsPresentCondition { +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanary(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanary) *DeliveryPipelineSerialPipelineStagesStrategyCanary { if des == nil { return nw @@ -1195,39 +1333,36 @@ func canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c *Client, if nw == nil { if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineConditionTargetsPresentCondition while comparing non-nil desired to nil actual. Returning desired object.") + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanary while comparing non-nil desired to nil actual. 
Returning desired object.") return des } return nil } - if dcl.BoolCanonicalize(des.Status, nw.Status) { - nw.Status = des.Status - } - if dcl.StringArrayCanonicalize(des.MissingTargets, nw.MissingTargets) { - nw.MissingTargets = des.MissingTargets - } + nw.RuntimeConfig = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, des.RuntimeConfig, nw.RuntimeConfig) + nw.CanaryDeployment = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, des.CanaryDeployment, nw.CanaryDeployment) + nw.CustomCanaryDeployment = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, des.CustomCanaryDeployment, nw.CustomCanaryDeployment) return nw } -func canonicalizeNewDeliveryPipelineConditionTargetsPresentConditionSet(c *Client, des, nw []DeliveryPipelineConditionTargetsPresentCondition) []DeliveryPipelineConditionTargetsPresentCondition { +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanarySet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanary) []DeliveryPipelineSerialPipelineStagesStrategyCanary { if des == nil { return nw } // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DeliveryPipelineConditionTargetsPresentCondition + var items []DeliveryPipelineSerialPipelineStagesStrategyCanary for _, d := range des { matchedIndex := -1 for i, n := range nw { - if diffs, _ := compareDeliveryPipelineConditionTargetsPresentConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { matchedIndex = i break } } if matchedIndex != -1 { - items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c, &d, &nw[matchedIndex])) + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanary(c, &d, &nw[matchedIndex])) nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) } } @@ -1237,7 +1372,7 @@ func canonicalizeNewDeliveryPipelineConditionTargetsPresentConditionSet(c *Clien return items } -func canonicalizeNewDeliveryPipelineConditionTargetsPresentConditionSlice(c *Client, des, nw []DeliveryPipelineConditionTargetsPresentCondition) []DeliveryPipelineConditionTargetsPresentCondition { +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanarySlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanary) []DeliveryPipelineSerialPipelineStagesStrategyCanary { if des == nil { return nw } @@ -1248,448 +1383,4046 @@ func canonicalizeNewDeliveryPipelineConditionTargetsPresentConditionSlice(c *Cli return nw } - var items []DeliveryPipelineConditionTargetsPresentCondition + var items []DeliveryPipelineSerialPipelineStagesStrategyCanary for i, d := range des { n := nw[i] - items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c, &d, &n)) + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanary(c, &d, &n)) } return items } -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. 
Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. -func diffDeliveryPipeline(c *Client, desired, actual *DeliveryPipeline, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if des == nil { + return initial } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) + if des.empty { + return des } - if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err + if des.Kubernetes != nil || (initial != nil && initial.Kubernetes != nil) { + // Check if anything else is set. 
+ if dcl.AnySet(des.CloudRun) { + des.Kubernetes = nil + if initial != nil { + initial.Kubernetes = nil + } } - newDiffs = append(newDiffs, ds...) } - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err + if des.CloudRun != nil || (initial != nil && initial.CloudRun != nil) { + // Check if anything else is set. + if dcl.AnySet(des.Kubernetes) { + des.CloudRun = nil + if initial != nil { + initial.CloudRun = nil + } } - newDiffs = append(newDiffs, ds...) } - if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) + if initial == nil { + return des } - if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} - if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) + cDes.Kubernetes = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(des.Kubernetes, initial.Kubernetes, opts...) + cDes.CloudRun = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(des.CloudRun, initial.CloudRun, opts...) 
+ + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if dcl.IsEmptyValueIndirect(des) { + return initial } - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } } - newDiffs = append(newDiffs, ds...) + return items } - if ds, err := dcl.Diff(desired.SerialPipeline, actual.SerialPipeline, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipeline, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("SerialPipeline")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) } - newDiffs = append(newDiffs, ds...) 
} + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.Kubernetes = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, des.Kubernetes, nw.Kubernetes) + nw.CloudRun = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, des.CloudRun, nw.CloudRun) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if des == nil { + return initial + } + if des.empty { + return des + } + + if des.GatewayServiceMesh != nil || (initial != nil && initial.GatewayServiceMesh != nil) { + // Check if anything else is set. + if dcl.AnySet(des.ServiceNetworking) { + des.GatewayServiceMesh = nil + if initial != nil { + initial.GatewayServiceMesh = nil + } + } + } + + if des.ServiceNetworking != nil || (initial != nil && initial.ServiceNetworking != nil) { + // Check if anything else is set. + if dcl.AnySet(des.GatewayServiceMesh) { + des.ServiceNetworking = nil + if initial != nil { + initial.ServiceNetworking = nil + } + } + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + + cDes.GatewayServiceMesh = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(des.GatewayServiceMesh, initial.GatewayServiceMesh, opts...) 
+ cDes.ServiceNetworking = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(des.ServiceNetworking, initial.ServiceNetworking, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + nw.GatewayServiceMesh = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, des.GatewayServiceMesh, nw.GatewayServiceMesh) + nw.ServiceNetworking = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, des.ServiceNetworking, nw.ServiceNetworking) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + + if dcl.StringCanonicalize(des.HttpRoute, initial.HttpRoute) || dcl.IsZeroValue(des.HttpRoute) { + cDes.HttpRoute = initial.HttpRoute + } else { + cDes.HttpRoute = des.HttpRoute + } + if dcl.StringCanonicalize(des.Service, initial.Service) || dcl.IsZeroValue(des.Service) { + cDes.Service = initial.Service + } else { + cDes.Service = des.Service + } + if dcl.StringCanonicalize(des.Deployment, initial.Deployment) || dcl.IsZeroValue(des.Deployment) { + cDes.Deployment = initial.Deployment + } else { + cDes.Deployment = des.Deployment + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, 0, 
len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.HttpRoute, nw.HttpRoute) { + nw.HttpRoute = des.HttpRoute + } + if dcl.StringCanonicalize(des.Service, nw.Service) { + nw.Service = des.Service + } + if dcl.StringCanonicalize(des.Deployment, nw.Deployment) { + nw.Deployment = des.Deployment + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if des == nil { + return nw + } + + // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + + if dcl.StringCanonicalize(des.Service, initial.Service) || dcl.IsZeroValue(des.Service) { + cDes.Service = initial.Service + } else { + cDes.Service = des.Service + } + if dcl.StringCanonicalize(des.Deployment, initial.Deployment) || dcl.IsZeroValue(des.Deployment) { + cDes.Deployment = initial.Deployment + } else { + cDes.Deployment = des.Deployment + } + if dcl.BoolCanonicalize(des.DisablePodOverprovisioning, initial.DisablePodOverprovisioning) || dcl.IsZeroValue(des.DisablePodOverprovisioning) { + cDes.DisablePodOverprovisioning = initial.DisablePodOverprovisioning + } else { + cDes.DisablePodOverprovisioning = des.DisablePodOverprovisioning + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, opts ...dcl.ApplyOption) 
[]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking while comparing non-nil desired to nil actual. 
Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Service, nw.Service) { + nw.Service = des.Service + } + if dcl.StringCanonicalize(des.Deployment, nw.Deployment) { + nw.Deployment = des.Deployment + } + if dcl.BoolCanonicalize(des.DisablePodOverprovisioning, nw.DisablePodOverprovisioning) { + nw.DisablePodOverprovisioning = des.DisablePodOverprovisioning + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + + if dcl.BoolCanonicalize(des.AutomaticTrafficControl, initial.AutomaticTrafficControl) || dcl.IsZeroValue(des.AutomaticTrafficControl) { + cDes.AutomaticTrafficControl = initial.AutomaticTrafficControl + } else { + cDes.AutomaticTrafficControl = des.AutomaticTrafficControl + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != 
len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.AutomaticTrafficControl, nw.AutomaticTrafficControl) { + nw.AutomaticTrafficControl = des.AutomaticTrafficControl + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + + if dcl.IsZeroValue(des.Percentages) || (dcl.IsEmptyValueIndirect(des.Percentages) && dcl.IsEmptyValueIndirect(initial.Percentages)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Percentages = initial.Percentages + } else { + cDes.Percentages = des.Percentages + } + if dcl.BoolCanonicalize(des.Verify, initial.Verify) || dcl.IsZeroValue(des.Verify) { + cDes.Verify = initial.Verify + } else { + cDes.Verify = des.Verify + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Verify, nw.Verify) { + nw.Verify = des.Verify + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + + cDes.PhaseConfigs = canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(des.PhaseConfigs, initial.PhaseConfigs, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.PhaseConfigs = canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c, des.PhaseConfigs, nw.PhaseConfigs) + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(des, initial *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} + + if dcl.StringCanonicalize(des.PhaseId, initial.PhaseId) || dcl.IsZeroValue(des.PhaseId) { + cDes.PhaseId = initial.PhaseId + } else { + cDes.PhaseId = des.PhaseId + } + if dcl.IsZeroValue(des.Percentage) || (dcl.IsEmptyValueIndirect(des.Percentage) && dcl.IsEmptyValueIndirect(initial.Percentage)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Percentage = initial.Percentage + } else { + cDes.Percentage = des.Percentage + } + if dcl.StringArrayCanonicalize(des.Profiles, initial.Profiles) { + cDes.Profiles = initial.Profiles + } else { + cDes.Profiles = des.Profiles + } + if dcl.BoolCanonicalize(des.Verify, initial.Verify) || dcl.IsZeroValue(des.Verify) { + cDes.Verify = initial.Verify + } else { + cDes.Verify = des.Verify + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(des, initial []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.PhaseId, nw.PhaseId) { + nw.PhaseId = des.PhaseId + } + if dcl.StringArrayCanonicalize(des.Profiles, nw.Profiles) { + nw.Profiles = des.Profiles + } + if dcl.BoolCanonicalize(des.Verify, nw.Verify) { + nw.Verify = des.Verify + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. 
+ if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesDeployParameters(des, initial *DeliveryPipelineSerialPipelineStagesDeployParameters, opts ...dcl.ApplyOption) *DeliveryPipelineSerialPipelineStagesDeployParameters { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineSerialPipelineStagesDeployParameters{} + + if dcl.IsZeroValue(des.Values) || (dcl.IsEmptyValueIndirect(des.Values) && dcl.IsEmptyValueIndirect(initial.Values)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Values = initial.Values + } else { + cDes.Values = des.Values + } + if dcl.IsZeroValue(des.MatchTargetLabels) || (dcl.IsEmptyValueIndirect(des.MatchTargetLabels) && dcl.IsEmptyValueIndirect(initial.MatchTargetLabels)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.MatchTargetLabels = initial.MatchTargetLabels + } else { + cDes.MatchTargetLabels = des.MatchTargetLabels + } + + return cDes +} + +func canonicalizeDeliveryPipelineSerialPipelineStagesDeployParametersSlice(des, initial []DeliveryPipelineSerialPipelineStagesDeployParameters, opts ...dcl.ApplyOption) []DeliveryPipelineSerialPipelineStagesDeployParameters { + if des == nil { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesDeployParameters(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineSerialPipelineStagesDeployParameters(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParameters(c *Client, des, nw *DeliveryPipelineSerialPipelineStagesDeployParameters) *DeliveryPipelineSerialPipelineStagesDeployParameters { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineSerialPipelineStagesDeployParameters while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + return nw +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParametersSet(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesDeployParameters) []DeliveryPipelineSerialPipelineStagesDeployParameters { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineSerialPipelineStagesDeployParameters + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineSerialPipelineStagesDeployParametersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParameters(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c *Client, des, nw []DeliveryPipelineSerialPipelineStagesDeployParameters) []DeliveryPipelineSerialPipelineStagesDeployParameters { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineSerialPipelineStagesDeployParameters + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineSerialPipelineStagesDeployParameters(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineCondition(des, initial *DeliveryPipelineCondition, opts ...dcl.ApplyOption) *DeliveryPipelineCondition { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineCondition{} + + cDes.PipelineReadyCondition = canonicalizeDeliveryPipelineConditionPipelineReadyCondition(des.PipelineReadyCondition, initial.PipelineReadyCondition, opts...) + cDes.TargetsPresentCondition = canonicalizeDeliveryPipelineConditionTargetsPresentCondition(des.TargetsPresentCondition, initial.TargetsPresentCondition, opts...) + cDes.TargetsTypeCondition = canonicalizeDeliveryPipelineConditionTargetsTypeCondition(des.TargetsTypeCondition, initial.TargetsTypeCondition, opts...) + + return cDes +} + +func canonicalizeDeliveryPipelineConditionSlice(des, initial []DeliveryPipelineCondition, opts ...dcl.ApplyOption) []DeliveryPipelineCondition { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineCondition, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineCondition(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineCondition, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineCondition(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineCondition(c *Client, des, nw *DeliveryPipelineCondition) *DeliveryPipelineCondition { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineCondition while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + nw.PipelineReadyCondition = canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c, des.PipelineReadyCondition, nw.PipelineReadyCondition) + nw.TargetsPresentCondition = canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c, des.TargetsPresentCondition, nw.TargetsPresentCondition) + nw.TargetsTypeCondition = canonicalizeNewDeliveryPipelineConditionTargetsTypeCondition(c, des.TargetsTypeCondition, nw.TargetsTypeCondition) + + return nw +} + +func canonicalizeNewDeliveryPipelineConditionSet(c *Client, des, nw []DeliveryPipelineCondition) []DeliveryPipelineCondition { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineCondition + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineCondition(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineConditionSlice(c *Client, des, nw []DeliveryPipelineCondition) []DeliveryPipelineCondition { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineCondition + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineCondition(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineConditionPipelineReadyCondition(des, initial *DeliveryPipelineConditionPipelineReadyCondition, opts ...dcl.ApplyOption) *DeliveryPipelineConditionPipelineReadyCondition { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineConditionPipelineReadyCondition{} + + if dcl.BoolCanonicalize(des.Status, initial.Status) || dcl.IsZeroValue(des.Status) { + cDes.Status = initial.Status + } else { + cDes.Status = des.Status + } + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + + return cDes +} + +func canonicalizeDeliveryPipelineConditionPipelineReadyConditionSlice(des, initial []DeliveryPipelineConditionPipelineReadyCondition, opts ...dcl.ApplyOption) []DeliveryPipelineConditionPipelineReadyCondition { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineConditionPipelineReadyCondition, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineConditionPipelineReadyCondition(&d, nil, opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineConditionPipelineReadyCondition, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineConditionPipelineReadyCondition(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c *Client, des, nw *DeliveryPipelineConditionPipelineReadyCondition) *DeliveryPipelineConditionPipelineReadyCondition { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineConditionPipelineReadyCondition while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Status, nw.Status) { + nw.Status = des.Status + } + + return nw +} + +func canonicalizeNewDeliveryPipelineConditionPipelineReadyConditionSet(c *Client, des, nw []DeliveryPipelineConditionPipelineReadyCondition) []DeliveryPipelineConditionPipelineReadyCondition { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []DeliveryPipelineConditionPipelineReadyCondition + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineConditionPipelineReadyConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) 
+ + return items +} + +func canonicalizeNewDeliveryPipelineConditionPipelineReadyConditionSlice(c *Client, des, nw []DeliveryPipelineConditionPipelineReadyCondition) []DeliveryPipelineConditionPipelineReadyCondition { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineConditionPipelineReadyCondition + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineConditionPipelineReadyCondition(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineConditionTargetsPresentCondition(des, initial *DeliveryPipelineConditionTargetsPresentCondition, opts ...dcl.ApplyOption) *DeliveryPipelineConditionTargetsPresentCondition { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineConditionTargetsPresentCondition{} + + if dcl.BoolCanonicalize(des.Status, initial.Status) || dcl.IsZeroValue(des.Status) { + cDes.Status = initial.Status + } else { + cDes.Status = des.Status + } + if dcl.StringArrayCanonicalize(des.MissingTargets, initial.MissingTargets) { + cDes.MissingTargets = initial.MissingTargets + } else { + cDes.MissingTargets = des.MissingTargets + } + if dcl.IsZeroValue(des.UpdateTime) || (dcl.IsEmptyValueIndirect(des.UpdateTime) && dcl.IsEmptyValueIndirect(initial.UpdateTime)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.UpdateTime = initial.UpdateTime + } else { + cDes.UpdateTime = des.UpdateTime + } + + return cDes +} + +func canonicalizeDeliveryPipelineConditionTargetsPresentConditionSlice(des, initial []DeliveryPipelineConditionTargetsPresentCondition, opts ...dcl.ApplyOption) []DeliveryPipelineConditionTargetsPresentCondition { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineConditionTargetsPresentCondition, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineConditionTargetsPresentCondition(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineConditionTargetsPresentCondition, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineConditionTargetsPresentCondition(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c *Client, des, nw *DeliveryPipelineConditionTargetsPresentCondition) *DeliveryPipelineConditionTargetsPresentCondition { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineConditionTargetsPresentCondition while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Status, nw.Status) { + nw.Status = des.Status + } + if dcl.StringArrayCanonicalize(des.MissingTargets, nw.MissingTargets) { + nw.MissingTargets = des.MissingTargets + } + + return nw +} + +func canonicalizeNewDeliveryPipelineConditionTargetsPresentConditionSet(c *Client, des, nw []DeliveryPipelineConditionTargetsPresentCondition) []DeliveryPipelineConditionTargetsPresentCondition { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. 
Remove matched elements from nw. + var items []DeliveryPipelineConditionTargetsPresentCondition + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineConditionTargetsPresentConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineConditionTargetsPresentConditionSlice(c *Client, des, nw []DeliveryPipelineConditionTargetsPresentCondition) []DeliveryPipelineConditionTargetsPresentCondition { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineConditionTargetsPresentCondition + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsPresentCondition(c, &d, &n)) + } + + return items +} + +func canonicalizeDeliveryPipelineConditionTargetsTypeCondition(des, initial *DeliveryPipelineConditionTargetsTypeCondition, opts ...dcl.ApplyOption) *DeliveryPipelineConditionTargetsTypeCondition { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &DeliveryPipelineConditionTargetsTypeCondition{} + + if dcl.BoolCanonicalize(des.Status, initial.Status) || dcl.IsZeroValue(des.Status) { + cDes.Status = initial.Status + } else { + cDes.Status = des.Status + } + if dcl.StringCanonicalize(des.ErrorDetails, initial.ErrorDetails) || dcl.IsZeroValue(des.ErrorDetails) { + cDes.ErrorDetails = initial.ErrorDetails + } else { + cDes.ErrorDetails = 
des.ErrorDetails + } + + return cDes +} + +func canonicalizeDeliveryPipelineConditionTargetsTypeConditionSlice(des, initial []DeliveryPipelineConditionTargetsTypeCondition, opts ...dcl.ApplyOption) []DeliveryPipelineConditionTargetsTypeCondition { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]DeliveryPipelineConditionTargetsTypeCondition, 0, len(des)) + for _, d := range des { + cd := canonicalizeDeliveryPipelineConditionTargetsTypeCondition(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]DeliveryPipelineConditionTargetsTypeCondition, 0, len(des)) + for i, d := range des { + cd := canonicalizeDeliveryPipelineConditionTargetsTypeCondition(&d, &initial[i], opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewDeliveryPipelineConditionTargetsTypeCondition(c *Client, des, nw *DeliveryPipelineConditionTargetsTypeCondition) *DeliveryPipelineConditionTargetsTypeCondition { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for DeliveryPipelineConditionTargetsTypeCondition while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.BoolCanonicalize(des.Status, nw.Status) { + nw.Status = des.Status + } + if dcl.StringCanonicalize(des.ErrorDetails, nw.ErrorDetails) { + nw.ErrorDetails = des.ErrorDetails + } + + return nw +} + +func canonicalizeNewDeliveryPipelineConditionTargetsTypeConditionSet(c *Client, des, nw []DeliveryPipelineConditionTargetsTypeCondition) []DeliveryPipelineConditionTargetsTypeCondition { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
+ var items []DeliveryPipelineConditionTargetsTypeCondition + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareDeliveryPipelineConditionTargetsTypeConditionNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsTypeCondition(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewDeliveryPipelineConditionTargetsTypeConditionSlice(c *Client, des, nw []DeliveryPipelineConditionTargetsTypeCondition) []DeliveryPipelineConditionTargetsTypeCondition { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []DeliveryPipelineConditionTargetsTypeCondition + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewDeliveryPipelineConditionTargetsTypeCondition(c, &d, &n)) + } + + return items +} + +// The differ returns a list of diffs, along with a list of operations that should be taken +// to remedy them. Right now, it does not attempt to consolidate operations - if several +// fields can be fixed with a patch update, it will perform the patch several times. +// Diffs on some fields will be ignored if the `desired` state has an empty (nil) +// value. This empty value indicates that the user does not care about the state for +// the field. Empty fields on the actual object will cause diffs. +// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
+func diffDeliveryPipeline(c *Client, desired, actual *DeliveryPipeline, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { + if desired == nil || actual == nil { + return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) + } + + c.Config.Logger.Infof("Diff function called with desired state: %v", desired) + c.Config.Logger.Infof("Diff function called with actual state: %v", actual) + + var fn dcl.FieldName + var newDiffs []*dcl.FieldDiff + // New style diffs. + if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Annotations, actual.Annotations, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Annotations")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.SerialPipeline, actual.SerialPipeline, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipeline, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("SerialPipeline")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Condition, actual.Condition, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareDeliveryPipelineConditionNewStyle, EmptyObject: EmptyDeliveryPipelineCondition, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Condition")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.Suspended, actual.Suspended, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Suspended")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if len(newDiffs) > 0 { + c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + } + return newDiffs, nil +} +func compareDeliveryPipelineSerialPipelineNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipeline) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipeline) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipeline or *DeliveryPipelineSerialPipeline", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipeline) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipeline) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipeline", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Stages, actual.Stages, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStages, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Stages")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStages) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStages) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStages or *DeliveryPipelineSerialPipelineStages", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStages) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStages) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStages", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.TargetId, actual.TargetId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("TargetId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Profiles, actual.Profiles, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Profiles")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Strategy, actual.Strategy, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategy, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Strategy")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.DeployParameters, actual.DeployParameters, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesDeployParametersNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesDeployParameters, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("DeployParameters")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategy) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategy) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategy or *DeliveryPipelineSerialPipelineStagesStrategy", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategy) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategy) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategy", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Standard, actual.Standard, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyStandardNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Standard")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Canary, actual.Canary, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Canary")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyStandardNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyStandard) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyStandard) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandard or *DeliveryPipelineSerialPipelineStagesStrategyStandard", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyStandard) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyStandard) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyStandard", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Verify, actual.Verify, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Verify")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanary) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanary) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanary or *DeliveryPipelineSerialPipelineStagesStrategyCanary", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanary) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanary) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanary", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.RuntimeConfig, actual.RuntimeConfig, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("RuntimeConfig")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CanaryDeployment, actual.CanaryDeployment, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("CanaryDeployment")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.CustomCanaryDeployment, actual.CustomCanaryDeployment, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("CustomCanaryDeployment")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Kubernetes, actual.Kubernetes, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Kubernetes")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + 
diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.CloudRun, actual.CloudRun, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("CloudRun")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.GatewayServiceMesh, actual.GatewayServiceMesh, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, OperationSelector: 
dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("GatewayServiceMesh")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ServiceNetworking, actual.ServiceNetworking, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("ServiceNetworking")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh", a) + } + actual = &actualNotPointer + } + + if ds, err 
:= dcl.Diff(desired.HttpRoute, actual.HttpRoute, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("HttpRoute")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Service, actual.Service, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Service")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Deployment, actual.Deployment, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Deployment")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) + if !ok { + return nil, fmt.Errorf("obj %v is not a 
DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Service, actual.Service, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Service")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Deployment, actual.Deployment, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Deployment")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DisablePodOverprovisioning, actual.DisablePodOverprovisioning, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("DisablePodOverprovisioning")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun or *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.AutomaticTrafficControl, actual.AutomaticTrafficControl, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("AutomaticTrafficControl")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Percentages, actual.Percentages, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Percentages")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Verify, actual.Verify, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Verify")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.PhaseConfigs, actual.PhaseConfigs, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PhaseConfigs")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs or *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.PhaseId, actual.PhaseId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PhaseId")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Percentage, actual.Percentage, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Percentage")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.Profiles, actual.Profiles, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Profiles")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.Verify, actual.Verify, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Verify")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineSerialPipelineStagesDeployParametersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineSerialPipelineStagesDeployParameters) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStagesDeployParameters) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesDeployParameters or *DeliveryPipelineSerialPipelineStagesDeployParameters", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineSerialPipelineStagesDeployParameters) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStagesDeployParameters) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStagesDeployParameters", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Values, actual.Values, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Values")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.MatchTargetLabels, actual.MatchTargetLabels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("MatchTargetLabels")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineCondition) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineCondition or *DeliveryPipelineCondition", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineCondition) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineCondition", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.PipelineReadyCondition, actual.PipelineReadyCondition, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineConditionPipelineReadyConditionNewStyle, EmptyObject: EmptyDeliveryPipelineConditionPipelineReadyCondition, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PipelineReadyCondition")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.TargetsPresentCondition, actual.TargetsPresentCondition, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineConditionTargetsPresentConditionNewStyle, EmptyObject: EmptyDeliveryPipelineConditionTargetsPresentCondition, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("TargetsPresentCondition")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.TargetsTypeCondition, actual.TargetsTypeCondition, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineConditionTargetsTypeConditionNewStyle, EmptyObject: EmptyDeliveryPipelineConditionTargetsTypeCondition, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("TargetsTypeCondition")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +func compareDeliveryPipelineConditionPipelineReadyConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineConditionPipelineReadyCondition) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineConditionPipelineReadyCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionPipelineReadyCondition or *DeliveryPipelineConditionPipelineReadyCondition", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineConditionPipelineReadyCondition) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineConditionPipelineReadyCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionPipelineReadyCondition", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Status, actual.Status, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Status")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareDeliveryPipelineConditionTargetsPresentConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineConditionTargetsPresentCondition) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineConditionTargetsPresentCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsPresentCondition or *DeliveryPipelineConditionTargetsPresentCondition", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineConditionTargetsPresentCondition) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineConditionTargetsPresentCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsPresentCondition", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Status, actual.Status, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Status")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.MissingTargets, actual.MissingTargets, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("MissingTargets")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + return diffs, nil +} + +func compareDeliveryPipelineConditionTargetsTypeConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*DeliveryPipelineConditionTargetsTypeCondition) + if !ok { + desiredNotPointer, ok := d.(DeliveryPipelineConditionTargetsTypeCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsTypeCondition or *DeliveryPipelineConditionTargetsTypeCondition", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*DeliveryPipelineConditionTargetsTypeCondition) + if !ok { + actualNotPointer, ok := a.(DeliveryPipelineConditionTargetsTypeCondition) + if !ok { + return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsTypeCondition", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Status, actual.Status, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Status")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.ErrorDetails, actual.ErrorDetails, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("ErrorDetails")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + +// urlNormalized returns a copy of the resource struct with values normalized +// for URL substitutions. For instance, it converts long-form self-links to +// short-form so they can be substituted in. 
+func (r *DeliveryPipeline) urlNormalized() *DeliveryPipeline { + normalized := dcl.Copy(*r).(DeliveryPipeline) + normalized.Name = dcl.SelfLinkToName(r.Name) + normalized.Uid = dcl.SelfLinkToName(r.Uid) + normalized.Description = dcl.SelfLinkToName(r.Description) + normalized.Etag = dcl.SelfLinkToName(r.Etag) + normalized.Project = dcl.SelfLinkToName(r.Project) + normalized.Location = dcl.SelfLinkToName(r.Location) + return &normalized +} + +func (r *DeliveryPipeline) updateURL(userBasePath, updateName string) (string, error) { + nr := r.urlNormalized() + if updateName == "UpdateDeliveryPipeline" { + fields := map[string]interface{}{ + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "name": dcl.ValueOrEmptyString(nr.Name), + } + return dcl.URL("projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}", nr.basePath(), userBasePath, fields), nil + + } + + return "", fmt.Errorf("unknown update name: %s", updateName) +} + +// marshal encodes the DeliveryPipeline resource into JSON for a Create request, and +// performs transformations from the resource schema to the API schema if +// necessary. +func (r *DeliveryPipeline) marshal(c *Client) ([]byte, error) { + m, err := expandDeliveryPipeline(c, r) + if err != nil { + return nil, fmt.Errorf("error marshalling DeliveryPipeline: %w", err) + } + + return json.Marshal(m) +} + +// unmarshalDeliveryPipeline decodes JSON responses into the DeliveryPipeline resource schema. 
+func unmarshalDeliveryPipeline(b []byte, c *Client, res *DeliveryPipeline) (*DeliveryPipeline, error) { + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return unmarshalMapDeliveryPipeline(m, c, res) +} + +func unmarshalMapDeliveryPipeline(m map[string]interface{}, c *Client, res *DeliveryPipeline) (*DeliveryPipeline, error) { + + flattened := flattenDeliveryPipeline(c, m, res) + if flattened == nil { + return nil, fmt.Errorf("attempted to flatten empty json object") + } + return flattened, nil +} + +// expandDeliveryPipeline expands DeliveryPipeline into a JSON request object. +func expandDeliveryPipeline(c *Client, f *DeliveryPipeline) (map[string]interface{}, error) { + m := make(map[string]interface{}) + res := f + _ = res + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Name into name: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["name"] = v + } + if v := f.Description; dcl.ValueShouldBeSent(v) { + m["description"] = v + } + if v := f.Annotations; dcl.ValueShouldBeSent(v) { + m["annotations"] = v + } + if v := f.Labels; dcl.ValueShouldBeSent(v) { + m["labels"] = v + } + if v, err := expandDeliveryPipelineSerialPipeline(c, f.SerialPipeline, res); err != nil { + return nil, fmt.Errorf("error expanding SerialPipeline into serialPipeline: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["serialPipeline"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Project into project: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["project"] = v + } + if v, err := dcl.EmptyValue(); err != nil { + return nil, fmt.Errorf("error expanding Location into location: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + if v := f.Suspended; dcl.ValueShouldBeSent(v) { + m["suspended"] = v + } + + return m, nil +} + +// flattenDeliveryPipeline flattens DeliveryPipeline from a JSON request 
object into the +// DeliveryPipeline type. +func flattenDeliveryPipeline(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipeline { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + if len(m) == 0 { + return nil + } + + resultRes := &DeliveryPipeline{} + resultRes.Name = dcl.FlattenString(m["name"]) + resultRes.Uid = dcl.FlattenString(m["uid"]) + resultRes.Description = dcl.FlattenString(m["description"]) + resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) + resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) + resultRes.CreateTime = dcl.FlattenString(m["createTime"]) + resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) + resultRes.SerialPipeline = flattenDeliveryPipelineSerialPipeline(c, m["serialPipeline"], res) + resultRes.Condition = flattenDeliveryPipelineCondition(c, m["condition"], res) + resultRes.Etag = dcl.FlattenString(m["etag"]) + resultRes.Project = dcl.FlattenString(m["project"]) + resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Suspended = dcl.FlattenBool(m["suspended"]) + + return resultRes +} + +// expandDeliveryPipelineSerialPipelineMap expands the contents of DeliveryPipelineSerialPipeline into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineMap(c *Client, f map[string]DeliveryPipelineSerialPipeline, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipeline(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineSlice expands the contents of DeliveryPipelineSerialPipeline into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineSlice(c *Client, f []DeliveryPipelineSerialPipeline, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipeline(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineMap flattens the contents of DeliveryPipelineSerialPipeline from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipeline { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipeline{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipeline{} + } + + items := make(map[string]DeliveryPipelineSerialPipeline) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipeline(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineSlice flattens the contents of DeliveryPipelineSerialPipeline from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipeline { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipeline{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipeline{} + } + + items := make([]DeliveryPipelineSerialPipeline, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipeline(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipeline expands an instance of DeliveryPipelineSerialPipeline into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipeline(c *Client, f *DeliveryPipelineSerialPipeline, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesSlice(c, f.Stages, res); err != nil { + return nil, fmt.Errorf("error expanding Stages into stages: %w", err) + } else if v != nil { + m["stages"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipeline flattens an instance of DeliveryPipelineSerialPipeline from a JSON +// response object. +func flattenDeliveryPipelineSerialPipeline(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipeline { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipeline{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipeline + } + r.Stages = flattenDeliveryPipelineSerialPipelineStagesSlice(c, m["stages"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesMap expands the contents of DeliveryPipelineSerialPipelineStages into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStages, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStages(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesSlice expands the contents of DeliveryPipelineSerialPipelineStages into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesSlice(c *Client, f []DeliveryPipelineSerialPipelineStages, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStages(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesMap flattens the contents of DeliveryPipelineSerialPipelineStages from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStages { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStages{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStages{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStages) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStages(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesSlice flattens the contents of DeliveryPipelineSerialPipelineStages from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStages { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStages{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStages{} + } + + items := make([]DeliveryPipelineSerialPipelineStages, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStages(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStages expands an instance of DeliveryPipelineSerialPipelineStages into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStages(c *Client, f *DeliveryPipelineSerialPipelineStages, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.TargetId; !dcl.IsEmptyValueIndirect(v) { + m["targetId"] = v + } + if v := f.Profiles; v != nil { + m["profiles"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategy(c, f.Strategy, res); err != nil { + return nil, fmt.Errorf("error expanding Strategy into strategy: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["strategy"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c, f.DeployParameters, res); err != nil { + return nil, fmt.Errorf("error expanding DeployParameters into deployParameters: %w", err) + } else if v != nil { + m["deployParameters"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStages flattens an instance of DeliveryPipelineSerialPipelineStages from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStages(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStages { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStages{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStages + } + r.TargetId = dcl.FlattenString(m["targetId"]) + r.Profiles = dcl.FlattenStringSlice(m["profiles"]) + r.Strategy = flattenDeliveryPipelineSerialPipelineStagesStrategy(c, m["strategy"], res) + r.DeployParameters = flattenDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c, m["deployParameters"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategy, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategy(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategy into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategy, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategy(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategy from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategy { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategy{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategy{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategy) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategy(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategy { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategy{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategy{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategy, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategy(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategy expands an instance of DeliveryPipelineSerialPipelineStagesStrategy into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategy(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategy, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandard(c, f.Standard, res); err != nil { + return nil, fmt.Errorf("error expanding Standard into standard: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["standard"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanary(c, f.Canary, res); err != nil { + return nil, fmt.Errorf("error expanding Canary into canary: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["canary"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategy flattens an instance of DeliveryPipelineSerialPipelineStagesStrategy from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategy(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategy { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategy{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategy + } + r.Standard = flattenDeliveryPipelineSerialPipelineStagesStrategyStandard(c, m["standard"], res) + r.Canary = flattenDeliveryPipelineSerialPipelineStagesStrategyCanary(c, m["canary"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandardMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandard into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandardMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandard(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandardSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyStandard into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyStandardSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyStandard, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyStandard(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandard from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyStandard) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyStandard(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandardSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyStandard from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyStandardSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyStandard { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyStandard, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyStandard(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyStandard expands an instance of DeliveryPipelineSerialPipelineStagesStrategyStandard into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyStandard(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyStandard, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Verify; !dcl.IsEmptyValueIndirect(v) { + m["verify"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyStandard flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyStandard from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyStandard(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyStandard { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyStandard{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } + r.Verify = dcl.FlattenBool(m["verify"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanary into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanary(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanarySlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanary into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanarySlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanary, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanary(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanary from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanary) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanary(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanarySlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanary from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanarySlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanary { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanary{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanary{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanary, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanary(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanary expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanary into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanary(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanary, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, f.RuntimeConfig, res); err != nil { + return nil, fmt.Errorf("error expanding RuntimeConfig into runtimeConfig: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["runtimeConfig"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, f.CanaryDeployment, res); err != nil { + return nil, fmt.Errorf("error expanding CanaryDeployment into canaryDeployment: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["canaryDeployment"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, f.CustomCanaryDeployment, res); err != nil { + return nil, fmt.Errorf("error expanding CustomCanaryDeployment into customCanaryDeployment: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["customCanaryDeployment"] = v + } + + return m, 
nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanary flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanary from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanary(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanary { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanary{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary + } + r.RuntimeConfig = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, m["runtimeConfig"], res) + r.CanaryDeployment = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, m["canaryDeployment"], res) + r.CustomCanaryDeployment = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, m["customCanaryDeployment"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, &item, res) + if err != nil { + return nil, err + } - if ds, err := dcl.Diff(desired.Condition, actual.Condition, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareDeliveryPipelineConditionNewStyle, EmptyObject: EmptyDeliveryPipelineCondition, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Condition")); len(ds) != 0 || err != nil { + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, f.Kubernetes, res); err != nil { + return nil, fmt.Errorf("error expanding Kubernetes into kubernetes: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["kubernetes"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, f.CloudRun, res); err != nil { + return nil, fmt.Errorf("error expanding CloudRun into cloudRun: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["cloudRun"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + } + r.Kubernetes = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, m["kubernetes"], res) + r.CloudRun = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, m["cloudRun"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, &item, res) if err != nil { return nil, err } - newDiffs = append(newDiffs, ds...) + if i != nil { + items[k] = i + } } - if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, &item, res) if err != nil { return nil, err } - newDiffs = append(newDiffs, ds...) + + items = append(items, i) } - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, f.GatewayServiceMesh, res); err != nil { + return nil, fmt.Errorf("error expanding GatewayServiceMesh into gatewayServiceMesh: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["gatewayServiceMesh"] = v + } + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, f.ServiceNetworking, res); err != nil { + return nil, fmt.Errorf("error expanding ServiceNetworking into serviceNetworking: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["serviceNetworking"] = v + } + + return m, nil +} + +// 
flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + } + r.GatewayServiceMesh = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, m["gatewayServiceMesh"], res) + r.ServiceNetworking = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, m["serviceNetworking"], res) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, &item, res) if err != nil { return nil, err } - newDiffs = append(newDiffs, ds...) 
+ if i != nil { + items[k] = i + } } - if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, &item, res) if err != nil { return nil, err } - newDiffs = append(newDiffs, ds...) + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + } + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.HttpRoute; !dcl.IsEmptyValueIndirect(v) { + m["httpRoute"] = v + } + if v := f.Service; !dcl.IsEmptyValueIndirect(v) { + m["service"] = v + } + if v := f.Deployment; !dcl.IsEmptyValueIndirect(v) { + m["deployment"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + } + r.HttpRoute = dcl.FlattenString(m["httpRoute"]) + r.Service = dcl.FlattenString(m["service"]) + r.Deployment = dcl.FlattenString(m["deployment"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil } - if ds, err := dcl.Diff(desired.Suspended, actual.Suspended, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Suspended")); len(ds) != 0 || err != nil { + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, &item, res) if err != nil { return nil, err } - newDiffs = append(newDiffs, ds...) - } - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) + items = append(items, i) } - return newDiffs, nil + + return items, nil } -func compareDeliveryPipelineSerialPipelineNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - desired, ok := d.(*DeliveryPipelineSerialPipeline) +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + a, ok := i.(map[string]interface{}) if !ok { - desiredNotPointer, ok := d.(DeliveryPipelineSerialPipeline) - if !ok { - return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipeline or *DeliveryPipelineSerialPipeline", d) - } - desired = &desiredNotPointer + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} } - actual, ok := a.(*DeliveryPipelineSerialPipeline) + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + a, ok := i.([]interface{}) if !ok { - actualNotPointer, ok := a.(DeliveryPipelineSerialPipeline) - if !ok { - return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipeline", a) - } - actual = &actualNotPointer + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} } - if ds, err := dcl.Diff(desired.Stages, actual.Stages, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineSerialPipelineStagesNewStyle, EmptyObject: EmptyDeliveryPipelineSerialPipelineStages, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Stages")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} } - return diffs, nil + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c, item.(map[string]interface{}), res)) + } + + return items } -func compareDeliveryPipelineSerialPipelineStagesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } - desired, ok := d.(*DeliveryPipelineSerialPipelineStages) - if !ok { - desiredNotPointer, ok := d.(DeliveryPipelineSerialPipelineStages) - if !ok { - return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStages or *DeliveryPipelineSerialPipelineStages", d) - } - desired = &desiredNotPointer + m := make(map[string]interface{}) + if v := f.Service; !dcl.IsEmptyValueIndirect(v) { + m["service"] = v } - actual, ok := a.(*DeliveryPipelineSerialPipelineStages) + if v := f.Deployment; !dcl.IsEmptyValueIndirect(v) { + m["deployment"] = v + } + if v := f.DisablePodOverprovisioning; !dcl.IsEmptyValueIndirect(v) { + m["disablePodOverprovisioning"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + m, ok := i.(map[string]interface{}) if !ok { - actualNotPointer, ok := a.(DeliveryPipelineSerialPipelineStages) - if !ok { - return nil, fmt.Errorf("obj %v is not a DeliveryPipelineSerialPipelineStages", a) - } - actual = &actualNotPointer + return nil } - if ds, err := dcl.Diff(desired.TargetId, actual.TargetId, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("TargetId")); len(ds) != 0 || err != nil { + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + } + r.Service = dcl.FlattenString(m["service"]) + r.Deployment = dcl.FlattenString(m["deployment"]) + r.DisablePodOverprovisioning = dcl.FlattenBool(m["disablePodOverprovisioning"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, &item, res) if err != nil { return nil, err } - diffs = append(diffs, ds...) 
+ if i != nil { + items[k] = i + } } - if ds, err := dcl.Diff(desired.Profiles, actual.Profiles, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Profiles")); len(ds) != 0 || err != nil { + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, &item, res) if err != nil { return nil, err } - diffs = append(diffs, ds...) + + items = append(items, i) } - return diffs, nil + + return items, nil } -func compareDeliveryPipelineConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + } - desired, ok := d.(*DeliveryPipelineCondition) + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + a, ok := i.([]interface{}) if !ok { - desiredNotPointer, ok := d.(DeliveryPipelineCondition) - if !ok { - return nil, fmt.Errorf("obj %v is not a DeliveryPipelineCondition or *DeliveryPipelineCondition", d) - } - desired = &desiredNotPointer + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} } - actual, ok := a.(*DeliveryPipelineCondition) + + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + } + + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.AutomaticTrafficControl; !dcl.IsEmptyValueIndirect(v) { + m["automaticTrafficControl"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + m, ok := i.(map[string]interface{}) if !ok { - actualNotPointer, ok := a.(DeliveryPipelineCondition) - if !ok { - return nil, fmt.Errorf("obj %v is not a DeliveryPipelineCondition", a) - } - actual = &actualNotPointer + return nil } - if ds, err := dcl.Diff(desired.PipelineReadyCondition, actual.PipelineReadyCondition, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineConditionPipelineReadyConditionNewStyle, EmptyObject: EmptyDeliveryPipelineConditionPipelineReadyCondition, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("PipelineReadyCondition")); len(ds) != 0 || err != nil { + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + } + r.AutomaticTrafficControl = dcl.FlattenBool(m["automaticTrafficControl"]) + + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, &item, res) if err != nil { return nil, err } - diffs = append(diffs, ds...) 
+ if i != nil { + items[k] = i + } } - if ds, err := dcl.Diff(desired.TargetsPresentCondition, actual.TargetsPresentCondition, dcl.DiffInfo{ObjectFunction: compareDeliveryPipelineConditionTargetsPresentConditionNewStyle, EmptyObject: EmptyDeliveryPipelineConditionTargetsPresentCondition, OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("TargetsPresentCondition")); len(ds) != 0 || err != nil { + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, &item, res) if err != nil { return nil, err } - diffs = append(diffs, ds...) + + items = append(items, i) } - return diffs, nil -} -func compareDeliveryPipelineConditionPipelineReadyConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff + return items, nil +} - desired, ok := d.(*DeliveryPipelineConditionPipelineReadyCondition) +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + a, ok := i.(map[string]interface{}) if !ok { - desiredNotPointer, ok := d.(DeliveryPipelineConditionPipelineReadyCondition) - if !ok { - return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionPipelineReadyCondition or *DeliveryPipelineConditionPipelineReadyCondition", d) - } - desired = &desiredNotPointer + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} } - actual, ok := a.(*DeliveryPipelineConditionPipelineReadyCondition) + + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + } + + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + a, ok := i.([]interface{}) if !ok { - actualNotPointer, ok := a.(DeliveryPipelineConditionPipelineReadyCondition) - if !ok { - return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionPipelineReadyCondition", a) - } - actual = &actualNotPointer + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} } - if ds, err := dcl.Diff(desired.Status, actual.Status, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Status")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} } - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Percentages; v != nil { + m["percentages"] = v + } + if v := f.Verify; !dcl.IsEmptyValueIndirect(v) { + m["verify"] = v } - return diffs, nil -} -func compareDeliveryPipelineConditionTargetsPresentConditionNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff + return m, nil +} - desired, ok := d.(*DeliveryPipelineConditionTargetsPresentCondition) +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment from a JSON +// response object. +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + m, ok := i.(map[string]interface{}) if !ok { - desiredNotPointer, ok := d.(DeliveryPipelineConditionTargetsPresentCondition) - if !ok { - return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsPresentCondition or *DeliveryPipelineConditionTargetsPresentCondition", d) - } - desired = &desiredNotPointer + return nil } - actual, ok := a.(*DeliveryPipelineConditionTargetsPresentCondition) - if !ok { - actualNotPointer, ok := a.(DeliveryPipelineConditionTargetsPresentCondition) - if !ok { - return nil, fmt.Errorf("obj %v is not a DeliveryPipelineConditionTargetsPresentCondition", a) - } - actual = &actualNotPointer + + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment } + r.Percentages = 
dcl.FlattenIntSlice(m["percentages"]) + r.Verify = dcl.FlattenBool(m["verify"]) - if ds, err := dcl.Diff(desired.Status, actual.Status, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("Status")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) + return r +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment into a JSON +// request object. +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil } - if ds, err := dcl.Diff(desired.MissingTargets, actual.MissingTargets, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("MissingTargets")); len(ds) != 0 || err != nil { + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, &item, res) if err != nil { return nil, err } - diffs = append(diffs, ds...) + if i != nil { + items[k] = i + } } - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDeliveryPipelineUpdateDeliveryPipelineOperation")}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { + return items, nil +} + +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, &item, res) if err != nil { return nil, err } - diffs = append(diffs, ds...) + + items = append(items, i) } - return diffs, nil -} -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. -func (r *DeliveryPipeline) urlNormalized() *DeliveryPipeline { - normalized := dcl.Copy(*r).(DeliveryPipeline) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.Uid = dcl.SelfLinkToName(r.Uid) - normalized.Description = dcl.SelfLinkToName(r.Description) - normalized.Etag = dcl.SelfLinkToName(r.Etag) - normalized.Project = dcl.SelfLinkToName(r.Project) - normalized.Location = dcl.SelfLinkToName(r.Location) - return &normalized + return items, nil } -func (r *DeliveryPipeline) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateDeliveryPipeline" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}", nr.basePath(), userBasePath, fields), nil - +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} } - return "", fmt.Errorf("unknown update name: %s", updateName) -} + if len(a) == 0 { + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } -// marshal encodes the DeliveryPipeline resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *DeliveryPipeline) marshal(c *Client) ([]byte, error) { - m, err := expandDeliveryPipeline(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling DeliveryPipeline: %w", err) + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) + for k, item := range a { + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, item.(map[string]interface{}), res) } - return json.Marshal(m) + return items } -// unmarshalDeliveryPipeline decodes JSON responses into the DeliveryPipeline resource schema. -func unmarshalDeliveryPipeline(b []byte, c *Client, res *DeliveryPipeline) (*DeliveryPipeline, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} } - return unmarshalMapDeliveryPipeline(m, c, res) -} -func unmarshalMapDeliveryPipeline(m map[string]interface{}, c *Client, res *DeliveryPipeline) (*DeliveryPipeline, error) { + if len(a) == 0 { + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } - flattened := flattenDeliveryPipeline(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c, item.(map[string]interface{}), res)) } - return flattened, nil + + return items } -// expandDeliveryPipeline expands DeliveryPipeline into a JSON request object. 
-func expandDeliveryPipeline(c *Client, f *DeliveryPipeline) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Name into name: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - if v := f.Description; dcl.ValueShouldBeSent(v) { - m["description"] = v - } - if v := f.Annotations; dcl.ValueShouldBeSent(v) { - m["annotations"] = v - } - if v := f.Labels; dcl.ValueShouldBeSent(v) { - m["labels"] = v - } - if v, err := expandDeliveryPipelineSerialPipeline(c, f.SerialPipeline, res); err != nil { - return nil, fmt.Errorf("error expanding SerialPipeline into serialPipeline: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["serialPipeline"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Location into location: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["location"] = v +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment into a JSON +// request object. 
+func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil } - if v := f.Suspended; dcl.ValueShouldBeSent(v) { - m["suspended"] = v + + m := make(map[string]interface{}) + if v, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c, f.PhaseConfigs, res); err != nil { + return nil, fmt.Errorf("error expanding PhaseConfigs into phaseConfigs: %w", err) + } else if v != nil { + m["phaseConfigs"] = v } return m, nil } -// flattenDeliveryPipeline flattens DeliveryPipeline from a JSON request object into the -// DeliveryPipeline type. -func flattenDeliveryPipeline(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipeline { +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment from a JSON +// response object. 
+func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { m, ok := i.(map[string]interface{}) if !ok { return nil } - if len(m) == 0 { - return nil - } - resultRes := &DeliveryPipeline{} - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.Uid = dcl.FlattenString(m["uid"]) - resultRes.Description = dcl.FlattenString(m["description"]) - resultRes.Annotations = dcl.FlattenKeyValuePairs(m["annotations"]) - resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) - resultRes.CreateTime = dcl.FlattenString(m["createTime"]) - resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) - resultRes.SerialPipeline = flattenDeliveryPipelineSerialPipeline(c, m["serialPipeline"], res) - resultRes.Condition = flattenDeliveryPipelineCondition(c, m["condition"], res) - resultRes.Etag = dcl.FlattenString(m["etag"]) - resultRes.Project = dcl.FlattenString(m["project"]) - resultRes.Location = dcl.FlattenString(m["location"]) - resultRes.Suspended = dcl.FlattenBool(m["suspended"]) + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} - return resultRes + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + } + r.PhaseConfigs = flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c, m["phaseConfigs"], res) + + return r } -// expandDeliveryPipelineSerialPipelineMap expands the contents of DeliveryPipelineSerialPipeline into a JSON +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsMap expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs into a JSON // request object. 
-func expandDeliveryPipelineSerialPipelineMap(c *Client, f map[string]DeliveryPipelineSerialPipeline, res *DeliveryPipeline) (map[string]interface{}, error) { +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, res *DeliveryPipeline) (map[string]interface{}, error) { if f == nil { return nil, nil } items := make(map[string]interface{}) for k, item := range f { - i, err := expandDeliveryPipelineSerialPipeline(c, &item, res) + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, &item, res) if err != nil { return nil, err } @@ -1701,16 +5434,16 @@ func expandDeliveryPipelineSerialPipelineMap(c *Client, f map[string]DeliveryPip return items, nil } -// expandDeliveryPipelineSerialPipelineSlice expands the contents of DeliveryPipelineSerialPipeline into a JSON +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice expands the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs into a JSON // request object. 
-func expandDeliveryPipelineSerialPipelineSlice(c *Client, f []DeliveryPipelineSerialPipeline, res *DeliveryPipeline) ([]map[string]interface{}, error) { +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, res *DeliveryPipeline) ([]map[string]interface{}, error) { if f == nil { return nil, nil } items := []map[string]interface{}{} for _, item := range f { - i, err := expandDeliveryPipelineSerialPipeline(c, &item, res) + i, err := expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, &item, res) if err != nil { return nil, err } @@ -1721,91 +5454,101 @@ func expandDeliveryPipelineSerialPipelineSlice(c *Client, f []DeliveryPipelineSe return items, nil } -// flattenDeliveryPipelineSerialPipelineMap flattens the contents of DeliveryPipelineSerialPipeline from a JSON +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsMap flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs from a JSON // response object. 
-func flattenDeliveryPipelineSerialPipelineMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipeline { +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { a, ok := i.(map[string]interface{}) if !ok { - return map[string]DeliveryPipelineSerialPipeline{} + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} } if len(a) == 0 { - return map[string]DeliveryPipelineSerialPipeline{} + return map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} } - items := make(map[string]DeliveryPipelineSerialPipeline) + items := make(map[string]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) for k, item := range a { - items[k] = *flattenDeliveryPipelineSerialPipeline(c, item.(map[string]interface{}), res) + items[k] = *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, item.(map[string]interface{}), res) } return items } -// flattenDeliveryPipelineSerialPipelineSlice flattens the contents of DeliveryPipelineSerialPipeline from a JSON +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice flattens the contents of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs from a JSON // response object. 
-func flattenDeliveryPipelineSerialPipelineSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipeline { +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { a, ok := i.([]interface{}) if !ok { - return []DeliveryPipelineSerialPipeline{} + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} } if len(a) == 0 { - return []DeliveryPipelineSerialPipeline{} + return []DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} } - items := make([]DeliveryPipelineSerialPipeline, 0, len(a)) + items := make([]DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0, len(a)) for _, item := range a { - items = append(items, *flattenDeliveryPipelineSerialPipeline(c, item.(map[string]interface{}), res)) + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c, item.(map[string]interface{}), res)) } return items } -// expandDeliveryPipelineSerialPipeline expands an instance of DeliveryPipelineSerialPipeline into a JSON +// expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs expands an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs into a JSON // request object. 
-func expandDeliveryPipelineSerialPipeline(c *Client, f *DeliveryPipelineSerialPipeline, res *DeliveryPipeline) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { +func expandDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c *Client, f *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { return nil, nil } m := make(map[string]interface{}) - if v, err := expandDeliveryPipelineSerialPipelineStagesSlice(c, f.Stages, res); err != nil { - return nil, fmt.Errorf("error expanding Stages into stages: %w", err) - } else if v != nil { - m["stages"] = v + if v := f.PhaseId; !dcl.IsEmptyValueIndirect(v) { + m["phaseId"] = v + } + if v := f.Percentage; !dcl.IsEmptyValueIndirect(v) { + m["percentage"] = v + } + if v := f.Profiles; v != nil { + m["profiles"] = v + } + if v := f.Verify; !dcl.IsEmptyValueIndirect(v) { + m["verify"] = v } return m, nil } -// flattenDeliveryPipelineSerialPipeline flattens an instance of DeliveryPipelineSerialPipeline from a JSON +// flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs flattens an instance of DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs from a JSON // response object. 
-func flattenDeliveryPipelineSerialPipeline(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipeline { +func flattenDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { m, ok := i.(map[string]interface{}) if !ok { return nil } - r := &DeliveryPipelineSerialPipeline{} + r := &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{} if dcl.IsEmptyValueIndirect(i) { - return EmptyDeliveryPipelineSerialPipeline + return EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs } - r.Stages = flattenDeliveryPipelineSerialPipelineStagesSlice(c, m["stages"], res) + r.PhaseId = dcl.FlattenString(m["phaseId"]) + r.Percentage = dcl.FlattenInteger(m["percentage"]) + r.Profiles = dcl.FlattenStringSlice(m["profiles"]) + r.Verify = dcl.FlattenBool(m["verify"]) return r } -// expandDeliveryPipelineSerialPipelineStagesMap expands the contents of DeliveryPipelineSerialPipelineStages into a JSON +// expandDeliveryPipelineSerialPipelineStagesDeployParametersMap expands the contents of DeliveryPipelineSerialPipelineStagesDeployParameters into a JSON // request object. 
-func expandDeliveryPipelineSerialPipelineStagesMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStages, res *DeliveryPipeline) (map[string]interface{}, error) { +func expandDeliveryPipelineSerialPipelineStagesDeployParametersMap(c *Client, f map[string]DeliveryPipelineSerialPipelineStagesDeployParameters, res *DeliveryPipeline) (map[string]interface{}, error) { if f == nil { return nil, nil } items := make(map[string]interface{}) for k, item := range f { - i, err := expandDeliveryPipelineSerialPipelineStages(c, &item, res) + i, err := expandDeliveryPipelineSerialPipelineStagesDeployParameters(c, &item, res) if err != nil { return nil, err } @@ -1817,16 +5560,16 @@ func expandDeliveryPipelineSerialPipelineStagesMap(c *Client, f map[string]Deliv return items, nil } -// expandDeliveryPipelineSerialPipelineStagesSlice expands the contents of DeliveryPipelineSerialPipelineStages into a JSON +// expandDeliveryPipelineSerialPipelineStagesDeployParametersSlice expands the contents of DeliveryPipelineSerialPipelineStagesDeployParameters into a JSON // request object. 
-func expandDeliveryPipelineSerialPipelineStagesSlice(c *Client, f []DeliveryPipelineSerialPipelineStages, res *DeliveryPipeline) ([]map[string]interface{}, error) { +func expandDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c *Client, f []DeliveryPipelineSerialPipelineStagesDeployParameters, res *DeliveryPipeline) ([]map[string]interface{}, error) { if f == nil { return nil, nil } items := []map[string]interface{}{} for _, item := range f { - i, err := expandDeliveryPipelineSerialPipelineStages(c, &item, res) + i, err := expandDeliveryPipelineSerialPipelineStagesDeployParameters(c, &item, res) if err != nil { return nil, err } @@ -1837,79 +5580,79 @@ func expandDeliveryPipelineSerialPipelineStagesSlice(c *Client, f []DeliveryPipe return items, nil } -// flattenDeliveryPipelineSerialPipelineStagesMap flattens the contents of DeliveryPipelineSerialPipelineStages from a JSON +// flattenDeliveryPipelineSerialPipelineStagesDeployParametersMap flattens the contents of DeliveryPipelineSerialPipelineStagesDeployParameters from a JSON // response object. 
-func flattenDeliveryPipelineSerialPipelineStagesMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStages { +func flattenDeliveryPipelineSerialPipelineStagesDeployParametersMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineSerialPipelineStagesDeployParameters { a, ok := i.(map[string]interface{}) if !ok { - return map[string]DeliveryPipelineSerialPipelineStages{} + return map[string]DeliveryPipelineSerialPipelineStagesDeployParameters{} } if len(a) == 0 { - return map[string]DeliveryPipelineSerialPipelineStages{} + return map[string]DeliveryPipelineSerialPipelineStagesDeployParameters{} } - items := make(map[string]DeliveryPipelineSerialPipelineStages) + items := make(map[string]DeliveryPipelineSerialPipelineStagesDeployParameters) for k, item := range a { - items[k] = *flattenDeliveryPipelineSerialPipelineStages(c, item.(map[string]interface{}), res) + items[k] = *flattenDeliveryPipelineSerialPipelineStagesDeployParameters(c, item.(map[string]interface{}), res) } return items } -// flattenDeliveryPipelineSerialPipelineStagesSlice flattens the contents of DeliveryPipelineSerialPipelineStages from a JSON +// flattenDeliveryPipelineSerialPipelineStagesDeployParametersSlice flattens the contents of DeliveryPipelineSerialPipelineStagesDeployParameters from a JSON // response object. 
-func flattenDeliveryPipelineSerialPipelineStagesSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStages { +func flattenDeliveryPipelineSerialPipelineStagesDeployParametersSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineSerialPipelineStagesDeployParameters { a, ok := i.([]interface{}) if !ok { - return []DeliveryPipelineSerialPipelineStages{} + return []DeliveryPipelineSerialPipelineStagesDeployParameters{} } if len(a) == 0 { - return []DeliveryPipelineSerialPipelineStages{} + return []DeliveryPipelineSerialPipelineStagesDeployParameters{} } - items := make([]DeliveryPipelineSerialPipelineStages, 0, len(a)) + items := make([]DeliveryPipelineSerialPipelineStagesDeployParameters, 0, len(a)) for _, item := range a { - items = append(items, *flattenDeliveryPipelineSerialPipelineStages(c, item.(map[string]interface{}), res)) + items = append(items, *flattenDeliveryPipelineSerialPipelineStagesDeployParameters(c, item.(map[string]interface{}), res)) } return items } -// expandDeliveryPipelineSerialPipelineStages expands an instance of DeliveryPipelineSerialPipelineStages into a JSON +// expandDeliveryPipelineSerialPipelineStagesDeployParameters expands an instance of DeliveryPipelineSerialPipelineStagesDeployParameters into a JSON // request object. 
-func expandDeliveryPipelineSerialPipelineStages(c *Client, f *DeliveryPipelineSerialPipelineStages, res *DeliveryPipeline) (map[string]interface{}, error) { +func expandDeliveryPipelineSerialPipelineStagesDeployParameters(c *Client, f *DeliveryPipelineSerialPipelineStagesDeployParameters, res *DeliveryPipeline) (map[string]interface{}, error) { if f == nil { return nil, nil } m := make(map[string]interface{}) - if v := f.TargetId; !dcl.IsEmptyValueIndirect(v) { - m["targetId"] = v + if v := f.Values; !dcl.IsEmptyValueIndirect(v) { + m["values"] = v } - if v := f.Profiles; v != nil { - m["profiles"] = v + if v := f.MatchTargetLabels; !dcl.IsEmptyValueIndirect(v) { + m["matchTargetLabels"] = v } return m, nil } -// flattenDeliveryPipelineSerialPipelineStages flattens an instance of DeliveryPipelineSerialPipelineStages from a JSON +// flattenDeliveryPipelineSerialPipelineStagesDeployParameters flattens an instance of DeliveryPipelineSerialPipelineStagesDeployParameters from a JSON // response object. 
-func flattenDeliveryPipelineSerialPipelineStages(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStages { +func flattenDeliveryPipelineSerialPipelineStagesDeployParameters(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineSerialPipelineStagesDeployParameters { m, ok := i.(map[string]interface{}) if !ok { return nil } - r := &DeliveryPipelineSerialPipelineStages{} + r := &DeliveryPipelineSerialPipelineStagesDeployParameters{} if dcl.IsEmptyValueIndirect(i) { - return EmptyDeliveryPipelineSerialPipelineStages - } - r.TargetId = dcl.FlattenString(m["targetId"]) - r.Profiles = dcl.FlattenStringSlice(m["profiles"]) + return EmptyDeliveryPipelineSerialPipelineStagesDeployParameters + } + r.Values = dcl.FlattenKeyValuePairs(m["values"]) + r.MatchTargetLabels = dcl.FlattenKeyValuePairs(m["matchTargetLabels"]) return r } @@ -2013,6 +5756,11 @@ func expandDeliveryPipelineCondition(c *Client, f *DeliveryPipelineCondition, re } else if !dcl.IsEmptyValueIndirect(v) { m["targetsPresentCondition"] = v } + if v, err := expandDeliveryPipelineConditionTargetsTypeCondition(c, f.TargetsTypeCondition, res); err != nil { + return nil, fmt.Errorf("error expanding TargetsTypeCondition into targetsTypeCondition: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["targetsTypeCondition"] = v + } return m, nil } @@ -2032,6 +5780,7 @@ func flattenDeliveryPipelineCondition(c *Client, i interface{}, res *DeliveryPip } r.PipelineReadyCondition = flattenDeliveryPipelineConditionPipelineReadyCondition(c, m["pipelineReadyCondition"], res) r.TargetsPresentCondition = flattenDeliveryPipelineConditionTargetsPresentCondition(c, m["targetsPresentCondition"], res) + r.TargetsTypeCondition = flattenDeliveryPipelineConditionTargetsTypeCondition(c, m["targetsTypeCondition"], res) return r } @@ -2276,6 +6025,124 @@ func flattenDeliveryPipelineConditionTargetsPresentCondition(c *Client, i interf return r } +// 
expandDeliveryPipelineConditionTargetsTypeConditionMap expands the contents of DeliveryPipelineConditionTargetsTypeCondition into a JSON +// request object. +func expandDeliveryPipelineConditionTargetsTypeConditionMap(c *Client, f map[string]DeliveryPipelineConditionTargetsTypeCondition, res *DeliveryPipeline) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandDeliveryPipelineConditionTargetsTypeCondition(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandDeliveryPipelineConditionTargetsTypeConditionSlice expands the contents of DeliveryPipelineConditionTargetsTypeCondition into a JSON +// request object. +func expandDeliveryPipelineConditionTargetsTypeConditionSlice(c *Client, f []DeliveryPipelineConditionTargetsTypeCondition, res *DeliveryPipeline) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandDeliveryPipelineConditionTargetsTypeCondition(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenDeliveryPipelineConditionTargetsTypeConditionMap flattens the contents of DeliveryPipelineConditionTargetsTypeCondition from a JSON +// response object. 
+func flattenDeliveryPipelineConditionTargetsTypeConditionMap(c *Client, i interface{}, res *DeliveryPipeline) map[string]DeliveryPipelineConditionTargetsTypeCondition { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]DeliveryPipelineConditionTargetsTypeCondition{} + } + + if len(a) == 0 { + return map[string]DeliveryPipelineConditionTargetsTypeCondition{} + } + + items := make(map[string]DeliveryPipelineConditionTargetsTypeCondition) + for k, item := range a { + items[k] = *flattenDeliveryPipelineConditionTargetsTypeCondition(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenDeliveryPipelineConditionTargetsTypeConditionSlice flattens the contents of DeliveryPipelineConditionTargetsTypeCondition from a JSON +// response object. +func flattenDeliveryPipelineConditionTargetsTypeConditionSlice(c *Client, i interface{}, res *DeliveryPipeline) []DeliveryPipelineConditionTargetsTypeCondition { + a, ok := i.([]interface{}) + if !ok { + return []DeliveryPipelineConditionTargetsTypeCondition{} + } + + if len(a) == 0 { + return []DeliveryPipelineConditionTargetsTypeCondition{} + } + + items := make([]DeliveryPipelineConditionTargetsTypeCondition, 0, len(a)) + for _, item := range a { + items = append(items, *flattenDeliveryPipelineConditionTargetsTypeCondition(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandDeliveryPipelineConditionTargetsTypeCondition expands an instance of DeliveryPipelineConditionTargetsTypeCondition into a JSON +// request object. 
+func expandDeliveryPipelineConditionTargetsTypeCondition(c *Client, f *DeliveryPipelineConditionTargetsTypeCondition, res *DeliveryPipeline) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Status; !dcl.IsEmptyValueIndirect(v) { + m["status"] = v + } + if v := f.ErrorDetails; !dcl.IsEmptyValueIndirect(v) { + m["errorDetails"] = v + } + + return m, nil +} + +// flattenDeliveryPipelineConditionTargetsTypeCondition flattens an instance of DeliveryPipelineConditionTargetsTypeCondition from a JSON +// response object. +func flattenDeliveryPipelineConditionTargetsTypeCondition(c *Client, i interface{}, res *DeliveryPipeline) *DeliveryPipelineConditionTargetsTypeCondition { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &DeliveryPipelineConditionTargetsTypeCondition{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyDeliveryPipelineConditionTargetsTypeCondition + } + r.Status = dcl.FlattenBool(m["status"]) + r.ErrorDetails = dcl.FlattenString(m["errorDetails"]) + + return r +} + // This function returns a matcher that checks whether a serialized resource matches this resource // in its parameters (as defined by the fields in a Get, which definitionally define resource // identity). This is useful in extracting the element from a List call. @@ -2398,6 +6265,152 @@ func extractDeliveryPipelineSerialPipelineFields(r *DeliveryPipeline, o *Deliver return nil } func extractDeliveryPipelineSerialPipelineStagesFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStages) error { + vStrategy := o.Strategy + if vStrategy == nil { + // note: explicitly not the empty object. 
+ vStrategy = &DeliveryPipelineSerialPipelineStagesStrategy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyFields(r, vStrategy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vStrategy) { + o.Strategy = vStrategy + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategy) error { + vStandard := o.Standard + if vStandard == nil { + // note: explicitly not the empty object. + vStandard = &DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardFields(r, vStandard); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vStandard) { + o.Standard = vStandard + } + vCanary := o.Canary + if vCanary == nil { + // note: explicitly not the empty object. + vCanary = &DeliveryPipelineSerialPipelineStagesStrategyCanary{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryFields(r, vCanary); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCanary) { + o.Canary = vCanary + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyStandardFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandard) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanary) error { + vRuntimeConfig := o.RuntimeConfig + if vRuntimeConfig == nil { + // note: explicitly not the empty object. 
+ vRuntimeConfig = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigFields(r, vRuntimeConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRuntimeConfig) { + o.RuntimeConfig = vRuntimeConfig + } + vCanaryDeployment := o.CanaryDeployment + if vCanaryDeployment == nil { + // note: explicitly not the empty object. + vCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentFields(r, vCanaryDeployment); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCanaryDeployment) { + o.CanaryDeployment = vCanaryDeployment + } + vCustomCanaryDeployment := o.CustomCanaryDeployment + if vCustomCanaryDeployment == nil { + // note: explicitly not the empty object. + vCustomCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentFields(r, vCustomCanaryDeployment); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCustomCanaryDeployment) { + o.CustomCanaryDeployment = vCustomCanaryDeployment + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) error { + vKubernetes := o.Kubernetes + if vKubernetes == nil { + // note: explicitly not the empty object. 
+ vKubernetes = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesFields(r, vKubernetes); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubernetes) { + o.Kubernetes = vKubernetes + } + vCloudRun := o.CloudRun + if vCloudRun == nil { + // note: explicitly not the empty object. + vCloudRun = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunFields(r, vCloudRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCloudRun) { + o.CloudRun = vCloudRun + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) error { + vGatewayServiceMesh := o.GatewayServiceMesh + if vGatewayServiceMesh == nil { + // note: explicitly not the empty object. + vGatewayServiceMesh = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshFields(r, vGatewayServiceMesh); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGatewayServiceMesh) { + o.GatewayServiceMesh = vGatewayServiceMesh + } + vServiceNetworking := o.ServiceNetworking + if vServiceNetworking == nil { + // note: explicitly not the empty object. 
+ vServiceNetworking = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingFields(r, vServiceNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vServiceNetworking) { + o.ServiceNetworking = vServiceNetworking + } + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) error { + return nil +} +func extractDeliveryPipelineSerialPipelineStagesDeployParametersFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesDeployParameters) error { return nil } func extractDeliveryPipelineConditionFields(r 
*DeliveryPipeline, o *DeliveryPipelineCondition) error { @@ -2423,6 +6436,17 @@ func extractDeliveryPipelineConditionFields(r *DeliveryPipeline, o *DeliveryPipe if !dcl.IsEmptyValueIndirect(vTargetsPresentCondition) { o.TargetsPresentCondition = vTargetsPresentCondition } + vTargetsTypeCondition := o.TargetsTypeCondition + if vTargetsTypeCondition == nil { + // note: explicitly not the empty object. + vTargetsTypeCondition = &DeliveryPipelineConditionTargetsTypeCondition{} + } + if err := extractDeliveryPipelineConditionTargetsTypeConditionFields(r, vTargetsTypeCondition); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTargetsTypeCondition) { + o.TargetsTypeCondition = vTargetsTypeCondition + } return nil } func extractDeliveryPipelineConditionPipelineReadyConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionPipelineReadyCondition) error { @@ -2431,6 +6455,9 @@ func extractDeliveryPipelineConditionPipelineReadyConditionFields(r *DeliveryPip func extractDeliveryPipelineConditionTargetsPresentConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionTargetsPresentCondition) error { return nil } +func extractDeliveryPipelineConditionTargetsTypeConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionTargetsTypeCondition) error { + return nil +} func postReadExtractDeliveryPipelineFields(r *DeliveryPipeline) error { vSerialPipeline := r.SerialPipeline @@ -2461,6 +6488,152 @@ func postReadExtractDeliveryPipelineSerialPipelineFields(r *DeliveryPipeline, o return nil } func postReadExtractDeliveryPipelineSerialPipelineStagesFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStages) error { + vStrategy := o.Strategy + if vStrategy == nil { + // note: explicitly not the empty object. 
+ vStrategy = &DeliveryPipelineSerialPipelineStagesStrategy{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyFields(r, vStrategy); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vStrategy) { + o.Strategy = vStrategy + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategy) error { + vStandard := o.Standard + if vStandard == nil { + // note: explicitly not the empty object. + vStandard = &DeliveryPipelineSerialPipelineStagesStrategyStandard{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyStandardFields(r, vStandard); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vStandard) { + o.Standard = vStandard + } + vCanary := o.Canary + if vCanary == nil { + // note: explicitly not the empty object. + vCanary = &DeliveryPipelineSerialPipelineStagesStrategyCanary{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryFields(r, vCanary); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCanary) { + o.Canary = vCanary + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyStandardFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyStandard) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanary) error { + vRuntimeConfig := o.RuntimeConfig + if vRuntimeConfig == nil { + // note: explicitly not the empty object. 
+ vRuntimeConfig = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigFields(r, vRuntimeConfig); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRuntimeConfig) { + o.RuntimeConfig = vRuntimeConfig + } + vCanaryDeployment := o.CanaryDeployment + if vCanaryDeployment == nil { + // note: explicitly not the empty object. + vCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentFields(r, vCanaryDeployment); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCanaryDeployment) { + o.CanaryDeployment = vCanaryDeployment + } + vCustomCanaryDeployment := o.CustomCanaryDeployment + if vCustomCanaryDeployment == nil { + // note: explicitly not the empty object. + vCustomCanaryDeployment = &DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentFields(r, vCustomCanaryDeployment); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCustomCanaryDeployment) { + o.CustomCanaryDeployment = vCustomCanaryDeployment + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) error { + vKubernetes := o.Kubernetes + if vKubernetes == nil { + // note: explicitly not the empty object. 
+ vKubernetes = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesFields(r, vKubernetes); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vKubernetes) { + o.Kubernetes = vKubernetes + } + vCloudRun := o.CloudRun + if vCloudRun == nil { + // note: explicitly not the empty object. + vCloudRun = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunFields(r, vCloudRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vCloudRun) { + o.CloudRun = vCloudRun + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) error { + vGatewayServiceMesh := o.GatewayServiceMesh + if vGatewayServiceMesh == nil { + // note: explicitly not the empty object. + vGatewayServiceMesh = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshFields(r, vGatewayServiceMesh); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vGatewayServiceMesh) { + o.GatewayServiceMesh = vGatewayServiceMesh + } + vServiceNetworking := o.ServiceNetworking + if vServiceNetworking == nil { + // note: explicitly not the empty object. 
+ vServiceNetworking = &DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{} + } + if err := extractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingFields(r, vServiceNetworking); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vServiceNetworking) { + o.ServiceNetworking = vServiceNetworking + } + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) error { + return nil +} +func postReadExtractDeliveryPipelineSerialPipelineStagesDeployParametersFields(r *DeliveryPipeline, o *DeliveryPipelineSerialPipelineStagesDeployParameters) error { return nil 
} func postReadExtractDeliveryPipelineConditionFields(r *DeliveryPipeline, o *DeliveryPipelineCondition) error { @@ -2486,6 +6659,17 @@ func postReadExtractDeliveryPipelineConditionFields(r *DeliveryPipeline, o *Deli if !dcl.IsEmptyValueIndirect(vTargetsPresentCondition) { o.TargetsPresentCondition = vTargetsPresentCondition } + vTargetsTypeCondition := o.TargetsTypeCondition + if vTargetsTypeCondition == nil { + // note: explicitly not the empty object. + vTargetsTypeCondition = &DeliveryPipelineConditionTargetsTypeCondition{} + } + if err := extractDeliveryPipelineConditionTargetsTypeConditionFields(r, vTargetsTypeCondition); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vTargetsTypeCondition) { + o.TargetsTypeCondition = vTargetsTypeCondition + } return nil } func postReadExtractDeliveryPipelineConditionPipelineReadyConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionPipelineReadyCondition) error { @@ -2494,3 +6678,6 @@ func postReadExtractDeliveryPipelineConditionPipelineReadyConditionFields(r *Del func postReadExtractDeliveryPipelineConditionTargetsPresentConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionTargetsPresentCondition) error { return nil } +func postReadExtractDeliveryPipelineConditionTargetsTypeConditionFields(r *DeliveryPipeline, o *DeliveryPipelineConditionTargetsTypeCondition) error { + return nil +} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_schema.go index 4820b9b3b8..394f5675e4 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_schema.go +++ 
b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_schema.go @@ -183,6 +183,24 @@ func DCLDeliveryPipelineSchema() *dcl.Schema { }, }, }, + "targetsTypeCondition": &dcl.Property{ + Type: "object", + GoName: "TargetsTypeCondition", + GoType: "DeliveryPipelineConditionTargetsTypeCondition", + Description: "Details on the whether the targets enumerated in the pipeline are of the same type.", + Properties: map[string]*dcl.Property{ + "errorDetails": &dcl.Property{ + Type: "string", + GoName: "ErrorDetails", + Description: "Human readable error message.", + }, + "status": &dcl.Property{ + Type: "boolean", + GoName: "Status", + Description: "True if the targets are all a comparable type. For example this is true if all targets are GKE clusters. This is false if some targets are Cloud Run targets and others are GKE clusters.", + }, + }, + }, }, }, "createTime": &dcl.Property{ @@ -254,6 +272,38 @@ func DCLDeliveryPipelineSchema() *dcl.Schema { Type: "object", GoType: "DeliveryPipelineSerialPipelineStages", Properties: map[string]*dcl.Property{ + "deployParameters": &dcl.Property{ + Type: "array", + GoName: "DeployParameters", + Description: "Optional. The deploy parameters to use for the target in this stage.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "DeliveryPipelineSerialPipelineStagesDeployParameters", + Required: []string{ + "values", + }, + Properties: map[string]*dcl.Property{ + "matchTargetLabels": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "MatchTargetLabels", + Description: "Optional. Deploy parameters are applied to targets with match labels. 
If unspecified, deploy parameters are applied to all targets (including child targets of a multi-target).", + }, + "values": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "Values", + Description: "Required. Values are deploy parameters in key-value pairs.", + }, + }, + }, + }, "profiles": &dcl.Property{ Type: "array", GoName: "Profiles", @@ -265,6 +315,220 @@ func DCLDeliveryPipelineSchema() *dcl.Schema { GoType: "string", }, }, + "strategy": &dcl.Property{ + Type: "object", + GoName: "Strategy", + GoType: "DeliveryPipelineSerialPipelineStagesStrategy", + Description: "Optional. The strategy to use for a `Rollout` to this stage.", + Properties: map[string]*dcl.Property{ + "canary": &dcl.Property{ + Type: "object", + GoName: "Canary", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanary", + Description: "Canary deployment strategy provides progressive percentage based deployments to a Target.", + Properties: map[string]*dcl.Property{ + "canaryDeployment": &dcl.Property{ + Type: "object", + GoName: "CanaryDeployment", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment", + Description: "Configures the progressive based deployment for a Target.", + Conflicts: []string{ + "customCanaryDeployment", + }, + Required: []string{ + "percentages", + }, + Properties: map[string]*dcl.Property{ + "percentages": &dcl.Property{ + Type: "array", + GoName: "Percentages", + Description: "Required. The percentage based deployments that will occur as a part of a `Rollout`. 
List is expected in ascending order and each integer n is 0 <= n < 100.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "integer", + Format: "int64", + GoType: "int64", + }, + }, + "verify": &dcl.Property{ + Type: "boolean", + GoName: "Verify", + Description: "Whether to run verify tests after each percentage deployment.", + }, + }, + }, + "customCanaryDeployment": &dcl.Property{ + Type: "object", + GoName: "CustomCanaryDeployment", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment", + Description: "Configures the progressive based deployment for a Target, but allows customizing at the phase level where a phase represents each of the percentage deployments.", + Conflicts: []string{ + "canaryDeployment", + }, + Required: []string{ + "phaseConfigs", + }, + Properties: map[string]*dcl.Property{ + "phaseConfigs": &dcl.Property{ + Type: "array", + GoName: "PhaseConfigs", + Description: "Required. Configuration for each phase in the canary deployment in the order executed.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "object", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs", + Required: []string{ + "phaseId", + "percentage", + }, + Properties: map[string]*dcl.Property{ + "percentage": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Percentage", + Description: "Required. Percentage deployment for the phase.", + }, + "phaseId": &dcl.Property{ + Type: "string", + GoName: "PhaseId", + Description: "Required. The ID to assign to the `Rollout` phase. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. 
In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", + }, + "profiles": &dcl.Property{ + Type: "array", + GoName: "Profiles", + Description: "Skaffold profiles to use when rendering the manifest for this phase. These are in addition to the profiles list specified in the `DeliveryPipeline` stage.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "verify": &dcl.Property{ + Type: "boolean", + GoName: "Verify", + Description: "Whether to run verify tests after the deployment.", + }, + }, + }, + }, + }, + }, + "runtimeConfig": &dcl.Property{ + Type: "object", + GoName: "RuntimeConfig", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig", + Description: "Optional. Runtime specific configurations for the deployment strategy. The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment.", + Properties: map[string]*dcl.Property{ + "cloudRun": &dcl.Property{ + Type: "object", + GoName: "CloudRun", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun", + Description: "Cloud Run runtime configuration.", + Conflicts: []string{ + "kubernetes", + }, + Properties: map[string]*dcl.Property{ + "automaticTrafficControl": &dcl.Property{ + Type: "boolean", + GoName: "AutomaticTrafficControl", + Description: "Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. 
This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments.", + }, + }, + }, + "kubernetes": &dcl.Property{ + Type: "object", + GoName: "Kubernetes", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes", + Description: "Kubernetes runtime configuration.", + Conflicts: []string{ + "cloudRun", + }, + Properties: map[string]*dcl.Property{ + "gatewayServiceMesh": &dcl.Property{ + Type: "object", + GoName: "GatewayServiceMesh", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh", + Description: "Kubernetes Gateway API service mesh configuration.", + Conflicts: []string{ + "serviceNetworking", + }, + Required: []string{ + "httpRoute", + "service", + "deployment", + }, + Properties: map[string]*dcl.Property{ + "deployment": &dcl.Property{ + Type: "string", + GoName: "Deployment", + Description: "Required. Name of the Kubernetes Deployment whose traffic is managed by the specified HTTPRoute and Service.", + }, + "httpRoute": &dcl.Property{ + Type: "string", + GoName: "HttpRoute", + Description: "Required. Name of the Gateway API HTTPRoute.", + }, + "service": &dcl.Property{ + Type: "string", + GoName: "Service", + Description: "Required. Name of the Kubernetes Service.", + }, + }, + }, + "serviceNetworking": &dcl.Property{ + Type: "object", + GoName: "ServiceNetworking", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking", + Description: "Kubernetes Service networking configuration.", + Conflicts: []string{ + "gatewayServiceMesh", + }, + Required: []string{ + "service", + "deployment", + }, + Properties: map[string]*dcl.Property{ + "deployment": &dcl.Property{ + Type: "string", + GoName: "Deployment", + Description: "Required. 
Name of the Kubernetes Deployment whose traffic is managed by the specified Service.", + }, + "disablePodOverprovisioning": &dcl.Property{ + Type: "boolean", + GoName: "DisablePodOverprovisioning", + Description: "Optional. Whether to disable Pod overprovisioning. If Pod overprovisioning is disabled then Cloud Deploy will limit the number of total Pods used for the deployment strategy to the number of Pods the Deployment has on the cluster.", + }, + "service": &dcl.Property{ + Type: "string", + GoName: "Service", + Description: "Required. Name of the Kubernetes Service.", + }, + }, + }, + }, + }, + }, + }, + }, + }, + "standard": &dcl.Property{ + Type: "object", + GoName: "Standard", + GoType: "DeliveryPipelineSerialPipelineStagesStrategyStandard", + Description: "Standard deployment strategy executes a single deploy and allows verifying the deployment.", + Properties: map[string]*dcl.Property{ + "verify": &dcl.Property{ + Type: "boolean", + GoName: "Verify", + Description: "Whether to verify a deployment.", + }, + }, + }, + }, + }, "targetId": &dcl.Property{ Type: "string", GoName: "TargetId", diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_yaml_embed.go index a5bb36491d..66c2d740d4 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/delivery_pipeline_yaml_embed.go @@ -17,7 +17,7 @@ package clouddeploy // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/clouddeploy/delivery_pipeline.yaml -var YAML_delivery_pipeline = 
[]byte("info:\n title: Clouddeploy/DeliveryPipeline\n description: The Cloud Deploy `DeliveryPipeline` resource\n x-dcl-struct-name: DeliveryPipeline\n x-dcl-has-iam: false\n x-dcl-ref:\n text: REST API\n url: https://cloud.google.com/deploy/docs/api/reference/rest/v1/projects.locations.deliveryPipelines\npaths:\n get:\n description: The function used to get information about a DeliveryPipeline\n parameters:\n - name: deliveryPipeline\n required: true\n description: A full instance of a DeliveryPipeline\n apply:\n description: The function used to apply information about a DeliveryPipeline\n parameters:\n - name: deliveryPipeline\n required: true\n description: A full instance of a DeliveryPipeline\n delete:\n description: The function used to delete a DeliveryPipeline\n parameters:\n - name: deliveryPipeline\n required: true\n description: A full instance of a DeliveryPipeline\n deleteAll:\n description: The function used to delete all DeliveryPipeline\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many DeliveryPipeline\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n DeliveryPipeline:\n title: DeliveryPipeline\n x-dcl-id: projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n - location\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: User annotations. These attributes can only be set and used\n by the user, and not by Google Cloud Deploy. 
See https://google.aip.dev/128#annotations\n for more details such as format and size limitations.\n condition:\n type: object\n x-dcl-go-name: Condition\n x-dcl-go-type: DeliveryPipelineCondition\n readOnly: true\n description: Output only. Information around the state of the Delivery Pipeline.\n properties:\n pipelineReadyCondition:\n type: object\n x-dcl-go-name: PipelineReadyCondition\n x-dcl-go-type: DeliveryPipelineConditionPipelineReadyCondition\n description: Details around the Pipeline's overall status.\n properties:\n status:\n type: boolean\n x-dcl-go-name: Status\n description: True if the Pipeline is in a valid state. Otherwise\n at least one condition in `PipelineCondition` is in an invalid\n state. Iterate over those conditions and see which condition(s)\n has status = false to find out what is wrong with the Pipeline.\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last time the condition was updated.\n targetsPresentCondition:\n type: object\n x-dcl-go-name: TargetsPresentCondition\n x-dcl-go-type: DeliveryPipelineConditionTargetsPresentCondition\n description: Details around targets enumerated in the pipeline.\n properties:\n missingTargets:\n type: array\n x-dcl-go-name: MissingTargets\n description: The list of Target names that are missing. For example,\n projects/{project_id}/locations/{location_name}/targets/{target_name}.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Clouddeploy/Target\n field: selfLink\n status:\n type: boolean\n x-dcl-go-name: Status\n description: True if there aren't any missing Targets.\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last time the condition was updated.\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. 
Time at which the pipeline was created.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Description of the `DeliveryPipeline`. Max length is 255 characters.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: This checksum is computed by the server based on the value\n of other fields, and may be sent on update and delete requests to ensure\n the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Labels are attributes that can be set and used by both the\n user and by Google Cloud Deploy. Labels must meet the following constraints:\n * Keys and values can contain only lowercase letters, numeric characters,\n underscores, and dashes. * All characters must use UTF-8 encoding, and\n international characters are allowed. * Keys must start with a lowercase\n letter or international character. * Each resource is limited to a maximum\n of 64 labels. Both keys and values are additionally constrained to be\n <= 128 bytes.'\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the `DeliveryPipeline`. Format is [a-z][a-z0-9\\-]{0,62}.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n serialPipeline:\n type: object\n x-dcl-go-name: SerialPipeline\n x-dcl-go-type: DeliveryPipelineSerialPipeline\n description: SerialPipeline defines a sequential set of stages for a `DeliveryPipeline`.\n properties:\n stages:\n type: array\n x-dcl-go-name: Stages\n description: Each stage specifies configuration for a `Target`. 
The\n ordering of this list defines the promotion flow.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeliveryPipelineSerialPipelineStages\n properties:\n profiles:\n type: array\n x-dcl-go-name: Profiles\n description: Skaffold profiles to use when rendering the manifest\n for this stage's `Target`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n targetId:\n type: string\n x-dcl-go-name: TargetId\n description: The target_id to which this stage points. This field\n refers exclusively to the last segment of a target name. For\n example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`).\n The location of the `Target` is inferred to be the same as the\n location of the `DeliveryPipeline` that contains this `Stage`.\n suspended:\n type: boolean\n x-dcl-go-name: Suspended\n description: When suspended, no new releases or rollouts can be created,\n but in-progress ones will complete.\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. Unique identifier of the `DeliveryPipeline`.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
Most recent time at which the pipeline was updated.\n x-kubernetes-immutable: true\n") +var YAML_delivery_pipeline = []byte("info:\n title: Clouddeploy/DeliveryPipeline\n description: The Cloud Deploy `DeliveryPipeline` resource\n x-dcl-struct-name: DeliveryPipeline\n x-dcl-has-iam: false\n x-dcl-ref:\n text: REST API\n url: https://cloud.google.com/deploy/docs/api/reference/rest/v1/projects.locations.deliveryPipelines\npaths:\n get:\n description: The function used to get information about a DeliveryPipeline\n parameters:\n - name: deliveryPipeline\n required: true\n description: A full instance of a DeliveryPipeline\n apply:\n description: The function used to apply information about a DeliveryPipeline\n parameters:\n - name: deliveryPipeline\n required: true\n description: A full instance of a DeliveryPipeline\n delete:\n description: The function used to delete a DeliveryPipeline\n parameters:\n - name: deliveryPipeline\n required: true\n description: A full instance of a DeliveryPipeline\n deleteAll:\n description: The function used to delete all DeliveryPipeline\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many DeliveryPipeline\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n DeliveryPipeline:\n title: DeliveryPipeline\n x-dcl-id: projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n - location\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: User annotations. 
These attributes can only be set and used\n by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations\n for more details such as format and size limitations.\n condition:\n type: object\n x-dcl-go-name: Condition\n x-dcl-go-type: DeliveryPipelineCondition\n readOnly: true\n description: Output only. Information around the state of the Delivery Pipeline.\n properties:\n pipelineReadyCondition:\n type: object\n x-dcl-go-name: PipelineReadyCondition\n x-dcl-go-type: DeliveryPipelineConditionPipelineReadyCondition\n description: Details around the Pipeline's overall status.\n properties:\n status:\n type: boolean\n x-dcl-go-name: Status\n description: True if the Pipeline is in a valid state. Otherwise\n at least one condition in `PipelineCondition` is in an invalid\n state. Iterate over those conditions and see which condition(s)\n has status = false to find out what is wrong with the Pipeline.\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last time the condition was updated.\n targetsPresentCondition:\n type: object\n x-dcl-go-name: TargetsPresentCondition\n x-dcl-go-type: DeliveryPipelineConditionTargetsPresentCondition\n description: Details around targets enumerated in the pipeline.\n properties:\n missingTargets:\n type: array\n x-dcl-go-name: MissingTargets\n description: The list of Target names that are missing. 
For example,\n projects/{project_id}/locations/{location_name}/targets/{target_name}.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Clouddeploy/Target\n field: selfLink\n status:\n type: boolean\n x-dcl-go-name: Status\n description: True if there aren't any missing Targets.\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n description: Last time the condition was updated.\n targetsTypeCondition:\n type: object\n x-dcl-go-name: TargetsTypeCondition\n x-dcl-go-type: DeliveryPipelineConditionTargetsTypeCondition\n description: Details on the whether the targets enumerated in the pipeline\n are of the same type.\n properties:\n errorDetails:\n type: string\n x-dcl-go-name: ErrorDetails\n description: Human readable error message.\n status:\n type: boolean\n x-dcl-go-name: Status\n description: True if the targets are all a comparable type. For\n example this is true if all targets are GKE clusters. This is\n false if some targets are Cloud Run targets and others are GKE\n clusters.\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Time at which the pipeline was created.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Description of the `DeliveryPipeline`. Max length is 255 characters.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: This checksum is computed by the server based on the value\n of other fields, and may be sent on update and delete requests to ensure\n the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Labels are attributes that can be set and used by both the\n user and by Google Cloud Deploy. 
Labels must meet the following constraints:\n * Keys and values can contain only lowercase letters, numeric characters,\n underscores, and dashes. * All characters must use UTF-8 encoding, and\n international characters are allowed. * Keys must start with a lowercase\n letter or international character. * Each resource is limited to a maximum\n of 64 labels. Both keys and values are additionally constrained to be\n <= 128 bytes.'\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the `DeliveryPipeline`. Format is [a-z][a-z0-9\\-]{0,62}.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n serialPipeline:\n type: object\n x-dcl-go-name: SerialPipeline\n x-dcl-go-type: DeliveryPipelineSerialPipeline\n description: SerialPipeline defines a sequential set of stages for a `DeliveryPipeline`.\n properties:\n stages:\n type: array\n x-dcl-go-name: Stages\n description: Each stage specifies configuration for a `Target`. The\n ordering of this list defines the promotion flow.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeliveryPipelineSerialPipelineStages\n properties:\n deployParameters:\n type: array\n x-dcl-go-name: DeployParameters\n description: Optional. The deploy parameters to use for the target\n in this stage.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesDeployParameters\n required:\n - values\n properties:\n matchTargetLabels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: MatchTargetLabels\n description: Optional. Deploy parameters are applied to\n targets with match labels. 
If unspecified, deploy parameters\n are applied to all targets (including child targets of\n a multi-target).\n values:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Values\n description: Required. Values are deploy parameters in key-value\n pairs.\n profiles:\n type: array\n x-dcl-go-name: Profiles\n description: Skaffold profiles to use when rendering the manifest\n for this stage's `Target`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n strategy:\n type: object\n x-dcl-go-name: Strategy\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategy\n description: Optional. The strategy to use for a `Rollout` to\n this stage.\n properties:\n canary:\n type: object\n x-dcl-go-name: Canary\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanary\n description: Canary deployment strategy provides progressive\n percentage based deployments to a Target.\n properties:\n canaryDeployment:\n type: object\n x-dcl-go-name: CanaryDeployment\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment\n description: Configures the progressive based deployment\n for a Target.\n x-dcl-conflicts:\n - customCanaryDeployment\n required:\n - percentages\n properties:\n percentages:\n type: array\n x-dcl-go-name: Percentages\n description: Required. The percentage based deployments\n that will occur as a part of a `Rollout`. 
List is\n expected in ascending order and each integer n is\n 0 <= n < 100.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n verify:\n type: boolean\n x-dcl-go-name: Verify\n description: Whether to run verify tests after each\n percentage deployment.\n customCanaryDeployment:\n type: object\n x-dcl-go-name: CustomCanaryDeployment\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment\n description: Configures the progressive based deployment\n for a Target, but allows customizing at the phase level\n where a phase represents each of the percentage deployments.\n x-dcl-conflicts:\n - canaryDeployment\n required:\n - phaseConfigs\n properties:\n phaseConfigs:\n type: array\n x-dcl-go-name: PhaseConfigs\n description: Required. Configuration for each phase\n in the canary deployment in the order executed.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs\n required:\n - phaseId\n - percentage\n properties:\n percentage:\n type: integer\n format: int64\n x-dcl-go-name: Percentage\n description: Required. Percentage deployment\n for the phase.\n phaseId:\n type: string\n x-dcl-go-name: PhaseId\n description: 'Required. The ID to assign to\n the `Rollout` phase. This value must consist\n of lower-case letters, numbers, and hyphens,\n start with a letter and end with a letter\n or a number, and have a max length of 63 characters.\n In other words, it must match the following\n regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.'\n profiles:\n type: array\n x-dcl-go-name: Profiles\n description: Skaffold profiles to use when rendering\n the manifest for this phase. 
These are in\n addition to the profiles list specified in\n the `DeliveryPipeline` stage.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n verify:\n type: boolean\n x-dcl-go-name: Verify\n description: Whether to run verify tests after\n the deployment.\n runtimeConfig:\n type: object\n x-dcl-go-name: RuntimeConfig\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig\n description: Optional. Runtime specific configurations\n for the deployment strategy. The runtime configuration\n is used to determine how Cloud Deploy will split traffic\n to enable a progressive deployment.\n properties:\n cloudRun:\n type: object\n x-dcl-go-name: CloudRun\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun\n description: Cloud Run runtime configuration.\n x-dcl-conflicts:\n - kubernetes\n properties:\n automaticTrafficControl:\n type: boolean\n x-dcl-go-name: AutomaticTrafficControl\n description: Whether Cloud Deploy should update\n the traffic stanza in a Cloud Run Service on\n the user's behalf to facilitate traffic splitting.\n This is required to be true for CanaryDeployments,\n but optional for CustomCanaryDeployments.\n kubernetes:\n type: object\n x-dcl-go-name: Kubernetes\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes\n description: Kubernetes runtime configuration.\n x-dcl-conflicts:\n - cloudRun\n properties:\n gatewayServiceMesh:\n type: object\n x-dcl-go-name: GatewayServiceMesh\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh\n description: Kubernetes Gateway API service mesh\n configuration.\n x-dcl-conflicts:\n - serviceNetworking\n required:\n - httpRoute\n - service\n - deployment\n properties:\n deployment:\n type: string\n x-dcl-go-name: Deployment\n description: Required. 
Name of the Kubernetes\n Deployment whose traffic is managed by the\n specified HTTPRoute and Service.\n httpRoute:\n type: string\n x-dcl-go-name: HttpRoute\n description: Required. Name of the Gateway\n API HTTPRoute.\n service:\n type: string\n x-dcl-go-name: Service\n description: Required. Name of the Kubernetes\n Service.\n serviceNetworking:\n type: object\n x-dcl-go-name: ServiceNetworking\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking\n description: Kubernetes Service networking configuration.\n x-dcl-conflicts:\n - gatewayServiceMesh\n required:\n - service\n - deployment\n properties:\n deployment:\n type: string\n x-dcl-go-name: Deployment\n description: Required. Name of the Kubernetes\n Deployment whose traffic is managed by the\n specified Service.\n disablePodOverprovisioning:\n type: boolean\n x-dcl-go-name: DisablePodOverprovisioning\n description: Optional. Whether to disable\n Pod overprovisioning. If Pod overprovisioning\n is disabled then Cloud Deploy will limit\n the number of total Pods used for the deployment\n strategy to the number of Pods the Deployment\n has on the cluster.\n service:\n type: string\n x-dcl-go-name: Service\n description: Required. Name of the Kubernetes\n Service.\n standard:\n type: object\n x-dcl-go-name: Standard\n x-dcl-go-type: DeliveryPipelineSerialPipelineStagesStrategyStandard\n description: Standard deployment strategy executes a single\n deploy and allows verifying the deployment.\n properties:\n verify:\n type: boolean\n x-dcl-go-name: Verify\n description: Whether to verify a deployment.\n targetId:\n type: string\n x-dcl-go-name: TargetId\n description: The target_id to which this stage points. This field\n refers exclusively to the last segment of a target name. 
For\n example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`).\n The location of the `Target` is inferred to be the same as the\n location of the `DeliveryPipeline` that contains this `Stage`.\n suspended:\n type: boolean\n x-dcl-go-name: Suspended\n description: When suspended, no new releases or rollouts can be created,\n but in-progress ones will complete.\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. Unique identifier of the `DeliveryPipeline`.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. Most recent time at which the pipeline was updated.\n x-kubernetes-immutable: true\n") -// 9321 bytes -// MD5: 0d460ed1a19cc897922581015b2e537f +// 24434 bytes +// MD5: 6a0abf2c7318c93d232cc641af7528b8 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.go index a64b1395fc..1592bc13d9 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.go @@ -41,6 +41,8 @@ type Target struct { ExecutionConfigs []TargetExecutionConfigs `json:"executionConfigs"` Project *string `json:"project"` Location *string `json:"location"` + Run *TargetRun `json:"run"` + DeployParameters map[string]string `json:"deployParameters"` } func (r *Target) String() string { @@ -227,6 +229,52 @@ func (r *TargetExecutionConfigs) HashCode() string { return fmt.Sprintf("%x", hash) } +type TargetRun struct { + empty bool `json:"-"` + 
Location *string `json:"location"` +} + +type jsonTargetRun TargetRun + +func (r *TargetRun) UnmarshalJSON(data []byte) error { + var res jsonTargetRun + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + var m map[string]interface{} + json.Unmarshal(data, &m) + + if len(m) == 0 { + *r = *EmptyTargetRun + } else { + + r.Location = res.Location + + } + return nil +} + +// This object is used to assert a desired state where this TargetRun is +// empty. Go lacks global const objects, but this object should be treated +// as one. Modifying this object will have undesirable results. +var EmptyTargetRun *TargetRun = &TargetRun{empty: true} + +func (r *TargetRun) Empty() bool { + return r.empty +} + +func (r *TargetRun) String() string { + return dcl.SprintResource(r) +} + +func (r *TargetRun) HashCode() string { + // Placeholder for a more complex hash method that handles ordering, etc + // Hash resource body for easy comparison later + hash := sha256.New().Sum([]byte(r.String())) + return fmt.Sprintf("%x", hash) +} + // Describe returns a simple description of this resource to ensure that automated tools // can identify it. 
func (r *Target) Describe() dcl.ServiceTypeVersion { @@ -258,6 +306,8 @@ func (r *Target) ID() (string, error) { "execution_configs": dcl.ValueOrEmptyString(nr.ExecutionConfigs), "project": dcl.ValueOrEmptyString(nr.Project), "location": dcl.ValueOrEmptyString(nr.Location), + "run": dcl.ValueOrEmptyString(nr.Run), + "deploy_parameters": dcl.ValueOrEmptyString(nr.DeployParameters), } return dcl.Nprintf("projects/{{project}}/locations/{{location}}/targets/{{name}}", params), nil } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.yaml index dd69478ac6..0c74dfa825 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target.yaml @@ -92,6 +92,7 @@ components: description: Information specifying an Anthos Cluster. x-dcl-conflicts: - gke + - run properties: membership: type: string @@ -108,6 +109,12 @@ components: readOnly: true description: Output only. Time at which the `Target` was created. x-kubernetes-immutable: true + deployParameters: + type: object + additionalProperties: + type: string + x-dcl-go-name: DeployParameters + description: Optional. The deploy parameters to use for this target. description: type: string x-dcl-go-name: Description @@ -190,6 +197,7 @@ components: description: Information specifying a GKE Cluster. x-dcl-conflicts: - anthosCluster + - run properties: cluster: type: string @@ -243,6 +251,22 @@ components: type: boolean x-dcl-go-name: RequireApproval description: Optional. Whether or not the `Target` requires approval. 
+ run: + type: object + x-dcl-go-name: Run + x-dcl-go-type: TargetRun + description: Information specifying a Cloud Run deployment target. + x-dcl-conflicts: + - gke + - anthosCluster + required: + - location + properties: + location: + type: string + x-dcl-go-name: Location + description: Required. The location where the Cloud Run Service should + be located. Format is `projects/{project}/locations/{location}`. targetId: type: string x-dcl-go-name: TargetId diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_internal.go index ea51a4f066..e6642a2dcd 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_internal.go @@ -27,7 +27,7 @@ import ( func (r *Target) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Gke", "AnthosCluster"}, r.Gke, r.AnthosCluster); err != nil { + if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Gke", "AnthosCluster", "Run"}, r.Gke, r.AnthosCluster, r.Run); err != nil { return err } if err := dcl.RequiredParameter(r.Name, "Name"); err != nil { @@ -49,6 +49,11 @@ func (r *Target) validate() error { return err } } + if !dcl.IsEmptyValueIndirect(r.Run) { + if err := r.Run.validate(); err != nil { + return err + } + } return nil } func (r *TargetGke) validate() error { @@ -66,6 +71,12 @@ func (r *TargetExecutionConfigs) validate() error { } return nil } +func (r *TargetRun) validate() error { + if err := dcl.Required(r, "location"); err != nil { + return err + } + return nil +} func (r *Target) basePath() string { params := 
map[string]interface{}{} return dcl.Nprintf("https://clouddeploy.googleapis.com/v1/", params) @@ -173,6 +184,14 @@ func newUpdateTargetUpdateTargetRequest(ctx context.Context, f *Target, c *Clien } else if v != nil { req["executionConfigs"] = v } + if v, err := expandTargetRun(c, f.Run, res); err != nil { + return nil, fmt.Errorf("error expanding Run into run: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["run"] = v + } + if v := f.DeployParameters; !dcl.IsEmptyValueIndirect(v) { + req["deployParameters"] = v + } b, err := c.getTargetRaw(ctx, f) if err != nil { return nil, err @@ -508,18 +527,25 @@ func canonicalizeTargetInitialState(rawInitial, rawDesired *Target) (*Target, er if !dcl.IsZeroValue(rawInitial.Gke) { // Check if anything else is set. - if dcl.AnySet(rawInitial.AnthosCluster) { + if dcl.AnySet(rawInitial.AnthosCluster, rawInitial.Run) { rawInitial.Gke = EmptyTargetGke } } if !dcl.IsZeroValue(rawInitial.AnthosCluster) { // Check if anything else is set. - if dcl.AnySet(rawInitial.Gke) { + if dcl.AnySet(rawInitial.Gke, rawInitial.Run) { rawInitial.AnthosCluster = EmptyTargetAnthosCluster } } + if !dcl.IsZeroValue(rawInitial.Run) { + // Check if anything else is set. + if dcl.AnySet(rawInitial.Gke, rawInitial.AnthosCluster) { + rawInitial.Run = EmptyTargetRun + } + } + return rawInitial, nil } @@ -537,6 +563,7 @@ func canonicalizeTargetDesiredState(rawDesired, rawInitial *Target, opts ...dcl. // We canonicalize the remaining nested objects with nil to pick up defaults. rawDesired.Gke = canonicalizeTargetGke(rawDesired.Gke, nil, opts...) rawDesired.AnthosCluster = canonicalizeTargetAnthosCluster(rawDesired.AnthosCluster, nil, opts...) + rawDesired.Run = canonicalizeTargetRun(rawDesired.Run, nil, opts...) return rawDesired, nil } @@ -581,21 +608,35 @@ func canonicalizeTargetDesiredState(rawDesired, rawInitial *Target, opts ...dcl. 
} else { canonicalDesired.Location = rawDesired.Location } + canonicalDesired.Run = canonicalizeTargetRun(rawDesired.Run, rawInitial.Run, opts...) + if dcl.IsZeroValue(rawDesired.DeployParameters) || (dcl.IsEmptyValueIndirect(rawDesired.DeployParameters) && dcl.IsEmptyValueIndirect(rawInitial.DeployParameters)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + canonicalDesired.DeployParameters = rawInitial.DeployParameters + } else { + canonicalDesired.DeployParameters = rawDesired.DeployParameters + } if canonicalDesired.Gke != nil { // Check if anything else is set. - if dcl.AnySet(rawDesired.AnthosCluster) { + if dcl.AnySet(rawDesired.AnthosCluster, rawDesired.Run) { canonicalDesired.Gke = EmptyTargetGke } } if canonicalDesired.AnthosCluster != nil { // Check if anything else is set. - if dcl.AnySet(rawDesired.Gke) { + if dcl.AnySet(rawDesired.Gke, rawDesired.Run) { canonicalDesired.AnthosCluster = EmptyTargetAnthosCluster } } + if canonicalDesired.Run != nil { + // Check if anything else is set. 
+ if dcl.AnySet(rawDesired.Gke, rawDesired.AnthosCluster) { + canonicalDesired.Run = EmptyTargetRun + } + } + return canonicalDesired, nil } @@ -685,6 +726,17 @@ func canonicalizeTargetNewState(c *Client, rawNew, rawDesired *Target) (*Target, rawNew.Location = rawDesired.Location + if dcl.IsEmptyValueIndirect(rawNew.Run) && dcl.IsEmptyValueIndirect(rawDesired.Run) { + rawNew.Run = rawDesired.Run + } else { + rawNew.Run = canonicalizeNewTargetRun(c, rawDesired.Run, rawNew.Run) + } + + if dcl.IsEmptyValueIndirect(rawNew.DeployParameters) && dcl.IsEmptyValueIndirect(rawDesired.DeployParameters) { + rawNew.DeployParameters = rawDesired.DeployParameters + } else { + } + return rawNew, nil } @@ -1073,6 +1125,124 @@ func canonicalizeNewTargetExecutionConfigsSlice(c *Client, des, nw []TargetExecu return items } +func canonicalizeTargetRun(des, initial *TargetRun, opts ...dcl.ApplyOption) *TargetRun { + if des == nil { + return initial + } + if des.empty { + return des + } + + if initial == nil { + return des + } + + cDes := &TargetRun{} + + if dcl.StringCanonicalize(des.Location, initial.Location) || dcl.IsZeroValue(des.Location) { + cDes.Location = initial.Location + } else { + cDes.Location = des.Location + } + + return cDes +} + +func canonicalizeTargetRunSlice(des, initial []TargetRun, opts ...dcl.ApplyOption) []TargetRun { + if dcl.IsEmptyValueIndirect(des) { + return initial + } + + if len(des) != len(initial) { + + items := make([]TargetRun, 0, len(des)) + for _, d := range des { + cd := canonicalizeTargetRun(&d, nil, opts...) + if cd != nil { + items = append(items, *cd) + } + } + return items + } + + items := make([]TargetRun, 0, len(des)) + for i, d := range des { + cd := canonicalizeTargetRun(&d, &initial[i], opts...) 
+ if cd != nil { + items = append(items, *cd) + } + } + return items + +} + +func canonicalizeNewTargetRun(c *Client, des, nw *TargetRun) *TargetRun { + + if des == nil { + return nw + } + + if nw == nil { + if dcl.IsEmptyValueIndirect(des) { + c.Config.Logger.Info("Found explicitly empty value for TargetRun while comparing non-nil desired to nil actual. Returning desired object.") + return des + } + return nil + } + + if dcl.StringCanonicalize(des.Location, nw.Location) { + nw.Location = des.Location + } + + return nw +} + +func canonicalizeNewTargetRunSet(c *Client, des, nw []TargetRun) []TargetRun { + if des == nil { + return nw + } + + // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. + var items []TargetRun + for _, d := range des { + matchedIndex := -1 + for i, n := range nw { + if diffs, _ := compareTargetRunNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { + matchedIndex = i + break + } + } + if matchedIndex != -1 { + items = append(items, *canonicalizeNewTargetRun(c, &d, &nw[matchedIndex])) + nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) + } + } + // Also include elements in nw that are not matched in des. + items = append(items, nw...) + + return items +} + +func canonicalizeNewTargetRunSlice(c *Client, des, nw []TargetRun) []TargetRun { + if des == nil { + return nw + } + + // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. + // Return the original array. + if len(des) != len(nw) { + return nw + } + + var items []TargetRun + for i, d := range des { + n := nw[i] + items = append(items, *canonicalizeNewTargetRun(c, &d, &n)) + } + + return items +} + // The differ returns a list of diffs, along with a list of operations that should be taken // to remedy them. Right now, it does not attempt to consolidate operations - if several // fields can be fixed with a patch update, it will perform the patch several times. 
@@ -1196,6 +1366,20 @@ func diffTarget(c *Client, desired, actual *Target, opts ...dcl.ApplyOption) ([] newDiffs = append(newDiffs, ds...) } + if ds, err := dcl.Diff(desired.Run, actual.Run, dcl.DiffInfo{ObjectFunction: compareTargetRunNewStyle, EmptyObject: EmptyTargetRun, OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("Run")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.DeployParameters, actual.DeployParameters, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTargetUpdateTargetOperation")}, fn.AddNest("DeployParameters")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + if len(newDiffs) > 0 { c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) } @@ -1323,6 +1507,35 @@ func compareTargetExecutionConfigsNewStyle(d, a interface{}, fn dcl.FieldName) ( return diffs, nil } +func compareTargetRunNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { + var diffs []*dcl.FieldDiff + + desired, ok := d.(*TargetRun) + if !ok { + desiredNotPointer, ok := d.(TargetRun) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetRun or *TargetRun", d) + } + desired = &desiredNotPointer + } + actual, ok := a.(*TargetRun) + if !ok { + actualNotPointer, ok := a.(TargetRun) + if !ok { + return nil, fmt.Errorf("obj %v is not a TargetRun", a) + } + actual = &actualNotPointer + } + + if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + return diffs, nil +} + // urlNormalized returns a copy of the resource struct with values normalized // for URL substitutions. 
For instance, it converts long-form self-links to // short-form so they can be substituted in. @@ -1430,6 +1643,14 @@ func expandTarget(c *Client, f *Target) (map[string]interface{}, error) { } else if !dcl.IsEmptyValueIndirect(v) { m["location"] = v } + if v, err := expandTargetRun(c, f.Run, res); err != nil { + return nil, fmt.Errorf("error expanding Run into run: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + m["run"] = v + } + if v := f.DeployParameters; dcl.ValueShouldBeSent(v) { + m["deployParameters"] = v + } return m, nil } @@ -1461,6 +1682,8 @@ func flattenTarget(c *Client, i interface{}, res *Target) *Target { resultRes.ExecutionConfigs = flattenTargetExecutionConfigsSlice(c, m["executionConfigs"], res) resultRes.Project = dcl.FlattenString(m["project"]) resultRes.Location = dcl.FlattenString(m["location"]) + resultRes.Run = flattenTargetRun(c, m["run"], res) + resultRes.DeployParameters = dcl.FlattenKeyValuePairs(m["deployParameters"]) return resultRes } @@ -1827,6 +2050,120 @@ func flattenTargetExecutionConfigs(c *Client, i interface{}, res *Target) *Targe return r } +// expandTargetRunMap expands the contents of TargetRun into a JSON +// request object. +func expandTargetRunMap(c *Client, f map[string]TargetRun, res *Target) (map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := make(map[string]interface{}) + for k, item := range f { + i, err := expandTargetRun(c, &item, res) + if err != nil { + return nil, err + } + if i != nil { + items[k] = i + } + } + + return items, nil +} + +// expandTargetRunSlice expands the contents of TargetRun into a JSON +// request object. 
+func expandTargetRunSlice(c *Client, f []TargetRun, res *Target) ([]map[string]interface{}, error) { + if f == nil { + return nil, nil + } + + items := []map[string]interface{}{} + for _, item := range f { + i, err := expandTargetRun(c, &item, res) + if err != nil { + return nil, err + } + + items = append(items, i) + } + + return items, nil +} + +// flattenTargetRunMap flattens the contents of TargetRun from a JSON +// response object. +func flattenTargetRunMap(c *Client, i interface{}, res *Target) map[string]TargetRun { + a, ok := i.(map[string]interface{}) + if !ok { + return map[string]TargetRun{} + } + + if len(a) == 0 { + return map[string]TargetRun{} + } + + items := make(map[string]TargetRun) + for k, item := range a { + items[k] = *flattenTargetRun(c, item.(map[string]interface{}), res) + } + + return items +} + +// flattenTargetRunSlice flattens the contents of TargetRun from a JSON +// response object. +func flattenTargetRunSlice(c *Client, i interface{}, res *Target) []TargetRun { + a, ok := i.([]interface{}) + if !ok { + return []TargetRun{} + } + + if len(a) == 0 { + return []TargetRun{} + } + + items := make([]TargetRun, 0, len(a)) + for _, item := range a { + items = append(items, *flattenTargetRun(c, item.(map[string]interface{}), res)) + } + + return items +} + +// expandTargetRun expands an instance of TargetRun into a JSON +// request object. +func expandTargetRun(c *Client, f *TargetRun, res *Target) (map[string]interface{}, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]interface{}) + if v := f.Location; !dcl.IsEmptyValueIndirect(v) { + m["location"] = v + } + + return m, nil +} + +// flattenTargetRun flattens an instance of TargetRun from a JSON +// response object. 
+func flattenTargetRun(c *Client, i interface{}, res *Target) *TargetRun { + m, ok := i.(map[string]interface{}) + if !ok { + return nil + } + + r := &TargetRun{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyTargetRun + } + r.Location = dcl.FlattenString(m["location"]) + + return r +} + // flattenTargetExecutionConfigsUsagesEnumMap flattens the contents of TargetExecutionConfigsUsagesEnum from a JSON // response object. func flattenTargetExecutionConfigsUsagesEnumMap(c *Client, i interface{}, res *Target) map[string]TargetExecutionConfigsUsagesEnum { @@ -1994,6 +2331,17 @@ func extractTargetFields(r *Target) error { if !dcl.IsEmptyValueIndirect(vAnthosCluster) { r.AnthosCluster = vAnthosCluster } + vRun := r.Run + if vRun == nil { + // note: explicitly not the empty object. + vRun = &TargetRun{} + } + if err := extractTargetRunFields(r, vRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRun) { + r.Run = vRun + } return nil } func extractTargetGkeFields(r *Target, o *TargetGke) error { @@ -2005,6 +2353,9 @@ func extractTargetAnthosClusterFields(r *Target, o *TargetAnthosCluster) error { func extractTargetExecutionConfigsFields(r *Target, o *TargetExecutionConfigs) error { return nil } +func extractTargetRunFields(r *Target, o *TargetRun) error { + return nil +} func postReadExtractTargetFields(r *Target) error { vGke := r.Gke @@ -2029,6 +2380,17 @@ func postReadExtractTargetFields(r *Target) error { if !dcl.IsEmptyValueIndirect(vAnthosCluster) { r.AnthosCluster = vAnthosCluster } + vRun := r.Run + if vRun == nil { + // note: explicitly not the empty object. 
+ vRun = &TargetRun{} + } + if err := postReadExtractTargetRunFields(r, vRun); err != nil { + return err + } + if !dcl.IsEmptyValueIndirect(vRun) { + r.Run = vRun + } return nil } func postReadExtractTargetGkeFields(r *Target, o *TargetGke) error { @@ -2040,3 +2402,6 @@ func postReadExtractTargetAnthosClusterFields(r *Target, o *TargetAnthosCluster) func postReadExtractTargetExecutionConfigsFields(r *Target, o *TargetExecutionConfigs) error { return nil } +func postReadExtractTargetRunFields(r *Target, o *TargetRun) error { + return nil +} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_schema.go index 7acdfcb49d..2ee3af3fa5 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_schema.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_schema.go @@ -128,6 +128,7 @@ func DCLTargetSchema() *dcl.Schema { Description: "Information specifying an Anthos Cluster.", Conflicts: []string{ "gke", + "run", }, Properties: map[string]*dcl.Property{ "membership": &dcl.Property{ @@ -151,6 +152,14 @@ func DCLTargetSchema() *dcl.Schema { Description: "Output only. Time at which the `Target` was created.", Immutable: true, }, + "deployParameters": &dcl.Property{ + Type: "object", + AdditionalProperties: &dcl.Property{ + Type: "string", + }, + GoName: "DeployParameters", + Description: "Optional. 
The deploy parameters to use for this target.", + }, "description": &dcl.Property{ Type: "string", GoName: "Description", @@ -232,6 +241,7 @@ func DCLTargetSchema() *dcl.Schema { Description: "Information specifying a GKE Cluster.", Conflicts: []string{ "anthosCluster", + "run", }, Properties: map[string]*dcl.Property{ "cluster": &dcl.Property{ @@ -290,6 +300,26 @@ func DCLTargetSchema() *dcl.Schema { GoName: "RequireApproval", Description: "Optional. Whether or not the `Target` requires approval.", }, + "run": &dcl.Property{ + Type: "object", + GoName: "Run", + GoType: "TargetRun", + Description: "Information specifying a Cloud Run deployment target.", + Conflicts: []string{ + "gke", + "anthosCluster", + }, + Required: []string{ + "location", + }, + Properties: map[string]*dcl.Property{ + "location": &dcl.Property{ + Type: "string", + GoName: "Location", + Description: "Required. The location where the Cloud Run Service should be located. Format is `projects/{project}/locations/{location}`.", + }, + }, + }, "targetId": &dcl.Property{ Type: "string", GoName: "TargetId", diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_yaml_embed.go index c2db11ca55..f1fb0c2e11 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy/target_yaml_embed.go @@ -17,7 +17,7 @@ package clouddeploy // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/clouddeploy/target.yaml -var YAML_target = []byte("info:\n title: Clouddeploy/Target\n description: The Cloud Deploy `Target` resource\n 
x-dcl-struct-name: Target\n x-dcl-has-iam: false\n x-dcl-ref:\n text: REST API\n url: https://cloud.google.com/deploy/docs/api/reference/rest/v1/projects.locations.targets\npaths:\n get:\n description: The function used to get information about a Target\n parameters:\n - name: target\n required: true\n description: A full instance of a Target\n apply:\n description: The function used to apply information about a Target\n parameters:\n - name: target\n required: true\n description: A full instance of a Target\n delete:\n description: The function used to delete a Target\n parameters:\n - name: target\n required: true\n description: A full instance of a Target\n deleteAll:\n description: The function used to delete all Target\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Target\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Target:\n title: Target\n x-dcl-id: projects/{{project}}/locations/{{location}}/targets/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n - location\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: Optional. User annotations. These attributes can only be set\n and used by the user, and not by Google Cloud Deploy. 
See https://google.aip.dev/128#annotations\n for more details such as format and size limitations.\n anthosCluster:\n type: object\n x-dcl-go-name: AnthosCluster\n x-dcl-go-type: TargetAnthosCluster\n description: Information specifying an Anthos Cluster.\n x-dcl-conflicts:\n - gke\n properties:\n membership:\n type: string\n x-dcl-go-name: Membership\n description: Membership of the GKE Hub-registered cluster to which to\n apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.\n x-dcl-references:\n - resource: Gkehub/Membership\n field: selfLink\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Time at which the `Target` was created.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. Description of the `Target`. Max length is 255 characters.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Optional. This checksum is computed by the server based on\n the value of other fields, and may be sent on update and delete requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n executionConfigs:\n type: array\n x-dcl-go-name: ExecutionConfigs\n description: Configurations for all execution that relates to this `Target`.\n Each `ExecutionEnvironmentUsage` value may only be used in a single configuration;\n using the same value multiple times is an error. When one or more configurations\n are specified, they must include the `RENDER` and `DEPLOY` `ExecutionEnvironmentUsage`\n values. 
When no configurations are specified, execution will use the default\n specified in `DefaultPool`.\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: TargetExecutionConfigs\n required:\n - usages\n properties:\n artifactStorage:\n type: string\n x-dcl-go-name: ArtifactStorage\n description: Optional. Cloud Storage location in which to store execution\n outputs. This can either be a bucket (\"gs://my-bucket\") or a path\n within a bucket (\"gs://my-bucket/my-dir\"). If unspecified, a default\n bucket located in the same region will be used.\n x-dcl-server-default: true\n executionTimeout:\n type: string\n x-dcl-go-name: ExecutionTimeout\n description: Optional. Execution timeout for a Cloud Build Execution.\n This must be between 10m and 24h in seconds format. If unspecified,\n a default timeout of 1h is used.\n x-dcl-server-default: true\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Optional. Google service account to use for execution.\n If unspecified, the project execution service account (-compute@developer.gserviceaccount.com)\n is used.\n x-dcl-server-default: true\n usages:\n type: array\n x-dcl-go-name: Usages\n description: Required. Usages when this configuration should be applied.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: TargetExecutionConfigsUsagesEnum\n enum:\n - EXECUTION_ENVIRONMENT_USAGE_UNSPECIFIED\n - RENDER\n - DEPLOY\n workerPool:\n type: string\n x-dcl-go-name: WorkerPool\n description: Optional. 
The resource name of the `WorkerPool`, with\n the format `projects/{project}/locations/{location}/workerPools/{worker_pool}`.\n If this optional field is unspecified, the default Cloud Build pool\n will be used.\n x-dcl-references:\n - resource: Cloudbuild/WorkerPool\n field: selfLink\n gke:\n type: object\n x-dcl-go-name: Gke\n x-dcl-go-type: TargetGke\n description: Information specifying a GKE Cluster.\n x-dcl-conflicts:\n - anthosCluster\n properties:\n cluster:\n type: string\n x-dcl-go-name: Cluster\n description: Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}.\n x-dcl-references:\n - resource: Container/Cluster\n field: selfLink\n internalIP:\n type: boolean\n x-dcl-go-name: InternalIP\n description: Optional. If true, `cluster` is accessed using the private\n IP address of the control plane endpoint. Otherwise, the default IP\n address of the control plane endpoint is used. The default IP address\n is the private IP address for clusters with private control-plane\n endpoints and the public IP address otherwise. Only specify this option\n when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. Labels are attributes that can be set and used by\n both the user and by Google Cloud Deploy. Labels must meet the following\n constraints: * Keys and values can contain only lowercase letters, numeric\n characters, underscores, and dashes. * All characters must use UTF-8 encoding,\n and international characters are allowed. * Keys must start with a lowercase\n letter or international character. * Each resource is limited to a maximum\n of 64 labels. 
Both keys and values are additionally constrained to be\n <= 128 bytes.'\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the `Target`. Format is [a-z][a-z0-9\\-]{0,62}.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n requireApproval:\n type: boolean\n x-dcl-go-name: RequireApproval\n description: Optional. Whether or not the `Target` requires approval.\n targetId:\n type: string\n x-dcl-go-name: TargetId\n readOnly: true\n description: Output only. Resource id of the `Target`.\n x-kubernetes-immutable: true\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. Unique identifier of the `Target`.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
Most recent time at which the `Target` was updated.\n x-kubernetes-immutable: true\n") +var YAML_target = []byte("info:\n title: Clouddeploy/Target\n description: The Cloud Deploy `Target` resource\n x-dcl-struct-name: Target\n x-dcl-has-iam: false\n x-dcl-ref:\n text: REST API\n url: https://cloud.google.com/deploy/docs/api/reference/rest/v1/projects.locations.targets\npaths:\n get:\n description: The function used to get information about a Target\n parameters:\n - name: target\n required: true\n description: A full instance of a Target\n apply:\n description: The function used to apply information about a Target\n parameters:\n - name: target\n required: true\n description: A full instance of a Target\n delete:\n description: The function used to delete a Target\n parameters:\n - name: target\n required: true\n description: A full instance of a Target\n deleteAll:\n description: The function used to delete all Target\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Target\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Target:\n title: Target\n x-dcl-id: projects/{{project}}/locations/{{location}}/targets/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n - location\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: Optional. User annotations. These attributes can only be set\n and used by the user, and not by Google Cloud Deploy. 
See https://google.aip.dev/128#annotations\n for more details such as format and size limitations.\n anthosCluster:\n type: object\n x-dcl-go-name: AnthosCluster\n x-dcl-go-type: TargetAnthosCluster\n description: Information specifying an Anthos Cluster.\n x-dcl-conflicts:\n - gke\n - run\n properties:\n membership:\n type: string\n x-dcl-go-name: Membership\n description: Membership of the GKE Hub-registered cluster to which to\n apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.\n x-dcl-references:\n - resource: Gkehub/Membership\n field: selfLink\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. Time at which the `Target` was created.\n x-kubernetes-immutable: true\n deployParameters:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: DeployParameters\n description: Optional. The deploy parameters to use for this target.\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. Description of the `Target`. Max length is 255 characters.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Optional. This checksum is computed by the server based on\n the value of other fields, and may be sent on update and delete requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n executionConfigs:\n type: array\n x-dcl-go-name: ExecutionConfigs\n description: Configurations for all execution that relates to this `Target`.\n Each `ExecutionEnvironmentUsage` value may only be used in a single configuration;\n using the same value multiple times is an error. When one or more configurations\n are specified, they must include the `RENDER` and `DEPLOY` `ExecutionEnvironmentUsage`\n values. 
When no configurations are specified, execution will use the default\n specified in `DefaultPool`.\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: TargetExecutionConfigs\n required:\n - usages\n properties:\n artifactStorage:\n type: string\n x-dcl-go-name: ArtifactStorage\n description: Optional. Cloud Storage location in which to store execution\n outputs. This can either be a bucket (\"gs://my-bucket\") or a path\n within a bucket (\"gs://my-bucket/my-dir\"). If unspecified, a default\n bucket located in the same region will be used.\n x-dcl-server-default: true\n executionTimeout:\n type: string\n x-dcl-go-name: ExecutionTimeout\n description: Optional. Execution timeout for a Cloud Build Execution.\n This must be between 10m and 24h in seconds format. If unspecified,\n a default timeout of 1h is used.\n x-dcl-server-default: true\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Optional. Google service account to use for execution.\n If unspecified, the project execution service account (-compute@developer.gserviceaccount.com)\n is used.\n x-dcl-server-default: true\n usages:\n type: array\n x-dcl-go-name: Usages\n description: Required. Usages when this configuration should be applied.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: TargetExecutionConfigsUsagesEnum\n enum:\n - EXECUTION_ENVIRONMENT_USAGE_UNSPECIFIED\n - RENDER\n - DEPLOY\n workerPool:\n type: string\n x-dcl-go-name: WorkerPool\n description: Optional. 
The resource name of the `WorkerPool`, with\n the format `projects/{project}/locations/{location}/workerPools/{worker_pool}`.\n If this optional field is unspecified, the default Cloud Build pool\n will be used.\n x-dcl-references:\n - resource: Cloudbuild/WorkerPool\n field: selfLink\n gke:\n type: object\n x-dcl-go-name: Gke\n x-dcl-go-type: TargetGke\n description: Information specifying a GKE Cluster.\n x-dcl-conflicts:\n - anthosCluster\n - run\n properties:\n cluster:\n type: string\n x-dcl-go-name: Cluster\n description: Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}.\n x-dcl-references:\n - resource: Container/Cluster\n field: selfLink\n internalIP:\n type: boolean\n x-dcl-go-name: InternalIP\n description: Optional. If true, `cluster` is accessed using the private\n IP address of the control plane endpoint. Otherwise, the default IP\n address of the control plane endpoint is used. The default IP address\n is the private IP address for clusters with private control-plane\n endpoints and the public IP address otherwise. Only specify this option\n when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept).\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. Labels are attributes that can be set and used by\n both the user and by Google Cloud Deploy. Labels must meet the following\n constraints: * Keys and values can contain only lowercase letters, numeric\n characters, underscores, and dashes. * All characters must use UTF-8 encoding,\n and international characters are allowed. * Keys must start with a lowercase\n letter or international character. * Each resource is limited to a maximum\n of 64 labels. 
Both keys and values are additionally constrained to be\n <= 128 bytes.'\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the `Target`. Format is [a-z][a-z0-9\\-]{0,62}.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n requireApproval:\n type: boolean\n x-dcl-go-name: RequireApproval\n description: Optional. Whether or not the `Target` requires approval.\n run:\n type: object\n x-dcl-go-name: Run\n x-dcl-go-type: TargetRun\n description: Information specifying a Cloud Run deployment target.\n x-dcl-conflicts:\n - gke\n - anthosCluster\n required:\n - location\n properties:\n location:\n type: string\n x-dcl-go-name: Location\n description: Required. The location where the Cloud Run Service should\n be located. Format is `projects/{project}/locations/{location}`.\n targetId:\n type: string\n x-dcl-go-name: TargetId\n readOnly: true\n description: Output only. Resource id of the `Target`.\n x-kubernetes-immutable: true\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. Unique identifier of the `Target`.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
Most recent time at which the `Target` was updated.\n x-kubernetes-immutable: true\n") -// 10138 bytes -// MD5: 40c05270fd1ef823c0c9449c5303a79f +// 10959 bytes +// MD5: 160cecea2006e79b6130797252cde178 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule.go index d0e9bc9ee6..3a85e30f9f 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule.go @@ -71,10 +71,18 @@ func (v FirewallPolicyRuleDirectionEnum) Validate() error { } type FirewallPolicyRuleMatch struct { - empty bool `json:"-"` - SrcIPRanges []string `json:"srcIPRanges"` - DestIPRanges []string `json:"destIPRanges"` - Layer4Configs []FirewallPolicyRuleMatchLayer4Configs `json:"layer4Configs"` + empty bool `json:"-"` + SrcIPRanges []string `json:"srcIPRanges"` + DestIPRanges []string `json:"destIPRanges"` + SrcRegionCodes []string `json:"srcRegionCodes"` + DestRegionCodes []string `json:"destRegionCodes"` + SrcThreatIntelligences []string `json:"srcThreatIntelligences"` + DestThreatIntelligences []string `json:"destThreatIntelligences"` + SrcFqdns []string `json:"srcFqdns"` + DestFqdns []string `json:"destFqdns"` + Layer4Configs []FirewallPolicyRuleMatchLayer4Configs `json:"layer4Configs"` + SrcAddressGroups []string `json:"srcAddressGroups"` + DestAddressGroups []string `json:"destAddressGroups"` } type jsonFirewallPolicyRuleMatch FirewallPolicyRuleMatch @@ -96,8 +104,24 @@ func (r *FirewallPolicyRuleMatch) UnmarshalJSON(data []byte) error { r.DestIPRanges = res.DestIPRanges + r.SrcRegionCodes = 
res.SrcRegionCodes + + r.DestRegionCodes = res.DestRegionCodes + + r.SrcThreatIntelligences = res.SrcThreatIntelligences + + r.DestThreatIntelligences = res.DestThreatIntelligences + + r.SrcFqdns = res.SrcFqdns + + r.DestFqdns = res.DestFqdns + r.Layer4Configs = res.Layer4Configs + r.SrcAddressGroups = res.SrcAddressGroups + + r.DestAddressGroups = res.DestAddressGroups + } return nil } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule.yaml index 4adc47e116..27d9c1664f 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule.yaml @@ -73,8 +73,7 @@ components: type: string x-dcl-go-name: Action description: The Action to perform when the client connection triggers the - rule. Can currently be either "allow" or "deny()" where valid values for - status are 403, 404, and 502. + rule. Valid actions are "allow", "deny" and "goto_next". description: type: string x-dcl-go-name: Description @@ -127,6 +126,28 @@ components: required: - layer4Configs properties: + destAddressGroups: + type: array + x-dcl-go-name: DestAddressGroups + description: Address groups which should be matched against the traffic + destination. Maximum number of destination address groups is 10. Destination + address groups is only supported in Egress rules. 
+ x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string + destFqdns: + type: array + x-dcl-go-name: DestFqdns + description: Domain names that will be used to match against the resolved + domain name of destination of traffic. Can only be specified if DIRECTION + is egress. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string destIPRanges: type: array x-dcl-go-name: DestIPRanges @@ -137,6 +158,26 @@ components: items: type: string x-dcl-go-type: string + destRegionCodes: + type: array + x-dcl-go-name: DestRegionCodes + description: The Unicode country codes whose IP addresses will be used + to match against the source of traffic. Can only be specified if DIRECTION + is egress. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string + destThreatIntelligences: + type: array + x-dcl-go-name: DestThreatIntelligences + description: Name of the Google Cloud Threat Intelligence list. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string layer4Configs: type: array x-dcl-go-name: Layer4Configs @@ -170,6 +211,28 @@ components: items: type: string x-dcl-go-type: string + srcAddressGroups: + type: array + x-dcl-go-name: SrcAddressGroups + description: Address groups which should be matched against the traffic + source. Maximum number of source address groups is 10. Source address + groups is only supported in Ingress rules. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string + srcFqdns: + type: array + x-dcl-go-name: SrcFqdns + description: Domain names that will be used to match against the resolved + domain name of source of traffic. Can only be specified if DIRECTION + is ingress. 
+ x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string srcIPRanges: type: array x-dcl-go-name: SrcIPRanges @@ -180,6 +243,26 @@ components: items: type: string x-dcl-go-type: string + srcRegionCodes: + type: array + x-dcl-go-name: SrcRegionCodes + description: The Unicode country codes whose IP addresses will be used + to match against the source of traffic. Can only be specified if DIRECTION + is ingress. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string + srcThreatIntelligences: + type: array + x-dcl-go-name: SrcThreatIntelligences + description: Name of the Google Cloud Threat Intelligence list. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string priority: type: integer format: int64 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule_internal.go index 027692d913..bbde6d2ba2 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule_internal.go @@ -637,7 +637,47 @@ func canonicalizeFirewallPolicyRuleMatch(des, initial *FirewallPolicyRuleMatch, } else { cDes.DestIPRanges = des.DestIPRanges } + if dcl.StringArrayCanonicalize(des.SrcRegionCodes, initial.SrcRegionCodes) { + cDes.SrcRegionCodes = initial.SrcRegionCodes + } else { + cDes.SrcRegionCodes = des.SrcRegionCodes + } + if dcl.StringArrayCanonicalize(des.DestRegionCodes, initial.DestRegionCodes) { + cDes.DestRegionCodes = initial.DestRegionCodes + } else { 
+ cDes.DestRegionCodes = des.DestRegionCodes + } + if dcl.StringArrayCanonicalize(des.SrcThreatIntelligences, initial.SrcThreatIntelligences) { + cDes.SrcThreatIntelligences = initial.SrcThreatIntelligences + } else { + cDes.SrcThreatIntelligences = des.SrcThreatIntelligences + } + if dcl.StringArrayCanonicalize(des.DestThreatIntelligences, initial.DestThreatIntelligences) { + cDes.DestThreatIntelligences = initial.DestThreatIntelligences + } else { + cDes.DestThreatIntelligences = des.DestThreatIntelligences + } + if dcl.StringArrayCanonicalize(des.SrcFqdns, initial.SrcFqdns) { + cDes.SrcFqdns = initial.SrcFqdns + } else { + cDes.SrcFqdns = des.SrcFqdns + } + if dcl.StringArrayCanonicalize(des.DestFqdns, initial.DestFqdns) { + cDes.DestFqdns = initial.DestFqdns + } else { + cDes.DestFqdns = des.DestFqdns + } cDes.Layer4Configs = canonicalizeFirewallPolicyRuleMatchLayer4ConfigsSlice(des.Layer4Configs, initial.Layer4Configs, opts...) + if dcl.StringArrayCanonicalize(des.SrcAddressGroups, initial.SrcAddressGroups) { + cDes.SrcAddressGroups = initial.SrcAddressGroups + } else { + cDes.SrcAddressGroups = des.SrcAddressGroups + } + if dcl.StringArrayCanonicalize(des.DestAddressGroups, initial.DestAddressGroups) { + cDes.DestAddressGroups = initial.DestAddressGroups + } else { + cDes.DestAddressGroups = des.DestAddressGroups + } return cDes } @@ -690,7 +730,31 @@ func canonicalizeNewFirewallPolicyRuleMatch(c *Client, des, nw *FirewallPolicyRu if dcl.StringArrayCanonicalize(des.DestIPRanges, nw.DestIPRanges) { nw.DestIPRanges = des.DestIPRanges } + if dcl.StringArrayCanonicalize(des.SrcRegionCodes, nw.SrcRegionCodes) { + nw.SrcRegionCodes = des.SrcRegionCodes + } + if dcl.StringArrayCanonicalize(des.DestRegionCodes, nw.DestRegionCodes) { + nw.DestRegionCodes = des.DestRegionCodes + } + if dcl.StringArrayCanonicalize(des.SrcThreatIntelligences, nw.SrcThreatIntelligences) { + nw.SrcThreatIntelligences = des.SrcThreatIntelligences + } + if 
dcl.StringArrayCanonicalize(des.DestThreatIntelligences, nw.DestThreatIntelligences) { + nw.DestThreatIntelligences = des.DestThreatIntelligences + } + if dcl.StringArrayCanonicalize(des.SrcFqdns, nw.SrcFqdns) { + nw.SrcFqdns = des.SrcFqdns + } + if dcl.StringArrayCanonicalize(des.DestFqdns, nw.DestFqdns) { + nw.DestFqdns = des.DestFqdns + } nw.Layer4Configs = canonicalizeNewFirewallPolicyRuleMatchLayer4ConfigsSlice(c, des.Layer4Configs, nw.Layer4Configs) + if dcl.StringArrayCanonicalize(des.SrcAddressGroups, nw.SrcAddressGroups) { + nw.SrcAddressGroups = des.SrcAddressGroups + } + if dcl.StringArrayCanonicalize(des.DestAddressGroups, nw.DestAddressGroups) { + nw.DestAddressGroups = des.DestAddressGroups + } return nw } @@ -1008,12 +1072,68 @@ func compareFirewallPolicyRuleMatchNewStyle(d, a interface{}, fn dcl.FieldName) diffs = append(diffs, ds...) } + if ds, err := dcl.Diff(desired.SrcRegionCodes, actual.SrcRegionCodes, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("SrcRegionCodes")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DestRegionCodes, actual.DestRegionCodes, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("DestRegionCodes")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SrcThreatIntelligences, actual.SrcThreatIntelligences, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("SrcThreatIntelligences")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.DestThreatIntelligences, actual.DestThreatIntelligences, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("DestThreatIntelligences")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SrcFqdns, actual.SrcFqdns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("SrcFqdns")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DestFqdns, actual.DestFqdns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("DestFqdns")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + if ds, err := dcl.Diff(desired.Layer4Configs, actual.Layer4Configs, dcl.DiffInfo{ObjectFunction: compareFirewallPolicyRuleMatchLayer4ConfigsNewStyle, EmptyObject: EmptyFirewallPolicyRuleMatchLayer4Configs, OperationSelector: dcl.TriggersOperation("updateFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("Layer4Configs")); len(ds) != 0 || err != nil { if err != nil { return nil, err } diffs = append(diffs, ds...) } + + if ds, err := dcl.Diff(desired.SrcAddressGroups, actual.SrcAddressGroups, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("SrcAddressGroups")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DestAddressGroups, actual.DestAddressGroups, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("DestAddressGroups")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } return diffs, nil } @@ -1275,11 +1395,35 @@ func expandFirewallPolicyRuleMatch(c *Client, f *FirewallPolicyRuleMatch, res *F if v := f.DestIPRanges; v != nil { m["destIpRanges"] = v } + if v := f.SrcRegionCodes; v != nil { + m["srcRegionCodes"] = v + } + if v := f.DestRegionCodes; v != nil { + m["destRegionCodes"] = v + } + if v := f.SrcThreatIntelligences; v != nil { + m["srcThreatIntelligences"] = v + } + if v := f.DestThreatIntelligences; v != nil { + m["destThreatIntelligences"] = v + } + if v := f.SrcFqdns; v != nil { + m["srcFqdns"] = v + } + if v := f.DestFqdns; v != nil { + m["destFqdns"] = v + } if v, err := expandFirewallPolicyRuleMatchLayer4ConfigsSlice(c, f.Layer4Configs, res); err != nil { return nil, fmt.Errorf("error expanding Layer4Configs into layer4Configs: %w", err) } else if v != nil { m["layer4Configs"] = v } + if v := f.SrcAddressGroups; v != nil { + m["srcAddressGroups"] = v + } + if v := f.DestAddressGroups; v != nil { + m["destAddressGroups"] = v + } return m, nil } @@ -1299,7 +1443,15 @@ func flattenFirewallPolicyRuleMatch(c *Client, i interface{}, res *FirewallPolic } r.SrcIPRanges = dcl.FlattenStringSlice(m["srcIpRanges"]) r.DestIPRanges = dcl.FlattenStringSlice(m["destIpRanges"]) + r.SrcRegionCodes = dcl.FlattenStringSlice(m["srcRegionCodes"]) + r.DestRegionCodes = dcl.FlattenStringSlice(m["destRegionCodes"]) + r.SrcThreatIntelligences = dcl.FlattenStringSlice(m["srcThreatIntelligences"]) + r.DestThreatIntelligences = dcl.FlattenStringSlice(m["destThreatIntelligences"]) + r.SrcFqdns = dcl.FlattenStringSlice(m["srcFqdns"]) + r.DestFqdns = dcl.FlattenStringSlice(m["destFqdns"]) r.Layer4Configs = flattenFirewallPolicyRuleMatchLayer4ConfigsSlice(c, m["layer4Configs"], res) + r.SrcAddressGroups = dcl.FlattenStringSlice(m["srcAddressGroups"]) + r.DestAddressGroups = dcl.FlattenStringSlice(m["destAddressGroups"]) return r } diff --git 
a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule_schema.go index d19ce2705a..f5d8c81963 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule_schema.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule_schema.go @@ -102,7 +102,7 @@ func DCLFirewallPolicyRuleSchema() *dcl.Schema { "action": &dcl.Property{ Type: "string", GoName: "Action", - Description: "The Action to perform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", + Description: "The Action to perform when the client connection triggers the rule. Valid actions are \"allow\", \"deny\" and \"goto_next\".", }, "description": &dcl.Property{ Type: "string", @@ -158,6 +158,28 @@ func DCLFirewallPolicyRuleSchema() *dcl.Schema { "layer4Configs", }, Properties: map[string]*dcl.Property{ + "destAddressGroups": &dcl.Property{ + Type: "array", + GoName: "DestAddressGroups", + Description: "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "destFqdns": &dcl.Property{ + Type: "array", + GoName: "DestFqdns", + Description: "Domain names that will be used to match against the resolved domain name of destination of traffic. 
Can only be specified if DIRECTION is egress.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, "destIPRanges": &dcl.Property{ Type: "array", GoName: "DestIPRanges", @@ -169,6 +191,28 @@ func DCLFirewallPolicyRuleSchema() *dcl.Schema { GoType: "string", }, }, + "destRegionCodes": &dcl.Property{ + Type: "array", + GoName: "DestRegionCodes", + Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "destThreatIntelligences": &dcl.Property{ + Type: "array", + GoName: "DestThreatIntelligences", + Description: "Name of the Google Cloud Threat Intelligence list.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, "layer4Configs": &dcl.Property{ Type: "array", GoName: "Layer4Configs", @@ -201,6 +245,28 @@ func DCLFirewallPolicyRuleSchema() *dcl.Schema { }, }, }, + "srcAddressGroups": &dcl.Property{ + Type: "array", + GoName: "SrcAddressGroups", + Description: "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "srcFqdns": &dcl.Property{ + Type: "array", + GoName: "SrcFqdns", + Description: "Domain names that will be used to match against the resolved domain name of source of traffic. 
Can only be specified if DIRECTION is ingress.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, "srcIPRanges": &dcl.Property{ Type: "array", GoName: "SrcIPRanges", @@ -212,6 +278,28 @@ func DCLFirewallPolicyRuleSchema() *dcl.Schema { GoType: "string", }, }, + "srcRegionCodes": &dcl.Property{ + Type: "array", + GoName: "SrcRegionCodes", + Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "srcThreatIntelligences": &dcl.Property{ + Type: "array", + GoName: "SrcThreatIntelligences", + Description: "Name of the Google Cloud Threat Intelligence list.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, }, }, "priority": &dcl.Property{ diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule_yaml_embed.go index 00cb985044..407cc586fe 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/firewall_policy_rule_yaml_embed.go @@ -17,7 +17,7 @@ package compute // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/compute/firewall_policy_rule.yaml -var YAML_firewall_policy_rule = []byte("info:\n title: Compute/FirewallPolicyRule\n description: The Compute FirewallPolicyRule resource\n x-dcl-struct-name: 
FirewallPolicyRule\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a FirewallPolicyRule\n parameters:\n - name: firewallPolicyRule\n required: true\n description: A full instance of a FirewallPolicyRule\n apply:\n description: The function used to apply information about a FirewallPolicyRule\n parameters:\n - name: firewallPolicyRule\n required: true\n description: A full instance of a FirewallPolicyRule\n delete:\n description: The function used to delete a FirewallPolicyRule\n parameters:\n - name: firewallPolicyRule\n required: true\n description: A full instance of a FirewallPolicyRule\n deleteAll:\n description: The function used to delete all FirewallPolicyRule\n parameters:\n - name: firewallPolicy\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many FirewallPolicyRule\n parameters:\n - name: firewallPolicy\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n FirewallPolicyRule:\n title: FirewallPolicyRule\n x-dcl-id: locations/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}\n x-dcl-locations:\n - global\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - priority\n - match\n - action\n - direction\n - firewallPolicy\n properties:\n action:\n type: string\n x-dcl-go-name: Action\n description: The Action to perform when the client connection triggers the\n rule. Can currently be either \"allow\" or \"deny()\" where valid values for\n status are 403, 404, and 502.\n description:\n type: string\n x-dcl-go-name: Description\n description: An optional description for this resource.\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: FirewallPolicyRuleDirectionEnum\n description: 'The direction in which this rule applies. 
Possible values:\n INGRESS, EGRESS'\n enum:\n - INGRESS\n - EGRESS\n disabled:\n type: boolean\n x-dcl-go-name: Disabled\n description: Denotes whether the firewall policy rule is disabled. When\n set to true, the firewall policy rule is not enforced and traffic behaves\n as if it did not exist. If this is unspecified, the firewall policy rule\n will be enabled.\n enableLogging:\n type: boolean\n x-dcl-go-name: EnableLogging\n description: 'Denotes whether to enable logging for a particular rule. If\n logging is enabled, logs will be exported to the configured export destination\n in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you\n cannot enable logging on \"goto_next\" rules.'\n firewallPolicy:\n type: string\n x-dcl-go-name: FirewallPolicy\n description: The firewall policy of the resource.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/FirewallPolicy\n field: name\n parent: true\n kind:\n type: string\n x-dcl-go-name: Kind\n readOnly: true\n description: Type of the resource. Always `compute#firewallPolicyRule` for\n firewall policy rules\n x-kubernetes-immutable: true\n match:\n type: object\n x-dcl-go-name: Match\n x-dcl-go-type: FirewallPolicyRuleMatch\n description: A match condition that incoming traffic is evaluated against.\n If it evaluates to true, the corresponding 'action' is enforced.\n required:\n - layer4Configs\n properties:\n destIPRanges:\n type: array\n x-dcl-go-name: DestIPRanges\n description: CIDR IP address range. 
Maximum number of destination CIDR\n IP ranges allowed is 256.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n layer4Configs:\n type: array\n x-dcl-go-name: Layer4Configs\n description: Pairs of IP protocols and ports that the rule should match.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: FirewallPolicyRuleMatchLayer4Configs\n required:\n - ipProtocol\n properties:\n ipProtocol:\n type: string\n x-dcl-go-name: IPProtocol\n description: The IP protocol to which this rule applies. The protocol\n type is required when creating a firewall rule. This value can\n either be one of the following well known protocol strings (`tcp`,\n `udp`, `icmp`, `esp`, `ah`, `ipip`, `sctp`), or the IP protocol\n number.\n ports:\n type: array\n x-dcl-go-name: Ports\n description: 'An optional list of ports to which this rule applies.\n This field is only applicable for UDP or TCP protocol. Each\n entry must be either an integer or a range. If not specified,\n this rule applies to connections through any port. Example inputs\n include: ``.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n srcIPRanges:\n type: array\n x-dcl-go-name: SrcIPRanges\n description: CIDR IP address range. Maximum number of source CIDR IP\n ranges allowed is 256.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n priority:\n type: integer\n format: int64\n x-dcl-go-name: Priority\n description: An integer indicating the priority of a rule in the list. The\n priority must be a positive value between 0 and 2147483647. 
Rules are\n evaluated from highest to lowest priority where 0 is the highest priority\n and 2147483647 is the lowest prority.\n x-kubernetes-immutable: true\n ruleTupleCount:\n type: integer\n format: int64\n x-dcl-go-name: RuleTupleCount\n readOnly: true\n description: Calculation of the complexity of a single firewall policy rule.\n targetResources:\n type: array\n x-dcl-go-name: TargetResources\n description: A list of network resource URLs to which this rule applies.\n This field allows you to control which network's VMs get this rule. If\n this field is left blank, all VMs within the organization will receive\n the rule.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Network\n field: selfLink\n targetServiceAccounts:\n type: array\n x-dcl-go-name: TargetServiceAccounts\n description: A list of service accounts indicating the sets of instances\n that are applied with this rule.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: name\n") +var YAML_firewall_policy_rule = []byte("info:\n title: Compute/FirewallPolicyRule\n description: The Compute FirewallPolicyRule resource\n x-dcl-struct-name: FirewallPolicyRule\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a FirewallPolicyRule\n parameters:\n - name: firewallPolicyRule\n required: true\n description: A full instance of a FirewallPolicyRule\n apply:\n description: The function used to apply information about a FirewallPolicyRule\n parameters:\n - name: firewallPolicyRule\n required: true\n description: A full instance of a FirewallPolicyRule\n delete:\n description: The function used to delete a FirewallPolicyRule\n parameters:\n - name: firewallPolicyRule\n required: true\n description: A full instance of a FirewallPolicyRule\n deleteAll:\n description: The function 
used to delete all FirewallPolicyRule\n parameters:\n - name: firewallPolicy\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many FirewallPolicyRule\n parameters:\n - name: firewallPolicy\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n FirewallPolicyRule:\n title: FirewallPolicyRule\n x-dcl-id: locations/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}\n x-dcl-locations:\n - global\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - priority\n - match\n - action\n - direction\n - firewallPolicy\n properties:\n action:\n type: string\n x-dcl-go-name: Action\n description: The Action to perform when the client connection triggers the\n rule. Valid actions are \"allow\", \"deny\" and \"goto_next\".\n description:\n type: string\n x-dcl-go-name: Description\n description: An optional description for this resource.\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: FirewallPolicyRuleDirectionEnum\n description: 'The direction in which this rule applies. Possible values:\n INGRESS, EGRESS'\n enum:\n - INGRESS\n - EGRESS\n disabled:\n type: boolean\n x-dcl-go-name: Disabled\n description: Denotes whether the firewall policy rule is disabled. When\n set to true, the firewall policy rule is not enforced and traffic behaves\n as if it did not exist. If this is unspecified, the firewall policy rule\n will be enabled.\n enableLogging:\n type: boolean\n x-dcl-go-name: EnableLogging\n description: 'Denotes whether to enable logging for a particular rule. If\n logging is enabled, logs will be exported to the configured export destination\n in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. 
Note: you\n cannot enable logging on \"goto_next\" rules.'\n firewallPolicy:\n type: string\n x-dcl-go-name: FirewallPolicy\n description: The firewall policy of the resource.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/FirewallPolicy\n field: name\n parent: true\n kind:\n type: string\n x-dcl-go-name: Kind\n readOnly: true\n description: Type of the resource. Always `compute#firewallPolicyRule` for\n firewall policy rules\n x-kubernetes-immutable: true\n match:\n type: object\n x-dcl-go-name: Match\n x-dcl-go-type: FirewallPolicyRuleMatch\n description: A match condition that incoming traffic is evaluated against.\n If it evaluates to true, the corresponding 'action' is enforced.\n required:\n - layer4Configs\n properties:\n destAddressGroups:\n type: array\n x-dcl-go-name: DestAddressGroups\n description: Address groups which should be matched against the traffic\n destination. Maximum number of destination address groups is 10. Destination\n address groups is only supported in Egress rules.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n destFqdns:\n type: array\n x-dcl-go-name: DestFqdns\n description: Domain names that will be used to match against the resolved\n domain name of destination of traffic. Can only be specified if DIRECTION\n is egress.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n destIPRanges:\n type: array\n x-dcl-go-name: DestIPRanges\n description: CIDR IP address range. Maximum number of destination CIDR\n IP ranges allowed is 256.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n destRegionCodes:\n type: array\n x-dcl-go-name: DestRegionCodes\n description: The Unicode country codes whose IP addresses will be used\n to match against the source of traffic. 
Can only be specified if DIRECTION\n is egress.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n destThreatIntelligences:\n type: array\n x-dcl-go-name: DestThreatIntelligences\n description: Name of the Google Cloud Threat Intelligence list.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n layer4Configs:\n type: array\n x-dcl-go-name: Layer4Configs\n description: Pairs of IP protocols and ports that the rule should match.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: FirewallPolicyRuleMatchLayer4Configs\n required:\n - ipProtocol\n properties:\n ipProtocol:\n type: string\n x-dcl-go-name: IPProtocol\n description: The IP protocol to which this rule applies. The protocol\n type is required when creating a firewall rule. This value can\n either be one of the following well known protocol strings (`tcp`,\n `udp`, `icmp`, `esp`, `ah`, `ipip`, `sctp`), or the IP protocol\n number.\n ports:\n type: array\n x-dcl-go-name: Ports\n description: 'An optional list of ports to which this rule applies.\n This field is only applicable for UDP or TCP protocol. Each\n entry must be either an integer or a range. If not specified,\n this rule applies to connections through any port. Example inputs\n include: ``.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n srcAddressGroups:\n type: array\n x-dcl-go-name: SrcAddressGroups\n description: Address groups which should be matched against the traffic\n source. Maximum number of source address groups is 10. Source address\n groups is only supported in Ingress rules.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n srcFqdns:\n type: array\n x-dcl-go-name: SrcFqdns\n description: Domain names that will be used to match against the resolved\n domain name of source of traffic. 
Can only be specified if DIRECTION\n is ingress.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n srcIPRanges:\n type: array\n x-dcl-go-name: SrcIPRanges\n description: CIDR IP address range. Maximum number of source CIDR IP\n ranges allowed is 256.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n srcRegionCodes:\n type: array\n x-dcl-go-name: SrcRegionCodes\n description: The Unicode country codes whose IP addresses will be used\n to match against the source of traffic. Can only be specified if DIRECTION\n is ingress.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n srcThreatIntelligences:\n type: array\n x-dcl-go-name: SrcThreatIntelligences\n description: Name of the Google Cloud Threat Intelligence list.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n priority:\n type: integer\n format: int64\n x-dcl-go-name: Priority\n description: An integer indicating the priority of a rule in the list. The\n priority must be a positive value between 0 and 2147483647. Rules are\n evaluated from highest to lowest priority where 0 is the highest priority\n and 2147483647 is the lowest prority.\n x-kubernetes-immutable: true\n ruleTupleCount:\n type: integer\n format: int64\n x-dcl-go-name: RuleTupleCount\n readOnly: true\n description: Calculation of the complexity of a single firewall policy rule.\n targetResources:\n type: array\n x-dcl-go-name: TargetResources\n description: A list of network resource URLs to which this rule applies.\n This field allows you to control which network's VMs get this rule. 
If\n this field is left blank, all VMs within the organization will receive\n the rule.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Compute/Network\n field: selfLink\n targetServiceAccounts:\n type: array\n x-dcl-go-name: TargetServiceAccounts\n description: A list of service accounts indicating the sets of instances\n that are applied with this rule.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: name\n") -// 8165 bytes -// MD5: 3004bd6501bb2a871c874f4628108176 +// 11648 bytes +// MD5: a8b05c7caab7e7abbe018e4057c6a4bf diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule.go index b7bd480f80..881842e042 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule.go @@ -54,6 +54,8 @@ type ForwardingRule struct { ServiceDirectoryRegistrations []ForwardingRuleServiceDirectoryRegistrations `json:"serviceDirectoryRegistrations"` PscConnectionId *string `json:"pscConnectionId"` PscConnectionStatus *ForwardingRulePscConnectionStatusEnum `json:"pscConnectionStatus"` + SourceIPRanges []string `json:"sourceIPRanges"` + BaseForwardingRule *string `json:"baseForwardingRule"` } func (r *ForwardingRule) String() string { @@ -414,6 +416,8 @@ func (r *ForwardingRule) ID() (string, error) { "service_directory_registrations": dcl.ValueOrEmptyString(nr.ServiceDirectoryRegistrations), "psc_connection_id": 
dcl.ValueOrEmptyString(nr.PscConnectionId), "psc_connection_status": dcl.ValueOrEmptyString(nr.PscConnectionStatus), + "source_ip_ranges": dcl.ValueOrEmptyString(nr.SourceIPRanges), + "base_forwarding_rule": dcl.ValueOrEmptyString(nr.BaseForwardingRule), } if dcl.IsRegion(nr.Location) { return dcl.Nprintf("projects/{{project}}/regions/{{location}}/forwardingRules/{{name}}", params), nil diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule.yaml index 2ad73cd9d4..45d26c1947 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule.yaml @@ -102,6 +102,16 @@ components: internal load balancing, this field identifies the BackendService resource to receive the matched traffic. x-kubernetes-immutable: true + baseForwardingRule: + type: string + x-dcl-go-name: BaseForwardingRule + readOnly: true + description: '[Output Only] The URL for the corresponding base Forwarding + Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the + same IP address, protocol, and port settings with the current Forwarding + Rule, but without sourceIPRanges specified. Always empty if the current + Forwarding Rule does not have sourceIPRanges specified.' + x-kubernetes-immutable: true creationTimestamp: type: string x-dcl-go-name: CreationTimestamp @@ -446,6 +456,21 @@ components: description: '[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.' 
x-kubernetes-immutable: true + sourceIPRanges: + type: array + x-dcl-go-name: SourceIPRanges + description: If not empty, this Forwarding Rule will only forward the traffic + when the source IP address matches one of the IP addresses or CIDR ranges + set here. Note that a Forwarding Rule can only have up to 64 source IP + ranges, and this field can only be used with a regional Forwarding Rule + whose scheme is EXTERNAL. Each sourceIpRange entry should be either an + IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24). + x-kubernetes-immutable: true + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string subnetwork: type: string x-dcl-go-name: Subnetwork diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule_internal.go index 1a56539577..786abe8fd1 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule_internal.go @@ -731,6 +731,11 @@ func canonicalizeForwardingRuleDesiredState(rawDesired, rawInitial *ForwardingRu canonicalDesired.Location = rawDesired.Location } canonicalDesired.ServiceDirectoryRegistrations = canonicalizeForwardingRuleServiceDirectoryRegistrationsSlice(rawDesired.ServiceDirectoryRegistrations, rawInitial.ServiceDirectoryRegistrations, opts...) 
+ if dcl.StringArrayCanonicalize(rawDesired.SourceIPRanges, rawInitial.SourceIPRanges) { + canonicalDesired.SourceIPRanges = rawInitial.SourceIPRanges + } else { + canonicalDesired.SourceIPRanges = rawDesired.SourceIPRanges + } return canonicalDesired, nil } @@ -934,6 +939,22 @@ func canonicalizeForwardingRuleNewState(c *Client, rawNew, rawDesired *Forwardin } else { } + if dcl.IsEmptyValueIndirect(rawNew.SourceIPRanges) && dcl.IsEmptyValueIndirect(rawDesired.SourceIPRanges) { + rawNew.SourceIPRanges = rawDesired.SourceIPRanges + } else { + if dcl.StringArrayCanonicalize(rawDesired.SourceIPRanges, rawNew.SourceIPRanges) { + rawNew.SourceIPRanges = rawDesired.SourceIPRanges + } + } + + if dcl.IsEmptyValueIndirect(rawNew.BaseForwardingRule) && dcl.IsEmptyValueIndirect(rawDesired.BaseForwardingRule) { + rawNew.BaseForwardingRule = rawDesired.BaseForwardingRule + } else { + if dcl.StringCanonicalize(rawDesired.BaseForwardingRule, rawNew.BaseForwardingRule) { + rawNew.BaseForwardingRule = rawDesired.BaseForwardingRule + } + } + return rawNew, nil } @@ -1528,6 +1549,20 @@ func diffForwardingRule(c *Client, desired, actual *ForwardingRule, opts ...dcl. newDiffs = append(newDiffs, ds...) } + if ds, err := dcl.Diff(desired.SourceIPRanges, actual.SourceIPRanges, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SourceIpRanges")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + + if ds, err := dcl.Diff(desired.BaseForwardingRule, actual.BaseForwardingRule, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("BaseForwardingRule")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) 
+ } + if len(newDiffs) > 0 { c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) } @@ -1663,6 +1698,7 @@ func (r *ForwardingRule) urlNormalized() *ForwardingRule { normalized.Project = dcl.SelfLinkToName(r.Project) normalized.Location = dcl.SelfLinkToName(r.Location) normalized.PscConnectionId = dcl.SelfLinkToName(r.PscConnectionId) + normalized.BaseForwardingRule = dcl.SelfLinkToName(r.BaseForwardingRule) return &normalized } @@ -1831,6 +1867,9 @@ func expandForwardingRule(c *Client, f *ForwardingRule) (map[string]interface{}, } else if v != nil { m["serviceDirectoryRegistrations"] = v } + if v := f.SourceIPRanges; v != nil { + m["sourceIpRanges"] = v + } return m, nil } @@ -1876,6 +1915,8 @@ func flattenForwardingRule(c *Client, i interface{}, res *ForwardingRule) *Forwa resultRes.ServiceDirectoryRegistrations = flattenForwardingRuleServiceDirectoryRegistrationsSlice(c, m["serviceDirectoryRegistrations"], res) resultRes.PscConnectionId = dcl.FlattenString(m["pscConnectionId"]) resultRes.PscConnectionStatus = flattenForwardingRulePscConnectionStatusEnum(m["pscConnectionStatus"]) + resultRes.SourceIPRanges = dcl.FlattenStringSlice(m["sourceIpRanges"]) + resultRes.BaseForwardingRule = dcl.FlattenString(m["baseForwardingRule"]) return resultRes } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule_schema.go index cc82fff2aa..92a563ae1c 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule_schema.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule_schema.go @@ -130,6 +130,13 @@ func DCLForwardingRuleSchema() 
*dcl.Schema { Description: "This field is only used for `INTERNAL` load balancing. For internal load balancing, this field identifies the BackendService resource to receive the matched traffic.", Immutable: true, }, + "baseForwardingRule": &dcl.Property{ + Type: "string", + GoName: "BaseForwardingRule", + ReadOnly: true, + Description: "[Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified.", + Immutable: true, + }, "creationTimestamp": &dcl.Property{ Type: "string", GoName: "CreationTimestamp", @@ -414,6 +421,18 @@ func DCLForwardingRuleSchema() *dcl.Schema { Description: "[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.", Immutable: true, }, + "sourceIPRanges": &dcl.Property{ + Type: "array", + GoName: "SourceIPRanges", + Description: "If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. 
Each sourceIpRange entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", + Immutable: true, + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, "subnetwork": &dcl.Property{ Type: "string", GoName: "Subnetwork", diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule_yaml_embed.go index 20622b70d0..431c9ff378 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/forwarding_rule_yaml_embed.go @@ -17,7 +17,7 @@ package compute // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/compute/forwarding_rule.yaml -var YAML_forwarding_rule = []byte("info:\n title: Compute/ForwardingRule\n description: The Compute ForwardingRule resource\n x-dcl-struct-name: ForwardingRule\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a ForwardingRule\n parameters:\n - name: forwardingRule\n required: true\n description: A full instance of a ForwardingRule\n apply:\n description: The function used to apply information about a ForwardingRule\n parameters:\n - name: forwardingRule\n required: true\n description: A full instance of a ForwardingRule\n delete:\n description: The function used to delete a ForwardingRule\n parameters:\n - name: forwardingRule\n required: true\n description: A full instance of a ForwardingRule\n deleteAll:\n description: The function used to delete all ForwardingRule\n parameters:\n - name: project\n 
required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many ForwardingRule\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n ForwardingRule:\n title: ForwardingRule\n x-dcl-id: projects/{{project}}/global/forwardingRules/{{name}}\n x-dcl-locations:\n - region\n - global\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n properties:\n allPorts:\n type: boolean\n x-dcl-go-name: AllPorts\n description: This field is used along with the `backend_service` field for\n internal load balancing or with the `target` field for internal TargetInstance.\n This field cannot be used with `port` or `portRange` fields. When the\n load balancing scheme is `INTERNAL` and protocol is TCP/UDP, specify this\n field to allow packets addressed to any ports will be forwarded to the\n backends configured with this forwarding rule.\n x-kubernetes-immutable: true\n allowGlobalAccess:\n type: boolean\n x-dcl-go-name: AllowGlobalAccess\n description: This field is used along with the `backend_service` field for\n internal load balancing or with the `target` field for internal TargetInstance.\n If the field is set to `TRUE`, clients can access ILB from all regions.\n Otherwise only allows access from clients in the same region as the internal\n load balancer.\n backendService:\n type: string\n x-dcl-go-name: BackendService\n description: This field is only used for `INTERNAL` load balancing. 
For\n internal load balancing, this field identifies the BackendService resource\n to receive the matched traffic.\n x-kubernetes-immutable: true\n creationTimestamp:\n type: string\n x-dcl-go-name: CreationTimestamp\n readOnly: true\n description: '[Output Only] Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)\n text format.'\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: An optional description of this resource. Provide this property\n when you create the resource.\n x-kubernetes-immutable: true\n ipAddress:\n type: string\n x-dcl-go-name: IPAddress\n description: 'IP address that this forwarding rule serves. When a client\n sends traffic to this IP address, the forwarding rule directs the traffic\n to the target that you specify in the forwarding rule. If you don''t specify\n a reserved IP address, an ephemeral IP address is assigned. Methods for\n specifying an IP address: * IPv4 dotted decimal, as in `100.1.2.3` * Full\n URL, as in `https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name`\n * Partial URL or by name, as in: * `projects/project_id/regions/region/addresses/address-name`\n * `regions/region/addresses/address-name` * `global/addresses/address-name`\n * `address-name` The loadBalancingScheme and the forwarding rule''s target\n determine the type of IP address that you can use. For detailed information,\n refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).'\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n ipProtocol:\n type: string\n x-dcl-go-name: IPProtocol\n x-dcl-go-type: ForwardingRuleIPProtocolEnum\n description: The IP protocol to which this rule applies. For protocol forwarding,\n valid options are `TCP`, `UDP`, `ESP`, `AH`, `SCTP` or `ICMP`. 
For Internal\n TCP/UDP Load Balancing, the load balancing scheme is `INTERNAL`, and one\n of `TCP` or `UDP` are valid. For Traffic Director, the load balancing\n scheme is `INTERNAL_SELF_MANAGED`, and only `TCP`is valid. For Internal\n HTTP(S) Load Balancing, the load balancing scheme is `INTERNAL_MANAGED`,\n and only `TCP` is valid. For HTTP(S), SSL Proxy, and TCP Proxy Load Balancing,\n the load balancing scheme is `EXTERNAL` and only `TCP` is valid. For Network\n TCP/UDP Load Balancing, the load balancing scheme is `EXTERNAL`, and one\n of `TCP` or `UDP` is valid.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n enum:\n - TCP\n - UDP\n - ESP\n - AH\n - SCTP\n - ICMP\n - L3_DEFAULT\n ipVersion:\n type: string\n x-dcl-go-name: IPVersion\n x-dcl-go-type: ForwardingRuleIPVersionEnum\n description: 'The IP Version that will be used by this forwarding rule.\n Valid options are `IPV4` or `IPV6`. This can only be specified for an\n external global forwarding rule. Possible values: UNSPECIFIED_VERSION,\n IPV4, IPV6'\n x-kubernetes-immutable: true\n enum:\n - UNSPECIFIED_VERSION\n - IPV4\n - IPV6\n isMirroringCollector:\n type: boolean\n x-dcl-go-name: IsMirroringCollector\n description: Indicates whether or not this load balancer can be used as\n a collector for packet mirroring. To prevent mirroring loops, instances\n behind this load balancer will not have their traffic mirrored even if\n a `PacketMirroring` rule applies to them. 
This can only be set to true\n for load balancers that have their `loadBalancingScheme` set to `INTERNAL`.\n x-kubernetes-immutable: true\n labelFingerprint:\n type: string\n x-dcl-go-name: LabelFingerprint\n readOnly: true\n description: Used internally during label updates.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Labels to apply to this rule.\n loadBalancingScheme:\n type: string\n x-dcl-go-name: LoadBalancingScheme\n x-dcl-go-type: ForwardingRuleLoadBalancingSchemeEnum\n description: \"Specifies the forwarding rule type.\\n\\n* `EXTERNAL` is used\n for:\\n * Classic Cloud VPN gateways\\n * Protocol forwarding\n to VMs from an external IP address\\n * The following load balancers:\n HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP\\n* `INTERNAL` is\n used for:\\n * Protocol forwarding to VMs from an internal IP address\\n\n \\ * Internal TCP/UDP load balancers\\n* `INTERNAL_MANAGED` is used\n for:\\n * Internal HTTP(S) load balancers\\n* `INTERNAL_SELF_MANAGED`\n is used for:\\n * Traffic Director\\n* `EXTERNAL_MANAGED` is used\n for:\\n * Global external HTTP(S) load balancers \\n\\nFor more information\n about forwarding rules, refer to [Forwarding rule concepts](/load-balancing/docs/forwarding-rule-concepts).\n Possible values: INVALID, INTERNAL, INTERNAL_MANAGED, INTERNAL_SELF_MANAGED,\n EXTERNAL, EXTERNAL_MANAGED\"\n x-kubernetes-immutable: true\n enum:\n - INVALID\n - INTERNAL\n - INTERNAL_MANAGED\n - INTERNAL_SELF_MANAGED\n - EXTERNAL\n - EXTERNAL_MANAGED\n location:\n type: string\n x-dcl-go-name: Location\n description: The location of this resource.\n x-kubernetes-immutable: true\n metadataFilter:\n type: array\n x-dcl-go-name: MetadataFilter\n description: |-\n Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of [xDS](https://github.com/envoyproxy/data-plane-api/blob/master/XDS_PROTOCOL.md) compliant clients. 
In their xDS requests to Loadbalancer, xDS clients present [node metadata](https://github.com/envoyproxy/data-plane-api/search?q=%22message+Node%22+in%3A%2Fenvoy%2Fapi%2Fv2%2Fcore%2Fbase.proto&). If a match takes place, the relevant configuration is made available to those proxies. Otherwise, all the resources (e.g. `TargetHttpProxy`, `UrlMap`) referenced by the `ForwardingRule` will not be visible to those proxies.\n\n For each `metadataFilter` in this list, if its `filterMatchCriteria` is set to MATCH_ANY, at least one of the `filterLabel`s must match the corresponding label provided in the metadata. If its `filterMatchCriteria` is set to MATCH_ALL, then all of its `filterLabel`s must match with corresponding labels provided in the metadata.\n\n `metadataFilters` specified here will be applifed before those specified in the `UrlMap` that this `ForwardingRule` references.\n\n `metadataFilters` only applies to Loadbalancers that have their loadBalancingScheme set to `INTERNAL_SELF_MANAGED`.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ForwardingRuleMetadataFilter\n required:\n - filterMatchCriteria\n - filterLabel\n properties:\n filterLabel:\n type: array\n x-dcl-go-name: FilterLabel\n description: |-\n The list of label value pairs that must match labels in the provided metadata based on `filterMatchCriteria`\n\n This list must not be empty and can have at the most 64 entries.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ForwardingRuleMetadataFilterFilterLabel\n required:\n - name\n - value\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: |-\n Name of metadata label.\n\n The name can have a maximum length of 1024 characters and must be at least 1 character long.\n x-kubernetes-immutable: true\n value:\n type: string\n x-dcl-go-name: Value\n description: |-\n The value of the label must 
match the specified value.\n\n value can have a maximum length of 1024 characters.\n x-kubernetes-immutable: true\n filterMatchCriteria:\n type: string\n x-dcl-go-name: FilterMatchCriteria\n x-dcl-go-type: ForwardingRuleMetadataFilterFilterMatchCriteriaEnum\n description: |-\n Specifies how individual `filterLabel` matches within the list of `filterLabel`s contribute towards the overall `metadataFilter` match.\n\n Supported values are:\n\n * MATCH_ANY: At least one of the `filterLabels` must have a matching label in the provided metadata.\n * MATCH_ALL: All `filterLabels` must have matching labels in the provided metadata. Possible values: NOT_SET, MATCH_ALL, MATCH_ANY\n x-kubernetes-immutable: true\n enum:\n - NOT_SET\n - MATCH_ALL\n - MATCH_ANY\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the resource; provided by the client when the resource\n is created. The name must be 1-63 characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt).\n Specifically, the name must be 1-63 characters long and match the regular\n expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character\n must be a lowercase letter, and all following characters must be a dash,\n lowercase letter, or digit, except the last character, which cannot be\n a dash.\n x-kubernetes-immutable: true\n network:\n type: string\n x-dcl-go-name: Network\n description: This field is not used for external load balancing. For `INTERNAL`\n and `INTERNAL_SELF_MANAGED` load balancing, this field identifies the\n network that the load balanced IP should belong to for this Forwarding\n Rule. 
If this field is not specified, the default network will be used.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n networkTier:\n type: string\n x-dcl-go-name: NetworkTier\n x-dcl-go-type: ForwardingRuleNetworkTierEnum\n description: 'This signifies the networking tier used for configuring this\n load balancer and can only take the following values: `PREMIUM`, `STANDARD`.\n For regional ForwardingRule, the valid values are `PREMIUM` and `STANDARD`.\n For GlobalForwardingRule, the valid value is `PREMIUM`. If this field\n is not specified, it is assumed to be `PREMIUM`. If `IPAddress` is specified,\n this value must be equal to the networkTier of the Address.'\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n enum:\n - PREMIUM\n - STANDARD\n portRange:\n type: string\n x-dcl-go-name: PortRange\n description: |-\n When the load balancing scheme is `EXTERNAL`, `INTERNAL_SELF_MANAGED` and `INTERNAL_MANAGED`, you can specify a `port_range`. Use with a forwarding rule that points to a target proxy or a target pool. Do not use with a forwarding rule that points to a backend service. This field is used along with the `target` field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. Applicable only when `IPProtocol` is `TCP`, `UDP`, or `SCTP`, only packets addressed to ports in the specified range will be forwarded to `target`. Forwarding rules with the same `[IPAddress, IPProtocol]` pair must have disjoint port ranges. 
Some types of forwarding target have constraints on the acceptable ports:\n\n * TargetHttpProxy: 80, 8080\n * TargetHttpsProxy: 443\n * TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n * TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n * TargetVpnGateway: 500, 4500\n\n @pattern: d+(?:-d+)?\n x-kubernetes-immutable: true\n ports:\n type: array\n x-dcl-go-name: Ports\n description: 'This field is used along with the `backend_service` field\n for internal load balancing. When the load balancing scheme is `INTERNAL`,\n a list of ports can be configured, for example, [''80''], [''8000'',''9000''].\n Only packets addressed to these ports are forwarded to the backends configured\n with the forwarding rule. If the forwarding rule''s loadBalancingScheme\n is INTERNAL, you can specify ports in one of the following ways: * A list\n of up to five ports, which can be non-contiguous * Keyword `ALL`, which\n causes the forwarding rule to forward traffic on any port of the forwarding\n rule''s protocol. @pattern: d+(?:-d+)? For more information, refer to\n [Port specifications](/load-balancing/docs/forwarding-rule-concepts#port_specifications).'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: string\n x-dcl-go-type: string\n project:\n type: string\n x-dcl-go-name: Project\n description: The project this resource belongs in.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n pscConnectionId:\n type: string\n x-dcl-go-name: PscConnectionId\n readOnly: true\n description: The PSC connection id of the PSC Forwarding Rule.\n x-kubernetes-immutable: true\n pscConnectionStatus:\n type: string\n x-dcl-go-name: PscConnectionStatus\n x-dcl-go-type: ForwardingRulePscConnectionStatusEnum\n readOnly: true\n description: 'The PSC connection status of the PSC Forwarding Rule. 
Possible\n values: STATUS_UNSPECIFIED, PENDING, ACCEPTED, REJECTED, CLOSED'\n x-kubernetes-immutable: true\n enum:\n - STATUS_UNSPECIFIED\n - PENDING\n - ACCEPTED\n - REJECTED\n - CLOSED\n region:\n type: string\n x-dcl-go-name: Region\n readOnly: true\n description: '[Output Only] URL of the region where the regional forwarding\n rule resides. This field is not applicable to global forwarding rules.\n You must specify this field as part of the HTTP request URL. It is not\n settable as a field in the request body.'\n x-kubernetes-immutable: true\n selfLink:\n type: string\n x-dcl-go-name: SelfLink\n readOnly: true\n description: '[Output Only] Server-defined URL for the resource.'\n x-kubernetes-immutable: true\n serviceDirectoryRegistrations:\n type: array\n x-dcl-go-name: ServiceDirectoryRegistrations\n description: Service Directory resources to register this forwarding rule\n with. Currently, only supports a single Service Directory resource.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ForwardingRuleServiceDirectoryRegistrations\n properties:\n namespace:\n type: string\n x-dcl-go-name: Namespace\n description: Service Directory namespace to register the forwarding\n rule under.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n service:\n type: string\n x-dcl-go-name: Service\n description: Service Directory service to register the forwarding\n rule under.\n x-kubernetes-immutable: true\n serviceLabel:\n type: string\n x-dcl-go-name: ServiceLabel\n description: An optional prefix to the service name for this Forwarding\n Rule. If specified, the prefix is the first label of the fully qualified\n service name. The label must be 1-63 characters long, and comply with\n [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). 
Specifically, the label\n must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`\n which means the first character must be a lowercase letter, and all following\n characters must be a dash, lowercase letter, or digit, except the last\n character, which cannot be a dash. This field is only used for internal\n load balancing.\n x-kubernetes-immutable: true\n serviceName:\n type: string\n x-dcl-go-name: ServiceName\n readOnly: true\n description: '[Output Only] The internal fully qualified service name for\n this Forwarding Rule. This field is only used for internal load balancing.'\n x-kubernetes-immutable: true\n subnetwork:\n type: string\n x-dcl-go-name: Subnetwork\n description: This field is only used for `INTERNAL` load balancing. For\n internal load balancing, this field identifies the subnetwork that the\n load balanced IP should belong to for this Forwarding Rule. If the network\n specified is in auto subnet mode, this field is optional. However, if\n the network is in custom subnet mode, a subnetwork must be specified.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n target:\n type: string\n x-dcl-go-name: Target\n description: The URL of the target resource to receive the matched traffic.\n For regional forwarding rules, this target must live in the same region\n as the forwarding rule. For global forwarding rules, this target must\n be a global load balancing resource. The forwarded traffic must be of\n a type appropriate to the target object. 
For `INTERNAL_SELF_MANAGED` load\n balancing, only `targetHttpProxy` is valid, not `targetHttpsProxy`.\n") +var YAML_forwarding_rule = []byte("info:\n title: Compute/ForwardingRule\n description: The Compute ForwardingRule resource\n x-dcl-struct-name: ForwardingRule\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a ForwardingRule\n parameters:\n - name: forwardingRule\n required: true\n description: A full instance of a ForwardingRule\n apply:\n description: The function used to apply information about a ForwardingRule\n parameters:\n - name: forwardingRule\n required: true\n description: A full instance of a ForwardingRule\n delete:\n description: The function used to delete a ForwardingRule\n parameters:\n - name: forwardingRule\n required: true\n description: A full instance of a ForwardingRule\n deleteAll:\n description: The function used to delete all ForwardingRule\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many ForwardingRule\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n ForwardingRule:\n title: ForwardingRule\n x-dcl-id: projects/{{project}}/global/forwardingRules/{{name}}\n x-dcl-locations:\n - region\n - global\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n properties:\n allPorts:\n type: boolean\n x-dcl-go-name: AllPorts\n description: This field is used along with the `backend_service` field for\n internal load balancing or with the `target` field for internal TargetInstance.\n This field cannot be used with `port` or `portRange` fields. 
When the\n load balancing scheme is `INTERNAL` and protocol is TCP/UDP, specify this\n field to allow packets addressed to any ports will be forwarded to the\n backends configured with this forwarding rule.\n x-kubernetes-immutable: true\n allowGlobalAccess:\n type: boolean\n x-dcl-go-name: AllowGlobalAccess\n description: This field is used along with the `backend_service` field for\n internal load balancing or with the `target` field for internal TargetInstance.\n If the field is set to `TRUE`, clients can access ILB from all regions.\n Otherwise only allows access from clients in the same region as the internal\n load balancer.\n backendService:\n type: string\n x-dcl-go-name: BackendService\n description: This field is only used for `INTERNAL` load balancing. For\n internal load balancing, this field identifies the BackendService resource\n to receive the matched traffic.\n x-kubernetes-immutable: true\n baseForwardingRule:\n type: string\n x-dcl-go-name: BaseForwardingRule\n readOnly: true\n description: '[Output Only] The URL for the corresponding base Forwarding\n Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the\n same IP address, protocol, and port settings with the current Forwarding\n Rule, but without sourceIPRanges specified. Always empty if the current\n Forwarding Rule does not have sourceIPRanges specified.'\n x-kubernetes-immutable: true\n creationTimestamp:\n type: string\n x-dcl-go-name: CreationTimestamp\n readOnly: true\n description: '[Output Only] Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)\n text format.'\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: An optional description of this resource. Provide this property\n when you create the resource.\n x-kubernetes-immutable: true\n ipAddress:\n type: string\n x-dcl-go-name: IPAddress\n description: 'IP address that this forwarding rule serves. 
When a client\n sends traffic to this IP address, the forwarding rule directs the traffic\n to the target that you specify in the forwarding rule. If you don''t specify\n a reserved IP address, an ephemeral IP address is assigned. Methods for\n specifying an IP address: * IPv4 dotted decimal, as in `100.1.2.3` * Full\n URL, as in `https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name`\n * Partial URL or by name, as in: * `projects/project_id/regions/region/addresses/address-name`\n * `regions/region/addresses/address-name` * `global/addresses/address-name`\n * `address-name` The loadBalancingScheme and the forwarding rule''s target\n determine the type of IP address that you can use. For detailed information,\n refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).'\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n ipProtocol:\n type: string\n x-dcl-go-name: IPProtocol\n x-dcl-go-type: ForwardingRuleIPProtocolEnum\n description: The IP protocol to which this rule applies. For protocol forwarding,\n valid options are `TCP`, `UDP`, `ESP`, `AH`, `SCTP` or `ICMP`. For Internal\n TCP/UDP Load Balancing, the load balancing scheme is `INTERNAL`, and one\n of `TCP` or `UDP` are valid. For Traffic Director, the load balancing\n scheme is `INTERNAL_SELF_MANAGED`, and only `TCP`is valid. For Internal\n HTTP(S) Load Balancing, the load balancing scheme is `INTERNAL_MANAGED`,\n and only `TCP` is valid. For HTTP(S), SSL Proxy, and TCP Proxy Load Balancing,\n the load balancing scheme is `EXTERNAL` and only `TCP` is valid. 
For Network\n TCP/UDP Load Balancing, the load balancing scheme is `EXTERNAL`, and one\n of `TCP` or `UDP` is valid.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n enum:\n - TCP\n - UDP\n - ESP\n - AH\n - SCTP\n - ICMP\n - L3_DEFAULT\n ipVersion:\n type: string\n x-dcl-go-name: IPVersion\n x-dcl-go-type: ForwardingRuleIPVersionEnum\n description: 'The IP Version that will be used by this forwarding rule.\n Valid options are `IPV4` or `IPV6`. This can only be specified for an\n external global forwarding rule. Possible values: UNSPECIFIED_VERSION,\n IPV4, IPV6'\n x-kubernetes-immutable: true\n enum:\n - UNSPECIFIED_VERSION\n - IPV4\n - IPV6\n isMirroringCollector:\n type: boolean\n x-dcl-go-name: IsMirroringCollector\n description: Indicates whether or not this load balancer can be used as\n a collector for packet mirroring. To prevent mirroring loops, instances\n behind this load balancer will not have their traffic mirrored even if\n a `PacketMirroring` rule applies to them. 
This can only be set to true\n for load balancers that have their `loadBalancingScheme` set to `INTERNAL`.\n x-kubernetes-immutable: true\n labelFingerprint:\n type: string\n x-dcl-go-name: LabelFingerprint\n readOnly: true\n description: Used internally during label updates.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Labels to apply to this rule.\n loadBalancingScheme:\n type: string\n x-dcl-go-name: LoadBalancingScheme\n x-dcl-go-type: ForwardingRuleLoadBalancingSchemeEnum\n description: \"Specifies the forwarding rule type.\\n\\n* `EXTERNAL` is used\n for:\\n * Classic Cloud VPN gateways\\n * Protocol forwarding\n to VMs from an external IP address\\n * The following load balancers:\n HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP\\n* `INTERNAL` is\n used for:\\n * Protocol forwarding to VMs from an internal IP address\\n\n \\ * Internal TCP/UDP load balancers\\n* `INTERNAL_MANAGED` is used\n for:\\n * Internal HTTP(S) load balancers\\n* `INTERNAL_SELF_MANAGED`\n is used for:\\n * Traffic Director\\n* `EXTERNAL_MANAGED` is used\n for:\\n * Global external HTTP(S) load balancers \\n\\nFor more information\n about forwarding rules, refer to [Forwarding rule concepts](/load-balancing/docs/forwarding-rule-concepts).\n Possible values: INVALID, INTERNAL, INTERNAL_MANAGED, INTERNAL_SELF_MANAGED,\n EXTERNAL, EXTERNAL_MANAGED\"\n x-kubernetes-immutable: true\n enum:\n - INVALID\n - INTERNAL\n - INTERNAL_MANAGED\n - INTERNAL_SELF_MANAGED\n - EXTERNAL\n - EXTERNAL_MANAGED\n location:\n type: string\n x-dcl-go-name: Location\n description: The location of this resource.\n x-kubernetes-immutable: true\n metadataFilter:\n type: array\n x-dcl-go-name: MetadataFilter\n description: |-\n Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of [xDS](https://github.com/envoyproxy/data-plane-api/blob/master/XDS_PROTOCOL.md) compliant clients. 
In their xDS requests to Loadbalancer, xDS clients present [node metadata](https://github.com/envoyproxy/data-plane-api/search?q=%22message+Node%22+in%3A%2Fenvoy%2Fapi%2Fv2%2Fcore%2Fbase.proto&). If a match takes place, the relevant configuration is made available to those proxies. Otherwise, all the resources (e.g. `TargetHttpProxy`, `UrlMap`) referenced by the `ForwardingRule` will not be visible to those proxies.\n\n For each `metadataFilter` in this list, if its `filterMatchCriteria` is set to MATCH_ANY, at least one of the `filterLabel`s must match the corresponding label provided in the metadata. If its `filterMatchCriteria` is set to MATCH_ALL, then all of its `filterLabel`s must match with corresponding labels provided in the metadata.\n\n `metadataFilters` specified here will be applifed before those specified in the `UrlMap` that this `ForwardingRule` references.\n\n `metadataFilters` only applies to Loadbalancers that have their loadBalancingScheme set to `INTERNAL_SELF_MANAGED`.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ForwardingRuleMetadataFilter\n required:\n - filterMatchCriteria\n - filterLabel\n properties:\n filterLabel:\n type: array\n x-dcl-go-name: FilterLabel\n description: |-\n The list of label value pairs that must match labels in the provided metadata based on `filterMatchCriteria`\n\n This list must not be empty and can have at the most 64 entries.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ForwardingRuleMetadataFilterFilterLabel\n required:\n - name\n - value\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: |-\n Name of metadata label.\n\n The name can have a maximum length of 1024 characters and must be at least 1 character long.\n x-kubernetes-immutable: true\n value:\n type: string\n x-dcl-go-name: Value\n description: |-\n The value of the label must 
match the specified value.\n\n value can have a maximum length of 1024 characters.\n x-kubernetes-immutable: true\n filterMatchCriteria:\n type: string\n x-dcl-go-name: FilterMatchCriteria\n x-dcl-go-type: ForwardingRuleMetadataFilterFilterMatchCriteriaEnum\n description: |-\n Specifies how individual `filterLabel` matches within the list of `filterLabel`s contribute towards the overall `metadataFilter` match.\n\n Supported values are:\n\n * MATCH_ANY: At least one of the `filterLabels` must have a matching label in the provided metadata.\n * MATCH_ALL: All `filterLabels` must have matching labels in the provided metadata. Possible values: NOT_SET, MATCH_ALL, MATCH_ANY\n x-kubernetes-immutable: true\n enum:\n - NOT_SET\n - MATCH_ALL\n - MATCH_ANY\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the resource; provided by the client when the resource\n is created. The name must be 1-63 characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt).\n Specifically, the name must be 1-63 characters long and match the regular\n expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character\n must be a lowercase letter, and all following characters must be a dash,\n lowercase letter, or digit, except the last character, which cannot be\n a dash.\n x-kubernetes-immutable: true\n network:\n type: string\n x-dcl-go-name: Network\n description: This field is not used for external load balancing. For `INTERNAL`\n and `INTERNAL_SELF_MANAGED` load balancing, this field identifies the\n network that the load balanced IP should belong to for this Forwarding\n Rule. 
If this field is not specified, the default network will be used.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n networkTier:\n type: string\n x-dcl-go-name: NetworkTier\n x-dcl-go-type: ForwardingRuleNetworkTierEnum\n description: 'This signifies the networking tier used for configuring this\n load balancer and can only take the following values: `PREMIUM`, `STANDARD`.\n For regional ForwardingRule, the valid values are `PREMIUM` and `STANDARD`.\n For GlobalForwardingRule, the valid value is `PREMIUM`. If this field\n is not specified, it is assumed to be `PREMIUM`. If `IPAddress` is specified,\n this value must be equal to the networkTier of the Address.'\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n enum:\n - PREMIUM\n - STANDARD\n portRange:\n type: string\n x-dcl-go-name: PortRange\n description: |-\n When the load balancing scheme is `EXTERNAL`, `INTERNAL_SELF_MANAGED` and `INTERNAL_MANAGED`, you can specify a `port_range`. Use with a forwarding rule that points to a target proxy or a target pool. Do not use with a forwarding rule that points to a backend service. This field is used along with the `target` field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. Applicable only when `IPProtocol` is `TCP`, `UDP`, or `SCTP`, only packets addressed to ports in the specified range will be forwarded to `target`. Forwarding rules with the same `[IPAddress, IPProtocol]` pair must have disjoint port ranges. 
Some types of forwarding target have constraints on the acceptable ports:\n\n * TargetHttpProxy: 80, 8080\n * TargetHttpsProxy: 443\n * TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n * TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n * TargetVpnGateway: 500, 4500\n\n @pattern: d+(?:-d+)?\n x-kubernetes-immutable: true\n ports:\n type: array\n x-dcl-go-name: Ports\n description: 'This field is used along with the `backend_service` field\n for internal load balancing. When the load balancing scheme is `INTERNAL`,\n a list of ports can be configured, for example, [''80''], [''8000'',''9000''].\n Only packets addressed to these ports are forwarded to the backends configured\n with the forwarding rule. If the forwarding rule''s loadBalancingScheme\n is INTERNAL, you can specify ports in one of the following ways: * A list\n of up to five ports, which can be non-contiguous * Keyword `ALL`, which\n causes the forwarding rule to forward traffic on any port of the forwarding\n rule''s protocol. @pattern: d+(?:-d+)? For more information, refer to\n [Port specifications](/load-balancing/docs/forwarding-rule-concepts#port_specifications).'\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: string\n x-dcl-go-type: string\n project:\n type: string\n x-dcl-go-name: Project\n description: The project this resource belongs in.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n pscConnectionId:\n type: string\n x-dcl-go-name: PscConnectionId\n readOnly: true\n description: The PSC connection id of the PSC Forwarding Rule.\n x-kubernetes-immutable: true\n pscConnectionStatus:\n type: string\n x-dcl-go-name: PscConnectionStatus\n x-dcl-go-type: ForwardingRulePscConnectionStatusEnum\n readOnly: true\n description: 'The PSC connection status of the PSC Forwarding Rule. 
Possible\n values: STATUS_UNSPECIFIED, PENDING, ACCEPTED, REJECTED, CLOSED'\n x-kubernetes-immutable: true\n enum:\n - STATUS_UNSPECIFIED\n - PENDING\n - ACCEPTED\n - REJECTED\n - CLOSED\n region:\n type: string\n x-dcl-go-name: Region\n readOnly: true\n description: '[Output Only] URL of the region where the regional forwarding\n rule resides. This field is not applicable to global forwarding rules.\n You must specify this field as part of the HTTP request URL. It is not\n settable as a field in the request body.'\n x-kubernetes-immutable: true\n selfLink:\n type: string\n x-dcl-go-name: SelfLink\n readOnly: true\n description: '[Output Only] Server-defined URL for the resource.'\n x-kubernetes-immutable: true\n serviceDirectoryRegistrations:\n type: array\n x-dcl-go-name: ServiceDirectoryRegistrations\n description: Service Directory resources to register this forwarding rule\n with. Currently, only supports a single Service Directory resource.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ForwardingRuleServiceDirectoryRegistrations\n properties:\n namespace:\n type: string\n x-dcl-go-name: Namespace\n description: Service Directory namespace to register the forwarding\n rule under.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n service:\n type: string\n x-dcl-go-name: Service\n description: Service Directory service to register the forwarding\n rule under.\n x-kubernetes-immutable: true\n serviceLabel:\n type: string\n x-dcl-go-name: ServiceLabel\n description: An optional prefix to the service name for this Forwarding\n Rule. If specified, the prefix is the first label of the fully qualified\n service name. The label must be 1-63 characters long, and comply with\n [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). 
Specifically, the label\n must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`\n which means the first character must be a lowercase letter, and all following\n characters must be a dash, lowercase letter, or digit, except the last\n character, which cannot be a dash. This field is only used for internal\n load balancing.\n x-kubernetes-immutable: true\n serviceName:\n type: string\n x-dcl-go-name: ServiceName\n readOnly: true\n description: '[Output Only] The internal fully qualified service name for\n this Forwarding Rule. This field is only used for internal load balancing.'\n x-kubernetes-immutable: true\n sourceIPRanges:\n type: array\n x-dcl-go-name: SourceIPRanges\n description: If not empty, this Forwarding Rule will only forward the traffic\n when the source IP address matches one of the IP addresses or CIDR ranges\n set here. Note that a Forwarding Rule can only have up to 64 source IP\n ranges, and this field can only be used with a regional Forwarding Rule\n whose scheme is EXTERNAL. Each sourceIpRange entry should be either an\n IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n subnetwork:\n type: string\n x-dcl-go-name: Subnetwork\n description: This field is only used for `INTERNAL` load balancing. For\n internal load balancing, this field identifies the subnetwork that the\n load balanced IP should belong to for this Forwarding Rule. If the network\n specified is in auto subnet mode, this field is optional. 
However, if\n the network is in custom subnet mode, a subnetwork must be specified.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n target:\n type: string\n x-dcl-go-name: Target\n description: The URL of the target resource to receive the matched traffic.\n For regional forwarding rules, this target must live in the same region\n as the forwarding rule. For global forwarding rules, this target must\n be a global load balancing resource. The forwarded traffic must be of\n a type appropriate to the target object. For `INTERNAL_SELF_MANAGED` load\n balancing, only `targetHttpProxy` is valid, not `targetHttpsProxy`.\n") -// 22446 bytes -// MD5: 3d9b87f30aec7e739f29bb62aa5e6e38 +// 23790 bytes +// MD5: 270a32375a7e5c04afc2570e08e9cc46 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule.go index 0009d8d1a5..070ced0b05 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule.go @@ -128,11 +128,19 @@ func (v NetworkFirewallPolicyRuleTargetSecureTagsStateEnum) Validate() error { } type NetworkFirewallPolicyRuleMatch struct { - empty bool `json:"-"` - SrcIPRanges []string `json:"srcIPRanges"` - DestIPRanges []string `json:"destIPRanges"` - Layer4Configs []NetworkFirewallPolicyRuleMatchLayer4Configs `json:"layer4Configs"` - SrcSecureTags []NetworkFirewallPolicyRuleMatchSrcSecureTags `json:"srcSecureTags"` + empty bool `json:"-"` + SrcIPRanges []string `json:"srcIPRanges"` + DestIPRanges []string `json:"destIPRanges"` + 
Layer4Configs []NetworkFirewallPolicyRuleMatchLayer4Configs `json:"layer4Configs"` + SrcSecureTags []NetworkFirewallPolicyRuleMatchSrcSecureTags `json:"srcSecureTags"` + SrcRegionCodes []string `json:"srcRegionCodes"` + DestRegionCodes []string `json:"destRegionCodes"` + SrcThreatIntelligences []string `json:"srcThreatIntelligences"` + DestThreatIntelligences []string `json:"destThreatIntelligences"` + SrcFqdns []string `json:"srcFqdns"` + DestFqdns []string `json:"destFqdns"` + SrcAddressGroups []string `json:"srcAddressGroups"` + DestAddressGroups []string `json:"destAddressGroups"` } type jsonNetworkFirewallPolicyRuleMatch NetworkFirewallPolicyRuleMatch @@ -158,6 +166,22 @@ func (r *NetworkFirewallPolicyRuleMatch) UnmarshalJSON(data []byte) error { r.SrcSecureTags = res.SrcSecureTags + r.SrcRegionCodes = res.SrcRegionCodes + + r.DestRegionCodes = res.DestRegionCodes + + r.SrcThreatIntelligences = res.SrcThreatIntelligences + + r.DestThreatIntelligences = res.DestThreatIntelligences + + r.SrcFqdns = res.SrcFqdns + + r.DestFqdns = res.DestFqdns + + r.SrcAddressGroups = res.SrcAddressGroups + + r.DestAddressGroups = res.DestAddressGroups + } return nil } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule.yaml index de9ff477e9..3dc72bf8d5 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule.yaml @@ -92,8 +92,7 @@ components: type: string x-dcl-go-name: Action description: The Action to perform when the client connection 
triggers the - rule. Can currently be either "allow" or "deny()" where valid values for - status are 403, 404, and 502. + rule. Valid actions are "allow", "deny" and "goto_next". description: type: string x-dcl-go-name: Description @@ -151,6 +150,28 @@ components: required: - layer4Configs properties: + destAddressGroups: + type: array + x-dcl-go-name: DestAddressGroups + description: Address groups which should be matched against the traffic + destination. Maximum number of destination address groups is 10. Destination + address groups is only supported in Egress rules. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string + destFqdns: + type: array + x-dcl-go-name: DestFqdns + description: Domain names that will be used to match against the resolved + domain name of destination of traffic. Can only be specified if DIRECTION + is egress. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string destIPRanges: type: array x-dcl-go-name: DestIPRanges @@ -161,6 +182,26 @@ components: items: type: string x-dcl-go-type: string + destRegionCodes: + type: array + x-dcl-go-name: DestRegionCodes + description: The Unicode country codes whose IP addresses will be used + to match against the source of traffic. Can only be specified if DIRECTION + is egress. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string + destThreatIntelligences: + type: array + x-dcl-go-name: DestThreatIntelligences + description: Name of the Google Cloud Threat Intelligence list. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string layer4Configs: type: array x-dcl-go-name: Layer4Configs @@ -194,6 +235,28 @@ components: items: type: string x-dcl-go-type: string + srcAddressGroups: + type: array + x-dcl-go-name: SrcAddressGroups + description: Address groups which should be matched against the traffic + source. 
Maximum number of source address groups is 10. Source address + groups is only supported in Ingress rules. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string + srcFqdns: + type: array + x-dcl-go-name: SrcFqdns + description: Domain names that will be used to match against the resolved + domain name of source of traffic. Can only be specified if DIRECTION + is ingress. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string srcIPRanges: type: array x-dcl-go-name: SrcIPRanges @@ -204,6 +267,17 @@ components: items: type: string x-dcl-go-type: string + srcRegionCodes: + type: array + x-dcl-go-name: SrcRegionCodes + description: The Unicode country codes whose IP addresses will be used + to match against the source of traffic. Can only be specified if DIRECTION + is ingress. + x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string srcSecureTags: type: array x-dcl-go-name: SrcSecureTags @@ -238,6 +312,15 @@ components: enum: - EFFECTIVE - INEFFECTIVE + srcThreatIntelligences: + type: array + x-dcl-go-name: SrcThreatIntelligences + description: Name of the Google Cloud Threat Intelligence list. 
+ x-dcl-send-empty: true + x-dcl-list-type: list + items: + type: string + x-dcl-go-type: string priority: type: integer format: int64 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule_internal.go index 0ef24fe852..016104a615 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule_internal.go @@ -701,6 +701,46 @@ func canonicalizeNetworkFirewallPolicyRuleMatch(des, initial *NetworkFirewallPol } cDes.Layer4Configs = canonicalizeNetworkFirewallPolicyRuleMatchLayer4ConfigsSlice(des.Layer4Configs, initial.Layer4Configs, opts...) cDes.SrcSecureTags = canonicalizeNetworkFirewallPolicyRuleMatchSrcSecureTagsSlice(des.SrcSecureTags, initial.SrcSecureTags, opts...) 
+ if dcl.StringArrayCanonicalize(des.SrcRegionCodes, initial.SrcRegionCodes) { + cDes.SrcRegionCodes = initial.SrcRegionCodes + } else { + cDes.SrcRegionCodes = des.SrcRegionCodes + } + if dcl.StringArrayCanonicalize(des.DestRegionCodes, initial.DestRegionCodes) { + cDes.DestRegionCodes = initial.DestRegionCodes + } else { + cDes.DestRegionCodes = des.DestRegionCodes + } + if dcl.StringArrayCanonicalize(des.SrcThreatIntelligences, initial.SrcThreatIntelligences) { + cDes.SrcThreatIntelligences = initial.SrcThreatIntelligences + } else { + cDes.SrcThreatIntelligences = des.SrcThreatIntelligences + } + if dcl.StringArrayCanonicalize(des.DestThreatIntelligences, initial.DestThreatIntelligences) { + cDes.DestThreatIntelligences = initial.DestThreatIntelligences + } else { + cDes.DestThreatIntelligences = des.DestThreatIntelligences + } + if dcl.StringArrayCanonicalize(des.SrcFqdns, initial.SrcFqdns) { + cDes.SrcFqdns = initial.SrcFqdns + } else { + cDes.SrcFqdns = des.SrcFqdns + } + if dcl.StringArrayCanonicalize(des.DestFqdns, initial.DestFqdns) { + cDes.DestFqdns = initial.DestFqdns + } else { + cDes.DestFqdns = des.DestFqdns + } + if dcl.StringArrayCanonicalize(des.SrcAddressGroups, initial.SrcAddressGroups) { + cDes.SrcAddressGroups = initial.SrcAddressGroups + } else { + cDes.SrcAddressGroups = des.SrcAddressGroups + } + if dcl.StringArrayCanonicalize(des.DestAddressGroups, initial.DestAddressGroups) { + cDes.DestAddressGroups = initial.DestAddressGroups + } else { + cDes.DestAddressGroups = des.DestAddressGroups + } return cDes } @@ -755,6 +795,30 @@ func canonicalizeNewNetworkFirewallPolicyRuleMatch(c *Client, des, nw *NetworkFi } nw.Layer4Configs = canonicalizeNewNetworkFirewallPolicyRuleMatchLayer4ConfigsSlice(c, des.Layer4Configs, nw.Layer4Configs) nw.SrcSecureTags = canonicalizeNewNetworkFirewallPolicyRuleMatchSrcSecureTagsSlice(c, des.SrcSecureTags, nw.SrcSecureTags) + if dcl.StringArrayCanonicalize(des.SrcRegionCodes, nw.SrcRegionCodes) { + 
nw.SrcRegionCodes = des.SrcRegionCodes + } + if dcl.StringArrayCanonicalize(des.DestRegionCodes, nw.DestRegionCodes) { + nw.DestRegionCodes = des.DestRegionCodes + } + if dcl.StringArrayCanonicalize(des.SrcThreatIntelligences, nw.SrcThreatIntelligences) { + nw.SrcThreatIntelligences = des.SrcThreatIntelligences + } + if dcl.StringArrayCanonicalize(des.DestThreatIntelligences, nw.DestThreatIntelligences) { + nw.DestThreatIntelligences = des.DestThreatIntelligences + } + if dcl.StringArrayCanonicalize(des.SrcFqdns, nw.SrcFqdns) { + nw.SrcFqdns = des.SrcFqdns + } + if dcl.StringArrayCanonicalize(des.DestFqdns, nw.DestFqdns) { + nw.DestFqdns = des.DestFqdns + } + if dcl.StringArrayCanonicalize(des.SrcAddressGroups, nw.SrcAddressGroups) { + nw.SrcAddressGroups = des.SrcAddressGroups + } + if dcl.StringArrayCanonicalize(des.DestAddressGroups, nw.DestAddressGroups) { + nw.DestAddressGroups = des.DestAddressGroups + } return nw } @@ -1336,6 +1400,62 @@ func compareNetworkFirewallPolicyRuleMatchNewStyle(d, a interface{}, fn dcl.Fiel } diffs = append(diffs, ds...) } + + if ds, err := dcl.Diff(desired.SrcRegionCodes, actual.SrcRegionCodes, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNetworkFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("SrcRegionCodes")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DestRegionCodes, actual.DestRegionCodes, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNetworkFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("DestRegionCodes")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + + if ds, err := dcl.Diff(desired.SrcThreatIntelligences, actual.SrcThreatIntelligences, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNetworkFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("SrcThreatIntelligences")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DestThreatIntelligences, actual.DestThreatIntelligences, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNetworkFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("DestThreatIntelligences")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SrcFqdns, actual.SrcFqdns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNetworkFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("SrcFqdns")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DestFqdns, actual.DestFqdns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNetworkFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("DestFqdns")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.SrcAddressGroups, actual.SrcAddressGroups, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNetworkFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("SrcAddressGroups")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + + if ds, err := dcl.Diff(desired.DestAddressGroups, actual.DestAddressGroups, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNetworkFirewallPolicyRulePatchRuleOperation")}, fn.AddNest("DestAddressGroups")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } return diffs, nil } @@ -1704,6 +1824,30 @@ func expandNetworkFirewallPolicyRuleMatch(c *Client, f *NetworkFirewallPolicyRul } else if v != nil { m["srcSecureTags"] = v } + if v := f.SrcRegionCodes; v != nil { + m["srcRegionCodes"] = v + } + if v := f.DestRegionCodes; v != nil { + m["destRegionCodes"] = v + } + if v := f.SrcThreatIntelligences; v != nil { + m["srcThreatIntelligences"] = v + } + if v := f.DestThreatIntelligences; v != nil { + m["destThreatIntelligences"] = v + } + if v := f.SrcFqdns; v != nil { + m["srcFqdns"] = v + } + if v := f.DestFqdns; v != nil { + m["destFqdns"] = v + } + if v := f.SrcAddressGroups; v != nil { + m["srcAddressGroups"] = v + } + if v := f.DestAddressGroups; v != nil { + m["destAddressGroups"] = v + } return m, nil } @@ -1725,6 +1869,14 @@ func flattenNetworkFirewallPolicyRuleMatch(c *Client, i interface{}, res *Networ r.DestIPRanges = dcl.FlattenStringSlice(m["destIpRanges"]) r.Layer4Configs = flattenNetworkFirewallPolicyRuleMatchLayer4ConfigsSlice(c, m["layer4Configs"], res) r.SrcSecureTags = flattenNetworkFirewallPolicyRuleMatchSrcSecureTagsSlice(c, m["srcSecureTags"], res) + r.SrcRegionCodes = dcl.FlattenStringSlice(m["srcRegionCodes"]) + r.DestRegionCodes = dcl.FlattenStringSlice(m["destRegionCodes"]) + r.SrcThreatIntelligences = dcl.FlattenStringSlice(m["srcThreatIntelligences"]) + r.DestThreatIntelligences = dcl.FlattenStringSlice(m["destThreatIntelligences"]) + r.SrcFqdns = dcl.FlattenStringSlice(m["srcFqdns"]) + r.DestFqdns = dcl.FlattenStringSlice(m["destFqdns"]) + r.SrcAddressGroups = dcl.FlattenStringSlice(m["srcAddressGroups"]) + r.DestAddressGroups = dcl.FlattenStringSlice(m["destAddressGroups"]) return r } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule_schema.go 
b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule_schema.go index 8d157b631c..3879fc97f2 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule_schema.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule_schema.go @@ -133,7 +133,7 @@ func DCLNetworkFirewallPolicyRuleSchema() *dcl.Schema { "action": &dcl.Property{ Type: "string", GoName: "Action", - Description: "The Action to perform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", + Description: "The Action to perform when the client connection triggers the rule. Valid actions are \"allow\", \"deny\" and \"goto_next\".", }, "description": &dcl.Property{ Type: "string", @@ -195,6 +195,28 @@ func DCLNetworkFirewallPolicyRuleSchema() *dcl.Schema { "layer4Configs", }, Properties: map[string]*dcl.Property{ + "destAddressGroups": &dcl.Property{ + Type: "array", + GoName: "DestAddressGroups", + Description: "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "destFqdns": &dcl.Property{ + Type: "array", + GoName: "DestFqdns", + Description: "Domain names that will be used to match against the resolved domain name of destination of traffic. 
Can only be specified if DIRECTION is egress.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, "destIPRanges": &dcl.Property{ Type: "array", GoName: "DestIPRanges", @@ -206,6 +228,28 @@ func DCLNetworkFirewallPolicyRuleSchema() *dcl.Schema { GoType: "string", }, }, + "destRegionCodes": &dcl.Property{ + Type: "array", + GoName: "DestRegionCodes", + Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "destThreatIntelligences": &dcl.Property{ + Type: "array", + GoName: "DestThreatIntelligences", + Description: "Name of the Google Cloud Threat Intelligence list.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, "layer4Configs": &dcl.Property{ Type: "array", GoName: "Layer4Configs", @@ -238,6 +282,28 @@ func DCLNetworkFirewallPolicyRuleSchema() *dcl.Schema { }, }, }, + "srcAddressGroups": &dcl.Property{ + Type: "array", + GoName: "SrcAddressGroups", + Description: "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, + "srcFqdns": &dcl.Property{ + Type: "array", + GoName: "SrcFqdns", + Description: "Domain names that will be used to match against the resolved domain name of source of traffic. 
Can only be specified if DIRECTION is ingress.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, "srcIPRanges": &dcl.Property{ Type: "array", GoName: "SrcIPRanges", @@ -249,6 +315,17 @@ func DCLNetworkFirewallPolicyRuleSchema() *dcl.Schema { GoType: "string", }, }, + "srcRegionCodes": &dcl.Property{ + Type: "array", + GoName: "SrcRegionCodes", + Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, "srcSecureTags": &dcl.Property{ Type: "array", GoName: "SrcSecureTags", @@ -287,6 +364,17 @@ func DCLNetworkFirewallPolicyRuleSchema() *dcl.Schema { }, }, }, + "srcThreatIntelligences": &dcl.Property{ + Type: "array", + GoName: "SrcThreatIntelligences", + Description: "Name of the Google Cloud Threat Intelligence list.", + SendEmpty: true, + ListType: "list", + Items: &dcl.Property{ + Type: "string", + GoType: "string", + }, + }, }, }, "priority": &dcl.Property{ diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule_yaml_embed.go index 4043594dae..674f095a98 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/network_firewall_policy_rule_yaml_embed.go @@ -17,7 +17,7 @@ package compute // 
blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/compute/network_firewall_policy_rule.yaml -var YAML_network_firewall_policy_rule = []byte("info:\n title: Compute/NetworkFirewallPolicyRule\n description: The Compute NetworkFirewallPolicyRule resource\n x-dcl-struct-name: NetworkFirewallPolicyRule\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a NetworkFirewallPolicyRule\n parameters:\n - name: networkFirewallPolicyRule\n required: true\n description: A full instance of a NetworkFirewallPolicyRule\n apply:\n description: The function used to apply information about a NetworkFirewallPolicyRule\n parameters:\n - name: networkFirewallPolicyRule\n required: true\n description: A full instance of a NetworkFirewallPolicyRule\n delete:\n description: The function used to delete a NetworkFirewallPolicyRule\n parameters:\n - name: networkFirewallPolicyRule\n required: true\n description: A full instance of a NetworkFirewallPolicyRule\n deleteAll:\n description: The function used to delete all NetworkFirewallPolicyRule\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: firewallPolicy\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many NetworkFirewallPolicyRule\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: firewallPolicy\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n NetworkFirewallPolicyRule:\n title: NetworkFirewallPolicyRule\n x-dcl-id: projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}\n x-dcl-locations:\n - global\n - region\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n 
required:\n - priority\n - match\n - action\n - direction\n - firewallPolicy\n - project\n properties:\n action:\n type: string\n x-dcl-go-name: Action\n description: The Action to perform when the client connection triggers the\n rule. Can currently be either \"allow\" or \"deny()\" where valid values for\n status are 403, 404, and 502.\n description:\n type: string\n x-dcl-go-name: Description\n description: An optional description for this resource.\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: NetworkFirewallPolicyRuleDirectionEnum\n description: 'The direction in which this rule applies. Possible values:\n INGRESS, EGRESS'\n enum:\n - INGRESS\n - EGRESS\n disabled:\n type: boolean\n x-dcl-go-name: Disabled\n description: Denotes whether the firewall policy rule is disabled. When\n set to true, the firewall policy rule is not enforced and traffic behaves\n as if it did not exist. If this is unspecified, the firewall policy rule\n will be enabled.\n enableLogging:\n type: boolean\n x-dcl-go-name: EnableLogging\n description: 'Denotes whether to enable logging for a particular rule. If\n logging is enabled, logs will be exported to the configured export destination\n in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you\n cannot enable logging on \"goto_next\" rules.'\n firewallPolicy:\n type: string\n x-dcl-go-name: FirewallPolicy\n description: The firewall policy of the resource.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/NetworkFirewallPolicy\n field: name\n parent: true\n kind:\n type: string\n x-dcl-go-name: Kind\n readOnly: true\n description: Type of the resource. 
Always `compute#firewallPolicyRule` for\n firewall policy rules\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location of this resource.\n x-kubernetes-immutable: true\n match:\n type: object\n x-dcl-go-name: Match\n x-dcl-go-type: NetworkFirewallPolicyRuleMatch\n description: A match condition that incoming traffic is evaluated against.\n If it evaluates to true, the corresponding 'action' is enforced.\n required:\n - layer4Configs\n properties:\n destIPRanges:\n type: array\n x-dcl-go-name: DestIPRanges\n description: CIDR IP address range. Maximum number of destination CIDR\n IP ranges allowed is 5000.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n layer4Configs:\n type: array\n x-dcl-go-name: Layer4Configs\n description: Pairs of IP protocols and ports that the rule should match.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: NetworkFirewallPolicyRuleMatchLayer4Configs\n required:\n - ipProtocol\n properties:\n ipProtocol:\n type: string\n x-dcl-go-name: IPProtocol\n description: The IP protocol to which this rule applies. The protocol\n type is required when creating a firewall rule. This value can\n either be one of the following well known protocol strings (`tcp`,\n `udp`, `icmp`, `esp`, `ah`, `ipip`, `sctp`), or the IP protocol\n number.\n ports:\n type: array\n x-dcl-go-name: Ports\n description: 'An optional list of ports to which this rule applies.\n This field is only applicable for UDP or TCP protocol. Each\n entry must be either an integer or a range. If not specified,\n this rule applies to connections through any port. Example inputs\n include: ``.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n srcIPRanges:\n type: array\n x-dcl-go-name: SrcIPRanges\n description: CIDR IP address range. 
Maximum number of source CIDR IP\n ranges allowed is 5000.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n srcSecureTags:\n type: array\n x-dcl-go-name: SrcSecureTags\n description: List of secure tag values, which should be matched at the\n source of the traffic. For INGRESS rule, if all the srcSecureTag\n are INEFFECTIVE, and there is no srcIpRange, this rule\n will be ignored. Maximum number of source tag values allowed is 256.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: NetworkFirewallPolicyRuleMatchSrcSecureTags\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the secure tag, created with TagManager's\n TagValue API. @pattern tagValues/[0-9]+\n x-dcl-references:\n - resource: Cloudresourcemanager/TagValue\n field: namespacedName\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: NetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum\n readOnly: true\n description: '[Output Only] State of the secure tag, either `EFFECTIVE`\n or `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted\n or its network is deleted.'\n enum:\n - EFFECTIVE\n - INEFFECTIVE\n priority:\n type: integer\n format: int64\n x-dcl-go-name: Priority\n description: An integer indicating the priority of a rule in the list. The\n priority must be a positive value between 0 and 2147483647. Rules are\n evaluated from highest to lowest priority where 0 is the highest priority\n and 2147483647 is the lowest prority.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n ruleName:\n type: string\n x-dcl-go-name: RuleName\n description: An optional name for the rule. 
This field is not a unique identifier\n and can be updated.\n ruleTupleCount:\n type: integer\n format: int64\n x-dcl-go-name: RuleTupleCount\n readOnly: true\n description: Calculation of the complexity of a single firewall policy rule.\n x-kubernetes-immutable: true\n targetSecureTags:\n type: array\n x-dcl-go-name: TargetSecureTags\n description: A list of secure tags that controls which instances the firewall\n rule applies to. If targetSecureTag are specified, then the\n firewall rule applies only to instances in the VPC network that have one\n of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE\n state, then this rule will be ignored. targetSecureTag may\n not be set at the same time as targetServiceAccounts. If\n neither targetServiceAccounts nor targetSecureTag\n are specified, the firewall rule applies to all instances on the specified\n network. Maximum number of target label tags allowed is 256.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: NetworkFirewallPolicyRuleTargetSecureTags\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the secure tag, created with TagManager's TagValue\n API. @pattern tagValues/[0-9]+\n x-dcl-references:\n - resource: Cloudresourcemanager/TagValue\n field: namespacedName\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: NetworkFirewallPolicyRuleTargetSecureTagsStateEnum\n readOnly: true\n description: '[Output Only] State of the secure tag, either `EFFECTIVE`\n or `INEFFECTIVE`. 
A secure tag is `INEFFECTIVE` when it is deleted\n or its network is deleted.'\n enum:\n - EFFECTIVE\n - INEFFECTIVE\n targetServiceAccounts:\n type: array\n x-dcl-go-name: TargetServiceAccounts\n description: A list of service accounts indicating the sets of instances\n that are applied with this rule.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: name\n") +var YAML_network_firewall_policy_rule = []byte("info:\n title: Compute/NetworkFirewallPolicyRule\n description: The Compute NetworkFirewallPolicyRule resource\n x-dcl-struct-name: NetworkFirewallPolicyRule\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a NetworkFirewallPolicyRule\n parameters:\n - name: networkFirewallPolicyRule\n required: true\n description: A full instance of a NetworkFirewallPolicyRule\n apply:\n description: The function used to apply information about a NetworkFirewallPolicyRule\n parameters:\n - name: networkFirewallPolicyRule\n required: true\n description: A full instance of a NetworkFirewallPolicyRule\n delete:\n description: The function used to delete a NetworkFirewallPolicyRule\n parameters:\n - name: networkFirewallPolicyRule\n required: true\n description: A full instance of a NetworkFirewallPolicyRule\n deleteAll:\n description: The function used to delete all NetworkFirewallPolicyRule\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: firewallPolicy\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many NetworkFirewallPolicyRule\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: firewallPolicy\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n 
NetworkFirewallPolicyRule:\n title: NetworkFirewallPolicyRule\n x-dcl-id: projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}\n x-dcl-locations:\n - global\n - region\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - priority\n - match\n - action\n - direction\n - firewallPolicy\n - project\n properties:\n action:\n type: string\n x-dcl-go-name: Action\n description: The Action to perform when the client connection triggers the\n rule. Valid actions are \"allow\", \"deny\" and \"goto_next\".\n description:\n type: string\n x-dcl-go-name: Description\n description: An optional description for this resource.\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: NetworkFirewallPolicyRuleDirectionEnum\n description: 'The direction in which this rule applies. Possible values:\n INGRESS, EGRESS'\n enum:\n - INGRESS\n - EGRESS\n disabled:\n type: boolean\n x-dcl-go-name: Disabled\n description: Denotes whether the firewall policy rule is disabled. When\n set to true, the firewall policy rule is not enforced and traffic behaves\n as if it did not exist. If this is unspecified, the firewall policy rule\n will be enabled.\n enableLogging:\n type: boolean\n x-dcl-go-name: EnableLogging\n description: 'Denotes whether to enable logging for a particular rule. If\n logging is enabled, logs will be exported to the configured export destination\n in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. 
Note: you\n cannot enable logging on \"goto_next\" rules.'\n firewallPolicy:\n type: string\n x-dcl-go-name: FirewallPolicy\n description: The firewall policy of the resource.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Compute/NetworkFirewallPolicy\n field: name\n parent: true\n kind:\n type: string\n x-dcl-go-name: Kind\n readOnly: true\n description: Type of the resource. Always `compute#firewallPolicyRule` for\n firewall policy rules\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location of this resource.\n x-kubernetes-immutable: true\n match:\n type: object\n x-dcl-go-name: Match\n x-dcl-go-type: NetworkFirewallPolicyRuleMatch\n description: A match condition that incoming traffic is evaluated against.\n If it evaluates to true, the corresponding 'action' is enforced.\n required:\n - layer4Configs\n properties:\n destAddressGroups:\n type: array\n x-dcl-go-name: DestAddressGroups\n description: Address groups which should be matched against the traffic\n destination. Maximum number of destination address groups is 10. Destination\n address groups is only supported in Egress rules.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n destFqdns:\n type: array\n x-dcl-go-name: DestFqdns\n description: Domain names that will be used to match against the resolved\n domain name of destination of traffic. Can only be specified if DIRECTION\n is egress.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n destIPRanges:\n type: array\n x-dcl-go-name: DestIPRanges\n description: CIDR IP address range. 
Maximum number of destination CIDR\n IP ranges allowed is 5000.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n destRegionCodes:\n type: array\n x-dcl-go-name: DestRegionCodes\n description: The Unicode country codes whose IP addresses will be used\n to match against the source of traffic. Can only be specified if DIRECTION\n is egress.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n destThreatIntelligences:\n type: array\n x-dcl-go-name: DestThreatIntelligences\n description: Name of the Google Cloud Threat Intelligence list.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n layer4Configs:\n type: array\n x-dcl-go-name: Layer4Configs\n description: Pairs of IP protocols and ports that the rule should match.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: NetworkFirewallPolicyRuleMatchLayer4Configs\n required:\n - ipProtocol\n properties:\n ipProtocol:\n type: string\n x-dcl-go-name: IPProtocol\n description: The IP protocol to which this rule applies. The protocol\n type is required when creating a firewall rule. This value can\n either be one of the following well known protocol strings (`tcp`,\n `udp`, `icmp`, `esp`, `ah`, `ipip`, `sctp`), or the IP protocol\n number.\n ports:\n type: array\n x-dcl-go-name: Ports\n description: 'An optional list of ports to which this rule applies.\n This field is only applicable for UDP or TCP protocol. Each\n entry must be either an integer or a range. If not specified,\n this rule applies to connections through any port. Example inputs\n include: ``.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n srcAddressGroups:\n type: array\n x-dcl-go-name: SrcAddressGroups\n description: Address groups which should be matched against the traffic\n source. 
Maximum number of source address groups is 10. Source address\n groups is only supported in Ingress rules.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n srcFqdns:\n type: array\n x-dcl-go-name: SrcFqdns\n description: Domain names that will be used to match against the resolved\n domain name of source of traffic. Can only be specified if DIRECTION\n is ingress.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n srcIPRanges:\n type: array\n x-dcl-go-name: SrcIPRanges\n description: CIDR IP address range. Maximum number of source CIDR IP\n ranges allowed is 5000.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n srcRegionCodes:\n type: array\n x-dcl-go-name: SrcRegionCodes\n description: The Unicode country codes whose IP addresses will be used\n to match against the source of traffic. Can only be specified if DIRECTION\n is ingress.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n srcSecureTags:\n type: array\n x-dcl-go-name: SrcSecureTags\n description: List of secure tag values, which should be matched at the\n source of the traffic. For INGRESS rule, if all the srcSecureTag\n are INEFFECTIVE, and there is no srcIpRange, this rule\n will be ignored. Maximum number of source tag values allowed is 256.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: NetworkFirewallPolicyRuleMatchSrcSecureTags\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the secure tag, created with TagManager's\n TagValue API. 
@pattern tagValues/[0-9]+\n x-dcl-references:\n - resource: Cloudresourcemanager/TagValue\n field: namespacedName\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: NetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum\n readOnly: true\n description: '[Output Only] State of the secure tag, either `EFFECTIVE`\n or `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted\n or its network is deleted.'\n enum:\n - EFFECTIVE\n - INEFFECTIVE\n srcThreatIntelligences:\n type: array\n x-dcl-go-name: SrcThreatIntelligences\n description: Name of the Google Cloud Threat Intelligence list.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n priority:\n type: integer\n format: int64\n x-dcl-go-name: Priority\n description: An integer indicating the priority of a rule in the list. The\n priority must be a positive value between 0 and 2147483647. Rules are\n evaluated from highest to lowest priority where 0 is the highest priority\n and 2147483647 is the lowest prority.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n ruleName:\n type: string\n x-dcl-go-name: RuleName\n description: An optional name for the rule. This field is not a unique identifier\n and can be updated.\n ruleTupleCount:\n type: integer\n format: int64\n x-dcl-go-name: RuleTupleCount\n readOnly: true\n description: Calculation of the complexity of a single firewall policy rule.\n x-kubernetes-immutable: true\n targetSecureTags:\n type: array\n x-dcl-go-name: TargetSecureTags\n description: A list of secure tags that controls which instances the firewall\n rule applies to. 
If targetSecureTag are specified, then the\n firewall rule applies only to instances in the VPC network that have one\n of those EFFECTIVE secure tags, if all the target_secure_tag are in INEFFECTIVE\n state, then this rule will be ignored. targetSecureTag may\n not be set at the same time as targetServiceAccounts. If\n neither targetServiceAccounts nor targetSecureTag\n are specified, the firewall rule applies to all instances on the specified\n network. Maximum number of target label tags allowed is 256.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: NetworkFirewallPolicyRuleTargetSecureTags\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Name of the secure tag, created with TagManager's TagValue\n API. @pattern tagValues/[0-9]+\n x-dcl-references:\n - resource: Cloudresourcemanager/TagValue\n field: namespacedName\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: NetworkFirewallPolicyRuleTargetSecureTagsStateEnum\n readOnly: true\n description: '[Output Only] State of the secure tag, either `EFFECTIVE`\n or `INEFFECTIVE`. 
A secure tag is `INEFFECTIVE` when it is deleted\n or its network is deleted.'\n enum:\n - EFFECTIVE\n - INEFFECTIVE\n targetServiceAccounts:\n type: array\n x-dcl-go-name: TargetServiceAccounts\n description: A list of service accounts indicating the sets of instances\n that are applied with this rule.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: name\n") -// 12389 bytes -// MD5: 1a6d79fc0a72c95f6ded368d1c7f12c3 +// 15872 bytes +// MD5: dabcaecb0113f0b3ff218072f2529ab1 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster.go index 822a0ada32..455ff99e5d 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster.go @@ -131,10 +131,11 @@ func (v ClusterStateEnum) Validate() error { } type ClusterNetworking struct { - empty bool `json:"-"` - VPCId *string `json:"vpcId"` - PodAddressCidrBlocks []string `json:"podAddressCidrBlocks"` - ServiceAddressCidrBlocks []string `json:"serviceAddressCidrBlocks"` + empty bool `json:"-"` + VPCId *string `json:"vpcId"` + PodAddressCidrBlocks []string `json:"podAddressCidrBlocks"` + ServiceAddressCidrBlocks []string `json:"serviceAddressCidrBlocks"` + PerNodePoolSgRulesDisabled *bool `json:"perNodePoolSgRulesDisabled"` } type jsonClusterNetworking ClusterNetworking @@ -158,6 +159,8 @@ func (r *ClusterNetworking) UnmarshalJSON(data []byte) error { r.ServiceAddressCidrBlocks = res.ServiceAddressCidrBlocks + r.PerNodePoolSgRulesDisabled = 
res.PerNodePoolSgRulesDisabled + } return nil } @@ -361,6 +364,7 @@ type ClusterControlPlaneRootVolume struct { SizeGib *int64 `json:"sizeGib"` VolumeType *ClusterControlPlaneRootVolumeVolumeTypeEnum `json:"volumeType"` Iops *int64 `json:"iops"` + Throughput *int64 `json:"throughput"` KmsKeyArn *string `json:"kmsKeyArn"` } @@ -385,6 +389,8 @@ func (r *ClusterControlPlaneRootVolume) UnmarshalJSON(data []byte) error { r.Iops = res.Iops + r.Throughput = res.Throughput + r.KmsKeyArn = res.KmsKeyArn } @@ -416,6 +422,7 @@ type ClusterControlPlaneMainVolume struct { SizeGib *int64 `json:"sizeGib"` VolumeType *ClusterControlPlaneMainVolumeVolumeTypeEnum `json:"volumeType"` Iops *int64 `json:"iops"` + Throughput *int64 `json:"throughput"` KmsKeyArn *string `json:"kmsKeyArn"` } @@ -440,6 +447,8 @@ func (r *ClusterControlPlaneMainVolume) UnmarshalJSON(data []byte) error { r.Iops = res.Iops + r.Throughput = res.Throughput + r.KmsKeyArn = res.KmsKeyArn } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster.yaml index 67e283956d..c43e54d145 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster.yaml @@ -239,6 +239,14 @@ components: parent resource. x-kubernetes-immutable: true x-dcl-server-default: true + throughput: + type: integer + format: int64 + x-dcl-go-name: Throughput + description: Optional. The throughput to provision for the volume, + in MiB/s. Only valid if the volume type is GP3. 
+ x-kubernetes-immutable: true + x-dcl-server-default: true volumeType: type: string x-dcl-go-name: VolumeType @@ -303,6 +311,13 @@ components: a default value is provided. See the specific reference in the parent resource. x-dcl-server-default: true + throughput: + type: integer + format: int64 + x-dcl-go-name: Throughput + description: Optional. The throughput to provision for the volume, + in MiB/s. Only valid if the volume type is GP3. + x-dcl-server-default: true volumeType: type: string x-dcl-go-name: VolumeType @@ -436,12 +451,19 @@ components: x-dcl-go-name: Networking x-dcl-go-type: ClusterNetworking description: Cluster-wide networking configuration. - x-kubernetes-immutable: true required: - vpcId - podAddressCidrBlocks - serviceAddressCidrBlocks properties: + perNodePoolSgRulesDisabled: + type: boolean + x-dcl-go-name: PerNodePoolSgRulesDisabled + description: Disable the per node pool subnet security group rules on + the control plane security group. When set to true, you must also + provide one or more security groups that ensure node pools are able + to send requests to the control plane on TCP/443 and TCP/8132. Failure + to do so may result in unavailable node pools. 
podAddressCidrBlocks: type: array x-dcl-go-name: PodAddressCidrBlocks diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster_internal.go index 3e425f8e1f..0da05b3dc7 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster_internal.go @@ -271,6 +271,11 @@ func newUpdateClusterUpdateAwsClusterRequest(ctx context.Context, f *Cluster, c if v := f.Description; !dcl.IsEmptyValueIndirect(v) { req["description"] = v } + if v, err := expandClusterNetworking(c, f.Networking, res); err != nil { + return nil, fmt.Errorf("error expanding Networking into networking: %w", err) + } else if !dcl.IsEmptyValueIndirect(v) { + req["networking"] = v + } if v, err := expandClusterControlPlane(c, f.ControlPlane, res); err != nil { return nil, fmt.Errorf("error expanding ControlPlane into controlPlane: %w", err) } else if !dcl.IsEmptyValueIndirect(v) { @@ -820,6 +825,11 @@ func canonicalizeClusterNetworking(des, initial *ClusterNetworking, opts ...dcl. 
} else { cDes.ServiceAddressCidrBlocks = des.ServiceAddressCidrBlocks } + if dcl.BoolCanonicalize(des.PerNodePoolSgRulesDisabled, initial.PerNodePoolSgRulesDisabled) || dcl.IsZeroValue(des.PerNodePoolSgRulesDisabled) { + cDes.PerNodePoolSgRulesDisabled = initial.PerNodePoolSgRulesDisabled + } else { + cDes.PerNodePoolSgRulesDisabled = des.PerNodePoolSgRulesDisabled + } return cDes } @@ -875,6 +885,9 @@ func canonicalizeNewClusterNetworking(c *Client, des, nw *ClusterNetworking) *Cl if dcl.StringArrayCanonicalize(des.ServiceAddressCidrBlocks, nw.ServiceAddressCidrBlocks) { nw.ServiceAddressCidrBlocks = des.ServiceAddressCidrBlocks } + if dcl.BoolCanonicalize(des.PerNodePoolSgRulesDisabled, nw.PerNodePoolSgRulesDisabled) { + nw.PerNodePoolSgRulesDisabled = des.PerNodePoolSgRulesDisabled + } return nw } @@ -1363,6 +1376,12 @@ func canonicalizeClusterControlPlaneRootVolume(des, initial *ClusterControlPlane } else { cDes.Iops = des.Iops } + if dcl.IsZeroValue(des.Throughput) || (dcl.IsEmptyValueIndirect(des.Throughput) && dcl.IsEmptyValueIndirect(initial.Throughput)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Throughput = initial.Throughput + } else { + cDes.Throughput = des.Throughput + } if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { cDes.KmsKeyArn = initial.KmsKeyArn } else { @@ -1499,6 +1518,12 @@ func canonicalizeClusterControlPlaneMainVolume(des, initial *ClusterControlPlane } else { cDes.Iops = des.Iops } + if dcl.IsZeroValue(des.Throughput) || (dcl.IsEmptyValueIndirect(des.Throughput) && dcl.IsEmptyValueIndirect(initial.Throughput)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. 
+ cDes.Throughput = initial.Throughput + } else { + cDes.Throughput = des.Throughput + } if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { cDes.KmsKeyArn = initial.KmsKeyArn } else { @@ -2647,6 +2672,13 @@ func compareClusterNetworkingNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dc } diffs = append(diffs, ds...) } + + if ds, err := dcl.Diff(desired.PerNodePoolSgRulesDisabled, actual.PerNodePoolSgRulesDisabled, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("PerNodePoolSgRulesDisabled")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } return diffs, nil } @@ -2862,6 +2894,13 @@ func compareClusterControlPlaneRootVolumeNewStyle(d, a interface{}, fn dcl.Field diffs = append(diffs, ds...) } + if ds, err := dcl.Diff(desired.Throughput, actual.Throughput, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("Throughput")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } + if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateClusterUpdateAwsClusterOperation")}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil { if err != nil { return nil, err @@ -2912,6 +2951,13 @@ func compareClusterControlPlaneMainVolumeNewStyle(d, a interface{}, fn dcl.Field diffs = append(diffs, ds...) } + if ds, err := dcl.Diff(desired.Throughput, actual.Throughput, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Throughput")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil { if err != nil { return nil, err @@ -3405,6 +3451,9 @@ func expandClusterNetworking(c *Client, f *ClusterNetworking, res *Cluster) (map if v := f.ServiceAddressCidrBlocks; v != nil { m["serviceAddressCidrBlocks"] = v } + if v := f.PerNodePoolSgRulesDisabled; !dcl.IsEmptyValueIndirect(v) { + m["perNodePoolSgRulesDisabled"] = v + } return m, nil } @@ -3425,6 +3474,7 @@ func flattenClusterNetworking(c *Client, i interface{}, res *Cluster) *ClusterNe r.VPCId = dcl.FlattenString(m["vpcId"]) r.PodAddressCidrBlocks = dcl.FlattenStringSlice(m["podAddressCidrBlocks"]) r.ServiceAddressCidrBlocks = dcl.FlattenStringSlice(m["serviceAddressCidrBlocks"]) + r.PerNodePoolSgRulesDisabled = dcl.FlattenBool(m["perNodePoolSgRulesDisabled"]) return r } @@ -3931,6 +3981,9 @@ func expandClusterControlPlaneRootVolume(c *Client, f *ClusterControlPlaneRootVo if v := f.Iops; !dcl.IsEmptyValueIndirect(v) { m["iops"] = v } + if v := f.Throughput; !dcl.IsEmptyValueIndirect(v) { + m["throughput"] = v + } if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) { m["kmsKeyArn"] = v } @@ -3954,6 +4007,7 @@ func flattenClusterControlPlaneRootVolume(c *Client, i interface{}, res *Cluster r.SizeGib = dcl.FlattenInteger(m["sizeGib"]) r.VolumeType = flattenClusterControlPlaneRootVolumeVolumeTypeEnum(m["volumeType"]) r.Iops = dcl.FlattenInteger(m["iops"]) + r.Throughput = dcl.FlattenInteger(m["throughput"]) r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"]) return r @@ -4057,6 +4111,9 @@ func expandClusterControlPlaneMainVolume(c *Client, f *ClusterControlPlaneMainVo if v := f.Iops; !dcl.IsEmptyValueIndirect(v) { m["iops"] = v } + if v := f.Throughput; !dcl.IsEmptyValueIndirect(v) { + m["throughput"] = v + } if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) { m["kmsKeyArn"] = v } @@ -4080,6 +4137,7 @@ func 
flattenClusterControlPlaneMainVolume(c *Client, i interface{}, res *Cluster r.SizeGib = dcl.FlattenInteger(m["sizeGib"]) r.VolumeType = flattenClusterControlPlaneMainVolumeVolumeTypeEnum(m["volumeType"]) r.Iops = dcl.FlattenInteger(m["iops"]) + r.Throughput = dcl.FlattenInteger(m["throughput"]) r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"]) return r diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster_schema.go index 5533a62ec9..5106d51930 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster_schema.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster_schema.go @@ -281,6 +281,14 @@ func DCLClusterSchema() *dcl.Schema { Immutable: true, ServerDefault: true, }, + "throughput": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Throughput", + Description: "Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3.", + Immutable: true, + ServerDefault: true, + }, "volumeType": &dcl.Property{ Type: "string", GoName: "VolumeType", @@ -344,6 +352,13 @@ func DCLClusterSchema() *dcl.Schema { Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", ServerDefault: true, }, + "throughput": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Throughput", + Description: "Optional. The throughput to provision for the volume, in MiB/s. 
Only valid if the volume type is GP3.", + ServerDefault: true, + }, "volumeType": &dcl.Property{ Type: "string", GoName: "VolumeType", @@ -488,13 +503,17 @@ func DCLClusterSchema() *dcl.Schema { GoName: "Networking", GoType: "ClusterNetworking", Description: "Cluster-wide networking configuration.", - Immutable: true, Required: []string{ "vpcId", "podAddressCidrBlocks", "serviceAddressCidrBlocks", }, Properties: map[string]*dcl.Property{ + "perNodePoolSgRulesDisabled": &dcl.Property{ + Type: "boolean", + GoName: "PerNodePoolSgRulesDisabled", + Description: "Disable the per node pool subnet security group rules on the control plane security group. When set to true, you must also provide one or more security groups that ensure node pools are able to send requests to the control plane on TCP/443 and TCP/8132. Failure to do so may result in unavailable node pools.", + }, "podAddressCidrBlocks": &dcl.Property{ Type: "array", GoName: "PodAddressCidrBlocks", diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster_yaml_embed.go index 4532511023..eb94921acd 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/cluster_yaml_embed.go @@ -17,7 +17,7 @@ package containeraws // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/containeraws/cluster.yaml -var YAML_cluster = []byte("info:\n title: ContainerAws/Cluster\n description: An Anthos cluster running on AWS.\n x-dcl-struct-name: Cluster\n x-dcl-has-iam: false\n x-dcl-ref:\n text: API reference\n url: 
https://cloud.google.com/anthos/clusters/docs/multi-cloud/reference/rest/v1/projects.locations.awsClusters\n x-dcl-guides:\n - text: Multicloud overview\n url: https://cloud.google.com/anthos/clusters/docs/multi-cloud\npaths:\n get:\n description: The function used to get information about a Cluster\n parameters:\n - name: cluster\n required: true\n description: A full instance of a Cluster\n apply:\n description: The function used to apply information about a Cluster\n parameters:\n - name: cluster\n required: true\n description: A full instance of a Cluster\n delete:\n description: The function used to delete a Cluster\n parameters:\n - name: cluster\n required: true\n description: A full instance of a Cluster\n deleteAll:\n description: The function used to delete all Cluster\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Cluster\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Cluster:\n title: Cluster\n x-dcl-id: projects/{{project}}/locations/{{location}}/awsClusters/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - networking\n - awsRegion\n - controlPlane\n - authorization\n - project\n - location\n - fleet\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: 'Optional. Annotations on the cluster. This field has the same\n restrictions as Kubernetes annotations. The total size of all keys and\n values combined is limited to 256k. Key can have 2 segments: prefix (optional)\n and name (required), separated by a slash (/). 
Prefix must be a DNS subdomain.\n Name must be 63 characters or less, begin and end with alphanumerics,\n with dashes (-), underscores (_), dots (.), and alphanumerics between.'\n authorization:\n type: object\n x-dcl-go-name: Authorization\n x-dcl-go-type: ClusterAuthorization\n description: Configuration related to the cluster RBAC settings.\n required:\n - adminUsers\n properties:\n adminUsers:\n type: array\n x-dcl-go-name: AdminUsers\n description: Users to perform operations as a cluster admin. A managed\n ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole\n to the users. Up to ten admin users can be provided. For more info\n on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ClusterAuthorizationAdminUsers\n required:\n - username\n properties:\n username:\n type: string\n x-dcl-go-name: Username\n description: The name of the user, e.g. `my-gcp-id@gmail.com`.\n awsRegion:\n type: string\n x-dcl-go-name: AwsRegion\n description: The AWS region where the cluster runs. Each Google Cloud region\n supports a subset of nearby AWS regions. 
You can call to list all supported\n AWS regions within a given Google Cloud region.\n x-kubernetes-immutable: true\n controlPlane:\n type: object\n x-dcl-go-name: ControlPlane\n x-dcl-go-type: ClusterControlPlane\n description: Configuration related to the cluster control plane.\n required:\n - version\n - subnetIds\n - configEncryption\n - iamInstanceProfile\n - databaseEncryption\n - awsServicesAuthentication\n properties:\n awsServicesAuthentication:\n type: object\n x-dcl-go-name: AwsServicesAuthentication\n x-dcl-go-type: ClusterControlPlaneAwsServicesAuthentication\n description: Authentication configuration for management of AWS resources.\n required:\n - roleArn\n properties:\n roleArn:\n type: string\n x-dcl-go-name: RoleArn\n description: The Amazon Resource Name (ARN) of the role that the\n Anthos Multi-Cloud API will assume when managing AWS resources\n on your account.\n roleSessionName:\n type: string\n x-dcl-go-name: RoleSessionName\n description: Optional. An identifier for the assumed role session.\n When unspecified, it defaults to `multicloud-service-agent`.\n x-dcl-server-default: true\n configEncryption:\n type: object\n x-dcl-go-name: ConfigEncryption\n x-dcl-go-type: ClusterControlPlaneConfigEncryption\n description: The ARN of the AWS KMS key used to encrypt cluster configuration.\n required:\n - kmsKeyArn\n properties:\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: The ARN of the AWS KMS key used to encrypt cluster\n configuration.\n databaseEncryption:\n type: object\n x-dcl-go-name: DatabaseEncryption\n x-dcl-go-type: ClusterControlPlaneDatabaseEncryption\n description: The ARN of the AWS KMS key used to encrypt cluster secrets.\n x-kubernetes-immutable: true\n required:\n - kmsKeyArn\n properties:\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: The ARN of the AWS KMS key used to encrypt cluster\n secrets.\n x-kubernetes-immutable: true\n iamInstanceProfile:\n type: string\n x-dcl-go-name: 
IamInstanceProfile\n description: The name of the AWS IAM instance pofile to assign to each\n control plane replica.\n instanceType:\n type: string\n x-dcl-go-name: InstanceType\n description: Optional. The AWS instance type. When unspecified, it defaults\n to `m5.large`.\n x-dcl-server-default: true\n mainVolume:\n type: object\n x-dcl-go-name: MainVolume\n x-dcl-go-type: ClusterControlPlaneMainVolume\n description: Optional. Configuration related to the main volume provisioned\n for each control plane replica. The main volume is in charge of storing\n all of the cluster's etcd state. Volumes will be provisioned in the\n availability zone associated with the corresponding subnet. When unspecified,\n it defaults to 8 GiB with the GP2 volume type.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n iops:\n type: integer\n format: int64\n x-dcl-go-name: Iops\n description: Optional. The number of I/O operations per second (IOPS)\n to provision for GP3 volume.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: Optional. The Amazon Resource Name (ARN) of the Customer\n Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified,\n the default Amazon managed key associated to the AWS region where\n this cluster runs will be used.\n x-kubernetes-immutable: true\n sizeGib:\n type: integer\n format: int64\n x-dcl-go-name: SizeGib\n description: Optional. The size of the volume, in GiBs. When unspecified,\n a default value is provided. See the specific reference in the\n parent resource.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n volumeType:\n type: string\n x-dcl-go-name: VolumeType\n x-dcl-go-type: ClusterControlPlaneMainVolumeVolumeTypeEnum\n description: 'Optional. Type of the EBS volume. When unspecified,\n it defaults to GP2 volume. 
Possible values: VOLUME_TYPE_UNSPECIFIED,\n GP2, GP3'\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n enum:\n - VOLUME_TYPE_UNSPECIFIED\n - GP2\n - GP3\n proxyConfig:\n type: object\n x-dcl-go-name: ProxyConfig\n x-dcl-go-type: ClusterControlPlaneProxyConfig\n description: Proxy configuration for outbound HTTP(S) traffic.\n required:\n - secretArn\n - secretVersion\n properties:\n secretArn:\n type: string\n x-dcl-go-name: SecretArn\n description: The ARN of the AWS Secret Manager secret that contains\n the HTTP(S) proxy configuration.\n secretVersion:\n type: string\n x-dcl-go-name: SecretVersion\n description: The version string of the AWS Secret Manager secret\n that contains the HTTP(S) proxy configuration.\n rootVolume:\n type: object\n x-dcl-go-name: RootVolume\n x-dcl-go-type: ClusterControlPlaneRootVolume\n description: Optional. Configuration related to the root volume provisioned\n for each control plane replica. Volumes will be provisioned in the\n availability zone associated with the corresponding subnet. When unspecified,\n it defaults to 32 GiB with the GP2 volume type.\n x-dcl-server-default: true\n properties:\n iops:\n type: integer\n format: int64\n x-dcl-go-name: Iops\n description: Optional. The number of I/O operations per second (IOPS)\n to provision for GP3 volume.\n x-dcl-server-default: true\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: Optional. The Amazon Resource Name (ARN) of the Customer\n Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified,\n the default Amazon managed key associated to the AWS region where\n this cluster runs will be used.\n sizeGib:\n type: integer\n format: int64\n x-dcl-go-name: SizeGib\n description: Optional. The size of the volume, in GiBs. When unspecified,\n a default value is provided. 
See the specific reference in the\n parent resource.\n x-dcl-server-default: true\n volumeType:\n type: string\n x-dcl-go-name: VolumeType\n x-dcl-go-type: ClusterControlPlaneRootVolumeVolumeTypeEnum\n description: 'Optional. Type of the EBS volume. When unspecified,\n it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED,\n GP2, GP3'\n x-dcl-server-default: true\n enum:\n - VOLUME_TYPE_UNSPECIFIED\n - GP2\n - GP3\n securityGroupIds:\n type: array\n x-dcl-go-name: SecurityGroupIds\n description: Optional. The IDs of additional security groups to add\n to control plane replicas. The Anthos Multi-Cloud API will automatically\n create and manage security groups with the minimum rules needed for\n a functioning cluster.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n sshConfig:\n type: object\n x-dcl-go-name: SshConfig\n x-dcl-go-type: ClusterControlPlaneSshConfig\n description: Optional. SSH configuration for how to access the underlying\n control plane machines.\n required:\n - ec2KeyPair\n properties:\n ec2KeyPair:\n type: string\n x-dcl-go-name: Ec2KeyPair\n description: The name of the EC2 key pair used to login into cluster\n machines.\n subnetIds:\n type: array\n x-dcl-go-name: SubnetIds\n description: The list of subnets where control plane replicas will run.\n A replica will be provisioned on each subnet and up to three values\n can be provided. Each subnet must be in a different AWS Availability\n Zone (AZ).\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n tags:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Tags\n description: Optional. A set of AWS resource tags to propagate to all\n underlying managed AWS resources. Specify at most 50 pairs containing\n alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127\n Unicode characters. 
Values can be up to 255 Unicode characters.\n version:\n type: string\n x-dcl-go-name: Version\n description: The Kubernetes version to run on control plane replicas\n (e.g. `1.19.10-gke.1000`). You can list all supported versions on\n a given Google Cloud region by calling .\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time at which this cluster was created.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. A human readable description of this cluster. Cannot\n be longer than 255 UTF-8 encoded bytes.\n endpoint:\n type: string\n x-dcl-go-name: Endpoint\n readOnly: true\n description: Output only. The endpoint of the cluster's API server.\n x-kubernetes-immutable: true\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Allows clients to perform consistent read-modify-writes through\n optimistic concurrency control. May be sent on update and delete requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n fleet:\n type: object\n x-dcl-go-name: Fleet\n x-dcl-go-type: ClusterFleet\n description: Fleet configuration.\n x-kubernetes-immutable: true\n required:\n - project\n properties:\n membership:\n type: string\n x-dcl-go-name: Membership\n readOnly: true\n description: The name of the managed Hub Membership resource associated\n to this cluster. 
Membership names are formatted as projects//locations/global/membership/.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The number of the Fleet host project where this cluster\n will be registered.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of this resource.\n x-kubernetes-immutable: true\n networking:\n type: object\n x-dcl-go-name: Networking\n x-dcl-go-type: ClusterNetworking\n description: Cluster-wide networking configuration.\n x-kubernetes-immutable: true\n required:\n - vpcId\n - podAddressCidrBlocks\n - serviceAddressCidrBlocks\n properties:\n podAddressCidrBlocks:\n type: array\n x-dcl-go-name: PodAddressCidrBlocks\n description: All pods in the cluster are assigned an RFC1918 IPv4 address\n from these ranges. Only a single range is supported. This field cannot\n be changed after creation.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n serviceAddressCidrBlocks:\n type: array\n x-dcl-go-name: ServiceAddressCidrBlocks\n description: All services in the cluster are assigned an RFC1918 IPv4\n address from these ranges. Only a single range is supported. This\n field cannot be changed after creation.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n vpcId:\n type: string\n x-dcl-go-name: VPCId\n description: The VPC associated with the cluster. All component clusters\n (i.e. control plane and node pools) run on a single VPC. 
This field\n cannot be changed after creation.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: Output only. If set, there are currently changes in flight\n to the cluster.\n x-kubernetes-immutable: true\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: ClusterStateEnum\n readOnly: true\n description: 'Output only. The current state of the cluster. Possible values:\n STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR,\n DEGRADED'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - PROVISIONING\n - RUNNING\n - RECONCILING\n - STOPPING\n - ERROR\n - DEGRADED\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. A globally unique identifier for the cluster.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time at which this cluster was last updated.\n x-kubernetes-immutable: true\n workloadIdentityConfig:\n type: object\n x-dcl-go-name: WorkloadIdentityConfig\n x-dcl-go-type: ClusterWorkloadIdentityConfig\n readOnly: true\n description: Output only. 
Workload Identity settings.\n x-kubernetes-immutable: true\n properties:\n identityProvider:\n type: string\n x-dcl-go-name: IdentityProvider\n description: The ID of the OIDC Identity Provider (IdP) associated to\n the Workload Identity Pool.\n x-kubernetes-immutable: true\n issuerUri:\n type: string\n x-dcl-go-name: IssuerUri\n description: The OIDC issuer URL for this cluster.\n x-kubernetes-immutable: true\n workloadPool:\n type: string\n x-dcl-go-name: WorkloadPool\n description: The Workload Identity Pool associated to the cluster.\n x-kubernetes-immutable: true\n") +var YAML_cluster = []byte("info:\n title: ContainerAws/Cluster\n description: An Anthos cluster running on AWS.\n x-dcl-struct-name: Cluster\n x-dcl-has-iam: false\n x-dcl-ref:\n text: API reference\n url: https://cloud.google.com/anthos/clusters/docs/multi-cloud/reference/rest/v1/projects.locations.awsClusters\n x-dcl-guides:\n - text: Multicloud overview\n url: https://cloud.google.com/anthos/clusters/docs/multi-cloud\npaths:\n get:\n description: The function used to get information about a Cluster\n parameters:\n - name: cluster\n required: true\n description: A full instance of a Cluster\n apply:\n description: The function used to apply information about a Cluster\n parameters:\n - name: cluster\n required: true\n description: A full instance of a Cluster\n delete:\n description: The function used to delete a Cluster\n parameters:\n - name: cluster\n required: true\n description: A full instance of a Cluster\n deleteAll:\n description: The function used to delete all Cluster\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Cluster\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Cluster:\n title: Cluster\n x-dcl-id: 
projects/{{project}}/locations/{{location}}/awsClusters/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - networking\n - awsRegion\n - controlPlane\n - authorization\n - project\n - location\n - fleet\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: 'Optional. Annotations on the cluster. This field has the same\n restrictions as Kubernetes annotations. The total size of all keys and\n values combined is limited to 256k. Key can have 2 segments: prefix (optional)\n and name (required), separated by a slash (/). Prefix must be a DNS subdomain.\n Name must be 63 characters or less, begin and end with alphanumerics,\n with dashes (-), underscores (_), dots (.), and alphanumerics between.'\n authorization:\n type: object\n x-dcl-go-name: Authorization\n x-dcl-go-type: ClusterAuthorization\n description: Configuration related to the cluster RBAC settings.\n required:\n - adminUsers\n properties:\n adminUsers:\n type: array\n x-dcl-go-name: AdminUsers\n description: Users to perform operations as a cluster admin. A managed\n ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole\n to the users. Up to ten admin users can be provided. For more info\n on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: ClusterAuthorizationAdminUsers\n required:\n - username\n properties:\n username:\n type: string\n x-dcl-go-name: Username\n description: The name of the user, e.g. `my-gcp-id@gmail.com`.\n awsRegion:\n type: string\n x-dcl-go-name: AwsRegion\n description: The AWS region where the cluster runs. Each Google Cloud region\n supports a subset of nearby AWS regions. 
You can call to list all supported\n AWS regions within a given Google Cloud region.\n x-kubernetes-immutable: true\n controlPlane:\n type: object\n x-dcl-go-name: ControlPlane\n x-dcl-go-type: ClusterControlPlane\n description: Configuration related to the cluster control plane.\n required:\n - version\n - subnetIds\n - configEncryption\n - iamInstanceProfile\n - databaseEncryption\n - awsServicesAuthentication\n properties:\n awsServicesAuthentication:\n type: object\n x-dcl-go-name: AwsServicesAuthentication\n x-dcl-go-type: ClusterControlPlaneAwsServicesAuthentication\n description: Authentication configuration for management of AWS resources.\n required:\n - roleArn\n properties:\n roleArn:\n type: string\n x-dcl-go-name: RoleArn\n description: The Amazon Resource Name (ARN) of the role that the\n Anthos Multi-Cloud API will assume when managing AWS resources\n on your account.\n roleSessionName:\n type: string\n x-dcl-go-name: RoleSessionName\n description: Optional. An identifier for the assumed role session.\n When unspecified, it defaults to `multicloud-service-agent`.\n x-dcl-server-default: true\n configEncryption:\n type: object\n x-dcl-go-name: ConfigEncryption\n x-dcl-go-type: ClusterControlPlaneConfigEncryption\n description: The ARN of the AWS KMS key used to encrypt cluster configuration.\n required:\n - kmsKeyArn\n properties:\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: The ARN of the AWS KMS key used to encrypt cluster\n configuration.\n databaseEncryption:\n type: object\n x-dcl-go-name: DatabaseEncryption\n x-dcl-go-type: ClusterControlPlaneDatabaseEncryption\n description: The ARN of the AWS KMS key used to encrypt cluster secrets.\n x-kubernetes-immutable: true\n required:\n - kmsKeyArn\n properties:\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: The ARN of the AWS KMS key used to encrypt cluster\n secrets.\n x-kubernetes-immutable: true\n iamInstanceProfile:\n type: string\n x-dcl-go-name: 
IamInstanceProfile\n description: The name of the AWS IAM instance pofile to assign to each\n control plane replica.\n instanceType:\n type: string\n x-dcl-go-name: InstanceType\n description: Optional. The AWS instance type. When unspecified, it defaults\n to `m5.large`.\n x-dcl-server-default: true\n mainVolume:\n type: object\n x-dcl-go-name: MainVolume\n x-dcl-go-type: ClusterControlPlaneMainVolume\n description: Optional. Configuration related to the main volume provisioned\n for each control plane replica. The main volume is in charge of storing\n all of the cluster's etcd state. Volumes will be provisioned in the\n availability zone associated with the corresponding subnet. When unspecified,\n it defaults to 8 GiB with the GP2 volume type.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n iops:\n type: integer\n format: int64\n x-dcl-go-name: Iops\n description: Optional. The number of I/O operations per second (IOPS)\n to provision for GP3 volume.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: Optional. The Amazon Resource Name (ARN) of the Customer\n Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified,\n the default Amazon managed key associated to the AWS region where\n this cluster runs will be used.\n x-kubernetes-immutable: true\n sizeGib:\n type: integer\n format: int64\n x-dcl-go-name: SizeGib\n description: Optional. The size of the volume, in GiBs. When unspecified,\n a default value is provided. See the specific reference in the\n parent resource.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n throughput:\n type: integer\n format: int64\n x-dcl-go-name: Throughput\n description: Optional. The throughput to provision for the volume,\n in MiB/s. 
Only valid if the volume type is GP3.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n volumeType:\n type: string\n x-dcl-go-name: VolumeType\n x-dcl-go-type: ClusterControlPlaneMainVolumeVolumeTypeEnum\n description: 'Optional. Type of the EBS volume. When unspecified,\n it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED,\n GP2, GP3'\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n enum:\n - VOLUME_TYPE_UNSPECIFIED\n - GP2\n - GP3\n proxyConfig:\n type: object\n x-dcl-go-name: ProxyConfig\n x-dcl-go-type: ClusterControlPlaneProxyConfig\n description: Proxy configuration for outbound HTTP(S) traffic.\n required:\n - secretArn\n - secretVersion\n properties:\n secretArn:\n type: string\n x-dcl-go-name: SecretArn\n description: The ARN of the AWS Secret Manager secret that contains\n the HTTP(S) proxy configuration.\n secretVersion:\n type: string\n x-dcl-go-name: SecretVersion\n description: The version string of the AWS Secret Manager secret\n that contains the HTTP(S) proxy configuration.\n rootVolume:\n type: object\n x-dcl-go-name: RootVolume\n x-dcl-go-type: ClusterControlPlaneRootVolume\n description: Optional. Configuration related to the root volume provisioned\n for each control plane replica. Volumes will be provisioned in the\n availability zone associated with the corresponding subnet. When unspecified,\n it defaults to 32 GiB with the GP2 volume type.\n x-dcl-server-default: true\n properties:\n iops:\n type: integer\n format: int64\n x-dcl-go-name: Iops\n description: Optional. The number of I/O operations per second (IOPS)\n to provision for GP3 volume.\n x-dcl-server-default: true\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: Optional. The Amazon Resource Name (ARN) of the Customer\n Managed Key (CMK) used to encrypt AWS EBS volumes. 
If not specified,\n the default Amazon managed key associated to the AWS region where\n this cluster runs will be used.\n sizeGib:\n type: integer\n format: int64\n x-dcl-go-name: SizeGib\n description: Optional. The size of the volume, in GiBs. When unspecified,\n a default value is provided. See the specific reference in the\n parent resource.\n x-dcl-server-default: true\n throughput:\n type: integer\n format: int64\n x-dcl-go-name: Throughput\n description: Optional. The throughput to provision for the volume,\n in MiB/s. Only valid if the volume type is GP3.\n x-dcl-server-default: true\n volumeType:\n type: string\n x-dcl-go-name: VolumeType\n x-dcl-go-type: ClusterControlPlaneRootVolumeVolumeTypeEnum\n description: 'Optional. Type of the EBS volume. When unspecified,\n it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED,\n GP2, GP3'\n x-dcl-server-default: true\n enum:\n - VOLUME_TYPE_UNSPECIFIED\n - GP2\n - GP3\n securityGroupIds:\n type: array\n x-dcl-go-name: SecurityGroupIds\n description: Optional. The IDs of additional security groups to add\n to control plane replicas. The Anthos Multi-Cloud API will automatically\n create and manage security groups with the minimum rules needed for\n a functioning cluster.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n sshConfig:\n type: object\n x-dcl-go-name: SshConfig\n x-dcl-go-type: ClusterControlPlaneSshConfig\n description: Optional. SSH configuration for how to access the underlying\n control plane machines.\n required:\n - ec2KeyPair\n properties:\n ec2KeyPair:\n type: string\n x-dcl-go-name: Ec2KeyPair\n description: The name of the EC2 key pair used to login into cluster\n machines.\n subnetIds:\n type: array\n x-dcl-go-name: SubnetIds\n description: The list of subnets where control plane replicas will run.\n A replica will be provisioned on each subnet and up to three values\n can be provided. 
Each subnet must be in a different AWS Availability\n Zone (AZ).\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n tags:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Tags\n description: Optional. A set of AWS resource tags to propagate to all\n underlying managed AWS resources. Specify at most 50 pairs containing\n alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127\n Unicode characters. Values can be up to 255 Unicode characters.\n version:\n type: string\n x-dcl-go-name: Version\n description: The Kubernetes version to run on control plane replicas\n (e.g. `1.19.10-gke.1000`). You can list all supported versions on\n a given Google Cloud region by calling .\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time at which this cluster was created.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. A human readable description of this cluster. Cannot\n be longer than 255 UTF-8 encoded bytes.\n endpoint:\n type: string\n x-dcl-go-name: Endpoint\n readOnly: true\n description: Output only. The endpoint of the cluster's API server.\n x-kubernetes-immutable: true\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Allows clients to perform consistent read-modify-writes through\n optimistic concurrency control. 
May be sent on update and delete requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n fleet:\n type: object\n x-dcl-go-name: Fleet\n x-dcl-go-type: ClusterFleet\n description: Fleet configuration.\n x-kubernetes-immutable: true\n required:\n - project\n properties:\n membership:\n type: string\n x-dcl-go-name: Membership\n readOnly: true\n description: The name of the managed Hub Membership resource associated\n to this cluster. Membership names are formatted as projects//locations/global/membership/.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The number of the Fleet host project where this cluster\n will be registered.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of this resource.\n x-kubernetes-immutable: true\n networking:\n type: object\n x-dcl-go-name: Networking\n x-dcl-go-type: ClusterNetworking\n description: Cluster-wide networking configuration.\n required:\n - vpcId\n - podAddressCidrBlocks\n - serviceAddressCidrBlocks\n properties:\n perNodePoolSgRulesDisabled:\n type: boolean\n x-dcl-go-name: PerNodePoolSgRulesDisabled\n description: Disable the per node pool subnet security group rules on\n the control plane security group. When set to true, you must also\n provide one or more security groups that ensure node pools are able\n to send requests to the control plane on TCP/443 and TCP/8132. Failure\n to do so may result in unavailable node pools.\n podAddressCidrBlocks:\n type: array\n x-dcl-go-name: PodAddressCidrBlocks\n description: All pods in the cluster are assigned an RFC1918 IPv4 address\n from these ranges. Only a single range is supported. 
This field cannot\n be changed after creation.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n serviceAddressCidrBlocks:\n type: array\n x-dcl-go-name: ServiceAddressCidrBlocks\n description: All services in the cluster are assigned an RFC1918 IPv4\n address from these ranges. Only a single range is supported. This\n field cannot be changed after creation.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n vpcId:\n type: string\n x-dcl-go-name: VPCId\n description: The VPC associated with the cluster. All component clusters\n (i.e. control plane and node pools) run on a single VPC. This field\n cannot be changed after creation.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: Output only. If set, there are currently changes in flight\n to the cluster.\n x-kubernetes-immutable: true\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: ClusterStateEnum\n readOnly: true\n description: 'Output only. The current state of the cluster. Possible values:\n STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR,\n DEGRADED'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - PROVISIONING\n - RUNNING\n - RECONCILING\n - STOPPING\n - ERROR\n - DEGRADED\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. A globally unique identifier for the cluster.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
The time at which this cluster was last updated.\n x-kubernetes-immutable: true\n workloadIdentityConfig:\n type: object\n x-dcl-go-name: WorkloadIdentityConfig\n x-dcl-go-type: ClusterWorkloadIdentityConfig\n readOnly: true\n description: Output only. Workload Identity settings.\n x-kubernetes-immutable: true\n properties:\n identityProvider:\n type: string\n x-dcl-go-name: IdentityProvider\n description: The ID of the OIDC Identity Provider (IdP) associated to\n the Workload Identity Pool.\n x-kubernetes-immutable: true\n issuerUri:\n type: string\n x-dcl-go-name: IssuerUri\n description: The OIDC issuer URL for this cluster.\n x-kubernetes-immutable: true\n workloadPool:\n type: string\n x-dcl-go-name: WorkloadPool\n description: The Workload Identity Pool associated to the cluster.\n x-kubernetes-immutable: true\n") -// 22013 bytes -// MD5: 84a665658f5d4d70eb594e7863a50bd9 +// 23213 bytes +// MD5: 4575d82194b229624a2afc1a80a7d7f3 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.go index 83fa158104..691ea31f09 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.go @@ -209,6 +209,7 @@ type NodePoolConfigRootVolume struct { SizeGib *int64 `json:"sizeGib"` VolumeType *NodePoolConfigRootVolumeVolumeTypeEnum `json:"volumeType"` Iops *int64 `json:"iops"` + Throughput *int64 `json:"throughput"` KmsKeyArn *string `json:"kmsKeyArn"` } @@ -233,6 +234,8 @@ func (r *NodePoolConfigRootVolume) UnmarshalJSON(data []byte) error { r.Iops = res.Iops + r.Throughput = res.Throughput + 
r.KmsKeyArn = res.KmsKeyArn } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.yaml index 3c44a0fbfd..7be2709e7f 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool.yaml @@ -255,6 +255,13 @@ components: a default value is provided. See the specific reference in the parent resource. x-dcl-server-default: true + throughput: + type: integer + format: int64 + x-dcl-go-name: Throughput + description: Optional. The throughput to provision for the volume, + in MiB/s. Only valid if the volume type is GP3. 
+ x-dcl-server-default: true volumeType: type: string x-dcl-go-name: VolumeType diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_internal.go index dfd7129dc8..7cd0b8eff9 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_internal.go @@ -926,6 +926,12 @@ func canonicalizeNodePoolConfigRootVolume(des, initial *NodePoolConfigRootVolume } else { cDes.Iops = des.Iops } + if dcl.IsZeroValue(des.Throughput) || (dcl.IsEmptyValueIndirect(des.Throughput) && dcl.IsEmptyValueIndirect(initial.Throughput)) { + // Desired and initial values are equivalent, so set canonical desired value to initial value. + cDes.Throughput = initial.Throughput + } else { + cDes.Throughput = des.Throughput + } if dcl.StringCanonicalize(des.KmsKeyArn, initial.KmsKeyArn) || dcl.IsZeroValue(des.KmsKeyArn) { cDes.KmsKeyArn = initial.KmsKeyArn } else { @@ -2161,6 +2167,13 @@ func compareNodePoolConfigRootVolumeNewStyle(d, a interface{}, fn dcl.FieldName) diffs = append(diffs, ds...) } + if ds, err := dcl.Diff(desired.Throughput, actual.Throughput, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("Throughput")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) 
+ } + if ds, err := dcl.Diff(desired.KmsKeyArn, actual.KmsKeyArn, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNodePoolUpdateAwsNodePoolOperation")}, fn.AddNest("KmsKeyArn")); len(ds) != 0 || err != nil { if err != nil { return nil, err @@ -2819,6 +2832,9 @@ func expandNodePoolConfigRootVolume(c *Client, f *NodePoolConfigRootVolume, res if v := f.Iops; !dcl.IsEmptyValueIndirect(v) { m["iops"] = v } + if v := f.Throughput; !dcl.IsEmptyValueIndirect(v) { + m["throughput"] = v + } if v := f.KmsKeyArn; !dcl.IsEmptyValueIndirect(v) { m["kmsKeyArn"] = v } @@ -2842,6 +2858,7 @@ func flattenNodePoolConfigRootVolume(c *Client, i interface{}, res *NodePool) *N r.SizeGib = dcl.FlattenInteger(m["sizeGib"]) r.VolumeType = flattenNodePoolConfigRootVolumeVolumeTypeEnum(m["volumeType"]) r.Iops = dcl.FlattenInteger(m["iops"]) + r.Throughput = dcl.FlattenInteger(m["throughput"]) r.KmsKeyArn = dcl.FlattenString(m["kmsKeyArn"]) return r diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_schema.go index df45383362..5494b7e50b 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_schema.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_schema.go @@ -306,6 +306,13 @@ func DCLNodePoolSchema() *dcl.Schema { Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", ServerDefault: true, }, + "throughput": &dcl.Property{ + Type: "integer", + Format: "int64", + GoName: "Throughput", + Description: "Optional. 
The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3.", + ServerDefault: true, + }, "volumeType": &dcl.Property{ Type: "string", GoName: "VolumeType", diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_yaml_embed.go index b922755d70..8b45239b8a 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/node_pool_yaml_embed.go @@ -17,7 +17,7 @@ package containeraws // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/containeraws/node_pool.yaml -var YAML_node_pool = []byte("info:\n title: ContainerAws/NodePool\n description: An Anthos node pool running on AWS.\n x-dcl-struct-name: NodePool\n x-dcl-has-iam: false\n x-dcl-ref:\n text: API reference\n url: https://cloud.google.com/anthos/clusters/docs/multi-cloud/reference/rest/v1/projects.locations.awsClusters.awsNodePools\n x-dcl-guides:\n - text: Multicloud overview\n url: https://cloud.google.com/anthos/clusters/docs/multi-cloud\npaths:\n get:\n description: The function used to get information about a NodePool\n parameters:\n - name: nodePool\n required: true\n description: A full instance of a NodePool\n apply:\n description: The function used to apply information about a NodePool\n parameters:\n - name: nodePool\n required: true\n description: A full instance of a NodePool\n delete:\n description: The function used to delete a NodePool\n parameters:\n - name: nodePool\n required: true\n description: A full instance of a NodePool\n deleteAll:\n description: The 
function used to delete all NodePool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: cluster\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many NodePool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: cluster\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n NodePool:\n title: NodePool\n x-dcl-id: projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - version\n - config\n - autoscaling\n - subnetId\n - maxPodsConstraint\n - project\n - location\n - cluster\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: 'Optional. Annotations on the node pool. This field has the\n same restrictions as Kubernetes annotations. The total size of all keys\n and values combined is limited to 256k. Key can have 2 segments: prefix\n (optional) and name (required), separated by a slash (/). Prefix must\n be a DNS subdomain. Name must be 63 characters or less, begin and end\n with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics\n between.'\n autoscaling:\n type: object\n x-dcl-go-name: Autoscaling\n x-dcl-go-type: NodePoolAutoscaling\n description: Autoscaler configuration for this node pool.\n required:\n - minNodeCount\n - maxNodeCount\n properties:\n maxNodeCount:\n type: integer\n format: int64\n x-dcl-go-name: MaxNodeCount\n description: Maximum number of nodes in the NodePool. 
Must be >= min_node_count.\n minNodeCount:\n type: integer\n format: int64\n x-dcl-go-name: MinNodeCount\n description: Minimum number of nodes in the NodePool. Must be >= 1 and\n <= max_node_count.\n cluster:\n type: string\n x-dcl-go-name: Cluster\n description: The awsCluster for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Gkemulticloud/Cluster\n field: name\n parent: true\n config:\n type: object\n x-dcl-go-name: Config\n x-dcl-go-type: NodePoolConfig\n description: The configuration of the node pool.\n required:\n - iamInstanceProfile\n - configEncryption\n properties:\n autoscalingMetricsCollection:\n type: object\n x-dcl-go-name: AutoscalingMetricsCollection\n x-dcl-go-type: NodePoolConfigAutoscalingMetricsCollection\n description: Optional. Configuration related to CloudWatch metrics collection\n on the Auto Scaling group of the node pool. When unspecified, metrics\n collection is disabled.\n required:\n - granularity\n properties:\n granularity:\n type: string\n x-dcl-go-name: Granularity\n description: The frequency at which EC2 Auto Scaling sends aggregated\n data to AWS CloudWatch. The only valid value is \"1Minute\".\n metrics:\n type: array\n x-dcl-go-name: Metrics\n description: The metrics to enable. 
For a list of valid metrics,\n see https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html.\n If you specify granularity and don't specify any metrics, all\n metrics are enabled.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n configEncryption:\n type: object\n x-dcl-go-name: ConfigEncryption\n x-dcl-go-type: NodePoolConfigConfigEncryption\n description: The ARN of the AWS KMS key used to encrypt node pool configuration.\n required:\n - kmsKeyArn\n properties:\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: The ARN of the AWS KMS key used to encrypt node pool\n configuration.\n iamInstanceProfile:\n type: string\n x-dcl-go-name: IamInstanceProfile\n description: The name of the AWS IAM role assigned to nodes in the pool.\n instanceType:\n type: string\n x-dcl-go-name: InstanceType\n description: Optional. The AWS instance type. When unspecified, it defaults\n to `m5.large`.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. The initial labels assigned to nodes of this\n node pool. An object containing a list of \"key\": value pairs. 
Example:\n { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.'\n x-kubernetes-immutable: true\n proxyConfig:\n type: object\n x-dcl-go-name: ProxyConfig\n x-dcl-go-type: NodePoolConfigProxyConfig\n description: Proxy configuration for outbound HTTP(S) traffic.\n required:\n - secretArn\n - secretVersion\n properties:\n secretArn:\n type: string\n x-dcl-go-name: SecretArn\n description: The ARN of the AWS Secret Manager secret that contains\n the HTTP(S) proxy configuration.\n secretVersion:\n type: string\n x-dcl-go-name: SecretVersion\n description: The version string of the AWS Secret Manager secret\n that contains the HTTP(S) proxy configuration.\n rootVolume:\n type: object\n x-dcl-go-name: RootVolume\n x-dcl-go-type: NodePoolConfigRootVolume\n description: Optional. Template for the root volume provisioned for\n node pool nodes. Volumes will be provisioned in the availability zone\n assigned to the node pool subnet. When unspecified, it defaults to\n 32 GiB with the GP2 volume type.\n x-dcl-server-default: true\n properties:\n iops:\n type: integer\n format: int64\n x-dcl-go-name: Iops\n description: Optional. The number of I/O operations per second (IOPS)\n to provision for GP3 volume.\n x-dcl-server-default: true\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: Optional. The Amazon Resource Name (ARN) of the Customer\n Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified,\n the default Amazon managed key associated to the AWS region where\n this cluster runs will be used.\n sizeGib:\n type: integer\n format: int64\n x-dcl-go-name: SizeGib\n description: Optional. The size of the volume, in GiBs. When unspecified,\n a default value is provided. See the specific reference in the\n parent resource.\n x-dcl-server-default: true\n volumeType:\n type: string\n x-dcl-go-name: VolumeType\n x-dcl-go-type: NodePoolConfigRootVolumeVolumeTypeEnum\n description: 'Optional. Type of the EBS volume. 
When unspecified,\n it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED,\n GP2, GP3'\n x-dcl-server-default: true\n enum:\n - VOLUME_TYPE_UNSPECIFIED\n - GP2\n - GP3\n securityGroupIds:\n type: array\n x-dcl-go-name: SecurityGroupIds\n description: Optional. The IDs of additional security groups to add\n to nodes in this pool. The manager will automatically create security\n groups with minimum rules needed for a functioning cluster.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n sshConfig:\n type: object\n x-dcl-go-name: SshConfig\n x-dcl-go-type: NodePoolConfigSshConfig\n description: Optional. The SSH configuration.\n required:\n - ec2KeyPair\n properties:\n ec2KeyPair:\n type: string\n x-dcl-go-name: Ec2KeyPair\n description: The name of the EC2 key pair used to login into cluster\n machines.\n tags:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Tags\n description: Optional. Key/value metadata to assign to each underlying\n AWS resource. Specify at most 50 pairs containing alphanumerics, spaces,\n and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters.\n Values can be up to 255 Unicode characters.\n taints:\n type: array\n x-dcl-go-name: Taints\n description: Optional. The initial taints assigned to nodes of this\n node pool.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: NodePoolConfigTaints\n required:\n - key\n - value\n - effect\n properties:\n effect:\n type: string\n x-dcl-go-name: Effect\n x-dcl-go-type: NodePoolConfigTaintsEffectEnum\n description: 'The taint effect. 
Possible values: EFFECT_UNSPECIFIED,\n NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE'\n x-kubernetes-immutable: true\n enum:\n - EFFECT_UNSPECIFIED\n - NO_SCHEDULE\n - PREFER_NO_SCHEDULE\n - NO_EXECUTE\n key:\n type: string\n x-dcl-go-name: Key\n description: Key for the taint.\n x-kubernetes-immutable: true\n value:\n type: string\n x-dcl-go-name: Value\n description: Value for the taint.\n x-kubernetes-immutable: true\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time at which this node pool was created.\n x-kubernetes-immutable: true\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Allows clients to perform consistent read-modify-writes through\n optimistic concurrency control. May be sent on update and delete requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n maxPodsConstraint:\n type: object\n x-dcl-go-name: MaxPodsConstraint\n x-dcl-go-type: NodePoolMaxPodsConstraint\n description: The constraint on the maximum number of pods that can be run\n simultaneously on a node in the node pool.\n x-kubernetes-immutable: true\n required:\n - maxPodsPerNode\n properties:\n maxPodsPerNode:\n type: integer\n format: int64\n x-dcl-go-name: MaxPodsPerNode\n description: The maximum number of pods to schedule on a single node.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of this resource.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: Output 
only. If set, there are currently changes in flight\n to the node pool.\n x-kubernetes-immutable: true\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: NodePoolStateEnum\n readOnly: true\n description: 'Output only. The lifecycle state of the node pool. Possible\n values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING,\n ERROR, DEGRADED'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - PROVISIONING\n - RUNNING\n - RECONCILING\n - STOPPING\n - ERROR\n - DEGRADED\n subnetId:\n type: string\n x-dcl-go-name: SubnetId\n description: The subnet where the node pool node run.\n x-kubernetes-immutable: true\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. A globally unique identifier for the node pool.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time at which this node pool was last updated.\n x-kubernetes-immutable: true\n version:\n type: string\n x-dcl-go-name: Version\n description: The Kubernetes version to run on this node pool (e.g. 
`1.19.10-gke.1000`).\n You can list all supported versions on a given Google Cloud region by\n calling GetAwsServerConfig.\n") +var YAML_node_pool = []byte("info:\n title: ContainerAws/NodePool\n description: An Anthos node pool running on AWS.\n x-dcl-struct-name: NodePool\n x-dcl-has-iam: false\n x-dcl-ref:\n text: API reference\n url: https://cloud.google.com/anthos/clusters/docs/multi-cloud/reference/rest/v1/projects.locations.awsClusters.awsNodePools\n x-dcl-guides:\n - text: Multicloud overview\n url: https://cloud.google.com/anthos/clusters/docs/multi-cloud\npaths:\n get:\n description: The function used to get information about a NodePool\n parameters:\n - name: nodePool\n required: true\n description: A full instance of a NodePool\n apply:\n description: The function used to apply information about a NodePool\n parameters:\n - name: nodePool\n required: true\n description: A full instance of a NodePool\n delete:\n description: The function used to delete a NodePool\n parameters:\n - name: nodePool\n required: true\n description: A full instance of a NodePool\n deleteAll:\n description: The function used to delete all NodePool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: cluster\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many NodePool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: cluster\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n NodePool:\n title: NodePool\n x-dcl-id: projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - version\n - config\n - 
autoscaling\n - subnetId\n - maxPodsConstraint\n - project\n - location\n - cluster\n properties:\n annotations:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Annotations\n description: 'Optional. Annotations on the node pool. This field has the\n same restrictions as Kubernetes annotations. The total size of all keys\n and values combined is limited to 256k. Key can have 2 segments: prefix\n (optional) and name (required), separated by a slash (/). Prefix must\n be a DNS subdomain. Name must be 63 characters or less, begin and end\n with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics\n between.'\n autoscaling:\n type: object\n x-dcl-go-name: Autoscaling\n x-dcl-go-type: NodePoolAutoscaling\n description: Autoscaler configuration for this node pool.\n required:\n - minNodeCount\n - maxNodeCount\n properties:\n maxNodeCount:\n type: integer\n format: int64\n x-dcl-go-name: MaxNodeCount\n description: Maximum number of nodes in the NodePool. Must be >= min_node_count.\n minNodeCount:\n type: integer\n format: int64\n x-dcl-go-name: MinNodeCount\n description: Minimum number of nodes in the NodePool. Must be >= 1 and\n <= max_node_count.\n cluster:\n type: string\n x-dcl-go-name: Cluster\n description: The awsCluster for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Gkemulticloud/Cluster\n field: name\n parent: true\n config:\n type: object\n x-dcl-go-name: Config\n x-dcl-go-type: NodePoolConfig\n description: The configuration of the node pool.\n required:\n - iamInstanceProfile\n - configEncryption\n properties:\n autoscalingMetricsCollection:\n type: object\n x-dcl-go-name: AutoscalingMetricsCollection\n x-dcl-go-type: NodePoolConfigAutoscalingMetricsCollection\n description: Optional. Configuration related to CloudWatch metrics collection\n on the Auto Scaling group of the node pool. 
When unspecified, metrics\n collection is disabled.\n required:\n - granularity\n properties:\n granularity:\n type: string\n x-dcl-go-name: Granularity\n description: The frequency at which EC2 Auto Scaling sends aggregated\n data to AWS CloudWatch. The only valid value is \"1Minute\".\n metrics:\n type: array\n x-dcl-go-name: Metrics\n description: The metrics to enable. For a list of valid metrics,\n see https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_EnableMetricsCollection.html.\n If you specify granularity and don't specify any metrics, all\n metrics are enabled.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n configEncryption:\n type: object\n x-dcl-go-name: ConfigEncryption\n x-dcl-go-type: NodePoolConfigConfigEncryption\n description: The ARN of the AWS KMS key used to encrypt node pool configuration.\n required:\n - kmsKeyArn\n properties:\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: The ARN of the AWS KMS key used to encrypt node pool\n configuration.\n iamInstanceProfile:\n type: string\n x-dcl-go-name: IamInstanceProfile\n description: The name of the AWS IAM role assigned to nodes in the pool.\n instanceType:\n type: string\n x-dcl-go-name: InstanceType\n description: Optional. The AWS instance type. When unspecified, it defaults\n to `m5.large`.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: 'Optional. The initial labels assigned to nodes of this\n node pool. An object containing a list of \"key\": value pairs. 
Example:\n { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.'\n x-kubernetes-immutable: true\n proxyConfig:\n type: object\n x-dcl-go-name: ProxyConfig\n x-dcl-go-type: NodePoolConfigProxyConfig\n description: Proxy configuration for outbound HTTP(S) traffic.\n required:\n - secretArn\n - secretVersion\n properties:\n secretArn:\n type: string\n x-dcl-go-name: SecretArn\n description: The ARN of the AWS Secret Manager secret that contains\n the HTTP(S) proxy configuration.\n secretVersion:\n type: string\n x-dcl-go-name: SecretVersion\n description: The version string of the AWS Secret Manager secret\n that contains the HTTP(S) proxy configuration.\n rootVolume:\n type: object\n x-dcl-go-name: RootVolume\n x-dcl-go-type: NodePoolConfigRootVolume\n description: Optional. Template for the root volume provisioned for\n node pool nodes. Volumes will be provisioned in the availability zone\n assigned to the node pool subnet. When unspecified, it defaults to\n 32 GiB with the GP2 volume type.\n x-dcl-server-default: true\n properties:\n iops:\n type: integer\n format: int64\n x-dcl-go-name: Iops\n description: Optional. The number of I/O operations per second (IOPS)\n to provision for GP3 volume.\n x-dcl-server-default: true\n kmsKeyArn:\n type: string\n x-dcl-go-name: KmsKeyArn\n description: Optional. The Amazon Resource Name (ARN) of the Customer\n Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified,\n the default Amazon managed key associated to the AWS region where\n this cluster runs will be used.\n sizeGib:\n type: integer\n format: int64\n x-dcl-go-name: SizeGib\n description: Optional. The size of the volume, in GiBs. When unspecified,\n a default value is provided. See the specific reference in the\n parent resource.\n x-dcl-server-default: true\n throughput:\n type: integer\n format: int64\n x-dcl-go-name: Throughput\n description: Optional. The throughput to provision for the volume,\n in MiB/s. 
Only valid if the volume type is GP3.\n x-dcl-server-default: true\n volumeType:\n type: string\n x-dcl-go-name: VolumeType\n x-dcl-go-type: NodePoolConfigRootVolumeVolumeTypeEnum\n description: 'Optional. Type of the EBS volume. When unspecified,\n it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED,\n GP2, GP3'\n x-dcl-server-default: true\n enum:\n - VOLUME_TYPE_UNSPECIFIED\n - GP2\n - GP3\n securityGroupIds:\n type: array\n x-dcl-go-name: SecurityGroupIds\n description: Optional. The IDs of additional security groups to add\n to nodes in this pool. The manager will automatically create security\n groups with minimum rules needed for a functioning cluster.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n sshConfig:\n type: object\n x-dcl-go-name: SshConfig\n x-dcl-go-type: NodePoolConfigSshConfig\n description: Optional. The SSH configuration.\n required:\n - ec2KeyPair\n properties:\n ec2KeyPair:\n type: string\n x-dcl-go-name: Ec2KeyPair\n description: The name of the EC2 key pair used to login into cluster\n machines.\n tags:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Tags\n description: Optional. Key/value metadata to assign to each underlying\n AWS resource. Specify at most 50 pairs containing alphanumerics, spaces,\n and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters.\n Values can be up to 255 Unicode characters.\n taints:\n type: array\n x-dcl-go-name: Taints\n description: Optional. The initial taints assigned to nodes of this\n node pool.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: NodePoolConfigTaints\n required:\n - key\n - value\n - effect\n properties:\n effect:\n type: string\n x-dcl-go-name: Effect\n x-dcl-go-type: NodePoolConfigTaintsEffectEnum\n description: 'The taint effect. 
Possible values: EFFECT_UNSPECIFIED,\n NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE'\n x-kubernetes-immutable: true\n enum:\n - EFFECT_UNSPECIFIED\n - NO_SCHEDULE\n - PREFER_NO_SCHEDULE\n - NO_EXECUTE\n key:\n type: string\n x-dcl-go-name: Key\n description: Key for the taint.\n x-kubernetes-immutable: true\n value:\n type: string\n x-dcl-go-name: Value\n description: Value for the taint.\n x-kubernetes-immutable: true\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time at which this node pool was created.\n x-kubernetes-immutable: true\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Allows clients to perform consistent read-modify-writes through\n optimistic concurrency control. May be sent on update and delete requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n maxPodsConstraint:\n type: object\n x-dcl-go-name: MaxPodsConstraint\n x-dcl-go-type: NodePoolMaxPodsConstraint\n description: The constraint on the maximum number of pods that can be run\n simultaneously on a node in the node pool.\n x-kubernetes-immutable: true\n required:\n - maxPodsPerNode\n properties:\n maxPodsPerNode:\n type: integer\n format: int64\n x-dcl-go-name: MaxPodsPerNode\n description: The maximum number of pods to schedule on a single node.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The name of this resource.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: Output 
only. If set, there are currently changes in flight\n to the node pool.\n x-kubernetes-immutable: true\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: NodePoolStateEnum\n readOnly: true\n description: 'Output only. The lifecycle state of the node pool. Possible\n values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING,\n ERROR, DEGRADED'\n x-kubernetes-immutable: true\n enum:\n - STATE_UNSPECIFIED\n - PROVISIONING\n - RUNNING\n - RECONCILING\n - STOPPING\n - ERROR\n - DEGRADED\n subnetId:\n type: string\n x-dcl-go-name: SubnetId\n description: The subnet where the node pool node run.\n x-kubernetes-immutable: true\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. A globally unique identifier for the node pool.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time at which this node pool was last updated.\n x-kubernetes-immutable: true\n version:\n type: string\n x-dcl-go-name: Version\n description: The Kubernetes version to run on this node pool (e.g. 
`1.19.10-gke.1000`).\n You can list all supported versions on a given Google Cloud region by\n calling GetAwsServerConfig.\n") -// 16573 bytes -// MD5: 15025bf72fda102d218c9b748b446e53 +// 16907 bytes +// MD5: bae4f4e07d81af621ab198daf9049d74 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger.go index caf902e644..0eb92f879b 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger.go @@ -25,20 +25,21 @@ import ( ) type Trigger struct { - Name *string `json:"name"` - Uid *string `json:"uid"` - CreateTime *string `json:"createTime"` - UpdateTime *string `json:"updateTime"` - MatchingCriteria []TriggerMatchingCriteria `json:"matchingCriteria"` - ServiceAccount *string `json:"serviceAccount"` - Destination *TriggerDestination `json:"destination"` - Transport *TriggerTransport `json:"transport"` - Labels map[string]string `json:"labels"` - Etag *string `json:"etag"` - Project *string `json:"project"` - Location *string `json:"location"` - Channel *string `json:"channel"` - Conditions map[string]string `json:"conditions"` + Name *string `json:"name"` + Uid *string `json:"uid"` + CreateTime *string `json:"createTime"` + UpdateTime *string `json:"updateTime"` + MatchingCriteria []TriggerMatchingCriteria `json:"matchingCriteria"` + ServiceAccount *string `json:"serviceAccount"` + Destination *TriggerDestination `json:"destination"` + Transport *TriggerTransport `json:"transport"` + Labels map[string]string `json:"labels"` + Etag *string `json:"etag"` + Project *string `json:"project"` + Location *string 
`json:"location"` + Channel *string `json:"channel"` + Conditions map[string]string `json:"conditions"` + EventDataContentType *string `json:"eventDataContentType"` } func (r *Trigger) String() string { @@ -373,20 +374,21 @@ func (r *Trigger) ID() (string, error) { } nr := r.urlNormalized() params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "uid": dcl.ValueOrEmptyString(nr.Uid), - "create_time": dcl.ValueOrEmptyString(nr.CreateTime), - "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), - "matching_criteria": dcl.ValueOrEmptyString(nr.MatchingCriteria), - "service_account": dcl.ValueOrEmptyString(nr.ServiceAccount), - "destination": dcl.ValueOrEmptyString(nr.Destination), - "transport": dcl.ValueOrEmptyString(nr.Transport), - "labels": dcl.ValueOrEmptyString(nr.Labels), - "etag": dcl.ValueOrEmptyString(nr.Etag), - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "channel": dcl.ValueOrEmptyString(nr.Channel), - "conditions": dcl.ValueOrEmptyString(nr.Conditions), + "name": dcl.ValueOrEmptyString(nr.Name), + "uid": dcl.ValueOrEmptyString(nr.Uid), + "create_time": dcl.ValueOrEmptyString(nr.CreateTime), + "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), + "matching_criteria": dcl.ValueOrEmptyString(nr.MatchingCriteria), + "service_account": dcl.ValueOrEmptyString(nr.ServiceAccount), + "destination": dcl.ValueOrEmptyString(nr.Destination), + "transport": dcl.ValueOrEmptyString(nr.Transport), + "labels": dcl.ValueOrEmptyString(nr.Labels), + "etag": dcl.ValueOrEmptyString(nr.Etag), + "project": dcl.ValueOrEmptyString(nr.Project), + "location": dcl.ValueOrEmptyString(nr.Location), + "channel": dcl.ValueOrEmptyString(nr.Channel), + "conditions": dcl.ValueOrEmptyString(nr.Conditions), + "event_data_content_type": dcl.ValueOrEmptyString(nr.EventDataContentType), } return dcl.Nprintf("projects/{{project}}/locations/{{location}}/triggers/{{name}}", params), nil } diff --git 
a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger.yaml index c731fd8df3..ed9d3b9a63 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger.yaml @@ -226,6 +226,12 @@ components: on the value of other fields, and may be sent only on create requests to ensure the client has an up-to-date value before proceeding. x-kubernetes-immutable: true + eventDataContentType: + type: string + x-dcl-go-name: EventDataContentType + description: Optional. EventDataContentType specifies the type of payload + in MIME format that is expected from the CloudEvent data field. This is + set to `application/json` if the value is not defined. 
labels: type: object additionalProperties: diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger_internal.go index 27ca4dbc16..a19a6a0a90 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger_internal.go @@ -190,6 +190,9 @@ func newUpdateTriggerUpdateTriggerRequest(ctx context.Context, f *Trigger, c *Cl if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { req["labels"] = v } + if v := f.EventDataContentType; !dcl.IsEmptyValueIndirect(v) { + req["eventDataContentType"] = v + } b, err := c.getTriggerRaw(ctx, f) if err != nil { return nil, err @@ -577,6 +580,11 @@ func canonicalizeTriggerDesiredState(rawDesired, rawInitial *Trigger, opts ...dc } else { canonicalDesired.Channel = rawDesired.Channel } + if dcl.StringCanonicalize(rawDesired.EventDataContentType, rawInitial.EventDataContentType) { + canonicalDesired.EventDataContentType = rawInitial.EventDataContentType + } else { + canonicalDesired.EventDataContentType = rawDesired.EventDataContentType + } return canonicalDesired, nil } @@ -658,6 +666,14 @@ func canonicalizeTriggerNewState(c *Client, rawNew, rawDesired *Trigger) (*Trigg } else { } + if dcl.IsEmptyValueIndirect(rawNew.EventDataContentType) && dcl.IsEmptyValueIndirect(rawDesired.EventDataContentType) { + rawNew.EventDataContentType = rawDesired.EventDataContentType + } else { + if dcl.StringCanonicalize(rawDesired.EventDataContentType, rawNew.EventDataContentType) { + rawNew.EventDataContentType = rawDesired.EventDataContentType + } + } + return rawNew, nil } @@ -1594,6 +1610,13 @@ 
func diffTrigger(c *Client, desired, actual *Trigger, opts ...dcl.ApplyOption) ( newDiffs = append(newDiffs, ds...) } + if ds, err := dcl.Diff(desired.EventDataContentType, actual.EventDataContentType, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateTriggerUpdateTriggerOperation")}, fn.AddNest("EventDataContentType")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + newDiffs = append(newDiffs, ds...) + } + if len(newDiffs) > 0 { c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) } @@ -1869,6 +1892,7 @@ func (r *Trigger) urlNormalized() *Trigger { normalized.Project = dcl.SelfLinkToName(r.Project) normalized.Location = dcl.SelfLinkToName(r.Location) normalized.Channel = dcl.SelfLinkToName(r.Channel) + normalized.EventDataContentType = dcl.SelfLinkToName(r.EventDataContentType) return &normalized } @@ -1961,6 +1985,9 @@ func expandTrigger(c *Client, f *Trigger) (map[string]interface{}, error) { if v := f.Channel; dcl.ValueShouldBeSent(v) { m["channel"] = v } + if v := f.EventDataContentType; dcl.ValueShouldBeSent(v) { + m["eventDataContentType"] = v + } return m, nil } @@ -1991,6 +2018,7 @@ func flattenTrigger(c *Client, i interface{}, res *Trigger) *Trigger { resultRes.Location = dcl.FlattenString(m["location"]) resultRes.Channel = dcl.FlattenString(m["channel"]) resultRes.Conditions = dcl.FlattenKeyValuePairs(m["conditions"]) + resultRes.EventDataContentType = dcl.FlattenString(m["eventDataContentType"]) return resultRes } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger_schema.go index 613b7a0443..051def4e82 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger_schema.go +++ 
b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger_schema.go @@ -277,6 +277,11 @@ func DCLTriggerSchema() *dcl.Schema { Description: "Output only. This checksum is computed by the server based on the value of other fields, and may be sent only on create requests to ensure the client has an up-to-date value before proceeding.", Immutable: true, }, + "eventDataContentType": &dcl.Property{ + Type: "string", + GoName: "EventDataContentType", + Description: "Optional. EventDataContentType specifies the type of payload in MIME format that is expected from the CloudEvent data field. This is set to `application/json` if the value is not defined.", + }, "labels": &dcl.Property{ Type: "object", AdditionalProperties: &dcl.Property{ diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger_yaml_embed.go index 2bc705b215..16f0c58486 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc/trigger_yaml_embed.go @@ -17,7 +17,7 @@ package eventarc // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/eventarc/trigger.yaml -var YAML_trigger = []byte("info:\n title: Eventarc/Trigger\n description: The Eventarc Trigger resource\n x-dcl-struct-name: Trigger\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Trigger\n parameters:\n - name: trigger\n required: true\n description: A full instance of a Trigger\n apply:\n description: The function used to apply information 
about a Trigger\n parameters:\n - name: trigger\n required: true\n description: A full instance of a Trigger\n delete:\n description: The function used to delete a Trigger\n parameters:\n - name: trigger\n required: true\n description: A full instance of a Trigger\n deleteAll:\n description: The function used to delete all Trigger\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Trigger\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Trigger:\n title: Trigger\n x-dcl-id: projects/{{project}}/locations/{{location}}/triggers/{{name}}\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - matchingCriteria\n - destination\n - project\n - location\n properties:\n channel:\n type: string\n x-dcl-go-name: Channel\n description: Optional. The name of the channel associated with the trigger\n in `projects/{project}/locations/{location}/channels/{channel}` format.\n You must provide a channel to receive events from Eventarc SaaS partners.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Eventarc/Channel\n field: name\n conditions:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Conditions\n readOnly: true\n description: Output only. The reason(s) why a trigger is in FAILED state.\n x-kubernetes-immutable: true\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The creation time.\n x-kubernetes-immutable: true\n destination:\n type: object\n x-dcl-go-name: Destination\n x-dcl-go-type: TriggerDestination\n description: Required. 
Destination specifies where the events should be\n sent to.\n properties:\n cloudFunction:\n type: string\n x-dcl-go-name: CloudFunction\n description: '[WARNING] Configuring a Cloud Function in Trigger is not\n supported as of today. The Cloud Function resource name. Format: projects/{project}/locations/{location}/functions/{function}'\n x-dcl-conflicts:\n - cloudRunService\n - gke\n - workflow\n x-dcl-references:\n - resource: Cloudfunctions/Function\n field: name\n cloudRunService:\n type: object\n x-dcl-go-name: CloudRunService\n x-dcl-go-type: TriggerDestinationCloudRunService\n description: Cloud Run fully-managed service that receives the events.\n The service should be running in the same project of the trigger.\n x-dcl-conflicts:\n - cloudFunction\n - gke\n - workflow\n required:\n - service\n - region\n properties:\n path:\n type: string\n x-dcl-go-name: Path\n description: 'Optional. The relative path on the Cloud Run service\n the events should be sent to. The value must conform to the definition\n of URI path segment (section 3.3 of RFC2396). Examples: \"/route\",\n \"route\", \"route/subroute\".'\n region:\n type: string\n x-dcl-go-name: Region\n description: Required. The region the Cloud Run service is deployed\n in.\n service:\n type: string\n x-dcl-go-name: Service\n description: Required. The name of the Cloud Run service being addressed.\n See https://cloud.google.com/run/docs/reference/rest/v1/namespaces.services.\n Only services located in the same project of the trigger object\n can be addressed.\n x-dcl-references:\n - resource: Run/Service\n field: name\n gke:\n type: object\n x-dcl-go-name: Gke\n x-dcl-go-type: TriggerDestinationGke\n description: A GKE service capable of receiving events. 
The service\n should be running in the same project as the trigger.\n x-dcl-conflicts:\n - cloudRunService\n - cloudFunction\n - workflow\n required:\n - cluster\n - location\n - namespace\n - service\n properties:\n cluster:\n type: string\n x-dcl-go-name: Cluster\n description: Required. The name of the cluster the GKE service is\n running in. The cluster must be running in the same project as\n the trigger being created.\n x-dcl-references:\n - resource: Container/Cluster\n field: selfLink\n location:\n type: string\n x-dcl-go-name: Location\n description: Required. The name of the Google Compute Engine in\n which the cluster resides, which can either be compute zone (for\n example, us-central1-a) for the zonal clusters or region (for\n example, us-central1) for regional clusters.\n namespace:\n type: string\n x-dcl-go-name: Namespace\n description: Required. The namespace the GKE service is running\n in.\n path:\n type: string\n x-dcl-go-name: Path\n description: 'Optional. The relative path on the GKE service the\n events should be sent to. The value must conform to the definition\n of a URI path segment (section 3.3 of RFC2396). Examples: \"/route\",\n \"route\", \"route/subroute\".'\n service:\n type: string\n x-dcl-go-name: Service\n description: Required. Name of the GKE service.\n workflow:\n type: string\n x-dcl-go-name: Workflow\n description: 'The resource name of the Workflow whose Executions are\n triggered by the events. The Workflow resource should be deployed\n in the same project as the trigger. Format: `projects/{project}/locations/{location}/workflows/{workflow}`'\n x-dcl-conflicts:\n - cloudRunService\n - cloudFunction\n - gke\n x-dcl-references:\n - resource: Workflows/Workflow\n field: name\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Output only. 
This checksum is computed by the server based\n on the value of other fields, and may be sent only on create requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. User labels attached to the triggers that can be\n used to group resources.\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n matchingCriteria:\n type: array\n x-dcl-go-name: MatchingCriteria\n description: Required. null The list of filters that applies to event attributes.\n Only events that match all the provided filters will be sent to the destination.\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: object\n x-dcl-go-type: TriggerMatchingCriteria\n required:\n - attribute\n - value\n properties:\n attribute:\n type: string\n x-dcl-go-name: Attribute\n description: Required. The name of a CloudEvents attribute. Currently,\n only a subset of attributes are supported for filtering. All triggers\n MUST provide a filter for the 'type' attribute.\n operator:\n type: string\n x-dcl-go-name: Operator\n description: Optional. The operator used for matching the events with\n the value of the filter. If not specified, only events that have\n an exact key-value pair specified in the filter are matched. The\n only allowed value is `match-path-pattern`.\n value:\n type: string\n x-dcl-go-name: Value\n description: Required. The value for the attribute. See https://cloud.google.com/eventarc/docs/creating-triggers#trigger-gcloud\n for available values.\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. The resource name of the trigger. 
Must be unique\n within the location on the project.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Optional. The IAM service account email associated with the\n trigger. The service account represents the identity of the trigger. The\n principal who calls this API must have `iam.serviceAccounts.actAs` permission\n in the service account. See https://cloud.google.com/iam/docs/understanding-service-accounts#sa_common\n for more information. For Cloud Run destinations, this service account\n is used to generate identity tokens when invoking the service. See https://cloud.google.com/run/docs/triggering/pubsub-push#create-service-account\n for information on how to invoke authenticated Cloud Run services. In\n order to create Audit Log triggers, the service account should also have\n `roles/eventarc.eventReceiver` IAM role.\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n transport:\n type: object\n x-dcl-go-name: Transport\n x-dcl-go-type: TriggerTransport\n description: Optional. In order to deliver messages, Eventarc may use other\n GCP products as transport intermediary. This field contains a reference\n to that transport intermediary. This information can be used for debugging\n purposes.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n pubsub:\n type: object\n x-dcl-go-name: Pubsub\n x-dcl-go-type: TriggerTransportPubsub\n description: The Pub/Sub topic and subscription used by Eventarc as\n delivery intermediary.\n x-kubernetes-immutable: true\n properties:\n subscription:\n type: string\n x-dcl-go-name: Subscription\n readOnly: true\n description: 'Output only. 
The name of the Pub/Sub subscription\n created and managed by Eventarc system as a transport for the\n event delivery. Format: `projects/{PROJECT_ID}/subscriptions/{SUBSCRIPTION_NAME}`.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Pubsub/Subscription\n field: name\n topic:\n type: string\n x-dcl-go-name: Topic\n description: 'Optional. The name of the Pub/Sub topic created and\n managed by Eventarc system as a transport for the event delivery.\n Format: `projects/{PROJECT_ID}/topics/{TOPIC_NAME}. You may set\n an existing topic for triggers of the type google.cloud.pubsub.topic.v1.messagePublished`\n only. The topic you provide here will not be deleted by Eventarc\n at trigger deletion.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Pubsub/Topic\n field: name\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. Server assigned unique identifier for the trigger.\n The value is a UUID4 string and guaranteed to remain unchanged until the\n resource is deleted.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
The last-modified time.\n x-kubernetes-immutable: true\n") +var YAML_trigger = []byte("info:\n title: Eventarc/Trigger\n description: The Eventarc Trigger resource\n x-dcl-struct-name: Trigger\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Trigger\n parameters:\n - name: trigger\n required: true\n description: A full instance of a Trigger\n apply:\n description: The function used to apply information about a Trigger\n parameters:\n - name: trigger\n required: true\n description: A full instance of a Trigger\n delete:\n description: The function used to delete a Trigger\n parameters:\n - name: trigger\n required: true\n description: A full instance of a Trigger\n deleteAll:\n description: The function used to delete all Trigger\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Trigger\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Trigger:\n title: Trigger\n x-dcl-id: projects/{{project}}/locations/{{location}}/triggers/{{name}}\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - matchingCriteria\n - destination\n - project\n - location\n properties:\n channel:\n type: string\n x-dcl-go-name: Channel\n description: Optional. 
The name of the channel associated with the trigger\n in `projects/{project}/locations/{location}/channels/{channel}` format.\n You must provide a channel to receive events from Eventarc SaaS partners.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Eventarc/Channel\n field: name\n conditions:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Conditions\n readOnly: true\n description: Output only. The reason(s) why a trigger is in FAILED state.\n x-kubernetes-immutable: true\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The creation time.\n x-kubernetes-immutable: true\n destination:\n type: object\n x-dcl-go-name: Destination\n x-dcl-go-type: TriggerDestination\n description: Required. Destination specifies where the events should be\n sent to.\n properties:\n cloudFunction:\n type: string\n x-dcl-go-name: CloudFunction\n description: '[WARNING] Configuring a Cloud Function in Trigger is not\n supported as of today. The Cloud Function resource name. Format: projects/{project}/locations/{location}/functions/{function}'\n x-dcl-conflicts:\n - cloudRunService\n - gke\n - workflow\n x-dcl-references:\n - resource: Cloudfunctions/Function\n field: name\n cloudRunService:\n type: object\n x-dcl-go-name: CloudRunService\n x-dcl-go-type: TriggerDestinationCloudRunService\n description: Cloud Run fully-managed service that receives the events.\n The service should be running in the same project of the trigger.\n x-dcl-conflicts:\n - cloudFunction\n - gke\n - workflow\n required:\n - service\n - region\n properties:\n path:\n type: string\n x-dcl-go-name: Path\n description: 'Optional. The relative path on the Cloud Run service\n the events should be sent to. The value must conform to the definition\n of URI path segment (section 3.3 of RFC2396). 
Examples: \"/route\",\n \"route\", \"route/subroute\".'\n region:\n type: string\n x-dcl-go-name: Region\n description: Required. The region the Cloud Run service is deployed\n in.\n service:\n type: string\n x-dcl-go-name: Service\n description: Required. The name of the Cloud Run service being addressed.\n See https://cloud.google.com/run/docs/reference/rest/v1/namespaces.services.\n Only services located in the same project of the trigger object\n can be addressed.\n x-dcl-references:\n - resource: Run/Service\n field: name\n gke:\n type: object\n x-dcl-go-name: Gke\n x-dcl-go-type: TriggerDestinationGke\n description: A GKE service capable of receiving events. The service\n should be running in the same project as the trigger.\n x-dcl-conflicts:\n - cloudRunService\n - cloudFunction\n - workflow\n required:\n - cluster\n - location\n - namespace\n - service\n properties:\n cluster:\n type: string\n x-dcl-go-name: Cluster\n description: Required. The name of the cluster the GKE service is\n running in. The cluster must be running in the same project as\n the trigger being created.\n x-dcl-references:\n - resource: Container/Cluster\n field: selfLink\n location:\n type: string\n x-dcl-go-name: Location\n description: Required. The name of the Google Compute Engine in\n which the cluster resides, which can either be compute zone (for\n example, us-central1-a) for the zonal clusters or region (for\n example, us-central1) for regional clusters.\n namespace:\n type: string\n x-dcl-go-name: Namespace\n description: Required. The namespace the GKE service is running\n in.\n path:\n type: string\n x-dcl-go-name: Path\n description: 'Optional. The relative path on the GKE service the\n events should be sent to. The value must conform to the definition\n of a URI path segment (section 3.3 of RFC2396). Examples: \"/route\",\n \"route\", \"route/subroute\".'\n service:\n type: string\n x-dcl-go-name: Service\n description: Required. 
Name of the GKE service.\n workflow:\n type: string\n x-dcl-go-name: Workflow\n description: 'The resource name of the Workflow whose Executions are\n triggered by the events. The Workflow resource should be deployed\n in the same project as the trigger. Format: `projects/{project}/locations/{location}/workflows/{workflow}`'\n x-dcl-conflicts:\n - cloudRunService\n - cloudFunction\n - gke\n x-dcl-references:\n - resource: Workflows/Workflow\n field: name\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: Output only. This checksum is computed by the server based\n on the value of other fields, and may be sent only on create requests\n to ensure the client has an up-to-date value before proceeding.\n x-kubernetes-immutable: true\n eventDataContentType:\n type: string\n x-dcl-go-name: EventDataContentType\n description: Optional. EventDataContentType specifies the type of payload\n in MIME format that is expected from the CloudEvent data field. This is\n set to `application/json` if the value is not defined.\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. User labels attached to the triggers that can be\n used to group resources.\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n matchingCriteria:\n type: array\n x-dcl-go-name: MatchingCriteria\n description: Required. null The list of filters that applies to event attributes.\n Only events that match all the provided filters will be sent to the destination.\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: object\n x-dcl-go-type: TriggerMatchingCriteria\n required:\n - attribute\n - value\n properties:\n attribute:\n type: string\n x-dcl-go-name: Attribute\n description: Required. The name of a CloudEvents attribute. Currently,\n only a subset of attributes are supported for filtering. 
All triggers\n MUST provide a filter for the 'type' attribute.\n operator:\n type: string\n x-dcl-go-name: Operator\n description: Optional. The operator used for matching the events with\n the value of the filter. If not specified, only events that have\n an exact key-value pair specified in the filter are matched. The\n only allowed value is `match-path-pattern`.\n value:\n type: string\n x-dcl-go-name: Value\n description: Required. The value for the attribute. See https://cloud.google.com/eventarc/docs/creating-triggers#trigger-gcloud\n for available values.\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. The resource name of the trigger. Must be unique\n within the location on the project.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n serviceAccount:\n type: string\n x-dcl-go-name: ServiceAccount\n description: Optional. The IAM service account email associated with the\n trigger. The service account represents the identity of the trigger. The\n principal who calls this API must have `iam.serviceAccounts.actAs` permission\n in the service account. See https://cloud.google.com/iam/docs/understanding-service-accounts#sa_common\n for more information. For Cloud Run destinations, this service account\n is used to generate identity tokens when invoking the service. See https://cloud.google.com/run/docs/triggering/pubsub-push#create-service-account\n for information on how to invoke authenticated Cloud Run services. In\n order to create Audit Log triggers, the service account should also have\n `roles/eventarc.eventReceiver` IAM role.\n x-dcl-references:\n - resource: Iam/ServiceAccount\n field: email\n transport:\n type: object\n x-dcl-go-name: Transport\n x-dcl-go-type: TriggerTransport\n description: Optional. 
In order to deliver messages, Eventarc may use other\n GCP products as transport intermediary. This field contains a reference\n to that transport intermediary. This information can be used for debugging\n purposes.\n x-kubernetes-immutable: true\n x-dcl-server-default: true\n properties:\n pubsub:\n type: object\n x-dcl-go-name: Pubsub\n x-dcl-go-type: TriggerTransportPubsub\n description: The Pub/Sub topic and subscription used by Eventarc as\n delivery intermediary.\n x-kubernetes-immutable: true\n properties:\n subscription:\n type: string\n x-dcl-go-name: Subscription\n readOnly: true\n description: 'Output only. The name of the Pub/Sub subscription\n created and managed by Eventarc system as a transport for the\n event delivery. Format: `projects/{PROJECT_ID}/subscriptions/{SUBSCRIPTION_NAME}`.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Pubsub/Subscription\n field: name\n topic:\n type: string\n x-dcl-go-name: Topic\n description: 'Optional. The name of the Pub/Sub topic created and\n managed by Eventarc system as a transport for the event delivery.\n Format: `projects/{PROJECT_ID}/topics/{TOPIC_NAME}. You may set\n an existing topic for triggers of the type google.cloud.pubsub.topic.v1.messagePublished`\n only. The topic you provide here will not be deleted by Eventarc\n at trigger deletion.'\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Pubsub/Topic\n field: name\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. Server assigned unique identifier for the trigger.\n The value is a UUID4 string and guaranteed to remain unchanged until the\n resource is deleted.\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
The last-modified time.\n x-kubernetes-immutable: true\n") -// 14388 bytes -// MD5: ac26f29afab44701a412358850225563 +// 14722 bytes +// MD5: ea06f09a25dea925998f4496fc8a232f diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/client.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/client.go deleted file mode 100644 index 3a86c1038e..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/client.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Package logging defines operations in the declarative SDK. -package logging - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -// The Client is the base struct of all operations. This will receive the -// Get, Delete, List, and Apply operations on all resources. -type Client struct { - Config *dcl.Config -} - -// NewClient creates a client that retries all operations a few times each. 
-func NewClient(c *dcl.Config) *Client { - return &Client{ - Config: c, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket.go deleted file mode 100644 index e6c472abee..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package logging - -import ( - "context" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type LogBucket struct { - Name *string `json:"name"` - Description *string `json:"description"` - CreateTime *string `json:"createTime"` - UpdateTime *string `json:"updateTime"` - RetentionDays *int64 `json:"retentionDays"` - Locked *bool `json:"locked"` - LifecycleState *LogBucketLifecycleStateEnum `json:"lifecycleState"` - Parent *string `json:"parent"` - Location *string `json:"location"` -} - -func (r *LogBucket) String() string { - return dcl.SprintResource(r) -} - -// The enum LogBucketLifecycleStateEnum. 
-type LogBucketLifecycleStateEnum string - -// LogBucketLifecycleStateEnumRef returns a *LogBucketLifecycleStateEnum with the value of string s -// If the empty string is provided, nil is returned. -func LogBucketLifecycleStateEnumRef(s string) *LogBucketLifecycleStateEnum { - v := LogBucketLifecycleStateEnum(s) - return &v -} - -func (v LogBucketLifecycleStateEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"LIFECYCLE_STATE_UNSPECIFIED", "ACTIVE", "DELETE_REQUESTED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "LogBucketLifecycleStateEnum", - Value: string(v), - Valid: []string{}, - } -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. -func (r *LogBucket) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "logging", - Type: "LogBucket", - Version: "logging", - } -} - -func (r *LogBucket) ID() (string, error) { - if err := extractLogBucketFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "description": dcl.ValueOrEmptyString(nr.Description), - "create_time": dcl.ValueOrEmptyString(nr.CreateTime), - "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), - "retention_days": dcl.ValueOrEmptyString(nr.RetentionDays), - "locked": dcl.ValueOrEmptyString(nr.Locked), - "lifecycle_state": dcl.ValueOrEmptyString(nr.LifecycleState), - "parent": dcl.ValueOrEmptyString(nr.Parent), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.Nprintf("{{parent}}/locations/{{location}}/buckets/{{name}}", params), nil -} - -const LogBucketMaxPage = -1 - -type LogBucketList struct { - Items []*LogBucket - - nextToken string - - pageSize int32 - - resource *LogBucket -} - -func (l *LogBucketList) HasNext() bool { - return l.nextToken != "" -} - -func (l *LogBucketList) 
Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listLogBucket(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListLogBucket(ctx context.Context, location, parent string) (*LogBucketList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListLogBucketWithMaxResults(ctx, location, parent, LogBucketMaxPage) - -} - -func (c *Client) ListLogBucketWithMaxResults(ctx context.Context, location, parent string, pageSize int32) (*LogBucketList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. - r := &LogBucket{ - Location: &location, - Parent: &parent, - } - items, token, err := c.listLogBucket(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &LogBucketList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetLogBucket(ctx context.Context, r *LogBucket) (*LogBucket, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
- extractLogBucketFields(r) - - b, err := c.getLogBucketRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalLogBucket(b, c, r) - if err != nil { - return nil, err - } - result.Location = r.Location - result.Parent = r.Parent - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeLogBucketNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractLogBucketFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteLogBucket(ctx context.Context, r *LogBucket) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("LogBucket resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting LogBucket...") - deleteOp := deleteLogBucketOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllLogBucket deletes all resources that the filter functions returns true on. 
-func (c *Client) DeleteAllLogBucket(ctx context.Context, location, parent string, filter func(*LogBucket) bool) error { - listObj, err := c.ListLogBucket(ctx, location, parent) - if err != nil { - return err - } - - err = c.deleteAllLogBucket(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllLogBucket(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyLogBucket(ctx context.Context, rawDesired *LogBucket, opts ...dcl.ApplyOption) (*LogBucket, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *LogBucket - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyLogBucketHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyLogBucketHelper(c *Client, ctx context.Context, rawDesired *LogBucket, opts ...dcl.ApplyOption) (*LogBucket, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyLogBucket...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. - if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractLogBucketFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.logBucketDiffsForRawDesired(ctx, rawDesired, opts...) 
- if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToLogBucketDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []logBucketApiOperation - if create { - ops = append(ops, &createLogBucketOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyLogBucketDiff(c, ctx, desired, rawDesired, ops, opts...) 
-} - -func applyLogBucketDiff(c *Client, ctx context.Context, desired *LogBucket, rawDesired *LogBucket, ops []logBucketApiOperation, opts ...dcl.ApplyOption) (*LogBucket, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetLogBucket(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createLogBucketOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapLogBucket(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeLogBucketNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeLogBucketNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeLogBucketDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractLogBucketFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. 
- if err := postReadExtractLogBucketFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffLogBucket(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket.yaml deleted file mode 100644 index dd7f451cb0..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket.yaml +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-info: - title: Logging/LogBucket - description: The Logging LogBucket resource - x-dcl-struct-name: LogBucket - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a LogBucket - parameters: - - name: logBucket - required: true - description: A full instance of a LogBucket - apply: - description: The function used to apply information about a LogBucket - parameters: - - name: logBucket - required: true - description: A full instance of a LogBucket - delete: - description: The function used to delete a LogBucket - parameters: - - name: logBucket - required: true - description: A full instance of a LogBucket - deleteAll: - description: The function used to delete all LogBucket - parameters: - - name: location - required: true - schema: - type: string - - name: parent - required: true - schema: - type: string - list: - description: The function used to list information about many LogBucket - parameters: - - name: location - required: true - schema: - type: string - - name: parent - required: true - schema: - type: string -components: - schemas: - LogBucket: - title: LogBucket - x-dcl-id: '{{parent}}/locations/{{location}}/buckets/{{name}}' - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - name - - parent - - location - properties: - createTime: - type: string - format: date-time - x-dcl-go-name: CreateTime - readOnly: true - description: Output only. The creation timestamp of the bucket. This is - not set for any of the default buckets. - x-kubernetes-immutable: true - description: - type: string - x-dcl-go-name: Description - description: Describes this bucket. - lifecycleState: - type: string - x-dcl-go-name: LifecycleState - x-dcl-go-type: LogBucketLifecycleStateEnum - readOnly: true - description: 'Output only. The bucket lifecycle state. 
Possible values: - LIFECYCLE_STATE_UNSPECIFIED, ACTIVE, DELETE_REQUESTED' - x-kubernetes-immutable: true - enum: - - LIFECYCLE_STATE_UNSPECIFIED - - ACTIVE - - DELETE_REQUESTED - location: - type: string - x-dcl-go-name: Location - description: 'The location of the resource. The supported locations are: - global, us-central1, us-east1, us-west1, asia-east1, europe-west1.' - x-kubernetes-immutable: true - locked: - type: boolean - x-dcl-go-name: Locked - description: Whether the bucket has been locked. The retention period on - a locked bucket may not be changed. Locked buckets may only be deleted - if they are empty. - name: - type: string - x-dcl-go-name: Name - description: 'The resource name of the bucket. For example: "projects/my-project-id/locations/my-location/buckets/my-bucket-id" - The supported locations are: `global`, `us-central1`, `us-east1`, `us-west1`, - `asia-east1`, `europe-west1`. For the location of `global` it is unspecified - where logs are actually stored. Once a bucket has been created, the location - can not be changed.' - x-kubernetes-immutable: true - parent: - type: string - x-dcl-go-name: Parent - description: The parent of the resource. - x-kubernetes-immutable: true - x-dcl-forward-slash-allowed: true - x-dcl-references: - - resource: Cloudresourcemanager/BillingAccount - field: name - parent: true - - resource: Cloudresourcemanager/Folder - field: name - parent: true - - resource: Cloudresourcemanager/Organization - field: name - parent: true - - resource: Cloudresourcemanager/Project - field: name - parent: true - retentionDays: - type: integer - format: int64 - x-dcl-go-name: RetentionDays - description: Logs will be retained by default for this amount of time, after - which they will automatically be deleted. The minimum retention period - is 1 day. If this value is set to zero at bucket creation time, the default - time of 30 days will be used. 
- updateTime: - type: string - format: date-time - x-dcl-go-name: UpdateTime - readOnly: true - description: Output only. The last update timestamp of the bucket. - x-kubernetes-immutable: true diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket_internal.go deleted file mode 100644 index 5b4df35c24..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket_internal.go +++ /dev/null @@ -1,816 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package logging - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func (r *LogBucket) validate() error { - - if err := dcl.Required(r, "name"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Parent, "Parent"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { - return err - } - return nil -} -func (r *LogBucket) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://logging.googleapis.com/v2/", params) -} - -func (r *LogBucket) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "location": dcl.ValueOrEmptyString(nr.Location), - "parent": dcl.ValueOrEmptyString(nr.Parent), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("{{parent}}/locations/{{location}}/buckets/{{name}}", nr.basePath(), userBasePath, params), nil -} - -func (r *LogBucket) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "location": dcl.ValueOrEmptyString(nr.Location), - "parent": dcl.ValueOrEmptyString(nr.Parent), - } - return dcl.URL("{{parent}}/locations/{{location}}/buckets", nr.basePath(), userBasePath, params), nil - -} - -func (r *LogBucket) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "location": dcl.ValueOrEmptyString(nr.Location), - "parent": dcl.ValueOrEmptyString(nr.Parent), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("{{parent}}/locations/{{location}}/buckets?bucketId={{name}}", nr.basePath(), userBasePath, params), nil - -} - -func (r *LogBucket) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "location": dcl.ValueOrEmptyString(nr.Location), - "parent": 
dcl.ValueOrEmptyString(nr.Parent), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("{{parent}}/locations/{{location}}/buckets/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// logBucketApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type logBucketApiOperation interface { - do(context.Context, *LogBucket, *Client) error -} - -// newUpdateLogBucketUpdateBucketRequest creates a request for an -// LogBucket resource's UpdateBucket update type by filling in the update -// fields based on the intended state of the resource. -func newUpdateLogBucketUpdateBucketRequest(ctx context.Context, f *LogBucket, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.Description; !dcl.IsEmptyValueIndirect(v) { - req["description"] = v - } - if v := f.RetentionDays; !dcl.IsEmptyValueIndirect(v) { - req["retentionDays"] = v - } - if v := f.Locked; !dcl.IsEmptyValueIndirect(v) { - req["locked"] = v - } - return req, nil -} - -// marshalUpdateLogBucketUpdateBucketRequest converts the update into -// the final JSON request body. -func marshalUpdateLogBucketUpdateBucketRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateLogBucketUpdateBucketOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. 
- -func (op *updateLogBucketUpdateBucketOperation) do(ctx context.Context, r *LogBucket, c *Client) error { - _, err := c.GetLogBucket(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateBucket") - if err != nil { - return err - } - mask := dcl.UpdateMask(op.FieldDiffs) - u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) - if err != nil { - return err - } - - req, err := newUpdateLogBucketUpdateBucketRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateLogBucketUpdateBucketRequest(c, req) - if err != nil { - return err - } - _, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - return nil -} - -func (c *Client) listLogBucketRaw(ctx context.Context, r *LogBucket, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != LogBucketMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listLogBucketOperation struct { - Buckets []map[string]interface{} `json:"buckets"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listLogBucket(ctx context.Context, r *LogBucket, pageToken string, pageSize int32) ([]*LogBucket, string, error) { - b, err := c.listLogBucketRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listLogBucketOperation - if err := json.Unmarshal(b, &m); err != nil { - 
return nil, "", err - } - - var l []*LogBucket - for _, v := range m.Buckets { - res, err := unmarshalMapLogBucket(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Location = r.Location - res.Parent = r.Parent - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllLogBucket(ctx context.Context, f func(*LogBucket) bool, resources []*LogBucket) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteLogBucket(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteLogBucketOperation struct{} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. 
-type createLogBucketOperation struct { - response map[string]interface{} -} - -func (op *createLogBucketOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createLogBucketOperation) do(ctx context.Context, r *LogBucket, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - - o, err := dcl.ResponseBodyAsJSON(resp) - if err != nil { - return fmt.Errorf("error decoding response body into JSON: %w", err) - } - op.response = o - - if _, err := c.GetLogBucket(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getLogBucketRaw(ctx context.Context, r *LogBucket) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) logBucketDiffsForRawDesired(ctx context.Context, rawDesired *LogBucket, opts ...dcl.ApplyOption) (initial, desired *LogBucket, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
- var fetchState *LogBucket - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*LogBucket); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected LogBucket, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetLogBucket(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a LogBucket resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve LogBucket resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that LogBucket resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeLogBucketDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for LogBucket: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for LogBucket: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractLogBucketFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeLogBucketInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for LogBucket: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeLogBucketDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for LogBucket: %v", desired) - - // 2.1: Comparison of initial and desired state. 
- diffs, err = diffLogBucket(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeLogBucketInitialState(rawInitial, rawDesired *LogBucket) (*LogBucket, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeLogBucketDesiredState(rawDesired, rawInitial *LogBucket, opts ...dcl.ApplyOption) (*LogBucket, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - - return rawDesired, nil - } - canonicalDesired := &LogBucket{} - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { - canonicalDesired.Description = rawInitial.Description - } else { - canonicalDesired.Description = rawDesired.Description - } - if dcl.IsZeroValue(rawDesired.RetentionDays) || (dcl.IsEmptyValueIndirect(rawDesired.RetentionDays) && dcl.IsEmptyValueIndirect(rawInitial.RetentionDays)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- canonicalDesired.RetentionDays = rawInitial.RetentionDays - } else { - canonicalDesired.RetentionDays = rawDesired.RetentionDays - } - if dcl.BoolCanonicalize(rawDesired.Locked, rawInitial.Locked) { - canonicalDesired.Locked = rawInitial.Locked - } else { - canonicalDesired.Locked = rawDesired.Locked - } - if dcl.NameToSelfLink(rawDesired.Parent, rawInitial.Parent) { - canonicalDesired.Parent = rawInitial.Parent - } else { - canonicalDesired.Parent = rawDesired.Parent - } - if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { - canonicalDesired.Location = rawInitial.Location - } else { - canonicalDesired.Location = rawDesired.Location - } - return canonicalDesired, nil -} - -func canonicalizeLogBucketNewState(c *Client, rawNew, rawDesired *LogBucket) (*LogBucket, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { - rawNew.Description = rawDesired.Description - } else { - if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { - rawNew.Description = rawDesired.Description - } - } - - if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { - rawNew.CreateTime = rawDesired.CreateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { - rawNew.UpdateTime = rawDesired.UpdateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.RetentionDays) && dcl.IsEmptyValueIndirect(rawDesired.RetentionDays) { - rawNew.RetentionDays = rawDesired.RetentionDays - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.Locked) && dcl.IsEmptyValueIndirect(rawDesired.Locked) { - rawNew.Locked = rawDesired.Locked - } else { - if 
dcl.BoolCanonicalize(rawDesired.Locked, rawNew.Locked) { - rawNew.Locked = rawDesired.Locked - } - } - - if dcl.IsEmptyValueIndirect(rawNew.LifecycleState) && dcl.IsEmptyValueIndirect(rawDesired.LifecycleState) { - rawNew.LifecycleState = rawDesired.LifecycleState - } else { - } - - rawNew.Parent = rawDesired.Parent - - rawNew.Location = rawDesired.Location - - return rawNew, nil -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. -func diffLogBucket(c *Client, desired, actual *LogBucket, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogBucketUpdateBucketOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.RetentionDays, actual.RetentionDays, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogBucketUpdateBucketOperation")}, fn.AddNest("RetentionDays")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Locked, actual.Locked, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogBucketUpdateBucketOperation")}, fn.AddNest("Locked")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.LifecycleState, actual.LifecycleState, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LifecycleState")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Parent, actual.Parent, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Parent")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. -func (r *LogBucket) urlNormalized() *LogBucket { - normalized := dcl.Copy(*r).(LogBucket) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.Description = dcl.SelfLinkToName(r.Description) - normalized.Parent = r.Parent - normalized.Location = dcl.SelfLinkToName(r.Location) - return &normalized -} - -func (r *LogBucket) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateBucket" { - fields := map[string]interface{}{ - "location": dcl.ValueOrEmptyString(nr.Location), - "parent": dcl.ValueOrEmptyString(nr.Parent), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("{{parent}}/locations/{{location}}/buckets/{{name}}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the LogBucket resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. 
-func (r *LogBucket) marshal(c *Client) ([]byte, error) { - m, err := expandLogBucket(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling LogBucket: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalLogBucket decodes JSON responses into the LogBucket resource schema. -func unmarshalLogBucket(b []byte, c *Client, res *LogBucket) (*LogBucket, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapLogBucket(m, c, res) -} - -func unmarshalMapLogBucket(m map[string]interface{}, c *Client, res *LogBucket) (*LogBucket, error) { - - flattened := flattenLogBucket(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandLogBucket expands LogBucket into a JSON request object. -func expandLogBucket(c *Client, f *LogBucket) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := dcl.DeriveField("%s/locations/%s/buckets/%s", f.Name, f.Parent, dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { - return nil, fmt.Errorf("error expanding Name into name: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - if v := f.Description; dcl.ValueShouldBeSent(v) { - m["description"] = v - } - if v := f.RetentionDays; dcl.ValueShouldBeSent(v) { - m["retentionDays"] = v - } - if v := f.Locked; dcl.ValueShouldBeSent(v) { - m["locked"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Parent into parent: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["parent"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Location into location: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["location"] = v - } - - return m, nil -} - -// flattenLogBucket flattens LogBucket from a JSON request object into the -// 
LogBucket type. -func flattenLogBucket(c *Client, i interface{}, res *LogBucket) *LogBucket { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &LogBucket{} - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.Description = dcl.FlattenString(m["description"]) - resultRes.CreateTime = dcl.FlattenString(m["createTime"]) - resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) - resultRes.RetentionDays = dcl.FlattenInteger(m["retentionDays"]) - resultRes.Locked = dcl.FlattenBool(m["locked"]) - resultRes.LifecycleState = flattenLogBucketLifecycleStateEnum(m["lifecycleState"]) - resultRes.Parent = dcl.FlattenString(m["parent"]) - resultRes.Location = dcl.FlattenString(m["location"]) - - return resultRes -} - -// flattenLogBucketLifecycleStateEnumMap flattens the contents of LogBucketLifecycleStateEnum from a JSON -// response object. -func flattenLogBucketLifecycleStateEnumMap(c *Client, i interface{}, res *LogBucket) map[string]LogBucketLifecycleStateEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]LogBucketLifecycleStateEnum{} - } - - if len(a) == 0 { - return map[string]LogBucketLifecycleStateEnum{} - } - - items := make(map[string]LogBucketLifecycleStateEnum) - for k, item := range a { - items[k] = *flattenLogBucketLifecycleStateEnum(item.(interface{})) - } - - return items -} - -// flattenLogBucketLifecycleStateEnumSlice flattens the contents of LogBucketLifecycleStateEnum from a JSON -// response object. 
-func flattenLogBucketLifecycleStateEnumSlice(c *Client, i interface{}, res *LogBucket) []LogBucketLifecycleStateEnum { - a, ok := i.([]interface{}) - if !ok { - return []LogBucketLifecycleStateEnum{} - } - - if len(a) == 0 { - return []LogBucketLifecycleStateEnum{} - } - - items := make([]LogBucketLifecycleStateEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenLogBucketLifecycleStateEnum(item.(interface{}))) - } - - return items -} - -// flattenLogBucketLifecycleStateEnum asserts that an interface is a string, and returns a -// pointer to a *LogBucketLifecycleStateEnum with the same value as that string. -func flattenLogBucketLifecycleStateEnum(i interface{}) *LogBucketLifecycleStateEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return LogBucketLifecycleStateEnumRef(s) -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. 
-func (r *LogBucket) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalLogBucket(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Location == nil && ncr.Location == nil { - c.Config.Logger.Info("Both Location fields null - considering equal.") - } else if nr.Location == nil || ncr.Location == nil { - c.Config.Logger.Info("Only one Location field is null - considering unequal.") - return false - } else if *nr.Location != *ncr.Location { - return false - } - if nr.Parent == nil && ncr.Parent == nil { - c.Config.Logger.Info("Both Parent fields null - considering equal.") - } else if nr.Parent == nil || ncr.Parent == nil { - c.Config.Logger.Info("Only one Parent field is null - considering unequal.") - return false - } else if *nr.Parent != *ncr.Parent { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type logBucketDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp logBucketApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToLogBucketDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]logBucketDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. 
- for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []logBucketDiff - // For each operation name, create a logBucketDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := logBucketDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToLogBucketApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToLogBucketApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (logBucketApiOperation, error) { - switch opName { - - case "updateLogBucketUpdateBucketOperation": - return &updateLogBucketUpdateBucketOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractLogBucketFields(r *LogBucket) error { - return nil -} - -func postReadExtractLogBucketFields(r *LogBucket) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket_schema.go deleted file mode 100644 index 96d086810e..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket_schema.go +++ /dev/null @@ -1,203 +0,0 @@ -// 
Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package logging - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLLogBucketSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Logging/LogBucket", - Description: "The Logging LogBucket resource", - StructName: "LogBucket", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a LogBucket", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "logBucket", - Required: true, - Description: "A full instance of a LogBucket", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a LogBucket", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "logBucket", - Required: true, - Description: "A full instance of a LogBucket", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a LogBucket", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "logBucket", - Required: true, - Description: "A full instance of a LogBucket", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all LogBucket", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "parent", - Required: true, - Schema: 
&dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many LogBucket", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "parent", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "LogBucket": &dcl.Component{ - Title: "LogBucket", - ID: "{{parent}}/locations/{{location}}/buckets/{{name}}", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "parent", - "location", - }, - Properties: map[string]*dcl.Property{ - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The creation timestamp of the bucket. This is not set for any of the default buckets.", - Immutable: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Describes this bucket.", - }, - "lifecycleState": &dcl.Property{ - Type: "string", - GoName: "LifecycleState", - GoType: "LogBucketLifecycleStateEnum", - ReadOnly: true, - Description: "Output only. The bucket lifecycle state. Possible values: LIFECYCLE_STATE_UNSPECIFIED, ACTIVE, DELETE_REQUESTED", - Immutable: true, - Enum: []string{ - "LIFECYCLE_STATE_UNSPECIFIED", - "ACTIVE", - "DELETE_REQUESTED", - }, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location of the resource. The supported locations are: global, us-central1, us-east1, us-west1, asia-east1, europe-west1.", - Immutable: true, - }, - "locked": &dcl.Property{ - Type: "boolean", - GoName: "Locked", - Description: "Whether the bucket has been locked. The retention period on a locked bucket may not be changed. 
Locked buckets may only be deleted if they are empty.", - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The resource name of the bucket. For example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\" The supported locations are: `global`, `us-central1`, `us-east1`, `us-west1`, `asia-east1`, `europe-west1`. For the location of `global` it is unspecified where logs are actually stored. Once a bucket has been created, the location can not be changed.", - Immutable: true, - }, - "parent": &dcl.Property{ - Type: "string", - GoName: "Parent", - Description: "The parent of the resource.", - Immutable: true, - ForwardSlashAllowed: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/BillingAccount", - Field: "name", - Parent: true, - }, - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Folder", - Field: "name", - Parent: true, - }, - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Organization", - Field: "name", - Parent: true, - }, - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - }, - "retentionDays": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "RetentionDays", - Description: "Logs will be retained by default for this amount of time, after which they will automatically be deleted. The minimum retention period is 1 day. If this value is set to zero at bucket creation time, the default time of 30 days will be used.", - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. 
The last update timestamp of the bucket.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket_yaml_embed.go deleted file mode 100644 index cbc5928d25..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_bucket_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// GENERATED BY gen_go_data.go -// gen_go_data -package logging -var YAML_log_bucket blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/logging/log_bucket.yaml - -package logging - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/logging/log_bucket.yaml -var YAML_log_bucket = []byte("info:\n title: Logging/LogBucket\n description: The Logging LogBucket resource\n x-dcl-struct-name: LogBucket\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a LogBucket\n parameters:\n - name: logBucket\n required: true\n description: A full instance of a LogBucket\n apply:\n description: The function used to apply information about a LogBucket\n parameters:\n - name: logBucket\n required: true\n description: A full instance of a LogBucket\n delete:\n description: The function used to delete a LogBucket\n parameters:\n - name: logBucket\n required: true\n description: A full instance of a LogBucket\n deleteAll:\n description: The function used to delete all LogBucket\n parameters:\n - name: location\n required: true\n schema:\n type: string\n - name: parent\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many LogBucket\n parameters:\n - name: location\n required: true\n schema:\n type: string\n - name: parent\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n LogBucket:\n title: LogBucket\n x-dcl-id: '{{parent}}/locations/{{location}}/buckets/{{name}}'\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - parent\n - location\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The creation timestamp of the bucket. 
This is\n not set for any of the default buckets.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Describes this bucket.\n lifecycleState:\n type: string\n x-dcl-go-name: LifecycleState\n x-dcl-go-type: LogBucketLifecycleStateEnum\n readOnly: true\n description: 'Output only. The bucket lifecycle state. Possible values:\n LIFECYCLE_STATE_UNSPECIFIED, ACTIVE, DELETE_REQUESTED'\n x-kubernetes-immutable: true\n enum:\n - LIFECYCLE_STATE_UNSPECIFIED\n - ACTIVE\n - DELETE_REQUESTED\n location:\n type: string\n x-dcl-go-name: Location\n description: 'The location of the resource. The supported locations are:\n global, us-central1, us-east1, us-west1, asia-east1, europe-west1.'\n x-kubernetes-immutable: true\n locked:\n type: boolean\n x-dcl-go-name: Locked\n description: Whether the bucket has been locked. The retention period on\n a locked bucket may not be changed. Locked buckets may only be deleted\n if they are empty.\n name:\n type: string\n x-dcl-go-name: Name\n description: 'The resource name of the bucket. For example: \"projects/my-project-id/locations/my-location/buckets/my-bucket-id\"\n The supported locations are: `global`, `us-central1`, `us-east1`, `us-west1`,\n `asia-east1`, `europe-west1`. For the location of `global` it is unspecified\n where logs are actually stored. 
Once a bucket has been created, the location\n can not be changed.'\n x-kubernetes-immutable: true\n parent:\n type: string\n x-dcl-go-name: Parent\n description: The parent of the resource.\n x-kubernetes-immutable: true\n x-dcl-forward-slash-allowed: true\n x-dcl-references:\n - resource: Cloudresourcemanager/BillingAccount\n field: name\n parent: true\n - resource: Cloudresourcemanager/Folder\n field: name\n parent: true\n - resource: Cloudresourcemanager/Organization\n field: name\n parent: true\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n retentionDays:\n type: integer\n format: int64\n x-dcl-go-name: RetentionDays\n description: Logs will be retained by default for this amount of time, after\n which they will automatically be deleted. The minimum retention period\n is 1 day. If this value is set to zero at bucket creation time, the default\n time of 30 days will be used.\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The last update timestamp of the bucket.\n x-kubernetes-immutable: true\n") - -// 4841 bytes -// MD5: cbf20ec9f28796e7d05c23eb5e38b1ac diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion.go deleted file mode 100644 index f158224a0d..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion.go +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package logging - -import ( - "context" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type LogExclusion struct { - Name *string `json:"name"` - Description *string `json:"description"` - Filter *string `json:"filter"` - Disabled *bool `json:"disabled"` - CreateTime *string `json:"createTime"` - UpdateTime *string `json:"updateTime"` - Parent *string `json:"parent"` -} - -func (r *LogExclusion) String() string { - return dcl.SprintResource(r) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. 
-func (r *LogExclusion) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "logging", - Type: "LogExclusion", - Version: "logging", - } -} - -func (r *LogExclusion) ID() (string, error) { - if err := extractLogExclusionFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "description": dcl.ValueOrEmptyString(nr.Description), - "filter": dcl.ValueOrEmptyString(nr.Filter), - "disabled": dcl.ValueOrEmptyString(nr.Disabled), - "create_time": dcl.ValueOrEmptyString(nr.CreateTime), - "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), - "parent": dcl.ValueOrEmptyString(nr.Parent), - } - return dcl.Nprintf("{{parent}}/exclusions/{{name}}", params), nil -} - -const LogExclusionMaxPage = -1 - -type LogExclusionList struct { - Items []*LogExclusion - - nextToken string - - pageSize int32 - - resource *LogExclusion -} - -func (l *LogExclusionList) HasNext() bool { - return l.nextToken != "" -} - -func (l *LogExclusionList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listLogExclusion(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListLogExclusion(ctx context.Context, parent string) (*LogExclusionList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListLogExclusionWithMaxResults(ctx, parent, LogExclusionMaxPage) - -} - -func (c *Client) ListLogExclusionWithMaxResults(ctx context.Context, parent string, pageSize int32) (*LogExclusionList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource 
object so that we can use proper url normalization methods. - r := &LogExclusion{ - Parent: &parent, - } - items, token, err := c.listLogExclusion(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &LogExclusionList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetLogExclusion(ctx context.Context, r *LogExclusion) (*LogExclusion, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. - extractLogExclusionFields(r) - - b, err := c.getLogExclusionRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalLogExclusion(b, c, r) - if err != nil { - return nil, err - } - result.Parent = r.Parent - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeLogExclusionNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractLogExclusionFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteLogExclusion(ctx context.Context, r *LogExclusion) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("LogExclusion resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting LogExclusion...") - deleteOp := 
deleteLogExclusionOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllLogExclusion deletes all resources that the filter functions returns true on. -func (c *Client) DeleteAllLogExclusion(ctx context.Context, parent string, filter func(*LogExclusion) bool) error { - listObj, err := c.ListLogExclusion(ctx, parent) - if err != nil { - return err - } - - err = c.deleteAllLogExclusion(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllLogExclusion(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyLogExclusion(ctx context.Context, rawDesired *LogExclusion, opts ...dcl.ApplyOption) (*LogExclusion, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *LogExclusion - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyLogExclusionHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyLogExclusionHelper(c *Client, ctx context.Context, rawDesired *LogExclusion, opts ...dcl.ApplyOption) (*LogExclusion, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyLogExclusion...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. 
- if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractLogExclusionFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.logExclusionDiffsForRawDesired(ctx, rawDesired, opts...) - if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToLogExclusionDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []logExclusionApiOperation - if create { - ops = append(ops, &createLogExclusionOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return 
nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyLogExclusionDiff(c, ctx, desired, rawDesired, ops, opts...) -} - -func applyLogExclusionDiff(c *Client, ctx context.Context, desired *LogExclusion, rawDesired *LogExclusion, ops []logExclusionApiOperation, opts ...dcl.ApplyOption) (*LogExclusion, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetLogExclusion(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createLogExclusionOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapLogExclusion(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeLogExclusionNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeLogExclusionNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeLogExclusionDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractLogExclusionFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. 
- if err := postReadExtractLogExclusionFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffLogExclusion(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion.yaml deleted file mode 100644 index 292c3dad2e..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion.yaml +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-info: - title: Logging/LogExclusion - description: The Logging LogExclusion resource - x-dcl-struct-name: LogExclusion - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a LogExclusion - parameters: - - name: logExclusion - required: true - description: A full instance of a LogExclusion - apply: - description: The function used to apply information about a LogExclusion - parameters: - - name: logExclusion - required: true - description: A full instance of a LogExclusion - delete: - description: The function used to delete a LogExclusion - parameters: - - name: logExclusion - required: true - description: A full instance of a LogExclusion - deleteAll: - description: The function used to delete all LogExclusion - parameters: - - name: parent - required: true - schema: - type: string - list: - description: The function used to list information about many LogExclusion - parameters: - - name: parent - required: true - schema: - type: string -components: - schemas: - LogExclusion: - title: LogExclusion - x-dcl-id: '{{parent}}/exclusions/{{name}}' - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - filter - properties: - createTime: - type: string - format: date-time - x-dcl-go-name: CreateTime - readOnly: true - description: Output only. The creation timestamp of the exclusion. This - field may not be present for older exclusions. - x-kubernetes-immutable: true - description: - type: string - x-dcl-go-name: Description - description: Optional. A description of this exclusion. - disabled: - type: boolean - x-dcl-go-name: Disabled - description: Optional. If set to True, then this exclusion is disabled and - it does not exclude any log entries. You can update an exclusion to change - the value of this field. - filter: - type: string - x-dcl-go-name: Filter - description: 'Required. 
An (https://cloud.google.com/logging/docs/view/advanced-queries#sample), - you can exclude less than 100% of the matching log entries. For example, - the following query matches 99% of low-severity log entries from Google - Cloud Storage buckets: `"resource.type=gcs_bucket severity' - name: - type: string - x-dcl-go-name: Name - description: Required. A client-assigned identifier, such as `"load-balancer-exclusion"`. - Identifiers are limited to 100 characters and can include only letters, - digits, underscores, hyphens, and periods. First character has to be alphanumeric. - x-kubernetes-immutable: true - parent: - type: string - x-dcl-go-name: Parent - description: 'The parent resource in which to create the exclusion: "projects/[PROJECT_ID]" - "organizations/[ORGANIZATION_ID]" "billingAccounts/[BILLING_ACCOUNT_ID]" - "folders/[FOLDER_ID]" Examples: "projects/my-logging-project", "organizations/123456789". - Authorization requires the following IAM permission on the specified resource - parent: logging.exclusions.create' - x-kubernetes-immutable: true - x-dcl-forward-slash-allowed: true - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - parent: true - - resource: Cloudresourcemanager/Folder - field: name - parent: true - - resource: Cloudresourcemanager/Organization - field: name - parent: true - - resource: Cloudresourcemanager/BillingAccount - field: name - parent: true - updateTime: - type: string - format: date-time - x-dcl-go-name: UpdateTime - readOnly: true - description: Output only. The last update timestamp of the exclusion. This - field may not be present for older exclusions. 
- x-kubernetes-immutable: true diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion_internal.go deleted file mode 100644 index 628b285689..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion_internal.go +++ /dev/null @@ -1,756 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package logging - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func (r *LogExclusion) validate() error { - - if err := dcl.ValidateAtMostOneOfFieldsSet([]string(nil)); err != nil { - return err - } - if err := dcl.Required(r, "filter"); err != nil { - return err - } - return nil -} -func (r *LogExclusion) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://logging.googleapis.com/v2/", params) -} - -func (r *LogExclusion) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "parent": dcl.ValueOrEmptyString(nr.Parent), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("{{parent}}/exclusions/{{name}}", nr.basePath(), userBasePath, params), nil -} - -func (r *LogExclusion) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "parent": dcl.ValueOrEmptyString(nr.Parent), - } - return dcl.URL("{{parent}}/exclusions", nr.basePath(), userBasePath, params), nil - -} - -func (r *LogExclusion) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "parent": dcl.ValueOrEmptyString(nr.Parent), - } - return dcl.URL("{{parent}}/exclusions", nr.basePath(), userBasePath, params), nil - -} - -func (r *LogExclusion) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "parent": dcl.ValueOrEmptyString(nr.Parent), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("{{parent}}/exclusions/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// logExclusionApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. 
-type logExclusionApiOperation interface { - do(context.Context, *LogExclusion, *Client) error -} - -// newUpdateLogExclusionUpdateExclusionRequest creates a request for an -// LogExclusion resource's UpdateExclusion update type by filling in the update -// fields based on the intended state of the resource. -func newUpdateLogExclusionUpdateExclusionRequest(ctx context.Context, f *LogExclusion, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.Description; !dcl.IsEmptyValueIndirect(v) { - req["description"] = v - } - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - req["filter"] = v - } - if v := f.Disabled; !dcl.IsEmptyValueIndirect(v) { - req["disabled"] = v - } - return req, nil -} - -// marshalUpdateLogExclusionUpdateExclusionRequest converts the update into -// the final JSON request body. -func marshalUpdateLogExclusionUpdateExclusionRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateLogExclusionUpdateExclusionOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. 
- -func (op *updateLogExclusionUpdateExclusionOperation) do(ctx context.Context, r *LogExclusion, c *Client) error { - _, err := c.GetLogExclusion(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateExclusion") - if err != nil { - return err - } - mask := dcl.UpdateMask(op.FieldDiffs) - u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) - if err != nil { - return err - } - - req, err := newUpdateLogExclusionUpdateExclusionRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateLogExclusionUpdateExclusionRequest(c, req) - if err != nil { - return err - } - _, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - return nil -} - -func (c *Client) listLogExclusionRaw(ctx context.Context, r *LogExclusion, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != LogExclusionMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listLogExclusionOperation struct { - Exclusions []map[string]interface{} `json:"exclusions"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listLogExclusion(ctx context.Context, r *LogExclusion, pageToken string, pageSize int32) ([]*LogExclusion, string, error) { - b, err := c.listLogExclusionRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m 
listLogExclusionOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*LogExclusion - for _, v := range m.Exclusions { - res, err := unmarshalMapLogExclusion(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Parent = r.Parent - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllLogExclusion(ctx context.Context, f func(*LogExclusion) bool, resources []*LogExclusion) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteLogExclusion(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteLogExclusionOperation struct{} - -func (op *deleteLogExclusionOperation) do(ctx context.Context, r *LogExclusion, c *Client) error { - r, err := c.GetLogExclusion(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "LogExclusion not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetLogExclusion checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return fmt.Errorf("failed to delete LogExclusion: %w", err) - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. 
- retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetLogExclusion(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createLogExclusionOperation struct { - response map[string]interface{} -} - -func (op *createLogExclusionOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createLogExclusionOperation) do(ctx context.Context, r *LogExclusion, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - - o, err := dcl.ResponseBodyAsJSON(resp) - if err != nil { - return fmt.Errorf("error decoding response body into JSON: %w", err) - } - op.response = o - - if _, err := c.GetLogExclusion(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getLogExclusionRaw(ctx context.Context, r *LogExclusion) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := 
ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) logExclusionDiffsForRawDesired(ctx context.Context, rawDesired *LogExclusion, opts ...dcl.ApplyOption) (initial, desired *LogExclusion, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. - var fetchState *LogExclusion - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*LogExclusion); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected LogExclusion, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetLogExclusion(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a LogExclusion resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve LogExclusion resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that LogExclusion resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeLogExclusionDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for LogExclusion: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for LogExclusion: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractLogExclusionFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. 
- initial, err = canonicalizeLogExclusionInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for LogExclusion: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeLogExclusionDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for LogExclusion: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffLogExclusion(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeLogExclusionInitialState(rawInitial, rawDesired *LogExclusion) (*LogExclusion, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeLogExclusionDesiredState(rawDesired, rawInitial *LogExclusion, opts ...dcl.ApplyOption) (*LogExclusion, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. 
- - return rawDesired, nil - } - canonicalDesired := &LogExclusion{} - if dcl.StringCanonicalize(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { - canonicalDesired.Description = rawInitial.Description - } else { - canonicalDesired.Description = rawDesired.Description - } - if dcl.StringCanonicalize(rawDesired.Filter, rawInitial.Filter) { - canonicalDesired.Filter = rawInitial.Filter - } else { - canonicalDesired.Filter = rawDesired.Filter - } - if dcl.BoolCanonicalize(rawDesired.Disabled, rawInitial.Disabled) { - canonicalDesired.Disabled = rawInitial.Disabled - } else { - canonicalDesired.Disabled = rawDesired.Disabled - } - if dcl.NameToSelfLink(rawDesired.Parent, rawInitial.Parent) { - canonicalDesired.Parent = rawInitial.Parent - } else { - canonicalDesired.Parent = rawDesired.Parent - } - - return canonicalDesired, nil -} - -func canonicalizeLogExclusionNewState(c *Client, rawNew, rawDesired *LogExclusion) (*LogExclusion, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.StringCanonicalize(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { - rawNew.Description = rawDesired.Description - } else { - if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { - rawNew.Description = rawDesired.Description - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Filter) && dcl.IsEmptyValueIndirect(rawDesired.Filter) { - rawNew.Filter = rawDesired.Filter - } else { - if dcl.StringCanonicalize(rawDesired.Filter, rawNew.Filter) { - rawNew.Filter = rawDesired.Filter - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Disabled) && dcl.IsEmptyValueIndirect(rawDesired.Disabled) { - 
rawNew.Disabled = rawDesired.Disabled - } else { - if dcl.BoolCanonicalize(rawDesired.Disabled, rawNew.Disabled) { - rawNew.Disabled = rawDesired.Disabled - } - } - - if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { - rawNew.CreateTime = rawDesired.CreateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { - rawNew.UpdateTime = rawDesired.UpdateTime - } else { - } - - rawNew.Parent = rawDesired.Parent - - return rawNew, nil -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. -func diffLogExclusion(c *Client, desired, actual *LogExclusion, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogExclusionUpdateExclusionOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogExclusionUpdateExclusionOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Disabled, actual.Disabled, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogExclusionUpdateExclusionOperation")}, fn.AddNest("Disabled")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Parent, actual.Parent, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Parent")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. 
For instance, it converts long-form self-links to -// short-form so they can be substituted in. -func (r *LogExclusion) urlNormalized() *LogExclusion { - normalized := dcl.Copy(*r).(LogExclusion) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.Description = dcl.SelfLinkToName(r.Description) - normalized.Filter = dcl.SelfLinkToName(r.Filter) - normalized.Parent = r.Parent - return &normalized -} - -func (r *LogExclusion) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateExclusion" { - fields := map[string]interface{}{ - "parent": dcl.ValueOrEmptyString(nr.Parent), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("{{parent}}/exclusions/{{name}}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the LogExclusion resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *LogExclusion) marshal(c *Client) ([]byte, error) { - m, err := expandLogExclusion(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling LogExclusion: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalLogExclusion decodes JSON responses into the LogExclusion resource schema. -func unmarshalLogExclusion(b []byte, c *Client, res *LogExclusion) (*LogExclusion, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapLogExclusion(m, c, res) -} - -func unmarshalMapLogExclusion(m map[string]interface{}, c *Client, res *LogExclusion) (*LogExclusion, error) { - - flattened := flattenLogExclusion(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandLogExclusion expands LogExclusion into a JSON request object. 
-func expandLogExclusion(c *Client, f *LogExclusion) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v := f.Name; dcl.ValueShouldBeSent(v) { - m["name"] = v - } - if v := f.Description; dcl.ValueShouldBeSent(v) { - m["description"] = v - } - if v := f.Filter; dcl.ValueShouldBeSent(v) { - m["filter"] = v - } - if v := f.Disabled; dcl.ValueShouldBeSent(v) { - m["disabled"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Parent into parent: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["parent"] = v - } - - return m, nil -} - -// flattenLogExclusion flattens LogExclusion from a JSON request object into the -// LogExclusion type. -func flattenLogExclusion(c *Client, i interface{}, res *LogExclusion) *LogExclusion { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &LogExclusion{} - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.Description = dcl.FlattenString(m["description"]) - resultRes.Filter = dcl.FlattenString(m["filter"]) - resultRes.Disabled = dcl.FlattenBool(m["disabled"]) - resultRes.CreateTime = dcl.FlattenString(m["createTime"]) - resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) - resultRes.Parent = dcl.FlattenString(m["parent"]) - - return resultRes -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. 
-func (r *LogExclusion) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalLogExclusion(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Parent == nil && ncr.Parent == nil { - c.Config.Logger.Info("Both Parent fields null - considering equal.") - } else if nr.Parent == nil || ncr.Parent == nil { - c.Config.Logger.Info("Only one Parent field is null - considering unequal.") - return false - } else if *nr.Parent != *ncr.Parent { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type logExclusionDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp logExclusionApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToLogExclusionDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]logExclusionDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []logExclusionDiff - // For each operation name, create a logExclusionDiff which contains the operation. 
- for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := logExclusionDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToLogExclusionApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToLogExclusionApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (logExclusionApiOperation, error) { - switch opName { - - case "updateLogExclusionUpdateExclusionOperation": - return &updateLogExclusionUpdateExclusionOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractLogExclusionFields(r *LogExclusion) error { - return nil -} - -func postReadExtractLogExclusionFields(r *LogExclusion) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion_schema.go deleted file mode 100644 index 46bc7f6a7c..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion_schema.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package logging - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLLogExclusionSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Logging/LogExclusion", - Description: "The Logging LogExclusion resource", - StructName: "LogExclusion", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a LogExclusion", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "logExclusion", - Required: true, - Description: "A full instance of a LogExclusion", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a LogExclusion", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "logExclusion", - Required: true, - Description: "A full instance of a LogExclusion", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a LogExclusion", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "logExclusion", - Required: true, - Description: "A full instance of a LogExclusion", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all LogExclusion", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "parent", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many LogExclusion", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "parent", - Required: true, - Schema: 
&dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "LogExclusion": &dcl.Component{ - Title: "LogExclusion", - ID: "{{parent}}/exclusions/{{name}}", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The creation timestamp of the exclusion. This field may not be present for older exclusions.", - Immutable: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Optional. A description of this exclusion.", - }, - "disabled": &dcl.Property{ - Type: "boolean", - GoName: "Disabled", - Description: "Optional. If set to True, then this exclusion is disabled and it does not exclude any log entries. You can update an exclusion to change the value of this field.", - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. An (https://cloud.google.com/logging/docs/view/advanced-queries#sample), you can exclude less than 100% of the matching log entries. For example, the following query matches 99% of low-severity log entries from Google Cloud Storage buckets: `\"resource.type=gcs_bucket severity", - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Required. A client-assigned identifier, such as `\"load-balancer-exclusion\"`. Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. 
First character has to be alphanumeric.", - Immutable: true, - }, - "parent": &dcl.Property{ - Type: "string", - GoName: "Parent", - Description: "The parent resource in which to create the exclusion: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\". Authorization requires the following IAM permission on the specified resource parent: logging.exclusions.create", - Immutable: true, - ForwardSlashAllowed: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Folder", - Field: "name", - Parent: true, - }, - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Organization", - Field: "name", - Parent: true, - }, - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/BillingAccount", - Field: "name", - Parent: true, - }, - }, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. The last update timestamp of the exclusion. This field may not be present for older exclusions.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion_yaml_embed.go deleted file mode 100644 index 20675c950d..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_exclusion_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. 
All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// GENERATED BY gen_go_data.go -// gen_go_data -package logging -var YAML_log_exclusion blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/logging/log_exclusion.yaml - -package logging - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/logging/log_exclusion.yaml -var YAML_log_exclusion = []byte("info:\n title: Logging/LogExclusion\n description: The Logging LogExclusion resource\n x-dcl-struct-name: LogExclusion\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a LogExclusion\n parameters:\n - name: logExclusion\n required: true\n description: A full instance of a LogExclusion\n apply:\n description: The function used to apply information about a LogExclusion\n parameters:\n - name: logExclusion\n required: true\n description: A full instance of a LogExclusion\n delete:\n description: The function used to delete a LogExclusion\n parameters:\n - name: logExclusion\n required: true\n description: A full instance of a LogExclusion\n deleteAll:\n description: The function used to delete all LogExclusion\n parameters:\n - name: parent\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many LogExclusion\n parameters:\n - name: parent\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n LogExclusion:\n title: LogExclusion\n x-dcl-id: 
'{{parent}}/exclusions/{{name}}'\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - filter\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The creation timestamp of the exclusion. This\n field may not be present for older exclusions.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. A description of this exclusion.\n disabled:\n type: boolean\n x-dcl-go-name: Disabled\n description: Optional. If set to True, then this exclusion is disabled and\n it does not exclude any log entries. You can update an exclusion to change\n the value of this field.\n filter:\n type: string\n x-dcl-go-name: Filter\n description: 'Required. An (https://cloud.google.com/logging/docs/view/advanced-queries#sample),\n you can exclude less than 100% of the matching log entries. For example,\n the following query matches 99% of low-severity log entries from Google\n Cloud Storage buckets: `\"resource.type=gcs_bucket severity'\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. A client-assigned identifier, such as `\"load-balancer-exclusion\"`.\n Identifiers are limited to 100 characters and can include only letters,\n digits, underscores, hyphens, and periods. 
First character has to be alphanumeric.\n x-kubernetes-immutable: true\n parent:\n type: string\n x-dcl-go-name: Parent\n description: 'The parent resource in which to create the exclusion: \"projects/[PROJECT_ID]\"\n \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\"\n \"folders/[FOLDER_ID]\" Examples: \"projects/my-logging-project\", \"organizations/123456789\".\n Authorization requires the following IAM permission on the specified resource\n parent: logging.exclusions.create'\n x-kubernetes-immutable: true\n x-dcl-forward-slash-allowed: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n - resource: Cloudresourcemanager/Folder\n field: name\n parent: true\n - resource: Cloudresourcemanager/Organization\n field: name\n parent: true\n - resource: Cloudresourcemanager/BillingAccount\n field: name\n parent: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The last update timestamp of the exclusion. This\n field may not be present for older exclusions.\n x-kubernetes-immutable: true\n") - -// 4285 bytes -// MD5: 0da2900589a5821fb6ceac1d8baf5fd0 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.go deleted file mode 100644 index 97d5e81001..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.go +++ /dev/null @@ -1,868 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package logging - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type LogMetric struct { - Name *string `json:"name"` - Description *string `json:"description"` - Filter *string `json:"filter"` - Disabled *bool `json:"disabled"` - MetricDescriptor *LogMetricMetricDescriptor `json:"metricDescriptor"` - ValueExtractor *string `json:"valueExtractor"` - LabelExtractors map[string]string `json:"labelExtractors"` - BucketOptions *LogMetricBucketOptions `json:"bucketOptions"` - CreateTime *string `json:"createTime"` - UpdateTime *string `json:"updateTime"` - Project *string `json:"project"` -} - -func (r *LogMetric) String() string { - return dcl.SprintResource(r) -} - -// The enum LogMetricMetricDescriptorLabelsValueTypeEnum. -type LogMetricMetricDescriptorLabelsValueTypeEnum string - -// LogMetricMetricDescriptorLabelsValueTypeEnumRef returns a *LogMetricMetricDescriptorLabelsValueTypeEnum with the value of string s -// If the empty string is provided, nil is returned. -func LogMetricMetricDescriptorLabelsValueTypeEnumRef(s string) *LogMetricMetricDescriptorLabelsValueTypeEnum { - v := LogMetricMetricDescriptorLabelsValueTypeEnum(s) - return &v -} - -func (v LogMetricMetricDescriptorLabelsValueTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"STRING", "BOOL", "INT64", "DOUBLE", "DISTRIBUTION", "MONEY"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "LogMetricMetricDescriptorLabelsValueTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum LogMetricMetricDescriptorMetricKindEnum. -type LogMetricMetricDescriptorMetricKindEnum string - -// LogMetricMetricDescriptorMetricKindEnumRef returns a *LogMetricMetricDescriptorMetricKindEnum with the value of string s -// If the empty string is provided, nil is returned. -func LogMetricMetricDescriptorMetricKindEnumRef(s string) *LogMetricMetricDescriptorMetricKindEnum { - v := LogMetricMetricDescriptorMetricKindEnum(s) - return &v -} - -func (v LogMetricMetricDescriptorMetricKindEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"GAUGE", "DELTA", "CUMULATIVE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "LogMetricMetricDescriptorMetricKindEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum LogMetricMetricDescriptorValueTypeEnum. -type LogMetricMetricDescriptorValueTypeEnum string - -// LogMetricMetricDescriptorValueTypeEnumRef returns a *LogMetricMetricDescriptorValueTypeEnum with the value of string s -// If the empty string is provided, nil is returned. -func LogMetricMetricDescriptorValueTypeEnumRef(s string) *LogMetricMetricDescriptorValueTypeEnum { - v := LogMetricMetricDescriptorValueTypeEnum(s) - return &v -} - -func (v LogMetricMetricDescriptorValueTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"STRING", "BOOL", "INT64", "DOUBLE", "DISTRIBUTION", "MONEY"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "LogMetricMetricDescriptorValueTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum LogMetricMetricDescriptorLaunchStageEnum. -type LogMetricMetricDescriptorLaunchStageEnum string - -// LogMetricMetricDescriptorLaunchStageEnumRef returns a *LogMetricMetricDescriptorLaunchStageEnum with the value of string s -// If the empty string is provided, nil is returned. -func LogMetricMetricDescriptorLaunchStageEnumRef(s string) *LogMetricMetricDescriptorLaunchStageEnum { - v := LogMetricMetricDescriptorLaunchStageEnum(s) - return &v -} - -func (v LogMetricMetricDescriptorLaunchStageEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "LogMetricMetricDescriptorLaunchStageEnum", - Value: string(v), - Valid: []string{}, - } -} - -type LogMetricMetricDescriptor struct { - empty bool `json:"-"` - Name *string `json:"name"` - Type *string `json:"type"` - Labels []LogMetricMetricDescriptorLabels `json:"labels"` - MetricKind *LogMetricMetricDescriptorMetricKindEnum `json:"metricKind"` - ValueType *LogMetricMetricDescriptorValueTypeEnum `json:"valueType"` - Unit *string `json:"unit"` - Description *string `json:"description"` - DisplayName *string `json:"displayName"` - Metadata *LogMetricMetricDescriptorMetadata `json:"metadata"` - LaunchStage *LogMetricMetricDescriptorLaunchStageEnum `json:"launchStage"` - MonitoredResourceTypes []string `json:"monitoredResourceTypes"` -} - -type jsonLogMetricMetricDescriptor LogMetricMetricDescriptor - -func (r *LogMetricMetricDescriptor) UnmarshalJSON(data []byte) error { - var res 
jsonLogMetricMetricDescriptor - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyLogMetricMetricDescriptor - } else { - - r.Name = res.Name - - r.Type = res.Type - - r.Labels = res.Labels - - r.MetricKind = res.MetricKind - - r.ValueType = res.ValueType - - r.Unit = res.Unit - - r.Description = res.Description - - r.DisplayName = res.DisplayName - - r.Metadata = res.Metadata - - r.LaunchStage = res.LaunchStage - - r.MonitoredResourceTypes = res.MonitoredResourceTypes - - } - return nil -} - -// This object is used to assert a desired state where this LogMetricMetricDescriptor is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyLogMetricMetricDescriptor *LogMetricMetricDescriptor = &LogMetricMetricDescriptor{empty: true} - -func (r *LogMetricMetricDescriptor) Empty() bool { - return r.empty -} - -func (r *LogMetricMetricDescriptor) String() string { - return dcl.SprintResource(r) -} - -func (r *LogMetricMetricDescriptor) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type LogMetricMetricDescriptorLabels struct { - empty bool `json:"-"` - Key *string `json:"key"` - ValueType *LogMetricMetricDescriptorLabelsValueTypeEnum `json:"valueType"` - Description *string `json:"description"` -} - -type jsonLogMetricMetricDescriptorLabels LogMetricMetricDescriptorLabels - -func (r *LogMetricMetricDescriptorLabels) UnmarshalJSON(data []byte) error { - var res jsonLogMetricMetricDescriptorLabels - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyLogMetricMetricDescriptorLabels - 
} else { - - r.Key = res.Key - - r.ValueType = res.ValueType - - r.Description = res.Description - - } - return nil -} - -// This object is used to assert a desired state where this LogMetricMetricDescriptorLabels is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyLogMetricMetricDescriptorLabels *LogMetricMetricDescriptorLabels = &LogMetricMetricDescriptorLabels{empty: true} - -func (r *LogMetricMetricDescriptorLabels) Empty() bool { - return r.empty -} - -func (r *LogMetricMetricDescriptorLabels) String() string { - return dcl.SprintResource(r) -} - -func (r *LogMetricMetricDescriptorLabels) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type LogMetricMetricDescriptorMetadata struct { - empty bool `json:"-"` - SamplePeriod *string `json:"samplePeriod"` - IngestDelay *string `json:"ingestDelay"` -} - -type jsonLogMetricMetricDescriptorMetadata LogMetricMetricDescriptorMetadata - -func (r *LogMetricMetricDescriptorMetadata) UnmarshalJSON(data []byte) error { - var res jsonLogMetricMetricDescriptorMetadata - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyLogMetricMetricDescriptorMetadata - } else { - - r.SamplePeriod = res.SamplePeriod - - r.IngestDelay = res.IngestDelay - - } - return nil -} - -// This object is used to assert a desired state where this LogMetricMetricDescriptorMetadata is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyLogMetricMetricDescriptorMetadata *LogMetricMetricDescriptorMetadata = &LogMetricMetricDescriptorMetadata{empty: true} - -func (r *LogMetricMetricDescriptorMetadata) Empty() bool { - return r.empty -} - -func (r *LogMetricMetricDescriptorMetadata) String() string { - return dcl.SprintResource(r) -} - -func (r *LogMetricMetricDescriptorMetadata) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type LogMetricBucketOptions struct { - empty bool `json:"-"` - LinearBuckets *LogMetricBucketOptionsLinearBuckets `json:"linearBuckets"` - ExponentialBuckets *LogMetricBucketOptionsExponentialBuckets `json:"exponentialBuckets"` - ExplicitBuckets *LogMetricBucketOptionsExplicitBuckets `json:"explicitBuckets"` -} - -type jsonLogMetricBucketOptions LogMetricBucketOptions - -func (r *LogMetricBucketOptions) UnmarshalJSON(data []byte) error { - var res jsonLogMetricBucketOptions - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyLogMetricBucketOptions - } else { - - r.LinearBuckets = res.LinearBuckets - - r.ExponentialBuckets = res.ExponentialBuckets - - r.ExplicitBuckets = res.ExplicitBuckets - - } - return nil -} - -// This object is used to assert a desired state where this LogMetricBucketOptions is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyLogMetricBucketOptions *LogMetricBucketOptions = &LogMetricBucketOptions{empty: true} - -func (r *LogMetricBucketOptions) Empty() bool { - return r.empty -} - -func (r *LogMetricBucketOptions) String() string { - return dcl.SprintResource(r) -} - -func (r *LogMetricBucketOptions) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type LogMetricBucketOptionsLinearBuckets struct { - empty bool `json:"-"` - NumFiniteBuckets *int64 `json:"numFiniteBuckets"` - Width *float64 `json:"width"` - Offset *float64 `json:"offset"` -} - -type jsonLogMetricBucketOptionsLinearBuckets LogMetricBucketOptionsLinearBuckets - -func (r *LogMetricBucketOptionsLinearBuckets) UnmarshalJSON(data []byte) error { - var res jsonLogMetricBucketOptionsLinearBuckets - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyLogMetricBucketOptionsLinearBuckets - } else { - - r.NumFiniteBuckets = res.NumFiniteBuckets - - r.Width = res.Width - - r.Offset = res.Offset - - } - return nil -} - -// This object is used to assert a desired state where this LogMetricBucketOptionsLinearBuckets is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyLogMetricBucketOptionsLinearBuckets *LogMetricBucketOptionsLinearBuckets = &LogMetricBucketOptionsLinearBuckets{empty: true} - -func (r *LogMetricBucketOptionsLinearBuckets) Empty() bool { - return r.empty -} - -func (r *LogMetricBucketOptionsLinearBuckets) String() string { - return dcl.SprintResource(r) -} - -func (r *LogMetricBucketOptionsLinearBuckets) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type LogMetricBucketOptionsExponentialBuckets struct { - empty bool `json:"-"` - NumFiniteBuckets *int64 `json:"numFiniteBuckets"` - GrowthFactor *float64 `json:"growthFactor"` - Scale *float64 `json:"scale"` -} - -type jsonLogMetricBucketOptionsExponentialBuckets LogMetricBucketOptionsExponentialBuckets - -func (r *LogMetricBucketOptionsExponentialBuckets) UnmarshalJSON(data []byte) error { - var res jsonLogMetricBucketOptionsExponentialBuckets - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyLogMetricBucketOptionsExponentialBuckets - } else { - - r.NumFiniteBuckets = res.NumFiniteBuckets - - r.GrowthFactor = res.GrowthFactor - - r.Scale = res.Scale - - } - return nil -} - -// This object is used to assert a desired state where this LogMetricBucketOptionsExponentialBuckets is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyLogMetricBucketOptionsExponentialBuckets *LogMetricBucketOptionsExponentialBuckets = &LogMetricBucketOptionsExponentialBuckets{empty: true} - -func (r *LogMetricBucketOptionsExponentialBuckets) Empty() bool { - return r.empty -} - -func (r *LogMetricBucketOptionsExponentialBuckets) String() string { - return dcl.SprintResource(r) -} - -func (r *LogMetricBucketOptionsExponentialBuckets) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type LogMetricBucketOptionsExplicitBuckets struct { - empty bool `json:"-"` - Bounds []float64 `json:"bounds"` -} - -type jsonLogMetricBucketOptionsExplicitBuckets LogMetricBucketOptionsExplicitBuckets - -func (r *LogMetricBucketOptionsExplicitBuckets) UnmarshalJSON(data []byte) error { - var res jsonLogMetricBucketOptionsExplicitBuckets - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyLogMetricBucketOptionsExplicitBuckets - } else { - - r.Bounds = res.Bounds - - } - return nil -} - -// This object is used to assert a desired state where this LogMetricBucketOptionsExplicitBuckets is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyLogMetricBucketOptionsExplicitBuckets *LogMetricBucketOptionsExplicitBuckets = &LogMetricBucketOptionsExplicitBuckets{empty: true} - -func (r *LogMetricBucketOptionsExplicitBuckets) Empty() bool { - return r.empty -} - -func (r *LogMetricBucketOptionsExplicitBuckets) String() string { - return dcl.SprintResource(r) -} - -func (r *LogMetricBucketOptionsExplicitBuckets) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. -func (r *LogMetric) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "logging", - Type: "LogMetric", - Version: "logging", - } -} - -func (r *LogMetric) ID() (string, error) { - if err := extractLogMetricFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "description": dcl.ValueOrEmptyString(nr.Description), - "filter": dcl.ValueOrEmptyString(nr.Filter), - "disabled": dcl.ValueOrEmptyString(nr.Disabled), - "metric_descriptor": dcl.ValueOrEmptyString(nr.MetricDescriptor), - "value_extractor": dcl.ValueOrEmptyString(nr.ValueExtractor), - "label_extractors": dcl.ValueOrEmptyString(nr.LabelExtractors), - "bucket_options": dcl.ValueOrEmptyString(nr.BucketOptions), - "create_time": dcl.ValueOrEmptyString(nr.CreateTime), - "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.Nprintf("projects/{{project}}/metrics/{{name}}", params), nil -} - -const LogMetricMaxPage = -1 - -type LogMetricList struct { - Items []*LogMetric - - nextToken string - - pageSize int32 - - resource *LogMetric -} - -func (l *LogMetricList) HasNext() bool { - return l.nextToken != "" -} - -func 
(l *LogMetricList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listLogMetric(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListLogMetric(ctx context.Context, project string) (*LogMetricList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListLogMetricWithMaxResults(ctx, project, LogMetricMaxPage) - -} - -func (c *Client) ListLogMetricWithMaxResults(ctx context.Context, project string, pageSize int32) (*LogMetricList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. - r := &LogMetric{ - Project: &project, - } - items, token, err := c.listLogMetric(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &LogMetricList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetLogMetric(ctx context.Context, r *LogMetric) (*LogMetric, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
- extractLogMetricFields(r) - - b, err := c.getLogMetricRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalLogMetric(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeLogMetricNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractLogMetricFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteLogMetric(ctx context.Context, r *LogMetric) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("LogMetric resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting LogMetric...") - deleteOp := deleteLogMetricOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllLogMetric deletes all resources that the filter functions returns true on. 
-func (c *Client) DeleteAllLogMetric(ctx context.Context, project string, filter func(*LogMetric) bool) error { - listObj, err := c.ListLogMetric(ctx, project) - if err != nil { - return err - } - - err = c.deleteAllLogMetric(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllLogMetric(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyLogMetric(ctx context.Context, rawDesired *LogMetric, opts ...dcl.ApplyOption) (*LogMetric, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *LogMetric - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyLogMetricHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyLogMetricHelper(c *Client, ctx context.Context, rawDesired *LogMetric, opts ...dcl.ApplyOption) (*LogMetric, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyLogMetric...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. - if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractLogMetricFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.logMetricDiffsForRawDesired(ctx, rawDesired, opts...) 
- if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToLogMetricDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []logMetricApiOperation - if create { - ops = append(ops, &createLogMetricOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyLogMetricDiff(c, ctx, desired, rawDesired, ops, opts...) 
-} - -func applyLogMetricDiff(c *Client, ctx context.Context, desired *LogMetric, rawDesired *LogMetric, ops []logMetricApiOperation, opts ...dcl.ApplyOption) (*LogMetric, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetLogMetric(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createLogMetricOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapLogMetric(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeLogMetricNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeLogMetricNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeLogMetricDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractLogMetricFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. 
- if err := postReadExtractLogMetricFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffLogMetric(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.yaml deleted file mode 100644 index d959aa66db..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric.yaml +++ /dev/null @@ -1,452 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-info: - title: Logging/LogMetric - description: The Logging LogMetric resource - x-dcl-struct-name: LogMetric - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a LogMetric - parameters: - - name: logMetric - required: true - description: A full instance of a LogMetric - apply: - description: The function used to apply information about a LogMetric - parameters: - - name: logMetric - required: true - description: A full instance of a LogMetric - delete: - description: The function used to delete a LogMetric - parameters: - - name: logMetric - required: true - description: A full instance of a LogMetric - deleteAll: - description: The function used to delete all LogMetric - parameters: - - name: project - required: true - schema: - type: string - list: - description: The function used to list information about many LogMetric - parameters: - - name: project - required: true - schema: - type: string -components: - schemas: - LogMetric: - title: LogMetric - x-dcl-id: projects/{{project}}/metrics/{{name}} - x-dcl-uses-state-hint: true - x-dcl-parent-container: project - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - name - - filter - - project - properties: - bucketOptions: - type: object - x-dcl-go-name: BucketOptions - x-dcl-go-type: LogMetricBucketOptions - description: Optional. The `bucket_options` are required when the logs-based - metric is using a DISTRIBUTION value type and it describes the bucket - boundaries used to create a histogram of the extracted values. - properties: - explicitBuckets: - type: object - x-dcl-go-name: ExplicitBuckets - x-dcl-go-type: LogMetricBucketOptionsExplicitBuckets - description: The explicit buckets. - x-dcl-conflicts: - - linearBuckets - - exponentialBuckets - properties: - bounds: - type: array - x-dcl-go-name: Bounds - description: The values must be monotonically increasing. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: number - format: double - x-dcl-go-type: float64 - exponentialBuckets: - type: object - x-dcl-go-name: ExponentialBuckets - x-dcl-go-type: LogMetricBucketOptionsExponentialBuckets - description: The exponential buckets. - x-dcl-conflicts: - - linearBuckets - - explicitBuckets - properties: - growthFactor: - type: number - format: double - x-dcl-go-name: GrowthFactor - description: Must be greater than 1. - numFiniteBuckets: - type: integer - format: int64 - x-dcl-go-name: NumFiniteBuckets - description: Must be greater than 0. - scale: - type: number - format: double - x-dcl-go-name: Scale - description: Must be greater than 0. - linearBuckets: - type: object - x-dcl-go-name: LinearBuckets - x-dcl-go-type: LogMetricBucketOptionsLinearBuckets - description: The linear bucket. - x-dcl-conflicts: - - exponentialBuckets - - explicitBuckets - properties: - numFiniteBuckets: - type: integer - format: int64 - x-dcl-go-name: NumFiniteBuckets - description: Must be greater than 0. - offset: - type: number - format: double - x-dcl-go-name: Offset - description: Lower bound of the first bucket. - width: - type: number - format: double - x-dcl-go-name: Width - description: Must be greater than 0. - createTime: - type: string - format: date-time - x-dcl-go-name: CreateTime - readOnly: true - description: Output only. The creation timestamp of the metric. This field - may not be present for older metrics. - x-kubernetes-immutable: true - description: - type: string - x-dcl-go-name: Description - description: Optional. A description of this metric, which is used in documentation. - The maximum length of the description is 8000 characters. - disabled: - type: boolean - x-dcl-go-name: Disabled - description: Optional. If set to True, then this metric is disabled and - it does not generate any points. - filter: - type: string - x-dcl-go-name: Filter - description: 'Required. 
An [advanced logs filter](https://cloud.google.com/logging/docs/view/advanced_filters) - which is used to match log entries. Example: "resource.type=gae_app AND - severity>=ERROR" The maximum length of the filter is 20000 characters.' - labelExtractors: - type: object - additionalProperties: - type: string - x-dcl-go-name: LabelExtractors - description: Optional. A map from a label key string to an extractor expression - which is used to extract data from a log entry field and assign as the - label value. Each label key specified in the LabelDescriptor must have - an associated extractor expression in this map. The syntax of the extractor - expression is the same as for the `value_extractor` field. The extracted - value is converted to the type defined in the label descriptor. If the - either the extraction or the type conversion fails, the label will have - a default value. The default value for a string label is an empty string, - for an integer label its 0, and for a boolean label its `false`. Note - that there are upper bounds on the maximum number of labels and the number - of active time series that are allowed in a project. - metricDescriptor: - type: object - x-dcl-go-name: MetricDescriptor - x-dcl-go-type: LogMetricMetricDescriptor - description: Optional. The metric descriptor associated with the logs-based - metric. If unspecified, it uses a default metric descriptor with a DELTA - metric kind, INT64 value type, with no labels and a unit of "1". Such - a metric counts the number of log entries matching the `filter` expression. - The `name`, `type`, and `description` fields in the `metric_descriptor` - are output only, and is constructed using the `name` and `description` - field in the LogMetric. To create a logs-based metric that records a distribution - of log values, a DELTA metric kind with a DISTRIBUTION value type must - be used along with a `value_extractor` expression in the LogMetric. 
Each - label in the metric descriptor must have a matching label name as the - key and an extractor expression as the value in the `label_extractors` - map. The `metric_kind` and `value_type` fields in the `metric_descriptor` - cannot be updated once initially configured. New labels can be added in - the `metric_descriptor`, but existing labels cannot be modified except - for their description. - properties: - description: - type: string - x-dcl-go-name: Description - readOnly: true - description: A detailed description of the metric, which can be used - in documentation. - displayName: - type: string - x-dcl-go-name: DisplayName - description: A concise name for the metric, which can be displayed in - user interfaces. Use sentence case without an ending period, for example - "Request count". This field is optional but it is recommended to be - set for any metrics associated with user-visible concepts, such as - Quota. - labels: - type: array - x-dcl-go-name: Labels - description: The set of labels that can be used to describe a specific - instance of this metric type. For example, the `appengine.googleapis.com/http/server/response_latencies` - metric type has a label for the HTTP response code, `response_code`, - so you can look at latencies for successful responses or just for - responses that failed. - x-dcl-send-empty: true - x-dcl-list-type: set - items: - type: object - x-dcl-go-type: LogMetricMetricDescriptorLabels - properties: - description: - type: string - x-dcl-go-name: Description - description: A human-readable description for the label. - x-kubernetes-immutable: true - key: - type: string - x-dcl-go-name: Key - description: The label key. - x-kubernetes-immutable: true - valueType: - type: string - x-dcl-go-name: ValueType - x-dcl-go-type: LogMetricMetricDescriptorLabelsValueTypeEnum - description: 'The type of data that can be assigned to the label. 
- Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION, - MONEY' - x-kubernetes-immutable: true - enum: - - STRING - - BOOL - - INT64 - - DOUBLE - - DISTRIBUTION - - MONEY - launchStage: - type: string - x-dcl-go-name: LaunchStage - x-dcl-go-type: LogMetricMetricDescriptorLaunchStageEnum - description: 'Optional. The launch stage of the metric definition. Possible - values: UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED' - enum: - - UNIMPLEMENTED - - PRELAUNCH - - EARLY_ACCESS - - ALPHA - - BETA - - GA - - DEPRECATED - x-dcl-mutable-unreadable: true - metadata: - type: object - x-dcl-go-name: Metadata - x-dcl-go-type: LogMetricMetricDescriptorMetadata - description: Optional. Metadata which can be used to guide usage of - the metric. - x-dcl-mutable-unreadable: true - properties: - ingestDelay: - type: string - x-dcl-go-name: IngestDelay - description: The delay of data points caused by ingestion. Data - points older than this age are guaranteed to be ingested and available - to be read, excluding data loss due to errors. - samplePeriod: - type: string - x-dcl-go-name: SamplePeriod - description: The sampling period of metric data points. For metrics - which are written periodically, consecutive data points are stored - at this time interval, excluding data loss due to errors. Metrics - with a higher granularity have a smaller sampling period. - metricKind: - type: string - x-dcl-go-name: MetricKind - x-dcl-go-type: LogMetricMetricDescriptorMetricKindEnum - description: 'Whether the metric records instantaneous values, changes - to a value, etc. Some combinations of `metric_kind` and `value_type` - might not be supported. Possible values: GAUGE, DELTA, CUMULATIVE' - x-kubernetes-immutable: true - enum: - - GAUGE - - DELTA - - CUMULATIVE - monitoredResourceTypes: - type: array - x-dcl-go-name: MonitoredResourceTypes - readOnly: true - description: Read-only. 
If present, then a time series, which is identified - partially by a metric type and a MonitoredResourceDescriptor, that - is associated with this metric type can only be associated with one - of the monitored resource types listed here. - x-kubernetes-immutable: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - name: - type: string - x-dcl-go-name: Name - readOnly: true - description: The resource name of the metric descriptor. - x-kubernetes-immutable: true - type: - type: string - x-dcl-go-name: Type - readOnly: true - description: 'The metric type, including its DNS name prefix. The type - is not URL-encoded. All user-defined metric types have the DNS name - `custom.googleapis.com` or `external.googleapis.com`. Metric types - should use a natural hierarchical grouping. For example: "custom.googleapis.com/invoice/paid/amount" - "external.googleapis.com/prometheus/up" "appengine.googleapis.com/http/server/response_latencies"' - x-kubernetes-immutable: true - unit: - type: string - x-dcl-go-name: Unit - description: 'The units in which the metric value is reported. It is - only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. - The `unit` defines the representation of the stored metric values. - Different systems might scale the values to be more easily displayed - (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value - of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` - is `kBy`, then the value of the metric is always in thousands of bytes, - no matter how it might be displayed. If you want a custom metric to - record the exact number of CPU-seconds used by a job, you can create - an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently - `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the - value is written as `12005`. 
Alternatively, if you want a custom metric - to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` - metric whose `unit` is `ks{CPU}`, and then write the value `12.005` - (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which - is `12005/1024`). The supported units are a subset of [The Unified - Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: - **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` - minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** - * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) - * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta - (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) - * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` - zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi - (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** - The grammar also includes these connectors: * `/` division or ratio - (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` - (although you should almost never have `/s` in a metric `unit`; rates - should always be computed at query time from the underlying cumulative - or delta value). * `.` multiplication or composition (as an infix - operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a - unit is as follows: Expression = Component: { "." Component } { "/" - Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] - | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` - is just a comment if it follows a `UNIT`. If the annotation is used - alone, then the unit is equivalent to `1`. For examples, `{request}/s - == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank - printable ASCII characters not containing `{` or `}`. 
* `1` represents - a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) - of 1, such as in `1/s`. It is typically used when none of the basic - units are appropriate. For example, "new users per day" can be represented - as `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 - new users). Alternatively, "thousands of page views per day" would - be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric - value of `5.3` would mean "5300 page views per day"). * `%` represents - dimensionless value of 1/100, and annotates values giving a percentage - (so the metric values are typically in the range of 0..100, and a - metric value `3` means "3 percent"). * `10^2.%` indicates a metric - contains a ratio, typically in the range 0..1, that will be multiplied - by 100 and displayed as a percentage (so a metric value `0.03` means - "3 percent").' - x-dcl-server-default: true - valueType: - type: string - x-dcl-go-name: ValueType - x-dcl-go-type: LogMetricMetricDescriptorValueTypeEnum - description: 'Whether the measurement is an integer, a floating-point - number, etc. Some combinations of `metric_kind` and `value_type` might - not be supported. Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION, - MONEY' - x-kubernetes-immutable: true - enum: - - STRING - - BOOL - - INT64 - - DOUBLE - - DISTRIBUTION - - MONEY - name: - type: string - x-dcl-go-name: Name - description: 'Required. The client-assigned metric identifier. Examples: - `"error_count"`, `"nginx/requests"`. Metric identifiers are limited to - 100 characters and can include only the following characters: `A-Z`, `a-z`, - `0-9`, and the special characters `_-.,+!*'',()%/`. The forward-slash - character (`/`) denotes a hierarchy of name pieces, and it cannot be the - first character of the name. The metric identifier in this field must - not be [URL-encoded](https://en.wikipedia.org/wiki/Percent-encoding). 
- However, when the metric identifier appears as the `[METRIC_ID]` part - of a `metric_name` API parameter, then the metric identifier must be URL-encoded. - Example: `"projects/my-project/metrics/nginx%2Frequests"`.' - x-kubernetes-immutable: true - project: - type: string - x-dcl-go-name: Project - description: The resource name of the project in which to create the metric. - x-kubernetes-immutable: true - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - parent: true - updateTime: - type: string - format: date-time - x-dcl-go-name: UpdateTime - readOnly: true - description: Output only. The last update timestamp of the metric. This - field may not be present for older metrics. - x-kubernetes-immutable: true - valueExtractor: - type: string - x-dcl-go-name: ValueExtractor - description: 'Optional. A `value_extractor` is required when using a distribution - logs-based metric to extract the values to record from a log entry. Two - functions are supported for value extraction: `EXTRACT(field)` or `REGEXP_EXTRACT(field, - regex)`. The argument are: 1. field: The name of the log entry field from - which the value is to be extracted. 2. regex: A regular expression using - the Google RE2 syntax (https://github.com/google/re2/wiki/Syntax) with - a single capture group to extract data from the specified log entry field. - The value of the field is converted to a string before applying the regex. - It is an error to specify a regex that does not include exactly one capture - group. The result of the extraction must be convertible to a double type, - as the distribution always records double values. If either the extraction - or the conversion to double fails, then those values are not recorded - in the distribution. 
Example: `REGEXP_EXTRACT(jsonPayload.request, ".*quantity=(d+).*")`' diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_internal.go deleted file mode 100644 index 991e5f6d14..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_internal.go +++ /dev/null @@ -1,3434 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package logging - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func (r *LogMetric) validate() error { - - if err := dcl.Required(r, "name"); err != nil { - return err - } - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.MetricDescriptor) { - if err := r.MetricDescriptor.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.BucketOptions) { - if err := r.BucketOptions.validate(); err != nil { - return err - } - } - return nil -} -func (r *LogMetricMetricDescriptor) validate() error { - if !dcl.IsEmptyValueIndirect(r.Metadata) { - if err := r.Metadata.validate(); err != nil { - return err - } - } - return nil -} -func (r *LogMetricMetricDescriptorLabels) validate() error { - return nil -} -func (r *LogMetricMetricDescriptorMetadata) validate() error { - return nil -} -func (r *LogMetricBucketOptions) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"LinearBuckets", "ExponentialBuckets", "ExplicitBuckets"}, r.LinearBuckets, r.ExponentialBuckets, r.ExplicitBuckets); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.LinearBuckets) { - if err := r.LinearBuckets.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.ExponentialBuckets) { - if err := r.ExponentialBuckets.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.ExplicitBuckets) { - if err := r.ExplicitBuckets.validate(); err != nil { - return err - } - } - return nil -} -func (r *LogMetricBucketOptionsLinearBuckets) validate() error { - return nil -} -func (r *LogMetricBucketOptionsExponentialBuckets) validate() error { - return nil -} -func (r *LogMetricBucketOptionsExplicitBuckets) validate() error { - return nil -} -func (r *LogMetric) 
basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://logging.googleapis.com/v2/", params) -} - -func (r *LogMetric) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/metrics/{{name}}", nr.basePath(), userBasePath, params), nil -} - -func (r *LogMetric) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/metrics", nr.basePath(), userBasePath, params), nil - -} - -func (r *LogMetric) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/metrics", nr.basePath(), userBasePath, params), nil - -} - -func (r *LogMetric) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/metrics/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// logMetricApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type logMetricApiOperation interface { - do(context.Context, *LogMetric, *Client) error -} - -// newUpdateLogMetricUpdateRequest creates a request for an -// LogMetric resource's update update type by filling in the update -// fields based on the intended state of the resource. 
-func newUpdateLogMetricUpdateRequest(ctx context.Context, f *LogMetric, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.Description; !dcl.IsEmptyValueIndirect(v) { - req["description"] = v - } - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - req["filter"] = v - } - if v := f.Disabled; !dcl.IsEmptyValueIndirect(v) { - req["disabled"] = v - } - if v, err := expandLogMetricMetricDescriptor(c, f.MetricDescriptor, res); err != nil { - return nil, fmt.Errorf("error expanding MetricDescriptor into metricDescriptor: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["metricDescriptor"] = v - } - if v := f.ValueExtractor; !dcl.IsEmptyValueIndirect(v) { - req["valueExtractor"] = v - } - if v := f.LabelExtractors; !dcl.IsEmptyValueIndirect(v) { - req["labelExtractors"] = v - } - if v, err := expandLogMetricBucketOptions(c, f.BucketOptions, res); err != nil { - return nil, fmt.Errorf("error expanding BucketOptions into bucketOptions: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["bucketOptions"] = v - } - return req, nil -} - -// marshalUpdateLogMetricUpdateRequest converts the update into -// the final JSON request body. -func marshalUpdateLogMetricUpdateRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateLogMetricUpdateOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. 
- -func (op *updateLogMetricUpdateOperation) do(ctx context.Context, r *LogMetric, c *Client) error { - _, err := c.GetLogMetric(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "update") - if err != nil { - return err - } - - req, err := newUpdateLogMetricUpdateRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateLogMetricUpdateRequest(c, req) - if err != nil { - return err - } - _, err = dcl.SendRequest(ctx, c.Config, "PUT", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - return nil -} - -func (c *Client) listLogMetricRaw(ctx context.Context, r *LogMetric, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != LogMetricMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listLogMetricOperation struct { - Metrics []map[string]interface{} `json:"metrics"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listLogMetric(ctx context.Context, r *LogMetric, pageToken string, pageSize int32) ([]*LogMetric, string, error) { - b, err := c.listLogMetricRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listLogMetricOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*LogMetric - for _, v := range m.Metrics { - res, err := unmarshalMapLogMetric(v, c, r) - if err != nil { - return nil, m.Token, err - } - 
res.Project = r.Project - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllLogMetric(ctx context.Context, f func(*LogMetric) bool, resources []*LogMetric) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteLogMetric(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteLogMetricOperation struct{} - -func (op *deleteLogMetricOperation) do(ctx context.Context, r *LogMetric, c *Client) error { - r, err := c.GetLogMetric(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "LogMetric not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetLogMetric checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return fmt.Errorf("failed to delete LogMetric: %w", err) - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. 
- retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetLogMetric(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createLogMetricOperation struct { - response map[string]interface{} -} - -func (op *createLogMetricOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createLogMetricOperation) do(ctx context.Context, r *LogMetric, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - - o, err := dcl.ResponseBodyAsJSON(resp) - if err != nil { - return fmt.Errorf("error decoding response body into JSON: %w", err) - } - op.response = o - - if _, err := c.GetLogMetric(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getLogMetricRaw(ctx context.Context, r *LogMetric) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if 
err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) logMetricDiffsForRawDesired(ctx context.Context, rawDesired *LogMetric, opts ...dcl.ApplyOption) (initial, desired *LogMetric, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. - var fetchState *LogMetric - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*LogMetric); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected LogMetric, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetLogMetric(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a LogMetric resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve LogMetric resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that LogMetric resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeLogMetricDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for LogMetric: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for LogMetric: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractLogMetricFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. 
- initial, err = canonicalizeLogMetricInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for LogMetric: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeLogMetricDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for LogMetric: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffLogMetric(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeLogMetricInitialState(rawInitial, rawDesired *LogMetric) (*LogMetric, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeLogMetricDesiredState(rawDesired, rawInitial *LogMetric, opts ...dcl.ApplyOption) (*LogMetric, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - rawDesired.MetricDescriptor = canonicalizeLogMetricMetricDescriptor(rawDesired.MetricDescriptor, nil, opts...) - rawDesired.BucketOptions = canonicalizeLogMetricBucketOptions(rawDesired.BucketOptions, nil, opts...) 
- - return rawDesired, nil - } - canonicalDesired := &LogMetric{} - if dcl.StringCanonicalize(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { - canonicalDesired.Description = rawInitial.Description - } else { - canonicalDesired.Description = rawDesired.Description - } - if dcl.StringCanonicalize(rawDesired.Filter, rawInitial.Filter) { - canonicalDesired.Filter = rawInitial.Filter - } else { - canonicalDesired.Filter = rawDesired.Filter - } - if dcl.BoolCanonicalize(rawDesired.Disabled, rawInitial.Disabled) { - canonicalDesired.Disabled = rawInitial.Disabled - } else { - canonicalDesired.Disabled = rawDesired.Disabled - } - canonicalDesired.MetricDescriptor = canonicalizeLogMetricMetricDescriptor(rawDesired.MetricDescriptor, rawInitial.MetricDescriptor, opts...) - if dcl.StringCanonicalize(rawDesired.ValueExtractor, rawInitial.ValueExtractor) { - canonicalDesired.ValueExtractor = rawInitial.ValueExtractor - } else { - canonicalDesired.ValueExtractor = rawDesired.ValueExtractor - } - if dcl.IsZeroValue(rawDesired.LabelExtractors) || (dcl.IsEmptyValueIndirect(rawDesired.LabelExtractors) && dcl.IsEmptyValueIndirect(rawInitial.LabelExtractors)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - canonicalDesired.LabelExtractors = rawInitial.LabelExtractors - } else { - canonicalDesired.LabelExtractors = rawDesired.LabelExtractors - } - canonicalDesired.BucketOptions = canonicalizeLogMetricBucketOptions(rawDesired.BucketOptions, rawInitial.BucketOptions, opts...) 
- if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - return canonicalDesired, nil -} - -func canonicalizeLogMetricNewState(c *Client, rawNew, rawDesired *LogMetric) (*LogMetric, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.StringCanonicalize(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { - rawNew.Description = rawDesired.Description - } else { - if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { - rawNew.Description = rawDesired.Description - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Filter) && dcl.IsEmptyValueIndirect(rawDesired.Filter) { - rawNew.Filter = rawDesired.Filter - } else { - if dcl.StringCanonicalize(rawDesired.Filter, rawNew.Filter) { - rawNew.Filter = rawDesired.Filter - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Disabled) && dcl.IsEmptyValueIndirect(rawDesired.Disabled) { - rawNew.Disabled = rawDesired.Disabled - } else { - if dcl.BoolCanonicalize(rawDesired.Disabled, rawNew.Disabled) { - rawNew.Disabled = rawDesired.Disabled - } - } - - if dcl.IsEmptyValueIndirect(rawNew.MetricDescriptor) && dcl.IsEmptyValueIndirect(rawDesired.MetricDescriptor) { - rawNew.MetricDescriptor = rawDesired.MetricDescriptor - } else { - rawNew.MetricDescriptor = canonicalizeNewLogMetricMetricDescriptor(c, rawDesired.MetricDescriptor, rawNew.MetricDescriptor) - } - - if dcl.IsEmptyValueIndirect(rawNew.ValueExtractor) && dcl.IsEmptyValueIndirect(rawDesired.ValueExtractor) { - rawNew.ValueExtractor = rawDesired.ValueExtractor - } else { - if dcl.StringCanonicalize(rawDesired.ValueExtractor, rawNew.ValueExtractor) { - rawNew.ValueExtractor = rawDesired.ValueExtractor - } - } - - if 
dcl.IsEmptyValueIndirect(rawNew.LabelExtractors) && dcl.IsEmptyValueIndirect(rawDesired.LabelExtractors) { - rawNew.LabelExtractors = rawDesired.LabelExtractors - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.BucketOptions) && dcl.IsEmptyValueIndirect(rawDesired.BucketOptions) { - rawNew.BucketOptions = rawDesired.BucketOptions - } else { - rawNew.BucketOptions = canonicalizeNewLogMetricBucketOptions(c, rawDesired.BucketOptions, rawNew.BucketOptions) - } - - if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { - rawNew.CreateTime = rawDesired.CreateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { - rawNew.UpdateTime = rawDesired.UpdateTime - } else { - } - - rawNew.Project = rawDesired.Project - - return rawNew, nil -} - -func canonicalizeLogMetricMetricDescriptor(des, initial *LogMetricMetricDescriptor, opts ...dcl.ApplyOption) *LogMetricMetricDescriptor { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &LogMetricMetricDescriptor{} - - cDes.Labels = canonicalizeLogMetricMetricDescriptorLabelsSlice(des.Labels, initial.Labels, opts...) - if dcl.IsZeroValue(des.MetricKind) || (dcl.IsEmptyValueIndirect(des.MetricKind) && dcl.IsEmptyValueIndirect(initial.MetricKind)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.MetricKind = initial.MetricKind - } else { - cDes.MetricKind = des.MetricKind - } - if canonicalizeLogMetricMetricDescriptorValueType(des.ValueType, initial.ValueType) || dcl.IsZeroValue(des.ValueType) { - cDes.ValueType = initial.ValueType - } else { - cDes.ValueType = des.ValueType - } - if dcl.StringCanonicalize(des.Unit, initial.Unit) || dcl.IsZeroValue(des.Unit) { - cDes.Unit = initial.Unit - } else { - cDes.Unit = des.Unit - } - if dcl.StringCanonicalize(des.DisplayName, initial.DisplayName) || dcl.IsZeroValue(des.DisplayName) { - cDes.DisplayName = initial.DisplayName - } else { - cDes.DisplayName = des.DisplayName - } - cDes.Metadata = canonicalizeLogMetricMetricDescriptorMetadata(des.Metadata, initial.Metadata, opts...) - if dcl.IsZeroValue(des.LaunchStage) || (dcl.IsEmptyValueIndirect(des.LaunchStage) && dcl.IsEmptyValueIndirect(initial.LaunchStage)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.LaunchStage = initial.LaunchStage - } else { - cDes.LaunchStage = des.LaunchStage - } - - return cDes -} - -func canonicalizeLogMetricMetricDescriptorSlice(des, initial []LogMetricMetricDescriptor, opts ...dcl.ApplyOption) []LogMetricMetricDescriptor { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]LogMetricMetricDescriptor, 0, len(des)) - for _, d := range des { - cd := canonicalizeLogMetricMetricDescriptor(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]LogMetricMetricDescriptor, 0, len(des)) - for i, d := range des { - cd := canonicalizeLogMetricMetricDescriptor(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewLogMetricMetricDescriptor(c *Client, des, nw *LogMetricMetricDescriptor) *LogMetricMetricDescriptor { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for LogMetricMetricDescriptor while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Name, nw.Name) { - nw.Name = des.Name - } - if dcl.StringCanonicalize(des.Type, nw.Type) { - nw.Type = des.Type - } - nw.Labels = canonicalizeNewLogMetricMetricDescriptorLabelsSet(c, des.Labels, nw.Labels) - if canonicalizeLogMetricMetricDescriptorValueType(des.ValueType, nw.ValueType) { - nw.ValueType = des.ValueType - } - if dcl.StringCanonicalize(des.Unit, nw.Unit) { - nw.Unit = des.Unit - } - if dcl.StringCanonicalize(des.Description, nw.Description) { - nw.Description = des.Description - } - if dcl.StringCanonicalize(des.DisplayName, nw.DisplayName) { - nw.DisplayName = des.DisplayName - } - nw.Metadata = des.Metadata - nw.LaunchStage = des.LaunchStage - if dcl.StringArrayCanonicalize(des.MonitoredResourceTypes, nw.MonitoredResourceTypes) { - nw.MonitoredResourceTypes = des.MonitoredResourceTypes - } - - return nw -} - -func canonicalizeNewLogMetricMetricDescriptorSet(c *Client, des, nw []LogMetricMetricDescriptor) []LogMetricMetricDescriptor { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []LogMetricMetricDescriptor - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareLogMetricMetricDescriptorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewLogMetricMetricDescriptor(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewLogMetricMetricDescriptorSlice(c *Client, des, nw []LogMetricMetricDescriptor) []LogMetricMetricDescriptor { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []LogMetricMetricDescriptor - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewLogMetricMetricDescriptor(c, &d, &n)) - } - - return items -} - -func canonicalizeLogMetricMetricDescriptorLabels(des, initial *LogMetricMetricDescriptorLabels, opts ...dcl.ApplyOption) *LogMetricMetricDescriptorLabels { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &LogMetricMetricDescriptorLabels{} - - if dcl.StringCanonicalize(des.Key, initial.Key) || dcl.IsZeroValue(des.Key) { - cDes.Key = initial.Key - } else { - cDes.Key = des.Key - } - if canonicalizeLogMetricMetricDescriptorLabelsValueType(des.ValueType, initial.ValueType) || dcl.IsZeroValue(des.ValueType) { - cDes.ValueType = initial.ValueType - } else { - cDes.ValueType = des.ValueType - } - if dcl.StringCanonicalize(des.Description, initial.Description) || dcl.IsZeroValue(des.Description) { - cDes.Description = initial.Description - } else { - cDes.Description = des.Description - } - - return cDes -} - -func 
canonicalizeLogMetricMetricDescriptorLabelsSlice(des, initial []LogMetricMetricDescriptorLabels, opts ...dcl.ApplyOption) []LogMetricMetricDescriptorLabels { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]LogMetricMetricDescriptorLabels, 0, len(des)) - for _, d := range des { - cd := canonicalizeLogMetricMetricDescriptorLabels(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]LogMetricMetricDescriptorLabels, 0, len(des)) - for i, d := range des { - cd := canonicalizeLogMetricMetricDescriptorLabels(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewLogMetricMetricDescriptorLabels(c *Client, des, nw *LogMetricMetricDescriptorLabels) *LogMetricMetricDescriptorLabels { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for LogMetricMetricDescriptorLabels while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Key, nw.Key) { - nw.Key = des.Key - } - if canonicalizeLogMetricMetricDescriptorLabelsValueType(des.ValueType, nw.ValueType) { - nw.ValueType = des.ValueType - } - if dcl.StringCanonicalize(des.Description, nw.Description) { - nw.Description = des.Description - } - - return nw -} - -func canonicalizeNewLogMetricMetricDescriptorLabelsSet(c *Client, des, nw []LogMetricMetricDescriptorLabels) []LogMetricMetricDescriptorLabels { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []LogMetricMetricDescriptorLabels - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareLogMetricMetricDescriptorLabelsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewLogMetricMetricDescriptorLabels(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewLogMetricMetricDescriptorLabelsSlice(c *Client, des, nw []LogMetricMetricDescriptorLabels) []LogMetricMetricDescriptorLabels { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []LogMetricMetricDescriptorLabels - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewLogMetricMetricDescriptorLabels(c, &d, &n)) - } - - return items -} - -func canonicalizeLogMetricMetricDescriptorMetadata(des, initial *LogMetricMetricDescriptorMetadata, opts ...dcl.ApplyOption) *LogMetricMetricDescriptorMetadata { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &LogMetricMetricDescriptorMetadata{} - - if dcl.StringCanonicalize(des.SamplePeriod, initial.SamplePeriod) || dcl.IsZeroValue(des.SamplePeriod) { - cDes.SamplePeriod = initial.SamplePeriod - } else { - cDes.SamplePeriod = des.SamplePeriod - } - if dcl.StringCanonicalize(des.IngestDelay, initial.IngestDelay) || dcl.IsZeroValue(des.IngestDelay) { - cDes.IngestDelay = initial.IngestDelay - } else { - cDes.IngestDelay = des.IngestDelay - } - - return cDes -} - -func canonicalizeLogMetricMetricDescriptorMetadataSlice(des, initial []LogMetricMetricDescriptorMetadata, opts ...dcl.ApplyOption) 
[]LogMetricMetricDescriptorMetadata { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]LogMetricMetricDescriptorMetadata, 0, len(des)) - for _, d := range des { - cd := canonicalizeLogMetricMetricDescriptorMetadata(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]LogMetricMetricDescriptorMetadata, 0, len(des)) - for i, d := range des { - cd := canonicalizeLogMetricMetricDescriptorMetadata(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewLogMetricMetricDescriptorMetadata(c *Client, des, nw *LogMetricMetricDescriptorMetadata) *LogMetricMetricDescriptorMetadata { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for LogMetricMetricDescriptorMetadata while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.SamplePeriod, nw.SamplePeriod) { - nw.SamplePeriod = des.SamplePeriod - } - if dcl.StringCanonicalize(des.IngestDelay, nw.IngestDelay) { - nw.IngestDelay = des.IngestDelay - } - - return nw -} - -func canonicalizeNewLogMetricMetricDescriptorMetadataSet(c *Client, des, nw []LogMetricMetricDescriptorMetadata) []LogMetricMetricDescriptorMetadata { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []LogMetricMetricDescriptorMetadata - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareLogMetricMetricDescriptorMetadataNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewLogMetricMetricDescriptorMetadata(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewLogMetricMetricDescriptorMetadataSlice(c *Client, des, nw []LogMetricMetricDescriptorMetadata) []LogMetricMetricDescriptorMetadata { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []LogMetricMetricDescriptorMetadata - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewLogMetricMetricDescriptorMetadata(c, &d, &n)) - } - - return items -} - -func canonicalizeLogMetricBucketOptions(des, initial *LogMetricBucketOptions, opts ...dcl.ApplyOption) *LogMetricBucketOptions { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.LinearBuckets != nil || (initial != nil && initial.LinearBuckets != nil) { - // Check if anything else is set. - if dcl.AnySet(des.ExponentialBuckets, des.ExplicitBuckets) { - des.LinearBuckets = nil - if initial != nil { - initial.LinearBuckets = nil - } - } - } - - if des.ExponentialBuckets != nil || (initial != nil && initial.ExponentialBuckets != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.LinearBuckets, des.ExplicitBuckets) { - des.ExponentialBuckets = nil - if initial != nil { - initial.ExponentialBuckets = nil - } - } - } - - if des.ExplicitBuckets != nil || (initial != nil && initial.ExplicitBuckets != nil) { - // Check if anything else is set. - if dcl.AnySet(des.LinearBuckets, des.ExponentialBuckets) { - des.ExplicitBuckets = nil - if initial != nil { - initial.ExplicitBuckets = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &LogMetricBucketOptions{} - - cDes.LinearBuckets = canonicalizeLogMetricBucketOptionsLinearBuckets(des.LinearBuckets, initial.LinearBuckets, opts...) - cDes.ExponentialBuckets = canonicalizeLogMetricBucketOptionsExponentialBuckets(des.ExponentialBuckets, initial.ExponentialBuckets, opts...) - cDes.ExplicitBuckets = canonicalizeLogMetricBucketOptionsExplicitBuckets(des.ExplicitBuckets, initial.ExplicitBuckets, opts...) - - return cDes -} - -func canonicalizeLogMetricBucketOptionsSlice(des, initial []LogMetricBucketOptions, opts ...dcl.ApplyOption) []LogMetricBucketOptions { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]LogMetricBucketOptions, 0, len(des)) - for _, d := range des { - cd := canonicalizeLogMetricBucketOptions(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]LogMetricBucketOptions, 0, len(des)) - for i, d := range des { - cd := canonicalizeLogMetricBucketOptions(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewLogMetricBucketOptions(c *Client, des, nw *LogMetricBucketOptions) *LogMetricBucketOptions { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for LogMetricBucketOptions while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.LinearBuckets = canonicalizeNewLogMetricBucketOptionsLinearBuckets(c, des.LinearBuckets, nw.LinearBuckets) - nw.ExponentialBuckets = canonicalizeNewLogMetricBucketOptionsExponentialBuckets(c, des.ExponentialBuckets, nw.ExponentialBuckets) - nw.ExplicitBuckets = canonicalizeNewLogMetricBucketOptionsExplicitBuckets(c, des.ExplicitBuckets, nw.ExplicitBuckets) - - return nw -} - -func canonicalizeNewLogMetricBucketOptionsSet(c *Client, des, nw []LogMetricBucketOptions) []LogMetricBucketOptions { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []LogMetricBucketOptions - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareLogMetricBucketOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewLogMetricBucketOptions(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewLogMetricBucketOptionsSlice(c *Client, des, nw []LogMetricBucketOptions) []LogMetricBucketOptions { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []LogMetricBucketOptions - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewLogMetricBucketOptions(c, &d, &n)) - } - - return items -} - -func canonicalizeLogMetricBucketOptionsLinearBuckets(des, initial *LogMetricBucketOptionsLinearBuckets, opts ...dcl.ApplyOption) *LogMetricBucketOptionsLinearBuckets { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &LogMetricBucketOptionsLinearBuckets{} - - if dcl.IsZeroValue(des.NumFiniteBuckets) || (dcl.IsEmptyValueIndirect(des.NumFiniteBuckets) && dcl.IsEmptyValueIndirect(initial.NumFiniteBuckets)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumFiniteBuckets = initial.NumFiniteBuckets - } else { - cDes.NumFiniteBuckets = des.NumFiniteBuckets - } - if dcl.IsZeroValue(des.Width) || (dcl.IsEmptyValueIndirect(des.Width) && dcl.IsEmptyValueIndirect(initial.Width)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Width = initial.Width - } else { - cDes.Width = des.Width - } - if dcl.IsZeroValue(des.Offset) || (dcl.IsEmptyValueIndirect(des.Offset) && dcl.IsEmptyValueIndirect(initial.Offset)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Offset = initial.Offset - } else { - cDes.Offset = des.Offset - } - - return cDes -} - -func canonicalizeLogMetricBucketOptionsLinearBucketsSlice(des, initial []LogMetricBucketOptionsLinearBuckets, opts ...dcl.ApplyOption) []LogMetricBucketOptionsLinearBuckets { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]LogMetricBucketOptionsLinearBuckets, 0, len(des)) - for _, d := range des { - cd := canonicalizeLogMetricBucketOptionsLinearBuckets(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]LogMetricBucketOptionsLinearBuckets, 0, len(des)) - for i, d := range des { - cd := canonicalizeLogMetricBucketOptionsLinearBuckets(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewLogMetricBucketOptionsLinearBuckets(c *Client, des, nw *LogMetricBucketOptionsLinearBuckets) *LogMetricBucketOptionsLinearBuckets { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for LogMetricBucketOptionsLinearBuckets while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewLogMetricBucketOptionsLinearBucketsSet(c *Client, des, nw []LogMetricBucketOptionsLinearBuckets) []LogMetricBucketOptionsLinearBuckets { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []LogMetricBucketOptionsLinearBuckets - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareLogMetricBucketOptionsLinearBucketsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewLogMetricBucketOptionsLinearBuckets(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewLogMetricBucketOptionsLinearBucketsSlice(c *Client, des, nw []LogMetricBucketOptionsLinearBuckets) []LogMetricBucketOptionsLinearBuckets { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []LogMetricBucketOptionsLinearBuckets - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewLogMetricBucketOptionsLinearBuckets(c, &d, &n)) - } - - return items -} - -func canonicalizeLogMetricBucketOptionsExponentialBuckets(des, initial *LogMetricBucketOptionsExponentialBuckets, opts ...dcl.ApplyOption) *LogMetricBucketOptionsExponentialBuckets { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &LogMetricBucketOptionsExponentialBuckets{} - - if dcl.IsZeroValue(des.NumFiniteBuckets) || (dcl.IsEmptyValueIndirect(des.NumFiniteBuckets) && dcl.IsEmptyValueIndirect(initial.NumFiniteBuckets)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumFiniteBuckets = initial.NumFiniteBuckets - } else { - cDes.NumFiniteBuckets = des.NumFiniteBuckets - } - if dcl.IsZeroValue(des.GrowthFactor) || (dcl.IsEmptyValueIndirect(des.GrowthFactor) && dcl.IsEmptyValueIndirect(initial.GrowthFactor)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.GrowthFactor = initial.GrowthFactor - } else { - cDes.GrowthFactor = des.GrowthFactor - } - if dcl.IsZeroValue(des.Scale) || (dcl.IsEmptyValueIndirect(des.Scale) && dcl.IsEmptyValueIndirect(initial.Scale)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Scale = initial.Scale - } else { - cDes.Scale = des.Scale - } - - return cDes -} - -func canonicalizeLogMetricBucketOptionsExponentialBucketsSlice(des, initial []LogMetricBucketOptionsExponentialBuckets, opts ...dcl.ApplyOption) []LogMetricBucketOptionsExponentialBuckets { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]LogMetricBucketOptionsExponentialBuckets, 0, len(des)) - for _, d := range des { - cd := canonicalizeLogMetricBucketOptionsExponentialBuckets(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]LogMetricBucketOptionsExponentialBuckets, 0, len(des)) - for i, d := range des { - cd := canonicalizeLogMetricBucketOptionsExponentialBuckets(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewLogMetricBucketOptionsExponentialBuckets(c *Client, des, nw *LogMetricBucketOptionsExponentialBuckets) *LogMetricBucketOptionsExponentialBuckets { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for LogMetricBucketOptionsExponentialBuckets while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewLogMetricBucketOptionsExponentialBucketsSet(c *Client, des, nw []LogMetricBucketOptionsExponentialBuckets) []LogMetricBucketOptionsExponentialBuckets { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []LogMetricBucketOptionsExponentialBuckets - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareLogMetricBucketOptionsExponentialBucketsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewLogMetricBucketOptionsExponentialBuckets(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewLogMetricBucketOptionsExponentialBucketsSlice(c *Client, des, nw []LogMetricBucketOptionsExponentialBuckets) []LogMetricBucketOptionsExponentialBuckets { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []LogMetricBucketOptionsExponentialBuckets - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewLogMetricBucketOptionsExponentialBuckets(c, &d, &n)) - } - - return items -} - -func canonicalizeLogMetricBucketOptionsExplicitBuckets(des, initial *LogMetricBucketOptionsExplicitBuckets, opts ...dcl.ApplyOption) *LogMetricBucketOptionsExplicitBuckets { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &LogMetricBucketOptionsExplicitBuckets{} - - if dcl.IsZeroValue(des.Bounds) || (dcl.IsEmptyValueIndirect(des.Bounds) && dcl.IsEmptyValueIndirect(initial.Bounds)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Bounds = initial.Bounds - } else { - cDes.Bounds = des.Bounds - } - - return cDes -} - -func canonicalizeLogMetricBucketOptionsExplicitBucketsSlice(des, initial []LogMetricBucketOptionsExplicitBuckets, opts ...dcl.ApplyOption) []LogMetricBucketOptionsExplicitBuckets { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]LogMetricBucketOptionsExplicitBuckets, 0, len(des)) - for _, d := range des { - cd := canonicalizeLogMetricBucketOptionsExplicitBuckets(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]LogMetricBucketOptionsExplicitBuckets, 0, len(des)) - for i, d := range des { - cd := canonicalizeLogMetricBucketOptionsExplicitBuckets(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewLogMetricBucketOptionsExplicitBuckets(c *Client, des, nw *LogMetricBucketOptionsExplicitBuckets) *LogMetricBucketOptionsExplicitBuckets { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for LogMetricBucketOptionsExplicitBuckets while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewLogMetricBucketOptionsExplicitBucketsSet(c *Client, des, nw []LogMetricBucketOptionsExplicitBuckets) []LogMetricBucketOptionsExplicitBuckets { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []LogMetricBucketOptionsExplicitBuckets - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareLogMetricBucketOptionsExplicitBucketsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewLogMetricBucketOptionsExplicitBuckets(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewLogMetricBucketOptionsExplicitBucketsSlice(c *Client, des, nw []LogMetricBucketOptionsExplicitBuckets) []LogMetricBucketOptionsExplicitBuckets { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []LogMetricBucketOptionsExplicitBuckets - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewLogMetricBucketOptionsExplicitBuckets(c, &d, &n)) - } - - return items -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
-func diffLogMetric(c *Client, desired, actual *LogMetric, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Disabled, actual.Disabled, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("Disabled")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.MetricDescriptor, actual.MetricDescriptor, dcl.DiffInfo{ObjectFunction: compareLogMetricMetricDescriptorNewStyle, EmptyObject: EmptyLogMetricMetricDescriptor, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetricDescriptor")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.ValueExtractor, actual.ValueExtractor, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("ValueExtractor")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.LabelExtractors, actual.LabelExtractors, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("LabelExtractors")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.BucketOptions, actual.BucketOptions, dcl.DiffInfo{ObjectFunction: compareLogMetricBucketOptionsNewStyle, EmptyObject: EmptyLogMetricBucketOptions, OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("BucketOptions")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} -func compareLogMetricMetricDescriptorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*LogMetricMetricDescriptor) - if !ok { - desiredNotPointer, ok := d.(LogMetricMetricDescriptor) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricMetricDescriptor or *LogMetricMetricDescriptor", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*LogMetricMetricDescriptor) - if !ok { - actualNotPointer, ok := a.(LogMetricMetricDescriptor) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricMetricDescriptor", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Type, actual.Type, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Type")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{Type: "Set", ObjectFunction: compareLogMetricMetricDescriptorLabelsNewStyle, EmptyObject: EmptyLogMetricMetricDescriptorLabels, OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.MetricKind, actual.MetricKind, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetricKind")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.ValueType, actual.ValueType, dcl.DiffInfo{Type: "EnumType", CustomDiff: canonicalizeLogMetricMetricDescriptorValueType, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ValueType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Unit, actual.Unit, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("Unit")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Metadata, actual.Metadata, dcl.DiffInfo{Ignore: true, ObjectFunction: compareLogMetricMetricDescriptorMetadataNewStyle, EmptyObject: EmptyLogMetricMetricDescriptorMetadata, OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("Metadata")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LaunchStage, actual.LaunchStage, dcl.DiffInfo{Ignore: true, Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("LaunchStage")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.MonitoredResourceTypes, actual.MonitoredResourceTypes, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MonitoredResourceTypes")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareLogMetricMetricDescriptorLabelsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*LogMetricMetricDescriptorLabels) - if !ok { - desiredNotPointer, ok := d.(LogMetricMetricDescriptorLabels) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricMetricDescriptorLabels or *LogMetricMetricDescriptorLabels", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*LogMetricMetricDescriptorLabels) - if !ok { - actualNotPointer, ok := a.(LogMetricMetricDescriptorLabels) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricMetricDescriptorLabels", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Key, actual.Key, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Key")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ValueType, actual.ValueType, dcl.DiffInfo{Type: "EnumType", CustomDiff: canonicalizeLogMetricMetricDescriptorLabelsValueType, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ValueType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareLogMetricMetricDescriptorMetadataNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*LogMetricMetricDescriptorMetadata) - if !ok { - desiredNotPointer, ok := d.(LogMetricMetricDescriptorMetadata) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricMetricDescriptorMetadata or *LogMetricMetricDescriptorMetadata", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*LogMetricMetricDescriptorMetadata) - if !ok { - actualNotPointer, ok := a.(LogMetricMetricDescriptorMetadata) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricMetricDescriptorMetadata", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.SamplePeriod, actual.SamplePeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("SamplePeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.IngestDelay, actual.IngestDelay, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("IngestDelay")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareLogMetricBucketOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*LogMetricBucketOptions) - if !ok { - desiredNotPointer, ok := d.(LogMetricBucketOptions) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricBucketOptions or *LogMetricBucketOptions", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*LogMetricBucketOptions) - if !ok { - actualNotPointer, ok := a.(LogMetricBucketOptions) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricBucketOptions", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.LinearBuckets, actual.LinearBuckets, dcl.DiffInfo{ObjectFunction: compareLogMetricBucketOptionsLinearBucketsNewStyle, EmptyObject: EmptyLogMetricBucketOptionsLinearBuckets, OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("LinearBuckets")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ExponentialBuckets, actual.ExponentialBuckets, dcl.DiffInfo{ObjectFunction: compareLogMetricBucketOptionsExponentialBucketsNewStyle, EmptyObject: EmptyLogMetricBucketOptionsExponentialBuckets, OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("ExponentialBuckets")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ExplicitBuckets, actual.ExplicitBuckets, dcl.DiffInfo{ObjectFunction: compareLogMetricBucketOptionsExplicitBucketsNewStyle, EmptyObject: EmptyLogMetricBucketOptionsExplicitBuckets, OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("ExplicitBuckets")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareLogMetricBucketOptionsLinearBucketsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*LogMetricBucketOptionsLinearBuckets) - if !ok { - desiredNotPointer, ok := d.(LogMetricBucketOptionsLinearBuckets) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricBucketOptionsLinearBuckets or *LogMetricBucketOptionsLinearBuckets", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*LogMetricBucketOptionsLinearBuckets) - if !ok { - actualNotPointer, ok := a.(LogMetricBucketOptionsLinearBuckets) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricBucketOptionsLinearBuckets", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.NumFiniteBuckets, actual.NumFiniteBuckets, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("NumFiniteBuckets")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Width, actual.Width, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("Width")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Offset, actual.Offset, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("Offset")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareLogMetricBucketOptionsExponentialBucketsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*LogMetricBucketOptionsExponentialBuckets) - if !ok { - desiredNotPointer, ok := d.(LogMetricBucketOptionsExponentialBuckets) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricBucketOptionsExponentialBuckets or *LogMetricBucketOptionsExponentialBuckets", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*LogMetricBucketOptionsExponentialBuckets) - if !ok { - actualNotPointer, ok := a.(LogMetricBucketOptionsExponentialBuckets) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricBucketOptionsExponentialBuckets", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.NumFiniteBuckets, actual.NumFiniteBuckets, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("NumFiniteBuckets")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GrowthFactor, actual.GrowthFactor, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("GrowthFactor")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Scale, actual.Scale, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("Scale")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareLogMetricBucketOptionsExplicitBucketsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*LogMetricBucketOptionsExplicitBuckets) - if !ok { - desiredNotPointer, ok := d.(LogMetricBucketOptionsExplicitBuckets) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricBucketOptionsExplicitBuckets or *LogMetricBucketOptionsExplicitBuckets", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*LogMetricBucketOptionsExplicitBuckets) - if !ok { - actualNotPointer, ok := a.(LogMetricBucketOptionsExplicitBuckets) - if !ok { - return nil, fmt.Errorf("obj %v is not a LogMetricBucketOptionsExplicitBuckets", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Bounds, actual.Bounds, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogMetricUpdateOperation")}, fn.AddNest("Bounds")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. 
-func (r *LogMetric) urlNormalized() *LogMetric { - normalized := dcl.Copy(*r).(LogMetric) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.Description = dcl.SelfLinkToName(r.Description) - normalized.Filter = dcl.SelfLinkToName(r.Filter) - normalized.ValueExtractor = dcl.SelfLinkToName(r.ValueExtractor) - normalized.Project = dcl.SelfLinkToName(r.Project) - return &normalized -} - -func (r *LogMetric) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "update" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/metrics/{{name}}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the LogMetric resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *LogMetric) marshal(c *Client) ([]byte, error) { - m, err := expandLogMetric(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling LogMetric: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalLogMetric decodes JSON responses into the LogMetric resource schema. -func unmarshalLogMetric(b []byte, c *Client, res *LogMetric) (*LogMetric, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapLogMetric(m, c, res) -} - -func unmarshalMapLogMetric(m map[string]interface{}, c *Client, res *LogMetric) (*LogMetric, error) { - - flattened := flattenLogMetric(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandLogMetric expands LogMetric into a JSON request object. 
-func expandLogMetric(c *Client, f *LogMetric) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v := f.Name; dcl.ValueShouldBeSent(v) { - m["name"] = v - } - if v := f.Description; dcl.ValueShouldBeSent(v) { - m["description"] = v - } - if v := f.Filter; dcl.ValueShouldBeSent(v) { - m["filter"] = v - } - if v := f.Disabled; dcl.ValueShouldBeSent(v) { - m["disabled"] = v - } - if v, err := expandLogMetricMetricDescriptor(c, f.MetricDescriptor, res); err != nil { - return nil, fmt.Errorf("error expanding MetricDescriptor into metricDescriptor: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["metricDescriptor"] = v - } - if v := f.ValueExtractor; dcl.ValueShouldBeSent(v) { - m["valueExtractor"] = v - } - if v := f.LabelExtractors; dcl.ValueShouldBeSent(v) { - m["labelExtractors"] = v - } - if v, err := expandLogMetricBucketOptions(c, f.BucketOptions, res); err != nil { - return nil, fmt.Errorf("error expanding BucketOptions into bucketOptions: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["bucketOptions"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - - return m, nil -} - -// flattenLogMetric flattens LogMetric from a JSON request object into the -// LogMetric type. 
-func flattenLogMetric(c *Client, i interface{}, res *LogMetric) *LogMetric { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &LogMetric{} - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.Description = dcl.FlattenString(m["description"]) - resultRes.Filter = dcl.FlattenString(m["filter"]) - resultRes.Disabled = dcl.FlattenBool(m["disabled"]) - resultRes.MetricDescriptor = flattenLogMetricMetricDescriptor(c, m["metricDescriptor"], res) - resultRes.ValueExtractor = dcl.FlattenString(m["valueExtractor"]) - resultRes.LabelExtractors = dcl.FlattenKeyValuePairs(m["labelExtractors"]) - resultRes.BucketOptions = flattenLogMetricBucketOptions(c, m["bucketOptions"], res) - resultRes.CreateTime = dcl.FlattenString(m["createTime"]) - resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) - resultRes.Project = dcl.FlattenString(m["project"]) - - return resultRes -} - -// expandLogMetricMetricDescriptorMap expands the contents of LogMetricMetricDescriptor into a JSON -// request object. -func expandLogMetricMetricDescriptorMap(c *Client, f map[string]LogMetricMetricDescriptor, res *LogMetric) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandLogMetricMetricDescriptor(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandLogMetricMetricDescriptorSlice expands the contents of LogMetricMetricDescriptor into a JSON -// request object. 
-func expandLogMetricMetricDescriptorSlice(c *Client, f []LogMetricMetricDescriptor, res *LogMetric) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandLogMetricMetricDescriptor(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenLogMetricMetricDescriptorMap flattens the contents of LogMetricMetricDescriptor from a JSON -// response object. -func flattenLogMetricMetricDescriptorMap(c *Client, i interface{}, res *LogMetric) map[string]LogMetricMetricDescriptor { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]LogMetricMetricDescriptor{} - } - - if len(a) == 0 { - return map[string]LogMetricMetricDescriptor{} - } - - items := make(map[string]LogMetricMetricDescriptor) - for k, item := range a { - items[k] = *flattenLogMetricMetricDescriptor(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenLogMetricMetricDescriptorSlice flattens the contents of LogMetricMetricDescriptor from a JSON -// response object. -func flattenLogMetricMetricDescriptorSlice(c *Client, i interface{}, res *LogMetric) []LogMetricMetricDescriptor { - a, ok := i.([]interface{}) - if !ok { - return []LogMetricMetricDescriptor{} - } - - if len(a) == 0 { - return []LogMetricMetricDescriptor{} - } - - items := make([]LogMetricMetricDescriptor, 0, len(a)) - for _, item := range a { - items = append(items, *flattenLogMetricMetricDescriptor(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandLogMetricMetricDescriptor expands an instance of LogMetricMetricDescriptor into a JSON -// request object. 
-func expandLogMetricMetricDescriptor(c *Client, f *LogMetricMetricDescriptor, res *LogMetric) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandLogMetricMetricDescriptorLabelsSlice(c, f.Labels, res); err != nil { - return nil, fmt.Errorf("error expanding Labels into labels: %w", err) - } else if v != nil { - m["labels"] = v - } - if v := f.MetricKind; !dcl.IsEmptyValueIndirect(v) { - m["metricKind"] = v - } - if v := f.ValueType; !dcl.IsEmptyValueIndirect(v) { - m["valueType"] = v - } - if v := f.Unit; !dcl.IsEmptyValueIndirect(v) { - m["unit"] = v - } - if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { - m["displayName"] = v - } - if v, err := expandLogMetricMetricDescriptorMetadata(c, f.Metadata, res); err != nil { - return nil, fmt.Errorf("error expanding Metadata into metadata: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["metadata"] = v - } - if v := f.LaunchStage; !dcl.IsEmptyValueIndirect(v) { - m["launchStage"] = v - } - - return m, nil -} - -// flattenLogMetricMetricDescriptor flattens an instance of LogMetricMetricDescriptor from a JSON -// response object. 
-func flattenLogMetricMetricDescriptor(c *Client, i interface{}, res *LogMetric) *LogMetricMetricDescriptor { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &LogMetricMetricDescriptor{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyLogMetricMetricDescriptor - } - r.Name = dcl.FlattenString(m["name"]) - r.Type = dcl.FlattenString(m["type"]) - r.Labels = flattenLogMetricMetricDescriptorLabelsSlice(c, m["labels"], res) - r.MetricKind = flattenLogMetricMetricDescriptorMetricKindEnum(m["metricKind"]) - r.ValueType = flattenLogMetricMetricDescriptorValueTypeEnum(m["valueType"]) - r.Unit = dcl.FlattenString(m["unit"]) - r.Description = dcl.FlattenString(m["description"]) - r.DisplayName = dcl.FlattenString(m["displayName"]) - r.Metadata = flattenLogMetricMetricDescriptorMetadata(c, m["metadata"], res) - r.LaunchStage = flattenLogMetricMetricDescriptorLaunchStageEnum(m["launchStage"]) - r.MonitoredResourceTypes = dcl.FlattenStringSlice(m["monitoredResourceTypes"]) - - return r -} - -// expandLogMetricMetricDescriptorLabelsMap expands the contents of LogMetricMetricDescriptorLabels into a JSON -// request object. -func expandLogMetricMetricDescriptorLabelsMap(c *Client, f map[string]LogMetricMetricDescriptorLabels, res *LogMetric) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandLogMetricMetricDescriptorLabels(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandLogMetricMetricDescriptorLabelsSlice expands the contents of LogMetricMetricDescriptorLabels into a JSON -// request object. 
-func expandLogMetricMetricDescriptorLabelsSlice(c *Client, f []LogMetricMetricDescriptorLabels, res *LogMetric) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandLogMetricMetricDescriptorLabels(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenLogMetricMetricDescriptorLabelsMap flattens the contents of LogMetricMetricDescriptorLabels from a JSON -// response object. -func flattenLogMetricMetricDescriptorLabelsMap(c *Client, i interface{}, res *LogMetric) map[string]LogMetricMetricDescriptorLabels { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]LogMetricMetricDescriptorLabels{} - } - - if len(a) == 0 { - return map[string]LogMetricMetricDescriptorLabels{} - } - - items := make(map[string]LogMetricMetricDescriptorLabels) - for k, item := range a { - items[k] = *flattenLogMetricMetricDescriptorLabels(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenLogMetricMetricDescriptorLabelsSlice flattens the contents of LogMetricMetricDescriptorLabels from a JSON -// response object. -func flattenLogMetricMetricDescriptorLabelsSlice(c *Client, i interface{}, res *LogMetric) []LogMetricMetricDescriptorLabels { - a, ok := i.([]interface{}) - if !ok { - return []LogMetricMetricDescriptorLabels{} - } - - if len(a) == 0 { - return []LogMetricMetricDescriptorLabels{} - } - - items := make([]LogMetricMetricDescriptorLabels, 0, len(a)) - for _, item := range a { - items = append(items, *flattenLogMetricMetricDescriptorLabels(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandLogMetricMetricDescriptorLabels expands an instance of LogMetricMetricDescriptorLabels into a JSON -// request object. 
-func expandLogMetricMetricDescriptorLabels(c *Client, f *LogMetricMetricDescriptorLabels, res *LogMetric) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Key; !dcl.IsEmptyValueIndirect(v) { - m["key"] = v - } - if v := f.ValueType; !dcl.IsEmptyValueIndirect(v) { - m["valueType"] = v - } - if v := f.Description; !dcl.IsEmptyValueIndirect(v) { - m["description"] = v - } - - return m, nil -} - -// flattenLogMetricMetricDescriptorLabels flattens an instance of LogMetricMetricDescriptorLabels from a JSON -// response object. -func flattenLogMetricMetricDescriptorLabels(c *Client, i interface{}, res *LogMetric) *LogMetricMetricDescriptorLabels { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &LogMetricMetricDescriptorLabels{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyLogMetricMetricDescriptorLabels - } - r.Key = dcl.FlattenString(m["key"]) - r.ValueType = flattenLogMetricMetricDescriptorLabelsValueTypeEnum(m["valueType"]) - r.Description = dcl.FlattenString(m["description"]) - - return r -} - -// expandLogMetricMetricDescriptorMetadataMap expands the contents of LogMetricMetricDescriptorMetadata into a JSON -// request object. -func expandLogMetricMetricDescriptorMetadataMap(c *Client, f map[string]LogMetricMetricDescriptorMetadata, res *LogMetric) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandLogMetricMetricDescriptorMetadata(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandLogMetricMetricDescriptorMetadataSlice expands the contents of LogMetricMetricDescriptorMetadata into a JSON -// request object. 
-func expandLogMetricMetricDescriptorMetadataSlice(c *Client, f []LogMetricMetricDescriptorMetadata, res *LogMetric) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandLogMetricMetricDescriptorMetadata(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenLogMetricMetricDescriptorMetadataMap flattens the contents of LogMetricMetricDescriptorMetadata from a JSON -// response object. -func flattenLogMetricMetricDescriptorMetadataMap(c *Client, i interface{}, res *LogMetric) map[string]LogMetricMetricDescriptorMetadata { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]LogMetricMetricDescriptorMetadata{} - } - - if len(a) == 0 { - return map[string]LogMetricMetricDescriptorMetadata{} - } - - items := make(map[string]LogMetricMetricDescriptorMetadata) - for k, item := range a { - items[k] = *flattenLogMetricMetricDescriptorMetadata(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenLogMetricMetricDescriptorMetadataSlice flattens the contents of LogMetricMetricDescriptorMetadata from a JSON -// response object. -func flattenLogMetricMetricDescriptorMetadataSlice(c *Client, i interface{}, res *LogMetric) []LogMetricMetricDescriptorMetadata { - a, ok := i.([]interface{}) - if !ok { - return []LogMetricMetricDescriptorMetadata{} - } - - if len(a) == 0 { - return []LogMetricMetricDescriptorMetadata{} - } - - items := make([]LogMetricMetricDescriptorMetadata, 0, len(a)) - for _, item := range a { - items = append(items, *flattenLogMetricMetricDescriptorMetadata(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandLogMetricMetricDescriptorMetadata expands an instance of LogMetricMetricDescriptorMetadata into a JSON -// request object. 
-func expandLogMetricMetricDescriptorMetadata(c *Client, f *LogMetricMetricDescriptorMetadata, res *LogMetric) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.SamplePeriod; !dcl.IsEmptyValueIndirect(v) { - m["samplePeriod"] = v - } - if v := f.IngestDelay; !dcl.IsEmptyValueIndirect(v) { - m["ingestDelay"] = v - } - - return m, nil -} - -// flattenLogMetricMetricDescriptorMetadata flattens an instance of LogMetricMetricDescriptorMetadata from a JSON -// response object. -func flattenLogMetricMetricDescriptorMetadata(c *Client, i interface{}, res *LogMetric) *LogMetricMetricDescriptorMetadata { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &LogMetricMetricDescriptorMetadata{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyLogMetricMetricDescriptorMetadata - } - r.SamplePeriod = dcl.FlattenString(m["samplePeriod"]) - r.IngestDelay = dcl.FlattenString(m["ingestDelay"]) - - return r -} - -// expandLogMetricBucketOptionsMap expands the contents of LogMetricBucketOptions into a JSON -// request object. -func expandLogMetricBucketOptionsMap(c *Client, f map[string]LogMetricBucketOptions, res *LogMetric) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandLogMetricBucketOptions(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandLogMetricBucketOptionsSlice expands the contents of LogMetricBucketOptions into a JSON -// request object. 
-func expandLogMetricBucketOptionsSlice(c *Client, f []LogMetricBucketOptions, res *LogMetric) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandLogMetricBucketOptions(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenLogMetricBucketOptionsMap flattens the contents of LogMetricBucketOptions from a JSON -// response object. -func flattenLogMetricBucketOptionsMap(c *Client, i interface{}, res *LogMetric) map[string]LogMetricBucketOptions { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]LogMetricBucketOptions{} - } - - if len(a) == 0 { - return map[string]LogMetricBucketOptions{} - } - - items := make(map[string]LogMetricBucketOptions) - for k, item := range a { - items[k] = *flattenLogMetricBucketOptions(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenLogMetricBucketOptionsSlice flattens the contents of LogMetricBucketOptions from a JSON -// response object. -func flattenLogMetricBucketOptionsSlice(c *Client, i interface{}, res *LogMetric) []LogMetricBucketOptions { - a, ok := i.([]interface{}) - if !ok { - return []LogMetricBucketOptions{} - } - - if len(a) == 0 { - return []LogMetricBucketOptions{} - } - - items := make([]LogMetricBucketOptions, 0, len(a)) - for _, item := range a { - items = append(items, *flattenLogMetricBucketOptions(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandLogMetricBucketOptions expands an instance of LogMetricBucketOptions into a JSON -// request object. 
-func expandLogMetricBucketOptions(c *Client, f *LogMetricBucketOptions, res *LogMetric) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandLogMetricBucketOptionsLinearBuckets(c, f.LinearBuckets, res); err != nil { - return nil, fmt.Errorf("error expanding LinearBuckets into linearBuckets: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["linearBuckets"] = v - } - if v, err := expandLogMetricBucketOptionsExponentialBuckets(c, f.ExponentialBuckets, res); err != nil { - return nil, fmt.Errorf("error expanding ExponentialBuckets into exponentialBuckets: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["exponentialBuckets"] = v - } - if v, err := expandLogMetricBucketOptionsExplicitBuckets(c, f.ExplicitBuckets, res); err != nil { - return nil, fmt.Errorf("error expanding ExplicitBuckets into explicitBuckets: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["explicitBuckets"] = v - } - - return m, nil -} - -// flattenLogMetricBucketOptions flattens an instance of LogMetricBucketOptions from a JSON -// response object. -func flattenLogMetricBucketOptions(c *Client, i interface{}, res *LogMetric) *LogMetricBucketOptions { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &LogMetricBucketOptions{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyLogMetricBucketOptions - } - r.LinearBuckets = flattenLogMetricBucketOptionsLinearBuckets(c, m["linearBuckets"], res) - r.ExponentialBuckets = flattenLogMetricBucketOptionsExponentialBuckets(c, m["exponentialBuckets"], res) - r.ExplicitBuckets = flattenLogMetricBucketOptionsExplicitBuckets(c, m["explicitBuckets"], res) - - return r -} - -// expandLogMetricBucketOptionsLinearBucketsMap expands the contents of LogMetricBucketOptionsLinearBuckets into a JSON -// request object. 
-func expandLogMetricBucketOptionsLinearBucketsMap(c *Client, f map[string]LogMetricBucketOptionsLinearBuckets, res *LogMetric) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandLogMetricBucketOptionsLinearBuckets(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandLogMetricBucketOptionsLinearBucketsSlice expands the contents of LogMetricBucketOptionsLinearBuckets into a JSON -// request object. -func expandLogMetricBucketOptionsLinearBucketsSlice(c *Client, f []LogMetricBucketOptionsLinearBuckets, res *LogMetric) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandLogMetricBucketOptionsLinearBuckets(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenLogMetricBucketOptionsLinearBucketsMap flattens the contents of LogMetricBucketOptionsLinearBuckets from a JSON -// response object. -func flattenLogMetricBucketOptionsLinearBucketsMap(c *Client, i interface{}, res *LogMetric) map[string]LogMetricBucketOptionsLinearBuckets { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]LogMetricBucketOptionsLinearBuckets{} - } - - if len(a) == 0 { - return map[string]LogMetricBucketOptionsLinearBuckets{} - } - - items := make(map[string]LogMetricBucketOptionsLinearBuckets) - for k, item := range a { - items[k] = *flattenLogMetricBucketOptionsLinearBuckets(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenLogMetricBucketOptionsLinearBucketsSlice flattens the contents of LogMetricBucketOptionsLinearBuckets from a JSON -// response object. 
-func flattenLogMetricBucketOptionsLinearBucketsSlice(c *Client, i interface{}, res *LogMetric) []LogMetricBucketOptionsLinearBuckets { - a, ok := i.([]interface{}) - if !ok { - return []LogMetricBucketOptionsLinearBuckets{} - } - - if len(a) == 0 { - return []LogMetricBucketOptionsLinearBuckets{} - } - - items := make([]LogMetricBucketOptionsLinearBuckets, 0, len(a)) - for _, item := range a { - items = append(items, *flattenLogMetricBucketOptionsLinearBuckets(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandLogMetricBucketOptionsLinearBuckets expands an instance of LogMetricBucketOptionsLinearBuckets into a JSON -// request object. -func expandLogMetricBucketOptionsLinearBuckets(c *Client, f *LogMetricBucketOptionsLinearBuckets, res *LogMetric) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.NumFiniteBuckets; !dcl.IsEmptyValueIndirect(v) { - m["numFiniteBuckets"] = v - } - if v := f.Width; !dcl.IsEmptyValueIndirect(v) { - m["width"] = v - } - if v := f.Offset; !dcl.IsEmptyValueIndirect(v) { - m["offset"] = v - } - - return m, nil -} - -// flattenLogMetricBucketOptionsLinearBuckets flattens an instance of LogMetricBucketOptionsLinearBuckets from a JSON -// response object. -func flattenLogMetricBucketOptionsLinearBuckets(c *Client, i interface{}, res *LogMetric) *LogMetricBucketOptionsLinearBuckets { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &LogMetricBucketOptionsLinearBuckets{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyLogMetricBucketOptionsLinearBuckets - } - r.NumFiniteBuckets = dcl.FlattenInteger(m["numFiniteBuckets"]) - r.Width = dcl.FlattenDouble(m["width"]) - r.Offset = dcl.FlattenDouble(m["offset"]) - - return r -} - -// expandLogMetricBucketOptionsExponentialBucketsMap expands the contents of LogMetricBucketOptionsExponentialBuckets into a JSON -// request object. 
-func expandLogMetricBucketOptionsExponentialBucketsMap(c *Client, f map[string]LogMetricBucketOptionsExponentialBuckets, res *LogMetric) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandLogMetricBucketOptionsExponentialBuckets(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandLogMetricBucketOptionsExponentialBucketsSlice expands the contents of LogMetricBucketOptionsExponentialBuckets into a JSON -// request object. -func expandLogMetricBucketOptionsExponentialBucketsSlice(c *Client, f []LogMetricBucketOptionsExponentialBuckets, res *LogMetric) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandLogMetricBucketOptionsExponentialBuckets(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenLogMetricBucketOptionsExponentialBucketsMap flattens the contents of LogMetricBucketOptionsExponentialBuckets from a JSON -// response object. -func flattenLogMetricBucketOptionsExponentialBucketsMap(c *Client, i interface{}, res *LogMetric) map[string]LogMetricBucketOptionsExponentialBuckets { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]LogMetricBucketOptionsExponentialBuckets{} - } - - if len(a) == 0 { - return map[string]LogMetricBucketOptionsExponentialBuckets{} - } - - items := make(map[string]LogMetricBucketOptionsExponentialBuckets) - for k, item := range a { - items[k] = *flattenLogMetricBucketOptionsExponentialBuckets(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenLogMetricBucketOptionsExponentialBucketsSlice flattens the contents of LogMetricBucketOptionsExponentialBuckets from a JSON -// response object. 
-func flattenLogMetricBucketOptionsExponentialBucketsSlice(c *Client, i interface{}, res *LogMetric) []LogMetricBucketOptionsExponentialBuckets { - a, ok := i.([]interface{}) - if !ok { - return []LogMetricBucketOptionsExponentialBuckets{} - } - - if len(a) == 0 { - return []LogMetricBucketOptionsExponentialBuckets{} - } - - items := make([]LogMetricBucketOptionsExponentialBuckets, 0, len(a)) - for _, item := range a { - items = append(items, *flattenLogMetricBucketOptionsExponentialBuckets(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandLogMetricBucketOptionsExponentialBuckets expands an instance of LogMetricBucketOptionsExponentialBuckets into a JSON -// request object. -func expandLogMetricBucketOptionsExponentialBuckets(c *Client, f *LogMetricBucketOptionsExponentialBuckets, res *LogMetric) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.NumFiniteBuckets; !dcl.IsEmptyValueIndirect(v) { - m["numFiniteBuckets"] = v - } - if v := f.GrowthFactor; !dcl.IsEmptyValueIndirect(v) { - m["growthFactor"] = v - } - if v := f.Scale; !dcl.IsEmptyValueIndirect(v) { - m["scale"] = v - } - - return m, nil -} - -// flattenLogMetricBucketOptionsExponentialBuckets flattens an instance of LogMetricBucketOptionsExponentialBuckets from a JSON -// response object. 
-func flattenLogMetricBucketOptionsExponentialBuckets(c *Client, i interface{}, res *LogMetric) *LogMetricBucketOptionsExponentialBuckets { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &LogMetricBucketOptionsExponentialBuckets{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyLogMetricBucketOptionsExponentialBuckets - } - r.NumFiniteBuckets = dcl.FlattenInteger(m["numFiniteBuckets"]) - r.GrowthFactor = dcl.FlattenDouble(m["growthFactor"]) - r.Scale = dcl.FlattenDouble(m["scale"]) - - return r -} - -// expandLogMetricBucketOptionsExplicitBucketsMap expands the contents of LogMetricBucketOptionsExplicitBuckets into a JSON -// request object. -func expandLogMetricBucketOptionsExplicitBucketsMap(c *Client, f map[string]LogMetricBucketOptionsExplicitBuckets, res *LogMetric) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandLogMetricBucketOptionsExplicitBuckets(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandLogMetricBucketOptionsExplicitBucketsSlice expands the contents of LogMetricBucketOptionsExplicitBuckets into a JSON -// request object. -func expandLogMetricBucketOptionsExplicitBucketsSlice(c *Client, f []LogMetricBucketOptionsExplicitBuckets, res *LogMetric) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandLogMetricBucketOptionsExplicitBuckets(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenLogMetricBucketOptionsExplicitBucketsMap flattens the contents of LogMetricBucketOptionsExplicitBuckets from a JSON -// response object. 
-func flattenLogMetricBucketOptionsExplicitBucketsMap(c *Client, i interface{}, res *LogMetric) map[string]LogMetricBucketOptionsExplicitBuckets { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]LogMetricBucketOptionsExplicitBuckets{} - } - - if len(a) == 0 { - return map[string]LogMetricBucketOptionsExplicitBuckets{} - } - - items := make(map[string]LogMetricBucketOptionsExplicitBuckets) - for k, item := range a { - items[k] = *flattenLogMetricBucketOptionsExplicitBuckets(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenLogMetricBucketOptionsExplicitBucketsSlice flattens the contents of LogMetricBucketOptionsExplicitBuckets from a JSON -// response object. -func flattenLogMetricBucketOptionsExplicitBucketsSlice(c *Client, i interface{}, res *LogMetric) []LogMetricBucketOptionsExplicitBuckets { - a, ok := i.([]interface{}) - if !ok { - return []LogMetricBucketOptionsExplicitBuckets{} - } - - if len(a) == 0 { - return []LogMetricBucketOptionsExplicitBuckets{} - } - - items := make([]LogMetricBucketOptionsExplicitBuckets, 0, len(a)) - for _, item := range a { - items = append(items, *flattenLogMetricBucketOptionsExplicitBuckets(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandLogMetricBucketOptionsExplicitBuckets expands an instance of LogMetricBucketOptionsExplicitBuckets into a JSON -// request object. -func expandLogMetricBucketOptionsExplicitBuckets(c *Client, f *LogMetricBucketOptionsExplicitBuckets, res *LogMetric) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Bounds; v != nil { - m["bounds"] = v - } - - return m, nil -} - -// flattenLogMetricBucketOptionsExplicitBuckets flattens an instance of LogMetricBucketOptionsExplicitBuckets from a JSON -// response object. 
-func flattenLogMetricBucketOptionsExplicitBuckets(c *Client, i interface{}, res *LogMetric) *LogMetricBucketOptionsExplicitBuckets { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &LogMetricBucketOptionsExplicitBuckets{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyLogMetricBucketOptionsExplicitBuckets - } - r.Bounds = dcl.FlattenFloatSlice(m["bounds"]) - - return r -} - -// flattenLogMetricMetricDescriptorLabelsValueTypeEnumMap flattens the contents of LogMetricMetricDescriptorLabelsValueTypeEnum from a JSON -// response object. -func flattenLogMetricMetricDescriptorLabelsValueTypeEnumMap(c *Client, i interface{}, res *LogMetric) map[string]LogMetricMetricDescriptorLabelsValueTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]LogMetricMetricDescriptorLabelsValueTypeEnum{} - } - - if len(a) == 0 { - return map[string]LogMetricMetricDescriptorLabelsValueTypeEnum{} - } - - items := make(map[string]LogMetricMetricDescriptorLabelsValueTypeEnum) - for k, item := range a { - items[k] = *flattenLogMetricMetricDescriptorLabelsValueTypeEnum(item.(interface{})) - } - - return items -} - -// flattenLogMetricMetricDescriptorLabelsValueTypeEnumSlice flattens the contents of LogMetricMetricDescriptorLabelsValueTypeEnum from a JSON -// response object. 
-func flattenLogMetricMetricDescriptorLabelsValueTypeEnumSlice(c *Client, i interface{}, res *LogMetric) []LogMetricMetricDescriptorLabelsValueTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []LogMetricMetricDescriptorLabelsValueTypeEnum{} - } - - if len(a) == 0 { - return []LogMetricMetricDescriptorLabelsValueTypeEnum{} - } - - items := make([]LogMetricMetricDescriptorLabelsValueTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenLogMetricMetricDescriptorLabelsValueTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenLogMetricMetricDescriptorLabelsValueTypeEnum asserts that an interface is a string, and returns a -// pointer to a *LogMetricMetricDescriptorLabelsValueTypeEnum with the same value as that string. -func flattenLogMetricMetricDescriptorLabelsValueTypeEnum(i interface{}) *LogMetricMetricDescriptorLabelsValueTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return LogMetricMetricDescriptorLabelsValueTypeEnumRef(s) -} - -// flattenLogMetricMetricDescriptorMetricKindEnumMap flattens the contents of LogMetricMetricDescriptorMetricKindEnum from a JSON -// response object. -func flattenLogMetricMetricDescriptorMetricKindEnumMap(c *Client, i interface{}, res *LogMetric) map[string]LogMetricMetricDescriptorMetricKindEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]LogMetricMetricDescriptorMetricKindEnum{} - } - - if len(a) == 0 { - return map[string]LogMetricMetricDescriptorMetricKindEnum{} - } - - items := make(map[string]LogMetricMetricDescriptorMetricKindEnum) - for k, item := range a { - items[k] = *flattenLogMetricMetricDescriptorMetricKindEnum(item.(interface{})) - } - - return items -} - -// flattenLogMetricMetricDescriptorMetricKindEnumSlice flattens the contents of LogMetricMetricDescriptorMetricKindEnum from a JSON -// response object. 
-func flattenLogMetricMetricDescriptorMetricKindEnumSlice(c *Client, i interface{}, res *LogMetric) []LogMetricMetricDescriptorMetricKindEnum { - a, ok := i.([]interface{}) - if !ok { - return []LogMetricMetricDescriptorMetricKindEnum{} - } - - if len(a) == 0 { - return []LogMetricMetricDescriptorMetricKindEnum{} - } - - items := make([]LogMetricMetricDescriptorMetricKindEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenLogMetricMetricDescriptorMetricKindEnum(item.(interface{}))) - } - - return items -} - -// flattenLogMetricMetricDescriptorMetricKindEnum asserts that an interface is a string, and returns a -// pointer to a *LogMetricMetricDescriptorMetricKindEnum with the same value as that string. -func flattenLogMetricMetricDescriptorMetricKindEnum(i interface{}) *LogMetricMetricDescriptorMetricKindEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return LogMetricMetricDescriptorMetricKindEnumRef(s) -} - -// flattenLogMetricMetricDescriptorValueTypeEnumMap flattens the contents of LogMetricMetricDescriptorValueTypeEnum from a JSON -// response object. -func flattenLogMetricMetricDescriptorValueTypeEnumMap(c *Client, i interface{}, res *LogMetric) map[string]LogMetricMetricDescriptorValueTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]LogMetricMetricDescriptorValueTypeEnum{} - } - - if len(a) == 0 { - return map[string]LogMetricMetricDescriptorValueTypeEnum{} - } - - items := make(map[string]LogMetricMetricDescriptorValueTypeEnum) - for k, item := range a { - items[k] = *flattenLogMetricMetricDescriptorValueTypeEnum(item.(interface{})) - } - - return items -} - -// flattenLogMetricMetricDescriptorValueTypeEnumSlice flattens the contents of LogMetricMetricDescriptorValueTypeEnum from a JSON -// response object. 
-func flattenLogMetricMetricDescriptorValueTypeEnumSlice(c *Client, i interface{}, res *LogMetric) []LogMetricMetricDescriptorValueTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []LogMetricMetricDescriptorValueTypeEnum{} - } - - if len(a) == 0 { - return []LogMetricMetricDescriptorValueTypeEnum{} - } - - items := make([]LogMetricMetricDescriptorValueTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenLogMetricMetricDescriptorValueTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenLogMetricMetricDescriptorValueTypeEnum asserts that an interface is a string, and returns a -// pointer to a *LogMetricMetricDescriptorValueTypeEnum with the same value as that string. -func flattenLogMetricMetricDescriptorValueTypeEnum(i interface{}) *LogMetricMetricDescriptorValueTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return LogMetricMetricDescriptorValueTypeEnumRef(s) -} - -// flattenLogMetricMetricDescriptorLaunchStageEnumMap flattens the contents of LogMetricMetricDescriptorLaunchStageEnum from a JSON -// response object. -func flattenLogMetricMetricDescriptorLaunchStageEnumMap(c *Client, i interface{}, res *LogMetric) map[string]LogMetricMetricDescriptorLaunchStageEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]LogMetricMetricDescriptorLaunchStageEnum{} - } - - if len(a) == 0 { - return map[string]LogMetricMetricDescriptorLaunchStageEnum{} - } - - items := make(map[string]LogMetricMetricDescriptorLaunchStageEnum) - for k, item := range a { - items[k] = *flattenLogMetricMetricDescriptorLaunchStageEnum(item.(interface{})) - } - - return items -} - -// flattenLogMetricMetricDescriptorLaunchStageEnumSlice flattens the contents of LogMetricMetricDescriptorLaunchStageEnum from a JSON -// response object. 
-func flattenLogMetricMetricDescriptorLaunchStageEnumSlice(c *Client, i interface{}, res *LogMetric) []LogMetricMetricDescriptorLaunchStageEnum { - a, ok := i.([]interface{}) - if !ok { - return []LogMetricMetricDescriptorLaunchStageEnum{} - } - - if len(a) == 0 { - return []LogMetricMetricDescriptorLaunchStageEnum{} - } - - items := make([]LogMetricMetricDescriptorLaunchStageEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenLogMetricMetricDescriptorLaunchStageEnum(item.(interface{}))) - } - - return items -} - -// flattenLogMetricMetricDescriptorLaunchStageEnum asserts that an interface is a string, and returns a -// pointer to a *LogMetricMetricDescriptorLaunchStageEnum with the same value as that string. -func flattenLogMetricMetricDescriptorLaunchStageEnum(i interface{}) *LogMetricMetricDescriptorLaunchStageEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return LogMetricMetricDescriptorLaunchStageEnumRef(s) -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. 
-func (r *LogMetric) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalLogMetric(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type logMetricDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp logMetricApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToLogMetricDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]logMetricDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []logMetricDiff - // For each operation name, create a logMetricDiff which contains the operation. 
- for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := logMetricDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToLogMetricApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToLogMetricApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (logMetricApiOperation, error) { - switch opName { - - case "updateLogMetricUpdateOperation": - return &updateLogMetricUpdateOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractLogMetricFields(r *LogMetric) error { - vMetricDescriptor := r.MetricDescriptor - if vMetricDescriptor == nil { - // note: explicitly not the empty object. - vMetricDescriptor = &LogMetricMetricDescriptor{} - } - if err := extractLogMetricMetricDescriptorFields(r, vMetricDescriptor); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetricDescriptor) { - r.MetricDescriptor = vMetricDescriptor - } - vBucketOptions := r.BucketOptions - if vBucketOptions == nil { - // note: explicitly not the empty object. - vBucketOptions = &LogMetricBucketOptions{} - } - if err := extractLogMetricBucketOptionsFields(r, vBucketOptions); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBucketOptions) { - r.BucketOptions = vBucketOptions - } - return nil -} -func extractLogMetricMetricDescriptorFields(r *LogMetric, o *LogMetricMetricDescriptor) error { - vMetadata := o.Metadata - if vMetadata == nil { - // note: explicitly not the empty object. 
- vMetadata = &LogMetricMetricDescriptorMetadata{} - } - if err := extractLogMetricMetricDescriptorMetadataFields(r, vMetadata); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetadata) { - o.Metadata = vMetadata - } - return nil -} -func extractLogMetricMetricDescriptorLabelsFields(r *LogMetric, o *LogMetricMetricDescriptorLabels) error { - return nil -} -func extractLogMetricMetricDescriptorMetadataFields(r *LogMetric, o *LogMetricMetricDescriptorMetadata) error { - return nil -} -func extractLogMetricBucketOptionsFields(r *LogMetric, o *LogMetricBucketOptions) error { - vLinearBuckets := o.LinearBuckets - if vLinearBuckets == nil { - // note: explicitly not the empty object. - vLinearBuckets = &LogMetricBucketOptionsLinearBuckets{} - } - if err := extractLogMetricBucketOptionsLinearBucketsFields(r, vLinearBuckets); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLinearBuckets) { - o.LinearBuckets = vLinearBuckets - } - vExponentialBuckets := o.ExponentialBuckets - if vExponentialBuckets == nil { - // note: explicitly not the empty object. - vExponentialBuckets = &LogMetricBucketOptionsExponentialBuckets{} - } - if err := extractLogMetricBucketOptionsExponentialBucketsFields(r, vExponentialBuckets); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vExponentialBuckets) { - o.ExponentialBuckets = vExponentialBuckets - } - vExplicitBuckets := o.ExplicitBuckets - if vExplicitBuckets == nil { - // note: explicitly not the empty object. 
- vExplicitBuckets = &LogMetricBucketOptionsExplicitBuckets{} - } - if err := extractLogMetricBucketOptionsExplicitBucketsFields(r, vExplicitBuckets); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vExplicitBuckets) { - o.ExplicitBuckets = vExplicitBuckets - } - return nil -} -func extractLogMetricBucketOptionsLinearBucketsFields(r *LogMetric, o *LogMetricBucketOptionsLinearBuckets) error { - return nil -} -func extractLogMetricBucketOptionsExponentialBucketsFields(r *LogMetric, o *LogMetricBucketOptionsExponentialBuckets) error { - return nil -} -func extractLogMetricBucketOptionsExplicitBucketsFields(r *LogMetric, o *LogMetricBucketOptionsExplicitBuckets) error { - return nil -} - -func postReadExtractLogMetricFields(r *LogMetric) error { - vMetricDescriptor := r.MetricDescriptor - if vMetricDescriptor == nil { - // note: explicitly not the empty object. - vMetricDescriptor = &LogMetricMetricDescriptor{} - } - if err := postReadExtractLogMetricMetricDescriptorFields(r, vMetricDescriptor); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetricDescriptor) { - r.MetricDescriptor = vMetricDescriptor - } - vBucketOptions := r.BucketOptions - if vBucketOptions == nil { - // note: explicitly not the empty object. - vBucketOptions = &LogMetricBucketOptions{} - } - if err := postReadExtractLogMetricBucketOptionsFields(r, vBucketOptions); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBucketOptions) { - r.BucketOptions = vBucketOptions - } - return nil -} -func postReadExtractLogMetricMetricDescriptorFields(r *LogMetric, o *LogMetricMetricDescriptor) error { - vMetadata := o.Metadata - if vMetadata == nil { - // note: explicitly not the empty object. 
- vMetadata = &LogMetricMetricDescriptorMetadata{} - } - if err := extractLogMetricMetricDescriptorMetadataFields(r, vMetadata); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetadata) { - o.Metadata = vMetadata - } - return nil -} -func postReadExtractLogMetricMetricDescriptorLabelsFields(r *LogMetric, o *LogMetricMetricDescriptorLabels) error { - return nil -} -func postReadExtractLogMetricMetricDescriptorMetadataFields(r *LogMetric, o *LogMetricMetricDescriptorMetadata) error { - return nil -} -func postReadExtractLogMetricBucketOptionsFields(r *LogMetric, o *LogMetricBucketOptions) error { - vLinearBuckets := o.LinearBuckets - if vLinearBuckets == nil { - // note: explicitly not the empty object. - vLinearBuckets = &LogMetricBucketOptionsLinearBuckets{} - } - if err := extractLogMetricBucketOptionsLinearBucketsFields(r, vLinearBuckets); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLinearBuckets) { - o.LinearBuckets = vLinearBuckets - } - vExponentialBuckets := o.ExponentialBuckets - if vExponentialBuckets == nil { - // note: explicitly not the empty object. - vExponentialBuckets = &LogMetricBucketOptionsExponentialBuckets{} - } - if err := extractLogMetricBucketOptionsExponentialBucketsFields(r, vExponentialBuckets); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vExponentialBuckets) { - o.ExponentialBuckets = vExponentialBuckets - } - vExplicitBuckets := o.ExplicitBuckets - if vExplicitBuckets == nil { - // note: explicitly not the empty object. 
- vExplicitBuckets = &LogMetricBucketOptionsExplicitBuckets{} - } - if err := extractLogMetricBucketOptionsExplicitBucketsFields(r, vExplicitBuckets); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vExplicitBuckets) { - o.ExplicitBuckets = vExplicitBuckets - } - return nil -} -func postReadExtractLogMetricBucketOptionsLinearBucketsFields(r *LogMetric, o *LogMetricBucketOptionsLinearBuckets) error { - return nil -} -func postReadExtractLogMetricBucketOptionsExponentialBucketsFields(r *LogMetric, o *LogMetricBucketOptionsExponentialBuckets) error { - return nil -} -func postReadExtractLogMetricBucketOptionsExplicitBucketsFields(r *LogMetric, o *LogMetricBucketOptionsExplicitBuckets) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_schema.go deleted file mode 100644 index 054b040b7c..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_schema.go +++ /dev/null @@ -1,413 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package logging - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLLogMetricSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Logging/LogMetric", - Description: "The Logging LogMetric resource", - StructName: "LogMetric", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a LogMetric", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "logMetric", - Required: true, - Description: "A full instance of a LogMetric", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a LogMetric", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "logMetric", - Required: true, - Description: "A full instance of a LogMetric", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a LogMetric", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "logMetric", - Required: true, - Description: "A full instance of a LogMetric", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all LogMetric", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many LogMetric", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "LogMetric": &dcl.Component{ - Title: "LogMetric", - ID: "projects/{{project}}/metrics/{{name}}", - UsesStateHint: true, - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "filter", - "project", - }, - Properties: map[string]*dcl.Property{ - 
"bucketOptions": &dcl.Property{ - Type: "object", - GoName: "BucketOptions", - GoType: "LogMetricBucketOptions", - Description: "Optional. The `bucket_options` are required when the logs-based metric is using a DISTRIBUTION value type and it describes the bucket boundaries used to create a histogram of the extracted values.", - Properties: map[string]*dcl.Property{ - "explicitBuckets": &dcl.Property{ - Type: "object", - GoName: "ExplicitBuckets", - GoType: "LogMetricBucketOptionsExplicitBuckets", - Description: "The explicit buckets.", - Conflicts: []string{ - "linearBuckets", - "exponentialBuckets", - }, - Properties: map[string]*dcl.Property{ - "bounds": &dcl.Property{ - Type: "array", - GoName: "Bounds", - Description: "The values must be monotonically increasing.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "number", - Format: "double", - GoType: "float64", - }, - }, - }, - }, - "exponentialBuckets": &dcl.Property{ - Type: "object", - GoName: "ExponentialBuckets", - GoType: "LogMetricBucketOptionsExponentialBuckets", - Description: "The exponential buckets.", - Conflicts: []string{ - "linearBuckets", - "explicitBuckets", - }, - Properties: map[string]*dcl.Property{ - "growthFactor": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "GrowthFactor", - Description: "Must be greater than 1.", - }, - "numFiniteBuckets": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumFiniteBuckets", - Description: "Must be greater than 0.", - }, - "scale": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Scale", - Description: "Must be greater than 0.", - }, - }, - }, - "linearBuckets": &dcl.Property{ - Type: "object", - GoName: "LinearBuckets", - GoType: "LogMetricBucketOptionsLinearBuckets", - Description: "The linear bucket.", - Conflicts: []string{ - "exponentialBuckets", - "explicitBuckets", - }, - Properties: map[string]*dcl.Property{ - "numFiniteBuckets": &dcl.Property{ - Type: "integer", - Format: 
"int64", - GoName: "NumFiniteBuckets", - Description: "Must be greater than 0.", - }, - "offset": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Offset", - Description: "Lower bound of the first bucket.", - }, - "width": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Width", - Description: "Must be greater than 0.", - }, - }, - }, - }, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The creation timestamp of the metric. This field may not be present for older metrics.", - Immutable: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Optional. A description of this metric, which is used in documentation. The maximum length of the description is 8000 characters.", - }, - "disabled": &dcl.Property{ - Type: "boolean", - GoName: "Disabled", - Description: "Optional. If set to True, then this metric is disabled and it does not generate any points.", - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. An [advanced logs filter](https://cloud.google.com/logging/docs/view/advanced_filters) which is used to match log entries. Example: \"resource.type=gae_app AND severity>=ERROR\" The maximum length of the filter is 20000 characters.", - }, - "labelExtractors": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "LabelExtractors", - Description: "Optional. A map from a label key string to an extractor expression which is used to extract data from a log entry field and assign as the label value. Each label key specified in the LabelDescriptor must have an associated extractor expression in this map. The syntax of the extractor expression is the same as for the `value_extractor` field. The extracted value is converted to the type defined in the label descriptor. 
If the either the extraction or the type conversion fails, the label will have a default value. The default value for a string label is an empty string, for an integer label its 0, and for a boolean label its `false`. Note that there are upper bounds on the maximum number of labels and the number of active time series that are allowed in a project.", - }, - "metricDescriptor": &dcl.Property{ - Type: "object", - GoName: "MetricDescriptor", - GoType: "LogMetricMetricDescriptor", - Description: "Optional. The metric descriptor associated with the logs-based metric. If unspecified, it uses a default metric descriptor with a DELTA metric kind, INT64 value type, with no labels and a unit of \"1\". Such a metric counts the number of log entries matching the `filter` expression. The `name`, `type`, and `description` fields in the `metric_descriptor` are output only, and is constructed using the `name` and `description` field in the LogMetric. To create a logs-based metric that records a distribution of log values, a DELTA metric kind with a DISTRIBUTION value type must be used along with a `value_extractor` expression in the LogMetric. Each label in the metric descriptor must have a matching label name as the key and an extractor expression as the value in the `label_extractors` map. The `metric_kind` and `value_type` fields in the `metric_descriptor` cannot be updated once initially configured. New labels can be added in the `metric_descriptor`, but existing labels cannot be modified except for their description.", - Properties: map[string]*dcl.Property{ - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - ReadOnly: true, - Description: "A detailed description of the metric, which can be used in documentation.", - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "A concise name for the metric, which can be displayed in user interfaces. 
Use sentence case without an ending period, for example \"Request count\". This field is optional but it is recommended to be set for any metrics associated with user-visible concepts, such as Quota.", - }, - "labels": &dcl.Property{ - Type: "array", - GoName: "Labels", - Description: "The set of labels that can be used to describe a specific instance of this metric type. For example, the `appengine.googleapis.com/http/server/response_latencies` metric type has a label for the HTTP response code, `response_code`, so you can look at latencies for successful responses or just for responses that failed.", - SendEmpty: true, - ListType: "set", - Items: &dcl.Property{ - Type: "object", - GoType: "LogMetricMetricDescriptorLabels", - Properties: map[string]*dcl.Property{ - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "A human-readable description for the label.", - Immutable: true, - }, - "key": &dcl.Property{ - Type: "string", - GoName: "Key", - Description: "The label key.", - Immutable: true, - }, - "valueType": &dcl.Property{ - Type: "string", - GoName: "ValueType", - GoType: "LogMetricMetricDescriptorLabelsValueTypeEnum", - Description: "The type of data that can be assigned to the label. Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION, MONEY", - Immutable: true, - Enum: []string{ - "STRING", - "BOOL", - "INT64", - "DOUBLE", - "DISTRIBUTION", - "MONEY", - }, - }, - }, - }, - }, - "launchStage": &dcl.Property{ - Type: "string", - GoName: "LaunchStage", - GoType: "LogMetricMetricDescriptorLaunchStageEnum", - Description: "Optional. The launch stage of the metric definition. 
Possible values: UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED", - Enum: []string{ - "UNIMPLEMENTED", - "PRELAUNCH", - "EARLY_ACCESS", - "ALPHA", - "BETA", - "GA", - "DEPRECATED", - }, - Unreadable: true, - }, - "metadata": &dcl.Property{ - Type: "object", - GoName: "Metadata", - GoType: "LogMetricMetricDescriptorMetadata", - Description: "Optional. Metadata which can be used to guide usage of the metric.", - Unreadable: true, - Properties: map[string]*dcl.Property{ - "ingestDelay": &dcl.Property{ - Type: "string", - GoName: "IngestDelay", - Description: "The delay of data points caused by ingestion. Data points older than this age are guaranteed to be ingested and available to be read, excluding data loss due to errors.", - }, - "samplePeriod": &dcl.Property{ - Type: "string", - GoName: "SamplePeriod", - Description: "The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period.", - }, - }, - }, - "metricKind": &dcl.Property{ - Type: "string", - GoName: "MetricKind", - GoType: "LogMetricMetricDescriptorMetricKindEnum", - Description: "Whether the metric records instantaneous values, changes to a value, etc. Some combinations of `metric_kind` and `value_type` might not be supported. Possible values: GAUGE, DELTA, CUMULATIVE", - Immutable: true, - Enum: []string{ - "GAUGE", - "DELTA", - "CUMULATIVE", - }, - }, - "monitoredResourceTypes": &dcl.Property{ - Type: "array", - GoName: "MonitoredResourceTypes", - ReadOnly: true, - Description: "Read-only. 
If present, then a time series, which is identified partially by a metric type and a MonitoredResourceDescriptor, that is associated with this metric type can only be associated with one of the monitored resource types listed here.", - Immutable: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - ReadOnly: true, - Description: "The resource name of the metric descriptor.", - Immutable: true, - }, - "type": &dcl.Property{ - Type: "string", - GoName: "Type", - ReadOnly: true, - Description: "The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined metric types have the DNS name `custom.googleapis.com` or `external.googleapis.com`. Metric types should use a natural hierarchical grouping. For example: \"custom.googleapis.com/invoice/paid/amount\" \"external.googleapis.com/prometheus/up\" \"appengine.googleapis.com/http/server/response_latencies\"", - Immutable: true, - }, - "unit": &dcl.Property{ - Type: "string", - GoName: "Unit", - Description: "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. 
Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component: { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. 
It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", - ServerDefault: true, - }, - "valueType": &dcl.Property{ - Type: "string", - GoName: "ValueType", - GoType: "LogMetricMetricDescriptorValueTypeEnum", - Description: "Whether the measurement is an integer, a floating-point number, etc. Some combinations of `metric_kind` and `value_type` might not be supported. Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION, MONEY", - Immutable: true, - Enum: []string{ - "STRING", - "BOOL", - "INT64", - "DOUBLE", - "DISTRIBUTION", - "MONEY", - }, - }, - }, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Required. The client-assigned metric identifier. Examples: `\"error_count\"`, `\"nginx/requests\"`. Metric identifiers are limited to 100 characters and can include only the following characters: `A-Z`, `a-z`, `0-9`, and the special characters `_-.,+!*',()%/`. The forward-slash character (`/`) denotes a hierarchy of name pieces, and it cannot be the first character of the name. The metric identifier in this field must not be [URL-encoded](https://en.wikipedia.org/wiki/Percent-encoding). However, when the metric identifier appears as the `[METRIC_ID]` part of a `metric_name` API parameter, then the metric identifier must be URL-encoded. 
Example: `\"projects/my-project/metrics/nginx%2Frequests\"`.", - Immutable: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The resource name of the project in which to create the metric.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. The last update timestamp of the metric. This field may not be present for older metrics.", - Immutable: true, - }, - "valueExtractor": &dcl.Property{ - Type: "string", - GoName: "ValueExtractor", - Description: "Optional. A `value_extractor` is required when using a distribution logs-based metric to extract the values to record from a log entry. Two functions are supported for value extraction: `EXTRACT(field)` or `REGEXP_EXTRACT(field, regex)`. The argument are: 1. field: The name of the log entry field from which the value is to be extracted. 2. regex: A regular expression using the Google RE2 syntax (https://github.com/google/re2/wiki/Syntax) with a single capture group to extract data from the specified log entry field. The value of the field is converted to a string before applying the regex. It is an error to specify a regex that does not include exactly one capture group. The result of the extraction must be convertible to a double type, as the distribution always records double values. If either the extraction or the conversion to double fails, then those values are not recorded in the distribution. 
Example: `REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(d+).*\")`", - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_yaml_embed.go deleted file mode 100644 index 669a0e7588..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_metric_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// GENERATED BY gen_go_data.go -// gen_go_data -package logging -var YAML_log_metric blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/logging/log_metric.yaml - -package logging - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/logging/log_metric.yaml -var YAML_log_metric = []byte("info:\n title: Logging/LogMetric\n description: The Logging LogMetric resource\n x-dcl-struct-name: LogMetric\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a LogMetric\n parameters:\n - name: logMetric\n required: true\n description: A full instance of a LogMetric\n apply:\n description: The function used to apply information about a LogMetric\n parameters:\n - name: logMetric\n required: true\n description: A full instance of a LogMetric\n delete:\n description: The function used to delete a LogMetric\n parameters:\n - name: logMetric\n required: true\n description: A full instance of a LogMetric\n deleteAll:\n description: The function used to delete all LogMetric\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many LogMetric\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n LogMetric:\n title: LogMetric\n x-dcl-id: projects/{{project}}/metrics/{{name}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - filter\n - project\n properties:\n bucketOptions:\n type: object\n x-dcl-go-name: BucketOptions\n x-dcl-go-type: LogMetricBucketOptions\n description: Optional. 
The `bucket_options` are required when the logs-based\n metric is using a DISTRIBUTION value type and it describes the bucket\n boundaries used to create a histogram of the extracted values.\n properties:\n explicitBuckets:\n type: object\n x-dcl-go-name: ExplicitBuckets\n x-dcl-go-type: LogMetricBucketOptionsExplicitBuckets\n description: The explicit buckets.\n x-dcl-conflicts:\n - linearBuckets\n - exponentialBuckets\n properties:\n bounds:\n type: array\n x-dcl-go-name: Bounds\n description: The values must be monotonically increasing.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: number\n format: double\n x-dcl-go-type: float64\n exponentialBuckets:\n type: object\n x-dcl-go-name: ExponentialBuckets\n x-dcl-go-type: LogMetricBucketOptionsExponentialBuckets\n description: The exponential buckets.\n x-dcl-conflicts:\n - linearBuckets\n - explicitBuckets\n properties:\n growthFactor:\n type: number\n format: double\n x-dcl-go-name: GrowthFactor\n description: Must be greater than 1.\n numFiniteBuckets:\n type: integer\n format: int64\n x-dcl-go-name: NumFiniteBuckets\n description: Must be greater than 0.\n scale:\n type: number\n format: double\n x-dcl-go-name: Scale\n description: Must be greater than 0.\n linearBuckets:\n type: object\n x-dcl-go-name: LinearBuckets\n x-dcl-go-type: LogMetricBucketOptionsLinearBuckets\n description: The linear bucket.\n x-dcl-conflicts:\n - exponentialBuckets\n - explicitBuckets\n properties:\n numFiniteBuckets:\n type: integer\n format: int64\n x-dcl-go-name: NumFiniteBuckets\n description: Must be greater than 0.\n offset:\n type: number\n format: double\n x-dcl-go-name: Offset\n description: Lower bound of the first bucket.\n width:\n type: number\n format: double\n x-dcl-go-name: Width\n description: Must be greater than 0.\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The creation timestamp of the metric. 
This field\n may not be present for older metrics.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. A description of this metric, which is used in documentation.\n The maximum length of the description is 8000 characters.\n disabled:\n type: boolean\n x-dcl-go-name: Disabled\n description: Optional. If set to True, then this metric is disabled and\n it does not generate any points.\n filter:\n type: string\n x-dcl-go-name: Filter\n description: 'Required. An [advanced logs filter](https://cloud.google.com/logging/docs/view/advanced_filters)\n which is used to match log entries. Example: \"resource.type=gae_app AND\n severity>=ERROR\" The maximum length of the filter is 20000 characters.'\n labelExtractors:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: LabelExtractors\n description: Optional. A map from a label key string to an extractor expression\n which is used to extract data from a log entry field and assign as the\n label value. Each label key specified in the LabelDescriptor must have\n an associated extractor expression in this map. The syntax of the extractor\n expression is the same as for the `value_extractor` field. The extracted\n value is converted to the type defined in the label descriptor. If the\n either the extraction or the type conversion fails, the label will have\n a default value. The default value for a string label is an empty string,\n for an integer label its 0, and for a boolean label its `false`. Note\n that there are upper bounds on the maximum number of labels and the number\n of active time series that are allowed in a project.\n metricDescriptor:\n type: object\n x-dcl-go-name: MetricDescriptor\n x-dcl-go-type: LogMetricMetricDescriptor\n description: Optional. The metric descriptor associated with the logs-based\n metric. 
If unspecified, it uses a default metric descriptor with a DELTA\n metric kind, INT64 value type, with no labels and a unit of \"1\". Such\n a metric counts the number of log entries matching the `filter` expression.\n The `name`, `type`, and `description` fields in the `metric_descriptor`\n are output only, and is constructed using the `name` and `description`\n field in the LogMetric. To create a logs-based metric that records a distribution\n of log values, a DELTA metric kind with a DISTRIBUTION value type must\n be used along with a `value_extractor` expression in the LogMetric. Each\n label in the metric descriptor must have a matching label name as the\n key and an extractor expression as the value in the `label_extractors`\n map. The `metric_kind` and `value_type` fields in the `metric_descriptor`\n cannot be updated once initially configured. New labels can be added in\n the `metric_descriptor`, but existing labels cannot be modified except\n for their description.\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n readOnly: true\n description: A detailed description of the metric, which can be used\n in documentation.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: A concise name for the metric, which can be displayed in\n user interfaces. Use sentence case without an ending period, for example\n \"Request count\". This field is optional but it is recommended to be\n set for any metrics associated with user-visible concepts, such as\n Quota.\n labels:\n type: array\n x-dcl-go-name: Labels\n description: The set of labels that can be used to describe a specific\n instance of this metric type. 
For example, the `appengine.googleapis.com/http/server/response_latencies`\n metric type has a label for the HTTP response code, `response_code`,\n so you can look at latencies for successful responses or just for\n responses that failed.\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: object\n x-dcl-go-type: LogMetricMetricDescriptorLabels\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A human-readable description for the label.\n x-kubernetes-immutable: true\n key:\n type: string\n x-dcl-go-name: Key\n description: The label key.\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: LogMetricMetricDescriptorLabelsValueTypeEnum\n description: 'The type of data that can be assigned to the label.\n Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION,\n MONEY'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n - DOUBLE\n - DISTRIBUTION\n - MONEY\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: LogMetricMetricDescriptorLaunchStageEnum\n description: 'Optional. The launch stage of the metric definition. Possible\n values: UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED'\n enum:\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n x-dcl-mutable-unreadable: true\n metadata:\n type: object\n x-dcl-go-name: Metadata\n x-dcl-go-type: LogMetricMetricDescriptorMetadata\n description: Optional. Metadata which can be used to guide usage of\n the metric.\n x-dcl-mutable-unreadable: true\n properties:\n ingestDelay:\n type: string\n x-dcl-go-name: IngestDelay\n description: The delay of data points caused by ingestion. Data\n points older than this age are guaranteed to be ingested and available\n to be read, excluding data loss due to errors.\n samplePeriod:\n type: string\n x-dcl-go-name: SamplePeriod\n description: The sampling period of metric data points. 
For metrics\n which are written periodically, consecutive data points are stored\n at this time interval, excluding data loss due to errors. Metrics\n with a higher granularity have a smaller sampling period.\n metricKind:\n type: string\n x-dcl-go-name: MetricKind\n x-dcl-go-type: LogMetricMetricDescriptorMetricKindEnum\n description: 'Whether the metric records instantaneous values, changes\n to a value, etc. Some combinations of `metric_kind` and `value_type`\n might not be supported. Possible values: GAUGE, DELTA, CUMULATIVE'\n x-kubernetes-immutable: true\n enum:\n - GAUGE\n - DELTA\n - CUMULATIVE\n monitoredResourceTypes:\n type: array\n x-dcl-go-name: MonitoredResourceTypes\n readOnly: true\n description: Read-only. If present, then a time series, which is identified\n partially by a metric type and a MonitoredResourceDescriptor, that\n is associated with this metric type can only be associated with one\n of the monitored resource types listed here.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n name:\n type: string\n x-dcl-go-name: Name\n readOnly: true\n description: The resource name of the metric descriptor.\n x-kubernetes-immutable: true\n type:\n type: string\n x-dcl-go-name: Type\n readOnly: true\n description: 'The metric type, including its DNS name prefix. The type\n is not URL-encoded. All user-defined metric types have the DNS name\n `custom.googleapis.com` or `external.googleapis.com`. Metric types\n should use a natural hierarchical grouping. For example: \"custom.googleapis.com/invoice/paid/amount\"\n \"external.googleapis.com/prometheus/up\" \"appengine.googleapis.com/http/server/response_latencies\"'\n x-kubernetes-immutable: true\n unit:\n type: string\n x-dcl-go-name: Unit\n description: 'The units in which the metric value is reported. 
It is\n only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`.\n The `unit` defines the representation of the stored metric values.\n Different systems might scale the values to be more easily displayed\n (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value\n of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit`\n is `kBy`, then the value of the metric is always in thousands of bytes,\n no matter how it might be displayed. If you want a custom metric to\n record the exact number of CPU-seconds used by a job, you can create\n an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently\n `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the\n value is written as `12005`. Alternatively, if you want a custom metric\n to record data in a more granular way, you can create a `DOUBLE CUMULATIVE`\n metric whose `unit` is `ks{CPU}`, and then write the value `12.005`\n (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which\n is `12005/1024`). The supported units are a subset of [The Unified\n Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard:\n **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min`\n minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)**\n * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12)\n * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta\n (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9)\n * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z`\n zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi\n (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar**\n The grammar also includes these connectors: * `/` division or ratio\n (as an infix operator). 
For examples, `kBy/{email}` or `MiBy/10ms`\n (although you should almost never have `/s` in a metric `unit`; rates\n should always be computed at query time from the underlying cumulative\n or delta value). * `.` multiplication or composition (as an infix\n operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a\n unit is as follows: Expression = Component: { \".\" Component } { \"/\"\n Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ]\n | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation`\n is just a comment if it follows a `UNIT`. If the annotation is used\n alone, then the unit is equivalent to `1`. For examples, `{request}/s\n == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank\n printable ASCII characters not containing `{` or `}`. * `1` represents\n a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity)\n of 1, such as in `1/s`. It is typically used when none of the basic\n units are appropriate. For example, \"new users per day\" can be represented\n as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5\n new users). Alternatively, \"thousands of page views per day\" would\n be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric\n value of `5.3` would mean \"5300 page views per day\"). * `%` represents\n dimensionless value of 1/100, and annotates values giving a percentage\n (so the metric values are typically in the range of 0..100, and a\n metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric\n contains a ratio, typically in the range 0..1, that will be multiplied\n by 100 and displayed as a percentage (so a metric value `0.03` means\n \"3 percent\").'\n x-dcl-server-default: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: LogMetricMetricDescriptorValueTypeEnum\n description: 'Whether the measurement is an integer, a floating-point\n number, etc. 
Some combinations of `metric_kind` and `value_type` might\n not be supported. Possible values: STRING, BOOL, INT64, DOUBLE, DISTRIBUTION,\n MONEY'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n - DOUBLE\n - DISTRIBUTION\n - MONEY\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Required. The client-assigned metric identifier. Examples:\n `\"error_count\"`, `\"nginx/requests\"`. Metric identifiers are limited to\n 100 characters and can include only the following characters: `A-Z`, `a-z`,\n `0-9`, and the special characters `_-.,+!*'',()%/`. The forward-slash\n character (`/`) denotes a hierarchy of name pieces, and it cannot be the\n first character of the name. The metric identifier in this field must\n not be [URL-encoded](https://en.wikipedia.org/wiki/Percent-encoding).\n However, when the metric identifier appears as the `[METRIC_ID]` part\n of a `metric_name` API parameter, then the metric identifier must be URL-encoded.\n Example: `\"projects/my-project/metrics/nginx%2Frequests\"`.'\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The resource name of the project in which to create the metric.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The last update timestamp of the metric. This\n field may not be present for older metrics.\n x-kubernetes-immutable: true\n valueExtractor:\n type: string\n x-dcl-go-name: ValueExtractor\n description: 'Optional. A `value_extractor` is required when using a distribution\n logs-based metric to extract the values to record from a log entry. Two\n functions are supported for value extraction: `EXTRACT(field)` or `REGEXP_EXTRACT(field,\n regex)`. The argument are: 1. field: The name of the log entry field from\n which the value is to be extracted. 2. 
regex: A regular expression using\n the Google RE2 syntax (https://github.com/google/re2/wiki/Syntax) with\n a single capture group to extract data from the specified log entry field.\n The value of the field is converted to a string before applying the regex.\n It is an error to specify a regex that does not include exactly one capture\n group. The result of the extraction must be convertible to a double type,\n as the distribution always records double values. If either the extraction\n or the conversion to double fails, then those values are not recorded\n in the distribution. Example: `REGEXP_EXTRACT(jsonPayload.request, \".*quantity=(d+).*\")`'\n") - -// 22100 bytes -// MD5: b55014c3141d88528a1a93f25cd97b16 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view.go deleted file mode 100644 index 36594a17cd..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view.go +++ /dev/null @@ -1,377 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package logging - -import ( - "context" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type LogView struct { - Name *string `json:"name"` - Description *string `json:"description"` - CreateTime *string `json:"createTime"` - UpdateTime *string `json:"updateTime"` - Filter *string `json:"filter"` - Parent *string `json:"parent"` - Location *string `json:"location"` - Bucket *string `json:"bucket"` -} - -func (r *LogView) String() string { - return dcl.SprintResource(r) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. -func (r *LogView) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "logging", - Type: "LogView", - Version: "logging", - } -} - -func (r *LogView) ID() (string, error) { - if err := extractLogViewFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "description": dcl.ValueOrEmptyString(nr.Description), - "create_time": dcl.ValueOrEmptyString(nr.CreateTime), - "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), - "filter": dcl.ValueOrEmptyString(nr.Filter), - "parent": dcl.ValueOrEmptyString(nr.Parent), - "location": dcl.ValueOrEmptyString(nr.Location), - "bucket": dcl.ValueOrEmptyString(nr.Bucket), - } - return dcl.Nprintf("{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}", params), nil -} - -const LogViewMaxPage = -1 - -type LogViewList struct { - Items []*LogView - - nextToken string - - pageSize int32 - - resource *LogView -} - -func (l *LogViewList) HasNext() bool { - return l.nextToken != "" -} - -func (l *LogViewList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := 
c.listLogView(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListLogView(ctx context.Context, location, bucket, parent string) (*LogViewList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListLogViewWithMaxResults(ctx, location, bucket, parent, LogViewMaxPage) - -} - -func (c *Client) ListLogViewWithMaxResults(ctx context.Context, location, bucket, parent string, pageSize int32) (*LogViewList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. - r := &LogView{ - Location: &location, - Bucket: &bucket, - Parent: &parent, - } - items, token, err := c.listLogView(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &LogViewList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetLogView(ctx context.Context, r *LogView) (*LogView, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
- extractLogViewFields(r) - - b, err := c.getLogViewRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalLogView(b, c, r) - if err != nil { - return nil, err - } - result.Location = r.Location - result.Bucket = r.Bucket - result.Parent = r.Parent - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeLogViewNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractLogViewFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteLogView(ctx context.Context, r *LogView) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("LogView resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting LogView...") - deleteOp := deleteLogViewOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllLogView deletes all resources that the filter functions returns true on. 
-func (c *Client) DeleteAllLogView(ctx context.Context, location, bucket, parent string, filter func(*LogView) bool) error { - listObj, err := c.ListLogView(ctx, location, bucket, parent) - if err != nil { - return err - } - - err = c.deleteAllLogView(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllLogView(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyLogView(ctx context.Context, rawDesired *LogView, opts ...dcl.ApplyOption) (*LogView, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *LogView - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyLogViewHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyLogViewHelper(c *Client, ctx context.Context, rawDesired *LogView, opts ...dcl.ApplyOption) (*LogView, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyLogView...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. - if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractLogViewFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.logViewDiffsForRawDesired(ctx, rawDesired, opts...) 
- if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToLogViewDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []logViewApiOperation - if create { - ops = append(ops, &createLogViewOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyLogViewDiff(c, ctx, desired, rawDesired, ops, opts...) 
-} - -func applyLogViewDiff(c *Client, ctx context.Context, desired *LogView, rawDesired *LogView, ops []logViewApiOperation, opts ...dcl.ApplyOption) (*LogView, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetLogView(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createLogViewOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapLogView(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeLogViewNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeLogViewNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeLogViewDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractLogViewFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. 
- if err := postReadExtractLogViewFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffLogView(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view.yaml deleted file mode 100644 index 036dc6796d..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view.yaml +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-info: - title: Logging/LogView - description: The Logging LogView resource - x-dcl-struct-name: LogView - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a LogView - parameters: - - name: logView - required: true - description: A full instance of a LogView - apply: - description: The function used to apply information about a LogView - parameters: - - name: logView - required: true - description: A full instance of a LogView - delete: - description: The function used to delete a LogView - parameters: - - name: logView - required: true - description: A full instance of a LogView - deleteAll: - description: The function used to delete all LogView - parameters: - - name: location - required: true - schema: - type: string - - name: bucket - required: true - schema: - type: string - - name: parent - required: true - schema: - type: string - list: - description: The function used to list information about many LogView - parameters: - - name: location - required: true - schema: - type: string - - name: bucket - required: true - schema: - type: string - - name: parent - required: true - schema: - type: string -components: - schemas: - LogView: - title: LogView - x-dcl-id: '{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}' - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - name - - bucket - properties: - bucket: - type: string - x-dcl-go-name: Bucket - description: The bucket of the resource - x-kubernetes-immutable: true - x-dcl-references: - - resource: Logging/LogBucket - field: name - parent: true - createTime: - type: string - format: date-time - x-dcl-go-name: CreateTime - readOnly: true - description: Output only. The creation timestamp of the view. - x-kubernetes-immutable: true - description: - type: string - x-dcl-go-name: Description - description: Describes this view. 
- filter: - type: string - x-dcl-go-name: Filter - description: 'Filter that restricts which log entries in a bucket are visible - in this view. Filters are restricted to be a logical AND of ==/!= of any - of the following: - originating project/folder/organization/billing account. - - resource type - log id For example: SOURCE("projects/myproject") AND - resource.type = "gce_instance" AND LOG_ID("stdout")' - location: - type: string - x-dcl-go-name: Location - description: 'The location of the resource. The supported locations are: - global, us-central1, us-east1, us-west1, asia-east1, europe-west1.' - x-kubernetes-immutable: true - x-dcl-extract-if-empty: true - name: - type: string - x-dcl-go-name: Name - description: 'The resource name of the view. For example: `projects/my-project/locations/global/buckets/my-bucket/views/my-view`' - x-kubernetes-immutable: true - parent: - type: string - x-dcl-go-name: Parent - description: The parent of the resource. - x-kubernetes-immutable: true - x-dcl-forward-slash-allowed: true - x-dcl-references: - - resource: Cloudresourcemanager/BillingAccount - field: name - parent: true - - resource: Cloudresourcemanager/Folder - field: name - parent: true - - resource: Cloudresourcemanager/Organization - field: name - parent: true - - resource: Cloudresourcemanager/Project - field: name - parent: true - x-dcl-extract-if-empty: true - updateTime: - type: string - format: date-time - x-dcl-go-name: UpdateTime - readOnly: true - description: Output only. The last update timestamp of the view. 
- x-kubernetes-immutable: true diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view_internal.go deleted file mode 100644 index 942e74c295..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view_internal.go +++ /dev/null @@ -1,810 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package logging - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func (r *LogView) validate() error { - - if err := dcl.Required(r, "name"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Bucket, "Bucket"); err != nil { - return err - } - return nil -} -func (r *LogView) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://logging.googleapis.com/v2/", params) -} - -func (r *LogView) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "location": dcl.ValueOrEmptyString(nr.Location), - "bucket": dcl.ValueOrEmptyString(nr.Bucket), - "parent": dcl.ValueOrEmptyString(nr.Parent), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}", nr.basePath(), userBasePath, params), nil -} - -func (r *LogView) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "location": dcl.ValueOrEmptyString(nr.Location), - "bucket": dcl.ValueOrEmptyString(nr.Bucket), - "parent": dcl.ValueOrEmptyString(nr.Parent), - } - return dcl.URL("{{parent}}/locations/{{location}}/buckets/{{bucket}}/views", nr.basePath(), userBasePath, params), nil - -} - -func (r *LogView) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "location": dcl.ValueOrEmptyString(nr.Location), - "bucket": dcl.ValueOrEmptyString(nr.Bucket), - "parent": dcl.ValueOrEmptyString(nr.Parent), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("{{parent}}/locations/{{location}}/buckets/{{bucket}}/views?viewId={{name}}", nr.basePath(), userBasePath, params), nil - -} - -func (r *LogView) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - 
"location": dcl.ValueOrEmptyString(nr.Location), - "bucket": dcl.ValueOrEmptyString(nr.Bucket), - "parent": dcl.ValueOrEmptyString(nr.Parent), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// logViewApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type logViewApiOperation interface { - do(context.Context, *LogView, *Client) error -} - -// newUpdateLogViewUpdateLogViewRequest creates a request for an -// LogView resource's UpdateLogView update type by filling in the update -// fields based on the intended state of the resource. -func newUpdateLogViewUpdateLogViewRequest(ctx context.Context, f *LogView, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.Description; !dcl.IsEmptyValueIndirect(v) { - req["description"] = v - } - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - req["filter"] = v - } - return req, nil -} - -// marshalUpdateLogViewUpdateLogViewRequest converts the update into -// the final JSON request body. -func marshalUpdateLogViewUpdateLogViewRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateLogViewUpdateLogViewOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. 
- -func (op *updateLogViewUpdateLogViewOperation) do(ctx context.Context, r *LogView, c *Client) error { - _, err := c.GetLogView(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateLogView") - if err != nil { - return err - } - mask := dcl.UpdateMask(op.FieldDiffs) - u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) - if err != nil { - return err - } - - req, err := newUpdateLogViewUpdateLogViewRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateLogViewUpdateLogViewRequest(c, req) - if err != nil { - return err - } - _, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - return nil -} - -func (c *Client) listLogViewRaw(ctx context.Context, r *LogView, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != LogViewMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listLogViewOperation struct { - Views []map[string]interface{} `json:"views"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listLogView(ctx context.Context, r *LogView, pageToken string, pageSize int32) ([]*LogView, string, error) { - b, err := c.listLogViewRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listLogViewOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var 
l []*LogView - for _, v := range m.Views { - res, err := unmarshalMapLogView(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Location = r.Location - res.Bucket = r.Bucket - res.Parent = r.Parent - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllLogView(ctx context.Context, f func(*LogView) bool, resources []*LogView) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteLogView(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteLogViewOperation struct{} - -func (op *deleteLogViewOperation) do(ctx context.Context, r *LogView, c *Client) error { - r, err := c.GetLogView(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "LogView not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetLogView checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return fmt.Errorf("failed to delete LogView: %w", err) - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. 
- retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetLogView(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createLogViewOperation struct { - response map[string]interface{} -} - -func (op *createLogViewOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createLogViewOperation) do(ctx context.Context, r *LogView, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - - o, err := dcl.ResponseBodyAsJSON(resp) - if err != nil { - return fmt.Errorf("error decoding response body into JSON: %w", err) - } - op.response = o - - if _, err := c.GetLogView(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getLogViewRaw(ctx context.Context, r *LogView) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - 
return nil, err - } - - return b, nil -} - -func (c *Client) logViewDiffsForRawDesired(ctx context.Context, rawDesired *LogView, opts ...dcl.ApplyOption) (initial, desired *LogView, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. - var fetchState *LogView - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*LogView); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected LogView, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetLogView(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a LogView resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve LogView resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that LogView resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeLogViewDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for LogView: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for LogView: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractLogViewFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. 
- initial, err = canonicalizeLogViewInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for LogView: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeLogViewDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for LogView: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffLogView(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeLogViewInitialState(rawInitial, rawDesired *LogView) (*LogView, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeLogViewDesiredState(rawDesired, rawInitial *LogView, opts ...dcl.ApplyOption) (*LogView, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. 
- - return rawDesired, nil - } - canonicalDesired := &LogView{} - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { - canonicalDesired.Description = rawInitial.Description - } else { - canonicalDesired.Description = rawDesired.Description - } - if dcl.StringCanonicalize(rawDesired.Filter, rawInitial.Filter) { - canonicalDesired.Filter = rawInitial.Filter - } else { - canonicalDesired.Filter = rawDesired.Filter - } - if dcl.NameToSelfLink(rawDesired.Parent, rawInitial.Parent) { - canonicalDesired.Parent = rawInitial.Parent - } else { - canonicalDesired.Parent = rawDesired.Parent - } - if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { - canonicalDesired.Location = rawInitial.Location - } else { - canonicalDesired.Location = rawDesired.Location - } - if dcl.NameToSelfLink(rawDesired.Bucket, rawInitial.Bucket) { - canonicalDesired.Bucket = rawInitial.Bucket - } else { - canonicalDesired.Bucket = rawDesired.Bucket - } - return canonicalDesired, nil -} - -func canonicalizeLogViewNewState(c *Client, rawNew, rawDesired *LogView) (*LogView, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { - rawNew.Description = rawDesired.Description - } else { - if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { - rawNew.Description = rawDesired.Description - } - } - - if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { - rawNew.CreateTime = rawDesired.CreateTime - } else { - } - - if 
dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { - rawNew.UpdateTime = rawDesired.UpdateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.Filter) && dcl.IsEmptyValueIndirect(rawDesired.Filter) { - rawNew.Filter = rawDesired.Filter - } else { - if dcl.StringCanonicalize(rawDesired.Filter, rawNew.Filter) { - rawNew.Filter = rawDesired.Filter - } - } - - rawNew.Parent = rawDesired.Parent - - rawNew.Location = rawDesired.Location - - rawNew.Bucket = rawDesired.Bucket - - return rawNew, nil -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. -func diffLogView(c *Client, desired, actual *LogView, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogViewUpdateLogViewOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateLogViewUpdateLogViewOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Parent, actual.Parent, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Parent")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Bucket, actual.Bucket, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Bucket")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. -func (r *LogView) urlNormalized() *LogView { - normalized := dcl.Copy(*r).(LogView) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.Description = dcl.SelfLinkToName(r.Description) - normalized.Filter = dcl.SelfLinkToName(r.Filter) - normalized.Parent = r.Parent - normalized.Location = dcl.SelfLinkToName(r.Location) - normalized.Bucket = dcl.SelfLinkToName(r.Bucket) - return &normalized -} - -func (r *LogView) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateLogView" { - fields := map[string]interface{}{ - "location": dcl.ValueOrEmptyString(nr.Location), - "bucket": dcl.ValueOrEmptyString(nr.Bucket), - "parent": dcl.ValueOrEmptyString(nr.Parent), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the LogView resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *LogView) marshal(c *Client) ([]byte, error) { - m, err := expandLogView(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling LogView: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalLogView decodes JSON responses into the LogView resource schema. 
-func unmarshalLogView(b []byte, c *Client, res *LogView) (*LogView, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapLogView(m, c, res) -} - -func unmarshalMapLogView(m map[string]interface{}, c *Client, res *LogView) (*LogView, error) { - - flattened := flattenLogView(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandLogView expands LogView into a JSON request object. -func expandLogView(c *Client, f *LogView) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := dcl.DeriveField("%s/locations/%s/buckets/%s/views/%s", f.Name, f.Parent, dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Bucket), dcl.SelfLinkToName(f.Name)); err != nil { - return nil, fmt.Errorf("error expanding Name into name: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - if v := f.Description; dcl.ValueShouldBeSent(v) { - m["description"] = v - } - if v := f.Filter; dcl.ValueShouldBeSent(v) { - m["filter"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Parent into parent: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["parent"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Location into location: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["location"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Bucket into bucket: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["bucket"] = v - } - - return m, nil -} - -// flattenLogView flattens LogView from a JSON request object into the -// LogView type. 
-func flattenLogView(c *Client, i interface{}, res *LogView) *LogView { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &LogView{} - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.Description = dcl.FlattenString(m["description"]) - resultRes.CreateTime = dcl.FlattenString(m["createTime"]) - resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) - resultRes.Filter = dcl.FlattenString(m["filter"]) - resultRes.Parent = dcl.FlattenString(m["parent"]) - resultRes.Location = dcl.FlattenString(m["location"]) - resultRes.Bucket = dcl.FlattenString(m["bucket"]) - - return resultRes -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. -func (r *LogView) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalLogView(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Location == nil && ncr.Location == nil { - c.Config.Logger.Info("Both Location fields null - considering equal.") - } else if nr.Location == nil || ncr.Location == nil { - c.Config.Logger.Info("Only one Location field is null - considering unequal.") - return false - } else if *nr.Location != *ncr.Location { - return false - } - if nr.Bucket == nil && ncr.Bucket == nil { - c.Config.Logger.Info("Both Bucket fields null - considering equal.") - } else if nr.Bucket == nil || ncr.Bucket == nil { - c.Config.Logger.Info("Only one Bucket field is null - considering unequal.") - return false - } else if *nr.Bucket != *ncr.Bucket { - return false - } - if nr.Parent == nil && ncr.Parent == nil { 
- c.Config.Logger.Info("Both Parent fields null - considering equal.") - } else if nr.Parent == nil || ncr.Parent == nil { - c.Config.Logger.Info("Only one Parent field is null - considering unequal.") - return false - } else if *nr.Parent != *ncr.Parent { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type logViewDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp logViewApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToLogViewDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]logViewDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []logViewDiff - // For each operation name, create a logViewDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := logViewDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToLogViewApiOperation(opName, fieldDiffs, opts...) 
- if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToLogViewApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (logViewApiOperation, error) { - switch opName { - - case "updateLogViewUpdateLogViewOperation": - return &updateLogViewUpdateLogViewOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractLogViewFields(r *LogView) error { - vParent, err := dcl.ValueFromRegexOnField("Parent", r.Parent, r.Bucket, "((projects|folders|organizations|billingAccounts)/[a-z0-9A-Z-]*)/locations/.*") - if err != nil { - return err - } - r.Parent = vParent - vLocation, err := dcl.ValueFromRegexOnField("Location", r.Location, r.Bucket, "[a-zA-Z]*/[a-z0-9A-Z-]*/locations/([a-z0-9-]*)/buckets/.*") - if err != nil { - return err - } - r.Location = vLocation - return nil -} - -func postReadExtractLogViewFields(r *LogView) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view_schema.go deleted file mode 100644 index 214d5127e5..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view_schema.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package logging - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLLogViewSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Logging/LogView", - Description: "The Logging LogView resource", - StructName: "LogView", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a LogView", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "logView", - Required: true, - Description: "A full instance of a LogView", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a LogView", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "logView", - Required: true, - Description: "A full instance of a LogView", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a LogView", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "logView", - Required: true, - Description: "A full instance of a LogView", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all LogView", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "bucket", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "parent", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - 
Description: "The function used to list information about many LogView", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "bucket", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "parent", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "LogView": &dcl.Component{ - Title: "LogView", - ID: "{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "bucket", - }, - Properties: map[string]*dcl.Property{ - "bucket": &dcl.Property{ - Type: "string", - GoName: "Bucket", - Description: "The bucket of the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Logging/LogBucket", - Field: "name", - Parent: true, - }, - }, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The creation timestamp of the view.", - Immutable: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Describes this view.", - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Filter that restricts which log entries in a bucket are visible in this view. Filters are restricted to be a logical AND of ==/!= of any of the following: - originating project/folder/organization/billing account. 
- resource type - log id For example: SOURCE(\"projects/myproject\") AND resource.type = \"gce_instance\" AND LOG_ID(\"stdout\")", - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location of the resource. The supported locations are: global, us-central1, us-east1, us-west1, asia-east1, europe-west1.", - Immutable: true, - ExtractIfEmpty: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The resource name of the view. For example: `projects/my-project/locations/global/buckets/my-bucket/views/my-view`", - Immutable: true, - }, - "parent": &dcl.Property{ - Type: "string", - GoName: "Parent", - Description: "The parent of the resource.", - Immutable: true, - ForwardSlashAllowed: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/BillingAccount", - Field: "name", - Parent: true, - }, - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Folder", - Field: "name", - Parent: true, - }, - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Organization", - Field: "name", - Parent: true, - }, - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - ExtractIfEmpty: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. 
The last update timestamp of the view.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view_yaml_embed.go deleted file mode 100644 index 2fd56ad912..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/log_view_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// GENERATED BY gen_go_data.go -// gen_go_data -package logging -var YAML_log_view blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/logging/log_view.yaml - -package logging - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/logging/log_view.yaml -var YAML_log_view = []byte("info:\n title: Logging/LogView\n description: The Logging LogView resource\n x-dcl-struct-name: LogView\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a LogView\n parameters:\n - name: logView\n required: true\n description: A full instance of a LogView\n apply:\n description: The function used to apply information about a LogView\n parameters:\n - name: logView\n required: true\n description: A full instance of a LogView\n delete:\n description: The function used to delete a LogView\n parameters:\n - name: logView\n required: true\n description: A full instance of a LogView\n deleteAll:\n description: The function used to delete all LogView\n parameters:\n - name: location\n required: true\n schema:\n type: string\n - name: bucket\n required: true\n schema:\n type: string\n - name: parent\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many LogView\n parameters:\n - name: location\n required: true\n schema:\n type: string\n - name: bucket\n required: true\n schema:\n type: string\n - name: parent\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n LogView:\n title: LogView\n x-dcl-id: '{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}'\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - bucket\n properties:\n bucket:\n type: string\n x-dcl-go-name: Bucket\n description: The bucket of the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Logging/LogBucket\n field: name\n parent: 
true\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The creation timestamp of the view.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: Describes this view.\n filter:\n type: string\n x-dcl-go-name: Filter\n description: 'Filter that restricts which log entries in a bucket are visible\n in this view. Filters are restricted to be a logical AND of ==/!= of any\n of the following: - originating project/folder/organization/billing account.\n - resource type - log id For example: SOURCE(\"projects/myproject\") AND\n resource.type = \"gce_instance\" AND LOG_ID(\"stdout\")'\n location:\n type: string\n x-dcl-go-name: Location\n description: 'The location of the resource. The supported locations are:\n global, us-central1, us-east1, us-west1, asia-east1, europe-west1.'\n x-kubernetes-immutable: true\n x-dcl-extract-if-empty: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'The resource name of the view. For example: `projects/my-project/locations/global/buckets/my-bucket/views/my-view`'\n x-kubernetes-immutable: true\n parent:\n type: string\n x-dcl-go-name: Parent\n description: The parent of the resource.\n x-kubernetes-immutable: true\n x-dcl-forward-slash-allowed: true\n x-dcl-references:\n - resource: Cloudresourcemanager/BillingAccount\n field: name\n parent: true\n - resource: Cloudresourcemanager/Folder\n field: name\n parent: true\n - resource: Cloudresourcemanager/Organization\n field: name\n parent: true\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n x-dcl-extract-if-empty: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
The last update timestamp of the view.\n x-kubernetes-immutable: true\n") - -// 4294 bytes -// MD5: edba246c77cc6d3cc93aa607c27a6d7c diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/logging_utils.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/logging_utils.go deleted file mode 100644 index 4e8af2dbad..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging/logging_utils.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Package logging provides types and functiosn for handling logging GCP resources. -package logging - -import ( - "bytes" - "context" - "fmt" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -// do makes a request to delete a log bucket if the name of the bucket is not -// "_Default" or "_Required" -func (op *deleteLogBucketOperation) do(ctx context.Context, r *LogBucket, c *Client) error { - - _, err := c.GetLogBucket(ctx, r) - - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.Infof("LogBucket not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.Warningf("GetLogBucket checking for existence. 
error: %v", err) - return err - } - - if r.Name != nil && (*r.Name == "_Default" || *r.Name == "_Required") { - return nil - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return fmt.Errorf("failed to delete LogBucket: %w", err) - } - return nil -} - -func equalsLogMetricMetricDescriptorLabelsValueType(m, n *LogMetricMetricDescriptorLabelsValueTypeEnum) bool { - if m == nil && n == nil { - return true - } - v := *LogMetricMetricDescriptorLabelsValueTypeEnumRef("STRING") - w := *LogMetricMetricDescriptorLabelsValueTypeEnumRef("") - if m == nil || *m == w { - // m is nil or blank, should compare equal to blank or "STRING" - return n == nil || *n == v || *n == w - } - if n == nil || *n == w { - // n is nil or blank, should compare equal to blank or "STRING" - return *m == v || *m == w - } - return *m == *n -} - -func canonicalizeLogMetricMetricDescriptorLabelsValueType(m, n interface{}) bool { - if m == nil && n == nil { - return true - } - mVal, _ := m.(*LogMetricMetricDescriptorLabelsValueTypeEnum) - nVal, _ := n.(*LogMetricMetricDescriptorLabelsValueTypeEnum) - return equalsLogMetricMetricDescriptorLabelsValueType(mVal, nVal) -} - -func equalsLogMetricMetricDescriptorValueType(m, n *LogMetricMetricDescriptorValueTypeEnum) bool { - if m == nil && n == nil { - return true - } - v := *LogMetricMetricDescriptorValueTypeEnumRef("STRING") - w := *LogMetricMetricDescriptorValueTypeEnumRef("") - if m == nil || *m == w { - // m is nil or blank, should compare equal to blank or "STRING" - return n == nil || *n == v || *n == w - } - if n == nil || *n == w { - // n is nil or blank, should compare equal to blank or "STRING" - return *m == v || *m == w - } - return *m == *n -} - -func canonicalizeLogMetricMetricDescriptorValueType(m, n interface{}) bool { - if m == nil && n == 
nil { - return true - } - mVal, _ := m.(*LogMetricMetricDescriptorValueTypeEnum) - nVal, _ := n.(*LogMetricMetricDescriptorValueTypeEnum) - return equalsLogMetricMetricDescriptorValueType(mVal, nVal) -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/client.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/client.go deleted file mode 100644 index 8dd920f921..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/client.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Package monitoring defines operations in the declarative SDK. -package monitoring - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -// The Client is the base struct of all operations. This will receive the -// Get, Delete, List, and Apply operations on all resources. -type Client struct { - Config *dcl.Config -} - -// NewClient creates a client that retries all operations a few times each. 
-func NewClient(c *dcl.Config) *Client { - return &Client{ - Config: c, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.go deleted file mode 100644 index 2a1b9710b9..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.go +++ /dev/null @@ -1,12861 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type Dashboard struct { - Name *string `json:"name"` - DisplayName *string `json:"displayName"` - GridLayout *DashboardGridLayout `json:"gridLayout"` - MosaicLayout *DashboardMosaicLayout `json:"mosaicLayout"` - RowLayout *DashboardRowLayout `json:"rowLayout"` - ColumnLayout *DashboardColumnLayout `json:"columnLayout"` - Project *string `json:"project"` - Etag *string `json:"etag"` -} - -func (r *Dashboard) String() string { - return dcl.SprintResource(r) -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum. -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum. -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum. -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum. -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum. -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum. -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum. 
-type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum. 
-type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum. 
-type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum. 
-type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum. 
-type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum. 
-type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum. 
-type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum. 
-type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum. -type DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum string - -// DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnumRef returns a *DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnumRef(s string) *DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum { - v := DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"PLOT_TYPE_UNSPECIFIED", "LINE", "STACKED_AREA", "STACKED_BAR", "HEATMAP"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartThresholdsColorEnum. -type DashboardGridLayoutWidgetsXyChartThresholdsColorEnum string - -// DashboardGridLayoutWidgetsXyChartThresholdsColorEnumRef returns a *DashboardGridLayoutWidgetsXyChartThresholdsColorEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartThresholdsColorEnumRef(s string) *DashboardGridLayoutWidgetsXyChartThresholdsColorEnum { - v := DashboardGridLayoutWidgetsXyChartThresholdsColorEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartThresholdsColorEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"COLOR_UNSPECIFIED", "GREY", "BLUE", "GREEN", "YELLOW", "ORANGE", "RED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartThresholdsColorEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum. -type DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum string - -// DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnumRef returns a *DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnumRef(s string) *DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum { - v := DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "ABOVE", "BELOW"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartXAxisScaleEnum. -type DashboardGridLayoutWidgetsXyChartXAxisScaleEnum string - -// DashboardGridLayoutWidgetsXyChartXAxisScaleEnumRef returns a *DashboardGridLayoutWidgetsXyChartXAxisScaleEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartXAxisScaleEnumRef(s string) *DashboardGridLayoutWidgetsXyChartXAxisScaleEnum { - v := DashboardGridLayoutWidgetsXyChartXAxisScaleEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartXAxisScaleEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"SCALE_UNSPECIFIED", "LINEAR", "LOG10"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartXAxisScaleEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartYAxisScaleEnum. -type DashboardGridLayoutWidgetsXyChartYAxisScaleEnum string - -// DashboardGridLayoutWidgetsXyChartYAxisScaleEnumRef returns a *DashboardGridLayoutWidgetsXyChartYAxisScaleEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardGridLayoutWidgetsXyChartYAxisScaleEnumRef(s string) *DashboardGridLayoutWidgetsXyChartYAxisScaleEnum { - v := DashboardGridLayoutWidgetsXyChartYAxisScaleEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartYAxisScaleEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"SCALE_UNSPECIFIED", "LINEAR", "LOG10"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartYAxisScaleEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum. -type DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum string - -// DashboardGridLayoutWidgetsXyChartChartOptionsModeEnumRef returns a *DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsXyChartChartOptionsModeEnumRef(s string) *DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum { - v := DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"MODE_UNSPECIFIED", "COLOR", "X_RAY", "STATS"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum. 
-type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum. 
-type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum. 
-type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum. 
-type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum. 
-type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum. -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum. -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum. -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum. -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum. -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum. -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum. -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum. -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum. -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum string - -// DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef returns a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - v := DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum. -type DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum string - -// DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnumRef returns a *DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnumRef(s string) *DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum { - v := DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"SPARK_CHART_TYPE_UNSPECIFIED", "SPARK_LINE", "SPARK_BAR"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardThresholdsColorEnum. -type DashboardGridLayoutWidgetsScorecardThresholdsColorEnum string - -// DashboardGridLayoutWidgetsScorecardThresholdsColorEnumRef returns a *DashboardGridLayoutWidgetsScorecardThresholdsColorEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsScorecardThresholdsColorEnumRef(s string) *DashboardGridLayoutWidgetsScorecardThresholdsColorEnum { - v := DashboardGridLayoutWidgetsScorecardThresholdsColorEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardThresholdsColorEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"COLOR_UNSPECIFIED", "GREY", "BLUE", "GREEN", "YELLOW", "ORANGE", "RED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardThresholdsColorEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum. -type DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum string - -// DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnumRef returns a *DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnumRef(s string) *DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum { - v := DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "ABOVE", "BELOW"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardGridLayoutWidgetsTextFormatEnum. -type DashboardGridLayoutWidgetsTextFormatEnum string - -// DashboardGridLayoutWidgetsTextFormatEnumRef returns a *DashboardGridLayoutWidgetsTextFormatEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardGridLayoutWidgetsTextFormatEnumRef(s string) *DashboardGridLayoutWidgetsTextFormatEnum { - v := DashboardGridLayoutWidgetsTextFormatEnum(s) - return &v -} - -func (v DashboardGridLayoutWidgetsTextFormatEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"FORMAT_UNSPECIFIED", "MARKDOWN", "RAW"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardGridLayoutWidgetsTextFormatEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum. 
-type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum. 
-type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum. 
-type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum. 
-type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum. 
-type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum. 
-type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum. -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum. -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum. -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum. -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum. -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum. -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum. -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum. -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum. -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"PLOT_TYPE_UNSPECIFIED", "LINE", "STACKED_AREA", "STACKED_BAR", "HEATMAP"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum. -type DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"COLOR_UNSPECIFIED", "GREY", "BLUE", "GREEN", "YELLOW", "ORANGE", "RED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum. -type DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "ABOVE", "BELOW"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum. -type DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"SCALE_UNSPECIFIED", "LINEAR", "LOG10"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum. -type DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"SCALE_UNSPECIFIED", "LINEAR", "LOG10"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum. -type DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum string - -// DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnumRef returns a *DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnumRef(s string) *DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum { - v := DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"MODE_UNSPECIFIED", "COLOR", "X_RAY", "STATS"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum. 
-type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum. 
-type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum. 
-type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum. 
-type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum. 
-type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum. -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum. -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum. -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum. -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum. -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum. -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum. -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum. -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum. -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum. -type DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"SPARK_CHART_TYPE_UNSPECIFIED", "SPARK_LINE", "SPARK_BAR"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum. -type DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"COLOR_UNSPECIFIED", "GREY", "BLUE", "GREEN", "YELLOW", "ORANGE", "RED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum. -type DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum string - -// DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnumRef returns a *DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnumRef(s string) *DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum { - v := DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "ABOVE", "BELOW"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardMosaicLayoutTilesWidgetTextFormatEnum. -type DashboardMosaicLayoutTilesWidgetTextFormatEnum string - -// DashboardMosaicLayoutTilesWidgetTextFormatEnumRef returns a *DashboardMosaicLayoutTilesWidgetTextFormatEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardMosaicLayoutTilesWidgetTextFormatEnumRef(s string) *DashboardMosaicLayoutTilesWidgetTextFormatEnum { - v := DashboardMosaicLayoutTilesWidgetTextFormatEnum(s) - return &v -} - -func (v DashboardMosaicLayoutTilesWidgetTextFormatEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"FORMAT_UNSPECIFIED", "MARKDOWN", "RAW"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardMosaicLayoutTilesWidgetTextFormatEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum. 
-type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum. 
-type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum. 
-type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum. 
-type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum. 
-type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum. -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum. -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum. -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum. -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum. -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum. -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum. -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum. -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum. -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum. -type DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum string - -// DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum { - v := DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"PLOT_TYPE_UNSPECIFIED", "LINE", "STACKED_AREA", "STACKED_BAR", "HEATMAP"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum. -type DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum string - -// DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum { - v := DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"COLOR_UNSPECIFIED", "GREY", "BLUE", "GREEN", "YELLOW", "ORANGE", "RED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum. -type DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum string - -// DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum { - v := DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "ABOVE", "BELOW"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum. -type DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum string - -// DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum { - v := DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"SCALE_UNSPECIFIED", "LINEAR", "LOG10"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum. -type DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum string - -// DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum { - v := DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"SCALE_UNSPECIFIED", "LINEAR", "LOG10"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum. -type DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum string - -// DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnumRef returns a *DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnumRef(s string) *DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum { - v := DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"MODE_UNSPECIFIED", "COLOR", "X_RAY", "STATS"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum. 
-type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum. 
-type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum. 
-type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum. 
-type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum. 
-type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum. -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum. -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum. -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum. -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum. -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum. -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum. -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum. -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum. -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum string - -// DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - v := DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum. -type DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum string - -// DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum { - v := DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"SPARK_CHART_TYPE_UNSPECIFIED", "SPARK_LINE", "SPARK_BAR"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum. -type DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum string - -// DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum { - v := DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"COLOR_UNSPECIFIED", "GREY", "BLUE", "GREEN", "YELLOW", "ORANGE", "RED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum. -type DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum string - -// DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnumRef returns a *DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnumRef(s string) *DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum { - v := DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "ABOVE", "BELOW"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardRowLayoutRowsWidgetsTextFormatEnum. -type DashboardRowLayoutRowsWidgetsTextFormatEnum string - -// DashboardRowLayoutRowsWidgetsTextFormatEnumRef returns a *DashboardRowLayoutRowsWidgetsTextFormatEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardRowLayoutRowsWidgetsTextFormatEnumRef(s string) *DashboardRowLayoutRowsWidgetsTextFormatEnum { - v := DashboardRowLayoutRowsWidgetsTextFormatEnum(s) - return &v -} - -func (v DashboardRowLayoutRowsWidgetsTextFormatEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"FORMAT_UNSPECIFIED", "MARKDOWN", "RAW"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardRowLayoutRowsWidgetsTextFormatEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum. -type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"PLOT_TYPE_UNSPECIFIED", "LINE", "STACKED_AREA", "STACKED_BAR", "HEATMAP"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum. -type DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"COLOR_UNSPECIFIED", "GREY", "BLUE", "GREEN", "YELLOW", "ORANGE", "RED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum. 
-type DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "ABOVE", "BELOW"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum. -type DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"SCALE_UNSPECIFIED", "LINEAR", "LOG10"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum. -type DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"SCALE_UNSPECIFIED", "LINEAR", "LOG10"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum. -type DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum string - -// DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnumRef returns a *DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum { - v := DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"MODE_UNSPECIFIED", "COLOR", "X_RAY", "STATS"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum. -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum. -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum. -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum. -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum. -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum. -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum. 
-type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum. 
-type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum. 
-type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum. 
-type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum. 
-type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_MAKE_DISTRIBUTION", "ALIGN_PERCENT_CHANGE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum. 
-type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", "REDUCE_FRACTION_LESS_THAN", "REDUCE_MAKE_DISTRIBUTION"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum. 
-type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "METHOD_MEAN", "METHOD_MAX", "METHOD_MIN", "METHOD_SUM", "METHOD_LATEST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum. 
-type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "TOP", "BOTTOM"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum. -type DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"SPARK_CHART_TYPE_UNSPECIFIED", "SPARK_LINE", "SPARK_BAR"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum. -type DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"COLOR_UNSPECIFIED", "GREY", "BLUE", "GREEN", "YELLOW", "ORANGE", "RED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum. 
-type DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum string - -// DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnumRef returns a *DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum { - v := DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DIRECTION_UNSPECIFIED", "ABOVE", "BELOW"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum DashboardColumnLayoutColumnsWidgetsTextFormatEnum. -type DashboardColumnLayoutColumnsWidgetsTextFormatEnum string - -// DashboardColumnLayoutColumnsWidgetsTextFormatEnumRef returns a *DashboardColumnLayoutColumnsWidgetsTextFormatEnum with the value of string s -// If the empty string is provided, nil is returned. -func DashboardColumnLayoutColumnsWidgetsTextFormatEnumRef(s string) *DashboardColumnLayoutColumnsWidgetsTextFormatEnum { - v := DashboardColumnLayoutColumnsWidgetsTextFormatEnum(s) - return &v -} - -func (v DashboardColumnLayoutColumnsWidgetsTextFormatEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"FORMAT_UNSPECIFIED", "MARKDOWN", "RAW"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "DashboardColumnLayoutColumnsWidgetsTextFormatEnum", - Value: string(v), - Valid: []string{}, - } -} - -type DashboardGridLayout struct { - empty bool `json:"-"` - Columns *int64 `json:"columns"` - Widgets []DashboardGridLayoutWidgets `json:"widgets"` -} - -type jsonDashboardGridLayout DashboardGridLayout - -func (r *DashboardGridLayout) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayout - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayout - } else { - - r.Columns = res.Columns - - r.Widgets = res.Widgets - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayout is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayout *DashboardGridLayout = &DashboardGridLayout{empty: true} - -func (r *DashboardGridLayout) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayout) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayout) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgets struct { - empty bool `json:"-"` - Title *string `json:"title"` - XyChart *DashboardGridLayoutWidgetsXyChart `json:"xyChart"` - Scorecard *DashboardGridLayoutWidgetsScorecard `json:"scorecard"` - Text *DashboardGridLayoutWidgetsText `json:"text"` - Blank *DashboardGridLayoutWidgetsBlank `json:"blank"` - LogsPanel *DashboardGridLayoutWidgetsLogsPanel `json:"logsPanel"` -} - -type jsonDashboardGridLayoutWidgets DashboardGridLayoutWidgets - -func (r *DashboardGridLayoutWidgets) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgets - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgets - } else { - - r.Title = res.Title - - r.XyChart = res.XyChart - - r.Scorecard = res.Scorecard - - r.Text = res.Text - - r.Blank = res.Blank - - r.LogsPanel = res.LogsPanel - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgets is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgets *DashboardGridLayoutWidgets = &DashboardGridLayoutWidgets{empty: true} - -func (r *DashboardGridLayoutWidgets) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgets) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgets) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChart struct { - empty bool `json:"-"` - DataSets []DashboardGridLayoutWidgetsXyChartDataSets `json:"dataSets"` - TimeshiftDuration *string `json:"timeshiftDuration"` - Thresholds []DashboardGridLayoutWidgetsXyChartThresholds `json:"thresholds"` - XAxis *DashboardGridLayoutWidgetsXyChartXAxis `json:"xAxis"` - YAxis *DashboardGridLayoutWidgetsXyChartYAxis `json:"yAxis"` - ChartOptions *DashboardGridLayoutWidgetsXyChartChartOptions `json:"chartOptions"` -} - -type jsonDashboardGridLayoutWidgetsXyChart DashboardGridLayoutWidgetsXyChart - -func (r *DashboardGridLayoutWidgetsXyChart) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChart - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChart - } else { - - r.DataSets = res.DataSets - - r.TimeshiftDuration = res.TimeshiftDuration - - r.Thresholds = res.Thresholds - - r.XAxis = res.XAxis - - r.YAxis = res.YAxis - - r.ChartOptions = res.ChartOptions - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChart is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsXyChart *DashboardGridLayoutWidgetsXyChart = &DashboardGridLayoutWidgetsXyChart{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChart) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChart) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChart) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartDataSets struct { - empty bool `json:"-"` - TimeSeriesQuery *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery `json:"timeSeriesQuery"` - PlotType *DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum `json:"plotType"` - LegendTemplate *string `json:"legendTemplate"` - MinAlignmentPeriod *string `json:"minAlignmentPeriod"` -} - -type jsonDashboardGridLayoutWidgetsXyChartDataSets DashboardGridLayoutWidgetsXyChartDataSets - -func (r *DashboardGridLayoutWidgetsXyChartDataSets) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartDataSets - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartDataSets - } else { - - r.TimeSeriesQuery = res.TimeSeriesQuery - - r.PlotType = res.PlotType - - r.LegendTemplate = res.LegendTemplate - - r.MinAlignmentPeriod = res.MinAlignmentPeriod - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartDataSets is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsXyChartDataSets *DashboardGridLayoutWidgetsXyChartDataSets = &DashboardGridLayoutWidgetsXyChartDataSets{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartDataSets) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSets) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSets) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery struct { - empty bool `json:"-"` - TimeSeriesFilter *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter `json:"timeSeriesFilter"` - TimeSeriesFilterRatio *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio `json:"timeSeriesFilterRatio"` - TimeSeriesQueryLanguage *string `json:"timeSeriesQueryLanguage"` - UnitOverride *string `json:"unitOverride"` -} - -type jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery - } else { - - r.TimeSeriesFilter = res.TimeSeriesFilter - - r.TimeSeriesFilterRatio = res.TimeSeriesFilterRatio - - r.TimeSeriesQueryLanguage = res.TimeSeriesQueryLanguage - - r.UnitOverride = res.UnitOverride - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation `json:"aggregation"` - SecondaryAggregation *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = 
*EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type 
jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - 
json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter struct { - empty bool `json:"-"` - RankingMethod *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction 
*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio struct { - empty bool `json:"-"` - Numerator *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator `json:"numerator"` - Denominator *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator `json:"denominator"` - SecondaryAggregation *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - if 
err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - } else { - - r.Numerator = res.Numerator - - r.Denominator = res.Denominator - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation `json:"aggregation"` -} - -type jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator 
DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation struct { - empty bool 
`json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation `json:"aggregation"` -} - -type jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - } else { - - r.Filter = res.Filter - - 
r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation 
DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter struct { - empty bool `json:"-"` - 
RankingMethod *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartThresholds struct { - empty bool `json:"-"` - Label *string `json:"label"` - Value *float64 `json:"value"` - Color *DashboardGridLayoutWidgetsXyChartThresholdsColorEnum `json:"color"` - Direction *DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum `json:"direction"` -} - -type jsonDashboardGridLayoutWidgetsXyChartThresholds DashboardGridLayoutWidgetsXyChartThresholds - -func (r *DashboardGridLayoutWidgetsXyChartThresholds) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartThresholds - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartThresholds - } else { - - r.Label = res.Label - - r.Value = res.Value - - r.Color = res.Color - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartThresholds is -// 
empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsXyChartThresholds *DashboardGridLayoutWidgetsXyChartThresholds = &DashboardGridLayoutWidgetsXyChartThresholds{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartThresholds) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartThresholds) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartThresholds) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartXAxis struct { - empty bool `json:"-"` - Label *string `json:"label"` - Scale *DashboardGridLayoutWidgetsXyChartXAxisScaleEnum `json:"scale"` -} - -type jsonDashboardGridLayoutWidgetsXyChartXAxis DashboardGridLayoutWidgetsXyChartXAxis - -func (r *DashboardGridLayoutWidgetsXyChartXAxis) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartXAxis - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartXAxis - } else { - - r.Label = res.Label - - r.Scale = res.Scale - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartXAxis is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsXyChartXAxis *DashboardGridLayoutWidgetsXyChartXAxis = &DashboardGridLayoutWidgetsXyChartXAxis{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartXAxis) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartXAxis) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartXAxis) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartYAxis struct { - empty bool `json:"-"` - Label *string `json:"label"` - Scale *DashboardGridLayoutWidgetsXyChartYAxisScaleEnum `json:"scale"` -} - -type jsonDashboardGridLayoutWidgetsXyChartYAxis DashboardGridLayoutWidgetsXyChartYAxis - -func (r *DashboardGridLayoutWidgetsXyChartYAxis) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartYAxis - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartYAxis - } else { - - r.Label = res.Label - - r.Scale = res.Scale - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartYAxis is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsXyChartYAxis *DashboardGridLayoutWidgetsXyChartYAxis = &DashboardGridLayoutWidgetsXyChartYAxis{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartYAxis) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartYAxis) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartYAxis) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsXyChartChartOptions struct { - empty bool `json:"-"` - Mode *DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum `json:"mode"` -} - -type jsonDashboardGridLayoutWidgetsXyChartChartOptions DashboardGridLayoutWidgetsXyChartChartOptions - -func (r *DashboardGridLayoutWidgetsXyChartChartOptions) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsXyChartChartOptions - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsXyChartChartOptions - } else { - - r.Mode = res.Mode - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsXyChartChartOptions is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsXyChartChartOptions *DashboardGridLayoutWidgetsXyChartChartOptions = &DashboardGridLayoutWidgetsXyChartChartOptions{empty: true} - -func (r *DashboardGridLayoutWidgetsXyChartChartOptions) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsXyChartChartOptions) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsXyChartChartOptions) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecard struct { - empty bool `json:"-"` - TimeSeriesQuery *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery `json:"timeSeriesQuery"` - GaugeView *DashboardGridLayoutWidgetsScorecardGaugeView `json:"gaugeView"` - SparkChartView *DashboardGridLayoutWidgetsScorecardSparkChartView `json:"sparkChartView"` - Thresholds []DashboardGridLayoutWidgetsScorecardThresholds `json:"thresholds"` -} - -type jsonDashboardGridLayoutWidgetsScorecard DashboardGridLayoutWidgetsScorecard - -func (r *DashboardGridLayoutWidgetsScorecard) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecard - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecard - } else { - - r.TimeSeriesQuery = res.TimeSeriesQuery - - r.GaugeView = res.GaugeView - - r.SparkChartView = res.SparkChartView - - r.Thresholds = res.Thresholds - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecard is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsScorecard *DashboardGridLayoutWidgetsScorecard = &DashboardGridLayoutWidgetsScorecard{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecard) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecard) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecard) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardTimeSeriesQuery struct { - empty bool `json:"-"` - TimeSeriesFilter *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter `json:"timeSeriesFilter"` - TimeSeriesFilterRatio *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio `json:"timeSeriesFilterRatio"` - TimeSeriesQueryLanguage *string `json:"timeSeriesQueryLanguage"` - UnitOverride *string `json:"unitOverride"` -} - -type jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQuery DashboardGridLayoutWidgetsScorecardTimeSeriesQuery - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQuery - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQuery - } else { - - r.TimeSeriesFilter = res.TimeSeriesFilter - - r.TimeSeriesFilterRatio = res.TimeSeriesFilterRatio - - r.TimeSeriesQueryLanguage = res.TimeSeriesQueryLanguage - - r.UnitOverride = res.UnitOverride - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardTimeSeriesQuery is -// empty. Go lacks global const objects, but this object should be treated -// as one. 
Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQuery *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery = &DashboardGridLayoutWidgetsScorecardTimeSeriesQuery{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation `json:"aggregation"` - SecondaryAggregation *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - r.SecondaryAggregation = res.SecondaryAggregation - - 
r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum 
`json:"perSeriesAligner"` - CrossSeriesReducer *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter struct { - empty bool `json:"-"` - RankingMethod *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = 
*EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio struct { - empty bool `json:"-"` - Numerator *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator `json:"numerator"` - Denominator *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator `json:"denominator"` - SecondaryAggregation *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter 
*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - } else { - - r.Numerator = res.Numerator - - r.Denominator = res.Denominator - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation `json:"aggregation"` -} - -type jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - 
if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation `json:"aggregation"` -} - -type 
jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - 
json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer 
*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter struct { - empty bool `json:"-"` - RankingMethod *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r 
= *EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardGaugeView struct { - empty bool `json:"-"` - LowerBound *float64 `json:"lowerBound"` - UpperBound *float64 `json:"upperBound"` -} - -type jsonDashboardGridLayoutWidgetsScorecardGaugeView DashboardGridLayoutWidgetsScorecardGaugeView - -func (r *DashboardGridLayoutWidgetsScorecardGaugeView) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardGaugeView - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m 
map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecardGaugeView - } else { - - r.LowerBound = res.LowerBound - - r.UpperBound = res.UpperBound - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardGaugeView is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsScorecardGaugeView *DashboardGridLayoutWidgetsScorecardGaugeView = &DashboardGridLayoutWidgetsScorecardGaugeView{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardGaugeView) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardGaugeView) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardGaugeView) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardSparkChartView struct { - empty bool `json:"-"` - SparkChartType *DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum `json:"sparkChartType"` - MinAlignmentPeriod *string `json:"minAlignmentPeriod"` -} - -type jsonDashboardGridLayoutWidgetsScorecardSparkChartView DashboardGridLayoutWidgetsScorecardSparkChartView - -func (r *DashboardGridLayoutWidgetsScorecardSparkChartView) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardSparkChartView - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecardSparkChartView - } else { - - r.SparkChartType = res.SparkChartType - - r.MinAlignmentPeriod = res.MinAlignmentPeriod - - } - return 
nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardSparkChartView is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsScorecardSparkChartView *DashboardGridLayoutWidgetsScorecardSparkChartView = &DashboardGridLayoutWidgetsScorecardSparkChartView{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardSparkChartView) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardSparkChartView) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardSparkChartView) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsScorecardThresholds struct { - empty bool `json:"-"` - Label *string `json:"label"` - Value *float64 `json:"value"` - Color *DashboardGridLayoutWidgetsScorecardThresholdsColorEnum `json:"color"` - Direction *DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum `json:"direction"` -} - -type jsonDashboardGridLayoutWidgetsScorecardThresholds DashboardGridLayoutWidgetsScorecardThresholds - -func (r *DashboardGridLayoutWidgetsScorecardThresholds) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsScorecardThresholds - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsScorecardThresholds - } else { - - r.Label = res.Label - - r.Value = res.Value - - r.Color = res.Color - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsScorecardThresholds is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardGridLayoutWidgetsScorecardThresholds *DashboardGridLayoutWidgetsScorecardThresholds = &DashboardGridLayoutWidgetsScorecardThresholds{empty: true} - -func (r *DashboardGridLayoutWidgetsScorecardThresholds) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsScorecardThresholds) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsScorecardThresholds) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsText struct { - empty bool `json:"-"` - Content *string `json:"content"` - Format *DashboardGridLayoutWidgetsTextFormatEnum `json:"format"` -} - -type jsonDashboardGridLayoutWidgetsText DashboardGridLayoutWidgetsText - -func (r *DashboardGridLayoutWidgetsText) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsText - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsText - } else { - - r.Content = res.Content - - r.Format = res.Format - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsText is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsText *DashboardGridLayoutWidgetsText = &DashboardGridLayoutWidgetsText{empty: true} - -func (r *DashboardGridLayoutWidgetsText) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsText) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsText) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsBlank struct { - empty bool `json:"-"` -} - -type jsonDashboardGridLayoutWidgetsBlank DashboardGridLayoutWidgetsBlank - -func (r *DashboardGridLayoutWidgetsBlank) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsBlank - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsBlank - } else { - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsBlank is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsBlank *DashboardGridLayoutWidgetsBlank = &DashboardGridLayoutWidgetsBlank{empty: true} - -func (r *DashboardGridLayoutWidgetsBlank) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsBlank) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsBlank) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardGridLayoutWidgetsLogsPanel struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - ResourceNames []string `json:"resourceNames"` -} - -type jsonDashboardGridLayoutWidgetsLogsPanel DashboardGridLayoutWidgetsLogsPanel - -func (r *DashboardGridLayoutWidgetsLogsPanel) UnmarshalJSON(data []byte) error { - var res jsonDashboardGridLayoutWidgetsLogsPanel - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardGridLayoutWidgetsLogsPanel - } else { - - r.Filter = res.Filter - - r.ResourceNames = res.ResourceNames - - } - return nil -} - -// This object is used to assert a desired state where this DashboardGridLayoutWidgetsLogsPanel is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardGridLayoutWidgetsLogsPanel *DashboardGridLayoutWidgetsLogsPanel = &DashboardGridLayoutWidgetsLogsPanel{empty: true} - -func (r *DashboardGridLayoutWidgetsLogsPanel) Empty() bool { - return r.empty -} - -func (r *DashboardGridLayoutWidgetsLogsPanel) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardGridLayoutWidgetsLogsPanel) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayout struct { - empty bool `json:"-"` - Columns *int64 `json:"columns"` - Tiles []DashboardMosaicLayoutTiles `json:"tiles"` -} - -type jsonDashboardMosaicLayout DashboardMosaicLayout - -func (r *DashboardMosaicLayout) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayout - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayout - } else { - - r.Columns = res.Columns - - r.Tiles = res.Tiles - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayout is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayout *DashboardMosaicLayout = &DashboardMosaicLayout{empty: true} - -func (r *DashboardMosaicLayout) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayout) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayout) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTiles struct { - empty bool `json:"-"` - XPos *int64 `json:"xPos"` - YPos *int64 `json:"yPos"` - Width *int64 `json:"width"` - Height *int64 `json:"height"` - Widget *DashboardMosaicLayoutTilesWidget `json:"widget"` -} - -type jsonDashboardMosaicLayoutTiles DashboardMosaicLayoutTiles - -func (r *DashboardMosaicLayoutTiles) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTiles - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTiles - } else { - - r.XPos = res.XPos - - r.YPos = res.YPos - - r.Width = res.Width - - r.Height = res.Height - - r.Widget = res.Widget - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTiles is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTiles *DashboardMosaicLayoutTiles = &DashboardMosaicLayoutTiles{empty: true} - -func (r *DashboardMosaicLayoutTiles) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTiles) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTiles) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidget struct { - empty bool `json:"-"` - Title *string `json:"title"` - XyChart *DashboardMosaicLayoutTilesWidgetXyChart `json:"xyChart"` - Scorecard *DashboardMosaicLayoutTilesWidgetScorecard `json:"scorecard"` - Text *DashboardMosaicLayoutTilesWidgetText `json:"text"` - Blank *DashboardMosaicLayoutTilesWidgetBlank `json:"blank"` - LogsPanel *DashboardMosaicLayoutTilesWidgetLogsPanel `json:"logsPanel"` -} - -type jsonDashboardMosaicLayoutTilesWidget DashboardMosaicLayoutTilesWidget - -func (r *DashboardMosaicLayoutTilesWidget) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidget - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidget - } else { - - r.Title = res.Title - - r.XyChart = res.XyChart - - r.Scorecard = res.Scorecard - - r.Text = res.Text - - r.Blank = res.Blank - - r.LogsPanel = res.LogsPanel - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidget is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidget *DashboardMosaicLayoutTilesWidget = &DashboardMosaicLayoutTilesWidget{empty: true} - -func (r *DashboardMosaicLayoutTilesWidget) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidget) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidget) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChart struct { - empty bool `json:"-"` - DataSets []DashboardMosaicLayoutTilesWidgetXyChartDataSets `json:"dataSets"` - TimeshiftDuration *string `json:"timeshiftDuration"` - Thresholds []DashboardMosaicLayoutTilesWidgetXyChartThresholds `json:"thresholds"` - XAxis *DashboardMosaicLayoutTilesWidgetXyChartXAxis `json:"xAxis"` - YAxis *DashboardMosaicLayoutTilesWidgetXyChartYAxis `json:"yAxis"` - ChartOptions *DashboardMosaicLayoutTilesWidgetXyChartChartOptions `json:"chartOptions"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChart DashboardMosaicLayoutTilesWidgetXyChart - -func (r *DashboardMosaicLayoutTilesWidgetXyChart) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChart - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChart - } else { - - r.DataSets = res.DataSets - - r.TimeshiftDuration = res.TimeshiftDuration - - r.Thresholds = res.Thresholds - - r.XAxis = res.XAxis - - r.YAxis = res.YAxis - - r.ChartOptions = res.ChartOptions - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChart is -// empty. Go lacks global const objects, but this object should be treated -// as one. 
Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetXyChart *DashboardMosaicLayoutTilesWidgetXyChart = &DashboardMosaicLayoutTilesWidgetXyChart{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChart) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChart) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChart) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartDataSets struct { - empty bool `json:"-"` - TimeSeriesQuery *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery `json:"timeSeriesQuery"` - PlotType *DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum `json:"plotType"` - LegendTemplate *string `json:"legendTemplate"` - MinAlignmentPeriod *string `json:"minAlignmentPeriod"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartDataSets DashboardMosaicLayoutTilesWidgetXyChartDataSets - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSets) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChartDataSets - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSets - } else { - - r.TimeSeriesQuery = res.TimeSeriesQuery - - r.PlotType = res.PlotType - - r.LegendTemplate = res.LegendTemplate - - r.MinAlignmentPeriod = res.MinAlignmentPeriod - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartDataSets is -// empty. Go lacks global const objects, but this object should be treated -// as one. 
Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSets *DashboardMosaicLayoutTilesWidgetXyChartDataSets = &DashboardMosaicLayoutTilesWidgetXyChartDataSets{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSets) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSets) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSets) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery struct { - empty bool `json:"-"` - TimeSeriesFilter *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter `json:"timeSeriesFilter"` - TimeSeriesFilterRatio *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio `json:"timeSeriesFilterRatio"` - TimeSeriesQueryLanguage *string `json:"timeSeriesQueryLanguage"` - UnitOverride *string `json:"unitOverride"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery - } else { - - r.TimeSeriesFilter = res.TimeSeriesFilter - - r.TimeSeriesFilterRatio = res.TimeSeriesFilterRatio - - r.TimeSeriesQueryLanguage = res.TimeSeriesQueryLanguage - - r.UnitOverride = res.UnitOverride - - } - return nil -} - -// 
This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation `json:"aggregation"` - SecondaryAggregation *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum 
`json:"perSeriesAligner"` - CrossSeriesReducer *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - if err := 
json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter struct { - empty bool `json:"-"` - RankingMethod 
*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio struct { - empty bool `json:"-"` - Numerator *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator `json:"numerator"` - Denominator *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator `json:"denominator"` - SecondaryAggregation *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - } else { - - r.Numerator = res.Numerator - - r.Denominator = res.Denominator - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation 
*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation `json:"aggregation"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type 
DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation `json:"aggregation"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type 
DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter struct { - empty bool `json:"-"` - RankingMethod *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter 
- if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartThresholds struct { - empty bool `json:"-"` - Label *string `json:"label"` - Value *float64 `json:"value"` - Color *DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum `json:"color"` - Direction 
*DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum `json:"direction"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartThresholds DashboardMosaicLayoutTilesWidgetXyChartThresholds - -func (r *DashboardMosaicLayoutTilesWidgetXyChartThresholds) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChartThresholds - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartThresholds - } else { - - r.Label = res.Label - - r.Value = res.Value - - r.Color = res.Color - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartThresholds is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetXyChartThresholds *DashboardMosaicLayoutTilesWidgetXyChartThresholds = &DashboardMosaicLayoutTilesWidgetXyChartThresholds{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartThresholds) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartThresholds) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartThresholds) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartXAxis struct { - empty bool `json:"-"` - Label *string `json:"label"` - Scale *DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum `json:"scale"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartXAxis DashboardMosaicLayoutTilesWidgetXyChartXAxis - -func (r 
*DashboardMosaicLayoutTilesWidgetXyChartXAxis) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChartXAxis - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartXAxis - } else { - - r.Label = res.Label - - r.Scale = res.Scale - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartXAxis is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetXyChartXAxis *DashboardMosaicLayoutTilesWidgetXyChartXAxis = &DashboardMosaicLayoutTilesWidgetXyChartXAxis{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartXAxis) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartXAxis) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartXAxis) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartYAxis struct { - empty bool `json:"-"` - Label *string `json:"label"` - Scale *DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum `json:"scale"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartYAxis DashboardMosaicLayoutTilesWidgetXyChartYAxis - -func (r *DashboardMosaicLayoutTilesWidgetXyChartYAxis) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChartYAxis - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartYAxis - } 
else { - - r.Label = res.Label - - r.Scale = res.Scale - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartYAxis is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetXyChartYAxis *DashboardMosaicLayoutTilesWidgetXyChartYAxis = &DashboardMosaicLayoutTilesWidgetXyChartYAxis{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartYAxis) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartYAxis) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartYAxis) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetXyChartChartOptions struct { - empty bool `json:"-"` - Mode *DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum `json:"mode"` -} - -type jsonDashboardMosaicLayoutTilesWidgetXyChartChartOptions DashboardMosaicLayoutTilesWidgetXyChartChartOptions - -func (r *DashboardMosaicLayoutTilesWidgetXyChartChartOptions) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetXyChartChartOptions - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetXyChartChartOptions - } else { - - r.Mode = res.Mode - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetXyChartChartOptions is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetXyChartChartOptions *DashboardMosaicLayoutTilesWidgetXyChartChartOptions = &DashboardMosaicLayoutTilesWidgetXyChartChartOptions{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartChartOptions) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartChartOptions) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetXyChartChartOptions) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecard struct { - empty bool `json:"-"` - TimeSeriesQuery *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery `json:"timeSeriesQuery"` - GaugeView *DashboardMosaicLayoutTilesWidgetScorecardGaugeView `json:"gaugeView"` - SparkChartView *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView `json:"sparkChartView"` - Thresholds []DashboardMosaicLayoutTilesWidgetScorecardThresholds `json:"thresholds"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecard DashboardMosaicLayoutTilesWidgetScorecard - -func (r *DashboardMosaicLayoutTilesWidgetScorecard) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecard - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecard - } else { - - r.TimeSeriesQuery = res.TimeSeriesQuery - - r.GaugeView = res.GaugeView - - r.SparkChartView = res.SparkChartView - - r.Thresholds = res.Thresholds - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecard is -// empty. Go lacks global const objects, but this object should be treated -// as one. 
Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetScorecard *DashboardMosaicLayoutTilesWidgetScorecard = &DashboardMosaicLayoutTilesWidgetScorecard{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecard) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecard) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecard) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery struct { - empty bool `json:"-"` - TimeSeriesFilter *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter `json:"timeSeriesFilter"` - TimeSeriesFilterRatio *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio `json:"timeSeriesFilterRatio"` - TimeSeriesQueryLanguage *string `json:"timeSeriesQueryLanguage"` - UnitOverride *string `json:"unitOverride"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery - } else { - - r.TimeSeriesFilter = res.TimeSeriesFilter - - r.TimeSeriesFilterRatio = res.TimeSeriesFilterRatio - - r.TimeSeriesQueryLanguage = res.TimeSeriesQueryLanguage - - r.UnitOverride = res.UnitOverride - - } - return nil -} - -// This object is used to assert a desired state where this 
DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation `json:"aggregation"` - SecondaryAggregation *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) 
== 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type 
jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - 
json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter struct { - empty bool `json:"-"` - RankingMethod *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction 
*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio struct { - empty bool `json:"-"` - Numerator *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator `json:"numerator"` - Denominator *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator `json:"denominator"` - SecondaryAggregation *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio - if 
err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio - } else { - - r.Numerator = res.Numerator - - r.Denominator = res.Denominator - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation `json:"aggregation"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator 
DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation struct { - empty bool 
`json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation `json:"aggregation"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - } else { - - r.Filter = res.Filter - - 
r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation 
DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter struct { - empty bool `json:"-"` - 
RankingMethod *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardGaugeView struct { - empty bool `json:"-"` - LowerBound *float64 `json:"lowerBound"` - UpperBound *float64 `json:"upperBound"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardGaugeView DashboardMosaicLayoutTilesWidgetScorecardGaugeView - -func (r *DashboardMosaicLayoutTilesWidgetScorecardGaugeView) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardGaugeView - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardGaugeView - } else { - - r.LowerBound = res.LowerBound - - r.UpperBound = res.UpperBound - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardGaugeView is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetScorecardGaugeView *DashboardMosaicLayoutTilesWidgetScorecardGaugeView = &DashboardMosaicLayoutTilesWidgetScorecardGaugeView{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardGaugeView) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardGaugeView) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardGaugeView) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardSparkChartView struct { - empty bool `json:"-"` - SparkChartType *DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum `json:"sparkChartType"` - MinAlignmentPeriod *string `json:"minAlignmentPeriod"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardSparkChartView DashboardMosaicLayoutTilesWidgetScorecardSparkChartView - -func (r *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardSparkChartView - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardSparkChartView - } else { - - r.SparkChartType = res.SparkChartType - - r.MinAlignmentPeriod = res.MinAlignmentPeriod - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardSparkChartView is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetScorecardSparkChartView *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView = &DashboardMosaicLayoutTilesWidgetScorecardSparkChartView{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetScorecardThresholds struct { - empty bool `json:"-"` - Label *string `json:"label"` - Value *float64 `json:"value"` - Color *DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum `json:"color"` - Direction *DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum `json:"direction"` -} - -type jsonDashboardMosaicLayoutTilesWidgetScorecardThresholds DashboardMosaicLayoutTilesWidgetScorecardThresholds - -func (r *DashboardMosaicLayoutTilesWidgetScorecardThresholds) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetScorecardThresholds - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetScorecardThresholds - } else { - - r.Label = res.Label - - r.Value = res.Value - - r.Color = res.Color - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetScorecardThresholds is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetScorecardThresholds *DashboardMosaicLayoutTilesWidgetScorecardThresholds = &DashboardMosaicLayoutTilesWidgetScorecardThresholds{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardThresholds) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardThresholds) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetScorecardThresholds) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetText struct { - empty bool `json:"-"` - Content *string `json:"content"` - Format *DashboardMosaicLayoutTilesWidgetTextFormatEnum `json:"format"` -} - -type jsonDashboardMosaicLayoutTilesWidgetText DashboardMosaicLayoutTilesWidgetText - -func (r *DashboardMosaicLayoutTilesWidgetText) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetText - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetText - } else { - - r.Content = res.Content - - r.Format = res.Format - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetText is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetText *DashboardMosaicLayoutTilesWidgetText = &DashboardMosaicLayoutTilesWidgetText{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetText) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetText) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetText) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetBlank struct { - empty bool `json:"-"` -} - -type jsonDashboardMosaicLayoutTilesWidgetBlank DashboardMosaicLayoutTilesWidgetBlank - -func (r *DashboardMosaicLayoutTilesWidgetBlank) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetBlank - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetBlank - } else { - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetBlank is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetBlank *DashboardMosaicLayoutTilesWidgetBlank = &DashboardMosaicLayoutTilesWidgetBlank{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetBlank) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetBlank) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetBlank) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardMosaicLayoutTilesWidgetLogsPanel struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - ResourceNames []string `json:"resourceNames"` -} - -type jsonDashboardMosaicLayoutTilesWidgetLogsPanel DashboardMosaicLayoutTilesWidgetLogsPanel - -func (r *DashboardMosaicLayoutTilesWidgetLogsPanel) UnmarshalJSON(data []byte) error { - var res jsonDashboardMosaicLayoutTilesWidgetLogsPanel - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardMosaicLayoutTilesWidgetLogsPanel - } else { - - r.Filter = res.Filter - - r.ResourceNames = res.ResourceNames - - } - return nil -} - -// This object is used to assert a desired state where this DashboardMosaicLayoutTilesWidgetLogsPanel is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardMosaicLayoutTilesWidgetLogsPanel *DashboardMosaicLayoutTilesWidgetLogsPanel = &DashboardMosaicLayoutTilesWidgetLogsPanel{empty: true} - -func (r *DashboardMosaicLayoutTilesWidgetLogsPanel) Empty() bool { - return r.empty -} - -func (r *DashboardMosaicLayoutTilesWidgetLogsPanel) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardMosaicLayoutTilesWidgetLogsPanel) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayout struct { - empty bool `json:"-"` - Rows []DashboardRowLayoutRows `json:"rows"` -} - -type jsonDashboardRowLayout DashboardRowLayout - -func (r *DashboardRowLayout) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayout - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayout - } else { - - r.Rows = res.Rows - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayout is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayout *DashboardRowLayout = &DashboardRowLayout{empty: true} - -func (r *DashboardRowLayout) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayout) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayout) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRows struct { - empty bool `json:"-"` - Weight *int64 `json:"weight"` - Widgets []DashboardRowLayoutRowsWidgets `json:"widgets"` -} - -type jsonDashboardRowLayoutRows DashboardRowLayoutRows - -func (r *DashboardRowLayoutRows) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRows - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRows - } else { - - r.Weight = res.Weight - - r.Widgets = res.Widgets - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRows is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRows *DashboardRowLayoutRows = &DashboardRowLayoutRows{empty: true} - -func (r *DashboardRowLayoutRows) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRows) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRows) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgets struct { - empty bool `json:"-"` - Title *string `json:"title"` - XyChart *DashboardRowLayoutRowsWidgetsXyChart `json:"xyChart"` - Scorecard *DashboardRowLayoutRowsWidgetsScorecard `json:"scorecard"` - Text *DashboardRowLayoutRowsWidgetsText `json:"text"` - Blank *DashboardRowLayoutRowsWidgetsBlank `json:"blank"` - LogsPanel *DashboardRowLayoutRowsWidgetsLogsPanel `json:"logsPanel"` -} - -type jsonDashboardRowLayoutRowsWidgets DashboardRowLayoutRowsWidgets - -func (r *DashboardRowLayoutRowsWidgets) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgets - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgets - } else { - - r.Title = res.Title - - r.XyChart = res.XyChart - - r.Scorecard = res.Scorecard - - r.Text = res.Text - - r.Blank = res.Blank - - r.LogsPanel = res.LogsPanel - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgets is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgets *DashboardRowLayoutRowsWidgets = &DashboardRowLayoutRowsWidgets{empty: true} - -func (r *DashboardRowLayoutRowsWidgets) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgets) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgets) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChart struct { - empty bool `json:"-"` - DataSets []DashboardRowLayoutRowsWidgetsXyChartDataSets `json:"dataSets"` - TimeshiftDuration *string `json:"timeshiftDuration"` - Thresholds []DashboardRowLayoutRowsWidgetsXyChartThresholds `json:"thresholds"` - XAxis *DashboardRowLayoutRowsWidgetsXyChartXAxis `json:"xAxis"` - YAxis *DashboardRowLayoutRowsWidgetsXyChartYAxis `json:"yAxis"` - ChartOptions *DashboardRowLayoutRowsWidgetsXyChartChartOptions `json:"chartOptions"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChart DashboardRowLayoutRowsWidgetsXyChart - -func (r *DashboardRowLayoutRowsWidgetsXyChart) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChart - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChart - } else { - - r.DataSets = res.DataSets - - r.TimeshiftDuration = res.TimeshiftDuration - - r.Thresholds = res.Thresholds - - r.XAxis = res.XAxis - - r.YAxis = res.YAxis - - r.ChartOptions = res.ChartOptions - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChart is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsXyChart *DashboardRowLayoutRowsWidgetsXyChart = &DashboardRowLayoutRowsWidgetsXyChart{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChart) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChart) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChart) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartDataSets struct { - empty bool `json:"-"` - TimeSeriesQuery *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery `json:"timeSeriesQuery"` - PlotType *DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum `json:"plotType"` - LegendTemplate *string `json:"legendTemplate"` - MinAlignmentPeriod *string `json:"minAlignmentPeriod"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartDataSets DashboardRowLayoutRowsWidgetsXyChartDataSets - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSets) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartDataSets - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartDataSets - } else { - - r.TimeSeriesQuery = res.TimeSeriesQuery - - r.PlotType = res.PlotType - - r.LegendTemplate = res.LegendTemplate - - r.MinAlignmentPeriod = res.MinAlignmentPeriod - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartDataSets is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsXyChartDataSets *DashboardRowLayoutRowsWidgetsXyChartDataSets = &DashboardRowLayoutRowsWidgetsXyChartDataSets{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSets) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSets) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSets) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery struct { - empty bool `json:"-"` - TimeSeriesFilter *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter `json:"timeSeriesFilter"` - TimeSeriesFilterRatio *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio `json:"timeSeriesFilterRatio"` - TimeSeriesQueryLanguage *string `json:"timeSeriesQueryLanguage"` - UnitOverride *string `json:"unitOverride"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery - } else { - - r.TimeSeriesFilter = res.TimeSeriesFilter - - r.TimeSeriesFilterRatio = res.TimeSeriesFilterRatio - - r.TimeSeriesQueryLanguage = res.TimeSeriesQueryLanguage - - r.UnitOverride = res.UnitOverride - - } - return nil -} - -// This object is used to assert a desired state where this 
DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation `json:"aggregation"` - SecondaryAggregation *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m 
map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string 
`json:"groupByFields"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } 
- - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter struct { - empty bool `json:"-"` - RankingMethod *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` 
- Direction *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio struct { - empty bool `json:"-"` - Numerator *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator `json:"numerator"` - Denominator *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator `json:"denominator"` - SecondaryAggregation *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - } else { - - r.Numerator = res.Numerator - - r.Denominator = res.Denominator - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation `json:"aggregation"` -} - 
-type jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - if err := 
json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation 
*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation `json:"aggregation"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type 
DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter struct { - empty bool `json:"-"` - RankingMethod *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); 
err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartThresholds struct { - empty bool `json:"-"` - Label *string `json:"label"` - Value *float64 `json:"value"` - Color *DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum `json:"color"` - Direction *DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum `json:"direction"` -} - -type 
jsonDashboardRowLayoutRowsWidgetsXyChartThresholds DashboardRowLayoutRowsWidgetsXyChartThresholds - -func (r *DashboardRowLayoutRowsWidgetsXyChartThresholds) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartThresholds - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartThresholds - } else { - - r.Label = res.Label - - r.Value = res.Value - - r.Color = res.Color - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartThresholds is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsXyChartThresholds *DashboardRowLayoutRowsWidgetsXyChartThresholds = &DashboardRowLayoutRowsWidgetsXyChartThresholds{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartThresholds) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartThresholds) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartThresholds) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartXAxis struct { - empty bool `json:"-"` - Label *string `json:"label"` - Scale *DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum `json:"scale"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartXAxis DashboardRowLayoutRowsWidgetsXyChartXAxis - -func (r *DashboardRowLayoutRowsWidgetsXyChartXAxis) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartXAxis - if err := json.Unmarshal(data, &res); err != 
nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartXAxis - } else { - - r.Label = res.Label - - r.Scale = res.Scale - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartXAxis is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsXyChartXAxis *DashboardRowLayoutRowsWidgetsXyChartXAxis = &DashboardRowLayoutRowsWidgetsXyChartXAxis{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartXAxis) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartXAxis) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartXAxis) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartYAxis struct { - empty bool `json:"-"` - Label *string `json:"label"` - Scale *DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum `json:"scale"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartYAxis DashboardRowLayoutRowsWidgetsXyChartYAxis - -func (r *DashboardRowLayoutRowsWidgetsXyChartYAxis) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartYAxis - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartYAxis - } else { - - r.Label = res.Label - - r.Scale = res.Scale - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartYAxis is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsXyChartYAxis *DashboardRowLayoutRowsWidgetsXyChartYAxis = &DashboardRowLayoutRowsWidgetsXyChartYAxis{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartYAxis) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartYAxis) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartYAxis) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsXyChartChartOptions struct { - empty bool `json:"-"` - Mode *DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum `json:"mode"` -} - -type jsonDashboardRowLayoutRowsWidgetsXyChartChartOptions DashboardRowLayoutRowsWidgetsXyChartChartOptions - -func (r *DashboardRowLayoutRowsWidgetsXyChartChartOptions) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsXyChartChartOptions - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsXyChartChartOptions - } else { - - r.Mode = res.Mode - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsXyChartChartOptions is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsXyChartChartOptions *DashboardRowLayoutRowsWidgetsXyChartChartOptions = &DashboardRowLayoutRowsWidgetsXyChartChartOptions{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsXyChartChartOptions) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartChartOptions) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsXyChartChartOptions) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecard struct { - empty bool `json:"-"` - TimeSeriesQuery *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery `json:"timeSeriesQuery"` - GaugeView *DashboardRowLayoutRowsWidgetsScorecardGaugeView `json:"gaugeView"` - SparkChartView *DashboardRowLayoutRowsWidgetsScorecardSparkChartView `json:"sparkChartView"` - Thresholds []DashboardRowLayoutRowsWidgetsScorecardThresholds `json:"thresholds"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecard DashboardRowLayoutRowsWidgetsScorecard - -func (r *DashboardRowLayoutRowsWidgetsScorecard) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecard - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecard - } else { - - r.TimeSeriesQuery = res.TimeSeriesQuery - - r.GaugeView = res.GaugeView - - r.SparkChartView = res.SparkChartView - - r.Thresholds = res.Thresholds - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecard is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsScorecard *DashboardRowLayoutRowsWidgetsScorecard = &DashboardRowLayoutRowsWidgetsScorecard{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecard) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecard) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecard) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery struct { - empty bool `json:"-"` - TimeSeriesFilter *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter `json:"timeSeriesFilter"` - TimeSeriesFilterRatio *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio `json:"timeSeriesFilterRatio"` - TimeSeriesQueryLanguage *string `json:"timeSeriesQueryLanguage"` - UnitOverride *string `json:"unitOverride"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery - } else { - - r.TimeSeriesFilter = res.TimeSeriesFilter - - r.TimeSeriesFilterRatio = res.TimeSeriesFilterRatio - - r.TimeSeriesQueryLanguage = res.TimeSeriesQueryLanguage - - r.UnitOverride = res.UnitOverride - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation `json:"aggregation"` - SecondaryAggregation *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - } else { - - r.Filter 
= res.Filter - - r.Aggregation = res.Aggregation - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - -func (r 
*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string 
`json:"alignmentPeriod"` - PerSeriesAligner *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter struct { - empty bool `json:"-"` - RankingMethod *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = 
*EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio struct { - empty bool `json:"-"` - Numerator *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator `json:"numerator"` - Denominator *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator `json:"denominator"` - SecondaryAggregation *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter 
*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - } else { - - r.Numerator = res.Numerator - - r.Denominator = res.Denominator - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation `json:"aggregation"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this 
DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) 
UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator struct { - empty bool `json:"-"` - 
Filter *string `json:"filter"` - Aggregation *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation `json:"aggregation"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - 
} - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum 
`json:"perSeriesAligner"` - CrossSeriesReducer *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter struct { - empty bool `json:"-"` - RankingMethod *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - 
json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardGaugeView struct { - empty bool `json:"-"` - LowerBound *float64 `json:"lowerBound"` - UpperBound *float64 `json:"upperBound"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardGaugeView DashboardRowLayoutRowsWidgetsScorecardGaugeView - -func (r *DashboardRowLayoutRowsWidgetsScorecardGaugeView) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardGaugeView - if 
err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecardGaugeView - } else { - - r.LowerBound = res.LowerBound - - r.UpperBound = res.UpperBound - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardGaugeView is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsScorecardGaugeView *DashboardRowLayoutRowsWidgetsScorecardGaugeView = &DashboardRowLayoutRowsWidgetsScorecardGaugeView{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardGaugeView) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardGaugeView) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardGaugeView) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardSparkChartView struct { - empty bool `json:"-"` - SparkChartType *DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum `json:"sparkChartType"` - MinAlignmentPeriod *string `json:"minAlignmentPeriod"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardSparkChartView DashboardRowLayoutRowsWidgetsScorecardSparkChartView - -func (r *DashboardRowLayoutRowsWidgetsScorecardSparkChartView) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardSparkChartView - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = 
*EmptyDashboardRowLayoutRowsWidgetsScorecardSparkChartView - } else { - - r.SparkChartType = res.SparkChartType - - r.MinAlignmentPeriod = res.MinAlignmentPeriod - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardSparkChartView is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsScorecardSparkChartView *DashboardRowLayoutRowsWidgetsScorecardSparkChartView = &DashboardRowLayoutRowsWidgetsScorecardSparkChartView{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardSparkChartView) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardSparkChartView) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardSparkChartView) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsScorecardThresholds struct { - empty bool `json:"-"` - Label *string `json:"label"` - Value *float64 `json:"value"` - Color *DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum `json:"color"` - Direction *DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum `json:"direction"` -} - -type jsonDashboardRowLayoutRowsWidgetsScorecardThresholds DashboardRowLayoutRowsWidgetsScorecardThresholds - -func (r *DashboardRowLayoutRowsWidgetsScorecardThresholds) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsScorecardThresholds - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsScorecardThresholds - } else { - - r.Label = res.Label - - 
r.Value = res.Value - - r.Color = res.Color - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsScorecardThresholds is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardRowLayoutRowsWidgetsScorecardThresholds *DashboardRowLayoutRowsWidgetsScorecardThresholds = &DashboardRowLayoutRowsWidgetsScorecardThresholds{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsScorecardThresholds) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardThresholds) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsScorecardThresholds) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsText struct { - empty bool `json:"-"` - Content *string `json:"content"` - Format *DashboardRowLayoutRowsWidgetsTextFormatEnum `json:"format"` -} - -type jsonDashboardRowLayoutRowsWidgetsText DashboardRowLayoutRowsWidgetsText - -func (r *DashboardRowLayoutRowsWidgetsText) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsText - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsText - } else { - - r.Content = res.Content - - r.Format = res.Format - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsText is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsText *DashboardRowLayoutRowsWidgetsText = &DashboardRowLayoutRowsWidgetsText{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsText) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsText) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsText) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsBlank struct { - empty bool `json:"-"` -} - -type jsonDashboardRowLayoutRowsWidgetsBlank DashboardRowLayoutRowsWidgetsBlank - -func (r *DashboardRowLayoutRowsWidgetsBlank) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsBlank - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsBlank - } else { - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsBlank is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsBlank *DashboardRowLayoutRowsWidgetsBlank = &DashboardRowLayoutRowsWidgetsBlank{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsBlank) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsBlank) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsBlank) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardRowLayoutRowsWidgetsLogsPanel struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - ResourceNames []string `json:"resourceNames"` -} - -type jsonDashboardRowLayoutRowsWidgetsLogsPanel DashboardRowLayoutRowsWidgetsLogsPanel - -func (r *DashboardRowLayoutRowsWidgetsLogsPanel) UnmarshalJSON(data []byte) error { - var res jsonDashboardRowLayoutRowsWidgetsLogsPanel - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardRowLayoutRowsWidgetsLogsPanel - } else { - - r.Filter = res.Filter - - r.ResourceNames = res.ResourceNames - - } - return nil -} - -// This object is used to assert a desired state where this DashboardRowLayoutRowsWidgetsLogsPanel is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardRowLayoutRowsWidgetsLogsPanel *DashboardRowLayoutRowsWidgetsLogsPanel = &DashboardRowLayoutRowsWidgetsLogsPanel{empty: true} - -func (r *DashboardRowLayoutRowsWidgetsLogsPanel) Empty() bool { - return r.empty -} - -func (r *DashboardRowLayoutRowsWidgetsLogsPanel) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardRowLayoutRowsWidgetsLogsPanel) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayout struct { - empty bool `json:"-"` - Columns []DashboardColumnLayoutColumns `json:"columns"` -} - -type jsonDashboardColumnLayout DashboardColumnLayout - -func (r *DashboardColumnLayout) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayout - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayout - } else { - - r.Columns = res.Columns - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayout is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayout *DashboardColumnLayout = &DashboardColumnLayout{empty: true} - -func (r *DashboardColumnLayout) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayout) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayout) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumns struct { - empty bool `json:"-"` - Weight *int64 `json:"weight"` - Widgets []DashboardColumnLayoutColumnsWidgets `json:"widgets"` -} - -type jsonDashboardColumnLayoutColumns DashboardColumnLayoutColumns - -func (r *DashboardColumnLayoutColumns) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumns - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumns - } else { - - r.Weight = res.Weight - - r.Widgets = res.Widgets - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumns is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumns *DashboardColumnLayoutColumns = &DashboardColumnLayoutColumns{empty: true} - -func (r *DashboardColumnLayoutColumns) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumns) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumns) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgets struct { - empty bool `json:"-"` - Title *string `json:"title"` - XyChart *DashboardColumnLayoutColumnsWidgetsXyChart `json:"xyChart"` - Scorecard *DashboardColumnLayoutColumnsWidgetsScorecard `json:"scorecard"` - Text *DashboardColumnLayoutColumnsWidgetsText `json:"text"` - Blank *DashboardColumnLayoutColumnsWidgetsBlank `json:"blank"` - LogsPanel *DashboardColumnLayoutColumnsWidgetsLogsPanel `json:"logsPanel"` -} - -type jsonDashboardColumnLayoutColumnsWidgets DashboardColumnLayoutColumnsWidgets - -func (r *DashboardColumnLayoutColumnsWidgets) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgets - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgets - } else { - - r.Title = res.Title - - r.XyChart = res.XyChart - - r.Scorecard = res.Scorecard - - r.Text = res.Text - - r.Blank = res.Blank - - r.LogsPanel = res.LogsPanel - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgets is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgets *DashboardColumnLayoutColumnsWidgets = &DashboardColumnLayoutColumnsWidgets{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgets) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgets) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgets) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChart struct { - empty bool `json:"-"` - DataSets []DashboardColumnLayoutColumnsWidgetsXyChartDataSets `json:"dataSets"` - TimeshiftDuration *string `json:"timeshiftDuration"` - Thresholds []DashboardColumnLayoutColumnsWidgetsXyChartThresholds `json:"thresholds"` - XAxis *DashboardColumnLayoutColumnsWidgetsXyChartXAxis `json:"xAxis"` - YAxis *DashboardColumnLayoutColumnsWidgetsXyChartYAxis `json:"yAxis"` - ChartOptions *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions `json:"chartOptions"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChart DashboardColumnLayoutColumnsWidgetsXyChart - -func (r *DashboardColumnLayoutColumnsWidgetsXyChart) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChart - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChart - } else { - - r.DataSets = res.DataSets - - r.TimeshiftDuration = res.TimeshiftDuration - - r.Thresholds = res.Thresholds - - r.XAxis = res.XAxis - - r.YAxis = res.YAxis - - r.ChartOptions = res.ChartOptions - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChart is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsXyChart *DashboardColumnLayoutColumnsWidgetsXyChart = &DashboardColumnLayoutColumnsWidgetsXyChart{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChart) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChart) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChart) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartDataSets struct { - empty bool `json:"-"` - TimeSeriesQuery *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery `json:"timeSeriesQuery"` - PlotType *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum `json:"plotType"` - LegendTemplate *string `json:"legendTemplate"` - MinAlignmentPeriod *string `json:"minAlignmentPeriod"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSets DashboardColumnLayoutColumnsWidgetsXyChartDataSets - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSets) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSets - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSets - } else { - - r.TimeSeriesQuery = res.TimeSeriesQuery - - r.PlotType = res.PlotType - - r.LegendTemplate = res.LegendTemplate - - r.MinAlignmentPeriod = res.MinAlignmentPeriod - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartDataSets is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSets *DashboardColumnLayoutColumnsWidgetsXyChartDataSets = &DashboardColumnLayoutColumnsWidgetsXyChartDataSets{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSets) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSets) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSets) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery struct { - empty bool `json:"-"` - TimeSeriesFilter *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter `json:"timeSeriesFilter"` - TimeSeriesFilterRatio *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio `json:"timeSeriesFilterRatio"` - TimeSeriesQueryLanguage *string `json:"timeSeriesQueryLanguage"` - UnitOverride *string `json:"unitOverride"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery - } else { - - r.TimeSeriesFilter = res.TimeSeriesFilter - - r.TimeSeriesFilterRatio = res.TimeSeriesFilterRatio - - 
r.TimeSeriesQueryLanguage = res.TimeSeriesQueryLanguage - - r.UnitOverride = res.UnitOverride - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation `json:"aggregation"` - SecondaryAggregation *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - -func (r 
*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string 
`json:"alignmentPeriod"` - PerSeriesAligner *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter 
struct { - empty bool `json:"-"` - RankingMethod *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio struct { - empty bool `json:"-"` - Numerator *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator `json:"numerator"` - Denominator *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator `json:"denominator"` - SecondaryAggregation *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) 
UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - } else { - - r.Numerator = res.Numerator - - r.Denominator = res.Denominator - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation 
*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation `json:"aggregation"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type 
DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation `json:"aggregation"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type 
DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter struct { - empty bool `json:"-"` - RankingMethod *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartThresholds struct { - empty bool `json:"-"` - Label *string `json:"label"` - Value 
*float64 `json:"value"` - Color *DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum `json:"color"` - Direction *DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum `json:"direction"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartThresholds DashboardColumnLayoutColumnsWidgetsXyChartThresholds - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartThresholds) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChartThresholds - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartThresholds - } else { - - r.Label = res.Label - - r.Value = res.Value - - r.Color = res.Color - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartThresholds is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsXyChartThresholds *DashboardColumnLayoutColumnsWidgetsXyChartThresholds = &DashboardColumnLayoutColumnsWidgetsXyChartThresholds{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartThresholds) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartThresholds) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartThresholds) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartXAxis struct { - empty bool `json:"-"` - Label *string `json:"label"` - Scale *DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum `json:"scale"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartXAxis DashboardColumnLayoutColumnsWidgetsXyChartXAxis - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartXAxis) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChartXAxis - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartXAxis - } else { - - r.Label = res.Label - - r.Scale = res.Scale - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartXAxis is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsXyChartXAxis *DashboardColumnLayoutColumnsWidgetsXyChartXAxis = &DashboardColumnLayoutColumnsWidgetsXyChartXAxis{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartXAxis) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartXAxis) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartXAxis) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartYAxis struct { - empty bool `json:"-"` - Label *string `json:"label"` - Scale *DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum `json:"scale"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartYAxis DashboardColumnLayoutColumnsWidgetsXyChartYAxis - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartYAxis) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChartYAxis - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartYAxis - } else { - - r.Label = res.Label - - r.Scale = res.Scale - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartYAxis is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsXyChartYAxis *DashboardColumnLayoutColumnsWidgetsXyChartYAxis = &DashboardColumnLayoutColumnsWidgetsXyChartYAxis{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartYAxis) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartYAxis) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartYAxis) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsXyChartChartOptions struct { - empty bool `json:"-"` - Mode *DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum `json:"mode"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsXyChartChartOptions DashboardColumnLayoutColumnsWidgetsXyChartChartOptions - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsXyChartChartOptions - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsXyChartChartOptions - } else { - - r.Mode = res.Mode - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsXyChartChartOptions is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsXyChartChartOptions *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions = &DashboardColumnLayoutColumnsWidgetsXyChartChartOptions{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecard struct { - empty bool `json:"-"` - TimeSeriesQuery *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery `json:"timeSeriesQuery"` - GaugeView *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView `json:"gaugeView"` - SparkChartView *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView `json:"sparkChartView"` - Thresholds []DashboardColumnLayoutColumnsWidgetsScorecardThresholds `json:"thresholds"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecard DashboardColumnLayoutColumnsWidgetsScorecard - -func (r *DashboardColumnLayoutColumnsWidgetsScorecard) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsScorecard - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecard - } else { - - r.TimeSeriesQuery = res.TimeSeriesQuery - - r.GaugeView = res.GaugeView - - r.SparkChartView = res.SparkChartView - - r.Thresholds = res.Thresholds - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecard is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsScorecard *DashboardColumnLayoutColumnsWidgetsScorecard = &DashboardColumnLayoutColumnsWidgetsScorecard{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecard) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecard) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecard) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery struct { - empty bool `json:"-"` - TimeSeriesFilter *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter `json:"timeSeriesFilter"` - TimeSeriesFilterRatio *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio `json:"timeSeriesFilterRatio"` - TimeSeriesQueryLanguage *string `json:"timeSeriesQueryLanguage"` - UnitOverride *string `json:"unitOverride"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery - } else { - - r.TimeSeriesFilter = res.TimeSeriesFilter - - r.TimeSeriesFilterRatio = res.TimeSeriesFilterRatio - - r.TimeSeriesQueryLanguage = res.TimeSeriesQueryLanguage - - r.UnitOverride = 
res.UnitOverride - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation `json:"aggregation"` - SecondaryAggregation *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer 
*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } 
- - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter struct { - empty bool `json:"-"` - RankingMethod *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` 
- Direction *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio struct { - empty bool `json:"-"` - Numerator *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator `json:"numerator"` - Denominator *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator `json:"denominator"` - SecondaryAggregation *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation `json:"secondaryAggregation"` - PickTimeSeriesFilter *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter `json:"pickTimeSeriesFilter"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - } else { - - r.Numerator = res.Numerator - - r.Denominator = res.Denominator - - r.SecondaryAggregation = res.SecondaryAggregation - - r.PickTimeSeriesFilter = res.PickTimeSeriesFilter - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation `json:"aggregation"` -} - 
-type jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - if err := 
json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - Aggregation 
*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation `json:"aggregation"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - } else { - - r.Filter = res.Filter - - r.Aggregation = res.Aggregation - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type 
DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation struct { - empty bool `json:"-"` - AlignmentPeriod *string `json:"alignmentPeriod"` - PerSeriesAligner *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum `json:"perSeriesAligner"` - CrossSeriesReducer *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum `json:"crossSeriesReducer"` - GroupByFields []string `json:"groupByFields"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } else { - - r.AlignmentPeriod = res.AlignmentPeriod - - r.PerSeriesAligner = res.PerSeriesAligner - - r.CrossSeriesReducer = res.CrossSeriesReducer - - r.GroupByFields = res.GroupByFields - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter struct { - empty bool `json:"-"` - RankingMethod *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum `json:"rankingMethod"` - NumTimeSeries *int64 `json:"numTimeSeries"` - Direction *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum `json:"direction"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - if err := json.Unmarshal(data, &res); 
err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } else { - - r.RankingMethod = res.RankingMethod - - r.NumTimeSeries = res.NumTimeSeries - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardGaugeView struct { - empty bool `json:"-"` - LowerBound *float64 `json:"lowerBound"` - UpperBound *float64 `json:"upperBound"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardGaugeView DashboardColumnLayoutColumnsWidgetsScorecardGaugeView - -func (r 
*DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsScorecardGaugeView - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardGaugeView - } else { - - r.LowerBound = res.LowerBound - - r.UpperBound = res.UpperBound - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardGaugeView is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsScorecardGaugeView *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView = &DashboardColumnLayoutColumnsWidgetsScorecardGaugeView{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView struct { - empty bool `json:"-"` - SparkChartType *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum `json:"sparkChartType"` - MinAlignmentPeriod *string `json:"minAlignmentPeriod"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView - } else { - - r.SparkChartType = res.SparkChartType - - r.MinAlignmentPeriod = res.MinAlignmentPeriod - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView = &DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsScorecardThresholds struct { - empty bool `json:"-"` - Label *string `json:"label"` - Value *float64 `json:"value"` - Color *DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum `json:"color"` - Direction *DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum `json:"direction"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsScorecardThresholds DashboardColumnLayoutColumnsWidgetsScorecardThresholds - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardThresholds) UnmarshalJSON(data []byte) error { - var res 
jsonDashboardColumnLayoutColumnsWidgetsScorecardThresholds - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsScorecardThresholds - } else { - - r.Label = res.Label - - r.Value = res.Value - - r.Color = res.Color - - r.Direction = res.Direction - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsScorecardThresholds is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsScorecardThresholds *DashboardColumnLayoutColumnsWidgetsScorecardThresholds = &DashboardColumnLayoutColumnsWidgetsScorecardThresholds{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardThresholds) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardThresholds) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsScorecardThresholds) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsText struct { - empty bool `json:"-"` - Content *string `json:"content"` - Format *DashboardColumnLayoutColumnsWidgetsTextFormatEnum `json:"format"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsText DashboardColumnLayoutColumnsWidgetsText - -func (r *DashboardColumnLayoutColumnsWidgetsText) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsText - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = 
*EmptyDashboardColumnLayoutColumnsWidgetsText - } else { - - r.Content = res.Content - - r.Format = res.Format - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsText is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyDashboardColumnLayoutColumnsWidgetsText *DashboardColumnLayoutColumnsWidgetsText = &DashboardColumnLayoutColumnsWidgetsText{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsText) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsText) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsText) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsBlank struct { - empty bool `json:"-"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsBlank DashboardColumnLayoutColumnsWidgetsBlank - -func (r *DashboardColumnLayoutColumnsWidgetsBlank) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsBlank - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsBlank - } else { - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsBlank is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsBlank *DashboardColumnLayoutColumnsWidgetsBlank = &DashboardColumnLayoutColumnsWidgetsBlank{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsBlank) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsBlank) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsBlank) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type DashboardColumnLayoutColumnsWidgetsLogsPanel struct { - empty bool `json:"-"` - Filter *string `json:"filter"` - ResourceNames []string `json:"resourceNames"` -} - -type jsonDashboardColumnLayoutColumnsWidgetsLogsPanel DashboardColumnLayoutColumnsWidgetsLogsPanel - -func (r *DashboardColumnLayoutColumnsWidgetsLogsPanel) UnmarshalJSON(data []byte) error { - var res jsonDashboardColumnLayoutColumnsWidgetsLogsPanel - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyDashboardColumnLayoutColumnsWidgetsLogsPanel - } else { - - r.Filter = res.Filter - - r.ResourceNames = res.ResourceNames - - } - return nil -} - -// This object is used to assert a desired state where this DashboardColumnLayoutColumnsWidgetsLogsPanel is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyDashboardColumnLayoutColumnsWidgetsLogsPanel *DashboardColumnLayoutColumnsWidgetsLogsPanel = &DashboardColumnLayoutColumnsWidgetsLogsPanel{empty: true} - -func (r *DashboardColumnLayoutColumnsWidgetsLogsPanel) Empty() bool { - return r.empty -} - -func (r *DashboardColumnLayoutColumnsWidgetsLogsPanel) String() string { - return dcl.SprintResource(r) -} - -func (r *DashboardColumnLayoutColumnsWidgetsLogsPanel) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. -func (r *Dashboard) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "monitoring", - Type: "Dashboard", - Version: "monitoring", - } -} - -func (r *Dashboard) ID() (string, error) { - if err := extractDashboardFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "display_name": dcl.ValueOrEmptyString(nr.DisplayName), - "grid_layout": dcl.ValueOrEmptyString(nr.GridLayout), - "mosaic_layout": dcl.ValueOrEmptyString(nr.MosaicLayout), - "row_layout": dcl.ValueOrEmptyString(nr.RowLayout), - "column_layout": dcl.ValueOrEmptyString(nr.ColumnLayout), - "project": dcl.ValueOrEmptyString(nr.Project), - "etag": dcl.ValueOrEmptyString(nr.Etag), - } - return dcl.Nprintf("projects/{{project}}/dashboards/{{name}}", params), nil -} - -const DashboardMaxPage = -1 - -type DashboardList struct { - Items []*Dashboard - - nextToken string - - pageSize int32 - - resource *Dashboard -} - -func (l *DashboardList) HasNext() bool { - return l.nextToken != "" -} - -func (l *DashboardList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - 
defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listDashboard(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListDashboard(ctx context.Context, project string) (*DashboardList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListDashboardWithMaxResults(ctx, project, DashboardMaxPage) - -} - -func (c *Client) ListDashboardWithMaxResults(ctx context.Context, project string, pageSize int32) (*DashboardList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. - r := &Dashboard{ - Project: &project, - } - items, token, err := c.listDashboard(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &DashboardList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetDashboard(ctx context.Context, r *Dashboard) (*Dashboard, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
- extractDashboardFields(r) - - b, err := c.getDashboardRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalDashboard(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeDashboardNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractDashboardFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteDashboard(ctx context.Context, r *Dashboard) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("Dashboard resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting Dashboard...") - deleteOp := deleteDashboardOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllDashboard deletes all resources that the filter functions returns true on. 
-func (c *Client) DeleteAllDashboard(ctx context.Context, project string, filter func(*Dashboard) bool) error { - listObj, err := c.ListDashboard(ctx, project) - if err != nil { - return err - } - - err = c.deleteAllDashboard(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllDashboard(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyDashboard(ctx context.Context, rawDesired *Dashboard, opts ...dcl.ApplyOption) (*Dashboard, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *Dashboard - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyDashboardHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyDashboardHelper(c *Client, ctx context.Context, rawDesired *Dashboard, opts ...dcl.ApplyOption) (*Dashboard, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyDashboard...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. - if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractDashboardFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.dashboardDiffsForRawDesired(ctx, rawDesired, opts...) 
- if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToDashboardDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []dashboardApiOperation - if create { - ops = append(ops, &createDashboardOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyDashboardDiff(c, ctx, desired, rawDesired, ops, opts...) 
-} - -func applyDashboardDiff(c *Client, ctx context.Context, desired *Dashboard, rawDesired *Dashboard, ops []dashboardApiOperation, opts ...dcl.ApplyOption) (*Dashboard, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetDashboard(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createDashboardOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapDashboard(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeDashboardNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeDashboardNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeDashboardDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractDashboardFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. 
- if err := postReadExtractDashboardFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffDashboard(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.yaml deleted file mode 100644 index 8ae3337c87..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard.yaml +++ /dev/null @@ -1,8992 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-info: - title: Monitoring/Dashboard - description: The Monitoring Dashboard resource - x-dcl-struct-name: Dashboard - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a Dashboard - parameters: - - name: dashboard - required: true - description: A full instance of a Dashboard - apply: - description: The function used to apply information about a Dashboard - parameters: - - name: dashboard - required: true - description: A full instance of a Dashboard - delete: - description: The function used to delete a Dashboard - parameters: - - name: dashboard - required: true - description: A full instance of a Dashboard - deleteAll: - description: The function used to delete all Dashboard - parameters: - - name: project - required: true - schema: - type: string - list: - description: The function used to list information about many Dashboard - parameters: - - name: project - required: true - schema: - type: string -components: - schemas: - Dashboard: - title: Dashboard - x-dcl-id: projects/{{project}}/dashboards/{{name}} - x-dcl-parent-container: project - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - name - - displayName - - project - properties: - columnLayout: - type: object - x-dcl-go-name: ColumnLayout - x-dcl-go-type: DashboardColumnLayout - description: The content is divided into equally spaced columns and the - widgets are arranged vertically. - x-dcl-conflicts: - - gridLayout - - mosaicLayout - - rowLayout - properties: - columns: - type: array - x-dcl-go-name: Columns - description: The columns of content to display. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardColumnLayoutColumns - properties: - weight: - type: integer - format: int64 - x-dcl-go-name: Weight - description: The relative weight of this column. 
The column weight - is used to adjust the width of columns on the screen (relative - to peers). Greater the weight, greater the width of the column - on the screen. If omitted, a value of 1 is used while rendering. - widgets: - type: array - x-dcl-go-name: Widgets - description: The display widgets arranged vertically in this column. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardColumnLayoutColumnsWidgets - properties: - blank: - type: object - x-dcl-go-name: Blank - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsBlank - description: A blank space. - x-dcl-conflicts: - - xyChart - - scorecard - - text - - logsPanel - logsPanel: - type: object - x-dcl-go-name: LogsPanel - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsLogsPanel - x-dcl-conflicts: - - xyChart - - scorecard - - text - - blank - properties: - filter: - type: string - x-dcl-go-name: Filter - description: A filter that chooses which log entries - to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). - Only log entries that match the filter are returned. - An empty filter matches all log entries. - resourceNames: - type: array - x-dcl-go-name: ResourceNames - description: The names of logging resources to collect - logs for. Currently only projects are supported. If - empty, the widget will default to the host project. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - scorecard: - type: object - x-dcl-go-name: Scorecard - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecard - description: A scorecard summarizing time series data. 
- x-dcl-conflicts: - - xyChart - - text - - blank - - logsPanel - required: - - timeSeriesQuery - properties: - gaugeView: - type: object - x-dcl-go-name: GaugeView - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardGaugeView - description: Will cause the scorecard to show a gauge - chart. - properties: - lowerBound: - type: number - format: double - x-dcl-go-name: LowerBound - description: The lower bound for this gauge chart. - The value of the chart should always be greater - than or equal to this. - upperBound: - type: number - format: double - x-dcl-go-name: UpperBound - description: The upper bound for this gauge chart. - The value of the chart should always be less than - or equal to this. - sparkChartView: - type: object - x-dcl-go-name: SparkChartView - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView - description: Will cause the scorecard to show a spark - chart. - required: - - sparkChartType - properties: - minAlignmentPeriod: - type: string - x-dcl-go-name: MinAlignmentPeriod - description: The lower bound on data point frequency - in the chart implemented by specifying the minimum - alignment period to use in a time series query. - For example, if the data is published once every - 10 minutes it would not make sense to fetch and - align data at one minute intervals. This field - is optional and exists only as a hint. - sparkChartType: - type: string - x-dcl-go-name: SparkChartType - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum - description: 'Required. The type of sparkchart to - show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED, - SPARK_LINE, SPARK_BAR' - enum: - - SPARK_CHART_TYPE_UNSPECIFIED - - SPARK_LINE - - SPARK_BAR - thresholds: - type: array - x-dcl-go-name: Thresholds - description: 'The thresholds used to determine the state - of the scorecard given the time series'' current value. 
- For an actual value x, the scorecard is in a danger - state if x is less than or equal to a danger threshold - that triggers below, or greater than or equal to a - danger threshold that triggers above. Similarly, if - x is above/below a warning threshold that triggers - above/below, then the scorecard is in a warning state - - unless x also puts it in a danger state. (Danger - trumps warning.) As an example, consider a scorecard - with the following four thresholds: { value: 90, category: - ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: - ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: - ''DANGER'', trigger: ''BELOW'', }, { value: 20, category: - ''WARNING'', trigger: ''BELOW'', } Then: values - less than or equal to 10 would put the scorecard in - a DANGER state, values greater than 10 but less than - or equal to 20 a WARNING state, values strictly between - 20 and 70 an OK state, values greater than or equal - to 70 but less than 90 a WARNING state, and values - greater than or equal to 90 a DANGER state.' - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholds - properties: - color: - type: string - x-dcl-go-name: Color - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum - description: 'The state color for this threshold. - Color is not allowed in a XyChart. Possible - values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN, - YELLOW, ORANGE, RED' - enum: - - COLOR_UNSPECIFIED - - GREY - - BLUE - - GREEN - - YELLOW - - ORANGE - - RED - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum - description: 'The direction for the current threshold. - Direction is not allowed in a XyChart. 
Possible - values: DIRECTION_UNSPECIFIED, ABOVE, BELOW' - enum: - - DIRECTION_UNSPECIFIED - - ABOVE - - BELOW - label: - type: string - x-dcl-go-name: Label - description: A label for the threshold. - value: - type: number - format: double - x-dcl-go-name: Value - description: The value of the threshold. The value - should be defined in the native scale of the - metric. - timeSeriesQuery: - type: object - x-dcl-go-name: TimeSeriesQuery - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery - description: Required. Fields for querying time series - data from the Stackdriver metrics API. - properties: - timeSeriesFilter: - type: object - x-dcl-go-name: TimeSeriesFilter - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - description: Filter parameters to fetch time series. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views of - the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will - be done before the per-series aligner - can be applied to the data. The value - must be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is - returned. If no per-series aligner is - specified, or the aligner `ALIGN_NONE` - is specified, then this field is ignored. 
- crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum - description: 'The reduction operation to - be used to combine time series into a - single time series, where the value of - each data point in the resulting series - is a function of all the already aligned - values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the `value_type` - of the original time series. Reduction - can yield a time series with a different - `metric_kind` or `value_type` than the - input time series. Time series data must - first be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must - be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. Possible - values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. 
- The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how - to bring the data points in a single time - series into temporal alignment. Except - for `ALIGN_NONE`, all alignments cause - all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for each - `alignment_period` with end timestamp - at the end of the period. Not all alignment - operations may be applied to all time - series. The valid choices depend on the - `metric_kind` and `value_type` of the - original time series. Alignment can change - the `metric_kind` or the `value_type` - of the time series. Time series data - must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. 
- enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. - pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - description: Ranking based time series filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking to - select time series that pass through the - filter. Possible values: DIRECTION_UNSPECIFIED, - TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series to allow - to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is applied - to each time series independently to produce - the value which will be used to compare - the time series to other time series. 
- Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, - METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - description: Apply a second aggregation after - `aggregation` is applied. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will - be done before the per-series aligner - can be applied to the data. The value - must be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is - returned. If no per-series aligner is - specified, or the aligner `ALIGN_NONE` - is specified, then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation to - be used to combine time series into a - single time series, where the value of - each data point in the resulting series - is a function of all the already aligned - values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the `value_type` - of the original time series. Reduction - can yield a time series with a different - `metric_kind` or `value_type` than the - input time series. Time series data must - first be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. 
If `cross_series_reducer` is - specified, then `per_series_aligner` must - be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. Possible - values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how - to bring the data points in a single time - series into temporal alignment. Except - for `ALIGN_NONE`, all alignments cause - all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for each - `alignment_period` with end timestamp - at the end of the period. Not all alignment - operations may be applied to all time - series. The valid choices depend on the - `metric_kind` and `value_type` of the - original time series. Alignment can change - the `metric_kind` or the `value_type` - of the time series. Time series data - must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesFilterRatio: - type: object - x-dcl-go-name: TimeSeriesFilterRatio - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - description: Parameters to fetch a ratio between - two time series filters. 
- properties: - denominator: - type: object - x-dcl-go-name: Denominator - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - description: The denominator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data in - all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This - will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this field - is required or an error is returned. - If no per-series aligner is specified, - or the aligner `ALIGN_NONE` is specified, - then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where the - value of each data point in the resulting - series is a function of all the already - aligned values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the - `value_type` of the original time - series. Reduction can yield a time - series with a different `metric_kind` - or `value_type` than the input time - series. 
Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in a - single time series into temporal alignment. - Except for `ALIGN_NONE`, all alignments - cause all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for - each `alignment_period` with end timestamp - at the end of the period. Not all - alignment operations may be applied - to all time series. The valid choices - depend on the `metric_kind` and `value_type` - of the original time series. Alignment - can change the `metric_kind` or the - `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- numerator: - type: object - x-dcl-go-name: Numerator - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - description: The numerator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data in - all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This - will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this field - is required or an error is returned. - If no per-series aligner is specified, - or the aligner `ALIGN_NONE` is specified, - then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where the - value of each data point in the resulting - series is a function of all the already - aligned values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the - `value_type` of the original time - series. Reduction can yield a time - series with a different `metric_kind` - or `value_type` than the input time - series. 
Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in a - single time series into temporal alignment. - Except for `ALIGN_NONE`, all alignments - cause all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for - each `alignment_period` with end timestamp - at the end of the period. Not all - alignment operations may be applied - to all time series. The valid choices - depend on the `metric_kind` and `value_type` - of the original time series. Alignment - can change the `metric_kind` or the - `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - description: Ranking based time series filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking to - select time series that pass through the - filter. Possible values: DIRECTION_UNSPECIFIED, - TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series to allow - to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is applied - to each time series independently to produce - the value which will be used to compare - the time series to other time series. - Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, - METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - description: Apply a second aggregation after - the ratio is computed. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. 
This will - be done before the per-series aligner - can be applied to the data. The value - must be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is - returned. If no per-series aligner is - specified, or the aligner `ALIGN_NONE` - is specified, then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation to - be used to combine time series into a - single time series, where the value of - each data point in the resulting series - is a function of all the already aligned - values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the `value_type` - of the original time series. Reduction - can yield a time series with a different - `metric_kind` or `value_type` than the - input time series. Time series data must - first be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must - be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. 
Possible - values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how - to bring the data points in a single time - series into temporal alignment. Except - for `ALIGN_NONE`, all alignments cause - all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for each - `alignment_period` with end timestamp - at the end of the period. Not all alignment - operations may be applied to all time - series. The valid choices depend on the - `metric_kind` and `value_type` of the - original time series. Alignment can change - the `metric_kind` or the `value_type` - of the time series. Time series data - must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesQueryLanguage: - type: string - x-dcl-go-name: TimeSeriesQueryLanguage - description: A query used to fetch time series. - unitOverride: - type: string - x-dcl-go-name: UnitOverride - description: The unit of data contained in fetched - time series. If non-empty, this unit will override - any unit that accompanies fetched data. 
The format - is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - text: - type: object - x-dcl-go-name: Text - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsText - description: A raw string or markdown displaying textual - content. - x-dcl-conflicts: - - xyChart - - scorecard - - blank - - logsPanel - properties: - content: - type: string - x-dcl-go-name: Content - description: The text content to be displayed. - format: - type: string - x-dcl-go-name: Format - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsTextFormatEnum - description: 'How the text content is formatted. Possible - values: FORMAT_UNSPECIFIED, MARKDOWN, RAW' - enum: - - FORMAT_UNSPECIFIED - - MARKDOWN - - RAW - title: - type: string - x-dcl-go-name: Title - description: Optional. The title of the widget. - xyChart: - type: object - x-dcl-go-name: XyChart - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChart - description: A chart of time series data. - x-dcl-conflicts: - - scorecard - - text - - blank - - logsPanel - required: - - dataSets - properties: - chartOptions: - type: object - x-dcl-go-name: ChartOptions - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartChartOptions - description: Display options for the chart. - properties: - mode: - type: string - x-dcl-go-name: Mode - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum - description: 'The chart mode. Possible values: MODE_UNSPECIFIED, - COLOR, X_RAY, STATS' - enum: - - MODE_UNSPECIFIED - - COLOR - - X_RAY - - STATS - dataSets: - type: array - x-dcl-go-name: DataSets - description: Required. The data displayed in this chart. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSets - required: - - timeSeriesQuery - properties: - legendTemplate: - type: string - x-dcl-go-name: LegendTemplate - description: 'A template string for naming `TimeSeries` - in the resulting data set. This should be a - string with interpolations of the form `${label_name}`, - which will resolve to the label''s value. ' - minAlignmentPeriod: - type: string - x-dcl-go-name: MinAlignmentPeriod - description: Optional. The lower bound on data - point frequency for this data set, implemented - by specifying the minimum alignment period to - use in a time series query For example, if the - data is published once every 10 minutes, the - `min_alignment_period` should be at least 10 - minutes. It would not make sense to fetch and - align data at one minute intervals. - plotType: - type: string - x-dcl-go-name: PlotType - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum - description: 'How this data should be plotted - on the chart. Possible values: PLOT_TYPE_UNSPECIFIED, - LINE, STACKED_AREA, STACKED_BAR, HEATMAP' - enum: - - PLOT_TYPE_UNSPECIFIED - - LINE - - STACKED_AREA - - STACKED_BAR - - HEATMAP - timeSeriesQuery: - type: object - x-dcl-go-name: TimeSeriesQuery - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery - description: Required. Fields for querying time - series data from the Stackdriver metrics API. - properties: - timeSeriesFilter: - type: object - x-dcl-go-name: TimeSeriesFilter - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - description: Filter parameters to fetch time - series. 
- required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - description: By default, the raw time - series data is returned. Use this field - to combine multiple time series for - different views of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data - in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. - This will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this - field is required or an error is - returned. If no per-series aligner - is specified, or the aligner `ALIGN_NONE` - is specified, then this field is - ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where - the value of each data point in - the resulting series is a function - of all the already aligned values - in the input time series. Not all - reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and - the `value_type` of the original - time series. Reduction can yield - a time series with a different `metric_kind` - or `value_type` than the input time - series. Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not - be `ALIGN_NONE`. 
An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, - REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, - REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, - REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in - a single time series into temporal - alignment. Except for `ALIGN_NONE`, - all alignments cause all the data - points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point - for each `alignment_period` with - end timestamp at the end of the - period. Not all alignment operations - may be applied to all time series. - The valid choices depend on the - `metric_kind` and `value_type` of - the original time series. Alignment - can change the `metric_kind` or - the `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - description: Ranking based time series - filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking - to select time series that pass - through the filter. Possible values: - DIRECTION_UNSPECIFIED, TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series - to allow to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is - applied to each time series independently - to produce the value which will - be used to compare the time series - to other time series. Possible values: - METHOD_UNSPECIFIED, METHOD_MEAN, - METHOD_MAX, METHOD_MIN, METHOD_SUM, - METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - description: Apply a second aggregation - after `aggregation` is applied. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data - in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. 
- This will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this - field is required or an error is - returned. If no per-series aligner - is specified, or the aligner `ALIGN_NONE` - is specified, then this field is - ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where - the value of each data point in - the resulting series is a function - of all the already aligned values - in the input time series. Not all - reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and - the `value_type` of the original - time series. Reduction can yield - a time series with a different `metric_kind` - or `value_type` than the input time - series. Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not - be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. 
Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, - REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, - REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, - REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in - a single time series into temporal - alignment. Except for `ALIGN_NONE`, - all alignments cause all the data - points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point - for each `alignment_period` with - end timestamp at the end of the - period. Not all alignment operations - may be applied to all time series. - The valid choices depend on the - `metric_kind` and `value_type` of - the original time series. Alignment - can change the `metric_kind` or - the `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesFilterRatio: - type: object - x-dcl-go-name: TimeSeriesFilterRatio - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - description: Parameters to fetch a ratio between - two time series filters. 
- properties: - denominator: - type: object - x-dcl-go-name: Denominator - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - description: The denominator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in - seconds, that is used to divide - the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. - This will be done before the - per-series aligner can be applied - to the data. The value must - be at least 60 seconds. If a - per-series aligner other than - `ALIGN_NONE` is specified, this - field is required or an error - is returned. If no per-series - aligner is specified, or the - aligner `ALIGN_NONE` is specified, - then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where - the value of each data point - in the resulting series is a - function of all the already - aligned values in the input - time series. Not all reducer - operations can be applied to - all time series. The valid choices - depend on the `metric_kind` - and the `value_type` of the - original time series. 
Reduction - can yield a time series with - a different `metric_kind` or - `value_type` than the input - time series. Time series data - must first be aligned (see `per_series_aligner`) - in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must - not be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible - values: REDUCE_NONE, REDUCE_MEAN, - REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, - REDUCE_STDDEV, REDUCE_COUNT, - REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. 
If `cross_series_reducer` - is not defined, this field is - ignored. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points - in a single time series into - temporal alignment. Except for - `ALIGN_NONE`, all alignments - cause all the data points in - an `alignment_period` to be - mathematically grouped together, - resulting in a single data point - for each `alignment_period` - with end timestamp at the end - of the period. Not all alignment - operations may be applied to - all time series. The valid choices - depend on the `metric_kind` - and `value_type` of the original - time series. Alignment can change - the `metric_kind` or the `value_type` - of the time series. Time series - data must be aligned in order - to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal - to `ALIGN_NONE` and `alignment_period` - must be specified; otherwise, - an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. 
- numerator: - type: object - x-dcl-go-name: Numerator - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - description: The numerator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in - seconds, that is used to divide - the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. - This will be done before the - per-series aligner can be applied - to the data. The value must - be at least 60 seconds. If a - per-series aligner other than - `ALIGN_NONE` is specified, this - field is required or an error - is returned. If no per-series - aligner is specified, or the - aligner `ALIGN_NONE` is specified, - then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where - the value of each data point - in the resulting series is a - function of all the already - aligned values in the input - time series. Not all reducer - operations can be applied to - all time series. The valid choices - depend on the `metric_kind` - and the `value_type` of the - original time series. Reduction - can yield a time series with - a different `metric_kind` or - `value_type` than the input - time series. 
Time series data - must first be aligned (see `per_series_aligner`) - in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must - not be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible - values: REDUCE_NONE, REDUCE_MEAN, - REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, - REDUCE_STDDEV, REDUCE_COUNT, - REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points - in a single time series into - temporal alignment. Except for - `ALIGN_NONE`, all alignments - cause all the data points in - an `alignment_period` to be - mathematically grouped together, - resulting in a single data point - for each `alignment_period` - with end timestamp at the end - of the period. Not all alignment - operations may be applied to - all time series. The valid choices - depend on the `metric_kind` - and `value_type` of the original - time series. Alignment can change - the `metric_kind` or the `value_type` - of the time series. Time series - data must be aligned in order - to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal - to `ALIGN_NONE` and `alignment_period` - must be specified; otherwise, - an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - description: Ranking based time series - filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking - to select time series that pass - through the filter. Possible values: - DIRECTION_UNSPECIFIED, TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series - to allow to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is - applied to each time series independently - to produce the value which will - be used to compare the time series - to other time series. Possible values: - METHOD_UNSPECIFIED, METHOD_MEAN, - METHOD_MAX, METHOD_MIN, METHOD_SUM, - METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - description: Apply a second aggregation - after the ratio is computed. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data - in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. 
- This will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this - field is required or an error is - returned. If no per-series aligner - is specified, or the aligner `ALIGN_NONE` - is specified, then this field is - ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where - the value of each data point in - the resulting series is a function - of all the already aligned values - in the input time series. Not all - reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and - the `value_type` of the original - time series. Reduction can yield - a time series with a different `metric_kind` - or `value_type` than the input time - series. Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not - be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. 
Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, - REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, - REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, - REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in - a single time series into temporal - alignment. Except for `ALIGN_NONE`, - all alignments cause all the data - points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point - for each `alignment_period` with - end timestamp at the end of the - period. Not all alignment operations - may be applied to all time series. - The valid choices depend on the - `metric_kind` and `value_type` of - the original time series. Alignment - can change the `metric_kind` or - the `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesQueryLanguage: - type: string - x-dcl-go-name: TimeSeriesQueryLanguage - description: A query used to fetch time series. - unitOverride: - type: string - x-dcl-go-name: UnitOverride - description: The unit of data contained in - fetched time series. If non-empty, this - unit will override any unit that accompanies - fetched data. 
The format is the same as - the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - thresholds: - type: array - x-dcl-go-name: Thresholds - description: Threshold lines drawn horizontally across - the chart. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholds - properties: - color: - type: string - x-dcl-go-name: Color - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum - description: 'The state color for this threshold. - Color is not allowed in a XyChart. Possible - values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN, - YELLOW, ORANGE, RED' - enum: - - COLOR_UNSPECIFIED - - GREY - - BLUE - - GREEN - - YELLOW - - ORANGE - - RED - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum - description: 'The direction for the current threshold. - Direction is not allowed in a XyChart. Possible - values: DIRECTION_UNSPECIFIED, ABOVE, BELOW' - enum: - - DIRECTION_UNSPECIFIED - - ABOVE - - BELOW - label: - type: string - x-dcl-go-name: Label - description: A label for the threshold. - value: - type: number - format: double - x-dcl-go-name: Value - description: The value of the threshold. The value - should be defined in the native scale of the - metric. - timeshiftDuration: - type: string - x-dcl-go-name: TimeshiftDuration - description: The duration used to display a comparison - chart. A comparison chart simultaneously shows values - from two similar-length time periods (e.g., week-over-week - metrics). The duration must be positive, and it can - only be applied to charts with data sets of LINE plot - type. - xAxis: - type: object - x-dcl-go-name: XAxis - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartXAxis - description: The properties applied to the X axis. 
- properties: - label: - type: string - x-dcl-go-name: Label - description: The label of the axis. - scale: - type: string - x-dcl-go-name: Scale - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum - description: 'The axis scale. By default, a linear - scale is used. Possible values: SCALE_UNSPECIFIED, - LINEAR, LOG10' - enum: - - SCALE_UNSPECIFIED - - LINEAR - - LOG10 - yAxis: - type: object - x-dcl-go-name: YAxis - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartYAxis - description: The properties applied to the Y axis. - properties: - label: - type: string - x-dcl-go-name: Label - description: The label of the axis. - scale: - type: string - x-dcl-go-name: Scale - x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum - description: 'The axis scale. By default, a linear - scale is used. Possible values: SCALE_UNSPECIFIED, - LINEAR, LOG10' - enum: - - SCALE_UNSPECIFIED - - LINEAR - - LOG10 - displayName: - type: string - x-dcl-go-name: DisplayName - description: Required. The mutable, human-readable name. - etag: - type: string - x-dcl-go-name: Etag - readOnly: true - description: \`etag\` is used for optimistic concurrency control as a way - to help prevent simultaneous updates of a policy from overwriting each - other. An \`etag\` is returned in the response to \`GetDashboard\`, and - users are expected to put that etag in the request to \`UpdateDashboard\` - to ensure that their change will be applied to the same version of the - Dashboard configuration. The field should not be passed during dashboard - creation. - x-kubernetes-immutable: true - gridLayout: - type: object - x-dcl-go-name: GridLayout - x-dcl-go-type: DashboardGridLayout - description: Content is arranged with a basic layout that re-flows a simple - list of informational elements like widgets or tiles. 
- x-dcl-conflicts: - - mosaicLayout - - rowLayout - - columnLayout - properties: - columns: - type: integer - format: int64 - x-dcl-go-name: Columns - description: The number of columns into which the view's width is divided. - If omitted or set to zero, a system default will be used while rendering. - widgets: - type: array - x-dcl-go-name: Widgets - description: The informational elements that are arranged into the columns - row-first. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardGridLayoutWidgets - properties: - blank: - type: object - x-dcl-go-name: Blank - x-dcl-go-type: DashboardGridLayoutWidgetsBlank - description: A blank space. - x-dcl-conflicts: - - xyChart - - scorecard - - text - - logsPanel - logsPanel: - type: object - x-dcl-go-name: LogsPanel - x-dcl-go-type: DashboardGridLayoutWidgetsLogsPanel - x-dcl-conflicts: - - xyChart - - scorecard - - text - - blank - properties: - filter: - type: string - x-dcl-go-name: Filter - description: A filter that chooses which log entries to return. - See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). - Only log entries that match the filter are returned. An - empty filter matches all log entries. - resourceNames: - type: array - x-dcl-go-name: ResourceNames - description: The names of logging resources to collect logs - for. Currently only projects are supported. If empty, the - widget will default to the host project. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - scorecard: - type: object - x-dcl-go-name: Scorecard - x-dcl-go-type: DashboardGridLayoutWidgetsScorecard - description: A scorecard summarizing time series data. 
- x-dcl-conflicts: - - xyChart - - text - - blank - - logsPanel - required: - - timeSeriesQuery - properties: - gaugeView: - type: object - x-dcl-go-name: GaugeView - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardGaugeView - description: Will cause the scorecard to show a gauge chart. - properties: - lowerBound: - type: number - format: double - x-dcl-go-name: LowerBound - description: The lower bound for this gauge chart. The - value of the chart should always be greater than or - equal to this. - upperBound: - type: number - format: double - x-dcl-go-name: UpperBound - description: The upper bound for this gauge chart. The - value of the chart should always be less than or equal - to this. - sparkChartView: - type: object - x-dcl-go-name: SparkChartView - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardSparkChartView - description: Will cause the scorecard to show a spark chart. - required: - - sparkChartType - properties: - minAlignmentPeriod: - type: string - x-dcl-go-name: MinAlignmentPeriod - description: The lower bound on data point frequency in - the chart implemented by specifying the minimum alignment - period to use in a time series query. For example, if - the data is published once every 10 minutes it would - not make sense to fetch and align data at one minute - intervals. This field is optional and exists only as - a hint. - sparkChartType: - type: string - x-dcl-go-name: SparkChartType - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum - description: 'Required. The type of sparkchart to show - in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED, - SPARK_LINE, SPARK_BAR' - enum: - - SPARK_CHART_TYPE_UNSPECIFIED - - SPARK_LINE - - SPARK_BAR - thresholds: - type: array - x-dcl-go-name: Thresholds - description: 'The thresholds used to determine the state of - the scorecard given the time series'' current value. 
For - an actual value x, the scorecard is in a danger state if - x is less than or equal to a danger threshold that triggers - below, or greater than or equal to a danger threshold that - triggers above. Similarly, if x is above/below a warning - threshold that triggers above/below, then the scorecard - is in a warning state - unless x also puts it in a danger - state. (Danger trumps warning.) As an example, consider - a scorecard with the following four thresholds: { value: - 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value: - 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value: - 10, category: ''DANGER'', trigger: ''BELOW'', }, { value: - 20, category: ''WARNING'', trigger: ''BELOW'', } Then: - values less than or equal to 10 would put the scorecard - in a DANGER state, values greater than 10 but less than - or equal to 20 a WARNING state, values strictly between - 20 and 70 an OK state, values greater than or equal to 70 - but less than 90 a WARNING state, and values greater than - or equal to 90 a DANGER state.' - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholds - properties: - color: - type: string - x-dcl-go-name: Color - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholdsColorEnum - description: 'The state color for this threshold. Color - is not allowed in a XyChart. Possible values: COLOR_UNSPECIFIED, - GREY, BLUE, GREEN, YELLOW, ORANGE, RED' - enum: - - COLOR_UNSPECIFIED - - GREY - - BLUE - - GREEN - - YELLOW - - ORANGE - - RED - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum - description: 'The direction for the current threshold. - Direction is not allowed in a XyChart. Possible values: - DIRECTION_UNSPECIFIED, ABOVE, BELOW' - enum: - - DIRECTION_UNSPECIFIED - - ABOVE - - BELOW - label: - type: string - x-dcl-go-name: Label - description: A label for the threshold. 
- value: - type: number - format: double - x-dcl-go-name: Value - description: The value of the threshold. The value should - be defined in the native scale of the metric. - timeSeriesQuery: - type: object - x-dcl-go-name: TimeSeriesQuery - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQuery - description: Required. Fields for querying time series data - from the Stackdriver metrics API. - properties: - timeSeriesFilter: - type: object - x-dcl-go-name: TimeSeriesFilter - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - description: Filter parameters to fetch time series. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - description: By default, the raw time series data - is returned. Use this field to combine multiple - time series for different views of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used to - divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will be - done before the per-series aligner can be applied - to the data. The value must be at least 60 - seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required - or an error is returned. If no per-series aligner - is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. 
- crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum - description: 'The reduction operation to be used - to combine time series into a single time series, - where the value of each data point in the resulting - series is a function of all the already aligned - values in the input time series. Not all reducer - operations can be applied to all time series. - The valid choices depend on the `metric_kind` - and the `value_type` of the original time series. - Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input - time series. Time series data must first be - aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` must - be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, - REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve when - `cross_series_reducer` is specified. The `group_by_fields` - determine how the time series are partitioned - into subsets prior to applying the aggregation - operation. 
Each subset contains time series - that have the same value for each of the grouping - fields. Each individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. It - is not possible to reduce across different resource - types, so this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same resource - type, then the time series are aggregated into - a single output time series. If `cross_series_reducer` - is not defined, this field is ignored. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how to bring - the data points in a single time series into - temporal alignment. Except for `ALIGN_NONE`, - all alignments cause all the data points in - an `alignment_period` to be mathematically grouped - together, resulting in a single data point for - each `alignment_period` with end timestamp at - the end of the period. Not all alignment operations - may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` - of the original time series. Alignment can change - the `metric_kind` or the `value_type` of the - time series. Time series data must be aligned - in order to perform cross-time series reduction. - If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not - equal to `ALIGN_NONE` and `alignment_period` - must be specified; otherwise, an error is returned. 
- enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, and - projects to query. - pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - description: Ranking based time series filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking to select - time series that pass through the filter. Possible - values: DIRECTION_UNSPECIFIED, TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series to allow to - pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is applied to each - time series independently to produce the value - which will be used to compare the time series - to other time series. 
Possible values: METHOD_UNSPECIFIED, - METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, - METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - description: Apply a second aggregation after `aggregation` - is applied. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used to - divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will be - done before the per-series aligner can be applied - to the data. The value must be at least 60 - seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required - or an error is returned. If no per-series aligner - is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation to be used - to combine time series into a single time series, - where the value of each data point in the resulting - series is a function of all the already aligned - values in the input time series. Not all reducer - operations can be applied to all time series. - The valid choices depend on the `metric_kind` - and the `value_type` of the original time series. - Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input - time series. Time series data must first be - aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. 
If `cross_series_reducer` - is specified, then `per_series_aligner` must - be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, - REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve when - `cross_series_reducer` is specified. The `group_by_fields` - determine how the time series are partitioned - into subsets prior to applying the aggregation - operation. Each subset contains time series - that have the same value for each of the grouping - fields. Each individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. It - is not possible to reduce across different resource - types, so this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same resource - type, then the time series are aggregated into - a single output time series. If `cross_series_reducer` - is not defined, this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how to bring - the data points in a single time series into - temporal alignment. Except for `ALIGN_NONE`, - all alignments cause all the data points in - an `alignment_period` to be mathematically grouped - together, resulting in a single data point for - each `alignment_period` with end timestamp at - the end of the period. Not all alignment operations - may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` - of the original time series. Alignment can change - the `metric_kind` or the `value_type` of the - time series. Time series data must be aligned - in order to perform cross-time series reduction. - If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not - equal to `ALIGN_NONE` and `alignment_period` - must be specified; otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesFilterRatio: - type: object - x-dcl-go-name: TimeSeriesFilterRatio - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - description: Parameters to fetch a ratio between two time - series filters. 
- properties: - denominator: - type: object - x-dcl-go-name: Denominator - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - description: The denominator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - description: By default, the raw time series data - is returned. Use this field to combine multiple - time series for different views of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will - be done before the per-series aligner can - be applied to the data. The value must - be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is returned. - If no per-series aligner is specified, or - the aligner `ALIGN_NONE` is specified, then - this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum - description: 'The reduction operation to be - used to combine time series into a single - time series, where the value of each data - point in the resulting series is a function - of all the already aligned values in the - input time series. Not all reducer operations - can be applied to all time series. The valid - choices depend on the `metric_kind` and - the `value_type` of the original time series. - Reduction can yield a time series with a - different `metric_kind` or `value_type` - than the input time series. 
Time series - data must first be aligned (see `per_series_aligner`) - in order to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be specified, - and must not be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, an error - is returned. Possible values: REDUCE_NONE, - REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, - REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, - REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, - REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, - REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that have - the same value for each of the grouping - fields. Each individual time series is a - member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not specified - in `group_by_fields` are aggregated away. If - `group_by_fields` is not specified and all - the time series have the same resource type, - then the time series are aggregated into - a single output time series. If `cross_series_reducer` - is not defined, this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how to - bring the data points in a single time series - into temporal alignment. Except for `ALIGN_NONE`, - all alignments cause all the data points - in an `alignment_period` to be mathematically - grouped together, resulting in a single - data point for each `alignment_period` with - end timestamp at the end of the period. Not - all alignment operations may be applied - to all time series. The valid choices depend - on the `metric_kind` and `value_type` of - the original time series. Alignment can - change the `metric_kind` or the `value_type` - of the time series. Time series data must - be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- numerator: - type: object - x-dcl-go-name: Numerator - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - description: The numerator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - description: By default, the raw time series data - is returned. Use this field to combine multiple - time series for different views of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will - be done before the per-series aligner can - be applied to the data. The value must - be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is returned. - If no per-series aligner is specified, or - the aligner `ALIGN_NONE` is specified, then - this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum - description: 'The reduction operation to be - used to combine time series into a single - time series, where the value of each data - point in the resulting series is a function - of all the already aligned values in the - input time series. Not all reducer operations - can be applied to all time series. The valid - choices depend on the `metric_kind` and - the `value_type` of the original time series. - Reduction can yield a time series with a - different `metric_kind` or `value_type` - than the input time series. 
Time series - data must first be aligned (see `per_series_aligner`) - in order to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be specified, - and must not be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, an error - is returned. Possible values: REDUCE_NONE, - REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, - REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, - REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, - REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, - REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that have - the same value for each of the grouping - fields. Each individual time series is a - member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not specified - in `group_by_fields` are aggregated away. If - `group_by_fields` is not specified and all - the time series have the same resource type, - then the time series are aggregated into - a single output time series. If `cross_series_reducer` - is not defined, this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how to - bring the data points in a single time series - into temporal alignment. Except for `ALIGN_NONE`, - all alignments cause all the data points - in an `alignment_period` to be mathematically - grouped together, resulting in a single - data point for each `alignment_period` with - end timestamp at the end of the period. Not - all alignment operations may be applied - to all time series. The valid choices depend - on the `metric_kind` and `value_type` of - the original time series. Alignment can - change the `metric_kind` or the `value_type` - of the time series. Time series data must - be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - description: Ranking based time series filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking to select - time series that pass through the filter. Possible - values: DIRECTION_UNSPECIFIED, TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series to allow to - pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is applied to each - time series independently to produce the value - which will be used to compare the time series - to other time series. Possible values: METHOD_UNSPECIFIED, - METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, - METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - description: Apply a second aggregation after the - ratio is computed. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used to - divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will be - done before the per-series aligner can be applied - to the data. 
The value must be at least 60 - seconds. If a per-series aligner other than - `ALIGN_NONE` is specified, this field is required - or an error is returned. If no per-series aligner - is specified, or the aligner `ALIGN_NONE` is - specified, then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation to be used - to combine time series into a single time series, - where the value of each data point in the resulting - series is a function of all the already aligned - values in the input time series. Not all reducer - operations can be applied to all time series. - The valid choices depend on the `metric_kind` - and the `value_type` of the original time series. - Reduction can yield a time series with a different - `metric_kind` or `value_type` than the input - time series. Time series data must first be - aligned (see `per_series_aligner`) in order - to perform cross-time series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` must - be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. 
Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, - REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve when - `cross_series_reducer` is specified. The `group_by_fields` - determine how the time series are partitioned - into subsets prior to applying the aggregation - operation. Each subset contains time series - that have the same value for each of the grouping - fields. Each individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. It - is not possible to reduce across different resource - types, so this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same resource - type, then the time series are aggregated into - a single output time series. If `cross_series_reducer` - is not defined, this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how to bring - the data points in a single time series into - temporal alignment. Except for `ALIGN_NONE`, - all alignments cause all the data points in - an `alignment_period` to be mathematically grouped - together, resulting in a single data point for - each `alignment_period` with end timestamp at - the end of the period. Not all alignment operations - may be applied to all time series. The valid - choices depend on the `metric_kind` and `value_type` - of the original time series. Alignment can change - the `metric_kind` or the `value_type` of the - time series. Time series data must be aligned - in order to perform cross-time series reduction. - If `cross_series_reducer` is specified, then - `per_series_aligner` must be specified and not - equal to `ALIGN_NONE` and `alignment_period` - must be specified; otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesQueryLanguage: - type: string - x-dcl-go-name: TimeSeriesQueryLanguage - description: A query used to fetch time series. - unitOverride: - type: string - x-dcl-go-name: UnitOverride - description: The unit of data contained in fetched time - series. If non-empty, this unit will override any unit - that accompanies fetched data. 
The format is the same - as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - text: - type: object - x-dcl-go-name: Text - x-dcl-go-type: DashboardGridLayoutWidgetsText - description: A raw string or markdown displaying textual content. - x-dcl-conflicts: - - xyChart - - scorecard - - blank - - logsPanel - properties: - content: - type: string - x-dcl-go-name: Content - description: The text content to be displayed. - format: - type: string - x-dcl-go-name: Format - x-dcl-go-type: DashboardGridLayoutWidgetsTextFormatEnum - description: 'How the text content is formatted. Possible - values: FORMAT_UNSPECIFIED, MARKDOWN, RAW' - enum: - - FORMAT_UNSPECIFIED - - MARKDOWN - - RAW - title: - type: string - x-dcl-go-name: Title - description: Optional. The title of the widget. - xyChart: - type: object - x-dcl-go-name: XyChart - x-dcl-go-type: DashboardGridLayoutWidgetsXyChart - description: A chart of time series data. - x-dcl-conflicts: - - scorecard - - text - - blank - - logsPanel - required: - - dataSets - properties: - chartOptions: - type: object - x-dcl-go-name: ChartOptions - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartChartOptions - description: Display options for the chart. - properties: - mode: - type: string - x-dcl-go-name: Mode - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum - description: 'The chart mode. Possible values: MODE_UNSPECIFIED, - COLOR, X_RAY, STATS' - enum: - - MODE_UNSPECIFIED - - COLOR - - X_RAY - - STATS - dataSets: - type: array - x-dcl-go-name: DataSets - description: Required. The data displayed in this chart. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSets - required: - - timeSeriesQuery - properties: - legendTemplate: - type: string - x-dcl-go-name: LegendTemplate - description: 'A template string for naming `TimeSeries` - in the resulting data set. This should be a string - with interpolations of the form `${label_name}`, which - will resolve to the label''s value. ' - minAlignmentPeriod: - type: string - x-dcl-go-name: MinAlignmentPeriod - description: Optional. The lower bound on data point - frequency for this data set, implemented by specifying - the minimum alignment period to use in a time series - query For example, if the data is published once every - 10 minutes, the `min_alignment_period` should be at - least 10 minutes. It would not make sense to fetch - and align data at one minute intervals. - plotType: - type: string - x-dcl-go-name: PlotType - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum - description: 'How this data should be plotted on the - chart. Possible values: PLOT_TYPE_UNSPECIFIED, LINE, - STACKED_AREA, STACKED_BAR, HEATMAP' - enum: - - PLOT_TYPE_UNSPECIFIED - - LINE - - STACKED_AREA - - STACKED_BAR - - HEATMAP - timeSeriesQuery: - type: object - x-dcl-go-name: TimeSeriesQuery - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery - description: Required. Fields for querying time series - data from the Stackdriver metrics API. - properties: - timeSeriesFilter: - type: object - x-dcl-go-name: TimeSeriesFilter - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - description: Filter parameters to fetch time series. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - description: By default, the raw time series - data is returned. 
Use this field to combine - multiple time series for different views of - the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will - be done before the per-series aligner - can be applied to the data. The value - must be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is - returned. If no per-series aligner is - specified, or the aligner `ALIGN_NONE` - is specified, then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum - description: 'The reduction operation to - be used to combine time series into a - single time series, where the value of - each data point in the resulting series - is a function of all the already aligned - values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the `value_type` - of the original time series. Reduction - can yield a time series with a different - `metric_kind` or `value_type` than the - input time series. Time series data must - first be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must - be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. 
Possible - values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how - to bring the data points in a single time - series into temporal alignment. Except - for `ALIGN_NONE`, all alignments cause - all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for each - `alignment_period` with end timestamp - at the end of the period. Not all alignment - operations may be applied to all time - series. The valid choices depend on the - `metric_kind` and `value_type` of the - original time series. Alignment can change - the `metric_kind` or the `value_type` - of the time series. Time series data - must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - description: Ranking based time series filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking to - select time series that pass through the - filter. Possible values: DIRECTION_UNSPECIFIED, - TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series to allow - to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is applied - to each time series independently to produce - the value which will be used to compare - the time series to other time series. - Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, - METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - description: Apply a second aggregation after - `aggregation` is applied. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will - be done before the per-series aligner - can be applied to the data. 
The value - must be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is - returned. If no per-series aligner is - specified, or the aligner `ALIGN_NONE` - is specified, then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation to - be used to combine time series into a - single time series, where the value of - each data point in the resulting series - is a function of all the already aligned - values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the `value_type` - of the original time series. Reduction - can yield a time series with a different - `metric_kind` or `value_type` than the - input time series. Time series data must - first be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must - be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. 
Possible - values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how - to bring the data points in a single time - series into temporal alignment. Except - for `ALIGN_NONE`, all alignments cause - all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for each - `alignment_period` with end timestamp - at the end of the period. Not all alignment - operations may be applied to all time - series. The valid choices depend on the - `metric_kind` and `value_type` of the - original time series. Alignment can change - the `metric_kind` or the `value_type` - of the time series. Time series data - must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesFilterRatio: - type: object - x-dcl-go-name: TimeSeriesFilterRatio - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - description: Parameters to fetch a ratio between - two time series filters. 
- properties: - denominator: - type: object - x-dcl-go-name: Denominator - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - description: The denominator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data in - all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This - will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this field - is required or an error is returned. - If no per-series aligner is specified, - or the aligner `ALIGN_NONE` is specified, - then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where the - value of each data point in the resulting - series is a function of all the already - aligned values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the - `value_type` of the original time - series. Reduction can yield a time - series with a different `metric_kind` - or `value_type` than the input time - series. 
Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in a - single time series into temporal alignment. - Except for `ALIGN_NONE`, all alignments - cause all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for - each `alignment_period` with end timestamp - at the end of the period. Not all - alignment operations may be applied - to all time series. The valid choices - depend on the `metric_kind` and `value_type` - of the original time series. Alignment - can change the `metric_kind` or the - `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- numerator: - type: object - x-dcl-go-name: Numerator - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - description: The numerator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data in - all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This - will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this field - is required or an error is returned. - If no per-series aligner is specified, - or the aligner `ALIGN_NONE` is specified, - then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where the - value of each data point in the resulting - series is a function of all the already - aligned values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the - `value_type` of the original time - series. Reduction can yield a time - series with a different `metric_kind` - or `value_type` than the input time - series. 
Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in a - single time series into temporal alignment. - Except for `ALIGN_NONE`, all alignments - cause all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for - each `alignment_period` with end timestamp - at the end of the period. Not all - alignment operations may be applied - to all time series. The valid choices - depend on the `metric_kind` and `value_type` - of the original time series. Alignment - can change the `metric_kind` or the - `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - description: Ranking based time series filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking to - select time series that pass through the - filter. Possible values: DIRECTION_UNSPECIFIED, - TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series to allow - to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is applied - to each time series independently to produce - the value which will be used to compare - the time series to other time series. - Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, - METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - description: Apply a second aggregation after - the ratio is computed. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. 
This will - be done before the per-series aligner - can be applied to the data. The value - must be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is - returned. If no per-series aligner is - specified, or the aligner `ALIGN_NONE` - is specified, then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation to - be used to combine time series into a - single time series, where the value of - each data point in the resulting series - is a function of all the already aligned - values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the `value_type` - of the original time series. Reduction - can yield a time series with a different - `metric_kind` or `value_type` than the - input time series. Time series data must - first be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must - be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. 
Possible - values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how - to bring the data points in a single time - series into temporal alignment. Except - for `ALIGN_NONE`, all alignments cause - all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for each - `alignment_period` with end timestamp - at the end of the period. Not all alignment - operations may be applied to all time - series. The valid choices depend on the - `metric_kind` and `value_type` of the - original time series. Alignment can change - the `metric_kind` or the `value_type` - of the time series. Time series data - must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesQueryLanguage: - type: string - x-dcl-go-name: TimeSeriesQueryLanguage - description: A query used to fetch time series. - unitOverride: - type: string - x-dcl-go-name: UnitOverride - description: The unit of data contained in fetched - time series. If non-empty, this unit will override - any unit that accompanies fetched data. 
The format - is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - thresholds: - type: array - x-dcl-go-name: Thresholds - description: Threshold lines drawn horizontally across the - chart. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholds - properties: - color: - type: string - x-dcl-go-name: Color - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholdsColorEnum - description: 'The state color for this threshold. Color - is not allowed in a XyChart. Possible values: COLOR_UNSPECIFIED, - GREY, BLUE, GREEN, YELLOW, ORANGE, RED' - enum: - - COLOR_UNSPECIFIED - - GREY - - BLUE - - GREEN - - YELLOW - - ORANGE - - RED - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum - description: 'The direction for the current threshold. - Direction is not allowed in a XyChart. Possible values: - DIRECTION_UNSPECIFIED, ABOVE, BELOW' - enum: - - DIRECTION_UNSPECIFIED - - ABOVE - - BELOW - label: - type: string - x-dcl-go-name: Label - description: A label for the threshold. - value: - type: number - format: double - x-dcl-go-name: Value - description: The value of the threshold. The value should - be defined in the native scale of the metric. - timeshiftDuration: - type: string - x-dcl-go-name: TimeshiftDuration - description: The duration used to display a comparison chart. - A comparison chart simultaneously shows values from two - similar-length time periods (e.g., week-over-week metrics). - The duration must be positive, and it can only be applied - to charts with data sets of LINE plot type. - xAxis: - type: object - x-dcl-go-name: XAxis - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartXAxis - description: The properties applied to the X axis. 
- properties: - label: - type: string - x-dcl-go-name: Label - description: The label of the axis. - scale: - type: string - x-dcl-go-name: Scale - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartXAxisScaleEnum - description: 'The axis scale. By default, a linear scale - is used. Possible values: SCALE_UNSPECIFIED, LINEAR, - LOG10' - enum: - - SCALE_UNSPECIFIED - - LINEAR - - LOG10 - yAxis: - type: object - x-dcl-go-name: YAxis - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartYAxis - description: The properties applied to the Y axis. - properties: - label: - type: string - x-dcl-go-name: Label - description: The label of the axis. - scale: - type: string - x-dcl-go-name: Scale - x-dcl-go-type: DashboardGridLayoutWidgetsXyChartYAxisScaleEnum - description: 'The axis scale. By default, a linear scale - is used. Possible values: SCALE_UNSPECIFIED, LINEAR, - LOG10' - enum: - - SCALE_UNSPECIFIED - - LINEAR - - LOG10 - mosaicLayout: - type: object - x-dcl-go-name: MosaicLayout - x-dcl-go-type: DashboardMosaicLayout - description: The content is arranged as a grid of tiles, with each content - widget occupying one or more tiles. - x-dcl-conflicts: - - gridLayout - - rowLayout - - columnLayout - properties: - columns: - type: integer - format: int64 - x-dcl-go-name: Columns - description: The number of columns in the mosaic grid. - tiles: - type: array - x-dcl-go-name: Tiles - description: The tiles to display. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardMosaicLayoutTiles - properties: - height: - type: integer - format: int64 - x-dcl-go-name: Height - description: The height of the tile, measured in grid squares. - widget: - type: object - x-dcl-go-name: Widget - x-dcl-go-type: DashboardMosaicLayoutTilesWidget - description: The informational widget contained in the tile. - properties: - blank: - type: object - x-dcl-go-name: Blank - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetBlank - description: A blank space. 
- x-dcl-conflicts: - - xyChart - - scorecard - - text - - logsPanel - logsPanel: - type: object - x-dcl-go-name: LogsPanel - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetLogsPanel - x-dcl-conflicts: - - xyChart - - scorecard - - text - - blank - properties: - filter: - type: string - x-dcl-go-name: Filter - description: A filter that chooses which log entries to - return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). - Only log entries that match the filter are returned. - An empty filter matches all log entries. - resourceNames: - type: array - x-dcl-go-name: ResourceNames - description: The names of logging resources to collect - logs for. Currently only projects are supported. If - empty, the widget will default to the host project. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - scorecard: - type: object - x-dcl-go-name: Scorecard - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecard - description: A scorecard summarizing time series data. - x-dcl-conflicts: - - xyChart - - text - - blank - - logsPanel - required: - - timeSeriesQuery - properties: - gaugeView: - type: object - x-dcl-go-name: GaugeView - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardGaugeView - description: Will cause the scorecard to show a gauge - chart. - properties: - lowerBound: - type: number - format: double - x-dcl-go-name: LowerBound - description: The lower bound for this gauge chart. - The value of the chart should always be greater - than or equal to this. - upperBound: - type: number - format: double - x-dcl-go-name: UpperBound - description: The upper bound for this gauge chart. - The value of the chart should always be less than - or equal to this. 
- sparkChartView: - type: object - x-dcl-go-name: SparkChartView - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardSparkChartView - description: Will cause the scorecard to show a spark - chart. - required: - - sparkChartType - properties: - minAlignmentPeriod: - type: string - x-dcl-go-name: MinAlignmentPeriod - description: The lower bound on data point frequency - in the chart implemented by specifying the minimum - alignment period to use in a time series query. - For example, if the data is published once every - 10 minutes it would not make sense to fetch and - align data at one minute intervals. This field is - optional and exists only as a hint. - sparkChartType: - type: string - x-dcl-go-name: SparkChartType - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum - description: 'Required. The type of sparkchart to - show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED, - SPARK_LINE, SPARK_BAR' - enum: - - SPARK_CHART_TYPE_UNSPECIFIED - - SPARK_LINE - - SPARK_BAR - thresholds: - type: array - x-dcl-go-name: Thresholds - description: 'The thresholds used to determine the state - of the scorecard given the time series'' current value. - For an actual value x, the scorecard is in a danger - state if x is less than or equal to a danger threshold - that triggers below, or greater than or equal to a danger - threshold that triggers above. Similarly, if x is above/below - a warning threshold that triggers above/below, then - the scorecard is in a warning state - unless x also - puts it in a danger state. (Danger trumps warning.) 
As - an example, consider a scorecard with the following - four thresholds: { value: 90, category: ''DANGER'', trigger: - ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger: - ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger: - ''BELOW'', }, { value: 20, category: ''WARNING'', trigger: - ''BELOW'', } Then: values less than or equal to 10 - would put the scorecard in a DANGER state, values greater - than 10 but less than or equal to 20 a WARNING state, - values strictly between 20 and 70 an OK state, values - greater than or equal to 70 but less than 90 a WARNING - state, and values greater than or equal to 90 a DANGER - state.' - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholds - properties: - color: - type: string - x-dcl-go-name: Color - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum - description: 'The state color for this threshold. - Color is not allowed in a XyChart. Possible values: - COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW, - ORANGE, RED' - enum: - - COLOR_UNSPECIFIED - - GREY - - BLUE - - GREEN - - YELLOW - - ORANGE - - RED - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum - description: 'The direction for the current threshold. - Direction is not allowed in a XyChart. Possible - values: DIRECTION_UNSPECIFIED, ABOVE, BELOW' - enum: - - DIRECTION_UNSPECIFIED - - ABOVE - - BELOW - label: - type: string - x-dcl-go-name: Label - description: A label for the threshold. - value: - type: number - format: double - x-dcl-go-name: Value - description: The value of the threshold. The value - should be defined in the native scale of the metric. - timeSeriesQuery: - type: object - x-dcl-go-name: TimeSeriesQuery - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery - description: Required. 
Fields for querying time series - data from the Stackdriver metrics API. - properties: - timeSeriesFilter: - type: object - x-dcl-go-name: TimeSeriesFilter - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter - description: Filter parameters to fetch time series. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation - description: By default, the raw time series data - is returned. Use this field to combine multiple - time series for different views of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will - be done before the per-series aligner can - be applied to the data. The value must - be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is returned. - If no per-series aligner is specified, or - the aligner `ALIGN_NONE` is specified, then - this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum - description: 'The reduction operation to be - used to combine time series into a single - time series, where the value of each data - point in the resulting series is a function - of all the already aligned values in the - input time series. Not all reducer operations - can be applied to all time series. The valid - choices depend on the `metric_kind` and - the `value_type` of the original time series. 
- Reduction can yield a time series with a - different `metric_kind` or `value_type` - than the input time series. Time series - data must first be aligned (see `per_series_aligner`) - in order to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be specified, - and must not be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, an error - is returned. Possible values: REDUCE_NONE, - REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, - REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, - REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, - REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, - REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that have - the same value for each of the grouping - fields. Each individual time series is a - member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not specified - in `group_by_fields` are aggregated away. If - `group_by_fields` is not specified and all - the time series have the same resource type, - then the time series are aggregated into - a single output time series. 
If `cross_series_reducer` - is not defined, this field is ignored. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how to - bring the data points in a single time series - into temporal alignment. Except for `ALIGN_NONE`, - all alignments cause all the data points - in an `alignment_period` to be mathematically - grouped together, resulting in a single - data point for each `alignment_period` with - end timestamp at the end of the period. Not - all alignment operations may be applied - to all time series. The valid choices depend - on the `metric_kind` and `value_type` of - the original time series. Alignment can - change the `metric_kind` or the `value_type` - of the time series. Time series data must - be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - description: Ranking based time series filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking to select - time series that pass through the filter. - Possible values: DIRECTION_UNSPECIFIED, - TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series to allow - to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is applied - to each time series independently to produce - the value which will be used to compare - the time series to other time series. Possible - values: METHOD_UNSPECIFIED, METHOD_MEAN, - METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - description: Apply a second aggregation after - `aggregation` is applied. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will - be done before the per-series aligner can - be applied to the data. 
The value must - be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is returned. - If no per-series aligner is specified, or - the aligner `ALIGN_NONE` is specified, then - this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation to be - used to combine time series into a single - time series, where the value of each data - point in the resulting series is a function - of all the already aligned values in the - input time series. Not all reducer operations - can be applied to all time series. The valid - choices depend on the `metric_kind` and - the `value_type` of the original time series. - Reduction can yield a time series with a - different `metric_kind` or `value_type` - than the input time series. Time series - data must first be aligned (see `per_series_aligner`) - in order to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be specified, - and must not be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, an error - is returned. 
Possible values: REDUCE_NONE, - REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, - REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, - REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, - REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, - REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that have - the same value for each of the grouping - fields. Each individual time series is a - member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not specified - in `group_by_fields` are aggregated away. If - `group_by_fields` is not specified and all - the time series have the same resource type, - then the time series are aggregated into - a single output time series. If `cross_series_reducer` - is not defined, this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how to - bring the data points in a single time series - into temporal alignment. Except for `ALIGN_NONE`, - all alignments cause all the data points - in an `alignment_period` to be mathematically - grouped together, resulting in a single - data point for each `alignment_period` with - end timestamp at the end of the period. Not - all alignment operations may be applied - to all time series. The valid choices depend - on the `metric_kind` and `value_type` of - the original time series. Alignment can - change the `metric_kind` or the `value_type` - of the time series. Time series data must - be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesFilterRatio: - type: object - x-dcl-go-name: TimeSeriesFilterRatio - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio - description: Parameters to fetch a ratio between two - time series filters. 
- properties: - denominator: - type: object - x-dcl-go-name: Denominator - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - description: The denominator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is - used to divide the data in all the [time - series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This - will be done before the per-series aligner - can be applied to the data. The value - must be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is - returned. If no per-series aligner is - specified, or the aligner `ALIGN_NONE` - is specified, then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series into - a single time series, where the value - of each data point in the resulting - series is a function of all the already - aligned values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the - `value_type` of the original time series. - Reduction can yield a time series with - a different `metric_kind` or `value_type` - than the input time series. 
Time series - data must first be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. Possible - values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each individual - time series is a member of exactly one - subset. The `cross_series_reducer` is - applied to each subset of time series. - It is not possible to reduce across - different resource types, so this field - implicitly contains `resource.type`. Fields - not specified in `group_by_fields` are - aggregated away. If `group_by_fields` - is not specified and all the time series - have the same resource type, then the - time series are aggregated into a single - output time series. If `cross_series_reducer` - is not defined, this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how - to bring the data points in a single - time series into temporal alignment. - Except for `ALIGN_NONE`, all alignments - cause all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for - each `alignment_period` with end timestamp - at the end of the period. Not all alignment - operations may be applied to all time - series. The valid choices depend on - the `metric_kind` and `value_type` of - the original time series. Alignment - can change the `metric_kind` or the - `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be specified - and not equal to `ALIGN_NONE` and `alignment_period` - must be specified; otherwise, an error - is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- numerator: - type: object - x-dcl-go-name: Numerator - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - description: The numerator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is - used to divide the data in all the [time - series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This - will be done before the per-series aligner - can be applied to the data. The value - must be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is - returned. If no per-series aligner is - specified, or the aligner `ALIGN_NONE` - is specified, then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series into - a single time series, where the value - of each data point in the resulting - series is a function of all the already - aligned values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the - `value_type` of the original time series. - Reduction can yield a time series with - a different `metric_kind` or `value_type` - than the input time series. 
Time series - data must first be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. Possible - values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each individual - time series is a member of exactly one - subset. The `cross_series_reducer` is - applied to each subset of time series. - It is not possible to reduce across - different resource types, so this field - implicitly contains `resource.type`. Fields - not specified in `group_by_fields` are - aggregated away. If `group_by_fields` - is not specified and all the time series - have the same resource type, then the - time series are aggregated into a single - output time series. If `cross_series_reducer` - is not defined, this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how - to bring the data points in a single - time series into temporal alignment. - Except for `ALIGN_NONE`, all alignments - cause all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for - each `alignment_period` with end timestamp - at the end of the period. Not all alignment - operations may be applied to all time - series. The valid choices depend on - the `metric_kind` and `value_type` of - the original time series. Alignment - can change the `metric_kind` or the - `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be specified - and not equal to `ALIGN_NONE` and `alignment_period` - must be specified; otherwise, an error - is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - description: Ranking based time series filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking to select - time series that pass through the filter. - Possible values: DIRECTION_UNSPECIFIED, - TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series to allow - to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is applied - to each time series independently to produce - the value which will be used to compare - the time series to other time series. Possible - values: METHOD_UNSPECIFIED, METHOD_MEAN, - METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - description: Apply a second aggregation after - the ratio is computed. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. 
This will - be done before the per-series aligner can - be applied to the data. The value must - be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is returned. - If no per-series aligner is specified, or - the aligner `ALIGN_NONE` is specified, then - this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation to be - used to combine time series into a single - time series, where the value of each data - point in the resulting series is a function - of all the already aligned values in the - input time series. Not all reducer operations - can be applied to all time series. The valid - choices depend on the `metric_kind` and - the `value_type` of the original time series. - Reduction can yield a time series with a - different `metric_kind` or `value_type` - than the input time series. Time series - data must first be aligned (see `per_series_aligner`) - in order to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be specified, - and must not be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, an error - is returned. 
Possible values: REDUCE_NONE, - REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, - REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, - REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, - REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, - REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that have - the same value for each of the grouping - fields. Each individual time series is a - member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not specified - in `group_by_fields` are aggregated away. If - `group_by_fields` is not specified and all - the time series have the same resource type, - then the time series are aggregated into - a single output time series. If `cross_series_reducer` - is not defined, this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how to - bring the data points in a single time series - into temporal alignment. Except for `ALIGN_NONE`, - all alignments cause all the data points - in an `alignment_period` to be mathematically - grouped together, resulting in a single - data point for each `alignment_period` with - end timestamp at the end of the period. Not - all alignment operations may be applied - to all time series. The valid choices depend - on the `metric_kind` and `value_type` of - the original time series. Alignment can - change the `metric_kind` or the `value_type` - of the time series. Time series data must - be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesQueryLanguage: - type: string - x-dcl-go-name: TimeSeriesQueryLanguage - description: A query used to fetch time series. - unitOverride: - type: string - x-dcl-go-name: UnitOverride - description: The unit of data contained in fetched - time series. If non-empty, this unit will override - any unit that accompanies fetched data. 
The format - is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - text: - type: object - x-dcl-go-name: Text - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetText - description: A raw string or markdown displaying textual content. - x-dcl-conflicts: - - xyChart - - scorecard - - blank - - logsPanel - properties: - content: - type: string - x-dcl-go-name: Content - description: The text content to be displayed. - format: - type: string - x-dcl-go-name: Format - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetTextFormatEnum - description: 'How the text content is formatted. Possible - values: FORMAT_UNSPECIFIED, MARKDOWN, RAW' - enum: - - FORMAT_UNSPECIFIED - - MARKDOWN - - RAW - title: - type: string - x-dcl-go-name: Title - description: Optional. The title of the widget. - xyChart: - type: object - x-dcl-go-name: XyChart - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChart - description: A chart of time series data. - x-dcl-conflicts: - - scorecard - - text - - blank - - logsPanel - required: - - dataSets - properties: - chartOptions: - type: object - x-dcl-go-name: ChartOptions - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartChartOptions - description: Display options for the chart. - properties: - mode: - type: string - x-dcl-go-name: Mode - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum - description: 'The chart mode. Possible values: MODE_UNSPECIFIED, - COLOR, X_RAY, STATS' - enum: - - MODE_UNSPECIFIED - - COLOR - - X_RAY - - STATS - dataSets: - type: array - x-dcl-go-name: DataSets - description: Required. The data displayed in this chart. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSets - required: - - timeSeriesQuery - properties: - legendTemplate: - type: string - x-dcl-go-name: LegendTemplate - description: 'A template string for naming `TimeSeries` - in the resulting data set. This should be a string - with interpolations of the form `${label_name}`, - which will resolve to the label''s value. ' - minAlignmentPeriod: - type: string - x-dcl-go-name: MinAlignmentPeriod - description: Optional. The lower bound on data point - frequency for this data set, implemented by specifying - the minimum alignment period to use in a time - series query For example, if the data is published - once every 10 minutes, the `min_alignment_period` - should be at least 10 minutes. It would not make - sense to fetch and align data at one minute intervals. - plotType: - type: string - x-dcl-go-name: PlotType - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum - description: 'How this data should be plotted on - the chart. Possible values: PLOT_TYPE_UNSPECIFIED, - LINE, STACKED_AREA, STACKED_BAR, HEATMAP' - enum: - - PLOT_TYPE_UNSPECIFIED - - LINE - - STACKED_AREA - - STACKED_BAR - - HEATMAP - timeSeriesQuery: - type: object - x-dcl-go-name: TimeSeriesQuery - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery - description: Required. Fields for querying time - series data from the Stackdriver metrics API. - properties: - timeSeriesFilter: - type: object - x-dcl-go-name: TimeSeriesFilter - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - description: Filter parameters to fetch time - series. 
- required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data in - all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This - will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this field - is required or an error is returned. - If no per-series aligner is specified, - or the aligner `ALIGN_NONE` is specified, - then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where the - value of each data point in the resulting - series is a function of all the already - aligned values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the - `value_type` of the original time - series. Reduction can yield a time - series with a different `metric_kind` - or `value_type` than the input time - series. Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not be - `ALIGN_NONE`. 
An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in a - single time series into temporal alignment. - Except for `ALIGN_NONE`, all alignments - cause all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for - each `alignment_period` with end timestamp - at the end of the period. Not all - alignment operations may be applied - to all time series. The valid choices - depend on the `metric_kind` and `value_type` - of the original time series. Alignment - can change the `metric_kind` or the - `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - description: Ranking based time series filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking - to select time series that pass through - the filter. Possible values: DIRECTION_UNSPECIFIED, - TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series to - allow to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is applied - to each time series independently - to produce the value which will be - used to compare the time series to - other time series. Possible values: - METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, - METHOD_MIN, METHOD_SUM, METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - description: Apply a second aggregation - after `aggregation` is applied. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data in - all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. 
This - will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this field - is required or an error is returned. - If no per-series aligner is specified, - or the aligner `ALIGN_NONE` is specified, - then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where the - value of each data point in the resulting - series is a function of all the already - aligned values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the - `value_type` of the original time - series. Reduction can yield a time - series with a different `metric_kind` - or `value_type` than the input time - series. Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. 
Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in a - single time series into temporal alignment. - Except for `ALIGN_NONE`, all alignments - cause all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for - each `alignment_period` with end timestamp - at the end of the period. Not all - alignment operations may be applied - to all time series. The valid choices - depend on the `metric_kind` and `value_type` - of the original time series. Alignment - can change the `metric_kind` or the - `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesFilterRatio: - type: object - x-dcl-go-name: TimeSeriesFilterRatio - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - description: Parameters to fetch a ratio between - two time series filters. 
- properties: - denominator: - type: object - x-dcl-go-name: Denominator - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - description: The denominator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in - seconds, that is used to divide - the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. - This will be done before the per-series - aligner can be applied to the - data. The value must be at least - 60 seconds. If a per-series aligner - other than `ALIGN_NONE` is specified, - this field is required or an error - is returned. If no per-series - aligner is specified, or the aligner - `ALIGN_NONE` is specified, then - this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where - the value of each data point in - the resulting series is a function - of all the already aligned values - in the input time series. Not - all reducer operations can be - applied to all time series. The - valid choices depend on the `metric_kind` - and the `value_type` of the original - time series. Reduction can yield - a time series with a different - `metric_kind` or `value_type` - than the input time series. 
Time - series data must first be aligned - (see `per_series_aligner`) in - order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not - be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible - values: REDUCE_NONE, REDUCE_MEAN, - REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, - REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, - REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, - REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, - REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets prior - to applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to - reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in - a single time series into temporal - alignment. Except for `ALIGN_NONE`, - all alignments cause all the data - points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point - for each `alignment_period` with - end timestamp at the end of the - period. Not all alignment operations - may be applied to all time series. - The valid choices depend on the - `metric_kind` and `value_type` - of the original time series. Alignment - can change the `metric_kind` or - the `value_type` of the time series. Time - series data must be aligned in - order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal - to `ALIGN_NONE` and `alignment_period` - must be specified; otherwise, - an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. 
- numerator: - type: object - x-dcl-go-name: Numerator - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - description: The numerator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in - seconds, that is used to divide - the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. - This will be done before the per-series - aligner can be applied to the - data. The value must be at least - 60 seconds. If a per-series aligner - other than `ALIGN_NONE` is specified, - this field is required or an error - is returned. If no per-series - aligner is specified, or the aligner - `ALIGN_NONE` is specified, then - this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where - the value of each data point in - the resulting series is a function - of all the already aligned values - in the input time series. Not - all reducer operations can be - applied to all time series. The - valid choices depend on the `metric_kind` - and the `value_type` of the original - time series. Reduction can yield - a time series with a different - `metric_kind` or `value_type` - than the input time series. 
Time - series data must first be aligned - (see `per_series_aligner`) in - order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not - be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible - values: REDUCE_NONE, REDUCE_MEAN, - REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, - REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, - REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, - REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, - REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets prior - to applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to - reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in - a single time series into temporal - alignment. Except for `ALIGN_NONE`, - all alignments cause all the data - points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point - for each `alignment_period` with - end timestamp at the end of the - period. Not all alignment operations - may be applied to all time series. - The valid choices depend on the - `metric_kind` and `value_type` - of the original time series. Alignment - can change the `metric_kind` or - the `value_type` of the time series. Time - series data must be aligned in - order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal - to `ALIGN_NONE` and `alignment_period` - must be specified; otherwise, - an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - description: Ranking based time series filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking - to select time series that pass through - the filter. Possible values: DIRECTION_UNSPECIFIED, - TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series to - allow to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is applied - to each time series independently - to produce the value which will be - used to compare the time series to - other time series. Possible values: - METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, - METHOD_MIN, METHOD_SUM, METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - description: Apply a second aggregation - after the ratio is computed. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data in - all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. 
This - will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this field - is required or an error is returned. - If no per-series aligner is specified, - or the aligner `ALIGN_NONE` is specified, - then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where the - value of each data point in the resulting - series is a function of all the already - aligned values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the - `value_type` of the original time - series. Reduction can yield a time - series with a different `metric_kind` - or `value_type` than the input time - series. Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. 
Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in a - single time series into temporal alignment. - Except for `ALIGN_NONE`, all alignments - cause all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for - each `alignment_period` with end timestamp - at the end of the period. Not all - alignment operations may be applied - to all time series. The valid choices - depend on the `metric_kind` and `value_type` - of the original time series. Alignment - can change the `metric_kind` or the - `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesQueryLanguage: - type: string - x-dcl-go-name: TimeSeriesQueryLanguage - description: A query used to fetch time series. - unitOverride: - type: string - x-dcl-go-name: UnitOverride - description: The unit of data contained in fetched - time series. If non-empty, this unit will - override any unit that accompanies fetched - data. 
The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - thresholds: - type: array - x-dcl-go-name: Thresholds - description: Threshold lines drawn horizontally across - the chart. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholds - properties: - color: - type: string - x-dcl-go-name: Color - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum - description: 'The state color for this threshold. - Color is not allowed in a XyChart. Possible values: - COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW, - ORANGE, RED' - enum: - - COLOR_UNSPECIFIED - - GREY - - BLUE - - GREEN - - YELLOW - - ORANGE - - RED - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum - description: 'The direction for the current threshold. - Direction is not allowed in a XyChart. Possible - values: DIRECTION_UNSPECIFIED, ABOVE, BELOW' - enum: - - DIRECTION_UNSPECIFIED - - ABOVE - - BELOW - label: - type: string - x-dcl-go-name: Label - description: A label for the threshold. - value: - type: number - format: double - x-dcl-go-name: Value - description: The value of the threshold. The value - should be defined in the native scale of the metric. - timeshiftDuration: - type: string - x-dcl-go-name: TimeshiftDuration - description: The duration used to display a comparison - chart. A comparison chart simultaneously shows values - from two similar-length time periods (e.g., week-over-week - metrics). The duration must be positive, and it can - only be applied to charts with data sets of LINE plot - type. - xAxis: - type: object - x-dcl-go-name: XAxis - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartXAxis - description: The properties applied to the X axis. 
- properties: - label: - type: string - x-dcl-go-name: Label - description: The label of the axis. - scale: - type: string - x-dcl-go-name: Scale - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum - description: 'The axis scale. By default, a linear - scale is used. Possible values: SCALE_UNSPECIFIED, - LINEAR, LOG10' - enum: - - SCALE_UNSPECIFIED - - LINEAR - - LOG10 - yAxis: - type: object - x-dcl-go-name: YAxis - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartYAxis - description: The properties applied to the Y axis. - properties: - label: - type: string - x-dcl-go-name: Label - description: The label of the axis. - scale: - type: string - x-dcl-go-name: Scale - x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum - description: 'The axis scale. By default, a linear - scale is used. Possible values: SCALE_UNSPECIFIED, - LINEAR, LOG10' - enum: - - SCALE_UNSPECIFIED - - LINEAR - - LOG10 - width: - type: integer - format: int64 - x-dcl-go-name: Width - description: The width of the tile, measured in grid squares. - xPos: - type: integer - format: int64 - x-dcl-go-name: XPos - description: The zero-indexed position of the tile in grid squares - relative to the left edge of the grid. - yPos: - type: integer - format: int64 - x-dcl-go-name: YPos - description: The zero-indexed position of the tile in grid squares - relative to the top edge of the grid. - name: - type: string - x-dcl-go-name: Name - description: Immutable. The resource name of the dashboard. - x-kubernetes-immutable: true - project: - type: string - x-dcl-go-name: Project - description: The project id of the resource. - x-kubernetes-immutable: true - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - parent: true - rowLayout: - type: object - x-dcl-go-name: RowLayout - x-dcl-go-type: DashboardRowLayout - description: The content is divided into equally spaced rows and the widgets - are arranged horizontally. 
- x-dcl-conflicts: - - gridLayout - - mosaicLayout - - columnLayout - properties: - rows: - type: array - x-dcl-go-name: Rows - description: The rows of content to display. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardRowLayoutRows - properties: - weight: - type: integer - format: int64 - x-dcl-go-name: Weight - description: The relative weight of this row. The row weight is - used to adjust the height of rows on the screen (relative to - peers). Greater the weight, greater the height of the row on - the screen. If omitted, a value of 1 is used while rendering. - widgets: - type: array - x-dcl-go-name: Widgets - description: The display widgets arranged horizontally in this - row. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardRowLayoutRowsWidgets - properties: - blank: - type: object - x-dcl-go-name: Blank - x-dcl-go-type: DashboardRowLayoutRowsWidgetsBlank - description: A blank space. - x-dcl-conflicts: - - xyChart - - scorecard - - text - - logsPanel - logsPanel: - type: object - x-dcl-go-name: LogsPanel - x-dcl-go-type: DashboardRowLayoutRowsWidgetsLogsPanel - x-dcl-conflicts: - - xyChart - - scorecard - - text - - blank - properties: - filter: - type: string - x-dcl-go-name: Filter - description: A filter that chooses which log entries - to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). - Only log entries that match the filter are returned. - An empty filter matches all log entries. - resourceNames: - type: array - x-dcl-go-name: ResourceNames - description: The names of logging resources to collect - logs for. Currently only projects are supported. If - empty, the widget will default to the host project. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - scorecard: - type: object - x-dcl-go-name: Scorecard - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecard - description: A scorecard summarizing time series data. - x-dcl-conflicts: - - xyChart - - text - - blank - - logsPanel - required: - - timeSeriesQuery - properties: - gaugeView: - type: object - x-dcl-go-name: GaugeView - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardGaugeView - description: Will cause the scorecard to show a gauge - chart. - properties: - lowerBound: - type: number - format: double - x-dcl-go-name: LowerBound - description: The lower bound for this gauge chart. - The value of the chart should always be greater - than or equal to this. - upperBound: - type: number - format: double - x-dcl-go-name: UpperBound - description: The upper bound for this gauge chart. - The value of the chart should always be less than - or equal to this. - sparkChartView: - type: object - x-dcl-go-name: SparkChartView - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardSparkChartView - description: Will cause the scorecard to show a spark - chart. - required: - - sparkChartType - properties: - minAlignmentPeriod: - type: string - x-dcl-go-name: MinAlignmentPeriod - description: The lower bound on data point frequency - in the chart implemented by specifying the minimum - alignment period to use in a time series query. - For example, if the data is published once every - 10 minutes it would not make sense to fetch and - align data at one minute intervals. This field - is optional and exists only as a hint. - sparkChartType: - type: string - x-dcl-go-name: SparkChartType - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum - description: 'Required. The type of sparkchart to - show in this chartView. 
Possible values: SPARK_CHART_TYPE_UNSPECIFIED, - SPARK_LINE, SPARK_BAR' - enum: - - SPARK_CHART_TYPE_UNSPECIFIED - - SPARK_LINE - - SPARK_BAR - thresholds: - type: array - x-dcl-go-name: Thresholds - description: 'The thresholds used to determine the state - of the scorecard given the time series'' current value. - For an actual value x, the scorecard is in a danger - state if x is less than or equal to a danger threshold - that triggers below, or greater than or equal to a - danger threshold that triggers above. Similarly, if - x is above/below a warning threshold that triggers - above/below, then the scorecard is in a warning state - - unless x also puts it in a danger state. (Danger - trumps warning.) As an example, consider a scorecard - with the following four thresholds: { value: 90, category: - ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category: - ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category: - ''DANGER'', trigger: ''BELOW'', }, { value: 20, category: - ''WARNING'', trigger: ''BELOW'', } Then: values - less than or equal to 10 would put the scorecard in - a DANGER state, values greater than 10 but less than - or equal to 20 a WARNING state, values strictly between - 20 and 70 an OK state, values greater than or equal - to 70 but less than 90 a WARNING state, and values - greater than or equal to 90 a DANGER state.' - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholds - properties: - color: - type: string - x-dcl-go-name: Color - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum - description: 'The state color for this threshold. - Color is not allowed in a XyChart. 
Possible - values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN, - YELLOW, ORANGE, RED' - enum: - - COLOR_UNSPECIFIED - - GREY - - BLUE - - GREEN - - YELLOW - - ORANGE - - RED - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum - description: 'The direction for the current threshold. - Direction is not allowed in a XyChart. Possible - values: DIRECTION_UNSPECIFIED, ABOVE, BELOW' - enum: - - DIRECTION_UNSPECIFIED - - ABOVE - - BELOW - label: - type: string - x-dcl-go-name: Label - description: A label for the threshold. - value: - type: number - format: double - x-dcl-go-name: Value - description: The value of the threshold. The value - should be defined in the native scale of the - metric. - timeSeriesQuery: - type: object - x-dcl-go-name: TimeSeriesQuery - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery - description: Required. Fields for querying time series - data from the Stackdriver metrics API. - properties: - timeSeriesFilter: - type: object - x-dcl-go-name: TimeSeriesFilter - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - description: Filter parameters to fetch time series. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views of - the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will - be done before the per-series aligner - can be applied to the data. The value - must be at least 60 seconds. 
If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is - returned. If no per-series aligner is - specified, or the aligner `ALIGN_NONE` - is specified, then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum - description: 'The reduction operation to - be used to combine time series into a - single time series, where the value of - each data point in the resulting series - is a function of all the already aligned - values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the `value_type` - of the original time series. Reduction - can yield a time series with a different - `metric_kind` or `value_type` than the - input time series. Time series data must - first be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must - be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. 
Possible - values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how - to bring the data points in a single time - series into temporal alignment. Except - for `ALIGN_NONE`, all alignments cause - all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for each - `alignment_period` with end timestamp - at the end of the period. Not all alignment - operations may be applied to all time - series. The valid choices depend on the - `metric_kind` and `value_type` of the - original time series. Alignment can change - the `metric_kind` or the `value_type` - of the time series. Time series data - must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - description: Ranking based time series filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking to - select time series that pass through the - filter. Possible values: DIRECTION_UNSPECIFIED, - TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series to allow - to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is applied - to each time series independently to produce - the value which will be used to compare - the time series to other time series. - Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, - METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - description: Apply a second aggregation after - `aggregation` is applied. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will - be done before the per-series aligner - can be applied to the data. 
The value - must be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is - returned. If no per-series aligner is - specified, or the aligner `ALIGN_NONE` - is specified, then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation to - be used to combine time series into a - single time series, where the value of - each data point in the resulting series - is a function of all the already aligned - values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the `value_type` - of the original time series. Reduction - can yield a time series with a different - `metric_kind` or `value_type` than the - input time series. Time series data must - first be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must - be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. 
Possible - values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how - to bring the data points in a single time - series into temporal alignment. Except - for `ALIGN_NONE`, all alignments cause - all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for each - `alignment_period` with end timestamp - at the end of the period. Not all alignment - operations may be applied to all time - series. The valid choices depend on the - `metric_kind` and `value_type` of the - original time series. Alignment can change - the `metric_kind` or the `value_type` - of the time series. Time series data - must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesFilterRatio: - type: object - x-dcl-go-name: TimeSeriesFilterRatio - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - description: Parameters to fetch a ratio between - two time series filters. 
- properties: - denominator: - type: object - x-dcl-go-name: Denominator - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - description: The denominator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data in - all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This - will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this field - is required or an error is returned. - If no per-series aligner is specified, - or the aligner `ALIGN_NONE` is specified, - then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where the - value of each data point in the resulting - series is a function of all the already - aligned values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the - `value_type` of the original time - series. Reduction can yield a time - series with a different `metric_kind` - or `value_type` than the input time - series. 
Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in a - single time series into temporal alignment. - Except for `ALIGN_NONE`, all alignments - cause all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for - each `alignment_period` with end timestamp - at the end of the period. Not all - alignment operations may be applied - to all time series. The valid choices - depend on the `metric_kind` and `value_type` - of the original time series. Alignment - can change the `metric_kind` or the - `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- numerator: - type: object - x-dcl-go-name: Numerator - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - description: The numerator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - description: By default, the raw time series - data is returned. Use this field to combine - multiple time series for different views - of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data in - all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This - will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this field - is required or an error is returned. - If no per-series aligner is specified, - or the aligner `ALIGN_NONE` is specified, - then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where the - value of each data point in the resulting - series is a function of all the already - aligned values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the - `value_type` of the original time - series. Reduction can yield a time - series with a different `metric_kind` - or `value_type` than the input time - series. 
Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not be - `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how - the time series are partitioned into - subsets prior to applying the aggregation - operation. Each subset contains time - series that have the same value for - each of the grouping fields. Each - individual time series is a member - of exactly one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, so - this field implicitly contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the time - series have the same resource type, - then the time series are aggregated - into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in a - single time series into temporal alignment. - Except for `ALIGN_NONE`, all alignments - cause all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for - each `alignment_period` with end timestamp - at the end of the period. Not all - alignment operations may be applied - to all time series. The valid choices - depend on the `metric_kind` and `value_type` - of the original time series. Alignment - can change the `metric_kind` or the - `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - description: Ranking based time series filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking to - select time series that pass through the - filter. Possible values: DIRECTION_UNSPECIFIED, - TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series to allow - to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is applied - to each time series independently to produce - the value which will be used to compare - the time series to other time series. - Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, - METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - description: Apply a second aggregation after - the ratio is computed. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` specifies - a time interval, in seconds, that is used - to divide the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. This will - be done before the per-series aligner - can be applied to the data. 
The value - must be at least 60 seconds. If a per-series - aligner other than `ALIGN_NONE` is specified, - this field is required or an error is - returned. If no per-series aligner is - specified, or the aligner `ALIGN_NONE` - is specified, then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation to - be used to combine time series into a - single time series, where the value of - each data point in the resulting series - is a function of all the already aligned - values in the input time series. Not - all reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and the `value_type` - of the original time series. Reduction - can yield a time series with a different - `metric_kind` or `value_type` than the - input time series. Time series data must - first be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` is - specified, then `per_series_aligner` must - be specified, and must not be `ALIGN_NONE`. - An `alignment_period` must also be specified; - otherwise, an error is returned. 
Possible - values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to preserve - when `cross_series_reducer` is specified. - The `group_by_fields` determine how the - time series are partitioned into subsets - prior to applying the aggregation operation. - Each subset contains time series that - have the same value for each of the grouping - fields. Each individual time series is - a member of exactly one subset. The `cross_series_reducer` - is applied to each subset of time series. - It is not possible to reduce across different - resource types, so this field implicitly - contains `resource.type`. Fields not - specified in `group_by_fields` are aggregated - away. If `group_by_fields` is not specified - and all the time series have the same - resource type, then the time series are - aggregated into a single output time series. - If `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes how - to bring the data points in a single time - series into temporal alignment. Except - for `ALIGN_NONE`, all alignments cause - all the data points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point for each - `alignment_period` with end timestamp - at the end of the period. Not all alignment - operations may be applied to all time - series. The valid choices depend on the - `metric_kind` and `value_type` of the - original time series. Alignment can change - the `metric_kind` or the `value_type` - of the time series. Time series data - must be aligned in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesQueryLanguage: - type: string - x-dcl-go-name: TimeSeriesQueryLanguage - description: A query used to fetch time series. - unitOverride: - type: string - x-dcl-go-name: UnitOverride - description: The unit of data contained in fetched - time series. If non-empty, this unit will override - any unit that accompanies fetched data. 
The format - is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - text: - type: object - x-dcl-go-name: Text - x-dcl-go-type: DashboardRowLayoutRowsWidgetsText - description: A raw string or markdown displaying textual - content. - x-dcl-conflicts: - - xyChart - - scorecard - - blank - - logsPanel - properties: - content: - type: string - x-dcl-go-name: Content - description: The text content to be displayed. - format: - type: string - x-dcl-go-name: Format - x-dcl-go-type: DashboardRowLayoutRowsWidgetsTextFormatEnum - description: 'How the text content is formatted. Possible - values: FORMAT_UNSPECIFIED, MARKDOWN, RAW' - enum: - - FORMAT_UNSPECIFIED - - MARKDOWN - - RAW - title: - type: string - x-dcl-go-name: Title - description: Optional. The title of the widget. - xyChart: - type: object - x-dcl-go-name: XyChart - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChart - description: A chart of time series data. - x-dcl-conflicts: - - scorecard - - text - - blank - - logsPanel - required: - - dataSets - properties: - chartOptions: - type: object - x-dcl-go-name: ChartOptions - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartChartOptions - description: Display options for the chart. - properties: - mode: - type: string - x-dcl-go-name: Mode - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum - description: 'The chart mode. Possible values: MODE_UNSPECIFIED, - COLOR, X_RAY, STATS' - enum: - - MODE_UNSPECIFIED - - COLOR - - X_RAY - - STATS - dataSets: - type: array - x-dcl-go-name: DataSets - description: Required. The data displayed in this chart. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSets - required: - - timeSeriesQuery - properties: - legendTemplate: - type: string - x-dcl-go-name: LegendTemplate - description: 'A template string for naming `TimeSeries` - in the resulting data set. This should be a - string with interpolations of the form `${label_name}`, - which will resolve to the label''s value. ' - minAlignmentPeriod: - type: string - x-dcl-go-name: MinAlignmentPeriod - description: Optional. The lower bound on data - point frequency for this data set, implemented - by specifying the minimum alignment period to - use in a time series query For example, if the - data is published once every 10 minutes, the - `min_alignment_period` should be at least 10 - minutes. It would not make sense to fetch and - align data at one minute intervals. - plotType: - type: string - x-dcl-go-name: PlotType - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum - description: 'How this data should be plotted - on the chart. Possible values: PLOT_TYPE_UNSPECIFIED, - LINE, STACKED_AREA, STACKED_BAR, HEATMAP' - enum: - - PLOT_TYPE_UNSPECIFIED - - LINE - - STACKED_AREA - - STACKED_BAR - - HEATMAP - timeSeriesQuery: - type: object - x-dcl-go-name: TimeSeriesQuery - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery - description: Required. Fields for querying time - series data from the Stackdriver metrics API. - properties: - timeSeriesFilter: - type: object - x-dcl-go-name: TimeSeriesFilter - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - description: Filter parameters to fetch time - series. 
- required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - description: By default, the raw time - series data is returned. Use this field - to combine multiple time series for - different views of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data - in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. - This will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this - field is required or an error is - returned. If no per-series aligner - is specified, or the aligner `ALIGN_NONE` - is specified, then this field is - ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where - the value of each data point in - the resulting series is a function - of all the already aligned values - in the input time series. Not all - reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and - the `value_type` of the original - time series. Reduction can yield - a time series with a different `metric_kind` - or `value_type` than the input time - series. Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not - be `ALIGN_NONE`. 
An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, - REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, - REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, - REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in - a single time series into temporal - alignment. Except for `ALIGN_NONE`, - all alignments cause all the data - points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point - for each `alignment_period` with - end timestamp at the end of the - period. Not all alignment operations - may be applied to all time series. - The valid choices depend on the - `metric_kind` and `value_type` of - the original time series. Alignment - can change the `metric_kind` or - the `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, resources, - and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - description: Ranking based time series - filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking - to select time series that pass - through the filter. Possible values: - DIRECTION_UNSPECIFIED, TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series - to allow to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is - applied to each time series independently - to produce the value which will - be used to compare the time series - to other time series. Possible values: - METHOD_UNSPECIFIED, METHOD_MEAN, - METHOD_MAX, METHOD_MIN, METHOD_SUM, - METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - description: Apply a second aggregation - after `aggregation` is applied. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data - in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. 
- This will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this - field is required or an error is - returned. If no per-series aligner - is specified, or the aligner `ALIGN_NONE` - is specified, then this field is - ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where - the value of each data point in - the resulting series is a function - of all the already aligned values - in the input time series. Not all - reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and - the `value_type` of the original - time series. Reduction can yield - a time series with a different `metric_kind` - or `value_type` than the input time - series. Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not - be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. 
Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, - REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, - REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, - REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in - a single time series into temporal - alignment. Except for `ALIGN_NONE`, - all alignments cause all the data - points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point - for each `alignment_period` with - end timestamp at the end of the - period. Not all alignment operations - may be applied to all time series. - The valid choices depend on the - `metric_kind` and `value_type` of - the original time series. Alignment - can change the `metric_kind` or - the `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesFilterRatio: - type: object - x-dcl-go-name: TimeSeriesFilterRatio - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - description: Parameters to fetch a ratio between - two time series filters. 
- properties: - denominator: - type: object - x-dcl-go-name: Denominator - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - description: The denominator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in - seconds, that is used to divide - the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. - This will be done before the - per-series aligner can be applied - to the data. The value must - be at least 60 seconds. If a - per-series aligner other than - `ALIGN_NONE` is specified, this - field is required or an error - is returned. If no per-series - aligner is specified, or the - aligner `ALIGN_NONE` is specified, - then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where - the value of each data point - in the resulting series is a - function of all the already - aligned values in the input - time series. Not all reducer - operations can be applied to - all time series. The valid choices - depend on the `metric_kind` - and the `value_type` of the - original time series. Reduction - can yield a time series with - a different `metric_kind` or - `value_type` than the input - time series. 
Time series data - must first be aligned (see `per_series_aligner`) - in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must - not be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible - values: REDUCE_NONE, REDUCE_MEAN, - REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, - REDUCE_STDDEV, REDUCE_COUNT, - REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points - in a single time series into - temporal alignment. Except for - `ALIGN_NONE`, all alignments - cause all the data points in - an `alignment_period` to be - mathematically grouped together, - resulting in a single data point - for each `alignment_period` - with end timestamp at the end - of the period. Not all alignment - operations may be applied to - all time series. The valid choices - depend on the `metric_kind` - and `value_type` of the original - time series. Alignment can change - the `metric_kind` or the `value_type` - of the time series. Time series - data must be aligned in order - to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal - to `ALIGN_NONE` and `alignment_period` - must be specified; otherwise, - an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. 
- numerator: - type: object - x-dcl-go-name: Numerator - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - description: The numerator of the ratio. - required: - - filter - properties: - aggregation: - type: object - x-dcl-go-name: Aggregation - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - description: By default, the raw time - series data is returned. Use this - field to combine multiple time series - for different views of the data. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in - seconds, that is used to divide - the data in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. - This will be done before the - per-series aligner can be applied - to the data. The value must - be at least 60 seconds. If a - per-series aligner other than - `ALIGN_NONE` is specified, this - field is required or an error - is returned. If no per-series - aligner is specified, or the - aligner `ALIGN_NONE` is specified, - then this field is ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where - the value of each data point - in the resulting series is a - function of all the already - aligned values in the input - time series. Not all reducer - operations can be applied to - all time series. The valid choices - depend on the `metric_kind` - and the `value_type` of the - original time series. Reduction - can yield a time series with - a different `metric_kind` or - `value_type` than the input - time series. 
Time series data - must first be aligned (see `per_series_aligner`) - in order to perform cross-time - series reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must - not be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. Possible - values: REDUCE_NONE, REDUCE_MEAN, - REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, - REDUCE_STDDEV, REDUCE_COUNT, - REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, - REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, - REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, - REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, - REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields - to preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series - are partitioned into subsets - prior to applying the aggregation - operation. Each subset contains - time series that have the same - value for each of the grouping - fields. Each individual time - series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of - time series. It is not possible - to reduce across different resource - types, so this field implicitly - contains `resource.type`. Fields - not specified in `group_by_fields` - are aggregated away. If `group_by_fields` - is not specified and all the - time series have the same resource - type, then the time series are - aggregated into a single output - time series. If `cross_series_reducer` - is not defined, this field is - ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points - in a single time series into - temporal alignment. Except for - `ALIGN_NONE`, all alignments - cause all the data points in - an `alignment_period` to be - mathematically grouped together, - resulting in a single data point - for each `alignment_period` - with end timestamp at the end - of the period. Not all alignment - operations may be applied to - all time series. The valid choices - depend on the `metric_kind` - and `value_type` of the original - time series. Alignment can change - the `metric_kind` or the `value_type` - of the time series. Time series - data must be aligned in order - to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified and not equal - to `ALIGN_NONE` and `alignment_period` - must be specified; otherwise, - an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - filter: - type: string - x-dcl-go-name: Filter - description: Required. The [monitoring - filter](https://cloud.google.com/monitoring/api/v3/filters) - that identifies the metric types, - resources, and projects to query. 
- pickTimeSeriesFilter: - type: object - x-dcl-go-name: PickTimeSeriesFilter - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - description: Ranking based time series - filter. - properties: - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum - description: 'How to use the ranking - to select time series that pass - through the filter. Possible values: - DIRECTION_UNSPECIFIED, TOP, BOTTOM' - enum: - - DIRECTION_UNSPECIFIED - - TOP - - BOTTOM - numTimeSeries: - type: integer - format: int64 - x-dcl-go-name: NumTimeSeries - description: How many time series - to allow to pass through the filter. - rankingMethod: - type: string - x-dcl-go-name: RankingMethod - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum - description: '`ranking_method` is - applied to each time series independently - to produce the value which will - be used to compare the time series - to other time series. Possible values: - METHOD_UNSPECIFIED, METHOD_MEAN, - METHOD_MAX, METHOD_MIN, METHOD_SUM, - METHOD_LATEST' - enum: - - METHOD_UNSPECIFIED - - METHOD_MEAN - - METHOD_MAX - - METHOD_MIN - - METHOD_SUM - - METHOD_LATEST - secondaryAggregation: - type: object - x-dcl-go-name: SecondaryAggregation - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - description: Apply a second aggregation - after the ratio is computed. - properties: - alignmentPeriod: - type: string - x-dcl-go-name: AlignmentPeriod - description: The `alignment_period` - specifies a time interval, in seconds, - that is used to divide the data - in all the [time series][google.monitoring.v3.TimeSeries] - into consistent blocks of time. 
- This will be done before the per-series - aligner can be applied to the data. The - value must be at least 60 seconds. - If a per-series aligner other than - `ALIGN_NONE` is specified, this - field is required or an error is - returned. If no per-series aligner - is specified, or the aligner `ALIGN_NONE` - is specified, then this field is - ignored. - crossSeriesReducer: - type: string - x-dcl-go-name: CrossSeriesReducer - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum - description: 'The reduction operation - to be used to combine time series - into a single time series, where - the value of each data point in - the resulting series is a function - of all the already aligned values - in the input time series. Not all - reducer operations can be applied - to all time series. The valid choices - depend on the `metric_kind` and - the `value_type` of the original - time series. Reduction can yield - a time series with a different `metric_kind` - or `value_type` than the input time - series. Time series data must first - be aligned (see `per_series_aligner`) - in order to perform cross-time series - reduction. If `cross_series_reducer` - is specified, then `per_series_aligner` - must be specified, and must not - be `ALIGN_NONE`. An `alignment_period` - must also be specified; otherwise, - an error is returned. 
Possible values: - REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, - REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, - REDUCE_COUNT, REDUCE_COUNT_TRUE, - REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, - REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, - REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, - REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION' - enum: - - REDUCE_NONE - - REDUCE_MEAN - - REDUCE_MIN - - REDUCE_MAX - - REDUCE_SUM - - REDUCE_STDDEV - - REDUCE_COUNT - - REDUCE_COUNT_TRUE - - REDUCE_COUNT_FALSE - - REDUCE_FRACTION_TRUE - - REDUCE_PERCENTILE_99 - - REDUCE_PERCENTILE_95 - - REDUCE_PERCENTILE_50 - - REDUCE_PERCENTILE_05 - - REDUCE_FRACTION_LESS_THAN - - REDUCE_MAKE_DISTRIBUTION - groupByFields: - type: array - x-dcl-go-name: GroupByFields - description: The set of fields to - preserve when `cross_series_reducer` - is specified. The `group_by_fields` - determine how the time series are - partitioned into subsets prior to - applying the aggregation operation. - Each subset contains time series - that have the same value for each - of the grouping fields. Each individual - time series is a member of exactly - one subset. The `cross_series_reducer` - is applied to each subset of time - series. It is not possible to reduce - across different resource types, - so this field implicitly contains - `resource.type`. Fields not specified - in `group_by_fields` are aggregated - away. If `group_by_fields` is not - specified and all the time series - have the same resource type, then - the time series are aggregated into - a single output time series. If - `cross_series_reducer` is not defined, - this field is ignored. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - perSeriesAligner: - type: string - x-dcl-go-name: PerSeriesAligner - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum - description: An `Aligner` describes - how to bring the data points in - a single time series into temporal - alignment. Except for `ALIGN_NONE`, - all alignments cause all the data - points in an `alignment_period` - to be mathematically grouped together, - resulting in a single data point - for each `alignment_period` with - end timestamp at the end of the - period. Not all alignment operations - may be applied to all time series. - The valid choices depend on the - `metric_kind` and `value_type` of - the original time series. Alignment - can change the `metric_kind` or - the `value_type` of the time series. Time - series data must be aligned in order - to perform cross-time series reduction. - If `cross_series_reducer` is specified, - then `per_series_aligner` must be - specified and not equal to `ALIGN_NONE` - and `alignment_period` must be specified; - otherwise, an error is returned. - enum: - - ALIGN_NONE - - ALIGN_DELTA - - ALIGN_RATE - - ALIGN_INTERPOLATE - - ALIGN_NEXT_OLDER - - ALIGN_MIN - - ALIGN_MAX - - ALIGN_MEAN - - ALIGN_COUNT - - ALIGN_SUM - - ALIGN_STDDEV - - ALIGN_COUNT_TRUE - - ALIGN_COUNT_FALSE - - ALIGN_FRACTION_TRUE - - ALIGN_PERCENTILE_99 - - ALIGN_PERCENTILE_95 - - ALIGN_PERCENTILE_50 - - ALIGN_PERCENTILE_05 - - ALIGN_MAKE_DISTRIBUTION - - ALIGN_PERCENT_CHANGE - timeSeriesQueryLanguage: - type: string - x-dcl-go-name: TimeSeriesQueryLanguage - description: A query used to fetch time series. - unitOverride: - type: string - x-dcl-go-name: UnitOverride - description: The unit of data contained in - fetched time series. If non-empty, this - unit will override any unit that accompanies - fetched data. 
The format is the same as - the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) - field in `MetricDescriptor`. - thresholds: - type: array - x-dcl-go-name: Thresholds - description: Threshold lines drawn horizontally across - the chart. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholds - properties: - color: - type: string - x-dcl-go-name: Color - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum - description: 'The state color for this threshold. - Color is not allowed in a XyChart. Possible - values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN, - YELLOW, ORANGE, RED' - enum: - - COLOR_UNSPECIFIED - - GREY - - BLUE - - GREEN - - YELLOW - - ORANGE - - RED - direction: - type: string - x-dcl-go-name: Direction - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum - description: 'The direction for the current threshold. - Direction is not allowed in a XyChart. Possible - values: DIRECTION_UNSPECIFIED, ABOVE, BELOW' - enum: - - DIRECTION_UNSPECIFIED - - ABOVE - - BELOW - label: - type: string - x-dcl-go-name: Label - description: A label for the threshold. - value: - type: number - format: double - x-dcl-go-name: Value - description: The value of the threshold. The value - should be defined in the native scale of the - metric. - timeshiftDuration: - type: string - x-dcl-go-name: TimeshiftDuration - description: The duration used to display a comparison - chart. A comparison chart simultaneously shows values - from two similar-length time periods (e.g., week-over-week - metrics). The duration must be positive, and it can - only be applied to charts with data sets of LINE plot - type. - xAxis: - type: object - x-dcl-go-name: XAxis - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartXAxis - description: The properties applied to the X axis. 
- properties: - label: - type: string - x-dcl-go-name: Label - description: The label of the axis. - scale: - type: string - x-dcl-go-name: Scale - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum - description: 'The axis scale. By default, a linear - scale is used. Possible values: SCALE_UNSPECIFIED, - LINEAR, LOG10' - enum: - - SCALE_UNSPECIFIED - - LINEAR - - LOG10 - yAxis: - type: object - x-dcl-go-name: YAxis - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartYAxis - description: The properties applied to the Y axis. - properties: - label: - type: string - x-dcl-go-name: Label - description: The label of the axis. - scale: - type: string - x-dcl-go-name: Scale - x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum - description: 'The axis scale. By default, a linear - scale is used. Possible values: SCALE_UNSPECIFIED, - LINEAR, LOG10' - enum: - - SCALE_UNSPECIFIED - - LINEAR - - LOG10 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_internal.go deleted file mode 100644 index dd6309bdad..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_internal.go +++ /dev/null @@ -1,61359 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package monitoring - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func (r *Dashboard) validate() error { - - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"GridLayout", "MosaicLayout", "RowLayout", "ColumnLayout"}, r.GridLayout, r.MosaicLayout, r.RowLayout, r.ColumnLayout); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Name, "Name"); err != nil { - return err - } - if err := dcl.Required(r, "displayName"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.GridLayout) { - if err := r.GridLayout.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.MosaicLayout) { - if err := r.MosaicLayout.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.RowLayout) { - if err := r.RowLayout.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.ColumnLayout) { - if err := r.ColumnLayout.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayout) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgets) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"XyChart", "Scorecard", "Text", "Blank", "LogsPanel"}, r.XyChart, r.Scorecard, r.Text, r.Blank, r.LogsPanel); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.XyChart) { - if err := r.XyChart.validate(); err != 
nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Scorecard) { - if err := r.Scorecard.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Text) { - if err := r.Text.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Blank) { - if err := r.Blank.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.LogsPanel) { - if err := r.LogsPanel.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsXyChart) validate() error { - if err := dcl.Required(r, "dataSets"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.XAxis) { - if err := r.XAxis.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.YAxis) { - if err := r.YAxis.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.ChartOptions) { - if err := r.ChartOptions.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartDataSets) validate() error { - if err := dcl.Required(r, "timeSeriesQuery"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesQuery) { - if err := r.TimeSeriesQuery.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) validate() error { - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilter) { - if err := r.TimeSeriesFilter.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilterRatio) { - if err := r.TimeSeriesFilterRatio.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if 
err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) validate() error { - if !dcl.IsEmptyValueIndirect(r.Numerator) { - if err := r.Numerator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Denominator) { - if err := r.Denominator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := 
r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartThresholds) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartXAxis) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartYAxis) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsXyChartChartOptions) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsScorecard) validate() error { - if err := dcl.Required(r, "timeSeriesQuery"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesQuery) { - if err := r.TimeSeriesQuery.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.GaugeView) { - if err := r.GaugeView.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SparkChartView) { - if err := r.SparkChartView.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) validate() error { - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilter) { - if err := r.TimeSeriesFilter.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilterRatio) { - if err := r.TimeSeriesFilterRatio.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := 
r.Aggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) validate() error { - if !dcl.IsEmptyValueIndirect(r.Numerator) { - if err := r.Numerator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Denominator) { - if err := r.Denominator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return 
err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardGaugeView) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardSparkChartView) validate() error { - if err := dcl.Required(r, "sparkChartType"); err != nil { - return err - } - return nil -} -func (r *DashboardGridLayoutWidgetsScorecardThresholds) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsText) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsBlank) validate() error { - return nil -} -func (r *DashboardGridLayoutWidgetsLogsPanel) validate() error { - return nil -} -func (r *DashboardMosaicLayout) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTiles) validate() error { - if !dcl.IsEmptyValueIndirect(r.Widget) { - if err := r.Widget.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidget) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"XyChart", "Scorecard", "Text", "Blank", "LogsPanel"}, r.XyChart, r.Scorecard, r.Text, r.Blank, r.LogsPanel); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.XyChart) { - if err := r.XyChart.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Scorecard) { - if err := r.Scorecard.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Text) { - if err := r.Text.validate(); err != nil { - return err - } - } - if 
!dcl.IsEmptyValueIndirect(r.Blank) { - if err := r.Blank.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.LogsPanel) { - if err := r.LogsPanel.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChart) validate() error { - if err := dcl.Required(r, "dataSets"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.XAxis) { - if err := r.XAxis.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.YAxis) { - if err := r.YAxis.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.ChartOptions) { - if err := r.ChartOptions.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSets) validate() error { - if err := dcl.Required(r, "timeSeriesQuery"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesQuery) { - if err := r.TimeSeriesQuery.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) validate() error { - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilter) { - if err := r.TimeSeriesFilter.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilterRatio) { - if err := r.TimeSeriesFilterRatio.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} 
-func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) validate() error { - if !dcl.IsEmptyValueIndirect(r.Numerator) { - if err := r.Numerator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Denominator) { - if err := r.Denominator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) 
validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartThresholds) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartXAxis) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartYAxis) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetXyChartChartOptions) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecard) validate() error { - if err := dcl.Required(r, "timeSeriesQuery"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesQuery) { - if err := r.TimeSeriesQuery.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.GaugeView) { - if err := r.GaugeView.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SparkChartView) { - if err := r.SparkChartView.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) validate() error { - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilter) { - if err := r.TimeSeriesFilter.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilterRatio) { - if err := r.TimeSeriesFilterRatio.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := 
r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) validate() error { - if !dcl.IsEmptyValueIndirect(r.Numerator) { - if err := r.Numerator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Denominator) { - if err := r.Denominator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := 
r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardGaugeView) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) validate() error { - if err := dcl.Required(r, "sparkChartType"); err != nil { - return err - } - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetScorecardThresholds) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetText) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetBlank) validate() error { - return nil -} -func (r *DashboardMosaicLayoutTilesWidgetLogsPanel) validate() error { - return nil -} -func (r *DashboardRowLayout) validate() error { - return nil -} -func (r *DashboardRowLayoutRows) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgets) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"XyChart", "Scorecard", "Text", "Blank", "LogsPanel"}, r.XyChart, r.Scorecard, r.Text, r.Blank, r.LogsPanel); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.XyChart) { - if err := r.XyChart.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Scorecard) { - if err := r.Scorecard.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Text) { - if err := r.Text.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Blank) { - if err := r.Blank.validate(); err != nil { - return err - } - } - if 
!dcl.IsEmptyValueIndirect(r.LogsPanel) { - if err := r.LogsPanel.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChart) validate() error { - if err := dcl.Required(r, "dataSets"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.XAxis) { - if err := r.XAxis.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.YAxis) { - if err := r.YAxis.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.ChartOptions) { - if err := r.ChartOptions.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSets) validate() error { - if err := dcl.Required(r, "timeSeriesQuery"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesQuery) { - if err := r.TimeSeriesQuery.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) validate() error { - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilter) { - if err := r.TimeSeriesFilter.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilterRatio) { - if err := r.TimeSeriesFilterRatio.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) validate() error { - 
return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) validate() error { - if !dcl.IsEmptyValueIndirect(r.Numerator) { - if err := r.Numerator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Denominator) { - if err := r.Denominator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) 
validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartThresholds) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartXAxis) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartYAxis) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsXyChartChartOptions) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecard) validate() error { - if err := dcl.Required(r, "timeSeriesQuery"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesQuery) { - if err := r.TimeSeriesQuery.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.GaugeView) { - if err := r.GaugeView.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SparkChartView) { - if err := r.SparkChartView.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) validate() error { - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilter) { - if err := r.TimeSeriesFilter.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilterRatio) { - if err := r.TimeSeriesFilterRatio.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - 
return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) validate() error { - if !dcl.IsEmptyValueIndirect(r.Numerator) { - if err := r.Numerator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Denominator) { - if err := r.Denominator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) validate() error { - return nil -} -func (r 
*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardGaugeView) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardSparkChartView) validate() error { - if err := dcl.Required(r, "sparkChartType"); err != nil { - return err - } - return nil -} -func (r *DashboardRowLayoutRowsWidgetsScorecardThresholds) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsText) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsBlank) validate() error { - return nil -} -func (r *DashboardRowLayoutRowsWidgetsLogsPanel) validate() error { - return nil -} -func (r *DashboardColumnLayout) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumns) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgets) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"XyChart", "Scorecard", "Text", "Blank", "LogsPanel"}, r.XyChart, r.Scorecard, r.Text, r.Blank, r.LogsPanel); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.XyChart) { - if err := r.XyChart.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Scorecard) { - if err := r.Scorecard.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Text) { - if err := r.Text.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Blank) { - if err := r.Blank.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.LogsPanel) { - if err := r.LogsPanel.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChart) validate() error { - if err := dcl.Required(r, "dataSets"); err != nil { - return 
err - } - if !dcl.IsEmptyValueIndirect(r.XAxis) { - if err := r.XAxis.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.YAxis) { - if err := r.YAxis.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.ChartOptions) { - if err := r.ChartOptions.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSets) validate() error { - if err := dcl.Required(r, "timeSeriesQuery"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesQuery) { - if err := r.TimeSeriesQuery.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) validate() error { - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilter) { - if err := r.TimeSeriesFilter.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilterRatio) { - if err := r.TimeSeriesFilterRatio.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) validate() error { - return nil -} -func (r 
*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) validate() error { - if !dcl.IsEmptyValueIndirect(r.Numerator) { - if err := r.Numerator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Denominator) { - if err := r.Denominator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) validate() error { - return nil -} -func (r 
*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartThresholds) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartXAxis) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartYAxis) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecard) validate() error { - if err := dcl.Required(r, "timeSeriesQuery"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesQuery) { - if err := r.TimeSeriesQuery.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.GaugeView) { - if err := r.GaugeView.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SparkChartView) { - if err := r.SparkChartView.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) validate() error { - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilter) { - if err := r.TimeSeriesFilter.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.TimeSeriesFilterRatio) { - if err := r.TimeSeriesFilterRatio.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - 
return err - } - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) validate() error { - if !dcl.IsEmptyValueIndirect(r.Numerator) { - if err := r.Numerator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Denominator) { - if err := r.Denominator.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.SecondaryAggregation) { - if err := r.SecondaryAggregation.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.PickTimeSeriesFilter) { - if err := r.PickTimeSeriesFilter.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) validate() error { - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Aggregation) { - if err := r.Aggregation.validate(); err != nil { - return err - } - } - return nil -} -func (r 
*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) validate() error { - if err := dcl.Required(r, "sparkChartType"); err != nil { - return err - } - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsScorecardThresholds) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsText) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsBlank) validate() error { - return nil -} -func (r *DashboardColumnLayoutColumnsWidgetsLogsPanel) validate() error { - return nil -} -func (r *Dashboard) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://monitoring.googleapis.com/v1/", params) -} - -func (r *Dashboard) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/dashboards/{{name}}", nr.basePath(), userBasePath, params), nil -} - -func (r *Dashboard) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/dashboards", nr.basePath(), userBasePath, params), nil - -} - -func (r *Dashboard) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": 
dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/dashboards", nr.basePath(), userBasePath, params), nil - -} - -func (r *Dashboard) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/dashboards/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// dashboardApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type dashboardApiOperation interface { - do(context.Context, *Dashboard, *Client) error -} - -// newUpdateDashboardUpdateDashboardRequest creates a request for an -// Dashboard resource's UpdateDashboard update type by filling in the update -// fields based on the intended state of the resource. -func newUpdateDashboardUpdateDashboardRequest(ctx context.Context, f *Dashboard, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { - req["displayName"] = v - } - if v, err := expandDashboardGridLayout(c, f.GridLayout, res); err != nil { - return nil, fmt.Errorf("error expanding GridLayout into gridLayout: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["gridLayout"] = v - } - if v, err := expandDashboardMosaicLayout(c, f.MosaicLayout, res); err != nil { - return nil, fmt.Errorf("error expanding MosaicLayout into mosaicLayout: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["mosaicLayout"] = v - } - if v, err := expandDashboardRowLayout(c, f.RowLayout, res); err != nil { - return nil, fmt.Errorf("error expanding RowLayout into rowLayout: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["rowLayout"] = v - } - if v, err := expandDashboardColumnLayout(c, f.ColumnLayout, res); err != nil { - return nil, fmt.Errorf("error expanding ColumnLayout into 
columnLayout: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["columnLayout"] = v - } - b, err := c.getDashboardRaw(ctx, f) - if err != nil { - return nil, err - } - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - rawEtag, err := dcl.GetMapEntry( - m, - []string{"etag"}, - ) - if err != nil { - c.Config.Logger.WarningWithContextf(ctx, "Failed to fetch from JSON Path: %v", err) - } else { - req["etag"] = rawEtag.(string) - } - req["name"] = fmt.Sprintf("projects/%s/dashboards/%s", *f.Project, *f.Name) - - return req, nil -} - -// marshalUpdateDashboardUpdateDashboardRequest converts the update into -// the final JSON request body. -func marshalUpdateDashboardUpdateDashboardRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateDashboardUpdateDashboardOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. 
- -func (op *updateDashboardUpdateDashboardOperation) do(ctx context.Context, r *Dashboard, c *Client) error { - _, err := c.GetDashboard(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateDashboard") - if err != nil { - return err - } - - req, err := newUpdateDashboardUpdateDashboardRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateDashboardUpdateDashboardRequest(c, req) - if err != nil { - return err - } - _, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - return nil -} - -func (c *Client) listDashboardRaw(ctx context.Context, r *Dashboard, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != DashboardMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listDashboardOperation struct { - Dashboards []map[string]interface{} `json:"dashboards"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listDashboard(ctx context.Context, r *Dashboard, pageToken string, pageSize int32) ([]*Dashboard, string, error) { - b, err := c.listDashboardRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listDashboardOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*Dashboard - for _, v := range m.Dashboards { - res, err := unmarshalMapDashboard(v, c, r) - if 
err != nil { - return nil, m.Token, err - } - res.Project = r.Project - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllDashboard(ctx context.Context, f func(*Dashboard) bool, resources []*Dashboard) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteDashboard(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteDashboardOperation struct{} - -func (op *deleteDashboardOperation) do(ctx context.Context, r *Dashboard, c *Client) error { - r, err := c.GetDashboard(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "Dashboard not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetDashboard checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return fmt.Errorf("failed to delete Dashboard: %w", err) - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. 
- retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetDashboard(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createDashboardOperation struct { - response map[string]interface{} -} - -func (op *createDashboardOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createDashboardOperation) do(ctx context.Context, r *Dashboard, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - var m map[string]interface{} - if err := json.Unmarshal(req, &m); err != nil { - return err - } - normalized := r.urlNormalized() - m["name"] = fmt.Sprintf("projects/%s/dashboards/%s", *normalized.Project, *normalized.Name) - - req, err = json.Marshal(m) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - - o, err := dcl.ResponseBodyAsJSON(resp) - if err != nil { - return fmt.Errorf("error decoding response body into JSON: %w", err) - } - op.response = o - - if _, err := c.GetDashboard(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getDashboardRaw(ctx context.Context, r *Dashboard) ([]byte, error) { - - u, err := 
r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) dashboardDiffsForRawDesired(ctx context.Context, rawDesired *Dashboard, opts ...dcl.ApplyOption) (initial, desired *Dashboard, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. - var fetchState *Dashboard - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*Dashboard); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Dashboard, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetDashboard(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Dashboard resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve Dashboard resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that Dashboard resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeDashboardDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Dashboard: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Dashboard: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. 
- if err := extractDashboardFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeDashboardInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Dashboard: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeDashboardDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Dashboard: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffDashboard(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeDashboardInitialState(rawInitial, rawDesired *Dashboard) (*Dashboard, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - - if !dcl.IsZeroValue(rawInitial.GridLayout) { - // Check if anything else is set. - if dcl.AnySet(rawInitial.MosaicLayout, rawInitial.RowLayout, rawInitial.ColumnLayout) { - rawInitial.GridLayout = EmptyDashboardGridLayout - } - } - - if !dcl.IsZeroValue(rawInitial.MosaicLayout) { - // Check if anything else is set. - if dcl.AnySet(rawInitial.GridLayout, rawInitial.RowLayout, rawInitial.ColumnLayout) { - rawInitial.MosaicLayout = EmptyDashboardMosaicLayout - } - } - - if !dcl.IsZeroValue(rawInitial.RowLayout) { - // Check if anything else is set. - if dcl.AnySet(rawInitial.GridLayout, rawInitial.MosaicLayout, rawInitial.ColumnLayout) { - rawInitial.RowLayout = EmptyDashboardRowLayout - } - } - - if !dcl.IsZeroValue(rawInitial.ColumnLayout) { - // Check if anything else is set. 
- if dcl.AnySet(rawInitial.GridLayout, rawInitial.MosaicLayout, rawInitial.RowLayout) { - rawInitial.ColumnLayout = EmptyDashboardColumnLayout - } - } - - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeDashboardDesiredState(rawDesired, rawInitial *Dashboard, opts ...dcl.ApplyOption) (*Dashboard, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - rawDesired.GridLayout = canonicalizeDashboardGridLayout(rawDesired.GridLayout, nil, opts...) - rawDesired.MosaicLayout = canonicalizeDashboardMosaicLayout(rawDesired.MosaicLayout, nil, opts...) - rawDesired.RowLayout = canonicalizeDashboardRowLayout(rawDesired.RowLayout, nil, opts...) - rawDesired.ColumnLayout = canonicalizeDashboardColumnLayout(rawDesired.ColumnLayout, nil, opts...) - - return rawDesired, nil - } - canonicalDesired := &Dashboard{} - if dcl.NameToSelfLink(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { - canonicalDesired.DisplayName = rawInitial.DisplayName - } else { - canonicalDesired.DisplayName = rawDesired.DisplayName - } - canonicalDesired.GridLayout = canonicalizeDashboardGridLayout(rawDesired.GridLayout, rawInitial.GridLayout, opts...) - canonicalDesired.MosaicLayout = canonicalizeDashboardMosaicLayout(rawDesired.MosaicLayout, rawInitial.MosaicLayout, opts...) - canonicalDesired.RowLayout = canonicalizeDashboardRowLayout(rawDesired.RowLayout, rawInitial.RowLayout, opts...) - canonicalDesired.ColumnLayout = canonicalizeDashboardColumnLayout(rawDesired.ColumnLayout, rawInitial.ColumnLayout, opts...) 
- if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - - if canonicalDesired.GridLayout != nil { - // Check if anything else is set. - if dcl.AnySet(rawDesired.MosaicLayout, rawDesired.RowLayout, rawDesired.ColumnLayout) { - canonicalDesired.GridLayout = EmptyDashboardGridLayout - } - } - - if canonicalDesired.MosaicLayout != nil { - // Check if anything else is set. - if dcl.AnySet(rawDesired.GridLayout, rawDesired.RowLayout, rawDesired.ColumnLayout) { - canonicalDesired.MosaicLayout = EmptyDashboardMosaicLayout - } - } - - if canonicalDesired.RowLayout != nil { - // Check if anything else is set. - if dcl.AnySet(rawDesired.GridLayout, rawDesired.MosaicLayout, rawDesired.ColumnLayout) { - canonicalDesired.RowLayout = EmptyDashboardRowLayout - } - } - - if canonicalDesired.ColumnLayout != nil { - // Check if anything else is set. - if dcl.AnySet(rawDesired.GridLayout, rawDesired.MosaicLayout, rawDesired.RowLayout) { - canonicalDesired.ColumnLayout = EmptyDashboardColumnLayout - } - } - - return canonicalDesired, nil -} - -func canonicalizeDashboardNewState(c *Client, rawNew, rawDesired *Dashboard) (*Dashboard, error) { - - rawNew.Name = rawDesired.Name - - if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } else { - if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } - } - - if dcl.IsEmptyValueIndirect(rawNew.GridLayout) && dcl.IsEmptyValueIndirect(rawDesired.GridLayout) { - rawNew.GridLayout = rawDesired.GridLayout - } else { - rawNew.GridLayout = canonicalizeNewDashboardGridLayout(c, rawDesired.GridLayout, rawNew.GridLayout) - } - - if dcl.IsEmptyValueIndirect(rawNew.MosaicLayout) && dcl.IsEmptyValueIndirect(rawDesired.MosaicLayout) { - rawNew.MosaicLayout = 
rawDesired.MosaicLayout - } else { - rawNew.MosaicLayout = canonicalizeNewDashboardMosaicLayout(c, rawDesired.MosaicLayout, rawNew.MosaicLayout) - } - - if dcl.IsEmptyValueIndirect(rawNew.RowLayout) && dcl.IsEmptyValueIndirect(rawDesired.RowLayout) { - rawNew.RowLayout = rawDesired.RowLayout - } else { - rawNew.RowLayout = canonicalizeNewDashboardRowLayout(c, rawDesired.RowLayout, rawNew.RowLayout) - } - - if dcl.IsEmptyValueIndirect(rawNew.ColumnLayout) && dcl.IsEmptyValueIndirect(rawDesired.ColumnLayout) { - rawNew.ColumnLayout = rawDesired.ColumnLayout - } else { - rawNew.ColumnLayout = canonicalizeNewDashboardColumnLayout(c, rawDesired.ColumnLayout, rawNew.ColumnLayout) - } - - rawNew.Project = rawDesired.Project - - if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { - rawNew.Etag = rawDesired.Etag - } else { - if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { - rawNew.Etag = rawDesired.Etag - } - } - - return rawNew, nil -} - -func canonicalizeDashboardGridLayout(des, initial *DashboardGridLayout, opts ...dcl.ApplyOption) *DashboardGridLayout { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayout{} - - if dcl.IsZeroValue(des.Columns) || (dcl.IsEmptyValueIndirect(des.Columns) && dcl.IsEmptyValueIndirect(initial.Columns)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Columns = initial.Columns - } else { - cDes.Columns = des.Columns - } - cDes.Widgets = canonicalizeDashboardGridLayoutWidgetsSlice(des.Widgets, initial.Widgets, opts...) 
- - return cDes -} - -func canonicalizeDashboardGridLayoutSlice(des, initial []DashboardGridLayout, opts ...dcl.ApplyOption) []DashboardGridLayout { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayout, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayout(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayout, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayout(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayout(c *Client, des, nw *DashboardGridLayout) *DashboardGridLayout { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayout while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.Widgets = canonicalizeNewDashboardGridLayoutWidgetsSlice(c, des.Widgets, nw.Widgets) - - return nw -} - -func canonicalizeNewDashboardGridLayoutSet(c *Client, des, nw []DashboardGridLayout) []DashboardGridLayout { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayout - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayout(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardGridLayoutSlice(c *Client, des, nw []DashboardGridLayout) []DashboardGridLayout { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayout - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayout(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgets(des, initial *DashboardGridLayoutWidgets, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgets { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.XyChart != nil || (initial != nil && initial.XyChart != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Scorecard, des.Text, des.Blank, des.LogsPanel) { - des.XyChart = nil - if initial != nil { - initial.XyChart = nil - } - } - } - - if des.Scorecard != nil || (initial != nil && initial.Scorecard != nil) { - // Check if anything else is set. - if dcl.AnySet(des.XyChart, des.Text, des.Blank, des.LogsPanel) { - des.Scorecard = nil - if initial != nil { - initial.Scorecard = nil - } - } - } - - if des.Text != nil || (initial != nil && initial.Text != nil) { - // Check if anything else is set. - if dcl.AnySet(des.XyChart, des.Scorecard, des.Blank, des.LogsPanel) { - des.Text = nil - if initial != nil { - initial.Text = nil - } - } - } - - if des.Blank != nil || (initial != nil && initial.Blank != nil) { - // Check if anything else is set. - if dcl.AnySet(des.XyChart, des.Scorecard, des.Text, des.LogsPanel) { - des.Blank = nil - if initial != nil { - initial.Blank = nil - } - } - } - - if des.LogsPanel != nil || (initial != nil && initial.LogsPanel != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.XyChart, des.Scorecard, des.Text, des.Blank) { - des.LogsPanel = nil - if initial != nil { - initial.LogsPanel = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgets{} - - if dcl.StringCanonicalize(des.Title, initial.Title) || dcl.IsZeroValue(des.Title) { - cDes.Title = initial.Title - } else { - cDes.Title = des.Title - } - cDes.XyChart = canonicalizeDashboardGridLayoutWidgetsXyChart(des.XyChart, initial.XyChart, opts...) - cDes.Scorecard = canonicalizeDashboardGridLayoutWidgetsScorecard(des.Scorecard, initial.Scorecard, opts...) - cDes.Text = canonicalizeDashboardGridLayoutWidgetsText(des.Text, initial.Text, opts...) - cDes.Blank = canonicalizeDashboardGridLayoutWidgetsBlank(des.Blank, initial.Blank, opts...) - cDes.LogsPanel = canonicalizeDashboardGridLayoutWidgetsLogsPanel(des.LogsPanel, initial.LogsPanel, opts...) - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsSlice(des, initial []DashboardGridLayoutWidgets, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgets { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgets, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgets(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgets, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgets(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgets(c *Client, des, nw *DashboardGridLayoutWidgets) *DashboardGridLayoutWidgets { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgets while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Title, nw.Title) { - nw.Title = des.Title - } - nw.XyChart = canonicalizeNewDashboardGridLayoutWidgetsXyChart(c, des.XyChart, nw.XyChart) - nw.Scorecard = canonicalizeNewDashboardGridLayoutWidgetsScorecard(c, des.Scorecard, nw.Scorecard) - nw.Text = canonicalizeNewDashboardGridLayoutWidgetsText(c, des.Text, nw.Text) - nw.Blank = canonicalizeNewDashboardGridLayoutWidgetsBlank(c, des.Blank, nw.Blank) - nw.LogsPanel = canonicalizeNewDashboardGridLayoutWidgetsLogsPanel(c, des.LogsPanel, nw.LogsPanel) - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsSet(c *Client, des, nw []DashboardGridLayoutWidgets) []DashboardGridLayoutWidgets { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgets - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgets(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsSlice(c *Client, des, nw []DashboardGridLayoutWidgets) []DashboardGridLayoutWidgets { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgets - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgets(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChart(des, initial *DashboardGridLayoutWidgetsXyChart, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChart { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChart{} - - cDes.DataSets = canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsSlice(des.DataSets, initial.DataSets, opts...) - if dcl.StringCanonicalize(des.TimeshiftDuration, initial.TimeshiftDuration) || dcl.IsZeroValue(des.TimeshiftDuration) { - cDes.TimeshiftDuration = initial.TimeshiftDuration - } else { - cDes.TimeshiftDuration = des.TimeshiftDuration - } - cDes.Thresholds = canonicalizeDashboardGridLayoutWidgetsXyChartThresholdsSlice(des.Thresholds, initial.Thresholds, opts...) - cDes.XAxis = canonicalizeDashboardGridLayoutWidgetsXyChartXAxis(des.XAxis, initial.XAxis, opts...) - cDes.YAxis = canonicalizeDashboardGridLayoutWidgetsXyChartYAxis(des.YAxis, initial.YAxis, opts...) - cDes.ChartOptions = canonicalizeDashboardGridLayoutWidgetsXyChartChartOptions(des.ChartOptions, initial.ChartOptions, opts...) - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartSlice(des, initial []DashboardGridLayoutWidgetsXyChart, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChart { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChart, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChart(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChart, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChart(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChart(c *Client, des, nw *DashboardGridLayoutWidgetsXyChart) *DashboardGridLayoutWidgetsXyChart { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChart while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.DataSets = canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsSlice(c, des.DataSets, nw.DataSets) - if dcl.StringCanonicalize(des.TimeshiftDuration, nw.TimeshiftDuration) { - nw.TimeshiftDuration = des.TimeshiftDuration - } - nw.Thresholds = canonicalizeNewDashboardGridLayoutWidgetsXyChartThresholdsSlice(c, des.Thresholds, nw.Thresholds) - nw.XAxis = canonicalizeNewDashboardGridLayoutWidgetsXyChartXAxis(c, des.XAxis, nw.XAxis) - nw.YAxis = canonicalizeNewDashboardGridLayoutWidgetsXyChartYAxis(c, des.YAxis, nw.YAxis) - nw.ChartOptions = canonicalizeNewDashboardGridLayoutWidgetsXyChartChartOptions(c, des.ChartOptions, nw.ChartOptions) - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChart) []DashboardGridLayoutWidgetsXyChart { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsXyChart - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChart(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChart) []DashboardGridLayoutWidgetsXyChart { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChart - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChart(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSets(des, initial *DashboardGridLayoutWidgetsXyChartDataSets, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartDataSets { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartDataSets{} - - cDes.TimeSeriesQuery = canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(des.TimeSeriesQuery, initial.TimeSeriesQuery, opts...) - if dcl.IsZeroValue(des.PlotType) || (dcl.IsEmptyValueIndirect(des.PlotType) && dcl.IsEmptyValueIndirect(initial.PlotType)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.PlotType = initial.PlotType - } else { - cDes.PlotType = des.PlotType - } - if dcl.StringCanonicalize(des.LegendTemplate, initial.LegendTemplate) || dcl.IsZeroValue(des.LegendTemplate) { - cDes.LegendTemplate = initial.LegendTemplate - } else { - cDes.LegendTemplate = des.LegendTemplate - } - if dcl.StringCanonicalize(des.MinAlignmentPeriod, initial.MinAlignmentPeriod) || dcl.IsZeroValue(des.MinAlignmentPeriod) { - cDes.MinAlignmentPeriod = initial.MinAlignmentPeriod - } else { - cDes.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsSlice(des, initial []DashboardGridLayoutWidgetsXyChartDataSets, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartDataSets { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartDataSets, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSets(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSets, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSets(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSets(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartDataSets) *DashboardGridLayoutWidgetsXyChartDataSets { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartDataSets while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesQuery = canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(c, des.TimeSeriesQuery, nw.TimeSeriesQuery) - if dcl.StringCanonicalize(des.LegendTemplate, nw.LegendTemplate) { - nw.LegendTemplate = des.LegendTemplate - } - if dcl.StringCanonicalize(des.MinAlignmentPeriod, nw.MinAlignmentPeriod) { - nw.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSets) []DashboardGridLayoutWidgetsXyChartDataSets { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsXyChartDataSets - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartDataSetsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSets(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSets) []DashboardGridLayoutWidgetsXyChartDataSets { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartDataSets - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSets(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(des, initial *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery{} - - cDes.TimeSeriesFilter = canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(des.TimeSeriesFilter, initial.TimeSeriesFilter, opts...) - cDes.TimeSeriesFilterRatio = canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(des.TimeSeriesFilterRatio, initial.TimeSeriesFilterRatio, opts...) 
- if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, initial.TimeSeriesQueryLanguage) || dcl.IsZeroValue(des.TimeSeriesQueryLanguage) { - cDes.TimeSeriesQueryLanguage = initial.TimeSeriesQueryLanguage - } else { - cDes.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, initial.UnitOverride) || dcl.IsZeroValue(des.UnitOverride) { - cDes.UnitOverride = initial.UnitOverride - } else { - cDes.UnitOverride = des.UnitOverride - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuerySlice(des, initial []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesFilter = canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, des.TimeSeriesFilter, nw.TimeSeriesFilter) - nw.TimeSeriesFilterRatio = canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, des.TimeSeriesFilterRatio, nw.TimeSeriesFilterRatio) - if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, nw.TimeSeriesQueryLanguage) { - nw.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, nw.UnitOverride) { - nw.UnitOverride = des.UnitOverride - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuerySet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuerySlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery { - if des == nil { - return nw - } - - // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(des, initial *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(des.Aggregation, initial.Aggregation, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(des, initial []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, des.Aggregation, nw.Aggregation) - nw.SecondaryAggregation = canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(des, initial *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(des, initial []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des, initial *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(des, initial []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des, initial *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(des, initial []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(des, initial *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - - cDes.Numerator = canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(des.Numerator, initial.Numerator, opts...) - cDes.Denominator = canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(des.Denominator, initial.Denominator, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(des, initial []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Numerator = canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, des.Numerator, nw.Numerator) - nw.Denominator = canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, des.Denominator, nw.Denominator) - nw.SecondaryAggregation = canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(des, initial *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des.Aggregation, initial.Aggregation, opts...) 
- - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(des, initial []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des, initial *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(des, initial []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(des, initial *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des.Aggregation, initial.Aggregation, opts...) - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(des, initial []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des, initial *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(des, initial []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des, initial *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(des, initial []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des, initial *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(des, initial []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartThresholds(des, initial *DashboardGridLayoutWidgetsXyChartThresholds, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartThresholds { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartThresholds{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Value) || (dcl.IsEmptyValueIndirect(des.Value) && dcl.IsEmptyValueIndirect(initial.Value)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Value = initial.Value - } else { - cDes.Value = des.Value - } - if dcl.IsZeroValue(des.Color) || (dcl.IsEmptyValueIndirect(des.Color) && dcl.IsEmptyValueIndirect(initial.Color)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Color = initial.Color - } else { - cDes.Color = des.Color - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartThresholdsSlice(des, initial []DashboardGridLayoutWidgetsXyChartThresholds, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartThresholds { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartThresholds, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartThresholds(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartThresholds, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartThresholds(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartThresholds(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartThresholds) *DashboardGridLayoutWidgetsXyChartThresholds { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartThresholds while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartThresholdsSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartThresholds) []DashboardGridLayoutWidgetsXyChartThresholds { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsXyChartThresholds - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartThresholdsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartThresholds(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartThresholdsSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartThresholds) []DashboardGridLayoutWidgetsXyChartThresholds { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartThresholds - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartThresholds(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartXAxis(des, initial *DashboardGridLayoutWidgetsXyChartXAxis, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartXAxis { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartXAxis{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Scale) || (dcl.IsEmptyValueIndirect(des.Scale) && dcl.IsEmptyValueIndirect(initial.Scale)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Scale = initial.Scale - } else { - cDes.Scale = des.Scale - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartXAxisSlice(des, initial []DashboardGridLayoutWidgetsXyChartXAxis, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartXAxis { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartXAxis, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartXAxis(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartXAxis, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartXAxis(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartXAxis(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartXAxis) *DashboardGridLayoutWidgetsXyChartXAxis { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartXAxis while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartXAxisSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartXAxis) []DashboardGridLayoutWidgetsXyChartXAxis { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsXyChartXAxis - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartXAxisNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartXAxis(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartXAxisSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartXAxis) []DashboardGridLayoutWidgetsXyChartXAxis { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartXAxis - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartXAxis(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartYAxis(des, initial *DashboardGridLayoutWidgetsXyChartYAxis, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartYAxis { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartYAxis{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Scale) || (dcl.IsEmptyValueIndirect(des.Scale) && dcl.IsEmptyValueIndirect(initial.Scale)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Scale = initial.Scale - } else { - cDes.Scale = des.Scale - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartYAxisSlice(des, initial []DashboardGridLayoutWidgetsXyChartYAxis, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartYAxis { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartYAxis, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartYAxis(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartYAxis, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartYAxis(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartYAxis(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartYAxis) *DashboardGridLayoutWidgetsXyChartYAxis { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartYAxis while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartYAxisSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartYAxis) []DashboardGridLayoutWidgetsXyChartYAxis { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsXyChartYAxis - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartYAxisNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartYAxis(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartYAxisSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartYAxis) []DashboardGridLayoutWidgetsXyChartYAxis { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartYAxis - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartYAxis(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartChartOptions(des, initial *DashboardGridLayoutWidgetsXyChartChartOptions, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsXyChartChartOptions { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsXyChartChartOptions{} - - if dcl.IsZeroValue(des.Mode) || (dcl.IsEmptyValueIndirect(des.Mode) && dcl.IsEmptyValueIndirect(initial.Mode)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Mode = initial.Mode - } else { - cDes.Mode = des.Mode - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsXyChartChartOptionsSlice(des, initial []DashboardGridLayoutWidgetsXyChartChartOptions, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsXyChartChartOptions { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsXyChartChartOptions, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartChartOptions(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsXyChartChartOptions, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsXyChartChartOptions(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartChartOptions(c *Client, des, nw *DashboardGridLayoutWidgetsXyChartChartOptions) *DashboardGridLayoutWidgetsXyChartChartOptions { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsXyChartChartOptions while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartChartOptionsSet(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartChartOptions) []DashboardGridLayoutWidgetsXyChartChartOptions { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsXyChartChartOptions - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsXyChartChartOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartChartOptions(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsXyChartChartOptionsSlice(c *Client, des, nw []DashboardGridLayoutWidgetsXyChartChartOptions) []DashboardGridLayoutWidgetsXyChartChartOptions { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsXyChartChartOptions - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsXyChartChartOptions(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecard(des, initial *DashboardGridLayoutWidgetsScorecard, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecard { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecard{} - - cDes.TimeSeriesQuery = canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(des.TimeSeriesQuery, initial.TimeSeriesQuery, opts...) - cDes.GaugeView = canonicalizeDashboardGridLayoutWidgetsScorecardGaugeView(des.GaugeView, initial.GaugeView, opts...) - cDes.SparkChartView = canonicalizeDashboardGridLayoutWidgetsScorecardSparkChartView(des.SparkChartView, initial.SparkChartView, opts...) 
- cDes.Thresholds = canonicalizeDashboardGridLayoutWidgetsScorecardThresholdsSlice(des.Thresholds, initial.Thresholds, opts...) - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardSlice(des, initial []DashboardGridLayoutWidgetsScorecard, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecard { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecard, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecard(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecard, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecard(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecard(c *Client, des, nw *DashboardGridLayoutWidgetsScorecard) *DashboardGridLayoutWidgetsScorecard { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecard while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesQuery = canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(c, des.TimeSeriesQuery, nw.TimeSeriesQuery) - nw.GaugeView = canonicalizeNewDashboardGridLayoutWidgetsScorecardGaugeView(c, des.GaugeView, nw.GaugeView) - nw.SparkChartView = canonicalizeNewDashboardGridLayoutWidgetsScorecardSparkChartView(c, des.SparkChartView, nw.SparkChartView) - nw.Thresholds = canonicalizeNewDashboardGridLayoutWidgetsScorecardThresholdsSlice(c, des.Thresholds, nw.Thresholds) - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecard) []DashboardGridLayoutWidgetsScorecard { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsScorecard - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecard(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecard) []DashboardGridLayoutWidgetsScorecard { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecard - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecard(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(des, initial *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardTimeSeriesQuery{} - - cDes.TimeSeriesFilter = canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(des.TimeSeriesFilter, initial.TimeSeriesFilter, opts...) - cDes.TimeSeriesFilterRatio = canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(des.TimeSeriesFilterRatio, initial.TimeSeriesFilterRatio, opts...) - if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, initial.TimeSeriesQueryLanguage) || dcl.IsZeroValue(des.TimeSeriesQueryLanguage) { - cDes.TimeSeriesQueryLanguage = initial.TimeSeriesQueryLanguage - } else { - cDes.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, initial.UnitOverride) || dcl.IsZeroValue(des.UnitOverride) { - cDes.UnitOverride = initial.UnitOverride - } else { - cDes.UnitOverride = des.UnitOverride - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQuerySlice(des, initial []DashboardGridLayoutWidgetsScorecardTimeSeriesQuery, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardTimeSeriesQuery { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQuery, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(&d, nil, 
opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQuery, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardTimeSeriesQuery while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesFilter = canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, des.TimeSeriesFilter, nw.TimeSeriesFilter) - nw.TimeSeriesFilterRatio = canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, des.TimeSeriesFilterRatio, nw.TimeSeriesFilterRatio) - if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, nw.TimeSeriesQueryLanguage) { - nw.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, nw.UnitOverride) { - nw.UnitOverride = des.UnitOverride - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQuerySet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) []DashboardGridLayoutWidgetsScorecardTimeSeriesQuery { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQuery - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQuerySlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) []DashboardGridLayoutWidgetsScorecardTimeSeriesQuery { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQuery - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(des, initial *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = 
canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(des.Aggregation, initial.Aggregation, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice(des, initial []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, des.Aggregation, nw.Aggregation) - nw.SecondaryAggregation = canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(des, initial *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(des, initial []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des, initial *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(des, initial []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des, initial *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(des, initial []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(des, initial *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - - cDes.Numerator = canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(des.Numerator, initial.Numerator, opts...) - cDes.Denominator = canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(des.Denominator, initial.Denominator, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(des, initial []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Numerator = canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, des.Numerator, nw.Numerator) - nw.Denominator = canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, des.Denominator, nw.Denominator) - nw.SecondaryAggregation = canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(des, initial *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des.Aggregation, initial.Aggregation, opts...) 
- - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(des, initial []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des, initial *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(des, initial []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(des, initial *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des.Aggregation, initial.Aggregation, opts...) - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(des, initial []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des, initial *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(des, initial []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des, initial *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(des, initial []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des, initial *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(des, initial []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardGaugeView(des, initial *DashboardGridLayoutWidgetsScorecardGaugeView, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardGaugeView { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardGaugeView{} - - if dcl.IsZeroValue(des.LowerBound) || (dcl.IsEmptyValueIndirect(des.LowerBound) && dcl.IsEmptyValueIndirect(initial.LowerBound)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.LowerBound = initial.LowerBound - } else { - cDes.LowerBound = des.LowerBound - } - if dcl.IsZeroValue(des.UpperBound) || (dcl.IsEmptyValueIndirect(des.UpperBound) && dcl.IsEmptyValueIndirect(initial.UpperBound)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.UpperBound = initial.UpperBound - } else { - cDes.UpperBound = des.UpperBound - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardGaugeViewSlice(des, initial []DashboardGridLayoutWidgetsScorecardGaugeView, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardGaugeView { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardGaugeView, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardGaugeView(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardGaugeView, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardGaugeView(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardGaugeView(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardGaugeView) *DashboardGridLayoutWidgetsScorecardGaugeView { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardGaugeView while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardGaugeViewSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardGaugeView) []DashboardGridLayoutWidgetsScorecardGaugeView { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsScorecardGaugeView - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardGaugeViewNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardGaugeView(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardGaugeViewSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardGaugeView) []DashboardGridLayoutWidgetsScorecardGaugeView { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardGaugeView - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardGaugeView(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardSparkChartView(des, initial *DashboardGridLayoutWidgetsScorecardSparkChartView, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardSparkChartView { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardSparkChartView{} - - if dcl.IsZeroValue(des.SparkChartType) || (dcl.IsEmptyValueIndirect(des.SparkChartType) && dcl.IsEmptyValueIndirect(initial.SparkChartType)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.SparkChartType = initial.SparkChartType - } else { - cDes.SparkChartType = des.SparkChartType - } - if dcl.StringCanonicalize(des.MinAlignmentPeriod, initial.MinAlignmentPeriod) || dcl.IsZeroValue(des.MinAlignmentPeriod) { - cDes.MinAlignmentPeriod = initial.MinAlignmentPeriod - } else { - cDes.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardSparkChartViewSlice(des, initial []DashboardGridLayoutWidgetsScorecardSparkChartView, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardSparkChartView { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardSparkChartView, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardSparkChartView(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardSparkChartView, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardSparkChartView(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardSparkChartView(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardSparkChartView) *DashboardGridLayoutWidgetsScorecardSparkChartView { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardSparkChartView while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.MinAlignmentPeriod, nw.MinAlignmentPeriod) { - nw.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardSparkChartViewSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardSparkChartView) []DashboardGridLayoutWidgetsScorecardSparkChartView { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsScorecardSparkChartView - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardSparkChartViewNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardSparkChartView(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardSparkChartViewSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardSparkChartView) []DashboardGridLayoutWidgetsScorecardSparkChartView { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardSparkChartView - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardSparkChartView(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardThresholds(des, initial *DashboardGridLayoutWidgetsScorecardThresholds, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsScorecardThresholds { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsScorecardThresholds{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Value) || (dcl.IsEmptyValueIndirect(des.Value) && dcl.IsEmptyValueIndirect(initial.Value)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Value = initial.Value - } else { - cDes.Value = des.Value - } - if dcl.IsZeroValue(des.Color) || (dcl.IsEmptyValueIndirect(des.Color) && dcl.IsEmptyValueIndirect(initial.Color)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Color = initial.Color - } else { - cDes.Color = des.Color - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsScorecardThresholdsSlice(des, initial []DashboardGridLayoutWidgetsScorecardThresholds, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsScorecardThresholds { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsScorecardThresholds, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardThresholds(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsScorecardThresholds, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsScorecardThresholds(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardThresholds(c *Client, des, nw *DashboardGridLayoutWidgetsScorecardThresholds) *DashboardGridLayoutWidgetsScorecardThresholds { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsScorecardThresholds while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardThresholdsSet(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardThresholds) []DashboardGridLayoutWidgetsScorecardThresholds { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsScorecardThresholds - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsScorecardThresholdsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardThresholds(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsScorecardThresholdsSlice(c *Client, des, nw []DashboardGridLayoutWidgetsScorecardThresholds) []DashboardGridLayoutWidgetsScorecardThresholds { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsScorecardThresholds - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsScorecardThresholds(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsText(des, initial *DashboardGridLayoutWidgetsText, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsText { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsText{} - - if dcl.StringCanonicalize(des.Content, initial.Content) || dcl.IsZeroValue(des.Content) { - cDes.Content = initial.Content - } else { - cDes.Content = des.Content - } - if dcl.IsZeroValue(des.Format) || (dcl.IsEmptyValueIndirect(des.Format) && dcl.IsEmptyValueIndirect(initial.Format)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Format = initial.Format - } else { - cDes.Format = des.Format - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsTextSlice(des, initial []DashboardGridLayoutWidgetsText, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsText { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsText, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsText(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsText, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsText(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsText(c *Client, des, nw *DashboardGridLayoutWidgetsText) *DashboardGridLayoutWidgetsText { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsText while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Content, nw.Content) { - nw.Content = des.Content - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsTextSet(c *Client, des, nw []DashboardGridLayoutWidgetsText) []DashboardGridLayoutWidgetsText { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardGridLayoutWidgetsText - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsTextNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsText(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsTextSlice(c *Client, des, nw []DashboardGridLayoutWidgetsText) []DashboardGridLayoutWidgetsText { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsText - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsText(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsBlank(des, initial *DashboardGridLayoutWidgetsBlank, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsBlank { - if des == nil { - return initial - } - if des.empty { - return des - } - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsBlank{} - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsBlankSlice(des, initial []DashboardGridLayoutWidgetsBlank, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsBlank { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsBlank, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsBlank(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsBlank, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsBlank(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsBlank(c *Client, des, nw *DashboardGridLayoutWidgetsBlank) *DashboardGridLayoutWidgetsBlank { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsBlank while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsBlankSet(c *Client, des, nw []DashboardGridLayoutWidgetsBlank) []DashboardGridLayoutWidgetsBlank { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsBlank - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsBlankNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsBlank(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsBlankSlice(c *Client, des, nw []DashboardGridLayoutWidgetsBlank) []DashboardGridLayoutWidgetsBlank { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsBlank - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsBlank(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardGridLayoutWidgetsLogsPanel(des, initial *DashboardGridLayoutWidgetsLogsPanel, opts ...dcl.ApplyOption) *DashboardGridLayoutWidgetsLogsPanel { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardGridLayoutWidgetsLogsPanel{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - if dcl.StringArrayCanonicalize(des.ResourceNames, initial.ResourceNames) { - cDes.ResourceNames = initial.ResourceNames - } else { - cDes.ResourceNames = des.ResourceNames - } - - return cDes -} - -func canonicalizeDashboardGridLayoutWidgetsLogsPanelSlice(des, initial []DashboardGridLayoutWidgetsLogsPanel, opts ...dcl.ApplyOption) []DashboardGridLayoutWidgetsLogsPanel { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardGridLayoutWidgetsLogsPanel, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsLogsPanel(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardGridLayoutWidgetsLogsPanel, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardGridLayoutWidgetsLogsPanel(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardGridLayoutWidgetsLogsPanel(c *Client, des, nw *DashboardGridLayoutWidgetsLogsPanel) *DashboardGridLayoutWidgetsLogsPanel { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardGridLayoutWidgetsLogsPanel while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - if dcl.StringArrayCanonicalize(des.ResourceNames, nw.ResourceNames) { - nw.ResourceNames = des.ResourceNames - } - - return nw -} - -func canonicalizeNewDashboardGridLayoutWidgetsLogsPanelSet(c *Client, des, nw []DashboardGridLayoutWidgetsLogsPanel) []DashboardGridLayoutWidgetsLogsPanel { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardGridLayoutWidgetsLogsPanel - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardGridLayoutWidgetsLogsPanelNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsLogsPanel(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardGridLayoutWidgetsLogsPanelSlice(c *Client, des, nw []DashboardGridLayoutWidgetsLogsPanel) []DashboardGridLayoutWidgetsLogsPanel { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardGridLayoutWidgetsLogsPanel - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardGridLayoutWidgetsLogsPanel(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayout(des, initial *DashboardMosaicLayout, opts ...dcl.ApplyOption) *DashboardMosaicLayout { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayout{} - - if dcl.IsZeroValue(des.Columns) || (dcl.IsEmptyValueIndirect(des.Columns) && dcl.IsEmptyValueIndirect(initial.Columns)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Columns = initial.Columns - } else { - cDes.Columns = des.Columns - } - cDes.Tiles = canonicalizeDashboardMosaicLayoutTilesSlice(des.Tiles, initial.Tiles, opts...) - - return cDes -} - -func canonicalizeDashboardMosaicLayoutSlice(des, initial []DashboardMosaicLayout, opts ...dcl.ApplyOption) []DashboardMosaicLayout { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayout, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayout(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayout, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayout(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayout(c *Client, des, nw *DashboardMosaicLayout) *DashboardMosaicLayout { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayout while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Tiles = canonicalizeNewDashboardMosaicLayoutTilesSlice(c, des.Tiles, nw.Tiles) - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutSet(c *Client, des, nw []DashboardMosaicLayout) []DashboardMosaicLayout { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayout - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayout(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutSlice(c *Client, des, nw []DashboardMosaicLayout) []DashboardMosaicLayout { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayout - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayout(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTiles(des, initial *DashboardMosaicLayoutTiles, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTiles { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTiles{} - - if dcl.IsZeroValue(des.XPos) || (dcl.IsEmptyValueIndirect(des.XPos) && dcl.IsEmptyValueIndirect(initial.XPos)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.XPos = initial.XPos - } else { - cDes.XPos = des.XPos - } - if dcl.IsZeroValue(des.YPos) || (dcl.IsEmptyValueIndirect(des.YPos) && dcl.IsEmptyValueIndirect(initial.YPos)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.YPos = initial.YPos - } else { - cDes.YPos = des.YPos - } - if dcl.IsZeroValue(des.Width) || (dcl.IsEmptyValueIndirect(des.Width) && dcl.IsEmptyValueIndirect(initial.Width)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Width = initial.Width - } else { - cDes.Width = des.Width - } - if dcl.IsZeroValue(des.Height) || (dcl.IsEmptyValueIndirect(des.Height) && dcl.IsEmptyValueIndirect(initial.Height)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Height = initial.Height - } else { - cDes.Height = des.Height - } - cDes.Widget = canonicalizeDashboardMosaicLayoutTilesWidget(des.Widget, initial.Widget, opts...) - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesSlice(des, initial []DashboardMosaicLayoutTiles, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTiles { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTiles, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTiles(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTiles, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTiles(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTiles(c *Client, des, nw *DashboardMosaicLayoutTiles) *DashboardMosaicLayoutTiles { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTiles while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.Widget = canonicalizeNewDashboardMosaicLayoutTilesWidget(c, des.Widget, nw.Widget) - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesSet(c *Client, des, nw []DashboardMosaicLayoutTiles) []DashboardMosaicLayoutTiles { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTiles - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTiles(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesSlice(c *Client, des, nw []DashboardMosaicLayoutTiles) []DashboardMosaicLayoutTiles { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTiles - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTiles(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidget(des, initial *DashboardMosaicLayoutTilesWidget, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidget { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.XyChart != nil || (initial != nil && initial.XyChart != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Scorecard, des.Text, des.Blank, des.LogsPanel) { - des.XyChart = nil - if initial != nil { - initial.XyChart = nil - } - } - } - - if des.Scorecard != nil || (initial != nil && initial.Scorecard != nil) { - // Check if anything else is set. - if dcl.AnySet(des.XyChart, des.Text, des.Blank, des.LogsPanel) { - des.Scorecard = nil - if initial != nil { - initial.Scorecard = nil - } - } - } - - if des.Text != nil || (initial != nil && initial.Text != nil) { - // Check if anything else is set. - if dcl.AnySet(des.XyChart, des.Scorecard, des.Blank, des.LogsPanel) { - des.Text = nil - if initial != nil { - initial.Text = nil - } - } - } - - if des.Blank != nil || (initial != nil && initial.Blank != nil) { - // Check if anything else is set. - if dcl.AnySet(des.XyChart, des.Scorecard, des.Text, des.LogsPanel) { - des.Blank = nil - if initial != nil { - initial.Blank = nil - } - } - } - - if des.LogsPanel != nil || (initial != nil && initial.LogsPanel != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.XyChart, des.Scorecard, des.Text, des.Blank) { - des.LogsPanel = nil - if initial != nil { - initial.LogsPanel = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidget{} - - if dcl.StringCanonicalize(des.Title, initial.Title) || dcl.IsZeroValue(des.Title) { - cDes.Title = initial.Title - } else { - cDes.Title = des.Title - } - cDes.XyChart = canonicalizeDashboardMosaicLayoutTilesWidgetXyChart(des.XyChart, initial.XyChart, opts...) - cDes.Scorecard = canonicalizeDashboardMosaicLayoutTilesWidgetScorecard(des.Scorecard, initial.Scorecard, opts...) - cDes.Text = canonicalizeDashboardMosaicLayoutTilesWidgetText(des.Text, initial.Text, opts...) - cDes.Blank = canonicalizeDashboardMosaicLayoutTilesWidgetBlank(des.Blank, initial.Blank, opts...) - cDes.LogsPanel = canonicalizeDashboardMosaicLayoutTilesWidgetLogsPanel(des.LogsPanel, initial.LogsPanel, opts...) - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetSlice(des, initial []DashboardMosaicLayoutTilesWidget, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidget { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidget, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidget(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidget, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidget(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidget(c *Client, des, nw *DashboardMosaicLayoutTilesWidget) *DashboardMosaicLayoutTilesWidget { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidget while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Title, nw.Title) { - nw.Title = des.Title - } - nw.XyChart = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChart(c, des.XyChart, nw.XyChart) - nw.Scorecard = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecard(c, des.Scorecard, nw.Scorecard) - nw.Text = canonicalizeNewDashboardMosaicLayoutTilesWidgetText(c, des.Text, nw.Text) - nw.Blank = canonicalizeNewDashboardMosaicLayoutTilesWidgetBlank(c, des.Blank, nw.Blank) - nw.LogsPanel = canonicalizeNewDashboardMosaicLayoutTilesWidgetLogsPanel(c, des.LogsPanel, nw.LogsPanel) - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidget) []DashboardMosaicLayoutTilesWidget { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidget - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidget(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidget) []DashboardMosaicLayoutTilesWidget { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidget - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidget(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChart(des, initial *DashboardMosaicLayoutTilesWidgetXyChart, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChart { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChart{} - - cDes.DataSets = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsSlice(des.DataSets, initial.DataSets, opts...) - if dcl.StringCanonicalize(des.TimeshiftDuration, initial.TimeshiftDuration) || dcl.IsZeroValue(des.TimeshiftDuration) { - cDes.TimeshiftDuration = initial.TimeshiftDuration - } else { - cDes.TimeshiftDuration = des.TimeshiftDuration - } - cDes.Thresholds = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartThresholdsSlice(des.Thresholds, initial.Thresholds, opts...) - cDes.XAxis = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartXAxis(des.XAxis, initial.XAxis, opts...) - cDes.YAxis = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartYAxis(des.YAxis, initial.YAxis, opts...) - cDes.ChartOptions = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartChartOptions(des.ChartOptions, initial.ChartOptions, opts...) 
- - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChart, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChart { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChart, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChart(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChart, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChart(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChart(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChart) *DashboardMosaicLayoutTilesWidgetXyChart { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChart while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.DataSets = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsSlice(c, des.DataSets, nw.DataSets) - if dcl.StringCanonicalize(des.TimeshiftDuration, nw.TimeshiftDuration) { - nw.TimeshiftDuration = des.TimeshiftDuration - } - nw.Thresholds = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartThresholdsSlice(c, des.Thresholds, nw.Thresholds) - nw.XAxis = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartXAxis(c, des.XAxis, nw.XAxis) - nw.YAxis = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartYAxis(c, des.YAxis, nw.YAxis) - nw.ChartOptions = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartChartOptions(c, des.ChartOptions, nw.ChartOptions) - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChart) []DashboardMosaicLayoutTilesWidgetXyChart { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetXyChart - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChart(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChart) []DashboardMosaicLayoutTilesWidgetXyChart { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChart - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChart(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSets(des, initial *DashboardMosaicLayoutTilesWidgetXyChartDataSets, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartDataSets { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartDataSets{} - - cDes.TimeSeriesQuery = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(des.TimeSeriesQuery, initial.TimeSeriesQuery, opts...) - if dcl.IsZeroValue(des.PlotType) || (dcl.IsEmptyValueIndirect(des.PlotType) && dcl.IsEmptyValueIndirect(initial.PlotType)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.PlotType = initial.PlotType - } else { - cDes.PlotType = des.PlotType - } - if dcl.StringCanonicalize(des.LegendTemplate, initial.LegendTemplate) || dcl.IsZeroValue(des.LegendTemplate) { - cDes.LegendTemplate = initial.LegendTemplate - } else { - cDes.LegendTemplate = des.LegendTemplate - } - if dcl.StringCanonicalize(des.MinAlignmentPeriod, initial.MinAlignmentPeriod) || dcl.IsZeroValue(des.MinAlignmentPeriod) { - cDes.MinAlignmentPeriod = initial.MinAlignmentPeriod - } else { - cDes.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartDataSets, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartDataSets { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSets, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSets(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSets, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSets(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSets(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartDataSets) *DashboardMosaicLayoutTilesWidgetXyChartDataSets { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartDataSets while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesQuery = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(c, des.TimeSeriesQuery, nw.TimeSeriesQuery) - if dcl.StringCanonicalize(des.LegendTemplate, nw.LegendTemplate) { - nw.LegendTemplate = des.LegendTemplate - } - if dcl.StringCanonicalize(des.MinAlignmentPeriod, nw.MinAlignmentPeriod) { - nw.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSets) []DashboardMosaicLayoutTilesWidgetXyChartDataSets { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSets - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSets(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSets) []DashboardMosaicLayoutTilesWidgetXyChartDataSets { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSets - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSets(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(des, initial *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery{} - - cDes.TimeSeriesFilter = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(des.TimeSeriesFilter, initial.TimeSeriesFilter, opts...) - cDes.TimeSeriesFilterRatio = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(des.TimeSeriesFilterRatio, initial.TimeSeriesFilterRatio, opts...) 
- if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, initial.TimeSeriesQueryLanguage) || dcl.IsZeroValue(des.TimeSeriesQueryLanguage) { - cDes.TimeSeriesQueryLanguage = initial.TimeSeriesQueryLanguage - } else { - cDes.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, initial.UnitOverride) || dcl.IsZeroValue(des.UnitOverride) { - cDes.UnitOverride = initial.UnitOverride - } else { - cDes.UnitOverride = des.UnitOverride - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuerySlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesFilter = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, des.TimeSeriesFilter, nw.TimeSeriesFilter) - nw.TimeSeriesFilterRatio = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, des.TimeSeriesFilterRatio, nw.TimeSeriesFilterRatio) - if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, nw.TimeSeriesQueryLanguage) { - nw.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, nw.UnitOverride) { - nw.UnitOverride = des.UnitOverride - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuerySet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuerySlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(des, initial *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(des.Aggregation, initial.Aggregation, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, des.Aggregation, nw.Aggregation) - nw.SecondaryAggregation = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(des, initial *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des, initial *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des, initial *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(des, initial *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - - cDes.Numerator = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(des.Numerator, initial.Numerator, opts...) - cDes.Denominator = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(des.Denominator, initial.Denominator, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Numerator = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, des.Numerator, nw.Numerator) - nw.Denominator = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, des.Denominator, nw.Denominator) - nw.SecondaryAggregation = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(des, initial *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des.Aggregation, initial.Aggregation, opts...) 
- - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des, initial *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(des, initial *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des.Aggregation, initial.Aggregation, opts...) 
- - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des, initial *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des, initial *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des, initial *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
- } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartThresholds(des, initial *DashboardMosaicLayoutTilesWidgetXyChartThresholds, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartThresholds { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartThresholds{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Value) || (dcl.IsEmptyValueIndirect(des.Value) && dcl.IsEmptyValueIndirect(initial.Value)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Value = initial.Value - } else { - cDes.Value = des.Value - } - if dcl.IsZeroValue(des.Color) || (dcl.IsEmptyValueIndirect(des.Color) && dcl.IsEmptyValueIndirect(initial.Color)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Color = initial.Color - } else { - cDes.Color = des.Color - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartThresholdsSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartThresholds, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartThresholds { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartThresholds, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartThresholds(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartThresholds, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartThresholds(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartThresholds(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartThresholds) *DashboardMosaicLayoutTilesWidgetXyChartThresholds { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartThresholds while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartThresholdsSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartThresholds) []DashboardMosaicLayoutTilesWidgetXyChartThresholds { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetXyChartThresholds - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartThresholdsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartThresholds(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartThresholdsSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartThresholds) []DashboardMosaicLayoutTilesWidgetXyChartThresholds { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartThresholds - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartThresholds(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartXAxis(des, initial *DashboardMosaicLayoutTilesWidgetXyChartXAxis, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartXAxis { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartXAxis{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Scale) || (dcl.IsEmptyValueIndirect(des.Scale) && dcl.IsEmptyValueIndirect(initial.Scale)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Scale = initial.Scale - } else { - cDes.Scale = des.Scale - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartXAxisSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartXAxis, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartXAxis { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartXAxis, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartXAxis(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartXAxis, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartXAxis(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartXAxis(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartXAxis) *DashboardMosaicLayoutTilesWidgetXyChartXAxis { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartXAxis while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartXAxisSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartXAxis) []DashboardMosaicLayoutTilesWidgetXyChartXAxis { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetXyChartXAxis - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartXAxisNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartXAxis(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartXAxisSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartXAxis) []DashboardMosaicLayoutTilesWidgetXyChartXAxis { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartXAxis - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartXAxis(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartYAxis(des, initial *DashboardMosaicLayoutTilesWidgetXyChartYAxis, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartYAxis { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartYAxis{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Scale) || (dcl.IsEmptyValueIndirect(des.Scale) && dcl.IsEmptyValueIndirect(initial.Scale)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Scale = initial.Scale - } else { - cDes.Scale = des.Scale - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartYAxisSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartYAxis, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartYAxis { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartYAxis, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartYAxis(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartYAxis, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartYAxis(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartYAxis(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartYAxis) *DashboardMosaicLayoutTilesWidgetXyChartYAxis { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartYAxis while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartYAxisSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartYAxis) []DashboardMosaicLayoutTilesWidgetXyChartYAxis { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetXyChartYAxis - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartYAxisNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartYAxis(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartYAxisSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartYAxis) []DashboardMosaicLayoutTilesWidgetXyChartYAxis { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartYAxis - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartYAxis(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartChartOptions(des, initial *DashboardMosaicLayoutTilesWidgetXyChartChartOptions, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetXyChartChartOptions { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetXyChartChartOptions{} - - if dcl.IsZeroValue(des.Mode) || (dcl.IsEmptyValueIndirect(des.Mode) && dcl.IsEmptyValueIndirect(initial.Mode)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Mode = initial.Mode - } else { - cDes.Mode = des.Mode - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetXyChartChartOptionsSlice(des, initial []DashboardMosaicLayoutTilesWidgetXyChartChartOptions, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetXyChartChartOptions { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartChartOptions, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartChartOptions(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartChartOptions, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetXyChartChartOptions(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartChartOptions(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetXyChartChartOptions) *DashboardMosaicLayoutTilesWidgetXyChartChartOptions { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetXyChartChartOptions while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartChartOptionsSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartChartOptions) []DashboardMosaicLayoutTilesWidgetXyChartChartOptions { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetXyChartChartOptions - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetXyChartChartOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartChartOptions(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartChartOptionsSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetXyChartChartOptions) []DashboardMosaicLayoutTilesWidgetXyChartChartOptions { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetXyChartChartOptions - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetXyChartChartOptions(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecard(des, initial *DashboardMosaicLayoutTilesWidgetScorecard, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecard { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecard{} - - cDes.TimeSeriesQuery = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(des.TimeSeriesQuery, initial.TimeSeriesQuery, opts...) - cDes.GaugeView = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardGaugeView(des.GaugeView, initial.GaugeView, opts...) - cDes.SparkChartView = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(des.SparkChartView, initial.SparkChartView, opts...) - cDes.Thresholds = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardThresholdsSlice(des.Thresholds, initial.Thresholds, opts...) - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecard, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecard { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecard, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecard(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecard, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecard(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecard(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecard) *DashboardMosaicLayoutTilesWidgetScorecard { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecard while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesQuery = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(c, des.TimeSeriesQuery, nw.TimeSeriesQuery) - nw.GaugeView = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardGaugeView(c, des.GaugeView, nw.GaugeView) - nw.SparkChartView = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(c, des.SparkChartView, nw.SparkChartView) - nw.Thresholds = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardThresholdsSlice(c, des.Thresholds, nw.Thresholds) - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecard) []DashboardMosaicLayoutTilesWidgetScorecard { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetScorecard - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecard(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecard) []DashboardMosaicLayoutTilesWidgetScorecard { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecard - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecard(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(des, initial *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery{} - - cDes.TimeSeriesFilter = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(des.TimeSeriesFilter, initial.TimeSeriesFilter, opts...) - cDes.TimeSeriesFilterRatio = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(des.TimeSeriesFilterRatio, initial.TimeSeriesFilterRatio, opts...) 
- if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, initial.TimeSeriesQueryLanguage) || dcl.IsZeroValue(des.TimeSeriesQueryLanguage) { - cDes.TimeSeriesQueryLanguage = initial.TimeSeriesQueryLanguage - } else { - cDes.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, initial.UnitOverride) || dcl.IsZeroValue(des.UnitOverride) { - cDes.UnitOverride = initial.UnitOverride - } else { - cDes.UnitOverride = des.UnitOverride - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuerySlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesFilter = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(c, des.TimeSeriesFilter, nw.TimeSeriesFilter) - nw.TimeSeriesFilterRatio = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, des.TimeSeriesFilterRatio, nw.TimeSeriesFilterRatio) - if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, nw.TimeSeriesQueryLanguage) { - nw.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, nw.UnitOverride) { - nw.UnitOverride = des.UnitOverride - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuerySet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuerySlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery { - if des == nil { - return nw - } - - // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(des, initial *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(des.Aggregation, initial.Aggregation, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, des.Aggregation, nw.Aggregation) - nw.SecondaryAggregation = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(des, initial *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des, initial *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des, initial *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(des, initial *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - - cDes.Numerator = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(des.Numerator, initial.Numerator, opts...) - cDes.Denominator = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(des.Denominator, initial.Denominator, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Numerator = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, des.Numerator, nw.Numerator) - nw.Denominator = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, des.Denominator, nw.Denominator) - nw.SecondaryAggregation = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(des, initial *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des.Aggregation, initial.Aggregation, opts...) 
- - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des, initial *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(des, initial *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des.Aggregation, initial.Aggregation, opts...) - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des, initial *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des, initial *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des, initial *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardGaugeView(des, initial *DashboardMosaicLayoutTilesWidgetScorecardGaugeView, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardGaugeView { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardGaugeView{} - - if dcl.IsZeroValue(des.LowerBound) || (dcl.IsEmptyValueIndirect(des.LowerBound) && dcl.IsEmptyValueIndirect(initial.LowerBound)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.LowerBound = initial.LowerBound - } else { - cDes.LowerBound = des.LowerBound - } - if dcl.IsZeroValue(des.UpperBound) || (dcl.IsEmptyValueIndirect(des.UpperBound) && dcl.IsEmptyValueIndirect(initial.UpperBound)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.UpperBound = initial.UpperBound - } else { - cDes.UpperBound = des.UpperBound - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardGaugeViewSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardGaugeView, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardGaugeView { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardGaugeView, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardGaugeView(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardGaugeView, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardGaugeView(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardGaugeView(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardGaugeView) *DashboardMosaicLayoutTilesWidgetScorecardGaugeView { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardGaugeView while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardGaugeViewSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardGaugeView) []DashboardMosaicLayoutTilesWidgetScorecardGaugeView { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetScorecardGaugeView - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardGaugeViewNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardGaugeView(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardGaugeViewSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardGaugeView) []DashboardMosaicLayoutTilesWidgetScorecardGaugeView { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardGaugeView - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardGaugeView(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(des, initial *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardSparkChartView{} - - if dcl.IsZeroValue(des.SparkChartType) || (dcl.IsEmptyValueIndirect(des.SparkChartType) && dcl.IsEmptyValueIndirect(initial.SparkChartType)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.SparkChartType = initial.SparkChartType - } else { - cDes.SparkChartType = des.SparkChartType - } - if dcl.StringCanonicalize(des.MinAlignmentPeriod, initial.MinAlignmentPeriod) || dcl.IsZeroValue(des.MinAlignmentPeriod) { - cDes.MinAlignmentPeriod = initial.MinAlignmentPeriod - } else { - cDes.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardSparkChartView, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardSparkChartView { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardSparkChartView, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardSparkChartView, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardSparkChartView while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.MinAlignmentPeriod, nw.MinAlignmentPeriod) { - nw.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) []DashboardMosaicLayoutTilesWidgetScorecardSparkChartView { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetScorecardSparkChartView - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) []DashboardMosaicLayoutTilesWidgetScorecardSparkChartView { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardSparkChartView - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardThresholds(des, initial *DashboardMosaicLayoutTilesWidgetScorecardThresholds, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetScorecardThresholds { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetScorecardThresholds{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Value) || (dcl.IsEmptyValueIndirect(des.Value) && dcl.IsEmptyValueIndirect(initial.Value)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Value = initial.Value - } else { - cDes.Value = des.Value - } - if dcl.IsZeroValue(des.Color) || (dcl.IsEmptyValueIndirect(des.Color) && dcl.IsEmptyValueIndirect(initial.Color)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Color = initial.Color - } else { - cDes.Color = des.Color - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetScorecardThresholdsSlice(des, initial []DashboardMosaicLayoutTilesWidgetScorecardThresholds, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetScorecardThresholds { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardThresholds, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardThresholds(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardThresholds, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetScorecardThresholds(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardThresholds(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetScorecardThresholds) *DashboardMosaicLayoutTilesWidgetScorecardThresholds { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetScorecardThresholds while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardThresholdsSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardThresholds) []DashboardMosaicLayoutTilesWidgetScorecardThresholds { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardMosaicLayoutTilesWidgetScorecardThresholds - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetScorecardThresholdsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardThresholds(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardThresholdsSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetScorecardThresholds) []DashboardMosaicLayoutTilesWidgetScorecardThresholds { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetScorecardThresholds - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetScorecardThresholds(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetText(des, initial *DashboardMosaicLayoutTilesWidgetText, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetText { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetText{} - - if dcl.StringCanonicalize(des.Content, initial.Content) || dcl.IsZeroValue(des.Content) { - cDes.Content = initial.Content - } else { - cDes.Content = des.Content - } - if dcl.IsZeroValue(des.Format) || (dcl.IsEmptyValueIndirect(des.Format) && dcl.IsEmptyValueIndirect(initial.Format)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Format = initial.Format - } else { - cDes.Format = des.Format - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetTextSlice(des, initial []DashboardMosaicLayoutTilesWidgetText, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetText { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetText, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetText(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetText, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetText(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetText(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetText) *DashboardMosaicLayoutTilesWidgetText { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetText while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Content, nw.Content) { - nw.Content = des.Content - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetTextSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetText) []DashboardMosaicLayoutTilesWidgetText { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardMosaicLayoutTilesWidgetText - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetTextNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetText(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetTextSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetText) []DashboardMosaicLayoutTilesWidgetText { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetText - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetText(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetBlank(des, initial *DashboardMosaicLayoutTilesWidgetBlank, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetBlank { - if des == nil { - return initial - } - if des.empty { - return des - } - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetBlank{} - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetBlankSlice(des, initial []DashboardMosaicLayoutTilesWidgetBlank, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetBlank { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetBlank, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetBlank(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetBlank, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetBlank(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetBlank(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetBlank) *DashboardMosaicLayoutTilesWidgetBlank { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetBlank while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetBlankSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetBlank) []DashboardMosaicLayoutTilesWidgetBlank { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetBlank - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetBlankNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetBlank(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetBlankSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetBlank) []DashboardMosaicLayoutTilesWidgetBlank { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. 
- // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetBlank - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetBlank(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetLogsPanel(des, initial *DashboardMosaicLayoutTilesWidgetLogsPanel, opts ...dcl.ApplyOption) *DashboardMosaicLayoutTilesWidgetLogsPanel { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardMosaicLayoutTilesWidgetLogsPanel{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - if dcl.StringArrayCanonicalize(des.ResourceNames, initial.ResourceNames) { - cDes.ResourceNames = initial.ResourceNames - } else { - cDes.ResourceNames = des.ResourceNames - } - - return cDes -} - -func canonicalizeDashboardMosaicLayoutTilesWidgetLogsPanelSlice(des, initial []DashboardMosaicLayoutTilesWidgetLogsPanel, opts ...dcl.ApplyOption) []DashboardMosaicLayoutTilesWidgetLogsPanel { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardMosaicLayoutTilesWidgetLogsPanel, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetLogsPanel(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardMosaicLayoutTilesWidgetLogsPanel, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardMosaicLayoutTilesWidgetLogsPanel(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetLogsPanel(c *Client, des, nw *DashboardMosaicLayoutTilesWidgetLogsPanel) *DashboardMosaicLayoutTilesWidgetLogsPanel { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardMosaicLayoutTilesWidgetLogsPanel while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - if dcl.StringArrayCanonicalize(des.ResourceNames, nw.ResourceNames) { - nw.ResourceNames = des.ResourceNames - } - - return nw -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetLogsPanelSet(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetLogsPanel) []DashboardMosaicLayoutTilesWidgetLogsPanel { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardMosaicLayoutTilesWidgetLogsPanel - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardMosaicLayoutTilesWidgetLogsPanelNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetLogsPanel(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardMosaicLayoutTilesWidgetLogsPanelSlice(c *Client, des, nw []DashboardMosaicLayoutTilesWidgetLogsPanel) []DashboardMosaicLayoutTilesWidgetLogsPanel { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. 
- // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardMosaicLayoutTilesWidgetLogsPanel - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardMosaicLayoutTilesWidgetLogsPanel(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayout(des, initial *DashboardRowLayout, opts ...dcl.ApplyOption) *DashboardRowLayout { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayout{} - - cDes.Rows = canonicalizeDashboardRowLayoutRowsSlice(des.Rows, initial.Rows, opts...) - - return cDes -} - -func canonicalizeDashboardRowLayoutSlice(des, initial []DashboardRowLayout, opts ...dcl.ApplyOption) []DashboardRowLayout { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayout, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayout(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayout, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayout(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayout(c *Client, des, nw *DashboardRowLayout) *DashboardRowLayout { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayout while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Rows = canonicalizeNewDashboardRowLayoutRowsSlice(c, des.Rows, nw.Rows) - - return nw -} - -func canonicalizeNewDashboardRowLayoutSet(c *Client, des, nw []DashboardRowLayout) []DashboardRowLayout { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayout - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayout(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutSlice(c *Client, des, nw []DashboardRowLayout) []DashboardRowLayout { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayout - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayout(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRows(des, initial *DashboardRowLayoutRows, opts ...dcl.ApplyOption) *DashboardRowLayoutRows { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRows{} - - if dcl.IsZeroValue(des.Weight) || (dcl.IsEmptyValueIndirect(des.Weight) && dcl.IsEmptyValueIndirect(initial.Weight)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Weight = initial.Weight - } else { - cDes.Weight = des.Weight - } - cDes.Widgets = canonicalizeDashboardRowLayoutRowsWidgetsSlice(des.Widgets, initial.Widgets, opts...) - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsSlice(des, initial []DashboardRowLayoutRows, opts ...dcl.ApplyOption) []DashboardRowLayoutRows { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRows, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRows(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRows, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRows(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRows(c *Client, des, nw *DashboardRowLayoutRows) *DashboardRowLayoutRows { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRows while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.Widgets = canonicalizeNewDashboardRowLayoutRowsWidgetsSlice(c, des.Widgets, nw.Widgets) - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsSet(c *Client, des, nw []DashboardRowLayoutRows) []DashboardRowLayoutRows { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRows - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRows(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsSlice(c *Client, des, nw []DashboardRowLayoutRows) []DashboardRowLayoutRows { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRows - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRows(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgets(des, initial *DashboardRowLayoutRowsWidgets, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgets { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.XyChart != nil || (initial != nil && initial.XyChart != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Scorecard, des.Text, des.Blank, des.LogsPanel) { - des.XyChart = nil - if initial != nil { - initial.XyChart = nil - } - } - } - - if des.Scorecard != nil || (initial != nil && initial.Scorecard != nil) { - // Check if anything else is set. - if dcl.AnySet(des.XyChart, des.Text, des.Blank, des.LogsPanel) { - des.Scorecard = nil - if initial != nil { - initial.Scorecard = nil - } - } - } - - if des.Text != nil || (initial != nil && initial.Text != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.XyChart, des.Scorecard, des.Blank, des.LogsPanel) { - des.Text = nil - if initial != nil { - initial.Text = nil - } - } - } - - if des.Blank != nil || (initial != nil && initial.Blank != nil) { - // Check if anything else is set. - if dcl.AnySet(des.XyChart, des.Scorecard, des.Text, des.LogsPanel) { - des.Blank = nil - if initial != nil { - initial.Blank = nil - } - } - } - - if des.LogsPanel != nil || (initial != nil && initial.LogsPanel != nil) { - // Check if anything else is set. - if dcl.AnySet(des.XyChart, des.Scorecard, des.Text, des.Blank) { - des.LogsPanel = nil - if initial != nil { - initial.LogsPanel = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgets{} - - if dcl.StringCanonicalize(des.Title, initial.Title) || dcl.IsZeroValue(des.Title) { - cDes.Title = initial.Title - } else { - cDes.Title = des.Title - } - cDes.XyChart = canonicalizeDashboardRowLayoutRowsWidgetsXyChart(des.XyChart, initial.XyChart, opts...) - cDes.Scorecard = canonicalizeDashboardRowLayoutRowsWidgetsScorecard(des.Scorecard, initial.Scorecard, opts...) - cDes.Text = canonicalizeDashboardRowLayoutRowsWidgetsText(des.Text, initial.Text, opts...) - cDes.Blank = canonicalizeDashboardRowLayoutRowsWidgetsBlank(des.Blank, initial.Blank, opts...) - cDes.LogsPanel = canonicalizeDashboardRowLayoutRowsWidgetsLogsPanel(des.LogsPanel, initial.LogsPanel, opts...) - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsSlice(des, initial []DashboardRowLayoutRowsWidgets, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgets { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgets, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgets(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgets, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgets(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgets(c *Client, des, nw *DashboardRowLayoutRowsWidgets) *DashboardRowLayoutRowsWidgets { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgets while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Title, nw.Title) { - nw.Title = des.Title - } - nw.XyChart = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChart(c, des.XyChart, nw.XyChart) - nw.Scorecard = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecard(c, des.Scorecard, nw.Scorecard) - nw.Text = canonicalizeNewDashboardRowLayoutRowsWidgetsText(c, des.Text, nw.Text) - nw.Blank = canonicalizeNewDashboardRowLayoutRowsWidgetsBlank(c, des.Blank, nw.Blank) - nw.LogsPanel = canonicalizeNewDashboardRowLayoutRowsWidgetsLogsPanel(c, des.LogsPanel, nw.LogsPanel) - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsSet(c *Client, des, nw []DashboardRowLayoutRowsWidgets) []DashboardRowLayoutRowsWidgets { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgets - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgets(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgets) []DashboardRowLayoutRowsWidgets { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgets - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgets(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChart(des, initial *DashboardRowLayoutRowsWidgetsXyChart, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChart { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChart{} - - cDes.DataSets = canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsSlice(des.DataSets, initial.DataSets, opts...) - if dcl.StringCanonicalize(des.TimeshiftDuration, initial.TimeshiftDuration) || dcl.IsZeroValue(des.TimeshiftDuration) { - cDes.TimeshiftDuration = initial.TimeshiftDuration - } else { - cDes.TimeshiftDuration = des.TimeshiftDuration - } - cDes.Thresholds = canonicalizeDashboardRowLayoutRowsWidgetsXyChartThresholdsSlice(des.Thresholds, initial.Thresholds, opts...) 
- cDes.XAxis = canonicalizeDashboardRowLayoutRowsWidgetsXyChartXAxis(des.XAxis, initial.XAxis, opts...) - cDes.YAxis = canonicalizeDashboardRowLayoutRowsWidgetsXyChartYAxis(des.YAxis, initial.YAxis, opts...) - cDes.ChartOptions = canonicalizeDashboardRowLayoutRowsWidgetsXyChartChartOptions(des.ChartOptions, initial.ChartOptions, opts...) - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChart, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChart { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChart, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChart(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChart, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChart(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChart(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChart) *DashboardRowLayoutRowsWidgetsXyChart { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChart while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.DataSets = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsSlice(c, des.DataSets, nw.DataSets) - if dcl.StringCanonicalize(des.TimeshiftDuration, nw.TimeshiftDuration) { - nw.TimeshiftDuration = des.TimeshiftDuration - } - nw.Thresholds = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartThresholdsSlice(c, des.Thresholds, nw.Thresholds) - nw.XAxis = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartXAxis(c, des.XAxis, nw.XAxis) - nw.YAxis = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartYAxis(c, des.YAxis, nw.YAxis) - nw.ChartOptions = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartChartOptions(c, des.ChartOptions, nw.ChartOptions) - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChart) []DashboardRowLayoutRowsWidgetsXyChart { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsXyChart - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChart(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChart) []DashboardRowLayoutRowsWidgetsXyChart { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChart - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChart(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSets(des, initial *DashboardRowLayoutRowsWidgetsXyChartDataSets, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartDataSets { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartDataSets{} - - cDes.TimeSeriesQuery = canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(des.TimeSeriesQuery, initial.TimeSeriesQuery, opts...) - if dcl.IsZeroValue(des.PlotType) || (dcl.IsEmptyValueIndirect(des.PlotType) && dcl.IsEmptyValueIndirect(initial.PlotType)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PlotType = initial.PlotType - } else { - cDes.PlotType = des.PlotType - } - if dcl.StringCanonicalize(des.LegendTemplate, initial.LegendTemplate) || dcl.IsZeroValue(des.LegendTemplate) { - cDes.LegendTemplate = initial.LegendTemplate - } else { - cDes.LegendTemplate = des.LegendTemplate - } - if dcl.StringCanonicalize(des.MinAlignmentPeriod, initial.MinAlignmentPeriod) || dcl.IsZeroValue(des.MinAlignmentPeriod) { - cDes.MinAlignmentPeriod = initial.MinAlignmentPeriod - } else { - cDes.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartDataSets, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartDataSets { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSets, 0, len(des)) - for _, d := range des { - cd := 
canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSets(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSets, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSets(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSets(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartDataSets) *DashboardRowLayoutRowsWidgetsXyChartDataSets { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartDataSets while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesQuery = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(c, des.TimeSeriesQuery, nw.TimeSeriesQuery) - if dcl.StringCanonicalize(des.LegendTemplate, nw.LegendTemplate) { - nw.LegendTemplate = des.LegendTemplate - } - if dcl.StringCanonicalize(des.MinAlignmentPeriod, nw.MinAlignmentPeriod) { - nw.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSets) []DashboardRowLayoutRowsWidgetsXyChartDataSets { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsXyChartDataSets - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartDataSetsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSets(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSets) []DashboardRowLayoutRowsWidgetsXyChartDataSets { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartDataSets - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSets(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(des, initial *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery{} - - cDes.TimeSeriesFilter = canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(des.TimeSeriesFilter, initial.TimeSeriesFilter, opts...) 
- cDes.TimeSeriesFilterRatio = canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(des.TimeSeriesFilterRatio, initial.TimeSeriesFilterRatio, opts...) - if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, initial.TimeSeriesQueryLanguage) || dcl.IsZeroValue(des.TimeSeriesQueryLanguage) { - cDes.TimeSeriesQueryLanguage = initial.TimeSeriesQueryLanguage - } else { - cDes.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, initial.UnitOverride) || dcl.IsZeroValue(des.UnitOverride) { - cDes.UnitOverride = initial.UnitOverride - } else { - cDes.UnitOverride = des.UnitOverride - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuerySlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesFilter = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, des.TimeSeriesFilter, nw.TimeSeriesFilter) - nw.TimeSeriesFilterRatio = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, des.TimeSeriesFilterRatio, nw.TimeSeriesFilterRatio) - if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, nw.TimeSeriesQueryLanguage) { - nw.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, nw.UnitOverride) { - nw.UnitOverride = des.UnitOverride - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuerySet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuerySlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(des, initial *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(des.Aggregation, initial.Aggregation, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, des.Aggregation, nw.Aggregation) - nw.SecondaryAggregation = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(des, initial *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des, initial *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des, initial *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(des, initial *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - - cDes.Numerator = canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(des.Numerator, initial.Numerator, opts...) - cDes.Denominator = canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(des.Denominator, initial.Denominator, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Numerator = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, des.Numerator, nw.Numerator) - nw.Denominator = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, des.Denominator, nw.Denominator) - nw.SecondaryAggregation = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(des, initial *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des.Aggregation, initial.Aggregation, opts...) 
- - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. 
- // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des, initial *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(des, initial *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des.Aggregation, initial.Aggregation, opts...) 
- - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. 
- // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des, initial *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des, initial *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des, initial *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartThresholds(des, initial *DashboardRowLayoutRowsWidgetsXyChartThresholds, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartThresholds { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartThresholds{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Value) || (dcl.IsEmptyValueIndirect(des.Value) && dcl.IsEmptyValueIndirect(initial.Value)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Value = initial.Value - } else { - cDes.Value = des.Value - } - if dcl.IsZeroValue(des.Color) || (dcl.IsEmptyValueIndirect(des.Color) && dcl.IsEmptyValueIndirect(initial.Color)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Color = initial.Color - } else { - cDes.Color = des.Color - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartThresholdsSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartThresholds, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartThresholds { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartThresholds, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartThresholds(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartThresholds, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartThresholds(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartThresholds(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartThresholds) *DashboardRowLayoutRowsWidgetsXyChartThresholds { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartThresholds while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartThresholdsSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartThresholds) []DashboardRowLayoutRowsWidgetsXyChartThresholds { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsXyChartThresholds - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartThresholdsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartThresholds(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartThresholdsSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartThresholds) []DashboardRowLayoutRowsWidgetsXyChartThresholds { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartThresholds - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartThresholds(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartXAxis(des, initial *DashboardRowLayoutRowsWidgetsXyChartXAxis, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartXAxis { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartXAxis{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Scale) || (dcl.IsEmptyValueIndirect(des.Scale) && dcl.IsEmptyValueIndirect(initial.Scale)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Scale = initial.Scale - } else { - cDes.Scale = des.Scale - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartXAxisSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartXAxis, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartXAxis { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartXAxis, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartXAxis(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartXAxis, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartXAxis(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartXAxis(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartXAxis) *DashboardRowLayoutRowsWidgetsXyChartXAxis { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartXAxis while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartXAxisSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartXAxis) []DashboardRowLayoutRowsWidgetsXyChartXAxis { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsXyChartXAxis - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartXAxisNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartXAxis(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartXAxisSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartXAxis) []DashboardRowLayoutRowsWidgetsXyChartXAxis { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartXAxis - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartXAxis(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartYAxis(des, initial *DashboardRowLayoutRowsWidgetsXyChartYAxis, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartYAxis { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartYAxis{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Scale) || (dcl.IsEmptyValueIndirect(des.Scale) && dcl.IsEmptyValueIndirect(initial.Scale)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Scale = initial.Scale - } else { - cDes.Scale = des.Scale - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartYAxisSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartYAxis, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartYAxis { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartYAxis, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartYAxis(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartYAxis, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartYAxis(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartYAxis(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartYAxis) *DashboardRowLayoutRowsWidgetsXyChartYAxis { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartYAxis while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartYAxisSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartYAxis) []DashboardRowLayoutRowsWidgetsXyChartYAxis { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsXyChartYAxis - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartYAxisNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartYAxis(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartYAxisSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartYAxis) []DashboardRowLayoutRowsWidgetsXyChartYAxis { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartYAxis - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartYAxis(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartChartOptions(des, initial *DashboardRowLayoutRowsWidgetsXyChartChartOptions, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsXyChartChartOptions { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsXyChartChartOptions{} - - if dcl.IsZeroValue(des.Mode) || (dcl.IsEmptyValueIndirect(des.Mode) && dcl.IsEmptyValueIndirect(initial.Mode)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Mode = initial.Mode - } else { - cDes.Mode = des.Mode - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsXyChartChartOptionsSlice(des, initial []DashboardRowLayoutRowsWidgetsXyChartChartOptions, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsXyChartChartOptions { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsXyChartChartOptions, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartChartOptions(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartChartOptions, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsXyChartChartOptions(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartChartOptions(c *Client, des, nw *DashboardRowLayoutRowsWidgetsXyChartChartOptions) *DashboardRowLayoutRowsWidgetsXyChartChartOptions { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsXyChartChartOptions while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartChartOptionsSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartChartOptions) []DashboardRowLayoutRowsWidgetsXyChartChartOptions { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsXyChartChartOptions - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsXyChartChartOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartChartOptions(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartChartOptionsSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsXyChartChartOptions) []DashboardRowLayoutRowsWidgetsXyChartChartOptions { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsXyChartChartOptions - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsXyChartChartOptions(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecard(des, initial *DashboardRowLayoutRowsWidgetsScorecard, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecard { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecard{} - - cDes.TimeSeriesQuery = canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(des.TimeSeriesQuery, initial.TimeSeriesQuery, opts...) - cDes.GaugeView = canonicalizeDashboardRowLayoutRowsWidgetsScorecardGaugeView(des.GaugeView, initial.GaugeView, opts...) - cDes.SparkChartView = canonicalizeDashboardRowLayoutRowsWidgetsScorecardSparkChartView(des.SparkChartView, initial.SparkChartView, opts...) 
- cDes.Thresholds = canonicalizeDashboardRowLayoutRowsWidgetsScorecardThresholdsSlice(des.Thresholds, initial.Thresholds, opts...) - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecard, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecard { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecard, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecard(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecard, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecard(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecard(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecard) *DashboardRowLayoutRowsWidgetsScorecard { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecard while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesQuery = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(c, des.TimeSeriesQuery, nw.TimeSeriesQuery) - nw.GaugeView = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardGaugeView(c, des.GaugeView, nw.GaugeView) - nw.SparkChartView = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardSparkChartView(c, des.SparkChartView, nw.SparkChartView) - nw.Thresholds = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardThresholdsSlice(c, des.Thresholds, nw.Thresholds) - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecard) []DashboardRowLayoutRowsWidgetsScorecard { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsScorecard - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecard(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecard) []DashboardRowLayoutRowsWidgetsScorecard { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecard - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecard(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(des, initial *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery{} - - cDes.TimeSeriesFilter = canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(des.TimeSeriesFilter, initial.TimeSeriesFilter, opts...) - cDes.TimeSeriesFilterRatio = canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(des.TimeSeriesFilterRatio, initial.TimeSeriesFilterRatio, opts...) 
- if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, initial.TimeSeriesQueryLanguage) || dcl.IsZeroValue(des.TimeSeriesQueryLanguage) { - cDes.TimeSeriesQueryLanguage = initial.TimeSeriesQueryLanguage - } else { - cDes.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, initial.UnitOverride) || dcl.IsZeroValue(des.UnitOverride) { - cDes.UnitOverride = initial.UnitOverride - } else { - cDes.UnitOverride = des.UnitOverride - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuerySlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesFilter = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, des.TimeSeriesFilter, nw.TimeSeriesFilter) - nw.TimeSeriesFilterRatio = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, des.TimeSeriesFilterRatio, nw.TimeSeriesFilterRatio) - if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, nw.TimeSeriesQueryLanguage) { - nw.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, nw.UnitOverride) { - nw.UnitOverride = des.UnitOverride - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuerySet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuerySlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. 
- // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(des, initial *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(des.Aggregation, initial.Aggregation, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, des.Aggregation, nw.Aggregation) - nw.SecondaryAggregation = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(des, initial *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
- } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des, initial *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des, initial *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(des, initial *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - - cDes.Numerator = canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(des.Numerator, initial.Numerator, opts...) - cDes.Denominator = canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(des.Denominator, initial.Denominator, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Numerator = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, des.Numerator, nw.Numerator) - nw.Denominator = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, des.Denominator, nw.Denominator) - nw.SecondaryAggregation = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(des, initial *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des.Aggregation, initial.Aggregation, opts...) 
- - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des, initial *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(des, initial *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des.Aggregation, initial.Aggregation, opts...) - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des, initial *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des, initial *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des, initial *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardGaugeView(des, initial *DashboardRowLayoutRowsWidgetsScorecardGaugeView, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardGaugeView { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardGaugeView{} - - if dcl.IsZeroValue(des.LowerBound) || (dcl.IsEmptyValueIndirect(des.LowerBound) && dcl.IsEmptyValueIndirect(initial.LowerBound)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.LowerBound = initial.LowerBound - } else { - cDes.LowerBound = des.LowerBound - } - if dcl.IsZeroValue(des.UpperBound) || (dcl.IsEmptyValueIndirect(des.UpperBound) && dcl.IsEmptyValueIndirect(initial.UpperBound)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.UpperBound = initial.UpperBound - } else { - cDes.UpperBound = des.UpperBound - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardGaugeViewSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardGaugeView, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardGaugeView { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardGaugeView, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardGaugeView(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardGaugeView, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardGaugeView(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardGaugeView(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardGaugeView) *DashboardRowLayoutRowsWidgetsScorecardGaugeView { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardGaugeView while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardGaugeViewSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardGaugeView) []DashboardRowLayoutRowsWidgetsScorecardGaugeView { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsScorecardGaugeView - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardGaugeViewNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardGaugeView(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardGaugeViewSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardGaugeView) []DashboardRowLayoutRowsWidgetsScorecardGaugeView { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardGaugeView - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardGaugeView(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardSparkChartView(des, initial *DashboardRowLayoutRowsWidgetsScorecardSparkChartView, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardSparkChartView { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardSparkChartView{} - - if dcl.IsZeroValue(des.SparkChartType) || (dcl.IsEmptyValueIndirect(des.SparkChartType) && dcl.IsEmptyValueIndirect(initial.SparkChartType)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.SparkChartType = initial.SparkChartType - } else { - cDes.SparkChartType = des.SparkChartType - } - if dcl.StringCanonicalize(des.MinAlignmentPeriod, initial.MinAlignmentPeriod) || dcl.IsZeroValue(des.MinAlignmentPeriod) { - cDes.MinAlignmentPeriod = initial.MinAlignmentPeriod - } else { - cDes.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardSparkChartView, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardSparkChartView { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardSparkChartView, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardSparkChartView(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardSparkChartView, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardSparkChartView(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardSparkChartView(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardSparkChartView) *DashboardRowLayoutRowsWidgetsScorecardSparkChartView { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardSparkChartView while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.MinAlignmentPeriod, nw.MinAlignmentPeriod) { - nw.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardSparkChartView) []DashboardRowLayoutRowsWidgetsScorecardSparkChartView { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsScorecardSparkChartView - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardSparkChartViewNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardSparkChartView(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardSparkChartView) []DashboardRowLayoutRowsWidgetsScorecardSparkChartView { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardSparkChartView - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardSparkChartView(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardThresholds(des, initial *DashboardRowLayoutRowsWidgetsScorecardThresholds, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsScorecardThresholds { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsScorecardThresholds{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Value) || (dcl.IsEmptyValueIndirect(des.Value) && dcl.IsEmptyValueIndirect(initial.Value)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Value = initial.Value - } else { - cDes.Value = des.Value - } - if dcl.IsZeroValue(des.Color) || (dcl.IsEmptyValueIndirect(des.Color) && dcl.IsEmptyValueIndirect(initial.Color)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Color = initial.Color - } else { - cDes.Color = des.Color - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsScorecardThresholdsSlice(des, initial []DashboardRowLayoutRowsWidgetsScorecardThresholds, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsScorecardThresholds { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsScorecardThresholds, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardThresholds(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardThresholds, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsScorecardThresholds(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardThresholds(c *Client, des, nw *DashboardRowLayoutRowsWidgetsScorecardThresholds) *DashboardRowLayoutRowsWidgetsScorecardThresholds { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsScorecardThresholds while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardThresholdsSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardThresholds) []DashboardRowLayoutRowsWidgetsScorecardThresholds { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsScorecardThresholds - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsScorecardThresholdsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardThresholds(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardThresholdsSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsScorecardThresholds) []DashboardRowLayoutRowsWidgetsScorecardThresholds { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsScorecardThresholds - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsScorecardThresholds(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsText(des, initial *DashboardRowLayoutRowsWidgetsText, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsText { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsText{} - - if dcl.StringCanonicalize(des.Content, initial.Content) || dcl.IsZeroValue(des.Content) { - cDes.Content = initial.Content - } else { - cDes.Content = des.Content - } - if dcl.IsZeroValue(des.Format) || (dcl.IsEmptyValueIndirect(des.Format) && dcl.IsEmptyValueIndirect(initial.Format)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Format = initial.Format - } else { - cDes.Format = des.Format - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsTextSlice(des, initial []DashboardRowLayoutRowsWidgetsText, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsText { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsText, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsText(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsText, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsText(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsText(c *Client, des, nw *DashboardRowLayoutRowsWidgetsText) *DashboardRowLayoutRowsWidgetsText { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsText while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Content, nw.Content) { - nw.Content = des.Content - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsTextSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsText) []DashboardRowLayoutRowsWidgetsText { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardRowLayoutRowsWidgetsText - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsTextNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsText(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsTextSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsText) []DashboardRowLayoutRowsWidgetsText { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsText - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsText(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsBlank(des, initial *DashboardRowLayoutRowsWidgetsBlank, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsBlank { - if des == nil { - return initial - } - if des.empty { - return des - } - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsBlank{} - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsBlankSlice(des, initial []DashboardRowLayoutRowsWidgetsBlank, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsBlank { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsBlank, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsBlank(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsBlank, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsBlank(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsBlank(c *Client, des, nw *DashboardRowLayoutRowsWidgetsBlank) *DashboardRowLayoutRowsWidgetsBlank { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsBlank while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsBlankSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsBlank) []DashboardRowLayoutRowsWidgetsBlank { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsBlank - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsBlankNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsBlank(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsBlankSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsBlank) []DashboardRowLayoutRowsWidgetsBlank { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsBlank - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsBlank(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardRowLayoutRowsWidgetsLogsPanel(des, initial *DashboardRowLayoutRowsWidgetsLogsPanel, opts ...dcl.ApplyOption) *DashboardRowLayoutRowsWidgetsLogsPanel { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardRowLayoutRowsWidgetsLogsPanel{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - if dcl.StringArrayCanonicalize(des.ResourceNames, initial.ResourceNames) { - cDes.ResourceNames = initial.ResourceNames - } else { - cDes.ResourceNames = des.ResourceNames - } - - return cDes -} - -func canonicalizeDashboardRowLayoutRowsWidgetsLogsPanelSlice(des, initial []DashboardRowLayoutRowsWidgetsLogsPanel, opts ...dcl.ApplyOption) []DashboardRowLayoutRowsWidgetsLogsPanel { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardRowLayoutRowsWidgetsLogsPanel, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsLogsPanel(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardRowLayoutRowsWidgetsLogsPanel, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardRowLayoutRowsWidgetsLogsPanel(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsLogsPanel(c *Client, des, nw *DashboardRowLayoutRowsWidgetsLogsPanel) *DashboardRowLayoutRowsWidgetsLogsPanel { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardRowLayoutRowsWidgetsLogsPanel while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - if dcl.StringArrayCanonicalize(des.ResourceNames, nw.ResourceNames) { - nw.ResourceNames = des.ResourceNames - } - - return nw -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsLogsPanelSet(c *Client, des, nw []DashboardRowLayoutRowsWidgetsLogsPanel) []DashboardRowLayoutRowsWidgetsLogsPanel { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardRowLayoutRowsWidgetsLogsPanel - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardRowLayoutRowsWidgetsLogsPanelNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsLogsPanel(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardRowLayoutRowsWidgetsLogsPanelSlice(c *Client, des, nw []DashboardRowLayoutRowsWidgetsLogsPanel) []DashboardRowLayoutRowsWidgetsLogsPanel { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardRowLayoutRowsWidgetsLogsPanel - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardRowLayoutRowsWidgetsLogsPanel(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayout(des, initial *DashboardColumnLayout, opts ...dcl.ApplyOption) *DashboardColumnLayout { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayout{} - - cDes.Columns = canonicalizeDashboardColumnLayoutColumnsSlice(des.Columns, initial.Columns, opts...) - - return cDes -} - -func canonicalizeDashboardColumnLayoutSlice(des, initial []DashboardColumnLayout, opts ...dcl.ApplyOption) []DashboardColumnLayout { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayout, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayout(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayout, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayout(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayout(c *Client, des, nw *DashboardColumnLayout) *DashboardColumnLayout { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayout while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Columns = canonicalizeNewDashboardColumnLayoutColumnsSlice(c, des.Columns, nw.Columns) - - return nw -} - -func canonicalizeNewDashboardColumnLayoutSet(c *Client, des, nw []DashboardColumnLayout) []DashboardColumnLayout { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayout - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayout(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutSlice(c *Client, des, nw []DashboardColumnLayout) []DashboardColumnLayout { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayout - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayout(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumns(des, initial *DashboardColumnLayoutColumns, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumns { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumns{} - - if dcl.IsZeroValue(des.Weight) || (dcl.IsEmptyValueIndirect(des.Weight) && dcl.IsEmptyValueIndirect(initial.Weight)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Weight = initial.Weight - } else { - cDes.Weight = des.Weight - } - cDes.Widgets = canonicalizeDashboardColumnLayoutColumnsWidgetsSlice(des.Widgets, initial.Widgets, opts...) - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsSlice(des, initial []DashboardColumnLayoutColumns, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumns { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumns, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumns(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumns, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumns(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumns(c *Client, des, nw *DashboardColumnLayoutColumns) *DashboardColumnLayoutColumns { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumns while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.Widgets = canonicalizeNewDashboardColumnLayoutColumnsWidgetsSlice(c, des.Widgets, nw.Widgets) - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsSet(c *Client, des, nw []DashboardColumnLayoutColumns) []DashboardColumnLayoutColumns { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumns - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumns(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsSlice(c *Client, des, nw []DashboardColumnLayoutColumns) []DashboardColumnLayoutColumns { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumns - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumns(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgets(des, initial *DashboardColumnLayoutColumnsWidgets, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgets { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.XyChart != nil || (initial != nil && initial.XyChart != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Scorecard, des.Text, des.Blank, des.LogsPanel) { - des.XyChart = nil - if initial != nil { - initial.XyChart = nil - } - } - } - - if des.Scorecard != nil || (initial != nil && initial.Scorecard != nil) { - // Check if anything else is set. - if dcl.AnySet(des.XyChart, des.Text, des.Blank, des.LogsPanel) { - des.Scorecard = nil - if initial != nil { - initial.Scorecard = nil - } - } - } - - if des.Text != nil || (initial != nil && initial.Text != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.XyChart, des.Scorecard, des.Blank, des.LogsPanel) { - des.Text = nil - if initial != nil { - initial.Text = nil - } - } - } - - if des.Blank != nil || (initial != nil && initial.Blank != nil) { - // Check if anything else is set. - if dcl.AnySet(des.XyChart, des.Scorecard, des.Text, des.LogsPanel) { - des.Blank = nil - if initial != nil { - initial.Blank = nil - } - } - } - - if des.LogsPanel != nil || (initial != nil && initial.LogsPanel != nil) { - // Check if anything else is set. - if dcl.AnySet(des.XyChart, des.Scorecard, des.Text, des.Blank) { - des.LogsPanel = nil - if initial != nil { - initial.LogsPanel = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgets{} - - if dcl.StringCanonicalize(des.Title, initial.Title) || dcl.IsZeroValue(des.Title) { - cDes.Title = initial.Title - } else { - cDes.Title = des.Title - } - cDes.XyChart = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChart(des.XyChart, initial.XyChart, opts...) - cDes.Scorecard = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecard(des.Scorecard, initial.Scorecard, opts...) - cDes.Text = canonicalizeDashboardColumnLayoutColumnsWidgetsText(des.Text, initial.Text, opts...) - cDes.Blank = canonicalizeDashboardColumnLayoutColumnsWidgetsBlank(des.Blank, initial.Blank, opts...) - cDes.LogsPanel = canonicalizeDashboardColumnLayoutColumnsWidgetsLogsPanel(des.LogsPanel, initial.LogsPanel, opts...) - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsSlice(des, initial []DashboardColumnLayoutColumnsWidgets, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgets { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgets, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgets(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgets, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgets(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgets(c *Client, des, nw *DashboardColumnLayoutColumnsWidgets) *DashboardColumnLayoutColumnsWidgets { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgets while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Title, nw.Title) { - nw.Title = des.Title - } - nw.XyChart = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChart(c, des.XyChart, nw.XyChart) - nw.Scorecard = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecard(c, des.Scorecard, nw.Scorecard) - nw.Text = canonicalizeNewDashboardColumnLayoutColumnsWidgetsText(c, des.Text, nw.Text) - nw.Blank = canonicalizeNewDashboardColumnLayoutColumnsWidgetsBlank(c, des.Blank, nw.Blank) - nw.LogsPanel = canonicalizeNewDashboardColumnLayoutColumnsWidgetsLogsPanel(c, des.LogsPanel, nw.LogsPanel) - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgets) []DashboardColumnLayoutColumnsWidgets { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgets - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgets(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgets) []DashboardColumnLayoutColumnsWidgets { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgets - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgets(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChart(des, initial *DashboardColumnLayoutColumnsWidgetsXyChart, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChart { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChart{} - - cDes.DataSets = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsSlice(des.DataSets, initial.DataSets, opts...) - if dcl.StringCanonicalize(des.TimeshiftDuration, initial.TimeshiftDuration) || dcl.IsZeroValue(des.TimeshiftDuration) { - cDes.TimeshiftDuration = initial.TimeshiftDuration - } else { - cDes.TimeshiftDuration = des.TimeshiftDuration - } - cDes.Thresholds = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartThresholdsSlice(des.Thresholds, initial.Thresholds, opts...) 
- cDes.XAxis = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartXAxis(des.XAxis, initial.XAxis, opts...) - cDes.YAxis = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartYAxis(des.YAxis, initial.YAxis, opts...) - cDes.ChartOptions = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(des.ChartOptions, initial.ChartOptions, opts...) - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChart, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChart { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChart, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChart(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChart, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChart(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChart(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChart) *DashboardColumnLayoutColumnsWidgetsXyChart { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChart while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.DataSets = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsSlice(c, des.DataSets, nw.DataSets) - if dcl.StringCanonicalize(des.TimeshiftDuration, nw.TimeshiftDuration) { - nw.TimeshiftDuration = des.TimeshiftDuration - } - nw.Thresholds = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartThresholdsSlice(c, des.Thresholds, nw.Thresholds) - nw.XAxis = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartXAxis(c, des.XAxis, nw.XAxis) - nw.YAxis = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartYAxis(c, des.YAxis, nw.YAxis) - nw.ChartOptions = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(c, des.ChartOptions, nw.ChartOptions) - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChart) []DashboardColumnLayoutColumnsWidgetsXyChart { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsXyChart - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChart(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChart) []DashboardColumnLayoutColumnsWidgetsXyChart { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. 
- // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChart - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChart(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSets(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartDataSets, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartDataSets { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartDataSets{} - - cDes.TimeSeriesQuery = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(des.TimeSeriesQuery, initial.TimeSeriesQuery, opts...) - if dcl.IsZeroValue(des.PlotType) || (dcl.IsEmptyValueIndirect(des.PlotType) && dcl.IsEmptyValueIndirect(initial.PlotType)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.PlotType = initial.PlotType - } else { - cDes.PlotType = des.PlotType - } - if dcl.StringCanonicalize(des.LegendTemplate, initial.LegendTemplate) || dcl.IsZeroValue(des.LegendTemplate) { - cDes.LegendTemplate = initial.LegendTemplate - } else { - cDes.LegendTemplate = des.LegendTemplate - } - if dcl.StringCanonicalize(des.MinAlignmentPeriod, initial.MinAlignmentPeriod) || dcl.IsZeroValue(des.MinAlignmentPeriod) { - cDes.MinAlignmentPeriod = initial.MinAlignmentPeriod - } else { - cDes.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartDataSets, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartDataSets { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSets, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSets(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSets, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSets(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSets(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartDataSets) *DashboardColumnLayoutColumnsWidgetsXyChartDataSets { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartDataSets while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesQuery = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(c, des.TimeSeriesQuery, nw.TimeSeriesQuery) - if dcl.StringCanonicalize(des.LegendTemplate, nw.LegendTemplate) { - nw.LegendTemplate = des.LegendTemplate - } - if dcl.StringCanonicalize(des.MinAlignmentPeriod, nw.MinAlignmentPeriod) { - nw.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSets) []DashboardColumnLayoutColumnsWidgetsXyChartDataSets { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSets - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSets(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSets) []DashboardColumnLayoutColumnsWidgetsXyChartDataSets { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSets - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSets(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery{} - - cDes.TimeSeriesFilter = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(des.TimeSeriesFilter, initial.TimeSeriesFilter, opts...) - cDes.TimeSeriesFilterRatio = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(des.TimeSeriesFilterRatio, initial.TimeSeriesFilterRatio, opts...) 
- if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, initial.TimeSeriesQueryLanguage) || dcl.IsZeroValue(des.TimeSeriesQueryLanguage) { - cDes.TimeSeriesQueryLanguage = initial.TimeSeriesQueryLanguage - } else { - cDes.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, initial.UnitOverride) || dcl.IsZeroValue(des.UnitOverride) { - cDes.UnitOverride = initial.UnitOverride - } else { - cDes.UnitOverride = des.UnitOverride - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuerySlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesFilter = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, des.TimeSeriesFilter, nw.TimeSeriesFilter) - nw.TimeSeriesFilterRatio = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, des.TimeSeriesFilterRatio, nw.TimeSeriesFilterRatio) - if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, nw.TimeSeriesQueryLanguage) { - nw.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, nw.UnitOverride) { - nw.UnitOverride = des.UnitOverride - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuerySet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuerySlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(des.Aggregation, initial.Aggregation, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) 
- cDes.PickTimeSeriesFilter = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, des.Aggregation, nw.Aggregation) - nw.SecondaryAggregation = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - - cDes.Numerator = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(des.Numerator, initial.Numerator, opts...) - cDes.Denominator = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(des.Denominator, initial.Denominator, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Numerator = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, des.Numerator, nw.Numerator) - nw.Denominator = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, des.Denominator, nw.Denominator) - nw.SecondaryAggregation = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des.Aggregation, initial.Aggregation, opts...) 
- - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des.Aggregation, initial.Aggregation, opts...) 
- - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartThresholds(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartThresholds, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartThresholds { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartThresholds{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Value) || (dcl.IsEmptyValueIndirect(des.Value) && dcl.IsEmptyValueIndirect(initial.Value)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Value = initial.Value - } else { - cDes.Value = des.Value - } - if dcl.IsZeroValue(des.Color) || (dcl.IsEmptyValueIndirect(des.Color) && dcl.IsEmptyValueIndirect(initial.Color)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Color = initial.Color - } else { - cDes.Color = des.Color - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartThresholdsSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartThresholds, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartThresholds { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartThresholds, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartThresholds(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartThresholds, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartThresholds(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartThresholds(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartThresholds) *DashboardColumnLayoutColumnsWidgetsXyChartThresholds { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartThresholds while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartThresholdsSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartThresholds) []DashboardColumnLayoutColumnsWidgetsXyChartThresholds { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsXyChartThresholds - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartThresholdsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartThresholds(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartThresholdsSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartThresholds) []DashboardColumnLayoutColumnsWidgetsXyChartThresholds { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartThresholds - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartThresholds(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartXAxis(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartXAxis, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartXAxis { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartXAxis{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Scale) || (dcl.IsEmptyValueIndirect(des.Scale) && dcl.IsEmptyValueIndirect(initial.Scale)) { - // Desired and initial values are equivalent, so set canonical 
desired value to initial value. - cDes.Scale = initial.Scale - } else { - cDes.Scale = des.Scale - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartXAxisSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartXAxis, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartXAxis { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartXAxis, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartXAxis(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartXAxis, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartXAxis(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartXAxis(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartXAxis) *DashboardColumnLayoutColumnsWidgetsXyChartXAxis { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartXAxis while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartXAxisSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartXAxis) []DashboardColumnLayoutColumnsWidgetsXyChartXAxis { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsXyChartXAxis - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartXAxisNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartXAxis(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartXAxisSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartXAxis) []DashboardColumnLayoutColumnsWidgetsXyChartXAxis { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartXAxis - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartXAxis(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartYAxis(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartYAxis, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartYAxis { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartYAxis{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Scale) || (dcl.IsEmptyValueIndirect(des.Scale) && dcl.IsEmptyValueIndirect(initial.Scale)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Scale = initial.Scale - } else { - cDes.Scale = des.Scale - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartYAxisSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartYAxis, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartYAxis { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartYAxis, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartYAxis(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartYAxis, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartYAxis(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartYAxis(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartYAxis) *DashboardColumnLayoutColumnsWidgetsXyChartYAxis { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartYAxis while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartYAxisSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartYAxis) []DashboardColumnLayoutColumnsWidgetsXyChartYAxis { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsXyChartYAxis - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartYAxisNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartYAxis(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartYAxisSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartYAxis) []DashboardColumnLayoutColumnsWidgetsXyChartYAxis { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartYAxis - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartYAxis(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(des, initial *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsXyChartChartOptions{} - - if dcl.IsZeroValue(des.Mode) || (dcl.IsEmptyValueIndirect(des.Mode) && dcl.IsEmptyValueIndirect(initial.Mode)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Mode = initial.Mode - } else { - cDes.Mode = des.Mode - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsSlice(des, initial []DashboardColumnLayoutColumnsWidgetsXyChartChartOptions, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsXyChartChartOptions { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartChartOptions, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartChartOptions, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsXyChartChartOptions while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) []DashboardColumnLayoutColumnsWidgetsXyChartChartOptions { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsXyChartChartOptions - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) []DashboardColumnLayoutColumnsWidgetsXyChartChartOptions { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsXyChartChartOptions - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecard(des, initial *DashboardColumnLayoutColumnsWidgetsScorecard, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecard { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecard{} - - cDes.TimeSeriesQuery = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(des.TimeSeriesQuery, initial.TimeSeriesQuery, opts...) - cDes.GaugeView = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(des.GaugeView, initial.GaugeView, opts...) 
- cDes.SparkChartView = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(des.SparkChartView, initial.SparkChartView, opts...) - cDes.Thresholds = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardThresholdsSlice(des.Thresholds, initial.Thresholds, opts...) - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecard, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecard { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecard, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecard(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecard, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecard(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecard(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecard) *DashboardColumnLayoutColumnsWidgetsScorecard { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecard while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesQuery = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(c, des.TimeSeriesQuery, nw.TimeSeriesQuery) - nw.GaugeView = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(c, des.GaugeView, nw.GaugeView) - nw.SparkChartView = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(c, des.SparkChartView, nw.SparkChartView) - nw.Thresholds = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardThresholdsSlice(c, des.Thresholds, nw.Thresholds) - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecard) []DashboardColumnLayoutColumnsWidgetsScorecard { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsScorecard - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecard(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecard) []DashboardColumnLayoutColumnsWidgetsScorecard { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecard - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecard(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery{} - - cDes.TimeSeriesFilter = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(des.TimeSeriesFilter, initial.TimeSeriesFilter, opts...) - cDes.TimeSeriesFilterRatio = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(des.TimeSeriesFilterRatio, initial.TimeSeriesFilterRatio, opts...) 
- if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, initial.TimeSeriesQueryLanguage) || dcl.IsZeroValue(des.TimeSeriesQueryLanguage) { - cDes.TimeSeriesQueryLanguage = initial.TimeSeriesQueryLanguage - } else { - cDes.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, initial.UnitOverride) || dcl.IsZeroValue(des.UnitOverride) { - cDes.UnitOverride = initial.UnitOverride - } else { - cDes.UnitOverride = des.UnitOverride - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuerySlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.TimeSeriesFilter = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, des.TimeSeriesFilter, nw.TimeSeriesFilter) - nw.TimeSeriesFilterRatio = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, des.TimeSeriesFilterRatio, nw.TimeSeriesFilterRatio) - if dcl.StringCanonicalize(des.TimeSeriesQueryLanguage, nw.TimeSeriesQueryLanguage) { - nw.TimeSeriesQueryLanguage = des.TimeSeriesQueryLanguage - } - if dcl.StringCanonicalize(des.UnitOverride, nw.UnitOverride) { - nw.UnitOverride = des.UnitOverride - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuerySet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuerySlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery { - if des == nil { - return nw - } - - // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(des.Aggregation, initial.Aggregation, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, des.Aggregation, nw.Aggregation) - nw.SecondaryAggregation = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - - cDes.Numerator = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(des.Numerator, initial.Numerator, opts...) - cDes.Denominator = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(des.Denominator, initial.Denominator, opts...) - cDes.SecondaryAggregation = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des.SecondaryAggregation, initial.SecondaryAggregation, opts...) - cDes.PickTimeSeriesFilter = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des.PickTimeSeriesFilter, initial.PickTimeSeriesFilter, opts...) 
- - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Numerator = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, des.Numerator, nw.Numerator) - nw.Denominator = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, des.Denominator, nw.Denominator) - nw.SecondaryAggregation = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, des.SecondaryAggregation, nw.SecondaryAggregation) - nw.PickTimeSeriesFilter = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, des.PickTimeSeriesFilter, nw.PickTimeSeriesFilter) - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des.Aggregation, initial.Aggregation, opts...) 
- - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. 
- // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - cDes.Aggregation = canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des.Aggregation, initial.Aggregation, opts...) 
- - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - nw.Aggregation = canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, des.Aggregation, nw.Aggregation) - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. 
- // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.StringCanonicalize(des.AlignmentPeriod, initial.AlignmentPeriod) || dcl.IsZeroValue(des.AlignmentPeriod) { - cDes.AlignmentPeriod = initial.AlignmentPeriod - } else { - cDes.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.IsZeroValue(des.PerSeriesAligner) || (dcl.IsEmptyValueIndirect(des.PerSeriesAligner) && dcl.IsEmptyValueIndirect(initial.PerSeriesAligner)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.PerSeriesAligner = initial.PerSeriesAligner - } else { - cDes.PerSeriesAligner = des.PerSeriesAligner - } - if dcl.IsZeroValue(des.CrossSeriesReducer) || (dcl.IsEmptyValueIndirect(des.CrossSeriesReducer) && dcl.IsEmptyValueIndirect(initial.CrossSeriesReducer)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.CrossSeriesReducer = initial.CrossSeriesReducer - } else { - cDes.CrossSeriesReducer = des.CrossSeriesReducer - } - if dcl.StringArrayCanonicalize(des.GroupByFields, initial.GroupByFields) { - cDes.GroupByFields = initial.GroupByFields - } else { - cDes.GroupByFields = des.GroupByFields - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.AlignmentPeriod, nw.AlignmentPeriod) { - nw.AlignmentPeriod = des.AlignmentPeriod - } - if dcl.StringArrayCanonicalize(des.GroupByFields, nw.GroupByFields) { - nw.GroupByFields = des.GroupByFields - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsZeroValue(des.RankingMethod) || (dcl.IsEmptyValueIndirect(des.RankingMethod) && dcl.IsEmptyValueIndirect(initial.RankingMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.RankingMethod = initial.RankingMethod - } else { - cDes.RankingMethod = des.RankingMethod - } - if dcl.IsZeroValue(des.NumTimeSeries) || (dcl.IsEmptyValueIndirect(des.NumTimeSeries) && dcl.IsEmptyValueIndirect(initial.NumTimeSeries)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.NumTimeSeries = initial.NumTimeSeries - } else { - cDes.NumTimeSeries = des.NumTimeSeries - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardGaugeView{} - - if dcl.IsZeroValue(des.LowerBound) || (dcl.IsEmptyValueIndirect(des.LowerBound) && dcl.IsEmptyValueIndirect(initial.LowerBound)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.LowerBound = initial.LowerBound - } else { - cDes.LowerBound = des.LowerBound - } - if dcl.IsZeroValue(des.UpperBound) || (dcl.IsEmptyValueIndirect(des.UpperBound) && dcl.IsEmptyValueIndirect(initial.UpperBound)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.UpperBound = initial.UpperBound - } else { - cDes.UpperBound = des.UpperBound - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardGaugeView, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardGaugeView { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardGaugeView, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardGaugeView, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardGaugeView while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) []DashboardColumnLayoutColumnsWidgetsScorecardGaugeView { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsScorecardGaugeView - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) []DashboardColumnLayoutColumnsWidgetsScorecardGaugeView { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardGaugeView - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView{} - - if dcl.IsZeroValue(des.SparkChartType) || (dcl.IsEmptyValueIndirect(des.SparkChartType) && dcl.IsEmptyValueIndirect(initial.SparkChartType)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.SparkChartType = initial.SparkChartType - } else { - cDes.SparkChartType = des.SparkChartType - } - if dcl.StringCanonicalize(des.MinAlignmentPeriod, initial.MinAlignmentPeriod) || dcl.IsZeroValue(des.MinAlignmentPeriod) { - cDes.MinAlignmentPeriod = initial.MinAlignmentPeriod - } else { - cDes.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.MinAlignmentPeriod, nw.MinAlignmentPeriod) { - nw.MinAlignmentPeriod = des.MinAlignmentPeriod - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardThresholds(des, initial *DashboardColumnLayoutColumnsWidgetsScorecardThresholds, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsScorecardThresholds { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsScorecardThresholds{} - - if dcl.StringCanonicalize(des.Label, initial.Label) || dcl.IsZeroValue(des.Label) { - cDes.Label = initial.Label - } else { - cDes.Label = des.Label - } - if dcl.IsZeroValue(des.Value) || (dcl.IsEmptyValueIndirect(des.Value) && dcl.IsEmptyValueIndirect(initial.Value)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Value = initial.Value - } else { - cDes.Value = des.Value - } - if dcl.IsZeroValue(des.Color) || (dcl.IsEmptyValueIndirect(des.Color) && dcl.IsEmptyValueIndirect(initial.Color)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Color = initial.Color - } else { - cDes.Color = des.Color - } - if dcl.IsZeroValue(des.Direction) || (dcl.IsEmptyValueIndirect(des.Direction) && dcl.IsEmptyValueIndirect(initial.Direction)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Direction = initial.Direction - } else { - cDes.Direction = des.Direction - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardThresholdsSlice(des, initial []DashboardColumnLayoutColumnsWidgetsScorecardThresholds, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsScorecardThresholds { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardThresholds, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardThresholds(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardThresholds, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsScorecardThresholds(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardThresholds(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsScorecardThresholds) *DashboardColumnLayoutColumnsWidgetsScorecardThresholds { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsScorecardThresholds while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Label, nw.Label) { - nw.Label = des.Label - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardThresholdsSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardThresholds) []DashboardColumnLayoutColumnsWidgetsScorecardThresholds { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsScorecardThresholds - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsScorecardThresholdsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardThresholds(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardThresholdsSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsScorecardThresholds) []DashboardColumnLayoutColumnsWidgetsScorecardThresholds { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsScorecardThresholds - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsScorecardThresholds(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsText(des, initial *DashboardColumnLayoutColumnsWidgetsText, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsText { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsText{} - - if dcl.StringCanonicalize(des.Content, initial.Content) || dcl.IsZeroValue(des.Content) { - cDes.Content = initial.Content - } else { - cDes.Content = des.Content - } - if dcl.IsZeroValue(des.Format) || (dcl.IsEmptyValueIndirect(des.Format) && dcl.IsEmptyValueIndirect(initial.Format)) { - // Desired and initial values are equivalent, so set canonical 
desired value to initial value. - cDes.Format = initial.Format - } else { - cDes.Format = des.Format - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsTextSlice(des, initial []DashboardColumnLayoutColumnsWidgetsText, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsText { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsText, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsText(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsText, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsText(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsText(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsText) *DashboardColumnLayoutColumnsWidgetsText { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsText while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Content, nw.Content) { - nw.Content = des.Content - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsTextSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsText) []DashboardColumnLayoutColumnsWidgetsText { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []DashboardColumnLayoutColumnsWidgetsText - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsTextNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsText(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsTextSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsText) []DashboardColumnLayoutColumnsWidgetsText { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsText - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsText(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsBlank(des, initial *DashboardColumnLayoutColumnsWidgetsBlank, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsBlank { - if des == nil { - return initial - } - if des.empty { - return des - } - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsBlank{} - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsBlankSlice(des, initial []DashboardColumnLayoutColumnsWidgetsBlank, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsBlank { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsBlank, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsBlank(&d, nil, 
opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsBlank, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsBlank(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsBlank(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsBlank) *DashboardColumnLayoutColumnsWidgetsBlank { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsBlank while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsBlankSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsBlank) []DashboardColumnLayoutColumnsWidgetsBlank { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsBlank - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsBlankNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsBlank(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsBlankSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsBlank) []DashboardColumnLayoutColumnsWidgetsBlank { - if des == nil { - return nw - } - - // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsBlank - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsBlank(c, &d, &n)) - } - - return items -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsLogsPanel(des, initial *DashboardColumnLayoutColumnsWidgetsLogsPanel, opts ...dcl.ApplyOption) *DashboardColumnLayoutColumnsWidgetsLogsPanel { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &DashboardColumnLayoutColumnsWidgetsLogsPanel{} - - if dcl.StringCanonicalize(des.Filter, initial.Filter) || dcl.IsZeroValue(des.Filter) { - cDes.Filter = initial.Filter - } else { - cDes.Filter = des.Filter - } - if dcl.StringArrayCanonicalize(des.ResourceNames, initial.ResourceNames) { - cDes.ResourceNames = initial.ResourceNames - } else { - cDes.ResourceNames = des.ResourceNames - } - - return cDes -} - -func canonicalizeDashboardColumnLayoutColumnsWidgetsLogsPanelSlice(des, initial []DashboardColumnLayoutColumnsWidgetsLogsPanel, opts ...dcl.ApplyOption) []DashboardColumnLayoutColumnsWidgetsLogsPanel { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]DashboardColumnLayoutColumnsWidgetsLogsPanel, 0, len(des)) - for _, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsLogsPanel(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]DashboardColumnLayoutColumnsWidgetsLogsPanel, 0, len(des)) - for i, d := range des { - cd := canonicalizeDashboardColumnLayoutColumnsWidgetsLogsPanel(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsLogsPanel(c *Client, des, nw *DashboardColumnLayoutColumnsWidgetsLogsPanel) *DashboardColumnLayoutColumnsWidgetsLogsPanel { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for DashboardColumnLayoutColumnsWidgetsLogsPanel while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Filter, nw.Filter) { - nw.Filter = des.Filter - } - if dcl.StringArrayCanonicalize(des.ResourceNames, nw.ResourceNames) { - nw.ResourceNames = des.ResourceNames - } - - return nw -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsLogsPanelSet(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsLogsPanel) []DashboardColumnLayoutColumnsWidgetsLogsPanel { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []DashboardColumnLayoutColumnsWidgetsLogsPanel - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareDashboardColumnLayoutColumnsWidgetsLogsPanelNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsLogsPanel(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewDashboardColumnLayoutColumnsWidgetsLogsPanelSlice(c *Client, des, nw []DashboardColumnLayoutColumnsWidgetsLogsPanel) []DashboardColumnLayoutColumnsWidgetsLogsPanel { - if des == nil { - return nw - } - - // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []DashboardColumnLayoutColumnsWidgetsLogsPanel - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewDashboardColumnLayoutColumnsWidgetsLogsPanel(c, &d, &n)) - } - - return items -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. -func diffDashboard(c *Client, desired, actual *Dashboard, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.GridLayout, actual.GridLayout, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutNewStyle, EmptyObject: EmptyDashboardGridLayout, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GridLayout")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.MosaicLayout, actual.MosaicLayout, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutNewStyle, EmptyObject: EmptyDashboardMosaicLayout, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("MosaicLayout")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.RowLayout, actual.RowLayout, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutNewStyle, EmptyObject: EmptyDashboardRowLayout, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RowLayout")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.ColumnLayout, actual.ColumnLayout, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutNewStyle, EmptyObject: EmptyDashboardColumnLayout, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("ColumnLayout")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} -func compareDashboardGridLayoutNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayout) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayout) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayout or *DashboardGridLayout", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayout) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayout) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayout", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Columns, actual.Columns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Columns")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Widgets, actual.Widgets, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgets, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Widgets")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgets) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgets or *DashboardGridLayoutWidgets", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgets) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgets", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Title, actual.Title, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Title")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.XyChart, actual.XyChart, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChart, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("XyChart")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Scorecard, actual.Scorecard, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecard, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Scorecard")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Text, actual.Text, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsTextNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsText, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Text")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Blank, actual.Blank, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsBlankNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsBlank, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Blank")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LogsPanel, actual.LogsPanel, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsLogsPanelNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsLogsPanel, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LogsPanel")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChart) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChart) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChart or *DashboardGridLayoutWidgetsXyChart", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChart) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChart) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChart", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.DataSets, actual.DataSets, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartDataSetsNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartDataSets, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("DataSets")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeshiftDuration, actual.TimeshiftDuration, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeshiftDuration")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Thresholds, actual.Thresholds, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartThresholdsNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartThresholds, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Thresholds")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.XAxis, actual.XAxis, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartXAxisNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartXAxis, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("XAxis")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.YAxis, actual.YAxis, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartYAxisNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartYAxis, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("YAxis")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ChartOptions, actual.ChartOptions, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartChartOptionsNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartChartOptions, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("ChartOptions")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartDataSetsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartDataSets) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartDataSets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSets or *DashboardGridLayoutWidgetsXyChartDataSets", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartDataSets) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartDataSets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSets", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesQuery, actual.TimeSeriesQuery, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQuery")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PlotType, actual.PlotType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PlotType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LegendTemplate, actual.LegendTemplate, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("LegendTemplate")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.MinAlignmentPeriod, actual.MinAlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("MinAlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery or *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesFilter, actual.TimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.TimeSeriesFilterRatio, actual.TimeSeriesFilterRatio, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilterRatio")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeSeriesQueryLanguage, actual.TimeSeriesQueryLanguage, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQueryLanguage")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.UnitOverride, actual.UnitOverride, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("UnitOverride")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter or *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation or *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation or *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter or *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio or *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Numerator, actual.Numerator, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Numerator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Denominator, actual.Denominator, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Denominator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator or *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation or *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator or *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, 
fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation or *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, 
actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation or *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter or *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", 
OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartThresholdsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartThresholds) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartThresholds or *DashboardGridLayoutWidgetsXyChartThresholds", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartThresholds) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartThresholds", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Value, actual.Value, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Value")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Color, actual.Color, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Color")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartXAxisNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartXAxis) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartXAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartXAxis or *DashboardGridLayoutWidgetsXyChartXAxis", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartXAxis) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartXAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartXAxis", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Scale, actual.Scale, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Scale")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartYAxisNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartYAxis) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartYAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartYAxis or *DashboardGridLayoutWidgetsXyChartYAxis", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartYAxis) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartYAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartYAxis", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Scale, actual.Scale, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Scale")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsXyChartChartOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsXyChartChartOptions) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsXyChartChartOptions) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartChartOptions or *DashboardGridLayoutWidgetsXyChartChartOptions", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsXyChartChartOptions) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsXyChartChartOptions) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsXyChartChartOptions", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Mode, actual.Mode, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Mode")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecard) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecard) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecard or *DashboardGridLayoutWidgetsScorecard", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecard) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecard) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecard", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesQuery, actual.TimeSeriesQuery, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQuery, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQuery")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GaugeView, actual.GaugeView, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardGaugeViewNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardGaugeView, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GaugeView")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.SparkChartView, actual.SparkChartView, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardSparkChartViewNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardSparkChartView, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SparkChartView")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Thresholds, actual.Thresholds, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardThresholdsNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardThresholds, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Thresholds")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQuery or *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQuery", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesFilter, actual.TimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterNewStyle, EmptyObject: 
EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeSeriesFilterRatio, actual.TimeSeriesFilterRatio, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilterRatio")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeSeriesQueryLanguage, actual.TimeSeriesQueryLanguage, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQueryLanguage")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.UnitOverride, actual.UnitOverride, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("UnitOverride")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter or *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation or *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation or *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, 
fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter or *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio or *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Numerator, actual.Numerator, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Numerator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Denominator, actual.Denominator, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Denominator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator or *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation or *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator or *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil 
{ - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle, EmptyObject: EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation or *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, 
fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation or *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter or *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardGaugeViewNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardGaugeView) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardGaugeView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardGaugeView or *DashboardGridLayoutWidgetsScorecardGaugeView", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardGaugeView) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardGaugeView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardGaugeView", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.LowerBound, actual.LowerBound, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("LowerBound")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.UpperBound, actual.UpperBound, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("UpperBound")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardSparkChartViewNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardSparkChartView) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardSparkChartView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardSparkChartView or *DashboardGridLayoutWidgetsScorecardSparkChartView", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardSparkChartView) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardSparkChartView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardSparkChartView", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.SparkChartType, actual.SparkChartType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SparkChartType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.MinAlignmentPeriod, actual.MinAlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("MinAlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsScorecardThresholdsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsScorecardThresholds) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsScorecardThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardThresholds or *DashboardGridLayoutWidgetsScorecardThresholds", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsScorecardThresholds) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsScorecardThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsScorecardThresholds", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Value, actual.Value, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Value")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Color, actual.Color, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Color")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsTextNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsText) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsText) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsText or *DashboardGridLayoutWidgetsText", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsText) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsText) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsText", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Content, actual.Content, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Content")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Format, actual.Format, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Format")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsBlankNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - return diffs, nil -} - -func compareDashboardGridLayoutWidgetsLogsPanelNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardGridLayoutWidgetsLogsPanel) - if !ok { - desiredNotPointer, ok := d.(DashboardGridLayoutWidgetsLogsPanel) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsLogsPanel or *DashboardGridLayoutWidgetsLogsPanel", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardGridLayoutWidgetsLogsPanel) - if !ok { - actualNotPointer, ok := a.(DashboardGridLayoutWidgetsLogsPanel) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardGridLayoutWidgetsLogsPanel", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ResourceNames, actual.ResourceNames, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("ResourceNames")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayout) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayout) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayout or *DashboardMosaicLayout", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayout) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayout) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayout", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Columns, actual.Columns, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Columns")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Tiles, actual.Tiles, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTiles, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Tiles")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTiles) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTiles) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTiles or *DashboardMosaicLayoutTiles", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTiles) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTiles) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTiles", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.XPos, actual.XPos, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("XPos")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.YPos, actual.YPos, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("YPos")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Width, actual.Width, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Width")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Height, actual.Height, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Height")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Widget, actual.Widget, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidget, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Widget")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidget) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidget) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidget or *DashboardMosaicLayoutTilesWidget", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidget) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidget) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidget", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Title, actual.Title, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Title")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.XyChart, actual.XyChart, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChart, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("XyChart")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Scorecard, actual.Scorecard, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecard, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Scorecard")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Text, actual.Text, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetTextNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetText, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Text")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Blank, actual.Blank, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetBlankNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetBlank, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Blank")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LogsPanel, actual.LogsPanel, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetLogsPanelNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetLogsPanel, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LogsPanel")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChart) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChart) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChart or *DashboardMosaicLayoutTilesWidgetXyChart", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChart) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChart) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChart", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.DataSets, actual.DataSets, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSets, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("DataSets")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeshiftDuration, actual.TimeshiftDuration, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeshiftDuration")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Thresholds, actual.Thresholds, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartThresholdsNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartThresholds, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Thresholds")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.XAxis, actual.XAxis, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartXAxisNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartXAxis, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("XAxis")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.YAxis, actual.YAxis, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartYAxisNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartYAxis, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("YAxis")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ChartOptions, actual.ChartOptions, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartChartOptionsNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartChartOptions, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("ChartOptions")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartDataSets) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartDataSets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSets or *DashboardMosaicLayoutTilesWidgetXyChartDataSets", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartDataSets) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartDataSets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSets", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesQuery, actual.TimeSeriesQuery, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQuery")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PlotType, actual.PlotType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PlotType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LegendTemplate, actual.LegendTemplate, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("LegendTemplate")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.MinAlignmentPeriod, actual.MinAlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("MinAlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery or *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesFilter, actual.TimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.TimeSeriesFilterRatio, actual.TimeSeriesFilterRatio, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilterRatio")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeSeriesQueryLanguage, actual.TimeSeriesQueryLanguage, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQueryLanguage")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.UnitOverride, actual.UnitOverride, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("UnitOverride")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter or *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation or *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation or *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, 
dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter or *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio or *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Numerator, actual.Numerator, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Numerator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Denominator, actual.Denominator, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Denominator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator or *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation or *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator or *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation or *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation or *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter or *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, 
actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartThresholdsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartThresholds) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartThresholds or *DashboardMosaicLayoutTilesWidgetXyChartThresholds", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartThresholds) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartThresholds", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - 
diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Value, actual.Value, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Value")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Color, actual.Color, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Color")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartXAxisNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartXAxis) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartXAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartXAxis or *DashboardMosaicLayoutTilesWidgetXyChartXAxis", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartXAxis) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartXAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartXAxis", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - 
diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Scale, actual.Scale, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Scale")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartYAxisNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartYAxis) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartYAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartYAxis or *DashboardMosaicLayoutTilesWidgetXyChartYAxis", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartYAxis) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartYAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartYAxis", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Scale, actual.Scale, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Scale")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetXyChartChartOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetXyChartChartOptions) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetXyChartChartOptions) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartChartOptions or *DashboardMosaicLayoutTilesWidgetXyChartChartOptions", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetXyChartChartOptions) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetXyChartChartOptions) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetXyChartChartOptions", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Mode, actual.Mode, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Mode")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecard) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecard) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecard or *DashboardMosaicLayoutTilesWidgetScorecard", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecard) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecard) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecard", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesQuery, actual.TimeSeriesQuery, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQuery")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GaugeView, actual.GaugeView, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardGaugeViewNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardGaugeView, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GaugeView")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.SparkChartView, actual.SparkChartView, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardSparkChartView, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SparkChartView")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Thresholds, actual.Thresholds, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardThresholdsNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardThresholds, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Thresholds")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery or *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesFilter, actual.TimeSeriesFilter, dcl.DiffInfo{ObjectFunction: 
compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeSeriesFilterRatio, actual.TimeSeriesFilterRatio, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilterRatio")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeSeriesQueryLanguage, actual.TimeSeriesQueryLanguage, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQueryLanguage")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.UnitOverride, actual.UnitOverride, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("UnitOverride")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter or *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation or *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation or *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter or *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio or *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Numerator, actual.Numerator, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Numerator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Denominator, actual.Denominator, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Denominator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator or *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation or *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator or *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, 
fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle, EmptyObject: EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation or *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, 
actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation or *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter or *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", 
OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardGaugeViewNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardGaugeView) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardGaugeView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardGaugeView or *DashboardMosaicLayoutTilesWidgetScorecardGaugeView", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardGaugeView) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardGaugeView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardGaugeView", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.LowerBound, actual.LowerBound, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("LowerBound")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.UpperBound, actual.UpperBound, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("UpperBound")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardSparkChartView or *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardSparkChartView", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.SparkChartType, actual.SparkChartType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SparkChartType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.MinAlignmentPeriod, actual.MinAlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("MinAlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetScorecardThresholdsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetScorecardThresholds) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetScorecardThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardThresholds or *DashboardMosaicLayoutTilesWidgetScorecardThresholds", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetScorecardThresholds) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetScorecardThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetScorecardThresholds", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Value, actual.Value, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Value")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Color, actual.Color, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Color")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetTextNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetText) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetText) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetText or *DashboardMosaicLayoutTilesWidgetText", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetText) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetText) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetText", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Content, actual.Content, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Content")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Format, actual.Format, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Format")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetBlankNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - return diffs, nil -} - -func compareDashboardMosaicLayoutTilesWidgetLogsPanelNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardMosaicLayoutTilesWidgetLogsPanel) - if !ok { - desiredNotPointer, ok := d.(DashboardMosaicLayoutTilesWidgetLogsPanel) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetLogsPanel or *DashboardMosaicLayoutTilesWidgetLogsPanel", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardMosaicLayoutTilesWidgetLogsPanel) - if !ok { - actualNotPointer, ok := a.(DashboardMosaicLayoutTilesWidgetLogsPanel) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardMosaicLayoutTilesWidgetLogsPanel", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ResourceNames, actual.ResourceNames, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("ResourceNames")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayout) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayout) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayout or *DashboardRowLayout", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayout) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayout) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayout", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Rows, actual.Rows, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsNewStyle, EmptyObject: EmptyDashboardRowLayoutRows, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Rows")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRows) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRows) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRows or *DashboardRowLayoutRows", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRows) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRows) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRows", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Weight, actual.Weight, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Weight")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Widgets, actual.Widgets, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgets, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Widgets")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgets) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgets or *DashboardRowLayoutRowsWidgets", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgets) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgets", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Title, actual.Title, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Title")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.XyChart, actual.XyChart, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChart, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("XyChart")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Scorecard, actual.Scorecard, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecard, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Scorecard")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Text, actual.Text, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsTextNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsText, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Text")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Blank, actual.Blank, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsBlankNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsBlank, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Blank")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LogsPanel, actual.LogsPanel, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsLogsPanelNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsLogsPanel, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LogsPanel")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChart) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChart) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChart or *DashboardRowLayoutRowsWidgetsXyChart", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChart) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChart) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChart", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.DataSets, actual.DataSets, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartDataSetsNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartDataSets, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("DataSets")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeshiftDuration, actual.TimeshiftDuration, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeshiftDuration")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Thresholds, actual.Thresholds, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartThresholdsNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartThresholds, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Thresholds")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.XAxis, actual.XAxis, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartXAxisNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartXAxis, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("XAxis")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.YAxis, actual.YAxis, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartYAxisNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartYAxis, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("YAxis")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ChartOptions, actual.ChartOptions, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartChartOptionsNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartChartOptions, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("ChartOptions")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartDataSetsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartDataSets) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartDataSets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSets or *DashboardRowLayoutRowsWidgetsXyChartDataSets", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartDataSets) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartDataSets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSets", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesQuery, actual.TimeSeriesQuery, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQuery")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PlotType, actual.PlotType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PlotType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LegendTemplate, actual.LegendTemplate, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("LegendTemplate")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.MinAlignmentPeriod, actual.MinAlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("MinAlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery or *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesFilter, actual.TimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.TimeSeriesFilterRatio, actual.TimeSeriesFilterRatio, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilterRatio")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeSeriesQueryLanguage, actual.TimeSeriesQueryLanguage, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQueryLanguage")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.UnitOverride, actual.UnitOverride, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("UnitOverride")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter or *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation or *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation or *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter or *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio or *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Numerator, actual.Numerator, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Numerator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Denominator, actual.Denominator, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Denominator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator or *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation or *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator or *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation or *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation or *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter or *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, 
dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartThresholdsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartThresholds) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartThresholds or *DashboardRowLayoutRowsWidgetsXyChartThresholds", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartThresholds) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartThresholds", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Value, actual.Value, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Value")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Color, actual.Color, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Color")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartXAxisNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartXAxis) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartXAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartXAxis or *DashboardRowLayoutRowsWidgetsXyChartXAxis", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartXAxis) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartXAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartXAxis", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Scale, actual.Scale, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Scale")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartYAxisNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartYAxis) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartYAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartYAxis or *DashboardRowLayoutRowsWidgetsXyChartYAxis", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartYAxis) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartYAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartYAxis", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Scale, actual.Scale, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Scale")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsXyChartChartOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsXyChartChartOptions) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsXyChartChartOptions) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartChartOptions or *DashboardRowLayoutRowsWidgetsXyChartChartOptions", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsXyChartChartOptions) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsXyChartChartOptions) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsXyChartChartOptions", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Mode, actual.Mode, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Mode")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecard) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecard) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecard or *DashboardRowLayoutRowsWidgetsScorecard", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecard) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecard) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecard", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesQuery, actual.TimeSeriesQuery, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQuery")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GaugeView, actual.GaugeView, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardGaugeViewNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardGaugeView, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GaugeView")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.SparkChartView, actual.SparkChartView, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardSparkChartViewNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardSparkChartView, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SparkChartView")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Thresholds, actual.Thresholds, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardThresholdsNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardThresholds, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Thresholds")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery or *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesFilter, actual.TimeSeriesFilter, dcl.DiffInfo{ObjectFunction: 
compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeSeriesFilterRatio, actual.TimeSeriesFilterRatio, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilterRatio")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeSeriesQueryLanguage, actual.TimeSeriesQueryLanguage, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQueryLanguage")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.UnitOverride, actual.UnitOverride, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("UnitOverride")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter or *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation or *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation or *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter or *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio or *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Numerator, actual.Numerator, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Numerator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Denominator, actual.Denominator, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Denominator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator or *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation or *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator or *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err 
!= nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle, EmptyObject: EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation or *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation or *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter or *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardGaugeViewNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardGaugeView) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardGaugeView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardGaugeView or *DashboardRowLayoutRowsWidgetsScorecardGaugeView", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardGaugeView) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardGaugeView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardGaugeView", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.LowerBound, actual.LowerBound, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("LowerBound")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.UpperBound, actual.UpperBound, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("UpperBound")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardSparkChartViewNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardSparkChartView) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardSparkChartView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardSparkChartView or *DashboardRowLayoutRowsWidgetsScorecardSparkChartView", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardSparkChartView) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardSparkChartView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardSparkChartView", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.SparkChartType, actual.SparkChartType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SparkChartType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.MinAlignmentPeriod, actual.MinAlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("MinAlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsScorecardThresholdsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsScorecardThresholds) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsScorecardThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardThresholds or *DashboardRowLayoutRowsWidgetsScorecardThresholds", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsScorecardThresholds) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsScorecardThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsScorecardThresholds", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Value, actual.Value, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Value")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Color, actual.Color, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Color")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsTextNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsText) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsText) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsText or *DashboardRowLayoutRowsWidgetsText", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsText) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsText) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsText", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Content, actual.Content, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Content")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Format, actual.Format, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Format")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsBlankNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - return diffs, nil -} - -func compareDashboardRowLayoutRowsWidgetsLogsPanelNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardRowLayoutRowsWidgetsLogsPanel) - if !ok { - desiredNotPointer, ok := d.(DashboardRowLayoutRowsWidgetsLogsPanel) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsLogsPanel or *DashboardRowLayoutRowsWidgetsLogsPanel", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardRowLayoutRowsWidgetsLogsPanel) - if !ok { - actualNotPointer, ok := a.(DashboardRowLayoutRowsWidgetsLogsPanel) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardRowLayoutRowsWidgetsLogsPanel", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ResourceNames, actual.ResourceNames, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("ResourceNames")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayout) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayout) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayout or *DashboardColumnLayout", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayout) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayout) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayout", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Columns, actual.Columns, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumns, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Columns")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumns) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumns) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumns or *DashboardColumnLayoutColumns", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumns) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumns) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumns", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Weight, actual.Weight, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Weight")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Widgets, actual.Widgets, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgets, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Widgets")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgets) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgets or *DashboardColumnLayoutColumnsWidgets", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgets) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgets", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Title, actual.Title, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Title")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.XyChart, actual.XyChart, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChart, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("XyChart")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Scorecard, actual.Scorecard, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecard, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Scorecard")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Text, actual.Text, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsTextNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsText, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Text")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Blank, actual.Blank, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsBlankNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsBlank, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Blank")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LogsPanel, actual.LogsPanel, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsLogsPanelNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsLogsPanel, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LogsPanel")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChart) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChart) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChart or *DashboardColumnLayoutColumnsWidgetsXyChart", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChart) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChart) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChart", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.DataSets, actual.DataSets, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSets, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("DataSets")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeshiftDuration, actual.TimeshiftDuration, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeshiftDuration")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Thresholds, actual.Thresholds, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartThresholdsNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartThresholds, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Thresholds")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.XAxis, actual.XAxis, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartXAxisNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartXAxis, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("XAxis")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.YAxis, actual.YAxis, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartYAxisNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartYAxis, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("YAxis")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ChartOptions, actual.ChartOptions, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartChartOptions, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("ChartOptions")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSets) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartDataSets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSets or *DashboardColumnLayoutColumnsWidgetsXyChartDataSets", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSets) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartDataSets) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSets", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesQuery, actual.TimeSeriesQuery, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQuery")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PlotType, actual.PlotType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PlotType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LegendTemplate, actual.LegendTemplate, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("LegendTemplate")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.MinAlignmentPeriod, actual.MinAlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("MinAlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery or *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesFilter, actual.TimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.TimeSeriesFilterRatio, actual.TimeSeriesFilterRatio, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilterRatio")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeSeriesQueryLanguage, actual.TimeSeriesQueryLanguage, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQueryLanguage")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.UnitOverride, actual.UnitOverride, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("UnitOverride")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter or *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation or *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation or *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, 
actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter or *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio or *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Numerator, actual.Numerator, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Numerator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, 
ds...) - } - - if ds, err := dcl.Diff(desired.Denominator, actual.Denominator, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Denominator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator or *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation or *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator or *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation or *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, 
dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation or *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter or *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := 
dcl.Diff(desired.RankingMethod, actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartThresholdsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartThresholds) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartThresholds or *DashboardColumnLayoutColumnsWidgetsXyChartThresholds", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartThresholds) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartThresholds", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 
|| err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Value, actual.Value, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Value")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Color, actual.Color, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Color")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartXAxisNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartXAxis) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartXAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartXAxis or *DashboardColumnLayoutColumnsWidgetsXyChartXAxis", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartXAxis) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartXAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartXAxis", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, 
fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Scale, actual.Scale, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Scale")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartYAxisNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartYAxis) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartYAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartYAxis or *DashboardColumnLayoutColumnsWidgetsXyChartYAxis", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartYAxis) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartYAxis) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartYAxis", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Scale, actual.Scale, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Scale")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartChartOptions or *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsXyChartChartOptions", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Mode, actual.Mode, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Mode")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecard) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecard) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecard or *DashboardColumnLayoutColumnsWidgetsScorecard", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecard) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecard) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecard", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesQuery, actual.TimeSeriesQuery, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQuery")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GaugeView, actual.GaugeView, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardGaugeView, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GaugeView")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.SparkChartView, actual.SparkChartView, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SparkChartView")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Thresholds, actual.Thresholds, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardThresholdsNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardThresholds, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Thresholds")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery or *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeriesFilter, actual.TimeSeriesFilter, dcl.DiffInfo{ObjectFunction: 
compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeSeriesFilterRatio, actual.TimeSeriesFilterRatio, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesFilterRatio")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TimeSeriesQueryLanguage, actual.TimeSeriesQueryLanguage, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("TimeSeriesQueryLanguage")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.UnitOverride, actual.UnitOverride, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("UnitOverride")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter or *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation or *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation or *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter or *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio or *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Numerator, actual.Numerator, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Numerator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Denominator, actual.Denominator, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Denominator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.SecondaryAggregation, actual.SecondaryAggregation, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SecondaryAggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PickTimeSeriesFilter, actual.PickTimeSeriesFilter, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PickTimeSeriesFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator or *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation or *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator or *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Aggregation, actual.Aggregation, dcl.DiffInfo{ObjectFunction: compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle, EmptyObject: EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Aggregation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation or *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation or *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.AlignmentPeriod, actual.AlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("AlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PerSeriesAligner, actual.PerSeriesAligner, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("PerSeriesAligner")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CrossSeriesReducer, actual.CrossSeriesReducer, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("CrossSeriesReducer")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GroupByFields, actual.GroupByFields, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("GroupByFields")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter or *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RankingMethod, actual.RankingMethod, 
dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("RankingMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.NumTimeSeries, actual.NumTimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("NumTimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardGaugeView or *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardGaugeView", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.LowerBound, actual.LowerBound, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("LowerBound")); len(ds) != 0 || err != nil { - if err != nil 
{ - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.UpperBound, actual.UpperBound, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("UpperBound")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView or *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.SparkChartType, actual.SparkChartType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("SparkChartType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.MinAlignmentPeriod, actual.MinAlignmentPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("MinAlignmentPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsScorecardThresholdsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsScorecardThresholds) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsScorecardThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardThresholds or *DashboardColumnLayoutColumnsWidgetsScorecardThresholds", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsScorecardThresholds) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsScorecardThresholds) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsScorecardThresholds", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Label, actual.Label, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Label")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Value, actual.Value, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Value")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Color, actual.Color, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Color")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Direction, actual.Direction, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Direction")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsTextNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsText) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsText) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsText or *DashboardColumnLayoutColumnsWidgetsText", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsText) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsText) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsText", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Content, actual.Content, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Content")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Format, actual.Format, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Format")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsBlankNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - return diffs, nil -} - -func compareDashboardColumnLayoutColumnsWidgetsLogsPanelNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*DashboardColumnLayoutColumnsWidgetsLogsPanel) - if !ok { - desiredNotPointer, ok := d.(DashboardColumnLayoutColumnsWidgetsLogsPanel) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsLogsPanel or *DashboardColumnLayoutColumnsWidgetsLogsPanel", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*DashboardColumnLayoutColumnsWidgetsLogsPanel) - if !ok { - actualNotPointer, ok := a.(DashboardColumnLayoutColumnsWidgetsLogsPanel) - if !ok { - return nil, fmt.Errorf("obj %v is not a DashboardColumnLayoutColumnsWidgetsLogsPanel", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ResourceNames, actual.ResourceNames, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateDashboardUpdateDashboardOperation")}, fn.AddNest("ResourceNames")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. 
-func (r *Dashboard) urlNormalized() *Dashboard { - normalized := dcl.Copy(*r).(Dashboard) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) - normalized.Project = dcl.SelfLinkToName(r.Project) - normalized.Etag = dcl.SelfLinkToName(r.Etag) - return &normalized -} - -func (r *Dashboard) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateDashboard" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/dashboards/{{name}}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the Dashboard resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *Dashboard) marshal(c *Client) ([]byte, error) { - m, err := expandDashboard(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling Dashboard: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalDashboard decodes JSON responses into the Dashboard resource schema. -func unmarshalDashboard(b []byte, c *Client, res *Dashboard) (*Dashboard, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapDashboard(m, c, res) -} - -func unmarshalMapDashboard(m map[string]interface{}, c *Client, res *Dashboard) (*Dashboard, error) { - - flattened := flattenDashboard(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandDashboard expands Dashboard into a JSON request object. 
-func expandDashboard(c *Client, f *Dashboard) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Name into name: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - if v := f.DisplayName; dcl.ValueShouldBeSent(v) { - m["displayName"] = v - } - if v, err := expandDashboardGridLayout(c, f.GridLayout, res); err != nil { - return nil, fmt.Errorf("error expanding GridLayout into gridLayout: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gridLayout"] = v - } - if v, err := expandDashboardMosaicLayout(c, f.MosaicLayout, res); err != nil { - return nil, fmt.Errorf("error expanding MosaicLayout into mosaicLayout: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["mosaicLayout"] = v - } - if v, err := expandDashboardRowLayout(c, f.RowLayout, res); err != nil { - return nil, fmt.Errorf("error expanding RowLayout into rowLayout: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["rowLayout"] = v - } - if v, err := expandDashboardColumnLayout(c, f.ColumnLayout, res); err != nil { - return nil, fmt.Errorf("error expanding ColumnLayout into columnLayout: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["columnLayout"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - - return m, nil -} - -// flattenDashboard flattens Dashboard from a JSON request object into the -// Dashboard type. 
-func flattenDashboard(c *Client, i interface{}, res *Dashboard) *Dashboard { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &Dashboard{} - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.DisplayName = dcl.FlattenString(m["displayName"]) - resultRes.GridLayout = flattenDashboardGridLayout(c, m["gridLayout"], res) - resultRes.MosaicLayout = flattenDashboardMosaicLayout(c, m["mosaicLayout"], res) - resultRes.RowLayout = flattenDashboardRowLayout(c, m["rowLayout"], res) - resultRes.ColumnLayout = flattenDashboardColumnLayout(c, m["columnLayout"], res) - resultRes.Project = dcl.FlattenString(m["project"]) - resultRes.Etag = dcl.FlattenString(m["etag"]) - - return resultRes -} - -// expandDashboardGridLayoutMap expands the contents of DashboardGridLayout into a JSON -// request object. -func expandDashboardGridLayoutMap(c *Client, f map[string]DashboardGridLayout, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayout(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutSlice expands the contents of DashboardGridLayout into a JSON -// request object. -func expandDashboardGridLayoutSlice(c *Client, f []DashboardGridLayout, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayout(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutMap flattens the contents of DashboardGridLayout from a JSON -// response object. 
-func flattenDashboardGridLayoutMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayout { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayout{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayout{} - } - - items := make(map[string]DashboardGridLayout) - for k, item := range a { - items[k] = *flattenDashboardGridLayout(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutSlice flattens the contents of DashboardGridLayout from a JSON -// response object. -func flattenDashboardGridLayoutSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayout { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayout{} - } - - if len(a) == 0 { - return []DashboardGridLayout{} - } - - items := make([]DashboardGridLayout, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayout(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayout expands an instance of DashboardGridLayout into a JSON -// request object. -func expandDashboardGridLayout(c *Client, f *DashboardGridLayout, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Columns; !dcl.IsEmptyValueIndirect(v) { - m["columns"] = v - } - if v, err := expandDashboardGridLayoutWidgetsSlice(c, f.Widgets, res); err != nil { - return nil, fmt.Errorf("error expanding Widgets into widgets: %w", err) - } else if v != nil { - m["widgets"] = v - } - - return m, nil -} - -// flattenDashboardGridLayout flattens an instance of DashboardGridLayout from a JSON -// response object. 
-func flattenDashboardGridLayout(c *Client, i interface{}, res *Dashboard) *DashboardGridLayout { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayout{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayout - } - r.Columns = dcl.FlattenInteger(m["columns"]) - r.Widgets = flattenDashboardGridLayoutWidgetsSlice(c, m["widgets"], res) - - return r -} - -// expandDashboardGridLayoutWidgetsMap expands the contents of DashboardGridLayoutWidgets into a JSON -// request object. -func expandDashboardGridLayoutWidgetsMap(c *Client, f map[string]DashboardGridLayoutWidgets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgets(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsSlice expands the contents of DashboardGridLayoutWidgets into a JSON -// request object. -func expandDashboardGridLayoutWidgetsSlice(c *Client, f []DashboardGridLayoutWidgets, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgets(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsMap flattens the contents of DashboardGridLayoutWidgets from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgets { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgets{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgets{} - } - - items := make(map[string]DashboardGridLayoutWidgets) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgets(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsSlice flattens the contents of DashboardGridLayoutWidgets from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgets { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgets{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgets{} - } - - items := make([]DashboardGridLayoutWidgets, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgets(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgets expands an instance of DashboardGridLayoutWidgets into a JSON -// request object. 
-func expandDashboardGridLayoutWidgets(c *Client, f *DashboardGridLayoutWidgets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Title; !dcl.IsEmptyValueIndirect(v) { - m["title"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChart(c, f.XyChart, res); err != nil { - return nil, fmt.Errorf("error expanding XyChart into xyChart: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["xyChart"] = v - } - if v, err := expandDashboardGridLayoutWidgetsScorecard(c, f.Scorecard, res); err != nil { - return nil, fmt.Errorf("error expanding Scorecard into scorecard: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["scorecard"] = v - } - if v, err := expandDashboardGridLayoutWidgetsText(c, f.Text, res); err != nil { - return nil, fmt.Errorf("error expanding Text into text: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["text"] = v - } - if v, err := expandDashboardGridLayoutWidgetsBlank(c, f.Blank, res); err != nil { - return nil, fmt.Errorf("error expanding Blank into blank: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["blank"] = v - } - if v, err := expandDashboardGridLayoutWidgetsLogsPanel(c, f.LogsPanel, res); err != nil { - return nil, fmt.Errorf("error expanding LogsPanel into logsPanel: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["logsPanel"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgets flattens an instance of DashboardGridLayoutWidgets from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgets(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgets { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgets{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgets - } - r.Title = dcl.FlattenString(m["title"]) - r.XyChart = flattenDashboardGridLayoutWidgetsXyChart(c, m["xyChart"], res) - r.Scorecard = flattenDashboardGridLayoutWidgetsScorecard(c, m["scorecard"], res) - r.Text = flattenDashboardGridLayoutWidgetsText(c, m["text"], res) - r.Blank = flattenDashboardGridLayoutWidgetsBlank(c, m["blank"], res) - r.LogsPanel = flattenDashboardGridLayoutWidgetsLogsPanel(c, m["logsPanel"], res) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartMap expands the contents of DashboardGridLayoutWidgetsXyChart into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChart, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChart(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartSlice expands the contents of DashboardGridLayoutWidgetsXyChart into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartSlice(c *Client, f []DashboardGridLayoutWidgetsXyChart, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChart(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartMap flattens the contents of DashboardGridLayoutWidgetsXyChart from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChart { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChart{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChart{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChart) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChart(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartSlice flattens the contents of DashboardGridLayoutWidgetsXyChart from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChart { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChart{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChart{} - } - - items := make([]DashboardGridLayoutWidgetsXyChart, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChart(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChart expands an instance of DashboardGridLayoutWidgetsXyChart into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChart(c *Client, f *DashboardGridLayoutWidgetsXyChart, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardGridLayoutWidgetsXyChartDataSetsSlice(c, f.DataSets, res); err != nil { - return nil, fmt.Errorf("error expanding DataSets into dataSets: %w", err) - } else if v != nil { - m["dataSets"] = v - } - if v := f.TimeshiftDuration; !dcl.IsEmptyValueIndirect(v) { - m["timeshiftDuration"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChartThresholdsSlice(c, f.Thresholds, res); err != nil { - return nil, fmt.Errorf("error expanding Thresholds into thresholds: %w", err) - } else if v != nil { - m["thresholds"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChartXAxis(c, f.XAxis, res); err != nil { - return nil, fmt.Errorf("error expanding XAxis into xAxis: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["xAxis"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChartYAxis(c, f.YAxis, res); err != nil { - return nil, fmt.Errorf("error expanding YAxis into yAxis: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["yAxis"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChartChartOptions(c, f.ChartOptions, res); err != nil { - return nil, fmt.Errorf("error expanding ChartOptions into chartOptions: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["chartOptions"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChart flattens an instance of DashboardGridLayoutWidgetsXyChart from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChart(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChart { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChart{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChart - } - r.DataSets = flattenDashboardGridLayoutWidgetsXyChartDataSetsSlice(c, m["dataSets"], res) - r.TimeshiftDuration = dcl.FlattenString(m["timeshiftDuration"]) - r.Thresholds = flattenDashboardGridLayoutWidgetsXyChartThresholdsSlice(c, m["thresholds"], res) - r.XAxis = flattenDashboardGridLayoutWidgetsXyChartXAxis(c, m["xAxis"], res) - r.YAxis = flattenDashboardGridLayoutWidgetsXyChartYAxis(c, m["yAxis"], res) - r.ChartOptions = flattenDashboardGridLayoutWidgetsXyChartChartOptions(c, m["chartOptions"], res) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsMap expands the contents of DashboardGridLayoutWidgetsXyChartDataSets into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartDataSets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSets(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsSlice expands the contents of DashboardGridLayoutWidgetsXyChartDataSets into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartDataSets, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSets(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSets from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSets { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSets{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSets{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSets) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSets(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSets from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSets { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSets{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSets{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSets, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSets(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartDataSets expands an instance of DashboardGridLayoutWidgetsXyChartDataSets into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSets(c *Client, f *DashboardGridLayoutWidgetsXyChartDataSets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(c, f.TimeSeriesQuery, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesQuery into timeSeriesQuery: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQuery"] = v - } - if v := f.PlotType; !dcl.IsEmptyValueIndirect(v) { - m["plotType"] = v - } - if v := f.LegendTemplate; !dcl.IsEmptyValueIndirect(v) { - m["legendTemplate"] = v - } - if v := f.MinAlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["minAlignmentPeriod"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSets flattens an instance of DashboardGridLayoutWidgetsXyChartDataSets from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSets(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartDataSets { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartDataSets{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartDataSets - } - r.TimeSeriesQuery = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(c, m["timeSeriesQuery"], res) - r.PlotType = flattenDashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum(m["plotType"]) - r.LegendTemplate = dcl.FlattenString(m["legendTemplate"]) - r.MinAlignmentPeriod = dcl.FlattenString(m["minAlignmentPeriod"]) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryMap expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuerySlice expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuerySlice(c *Client, f []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuerySlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuerySlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery expands an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(c *Client, f *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, f.TimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilter into timeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilter"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, f.TimeSeriesFilterRatio, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilterRatio into timeSeriesFilterRatio: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilterRatio"] = v - } - if v := f.TimeSeriesQueryLanguage; !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQueryLanguage"] = v - } - if v := f.UnitOverride; !dcl.IsEmptyValueIndirect(v) { - m["unitOverride"] = v - } - - return m, nil -} - -// 
flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery flattens an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery - } - r.TimeSeriesFilter = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, m["timeSeriesFilter"], res) - r.TimeSeriesFilterRatio = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, m["timeSeriesFilterRatio"], res) - r.TimeSeriesQueryLanguage = dcl.FlattenString(m["timeSeriesQueryLanguage"]) - r.UnitOverride = dcl.FlattenString(m["unitOverride"]) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter expands an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c *Client, f *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter flattens an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, m["aggregation"], res) - r.SecondaryAggregation = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation expands an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, f *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation flattens an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation expands an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, f *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation flattens an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = 
dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// 
response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter expands an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, f *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter flattens an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap expands the contents of 
DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio expands an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c *Client, f *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, f.Numerator, res); err != nil { - return nil, fmt.Errorf("error expanding Numerator into numerator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["numerator"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, f.Denominator, res); err != nil { - return nil, fmt.Errorf("error expanding Denominator into denominator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["denominator"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio flattens an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - } - r.Numerator = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, m["numerator"], res) - r.Denominator = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, m["denominator"], res) - r.SecondaryAggregation = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator expands an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, f *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator flattens an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request 
object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation expands an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, f *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation flattens an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator expands an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, f *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator flattens an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice flattens the contents of 
DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation expands an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, f *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation flattens an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation expands an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, f *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation flattens an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice expands the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter expands an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, f *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter flattens an instance of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartThresholdsMap expands the contents of 
DashboardGridLayoutWidgetsXyChartThresholds into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartThresholdsMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartThresholds(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartThresholdsSlice expands the contents of DashboardGridLayoutWidgetsXyChartThresholds into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartThresholdsSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartThresholds, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartThresholds(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartThresholdsMap flattens the contents of DashboardGridLayoutWidgetsXyChartThresholds from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartThresholdsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartThresholds { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartThresholds{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartThresholds{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartThresholds) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartThresholds(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartThresholdsSlice flattens the contents of DashboardGridLayoutWidgetsXyChartThresholds from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartThresholdsSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartThresholds { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartThresholds{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartThresholds{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartThresholds, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartThresholds(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartThresholds expands an instance of DashboardGridLayoutWidgetsXyChartThresholds into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartThresholds(c *Client, f *DashboardGridLayoutWidgetsXyChartThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Value; !dcl.IsEmptyValueIndirect(v) { - m["value"] = v - } - if v := f.Color; !dcl.IsEmptyValueIndirect(v) { - m["color"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartThresholds flattens an instance of DashboardGridLayoutWidgetsXyChartThresholds from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartThresholds(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartThresholds { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartThresholds{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartThresholds - } - r.Label = dcl.FlattenString(m["label"]) - r.Value = dcl.FlattenDouble(m["value"]) - r.Color = flattenDashboardGridLayoutWidgetsXyChartThresholdsColorEnum(m["color"]) - r.Direction = flattenDashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartXAxisMap expands the contents of DashboardGridLayoutWidgetsXyChartXAxis into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartXAxisMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartXAxis, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartXAxis(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartXAxisSlice expands the contents of DashboardGridLayoutWidgetsXyChartXAxis into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartXAxisSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartXAxis, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartXAxis(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartXAxisMap flattens the contents of DashboardGridLayoutWidgetsXyChartXAxis from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartXAxisMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartXAxis { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartXAxis{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartXAxis{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartXAxis) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartXAxis(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartXAxisSlice flattens the contents of DashboardGridLayoutWidgetsXyChartXAxis from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartXAxisSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartXAxis { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartXAxis{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartXAxis{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartXAxis, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartXAxis(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartXAxis expands an instance of DashboardGridLayoutWidgetsXyChartXAxis into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartXAxis(c *Client, f *DashboardGridLayoutWidgetsXyChartXAxis, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Scale; !dcl.IsEmptyValueIndirect(v) { - m["scale"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartXAxis flattens an instance of DashboardGridLayoutWidgetsXyChartXAxis from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartXAxis(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartXAxis { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartXAxis{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartXAxis - } - r.Label = dcl.FlattenString(m["label"]) - r.Scale = flattenDashboardGridLayoutWidgetsXyChartXAxisScaleEnum(m["scale"]) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartYAxisMap expands the contents of DashboardGridLayoutWidgetsXyChartYAxis into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartYAxisMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartYAxis, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartYAxis(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartYAxisSlice expands the contents of DashboardGridLayoutWidgetsXyChartYAxis into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartYAxisSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartYAxis, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartYAxis(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartYAxisMap flattens the contents of DashboardGridLayoutWidgetsXyChartYAxis from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartYAxisMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartYAxis { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartYAxis{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartYAxis{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartYAxis) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartYAxis(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartYAxisSlice flattens the contents of DashboardGridLayoutWidgetsXyChartYAxis from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartYAxisSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartYAxis { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartYAxis{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartYAxis{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartYAxis, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartYAxis(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartYAxis expands an instance of DashboardGridLayoutWidgetsXyChartYAxis into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartYAxis(c *Client, f *DashboardGridLayoutWidgetsXyChartYAxis, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Scale; !dcl.IsEmptyValueIndirect(v) { - m["scale"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartYAxis flattens an instance of DashboardGridLayoutWidgetsXyChartYAxis from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartYAxis(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartYAxis { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartYAxis{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartYAxis - } - r.Label = dcl.FlattenString(m["label"]) - r.Scale = flattenDashboardGridLayoutWidgetsXyChartYAxisScaleEnum(m["scale"]) - - return r -} - -// expandDashboardGridLayoutWidgetsXyChartChartOptionsMap expands the contents of DashboardGridLayoutWidgetsXyChartChartOptions into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartChartOptionsMap(c *Client, f map[string]DashboardGridLayoutWidgetsXyChartChartOptions, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartChartOptions(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsXyChartChartOptionsSlice expands the contents of DashboardGridLayoutWidgetsXyChartChartOptions into a JSON -// request object. -func expandDashboardGridLayoutWidgetsXyChartChartOptionsSlice(c *Client, f []DashboardGridLayoutWidgetsXyChartChartOptions, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsXyChartChartOptions(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartChartOptionsMap flattens the contents of DashboardGridLayoutWidgetsXyChartChartOptions from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartChartOptionsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartChartOptions { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartChartOptions{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartChartOptions{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartChartOptions) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartChartOptions(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartChartOptionsSlice flattens the contents of DashboardGridLayoutWidgetsXyChartChartOptions from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartChartOptionsSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartChartOptions { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartChartOptions{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartChartOptions{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartChartOptions, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartChartOptions(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsXyChartChartOptions expands an instance of DashboardGridLayoutWidgetsXyChartChartOptions into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsXyChartChartOptions(c *Client, f *DashboardGridLayoutWidgetsXyChartChartOptions, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Mode; !dcl.IsEmptyValueIndirect(v) { - m["mode"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsXyChartChartOptions flattens an instance of DashboardGridLayoutWidgetsXyChartChartOptions from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartChartOptions(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsXyChartChartOptions { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsXyChartChartOptions{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsXyChartChartOptions - } - r.Mode = flattenDashboardGridLayoutWidgetsXyChartChartOptionsModeEnum(m["mode"]) - - return r -} - -// expandDashboardGridLayoutWidgetsScorecardMap expands the contents of DashboardGridLayoutWidgetsScorecard into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecard, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecard(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardSlice expands the contents of DashboardGridLayoutWidgetsScorecard into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardSlice(c *Client, f []DashboardGridLayoutWidgetsScorecard, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecard(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardMap flattens the contents of DashboardGridLayoutWidgetsScorecard from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecard { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecard{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecard{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecard) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecard(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardSlice flattens the contents of DashboardGridLayoutWidgetsScorecard from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecard { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecard{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecard{} - } - - items := make([]DashboardGridLayoutWidgetsScorecard, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecard(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecard expands an instance of DashboardGridLayoutWidgetsScorecard into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecard(c *Client, f *DashboardGridLayoutWidgetsScorecard, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(c, f.TimeSeriesQuery, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesQuery into timeSeriesQuery: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQuery"] = v - } - if v, err := expandDashboardGridLayoutWidgetsScorecardGaugeView(c, f.GaugeView, res); err != nil { - return nil, fmt.Errorf("error expanding GaugeView into gaugeView: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gaugeView"] = v - } - if v, err := expandDashboardGridLayoutWidgetsScorecardSparkChartView(c, f.SparkChartView, res); err != nil { - return nil, fmt.Errorf("error expanding SparkChartView into sparkChartView: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["sparkChartView"] = v - } - if v, err := expandDashboardGridLayoutWidgetsScorecardThresholdsSlice(c, f.Thresholds, res); err != nil { - return nil, fmt.Errorf("error expanding Thresholds into thresholds: %w", err) - } else if v != nil { - m["thresholds"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecard flattens an instance of DashboardGridLayoutWidgetsScorecard from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecard(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecard { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecard{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecard - } - r.TimeSeriesQuery = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(c, m["timeSeriesQuery"], res) - r.GaugeView = flattenDashboardGridLayoutWidgetsScorecardGaugeView(c, m["gaugeView"], res) - r.SparkChartView = flattenDashboardGridLayoutWidgetsScorecardSparkChartView(c, m["sparkChartView"], res) - r.Thresholds = flattenDashboardGridLayoutWidgetsScorecardThresholdsSlice(c, m["thresholds"], res) - - return r -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryMap expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQuery into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQuerySlice expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQuery into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQuerySlice(c *Client, f []DashboardGridLayoutWidgetsScorecardTimeSeriesQuery, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQuery from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQuery { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQuery{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQuery{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQuerySlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQuery from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQuerySlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQuery { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQuery{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQuery{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQuery, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQuery expands an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQuery into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(c *Client, f *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, f.TimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilter into timeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilter"] = v - } - if v, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, f.TimeSeriesFilterRatio, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilterRatio into timeSeriesFilterRatio: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilterRatio"] = v - } - if v := f.TimeSeriesQueryLanguage; !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQueryLanguage"] = v - } - if v := f.UnitOverride; !dcl.IsEmptyValueIndirect(v) { - m["unitOverride"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQuery flattens an instance of 
DashboardGridLayoutWidgetsScorecardTimeSeriesQuery from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQuery(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardTimeSeriesQuery{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQuery - } - r.TimeSeriesFilter = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, m["timeSeriesFilter"], res) - r.TimeSeriesFilterRatio = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, m["timeSeriesFilterRatio"], res) - r.TimeSeriesQueryLanguage = dcl.FlattenString(m["timeSeriesQueryLanguage"]) - r.UnitOverride = dcl.FlattenString(m["unitOverride"]) - - return r -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterMap expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter expands an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c *Client, f *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - if v, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := 
expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter flattens an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, m["aggregation"], res) - r.SecondaryAggregation = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation expands an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, f *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation flattens an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation expands an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, f *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation flattens an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// 
expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter expands an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, f *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter flattens an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioMap expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio expands an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c *Client, f *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, f.Numerator, res); err != nil { - return nil, fmt.Errorf("error expanding Numerator into numerator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["numerator"] = v - } - if v, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, f.Denominator, res); err != nil { - return nil, fmt.Errorf("error expanding Denominator into denominator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["denominator"] = v - } - if v, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, f.SecondaryAggregation, res); err 
!= nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio flattens an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - } - r.Numerator = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, m["numerator"], res) - r.Denominator = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, m["denominator"], res) - r.SecondaryAggregation = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap expands the contents of 
DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator expands an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, f *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator flattens an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response 
object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation expands an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, f *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation flattens an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) 
- - return r -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator expands an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, f *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator flattens an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation expands an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, f *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation flattens an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = 
dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response 
object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation expands an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, f *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation flattens an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) 
- - return r -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice expands the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter expands an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, f *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter flattens an instance of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardGridLayoutWidgetsScorecardGaugeViewMap expands the contents of DashboardGridLayoutWidgetsScorecardGaugeView into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardGaugeViewMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardGaugeView, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardGaugeView(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardGaugeViewSlice expands the contents of DashboardGridLayoutWidgetsScorecardGaugeView into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardGaugeViewSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardGaugeView, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardGaugeView(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardGaugeViewMap flattens the contents of DashboardGridLayoutWidgetsScorecardGaugeView from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardGaugeViewMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardGaugeView { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardGaugeView{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardGaugeView{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardGaugeView) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardGaugeView(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardGaugeViewSlice flattens the contents of DashboardGridLayoutWidgetsScorecardGaugeView from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardGaugeViewSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardGaugeView { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardGaugeView{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardGaugeView{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardGaugeView, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardGaugeView(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardGaugeView expands an instance of DashboardGridLayoutWidgetsScorecardGaugeView into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardGaugeView(c *Client, f *DashboardGridLayoutWidgetsScorecardGaugeView, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.LowerBound; !dcl.IsEmptyValueIndirect(v) { - m["lowerBound"] = v - } - if v := f.UpperBound; !dcl.IsEmptyValueIndirect(v) { - m["upperBound"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardGaugeView flattens an instance of DashboardGridLayoutWidgetsScorecardGaugeView from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardGaugeView(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardGaugeView { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardGaugeView{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardGaugeView - } - r.LowerBound = dcl.FlattenDouble(m["lowerBound"]) - r.UpperBound = dcl.FlattenDouble(m["upperBound"]) - - return r -} - -// expandDashboardGridLayoutWidgetsScorecardSparkChartViewMap expands the contents of DashboardGridLayoutWidgetsScorecardSparkChartView into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardSparkChartViewMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardSparkChartView, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardSparkChartView(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardSparkChartViewSlice expands the contents of DashboardGridLayoutWidgetsScorecardSparkChartView into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardSparkChartViewSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardSparkChartView, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardSparkChartView(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardSparkChartViewMap flattens the contents of DashboardGridLayoutWidgetsScorecardSparkChartView from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardSparkChartViewMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardSparkChartView { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardSparkChartView{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardSparkChartView{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardSparkChartView) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardSparkChartView(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardSparkChartViewSlice flattens the contents of DashboardGridLayoutWidgetsScorecardSparkChartView from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardSparkChartViewSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardSparkChartView { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardSparkChartView{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardSparkChartView{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardSparkChartView, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardSparkChartView(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardSparkChartView expands an instance of DashboardGridLayoutWidgetsScorecardSparkChartView into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardSparkChartView(c *Client, f *DashboardGridLayoutWidgetsScorecardSparkChartView, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.SparkChartType; !dcl.IsEmptyValueIndirect(v) { - m["sparkChartType"] = v - } - if v := f.MinAlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["minAlignmentPeriod"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardSparkChartView flattens an instance of DashboardGridLayoutWidgetsScorecardSparkChartView from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardSparkChartView(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardSparkChartView { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardSparkChartView{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardSparkChartView - } - r.SparkChartType = flattenDashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum(m["sparkChartType"]) - r.MinAlignmentPeriod = dcl.FlattenString(m["minAlignmentPeriod"]) - - return r -} - -// expandDashboardGridLayoutWidgetsScorecardThresholdsMap expands the contents of DashboardGridLayoutWidgetsScorecardThresholds into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardThresholdsMap(c *Client, f map[string]DashboardGridLayoutWidgetsScorecardThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardThresholds(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsScorecardThresholdsSlice expands the contents of DashboardGridLayoutWidgetsScorecardThresholds into a JSON -// request object. 
-func expandDashboardGridLayoutWidgetsScorecardThresholdsSlice(c *Client, f []DashboardGridLayoutWidgetsScorecardThresholds, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsScorecardThresholds(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardThresholdsMap flattens the contents of DashboardGridLayoutWidgetsScorecardThresholds from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardThresholdsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardThresholds { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardThresholds{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardThresholds{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardThresholds) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardThresholds(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardThresholdsSlice flattens the contents of DashboardGridLayoutWidgetsScorecardThresholds from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardThresholdsSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardThresholds { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardThresholds{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardThresholds{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardThresholds, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardThresholds(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsScorecardThresholds expands an instance of DashboardGridLayoutWidgetsScorecardThresholds into a JSON -// request object. -func expandDashboardGridLayoutWidgetsScorecardThresholds(c *Client, f *DashboardGridLayoutWidgetsScorecardThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Value; !dcl.IsEmptyValueIndirect(v) { - m["value"] = v - } - if v := f.Color; !dcl.IsEmptyValueIndirect(v) { - m["color"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsScorecardThresholds flattens an instance of DashboardGridLayoutWidgetsScorecardThresholds from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardThresholds(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsScorecardThresholds { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsScorecardThresholds{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsScorecardThresholds - } - r.Label = dcl.FlattenString(m["label"]) - r.Value = dcl.FlattenDouble(m["value"]) - r.Color = flattenDashboardGridLayoutWidgetsScorecardThresholdsColorEnum(m["color"]) - r.Direction = flattenDashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardGridLayoutWidgetsTextMap expands the contents of DashboardGridLayoutWidgetsText into a JSON -// request object. -func expandDashboardGridLayoutWidgetsTextMap(c *Client, f map[string]DashboardGridLayoutWidgetsText, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsText(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsTextSlice expands the contents of DashboardGridLayoutWidgetsText into a JSON -// request object. -func expandDashboardGridLayoutWidgetsTextSlice(c *Client, f []DashboardGridLayoutWidgetsText, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsText(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsTextMap flattens the contents of DashboardGridLayoutWidgetsText from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsTextMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsText { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsText{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsText{} - } - - items := make(map[string]DashboardGridLayoutWidgetsText) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsText(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsTextSlice flattens the contents of DashboardGridLayoutWidgetsText from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsTextSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsText { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsText{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsText{} - } - - items := make([]DashboardGridLayoutWidgetsText, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsText(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsText expands an instance of DashboardGridLayoutWidgetsText into a JSON -// request object. -func expandDashboardGridLayoutWidgetsText(c *Client, f *DashboardGridLayoutWidgetsText, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Content; !dcl.IsEmptyValueIndirect(v) { - m["content"] = v - } - if v := f.Format; !dcl.IsEmptyValueIndirect(v) { - m["format"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsText flattens an instance of DashboardGridLayoutWidgetsText from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsText(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsText { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsText{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsText - } - r.Content = dcl.FlattenString(m["content"]) - r.Format = flattenDashboardGridLayoutWidgetsTextFormatEnum(m["format"]) - - return r -} - -// expandDashboardGridLayoutWidgetsBlankMap expands the contents of DashboardGridLayoutWidgetsBlank into a JSON -// request object. -func expandDashboardGridLayoutWidgetsBlankMap(c *Client, f map[string]DashboardGridLayoutWidgetsBlank, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsBlank(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsBlankSlice expands the contents of DashboardGridLayoutWidgetsBlank into a JSON -// request object. -func expandDashboardGridLayoutWidgetsBlankSlice(c *Client, f []DashboardGridLayoutWidgetsBlank, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsBlank(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsBlankMap flattens the contents of DashboardGridLayoutWidgetsBlank from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsBlankMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsBlank { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsBlank{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsBlank{} - } - - items := make(map[string]DashboardGridLayoutWidgetsBlank) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsBlank(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsBlankSlice flattens the contents of DashboardGridLayoutWidgetsBlank from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsBlankSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsBlank { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsBlank{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsBlank{} - } - - items := make([]DashboardGridLayoutWidgetsBlank, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsBlank(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsBlank expands an instance of DashboardGridLayoutWidgetsBlank into a JSON -// request object. -func expandDashboardGridLayoutWidgetsBlank(c *Client, f *DashboardGridLayoutWidgetsBlank, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsBlank flattens an instance of DashboardGridLayoutWidgetsBlank from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsBlank(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsBlank { - _, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsBlank{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsBlank - } - - return r -} - -// expandDashboardGridLayoutWidgetsLogsPanelMap expands the contents of DashboardGridLayoutWidgetsLogsPanel into a JSON -// request object. -func expandDashboardGridLayoutWidgetsLogsPanelMap(c *Client, f map[string]DashboardGridLayoutWidgetsLogsPanel, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardGridLayoutWidgetsLogsPanel(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardGridLayoutWidgetsLogsPanelSlice expands the contents of DashboardGridLayoutWidgetsLogsPanel into a JSON -// request object. -func expandDashboardGridLayoutWidgetsLogsPanelSlice(c *Client, f []DashboardGridLayoutWidgetsLogsPanel, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardGridLayoutWidgetsLogsPanel(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardGridLayoutWidgetsLogsPanelMap flattens the contents of DashboardGridLayoutWidgetsLogsPanel from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsLogsPanelMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsLogsPanel { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsLogsPanel{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsLogsPanel{} - } - - items := make(map[string]DashboardGridLayoutWidgetsLogsPanel) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsLogsPanel(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsLogsPanelSlice flattens the contents of DashboardGridLayoutWidgetsLogsPanel from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsLogsPanelSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsLogsPanel { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsLogsPanel{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsLogsPanel{} - } - - items := make([]DashboardGridLayoutWidgetsLogsPanel, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsLogsPanel(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardGridLayoutWidgetsLogsPanel expands an instance of DashboardGridLayoutWidgetsLogsPanel into a JSON -// request object. -func expandDashboardGridLayoutWidgetsLogsPanel(c *Client, f *DashboardGridLayoutWidgetsLogsPanel, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v := f.ResourceNames; v != nil { - m["resourceNames"] = v - } - - return m, nil -} - -// flattenDashboardGridLayoutWidgetsLogsPanel flattens an instance of DashboardGridLayoutWidgetsLogsPanel from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsLogsPanel(c *Client, i interface{}, res *Dashboard) *DashboardGridLayoutWidgetsLogsPanel { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardGridLayoutWidgetsLogsPanel{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardGridLayoutWidgetsLogsPanel - } - r.Filter = dcl.FlattenString(m["filter"]) - r.ResourceNames = dcl.FlattenStringSlice(m["resourceNames"]) - - return r -} - -// expandDashboardMosaicLayoutMap expands the contents of DashboardMosaicLayout into a JSON -// request object. -func expandDashboardMosaicLayoutMap(c *Client, f map[string]DashboardMosaicLayout, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayout(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutSlice expands the contents of DashboardMosaicLayout into a JSON -// request object. -func expandDashboardMosaicLayoutSlice(c *Client, f []DashboardMosaicLayout, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayout(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutMap flattens the contents of DashboardMosaicLayout from a JSON -// response object. 
-func flattenDashboardMosaicLayoutMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayout { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayout{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayout{} - } - - items := make(map[string]DashboardMosaicLayout) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayout(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutSlice flattens the contents of DashboardMosaicLayout from a JSON -// response object. -func flattenDashboardMosaicLayoutSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayout { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayout{} - } - - if len(a) == 0 { - return []DashboardMosaicLayout{} - } - - items := make([]DashboardMosaicLayout, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayout(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayout expands an instance of DashboardMosaicLayout into a JSON -// request object. -func expandDashboardMosaicLayout(c *Client, f *DashboardMosaicLayout, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Columns; !dcl.IsEmptyValueIndirect(v) { - m["columns"] = v - } - if v, err := expandDashboardMosaicLayoutTilesSlice(c, f.Tiles, res); err != nil { - return nil, fmt.Errorf("error expanding Tiles into tiles: %w", err) - } else if v != nil { - m["tiles"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayout flattens an instance of DashboardMosaicLayout from a JSON -// response object. 
-func flattenDashboardMosaicLayout(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayout { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayout{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayout - } - r.Columns = dcl.FlattenInteger(m["columns"]) - r.Tiles = flattenDashboardMosaicLayoutTilesSlice(c, m["tiles"], res) - - return r -} - -// expandDashboardMosaicLayoutTilesMap expands the contents of DashboardMosaicLayoutTiles into a JSON -// request object. -func expandDashboardMosaicLayoutTilesMap(c *Client, f map[string]DashboardMosaicLayoutTiles, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTiles(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesSlice expands the contents of DashboardMosaicLayoutTiles into a JSON -// request object. -func expandDashboardMosaicLayoutTilesSlice(c *Client, f []DashboardMosaicLayoutTiles, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTiles(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesMap flattens the contents of DashboardMosaicLayoutTiles from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTiles { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTiles{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTiles{} - } - - items := make(map[string]DashboardMosaicLayoutTiles) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTiles(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesSlice flattens the contents of DashboardMosaicLayoutTiles from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTiles { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTiles{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTiles{} - } - - items := make([]DashboardMosaicLayoutTiles, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTiles(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTiles expands an instance of DashboardMosaicLayoutTiles into a JSON -// request object. 
-func expandDashboardMosaicLayoutTiles(c *Client, f *DashboardMosaicLayoutTiles, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.XPos; !dcl.IsEmptyValueIndirect(v) { - m["xPos"] = v - } - if v := f.YPos; !dcl.IsEmptyValueIndirect(v) { - m["yPos"] = v - } - if v := f.Width; !dcl.IsEmptyValueIndirect(v) { - m["width"] = v - } - if v := f.Height; !dcl.IsEmptyValueIndirect(v) { - m["height"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidget(c, f.Widget, res); err != nil { - return nil, fmt.Errorf("error expanding Widget into widget: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["widget"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTiles flattens an instance of DashboardMosaicLayoutTiles from a JSON -// response object. -func flattenDashboardMosaicLayoutTiles(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTiles { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTiles{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTiles - } - r.XPos = dcl.FlattenInteger(m["xPos"]) - r.YPos = dcl.FlattenInteger(m["yPos"]) - r.Width = dcl.FlattenInteger(m["width"]) - r.Height = dcl.FlattenInteger(m["height"]) - r.Widget = flattenDashboardMosaicLayoutTilesWidget(c, m["widget"], res) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetMap expands the contents of DashboardMosaicLayoutTilesWidget into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidget, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidget(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetSlice expands the contents of DashboardMosaicLayoutTilesWidget into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetSlice(c *Client, f []DashboardMosaicLayoutTilesWidget, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidget(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetMap flattens the contents of DashboardMosaicLayoutTilesWidget from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidget { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidget{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidget{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidget) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidget(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetSlice flattens the contents of DashboardMosaicLayoutTilesWidget from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidget { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidget{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidget{} - } - - items := make([]DashboardMosaicLayoutTilesWidget, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidget(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidget expands an instance of DashboardMosaicLayoutTilesWidget into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidget(c *Client, f *DashboardMosaicLayoutTilesWidget, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Title; !dcl.IsEmptyValueIndirect(v) { - m["title"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChart(c, f.XyChart, res); err != nil { - return nil, fmt.Errorf("error expanding XyChart into xyChart: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["xyChart"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecard(c, f.Scorecard, res); err != nil { - return nil, fmt.Errorf("error expanding Scorecard into scorecard: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["scorecard"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetText(c, f.Text, res); err != nil { - return nil, fmt.Errorf("error expanding Text into text: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["text"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetBlank(c, f.Blank, res); err != nil { - return nil, fmt.Errorf("error expanding Blank into blank: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["blank"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetLogsPanel(c, f.LogsPanel, res); err != nil { - return nil, 
fmt.Errorf("error expanding LogsPanel into logsPanel: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["logsPanel"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidget flattens an instance of DashboardMosaicLayoutTilesWidget from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidget(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidget { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidget{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidget - } - r.Title = dcl.FlattenString(m["title"]) - r.XyChart = flattenDashboardMosaicLayoutTilesWidgetXyChart(c, m["xyChart"], res) - r.Scorecard = flattenDashboardMosaicLayoutTilesWidgetScorecard(c, m["scorecard"], res) - r.Text = flattenDashboardMosaicLayoutTilesWidgetText(c, m["text"], res) - r.Blank = flattenDashboardMosaicLayoutTilesWidgetBlank(c, m["blank"], res) - r.LogsPanel = flattenDashboardMosaicLayoutTilesWidgetLogsPanel(c, m["logsPanel"], res) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChart into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChart, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChart(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChart into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChart, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChart(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChart from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChart { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChart{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChart{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChart) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChart(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChart from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChart { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChart{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChart{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChart, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChart(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChart expands an instance of DashboardMosaicLayoutTilesWidgetXyChart into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChart(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChart, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsSlice(c, f.DataSets, res); err != nil { - return nil, fmt.Errorf("error expanding DataSets into dataSets: %w", err) - } else if v != nil { - m["dataSets"] = v - } - if v := f.TimeshiftDuration; !dcl.IsEmptyValueIndirect(v) { - m["timeshiftDuration"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartThresholdsSlice(c, f.Thresholds, res); err != nil { - return nil, fmt.Errorf("error expanding Thresholds into thresholds: %w", err) - } else if v != nil { - m["thresholds"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartXAxis(c, f.XAxis, res); err != nil { - return nil, fmt.Errorf("error expanding XAxis into xAxis: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["xAxis"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartYAxis(c, f.YAxis, res); err != nil { - return nil, fmt.Errorf("error expanding YAxis into yAxis: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["yAxis"] = v - } - if v, err := 
expandDashboardMosaicLayoutTilesWidgetXyChartChartOptions(c, f.ChartOptions, res); err != nil { - return nil, fmt.Errorf("error expanding ChartOptions into chartOptions: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["chartOptions"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChart flattens an instance of DashboardMosaicLayoutTilesWidgetXyChart from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChart(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChart { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChart{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChart - } - r.DataSets = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsSlice(c, m["dataSets"], res) - r.TimeshiftDuration = dcl.FlattenString(m["timeshiftDuration"]) - r.Thresholds = flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsSlice(c, m["thresholds"], res) - r.XAxis = flattenDashboardMosaicLayoutTilesWidgetXyChartXAxis(c, m["xAxis"], res) - r.YAxis = flattenDashboardMosaicLayoutTilesWidgetXyChartYAxis(c, m["yAxis"], res) - r.ChartOptions = flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptions(c, m["chartOptions"], res) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSets into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSets(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSets into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartDataSets, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSets(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSets from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSets { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSets{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSets{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSets) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSets(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSets from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSets { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSets{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSets{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSets, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSets(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSets expands an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSets into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSets(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartDataSets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(c, f.TimeSeriesQuery, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesQuery into timeSeriesQuery: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQuery"] = v - } - if v := f.PlotType; !dcl.IsEmptyValueIndirect(v) { - m["plotType"] = v - } - if v := f.LegendTemplate; !dcl.IsEmptyValueIndirect(v) { - m["legendTemplate"] = v - } - if v := f.MinAlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["minAlignmentPeriod"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSets flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSets from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSets(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartDataSets { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartDataSets{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSets - } - r.TimeSeriesQuery = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(c, m["timeSeriesQuery"], res) - r.PlotType = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum(m["plotType"]) - r.LegendTemplate = dcl.FlattenString(m["legendTemplate"]) - r.MinAlignmentPeriod = dcl.FlattenString(m["minAlignmentPeriod"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuerySlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuerySlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuerySlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuerySlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery expands an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, f.TimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilter into timeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilter"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, f.TimeSeriesFilterRatio, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilterRatio into timeSeriesFilterRatio: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilterRatio"] = v - } - if v := f.TimeSeriesQueryLanguage; !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQueryLanguage"] = v - } - if v := f.UnitOverride; !dcl.IsEmptyValueIndirect(v) { - m["unitOverride"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery - } - r.TimeSeriesFilter = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, m["timeSeriesFilter"], res) - r.TimeSeriesFilterRatio = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, m["timeSeriesFilterRatio"], res) - r.TimeSeriesQueryLanguage = dcl.FlattenString(m["timeSeriesQueryLanguage"]) - r.UnitOverride = dcl.FlattenString(m["unitOverride"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter expands an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, m["aggregation"], res) - r.SecondaryAggregation = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation expands an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - 
-// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation expands an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter expands an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap expands the 
contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio expands an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, f.Numerator, res); err != nil { - return nil, fmt.Errorf("error expanding Numerator into numerator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["numerator"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, f.Denominator, res); err != nil { - return nil, fmt.Errorf("error expanding Denominator into denominator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["denominator"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - } - r.Numerator = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, m["numerator"], res) - r.Denominator = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, m["denominator"], res) - r.SecondaryAggregation = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator expands an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice 
flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation expands an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator expands an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation expands an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation expands an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter expands an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// 
expandDashboardMosaicLayoutTilesWidgetXyChartThresholdsMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartThresholds into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartThresholdsMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartThresholds(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartThresholdsSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartThresholds into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartThresholdsSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartThresholds, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartThresholds(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartThresholds from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartThresholds { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartThresholds{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartThresholds{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartThresholds) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartThresholds(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartThresholds from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartThresholds { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartThresholds{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartThresholds{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartThresholds, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartThresholds(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartThresholds expands an instance of DashboardMosaicLayoutTilesWidgetXyChartThresholds into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartThresholds(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Value; !dcl.IsEmptyValueIndirect(v) { - m["value"] = v - } - if v := f.Color; !dcl.IsEmptyValueIndirect(v) { - m["color"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartThresholds flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartThresholds from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartThresholds(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartThresholds { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartThresholds{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartThresholds - } - r.Label = dcl.FlattenString(m["label"]) - r.Value = dcl.FlattenDouble(m["value"]) - r.Color = flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum(m["color"]) - r.Direction = flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartXAxisMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartXAxis into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartXAxisMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartXAxis, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartXAxis(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartXAxisSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartXAxis into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartXAxisSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartXAxis, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartXAxis(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartXAxisMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartXAxis from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartXAxisMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartXAxis { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartXAxis{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartXAxis{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartXAxis) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartXAxis(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartXAxisSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartXAxis from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartXAxisSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartXAxis { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartXAxis{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartXAxis{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartXAxis, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartXAxis(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartXAxis expands an instance of DashboardMosaicLayoutTilesWidgetXyChartXAxis into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartXAxis(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartXAxis, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Scale; !dcl.IsEmptyValueIndirect(v) { - m["scale"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartXAxis flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartXAxis from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartXAxis(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartXAxis { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartXAxis{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartXAxis - } - r.Label = dcl.FlattenString(m["label"]) - r.Scale = flattenDashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum(m["scale"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartYAxisMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartYAxis into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartYAxisMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartYAxis, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartYAxis(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartYAxisSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartYAxis into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartYAxisSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartYAxis, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartYAxis(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartYAxisMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartYAxis from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartYAxisMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartYAxis { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartYAxis{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartYAxis{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartYAxis) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartYAxis(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartYAxisSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartYAxis from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartYAxisSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartYAxis { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartYAxis{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartYAxis{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartYAxis, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartYAxis(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartYAxis expands an instance of DashboardMosaicLayoutTilesWidgetXyChartYAxis into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartYAxis(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartYAxis, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Scale; !dcl.IsEmptyValueIndirect(v) { - m["scale"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartYAxis flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartYAxis from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartYAxis(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartYAxis { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartYAxis{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartYAxis - } - r.Label = dcl.FlattenString(m["label"]) - r.Scale = flattenDashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum(m["scale"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartChartOptionsMap expands the contents of DashboardMosaicLayoutTilesWidgetXyChartChartOptions into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartChartOptionsMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetXyChartChartOptions, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartChartOptions(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartChartOptionsSlice expands the contents of DashboardMosaicLayoutTilesWidgetXyChartChartOptions into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetXyChartChartOptionsSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetXyChartChartOptions, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetXyChartChartOptions(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptionsMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartChartOptions from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptionsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartChartOptions { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartChartOptions{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartChartOptions{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartChartOptions) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptions(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptionsSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartChartOptions from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptionsSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartChartOptions { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartChartOptions{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartChartOptions{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartChartOptions, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptions(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetXyChartChartOptions expands an instance of DashboardMosaicLayoutTilesWidgetXyChartChartOptions into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetXyChartChartOptions(c *Client, f *DashboardMosaicLayoutTilesWidgetXyChartChartOptions, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Mode; !dcl.IsEmptyValueIndirect(v) { - m["mode"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptions flattens an instance of DashboardMosaicLayoutTilesWidgetXyChartChartOptions from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptions(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetXyChartChartOptions { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetXyChartChartOptions{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetXyChartChartOptions - } - r.Mode = flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum(m["mode"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecard into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecard, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecard(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecard into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecard, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecard(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecard from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecard { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecard{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecard{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecard) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecard(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecard from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecard { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecard{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecard{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecard, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecard(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecard expands an instance of DashboardMosaicLayoutTilesWidgetScorecard into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecard(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecard, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(c, f.TimeSeriesQuery, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesQuery into timeSeriesQuery: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQuery"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardGaugeView(c, f.GaugeView, res); err != nil { - return nil, fmt.Errorf("error expanding GaugeView into gaugeView: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gaugeView"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(c, f.SparkChartView, res); err != nil { - return nil, fmt.Errorf("error expanding SparkChartView into sparkChartView: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["sparkChartView"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardThresholdsSlice(c, f.Thresholds, res); err != nil { - return nil, fmt.Errorf("error expanding Thresholds into thresholds: %w", err) - } else if v != 
nil { - m["thresholds"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecard flattens an instance of DashboardMosaicLayoutTilesWidgetScorecard from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecard(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecard { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecard{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecard - } - r.TimeSeriesQuery = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(c, m["timeSeriesQuery"], res) - r.GaugeView = flattenDashboardMosaicLayoutTilesWidgetScorecardGaugeView(c, m["gaugeView"], res) - r.SparkChartView = flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(c, m["sparkChartView"], res) - r.Thresholds = flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsSlice(c, m["thresholds"], res) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuerySlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuerySlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuerySlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuerySlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery expands an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(c, f.TimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilter into timeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilter"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, f.TimeSeriesFilterRatio, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilterRatio into timeSeriesFilterRatio: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilterRatio"] = v - } - if v := f.TimeSeriesQueryLanguage; !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQueryLanguage"] = v - } - if v := f.UnitOverride; !dcl.IsEmptyValueIndirect(v) { - m["unitOverride"] = v - } - - return m, nil -} - -// 
flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery - } - r.TimeSeriesFilter = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(c, m["timeSeriesFilter"], res) - r.TimeSeriesFilterRatio = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, m["timeSeriesFilterRatio"], res) - r.TimeSeriesQueryLanguage = dcl.FlattenString(m["timeSeriesQueryLanguage"]) - r.UnitOverride = dcl.FlattenString(m["unitOverride"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter expands an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, m["aggregation"], res) - r.SecondaryAggregation = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation expands an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation expands an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = 
dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// 
response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter expands an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioMap expands the contents of 
DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio expands an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, f.Numerator, res); err != nil { - return nil, fmt.Errorf("error expanding Numerator into numerator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["numerator"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, f.Denominator, res); err != nil { - return nil, fmt.Errorf("error expanding Denominator into denominator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["denominator"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio - } - r.Numerator = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, m["numerator"], res) - r.Denominator = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, m["denominator"], res) - r.SecondaryAggregation = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator expands an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request 
object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation expands an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator expands an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation expands an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation expands an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter expands an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardGaugeViewMap expands the contents of 
DashboardMosaicLayoutTilesWidgetScorecardGaugeView into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardGaugeViewMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardGaugeView, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardGaugeView(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardGaugeViewSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardGaugeView into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardGaugeViewSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardGaugeView, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardGaugeView(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardGaugeViewMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardGaugeView from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardGaugeViewMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardGaugeView { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardGaugeView{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardGaugeView{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardGaugeView) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardGaugeView(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardGaugeViewSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardGaugeView from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardGaugeViewSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardGaugeView { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardGaugeView{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardGaugeView{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardGaugeView, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardGaugeView(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardGaugeView expands an instance of DashboardMosaicLayoutTilesWidgetScorecardGaugeView into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardGaugeView(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardGaugeView, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.LowerBound; !dcl.IsEmptyValueIndirect(v) { - m["lowerBound"] = v - } - if v := f.UpperBound; !dcl.IsEmptyValueIndirect(v) { - m["upperBound"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardGaugeView flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardGaugeView from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardGaugeView(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardGaugeView { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardGaugeView{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardGaugeView - } - r.LowerBound = dcl.FlattenDouble(m["lowerBound"]) - r.UpperBound = dcl.FlattenDouble(m["upperBound"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecardSparkChartView into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardSparkChartView, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardSparkChartView into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardSparkChartView, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardSparkChartView from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardSparkChartView { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardSparkChartView{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardSparkChartView{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardSparkChartView from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardSparkChartView { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardSparkChartView{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardSparkChartView{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardSparkChartView, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardSparkChartView expands an instance of DashboardMosaicLayoutTilesWidgetScorecardSparkChartView into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.SparkChartType; !dcl.IsEmptyValueIndirect(v) { - m["sparkChartType"] = v - } - if v := f.MinAlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["minAlignmentPeriod"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartView flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardSparkChartView from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartView(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardSparkChartView{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardSparkChartView - } - r.SparkChartType = flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum(m["sparkChartType"]) - r.MinAlignmentPeriod = dcl.FlattenString(m["minAlignmentPeriod"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardThresholdsMap expands the contents of DashboardMosaicLayoutTilesWidgetScorecardThresholds into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardThresholdsMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetScorecardThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardThresholds(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardThresholdsSlice expands the contents of DashboardMosaicLayoutTilesWidgetScorecardThresholds into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetScorecardThresholdsSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetScorecardThresholds, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetScorecardThresholds(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardThresholds from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardThresholds { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardThresholds{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardThresholds{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardThresholds) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardThresholds(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardThresholds from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardThresholds { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardThresholds{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardThresholds{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardThresholds, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardThresholds(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetScorecardThresholds expands an instance of DashboardMosaicLayoutTilesWidgetScorecardThresholds into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetScorecardThresholds(c *Client, f *DashboardMosaicLayoutTilesWidgetScorecardThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Value; !dcl.IsEmptyValueIndirect(v) { - m["value"] = v - } - if v := f.Color; !dcl.IsEmptyValueIndirect(v) { - m["color"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardThresholds flattens an instance of DashboardMosaicLayoutTilesWidgetScorecardThresholds from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardThresholds(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetScorecardThresholds { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetScorecardThresholds{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetScorecardThresholds - } - r.Label = dcl.FlattenString(m["label"]) - r.Value = dcl.FlattenDouble(m["value"]) - r.Color = flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum(m["color"]) - r.Direction = flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetTextMap expands the contents of DashboardMosaicLayoutTilesWidgetText into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetTextMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetText, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetText(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetTextSlice expands the contents of DashboardMosaicLayoutTilesWidgetText into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetTextSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetText, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetText(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetTextMap flattens the contents of DashboardMosaicLayoutTilesWidgetText from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetTextMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetText { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetText{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetText{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetText) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetText(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetTextSlice flattens the contents of DashboardMosaicLayoutTilesWidgetText from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetTextSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetText { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetText{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetText{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetText, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetText(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetText expands an instance of DashboardMosaicLayoutTilesWidgetText into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetText(c *Client, f *DashboardMosaicLayoutTilesWidgetText, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Content; !dcl.IsEmptyValueIndirect(v) { - m["content"] = v - } - if v := f.Format; !dcl.IsEmptyValueIndirect(v) { - m["format"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetText flattens an instance of DashboardMosaicLayoutTilesWidgetText from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetText(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetText { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetText{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetText - } - r.Content = dcl.FlattenString(m["content"]) - r.Format = flattenDashboardMosaicLayoutTilesWidgetTextFormatEnum(m["format"]) - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetBlankMap expands the contents of DashboardMosaicLayoutTilesWidgetBlank into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetBlankMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetBlank, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetBlank(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetBlankSlice expands the contents of DashboardMosaicLayoutTilesWidgetBlank into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetBlankSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetBlank, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetBlank(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetBlankMap flattens the contents of DashboardMosaicLayoutTilesWidgetBlank from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetBlankMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetBlank { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetBlank{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetBlank{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetBlank) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetBlank(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetBlankSlice flattens the contents of DashboardMosaicLayoutTilesWidgetBlank from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetBlankSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetBlank { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetBlank{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetBlank{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetBlank, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetBlank(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetBlank expands an instance of DashboardMosaicLayoutTilesWidgetBlank into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetBlank(c *Client, f *DashboardMosaicLayoutTilesWidgetBlank, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetBlank flattens an instance of DashboardMosaicLayoutTilesWidgetBlank from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetBlank(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetBlank { - _, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetBlank{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetBlank - } - - return r -} - -// expandDashboardMosaicLayoutTilesWidgetLogsPanelMap expands the contents of DashboardMosaicLayoutTilesWidgetLogsPanel into a JSON -// request object. 
-func expandDashboardMosaicLayoutTilesWidgetLogsPanelMap(c *Client, f map[string]DashboardMosaicLayoutTilesWidgetLogsPanel, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetLogsPanel(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardMosaicLayoutTilesWidgetLogsPanelSlice expands the contents of DashboardMosaicLayoutTilesWidgetLogsPanel into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetLogsPanelSlice(c *Client, f []DashboardMosaicLayoutTilesWidgetLogsPanel, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardMosaicLayoutTilesWidgetLogsPanel(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetLogsPanelMap flattens the contents of DashboardMosaicLayoutTilesWidgetLogsPanel from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetLogsPanelMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetLogsPanel { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetLogsPanel{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetLogsPanel{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetLogsPanel) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetLogsPanel(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetLogsPanelSlice flattens the contents of DashboardMosaicLayoutTilesWidgetLogsPanel from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetLogsPanelSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetLogsPanel { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetLogsPanel{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetLogsPanel{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetLogsPanel, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetLogsPanel(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardMosaicLayoutTilesWidgetLogsPanel expands an instance of DashboardMosaicLayoutTilesWidgetLogsPanel into a JSON -// request object. -func expandDashboardMosaicLayoutTilesWidgetLogsPanel(c *Client, f *DashboardMosaicLayoutTilesWidgetLogsPanel, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v := f.ResourceNames; v != nil { - m["resourceNames"] = v - } - - return m, nil -} - -// flattenDashboardMosaicLayoutTilesWidgetLogsPanel flattens an instance of DashboardMosaicLayoutTilesWidgetLogsPanel from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetLogsPanel(c *Client, i interface{}, res *Dashboard) *DashboardMosaicLayoutTilesWidgetLogsPanel { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardMosaicLayoutTilesWidgetLogsPanel{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardMosaicLayoutTilesWidgetLogsPanel - } - r.Filter = dcl.FlattenString(m["filter"]) - r.ResourceNames = dcl.FlattenStringSlice(m["resourceNames"]) - - return r -} - -// expandDashboardRowLayoutMap expands the contents of DashboardRowLayout into a JSON -// request object. 
-func expandDashboardRowLayoutMap(c *Client, f map[string]DashboardRowLayout, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayout(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutSlice expands the contents of DashboardRowLayout into a JSON -// request object. -func expandDashboardRowLayoutSlice(c *Client, f []DashboardRowLayout, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayout(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutMap flattens the contents of DashboardRowLayout from a JSON -// response object. -func flattenDashboardRowLayoutMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayout { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayout{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayout{} - } - - items := make(map[string]DashboardRowLayout) - for k, item := range a { - items[k] = *flattenDashboardRowLayout(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutSlice flattens the contents of DashboardRowLayout from a JSON -// response object. 
-func flattenDashboardRowLayoutSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayout { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayout{} - } - - if len(a) == 0 { - return []DashboardRowLayout{} - } - - items := make([]DashboardRowLayout, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayout(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayout expands an instance of DashboardRowLayout into a JSON -// request object. -func expandDashboardRowLayout(c *Client, f *DashboardRowLayout, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardRowLayoutRowsSlice(c, f.Rows, res); err != nil { - return nil, fmt.Errorf("error expanding Rows into rows: %w", err) - } else if v != nil { - m["rows"] = v - } - - return m, nil -} - -// flattenDashboardRowLayout flattens an instance of DashboardRowLayout from a JSON -// response object. -func flattenDashboardRowLayout(c *Client, i interface{}, res *Dashboard) *DashboardRowLayout { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayout{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayout - } - r.Rows = flattenDashboardRowLayoutRowsSlice(c, m["rows"], res) - - return r -} - -// expandDashboardRowLayoutRowsMap expands the contents of DashboardRowLayoutRows into a JSON -// request object. 
-func expandDashboardRowLayoutRowsMap(c *Client, f map[string]DashboardRowLayoutRows, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRows(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsSlice expands the contents of DashboardRowLayoutRows into a JSON -// request object. -func expandDashboardRowLayoutRowsSlice(c *Client, f []DashboardRowLayoutRows, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRows(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsMap flattens the contents of DashboardRowLayoutRows from a JSON -// response object. -func flattenDashboardRowLayoutRowsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRows { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRows{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRows{} - } - - items := make(map[string]DashboardRowLayoutRows) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRows(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsSlice flattens the contents of DashboardRowLayoutRows from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRows { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRows{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRows{} - } - - items := make([]DashboardRowLayoutRows, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRows(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRows expands an instance of DashboardRowLayoutRows into a JSON -// request object. -func expandDashboardRowLayoutRows(c *Client, f *DashboardRowLayoutRows, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Weight; !dcl.IsEmptyValueIndirect(v) { - m["weight"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsSlice(c, f.Widgets, res); err != nil { - return nil, fmt.Errorf("error expanding Widgets into widgets: %w", err) - } else if v != nil { - m["widgets"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRows flattens an instance of DashboardRowLayoutRows from a JSON -// response object. -func flattenDashboardRowLayoutRows(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRows { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRows{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRows - } - r.Weight = dcl.FlattenInteger(m["weight"]) - r.Widgets = flattenDashboardRowLayoutRowsWidgetsSlice(c, m["widgets"], res) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsMap expands the contents of DashboardRowLayoutRowsWidgets into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsMap(c *Client, f map[string]DashboardRowLayoutRowsWidgets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgets(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsSlice expands the contents of DashboardRowLayoutRowsWidgets into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsSlice(c *Client, f []DashboardRowLayoutRowsWidgets, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgets(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsMap flattens the contents of DashboardRowLayoutRowsWidgets from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgets { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgets{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgets{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgets) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgets(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsSlice flattens the contents of DashboardRowLayoutRowsWidgets from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgets { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgets{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgets{} - } - - items := make([]DashboardRowLayoutRowsWidgets, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgets(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgets expands an instance of DashboardRowLayoutRowsWidgets into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgets(c *Client, f *DashboardRowLayoutRowsWidgets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Title; !dcl.IsEmptyValueIndirect(v) { - m["title"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChart(c, f.XyChart, res); err != nil { - return nil, fmt.Errorf("error expanding XyChart into xyChart: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["xyChart"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsScorecard(c, f.Scorecard, res); err != nil { - return nil, fmt.Errorf("error expanding Scorecard into scorecard: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["scorecard"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsText(c, f.Text, res); err != nil { - return nil, fmt.Errorf("error expanding Text into text: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["text"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsBlank(c, f.Blank, res); err != nil { - return nil, fmt.Errorf("error expanding Blank into blank: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["blank"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsLogsPanel(c, f.LogsPanel, res); err != nil { - return nil, fmt.Errorf("error expanding LogsPanel into logsPanel: %w", err) - } else if 
!dcl.IsEmptyValueIndirect(v) { - m["logsPanel"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgets flattens an instance of DashboardRowLayoutRowsWidgets from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgets(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgets { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgets{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgets - } - r.Title = dcl.FlattenString(m["title"]) - r.XyChart = flattenDashboardRowLayoutRowsWidgetsXyChart(c, m["xyChart"], res) - r.Scorecard = flattenDashboardRowLayoutRowsWidgetsScorecard(c, m["scorecard"], res) - r.Text = flattenDashboardRowLayoutRowsWidgetsText(c, m["text"], res) - r.Blank = flattenDashboardRowLayoutRowsWidgetsBlank(c, m["blank"], res) - r.LogsPanel = flattenDashboardRowLayoutRowsWidgetsLogsPanel(c, m["logsPanel"], res) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartMap expands the contents of DashboardRowLayoutRowsWidgetsXyChart into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChart, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChart(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChart into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChart, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChart(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChart from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChart { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChart{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChart{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChart) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChart(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChart from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChart { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChart{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChart{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChart, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChart(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChart expands an instance of DashboardRowLayoutRowsWidgetsXyChart into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChart(c *Client, f *DashboardRowLayoutRowsWidgetsXyChart, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsSlice(c, f.DataSets, res); err != nil { - return nil, fmt.Errorf("error expanding DataSets into dataSets: %w", err) - } else if v != nil { - m["dataSets"] = v - } - if v := f.TimeshiftDuration; !dcl.IsEmptyValueIndirect(v) { - m["timeshiftDuration"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartThresholdsSlice(c, f.Thresholds, res); err != nil { - return nil, fmt.Errorf("error expanding Thresholds into thresholds: %w", err) - } else if v != nil { - m["thresholds"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartXAxis(c, f.XAxis, res); err != nil { - return nil, fmt.Errorf("error expanding XAxis into xAxis: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["xAxis"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartYAxis(c, f.YAxis, res); err != nil { - return nil, fmt.Errorf("error expanding YAxis into yAxis: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["yAxis"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartChartOptions(c, f.ChartOptions, res); err != nil { - return nil, fmt.Errorf("error expanding ChartOptions into chartOptions: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["chartOptions"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChart flattens an instance of DashboardRowLayoutRowsWidgetsXyChart from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChart(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChart { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChart{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChart - } - r.DataSets = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsSlice(c, m["dataSets"], res) - r.TimeshiftDuration = dcl.FlattenString(m["timeshiftDuration"]) - r.Thresholds = flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsSlice(c, m["thresholds"], res) - r.XAxis = flattenDashboardRowLayoutRowsWidgetsXyChartXAxis(c, m["xAxis"], res) - r.YAxis = flattenDashboardRowLayoutRowsWidgetsXyChartYAxis(c, m["yAxis"], res) - r.ChartOptions = flattenDashboardRowLayoutRowsWidgetsXyChartChartOptions(c, m["chartOptions"], res) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSets into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartDataSets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSets(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSets into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartDataSets, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSets(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSets from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSets { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSets{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSets{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSets) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSets(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSets from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSets { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSets{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSets{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSets, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSets(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSets expands an instance of DashboardRowLayoutRowsWidgetsXyChartDataSets into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSets(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartDataSets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(c, f.TimeSeriesQuery, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesQuery into timeSeriesQuery: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQuery"] = v - } - if v := f.PlotType; !dcl.IsEmptyValueIndirect(v) { - m["plotType"] = v - } - if v := f.LegendTemplate; !dcl.IsEmptyValueIndirect(v) { - m["legendTemplate"] = v - } - if v := f.MinAlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["minAlignmentPeriod"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSets flattens an instance of DashboardRowLayoutRowsWidgetsXyChartDataSets from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSets(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartDataSets { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartDataSets{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartDataSets - } - r.TimeSeriesQuery = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(c, m["timeSeriesQuery"], res) - r.PlotType = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum(m["plotType"]) - r.LegendTemplate = dcl.FlattenString(m["legendTemplate"]) - r.MinAlignmentPeriod = dcl.FlattenString(m["minAlignmentPeriod"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuerySlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuerySlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuerySlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuerySlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery expands an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, f.TimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilter into timeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilter"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, f.TimeSeriesFilterRatio, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilterRatio into timeSeriesFilterRatio: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilterRatio"] = v - } - if v := f.TimeSeriesQueryLanguage; !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQueryLanguage"] = v - } - if v := f.UnitOverride; !dcl.IsEmptyValueIndirect(v) { - m["unitOverride"] = v - } - - return m, nil 
-} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery flattens an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery - } - r.TimeSeriesFilter = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, m["timeSeriesFilter"], res) - r.TimeSeriesFilterRatio = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, m["timeSeriesFilterRatio"], res) - r.TimeSeriesQueryLanguage = dcl.FlattenString(m["timeSeriesQueryLanguage"]) - r.UnitOverride = dcl.FlattenString(m["unitOverride"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter expands an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - 
m["secondaryAggregation"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter flattens an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, m["aggregation"], res) - r.SecondaryAggregation = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation expands an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation flattens an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// 
expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation expands an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation flattens an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = 
dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter expands an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter flattens an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap expands the contents of 
DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio expands an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, f.Numerator, res); err != nil { - return nil, fmt.Errorf("error expanding Numerator into numerator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["numerator"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, f.Denominator, res); err != nil { - return nil, fmt.Errorf("error expanding Denominator into denominator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["denominator"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio flattens an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - } - r.Numerator = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, m["numerator"], res) - r.Denominator = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, m["denominator"], res) - r.SecondaryAggregation = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator expands an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator flattens an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation expands an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation flattens an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator expands an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator flattens an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice flattens the 
contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation expands an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation flattens an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation expands an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation flattens an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter expands an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter flattens an instance of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartThresholdsMap expands the contents of 
DashboardRowLayoutRowsWidgetsXyChartThresholds into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartThresholdsMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartThresholds(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartThresholdsSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartThresholds into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartThresholdsSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartThresholds, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartThresholds(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartThresholds from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartThresholds { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartThresholds{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartThresholds{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartThresholds) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartThresholds(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartThresholds from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartThresholds { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartThresholds{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartThresholds{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartThresholds, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartThresholds(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartThresholds expands an instance of DashboardRowLayoutRowsWidgetsXyChartThresholds into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartThresholds(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Value; !dcl.IsEmptyValueIndirect(v) { - m["value"] = v - } - if v := f.Color; !dcl.IsEmptyValueIndirect(v) { - m["color"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartThresholds flattens an instance of DashboardRowLayoutRowsWidgetsXyChartThresholds from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartThresholds(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartThresholds { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartThresholds{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartThresholds - } - r.Label = dcl.FlattenString(m["label"]) - r.Value = dcl.FlattenDouble(m["value"]) - r.Color = flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum(m["color"]) - r.Direction = flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartXAxisMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartXAxis into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartXAxisMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartXAxis, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartXAxis(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartXAxisSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartXAxis into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartXAxisSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartXAxis, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartXAxis(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartXAxisMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartXAxis from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartXAxisMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartXAxis { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartXAxis{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartXAxis{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartXAxis) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartXAxis(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartXAxisSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartXAxis from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartXAxisSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartXAxis { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartXAxis{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartXAxis{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartXAxis, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartXAxis(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartXAxis expands an instance of DashboardRowLayoutRowsWidgetsXyChartXAxis into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartXAxis(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartXAxis, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Scale; !dcl.IsEmptyValueIndirect(v) { - m["scale"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartXAxis flattens an instance of DashboardRowLayoutRowsWidgetsXyChartXAxis from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartXAxis(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartXAxis { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartXAxis{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartXAxis - } - r.Label = dcl.FlattenString(m["label"]) - r.Scale = flattenDashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum(m["scale"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartYAxisMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartYAxis into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartYAxisMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartYAxis, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartYAxis(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartYAxisSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartYAxis into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartYAxisSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartYAxis, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartYAxis(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartYAxisMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartYAxis from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartYAxisMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartYAxis { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartYAxis{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartYAxis{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartYAxis) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartYAxis(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartYAxisSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartYAxis from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartYAxisSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartYAxis { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartYAxis{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartYAxis{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartYAxis, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartYAxis(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartYAxis expands an instance of DashboardRowLayoutRowsWidgetsXyChartYAxis into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartYAxis(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartYAxis, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Scale; !dcl.IsEmptyValueIndirect(v) { - m["scale"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartYAxis flattens an instance of DashboardRowLayoutRowsWidgetsXyChartYAxis from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartYAxis(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartYAxis { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartYAxis{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartYAxis - } - r.Label = dcl.FlattenString(m["label"]) - r.Scale = flattenDashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum(m["scale"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsXyChartChartOptionsMap expands the contents of DashboardRowLayoutRowsWidgetsXyChartChartOptions into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartChartOptionsMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsXyChartChartOptions, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartChartOptions(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsXyChartChartOptionsSlice expands the contents of DashboardRowLayoutRowsWidgetsXyChartChartOptions into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsXyChartChartOptionsSlice(c *Client, f []DashboardRowLayoutRowsWidgetsXyChartChartOptions, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsXyChartChartOptions(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartChartOptionsMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartChartOptions from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartChartOptionsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartChartOptions { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartChartOptions{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartChartOptions{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartChartOptions) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartChartOptions(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartChartOptionsSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartChartOptions from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartChartOptionsSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartChartOptions { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartChartOptions{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartChartOptions{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartChartOptions, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartChartOptions(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsXyChartChartOptions expands an instance of DashboardRowLayoutRowsWidgetsXyChartChartOptions into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsXyChartChartOptions(c *Client, f *DashboardRowLayoutRowsWidgetsXyChartChartOptions, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Mode; !dcl.IsEmptyValueIndirect(v) { - m["mode"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartChartOptions flattens an instance of DashboardRowLayoutRowsWidgetsXyChartChartOptions from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartChartOptions(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsXyChartChartOptions { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsXyChartChartOptions{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsXyChartChartOptions - } - r.Mode = flattenDashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum(m["mode"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardMap expands the contents of DashboardRowLayoutRowsWidgetsScorecard into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecard, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecard(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecard into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecard, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecard(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecard from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecard { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecard{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecard{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecard) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecard(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecard from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecard { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecard{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecard{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecard, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecard(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecard expands an instance of DashboardRowLayoutRowsWidgetsScorecard into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecard(c *Client, f *DashboardRowLayoutRowsWidgetsScorecard, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(c, f.TimeSeriesQuery, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesQuery into timeSeriesQuery: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQuery"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsScorecardGaugeView(c, f.GaugeView, res); err != nil { - return nil, fmt.Errorf("error expanding GaugeView into gaugeView: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gaugeView"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsScorecardSparkChartView(c, f.SparkChartView, res); err != nil { - return nil, fmt.Errorf("error expanding SparkChartView into sparkChartView: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["sparkChartView"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsScorecardThresholdsSlice(c, f.Thresholds, res); err != nil { - return nil, fmt.Errorf("error expanding Thresholds into thresholds: %w", err) - } else if v != nil { - m["thresholds"] = v - } - - return 
m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecard flattens an instance of DashboardRowLayoutRowsWidgetsScorecard from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecard(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecard { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecard{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecard - } - r.TimeSeriesQuery = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(c, m["timeSeriesQuery"], res) - r.GaugeView = flattenDashboardRowLayoutRowsWidgetsScorecardGaugeView(c, m["gaugeView"], res) - r.SparkChartView = flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartView(c, m["sparkChartView"], res) - r.Thresholds = flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsSlice(c, m["thresholds"], res) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuerySlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuerySlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuerySlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuerySlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery expands an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, f.TimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilter into timeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilter"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, f.TimeSeriesFilterRatio, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilterRatio into timeSeriesFilterRatio: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilterRatio"] = v - } - if v := f.TimeSeriesQueryLanguage; !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQueryLanguage"] = v - } - if v := f.UnitOverride; !dcl.IsEmptyValueIndirect(v) { - m["unitOverride"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery 
flattens an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery - } - r.TimeSeriesFilter = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, m["timeSeriesFilter"], res) - r.TimeSeriesFilterRatio = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, m["timeSeriesFilterRatio"], res) - r.TimeSeriesQueryLanguage = dcl.FlattenString(m["timeSeriesQueryLanguage"]) - r.UnitOverride = dcl.FlattenString(m["unitOverride"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter expands an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := 
expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter flattens an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, m["aggregation"], res) - r.SecondaryAggregation = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation expands an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation flattens an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation expands an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation flattens an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - 
-// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter expands an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter flattens an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio expands an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, f.Numerator, res); err != nil { - return nil, fmt.Errorf("error expanding Numerator into numerator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["numerator"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, f.Denominator, res); err != nil { - return nil, fmt.Errorf("error expanding Denominator into denominator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["denominator"] = v - } - if v, err := 
expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio flattens an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - } - r.Numerator = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, m["numerator"], res) - r.Denominator = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, m["denominator"], res) - r.SecondaryAggregation = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - 
return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator expands an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator flattens an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation expands an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation flattens an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = 
dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator expands an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator flattens an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation expands an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation flattens an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - 
r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation expands an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation flattens an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = 
dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter expands an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter flattens an instance of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardGaugeViewMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardGaugeView into a JSON -// 
request object. -func expandDashboardRowLayoutRowsWidgetsScorecardGaugeViewMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardGaugeView, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardGaugeView(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardGaugeViewSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardGaugeView into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardGaugeViewSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardGaugeView, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardGaugeView(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardGaugeViewMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardGaugeView from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardGaugeViewMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardGaugeView { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardGaugeView{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardGaugeView{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardGaugeView) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardGaugeView(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardGaugeViewSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardGaugeView from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardGaugeViewSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardGaugeView { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardGaugeView{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardGaugeView{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardGaugeView, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardGaugeView(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardGaugeView expands an instance of DashboardRowLayoutRowsWidgetsScorecardGaugeView into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardGaugeView(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardGaugeView, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.LowerBound; !dcl.IsEmptyValueIndirect(v) { - m["lowerBound"] = v - } - if v := f.UpperBound; !dcl.IsEmptyValueIndirect(v) { - m["upperBound"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardGaugeView flattens an instance of DashboardRowLayoutRowsWidgetsScorecardGaugeView from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardGaugeView(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardGaugeView { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardGaugeView{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardGaugeView - } - r.LowerBound = dcl.FlattenDouble(m["lowerBound"]) - r.UpperBound = dcl.FlattenDouble(m["upperBound"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardSparkChartViewMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardSparkChartView into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardSparkChartViewMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardSparkChartView, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardSparkChartView(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardSparkChartView into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardSparkChartView, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardSparkChartView(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartViewMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardSparkChartView from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartViewMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardSparkChartView { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardSparkChartView{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardSparkChartView{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardSparkChartView) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartView(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardSparkChartView from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardSparkChartView { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardSparkChartView{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardSparkChartView{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardSparkChartView, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartView(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardSparkChartView expands an instance of DashboardRowLayoutRowsWidgetsScorecardSparkChartView into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardSparkChartView(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardSparkChartView, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.SparkChartType; !dcl.IsEmptyValueIndirect(v) { - m["sparkChartType"] = v - } - if v := f.MinAlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["minAlignmentPeriod"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartView flattens an instance of DashboardRowLayoutRowsWidgetsScorecardSparkChartView from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartView(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardSparkChartView { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardSparkChartView{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardSparkChartView - } - r.SparkChartType = flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum(m["sparkChartType"]) - r.MinAlignmentPeriod = dcl.FlattenString(m["minAlignmentPeriod"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsScorecardThresholdsMap expands the contents of DashboardRowLayoutRowsWidgetsScorecardThresholds into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardThresholdsMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsScorecardThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardThresholds(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsScorecardThresholdsSlice expands the contents of DashboardRowLayoutRowsWidgetsScorecardThresholds into a JSON -// request object. 
-func expandDashboardRowLayoutRowsWidgetsScorecardThresholdsSlice(c *Client, f []DashboardRowLayoutRowsWidgetsScorecardThresholds, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsScorecardThresholds(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardThresholds from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardThresholds { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardThresholds{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardThresholds{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardThresholds) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardThresholds(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardThresholds from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardThresholds { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardThresholds{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardThresholds{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardThresholds, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardThresholds(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsScorecardThresholds expands an instance of DashboardRowLayoutRowsWidgetsScorecardThresholds into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsScorecardThresholds(c *Client, f *DashboardRowLayoutRowsWidgetsScorecardThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Value; !dcl.IsEmptyValueIndirect(v) { - m["value"] = v - } - if v := f.Color; !dcl.IsEmptyValueIndirect(v) { - m["color"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardThresholds flattens an instance of DashboardRowLayoutRowsWidgetsScorecardThresholds from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardThresholds(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsScorecardThresholds { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsScorecardThresholds{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsScorecardThresholds - } - r.Label = dcl.FlattenString(m["label"]) - r.Value = dcl.FlattenDouble(m["value"]) - r.Color = flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum(m["color"]) - r.Direction = flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsTextMap expands the contents of DashboardRowLayoutRowsWidgetsText into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsTextMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsText, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsText(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsTextSlice expands the contents of DashboardRowLayoutRowsWidgetsText into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsTextSlice(c *Client, f []DashboardRowLayoutRowsWidgetsText, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsText(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsTextMap flattens the contents of DashboardRowLayoutRowsWidgetsText from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsTextMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsText { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsText{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsText{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsText) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsText(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsTextSlice flattens the contents of DashboardRowLayoutRowsWidgetsText from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsTextSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsText { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsText{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsText{} - } - - items := make([]DashboardRowLayoutRowsWidgetsText, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsText(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsText expands an instance of DashboardRowLayoutRowsWidgetsText into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsText(c *Client, f *DashboardRowLayoutRowsWidgetsText, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Content; !dcl.IsEmptyValueIndirect(v) { - m["content"] = v - } - if v := f.Format; !dcl.IsEmptyValueIndirect(v) { - m["format"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsText flattens an instance of DashboardRowLayoutRowsWidgetsText from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsText(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsText { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsText{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsText - } - r.Content = dcl.FlattenString(m["content"]) - r.Format = flattenDashboardRowLayoutRowsWidgetsTextFormatEnum(m["format"]) - - return r -} - -// expandDashboardRowLayoutRowsWidgetsBlankMap expands the contents of DashboardRowLayoutRowsWidgetsBlank into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsBlankMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsBlank, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsBlank(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsBlankSlice expands the contents of DashboardRowLayoutRowsWidgetsBlank into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsBlankSlice(c *Client, f []DashboardRowLayoutRowsWidgetsBlank, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsBlank(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsBlankMap flattens the contents of DashboardRowLayoutRowsWidgetsBlank from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsBlankMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsBlank { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsBlank{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsBlank{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsBlank) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsBlank(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsBlankSlice flattens the contents of DashboardRowLayoutRowsWidgetsBlank from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsBlankSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsBlank { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsBlank{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsBlank{} - } - - items := make([]DashboardRowLayoutRowsWidgetsBlank, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsBlank(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsBlank expands an instance of DashboardRowLayoutRowsWidgetsBlank into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsBlank(c *Client, f *DashboardRowLayoutRowsWidgetsBlank, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsBlank flattens an instance of DashboardRowLayoutRowsWidgetsBlank from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsBlank(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsBlank { - _, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsBlank{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsBlank - } - - return r -} - -// expandDashboardRowLayoutRowsWidgetsLogsPanelMap expands the contents of DashboardRowLayoutRowsWidgetsLogsPanel into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsLogsPanelMap(c *Client, f map[string]DashboardRowLayoutRowsWidgetsLogsPanel, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsLogsPanel(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardRowLayoutRowsWidgetsLogsPanelSlice expands the contents of DashboardRowLayoutRowsWidgetsLogsPanel into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsLogsPanelSlice(c *Client, f []DashboardRowLayoutRowsWidgetsLogsPanel, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardRowLayoutRowsWidgetsLogsPanel(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardRowLayoutRowsWidgetsLogsPanelMap flattens the contents of DashboardRowLayoutRowsWidgetsLogsPanel from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsLogsPanelMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsLogsPanel { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsLogsPanel{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsLogsPanel{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsLogsPanel) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsLogsPanel(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsLogsPanelSlice flattens the contents of DashboardRowLayoutRowsWidgetsLogsPanel from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsLogsPanelSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsLogsPanel { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsLogsPanel{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsLogsPanel{} - } - - items := make([]DashboardRowLayoutRowsWidgetsLogsPanel, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsLogsPanel(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardRowLayoutRowsWidgetsLogsPanel expands an instance of DashboardRowLayoutRowsWidgetsLogsPanel into a JSON -// request object. -func expandDashboardRowLayoutRowsWidgetsLogsPanel(c *Client, f *DashboardRowLayoutRowsWidgetsLogsPanel, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v := f.ResourceNames; v != nil { - m["resourceNames"] = v - } - - return m, nil -} - -// flattenDashboardRowLayoutRowsWidgetsLogsPanel flattens an instance of DashboardRowLayoutRowsWidgetsLogsPanel from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsLogsPanel(c *Client, i interface{}, res *Dashboard) *DashboardRowLayoutRowsWidgetsLogsPanel { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardRowLayoutRowsWidgetsLogsPanel{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardRowLayoutRowsWidgetsLogsPanel - } - r.Filter = dcl.FlattenString(m["filter"]) - r.ResourceNames = dcl.FlattenStringSlice(m["resourceNames"]) - - return r -} - -// expandDashboardColumnLayoutMap expands the contents of DashboardColumnLayout into a JSON -// request object. -func expandDashboardColumnLayoutMap(c *Client, f map[string]DashboardColumnLayout, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayout(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutSlice expands the contents of DashboardColumnLayout into a JSON -// request object. -func expandDashboardColumnLayoutSlice(c *Client, f []DashboardColumnLayout, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayout(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutMap flattens the contents of DashboardColumnLayout from a JSON -// response object. 
-func flattenDashboardColumnLayoutMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayout { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayout{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayout{} - } - - items := make(map[string]DashboardColumnLayout) - for k, item := range a { - items[k] = *flattenDashboardColumnLayout(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutSlice flattens the contents of DashboardColumnLayout from a JSON -// response object. -func flattenDashboardColumnLayoutSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayout { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayout{} - } - - if len(a) == 0 { - return []DashboardColumnLayout{} - } - - items := make([]DashboardColumnLayout, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayout(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayout expands an instance of DashboardColumnLayout into a JSON -// request object. -func expandDashboardColumnLayout(c *Client, f *DashboardColumnLayout, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardColumnLayoutColumnsSlice(c, f.Columns, res); err != nil { - return nil, fmt.Errorf("error expanding Columns into columns: %w", err) - } else if v != nil { - m["columns"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayout flattens an instance of DashboardColumnLayout from a JSON -// response object. 
-func flattenDashboardColumnLayout(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayout { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayout{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayout - } - r.Columns = flattenDashboardColumnLayoutColumnsSlice(c, m["columns"], res) - - return r -} - -// expandDashboardColumnLayoutColumnsMap expands the contents of DashboardColumnLayoutColumns into a JSON -// request object. -func expandDashboardColumnLayoutColumnsMap(c *Client, f map[string]DashboardColumnLayoutColumns, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumns(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsSlice expands the contents of DashboardColumnLayoutColumns into a JSON -// request object. -func expandDashboardColumnLayoutColumnsSlice(c *Client, f []DashboardColumnLayoutColumns, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumns(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsMap flattens the contents of DashboardColumnLayoutColumns from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumns { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumns{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumns{} - } - - items := make(map[string]DashboardColumnLayoutColumns) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumns(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsSlice flattens the contents of DashboardColumnLayoutColumns from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumns { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumns{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumns{} - } - - items := make([]DashboardColumnLayoutColumns, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumns(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumns expands an instance of DashboardColumnLayoutColumns into a JSON -// request object. -func expandDashboardColumnLayoutColumns(c *Client, f *DashboardColumnLayoutColumns, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Weight; !dcl.IsEmptyValueIndirect(v) { - m["weight"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsSlice(c, f.Widgets, res); err != nil { - return nil, fmt.Errorf("error expanding Widgets into widgets: %w", err) - } else if v != nil { - m["widgets"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumns flattens an instance of DashboardColumnLayoutColumns from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumns(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumns { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumns{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumns - } - r.Weight = dcl.FlattenInteger(m["weight"]) - r.Widgets = flattenDashboardColumnLayoutColumnsWidgetsSlice(c, m["widgets"], res) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsMap expands the contents of DashboardColumnLayoutColumnsWidgets into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgets(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsSlice expands the contents of DashboardColumnLayoutColumnsWidgets into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsSlice(c *Client, f []DashboardColumnLayoutColumnsWidgets, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgets(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsMap flattens the contents of DashboardColumnLayoutColumnsWidgets from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgets { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgets{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgets{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgets) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgets(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsSlice flattens the contents of DashboardColumnLayoutColumnsWidgets from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgets { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgets{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgets{} - } - - items := make([]DashboardColumnLayoutColumnsWidgets, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgets(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgets expands an instance of DashboardColumnLayoutColumnsWidgets into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgets(c *Client, f *DashboardColumnLayoutColumnsWidgets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Title; !dcl.IsEmptyValueIndirect(v) { - m["title"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChart(c, f.XyChart, res); err != nil { - return nil, fmt.Errorf("error expanding XyChart into xyChart: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["xyChart"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecard(c, f.Scorecard, res); err != nil { - return nil, fmt.Errorf("error expanding Scorecard into scorecard: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["scorecard"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsText(c, f.Text, res); err != nil { - return nil, fmt.Errorf("error expanding Text into text: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["text"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsBlank(c, f.Blank, res); err != nil { - return nil, fmt.Errorf("error expanding Blank into blank: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["blank"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsLogsPanel(c, f.LogsPanel, res); err != nil { - return nil, fmt.Errorf("error expanding LogsPanel into logsPanel: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["logsPanel"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgets flattens an instance of DashboardColumnLayoutColumnsWidgets from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgets(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgets { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgets{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgets - } - r.Title = dcl.FlattenString(m["title"]) - r.XyChart = flattenDashboardColumnLayoutColumnsWidgetsXyChart(c, m["xyChart"], res) - r.Scorecard = flattenDashboardColumnLayoutColumnsWidgetsScorecard(c, m["scorecard"], res) - r.Text = flattenDashboardColumnLayoutColumnsWidgetsText(c, m["text"], res) - r.Blank = flattenDashboardColumnLayoutColumnsWidgetsBlank(c, m["blank"], res) - r.LogsPanel = flattenDashboardColumnLayoutColumnsWidgetsLogsPanel(c, m["logsPanel"], res) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChart into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChart, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChart(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChart into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChart, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChart(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChart from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChart { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChart{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChart{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChart) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChart(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChart from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChart { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChart{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChart{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChart, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChart(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChart expands an instance of DashboardColumnLayoutColumnsWidgetsXyChart into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChart(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChart, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsSlice(c, f.DataSets, res); err != nil { - return nil, fmt.Errorf("error expanding DataSets into dataSets: %w", err) - } else if v != nil { - m["dataSets"] = v - } - if v := f.TimeshiftDuration; !dcl.IsEmptyValueIndirect(v) { - m["timeshiftDuration"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartThresholdsSlice(c, f.Thresholds, res); err != nil { - return nil, fmt.Errorf("error expanding Thresholds into thresholds: %w", err) - } else if v != nil { - m["thresholds"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartXAxis(c, f.XAxis, res); err != nil { - return nil, fmt.Errorf("error expanding XAxis into xAxis: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["xAxis"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartYAxis(c, f.YAxis, res); err != nil { - return nil, fmt.Errorf("error expanding YAxis into yAxis: %w", err) - } else if 
!dcl.IsEmptyValueIndirect(v) { - m["yAxis"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(c, f.ChartOptions, res); err != nil { - return nil, fmt.Errorf("error expanding ChartOptions into chartOptions: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["chartOptions"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChart flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChart from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChart(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChart { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChart{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChart - } - r.DataSets = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsSlice(c, m["dataSets"], res) - r.TimeshiftDuration = dcl.FlattenString(m["timeshiftDuration"]) - r.Thresholds = flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsSlice(c, m["thresholds"], res) - r.XAxis = flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxis(c, m["xAxis"], res) - r.YAxis = flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxis(c, m["yAxis"], res) - r.ChartOptions = flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(c, m["chartOptions"], res) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSets into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSets(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSets into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartDataSets, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSets(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSets from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSets { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSets{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSets{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSets) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSets(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSets from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSets { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSets{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSets{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSets, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSets(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSets expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSets into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSets(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartDataSets, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(c, f.TimeSeriesQuery, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesQuery into timeSeriesQuery: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQuery"] = v - } - if v := f.PlotType; !dcl.IsEmptyValueIndirect(v) { - m["plotType"] = v - } - if v := f.LegendTemplate; !dcl.IsEmptyValueIndirect(v) { - m["legendTemplate"] = v - } - if v := f.MinAlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["minAlignmentPeriod"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSets flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSets from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSets(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartDataSets { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartDataSets{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSets - } - r.TimeSeriesQuery = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(c, m["timeSeriesQuery"], res) - r.PlotType = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum(m["plotType"]) - r.LegendTemplate = dcl.FlattenString(m["legendTemplate"]) - r.MinAlignmentPeriod = dcl.FlattenString(m["minAlignmentPeriod"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuerySlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuerySlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuerySlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuerySlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, f.TimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilter into timeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilter"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, f.TimeSeriesFilterRatio, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilterRatio into timeSeriesFilterRatio: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilterRatio"] = v - } - if v := f.TimeSeriesQueryLanguage; !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQueryLanguage"] = v - } - if v := f.UnitOverride; !dcl.IsEmptyValueIndirect(v) { - m["unitOverride"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery - } - r.TimeSeriesFilter = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, m["timeSeriesFilter"], res) - r.TimeSeriesFilterRatio = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, m["timeSeriesFilterRatio"], res) - r.TimeSeriesQueryLanguage = dcl.FlattenString(m["timeSeriesQueryLanguage"]) - r.UnitOverride = dcl.FlattenString(m["unitOverride"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, m["aggregation"], res) - r.SecondaryAggregation = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = 
dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// 
expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, f.Numerator, res); err != nil { - return nil, fmt.Errorf("error expanding Numerator into numerator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["numerator"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, f.Denominator, res); err != nil { - return nil, fmt.Errorf("error expanding Denominator into denominator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["denominator"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio - } - r.Numerator = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, m["numerator"], res) - r.Denominator = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, m["denominator"], res) - r.SecondaryAggregation = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// 
expandDashboardColumnLayoutColumnsWidgetsXyChartThresholdsMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartThresholds into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartThresholdsMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartThresholds(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartThresholdsSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartThresholds into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartThresholdsSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartThresholds, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartThresholds(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartThresholds from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartThresholds { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartThresholds{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartThresholds{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartThresholds) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholds(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartThresholds from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartThresholds { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartThresholds{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartThresholds{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartThresholds, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholds(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartThresholds expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartThresholds into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartThresholds(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Value; !dcl.IsEmptyValueIndirect(v) { - m["value"] = v - } - if v := f.Color; !dcl.IsEmptyValueIndirect(v) { - m["color"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholds flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartThresholds from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholds(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartThresholds { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartThresholds{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartThresholds - } - r.Label = dcl.FlattenString(m["label"]) - r.Value = dcl.FlattenDouble(m["value"]) - r.Color = flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum(m["color"]) - r.Direction = flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartXAxisMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartXAxis into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartXAxisMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartXAxis, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartXAxis(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartXAxisSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartXAxis into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartXAxisSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartXAxis, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartXAxis(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxisMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartXAxis from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxisMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartXAxis { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartXAxis{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartXAxis{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartXAxis) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxis(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxisSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartXAxis from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxisSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartXAxis { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartXAxis{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartXAxis{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartXAxis, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxis(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartXAxis expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartXAxis into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartXAxis(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartXAxis, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Scale; !dcl.IsEmptyValueIndirect(v) { - m["scale"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxis flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartXAxis from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxis(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartXAxis { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartXAxis{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartXAxis - } - r.Label = dcl.FlattenString(m["label"]) - r.Scale = flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum(m["scale"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartYAxisMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartYAxis into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartYAxisMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartYAxis, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartYAxis(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartYAxisSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartYAxis into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartYAxisSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartYAxis, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartYAxis(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxisMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartYAxis from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxisMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartYAxis { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartYAxis{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartYAxis{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartYAxis) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxis(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxisSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartYAxis from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxisSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartYAxis { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartYAxis{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartYAxis{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartYAxis, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxis(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartYAxis expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartYAxis into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartYAxis(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartYAxis, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Scale; !dcl.IsEmptyValueIndirect(v) { - m["scale"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxis flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartYAxis from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxis(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartYAxis { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartYAxis{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartYAxis - } - r.Label = dcl.FlattenString(m["label"]) - r.Scale = flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum(m["scale"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsMap expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartChartOptions into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsXyChartChartOptions, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsSlice expands the contents of DashboardColumnLayoutColumnsWidgetsXyChartChartOptions into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsXyChartChartOptions, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartChartOptions from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartChartOptions { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartChartOptions{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartChartOptions{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartChartOptions from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartChartOptions { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartChartOptions{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartChartOptions{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartChartOptions, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsXyChartChartOptions expands an instance of DashboardColumnLayoutColumnsWidgetsXyChartChartOptions into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(c *Client, f *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Mode; !dcl.IsEmptyValueIndirect(v) { - m["mode"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptions flattens an instance of DashboardColumnLayoutColumnsWidgetsXyChartChartOptions from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptions(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsXyChartChartOptions{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsXyChartChartOptions - } - r.Mode = flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum(m["mode"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecard into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecard, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecard(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecard into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecard, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecard(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecard from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecard { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecard{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecard{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecard) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecard(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecard from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecard { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecard{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecard{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecard, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecard(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecard expands an instance of DashboardColumnLayoutColumnsWidgetsScorecard into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecard(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecard, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(c, f.TimeSeriesQuery, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesQuery into timeSeriesQuery: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQuery"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(c, f.GaugeView, res); err != nil { - return nil, fmt.Errorf("error expanding GaugeView into gaugeView: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gaugeView"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(c, f.SparkChartView, res); err != nil { - return nil, fmt.Errorf("error expanding SparkChartView into sparkChartView: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["sparkChartView"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardThresholdsSlice(c, f.Thresholds, res); err != nil { - return nil, fmt.Errorf("error expanding Thresholds into thresholds: %w", err) - } else if v != nil { - m["thresholds"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecard flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecard from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecard(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecard { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecard{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecard - } - r.TimeSeriesQuery = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(c, m["timeSeriesQuery"], res) - r.GaugeView = flattenDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(c, m["gaugeView"], res) - r.SparkChartView = flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(c, m["sparkChartView"], res) - r.Thresholds = flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsSlice(c, m["thresholds"], res) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuerySlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuerySlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuerySlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuerySlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, f.TimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilter into timeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilter"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, f.TimeSeriesFilterRatio, res); err != nil { - return nil, fmt.Errorf("error expanding TimeSeriesFilterRatio into timeSeriesFilterRatio: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesFilterRatio"] = v - } - if v := f.TimeSeriesQueryLanguage; !dcl.IsEmptyValueIndirect(v) { - m["timeSeriesQueryLanguage"] = v - } - if v := f.UnitOverride; !dcl.IsEmptyValueIndirect(v) { - m["unitOverride"] = v - } - - return m, nil 
-} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery - } - r.TimeSeriesFilter = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, m["timeSeriesFilter"], res) - r.TimeSeriesFilterRatio = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, m["timeSeriesFilterRatio"], res) - r.TimeSeriesQueryLanguage = dcl.FlattenString(m["timeSeriesQueryLanguage"]) - r.UnitOverride = dcl.FlattenString(m["unitOverride"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - 
m["secondaryAggregation"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, m["aggregation"], res) - r.SecondaryAggregation = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// 
expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = 
dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioMap expands the contents of 
DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, f.Numerator, res); err != nil { - return nil, fmt.Errorf("error expanding Numerator into numerator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["numerator"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, f.Denominator, res); err != nil { - return nil, fmt.Errorf("error expanding Denominator into denominator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["denominator"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, f.SecondaryAggregation, res); err != nil { - return nil, fmt.Errorf("error expanding SecondaryAggregation into secondaryAggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["secondaryAggregation"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, f.PickTimeSeriesFilter, res); err != nil { - return nil, fmt.Errorf("error expanding PickTimeSeriesFilter into pickTimeSeriesFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pickTimeSeriesFilter"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio - } - r.Numerator = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, m["numerator"], res) - r.Denominator = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, m["denominator"], res) - r.SecondaryAggregation = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, m["secondaryAggregation"], res) - r.PickTimeSeriesFilter = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, m["pickTimeSeriesFilter"], res) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, f.Aggregation, res); err != nil { - return nil, fmt.Errorf("error expanding Aggregation into aggregation: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["aggregation"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator - } - r.Filter = dcl.FlattenString(m["filter"]) - r.Aggregation = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, m["aggregation"], res) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice flattens the 
contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.AlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["alignmentPeriod"] = v - } - if v := f.PerSeriesAligner; !dcl.IsEmptyValueIndirect(v) { - m["perSeriesAligner"] = v - } - if v := f.CrossSeriesReducer; !dcl.IsEmptyValueIndirect(v) { - m["crossSeriesReducer"] = v - } - if v := f.GroupByFields; v != nil { - m["groupByFields"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation - } - r.AlignmentPeriod = dcl.FlattenString(m["alignmentPeriod"]) - r.PerSeriesAligner = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(m["perSeriesAligner"]) - r.CrossSeriesReducer = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(m["crossSeriesReducer"]) - r.GroupByFields = dcl.FlattenStringSlice(m["groupByFields"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RankingMethod; !dcl.IsEmptyValueIndirect(v) { - m["rankingMethod"] = v - } - if v := f.NumTimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["numTimeSeries"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter - } - r.RankingMethod = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(m["rankingMethod"]) - r.NumTimeSeries = dcl.FlattenInteger(m["numTimeSeries"]) - r.Direction = flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewMap expands the contents 
of DashboardColumnLayoutColumnsWidgetsScorecardGaugeView into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardGaugeView, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardGaugeView into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardGaugeView, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardGaugeView from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardGaugeView { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardGaugeView{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardGaugeView{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardGaugeView from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardGaugeView { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardGaugeView{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardGaugeView{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardGaugeView, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardGaugeView expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardGaugeView into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.LowerBound; !dcl.IsEmptyValueIndirect(v) { - m["lowerBound"] = v - } - if v := f.UpperBound; !dcl.IsEmptyValueIndirect(v) { - m["upperBound"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardGaugeView flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardGaugeView from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardGaugeView(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardGaugeView{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardGaugeView - } - r.LowerBound = dcl.FlattenDouble(m["lowerBound"]) - r.UpperBound = dcl.FlattenDouble(m["upperBound"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.SparkChartType; !dcl.IsEmptyValueIndirect(v) { - m["sparkChartType"] = v - } - if v := f.MinAlignmentPeriod; !dcl.IsEmptyValueIndirect(v) { - m["minAlignmentPeriod"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardSparkChartView - } - r.SparkChartType = flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum(m["sparkChartType"]) - r.MinAlignmentPeriod = dcl.FlattenString(m["minAlignmentPeriod"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardThresholdsMap expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardThresholds into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardThresholdsMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsScorecardThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardThresholds(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardThresholdsSlice expands the contents of DashboardColumnLayoutColumnsWidgetsScorecardThresholds into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsScorecardThresholdsSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsScorecardThresholds, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsScorecardThresholds(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardThresholds from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardThresholds { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardThresholds{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardThresholds{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardThresholds) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholds(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardThresholds from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardThresholds { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardThresholds{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardThresholds{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardThresholds, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholds(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsScorecardThresholds expands an instance of DashboardColumnLayoutColumnsWidgetsScorecardThresholds into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsScorecardThresholds(c *Client, f *DashboardColumnLayoutColumnsWidgetsScorecardThresholds, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Label; !dcl.IsEmptyValueIndirect(v) { - m["label"] = v - } - if v := f.Value; !dcl.IsEmptyValueIndirect(v) { - m["value"] = v - } - if v := f.Color; !dcl.IsEmptyValueIndirect(v) { - m["color"] = v - } - if v := f.Direction; !dcl.IsEmptyValueIndirect(v) { - m["direction"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholds flattens an instance of DashboardColumnLayoutColumnsWidgetsScorecardThresholds from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholds(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsScorecardThresholds { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsScorecardThresholds{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsScorecardThresholds - } - r.Label = dcl.FlattenString(m["label"]) - r.Value = dcl.FlattenDouble(m["value"]) - r.Color = flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum(m["color"]) - r.Direction = flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum(m["direction"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsTextMap expands the contents of DashboardColumnLayoutColumnsWidgetsText into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsTextMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsText, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsText(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsTextSlice expands the contents of DashboardColumnLayoutColumnsWidgetsText into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsTextSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsText, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsText(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsTextMap flattens the contents of DashboardColumnLayoutColumnsWidgetsText from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsTextMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsText { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsText{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsText{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsText) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsText(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsTextSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsText from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsTextSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsText { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsText{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsText{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsText, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsText(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsText expands an instance of DashboardColumnLayoutColumnsWidgetsText into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsText(c *Client, f *DashboardColumnLayoutColumnsWidgetsText, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Content; !dcl.IsEmptyValueIndirect(v) { - m["content"] = v - } - if v := f.Format; !dcl.IsEmptyValueIndirect(v) { - m["format"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsText flattens an instance of DashboardColumnLayoutColumnsWidgetsText from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsText(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsText { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsText{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsText - } - r.Content = dcl.FlattenString(m["content"]) - r.Format = flattenDashboardColumnLayoutColumnsWidgetsTextFormatEnum(m["format"]) - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsBlankMap expands the contents of DashboardColumnLayoutColumnsWidgetsBlank into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsBlankMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsBlank, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsBlank(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsBlankSlice expands the contents of DashboardColumnLayoutColumnsWidgetsBlank into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsBlankSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsBlank, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsBlank(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsBlankMap flattens the contents of DashboardColumnLayoutColumnsWidgetsBlank from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsBlankMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsBlank { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsBlank{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsBlank{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsBlank) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsBlank(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsBlankSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsBlank from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsBlankSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsBlank { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsBlank{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsBlank{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsBlank, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsBlank(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsBlank expands an instance of DashboardColumnLayoutColumnsWidgetsBlank into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsBlank(c *Client, f *DashboardColumnLayoutColumnsWidgetsBlank, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsBlank flattens an instance of DashboardColumnLayoutColumnsWidgetsBlank from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsBlank(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsBlank { - _, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsBlank{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsBlank - } - - return r -} - -// expandDashboardColumnLayoutColumnsWidgetsLogsPanelMap expands the contents of DashboardColumnLayoutColumnsWidgetsLogsPanel into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsLogsPanelMap(c *Client, f map[string]DashboardColumnLayoutColumnsWidgetsLogsPanel, res *Dashboard) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsLogsPanel(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandDashboardColumnLayoutColumnsWidgetsLogsPanelSlice expands the contents of DashboardColumnLayoutColumnsWidgetsLogsPanel into a JSON -// request object. -func expandDashboardColumnLayoutColumnsWidgetsLogsPanelSlice(c *Client, f []DashboardColumnLayoutColumnsWidgetsLogsPanel, res *Dashboard) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandDashboardColumnLayoutColumnsWidgetsLogsPanel(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsLogsPanelMap flattens the contents of DashboardColumnLayoutColumnsWidgetsLogsPanel from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsLogsPanelMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsLogsPanel { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsLogsPanel{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsLogsPanel{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsLogsPanel) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsLogsPanel(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsLogsPanelSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsLogsPanel from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsLogsPanelSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsLogsPanel { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsLogsPanel{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsLogsPanel{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsLogsPanel, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsLogsPanel(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandDashboardColumnLayoutColumnsWidgetsLogsPanel expands an instance of DashboardColumnLayoutColumnsWidgetsLogsPanel into a JSON -// request object. 
-func expandDashboardColumnLayoutColumnsWidgetsLogsPanel(c *Client, f *DashboardColumnLayoutColumnsWidgetsLogsPanel, res *Dashboard) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - m["filter"] = v - } - if v := f.ResourceNames; v != nil { - m["resourceNames"] = v - } - - return m, nil -} - -// flattenDashboardColumnLayoutColumnsWidgetsLogsPanel flattens an instance of DashboardColumnLayoutColumnsWidgetsLogsPanel from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsLogsPanel(c *Client, i interface{}, res *Dashboard) *DashboardColumnLayoutColumnsWidgetsLogsPanel { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &DashboardColumnLayoutColumnsWidgetsLogsPanel{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyDashboardColumnLayoutColumnsWidgetsLogsPanel - } - r.Filter = dcl.FlattenString(m["filter"]) - r.ResourceNames = dcl.FlattenStringSlice(m["resourceNames"]) - - return r -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// 
response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of 
DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON 
-// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnumSlice flattens the contents of DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartThresholdsColorEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartThresholdsColorEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartThresholdsColorEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartThresholdsColorEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartThresholdsColorEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartThresholdsColorEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartThresholdsColorEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartThresholdsColorEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartThresholdsColorEnumSlice flattens the contents of DashboardGridLayoutWidgetsXyChartThresholdsColorEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartThresholdsColorEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartThresholdsColorEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartThresholdsColorEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartThresholdsColorEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartThresholdsColorEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartThresholdsColorEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartThresholdsColorEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartThresholdsColorEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsXyChartThresholdsColorEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartThresholdsColorEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartThresholdsColorEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartThresholdsDirectionEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartThresholdsDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartThresholdsDirectionEnumSlice flattens the contents of DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartThresholdsDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartXAxisScaleEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartXAxisScaleEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartXAxisScaleEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartXAxisScaleEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartXAxisScaleEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartXAxisScaleEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartXAxisScaleEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartXAxisScaleEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartXAxisScaleEnumSlice flattens the contents of DashboardGridLayoutWidgetsXyChartXAxisScaleEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartXAxisScaleEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartXAxisScaleEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartXAxisScaleEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartXAxisScaleEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartXAxisScaleEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartXAxisScaleEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartXAxisScaleEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartXAxisScaleEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsXyChartXAxisScaleEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartXAxisScaleEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartXAxisScaleEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartYAxisScaleEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartYAxisScaleEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartYAxisScaleEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartYAxisScaleEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartYAxisScaleEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartYAxisScaleEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartYAxisScaleEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartYAxisScaleEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartYAxisScaleEnumSlice flattens the contents of DashboardGridLayoutWidgetsXyChartYAxisScaleEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartYAxisScaleEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartYAxisScaleEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartYAxisScaleEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartYAxisScaleEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartYAxisScaleEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartYAxisScaleEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartYAxisScaleEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartYAxisScaleEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsXyChartYAxisScaleEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartYAxisScaleEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartYAxisScaleEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsXyChartChartOptionsModeEnumMap flattens the contents of DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsXyChartChartOptionsModeEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsXyChartChartOptionsModeEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartChartOptionsModeEnumSlice flattens the contents of DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsXyChartChartOptionsModeEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum{} - } - - items := make([]DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsXyChartChartOptionsModeEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsXyChartChartOptionsModeEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsXyChartChartOptionsModeEnum(i interface{}) *DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsXyChartChartOptionsModeEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice flattens the contents of DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnumSlice flattens the contents of DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardThresholdsColorEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardThresholdsColorEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardThresholdsColorEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardThresholdsColorEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardThresholdsColorEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardThresholdsColorEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardThresholdsColorEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardThresholdsColorEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardThresholdsColorEnumSlice flattens the contents of DashboardGridLayoutWidgetsScorecardThresholdsColorEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardThresholdsColorEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardThresholdsColorEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardThresholdsColorEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardThresholdsColorEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardThresholdsColorEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardThresholdsColorEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardThresholdsColorEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardThresholdsColorEnum with the same value as that string. -func flattenDashboardGridLayoutWidgetsScorecardThresholdsColorEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardThresholdsColorEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardThresholdsColorEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsScorecardThresholdsDirectionEnumMap flattens the contents of DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum from a JSON -// response object. 
-func flattenDashboardGridLayoutWidgetsScorecardThresholdsDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardThresholdsDirectionEnumSlice flattens the contents of DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsScorecardThresholdsDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum{} - } - - items := make([]DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum(i interface{}) *DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnumRef(s) -} - -// flattenDashboardGridLayoutWidgetsTextFormatEnumMap flattens the contents of DashboardGridLayoutWidgetsTextFormatEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsTextFormatEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardGridLayoutWidgetsTextFormatEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardGridLayoutWidgetsTextFormatEnum{} - } - - if len(a) == 0 { - return map[string]DashboardGridLayoutWidgetsTextFormatEnum{} - } - - items := make(map[string]DashboardGridLayoutWidgetsTextFormatEnum) - for k, item := range a { - items[k] = *flattenDashboardGridLayoutWidgetsTextFormatEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsTextFormatEnumSlice flattens the contents of DashboardGridLayoutWidgetsTextFormatEnum from a JSON -// response object. -func flattenDashboardGridLayoutWidgetsTextFormatEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardGridLayoutWidgetsTextFormatEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardGridLayoutWidgetsTextFormatEnum{} - } - - if len(a) == 0 { - return []DashboardGridLayoutWidgetsTextFormatEnum{} - } - - items := make([]DashboardGridLayoutWidgetsTextFormatEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardGridLayoutWidgetsTextFormatEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardGridLayoutWidgetsTextFormatEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardGridLayoutWidgetsTextFormatEnum with the same value as that string. 
-func flattenDashboardGridLayoutWidgetsTextFormatEnum(i interface{}) *DashboardGridLayoutWidgetsTextFormatEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardGridLayoutWidgetsTextFormatEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// 
response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// 
flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// 
flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// 
flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// 
flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// 
flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum with the same value as that string. -func flattenDashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum with the same value as that string. -func flattenDashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum with the same value as that string. -func flattenDashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the same value as that string. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the same value as that string. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// 
response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the same value as that string. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of 
DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON 
-// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the same value as that string. -func flattenDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum with the same value as that string. -func flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum with the same value as that string. 
-func flattenDashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnumRef(s) -} - -// flattenDashboardMosaicLayoutTilesWidgetTextFormatEnumMap flattens the contents of DashboardMosaicLayoutTilesWidgetTextFormatEnum from a JSON -// response object. -func flattenDashboardMosaicLayoutTilesWidgetTextFormatEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardMosaicLayoutTilesWidgetTextFormatEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardMosaicLayoutTilesWidgetTextFormatEnum{} - } - - if len(a) == 0 { - return map[string]DashboardMosaicLayoutTilesWidgetTextFormatEnum{} - } - - items := make(map[string]DashboardMosaicLayoutTilesWidgetTextFormatEnum) - for k, item := range a { - items[k] = *flattenDashboardMosaicLayoutTilesWidgetTextFormatEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetTextFormatEnumSlice flattens the contents of DashboardMosaicLayoutTilesWidgetTextFormatEnum from a JSON -// response object. 
-func flattenDashboardMosaicLayoutTilesWidgetTextFormatEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardMosaicLayoutTilesWidgetTextFormatEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardMosaicLayoutTilesWidgetTextFormatEnum{} - } - - if len(a) == 0 { - return []DashboardMosaicLayoutTilesWidgetTextFormatEnum{} - } - - items := make([]DashboardMosaicLayoutTilesWidgetTextFormatEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardMosaicLayoutTilesWidgetTextFormatEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardMosaicLayoutTilesWidgetTextFormatEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardMosaicLayoutTilesWidgetTextFormatEnum with the same value as that string. -func flattenDashboardMosaicLayoutTilesWidgetTextFormatEnum(i interface{}) *DashboardMosaicLayoutTilesWidgetTextFormatEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardMosaicLayoutTilesWidgetTextFormatEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the same value as that string. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice flattens the contents 
of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice flattens the 
contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// 
flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents 
of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum with the same value as that string. -func flattenDashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum with the same value as that string. -func flattenDashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum with the same value as that string. -func flattenDashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum(i interface{}) *DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the same value as that string. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the same value as that string. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// 
response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the same value as that string. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the same value as that string. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of 
DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the same value as that string. -func flattenDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum with the same value as that string. -func flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum from a JSON -// response object. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum(i interface{}) *DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnumRef(s) -} - -// flattenDashboardRowLayoutRowsWidgetsTextFormatEnumMap flattens the contents of DashboardRowLayoutRowsWidgetsTextFormatEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsTextFormatEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardRowLayoutRowsWidgetsTextFormatEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardRowLayoutRowsWidgetsTextFormatEnum{} - } - - if len(a) == 0 { - return map[string]DashboardRowLayoutRowsWidgetsTextFormatEnum{} - } - - items := make(map[string]DashboardRowLayoutRowsWidgetsTextFormatEnum) - for k, item := range a { - items[k] = *flattenDashboardRowLayoutRowsWidgetsTextFormatEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsTextFormatEnumSlice flattens the contents of DashboardRowLayoutRowsWidgetsTextFormatEnum from a JSON -// response object. -func flattenDashboardRowLayoutRowsWidgetsTextFormatEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardRowLayoutRowsWidgetsTextFormatEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardRowLayoutRowsWidgetsTextFormatEnum{} - } - - if len(a) == 0 { - return []DashboardRowLayoutRowsWidgetsTextFormatEnum{} - } - - items := make([]DashboardRowLayoutRowsWidgetsTextFormatEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardRowLayoutRowsWidgetsTextFormatEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardRowLayoutRowsWidgetsTextFormatEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardRowLayoutRowsWidgetsTextFormatEnum with the same value as that string. 
-func flattenDashboardRowLayoutRowsWidgetsTextFormatEnum(i interface{}) *DashboardRowLayoutRowsWidgetsTextFormatEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardRowLayoutRowsWidgetsTextFormatEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice flattens 
the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice 
flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// 
flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// 
flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// 
flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// 
flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// 
flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// 
flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum with the same value as that string. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum with the same value as that string. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum with the same value as that string. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum with the same value as that string. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice flattens the contents 
of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice flattens the 
contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// 
flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice flattens the contents 
of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice flattens the contents of 
DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum with the same value as that string. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum with the same value as that string. -func flattenDashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnumRef(s) -} - -// flattenDashboardColumnLayoutColumnsWidgetsTextFormatEnumMap flattens the contents of DashboardColumnLayoutColumnsWidgetsTextFormatEnum from a JSON -// response object. 
-func flattenDashboardColumnLayoutColumnsWidgetsTextFormatEnumMap(c *Client, i interface{}, res *Dashboard) map[string]DashboardColumnLayoutColumnsWidgetsTextFormatEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]DashboardColumnLayoutColumnsWidgetsTextFormatEnum{} - } - - if len(a) == 0 { - return map[string]DashboardColumnLayoutColumnsWidgetsTextFormatEnum{} - } - - items := make(map[string]DashboardColumnLayoutColumnsWidgetsTextFormatEnum) - for k, item := range a { - items[k] = *flattenDashboardColumnLayoutColumnsWidgetsTextFormatEnum(item.(interface{})) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsTextFormatEnumSlice flattens the contents of DashboardColumnLayoutColumnsWidgetsTextFormatEnum from a JSON -// response object. -func flattenDashboardColumnLayoutColumnsWidgetsTextFormatEnumSlice(c *Client, i interface{}, res *Dashboard) []DashboardColumnLayoutColumnsWidgetsTextFormatEnum { - a, ok := i.([]interface{}) - if !ok { - return []DashboardColumnLayoutColumnsWidgetsTextFormatEnum{} - } - - if len(a) == 0 { - return []DashboardColumnLayoutColumnsWidgetsTextFormatEnum{} - } - - items := make([]DashboardColumnLayoutColumnsWidgetsTextFormatEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenDashboardColumnLayoutColumnsWidgetsTextFormatEnum(item.(interface{}))) - } - - return items -} - -// flattenDashboardColumnLayoutColumnsWidgetsTextFormatEnum asserts that an interface is a string, and returns a -// pointer to a *DashboardColumnLayoutColumnsWidgetsTextFormatEnum with the same value as that string. 
-func flattenDashboardColumnLayoutColumnsWidgetsTextFormatEnum(i interface{}) *DashboardColumnLayoutColumnsWidgetsTextFormatEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return DashboardColumnLayoutColumnsWidgetsTextFormatEnumRef(s) -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. -func (r *Dashboard) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalDashboard(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type dashboardDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. 
- RequiresRecreate bool - UpdateOp dashboardApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToDashboardDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]dashboardDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []dashboardDiff - // For each operation name, create a dashboardDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := dashboardDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToDashboardApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToDashboardApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (dashboardApiOperation, error) { - switch opName { - - case "updateDashboardUpdateDashboardOperation": - return &updateDashboardUpdateDashboardOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractDashboardFields(r *Dashboard) error { - vGridLayout := r.GridLayout - if vGridLayout == nil { - // note: explicitly not the empty object. 
- vGridLayout = &DashboardGridLayout{} - } - if err := extractDashboardGridLayoutFields(r, vGridLayout); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGridLayout) { - r.GridLayout = vGridLayout - } - vMosaicLayout := r.MosaicLayout - if vMosaicLayout == nil { - // note: explicitly not the empty object. - vMosaicLayout = &DashboardMosaicLayout{} - } - if err := extractDashboardMosaicLayoutFields(r, vMosaicLayout); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMosaicLayout) { - r.MosaicLayout = vMosaicLayout - } - vRowLayout := r.RowLayout - if vRowLayout == nil { - // note: explicitly not the empty object. - vRowLayout = &DashboardRowLayout{} - } - if err := extractDashboardRowLayoutFields(r, vRowLayout); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRowLayout) { - r.RowLayout = vRowLayout - } - vColumnLayout := r.ColumnLayout - if vColumnLayout == nil { - // note: explicitly not the empty object. - vColumnLayout = &DashboardColumnLayout{} - } - if err := extractDashboardColumnLayoutFields(r, vColumnLayout); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vColumnLayout) { - r.ColumnLayout = vColumnLayout - } - return nil -} -func extractDashboardGridLayoutFields(r *Dashboard, o *DashboardGridLayout) error { - return nil -} -func extractDashboardGridLayoutWidgetsFields(r *Dashboard, o *DashboardGridLayoutWidgets) error { - vXyChart := o.XyChart - if vXyChart == nil { - // note: explicitly not the empty object. - vXyChart = &DashboardGridLayoutWidgetsXyChart{} - } - if err := extractDashboardGridLayoutWidgetsXyChartFields(r, vXyChart); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXyChart) { - o.XyChart = vXyChart - } - vScorecard := o.Scorecard - if vScorecard == nil { - // note: explicitly not the empty object. 
- vScorecard = &DashboardGridLayoutWidgetsScorecard{} - } - if err := extractDashboardGridLayoutWidgetsScorecardFields(r, vScorecard); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vScorecard) { - o.Scorecard = vScorecard - } - vText := o.Text - if vText == nil { - // note: explicitly not the empty object. - vText = &DashboardGridLayoutWidgetsText{} - } - if err := extractDashboardGridLayoutWidgetsTextFields(r, vText); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vText) { - o.Text = vText - } - vBlank := o.Blank - if vBlank == nil { - // note: explicitly not the empty object. - vBlank = &DashboardGridLayoutWidgetsBlank{} - } - if err := extractDashboardGridLayoutWidgetsBlankFields(r, vBlank); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBlank) { - o.Blank = vBlank - } - vLogsPanel := o.LogsPanel - if vLogsPanel == nil { - // note: explicitly not the empty object. - vLogsPanel = &DashboardGridLayoutWidgetsLogsPanel{} - } - if err := extractDashboardGridLayoutWidgetsLogsPanelFields(r, vLogsPanel); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLogsPanel) { - o.LogsPanel = vLogsPanel - } - return nil -} -func extractDashboardGridLayoutWidgetsXyChartFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChart) error { - vXAxis := o.XAxis - if vXAxis == nil { - // note: explicitly not the empty object. - vXAxis = &DashboardGridLayoutWidgetsXyChartXAxis{} - } - if err := extractDashboardGridLayoutWidgetsXyChartXAxisFields(r, vXAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXAxis) { - o.XAxis = vXAxis - } - vYAxis := o.YAxis - if vYAxis == nil { - // note: explicitly not the empty object. 
- vYAxis = &DashboardGridLayoutWidgetsXyChartYAxis{} - } - if err := extractDashboardGridLayoutWidgetsXyChartYAxisFields(r, vYAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYAxis) { - o.YAxis = vYAxis - } - vChartOptions := o.ChartOptions - if vChartOptions == nil { - // note: explicitly not the empty object. - vChartOptions = &DashboardGridLayoutWidgetsXyChartChartOptions{} - } - if err := extractDashboardGridLayoutWidgetsXyChartChartOptionsFields(r, vChartOptions); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vChartOptions) { - o.ChartOptions = vChartOptions - } - return nil -} -func extractDashboardGridLayoutWidgetsXyChartDataSetsFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSets) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - return nil -} -func extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vTimeSeriesFilter = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. 
- vTimeSeriesFilterRatio = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly not the empty object. - vNumerator = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. 
- vDenominator = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func 
extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func extractDashboardGridLayoutWidgetsXyChartThresholdsFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartThresholds) error { - return nil -} -func extractDashboardGridLayoutWidgetsXyChartXAxisFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartXAxis) error { - return nil -} -func extractDashboardGridLayoutWidgetsXyChartYAxisFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartYAxis) error { - return nil -} -func extractDashboardGridLayoutWidgetsXyChartChartOptionsFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartChartOptions) error { - return nil -} -func extractDashboardGridLayoutWidgetsScorecardFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecard) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardGridLayoutWidgetsScorecardTimeSeriesQuery{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - vGaugeView := o.GaugeView - if vGaugeView == nil { - // note: explicitly not the empty object. - vGaugeView = &DashboardGridLayoutWidgetsScorecardGaugeView{} - } - if err := extractDashboardGridLayoutWidgetsScorecardGaugeViewFields(r, vGaugeView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGaugeView) { - o.GaugeView = vGaugeView - } - vSparkChartView := o.SparkChartView - if vSparkChartView == nil { - // note: explicitly not the empty object. 
- vSparkChartView = &DashboardGridLayoutWidgetsScorecardSparkChartView{} - } - if err := extractDashboardGridLayoutWidgetsScorecardSparkChartViewFields(r, vSparkChartView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSparkChartView) { - o.SparkChartView = vSparkChartView - } - return nil -} -func extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vTimeSeriesFilter = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. - vTimeSeriesFilterRatio = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o 
*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly not the empty object. - vNumerator = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. - vDenominator = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func extractDashboardGridLayoutWidgetsScorecardGaugeViewFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardGaugeView) error { - return nil -} -func extractDashboardGridLayoutWidgetsScorecardSparkChartViewFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardSparkChartView) error { - return nil -} -func extractDashboardGridLayoutWidgetsScorecardThresholdsFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardThresholds) error { - return nil -} -func extractDashboardGridLayoutWidgetsTextFields(r *Dashboard, o *DashboardGridLayoutWidgetsText) error { - return nil -} -func extractDashboardGridLayoutWidgetsBlankFields(r *Dashboard, o *DashboardGridLayoutWidgetsBlank) error { - return nil -} -func extractDashboardGridLayoutWidgetsLogsPanelFields(r *Dashboard, o *DashboardGridLayoutWidgetsLogsPanel) error { - return nil -} -func 
extractDashboardMosaicLayoutFields(r *Dashboard, o *DashboardMosaicLayout) error { - return nil -} -func extractDashboardMosaicLayoutTilesFields(r *Dashboard, o *DashboardMosaicLayoutTiles) error { - vWidget := o.Widget - if vWidget == nil { - // note: explicitly not the empty object. - vWidget = &DashboardMosaicLayoutTilesWidget{} - } - if err := extractDashboardMosaicLayoutTilesWidgetFields(r, vWidget); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vWidget) { - o.Widget = vWidget - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidget) error { - vXyChart := o.XyChart - if vXyChart == nil { - // note: explicitly not the empty object. - vXyChart = &DashboardMosaicLayoutTilesWidgetXyChart{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartFields(r, vXyChart); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXyChart) { - o.XyChart = vXyChart - } - vScorecard := o.Scorecard - if vScorecard == nil { - // note: explicitly not the empty object. - vScorecard = &DashboardMosaicLayoutTilesWidgetScorecard{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardFields(r, vScorecard); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vScorecard) { - o.Scorecard = vScorecard - } - vText := o.Text - if vText == nil { - // note: explicitly not the empty object. - vText = &DashboardMosaicLayoutTilesWidgetText{} - } - if err := extractDashboardMosaicLayoutTilesWidgetTextFields(r, vText); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vText) { - o.Text = vText - } - vBlank := o.Blank - if vBlank == nil { - // note: explicitly not the empty object. 
- vBlank = &DashboardMosaicLayoutTilesWidgetBlank{} - } - if err := extractDashboardMosaicLayoutTilesWidgetBlankFields(r, vBlank); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBlank) { - o.Blank = vBlank - } - vLogsPanel := o.LogsPanel - if vLogsPanel == nil { - // note: explicitly not the empty object. - vLogsPanel = &DashboardMosaicLayoutTilesWidgetLogsPanel{} - } - if err := extractDashboardMosaicLayoutTilesWidgetLogsPanelFields(r, vLogsPanel); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLogsPanel) { - o.LogsPanel = vLogsPanel - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChart) error { - vXAxis := o.XAxis - if vXAxis == nil { - // note: explicitly not the empty object. - vXAxis = &DashboardMosaicLayoutTilesWidgetXyChartXAxis{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartXAxisFields(r, vXAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXAxis) { - o.XAxis = vXAxis - } - vYAxis := o.YAxis - if vYAxis == nil { - // note: explicitly not the empty object. - vYAxis = &DashboardMosaicLayoutTilesWidgetXyChartYAxis{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartYAxisFields(r, vYAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYAxis) { - o.YAxis = vYAxis - } - vChartOptions := o.ChartOptions - if vChartOptions == nil { - // note: explicitly not the empty object. 
- vChartOptions = &DashboardMosaicLayoutTilesWidgetXyChartChartOptions{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartChartOptionsFields(r, vChartOptions); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vChartOptions) { - o.ChartOptions = vChartOptions - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSets) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. 
- vTimeSeriesFilterRatio = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly not the empty object. - vNumerator = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. 
- vDenominator = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartThresholdsFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartThresholds) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartXAxisFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartXAxis) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartYAxisFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartYAxis) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetXyChartChartOptionsFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartChartOptions) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecard) error { - vTimeSeriesQuery := 
o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - vGaugeView := o.GaugeView - if vGaugeView == nil { - // note: explicitly not the empty object. - vGaugeView = &DashboardMosaicLayoutTilesWidgetScorecardGaugeView{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardGaugeViewFields(r, vGaugeView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGaugeView) { - o.GaugeView = vGaugeView - } - vSparkChartView := o.SparkChartView - if vSparkChartView == nil { - // note: explicitly not the empty object. - vSparkChartView = &DashboardMosaicLayoutTilesWidgetScorecardSparkChartView{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewFields(r, vSparkChartView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSparkChartView) { - o.SparkChartView = vSparkChartView - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. 
- vTimeSeriesFilterRatio = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly not the empty object. - vNumerator = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. 
- vDenominator = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func 
extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardGaugeViewFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardGaugeView) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetScorecardThresholdsFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardThresholds) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetTextFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetText) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetBlankFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetBlank) error { - return nil -} -func extractDashboardMosaicLayoutTilesWidgetLogsPanelFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetLogsPanel) error { - return nil -} -func extractDashboardRowLayoutFields(r *Dashboard, o *DashboardRowLayout) error { - return nil -} -func extractDashboardRowLayoutRowsFields(r *Dashboard, o *DashboardRowLayoutRows) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsFields(r *Dashboard, o *DashboardRowLayoutRowsWidgets) error { - vXyChart := o.XyChart - if vXyChart == nil { - // note: explicitly not the empty object. - vXyChart = &DashboardRowLayoutRowsWidgetsXyChart{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartFields(r, vXyChart); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXyChart) { - o.XyChart = vXyChart - } - vScorecard := o.Scorecard - if vScorecard == nil { - // note: explicitly not the empty object. 
- vScorecard = &DashboardRowLayoutRowsWidgetsScorecard{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardFields(r, vScorecard); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vScorecard) { - o.Scorecard = vScorecard - } - vText := o.Text - if vText == nil { - // note: explicitly not the empty object. - vText = &DashboardRowLayoutRowsWidgetsText{} - } - if err := extractDashboardRowLayoutRowsWidgetsTextFields(r, vText); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vText) { - o.Text = vText - } - vBlank := o.Blank - if vBlank == nil { - // note: explicitly not the empty object. - vBlank = &DashboardRowLayoutRowsWidgetsBlank{} - } - if err := extractDashboardRowLayoutRowsWidgetsBlankFields(r, vBlank); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBlank) { - o.Blank = vBlank - } - vLogsPanel := o.LogsPanel - if vLogsPanel == nil { - // note: explicitly not the empty object. - vLogsPanel = &DashboardRowLayoutRowsWidgetsLogsPanel{} - } - if err := extractDashboardRowLayoutRowsWidgetsLogsPanelFields(r, vLogsPanel); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLogsPanel) { - o.LogsPanel = vLogsPanel - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChart) error { - vXAxis := o.XAxis - if vXAxis == nil { - // note: explicitly not the empty object. - vXAxis = &DashboardRowLayoutRowsWidgetsXyChartXAxis{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartXAxisFields(r, vXAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXAxis) { - o.XAxis = vXAxis - } - vYAxis := o.YAxis - if vYAxis == nil { - // note: explicitly not the empty object. 
- vYAxis = &DashboardRowLayoutRowsWidgetsXyChartYAxis{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartYAxisFields(r, vYAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYAxis) { - o.YAxis = vYAxis - } - vChartOptions := o.ChartOptions - if vChartOptions == nil { - // note: explicitly not the empty object. - vChartOptions = &DashboardRowLayoutRowsWidgetsXyChartChartOptions{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartChartOptionsFields(r, vChartOptions); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vChartOptions) { - o.ChartOptions = vChartOptions - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartDataSetsFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSets) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. 
- vTimeSeriesFilterRatio = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly not the empty object. - vNumerator = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. 
- vDenominator = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func 
extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartThresholdsFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartThresholds) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartXAxisFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartXAxis) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartYAxisFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartYAxis) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsXyChartChartOptionsFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartChartOptions) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecard) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - vGaugeView := o.GaugeView - if vGaugeView == nil { - // note: explicitly not the empty object. - vGaugeView = &DashboardRowLayoutRowsWidgetsScorecardGaugeView{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardGaugeViewFields(r, vGaugeView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGaugeView) { - o.GaugeView = vGaugeView - } - vSparkChartView := o.SparkChartView - if vSparkChartView == nil { - // note: explicitly not the empty object. 
- vSparkChartView = &DashboardRowLayoutRowsWidgetsScorecardSparkChartView{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardSparkChartViewFields(r, vSparkChartView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSparkChartView) { - o.SparkChartView = vSparkChartView - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. - vTimeSeriesFilterRatio = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly not the empty object. - vNumerator = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. 
- vDenominator = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o 
*DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardGaugeViewFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardGaugeView) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardSparkChartViewFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardSparkChartView) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsScorecardThresholdsFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardThresholds) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsTextFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsText) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsBlankFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsBlank) error { - return nil -} -func extractDashboardRowLayoutRowsWidgetsLogsPanelFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsLogsPanel) error { - return nil -} -func extractDashboardColumnLayoutFields(r *Dashboard, o *DashboardColumnLayout) error { - return nil -} -func extractDashboardColumnLayoutColumnsFields(r *Dashboard, o *DashboardColumnLayoutColumns) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgets) error { - vXyChart := o.XyChart - if vXyChart == nil { - // note: explicitly not the empty object. - vXyChart = &DashboardColumnLayoutColumnsWidgetsXyChart{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartFields(r, vXyChart); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXyChart) { - o.XyChart = vXyChart - } - vScorecard := o.Scorecard - if vScorecard == nil { - // note: explicitly not the empty object. 
- vScorecard = &DashboardColumnLayoutColumnsWidgetsScorecard{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardFields(r, vScorecard); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vScorecard) { - o.Scorecard = vScorecard - } - vText := o.Text - if vText == nil { - // note: explicitly not the empty object. - vText = &DashboardColumnLayoutColumnsWidgetsText{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsTextFields(r, vText); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vText) { - o.Text = vText - } - vBlank := o.Blank - if vBlank == nil { - // note: explicitly not the empty object. - vBlank = &DashboardColumnLayoutColumnsWidgetsBlank{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsBlankFields(r, vBlank); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBlank) { - o.Blank = vBlank - } - vLogsPanel := o.LogsPanel - if vLogsPanel == nil { - // note: explicitly not the empty object. - vLogsPanel = &DashboardColumnLayoutColumnsWidgetsLogsPanel{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsLogsPanelFields(r, vLogsPanel); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLogsPanel) { - o.LogsPanel = vLogsPanel - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChart) error { - vXAxis := o.XAxis - if vXAxis == nil { - // note: explicitly not the empty object. - vXAxis = &DashboardColumnLayoutColumnsWidgetsXyChartXAxis{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartXAxisFields(r, vXAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXAxis) { - o.XAxis = vXAxis - } - vYAxis := o.YAxis - if vYAxis == nil { - // note: explicitly not the empty object. 
- vYAxis = &DashboardColumnLayoutColumnsWidgetsXyChartYAxis{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartYAxisFields(r, vYAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYAxis) { - o.YAxis = vYAxis - } - vChartOptions := o.ChartOptions - if vChartOptions == nil { - // note: explicitly not the empty object. - vChartOptions = &DashboardColumnLayoutColumnsWidgetsXyChartChartOptions{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsFields(r, vChartOptions); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vChartOptions) { - o.ChartOptions = vChartOptions - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSets) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. - vTimeSeriesFilterRatio = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. 
- vSecondaryAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) error { - 
vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly not the empty object. - vNumerator = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. - vDenominator = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartThresholdsFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartThresholds) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartXAxisFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartXAxis) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartYAxisFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartYAxis) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardFields(r *Dashboard, o 
*DashboardColumnLayoutColumnsWidgetsScorecard) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - vGaugeView := o.GaugeView - if vGaugeView == nil { - // note: explicitly not the empty object. - vGaugeView = &DashboardColumnLayoutColumnsWidgetsScorecardGaugeView{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewFields(r, vGaugeView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGaugeView) { - o.GaugeView = vGaugeView - } - vSparkChartView := o.SparkChartView - if vSparkChartView == nil { - // note: explicitly not the empty object. - vSparkChartView = &DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewFields(r, vSparkChartView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSparkChartView) { - o.SparkChartView = vSparkChartView - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. - vTimeSeriesFilterRatio = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. 
- vSecondaryAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if vNumerator == nil { - // note: 
explicitly not the empty object. - vNumerator = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. - vDenominator = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsScorecardThresholdsFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardThresholds) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsTextFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsText) error { - return nil -} -func extractDashboardColumnLayoutColumnsWidgetsBlankFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsBlank) error { - return nil -} 
-func extractDashboardColumnLayoutColumnsWidgetsLogsPanelFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsLogsPanel) error { - return nil -} - -func postReadExtractDashboardFields(r *Dashboard) error { - vGridLayout := r.GridLayout - if vGridLayout == nil { - // note: explicitly not the empty object. - vGridLayout = &DashboardGridLayout{} - } - if err := postReadExtractDashboardGridLayoutFields(r, vGridLayout); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGridLayout) { - r.GridLayout = vGridLayout - } - vMosaicLayout := r.MosaicLayout - if vMosaicLayout == nil { - // note: explicitly not the empty object. - vMosaicLayout = &DashboardMosaicLayout{} - } - if err := postReadExtractDashboardMosaicLayoutFields(r, vMosaicLayout); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMosaicLayout) { - r.MosaicLayout = vMosaicLayout - } - vRowLayout := r.RowLayout - if vRowLayout == nil { - // note: explicitly not the empty object. - vRowLayout = &DashboardRowLayout{} - } - if err := postReadExtractDashboardRowLayoutFields(r, vRowLayout); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRowLayout) { - r.RowLayout = vRowLayout - } - vColumnLayout := r.ColumnLayout - if vColumnLayout == nil { - // note: explicitly not the empty object. - vColumnLayout = &DashboardColumnLayout{} - } - if err := postReadExtractDashboardColumnLayoutFields(r, vColumnLayout); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vColumnLayout) { - r.ColumnLayout = vColumnLayout - } - return nil -} -func postReadExtractDashboardGridLayoutFields(r *Dashboard, o *DashboardGridLayout) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsFields(r *Dashboard, o *DashboardGridLayoutWidgets) error { - vXyChart := o.XyChart - if vXyChart == nil { - // note: explicitly not the empty object. 
- vXyChart = &DashboardGridLayoutWidgetsXyChart{} - } - if err := extractDashboardGridLayoutWidgetsXyChartFields(r, vXyChart); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXyChart) { - o.XyChart = vXyChart - } - vScorecard := o.Scorecard - if vScorecard == nil { - // note: explicitly not the empty object. - vScorecard = &DashboardGridLayoutWidgetsScorecard{} - } - if err := extractDashboardGridLayoutWidgetsScorecardFields(r, vScorecard); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vScorecard) { - o.Scorecard = vScorecard - } - vText := o.Text - if vText == nil { - // note: explicitly not the empty object. - vText = &DashboardGridLayoutWidgetsText{} - } - if err := extractDashboardGridLayoutWidgetsTextFields(r, vText); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vText) { - o.Text = vText - } - vBlank := o.Blank - if vBlank == nil { - // note: explicitly not the empty object. - vBlank = &DashboardGridLayoutWidgetsBlank{} - } - if err := extractDashboardGridLayoutWidgetsBlankFields(r, vBlank); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBlank) { - o.Blank = vBlank - } - vLogsPanel := o.LogsPanel - if vLogsPanel == nil { - // note: explicitly not the empty object. - vLogsPanel = &DashboardGridLayoutWidgetsLogsPanel{} - } - if err := extractDashboardGridLayoutWidgetsLogsPanelFields(r, vLogsPanel); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLogsPanel) { - o.LogsPanel = vLogsPanel - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChart) error { - vXAxis := o.XAxis - if vXAxis == nil { - // note: explicitly not the empty object. 
- vXAxis = &DashboardGridLayoutWidgetsXyChartXAxis{} - } - if err := extractDashboardGridLayoutWidgetsXyChartXAxisFields(r, vXAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXAxis) { - o.XAxis = vXAxis - } - vYAxis := o.YAxis - if vYAxis == nil { - // note: explicitly not the empty object. - vYAxis = &DashboardGridLayoutWidgetsXyChartYAxis{} - } - if err := extractDashboardGridLayoutWidgetsXyChartYAxisFields(r, vYAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYAxis) { - o.YAxis = vYAxis - } - vChartOptions := o.ChartOptions - if vChartOptions == nil { - // note: explicitly not the empty object. - vChartOptions = &DashboardGridLayoutWidgetsXyChartChartOptions{} - } - if err := extractDashboardGridLayoutWidgetsXyChartChartOptionsFields(r, vChartOptions); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vChartOptions) { - o.ChartOptions = vChartOptions - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartDataSetsFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSets) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vTimeSeriesFilter = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. - vTimeSeriesFilterRatio = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. 
- vSecondaryAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly 
not the empty object. - vNumerator = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. - vDenominator = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartThresholdsFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartThresholds) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartXAxisFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartXAxis) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartYAxisFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartYAxis) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsXyChartChartOptionsFields(r *Dashboard, o *DashboardGridLayoutWidgetsXyChartChartOptions) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecard) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil 
{ - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardGridLayoutWidgetsScorecardTimeSeriesQuery{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - vGaugeView := o.GaugeView - if vGaugeView == nil { - // note: explicitly not the empty object. - vGaugeView = &DashboardGridLayoutWidgetsScorecardGaugeView{} - } - if err := extractDashboardGridLayoutWidgetsScorecardGaugeViewFields(r, vGaugeView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGaugeView) { - o.GaugeView = vGaugeView - } - vSparkChartView := o.SparkChartView - if vSparkChartView == nil { - // note: explicitly not the empty object. - vSparkChartView = &DashboardGridLayoutWidgetsScorecardSparkChartView{} - } - if err := extractDashboardGridLayoutWidgetsScorecardSparkChartViewFields(r, vSparkChartView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSparkChartView) { - o.SparkChartView = vSparkChartView - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vTimeSeriesFilter = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. 
- vTimeSeriesFilterRatio = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly not the empty object. - vNumerator = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. 
- vDenominator = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o 
*DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardGaugeViewFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardGaugeView) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardSparkChartViewFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardSparkChartView) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsScorecardThresholdsFields(r *Dashboard, o *DashboardGridLayoutWidgetsScorecardThresholds) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsTextFields(r *Dashboard, o *DashboardGridLayoutWidgetsText) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsBlankFields(r *Dashboard, o *DashboardGridLayoutWidgetsBlank) error { - return nil -} -func postReadExtractDashboardGridLayoutWidgetsLogsPanelFields(r *Dashboard, o *DashboardGridLayoutWidgetsLogsPanel) error { - return nil -} -func postReadExtractDashboardMosaicLayoutFields(r *Dashboard, o *DashboardMosaicLayout) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesFields(r *Dashboard, o *DashboardMosaicLayoutTiles) error { - vWidget := o.Widget - if vWidget == nil { - // note: explicitly not the empty object. - vWidget = &DashboardMosaicLayoutTilesWidget{} - } - if err := extractDashboardMosaicLayoutTilesWidgetFields(r, vWidget); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vWidget) { - o.Widget = vWidget - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidget) error { - vXyChart := o.XyChart - if vXyChart == nil { - // note: explicitly not the empty object. 
- vXyChart = &DashboardMosaicLayoutTilesWidgetXyChart{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartFields(r, vXyChart); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXyChart) { - o.XyChart = vXyChart - } - vScorecard := o.Scorecard - if vScorecard == nil { - // note: explicitly not the empty object. - vScorecard = &DashboardMosaicLayoutTilesWidgetScorecard{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardFields(r, vScorecard); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vScorecard) { - o.Scorecard = vScorecard - } - vText := o.Text - if vText == nil { - // note: explicitly not the empty object. - vText = &DashboardMosaicLayoutTilesWidgetText{} - } - if err := extractDashboardMosaicLayoutTilesWidgetTextFields(r, vText); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vText) { - o.Text = vText - } - vBlank := o.Blank - if vBlank == nil { - // note: explicitly not the empty object. - vBlank = &DashboardMosaicLayoutTilesWidgetBlank{} - } - if err := extractDashboardMosaicLayoutTilesWidgetBlankFields(r, vBlank); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBlank) { - o.Blank = vBlank - } - vLogsPanel := o.LogsPanel - if vLogsPanel == nil { - // note: explicitly not the empty object. - vLogsPanel = &DashboardMosaicLayoutTilesWidgetLogsPanel{} - } - if err := extractDashboardMosaicLayoutTilesWidgetLogsPanelFields(r, vLogsPanel); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLogsPanel) { - o.LogsPanel = vLogsPanel - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChart) error { - vXAxis := o.XAxis - if vXAxis == nil { - // note: explicitly not the empty object. 
- vXAxis = &DashboardMosaicLayoutTilesWidgetXyChartXAxis{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartXAxisFields(r, vXAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXAxis) { - o.XAxis = vXAxis - } - vYAxis := o.YAxis - if vYAxis == nil { - // note: explicitly not the empty object. - vYAxis = &DashboardMosaicLayoutTilesWidgetXyChartYAxis{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartYAxisFields(r, vYAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYAxis) { - o.YAxis = vYAxis - } - vChartOptions := o.ChartOptions - if vChartOptions == nil { - // note: explicitly not the empty object. - vChartOptions = &DashboardMosaicLayoutTilesWidgetXyChartChartOptions{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartChartOptionsFields(r, vChartOptions); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vChartOptions) { - o.ChartOptions = vChartOptions - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartDataSetsFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSets) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. - vTimeSeriesFilterRatio = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. 
- vSecondaryAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) error { - 
vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly not the empty object. - vNumerator = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. - vDenominator = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartThresholdsFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartThresholds) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartXAxisFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartXAxis) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartYAxisFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartYAxis) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetXyChartChartOptionsFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetXyChartChartOptions) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardFields(r *Dashboard, o 
*DashboardMosaicLayoutTilesWidgetScorecard) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - vGaugeView := o.GaugeView - if vGaugeView == nil { - // note: explicitly not the empty object. - vGaugeView = &DashboardMosaicLayoutTilesWidgetScorecardGaugeView{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardGaugeViewFields(r, vGaugeView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGaugeView) { - o.GaugeView = vGaugeView - } - vSparkChartView := o.SparkChartView - if vSparkChartView == nil { - // note: explicitly not the empty object. - vSparkChartView = &DashboardMosaicLayoutTilesWidgetScorecardSparkChartView{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewFields(r, vSparkChartView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSparkChartView) { - o.SparkChartView = vSparkChartView - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. - vTimeSeriesFilterRatio = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. 
- vSecondaryAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly 
not the empty object. - vNumerator = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. - vDenominator = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardGaugeViewFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardGaugeView) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardSparkChartViewFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardSparkChartView) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetScorecardThresholdsFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetScorecardThresholds) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetTextFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetText) error { - return nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetBlankFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetBlank) error { - return 
nil -} -func postReadExtractDashboardMosaicLayoutTilesWidgetLogsPanelFields(r *Dashboard, o *DashboardMosaicLayoutTilesWidgetLogsPanel) error { - return nil -} -func postReadExtractDashboardRowLayoutFields(r *Dashboard, o *DashboardRowLayout) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsFields(r *Dashboard, o *DashboardRowLayoutRows) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsFields(r *Dashboard, o *DashboardRowLayoutRowsWidgets) error { - vXyChart := o.XyChart - if vXyChart == nil { - // note: explicitly not the empty object. - vXyChart = &DashboardRowLayoutRowsWidgetsXyChart{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartFields(r, vXyChart); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXyChart) { - o.XyChart = vXyChart - } - vScorecard := o.Scorecard - if vScorecard == nil { - // note: explicitly not the empty object. - vScorecard = &DashboardRowLayoutRowsWidgetsScorecard{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardFields(r, vScorecard); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vScorecard) { - o.Scorecard = vScorecard - } - vText := o.Text - if vText == nil { - // note: explicitly not the empty object. - vText = &DashboardRowLayoutRowsWidgetsText{} - } - if err := extractDashboardRowLayoutRowsWidgetsTextFields(r, vText); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vText) { - o.Text = vText - } - vBlank := o.Blank - if vBlank == nil { - // note: explicitly not the empty object. - vBlank = &DashboardRowLayoutRowsWidgetsBlank{} - } - if err := extractDashboardRowLayoutRowsWidgetsBlankFields(r, vBlank); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBlank) { - o.Blank = vBlank - } - vLogsPanel := o.LogsPanel - if vLogsPanel == nil { - // note: explicitly not the empty object. 
- vLogsPanel = &DashboardRowLayoutRowsWidgetsLogsPanel{} - } - if err := extractDashboardRowLayoutRowsWidgetsLogsPanelFields(r, vLogsPanel); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLogsPanel) { - o.LogsPanel = vLogsPanel - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChart) error { - vXAxis := o.XAxis - if vXAxis == nil { - // note: explicitly not the empty object. - vXAxis = &DashboardRowLayoutRowsWidgetsXyChartXAxis{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartXAxisFields(r, vXAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXAxis) { - o.XAxis = vXAxis - } - vYAxis := o.YAxis - if vYAxis == nil { - // note: explicitly not the empty object. - vYAxis = &DashboardRowLayoutRowsWidgetsXyChartYAxis{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartYAxisFields(r, vYAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYAxis) { - o.YAxis = vYAxis - } - vChartOptions := o.ChartOptions - if vChartOptions == nil { - // note: explicitly not the empty object. - vChartOptions = &DashboardRowLayoutRowsWidgetsXyChartChartOptions{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartChartOptionsFields(r, vChartOptions); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vChartOptions) { - o.ChartOptions = vChartOptions - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartDataSetsFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSets) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. 
- vTimeSeriesQuery = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. - vTimeSeriesFilterRatio = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly not the empty object. - vNumerator = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. 
- vDenominator = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartThresholdsFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartThresholds) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartXAxisFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartXAxis) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartYAxisFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartYAxis) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsXyChartChartOptionsFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsXyChartChartOptions) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecard) error { - 
vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - vGaugeView := o.GaugeView - if vGaugeView == nil { - // note: explicitly not the empty object. - vGaugeView = &DashboardRowLayoutRowsWidgetsScorecardGaugeView{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardGaugeViewFields(r, vGaugeView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGaugeView) { - o.GaugeView = vGaugeView - } - vSparkChartView := o.SparkChartView - if vSparkChartView == nil { - // note: explicitly not the empty object. - vSparkChartView = &DashboardRowLayoutRowsWidgetsScorecardSparkChartView{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardSparkChartViewFields(r, vSparkChartView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSparkChartView) { - o.SparkChartView = vSparkChartView - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. 
- vTimeSeriesFilterRatio = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly not the empty object. - vNumerator = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. 
- vDenominator = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func 
postReadExtractDashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardGaugeViewFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardGaugeView) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardSparkChartViewFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardSparkChartView) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsScorecardThresholdsFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsScorecardThresholds) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsTextFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsText) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsBlankFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsBlank) error { - return nil -} -func postReadExtractDashboardRowLayoutRowsWidgetsLogsPanelFields(r *Dashboard, o *DashboardRowLayoutRowsWidgetsLogsPanel) error { - return nil -} -func postReadExtractDashboardColumnLayoutFields(r *Dashboard, o *DashboardColumnLayout) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsFields(r *Dashboard, o *DashboardColumnLayoutColumns) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgets) error { - vXyChart := o.XyChart - if vXyChart == nil { - // note: explicitly not the empty object. - vXyChart = &DashboardColumnLayoutColumnsWidgetsXyChart{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartFields(r, vXyChart); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXyChart) { - o.XyChart = vXyChart - } - vScorecard := o.Scorecard - if vScorecard == nil { - // note: explicitly not the empty object. 
- vScorecard = &DashboardColumnLayoutColumnsWidgetsScorecard{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardFields(r, vScorecard); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vScorecard) { - o.Scorecard = vScorecard - } - vText := o.Text - if vText == nil { - // note: explicitly not the empty object. - vText = &DashboardColumnLayoutColumnsWidgetsText{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsTextFields(r, vText); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vText) { - o.Text = vText - } - vBlank := o.Blank - if vBlank == nil { - // note: explicitly not the empty object. - vBlank = &DashboardColumnLayoutColumnsWidgetsBlank{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsBlankFields(r, vBlank); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBlank) { - o.Blank = vBlank - } - vLogsPanel := o.LogsPanel - if vLogsPanel == nil { - // note: explicitly not the empty object. - vLogsPanel = &DashboardColumnLayoutColumnsWidgetsLogsPanel{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsLogsPanelFields(r, vLogsPanel); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLogsPanel) { - o.LogsPanel = vLogsPanel - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChart) error { - vXAxis := o.XAxis - if vXAxis == nil { - // note: explicitly not the empty object. - vXAxis = &DashboardColumnLayoutColumnsWidgetsXyChartXAxis{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartXAxisFields(r, vXAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vXAxis) { - o.XAxis = vXAxis - } - vYAxis := o.YAxis - if vYAxis == nil { - // note: explicitly not the empty object. 
- vYAxis = &DashboardColumnLayoutColumnsWidgetsXyChartYAxis{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartYAxisFields(r, vYAxis); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYAxis) { - o.YAxis = vYAxis - } - vChartOptions := o.ChartOptions - if vChartOptions == nil { - // note: explicitly not the empty object. - vChartOptions = &DashboardColumnLayoutColumnsWidgetsXyChartChartOptions{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsFields(r, vChartOptions); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vChartOptions) { - o.ChartOptions = vChartOptions - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSets) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. - vTimeSeriesFilterRatio = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. 
- vSecondaryAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o 
*DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if vNumerator == nil { - // note: explicitly not the empty object. - vNumerator = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. - vDenominator = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartThresholdsFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartThresholds) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartXAxisFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartXAxis) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartYAxisFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartYAxis) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsXyChartChartOptionsFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsXyChartChartOptions) error { - return nil -} -func 
postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecard) error { - vTimeSeriesQuery := o.TimeSeriesQuery - if vTimeSeriesQuery == nil { - // note: explicitly not the empty object. - vTimeSeriesQuery = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryFields(r, vTimeSeriesQuery); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesQuery) { - o.TimeSeriesQuery = vTimeSeriesQuery - } - vGaugeView := o.GaugeView - if vGaugeView == nil { - // note: explicitly not the empty object. - vGaugeView = &DashboardColumnLayoutColumnsWidgetsScorecardGaugeView{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewFields(r, vGaugeView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGaugeView) { - o.GaugeView = vGaugeView - } - vSparkChartView := o.SparkChartView - if vSparkChartView == nil { - // note: explicitly not the empty object. - vSparkChartView = &DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewFields(r, vSparkChartView); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSparkChartView) { - o.SparkChartView = vSparkChartView - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery) error { - vTimeSeriesFilter := o.TimeSeriesFilter - if vTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterFields(r, vTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilter) { - o.TimeSeriesFilter = vTimeSeriesFilter - } - vTimeSeriesFilterRatio := o.TimeSeriesFilterRatio - if vTimeSeriesFilterRatio == nil { - // note: explicitly not the empty object. - vTimeSeriesFilterRatio = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r, vTimeSeriesFilterRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTimeSeriesFilterRatio) { - o.TimeSeriesFilterRatio = vTimeSeriesFilterRatio - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. 
- vSecondaryAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. - vPickTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio) error { - vNumerator := o.Numerator - if 
vNumerator == nil { - // note: explicitly not the empty object. - vNumerator = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r, vNumerator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vNumerator) { - o.Numerator = vNumerator - } - vDenominator := o.Denominator - if vDenominator == nil { - // note: explicitly not the empty object. - vDenominator = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r, vDenominator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDenominator) { - o.Denominator = vDenominator - } - vSecondaryAggregation := o.SecondaryAggregation - if vSecondaryAggregation == nil { - // note: explicitly not the empty object. - vSecondaryAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r, vSecondaryAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSecondaryAggregation) { - o.SecondaryAggregation = vSecondaryAggregation - } - vPickTimeSeriesFilter := o.PickTimeSeriesFilter - if vPickTimeSeriesFilter == nil { - // note: explicitly not the empty object. 
- vPickTimeSeriesFilter = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r, vPickTimeSeriesFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPickTimeSeriesFilter) { - o.PickTimeSeriesFilter = vPickTimeSeriesFilter - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. - vAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator) error { - vAggregation := o.Aggregation - if vAggregation == nil { - // note: explicitly not the empty object. 
- vAggregation = &DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation{} - } - if err := extractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r, vAggregation); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAggregation) { - o.Aggregation = vAggregation - } - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardGaugeViewFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardGaugeView) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsScorecardThresholdsFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsScorecardThresholds) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsTextFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsText) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsBlankFields(r *Dashboard, o 
*DashboardColumnLayoutColumnsWidgetsBlank) error { - return nil -} -func postReadExtractDashboardColumnLayoutColumnsWidgetsLogsPanelFields(r *Dashboard, o *DashboardColumnLayoutColumnsWidgetsLogsPanel) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_schema.go deleted file mode 100644 index 9a77a9d089..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_schema.go +++ /dev/null @@ -1,6081 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLDashboardSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Monitoring/Dashboard", - Description: "The Monitoring Dashboard resource", - StructName: "Dashboard", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Dashboard", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "dashboard", - Required: true, - Description: "A full instance of a Dashboard", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Dashboard", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "dashboard", - Required: true, - Description: "A full instance of a Dashboard", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Dashboard", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "dashboard", - Required: true, - Description: "A full instance of a Dashboard", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Dashboard", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Dashboard", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Dashboard": &dcl.Component{ - Title: "Dashboard", - ID: "projects/{{project}}/dashboards/{{name}}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "displayName", - "project", - }, - Properties: map[string]*dcl.Property{ - 
"columnLayout": &dcl.Property{ - Type: "object", - GoName: "ColumnLayout", - GoType: "DashboardColumnLayout", - Description: "The content is divided into equally spaced columns and the widgets are arranged vertically.", - Conflicts: []string{ - "gridLayout", - "mosaicLayout", - "rowLayout", - }, - Properties: map[string]*dcl.Property{ - "columns": &dcl.Property{ - Type: "array", - GoName: "Columns", - Description: "The columns of content to display.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardColumnLayoutColumns", - Properties: map[string]*dcl.Property{ - "weight": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Weight", - Description: "The relative weight of this column. The column weight is used to adjust the width of columns on the screen (relative to peers). Greater the weight, greater the width of the column on the screen. If omitted, a value of 1 is used while rendering.", - }, - "widgets": &dcl.Property{ - Type: "array", - GoName: "Widgets", - Description: "The display widgets arranged vertically in this column.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardColumnLayoutColumnsWidgets", - Properties: map[string]*dcl.Property{ - "blank": &dcl.Property{ - Type: "object", - GoName: "Blank", - GoType: "DashboardColumnLayoutColumnsWidgetsBlank", - Description: "A blank space.", - Conflicts: []string{ - "xyChart", - "scorecard", - "text", - "logsPanel", - }, - Properties: map[string]*dcl.Property{}, - }, - "logsPanel": &dcl.Property{ - Type: "object", - GoName: "LogsPanel", - GoType: "DashboardColumnLayoutColumnsWidgetsLogsPanel", - Conflicts: []string{ - "xyChart", - "scorecard", - "text", - "blank", - }, - Properties: map[string]*dcl.Property{ - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "A filter that chooses which log entries to return. 
See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.", - }, - "resourceNames": &dcl.Property{ - Type: "array", - GoName: "ResourceNames", - Description: "The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - }, - }, - }, - }, - }, - }, - "scorecard": &dcl.Property{ - Type: "object", - GoName: "Scorecard", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecard", - Description: "A scorecard summarizing time series data.", - Conflicts: []string{ - "xyChart", - "text", - "blank", - "logsPanel", - }, - Required: []string{ - "timeSeriesQuery", - }, - Properties: map[string]*dcl.Property{ - "gaugeView": &dcl.Property{ - Type: "object", - GoName: "GaugeView", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardGaugeView", - Description: "Will cause the scorecard to show a gauge chart.", - Properties: map[string]*dcl.Property{ - "lowerBound": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "LowerBound", - Description: "The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.", - }, - "upperBound": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "UpperBound", - Description: "The upper bound for this gauge chart. 
The value of the chart should always be less than or equal to this.", - }, - }, - }, - "sparkChartView": &dcl.Property{ - Type: "object", - GoName: "SparkChartView", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView", - Description: "Will cause the scorecard to show a spark chart.", - Required: []string{ - "sparkChartType", - }, - Properties: map[string]*dcl.Property{ - "minAlignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "MinAlignmentPeriod", - Description: "The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.", - }, - "sparkChartType": &dcl.Property{ - Type: "string", - GoName: "SparkChartType", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum", - Description: "Required. The type of sparkchart to show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED, SPARK_LINE, SPARK_BAR", - Enum: []string{ - "SPARK_CHART_TYPE_UNSPECIFIED", - "SPARK_LINE", - "SPARK_BAR", - }, - }, - }, - }, - "thresholds": &dcl.Property{ - Type: "array", - GoName: "Thresholds", - Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardThresholds", - Properties: map[string]*dcl.Property{ - "color": &dcl.Property{ - Type: "string", - GoName: "Color", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum", - Description: "The state color for this threshold. Color is not allowed in a XyChart. Possible values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW, ORANGE, RED", - Enum: []string{ - "COLOR_UNSPECIFIED", - "GREY", - "BLUE", - "GREEN", - "YELLOW", - "ORANGE", - "RED", - }, - }, - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum", - Description: "The direction for the current threshold. Direction is not allowed in a XyChart. Possible values: DIRECTION_UNSPECIFIED, ABOVE, BELOW", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "ABOVE", - "BELOW", - }, - }, - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "A label for the threshold.", - }, - "value": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Value", - Description: "The value of the threshold. 
The value should be defined in the native scale of the metric.", - }, - }, - }, - }, - "timeSeriesQuery": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesQuery", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery", - Description: "Required. Fields for querying time series data from the Stackdriver metrics API.", - Properties: map[string]*dcl.Property{ - "timeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilter", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter", - Description: "Filter parameters to fetch time series.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. 
If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. 
The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. 
Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", - Description: "Apply a second aggregation after `aggregation` is applied.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. 
If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesFilterRatio": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilterRatio", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio", - Description: "Parameters to fetch a ratio between two time series filters.", - Properties: map[string]*dcl.Property{ - "denominator": &dcl.Property{ - Type: "object", - GoName: "Denominator", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator", - Description: "The denominator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. 
This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "numerator": &dcl.Property{ - Type: "object", - GoName: "Numerator", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator", - Description: "The numerator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. 
The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. 
If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", - Description: "Apply a second aggregation after the ratio is computed.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. 
Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesQueryLanguage": &dcl.Property{ - Type: "string", - GoName: "TimeSeriesQueryLanguage", - Description: "A query used to fetch time series.", - }, - "unitOverride": &dcl.Property{ - Type: "string", - GoName: "UnitOverride", - Description: "The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.", - }, - }, - }, - }, - }, - "text": &dcl.Property{ - Type: "object", - GoName: "Text", - GoType: "DashboardColumnLayoutColumnsWidgetsText", - Description: "A raw string or markdown displaying textual content.", - Conflicts: []string{ - "xyChart", - "scorecard", - "blank", - "logsPanel", - }, - Properties: map[string]*dcl.Property{ - "content": &dcl.Property{ - Type: "string", - GoName: "Content", - Description: "The text content to be displayed.", - }, - "format": &dcl.Property{ - Type: "string", - GoName: "Format", - GoType: "DashboardColumnLayoutColumnsWidgetsTextFormatEnum", - Description: "How the text content is formatted. 
Possible values: FORMAT_UNSPECIFIED, MARKDOWN, RAW", - Enum: []string{ - "FORMAT_UNSPECIFIED", - "MARKDOWN", - "RAW", - }, - }, - }, - }, - "title": &dcl.Property{ - Type: "string", - GoName: "Title", - Description: "Optional. The title of the widget.", - }, - "xyChart": &dcl.Property{ - Type: "object", - GoName: "XyChart", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChart", - Description: "A chart of time series data.", - Conflicts: []string{ - "scorecard", - "text", - "blank", - "logsPanel", - }, - Required: []string{ - "dataSets", - }, - Properties: map[string]*dcl.Property{ - "chartOptions": &dcl.Property{ - Type: "object", - GoName: "ChartOptions", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartChartOptions", - Description: "Display options for the chart.", - Properties: map[string]*dcl.Property{ - "mode": &dcl.Property{ - Type: "string", - GoName: "Mode", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum", - Description: "The chart mode. Possible values: MODE_UNSPECIFIED, COLOR, X_RAY, STATS", - Enum: []string{ - "MODE_UNSPECIFIED", - "COLOR", - "X_RAY", - "STATS", - }, - }, - }, - }, - "dataSets": &dcl.Property{ - Type: "array", - GoName: "DataSets", - Description: "Required. The data displayed in this chart.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSets", - Required: []string{ - "timeSeriesQuery", - }, - Properties: map[string]*dcl.Property{ - "legendTemplate": &dcl.Property{ - Type: "string", - GoName: "LegendTemplate", - Description: "A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value. ", - }, - "minAlignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "MinAlignmentPeriod", - Description: "Optional. 
The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.", - }, - "plotType": &dcl.Property{ - Type: "string", - GoName: "PlotType", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum", - Description: "How this data should be plotted on the chart. Possible values: PLOT_TYPE_UNSPECIFIED, LINE, STACKED_AREA, STACKED_BAR, HEATMAP", - Enum: []string{ - "PLOT_TYPE_UNSPECIFIED", - "LINE", - "STACKED_AREA", - "STACKED_BAR", - "HEATMAP", - }, - }, - "timeSeriesQuery": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesQuery", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery", - Description: "Required. Fields for querying time series data from the Stackdriver metrics API.", - Properties: map[string]*dcl.Property{ - "timeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilter", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter", - Description: "Filter parameters to fetch time series.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation", - Description: "By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", - Description: "Apply a second aggregation after `aggregation` is applied.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. 
Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesFilterRatio": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilterRatio", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio", - Description: "Parameters to fetch a ratio between two time series filters.", - Properties: map[string]*dcl.Property{ - "denominator": &dcl.Property{ - Type: "object", - GoName: "Denominator", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator", - Description: "The denominator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. 
This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "numerator": &dcl.Property{ - Type: "object", - GoName: "Numerator", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator", - Description: "The numerator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. 
The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. 
If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", - Description: "Apply a second aggregation after the ratio is computed.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. 
Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesQueryLanguage": &dcl.Property{ - Type: "string", - GoName: "TimeSeriesQueryLanguage", - Description: "A query used to fetch time series.", - }, - "unitOverride": &dcl.Property{ - Type: "string", - GoName: "UnitOverride", - Description: "The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.", - }, - }, - }, - }, - }, - }, - "thresholds": &dcl.Property{ - Type: "array", - GoName: "Thresholds", - Description: "Threshold lines drawn horizontally across the chart.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartThresholds", - Properties: map[string]*dcl.Property{ - "color": &dcl.Property{ - Type: "string", - GoName: "Color", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum", - Description: "The state color for this threshold. Color is not allowed in a XyChart. 
Possible values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW, ORANGE, RED", - Enum: []string{ - "COLOR_UNSPECIFIED", - "GREY", - "BLUE", - "GREEN", - "YELLOW", - "ORANGE", - "RED", - }, - }, - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum", - Description: "The direction for the current threshold. Direction is not allowed in a XyChart. Possible values: DIRECTION_UNSPECIFIED, ABOVE, BELOW", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "ABOVE", - "BELOW", - }, - }, - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "A label for the threshold.", - }, - "value": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Value", - Description: "The value of the threshold. The value should be defined in the native scale of the metric.", - }, - }, - }, - }, - "timeshiftDuration": &dcl.Property{ - Type: "string", - GoName: "TimeshiftDuration", - Description: "The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.", - }, - "xAxis": &dcl.Property{ - Type: "object", - GoName: "XAxis", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartXAxis", - Description: "The properties applied to the X axis.", - Properties: map[string]*dcl.Property{ - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "The label of the axis.", - }, - "scale": &dcl.Property{ - Type: "string", - GoName: "Scale", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum", - Description: "The axis scale. By default, a linear scale is used. 
Possible values: SCALE_UNSPECIFIED, LINEAR, LOG10", - Enum: []string{ - "SCALE_UNSPECIFIED", - "LINEAR", - "LOG10", - }, - }, - }, - }, - "yAxis": &dcl.Property{ - Type: "object", - GoName: "YAxis", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartYAxis", - Description: "The properties applied to the Y axis.", - Properties: map[string]*dcl.Property{ - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "The label of the axis.", - }, - "scale": &dcl.Property{ - Type: "string", - GoName: "Scale", - GoType: "DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum", - Description: "The axis scale. By default, a linear scale is used. Possible values: SCALE_UNSPECIFIED, LINEAR, LOG10", - Enum: []string{ - "SCALE_UNSPECIFIED", - "LINEAR", - "LOG10", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "Required. The mutable, human-readable name.", - }, - "etag": &dcl.Property{ - Type: "string", - GoName: "Etag", - ReadOnly: true, - Description: "\\`etag\\` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. An \\`etag\\` is returned in the response to \\`GetDashboard\\`, and users are expected to put that etag in the request to \\`UpdateDashboard\\` to ensure that their change will be applied to the same version of the Dashboard configuration. 
The field should not be passed during dashboard creation.", - Immutable: true, - }, - "gridLayout": &dcl.Property{ - Type: "object", - GoName: "GridLayout", - GoType: "DashboardGridLayout", - Description: "Content is arranged with a basic layout that re-flows a simple list of informational elements like widgets or tiles.", - Conflicts: []string{ - "mosaicLayout", - "rowLayout", - "columnLayout", - }, - Properties: map[string]*dcl.Property{ - "columns": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Columns", - Description: "The number of columns into which the view's width is divided. If omitted or set to zero, a system default will be used while rendering.", - }, - "widgets": &dcl.Property{ - Type: "array", - GoName: "Widgets", - Description: "The informational elements that are arranged into the columns row-first.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardGridLayoutWidgets", - Properties: map[string]*dcl.Property{ - "blank": &dcl.Property{ - Type: "object", - GoName: "Blank", - GoType: "DashboardGridLayoutWidgetsBlank", - Description: "A blank space.", - Conflicts: []string{ - "xyChart", - "scorecard", - "text", - "logsPanel", - }, - Properties: map[string]*dcl.Property{}, - }, - "logsPanel": &dcl.Property{ - Type: "object", - GoName: "LogsPanel", - GoType: "DashboardGridLayoutWidgetsLogsPanel", - Conflicts: []string{ - "xyChart", - "scorecard", - "text", - "blank", - }, - Properties: map[string]*dcl.Property{ - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.", - }, - "resourceNames": &dcl.Property{ - Type: "array", - GoName: "ResourceNames", - Description: "The names of logging resources to collect logs for. 
Currently only projects are supported. If empty, the widget will default to the host project.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - }, - }, - }, - }, - }, - }, - "scorecard": &dcl.Property{ - Type: "object", - GoName: "Scorecard", - GoType: "DashboardGridLayoutWidgetsScorecard", - Description: "A scorecard summarizing time series data.", - Conflicts: []string{ - "xyChart", - "text", - "blank", - "logsPanel", - }, - Required: []string{ - "timeSeriesQuery", - }, - Properties: map[string]*dcl.Property{ - "gaugeView": &dcl.Property{ - Type: "object", - GoName: "GaugeView", - GoType: "DashboardGridLayoutWidgetsScorecardGaugeView", - Description: "Will cause the scorecard to show a gauge chart.", - Properties: map[string]*dcl.Property{ - "lowerBound": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "LowerBound", - Description: "The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.", - }, - "upperBound": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "UpperBound", - Description: "The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.", - }, - }, - }, - "sparkChartView": &dcl.Property{ - Type: "object", - GoName: "SparkChartView", - GoType: "DashboardGridLayoutWidgetsScorecardSparkChartView", - Description: "Will cause the scorecard to show a spark chart.", - Required: []string{ - "sparkChartType", - }, - Properties: map[string]*dcl.Property{ - "minAlignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "MinAlignmentPeriod", - Description: "The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. 
For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.", - }, - "sparkChartType": &dcl.Property{ - Type: "string", - GoName: "SparkChartType", - GoType: "DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum", - Description: "Required. The type of sparkchart to show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED, SPARK_LINE, SPARK_BAR", - Enum: []string{ - "SPARK_CHART_TYPE_UNSPECIFIED", - "SPARK_LINE", - "SPARK_BAR", - }, - }, - }, - }, - "thresholds": &dcl.Property{ - Type: "array", - GoName: "Thresholds", - Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardGridLayoutWidgetsScorecardThresholds", - Properties: map[string]*dcl.Property{ - "color": &dcl.Property{ - Type: "string", - GoName: "Color", - GoType: "DashboardGridLayoutWidgetsScorecardThresholdsColorEnum", - Description: "The state color for this threshold. Color is not allowed in a XyChart. Possible values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW, ORANGE, RED", - Enum: []string{ - "COLOR_UNSPECIFIED", - "GREY", - "BLUE", - "GREEN", - "YELLOW", - "ORANGE", - "RED", - }, - }, - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum", - Description: "The direction for the current threshold. Direction is not allowed in a XyChart. Possible values: DIRECTION_UNSPECIFIED, ABOVE, BELOW", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "ABOVE", - "BELOW", - }, - }, - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "A label for the threshold.", - }, - "value": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Value", - Description: "The value of the threshold. 
The value should be defined in the native scale of the metric.", - }, - }, - }, - }, - "timeSeriesQuery": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesQuery", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQuery", - Description: "Required. Fields for querying time series data from the Stackdriver metrics API.", - Properties: map[string]*dcl.Property{ - "timeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilter", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter", - Description: "Filter parameters to fetch time series.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. 
If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. 
The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. 
Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", - Description: "Apply a second aggregation after `aggregation` is applied.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. 
If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. 
The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesFilterRatio": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilterRatio", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio", - Description: "Parameters to fetch a ratio between two time series filters.", - Properties: map[string]*dcl.Property{ - "denominator": &dcl.Property{ - Type: "object", - GoName: "Denominator", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator", - Description: "The denominator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. 
The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "numerator": &dcl.Property{ - Type: "object", - GoName: "Numerator", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator", - Description: "The numerator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. 
The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. 
If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", - Description: "Apply a second aggregation after the ratio is computed.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. 
Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesQueryLanguage": &dcl.Property{ - Type: "string", - GoName: "TimeSeriesQueryLanguage", - Description: "A query used to fetch time series.", - }, - "unitOverride": &dcl.Property{ - Type: "string", - GoName: "UnitOverride", - Description: "The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.", - }, - }, - }, - }, - }, - "text": &dcl.Property{ - Type: "object", - GoName: "Text", - GoType: "DashboardGridLayoutWidgetsText", - Description: "A raw string or markdown displaying textual content.", - Conflicts: []string{ - "xyChart", - "scorecard", - "blank", - "logsPanel", - }, - Properties: map[string]*dcl.Property{ - "content": &dcl.Property{ - Type: "string", - GoName: "Content", - Description: "The text content to be displayed.", - }, - "format": &dcl.Property{ - Type: "string", - GoName: "Format", - GoType: "DashboardGridLayoutWidgetsTextFormatEnum", - Description: "How the text content is formatted. 
Possible values: FORMAT_UNSPECIFIED, MARKDOWN, RAW", - Enum: []string{ - "FORMAT_UNSPECIFIED", - "MARKDOWN", - "RAW", - }, - }, - }, - }, - "title": &dcl.Property{ - Type: "string", - GoName: "Title", - Description: "Optional. The title of the widget.", - }, - "xyChart": &dcl.Property{ - Type: "object", - GoName: "XyChart", - GoType: "DashboardGridLayoutWidgetsXyChart", - Description: "A chart of time series data.", - Conflicts: []string{ - "scorecard", - "text", - "blank", - "logsPanel", - }, - Required: []string{ - "dataSets", - }, - Properties: map[string]*dcl.Property{ - "chartOptions": &dcl.Property{ - Type: "object", - GoName: "ChartOptions", - GoType: "DashboardGridLayoutWidgetsXyChartChartOptions", - Description: "Display options for the chart.", - Properties: map[string]*dcl.Property{ - "mode": &dcl.Property{ - Type: "string", - GoName: "Mode", - GoType: "DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum", - Description: "The chart mode. Possible values: MODE_UNSPECIFIED, COLOR, X_RAY, STATS", - Enum: []string{ - "MODE_UNSPECIFIED", - "COLOR", - "X_RAY", - "STATS", - }, - }, - }, - }, - "dataSets": &dcl.Property{ - Type: "array", - GoName: "DataSets", - Description: "Required. The data displayed in this chart.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardGridLayoutWidgetsXyChartDataSets", - Required: []string{ - "timeSeriesQuery", - }, - Properties: map[string]*dcl.Property{ - "legendTemplate": &dcl.Property{ - Type: "string", - GoName: "LegendTemplate", - Description: "A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value. ", - }, - "minAlignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "MinAlignmentPeriod", - Description: "Optional. 
The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.", - }, - "plotType": &dcl.Property{ - Type: "string", - GoName: "PlotType", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum", - Description: "How this data should be plotted on the chart. Possible values: PLOT_TYPE_UNSPECIFIED, LINE, STACKED_AREA, STACKED_BAR, HEATMAP", - Enum: []string{ - "PLOT_TYPE_UNSPECIFIED", - "LINE", - "STACKED_AREA", - "STACKED_BAR", - "HEATMAP", - }, - }, - "timeSeriesQuery": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesQuery", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery", - Description: "Required. Fields for querying time series data from the Stackdriver metrics API.", - Properties: map[string]*dcl.Property{ - "timeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilter", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter", - Description: "Filter parameters to fetch time series.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. 
This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", - Description: "Apply a second aggregation after `aggregation` is applied.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. 
Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesFilterRatio": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilterRatio", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio", - Description: "Parameters to fetch a ratio between two time series filters.", - Properties: map[string]*dcl.Property{ - "denominator": &dcl.Property{ - Type: "object", - GoName: "Denominator", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator", - Description: "The denominator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. 
The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "numerator": &dcl.Property{ - Type: "object", - GoName: "Numerator", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator", - Description: "The numerator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. 
The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. 
If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", - Description: "Apply a second aggregation after the ratio is computed.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. 
Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesQueryLanguage": &dcl.Property{ - Type: "string", - GoName: "TimeSeriesQueryLanguage", - Description: "A query used to fetch time series.", - }, - "unitOverride": &dcl.Property{ - Type: "string", - GoName: "UnitOverride", - Description: "The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.", - }, - }, - }, - }, - }, - }, - "thresholds": &dcl.Property{ - Type: "array", - GoName: "Thresholds", - Description: "Threshold lines drawn horizontally across the chart.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardGridLayoutWidgetsXyChartThresholds", - Properties: map[string]*dcl.Property{ - "color": &dcl.Property{ - Type: "string", - GoName: "Color", - GoType: "DashboardGridLayoutWidgetsXyChartThresholdsColorEnum", - Description: "The state color for this threshold. Color is not allowed in a XyChart. 
Possible values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW, ORANGE, RED", - Enum: []string{ - "COLOR_UNSPECIFIED", - "GREY", - "BLUE", - "GREEN", - "YELLOW", - "ORANGE", - "RED", - }, - }, - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum", - Description: "The direction for the current threshold. Direction is not allowed in a XyChart. Possible values: DIRECTION_UNSPECIFIED, ABOVE, BELOW", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "ABOVE", - "BELOW", - }, - }, - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "A label for the threshold.", - }, - "value": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Value", - Description: "The value of the threshold. The value should be defined in the native scale of the metric.", - }, - }, - }, - }, - "timeshiftDuration": &dcl.Property{ - Type: "string", - GoName: "TimeshiftDuration", - Description: "The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.", - }, - "xAxis": &dcl.Property{ - Type: "object", - GoName: "XAxis", - GoType: "DashboardGridLayoutWidgetsXyChartXAxis", - Description: "The properties applied to the X axis.", - Properties: map[string]*dcl.Property{ - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "The label of the axis.", - }, - "scale": &dcl.Property{ - Type: "string", - GoName: "Scale", - GoType: "DashboardGridLayoutWidgetsXyChartXAxisScaleEnum", - Description: "The axis scale. By default, a linear scale is used. 
Possible values: SCALE_UNSPECIFIED, LINEAR, LOG10", - Enum: []string{ - "SCALE_UNSPECIFIED", - "LINEAR", - "LOG10", - }, - }, - }, - }, - "yAxis": &dcl.Property{ - Type: "object", - GoName: "YAxis", - GoType: "DashboardGridLayoutWidgetsXyChartYAxis", - Description: "The properties applied to the Y axis.", - Properties: map[string]*dcl.Property{ - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "The label of the axis.", - }, - "scale": &dcl.Property{ - Type: "string", - GoName: "Scale", - GoType: "DashboardGridLayoutWidgetsXyChartYAxisScaleEnum", - Description: "The axis scale. By default, a linear scale is used. Possible values: SCALE_UNSPECIFIED, LINEAR, LOG10", - Enum: []string{ - "SCALE_UNSPECIFIED", - "LINEAR", - "LOG10", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "mosaicLayout": &dcl.Property{ - Type: "object", - GoName: "MosaicLayout", - GoType: "DashboardMosaicLayout", - Description: "The content is arranged as a grid of tiles, with each content widget occupying one or more tiles.", - Conflicts: []string{ - "gridLayout", - "rowLayout", - "columnLayout", - }, - Properties: map[string]*dcl.Property{ - "columns": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Columns", - Description: "The number of columns in the mosaic grid.", - }, - "tiles": &dcl.Property{ - Type: "array", - GoName: "Tiles", - Description: "The tiles to display.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardMosaicLayoutTiles", - Properties: map[string]*dcl.Property{ - "height": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Height", - Description: "The height of the tile, measured in grid squares.", - }, - "widget": &dcl.Property{ - Type: "object", - GoName: "Widget", - GoType: "DashboardMosaicLayoutTilesWidget", - Description: "The informational widget contained in the tile.", - Properties: map[string]*dcl.Property{ - "blank": &dcl.Property{ - Type: "object", - 
GoName: "Blank", - GoType: "DashboardMosaicLayoutTilesWidgetBlank", - Description: "A blank space.", - Conflicts: []string{ - "xyChart", - "scorecard", - "text", - "logsPanel", - }, - Properties: map[string]*dcl.Property{}, - }, - "logsPanel": &dcl.Property{ - Type: "object", - GoName: "LogsPanel", - GoType: "DashboardMosaicLayoutTilesWidgetLogsPanel", - Conflicts: []string{ - "xyChart", - "scorecard", - "text", - "blank", - }, - Properties: map[string]*dcl.Property{ - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.", - }, - "resourceNames": &dcl.Property{ - Type: "array", - GoName: "ResourceNames", - Description: "The names of logging resources to collect logs for. Currently only projects are supported. If empty, the widget will default to the host project.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - }, - }, - }, - }, - }, - }, - "scorecard": &dcl.Property{ - Type: "object", - GoName: "Scorecard", - GoType: "DashboardMosaicLayoutTilesWidgetScorecard", - Description: "A scorecard summarizing time series data.", - Conflicts: []string{ - "xyChart", - "text", - "blank", - "logsPanel", - }, - Required: []string{ - "timeSeriesQuery", - }, - Properties: map[string]*dcl.Property{ - "gaugeView": &dcl.Property{ - Type: "object", - GoName: "GaugeView", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardGaugeView", - Description: "Will cause the scorecard to show a gauge chart.", - Properties: map[string]*dcl.Property{ - "lowerBound": &dcl.Property{ - Type: "number", - Format: "double", - GoName: 
"LowerBound", - Description: "The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.", - }, - "upperBound": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "UpperBound", - Description: "The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.", - }, - }, - }, - "sparkChartView": &dcl.Property{ - Type: "object", - GoName: "SparkChartView", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardSparkChartView", - Description: "Will cause the scorecard to show a spark chart.", - Required: []string{ - "sparkChartType", - }, - Properties: map[string]*dcl.Property{ - "minAlignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "MinAlignmentPeriod", - Description: "The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.", - }, - "sparkChartType": &dcl.Property{ - Type: "string", - GoName: "SparkChartType", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum", - Description: "Required. The type of sparkchart to show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED, SPARK_LINE, SPARK_BAR", - Enum: []string{ - "SPARK_CHART_TYPE_UNSPECIFIED", - "SPARK_LINE", - "SPARK_BAR", - }, - }, - }, - }, - "thresholds": &dcl.Property{ - Type: "array", - GoName: "Thresholds", - Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. 
Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardThresholds", - Properties: map[string]*dcl.Property{ - "color": &dcl.Property{ - Type: "string", - GoName: "Color", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum", - Description: "The state color for this threshold. Color is not allowed in a XyChart. Possible values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW, ORANGE, RED", - Enum: []string{ - "COLOR_UNSPECIFIED", - "GREY", - "BLUE", - "GREEN", - "YELLOW", - "ORANGE", - "RED", - }, - }, - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum", - Description: "The direction for the current threshold. Direction is not allowed in a XyChart. 
Possible values: DIRECTION_UNSPECIFIED, ABOVE, BELOW", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "ABOVE", - "BELOW", - }, - }, - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "A label for the threshold.", - }, - "value": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Value", - Description: "The value of the threshold. The value should be defined in the native scale of the metric.", - }, - }, - }, - }, - "timeSeriesQuery": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesQuery", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery", - Description: "Required. Fields for querying time series data from the Stackdriver metrics API.", - Properties: map[string]*dcl.Property{ - "timeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilter", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter", - Description: "Filter parameters to fetch time series.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. 
If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. 
The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. 
Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", - Description: "Apply a second aggregation after `aggregation` is applied.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. 
If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesFilterRatio": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilterRatio", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio", - Description: "Parameters to fetch a ratio between two time series filters.", - Properties: map[string]*dcl.Property{ - "denominator": &dcl.Property{ - Type: "object", - GoName: "Denominator", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator", - Description: "The denominator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. 
The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "numerator": &dcl.Property{ - Type: "object", - GoName: "Numerator", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator", - Description: "The numerator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. 
The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. 
If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", - Description: "Apply a second aggregation after the ratio is computed.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. 
Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesQueryLanguage": &dcl.Property{ - Type: "string", - GoName: "TimeSeriesQueryLanguage", - Description: "A query used to fetch time series.", - }, - "unitOverride": &dcl.Property{ - Type: "string", - GoName: "UnitOverride", - Description: "The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.", - }, - }, - }, - }, - }, - "text": &dcl.Property{ - Type: "object", - GoName: "Text", - GoType: "DashboardMosaicLayoutTilesWidgetText", - Description: "A raw string or markdown displaying textual content.", - Conflicts: []string{ - "xyChart", - "scorecard", - "blank", - "logsPanel", - }, - Properties: map[string]*dcl.Property{ - "content": &dcl.Property{ - Type: "string", - GoName: "Content", - Description: "The text content to be displayed.", - }, - "format": &dcl.Property{ - Type: "string", - GoName: "Format", - GoType: "DashboardMosaicLayoutTilesWidgetTextFormatEnum", - Description: "How the text content is formatted. 
Possible values: FORMAT_UNSPECIFIED, MARKDOWN, RAW", - Enum: []string{ - "FORMAT_UNSPECIFIED", - "MARKDOWN", - "RAW", - }, - }, - }, - }, - "title": &dcl.Property{ - Type: "string", - GoName: "Title", - Description: "Optional. The title of the widget.", - }, - "xyChart": &dcl.Property{ - Type: "object", - GoName: "XyChart", - GoType: "DashboardMosaicLayoutTilesWidgetXyChart", - Description: "A chart of time series data.", - Conflicts: []string{ - "scorecard", - "text", - "blank", - "logsPanel", - }, - Required: []string{ - "dataSets", - }, - Properties: map[string]*dcl.Property{ - "chartOptions": &dcl.Property{ - Type: "object", - GoName: "ChartOptions", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartChartOptions", - Description: "Display options for the chart.", - Properties: map[string]*dcl.Property{ - "mode": &dcl.Property{ - Type: "string", - GoName: "Mode", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum", - Description: "The chart mode. Possible values: MODE_UNSPECIFIED, COLOR, X_RAY, STATS", - Enum: []string{ - "MODE_UNSPECIFIED", - "COLOR", - "X_RAY", - "STATS", - }, - }, - }, - }, - "dataSets": &dcl.Property{ - Type: "array", - GoName: "DataSets", - Description: "Required. The data displayed in this chart.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSets", - Required: []string{ - "timeSeriesQuery", - }, - Properties: map[string]*dcl.Property{ - "legendTemplate": &dcl.Property{ - Type: "string", - GoName: "LegendTemplate", - Description: "A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value. ", - }, - "minAlignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "MinAlignmentPeriod", - Description: "Optional. 
The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.", - }, - "plotType": &dcl.Property{ - Type: "string", - GoName: "PlotType", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum", - Description: "How this data should be plotted on the chart. Possible values: PLOT_TYPE_UNSPECIFIED, LINE, STACKED_AREA, STACKED_BAR, HEATMAP", - Enum: []string{ - "PLOT_TYPE_UNSPECIFIED", - "LINE", - "STACKED_AREA", - "STACKED_BAR", - "HEATMAP", - }, - }, - "timeSeriesQuery": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesQuery", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery", - Description: "Required. Fields for querying time series data from the Stackdriver metrics API.", - Properties: map[string]*dcl.Property{ - "timeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilter", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter", - Description: "Filter parameters to fetch time series.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation", - Description: "By default, the raw time series data is returned. 
Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", - Description: "Apply a second aggregation after `aggregation` is applied.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. 
Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesFilterRatio": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilterRatio", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio", - Description: "Parameters to fetch a ratio between two time series filters.", - Properties: map[string]*dcl.Property{ - "denominator": &dcl.Property{ - Type: "object", - GoName: "Denominator", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator", - Description: "The denominator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. 
This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "numerator": &dcl.Property{ - Type: "object", - GoName: "Numerator", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator", - Description: "The numerator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. 
The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. 
If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", - Description: "Apply a second aggregation after the ratio is computed.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. 
Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesQueryLanguage": &dcl.Property{ - Type: "string", - GoName: "TimeSeriesQueryLanguage", - Description: "A query used to fetch time series.", - }, - "unitOverride": &dcl.Property{ - Type: "string", - GoName: "UnitOverride", - Description: "The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.", - }, - }, - }, - }, - }, - }, - "thresholds": &dcl.Property{ - Type: "array", - GoName: "Thresholds", - Description: "Threshold lines drawn horizontally across the chart.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartThresholds", - Properties: map[string]*dcl.Property{ - "color": &dcl.Property{ - Type: "string", - GoName: "Color", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum", - Description: "The state color for this threshold. Color is not allowed in a XyChart. 
Possible values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW, ORANGE, RED", - Enum: []string{ - "COLOR_UNSPECIFIED", - "GREY", - "BLUE", - "GREEN", - "YELLOW", - "ORANGE", - "RED", - }, - }, - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum", - Description: "The direction for the current threshold. Direction is not allowed in a XyChart. Possible values: DIRECTION_UNSPECIFIED, ABOVE, BELOW", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "ABOVE", - "BELOW", - }, - }, - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "A label for the threshold.", - }, - "value": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Value", - Description: "The value of the threshold. The value should be defined in the native scale of the metric.", - }, - }, - }, - }, - "timeshiftDuration": &dcl.Property{ - Type: "string", - GoName: "TimeshiftDuration", - Description: "The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.", - }, - "xAxis": &dcl.Property{ - Type: "object", - GoName: "XAxis", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartXAxis", - Description: "The properties applied to the X axis.", - Properties: map[string]*dcl.Property{ - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "The label of the axis.", - }, - "scale": &dcl.Property{ - Type: "string", - GoName: "Scale", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum", - Description: "The axis scale. By default, a linear scale is used. 
Possible values: SCALE_UNSPECIFIED, LINEAR, LOG10", - Enum: []string{ - "SCALE_UNSPECIFIED", - "LINEAR", - "LOG10", - }, - }, - }, - }, - "yAxis": &dcl.Property{ - Type: "object", - GoName: "YAxis", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartYAxis", - Description: "The properties applied to the Y axis.", - Properties: map[string]*dcl.Property{ - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "The label of the axis.", - }, - "scale": &dcl.Property{ - Type: "string", - GoName: "Scale", - GoType: "DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum", - Description: "The axis scale. By default, a linear scale is used. Possible values: SCALE_UNSPECIFIED, LINEAR, LOG10", - Enum: []string{ - "SCALE_UNSPECIFIED", - "LINEAR", - "LOG10", - }, - }, - }, - }, - }, - }, - }, - }, - "width": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Width", - Description: "The width of the tile, measured in grid squares.", - }, - "xPos": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "XPos", - Description: "The zero-indexed position of the tile in grid squares relative to the left edge of the grid.", - }, - "yPos": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "YPos", - Description: "The zero-indexed position of the tile in grid squares relative to the top edge of the grid.", - }, - }, - }, - }, - }, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Immutable. 
The resource name of the dashboard.", - Immutable: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project id of the resource.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - }, - "rowLayout": &dcl.Property{ - Type: "object", - GoName: "RowLayout", - GoType: "DashboardRowLayout", - Description: "The content is divided into equally spaced rows and the widgets are arranged horizontally.", - Conflicts: []string{ - "gridLayout", - "mosaicLayout", - "columnLayout", - }, - Properties: map[string]*dcl.Property{ - "rows": &dcl.Property{ - Type: "array", - GoName: "Rows", - Description: "The rows of content to display.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardRowLayoutRows", - Properties: map[string]*dcl.Property{ - "weight": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Weight", - Description: "The relative weight of this row. The row weight is used to adjust the height of rows on the screen (relative to peers). Greater the weight, greater the height of the row on the screen. 
If omitted, a value of 1 is used while rendering.", - }, - "widgets": &dcl.Property{ - Type: "array", - GoName: "Widgets", - Description: "The display widgets arranged horizontally in this row.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardRowLayoutRowsWidgets", - Properties: map[string]*dcl.Property{ - "blank": &dcl.Property{ - Type: "object", - GoName: "Blank", - GoType: "DashboardRowLayoutRowsWidgetsBlank", - Description: "A blank space.", - Conflicts: []string{ - "xyChart", - "scorecard", - "text", - "logsPanel", - }, - Properties: map[string]*dcl.Property{}, - }, - "logsPanel": &dcl.Property{ - Type: "object", - GoName: "LogsPanel", - GoType: "DashboardRowLayoutRowsWidgetsLogsPanel", - Conflicts: []string{ - "xyChart", - "scorecard", - "text", - "blank", - }, - Properties: map[string]*dcl.Property{ - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "A filter that chooses which log entries to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). Only log entries that match the filter are returned. An empty filter matches all log entries.", - }, - "resourceNames": &dcl.Property{ - Type: "array", - GoName: "ResourceNames", - Description: "The names of logging resources to collect logs for. Currently only projects are supported. 
If empty, the widget will default to the host project.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - }, - }, - }, - }, - }, - }, - "scorecard": &dcl.Property{ - Type: "object", - GoName: "Scorecard", - GoType: "DashboardRowLayoutRowsWidgetsScorecard", - Description: "A scorecard summarizing time series data.", - Conflicts: []string{ - "xyChart", - "text", - "blank", - "logsPanel", - }, - Required: []string{ - "timeSeriesQuery", - }, - Properties: map[string]*dcl.Property{ - "gaugeView": &dcl.Property{ - Type: "object", - GoName: "GaugeView", - GoType: "DashboardRowLayoutRowsWidgetsScorecardGaugeView", - Description: "Will cause the scorecard to show a gauge chart.", - Properties: map[string]*dcl.Property{ - "lowerBound": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "LowerBound", - Description: "The lower bound for this gauge chart. The value of the chart should always be greater than or equal to this.", - }, - "upperBound": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "UpperBound", - Description: "The upper bound for this gauge chart. The value of the chart should always be less than or equal to this.", - }, - }, - }, - "sparkChartView": &dcl.Property{ - Type: "object", - GoName: "SparkChartView", - GoType: "DashboardRowLayoutRowsWidgetsScorecardSparkChartView", - Description: "Will cause the scorecard to show a spark chart.", - Required: []string{ - "sparkChartType", - }, - Properties: map[string]*dcl.Property{ - "minAlignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "MinAlignmentPeriod", - Description: "The lower bound on data point frequency in the chart implemented by specifying the minimum alignment period to use in a time series query. 
For example, if the data is published once every 10 minutes it would not make sense to fetch and align data at one minute intervals. This field is optional and exists only as a hint.", - }, - "sparkChartType": &dcl.Property{ - Type: "string", - GoName: "SparkChartType", - GoType: "DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum", - Description: "Required. The type of sparkchart to show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED, SPARK_LINE, SPARK_BAR", - Enum: []string{ - "SPARK_CHART_TYPE_UNSPECIFIED", - "SPARK_LINE", - "SPARK_BAR", - }, - }, - }, - }, - "thresholds": &dcl.Property{ - Type: "array", - GoName: "Thresholds", - Description: "The thresholds used to determine the state of the scorecard given the time series' current value. For an actual value x, the scorecard is in a danger state if x is less than or equal to a danger threshold that triggers below, or greater than or equal to a danger threshold that triggers above. Similarly, if x is above/below a warning threshold that triggers above/below, then the scorecard is in a warning state - unless x also puts it in a danger state. (Danger trumps warning.) 
As an example, consider a scorecard with the following four thresholds: { value: 90, category: 'DANGER', trigger: 'ABOVE', },: { value: 70, category: 'WARNING', trigger: 'ABOVE', }, { value: 10, category: 'DANGER', trigger: 'BELOW', }, { value: 20, category: 'WARNING', trigger: 'BELOW', } Then: values less than or equal to 10 would put the scorecard in a DANGER state, values greater than 10 but less than or equal to 20 a WARNING state, values strictly between 20 and 70 an OK state, values greater than or equal to 70 but less than 90 a WARNING state, and values greater than or equal to 90 a DANGER state.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardRowLayoutRowsWidgetsScorecardThresholds", - Properties: map[string]*dcl.Property{ - "color": &dcl.Property{ - Type: "string", - GoName: "Color", - GoType: "DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum", - Description: "The state color for this threshold. Color is not allowed in a XyChart. Possible values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW, ORANGE, RED", - Enum: []string{ - "COLOR_UNSPECIFIED", - "GREY", - "BLUE", - "GREEN", - "YELLOW", - "ORANGE", - "RED", - }, - }, - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum", - Description: "The direction for the current threshold. Direction is not allowed in a XyChart. Possible values: DIRECTION_UNSPECIFIED, ABOVE, BELOW", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "ABOVE", - "BELOW", - }, - }, - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "A label for the threshold.", - }, - "value": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Value", - Description: "The value of the threshold. 
The value should be defined in the native scale of the metric.", - }, - }, - }, - }, - "timeSeriesQuery": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesQuery", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery", - Description: "Required. Fields for querying time series data from the Stackdriver metrics API.", - Properties: map[string]*dcl.Property{ - "timeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilter", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter", - Description: "Filter parameters to fetch time series.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. 
If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. 
The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. 
Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", - Description: "Apply a second aggregation after `aggregation` is applied.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. 
If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesFilterRatio": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilterRatio", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio", - Description: "Parameters to fetch a ratio between two time series filters.", - Properties: map[string]*dcl.Property{ - "denominator": &dcl.Property{ - Type: "object", - GoName: "Denominator", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator", - Description: "The denominator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. 
The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "numerator": &dcl.Property{ - Type: "object", - GoName: "Numerator", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator", - Description: "The numerator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. 
The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. 
If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", - Description: "Apply a second aggregation after the ratio is computed.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. 
Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesQueryLanguage": &dcl.Property{ - Type: "string", - GoName: "TimeSeriesQueryLanguage", - Description: "A query used to fetch time series.", - }, - "unitOverride": &dcl.Property{ - Type: "string", - GoName: "UnitOverride", - Description: "The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.", - }, - }, - }, - }, - }, - "text": &dcl.Property{ - Type: "object", - GoName: "Text", - GoType: "DashboardRowLayoutRowsWidgetsText", - Description: "A raw string or markdown displaying textual content.", - Conflicts: []string{ - "xyChart", - "scorecard", - "blank", - "logsPanel", - }, - Properties: map[string]*dcl.Property{ - "content": &dcl.Property{ - Type: "string", - GoName: "Content", - Description: "The text content to be displayed.", - }, - "format": &dcl.Property{ - Type: "string", - GoName: "Format", - GoType: "DashboardRowLayoutRowsWidgetsTextFormatEnum", - Description: "How the text content is formatted. 
Possible values: FORMAT_UNSPECIFIED, MARKDOWN, RAW", - Enum: []string{ - "FORMAT_UNSPECIFIED", - "MARKDOWN", - "RAW", - }, - }, - }, - }, - "title": &dcl.Property{ - Type: "string", - GoName: "Title", - Description: "Optional. The title of the widget.", - }, - "xyChart": &dcl.Property{ - Type: "object", - GoName: "XyChart", - GoType: "DashboardRowLayoutRowsWidgetsXyChart", - Description: "A chart of time series data.", - Conflicts: []string{ - "scorecard", - "text", - "blank", - "logsPanel", - }, - Required: []string{ - "dataSets", - }, - Properties: map[string]*dcl.Property{ - "chartOptions": &dcl.Property{ - Type: "object", - GoName: "ChartOptions", - GoType: "DashboardRowLayoutRowsWidgetsXyChartChartOptions", - Description: "Display options for the chart.", - Properties: map[string]*dcl.Property{ - "mode": &dcl.Property{ - Type: "string", - GoName: "Mode", - GoType: "DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum", - Description: "The chart mode. Possible values: MODE_UNSPECIFIED, COLOR, X_RAY, STATS", - Enum: []string{ - "MODE_UNSPECIFIED", - "COLOR", - "X_RAY", - "STATS", - }, - }, - }, - }, - "dataSets": &dcl.Property{ - Type: "array", - GoName: "DataSets", - Description: "Required. The data displayed in this chart.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSets", - Required: []string{ - "timeSeriesQuery", - }, - Properties: map[string]*dcl.Property{ - "legendTemplate": &dcl.Property{ - Type: "string", - GoName: "LegendTemplate", - Description: "A template string for naming `TimeSeries` in the resulting data set. This should be a string with interpolations of the form `${label_name}`, which will resolve to the label's value. ", - }, - "minAlignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "MinAlignmentPeriod", - Description: "Optional. 
The lower bound on data point frequency for this data set, implemented by specifying the minimum alignment period to use in a time series query For example, if the data is published once every 10 minutes, the `min_alignment_period` should be at least 10 minutes. It would not make sense to fetch and align data at one minute intervals.", - }, - "plotType": &dcl.Property{ - Type: "string", - GoName: "PlotType", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum", - Description: "How this data should be plotted on the chart. Possible values: PLOT_TYPE_UNSPECIFIED, LINE, STACKED_AREA, STACKED_BAR, HEATMAP", - Enum: []string{ - "PLOT_TYPE_UNSPECIFIED", - "LINE", - "STACKED_AREA", - "STACKED_BAR", - "HEATMAP", - }, - }, - "timeSeriesQuery": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesQuery", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery", - Description: "Required. Fields for querying time series data from the Stackdriver metrics API.", - Properties: map[string]*dcl.Property{ - "timeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilter", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter", - Description: "Filter parameters to fetch time series.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. 
This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation", - Description: "Apply a second aggregation after `aggregation` is applied.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. 
Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesFilterRatio": &dcl.Property{ - Type: "object", - GoName: "TimeSeriesFilterRatio", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio", - Description: "Parameters to fetch a ratio between two time series filters.", - Properties: map[string]*dcl.Property{ - "denominator": &dcl.Property{ - Type: "object", - GoName: "Denominator", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator", - Description: "The denominator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. 
This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. 
Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "numerator": &dcl.Property{ - Type: "object", - GoName: "Numerator", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator", - Description: "The numerator of the ratio.", - Required: []string{ - "filter", - }, - Properties: map[string]*dcl.Property{ - "aggregation": &dcl.Property{ - Type: "object", - GoName: "Aggregation", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation", - Description: "By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. 
The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. 
If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) that identifies the metric types, resources, and projects to query.", - }, - }, - }, - "pickTimeSeriesFilter": &dcl.Property{ - Type: "object", - GoName: "PickTimeSeriesFilter", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter", - Description: "Ranking based time series filter.", - Properties: map[string]*dcl.Property{ - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum", - Description: "How to use the ranking to select time series that pass through the filter. Possible values: DIRECTION_UNSPECIFIED, TOP, BOTTOM", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "TOP", - "BOTTOM", - }, - }, - "numTimeSeries": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "NumTimeSeries", - Description: "How many time series to allow to pass through the filter.", - }, - "rankingMethod": &dcl.Property{ - Type: "string", - GoName: "RankingMethod", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum", - Description: "`ranking_method` is applied to each time series independently to produce the value which will be used to compare the time series to other time series. 
Possible values: METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST", - Enum: []string{ - "METHOD_UNSPECIFIED", - "METHOD_MEAN", - "METHOD_MAX", - "METHOD_MIN", - "METHOD_SUM", - "METHOD_LATEST", - }, - }, - }, - }, - "secondaryAggregation": &dcl.Property{ - Type: "object", - GoName: "SecondaryAggregation", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation", - Description: "Apply a second aggregation after the ratio is computed.", - Properties: map[string]*dcl.Property{ - "alignmentPeriod": &dcl.Property{ - Type: "string", - GoName: "AlignmentPeriod", - Description: "The `alignment_period` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than `ALIGN_NONE` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner `ALIGN_NONE` is specified, then this field is ignored.", - }, - "crossSeriesReducer": &dcl.Property{ - Type: "string", - GoName: "CrossSeriesReducer", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum", - Description: "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the `metric_kind` and the `value_type` of the original time series. Reduction can yield a time series with a different `metric_kind` or `value_type` than the input time series. 
Time series data must first be aligned (see `per_series_aligner`) in order to perform cross-time series reduction. If `cross_series_reducer` is specified, then `per_series_aligner` must be specified, and must not be `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an error is returned. Possible values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION", - Enum: []string{ - "REDUCE_NONE", - "REDUCE_MEAN", - "REDUCE_MIN", - "REDUCE_MAX", - "REDUCE_SUM", - "REDUCE_STDDEV", - "REDUCE_COUNT", - "REDUCE_COUNT_TRUE", - "REDUCE_COUNT_FALSE", - "REDUCE_FRACTION_TRUE", - "REDUCE_PERCENTILE_99", - "REDUCE_PERCENTILE_95", - "REDUCE_PERCENTILE_50", - "REDUCE_PERCENTILE_05", - "REDUCE_FRACTION_LESS_THAN", - "REDUCE_MAKE_DISTRIBUTION", - }, - }, - "groupByFields": &dcl.Property{ - Type: "array", - GoName: "GroupByFields", - Description: "The set of fields to preserve when `cross_series_reducer` is specified. The `group_by_fields` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The `cross_series_reducer` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains `resource.type`. Fields not specified in `group_by_fields` are aggregated away. If `group_by_fields` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. 
If `cross_series_reducer` is not defined, this field is ignored.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "perSeriesAligner": &dcl.Property{ - Type: "string", - GoName: "PerSeriesAligner", - GoType: "DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum", - Description: "An `Aligner` describes how to bring the data points in a single time series into temporal alignment. Except for `ALIGN_NONE`, all alignments cause all the data points in an `alignment_period` to be mathematically grouped together, resulting in a single data point for each `alignment_period` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the `metric_kind` and `value_type` of the original time series. Alignment can change the `metric_kind` or the `value_type` of the time series. Time series data must be aligned in order to perform cross-time series reduction. 
If `cross_series_reducer` is specified, then `per_series_aligner` must be specified and not equal to `ALIGN_NONE` and `alignment_period` must be specified; otherwise, an error is returned.", - Enum: []string{ - "ALIGN_NONE", - "ALIGN_DELTA", - "ALIGN_RATE", - "ALIGN_INTERPOLATE", - "ALIGN_NEXT_OLDER", - "ALIGN_MIN", - "ALIGN_MAX", - "ALIGN_MEAN", - "ALIGN_COUNT", - "ALIGN_SUM", - "ALIGN_STDDEV", - "ALIGN_COUNT_TRUE", - "ALIGN_COUNT_FALSE", - "ALIGN_FRACTION_TRUE", - "ALIGN_PERCENTILE_99", - "ALIGN_PERCENTILE_95", - "ALIGN_PERCENTILE_50", - "ALIGN_PERCENTILE_05", - "ALIGN_MAKE_DISTRIBUTION", - "ALIGN_PERCENT_CHANGE", - }, - }, - }, - }, - }, - }, - "timeSeriesQueryLanguage": &dcl.Property{ - Type: "string", - GoName: "TimeSeriesQueryLanguage", - Description: "A query used to fetch time series.", - }, - "unitOverride": &dcl.Property{ - Type: "string", - GoName: "UnitOverride", - Description: "The unit of data contained in fetched time series. If non-empty, this unit will override any unit that accompanies fetched data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors) field in `MetricDescriptor`.", - }, - }, - }, - }, - }, - }, - "thresholds": &dcl.Property{ - Type: "array", - GoName: "Thresholds", - Description: "Threshold lines drawn horizontally across the chart.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "DashboardRowLayoutRowsWidgetsXyChartThresholds", - Properties: map[string]*dcl.Property{ - "color": &dcl.Property{ - Type: "string", - GoName: "Color", - GoType: "DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum", - Description: "The state color for this threshold. Color is not allowed in a XyChart. 
Possible values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW, ORANGE, RED", - Enum: []string{ - "COLOR_UNSPECIFIED", - "GREY", - "BLUE", - "GREEN", - "YELLOW", - "ORANGE", - "RED", - }, - }, - "direction": &dcl.Property{ - Type: "string", - GoName: "Direction", - GoType: "DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum", - Description: "The direction for the current threshold. Direction is not allowed in a XyChart. Possible values: DIRECTION_UNSPECIFIED, ABOVE, BELOW", - Enum: []string{ - "DIRECTION_UNSPECIFIED", - "ABOVE", - "BELOW", - }, - }, - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "A label for the threshold.", - }, - "value": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Value", - Description: "The value of the threshold. The value should be defined in the native scale of the metric.", - }, - }, - }, - }, - "timeshiftDuration": &dcl.Property{ - Type: "string", - GoName: "TimeshiftDuration", - Description: "The duration used to display a comparison chart. A comparison chart simultaneously shows values from two similar-length time periods (e.g., week-over-week metrics). The duration must be positive, and it can only be applied to charts with data sets of LINE plot type.", - }, - "xAxis": &dcl.Property{ - Type: "object", - GoName: "XAxis", - GoType: "DashboardRowLayoutRowsWidgetsXyChartXAxis", - Description: "The properties applied to the X axis.", - Properties: map[string]*dcl.Property{ - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "The label of the axis.", - }, - "scale": &dcl.Property{ - Type: "string", - GoName: "Scale", - GoType: "DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum", - Description: "The axis scale. By default, a linear scale is used. 
Possible values: SCALE_UNSPECIFIED, LINEAR, LOG10", - Enum: []string{ - "SCALE_UNSPECIFIED", - "LINEAR", - "LOG10", - }, - }, - }, - }, - "yAxis": &dcl.Property{ - Type: "object", - GoName: "YAxis", - GoType: "DashboardRowLayoutRowsWidgetsXyChartYAxis", - Description: "The properties applied to the Y axis.", - Properties: map[string]*dcl.Property{ - "label": &dcl.Property{ - Type: "string", - GoName: "Label", - Description: "The label of the axis.", - }, - "scale": &dcl.Property{ - Type: "string", - GoName: "Scale", - GoType: "DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum", - Description: "The axis scale. By default, a linear scale is used. Possible values: SCALE_UNSPECIFIED, LINEAR, LOG10", - Enum: []string{ - "SCALE_UNSPECIFIED", - "LINEAR", - "LOG10", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_yaml_embed.go deleted file mode 100644 index e25366d4e3..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/dashboard_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// GENERATED BY gen_go_data.go -// gen_go_data -package monitoring -var YAML_dashboard blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/dashboard.yaml - -package monitoring - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/dashboard.yaml -var YAML_dashboard = []byte("info:\n title: Monitoring/Dashboard\n description: The Monitoring Dashboard resource\n x-dcl-struct-name: Dashboard\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Dashboard\n parameters:\n - name: dashboard\n required: true\n description: A full instance of a Dashboard\n apply:\n description: The function used to apply information about a Dashboard\n parameters:\n - name: dashboard\n required: true\n description: A full instance of a Dashboard\n delete:\n description: The function used to delete a Dashboard\n parameters:\n - name: dashboard\n required: true\n description: A full instance of a Dashboard\n deleteAll:\n description: The function used to delete all Dashboard\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Dashboard\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Dashboard:\n title: Dashboard\n x-dcl-id: projects/{{project}}/dashboards/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - displayName\n - project\n properties:\n columnLayout:\n type: object\n x-dcl-go-name: ColumnLayout\n x-dcl-go-type: DashboardColumnLayout\n description: The content is divided into equally spaced columns and the\n widgets are arranged vertically.\n x-dcl-conflicts:\n - gridLayout\n - 
mosaicLayout\n - rowLayout\n properties:\n columns:\n type: array\n x-dcl-go-name: Columns\n description: The columns of content to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumns\n properties:\n weight:\n type: integer\n format: int64\n x-dcl-go-name: Weight\n description: The relative weight of this column. The column weight\n is used to adjust the width of columns on the screen (relative\n to peers). Greater the weight, greater the width of the column\n on the screen. If omitted, a value of 1 is used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The display widgets arranged vertically in this column.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries\n to return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. 
If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 minutes it would not make sense to fetch and\n align data at one minute intervals. This field\n is optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. 
The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a\n danger threshold that triggers above. Similarly, if\n x is above/below a warning threshold that triggers\n above/below, then the scorecard is in a warning state\n - unless x also puts it in a danger state. (Danger\n trumps warning.) As an example, consider a scorecard\n with the following four thresholds: { value: 90, category:\n ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category:\n ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category:\n ''DANGER'', trigger: ''BELOW'', }, { value: 20, category:\n ''WARNING'', trigger: ''BELOW'', } Then: values\n less than or equal to 10 would put the scorecard in\n a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal\n to 70 but less than 90 a WARNING state, and values\n greater than or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQuery\n description: Required. Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. 
The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data 
in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsText\n description: A raw string or markdown displaying textual\n content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. 
The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a\n string with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data\n point frequency for this data set, implemented\n by specifying the minimum alignment period to\n use in a time series query For example, if the\n data is published once every 10 minutes, the\n `min_alignment_period` should be at least 10\n minutes. It would not make sense to fetch and\n align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted\n on the chart. 
Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time\n series data is returned. Use this field\n to combine multiple time series for\n different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. 
The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. 
Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n 
x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. 
Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. 
The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. 
Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in\n fetched time series. 
If non-empty, this\n unit will override any unit that accompanies\n fetched data. The format is the same as\n the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). 
The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardColumnLayoutColumnsWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: Required. The mutable, human-readable name.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: \\`etag\\` is used for optimistic concurrency control as a way\n to help prevent simultaneous updates of a policy from overwriting each\n other. An \\`etag\\` is returned in the response to \\`GetDashboard\\`, and\n users are expected to put that etag in the request to \\`UpdateDashboard\\`\n to ensure that their change will be applied to the same version of the\n Dashboard configuration. 
The field should not be passed during dashboard\n creation.\n x-kubernetes-immutable: true\n gridLayout:\n type: object\n x-dcl-go-name: GridLayout\n x-dcl-go-type: DashboardGridLayout\n description: Content is arranged with a basic layout that re-flows a simple\n list of informational elements like widgets or tiles.\n x-dcl-conflicts:\n - mosaicLayout\n - rowLayout\n - columnLayout\n properties:\n columns:\n type: integer\n format: int64\n x-dcl-go-name: Columns\n description: The number of columns into which the view's width is divided.\n If omitted or set to zero, a system default will be used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The informational elements that are arranged into the columns\n row-first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardGridLayoutWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardGridLayoutWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries to return.\n See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned. An\n empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect logs\n for. Currently only projects are supported. 
If empty, the\n widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart. The\n value of the chart should always be greater than or\n equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart. The\n value of the chart should always be less than or equal\n to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency in\n the chart implemented by specifying the minimum alignment\n period to use in a time series query. For example, if\n the data is published once every 10 minutes it would\n not make sense to fetch and align data at one minute\n intervals. This field is optional and exists only as\n a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. The type of sparkchart to show\n in this chartView. 
Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state of\n the scorecard given the time series'' current value. For\n an actual value x, the scorecard is in a danger state if\n x is less than or equal to a danger threshold that triggers\n below, or greater than or equal to a danger threshold that\n triggers above. Similarly, if x is above/below a warning\n threshold that triggers above/below, then the scorecard\n is in a warning state - unless x also puts it in a danger\n state. (Danger trumps warning.) As an example, consider\n a scorecard with the following four thresholds: { value:\n 90, category: ''DANGER'', trigger: ''ABOVE'', },: { value:\n 70, category: ''WARNING'', trigger: ''ABOVE'', }, { value:\n 10, category: ''DANGER'', trigger: ''BELOW'', }, { value:\n 20, category: ''WARNING'', trigger: ''BELOW'', } Then:\n values less than or equal to 10 would put the scorecard\n in a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal to 70\n but less than 90 a WARNING state, and values greater than\n or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold. Color\n is not allowed in a XyChart. 
Possible values: COLOR_UNSPECIFIED,\n GREY, BLUE, GREEN, YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible values:\n DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value should\n be defined in the native scale of the metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQuery\n description: Required. Fields for querying time series data\n from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. 
If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources, and\n projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter. Possible\n values: DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow to\n pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied to each\n time series independently to produce the value\n which will be used to compare the time series\n to other time series. 
Possible values: METHOD_UNSPECIFIED,\n METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after `aggregation`\n is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between two time\n series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: 
DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. 
The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. 
If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter. Possible\n values: DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow to\n pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied to each\n time series independently to produce the value\n which will be used to compare the time series\n to other time series. 
Possible values: METHOD_UNSPECIFIED,\n METHOD_MEAN, METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after the\n ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used to\n divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will be\n done before the per-series aligner can be applied\n to the data. The value must be at least 60\n seconds. If a per-series aligner other than\n `ALIGN_NONE` is specified, this field is required\n or an error is returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE` is\n specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be used\n to combine time series into a single time series,\n where the value of each data point in the resulting\n series is a function of all the already aligned\n values in the input time series. Not all reducer\n operations can be applied to all time series.\n The valid choices depend on the `metric_kind`\n and the `value_type` of the original time series.\n Reduction can yield a time series with a different\n `metric_kind` or `value_type` than the input\n time series. Time series data must first be\n aligned (see `per_series_aligner`) in order\n to perform cross-time series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX,\n REDUCE_SUM, REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve when\n `cross_series_reducer` is specified. The `group_by_fields`\n determine how the time series are partitioned\n into subsets prior to applying the aggregation\n operation. Each subset contains time series\n that have the same value for each of the grouping\n fields. Each individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series. It\n is not possible to reduce across different resource\n types, so this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are aggregated\n away. If `group_by_fields` is not specified\n and all the time series have the same resource\n type, then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to bring\n the data points in a single time series into\n temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points in\n an `alignment_period` to be mathematically grouped\n together, resulting in a single data point for\n each `alignment_period` with end timestamp at\n the end of the period. Not all alignment operations\n may be applied to all time series. The valid\n choices depend on the `metric_kind` and `value_type`\n of the original time series. Alignment can change\n the `metric_kind` or the `value_type` of the\n time series. Time series data must be aligned\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified, then\n `per_series_aligner` must be specified and not\n equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched time\n series. If non-empty, this unit will override any unit\n that accompanies fetched data. 
The format is the same\n as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardGridLayoutWidgetsText\n description: A raw string or markdown displaying textual content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardGridLayoutWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. 
The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a string\n with interpolations of the form `${label_name}`, which\n will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data point\n frequency for this data set, implemented by specifying\n the minimum alignment period to use in a time series\n query For example, if the data is published once every\n 10 minutes, the `min_alignment_period` should be at\n least 10 minutes. It would not make sense to fetch\n and align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted on the\n chart. Possible values: PLOT_TYPE_UNSPECIFIED, LINE,\n STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. 
Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. 
Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the 
[time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across the\n chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold. Color\n is not allowed in a XyChart. Possible values: COLOR_UNSPECIFIED,\n GREY, BLUE, GREEN, YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. 
Possible values:\n DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value should\n be defined in the native scale of the metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison chart.\n A comparison chart simultaneously shows values from two\n similar-length time periods (e.g., week-over-week metrics).\n The duration must be positive, and it can only be applied\n to charts with data sets of LINE plot type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear scale\n is used. Possible values: SCALE_UNSPECIFIED, LINEAR,\n LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardGridLayoutWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear scale\n is used. 
Possible values: SCALE_UNSPECIFIED, LINEAR,\n LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n mosaicLayout:\n type: object\n x-dcl-go-name: MosaicLayout\n x-dcl-go-type: DashboardMosaicLayout\n description: The content is arranged as a grid of tiles, with each content\n widget occupying one or more tiles.\n x-dcl-conflicts:\n - gridLayout\n - rowLayout\n - columnLayout\n properties:\n columns:\n type: integer\n format: int64\n x-dcl-go-name: Columns\n description: The number of columns in the mosaic grid.\n tiles:\n type: array\n x-dcl-go-name: Tiles\n description: The tiles to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTiles\n properties:\n height:\n type: integer\n format: int64\n x-dcl-go-name: Height\n description: The height of the tile, measured in grid squares.\n widget:\n type: object\n x-dcl-go-name: Widget\n x-dcl-go-type: DashboardMosaicLayoutTilesWidget\n description: The informational widget contained in the tile.\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries to\n return. See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. 
If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 minutes it would not make sense to fetch and\n align data at one minute intervals. This field is\n optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. 
The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a danger\n threshold that triggers above. Similarly, if x is above/below\n a warning threshold that triggers above/below, then\n the scorecard is in a warning state - unless x also\n puts it in a danger state. (Danger trumps warning.) As\n an example, consider a scorecard with the following\n four thresholds: { value: 90, category: ''DANGER'', trigger:\n ''ABOVE'', },: { value: 70, category: ''WARNING'', trigger:\n ''ABOVE'', }, { value: 10, category: ''DANGER'', trigger:\n ''BELOW'', }, { value: 20, category: ''WARNING'', trigger:\n ''BELOW'', } Then: values less than or equal to 10\n would put the scorecard in a DANGER state, values greater\n than 10 but less than or equal to 20 a WARNING state,\n values strictly between 20 and 70 an OK state, values\n greater than or equal to 70 but less than 90 a WARNING\n state, and values greater than or equal to 90 a DANGER\n state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible values:\n COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW,\n ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQuery\n description: Required. Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series data\n is returned. Use this field to combine multiple\n time series for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. 
If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. 
Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter.\n Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series. 
Possible\n values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between two\n time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is\n used to divide the data in all the [time\n series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. 
If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series into\n a single time series, where the value\n of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time series.\n Reduction can yield a time series with\n a different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each individual\n time series is a member of exactly one\n subset. The `cross_series_reducer` is\n applied to each subset of time series.\n It is not possible to reduce across\n different resource types, so this field\n implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are\n aggregated away. If `group_by_fields`\n is not specified and all the time series\n have the same resource type, then the\n time series are aggregated into a single\n output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single\n time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on\n the `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified\n and not equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error\n is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is\n used to divide the data in all the [time\n series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series into\n a single time series, where the value\n of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. 
The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time series.\n Reduction can yield a time series with\n a different `metric_kind` or `value_type`\n than the input time series. Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each individual\n time series is a member of exactly one\n subset. The `cross_series_reducer` is\n applied to each subset of time series.\n It is not possible to reduce across\n different resource types, so this field\n implicitly contains `resource.type`. Fields\n not specified in `group_by_fields` are\n aggregated away. 
If `group_by_fields`\n is not specified and all the time series\n have the same resource type, then the\n time series are aggregated into a single\n output time series. If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single\n time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on\n the `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified\n and not equal to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise, an error\n is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to select\n time series that pass through the filter.\n Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series. 
Possible\n values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner can\n be applied to the data. The value must\n be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is returned.\n If no per-series aligner is specified, or\n the aligner `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to be\n used to combine time series into a single\n time series, where the value of each data\n point in the resulting series is a function\n of all the already aligned values in the\n input time series. Not all reducer operations\n can be applied to all time series. The valid\n choices depend on the `metric_kind` and\n the `value_type` of the original time series.\n Reduction can yield a time series with a\n different `metric_kind` or `value_type`\n than the input time series. 
Time series\n data must first be aligned (see `per_series_aligner`)\n in order to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be specified,\n and must not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise, an error\n is returned. Possible values: REDUCE_NONE,\n REDUCE_MEAN, REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that have\n the same value for each of the grouping\n fields. Each individual time series is a\n member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not specified\n in `group_by_fields` are aggregated away. If\n `group_by_fields` is not specified and all\n the time series have the same resource type,\n then the time series are aggregated into\n a single output time series. 
If `cross_series_reducer`\n is not defined, this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how to\n bring the data points in a single time series\n into temporal alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data points\n in an `alignment_period` to be mathematically\n grouped together, resulting in a single\n data point for each `alignment_period` with\n end timestamp at the end of the period. Not\n all alignment operations may be applied\n to all time series. The valid choices depend\n on the `metric_kind` and `value_type` of\n the original time series. Alignment can\n change the `metric_kind` or the `value_type`\n of the time series. Time series data must\n be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. 
If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetText\n description: A raw string or markdown displaying textual content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. 
The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a string\n with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data point\n frequency for this data set, implemented by specifying\n the minimum alignment period to use in a time\n series query For example, if the data is published\n once every 10 minutes, the `min_alignment_period`\n should be at least 10 minutes. It would not make\n sense to fetch and align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted on\n the chart. Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQuery\n description: Required. 
Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass through\n the filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to\n allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently\n to produce the value which will be\n used to compare the time series to\n other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX,\n METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the\n data. The value must be at least\n 60 seconds. 
If a per-series aligner\n other than `ALIGN_NONE` is specified,\n this field is required or an error\n is returned. If no per-series\n aligner is specified, or the aligner\n `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not\n all reducer operations can be\n applied to all time series. The\n valid choices depend on the `metric_kind`\n and the `value_type` of the original\n time series. Reduction can yield\n a time series with a different\n `metric_kind` or `value_type`\n than the input time series. Time\n series data must first be aligned\n (see `per_series_aligner`) in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets prior\n to applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to\n reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the time\n series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the\n data. The value must be at least\n 60 seconds. If a per-series aligner\n other than `ALIGN_NONE` is specified,\n this field is required or an error\n is returned. If no per-series\n aligner is specified, or the aligner\n `ALIGN_NONE` is specified, then\n this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not\n all reducer operations can be\n applied to all time series. The\n valid choices depend on the `metric_kind`\n and the `value_type` of the original\n time series. 
Reduction can yield\n a time series with a different\n `metric_kind` or `value_type`\n than the input time series. Time\n series data must first be aligned\n (see `per_series_aligner`) in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets prior\n to applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to\n reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the time\n series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in\n order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass through\n the filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to\n allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently\n to produce the value which will be\n used to compare the time series to\n other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN, METHOD_MAX,\n METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will\n override any unit that accompanies fetched\n data. The format is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. 
Possible values:\n COLOR_UNSPECIFIED, GREY, BLUE, GREEN, YELLOW,\n ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. 
Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardMosaicLayoutTilesWidgetXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n width:\n type: integer\n format: int64\n x-dcl-go-name: Width\n description: The width of the tile, measured in grid squares.\n xPos:\n type: integer\n format: int64\n x-dcl-go-name: XPos\n description: The zero-indexed position of the tile in grid squares\n relative to the left edge of the grid.\n yPos:\n type: integer\n format: int64\n x-dcl-go-name: YPos\n description: The zero-indexed position of the tile in grid squares\n relative to the top edge of the grid.\n name:\n type: string\n x-dcl-go-name: Name\n description: Immutable. 
The resource name of the dashboard.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project id of the resource.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n rowLayout:\n type: object\n x-dcl-go-name: RowLayout\n x-dcl-go-type: DashboardRowLayout\n description: The content is divided into equally spaced rows and the widgets\n are arranged horizontally.\n x-dcl-conflicts:\n - gridLayout\n - mosaicLayout\n - columnLayout\n properties:\n rows:\n type: array\n x-dcl-go-name: Rows\n description: The rows of content to display.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRows\n properties:\n weight:\n type: integer\n format: int64\n x-dcl-go-name: Weight\n description: The relative weight of this row. The row weight is\n used to adjust the height of rows on the screen (relative to\n peers). Greater the weight, greater the height of the row on\n the screen. If omitted, a value of 1 is used while rendering.\n widgets:\n type: array\n x-dcl-go-name: Widgets\n description: The display widgets arranged horizontally in this\n row.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgets\n properties:\n blank:\n type: object\n x-dcl-go-name: Blank\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsBlank\n description: A blank space.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - logsPanel\n logsPanel:\n type: object\n x-dcl-go-name: LogsPanel\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsLogsPanel\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - text\n - blank\n properties:\n filter:\n type: string\n x-dcl-go-name: Filter\n description: A filter that chooses which log entries\n to return. 
See [Advanced Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).\n Only log entries that match the filter are returned.\n An empty filter matches all log entries.\n resourceNames:\n type: array\n x-dcl-go-name: ResourceNames\n description: The names of logging resources to collect\n logs for. Currently only projects are supported. If\n empty, the widget will default to the host project.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n scorecard:\n type: object\n x-dcl-go-name: Scorecard\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecard\n description: A scorecard summarizing time series data.\n x-dcl-conflicts:\n - xyChart\n - text\n - blank\n - logsPanel\n required:\n - timeSeriesQuery\n properties:\n gaugeView:\n type: object\n x-dcl-go-name: GaugeView\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardGaugeView\n description: Will cause the scorecard to show a gauge\n chart.\n properties:\n lowerBound:\n type: number\n format: double\n x-dcl-go-name: LowerBound\n description: The lower bound for this gauge chart.\n The value of the chart should always be greater\n than or equal to this.\n upperBound:\n type: number\n format: double\n x-dcl-go-name: UpperBound\n description: The upper bound for this gauge chart.\n The value of the chart should always be less than\n or equal to this.\n sparkChartView:\n type: object\n x-dcl-go-name: SparkChartView\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardSparkChartView\n description: Will cause the scorecard to show a spark\n chart.\n required:\n - sparkChartType\n properties:\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: The lower bound on data point frequency\n in the chart implemented by specifying the minimum\n alignment period to use in a time series query.\n For example, if the data is published once every\n 10 
minutes it would not make sense to fetch and\n align data at one minute intervals. This field\n is optional and exists only as a hint.\n sparkChartType:\n type: string\n x-dcl-go-name: SparkChartType\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardSparkChartViewSparkChartTypeEnum\n description: 'Required. The type of sparkchart to\n show in this chartView. Possible values: SPARK_CHART_TYPE_UNSPECIFIED,\n SPARK_LINE, SPARK_BAR'\n enum:\n - SPARK_CHART_TYPE_UNSPECIFIED\n - SPARK_LINE\n - SPARK_BAR\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: 'The thresholds used to determine the state\n of the scorecard given the time series'' current value.\n For an actual value x, the scorecard is in a danger\n state if x is less than or equal to a danger threshold\n that triggers below, or greater than or equal to a\n danger threshold that triggers above. Similarly, if\n x is above/below a warning threshold that triggers\n above/below, then the scorecard is in a warning state\n - unless x also puts it in a danger state. (Danger\n trumps warning.) 
As an example, consider a scorecard\n with the following four thresholds: { value: 90, category:\n ''DANGER'', trigger: ''ABOVE'', },: { value: 70, category:\n ''WARNING'', trigger: ''ABOVE'', }, { value: 10, category:\n ''DANGER'', trigger: ''BELOW'', }, { value: 20, category:\n ''WARNING'', trigger: ''BELOW'', } Then: values\n less than or equal to 10 would put the scorecard in\n a DANGER state, values greater than 10 but less than\n or equal to 20 a WARNING state, values strictly between\n 20 and 70 an OK state, values greater than or equal\n to 70 but less than 90 a WARNING state, and values\n greater than or equal to 90 a DANGER state.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQuery\n description: Required. 
Fields for querying time series\n data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views of\n the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. 
Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation after\n `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: Denominator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. 
The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time series\n data is returned. Use this field to combine\n multiple time series for different views\n of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data in\n all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This\n will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this field\n is required or an error is returned.\n If no per-series aligner is specified,\n or the aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where the\n value of each data point in the resulting\n series is a function of all the already\n aligned values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the\n `value_type` of the original time\n series. 
Reduction can yield a time\n series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not be\n `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how\n the time series are partitioned into\n subsets prior to applying the aggregation\n operation. Each subset contains time\n series that have the same value for\n each of the grouping fields. Each\n individual time series is a member\n of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types, so\n this field implicitly contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. 
If `group_by_fields`\n is not specified and all the time\n series have the same resource type,\n then the time series are aggregated\n into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in a\n single time series into temporal alignment.\n Except for `ALIGN_NONE`, all alignments\n cause all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for\n each `alignment_period` with end timestamp\n at the end of the period. Not all\n alignment operations may be applied\n to all time series. The valid choices\n depend on the `metric_kind` and `value_type`\n of the original time series. Alignment\n can change the `metric_kind` or the\n `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking to\n select time series that pass through the\n filter. Possible values: DIRECTION_UNSPECIFIED,\n TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series to allow\n to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is applied\n to each time series independently to produce\n the value which will be used to compare\n the time series to other time series.\n Possible values: METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM, METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation after\n the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period` specifies\n a time interval, in seconds, that is used\n to divide the data in all the [time 
series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time. This will\n be done before the per-series aligner\n can be applied to the data. The value\n must be at least 60 seconds. If a per-series\n aligner other than `ALIGN_NONE` is specified,\n this field is required or an error is\n returned. If no per-series aligner is\n specified, or the aligner `ALIGN_NONE`\n is specified, then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation to\n be used to combine time series into a\n single time series, where the value of\n each data point in the resulting series\n is a function of all the already aligned\n values in the input time series. Not\n all reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and the `value_type`\n of the original time series. Reduction\n can yield a time series with a different\n `metric_kind` or `value_type` than the\n input time series. Time series data must\n first be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer` is\n specified, then `per_series_aligner` must\n be specified, and must not be `ALIGN_NONE`.\n An `alignment_period` must also be specified;\n otherwise, an error is returned. 
Possible\n values: REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to preserve\n when `cross_series_reducer` is specified.\n The `group_by_fields` determine how the\n time series are partitioned into subsets\n prior to applying the aggregation operation.\n Each subset contains time series that\n have the same value for each of the grouping\n fields. Each individual time series is\n a member of exactly one subset. The `cross_series_reducer`\n is applied to each subset of time series.\n It is not possible to reduce across different\n resource types, so this field implicitly\n contains `resource.type`. Fields not\n specified in `group_by_fields` are aggregated\n away. 
If `group_by_fields` is not specified\n and all the time series have the same\n resource type, then the time series are\n aggregated into a single output time series.\n If `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsScorecardTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes how\n to bring the data points in a single time\n series into temporal alignment. Except\n for `ALIGN_NONE`, all alignments cause\n all the data points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point for each\n `alignment_period` with end timestamp\n at the end of the period. Not all alignment\n operations may be applied to all time\n series. The valid choices depend on the\n `metric_kind` and `value_type` of the\n original time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series data\n must be aligned in order to perform cross-time\n series reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in fetched\n time series. If non-empty, this unit will override\n any unit that accompanies fetched data. The format\n is the same as the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n text:\n type: object\n x-dcl-go-name: Text\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsText\n description: A raw string or markdown displaying textual\n content.\n x-dcl-conflicts:\n - xyChart\n - scorecard\n - blank\n - logsPanel\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: The text content to be displayed.\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsTextFormatEnum\n description: 'How the text content is formatted. Possible\n values: FORMAT_UNSPECIFIED, MARKDOWN, RAW'\n enum:\n - FORMAT_UNSPECIFIED\n - MARKDOWN\n - RAW\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. 
The title of the widget.\n xyChart:\n type: object\n x-dcl-go-name: XyChart\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChart\n description: A chart of time series data.\n x-dcl-conflicts:\n - scorecard\n - text\n - blank\n - logsPanel\n required:\n - dataSets\n properties:\n chartOptions:\n type: object\n x-dcl-go-name: ChartOptions\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartChartOptions\n description: Display options for the chart.\n properties:\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartChartOptionsModeEnum\n description: 'The chart mode. Possible values: MODE_UNSPECIFIED,\n COLOR, X_RAY, STATS'\n enum:\n - MODE_UNSPECIFIED\n - COLOR\n - X_RAY\n - STATS\n dataSets:\n type: array\n x-dcl-go-name: DataSets\n description: Required. The data displayed in this chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSets\n required:\n - timeSeriesQuery\n properties:\n legendTemplate:\n type: string\n x-dcl-go-name: LegendTemplate\n description: 'A template string for naming `TimeSeries`\n in the resulting data set. This should be a\n string with interpolations of the form `${label_name}`,\n which will resolve to the label''s value. '\n minAlignmentPeriod:\n type: string\n x-dcl-go-name: MinAlignmentPeriod\n description: Optional. The lower bound on data\n point frequency for this data set, implemented\n by specifying the minimum alignment period to\n use in a time series query For example, if the\n data is published once every 10 minutes, the\n `min_alignment_period` should be at least 10\n minutes. It would not make sense to fetch and\n align data at one minute intervals.\n plotType:\n type: string\n x-dcl-go-name: PlotType\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsPlotTypeEnum\n description: 'How this data should be plotted\n on the chart. 
Possible values: PLOT_TYPE_UNSPECIFIED,\n LINE, STACKED_AREA, STACKED_BAR, HEATMAP'\n enum:\n - PLOT_TYPE_UNSPECIFIED\n - LINE\n - STACKED_AREA\n - STACKED_BAR\n - HEATMAP\n timeSeriesQuery:\n type: object\n x-dcl-go-name: TimeSeriesQuery\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQuery\n description: Required. Fields for querying time\n series data from the Stackdriver metrics API.\n properties:\n timeSeriesFilter:\n type: object\n x-dcl-go-name: TimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilter\n description: Filter parameters to fetch time\n series.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregation\n description: By default, the raw time\n series data is returned. Use this field\n to combine multiple time series for\n different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. 
The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. 
Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types, resources,\n and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. 
Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregation\n description: Apply a second aggregation\n after `aggregation` is applied.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. 
If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. 
Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesFilterRatio:\n type: object\n x-dcl-go-name: TimeSeriesFilterRatio\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatio\n description: Parameters to fetch a ratio between\n two time series filters.\n properties:\n denominator:\n type: object\n x-dcl-go-name: 
Denominator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominator\n description: The denominator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. 
Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioDenominatorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n numerator:\n type: object\n x-dcl-go-name: Numerator\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumerator\n description: The numerator of the ratio.\n required:\n - filter\n properties:\n aggregation:\n type: object\n x-dcl-go-name: Aggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregation\n description: By default, the raw time\n series data is returned. Use this\n field to combine multiple time series\n for different views of the data.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in\n seconds, that is used to divide\n the data in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the\n per-series aligner can be applied\n to the data. The value must\n be at least 60 seconds. If a\n per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error\n is returned. If no per-series\n aligner is specified, or the\n aligner `ALIGN_NONE` is specified,\n then this field is ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point\n in the resulting series is a\n function of all the already\n aligned values in the input\n time series. Not all reducer\n operations can be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and the `value_type` of the\n original time series. 
Reduction\n can yield a time series with\n a different `metric_kind` or\n `value_type` than the input\n time series. Time series data\n must first be aligned (see `per_series_aligner`)\n in order to perform cross-time\n series reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must\n not be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible\n values: REDUCE_NONE, REDUCE_MEAN,\n REDUCE_MIN, REDUCE_MAX, REDUCE_SUM,\n REDUCE_STDDEV, REDUCE_COUNT,\n REDUCE_COUNT_TRUE, REDUCE_COUNT_FALSE,\n REDUCE_FRACTION_TRUE, REDUCE_PERCENTILE_99,\n REDUCE_PERCENTILE_95, REDUCE_PERCENTILE_50,\n REDUCE_PERCENTILE_05, REDUCE_FRACTION_LESS_THAN,\n REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields\n to preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series\n are partitioned into subsets\n prior to applying the aggregation\n operation. Each subset contains\n time series that have the same\n value for each of the grouping\n fields. Each individual time\n series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of\n time series. It is not possible\n to reduce across different resource\n types, so this field implicitly\n contains `resource.type`. Fields\n not specified in `group_by_fields`\n are aggregated away. If `group_by_fields`\n is not specified and all the\n time series have the same resource\n type, then the time series are\n aggregated into a single output\n time series. 
If `cross_series_reducer`\n is not defined, this field is\n ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioNumeratorAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points\n in a single time series into\n temporal alignment. Except for\n `ALIGN_NONE`, all alignments\n cause all the data points in\n an `alignment_period` to be\n mathematically grouped together,\n resulting in a single data point\n for each `alignment_period`\n with end timestamp at the end\n of the period. Not all alignment\n operations may be applied to\n all time series. The valid choices\n depend on the `metric_kind`\n and `value_type` of the original\n time series. Alignment can change\n the `metric_kind` or the `value_type`\n of the time series. Time series\n data must be aligned in order\n to perform cross-time series\n reduction. If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified and not equal\n to `ALIGN_NONE` and `alignment_period`\n must be specified; otherwise,\n an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n filter:\n type: string\n x-dcl-go-name: Filter\n description: Required. 
The [monitoring\n filter](https://cloud.google.com/monitoring/api/v3/filters)\n that identifies the metric types,\n resources, and projects to query.\n pickTimeSeriesFilter:\n type: object\n x-dcl-go-name: PickTimeSeriesFilter\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilter\n description: Ranking based time series\n filter.\n properties:\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterDirectionEnum\n description: 'How to use the ranking\n to select time series that pass\n through the filter. Possible values:\n DIRECTION_UNSPECIFIED, TOP, BOTTOM'\n enum:\n - DIRECTION_UNSPECIFIED\n - TOP\n - BOTTOM\n numTimeSeries:\n type: integer\n format: int64\n x-dcl-go-name: NumTimeSeries\n description: How many time series\n to allow to pass through the filter.\n rankingMethod:\n type: string\n x-dcl-go-name: RankingMethod\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioPickTimeSeriesFilterRankingMethodEnum\n description: '`ranking_method` is\n applied to each time series independently\n to produce the value which will\n be used to compare the time series\n to other time series. 
Possible values:\n METHOD_UNSPECIFIED, METHOD_MEAN,\n METHOD_MAX, METHOD_MIN, METHOD_SUM,\n METHOD_LATEST'\n enum:\n - METHOD_UNSPECIFIED\n - METHOD_MEAN\n - METHOD_MAX\n - METHOD_MIN\n - METHOD_SUM\n - METHOD_LATEST\n secondaryAggregation:\n type: object\n x-dcl-go-name: SecondaryAggregation\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregation\n description: Apply a second aggregation\n after the ratio is computed.\n properties:\n alignmentPeriod:\n type: string\n x-dcl-go-name: AlignmentPeriod\n description: The `alignment_period`\n specifies a time interval, in seconds,\n that is used to divide the data\n in all the [time series][google.monitoring.v3.TimeSeries]\n into consistent blocks of time.\n This will be done before the per-series\n aligner can be applied to the data. The\n value must be at least 60 seconds.\n If a per-series aligner other than\n `ALIGN_NONE` is specified, this\n field is required or an error is\n returned. If no per-series aligner\n is specified, or the aligner `ALIGN_NONE`\n is specified, then this field is\n ignored.\n crossSeriesReducer:\n type: string\n x-dcl-go-name: CrossSeriesReducer\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationCrossSeriesReducerEnum\n description: 'The reduction operation\n to be used to combine time series\n into a single time series, where\n the value of each data point in\n the resulting series is a function\n of all the already aligned values\n in the input time series. Not all\n reducer operations can be applied\n to all time series. The valid choices\n depend on the `metric_kind` and\n the `value_type` of the original\n time series. Reduction can yield\n a time series with a different `metric_kind`\n or `value_type` than the input time\n series. Time series data must first\n be aligned (see `per_series_aligner`)\n in order to perform cross-time series\n reduction. 
If `cross_series_reducer`\n is specified, then `per_series_aligner`\n must be specified, and must not\n be `ALIGN_NONE`. An `alignment_period`\n must also be specified; otherwise,\n an error is returned. Possible values:\n REDUCE_NONE, REDUCE_MEAN, REDUCE_MIN,\n REDUCE_MAX, REDUCE_SUM, REDUCE_STDDEV,\n REDUCE_COUNT, REDUCE_COUNT_TRUE,\n REDUCE_COUNT_FALSE, REDUCE_FRACTION_TRUE,\n REDUCE_PERCENTILE_99, REDUCE_PERCENTILE_95,\n REDUCE_PERCENTILE_50, REDUCE_PERCENTILE_05,\n REDUCE_FRACTION_LESS_THAN, REDUCE_MAKE_DISTRIBUTION'\n enum:\n - REDUCE_NONE\n - REDUCE_MEAN\n - REDUCE_MIN\n - REDUCE_MAX\n - REDUCE_SUM\n - REDUCE_STDDEV\n - REDUCE_COUNT\n - REDUCE_COUNT_TRUE\n - REDUCE_COUNT_FALSE\n - REDUCE_FRACTION_TRUE\n - REDUCE_PERCENTILE_99\n - REDUCE_PERCENTILE_95\n - REDUCE_PERCENTILE_50\n - REDUCE_PERCENTILE_05\n - REDUCE_FRACTION_LESS_THAN\n - REDUCE_MAKE_DISTRIBUTION\n groupByFields:\n type: array\n x-dcl-go-name: GroupByFields\n description: The set of fields to\n preserve when `cross_series_reducer`\n is specified. The `group_by_fields`\n determine how the time series are\n partitioned into subsets prior to\n applying the aggregation operation.\n Each subset contains time series\n that have the same value for each\n of the grouping fields. Each individual\n time series is a member of exactly\n one subset. The `cross_series_reducer`\n is applied to each subset of time\n series. It is not possible to reduce\n across different resource types,\n so this field implicitly contains\n `resource.type`. Fields not specified\n in `group_by_fields` are aggregated\n away. If `group_by_fields` is not\n specified and all the time series\n have the same resource type, then\n the time series are aggregated into\n a single output time series. 
If\n `cross_series_reducer` is not defined,\n this field is ignored.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n perSeriesAligner:\n type: string\n x-dcl-go-name: PerSeriesAligner\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartDataSetsTimeSeriesQueryTimeSeriesFilterRatioSecondaryAggregationPerSeriesAlignerEnum\n description: An `Aligner` describes\n how to bring the data points in\n a single time series into temporal\n alignment. Except for `ALIGN_NONE`,\n all alignments cause all the data\n points in an `alignment_period`\n to be mathematically grouped together,\n resulting in a single data point\n for each `alignment_period` with\n end timestamp at the end of the\n period. Not all alignment operations\n may be applied to all time series.\n The valid choices depend on the\n `metric_kind` and `value_type` of\n the original time series. Alignment\n can change the `metric_kind` or\n the `value_type` of the time series. Time\n series data must be aligned in order\n to perform cross-time series reduction.\n If `cross_series_reducer` is specified,\n then `per_series_aligner` must be\n specified and not equal to `ALIGN_NONE`\n and `alignment_period` must be specified;\n otherwise, an error is returned.\n enum:\n - ALIGN_NONE\n - ALIGN_DELTA\n - ALIGN_RATE\n - ALIGN_INTERPOLATE\n - ALIGN_NEXT_OLDER\n - ALIGN_MIN\n - ALIGN_MAX\n - ALIGN_MEAN\n - ALIGN_COUNT\n - ALIGN_SUM\n - ALIGN_STDDEV\n - ALIGN_COUNT_TRUE\n - ALIGN_COUNT_FALSE\n - ALIGN_FRACTION_TRUE\n - ALIGN_PERCENTILE_99\n - ALIGN_PERCENTILE_95\n - ALIGN_PERCENTILE_50\n - ALIGN_PERCENTILE_05\n - ALIGN_MAKE_DISTRIBUTION\n - ALIGN_PERCENT_CHANGE\n timeSeriesQueryLanguage:\n type: string\n x-dcl-go-name: TimeSeriesQueryLanguage\n description: A query used to fetch time series.\n unitOverride:\n type: string\n x-dcl-go-name: UnitOverride\n description: The unit of data contained in\n fetched time series. 
If non-empty, this\n unit will override any unit that accompanies\n fetched data. The format is the same as\n the [`unit`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors)\n field in `MetricDescriptor`.\n thresholds:\n type: array\n x-dcl-go-name: Thresholds\n description: Threshold lines drawn horizontally across\n the chart.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholds\n properties:\n color:\n type: string\n x-dcl-go-name: Color\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholdsColorEnum\n description: 'The state color for this threshold.\n Color is not allowed in a XyChart. Possible\n values: COLOR_UNSPECIFIED, GREY, BLUE, GREEN,\n YELLOW, ORANGE, RED'\n enum:\n - COLOR_UNSPECIFIED\n - GREY\n - BLUE\n - GREEN\n - YELLOW\n - ORANGE\n - RED\n direction:\n type: string\n x-dcl-go-name: Direction\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartThresholdsDirectionEnum\n description: 'The direction for the current threshold.\n Direction is not allowed in a XyChart. Possible\n values: DIRECTION_UNSPECIFIED, ABOVE, BELOW'\n enum:\n - DIRECTION_UNSPECIFIED\n - ABOVE\n - BELOW\n label:\n type: string\n x-dcl-go-name: Label\n description: A label for the threshold.\n value:\n type: number\n format: double\n x-dcl-go-name: Value\n description: The value of the threshold. The value\n should be defined in the native scale of the\n metric.\n timeshiftDuration:\n type: string\n x-dcl-go-name: TimeshiftDuration\n description: The duration used to display a comparison\n chart. A comparison chart simultaneously shows values\n from two similar-length time periods (e.g., week-over-week\n metrics). 
The duration must be positive, and it can\n only be applied to charts with data sets of LINE plot\n type.\n xAxis:\n type: object\n x-dcl-go-name: XAxis\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartXAxis\n description: The properties applied to the X axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartXAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n yAxis:\n type: object\n x-dcl-go-name: YAxis\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartYAxis\n description: The properties applied to the Y axis.\n properties:\n label:\n type: string\n x-dcl-go-name: Label\n description: The label of the axis.\n scale:\n type: string\n x-dcl-go-name: Scale\n x-dcl-go-type: DashboardRowLayoutRowsWidgetsXyChartYAxisScaleEnum\n description: 'The axis scale. By default, a linear\n scale is used. Possible values: SCALE_UNSPECIFIED,\n LINEAR, LOG10'\n enum:\n - SCALE_UNSPECIFIED\n - LINEAR\n - LOG10\n") - -// 655465 bytes -// MD5: 77a848bc77e76e4f6bd94a19479185ee diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group.go deleted file mode 100644 index d3441e2d1f..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package monitoring - -import ( - "context" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type Group struct { - DisplayName *string `json:"displayName"` - Filter *string `json:"filter"` - IsCluster *bool `json:"isCluster"` - Name *string `json:"name"` - ParentName *string `json:"parentName"` - Project *string `json:"project"` -} - -func (r *Group) String() string { - return dcl.SprintResource(r) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. 
-func (r *Group) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "monitoring", - Type: "Group", - Version: "monitoring", - } -} - -func (r *Group) ID() (string, error) { - if err := extractGroupFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "display_name": dcl.ValueOrEmptyString(nr.DisplayName), - "filter": dcl.ValueOrEmptyString(nr.Filter), - "is_cluster": dcl.ValueOrEmptyString(nr.IsCluster), - "name": dcl.ValueOrEmptyString(nr.Name), - "parent_name": dcl.ValueOrEmptyString(nr.ParentName), - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.Nprintf("projects/{{project}}/groups/{{name}}", params), nil -} - -const GroupMaxPage = -1 - -type GroupList struct { - Items []*Group - - nextToken string - - pageSize int32 - - resource *Group -} - -func (l *GroupList) HasNext() bool { - return l.nextToken != "" -} - -func (l *GroupList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listGroup(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListGroup(ctx context.Context, project string) (*GroupList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListGroupWithMaxResults(ctx, project, GroupMaxPage) - -} - -func (c *Client) ListGroupWithMaxResults(ctx context.Context, project string, pageSize int32) (*GroupList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. 
- r := &Group{ - Project: &project, - } - items, token, err := c.listGroup(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &GroupList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetGroup(ctx context.Context, r *Group) (*Group, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. - extractGroupFields(r) - - b, err := c.getGroupRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalGroup(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeGroupNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractGroupFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteGroup(ctx context.Context, r *Group) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("Group resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting Group...") - deleteOp := deleteGroupOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllGroup deletes all resources that the filter functions returns true on. 
-func (c *Client) DeleteAllGroup(ctx context.Context, project string, filter func(*Group) bool) error { - listObj, err := c.ListGroup(ctx, project) - if err != nil { - return err - } - - err = c.deleteAllGroup(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllGroup(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyGroup(ctx context.Context, rawDesired *Group, opts ...dcl.ApplyOption) (*Group, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *Group - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyGroupHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyGroupHelper(c *Client, ctx context.Context, rawDesired *Group, opts ...dcl.ApplyOption) (*Group, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyGroup...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. - if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractGroupFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.groupDiffsForRawDesired(ctx, rawDesired, opts...) 
- if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToGroupDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []groupApiOperation - if create { - ops = append(ops, &createGroupOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyGroupDiff(c, ctx, desired, rawDesired, ops, opts...) 
-} - -func applyGroupDiff(c *Client, ctx context.Context, desired *Group, rawDesired *Group, ops []groupApiOperation, opts ...dcl.ApplyOption) (*Group, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetGroup(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createGroupOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapGroup(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeGroupNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeGroupNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeGroupDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractGroupFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. 
- if err := postReadExtractGroupFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffGroup(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group.yaml deleted file mode 100644 index 26c8d48e27..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group.yaml +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-info: - title: Monitoring/Group - description: The Monitoring Group resource - x-dcl-struct-name: Group - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a Group - parameters: - - name: group - required: true - description: A full instance of a Group - apply: - description: The function used to apply information about a Group - parameters: - - name: group - required: true - description: A full instance of a Group - delete: - description: The function used to delete a Group - parameters: - - name: group - required: true - description: A full instance of a Group - deleteAll: - description: The function used to delete all Group - parameters: - - name: project - required: true - schema: - type: string - list: - description: The function used to list information about many Group - parameters: - - name: project - required: true - schema: - type: string -components: - schemas: - Group: - title: Group - x-dcl-id: projects/{{project}}/groups/{{name}} - x-dcl-parent-container: project - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - displayName - - filter - - project - properties: - displayName: - type: string - x-dcl-go-name: DisplayName - description: A user-assigned name for this group, used only for display - purposes. - filter: - type: string - x-dcl-go-name: Filter - description: The filter used to determine which monitored resources belong - to this group. - isCluster: - type: boolean - x-dcl-go-name: IsCluster - description: If true, the members of this group are considered to be a cluster. - The system can perform additional analysis on groups that are clusters. - name: - type: string - x-dcl-go-name: Name - description: 'Output only. The name of this group. The format is: `projects/{{project}}/groups/{{name}}`, - which is generated automatically.' 
- x-kubernetes-immutable: true - x-dcl-server-generated-parameter: true - parentName: - type: string - x-dcl-go-name: ParentName - description: 'The name of the group''s parent, if it has one. The format - is: projects/ For groups with no parent, `parent_name` is the empty string, - ``.' - x-dcl-send-empty: true - x-dcl-references: - - resource: Monitoring/Group - field: name - project: - type: string - x-dcl-go-name: Project - description: The project of the group - x-kubernetes-immutable: true - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - parent: true diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group_internal.go deleted file mode 100644 index f991101aa1..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group_internal.go +++ /dev/null @@ -1,770 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func (r *Group) validate() error { - - if err := dcl.Required(r, "displayName"); err != nil { - return err - } - if err := dcl.Required(r, "filter"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { - return err - } - return nil -} -func (r *Group) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://monitoring.googleapis.com/v3/", params) -} - -func (r *Group) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/groups/{{name}}", nr.basePath(), userBasePath, params), nil -} - -func (r *Group) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/groups", nr.basePath(), userBasePath, params), nil - -} - -func (r *Group) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/groups", nr.basePath(), userBasePath, params), nil - -} - -func (r *Group) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/groups/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// groupApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. 
-type groupApiOperation interface { - do(context.Context, *Group, *Client) error -} - -// newUpdateGroupUpdateRequest creates a request for an -// Group resource's update update type by filling in the update -// fields based on the intended state of the resource. -func newUpdateGroupUpdateRequest(ctx context.Context, f *Group, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { - req["displayName"] = v - } - if v := f.Filter; !dcl.IsEmptyValueIndirect(v) { - req["filter"] = v - } - if v := f.IsCluster; !dcl.IsEmptyValueIndirect(v) { - req["isCluster"] = v - } - if v, err := dcl.DeriveField("projects/%s/groups/%s", f.ParentName, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.ParentName)); err != nil { - return nil, fmt.Errorf("error expanding ParentName into parentName: %w", err) - } else if v != nil { - req["parentName"] = v - } - return req, nil -} - -// marshalUpdateGroupUpdateRequest converts the update into -// the final JSON request body. -func marshalUpdateGroupUpdateRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateGroupUpdateOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. 
- -func (op *updateGroupUpdateOperation) do(ctx context.Context, r *Group, c *Client) error { - _, err := c.GetGroup(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "update") - if err != nil { - return err - } - - req, err := newUpdateGroupUpdateRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateGroupUpdateRequest(c, req) - if err != nil { - return err - } - _, err = dcl.SendRequest(ctx, c.Config, "PUT", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - return nil -} - -func (c *Client) listGroupRaw(ctx context.Context, r *Group, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != GroupMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listGroupOperation struct { - Group []map[string]interface{} `json:"group"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listGroup(ctx context.Context, r *Group, pageToken string, pageSize int32) ([]*Group, string, error) { - b, err := c.listGroupRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listGroupOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*Group - for _, v := range m.Group { - res, err := unmarshalMapGroup(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Project = r.Project - l = append(l, res) - } - - return l, m.Token, 
nil -} - -func (c *Client) deleteAllGroup(ctx context.Context, f func(*Group) bool, resources []*Group) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteGroup(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteGroupOperation struct{} - -func (op *deleteGroupOperation) do(ctx context.Context, r *Group, c *Client) error { - r, err := c.GetGroup(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "Group not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetGroup checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return fmt.Errorf("failed to delete Group: %w", err) - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. - retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetGroup(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. 
The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createGroupOperation struct { - response map[string]interface{} -} - -func (op *createGroupOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createGroupOperation) do(ctx context.Context, r *Group, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - if r.Name != nil { - // Allowing creation to continue with Name set could result in a Group with the wrong Name. - return fmt.Errorf("server-generated parameter Name was specified by user as %v, should be unspecified", dcl.ValueOrEmptyString(r.Name)) - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - - o, err := dcl.ResponseBodyAsJSON(resp) - if err != nil { - return fmt.Errorf("error decoding response body into JSON: %w", err) - } - op.response = o - - // Include Name in URL substitution for initial GET request. 
- m := op.response - r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) - - if _, err := c.GetGroup(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getGroupRaw(ctx context.Context, r *Group) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) groupDiffsForRawDesired(ctx context.Context, rawDesired *Group, opts ...dcl.ApplyOption) (initial, desired *Group, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. - var fetchState *Group - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*Group); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Group, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - if fetchState.Name == nil { - // We cannot perform a get because of lack of information. We have to assume - // that this is being created for the first time. 
- desired, err := canonicalizeGroupDesiredState(rawDesired, nil) - return nil, desired, nil, err - } - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetGroup(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Group resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve Group resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that Group resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeGroupDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Group: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Group: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractGroupFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeGroupInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Group: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeGroupDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Group: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffGroup(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeGroupInitialState(rawInitial, rawDesired *Group) (*Group, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. 
- return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeGroupDesiredState(rawDesired, rawInitial *Group, opts ...dcl.ApplyOption) (*Group, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - - return rawDesired, nil - } - canonicalDesired := &Group{} - if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { - canonicalDesired.DisplayName = rawInitial.DisplayName - } else { - canonicalDesired.DisplayName = rawDesired.DisplayName - } - if dcl.StringCanonicalize(rawDesired.Filter, rawInitial.Filter) { - canonicalDesired.Filter = rawInitial.Filter - } else { - canonicalDesired.Filter = rawDesired.Filter - } - if dcl.BoolCanonicalize(rawDesired.IsCluster, rawInitial.IsCluster) { - canonicalDesired.IsCluster = rawInitial.IsCluster - } else { - canonicalDesired.IsCluster = rawDesired.IsCluster - } - if dcl.IsZeroValue(rawDesired.Name) || (dcl.IsEmptyValueIndirect(rawDesired.Name) && dcl.IsEmptyValueIndirect(rawInitial.Name)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.PartialSelfLinkToSelfLink(rawDesired.ParentName, rawInitial.ParentName) { - canonicalDesired.ParentName = rawInitial.ParentName - } else { - canonicalDesired.ParentName = rawDesired.ParentName - } - if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - return canonicalDesired, nil -} - -func canonicalizeGroupNewState(c *Client, rawNew, rawDesired *Group) (*Group, error) { - - if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } else { - if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Filter) && dcl.IsEmptyValueIndirect(rawDesired.Filter) { - rawNew.Filter = rawDesired.Filter - } else { - if dcl.StringCanonicalize(rawDesired.Filter, rawNew.Filter) { - rawNew.Filter = rawDesired.Filter - } - } - - if dcl.IsEmptyValueIndirect(rawNew.IsCluster) && dcl.IsEmptyValueIndirect(rawDesired.IsCluster) { - rawNew.IsCluster = rawDesired.IsCluster - } else { - if dcl.BoolCanonicalize(rawDesired.IsCluster, rawNew.IsCluster) { - rawNew.IsCluster = rawDesired.IsCluster - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.ParentName) && dcl.IsEmptyValueIndirect(rawDesired.ParentName) { - rawNew.ParentName = rawDesired.ParentName - } else { - if dcl.PartialSelfLinkToSelfLink(rawDesired.ParentName, rawNew.ParentName) { - rawNew.ParentName = rawDesired.ParentName - } - } - - rawNew.Project = rawDesired.Project - - return rawNew, nil -} - -// The differ returns a list of diffs, along with a list of operations that 
should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. -func diffGroup(c *Client, desired, actual *Group, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateGroupUpdateOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Filter, actual.Filter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateGroupUpdateOperation")}, fn.AddNest("Filter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.IsCluster, actual.IsCluster, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateGroupUpdateOperation")}, fn.AddNest("IsCluster")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.ParentName, actual.ParentName, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.TriggersOperation("updateGroupUpdateOperation")}, fn.AddNest("ParentName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. 
-func (r *Group) urlNormalized() *Group { - normalized := dcl.Copy(*r).(Group) - normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) - normalized.Filter = dcl.SelfLinkToName(r.Filter) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.ParentName = dcl.SelfLinkToName(r.ParentName) - normalized.Project = dcl.SelfLinkToName(r.Project) - return &normalized -} - -func (r *Group) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "update" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/groups/{{name}}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the Group resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *Group) marshal(c *Client) ([]byte, error) { - m, err := expandGroup(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling Group: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalGroup decodes JSON responses into the Group resource schema. -func unmarshalGroup(b []byte, c *Client, res *Group) (*Group, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapGroup(m, c, res) -} - -func unmarshalMapGroup(m map[string]interface{}, c *Client, res *Group) (*Group, error) { - - flattened := flattenGroup(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandGroup expands Group into a JSON request object. 
-func expandGroup(c *Client, f *Group) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v := f.DisplayName; dcl.ValueShouldBeSent(v) { - m["displayName"] = v - } - if v := f.Filter; dcl.ValueShouldBeSent(v) { - m["filter"] = v - } - if v := f.IsCluster; dcl.ValueShouldBeSent(v) { - m["isCluster"] = v - } - if v := f.Name; dcl.ValueShouldBeSent(v) { - m["name"] = v - } - if v, err := dcl.DeriveField("projects/%s/groups/%s", f.ParentName, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.ParentName)); err != nil { - return nil, fmt.Errorf("error expanding ParentName into parentName: %w", err) - } else if v != nil { - m["parentName"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - - return m, nil -} - -// flattenGroup flattens Group from a JSON request object into the -// Group type. -func flattenGroup(c *Client, i interface{}, res *Group) *Group { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &Group{} - resultRes.DisplayName = dcl.FlattenString(m["displayName"]) - resultRes.Filter = dcl.FlattenString(m["filter"]) - resultRes.IsCluster = dcl.FlattenBool(m["isCluster"]) - resultRes.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) - resultRes.ParentName = dcl.FlattenString(m["parentName"]) - resultRes.Project = dcl.FlattenString(m["project"]) - - return resultRes -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. 
-func (r *Group) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalGroup(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type groupDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp groupApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToGroupDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]groupDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []groupDiff - // For each operation name, create a groupDiff which contains the operation. 
- for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := groupDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToGroupApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToGroupApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (groupApiOperation, error) { - switch opName { - - case "updateGroupUpdateOperation": - return &updateGroupUpdateOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractGroupFields(r *Group) error { - return nil -} - -func postReadExtractGroupFields(r *Group) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group_schema.go deleted file mode 100644 index 3684eb1d96..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group_schema.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -package monitoring - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLGroupSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Monitoring/Group", - Description: "The Monitoring Group resource", - StructName: "Group", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Group", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "group", - Required: true, - Description: "A full instance of a Group", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Group", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "group", - Required: true, - Description: "A full instance of a Group", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Group", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "group", - Required: true, - Description: "A full instance of a Group", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Group", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Group", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Group": &dcl.Component{ - Title: "Group", - ID: "projects/{{project}}/groups/{{name}}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "displayName", - "filter", - "project", - }, - Properties: 
map[string]*dcl.Property{ - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "A user-assigned name for this group, used only for display purposes.", - }, - "filter": &dcl.Property{ - Type: "string", - GoName: "Filter", - Description: "The filter used to determine which monitored resources belong to this group.", - }, - "isCluster": &dcl.Property{ - Type: "boolean", - GoName: "IsCluster", - Description: "If true, the members of this group are considered to be a cluster. The system can perform additional analysis on groups that are clusters.", - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Output only. The name of this group. The format is: `projects/{{project}}/groups/{{name}}`, which is generated automatically.", - Immutable: true, - ServerGeneratedParameter: true, - }, - "parentName": &dcl.Property{ - Type: "string", - GoName: "ParentName", - Description: "The name of the group's parent, if it has one. The format is: projects/ For groups with no parent, `parent_name` is the empty string, ``.", - SendEmpty: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Monitoring/Group", - Field: "name", - }, - }, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project of the group", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group_yaml_embed.go deleted file mode 100644 index a669615869..0000000000 --- 
a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/group_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// GENERATED BY gen_go_data.go -// gen_go_data -package monitoring -var YAML_group blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/group.yaml - -package monitoring - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/group.yaml -var YAML_group = []byte("info:\n title: Monitoring/Group\n description: The Monitoring Group resource\n x-dcl-struct-name: Group\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Group\n parameters:\n - name: group\n required: true\n description: A full instance of a Group\n apply:\n description: The function used to apply information about a Group\n parameters:\n - name: group\n required: true\n description: A full instance of a Group\n delete:\n description: The function used to delete a Group\n parameters:\n - name: group\n required: true\n description: A full instance of a Group\n deleteAll:\n description: The function used to delete all Group\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Group\n parameters:\n - name: project\n required: true\n 
schema:\n type: string\ncomponents:\n schemas:\n Group:\n title: Group\n x-dcl-id: projects/{{project}}/groups/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - displayName\n - filter\n - project\n properties:\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: A user-assigned name for this group, used only for display\n purposes.\n filter:\n type: string\n x-dcl-go-name: Filter\n description: The filter used to determine which monitored resources belong\n to this group.\n isCluster:\n type: boolean\n x-dcl-go-name: IsCluster\n description: If true, the members of this group are considered to be a cluster.\n The system can perform additional analysis on groups that are clusters.\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Output only. The name of this group. The format is: `projects/{{project}}/groups/{{name}}`,\n which is generated automatically.'\n x-kubernetes-immutable: true\n x-dcl-server-generated-parameter: true\n parentName:\n type: string\n x-dcl-go-name: ParentName\n description: 'The name of the group''s parent, if it has one. 
The format\n is: projects/ For groups with no parent, `parent_name` is the empty string,\n ``.'\n x-dcl-send-empty: true\n x-dcl-references:\n - resource: Monitoring/Group\n field: name\n project:\n type: string\n x-dcl-go-name: Project\n description: The project of the group\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n") - -// 2994 bytes -// MD5: 5b0f18ecb40670caa9a101b3177f4844 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.go deleted file mode 100644 index dcf80862ff..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.go +++ /dev/null @@ -1,622 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type MetricDescriptor struct { - SelfLink *string `json:"selfLink"` - Type *string `json:"type"` - Labels []MetricDescriptorLabels `json:"labels"` - MetricKind *MetricDescriptorMetricKindEnum `json:"metricKind"` - ValueType *MetricDescriptorValueTypeEnum `json:"valueType"` - Unit *string `json:"unit"` - Description *string `json:"description"` - DisplayName *string `json:"displayName"` - Metadata *MetricDescriptorMetadata `json:"metadata"` - LaunchStage *MetricDescriptorLaunchStageEnum `json:"launchStage"` - MonitoredResourceTypes []string `json:"monitoredResourceTypes"` - Project *string `json:"project"` -} - -func (r *MetricDescriptor) String() string { - return dcl.SprintResource(r) -} - -// The enum MetricDescriptorLabelsValueTypeEnum. -type MetricDescriptorLabelsValueTypeEnum string - -// MetricDescriptorLabelsValueTypeEnumRef returns a *MetricDescriptorLabelsValueTypeEnum with the value of string s -// If the empty string is provided, nil is returned. -func MetricDescriptorLabelsValueTypeEnumRef(s string) *MetricDescriptorLabelsValueTypeEnum { - v := MetricDescriptorLabelsValueTypeEnum(s) - return &v -} - -func (v MetricDescriptorLabelsValueTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"STRING", "BOOL", "INT64"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "MetricDescriptorLabelsValueTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum MetricDescriptorMetricKindEnum. -type MetricDescriptorMetricKindEnum string - -// MetricDescriptorMetricKindEnumRef returns a *MetricDescriptorMetricKindEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func MetricDescriptorMetricKindEnumRef(s string) *MetricDescriptorMetricKindEnum { - v := MetricDescriptorMetricKindEnum(s) - return &v -} - -func (v MetricDescriptorMetricKindEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METRIC_KIND_UNSPECIFIED", "GAUGE", "DELTA", "CUMULATIVE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "MetricDescriptorMetricKindEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum MetricDescriptorValueTypeEnum. -type MetricDescriptorValueTypeEnum string - -// MetricDescriptorValueTypeEnumRef returns a *MetricDescriptorValueTypeEnum with the value of string s -// If the empty string is provided, nil is returned. -func MetricDescriptorValueTypeEnumRef(s string) *MetricDescriptorValueTypeEnum { - v := MetricDescriptorValueTypeEnum(s) - return &v -} - -func (v MetricDescriptorValueTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"STRING", "BOOL", "INT64"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "MetricDescriptorValueTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum MetricDescriptorMetadataLaunchStageEnum. -type MetricDescriptorMetadataLaunchStageEnum string - -// MetricDescriptorMetadataLaunchStageEnumRef returns a *MetricDescriptorMetadataLaunchStageEnum with the value of string s -// If the empty string is provided, nil is returned. -func MetricDescriptorMetadataLaunchStageEnumRef(s string) *MetricDescriptorMetadataLaunchStageEnum { - v := MetricDescriptorMetadataLaunchStageEnum(s) - return &v -} - -func (v MetricDescriptorMetadataLaunchStageEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "MetricDescriptorMetadataLaunchStageEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum MetricDescriptorLaunchStageEnum. -type MetricDescriptorLaunchStageEnum string - -// MetricDescriptorLaunchStageEnumRef returns a *MetricDescriptorLaunchStageEnum with the value of string s -// If the empty string is provided, nil is returned. -func MetricDescriptorLaunchStageEnumRef(s string) *MetricDescriptorLaunchStageEnum { - v := MetricDescriptorLaunchStageEnum(s) - return &v -} - -func (v MetricDescriptorLaunchStageEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "MetricDescriptorLaunchStageEnum", - Value: string(v), - Valid: []string{}, - } -} - -type MetricDescriptorLabels struct { - empty bool `json:"-"` - Key *string `json:"key"` - ValueType *MetricDescriptorLabelsValueTypeEnum `json:"valueType"` - Description *string `json:"description"` -} - -type jsonMetricDescriptorLabels MetricDescriptorLabels - -func (r *MetricDescriptorLabels) UnmarshalJSON(data []byte) error { - var res jsonMetricDescriptorLabels - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyMetricDescriptorLabels - } else { - - r.Key = res.Key - - r.ValueType = res.ValueType - - r.Description = res.Description - - } - return nil -} - -// This object is used to assert a desired state where this MetricDescriptorLabels is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyMetricDescriptorLabels *MetricDescriptorLabels = &MetricDescriptorLabels{empty: true} - -func (r *MetricDescriptorLabels) Empty() bool { - return r.empty -} - -func (r *MetricDescriptorLabels) String() string { - return dcl.SprintResource(r) -} - -func (r *MetricDescriptorLabels) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type MetricDescriptorMetadata struct { - empty bool `json:"-"` - LaunchStage *MetricDescriptorMetadataLaunchStageEnum `json:"launchStage"` - SamplePeriod *string `json:"samplePeriod"` - IngestDelay *string `json:"ingestDelay"` -} - -type jsonMetricDescriptorMetadata MetricDescriptorMetadata - -func (r *MetricDescriptorMetadata) UnmarshalJSON(data []byte) error { - var res jsonMetricDescriptorMetadata - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyMetricDescriptorMetadata - } else { - - r.LaunchStage = res.LaunchStage - - r.SamplePeriod = res.SamplePeriod - - r.IngestDelay = res.IngestDelay - - } - return nil -} - -// This object is used to assert a desired state where this MetricDescriptorMetadata is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyMetricDescriptorMetadata *MetricDescriptorMetadata = &MetricDescriptorMetadata{empty: true} - -func (r *MetricDescriptorMetadata) Empty() bool { - return r.empty -} - -func (r *MetricDescriptorMetadata) String() string { - return dcl.SprintResource(r) -} - -func (r *MetricDescriptorMetadata) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. -func (r *MetricDescriptor) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "monitoring", - Type: "MetricDescriptor", - Version: "monitoring", - } -} - -func (r *MetricDescriptor) ID() (string, error) { - if err := extractMetricDescriptorFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "self_link": dcl.ValueOrEmptyString(nr.SelfLink), - "type": dcl.ValueOrEmptyString(nr.Type), - "labels": dcl.ValueOrEmptyString(nr.Labels), - "metric_kind": dcl.ValueOrEmptyString(nr.MetricKind), - "value_type": dcl.ValueOrEmptyString(nr.ValueType), - "unit": dcl.ValueOrEmptyString(nr.Unit), - "description": dcl.ValueOrEmptyString(nr.Description), - "display_name": dcl.ValueOrEmptyString(nr.DisplayName), - "metadata": dcl.ValueOrEmptyString(nr.Metadata), - "launch_stage": dcl.ValueOrEmptyString(nr.LaunchStage), - "monitored_resource_types": dcl.ValueOrEmptyString(nr.MonitoredResourceTypes), - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.Nprintf("projects/{{project}}/metricDescriptors/{{type}}", params), nil -} - -const MetricDescriptorMaxPage = -1 - -type MetricDescriptorList struct { - Items []*MetricDescriptor - - nextToken string - - pageSize int32 - - resource *MetricDescriptor -} - -func (l *MetricDescriptorList) HasNext() bool { - return 
l.nextToken != "" -} - -func (l *MetricDescriptorList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listMetricDescriptor(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListMetricDescriptor(ctx context.Context, project string) (*MetricDescriptorList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListMetricDescriptorWithMaxResults(ctx, project, MetricDescriptorMaxPage) - -} - -func (c *Client) ListMetricDescriptorWithMaxResults(ctx context.Context, project string, pageSize int32) (*MetricDescriptorList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. - r := &MetricDescriptor{ - Project: &project, - } - items, token, err := c.listMetricDescriptor(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &MetricDescriptorList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetMetricDescriptor(ctx context.Context, r *MetricDescriptor) (*MetricDescriptor, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
- extractMetricDescriptorFields(r) - - b, err := c.getMetricDescriptorRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalMetricDescriptor(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Type = r.Type - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeMetricDescriptorNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractMetricDescriptorFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteMetricDescriptor(ctx context.Context, r *MetricDescriptor) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("MetricDescriptor resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting MetricDescriptor...") - deleteOp := deleteMetricDescriptorOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllMetricDescriptor deletes all resources that the filter functions returns true on. 
-func (c *Client) DeleteAllMetricDescriptor(ctx context.Context, project string, filter func(*MetricDescriptor) bool) error { - listObj, err := c.ListMetricDescriptor(ctx, project) - if err != nil { - return err - } - - err = c.deleteAllMetricDescriptor(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllMetricDescriptor(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyMetricDescriptor(ctx context.Context, rawDesired *MetricDescriptor, opts ...dcl.ApplyOption) (*MetricDescriptor, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *MetricDescriptor - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyMetricDescriptorHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyMetricDescriptorHelper(c *Client, ctx context.Context, rawDesired *MetricDescriptor, opts ...dcl.ApplyOption) (*MetricDescriptor, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyMetricDescriptor...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. 
- if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractMetricDescriptorFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.metricDescriptorDiffsForRawDesired(ctx, rawDesired, opts...) - if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToMetricDescriptorDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []metricDescriptorApiOperation - if create { - ops = append(ops, &createMetricDescriptorOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", 
op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyMetricDescriptorDiff(c, ctx, desired, rawDesired, ops, opts...) -} - -func applyMetricDescriptorDiff(c *Client, ctx context.Context, desired *MetricDescriptor, rawDesired *MetricDescriptor, ops []metricDescriptorApiOperation, opts ...dcl.ApplyOption) (*MetricDescriptor, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetMetricDescriptor(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createMetricDescriptorOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapMetricDescriptor(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeMetricDescriptorNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeMetricDescriptorNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. 
- // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeMetricDescriptorDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractMetricDescriptorFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. - if err := postReadExtractMetricDescriptorFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffMetricDescriptor(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.yaml deleted file mode 100644 index 42fdd8d363..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor.yaml +++ /dev/null @@ -1,299 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -info: - title: Monitoring/MetricDescriptor - description: The Monitoring MetricDescriptor resource - x-dcl-struct-name: MetricDescriptor - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a MetricDescriptor - parameters: - - name: metricDescriptor - required: true - description: A full instance of a MetricDescriptor - apply: - description: The function used to apply information about a MetricDescriptor - parameters: - - name: metricDescriptor - required: true - description: A full instance of a MetricDescriptor - delete: - description: The function used to delete a MetricDescriptor - parameters: - - name: metricDescriptor - required: true - description: A full instance of a MetricDescriptor - deleteAll: - description: The function used to delete all MetricDescriptor - parameters: - - name: project - required: true - schema: - type: string - list: - description: The function used to list information about many MetricDescriptor - parameters: - - name: project - required: true - schema: - type: string -components: - schemas: - MetricDescriptor: - title: MetricDescriptor - x-dcl-id: projects/{{project}}/metricDescriptors/{{type}} - x-dcl-uses-state-hint: true - x-dcl-parent-container: project - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - type - - metricKind - - valueType - - project - properties: - description: - type: string - x-dcl-go-name: Description - description: A detailed description of the metric, 
which can be used in - documentation. - x-kubernetes-immutable: true - displayName: - type: string - x-dcl-go-name: DisplayName - description: A concise name for the metric, which can be displayed in user - interfaces. Use sentence case without an ending period, for example "Request - count". This field is optional but it is recommended to be set for any - metrics associated with user-visible concepts, such as Quota. - x-kubernetes-immutable: true - labels: - type: array - x-dcl-go-name: Labels - description: The set of labels that can be used to describe a specific instance - of this metric type. For example, the `appengine.googleapis.com/http/server/response_latencies` - metric type has a label for the HTTP response code, `response_code`, so - you can look at latencies for successful responses or just for responses - that failed. - x-kubernetes-immutable: true - x-dcl-send-empty: true - x-dcl-list-type: set - items: - type: object - x-dcl-go-type: MetricDescriptorLabels - properties: - description: - type: string - x-dcl-go-name: Description - description: A human-readable description for the label. - x-kubernetes-immutable: true - key: - type: string - x-dcl-go-name: Key - description: 'The key for this label. The key must meet the following - criteria: * Does not exceed 100 characters. * Matches the following - regular expression: `a-zA-Z*` * The first character must be an upper- - or lower-case letter. * The remaining characters must be letters, - digits, or underscores.' - x-kubernetes-immutable: true - valueType: - type: string - x-dcl-go-name: ValueType - x-dcl-go-type: MetricDescriptorLabelsValueTypeEnum - description: 'The type of data that can be assigned to the label. - Possible values: STRING, BOOL, INT64' - x-kubernetes-immutable: true - enum: - - STRING - - BOOL - - INT64 - launchStage: - type: string - x-dcl-go-name: LaunchStage - x-dcl-go-type: MetricDescriptorLaunchStageEnum - description: 'Optional. The launch stage of the metric definition. 
Possible - values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS, - ALPHA, BETA, GA, DEPRECATED' - x-kubernetes-immutable: true - enum: - - LAUNCH_STAGE_UNSPECIFIED - - UNIMPLEMENTED - - PRELAUNCH - - EARLY_ACCESS - - ALPHA - - BETA - - GA - - DEPRECATED - x-dcl-mutable-unreadable: true - metadata: - type: object - x-dcl-go-name: Metadata - x-dcl-go-type: MetricDescriptorMetadata - description: Optional. Metadata which can be used to guide usage of the - metric. - x-kubernetes-immutable: true - x-dcl-mutable-unreadable: true - properties: - ingestDelay: - type: string - x-dcl-go-name: IngestDelay - description: The delay of data points caused by ingestion. Data points - older than this age are guaranteed to be ingested and available to - be read, excluding data loss due to errors. - x-kubernetes-immutable: true - launchStage: - type: string - x-dcl-go-name: LaunchStage - x-dcl-go-type: MetricDescriptorMetadataLaunchStageEnum - description: 'Deprecated. Must use the MetricDescriptor.launch_stage - instead. Possible values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, - PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED' - x-kubernetes-immutable: true - enum: - - LAUNCH_STAGE_UNSPECIFIED - - UNIMPLEMENTED - - PRELAUNCH - - EARLY_ACCESS - - ALPHA - - BETA - - GA - - DEPRECATED - samplePeriod: - type: string - x-dcl-go-name: SamplePeriod - description: The sampling period of metric data points. For metrics - which are written periodically, consecutive data points are stored - at this time interval, excluding data loss due to errors. Metrics - with a higher granularity have a smaller sampling period. - x-kubernetes-immutable: true - metricKind: - type: string - x-dcl-go-name: MetricKind - x-dcl-go-type: MetricDescriptorMetricKindEnum - description: 'Whether the metric records instantaneous values, changes to - a value, etc. Some combinations of `metric_kind` and `value_type` might - not be supported. 
Possible values: METRIC_KIND_UNSPECIFIED, GAUGE, DELTA, - CUMULATIVE' - x-kubernetes-immutable: true - enum: - - METRIC_KIND_UNSPECIFIED - - GAUGE - - DELTA - - CUMULATIVE - monitoredResourceTypes: - type: array - x-dcl-go-name: MonitoredResourceTypes - readOnly: true - description: Read-only. If present, then a time series, which is identified - partially by a metric type and a MonitoredResourceDescriptor, that is - associated with this metric type can only be associated with one of the - monitored resource types listed here. - x-kubernetes-immutable: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - project: - type: string - x-dcl-go-name: Project - description: The project for the resource - x-kubernetes-immutable: true - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - parent: true - selfLink: - type: string - x-dcl-go-name: SelfLink - readOnly: true - description: The resource name of the metric descriptor. - x-kubernetes-immutable: true - type: - type: string - x-dcl-go-name: Type - description: 'The metric type, including its DNS name prefix. The type is - not URL-encoded. All user-defined metric types have the DNS name `custom.googleapis.com` - or `external.googleapis.com`. Metric types should use a natural hierarchical - grouping. For example: "custom.googleapis.com/invoice/paid/amount" "external.googleapis.com/prometheus/up" - "appengine.googleapis.com/http/server/response_latencies"' - x-kubernetes-immutable: true - x-dcl-forward-slash-allowed: true - unit: - type: string - x-dcl-go-name: Unit - description: 'The units in which the metric value is reported. It is only - applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. - The `unit` defines the representation of the stored metric values. Different - systems might scale the values to be more easily displayed (so a value - of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` - _might_ be displayed as `3.5MBy`). 
However, if the `unit` is `kBy`, then - the value of the metric is always in thousands of bytes, no matter how - it might be displayed. If you want a custom metric to record the exact - number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` - metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). - If the job uses 12,005 CPU-seconds, then the value is written as `12005`. - Alternatively, if you want a custom metric to record data in a more granular - way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, - and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` - and write `11.723` (which is `12005/1024`). The supported units are a - subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) - standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second - * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** - * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) - * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) - * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico - (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) - * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi - (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also - includes these connectors: * `/` division or ratio (as an infix operator). - For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost - never have `/s` in a metric `unit`; rates should always be computed at - query time from the underlying cumulative or delta value). * `.` multiplication - or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. - The grammar for a unit is as follows: Expression = Component: { "." 
Component - } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation - ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: * `Annotation` - is just a comment if it follows a `UNIT`. If the annotation is used alone, - then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`, - `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable - ASCII characters not containing `{` or `}`. * `1` represents a unitary - [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) - of 1, such as in `1/s`. It is typically used when none of the basic units - are appropriate. For example, "new users per day" can be represented as - `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new users). - Alternatively, "thousands of page views per day" would be represented - as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` - would mean "5300 page views per day"). * `%` represents dimensionless - value of 1/100, and annotates values giving a percentage (so the metric - values are typically in the range of 0..100, and a metric value `3` means - "3 percent"). * `10^2.%` indicates a metric contains a ratio, typically - in the range 0..1, that will be multiplied by 100 and displayed as a percentage - (so a metric value `0.03` means "3 percent").' - x-kubernetes-immutable: true - valueType: - type: string - x-dcl-go-name: ValueType - x-dcl-go-type: MetricDescriptorValueTypeEnum - description: 'Whether the measurement is an integer, a floating-point number, - etc. Some combinations of `metric_kind` and `value_type` might not be - supported. 
Possible values: STRING, BOOL, INT64' - x-kubernetes-immutable: true - enum: - - STRING - - BOOL - - INT64 diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_internal.go deleted file mode 100644 index 224aa87baf..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_internal.go +++ /dev/null @@ -1,1706 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - "time" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func (r *MetricDescriptor) validate() error { - - if err := dcl.Required(r, "type"); err != nil { - return err - } - if err := dcl.Required(r, "metricKind"); err != nil { - return err - } - if err := dcl.Required(r, "valueType"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Metadata) { - if err := r.Metadata.validate(); err != nil { - return err - } - } - return nil -} -func (r *MetricDescriptorLabels) validate() error { - return nil -} -func (r *MetricDescriptorMetadata) validate() error { - return nil -} -func (r *MetricDescriptor) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://monitoring.googleapis.com/v3/", params) -} - -func (r *MetricDescriptor) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "type": dcl.ValueOrEmptyString(nr.Type), - } - return dcl.URL("projects/{{project}}/metricDescriptors/{{type}}", nr.basePath(), userBasePath, params), nil -} - -func (r *MetricDescriptor) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/metricDescriptors", nr.basePath(), userBasePath, params), nil - -} - -func (r *MetricDescriptor) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/metricDescriptors", nr.basePath(), userBasePath, params), nil - -} - -func (r *MetricDescriptor) deleteURL(userBasePath string) (string, error) { - 
nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "type": dcl.ValueOrEmptyString(nr.Type), - } - return dcl.URL("projects/{{project}}/metricDescriptors/{{type}}", nr.basePath(), userBasePath, params), nil -} - -// metricDescriptorApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type metricDescriptorApiOperation interface { - do(context.Context, *MetricDescriptor, *Client) error -} - -func (c *Client) listMetricDescriptorRaw(ctx context.Context, r *MetricDescriptor, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != MetricDescriptorMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listMetricDescriptorOperation struct { - MetricDescriptors []map[string]interface{} `json:"metricDescriptors"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listMetricDescriptor(ctx context.Context, r *MetricDescriptor, pageToken string, pageSize int32) ([]*MetricDescriptor, string, error) { - b, err := c.listMetricDescriptorRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listMetricDescriptorOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*MetricDescriptor - for _, v := range m.MetricDescriptors { - res, err := unmarshalMapMetricDescriptor(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Project = r.Project - l = append(l, res) - } - - return 
l, m.Token, nil -} - -func (c *Client) deleteAllMetricDescriptor(ctx context.Context, f func(*MetricDescriptor) bool, resources []*MetricDescriptor) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteMetricDescriptor(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteMetricDescriptorOperation struct{} - -func (op *deleteMetricDescriptorOperation) do(ctx context.Context, r *MetricDescriptor, c *Client) error { - r, err := c.GetMetricDescriptor(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "MetricDescriptor not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetMetricDescriptor checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return fmt.Errorf("failed to delete MetricDescriptor: %w", err) - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. 
- retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetMetricDescriptor(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createMetricDescriptorOperation struct { - response map[string]interface{} -} - -func (op *createMetricDescriptorOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createMetricDescriptorOperation) do(ctx context.Context, r *MetricDescriptor, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - - o, err := dcl.ResponseBodyAsJSON(resp) - if err != nil { - return fmt.Errorf("error decoding response body into JSON: %w", err) - } - op.response = o - - // Poll for the MetricDescriptor resource to be created. MetricDescriptor resources are eventually consistent but do not support operations - // so we must repeatedly poll to check for their creation. 
- requiredSuccesses := 10 - start := time.Now() - err = dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - getResp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, nil) - if err != nil { - // If the error is a transient server error (e.g., 500) or not found (i.e., the resource has not yet been created), - // continue retrying until the transient error is resolved, the resource is created, or we time out. - if dcl.IsRetryableRequestError(c.Config, err, true, start) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - getResp.Response.Body.Close() - requiredSuccesses-- - if requiredSuccesses > 0 { - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return getResp, nil - }, c.Config.RetryProvider) - - if _, err := c.GetMetricDescriptor(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getMetricDescriptorRaw(ctx context.Context, r *MetricDescriptor) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) metricDescriptorDiffsForRawDesired(ctx context.Context, rawDesired *MetricDescriptor, opts ...dcl.ApplyOption) (initial, desired *MetricDescriptor, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
- var fetchState *MetricDescriptor - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*MetricDescriptor); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected MetricDescriptor, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetMetricDescriptor(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a MetricDescriptor resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve MetricDescriptor resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that MetricDescriptor resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeMetricDescriptorDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for MetricDescriptor: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for MetricDescriptor: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractMetricDescriptorFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeMetricDescriptorInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for MetricDescriptor: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeMetricDescriptorDesiredState(rawDesired, rawInitial, opts...) 
- if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for MetricDescriptor: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffMetricDescriptor(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeMetricDescriptorInitialState(rawInitial, rawDesired *MetricDescriptor) (*MetricDescriptor, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeMetricDescriptorDesiredState(rawDesired, rawInitial *MetricDescriptor, opts ...dcl.ApplyOption) (*MetricDescriptor, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - rawDesired.Metadata = canonicalizeMetricDescriptorMetadata(rawDesired.Metadata, nil, opts...) - - return rawDesired, nil - } - canonicalDesired := &MetricDescriptor{} - if dcl.StringCanonicalize(rawDesired.Type, rawInitial.Type) { - canonicalDesired.Type = rawInitial.Type - } else { - canonicalDesired.Type = rawDesired.Type - } - canonicalDesired.Labels = canonicalizeMetricDescriptorLabelsSlice(rawDesired.Labels, rawInitial.Labels, opts...) - if dcl.IsZeroValue(rawDesired.MetricKind) || (dcl.IsEmptyValueIndirect(rawDesired.MetricKind) && dcl.IsEmptyValueIndirect(rawInitial.MetricKind)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- canonicalDesired.MetricKind = rawInitial.MetricKind - } else { - canonicalDesired.MetricKind = rawDesired.MetricKind - } - if canonicalizeMetricDescriptorValueType(rawDesired.ValueType, rawInitial.ValueType) { - canonicalDesired.ValueType = rawInitial.ValueType - } else { - canonicalDesired.ValueType = rawDesired.ValueType - } - if dcl.StringCanonicalize(rawDesired.Unit, rawInitial.Unit) { - canonicalDesired.Unit = rawInitial.Unit - } else { - canonicalDesired.Unit = rawDesired.Unit - } - if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { - canonicalDesired.Description = rawInitial.Description - } else { - canonicalDesired.Description = rawDesired.Description - } - if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { - canonicalDesired.DisplayName = rawInitial.DisplayName - } else { - canonicalDesired.DisplayName = rawDesired.DisplayName - } - canonicalDesired.Metadata = canonicalizeMetricDescriptorMetadata(rawDesired.Metadata, rawInitial.Metadata, opts...) - if dcl.IsZeroValue(rawDesired.LaunchStage) || (dcl.IsEmptyValueIndirect(rawDesired.LaunchStage) && dcl.IsEmptyValueIndirect(rawInitial.LaunchStage)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- canonicalDesired.LaunchStage = rawInitial.LaunchStage - } else { - canonicalDesired.LaunchStage = rawDesired.LaunchStage - } - if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - return canonicalDesired, nil -} - -func canonicalizeMetricDescriptorNewState(c *Client, rawNew, rawDesired *MetricDescriptor) (*MetricDescriptor, error) { - - if dcl.IsEmptyValueIndirect(rawNew.SelfLink) && dcl.IsEmptyValueIndirect(rawDesired.SelfLink) { - rawNew.SelfLink = rawDesired.SelfLink - } else { - if dcl.StringCanonicalize(rawDesired.SelfLink, rawNew.SelfLink) { - rawNew.SelfLink = rawDesired.SelfLink - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Type) && dcl.IsEmptyValueIndirect(rawDesired.Type) { - rawNew.Type = rawDesired.Type - } else { - if dcl.StringCanonicalize(rawDesired.Type, rawNew.Type) { - rawNew.Type = rawDesired.Type - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { - rawNew.Labels = rawDesired.Labels - } else { - rawNew.Labels = canonicalizeNewMetricDescriptorLabelsSet(c, rawDesired.Labels, rawNew.Labels) - } - - if dcl.IsEmptyValueIndirect(rawNew.MetricKind) && dcl.IsEmptyValueIndirect(rawDesired.MetricKind) { - rawNew.MetricKind = rawDesired.MetricKind - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.ValueType) && dcl.IsEmptyValueIndirect(rawDesired.ValueType) { - rawNew.ValueType = rawDesired.ValueType - } else { - if canonicalizeMetricDescriptorValueType(rawDesired.ValueType, rawNew.ValueType) { - rawNew.ValueType = rawDesired.ValueType - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Unit) && dcl.IsEmptyValueIndirect(rawDesired.Unit) { - rawNew.Unit = rawDesired.Unit - } else { - if dcl.StringCanonicalize(rawDesired.Unit, rawNew.Unit) { - rawNew.Unit = rawDesired.Unit - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { 
- rawNew.Description = rawDesired.Description - } else { - if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { - rawNew.Description = rawDesired.Description - } - } - - if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } else { - if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } - } - - rawNew.Metadata = rawDesired.Metadata - - rawNew.LaunchStage = rawDesired.LaunchStage - - if dcl.IsEmptyValueIndirect(rawNew.MonitoredResourceTypes) && dcl.IsEmptyValueIndirect(rawDesired.MonitoredResourceTypes) { - rawNew.MonitoredResourceTypes = rawDesired.MonitoredResourceTypes - } else { - if dcl.StringArrayCanonicalize(rawDesired.MonitoredResourceTypes, rawNew.MonitoredResourceTypes) { - rawNew.MonitoredResourceTypes = rawDesired.MonitoredResourceTypes - } - } - - rawNew.Project = rawDesired.Project - - return rawNew, nil -} - -func canonicalizeMetricDescriptorLabels(des, initial *MetricDescriptorLabels, opts ...dcl.ApplyOption) *MetricDescriptorLabels { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &MetricDescriptorLabels{} - - if dcl.StringCanonicalize(des.Key, initial.Key) || dcl.IsZeroValue(des.Key) { - cDes.Key = initial.Key - } else { - cDes.Key = des.Key - } - if canonicalizeMetricDescriptorLabelsValueType(des.ValueType, initial.ValueType) || dcl.IsZeroValue(des.ValueType) { - cDes.ValueType = initial.ValueType - } else { - cDes.ValueType = des.ValueType - } - if dcl.StringCanonicalize(des.Description, initial.Description) || dcl.IsZeroValue(des.Description) { - cDes.Description = initial.Description - } else { - cDes.Description = des.Description - } - - return cDes -} - -func canonicalizeMetricDescriptorLabelsSlice(des, initial []MetricDescriptorLabels, opts ...dcl.ApplyOption) []MetricDescriptorLabels 
{ - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]MetricDescriptorLabels, 0, len(des)) - for _, d := range des { - cd := canonicalizeMetricDescriptorLabels(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]MetricDescriptorLabels, 0, len(des)) - for i, d := range des { - cd := canonicalizeMetricDescriptorLabels(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewMetricDescriptorLabels(c *Client, des, nw *MetricDescriptorLabels) *MetricDescriptorLabels { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for MetricDescriptorLabels while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Key, nw.Key) { - nw.Key = des.Key - } - if canonicalizeMetricDescriptorLabelsValueType(des.ValueType, nw.ValueType) { - nw.ValueType = des.ValueType - } - if dcl.StringCanonicalize(des.Description, nw.Description) { - nw.Description = des.Description - } - - return nw -} - -func canonicalizeNewMetricDescriptorLabelsSet(c *Client, des, nw []MetricDescriptorLabels) []MetricDescriptorLabels { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []MetricDescriptorLabels - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareMetricDescriptorLabelsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewMetricDescriptorLabels(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. 
- items = append(items, nw...) - - return items -} - -func canonicalizeNewMetricDescriptorLabelsSlice(c *Client, des, nw []MetricDescriptorLabels) []MetricDescriptorLabels { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []MetricDescriptorLabels - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewMetricDescriptorLabels(c, &d, &n)) - } - - return items -} - -func canonicalizeMetricDescriptorMetadata(des, initial *MetricDescriptorMetadata, opts ...dcl.ApplyOption) *MetricDescriptorMetadata { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &MetricDescriptorMetadata{} - - if dcl.IsZeroValue(des.LaunchStage) || (dcl.IsEmptyValueIndirect(des.LaunchStage) && dcl.IsEmptyValueIndirect(initial.LaunchStage)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.LaunchStage = initial.LaunchStage - } else { - cDes.LaunchStage = des.LaunchStage - } - if dcl.StringCanonicalize(des.SamplePeriod, initial.SamplePeriod) || dcl.IsZeroValue(des.SamplePeriod) { - cDes.SamplePeriod = initial.SamplePeriod - } else { - cDes.SamplePeriod = des.SamplePeriod - } - if dcl.StringCanonicalize(des.IngestDelay, initial.IngestDelay) || dcl.IsZeroValue(des.IngestDelay) { - cDes.IngestDelay = initial.IngestDelay - } else { - cDes.IngestDelay = des.IngestDelay - } - - return cDes -} - -func canonicalizeMetricDescriptorMetadataSlice(des, initial []MetricDescriptorMetadata, opts ...dcl.ApplyOption) []MetricDescriptorMetadata { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]MetricDescriptorMetadata, 0, len(des)) - for _, d := range des { - cd := canonicalizeMetricDescriptorMetadata(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]MetricDescriptorMetadata, 0, len(des)) - for i, d := range des { - cd := canonicalizeMetricDescriptorMetadata(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewMetricDescriptorMetadata(c *Client, des, nw *MetricDescriptorMetadata) *MetricDescriptorMetadata { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for MetricDescriptorMetadata while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.SamplePeriod, nw.SamplePeriod) { - nw.SamplePeriod = des.SamplePeriod - } - if dcl.StringCanonicalize(des.IngestDelay, nw.IngestDelay) { - nw.IngestDelay = des.IngestDelay - } - - return nw -} - -func canonicalizeNewMetricDescriptorMetadataSet(c *Client, des, nw []MetricDescriptorMetadata) []MetricDescriptorMetadata { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []MetricDescriptorMetadata - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareMetricDescriptorMetadataNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewMetricDescriptorMetadata(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewMetricDescriptorMetadataSlice(c *Client, des, nw []MetricDescriptorMetadata) []MetricDescriptorMetadata { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. 
- // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []MetricDescriptorMetadata - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewMetricDescriptorMetadata(c, &d, &n)) - } - - return items -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. -func diffMetricDescriptor(c *Client, desired, actual *MetricDescriptor, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.SelfLink, actual.SelfLink, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Type, actual.Type, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Type")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{Type: "Set", ObjectFunction: compareMetricDescriptorLabelsNewStyle, EmptyObject: EmptyMetricDescriptorLabels, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.MetricKind, actual.MetricKind, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetricKind")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.ValueType, actual.ValueType, dcl.DiffInfo{Type: "EnumType", CustomDiff: canonicalizeMetricDescriptorValueType, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ValueType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Unit, actual.Unit, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Unit")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Metadata, actual.Metadata, dcl.DiffInfo{Ignore: true, ObjectFunction: compareMetricDescriptorMetadataNewStyle, EmptyObject: EmptyMetricDescriptorMetadata, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Metadata")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.LaunchStage, actual.LaunchStage, dcl.DiffInfo{Ignore: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LaunchStage")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.MonitoredResourceTypes, actual.MonitoredResourceTypes, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MonitoredResourceTypes")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} -func compareMetricDescriptorLabelsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*MetricDescriptorLabels) - if !ok { - desiredNotPointer, ok := d.(MetricDescriptorLabels) - if !ok { - return nil, fmt.Errorf("obj %v is not a MetricDescriptorLabels or *MetricDescriptorLabels", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*MetricDescriptorLabels) - if !ok { - actualNotPointer, ok := a.(MetricDescriptorLabels) - if !ok { - return nil, fmt.Errorf("obj %v is not a MetricDescriptorLabels", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Key, actual.Key, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Key")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ValueType, actual.ValueType, dcl.DiffInfo{Type: "EnumType", CustomDiff: canonicalizeMetricDescriptorLabelsValueType, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ValueType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareMetricDescriptorMetadataNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*MetricDescriptorMetadata) - if !ok { - desiredNotPointer, ok := d.(MetricDescriptorMetadata) - if !ok { - return nil, fmt.Errorf("obj %v is not a MetricDescriptorMetadata or *MetricDescriptorMetadata", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*MetricDescriptorMetadata) - if !ok { - actualNotPointer, ok := a.(MetricDescriptorMetadata) - if !ok { - return nil, fmt.Errorf("obj %v is not a MetricDescriptorMetadata", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.LaunchStage, actual.LaunchStage, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("LaunchStage")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.SamplePeriod, actual.SamplePeriod, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("SamplePeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.IngestDelay, actual.IngestDelay, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("IngestDelay")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. 
-func (r *MetricDescriptor) urlNormalized() *MetricDescriptor { - normalized := dcl.Copy(*r).(MetricDescriptor) - normalized.SelfLink = dcl.SelfLinkToName(r.SelfLink) - normalized.Type = r.Type - normalized.Unit = dcl.SelfLinkToName(r.Unit) - normalized.Description = dcl.SelfLinkToName(r.Description) - normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) - normalized.Project = dcl.SelfLinkToName(r.Project) - return &normalized -} - -func (r *MetricDescriptor) updateURL(userBasePath, updateName string) (string, error) { - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the MetricDescriptor resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *MetricDescriptor) marshal(c *Client) ([]byte, error) { - m, err := expandMetricDescriptor(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling MetricDescriptor: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalMetricDescriptor decodes JSON responses into the MetricDescriptor resource schema. -func unmarshalMetricDescriptor(b []byte, c *Client, res *MetricDescriptor) (*MetricDescriptor, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapMetricDescriptor(m, c, res) -} - -func unmarshalMapMetricDescriptor(m map[string]interface{}, c *Client, res *MetricDescriptor) (*MetricDescriptor, error) { - - flattened := flattenMetricDescriptor(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandMetricDescriptor expands MetricDescriptor into a JSON request object. 
-func expandMetricDescriptor(c *Client, f *MetricDescriptor) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v := f.Type; dcl.ValueShouldBeSent(v) { - m["type"] = v - } - if v, err := expandMetricDescriptorLabelsSlice(c, f.Labels, res); err != nil { - return nil, fmt.Errorf("error expanding Labels into labels: %w", err) - } else if v != nil { - m["labels"] = v - } - if v := f.MetricKind; dcl.ValueShouldBeSent(v) { - m["metricKind"] = v - } - if v := f.ValueType; dcl.ValueShouldBeSent(v) { - m["valueType"] = v - } - if v := f.Unit; dcl.ValueShouldBeSent(v) { - m["unit"] = v - } - if v := f.Description; dcl.ValueShouldBeSent(v) { - m["description"] = v - } - if v := f.DisplayName; dcl.ValueShouldBeSent(v) { - m["displayName"] = v - } - if v, err := expandMetricDescriptorMetadata(c, f.Metadata, res); err != nil { - return nil, fmt.Errorf("error expanding Metadata into metadata: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["metadata"] = v - } - if v := f.LaunchStage; dcl.ValueShouldBeSent(v) { - m["launchStage"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - - return m, nil -} - -// flattenMetricDescriptor flattens MetricDescriptor from a JSON request object into the -// MetricDescriptor type. 
-func flattenMetricDescriptor(c *Client, i interface{}, res *MetricDescriptor) *MetricDescriptor { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &MetricDescriptor{} - resultRes.SelfLink = dcl.FlattenString(m["name"]) - resultRes.Type = dcl.FlattenString(m["type"]) - resultRes.Labels = flattenMetricDescriptorLabelsSlice(c, m["labels"], res) - resultRes.MetricKind = flattenMetricDescriptorMetricKindEnum(m["metricKind"]) - resultRes.ValueType = flattenMetricDescriptorValueTypeEnum(m["valueType"]) - resultRes.Unit = dcl.FlattenString(m["unit"]) - resultRes.Description = dcl.FlattenString(m["description"]) - resultRes.DisplayName = dcl.FlattenString(m["displayName"]) - resultRes.Metadata = flattenMetricDescriptorMetadata(c, m["metadata"], res) - resultRes.LaunchStage = flattenMetricDescriptorLaunchStageEnum(m["launchStage"]) - resultRes.MonitoredResourceTypes = dcl.FlattenStringSlice(m["monitoredResourceTypes"]) - resultRes.Project = dcl.FlattenString(m["project"]) - - return resultRes -} - -// expandMetricDescriptorLabelsMap expands the contents of MetricDescriptorLabels into a JSON -// request object. -func expandMetricDescriptorLabelsMap(c *Client, f map[string]MetricDescriptorLabels, res *MetricDescriptor) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandMetricDescriptorLabels(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandMetricDescriptorLabelsSlice expands the contents of MetricDescriptorLabels into a JSON -// request object. 
-func expandMetricDescriptorLabelsSlice(c *Client, f []MetricDescriptorLabels, res *MetricDescriptor) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandMetricDescriptorLabels(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenMetricDescriptorLabelsMap flattens the contents of MetricDescriptorLabels from a JSON -// response object. -func flattenMetricDescriptorLabelsMap(c *Client, i interface{}, res *MetricDescriptor) map[string]MetricDescriptorLabels { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MetricDescriptorLabels{} - } - - if len(a) == 0 { - return map[string]MetricDescriptorLabels{} - } - - items := make(map[string]MetricDescriptorLabels) - for k, item := range a { - items[k] = *flattenMetricDescriptorLabels(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenMetricDescriptorLabelsSlice flattens the contents of MetricDescriptorLabels from a JSON -// response object. -func flattenMetricDescriptorLabelsSlice(c *Client, i interface{}, res *MetricDescriptor) []MetricDescriptorLabels { - a, ok := i.([]interface{}) - if !ok { - return []MetricDescriptorLabels{} - } - - if len(a) == 0 { - return []MetricDescriptorLabels{} - } - - items := make([]MetricDescriptorLabels, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMetricDescriptorLabels(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandMetricDescriptorLabels expands an instance of MetricDescriptorLabels into a JSON -// request object. 
-func expandMetricDescriptorLabels(c *Client, f *MetricDescriptorLabels, res *MetricDescriptor) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Key; !dcl.IsEmptyValueIndirect(v) { - m["key"] = v - } - if v := f.ValueType; !dcl.IsEmptyValueIndirect(v) { - m["valueType"] = v - } - if v := f.Description; !dcl.IsEmptyValueIndirect(v) { - m["description"] = v - } - - return m, nil -} - -// flattenMetricDescriptorLabels flattens an instance of MetricDescriptorLabels from a JSON -// response object. -func flattenMetricDescriptorLabels(c *Client, i interface{}, res *MetricDescriptor) *MetricDescriptorLabels { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &MetricDescriptorLabels{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyMetricDescriptorLabels - } - r.Key = dcl.FlattenString(m["key"]) - r.ValueType = flattenMetricDescriptorLabelsValueTypeEnum(m["valueType"]) - r.Description = dcl.FlattenString(m["description"]) - - return r -} - -// expandMetricDescriptorMetadataMap expands the contents of MetricDescriptorMetadata into a JSON -// request object. -func expandMetricDescriptorMetadataMap(c *Client, f map[string]MetricDescriptorMetadata, res *MetricDescriptor) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandMetricDescriptorMetadata(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandMetricDescriptorMetadataSlice expands the contents of MetricDescriptorMetadata into a JSON -// request object. 
-func expandMetricDescriptorMetadataSlice(c *Client, f []MetricDescriptorMetadata, res *MetricDescriptor) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandMetricDescriptorMetadata(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenMetricDescriptorMetadataMap flattens the contents of MetricDescriptorMetadata from a JSON -// response object. -func flattenMetricDescriptorMetadataMap(c *Client, i interface{}, res *MetricDescriptor) map[string]MetricDescriptorMetadata { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MetricDescriptorMetadata{} - } - - if len(a) == 0 { - return map[string]MetricDescriptorMetadata{} - } - - items := make(map[string]MetricDescriptorMetadata) - for k, item := range a { - items[k] = *flattenMetricDescriptorMetadata(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenMetricDescriptorMetadataSlice flattens the contents of MetricDescriptorMetadata from a JSON -// response object. -func flattenMetricDescriptorMetadataSlice(c *Client, i interface{}, res *MetricDescriptor) []MetricDescriptorMetadata { - a, ok := i.([]interface{}) - if !ok { - return []MetricDescriptorMetadata{} - } - - if len(a) == 0 { - return []MetricDescriptorMetadata{} - } - - items := make([]MetricDescriptorMetadata, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMetricDescriptorMetadata(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandMetricDescriptorMetadata expands an instance of MetricDescriptorMetadata into a JSON -// request object. 
-func expandMetricDescriptorMetadata(c *Client, f *MetricDescriptorMetadata, res *MetricDescriptor) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.LaunchStage; !dcl.IsEmptyValueIndirect(v) { - m["launchStage"] = v - } - if v := f.SamplePeriod; !dcl.IsEmptyValueIndirect(v) { - m["samplePeriod"] = v - } - if v := f.IngestDelay; !dcl.IsEmptyValueIndirect(v) { - m["ingestDelay"] = v - } - - return m, nil -} - -// flattenMetricDescriptorMetadata flattens an instance of MetricDescriptorMetadata from a JSON -// response object. -func flattenMetricDescriptorMetadata(c *Client, i interface{}, res *MetricDescriptor) *MetricDescriptorMetadata { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &MetricDescriptorMetadata{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyMetricDescriptorMetadata - } - r.LaunchStage = flattenMetricDescriptorMetadataLaunchStageEnum(m["launchStage"]) - r.SamplePeriod = dcl.FlattenString(m["samplePeriod"]) - r.IngestDelay = dcl.FlattenString(m["ingestDelay"]) - - return r -} - -// flattenMetricDescriptorLabelsValueTypeEnumMap flattens the contents of MetricDescriptorLabelsValueTypeEnum from a JSON -// response object. -func flattenMetricDescriptorLabelsValueTypeEnumMap(c *Client, i interface{}, res *MetricDescriptor) map[string]MetricDescriptorLabelsValueTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MetricDescriptorLabelsValueTypeEnum{} - } - - if len(a) == 0 { - return map[string]MetricDescriptorLabelsValueTypeEnum{} - } - - items := make(map[string]MetricDescriptorLabelsValueTypeEnum) - for k, item := range a { - items[k] = *flattenMetricDescriptorLabelsValueTypeEnum(item.(interface{})) - } - - return items -} - -// flattenMetricDescriptorLabelsValueTypeEnumSlice flattens the contents of MetricDescriptorLabelsValueTypeEnum from a JSON -// response object. 
-func flattenMetricDescriptorLabelsValueTypeEnumSlice(c *Client, i interface{}, res *MetricDescriptor) []MetricDescriptorLabelsValueTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []MetricDescriptorLabelsValueTypeEnum{} - } - - if len(a) == 0 { - return []MetricDescriptorLabelsValueTypeEnum{} - } - - items := make([]MetricDescriptorLabelsValueTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMetricDescriptorLabelsValueTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenMetricDescriptorLabelsValueTypeEnum asserts that an interface is a string, and returns a -// pointer to a *MetricDescriptorLabelsValueTypeEnum with the same value as that string. -func flattenMetricDescriptorLabelsValueTypeEnum(i interface{}) *MetricDescriptorLabelsValueTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return MetricDescriptorLabelsValueTypeEnumRef(s) -} - -// flattenMetricDescriptorMetricKindEnumMap flattens the contents of MetricDescriptorMetricKindEnum from a JSON -// response object. -func flattenMetricDescriptorMetricKindEnumMap(c *Client, i interface{}, res *MetricDescriptor) map[string]MetricDescriptorMetricKindEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MetricDescriptorMetricKindEnum{} - } - - if len(a) == 0 { - return map[string]MetricDescriptorMetricKindEnum{} - } - - items := make(map[string]MetricDescriptorMetricKindEnum) - for k, item := range a { - items[k] = *flattenMetricDescriptorMetricKindEnum(item.(interface{})) - } - - return items -} - -// flattenMetricDescriptorMetricKindEnumSlice flattens the contents of MetricDescriptorMetricKindEnum from a JSON -// response object. 
-func flattenMetricDescriptorMetricKindEnumSlice(c *Client, i interface{}, res *MetricDescriptor) []MetricDescriptorMetricKindEnum { - a, ok := i.([]interface{}) - if !ok { - return []MetricDescriptorMetricKindEnum{} - } - - if len(a) == 0 { - return []MetricDescriptorMetricKindEnum{} - } - - items := make([]MetricDescriptorMetricKindEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMetricDescriptorMetricKindEnum(item.(interface{}))) - } - - return items -} - -// flattenMetricDescriptorMetricKindEnum asserts that an interface is a string, and returns a -// pointer to a *MetricDescriptorMetricKindEnum with the same value as that string. -func flattenMetricDescriptorMetricKindEnum(i interface{}) *MetricDescriptorMetricKindEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return MetricDescriptorMetricKindEnumRef(s) -} - -// flattenMetricDescriptorValueTypeEnumMap flattens the contents of MetricDescriptorValueTypeEnum from a JSON -// response object. -func flattenMetricDescriptorValueTypeEnumMap(c *Client, i interface{}, res *MetricDescriptor) map[string]MetricDescriptorValueTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MetricDescriptorValueTypeEnum{} - } - - if len(a) == 0 { - return map[string]MetricDescriptorValueTypeEnum{} - } - - items := make(map[string]MetricDescriptorValueTypeEnum) - for k, item := range a { - items[k] = *flattenMetricDescriptorValueTypeEnum(item.(interface{})) - } - - return items -} - -// flattenMetricDescriptorValueTypeEnumSlice flattens the contents of MetricDescriptorValueTypeEnum from a JSON -// response object. 
-func flattenMetricDescriptorValueTypeEnumSlice(c *Client, i interface{}, res *MetricDescriptor) []MetricDescriptorValueTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []MetricDescriptorValueTypeEnum{} - } - - if len(a) == 0 { - return []MetricDescriptorValueTypeEnum{} - } - - items := make([]MetricDescriptorValueTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMetricDescriptorValueTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenMetricDescriptorValueTypeEnum asserts that an interface is a string, and returns a -// pointer to a *MetricDescriptorValueTypeEnum with the same value as that string. -func flattenMetricDescriptorValueTypeEnum(i interface{}) *MetricDescriptorValueTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return MetricDescriptorValueTypeEnumRef(s) -} - -// flattenMetricDescriptorMetadataLaunchStageEnumMap flattens the contents of MetricDescriptorMetadataLaunchStageEnum from a JSON -// response object. -func flattenMetricDescriptorMetadataLaunchStageEnumMap(c *Client, i interface{}, res *MetricDescriptor) map[string]MetricDescriptorMetadataLaunchStageEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MetricDescriptorMetadataLaunchStageEnum{} - } - - if len(a) == 0 { - return map[string]MetricDescriptorMetadataLaunchStageEnum{} - } - - items := make(map[string]MetricDescriptorMetadataLaunchStageEnum) - for k, item := range a { - items[k] = *flattenMetricDescriptorMetadataLaunchStageEnum(item.(interface{})) - } - - return items -} - -// flattenMetricDescriptorMetadataLaunchStageEnumSlice flattens the contents of MetricDescriptorMetadataLaunchStageEnum from a JSON -// response object. 
-func flattenMetricDescriptorMetadataLaunchStageEnumSlice(c *Client, i interface{}, res *MetricDescriptor) []MetricDescriptorMetadataLaunchStageEnum { - a, ok := i.([]interface{}) - if !ok { - return []MetricDescriptorMetadataLaunchStageEnum{} - } - - if len(a) == 0 { - return []MetricDescriptorMetadataLaunchStageEnum{} - } - - items := make([]MetricDescriptorMetadataLaunchStageEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMetricDescriptorMetadataLaunchStageEnum(item.(interface{}))) - } - - return items -} - -// flattenMetricDescriptorMetadataLaunchStageEnum asserts that an interface is a string, and returns a -// pointer to a *MetricDescriptorMetadataLaunchStageEnum with the same value as that string. -func flattenMetricDescriptorMetadataLaunchStageEnum(i interface{}) *MetricDescriptorMetadataLaunchStageEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return MetricDescriptorMetadataLaunchStageEnumRef(s) -} - -// flattenMetricDescriptorLaunchStageEnumMap flattens the contents of MetricDescriptorLaunchStageEnum from a JSON -// response object. -func flattenMetricDescriptorLaunchStageEnumMap(c *Client, i interface{}, res *MetricDescriptor) map[string]MetricDescriptorLaunchStageEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MetricDescriptorLaunchStageEnum{} - } - - if len(a) == 0 { - return map[string]MetricDescriptorLaunchStageEnum{} - } - - items := make(map[string]MetricDescriptorLaunchStageEnum) - for k, item := range a { - items[k] = *flattenMetricDescriptorLaunchStageEnum(item.(interface{})) - } - - return items -} - -// flattenMetricDescriptorLaunchStageEnumSlice flattens the contents of MetricDescriptorLaunchStageEnum from a JSON -// response object. 
-func flattenMetricDescriptorLaunchStageEnumSlice(c *Client, i interface{}, res *MetricDescriptor) []MetricDescriptorLaunchStageEnum { - a, ok := i.([]interface{}) - if !ok { - return []MetricDescriptorLaunchStageEnum{} - } - - if len(a) == 0 { - return []MetricDescriptorLaunchStageEnum{} - } - - items := make([]MetricDescriptorLaunchStageEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMetricDescriptorLaunchStageEnum(item.(interface{}))) - } - - return items -} - -// flattenMetricDescriptorLaunchStageEnum asserts that an interface is a string, and returns a -// pointer to a *MetricDescriptorLaunchStageEnum with the same value as that string. -func flattenMetricDescriptorLaunchStageEnum(i interface{}) *MetricDescriptorLaunchStageEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return MetricDescriptorLaunchStageEnumRef(s) -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. 
-func (r *MetricDescriptor) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalMetricDescriptor(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Type == nil && ncr.Type == nil { - c.Config.Logger.Info("Both Type fields null - considering equal.") - } else if nr.Type == nil || ncr.Type == nil { - c.Config.Logger.Info("Only one Type field is null - considering unequal.") - return false - } else if *nr.Type != *ncr.Type { - return false - } - return true - } -} - -type metricDescriptorDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp metricDescriptorApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToMetricDescriptorDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]metricDescriptorDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []metricDescriptorDiff - // For each operation name, create a metricDescriptorDiff which contains the operation. 
- for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := metricDescriptorDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToMetricDescriptorApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToMetricDescriptorApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (metricDescriptorApiOperation, error) { - switch opName { - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractMetricDescriptorFields(r *MetricDescriptor) error { - vMetadata := r.Metadata - if vMetadata == nil { - // note: explicitly not the empty object. - vMetadata = &MetricDescriptorMetadata{} - } - if err := extractMetricDescriptorMetadataFields(r, vMetadata); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetadata) { - r.Metadata = vMetadata - } - return nil -} -func extractMetricDescriptorLabelsFields(r *MetricDescriptor, o *MetricDescriptorLabels) error { - return nil -} -func extractMetricDescriptorMetadataFields(r *MetricDescriptor, o *MetricDescriptorMetadata) error { - return nil -} - -func postReadExtractMetricDescriptorFields(r *MetricDescriptor) error { - vMetadata := r.Metadata - if vMetadata == nil { - // note: explicitly not the empty object. 
- vMetadata = &MetricDescriptorMetadata{} - } - if err := postReadExtractMetricDescriptorMetadataFields(r, vMetadata); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetadata) { - r.Metadata = vMetadata - } - return nil -} -func postReadExtractMetricDescriptorLabelsFields(r *MetricDescriptor, o *MetricDescriptorLabels) error { - return nil -} -func postReadExtractMetricDescriptorMetadataFields(r *MetricDescriptor, o *MetricDescriptorMetadata) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_schema.go deleted file mode 100644 index a7db77571e..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_schema.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLMetricDescriptorSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Monitoring/MetricDescriptor", - Description: "The Monitoring MetricDescriptor resource", - StructName: "MetricDescriptor", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a MetricDescriptor", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "metricDescriptor", - Required: true, - Description: "A full instance of a MetricDescriptor", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a MetricDescriptor", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "metricDescriptor", - Required: true, - Description: "A full instance of a MetricDescriptor", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a MetricDescriptor", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "metricDescriptor", - Required: true, - Description: "A full instance of a MetricDescriptor", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all MetricDescriptor", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many MetricDescriptor", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "MetricDescriptor": &dcl.Component{ - Title: "MetricDescriptor", - ID: "projects/{{project}}/metricDescriptors/{{type}}", - UsesStateHint: true, - ParentContainer: "project", - HasCreate: true, - SchemaProperty: 
dcl.Property{ - Type: "object", - Required: []string{ - "type", - "metricKind", - "valueType", - "project", - }, - Properties: map[string]*dcl.Property{ - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "A detailed description of the metric, which can be used in documentation.", - Immutable: true, - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example \"Request count\". This field is optional but it is recommended to be set for any metrics associated with user-visible concepts, such as Quota.", - Immutable: true, - }, - "labels": &dcl.Property{ - Type: "array", - GoName: "Labels", - Description: "The set of labels that can be used to describe a specific instance of this metric type. For example, the `appengine.googleapis.com/http/server/response_latencies` metric type has a label for the HTTP response code, `response_code`, so you can look at latencies for successful responses or just for responses that failed.", - Immutable: true, - SendEmpty: true, - ListType: "set", - Items: &dcl.Property{ - Type: "object", - GoType: "MetricDescriptorLabels", - Properties: map[string]*dcl.Property{ - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "A human-readable description for the label.", - Immutable: true, - }, - "key": &dcl.Property{ - Type: "string", - GoName: "Key", - Description: "The key for this label. The key must meet the following criteria: * Does not exceed 100 characters. * Matches the following regular expression: `a-zA-Z*` * The first character must be an upper- or lower-case letter. 
* The remaining characters must be letters, digits, or underscores.", - Immutable: true, - }, - "valueType": &dcl.Property{ - Type: "string", - GoName: "ValueType", - GoType: "MetricDescriptorLabelsValueTypeEnum", - Description: "The type of data that can be assigned to the label. Possible values: STRING, BOOL, INT64", - Immutable: true, - Enum: []string{ - "STRING", - "BOOL", - "INT64", - }, - }, - }, - }, - }, - "launchStage": &dcl.Property{ - Type: "string", - GoName: "LaunchStage", - GoType: "MetricDescriptorLaunchStageEnum", - Description: "Optional. The launch stage of the metric definition. Possible values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED", - Immutable: true, - Enum: []string{ - "LAUNCH_STAGE_UNSPECIFIED", - "UNIMPLEMENTED", - "PRELAUNCH", - "EARLY_ACCESS", - "ALPHA", - "BETA", - "GA", - "DEPRECATED", - }, - Unreadable: true, - }, - "metadata": &dcl.Property{ - Type: "object", - GoName: "Metadata", - GoType: "MetricDescriptorMetadata", - Description: "Optional. Metadata which can be used to guide usage of the metric.", - Immutable: true, - Unreadable: true, - Properties: map[string]*dcl.Property{ - "ingestDelay": &dcl.Property{ - Type: "string", - GoName: "IngestDelay", - Description: "The delay of data points caused by ingestion. Data points older than this age are guaranteed to be ingested and available to be read, excluding data loss due to errors.", - Immutable: true, - }, - "launchStage": &dcl.Property{ - Type: "string", - GoName: "LaunchStage", - GoType: "MetricDescriptorMetadataLaunchStageEnum", - Description: "Deprecated. Must use the MetricDescriptor.launch_stage instead. 
Possible values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED", - Immutable: true, - Enum: []string{ - "LAUNCH_STAGE_UNSPECIFIED", - "UNIMPLEMENTED", - "PRELAUNCH", - "EARLY_ACCESS", - "ALPHA", - "BETA", - "GA", - "DEPRECATED", - }, - }, - "samplePeriod": &dcl.Property{ - Type: "string", - GoName: "SamplePeriod", - Description: "The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period.", - Immutable: true, - }, - }, - }, - "metricKind": &dcl.Property{ - Type: "string", - GoName: "MetricKind", - GoType: "MetricDescriptorMetricKindEnum", - Description: "Whether the metric records instantaneous values, changes to a value, etc. Some combinations of `metric_kind` and `value_type` might not be supported. Possible values: METRIC_KIND_UNSPECIFIED, GAUGE, DELTA, CUMULATIVE", - Immutable: true, - Enum: []string{ - "METRIC_KIND_UNSPECIFIED", - "GAUGE", - "DELTA", - "CUMULATIVE", - }, - }, - "monitoredResourceTypes": &dcl.Property{ - Type: "array", - GoName: "MonitoredResourceTypes", - ReadOnly: true, - Description: "Read-only. 
If present, then a time series, which is identified partially by a metric type and a MonitoredResourceDescriptor, that is associated with this metric type can only be associated with one of the monitored resource types listed here.", - Immutable: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - }, - "selfLink": &dcl.Property{ - Type: "string", - GoName: "SelfLink", - ReadOnly: true, - Description: "The resource name of the metric descriptor.", - Immutable: true, - }, - "type": &dcl.Property{ - Type: "string", - GoName: "Type", - Description: "The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined metric types have the DNS name `custom.googleapis.com` or `external.googleapis.com`. Metric types should use a natural hierarchical grouping. For example: \"custom.googleapis.com/invoice/paid/amount\" \"external.googleapis.com/prometheus/up\" \"appengine.googleapis.com/http/server/response_latencies\"", - Immutable: true, - ForwardSlashAllowed: true, - }, - "unit": &dcl.Property{ - Type: "string", - GoName: "Unit", - Description: "The units in which the metric value is reported. It is only applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` defines the representation of the stored metric values. Different systems might scale the values to be more easily displayed (so a value of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then the value of the metric is always in thousands of bytes, no matter how it might be displayed. 
If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 CPU-seconds, then the value is written as `12005`. Alternatively, if you want a custom metric to record data in a more granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). The supported units are a subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)** * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also includes these connectors: * `/` division or ratio (as an infix operator). For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost never have `/s` in a metric `unit`; rates should always be computed at query time from the underlying cumulative or delta value). * `.` multiplication or composition (as an infix operator). For examples, `GBy.d` or `k{watt}.h`. The grammar for a unit is as follows: Expression = Component: { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation` is just a comment if it follows a `UNIT`. If the annotation is used alone, then the unit is equivalent to `1`. 
For examples, `{request}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable ASCII characters not containing `{` or `}`. * `1` represents a unitary [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in `1/s`. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3` would mean \"5300 page views per day\"). * `%` represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value `3` means \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value `0.03` means \"3 percent\").", - Immutable: true, - }, - "valueType": &dcl.Property{ - Type: "string", - GoName: "ValueType", - GoType: "MetricDescriptorValueTypeEnum", - Description: "Whether the measurement is an integer, a floating-point number, etc. Some combinations of `metric_kind` and `value_type` might not be supported. 
Possible values: STRING, BOOL, INT64", - Immutable: true, - Enum: []string{ - "STRING", - "BOOL", - "INT64", - }, - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_yaml_embed.go deleted file mode 100644 index 7a36d5092a..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metric_descriptor_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// GENERATED BY gen_go_data.go -// gen_go_data -package monitoring -var YAML_metric_descriptor blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/metric_descriptor.yaml - -package monitoring - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/metric_descriptor.yaml -var YAML_metric_descriptor = []byte("info:\n title: Monitoring/MetricDescriptor\n description: The Monitoring MetricDescriptor resource\n x-dcl-struct-name: MetricDescriptor\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a MetricDescriptor\n parameters:\n - name: metricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n apply:\n description: The function used to apply information about a MetricDescriptor\n parameters:\n - name: metricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n delete:\n description: The function used to delete a MetricDescriptor\n parameters:\n - name: metricDescriptor\n required: true\n description: A full instance of a MetricDescriptor\n deleteAll:\n description: The function used to delete all MetricDescriptor\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many MetricDescriptor\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n MetricDescriptor:\n title: MetricDescriptor\n x-dcl-id: projects/{{project}}/metricDescriptors/{{type}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - type\n - metricKind\n - valueType\n - project\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A detailed description of the metric, which can be used in\n documentation.\n 
x-kubernetes-immutable: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: A concise name for the metric, which can be displayed in user\n interfaces. Use sentence case without an ending period, for example \"Request\n count\". This field is optional but it is recommended to be set for any\n metrics associated with user-visible concepts, such as Quota.\n x-kubernetes-immutable: true\n labels:\n type: array\n x-dcl-go-name: Labels\n description: The set of labels that can be used to describe a specific instance\n of this metric type. For example, the `appengine.googleapis.com/http/server/response_latencies`\n metric type has a label for the HTTP response code, `response_code`, so\n you can look at latencies for successful responses or just for responses\n that failed.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: set\n items:\n type: object\n x-dcl-go-type: MetricDescriptorLabels\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: A human-readable description for the label.\n x-kubernetes-immutable: true\n key:\n type: string\n x-dcl-go-name: Key\n description: 'The key for this label. The key must meet the following\n criteria: * Does not exceed 100 characters. * Matches the following\n regular expression: `a-zA-Z*` * The first character must be an upper-\n or lower-case letter. * The remaining characters must be letters,\n digits, or underscores.'\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: MetricDescriptorLabelsValueTypeEnum\n description: 'The type of data that can be assigned to the label.\n Possible values: STRING, BOOL, INT64'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: MetricDescriptorLaunchStageEnum\n description: 'Optional. The launch stage of the metric definition. 
Possible\n values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED, PRELAUNCH, EARLY_ACCESS,\n ALPHA, BETA, GA, DEPRECATED'\n x-kubernetes-immutable: true\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n x-dcl-mutable-unreadable: true\n metadata:\n type: object\n x-dcl-go-name: Metadata\n x-dcl-go-type: MetricDescriptorMetadata\n description: Optional. Metadata which can be used to guide usage of the\n metric.\n x-kubernetes-immutable: true\n x-dcl-mutable-unreadable: true\n properties:\n ingestDelay:\n type: string\n x-dcl-go-name: IngestDelay\n description: The delay of data points caused by ingestion. Data points\n older than this age are guaranteed to be ingested and available to\n be read, excluding data loss due to errors.\n x-kubernetes-immutable: true\n launchStage:\n type: string\n x-dcl-go-name: LaunchStage\n x-dcl-go-type: MetricDescriptorMetadataLaunchStageEnum\n description: 'Deprecated. Must use the MetricDescriptor.launch_stage\n instead. Possible values: LAUNCH_STAGE_UNSPECIFIED, UNIMPLEMENTED,\n PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, DEPRECATED'\n x-kubernetes-immutable: true\n enum:\n - LAUNCH_STAGE_UNSPECIFIED\n - UNIMPLEMENTED\n - PRELAUNCH\n - EARLY_ACCESS\n - ALPHA\n - BETA\n - GA\n - DEPRECATED\n samplePeriod:\n type: string\n x-dcl-go-name: SamplePeriod\n description: The sampling period of metric data points. For metrics\n which are written periodically, consecutive data points are stored\n at this time interval, excluding data loss due to errors. Metrics\n with a higher granularity have a smaller sampling period.\n x-kubernetes-immutable: true\n metricKind:\n type: string\n x-dcl-go-name: MetricKind\n x-dcl-go-type: MetricDescriptorMetricKindEnum\n description: 'Whether the metric records instantaneous values, changes to\n a value, etc. Some combinations of `metric_kind` and `value_type` might\n not be supported. 
Possible values: METRIC_KIND_UNSPECIFIED, GAUGE, DELTA,\n CUMULATIVE'\n x-kubernetes-immutable: true\n enum:\n - METRIC_KIND_UNSPECIFIED\n - GAUGE\n - DELTA\n - CUMULATIVE\n monitoredResourceTypes:\n type: array\n x-dcl-go-name: MonitoredResourceTypes\n readOnly: true\n description: Read-only. If present, then a time series, which is identified\n partially by a metric type and a MonitoredResourceDescriptor, that is\n associated with this metric type can only be associated with one of the\n monitored resource types listed here.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n selfLink:\n type: string\n x-dcl-go-name: SelfLink\n readOnly: true\n description: The resource name of the metric descriptor.\n x-kubernetes-immutable: true\n type:\n type: string\n x-dcl-go-name: Type\n description: 'The metric type, including its DNS name prefix. The type is\n not URL-encoded. All user-defined metric types have the DNS name `custom.googleapis.com`\n or `external.googleapis.com`. Metric types should use a natural hierarchical\n grouping. For example: \"custom.googleapis.com/invoice/paid/amount\" \"external.googleapis.com/prometheus/up\"\n \"appengine.googleapis.com/http/server/response_latencies\"'\n x-kubernetes-immutable: true\n x-dcl-forward-slash-allowed: true\n unit:\n type: string\n x-dcl-go-name: Unit\n description: 'The units in which the metric value is reported. It is only\n applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`.\n The `unit` defines the representation of the stored metric values. 
Different\n systems might scale the values to be more easily displayed (so a value\n of `0.02kBy` _might_ be displayed as `20By`, and a value of `3523kBy`\n _might_ be displayed as `3.5MBy`). However, if the `unit` is `kBy`, then\n the value of the metric is always in thousands of bytes, no matter how\n it might be displayed. If you want a custom metric to record the exact\n number of CPU-seconds used by a job, you can create an `INT64 CUMULATIVE`\n metric whose `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`).\n If the job uses 12,005 CPU-seconds, then the value is written as `12005`.\n Alternatively, if you want a custom metric to record data in a more granular\n way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is `ks{CPU}`,\n and then write the value `12.005` (which is `12005/1000`), or use `Kis{CPU}`\n and write `11.723` (which is `12005/1024`). The supported units are a\n subset of [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html)\n standard: **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second\n * `min` minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)**\n * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera (10^12)\n * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) * `Y` yotta (10^24)\n * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano (10^-9) * `p` pico\n (10^-12) * `f` femto (10^-15) * `a` atto (10^-18) * `z` zepto (10^-21)\n * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi` mebi (2^20) * `Gi` gibi\n (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50) **Grammar** The grammar also\n includes these connectors: * `/` division or ratio (as an infix operator).\n For examples, `kBy/{email}` or `MiBy/10ms` (although you should almost\n never have `/s` in a metric `unit`; rates should always be computed at\n query time from the underlying cumulative or delta value). * `.` multiplication\n or composition (as an infix operator). 
For examples, `GBy.d` or `k{watt}.h`.\n The grammar for a unit is as follows: Expression = Component: { \".\" Component\n } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation\n ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: * `Annotation`\n is just a comment if it follows a `UNIT`. If the annotation is used alone,\n then the unit is equivalent to `1`. For examples, `{request}/s == 1/s`,\n `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank printable\n ASCII characters not containing `{` or `}`. * `1` represents a unitary\n [dimensionless unit](https://en.wikipedia.org/wiki/Dimensionless_quantity)\n of 1, such as in `1/s`. It is typically used when none of the basic units\n are appropriate. For example, \"new users per day\" can be represented as\n `1/d` or `{new-users}/d` (and a metric value `5` would mean \"5 new users).\n Alternatively, \"thousands of page views per day\" would be represented\n as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric value of `5.3`\n would mean \"5300 page views per day\"). * `%` represents dimensionless\n value of 1/100, and annotates values giving a percentage (so the metric\n values are typically in the range of 0..100, and a metric value `3` means\n \"3 percent\"). * `10^2.%` indicates a metric contains a ratio, typically\n in the range 0..1, that will be multiplied by 100 and displayed as a percentage\n (so a metric value `0.03` means \"3 percent\").'\n x-kubernetes-immutable: true\n valueType:\n type: string\n x-dcl-go-name: ValueType\n x-dcl-go-type: MetricDescriptorValueTypeEnum\n description: 'Whether the measurement is an integer, a floating-point number,\n etc. Some combinations of `metric_kind` and `value_type` might not be\n supported. 
Possible values: STRING, BOOL, INT64'\n x-kubernetes-immutable: true\n enum:\n - STRING\n - BOOL\n - INT64\n") - -// 13522 bytes -// MD5: 1a7798838aef4d4e6f10d0dca7ade10e diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope.go deleted file mode 100644 index 951ad1f53c..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type MetricsScope struct { - Name *string `json:"name"` - CreateTime *string `json:"createTime"` - UpdateTime *string `json:"updateTime"` - MonitoredProjects []MetricsScopeMonitoredProjects `json:"monitoredProjects"` -} - -func (r *MetricsScope) String() string { - return dcl.SprintResource(r) -} - -type MetricsScopeMonitoredProjects struct { - empty bool `json:"-"` - Name *string `json:"name"` - CreateTime *string `json:"createTime"` -} - -type jsonMetricsScopeMonitoredProjects MetricsScopeMonitoredProjects - -func (r *MetricsScopeMonitoredProjects) UnmarshalJSON(data []byte) error { - var res jsonMetricsScopeMonitoredProjects - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyMetricsScopeMonitoredProjects - } else { - - r.Name = res.Name - - r.CreateTime = res.CreateTime - - } - return nil -} - -// This object is used to assert a desired state where this MetricsScopeMonitoredProjects is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyMetricsScopeMonitoredProjects *MetricsScopeMonitoredProjects = &MetricsScopeMonitoredProjects{empty: true} - -func (r *MetricsScopeMonitoredProjects) Empty() bool { - return r.empty -} - -func (r *MetricsScopeMonitoredProjects) String() string { - return dcl.SprintResource(r) -} - -func (r *MetricsScopeMonitoredProjects) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. -func (r *MetricsScope) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "monitoring", - Type: "MetricsScope", - Version: "monitoring", - } -} - -func (r *MetricsScope) ID() (string, error) { - if err := extractMetricsScopeFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "create_time": dcl.ValueOrEmptyString(nr.CreateTime), - "update_time": dcl.ValueOrEmptyString(nr.UpdateTime), - "monitored_projects": dcl.ValueOrEmptyString(nr.MonitoredProjects), - } - return dcl.Nprintf("locations/global/metricsScopes/{{name}}", params), nil -} - -const MetricsScopeMaxPage = -1 - -type MetricsScopeList struct { - Items []*MetricsScope - - nextToken string - - resource *MetricsScope -} - -func (c *Client) GetMetricsScope(ctx context.Context, r *MetricsScope) (*MetricsScope, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
- extractMetricsScopeFields(r) - - b, err := c.getMetricsScopeRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalMetricsScope(b, c, r) - if err != nil { - return nil, err - } - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeMetricsScopeNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractMetricsScopeFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) ApplyMetricsScope(ctx context.Context, rawDesired *MetricsScope, opts ...dcl.ApplyOption) (*MetricsScope, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *MetricsScope - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyMetricsScopeHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyMetricsScopeHelper(c *Client, ctx context.Context, rawDesired *MetricsScope, opts ...dcl.ApplyOption) (*MetricsScope, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyMetricsScope...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. 
- if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractMetricsScopeFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.metricsScopeDiffsForRawDesired(ctx, rawDesired, opts...) - if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToMetricsScopeDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - return nil, dcl.ApplyInfeasibleError{Message: "No initial state found for singleton resource."} - } else { - for _, d := range diffs { - if d.UpdateOp == nil { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) no update method found for field", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - var ops []metricsScopeApiOperation - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyMetricsScopeDiff(c, ctx, desired, rawDesired, ops, opts...) 
-} - -func applyMetricsScopeDiff(c *Client, ctx context.Context, desired *MetricsScope, rawDesired *MetricsScope, ops []metricsScopeApiOperation, opts ...dcl.ApplyOption) (*MetricsScope, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetMetricsScope(ctx, desired) - if err != nil { - return nil, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeMetricsScopeNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeMetricsScopeDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractMetricsScopeFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. - if err := postReadExtractMetricsScopeFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffMetricsScope(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. 
Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope.yaml deleted file mode 100644 index b153aafd32..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope.yaml +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-info: - title: Monitoring/MetricsScope - description: The Monitoring MetricsScope resource - x-dcl-struct-name: MetricsScope - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a MetricsScope - parameters: - - name: metricsScope - required: true - description: A full instance of a MetricsScope - apply: - description: The function used to apply information about a MetricsScope - parameters: - - name: metricsScope - required: true - description: A full instance of a MetricsScope -components: - schemas: - MetricsScope: - title: MetricsScope - x-dcl-id: locations/global/metricsScopes/{{name}} - x-dcl-locations: - - global - x-dcl-has-create: false - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - name - properties: - createTime: - type: string - format: date-time - x-dcl-go-name: CreateTime - readOnly: true - description: Output only. The time when this `Metrics Scope` was created. - x-kubernetes-immutable: true - monitoredProjects: - type: array - x-dcl-go-name: MonitoredProjects - readOnly: true - description: Output only. The list of projects monitored by this `Metrics - Scope`. - x-kubernetes-immutable: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: MetricsScopeMonitoredProjects - properties: - createTime: - type: string - format: date-time - x-dcl-go-name: CreateTime - readOnly: true - description: Output only. The time when this `MonitoredProject` was - created. - x-kubernetes-immutable: true - name: - type: string - x-dcl-go-name: Name - description: 'Immutable. The resource name of the `MonitoredProject`. - On input, the resource name includes the scoping project ID and - monitored project ID. On output, it contains the equivalent project - numbers. 
Example: `locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}`' - x-kubernetes-immutable: true - name: - type: string - x-dcl-go-name: Name - description: 'Immutable. The resource name of the Monitoring Metrics Scope. - On input, the resource name can be specified with the scoping project - ID or number. On output, the resource name is specified with the scoping - project number. Example: `locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}`' - x-kubernetes-immutable: true - updateTime: - type: string - format: date-time - x-dcl-go-name: UpdateTime - readOnly: true - description: Output only. The time when this `Metrics Scope` record was - last updated. - x-kubernetes-immutable: true diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope_internal.go deleted file mode 100644 index a546e449ad..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope_internal.go +++ /dev/null @@ -1,680 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func (r *MetricsScope) validate() error { - - if err := dcl.Required(r, "name"); err != nil { - return err - } - return nil -} -func (r *MetricsScopeMonitoredProjects) validate() error { - return nil -} -func (r *MetricsScope) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://monitoring.googleapis.com/v1/", params) -} - -func (r *MetricsScope) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("locations/global/metricsScopes/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// metricsScopeApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type metricsScopeApiOperation interface { - do(context.Context, *MetricsScope, *Client) error -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. 
-type createMetricsScopeOperation struct { - response map[string]interface{} -} - -func (op *createMetricsScopeOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (c *Client) getMetricsScopeRaw(ctx context.Context, r *MetricsScope) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) metricsScopeDiffsForRawDesired(ctx context.Context, rawDesired *MetricsScope, opts ...dcl.ApplyOption) (initial, desired *MetricsScope, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. - var fetchState *MetricsScope - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*MetricsScope); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected MetricsScope, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetMetricsScope(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a MetricsScope resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve MetricsScope resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that MetricsScope resource did not exist.") - // Perform canonicalization to pick up defaults. 
- desired, err = canonicalizeMetricsScopeDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for MetricsScope: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for MetricsScope: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractMetricsScopeFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeMetricsScopeInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for MetricsScope: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeMetricsScopeDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for MetricsScope: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffMetricsScope(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeMetricsScopeInitialState(rawInitial, rawDesired *MetricsScope) (*MetricsScope, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeMetricsScopeDesiredState(rawDesired, rawInitial *MetricsScope, opts ...dcl.ApplyOption) (*MetricsScope, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. 
- - return rawDesired, nil - } - canonicalDesired := &MetricsScope{} - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - return canonicalDesired, nil -} - -func canonicalizeMetricsScopeNewState(c *Client, rawNew, rawDesired *MetricsScope) (*MetricsScope, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { - rawNew.CreateTime = rawDesired.CreateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) { - rawNew.UpdateTime = rawDesired.UpdateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.MonitoredProjects) && dcl.IsEmptyValueIndirect(rawDesired.MonitoredProjects) { - rawNew.MonitoredProjects = rawDesired.MonitoredProjects - } else { - rawNew.MonitoredProjects = canonicalizeNewMetricsScopeMonitoredProjectsSlice(c, rawDesired.MonitoredProjects, rawNew.MonitoredProjects) - } - - return rawNew, nil -} - -func canonicalizeMetricsScopeMonitoredProjects(des, initial *MetricsScopeMonitoredProjects, opts ...dcl.ApplyOption) *MetricsScopeMonitoredProjects { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &MetricsScopeMonitoredProjects{} - - if dcl.StringCanonicalize(des.Name, initial.Name) || dcl.IsZeroValue(des.Name) { - cDes.Name = initial.Name - } else { - cDes.Name = des.Name - } - - return cDes -} - -func canonicalizeMetricsScopeMonitoredProjectsSlice(des, initial []MetricsScopeMonitoredProjects, opts ...dcl.ApplyOption) []MetricsScopeMonitoredProjects { - if dcl.IsEmptyValueIndirect(des) { - return 
initial - } - - if len(des) != len(initial) { - - items := make([]MetricsScopeMonitoredProjects, 0, len(des)) - for _, d := range des { - cd := canonicalizeMetricsScopeMonitoredProjects(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]MetricsScopeMonitoredProjects, 0, len(des)) - for i, d := range des { - cd := canonicalizeMetricsScopeMonitoredProjects(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewMetricsScopeMonitoredProjects(c *Client, des, nw *MetricsScopeMonitoredProjects) *MetricsScopeMonitoredProjects { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for MetricsScopeMonitoredProjects while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Name, nw.Name) { - nw.Name = des.Name - } - - return nw -} - -func canonicalizeNewMetricsScopeMonitoredProjectsSet(c *Client, des, nw []MetricsScopeMonitoredProjects) []MetricsScopeMonitoredProjects { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []MetricsScopeMonitoredProjects - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareMetricsScopeMonitoredProjectsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewMetricsScopeMonitoredProjects(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewMetricsScopeMonitoredProjectsSlice(c *Client, des, nw []MetricsScopeMonitoredProjects) []MetricsScopeMonitoredProjects { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []MetricsScopeMonitoredProjects - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewMetricsScopeMonitoredProjects(c, &d, &n)) - } - - return items -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. -func diffMetricsScope(c *Client, desired, actual *MetricsScope, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.MonitoredProjects, actual.MonitoredProjects, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareMetricsScopeMonitoredProjectsNewStyle, EmptyObject: EmptyMetricsScopeMonitoredProjects, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MonitoredProjects")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} -func compareMetricsScopeMonitoredProjectsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*MetricsScopeMonitoredProjects) - if !ok { - desiredNotPointer, ok := d.(MetricsScopeMonitoredProjects) - if !ok { - return nil, fmt.Errorf("obj %v is not a MetricsScopeMonitoredProjects or *MetricsScopeMonitoredProjects", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*MetricsScopeMonitoredProjects) - if !ok { - actualNotPointer, ok := a.(MetricsScopeMonitoredProjects) - if !ok { - return nil, fmt.Errorf("obj %v is not a MetricsScopeMonitoredProjects", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, 
ds...) - } - - if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. -func (r *MetricsScope) urlNormalized() *MetricsScope { - normalized := dcl.Copy(*r).(MetricsScope) - normalized.Name = dcl.SelfLinkToName(r.Name) - return &normalized -} - -func (r *MetricsScope) updateURL(userBasePath, updateName string) (string, error) { - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the MetricsScope resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *MetricsScope) marshal(c *Client) ([]byte, error) { - m, err := expandMetricsScope(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling MetricsScope: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalMetricsScope decodes JSON responses into the MetricsScope resource schema. -func unmarshalMetricsScope(b []byte, c *Client, res *MetricsScope) (*MetricsScope, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapMetricsScope(m, c, res) -} - -func unmarshalMapMetricsScope(m map[string]interface{}, c *Client, res *MetricsScope) (*MetricsScope, error) { - - flattened := flattenMetricsScope(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandMetricsScope expands MetricsScope into a JSON request object. 
-func expandMetricsScope(c *Client, f *MetricsScope) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := dcl.ExpandProjectIDsToNumbers(c.Config, f.Name); err != nil { - return nil, fmt.Errorf("error expanding Name into name: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - - return m, nil -} - -// flattenMetricsScope flattens MetricsScope from a JSON request object into the -// MetricsScope type. -func flattenMetricsScope(c *Client, i interface{}, res *MetricsScope) *MetricsScope { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &MetricsScope{} - resultRes.Name = dcl.FlattenProjectNumbersToIDs(c.Config, dcl.FlattenString(m["name"])) - resultRes.CreateTime = dcl.FlattenString(m["createTime"]) - resultRes.UpdateTime = dcl.FlattenString(m["updateTime"]) - resultRes.MonitoredProjects = flattenMetricsScopeMonitoredProjectsSlice(c, m["monitoredProjects"], res) - - return resultRes -} - -// expandMetricsScopeMonitoredProjectsMap expands the contents of MetricsScopeMonitoredProjects into a JSON -// request object. -func expandMetricsScopeMonitoredProjectsMap(c *Client, f map[string]MetricsScopeMonitoredProjects, res *MetricsScope) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandMetricsScopeMonitoredProjects(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandMetricsScopeMonitoredProjectsSlice expands the contents of MetricsScopeMonitoredProjects into a JSON -// request object. 
-func expandMetricsScopeMonitoredProjectsSlice(c *Client, f []MetricsScopeMonitoredProjects, res *MetricsScope) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandMetricsScopeMonitoredProjects(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenMetricsScopeMonitoredProjectsMap flattens the contents of MetricsScopeMonitoredProjects from a JSON -// response object. -func flattenMetricsScopeMonitoredProjectsMap(c *Client, i interface{}, res *MetricsScope) map[string]MetricsScopeMonitoredProjects { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]MetricsScopeMonitoredProjects{} - } - - if len(a) == 0 { - return map[string]MetricsScopeMonitoredProjects{} - } - - items := make(map[string]MetricsScopeMonitoredProjects) - for k, item := range a { - items[k] = *flattenMetricsScopeMonitoredProjects(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenMetricsScopeMonitoredProjectsSlice flattens the contents of MetricsScopeMonitoredProjects from a JSON -// response object. -func flattenMetricsScopeMonitoredProjectsSlice(c *Client, i interface{}, res *MetricsScope) []MetricsScopeMonitoredProjects { - a, ok := i.([]interface{}) - if !ok { - return []MetricsScopeMonitoredProjects{} - } - - if len(a) == 0 { - return []MetricsScopeMonitoredProjects{} - } - - items := make([]MetricsScopeMonitoredProjects, 0, len(a)) - for _, item := range a { - items = append(items, *flattenMetricsScopeMonitoredProjects(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandMetricsScopeMonitoredProjects expands an instance of MetricsScopeMonitoredProjects into a JSON -// request object. 
-func expandMetricsScopeMonitoredProjects(c *Client, f *MetricsScopeMonitoredProjects, res *MetricsScope) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Name; !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - - return m, nil -} - -// flattenMetricsScopeMonitoredProjects flattens an instance of MetricsScopeMonitoredProjects from a JSON -// response object. -func flattenMetricsScopeMonitoredProjects(c *Client, i interface{}, res *MetricsScope) *MetricsScopeMonitoredProjects { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &MetricsScopeMonitoredProjects{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyMetricsScopeMonitoredProjects - } - r.Name = dcl.FlattenString(m["name"]) - r.CreateTime = dcl.FlattenString(m["createTime"]) - - return r -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. -func (r *MetricsScope) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalMetricsScope(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type metricsScopeDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. 
- RequiresRecreate bool - UpdateOp metricsScopeApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToMetricsScopeDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]metricsScopeDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []metricsScopeDiff - // For each operation name, create a metricsScopeDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := metricsScopeDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToMetricsScopeApiOperation(opName, fieldDiffs, opts...) 
- if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToMetricsScopeApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (metricsScopeApiOperation, error) { - switch opName { - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractMetricsScopeFields(r *MetricsScope) error { - return nil -} -func extractMetricsScopeMonitoredProjectsFields(r *MetricsScope, o *MetricsScopeMonitoredProjects) error { - return nil -} - -func postReadExtractMetricsScopeFields(r *MetricsScope) error { - return nil -} -func postReadExtractMetricsScopeMonitoredProjectsFields(r *MetricsScope, o *MetricsScopeMonitoredProjects) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope_schema.go deleted file mode 100644 index 3341038775..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope_schema.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLMetricsScopeSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Monitoring/MetricsScope", - Description: "The Monitoring MetricsScope resource", - StructName: "MetricsScope", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a MetricsScope", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "metricsScope", - Required: true, - Description: "A full instance of a MetricsScope", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a MetricsScope", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "metricsScope", - Required: true, - Description: "A full instance of a MetricsScope", - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "MetricsScope": &dcl.Component{ - Title: "MetricsScope", - ID: "locations/global/metricsScopes/{{name}}", - Locations: []string{ - "global", - }, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - }, - Properties: map[string]*dcl.Property{ - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The time when this `Metrics Scope` was created.", - Immutable: true, - }, - "monitoredProjects": &dcl.Property{ - Type: "array", - GoName: "MonitoredProjects", - ReadOnly: true, - Description: "Output only. The list of projects monitored by this `Metrics Scope`.", - Immutable: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "MetricsScopeMonitoredProjects", - Properties: map[string]*dcl.Property{ - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. 
The time when this `MonitoredProject` was created.", - Immutable: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Immutable. The resource name of the `MonitoredProject`. On input, the resource name includes the scoping project ID and monitored project ID. On output, it contains the equivalent project numbers. Example: `locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}`", - Immutable: true, - }, - }, - }, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Immutable. The resource name of the Monitoring Metrics Scope. On input, the resource name can be specified with the scoping project ID or number. On output, the resource name is specified with the scoping project number. Example: `locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}`", - Immutable: true, - }, - "updateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "UpdateTime", - ReadOnly: true, - Description: "Output only. The time when this `Metrics Scope` record was last updated.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope_yaml_embed.go deleted file mode 100644 index 3b0244f5ca..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/metrics_scope_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// GENERATED BY gen_go_data.go -// gen_go_data -package monitoring -var YAML_metrics_scope blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/metrics_scope.yaml - -package monitoring - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/metrics_scope.yaml -var YAML_metrics_scope = []byte("info:\n title: Monitoring/MetricsScope\n description: The Monitoring MetricsScope resource\n x-dcl-struct-name: MetricsScope\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a MetricsScope\n parameters:\n - name: metricsScope\n required: true\n description: A full instance of a MetricsScope\n apply:\n description: The function used to apply information about a MetricsScope\n parameters:\n - name: metricsScope\n required: true\n description: A full instance of a MetricsScope\ncomponents:\n schemas:\n MetricsScope:\n title: MetricsScope\n x-dcl-id: locations/global/metricsScopes/{{name}}\n x-dcl-locations:\n - global\n x-dcl-has-create: false\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time when this `Metrics Scope` was created.\n x-kubernetes-immutable: true\n monitoredProjects:\n type: array\n x-dcl-go-name: MonitoredProjects\n readOnly: true\n description: Output only. 
The list of projects monitored by this `Metrics\n Scope`.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: MetricsScopeMonitoredProjects\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time when this `MonitoredProject` was\n created.\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Immutable. The resource name of the `MonitoredProject`.\n On input, the resource name includes the scoping project ID and\n monitored project ID. On output, it contains the equivalent project\n numbers. Example: `locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}`'\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Immutable. The resource name of the Monitoring Metrics Scope.\n On input, the resource name can be specified with the scoping project\n ID or number. On output, the resource name is specified with the scoping\n project number. Example: `locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}`'\n x-kubernetes-immutable: true\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
The time when this `Metrics Scope` record was\n last updated.\n x-kubernetes-immutable: true\n") - -// 3179 bytes -// MD5: be6bbb3b86b646949a9e2c5d6f16127c diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project.go deleted file mode 100644 index 976a04a2ec..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project.go +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package monitoring - -import ( - "context" - "fmt" - "time" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type MonitoredProject struct { - Name *string `json:"name"` - CreateTime *string `json:"createTime"` - MetricsScope *string `json:"metricsScope"` -} - -func (r *MonitoredProject) String() string { - return dcl.SprintResource(r) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. 
-func (r *MonitoredProject) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "monitoring", - Type: "MonitoredProject", - Version: "monitoring", - } -} - -func (r *MonitoredProject) ID() (string, error) { - if err := extractMonitoredProjectFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "create_time": dcl.ValueOrEmptyString(nr.CreateTime), - "metrics_scope": dcl.ValueOrEmptyString(nr.MetricsScope), - } - return dcl.Nprintf("locations/global/metricsScopes/{{metrics_scope}}/projects/{{name}}", params), nil -} - -const MonitoredProjectMaxPage = -1 - -type MonitoredProjectList struct { - Items []*MonitoredProject - - nextToken string - - pageSize int32 - - resource *MonitoredProject -} - -func (l *MonitoredProjectList) HasNext() bool { - return l.nextToken != "" -} - -func (l *MonitoredProjectList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listMonitoredProject(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListMonitoredProject(ctx context.Context, metricsScope string) (*MonitoredProjectList, error) { - ctx = dcl.ContextWithRequestID(ctx) - c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{ - 403: dcl.Retryability{ - Retryable: true, - Pattern: "The caller does not have permission", - Timeout: 120000000000, - }, - }))) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListMonitoredProjectWithMaxResults(ctx, metricsScope, MonitoredProjectMaxPage) - -} - -func (c *Client) ListMonitoredProjectWithMaxResults(ctx context.Context, metricsScope string, pageSize int32) 
(*MonitoredProjectList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. - r := &MonitoredProject{ - MetricsScope: &metricsScope, - } - items, token, err := c.listMonitoredProject(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &MonitoredProjectList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) DeleteMonitoredProject(ctx context.Context, r *MonitoredProject) error { - ctx = dcl.ContextWithRequestID(ctx) - c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{ - 403: dcl.Retryability{ - Retryable: true, - Pattern: "The caller does not have permission", - Timeout: 120000000000, - }, - }))) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("MonitoredProject resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting MonitoredProject...") - deleteOp := deleteMonitoredProjectOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllMonitoredProject deletes all resources that the filter functions returns true on. 
-func (c *Client) DeleteAllMonitoredProject(ctx context.Context, metricsScope string, filter func(*MonitoredProject) bool) error { - listObj, err := c.ListMonitoredProject(ctx, metricsScope) - if err != nil { - return err - } - - err = c.deleteAllMonitoredProject(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllMonitoredProject(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyMonitoredProject(ctx context.Context, rawDesired *MonitoredProject, opts ...dcl.ApplyOption) (*MonitoredProject, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - c = NewClient(c.Config.Clone(dcl.WithCodeRetryability(map[int]dcl.Retryability{ - 403: dcl.Retryability{ - Retryable: true, - Pattern: "The caller does not have permission", - Timeout: 120000000000, - }, - }))) - var resultNewState *MonitoredProject - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyMonitoredProjectHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyMonitoredProjectHelper(c *Client, ctx context.Context, rawDesired *MonitoredProject, opts ...dcl.ApplyOption) (*MonitoredProject, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyMonitoredProject...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. 
- if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractMonitoredProjectFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.monitoredProjectDiffsForRawDesired(ctx, rawDesired, opts...) - if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToMonitoredProjectDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []monitoredProjectApiOperation - if create { - ops = append(ops, &createMonitoredProjectOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", 
op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyMonitoredProjectDiff(c, ctx, desired, rawDesired, ops, opts...) -} - -func applyMonitoredProjectDiff(c *Client, ctx context.Context, desired *MonitoredProject, rawDesired *MonitoredProject, ops []monitoredProjectApiOperation, opts ...dcl.ApplyOption) (*MonitoredProject, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetMonitoredProject(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createMonitoredProjectOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapMonitoredProject(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeMonitoredProjectNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeMonitoredProjectNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. 
- // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeMonitoredProjectDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractMonitoredProjectFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. - if err := postReadExtractMonitoredProjectFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffMonitoredProject(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project.yaml deleted file mode 100644 index 38406941c1..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project.yaml +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -info: - title: Monitoring/MonitoredProject - description: Monitored Project allows you to set a project as monitored by a _metrics - scope_, which is a term for a project used to group the metrics of multiple projects, - potentially across multiple organizations. This enables you to view these groups - in the Monitoring page of the cloud console. - x-dcl-struct-name: MonitoredProject - x-dcl-has-iam: false - x-dcl-ref: - text: REST API - url: https://cloud.google.com/monitoring/api/ref_v3/rest/v1/locations.global.metricsScopes - x-dcl-guides: - - text: Understanding metrics scopes - url: https://cloud.google.com/monitoring/settings#concept-scope - - text: API notes - url: https://cloud.google.com/monitoring/settings/manage-api -paths: - get: - description: The function used to get information about a MonitoredProject - parameters: - - name: monitoredProject - required: true - description: A full instance of a MonitoredProject - apply: - description: The function used to apply information about a MonitoredProject - parameters: - - name: monitoredProject - required: true - description: A full instance of a MonitoredProject - delete: - description: The function used to delete a MonitoredProject - parameters: - - name: monitoredProject - required: true - description: A full instance of a MonitoredProject - deleteAll: - description: The function used to delete all MonitoredProject - parameters: - - name: metricsScope - required: true - schema: - type: string - list: - description: The function used to list information about many MonitoredProject - parameters: - 
- name: metricsScope - required: true - schema: - type: string -components: - schemas: - MonitoredProject: - title: MonitoredProject - x-dcl-id: locations/global/metricsScopes/{{metrics_scope}}/projects/{{name}} - x-dcl-locations: - - global - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - name - - metricsScope - properties: - createTime: - type: string - format: date-time - x-dcl-go-name: CreateTime - readOnly: true - description: Output only. The time when this `MonitoredProject` was created. - x-kubernetes-immutable: true - metricsScope: - type: string - x-dcl-go-name: MetricsScope - description: 'Required. The resource name of the existing Metrics Scope - that will monitor this project. Example: locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}' - x-kubernetes-immutable: true - name: - type: string - x-dcl-go-name: Name - description: 'Immutable. The resource name of the `MonitoredProject`. On - input, the resource name includes the scoping project ID and monitored - project ID. On output, it contains the equivalent project numbers. Example: - `locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}`' - x-kubernetes-immutable: true diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project_internal.go deleted file mode 100644 index d1e0f0db0b..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project_internal.go +++ /dev/null @@ -1,570 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package monitoring - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations" -) - -func (r *MonitoredProject) validate() error { - - if err := dcl.Required(r, "name"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.MetricsScope, "MetricsScope"); err != nil { - return err - } - return nil -} -func (r *MonitoredProject) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://monitoring.googleapis.com/v1/", params) -} - -func (r *MonitoredProject) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "metricsScope": dcl.ValueOrEmptyString(nr.MetricsScope), - } - return dcl.URL("locations/global/metricsScopes/{{metricsScope}}", nr.basePath(), userBasePath, params), nil -} - -func (r *MonitoredProject) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "metricsScope": dcl.ValueOrEmptyString(nr.MetricsScope), - } - return dcl.URL("locations/global/metricsScopes/{{metricsScope}}", nr.basePath(), userBasePath, params), nil - -} - -func (r *MonitoredProject) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ 
- "metricsScope": dcl.ValueOrEmptyString(nr.MetricsScope), - } - return dcl.URL("locations/global/metricsScopes/{{metricsScope}}/projects", nr.basePath(), userBasePath, params), nil - -} - -func (r *MonitoredProject) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "metricsScope": dcl.ValueOrEmptyString(nr.MetricsScope), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("locations/global/metricsScopes/{{metricsScope}}/projects/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// monitoredProjectApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type monitoredProjectApiOperation interface { - do(context.Context, *MonitoredProject, *Client) error -} - -func (c *Client) listMonitoredProjectRaw(ctx context.Context, r *MonitoredProject, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != MonitoredProjectMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listMonitoredProjectOperation struct { - MonitoredProjects []map[string]interface{} `json:"monitoredProjects"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listMonitoredProject(ctx context.Context, r *MonitoredProject, pageToken string, pageSize int32) ([]*MonitoredProject, string, error) { - b, err := c.listMonitoredProjectRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listMonitoredProjectOperation - if err := 
json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*MonitoredProject - for _, v := range m.MonitoredProjects { - res, err := unmarshalMapMonitoredProject(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.MetricsScope = r.MetricsScope - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllMonitoredProject(ctx context.Context, f func(*MonitoredProject) bool, resources []*MonitoredProject) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteMonitoredProject(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteMonitoredProjectOperation struct{} - -func (op *deleteMonitoredProjectOperation) do(ctx context.Context, r *MonitoredProject, c *Client) error { - r, err := c.GetMonitoredProject(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "MonitoredProject not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetMonitoredProject checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return err - } - - // wait for object to be deleted. 
- var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - return err - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. - retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetMonitoredProject(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createMonitoredProjectOperation struct { - response map[string]interface{} -} - -func (op *createMonitoredProjectOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createMonitoredProjectOperation) do(ctx context.Context, r *MonitoredProject, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - // wait for object to be created. 
- var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) - return err - } - c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") - op.response, _ = o.FirstResponse() - - if _, err := c.GetMonitoredProject(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) monitoredProjectDiffsForRawDesired(ctx context.Context, rawDesired *MonitoredProject, opts ...dcl.ApplyOption) (initial, desired *MonitoredProject, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. - var fetchState *MonitoredProject - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*MonitoredProject); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected MonitoredProject, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetMonitoredProject(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a MonitoredProject resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve MonitoredProject resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that MonitoredProject resource did not exist.") - // Perform canonicalization to pick up defaults. 
- desired, err = canonicalizeMonitoredProjectDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for MonitoredProject: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for MonitoredProject: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractMonitoredProjectFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeMonitoredProjectInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for MonitoredProject: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeMonitoredProjectDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for MonitoredProject: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffMonitoredProject(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeMonitoredProjectInitialState(rawInitial, rawDesired *MonitoredProject) (*MonitoredProject, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeMonitoredProjectDesiredState(rawDesired, rawInitial *MonitoredProject, opts ...dcl.ApplyOption) (*MonitoredProject, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. 
- // We canonicalize the remaining nested objects with nil to pick up defaults. - - return rawDesired, nil - } - canonicalDesired := &MonitoredProject{} - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.NameToSelfLink(rawDesired.MetricsScope, rawInitial.MetricsScope) { - canonicalDesired.MetricsScope = rawInitial.MetricsScope - } else { - canonicalDesired.MetricsScope = rawDesired.MetricsScope - } - return canonicalDesired, nil -} - -func canonicalizeMonitoredProjectNewState(c *Client, rawNew, rawDesired *MonitoredProject) (*MonitoredProject, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { - rawNew.CreateTime = rawDesired.CreateTime - } else { - } - - rawNew.MetricsScope = rawDesired.MetricsScope - - return rawNew, nil -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
-func diffMonitoredProject(c *Client, desired, actual *MonitoredProject, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.MetricsScope, actual.MetricsScope, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MetricsScope")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. 
-func (r *MonitoredProject) urlNormalized() *MonitoredProject { - normalized := dcl.Copy(*r).(MonitoredProject) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.MetricsScope = dcl.SelfLinkToName(r.MetricsScope) - return &normalized -} - -func (r *MonitoredProject) updateURL(userBasePath, updateName string) (string, error) { - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the MonitoredProject resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *MonitoredProject) marshal(c *Client) ([]byte, error) { - m, err := expandMonitoredProject(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling MonitoredProject: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalMonitoredProject decodes JSON responses into the MonitoredProject resource schema. -func unmarshalMonitoredProject(b []byte, c *Client, res *MonitoredProject) (*MonitoredProject, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapMonitoredProject(m, c, res) -} - -func unmarshalMapMonitoredProject(m map[string]interface{}, c *Client, res *MonitoredProject) (*MonitoredProject, error) { - - flattened := flattenMonitoredProject(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandMonitoredProject expands MonitoredProject into a JSON request object. 
-func expandMonitoredProject(c *Client, f *MonitoredProject) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := dcl.DeriveField("locations/global/metricsScopes/%s/projects/%s", f.Name, dcl.SelfLinkToName(f.MetricsScope), dcl.SelfLinkToName(f.Name)); err != nil { - return nil, fmt.Errorf("error expanding Name into name: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding MetricsScope into metricsScope: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["metricsScope"] = v - } - - return m, nil -} - -// flattenMonitoredProject flattens MonitoredProject from a JSON request object into the -// MonitoredProject type. -func flattenMonitoredProject(c *Client, i interface{}, res *MonitoredProject) *MonitoredProject { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &MonitoredProject{} - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.CreateTime = dcl.FlattenString(m["createTime"]) - resultRes.MetricsScope = dcl.FlattenString(m["metricsScope"]) - - return resultRes -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. 
-func (r *MonitoredProject) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalMonitoredProject(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.MetricsScope == nil && ncr.MetricsScope == nil { - c.Config.Logger.Info("Both MetricsScope fields null - considering equal.") - } else if nr.MetricsScope == nil || ncr.MetricsScope == nil { - c.Config.Logger.Info("Only one MetricsScope field is null - considering unequal.") - return false - } else if *nr.MetricsScope != *ncr.MetricsScope { - return false - } - return true - } -} - -type monitoredProjectDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp monitoredProjectApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToMonitoredProjectDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]monitoredProjectDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []monitoredProjectDiff - // For each operation name, create a monitoredProjectDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. 
- diff := monitoredProjectDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToMonitoredProjectApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToMonitoredProjectApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (monitoredProjectApiOperation, error) { - switch opName { - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractMonitoredProjectFields(r *MonitoredProject) error { - return nil -} - -func postReadExtractMonitoredProjectFields(r *MonitoredProject) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project_schema.go deleted file mode 100644 index c9343a8b7b..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project_schema.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLMonitoredProjectSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Monitoring/MonitoredProject", - Description: "Monitored Project allows you to set a project as monitored by a _metrics scope_, which is a term for a project used to group the metrics of multiple projects, potentially across multiple organizations. This enables you to view these groups in the Monitoring page of the cloud console.", - StructName: "MonitoredProject", - Reference: &dcl.Link{ - Text: "REST API", - URL: "https://cloud.google.com/monitoring/api/ref_v3/rest/v1/locations.global.metricsScopes", - }, - Guides: []*dcl.Link{ - &dcl.Link{ - Text: "Understanding metrics scopes", - URL: "https://cloud.google.com/monitoring/settings#concept-scope", - }, - &dcl.Link{ - Text: "API notes", - URL: "https://cloud.google.com/monitoring/settings/manage-api", - }, - }, - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a MonitoredProject", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "monitoredProject", - Required: true, - Description: "A full instance of a MonitoredProject", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a MonitoredProject", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "monitoredProject", - Required: true, - Description: "A full instance of a MonitoredProject", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a MonitoredProject", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "monitoredProject", - Required: true, - Description: "A full instance of a MonitoredProject", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all MonitoredProject", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "metricsScope", - 
Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many MonitoredProject", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "metricsScope", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "MonitoredProject": &dcl.Component{ - Title: "MonitoredProject", - ID: "locations/global/metricsScopes/{{metrics_scope}}/projects/{{name}}", - Locations: []string{ - "global", - }, - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "metricsScope", - }, - Properties: map[string]*dcl.Property{ - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Output only. The time when this `MonitoredProject` was created.", - Immutable: true, - }, - "metricsScope": &dcl.Property{ - Type: "string", - GoName: "MetricsScope", - Description: "Required. The resource name of the existing Metrics Scope that will monitor this project. Example: locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}", - Immutable: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Immutable. The resource name of the `MonitoredProject`. On input, the resource name includes the scoping project ID and monitored project ID. On output, it contains the equivalent project numbers. 
Example: `locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}`", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project_yaml_embed.go deleted file mode 100644 index f29e39fdf4..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitored_project_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// GENERATED BY gen_go_data.go -// gen_go_data -package monitoring -var YAML_monitored_project blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/monitored_project.yaml - -package monitoring - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/monitored_project.yaml -var YAML_monitored_project = []byte("info:\n title: Monitoring/MonitoredProject\n description: Monitored Project allows you to set a project as monitored by a _metrics\n scope_, which is a term for a project used to group the metrics of multiple projects,\n potentially across multiple organizations. This enables you to view these groups\n in the Monitoring page of the cloud console.\n x-dcl-struct-name: MonitoredProject\n x-dcl-has-iam: false\n x-dcl-ref:\n text: REST API\n url: https://cloud.google.com/monitoring/api/ref_v3/rest/v1/locations.global.metricsScopes\n x-dcl-guides:\n - text: Understanding metrics scopes\n url: https://cloud.google.com/monitoring/settings#concept-scope\n - text: API notes\n url: https://cloud.google.com/monitoring/settings/manage-api\npaths:\n get:\n description: The function used to get information about a MonitoredProject\n parameters:\n - name: monitoredProject\n required: true\n description: A full instance of a MonitoredProject\n apply:\n description: The function used to apply information about a MonitoredProject\n parameters:\n - name: monitoredProject\n required: true\n description: A full instance of a MonitoredProject\n delete:\n description: The function used to delete a MonitoredProject\n parameters:\n - name: monitoredProject\n required: true\n description: A full instance of a MonitoredProject\n deleteAll:\n description: The function used to delete all MonitoredProject\n parameters:\n - name: metricsScope\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many MonitoredProject\n parameters:\n - name: metricsScope\n required: true\n schema:\n 
type: string\ncomponents:\n schemas:\n MonitoredProject:\n title: MonitoredProject\n x-dcl-id: locations/global/metricsScopes/{{metrics_scope}}/projects/{{name}}\n x-dcl-locations:\n - global\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - metricsScope\n properties:\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time when this `MonitoredProject` was created.\n x-kubernetes-immutable: true\n metricsScope:\n type: string\n x-dcl-go-name: MetricsScope\n description: 'Required. The resource name of the existing Metrics Scope\n that will monitor this project. Example: locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}'\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Immutable. The resource name of the `MonitoredProject`. On\n input, the resource name includes the scoping project ID and monitored\n project ID. On output, it contains the equivalent project numbers. Example:\n `locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}`'\n x-kubernetes-immutable: true\n") - -// 3225 bytes -// MD5: a2568c724b961c0d484ec49d4439ae8b diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitoring_utils.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitoring_utils.go deleted file mode 100644 index 497ca8a034..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/monitoring_utils.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Package monitoring provides methods and types for managing monitoring GCP resources. -package monitoring - -import ( - "bytes" - "context" - "io/ioutil" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager" -) - -func equalsMetricDescriptorValueType(m, n *MetricDescriptorValueTypeEnum) bool { - mStr := dcl.ValueOrEmptyString(m) - if mStr == "" { - mStr = "STRING" - } - nStr := dcl.ValueOrEmptyString(n) - if nStr == "" { - nStr = "STRING" - } - return mStr == nStr -} - -func equalsMetricDescriptorLabelsValueType(m, n *MetricDescriptorLabelsValueTypeEnum) bool { - mStr := dcl.ValueOrEmptyString(m) - if mStr == "" { - mStr = "STRING" - } - nStr := dcl.ValueOrEmptyString(n) - if nStr == "" { - nStr = "STRING" - } - return mStr == nStr -} - -func canonicalizeMetricDescriptorValueType(m, n interface{}) bool { - if m == nil && n == nil { - return true - } - - mVal, _ := m.(*MetricDescriptorValueTypeEnum) - nVal, _ := n.(*MetricDescriptorValueTypeEnum) - return equalsMetricDescriptorValueType(mVal, nVal) -} - -func canonicalizeMetricDescriptorLabelsValueType(m, n interface{}) bool { - if m == nil && n == nil { - return true - } - - mVal, _ := m.(*MetricDescriptorLabelsValueTypeEnum) - nVal, _ := n.(*MetricDescriptorLabelsValueTypeEnum) - 
return equalsMetricDescriptorLabelsValueType(mVal, nVal) -} - -// GetMonitoredProject is a custom method because projects are returned as project numbers instead of project ids. -func (c *Client) GetMonitoredProject(ctx context.Context, r *MonitoredProject) (*MonitoredProject, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - b, err := c.getMonitoredProjectRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalMonitoredProject(b, c, r) - if err != nil { - return nil, err - } - result.MetricsScope = r.MetricsScope - result.Name = r.Name - - c.Config.Logger.Infof("Retrieved raw result state: %v", result) - c.Config.Logger.Infof("Canonicalizing with specified state: %v", r) - result, err = canonicalizeMonitoredProjectNewState(c, result, r) - if err != nil { - return nil, err - } - c.Config.Logger.Infof("Created result state: %v", result) - - return result, nil -} - -func (c *Client) getMonitoredProjectRaw(ctx context.Context, r *MonitoredProject) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - b, err = dcl.ExtractElementFromList(b, "monitoredProjects", r.customMatcher(ctx, c)) - if err != nil { - return nil, err - } - return b, nil -} - -// This resource has a custom matcher to do a lookup and convert between project ids and project numbers. 
-func (r *MonitoredProject) customMatcher(ctx context.Context, c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalMonitoredProject(b, c, r) - if err != nil { - c.Config.Logger.Warning("Failed to unmarshal provided resource in matcher.") - return false - } - // URL Normalize both resources to compare only the short names. - nr := r.urlNormalized() - ncr := cr.urlNormalized() - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - return true - } - if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } - // Create a client with an empty base path so that it doesn't inherit the base path from the - // monitoring client. - cloudresourcemanagerCl := cloudresourcemanager.NewClient(c.Config.Clone(dcl.WithBasePath(""))) - project, err := cloudresourcemanagerCl.GetProject(ctx, &cloudresourcemanager.Project{ - Name: nr.Name, - }) - if err != nil { - c.Config.Logger.Warningf("Could not look up project %s", *nr.Name) - return false - } - projectNumber := dcl.ValueOrEmptyString(project.ProjectNumber) - c.Config.Logger.Infof("Attempting to match %v with %v.", projectNumber, ncr.Name) - return projectNumber == *ncr.Name - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel.go deleted file mode 100644 index c5e724180f..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel.go +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package monitoring - -import ( - "context" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type NotificationChannel struct { - Description *string `json:"description"` - DisplayName *string `json:"displayName"` - Enabled *bool `json:"enabled"` - Labels map[string]string `json:"labels"` - Name *string `json:"name"` - Type *string `json:"type"` - UserLabels map[string]string `json:"userLabels"` - VerificationStatus *NotificationChannelVerificationStatusEnum `json:"verificationStatus"` - Project *string `json:"project"` -} - -func (r *NotificationChannel) String() string { - return dcl.SprintResource(r) -} - -// The enum NotificationChannelVerificationStatusEnum. -type NotificationChannelVerificationStatusEnum string - -// NotificationChannelVerificationStatusEnumRef returns a *NotificationChannelVerificationStatusEnum with the value of string s -// If the empty string is provided, nil is returned. -func NotificationChannelVerificationStatusEnumRef(s string) *NotificationChannelVerificationStatusEnum { - v := NotificationChannelVerificationStatusEnum(s) - return &v -} - -func (v NotificationChannelVerificationStatusEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"VERIFICATION_STATUS_UNSPECIFIED", "UNVERIFIED", "VERIFIED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "NotificationChannelVerificationStatusEnum", - Value: string(v), - Valid: []string{}, - } -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. -func (r *NotificationChannel) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "monitoring", - Type: "NotificationChannel", - Version: "monitoring", - } -} - -func (r *NotificationChannel) ID() (string, error) { - if err := extractNotificationChannelFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "description": dcl.ValueOrEmptyString(nr.Description), - "display_name": dcl.ValueOrEmptyString(nr.DisplayName), - "enabled": dcl.ValueOrEmptyString(nr.Enabled), - "labels": dcl.ValueOrEmptyString(nr.Labels), - "name": dcl.ValueOrEmptyString(nr.Name), - "type": dcl.ValueOrEmptyString(nr.Type), - "user_labels": dcl.ValueOrEmptyString(nr.UserLabels), - "verification_status": dcl.ValueOrEmptyString(nr.VerificationStatus), - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.Nprintf("projects/{{project}}/notificationChannels/{{name}}", params), nil -} - -const NotificationChannelMaxPage = -1 - -type NotificationChannelList struct { - Items []*NotificationChannel - - nextToken string - - pageSize int32 - - resource *NotificationChannel -} - -func (l *NotificationChannelList) HasNext() bool { - return l.nextToken != "" -} - -func (l *NotificationChannelList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listNotificationChannel(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - 
l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListNotificationChannel(ctx context.Context, project string) (*NotificationChannelList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListNotificationChannelWithMaxResults(ctx, project, NotificationChannelMaxPage) - -} - -func (c *Client) ListNotificationChannelWithMaxResults(ctx context.Context, project string, pageSize int32) (*NotificationChannelList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. - r := &NotificationChannel{ - Project: &project, - } - items, token, err := c.listNotificationChannel(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &NotificationChannelList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetNotificationChannel(ctx context.Context, r *NotificationChannel) (*NotificationChannel, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
- extractNotificationChannelFields(r) - - b, err := c.getNotificationChannelRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalNotificationChannel(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Name = r.Name - if dcl.IsZeroValue(result.Enabled) { - result.Enabled = dcl.Bool(true) - } - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeNotificationChannelNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractNotificationChannelFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteNotificationChannel(ctx context.Context, r *NotificationChannel) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("NotificationChannel resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting NotificationChannel...") - deleteOp := deleteNotificationChannelOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllNotificationChannel deletes all resources that the filter functions returns true on. 
-func (c *Client) DeleteAllNotificationChannel(ctx context.Context, project string, filter func(*NotificationChannel) bool) error { - listObj, err := c.ListNotificationChannel(ctx, project) - if err != nil { - return err - } - - err = c.deleteAllNotificationChannel(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllNotificationChannel(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyNotificationChannel(ctx context.Context, rawDesired *NotificationChannel, opts ...dcl.ApplyOption) (*NotificationChannel, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *NotificationChannel - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyNotificationChannelHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyNotificationChannelHelper(c *Client, ctx context.Context, rawDesired *NotificationChannel, opts ...dcl.ApplyOption) (*NotificationChannel, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyNotificationChannel...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. 
- if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractNotificationChannelFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.notificationChannelDiffsForRawDesired(ctx, rawDesired, opts...) - if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToNotificationChannelDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []notificationChannelApiOperation - if create { - ops = append(ops, &createNotificationChannelOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation 
%T %+v: %v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyNotificationChannelDiff(c, ctx, desired, rawDesired, ops, opts...) -} - -func applyNotificationChannelDiff(c *Client, ctx context.Context, desired *NotificationChannel, rawDesired *NotificationChannel, ops []notificationChannelApiOperation, opts ...dcl.ApplyOption) (*NotificationChannel, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetNotificationChannel(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createNotificationChannelOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapNotificationChannel(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeNotificationChannelNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeNotificationChannelNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. 
- // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeNotificationChannelDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractNotificationChannelFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. - if err := postReadExtractNotificationChannelFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffNotificationChannel(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel.yaml deleted file mode 100644 index 9e0b4c0185..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel.yaml +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -info: - title: Monitoring/NotificationChannel - description: The Monitoring NotificationChannel resource - x-dcl-struct-name: NotificationChannel - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a NotificationChannel - parameters: - - name: notificationChannel - required: true - description: A full instance of a NotificationChannel - apply: - description: The function used to apply information about a NotificationChannel - parameters: - - name: notificationChannel - required: true - description: A full instance of a NotificationChannel - delete: - description: The function used to delete a NotificationChannel - parameters: - - name: notificationChannel - required: true - description: A full instance of a NotificationChannel - deleteAll: - description: The function used to delete all NotificationChannel - parameters: - - name: project - required: true - schema: - type: string - list: - description: The function used to list information about many NotificationChannel - parameters: - - name: project - required: true - schema: - type: string -components: - schemas: - NotificationChannel: - title: NotificationChannel - x-dcl-id: projects/{{project}}/notificationChannels/{{name}} - x-dcl-parent-container: project - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - properties: - description: - type: string - x-dcl-go-name: Description - description: An optional human-readable description of this notification - channel. 
This description may provide additional details, beyond the display - name, for the channel. This may not exceed 1024 Unicode characters. - displayName: - type: string - x-dcl-go-name: DisplayName - description: An optional human-readable name for this notification channel. - It is recommended that you specify a non-empty and unique name in order - to make it easier to identify the channels in your project, though this - is not enforced. The display name is limited to 512 Unicode characters. - enabled: - type: boolean - x-dcl-go-name: Enabled - description: Whether notifications are forwarded to the described channel. - This makes it possible to disable delivery of notifications to a particular - channel without removing the channel from all alerting policies that reference - the channel. This is a more convenient approach when the change is temporary - and you want to receive notifications from the same set of alerting policies - on the channel at some point in the future. - default: true - labels: - type: object - additionalProperties: - type: string - x-dcl-go-name: Labels - description: Configuration fields that define the channel and its behavior. - The permissible and required labels are specified in the [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] - of the `NotificationChannelDescriptor` corresponding to the `type` field. - name: - type: string - x-dcl-go-name: Name - description: 'The full REST resource name for this channel. The format is: - projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] The - `[CHANNEL_ID]` is automatically assigned by the server on creation.' - x-kubernetes-immutable: true - x-dcl-server-generated-parameter: true - project: - type: string - x-dcl-go-name: Project - description: The project for this notification channel. 
- x-kubernetes-immutable: true - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - parent: true - type: - type: string - x-dcl-go-name: Type - description: The type of the notification channel. This field matches the - value of the [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] - field. - userLabels: - type: object - additionalProperties: - type: string - x-dcl-go-name: UserLabels - description: User-supplied key/value data that does not need to conform - to the corresponding `NotificationChannelDescriptor`'s schema, unlike - the `labels` field. This field is intended to be used for orv3nizing and - identifying the `NotificationChannel` objects. The field can contain up - to 64 entries. Each key and value is limited to 63 Unicode characters - or 128 bytes, whichever is smaller. Labels and values can contain only - lowercase letters, numerals, underscores, and dashes. Keys must begin - with a letter. - verificationStatus: - type: string - x-dcl-go-name: VerificationStatus - x-dcl-go-type: NotificationChannelVerificationStatusEnum - readOnly: true - description: 'Indicates whether this channel has been verified or not. On - a [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels] - or [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] - operation, this field is expected to be populated. If the value is `UNVERIFIED`, - then it indicates that the channel is non-functioning (it both requires - verification and lacks verification); otherwise, it is assumed that the - channel works. If the channel is neither `VERIFIED` nor `UNVERIFIED`, - it implies that the channel is of a type that does not require verification - or that this specific channel has been exempted from verification because - it was created prior to verification being required for channels of this - type. 
This field cannot be modified using a standard [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] - operation. To change the value of this field, you must call [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel]. - Possible values: VERIFICATION_STATUS_UNSPECIFIED, UNVERIFIED, VERIFIED' - x-kubernetes-immutable: true - enum: - - VERIFICATION_STATUS_UNSPECIFIED - - UNVERIFIED - - VERIFIED diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel_internal.go deleted file mode 100644 index 151a7e1525..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel_internal.go +++ /dev/null @@ -1,889 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func (r *NotificationChannel) validate() error { - - return nil -} -func (r *NotificationChannel) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://monitoring.googleapis.com/v3/", params) -} - -func (r *NotificationChannel) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/notificationChannels/{{name}}", nr.basePath(), userBasePath, params), nil -} - -func (r *NotificationChannel) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/notificationChannels", nr.basePath(), userBasePath, params), nil - -} - -func (r *NotificationChannel) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/notificationChannels", nr.basePath(), userBasePath, params), nil - -} - -func (r *NotificationChannel) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/notificationChannels/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// notificationChannelApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. 
-type notificationChannelApiOperation interface { - do(context.Context, *NotificationChannel, *Client) error -} - -// newUpdateNotificationChannelUpdateRequest creates a request for an -// NotificationChannel resource's update update type by filling in the update -// fields based on the intended state of the resource. -func newUpdateNotificationChannelUpdateRequest(ctx context.Context, f *NotificationChannel, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.Description; !dcl.IsEmptyValueIndirect(v) { - req["description"] = v - } - if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { - req["displayName"] = v - } - if v := f.Enabled; !dcl.IsEmptyValueIndirect(v) { - req["enabled"] = v - } - if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { - req["labels"] = v - } - if v := f.Type; !dcl.IsEmptyValueIndirect(v) { - req["type"] = v - } - if v := f.UserLabels; !dcl.IsEmptyValueIndirect(v) { - req["userLabels"] = v - } - req["name"] = fmt.Sprintf("projects/%s/notificationChannels/%s", *f.Project, *f.Name) - - return req, nil -} - -// marshalUpdateNotificationChannelUpdateRequest converts the update into -// the final JSON request body. -func marshalUpdateNotificationChannelUpdateRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateNotificationChannelUpdateOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. 
- -func (op *updateNotificationChannelUpdateOperation) do(ctx context.Context, r *NotificationChannel, c *Client) error { - _, err := c.GetNotificationChannel(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "update") - if err != nil { - return err - } - mask := dcl.UpdateMask(op.FieldDiffs) - u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) - if err != nil { - return err - } - - req, err := newUpdateNotificationChannelUpdateRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateNotificationChannelUpdateRequest(c, req) - if err != nil { - return err - } - _, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - return nil -} - -func (c *Client) listNotificationChannelRaw(ctx context.Context, r *NotificationChannel, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != NotificationChannelMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listNotificationChannelOperation struct { - NotificationChannels []map[string]interface{} `json:"notificationChannels"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listNotificationChannel(ctx context.Context, r *NotificationChannel, pageToken string, pageSize int32) ([]*NotificationChannel, string, error) { - b, err := c.listNotificationChannelRaw(ctx, r, pageToken, pageSize) 
- if err != nil { - return nil, "", err - } - - var m listNotificationChannelOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*NotificationChannel - for _, v := range m.NotificationChannels { - res, err := unmarshalMapNotificationChannel(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Project = r.Project - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllNotificationChannel(ctx context.Context, f func(*NotificationChannel) bool, resources []*NotificationChannel) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteNotificationChannel(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteNotificationChannelOperation struct{} - -func (op *deleteNotificationChannelOperation) do(ctx context.Context, r *NotificationChannel, c *Client) error { - r, err := c.GetNotificationChannel(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "NotificationChannel not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetNotificationChannel checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return fmt.Errorf("failed to delete NotificationChannel: %w", err) - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. 
- retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetNotificationChannel(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createNotificationChannelOperation struct { - response map[string]interface{} -} - -func (op *createNotificationChannelOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createNotificationChannelOperation) do(ctx context.Context, r *NotificationChannel, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - if r.Name != nil { - // Allowing creation to continue with Name set could result in a NotificationChannel with the wrong Name. - return fmt.Errorf("server-generated parameter Name was specified by user as %v, should be unspecified", dcl.ValueOrEmptyString(r.Name)) - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - - o, err := dcl.ResponseBodyAsJSON(resp) - if err != nil { - return fmt.Errorf("error decoding response body into JSON: %w", err) - } - op.response = o - - // Include Name in URL substitution for initial GET request. 
- m := op.response - r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) - - if _, err := c.GetNotificationChannel(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getNotificationChannelRaw(ctx context.Context, r *NotificationChannel) ([]byte, error) { - if dcl.IsZeroValue(r.Enabled) { - r.Enabled = dcl.Bool(true) - } - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) notificationChannelDiffsForRawDesired(ctx context.Context, rawDesired *NotificationChannel, opts ...dcl.ApplyOption) (initial, desired *NotificationChannel, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. - var fetchState *NotificationChannel - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*NotificationChannel); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected NotificationChannel, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - if fetchState.Name == nil { - // We cannot perform a get because of lack of information. We have to assume - // that this is being created for the first time. 
- desired, err := canonicalizeNotificationChannelDesiredState(rawDesired, nil) - return nil, desired, nil, err - } - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetNotificationChannel(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a NotificationChannel resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve NotificationChannel resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that NotificationChannel resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeNotificationChannelDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for NotificationChannel: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for NotificationChannel: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractNotificationChannelFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeNotificationChannelInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for NotificationChannel: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeNotificationChannelDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for NotificationChannel: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffNotificationChannel(c, desired, initial, opts...) 
- return initial, desired, diffs, err -} - -func canonicalizeNotificationChannelInitialState(rawInitial, rawDesired *NotificationChannel) (*NotificationChannel, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeNotificationChannelDesiredState(rawDesired, rawInitial *NotificationChannel, opts ...dcl.ApplyOption) (*NotificationChannel, error) { - - if dcl.IsZeroValue(rawDesired.Enabled) { - rawDesired.Enabled = dcl.Bool(true) - } - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - - return rawDesired, nil - } - canonicalDesired := &NotificationChannel{} - if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { - canonicalDesired.Description = rawInitial.Description - } else { - canonicalDesired.Description = rawDesired.Description - } - if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { - canonicalDesired.DisplayName = rawInitial.DisplayName - } else { - canonicalDesired.DisplayName = rawDesired.DisplayName - } - if dcl.BoolCanonicalize(rawDesired.Enabled, rawInitial.Enabled) { - canonicalDesired.Enabled = rawInitial.Enabled - } else { - canonicalDesired.Enabled = rawDesired.Enabled - } - if dcl.IsZeroValue(rawDesired.Labels) || (dcl.IsEmptyValueIndirect(rawDesired.Labels) && dcl.IsEmptyValueIndirect(rawInitial.Labels)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- canonicalDesired.Labels = rawInitial.Labels - } else { - canonicalDesired.Labels = rawDesired.Labels - } - if dcl.IsZeroValue(rawDesired.Name) || (dcl.IsEmptyValueIndirect(rawDesired.Name) && dcl.IsEmptyValueIndirect(rawInitial.Name)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.StringCanonicalize(rawDesired.Type, rawInitial.Type) { - canonicalDesired.Type = rawInitial.Type - } else { - canonicalDesired.Type = rawDesired.Type - } - if dcl.IsZeroValue(rawDesired.UserLabels) || (dcl.IsEmptyValueIndirect(rawDesired.UserLabels) && dcl.IsEmptyValueIndirect(rawInitial.UserLabels)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - canonicalDesired.UserLabels = rawInitial.UserLabels - } else { - canonicalDesired.UserLabels = rawDesired.UserLabels - } - if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - return canonicalDesired, nil -} - -func canonicalizeNotificationChannelNewState(c *Client, rawNew, rawDesired *NotificationChannel) (*NotificationChannel, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { - rawNew.Description = rawDesired.Description - } else { - if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { - rawNew.Description = rawDesired.Description - } - } - - if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } else { - if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Enabled) && dcl.IsEmptyValueIndirect(rawDesired.Enabled) { - 
rawNew.Enabled = rawDesired.Enabled - } else { - if dcl.BoolCanonicalize(rawDesired.Enabled, rawNew.Enabled) { - rawNew.Enabled = rawDesired.Enabled - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Labels) && dcl.IsEmptyValueIndirect(rawDesired.Labels) { - rawNew.Labels = rawDesired.Labels - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.Type) && dcl.IsEmptyValueIndirect(rawDesired.Type) { - rawNew.Type = rawDesired.Type - } else { - if dcl.StringCanonicalize(rawDesired.Type, rawNew.Type) { - rawNew.Type = rawDesired.Type - } - } - - if dcl.IsEmptyValueIndirect(rawNew.UserLabels) && dcl.IsEmptyValueIndirect(rawDesired.UserLabels) { - rawNew.UserLabels = rawDesired.UserLabels - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.VerificationStatus) && dcl.IsEmptyValueIndirect(rawDesired.VerificationStatus) { - rawNew.VerificationStatus = rawDesired.VerificationStatus - } else { - } - - rawNew.Project = rawDesired.Project - - return rawNew, nil -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
-func diffNotificationChannel(c *Client, desired, actual *NotificationChannel, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNotificationChannelUpdateOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNotificationChannelUpdateOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Enabled, actual.Enabled, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNotificationChannelUpdateOperation")}, fn.AddNest("Enabled")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNotificationChannelUpdateOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Type, actual.Type, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNotificationChannelUpdateOperation")}, fn.AddNest("Type")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UserLabels, actual.UserLabels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateNotificationChannelUpdateOperation")}, fn.AddNest("UserLabels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.VerificationStatus, actual.VerificationStatus, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("VerificationStatus")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. 
-func (r *NotificationChannel) urlNormalized() *NotificationChannel { - normalized := dcl.Copy(*r).(NotificationChannel) - normalized.Description = dcl.SelfLinkToName(r.Description) - normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.Type = dcl.SelfLinkToName(r.Type) - normalized.Project = dcl.SelfLinkToName(r.Project) - return &normalized -} - -func (r *NotificationChannel) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "update" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/notificationChannels/{{name}}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the NotificationChannel resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *NotificationChannel) marshal(c *Client) ([]byte, error) { - m, err := expandNotificationChannel(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling NotificationChannel: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalNotificationChannel decodes JSON responses into the NotificationChannel resource schema. 
-func unmarshalNotificationChannel(b []byte, c *Client, res *NotificationChannel) (*NotificationChannel, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapNotificationChannel(m, c, res) -} - -func unmarshalMapNotificationChannel(m map[string]interface{}, c *Client, res *NotificationChannel) (*NotificationChannel, error) { - - flattened := flattenNotificationChannel(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandNotificationChannel expands NotificationChannel into a JSON request object. -func expandNotificationChannel(c *Client, f *NotificationChannel) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v := f.Description; dcl.ValueShouldBeSent(v) { - m["description"] = v - } - if v := f.DisplayName; dcl.ValueShouldBeSent(v) { - m["displayName"] = v - } - if v := f.Enabled; dcl.ValueShouldBeSent(v) { - m["enabled"] = v - } - if v := f.Labels; dcl.ValueShouldBeSent(v) { - m["labels"] = v - } - if v := f.Name; dcl.ValueShouldBeSent(v) { - m["name"] = v - } - if v := f.Type; dcl.ValueShouldBeSent(v) { - m["type"] = v - } - if v := f.UserLabels; dcl.ValueShouldBeSent(v) { - m["userLabels"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - - return m, nil -} - -// flattenNotificationChannel flattens NotificationChannel from a JSON request object into the -// NotificationChannel type. 
-func flattenNotificationChannel(c *Client, i interface{}, res *NotificationChannel) *NotificationChannel { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &NotificationChannel{} - resultRes.Description = dcl.FlattenString(m["description"]) - resultRes.DisplayName = dcl.FlattenString(m["displayName"]) - resultRes.Enabled = dcl.FlattenBool(m["enabled"]) - if _, ok := m["enabled"]; !ok { - c.Config.Logger.Info("Using default value for enabled") - resultRes.Enabled = dcl.Bool(true) - } - resultRes.Labels = dcl.FlattenKeyValuePairs(m["labels"]) - resultRes.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) - resultRes.Type = dcl.FlattenString(m["type"]) - resultRes.UserLabels = dcl.FlattenKeyValuePairs(m["userLabels"]) - resultRes.VerificationStatus = flattenNotificationChannelVerificationStatusEnum(m["verificationStatus"]) - resultRes.Project = dcl.FlattenString(m["project"]) - - return resultRes -} - -// flattenNotificationChannelVerificationStatusEnumMap flattens the contents of NotificationChannelVerificationStatusEnum from a JSON -// response object. -func flattenNotificationChannelVerificationStatusEnumMap(c *Client, i interface{}, res *NotificationChannel) map[string]NotificationChannelVerificationStatusEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]NotificationChannelVerificationStatusEnum{} - } - - if len(a) == 0 { - return map[string]NotificationChannelVerificationStatusEnum{} - } - - items := make(map[string]NotificationChannelVerificationStatusEnum) - for k, item := range a { - items[k] = *flattenNotificationChannelVerificationStatusEnum(item.(interface{})) - } - - return items -} - -// flattenNotificationChannelVerificationStatusEnumSlice flattens the contents of NotificationChannelVerificationStatusEnum from a JSON -// response object. 
-func flattenNotificationChannelVerificationStatusEnumSlice(c *Client, i interface{}, res *NotificationChannel) []NotificationChannelVerificationStatusEnum { - a, ok := i.([]interface{}) - if !ok { - return []NotificationChannelVerificationStatusEnum{} - } - - if len(a) == 0 { - return []NotificationChannelVerificationStatusEnum{} - } - - items := make([]NotificationChannelVerificationStatusEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenNotificationChannelVerificationStatusEnum(item.(interface{}))) - } - - return items -} - -// flattenNotificationChannelVerificationStatusEnum asserts that an interface is a string, and returns a -// pointer to a *NotificationChannelVerificationStatusEnum with the same value as that string. -func flattenNotificationChannelVerificationStatusEnum(i interface{}) *NotificationChannelVerificationStatusEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return NotificationChannelVerificationStatusEnumRef(s) -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. 
-func (r *NotificationChannel) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalNotificationChannel(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type notificationChannelDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp notificationChannelApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToNotificationChannelDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]notificationChannelDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. 
- for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []notificationChannelDiff - // For each operation name, create a notificationChannelDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := notificationChannelDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToNotificationChannelApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToNotificationChannelApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (notificationChannelApiOperation, error) { - switch opName { - - case "updateNotificationChannelUpdateOperation": - return &updateNotificationChannelUpdateOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractNotificationChannelFields(r *NotificationChannel) error { - return nil -} - -func postReadExtractNotificationChannelFields(r *NotificationChannel) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel_schema.go deleted file mode 100644 index e6fb7c3df9..0000000000 --- 
a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel_schema.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package monitoring - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLNotificationChannelSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Monitoring/NotificationChannel", - Description: "The Monitoring NotificationChannel resource", - StructName: "NotificationChannel", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a NotificationChannel", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "notificationChannel", - Required: true, - Description: "A full instance of a NotificationChannel", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a NotificationChannel", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "notificationChannel", - Required: true, - Description: "A full instance of a NotificationChannel", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a NotificationChannel", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "notificationChannel", - Required: true, - Description: "A full instance of a 
NotificationChannel", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all NotificationChannel", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many NotificationChannel", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "NotificationChannel": &dcl.Component{ - Title: "NotificationChannel", - ID: "projects/{{project}}/notificationChannels/{{name}}", - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Properties: map[string]*dcl.Property{ - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "An optional human-readable description of this notification channel. This description may provide additional details, beyond the display name, for the channel. This may not exceed 1024 Unicode characters.", - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "An optional human-readable name for this notification channel. It is recommended that you specify a non-empty and unique name in order to make it easier to identify the channels in your project, though this is not enforced. The display name is limited to 512 Unicode characters.", - }, - "enabled": &dcl.Property{ - Type: "boolean", - GoName: "Enabled", - Description: "Whether notifications are forwarded to the described channel. This makes it possible to disable delivery of notifications to a particular channel without removing the channel from all alerting policies that reference the channel. 
This is a more convenient approach when the change is temporary and you want to receive notifications from the same set of alerting policies on the channel at some point in the future.", - Default: true, - }, - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Configuration fields that define the channel and its behavior. The permissible and required labels are specified in the [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] of the `NotificationChannelDescriptor` corresponding to the `type` field.", - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "The full REST resource name for this channel. The format is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] The `[CHANNEL_ID]` is automatically assigned by the server on creation.", - Immutable: true, - ServerGeneratedParameter: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for this notification channel.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - }, - "type": &dcl.Property{ - Type: "string", - GoName: "Type", - Description: "The type of the notification channel. This field matches the value of the [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] field.", - }, - "userLabels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "UserLabels", - Description: "User-supplied key/value data that does not need to conform to the corresponding `NotificationChannelDescriptor`'s schema, unlike the `labels` field. This field is intended to be used for orv3nizing and identifying the `NotificationChannel` objects. 
The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.", - }, - "verificationStatus": &dcl.Property{ - Type: "string", - GoName: "VerificationStatus", - GoType: "NotificationChannelVerificationStatusEnum", - ReadOnly: true, - Description: "Indicates whether this channel has been verified or not. On a [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels] or [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] operation, this field is expected to be populated. If the value is `UNVERIFIED`, then it indicates that the channel is non-functioning (it both requires verification and lacks verification); otherwise, it is assumed that the channel works. If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that the channel is of a type that does not require verification or that this specific channel has been exempted from verification because it was created prior to verification being required for channels of this type. This field cannot be modified using a standard [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] operation. To change the value of this field, you must call [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel]. 
Possible values: VERIFICATION_STATUS_UNSPECIFIED, UNVERIFIED, VERIFIED", - Immutable: true, - Enum: []string{ - "VERIFICATION_STATUS_UNSPECIFIED", - "UNVERIFIED", - "VERIFIED", - }, - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel_yaml_embed.go deleted file mode 100644 index bf959c7ef4..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/notification_channel_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// GENERATED BY gen_go_data.go -// gen_go_data -package monitoring -var YAML_notification_channel blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/notification_channel.yaml - -package monitoring - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/notification_channel.yaml -var YAML_notification_channel = []byte("info:\n title: Monitoring/NotificationChannel\n description: The Monitoring NotificationChannel resource\n x-dcl-struct-name: NotificationChannel\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a NotificationChannel\n parameters:\n - name: notificationChannel\n required: true\n description: A full instance of a NotificationChannel\n apply:\n description: The function used to apply information about a NotificationChannel\n parameters:\n - name: notificationChannel\n required: true\n description: A full instance of a NotificationChannel\n delete:\n description: The function used to delete a NotificationChannel\n parameters:\n - name: notificationChannel\n required: true\n description: A full instance of a NotificationChannel\n deleteAll:\n description: The function used to delete all NotificationChannel\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many NotificationChannel\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n NotificationChannel:\n title: NotificationChannel\n x-dcl-id: projects/{{project}}/notificationChannels/{{name}}\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: An optional human-readable description of this notification\n channel. 
This description may provide additional details, beyond the display\n name, for the channel. This may not exceed 1024 Unicode characters.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: An optional human-readable name for this notification channel.\n It is recommended that you specify a non-empty and unique name in order\n to make it easier to identify the channels in your project, though this\n is not enforced. The display name is limited to 512 Unicode characters.\n enabled:\n type: boolean\n x-dcl-go-name: Enabled\n description: Whether notifications are forwarded to the described channel.\n This makes it possible to disable delivery of notifications to a particular\n channel without removing the channel from all alerting policies that reference\n the channel. This is a more convenient approach when the change is temporary\n and you want to receive notifications from the same set of alerting policies\n on the channel at some point in the future.\n default: true\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Configuration fields that define the channel and its behavior.\n The permissible and required labels are specified in the [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels]\n of the `NotificationChannelDescriptor` corresponding to the `type` field.\n name:\n type: string\n x-dcl-go-name: Name\n description: 'The full REST resource name for this channel. 
The format is:\n projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] The\n `[CHANNEL_ID]` is automatically assigned by the server on creation.'\n x-kubernetes-immutable: true\n x-dcl-server-generated-parameter: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for this notification channel.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n type:\n type: string\n x-dcl-go-name: Type\n description: The type of the notification channel. This field matches the\n value of the [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type]\n field.\n userLabels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: UserLabels\n description: User-supplied key/value data that does not need to conform\n to the corresponding `NotificationChannelDescriptor`'s schema, unlike\n the `labels` field. This field is intended to be used for orv3nizing and\n identifying the `NotificationChannel` objects. The field can contain up\n to 64 entries. Each key and value is limited to 63 Unicode characters\n or 128 bytes, whichever is smaller. Labels and values can contain only\n lowercase letters, numerals, underscores, and dashes. Keys must begin\n with a letter.\n verificationStatus:\n type: string\n x-dcl-go-name: VerificationStatus\n x-dcl-go-type: NotificationChannelVerificationStatusEnum\n readOnly: true\n description: 'Indicates whether this channel has been verified or not. On\n a [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels]\n or [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel]\n operation, this field is expected to be populated. 
If the value is `UNVERIFIED`,\n then it indicates that the channel is non-functioning (it both requires\n verification and lacks verification); otherwise, it is assumed that the\n channel works. If the channel is neither `VERIFIED` nor `UNVERIFIED`,\n it implies that the channel is of a type that does not require verification\n or that this specific channel has been exempted from verification because\n it was created prior to verification being required for channels of this\n type. This field cannot be modified using a standard [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]\n operation. To change the value of this field, you must call [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel].\n Possible values: VERIFICATION_STATUS_UNSPECIFIED, UNVERIFIED, VERIFIED'\n x-kubernetes-immutable: true\n enum:\n - VERIFICATION_STATUS_UNSPECIFIED\n - UNVERIFIED\n - VERIFIED\n") - -// 6783 bytes -// MD5: bec4a53064e865b4f3960c5be143e55f diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service.go deleted file mode 100644 index 8db6c74af2..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service.go +++ /dev/null @@ -1,463 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package monitoring - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type Service struct { - Name *string `json:"name"` - DisplayName *string `json:"displayName"` - Custom *ServiceCustom `json:"custom"` - Telemetry *ServiceTelemetry `json:"telemetry"` - UserLabels map[string]string `json:"userLabels"` - Project *string `json:"project"` -} - -func (r *Service) String() string { - return dcl.SprintResource(r) -} - -type ServiceCustom struct { - empty bool `json:"-"` -} - -type jsonServiceCustom ServiceCustom - -func (r *ServiceCustom) UnmarshalJSON(data []byte) error { - var res jsonServiceCustom - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceCustom - } else { - - } - return nil -} - -// This object is used to assert a desired state where this ServiceCustom is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyServiceCustom *ServiceCustom = &ServiceCustom{empty: true} - -func (r *ServiceCustom) Empty() bool { - return r.empty -} - -func (r *ServiceCustom) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceCustom) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceTelemetry struct { - empty bool `json:"-"` - ResourceName *string `json:"resourceName"` -} - -type jsonServiceTelemetry ServiceTelemetry - -func (r *ServiceTelemetry) UnmarshalJSON(data []byte) error { - var res jsonServiceTelemetry - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceTelemetry - } else { - - r.ResourceName = res.ResourceName - - } - return nil -} - -// This object is used to assert a desired state where this ServiceTelemetry is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceTelemetry *ServiceTelemetry = &ServiceTelemetry{empty: true} - -func (r *ServiceTelemetry) Empty() bool { - return r.empty -} - -func (r *ServiceTelemetry) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceTelemetry) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. 
-func (r *Service) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "monitoring", - Type: "Service", - Version: "monitoring", - } -} - -func (r *Service) ID() (string, error) { - if err := extractServiceFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "display_name": dcl.ValueOrEmptyString(nr.DisplayName), - "custom": dcl.ValueOrEmptyString(nr.Custom), - "telemetry": dcl.ValueOrEmptyString(nr.Telemetry), - "user_labels": dcl.ValueOrEmptyString(nr.UserLabels), - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.Nprintf("projects/{{project}}/services/{{name}}", params), nil -} - -const ServiceMaxPage = -1 - -type ServiceList struct { - Items []*Service - - nextToken string - - pageSize int32 - - resource *Service -} - -func (l *ServiceList) HasNext() bool { - return l.nextToken != "" -} - -func (l *ServiceList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listService(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListService(ctx context.Context, project string) (*ServiceList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListServiceWithMaxResults(ctx, project, ServiceMaxPage) - -} - -func (c *Client) ListServiceWithMaxResults(ctx context.Context, project string, pageSize int32) (*ServiceList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. 
- r := &Service{ - Project: &project, - } - items, token, err := c.listService(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &ServiceList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetService(ctx context.Context, r *Service) (*Service, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. - extractServiceFields(r) - - b, err := c.getServiceRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalService(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Name = r.Name - if dcl.IsZeroValue(result.Custom) { - result.Custom = &ServiceCustom{} - } - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeServiceNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractServiceFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteService(ctx context.Context, r *Service) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("Service resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting Service...") - deleteOp := deleteServiceOperation{} - return deleteOp.do(ctx, r, c) 
-} - -// DeleteAllService deletes all resources that the filter functions returns true on. -func (c *Client) DeleteAllService(ctx context.Context, project string, filter func(*Service) bool) error { - listObj, err := c.ListService(ctx, project) - if err != nil { - return err - } - - err = c.deleteAllService(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllService(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyService(ctx context.Context, rawDesired *Service, opts ...dcl.ApplyOption) (*Service, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *Service - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyServiceHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyServiceHelper(c *Client, ctx context.Context, rawDesired *Service, opts ...dcl.ApplyOption) (*Service, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyService...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. - if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractServiceFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.serviceDiffsForRawDesired(ctx, rawDesired, opts...) 
- if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToServiceDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []serviceApiOperation - if create { - ops = append(ops, &createServiceOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyServiceDiff(c, ctx, desired, rawDesired, ops, opts...) 
-} - -func applyServiceDiff(c *Client, ctx context.Context, desired *Service, rawDesired *Service, ops []serviceApiOperation, opts ...dcl.ApplyOption) (*Service, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetService(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createServiceOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapService(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeServiceNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeServiceNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeServiceDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractServiceFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. 
- if err := postReadExtractServiceFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffService(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service.yaml deleted file mode 100644 index 79c885bc8b..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service.yaml +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-info: - title: Monitoring/Service - description: The Monitoring Service resource - x-dcl-struct-name: Service - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a Service - parameters: - - name: service - required: true - description: A full instance of a Service - apply: - description: The function used to apply information about a Service - parameters: - - name: service - required: true - description: A full instance of a Service - delete: - description: The function used to delete a Service - parameters: - - name: service - required: true - description: A full instance of a Service - deleteAll: - description: The function used to delete all Service - parameters: - - name: project - required: true - schema: - type: string - list: - description: The function used to list information about many Service - parameters: - - name: project - required: true - schema: - type: string -components: - schemas: - Service: - title: Service - x-dcl-id: projects/{{project}}/services/{{name}} - x-dcl-parent-container: project - x-dcl-labels: userLabels - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - name - - project - properties: - displayName: - type: string - x-dcl-go-name: DisplayName - description: Name used for UI elements listing this Service. - name: - type: string - x-dcl-go-name: Name - description: 'Resource name for this Service. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]' - x-kubernetes-immutable: true - project: - type: string - x-dcl-go-name: Project - description: The project for the resource - x-kubernetes-immutable: true - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - parent: true - telemetry: - type: object - x-dcl-go-name: Telemetry - x-dcl-go-type: ServiceTelemetry - description: Configuration for how to query telemetry on a Service. 
- properties: - resourceName: - type: string - x-dcl-go-name: ResourceName - description: The full name of the resource that defines this service. - Formatted as described in https://cloud.google.com/apis/design/resource_names. - userLabels: - type: object - additionalProperties: - type: string - x-dcl-go-name: UserLabels - description: Labels which have been used to annotate the service. Label - keys must start with a letter. Label keys and values may contain lowercase - letters, numbers, underscores, and dashes. Label keys and values have - a maximum length of 63 characters, and must be less than 128 bytes in - size. Up to 64 label entries may be stored. For labels which do not have - a semantic value, the empty string may be supplied for the label value. diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_internal.go deleted file mode 100644 index a8bf358f75..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_internal.go +++ /dev/null @@ -1,1304 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func (r *Service) validate() error { - - if err := dcl.Required(r, "name"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Custom) { - if err := r.Custom.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Telemetry) { - if err := r.Telemetry.validate(); err != nil { - return err - } - } - return nil -} -func (r *ServiceCustom) validate() error { - return nil -} -func (r *ServiceTelemetry) validate() error { - return nil -} -func (r *Service) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://monitoring.googleapis.com/v3/", params) -} - -func (r *Service) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/services/{{name}}", nr.basePath(), userBasePath, params), nil -} - -func (r *Service) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/services", nr.basePath(), userBasePath, params), nil - -} - -func (r *Service) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/services?serviceId={{name}}", nr.basePath(), userBasePath, params), nil - -} - -func (r *Service) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": 
dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/services/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// serviceApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type serviceApiOperation interface { - do(context.Context, *Service, *Client) error -} - -// newUpdateServiceUpdateServiceRequest creates a request for an -// Service resource's UpdateService update type by filling in the update -// fields based on the intended state of the resource. -func newUpdateServiceUpdateServiceRequest(ctx context.Context, f *Service, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { - req["displayName"] = v - } - if v, err := expandServiceCustom(c, f.Custom, res); err != nil { - return nil, fmt.Errorf("error expanding Custom into custom: %w", err) - } else if v != nil { - req["custom"] = v - } - if v, err := expandServiceTelemetry(c, f.Telemetry, res); err != nil { - return nil, fmt.Errorf("error expanding Telemetry into telemetry: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["telemetry"] = v - } - if v := f.UserLabels; !dcl.IsEmptyValueIndirect(v) { - req["userLabels"] = v - } - return req, nil -} - -// marshalUpdateServiceUpdateServiceRequest converts the update into -// the final JSON request body. -func marshalUpdateServiceUpdateServiceRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateServiceUpdateServiceOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. 
- ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. - -func (op *updateServiceUpdateServiceOperation) do(ctx context.Context, r *Service, c *Client) error { - _, err := c.GetService(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateService") - if err != nil { - return err - } - mask := dcl.UpdateMask(op.FieldDiffs) - u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) - if err != nil { - return err - } - - req, err := newUpdateServiceUpdateServiceRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateServiceUpdateServiceRequest(c, req) - if err != nil { - return err - } - _, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - return nil -} - -func (c *Client) listServiceRaw(ctx context.Context, r *Service, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != ServiceMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listServiceOperation struct { - Services []map[string]interface{} `json:"services"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listService(ctx context.Context, r *Service, pageToken 
string, pageSize int32) ([]*Service, string, error) { - b, err := c.listServiceRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listServiceOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*Service - for _, v := range m.Services { - res, err := unmarshalMapService(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Project = r.Project - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllService(ctx context.Context, f func(*Service) bool, resources []*Service) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. - err := c.DeleteService(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteServiceOperation struct{} - -func (op *deleteServiceOperation) do(ctx context.Context, r *Service, c *Client) error { - r, err := c.GetService(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "Service not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetService checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return fmt.Errorf("failed to delete Service: %w", err) - } - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. 
-type createServiceOperation struct { - response map[string]interface{} -} - -func (op *createServiceOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createServiceOperation) do(ctx context.Context, r *Service, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - - o, err := dcl.ResponseBodyAsJSON(resp) - if err != nil { - return fmt.Errorf("error decoding response body into JSON: %w", err) - } - op.response = o - - if _, err := c.GetService(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getServiceRaw(ctx context.Context, r *Service) ([]byte, error) { - if dcl.IsZeroValue(r.Custom) { - r.Custom = &ServiceCustom{} - } - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) serviceDiffsForRawDesired(ctx context.Context, rawDesired *Service, opts ...dcl.ApplyOption) (initial, desired *Service, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
- var fetchState *Service - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*Service); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected Service, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetService(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a Service resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve Service resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that Service resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeServiceDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for Service: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for Service: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractServiceFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeServiceInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for Service: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeServiceDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for Service: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffService(c, desired, initial, opts...) 
- return initial, desired, diffs, err -} - -func canonicalizeServiceInitialState(rawInitial, rawDesired *Service) (*Service, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeServiceDesiredState(rawDesired, rawInitial *Service, opts ...dcl.ApplyOption) (*Service, error) { - - if dcl.IsZeroValue(rawDesired.Custom) { - rawDesired.Custom = &ServiceCustom{} - } - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - rawDesired.Custom = canonicalizeServiceCustom(rawDesired.Custom, nil, opts...) - rawDesired.Telemetry = canonicalizeServiceTelemetry(rawDesired.Telemetry, nil, opts...) - - return rawDesired, nil - } - canonicalDesired := &Service{} - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { - canonicalDesired.DisplayName = rawInitial.DisplayName - } else { - canonicalDesired.DisplayName = rawDesired.DisplayName - } - canonicalDesired.Custom = canonicalizeServiceCustom(rawDesired.Custom, rawInitial.Custom, opts...) - canonicalDesired.Telemetry = canonicalizeServiceTelemetry(rawDesired.Telemetry, rawInitial.Telemetry, opts...) - if dcl.IsZeroValue(rawDesired.UserLabels) || (dcl.IsEmptyValueIndirect(rawDesired.UserLabels) && dcl.IsEmptyValueIndirect(rawInitial.UserLabels)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- canonicalDesired.UserLabels = rawInitial.UserLabels - } else { - canonicalDesired.UserLabels = rawDesired.UserLabels - } - if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - return canonicalDesired, nil -} - -func canonicalizeServiceNewState(c *Client, rawNew, rawDesired *Service) (*Service, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } else { - if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Custom) && dcl.IsEmptyValueIndirect(rawDesired.Custom) { - rawNew.Custom = rawDesired.Custom - } else { - rawNew.Custom = canonicalizeNewServiceCustom(c, rawDesired.Custom, rawNew.Custom) - } - - if dcl.IsEmptyValueIndirect(rawNew.Telemetry) && dcl.IsEmptyValueIndirect(rawDesired.Telemetry) { - rawNew.Telemetry = rawDesired.Telemetry - } else { - rawNew.Telemetry = canonicalizeNewServiceTelemetry(c, rawDesired.Telemetry, rawNew.Telemetry) - } - - if dcl.IsEmptyValueIndirect(rawNew.UserLabels) && dcl.IsEmptyValueIndirect(rawDesired.UserLabels) { - rawNew.UserLabels = rawDesired.UserLabels - } else { - } - - rawNew.Project = rawDesired.Project - - return rawNew, nil -} - -func canonicalizeServiceCustom(des, initial *ServiceCustom, opts ...dcl.ApplyOption) *ServiceCustom { - if des == nil { - return initial - } - if des.empty { - return des - } - if initial == nil { - return des - } - - cDes := &ServiceCustom{} - - return cDes -} - -func canonicalizeServiceCustomSlice(des, initial 
[]ServiceCustom, opts ...dcl.ApplyOption) []ServiceCustom { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceCustom, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceCustom(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceCustom, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceCustom(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceCustom(c *Client, des, nw *ServiceCustom) *ServiceCustom { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceCustom while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewServiceCustomSet(c *Client, des, nw []ServiceCustom) []ServiceCustom { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceCustom - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceCustomNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceCustom(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceCustomSlice(c *Client, des, nw []ServiceCustom) []ServiceCustom { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceCustom - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceCustom(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceTelemetry(des, initial *ServiceTelemetry, opts ...dcl.ApplyOption) *ServiceTelemetry { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceTelemetry{} - - if dcl.StringCanonicalize(des.ResourceName, initial.ResourceName) || dcl.IsZeroValue(des.ResourceName) { - cDes.ResourceName = initial.ResourceName - } else { - cDes.ResourceName = des.ResourceName - } - - return cDes -} - -func canonicalizeServiceTelemetrySlice(des, initial []ServiceTelemetry, opts ...dcl.ApplyOption) []ServiceTelemetry { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceTelemetry, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceTelemetry(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceTelemetry, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceTelemetry(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceTelemetry(c *Client, des, nw *ServiceTelemetry) *ServiceTelemetry { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceTelemetry while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.ResourceName, nw.ResourceName) { - nw.ResourceName = des.ResourceName - } - - return nw -} - -func canonicalizeNewServiceTelemetrySet(c *Client, des, nw []ServiceTelemetry) []ServiceTelemetry { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceTelemetry - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceTelemetryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceTelemetry(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceTelemetrySlice(c *Client, des, nw []ServiceTelemetry) []ServiceTelemetry { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ServiceTelemetry - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceTelemetry(c, &d, &n)) - } - - return items -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
-func diffService(c *Client, desired, actual *Service, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceUpdateServiceOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Custom, actual.Custom, dcl.DiffInfo{ObjectFunction: compareServiceCustomNewStyle, EmptyObject: EmptyServiceCustom, OperationSelector: dcl.TriggersOperation("updateServiceUpdateServiceOperation")}, fn.AddNest("Custom")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Telemetry, actual.Telemetry, dcl.DiffInfo{ObjectFunction: compareServiceTelemetryNewStyle, EmptyObject: EmptyServiceTelemetry, OperationSelector: dcl.TriggersOperation("updateServiceUpdateServiceOperation")}, fn.AddNest("Telemetry")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.UserLabels, actual.UserLabels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceUpdateServiceOperation")}, fn.AddNest("UserLabels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} -func compareServiceCustomNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - return diffs, nil -} - -func compareServiceTelemetryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceTelemetry) - if !ok { - desiredNotPointer, ok := d.(ServiceTelemetry) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceTelemetry or *ServiceTelemetry", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceTelemetry) - if !ok { - actualNotPointer, ok := a.(ServiceTelemetry) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceTelemetry", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.ResourceName, actual.ResourceName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceUpdateServiceOperation")}, fn.AddNest("ResourceName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. 
-func (r *Service) urlNormalized() *Service { - normalized := dcl.Copy(*r).(Service) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) - normalized.Project = dcl.SelfLinkToName(r.Project) - return &normalized -} - -func (r *Service) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateService" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/services/{{name}}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the Service resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *Service) marshal(c *Client) ([]byte, error) { - m, err := expandService(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling Service: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalService decodes JSON responses into the Service resource schema. -func unmarshalService(b []byte, c *Client, res *Service) (*Service, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapService(m, c, res) -} - -func unmarshalMapService(m map[string]interface{}, c *Client, res *Service) (*Service, error) { - - flattened := flattenService(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandService expands Service into a JSON request object. 
-func expandService(c *Client, f *Service) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := dcl.DeriveField("projects/%s/services/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Name)); err != nil { - return nil, fmt.Errorf("error expanding Name into name: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - if v := f.DisplayName; dcl.ValueShouldBeSent(v) { - m["displayName"] = v - } - if v, err := expandServiceCustom(c, f.Custom, res); err != nil { - return nil, fmt.Errorf("error expanding Custom into custom: %w", err) - } else if v != nil { - m["custom"] = v - } - if v, err := expandServiceTelemetry(c, f.Telemetry, res); err != nil { - return nil, fmt.Errorf("error expanding Telemetry into telemetry: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["telemetry"] = v - } - if v := f.UserLabels; dcl.ValueShouldBeSent(v) { - m["userLabels"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - - return m, nil -} - -// flattenService flattens Service from a JSON request object into the -// Service type. 
-func flattenService(c *Client, i interface{}, res *Service) *Service { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &Service{} - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.DisplayName = dcl.FlattenString(m["displayName"]) - resultRes.Custom = flattenServiceCustom(c, m["custom"], res) - if _, ok := m["custom"]; !ok { - c.Config.Logger.Info("Using default value for custom") - resultRes.Custom = &ServiceCustom{} - } - resultRes.Telemetry = flattenServiceTelemetry(c, m["telemetry"], res) - resultRes.UserLabels = dcl.FlattenKeyValuePairs(m["userLabels"]) - resultRes.Project = dcl.FlattenString(m["project"]) - - return resultRes -} - -// expandServiceCustomMap expands the contents of ServiceCustom into a JSON -// request object. -func expandServiceCustomMap(c *Client, f map[string]ServiceCustom, res *Service) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceCustom(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceCustomSlice expands the contents of ServiceCustom into a JSON -// request object. -func expandServiceCustomSlice(c *Client, f []ServiceCustom, res *Service) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceCustom(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceCustomMap flattens the contents of ServiceCustom from a JSON -// response object. 
-func flattenServiceCustomMap(c *Client, i interface{}, res *Service) map[string]ServiceCustom { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceCustom{} - } - - if len(a) == 0 { - return map[string]ServiceCustom{} - } - - items := make(map[string]ServiceCustom) - for k, item := range a { - items[k] = *flattenServiceCustom(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceCustomSlice flattens the contents of ServiceCustom from a JSON -// response object. -func flattenServiceCustomSlice(c *Client, i interface{}, res *Service) []ServiceCustom { - a, ok := i.([]interface{}) - if !ok { - return []ServiceCustom{} - } - - if len(a) == 0 { - return []ServiceCustom{} - } - - items := make([]ServiceCustom, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceCustom(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceCustom expands an instance of ServiceCustom into a JSON -// request object. -func expandServiceCustom(c *Client, f *ServiceCustom, res *Service) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenServiceCustom flattens an instance of ServiceCustom from a JSON -// response object. -func flattenServiceCustom(c *Client, i interface{}, res *Service) *ServiceCustom { - _, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceCustom{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceCustom - } - - return r -} - -// expandServiceTelemetryMap expands the contents of ServiceTelemetry into a JSON -// request object. 
-func expandServiceTelemetryMap(c *Client, f map[string]ServiceTelemetry, res *Service) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceTelemetry(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceTelemetrySlice expands the contents of ServiceTelemetry into a JSON -// request object. -func expandServiceTelemetrySlice(c *Client, f []ServiceTelemetry, res *Service) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceTelemetry(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceTelemetryMap flattens the contents of ServiceTelemetry from a JSON -// response object. -func flattenServiceTelemetryMap(c *Client, i interface{}, res *Service) map[string]ServiceTelemetry { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceTelemetry{} - } - - if len(a) == 0 { - return map[string]ServiceTelemetry{} - } - - items := make(map[string]ServiceTelemetry) - for k, item := range a { - items[k] = *flattenServiceTelemetry(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceTelemetrySlice flattens the contents of ServiceTelemetry from a JSON -// response object. 
-func flattenServiceTelemetrySlice(c *Client, i interface{}, res *Service) []ServiceTelemetry { - a, ok := i.([]interface{}) - if !ok { - return []ServiceTelemetry{} - } - - if len(a) == 0 { - return []ServiceTelemetry{} - } - - items := make([]ServiceTelemetry, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceTelemetry(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceTelemetry expands an instance of ServiceTelemetry into a JSON -// request object. -func expandServiceTelemetry(c *Client, f *ServiceTelemetry, res *Service) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.ResourceName; !dcl.IsEmptyValueIndirect(v) { - m["resourceName"] = v - } - - return m, nil -} - -// flattenServiceTelemetry flattens an instance of ServiceTelemetry from a JSON -// response object. -func flattenServiceTelemetry(c *Client, i interface{}, res *Service) *ServiceTelemetry { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceTelemetry{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceTelemetry - } - r.ResourceName = dcl.FlattenString(m["resourceName"]) - - return r -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. 
-func (r *Service) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalService(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type serviceDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp serviceApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToServiceDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]serviceDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []serviceDiff - // For each operation name, create a serviceDiff which contains the operation. 
- for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := serviceDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToServiceApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToServiceApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (serviceApiOperation, error) { - switch opName { - - case "updateServiceUpdateServiceOperation": - return &updateServiceUpdateServiceOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractServiceFields(r *Service) error { - vCustom := r.Custom - if vCustom == nil { - // note: explicitly not the empty object. - vCustom = &ServiceCustom{} - } - if err := extractServiceCustomFields(r, vCustom); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vCustom) { - r.Custom = vCustom - } - vTelemetry := r.Telemetry - if vTelemetry == nil { - // note: explicitly not the empty object. - vTelemetry = &ServiceTelemetry{} - } - if err := extractServiceTelemetryFields(r, vTelemetry); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTelemetry) { - r.Telemetry = vTelemetry - } - return nil -} -func extractServiceCustomFields(r *Service, o *ServiceCustom) error { - return nil -} -func extractServiceTelemetryFields(r *Service, o *ServiceTelemetry) error { - return nil -} - -func postReadExtractServiceFields(r *Service) error { - vCustom := r.Custom - if vCustom == nil { - // note: explicitly not the empty object. 
- vCustom = &ServiceCustom{} - } - if err := postReadExtractServiceCustomFields(r, vCustom); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vCustom) { - r.Custom = vCustom - } - vTelemetry := r.Telemetry - if vTelemetry == nil { - // note: explicitly not the empty object. - vTelemetry = &ServiceTelemetry{} - } - if err := postReadExtractServiceTelemetryFields(r, vTelemetry); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTelemetry) { - r.Telemetry = vTelemetry - } - return nil -} -func postReadExtractServiceCustomFields(r *Service, o *ServiceCustom) error { - return nil -} -func postReadExtractServiceTelemetryFields(r *Service, o *ServiceTelemetry) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective.go deleted file mode 100644 index eba5c15664..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective.go +++ /dev/null @@ -1,1772 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type ServiceLevelObjective struct { - Name *string `json:"name"` - DisplayName *string `json:"displayName"` - ServiceLevelIndicator *ServiceLevelObjectiveServiceLevelIndicator `json:"serviceLevelIndicator"` - Goal *float64 `json:"goal"` - RollingPeriod *string `json:"rollingPeriod"` - CalendarPeriod *ServiceLevelObjectiveCalendarPeriodEnum `json:"calendarPeriod"` - CreateTime *string `json:"createTime"` - DeleteTime *string `json:"deleteTime"` - ServiceManagementOwned *bool `json:"serviceManagementOwned"` - UserLabels map[string]string `json:"userLabels"` - Project *string `json:"project"` - Service *string `json:"service"` -} - -func (r *ServiceLevelObjective) String() string { - return dcl.SprintResource(r) -} - -// The enum ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum. -type ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum string - -// ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnumRef returns a *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum with the value of string s -// If the empty string is provided, nil is returned. -func ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnumRef(s string) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum { - v := ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum(s) - return &v -} - -func (v ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"LATENCY_EXPERIENCE_UNSPECIFIED", "DELIGHTING", "SATISFYING", "ANNOYING"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum. -type ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum string - -// ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnumRef returns a *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum with the value of string s -// If the empty string is provided, nil is returned. -func ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnumRef(s string) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum { - v := ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum(s) - return &v -} - -func (v ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"LATENCY_EXPERIENCE_UNSPECIFIED", "DELIGHTING", "SATISFYING", "ANNOYING"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum. 
-type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum string - -// ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnumRef returns a *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum with the value of string s -// If the empty string is provided, nil is returned. -func ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnumRef(s string) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum { - v := ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum(s) - return &v -} - -func (v ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"LATENCY_EXPERIENCE_UNSPECIFIED", "DELIGHTING", "SATISFYING", "ANNOYING"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum. 
-type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum string - -// ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnumRef returns a *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum with the value of string s -// If the empty string is provided, nil is returned. -func ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnumRef(s string) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum { - v := ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum(s) - return &v -} - -func (v ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"LATENCY_EXPERIENCE_UNSPECIFIED", "DELIGHTING", "SATISFYING", "ANNOYING"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum ServiceLevelObjectiveCalendarPeriodEnum. -type ServiceLevelObjectiveCalendarPeriodEnum string - -// ServiceLevelObjectiveCalendarPeriodEnumRef returns a *ServiceLevelObjectiveCalendarPeriodEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func ServiceLevelObjectiveCalendarPeriodEnumRef(s string) *ServiceLevelObjectiveCalendarPeriodEnum { - v := ServiceLevelObjectiveCalendarPeriodEnum(s) - return &v -} - -func (v ServiceLevelObjectiveCalendarPeriodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"CALENDAR_PERIOD_UNSPECIFIED", "DAY", "WEEK", "FORTNIGHT", "MONTH", "QUARTER", "HALF", "YEAR"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "ServiceLevelObjectiveCalendarPeriodEnum", - Value: string(v), - Valid: []string{}, - } -} - -type ServiceLevelObjectiveServiceLevelIndicator struct { - empty bool `json:"-"` - BasicSli *ServiceLevelObjectiveServiceLevelIndicatorBasicSli `json:"basicSli"` - RequestBased *ServiceLevelObjectiveServiceLevelIndicatorRequestBased `json:"requestBased"` - WindowsBased *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased `json:"windowsBased"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicator ServiceLevelObjectiveServiceLevelIndicator - -func (r *ServiceLevelObjectiveServiceLevelIndicator) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicator - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicator - } else { - - r.BasicSli = res.BasicSli - - r.RequestBased = res.RequestBased - - r.WindowsBased = res.WindowsBased - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicator is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyServiceLevelObjectiveServiceLevelIndicator *ServiceLevelObjectiveServiceLevelIndicator = &ServiceLevelObjectiveServiceLevelIndicator{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicator) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicator) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicator) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorBasicSli struct { - empty bool `json:"-"` - Method []string `json:"method"` - Location []string `json:"location"` - Version []string `json:"version"` - Availability *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability `json:"availability"` - Latency *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency `json:"latency"` - OperationAvailability *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability `json:"operationAvailability"` - OperationLatency *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency `json:"operationLatency"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorBasicSli ServiceLevelObjectiveServiceLevelIndicatorBasicSli - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSli) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorBasicSli - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSli - } else { - - r.Method = res.Method - - r.Location = res.Location - - r.Version = res.Version - - r.Availability = res.Availability - - r.Latency = res.Latency - - r.OperationAvailability = res.OperationAvailability - - 
r.OperationLatency = res.OperationLatency - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorBasicSli is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSli *ServiceLevelObjectiveServiceLevelIndicatorBasicSli = &ServiceLevelObjectiveServiceLevelIndicatorBasicSli{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSli) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSli) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSli) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability struct { - empty bool `json:"-"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability - } else { - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability = &ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency struct { - empty bool `json:"-"` - Threshold *string `json:"threshold"` - Experience *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum `json:"experience"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency - } else { - - r.Threshold = res.Threshold - - r.Experience = res.Experience - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency = &ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability struct { - empty bool `json:"-"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability - } else { - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability = &ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency struct { - empty bool `json:"-"` - Threshold *string `json:"threshold"` - Experience *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum `json:"experience"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency - } else { - - r.Threshold = res.Threshold - - r.Experience = res.Experience - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency is -// empty. Go lacks global const objects, but this object should be treated -// as one. 
Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency = &ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorRequestBased struct { - empty bool `json:"-"` - GoodTotalRatio *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio `json:"goodTotalRatio"` - DistributionCut *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut `json:"distributionCut"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorRequestBased ServiceLevelObjectiveServiceLevelIndicatorRequestBased - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBased) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorRequestBased - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBased - } else { - - r.GoodTotalRatio = res.GoodTotalRatio - - r.DistributionCut = res.DistributionCut - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorRequestBased is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBased *ServiceLevelObjectiveServiceLevelIndicatorRequestBased = &ServiceLevelObjectiveServiceLevelIndicatorRequestBased{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBased) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBased) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBased) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio struct { - empty bool `json:"-"` - GoodServiceFilter *string `json:"goodServiceFilter"` - BadServiceFilter *string `json:"badServiceFilter"` - TotalServiceFilter *string `json:"totalServiceFilter"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio - } else { - - r.GoodServiceFilter = res.GoodServiceFilter - - r.BadServiceFilter = res.BadServiceFilter - - r.TotalServiceFilter = res.TotalServiceFilter - - } - return nil -} - -// This object is used to assert a desired state where this 
ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio = &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut struct { - empty bool `json:"-"` - DistributionFilter *string `json:"distributionFilter"` - Range *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange `json:"range"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut - } else { - - r.DistributionFilter = res.DistributionFilter - - r.Range = res.Range - - } - return nil -} - -// This object is used 
to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut = &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange struct { - empty bool `json:"-"` - Min *float64 `json:"min"` - Max *float64 `json:"max"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange - } else { - - r.Min = res.Min - - r.Max = res.Max - - } - return nil -} - -// This object is used to assert a desired state where this 
ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange = &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBased struct { - empty bool `json:"-"` - GoodBadMetricFilter *string `json:"goodBadMetricFilter"` - GoodTotalRatioThreshold *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold `json:"goodTotalRatioThreshold"` - MetricMeanInRange *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange `json:"metricMeanInRange"` - MetricSumInRange *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange `json:"metricSumInRange"` - WindowPeriod *string `json:"windowPeriod"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBased ServiceLevelObjectiveServiceLevelIndicatorWindowsBased - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBased - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var 
m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBased - } else { - - r.GoodBadMetricFilter = res.GoodBadMetricFilter - - r.GoodTotalRatioThreshold = res.GoodTotalRatioThreshold - - r.MetricMeanInRange = res.MetricMeanInRange - - r.MetricSumInRange = res.MetricSumInRange - - r.WindowPeriod = res.WindowPeriod - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBased is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBased *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBased{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold struct { - empty bool `json:"-"` - Performance *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance `json:"performance"` - BasicSliPerformance *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance `json:"basicSliPerformance"` - Threshold *float64 `json:"threshold"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold - 
-func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold - } else { - - r.Performance = res.Performance - - r.BasicSliPerformance = res.BasicSliPerformance - - r.Threshold = res.Threshold - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance struct { - empty bool `json:"-"` - GoodTotalRatio *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio 
`json:"goodTotalRatio"` - DistributionCut *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut `json:"distributionCut"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance - } else { - - r.GoodTotalRatio = res.GoodTotalRatio - - r.DistributionCut = res.DistributionCut - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio struct { - empty bool `json:"-"` - GoodServiceFilter *string `json:"goodServiceFilter"` - BadServiceFilter *string `json:"badServiceFilter"` - TotalServiceFilter *string `json:"totalServiceFilter"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio - } else { - - 
r.GoodServiceFilter = res.GoodServiceFilter - - r.BadServiceFilter = res.BadServiceFilter - - r.TotalServiceFilter = res.TotalServiceFilter - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut struct { - empty bool `json:"-"` - DistributionFilter *string `json:"distributionFilter"` - Range *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange `json:"range"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut 
ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut - } else { - - r.DistributionFilter = res.DistributionFilter - - r.Range = res.Range - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange struct { - empty bool `json:"-"` - Min *float64 `json:"min"` - Max *float64 `json:"max"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange - 
} else { - - r.Min = res.Min - - r.Max = res.Max - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance struct { - empty bool `json:"-"` - Method []string `json:"method"` - Location []string `json:"location"` - Version []string `json:"version"` - Availability *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability `json:"availability"` - Latency *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency `json:"latency"` - OperationAvailability 
*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability `json:"operationAvailability"` - OperationLatency *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency `json:"operationLatency"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance - } else { - - r.Method = res.Method - - r.Location = res.Location - - r.Version = res.Version - - r.Availability = res.Availability - - r.Latency = res.Latency - - r.OperationAvailability = res.OperationAvailability - - r.OperationLatency = res.OperationLatency - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability struct { - empty bool `json:"-"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability - } else { - - } - return nil -} - -// This object is used to assert a desired state where this 
ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency struct { - empty bool `json:"-"` - Threshold *string `json:"threshold"` - Experience *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum `json:"experience"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) UnmarshalJSON(data []byte) error { 
- var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency - } else { - - r.Threshold = res.Threshold - - r.Experience = res.Experience - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability struct { - empty bool `json:"-"` -} - -type 
jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability - } else { - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency struct { - empty bool `json:"-"` - Threshold *string `json:"threshold"` - Experience *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum `json:"experience"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency - if err := 
json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency - } else { - - r.Threshold = res.Threshold - - r.Experience = res.Experience - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange struct { - empty bool `json:"-"` - TimeSeries *string `json:"timeSeries"` - Range *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange `json:"range"` 
-} - -type jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange - } else { - - r.TimeSeries = res.TimeSeries - - r.Range = res.Range - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange struct { - empty bool `json:"-"` - Min *float64 `json:"min"` - Max *float64 `json:"max"` -} - -type 
jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange - } else { - - r.Min = res.Min - - r.Max = res.Max - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange struct { - empty bool `json:"-"` - TimeSeries *string `json:"timeSeries"` - Range 
*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange `json:"range"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange - } else { - - r.TimeSeries = res.TimeSeries - - r.Range = res.Range - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange struct { - empty bool `json:"-"` - Min *float64 `json:"min"` - Max *float64 `json:"max"` -} - -type jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) UnmarshalJSON(data []byte) error { - var res jsonServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange - } else { - - r.Min = res.Min - - r.Max = res.Max - - } - return nil -} - -// This object is used to assert a desired state where this ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange{empty: true} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) Empty() bool { - return r.empty -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) String() string { - return dcl.SprintResource(r) -} - -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. -func (r *ServiceLevelObjective) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "monitoring", - Type: "ServiceLevelObjective", - Version: "monitoring", - } -} - -func (r *ServiceLevelObjective) ID() (string, error) { - if err := extractServiceLevelObjectiveFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "display_name": dcl.ValueOrEmptyString(nr.DisplayName), - "service_level_indicator": dcl.ValueOrEmptyString(nr.ServiceLevelIndicator), - "goal": dcl.ValueOrEmptyString(nr.Goal), - "rolling_period": dcl.ValueOrEmptyString(nr.RollingPeriod), - "calendar_period": dcl.ValueOrEmptyString(nr.CalendarPeriod), - "create_time": dcl.ValueOrEmptyString(nr.CreateTime), - "delete_time": dcl.ValueOrEmptyString(nr.DeleteTime), - "service_management_owned": dcl.ValueOrEmptyString(nr.ServiceManagementOwned), - "user_labels": dcl.ValueOrEmptyString(nr.UserLabels), - "project": dcl.ValueOrEmptyString(nr.Project), - 
"service": dcl.ValueOrEmptyString(nr.Service), - } - return dcl.Nprintf("projects/{{project}}/services/{{service}}/serviceLevelObjectives/{{name}}", params), nil -} - -const ServiceLevelObjectiveMaxPage = -1 - -type ServiceLevelObjectiveList struct { - Items []*ServiceLevelObjective - - nextToken string - - pageSize int32 - - resource *ServiceLevelObjective -} - -func (l *ServiceLevelObjectiveList) HasNext() bool { - return l.nextToken != "" -} - -func (l *ServiceLevelObjectiveList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listServiceLevelObjective(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListServiceLevelObjective(ctx context.Context, project, service string) (*ServiceLevelObjectiveList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListServiceLevelObjectiveWithMaxResults(ctx, project, service, ServiceLevelObjectiveMaxPage) - -} - -func (c *Client) ListServiceLevelObjectiveWithMaxResults(ctx context.Context, project, service string, pageSize int32) (*ServiceLevelObjectiveList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. 
- r := &ServiceLevelObjective{ - Project: &project, - Service: &service, - } - items, token, err := c.listServiceLevelObjective(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &ServiceLevelObjectiveList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetServiceLevelObjective(ctx context.Context, r *ServiceLevelObjective) (*ServiceLevelObjective, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. - extractServiceLevelObjectiveFields(r) - - b, err := c.getServiceLevelObjectiveRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalServiceLevelObjective(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Service = r.Service - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeServiceLevelObjectiveNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractServiceLevelObjectiveFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteServiceLevelObjective(ctx context.Context, r *ServiceLevelObjective) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return 
fmt.Errorf("ServiceLevelObjective resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting ServiceLevelObjective...") - deleteOp := deleteServiceLevelObjectiveOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllServiceLevelObjective deletes all resources that the filter functions returns true on. -func (c *Client) DeleteAllServiceLevelObjective(ctx context.Context, project, service string, filter func(*ServiceLevelObjective) bool) error { - listObj, err := c.ListServiceLevelObjective(ctx, project, service) - if err != nil { - return err - } - - err = c.deleteAllServiceLevelObjective(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllServiceLevelObjective(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyServiceLevelObjective(ctx context.Context, rawDesired *ServiceLevelObjective, opts ...dcl.ApplyOption) (*ServiceLevelObjective, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *ServiceLevelObjective - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyServiceLevelObjectiveHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. 
- if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyServiceLevelObjectiveHelper(c *Client, ctx context.Context, rawDesired *ServiceLevelObjective, opts ...dcl.ApplyOption) (*ServiceLevelObjective, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyServiceLevelObjective...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. - if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractServiceLevelObjectiveFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.serviceLevelObjectiveDiffsForRawDesired(ctx, rawDesired, opts...) - if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToServiceLevelObjectiveDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). 
- - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []serviceLevelObjectiveApiOperation - if create { - ops = append(ops, &createServiceLevelObjectiveOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyServiceLevelObjectiveDiff(c, ctx, desired, rawDesired, ops, opts...) 
-} - -func applyServiceLevelObjectiveDiff(c *Client, ctx context.Context, desired *ServiceLevelObjective, rawDesired *ServiceLevelObjective, ops []serviceLevelObjectiveApiOperation, opts ...dcl.ApplyOption) (*ServiceLevelObjective, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetServiceLevelObjective(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createServiceLevelObjectiveOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapServiceLevelObjective(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeServiceLevelObjectiveNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeServiceLevelObjectiveNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeServiceLevelObjectiveDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractServiceLevelObjectiveFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. 
- if err := postReadExtractServiceLevelObjectiveFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffServiceLevelObjective(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective.yaml deleted file mode 100644 index 4c123de045..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective.yaml +++ /dev/null @@ -1,705 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-info: - title: Monitoring/ServiceLevelObjective - description: The Monitoring ServiceLevelObjective resource - x-dcl-struct-name: ServiceLevelObjective - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a ServiceLevelObjective - parameters: - - name: serviceLevelObjective - required: true - description: A full instance of a ServiceLevelObjective - apply: - description: The function used to apply information about a ServiceLevelObjective - parameters: - - name: serviceLevelObjective - required: true - description: A full instance of a ServiceLevelObjective - delete: - description: The function used to delete a ServiceLevelObjective - parameters: - - name: serviceLevelObjective - required: true - description: A full instance of a ServiceLevelObjective - deleteAll: - description: The function used to delete all ServiceLevelObjective - parameters: - - name: project - required: true - schema: - type: string - - name: service - required: true - schema: - type: string - list: - description: The function used to list information about many ServiceLevelObjective - parameters: - - name: project - required: true - schema: - type: string - - name: service - required: true - schema: - type: string -components: - schemas: - ServiceLevelObjective: - title: ServiceLevelObjective - x-dcl-id: projects/{{project}}/services/{{service}}/serviceLevelObjectives/{{name}} - x-dcl-parent-container: project - x-dcl-labels: userLabels - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - name - - goal - - project - - service - properties: - calendarPeriod: - type: string - x-dcl-go-name: CalendarPeriod - x-dcl-go-type: ServiceLevelObjectiveCalendarPeriodEnum - description: 'A calendar period, semantically "since the start of the current - ``". At this time, only `DAY`, `WEEK`, `FORTNIGHT`, and `MONTH` are supported. 
- Possible values: CALENDAR_PERIOD_UNSPECIFIED, DAY, WEEK, FORTNIGHT, MONTH, - QUARTER, HALF, YEAR' - x-dcl-conflicts: - - rollingPeriod - enum: - - CALENDAR_PERIOD_UNSPECIFIED - - DAY - - WEEK - - FORTNIGHT - - MONTH - - QUARTER - - HALF - - YEAR - createTime: - type: string - format: date-time - x-dcl-go-name: CreateTime - readOnly: true - description: Time stamp of the `Create` or most recent `Update` command - on this `Slo`. - x-kubernetes-immutable: true - deleteTime: - type: string - format: date-time - x-dcl-go-name: DeleteTime - readOnly: true - description: Time stamp of the `Update` or `Delete` command that made this - no longer a current `Slo`. This field is not populated in `ServiceLevelObjective`s - returned from calls to `GetServiceLevelObjective` and `ListServiceLevelObjectives`, - because it is always empty in the current version. It is populated in - `ServiceLevelObjective`s representing previous versions in the output - of `ListServiceLevelObjectiveVersions`. Because all old configuration - versions are stored, `Update` operations mark the obsoleted version as - deleted. - x-kubernetes-immutable: true - displayName: - type: string - x-dcl-go-name: DisplayName - description: Name used for UI elements listing this SLO. - goal: - type: number - format: double - x-dcl-go-name: Goal - description: The fraction of service that must be good in order for this - objective to be met. `0 < goal <= 0.999`. - name: - type: string - x-dcl-go-name: Name - description: 'Resource name for this `ServiceLevelObjective`. 
The format - is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]' - x-kubernetes-immutable: true - project: - type: string - x-dcl-go-name: Project - description: The project for the resource - x-kubernetes-immutable: true - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - parent: true - rollingPeriod: - type: string - x-dcl-go-name: RollingPeriod - description: A rolling time period, semantically "in the past ``". Must - be an integer multiple of 1 day no larger than 30 days. - x-dcl-conflicts: - - calendarPeriod - service: - type: string - x-dcl-go-name: Service - description: The service for the resource - x-kubernetes-immutable: true - x-dcl-references: - - resource: Monitoring/Service - field: name - parent: true - serviceLevelIndicator: - type: object - x-dcl-go-name: ServiceLevelIndicator - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicator - description: The definition of good service, used to measure and calculate - the quality of the `Service`'s performance with respect to a single aspect - of service quality. - properties: - basicSli: - type: object - x-dcl-go-name: BasicSli - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSli - description: Basic SLI on a well-known service type. - x-dcl-conflicts: - - requestBased - - windowsBased - properties: - availability: - type: object - x-dcl-go-name: Availability - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability - description: Good service is defined to be the count of requests - made to this service that return successfully. - x-dcl-conflicts: - - latency - - operationAvailability - - operationLatency - latency: - type: object - x-dcl-go-name: Latency - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency - description: Good service is defined to be the count of requests - made to this service that are fast enough with respect to `latency.threshold`. 
- x-dcl-conflicts: - - availability - - operationAvailability - - operationLatency - properties: - experience: - type: string - x-dcl-go-name: Experience - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum - description: 'A description of the experience associated with - failing requests. Possible values: LATENCY_EXPERIENCE_UNSPECIFIED, - DELIGHTING, SATISFYING, ANNOYING' - enum: - - LATENCY_EXPERIENCE_UNSPECIFIED - - DELIGHTING - - SATISFYING - - ANNOYING - threshold: - type: string - x-dcl-go-name: Threshold - description: Good service is defined to be the count of requests - made to this service that return in no more than `threshold`. - location: - type: array - x-dcl-go-name: Location - description: 'OPTIONAL: The set of locations to which this SLI is - relevant. Telemetry from other locations will not be used to calculate - performance for this SLI. If omitted, this SLI applies to all - locations in which the Service has activity. For service types - that don''t support breaking down by location, setting this field - will result in an error.' - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - method: - type: array - x-dcl-go-name: Method - description: 'OPTIONAL: The set of RPCs to which this SLI is relevant. - Telemetry from other methods will not be used to calculate performance - for this SLI. If omitted, this SLI applies to all the Service''s - methods. For service types that don''t support breaking down by - method, setting this field will result in an error.' 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - operationAvailability: - type: object - x-dcl-go-name: OperationAvailability - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability - description: Good service is defined to be the count of operations - performed by this service that return successfully - x-dcl-conflicts: - - availability - - latency - - operationLatency - operationLatency: - type: object - x-dcl-go-name: OperationLatency - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency - description: Good service is defined to be the count of operations - performed by this service that are fast enough with respect to - `operation_latency.threshold`. - x-dcl-conflicts: - - availability - - latency - - operationAvailability - properties: - experience: - type: string - x-dcl-go-name: Experience - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum - description: 'A description of the experience associated with - failing requests. Possible values: LATENCY_EXPERIENCE_UNSPECIFIED, - DELIGHTING, SATISFYING, ANNOYING' - enum: - - LATENCY_EXPERIENCE_UNSPECIFIED - - DELIGHTING - - SATISFYING - - ANNOYING - threshold: - type: string - x-dcl-go-name: Threshold - description: Good service is defined to be the count of operations - that are completed in no more than `threshold`. - version: - type: array - x-dcl-go-name: Version - description: 'OPTIONAL: The set of API versions to which this SLI - is relevant. Telemetry from other API versions will not be used - to calculate performance for this SLI. If omitted, this SLI applies - to all API versions. For service types that don''t support breaking - down by version, setting this field will result in an error.' 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - requestBased: - type: object - x-dcl-go-name: RequestBased - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorRequestBased - description: Request-based SLIs - x-dcl-conflicts: - - basicSli - - windowsBased - properties: - distributionCut: - type: object - x-dcl-go-name: DistributionCut - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut - description: '`distribution_cut` is used when `good_service` is - a count of values aggregated in a `Distribution` that fall into - a good range. The `total_service` is the total count of all values - aggregated in the `Distribution`.' - x-dcl-conflicts: - - goodTotalRatio - properties: - distributionFilter: - type: string - x-dcl-go-name: DistributionFilter - description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - specifying a `TimeSeries` aggregating values. Must have `ValueType - = DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = - CUMULATIVE`. - range: - type: object - x-dcl-go-name: Range - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange - description: Range of values considered "good." For a one-sided - range, set one bound to an infinite value. - properties: - max: - type: number - format: double - x-dcl-go-name: Max - description: Range maximum. - min: - type: number - format: double - x-dcl-go-name: Min - description: Range minimum. - goodTotalRatio: - type: object - x-dcl-go-name: GoodTotalRatio - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio - description: '`good_total_ratio` is used when the ratio of `good_service` - to `total_service` is computed from two `TimeSeries`.' 
- x-dcl-conflicts: - - distributionCut - properties: - badServiceFilter: - type: string - x-dcl-go-name: BadServiceFilter - description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - specifying a `TimeSeries` quantifying bad service, either - demanded service that was not provided or demanded service - that was of inadequate quality. Must have `ValueType = DOUBLE` - or `ValueType = INT64` and must have `MetricKind = DELTA` - or `MetricKind = CUMULATIVE`. - goodServiceFilter: - type: string - x-dcl-go-name: GoodServiceFilter - description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - specifying a `TimeSeries` quantifying good service provided. - Must have `ValueType = DOUBLE` or `ValueType = INT64` and - must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. - totalServiceFilter: - type: string - x-dcl-go-name: TotalServiceFilter - description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - specifying a `TimeSeries` quantifying total demanded service. - Must have `ValueType = DOUBLE` or `ValueType = INT64` and - must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. - windowsBased: - type: object - x-dcl-go-name: WindowsBased - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBased - description: Windows-based SLIs - x-dcl-conflicts: - - basicSli - - requestBased - properties: - goodBadMetricFilter: - type: string - x-dcl-go-name: GoodBadMetricFilter - description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - specifying a `TimeSeries` with `ValueType = BOOL`. The window - is good if any `true` values appear in the window. 
- x-dcl-conflicts: - - goodTotalRatioThreshold - - metricMeanInRange - - metricSumInRange - goodTotalRatioThreshold: - type: object - x-dcl-go-name: GoodTotalRatioThreshold - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold - description: A window is good if its `performance` is high enough. - x-dcl-conflicts: - - goodBadMetricFilter - - metricMeanInRange - - metricSumInRange - properties: - basicSliPerformance: - type: object - x-dcl-go-name: BasicSliPerformance - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance - description: '`BasicSli` to evaluate to judge window quality.' - x-dcl-conflicts: - - performance - properties: - availability: - type: object - x-dcl-go-name: Availability - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability - description: Good service is defined to be the count of - requests made to this service that return successfully. - x-dcl-conflicts: - - latency - - operationAvailability - - operationLatency - latency: - type: object - x-dcl-go-name: Latency - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency - description: Good service is defined to be the count of - requests made to this service that are fast enough with - respect to `latency.threshold`. - x-dcl-conflicts: - - availability - - operationAvailability - - operationLatency - properties: - experience: - type: string - x-dcl-go-name: Experience - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum - description: 'A description of the experience associated - with failing requests. 
Possible values: LATENCY_EXPERIENCE_UNSPECIFIED, - DELIGHTING, SATISFYING, ANNOYING' - enum: - - LATENCY_EXPERIENCE_UNSPECIFIED - - DELIGHTING - - SATISFYING - - ANNOYING - threshold: - type: string - x-dcl-go-name: Threshold - description: Good service is defined to be the count - of requests made to this service that return in no - more than `threshold`. - location: - type: array - x-dcl-go-name: Location - description: 'OPTIONAL: The set of locations to which this - SLI is relevant. Telemetry from other locations will not - be used to calculate performance for this SLI. If omitted, - this SLI applies to all locations in which the Service - has activity. For service types that don''t support breaking - down by location, setting this field will result in an - error.' - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - method: - type: array - x-dcl-go-name: Method - description: 'OPTIONAL: The set of RPCs to which this SLI - is relevant. Telemetry from other methods will not be - used to calculate performance for this SLI. If omitted, - this SLI applies to all the Service''s methods. For service - types that don''t support breaking down by method, setting - this field will result in an error.' 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - operationAvailability: - type: object - x-dcl-go-name: OperationAvailability - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability - description: Good service is defined to be the count of - operations performed by this service that return successfully - x-dcl-conflicts: - - availability - - latency - - operationLatency - operationLatency: - type: object - x-dcl-go-name: OperationLatency - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency - description: Good service is defined to be the count of - operations performed by this service that are fast enough - with respect to `operation_latency.threshold`. - x-dcl-conflicts: - - availability - - latency - - operationAvailability - properties: - experience: - type: string - x-dcl-go-name: Experience - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum - description: 'A description of the experience associated - with failing requests. Possible values: LATENCY_EXPERIENCE_UNSPECIFIED, - DELIGHTING, SATISFYING, ANNOYING' - enum: - - LATENCY_EXPERIENCE_UNSPECIFIED - - DELIGHTING - - SATISFYING - - ANNOYING - threshold: - type: string - x-dcl-go-name: Threshold - description: Good service is defined to be the count - of operations that are completed in no more than `threshold`. - version: - type: array - x-dcl-go-name: Version - description: 'OPTIONAL: The set of API versions to which - this SLI is relevant. Telemetry from other API versions - will not be used to calculate performance for this SLI. - If omitted, this SLI applies to all API versions. For - service types that don''t support breaking down by version, - setting this field will result in an error.' 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - performance: - type: object - x-dcl-go-name: Performance - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance - description: '`RequestBasedSli` to evaluate to judge window - quality.' - x-dcl-conflicts: - - basicSliPerformance - properties: - distributionCut: - type: object - x-dcl-go-name: DistributionCut - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut - description: '`distribution_cut` is used when `good_service` - is a count of values aggregated in a `Distribution` that - fall into a good range. The `total_service` is the total - count of all values aggregated in the `Distribution`.' - x-dcl-conflicts: - - goodTotalRatio - properties: - distributionFilter: - type: string - x-dcl-go-name: DistributionFilter - description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - specifying a `TimeSeries` aggregating values. Must - have `ValueType = DISTRIBUTION` and `MetricKind = - DELTA` or `MetricKind = CUMULATIVE`. - range: - type: object - x-dcl-go-name: Range - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange - description: Range of values considered "good." For - a one-sided range, set one bound to an infinite value. - properties: - max: - type: number - format: double - x-dcl-go-name: Max - description: Range maximum. - min: - type: number - format: double - x-dcl-go-name: Min - description: Range minimum. - goodTotalRatio: - type: object - x-dcl-go-name: GoodTotalRatio - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio - description: '`good_total_ratio` is used when the ratio - of `good_service` to `total_service` is computed from - two `TimeSeries`.' 
- x-dcl-conflicts: - - distributionCut - properties: - badServiceFilter: - type: string - x-dcl-go-name: BadServiceFilter - description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - specifying a `TimeSeries` quantifying bad service, - either demanded service that was not provided or demanded - service that was of inadequate quality. Must have - `ValueType = DOUBLE` or `ValueType = INT64` and must - have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. - goodServiceFilter: - type: string - x-dcl-go-name: GoodServiceFilter - description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - specifying a `TimeSeries` quantifying good service - provided. Must have `ValueType = DOUBLE` or `ValueType - = INT64` and must have `MetricKind = DELTA` or `MetricKind - = CUMULATIVE`. - totalServiceFilter: - type: string - x-dcl-go-name: TotalServiceFilter - description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - specifying a `TimeSeries` quantifying total demanded - service. Must have `ValueType = DOUBLE` or `ValueType - = INT64` and must have `MetricKind = DELTA` or `MetricKind - = CUMULATIVE`. - threshold: - type: number - format: double - x-dcl-go-name: Threshold - description: If window `performance >= threshold`, the window - is counted as good. - metricMeanInRange: - type: object - x-dcl-go-name: MetricMeanInRange - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange - description: A window is good if the metric's value is in a good - range, averaged across returned streams. - x-dcl-conflicts: - - goodBadMetricFilter - - goodTotalRatioThreshold - - metricSumInRange - properties: - range: - type: object - x-dcl-go-name: Range - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange - description: Range of values considered "good." For a one-sided - range, set one bound to an infinite value. 
- properties: - max: - type: number - format: double - x-dcl-go-name: Max - description: Range maximum. - min: - type: number - format: double - x-dcl-go-name: Min - description: Range minimum. - timeSeries: - type: string - x-dcl-go-name: TimeSeries - description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - specifying the `TimeSeries` to use for evaluating window quality. - metricSumInRange: - type: object - x-dcl-go-name: MetricSumInRange - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange - description: A window is good if the metric's value is in a good - range, summed across returned streams. - x-dcl-conflicts: - - goodBadMetricFilter - - goodTotalRatioThreshold - - metricMeanInRange - properties: - range: - type: object - x-dcl-go-name: Range - x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange - description: Range of values considered "good." For a one-sided - range, set one bound to an infinite value. - properties: - max: - type: number - format: double - x-dcl-go-name: Max - description: Range maximum. - min: - type: number - format: double - x-dcl-go-name: Min - description: Range minimum. - timeSeries: - type: string - x-dcl-go-name: TimeSeries - description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) - specifying the `TimeSeries` to use for evaluating window quality. - windowPeriod: - type: string - x-dcl-go-name: WindowPeriod - description: Duration over which window quality is evaluated. Must - be an integer fraction of a day and at least `60s`. - serviceManagementOwned: - type: boolean - x-dcl-go-name: ServiceManagementOwned - readOnly: true - description: Output only. If set, this SLO is managed at the [Service Management](https://cloud.google.com/service-management/overview) - level. Therefore the service yaml file is the source of truth for this - SLO, and API `Update` and `Delete` operations are forbidden. 
- x-kubernetes-immutable: true - userLabels: - type: object - additionalProperties: - type: string - x-dcl-go-name: UserLabels - description: Labels which have been used to annotate the service-level objective. - Label keys must start with a letter. Label keys and values may contain - lowercase letters, numbers, underscores, and dashes. Label keys and values - have a maximum length of 63 characters, and must be less than 128 bytes - in size. Up to 64 label entries may be stored. For labels which do not - have a semantic value, the empty string may be supplied for the label - value. diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective_internal.go deleted file mode 100644 index e3045282a3..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective_internal.go +++ /dev/null @@ -1,9260 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func (r *ServiceLevelObjective) validate() error { - - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"RollingPeriod", "CalendarPeriod"}, r.RollingPeriod, r.CalendarPeriod); err != nil { - return err - } - if err := dcl.Required(r, "name"); err != nil { - return err - } - if err := dcl.Required(r, "goal"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Service, "Service"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.ServiceLevelIndicator) { - if err := r.ServiceLevelIndicator.validate(); err != nil { - return err - } - } - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicator) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"BasicSli", "RequestBased", "WindowsBased"}, r.BasicSli, r.RequestBased, r.WindowsBased); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.BasicSli) { - if err := r.BasicSli.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.RequestBased) { - if err := r.RequestBased.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.WindowsBased) { - if err := r.WindowsBased.validate(); err != nil { - return err - } - } - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSli) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Availability", "Latency", "OperationAvailability", "OperationLatency"}, r.Availability, r.Latency, r.OperationAvailability, r.OperationLatency); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Availability) { - if err := r.Availability.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Latency) { - if err := r.Latency.validate(); err 
!= nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.OperationAvailability) { - if err := r.OperationAvailability.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.OperationLatency) { - if err := r.OperationLatency.validate(); err != nil { - return err - } - } - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability) validate() error { - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) validate() error { - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability) validate() error { - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) validate() error { - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBased) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"GoodTotalRatio", "DistributionCut"}, r.GoodTotalRatio, r.DistributionCut); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.GoodTotalRatio) { - if err := r.GoodTotalRatio.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.DistributionCut) { - if err := r.DistributionCut.validate(); err != nil { - return err - } - } - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) validate() error { - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) validate() error { - if !dcl.IsEmptyValueIndirect(r.Range) { - if err := r.Range.validate(); err != nil { - return err - } - } - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) validate() error { - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"GoodBadMetricFilter", "GoodTotalRatioThreshold", "MetricMeanInRange", "MetricSumInRange"}, r.GoodBadMetricFilter, 
r.GoodTotalRatioThreshold, r.MetricMeanInRange, r.MetricSumInRange); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.GoodTotalRatioThreshold) { - if err := r.GoodTotalRatioThreshold.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.MetricMeanInRange) { - if err := r.MetricMeanInRange.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.MetricSumInRange) { - if err := r.MetricSumInRange.validate(); err != nil { - return err - } - } - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Performance", "BasicSliPerformance"}, r.Performance, r.BasicSliPerformance); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Performance) { - if err := r.Performance.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.BasicSliPerformance) { - if err := r.BasicSliPerformance.validate(); err != nil { - return err - } - } - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"GoodTotalRatio", "DistributionCut"}, r.GoodTotalRatio, r.DistributionCut); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.GoodTotalRatio) { - if err := r.GoodTotalRatio.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.DistributionCut) { - if err := r.DistributionCut.validate(); err != nil { - return err - } - } - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) validate() error { - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) validate() error { - if !dcl.IsEmptyValueIndirect(r.Range) { - if err := r.Range.validate(); err != nil { - return err - } 
- } - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) validate() error { - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Availability", "Latency", "OperationAvailability", "OperationLatency"}, r.Availability, r.Latency, r.OperationAvailability, r.OperationLatency); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Availability) { - if err := r.Availability.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Latency) { - if err := r.Latency.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.OperationAvailability) { - if err := r.OperationAvailability.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.OperationLatency) { - if err := r.OperationLatency.validate(); err != nil { - return err - } - } - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability) validate() error { - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) validate() error { - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability) validate() error { - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) validate() error { - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) validate() error { - if !dcl.IsEmptyValueIndirect(r.Range) { - if err := r.Range.validate(); err != nil { - return err - } - } - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) 
validate() error { - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) validate() error { - if !dcl.IsEmptyValueIndirect(r.Range) { - if err := r.Range.validate(); err != nil { - return err - } - } - return nil -} -func (r *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) validate() error { - return nil -} -func (r *ServiceLevelObjective) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://monitoring.googleapis.com/v3/", params) -} - -func (r *ServiceLevelObjective) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "service": dcl.ValueOrEmptyString(nr.Service), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/services/{{service}}/serviceLevelObjectives/{{name}}", nr.basePath(), userBasePath, params), nil -} - -func (r *ServiceLevelObjective) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "service": dcl.ValueOrEmptyString(nr.Service), - } - return dcl.URL("projects/{{project}}/services/{{service}}/serviceLevelObjectives", nr.basePath(), userBasePath, params), nil - -} - -func (r *ServiceLevelObjective) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "service": dcl.ValueOrEmptyString(nr.Service), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/services/{{service}}/serviceLevelObjectives?serviceLevelObjectiveId={{name}}", nr.basePath(), userBasePath, params), nil - -} - -func (r *ServiceLevelObjective) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": 
dcl.ValueOrEmptyString(nr.Project), - "service": dcl.ValueOrEmptyString(nr.Service), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/services/{{service}}/serviceLevelObjectives/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// serviceLevelObjectiveApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type serviceLevelObjectiveApiOperation interface { - do(context.Context, *ServiceLevelObjective, *Client) error -} - -// newUpdateServiceLevelObjectiveUpdateServiceLevelObjectiveRequest creates a request for an -// ServiceLevelObjective resource's UpdateServiceLevelObjective update type by filling in the update -// fields based on the intended state of the resource. -func newUpdateServiceLevelObjectiveUpdateServiceLevelObjectiveRequest(ctx context.Context, f *ServiceLevelObjective, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { - req["displayName"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicator(c, f.ServiceLevelIndicator, res); err != nil { - return nil, fmt.Errorf("error expanding ServiceLevelIndicator into serviceLevelIndicator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["serviceLevelIndicator"] = v - } - if v := f.Goal; !dcl.IsEmptyValueIndirect(v) { - req["goal"] = v - } - if v := f.RollingPeriod; !dcl.IsEmptyValueIndirect(v) { - req["rollingPeriod"] = v - } - if v := f.CalendarPeriod; !dcl.IsEmptyValueIndirect(v) { - req["calendarPeriod"] = v - } - if v := f.UserLabels; !dcl.IsEmptyValueIndirect(v) { - req["userLabels"] = v - } - return req, nil -} - -// marshalUpdateServiceLevelObjectiveUpdateServiceLevelObjectiveRequest converts the update into -// the final JSON request body. 
-func marshalUpdateServiceLevelObjectiveUpdateServiceLevelObjectiveRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. - -func (op *updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation) do(ctx context.Context, r *ServiceLevelObjective, c *Client) error { - _, err := c.GetServiceLevelObjective(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateServiceLevelObjective") - if err != nil { - return err - } - mask := dcl.UpdateMask(op.FieldDiffs) - u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) - if err != nil { - return err - } - - req, err := newUpdateServiceLevelObjectiveUpdateServiceLevelObjectiveRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateServiceLevelObjectiveUpdateServiceLevelObjectiveRequest(c, req) - if err != nil { - return err - } - _, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - return nil -} - -func (c *Client) listServiceLevelObjectiveRaw(ctx context.Context, r *ServiceLevelObjective, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" 
{ - m["pageToken"] = pageToken - } - - if pageSize != ServiceLevelObjectiveMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listServiceLevelObjectiveOperation struct { - ServiceLevelObjectives []map[string]interface{} `json:"serviceLevelObjectives"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listServiceLevelObjective(ctx context.Context, r *ServiceLevelObjective, pageToken string, pageSize int32) ([]*ServiceLevelObjective, string, error) { - b, err := c.listServiceLevelObjectiveRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listServiceLevelObjectiveOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*ServiceLevelObjective - for _, v := range m.ServiceLevelObjectives { - res, err := unmarshalMapServiceLevelObjective(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Project = r.Project - res.Service = r.Service - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllServiceLevelObjective(ctx context.Context, f func(*ServiceLevelObjective) bool, resources []*ServiceLevelObjective) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
- err := c.DeleteServiceLevelObjective(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteServiceLevelObjectiveOperation struct{} - -func (op *deleteServiceLevelObjectiveOperation) do(ctx context.Context, r *ServiceLevelObjective, c *Client) error { - r, err := c.GetServiceLevelObjective(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "ServiceLevelObjective not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetServiceLevelObjective checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return fmt.Errorf("failed to delete ServiceLevelObjective: %w", err) - } - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. 
-type createServiceLevelObjectiveOperation struct { - response map[string]interface{} -} - -func (op *createServiceLevelObjectiveOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createServiceLevelObjectiveOperation) do(ctx context.Context, r *ServiceLevelObjective, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - - o, err := dcl.ResponseBodyAsJSON(resp) - if err != nil { - return fmt.Errorf("error decoding response body into JSON: %w", err) - } - op.response = o - - if _, err := c.GetServiceLevelObjective(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getServiceLevelObjectiveRaw(ctx context.Context, r *ServiceLevelObjective) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) serviceLevelObjectiveDiffsForRawDesired(ctx context.Context, rawDesired *ServiceLevelObjective, opts ...dcl.ApplyOption) (initial, desired *ServiceLevelObjective, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
- var fetchState *ServiceLevelObjective - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*ServiceLevelObjective); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected ServiceLevelObjective, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetServiceLevelObjective(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a ServiceLevelObjective resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve ServiceLevelObjective resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that ServiceLevelObjective resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeServiceLevelObjectiveDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for ServiceLevelObjective: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for ServiceLevelObjective: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractServiceLevelObjectiveFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeServiceLevelObjectiveInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for ServiceLevelObjective: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeServiceLevelObjectiveDesiredState(rawDesired, rawInitial, opts...) 
- if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for ServiceLevelObjective: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffServiceLevelObjective(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeServiceLevelObjectiveInitialState(rawInitial, rawDesired *ServiceLevelObjective) (*ServiceLevelObjective, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - - if !dcl.IsZeroValue(rawInitial.RollingPeriod) { - // Check if anything else is set. - if dcl.AnySet(rawInitial.CalendarPeriod) { - rawInitial.RollingPeriod = dcl.String("") - } - } - - if !dcl.IsZeroValue(rawInitial.CalendarPeriod) { - // Check if anything else is set. - if dcl.AnySet(rawInitial.RollingPeriod) { - rawInitial.CalendarPeriod = ServiceLevelObjectiveCalendarPeriodEnumRef("") - } - } - - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeServiceLevelObjectiveDesiredState(rawDesired, rawInitial *ServiceLevelObjective, opts ...dcl.ApplyOption) (*ServiceLevelObjective, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - rawDesired.ServiceLevelIndicator = canonicalizeServiceLevelObjectiveServiceLevelIndicator(rawDesired.ServiceLevelIndicator, nil, opts...) 
- - return rawDesired, nil - } - canonicalDesired := &ServiceLevelObjective{} - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { - canonicalDesired.DisplayName = rawInitial.DisplayName - } else { - canonicalDesired.DisplayName = rawDesired.DisplayName - } - canonicalDesired.ServiceLevelIndicator = canonicalizeServiceLevelObjectiveServiceLevelIndicator(rawDesired.ServiceLevelIndicator, rawInitial.ServiceLevelIndicator, opts...) - if dcl.IsZeroValue(rawDesired.Goal) || (dcl.IsEmptyValueIndirect(rawDesired.Goal) && dcl.IsEmptyValueIndirect(rawInitial.Goal)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - canonicalDesired.Goal = rawInitial.Goal - } else { - canonicalDesired.Goal = rawDesired.Goal - } - if dcl.StringCanonicalize(rawDesired.RollingPeriod, rawInitial.RollingPeriod) { - canonicalDesired.RollingPeriod = rawInitial.RollingPeriod - } else { - canonicalDesired.RollingPeriod = rawDesired.RollingPeriod - } - if dcl.IsZeroValue(rawDesired.CalendarPeriod) || (dcl.IsEmptyValueIndirect(rawDesired.CalendarPeriod) && dcl.IsEmptyValueIndirect(rawInitial.CalendarPeriod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - canonicalDesired.CalendarPeriod = rawInitial.CalendarPeriod - } else { - canonicalDesired.CalendarPeriod = rawDesired.CalendarPeriod - } - if dcl.IsZeroValue(rawDesired.UserLabels) || (dcl.IsEmptyValueIndirect(rawDesired.UserLabels) && dcl.IsEmptyValueIndirect(rawInitial.UserLabels)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- canonicalDesired.UserLabels = rawInitial.UserLabels - } else { - canonicalDesired.UserLabels = rawDesired.UserLabels - } - if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - if dcl.NameToSelfLink(rawDesired.Service, rawInitial.Service) { - canonicalDesired.Service = rawInitial.Service - } else { - canonicalDesired.Service = rawDesired.Service - } - - if canonicalDesired.RollingPeriod != nil { - // Check if anything else is set. - if dcl.AnySet(rawDesired.CalendarPeriod) { - canonicalDesired.RollingPeriod = dcl.String("") - } - } - - if canonicalDesired.CalendarPeriod != nil { - // Check if anything else is set. - if dcl.AnySet(rawDesired.RollingPeriod) { - canonicalDesired.CalendarPeriod = ServiceLevelObjectiveCalendarPeriodEnumRef("") - } - } - - return canonicalDesired, nil -} - -func canonicalizeServiceLevelObjectiveNewState(c *Client, rawNew, rawDesired *ServiceLevelObjective) (*ServiceLevelObjective, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } else { - if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } - } - - if dcl.IsEmptyValueIndirect(rawNew.ServiceLevelIndicator) && dcl.IsEmptyValueIndirect(rawDesired.ServiceLevelIndicator) { - rawNew.ServiceLevelIndicator = rawDesired.ServiceLevelIndicator - } else { - rawNew.ServiceLevelIndicator = canonicalizeNewServiceLevelObjectiveServiceLevelIndicator(c, rawDesired.ServiceLevelIndicator, rawNew.ServiceLevelIndicator) - } - - if dcl.IsEmptyValueIndirect(rawNew.Goal) 
&& dcl.IsEmptyValueIndirect(rawDesired.Goal) { - rawNew.Goal = rawDesired.Goal - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.RollingPeriod) && dcl.IsEmptyValueIndirect(rawDesired.RollingPeriod) { - rawNew.RollingPeriod = rawDesired.RollingPeriod - } else { - if dcl.StringCanonicalize(rawDesired.RollingPeriod, rawNew.RollingPeriod) { - rawNew.RollingPeriod = rawDesired.RollingPeriod - } - } - - if dcl.IsEmptyValueIndirect(rawNew.CalendarPeriod) && dcl.IsEmptyValueIndirect(rawDesired.CalendarPeriod) { - rawNew.CalendarPeriod = rawDesired.CalendarPeriod - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) { - rawNew.CreateTime = rawDesired.CreateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.DeleteTime) && dcl.IsEmptyValueIndirect(rawDesired.DeleteTime) { - rawNew.DeleteTime = rawDesired.DeleteTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.ServiceManagementOwned) && dcl.IsEmptyValueIndirect(rawDesired.ServiceManagementOwned) { - rawNew.ServiceManagementOwned = rawDesired.ServiceManagementOwned - } else { - if dcl.BoolCanonicalize(rawDesired.ServiceManagementOwned, rawNew.ServiceManagementOwned) { - rawNew.ServiceManagementOwned = rawDesired.ServiceManagementOwned - } - } - - if dcl.IsEmptyValueIndirect(rawNew.UserLabels) && dcl.IsEmptyValueIndirect(rawDesired.UserLabels) { - rawNew.UserLabels = rawDesired.UserLabels - } else { - } - - rawNew.Project = rawDesired.Project - - rawNew.Service = rawDesired.Service - - return rawNew, nil -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicator(des, initial *ServiceLevelObjectiveServiceLevelIndicator, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicator { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.BasicSli != nil || (initial != nil && initial.BasicSli != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.RequestBased, des.WindowsBased) { - des.BasicSli = nil - if initial != nil { - initial.BasicSli = nil - } - } - } - - if des.RequestBased != nil || (initial != nil && initial.RequestBased != nil) { - // Check if anything else is set. - if dcl.AnySet(des.BasicSli, des.WindowsBased) { - des.RequestBased = nil - if initial != nil { - initial.RequestBased = nil - } - } - } - - if des.WindowsBased != nil || (initial != nil && initial.WindowsBased != nil) { - // Check if anything else is set. - if dcl.AnySet(des.BasicSli, des.RequestBased) { - des.WindowsBased = nil - if initial != nil { - initial.WindowsBased = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicator{} - - cDes.BasicSli = canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSli(des.BasicSli, initial.BasicSli, opts...) - cDes.RequestBased = canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBased(des.RequestBased, initial.RequestBased, opts...) - cDes.WindowsBased = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBased(des.WindowsBased, initial.WindowsBased, opts...) - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicator, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicator { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicator, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicator(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicator, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicator(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicator(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicator) *ServiceLevelObjectiveServiceLevelIndicator { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicator while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.BasicSli = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSli(c, des.BasicSli, nw.BasicSli) - nw.RequestBased = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBased(c, des.RequestBased, nw.RequestBased) - nw.WindowsBased = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBased(c, des.WindowsBased, nw.WindowsBased) - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicator) []ServiceLevelObjectiveServiceLevelIndicator { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceLevelObjectiveServiceLevelIndicator - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicator(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicator) []ServiceLevelObjectiveServiceLevelIndicator { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicator - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicator(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSli(des, initial *ServiceLevelObjectiveServiceLevelIndicatorBasicSli, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorBasicSli { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.Availability != nil || (initial != nil && initial.Availability != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Latency, des.OperationAvailability, des.OperationLatency) { - des.Availability = nil - if initial != nil { - initial.Availability = nil - } - } - } - - if des.Latency != nil || (initial != nil && initial.Latency != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Availability, des.OperationAvailability, des.OperationLatency) { - des.Latency = nil - if initial != nil { - initial.Latency = nil - } - } - } - - if des.OperationAvailability != nil || (initial != nil && initial.OperationAvailability != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Availability, des.Latency, des.OperationLatency) { - des.OperationAvailability = nil - if initial != nil { - initial.OperationAvailability = nil - } - } - } - - if des.OperationLatency != nil || (initial != nil && initial.OperationLatency != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.Availability, des.Latency, des.OperationAvailability) { - des.OperationLatency = nil - if initial != nil { - initial.OperationLatency = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorBasicSli{} - - if dcl.StringArrayCanonicalize(des.Method, initial.Method) { - cDes.Method = initial.Method - } else { - cDes.Method = des.Method - } - if dcl.StringArrayCanonicalize(des.Location, initial.Location) { - cDes.Location = initial.Location - } else { - cDes.Location = des.Location - } - if dcl.StringArrayCanonicalize(des.Version, initial.Version) { - cDes.Version = initial.Version - } else { - cDes.Version = des.Version - } - cDes.Availability = canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(des.Availability, initial.Availability, opts...) - cDes.Latency = canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(des.Latency, initial.Latency, opts...) - cDes.OperationAvailability = canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(des.OperationAvailability, initial.OperationAvailability, opts...) - cDes.OperationLatency = canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(des.OperationLatency, initial.OperationLatency, opts...) - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorBasicSli, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorBasicSli { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSli, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSli(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSli, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSli(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSli(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorBasicSli) *ServiceLevelObjectiveServiceLevelIndicatorBasicSli { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorBasicSli while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringArrayCanonicalize(des.Method, nw.Method) { - nw.Method = des.Method - } - if dcl.StringArrayCanonicalize(des.Location, nw.Location) { - nw.Location = des.Location - } - if dcl.StringArrayCanonicalize(des.Version, nw.Version) { - nw.Version = des.Version - } - nw.Availability = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(c, des.Availability, nw.Availability) - nw.Latency = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(c, des.Latency, nw.Latency) - nw.OperationAvailability = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(c, des.OperationAvailability, nw.OperationAvailability) - nw.OperationLatency = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(c, des.OperationLatency, nw.OperationLatency) - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorBasicSli) []ServiceLevelObjectiveServiceLevelIndicatorBasicSli { - if des == nil { - return nw - } - - // Find the elements in 
des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceLevelObjectiveServiceLevelIndicatorBasicSli - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorBasicSliNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSli(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorBasicSli) []ServiceLevelObjectiveServiceLevelIndicatorBasicSli { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorBasicSli - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSli(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(des, initial *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability { - if des == nil { - return initial - } - if des.empty { - return des - } - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability{} - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilitySlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilitySet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilitySlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability { - if des == nil { - return nw - } - - // Lengths are unequal. 
A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(des, initial *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency{} - - if dcl.StringCanonicalize(des.Threshold, initial.Threshold) || dcl.IsZeroValue(des.Threshold) { - cDes.Threshold = initial.Threshold - } else { - cDes.Threshold = des.Threshold - } - if dcl.IsZeroValue(des.Experience) || (dcl.IsEmptyValueIndirect(des.Experience) && dcl.IsEmptyValueIndirect(initial.Experience)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Experience = initial.Experience - } else { - cDes.Experience = des.Experience - } - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencySlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Threshold, nw.Threshold) { - nw.Threshold = des.Threshold - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencySet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. 
- items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencySlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(des, initial *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability { - if des == nil { - return initial - } - if des.empty { - return des - } - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability{} - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilitySlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilitySet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) 
- } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilitySlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(des, initial *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency{} - - if dcl.StringCanonicalize(des.Threshold, initial.Threshold) || dcl.IsZeroValue(des.Threshold) { - cDes.Threshold = initial.Threshold - } else { - cDes.Threshold = des.Threshold - } - if dcl.IsZeroValue(des.Experience) || (dcl.IsEmptyValueIndirect(des.Experience) && dcl.IsEmptyValueIndirect(initial.Experience)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Experience = initial.Experience - } else { - cDes.Experience = des.Experience - } - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencySlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Threshold, nw.Threshold) { - nw.Threshold = des.Threshold - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencySet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencySlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBased(des, initial *ServiceLevelObjectiveServiceLevelIndicatorRequestBased, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorRequestBased { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.GoodTotalRatio != nil || (initial != nil && initial.GoodTotalRatio != nil) { - // Check if anything else is set. - if dcl.AnySet(des.DistributionCut) { - des.GoodTotalRatio = nil - if initial != nil { - initial.GoodTotalRatio = nil - } - } - } - - if des.DistributionCut != nil || (initial != nil && initial.DistributionCut != nil) { - // Check if anything else is set. - if dcl.AnySet(des.GoodTotalRatio) { - des.DistributionCut = nil - if initial != nil { - initial.DistributionCut = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorRequestBased{} - - cDes.GoodTotalRatio = canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(des.GoodTotalRatio, initial.GoodTotalRatio, opts...) - cDes.DistributionCut = canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(des.DistributionCut, initial.DistributionCut, opts...) 
- - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorRequestBased, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorRequestBased { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorRequestBased, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBased(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorRequestBased, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBased(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBased(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorRequestBased) *ServiceLevelObjectiveServiceLevelIndicatorRequestBased { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorRequestBased while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.GoodTotalRatio = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(c, des.GoodTotalRatio, nw.GoodTotalRatio) - nw.DistributionCut = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(c, des.DistributionCut, nw.DistributionCut) - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorRequestBased) []ServiceLevelObjectiveServiceLevelIndicatorRequestBased { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceLevelObjectiveServiceLevelIndicatorRequestBased - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorRequestBasedNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBased(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorRequestBased) []ServiceLevelObjectiveServiceLevelIndicatorRequestBased { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorRequestBased - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBased(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(des, initial *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio{} - - if dcl.StringCanonicalize(des.GoodServiceFilter, initial.GoodServiceFilter) || dcl.IsZeroValue(des.GoodServiceFilter) { - cDes.GoodServiceFilter = initial.GoodServiceFilter - } else { - cDes.GoodServiceFilter = des.GoodServiceFilter - } - if dcl.StringCanonicalize(des.BadServiceFilter, initial.BadServiceFilter) || dcl.IsZeroValue(des.BadServiceFilter) { - cDes.BadServiceFilter = initial.BadServiceFilter - } else { - cDes.BadServiceFilter = des.BadServiceFilter - } - if dcl.StringCanonicalize(des.TotalServiceFilter, initial.TotalServiceFilter) || dcl.IsZeroValue(des.TotalServiceFilter) { - cDes.TotalServiceFilter = initial.TotalServiceFilter - } else { - cDes.TotalServiceFilter = des.TotalServiceFilter - } - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio, 0, len(des)) - for _, d := range des { - cd := 
canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.GoodServiceFilter, nw.GoodServiceFilter) { - nw.GoodServiceFilter = des.GoodServiceFilter - } - if dcl.StringCanonicalize(des.BadServiceFilter, nw.BadServiceFilter) { - nw.BadServiceFilter = des.BadServiceFilter - } - if dcl.StringCanonicalize(des.TotalServiceFilter, nw.TotalServiceFilter) { - nw.TotalServiceFilter = des.TotalServiceFilter - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(des, initial *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut{} - - if dcl.StringCanonicalize(des.DistributionFilter, initial.DistributionFilter) || dcl.IsZeroValue(des.DistributionFilter) { - cDes.DistributionFilter = initial.DistributionFilter - } else { - cDes.DistributionFilter = des.DistributionFilter - } - cDes.Range = canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(des.Range, initial.Range, opts...) - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.DistributionFilter, nw.DistributionFilter) { - nw.DistributionFilter = des.DistributionFilter - } - nw.Range = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(c, des.Range, nw.Range) - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(des, initial *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange{} - - if dcl.IsZeroValue(des.Min) || (dcl.IsEmptyValueIndirect(des.Min) && dcl.IsEmptyValueIndirect(initial.Min)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Min = initial.Min - } else { - cDes.Min = des.Min - } - if dcl.IsZeroValue(des.Max) || (dcl.IsEmptyValueIndirect(des.Max) && dcl.IsEmptyValueIndirect(initial.Max)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Max = initial.Max - } else { - cDes.Max = des.Max - } - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBased(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.GoodBadMetricFilter != nil || (initial != nil && initial.GoodBadMetricFilter != nil) { - // Check if anything else is set. - if dcl.AnySet(des.GoodTotalRatioThreshold, des.MetricMeanInRange, des.MetricSumInRange) { - des.GoodBadMetricFilter = nil - if initial != nil { - initial.GoodBadMetricFilter = nil - } - } - } - - if des.GoodTotalRatioThreshold != nil || (initial != nil && initial.GoodTotalRatioThreshold != nil) { - // Check if anything else is set. - if dcl.AnySet(des.GoodBadMetricFilter, des.MetricMeanInRange, des.MetricSumInRange) { - des.GoodTotalRatioThreshold = nil - if initial != nil { - initial.GoodTotalRatioThreshold = nil - } - } - } - - if des.MetricMeanInRange != nil || (initial != nil && initial.MetricMeanInRange != nil) { - // Check if anything else is set. - if dcl.AnySet(des.GoodBadMetricFilter, des.GoodTotalRatioThreshold, des.MetricSumInRange) { - des.MetricMeanInRange = nil - if initial != nil { - initial.MetricMeanInRange = nil - } - } - } - - if des.MetricSumInRange != nil || (initial != nil && initial.MetricSumInRange != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.GoodBadMetricFilter, des.GoodTotalRatioThreshold, des.MetricMeanInRange) { - des.MetricSumInRange = nil - if initial != nil { - initial.MetricSumInRange = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBased{} - - if dcl.StringCanonicalize(des.GoodBadMetricFilter, initial.GoodBadMetricFilter) || dcl.IsZeroValue(des.GoodBadMetricFilter) { - cDes.GoodBadMetricFilter = initial.GoodBadMetricFilter - } else { - cDes.GoodBadMetricFilter = des.GoodBadMetricFilter - } - cDes.GoodTotalRatioThreshold = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(des.GoodTotalRatioThreshold, initial.GoodTotalRatioThreshold, opts...) - cDes.MetricMeanInRange = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(des.MetricMeanInRange, initial.MetricMeanInRange, opts...) - cDes.MetricSumInRange = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(des.MetricSumInRange, initial.MetricSumInRange, opts...) - if dcl.StringCanonicalize(des.WindowPeriod, initial.WindowPeriod) || dcl.IsZeroValue(des.WindowPeriod) { - cDes.WindowPeriod = initial.WindowPeriod - } else { - cDes.WindowPeriod = des.WindowPeriod - } - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBased, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBased { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBased, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBased(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBased, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBased(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBased(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBased while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.GoodBadMetricFilter, nw.GoodBadMetricFilter) { - nw.GoodBadMetricFilter = des.GoodBadMetricFilter - } - nw.GoodTotalRatioThreshold = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(c, des.GoodTotalRatioThreshold, nw.GoodTotalRatioThreshold) - nw.MetricMeanInRange = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(c, des.MetricMeanInRange, nw.MetricMeanInRange) - nw.MetricSumInRange = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(c, des.MetricSumInRange, nw.MetricSumInRange) - if dcl.StringCanonicalize(des.WindowPeriod, nw.WindowPeriod) { - nw.WindowPeriod = des.WindowPeriod - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBased { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. 
Remove matched elements from nw. - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBased - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBased(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBased { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBased - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBased(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.Performance != nil || (initial != nil && initial.Performance != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.BasicSliPerformance) { - des.Performance = nil - if initial != nil { - initial.Performance = nil - } - } - } - - if des.BasicSliPerformance != nil || (initial != nil && initial.BasicSliPerformance != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Performance) { - des.BasicSliPerformance = nil - if initial != nil { - initial.BasicSliPerformance = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold{} - - cDes.Performance = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(des.Performance, initial.Performance, opts...) - cDes.BasicSliPerformance = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(des.BasicSliPerformance, initial.BasicSliPerformance, opts...) - if dcl.IsZeroValue(des.Threshold) || (dcl.IsEmptyValueIndirect(des.Threshold) && dcl.IsEmptyValueIndirect(initial.Threshold)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Threshold = initial.Threshold - } else { - cDes.Threshold = des.Threshold - } - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.Performance = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(c, des.Performance, nw.Performance) - nw.BasicSliPerformance = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(c, des.BasicSliPerformance, nw.BasicSliPerformance) - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.GoodTotalRatio != nil || (initial != nil && initial.GoodTotalRatio != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.DistributionCut) { - des.GoodTotalRatio = nil - if initial != nil { - initial.GoodTotalRatio = nil - } - } - } - - if des.DistributionCut != nil || (initial != nil && initial.DistributionCut != nil) { - // Check if anything else is set. - if dcl.AnySet(des.GoodTotalRatio) { - des.DistributionCut = nil - if initial != nil { - initial.DistributionCut = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance{} - - cDes.GoodTotalRatio = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(des.GoodTotalRatio, initial.GoodTotalRatio, opts...) - cDes.DistributionCut = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(des.DistributionCut, initial.DistributionCut, opts...) - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.GoodTotalRatio = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(c, des.GoodTotalRatio, nw.GoodTotalRatio) - nw.DistributionCut = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(c, des.DistributionCut, nw.DistributionCut) - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio{} - - if dcl.StringCanonicalize(des.GoodServiceFilter, initial.GoodServiceFilter) || dcl.IsZeroValue(des.GoodServiceFilter) { - cDes.GoodServiceFilter = initial.GoodServiceFilter - } else { - cDes.GoodServiceFilter = des.GoodServiceFilter - } - if dcl.StringCanonicalize(des.BadServiceFilter, initial.BadServiceFilter) || dcl.IsZeroValue(des.BadServiceFilter) { - cDes.BadServiceFilter = initial.BadServiceFilter - } else { - cDes.BadServiceFilter = des.BadServiceFilter - } - if dcl.StringCanonicalize(des.TotalServiceFilter, initial.TotalServiceFilter) || dcl.IsZeroValue(des.TotalServiceFilter) { - cDes.TotalServiceFilter = initial.TotalServiceFilter - } else { - cDes.TotalServiceFilter = des.TotalServiceFilter - } - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio, opts ...dcl.ApplyOption) 
[]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.GoodServiceFilter, nw.GoodServiceFilter) { - nw.GoodServiceFilter = des.GoodServiceFilter - } - if dcl.StringCanonicalize(des.BadServiceFilter, nw.BadServiceFilter) { - nw.BadServiceFilter = des.BadServiceFilter - } - if dcl.StringCanonicalize(des.TotalServiceFilter, nw.TotalServiceFilter) { - nw.TotalServiceFilter = des.TotalServiceFilter - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut{} - - if dcl.StringCanonicalize(des.DistributionFilter, initial.DistributionFilter) || dcl.IsZeroValue(des.DistributionFilter) { - cDes.DistributionFilter = initial.DistributionFilter - } else { - cDes.DistributionFilter = des.DistributionFilter - } - cDes.Range = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(des.Range, initial.Range, opts...) 
- - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.DistributionFilter, nw.DistributionFilter) { - nw.DistributionFilter = des.DistributionFilter - } - nw.Range = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(c, des.Range, nw.Range) - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange{} - - if dcl.IsZeroValue(des.Min) || (dcl.IsEmptyValueIndirect(des.Min) && dcl.IsEmptyValueIndirect(initial.Min)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Min = initial.Min - } else { - cDes.Min = des.Min - } - if dcl.IsZeroValue(des.Max) || (dcl.IsEmptyValueIndirect(des.Max) && dcl.IsEmptyValueIndirect(initial.Max)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Max = initial.Max - } else { - cDes.Max = des.Max - } - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.Availability != nil || (initial != nil && initial.Availability != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Latency, des.OperationAvailability, des.OperationLatency) { - des.Availability = nil - if initial != nil { - initial.Availability = nil - } - } - } - - if des.Latency != nil || (initial != nil && initial.Latency != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Availability, des.OperationAvailability, des.OperationLatency) { - des.Latency = nil - if initial != nil { - initial.Latency = nil - } - } - } - - if des.OperationAvailability != nil || (initial != nil && initial.OperationAvailability != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Availability, des.Latency, des.OperationLatency) { - des.OperationAvailability = nil - if initial != nil { - initial.OperationAvailability = nil - } - } - } - - if des.OperationLatency != nil || (initial != nil && initial.OperationLatency != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.Availability, des.Latency, des.OperationAvailability) { - des.OperationLatency = nil - if initial != nil { - initial.OperationLatency = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance{} - - if dcl.StringArrayCanonicalize(des.Method, initial.Method) { - cDes.Method = initial.Method - } else { - cDes.Method = des.Method - } - if dcl.StringArrayCanonicalize(des.Location, initial.Location) { - cDes.Location = initial.Location - } else { - cDes.Location = des.Location - } - if dcl.StringArrayCanonicalize(des.Version, initial.Version) { - cDes.Version = initial.Version - } else { - cDes.Version = des.Version - } - cDes.Availability = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(des.Availability, initial.Availability, opts...) - cDes.Latency = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(des.Latency, initial.Latency, opts...) - cDes.OperationAvailability = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(des.OperationAvailability, initial.OperationAvailability, opts...) - cDes.OperationLatency = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(des.OperationLatency, initial.OperationLatency, opts...) 
- - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringArrayCanonicalize(des.Method, nw.Method) { - nw.Method = des.Method - } - if dcl.StringArrayCanonicalize(des.Location, nw.Location) { - nw.Location = des.Location - } - if dcl.StringArrayCanonicalize(des.Version, nw.Version) { - nw.Version = des.Version - } - nw.Availability = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(c, des.Availability, nw.Availability) - nw.Latency = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(c, des.Latency, nw.Latency) - nw.OperationAvailability = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(c, des.OperationAvailability, nw.OperationAvailability) - nw.OperationLatency = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(c, des.OperationLatency, nw.OperationLatency) - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability { - if des == nil { - return initial - } - if des.empty { - return des - } - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability{} - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilitySlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilitySet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilitySlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency{} - - if dcl.StringCanonicalize(des.Threshold, initial.Threshold) || dcl.IsZeroValue(des.Threshold) { - cDes.Threshold = initial.Threshold - } else { - cDes.Threshold = des.Threshold - } - if dcl.IsZeroValue(des.Experience) || (dcl.IsEmptyValueIndirect(des.Experience) && dcl.IsEmptyValueIndirect(initial.Experience)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Experience = initial.Experience - } else { - cDes.Experience = des.Experience - } - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencySlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Threshold, nw.Threshold) { - nw.Threshold = des.Threshold - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencySet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencySlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability { - if des == nil { - return initial - } - if des.empty { - return des - } - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability{} - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilitySlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilitySet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilityNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilitySlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency{} - - if dcl.StringCanonicalize(des.Threshold, initial.Threshold) || dcl.IsZeroValue(des.Threshold) { - cDes.Threshold = initial.Threshold - } else { - cDes.Threshold = des.Threshold - } - if dcl.IsZeroValue(des.Experience) || (dcl.IsEmptyValueIndirect(des.Experience) && dcl.IsEmptyValueIndirect(initial.Experience)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Experience = initial.Experience - } else { - cDes.Experience = des.Experience - } - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencySlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Threshold, nw.Threshold) { - nw.Threshold = des.Threshold - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencySet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencySlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange{} - - if dcl.StringCanonicalize(des.TimeSeries, initial.TimeSeries) || dcl.IsZeroValue(des.TimeSeries) { - cDes.TimeSeries = initial.TimeSeries - } else { - cDes.TimeSeries = des.TimeSeries - } - cDes.Range = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(des.Range, initial.Range, opts...) - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.TimeSeries, nw.TimeSeries) { - nw.TimeSeries = des.TimeSeries - } - nw.Range = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(c, des.Range, nw.Range) - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange{} - - if dcl.IsZeroValue(des.Min) || (dcl.IsEmptyValueIndirect(des.Min) && dcl.IsEmptyValueIndirect(initial.Min)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Min = initial.Min - } else { - cDes.Min = des.Min - } - if dcl.IsZeroValue(des.Max) || (dcl.IsEmptyValueIndirect(des.Max) && dcl.IsEmptyValueIndirect(initial.Max)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Max = initial.Max - } else { - cDes.Max = des.Max - } - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange{} - - if dcl.StringCanonicalize(des.TimeSeries, initial.TimeSeries) || dcl.IsZeroValue(des.TimeSeries) { - cDes.TimeSeries = initial.TimeSeries - } else { - cDes.TimeSeries = des.TimeSeries - } - cDes.Range = canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(des.Range, initial.Range, opts...) - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.TimeSeries, nw.TimeSeries) { - nw.TimeSeries = des.TimeSeries - } - nw.Range = canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(c, des.Range, nw.Range) - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(c, &d, &n)) - } - - return items -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(des, initial *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange, opts ...dcl.ApplyOption) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange{} - - if dcl.IsZeroValue(des.Min) || (dcl.IsEmptyValueIndirect(des.Min) && dcl.IsEmptyValueIndirect(initial.Min)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Min = initial.Min - } else { - cDes.Min = des.Min - } - if dcl.IsZeroValue(des.Max) || (dcl.IsEmptyValueIndirect(des.Max) && dcl.IsEmptyValueIndirect(initial.Max)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Max = initial.Max - } else { - cDes.Max = des.Max - } - - return cDes -} - -func canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeSlice(des, initial []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange, opts ...dcl.ApplyOption) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange, 0, len(des)) - for _, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange, 0, len(des)) - for i, d := range des { - cd := canonicalizeServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(c *Client, des, nw *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeSet(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeSlice(c *Client, des, nw []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(c, &d, &n)) - } - - return items -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. -func diffServiceLevelObjective(c *Client, desired, actual *ServiceLevelObjective, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.ServiceLevelIndicator, actual.ServiceLevelIndicator, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicator, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("ServiceLevelIndicator")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Goal, actual.Goal, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Goal")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.RollingPeriod, actual.RollingPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("RollingPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.CalendarPeriod, actual.CalendarPeriod, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("CalendarPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.DeleteTime, actual.DeleteTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("DeleteTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.ServiceManagementOwned, actual.ServiceManagementOwned, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ServiceManagementOwned")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.UserLabels, actual.UserLabels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("UserLabels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Service, actual.Service, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Service")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} -func compareServiceLevelObjectiveServiceLevelIndicatorNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicator) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicator) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicator or *ServiceLevelObjectiveServiceLevelIndicator", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicator) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicator) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicator", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.BasicSli, actual.BasicSli, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorBasicSliNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSli, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("BasicSli")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.RequestBased, actual.RequestBased, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorRequestBasedNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBased, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("RequestBased")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.WindowsBased, actual.WindowsBased, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBased, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("WindowsBased")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorBasicSliNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorBasicSli) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorBasicSli) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorBasicSli or *ServiceLevelObjectiveServiceLevelIndicatorBasicSli", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorBasicSli) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorBasicSli) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorBasicSli", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Method, actual.Method, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Method")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Location")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Availability, actual.Availability, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilityNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Availability")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Latency, actual.Latency, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Latency")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.OperationAvailability, actual.OperationAvailability, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilityNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("OperationAvailability")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.OperationLatency, actual.OperationLatency, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("OperationLatency")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency or *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Threshold, actual.Threshold, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Threshold")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Experience, actual.Experience, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Experience")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency or *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Threshold, actual.Threshold, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Threshold")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Experience, actual.Experience, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Experience")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorRequestBasedNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorRequestBased) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorRequestBased) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorRequestBased or *ServiceLevelObjectiveServiceLevelIndicatorRequestBased", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorRequestBased) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorRequestBased) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorRequestBased", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.GoodTotalRatio, actual.GoodTotalRatio, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("GoodTotalRatio")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.DistributionCut, actual.DistributionCut, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("DistributionCut")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio or *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.GoodServiceFilter, actual.GoodServiceFilter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("GoodServiceFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.BadServiceFilter, actual.BadServiceFilter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("BadServiceFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.TotalServiceFilter, actual.TotalServiceFilter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("TotalServiceFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut or *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.DistributionFilter, actual.DistributionFilter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("DistributionFilter")); len(ds) != 0 || err != nil { - if err != nil { - 
return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Range, actual.Range, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Range")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange or *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Min, actual.Min, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Min")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Max, actual.Max, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Max")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBased or *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBased", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.GoodBadMetricFilter, actual.GoodBadMetricFilter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("GoodBadMetricFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.GoodTotalRatioThreshold, actual.GoodTotalRatioThreshold, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("GoodTotalRatioThreshold")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.MetricMeanInRange, actual.MetricMeanInRange, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("MetricMeanInRange")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.MetricSumInRange, actual.MetricSumInRange, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("MetricSumInRange")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.WindowPeriod, actual.WindowPeriod, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("WindowPeriod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold or *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Performance, actual.Performance, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Performance")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.BasicSliPerformance, actual.BasicSliPerformance, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("BasicSliPerformance")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Threshold, actual.Threshold, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Threshold")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance or *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) - if !ok { - return nil, fmt.Errorf("obj %v is not a 
ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.GoodTotalRatio, actual.GoodTotalRatio, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("GoodTotalRatio")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.DistributionCut, actual.DistributionCut, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("DistributionCut")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio or *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.GoodServiceFilter, actual.GoodServiceFilter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("GoodServiceFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.BadServiceFilter, actual.BadServiceFilter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("BadServiceFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.TotalServiceFilter, actual.TotalServiceFilter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("TotalServiceFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut or *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.DistributionFilter, actual.DistributionFilter, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("DistributionFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Range, actual.Range, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Range")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange or *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Min, actual.Min, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Min")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Max, actual.Max, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Max")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance or *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Method, actual.Method, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Method")); len(ds) != 0 || err != nil { - if err != 
nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Location")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Version, actual.Version, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Version")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Availability, actual.Availability, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilityNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Availability")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Latency, actual.Latency, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Latency")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.OperationAvailability, actual.OperationAvailability, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilityNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("OperationAvailability")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.OperationLatency, actual.OperationLatency, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("OperationLatency")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency or *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Threshold, actual.Threshold, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Threshold")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Experience, actual.Experience, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Experience")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilityNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency or *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency", a) - } - actual = &actualNotPointer - } - - if ds, err := 
dcl.Diff(desired.Threshold, actual.Threshold, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Threshold")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Experience, actual.Experience, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Experience")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange or *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeries, actual.TimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("TimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Range, actual.Range, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Range")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange or *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Min, actual.Min, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Min")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Max, actual.Max, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Max")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange or *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.TimeSeries, actual.TimeSeries, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("TimeSeries")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Range, actual.Range, dcl.DiffInfo{ObjectFunction: compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeNewStyle, EmptyObject: EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange, OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Range")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) - if !ok { - desiredNotPointer, ok := d.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange or *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) - if !ok { - actualNotPointer, ok := a.(ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) - if !ok { - return nil, fmt.Errorf("obj %v is not a ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Min, actual.Min, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Min")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Max, actual.Max, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation")}, fn.AddNest("Max")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. -func (r *ServiceLevelObjective) urlNormalized() *ServiceLevelObjective { - normalized := dcl.Copy(*r).(ServiceLevelObjective) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) - normalized.RollingPeriod = dcl.SelfLinkToName(r.RollingPeriod) - normalized.Project = dcl.SelfLinkToName(r.Project) - normalized.Service = dcl.SelfLinkToName(r.Service) - return &normalized -} - -func (r *ServiceLevelObjective) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateServiceLevelObjective" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "service": dcl.ValueOrEmptyString(nr.Service), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/services/{{service}}/serviceLevelObjectives/{{name}}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the ServiceLevelObjective resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. 
-func (r *ServiceLevelObjective) marshal(c *Client) ([]byte, error) { - m, err := expandServiceLevelObjective(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling ServiceLevelObjective: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalServiceLevelObjective decodes JSON responses into the ServiceLevelObjective resource schema. -func unmarshalServiceLevelObjective(b []byte, c *Client, res *ServiceLevelObjective) (*ServiceLevelObjective, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapServiceLevelObjective(m, c, res) -} - -func unmarshalMapServiceLevelObjective(m map[string]interface{}, c *Client, res *ServiceLevelObjective) (*ServiceLevelObjective, error) { - - flattened := flattenServiceLevelObjective(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandServiceLevelObjective expands ServiceLevelObjective into a JSON request object. 
-func expandServiceLevelObjective(c *Client, f *ServiceLevelObjective) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := dcl.DeriveField("projects/%s/services/%s/serviceLevelObjectives/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Service), dcl.SelfLinkToName(f.Name)); err != nil { - return nil, fmt.Errorf("error expanding Name into name: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - if v := f.DisplayName; dcl.ValueShouldBeSent(v) { - m["displayName"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicator(c, f.ServiceLevelIndicator, res); err != nil { - return nil, fmt.Errorf("error expanding ServiceLevelIndicator into serviceLevelIndicator: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["serviceLevelIndicator"] = v - } - if v := f.Goal; dcl.ValueShouldBeSent(v) { - m["goal"] = v - } - if v := f.RollingPeriod; dcl.ValueShouldBeSent(v) { - m["rollingPeriod"] = v - } - if v := f.CalendarPeriod; dcl.ValueShouldBeSent(v) { - m["calendarPeriod"] = v - } - if v := f.UserLabels; dcl.ValueShouldBeSent(v) { - m["userLabels"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Service into service: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["service"] = v - } - - return m, nil -} - -// flattenServiceLevelObjective flattens ServiceLevelObjective from a JSON request object into the -// ServiceLevelObjective type. 
-func flattenServiceLevelObjective(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjective { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &ServiceLevelObjective{} - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.DisplayName = dcl.FlattenString(m["displayName"]) - resultRes.ServiceLevelIndicator = flattenServiceLevelObjectiveServiceLevelIndicator(c, m["serviceLevelIndicator"], res) - resultRes.Goal = dcl.FlattenDouble(m["goal"]) - resultRes.RollingPeriod = dcl.FlattenString(m["rollingPeriod"]) - resultRes.CalendarPeriod = flattenServiceLevelObjectiveCalendarPeriodEnum(m["calendarPeriod"]) - resultRes.CreateTime = dcl.FlattenString(m["createTime"]) - resultRes.DeleteTime = dcl.FlattenString(m["deleteTime"]) - resultRes.ServiceManagementOwned = dcl.FlattenBool(m["serviceManagementOwned"]) - resultRes.UserLabels = dcl.FlattenKeyValuePairs(m["userLabels"]) - resultRes.Project = dcl.FlattenString(m["project"]) - resultRes.Service = dcl.FlattenString(m["service"]) - - return resultRes -} - -// expandServiceLevelObjectiveServiceLevelIndicatorMap expands the contents of ServiceLevelObjectiveServiceLevelIndicator into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicator, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicator(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicator into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicator, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicator(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicator from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicator { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicator{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicator{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicator) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicator(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicator from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicator { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicator{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicator{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicator, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicator(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicator expands an instance of ServiceLevelObjectiveServiceLevelIndicator into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicator(c *Client, f *ServiceLevelObjectiveServiceLevelIndicator, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSli(c, f.BasicSli, res); err != nil { - return nil, fmt.Errorf("error expanding BasicSli into basicSli: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["basicSli"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorRequestBased(c, f.RequestBased, res); err != nil { - return nil, fmt.Errorf("error expanding RequestBased into requestBased: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["requestBased"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBased(c, f.WindowsBased, res); err != nil { - return nil, fmt.Errorf("error expanding WindowsBased into windowsBased: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["windowsBased"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicator flattens an instance of ServiceLevelObjectiveServiceLevelIndicator from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicator(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicator { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicator{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicator - } - r.BasicSli = flattenServiceLevelObjectiveServiceLevelIndicatorBasicSli(c, m["basicSli"], res) - r.RequestBased = flattenServiceLevelObjectiveServiceLevelIndicatorRequestBased(c, m["requestBased"], res) - r.WindowsBased = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBased(c, m["windowsBased"], res) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSli into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSli, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSli(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSli into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorBasicSli, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSli(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSli from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSli { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSli{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSli{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSli) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSli(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSli from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorBasicSli { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSli{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSli{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSli, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSli(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSli expands an instance of ServiceLevelObjectiveServiceLevelIndicatorBasicSli into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorBasicSli(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorBasicSli, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Method; v != nil { - m["method"] = v - } - if v := f.Location; v != nil { - m["location"] = v - } - if v := f.Version; v != nil { - m["version"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(c, f.Availability, res); err != nil { - return nil, fmt.Errorf("error expanding Availability into availability: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["availability"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(c, f.Latency, res); err != nil { - return nil, fmt.Errorf("error expanding Latency into latency: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["latency"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(c, f.OperationAvailability, res); err != nil { - return nil, fmt.Errorf("error expanding OperationAvailability into 
operationAvailability: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["operationAvailability"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(c, f.OperationLatency, res); err != nil { - return nil, fmt.Errorf("error expanding OperationLatency into operationLatency: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["operationLatency"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSli flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorBasicSli from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSli(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorBasicSli { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorBasicSli{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSli - } - r.Method = dcl.FlattenStringSlice(m["method"]) - r.Location = dcl.FlattenStringSlice(m["location"]) - r.Version = dcl.FlattenStringSlice(m["version"]) - r.Availability = flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(c, m["availability"], res) - r.Latency = flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(c, m["latency"], res) - r.OperationAvailability = flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(c, m["operationAvailability"], res) - r.OperationLatency = flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(c, m["operationLatency"], res) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilityMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilityMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilitySlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilitySlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilityMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilityMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilitySlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilitySlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability expands an instance of ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability { - _, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability - } - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencySlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencySlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencySlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencySlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency expands an instance of ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Threshold; !dcl.IsEmptyValueIndirect(v) { - m["threshold"] = v - } - if v := f.Experience; !dcl.IsEmptyValueIndirect(v) { - m["experience"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency - } - r.Threshold = dcl.FlattenString(m["threshold"]) - r.Experience = flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum(m["experience"]) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilityMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilityMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilitySlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilitySlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilityMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilityMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilitySlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilitySlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability expands an instance of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability { - _, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability - } - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencySlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencySlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencySlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencySlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency expands an instance of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Threshold; !dcl.IsEmptyValueIndirect(v) { - m["threshold"] = v - } - if v := f.Experience; !dcl.IsEmptyValueIndirect(v) { - m["experience"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency - } - r.Threshold = dcl.FlattenString(m["threshold"]) - r.Experience = flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum(m["experience"]) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBased into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBased, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorRequestBased(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBased into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorRequestBased, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorRequestBased(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBased from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBased { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBased{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBased{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBased) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorRequestBased(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBased from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorRequestBased { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorRequestBased{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorRequestBased{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorRequestBased, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorRequestBased(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorRequestBased expands an instance of ServiceLevelObjectiveServiceLevelIndicatorRequestBased into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorRequestBased(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorRequestBased, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(c, f.GoodTotalRatio, res); err != nil { - return nil, fmt.Errorf("error expanding GoodTotalRatio into goodTotalRatio: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["goodTotalRatio"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(c, f.DistributionCut, res); err != nil { - return nil, fmt.Errorf("error expanding DistributionCut into distributionCut: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["distributionCut"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorRequestBased flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorRequestBased from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorRequestBased(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorRequestBased { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorRequestBased{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBased - } - r.GoodTotalRatio = flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(c, m["goodTotalRatio"], res) - r.DistributionCut = flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(c, m["distributionCut"], res) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio expands an instance of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.GoodServiceFilter; !dcl.IsEmptyValueIndirect(v) { - m["goodServiceFilter"] = v - } - if v := f.BadServiceFilter; !dcl.IsEmptyValueIndirect(v) { - m["badServiceFilter"] = v - } - if v := f.TotalServiceFilter; !dcl.IsEmptyValueIndirect(v) { - m["totalServiceFilter"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio - } - r.GoodServiceFilter = dcl.FlattenString(m["goodServiceFilter"]) - r.BadServiceFilter = dcl.FlattenString(m["badServiceFilter"]) - r.TotalServiceFilter = dcl.FlattenString(m["totalServiceFilter"]) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut expands an instance of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.DistributionFilter; !dcl.IsEmptyValueIndirect(v) { - m["distributionFilter"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(c, f.Range, res); err != nil { - return nil, fmt.Errorf("error expanding Range into range: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["range"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut - } - r.DistributionFilter = dcl.FlattenString(m["distributionFilter"]) - r.Range = flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(c, m["range"], res) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange expands an instance of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Min; !dcl.IsEmptyValueIndirect(v) { - m["min"] = v - } - if v := f.Max; !dcl.IsEmptyValueIndirect(v) { - m["max"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange - } - r.Min = dcl.FlattenDouble(m["min"]) - r.Max = dcl.FlattenDouble(m["max"]) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBased into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBased, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBased(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBased into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBased, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBased(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBased from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBased { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBased{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBased{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBased(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBased from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBased { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBased{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBased{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBased, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBased(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBased expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBased into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBased(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.GoodBadMetricFilter; !dcl.IsEmptyValueIndirect(v) { - m["goodBadMetricFilter"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(c, f.GoodTotalRatioThreshold, res); err != nil { - return nil, fmt.Errorf("error expanding GoodTotalRatioThreshold into goodTotalRatioThreshold: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["goodTotalRatioThreshold"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(c, f.MetricMeanInRange, res); err != nil { - return nil, fmt.Errorf("error expanding MetricMeanInRange into metricMeanInRange: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["metricMeanInRange"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(c, f.MetricSumInRange, res); err != nil 
{ - return nil, fmt.Errorf("error expanding MetricSumInRange into metricSumInRange: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["metricSumInRange"] = v - } - if v := f.WindowPeriod; !dcl.IsEmptyValueIndirect(v) { - m["windowPeriod"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBased flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBased from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBased(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBased{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBased - } - r.GoodBadMetricFilter = dcl.FlattenString(m["goodBadMetricFilter"]) - r.GoodTotalRatioThreshold = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(c, m["goodTotalRatioThreshold"], res) - r.MetricMeanInRange = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(c, m["metricMeanInRange"], res) - r.MetricSumInRange = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(c, m["metricSumInRange"], res) - r.WindowPeriod = dcl.FlattenString(m["windowPeriod"]) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(c, f.Performance, res); err != nil { - return nil, fmt.Errorf("error expanding Performance into performance: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["performance"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(c, f.BasicSliPerformance, res); err != nil { - return nil, fmt.Errorf("error expanding BasicSliPerformance into basicSliPerformance: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["basicSliPerformance"] = v - } - if v := f.Threshold; !dcl.IsEmptyValueIndirect(v) { - m["threshold"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold - } - r.Performance = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(c, m["performance"], res) - r.BasicSliPerformance = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(c, m["basicSliPerformance"], res) - r.Threshold = dcl.FlattenDouble(m["threshold"]) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(c, f.GoodTotalRatio, res); err != nil { - return nil, fmt.Errorf("error expanding GoodTotalRatio into goodTotalRatio: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["goodTotalRatio"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(c, f.DistributionCut, res); err != nil { - return nil, fmt.Errorf("error expanding DistributionCut into distributionCut: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["distributionCut"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance - } - r.GoodTotalRatio = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(c, m["goodTotalRatio"], res) - r.DistributionCut = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(c, m["distributionCut"], res) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.GoodServiceFilter; !dcl.IsEmptyValueIndirect(v) { - m["goodServiceFilter"] = v - } - if v := f.BadServiceFilter; !dcl.IsEmptyValueIndirect(v) { - m["badServiceFilter"] = v - } - if v := f.TotalServiceFilter; !dcl.IsEmptyValueIndirect(v) { - m["totalServiceFilter"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio - } - r.GoodServiceFilter = dcl.FlattenString(m["goodServiceFilter"]) - r.BadServiceFilter = dcl.FlattenString(m["badServiceFilter"]) - r.TotalServiceFilter = dcl.FlattenString(m["totalServiceFilter"]) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutMap expands the contents of 
ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(c, item.(map[string]interface{}), res) - } - - return items -} - -// 
flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.DistributionFilter; !dcl.IsEmptyValueIndirect(v) { - m["distributionFilter"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(c, f.Range, res); err != nil { - return nil, fmt.Errorf("error expanding Range into range: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["range"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut - } - r.DistributionFilter = dcl.FlattenString(m["distributionFilter"]) - r.Range = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(c, m["range"], res) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Min; !dcl.IsEmptyValueIndirect(v) { - m["min"] = v - } - if v := f.Max; !dcl.IsEmptyValueIndirect(v) { - m["max"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange - } - r.Min = dcl.FlattenDouble(m["min"]) - r.Max = dcl.FlattenDouble(m["max"]) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Method; v != nil { - m["method"] = v - } - if v := f.Location; v != nil { - m["location"] = v - } - if v := f.Version; v != nil { - m["version"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(c, f.Availability, res); err != nil { - return nil, fmt.Errorf("error expanding Availability into availability: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["availability"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(c, f.Latency, res); err != nil { - return nil, fmt.Errorf("error expanding Latency into latency: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["latency"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(c, f.OperationAvailability, res); err != nil { - return nil, fmt.Errorf("error expanding OperationAvailability into operationAvailability: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["operationAvailability"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(c, f.OperationLatency, res); err != nil { - return nil, fmt.Errorf("error expanding OperationLatency into operationLatency: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["operationLatency"] = v - } - - return m, nil -} - -// 
flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance - } - r.Method = dcl.FlattenStringSlice(m["method"]) - r.Location = dcl.FlattenStringSlice(m["location"]) - r.Version = dcl.FlattenStringSlice(m["version"]) - r.Availability = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(c, m["availability"], res) - r.Latency = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(c, m["latency"], res) - r.OperationAvailability = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(c, m["operationAvailability"], res) - r.OperationLatency = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(c, m["operationLatency"], res) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilityMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilityMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilitySlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilitySlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilityMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilityMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilitySlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilitySlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability { - _, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability - } - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencySlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencySlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(c, item.(map[string]interface{}), res) - } - - return items -} - -// 
flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencySlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencySlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Threshold; !dcl.IsEmptyValueIndirect(v) { - m["threshold"] = v - } - if v := f.Experience; !dcl.IsEmptyValueIndirect(v) { - m["experience"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency - } - r.Threshold = dcl.FlattenString(m["threshold"]) - r.Experience = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum(m["experience"]) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilityMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability into a JSON -// request 
object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilityMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilitySlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilitySlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilityMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilityMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(c, 
item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilitySlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilitySlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability { - _, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability - } - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencySlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencySlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencySlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencySlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Threshold; !dcl.IsEmptyValueIndirect(v) { - m["threshold"] = v - } - if v := f.Experience; !dcl.IsEmptyValueIndirect(v) { - m["experience"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency - } - r.Threshold = dcl.FlattenString(m["threshold"]) - r.Experience = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum(m["experience"]) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.TimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["timeSeries"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(c, f.Range, res); err != nil { - return nil, fmt.Errorf("error expanding Range into range: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["range"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange - } - r.TimeSeries = dcl.FlattenString(m["timeSeries"]) - r.Range = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(c, m["range"], res) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Min; !dcl.IsEmptyValueIndirect(v) { - m["min"] = v - } - if v := f.Max; !dcl.IsEmptyValueIndirect(v) { - m["max"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange - } - r.Min = dcl.FlattenDouble(m["min"]) - r.Max = dcl.FlattenDouble(m["max"]) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.TimeSeries; !dcl.IsEmptyValueIndirect(v) { - m["timeSeries"] = v - } - if v, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(c, f.Range, res); err != nil { - return nil, fmt.Errorf("error expanding Range into range: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["range"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange - } - r.TimeSeries = dcl.FlattenString(m["timeSeries"]) - r.Range = flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(c, m["range"], res) - - return r -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeMap expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeMap(c *Client, f map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange, res *ServiceLevelObjective) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeSlice expands the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange into a JSON -// request object. 
-func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeSlice(c *Client, f []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange, res *ServiceLevelObjective) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange expands an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange into a JSON -// request object. -func expandServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(c *Client, f *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange, res *ServiceLevelObjective) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Min; !dcl.IsEmptyValueIndirect(v) { - m["min"] = v - } - if v := f.Max; !dcl.IsEmptyValueIndirect(v) { - m["max"] = v - } - - return m, nil -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange flattens an instance of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange(c *Client, i interface{}, res *ServiceLevelObjective) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange - } - r.Min = dcl.FlattenDouble(m["min"]) - r.Max = dcl.FlattenDouble(m["max"]) - - return r -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnumMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnumMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum(item.(interface{})) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnumSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnumSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum(item.(interface{}))) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum asserts that an interface is a string, and returns a -// pointer to a *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum with the same value as that string. -func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum(i interface{}) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnumRef(s) -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnumMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnumMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum(item.(interface{})) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnumSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnumSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum(item.(interface{}))) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum asserts that an interface is a string, and returns a -// pointer to a *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum with the same value as that string. -func flattenServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum(i interface{}) *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnumRef(s) -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnumMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnumMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum(item.(interface{})) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnumSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum from a JSON -// response object. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnumSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum(item.(interface{}))) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum asserts that an interface is a string, and returns a -// pointer to a *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum with the same value as that string. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum(i interface{}) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnumRef(s) -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnumMap flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnumMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum{} - } - - items := make(map[string]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum(item.(interface{})) - } - - return items -} - -// 
flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnumSlice flattens the contents of ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum from a JSON -// response object. -func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnumSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum{} - } - - items := make([]ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum(item.(interface{}))) - } - - return items -} - -// flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum asserts that an interface is a string, and returns a -// pointer to a *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum with the same value as that string. 
-func flattenServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum(i interface{}) *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnumRef(s) -} - -// flattenServiceLevelObjectiveCalendarPeriodEnumMap flattens the contents of ServiceLevelObjectiveCalendarPeriodEnum from a JSON -// response object. -func flattenServiceLevelObjectiveCalendarPeriodEnumMap(c *Client, i interface{}, res *ServiceLevelObjective) map[string]ServiceLevelObjectiveCalendarPeriodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]ServiceLevelObjectiveCalendarPeriodEnum{} - } - - if len(a) == 0 { - return map[string]ServiceLevelObjectiveCalendarPeriodEnum{} - } - - items := make(map[string]ServiceLevelObjectiveCalendarPeriodEnum) - for k, item := range a { - items[k] = *flattenServiceLevelObjectiveCalendarPeriodEnum(item.(interface{})) - } - - return items -} - -// flattenServiceLevelObjectiveCalendarPeriodEnumSlice flattens the contents of ServiceLevelObjectiveCalendarPeriodEnum from a JSON -// response object. 
-func flattenServiceLevelObjectiveCalendarPeriodEnumSlice(c *Client, i interface{}, res *ServiceLevelObjective) []ServiceLevelObjectiveCalendarPeriodEnum { - a, ok := i.([]interface{}) - if !ok { - return []ServiceLevelObjectiveCalendarPeriodEnum{} - } - - if len(a) == 0 { - return []ServiceLevelObjectiveCalendarPeriodEnum{} - } - - items := make([]ServiceLevelObjectiveCalendarPeriodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenServiceLevelObjectiveCalendarPeriodEnum(item.(interface{}))) - } - - return items -} - -// flattenServiceLevelObjectiveCalendarPeriodEnum asserts that an interface is a string, and returns a -// pointer to a *ServiceLevelObjectiveCalendarPeriodEnum with the same value as that string. -func flattenServiceLevelObjectiveCalendarPeriodEnum(i interface{}) *ServiceLevelObjectiveCalendarPeriodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return ServiceLevelObjectiveCalendarPeriodEnumRef(s) -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. 
-func (r *ServiceLevelObjective) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalServiceLevelObjective(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Service == nil && ncr.Service == nil { - c.Config.Logger.Info("Both Service fields null - considering equal.") - } else if nr.Service == nil || ncr.Service == nil { - c.Config.Logger.Info("Only one Service field is null - considering unequal.") - return false - } else if *nr.Service != *ncr.Service { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type serviceLevelObjectiveDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp serviceLevelObjectiveApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToServiceLevelObjectiveDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]serviceLevelObjectiveDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. 
- for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []serviceLevelObjectiveDiff - // For each operation name, create a serviceLevelObjectiveDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := serviceLevelObjectiveDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToServiceLevelObjectiveApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToServiceLevelObjectiveApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (serviceLevelObjectiveApiOperation, error) { - switch opName { - - case "updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation": - return &updateServiceLevelObjectiveUpdateServiceLevelObjectiveOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractServiceLevelObjectiveFields(r *ServiceLevelObjective) error { - vServiceLevelIndicator := r.ServiceLevelIndicator - if vServiceLevelIndicator == nil { - // note: explicitly not the empty object. 
- vServiceLevelIndicator = &ServiceLevelObjectiveServiceLevelIndicator{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorFields(r, vServiceLevelIndicator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vServiceLevelIndicator) { - r.ServiceLevelIndicator = vServiceLevelIndicator - } - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicator) error { - vBasicSli := o.BasicSli - if vBasicSli == nil { - // note: explicitly not the empty object. - vBasicSli = &ServiceLevelObjectiveServiceLevelIndicatorBasicSli{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorBasicSliFields(r, vBasicSli); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBasicSli) { - o.BasicSli = vBasicSli - } - vRequestBased := o.RequestBased - if vRequestBased == nil { - // note: explicitly not the empty object. - vRequestBased = &ServiceLevelObjectiveServiceLevelIndicatorRequestBased{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorRequestBasedFields(r, vRequestBased); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRequestBased) { - o.RequestBased = vRequestBased - } - vWindowsBased := o.WindowsBased - if vWindowsBased == nil { - // note: explicitly not the empty object. - vWindowsBased = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBased{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedFields(r, vWindowsBased); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vWindowsBased) { - o.WindowsBased = vWindowsBased - } - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorBasicSliFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorBasicSli) error { - vAvailability := o.Availability - if vAvailability == nil { - // note: explicitly not the empty object. 
- vAvailability = &ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilityFields(r, vAvailability); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAvailability) { - o.Availability = vAvailability - } - vLatency := o.Latency - if vLatency == nil { - // note: explicitly not the empty object. - vLatency = &ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyFields(r, vLatency); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLatency) { - o.Latency = vLatency - } - vOperationAvailability := o.OperationAvailability - if vOperationAvailability == nil { - // note: explicitly not the empty object. - vOperationAvailability = &ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilityFields(r, vOperationAvailability); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vOperationAvailability) { - o.OperationAvailability = vOperationAvailability - } - vOperationLatency := o.OperationLatency - if vOperationLatency == nil { - // note: explicitly not the empty object. 
- vOperationLatency = &ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyFields(r, vOperationLatency); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vOperationLatency) { - o.OperationLatency = vOperationLatency - } - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilityFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability) error { - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) error { - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilityFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability) error { - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) error { - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorRequestBasedFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorRequestBased) error { - vGoodTotalRatio := o.GoodTotalRatio - if vGoodTotalRatio == nil { - // note: explicitly not the empty object. - vGoodTotalRatio = &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioFields(r, vGoodTotalRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGoodTotalRatio) { - o.GoodTotalRatio = vGoodTotalRatio - } - vDistributionCut := o.DistributionCut - if vDistributionCut == nil { - // note: explicitly not the empty object. 
- vDistributionCut = &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutFields(r, vDistributionCut); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDistributionCut) { - o.DistributionCut = vDistributionCut - } - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) error { - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) error { - vRange := o.Range - if vRange == nil { - // note: explicitly not the empty object. - vRange = &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeFields(r, vRange); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRange) { - o.Range = vRange - } - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) error { - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) error { - vGoodTotalRatioThreshold := o.GoodTotalRatioThreshold - if vGoodTotalRatioThreshold == nil { - // note: explicitly not the empty object. 
- vGoodTotalRatioThreshold = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdFields(r, vGoodTotalRatioThreshold); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGoodTotalRatioThreshold) { - o.GoodTotalRatioThreshold = vGoodTotalRatioThreshold - } - vMetricMeanInRange := o.MetricMeanInRange - if vMetricMeanInRange == nil { - // note: explicitly not the empty object. - vMetricMeanInRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeFields(r, vMetricMeanInRange); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetricMeanInRange) { - o.MetricMeanInRange = vMetricMeanInRange - } - vMetricSumInRange := o.MetricSumInRange - if vMetricSumInRange == nil { - // note: explicitly not the empty object. - vMetricSumInRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeFields(r, vMetricSumInRange); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetricSumInRange) { - o.MetricSumInRange = vMetricSumInRange - } - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) error { - vPerformance := o.Performance - if vPerformance == nil { - // note: explicitly not the empty object. 
- vPerformance = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceFields(r, vPerformance); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPerformance) { - o.Performance = vPerformance - } - vBasicSliPerformance := o.BasicSliPerformance - if vBasicSliPerformance == nil { - // note: explicitly not the empty object. - vBasicSliPerformance = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceFields(r, vBasicSliPerformance); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBasicSliPerformance) { - o.BasicSliPerformance = vBasicSliPerformance - } - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) error { - vGoodTotalRatio := o.GoodTotalRatio - if vGoodTotalRatio == nil { - // note: explicitly not the empty object. - vGoodTotalRatio = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioFields(r, vGoodTotalRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGoodTotalRatio) { - o.GoodTotalRatio = vGoodTotalRatio - } - vDistributionCut := o.DistributionCut - if vDistributionCut == nil { - // note: explicitly not the empty object. 
- vDistributionCut = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutFields(r, vDistributionCut); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDistributionCut) { - o.DistributionCut = vDistributionCut - } - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) error { - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) error { - vRange := o.Range - if vRange == nil { - // note: explicitly not the empty object. 
- vRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeFields(r, vRange); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRange) { - o.Range = vRange - } - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) error { - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) error { - vAvailability := o.Availability - if vAvailability == nil { - // note: explicitly not the empty object. - vAvailability = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilityFields(r, vAvailability); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAvailability) { - o.Availability = vAvailability - } - vLatency := o.Latency - if vLatency == nil { - // note: explicitly not the empty object. 
- vLatency = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyFields(r, vLatency); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLatency) { - o.Latency = vLatency - } - vOperationAvailability := o.OperationAvailability - if vOperationAvailability == nil { - // note: explicitly not the empty object. - vOperationAvailability = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilityFields(r, vOperationAvailability); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vOperationAvailability) { - o.OperationAvailability = vOperationAvailability - } - vOperationLatency := o.OperationLatency - if vOperationLatency == nil { - // note: explicitly not the empty object. 
- vOperationLatency = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyFields(r, vOperationLatency); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vOperationLatency) { - o.OperationLatency = vOperationLatency - } - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilityFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability) error { - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) error { - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilityFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability) error { - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) error { - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) error { - vRange := o.Range - if vRange == nil { - // note: explicitly not the empty object. 
- vRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeFields(r, vRange); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRange) { - o.Range = vRange - } - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) error { - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) error { - vRange := o.Range - if vRange == nil { - // note: explicitly not the empty object. - vRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeFields(r, vRange); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRange) { - o.Range = vRange - } - return nil -} -func extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) error { - return nil -} - -func postReadExtractServiceLevelObjectiveFields(r *ServiceLevelObjective) error { - vServiceLevelIndicator := r.ServiceLevelIndicator - if vServiceLevelIndicator == nil { - // note: explicitly not the empty object. 
- vServiceLevelIndicator = &ServiceLevelObjectiveServiceLevelIndicator{} - } - if err := postReadExtractServiceLevelObjectiveServiceLevelIndicatorFields(r, vServiceLevelIndicator); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vServiceLevelIndicator) { - r.ServiceLevelIndicator = vServiceLevelIndicator - } - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicator) error { - vBasicSli := o.BasicSli - if vBasicSli == nil { - // note: explicitly not the empty object. - vBasicSli = &ServiceLevelObjectiveServiceLevelIndicatorBasicSli{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorBasicSliFields(r, vBasicSli); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBasicSli) { - o.BasicSli = vBasicSli - } - vRequestBased := o.RequestBased - if vRequestBased == nil { - // note: explicitly not the empty object. - vRequestBased = &ServiceLevelObjectiveServiceLevelIndicatorRequestBased{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorRequestBasedFields(r, vRequestBased); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRequestBased) { - o.RequestBased = vRequestBased - } - vWindowsBased := o.WindowsBased - if vWindowsBased == nil { - // note: explicitly not the empty object. - vWindowsBased = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBased{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedFields(r, vWindowsBased); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vWindowsBased) { - o.WindowsBased = vWindowsBased - } - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorBasicSliFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorBasicSli) error { - vAvailability := o.Availability - if vAvailability == nil { - // note: explicitly not the empty object. 
- vAvailability = &ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilityFields(r, vAvailability); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAvailability) { - o.Availability = vAvailability - } - vLatency := o.Latency - if vLatency == nil { - // note: explicitly not the empty object. - vLatency = &ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyFields(r, vLatency); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLatency) { - o.Latency = vLatency - } - vOperationAvailability := o.OperationAvailability - if vOperationAvailability == nil { - // note: explicitly not the empty object. - vOperationAvailability = &ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilityFields(r, vOperationAvailability); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vOperationAvailability) { - o.OperationAvailability = vOperationAvailability - } - vOperationLatency := o.OperationLatency - if vOperationLatency == nil { - // note: explicitly not the empty object. 
- vOperationLatency = &ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyFields(r, vOperationLatency); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vOperationLatency) { - o.OperationLatency = vOperationLatency - } - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailabilityFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability) error { - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency) error { - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailabilityFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability) error { - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency) error { - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorRequestBasedFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorRequestBased) error { - vGoodTotalRatio := o.GoodTotalRatio - if vGoodTotalRatio == nil { - // note: explicitly not the empty object. - vGoodTotalRatio = &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioFields(r, vGoodTotalRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGoodTotalRatio) { - o.GoodTotalRatio = vGoodTotalRatio - } - vDistributionCut := o.DistributionCut - if vDistributionCut == nil { - // note: explicitly not the empty object. 
- vDistributionCut = &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutFields(r, vDistributionCut); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDistributionCut) { - o.DistributionCut = vDistributionCut - } - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatioFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio) error { - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut) error { - vRange := o.Range - if vRange == nil { - // note: explicitly not the empty object. - vRange = &ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeFields(r, vRange); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRange) { - o.Range = vRange - } - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRangeFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange) error { - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBased) error { - vGoodTotalRatioThreshold := o.GoodTotalRatioThreshold - if vGoodTotalRatioThreshold == nil { - // note: explicitly not the empty object. 
- vGoodTotalRatioThreshold = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdFields(r, vGoodTotalRatioThreshold); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGoodTotalRatioThreshold) { - o.GoodTotalRatioThreshold = vGoodTotalRatioThreshold - } - vMetricMeanInRange := o.MetricMeanInRange - if vMetricMeanInRange == nil { - // note: explicitly not the empty object. - vMetricMeanInRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeFields(r, vMetricMeanInRange); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetricMeanInRange) { - o.MetricMeanInRange = vMetricMeanInRange - } - vMetricSumInRange := o.MetricSumInRange - if vMetricSumInRange == nil { - // note: explicitly not the empty object. - vMetricSumInRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeFields(r, vMetricSumInRange); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMetricSumInRange) { - o.MetricSumInRange = vMetricSumInRange - } - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold) error { - vPerformance := o.Performance - if vPerformance == nil { - // note: explicitly not the empty object. 
- vPerformance = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceFields(r, vPerformance); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPerformance) { - o.Performance = vPerformance - } - vBasicSliPerformance := o.BasicSliPerformance - if vBasicSliPerformance == nil { - // note: explicitly not the empty object. - vBasicSliPerformance = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceFields(r, vBasicSliPerformance); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vBasicSliPerformance) { - o.BasicSliPerformance = vBasicSliPerformance - } - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance) error { - vGoodTotalRatio := o.GoodTotalRatio - if vGoodTotalRatio == nil { - // note: explicitly not the empty object. - vGoodTotalRatio = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioFields(r, vGoodTotalRatio); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGoodTotalRatio) { - o.GoodTotalRatio = vGoodTotalRatio - } - vDistributionCut := o.DistributionCut - if vDistributionCut == nil { - // note: explicitly not the empty object. 
- vDistributionCut = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutFields(r, vDistributionCut); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDistributionCut) { - o.DistributionCut = vDistributionCut - } - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatioFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio) error { - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut) error { - vRange := o.Range - if vRange == nil { - // note: explicitly not the empty object. 
- vRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeFields(r, vRange); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRange) { - o.Range = vRange - } - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRangeFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange) error { - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance) error { - vAvailability := o.Availability - if vAvailability == nil { - // note: explicitly not the empty object. - vAvailability = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilityFields(r, vAvailability); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAvailability) { - o.Availability = vAvailability - } - vLatency := o.Latency - if vLatency == nil { - // note: explicitly not the empty object. 
- vLatency = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyFields(r, vLatency); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vLatency) { - o.Latency = vLatency - } - vOperationAvailability := o.OperationAvailability - if vOperationAvailability == nil { - // note: explicitly not the empty object. - vOperationAvailability = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilityFields(r, vOperationAvailability); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vOperationAvailability) { - o.OperationAvailability = vOperationAvailability - } - vOperationLatency := o.OperationLatency - if vOperationLatency == nil { - // note: explicitly not the empty object. 
- vOperationLatency = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyFields(r, vOperationLatency); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vOperationLatency) { - o.OperationLatency = vOperationLatency - } - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailabilityFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability) error { - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency) error { - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailabilityFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability) error { - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency) error { - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange) error { - vRange := o.Range - if vRange == nil { - // note: explicitly not the empty object. 
- vRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeFields(r, vRange); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRange) { - o.Range = vRange - } - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRangeFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange) error { - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange) error { - vRange := o.Range - if vRange == nil { - // note: explicitly not the empty object. - vRange = &ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange{} - } - if err := extractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeFields(r, vRange); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRange) { - o.Range = vRange - } - return nil -} -func postReadExtractServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRangeFields(r *ServiceLevelObjective, o *ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective_schema.go deleted file mode 100644 index 6a38f1ff47..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective_schema.go +++ /dev/null @@ -1,755 +0,0 @@ -// 
Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package monitoring - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLServiceLevelObjectiveSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Monitoring/ServiceLevelObjective", - Description: "The Monitoring ServiceLevelObjective resource", - StructName: "ServiceLevelObjective", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a ServiceLevelObjective", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "serviceLevelObjective", - Required: true, - Description: "A full instance of a ServiceLevelObjective", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a ServiceLevelObjective", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "serviceLevelObjective", - Required: true, - Description: "A full instance of a ServiceLevelObjective", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a ServiceLevelObjective", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "serviceLevelObjective", - Required: true, - Description: "A full instance of a ServiceLevelObjective", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all ServiceLevelObjective", - Parameters: []dcl.PathParameters{ - 
dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "service", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many ServiceLevelObjective", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "service", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "ServiceLevelObjective": &dcl.Component{ - Title: "ServiceLevelObjective", - ID: "projects/{{project}}/services/{{service}}/serviceLevelObjectives/{{name}}", - ParentContainer: "project", - LabelsField: "userLabels", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "goal", - "project", - "service", - }, - Properties: map[string]*dcl.Property{ - "calendarPeriod": &dcl.Property{ - Type: "string", - GoName: "CalendarPeriod", - GoType: "ServiceLevelObjectiveCalendarPeriodEnum", - Description: "A calendar period, semantically \"since the start of the current ``\". At this time, only `DAY`, `WEEK`, `FORTNIGHT`, and `MONTH` are supported. 
Possible values: CALENDAR_PERIOD_UNSPECIFIED, DAY, WEEK, FORTNIGHT, MONTH, QUARTER, HALF, YEAR", - Conflicts: []string{ - "rollingPeriod", - }, - Enum: []string{ - "CALENDAR_PERIOD_UNSPECIFIED", - "DAY", - "WEEK", - "FORTNIGHT", - "MONTH", - "QUARTER", - "HALF", - "YEAR", - }, - }, - "createTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "CreateTime", - ReadOnly: true, - Description: "Time stamp of the `Create` or most recent `Update` command on this `Slo`.", - Immutable: true, - }, - "deleteTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "DeleteTime", - ReadOnly: true, - Description: "Time stamp of the `Update` or `Delete` command that made this no longer a current `Slo`. This field is not populated in `ServiceLevelObjective`s returned from calls to `GetServiceLevelObjective` and `ListServiceLevelObjectives`, because it is always empty in the current version. It is populated in `ServiceLevelObjective`s representing previous versions in the output of `ListServiceLevelObjectiveVersions`. Because all old configuration versions are stored, `Update` operations mark the obsoleted version as deleted.", - Immutable: true, - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "Name used for UI elements listing this SLO.", - }, - "goal": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Goal", - Description: "The fraction of service that must be good in order for this objective to be met. `0 < goal <= 0.999`.", - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Resource name for this `ServiceLevelObjective`. 
The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]", - Immutable: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - }, - "rollingPeriod": &dcl.Property{ - Type: "string", - GoName: "RollingPeriod", - Description: "A rolling time period, semantically \"in the past ``\". Must be an integer multiple of 1 day no larger than 30 days.", - Conflicts: []string{ - "calendarPeriod", - }, - }, - "service": &dcl.Property{ - Type: "string", - GoName: "Service", - Description: "The service for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Monitoring/Service", - Field: "name", - Parent: true, - }, - }, - }, - "serviceLevelIndicator": &dcl.Property{ - Type: "object", - GoName: "ServiceLevelIndicator", - GoType: "ServiceLevelObjectiveServiceLevelIndicator", - Description: "The definition of good service, used to measure and calculate the quality of the `Service`'s performance with respect to a single aspect of service quality.", - Properties: map[string]*dcl.Property{ - "basicSli": &dcl.Property{ - Type: "object", - GoName: "BasicSli", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSli", - Description: "Basic SLI on a well-known service type.", - Conflicts: []string{ - "requestBased", - "windowsBased", - }, - Properties: map[string]*dcl.Property{ - "availability": &dcl.Property{ - Type: "object", - GoName: "Availability", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability", - Description: "Good service is defined to be the count of requests made to this service that return successfully.", - Conflicts: []string{ - "latency", - 
"operationAvailability", - "operationLatency", - }, - Properties: map[string]*dcl.Property{}, - }, - "latency": &dcl.Property{ - Type: "object", - GoName: "Latency", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency", - Description: "Good service is defined to be the count of requests made to this service that are fast enough with respect to `latency.threshold`.", - Conflicts: []string{ - "availability", - "operationAvailability", - "operationLatency", - }, - Properties: map[string]*dcl.Property{ - "experience": &dcl.Property{ - Type: "string", - GoName: "Experience", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum", - Description: "A description of the experience associated with failing requests. Possible values: LATENCY_EXPERIENCE_UNSPECIFIED, DELIGHTING, SATISFYING, ANNOYING", - Enum: []string{ - "LATENCY_EXPERIENCE_UNSPECIFIED", - "DELIGHTING", - "SATISFYING", - "ANNOYING", - }, - }, - "threshold": &dcl.Property{ - Type: "string", - GoName: "Threshold", - Description: "Good service is defined to be the count of requests made to this service that return in no more than `threshold`.", - }, - }, - }, - "location": &dcl.Property{ - Type: "array", - GoName: "Location", - Description: "OPTIONAL: The set of locations to which this SLI is relevant. Telemetry from other locations will not be used to calculate performance for this SLI. If omitted, this SLI applies to all locations in which the Service has activity. For service types that don't support breaking down by location, setting this field will result in an error.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "method": &dcl.Property{ - Type: "array", - GoName: "Method", - Description: "OPTIONAL: The set of RPCs to which this SLI is relevant. Telemetry from other methods will not be used to calculate performance for this SLI. If omitted, this SLI applies to all the Service's methods. 
For service types that don't support breaking down by method, setting this field will result in an error.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "operationAvailability": &dcl.Property{ - Type: "object", - GoName: "OperationAvailability", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability", - Description: "Good service is defined to be the count of operations performed by this service that return successfully", - Conflicts: []string{ - "availability", - "latency", - "operationLatency", - }, - Properties: map[string]*dcl.Property{}, - }, - "operationLatency": &dcl.Property{ - Type: "object", - GoName: "OperationLatency", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency", - Description: "Good service is defined to be the count of operations performed by this service that are fast enough with respect to `operation_latency.threshold`.", - Conflicts: []string{ - "availability", - "latency", - "operationAvailability", - }, - Properties: map[string]*dcl.Property{ - "experience": &dcl.Property{ - Type: "string", - GoName: "Experience", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum", - Description: "A description of the experience associated with failing requests. Possible values: LATENCY_EXPERIENCE_UNSPECIFIED, DELIGHTING, SATISFYING, ANNOYING", - Enum: []string{ - "LATENCY_EXPERIENCE_UNSPECIFIED", - "DELIGHTING", - "SATISFYING", - "ANNOYING", - }, - }, - "threshold": &dcl.Property{ - Type: "string", - GoName: "Threshold", - Description: "Good service is defined to be the count of operations that are completed in no more than `threshold`.", - }, - }, - }, - "version": &dcl.Property{ - Type: "array", - GoName: "Version", - Description: "OPTIONAL: The set of API versions to which this SLI is relevant. Telemetry from other API versions will not be used to calculate performance for this SLI. 
If omitted, this SLI applies to all API versions. For service types that don't support breaking down by version, setting this field will result in an error.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "requestBased": &dcl.Property{ - Type: "object", - GoName: "RequestBased", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorRequestBased", - Description: "Request-based SLIs", - Conflicts: []string{ - "basicSli", - "windowsBased", - }, - Properties: map[string]*dcl.Property{ - "distributionCut": &dcl.Property{ - Type: "object", - GoName: "DistributionCut", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut", - Description: "`distribution_cut` is used when `good_service` is a count of values aggregated in a `Distribution` that fall into a good range. The `total_service` is the total count of all values aggregated in the `Distribution`.", - Conflicts: []string{ - "goodTotalRatio", - }, - Properties: map[string]*dcl.Property{ - "distributionFilter": &dcl.Property{ - Type: "string", - GoName: "DistributionFilter", - Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` aggregating values. 
Must have `ValueType = DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.", - }, - "range": &dcl.Property{ - Type: "object", - GoName: "Range", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange", - Description: "Range of values considered \"good.\" For a one-sided range, set one bound to an infinite value.", - Properties: map[string]*dcl.Property{ - "max": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Max", - Description: "Range maximum.", - }, - "min": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Min", - Description: "Range minimum.", - }, - }, - }, - }, - }, - "goodTotalRatio": &dcl.Property{ - Type: "object", - GoName: "GoodTotalRatio", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio", - Description: "`good_total_ratio` is used when the ratio of `good_service` to `total_service` is computed from two `TimeSeries`.", - Conflicts: []string{ - "distributionCut", - }, - Properties: map[string]*dcl.Property{ - "badServiceFilter": &dcl.Property{ - Type: "string", - GoName: "BadServiceFilter", - Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` quantifying bad service, either demanded service that was not provided or demanded service that was of inadequate quality. Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.", - }, - "goodServiceFilter": &dcl.Property{ - Type: "string", - GoName: "GoodServiceFilter", - Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` quantifying good service provided. 
Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.", - }, - "totalServiceFilter": &dcl.Property{ - Type: "string", - GoName: "TotalServiceFilter", - Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` quantifying total demanded service. Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.", - }, - }, - }, - }, - }, - "windowsBased": &dcl.Property{ - Type: "object", - GoName: "WindowsBased", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBased", - Description: "Windows-based SLIs", - Conflicts: []string{ - "basicSli", - "requestBased", - }, - Properties: map[string]*dcl.Property{ - "goodBadMetricFilter": &dcl.Property{ - Type: "string", - GoName: "GoodBadMetricFilter", - Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` with `ValueType = BOOL`. 
The window is good if any `true` values appear in the window.", - Conflicts: []string{ - "goodTotalRatioThreshold", - "metricMeanInRange", - "metricSumInRange", - }, - }, - "goodTotalRatioThreshold": &dcl.Property{ - Type: "object", - GoName: "GoodTotalRatioThreshold", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold", - Description: "A window is good if its `performance` is high enough.", - Conflicts: []string{ - "goodBadMetricFilter", - "metricMeanInRange", - "metricSumInRange", - }, - Properties: map[string]*dcl.Property{ - "basicSliPerformance": &dcl.Property{ - Type: "object", - GoName: "BasicSliPerformance", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance", - Description: "`BasicSli` to evaluate to judge window quality.", - Conflicts: []string{ - "performance", - }, - Properties: map[string]*dcl.Property{ - "availability": &dcl.Property{ - Type: "object", - GoName: "Availability", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability", - Description: "Good service is defined to be the count of requests made to this service that return successfully.", - Conflicts: []string{ - "latency", - "operationAvailability", - "operationLatency", - }, - Properties: map[string]*dcl.Property{}, - }, - "latency": &dcl.Property{ - Type: "object", - GoName: "Latency", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency", - Description: "Good service is defined to be the count of requests made to this service that are fast enough with respect to `latency.threshold`.", - Conflicts: []string{ - "availability", - "operationAvailability", - "operationLatency", - }, - Properties: map[string]*dcl.Property{ - "experience": &dcl.Property{ - Type: "string", - GoName: "Experience", - GoType: 
"ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum", - Description: "A description of the experience associated with failing requests. Possible values: LATENCY_EXPERIENCE_UNSPECIFIED, DELIGHTING, SATISFYING, ANNOYING", - Enum: []string{ - "LATENCY_EXPERIENCE_UNSPECIFIED", - "DELIGHTING", - "SATISFYING", - "ANNOYING", - }, - }, - "threshold": &dcl.Property{ - Type: "string", - GoName: "Threshold", - Description: "Good service is defined to be the count of requests made to this service that return in no more than `threshold`.", - }, - }, - }, - "location": &dcl.Property{ - Type: "array", - GoName: "Location", - Description: "OPTIONAL: The set of locations to which this SLI is relevant. Telemetry from other locations will not be used to calculate performance for this SLI. If omitted, this SLI applies to all locations in which the Service has activity. For service types that don't support breaking down by location, setting this field will result in an error.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "method": &dcl.Property{ - Type: "array", - GoName: "Method", - Description: "OPTIONAL: The set of RPCs to which this SLI is relevant. Telemetry from other methods will not be used to calculate performance for this SLI. If omitted, this SLI applies to all the Service's methods. 
For service types that don't support breaking down by method, setting this field will result in an error.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "operationAvailability": &dcl.Property{ - Type: "object", - GoName: "OperationAvailability", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability", - Description: "Good service is defined to be the count of operations performed by this service that return successfully", - Conflicts: []string{ - "availability", - "latency", - "operationLatency", - }, - Properties: map[string]*dcl.Property{}, - }, - "operationLatency": &dcl.Property{ - Type: "object", - GoName: "OperationLatency", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency", - Description: "Good service is defined to be the count of operations performed by this service that are fast enough with respect to `operation_latency.threshold`.", - Conflicts: []string{ - "availability", - "latency", - "operationAvailability", - }, - Properties: map[string]*dcl.Property{ - "experience": &dcl.Property{ - Type: "string", - GoName: "Experience", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum", - Description: "A description of the experience associated with failing requests. 
Possible values: LATENCY_EXPERIENCE_UNSPECIFIED, DELIGHTING, SATISFYING, ANNOYING", - Enum: []string{ - "LATENCY_EXPERIENCE_UNSPECIFIED", - "DELIGHTING", - "SATISFYING", - "ANNOYING", - }, - }, - "threshold": &dcl.Property{ - Type: "string", - GoName: "Threshold", - Description: "Good service is defined to be the count of operations that are completed in no more than `threshold`.", - }, - }, - }, - "version": &dcl.Property{ - Type: "array", - GoName: "Version", - Description: "OPTIONAL: The set of API versions to which this SLI is relevant. Telemetry from other API versions will not be used to calculate performance for this SLI. If omitted, this SLI applies to all API versions. For service types that don't support breaking down by version, setting this field will result in an error.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - }, - }, - "performance": &dcl.Property{ - Type: "object", - GoName: "Performance", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance", - Description: "`RequestBasedSli` to evaluate to judge window quality.", - Conflicts: []string{ - "basicSliPerformance", - }, - Properties: map[string]*dcl.Property{ - "distributionCut": &dcl.Property{ - Type: "object", - GoName: "DistributionCut", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut", - Description: "`distribution_cut` is used when `good_service` is a count of values aggregated in a `Distribution` that fall into a good range. 
The `total_service` is the total count of all values aggregated in the `Distribution`.", - Conflicts: []string{ - "goodTotalRatio", - }, - Properties: map[string]*dcl.Property{ - "distributionFilter": &dcl.Property{ - Type: "string", - GoName: "DistributionFilter", - Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` aggregating values. Must have `ValueType = DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.", - }, - "range": &dcl.Property{ - Type: "object", - GoName: "Range", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange", - Description: "Range of values considered \"good.\" For a one-sided range, set one bound to an infinite value.", - Properties: map[string]*dcl.Property{ - "max": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Max", - Description: "Range maximum.", - }, - "min": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Min", - Description: "Range minimum.", - }, - }, - }, - }, - }, - "goodTotalRatio": &dcl.Property{ - Type: "object", - GoName: "GoodTotalRatio", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio", - Description: "`good_total_ratio` is used when the ratio of `good_service` to `total_service` is computed from two `TimeSeries`.", - Conflicts: []string{ - "distributionCut", - }, - Properties: map[string]*dcl.Property{ - "badServiceFilter": &dcl.Property{ - Type: "string", - GoName: "BadServiceFilter", - Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` quantifying bad service, either demanded service that was not provided or demanded service that was of inadequate quality. 
Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.", - }, - "goodServiceFilter": &dcl.Property{ - Type: "string", - GoName: "GoodServiceFilter", - Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` quantifying good service provided. Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.", - }, - "totalServiceFilter": &dcl.Property{ - Type: "string", - GoName: "TotalServiceFilter", - Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying a `TimeSeries` quantifying total demanded service. Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.", - }, - }, - }, - }, - }, - "threshold": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Threshold", - Description: "If window `performance >= threshold`, the window is counted as good.", - }, - }, - }, - "metricMeanInRange": &dcl.Property{ - Type: "object", - GoName: "MetricMeanInRange", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange", - Description: "A window is good if the metric's value is in a good range, averaged across returned streams.", - Conflicts: []string{ - "goodBadMetricFilter", - "goodTotalRatioThreshold", - "metricSumInRange", - }, - Properties: map[string]*dcl.Property{ - "range": &dcl.Property{ - Type: "object", - GoName: "Range", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange", - Description: "Range of values considered \"good.\" For a one-sided range, set one bound to an infinite value.", - Properties: map[string]*dcl.Property{ - "max": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Max", - Description: "Range maximum.", - }, - "min": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Min", 
- Description: "Range minimum.", - }, - }, - }, - "timeSeries": &dcl.Property{ - Type: "string", - GoName: "TimeSeries", - Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying the `TimeSeries` to use for evaluating window quality.", - }, - }, - }, - "metricSumInRange": &dcl.Property{ - Type: "object", - GoName: "MetricSumInRange", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange", - Description: "A window is good if the metric's value is in a good range, summed across returned streams.", - Conflicts: []string{ - "goodBadMetricFilter", - "goodTotalRatioThreshold", - "metricMeanInRange", - }, - Properties: map[string]*dcl.Property{ - "range": &dcl.Property{ - Type: "object", - GoName: "Range", - GoType: "ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange", - Description: "Range of values considered \"good.\" For a one-sided range, set one bound to an infinite value.", - Properties: map[string]*dcl.Property{ - "max": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Max", - Description: "Range maximum.", - }, - "min": &dcl.Property{ - Type: "number", - Format: "double", - GoName: "Min", - Description: "Range minimum.", - }, - }, - }, - "timeSeries": &dcl.Property{ - Type: "string", - GoName: "TimeSeries", - Description: "A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) specifying the `TimeSeries` to use for evaluating window quality.", - }, - }, - }, - "windowPeriod": &dcl.Property{ - Type: "string", - GoName: "WindowPeriod", - Description: "Duration over which window quality is evaluated. Must be an integer fraction of a day and at least `60s`.", - }, - }, - }, - }, - }, - "serviceManagementOwned": &dcl.Property{ - Type: "boolean", - GoName: "ServiceManagementOwned", - ReadOnly: true, - Description: "Output only. 
If set, this SLO is managed at the [Service Management](https://cloud.google.com/service-management/overview) level. Therefore the service yaml file is the source of truth for this SLO, and API `Update` and `Delete` operations are forbidden.", - Immutable: true, - }, - "userLabels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "UserLabels", - Description: "Labels which have been used to annotate the service-level objective. Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. For labels which do not have a semantic value, the empty string may be supplied for the label value.", - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective_yaml_embed.go deleted file mode 100644 index d9a8167e3d..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_level_objective_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// GENERATED BY gen_go_data.go -// gen_go_data -package monitoring -var YAML_service_level_objective blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/service_level_objective.yaml - -package monitoring - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/service_level_objective.yaml -var YAML_service_level_objective = []byte("info:\n title: Monitoring/ServiceLevelObjective\n description: The Monitoring ServiceLevelObjective resource\n x-dcl-struct-name: ServiceLevelObjective\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a ServiceLevelObjective\n parameters:\n - name: serviceLevelObjective\n required: true\n description: A full instance of a ServiceLevelObjective\n apply:\n description: The function used to apply information about a ServiceLevelObjective\n parameters:\n - name: serviceLevelObjective\n required: true\n description: A full instance of a ServiceLevelObjective\n delete:\n description: The function used to delete a ServiceLevelObjective\n parameters:\n - name: serviceLevelObjective\n required: true\n description: A full instance of a ServiceLevelObjective\n deleteAll:\n description: The function used to delete all ServiceLevelObjective\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: service\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many ServiceLevelObjective\n parameters:\n - name: project\n required: true\n schema:\n type: 
string\n - name: service\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n ServiceLevelObjective:\n title: ServiceLevelObjective\n x-dcl-id: projects/{{project}}/services/{{service}}/serviceLevelObjectives/{{name}}\n x-dcl-parent-container: project\n x-dcl-labels: userLabels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - goal\n - project\n - service\n properties:\n calendarPeriod:\n type: string\n x-dcl-go-name: CalendarPeriod\n x-dcl-go-type: ServiceLevelObjectiveCalendarPeriodEnum\n description: 'A calendar period, semantically \"since the start of the current\n ``\". At this time, only `DAY`, `WEEK`, `FORTNIGHT`, and `MONTH` are supported.\n Possible values: CALENDAR_PERIOD_UNSPECIFIED, DAY, WEEK, FORTNIGHT, MONTH,\n QUARTER, HALF, YEAR'\n x-dcl-conflicts:\n - rollingPeriod\n enum:\n - CALENDAR_PERIOD_UNSPECIFIED\n - DAY\n - WEEK\n - FORTNIGHT\n - MONTH\n - QUARTER\n - HALF\n - YEAR\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Time stamp of the `Create` or most recent `Update` command\n on this `Slo`.\n x-kubernetes-immutable: true\n deleteTime:\n type: string\n format: date-time\n x-dcl-go-name: DeleteTime\n readOnly: true\n description: Time stamp of the `Update` or `Delete` command that made this\n no longer a current `Slo`. This field is not populated in `ServiceLevelObjective`s\n returned from calls to `GetServiceLevelObjective` and `ListServiceLevelObjectives`,\n because it is always empty in the current version. It is populated in\n `ServiceLevelObjective`s representing previous versions in the output\n of `ListServiceLevelObjectiveVersions`. 
Because all old configuration\n versions are stored, `Update` operations mark the obsoleted version as\n deleted.\n x-kubernetes-immutable: true\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: Name used for UI elements listing this SLO.\n goal:\n type: number\n format: double\n x-dcl-go-name: Goal\n description: The fraction of service that must be good in order for this\n objective to be met. `0 < goal <= 0.999`.\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Resource name for this `ServiceLevelObjective`. The format\n is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]'\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n rollingPeriod:\n type: string\n x-dcl-go-name: RollingPeriod\n description: A rolling time period, semantically \"in the past ``\". 
Must\n be an integer multiple of 1 day no larger than 30 days.\n x-dcl-conflicts:\n - calendarPeriod\n service:\n type: string\n x-dcl-go-name: Service\n description: The service for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Monitoring/Service\n field: name\n parent: true\n serviceLevelIndicator:\n type: object\n x-dcl-go-name: ServiceLevelIndicator\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicator\n description: The definition of good service, used to measure and calculate\n the quality of the `Service`'s performance with respect to a single aspect\n of service quality.\n properties:\n basicSli:\n type: object\n x-dcl-go-name: BasicSli\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSli\n description: Basic SLI on a well-known service type.\n x-dcl-conflicts:\n - requestBased\n - windowsBased\n properties:\n availability:\n type: object\n x-dcl-go-name: Availability\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSliAvailability\n description: Good service is defined to be the count of requests\n made to this service that return successfully.\n x-dcl-conflicts:\n - latency\n - operationAvailability\n - operationLatency\n latency:\n type: object\n x-dcl-go-name: Latency\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatency\n description: Good service is defined to be the count of requests\n made to this service that are fast enough with respect to `latency.threshold`.\n x-dcl-conflicts:\n - availability\n - operationAvailability\n - operationLatency\n properties:\n experience:\n type: string\n x-dcl-go-name: Experience\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSliLatencyExperienceEnum\n description: 'A description of the experience associated with\n failing requests. 
Possible values: LATENCY_EXPERIENCE_UNSPECIFIED,\n DELIGHTING, SATISFYING, ANNOYING'\n enum:\n - LATENCY_EXPERIENCE_UNSPECIFIED\n - DELIGHTING\n - SATISFYING\n - ANNOYING\n threshold:\n type: string\n x-dcl-go-name: Threshold\n description: Good service is defined to be the count of requests\n made to this service that return in no more than `threshold`.\n location:\n type: array\n x-dcl-go-name: Location\n description: 'OPTIONAL: The set of locations to which this SLI is\n relevant. Telemetry from other locations will not be used to calculate\n performance for this SLI. If omitted, this SLI applies to all\n locations in which the Service has activity. For service types\n that don''t support breaking down by location, setting this field\n will result in an error.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n method:\n type: array\n x-dcl-go-name: Method\n description: 'OPTIONAL: The set of RPCs to which this SLI is relevant.\n Telemetry from other methods will not be used to calculate performance\n for this SLI. If omitted, this SLI applies to all the Service''s\n methods. 
For service types that don''t support breaking down by\n method, setting this field will result in an error.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n operationAvailability:\n type: object\n x-dcl-go-name: OperationAvailability\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationAvailability\n description: Good service is defined to be the count of operations\n performed by this service that return successfully\n x-dcl-conflicts:\n - availability\n - latency\n - operationLatency\n operationLatency:\n type: object\n x-dcl-go-name: OperationLatency\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatency\n description: Good service is defined to be the count of operations\n performed by this service that are fast enough with respect to\n `operation_latency.threshold`.\n x-dcl-conflicts:\n - availability\n - latency\n - operationAvailability\n properties:\n experience:\n type: string\n x-dcl-go-name: Experience\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorBasicSliOperationLatencyExperienceEnum\n description: 'A description of the experience associated with\n failing requests. Possible values: LATENCY_EXPERIENCE_UNSPECIFIED,\n DELIGHTING, SATISFYING, ANNOYING'\n enum:\n - LATENCY_EXPERIENCE_UNSPECIFIED\n - DELIGHTING\n - SATISFYING\n - ANNOYING\n threshold:\n type: string\n x-dcl-go-name: Threshold\n description: Good service is defined to be the count of operations\n that are completed in no more than `threshold`.\n version:\n type: array\n x-dcl-go-name: Version\n description: 'OPTIONAL: The set of API versions to which this SLI\n is relevant. Telemetry from other API versions will not be used\n to calculate performance for this SLI. If omitted, this SLI applies\n to all API versions. 
For service types that don''t support breaking\n down by version, setting this field will result in an error.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n requestBased:\n type: object\n x-dcl-go-name: RequestBased\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorRequestBased\n description: Request-based SLIs\n x-dcl-conflicts:\n - basicSli\n - windowsBased\n properties:\n distributionCut:\n type: object\n x-dcl-go-name: DistributionCut\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCut\n description: '`distribution_cut` is used when `good_service` is\n a count of values aggregated in a `Distribution` that fall into\n a good range. The `total_service` is the total count of all values\n aggregated in the `Distribution`.'\n x-dcl-conflicts:\n - goodTotalRatio\n properties:\n distributionFilter:\n type: string\n x-dcl-go-name: DistributionFilter\n description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n specifying a `TimeSeries` aggregating values. 
Must have `ValueType\n = DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind =\n CUMULATIVE`.\n range:\n type: object\n x-dcl-go-name: Range\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorRequestBasedDistributionCutRange\n description: Range of values considered \"good.\" For a one-sided\n range, set one bound to an infinite value.\n properties:\n max:\n type: number\n format: double\n x-dcl-go-name: Max\n description: Range maximum.\n min:\n type: number\n format: double\n x-dcl-go-name: Min\n description: Range minimum.\n goodTotalRatio:\n type: object\n x-dcl-go-name: GoodTotalRatio\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorRequestBasedGoodTotalRatio\n description: '`good_total_ratio` is used when the ratio of `good_service`\n to `total_service` is computed from two `TimeSeries`.'\n x-dcl-conflicts:\n - distributionCut\n properties:\n badServiceFilter:\n type: string\n x-dcl-go-name: BadServiceFilter\n description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n specifying a `TimeSeries` quantifying bad service, either\n demanded service that was not provided or demanded service\n that was of inadequate quality. 
Must have `ValueType = DOUBLE`\n or `ValueType = INT64` and must have `MetricKind = DELTA`\n or `MetricKind = CUMULATIVE`.\n goodServiceFilter:\n type: string\n x-dcl-go-name: GoodServiceFilter\n description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n specifying a `TimeSeries` quantifying good service provided.\n Must have `ValueType = DOUBLE` or `ValueType = INT64` and\n must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.\n totalServiceFilter:\n type: string\n x-dcl-go-name: TotalServiceFilter\n description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n specifying a `TimeSeries` quantifying total demanded service.\n Must have `ValueType = DOUBLE` or `ValueType = INT64` and\n must have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.\n windowsBased:\n type: object\n x-dcl-go-name: WindowsBased\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBased\n description: Windows-based SLIs\n x-dcl-conflicts:\n - basicSli\n - requestBased\n properties:\n goodBadMetricFilter:\n type: string\n x-dcl-go-name: GoodBadMetricFilter\n description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n specifying a `TimeSeries` with `ValueType = BOOL`. 
The window\n is good if any `true` values appear in the window.\n x-dcl-conflicts:\n - goodTotalRatioThreshold\n - metricMeanInRange\n - metricSumInRange\n goodTotalRatioThreshold:\n type: object\n x-dcl-go-name: GoodTotalRatioThreshold\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThreshold\n description: A window is good if its `performance` is high enough.\n x-dcl-conflicts:\n - goodBadMetricFilter\n - metricMeanInRange\n - metricSumInRange\n properties:\n basicSliPerformance:\n type: object\n x-dcl-go-name: BasicSliPerformance\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformance\n description: '`BasicSli` to evaluate to judge window quality.'\n x-dcl-conflicts:\n - performance\n properties:\n availability:\n type: object\n x-dcl-go-name: Availability\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceAvailability\n description: Good service is defined to be the count of\n requests made to this service that return successfully.\n x-dcl-conflicts:\n - latency\n - operationAvailability\n - operationLatency\n latency:\n type: object\n x-dcl-go-name: Latency\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatency\n description: Good service is defined to be the count of\n requests made to this service that are fast enough with\n respect to `latency.threshold`.\n x-dcl-conflicts:\n - availability\n - operationAvailability\n - operationLatency\n properties:\n experience:\n type: string\n x-dcl-go-name: Experience\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceLatencyExperienceEnum\n description: 'A description of the experience associated\n with failing requests. 
Possible values: LATENCY_EXPERIENCE_UNSPECIFIED,\n DELIGHTING, SATISFYING, ANNOYING'\n enum:\n - LATENCY_EXPERIENCE_UNSPECIFIED\n - DELIGHTING\n - SATISFYING\n - ANNOYING\n threshold:\n type: string\n x-dcl-go-name: Threshold\n description: Good service is defined to be the count\n of requests made to this service that return in no\n more than `threshold`.\n location:\n type: array\n x-dcl-go-name: Location\n description: 'OPTIONAL: The set of locations to which this\n SLI is relevant. Telemetry from other locations will not\n be used to calculate performance for this SLI. If omitted,\n this SLI applies to all locations in which the Service\n has activity. For service types that don''t support breaking\n down by location, setting this field will result in an\n error.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n method:\n type: array\n x-dcl-go-name: Method\n description: 'OPTIONAL: The set of RPCs to which this SLI\n is relevant. Telemetry from other methods will not be\n used to calculate performance for this SLI. If omitted,\n this SLI applies to all the Service''s methods. 
For service\n types that don''t support breaking down by method, setting\n this field will result in an error.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n operationAvailability:\n type: object\n x-dcl-go-name: OperationAvailability\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationAvailability\n description: Good service is defined to be the count of\n operations performed by this service that return successfully\n x-dcl-conflicts:\n - availability\n - latency\n - operationLatency\n operationLatency:\n type: object\n x-dcl-go-name: OperationLatency\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatency\n description: Good service is defined to be the count of\n operations performed by this service that are fast enough\n with respect to `operation_latency.threshold`.\n x-dcl-conflicts:\n - availability\n - latency\n - operationAvailability\n properties:\n experience:\n type: string\n x-dcl-go-name: Experience\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdBasicSliPerformanceOperationLatencyExperienceEnum\n description: 'A description of the experience associated\n with failing requests. Possible values: LATENCY_EXPERIENCE_UNSPECIFIED,\n DELIGHTING, SATISFYING, ANNOYING'\n enum:\n - LATENCY_EXPERIENCE_UNSPECIFIED\n - DELIGHTING\n - SATISFYING\n - ANNOYING\n threshold:\n type: string\n x-dcl-go-name: Threshold\n description: Good service is defined to be the count\n of operations that are completed in no more than `threshold`.\n version:\n type: array\n x-dcl-go-name: Version\n description: 'OPTIONAL: The set of API versions to which\n this SLI is relevant. Telemetry from other API versions\n will not be used to calculate performance for this SLI.\n If omitted, this SLI applies to all API versions. 
For\n service types that don''t support breaking down by version,\n setting this field will result in an error.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n performance:\n type: object\n x-dcl-go-name: Performance\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformance\n description: '`RequestBasedSli` to evaluate to judge window\n quality.'\n x-dcl-conflicts:\n - basicSliPerformance\n properties:\n distributionCut:\n type: object\n x-dcl-go-name: DistributionCut\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCut\n description: '`distribution_cut` is used when `good_service`\n is a count of values aggregated in a `Distribution` that\n fall into a good range. The `total_service` is the total\n count of all values aggregated in the `Distribution`.'\n x-dcl-conflicts:\n - goodTotalRatio\n properties:\n distributionFilter:\n type: string\n x-dcl-go-name: DistributionFilter\n description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n specifying a `TimeSeries` aggregating values. 
Must\n have `ValueType = DISTRIBUTION` and `MetricKind =\n DELTA` or `MetricKind = CUMULATIVE`.\n range:\n type: object\n x-dcl-go-name: Range\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceDistributionCutRange\n description: Range of values considered \"good.\" For\n a one-sided range, set one bound to an infinite value.\n properties:\n max:\n type: number\n format: double\n x-dcl-go-name: Max\n description: Range maximum.\n min:\n type: number\n format: double\n x-dcl-go-name: Min\n description: Range minimum.\n goodTotalRatio:\n type: object\n x-dcl-go-name: GoodTotalRatio\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedGoodTotalRatioThresholdPerformanceGoodTotalRatio\n description: '`good_total_ratio` is used when the ratio\n of `good_service` to `total_service` is computed from\n two `TimeSeries`.'\n x-dcl-conflicts:\n - distributionCut\n properties:\n badServiceFilter:\n type: string\n x-dcl-go-name: BadServiceFilter\n description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n specifying a `TimeSeries` quantifying bad service,\n either demanded service that was not provided or demanded\n service that was of inadequate quality. Must have\n `ValueType = DOUBLE` or `ValueType = INT64` and must\n have `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.\n goodServiceFilter:\n type: string\n x-dcl-go-name: GoodServiceFilter\n description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n specifying a `TimeSeries` quantifying good service\n provided. Must have `ValueType = DOUBLE` or `ValueType\n = INT64` and must have `MetricKind = DELTA` or `MetricKind\n = CUMULATIVE`.\n totalServiceFilter:\n type: string\n x-dcl-go-name: TotalServiceFilter\n description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n specifying a `TimeSeries` quantifying total demanded\n service. 
Must have `ValueType = DOUBLE` or `ValueType\n = INT64` and must have `MetricKind = DELTA` or `MetricKind\n = CUMULATIVE`.\n threshold:\n type: number\n format: double\n x-dcl-go-name: Threshold\n description: If window `performance >= threshold`, the window\n is counted as good.\n metricMeanInRange:\n type: object\n x-dcl-go-name: MetricMeanInRange\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRange\n description: A window is good if the metric's value is in a good\n range, averaged across returned streams.\n x-dcl-conflicts:\n - goodBadMetricFilter\n - goodTotalRatioThreshold\n - metricSumInRange\n properties:\n range:\n type: object\n x-dcl-go-name: Range\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricMeanInRangeRange\n description: Range of values considered \"good.\" For a one-sided\n range, set one bound to an infinite value.\n properties:\n max:\n type: number\n format: double\n x-dcl-go-name: Max\n description: Range maximum.\n min:\n type: number\n format: double\n x-dcl-go-name: Min\n description: Range minimum.\n timeSeries:\n type: string\n x-dcl-go-name: TimeSeries\n description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n specifying the `TimeSeries` to use for evaluating window quality.\n metricSumInRange:\n type: object\n x-dcl-go-name: MetricSumInRange\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRange\n description: A window is good if the metric's value is in a good\n range, summed across returned streams.\n x-dcl-conflicts:\n - goodBadMetricFilter\n - goodTotalRatioThreshold\n - metricMeanInRange\n properties:\n range:\n type: object\n x-dcl-go-name: Range\n x-dcl-go-type: ServiceLevelObjectiveServiceLevelIndicatorWindowsBasedMetricSumInRangeRange\n description: Range of values considered \"good.\" For a one-sided\n range, set one bound to an infinite value.\n properties:\n max:\n type: number\n format: double\n 
x-dcl-go-name: Max\n description: Range maximum.\n min:\n type: number\n format: double\n x-dcl-go-name: Min\n description: Range minimum.\n timeSeries:\n type: string\n x-dcl-go-name: TimeSeries\n description: A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)\n specifying the `TimeSeries` to use for evaluating window quality.\n windowPeriod:\n type: string\n x-dcl-go-name: WindowPeriod\n description: Duration over which window quality is evaluated. Must\n be an integer fraction of a day and at least `60s`.\n serviceManagementOwned:\n type: boolean\n x-dcl-go-name: ServiceManagementOwned\n readOnly: true\n description: Output only. If set, this SLO is managed at the [Service Management](https://cloud.google.com/service-management/overview)\n level. Therefore the service yaml file is the source of truth for this\n SLO, and API `Update` and `Delete` operations are forbidden.\n x-kubernetes-immutable: true\n userLabels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: UserLabels\n description: Labels which have been used to annotate the service-level objective.\n Label keys must start with a letter. Label keys and values may contain\n lowercase letters, numbers, underscores, and dashes. Label keys and values\n have a maximum length of 63 characters, and must be less than 128 bytes\n in size. Up to 64 label entries may be stored. 
For labels which do not\n have a semantic value, the empty string may be supplied for the label\n value.\n") - -// 35986 bytes -// MD5: 5b8759abf4bffde4575272089d73685b diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_schema.go deleted file mode 100644 index 8134ac4b88..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_schema.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLServiceSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Monitoring/Service", - Description: "The Monitoring Service resource", - StructName: "Service", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a Service", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "service", - Required: true, - Description: "A full instance of a Service", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a Service", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "service", - Required: true, - Description: "A full instance of a Service", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a Service", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "service", - Required: true, - Description: "A full instance of a Service", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all Service", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many Service", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "Service": &dcl.Component{ - Title: "Service", - ID: "projects/{{project}}/services/{{name}}", - ParentContainer: "project", - LabelsField: "userLabels", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "project", - }, - Properties: map[string]*dcl.Property{ - "displayName": &dcl.Property{ - Type: 
"string", - GoName: "DisplayName", - Description: "Name used for UI elements listing this Service.", - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Resource name for this Service. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]", - Immutable: true, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - }, - "telemetry": &dcl.Property{ - Type: "object", - GoName: "Telemetry", - GoType: "ServiceTelemetry", - Description: "Configuration for how to query telemetry on a Service.", - Properties: map[string]*dcl.Property{ - "resourceName": &dcl.Property{ - Type: "string", - GoName: "ResourceName", - Description: "The full name of the resource that defines this service. Formatted as described in https://cloud.google.com/apis/design/resource_names.", - }, - }, - }, - "userLabels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "UserLabels", - Description: "Labels which have been used to annotate the service. Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. 
For labels which do not have a semantic value, the empty string may be supplied for the label value.", - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_yaml_embed.go deleted file mode 100644 index 4049fe9e4f..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/service_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// GENERATED BY gen_go_data.go -// gen_go_data -package monitoring -var YAML_service blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/service.yaml - -package monitoring - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/service.yaml -var YAML_service = []byte("info:\n title: Monitoring/Service\n description: The Monitoring Service resource\n x-dcl-struct-name: Service\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a Service\n parameters:\n - name: service\n required: true\n description: A full instance of a Service\n apply:\n description: The function used to apply information about a Service\n parameters:\n - name: service\n required: true\n description: A full instance of a Service\n delete:\n description: The function used to delete a Service\n parameters:\n - name: service\n required: true\n description: A full instance of a Service\n deleteAll:\n description: The function used to delete all Service\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many Service\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n Service:\n title: Service\n x-dcl-id: projects/{{project}}/services/{{name}}\n x-dcl-parent-container: project\n x-dcl-labels: userLabels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - project\n properties:\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: Name used for UI elements listing this Service.\n name:\n type: string\n x-dcl-go-name: Name\n description: 'Resource name for this Service. 
The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]'\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n telemetry:\n type: object\n x-dcl-go-name: Telemetry\n x-dcl-go-type: ServiceTelemetry\n description: Configuration for how to query telemetry on a Service.\n properties:\n resourceName:\n type: string\n x-dcl-go-name: ResourceName\n description: The full name of the resource that defines this service.\n Formatted as described in https://cloud.google.com/apis/design/resource_names.\n userLabels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: UserLabels\n description: Labels which have been used to annotate the service. Label\n keys must start with a letter. Label keys and values may contain lowercase\n letters, numbers, underscores, and dashes. Label keys and values have\n a maximum length of 63 characters, and must be less than 128 bytes in\n size. Up to 64 label entries may be stored. For labels which do not have\n a semantic value, the empty string may be supplied for the label value.\n") - -// 3227 bytes -// MD5: 53c127549fe3c40be3319b17193e3d5c diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config.go deleted file mode 100644 index d01d9e112f..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config.go +++ /dev/null @@ -1,807 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package monitoring - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type UptimeCheckConfig struct { - Name *string `json:"name"` - DisplayName *string `json:"displayName"` - MonitoredResource *UptimeCheckConfigMonitoredResource `json:"monitoredResource"` - ResourceGroup *UptimeCheckConfigResourceGroup `json:"resourceGroup"` - HttpCheck *UptimeCheckConfigHttpCheck `json:"httpCheck"` - TcpCheck *UptimeCheckConfigTcpCheck `json:"tcpCheck"` - Period *string `json:"period"` - Timeout *string `json:"timeout"` - ContentMatchers []UptimeCheckConfigContentMatchers `json:"contentMatchers"` - SelectedRegions []string `json:"selectedRegions"` - Project *string `json:"project"` -} - -func (r *UptimeCheckConfig) String() string { - return dcl.SprintResource(r) -} - -// The enum UptimeCheckConfigResourceGroupResourceTypeEnum. -type UptimeCheckConfigResourceGroupResourceTypeEnum string - -// UptimeCheckConfigResourceGroupResourceTypeEnumRef returns a *UptimeCheckConfigResourceGroupResourceTypeEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func UptimeCheckConfigResourceGroupResourceTypeEnumRef(s string) *UptimeCheckConfigResourceGroupResourceTypeEnum { - v := UptimeCheckConfigResourceGroupResourceTypeEnum(s) - return &v -} - -func (v UptimeCheckConfigResourceGroupResourceTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"RESOURCE_TYPE_UNSPECIFIED", "INSTANCE", "AWS_ELB_LOAD_BALANCER"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "UptimeCheckConfigResourceGroupResourceTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum UptimeCheckConfigHttpCheckRequestMethodEnum. -type UptimeCheckConfigHttpCheckRequestMethodEnum string - -// UptimeCheckConfigHttpCheckRequestMethodEnumRef returns a *UptimeCheckConfigHttpCheckRequestMethodEnum with the value of string s -// If the empty string is provided, nil is returned. -func UptimeCheckConfigHttpCheckRequestMethodEnumRef(s string) *UptimeCheckConfigHttpCheckRequestMethodEnum { - v := UptimeCheckConfigHttpCheckRequestMethodEnum(s) - return &v -} - -func (v UptimeCheckConfigHttpCheckRequestMethodEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"METHOD_UNSPECIFIED", "GET", "POST"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "UptimeCheckConfigHttpCheckRequestMethodEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum UptimeCheckConfigHttpCheckContentTypeEnum. -type UptimeCheckConfigHttpCheckContentTypeEnum string - -// UptimeCheckConfigHttpCheckContentTypeEnumRef returns a *UptimeCheckConfigHttpCheckContentTypeEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func UptimeCheckConfigHttpCheckContentTypeEnumRef(s string) *UptimeCheckConfigHttpCheckContentTypeEnum { - v := UptimeCheckConfigHttpCheckContentTypeEnum(s) - return &v -} - -func (v UptimeCheckConfigHttpCheckContentTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"TYPE_UNSPECIFIED", "URL_ENCODED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "UptimeCheckConfigHttpCheckContentTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum UptimeCheckConfigContentMatchersMatcherEnum. -type UptimeCheckConfigContentMatchersMatcherEnum string - -// UptimeCheckConfigContentMatchersMatcherEnumRef returns a *UptimeCheckConfigContentMatchersMatcherEnum with the value of string s -// If the empty string is provided, nil is returned. -func UptimeCheckConfigContentMatchersMatcherEnumRef(s string) *UptimeCheckConfigContentMatchersMatcherEnum { - v := UptimeCheckConfigContentMatchersMatcherEnum(s) - return &v -} - -func (v UptimeCheckConfigContentMatchersMatcherEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"CONTENT_MATCHER_OPTION_UNSPECIFIED", "CONTAINS_STRING", "NOT_CONTAINS_STRING", "MATCHES_REGEX", "NOT_MATCHES_REGEX"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "UptimeCheckConfigContentMatchersMatcherEnum", - Value: string(v), - Valid: []string{}, - } -} - -type UptimeCheckConfigMonitoredResource struct { - empty bool `json:"-"` - Type *string `json:"type"` - FilterLabels map[string]string `json:"filterLabels"` -} - -type jsonUptimeCheckConfigMonitoredResource UptimeCheckConfigMonitoredResource - -func (r *UptimeCheckConfigMonitoredResource) UnmarshalJSON(data []byte) error { - var res jsonUptimeCheckConfigMonitoredResource - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyUptimeCheckConfigMonitoredResource - } else { - - r.Type = res.Type - - r.FilterLabels = res.FilterLabels - - } - return nil -} - -// This object is used to assert a desired state where this UptimeCheckConfigMonitoredResource is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyUptimeCheckConfigMonitoredResource *UptimeCheckConfigMonitoredResource = &UptimeCheckConfigMonitoredResource{empty: true} - -func (r *UptimeCheckConfigMonitoredResource) Empty() bool { - return r.empty -} - -func (r *UptimeCheckConfigMonitoredResource) String() string { - return dcl.SprintResource(r) -} - -func (r *UptimeCheckConfigMonitoredResource) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type UptimeCheckConfigResourceGroup struct { - empty bool `json:"-"` - GroupId *string `json:"groupId"` - ResourceType *UptimeCheckConfigResourceGroupResourceTypeEnum `json:"resourceType"` -} - -type jsonUptimeCheckConfigResourceGroup UptimeCheckConfigResourceGroup - -func (r *UptimeCheckConfigResourceGroup) UnmarshalJSON(data []byte) error { - var res jsonUptimeCheckConfigResourceGroup - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyUptimeCheckConfigResourceGroup - } else { - - r.GroupId = res.GroupId - - r.ResourceType = res.ResourceType - - } - return nil -} - -// This object is used to assert a desired state where this UptimeCheckConfigResourceGroup is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyUptimeCheckConfigResourceGroup *UptimeCheckConfigResourceGroup = &UptimeCheckConfigResourceGroup{empty: true} - -func (r *UptimeCheckConfigResourceGroup) Empty() bool { - return r.empty -} - -func (r *UptimeCheckConfigResourceGroup) String() string { - return dcl.SprintResource(r) -} - -func (r *UptimeCheckConfigResourceGroup) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type UptimeCheckConfigHttpCheck struct { - empty bool `json:"-"` - RequestMethod *UptimeCheckConfigHttpCheckRequestMethodEnum `json:"requestMethod"` - UseSsl *bool `json:"useSsl"` - Path *string `json:"path"` - Port *int64 `json:"port"` - AuthInfo *UptimeCheckConfigHttpCheckAuthInfo `json:"authInfo"` - MaskHeaders *bool `json:"maskHeaders"` - Headers map[string]string `json:"headers"` - ContentType *UptimeCheckConfigHttpCheckContentTypeEnum `json:"contentType"` - ValidateSsl *bool `json:"validateSsl"` - Body *string `json:"body"` -} - -type jsonUptimeCheckConfigHttpCheck UptimeCheckConfigHttpCheck - -func (r *UptimeCheckConfigHttpCheck) UnmarshalJSON(data []byte) error { - var res jsonUptimeCheckConfigHttpCheck - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyUptimeCheckConfigHttpCheck - } else { - - r.RequestMethod = res.RequestMethod - - r.UseSsl = res.UseSsl - - r.Path = res.Path - - r.Port = res.Port - - r.AuthInfo = res.AuthInfo - - r.MaskHeaders = res.MaskHeaders - - r.Headers = res.Headers - - r.ContentType = res.ContentType - - r.ValidateSsl = res.ValidateSsl - - r.Body = res.Body - - } - return nil -} - -// This object is used to assert a desired state where this UptimeCheckConfigHttpCheck is -// empty. Go lacks global const objects, but this object should be treated -// as one. 
Modifying this object will have undesirable results. -var EmptyUptimeCheckConfigHttpCheck *UptimeCheckConfigHttpCheck = &UptimeCheckConfigHttpCheck{empty: true} - -func (r *UptimeCheckConfigHttpCheck) Empty() bool { - return r.empty -} - -func (r *UptimeCheckConfigHttpCheck) String() string { - return dcl.SprintResource(r) -} - -func (r *UptimeCheckConfigHttpCheck) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type UptimeCheckConfigHttpCheckAuthInfo struct { - empty bool `json:"-"` - Username *string `json:"username"` - Password *string `json:"password"` -} - -type jsonUptimeCheckConfigHttpCheckAuthInfo UptimeCheckConfigHttpCheckAuthInfo - -func (r *UptimeCheckConfigHttpCheckAuthInfo) UnmarshalJSON(data []byte) error { - var res jsonUptimeCheckConfigHttpCheckAuthInfo - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyUptimeCheckConfigHttpCheckAuthInfo - } else { - - r.Username = res.Username - - r.Password = res.Password - - } - return nil -} - -// This object is used to assert a desired state where this UptimeCheckConfigHttpCheckAuthInfo is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyUptimeCheckConfigHttpCheckAuthInfo *UptimeCheckConfigHttpCheckAuthInfo = &UptimeCheckConfigHttpCheckAuthInfo{empty: true} - -func (r *UptimeCheckConfigHttpCheckAuthInfo) Empty() bool { - return r.empty -} - -func (r *UptimeCheckConfigHttpCheckAuthInfo) String() string { - return dcl.SprintResource(r) -} - -func (r *UptimeCheckConfigHttpCheckAuthInfo) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type UptimeCheckConfigTcpCheck struct { - empty bool `json:"-"` - Port *int64 `json:"port"` -} - -type jsonUptimeCheckConfigTcpCheck UptimeCheckConfigTcpCheck - -func (r *UptimeCheckConfigTcpCheck) UnmarshalJSON(data []byte) error { - var res jsonUptimeCheckConfigTcpCheck - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyUptimeCheckConfigTcpCheck - } else { - - r.Port = res.Port - - } - return nil -} - -// This object is used to assert a desired state where this UptimeCheckConfigTcpCheck is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyUptimeCheckConfigTcpCheck *UptimeCheckConfigTcpCheck = &UptimeCheckConfigTcpCheck{empty: true} - -func (r *UptimeCheckConfigTcpCheck) Empty() bool { - return r.empty -} - -func (r *UptimeCheckConfigTcpCheck) String() string { - return dcl.SprintResource(r) -} - -func (r *UptimeCheckConfigTcpCheck) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type UptimeCheckConfigContentMatchers struct { - empty bool `json:"-"` - Content *string `json:"content"` - Matcher *UptimeCheckConfigContentMatchersMatcherEnum `json:"matcher"` -} - -type jsonUptimeCheckConfigContentMatchers UptimeCheckConfigContentMatchers - -func (r *UptimeCheckConfigContentMatchers) UnmarshalJSON(data []byte) error { - var res jsonUptimeCheckConfigContentMatchers - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyUptimeCheckConfigContentMatchers - } else { - - r.Content = res.Content - - r.Matcher = res.Matcher - - } - return nil -} - -// This object is used to assert a desired state where this UptimeCheckConfigContentMatchers is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyUptimeCheckConfigContentMatchers *UptimeCheckConfigContentMatchers = &UptimeCheckConfigContentMatchers{empty: true} - -func (r *UptimeCheckConfigContentMatchers) Empty() bool { - return r.empty -} - -func (r *UptimeCheckConfigContentMatchers) String() string { - return dcl.SprintResource(r) -} - -func (r *UptimeCheckConfigContentMatchers) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. -func (r *UptimeCheckConfig) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "monitoring", - Type: "UptimeCheckConfig", - Version: "monitoring", - } -} - -func (r *UptimeCheckConfig) ID() (string, error) { - if err := extractUptimeCheckConfigFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "display_name": dcl.ValueOrEmptyString(nr.DisplayName), - "monitored_resource": dcl.ValueOrEmptyString(nr.MonitoredResource), - "resource_group": dcl.ValueOrEmptyString(nr.ResourceGroup), - "http_check": dcl.ValueOrEmptyString(nr.HttpCheck), - "tcp_check": dcl.ValueOrEmptyString(nr.TcpCheck), - "period": dcl.ValueOrEmptyString(nr.Period), - "timeout": dcl.ValueOrEmptyString(nr.Timeout), - "content_matchers": dcl.ValueOrEmptyString(nr.ContentMatchers), - "selected_regions": dcl.ValueOrEmptyString(nr.SelectedRegions), - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.Nprintf("projects/{{project}}/uptimeCheckConfigs/{{name}}", params), nil -} - -const UptimeCheckConfigMaxPage = -1 - -type UptimeCheckConfigList struct { - Items []*UptimeCheckConfig - - nextToken string - - pageSize int32 - - resource *UptimeCheckConfig -} - -func (l *UptimeCheckConfigList) 
HasNext() bool { - return l.nextToken != "" -} - -func (l *UptimeCheckConfigList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listUptimeCheckConfig(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListUptimeCheckConfig(ctx context.Context, project string) (*UptimeCheckConfigList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListUptimeCheckConfigWithMaxResults(ctx, project, UptimeCheckConfigMaxPage) - -} - -func (c *Client) ListUptimeCheckConfigWithMaxResults(ctx context.Context, project string, pageSize int32) (*UptimeCheckConfigList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. - r := &UptimeCheckConfig{ - Project: &project, - } - items, token, err := c.listUptimeCheckConfig(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &UptimeCheckConfigList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetUptimeCheckConfig(ctx context.Context, r *UptimeCheckConfig) (*UptimeCheckConfig, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. 
- extractUptimeCheckConfigFields(r) - - b, err := c.getUptimeCheckConfigRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalUptimeCheckConfig(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Name = r.Name - if dcl.IsZeroValue(result.Period) { - result.Period = dcl.String("60s") - } - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeUptimeCheckConfigNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractUptimeCheckConfigFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteUptimeCheckConfig(ctx context.Context, r *UptimeCheckConfig) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("UptimeCheckConfig resource is nil") - } - c.Config.Logger.InfoWithContext(ctx, "Deleting UptimeCheckConfig...") - deleteOp := deleteUptimeCheckConfigOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllUptimeCheckConfig deletes all resources that the filter functions returns true on. 
-func (c *Client) DeleteAllUptimeCheckConfig(ctx context.Context, project string, filter func(*UptimeCheckConfig) bool) error { - listObj, err := c.ListUptimeCheckConfig(ctx, project) - if err != nil { - return err - } - - err = c.deleteAllUptimeCheckConfig(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllUptimeCheckConfig(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyUptimeCheckConfig(ctx context.Context, rawDesired *UptimeCheckConfig, opts ...dcl.ApplyOption) (*UptimeCheckConfig, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *UptimeCheckConfig - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyUptimeCheckConfigHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. - if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyUptimeCheckConfigHelper(c *Client, ctx context.Context, rawDesired *UptimeCheckConfig, opts ...dcl.ApplyOption) (*UptimeCheckConfig, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyUptimeCheckConfig...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. 
- if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractUptimeCheckConfigFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.uptimeCheckConfigDiffsForRawDesired(ctx, rawDesired, opts...) - if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToUptimeCheckConfigDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). - - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []uptimeCheckConfigApiOperation - if create { - ops = append(ops, &createUptimeCheckConfigOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: 
%v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyUptimeCheckConfigDiff(c, ctx, desired, rawDesired, ops, opts...) -} - -func applyUptimeCheckConfigDiff(c *Client, ctx context.Context, desired *UptimeCheckConfig, rawDesired *UptimeCheckConfig, ops []uptimeCheckConfigApiOperation, opts ...dcl.ApplyOption) (*UptimeCheckConfig, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetUptimeCheckConfig(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createUptimeCheckConfigOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapUptimeCheckConfig(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeUptimeCheckConfigNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeUptimeCheckConfigNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. 
- // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeUptimeCheckConfigDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractUptimeCheckConfigFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. - if err := postReadExtractUptimeCheckConfigFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffUptimeCheckConfig(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config.yaml deleted file mode 100644 index 5bcc3b41db..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config.yaml +++ /dev/null @@ -1,332 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -info: - title: Monitoring/UptimeCheckConfig - description: The Monitoring UptimeCheckConfig resource - x-dcl-struct-name: UptimeCheckConfig - x-dcl-has-iam: false -paths: - get: - description: The function used to get information about a UptimeCheckConfig - parameters: - - name: uptimeCheckConfig - required: true - description: A full instance of a UptimeCheckConfig - apply: - description: The function used to apply information about a UptimeCheckConfig - parameters: - - name: uptimeCheckConfig - required: true - description: A full instance of a UptimeCheckConfig - delete: - description: The function used to delete a UptimeCheckConfig - parameters: - - name: uptimeCheckConfig - required: true - description: A full instance of a UptimeCheckConfig - deleteAll: - description: The function used to delete all UptimeCheckConfig - parameters: - - name: project - required: true - schema: - type: string - list: - description: The function used to list information about many UptimeCheckConfig - parameters: - - name: project - required: true - schema: - type: string -components: - schemas: - UptimeCheckConfig: - title: UptimeCheckConfig - x-dcl-id: projects/{{project}}/uptimeCheckConfigs/{{name}} - x-dcl-uses-state-hint: true - x-dcl-parent-container: project - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - displayName - - timeout - properties: - contentMatchers: - type: array - x-dcl-go-name: ContentMatchers - description: The content that is expected to appear 
in the data returned - by the target server against which the check is run. Currently, only - the first entry in the `content_matchers` list is supported, and additional - entries will be ignored. This field is optional and should only be specified - if a content match is required as part of the/ Uptime check. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: UptimeCheckConfigContentMatchers - required: - - content - properties: - content: - type: string - x-dcl-go-name: Content - matcher: - type: string - x-dcl-go-name: Matcher - x-dcl-go-type: UptimeCheckConfigContentMatchersMatcherEnum - description: ' Possible values: CONTENT_MATCHER_OPTION_UNSPECIFIED, - CONTAINS_STRING, NOT_CONTAINS_STRING, MATCHES_REGEX, NOT_MATCHES_REGEX' - default: CONTAINS_STRING - enum: - - CONTENT_MATCHER_OPTION_UNSPECIFIED - - CONTAINS_STRING - - NOT_CONTAINS_STRING - - MATCHES_REGEX - - NOT_MATCHES_REGEX - displayName: - type: string - x-dcl-go-name: DisplayName - description: A human-friendly name for the Uptime check configuration. The - display name should be unique within a Stackdriver Workspace in order - to make it easier to identify; however, uniqueness is not enforced. Required. - httpCheck: - type: object - x-dcl-go-name: HttpCheck - x-dcl-go-type: UptimeCheckConfigHttpCheck - description: Contains information needed to make an HTTP or HTTPS check. - x-dcl-conflicts: - - tcpCheck - properties: - authInfo: - type: object - x-dcl-go-name: AuthInfo - x-dcl-go-type: UptimeCheckConfigHttpCheckAuthInfo - description: The authentication information. Optional when creating - an HTTP check; defaults to empty. - required: - - username - - password - properties: - password: - type: string - x-dcl-go-name: Password - x-dcl-sensitive: true - x-dcl-mutable-unreadable: true - username: - type: string - x-dcl-go-name: Username - body: - type: string - x-dcl-go-name: Body - description: 'The request body associated with the HTTP POST request. 
- If `content_type` is `URL_ENCODED`, the body passed in must be URL-encoded. - Users can provide a `Content-Length` header via the `headers` field - or the API will do so. If the `request_method` is `GET` and `body` - is not empty, the API will return an error. The maximum byte size - is 1 megabyte. Note: As with all `bytes` fields JSON representations - are base64 encoded. e.g.: "foo=bar" in URL-encoded form is "foo%3Dbar" - and in base64 encoding is "Zm9vJTI1M0RiYXI=".' - contentType: - type: string - x-dcl-go-name: ContentType - x-dcl-go-type: UptimeCheckConfigHttpCheckContentTypeEnum - description: 'The content type to use for the check. Possible values: - TYPE_UNSPECIFIED, URL_ENCODED' - x-kubernetes-immutable: true - enum: - - TYPE_UNSPECIFIED - - URL_ENCODED - headers: - type: object - additionalProperties: - type: string - x-dcl-go-name: Headers - description: The list of headers to send as part of the Uptime check - request. If two headers have the same key and different values, they - should be entered as a single header, with the value being a comma-separated - list of all the desired values as described at https://www.w3.org/Protocols/rfc2616/rfc2616.txt - (page 31). Entering two separate headers with the same key in a Create - call will cause the first to be overwritten by the second. The maximum - number of headers allowed is 100. - x-dcl-server-default: true - x-dcl-mutable-unreadable: true - maskHeaders: - type: boolean - x-dcl-go-name: MaskHeaders - description: Boolean specifying whether to encrypt the header information. - Encryption should be specified for any headers related to authentication - that you do not wish to be seen when retrieving the configuration. - The server will be responsible for encrypting the headers. On Get/List - calls, if `mask_headers` is set to `true` then the headers will be - obscured with `******.` - x-kubernetes-immutable: true - path: - type: string - x-dcl-go-name: Path - description: Optional (defaults to "/"). 
The path to the page against - which to run the check. Will be combined with the `host` (specified - within the `monitored_resource`) and `port` to construct the full - URL. If the provided path does not begin with "/", a "/" will be prepended - automatically. - default: / - port: - type: integer - format: int64 - x-dcl-go-name: Port - description: Optional (defaults to 80 when `use_ssl` is `false`, and - 443 when `use_ssl` is `true`). The TCP port on the HTTP server against - which to run the check. Will be combined with host (specified within - the `monitored_resource`) and `path` to construct the full URL. - x-dcl-server-default: true - requestMethod: - type: string - x-dcl-go-name: RequestMethod - x-dcl-go-type: UptimeCheckConfigHttpCheckRequestMethodEnum - description: The HTTP request method to use for the check. If set to - `METHOD_UNSPECIFIED` then `request_method` defaults to `GET`. - x-kubernetes-immutable: true - default: GET - enum: - - METHOD_UNSPECIFIED - - GET - - POST - useSsl: - type: boolean - x-dcl-go-name: UseSsl - description: If `true`, use HTTPS instead of HTTP to run the check. - validateSsl: - type: boolean - x-dcl-go-name: ValidateSsl - description: Boolean specifying whether to include SSL certificate validation - as a part of the Uptime check. Only applies to checks where `monitored_resource` - is set to `uptime_url`. If `use_ssl` is `false`, setting `validate_ssl` - to `true` has no effect. - monitoredResource: - type: object - x-dcl-go-name: MonitoredResource - x-dcl-go-type: UptimeCheckConfigMonitoredResource - description: 'The [monitored resource](https://cloud.google.com/monitoring/api/resources) - associated with the configuration. 
The following monitored resource types - are supported for Uptime checks: `uptime_url`, `gce_instance`, `gae_app`, `aws_ec2_instance`, `aws_elb_load_balancer`' - x-kubernetes-immutable: true - x-dcl-conflicts: - - resourceGroup - required: - - type - - filterLabels - properties: - filterLabels: - type: object - additionalProperties: - type: string - x-dcl-go-name: FilterLabels - x-kubernetes-immutable: true - type: - type: string - x-dcl-go-name: Type - x-kubernetes-immutable: true - name: - type: string - x-dcl-go-name: Name - description: 'A unique resource name for this Uptime check configuration. - The format is: projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] - This field should be omitted when creating the Uptime check configuration; - on create, the resource name is assigned by the server and included in - the response.' - x-kubernetes-immutable: true - x-dcl-server-generated-parameter: true - period: - type: string - x-dcl-go-name: Period - description: How often, in seconds, the Uptime check is performed. Currently, - the only supported values are `60s` (1 minute), `300s` (5 minutes), `600s` - (10 minutes), and `900s` (15 minutes). Optional, defaults to `60s`. - default: 60s - project: - type: string - x-dcl-go-name: Project - description: The project for this uptime check config. - x-kubernetes-immutable: true - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - parent: true - resourceGroup: - type: object - x-dcl-go-name: ResourceGroup - x-dcl-go-type: UptimeCheckConfigResourceGroup - description: The group resource associated with the configuration. - x-kubernetes-immutable: true - x-dcl-conflicts: - - monitoredResource - properties: - groupId: - type: string - x-dcl-go-name: GroupId - description: The group of resources being monitored. Should be only - the `[GROUP_ID]`, and not the full-path `projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]`. 
- x-kubernetes-immutable: true - x-dcl-references: - - resource: Monitoring/Group - field: name - resourceType: - type: string - x-dcl-go-name: ResourceType - x-dcl-go-type: UptimeCheckConfigResourceGroupResourceTypeEnum - description: 'The resource type of the group members. Possible values: - RESOURCE_TYPE_UNSPECIFIED, INSTANCE, AWS_ELB_LOAD_BALANCER' - x-kubernetes-immutable: true - enum: - - RESOURCE_TYPE_UNSPECIFIED - - INSTANCE - - AWS_ELB_LOAD_BALANCER - selectedRegions: - type: array - x-dcl-go-name: SelectedRegions - description: The list of regions from which the check will be run. Some - regions contain one location, and others contain more than one. If this - field is specified, enough regions must be provided to include a minimum - of 3 locations. Not specifying this field will result in Uptime checks - running from all available regions. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - tcpCheck: - type: object - x-dcl-go-name: TcpCheck - x-dcl-go-type: UptimeCheckConfigTcpCheck - description: Contains information needed to make a TCP check. - x-dcl-conflicts: - - httpCheck - required: - - port - properties: - port: - type: integer - format: int64 - x-dcl-go-name: Port - description: The TCP port on the server against which to run the check. - Will be combined with host (specified within the `monitored_resource`) - to construct the full URL. Required. - timeout: - type: string - x-dcl-go-name: Timeout - description: The maximum amount of time to wait for the request to complete - (must be between 1 and 60 seconds). Required. 
diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config_internal.go deleted file mode 100644 index 1f7dcf942f..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config_internal.go +++ /dev/null @@ -1,3200 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package monitoring - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func (r *UptimeCheckConfig) validate() error { - - if err := dcl.ValidateExactlyOneOfFieldsSet([]string{"MonitoredResource", "ResourceGroup"}, r.MonitoredResource, r.ResourceGroup); err != nil { - return err - } - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"HttpCheck", "TcpCheck"}, r.HttpCheck, r.TcpCheck); err != nil { - return err - } - if err := dcl.Required(r, "displayName"); err != nil { - return err - } - if err := dcl.Required(r, "timeout"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.MonitoredResource) { - if err := r.MonitoredResource.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.ResourceGroup) { - if err := r.ResourceGroup.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.HttpCheck) { - if err := r.HttpCheck.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.TcpCheck) { - if err := r.TcpCheck.validate(); err != nil { - return err - } - } - return nil -} -func (r *UptimeCheckConfigMonitoredResource) validate() error { - if err := dcl.Required(r, "type"); err != nil { - return err - } - if err := dcl.Required(r, "filterLabels"); err != nil { - return err - } - return nil -} -func (r *UptimeCheckConfigResourceGroup) validate() error { - return nil -} -func (r *UptimeCheckConfigHttpCheck) validate() error { - if !dcl.IsEmptyValueIndirect(r.AuthInfo) { - if err := r.AuthInfo.validate(); err != nil { - return err - } - } - return nil -} -func (r *UptimeCheckConfigHttpCheckAuthInfo) validate() error { - if err := dcl.Required(r, "username"); err != nil { - return err - } - if err := dcl.Required(r, "password"); err != nil { - return err - } - return nil -} -func (r *UptimeCheckConfigTcpCheck) validate() error { - if err := dcl.Required(r, "port"); err != nil { 
- return err - } - return nil -} -func (r *UptimeCheckConfigContentMatchers) validate() error { - if err := dcl.Required(r, "content"); err != nil { - return err - } - return nil -} -func (r *UptimeCheckConfig) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://monitoring.googleapis.com/v3/", params) -} - -func (r *UptimeCheckConfig) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/uptimeCheckConfigs/{{name}}", nr.basePath(), userBasePath, params), nil -} - -func (r *UptimeCheckConfig) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/uptimeCheckConfigs", nr.basePath(), userBasePath, params), nil - -} - -func (r *UptimeCheckConfig) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - } - return dcl.URL("projects/{{project}}/uptimeCheckConfigs", nr.basePath(), userBasePath, params), nil - -} - -func (r *UptimeCheckConfig) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/uptimeCheckConfigs/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// uptimeCheckConfigApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. 
-type uptimeCheckConfigApiOperation interface { - do(context.Context, *UptimeCheckConfig, *Client) error -} - -// newUpdateUptimeCheckConfigUpdateUptimeCheckConfigRequest creates a request for an -// UptimeCheckConfig resource's UpdateUptimeCheckConfig update type by filling in the update -// fields based on the intended state of the resource. -func newUpdateUptimeCheckConfigUpdateUptimeCheckConfigRequest(ctx context.Context, f *UptimeCheckConfig, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { - req["displayName"] = v - } - if v, err := expandUptimeCheckConfigHttpCheck(c, f.HttpCheck, res); err != nil { - return nil, fmt.Errorf("error expanding HttpCheck into httpCheck: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["httpCheck"] = v - } - if v, err := expandUptimeCheckConfigTcpCheck(c, f.TcpCheck, res); err != nil { - return nil, fmt.Errorf("error expanding TcpCheck into tcpCheck: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["tcpCheck"] = v - } - if v := f.Period; !dcl.IsEmptyValueIndirect(v) { - req["period"] = v - } - if v := f.Timeout; !dcl.IsEmptyValueIndirect(v) { - req["timeout"] = v - } - if v, err := expandUptimeCheckConfigContentMatchersSlice(c, f.ContentMatchers, res); err != nil { - return nil, fmt.Errorf("error expanding ContentMatchers into contentMatchers: %w", err) - } else if v != nil { - req["contentMatchers"] = v - } - if v := f.SelectedRegions; v != nil { - req["selectedRegions"] = v - } - req["name"] = fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s", *f.Project, *f.Name) - - return req, nil -} - -// marshalUpdateUptimeCheckConfigUpdateUptimeCheckConfigRequest converts the update into -// the final JSON request body. 
-func marshalUpdateUptimeCheckConfigUpdateUptimeCheckConfigRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - return json.Marshal(m) -} - -type updateUptimeCheckConfigUpdateUptimeCheckConfigOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. - -func (op *updateUptimeCheckConfigUpdateUptimeCheckConfigOperation) do(ctx context.Context, r *UptimeCheckConfig, c *Client) error { - _, err := c.GetUptimeCheckConfig(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateUptimeCheckConfig") - if err != nil { - return err - } - mask := dcl.UpdateMask(op.FieldDiffs) - u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) - if err != nil { - return err - } - - req, err := newUpdateUptimeCheckConfigUpdateUptimeCheckConfigRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateUptimeCheckConfigUpdateUptimeCheckConfigRequest(c, req) - if err != nil { - return err - } - _, err = dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - return nil -} - -func (c *Client) listUptimeCheckConfigRaw(ctx context.Context, r *UptimeCheckConfig, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != 
UptimeCheckConfigMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listUptimeCheckConfigOperation struct { - UptimeCheckConfigs []map[string]interface{} `json:"uptimeCheckConfigs"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listUptimeCheckConfig(ctx context.Context, r *UptimeCheckConfig, pageToken string, pageSize int32) ([]*UptimeCheckConfig, string, error) { - b, err := c.listUptimeCheckConfigRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listUptimeCheckConfigOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*UptimeCheckConfig - for _, v := range m.UptimeCheckConfigs { - res, err := unmarshalMapUptimeCheckConfig(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Project = r.Project - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllUptimeCheckConfig(ctx context.Context, f func(*UptimeCheckConfig) bool, resources []*UptimeCheckConfig) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting other resources. 
- err := c.DeleteUptimeCheckConfig(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteUptimeCheckConfigOperation struct{} - -func (op *deleteUptimeCheckConfigOperation) do(ctx context.Context, r *UptimeCheckConfig, c *Client) error { - r, err := c.GetUptimeCheckConfig(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "UptimeCheckConfig not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetUptimeCheckConfig checking for existence. error: %v", err) - return err - } - - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - _, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return fmt.Errorf("failed to delete UptimeCheckConfig: %w", err) - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. - retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetUptimeCheckConfig(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. 
-type createUptimeCheckConfigOperation struct { - response map[string]interface{} -} - -func (op *createUptimeCheckConfigOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (op *createUptimeCheckConfigOperation) do(ctx context.Context, r *UptimeCheckConfig, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - if r.Name != nil { - // Allowing creation to continue with Name set could result in a UptimeCheckConfig with the wrong Name. - return fmt.Errorf("server-generated parameter Name was specified by user as %v, should be unspecified", dcl.ValueOrEmptyString(r.Name)) - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - - o, err := dcl.ResponseBodyAsJSON(resp) - if err != nil { - return fmt.Errorf("error decoding response body into JSON: %w", err) - } - op.response = o - - // Include Name in URL substitution for initial GET request. 
- m := op.response - r.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) - - if _, err := c.GetUptimeCheckConfig(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (c *Client) getUptimeCheckConfigRaw(ctx context.Context, r *UptimeCheckConfig) ([]byte, error) { - if dcl.IsZeroValue(r.Period) { - r.Period = dcl.String("60s") - } - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) uptimeCheckConfigDiffsForRawDesired(ctx context.Context, rawDesired *UptimeCheckConfig, opts ...dcl.ApplyOption) (initial, desired *UptimeCheckConfig, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. - var fetchState *UptimeCheckConfig - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*UptimeCheckConfig); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected UptimeCheckConfig, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - if fetchState.Name == nil { - // We cannot perform a get because of lack of information. We have to assume - // that this is being created for the first time. 
- desired, err := canonicalizeUptimeCheckConfigDesiredState(rawDesired, nil) - return nil, desired, nil, err - } - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetUptimeCheckConfig(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a UptimeCheckConfig resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve UptimeCheckConfig resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that UptimeCheckConfig resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeUptimeCheckConfigDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for UptimeCheckConfig: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for UptimeCheckConfig: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractUptimeCheckConfigFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeUptimeCheckConfigInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for UptimeCheckConfig: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeUptimeCheckConfigDesiredState(rawDesired, rawInitial, opts...) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for UptimeCheckConfig: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffUptimeCheckConfig(c, desired, initial, opts...) 
- return initial, desired, diffs, err -} - -func canonicalizeUptimeCheckConfigInitialState(rawInitial, rawDesired *UptimeCheckConfig) (*UptimeCheckConfig, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - - if !dcl.IsZeroValue(rawInitial.MonitoredResource) { - // Check if anything else is set. - if dcl.AnySet(rawInitial.ResourceGroup) { - rawInitial.MonitoredResource = EmptyUptimeCheckConfigMonitoredResource - } - } - - if !dcl.IsZeroValue(rawInitial.ResourceGroup) { - // Check if anything else is set. - if dcl.AnySet(rawInitial.MonitoredResource) { - rawInitial.ResourceGroup = EmptyUptimeCheckConfigResourceGroup - } - } - - if !dcl.IsZeroValue(rawInitial.HttpCheck) { - // Check if anything else is set. - if dcl.AnySet(rawInitial.TcpCheck) { - rawInitial.HttpCheck = EmptyUptimeCheckConfigHttpCheck - } - } - - if !dcl.IsZeroValue(rawInitial.TcpCheck) { - // Check if anything else is set. - if dcl.AnySet(rawInitial.HttpCheck) { - rawInitial.TcpCheck = EmptyUptimeCheckConfigTcpCheck - } - } - - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeUptimeCheckConfigDesiredState(rawDesired, rawInitial *UptimeCheckConfig, opts ...dcl.ApplyOption) (*UptimeCheckConfig, error) { - - if dcl.IsZeroValue(rawDesired.Period) { - rawDesired.Period = dcl.String("60s") - } - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - rawDesired.MonitoredResource = canonicalizeUptimeCheckConfigMonitoredResource(rawDesired.MonitoredResource, nil, opts...) - rawDesired.ResourceGroup = canonicalizeUptimeCheckConfigResourceGroup(rawDesired.ResourceGroup, nil, opts...) 
- rawDesired.HttpCheck = canonicalizeUptimeCheckConfigHttpCheck(rawDesired.HttpCheck, nil, opts...) - rawDesired.TcpCheck = canonicalizeUptimeCheckConfigTcpCheck(rawDesired.TcpCheck, nil, opts...) - - return rawDesired, nil - } - canonicalDesired := &UptimeCheckConfig{} - if dcl.IsZeroValue(rawDesired.Name) || (dcl.IsEmptyValueIndirect(rawDesired.Name) && dcl.IsEmptyValueIndirect(rawInitial.Name)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.StringCanonicalize(rawDesired.DisplayName, rawInitial.DisplayName) { - canonicalDesired.DisplayName = rawInitial.DisplayName - } else { - canonicalDesired.DisplayName = rawDesired.DisplayName - } - canonicalDesired.MonitoredResource = canonicalizeUptimeCheckConfigMonitoredResource(rawDesired.MonitoredResource, rawInitial.MonitoredResource, opts...) - canonicalDesired.ResourceGroup = canonicalizeUptimeCheckConfigResourceGroup(rawDesired.ResourceGroup, rawInitial.ResourceGroup, opts...) - canonicalDesired.HttpCheck = canonicalizeUptimeCheckConfigHttpCheck(rawDesired.HttpCheck, rawInitial.HttpCheck, opts...) - canonicalDesired.TcpCheck = canonicalizeUptimeCheckConfigTcpCheck(rawDesired.TcpCheck, rawInitial.TcpCheck, opts...) - if dcl.StringCanonicalize(rawDesired.Period, rawInitial.Period) { - canonicalDesired.Period = rawInitial.Period - } else { - canonicalDesired.Period = rawDesired.Period - } - if dcl.StringCanonicalize(rawDesired.Timeout, rawInitial.Timeout) { - canonicalDesired.Timeout = rawInitial.Timeout - } else { - canonicalDesired.Timeout = rawDesired.Timeout - } - canonicalDesired.ContentMatchers = canonicalizeUptimeCheckConfigContentMatchersSlice(rawDesired.ContentMatchers, rawInitial.ContentMatchers, opts...) 
- if dcl.StringArrayCanonicalize(rawDesired.SelectedRegions, rawInitial.SelectedRegions) { - canonicalDesired.SelectedRegions = rawInitial.SelectedRegions - } else { - canonicalDesired.SelectedRegions = rawDesired.SelectedRegions - } - if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - - if canonicalDesired.MonitoredResource != nil { - // Check if anything else is set. - if dcl.AnySet(rawDesired.ResourceGroup) { - canonicalDesired.MonitoredResource = EmptyUptimeCheckConfigMonitoredResource - } - } - - if canonicalDesired.ResourceGroup != nil { - // Check if anything else is set. - if dcl.AnySet(rawDesired.MonitoredResource) { - canonicalDesired.ResourceGroup = EmptyUptimeCheckConfigResourceGroup - } - } - - if canonicalDesired.HttpCheck != nil { - // Check if anything else is set. - if dcl.AnySet(rawDesired.TcpCheck) { - canonicalDesired.HttpCheck = EmptyUptimeCheckConfigHttpCheck - } - } - - if canonicalDesired.TcpCheck != nil { - // Check if anything else is set. 
- if dcl.AnySet(rawDesired.HttpCheck) { - canonicalDesired.TcpCheck = EmptyUptimeCheckConfigTcpCheck - } - } - - return canonicalDesired, nil -} - -func canonicalizeUptimeCheckConfigNewState(c *Client, rawNew, rawDesired *UptimeCheckConfig) (*UptimeCheckConfig, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.DisplayName) && dcl.IsEmptyValueIndirect(rawDesired.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } else { - if dcl.StringCanonicalize(rawDesired.DisplayName, rawNew.DisplayName) { - rawNew.DisplayName = rawDesired.DisplayName - } - } - - if dcl.IsEmptyValueIndirect(rawNew.MonitoredResource) && dcl.IsEmptyValueIndirect(rawDesired.MonitoredResource) { - rawNew.MonitoredResource = rawDesired.MonitoredResource - } else { - rawNew.MonitoredResource = canonicalizeNewUptimeCheckConfigMonitoredResource(c, rawDesired.MonitoredResource, rawNew.MonitoredResource) - } - - if dcl.IsEmptyValueIndirect(rawNew.ResourceGroup) && dcl.IsEmptyValueIndirect(rawDesired.ResourceGroup) { - rawNew.ResourceGroup = rawDesired.ResourceGroup - } else { - rawNew.ResourceGroup = canonicalizeNewUptimeCheckConfigResourceGroup(c, rawDesired.ResourceGroup, rawNew.ResourceGroup) - } - - if dcl.IsEmptyValueIndirect(rawNew.HttpCheck) && dcl.IsEmptyValueIndirect(rawDesired.HttpCheck) { - rawNew.HttpCheck = rawDesired.HttpCheck - } else { - rawNew.HttpCheck = canonicalizeNewUptimeCheckConfigHttpCheck(c, rawDesired.HttpCheck, rawNew.HttpCheck) - } - - if dcl.IsEmptyValueIndirect(rawNew.TcpCheck) && dcl.IsEmptyValueIndirect(rawDesired.TcpCheck) { - rawNew.TcpCheck = rawDesired.TcpCheck - } else { - rawNew.TcpCheck = canonicalizeNewUptimeCheckConfigTcpCheck(c, rawDesired.TcpCheck, rawNew.TcpCheck) - } - - if dcl.IsEmptyValueIndirect(rawNew.Period) && dcl.IsEmptyValueIndirect(rawDesired.Period) { - rawNew.Period = rawDesired.Period - } else { - if 
dcl.StringCanonicalize(rawDesired.Period, rawNew.Period) { - rawNew.Period = rawDesired.Period - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Timeout) && dcl.IsEmptyValueIndirect(rawDesired.Timeout) { - rawNew.Timeout = rawDesired.Timeout - } else { - if dcl.StringCanonicalize(rawDesired.Timeout, rawNew.Timeout) { - rawNew.Timeout = rawDesired.Timeout - } - } - - if dcl.IsEmptyValueIndirect(rawNew.ContentMatchers) && dcl.IsEmptyValueIndirect(rawDesired.ContentMatchers) { - rawNew.ContentMatchers = rawDesired.ContentMatchers - } else { - rawNew.ContentMatchers = canonicalizeNewUptimeCheckConfigContentMatchersSlice(c, rawDesired.ContentMatchers, rawNew.ContentMatchers) - } - - if dcl.IsEmptyValueIndirect(rawNew.SelectedRegions) && dcl.IsEmptyValueIndirect(rawDesired.SelectedRegions) { - rawNew.SelectedRegions = rawDesired.SelectedRegions - } else { - if dcl.StringArrayCanonicalize(rawDesired.SelectedRegions, rawNew.SelectedRegions) { - rawNew.SelectedRegions = rawDesired.SelectedRegions - } - } - - rawNew.Project = rawDesired.Project - - return rawNew, nil -} - -func canonicalizeUptimeCheckConfigMonitoredResource(des, initial *UptimeCheckConfigMonitoredResource, opts ...dcl.ApplyOption) *UptimeCheckConfigMonitoredResource { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &UptimeCheckConfigMonitoredResource{} - - if dcl.StringCanonicalize(des.Type, initial.Type) || dcl.IsZeroValue(des.Type) { - cDes.Type = initial.Type - } else { - cDes.Type = des.Type - } - if dcl.IsZeroValue(des.FilterLabels) || (dcl.IsEmptyValueIndirect(des.FilterLabels) && dcl.IsEmptyValueIndirect(initial.FilterLabels)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.FilterLabels = initial.FilterLabels - } else { - cDes.FilterLabels = des.FilterLabels - } - - return cDes -} - -func canonicalizeUptimeCheckConfigMonitoredResourceSlice(des, initial []UptimeCheckConfigMonitoredResource, opts ...dcl.ApplyOption) []UptimeCheckConfigMonitoredResource { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]UptimeCheckConfigMonitoredResource, 0, len(des)) - for _, d := range des { - cd := canonicalizeUptimeCheckConfigMonitoredResource(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]UptimeCheckConfigMonitoredResource, 0, len(des)) - for i, d := range des { - cd := canonicalizeUptimeCheckConfigMonitoredResource(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewUptimeCheckConfigMonitoredResource(c *Client, des, nw *UptimeCheckConfigMonitoredResource) *UptimeCheckConfigMonitoredResource { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for UptimeCheckConfigMonitoredResource while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Type, nw.Type) { - nw.Type = des.Type - } - - return nw -} - -func canonicalizeNewUptimeCheckConfigMonitoredResourceSet(c *Client, des, nw []UptimeCheckConfigMonitoredResource) []UptimeCheckConfigMonitoredResource { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []UptimeCheckConfigMonitoredResource - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareUptimeCheckConfigMonitoredResourceNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewUptimeCheckConfigMonitoredResource(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewUptimeCheckConfigMonitoredResourceSlice(c *Client, des, nw []UptimeCheckConfigMonitoredResource) []UptimeCheckConfigMonitoredResource { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []UptimeCheckConfigMonitoredResource - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewUptimeCheckConfigMonitoredResource(c, &d, &n)) - } - - return items -} - -func canonicalizeUptimeCheckConfigResourceGroup(des, initial *UptimeCheckConfigResourceGroup, opts ...dcl.ApplyOption) *UptimeCheckConfigResourceGroup { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &UptimeCheckConfigResourceGroup{} - - if dcl.IsZeroValue(des.GroupId) || (dcl.IsEmptyValueIndirect(des.GroupId) && dcl.IsEmptyValueIndirect(initial.GroupId)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.GroupId = initial.GroupId - } else { - cDes.GroupId = des.GroupId - } - if dcl.IsZeroValue(des.ResourceType) || (dcl.IsEmptyValueIndirect(des.ResourceType) && dcl.IsEmptyValueIndirect(initial.ResourceType)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.ResourceType = initial.ResourceType - } else { - cDes.ResourceType = des.ResourceType - } - - return cDes -} - -func canonicalizeUptimeCheckConfigResourceGroupSlice(des, initial []UptimeCheckConfigResourceGroup, opts ...dcl.ApplyOption) []UptimeCheckConfigResourceGroup { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]UptimeCheckConfigResourceGroup, 0, len(des)) - for _, d := range des { - cd := canonicalizeUptimeCheckConfigResourceGroup(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]UptimeCheckConfigResourceGroup, 0, len(des)) - for i, d := range des { - cd := canonicalizeUptimeCheckConfigResourceGroup(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewUptimeCheckConfigResourceGroup(c *Client, des, nw *UptimeCheckConfigResourceGroup) *UptimeCheckConfigResourceGroup { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for UptimeCheckConfigResourceGroup while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewUptimeCheckConfigResourceGroupSet(c *Client, des, nw []UptimeCheckConfigResourceGroup) []UptimeCheckConfigResourceGroup { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []UptimeCheckConfigResourceGroup - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareUptimeCheckConfigResourceGroupNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewUptimeCheckConfigResourceGroup(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewUptimeCheckConfigResourceGroupSlice(c *Client, des, nw []UptimeCheckConfigResourceGroup) []UptimeCheckConfigResourceGroup { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []UptimeCheckConfigResourceGroup - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewUptimeCheckConfigResourceGroup(c, &d, &n)) - } - - return items -} - -func canonicalizeUptimeCheckConfigHttpCheck(des, initial *UptimeCheckConfigHttpCheck, opts ...dcl.ApplyOption) *UptimeCheckConfigHttpCheck { - if des == nil { - return initial - } - if des.empty { - return des - } - - if dcl.IsZeroValue(des.RequestMethod) { - des.RequestMethod = UptimeCheckConfigHttpCheckRequestMethodEnumRef("GET") - } - - if dcl.IsZeroValue(des.Path) { - des.Path = dcl.String("/") - } - - if initial == nil { - return des - } - - cDes := &UptimeCheckConfigHttpCheck{} - - if dcl.IsZeroValue(des.RequestMethod) || (dcl.IsEmptyValueIndirect(des.RequestMethod) && dcl.IsEmptyValueIndirect(initial.RequestMethod)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.RequestMethod = initial.RequestMethod - } else { - cDes.RequestMethod = des.RequestMethod - } - if dcl.BoolCanonicalize(des.UseSsl, initial.UseSsl) || dcl.IsZeroValue(des.UseSsl) { - cDes.UseSsl = initial.UseSsl - } else { - cDes.UseSsl = des.UseSsl - } - if dcl.StringCanonicalize(des.Path, initial.Path) || dcl.IsZeroValue(des.Path) { - cDes.Path = initial.Path - } else { - cDes.Path = des.Path - } - if dcl.IsZeroValue(des.Port) || (dcl.IsEmptyValueIndirect(des.Port) && dcl.IsEmptyValueIndirect(initial.Port)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Port = initial.Port - } else { - cDes.Port = des.Port - } - cDes.AuthInfo = canonicalizeUptimeCheckConfigHttpCheckAuthInfo(des.AuthInfo, initial.AuthInfo, opts...) - if dcl.BoolCanonicalize(des.MaskHeaders, initial.MaskHeaders) || dcl.IsZeroValue(des.MaskHeaders) { - cDes.MaskHeaders = initial.MaskHeaders - } else { - cDes.MaskHeaders = des.MaskHeaders - } - if dcl.IsZeroValue(des.Headers) || (dcl.IsEmptyValueIndirect(des.Headers) && dcl.IsEmptyValueIndirect(initial.Headers)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Headers = initial.Headers - } else { - cDes.Headers = des.Headers - } - if dcl.IsZeroValue(des.ContentType) || (dcl.IsEmptyValueIndirect(des.ContentType) && dcl.IsEmptyValueIndirect(initial.ContentType)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.ContentType = initial.ContentType - } else { - cDes.ContentType = des.ContentType - } - if dcl.BoolCanonicalize(des.ValidateSsl, initial.ValidateSsl) || dcl.IsZeroValue(des.ValidateSsl) { - cDes.ValidateSsl = initial.ValidateSsl - } else { - cDes.ValidateSsl = des.ValidateSsl - } - if dcl.StringCanonicalize(des.Body, initial.Body) || dcl.IsZeroValue(des.Body) { - cDes.Body = initial.Body - } else { - cDes.Body = des.Body - } - - return cDes -} - -func canonicalizeUptimeCheckConfigHttpCheckSlice(des, initial []UptimeCheckConfigHttpCheck, opts ...dcl.ApplyOption) []UptimeCheckConfigHttpCheck { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]UptimeCheckConfigHttpCheck, 0, len(des)) - for _, d := range des { - cd := canonicalizeUptimeCheckConfigHttpCheck(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]UptimeCheckConfigHttpCheck, 0, len(des)) - for i, d := range des { - cd := canonicalizeUptimeCheckConfigHttpCheck(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewUptimeCheckConfigHttpCheck(c *Client, des, nw *UptimeCheckConfigHttpCheck) *UptimeCheckConfigHttpCheck { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for UptimeCheckConfigHttpCheck while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.IsZeroValue(nw.RequestMethod) { - nw.RequestMethod = UptimeCheckConfigHttpCheckRequestMethodEnumRef("GET") - } - - if dcl.IsZeroValue(nw.Path) { - nw.Path = dcl.String("/") - } - - if dcl.BoolCanonicalize(des.UseSsl, nw.UseSsl) { - nw.UseSsl = des.UseSsl - } - if dcl.StringCanonicalize(des.Path, nw.Path) { - nw.Path = des.Path - } - nw.AuthInfo = canonicalizeNewUptimeCheckConfigHttpCheckAuthInfo(c, des.AuthInfo, nw.AuthInfo) - if dcl.BoolCanonicalize(des.MaskHeaders, nw.MaskHeaders) { - nw.MaskHeaders = des.MaskHeaders - } - nw.Headers = des.Headers - if dcl.BoolCanonicalize(des.ValidateSsl, nw.ValidateSsl) { - nw.ValidateSsl = des.ValidateSsl - } - if dcl.StringCanonicalize(des.Body, nw.Body) { - nw.Body = des.Body - } - - return nw -} - -func canonicalizeNewUptimeCheckConfigHttpCheckSet(c *Client, des, nw []UptimeCheckConfigHttpCheck) []UptimeCheckConfigHttpCheck { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []UptimeCheckConfigHttpCheck - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareUptimeCheckConfigHttpCheckNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewUptimeCheckConfigHttpCheck(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewUptimeCheckConfigHttpCheckSlice(c *Client, des, nw []UptimeCheckConfigHttpCheck) []UptimeCheckConfigHttpCheck { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []UptimeCheckConfigHttpCheck - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewUptimeCheckConfigHttpCheck(c, &d, &n)) - } - - return items -} - -func canonicalizeUptimeCheckConfigHttpCheckAuthInfo(des, initial *UptimeCheckConfigHttpCheckAuthInfo, opts ...dcl.ApplyOption) *UptimeCheckConfigHttpCheckAuthInfo { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &UptimeCheckConfigHttpCheckAuthInfo{} - - if dcl.StringCanonicalize(des.Username, initial.Username) || dcl.IsZeroValue(des.Username) { - cDes.Username = initial.Username - } else { - cDes.Username = des.Username - } - if dcl.StringCanonicalize(des.Password, initial.Password) || dcl.IsZeroValue(des.Password) { - cDes.Password = initial.Password - } else { - cDes.Password = des.Password - } - - return cDes -} - -func canonicalizeUptimeCheckConfigHttpCheckAuthInfoSlice(des, initial []UptimeCheckConfigHttpCheckAuthInfo, opts ...dcl.ApplyOption) []UptimeCheckConfigHttpCheckAuthInfo { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]UptimeCheckConfigHttpCheckAuthInfo, 0, len(des)) - for _, d := range des { - cd := canonicalizeUptimeCheckConfigHttpCheckAuthInfo(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]UptimeCheckConfigHttpCheckAuthInfo, 0, len(des)) - for i, d := range des { - cd := canonicalizeUptimeCheckConfigHttpCheckAuthInfo(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewUptimeCheckConfigHttpCheckAuthInfo(c *Client, des, nw *UptimeCheckConfigHttpCheckAuthInfo) *UptimeCheckConfigHttpCheckAuthInfo { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for UptimeCheckConfigHttpCheckAuthInfo while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Username, nw.Username) { - nw.Username = des.Username - } - nw.Password = des.Password - - return nw -} - -func canonicalizeNewUptimeCheckConfigHttpCheckAuthInfoSet(c *Client, des, nw []UptimeCheckConfigHttpCheckAuthInfo) []UptimeCheckConfigHttpCheckAuthInfo { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []UptimeCheckConfigHttpCheckAuthInfo - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareUptimeCheckConfigHttpCheckAuthInfoNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewUptimeCheckConfigHttpCheckAuthInfo(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewUptimeCheckConfigHttpCheckAuthInfoSlice(c *Client, des, nw []UptimeCheckConfigHttpCheckAuthInfo) []UptimeCheckConfigHttpCheckAuthInfo { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []UptimeCheckConfigHttpCheckAuthInfo - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewUptimeCheckConfigHttpCheckAuthInfo(c, &d, &n)) - } - - return items -} - -func canonicalizeUptimeCheckConfigTcpCheck(des, initial *UptimeCheckConfigTcpCheck, opts ...dcl.ApplyOption) *UptimeCheckConfigTcpCheck { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &UptimeCheckConfigTcpCheck{} - - if dcl.IsZeroValue(des.Port) || (dcl.IsEmptyValueIndirect(des.Port) && dcl.IsEmptyValueIndirect(initial.Port)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Port = initial.Port - } else { - cDes.Port = des.Port - } - - return cDes -} - -func canonicalizeUptimeCheckConfigTcpCheckSlice(des, initial []UptimeCheckConfigTcpCheck, opts ...dcl.ApplyOption) []UptimeCheckConfigTcpCheck { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]UptimeCheckConfigTcpCheck, 0, len(des)) - for _, d := range des { - cd := canonicalizeUptimeCheckConfigTcpCheck(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]UptimeCheckConfigTcpCheck, 0, len(des)) - for i, d := range des { - cd := canonicalizeUptimeCheckConfigTcpCheck(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewUptimeCheckConfigTcpCheck(c *Client, des, nw *UptimeCheckConfigTcpCheck) *UptimeCheckConfigTcpCheck { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for UptimeCheckConfigTcpCheck while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewUptimeCheckConfigTcpCheckSet(c *Client, des, nw []UptimeCheckConfigTcpCheck) []UptimeCheckConfigTcpCheck { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []UptimeCheckConfigTcpCheck - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareUptimeCheckConfigTcpCheckNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewUptimeCheckConfigTcpCheck(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewUptimeCheckConfigTcpCheckSlice(c *Client, des, nw []UptimeCheckConfigTcpCheck) []UptimeCheckConfigTcpCheck { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []UptimeCheckConfigTcpCheck - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewUptimeCheckConfigTcpCheck(c, &d, &n)) - } - - return items -} - -func canonicalizeUptimeCheckConfigContentMatchers(des, initial *UptimeCheckConfigContentMatchers, opts ...dcl.ApplyOption) *UptimeCheckConfigContentMatchers { - if des == nil { - return initial - } - if des.empty { - return des - } - - if dcl.IsZeroValue(des.Matcher) { - des.Matcher = UptimeCheckConfigContentMatchersMatcherEnumRef("CONTAINS_STRING") - } - - if initial == nil { - return des - } - - cDes := &UptimeCheckConfigContentMatchers{} - - if dcl.StringCanonicalize(des.Content, initial.Content) || dcl.IsZeroValue(des.Content) { - cDes.Content = initial.Content - } else { - cDes.Content = des.Content - } - if dcl.IsZeroValue(des.Matcher) || (dcl.IsEmptyValueIndirect(des.Matcher) && dcl.IsEmptyValueIndirect(initial.Matcher)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Matcher = initial.Matcher - } else { - cDes.Matcher = des.Matcher - } - - return cDes -} - -func canonicalizeUptimeCheckConfigContentMatchersSlice(des, initial []UptimeCheckConfigContentMatchers, opts ...dcl.ApplyOption) []UptimeCheckConfigContentMatchers { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]UptimeCheckConfigContentMatchers, 0, len(des)) - for _, d := range des { - cd := canonicalizeUptimeCheckConfigContentMatchers(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]UptimeCheckConfigContentMatchers, 0, len(des)) - for i, d := range des { - cd := canonicalizeUptimeCheckConfigContentMatchers(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewUptimeCheckConfigContentMatchers(c *Client, des, nw *UptimeCheckConfigContentMatchers) *UptimeCheckConfigContentMatchers { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for UptimeCheckConfigContentMatchers while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.IsZeroValue(nw.Matcher) { - nw.Matcher = UptimeCheckConfigContentMatchersMatcherEnumRef("CONTAINS_STRING") - } - - if dcl.StringCanonicalize(des.Content, nw.Content) { - nw.Content = des.Content - } - - return nw -} - -func canonicalizeNewUptimeCheckConfigContentMatchersSet(c *Client, des, nw []UptimeCheckConfigContentMatchers) []UptimeCheckConfigContentMatchers { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []UptimeCheckConfigContentMatchers - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareUptimeCheckConfigContentMatchersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewUptimeCheckConfigContentMatchers(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewUptimeCheckConfigContentMatchersSlice(c *Client, des, nw []UptimeCheckConfigContentMatchers) []UptimeCheckConfigContentMatchers { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []UptimeCheckConfigContentMatchers - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewUptimeCheckConfigContentMatchers(c, &d, &n)) - } - - return items -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. -func diffUptimeCheckConfig(c *Client, desired, actual *UptimeCheckConfig, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.MonitoredResource, actual.MonitoredResource, dcl.DiffInfo{ObjectFunction: compareUptimeCheckConfigMonitoredResourceNewStyle, EmptyObject: EmptyUptimeCheckConfigMonitoredResource, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MonitoredResource")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.ResourceGroup, actual.ResourceGroup, dcl.DiffInfo{ObjectFunction: compareUptimeCheckConfigResourceGroupNewStyle, EmptyObject: EmptyUptimeCheckConfigResourceGroup, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceGroup")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.HttpCheck, actual.HttpCheck, dcl.DiffInfo{ObjectFunction: compareUptimeCheckConfigHttpCheckNewStyle, EmptyObject: EmptyUptimeCheckConfigHttpCheck, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("HttpCheck")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.TcpCheck, actual.TcpCheck, dcl.DiffInfo{ObjectFunction: compareUptimeCheckConfigTcpCheckNewStyle, EmptyObject: EmptyUptimeCheckConfigTcpCheck, OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("TcpCheck")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Period, actual.Period, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("Period")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Timeout, actual.Timeout, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("Timeout")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.ContentMatchers, actual.ContentMatchers, dcl.DiffInfo{ObjectFunction: compareUptimeCheckConfigContentMatchersNewStyle, EmptyObject: EmptyUptimeCheckConfigContentMatchers, OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("ContentMatchers")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.SelectedRegions, actual.SelectedRegions, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("SelectedRegions")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} -func compareUptimeCheckConfigMonitoredResourceNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*UptimeCheckConfigMonitoredResource) - if !ok { - desiredNotPointer, ok := d.(UptimeCheckConfigMonitoredResource) - if !ok { - return nil, fmt.Errorf("obj %v is not a UptimeCheckConfigMonitoredResource or *UptimeCheckConfigMonitoredResource", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*UptimeCheckConfigMonitoredResource) - if !ok { - actualNotPointer, ok := a.(UptimeCheckConfigMonitoredResource) - if !ok { - return nil, fmt.Errorf("obj %v is not a UptimeCheckConfigMonitoredResource", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Type, actual.Type, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Type")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.FilterLabels, actual.FilterLabels, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareUptimeCheckConfigResourceGroupNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*UptimeCheckConfigResourceGroup) - if !ok { - desiredNotPointer, ok := d.(UptimeCheckConfigResourceGroup) - if !ok { - return nil, fmt.Errorf("obj %v is not a UptimeCheckConfigResourceGroup or *UptimeCheckConfigResourceGroup", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*UptimeCheckConfigResourceGroup) - if !ok { - actualNotPointer, ok := a.(UptimeCheckConfigResourceGroup) - if !ok { - return nil, fmt.Errorf("obj %v is not a UptimeCheckConfigResourceGroup", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.GroupId, actual.GroupId, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("GroupId")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ResourceType, actual.ResourceType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ResourceType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareUptimeCheckConfigHttpCheckNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*UptimeCheckConfigHttpCheck) - if !ok { - desiredNotPointer, ok := d.(UptimeCheckConfigHttpCheck) - if !ok { - return nil, fmt.Errorf("obj %v is not a UptimeCheckConfigHttpCheck or *UptimeCheckConfigHttpCheck", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*UptimeCheckConfigHttpCheck) - if !ok { - actualNotPointer, ok := a.(UptimeCheckConfigHttpCheck) - if !ok { - return nil, fmt.Errorf("obj %v is not a UptimeCheckConfigHttpCheck", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.RequestMethod, actual.RequestMethod, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RequestMethod")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.UseSsl, actual.UseSsl, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("UseSsl")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Path, actual.Path, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("Path")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Port, actual.Port, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("Port")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.AuthInfo, actual.AuthInfo, dcl.DiffInfo{ObjectFunction: compareUptimeCheckConfigHttpCheckAuthInfoNewStyle, EmptyObject: EmptyUptimeCheckConfigHttpCheckAuthInfo, OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("AuthInfo")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.MaskHeaders, actual.MaskHeaders, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MaskHeaders")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Headers, actual.Headers, dcl.DiffInfo{ServerDefault: true, OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("Headers")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ContentType, actual.ContentType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ContentType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ValidateSsl, actual.ValidateSsl, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("ValidateSsl")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Body, actual.Body, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("Body")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareUptimeCheckConfigHttpCheckAuthInfoNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*UptimeCheckConfigHttpCheckAuthInfo) - if !ok { - desiredNotPointer, ok := d.(UptimeCheckConfigHttpCheckAuthInfo) - if !ok { - return nil, fmt.Errorf("obj %v is not a UptimeCheckConfigHttpCheckAuthInfo or *UptimeCheckConfigHttpCheckAuthInfo", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*UptimeCheckConfigHttpCheckAuthInfo) - if !ok { - actualNotPointer, ok := a.(UptimeCheckConfigHttpCheckAuthInfo) - if !ok { - return nil, fmt.Errorf("obj %v is not a UptimeCheckConfigHttpCheckAuthInfo", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Username, actual.Username, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("Username")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Password, actual.Password, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateUptimeCheckConfigUpdateUptimeCheckConfigOperation")}, fn.AddNest("Password")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareUptimeCheckConfigTcpCheckNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*UptimeCheckConfigTcpCheck) - if !ok { - desiredNotPointer, ok := d.(UptimeCheckConfigTcpCheck) - if !ok { - return nil, fmt.Errorf("obj %v is not a UptimeCheckConfigTcpCheck or *UptimeCheckConfigTcpCheck", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*UptimeCheckConfigTcpCheck) - if !ok { - actualNotPointer, ok := a.(UptimeCheckConfigTcpCheck) - if !ok { - return nil, fmt.Errorf("obj %v is not a UptimeCheckConfigTcpCheck", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Port, actual.Port, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Port")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareUptimeCheckConfigContentMatchersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*UptimeCheckConfigContentMatchers) - if !ok { - desiredNotPointer, ok := d.(UptimeCheckConfigContentMatchers) - if !ok { - return nil, fmt.Errorf("obj %v is not a UptimeCheckConfigContentMatchers or *UptimeCheckConfigContentMatchers", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*UptimeCheckConfigContentMatchers) - if !ok { - actualNotPointer, ok := a.(UptimeCheckConfigContentMatchers) - if !ok { - return nil, fmt.Errorf("obj %v is not a UptimeCheckConfigContentMatchers", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Content, actual.Content, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Content")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Matcher, actual.Matcher, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Matcher")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. -func (r *UptimeCheckConfig) urlNormalized() *UptimeCheckConfig { - normalized := dcl.Copy(*r).(UptimeCheckConfig) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.DisplayName = dcl.SelfLinkToName(r.DisplayName) - normalized.Period = dcl.SelfLinkToName(r.Period) - normalized.Timeout = dcl.SelfLinkToName(r.Timeout) - normalized.Project = dcl.SelfLinkToName(r.Project) - return &normalized -} - -func (r *UptimeCheckConfig) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateUptimeCheckConfig" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/uptimeCheckConfigs/{{name}}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the UptimeCheckConfig resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *UptimeCheckConfig) marshal(c *Client) ([]byte, error) { - m, err := expandUptimeCheckConfig(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling UptimeCheckConfig: %w", err) - } - - return json.Marshal(m) -} - -// unmarshalUptimeCheckConfig decodes JSON responses into the UptimeCheckConfig resource schema. 
-func unmarshalUptimeCheckConfig(b []byte, c *Client, res *UptimeCheckConfig) (*UptimeCheckConfig, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapUptimeCheckConfig(m, c, res) -} - -func unmarshalMapUptimeCheckConfig(m map[string]interface{}, c *Client, res *UptimeCheckConfig) (*UptimeCheckConfig, error) { - - flattened := flattenUptimeCheckConfig(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandUptimeCheckConfig expands UptimeCheckConfig into a JSON request object. -func expandUptimeCheckConfig(c *Client, f *UptimeCheckConfig) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v := f.Name; dcl.ValueShouldBeSent(v) { - m["name"] = v - } - if v := f.DisplayName; dcl.ValueShouldBeSent(v) { - m["displayName"] = v - } - if v, err := expandUptimeCheckConfigMonitoredResource(c, f.MonitoredResource, res); err != nil { - return nil, fmt.Errorf("error expanding MonitoredResource into monitoredResource: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["monitoredResource"] = v - } - if v, err := expandUptimeCheckConfigResourceGroup(c, f.ResourceGroup, res); err != nil { - return nil, fmt.Errorf("error expanding ResourceGroup into resourceGroup: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["resourceGroup"] = v - } - if v, err := expandUptimeCheckConfigHttpCheck(c, f.HttpCheck, res); err != nil { - return nil, fmt.Errorf("error expanding HttpCheck into httpCheck: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["httpCheck"] = v - } - if v, err := expandUptimeCheckConfigTcpCheck(c, f.TcpCheck, res); err != nil { - return nil, fmt.Errorf("error expanding TcpCheck into tcpCheck: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["tcpCheck"] = v - } - if v := f.Period; dcl.ValueShouldBeSent(v) { - m["period"] = v - } - if v := 
f.Timeout; dcl.ValueShouldBeSent(v) { - m["timeout"] = v - } - if v, err := expandUptimeCheckConfigContentMatchersSlice(c, f.ContentMatchers, res); err != nil { - return nil, fmt.Errorf("error expanding ContentMatchers into contentMatchers: %w", err) - } else if v != nil { - m["contentMatchers"] = v - } - if v := f.SelectedRegions; v != nil { - m["selectedRegions"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - - return m, nil -} - -// flattenUptimeCheckConfig flattens UptimeCheckConfig from a JSON request object into the -// UptimeCheckConfig type. -func flattenUptimeCheckConfig(c *Client, i interface{}, res *UptimeCheckConfig) *UptimeCheckConfig { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &UptimeCheckConfig{} - resultRes.Name = dcl.SelfLinkToName(dcl.FlattenString(m["name"])) - resultRes.DisplayName = dcl.FlattenString(m["displayName"]) - resultRes.MonitoredResource = flattenUptimeCheckConfigMonitoredResource(c, m["monitoredResource"], res) - resultRes.ResourceGroup = flattenUptimeCheckConfigResourceGroup(c, m["resourceGroup"], res) - resultRes.HttpCheck = flattenUptimeCheckConfigHttpCheck(c, m["httpCheck"], res) - resultRes.TcpCheck = flattenUptimeCheckConfigTcpCheck(c, m["tcpCheck"], res) - resultRes.Period = dcl.FlattenString(m["period"]) - if _, ok := m["period"]; !ok { - c.Config.Logger.Info("Using default value for period") - resultRes.Period = dcl.String("60s") - } - resultRes.Timeout = dcl.FlattenString(m["timeout"]) - resultRes.ContentMatchers = flattenUptimeCheckConfigContentMatchersSlice(c, m["contentMatchers"], res) - resultRes.SelectedRegions = dcl.FlattenStringSlice(m["selectedRegions"]) - resultRes.Project = dcl.FlattenString(m["project"]) - - return resultRes -} - -// expandUptimeCheckConfigMonitoredResourceMap expands the 
contents of UptimeCheckConfigMonitoredResource into a JSON -// request object. -func expandUptimeCheckConfigMonitoredResourceMap(c *Client, f map[string]UptimeCheckConfigMonitoredResource, res *UptimeCheckConfig) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandUptimeCheckConfigMonitoredResource(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandUptimeCheckConfigMonitoredResourceSlice expands the contents of UptimeCheckConfigMonitoredResource into a JSON -// request object. -func expandUptimeCheckConfigMonitoredResourceSlice(c *Client, f []UptimeCheckConfigMonitoredResource, res *UptimeCheckConfig) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandUptimeCheckConfigMonitoredResource(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenUptimeCheckConfigMonitoredResourceMap flattens the contents of UptimeCheckConfigMonitoredResource from a JSON -// response object. -func flattenUptimeCheckConfigMonitoredResourceMap(c *Client, i interface{}, res *UptimeCheckConfig) map[string]UptimeCheckConfigMonitoredResource { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]UptimeCheckConfigMonitoredResource{} - } - - if len(a) == 0 { - return map[string]UptimeCheckConfigMonitoredResource{} - } - - items := make(map[string]UptimeCheckConfigMonitoredResource) - for k, item := range a { - items[k] = *flattenUptimeCheckConfigMonitoredResource(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenUptimeCheckConfigMonitoredResourceSlice flattens the contents of UptimeCheckConfigMonitoredResource from a JSON -// response object. 
-func flattenUptimeCheckConfigMonitoredResourceSlice(c *Client, i interface{}, res *UptimeCheckConfig) []UptimeCheckConfigMonitoredResource { - a, ok := i.([]interface{}) - if !ok { - return []UptimeCheckConfigMonitoredResource{} - } - - if len(a) == 0 { - return []UptimeCheckConfigMonitoredResource{} - } - - items := make([]UptimeCheckConfigMonitoredResource, 0, len(a)) - for _, item := range a { - items = append(items, *flattenUptimeCheckConfigMonitoredResource(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandUptimeCheckConfigMonitoredResource expands an instance of UptimeCheckConfigMonitoredResource into a JSON -// request object. -func expandUptimeCheckConfigMonitoredResource(c *Client, f *UptimeCheckConfigMonitoredResource, res *UptimeCheckConfig) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Type; !dcl.IsEmptyValueIndirect(v) { - m["type"] = v - } - if v := f.FilterLabels; !dcl.IsEmptyValueIndirect(v) { - m["labels"] = v - } - - return m, nil -} - -// flattenUptimeCheckConfigMonitoredResource flattens an instance of UptimeCheckConfigMonitoredResource from a JSON -// response object. -func flattenUptimeCheckConfigMonitoredResource(c *Client, i interface{}, res *UptimeCheckConfig) *UptimeCheckConfigMonitoredResource { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &UptimeCheckConfigMonitoredResource{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyUptimeCheckConfigMonitoredResource - } - r.Type = dcl.FlattenString(m["type"]) - r.FilterLabels = dcl.FlattenKeyValuePairs(m["labels"]) - - return r -} - -// expandUptimeCheckConfigResourceGroupMap expands the contents of UptimeCheckConfigResourceGroup into a JSON -// request object. 
-func expandUptimeCheckConfigResourceGroupMap(c *Client, f map[string]UptimeCheckConfigResourceGroup, res *UptimeCheckConfig) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandUptimeCheckConfigResourceGroup(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandUptimeCheckConfigResourceGroupSlice expands the contents of UptimeCheckConfigResourceGroup into a JSON -// request object. -func expandUptimeCheckConfigResourceGroupSlice(c *Client, f []UptimeCheckConfigResourceGroup, res *UptimeCheckConfig) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandUptimeCheckConfigResourceGroup(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenUptimeCheckConfigResourceGroupMap flattens the contents of UptimeCheckConfigResourceGroup from a JSON -// response object. -func flattenUptimeCheckConfigResourceGroupMap(c *Client, i interface{}, res *UptimeCheckConfig) map[string]UptimeCheckConfigResourceGroup { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]UptimeCheckConfigResourceGroup{} - } - - if len(a) == 0 { - return map[string]UptimeCheckConfigResourceGroup{} - } - - items := make(map[string]UptimeCheckConfigResourceGroup) - for k, item := range a { - items[k] = *flattenUptimeCheckConfigResourceGroup(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenUptimeCheckConfigResourceGroupSlice flattens the contents of UptimeCheckConfigResourceGroup from a JSON -// response object. 
-func flattenUptimeCheckConfigResourceGroupSlice(c *Client, i interface{}, res *UptimeCheckConfig) []UptimeCheckConfigResourceGroup { - a, ok := i.([]interface{}) - if !ok { - return []UptimeCheckConfigResourceGroup{} - } - - if len(a) == 0 { - return []UptimeCheckConfigResourceGroup{} - } - - items := make([]UptimeCheckConfigResourceGroup, 0, len(a)) - for _, item := range a { - items = append(items, *flattenUptimeCheckConfigResourceGroup(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandUptimeCheckConfigResourceGroup expands an instance of UptimeCheckConfigResourceGroup into a JSON -// request object. -func expandUptimeCheckConfigResourceGroup(c *Client, f *UptimeCheckConfigResourceGroup, res *UptimeCheckConfig) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := dcl.SelfLinkToNameExpander(f.GroupId); err != nil { - return nil, fmt.Errorf("error expanding GroupId into groupId: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["groupId"] = v - } - if v := f.ResourceType; !dcl.IsEmptyValueIndirect(v) { - m["resourceType"] = v - } - - return m, nil -} - -// flattenUptimeCheckConfigResourceGroup flattens an instance of UptimeCheckConfigResourceGroup from a JSON -// response object. -func flattenUptimeCheckConfigResourceGroup(c *Client, i interface{}, res *UptimeCheckConfig) *UptimeCheckConfigResourceGroup { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &UptimeCheckConfigResourceGroup{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyUptimeCheckConfigResourceGroup - } - r.GroupId = dcl.FlattenString(m["groupId"]) - r.ResourceType = flattenUptimeCheckConfigResourceGroupResourceTypeEnum(m["resourceType"]) - - return r -} - -// expandUptimeCheckConfigHttpCheckMap expands the contents of UptimeCheckConfigHttpCheck into a JSON -// request object. 
-func expandUptimeCheckConfigHttpCheckMap(c *Client, f map[string]UptimeCheckConfigHttpCheck, res *UptimeCheckConfig) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandUptimeCheckConfigHttpCheck(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandUptimeCheckConfigHttpCheckSlice expands the contents of UptimeCheckConfigHttpCheck into a JSON -// request object. -func expandUptimeCheckConfigHttpCheckSlice(c *Client, f []UptimeCheckConfigHttpCheck, res *UptimeCheckConfig) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandUptimeCheckConfigHttpCheck(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenUptimeCheckConfigHttpCheckMap flattens the contents of UptimeCheckConfigHttpCheck from a JSON -// response object. -func flattenUptimeCheckConfigHttpCheckMap(c *Client, i interface{}, res *UptimeCheckConfig) map[string]UptimeCheckConfigHttpCheck { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]UptimeCheckConfigHttpCheck{} - } - - if len(a) == 0 { - return map[string]UptimeCheckConfigHttpCheck{} - } - - items := make(map[string]UptimeCheckConfigHttpCheck) - for k, item := range a { - items[k] = *flattenUptimeCheckConfigHttpCheck(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenUptimeCheckConfigHttpCheckSlice flattens the contents of UptimeCheckConfigHttpCheck from a JSON -// response object. 
-func flattenUptimeCheckConfigHttpCheckSlice(c *Client, i interface{}, res *UptimeCheckConfig) []UptimeCheckConfigHttpCheck { - a, ok := i.([]interface{}) - if !ok { - return []UptimeCheckConfigHttpCheck{} - } - - if len(a) == 0 { - return []UptimeCheckConfigHttpCheck{} - } - - items := make([]UptimeCheckConfigHttpCheck, 0, len(a)) - for _, item := range a { - items = append(items, *flattenUptimeCheckConfigHttpCheck(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandUptimeCheckConfigHttpCheck expands an instance of UptimeCheckConfigHttpCheck into a JSON -// request object. -func expandUptimeCheckConfigHttpCheck(c *Client, f *UptimeCheckConfigHttpCheck, res *UptimeCheckConfig) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.RequestMethod; !dcl.IsEmptyValueIndirect(v) { - m["requestMethod"] = v - } - if v := f.UseSsl; !dcl.IsEmptyValueIndirect(v) { - m["useSsl"] = v - } - if v := f.Path; !dcl.IsEmptyValueIndirect(v) { - m["path"] = v - } - if v := f.Port; !dcl.IsEmptyValueIndirect(v) { - m["port"] = v - } - if v, err := expandUptimeCheckConfigHttpCheckAuthInfo(c, f.AuthInfo, res); err != nil { - return nil, fmt.Errorf("error expanding AuthInfo into authInfo: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["authInfo"] = v - } - if v := f.MaskHeaders; !dcl.IsEmptyValueIndirect(v) { - m["maskHeaders"] = v - } - if v := f.Headers; !dcl.IsEmptyValueIndirect(v) { - m["headers"] = v - } - if v := f.ContentType; !dcl.IsEmptyValueIndirect(v) { - m["contentType"] = v - } - if v := f.ValidateSsl; !dcl.IsEmptyValueIndirect(v) { - m["validateSsl"] = v - } - if v := f.Body; !dcl.IsEmptyValueIndirect(v) { - m["body"] = v - } - - return m, nil -} - -// flattenUptimeCheckConfigHttpCheck flattens an instance of UptimeCheckConfigHttpCheck from a JSON -// response object. 
-func flattenUptimeCheckConfigHttpCheck(c *Client, i interface{}, res *UptimeCheckConfig) *UptimeCheckConfigHttpCheck { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &UptimeCheckConfigHttpCheck{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyUptimeCheckConfigHttpCheck - } - r.RequestMethod = flattenUptimeCheckConfigHttpCheckRequestMethodEnum(m["requestMethod"]) - if dcl.IsEmptyValueIndirect(m["requestMethod"]) { - c.Config.Logger.Info("Using default value for requestMethod.") - r.RequestMethod = UptimeCheckConfigHttpCheckRequestMethodEnumRef("GET") - } - r.UseSsl = dcl.FlattenBool(m["useSsl"]) - r.Path = dcl.FlattenString(m["path"]) - if dcl.IsEmptyValueIndirect(m["path"]) { - c.Config.Logger.Info("Using default value for path.") - r.Path = dcl.String("/") - } - r.Port = dcl.FlattenInteger(m["port"]) - r.AuthInfo = flattenUptimeCheckConfigHttpCheckAuthInfo(c, m["authInfo"], res) - r.MaskHeaders = dcl.FlattenBool(m["maskHeaders"]) - r.Headers = dcl.FlattenKeyValuePairs(m["headers"]) - r.ContentType = flattenUptimeCheckConfigHttpCheckContentTypeEnum(m["contentType"]) - r.ValidateSsl = dcl.FlattenBool(m["validateSsl"]) - r.Body = dcl.FlattenString(m["body"]) - - return r -} - -// expandUptimeCheckConfigHttpCheckAuthInfoMap expands the contents of UptimeCheckConfigHttpCheckAuthInfo into a JSON -// request object. -func expandUptimeCheckConfigHttpCheckAuthInfoMap(c *Client, f map[string]UptimeCheckConfigHttpCheckAuthInfo, res *UptimeCheckConfig) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandUptimeCheckConfigHttpCheckAuthInfo(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandUptimeCheckConfigHttpCheckAuthInfoSlice expands the contents of UptimeCheckConfigHttpCheckAuthInfo into a JSON -// request object. 
-func expandUptimeCheckConfigHttpCheckAuthInfoSlice(c *Client, f []UptimeCheckConfigHttpCheckAuthInfo, res *UptimeCheckConfig) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandUptimeCheckConfigHttpCheckAuthInfo(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenUptimeCheckConfigHttpCheckAuthInfoMap flattens the contents of UptimeCheckConfigHttpCheckAuthInfo from a JSON -// response object. -func flattenUptimeCheckConfigHttpCheckAuthInfoMap(c *Client, i interface{}, res *UptimeCheckConfig) map[string]UptimeCheckConfigHttpCheckAuthInfo { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]UptimeCheckConfigHttpCheckAuthInfo{} - } - - if len(a) == 0 { - return map[string]UptimeCheckConfigHttpCheckAuthInfo{} - } - - items := make(map[string]UptimeCheckConfigHttpCheckAuthInfo) - for k, item := range a { - items[k] = *flattenUptimeCheckConfigHttpCheckAuthInfo(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenUptimeCheckConfigHttpCheckAuthInfoSlice flattens the contents of UptimeCheckConfigHttpCheckAuthInfo from a JSON -// response object. -func flattenUptimeCheckConfigHttpCheckAuthInfoSlice(c *Client, i interface{}, res *UptimeCheckConfig) []UptimeCheckConfigHttpCheckAuthInfo { - a, ok := i.([]interface{}) - if !ok { - return []UptimeCheckConfigHttpCheckAuthInfo{} - } - - if len(a) == 0 { - return []UptimeCheckConfigHttpCheckAuthInfo{} - } - - items := make([]UptimeCheckConfigHttpCheckAuthInfo, 0, len(a)) - for _, item := range a { - items = append(items, *flattenUptimeCheckConfigHttpCheckAuthInfo(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandUptimeCheckConfigHttpCheckAuthInfo expands an instance of UptimeCheckConfigHttpCheckAuthInfo into a JSON -// request object. 
-func expandUptimeCheckConfigHttpCheckAuthInfo(c *Client, f *UptimeCheckConfigHttpCheckAuthInfo, res *UptimeCheckConfig) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Username; !dcl.IsEmptyValueIndirect(v) { - m["username"] = v - } - if v := f.Password; !dcl.IsEmptyValueIndirect(v) { - m["password"] = v - } - - return m, nil -} - -// flattenUptimeCheckConfigHttpCheckAuthInfo flattens an instance of UptimeCheckConfigHttpCheckAuthInfo from a JSON -// response object. -func flattenUptimeCheckConfigHttpCheckAuthInfo(c *Client, i interface{}, res *UptimeCheckConfig) *UptimeCheckConfigHttpCheckAuthInfo { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &UptimeCheckConfigHttpCheckAuthInfo{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyUptimeCheckConfigHttpCheckAuthInfo - } - r.Username = dcl.FlattenString(m["username"]) - r.Password = dcl.FlattenString(m["password"]) - - return r -} - -// expandUptimeCheckConfigTcpCheckMap expands the contents of UptimeCheckConfigTcpCheck into a JSON -// request object. -func expandUptimeCheckConfigTcpCheckMap(c *Client, f map[string]UptimeCheckConfigTcpCheck, res *UptimeCheckConfig) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandUptimeCheckConfigTcpCheck(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandUptimeCheckConfigTcpCheckSlice expands the contents of UptimeCheckConfigTcpCheck into a JSON -// request object. 
-func expandUptimeCheckConfigTcpCheckSlice(c *Client, f []UptimeCheckConfigTcpCheck, res *UptimeCheckConfig) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandUptimeCheckConfigTcpCheck(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenUptimeCheckConfigTcpCheckMap flattens the contents of UptimeCheckConfigTcpCheck from a JSON -// response object. -func flattenUptimeCheckConfigTcpCheckMap(c *Client, i interface{}, res *UptimeCheckConfig) map[string]UptimeCheckConfigTcpCheck { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]UptimeCheckConfigTcpCheck{} - } - - if len(a) == 0 { - return map[string]UptimeCheckConfigTcpCheck{} - } - - items := make(map[string]UptimeCheckConfigTcpCheck) - for k, item := range a { - items[k] = *flattenUptimeCheckConfigTcpCheck(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenUptimeCheckConfigTcpCheckSlice flattens the contents of UptimeCheckConfigTcpCheck from a JSON -// response object. -func flattenUptimeCheckConfigTcpCheckSlice(c *Client, i interface{}, res *UptimeCheckConfig) []UptimeCheckConfigTcpCheck { - a, ok := i.([]interface{}) - if !ok { - return []UptimeCheckConfigTcpCheck{} - } - - if len(a) == 0 { - return []UptimeCheckConfigTcpCheck{} - } - - items := make([]UptimeCheckConfigTcpCheck, 0, len(a)) - for _, item := range a { - items = append(items, *flattenUptimeCheckConfigTcpCheck(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandUptimeCheckConfigTcpCheck expands an instance of UptimeCheckConfigTcpCheck into a JSON -// request object. 
-func expandUptimeCheckConfigTcpCheck(c *Client, f *UptimeCheckConfigTcpCheck, res *UptimeCheckConfig) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Port; !dcl.IsEmptyValueIndirect(v) { - m["port"] = v - } - - return m, nil -} - -// flattenUptimeCheckConfigTcpCheck flattens an instance of UptimeCheckConfigTcpCheck from a JSON -// response object. -func flattenUptimeCheckConfigTcpCheck(c *Client, i interface{}, res *UptimeCheckConfig) *UptimeCheckConfigTcpCheck { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &UptimeCheckConfigTcpCheck{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyUptimeCheckConfigTcpCheck - } - r.Port = dcl.FlattenInteger(m["port"]) - - return r -} - -// expandUptimeCheckConfigContentMatchersMap expands the contents of UptimeCheckConfigContentMatchers into a JSON -// request object. -func expandUptimeCheckConfigContentMatchersMap(c *Client, f map[string]UptimeCheckConfigContentMatchers, res *UptimeCheckConfig) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandUptimeCheckConfigContentMatchers(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandUptimeCheckConfigContentMatchersSlice expands the contents of UptimeCheckConfigContentMatchers into a JSON -// request object. 
-func expandUptimeCheckConfigContentMatchersSlice(c *Client, f []UptimeCheckConfigContentMatchers, res *UptimeCheckConfig) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandUptimeCheckConfigContentMatchers(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenUptimeCheckConfigContentMatchersMap flattens the contents of UptimeCheckConfigContentMatchers from a JSON -// response object. -func flattenUptimeCheckConfigContentMatchersMap(c *Client, i interface{}, res *UptimeCheckConfig) map[string]UptimeCheckConfigContentMatchers { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]UptimeCheckConfigContentMatchers{} - } - - if len(a) == 0 { - return map[string]UptimeCheckConfigContentMatchers{} - } - - items := make(map[string]UptimeCheckConfigContentMatchers) - for k, item := range a { - items[k] = *flattenUptimeCheckConfigContentMatchers(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenUptimeCheckConfigContentMatchersSlice flattens the contents of UptimeCheckConfigContentMatchers from a JSON -// response object. -func flattenUptimeCheckConfigContentMatchersSlice(c *Client, i interface{}, res *UptimeCheckConfig) []UptimeCheckConfigContentMatchers { - a, ok := i.([]interface{}) - if !ok { - return []UptimeCheckConfigContentMatchers{} - } - - if len(a) == 0 { - return []UptimeCheckConfigContentMatchers{} - } - - items := make([]UptimeCheckConfigContentMatchers, 0, len(a)) - for _, item := range a { - items = append(items, *flattenUptimeCheckConfigContentMatchers(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandUptimeCheckConfigContentMatchers expands an instance of UptimeCheckConfigContentMatchers into a JSON -// request object. 
-func expandUptimeCheckConfigContentMatchers(c *Client, f *UptimeCheckConfigContentMatchers, res *UptimeCheckConfig) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Content; !dcl.IsEmptyValueIndirect(v) { - m["content"] = v - } - if v := f.Matcher; !dcl.IsEmptyValueIndirect(v) { - m["matcher"] = v - } - - return m, nil -} - -// flattenUptimeCheckConfigContentMatchers flattens an instance of UptimeCheckConfigContentMatchers from a JSON -// response object. -func flattenUptimeCheckConfigContentMatchers(c *Client, i interface{}, res *UptimeCheckConfig) *UptimeCheckConfigContentMatchers { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &UptimeCheckConfigContentMatchers{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyUptimeCheckConfigContentMatchers - } - r.Content = dcl.FlattenString(m["content"]) - r.Matcher = flattenUptimeCheckConfigContentMatchersMatcherEnum(m["matcher"]) - if dcl.IsEmptyValueIndirect(m["matcher"]) { - c.Config.Logger.Info("Using default value for matcher.") - r.Matcher = UptimeCheckConfigContentMatchersMatcherEnumRef("CONTAINS_STRING") - } - - return r -} - -// flattenUptimeCheckConfigResourceGroupResourceTypeEnumMap flattens the contents of UptimeCheckConfigResourceGroupResourceTypeEnum from a JSON -// response object. 
-func flattenUptimeCheckConfigResourceGroupResourceTypeEnumMap(c *Client, i interface{}, res *UptimeCheckConfig) map[string]UptimeCheckConfigResourceGroupResourceTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]UptimeCheckConfigResourceGroupResourceTypeEnum{} - } - - if len(a) == 0 { - return map[string]UptimeCheckConfigResourceGroupResourceTypeEnum{} - } - - items := make(map[string]UptimeCheckConfigResourceGroupResourceTypeEnum) - for k, item := range a { - items[k] = *flattenUptimeCheckConfigResourceGroupResourceTypeEnum(item.(interface{})) - } - - return items -} - -// flattenUptimeCheckConfigResourceGroupResourceTypeEnumSlice flattens the contents of UptimeCheckConfigResourceGroupResourceTypeEnum from a JSON -// response object. -func flattenUptimeCheckConfigResourceGroupResourceTypeEnumSlice(c *Client, i interface{}, res *UptimeCheckConfig) []UptimeCheckConfigResourceGroupResourceTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []UptimeCheckConfigResourceGroupResourceTypeEnum{} - } - - if len(a) == 0 { - return []UptimeCheckConfigResourceGroupResourceTypeEnum{} - } - - items := make([]UptimeCheckConfigResourceGroupResourceTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenUptimeCheckConfigResourceGroupResourceTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenUptimeCheckConfigResourceGroupResourceTypeEnum asserts that an interface is a string, and returns a -// pointer to a *UptimeCheckConfigResourceGroupResourceTypeEnum with the same value as that string. -func flattenUptimeCheckConfigResourceGroupResourceTypeEnum(i interface{}) *UptimeCheckConfigResourceGroupResourceTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return UptimeCheckConfigResourceGroupResourceTypeEnumRef(s) -} - -// flattenUptimeCheckConfigHttpCheckRequestMethodEnumMap flattens the contents of UptimeCheckConfigHttpCheckRequestMethodEnum from a JSON -// response object. 
-func flattenUptimeCheckConfigHttpCheckRequestMethodEnumMap(c *Client, i interface{}, res *UptimeCheckConfig) map[string]UptimeCheckConfigHttpCheckRequestMethodEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]UptimeCheckConfigHttpCheckRequestMethodEnum{} - } - - if len(a) == 0 { - return map[string]UptimeCheckConfigHttpCheckRequestMethodEnum{} - } - - items := make(map[string]UptimeCheckConfigHttpCheckRequestMethodEnum) - for k, item := range a { - items[k] = *flattenUptimeCheckConfigHttpCheckRequestMethodEnum(item.(interface{})) - } - - return items -} - -// flattenUptimeCheckConfigHttpCheckRequestMethodEnumSlice flattens the contents of UptimeCheckConfigHttpCheckRequestMethodEnum from a JSON -// response object. -func flattenUptimeCheckConfigHttpCheckRequestMethodEnumSlice(c *Client, i interface{}, res *UptimeCheckConfig) []UptimeCheckConfigHttpCheckRequestMethodEnum { - a, ok := i.([]interface{}) - if !ok { - return []UptimeCheckConfigHttpCheckRequestMethodEnum{} - } - - if len(a) == 0 { - return []UptimeCheckConfigHttpCheckRequestMethodEnum{} - } - - items := make([]UptimeCheckConfigHttpCheckRequestMethodEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenUptimeCheckConfigHttpCheckRequestMethodEnum(item.(interface{}))) - } - - return items -} - -// flattenUptimeCheckConfigHttpCheckRequestMethodEnum asserts that an interface is a string, and returns a -// pointer to a *UptimeCheckConfigHttpCheckRequestMethodEnum with the same value as that string. -func flattenUptimeCheckConfigHttpCheckRequestMethodEnum(i interface{}) *UptimeCheckConfigHttpCheckRequestMethodEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return UptimeCheckConfigHttpCheckRequestMethodEnumRef(s) -} - -// flattenUptimeCheckConfigHttpCheckContentTypeEnumMap flattens the contents of UptimeCheckConfigHttpCheckContentTypeEnum from a JSON -// response object. 
-func flattenUptimeCheckConfigHttpCheckContentTypeEnumMap(c *Client, i interface{}, res *UptimeCheckConfig) map[string]UptimeCheckConfigHttpCheckContentTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]UptimeCheckConfigHttpCheckContentTypeEnum{} - } - - if len(a) == 0 { - return map[string]UptimeCheckConfigHttpCheckContentTypeEnum{} - } - - items := make(map[string]UptimeCheckConfigHttpCheckContentTypeEnum) - for k, item := range a { - items[k] = *flattenUptimeCheckConfigHttpCheckContentTypeEnum(item.(interface{})) - } - - return items -} - -// flattenUptimeCheckConfigHttpCheckContentTypeEnumSlice flattens the contents of UptimeCheckConfigHttpCheckContentTypeEnum from a JSON -// response object. -func flattenUptimeCheckConfigHttpCheckContentTypeEnumSlice(c *Client, i interface{}, res *UptimeCheckConfig) []UptimeCheckConfigHttpCheckContentTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []UptimeCheckConfigHttpCheckContentTypeEnum{} - } - - if len(a) == 0 { - return []UptimeCheckConfigHttpCheckContentTypeEnum{} - } - - items := make([]UptimeCheckConfigHttpCheckContentTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenUptimeCheckConfigHttpCheckContentTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenUptimeCheckConfigHttpCheckContentTypeEnum asserts that an interface is a string, and returns a -// pointer to a *UptimeCheckConfigHttpCheckContentTypeEnum with the same value as that string. -func flattenUptimeCheckConfigHttpCheckContentTypeEnum(i interface{}) *UptimeCheckConfigHttpCheckContentTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return UptimeCheckConfigHttpCheckContentTypeEnumRef(s) -} - -// flattenUptimeCheckConfigContentMatchersMatcherEnumMap flattens the contents of UptimeCheckConfigContentMatchersMatcherEnum from a JSON -// response object. 
-func flattenUptimeCheckConfigContentMatchersMatcherEnumMap(c *Client, i interface{}, res *UptimeCheckConfig) map[string]UptimeCheckConfigContentMatchersMatcherEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]UptimeCheckConfigContentMatchersMatcherEnum{} - } - - if len(a) == 0 { - return map[string]UptimeCheckConfigContentMatchersMatcherEnum{} - } - - items := make(map[string]UptimeCheckConfigContentMatchersMatcherEnum) - for k, item := range a { - items[k] = *flattenUptimeCheckConfigContentMatchersMatcherEnum(item.(interface{})) - } - - return items -} - -// flattenUptimeCheckConfigContentMatchersMatcherEnumSlice flattens the contents of UptimeCheckConfigContentMatchersMatcherEnum from a JSON -// response object. -func flattenUptimeCheckConfigContentMatchersMatcherEnumSlice(c *Client, i interface{}, res *UptimeCheckConfig) []UptimeCheckConfigContentMatchersMatcherEnum { - a, ok := i.([]interface{}) - if !ok { - return []UptimeCheckConfigContentMatchersMatcherEnum{} - } - - if len(a) == 0 { - return []UptimeCheckConfigContentMatchersMatcherEnum{} - } - - items := make([]UptimeCheckConfigContentMatchersMatcherEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenUptimeCheckConfigContentMatchersMatcherEnum(item.(interface{}))) - } - - return items -} - -// flattenUptimeCheckConfigContentMatchersMatcherEnum asserts that an interface is a string, and returns a -// pointer to a *UptimeCheckConfigContentMatchersMatcherEnum with the same value as that string. -func flattenUptimeCheckConfigContentMatchersMatcherEnum(i interface{}) *UptimeCheckConfigContentMatchersMatcherEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return UptimeCheckConfigContentMatchersMatcherEnumRef(s) -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). 
This is useful in extracting the element from a List call. -func (r *UptimeCheckConfig) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalUptimeCheckConfig(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type uptimeCheckConfigDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. - RequiresRecreate bool - UpdateOp uptimeCheckConfigApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToUptimeCheckConfigDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]uptimeCheckConfigDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. 
- for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []uptimeCheckConfigDiff - // For each operation name, create a uptimeCheckConfigDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := uptimeCheckConfigDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToUptimeCheckConfigApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToUptimeCheckConfigApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (uptimeCheckConfigApiOperation, error) { - switch opName { - - case "updateUptimeCheckConfigUpdateUptimeCheckConfigOperation": - return &updateUptimeCheckConfigUpdateUptimeCheckConfigOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractUptimeCheckConfigFields(r *UptimeCheckConfig) error { - vMonitoredResource := r.MonitoredResource - if vMonitoredResource == nil { - // note: explicitly not the empty object. - vMonitoredResource = &UptimeCheckConfigMonitoredResource{} - } - if err := extractUptimeCheckConfigMonitoredResourceFields(r, vMonitoredResource); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMonitoredResource) { - r.MonitoredResource = vMonitoredResource - } - vResourceGroup := r.ResourceGroup - if vResourceGroup == nil { - // note: explicitly not the empty object. 
- vResourceGroup = &UptimeCheckConfigResourceGroup{} - } - if err := extractUptimeCheckConfigResourceGroupFields(r, vResourceGroup); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vResourceGroup) { - r.ResourceGroup = vResourceGroup - } - vHttpCheck := r.HttpCheck - if vHttpCheck == nil { - // note: explicitly not the empty object. - vHttpCheck = &UptimeCheckConfigHttpCheck{} - } - if err := extractUptimeCheckConfigHttpCheckFields(r, vHttpCheck); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vHttpCheck) { - r.HttpCheck = vHttpCheck - } - vTcpCheck := r.TcpCheck - if vTcpCheck == nil { - // note: explicitly not the empty object. - vTcpCheck = &UptimeCheckConfigTcpCheck{} - } - if err := extractUptimeCheckConfigTcpCheckFields(r, vTcpCheck); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTcpCheck) { - r.TcpCheck = vTcpCheck - } - return nil -} -func extractUptimeCheckConfigMonitoredResourceFields(r *UptimeCheckConfig, o *UptimeCheckConfigMonitoredResource) error { - return nil -} -func extractUptimeCheckConfigResourceGroupFields(r *UptimeCheckConfig, o *UptimeCheckConfigResourceGroup) error { - return nil -} -func extractUptimeCheckConfigHttpCheckFields(r *UptimeCheckConfig, o *UptimeCheckConfigHttpCheck) error { - vAuthInfo := o.AuthInfo - if vAuthInfo == nil { - // note: explicitly not the empty object. 
- vAuthInfo = &UptimeCheckConfigHttpCheckAuthInfo{} - } - if err := extractUptimeCheckConfigHttpCheckAuthInfoFields(r, vAuthInfo); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAuthInfo) { - o.AuthInfo = vAuthInfo - } - return nil -} -func extractUptimeCheckConfigHttpCheckAuthInfoFields(r *UptimeCheckConfig, o *UptimeCheckConfigHttpCheckAuthInfo) error { - return nil -} -func extractUptimeCheckConfigTcpCheckFields(r *UptimeCheckConfig, o *UptimeCheckConfigTcpCheck) error { - return nil -} -func extractUptimeCheckConfigContentMatchersFields(r *UptimeCheckConfig, o *UptimeCheckConfigContentMatchers) error { - return nil -} - -func postReadExtractUptimeCheckConfigFields(r *UptimeCheckConfig) error { - vMonitoredResource := r.MonitoredResource - if vMonitoredResource == nil { - // note: explicitly not the empty object. - vMonitoredResource = &UptimeCheckConfigMonitoredResource{} - } - if err := postReadExtractUptimeCheckConfigMonitoredResourceFields(r, vMonitoredResource); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMonitoredResource) { - r.MonitoredResource = vMonitoredResource - } - vResourceGroup := r.ResourceGroup - if vResourceGroup == nil { - // note: explicitly not the empty object. - vResourceGroup = &UptimeCheckConfigResourceGroup{} - } - if err := postReadExtractUptimeCheckConfigResourceGroupFields(r, vResourceGroup); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vResourceGroup) { - r.ResourceGroup = vResourceGroup - } - vHttpCheck := r.HttpCheck - if vHttpCheck == nil { - // note: explicitly not the empty object. - vHttpCheck = &UptimeCheckConfigHttpCheck{} - } - if err := postReadExtractUptimeCheckConfigHttpCheckFields(r, vHttpCheck); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vHttpCheck) { - r.HttpCheck = vHttpCheck - } - vTcpCheck := r.TcpCheck - if vTcpCheck == nil { - // note: explicitly not the empty object. 
- vTcpCheck = &UptimeCheckConfigTcpCheck{} - } - if err := postReadExtractUptimeCheckConfigTcpCheckFields(r, vTcpCheck); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vTcpCheck) { - r.TcpCheck = vTcpCheck - } - return nil -} -func postReadExtractUptimeCheckConfigMonitoredResourceFields(r *UptimeCheckConfig, o *UptimeCheckConfigMonitoredResource) error { - return nil -} -func postReadExtractUptimeCheckConfigResourceGroupFields(r *UptimeCheckConfig, o *UptimeCheckConfigResourceGroup) error { - return nil -} -func postReadExtractUptimeCheckConfigHttpCheckFields(r *UptimeCheckConfig, o *UptimeCheckConfigHttpCheck) error { - vAuthInfo := o.AuthInfo - if vAuthInfo == nil { - // note: explicitly not the empty object. - vAuthInfo = &UptimeCheckConfigHttpCheckAuthInfo{} - } - if err := extractUptimeCheckConfigHttpCheckAuthInfoFields(r, vAuthInfo); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vAuthInfo) { - o.AuthInfo = vAuthInfo - } - return nil -} -func postReadExtractUptimeCheckConfigHttpCheckAuthInfoFields(r *UptimeCheckConfig, o *UptimeCheckConfigHttpCheckAuthInfo) error { - return nil -} -func postReadExtractUptimeCheckConfigTcpCheckFields(r *UptimeCheckConfig, o *UptimeCheckConfigTcpCheck) error { - return nil -} -func postReadExtractUptimeCheckConfigContentMatchersFields(r *UptimeCheckConfig, o *UptimeCheckConfigContentMatchers) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config_schema.go deleted file mode 100644 index 5c0db53f08..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config_schema.go +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright 
2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package monitoring - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLUptimeCheckConfigSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "Monitoring/UptimeCheckConfig", - Description: "The Monitoring UptimeCheckConfig resource", - StructName: "UptimeCheckConfig", - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a UptimeCheckConfig", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "uptimeCheckConfig", - Required: true, - Description: "A full instance of a UptimeCheckConfig", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a UptimeCheckConfig", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "uptimeCheckConfig", - Required: true, - Description: "A full instance of a UptimeCheckConfig", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a UptimeCheckConfig", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "uptimeCheckConfig", - Required: true, - Description: "A full instance of a UptimeCheckConfig", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all UptimeCheckConfig", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: 
&dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many UptimeCheckConfig", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "UptimeCheckConfig": &dcl.Component{ - Title: "UptimeCheckConfig", - ID: "projects/{{project}}/uptimeCheckConfigs/{{name}}", - UsesStateHint: true, - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "displayName", - "timeout", - }, - Properties: map[string]*dcl.Property{ - "contentMatchers": &dcl.Property{ - Type: "array", - GoName: "ContentMatchers", - Description: "The content that is expected to appear in the data returned by the target server against which the check is run. Currently, only the first entry in the `content_matchers` list is supported, and additional entries will be ignored. 
This field is optional and should only be specified if a content match is required as part of the/ Uptime check.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "UptimeCheckConfigContentMatchers", - Required: []string{ - "content", - }, - Properties: map[string]*dcl.Property{ - "content": &dcl.Property{ - Type: "string", - GoName: "Content", - }, - "matcher": &dcl.Property{ - Type: "string", - GoName: "Matcher", - GoType: "UptimeCheckConfigContentMatchersMatcherEnum", - Description: " Possible values: CONTENT_MATCHER_OPTION_UNSPECIFIED, CONTAINS_STRING, NOT_CONTAINS_STRING, MATCHES_REGEX, NOT_MATCHES_REGEX", - Default: "CONTAINS_STRING", - Enum: []string{ - "CONTENT_MATCHER_OPTION_UNSPECIFIED", - "CONTAINS_STRING", - "NOT_CONTAINS_STRING", - "MATCHES_REGEX", - "NOT_MATCHES_REGEX", - }, - }, - }, - }, - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "A human-friendly name for the Uptime check configuration. The display name should be unique within a Stackdriver Workspace in order to make it easier to identify; however, uniqueness is not enforced. Required.", - }, - "httpCheck": &dcl.Property{ - Type: "object", - GoName: "HttpCheck", - GoType: "UptimeCheckConfigHttpCheck", - Description: "Contains information needed to make an HTTP or HTTPS check.", - Conflicts: []string{ - "tcpCheck", - }, - Properties: map[string]*dcl.Property{ - "authInfo": &dcl.Property{ - Type: "object", - GoName: "AuthInfo", - GoType: "UptimeCheckConfigHttpCheckAuthInfo", - Description: "The authentication information. 
Optional when creating an HTTP check; defaults to empty.", - Required: []string{ - "username", - "password", - }, - Properties: map[string]*dcl.Property{ - "password": &dcl.Property{ - Type: "string", - GoName: "Password", - Sensitive: true, - Unreadable: true, - }, - "username": &dcl.Property{ - Type: "string", - GoName: "Username", - }, - }, - }, - "body": &dcl.Property{ - Type: "string", - GoName: "Body", - Description: "The request body associated with the HTTP POST request. If `content_type` is `URL_ENCODED`, the body passed in must be URL-encoded. Users can provide a `Content-Length` header via the `headers` field or the API will do so. If the `request_method` is `GET` and `body` is not empty, the API will return an error. The maximum byte size is 1 megabyte. Note: As with all `bytes` fields JSON representations are base64 encoded. e.g.: \"foo=bar\" in URL-encoded form is \"foo%3Dbar\" and in base64 encoding is \"Zm9vJTI1M0RiYXI=\".", - }, - "contentType": &dcl.Property{ - Type: "string", - GoName: "ContentType", - GoType: "UptimeCheckConfigHttpCheckContentTypeEnum", - Description: "The content type to use for the check. Possible values: TYPE_UNSPECIFIED, URL_ENCODED", - Immutable: true, - Enum: []string{ - "TYPE_UNSPECIFIED", - "URL_ENCODED", - }, - }, - "headers": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Headers", - Description: "The list of headers to send as part of the Uptime check request. If two headers have the same key and different values, they should be entered as a single header, with the value being a comma-separated list of all the desired values as described at https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). Entering two separate headers with the same key in a Create call will cause the first to be overwritten by the second. 
The maximum number of headers allowed is 100.", - ServerDefault: true, - Unreadable: true, - }, - "maskHeaders": &dcl.Property{ - Type: "boolean", - GoName: "MaskHeaders", - Description: "Boolean specifying whether to encrypt the header information. Encryption should be specified for any headers related to authentication that you do not wish to be seen when retrieving the configuration. The server will be responsible for encrypting the headers. On Get/List calls, if `mask_headers` is set to `true` then the headers will be obscured with `******.`", - Immutable: true, - }, - "path": &dcl.Property{ - Type: "string", - GoName: "Path", - Description: "Optional (defaults to \"/\"). The path to the page against which to run the check. Will be combined with the `host` (specified within the `monitored_resource`) and `port` to construct the full URL. If the provided path does not begin with \"/\", a \"/\" will be prepended automatically.", - Default: "/", - }, - "port": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Port", - Description: "Optional (defaults to 80 when `use_ssl` is `false`, and 443 when `use_ssl` is `true`). The TCP port on the HTTP server against which to run the check. Will be combined with host (specified within the `monitored_resource`) and `path` to construct the full URL.", - ServerDefault: true, - }, - "requestMethod": &dcl.Property{ - Type: "string", - GoName: "RequestMethod", - GoType: "UptimeCheckConfigHttpCheckRequestMethodEnum", - Description: "The HTTP request method to use for the check. 
If set to `METHOD_UNSPECIFIED` then `request_method` defaults to `GET`.", - Immutable: true, - Default: "GET", - Enum: []string{ - "METHOD_UNSPECIFIED", - "GET", - "POST", - }, - }, - "useSsl": &dcl.Property{ - Type: "boolean", - GoName: "UseSsl", - Description: "If `true`, use HTTPS instead of HTTP to run the check.", - }, - "validateSsl": &dcl.Property{ - Type: "boolean", - GoName: "ValidateSsl", - Description: "Boolean specifying whether to include SSL certificate validation as a part of the Uptime check. Only applies to checks where `monitored_resource` is set to `uptime_url`. If `use_ssl` is `false`, setting `validate_ssl` to `true` has no effect.", - }, - }, - }, - "monitoredResource": &dcl.Property{ - Type: "object", - GoName: "MonitoredResource", - GoType: "UptimeCheckConfigMonitoredResource", - Description: "The [monitored resource](https://cloud.google.com/monitoring/api/resources) associated with the configuration. The following monitored resource types are supported for Uptime checks: `uptime_url`, `gce_instance`, `gae_app`, `aws_ec2_instance`, `aws_elb_load_balancer`", - Immutable: true, - Conflicts: []string{ - "resourceGroup", - }, - Required: []string{ - "type", - "filterLabels", - }, - Properties: map[string]*dcl.Property{ - "filterLabels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "FilterLabels", - Immutable: true, - }, - "type": &dcl.Property{ - Type: "string", - GoName: "Type", - Immutable: true, - }, - }, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "A unique resource name for this Uptime check configuration. 
The format is: projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] This field should be omitted when creating the Uptime check configuration; on create, the resource name is assigned by the server and included in the response.", - Immutable: true, - ServerGeneratedParameter: true, - }, - "period": &dcl.Property{ - Type: "string", - GoName: "Period", - Description: "How often, in seconds, the Uptime check is performed. Currently, the only supported values are `60s` (1 minute), `300s` (5 minutes), `600s` (10 minutes), and `900s` (15 minutes). Optional, defaults to `60s`.", - Default: "60s", - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for this uptime check config.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - }, - "resourceGroup": &dcl.Property{ - Type: "object", - GoName: "ResourceGroup", - GoType: "UptimeCheckConfigResourceGroup", - Description: "The group resource associated with the configuration.", - Immutable: true, - Conflicts: []string{ - "monitoredResource", - }, - Properties: map[string]*dcl.Property{ - "groupId": &dcl.Property{ - Type: "string", - GoName: "GroupId", - Description: "The group of resources being monitored. Should be only the `[GROUP_ID]`, and not the full-path `projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]`.", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Monitoring/Group", - Field: "name", - }, - }, - }, - "resourceType": &dcl.Property{ - Type: "string", - GoName: "ResourceType", - GoType: "UptimeCheckConfigResourceGroupResourceTypeEnum", - Description: "The resource type of the group members. 
Possible values: RESOURCE_TYPE_UNSPECIFIED, INSTANCE, AWS_ELB_LOAD_BALANCER", - Immutable: true, - Enum: []string{ - "RESOURCE_TYPE_UNSPECIFIED", - "INSTANCE", - "AWS_ELB_LOAD_BALANCER", - }, - }, - }, - }, - "selectedRegions": &dcl.Property{ - Type: "array", - GoName: "SelectedRegions", - Description: "The list of regions from which the check will be run. Some regions contain one location, and others contain more than one. If this field is specified, enough regions must be provided to include a minimum of 3 locations. Not specifying this field will result in Uptime checks running from all available regions.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "tcpCheck": &dcl.Property{ - Type: "object", - GoName: "TcpCheck", - GoType: "UptimeCheckConfigTcpCheck", - Description: "Contains information needed to make a TCP check.", - Conflicts: []string{ - "httpCheck", - }, - Required: []string{ - "port", - }, - Properties: map[string]*dcl.Property{ - "port": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Port", - Description: "The TCP port on the server against which to run the check. Will be combined with host (specified within the `monitored_resource`) to construct the full URL. Required.", - }, - }, - }, - "timeout": &dcl.Property{ - Type: "string", - GoName: "Timeout", - Description: "The maximum amount of time to wait for the request to complete (must be between 1 and 60 seconds). 
Required.", - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config_yaml_embed.go deleted file mode 100644 index 92907c4ed7..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring/uptime_check_config_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// GENERATED BY gen_go_data.go -// gen_go_data -package monitoring -var YAML_uptime_check_config blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/uptime_check_config.yaml - -package monitoring - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/monitoring/uptime_check_config.yaml -var YAML_uptime_check_config = []byte("info:\n title: Monitoring/UptimeCheckConfig\n description: The Monitoring UptimeCheckConfig resource\n x-dcl-struct-name: UptimeCheckConfig\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a UptimeCheckConfig\n parameters:\n - name: uptimeCheckConfig\n required: true\n description: A full instance of a UptimeCheckConfig\n apply:\n description: The function used to apply information about a UptimeCheckConfig\n parameters:\n - name: uptimeCheckConfig\n required: true\n description: A full instance of a UptimeCheckConfig\n delete:\n description: The function used to delete a UptimeCheckConfig\n parameters:\n - name: uptimeCheckConfig\n required: true\n description: A full instance of a UptimeCheckConfig\n deleteAll:\n description: The function used to delete all UptimeCheckConfig\n parameters:\n - name: project\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many UptimeCheckConfig\n parameters:\n - name: project\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n UptimeCheckConfig:\n title: UptimeCheckConfig\n x-dcl-id: projects/{{project}}/uptimeCheckConfigs/{{name}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - displayName\n - timeout\n properties:\n contentMatchers:\n type: array\n x-dcl-go-name: ContentMatchers\n description: The content that is expected to appear in the data returned\n by the target server 
against which the check is run. Currently, only\n the first entry in the `content_matchers` list is supported, and additional\n entries will be ignored. This field is optional and should only be specified\n if a content match is required as part of the/ Uptime check.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: UptimeCheckConfigContentMatchers\n required:\n - content\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n matcher:\n type: string\n x-dcl-go-name: Matcher\n x-dcl-go-type: UptimeCheckConfigContentMatchersMatcherEnum\n description: ' Possible values: CONTENT_MATCHER_OPTION_UNSPECIFIED,\n CONTAINS_STRING, NOT_CONTAINS_STRING, MATCHES_REGEX, NOT_MATCHES_REGEX'\n default: CONTAINS_STRING\n enum:\n - CONTENT_MATCHER_OPTION_UNSPECIFIED\n - CONTAINS_STRING\n - NOT_CONTAINS_STRING\n - MATCHES_REGEX\n - NOT_MATCHES_REGEX\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: A human-friendly name for the Uptime check configuration. The\n display name should be unique within a Stackdriver Workspace in order\n to make it easier to identify; however, uniqueness is not enforced. Required.\n httpCheck:\n type: object\n x-dcl-go-name: HttpCheck\n x-dcl-go-type: UptimeCheckConfigHttpCheck\n description: Contains information needed to make an HTTP or HTTPS check.\n x-dcl-conflicts:\n - tcpCheck\n properties:\n authInfo:\n type: object\n x-dcl-go-name: AuthInfo\n x-dcl-go-type: UptimeCheckConfigHttpCheckAuthInfo\n description: The authentication information. 
Optional when creating\n an HTTP check; defaults to empty.\n required:\n - username\n - password\n properties:\n password:\n type: string\n x-dcl-go-name: Password\n x-dcl-sensitive: true\n x-dcl-mutable-unreadable: true\n username:\n type: string\n x-dcl-go-name: Username\n body:\n type: string\n x-dcl-go-name: Body\n description: 'The request body associated with the HTTP POST request.\n If `content_type` is `URL_ENCODED`, the body passed in must be URL-encoded.\n Users can provide a `Content-Length` header via the `headers` field\n or the API will do so. If the `request_method` is `GET` and `body`\n is not empty, the API will return an error. The maximum byte size\n is 1 megabyte. Note: As with all `bytes` fields JSON representations\n are base64 encoded. e.g.: \"foo=bar\" in URL-encoded form is \"foo%3Dbar\"\n and in base64 encoding is \"Zm9vJTI1M0RiYXI=\".'\n contentType:\n type: string\n x-dcl-go-name: ContentType\n x-dcl-go-type: UptimeCheckConfigHttpCheckContentTypeEnum\n description: 'The content type to use for the check. Possible values:\n TYPE_UNSPECIFIED, URL_ENCODED'\n x-kubernetes-immutable: true\n enum:\n - TYPE_UNSPECIFIED\n - URL_ENCODED\n headers:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Headers\n description: The list of headers to send as part of the Uptime check\n request. If two headers have the same key and different values, they\n should be entered as a single header, with the value being a comma-separated\n list of all the desired values as described at https://www.w3.org/Protocols/rfc2616/rfc2616.txt\n (page 31). Entering two separate headers with the same key in a Create\n call will cause the first to be overwritten by the second. 
The maximum\n number of headers allowed is 100.\n x-dcl-server-default: true\n x-dcl-mutable-unreadable: true\n maskHeaders:\n type: boolean\n x-dcl-go-name: MaskHeaders\n description: Boolean specifying whether to encrypt the header information.\n Encryption should be specified for any headers related to authentication\n that you do not wish to be seen when retrieving the configuration.\n The server will be responsible for encrypting the headers. On Get/List\n calls, if `mask_headers` is set to `true` then the headers will be\n obscured with `******.`\n x-kubernetes-immutable: true\n path:\n type: string\n x-dcl-go-name: Path\n description: Optional (defaults to \"/\"). The path to the page against\n which to run the check. Will be combined with the `host` (specified\n within the `monitored_resource`) and `port` to construct the full\n URL. If the provided path does not begin with \"/\", a \"/\" will be prepended\n automatically.\n default: /\n port:\n type: integer\n format: int64\n x-dcl-go-name: Port\n description: Optional (defaults to 80 when `use_ssl` is `false`, and\n 443 when `use_ssl` is `true`). The TCP port on the HTTP server against\n which to run the check. Will be combined with host (specified within\n the `monitored_resource`) and `path` to construct the full URL.\n x-dcl-server-default: true\n requestMethod:\n type: string\n x-dcl-go-name: RequestMethod\n x-dcl-go-type: UptimeCheckConfigHttpCheckRequestMethodEnum\n description: The HTTP request method to use for the check. If set to\n `METHOD_UNSPECIFIED` then `request_method` defaults to `GET`.\n x-kubernetes-immutable: true\n default: GET\n enum:\n - METHOD_UNSPECIFIED\n - GET\n - POST\n useSsl:\n type: boolean\n x-dcl-go-name: UseSsl\n description: If `true`, use HTTPS instead of HTTP to run the check.\n validateSsl:\n type: boolean\n x-dcl-go-name: ValidateSsl\n description: Boolean specifying whether to include SSL certificate validation\n as a part of the Uptime check. 
Only applies to checks where `monitored_resource`\n is set to `uptime_url`. If `use_ssl` is `false`, setting `validate_ssl`\n to `true` has no effect.\n monitoredResource:\n type: object\n x-dcl-go-name: MonitoredResource\n x-dcl-go-type: UptimeCheckConfigMonitoredResource\n description: 'The [monitored resource](https://cloud.google.com/monitoring/api/resources)\n associated with the configuration. The following monitored resource types\n are supported for Uptime checks: `uptime_url`, `gce_instance`, `gae_app`, `aws_ec2_instance`, `aws_elb_load_balancer`'\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - resourceGroup\n required:\n - type\n - filterLabels\n properties:\n filterLabels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: FilterLabels\n x-kubernetes-immutable: true\n type:\n type: string\n x-dcl-go-name: Type\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: 'A unique resource name for this Uptime check configuration.\n The format is: projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]\n This field should be omitted when creating the Uptime check configuration;\n on create, the resource name is assigned by the server and included in\n the response.'\n x-kubernetes-immutable: true\n x-dcl-server-generated-parameter: true\n period:\n type: string\n x-dcl-go-name: Period\n description: How often, in seconds, the Uptime check is performed. Currently,\n the only supported values are `60s` (1 minute), `300s` (5 minutes), `600s`\n (10 minutes), and `900s` (15 minutes). 
Optional, defaults to `60s`.\n default: 60s\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for this uptime check config.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n resourceGroup:\n type: object\n x-dcl-go-name: ResourceGroup\n x-dcl-go-type: UptimeCheckConfigResourceGroup\n description: The group resource associated with the configuration.\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - monitoredResource\n properties:\n groupId:\n type: string\n x-dcl-go-name: GroupId\n description: The group of resources being monitored. Should be only\n the `[GROUP_ID]`, and not the full-path `projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]`.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Monitoring/Group\n field: name\n resourceType:\n type: string\n x-dcl-go-name: ResourceType\n x-dcl-go-type: UptimeCheckConfigResourceGroupResourceTypeEnum\n description: 'The resource type of the group members. Possible values:\n RESOURCE_TYPE_UNSPECIFIED, INSTANCE, AWS_ELB_LOAD_BALANCER'\n x-kubernetes-immutable: true\n enum:\n - RESOURCE_TYPE_UNSPECIFIED\n - INSTANCE\n - AWS_ELB_LOAD_BALANCER\n selectedRegions:\n type: array\n x-dcl-go-name: SelectedRegions\n description: The list of regions from which the check will be run. Some\n regions contain one location, and others contain more than one. If this\n field is specified, enough regions must be provided to include a minimum\n of 3 locations. 
Not specifying this field will result in Uptime checks\n running from all available regions.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n tcpCheck:\n type: object\n x-dcl-go-name: TcpCheck\n x-dcl-go-type: UptimeCheckConfigTcpCheck\n description: Contains information needed to make a TCP check.\n x-dcl-conflicts:\n - httpCheck\n required:\n - port\n properties:\n port:\n type: integer\n format: int64\n x-dcl-go-name: Port\n description: The TCP port on the server against which to run the check.\n Will be combined with host (specified within the `monitored_resource`)\n to construct the full URL. Required.\n timeout:\n type: string\n x-dcl-go-name: Timeout\n description: The maximum amount of time to wait for the request to complete\n (must be between 1 and 60 seconds). Required.\n") - -// 13844 bytes -// MD5: aadad35e1f627493336f7e830aa4d61d diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/client.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/client.go deleted file mode 100644 index e20f944535..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/client.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// Package osconfig defines operations in the declarative SDK. -package osconfig - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -// The Client is the base struct of all operations. This will receive the -// Get, Delete, List, and Apply operations on all resources. -type Client struct { - Config *dcl.Config -} - -// NewClient creates a client that retries all operations a few times each. -func NewClient(c *dcl.Config) *Client { - return &Client{ - Config: c, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment.go deleted file mode 100644 index 1dd3c88dbd..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment.go +++ /dev/null @@ -1,2919 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package osconfig - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/googleapi" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -type OSPolicyAssignment struct { - Name *string `json:"name"` - Description *string `json:"description"` - OSPolicies []OSPolicyAssignmentOSPolicies `json:"osPolicies"` - InstanceFilter *OSPolicyAssignmentInstanceFilter `json:"instanceFilter"` - Rollout *OSPolicyAssignmentRollout `json:"rollout"` - RevisionId *string `json:"revisionId"` - RevisionCreateTime *string `json:"revisionCreateTime"` - Etag *string `json:"etag"` - RolloutState *OSPolicyAssignmentRolloutStateEnum `json:"rolloutState"` - Baseline *bool `json:"baseline"` - Deleted *bool `json:"deleted"` - Reconciling *bool `json:"reconciling"` - Uid *string `json:"uid"` - Project *string `json:"project"` - Location *string `json:"location"` - SkipAwaitRollout *bool `json:"skipAwaitRollout"` -} - -func (r *OSPolicyAssignment) String() string { - return dcl.SprintResource(r) -} - -// The enum OSPolicyAssignmentOSPoliciesModeEnum. -type OSPolicyAssignmentOSPoliciesModeEnum string - -// OSPolicyAssignmentOSPoliciesModeEnumRef returns a *OSPolicyAssignmentOSPoliciesModeEnum with the value of string s -// If the empty string is provided, nil is returned. -func OSPolicyAssignmentOSPoliciesModeEnumRef(s string) *OSPolicyAssignmentOSPoliciesModeEnum { - v := OSPolicyAssignmentOSPoliciesModeEnum(s) - return &v -} - -func (v OSPolicyAssignmentOSPoliciesModeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"MODE_UNSPECIFIED", "VALIDATION", "ENFORCEMENT"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "OSPolicyAssignmentOSPoliciesModeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum. 
-type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum string - -// OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnumRef returns a *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum with the value of string s -// If the empty string is provided, nil is returned. -func OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnumRef(s string) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum { - v := OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum(s) - return &v -} - -func (v OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"DESIRED_STATE_UNSPECIFIED", "INSTALLED", "REMOVED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum. -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum string - -// OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnumRef returns a *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum with the value of string s -// If the empty string is provided, nil is returned. -func OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnumRef(s string) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum { - v := OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum(s) - return &v -} - -func (v OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. 
- return nil - } - for _, s := range []string{"ARCHIVE_TYPE_UNSPECIFIED", "DEB", "DEB_SRC"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum. -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum string - -// OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnumRef returns a *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum with the value of string s -// If the empty string is provided, nil is returned. -func OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnumRef(s string) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum { - v := OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum(s) - return &v -} - -func (v OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"INTERPRETER_UNSPECIFIED", "NONE", "SHELL", "POWERSHELL"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum. -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum string - -// OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnumRef returns a *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum with the value of string s -// If the empty string is provided, nil is returned. 
-func OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnumRef(s string) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum { - v := OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum(s) - return &v -} - -func (v OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"INTERPRETER_UNSPECIFIED", "NONE", "SHELL", "POWERSHELL"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum. -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum string - -// OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnumRef returns a *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum with the value of string s -// If the empty string is provided, nil is returned. -func OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnumRef(s string) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum { - v := OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum(s) - return &v -} - -func (v OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED", "COMPLIANT", "NON_COMPLIANT", "UNKNOWN", "NO_OS_POLICIES_APPLICABLE"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum", - Value: string(v), - Valid: []string{}, - } -} - -// The enum OSPolicyAssignmentRolloutStateEnum. 
-type OSPolicyAssignmentRolloutStateEnum string - -// OSPolicyAssignmentRolloutStateEnumRef returns a *OSPolicyAssignmentRolloutStateEnum with the value of string s -// If the empty string is provided, nil is returned. -func OSPolicyAssignmentRolloutStateEnumRef(s string) *OSPolicyAssignmentRolloutStateEnum { - v := OSPolicyAssignmentRolloutStateEnum(s) - return &v -} - -func (v OSPolicyAssignmentRolloutStateEnum) Validate() error { - if string(v) == "" { - // Empty enum is okay. - return nil - } - for _, s := range []string{"ROLLOUT_STATE_UNSPECIFIED", "IN_PROGRESS", "CANCELLING", "CANCELLED", "SUCCEEDED"} { - if string(v) == s { - return nil - } - } - return &dcl.EnumInvalidError{ - Enum: "OSPolicyAssignmentRolloutStateEnum", - Value: string(v), - Valid: []string{}, - } -} - -type OSPolicyAssignmentOSPolicies struct { - empty bool `json:"-"` - Id *string `json:"id"` - Description *string `json:"description"` - Mode *OSPolicyAssignmentOSPoliciesModeEnum `json:"mode"` - ResourceGroups []OSPolicyAssignmentOSPoliciesResourceGroups `json:"resourceGroups"` - AllowNoResourceGroupMatch *bool `json:"allowNoResourceGroupMatch"` -} - -type jsonOSPolicyAssignmentOSPolicies OSPolicyAssignmentOSPolicies - -func (r *OSPolicyAssignmentOSPolicies) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPolicies - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPolicies - } else { - - r.Id = res.Id - - r.Description = res.Description - - r.Mode = res.Mode - - r.ResourceGroups = res.ResourceGroups - - r.AllowNoResourceGroupMatch = res.AllowNoResourceGroupMatch - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPolicies is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentOSPolicies *OSPolicyAssignmentOSPolicies = &OSPolicyAssignmentOSPolicies{empty: true} - -func (r *OSPolicyAssignmentOSPolicies) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPolicies) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPolicies) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroups struct { - empty bool `json:"-"` - InventoryFilters []OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters `json:"inventoryFilters"` - Resources []OSPolicyAssignmentOSPoliciesResourceGroupsResources `json:"resources"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroups OSPolicyAssignmentOSPoliciesResourceGroups - -func (r *OSPolicyAssignmentOSPoliciesResourceGroups) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroups - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroups - } else { - - r.InventoryFilters = res.InventoryFilters - - r.Resources = res.Resources - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroups is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentOSPoliciesResourceGroups *OSPolicyAssignmentOSPoliciesResourceGroups = &OSPolicyAssignmentOSPoliciesResourceGroups{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroups) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroups) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroups) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters struct { - empty bool `json:"-"` - OSShortName *string `json:"osShortName"` - OSVersion *string `json:"osVersion"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters - } else { - - r.OSShortName = res.OSShortName - - r.OSVersion = res.OSVersion - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters = &OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResources struct { - empty bool `json:"-"` - Id *string `json:"id"` - Pkg *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg `json:"pkg"` - Repository *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository `json:"repository"` - Exec *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec `json:"exec"` - File *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile `json:"file"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResources OSPolicyAssignmentOSPoliciesResourceGroupsResources - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResources) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResources - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResources - } else { - - r.Id = res.Id - - r.Pkg = res.Pkg - - r.Repository = res.Repository - - r.Exec = res.Exec - - r.File = res.File - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResources is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResources *OSPolicyAssignmentOSPoliciesResourceGroupsResources = &OSPolicyAssignmentOSPoliciesResourceGroupsResources{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResources) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResources) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResources) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg struct { - empty bool `json:"-"` - DesiredState *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum `json:"desiredState"` - Apt *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt `json:"apt"` - Deb *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb `json:"deb"` - Yum *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum `json:"yum"` - Zypper *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper `json:"zypper"` - Rpm *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm `json:"rpm"` - Googet *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget `json:"googet"` - Msi *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi `json:"msi"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - 
json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg - } else { - - r.DesiredState = res.DesiredState - - r.Apt = res.Apt - - r.Deb = res.Deb - - r.Yum = res.Yum - - r.Zypper = res.Zypper - - r.Rpm = res.Rpm - - r.Googet = res.Googet - - r.Msi = res.Msi - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt struct { - empty bool `json:"-"` - Name *string `json:"name"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt - } else { - - r.Name = 
res.Name - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb struct { - empty bool `json:"-"` - Source *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource `json:"source"` - PullDeps *bool `json:"pullDeps"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb - } else { - - r.Source = res.Source - - r.PullDeps = res.PullDeps - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb is -// 
empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource struct { - empty bool `json:"-"` - Remote *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote `json:"remote"` - Gcs *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs `json:"gcs"` - LocalPath *string `json:"localPath"` - AllowInsecure *bool `json:"allowInsecure"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource - } else { - - r.Remote = res.Remote - - r.Gcs = res.Gcs - - r.LocalPath = res.LocalPath - - r.AllowInsecure = res.AllowInsecure - - } - return nil -} - -// This object is used to assert a desired 
state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote struct { - empty bool `json:"-"` - Uri *string `json:"uri"` - Sha256Checksum *string `json:"sha256Checksum"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote - } else { - - r.Uri = res.Uri - - r.Sha256Checksum = res.Sha256Checksum - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote is 
-// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs struct { - empty bool `json:"-"` - Bucket *string `json:"bucket"` - Object *string `json:"object"` - Generation *int64 `json:"generation"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs - } else { - - r.Bucket = res.Bucket - - r.Object = res.Object - - r.Generation = res.Generation - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum struct { - empty bool `json:"-"` - Name *string `json:"name"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum - } else { - - r.Name = res.Name - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper struct { - empty bool `json:"-"` - Name *string `json:"name"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper - } else { - - r.Name = res.Name - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm struct { - empty bool `json:"-"` - Source *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource `json:"source"` - PullDeps *bool `json:"pullDeps"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm - } else { - - r.Source = res.Source - - r.PullDeps = res.PullDeps - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource struct { - empty bool `json:"-"` - Remote *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote `json:"remote"` - Gcs *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs `json:"gcs"` - LocalPath *string `json:"localPath"` - AllowInsecure *bool `json:"allowInsecure"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource - } else { - - r.Remote = res.Remote - - r.Gcs = res.Gcs - - r.LocalPath = res.LocalPath - - r.AllowInsecure = res.AllowInsecure - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote struct { - empty bool `json:"-"` - Uri *string `json:"uri"` - Sha256Checksum *string `json:"sha256Checksum"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote - } else { - - r.Uri = res.Uri - - r.Sha256Checksum = res.Sha256Checksum - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote is -// empty. Go lacks global const objects, but this object should be treated -// as one. 
Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs struct { - empty bool `json:"-"` - Bucket *string `json:"bucket"` - Object *string `json:"object"` - Generation *int64 `json:"generation"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs - } else { - - r.Bucket = res.Bucket - - r.Object = res.Object - - r.Generation = res.Generation - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs is -// empty. Go lacks global const objects, but this object should be treated -// as one. 
Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget struct { - empty bool `json:"-"` - Name *string `json:"name"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget - } else { - - r.Name = res.Name - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi struct { - empty bool `json:"-"` - Source *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource `json:"source"` - Properties []string `json:"properties"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi - } else { - - r.Source = res.Source - - r.Properties = res.Properties - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource struct { - empty bool `json:"-"` - Remote *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote `json:"remote"` - Gcs *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs `json:"gcs"` - LocalPath *string `json:"localPath"` - AllowInsecure *bool `json:"allowInsecure"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource - } else { - - r.Remote = res.Remote - - r.Gcs = res.Gcs - - r.LocalPath = res.LocalPath - - r.AllowInsecure = res.AllowInsecure - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource is -// empty. 
Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote struct { - empty bool `json:"-"` - Uri *string `json:"uri"` - Sha256Checksum *string `json:"sha256Checksum"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote - } else { - - r.Uri = res.Uri - - r.Sha256Checksum = res.Sha256Checksum - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote is -// empty. Go lacks global const objects, but this object should be treated -// as one. 
Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs struct { - empty bool `json:"-"` - Bucket *string `json:"bucket"` - Object *string `json:"object"` - Generation *int64 `json:"generation"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs - } else { - - r.Bucket = res.Bucket - - r.Object = res.Object - - r.Generation = res.Generation - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs is -// empty. Go lacks global const objects, but this object should be treated -// as one. 
Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository struct { - empty bool `json:"-"` - Apt *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt `json:"apt"` - Yum *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum `json:"yum"` - Zypper *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper `json:"zypper"` - Goo *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo `json:"goo"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository - } else { - - r.Apt = res.Apt - - r.Yum = res.Yum - - r.Zypper = res.Zypper - - r.Goo = res.Goo - - } - return nil -} - -// This object is used to assert a 
desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt struct { - empty bool `json:"-"` - ArchiveType *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum `json:"archiveType"` - Uri *string `json:"uri"` - Distribution *string `json:"distribution"` - Components []string `json:"components"` - GpgKey *string `json:"gpgKey"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt - } else { - - r.ArchiveType = res.ArchiveType - - r.Uri = res.Uri - - 
r.Distribution = res.Distribution - - r.Components = res.Components - - r.GpgKey = res.GpgKey - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum struct { - empty bool `json:"-"` - Id *string `json:"id"` - DisplayName *string `json:"displayName"` - BaseUrl *string `json:"baseUrl"` - GpgKeys []string `json:"gpgKeys"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum - } else { - - r.Id = res.Id 
- - r.DisplayName = res.DisplayName - - r.BaseUrl = res.BaseUrl - - r.GpgKeys = res.GpgKeys - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper struct { - empty bool `json:"-"` - Id *string `json:"id"` - DisplayName *string `json:"displayName"` - BaseUrl *string `json:"baseUrl"` - GpgKeys []string `json:"gpgKeys"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper - } else { - 
- r.Id = res.Id - - r.DisplayName = res.DisplayName - - r.BaseUrl = res.BaseUrl - - r.GpgKeys = res.GpgKeys - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo struct { - empty bool `json:"-"` - Name *string `json:"name"` - Url *string `json:"url"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo - } else { - - r.Name = res.Name - - r.Url = res.Url - - } - return nil -} - 
-// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec struct { - empty bool `json:"-"` - Validate *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate `json:"validate"` - Enforce *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce `json:"enforce"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec - } else { - - r.Validate = res.Validate - - r.Enforce = res.Enforce - - } - return nil -} - -// This object is used to assert a desired state where this 
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate struct { - empty bool `json:"-"` - File *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile `json:"file"` - Script *string `json:"script"` - Args []string `json:"args"` - Interpreter *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum `json:"interpreter"` - OutputFilePath *string `json:"outputFilePath"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate - } else { - - r.File = res.File - - r.Script = res.Script - - r.Args = res.Args - - r.Interpreter 
= res.Interpreter - - r.OutputFilePath = res.OutputFilePath - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile struct { - empty bool `json:"-"` - Remote *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote `json:"remote"` - Gcs *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs `json:"gcs"` - LocalPath *string `json:"localPath"` - AllowInsecure *bool `json:"allowInsecure"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 
{ - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile - } else { - - r.Remote = res.Remote - - r.Gcs = res.Gcs - - r.LocalPath = res.LocalPath - - r.AllowInsecure = res.AllowInsecure - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote struct { - empty bool `json:"-"` - Uri *string `json:"uri"` - Sha256Checksum *string `json:"sha256Checksum"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if 
len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote - } else { - - r.Uri = res.Uri - - r.Sha256Checksum = res.Sha256Checksum - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs struct { - empty bool `json:"-"` - Bucket *string `json:"bucket"` - Object *string `json:"object"` - Generation *int64 `json:"generation"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - 
json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs - } else { - - r.Bucket = res.Bucket - - r.Object = res.Object - - r.Generation = res.Generation - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce struct { - empty bool `json:"-"` - File *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile `json:"file"` - Script *string `json:"script"` - Args []string `json:"args"` - Interpreter *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum `json:"interpreter"` - OutputFilePath *string `json:"outputFilePath"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) UnmarshalJSON(data []byte) 
error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce - } else { - - r.File = res.File - - r.Script = res.Script - - r.Args = res.Args - - r.Interpreter = res.Interpreter - - r.OutputFilePath = res.OutputFilePath - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile struct { - empty bool `json:"-"` - Remote *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote `json:"remote"` - Gcs *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs `json:"gcs"` - LocalPath *string `json:"localPath"` - AllowInsecure *bool `json:"allowInsecure"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile 
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile - } else { - - r.Remote = res.Remote - - r.Gcs = res.Gcs - - r.LocalPath = res.LocalPath - - r.AllowInsecure = res.AllowInsecure - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote struct { - empty bool `json:"-"` - Uri *string `json:"uri"` - Sha256Checksum *string `json:"sha256Checksum"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote 
OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote - } else { - - r.Uri = res.Uri - - r.Sha256Checksum = res.Sha256Checksum - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs struct { - empty bool `json:"-"` - Bucket *string `json:"bucket"` - Object *string `json:"object"` - Generation *int64 `json:"generation"` -} - -type 
jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs - } else { - - r.Bucket = res.Bucket - - r.Object = res.Object - - r.Generation = res.Generation - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile struct { - empty bool `json:"-"` - File *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile `json:"file"` - Content *string `json:"content"` - Path 
*string `json:"path"` - State *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum `json:"state"` - Permissions *string `json:"permissions"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile - } else { - - r.File = res.File - - r.Content = res.Content - - r.Path = res.Path - - r.State = res.State - - r.Permissions = res.Permissions - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile struct { - empty bool `json:"-"` - Remote *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote `json:"remote"` - Gcs *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs `json:"gcs"` - LocalPath *string `json:"localPath"` - AllowInsecure *bool `json:"allowInsecure"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile - } else { - - r.Remote = res.Remote - - r.Gcs = res.Gcs - - r.LocalPath = res.LocalPath - - r.AllowInsecure = res.AllowInsecure - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile is -// empty. Go lacks global const objects, but this object should be treated -// as one. 
Modifying this object will have undesirable results. -var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote struct { - empty bool `json:"-"` - Uri *string `json:"uri"` - Sha256Checksum *string `json:"sha256Checksum"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote - } else { - - r.Uri = res.Uri - - r.Sha256Checksum = res.Sha256Checksum - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs struct { - empty bool `json:"-"` - Bucket *string `json:"bucket"` - Object *string `json:"object"` - Generation *int64 `json:"generation"` -} - -type jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs - } else { - - r.Bucket = res.Bucket - - r.Object = res.Object - - r.Generation = res.Generation - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs{empty: true} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentInstanceFilter struct { - empty bool `json:"-"` - All *bool `json:"all"` - InclusionLabels []OSPolicyAssignmentInstanceFilterInclusionLabels `json:"inclusionLabels"` - ExclusionLabels []OSPolicyAssignmentInstanceFilterExclusionLabels `json:"exclusionLabels"` - Inventories []OSPolicyAssignmentInstanceFilterInventories `json:"inventories"` -} - -type jsonOSPolicyAssignmentInstanceFilter OSPolicyAssignmentInstanceFilter - -func (r *OSPolicyAssignmentInstanceFilter) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentInstanceFilter - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentInstanceFilter - } else { - - r.All = res.All - - r.InclusionLabels = res.InclusionLabels - - r.ExclusionLabels = res.ExclusionLabels - - r.Inventories = res.Inventories - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentInstanceFilter is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentInstanceFilter *OSPolicyAssignmentInstanceFilter = &OSPolicyAssignmentInstanceFilter{empty: true} - -func (r *OSPolicyAssignmentInstanceFilter) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentInstanceFilter) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentInstanceFilter) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentInstanceFilterInclusionLabels struct { - empty bool `json:"-"` - Labels map[string]string `json:"labels"` -} - -type jsonOSPolicyAssignmentInstanceFilterInclusionLabels OSPolicyAssignmentInstanceFilterInclusionLabels - -func (r *OSPolicyAssignmentInstanceFilterInclusionLabels) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentInstanceFilterInclusionLabels - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentInstanceFilterInclusionLabels - } else { - - r.Labels = res.Labels - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentInstanceFilterInclusionLabels is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentInstanceFilterInclusionLabels *OSPolicyAssignmentInstanceFilterInclusionLabels = &OSPolicyAssignmentInstanceFilterInclusionLabels{empty: true} - -func (r *OSPolicyAssignmentInstanceFilterInclusionLabels) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentInstanceFilterInclusionLabels) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentInstanceFilterInclusionLabels) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentInstanceFilterExclusionLabels struct { - empty bool `json:"-"` - Labels map[string]string `json:"labels"` -} - -type jsonOSPolicyAssignmentInstanceFilterExclusionLabels OSPolicyAssignmentInstanceFilterExclusionLabels - -func (r *OSPolicyAssignmentInstanceFilterExclusionLabels) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentInstanceFilterExclusionLabels - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentInstanceFilterExclusionLabels - } else { - - r.Labels = res.Labels - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentInstanceFilterExclusionLabels is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentInstanceFilterExclusionLabels *OSPolicyAssignmentInstanceFilterExclusionLabels = &OSPolicyAssignmentInstanceFilterExclusionLabels{empty: true} - -func (r *OSPolicyAssignmentInstanceFilterExclusionLabels) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentInstanceFilterExclusionLabels) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentInstanceFilterExclusionLabels) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentInstanceFilterInventories struct { - empty bool `json:"-"` - OSShortName *string `json:"osShortName"` - OSVersion *string `json:"osVersion"` -} - -type jsonOSPolicyAssignmentInstanceFilterInventories OSPolicyAssignmentInstanceFilterInventories - -func (r *OSPolicyAssignmentInstanceFilterInventories) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentInstanceFilterInventories - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentInstanceFilterInventories - } else { - - r.OSShortName = res.OSShortName - - r.OSVersion = res.OSVersion - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentInstanceFilterInventories is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentInstanceFilterInventories *OSPolicyAssignmentInstanceFilterInventories = &OSPolicyAssignmentInstanceFilterInventories{empty: true} - -func (r *OSPolicyAssignmentInstanceFilterInventories) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentInstanceFilterInventories) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentInstanceFilterInventories) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentRollout struct { - empty bool `json:"-"` - DisruptionBudget *OSPolicyAssignmentRolloutDisruptionBudget `json:"disruptionBudget"` - MinWaitDuration *string `json:"minWaitDuration"` -} - -type jsonOSPolicyAssignmentRollout OSPolicyAssignmentRollout - -func (r *OSPolicyAssignmentRollout) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentRollout - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentRollout - } else { - - r.DisruptionBudget = res.DisruptionBudget - - r.MinWaitDuration = res.MinWaitDuration - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentRollout is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentRollout *OSPolicyAssignmentRollout = &OSPolicyAssignmentRollout{empty: true} - -func (r *OSPolicyAssignmentRollout) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentRollout) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentRollout) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -type OSPolicyAssignmentRolloutDisruptionBudget struct { - empty bool `json:"-"` - Fixed *int64 `json:"fixed"` - Percent *int64 `json:"percent"` -} - -type jsonOSPolicyAssignmentRolloutDisruptionBudget OSPolicyAssignmentRolloutDisruptionBudget - -func (r *OSPolicyAssignmentRolloutDisruptionBudget) UnmarshalJSON(data []byte) error { - var res jsonOSPolicyAssignmentRolloutDisruptionBudget - if err := json.Unmarshal(data, &res); err != nil { - return err - } - - var m map[string]interface{} - json.Unmarshal(data, &m) - - if len(m) == 0 { - *r = *EmptyOSPolicyAssignmentRolloutDisruptionBudget - } else { - - r.Fixed = res.Fixed - - r.Percent = res.Percent - - } - return nil -} - -// This object is used to assert a desired state where this OSPolicyAssignmentRolloutDisruptionBudget is -// empty. Go lacks global const objects, but this object should be treated -// as one. Modifying this object will have undesirable results. 
-var EmptyOSPolicyAssignmentRolloutDisruptionBudget *OSPolicyAssignmentRolloutDisruptionBudget = &OSPolicyAssignmentRolloutDisruptionBudget{empty: true} - -func (r *OSPolicyAssignmentRolloutDisruptionBudget) Empty() bool { - return r.empty -} - -func (r *OSPolicyAssignmentRolloutDisruptionBudget) String() string { - return dcl.SprintResource(r) -} - -func (r *OSPolicyAssignmentRolloutDisruptionBudget) HashCode() string { - // Placeholder for a more complex hash method that handles ordering, etc - // Hash resource body for easy comparison later - hash := sha256.New().Sum([]byte(r.String())) - return fmt.Sprintf("%x", hash) -} - -// Describe returns a simple description of this resource to ensure that automated tools -// can identify it. -func (r *OSPolicyAssignment) Describe() dcl.ServiceTypeVersion { - return dcl.ServiceTypeVersion{ - Service: "os_config", - Type: "OSPolicyAssignment", - Version: "osconfig", - } -} - -func (r *OSPolicyAssignment) ID() (string, error) { - if err := extractOSPolicyAssignmentFields(r); err != nil { - return "", err - } - nr := r.urlNormalized() - params := map[string]interface{}{ - "name": dcl.ValueOrEmptyString(nr.Name), - "description": dcl.ValueOrEmptyString(nr.Description), - "os_policies": dcl.ValueOrEmptyString(nr.OSPolicies), - "instance_filter": dcl.ValueOrEmptyString(nr.InstanceFilter), - "rollout": dcl.ValueOrEmptyString(nr.Rollout), - "revision_id": dcl.ValueOrEmptyString(nr.RevisionId), - "revision_create_time": dcl.ValueOrEmptyString(nr.RevisionCreateTime), - "etag": dcl.ValueOrEmptyString(nr.Etag), - "rollout_state": dcl.ValueOrEmptyString(nr.RolloutState), - "baseline": dcl.ValueOrEmptyString(nr.Baseline), - "deleted": dcl.ValueOrEmptyString(nr.Deleted), - "reconciling": dcl.ValueOrEmptyString(nr.Reconciling), - "uid": dcl.ValueOrEmptyString(nr.Uid), - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "skip_await_rollout": dcl.ValueOrEmptyString(nr.SkipAwaitRollout), - } 
- return dcl.Nprintf("projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}", params), nil -} - -const OSPolicyAssignmentMaxPage = -1 - -type OSPolicyAssignmentList struct { - Items []*OSPolicyAssignment - - nextToken string - - pageSize int32 - - resource *OSPolicyAssignment -} - -func (l *OSPolicyAssignmentList) HasNext() bool { - return l.nextToken != "" -} - -func (l *OSPolicyAssignmentList) Next(ctx context.Context, c *Client) error { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if !l.HasNext() { - return fmt.Errorf("no next page") - } - items, token, err := c.listOSPolicyAssignment(ctx, l.resource, l.nextToken, l.pageSize) - if err != nil { - return err - } - l.Items = items - l.nextToken = token - return err -} - -func (c *Client) ListOSPolicyAssignment(ctx context.Context, project, location string) (*OSPolicyAssignmentList, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - return c.ListOSPolicyAssignmentWithMaxResults(ctx, project, location, OSPolicyAssignmentMaxPage) - -} - -func (c *Client) ListOSPolicyAssignmentWithMaxResults(ctx context.Context, project, location string, pageSize int32) (*OSPolicyAssignmentList, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // Create a resource object so that we can use proper url normalization methods. 
- r := &OSPolicyAssignment{ - Project: &project, - Location: &location, - } - items, token, err := c.listOSPolicyAssignment(ctx, r, "", pageSize) - if err != nil { - return nil, err - } - return &OSPolicyAssignmentList{ - Items: items, - nextToken: token, - pageSize: pageSize, - resource: r, - }, nil -} - -func (c *Client) GetOSPolicyAssignment(ctx context.Context, r *OSPolicyAssignment) (*OSPolicyAssignment, error) { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - // This is *purposefully* supressing errors. - // This function is used with url-normalized values + not URL normalized values. - // URL Normalized values will throw unintentional errors, since those values are not of the proper parent form. - extractOSPolicyAssignmentFields(r) - - b, err := c.getOSPolicyAssignmentRaw(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - return nil, &googleapi.Error{ - Code: 404, - Message: err.Error(), - } - } - return nil, err - } - result, err := unmarshalOSPolicyAssignment(b, c, r) - if err != nil { - return nil, err - } - result.Project = r.Project - result.Location = r.Location - result.Name = r.Name - - c.Config.Logger.InfoWithContextf(ctx, "Retrieved raw result state: %v", result) - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with specified state: %v", r) - result, err = canonicalizeOSPolicyAssignmentNewState(c, result, r) - if err != nil { - return nil, err - } - if err := postReadExtractOSPolicyAssignmentFields(result); err != nil { - return result, err - } - c.Config.Logger.InfoWithContextf(ctx, "Created result state: %v", result) - - return result, nil -} - -func (c *Client) DeleteOSPolicyAssignment(ctx context.Context, r *OSPolicyAssignment) error { - ctx = dcl.ContextWithRequestID(ctx) - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - if r == nil { - return fmt.Errorf("OSPolicyAssignment resource is nil") - } - 
c.Config.Logger.InfoWithContext(ctx, "Deleting OSPolicyAssignment...") - deleteOp := deleteOSPolicyAssignmentOperation{} - return deleteOp.do(ctx, r, c) -} - -// DeleteAllOSPolicyAssignment deletes all resources that the filter functions returns true on. -func (c *Client) DeleteAllOSPolicyAssignment(ctx context.Context, project, location string, filter func(*OSPolicyAssignment) bool) error { - listObj, err := c.ListOSPolicyAssignment(ctx, project, location) - if err != nil { - return err - } - - err = c.deleteAllOSPolicyAssignment(ctx, filter, listObj.Items) - if err != nil { - return err - } - for listObj.HasNext() { - err = listObj.Next(ctx, c) - if err != nil { - return nil - } - err = c.deleteAllOSPolicyAssignment(ctx, filter, listObj.Items) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) ApplyOSPolicyAssignment(ctx context.Context, rawDesired *OSPolicyAssignment, opts ...dcl.ApplyOption) (*OSPolicyAssignment, error) { - ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second)) - defer cancel() - - ctx = dcl.ContextWithRequestID(ctx) - var resultNewState *OSPolicyAssignment - err := dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - newState, err := applyOSPolicyAssignmentHelper(c, ctx, rawDesired, opts...) - resultNewState = newState - if err != nil { - // If the error is 409, there is conflict in resource update. - // Here we want to apply changes based on latest state. 
- if dcl.IsConflictError(err) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{Err: err} - } - return nil, err - } - return nil, nil - }, c.Config.RetryProvider) - return resultNewState, err -} - -func applyOSPolicyAssignmentHelper(c *Client, ctx context.Context, rawDesired *OSPolicyAssignment, opts ...dcl.ApplyOption) (*OSPolicyAssignment, error) { - c.Config.Logger.InfoWithContext(ctx, "Beginning ApplyOSPolicyAssignment...") - c.Config.Logger.InfoWithContextf(ctx, "User specified desired state: %v", rawDesired) - - // 1.1: Validation of user-specified fields in desired state. - if err := rawDesired.validate(); err != nil { - return nil, err - } - - if err := extractOSPolicyAssignmentFields(rawDesired); err != nil { - return nil, err - } - - initial, desired, fieldDiffs, err := c.oSPolicyAssignmentDiffsForRawDesired(ctx, rawDesired, opts...) - if err != nil { - return nil, fmt.Errorf("failed to create a diff: %w", err) - } - - diffs, err := convertFieldDiffsToOSPolicyAssignmentDiffs(c.Config, fieldDiffs, opts) - if err != nil { - return nil, err - } - - // TODO(magic-modules-eng): 2.2 Feasibility check (all updates are feasible so far). 
- - // 2.3: Lifecycle Directive Check - var create bool - lp := dcl.FetchLifecycleParams(opts) - if initial == nil { - if dcl.HasLifecycleParam(lp, dcl.BlockCreation) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Creation blocked by lifecycle params: %#v.", desired)} - } - create = true - } else if dcl.HasLifecycleParam(lp, dcl.BlockAcquire) { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("Resource already exists - apply blocked by lifecycle params: %#v.", initial), - } - } else { - for _, d := range diffs { - if d.RequiresRecreate { - return nil, dcl.ApplyInfeasibleError{ - Message: fmt.Sprintf("infeasible update: (%v) would require recreation", d), - } - } - if dcl.HasLifecycleParam(lp, dcl.BlockModification) { - return nil, dcl.ApplyInfeasibleError{Message: fmt.Sprintf("Modification blocked, diff (%v) unresolvable.", d)} - } - } - } - - // 2.4 Imperative Request Planning - var ops []oSPolicyAssignmentApiOperation - if create { - ops = append(ops, &createOSPolicyAssignmentOperation{}) - } else { - for _, d := range diffs { - ops = append(ops, d.UpdateOp) - } - } - c.Config.Logger.InfoWithContextf(ctx, "Created plan: %#v", ops) - - // 2.5 Request Actuation - for _, op := range ops { - c.Config.Logger.InfoWithContextf(ctx, "Performing operation %T %+v", op, op) - if err := op.do(ctx, desired, c); err != nil { - c.Config.Logger.InfoWithContextf(ctx, "Failed operation %T %+v: %v", op, op, err) - return nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Finished operation %T %+v", op, op) - } - return applyOSPolicyAssignmentDiff(c, ctx, desired, rawDesired, ops, opts...) 
-} - -func applyOSPolicyAssignmentDiff(c *Client, ctx context.Context, desired *OSPolicyAssignment, rawDesired *OSPolicyAssignment, ops []oSPolicyAssignmentApiOperation, opts ...dcl.ApplyOption) (*OSPolicyAssignment, error) { - // 3.1, 3.2a Retrieval of raw new state & canonicalization with desired state - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state...") - rawNew, err := c.GetOSPolicyAssignment(ctx, desired) - if err != nil { - return nil, err - } - // Get additional values from the first response. - // These values should be merged into the newState above. - if len(ops) > 0 { - lastOp := ops[len(ops)-1] - if o, ok := lastOp.(*createOSPolicyAssignmentOperation); ok { - if r, hasR := o.FirstResponse(); hasR { - - c.Config.Logger.InfoWithContext(ctx, "Retrieving raw new state from operation...") - - fullResp, err := unmarshalMapOSPolicyAssignment(r, c, rawDesired) - if err != nil { - return nil, err - } - - rawNew, err = canonicalizeOSPolicyAssignmentNewState(c, rawNew, fullResp) - if err != nil { - return nil, err - } - } - } - } - - c.Config.Logger.InfoWithContextf(ctx, "Canonicalizing with raw desired state: %v", rawDesired) - // 3.2b Canonicalization of raw new state using raw desired state - newState, err := canonicalizeOSPolicyAssignmentNewState(c, rawNew, rawDesired) - if err != nil { - return rawNew, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created canonical new state: %v", newState) - // 3.3 Comparison of the new state and raw desired state. - // TODO(magic-modules-eng): EVENTUALLY_CONSISTENT_UPDATE - newDesired, err := canonicalizeOSPolicyAssignmentDesiredState(rawDesired, newState) - if err != nil { - return newState, err - } - - if err := postReadExtractOSPolicyAssignmentFields(newState); err != nil { - return newState, err - } - - // Need to ensure any transformations made here match acceptably in differ. 
- if err := postReadExtractOSPolicyAssignmentFields(newDesired); err != nil { - return newState, err - } - - c.Config.Logger.InfoWithContextf(ctx, "Diffing using canonicalized desired state: %v", newDesired) - newDiffs, err := diffOSPolicyAssignment(c, newDesired, newState) - if err != nil { - return newState, err - } - - if len(newDiffs) == 0 { - c.Config.Logger.InfoWithContext(ctx, "No diffs found. Apply was successful.") - } else { - c.Config.Logger.InfoWithContextf(ctx, "Found diffs: %v", newDiffs) - diffMessages := make([]string, len(newDiffs)) - for i, d := range newDiffs { - diffMessages[i] = fmt.Sprintf("%v", d) - } - return newState, dcl.DiffAfterApplyError{Diffs: diffMessages} - } - c.Config.Logger.InfoWithContext(ctx, "Done Apply.") - return newState, nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment.yaml b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment.yaml deleted file mode 100644 index 91a675bf82..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment.yaml +++ /dev/null @@ -1,1353 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-info: - title: OSConfig/OSPolicyAssignment - description: Represents an OSPolicyAssignment resource. - x-dcl-struct-name: OSPolicyAssignment - x-dcl-has-iam: false - x-dcl-ref: - text: API documentation - url: https://cloud.google.com/compute/docs/osconfig/rest/v1/projects.locations.osPolicyAssignments -paths: - get: - description: The function used to get information about a OSPolicyAssignment - parameters: - - name: oSPolicyAssignment - required: true - description: A full instance of a OSPolicyAssignment - apply: - description: The function used to apply information about a OSPolicyAssignment - parameters: - - name: oSPolicyAssignment - required: true - description: A full instance of a OSPolicyAssignment - delete: - description: The function used to delete a OSPolicyAssignment - parameters: - - name: oSPolicyAssignment - required: true - description: A full instance of a OSPolicyAssignment - deleteAll: - description: The function used to delete all OSPolicyAssignment - parameters: - - name: project - required: true - schema: - type: string - - name: location - required: true - schema: - type: string - list: - description: The function used to list information about many OSPolicyAssignment - parameters: - - name: project - required: true - schema: - type: string - - name: location - required: true - schema: - type: string -components: - schemas: - OSPolicyAssignment: - title: OSPolicyAssignment - x-dcl-id: projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}} - x-dcl-uses-state-hint: true - x-dcl-parent-container: project - x-dcl-has-create: true - x-dcl-has-iam: false - x-dcl-read-timeout: 0 - x-dcl-apply-timeout: 0 - x-dcl-delete-timeout: 0 - type: object - required: - - name - - osPolicies - - instanceFilter - - rollout - - project - - location - properties: - baseline: - type: boolean - x-dcl-go-name: Baseline - readOnly: true - description: Output only. 
Indicates that this revision has been successfully - rolled out in this zone and new VMs will be assigned OS policies from - this revision. For a given OS policy assignment, there is only one revision - with a value of `true` for this field. - x-kubernetes-immutable: true - deleted: - type: boolean - x-dcl-go-name: Deleted - readOnly: true - description: Output only. Indicates that this revision deletes the OS policy - assignment. - x-kubernetes-immutable: true - description: - type: string - x-dcl-go-name: Description - description: OS policy assignment description. Length of the description - is limited to 1024 characters. - etag: - type: string - x-dcl-go-name: Etag - readOnly: true - description: The etag for this OS policy assignment. If this is provided - on update, it must match the server's etag. - x-kubernetes-immutable: true - instanceFilter: - type: object - x-dcl-go-name: InstanceFilter - x-dcl-go-type: OSPolicyAssignmentInstanceFilter - description: Required. Filter to select VMs. - properties: - all: - type: boolean - x-dcl-go-name: All - description: Target all VMs in the project. If true, no other criteria - is permitted. - x-dcl-send-empty: true - exclusionLabels: - type: array - x-dcl-go-name: ExclusionLabels - description: List of label sets used for VM exclusion. If the list has - more than one label set, the VM is excluded if any of the label sets - are applicable for the VM. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: OSPolicyAssignmentInstanceFilterExclusionLabels - properties: - labels: - type: object - additionalProperties: - type: string - x-dcl-go-name: Labels - description: Labels are identified by key/value pairs in this - map. A VM should contain all the key/value pairs specified in - this map to be selected. - inclusionLabels: - type: array - x-dcl-go-name: InclusionLabels - description: List of label sets used for VM inclusion. 
If the list has - more than one `LabelSet`, the VM is included if any of the label sets - are applicable for the VM. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: OSPolicyAssignmentInstanceFilterInclusionLabels - properties: - labels: - type: object - additionalProperties: - type: string - x-dcl-go-name: Labels - description: Labels are identified by key/value pairs in this - map. A VM should contain all the key/value pairs specified in - this map to be selected. - inventories: - type: array - x-dcl-go-name: Inventories - description: List of inventories to select VMs. A VM is selected if - its inventory data matches at least one of the following inventories. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: OSPolicyAssignmentInstanceFilterInventories - required: - - osShortName - properties: - osShortName: - type: string - x-dcl-go-name: OSShortName - description: Required. The OS short name - osVersion: - type: string - x-dcl-go-name: OSVersion - description: The OS version Prefix matches are supported if asterisk(*) - is provided as the last character. For example, to match all - versions with a major version of `7`, specify the following - value for this field `7.*` An empty string matches all OS versions. - location: - type: string - x-dcl-go-name: Location - description: The location for the resource - x-kubernetes-immutable: true - name: - type: string - x-dcl-go-name: Name - description: Resource name. - x-kubernetes-immutable: true - osPolicies: - type: array - x-dcl-go-name: OSPolicies - description: Required. List of OS policies to be applied to the VMs. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: OSPolicyAssignmentOSPolicies - required: - - id - - mode - - resourceGroups - properties: - allowNoResourceGroupMatch: - type: boolean - x-dcl-go-name: AllowNoResourceGroupMatch - description: This flag determines the OS policy compliance status - when none of the resource groups within the policy are applicable - for a VM. Set this value to `true` if the policy needs to be reported - as compliant even if the policy has nothing to validate or enforce. - description: - type: string - x-dcl-go-name: Description - description: Policy description. Length of the description is limited - to 1024 characters. - id: - type: string - x-dcl-go-name: Id - description: 'Required. The id of the OS policy with the following - restrictions: * Must contain only lowercase letters, numbers, and - hyphens. * Must start with a letter. * Must be between 1-63 characters. - * Must end with a number or a letter. * Must be unique within the - assignment.' - mode: - type: string - x-dcl-go-name: Mode - x-dcl-go-type: OSPolicyAssignmentOSPoliciesModeEnum - description: 'Required. Policy mode Possible values: MODE_UNSPECIFIED, - VALIDATION, ENFORCEMENT' - enum: - - MODE_UNSPECIFIED - - VALIDATION - - ENFORCEMENT - resourceGroups: - type: array - x-dcl-go-name: ResourceGroups - description: Required. List of resource groups for the policy. For - a particular VM, resource groups are evaluated in the order specified - and the first resource group that is applicable is selected and - the rest are ignored. If none of the resource groups are applicable - for a VM, the VM is considered to be non-compliant w.r.t this policy. 
- This behavior can be toggled by the flag `allow_no_resource_group_match` - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroups - required: - - resources - properties: - inventoryFilters: - type: array - x-dcl-go-name: InventoryFilters - description: 'List of inventory filters for the resource group. - The resources in this resource group are applied to the target - VM if it satisfies at least one of the following inventory - filters. For example, to apply this resource group to VMs - running either `RHEL` or `CentOS` operating systems, specify - 2 items for the list with following values: inventory_filters[0].os_short_name=''rhel'' - and inventory_filters[1].os_short_name=''centos'' If the list - is empty, this resource group will be applied to the target - VM unconditionally.' - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters - required: - - osShortName - properties: - osShortName: - type: string - x-dcl-go-name: OSShortName - description: Required. The OS short name - osVersion: - type: string - x-dcl-go-name: OSVersion - description: The OS version Prefix matches are supported - if asterisk(*) is provided as the last character. For - example, to match all versions with a major version - of `7`, specify the following value for this field `7.*` - An empty string matches all OS versions. - resources: - type: array - x-dcl-go-name: Resources - description: Required. List of resources configured for this - resource group. The resources are executed in the exact order - specified here. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: object - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResources - required: - - id - properties: - exec: - type: object - x-dcl-go-name: Exec - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec - description: Exec resource - x-dcl-conflicts: - - pkg - - repository - - file - required: - - validate - properties: - enforce: - type: object - x-dcl-go-name: Enforce - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce - description: What to run to bring this resource into - the desired state. An exit code of 100 indicates - "success", any other exit code indicates a failure - running enforce. - required: - - interpreter - properties: - args: - type: array - x-dcl-go-name: Args - description: Optional arguments to pass to the - source during execution. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - file: - type: object - x-dcl-go-name: File - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile - description: A remote or local file. - x-dcl-conflicts: - - script - properties: - allowInsecure: - type: boolean - x-dcl-go-name: AllowInsecure - description: 'Defaults to false. When false, - files are subject to validations based on - the file type: Remote: A checksum must be - specified. Cloud Storage: An object generation - number must be specified.' - gcs: - type: object - x-dcl-go-name: Gcs - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs - description: A Cloud Storage object. - x-dcl-conflicts: - - remote - - localPath - required: - - bucket - - object - properties: - bucket: - type: string - x-dcl-go-name: Bucket - description: Required. Bucket of the Cloud - Storage object. - generation: - type: integer - format: int64 - x-dcl-go-name: Generation - description: Generation number of the - Cloud Storage object. 
- object: - type: string - x-dcl-go-name: Object - description: Required. Name of the Cloud - Storage object. - localPath: - type: string - x-dcl-go-name: LocalPath - description: A local path within the VM to - use. - x-dcl-conflicts: - - remote - - gcs - remote: - type: object - x-dcl-go-name: Remote - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote - description: A generic remote file. - x-dcl-conflicts: - - gcs - - localPath - required: - - uri - properties: - sha256Checksum: - type: string - x-dcl-go-name: Sha256Checksum - description: SHA256 checksum of the remote - file. - uri: - type: string - x-dcl-go-name: Uri - description: Required. URI from which - to fetch the object. It should contain - both the protocol and path following - the format `{protocol}://{location}`. - interpreter: - type: string - x-dcl-go-name: Interpreter - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum - description: 'Required. The script interpreter - to use. Possible values: INTERPRETER_UNSPECIFIED, - NONE, SHELL, POWERSHELL' - enum: - - INTERPRETER_UNSPECIFIED - - NONE - - SHELL - - POWERSHELL - outputFilePath: - type: string - x-dcl-go-name: OutputFilePath - description: Only recorded for enforce Exec. Path - to an output file (that is created by this Exec) - whose content will be recorded in OSPolicyResourceCompliance - after a successful run. Absence or failure to - read this file will result in this ExecResource - being non-compliant. Output file size is limited - to 100K bytes. - script: - type: string - x-dcl-go-name: Script - description: An inline script. The size of the - script is limited to 1024 characters. - x-dcl-conflicts: - - file - validate: - type: object - x-dcl-go-name: Validate - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate - description: Required. What to run to validate this - resource is in the desired state. 
An exit code of - 100 indicates "in desired state", and exit code - of 101 indicates "not in desired state". Any other - exit code indicates a failure running validate. - required: - - interpreter - properties: - args: - type: array - x-dcl-go-name: Args - description: Optional arguments to pass to the - source during execution. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - file: - type: object - x-dcl-go-name: File - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile - description: A remote or local file. - x-dcl-conflicts: - - script - properties: - allowInsecure: - type: boolean - x-dcl-go-name: AllowInsecure - description: 'Defaults to false. When false, - files are subject to validations based on - the file type: Remote: A checksum must be - specified. Cloud Storage: An object generation - number must be specified.' - gcs: - type: object - x-dcl-go-name: Gcs - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs - description: A Cloud Storage object. - x-dcl-conflicts: - - remote - - localPath - required: - - bucket - - object - properties: - bucket: - type: string - x-dcl-go-name: Bucket - description: Required. Bucket of the Cloud - Storage object. - generation: - type: integer - format: int64 - x-dcl-go-name: Generation - description: Generation number of the - Cloud Storage object. - object: - type: string - x-dcl-go-name: Object - description: Required. Name of the Cloud - Storage object. - localPath: - type: string - x-dcl-go-name: LocalPath - description: A local path within the VM to - use. - x-dcl-conflicts: - - remote - - gcs - remote: - type: object - x-dcl-go-name: Remote - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote - description: A generic remote file. 
- x-dcl-conflicts: - - gcs - - localPath - required: - - uri - properties: - sha256Checksum: - type: string - x-dcl-go-name: Sha256Checksum - description: SHA256 checksum of the remote - file. - uri: - type: string - x-dcl-go-name: Uri - description: Required. URI from which - to fetch the object. It should contain - both the protocol and path following - the format `{protocol}://{location}`. - interpreter: - type: string - x-dcl-go-name: Interpreter - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum - description: 'Required. The script interpreter - to use. Possible values: INTERPRETER_UNSPECIFIED, - NONE, SHELL, POWERSHELL' - enum: - - INTERPRETER_UNSPECIFIED - - NONE - - SHELL - - POWERSHELL - outputFilePath: - type: string - x-dcl-go-name: OutputFilePath - description: Only recorded for enforce Exec. Path - to an output file (that is created by this Exec) - whose content will be recorded in OSPolicyResourceCompliance - after a successful run. Absence or failure to - read this file will result in this ExecResource - being non-compliant. Output file size is limited - to 100K bytes. - script: - type: string - x-dcl-go-name: Script - description: An inline script. The size of the - script is limited to 1024 characters. - x-dcl-conflicts: - - file - file: - type: object - x-dcl-go-name: File - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile - description: File resource - x-dcl-conflicts: - - pkg - - repository - - exec - required: - - path - - state - properties: - content: - type: string - x-dcl-go-name: Content - description: A a file with this content. The size - of the content is limited to 1024 characters. - x-dcl-conflicts: - - file - file: - type: object - x-dcl-go-name: File - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile - description: A remote or local source. 
- x-dcl-conflicts: - - content - properties: - allowInsecure: - type: boolean - x-dcl-go-name: AllowInsecure - description: 'Defaults to false. When false, files - are subject to validations based on the file - type: Remote: A checksum must be specified. - Cloud Storage: An object generation number must - be specified.' - gcs: - type: object - x-dcl-go-name: Gcs - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs - description: A Cloud Storage object. - x-dcl-conflicts: - - remote - - localPath - required: - - bucket - - object - properties: - bucket: - type: string - x-dcl-go-name: Bucket - description: Required. Bucket of the Cloud - Storage object. - generation: - type: integer - format: int64 - x-dcl-go-name: Generation - description: Generation number of the Cloud - Storage object. - object: - type: string - x-dcl-go-name: Object - description: Required. Name of the Cloud Storage - object. - localPath: - type: string - x-dcl-go-name: LocalPath - description: A local path within the VM to use. - x-dcl-conflicts: - - remote - - gcs - remote: - type: object - x-dcl-go-name: Remote - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote - description: A generic remote file. - x-dcl-conflicts: - - gcs - - localPath - required: - - uri - properties: - sha256Checksum: - type: string - x-dcl-go-name: Sha256Checksum - description: SHA256 checksum of the remote - file. - uri: - type: string - x-dcl-go-name: Uri - description: Required. URI from which to fetch - the object. It should contain both the protocol - and path following the format `{protocol}://{location}`. - path: - type: string - x-dcl-go-name: Path - description: Required. The absolute path of the file - within the VM. 
- permissions: - type: string - x-dcl-go-name: Permissions - readOnly: true - description: 'Consists of three octal digits which - represent, in order, the permissions of the owner, - group, and other users for the file (similarly to - the numeric mode used in the linux chmod utility). - Each digit represents a three bit number with the - 4 bit corresponding to the read permissions, the - 2 bit corresponds to the write bit, and the one - bit corresponds to the execute permission. Default - behavior is 755. Below are some examples of permissions - and their associated values: read, write, and execute: - 7 read and execute: 5 read and write: 6 read only: - 4' - state: - type: string - x-dcl-go-name: State - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum - description: 'Required. Desired state of the file. - Possible values: OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED, - COMPLIANT, NON_COMPLIANT, UNKNOWN, NO_OS_POLICIES_APPLICABLE' - enum: - - OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED - - COMPLIANT - - NON_COMPLIANT - - UNKNOWN - - NO_OS_POLICIES_APPLICABLE - id: - type: string - x-dcl-go-name: Id - description: 'Required. The id of the resource with the - following restrictions: * Must contain only lowercase - letters, numbers, and hyphens. * Must start with a letter. - * Must be between 1-63 characters. * Must end with a - number or a letter. * Must be unique within the OS policy.' - pkg: - type: object - x-dcl-go-name: Pkg - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg - description: Package resource - x-dcl-conflicts: - - repository - - exec - - file - required: - - desiredState - properties: - apt: - type: object - x-dcl-go-name: Apt - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt - description: A package managed by Apt. 
- x-dcl-conflicts: - - deb - - yum - - zypper - - rpm - - googet - - msi - required: - - name - properties: - name: - type: string - x-dcl-go-name: Name - description: Required. Package name. - deb: - type: object - x-dcl-go-name: Deb - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb - description: A deb package file. - x-dcl-conflicts: - - apt - - yum - - zypper - - rpm - - googet - - msi - required: - - source - properties: - pullDeps: - type: boolean - x-dcl-go-name: PullDeps - description: 'Whether dependencies should also - be installed. - install when false: `dpkg -i - package` - install when true: `apt-get update - && apt-get -y install package.deb`' - source: - type: object - x-dcl-go-name: Source - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource - description: Required. A deb package. - properties: - allowInsecure: - type: boolean - x-dcl-go-name: AllowInsecure - description: 'Defaults to false. When false, - files are subject to validations based on - the file type: Remote: A checksum must be - specified. Cloud Storage: An object generation - number must be specified.' - gcs: - type: object - x-dcl-go-name: Gcs - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs - description: A Cloud Storage object. - x-dcl-conflicts: - - remote - - localPath - required: - - bucket - - object - properties: - bucket: - type: string - x-dcl-go-name: Bucket - description: Required. Bucket of the Cloud - Storage object. - generation: - type: integer - format: int64 - x-dcl-go-name: Generation - description: Generation number of the - Cloud Storage object. - object: - type: string - x-dcl-go-name: Object - description: Required. Name of the Cloud - Storage object. - localPath: - type: string - x-dcl-go-name: LocalPath - description: A local path within the VM to - use. 
- x-dcl-conflicts: - - remote - - gcs - remote: - type: object - x-dcl-go-name: Remote - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote - description: A generic remote file. - x-dcl-conflicts: - - gcs - - localPath - required: - - uri - properties: - sha256Checksum: - type: string - x-dcl-go-name: Sha256Checksum - description: SHA256 checksum of the remote - file. - uri: - type: string - x-dcl-go-name: Uri - description: Required. URI from which - to fetch the object. It should contain - both the protocol and path following - the format `{protocol}://{location}`. - desiredState: - type: string - x-dcl-go-name: DesiredState - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum - description: 'Required. The desired state the agent - should maintain for this package. Possible values: - DESIRED_STATE_UNSPECIFIED, INSTALLED, REMOVED' - enum: - - DESIRED_STATE_UNSPECIFIED - - INSTALLED - - REMOVED - googet: - type: object - x-dcl-go-name: Googet - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget - description: A package managed by GooGet. - x-dcl-conflicts: - - apt - - deb - - yum - - zypper - - rpm - - msi - required: - - name - properties: - name: - type: string - x-dcl-go-name: Name - description: Required. Package name. - msi: - type: object - x-dcl-go-name: Msi - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi - description: An MSI package. - x-dcl-conflicts: - - apt - - deb - - yum - - zypper - - rpm - - googet - required: - - source - properties: - properties: - type: array - x-dcl-go-name: Properties - description: Additional properties to use during - installation. This should be in the format of - Property=Setting. Appended to the defaults of - `ACTION=INSTALL REBOOT=ReallySuppress`. 
- x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - source: - type: object - x-dcl-go-name: Source - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource - description: Required. The MSI package. - properties: - allowInsecure: - type: boolean - x-dcl-go-name: AllowInsecure - description: 'Defaults to false. When false, - files are subject to validations based on - the file type: Remote: A checksum must be - specified. Cloud Storage: An object generation - number must be specified.' - gcs: - type: object - x-dcl-go-name: Gcs - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs - description: A Cloud Storage object. - x-dcl-conflicts: - - remote - - localPath - required: - - bucket - - object - properties: - bucket: - type: string - x-dcl-go-name: Bucket - description: Required. Bucket of the Cloud - Storage object. - generation: - type: integer - format: int64 - x-dcl-go-name: Generation - description: Generation number of the - Cloud Storage object. - object: - type: string - x-dcl-go-name: Object - description: Required. Name of the Cloud - Storage object. - localPath: - type: string - x-dcl-go-name: LocalPath - description: A local path within the VM to - use. - x-dcl-conflicts: - - remote - - gcs - remote: - type: object - x-dcl-go-name: Remote - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote - description: A generic remote file. - x-dcl-conflicts: - - gcs - - localPath - required: - - uri - properties: - sha256Checksum: - type: string - x-dcl-go-name: Sha256Checksum - description: SHA256 checksum of the remote - file. - uri: - type: string - x-dcl-go-name: Uri - description: Required. URI from which - to fetch the object. It should contain - both the protocol and path following - the format `{protocol}://{location}`. 
- rpm: - type: object - x-dcl-go-name: Rpm - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm - description: An rpm package file. - x-dcl-conflicts: - - apt - - deb - - yum - - zypper - - googet - - msi - required: - - source - properties: - pullDeps: - type: boolean - x-dcl-go-name: PullDeps - description: 'Whether dependencies should also - be installed. - install when false: `rpm --upgrade - --replacepkgs package.rpm` - install when true: - `yum -y install package.rpm` or `zypper -y install - package.rpm`' - source: - type: object - x-dcl-go-name: Source - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource - description: Required. An rpm package. - properties: - allowInsecure: - type: boolean - x-dcl-go-name: AllowInsecure - description: 'Defaults to false. When false, - files are subject to validations based on - the file type: Remote: A checksum must be - specified. Cloud Storage: An object generation - number must be specified.' - gcs: - type: object - x-dcl-go-name: Gcs - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs - description: A Cloud Storage object. - x-dcl-conflicts: - - remote - - localPath - required: - - bucket - - object - properties: - bucket: - type: string - x-dcl-go-name: Bucket - description: Required. Bucket of the Cloud - Storage object. - generation: - type: integer - format: int64 - x-dcl-go-name: Generation - description: Generation number of the - Cloud Storage object. - object: - type: string - x-dcl-go-name: Object - description: Required. Name of the Cloud - Storage object. - localPath: - type: string - x-dcl-go-name: LocalPath - description: A local path within the VM to - use. - x-dcl-conflicts: - - remote - - gcs - remote: - type: object - x-dcl-go-name: Remote - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote - description: A generic remote file. 
- x-dcl-conflicts: - - gcs - - localPath - required: - - uri - properties: - sha256Checksum: - type: string - x-dcl-go-name: Sha256Checksum - description: SHA256 checksum of the remote - file. - uri: - type: string - x-dcl-go-name: Uri - description: Required. URI from which - to fetch the object. It should contain - both the protocol and path following - the format `{protocol}://{location}`. - yum: - type: object - x-dcl-go-name: Yum - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum - description: A package managed by YUM. - x-dcl-conflicts: - - apt - - deb - - zypper - - rpm - - googet - - msi - required: - - name - properties: - name: - type: string - x-dcl-go-name: Name - description: Required. Package name. - zypper: - type: object - x-dcl-go-name: Zypper - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper - description: A package managed by Zypper. - x-dcl-conflicts: - - apt - - deb - - yum - - rpm - - googet - - msi - required: - - name - properties: - name: - type: string - x-dcl-go-name: Name - description: Required. Package name. - repository: - type: object - x-dcl-go-name: Repository - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository - description: Package repository resource - x-dcl-conflicts: - - pkg - - exec - - file - properties: - apt: - type: object - x-dcl-go-name: Apt - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt - description: An Apt Repository. - x-dcl-conflicts: - - yum - - zypper - - goo - required: - - archiveType - - uri - - distribution - - components - properties: - archiveType: - type: string - x-dcl-go-name: ArchiveType - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum - description: 'Required. Type of archive files - in this repository. 
Possible values: ARCHIVE_TYPE_UNSPECIFIED, - DEB, DEB_SRC' - enum: - - ARCHIVE_TYPE_UNSPECIFIED - - DEB - - DEB_SRC - components: - type: array - x-dcl-go-name: Components - description: Required. List of components for - this repository. Must contain at least one item. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - distribution: - type: string - x-dcl-go-name: Distribution - description: Required. Distribution of this repository. - gpgKey: - type: string - x-dcl-go-name: GpgKey - description: URI of the key file for this repository. - The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`. - uri: - type: string - x-dcl-go-name: Uri - description: Required. URI for this repository. - goo: - type: object - x-dcl-go-name: Goo - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo - description: A Goo Repository. - x-dcl-conflicts: - - apt - - yum - - zypper - required: - - name - - url - properties: - name: - type: string - x-dcl-go-name: Name - description: Required. The name of the repository. - url: - type: string - x-dcl-go-name: Url - description: Required. The url of the repository. - yum: - type: object - x-dcl-go-name: Yum - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum - description: A Yum Repository. - x-dcl-conflicts: - - apt - - zypper - - goo - required: - - id - - baseUrl - properties: - baseUrl: - type: string - x-dcl-go-name: BaseUrl - description: Required. The location of the repository - directory. - displayName: - type: string - x-dcl-go-name: DisplayName - description: The display name of the repository. - gpgKeys: - type: array - x-dcl-go-name: GpgKeys - description: URIs of GPG keys. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - id: - type: string - x-dcl-go-name: Id - description: Required. A one word, unique name - for this repository. 
This is the `repo id` in - the yum config file and also the `display_name` - if `display_name` is omitted. This id is also - used as the unique identifier when checking - for resource conflicts. - zypper: - type: object - x-dcl-go-name: Zypper - x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper - description: A Zypper Repository. - x-dcl-conflicts: - - apt - - yum - - goo - required: - - id - - baseUrl - properties: - baseUrl: - type: string - x-dcl-go-name: BaseUrl - description: Required. The location of the repository - directory. - displayName: - type: string - x-dcl-go-name: DisplayName - description: The display name of the repository. - gpgKeys: - type: array - x-dcl-go-name: GpgKeys - description: URIs of GPG keys. - x-dcl-send-empty: true - x-dcl-list-type: list - items: - type: string - x-dcl-go-type: string - id: - type: string - x-dcl-go-name: Id - description: Required. A one word, unique name - for this repository. This is the `repo id` in - the zypper config file and also the `display_name` - if `display_name` is omitted. This id is also - used as the unique identifier when checking - for GuestPolicy conflicts. - project: - type: string - x-dcl-go-name: Project - description: The project for the resource - x-kubernetes-immutable: true - x-dcl-references: - - resource: Cloudresourcemanager/Project - field: name - parent: true - reconciling: - type: boolean - x-dcl-go-name: Reconciling - readOnly: true - description: 'Output only. Indicates that reconciliation is in progress - for the revision. This value is `true` when the `rollout_state` is one - of: * IN_PROGRESS * CANCELLING' - x-kubernetes-immutable: true - revisionCreateTime: - type: string - format: date-time - x-dcl-go-name: RevisionCreateTime - readOnly: true - description: Output only. The timestamp that the revision was created. - x-kubernetes-immutable: true - revisionId: - type: string - x-dcl-go-name: RevisionId - readOnly: true - description: Output only. 
The assignment revision ID A new revision is committed - whenever a rollout is triggered for a OS policy assignment - x-kubernetes-immutable: true - rollout: - type: object - x-dcl-go-name: Rollout - x-dcl-go-type: OSPolicyAssignmentRollout - description: 'Required. Rollout to deploy the OS policy assignment. A rollout - is triggered in the following situations: 1) OSPolicyAssignment is created. - 2) OSPolicyAssignment is updated and the update contains changes to one - of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment - is deleted.' - required: - - disruptionBudget - - minWaitDuration - properties: - disruptionBudget: - type: object - x-dcl-go-name: DisruptionBudget - x-dcl-go-type: OSPolicyAssignmentRolloutDisruptionBudget - description: Required. The maximum number (or percentage) of VMs per - zone to disrupt at any given moment. - properties: - fixed: - type: integer - format: int64 - x-dcl-go-name: Fixed - description: Specifies a fixed value. - x-dcl-conflicts: - - percent - percent: - type: integer - format: int64 - x-dcl-go-name: Percent - description: Specifies the relative value defined as a percentage, - which will be multiplied by a reference value. - x-dcl-conflicts: - - fixed - minWaitDuration: - type: string - x-dcl-go-name: MinWaitDuration - description: Required. This determines the minimum duration of time - to wait after the configuration changes are applied through the current - rollout. A VM continues to count towards the `disruption_budget` at - least until this duration of time has passed after configuration changes - are applied. - rolloutState: - type: string - x-dcl-go-name: RolloutState - x-dcl-go-type: OSPolicyAssignmentRolloutStateEnum - readOnly: true - description: 'Output only. 
OS policy assignment rollout state Possible values: - ROLLOUT_STATE_UNSPECIFIED, IN_PROGRESS, CANCELLING, CANCELLED, SUCCEEDED' - x-kubernetes-immutable: true - enum: - - ROLLOUT_STATE_UNSPECIFIED - - IN_PROGRESS - - CANCELLING - - CANCELLED - - SUCCEEDED - skipAwaitRollout: - type: boolean - x-dcl-go-name: SkipAwaitRollout - description: Set to true to skip awaiting rollout during resource creation - and update. - x-dcl-mutable-unreadable: true - uid: - type: string - x-dcl-go-name: Uid - readOnly: true - description: Output only. Server generated unique id for the OS policy assignment - resource. - x-kubernetes-immutable: true diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment_internal.go deleted file mode 100644 index a61c47f891..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment_internal.go +++ /dev/null @@ -1,16467 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package osconfig - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func (r *OSPolicyAssignment) validate() error { - - if err := dcl.Required(r, "name"); err != nil { - return err - } - if err := dcl.Required(r, "osPolicies"); err != nil { - return err - } - if err := dcl.Required(r, "instanceFilter"); err != nil { - return err - } - if err := dcl.Required(r, "rollout"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Project, "Project"); err != nil { - return err - } - if err := dcl.RequiredParameter(r.Location, "Location"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.InstanceFilter) { - if err := r.InstanceFilter.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Rollout) { - if err := r.Rollout.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPolicies) validate() error { - if err := dcl.Required(r, "id"); err != nil { - return err - } - if err := dcl.Required(r, "mode"); err != nil { - return err - } - if err := dcl.Required(r, "resourceGroups"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroups) validate() error { - if err := dcl.Required(r, "resources"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) validate() error { - if err := dcl.Required(r, "osShortName"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResources) validate() error { - if err := dcl.Required(r, "id"); err != nil { - return err - } - if err := dcl.ValidateExactlyOneOfFieldsSet([]string{"Pkg", "Repository", "Exec", "File"}, r.Pkg, r.Repository, r.Exec, r.File); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Pkg) { - if err := r.Pkg.validate(); err != nil { - return err - } - } - if 
!dcl.IsEmptyValueIndirect(r.Repository) { - if err := r.Repository.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Exec) { - if err := r.Exec.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.File) { - if err := r.File.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) validate() error { - if err := dcl.Required(r, "desiredState"); err != nil { - return err - } - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Apt", "Deb", "Yum", "Zypper", "Rpm", "Googet", "Msi"}, r.Apt, r.Deb, r.Yum, r.Zypper, r.Rpm, r.Googet, r.Msi); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Apt) { - if err := r.Apt.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Deb) { - if err := r.Deb.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Yum) { - if err := r.Yum.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Zypper) { - if err := r.Zypper.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Rpm) { - if err := r.Rpm.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Googet) { - if err := r.Googet.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Msi) { - if err := r.Msi.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) validate() error { - if err := dcl.Required(r, "name"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) validate() error { - if err := dcl.Required(r, "source"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Source) { - if err := r.Source.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) validate() error 
{ - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Remote", "Gcs", "LocalPath"}, r.Remote, r.Gcs, r.LocalPath); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Remote) { - if err := r.Remote.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Gcs) { - if err := r.Gcs.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) validate() error { - if err := dcl.Required(r, "uri"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) validate() error { - if err := dcl.Required(r, "bucket"); err != nil { - return err - } - if err := dcl.Required(r, "object"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) validate() error { - if err := dcl.Required(r, "name"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) validate() error { - if err := dcl.Required(r, "name"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) validate() error { - if err := dcl.Required(r, "source"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Source) { - if err := r.Source.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Remote", "Gcs", "LocalPath"}, r.Remote, r.Gcs, r.LocalPath); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Remote) { - if err := r.Remote.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Gcs) { - if err := r.Gcs.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) 
validate() error { - if err := dcl.Required(r, "uri"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) validate() error { - if err := dcl.Required(r, "bucket"); err != nil { - return err - } - if err := dcl.Required(r, "object"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) validate() error { - if err := dcl.Required(r, "name"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) validate() error { - if err := dcl.Required(r, "source"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Source) { - if err := r.Source.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Remote", "Gcs", "LocalPath"}, r.Remote, r.Gcs, r.LocalPath); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Remote) { - if err := r.Remote.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Gcs) { - if err := r.Gcs.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) validate() error { - if err := dcl.Required(r, "uri"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) validate() error { - if err := dcl.Required(r, "bucket"); err != nil { - return err - } - if err := dcl.Required(r, "object"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Apt", "Yum", "Zypper", "Goo"}, r.Apt, r.Yum, r.Zypper, r.Goo); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Apt) { - if 
err := r.Apt.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Yum) { - if err := r.Yum.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Zypper) { - if err := r.Zypper.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Goo) { - if err := r.Goo.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) validate() error { - if err := dcl.Required(r, "archiveType"); err != nil { - return err - } - if err := dcl.Required(r, "uri"); err != nil { - return err - } - if err := dcl.Required(r, "distribution"); err != nil { - return err - } - if err := dcl.Required(r, "components"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) validate() error { - if err := dcl.Required(r, "id"); err != nil { - return err - } - if err := dcl.Required(r, "baseUrl"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) validate() error { - if err := dcl.Required(r, "id"); err != nil { - return err - } - if err := dcl.Required(r, "baseUrl"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) validate() error { - if err := dcl.Required(r, "name"); err != nil { - return err - } - if err := dcl.Required(r, "url"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) validate() error { - if err := dcl.Required(r, "validate"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Validate) { - if err := r.Validate.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Enforce) { - if err := r.Enforce.validate(); err != nil { - return err - } - } - return nil -} -func (r 
*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) validate() error { - if err := dcl.Required(r, "interpreter"); err != nil { - return err - } - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"File", "Script"}, r.File, r.Script); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.File) { - if err := r.File.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Remote", "Gcs", "LocalPath"}, r.Remote, r.Gcs, r.LocalPath); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Remote) { - if err := r.Remote.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Gcs) { - if err := r.Gcs.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) validate() error { - if err := dcl.Required(r, "uri"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) validate() error { - if err := dcl.Required(r, "bucket"); err != nil { - return err - } - if err := dcl.Required(r, "object"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) validate() error { - if err := dcl.Required(r, "interpreter"); err != nil { - return err - } - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"File", "Script"}, r.File, r.Script); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.File) { - if err := r.File.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Remote", "Gcs", "LocalPath"}, r.Remote, r.Gcs, r.LocalPath); err != nil { - return err - } - if 
!dcl.IsEmptyValueIndirect(r.Remote) { - if err := r.Remote.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Gcs) { - if err := r.Gcs.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) validate() error { - if err := dcl.Required(r, "uri"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) validate() error { - if err := dcl.Required(r, "bucket"); err != nil { - return err - } - if err := dcl.Required(r, "object"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) validate() error { - if err := dcl.Required(r, "path"); err != nil { - return err - } - if err := dcl.Required(r, "state"); err != nil { - return err - } - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"File", "Content"}, r.File, r.Content); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.File) { - if err := r.File.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) validate() error { - if err := dcl.ValidateAtMostOneOfFieldsSet([]string{"Remote", "Gcs", "LocalPath"}, r.Remote, r.Gcs, r.LocalPath); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.Remote) { - if err := r.Remote.validate(); err != nil { - return err - } - } - if !dcl.IsEmptyValueIndirect(r.Gcs) { - if err := r.Gcs.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) validate() error { - if err := dcl.Required(r, "uri"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) validate() error { - if err := dcl.Required(r, "bucket"); err != nil { - return err - } - if err := dcl.Required(r, "object"); err != nil { - return 
err - } - return nil -} -func (r *OSPolicyAssignmentInstanceFilter) validate() error { - return nil -} -func (r *OSPolicyAssignmentInstanceFilterInclusionLabels) validate() error { - return nil -} -func (r *OSPolicyAssignmentInstanceFilterExclusionLabels) validate() error { - return nil -} -func (r *OSPolicyAssignmentInstanceFilterInventories) validate() error { - if err := dcl.Required(r, "osShortName"); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignmentRollout) validate() error { - if err := dcl.Required(r, "disruptionBudget"); err != nil { - return err - } - if err := dcl.Required(r, "minWaitDuration"); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(r.DisruptionBudget) { - if err := r.DisruptionBudget.validate(); err != nil { - return err - } - } - return nil -} -func (r *OSPolicyAssignmentRolloutDisruptionBudget) validate() error { - if err := dcl.ValidateExactlyOneOfFieldsSet([]string{"Fixed", "Percent"}, r.Fixed, r.Percent); err != nil { - return err - } - return nil -} -func (r *OSPolicyAssignment) basePath() string { - params := map[string]interface{}{} - return dcl.Nprintf("https://osconfig.googleapis.com/v1", params) -} - -func (r *OSPolicyAssignment) getURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}", nr.basePath(), userBasePath, params), nil -} - -func (r *OSPolicyAssignment) listURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - } - return dcl.URL("projects/{{project}}/locations/{{location}}/osPolicyAssignments", nr.basePath(), userBasePath, params), nil - -} - -func (r 
*OSPolicyAssignment) createURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/locations/{{location}}/osPolicyAssignments?osPolicyAssignmentId={{name}}", nr.basePath(), userBasePath, params), nil - -} - -func (r *OSPolicyAssignment) deleteURL(userBasePath string) (string, error) { - nr := r.urlNormalized() - params := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}", nr.basePath(), userBasePath, params), nil -} - -// oSPolicyAssignmentApiOperation represents a mutable operation in the underlying REST -// API such as Create, Update, or Delete. -type oSPolicyAssignmentApiOperation interface { - do(context.Context, *OSPolicyAssignment, *Client) error -} - -// newUpdateOSPolicyAssignmentUpdateOSPolicyAssignmentRequest creates a request for an -// OSPolicyAssignment resource's UpdateOSPolicyAssignment update type by filling in the update -// fields based on the intended state of the resource. 
-func newUpdateOSPolicyAssignmentUpdateOSPolicyAssignmentRequest(ctx context.Context, f *OSPolicyAssignment, c *Client) (map[string]interface{}, error) { - req := map[string]interface{}{} - res := f - _ = res - - if v := f.Description; !dcl.IsEmptyValueIndirect(v) { - req["description"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesSlice(c, f.OSPolicies, res); err != nil { - return nil, fmt.Errorf("error expanding OSPolicies into osPolicies: %w", err) - } else if v != nil { - req["osPolicies"] = v - } - if v, err := expandOSPolicyAssignmentInstanceFilter(c, f.InstanceFilter, res); err != nil { - return nil, fmt.Errorf("error expanding InstanceFilter into instanceFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["instanceFilter"] = v - } - if v, err := expandOSPolicyAssignmentRollout(c, f.Rollout, res); err != nil { - return nil, fmt.Errorf("error expanding Rollout into rollout: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - req["rollout"] = v - } - if v := f.SkipAwaitRollout; !dcl.IsEmptyValueIndirect(v) { - req["skipAwaitRollout"] = v - } - return req, nil -} - -// marshalUpdateOSPolicyAssignmentUpdateOSPolicyAssignmentRequest converts the update into -// the final JSON request body. -func marshalUpdateOSPolicyAssignmentUpdateOSPolicyAssignmentRequest(c *Client, m map[string]interface{}) ([]byte, error) { - - dcl.MoveMapEntry( - m, - []string{"skipAwaitRollout"}, - []string{}, - ) - return json.Marshal(m) -} - -type updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation struct { - // If the update operation has the REQUIRES_APPLY_OPTIONS trait, this will be populated. - // Usually it will be nil - this is to prevent us from accidentally depending on apply - // options, which should usually be unnecessary. - ApplyOptions []dcl.ApplyOption - FieldDiffs []*dcl.FieldDiff -} - -// do creates a request and sends it to the appropriate URL. 
In most operations, -// do will transcribe a subset of the resource into a request object and send a -// PUT request to a single URL. - -func (c *Client) listOSPolicyAssignmentRaw(ctx context.Context, r *OSPolicyAssignment, pageToken string, pageSize int32) ([]byte, error) { - u, err := r.urlNormalized().listURL(c.Config.BasePath) - if err != nil { - return nil, err - } - - m := make(map[string]string) - if pageToken != "" { - m["pageToken"] = pageToken - } - - if pageSize != OSPolicyAssignmentMaxPage { - m["pageSize"] = fmt.Sprintf("%v", pageSize) - } - - u, err = dcl.AddQueryParams(u, m) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - return ioutil.ReadAll(resp.Response.Body) -} - -type listOSPolicyAssignmentOperation struct { - OsPolicyAssignments []map[string]interface{} `json:"osPolicyAssignments"` - Token string `json:"nextPageToken"` -} - -func (c *Client) listOSPolicyAssignment(ctx context.Context, r *OSPolicyAssignment, pageToken string, pageSize int32) ([]*OSPolicyAssignment, string, error) { - b, err := c.listOSPolicyAssignmentRaw(ctx, r, pageToken, pageSize) - if err != nil { - return nil, "", err - } - - var m listOSPolicyAssignmentOperation - if err := json.Unmarshal(b, &m); err != nil { - return nil, "", err - } - - var l []*OSPolicyAssignment - for _, v := range m.OsPolicyAssignments { - res, err := unmarshalMapOSPolicyAssignment(v, c, r) - if err != nil { - return nil, m.Token, err - } - res.Project = r.Project - res.Location = r.Location - l = append(l, res) - } - - return l, m.Token, nil -} - -func (c *Client) deleteAllOSPolicyAssignment(ctx context.Context, f func(*OSPolicyAssignment) bool, resources []*OSPolicyAssignment) error { - var errors []string - for _, res := range resources { - if f(res) { - // We do not want deleteAll to fail on a deletion or else it will stop deleting 
other resources. - err := c.DeleteOSPolicyAssignment(ctx, res) - if err != nil { - errors = append(errors, err.Error()) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("%v", strings.Join(errors, "\n")) - } else { - return nil - } -} - -type deleteOSPolicyAssignmentOperation struct{} - -// Create operations are similar to Update operations, although they do not have -// specific request objects. The Create request object is the json encoding of -// the resource, which is modified by res.marshal to form the base request body. -type createOSPolicyAssignmentOperation struct { - response map[string]interface{} -} - -func (op *createOSPolicyAssignmentOperation) FirstResponse() (map[string]interface{}, bool) { - return op.response, len(op.response) > 0 -} - -func (c *Client) getOSPolicyAssignmentRaw(ctx context.Context, r *OSPolicyAssignment) ([]byte, error) { - - u, err := r.getURL(c.Config.BasePath) - if err != nil { - return nil, err - } - resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider) - if err != nil { - return nil, err - } - defer resp.Response.Body.Close() - b, err := ioutil.ReadAll(resp.Response.Body) - if err != nil { - return nil, err - } - - return b, nil -} - -func (c *Client) oSPolicyAssignmentDiffsForRawDesired(ctx context.Context, rawDesired *OSPolicyAssignment, opts ...dcl.ApplyOption) (initial, desired *OSPolicyAssignment, diffs []*dcl.FieldDiff, err error) { - c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...") - // First, let us see if the user provided a state hint. If they did, we will start fetching based on that. 
- var fetchState *OSPolicyAssignment - if sh := dcl.FetchStateHint(opts); sh != nil { - if r, ok := sh.(*OSPolicyAssignment); !ok { - c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected OSPolicyAssignment, got %T", sh) - } else { - fetchState = r - } - } - if fetchState == nil { - fetchState = rawDesired - } - - // 1.2: Retrieval of raw initial state from API - rawInitial, err := c.GetOSPolicyAssignment(ctx, fetchState) - if rawInitial == nil { - if !dcl.IsNotFound(err) { - c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a OSPolicyAssignment resource already exists: %s", err) - return nil, nil, nil, fmt.Errorf("failed to retrieve OSPolicyAssignment resource: %v", err) - } - c.Config.Logger.InfoWithContext(ctx, "Found that OSPolicyAssignment resource did not exist.") - // Perform canonicalization to pick up defaults. - desired, err = canonicalizeOSPolicyAssignmentDesiredState(rawDesired, rawInitial) - return nil, desired, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Found initial state for OSPolicyAssignment: %v", rawInitial) - c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for OSPolicyAssignment: %v", rawDesired) - - // The Get call applies postReadExtract and so the result may contain fields that are not part of API version. - if err := extractOSPolicyAssignmentFields(rawInitial); err != nil { - return nil, nil, nil, err - } - - // 1.3: Canonicalize raw initial state into initial state. - initial, err = canonicalizeOSPolicyAssignmentInitialState(rawInitial, rawDesired) - if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for OSPolicyAssignment: %v", initial) - - // 1.4: Canonicalize raw desired state into desired state. - desired, err = canonicalizeOSPolicyAssignmentDesiredState(rawDesired, rawInitial, opts...) 
- if err != nil { - return nil, nil, nil, err - } - c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for OSPolicyAssignment: %v", desired) - - // 2.1: Comparison of initial and desired state. - diffs, err = diffOSPolicyAssignment(c, desired, initial, opts...) - return initial, desired, diffs, err -} - -func canonicalizeOSPolicyAssignmentInitialState(rawInitial, rawDesired *OSPolicyAssignment) (*OSPolicyAssignment, error) { - // TODO(magic-modules-eng): write canonicalizer once relevant traits are added. - return rawInitial, nil -} - -/* -* Canonicalizers -* -* These are responsible for converting either a user-specified config or a -* GCP API response to a standard format that can be used for difference checking. -* */ - -func canonicalizeOSPolicyAssignmentDesiredState(rawDesired, rawInitial *OSPolicyAssignment, opts ...dcl.ApplyOption) (*OSPolicyAssignment, error) { - - if rawInitial == nil { - // Since the initial state is empty, the desired state is all we have. - // We canonicalize the remaining nested objects with nil to pick up defaults. - rawDesired.InstanceFilter = canonicalizeOSPolicyAssignmentInstanceFilter(rawDesired.InstanceFilter, nil, opts...) - rawDesired.Rollout = canonicalizeOSPolicyAssignmentRollout(rawDesired.Rollout, nil, opts...) - - return rawDesired, nil - } - canonicalDesired := &OSPolicyAssignment{} - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) { - canonicalDesired.Name = rawInitial.Name - } else { - canonicalDesired.Name = rawDesired.Name - } - if dcl.StringCanonicalize(rawDesired.Description, rawInitial.Description) { - canonicalDesired.Description = rawInitial.Description - } else { - canonicalDesired.Description = rawDesired.Description - } - canonicalDesired.OSPolicies = canonicalizeOSPolicyAssignmentOSPoliciesSlice(rawDesired.OSPolicies, rawInitial.OSPolicies, opts...) 
- canonicalDesired.InstanceFilter = canonicalizeOSPolicyAssignmentInstanceFilter(rawDesired.InstanceFilter, rawInitial.InstanceFilter, opts...) - canonicalDesired.Rollout = canonicalizeOSPolicyAssignmentRollout(rawDesired.Rollout, rawInitial.Rollout, opts...) - if dcl.NameToSelfLink(rawDesired.Project, rawInitial.Project) { - canonicalDesired.Project = rawInitial.Project - } else { - canonicalDesired.Project = rawDesired.Project - } - if dcl.NameToSelfLink(rawDesired.Location, rawInitial.Location) { - canonicalDesired.Location = rawInitial.Location - } else { - canonicalDesired.Location = rawDesired.Location - } - if dcl.BoolCanonicalize(rawDesired.SkipAwaitRollout, rawInitial.SkipAwaitRollout) { - canonicalDesired.SkipAwaitRollout = rawInitial.SkipAwaitRollout - } else { - canonicalDesired.SkipAwaitRollout = rawDesired.SkipAwaitRollout - } - return canonicalDesired, nil -} - -func canonicalizeOSPolicyAssignmentNewState(c *Client, rawNew, rawDesired *OSPolicyAssignment) (*OSPolicyAssignment, error) { - - if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) { - rawNew.Name = rawDesired.Name - } else { - if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) { - rawNew.Name = rawDesired.Name - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Description) && dcl.IsEmptyValueIndirect(rawDesired.Description) { - rawNew.Description = rawDesired.Description - } else { - if dcl.StringCanonicalize(rawDesired.Description, rawNew.Description) { - rawNew.Description = rawDesired.Description - } - } - - if dcl.IsEmptyValueIndirect(rawNew.OSPolicies) && dcl.IsEmptyValueIndirect(rawDesired.OSPolicies) { - rawNew.OSPolicies = rawDesired.OSPolicies - } else { - rawNew.OSPolicies = canonicalizeNewOSPolicyAssignmentOSPoliciesSlice(c, rawDesired.OSPolicies, rawNew.OSPolicies) - } - - if dcl.IsEmptyValueIndirect(rawNew.InstanceFilter) && dcl.IsEmptyValueIndirect(rawDesired.InstanceFilter) { - rawNew.InstanceFilter = rawDesired.InstanceFilter - } 
else { - rawNew.InstanceFilter = canonicalizeNewOSPolicyAssignmentInstanceFilter(c, rawDesired.InstanceFilter, rawNew.InstanceFilter) - } - - if dcl.IsEmptyValueIndirect(rawNew.Rollout) && dcl.IsEmptyValueIndirect(rawDesired.Rollout) { - rawNew.Rollout = rawDesired.Rollout - } else { - rawNew.Rollout = canonicalizeNewOSPolicyAssignmentRollout(c, rawDesired.Rollout, rawNew.Rollout) - } - - if dcl.IsEmptyValueIndirect(rawNew.RevisionId) && dcl.IsEmptyValueIndirect(rawDesired.RevisionId) { - rawNew.RevisionId = rawDesired.RevisionId - } else { - if dcl.StringCanonicalize(rawDesired.RevisionId, rawNew.RevisionId) { - rawNew.RevisionId = rawDesired.RevisionId - } - } - - if dcl.IsEmptyValueIndirect(rawNew.RevisionCreateTime) && dcl.IsEmptyValueIndirect(rawDesired.RevisionCreateTime) { - rawNew.RevisionCreateTime = rawDesired.RevisionCreateTime - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.Etag) && dcl.IsEmptyValueIndirect(rawDesired.Etag) { - rawNew.Etag = rawDesired.Etag - } else { - if dcl.StringCanonicalize(rawDesired.Etag, rawNew.Etag) { - rawNew.Etag = rawDesired.Etag - } - } - - if dcl.IsEmptyValueIndirect(rawNew.RolloutState) && dcl.IsEmptyValueIndirect(rawDesired.RolloutState) { - rawNew.RolloutState = rawDesired.RolloutState - } else { - } - - if dcl.IsEmptyValueIndirect(rawNew.Baseline) && dcl.IsEmptyValueIndirect(rawDesired.Baseline) { - rawNew.Baseline = rawDesired.Baseline - } else { - if dcl.BoolCanonicalize(rawDesired.Baseline, rawNew.Baseline) { - rawNew.Baseline = rawDesired.Baseline - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Deleted) && dcl.IsEmptyValueIndirect(rawDesired.Deleted) { - rawNew.Deleted = rawDesired.Deleted - } else { - if dcl.BoolCanonicalize(rawDesired.Deleted, rawNew.Deleted) { - rawNew.Deleted = rawDesired.Deleted - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Reconciling) && dcl.IsEmptyValueIndirect(rawDesired.Reconciling) { - rawNew.Reconciling = rawDesired.Reconciling - } else { - if 
dcl.BoolCanonicalize(rawDesired.Reconciling, rawNew.Reconciling) { - rawNew.Reconciling = rawDesired.Reconciling - } - } - - if dcl.IsEmptyValueIndirect(rawNew.Uid) && dcl.IsEmptyValueIndirect(rawDesired.Uid) { - rawNew.Uid = rawDesired.Uid - } else { - if dcl.StringCanonicalize(rawDesired.Uid, rawNew.Uid) { - rawNew.Uid = rawDesired.Uid - } - } - - rawNew.Project = rawDesired.Project - - rawNew.Location = rawDesired.Location - - if dcl.IsEmptyValueIndirect(rawNew.SkipAwaitRollout) && dcl.IsEmptyValueIndirect(rawDesired.SkipAwaitRollout) { - rawNew.SkipAwaitRollout = rawDesired.SkipAwaitRollout - } else { - rawNew.SkipAwaitRollout = rawDesired.SkipAwaitRollout - } - - return rawNew, nil -} - -func canonicalizeOSPolicyAssignmentOSPolicies(des, initial *OSPolicyAssignmentOSPolicies, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPolicies { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPolicies{} - - if dcl.StringCanonicalize(des.Id, initial.Id) || dcl.IsZeroValue(des.Id) { - cDes.Id = initial.Id - } else { - cDes.Id = des.Id - } - if dcl.StringCanonicalize(des.Description, initial.Description) || dcl.IsZeroValue(des.Description) { - cDes.Description = initial.Description - } else { - cDes.Description = des.Description - } - if dcl.IsZeroValue(des.Mode) || (dcl.IsEmptyValueIndirect(des.Mode) && dcl.IsEmptyValueIndirect(initial.Mode)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Mode = initial.Mode - } else { - cDes.Mode = des.Mode - } - cDes.ResourceGroups = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsSlice(des.ResourceGroups, initial.ResourceGroups, opts...) 
- if dcl.BoolCanonicalize(des.AllowNoResourceGroupMatch, initial.AllowNoResourceGroupMatch) || dcl.IsZeroValue(des.AllowNoResourceGroupMatch) { - cDes.AllowNoResourceGroupMatch = initial.AllowNoResourceGroupMatch - } else { - cDes.AllowNoResourceGroupMatch = des.AllowNoResourceGroupMatch - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesSlice(des, initial []OSPolicyAssignmentOSPolicies, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPolicies { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPolicies, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPolicies(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPolicies, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPolicies(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPolicies(c *Client, des, nw *OSPolicyAssignmentOSPolicies) *OSPolicyAssignmentOSPolicies { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPolicies while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Id, nw.Id) { - nw.Id = des.Id - } - if dcl.StringCanonicalize(des.Description, nw.Description) { - nw.Description = des.Description - } - nw.ResourceGroups = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsSlice(c, des.ResourceGroups, nw.ResourceGroups) - if dcl.BoolCanonicalize(des.AllowNoResourceGroupMatch, nw.AllowNoResourceGroupMatch) { - nw.AllowNoResourceGroupMatch = des.AllowNoResourceGroupMatch - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesSet(c *Client, des, nw []OSPolicyAssignmentOSPolicies) []OSPolicyAssignmentOSPolicies { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPolicies - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPolicies(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesSlice(c *Client, des, nw []OSPolicyAssignmentOSPolicies) []OSPolicyAssignmentOSPolicies { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPolicies - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPolicies(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroups(des, initial *OSPolicyAssignmentOSPoliciesResourceGroups, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroups { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroups{} - - cDes.InventoryFilters = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSlice(des.InventoryFilters, initial.InventoryFilters, opts...) - cDes.Resources = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesSlice(des.Resources, initial.Resources, opts...) - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroups, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroups { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroups, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroups(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroups, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroups(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroups(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroups) *OSPolicyAssignmentOSPoliciesResourceGroups { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroups while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.InventoryFilters = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSlice(c, des.InventoryFilters, nw.InventoryFilters) - nw.Resources = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesSlice(c, des.Resources, nw.Resources) - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroups) []OSPolicyAssignmentOSPoliciesResourceGroups { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroups - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroups(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroups) []OSPolicyAssignmentOSPoliciesResourceGroups { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroups - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroups(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters{} - - if dcl.StringCanonicalize(des.OSShortName, initial.OSShortName) || dcl.IsZeroValue(des.OSShortName) { - cDes.OSShortName = initial.OSShortName - } else { - cDes.OSShortName = des.OSShortName - } - if dcl.StringCanonicalize(des.OSVersion, initial.OSVersion) || dcl.IsZeroValue(des.OSVersion) { - cDes.OSVersion = initial.OSVersion - } else { - cDes.OSVersion = des.OSVersion - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.OSShortName, nw.OSShortName) { - nw.OSShortName = des.OSShortName - } - if dcl.StringCanonicalize(des.OSVersion, nw.OSVersion) { - nw.OSVersion = des.OSVersion - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) []OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) []OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResources(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResources, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResources { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.Pkg != nil || (initial != nil && initial.Pkg != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Repository, des.Exec, des.File) { - des.Pkg = nil - if initial != nil { - initial.Pkg = nil - } - } - } - - if des.Repository != nil || (initial != nil && initial.Repository != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.Pkg, des.Exec, des.File) { - des.Repository = nil - if initial != nil { - initial.Repository = nil - } - } - } - - if des.Exec != nil || (initial != nil && initial.Exec != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Pkg, des.Repository, des.File) { - des.Exec = nil - if initial != nil { - initial.Exec = nil - } - } - } - - if des.File != nil || (initial != nil && initial.File != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Pkg, des.Repository, des.Exec) { - des.File = nil - if initial != nil { - initial.File = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResources{} - - if dcl.StringCanonicalize(des.Id, initial.Id) || dcl.IsZeroValue(des.Id) { - cDes.Id = initial.Id - } else { - cDes.Id = des.Id - } - cDes.Pkg = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(des.Pkg, initial.Pkg, opts...) - cDes.Repository = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(des.Repository, initial.Repository, opts...) - cDes.Exec = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(des.Exec, initial.Exec, opts...) - cDes.File = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(des.File, initial.File, opts...) - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResources, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResources { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResources, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResources(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResources, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResources(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResources(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResources) *OSPolicyAssignmentOSPoliciesResourceGroupsResources { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResources while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Id, nw.Id) { - nw.Id = des.Id - } - nw.Pkg = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(c, des.Pkg, nw.Pkg) - nw.Repository = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(c, des.Repository, nw.Repository) - nw.Exec = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(c, des.Exec, nw.Exec) - nw.File = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(c, des.File, nw.File) - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResources) []OSPolicyAssignmentOSPoliciesResourceGroupsResources { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResources - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResources(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResources) []OSPolicyAssignmentOSPoliciesResourceGroupsResources { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResources - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResources(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.Apt != nil || (initial != nil && initial.Apt != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Deb, des.Yum, des.Zypper, des.Rpm, des.Googet, des.Msi) { - des.Apt = nil - if initial != nil { - initial.Apt = nil - } - } - } - - if des.Deb != nil || (initial != nil && initial.Deb != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.Apt, des.Yum, des.Zypper, des.Rpm, des.Googet, des.Msi) { - des.Deb = nil - if initial != nil { - initial.Deb = nil - } - } - } - - if des.Yum != nil || (initial != nil && initial.Yum != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Apt, des.Deb, des.Zypper, des.Rpm, des.Googet, des.Msi) { - des.Yum = nil - if initial != nil { - initial.Yum = nil - } - } - } - - if des.Zypper != nil || (initial != nil && initial.Zypper != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Apt, des.Deb, des.Yum, des.Rpm, des.Googet, des.Msi) { - des.Zypper = nil - if initial != nil { - initial.Zypper = nil - } - } - } - - if des.Rpm != nil || (initial != nil && initial.Rpm != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Apt, des.Deb, des.Yum, des.Zypper, des.Googet, des.Msi) { - des.Rpm = nil - if initial != nil { - initial.Rpm = nil - } - } - } - - if des.Googet != nil || (initial != nil && initial.Googet != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Apt, des.Deb, des.Yum, des.Zypper, des.Rpm, des.Msi) { - des.Googet = nil - if initial != nil { - initial.Googet = nil - } - } - } - - if des.Msi != nil || (initial != nil && initial.Msi != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Apt, des.Deb, des.Yum, des.Zypper, des.Rpm, des.Googet) { - des.Msi = nil - if initial != nil { - initial.Msi = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg{} - - if dcl.IsZeroValue(des.DesiredState) || (dcl.IsEmptyValueIndirect(des.DesiredState) && dcl.IsEmptyValueIndirect(initial.DesiredState)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.DesiredState = initial.DesiredState - } else { - cDes.DesiredState = des.DesiredState - } - cDes.Apt = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(des.Apt, initial.Apt, opts...) 
- cDes.Deb = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(des.Deb, initial.Deb, opts...) - cDes.Yum = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(des.Yum, initial.Yum, opts...) - cDes.Zypper = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(des.Zypper, initial.Zypper, opts...) - cDes.Rpm = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(des.Rpm, initial.Rpm, opts...) - cDes.Googet = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(des.Googet, initial.Googet, opts...) - cDes.Msi = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(des.Msi, initial.Msi, opts...) - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.Apt = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(c, des.Apt, nw.Apt) - nw.Deb = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(c, des.Deb, nw.Deb) - nw.Yum = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(c, des.Yum, nw.Yum) - nw.Zypper = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(c, des.Zypper, nw.Zypper) - nw.Rpm = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(c, des.Rpm, nw.Rpm) - nw.Googet = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(c, des.Googet, nw.Googet) - nw.Msi = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(c, des.Msi, nw.Msi) - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt{} - - if dcl.StringCanonicalize(des.Name, initial.Name) || dcl.IsZeroValue(des.Name) { - cDes.Name = initial.Name - } else { - cDes.Name = des.Name - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptSlice(des, initial 
[]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Name, nw.Name) { - nw.Name = des.Name - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb{} - - cDes.Source = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(des.Source, initial.Source, opts...) 
- if dcl.BoolCanonicalize(des.PullDeps, initial.PullDeps) || dcl.IsZeroValue(des.PullDeps) { - cDes.PullDeps = initial.PullDeps - } else { - cDes.PullDeps = des.PullDeps - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Source = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(c, des.Source, nw.Source) - if dcl.BoolCanonicalize(des.PullDeps, nw.PullDeps) { - nw.PullDeps = des.PullDeps - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.Remote != nil || (initial != nil && initial.Remote != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Gcs, des.LocalPath) { - des.Remote = nil - if initial != nil { - initial.Remote = nil - } - } - } - - if des.Gcs != nil || (initial != nil && initial.Gcs != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Remote, des.LocalPath) { - des.Gcs = nil - if initial != nil { - initial.Gcs = nil - } - } - } - - if des.LocalPath != nil || (initial != nil && initial.LocalPath != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Remote, des.Gcs) { - des.LocalPath = nil - if initial != nil { - initial.LocalPath = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource{} - - cDes.Remote = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(des.Remote, initial.Remote, opts...) - cDes.Gcs = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(des.Gcs, initial.Gcs, opts...) 
- if dcl.StringCanonicalize(des.LocalPath, initial.LocalPath) || dcl.IsZeroValue(des.LocalPath) { - cDes.LocalPath = initial.LocalPath - } else { - cDes.LocalPath = des.LocalPath - } - if dcl.BoolCanonicalize(des.AllowInsecure, initial.AllowInsecure) || dcl.IsZeroValue(des.AllowInsecure) { - cDes.AllowInsecure = initial.AllowInsecure - } else { - cDes.AllowInsecure = des.AllowInsecure - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Remote = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(c, des.Remote, nw.Remote) - nw.Gcs = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(c, des.Gcs, nw.Gcs) - if dcl.StringCanonicalize(des.LocalPath, nw.LocalPath) { - nw.LocalPath = des.LocalPath - } - if dcl.BoolCanonicalize(des.AllowInsecure, nw.AllowInsecure) { - nw.AllowInsecure = des.AllowInsecure - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote{} - - if dcl.StringCanonicalize(des.Uri, initial.Uri) || dcl.IsZeroValue(des.Uri) { - cDes.Uri = initial.Uri - } else { - cDes.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Sha256Checksum, initial.Sha256Checksum) || dcl.IsZeroValue(des.Sha256Checksum) { - cDes.Sha256Checksum = initial.Sha256Checksum - } else { - cDes.Sha256Checksum = des.Sha256Checksum - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Uri, nw.Uri) { - nw.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Sha256Checksum, nw.Sha256Checksum) { - nw.Sha256Checksum = des.Sha256Checksum - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs{} - - if dcl.StringCanonicalize(des.Bucket, initial.Bucket) || dcl.IsZeroValue(des.Bucket) { - cDes.Bucket = initial.Bucket - } else { - cDes.Bucket = des.Bucket - } - if dcl.StringCanonicalize(des.Object, initial.Object) || dcl.IsZeroValue(des.Object) { - cDes.Object = initial.Object - } else { - cDes.Object = des.Object - } - if dcl.IsZeroValue(des.Generation) || (dcl.IsEmptyValueIndirect(des.Generation) && dcl.IsEmptyValueIndirect(initial.Generation)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Generation = initial.Generation - } else { - cDes.Generation = des.Generation - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Bucket, nw.Bucket) { - nw.Bucket = des.Bucket - } - if dcl.StringCanonicalize(des.Object, nw.Object) { - nw.Object = des.Object - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum{} - - if dcl.StringCanonicalize(des.Name, initial.Name) || dcl.IsZeroValue(des.Name) { - cDes.Name = initial.Name - } else { - cDes.Name = des.Name - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Name, nw.Name) { - nw.Name = des.Name - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper{} - - if dcl.StringCanonicalize(des.Name, initial.Name) || dcl.IsZeroValue(des.Name) { - cDes.Name = initial.Name - } else { - cDes.Name = des.Name - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Name, nw.Name) { - nw.Name = des.Name - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. 
- items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm{} - - cDes.Source = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(des.Source, initial.Source, opts...) 
- if dcl.BoolCanonicalize(des.PullDeps, initial.PullDeps) || dcl.IsZeroValue(des.PullDeps) { - cDes.PullDeps = initial.PullDeps - } else { - cDes.PullDeps = des.PullDeps - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Source = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(c, des.Source, nw.Source) - if dcl.BoolCanonicalize(des.PullDeps, nw.PullDeps) { - nw.PullDeps = des.PullDeps - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.Remote != nil || (initial != nil && initial.Remote != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Gcs, des.LocalPath) { - des.Remote = nil - if initial != nil { - initial.Remote = nil - } - } - } - - if des.Gcs != nil || (initial != nil && initial.Gcs != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Remote, des.LocalPath) { - des.Gcs = nil - if initial != nil { - initial.Gcs = nil - } - } - } - - if des.LocalPath != nil || (initial != nil && initial.LocalPath != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Remote, des.Gcs) { - des.LocalPath = nil - if initial != nil { - initial.LocalPath = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource{} - - cDes.Remote = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(des.Remote, initial.Remote, opts...) - cDes.Gcs = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(des.Gcs, initial.Gcs, opts...) 
- if dcl.StringCanonicalize(des.LocalPath, initial.LocalPath) || dcl.IsZeroValue(des.LocalPath) { - cDes.LocalPath = initial.LocalPath - } else { - cDes.LocalPath = des.LocalPath - } - if dcl.BoolCanonicalize(des.AllowInsecure, initial.AllowInsecure) || dcl.IsZeroValue(des.AllowInsecure) { - cDes.AllowInsecure = initial.AllowInsecure - } else { - cDes.AllowInsecure = des.AllowInsecure - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Remote = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(c, des.Remote, nw.Remote) - nw.Gcs = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(c, des.Gcs, nw.Gcs) - if dcl.StringCanonicalize(des.LocalPath, nw.LocalPath) { - nw.LocalPath = des.LocalPath - } - if dcl.BoolCanonicalize(des.AllowInsecure, nw.AllowInsecure) { - nw.AllowInsecure = des.AllowInsecure - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote{} - - if dcl.StringCanonicalize(des.Uri, initial.Uri) || dcl.IsZeroValue(des.Uri) { - cDes.Uri = initial.Uri - } else { - cDes.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Sha256Checksum, initial.Sha256Checksum) || dcl.IsZeroValue(des.Sha256Checksum) { - cDes.Sha256Checksum = initial.Sha256Checksum - } else { - cDes.Sha256Checksum = des.Sha256Checksum - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Uri, nw.Uri) { - nw.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Sha256Checksum, nw.Sha256Checksum) { - nw.Sha256Checksum = des.Sha256Checksum - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs{} - - if dcl.StringCanonicalize(des.Bucket, initial.Bucket) || dcl.IsZeroValue(des.Bucket) { - cDes.Bucket = initial.Bucket - } else { - cDes.Bucket = des.Bucket - } - if dcl.StringCanonicalize(des.Object, initial.Object) || dcl.IsZeroValue(des.Object) { - cDes.Object = initial.Object - } else { - cDes.Object = des.Object - } - if dcl.IsZeroValue(des.Generation) || (dcl.IsEmptyValueIndirect(des.Generation) && dcl.IsEmptyValueIndirect(initial.Generation)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Generation = initial.Generation - } else { - cDes.Generation = des.Generation - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Bucket, nw.Bucket) { - nw.Bucket = des.Bucket - } - if dcl.StringCanonicalize(des.Object, nw.Object) { - nw.Object = des.Object - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget{} - - if dcl.StringCanonicalize(des.Name, initial.Name) || dcl.IsZeroValue(des.Name) { - cDes.Name = initial.Name - } else { - cDes.Name = des.Name - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Name, nw.Name) { - nw.Name = des.Name - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi{} - - cDes.Source = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(des.Source, initial.Source, opts...) - if dcl.StringArrayCanonicalize(des.Properties, initial.Properties) { - cDes.Properties = initial.Properties - } else { - cDes.Properties = des.Properties - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.Source = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(c, des.Source, nw.Source) - if dcl.StringArrayCanonicalize(des.Properties, nw.Properties) { - nw.Properties = des.Properties - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.Remote != nil || (initial != nil && initial.Remote != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Gcs, des.LocalPath) { - des.Remote = nil - if initial != nil { - initial.Remote = nil - } - } - } - - if des.Gcs != nil || (initial != nil && initial.Gcs != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.Remote, des.LocalPath) { - des.Gcs = nil - if initial != nil { - initial.Gcs = nil - } - } - } - - if des.LocalPath != nil || (initial != nil && initial.LocalPath != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Remote, des.Gcs) { - des.LocalPath = nil - if initial != nil { - initial.LocalPath = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource{} - - cDes.Remote = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(des.Remote, initial.Remote, opts...) - cDes.Gcs = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(des.Gcs, initial.Gcs, opts...) - if dcl.StringCanonicalize(des.LocalPath, initial.LocalPath) || dcl.IsZeroValue(des.LocalPath) { - cDes.LocalPath = initial.LocalPath - } else { - cDes.LocalPath = des.LocalPath - } - if dcl.BoolCanonicalize(des.AllowInsecure, initial.AllowInsecure) || dcl.IsZeroValue(des.AllowInsecure) { - cDes.AllowInsecure = initial.AllowInsecure - } else { - cDes.AllowInsecure = des.AllowInsecure - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.Remote = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(c, des.Remote, nw.Remote) - nw.Gcs = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(c, des.Gcs, nw.Gcs) - if dcl.StringCanonicalize(des.LocalPath, nw.LocalPath) { - nw.LocalPath = des.LocalPath - } - if dcl.BoolCanonicalize(des.AllowInsecure, nw.AllowInsecure) { - nw.AllowInsecure = des.AllowInsecure - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote{} - - if dcl.StringCanonicalize(des.Uri, initial.Uri) || dcl.IsZeroValue(des.Uri) { - cDes.Uri = initial.Uri - } else { - cDes.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Sha256Checksum, initial.Sha256Checksum) || dcl.IsZeroValue(des.Sha256Checksum) { - cDes.Sha256Checksum = initial.Sha256Checksum - } else { - cDes.Sha256Checksum = des.Sha256Checksum - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Uri, nw.Uri) { - nw.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Sha256Checksum, nw.Sha256Checksum) { - nw.Sha256Checksum = des.Sha256Checksum - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs{} - - if dcl.StringCanonicalize(des.Bucket, initial.Bucket) || dcl.IsZeroValue(des.Bucket) { - cDes.Bucket = initial.Bucket - } else { - cDes.Bucket = des.Bucket - } - if dcl.StringCanonicalize(des.Object, initial.Object) || dcl.IsZeroValue(des.Object) { - cDes.Object = initial.Object - } else { - cDes.Object = des.Object - } - if dcl.IsZeroValue(des.Generation) || (dcl.IsEmptyValueIndirect(des.Generation) && dcl.IsEmptyValueIndirect(initial.Generation)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Generation = initial.Generation - } else { - cDes.Generation = des.Generation - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Bucket, nw.Bucket) { - nw.Bucket = des.Bucket - } - if dcl.StringCanonicalize(des.Object, nw.Object) { - nw.Object = des.Object - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.Apt != nil || (initial != nil && initial.Apt != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Yum, des.Zypper, des.Goo) { - des.Apt = nil - if initial != nil { - initial.Apt = nil - } - } - } - - if des.Yum != nil || (initial != nil && initial.Yum != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Apt, des.Zypper, des.Goo) { - des.Yum = nil - if initial != nil { - initial.Yum = nil - } - } - } - - if des.Zypper != nil || (initial != nil && initial.Zypper != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Apt, des.Yum, des.Goo) { - des.Zypper = nil - if initial != nil { - initial.Zypper = nil - } - } - } - - if des.Goo != nil || (initial != nil && initial.Goo != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Apt, des.Yum, des.Zypper) { - des.Goo = nil - if initial != nil { - initial.Goo = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository{} - - cDes.Apt = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(des.Apt, initial.Apt, opts...) - cDes.Yum = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(des.Yum, initial.Yum, opts...) 
- cDes.Zypper = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(des.Zypper, initial.Zypper, opts...) - cDes.Goo = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(des.Goo, initial.Goo, opts...) - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositorySlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Apt = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(c, des.Apt, nw.Apt) - nw.Yum = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(c, des.Yum, nw.Yum) - nw.Zypper = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(c, des.Zypper, nw.Zypper) - nw.Goo = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(c, des.Goo, nw.Goo) - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositorySet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositorySlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt{} - - if dcl.IsZeroValue(des.ArchiveType) || (dcl.IsEmptyValueIndirect(des.ArchiveType) && dcl.IsEmptyValueIndirect(initial.ArchiveType)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.ArchiveType = initial.ArchiveType - } else { - cDes.ArchiveType = des.ArchiveType - } - if dcl.StringCanonicalize(des.Uri, initial.Uri) || dcl.IsZeroValue(des.Uri) { - cDes.Uri = initial.Uri - } else { - cDes.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Distribution, initial.Distribution) || dcl.IsZeroValue(des.Distribution) { - cDes.Distribution = initial.Distribution - } else { - cDes.Distribution = des.Distribution - } - if dcl.StringArrayCanonicalize(des.Components, initial.Components) { - cDes.Components = initial.Components - } else { - cDes.Components = des.Components - } - if dcl.StringCanonicalize(des.GpgKey, initial.GpgKey) || dcl.IsZeroValue(des.GpgKey) { - cDes.GpgKey = initial.GpgKey - } else { - cDes.GpgKey = des.GpgKey - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt, opts ...dcl.ApplyOption) 
[]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Uri, nw.Uri) { - nw.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Distribution, nw.Distribution) { - nw.Distribution = des.Distribution - } - if dcl.StringArrayCanonicalize(des.Components, nw.Components) { - nw.Components = des.Components - } - if dcl.StringCanonicalize(des.GpgKey, nw.GpgKey) { - nw.GpgKey = des.GpgKey - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum{} - - if dcl.StringCanonicalize(des.Id, initial.Id) || dcl.IsZeroValue(des.Id) { - cDes.Id = initial.Id - } else { - cDes.Id = des.Id - } - if dcl.StringCanonicalize(des.DisplayName, initial.DisplayName) || dcl.IsZeroValue(des.DisplayName) { - cDes.DisplayName = initial.DisplayName - } else { - cDes.DisplayName = des.DisplayName - } - if dcl.StringCanonicalize(des.BaseUrl, initial.BaseUrl) || dcl.IsZeroValue(des.BaseUrl) { - cDes.BaseUrl = initial.BaseUrl - } else { - cDes.BaseUrl = des.BaseUrl - } - if dcl.StringArrayCanonicalize(des.GpgKeys, initial.GpgKeys) { - cDes.GpgKeys = initial.GpgKeys - } else { - cDes.GpgKeys = des.GpgKeys - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(&d, nil, 
opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Id, nw.Id) { - nw.Id = des.Id - } - if dcl.StringCanonicalize(des.DisplayName, nw.DisplayName) { - nw.DisplayName = des.DisplayName - } - if dcl.StringCanonicalize(des.BaseUrl, nw.BaseUrl) { - nw.BaseUrl = des.BaseUrl - } - if dcl.StringArrayCanonicalize(des.GpgKeys, nw.GpgKeys) { - nw.GpgKeys = des.GpgKeys - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper{} - - if dcl.StringCanonicalize(des.Id, initial.Id) || dcl.IsZeroValue(des.Id) { - cDes.Id = initial.Id - } else { - cDes.Id = des.Id - } - if dcl.StringCanonicalize(des.DisplayName, initial.DisplayName) || dcl.IsZeroValue(des.DisplayName) { - cDes.DisplayName = initial.DisplayName - } else { - cDes.DisplayName = des.DisplayName - } - if dcl.StringCanonicalize(des.BaseUrl, initial.BaseUrl) || dcl.IsZeroValue(des.BaseUrl) { - cDes.BaseUrl = initial.BaseUrl - } else { - cDes.BaseUrl = des.BaseUrl - } - if dcl.StringArrayCanonicalize(des.GpgKeys, initial.GpgKeys) { - cDes.GpgKeys = initial.GpgKeys - } else { - cDes.GpgKeys = des.GpgKeys - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper, 0, len(des)) - for _, d := range des { - cd := 
canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Id, nw.Id) { - nw.Id = des.Id - } - if dcl.StringCanonicalize(des.DisplayName, nw.DisplayName) { - nw.DisplayName = des.DisplayName - } - if dcl.StringCanonicalize(des.BaseUrl, nw.BaseUrl) { - nw.BaseUrl = des.BaseUrl - } - if dcl.StringArrayCanonicalize(des.GpgKeys, nw.GpgKeys) { - nw.GpgKeys = des.GpgKeys - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo{} - - if dcl.StringCanonicalize(des.Name, initial.Name) || dcl.IsZeroValue(des.Name) { - cDes.Name = initial.Name - } else { - cDes.Name = des.Name - } - if dcl.StringCanonicalize(des.Url, initial.Url) || dcl.IsZeroValue(des.Url) { - cDes.Url = initial.Url - } else { - cDes.Url = des.Url - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Name, nw.Name) { - nw.Name = des.Name - } - if dcl.StringCanonicalize(des.Url, nw.Url) { - nw.Url = des.Url - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) 
- - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec{} - - cDes.Validate = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(des.Validate, initial.Validate, opts...) - cDes.Enforce = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(des.Enforce, initial.Enforce, opts...) - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.Validate = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(c, des.Validate, nw.Validate) - nw.Enforce = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(c, des.Enforce, nw.Enforce) - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.File != nil || (initial != nil && initial.File != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Script) { - des.File = nil - if initial != nil { - initial.File = nil - } - } - } - - if des.Script != nil || (initial != nil && initial.Script != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.File) { - des.Script = nil - if initial != nil { - initial.Script = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate{} - - cDes.File = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(des.File, initial.File, opts...) - if dcl.StringCanonicalize(des.Script, initial.Script) || dcl.IsZeroValue(des.Script) { - cDes.Script = initial.Script - } else { - cDes.Script = des.Script - } - if dcl.StringArrayCanonicalize(des.Args, initial.Args) { - cDes.Args = initial.Args - } else { - cDes.Args = des.Args - } - if dcl.IsZeroValue(des.Interpreter) || (dcl.IsEmptyValueIndirect(des.Interpreter) && dcl.IsEmptyValueIndirect(initial.Interpreter)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Interpreter = initial.Interpreter - } else { - cDes.Interpreter = des.Interpreter - } - if dcl.StringCanonicalize(des.OutputFilePath, initial.OutputFilePath) || dcl.IsZeroValue(des.OutputFilePath) { - cDes.OutputFilePath = initial.OutputFilePath - } else { - cDes.OutputFilePath = des.OutputFilePath - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.File = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(c, des.File, nw.File) - if dcl.StringCanonicalize(des.Script, nw.Script) { - nw.Script = des.Script - } - if dcl.StringArrayCanonicalize(des.Args, nw.Args) { - nw.Args = des.Args - } - if dcl.StringCanonicalize(des.OutputFilePath, nw.OutputFilePath) { - nw.OutputFilePath = des.OutputFilePath - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.Remote != nil || (initial != nil && initial.Remote != nil) { - // Check if anything else is set. 
- if dcl.AnySet(des.Gcs, des.LocalPath) { - des.Remote = nil - if initial != nil { - initial.Remote = nil - } - } - } - - if des.Gcs != nil || (initial != nil && initial.Gcs != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Remote, des.LocalPath) { - des.Gcs = nil - if initial != nil { - initial.Gcs = nil - } - } - } - - if des.LocalPath != nil || (initial != nil && initial.LocalPath != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Remote, des.Gcs) { - des.LocalPath = nil - if initial != nil { - initial.LocalPath = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile{} - - cDes.Remote = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(des.Remote, initial.Remote, opts...) - cDes.Gcs = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(des.Gcs, initial.Gcs, opts...) - if dcl.StringCanonicalize(des.LocalPath, initial.LocalPath) || dcl.IsZeroValue(des.LocalPath) { - cDes.LocalPath = initial.LocalPath - } else { - cDes.LocalPath = des.LocalPath - } - if dcl.BoolCanonicalize(des.AllowInsecure, initial.AllowInsecure) || dcl.IsZeroValue(des.AllowInsecure) { - cDes.AllowInsecure = initial.AllowInsecure - } else { - cDes.AllowInsecure = des.AllowInsecure - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.Remote = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(c, des.Remote, nw.Remote) - nw.Gcs = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(c, des.Gcs, nw.Gcs) - if dcl.StringCanonicalize(des.LocalPath, nw.LocalPath) { - nw.LocalPath = des.LocalPath - } - if dcl.BoolCanonicalize(des.AllowInsecure, nw.AllowInsecure) { - nw.AllowInsecure = des.AllowInsecure - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote{} - - if dcl.StringCanonicalize(des.Uri, initial.Uri) || dcl.IsZeroValue(des.Uri) { - cDes.Uri = initial.Uri - } else { - cDes.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Sha256Checksum, initial.Sha256Checksum) || dcl.IsZeroValue(des.Sha256Checksum) { - cDes.Sha256Checksum = initial.Sha256Checksum - } else { - cDes.Sha256Checksum = des.Sha256Checksum - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Uri, nw.Uri) { - nw.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Sha256Checksum, nw.Sha256Checksum) { - nw.Sha256Checksum = des.Sha256Checksum - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs{} - - if dcl.StringCanonicalize(des.Bucket, initial.Bucket) || dcl.IsZeroValue(des.Bucket) { - cDes.Bucket = initial.Bucket - } else { - cDes.Bucket = des.Bucket - } - if dcl.StringCanonicalize(des.Object, initial.Object) || dcl.IsZeroValue(des.Object) { - cDes.Object = initial.Object - } else { - cDes.Object = des.Object - } - if dcl.IsZeroValue(des.Generation) || (dcl.IsEmptyValueIndirect(des.Generation) && dcl.IsEmptyValueIndirect(initial.Generation)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Generation = initial.Generation - } else { - cDes.Generation = des.Generation - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Bucket, nw.Bucket) { - nw.Bucket = des.Bucket - } - if dcl.StringCanonicalize(des.Object, nw.Object) { - nw.Object = des.Object - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.File != nil || (initial != nil && initial.File != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Script) { - des.File = nil - if initial != nil { - initial.File = nil - } - } - } - - if des.Script != nil || (initial != nil && initial.Script != nil) { - // Check if anything else is set. - if dcl.AnySet(des.File) { - des.Script = nil - if initial != nil { - initial.Script = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce{} - - cDes.File = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(des.File, initial.File, opts...) - if dcl.StringCanonicalize(des.Script, initial.Script) || dcl.IsZeroValue(des.Script) { - cDes.Script = initial.Script - } else { - cDes.Script = des.Script - } - if dcl.StringArrayCanonicalize(des.Args, initial.Args) { - cDes.Args = initial.Args - } else { - cDes.Args = des.Args - } - if dcl.IsZeroValue(des.Interpreter) || (dcl.IsEmptyValueIndirect(des.Interpreter) && dcl.IsEmptyValueIndirect(initial.Interpreter)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Interpreter = initial.Interpreter - } else { - cDes.Interpreter = des.Interpreter - } - if dcl.StringCanonicalize(des.OutputFilePath, initial.OutputFilePath) || dcl.IsZeroValue(des.OutputFilePath) { - cDes.OutputFilePath = initial.OutputFilePath - } else { - cDes.OutputFilePath = des.OutputFilePath - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.File = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(c, des.File, nw.File) - if dcl.StringCanonicalize(des.Script, nw.Script) { - nw.Script = des.Script - } - if dcl.StringArrayCanonicalize(des.Args, nw.Args) { - nw.Args = des.Args - } - if dcl.StringCanonicalize(des.OutputFilePath, nw.OutputFilePath) { - nw.OutputFilePath = des.OutputFilePath - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.Remote != nil || (initial != nil && initial.Remote != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Gcs, des.LocalPath) { - des.Remote = nil - if initial != nil { - initial.Remote = nil - } - } - } - - if des.Gcs != nil || (initial != nil && initial.Gcs != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Remote, des.LocalPath) { - des.Gcs = nil - if initial != nil { - initial.Gcs = nil - } - } - } - - if des.LocalPath != nil || (initial != nil && initial.LocalPath != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Remote, des.Gcs) { - des.LocalPath = nil - if initial != nil { - initial.LocalPath = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile{} - - cDes.Remote = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(des.Remote, initial.Remote, opts...) - cDes.Gcs = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(des.Gcs, initial.Gcs, opts...) 
- if dcl.StringCanonicalize(des.LocalPath, initial.LocalPath) || dcl.IsZeroValue(des.LocalPath) { - cDes.LocalPath = initial.LocalPath - } else { - cDes.LocalPath = des.LocalPath - } - if dcl.BoolCanonicalize(des.AllowInsecure, initial.AllowInsecure) || dcl.IsZeroValue(des.AllowInsecure) { - cDes.AllowInsecure = initial.AllowInsecure - } else { - cDes.AllowInsecure = des.AllowInsecure - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Remote = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(c, des.Remote, nw.Remote) - nw.Gcs = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(c, des.Gcs, nw.Gcs) - if dcl.StringCanonicalize(des.LocalPath, nw.LocalPath) { - nw.LocalPath = des.LocalPath - } - if dcl.BoolCanonicalize(des.AllowInsecure, nw.AllowInsecure) { - nw.AllowInsecure = des.AllowInsecure - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote{} - - if dcl.StringCanonicalize(des.Uri, initial.Uri) || dcl.IsZeroValue(des.Uri) { - cDes.Uri = initial.Uri - } else { - cDes.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Sha256Checksum, initial.Sha256Checksum) || dcl.IsZeroValue(des.Sha256Checksum) { - cDes.Sha256Checksum = initial.Sha256Checksum - } else { - cDes.Sha256Checksum = des.Sha256Checksum - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Uri, nw.Uri) { - nw.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Sha256Checksum, nw.Sha256Checksum) { - nw.Sha256Checksum = des.Sha256Checksum - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs{} - - if dcl.StringCanonicalize(des.Bucket, initial.Bucket) || dcl.IsZeroValue(des.Bucket) { - cDes.Bucket = initial.Bucket - } else { - cDes.Bucket = des.Bucket - } - if dcl.StringCanonicalize(des.Object, initial.Object) || dcl.IsZeroValue(des.Object) { - cDes.Object = initial.Object - } else { - cDes.Object = des.Object - } - if dcl.IsZeroValue(des.Generation) || (dcl.IsEmptyValueIndirect(des.Generation) && dcl.IsEmptyValueIndirect(initial.Generation)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Generation = initial.Generation - } else { - cDes.Generation = des.Generation - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Bucket, nw.Bucket) { - nw.Bucket = des.Bucket - } - if dcl.StringCanonicalize(des.Object, nw.Object) { - nw.Object = des.Object - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.File != nil || (initial != nil && initial.File != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Content) { - des.File = nil - if initial != nil { - initial.File = nil - } - } - } - - if des.Content != nil || (initial != nil && initial.Content != nil) { - // Check if anything else is set. - if dcl.AnySet(des.File) { - des.Content = nil - if initial != nil { - initial.Content = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile{} - - cDes.File = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(des.File, initial.File, opts...) - if dcl.StringCanonicalize(des.Content, initial.Content) || dcl.IsZeroValue(des.Content) { - cDes.Content = initial.Content - } else { - cDes.Content = des.Content - } - if dcl.StringCanonicalize(des.Path, initial.Path) || dcl.IsZeroValue(des.Path) { - cDes.Path = initial.Path - } else { - cDes.Path = des.Path - } - if dcl.IsZeroValue(des.State) || (dcl.IsEmptyValueIndirect(des.State) && dcl.IsEmptyValueIndirect(initial.State)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.State = initial.State - } else { - cDes.State = des.State - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.File = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(c, des.File, nw.File) - if dcl.StringCanonicalize(des.Content, nw.Content) { - nw.Content = des.Content - } - if dcl.StringCanonicalize(des.Path, nw.Path) { - nw.Path = des.Path - } - if dcl.StringCanonicalize(des.Permissions, nw.Permissions) { - nw.Permissions = des.Permissions - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.Remote != nil || (initial != nil && initial.Remote != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Gcs, des.LocalPath) { - des.Remote = nil - if initial != nil { - initial.Remote = nil - } - } - } - - if des.Gcs != nil || (initial != nil && initial.Gcs != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Remote, des.LocalPath) { - des.Gcs = nil - if initial != nil { - initial.Gcs = nil - } - } - } - - if des.LocalPath != nil || (initial != nil && initial.LocalPath != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Remote, des.Gcs) { - des.LocalPath = nil - if initial != nil { - initial.LocalPath = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile{} - - cDes.Remote = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(des.Remote, initial.Remote, opts...) - cDes.Gcs = canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(des.Gcs, initial.Gcs, opts...) 
- if dcl.StringCanonicalize(des.LocalPath, initial.LocalPath) || dcl.IsZeroValue(des.LocalPath) { - cDes.LocalPath = initial.LocalPath - } else { - cDes.LocalPath = des.LocalPath - } - if dcl.BoolCanonicalize(des.AllowInsecure, initial.AllowInsecure) || dcl.IsZeroValue(des.AllowInsecure) { - cDes.AllowInsecure = initial.AllowInsecure - } else { - cDes.AllowInsecure = des.AllowInsecure - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - nw.Remote = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(c, des.Remote, nw.Remote) - nw.Gcs = canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(c, des.Gcs, nw.Gcs) - if dcl.StringCanonicalize(des.LocalPath, nw.LocalPath) { - nw.LocalPath = des.LocalPath - } - if dcl.BoolCanonicalize(des.AllowInsecure, nw.AllowInsecure) { - nw.AllowInsecure = des.AllowInsecure - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote{} - - if dcl.StringCanonicalize(des.Uri, initial.Uri) || dcl.IsZeroValue(des.Uri) { - cDes.Uri = initial.Uri - } else { - cDes.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Sha256Checksum, initial.Sha256Checksum) || dcl.IsZeroValue(des.Sha256Checksum) { - cDes.Sha256Checksum = initial.Sha256Checksum - } else { - cDes.Sha256Checksum = des.Sha256Checksum - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Uri, nw.Uri) { - nw.Uri = des.Uri - } - if dcl.StringCanonicalize(des.Sha256Checksum, nw.Sha256Checksum) { - nw.Sha256Checksum = des.Sha256Checksum - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(des, initial *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs, opts ...dcl.ApplyOption) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs{} - - if dcl.StringCanonicalize(des.Bucket, initial.Bucket) || dcl.IsZeroValue(des.Bucket) { - cDes.Bucket = initial.Bucket - } else { - cDes.Bucket = des.Bucket - } - if dcl.StringCanonicalize(des.Object, initial.Object) || dcl.IsZeroValue(des.Object) { - cDes.Object = initial.Object - } else { - cDes.Object = des.Object - } - if dcl.IsZeroValue(des.Generation) || (dcl.IsEmptyValueIndirect(des.Generation) && dcl.IsEmptyValueIndirect(initial.Generation)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Generation = initial.Generation - } else { - cDes.Generation = des.Generation - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsSlice(des, initial []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs, opts ...dcl.ApplyOption) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(&d, nil, opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(c *Client, des, nw *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.Bucket, nw.Bucket) { - nw.Bucket = des.Bucket - } - if dcl.StringCanonicalize(des.Object, nw.Object) { - nw.Object = des.Object - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsSet(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsSlice(c *Client, des, nw []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentInstanceFilter(des, initial *OSPolicyAssignmentInstanceFilter, opts ...dcl.ApplyOption) *OSPolicyAssignmentInstanceFilter { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentInstanceFilter{} - - if dcl.BoolCanonicalize(des.All, initial.All) || dcl.IsZeroValue(des.All) { - cDes.All = initial.All - } else { - cDes.All = des.All - } - cDes.InclusionLabels = canonicalizeOSPolicyAssignmentInstanceFilterInclusionLabelsSlice(des.InclusionLabels, initial.InclusionLabels, opts...) 
- cDes.ExclusionLabels = canonicalizeOSPolicyAssignmentInstanceFilterExclusionLabelsSlice(des.ExclusionLabels, initial.ExclusionLabels, opts...) - cDes.Inventories = canonicalizeOSPolicyAssignmentInstanceFilterInventoriesSlice(des.Inventories, initial.Inventories, opts...) - - return cDes -} - -func canonicalizeOSPolicyAssignmentInstanceFilterSlice(des, initial []OSPolicyAssignmentInstanceFilter, opts ...dcl.ApplyOption) []OSPolicyAssignmentInstanceFilter { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentInstanceFilter, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentInstanceFilter(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentInstanceFilter, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentInstanceFilter(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentInstanceFilter(c *Client, des, nw *OSPolicyAssignmentInstanceFilter) *OSPolicyAssignmentInstanceFilter { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentInstanceFilter while comparing non-nil desired to nil actual. 
Returning desired object.") - return des - } - return nil - } - - if dcl.BoolCanonicalize(des.All, nw.All) { - nw.All = des.All - } - nw.InclusionLabels = canonicalizeNewOSPolicyAssignmentInstanceFilterInclusionLabelsSlice(c, des.InclusionLabels, nw.InclusionLabels) - nw.ExclusionLabels = canonicalizeNewOSPolicyAssignmentInstanceFilterExclusionLabelsSlice(c, des.ExclusionLabels, nw.ExclusionLabels) - nw.Inventories = canonicalizeNewOSPolicyAssignmentInstanceFilterInventoriesSlice(c, des.Inventories, nw.Inventories) - - return nw -} - -func canonicalizeNewOSPolicyAssignmentInstanceFilterSet(c *Client, des, nw []OSPolicyAssignmentInstanceFilter) []OSPolicyAssignmentInstanceFilter { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentInstanceFilter - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentInstanceFilterNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentInstanceFilter(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentInstanceFilterSlice(c *Client, des, nw []OSPolicyAssignmentInstanceFilter) []OSPolicyAssignmentInstanceFilter { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentInstanceFilter - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentInstanceFilter(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentInstanceFilterInclusionLabels(des, initial *OSPolicyAssignmentInstanceFilterInclusionLabels, opts ...dcl.ApplyOption) *OSPolicyAssignmentInstanceFilterInclusionLabels { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentInstanceFilterInclusionLabels{} - - if dcl.IsZeroValue(des.Labels) || (dcl.IsEmptyValueIndirect(des.Labels) && dcl.IsEmptyValueIndirect(initial.Labels)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Labels = initial.Labels - } else { - cDes.Labels = des.Labels - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentInstanceFilterInclusionLabelsSlice(des, initial []OSPolicyAssignmentInstanceFilterInclusionLabels, opts ...dcl.ApplyOption) []OSPolicyAssignmentInstanceFilterInclusionLabels { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentInstanceFilterInclusionLabels, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentInstanceFilterInclusionLabels(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentInstanceFilterInclusionLabels, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentInstanceFilterInclusionLabels(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentInstanceFilterInclusionLabels(c *Client, des, nw *OSPolicyAssignmentInstanceFilterInclusionLabels) *OSPolicyAssignmentInstanceFilterInclusionLabels { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentInstanceFilterInclusionLabels while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentInstanceFilterInclusionLabelsSet(c *Client, des, nw []OSPolicyAssignmentInstanceFilterInclusionLabels) []OSPolicyAssignmentInstanceFilterInclusionLabels { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentInstanceFilterInclusionLabels - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentInstanceFilterInclusionLabelsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentInstanceFilterInclusionLabels(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentInstanceFilterInclusionLabelsSlice(c *Client, des, nw []OSPolicyAssignmentInstanceFilterInclusionLabels) []OSPolicyAssignmentInstanceFilterInclusionLabels { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentInstanceFilterInclusionLabels - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentInstanceFilterInclusionLabels(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentInstanceFilterExclusionLabels(des, initial *OSPolicyAssignmentInstanceFilterExclusionLabels, opts ...dcl.ApplyOption) *OSPolicyAssignmentInstanceFilterExclusionLabels { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentInstanceFilterExclusionLabels{} - - if dcl.IsZeroValue(des.Labels) || (dcl.IsEmptyValueIndirect(des.Labels) && dcl.IsEmptyValueIndirect(initial.Labels)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Labels = initial.Labels - } else { - cDes.Labels = des.Labels - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentInstanceFilterExclusionLabelsSlice(des, initial []OSPolicyAssignmentInstanceFilterExclusionLabels, opts ...dcl.ApplyOption) []OSPolicyAssignmentInstanceFilterExclusionLabels { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentInstanceFilterExclusionLabels, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentInstanceFilterExclusionLabels(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentInstanceFilterExclusionLabels, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentInstanceFilterExclusionLabels(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentInstanceFilterExclusionLabels(c *Client, des, nw *OSPolicyAssignmentInstanceFilterExclusionLabels) *OSPolicyAssignmentInstanceFilterExclusionLabels { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentInstanceFilterExclusionLabels while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentInstanceFilterExclusionLabelsSet(c *Client, des, nw []OSPolicyAssignmentInstanceFilterExclusionLabels) []OSPolicyAssignmentInstanceFilterExclusionLabels { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentInstanceFilterExclusionLabels - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentInstanceFilterExclusionLabelsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentInstanceFilterExclusionLabels(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentInstanceFilterExclusionLabelsSlice(c *Client, des, nw []OSPolicyAssignmentInstanceFilterExclusionLabels) []OSPolicyAssignmentInstanceFilterExclusionLabels { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentInstanceFilterExclusionLabels - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentInstanceFilterExclusionLabels(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentInstanceFilterInventories(des, initial *OSPolicyAssignmentInstanceFilterInventories, opts ...dcl.ApplyOption) *OSPolicyAssignmentInstanceFilterInventories { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentInstanceFilterInventories{} - - if dcl.StringCanonicalize(des.OSShortName, initial.OSShortName) || dcl.IsZeroValue(des.OSShortName) { - cDes.OSShortName = initial.OSShortName - } else { - cDes.OSShortName = des.OSShortName - } - if dcl.StringCanonicalize(des.OSVersion, initial.OSVersion) || dcl.IsZeroValue(des.OSVersion) { - cDes.OSVersion = initial.OSVersion - } else { - cDes.OSVersion = des.OSVersion - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentInstanceFilterInventoriesSlice(des, initial []OSPolicyAssignmentInstanceFilterInventories, opts ...dcl.ApplyOption) []OSPolicyAssignmentInstanceFilterInventories { - if des == nil { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentInstanceFilterInventories, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentInstanceFilterInventories(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentInstanceFilterInventories, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentInstanceFilterInventories(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentInstanceFilterInventories(c *Client, des, nw *OSPolicyAssignmentInstanceFilterInventories) *OSPolicyAssignmentInstanceFilterInventories { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentInstanceFilterInventories while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - if dcl.StringCanonicalize(des.OSShortName, nw.OSShortName) { - nw.OSShortName = des.OSShortName - } - if dcl.StringCanonicalize(des.OSVersion, nw.OSVersion) { - nw.OSVersion = des.OSVersion - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentInstanceFilterInventoriesSet(c *Client, des, nw []OSPolicyAssignmentInstanceFilterInventories) []OSPolicyAssignmentInstanceFilterInventories { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentInstanceFilterInventories - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentInstanceFilterInventoriesNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentInstanceFilterInventories(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentInstanceFilterInventoriesSlice(c *Client, des, nw []OSPolicyAssignmentInstanceFilterInventories) []OSPolicyAssignmentInstanceFilterInventories { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. 
- // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentInstanceFilterInventories - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentInstanceFilterInventories(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentRollout(des, initial *OSPolicyAssignmentRollout, opts ...dcl.ApplyOption) *OSPolicyAssignmentRollout { - if des == nil { - return initial - } - if des.empty { - return des - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentRollout{} - - cDes.DisruptionBudget = canonicalizeOSPolicyAssignmentRolloutDisruptionBudget(des.DisruptionBudget, initial.DisruptionBudget, opts...) - if canonicalizeOSPolicyAssignmentRolloutMinWaitDuration(des.MinWaitDuration, initial.MinWaitDuration) || dcl.IsZeroValue(des.MinWaitDuration) { - cDes.MinWaitDuration = initial.MinWaitDuration - } else { - cDes.MinWaitDuration = des.MinWaitDuration - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentRolloutSlice(des, initial []OSPolicyAssignmentRollout, opts ...dcl.ApplyOption) []OSPolicyAssignmentRollout { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentRollout, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentRollout(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentRollout, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentRollout(&d, &initial[i], opts...) 
- if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentRollout(c *Client, des, nw *OSPolicyAssignmentRollout) *OSPolicyAssignmentRollout { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentRollout while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - nw.DisruptionBudget = canonicalizeNewOSPolicyAssignmentRolloutDisruptionBudget(c, des.DisruptionBudget, nw.DisruptionBudget) - if canonicalizeOSPolicyAssignmentRolloutMinWaitDuration(des.MinWaitDuration, nw.MinWaitDuration) { - nw.MinWaitDuration = des.MinWaitDuration - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentRolloutSet(c *Client, des, nw []OSPolicyAssignmentRollout) []OSPolicyAssignmentRollout { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. - var items []OSPolicyAssignmentRollout - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentRolloutNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentRollout(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentRolloutSlice(c *Client, des, nw []OSPolicyAssignmentRollout) []OSPolicyAssignmentRollout { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. 
- if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentRollout - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentRollout(c, &d, &n)) - } - - return items -} - -func canonicalizeOSPolicyAssignmentRolloutDisruptionBudget(des, initial *OSPolicyAssignmentRolloutDisruptionBudget, opts ...dcl.ApplyOption) *OSPolicyAssignmentRolloutDisruptionBudget { - if des == nil { - return initial - } - if des.empty { - return des - } - - if des.Fixed != nil || (initial != nil && initial.Fixed != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Percent) { - des.Fixed = nil - if initial != nil { - initial.Fixed = nil - } - } - } - - if des.Percent != nil || (initial != nil && initial.Percent != nil) { - // Check if anything else is set. - if dcl.AnySet(des.Fixed) { - des.Percent = nil - if initial != nil { - initial.Percent = nil - } - } - } - - if initial == nil { - return des - } - - cDes := &OSPolicyAssignmentRolloutDisruptionBudget{} - - if dcl.IsZeroValue(des.Fixed) || (dcl.IsEmptyValueIndirect(des.Fixed) && dcl.IsEmptyValueIndirect(initial.Fixed)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. - cDes.Fixed = initial.Fixed - } else { - cDes.Fixed = des.Fixed - } - if dcl.IsZeroValue(des.Percent) || (dcl.IsEmptyValueIndirect(des.Percent) && dcl.IsEmptyValueIndirect(initial.Percent)) { - // Desired and initial values are equivalent, so set canonical desired value to initial value. 
- cDes.Percent = initial.Percent - } else { - cDes.Percent = des.Percent - } - - return cDes -} - -func canonicalizeOSPolicyAssignmentRolloutDisruptionBudgetSlice(des, initial []OSPolicyAssignmentRolloutDisruptionBudget, opts ...dcl.ApplyOption) []OSPolicyAssignmentRolloutDisruptionBudget { - if dcl.IsEmptyValueIndirect(des) { - return initial - } - - if len(des) != len(initial) { - - items := make([]OSPolicyAssignmentRolloutDisruptionBudget, 0, len(des)) - for _, d := range des { - cd := canonicalizeOSPolicyAssignmentRolloutDisruptionBudget(&d, nil, opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - } - - items := make([]OSPolicyAssignmentRolloutDisruptionBudget, 0, len(des)) - for i, d := range des { - cd := canonicalizeOSPolicyAssignmentRolloutDisruptionBudget(&d, &initial[i], opts...) - if cd != nil { - items = append(items, *cd) - } - } - return items - -} - -func canonicalizeNewOSPolicyAssignmentRolloutDisruptionBudget(c *Client, des, nw *OSPolicyAssignmentRolloutDisruptionBudget) *OSPolicyAssignmentRolloutDisruptionBudget { - - if des == nil { - return nw - } - - if nw == nil { - if dcl.IsEmptyValueIndirect(des) { - c.Config.Logger.Info("Found explicitly empty value for OSPolicyAssignmentRolloutDisruptionBudget while comparing non-nil desired to nil actual. Returning desired object.") - return des - } - return nil - } - - return nw -} - -func canonicalizeNewOSPolicyAssignmentRolloutDisruptionBudgetSet(c *Client, des, nw []OSPolicyAssignmentRolloutDisruptionBudget) []OSPolicyAssignmentRolloutDisruptionBudget { - if des == nil { - return nw - } - - // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw. 
- var items []OSPolicyAssignmentRolloutDisruptionBudget - for _, d := range des { - matchedIndex := -1 - for i, n := range nw { - if diffs, _ := compareOSPolicyAssignmentRolloutDisruptionBudgetNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 { - matchedIndex = i - break - } - } - if matchedIndex != -1 { - items = append(items, *canonicalizeNewOSPolicyAssignmentRolloutDisruptionBudget(c, &d, &nw[matchedIndex])) - nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...) - } - } - // Also include elements in nw that are not matched in des. - items = append(items, nw...) - - return items -} - -func canonicalizeNewOSPolicyAssignmentRolloutDisruptionBudgetSlice(c *Client, des, nw []OSPolicyAssignmentRolloutDisruptionBudget) []OSPolicyAssignmentRolloutDisruptionBudget { - if des == nil { - return nw - } - - // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize. - // Return the original array. - if len(des) != len(nw) { - return nw - } - - var items []OSPolicyAssignmentRolloutDisruptionBudget - for i, d := range des { - n := nw[i] - items = append(items, *canonicalizeNewOSPolicyAssignmentRolloutDisruptionBudget(c, &d, &n)) - } - - return items -} - -// The differ returns a list of diffs, along with a list of operations that should be taken -// to remedy them. Right now, it does not attempt to consolidate operations - if several -// fields can be fixed with a patch update, it will perform the patch several times. -// Diffs on some fields will be ignored if the `desired` state has an empty (nil) -// value. This empty value indicates that the user does not care about the state for -// the field. Empty fields on the actual object will cause diffs. -// TODO(magic-modules-eng): for efficiency in some resources, add batching. 
-func diffOSPolicyAssignment(c *Client, desired, actual *OSPolicyAssignment, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) { - if desired == nil || actual == nil { - return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual) - } - - c.Config.Logger.Infof("Diff function called with desired state: %v", desired) - c.Config.Logger.Infof("Diff function called with actual state: %v", actual) - - var fn dcl.FieldName - var newDiffs []*dcl.FieldDiff - // New style diffs. - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.OSPolicies, actual.OSPolicies, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPolicies, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("OsPolicies")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.InstanceFilter, actual.InstanceFilter, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentInstanceFilterNewStyle, EmptyObject: EmptyOSPolicyAssignmentInstanceFilter, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("InstanceFilter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Rollout, actual.Rollout, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentRolloutNewStyle, EmptyObject: EmptyOSPolicyAssignmentRollout, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Rollout")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.RevisionId, actual.RevisionId, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RevisionId")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.RevisionCreateTime, actual.RevisionCreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RevisionCreateTime")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Etag, actual.Etag, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Etag")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.RolloutState, actual.RolloutState, dcl.DiffInfo{OutputOnly: true, Type: "EnumType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("RolloutState")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Baseline, actual.Baseline, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Baseline")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Deleted, actual.Deleted, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Deleted")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Reconciling, actual.Reconciling, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Reconciling")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Uid, actual.Uid, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Uid")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Project, actual.Project, dcl.DiffInfo{Type: "ReferenceType", OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Project")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.Location, actual.Location, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Location")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) - } - - if ds, err := dcl.Diff(desired.SkipAwaitRollout, actual.SkipAwaitRollout, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("SkipAwaitRollout")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - newDiffs = append(newDiffs, ds...) 
- } - - if len(newDiffs) > 0 { - c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs) - } - return newDiffs, nil -} -func compareOSPolicyAssignmentOSPoliciesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPolicies) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPolicies) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPolicies or *OSPolicyAssignmentOSPolicies", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPolicies) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPolicies) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPolicies", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Id, actual.Id, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Id")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Description, actual.Description, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Description")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Mode, actual.Mode, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Mode")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.ResourceGroups, actual.ResourceGroups, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroups, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("ResourceGroups")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.AllowNoResourceGroupMatch, actual.AllowNoResourceGroupMatch, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("AllowNoResourceGroupMatch")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroups) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroups) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroups or *OSPolicyAssignmentOSPoliciesResourceGroups", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroups) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroups) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroups", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.InventoryFilters, actual.InventoryFilters, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, 
fn.AddNest("InventoryFilters")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Resources, actual.Resources, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResources, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Resources")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters or *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.OSShortName, actual.OSShortName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("OsShortName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.OSVersion, actual.OSVersion, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("OsVersion")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResources) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResources) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResources or *OSPolicyAssignmentOSPoliciesResourceGroupsResources", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResources) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResources) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResources", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Id, actual.Id, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Id")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Pkg, actual.Pkg, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Pkg")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Repository, actual.Repository, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Repository")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Exec, actual.Exec, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Exec")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.File, actual.File, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("File")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.DesiredState, actual.DesiredState, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("DesiredState")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Apt, actual.Apt, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Apt")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Deb, actual.Deb, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Deb")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Yum, actual.Yum, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Yum")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Zypper, actual.Zypper, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Zypper")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Rpm, actual.Rpm, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Rpm")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Googet, actual.Googet, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Googet")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Msi, actual.Msi, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Msi")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: 
dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Source, actual.Source, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Source")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PullDeps, actual.PullDeps, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("PullDeps")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Remote, actual.Remote, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Remote")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Gcs, actual.Gcs, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Gcs")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.LocalPath, actual.LocalPath, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("LocalPath")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.AllowInsecure, actual.AllowInsecure, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("AllowInsecure")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Uri, actual.Uri, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Uri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Sha256Checksum, actual.Sha256Checksum, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Sha256Checksum")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Bucket, actual.Bucket, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Bucket")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Object, actual.Object, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Object")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Generation, actual.Generation, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Generation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Source, actual.Source, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Source")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.PullDeps, actual.PullDeps, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("PullDeps")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Remote, actual.Remote, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Remote")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Gcs, actual.Gcs, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Gcs")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.LocalPath, actual.LocalPath, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("LocalPath")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.AllowInsecure, actual.AllowInsecure, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("AllowInsecure")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Uri, actual.Uri, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Uri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Sha256Checksum, actual.Sha256Checksum, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Sha256Checksum")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Bucket, actual.Bucket, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Bucket")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Object, actual.Object, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Object")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Generation, actual.Generation, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Generation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Source, actual.Source, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Source")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Properties, actual.Properties, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Properties")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Remote, actual.Remote, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Remote")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Gcs, actual.Gcs, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Gcs")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.LocalPath, actual.LocalPath, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("LocalPath")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.AllowInsecure, actual.AllowInsecure, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("AllowInsecure")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Uri, actual.Uri, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Uri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Sha256Checksum, actual.Sha256Checksum, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Sha256Checksum")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Bucket, actual.Bucket, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Bucket")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Object, actual.Object, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Object")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Generation, actual.Generation, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Generation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Apt, actual.Apt, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Apt")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Yum, actual.Yum, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Yum")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Zypper, actual.Zypper, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Zypper")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Goo, actual.Goo, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Goo")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.ArchiveType, actual.ArchiveType, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("ArchiveType")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Uri, actual.Uri, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Uri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Distribution, actual.Distribution, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Distribution")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Components, actual.Components, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Components")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GpgKey, actual.GpgKey, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("GpgKey")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Id, actual.Id, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Id")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.BaseUrl, actual.BaseUrl, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("BaseUrl")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GpgKeys, actual.GpgKeys, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("GpgKeys")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Id, actual.Id, 
dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Id")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.DisplayName, actual.DisplayName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("DisplayName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.BaseUrl, actual.BaseUrl, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("BaseUrl")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.GpgKeys, actual.GpgKeys, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("GpgKeys")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Name")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Url, actual.Url, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Url")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Validate, actual.Validate, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Validate")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Enforce, actual.Enforce, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Enforce")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.File, actual.File, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("File")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Script, actual.Script, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Script")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Args, actual.Args, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Args")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Interpreter, actual.Interpreter, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Interpreter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.OutputFilePath, actual.OutputFilePath, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("OutputFilePath")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile", a) - } - actual = &actualNotPointer - } - - if ds, err := 
dcl.Diff(desired.Remote, actual.Remote, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Remote")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Gcs, actual.Gcs, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Gcs")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LocalPath, actual.LocalPath, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("LocalPath")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.AllowInsecure, actual.AllowInsecure, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("AllowInsecure")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Uri, actual.Uri, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Uri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Sha256Checksum, actual.Sha256Checksum, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Sha256Checksum")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Bucket, actual.Bucket, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Bucket")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Object, actual.Object, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Object")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Generation, actual.Generation, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Generation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.File, actual.File, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("File")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Script, actual.Script, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Script")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Args, actual.Args, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Args")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Interpreter, actual.Interpreter, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Interpreter")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.OutputFilePath, actual.OutputFilePath, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("OutputFilePath")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile", a) - } - actual = &actualNotPointer - } - - if ds, err := 
dcl.Diff(desired.Remote, actual.Remote, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Remote")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Gcs, actual.Gcs, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Gcs")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LocalPath, actual.LocalPath, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("LocalPath")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.AllowInsecure, actual.AllowInsecure, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("AllowInsecure")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Uri, actual.Uri, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Uri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Sha256Checksum, actual.Sha256Checksum, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Sha256Checksum")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Bucket, actual.Bucket, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Bucket")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Object, actual.Object, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Object")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Generation, actual.Generation, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Generation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.File, actual.File, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("File")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Content, actual.Content, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Content")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Path, actual.Path, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Path")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.State, actual.State, dcl.DiffInfo{Type: "EnumType", OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("State")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Permissions, actual.Permissions, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Permissions")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Remote, actual.Remote, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Remote")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = 
append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Gcs, actual.Gcs, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsNewStyle, EmptyObject: EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Gcs")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.LocalPath, actual.LocalPath, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("LocalPath")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.AllowInsecure, actual.AllowInsecure, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("AllowInsecure")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Uri, actual.Uri, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Uri")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Sha256Checksum, actual.Sha256Checksum, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Sha256Checksum")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs or *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Bucket, actual.Bucket, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Bucket")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Object, actual.Object, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Object")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Generation, actual.Generation, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Generation")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentInstanceFilterNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentInstanceFilter) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentInstanceFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentInstanceFilter or *OSPolicyAssignmentInstanceFilter", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentInstanceFilter) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentInstanceFilter) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentInstanceFilter", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.All, actual.All, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("All")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.InclusionLabels, actual.InclusionLabels, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentInstanceFilterInclusionLabelsNewStyle, EmptyObject: EmptyOSPolicyAssignmentInstanceFilterInclusionLabels, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("InclusionLabels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.ExclusionLabels, actual.ExclusionLabels, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentInstanceFilterExclusionLabelsNewStyle, EmptyObject: EmptyOSPolicyAssignmentInstanceFilterExclusionLabels, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("ExclusionLabels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - - if ds, err := dcl.Diff(desired.Inventories, actual.Inventories, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentInstanceFilterInventoriesNewStyle, EmptyObject: EmptyOSPolicyAssignmentInstanceFilterInventories, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Inventories")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -func compareOSPolicyAssignmentInstanceFilterInclusionLabelsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentInstanceFilterInclusionLabels) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentInstanceFilterInclusionLabels) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentInstanceFilterInclusionLabels or *OSPolicyAssignmentInstanceFilterInclusionLabels", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentInstanceFilterInclusionLabels) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentInstanceFilterInclusionLabels) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentInstanceFilterInclusionLabels", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentInstanceFilterExclusionLabelsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentInstanceFilterExclusionLabels) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentInstanceFilterExclusionLabels) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentInstanceFilterExclusionLabels or *OSPolicyAssignmentInstanceFilterExclusionLabels", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentInstanceFilterExclusionLabels) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentInstanceFilterExclusionLabels) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentInstanceFilterExclusionLabels", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Labels, actual.Labels, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Labels")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentInstanceFilterInventoriesNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentInstanceFilterInventories) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentInstanceFilterInventories) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentInstanceFilterInventories or *OSPolicyAssignmentInstanceFilterInventories", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentInstanceFilterInventories) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentInstanceFilterInventories) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentInstanceFilterInventories", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.OSShortName, actual.OSShortName, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("OsShortName")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.OSVersion, actual.OSVersion, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("OsVersion")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentRolloutNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentRollout) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentRollout) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentRollout or *OSPolicyAssignmentRollout", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentRollout) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentRollout) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentRollout", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.DisruptionBudget, actual.DisruptionBudget, dcl.DiffInfo{ObjectFunction: compareOSPolicyAssignmentRolloutDisruptionBudgetNewStyle, EmptyObject: EmptyOSPolicyAssignmentRolloutDisruptionBudget, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("DisruptionBudget")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.MinWaitDuration, actual.MinWaitDuration, dcl.DiffInfo{CustomDiff: canonicalizeOSPolicyAssignmentRolloutMinWaitDuration, OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("MinWaitDuration")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) 
- } - return diffs, nil -} - -func compareOSPolicyAssignmentRolloutDisruptionBudgetNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) { - var diffs []*dcl.FieldDiff - - desired, ok := d.(*OSPolicyAssignmentRolloutDisruptionBudget) - if !ok { - desiredNotPointer, ok := d.(OSPolicyAssignmentRolloutDisruptionBudget) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentRolloutDisruptionBudget or *OSPolicyAssignmentRolloutDisruptionBudget", d) - } - desired = &desiredNotPointer - } - actual, ok := a.(*OSPolicyAssignmentRolloutDisruptionBudget) - if !ok { - actualNotPointer, ok := a.(OSPolicyAssignmentRolloutDisruptionBudget) - if !ok { - return nil, fmt.Errorf("obj %v is not a OSPolicyAssignmentRolloutDisruptionBudget", a) - } - actual = &actualNotPointer - } - - if ds, err := dcl.Diff(desired.Fixed, actual.Fixed, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Fixed")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - - if ds, err := dcl.Diff(desired.Percent, actual.Percent, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation")}, fn.AddNest("Percent")); len(ds) != 0 || err != nil { - if err != nil { - return nil, err - } - diffs = append(diffs, ds...) - } - return diffs, nil -} - -// urlNormalized returns a copy of the resource struct with values normalized -// for URL substitutions. For instance, it converts long-form self-links to -// short-form so they can be substituted in. 
-func (r *OSPolicyAssignment) urlNormalized() *OSPolicyAssignment { - normalized := dcl.Copy(*r).(OSPolicyAssignment) - normalized.Name = dcl.SelfLinkToName(r.Name) - normalized.Description = dcl.SelfLinkToName(r.Description) - normalized.RevisionId = dcl.SelfLinkToName(r.RevisionId) - normalized.Etag = dcl.SelfLinkToName(r.Etag) - normalized.Uid = dcl.SelfLinkToName(r.Uid) - normalized.Project = dcl.SelfLinkToName(r.Project) - normalized.Location = dcl.SelfLinkToName(r.Location) - return &normalized -} - -func (r *OSPolicyAssignment) updateURL(userBasePath, updateName string) (string, error) { - nr := r.urlNormalized() - if updateName == "UpdateOSPolicyAssignment" { - fields := map[string]interface{}{ - "project": dcl.ValueOrEmptyString(nr.Project), - "location": dcl.ValueOrEmptyString(nr.Location), - "name": dcl.ValueOrEmptyString(nr.Name), - } - return dcl.URL("projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}", nr.basePath(), userBasePath, fields), nil - - } - - return "", fmt.Errorf("unknown update name: %s", updateName) -} - -// marshal encodes the OSPolicyAssignment resource into JSON for a Create request, and -// performs transformations from the resource schema to the API schema if -// necessary. -func (r *OSPolicyAssignment) marshal(c *Client) ([]byte, error) { - m, err := expandOSPolicyAssignment(c, r) - if err != nil { - return nil, fmt.Errorf("error marshalling OSPolicyAssignment: %w", err) - } - dcl.MoveMapEntry( - m, - []string{"skipAwaitRollout"}, - []string{}, - ) - - return json.Marshal(m) -} - -// unmarshalOSPolicyAssignment decodes JSON responses into the OSPolicyAssignment resource schema. 
-func unmarshalOSPolicyAssignment(b []byte, c *Client, res *OSPolicyAssignment) (*OSPolicyAssignment, error) { - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return unmarshalMapOSPolicyAssignment(m, c, res) -} - -func unmarshalMapOSPolicyAssignment(m map[string]interface{}, c *Client, res *OSPolicyAssignment) (*OSPolicyAssignment, error) { - - flattened := flattenOSPolicyAssignment(c, m, res) - if flattened == nil { - return nil, fmt.Errorf("attempted to flatten empty json object") - } - return flattened, nil -} - -// expandOSPolicyAssignment expands OSPolicyAssignment into a JSON request object. -func expandOSPolicyAssignment(c *Client, f *OSPolicyAssignment) (map[string]interface{}, error) { - m := make(map[string]interface{}) - res := f - _ = res - if v, err := dcl.DeriveField("projects/%s/locations/%s/osPolicyAssignments/%s", f.Name, dcl.SelfLinkToName(f.Project), dcl.SelfLinkToName(f.Location), dcl.SelfLinkToName(f.Name)); err != nil { - return nil, fmt.Errorf("error expanding Name into name: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - if v := f.Description; dcl.ValueShouldBeSent(v) { - m["description"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesSlice(c, f.OSPolicies, res); err != nil { - return nil, fmt.Errorf("error expanding OSPolicies into osPolicies: %w", err) - } else if v != nil { - m["osPolicies"] = v - } - if v, err := expandOSPolicyAssignmentInstanceFilter(c, f.InstanceFilter, res); err != nil { - return nil, fmt.Errorf("error expanding InstanceFilter into instanceFilter: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["instanceFilter"] = v - } - if v, err := expandOSPolicyAssignmentRollout(c, f.Rollout, res); err != nil { - return nil, fmt.Errorf("error expanding Rollout into rollout: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["rollout"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error 
expanding Project into project: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["project"] = v - } - if v, err := dcl.EmptyValue(); err != nil { - return nil, fmt.Errorf("error expanding Location into location: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["location"] = v - } - if v := f.SkipAwaitRollout; dcl.ValueShouldBeSent(v) { - m["skipAwaitRollout"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignment flattens OSPolicyAssignment from a JSON request object into the -// OSPolicyAssignment type. -func flattenOSPolicyAssignment(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignment { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - if len(m) == 0 { - return nil - } - - resultRes := &OSPolicyAssignment{} - resultRes.Name = dcl.FlattenString(m["name"]) - resultRes.Description = dcl.FlattenString(m["description"]) - resultRes.OSPolicies = flattenOSPolicyAssignmentOSPoliciesSlice(c, m["osPolicies"], res) - resultRes.InstanceFilter = flattenOSPolicyAssignmentInstanceFilter(c, m["instanceFilter"], res) - resultRes.Rollout = flattenOSPolicyAssignmentRollout(c, m["rollout"], res) - resultRes.RevisionId = dcl.FlattenString(m["revisionId"]) - resultRes.RevisionCreateTime = dcl.FlattenString(m["revisionCreateTime"]) - resultRes.Etag = dcl.FlattenString(m["etag"]) - resultRes.RolloutState = flattenOSPolicyAssignmentRolloutStateEnum(m["rolloutState"]) - resultRes.Baseline = dcl.FlattenBool(m["baseline"]) - resultRes.Deleted = dcl.FlattenBool(m["deleted"]) - resultRes.Reconciling = dcl.FlattenBool(m["reconciling"]) - resultRes.Uid = dcl.FlattenString(m["uid"]) - resultRes.Project = dcl.FlattenString(m["project"]) - resultRes.Location = dcl.FlattenString(m["location"]) - resultRes.SkipAwaitRollout = dcl.FlattenBool(m["skipAwaitRollout"]) - - return resultRes -} - -// expandOSPolicyAssignmentOSPoliciesMap expands the contents of OSPolicyAssignmentOSPolicies into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesMap(c *Client, f map[string]OSPolicyAssignmentOSPolicies, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPolicies(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesSlice expands the contents of OSPolicyAssignmentOSPolicies into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesSlice(c *Client, f []OSPolicyAssignmentOSPolicies, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPolicies(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesMap flattens the contents of OSPolicyAssignmentOSPolicies from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPolicies { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPolicies{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPolicies{} - } - - items := make(map[string]OSPolicyAssignmentOSPolicies) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPolicies(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesSlice flattens the contents of OSPolicyAssignmentOSPolicies from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPolicies { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPolicies{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPolicies{} - } - - items := make([]OSPolicyAssignmentOSPolicies, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPolicies(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPolicies expands an instance of OSPolicyAssignmentOSPolicies into a JSON -// request object. -func expandOSPolicyAssignmentOSPolicies(c *Client, f *OSPolicyAssignmentOSPolicies, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Id; !dcl.IsEmptyValueIndirect(v) { - m["id"] = v - } - if v := f.Description; !dcl.IsEmptyValueIndirect(v) { - m["description"] = v - } - if v := f.Mode; !dcl.IsEmptyValueIndirect(v) { - m["mode"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsSlice(c, f.ResourceGroups, res); err != nil { - return nil, fmt.Errorf("error expanding ResourceGroups into resourceGroups: %w", err) - } else if v != nil { - m["resourceGroups"] = v - } - if v := f.AllowNoResourceGroupMatch; !dcl.IsEmptyValueIndirect(v) { - m["allowNoResourceGroupMatch"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPolicies flattens an instance of OSPolicyAssignmentOSPolicies from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPolicies(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPolicies { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPolicies{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPolicies - } - r.Id = dcl.FlattenString(m["id"]) - r.Description = dcl.FlattenString(m["description"]) - r.Mode = flattenOSPolicyAssignmentOSPoliciesModeEnum(m["mode"]) - r.ResourceGroups = flattenOSPolicyAssignmentOSPoliciesResourceGroupsSlice(c, m["resourceGroups"], res) - r.AllowNoResourceGroupMatch = dcl.FlattenBool(m["allowNoResourceGroupMatch"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroups into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroups, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroups(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroups into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroups, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroups(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroups from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroups { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroups{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroups{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroups) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroups(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroups from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroups { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroups{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroups{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroups, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroups(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroups expands an instance of OSPolicyAssignmentOSPoliciesResourceGroups into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroups(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroups, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSlice(c, f.InventoryFilters, res); err != nil { - return nil, fmt.Errorf("error expanding InventoryFilters into inventoryFilters: %w", err) - } else if v != nil { - m["inventoryFilters"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesSlice(c, f.Resources, res); err != nil { - return nil, fmt.Errorf("error expanding Resources into resources: %w", err) - } else if v != nil { - m["resources"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroups flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroups from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroups(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroups { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroups{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroups - } - r.InventoryFilters = flattenOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSlice(c, m["inventoryFilters"], res) - r.Resources = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesSlice(c, m["resources"], res) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.OSShortName; !dcl.IsEmptyValueIndirect(v) { - m["osShortName"] = v - } - if v := f.OSVersion; !dcl.IsEmptyValueIndirect(v) { - m["osVersion"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters - } - r.OSShortName = dcl.FlattenString(m["osShortName"]) - r.OSVersion = dcl.FlattenString(m["osVersion"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResources into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResources, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResources(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResources into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResources, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResources(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResources from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResources { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResources{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResources{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResources) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResources(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResources from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResources { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResources{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResources{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResources, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResources(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResources expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResources into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResources(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResources, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Id; !dcl.IsEmptyValueIndirect(v) { - m["id"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(c, f.Pkg, res); err != nil { - return nil, fmt.Errorf("error expanding Pkg into pkg: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["pkg"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(c, f.Repository, res); err != nil { - return nil, fmt.Errorf("error expanding Repository into repository: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["repository"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(c, f.Exec, res); err != nil { - return nil, fmt.Errorf("error expanding Exec into exec: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["exec"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(c, f.File, res); err != nil { - return nil, 
fmt.Errorf("error expanding File into file: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["file"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResources flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResources from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResources(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResources { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResources{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResources - } - r.Id = dcl.FlattenString(m["id"]) - r.Pkg = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(c, m["pkg"], res) - r.Repository = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(c, m["repository"], res) - r.Exec = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(c, m["exec"], res) - r.File = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(c, m["file"], res) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.DesiredState; !dcl.IsEmptyValueIndirect(v) { - m["desiredState"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(c, f.Apt, res); err != nil { - return nil, fmt.Errorf("error expanding Apt into apt: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["apt"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(c, f.Deb, res); err != nil { - return nil, fmt.Errorf("error expanding Deb into deb: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["deb"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(c, f.Yum, res); err != nil { - return nil, fmt.Errorf("error expanding Yum into yum: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["yum"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(c, f.Zypper, res); err != nil { - return nil, fmt.Errorf("error expanding Zypper into zypper: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["zypper"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(c, f.Rpm, res); err != nil { - return nil, fmt.Errorf("error expanding Rpm into rpm: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["rpm"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(c, f.Googet, res); err != nil { - return nil, fmt.Errorf("error expanding Googet into googet: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["googet"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(c, f.Msi, res); err != nil { - return nil, fmt.Errorf("error expanding Msi into msi: %w", err) - } 
else if !dcl.IsEmptyValueIndirect(v) { - m["msi"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg - } - r.DesiredState = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum(m["desiredState"]) - r.Apt = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(c, m["apt"], res) - r.Deb = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(c, m["deb"], res) - r.Yum = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(c, m["yum"], res) - r.Zypper = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(c, m["zypper"], res) - r.Rpm = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(c, m["rpm"], res) - r.Googet = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(c, m["googet"], res) - r.Msi = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(c, m["msi"], res) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Name; !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt - } - r.Name = dcl.FlattenString(m["name"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(c, f.Source, res); err != nil { - return nil, fmt.Errorf("error expanding Source into source: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["source"] = v - } - if v := f.PullDeps; !dcl.IsEmptyValueIndirect(v) { - m["pullDeps"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb - } - r.Source = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(c, m["source"], res) - r.PullDeps = dcl.FlattenBool(m["pullDeps"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(c, f.Remote, res); err != nil { - return nil, fmt.Errorf("error expanding Remote into remote: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["remote"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(c, f.Gcs, res); err != nil { - return nil, fmt.Errorf("error expanding Gcs into gcs: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gcs"] = v - } - if v := f.LocalPath; !dcl.IsEmptyValueIndirect(v) { - m["localPath"] = v - } - if v := f.AllowInsecure; !dcl.IsEmptyValueIndirect(v) { - m["allowInsecure"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource flattens an 
instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource - } - r.Remote = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(c, m["remote"], res) - r.Gcs = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(c, m["gcs"], res) - r.LocalPath = dcl.FlattenString(m["localPath"]) - r.AllowInsecure = dcl.FlattenBool(m["allowInsecure"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Uri; !dcl.IsEmptyValueIndirect(v) { - m["uri"] = v - } - if v := f.Sha256Checksum; !dcl.IsEmptyValueIndirect(v) { - m["sha256Checksum"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote - } - r.Uri = dcl.FlattenString(m["uri"]) - r.Sha256Checksum = dcl.FlattenString(m["sha256Checksum"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Bucket; !dcl.IsEmptyValueIndirect(v) { - m["bucket"] = v - } - if v := f.Object; !dcl.IsEmptyValueIndirect(v) { - m["object"] = v - } - if v := f.Generation; !dcl.IsEmptyValueIndirect(v) { - m["generation"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs - } - r.Bucket = dcl.FlattenString(m["bucket"]) - r.Object = dcl.FlattenString(m["object"]) - r.Generation = dcl.FlattenInteger(m["generation"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Name; !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum - } - r.Name = dcl.FlattenString(m["name"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Name; !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper - } - r.Name = dcl.FlattenString(m["name"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(c, f.Source, res); err != nil { - return nil, fmt.Errorf("error expanding Source into source: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["source"] = v - } - if v := f.PullDeps; !dcl.IsEmptyValueIndirect(v) { - m["pullDeps"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm - } - r.Source = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(c, m["source"], res) - r.PullDeps = dcl.FlattenBool(m["pullDeps"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(c, f.Remote, res); err != nil { - return nil, fmt.Errorf("error expanding Remote into remote: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["remote"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(c, f.Gcs, res); err != nil { - return nil, fmt.Errorf("error expanding Gcs into gcs: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gcs"] = v - } - if v := f.LocalPath; !dcl.IsEmptyValueIndirect(v) { - m["localPath"] = v - } - if v := f.AllowInsecure; !dcl.IsEmptyValueIndirect(v) { - m["allowInsecure"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource flattens an 
instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource - } - r.Remote = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(c, m["remote"], res) - r.Gcs = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(c, m["gcs"], res) - r.LocalPath = dcl.FlattenString(m["localPath"]) - r.AllowInsecure = dcl.FlattenBool(m["allowInsecure"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Uri; !dcl.IsEmptyValueIndirect(v) { - m["uri"] = v - } - if v := f.Sha256Checksum; !dcl.IsEmptyValueIndirect(v) { - m["sha256Checksum"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote - } - r.Uri = dcl.FlattenString(m["uri"]) - r.Sha256Checksum = dcl.FlattenString(m["sha256Checksum"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Bucket; !dcl.IsEmptyValueIndirect(v) { - m["bucket"] = v - } - if v := f.Object; !dcl.IsEmptyValueIndirect(v) { - m["object"] = v - } - if v := f.Generation; !dcl.IsEmptyValueIndirect(v) { - m["generation"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs - } - r.Bucket = dcl.FlattenString(m["bucket"]) - r.Object = dcl.FlattenString(m["object"]) - r.Generation = dcl.FlattenInteger(m["generation"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Name; !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget - } - r.Name = dcl.FlattenString(m["name"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(c, f.Source, res); err != nil { - return nil, fmt.Errorf("error expanding Source into source: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["source"] = v - } - if v := f.Properties; v != nil { - m["properties"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi - } - r.Source = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(c, m["source"], res) - r.Properties = dcl.FlattenStringSlice(m["properties"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(c, f.Remote, res); err != nil { - return nil, fmt.Errorf("error expanding Remote into remote: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["remote"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(c, f.Gcs, res); err != nil { - return nil, fmt.Errorf("error expanding Gcs into gcs: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gcs"] = v - } - if v := f.LocalPath; !dcl.IsEmptyValueIndirect(v) { - m["localPath"] = v - } - if v := f.AllowInsecure; !dcl.IsEmptyValueIndirect(v) { - m["allowInsecure"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource flattens an 
instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource - } - r.Remote = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(c, m["remote"], res) - r.Gcs = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(c, m["gcs"], res) - r.LocalPath = dcl.FlattenString(m["localPath"]) - r.AllowInsecure = dcl.FlattenBool(m["allowInsecure"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Uri; !dcl.IsEmptyValueIndirect(v) { - m["uri"] = v - } - if v := f.Sha256Checksum; !dcl.IsEmptyValueIndirect(v) { - m["sha256Checksum"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote - } - r.Uri = dcl.FlattenString(m["uri"]) - r.Sha256Checksum = dcl.FlattenString(m["sha256Checksum"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Bucket; !dcl.IsEmptyValueIndirect(v) { - m["bucket"] = v - } - if v := f.Object; !dcl.IsEmptyValueIndirect(v) { - m["object"] = v - } - if v := f.Generation; !dcl.IsEmptyValueIndirect(v) { - m["generation"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs - } - r.Bucket = dcl.FlattenString(m["bucket"]) - r.Object = dcl.FlattenString(m["object"]) - r.Generation = dcl.FlattenInteger(m["generation"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositorySlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositorySlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositorySlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositorySlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(c, f.Apt, res); err != nil { - return nil, fmt.Errorf("error expanding Apt into apt: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["apt"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(c, f.Yum, res); err != nil { - return nil, fmt.Errorf("error expanding Yum into yum: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["yum"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(c, f.Zypper, res); err != nil { - return nil, fmt.Errorf("error expanding Zypper into zypper: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["zypper"] = v - } - if v, err := 
expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(c, f.Goo, res); err != nil { - return nil, fmt.Errorf("error expanding Goo into goo: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["goo"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository - } - r.Apt = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(c, m["apt"], res) - r.Yum = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(c, m["yum"], res) - r.Zypper = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(c, m["zypper"], res) - r.Goo = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(c, m["goo"], res) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.ArchiveType; !dcl.IsEmptyValueIndirect(v) { - m["archiveType"] = v - } - if v := f.Uri; !dcl.IsEmptyValueIndirect(v) { - m["uri"] = v - } - if v := f.Distribution; !dcl.IsEmptyValueIndirect(v) { - m["distribution"] = v - } - if v := f.Components; v != nil { - m["components"] = v - } - if v := f.GpgKey; !dcl.IsEmptyValueIndirect(v) { - m["gpgKey"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt - } - r.ArchiveType = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum(m["archiveType"]) - r.Uri = dcl.FlattenString(m["uri"]) - r.Distribution = dcl.FlattenString(m["distribution"]) - r.Components = dcl.FlattenStringSlice(m["components"]) - r.GpgKey = dcl.FlattenString(m["gpgKey"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Id; !dcl.IsEmptyValueIndirect(v) { - m["id"] = v - } - if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { - m["displayName"] = v - } - if v := f.BaseUrl; !dcl.IsEmptyValueIndirect(v) { - m["baseUrl"] = v - } - if v := f.GpgKeys; v != nil { - m["gpgKeys"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum - } - r.Id = dcl.FlattenString(m["id"]) - r.DisplayName = dcl.FlattenString(m["displayName"]) - r.BaseUrl = dcl.FlattenString(m["baseUrl"]) - r.GpgKeys = dcl.FlattenStringSlice(m["gpgKeys"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Id; !dcl.IsEmptyValueIndirect(v) { - m["id"] = v - } - if v := f.DisplayName; !dcl.IsEmptyValueIndirect(v) { - m["displayName"] = v - } - if v := f.BaseUrl; !dcl.IsEmptyValueIndirect(v) { - m["baseUrl"] = v - } - if v := f.GpgKeys; v != nil { - m["gpgKeys"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper - } - r.Id = dcl.FlattenString(m["id"]) - r.DisplayName = dcl.FlattenString(m["displayName"]) - r.BaseUrl = dcl.FlattenString(m["baseUrl"]) - r.GpgKeys = dcl.FlattenStringSlice(m["gpgKeys"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Name; !dcl.IsEmptyValueIndirect(v) { - m["name"] = v - } - if v := f.Url; !dcl.IsEmptyValueIndirect(v) { - m["url"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo - } - r.Name = dcl.FlattenString(m["name"]) - r.Url = dcl.FlattenString(m["url"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(c, f.Validate, res); err != nil { - return nil, fmt.Errorf("error expanding Validate into validate: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["validate"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(c, f.Enforce, res); err != nil { - return nil, fmt.Errorf("error expanding Enforce into enforce: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["enforce"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec - } - r.Validate = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(c, m["validate"], res) - r.Enforce = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(c, m["enforce"], res) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(c, f.File, res); err != nil { - return nil, fmt.Errorf("error expanding File into file: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["file"] = v - } - if v := f.Script; !dcl.IsEmptyValueIndirect(v) { - m["script"] = v - } - if v := f.Args; v != nil { - m["args"] = v - } - if v := f.Interpreter; !dcl.IsEmptyValueIndirect(v) { - m["interpreter"] = v - } - if v := f.OutputFilePath; !dcl.IsEmptyValueIndirect(v) { - m["outputFilePath"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate - } - r.File = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(c, m["file"], res) - r.Script = dcl.FlattenString(m["script"]) - r.Args = dcl.FlattenStringSlice(m["args"]) - r.Interpreter = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum(m["interpreter"]) - r.OutputFilePath = dcl.FlattenString(m["outputFilePath"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(c, f.Remote, res); err != nil { - return nil, fmt.Errorf("error expanding Remote into remote: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["remote"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(c, f.Gcs, res); err != nil { - return nil, fmt.Errorf("error expanding Gcs into gcs: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gcs"] = v - } - if v := f.LocalPath; !dcl.IsEmptyValueIndirect(v) { - m["localPath"] = v - } - if v := f.AllowInsecure; !dcl.IsEmptyValueIndirect(v) { - m["allowInsecure"] = v - } - - return m, nil -} - -// 
flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile - } - r.Remote = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(c, m["remote"], res) - r.Gcs = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(c, m["gcs"], res) - r.LocalPath = dcl.FlattenString(m["localPath"]) - r.AllowInsecure = dcl.FlattenBool(m["allowInsecure"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Uri; !dcl.IsEmptyValueIndirect(v) { - m["uri"] = v - } - if v := f.Sha256Checksum; !dcl.IsEmptyValueIndirect(v) { - m["sha256Checksum"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote - } - r.Uri = dcl.FlattenString(m["uri"]) - r.Sha256Checksum = dcl.FlattenString(m["sha256Checksum"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Bucket; !dcl.IsEmptyValueIndirect(v) { - m["bucket"] = v - } - if v := f.Object; !dcl.IsEmptyValueIndirect(v) { - m["object"] = v - } - if v := f.Generation; !dcl.IsEmptyValueIndirect(v) { - m["generation"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs - } - r.Bucket = dcl.FlattenString(m["bucket"]) - r.Object = dcl.FlattenString(m["object"]) - r.Generation = dcl.FlattenInteger(m["generation"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(c, f.File, res); err != nil { - return nil, fmt.Errorf("error expanding File into file: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["file"] = v - } - if v := f.Script; !dcl.IsEmptyValueIndirect(v) { - m["script"] = v - } - if v := f.Args; v != nil { - m["args"] = v - } - if v := f.Interpreter; !dcl.IsEmptyValueIndirect(v) { - m["interpreter"] = v - } - if v := f.OutputFilePath; !dcl.IsEmptyValueIndirect(v) { - m["outputFilePath"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce - } - r.File = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(c, m["file"], res) - r.Script = dcl.FlattenString(m["script"]) - r.Args = dcl.FlattenStringSlice(m["args"]) - r.Interpreter = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum(m["interpreter"]) - r.OutputFilePath = dcl.FlattenString(m["outputFilePath"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(c, f.Remote, res); err != nil { - return nil, fmt.Errorf("error expanding Remote into remote: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["remote"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(c, f.Gcs, res); err != nil { - return nil, fmt.Errorf("error expanding Gcs into gcs: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gcs"] = v - } - if v := f.LocalPath; !dcl.IsEmptyValueIndirect(v) { - m["localPath"] = v - } - if v := f.AllowInsecure; !dcl.IsEmptyValueIndirect(v) { - m["allowInsecure"] = v - } - - return m, nil -} - -// 
flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile - } - r.Remote = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(c, m["remote"], res) - r.Gcs = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(c, m["gcs"], res) - r.LocalPath = dcl.FlattenString(m["localPath"]) - r.AllowInsecure = dcl.FlattenBool(m["allowInsecure"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Uri; !dcl.IsEmptyValueIndirect(v) { - m["uri"] = v - } - if v := f.Sha256Checksum; !dcl.IsEmptyValueIndirect(v) { - m["sha256Checksum"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote - } - r.Uri = dcl.FlattenString(m["uri"]) - r.Sha256Checksum = dcl.FlattenString(m["sha256Checksum"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Bucket; !dcl.IsEmptyValueIndirect(v) { - m["bucket"] = v - } - if v := f.Object; !dcl.IsEmptyValueIndirect(v) { - m["object"] = v - } - if v := f.Generation; !dcl.IsEmptyValueIndirect(v) { - m["generation"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs - } - r.Bucket = dcl.FlattenString(m["bucket"]) - r.Object = dcl.FlattenString(m["object"]) - r.Generation = dcl.FlattenInteger(m["generation"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(c, f.File, res); err != nil { - return nil, fmt.Errorf("error expanding File into file: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["file"] = v - } - if v := f.Content; !dcl.IsEmptyValueIndirect(v) { - m["content"] = v - } - if v := f.Path; !dcl.IsEmptyValueIndirect(v) { - m["path"] = v - } - if v := f.State; !dcl.IsEmptyValueIndirect(v) { - m["state"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile - } - r.File = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(c, m["file"], res) - r.Content = dcl.FlattenString(m["content"]) - r.Path = dcl.FlattenString(m["path"]) - r.State = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum(m["state"]) - r.Permissions = dcl.FlattenString(m["permissions"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(c, f.Remote, res); err != nil { - return nil, fmt.Errorf("error expanding Remote into remote: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["remote"] = v - } - if v, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(c, f.Gcs, res); err != nil { - return nil, fmt.Errorf("error expanding Gcs into gcs: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["gcs"] = v - } - if v := f.LocalPath; !dcl.IsEmptyValueIndirect(v) { - m["localPath"] = v - } - if v := f.AllowInsecure; !dcl.IsEmptyValueIndirect(v) { - m["allowInsecure"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile - } - r.Remote = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(c, m["remote"], res) - r.Gcs = flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(c, m["gcs"], res) - r.LocalPath = dcl.FlattenString(m["localPath"]) - r.AllowInsecure = dcl.FlattenBool(m["allowInsecure"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Uri; !dcl.IsEmptyValueIndirect(v) { - m["uri"] = v - } - if v := f.Sha256Checksum; !dcl.IsEmptyValueIndirect(v) { - m["sha256Checksum"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote - } - r.Uri = dcl.FlattenString(m["uri"]) - r.Sha256Checksum = dcl.FlattenString(m["sha256Checksum"]) - - return r -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsMap expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsMap(c *Client, f map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsSlice expands the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs into a JSON -// request object. 
-func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsSlice(c *Client, f []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs expands an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs into a JSON -// request object. -func expandOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(c *Client, f *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Bucket; !dcl.IsEmptyValueIndirect(v) { - m["bucket"] = v - } - if v := f.Object; !dcl.IsEmptyValueIndirect(v) { - m["object"] = v - } - if v := f.Generation; !dcl.IsEmptyValueIndirect(v) { - m["generation"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs flattens an instance of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs - } - r.Bucket = dcl.FlattenString(m["bucket"]) - r.Object = dcl.FlattenString(m["object"]) - r.Generation = dcl.FlattenInteger(m["generation"]) - - return r -} - -// expandOSPolicyAssignmentInstanceFilterMap expands the contents of OSPolicyAssignmentInstanceFilter into a JSON -// request object. -func expandOSPolicyAssignmentInstanceFilterMap(c *Client, f map[string]OSPolicyAssignmentInstanceFilter, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentInstanceFilter(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentInstanceFilterSlice expands the contents of OSPolicyAssignmentInstanceFilter into a JSON -// request object. -func expandOSPolicyAssignmentInstanceFilterSlice(c *Client, f []OSPolicyAssignmentInstanceFilter, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentInstanceFilter(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentInstanceFilterMap flattens the contents of OSPolicyAssignmentInstanceFilter from a JSON -// response object. 
-func flattenOSPolicyAssignmentInstanceFilterMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentInstanceFilter { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentInstanceFilter{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentInstanceFilter{} - } - - items := make(map[string]OSPolicyAssignmentInstanceFilter) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentInstanceFilter(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentInstanceFilterSlice flattens the contents of OSPolicyAssignmentInstanceFilter from a JSON -// response object. -func flattenOSPolicyAssignmentInstanceFilterSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentInstanceFilter { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentInstanceFilter{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentInstanceFilter{} - } - - items := make([]OSPolicyAssignmentInstanceFilter, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentInstanceFilter(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentInstanceFilter expands an instance of OSPolicyAssignmentInstanceFilter into a JSON -// request object. 
-func expandOSPolicyAssignmentInstanceFilter(c *Client, f *OSPolicyAssignmentInstanceFilter, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.All; v != nil { - m["all"] = v - } - if v, err := expandOSPolicyAssignmentInstanceFilterInclusionLabelsSlice(c, f.InclusionLabels, res); err != nil { - return nil, fmt.Errorf("error expanding InclusionLabels into inclusionLabels: %w", err) - } else if v != nil { - m["inclusionLabels"] = v - } - if v, err := expandOSPolicyAssignmentInstanceFilterExclusionLabelsSlice(c, f.ExclusionLabels, res); err != nil { - return nil, fmt.Errorf("error expanding ExclusionLabels into exclusionLabels: %w", err) - } else if v != nil { - m["exclusionLabels"] = v - } - if v, err := expandOSPolicyAssignmentInstanceFilterInventoriesSlice(c, f.Inventories, res); err != nil { - return nil, fmt.Errorf("error expanding Inventories into inventories: %w", err) - } else if v != nil { - m["inventories"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentInstanceFilter flattens an instance of OSPolicyAssignmentInstanceFilter from a JSON -// response object. 
-func flattenOSPolicyAssignmentInstanceFilter(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentInstanceFilter { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentInstanceFilter{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentInstanceFilter - } - r.All = dcl.FlattenBool(m["all"]) - r.InclusionLabels = flattenOSPolicyAssignmentInstanceFilterInclusionLabelsSlice(c, m["inclusionLabels"], res) - r.ExclusionLabels = flattenOSPolicyAssignmentInstanceFilterExclusionLabelsSlice(c, m["exclusionLabels"], res) - r.Inventories = flattenOSPolicyAssignmentInstanceFilterInventoriesSlice(c, m["inventories"], res) - - return r -} - -// expandOSPolicyAssignmentInstanceFilterInclusionLabelsMap expands the contents of OSPolicyAssignmentInstanceFilterInclusionLabels into a JSON -// request object. -func expandOSPolicyAssignmentInstanceFilterInclusionLabelsMap(c *Client, f map[string]OSPolicyAssignmentInstanceFilterInclusionLabels, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentInstanceFilterInclusionLabels(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentInstanceFilterInclusionLabelsSlice expands the contents of OSPolicyAssignmentInstanceFilterInclusionLabels into a JSON -// request object. 
-func expandOSPolicyAssignmentInstanceFilterInclusionLabelsSlice(c *Client, f []OSPolicyAssignmentInstanceFilterInclusionLabels, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentInstanceFilterInclusionLabels(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentInstanceFilterInclusionLabelsMap flattens the contents of OSPolicyAssignmentInstanceFilterInclusionLabels from a JSON -// response object. -func flattenOSPolicyAssignmentInstanceFilterInclusionLabelsMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentInstanceFilterInclusionLabels { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentInstanceFilterInclusionLabels{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentInstanceFilterInclusionLabels{} - } - - items := make(map[string]OSPolicyAssignmentInstanceFilterInclusionLabels) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentInstanceFilterInclusionLabels(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentInstanceFilterInclusionLabelsSlice flattens the contents of OSPolicyAssignmentInstanceFilterInclusionLabels from a JSON -// response object. 
-func flattenOSPolicyAssignmentInstanceFilterInclusionLabelsSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentInstanceFilterInclusionLabels { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentInstanceFilterInclusionLabels{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentInstanceFilterInclusionLabels{} - } - - items := make([]OSPolicyAssignmentInstanceFilterInclusionLabels, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentInstanceFilterInclusionLabels(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentInstanceFilterInclusionLabels expands an instance of OSPolicyAssignmentInstanceFilterInclusionLabels into a JSON -// request object. -func expandOSPolicyAssignmentInstanceFilterInclusionLabels(c *Client, f *OSPolicyAssignmentInstanceFilterInclusionLabels, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { - m["labels"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentInstanceFilterInclusionLabels flattens an instance of OSPolicyAssignmentInstanceFilterInclusionLabels from a JSON -// response object. -func flattenOSPolicyAssignmentInstanceFilterInclusionLabels(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentInstanceFilterInclusionLabels { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentInstanceFilterInclusionLabels{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentInstanceFilterInclusionLabels - } - r.Labels = dcl.FlattenKeyValuePairs(m["labels"]) - - return r -} - -// expandOSPolicyAssignmentInstanceFilterExclusionLabelsMap expands the contents of OSPolicyAssignmentInstanceFilterExclusionLabels into a JSON -// request object. 
-func expandOSPolicyAssignmentInstanceFilterExclusionLabelsMap(c *Client, f map[string]OSPolicyAssignmentInstanceFilterExclusionLabels, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentInstanceFilterExclusionLabels(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentInstanceFilterExclusionLabelsSlice expands the contents of OSPolicyAssignmentInstanceFilterExclusionLabels into a JSON -// request object. -func expandOSPolicyAssignmentInstanceFilterExclusionLabelsSlice(c *Client, f []OSPolicyAssignmentInstanceFilterExclusionLabels, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentInstanceFilterExclusionLabels(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentInstanceFilterExclusionLabelsMap flattens the contents of OSPolicyAssignmentInstanceFilterExclusionLabels from a JSON -// response object. 
-func flattenOSPolicyAssignmentInstanceFilterExclusionLabelsMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentInstanceFilterExclusionLabels { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentInstanceFilterExclusionLabels{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentInstanceFilterExclusionLabels{} - } - - items := make(map[string]OSPolicyAssignmentInstanceFilterExclusionLabels) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentInstanceFilterExclusionLabels(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentInstanceFilterExclusionLabelsSlice flattens the contents of OSPolicyAssignmentInstanceFilterExclusionLabels from a JSON -// response object. -func flattenOSPolicyAssignmentInstanceFilterExclusionLabelsSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentInstanceFilterExclusionLabels { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentInstanceFilterExclusionLabels{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentInstanceFilterExclusionLabels{} - } - - items := make([]OSPolicyAssignmentInstanceFilterExclusionLabels, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentInstanceFilterExclusionLabels(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentInstanceFilterExclusionLabels expands an instance of OSPolicyAssignmentInstanceFilterExclusionLabels into a JSON -// request object. 
-func expandOSPolicyAssignmentInstanceFilterExclusionLabels(c *Client, f *OSPolicyAssignmentInstanceFilterExclusionLabels, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Labels; !dcl.IsEmptyValueIndirect(v) { - m["labels"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentInstanceFilterExclusionLabels flattens an instance of OSPolicyAssignmentInstanceFilterExclusionLabels from a JSON -// response object. -func flattenOSPolicyAssignmentInstanceFilterExclusionLabels(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentInstanceFilterExclusionLabels { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentInstanceFilterExclusionLabels{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentInstanceFilterExclusionLabels - } - r.Labels = dcl.FlattenKeyValuePairs(m["labels"]) - - return r -} - -// expandOSPolicyAssignmentInstanceFilterInventoriesMap expands the contents of OSPolicyAssignmentInstanceFilterInventories into a JSON -// request object. -func expandOSPolicyAssignmentInstanceFilterInventoriesMap(c *Client, f map[string]OSPolicyAssignmentInstanceFilterInventories, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentInstanceFilterInventories(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentInstanceFilterInventoriesSlice expands the contents of OSPolicyAssignmentInstanceFilterInventories into a JSON -// request object. 
-func expandOSPolicyAssignmentInstanceFilterInventoriesSlice(c *Client, f []OSPolicyAssignmentInstanceFilterInventories, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentInstanceFilterInventories(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentInstanceFilterInventoriesMap flattens the contents of OSPolicyAssignmentInstanceFilterInventories from a JSON -// response object. -func flattenOSPolicyAssignmentInstanceFilterInventoriesMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentInstanceFilterInventories { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentInstanceFilterInventories{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentInstanceFilterInventories{} - } - - items := make(map[string]OSPolicyAssignmentInstanceFilterInventories) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentInstanceFilterInventories(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentInstanceFilterInventoriesSlice flattens the contents of OSPolicyAssignmentInstanceFilterInventories from a JSON -// response object. 
-func flattenOSPolicyAssignmentInstanceFilterInventoriesSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentInstanceFilterInventories { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentInstanceFilterInventories{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentInstanceFilterInventories{} - } - - items := make([]OSPolicyAssignmentInstanceFilterInventories, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentInstanceFilterInventories(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentInstanceFilterInventories expands an instance of OSPolicyAssignmentInstanceFilterInventories into a JSON -// request object. -func expandOSPolicyAssignmentInstanceFilterInventories(c *Client, f *OSPolicyAssignmentInstanceFilterInventories, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.OSShortName; !dcl.IsEmptyValueIndirect(v) { - m["osShortName"] = v - } - if v := f.OSVersion; !dcl.IsEmptyValueIndirect(v) { - m["osVersion"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentInstanceFilterInventories flattens an instance of OSPolicyAssignmentInstanceFilterInventories from a JSON -// response object. -func flattenOSPolicyAssignmentInstanceFilterInventories(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentInstanceFilterInventories { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentInstanceFilterInventories{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentInstanceFilterInventories - } - r.OSShortName = dcl.FlattenString(m["osShortName"]) - r.OSVersion = dcl.FlattenString(m["osVersion"]) - - return r -} - -// expandOSPolicyAssignmentRolloutMap expands the contents of OSPolicyAssignmentRollout into a JSON -// request object. 
-func expandOSPolicyAssignmentRolloutMap(c *Client, f map[string]OSPolicyAssignmentRollout, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentRollout(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentRolloutSlice expands the contents of OSPolicyAssignmentRollout into a JSON -// request object. -func expandOSPolicyAssignmentRolloutSlice(c *Client, f []OSPolicyAssignmentRollout, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentRollout(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentRolloutMap flattens the contents of OSPolicyAssignmentRollout from a JSON -// response object. -func flattenOSPolicyAssignmentRolloutMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentRollout { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentRollout{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentRollout{} - } - - items := make(map[string]OSPolicyAssignmentRollout) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentRollout(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentRolloutSlice flattens the contents of OSPolicyAssignmentRollout from a JSON -// response object. 
-func flattenOSPolicyAssignmentRolloutSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentRollout { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentRollout{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentRollout{} - } - - items := make([]OSPolicyAssignmentRollout, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentRollout(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentRollout expands an instance of OSPolicyAssignmentRollout into a JSON -// request object. -func expandOSPolicyAssignmentRollout(c *Client, f *OSPolicyAssignmentRollout, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v, err := expandOSPolicyAssignmentRolloutDisruptionBudget(c, f.DisruptionBudget, res); err != nil { - return nil, fmt.Errorf("error expanding DisruptionBudget into disruptionBudget: %w", err) - } else if !dcl.IsEmptyValueIndirect(v) { - m["disruptionBudget"] = v - } - if v := f.MinWaitDuration; !dcl.IsEmptyValueIndirect(v) { - m["minWaitDuration"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentRollout flattens an instance of OSPolicyAssignmentRollout from a JSON -// response object. -func flattenOSPolicyAssignmentRollout(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentRollout { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentRollout{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentRollout - } - r.DisruptionBudget = flattenOSPolicyAssignmentRolloutDisruptionBudget(c, m["disruptionBudget"], res) - r.MinWaitDuration = dcl.FlattenString(m["minWaitDuration"]) - - return r -} - -// expandOSPolicyAssignmentRolloutDisruptionBudgetMap expands the contents of OSPolicyAssignmentRolloutDisruptionBudget into a JSON -// request object. 
-func expandOSPolicyAssignmentRolloutDisruptionBudgetMap(c *Client, f map[string]OSPolicyAssignmentRolloutDisruptionBudget, res *OSPolicyAssignment) (map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := make(map[string]interface{}) - for k, item := range f { - i, err := expandOSPolicyAssignmentRolloutDisruptionBudget(c, &item, res) - if err != nil { - return nil, err - } - if i != nil { - items[k] = i - } - } - - return items, nil -} - -// expandOSPolicyAssignmentRolloutDisruptionBudgetSlice expands the contents of OSPolicyAssignmentRolloutDisruptionBudget into a JSON -// request object. -func expandOSPolicyAssignmentRolloutDisruptionBudgetSlice(c *Client, f []OSPolicyAssignmentRolloutDisruptionBudget, res *OSPolicyAssignment) ([]map[string]interface{}, error) { - if f == nil { - return nil, nil - } - - items := []map[string]interface{}{} - for _, item := range f { - i, err := expandOSPolicyAssignmentRolloutDisruptionBudget(c, &item, res) - if err != nil { - return nil, err - } - - items = append(items, i) - } - - return items, nil -} - -// flattenOSPolicyAssignmentRolloutDisruptionBudgetMap flattens the contents of OSPolicyAssignmentRolloutDisruptionBudget from a JSON -// response object. -func flattenOSPolicyAssignmentRolloutDisruptionBudgetMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentRolloutDisruptionBudget { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentRolloutDisruptionBudget{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentRolloutDisruptionBudget{} - } - - items := make(map[string]OSPolicyAssignmentRolloutDisruptionBudget) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentRolloutDisruptionBudget(c, item.(map[string]interface{}), res) - } - - return items -} - -// flattenOSPolicyAssignmentRolloutDisruptionBudgetSlice flattens the contents of OSPolicyAssignmentRolloutDisruptionBudget from a JSON -// response object. 
-func flattenOSPolicyAssignmentRolloutDisruptionBudgetSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentRolloutDisruptionBudget { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentRolloutDisruptionBudget{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentRolloutDisruptionBudget{} - } - - items := make([]OSPolicyAssignmentRolloutDisruptionBudget, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentRolloutDisruptionBudget(c, item.(map[string]interface{}), res)) - } - - return items -} - -// expandOSPolicyAssignmentRolloutDisruptionBudget expands an instance of OSPolicyAssignmentRolloutDisruptionBudget into a JSON -// request object. -func expandOSPolicyAssignmentRolloutDisruptionBudget(c *Client, f *OSPolicyAssignmentRolloutDisruptionBudget, res *OSPolicyAssignment) (map[string]interface{}, error) { - if dcl.IsEmptyValueIndirect(f) { - return nil, nil - } - - m := make(map[string]interface{}) - if v := f.Fixed; !dcl.IsEmptyValueIndirect(v) { - m["fixed"] = v - } - if v := f.Percent; !dcl.IsEmptyValueIndirect(v) { - m["percent"] = v - } - - return m, nil -} - -// flattenOSPolicyAssignmentRolloutDisruptionBudget flattens an instance of OSPolicyAssignmentRolloutDisruptionBudget from a JSON -// response object. -func flattenOSPolicyAssignmentRolloutDisruptionBudget(c *Client, i interface{}, res *OSPolicyAssignment) *OSPolicyAssignmentRolloutDisruptionBudget { - m, ok := i.(map[string]interface{}) - if !ok { - return nil - } - - r := &OSPolicyAssignmentRolloutDisruptionBudget{} - - if dcl.IsEmptyValueIndirect(i) { - return EmptyOSPolicyAssignmentRolloutDisruptionBudget - } - r.Fixed = dcl.FlattenInteger(m["fixed"]) - r.Percent = dcl.FlattenInteger(m["percent"]) - - return r -} - -// flattenOSPolicyAssignmentOSPoliciesModeEnumMap flattens the contents of OSPolicyAssignmentOSPoliciesModeEnum from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesModeEnumMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesModeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesModeEnum{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesModeEnum{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesModeEnum) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesModeEnum(item.(interface{})) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesModeEnumSlice flattens the contents of OSPolicyAssignmentOSPoliciesModeEnum from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesModeEnumSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesModeEnum { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesModeEnum{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesModeEnum{} - } - - items := make([]OSPolicyAssignmentOSPoliciesModeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesModeEnum(item.(interface{}))) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesModeEnum asserts that an interface is a string, and returns a -// pointer to a *OSPolicyAssignmentOSPoliciesModeEnum with the same value as that string. -func flattenOSPolicyAssignmentOSPoliciesModeEnum(i interface{}) *OSPolicyAssignmentOSPoliciesModeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return OSPolicyAssignmentOSPoliciesModeEnumRef(s) -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnumMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnumMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum(item.(interface{})) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnumSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnumSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum(item.(interface{}))) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum asserts that an interface is a string, and returns a -// pointer to a *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum with the same value as that string. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum(i interface{}) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnumRef(s) -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnumMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnumMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum(item.(interface{})) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnumSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnumSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum(item.(interface{}))) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum asserts that an interface is a string, and returns a -// pointer to a *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum with the same value as that string. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum(i interface{}) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnumRef(s) -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnumMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnumMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum(item.(interface{})) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnumSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnumSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum(item.(interface{}))) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum asserts that an interface is a string, and returns a -// pointer to a *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum with the same value as that string. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum(i interface{}) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnumRef(s) -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnumMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnumMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum(item.(interface{})) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnumSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnumSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum(item.(interface{}))) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum asserts that an interface is a string, and returns a -// pointer to a *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum with the same value as that string. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum(i interface{}) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnumRef(s) -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnumMap flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum from a JSON -// response object. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnumMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum{} - } - - items := make(map[string]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum(item.(interface{})) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnumSlice flattens the contents of OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum from a JSON -// response object. -func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnumSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum{} - } - - items := make([]OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum(item.(interface{}))) - } - - return items -} - -// flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum asserts that an interface is a string, and returns a -// pointer to a *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum with the same value as that string. 
-func flattenOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum(i interface{}) *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnumRef(s) -} - -// flattenOSPolicyAssignmentRolloutStateEnumMap flattens the contents of OSPolicyAssignmentRolloutStateEnum from a JSON -// response object. -func flattenOSPolicyAssignmentRolloutStateEnumMap(c *Client, i interface{}, res *OSPolicyAssignment) map[string]OSPolicyAssignmentRolloutStateEnum { - a, ok := i.(map[string]interface{}) - if !ok { - return map[string]OSPolicyAssignmentRolloutStateEnum{} - } - - if len(a) == 0 { - return map[string]OSPolicyAssignmentRolloutStateEnum{} - } - - items := make(map[string]OSPolicyAssignmentRolloutStateEnum) - for k, item := range a { - items[k] = *flattenOSPolicyAssignmentRolloutStateEnum(item.(interface{})) - } - - return items -} - -// flattenOSPolicyAssignmentRolloutStateEnumSlice flattens the contents of OSPolicyAssignmentRolloutStateEnum from a JSON -// response object. -func flattenOSPolicyAssignmentRolloutStateEnumSlice(c *Client, i interface{}, res *OSPolicyAssignment) []OSPolicyAssignmentRolloutStateEnum { - a, ok := i.([]interface{}) - if !ok { - return []OSPolicyAssignmentRolloutStateEnum{} - } - - if len(a) == 0 { - return []OSPolicyAssignmentRolloutStateEnum{} - } - - items := make([]OSPolicyAssignmentRolloutStateEnum, 0, len(a)) - for _, item := range a { - items = append(items, *flattenOSPolicyAssignmentRolloutStateEnum(item.(interface{}))) - } - - return items -} - -// flattenOSPolicyAssignmentRolloutStateEnum asserts that an interface is a string, and returns a -// pointer to a *OSPolicyAssignmentRolloutStateEnum with the same value as that string. 
-func flattenOSPolicyAssignmentRolloutStateEnum(i interface{}) *OSPolicyAssignmentRolloutStateEnum { - s, ok := i.(string) - if !ok { - return nil - } - - return OSPolicyAssignmentRolloutStateEnumRef(s) -} - -// This function returns a matcher that checks whether a serialized resource matches this resource -// in its parameters (as defined by the fields in a Get, which definitionally define resource -// identity). This is useful in extracting the element from a List call. -func (r *OSPolicyAssignment) matcher(c *Client) func([]byte) bool { - return func(b []byte) bool { - cr, err := unmarshalOSPolicyAssignment(b, c, r) - if err != nil { - c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.") - return false - } - nr := r.urlNormalized() - ncr := cr.urlNormalized() - c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr) - - if nr.Project == nil && ncr.Project == nil { - c.Config.Logger.Info("Both Project fields null - considering equal.") - } else if nr.Project == nil || ncr.Project == nil { - c.Config.Logger.Info("Only one Project field is null - considering unequal.") - return false - } else if *nr.Project != *ncr.Project { - return false - } - if nr.Location == nil && ncr.Location == nil { - c.Config.Logger.Info("Both Location fields null - considering equal.") - } else if nr.Location == nil || ncr.Location == nil { - c.Config.Logger.Info("Only one Location field is null - considering unequal.") - return false - } else if *nr.Location != *ncr.Location { - return false - } - if nr.Name == nil && ncr.Name == nil { - c.Config.Logger.Info("Both Name fields null - considering equal.") - } else if nr.Name == nil || ncr.Name == nil { - c.Config.Logger.Info("Only one Name field is null - considering unequal.") - return false - } else if *nr.Name != *ncr.Name { - return false - } - return true - } -} - -type oSPolicyAssignmentDiff struct { - // The diff should include one or the other of RequiresRecreate or UpdateOp. 
- RequiresRecreate bool - UpdateOp oSPolicyAssignmentApiOperation - FieldName string // used for error logging -} - -func convertFieldDiffsToOSPolicyAssignmentDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]oSPolicyAssignmentDiff, error) { - opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff) - // Map each operation name to the field diffs associated with it. - for _, fd := range fds { - for _, ro := range fd.ResultingOperation { - if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok { - fieldDiffs = append(fieldDiffs, fd) - opNamesToFieldDiffs[ro] = fieldDiffs - } else { - config.Logger.Infof("%s required due to diff: %v", ro, fd) - opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd} - } - } - } - var diffs []oSPolicyAssignmentDiff - // For each operation name, create a oSPolicyAssignmentDiff which contains the operation. - for opName, fieldDiffs := range opNamesToFieldDiffs { - // Use the first field diff's field name for logging required recreate error. - diff := oSPolicyAssignmentDiff{FieldName: fieldDiffs[0].FieldName} - if opName == "Recreate" { - diff.RequiresRecreate = true - } else { - apiOp, err := convertOpNameToOSPolicyAssignmentApiOperation(opName, fieldDiffs, opts...) - if err != nil { - return diffs, err - } - diff.UpdateOp = apiOp - } - diffs = append(diffs, diff) - } - return diffs, nil -} - -func convertOpNameToOSPolicyAssignmentApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (oSPolicyAssignmentApiOperation, error) { - switch opName { - - case "updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation": - return &updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation{FieldDiffs: fieldDiffs}, nil - - default: - return nil, fmt.Errorf("no such operation with name: %v", opName) - } -} - -func extractOSPolicyAssignmentFields(r *OSPolicyAssignment) error { - vInstanceFilter := r.InstanceFilter - if vInstanceFilter == nil { - // note: explicitly not the empty object. 
- vInstanceFilter = &OSPolicyAssignmentInstanceFilter{} - } - if err := extractOSPolicyAssignmentInstanceFilterFields(r, vInstanceFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vInstanceFilter) { - r.InstanceFilter = vInstanceFilter - } - vRollout := r.Rollout - if vRollout == nil { - // note: explicitly not the empty object. - vRollout = &OSPolicyAssignmentRollout{} - } - if err := extractOSPolicyAssignmentRolloutFields(r, vRollout); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRollout) { - r.Rollout = vRollout - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPolicies) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroups) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResources) error { - vPkg := o.Pkg - if vPkg == nil { - // note: explicitly not the empty object. - vPkg = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgFields(r, vPkg); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPkg) { - o.Pkg = vPkg - } - vRepository := o.Repository - if vRepository == nil { - // note: explicitly not the empty object. 
- vRepository = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryFields(r, vRepository); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRepository) { - o.Repository = vRepository - } - vExec := o.Exec - if vExec == nil { - // note: explicitly not the empty object. - vExec = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecFields(r, vExec); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vExec) { - o.Exec = vExec - } - vFile := o.File - if vFile == nil { - // note: explicitly not the empty object. - vFile = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFields(r, vFile); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vFile) { - o.File = vFile - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) error { - vApt := o.Apt - if vApt == nil { - // note: explicitly not the empty object. - vApt = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptFields(r, vApt); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vApt) { - o.Apt = vApt - } - vDeb := o.Deb - if vDeb == nil { - // note: explicitly not the empty object. - vDeb = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebFields(r, vDeb); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDeb) { - o.Deb = vDeb - } - vYum := o.Yum - if vYum == nil { - // note: explicitly not the empty object. 
- vYum = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumFields(r, vYum); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYum) { - o.Yum = vYum - } - vZypper := o.Zypper - if vZypper == nil { - // note: explicitly not the empty object. - vZypper = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperFields(r, vZypper); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vZypper) { - o.Zypper = vZypper - } - vRpm := o.Rpm - if vRpm == nil { - // note: explicitly not the empty object. - vRpm = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmFields(r, vRpm); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRpm) { - o.Rpm = vRpm - } - vGooget := o.Googet - if vGooget == nil { - // note: explicitly not the empty object. - vGooget = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetFields(r, vGooget); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGooget) { - o.Googet = vGooget - } - vMsi := o.Msi - if vMsi == nil { - // note: explicitly not the empty object. 
- vMsi = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiFields(r, vMsi); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMsi) { - o.Msi = vMsi - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) error { - vSource := o.Source - if vSource == nil { - // note: explicitly not the empty object. - vSource = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceFields(r, vSource); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSource) { - o.Source = vSource - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) error { - vRemote := o.Remote - if vRemote == nil { - // note: explicitly not the empty object. - vRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteFields(r, vRemote); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRemote) { - o.Remote = vRemote - } - vGcs := o.Gcs - if vGcs == nil { - // note: explicitly not the empty object. 
- vGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsFields(r, vGcs); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGcs) { - o.Gcs = vGcs - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) error { - vSource := o.Source - if vSource == nil { - // note: explicitly not the empty object. - vSource = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceFields(r, vSource); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSource) { - o.Source = vSource - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) error { - vRemote := o.Remote - if vRemote == nil { - // note: explicitly not the empty object. 
- vRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteFields(r, vRemote); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRemote) { - o.Remote = vRemote - } - vGcs := o.Gcs - if vGcs == nil { - // note: explicitly not the empty object. - vGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsFields(r, vGcs); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGcs) { - o.Gcs = vGcs - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) error { - vSource := o.Source - if vSource == nil { - // note: explicitly not the empty object. 
- vSource = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceFields(r, vSource); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSource) { - o.Source = vSource - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) error { - vRemote := o.Remote - if vRemote == nil { - // note: explicitly not the empty object. - vRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteFields(r, vRemote); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRemote) { - o.Remote = vRemote - } - vGcs := o.Gcs - if vGcs == nil { - // note: explicitly not the empty object. - vGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsFields(r, vGcs); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGcs) { - o.Gcs = vGcs - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) error { - vApt := o.Apt - if vApt == nil { - // note: explicitly not the empty object. 
- vApt = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptFields(r, vApt); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vApt) { - o.Apt = vApt - } - vYum := o.Yum - if vYum == nil { - // note: explicitly not the empty object. - vYum = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumFields(r, vYum); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYum) { - o.Yum = vYum - } - vZypper := o.Zypper - if vZypper == nil { - // note: explicitly not the empty object. - vZypper = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperFields(r, vZypper); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vZypper) { - o.Zypper = vZypper - } - vGoo := o.Goo - if vGoo == nil { - // note: explicitly not the empty object. 
- vGoo = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooFields(r, vGoo); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGoo) { - o.Goo = vGoo - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) error { - vValidate := o.Validate - if vValidate == nil { - // note: explicitly not the empty object. - vValidate = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFields(r, vValidate); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vValidate) { - o.Validate = vValidate - } - vEnforce := o.Enforce - if vEnforce == nil { - // note: explicitly not the empty object. 
- vEnforce = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFields(r, vEnforce); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vEnforce) { - o.Enforce = vEnforce - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) error { - vFile := o.File - if vFile == nil { - // note: explicitly not the empty object. - vFile = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileFields(r, vFile); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vFile) { - o.File = vFile - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) error { - vRemote := o.Remote - if vRemote == nil { - // note: explicitly not the empty object. - vRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteFields(r, vRemote); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRemote) { - o.Remote = vRemote - } - vGcs := o.Gcs - if vGcs == nil { - // note: explicitly not the empty object. 
- vGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsFields(r, vGcs); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGcs) { - o.Gcs = vGcs - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) error { - vFile := o.File - if vFile == nil { - // note: explicitly not the empty object. - vFile = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileFields(r, vFile); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vFile) { - o.File = vFile - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) error { - vRemote := o.Remote - if vRemote == nil { - // note: explicitly not the empty object. - vRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteFields(r, vRemote); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRemote) { - o.Remote = vRemote - } - vGcs := o.Gcs - if vGcs == nil { - // note: explicitly not the empty object. 
- vGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsFields(r, vGcs); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGcs) { - o.Gcs = vGcs - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) error { - vFile := o.File - if vFile == nil { - // note: explicitly not the empty object. - vFile = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileFields(r, vFile); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vFile) { - o.File = vFile - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) error { - vRemote := o.Remote - if vRemote == nil { - // note: explicitly not the empty object. - vRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteFields(r, vRemote); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRemote) { - o.Remote = vRemote - } - vGcs := o.Gcs - if vGcs == nil { - // note: explicitly not the empty object. 
- vGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsFields(r, vGcs); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGcs) { - o.Gcs = vGcs - } - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) error { - return nil -} -func extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) error { - return nil -} -func extractOSPolicyAssignmentInstanceFilterFields(r *OSPolicyAssignment, o *OSPolicyAssignmentInstanceFilter) error { - return nil -} -func extractOSPolicyAssignmentInstanceFilterInclusionLabelsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentInstanceFilterInclusionLabels) error { - return nil -} -func extractOSPolicyAssignmentInstanceFilterExclusionLabelsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentInstanceFilterExclusionLabels) error { - return nil -} -func extractOSPolicyAssignmentInstanceFilterInventoriesFields(r *OSPolicyAssignment, o *OSPolicyAssignmentInstanceFilterInventories) error { - return nil -} -func extractOSPolicyAssignmentRolloutFields(r *OSPolicyAssignment, o *OSPolicyAssignmentRollout) error { - vDisruptionBudget := o.DisruptionBudget - if vDisruptionBudget == nil { - // note: explicitly not the empty object. 
- vDisruptionBudget = &OSPolicyAssignmentRolloutDisruptionBudget{} - } - if err := extractOSPolicyAssignmentRolloutDisruptionBudgetFields(r, vDisruptionBudget); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDisruptionBudget) { - o.DisruptionBudget = vDisruptionBudget - } - return nil -} -func extractOSPolicyAssignmentRolloutDisruptionBudgetFields(r *OSPolicyAssignment, o *OSPolicyAssignmentRolloutDisruptionBudget) error { - return nil -} - -func postReadExtractOSPolicyAssignmentFields(r *OSPolicyAssignment) error { - vInstanceFilter := r.InstanceFilter - if vInstanceFilter == nil { - // note: explicitly not the empty object. - vInstanceFilter = &OSPolicyAssignmentInstanceFilter{} - } - if err := postReadExtractOSPolicyAssignmentInstanceFilterFields(r, vInstanceFilter); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vInstanceFilter) { - r.InstanceFilter = vInstanceFilter - } - vRollout := r.Rollout - if vRollout == nil { - // note: explicitly not the empty object. - vRollout = &OSPolicyAssignmentRollout{} - } - if err := postReadExtractOSPolicyAssignmentRolloutFields(r, vRollout); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRollout) { - r.Rollout = vRollout - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPolicies) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroups) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResources) error { - vPkg := o.Pkg - if vPkg == nil { - // note: explicitly not the empty object. 
- vPkg = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgFields(r, vPkg); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vPkg) { - o.Pkg = vPkg - } - vRepository := o.Repository - if vRepository == nil { - // note: explicitly not the empty object. - vRepository = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryFields(r, vRepository); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRepository) { - o.Repository = vRepository - } - vExec := o.Exec - if vExec == nil { - // note: explicitly not the empty object. - vExec = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecFields(r, vExec); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vExec) { - o.Exec = vExec - } - vFile := o.File - if vFile == nil { - // note: explicitly not the empty object. - vFile = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFields(r, vFile); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vFile) { - o.File = vFile - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) error { - vApt := o.Apt - if vApt == nil { - // note: explicitly not the empty object. - vApt = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptFields(r, vApt); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vApt) { - o.Apt = vApt - } - vDeb := o.Deb - if vDeb == nil { - // note: explicitly not the empty object. 
- vDeb = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebFields(r, vDeb); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDeb) { - o.Deb = vDeb - } - vYum := o.Yum - if vYum == nil { - // note: explicitly not the empty object. - vYum = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumFields(r, vYum); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYum) { - o.Yum = vYum - } - vZypper := o.Zypper - if vZypper == nil { - // note: explicitly not the empty object. - vZypper = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperFields(r, vZypper); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vZypper) { - o.Zypper = vZypper - } - vRpm := o.Rpm - if vRpm == nil { - // note: explicitly not the empty object. - vRpm = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmFields(r, vRpm); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRpm) { - o.Rpm = vRpm - } - vGooget := o.Googet - if vGooget == nil { - // note: explicitly not the empty object. - vGooget = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetFields(r, vGooget); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGooget) { - o.Googet = vGooget - } - vMsi := o.Msi - if vMsi == nil { - // note: explicitly not the empty object. 
- vMsi = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiFields(r, vMsi); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vMsi) { - o.Msi = vMsi - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) error { - vSource := o.Source - if vSource == nil { - // note: explicitly not the empty object. - vSource = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceFields(r, vSource); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSource) { - o.Source = vSource - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) error { - vRemote := o.Remote - if vRemote == nil { - // note: explicitly not the empty object. - vRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteFields(r, vRemote); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRemote) { - o.Remote = vRemote - } - vGcs := o.Gcs - if vGcs == nil { - // note: explicitly not the empty object. 
- vGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsFields(r, vGcs); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGcs) { - o.Gcs = vGcs - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) error { - vSource := o.Source - if vSource == nil { - // note: explicitly not the empty object. - vSource = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceFields(r, vSource); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSource) { - o.Source = vSource - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) error { - vRemote := o.Remote - if vRemote == nil { - // note: explicitly not the empty object. 
- vRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteFields(r, vRemote); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRemote) { - o.Remote = vRemote - } - vGcs := o.Gcs - if vGcs == nil { - // note: explicitly not the empty object. - vGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsFields(r, vGcs); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGcs) { - o.Gcs = vGcs - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) error { - vSource := o.Source - if vSource == nil { - // note: explicitly not the empty object. 
- vSource = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceFields(r, vSource); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vSource) { - o.Source = vSource - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) error { - vRemote := o.Remote - if vRemote == nil { - // note: explicitly not the empty object. - vRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteFields(r, vRemote); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRemote) { - o.Remote = vRemote - } - vGcs := o.Gcs - if vGcs == nil { - // note: explicitly not the empty object. - vGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsFields(r, vGcs); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGcs) { - o.Gcs = vGcs - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) error { - vApt := o.Apt - if vApt == nil { - // note: explicitly not the empty object. 
- vApt = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptFields(r, vApt); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vApt) { - o.Apt = vApt - } - vYum := o.Yum - if vYum == nil { - // note: explicitly not the empty object. - vYum = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumFields(r, vYum); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vYum) { - o.Yum = vYum - } - vZypper := o.Zypper - if vZypper == nil { - // note: explicitly not the empty object. - vZypper = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperFields(r, vZypper); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vZypper) { - o.Zypper = vZypper - } - vGoo := o.Goo - if vGoo == nil { - // note: explicitly not the empty object. 
- vGoo = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooFields(r, vGoo); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGoo) { - o.Goo = vGoo - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) error { - vValidate := o.Validate - if vValidate == nil { - // note: explicitly not the empty object. - vValidate = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFields(r, vValidate); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vValidate) { - o.Validate = vValidate - } - vEnforce := o.Enforce - if vEnforce == nil { - // note: explicitly not the empty object. 
- vEnforce = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFields(r, vEnforce); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vEnforce) { - o.Enforce = vEnforce - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) error { - vFile := o.File - if vFile == nil { - // note: explicitly not the empty object. - vFile = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileFields(r, vFile); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vFile) { - o.File = vFile - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) error { - vRemote := o.Remote - if vRemote == nil { - // note: explicitly not the empty object. - vRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteFields(r, vRemote); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRemote) { - o.Remote = vRemote - } - vGcs := o.Gcs - if vGcs == nil { - // note: explicitly not the empty object. 
- vGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsFields(r, vGcs); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGcs) { - o.Gcs = vGcs - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) error { - vFile := o.File - if vFile == nil { - // note: explicitly not the empty object. - vFile = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileFields(r, vFile); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vFile) { - o.File = vFile - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) error { - vRemote := o.Remote - if vRemote == nil { - // note: explicitly not the empty object. - vRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteFields(r, vRemote); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRemote) { - o.Remote = vRemote - } - vGcs := o.Gcs - if vGcs == nil { - // note: explicitly not the empty object. 
- vGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsFields(r, vGcs); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGcs) { - o.Gcs = vGcs - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) error { - vFile := o.File - if vFile == nil { - // note: explicitly not the empty object. - vFile = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileFields(r, vFile); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vFile) { - o.File = vFile - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) error { - vRemote := o.Remote - if vRemote == nil { - // note: explicitly not the empty object. - vRemote = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteFields(r, vRemote); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vRemote) { - o.Remote = vRemote - } - vGcs := o.Gcs - if vGcs == nil { - // note: explicitly not the empty object. 
- vGcs = &OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs{} - } - if err := extractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsFields(r, vGcs); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vGcs) { - o.Gcs = vGcs - } - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) error { - return nil -} -func postReadExtractOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) error { - return nil -} -func postReadExtractOSPolicyAssignmentInstanceFilterFields(r *OSPolicyAssignment, o *OSPolicyAssignmentInstanceFilter) error { - return nil -} -func postReadExtractOSPolicyAssignmentInstanceFilterInclusionLabelsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentInstanceFilterInclusionLabels) error { - return nil -} -func postReadExtractOSPolicyAssignmentInstanceFilterExclusionLabelsFields(r *OSPolicyAssignment, o *OSPolicyAssignmentInstanceFilterExclusionLabels) error { - return nil -} -func postReadExtractOSPolicyAssignmentInstanceFilterInventoriesFields(r *OSPolicyAssignment, o *OSPolicyAssignmentInstanceFilterInventories) error { - return nil -} -func postReadExtractOSPolicyAssignmentRolloutFields(r *OSPolicyAssignment, o *OSPolicyAssignmentRollout) error { - vDisruptionBudget := o.DisruptionBudget - if vDisruptionBudget == nil { - // note: explicitly not the empty object. 
- vDisruptionBudget = &OSPolicyAssignmentRolloutDisruptionBudget{} - } - if err := extractOSPolicyAssignmentRolloutDisruptionBudgetFields(r, vDisruptionBudget); err != nil { - return err - } - if !dcl.IsEmptyValueIndirect(vDisruptionBudget) { - o.DisruptionBudget = vDisruptionBudget - } - return nil -} -func postReadExtractOSPolicyAssignmentRolloutDisruptionBudgetFields(r *OSPolicyAssignment, o *OSPolicyAssignmentRolloutDisruptionBudget) error { - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment_schema.go deleted file mode 100644 index 91b794a125..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment_schema.go +++ /dev/null @@ -1,1484 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-package osconfig - -import ( - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" -) - -func DCLOSPolicyAssignmentSchema() *dcl.Schema { - return &dcl.Schema{ - Info: &dcl.Info{ - Title: "OSConfig/OSPolicyAssignment", - Description: "Represents an OSPolicyAssignment resource.", - StructName: "OSPolicyAssignment", - Reference: &dcl.Link{ - Text: "API documentation", - URL: "https://cloud.google.com/compute/docs/osconfig/rest/v1/projects.locations.osPolicyAssignments", - }, - }, - Paths: &dcl.Paths{ - Get: &dcl.Path{ - Description: "The function used to get information about a OSPolicyAssignment", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "oSPolicyAssignment", - Required: true, - Description: "A full instance of a OSPolicyAssignment", - }, - }, - }, - Apply: &dcl.Path{ - Description: "The function used to apply information about a OSPolicyAssignment", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "oSPolicyAssignment", - Required: true, - Description: "A full instance of a OSPolicyAssignment", - }, - }, - }, - Delete: &dcl.Path{ - Description: "The function used to delete a OSPolicyAssignment", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "oSPolicyAssignment", - Required: true, - Description: "A full instance of a OSPolicyAssignment", - }, - }, - }, - DeleteAll: &dcl.Path{ - Description: "The function used to delete all OSPolicyAssignment", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - List: &dcl.Path{ - Description: "The function used to list information about many OSPolicyAssignment", - Parameters: []dcl.PathParameters{ - dcl.PathParameters{ - Name: "project", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - 
}, - }, - dcl.PathParameters{ - Name: "location", - Required: true, - Schema: &dcl.PathParametersSchema{ - Type: "string", - }, - }, - }, - }, - }, - Components: &dcl.Components{ - Schemas: map[string]*dcl.Component{ - "OSPolicyAssignment": &dcl.Component{ - Title: "OSPolicyAssignment", - ID: "projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}", - UsesStateHint: true, - ParentContainer: "project", - HasCreate: true, - SchemaProperty: dcl.Property{ - Type: "object", - Required: []string{ - "name", - "osPolicies", - "instanceFilter", - "rollout", - "project", - "location", - }, - Properties: map[string]*dcl.Property{ - "baseline": &dcl.Property{ - Type: "boolean", - GoName: "Baseline", - ReadOnly: true, - Description: "Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.", - Immutable: true, - }, - "deleted": &dcl.Property{ - Type: "boolean", - GoName: "Deleted", - ReadOnly: true, - Description: "Output only. Indicates that this revision deletes the OS policy assignment.", - Immutable: true, - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "OS policy assignment description. Length of the description is limited to 1024 characters.", - }, - "etag": &dcl.Property{ - Type: "string", - GoName: "Etag", - ReadOnly: true, - Description: "The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.", - Immutable: true, - }, - "instanceFilter": &dcl.Property{ - Type: "object", - GoName: "InstanceFilter", - GoType: "OSPolicyAssignmentInstanceFilter", - Description: "Required. Filter to select VMs.", - Properties: map[string]*dcl.Property{ - "all": &dcl.Property{ - Type: "boolean", - GoName: "All", - Description: "Target all VMs in the project. 
If true, no other criteria is permitted.", - SendEmpty: true, - }, - "exclusionLabels": &dcl.Property{ - Type: "array", - GoName: "ExclusionLabels", - Description: "List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "OSPolicyAssignmentInstanceFilterExclusionLabels", - Properties: map[string]*dcl.Property{ - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.", - }, - }, - }, - }, - "inclusionLabels": &dcl.Property{ - Type: "array", - GoName: "InclusionLabels", - Description: "List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "OSPolicyAssignmentInstanceFilterInclusionLabels", - Properties: map[string]*dcl.Property{ - "labels": &dcl.Property{ - Type: "object", - AdditionalProperties: &dcl.Property{ - Type: "string", - }, - GoName: "Labels", - Description: "Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.", - }, - }, - }, - }, - "inventories": &dcl.Property{ - Type: "array", - GoName: "Inventories", - Description: "List of inventories to select VMs. 
A VM is selected if its inventory data matches at least one of the following inventories.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "OSPolicyAssignmentInstanceFilterInventories", - Required: []string{ - "osShortName", - }, - Properties: map[string]*dcl.Property{ - "osShortName": &dcl.Property{ - Type: "string", - GoName: "OSShortName", - Description: "Required. The OS short name", - }, - "osVersion": &dcl.Property{ - Type: "string", - GoName: "OSVersion", - Description: "The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.", - }, - }, - }, - }, - }, - }, - "location": &dcl.Property{ - Type: "string", - GoName: "Location", - Description: "The location for the resource", - Immutable: true, - }, - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Resource name.", - Immutable: true, - }, - "osPolicies": &dcl.Property{ - Type: "array", - GoName: "OSPolicies", - Description: "Required. List of OS policies to be applied to the VMs.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "OSPolicyAssignmentOSPolicies", - Required: []string{ - "id", - "mode", - "resourceGroups", - }, - Properties: map[string]*dcl.Property{ - "allowNoResourceGroupMatch": &dcl.Property{ - Type: "boolean", - GoName: "AllowNoResourceGroupMatch", - Description: "This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.", - }, - "description": &dcl.Property{ - Type: "string", - GoName: "Description", - Description: "Policy description. 
Length of the description is limited to 1024 characters.", - }, - "id": &dcl.Property{ - Type: "string", - GoName: "Id", - Description: "Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.", - }, - "mode": &dcl.Property{ - Type: "string", - GoName: "Mode", - GoType: "OSPolicyAssignmentOSPoliciesModeEnum", - Description: "Required. Policy mode Possible values: MODE_UNSPECIFIED, VALIDATION, ENFORCEMENT", - Enum: []string{ - "MODE_UNSPECIFIED", - "VALIDATION", - "ENFORCEMENT", - }, - }, - "resourceGroups": &dcl.Property{ - Type: "array", - GoName: "ResourceGroups", - Description: "Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroups", - Required: []string{ - "resources", - }, - Properties: map[string]*dcl.Property{ - "inventoryFilters": &dcl.Property{ - Type: "array", - GoName: "InventoryFilters", - Description: "List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. 
For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters", - Required: []string{ - "osShortName", - }, - Properties: map[string]*dcl.Property{ - "osShortName": &dcl.Property{ - Type: "string", - GoName: "OSShortName", - Description: "Required. The OS short name", - }, - "osVersion": &dcl.Property{ - Type: "string", - GoName: "OSVersion", - Description: "The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.", - }, - }, - }, - }, - "resources": &dcl.Property{ - Type: "array", - GoName: "Resources", - Description: "Required. List of resources configured for this resource group. 
The resources are executed in the exact order specified here.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "object", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResources", - Required: []string{ - "id", - }, - Properties: map[string]*dcl.Property{ - "exec": &dcl.Property{ - Type: "object", - GoName: "Exec", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec", - Description: "Exec resource", - Conflicts: []string{ - "pkg", - "repository", - "file", - }, - Required: []string{ - "validate", - }, - Properties: map[string]*dcl.Property{ - "enforce": &dcl.Property{ - Type: "object", - GoName: "Enforce", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce", - Description: "What to run to bring this resource into the desired state. An exit code of 100 indicates \"success\", any other exit code indicates a failure running enforce.", - Required: []string{ - "interpreter", - }, - Properties: map[string]*dcl.Property{ - "args": &dcl.Property{ - Type: "array", - GoName: "Args", - Description: "Optional arguments to pass to the source during execution.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "file": &dcl.Property{ - Type: "object", - GoName: "File", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile", - Description: "A remote or local file.", - Conflicts: []string{ - "script", - }, - Properties: map[string]*dcl.Property{ - "allowInsecure": &dcl.Property{ - Type: "boolean", - GoName: "AllowInsecure", - Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.", - }, - "gcs": &dcl.Property{ - Type: "object", - GoName: "Gcs", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs", - Description: "A Cloud Storage object.", - Conflicts: []string{ - "remote", - "localPath", - }, - Required: []string{ - "bucket", - "object", - }, - Properties: map[string]*dcl.Property{ - "bucket": &dcl.Property{ - Type: "string", - GoName: "Bucket", - Description: "Required. Bucket of the Cloud Storage object.", - }, - "generation": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Generation", - Description: "Generation number of the Cloud Storage object.", - }, - "object": &dcl.Property{ - Type: "string", - GoName: "Object", - Description: "Required. Name of the Cloud Storage object.", - }, - }, - }, - "localPath": &dcl.Property{ - Type: "string", - GoName: "LocalPath", - Description: "A local path within the VM to use.", - Conflicts: []string{ - "remote", - "gcs", - }, - }, - "remote": &dcl.Property{ - Type: "object", - GoName: "Remote", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote", - Description: "A generic remote file.", - Conflicts: []string{ - "gcs", - "localPath", - }, - Required: []string{ - "uri", - }, - Properties: map[string]*dcl.Property{ - "sha256Checksum": &dcl.Property{ - Type: "string", - GoName: "Sha256Checksum", - Description: "SHA256 checksum of the remote file.", - }, - "uri": &dcl.Property{ - Type: "string", - GoName: "Uri", - Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", - }, - }, - }, - }, - }, - "interpreter": &dcl.Property{ - Type: "string", - GoName: "Interpreter", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum", - Description: "Required. The script interpreter to use. 
Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL, POWERSHELL", - Enum: []string{ - "INTERPRETER_UNSPECIFIED", - "NONE", - "SHELL", - "POWERSHELL", - }, - }, - "outputFilePath": &dcl.Property{ - Type: "string", - GoName: "OutputFilePath", - Description: "Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 100K bytes.", - }, - "script": &dcl.Property{ - Type: "string", - GoName: "Script", - Description: "An inline script. The size of the script is limited to 1024 characters.", - Conflicts: []string{ - "file", - }, - }, - }, - }, - "validate": &dcl.Property{ - Type: "object", - GoName: "Validate", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate", - Description: "Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates \"in desired state\", and exit code of 101 indicates \"not in desired state\". Any other exit code indicates a failure running validate.", - Required: []string{ - "interpreter", - }, - Properties: map[string]*dcl.Property{ - "args": &dcl.Property{ - Type: "array", - GoName: "Args", - Description: "Optional arguments to pass to the source during execution.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "file": &dcl.Property{ - Type: "object", - GoName: "File", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile", - Description: "A remote or local file.", - Conflicts: []string{ - "script", - }, - Properties: map[string]*dcl.Property{ - "allowInsecure": &dcl.Property{ - Type: "boolean", - GoName: "AllowInsecure", - Description: "Defaults to false. 
When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.", - }, - "gcs": &dcl.Property{ - Type: "object", - GoName: "Gcs", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs", - Description: "A Cloud Storage object.", - Conflicts: []string{ - "remote", - "localPath", - }, - Required: []string{ - "bucket", - "object", - }, - Properties: map[string]*dcl.Property{ - "bucket": &dcl.Property{ - Type: "string", - GoName: "Bucket", - Description: "Required. Bucket of the Cloud Storage object.", - }, - "generation": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Generation", - Description: "Generation number of the Cloud Storage object.", - }, - "object": &dcl.Property{ - Type: "string", - GoName: "Object", - Description: "Required. Name of the Cloud Storage object.", - }, - }, - }, - "localPath": &dcl.Property{ - Type: "string", - GoName: "LocalPath", - Description: "A local path within the VM to use.", - Conflicts: []string{ - "remote", - "gcs", - }, - }, - "remote": &dcl.Property{ - Type: "object", - GoName: "Remote", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote", - Description: "A generic remote file.", - Conflicts: []string{ - "gcs", - "localPath", - }, - Required: []string{ - "uri", - }, - Properties: map[string]*dcl.Property{ - "sha256Checksum": &dcl.Property{ - Type: "string", - GoName: "Sha256Checksum", - Description: "SHA256 checksum of the remote file.", - }, - "uri": &dcl.Property{ - Type: "string", - GoName: "Uri", - Description: "Required. URI from which to fetch the object. 
It should contain both the protocol and path following the format `{protocol}://{location}`.", - }, - }, - }, - }, - }, - "interpreter": &dcl.Property{ - Type: "string", - GoName: "Interpreter", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum", - Description: "Required. The script interpreter to use. Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL, POWERSHELL", - Enum: []string{ - "INTERPRETER_UNSPECIFIED", - "NONE", - "SHELL", - "POWERSHELL", - }, - }, - "outputFilePath": &dcl.Property{ - Type: "string", - GoName: "OutputFilePath", - Description: "Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 100K bytes.", - }, - "script": &dcl.Property{ - Type: "string", - GoName: "Script", - Description: "An inline script. The size of the script is limited to 1024 characters.", - Conflicts: []string{ - "file", - }, - }, - }, - }, - }, - }, - "file": &dcl.Property{ - Type: "object", - GoName: "File", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile", - Description: "File resource", - Conflicts: []string{ - "pkg", - "repository", - "exec", - }, - Required: []string{ - "path", - "state", - }, - Properties: map[string]*dcl.Property{ - "content": &dcl.Property{ - Type: "string", - GoName: "Content", - Description: "A a file with this content. 
The size of the content is limited to 1024 characters.", - Conflicts: []string{ - "file", - }, - }, - "file": &dcl.Property{ - Type: "object", - GoName: "File", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile", - Description: "A remote or local source.", - Conflicts: []string{ - "content", - }, - Properties: map[string]*dcl.Property{ - "allowInsecure": &dcl.Property{ - Type: "boolean", - GoName: "AllowInsecure", - Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.", - }, - "gcs": &dcl.Property{ - Type: "object", - GoName: "Gcs", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs", - Description: "A Cloud Storage object.", - Conflicts: []string{ - "remote", - "localPath", - }, - Required: []string{ - "bucket", - "object", - }, - Properties: map[string]*dcl.Property{ - "bucket": &dcl.Property{ - Type: "string", - GoName: "Bucket", - Description: "Required. Bucket of the Cloud Storage object.", - }, - "generation": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Generation", - Description: "Generation number of the Cloud Storage object.", - }, - "object": &dcl.Property{ - Type: "string", - GoName: "Object", - Description: "Required. 
Name of the Cloud Storage object.", - }, - }, - }, - "localPath": &dcl.Property{ - Type: "string", - GoName: "LocalPath", - Description: "A local path within the VM to use.", - Conflicts: []string{ - "remote", - "gcs", - }, - }, - "remote": &dcl.Property{ - Type: "object", - GoName: "Remote", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote", - Description: "A generic remote file.", - Conflicts: []string{ - "gcs", - "localPath", - }, - Required: []string{ - "uri", - }, - Properties: map[string]*dcl.Property{ - "sha256Checksum": &dcl.Property{ - Type: "string", - GoName: "Sha256Checksum", - Description: "SHA256 checksum of the remote file.", - }, - "uri": &dcl.Property{ - Type: "string", - GoName: "Uri", - Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", - }, - }, - }, - }, - }, - "path": &dcl.Property{ - Type: "string", - GoName: "Path", - Description: "Required. The absolute path of the file within the VM.", - }, - "permissions": &dcl.Property{ - Type: "string", - GoName: "Permissions", - ReadOnly: true, - Description: "Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one bit corresponds to the execute permission. Default behavior is 755. Below are some examples of permissions and their associated values: read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4", - }, - "state": &dcl.Property{ - Type: "string", - GoName: "State", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum", - Description: "Required. Desired state of the file. 
Possible values: OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED, COMPLIANT, NON_COMPLIANT, UNKNOWN, NO_OS_POLICIES_APPLICABLE", - Enum: []string{ - "OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED", - "COMPLIANT", - "NON_COMPLIANT", - "UNKNOWN", - "NO_OS_POLICIES_APPLICABLE", - }, - }, - }, - }, - "id": &dcl.Property{ - Type: "string", - GoName: "Id", - Description: "Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy.", - }, - "pkg": &dcl.Property{ - Type: "object", - GoName: "Pkg", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg", - Description: "Package resource", - Conflicts: []string{ - "repository", - "exec", - "file", - }, - Required: []string{ - "desiredState", - }, - Properties: map[string]*dcl.Property{ - "apt": &dcl.Property{ - Type: "object", - GoName: "Apt", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt", - Description: "A package managed by Apt.", - Conflicts: []string{ - "deb", - "yum", - "zypper", - "rpm", - "googet", - "msi", - }, - Required: []string{ - "name", - }, - Properties: map[string]*dcl.Property{ - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Required. Package name.", - }, - }, - }, - "deb": &dcl.Property{ - Type: "object", - GoName: "Deb", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb", - Description: "A deb package file.", - Conflicts: []string{ - "apt", - "yum", - "zypper", - "rpm", - "googet", - "msi", - }, - Required: []string{ - "source", - }, - Properties: map[string]*dcl.Property{ - "pullDeps": &dcl.Property{ - Type: "boolean", - GoName: "PullDeps", - Description: "Whether dependencies should also be installed. 
- install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`", - }, - "source": &dcl.Property{ - Type: "object", - GoName: "Source", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource", - Description: "Required. A deb package.", - Properties: map[string]*dcl.Property{ - "allowInsecure": &dcl.Property{ - Type: "boolean", - GoName: "AllowInsecure", - Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.", - }, - "gcs": &dcl.Property{ - Type: "object", - GoName: "Gcs", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs", - Description: "A Cloud Storage object.", - Conflicts: []string{ - "remote", - "localPath", - }, - Required: []string{ - "bucket", - "object", - }, - Properties: map[string]*dcl.Property{ - "bucket": &dcl.Property{ - Type: "string", - GoName: "Bucket", - Description: "Required. Bucket of the Cloud Storage object.", - }, - "generation": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Generation", - Description: "Generation number of the Cloud Storage object.", - }, - "object": &dcl.Property{ - Type: "string", - GoName: "Object", - Description: "Required. 
Name of the Cloud Storage object.", - }, - }, - }, - "localPath": &dcl.Property{ - Type: "string", - GoName: "LocalPath", - Description: "A local path within the VM to use.", - Conflicts: []string{ - "remote", - "gcs", - }, - }, - "remote": &dcl.Property{ - Type: "object", - GoName: "Remote", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote", - Description: "A generic remote file.", - Conflicts: []string{ - "gcs", - "localPath", - }, - Required: []string{ - "uri", - }, - Properties: map[string]*dcl.Property{ - "sha256Checksum": &dcl.Property{ - Type: "string", - GoName: "Sha256Checksum", - Description: "SHA256 checksum of the remote file.", - }, - "uri": &dcl.Property{ - Type: "string", - GoName: "Uri", - Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", - }, - }, - }, - }, - }, - }, - }, - "desiredState": &dcl.Property{ - Type: "string", - GoName: "DesiredState", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum", - Description: "Required. The desired state the agent should maintain for this package. Possible values: DESIRED_STATE_UNSPECIFIED, INSTALLED, REMOVED", - Enum: []string{ - "DESIRED_STATE_UNSPECIFIED", - "INSTALLED", - "REMOVED", - }, - }, - "googet": &dcl.Property{ - Type: "object", - GoName: "Googet", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget", - Description: "A package managed by GooGet.", - Conflicts: []string{ - "apt", - "deb", - "yum", - "zypper", - "rpm", - "msi", - }, - Required: []string{ - "name", - }, - Properties: map[string]*dcl.Property{ - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Required. 
Package name.", - }, - }, - }, - "msi": &dcl.Property{ - Type: "object", - GoName: "Msi", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi", - Description: "An MSI package.", - Conflicts: []string{ - "apt", - "deb", - "yum", - "zypper", - "rpm", - "googet", - }, - Required: []string{ - "source", - }, - Properties: map[string]*dcl.Property{ - "properties": &dcl.Property{ - Type: "array", - GoName: "Properties", - Description: "Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "source": &dcl.Property{ - Type: "object", - GoName: "Source", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource", - Description: "Required. The MSI package.", - Properties: map[string]*dcl.Property{ - "allowInsecure": &dcl.Property{ - Type: "boolean", - GoName: "AllowInsecure", - Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.", - }, - "gcs": &dcl.Property{ - Type: "object", - GoName: "Gcs", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs", - Description: "A Cloud Storage object.", - Conflicts: []string{ - "remote", - "localPath", - }, - Required: []string{ - "bucket", - "object", - }, - Properties: map[string]*dcl.Property{ - "bucket": &dcl.Property{ - Type: "string", - GoName: "Bucket", - Description: "Required. Bucket of the Cloud Storage object.", - }, - "generation": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Generation", - Description: "Generation number of the Cloud Storage object.", - }, - "object": &dcl.Property{ - Type: "string", - GoName: "Object", - Description: "Required. 
Name of the Cloud Storage object.", - }, - }, - }, - "localPath": &dcl.Property{ - Type: "string", - GoName: "LocalPath", - Description: "A local path within the VM to use.", - Conflicts: []string{ - "remote", - "gcs", - }, - }, - "remote": &dcl.Property{ - Type: "object", - GoName: "Remote", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote", - Description: "A generic remote file.", - Conflicts: []string{ - "gcs", - "localPath", - }, - Required: []string{ - "uri", - }, - Properties: map[string]*dcl.Property{ - "sha256Checksum": &dcl.Property{ - Type: "string", - GoName: "Sha256Checksum", - Description: "SHA256 checksum of the remote file.", - }, - "uri": &dcl.Property{ - Type: "string", - GoName: "Uri", - Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", - }, - }, - }, - }, - }, - }, - }, - "rpm": &dcl.Property{ - Type: "object", - GoName: "Rpm", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm", - Description: "An rpm package file.", - Conflicts: []string{ - "apt", - "deb", - "yum", - "zypper", - "googet", - "msi", - }, - Required: []string{ - "source", - }, - Properties: map[string]*dcl.Property{ - "pullDeps": &dcl.Property{ - Type: "boolean", - GoName: "PullDeps", - Description: "Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`", - }, - "source": &dcl.Property{ - Type: "object", - GoName: "Source", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource", - Description: "Required. An rpm package.", - Properties: map[string]*dcl.Property{ - "allowInsecure": &dcl.Property{ - Type: "boolean", - GoName: "AllowInsecure", - Description: "Defaults to false. 
When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.", - }, - "gcs": &dcl.Property{ - Type: "object", - GoName: "Gcs", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs", - Description: "A Cloud Storage object.", - Conflicts: []string{ - "remote", - "localPath", - }, - Required: []string{ - "bucket", - "object", - }, - Properties: map[string]*dcl.Property{ - "bucket": &dcl.Property{ - Type: "string", - GoName: "Bucket", - Description: "Required. Bucket of the Cloud Storage object.", - }, - "generation": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Generation", - Description: "Generation number of the Cloud Storage object.", - }, - "object": &dcl.Property{ - Type: "string", - GoName: "Object", - Description: "Required. Name of the Cloud Storage object.", - }, - }, - }, - "localPath": &dcl.Property{ - Type: "string", - GoName: "LocalPath", - Description: "A local path within the VM to use.", - Conflicts: []string{ - "remote", - "gcs", - }, - }, - "remote": &dcl.Property{ - Type: "object", - GoName: "Remote", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote", - Description: "A generic remote file.", - Conflicts: []string{ - "gcs", - "localPath", - }, - Required: []string{ - "uri", - }, - Properties: map[string]*dcl.Property{ - "sha256Checksum": &dcl.Property{ - Type: "string", - GoName: "Sha256Checksum", - Description: "SHA256 checksum of the remote file.", - }, - "uri": &dcl.Property{ - Type: "string", - GoName: "Uri", - Description: "Required. URI from which to fetch the object. 
It should contain both the protocol and path following the format `{protocol}://{location}`.", - }, - }, - }, - }, - }, - }, - }, - "yum": &dcl.Property{ - Type: "object", - GoName: "Yum", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum", - Description: "A package managed by YUM.", - Conflicts: []string{ - "apt", - "deb", - "zypper", - "rpm", - "googet", - "msi", - }, - Required: []string{ - "name", - }, - Properties: map[string]*dcl.Property{ - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Required. Package name.", - }, - }, - }, - "zypper": &dcl.Property{ - Type: "object", - GoName: "Zypper", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper", - Description: "A package managed by Zypper.", - Conflicts: []string{ - "apt", - "deb", - "yum", - "rpm", - "googet", - "msi", - }, - Required: []string{ - "name", - }, - Properties: map[string]*dcl.Property{ - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Required. Package name.", - }, - }, - }, - }, - }, - "repository": &dcl.Property{ - Type: "object", - GoName: "Repository", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository", - Description: "Package repository resource", - Conflicts: []string{ - "pkg", - "exec", - "file", - }, - Properties: map[string]*dcl.Property{ - "apt": &dcl.Property{ - Type: "object", - GoName: "Apt", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt", - Description: "An Apt Repository.", - Conflicts: []string{ - "yum", - "zypper", - "goo", - }, - Required: []string{ - "archiveType", - "uri", - "distribution", - "components", - }, - Properties: map[string]*dcl.Property{ - "archiveType": &dcl.Property{ - Type: "string", - GoName: "ArchiveType", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum", - Description: "Required. Type of archive files in this repository. 
Possible values: ARCHIVE_TYPE_UNSPECIFIED, DEB, DEB_SRC", - Enum: []string{ - "ARCHIVE_TYPE_UNSPECIFIED", - "DEB", - "DEB_SRC", - }, - }, - "components": &dcl.Property{ - Type: "array", - GoName: "Components", - Description: "Required. List of components for this repository. Must contain at least one item.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "distribution": &dcl.Property{ - Type: "string", - GoName: "Distribution", - Description: "Required. Distribution of this repository.", - }, - "gpgKey": &dcl.Property{ - Type: "string", - GoName: "GpgKey", - Description: "URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.", - }, - "uri": &dcl.Property{ - Type: "string", - GoName: "Uri", - Description: "Required. URI for this repository.", - }, - }, - }, - "goo": &dcl.Property{ - Type: "object", - GoName: "Goo", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo", - Description: "A Goo Repository.", - Conflicts: []string{ - "apt", - "yum", - "zypper", - }, - Required: []string{ - "name", - "url", - }, - Properties: map[string]*dcl.Property{ - "name": &dcl.Property{ - Type: "string", - GoName: "Name", - Description: "Required. The name of the repository.", - }, - "url": &dcl.Property{ - Type: "string", - GoName: "Url", - Description: "Required. The url of the repository.", - }, - }, - }, - "yum": &dcl.Property{ - Type: "object", - GoName: "Yum", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum", - Description: "A Yum Repository.", - Conflicts: []string{ - "apt", - "zypper", - "goo", - }, - Required: []string{ - "id", - "baseUrl", - }, - Properties: map[string]*dcl.Property{ - "baseUrl": &dcl.Property{ - Type: "string", - GoName: "BaseUrl", - Description: "Required. 
The location of the repository directory.", - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "The display name of the repository.", - }, - "gpgKeys": &dcl.Property{ - Type: "array", - GoName: "GpgKeys", - Description: "URIs of GPG keys.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "id": &dcl.Property{ - Type: "string", - GoName: "Id", - Description: "Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.", - }, - }, - }, - "zypper": &dcl.Property{ - Type: "object", - GoName: "Zypper", - GoType: "OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper", - Description: "A Zypper Repository.", - Conflicts: []string{ - "apt", - "yum", - "goo", - }, - Required: []string{ - "id", - "baseUrl", - }, - Properties: map[string]*dcl.Property{ - "baseUrl": &dcl.Property{ - Type: "string", - GoName: "BaseUrl", - Description: "Required. The location of the repository directory.", - }, - "displayName": &dcl.Property{ - Type: "string", - GoName: "DisplayName", - Description: "The display name of the repository.", - }, - "gpgKeys": &dcl.Property{ - Type: "array", - GoName: "GpgKeys", - Description: "URIs of GPG keys.", - SendEmpty: true, - ListType: "list", - Items: &dcl.Property{ - Type: "string", - GoType: "string", - }, - }, - "id": &dcl.Property{ - Type: "string", - GoName: "Id", - Description: "Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. 
This id is also used as the unique identifier when checking for GuestPolicy conflicts.", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "project": &dcl.Property{ - Type: "string", - GoName: "Project", - Description: "The project for the resource", - Immutable: true, - ResourceReferences: []*dcl.PropertyResourceReference{ - &dcl.PropertyResourceReference{ - Resource: "Cloudresourcemanager/Project", - Field: "name", - Parent: true, - }, - }, - }, - "reconciling": &dcl.Property{ - Type: "boolean", - GoName: "Reconciling", - ReadOnly: true, - Description: "Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING", - Immutable: true, - }, - "revisionCreateTime": &dcl.Property{ - Type: "string", - Format: "date-time", - GoName: "RevisionCreateTime", - ReadOnly: true, - Description: "Output only. The timestamp that the revision was created.", - Immutable: true, - }, - "revisionId": &dcl.Property{ - Type: "string", - GoName: "RevisionId", - ReadOnly: true, - Description: "Output only. The assignment revision ID A new revision is committed whenever a rollout is triggered for a OS policy assignment", - Immutable: true, - }, - "rollout": &dcl.Property{ - Type: "object", - GoName: "Rollout", - GoType: "OSPolicyAssignmentRollout", - Description: "Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.", - Required: []string{ - "disruptionBudget", - "minWaitDuration", - }, - Properties: map[string]*dcl.Property{ - "disruptionBudget": &dcl.Property{ - Type: "object", - GoName: "DisruptionBudget", - GoType: "OSPolicyAssignmentRolloutDisruptionBudget", - Description: "Required. 
The maximum number (or percentage) of VMs per zone to disrupt at any given moment.", - Properties: map[string]*dcl.Property{ - "fixed": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Fixed", - Description: "Specifies a fixed value.", - Conflicts: []string{ - "percent", - }, - }, - "percent": &dcl.Property{ - Type: "integer", - Format: "int64", - GoName: "Percent", - Description: "Specifies the relative value defined as a percentage, which will be multiplied by a reference value.", - Conflicts: []string{ - "fixed", - }, - }, - }, - }, - "minWaitDuration": &dcl.Property{ - Type: "string", - GoName: "MinWaitDuration", - Description: "Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.", - }, - }, - }, - "rolloutState": &dcl.Property{ - Type: "string", - GoName: "RolloutState", - GoType: "OSPolicyAssignmentRolloutStateEnum", - ReadOnly: true, - Description: "Output only. OS policy assignment rollout state Possible values: ROLLOUT_STATE_UNSPECIFIED, IN_PROGRESS, CANCELLING, CANCELLED, SUCCEEDED", - Immutable: true, - Enum: []string{ - "ROLLOUT_STATE_UNSPECIFIED", - "IN_PROGRESS", - "CANCELLING", - "CANCELLED", - "SUCCEEDED", - }, - }, - "skipAwaitRollout": &dcl.Property{ - Type: "boolean", - GoName: "SkipAwaitRollout", - Description: "Set to true to skip awaiting rollout during resource creation and update.", - Unreadable: true, - }, - "uid": &dcl.Property{ - Type: "string", - GoName: "Uid", - ReadOnly: true, - Description: "Output only. 
Server generated unique id for the OS policy assignment resource.", - Immutable: true, - }, - }, - }, - }, - }, - }, - } -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment_yaml_embed.go deleted file mode 100644 index f552619936..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/os_policy_assignment_yaml_embed.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// GENERATED BY gen_go_data.go -// gen_go_data -package osconfig -var YAML_os_policy_assignment blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/osconfig/os_policy_assignment.yaml - -package osconfig - -// blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/osconfig/os_policy_assignment.yaml -var YAML_os_policy_assignment = []byte("info:\n title: OSConfig/OSPolicyAssignment\n description: Represents an OSPolicyAssignment resource.\n x-dcl-struct-name: OSPolicyAssignment\n x-dcl-has-iam: false\n x-dcl-ref:\n text: API documentation\n url: https://cloud.google.com/compute/docs/osconfig/rest/v1/projects.locations.osPolicyAssignments\npaths:\n get:\n description: The function used to get information about a OSPolicyAssignment\n parameters:\n - name: oSPolicyAssignment\n required: true\n description: A full instance of a OSPolicyAssignment\n apply:\n description: The function used to apply information about a OSPolicyAssignment\n parameters:\n - name: oSPolicyAssignment\n required: true\n description: A full instance of a OSPolicyAssignment\n delete:\n description: The function used to delete a OSPolicyAssignment\n parameters:\n - name: oSPolicyAssignment\n required: true\n description: A full instance of a OSPolicyAssignment\n deleteAll:\n description: The function used to delete all OSPolicyAssignment\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many OSPolicyAssignment\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n OSPolicyAssignment:\n title: OSPolicyAssignment\n x-dcl-id: projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}\n x-dcl-uses-state-hint: true\n x-dcl-parent-container: project\n x-dcl-has-create: true\n x-dcl-has-iam: false\n 
x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - osPolicies\n - instanceFilter\n - rollout\n - project\n - location\n properties:\n baseline:\n type: boolean\n x-dcl-go-name: Baseline\n readOnly: true\n description: Output only. Indicates that this revision has been successfully\n rolled out in this zone and new VMs will be assigned OS policies from\n this revision. For a given OS policy assignment, there is only one revision\n with a value of `true` for this field.\n x-kubernetes-immutable: true\n deleted:\n type: boolean\n x-dcl-go-name: Deleted\n readOnly: true\n description: Output only. Indicates that this revision deletes the OS policy\n assignment.\n x-kubernetes-immutable: true\n description:\n type: string\n x-dcl-go-name: Description\n description: OS policy assignment description. Length of the description\n is limited to 1024 characters.\n etag:\n type: string\n x-dcl-go-name: Etag\n readOnly: true\n description: The etag for this OS policy assignment. If this is provided\n on update, it must match the server's etag.\n x-kubernetes-immutable: true\n instanceFilter:\n type: object\n x-dcl-go-name: InstanceFilter\n x-dcl-go-type: OSPolicyAssignmentInstanceFilter\n description: Required. Filter to select VMs.\n properties:\n all:\n type: boolean\n x-dcl-go-name: All\n description: Target all VMs in the project. If true, no other criteria\n is permitted.\n x-dcl-send-empty: true\n exclusionLabels:\n type: array\n x-dcl-go-name: ExclusionLabels\n description: List of label sets used for VM exclusion. 
If the list has\n more than one label set, the VM is excluded if any of the label sets\n are applicable for the VM.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: OSPolicyAssignmentInstanceFilterExclusionLabels\n properties:\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Labels are identified by key/value pairs in this\n map. A VM should contain all the key/value pairs specified in\n this map to be selected.\n inclusionLabels:\n type: array\n x-dcl-go-name: InclusionLabels\n description: List of label sets used for VM inclusion. If the list has\n more than one `LabelSet`, the VM is included if any of the label sets\n are applicable for the VM.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: OSPolicyAssignmentInstanceFilterInclusionLabels\n properties:\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Labels are identified by key/value pairs in this\n map. A VM should contain all the key/value pairs specified in\n this map to be selected.\n inventories:\n type: array\n x-dcl-go-name: Inventories\n description: List of inventories to select VMs. A VM is selected if\n its inventory data matches at least one of the following inventories.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: OSPolicyAssignmentInstanceFilterInventories\n required:\n - osShortName\n properties:\n osShortName:\n type: string\n x-dcl-go-name: OSShortName\n description: Required. The OS short name\n osVersion:\n type: string\n x-dcl-go-name: OSVersion\n description: The OS version Prefix matches are supported if asterisk(*)\n is provided as the last character. 
For example, to match all\n versions with a major version of `7`, specify the following\n value for this field `7.*` An empty string matches all OS versions.\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: Resource name.\n x-kubernetes-immutable: true\n osPolicies:\n type: array\n x-dcl-go-name: OSPolicies\n description: Required. List of OS policies to be applied to the VMs.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: OSPolicyAssignmentOSPolicies\n required:\n - id\n - mode\n - resourceGroups\n properties:\n allowNoResourceGroupMatch:\n type: boolean\n x-dcl-go-name: AllowNoResourceGroupMatch\n description: This flag determines the OS policy compliance status\n when none of the resource groups within the policy are applicable\n for a VM. Set this value to `true` if the policy needs to be reported\n as compliant even if the policy has nothing to validate or enforce.\n description:\n type: string\n x-dcl-go-name: Description\n description: Policy description. Length of the description is limited\n to 1024 characters.\n id:\n type: string\n x-dcl-go-name: Id\n description: 'Required. The id of the OS policy with the following\n restrictions: * Must contain only lowercase letters, numbers, and\n hyphens. * Must start with a letter. * Must be between 1-63 characters.\n * Must end with a number or a letter. * Must be unique within the\n assignment.'\n mode:\n type: string\n x-dcl-go-name: Mode\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesModeEnum\n description: 'Required. Policy mode Possible values: MODE_UNSPECIFIED,\n VALIDATION, ENFORCEMENT'\n enum:\n - MODE_UNSPECIFIED\n - VALIDATION\n - ENFORCEMENT\n resourceGroups:\n type: array\n x-dcl-go-name: ResourceGroups\n description: Required. List of resource groups for the policy. 
For\n a particular VM, resource groups are evaluated in the order specified\n and the first resource group that is applicable is selected and\n the rest are ignored. If none of the resource groups are applicable\n for a VM, the VM is considered to be non-compliant w.r.t this policy.\n This behavior can be toggled by the flag `allow_no_resource_group_match`\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroups\n required:\n - resources\n properties:\n inventoryFilters:\n type: array\n x-dcl-go-name: InventoryFilters\n description: 'List of inventory filters for the resource group.\n The resources in this resource group are applied to the target\n VM if it satisfies at least one of the following inventory\n filters. For example, to apply this resource group to VMs\n running either `RHEL` or `CentOS` operating systems, specify\n 2 items for the list with following values: inventory_filters[0].os_short_name=''rhel''\n and inventory_filters[1].os_short_name=''centos'' If the list\n is empty, this resource group will be applied to the target\n VM unconditionally.'\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters\n required:\n - osShortName\n properties:\n osShortName:\n type: string\n x-dcl-go-name: OSShortName\n description: Required. The OS short name\n osVersion:\n type: string\n x-dcl-go-name: OSVersion\n description: The OS version Prefix matches are supported\n if asterisk(*) is provided as the last character. For\n example, to match all versions with a major version\n of `7`, specify the following value for this field `7.*`\n An empty string matches all OS versions.\n resources:\n type: array\n x-dcl-go-name: Resources\n description: Required. List of resources configured for this\n resource group. 
The resources are executed in the exact order\n specified here.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResources\n required:\n - id\n properties:\n exec:\n type: object\n x-dcl-go-name: Exec\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec\n description: Exec resource\n x-dcl-conflicts:\n - pkg\n - repository\n - file\n required:\n - validate\n properties:\n enforce:\n type: object\n x-dcl-go-name: Enforce\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce\n description: What to run to bring this resource into\n the desired state. An exit code of 100 indicates\n \"success\", any other exit code indicates a failure\n running enforce.\n required:\n - interpreter\n properties:\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional arguments to pass to the\n source during execution.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n file:\n type: object\n x-dcl-go-name: File\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile\n description: A remote or local file.\n x-dcl-conflicts:\n - script\n properties:\n allowInsecure:\n type: boolean\n x-dcl-go-name: AllowInsecure\n description: 'Defaults to false. When false,\n files are subject to validations based on\n the file type: Remote: A checksum must be\n specified. Cloud Storage: An object generation\n number must be specified.'\n gcs:\n type: object\n x-dcl-go-name: Gcs\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs\n description: A Cloud Storage object.\n x-dcl-conflicts:\n - remote\n - localPath\n required:\n - bucket\n - object\n properties:\n bucket:\n type: string\n x-dcl-go-name: Bucket\n description: Required. 
Bucket of the Cloud\n Storage object.\n generation:\n type: integer\n format: int64\n x-dcl-go-name: Generation\n description: Generation number of the\n Cloud Storage object.\n object:\n type: string\n x-dcl-go-name: Object\n description: Required. Name of the Cloud\n Storage object.\n localPath:\n type: string\n x-dcl-go-name: LocalPath\n description: A local path within the VM to\n use.\n x-dcl-conflicts:\n - remote\n - gcs\n remote:\n type: object\n x-dcl-go-name: Remote\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote\n description: A generic remote file.\n x-dcl-conflicts:\n - gcs\n - localPath\n required:\n - uri\n properties:\n sha256Checksum:\n type: string\n x-dcl-go-name: Sha256Checksum\n description: SHA256 checksum of the remote\n file.\n uri:\n type: string\n x-dcl-go-name: Uri\n description: Required. URI from which\n to fetch the object. It should contain\n both the protocol and path following\n the format `{protocol}://{location}`.\n interpreter:\n type: string\n x-dcl-go-name: Interpreter\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum\n description: 'Required. The script interpreter\n to use. Possible values: INTERPRETER_UNSPECIFIED,\n NONE, SHELL, POWERSHELL'\n enum:\n - INTERPRETER_UNSPECIFIED\n - NONE\n - SHELL\n - POWERSHELL\n outputFilePath:\n type: string\n x-dcl-go-name: OutputFilePath\n description: Only recorded for enforce Exec. Path\n to an output file (that is created by this Exec)\n whose content will be recorded in OSPolicyResourceCompliance\n after a successful run. Absence or failure to\n read this file will result in this ExecResource\n being non-compliant. Output file size is limited\n to 100K bytes.\n script:\n type: string\n x-dcl-go-name: Script\n description: An inline script. 
The size of the\n script is limited to 1024 characters.\n x-dcl-conflicts:\n - file\n validate:\n type: object\n x-dcl-go-name: Validate\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate\n description: Required. What to run to validate this\n resource is in the desired state. An exit code of\n 100 indicates \"in desired state\", and exit code\n of 101 indicates \"not in desired state\". Any other\n exit code indicates a failure running validate.\n required:\n - interpreter\n properties:\n args:\n type: array\n x-dcl-go-name: Args\n description: Optional arguments to pass to the\n source during execution.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n file:\n type: object\n x-dcl-go-name: File\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile\n description: A remote or local file.\n x-dcl-conflicts:\n - script\n properties:\n allowInsecure:\n type: boolean\n x-dcl-go-name: AllowInsecure\n description: 'Defaults to false. When false,\n files are subject to validations based on\n the file type: Remote: A checksum must be\n specified. Cloud Storage: An object generation\n number must be specified.'\n gcs:\n type: object\n x-dcl-go-name: Gcs\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs\n description: A Cloud Storage object.\n x-dcl-conflicts:\n - remote\n - localPath\n required:\n - bucket\n - object\n properties:\n bucket:\n type: string\n x-dcl-go-name: Bucket\n description: Required. Bucket of the Cloud\n Storage object.\n generation:\n type: integer\n format: int64\n x-dcl-go-name: Generation\n description: Generation number of the\n Cloud Storage object.\n object:\n type: string\n x-dcl-go-name: Object\n description: Required. 
Name of the Cloud\n Storage object.\n localPath:\n type: string\n x-dcl-go-name: LocalPath\n description: A local path within the VM to\n use.\n x-dcl-conflicts:\n - remote\n - gcs\n remote:\n type: object\n x-dcl-go-name: Remote\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote\n description: A generic remote file.\n x-dcl-conflicts:\n - gcs\n - localPath\n required:\n - uri\n properties:\n sha256Checksum:\n type: string\n x-dcl-go-name: Sha256Checksum\n description: SHA256 checksum of the remote\n file.\n uri:\n type: string\n x-dcl-go-name: Uri\n description: Required. URI from which\n to fetch the object. It should contain\n both the protocol and path following\n the format `{protocol}://{location}`.\n interpreter:\n type: string\n x-dcl-go-name: Interpreter\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum\n description: 'Required. The script interpreter\n to use. Possible values: INTERPRETER_UNSPECIFIED,\n NONE, SHELL, POWERSHELL'\n enum:\n - INTERPRETER_UNSPECIFIED\n - NONE\n - SHELL\n - POWERSHELL\n outputFilePath:\n type: string\n x-dcl-go-name: OutputFilePath\n description: Only recorded for enforce Exec. Path\n to an output file (that is created by this Exec)\n whose content will be recorded in OSPolicyResourceCompliance\n after a successful run. Absence or failure to\n read this file will result in this ExecResource\n being non-compliant. Output file size is limited\n to 100K bytes.\n script:\n type: string\n x-dcl-go-name: Script\n description: An inline script. 
The size of the\n script is limited to 1024 characters.\n x-dcl-conflicts:\n - file\n file:\n type: object\n x-dcl-go-name: File\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile\n description: File resource\n x-dcl-conflicts:\n - pkg\n - repository\n - exec\n required:\n - path\n - state\n properties:\n content:\n type: string\n x-dcl-go-name: Content\n description: A a file with this content. The size\n of the content is limited to 1024 characters.\n x-dcl-conflicts:\n - file\n file:\n type: object\n x-dcl-go-name: File\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile\n description: A remote or local source.\n x-dcl-conflicts:\n - content\n properties:\n allowInsecure:\n type: boolean\n x-dcl-go-name: AllowInsecure\n description: 'Defaults to false. When false, files\n are subject to validations based on the file\n type: Remote: A checksum must be specified.\n Cloud Storage: An object generation number must\n be specified.'\n gcs:\n type: object\n x-dcl-go-name: Gcs\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs\n description: A Cloud Storage object.\n x-dcl-conflicts:\n - remote\n - localPath\n required:\n - bucket\n - object\n properties:\n bucket:\n type: string\n x-dcl-go-name: Bucket\n description: Required. Bucket of the Cloud\n Storage object.\n generation:\n type: integer\n format: int64\n x-dcl-go-name: Generation\n description: Generation number of the Cloud\n Storage object.\n object:\n type: string\n x-dcl-go-name: Object\n description: Required. 
Name of the Cloud Storage\n object.\n localPath:\n type: string\n x-dcl-go-name: LocalPath\n description: A local path within the VM to use.\n x-dcl-conflicts:\n - remote\n - gcs\n remote:\n type: object\n x-dcl-go-name: Remote\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote\n description: A generic remote file.\n x-dcl-conflicts:\n - gcs\n - localPath\n required:\n - uri\n properties:\n sha256Checksum:\n type: string\n x-dcl-go-name: Sha256Checksum\n description: SHA256 checksum of the remote\n file.\n uri:\n type: string\n x-dcl-go-name: Uri\n description: Required. URI from which to fetch\n the object. It should contain both the protocol\n and path following the format `{protocol}://{location}`.\n path:\n type: string\n x-dcl-go-name: Path\n description: Required. The absolute path of the file\n within the VM.\n permissions:\n type: string\n x-dcl-go-name: Permissions\n readOnly: true\n description: 'Consists of three octal digits which\n represent, in order, the permissions of the owner,\n group, and other users for the file (similarly to\n the numeric mode used in the linux chmod utility).\n Each digit represents a three bit number with the\n 4 bit corresponding to the read permissions, the\n 2 bit corresponds to the write bit, and the one\n bit corresponds to the execute permission. Default\n behavior is 755. Below are some examples of permissions\n and their associated values: read, write, and execute:\n 7 read and execute: 5 read and write: 6 read only:\n 4'\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum\n description: 'Required. 
Desired state of the file.\n Possible values: OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED,\n COMPLIANT, NON_COMPLIANT, UNKNOWN, NO_OS_POLICIES_APPLICABLE'\n enum:\n - OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED\n - COMPLIANT\n - NON_COMPLIANT\n - UNKNOWN\n - NO_OS_POLICIES_APPLICABLE\n id:\n type: string\n x-dcl-go-name: Id\n description: 'Required. The id of the resource with the\n following restrictions: * Must contain only lowercase\n letters, numbers, and hyphens. * Must start with a letter.\n * Must be between 1-63 characters. * Must end with a\n number or a letter. * Must be unique within the OS policy.'\n pkg:\n type: object\n x-dcl-go-name: Pkg\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg\n description: Package resource\n x-dcl-conflicts:\n - repository\n - exec\n - file\n required:\n - desiredState\n properties:\n apt:\n type: object\n x-dcl-go-name: Apt\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt\n description: A package managed by Apt.\n x-dcl-conflicts:\n - deb\n - yum\n - zypper\n - rpm\n - googet\n - msi\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Package name.\n deb:\n type: object\n x-dcl-go-name: Deb\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb\n description: A deb package file.\n x-dcl-conflicts:\n - apt\n - yum\n - zypper\n - rpm\n - googet\n - msi\n required:\n - source\n properties:\n pullDeps:\n type: boolean\n x-dcl-go-name: PullDeps\n description: 'Whether dependencies should also\n be installed. - install when false: `dpkg -i\n package` - install when true: `apt-get update\n && apt-get -y install package.deb`'\n source:\n type: object\n x-dcl-go-name: Source\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource\n description: Required. A deb package.\n properties:\n allowInsecure:\n type: boolean\n x-dcl-go-name: AllowInsecure\n description: 'Defaults to false. 
When false,\n files are subject to validations based on\n the file type: Remote: A checksum must be\n specified. Cloud Storage: An object generation\n number must be specified.'\n gcs:\n type: object\n x-dcl-go-name: Gcs\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs\n description: A Cloud Storage object.\n x-dcl-conflicts:\n - remote\n - localPath\n required:\n - bucket\n - object\n properties:\n bucket:\n type: string\n x-dcl-go-name: Bucket\n description: Required. Bucket of the Cloud\n Storage object.\n generation:\n type: integer\n format: int64\n x-dcl-go-name: Generation\n description: Generation number of the\n Cloud Storage object.\n object:\n type: string\n x-dcl-go-name: Object\n description: Required. Name of the Cloud\n Storage object.\n localPath:\n type: string\n x-dcl-go-name: LocalPath\n description: A local path within the VM to\n use.\n x-dcl-conflicts:\n - remote\n - gcs\n remote:\n type: object\n x-dcl-go-name: Remote\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote\n description: A generic remote file.\n x-dcl-conflicts:\n - gcs\n - localPath\n required:\n - uri\n properties:\n sha256Checksum:\n type: string\n x-dcl-go-name: Sha256Checksum\n description: SHA256 checksum of the remote\n file.\n uri:\n type: string\n x-dcl-go-name: Uri\n description: Required. URI from which\n to fetch the object. It should contain\n both the protocol and path following\n the format `{protocol}://{location}`.\n desiredState:\n type: string\n x-dcl-go-name: DesiredState\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum\n description: 'Required. The desired state the agent\n should maintain for this package. 
Possible values:\n DESIRED_STATE_UNSPECIFIED, INSTALLED, REMOVED'\n enum:\n - DESIRED_STATE_UNSPECIFIED\n - INSTALLED\n - REMOVED\n googet:\n type: object\n x-dcl-go-name: Googet\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget\n description: A package managed by GooGet.\n x-dcl-conflicts:\n - apt\n - deb\n - yum\n - zypper\n - rpm\n - msi\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Package name.\n msi:\n type: object\n x-dcl-go-name: Msi\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi\n description: An MSI package.\n x-dcl-conflicts:\n - apt\n - deb\n - yum\n - zypper\n - rpm\n - googet\n required:\n - source\n properties:\n properties:\n type: array\n x-dcl-go-name: Properties\n description: Additional properties to use during\n installation. This should be in the format of\n Property=Setting. Appended to the defaults of\n `ACTION=INSTALL REBOOT=ReallySuppress`.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n source:\n type: object\n x-dcl-go-name: Source\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource\n description: Required. The MSI package.\n properties:\n allowInsecure:\n type: boolean\n x-dcl-go-name: AllowInsecure\n description: 'Defaults to false. When false,\n files are subject to validations based on\n the file type: Remote: A checksum must be\n specified. Cloud Storage: An object generation\n number must be specified.'\n gcs:\n type: object\n x-dcl-go-name: Gcs\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs\n description: A Cloud Storage object.\n x-dcl-conflicts:\n - remote\n - localPath\n required:\n - bucket\n - object\n properties:\n bucket:\n type: string\n x-dcl-go-name: Bucket\n description: Required. 
Bucket of the Cloud\n Storage object.\n generation:\n type: integer\n format: int64\n x-dcl-go-name: Generation\n description: Generation number of the\n Cloud Storage object.\n object:\n type: string\n x-dcl-go-name: Object\n description: Required. Name of the Cloud\n Storage object.\n localPath:\n type: string\n x-dcl-go-name: LocalPath\n description: A local path within the VM to\n use.\n x-dcl-conflicts:\n - remote\n - gcs\n remote:\n type: object\n x-dcl-go-name: Remote\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote\n description: A generic remote file.\n x-dcl-conflicts:\n - gcs\n - localPath\n required:\n - uri\n properties:\n sha256Checksum:\n type: string\n x-dcl-go-name: Sha256Checksum\n description: SHA256 checksum of the remote\n file.\n uri:\n type: string\n x-dcl-go-name: Uri\n description: Required. URI from which\n to fetch the object. It should contain\n both the protocol and path following\n the format `{protocol}://{location}`.\n rpm:\n type: object\n x-dcl-go-name: Rpm\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm\n description: An rpm package file.\n x-dcl-conflicts:\n - apt\n - deb\n - yum\n - zypper\n - googet\n - msi\n required:\n - source\n properties:\n pullDeps:\n type: boolean\n x-dcl-go-name: PullDeps\n description: 'Whether dependencies should also\n be installed. - install when false: `rpm --upgrade\n --replacepkgs package.rpm` - install when true:\n `yum -y install package.rpm` or `zypper -y install\n package.rpm`'\n source:\n type: object\n x-dcl-go-name: Source\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource\n description: Required. An rpm package.\n properties:\n allowInsecure:\n type: boolean\n x-dcl-go-name: AllowInsecure\n description: 'Defaults to false. When false,\n files are subject to validations based on\n the file type: Remote: A checksum must be\n specified. 
Cloud Storage: An object generation\n number must be specified.'\n gcs:\n type: object\n x-dcl-go-name: Gcs\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs\n description: A Cloud Storage object.\n x-dcl-conflicts:\n - remote\n - localPath\n required:\n - bucket\n - object\n properties:\n bucket:\n type: string\n x-dcl-go-name: Bucket\n description: Required. Bucket of the Cloud\n Storage object.\n generation:\n type: integer\n format: int64\n x-dcl-go-name: Generation\n description: Generation number of the\n Cloud Storage object.\n object:\n type: string\n x-dcl-go-name: Object\n description: Required. Name of the Cloud\n Storage object.\n localPath:\n type: string\n x-dcl-go-name: LocalPath\n description: A local path within the VM to\n use.\n x-dcl-conflicts:\n - remote\n - gcs\n remote:\n type: object\n x-dcl-go-name: Remote\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote\n description: A generic remote file.\n x-dcl-conflicts:\n - gcs\n - localPath\n required:\n - uri\n properties:\n sha256Checksum:\n type: string\n x-dcl-go-name: Sha256Checksum\n description: SHA256 checksum of the remote\n file.\n uri:\n type: string\n x-dcl-go-name: Uri\n description: Required. URI from which\n to fetch the object. It should contain\n both the protocol and path following\n the format `{protocol}://{location}`.\n yum:\n type: object\n x-dcl-go-name: Yum\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum\n description: A package managed by YUM.\n x-dcl-conflicts:\n - apt\n - deb\n - zypper\n - rpm\n - googet\n - msi\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. 
Package name.\n zypper:\n type: object\n x-dcl-go-name: Zypper\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper\n description: A package managed by Zypper.\n x-dcl-conflicts:\n - apt\n - deb\n - yum\n - rpm\n - googet\n - msi\n required:\n - name\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. Package name.\n repository:\n type: object\n x-dcl-go-name: Repository\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository\n description: Package repository resource\n x-dcl-conflicts:\n - pkg\n - exec\n - file\n properties:\n apt:\n type: object\n x-dcl-go-name: Apt\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt\n description: An Apt Repository.\n x-dcl-conflicts:\n - yum\n - zypper\n - goo\n required:\n - archiveType\n - uri\n - distribution\n - components\n properties:\n archiveType:\n type: string\n x-dcl-go-name: ArchiveType\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum\n description: 'Required. Type of archive files\n in this repository. Possible values: ARCHIVE_TYPE_UNSPECIFIED,\n DEB, DEB_SRC'\n enum:\n - ARCHIVE_TYPE_UNSPECIFIED\n - DEB\n - DEB_SRC\n components:\n type: array\n x-dcl-go-name: Components\n description: Required. List of components for\n this repository. Must contain at least one item.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n distribution:\n type: string\n x-dcl-go-name: Distribution\n description: Required. Distribution of this repository.\n gpgKey:\n type: string\n x-dcl-go-name: GpgKey\n description: URI of the key file for this repository.\n The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.\n uri:\n type: string\n x-dcl-go-name: Uri\n description: Required. 
URI for this repository.\n goo:\n type: object\n x-dcl-go-name: Goo\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo\n description: A Goo Repository.\n x-dcl-conflicts:\n - apt\n - yum\n - zypper\n required:\n - name\n - url\n properties:\n name:\n type: string\n x-dcl-go-name: Name\n description: Required. The name of the repository.\n url:\n type: string\n x-dcl-go-name: Url\n description: Required. The url of the repository.\n yum:\n type: object\n x-dcl-go-name: Yum\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum\n description: A Yum Repository.\n x-dcl-conflicts:\n - apt\n - zypper\n - goo\n required:\n - id\n - baseUrl\n properties:\n baseUrl:\n type: string\n x-dcl-go-name: BaseUrl\n description: Required. The location of the repository\n directory.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: The display name of the repository.\n gpgKeys:\n type: array\n x-dcl-go-name: GpgKeys\n description: URIs of GPG keys.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n id:\n type: string\n x-dcl-go-name: Id\n description: Required. A one word, unique name\n for this repository. This is the `repo id` in\n the yum config file and also the `display_name`\n if `display_name` is omitted. This id is also\n used as the unique identifier when checking\n for resource conflicts.\n zypper:\n type: object\n x-dcl-go-name: Zypper\n x-dcl-go-type: OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper\n description: A Zypper Repository.\n x-dcl-conflicts:\n - apt\n - yum\n - goo\n required:\n - id\n - baseUrl\n properties:\n baseUrl:\n type: string\n x-dcl-go-name: BaseUrl\n description: Required. 
The location of the repository\n directory.\n displayName:\n type: string\n x-dcl-go-name: DisplayName\n description: The display name of the repository.\n gpgKeys:\n type: array\n x-dcl-go-name: GpgKeys\n description: URIs of GPG keys.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n id:\n type: string\n x-dcl-go-name: Id\n description: Required. A one word, unique name\n for this repository. This is the `repo id` in\n the zypper config file and also the `display_name`\n if `display_name` is omitted. This id is also\n used as the unique identifier when checking\n for GuestPolicy conflicts.\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n reconciling:\n type: boolean\n x-dcl-go-name: Reconciling\n readOnly: true\n description: 'Output only. Indicates that reconciliation is in progress\n for the revision. This value is `true` when the `rollout_state` is one\n of: * IN_PROGRESS * CANCELLING'\n x-kubernetes-immutable: true\n revisionCreateTime:\n type: string\n format: date-time\n x-dcl-go-name: RevisionCreateTime\n readOnly: true\n description: Output only. The timestamp that the revision was created.\n x-kubernetes-immutable: true\n revisionId:\n type: string\n x-dcl-go-name: RevisionId\n readOnly: true\n description: Output only. The assignment revision ID A new revision is committed\n whenever a rollout is triggered for a OS policy assignment\n x-kubernetes-immutable: true\n rollout:\n type: object\n x-dcl-go-name: Rollout\n x-dcl-go-type: OSPolicyAssignmentRollout\n description: 'Required. Rollout to deploy the OS policy assignment. 
A rollout\n is triggered in the following situations: 1) OSPolicyAssignment is created.\n 2) OSPolicyAssignment is updated and the update contains changes to one\n of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment\n is deleted.'\n required:\n - disruptionBudget\n - minWaitDuration\n properties:\n disruptionBudget:\n type: object\n x-dcl-go-name: DisruptionBudget\n x-dcl-go-type: OSPolicyAssignmentRolloutDisruptionBudget\n description: Required. The maximum number (or percentage) of VMs per\n zone to disrupt at any given moment.\n properties:\n fixed:\n type: integer\n format: int64\n x-dcl-go-name: Fixed\n description: Specifies a fixed value.\n x-dcl-conflicts:\n - percent\n percent:\n type: integer\n format: int64\n x-dcl-go-name: Percent\n description: Specifies the relative value defined as a percentage,\n which will be multiplied by a reference value.\n x-dcl-conflicts:\n - fixed\n minWaitDuration:\n type: string\n x-dcl-go-name: MinWaitDuration\n description: Required. This determines the minimum duration of time\n to wait after the configuration changes are applied through the current\n rollout. A VM continues to count towards the `disruption_budget` at\n least until this duration of time has passed after configuration changes\n are applied.\n rolloutState:\n type: string\n x-dcl-go-name: RolloutState\n x-dcl-go-type: OSPolicyAssignmentRolloutStateEnum\n readOnly: true\n description: 'Output only. OS policy assignment rollout state Possible values:\n ROLLOUT_STATE_UNSPECIFIED, IN_PROGRESS, CANCELLING, CANCELLED, SUCCEEDED'\n x-kubernetes-immutable: true\n enum:\n - ROLLOUT_STATE_UNSPECIFIED\n - IN_PROGRESS\n - CANCELLING\n - CANCELLED\n - SUCCEEDED\n skipAwaitRollout:\n type: boolean\n x-dcl-go-name: SkipAwaitRollout\n description: Set to true to skip awaiting rollout during resource creation\n and update.\n x-dcl-mutable-unreadable: true\n uid:\n type: string\n x-dcl-go-name: Uid\n readOnly: true\n description: Output only. 
Server generated unique id for the OS policy assignment\n resource.\n x-kubernetes-immutable: true\n") - -// 73353 bytes -// MD5: fb683ef5ed536f19f669a17a052fb49d diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/osconfig_ga_utils.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/osconfig_ga_utils.go deleted file mode 100644 index b73421c8cb..0000000000 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/osconfig_ga_utils.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2023 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Package osconfig defines types and functions for managing osconfig GCP resources. -package osconfig - -import ( - "bytes" - "context" - "time" - - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations" -) - -// Returns true if m and n represent the same floating point value of seconds. 
-func canonicalizeOSPolicyAssignmentRolloutMinWaitDuration(m, n interface{}) bool { - mStr := dcl.ValueOrEmptyString(m) - nStr := dcl.ValueOrEmptyString(n) - if mStr == "" && nStr == "" { - return true - } - if mStr == "" || nStr == "" { - return false - } - mDuration, err := time.ParseDuration(mStr) - if err != nil { - return false - } - nDuration, err := time.ParseDuration(nStr) - if err != nil { - return false - } - return mDuration == nDuration -} - -// Waits for os policy assignment to be done reconciling before deletion. -func (r *OSPolicyAssignment) waitForNotReconciling(ctx context.Context, client *Client) error { - return dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - nr, err := client.GetOSPolicyAssignment(ctx, r) - if err != nil { - return nil, err - } - if dcl.ValueOrEmptyBool(nr.Reconciling) { - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, nil - }, client.Config.RetryProvider) -} - -func (op *createOSPolicyAssignmentOperation) do(ctx context.Context, r *OSPolicyAssignment, c *Client) error { - c.Config.Logger.InfoWithContextf(ctx, "Attempting to create %v", r) - u, err := r.createURL(c.Config.BasePath) - if err != nil { - return err - } - - req, err := r.marshal(c) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "POST", u, bytes.NewBuffer(req), c.Config.RetryProvider) - if err != nil { - return err - } - - if !dcl.ValueOrEmptyBool(r.SkipAwaitRollout) { - // wait for object to be created. 
- var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - c.Config.Logger.Warningf("Creation failed after waiting for operation: %v", err) - return err - } - c.Config.Logger.InfoWithContextf(ctx, "Successfully waited for operation") - op.response, _ = o.FirstResponse() - } - - if _, err := c.GetOSPolicyAssignment(ctx, r); err != nil { - c.Config.Logger.WarningWithContextf(ctx, "get returned error: %v", err) - return err - } - - return nil -} - -func (op *updateOSPolicyAssignmentUpdateOSPolicyAssignmentOperation) do(ctx context.Context, r *OSPolicyAssignment, c *Client) error { - _, err := c.GetOSPolicyAssignment(ctx, r) - if err != nil { - return err - } - - u, err := r.updateURL(c.Config.BasePath, "UpdateOSPolicyAssignment") - if err != nil { - return err - } - diffs := make([]*dcl.FieldDiff, 0) - for _, d := range op.FieldDiffs { - // skipAwaitUpdate is a custom field not available in the API and should not be included in an update mask - if d.FieldName != "SkipAwaitRollout" { - diffs = append(diffs, d) - } - } - if len(diffs) == 0 { - // Only diff was skipAwaitUpdate, return success - return nil - } - mask := dcl.TopLevelUpdateMask(diffs) - u, err = dcl.AddQueryParams(u, map[string]string{"updateMask": mask}) - if err != nil { - return err - } - - req, err := newUpdateOSPolicyAssignmentUpdateOSPolicyAssignmentRequest(ctx, r, c) - if err != nil { - return err - } - - c.Config.Logger.InfoWithContextf(ctx, "Created update: %#v", req) - body, err := marshalUpdateOSPolicyAssignmentUpdateOSPolicyAssignmentRequest(c, req) - if err != nil { - return err - } - resp, err := dcl.SendRequest(ctx, c.Config, "PATCH", u, bytes.NewBuffer(body), c.Config.RetryProvider) - if err != nil { - return err - } - - if !dcl.ValueOrEmptyBool(r.SkipAwaitRollout) { - var o operations.StandardGCPOperation - if 
err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - err = o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET") - - if err != nil { - return err - } - } - - return nil -} - -func (op *deleteOSPolicyAssignmentOperation) do(ctx context.Context, r *OSPolicyAssignment, c *Client) error { - r, err := c.GetOSPolicyAssignment(ctx, r) - if err != nil { - if dcl.IsNotFound(err) { - c.Config.Logger.InfoWithContextf(ctx, "OSPolicyAssignment not found, returning. Original error: %v", err) - return nil - } - c.Config.Logger.WarningWithContextf(ctx, "GetOSPolicyAssignment checking for existence. error: %v", err) - return err - } - err = r.waitForNotReconciling(ctx, c) - if err != nil { - return err - } - u, err := r.deleteURL(c.Config.BasePath) - if err != nil { - return err - } - - // Delete should never have a body - body := &bytes.Buffer{} - resp, err := dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider) - if err != nil { - return err - } - - if !dcl.ValueOrEmptyBool(r.SkipAwaitRollout) { - // wait for object to be deleted. - var o operations.StandardGCPOperation - if err := dcl.ParseResponse(resp.Response, &o); err != nil { - return err - } - if err := o.Wait(context.WithValue(ctx, dcl.DoNotLogRequestsKey, true), c.Config, r.basePath(), "GET"); err != nil { - return err - } - } - - // We saw a race condition where for some successful delete operation, the Get calls returned resources for a short duration. - // This is the reason we are adding retry to handle that case. 
- retriesRemaining := 10 - dcl.Do(ctx, func(ctx context.Context) (*dcl.RetryDetails, error) { - _, err := c.GetOSPolicyAssignment(ctx, r) - if dcl.IsNotFound(err) { - return nil, nil - } - if retriesRemaining > 0 { - retriesRemaining-- - return &dcl.RetryDetails{}, dcl.OperationNotDone{} - } - return nil, dcl.NotDeletedError{ExistingResource: r} - }, c.Config.RetryProvider) - return nil -} diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool.go index 01f43ad162..69732cc51b 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool.go @@ -661,9 +661,10 @@ func (r *CaPoolIssuancePolicyBaselineValuesKeyUsageUnknownExtendedKeyUsages) Has } type CaPoolIssuancePolicyBaselineValuesCaOptions struct { - empty bool `json:"-"` - IsCa *bool `json:"isCa"` - MaxIssuerPathLength *int64 `json:"maxIssuerPathLength"` + empty bool `json:"-"` + IsCa *bool `json:"isCa"` + MaxIssuerPathLength *int64 `json:"maxIssuerPathLength"` + ZeroMaxIssuerPathLength *bool `json:"zeroMaxIssuerPathLength"` } type jsonCaPoolIssuancePolicyBaselineValuesCaOptions CaPoolIssuancePolicyBaselineValuesCaOptions @@ -685,6 +686,8 @@ func (r *CaPoolIssuancePolicyBaselineValuesCaOptions) UnmarshalJSON(data []byte) r.MaxIssuerPathLength = res.MaxIssuerPathLength + r.ZeroMaxIssuerPathLength = res.ZeroMaxIssuerPathLength + } return nil } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool.yaml 
b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool.yaml index 29fa530ef1..912c8cbbe7 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool.yaml @@ -244,6 +244,13 @@ components: If this value is less than 0, the request will fail. If this value is missing, the max path length will be omitted from the CA certificate. + zeroMaxIssuerPathLength: + type: boolean + x-dcl-go-name: ZeroMaxIssuerPathLength + description: Optional. When true, the "path length constraint" + in Basic Constraints extension will be set to 0. if both max_issuer_path_length + and zero_max_issuer_path_length are unset, the max path length + will be omitted from the CA certificate. keyUsage: type: object x-dcl-go-name: KeyUsage diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool_internal.go index 6ac1516b5c..2ee4963b27 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool_internal.go @@ -2021,6 +2021,11 @@ func canonicalizeCaPoolIssuancePolicyBaselineValuesCaOptions(des, initial *CaPoo } else { cDes.MaxIssuerPathLength = des.MaxIssuerPathLength } + if dcl.BoolCanonicalize(des.ZeroMaxIssuerPathLength, initial.ZeroMaxIssuerPathLength) || dcl.IsZeroValue(des.ZeroMaxIssuerPathLength) { + 
cDes.ZeroMaxIssuerPathLength = initial.ZeroMaxIssuerPathLength + } else { + cDes.ZeroMaxIssuerPathLength = des.ZeroMaxIssuerPathLength + } return cDes } @@ -2070,6 +2075,9 @@ func canonicalizeNewCaPoolIssuancePolicyBaselineValuesCaOptions(c *Client, des, if dcl.BoolCanonicalize(des.IsCa, nw.IsCa) { nw.IsCa = des.IsCa } + if dcl.BoolCanonicalize(des.ZeroMaxIssuerPathLength, nw.ZeroMaxIssuerPathLength) { + nw.ZeroMaxIssuerPathLength = des.ZeroMaxIssuerPathLength + } return nw } @@ -3691,6 +3699,13 @@ func compareCaPoolIssuancePolicyBaselineValuesCaOptionsNewStyle(d, a interface{} } diffs = append(diffs, ds...) } + + if ds, err := dcl.Diff(desired.ZeroMaxIssuerPathLength, actual.ZeroMaxIssuerPathLength, dcl.DiffInfo{OperationSelector: dcl.TriggersOperation("updateCaPoolUpdateCaPoolOperation")}, fn.AddNest("ZeroMaxIssuerPathLength")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } return diffs, nil } @@ -4818,7 +4833,7 @@ func expandCaPoolIssuancePolicyBaselineValues(c *Client, f *CaPoolIssuancePolicy } else if !dcl.IsEmptyValueIndirect(v) { m["keyUsage"] = v } - if v, err := expandCaPoolIssuancePolicyBaselineValuesCaOptions(c, f.CaOptions, res); err != nil { + if v, err := expandCaPoolIssuancePolicyBaselineValuesCAOptions(c, f.CaOptions, res); err != nil { return nil, fmt.Errorf("error expanding CaOptions into caOptions: %w", err) } else if !dcl.IsEmptyValueIndirect(v) { m["caOptions"] = v @@ -4854,7 +4869,7 @@ func flattenCaPoolIssuancePolicyBaselineValues(c *Client, i interface{}, res *Ca return EmptyCaPoolIssuancePolicyBaselineValues } r.KeyUsage = flattenCaPoolIssuancePolicyBaselineValuesKeyUsage(c, m["keyUsage"], res) - r.CaOptions = flattenCaPoolIssuancePolicyBaselineValuesCaOptions(c, m["caOptions"], res) + r.CaOptions = flattenCaPoolIssuancePolicyBaselineValuesCAOptions(c, m["caOptions"], res) r.PolicyIds = flattenCaPoolIssuancePolicyBaselineValuesPolicyIdsSlice(c, m["policyIds"], res) r.AiaOcspServers = 
dcl.FlattenStringSlice(m["aiaOcspServers"]) r.AdditionalExtensions = flattenCaPoolIssuancePolicyBaselineValuesAdditionalExtensionsSlice(c, m["additionalExtensions"], res) @@ -5479,6 +5494,9 @@ func expandCaPoolIssuancePolicyBaselineValuesCaOptions(c *Client, f *CaPoolIssua if v := f.MaxIssuerPathLength; !dcl.IsEmptyValueIndirect(v) { m["maxIssuerPathLength"] = v } + if v := f.ZeroMaxIssuerPathLength; !dcl.IsEmptyValueIndirect(v) { + m["zeroMaxIssuerPathLength"] = v + } return m, nil } @@ -5498,6 +5516,7 @@ func flattenCaPoolIssuancePolicyBaselineValuesCaOptions(c *Client, i interface{} } r.IsCa = dcl.FlattenBool(m["isCa"]) r.MaxIssuerPathLength = dcl.FlattenInteger(m["maxIssuerPathLength"]) + r.ZeroMaxIssuerPathLength = dcl.FlattenBool(m["zeroMaxIssuerPathLength"]) return r } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool_schema.go index d065cc0a37..da3e115cad 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool_schema.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool_schema.go @@ -278,6 +278,11 @@ func DCLCaPoolSchema() *dcl.Schema { GoName: "MaxIssuerPathLength", Description: "Optional. Refers to the path length restriction X.509 extension. For a CA certificate, this value describes the depth of subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. If this value is missing, the max path length will be omitted from the CA certificate.", }, + "zeroMaxIssuerPathLength": &dcl.Property{ + Type: "boolean", + GoName: "ZeroMaxIssuerPathLength", + Description: "Optional. 
When true, the \"path length constraint\" in Basic Constraints extension will be set to 0. if both max_issuer_path_length and zero_max_issuer_path_length are unset, the max path length will be omitted from the CA certificate.", + }, }, }, "keyUsage": &dcl.Property{ diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool_yaml_embed.go index c638959d02..ff72d1e008 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/ca_pool_yaml_embed.go @@ -17,7 +17,7 @@ package privateca // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/privateca/ca_pool.yaml -var YAML_ca_pool = []byte("info:\n title: Privateca/CaPool\n description: The Privateca CaPool resource\n x-dcl-struct-name: CaPool\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a CaPool\n parameters:\n - name: caPool\n required: true\n description: A full instance of a CaPool\n apply:\n description: The function used to apply information about a CaPool\n parameters:\n - name: caPool\n required: true\n description: A full instance of a CaPool\n delete:\n description: The function used to delete a CaPool\n parameters:\n - name: caPool\n required: true\n description: A full instance of a CaPool\n deleteAll:\n description: The function used to delete all CaPool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many CaPool\n 
parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n CaPool:\n title: CaPool\n x-dcl-id: projects/{{project}}/locations/{{location}}/caPools/{{name}}\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - tier\n - project\n - location\n properties:\n issuancePolicy:\n type: object\n x-dcl-go-name: IssuancePolicy\n x-dcl-go-type: CaPoolIssuancePolicy\n description: Optional. The IssuancePolicy to control how Certificates will\n be issued from this CaPool.\n properties:\n allowedIssuanceModes:\n type: object\n x-dcl-go-name: AllowedIssuanceModes\n x-dcl-go-type: CaPoolIssuancePolicyAllowedIssuanceModes\n description: Optional. If specified, then only methods allowed in the\n IssuanceModes may be used to issue Certificates.\n properties:\n allowConfigBasedIssuance:\n type: boolean\n x-dcl-go-name: AllowConfigBasedIssuance\n description: Optional. When true, allows callers to create Certificates\n by specifying a CertificateConfig.\n allowCsrBasedIssuance:\n type: boolean\n x-dcl-go-name: AllowCsrBasedIssuance\n description: Optional. When true, allows callers to create Certificates\n by specifying a CSR.\n allowedKeyTypes:\n type: array\n x-dcl-go-name: AllowedKeyTypes\n description: Optional. If any AllowedKeyType is specified, then the\n certificate request's public key must match one of the key types listed\n here. 
Otherwise, any key may be used.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CaPoolIssuancePolicyAllowedKeyTypes\n properties:\n ellipticCurve:\n type: object\n x-dcl-go-name: EllipticCurve\n x-dcl-go-type: CaPoolIssuancePolicyAllowedKeyTypesEllipticCurve\n description: Represents an allowed Elliptic Curve key type.\n x-dcl-conflicts:\n - rsa\n properties:\n signatureAlgorithm:\n type: string\n x-dcl-go-name: SignatureAlgorithm\n x-dcl-go-type: CaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithmEnum\n description: 'Optional. A signature algorithm that must be\n used. If this is omitted, any EC-based signature algorithm\n will be allowed. Possible values: EC_SIGNATURE_ALGORITHM_UNSPECIFIED,\n ECDSA_P256, ECDSA_P384, EDDSA_25519'\n enum:\n - EC_SIGNATURE_ALGORITHM_UNSPECIFIED\n - ECDSA_P256\n - ECDSA_P384\n - EDDSA_25519\n rsa:\n type: object\n x-dcl-go-name: Rsa\n x-dcl-go-type: CaPoolIssuancePolicyAllowedKeyTypesRsa\n description: Represents an allowed RSA key type.\n x-dcl-conflicts:\n - ellipticCurve\n properties:\n maxModulusSize:\n type: integer\n format: int64\n x-dcl-go-name: MaxModulusSize\n description: Optional. The maximum allowed RSA modulus size,\n in bits. If this is not set, or if set to zero, the service\n will not enforce an explicit upper bound on RSA modulus\n sizes.\n minModulusSize:\n type: integer\n format: int64\n x-dcl-go-name: MinModulusSize\n description: Optional. The minimum allowed RSA modulus size,\n in bits. If this is not set, or if set to zero, the service-level\n min RSA modulus size will continue to apply.\n baselineValues:\n type: object\n x-dcl-go-name: BaselineValues\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValues\n description: Optional. A set of X.509 values that will be applied to\n all certificates issued through this CaPool. If a certificate request\n includes conflicting values for the same properties, they will be\n overwritten by the values defined here. 
If a certificate request uses\n a CertificateTemplate that defines conflicting predefined_values for\n the same properties, the certificate issuance request will fail.\n properties:\n additionalExtensions:\n type: array\n x-dcl-go-name: AdditionalExtensions\n description: Optional. Describes custom X.509 extensions.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesAdditionalExtensions\n required:\n - objectId\n - value\n properties:\n critical:\n type: boolean\n x-dcl-go-name: Critical\n description: Optional. Indicates whether or not this extension\n is critical (i.e., if the client does not know how to handle\n this extension, the client should consider this to be an\n error).\n objectId:\n type: object\n x-dcl-go-name: ObjectId\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesAdditionalExtensionsObjectId\n description: Required. The OID for this X.509 extension.\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n value:\n type: string\n x-dcl-go-name: Value\n description: Required. The value of this X.509 extension.\n aiaOcspServers:\n type: array\n x-dcl-go-name: AiaOcspServers\n description: Optional. Describes Online Certificate Status Protocol\n (OCSP) endpoint addresses that appear in the \"Authority Information\n Access\" extension in the certificate.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n caOptions:\n type: object\n x-dcl-go-name: CaOptions\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesCaOptions\n description: Optional. 
Describes options in this X509Parameters\n that are relevant in a CA certificate.\n properties:\n isCa:\n type: boolean\n x-dcl-go-name: IsCa\n description: Optional. Refers to the \"CA\" X.509 extension, which\n is a boolean value. When this value is missing, the extension\n will be omitted from the CA certificate.\n maxIssuerPathLength:\n type: integer\n format: int64\n x-dcl-go-name: MaxIssuerPathLength\n description: Optional. Refers to the path length restriction\n X.509 extension. For a CA certificate, this value describes\n the depth of subordinate CA certificates that are allowed.\n If this value is less than 0, the request will fail. If this\n value is missing, the max path length will be omitted from\n the CA certificate.\n keyUsage:\n type: object\n x-dcl-go-name: KeyUsage\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesKeyUsage\n description: Optional. Indicates the intended use for keys that\n correspond to a certificate.\n properties:\n baseKeyUsage:\n type: object\n x-dcl-go-name: BaseKeyUsage\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsage\n description: Describes high-level ways in which a key may be\n used.\n properties:\n certSign:\n type: boolean\n x-dcl-go-name: CertSign\n description: The key may be used to sign certificates.\n contentCommitment:\n type: boolean\n x-dcl-go-name: ContentCommitment\n description: The key may be used for cryptographic commitments.\n Note that this may also be referred to as \"non-repudiation\".\n crlSign:\n type: boolean\n x-dcl-go-name: CrlSign\n description: The key may be used sign certificate revocation\n lists.\n dataEncipherment:\n type: boolean\n x-dcl-go-name: DataEncipherment\n description: The key may be used to encipher data.\n decipherOnly:\n type: boolean\n x-dcl-go-name: DecipherOnly\n description: The key may be used to decipher only.\n digitalSignature:\n type: boolean\n x-dcl-go-name: DigitalSignature\n description: The key may be used for digital signatures.\n 
encipherOnly:\n type: boolean\n x-dcl-go-name: EncipherOnly\n description: The key may be used to encipher only.\n keyAgreement:\n type: boolean\n x-dcl-go-name: KeyAgreement\n description: The key may be used in a key agreement protocol.\n keyEncipherment:\n type: boolean\n x-dcl-go-name: KeyEncipherment\n description: The key may be used to encipher other keys.\n extendedKeyUsage:\n type: object\n x-dcl-go-name: ExtendedKeyUsage\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsage\n description: Detailed scenarios in which a key may be used.\n properties:\n clientAuth:\n type: boolean\n x-dcl-go-name: ClientAuth\n description: Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially\n described as \"TLS WWW client authentication\", though regularly\n used for non-WWW TLS.\n codeSigning:\n type: boolean\n x-dcl-go-name: CodeSigning\n description: Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially\n described as \"Signing of downloadable executable code\n client authentication\".\n emailProtection:\n type: boolean\n x-dcl-go-name: EmailProtection\n description: Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially\n described as \"Email protection\".\n ocspSigning:\n type: boolean\n x-dcl-go-name: OcspSigning\n description: Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially\n described as \"Signing OCSP responses\".\n serverAuth:\n type: boolean\n x-dcl-go-name: ServerAuth\n description: Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially\n described as \"TLS WWW server authentication\", though regularly\n used for non-WWW TLS.\n timeStamping:\n type: boolean\n x-dcl-go-name: TimeStamping\n description: Corresponds to OID 1.3.6.1.5.5.7.3.8. 
Officially\n described as \"Binding the hash of an object to a time\".\n unknownExtendedKeyUsages:\n type: array\n x-dcl-go-name: UnknownExtendedKeyUsages\n description: Used to describe extended key usages that are not\n listed in the KeyUsage.ExtendedKeyUsageOptions message.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesKeyUsageUnknownExtendedKeyUsages\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n policyIds:\n type: array\n x-dcl-go-name: PolicyIds\n description: Optional. Describes the X.509 certificate policy object\n identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesPolicyIds\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n identityConstraints:\n type: object\n x-dcl-go-name: IdentityConstraints\n x-dcl-go-type: CaPoolIssuancePolicyIdentityConstraints\n description: Optional. Describes constraints on identities that may\n appear in Certificates issued through this CaPool. If this is omitted,\n then this CaPool will not add restrictions on a certificate's identity.\n required:\n - allowSubjectPassthrough\n - allowSubjectAltNamesPassthrough\n properties:\n allowSubjectAltNamesPassthrough:\n type: boolean\n x-dcl-go-name: AllowSubjectAltNamesPassthrough\n description: Required. 
If this is true, the SubjectAltNames extension\n may be copied from a certificate request into the signed certificate.\n Otherwise, the requested SubjectAltNames will be discarded.\n allowSubjectPassthrough:\n type: boolean\n x-dcl-go-name: AllowSubjectPassthrough\n description: Required. If this is true, the Subject field may be\n copied from a certificate request into the signed certificate.\n Otherwise, the requested Subject will be discarded.\n celExpression:\n type: object\n x-dcl-go-name: CelExpression\n x-dcl-go-type: CaPoolIssuancePolicyIdentityConstraintsCelExpression\n description: Optional. A CEL expression that may be used to validate\n the resolved X.509 Subject and/or Subject Alternative Name before\n a certificate is signed. To see the full allowed syntax and some\n examples, see https://cloud.google.com/certificate-authority-service/docs/using-cel\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. Description of the expression. This is\n a longer text which describes the expression, e.g. when hovered\n over it in a UI.\n expression:\n type: string\n x-dcl-go-name: Expression\n description: Textual representation of an expression in Common\n Expression Language syntax.\n location:\n type: string\n x-dcl-go-name: Location\n description: Optional. String indicating the location of the\n expression for error reporting, e.g. a file name and a position\n in the file.\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. Title for the expression, i.e. a short\n string describing its purpose. This can be used e.g. in UIs\n which allow to enter the expression.\n maximumLifetime:\n type: string\n x-dcl-go-name: MaximumLifetime\n description: Optional. 
The maximum lifetime allowed for issued Certificates.\n Note that if the issuing CertificateAuthority expires before a Certificate's\n requested maximum_lifetime, the effective lifetime will be explicitly\n truncated to match it.\n passthroughExtensions:\n type: object\n x-dcl-go-name: PassthroughExtensions\n x-dcl-go-type: CaPoolIssuancePolicyPassthroughExtensions\n description: Optional. Describes the set of X.509 extensions that may\n appear in a Certificate issued through this CaPool. If a certificate\n request sets extensions that don't appear in the passthrough_extensions,\n those extensions will be dropped. If a certificate request uses a\n CertificateTemplate with predefined_values that don't appear here,\n the certificate issuance request will fail. If this is omitted, then\n this CaPool will not add restrictions on a certificate's X.509 extensions.\n These constraints do not apply to X.509 extensions set in this CaPool's\n baseline_values.\n properties:\n additionalExtensions:\n type: array\n x-dcl-go-name: AdditionalExtensions\n description: Optional. A set of ObjectIds identifying custom X.509\n extensions. Will be combined with known_extensions to determine\n the full set of X.509 extensions.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CaPoolIssuancePolicyPassthroughExtensionsAdditionalExtensions\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n knownExtensions:\n type: array\n x-dcl-go-name: KnownExtensions\n description: Optional. A set of named X.509 extensions. 
Will be\n combined with additional_extensions to determine the full set\n of X.509 extensions.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: CaPoolIssuancePolicyPassthroughExtensionsKnownExtensionsEnum\n enum:\n - KNOWN_CERTIFICATE_EXTENSION_UNSPECIFIED\n - BASE_KEY_USAGE\n - EXTENDED_KEY_USAGE\n - CA_OPTIONS\n - POLICY_IDS\n - AIA_OCSP_SERVERS\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. Labels with user-defined metadata.\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The resource name for this CaPool in the format `projects/*/locations/*/caPools/*`.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n publishingOptions:\n type: object\n x-dcl-go-name: PublishingOptions\n x-dcl-go-type: CaPoolPublishingOptions\n description: Optional. The PublishingOptions to follow when issuing Certificates\n from any CertificateAuthority in this CaPool.\n properties:\n publishCaCert:\n type: boolean\n x-dcl-go-name: PublishCaCert\n description: Optional. When true, publishes each CertificateAuthority's\n CA certificate and includes its URL in the \"Authority Information\n Access\" X.509 extension in all issued Certificates. If this is false,\n the CA certificate will not be published and the corresponding X.509\n extension will not be written in issued certificates.\n publishCrl:\n type: boolean\n x-dcl-go-name: PublishCrl\n description: Optional. When true, publishes each CertificateAuthority's\n CRL and includes its URL in the \"CRL Distribution Points\" X.509 extension\n in all issued Certificates. 
If this is false, CRLs will not be published\n and the corresponding X.509 extension will not be written in issued\n certificates. CRLs will expire 7 days from their creation. However,\n we will rebuild daily. CRLs are also rebuilt shortly after a certificate\n is revoked.\n tier:\n type: string\n x-dcl-go-name: Tier\n x-dcl-go-type: CaPoolTierEnum\n description: 'Required. Immutable. The Tier of this CaPool. Possible values:\n TIER_UNSPECIFIED, ENTERPRISE, DEVOPS'\n x-kubernetes-immutable: true\n enum:\n - TIER_UNSPECIFIED\n - ENTERPRISE\n - DEVOPS\n") +var YAML_ca_pool = []byte("info:\n title: Privateca/CaPool\n description: The Privateca CaPool resource\n x-dcl-struct-name: CaPool\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a CaPool\n parameters:\n - name: caPool\n required: true\n description: A full instance of a CaPool\n apply:\n description: The function used to apply information about a CaPool\n parameters:\n - name: caPool\n required: true\n description: A full instance of a CaPool\n delete:\n description: The function used to delete a CaPool\n parameters:\n - name: caPool\n required: true\n description: A full instance of a CaPool\n deleteAll:\n description: The function used to delete all CaPool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many CaPool\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n CaPool:\n title: CaPool\n x-dcl-id: projects/{{project}}/locations/{{location}}/caPools/{{name}}\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - tier\n - project\n - 
location\n properties:\n issuancePolicy:\n type: object\n x-dcl-go-name: IssuancePolicy\n x-dcl-go-type: CaPoolIssuancePolicy\n description: Optional. The IssuancePolicy to control how Certificates will\n be issued from this CaPool.\n properties:\n allowedIssuanceModes:\n type: object\n x-dcl-go-name: AllowedIssuanceModes\n x-dcl-go-type: CaPoolIssuancePolicyAllowedIssuanceModes\n description: Optional. If specified, then only methods allowed in the\n IssuanceModes may be used to issue Certificates.\n properties:\n allowConfigBasedIssuance:\n type: boolean\n x-dcl-go-name: AllowConfigBasedIssuance\n description: Optional. When true, allows callers to create Certificates\n by specifying a CertificateConfig.\n allowCsrBasedIssuance:\n type: boolean\n x-dcl-go-name: AllowCsrBasedIssuance\n description: Optional. When true, allows callers to create Certificates\n by specifying a CSR.\n allowedKeyTypes:\n type: array\n x-dcl-go-name: AllowedKeyTypes\n description: Optional. If any AllowedKeyType is specified, then the\n certificate request's public key must match one of the key types listed\n here. Otherwise, any key may be used.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CaPoolIssuancePolicyAllowedKeyTypes\n properties:\n ellipticCurve:\n type: object\n x-dcl-go-name: EllipticCurve\n x-dcl-go-type: CaPoolIssuancePolicyAllowedKeyTypesEllipticCurve\n description: Represents an allowed Elliptic Curve key type.\n x-dcl-conflicts:\n - rsa\n properties:\n signatureAlgorithm:\n type: string\n x-dcl-go-name: SignatureAlgorithm\n x-dcl-go-type: CaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithmEnum\n description: 'Optional. A signature algorithm that must be\n used. If this is omitted, any EC-based signature algorithm\n will be allowed. 
Possible values: EC_SIGNATURE_ALGORITHM_UNSPECIFIED,\n ECDSA_P256, ECDSA_P384, EDDSA_25519'\n enum:\n - EC_SIGNATURE_ALGORITHM_UNSPECIFIED\n - ECDSA_P256\n - ECDSA_P384\n - EDDSA_25519\n rsa:\n type: object\n x-dcl-go-name: Rsa\n x-dcl-go-type: CaPoolIssuancePolicyAllowedKeyTypesRsa\n description: Represents an allowed RSA key type.\n x-dcl-conflicts:\n - ellipticCurve\n properties:\n maxModulusSize:\n type: integer\n format: int64\n x-dcl-go-name: MaxModulusSize\n description: Optional. The maximum allowed RSA modulus size,\n in bits. If this is not set, or if set to zero, the service\n will not enforce an explicit upper bound on RSA modulus\n sizes.\n minModulusSize:\n type: integer\n format: int64\n x-dcl-go-name: MinModulusSize\n description: Optional. The minimum allowed RSA modulus size,\n in bits. If this is not set, or if set to zero, the service-level\n min RSA modulus size will continue to apply.\n baselineValues:\n type: object\n x-dcl-go-name: BaselineValues\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValues\n description: Optional. A set of X.509 values that will be applied to\n all certificates issued through this CaPool. If a certificate request\n includes conflicting values for the same properties, they will be\n overwritten by the values defined here. If a certificate request uses\n a CertificateTemplate that defines conflicting predefined_values for\n the same properties, the certificate issuance request will fail.\n properties:\n additionalExtensions:\n type: array\n x-dcl-go-name: AdditionalExtensions\n description: Optional. Describes custom X.509 extensions.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesAdditionalExtensions\n required:\n - objectId\n - value\n properties:\n critical:\n type: boolean\n x-dcl-go-name: Critical\n description: Optional. 
Indicates whether or not this extension\n is critical (i.e., if the client does not know how to handle\n this extension, the client should consider this to be an\n error).\n objectId:\n type: object\n x-dcl-go-name: ObjectId\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesAdditionalExtensionsObjectId\n description: Required. The OID for this X.509 extension.\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n value:\n type: string\n x-dcl-go-name: Value\n description: Required. The value of this X.509 extension.\n aiaOcspServers:\n type: array\n x-dcl-go-name: AiaOcspServers\n description: Optional. Describes Online Certificate Status Protocol\n (OCSP) endpoint addresses that appear in the \"Authority Information\n Access\" extension in the certificate.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n caOptions:\n type: object\n x-dcl-go-name: CaOptions\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesCaOptions\n description: Optional. Describes options in this X509Parameters\n that are relevant in a CA certificate.\n properties:\n isCa:\n type: boolean\n x-dcl-go-name: IsCa\n description: Optional. Refers to the \"CA\" X.509 extension, which\n is a boolean value. When this value is missing, the extension\n will be omitted from the CA certificate.\n maxIssuerPathLength:\n type: integer\n format: int64\n x-dcl-go-name: MaxIssuerPathLength\n description: Optional. Refers to the path length restriction\n X.509 extension. For a CA certificate, this value describes\n the depth of subordinate CA certificates that are allowed.\n If this value is less than 0, the request will fail. 
If this\n value is missing, the max path length will be omitted from\n the CA certificate.\n zeroMaxIssuerPathLength:\n type: boolean\n x-dcl-go-name: ZeroMaxIssuerPathLength\n description: Optional. When true, the \"path length constraint\"\n in Basic Constraints extension will be set to 0. if both max_issuer_path_length\n and zero_max_issuer_path_length are unset, the max path length\n will be omitted from the CA certificate.\n keyUsage:\n type: object\n x-dcl-go-name: KeyUsage\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesKeyUsage\n description: Optional. Indicates the intended use for keys that\n correspond to a certificate.\n properties:\n baseKeyUsage:\n type: object\n x-dcl-go-name: BaseKeyUsage\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsage\n description: Describes high-level ways in which a key may be\n used.\n properties:\n certSign:\n type: boolean\n x-dcl-go-name: CertSign\n description: The key may be used to sign certificates.\n contentCommitment:\n type: boolean\n x-dcl-go-name: ContentCommitment\n description: The key may be used for cryptographic commitments.\n Note that this may also be referred to as \"non-repudiation\".\n crlSign:\n type: boolean\n x-dcl-go-name: CrlSign\n description: The key may be used sign certificate revocation\n lists.\n dataEncipherment:\n type: boolean\n x-dcl-go-name: DataEncipherment\n description: The key may be used to encipher data.\n decipherOnly:\n type: boolean\n x-dcl-go-name: DecipherOnly\n description: The key may be used to decipher only.\n digitalSignature:\n type: boolean\n x-dcl-go-name: DigitalSignature\n description: The key may be used for digital signatures.\n encipherOnly:\n type: boolean\n x-dcl-go-name: EncipherOnly\n description: The key may be used to encipher only.\n keyAgreement:\n type: boolean\n x-dcl-go-name: KeyAgreement\n description: The key may be used in a key agreement protocol.\n keyEncipherment:\n type: boolean\n x-dcl-go-name: KeyEncipherment\n 
description: The key may be used to encipher other keys.\n extendedKeyUsage:\n type: object\n x-dcl-go-name: ExtendedKeyUsage\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsage\n description: Detailed scenarios in which a key may be used.\n properties:\n clientAuth:\n type: boolean\n x-dcl-go-name: ClientAuth\n description: Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially\n described as \"TLS WWW client authentication\", though regularly\n used for non-WWW TLS.\n codeSigning:\n type: boolean\n x-dcl-go-name: CodeSigning\n description: Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially\n described as \"Signing of downloadable executable code\n client authentication\".\n emailProtection:\n type: boolean\n x-dcl-go-name: EmailProtection\n description: Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially\n described as \"Email protection\".\n ocspSigning:\n type: boolean\n x-dcl-go-name: OcspSigning\n description: Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially\n described as \"Signing OCSP responses\".\n serverAuth:\n type: boolean\n x-dcl-go-name: ServerAuth\n description: Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially\n described as \"TLS WWW server authentication\", though regularly\n used for non-WWW TLS.\n timeStamping:\n type: boolean\n x-dcl-go-name: TimeStamping\n description: Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially\n described as \"Binding the hash of an object to a time\".\n unknownExtendedKeyUsages:\n type: array\n x-dcl-go-name: UnknownExtendedKeyUsages\n description: Used to describe extended key usages that are not\n listed in the KeyUsage.ExtendedKeyUsageOptions message.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesKeyUsageUnknownExtendedKeyUsages\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. 
The most\n significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n policyIds:\n type: array\n x-dcl-go-name: PolicyIds\n description: Optional. Describes the X.509 certificate policy object\n identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CaPoolIssuancePolicyBaselineValuesPolicyIds\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n identityConstraints:\n type: object\n x-dcl-go-name: IdentityConstraints\n x-dcl-go-type: CaPoolIssuancePolicyIdentityConstraints\n description: Optional. Describes constraints on identities that may\n appear in Certificates issued through this CaPool. If this is omitted,\n then this CaPool will not add restrictions on a certificate's identity.\n required:\n - allowSubjectPassthrough\n - allowSubjectAltNamesPassthrough\n properties:\n allowSubjectAltNamesPassthrough:\n type: boolean\n x-dcl-go-name: AllowSubjectAltNamesPassthrough\n description: Required. If this is true, the SubjectAltNames extension\n may be copied from a certificate request into the signed certificate.\n Otherwise, the requested SubjectAltNames will be discarded.\n allowSubjectPassthrough:\n type: boolean\n x-dcl-go-name: AllowSubjectPassthrough\n description: Required. If this is true, the Subject field may be\n copied from a certificate request into the signed certificate.\n Otherwise, the requested Subject will be discarded.\n celExpression:\n type: object\n x-dcl-go-name: CelExpression\n x-dcl-go-type: CaPoolIssuancePolicyIdentityConstraintsCelExpression\n description: Optional. 
A CEL expression that may be used to validate\n the resolved X.509 Subject and/or Subject Alternative Name before\n a certificate is signed. To see the full allowed syntax and some\n examples, see https://cloud.google.com/certificate-authority-service/docs/using-cel\n properties:\n description:\n type: string\n x-dcl-go-name: Description\n description: Optional. Description of the expression. This is\n a longer text which describes the expression, e.g. when hovered\n over it in a UI.\n expression:\n type: string\n x-dcl-go-name: Expression\n description: Textual representation of an expression in Common\n Expression Language syntax.\n location:\n type: string\n x-dcl-go-name: Location\n description: Optional. String indicating the location of the\n expression for error reporting, e.g. a file name and a position\n in the file.\n title:\n type: string\n x-dcl-go-name: Title\n description: Optional. Title for the expression, i.e. a short\n string describing its purpose. This can be used e.g. in UIs\n which allow to enter the expression.\n maximumLifetime:\n type: string\n x-dcl-go-name: MaximumLifetime\n description: Optional. The maximum lifetime allowed for issued Certificates.\n Note that if the issuing CertificateAuthority expires before a Certificate's\n requested maximum_lifetime, the effective lifetime will be explicitly\n truncated to match it.\n passthroughExtensions:\n type: object\n x-dcl-go-name: PassthroughExtensions\n x-dcl-go-type: CaPoolIssuancePolicyPassthroughExtensions\n description: Optional. Describes the set of X.509 extensions that may\n appear in a Certificate issued through this CaPool. If a certificate\n request sets extensions that don't appear in the passthrough_extensions,\n those extensions will be dropped. If a certificate request uses a\n CertificateTemplate with predefined_values that don't appear here,\n the certificate issuance request will fail. 
If this is omitted, then\n this CaPool will not add restrictions on a certificate's X.509 extensions.\n These constraints do not apply to X.509 extensions set in this CaPool's\n baseline_values.\n properties:\n additionalExtensions:\n type: array\n x-dcl-go-name: AdditionalExtensions\n description: Optional. A set of ObjectIds identifying custom X.509\n extensions. Will be combined with known_extensions to determine\n the full set of X.509 extensions.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CaPoolIssuancePolicyPassthroughExtensionsAdditionalExtensions\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n knownExtensions:\n type: array\n x-dcl-go-name: KnownExtensions\n description: Optional. A set of named X.509 extensions. Will be\n combined with additional_extensions to determine the full set\n of X.509 extensions.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: CaPoolIssuancePolicyPassthroughExtensionsKnownExtensionsEnum\n enum:\n - KNOWN_CERTIFICATE_EXTENSION_UNSPECIFIED\n - BASE_KEY_USAGE\n - EXTENDED_KEY_USAGE\n - CA_OPTIONS\n - POLICY_IDS\n - AIA_OCSP_SERVERS\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. 
Labels with user-defined metadata.\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The resource name for this CaPool in the format `projects/*/locations/*/caPools/*`.\n x-kubernetes-immutable: true\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n publishingOptions:\n type: object\n x-dcl-go-name: PublishingOptions\n x-dcl-go-type: CaPoolPublishingOptions\n description: Optional. The PublishingOptions to follow when issuing Certificates\n from any CertificateAuthority in this CaPool.\n properties:\n publishCaCert:\n type: boolean\n x-dcl-go-name: PublishCaCert\n description: Optional. When true, publishes each CertificateAuthority's\n CA certificate and includes its URL in the \"Authority Information\n Access\" X.509 extension in all issued Certificates. If this is false,\n the CA certificate will not be published and the corresponding X.509\n extension will not be written in issued certificates.\n publishCrl:\n type: boolean\n x-dcl-go-name: PublishCrl\n description: Optional. When true, publishes each CertificateAuthority's\n CRL and includes its URL in the \"CRL Distribution Points\" X.509 extension\n in all issued Certificates. If this is false, CRLs will not be published\n and the corresponding X.509 extension will not be written in issued\n certificates. CRLs will expire 7 days from their creation. However,\n we will rebuild daily. CRLs are also rebuilt shortly after a certificate\n is revoked.\n tier:\n type: string\n x-dcl-go-name: Tier\n x-dcl-go-type: CaPoolTierEnum\n description: 'Required. Immutable. The Tier of this CaPool. 
Possible values:\n TIER_UNSPECIFIED, ENTERPRISE, DEVOPS'\n x-kubernetes-immutable: true\n enum:\n - TIER_UNSPECIFIED\n - ENTERPRISE\n - DEVOPS\n") -// 26986 bytes -// MD5: 72c69198c2e02c48b76edc92cce8d118 +// 27469 bytes +// MD5: 4c8ae9ebe41e2775519d6f4850a1e13d diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority.go index 3368218a7c..6cef361b38 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority.go @@ -825,9 +825,10 @@ func (r *CertificateAuthorityConfigX509ConfigKeyUsageUnknownExtendedKeyUsages) H } type CertificateAuthorityConfigX509ConfigCaOptions struct { - empty bool `json:"-"` - IsCa *bool `json:"isCa"` - MaxIssuerPathLength *int64 `json:"maxIssuerPathLength"` + empty bool `json:"-"` + IsCa *bool `json:"isCa"` + MaxIssuerPathLength *int64 `json:"maxIssuerPathLength"` + ZeroMaxIssuerPathLength *bool `json:"zeroMaxIssuerPathLength"` } type jsonCertificateAuthorityConfigX509ConfigCaOptions CertificateAuthorityConfigX509ConfigCaOptions @@ -849,6 +850,8 @@ func (r *CertificateAuthorityConfigX509ConfigCaOptions) UnmarshalJSON(data []byt r.MaxIssuerPathLength = res.MaxIssuerPathLength + r.ZeroMaxIssuerPathLength = res.ZeroMaxIssuerPathLength + } return nil } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority.yaml 
b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority.yaml index 93a3333484..b45fab9003 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority.yaml +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority.yaml @@ -884,6 +884,14 @@ components: value is missing, the max path length will be omitted from the CA certificate. x-kubernetes-immutable: true + zeroMaxIssuerPathLength: + type: boolean + x-dcl-go-name: ZeroMaxIssuerPathLength + description: Optional. When true, the "path length constraint" + in Basic Constraints extension will be set to 0. if both max_issuer_path_length + and zero_max_issuer_path_length are unset, the max path length + will be omitted from the CA certificate. + x-kubernetes-immutable: true keyUsage: type: object x-dcl-go-name: KeyUsage diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority_internal.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority_internal.go index 78fce63d75..fa3d44e4f6 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority_internal.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority_internal.go @@ -2460,6 +2460,11 @@ func canonicalizeCertificateAuthorityConfigX509ConfigCaOptions(des, initial *Cer } else { cDes.MaxIssuerPathLength = des.MaxIssuerPathLength } + if dcl.BoolCanonicalize(des.ZeroMaxIssuerPathLength, 
initial.ZeroMaxIssuerPathLength) || dcl.IsZeroValue(des.ZeroMaxIssuerPathLength) { + cDes.ZeroMaxIssuerPathLength = initial.ZeroMaxIssuerPathLength + } else { + cDes.ZeroMaxIssuerPathLength = des.ZeroMaxIssuerPathLength + } return cDes } @@ -2509,6 +2514,9 @@ func canonicalizeNewCertificateAuthorityConfigX509ConfigCaOptions(c *Client, des if dcl.BoolCanonicalize(des.IsCa, nw.IsCa) { nw.IsCa = des.IsCa } + if dcl.BoolCanonicalize(des.ZeroMaxIssuerPathLength, nw.ZeroMaxIssuerPathLength) { + nw.ZeroMaxIssuerPathLength = des.ZeroMaxIssuerPathLength + } return nw } @@ -6817,6 +6825,13 @@ func compareCertificateAuthorityConfigX509ConfigCaOptionsNewStyle(d, a interface } diffs = append(diffs, ds...) } + + if ds, err := dcl.Diff(desired.ZeroMaxIssuerPathLength, actual.ZeroMaxIssuerPathLength, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("ZeroMaxIssuerPathLength")); len(ds) != 0 || err != nil { + if err != nil { + return nil, err + } + diffs = append(diffs, ds...) + } return diffs, nil } @@ -8979,7 +8994,7 @@ func expandCertificateAuthorityConfigX509Config(c *Client, f *CertificateAuthori } else if !dcl.IsEmptyValueIndirect(v) { m["keyUsage"] = v } - if v, err := expandCertificateAuthorityConfigX509ConfigCaOptions(c, f.CaOptions, res); err != nil { + if v, err := expandCertificateAuthorityConfigX509ConfigCAOptions(c, f.CaOptions, res); err != nil { return nil, fmt.Errorf("error expanding CaOptions into caOptions: %w", err) } else if !dcl.IsEmptyValueIndirect(v) { m["caOptions"] = v @@ -9012,7 +9027,7 @@ func flattenCertificateAuthorityConfigX509Config(c *Client, i interface{}, res * return EmptyCertificateAuthorityConfigX509Config } r.KeyUsage = flattenCertificateAuthorityConfigX509ConfigKeyUsage(c, m["keyUsage"], res) - r.CaOptions = flattenCertificateAuthorityConfigX509ConfigCaOptions(c, m["caOptions"], res) + r.CaOptions = flattenCertificateAuthorityConfigX509ConfigCAOptions(c, m["caOptions"], res) r.PolicyIds = 
flattenCertificateAuthorityConfigX509ConfigPolicyIdsSlice(c, m["policyIds"], res) r.AiaOcspServers = dcl.FlattenStringSlice(m["aiaOcspServers"]) r.AdditionalExtensions = flattenCertificateAuthorityConfigX509ConfigAdditionalExtensionsSlice(c, m["additionalExtensions"], res) @@ -9637,6 +9652,9 @@ func expandCertificateAuthorityConfigX509ConfigCaOptions(c *Client, f *Certifica if v := f.MaxIssuerPathLength; !dcl.IsEmptyValueIndirect(v) { m["maxIssuerPathLength"] = v } + if v := f.ZeroMaxIssuerPathLength; !dcl.IsEmptyValueIndirect(v) { + m["zeroMaxIssuerPathLength"] = v + } return m, nil } @@ -9656,6 +9674,7 @@ func flattenCertificateAuthorityConfigX509ConfigCaOptions(c *Client, i interface } r.IsCa = dcl.FlattenBool(m["isCa"]) r.MaxIssuerPathLength = dcl.FlattenInteger(m["maxIssuerPathLength"]) + r.ZeroMaxIssuerPathLength = dcl.FlattenBool(m["zeroMaxIssuerPathLength"]) return r } diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority_schema.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority_schema.go index c0cbf9ab04..44210f1645 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority_schema.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority_schema.go @@ -1012,6 +1012,12 @@ func DCLCertificateAuthoritySchema() *dcl.Schema { Description: "Optional. Refers to the path length restriction X.509 extension. For a CA certificate, this value describes the depth of subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. 
If this value is missing, the max path length will be omitted from the CA certificate.", Immutable: true, }, + "zeroMaxIssuerPathLength": &dcl.Property{ + Type: "boolean", + GoName: "ZeroMaxIssuerPathLength", + Description: "Optional. When true, the \"path length constraint\" in Basic Constraints extension will be set to 0. if both max_issuer_path_length and zero_max_issuer_path_length are unset, the max path length will be omitted from the CA certificate.", + Immutable: true, + }, }, }, "keyUsage": &dcl.Property{ diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority_yaml_embed.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority_yaml_embed.go index e34ba3eafd..1fd52968a7 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority_yaml_embed.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/certificate_authority_yaml_embed.go @@ -17,7 +17,7 @@ package privateca // blaze-out/k8-fastbuild/genfiles/cloud/graphite/mmv2/services/google/privateca/certificate_authority.yaml -var YAML_certificate_authority = []byte("info:\n title: Privateca/CertificateAuthority\n description: The Privateca CertificateAuthority resource\n x-dcl-struct-name: CertificateAuthority\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a CertificateAuthority\n parameters:\n - name: certificateAuthority\n required: true\n description: A full instance of a CertificateAuthority\n apply:\n description: The function used to apply information about a CertificateAuthority\n parameters:\n - name: certificateAuthority\n required: true\n description: A full instance of a 
CertificateAuthority\n delete:\n description: The function used to delete a CertificateAuthority\n parameters:\n - name: certificateAuthority\n required: true\n description: A full instance of a CertificateAuthority\n deleteAll:\n description: The function used to delete all CertificateAuthority\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: caPool\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many CertificateAuthority\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: caPool\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n CertificateAuthority:\n title: CertificateAuthority\n x-dcl-id: projects/{{project}}/locations/{{location}}/caPools/{{ca_pool}}/certificateAuthorities/{{name}}\n x-dcl-parent-container: project\n x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - type\n - config\n - lifetime\n - keySpec\n - project\n - location\n - caPool\n properties:\n accessUrls:\n type: object\n x-dcl-go-name: AccessUrls\n x-dcl-go-type: CertificateAuthorityAccessUrls\n readOnly: true\n description: Output only. URLs for accessing content published by this CA,\n such as the CA certificate and CRLs.\n x-kubernetes-immutable: true\n properties:\n caCertificateAccessUrl:\n type: string\n x-dcl-go-name: CaCertificateAccessUrl\n description: The URL where this CertificateAuthority's CA certificate\n is published. 
This will only be set for CAs that have been activated.\n x-kubernetes-immutable: true\n crlAccessUrls:\n type: array\n x-dcl-go-name: CrlAccessUrls\n description: The URLs where this CertificateAuthority's CRLs are published.\n This will only be set for CAs that have been activated.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n caCertificateDescriptions:\n type: array\n x-dcl-go-name: CaCertificateDescriptions\n readOnly: true\n description: Output only. A structured description of this CertificateAuthority's\n CA certificate and its issuers. Ordered as self-to-root.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptions\n properties:\n aiaIssuingCertificateUrls:\n type: array\n x-dcl-go-name: AiaIssuingCertificateUrls\n description: Describes lists of issuer CA certificate URLs that appear\n in the \"Authority Information Access\" extension in the certificate.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n authorityKeyId:\n type: object\n x-dcl-go-name: AuthorityKeyId\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsAuthorityKeyId\n description: Identifies the subject_key_id of the parent certificate,\n per https://tools.ietf.org/html/rfc5280#section-4.2.1.1\n properties:\n keyId:\n type: string\n x-dcl-go-name: KeyId\n description: Optional. The value of this KeyId encoded in lowercase\n hexadecimal. 
This is most likely the 160 bit SHA-1 hash of the\n public key.\n certFingerprint:\n type: object\n x-dcl-go-name: CertFingerprint\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsCertFingerprint\n description: The hash of the x.509 certificate.\n properties:\n sha256Hash:\n type: string\n x-dcl-go-name: Sha256Hash\n description: The SHA 256 hash, encoded in hexadecimal, of the\n DER x509 certificate.\n crlDistributionPoints:\n type: array\n x-dcl-go-name: CrlDistributionPoints\n description: Describes a list of locations to obtain CRL information,\n i.e. the DistributionPoint.fullName described by https://tools.ietf.org/html/rfc5280#section-4.2.1.13\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n publicKey:\n type: object\n x-dcl-go-name: PublicKey\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsPublicKey\n description: The public key that corresponds to an issued certificate.\n required:\n - key\n - format\n properties:\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsPublicKeyFormatEnum\n description: 'Required. The format of the public key. Possible\n values: PEM'\n enum:\n - PEM\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A public key. 
The padding and encoding\n must match with the `KeyFormat` value specified for the `format`\n field.\n subjectDescription:\n type: object\n x-dcl-go-name: SubjectDescription\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsSubjectDescription\n description: Describes some of the values in a certificate that are\n related to the subject and lifetime.\n properties:\n hexSerialNumber:\n type: string\n x-dcl-go-name: HexSerialNumber\n description: The serial number encoded in lowercase hexadecimal.\n lifetime:\n type: string\n x-dcl-go-name: Lifetime\n description: For convenience, the actual lifetime of an issued\n certificate.\n notAfterTime:\n type: string\n format: date-time\n x-dcl-go-name: NotAfterTime\n description: The time after which the certificate is expired.\n Per RFC 5280, the validity period for a certificate is the period\n of time from not_before_time through not_after_time, inclusive.\n Corresponds to 'not_before_time' + 'lifetime' - 1 second.\n notBeforeTime:\n type: string\n format: date-time\n x-dcl-go-name: NotBeforeTime\n description: The time at which the certificate becomes valid.\n subject:\n type: object\n x-dcl-go-name: Subject\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsSubjectDescriptionSubject\n description: Contains distinguished name fields such as the common\n name, location and organization.\n properties:\n commonName:\n type: string\n x-dcl-go-name: CommonName\n description: The \"common name\" of the subject.\n countryCode:\n type: string\n x-dcl-go-name: CountryCode\n description: The country code of the subject.\n locality:\n type: string\n x-dcl-go-name: Locality\n description: The locality or city of the subject.\n organization:\n type: string\n x-dcl-go-name: Organization\n description: The organization of the subject.\n organizationalUnit:\n type: string\n x-dcl-go-name: OrganizationalUnit\n description: The organizational_unit of the subject.\n postalCode:\n type: string\n x-dcl-go-name: 
PostalCode\n description: The postal code of the subject.\n province:\n type: string\n x-dcl-go-name: Province\n description: The province, territory, or regional state of\n the subject.\n streetAddress:\n type: string\n x-dcl-go-name: StreetAddress\n description: The street address of the subject.\n subjectAltName:\n type: object\n x-dcl-go-name: SubjectAltName\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsSubjectDescriptionSubjectAltName\n description: The subject alternative name fields.\n properties:\n customSans:\n type: array\n x-dcl-go-name: CustomSans\n description: Contains additional subject alternative name\n values.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsSubjectDescriptionSubjectAltNameCustomSans\n required:\n - objectId\n - critical\n - value\n properties:\n critical:\n type: boolean\n x-dcl-go-name: Critical\n description: Optional. Indicates whether or not this\n extension is critical (i.e., if the client does not\n know how to handle this extension, the client should\n consider this to be an error).\n objectId:\n type: object\n x-dcl-go-name: ObjectId\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsSubjectDescriptionSubjectAltNameCustomSansObjectId\n description: Required. The OID for this X.509 extension.\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path.\n The most significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n value:\n type: string\n x-dcl-go-name: Value\n description: Required. 
The value of this X.509 extension.\n dnsNames:\n type: array\n x-dcl-go-name: DnsNames\n description: Contains only valid, fully-qualified host names.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n emailAddresses:\n type: array\n x-dcl-go-name: EmailAddresses\n description: Contains only valid RFC 2822 E-mail addresses.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n ipAddresses:\n type: array\n x-dcl-go-name: IPAddresses\n description: Contains only valid 32-bit IPv4 addresses or\n RFC 4291 IPv6 addresses.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n uris:\n type: array\n x-dcl-go-name: Uris\n description: Contains only valid RFC 3986 URIs.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n subjectKeyId:\n type: object\n x-dcl-go-name: SubjectKeyId\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsSubjectKeyId\n description: Provides a means of identifiying certificates that contain\n a particular public key, per https://tools.ietf.org/html/rfc5280#section-4.2.1.2.\n properties:\n keyId:\n type: string\n x-dcl-go-name: KeyId\n description: Optional. The value of this KeyId encoded in lowercase\n hexadecimal. This is most likely the 160 bit SHA-1 hash of the\n public key.\n x509Description:\n type: object\n x-dcl-go-name: X509Description\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509Description\n description: Describes some of the technical X.509 fields in a certificate.\n properties:\n additionalExtensions:\n type: array\n x-dcl-go-name: AdditionalExtensions\n description: Optional. 
Describes custom X.509 extensions.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionAdditionalExtensions\n required:\n - objectId\n - value\n properties:\n critical:\n type: boolean\n x-dcl-go-name: Critical\n description: Optional. Indicates whether or not this extension\n is critical (i.e., if the client does not know how to\n handle this extension, the client should consider this\n to be an error).\n objectId:\n type: object\n x-dcl-go-name: ObjectId\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionAdditionalExtensionsObjectId\n description: Required. The OID for this X.509 extension.\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The\n most significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n value:\n type: string\n x-dcl-go-name: Value\n description: Required. The value of this X.509 extension.\n aiaOcspServers:\n type: array\n x-dcl-go-name: AiaOcspServers\n readOnly: true\n description: Optional. Describes Online Certificate Status Protocol\n (OCSP) endpoint addresses that appear in the \"Authority Information\n Access\" extension in the certificate.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n caOptions:\n type: object\n x-dcl-go-name: CaOptions\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionCaOptions\n description: Optional. Describes options in this X509Parameters\n that are relevant in a CA certificate.\n properties:\n isCa:\n type: boolean\n x-dcl-go-name: IsCa\n description: Optional. Refers to the \"CA\" X.509 extension,\n which is a boolean value. 
When this value is missing, the\n extension will be omitted from the CA certificate.\n maxIssuerPathLength:\n type: integer\n format: int64\n x-dcl-go-name: MaxIssuerPathLength\n description: Optional. Refers to the path length restriction\n X.509 extension. For a CA certificate, this value describes\n the depth of subordinate CA certificates that are allowed.\n If this value is less than 0, the request will fail. If\n this value is missing, the max path length will be omitted\n from the CA certificate.\n keyUsage:\n type: object\n x-dcl-go-name: KeyUsage\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionKeyUsage\n description: Optional. Indicates the intended use for keys that\n correspond to a certificate.\n properties:\n baseKeyUsage:\n type: object\n x-dcl-go-name: BaseKeyUsage\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionKeyUsageBaseKeyUsage\n description: Describes high-level ways in which a key may\n be used.\n properties:\n certSign:\n type: boolean\n x-dcl-go-name: CertSign\n description: The key may be used to sign certificates.\n contentCommitment:\n type: boolean\n x-dcl-go-name: ContentCommitment\n description: The key may be used for cryptographic commitments.\n Note that this may also be referred to as \"non-repudiation\".\n crlSign:\n type: boolean\n x-dcl-go-name: CrlSign\n description: The key may be used sign certificate revocation\n lists.\n dataEncipherment:\n type: boolean\n x-dcl-go-name: DataEncipherment\n description: The key may be used to encipher data.\n decipherOnly:\n type: boolean\n x-dcl-go-name: DecipherOnly\n description: The key may be used to decipher only.\n digitalSignature:\n type: boolean\n x-dcl-go-name: DigitalSignature\n description: The key may be used for digital signatures.\n encipherOnly:\n type: boolean\n x-dcl-go-name: EncipherOnly\n description: The key may be used to encipher only.\n keyAgreement:\n type: boolean\n x-dcl-go-name: KeyAgreement\n description: 
The key may be used in a key agreement protocol.\n keyEncipherment:\n type: boolean\n x-dcl-go-name: KeyEncipherment\n description: The key may be used to encipher other keys.\n extendedKeyUsage:\n type: object\n x-dcl-go-name: ExtendedKeyUsage\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionKeyUsageExtendedKeyUsage\n description: Detailed scenarios in which a key may be used.\n properties:\n clientAuth:\n type: boolean\n x-dcl-go-name: ClientAuth\n description: Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially\n described as \"TLS WWW client authentication\", though\n regularly used for non-WWW TLS.\n codeSigning:\n type: boolean\n x-dcl-go-name: CodeSigning\n description: Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially\n described as \"Signing of downloadable executable code\n client authentication\".\n emailProtection:\n type: boolean\n x-dcl-go-name: EmailProtection\n description: Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially\n described as \"Email protection\".\n ocspSigning:\n type: boolean\n x-dcl-go-name: OcspSigning\n description: Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially\n described as \"Signing OCSP responses\".\n serverAuth:\n type: boolean\n x-dcl-go-name: ServerAuth\n description: Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially\n described as \"TLS WWW server authentication\", though\n regularly used for non-WWW TLS.\n timeStamping:\n type: boolean\n x-dcl-go-name: TimeStamping\n description: Corresponds to OID 1.3.6.1.5.5.7.3.8. 
Officially\n described as \"Binding the hash of an object to a time\".\n unknownExtendedKeyUsages:\n type: array\n x-dcl-go-name: UnknownExtendedKeyUsages\n description: Used to describe extended key usages that are\n not listed in the KeyUsage.ExtendedKeyUsageOptions message.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionKeyUsageUnknownExtendedKeyUsages\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The\n most significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n policyIds:\n type: array\n x-dcl-go-name: PolicyIds\n description: Optional. Describes the X.509 certificate policy\n object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionPolicyIds\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n caPool:\n type: string\n x-dcl-go-name: CaPool\n description: The caPool for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Privateca/CaPool\n field: name\n parent: true\n config:\n type: object\n x-dcl-go-name: Config\n x-dcl-go-type: CertificateAuthorityConfig\n description: Required. Immutable. 
The config used to create a self-signed\n X.509 certificate or CSR.\n x-kubernetes-immutable: true\n required:\n - subjectConfig\n - x509Config\n properties:\n publicKey:\n type: object\n x-dcl-go-name: PublicKey\n x-dcl-go-type: CertificateAuthorityConfigPublicKey\n readOnly: true\n description: Optional. The public key that corresponds to this config.\n This is, for example, used when issuing Certificates, but not when\n creating a self-signed CertificateAuthority or CertificateAuthority\n CSR.\n x-kubernetes-immutable: true\n required:\n - key\n - format\n properties:\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: CertificateAuthorityConfigPublicKeyFormatEnum\n description: 'Required. The format of the public key. Possible values:\n PEM'\n x-kubernetes-immutable: true\n enum:\n - PEM\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A public key. The padding and encoding must\n match with the `KeyFormat` value specified for the `format` field.\n x-kubernetes-immutable: true\n subjectConfig:\n type: object\n x-dcl-go-name: SubjectConfig\n x-dcl-go-type: CertificateAuthorityConfigSubjectConfig\n description: Required. Specifies some of the values in a certificate\n that are related to the subject.\n x-kubernetes-immutable: true\n required:\n - subject\n properties:\n subject:\n type: object\n x-dcl-go-name: Subject\n x-dcl-go-type: CertificateAuthorityConfigSubjectConfigSubject\n description: Required. 
Contains distinguished name fields such as\n the common name, location and organization.\n x-kubernetes-immutable: true\n properties:\n commonName:\n type: string\n x-dcl-go-name: CommonName\n description: The \"common name\" of the subject.\n x-kubernetes-immutable: true\n countryCode:\n type: string\n x-dcl-go-name: CountryCode\n description: The country code of the subject.\n x-kubernetes-immutable: true\n locality:\n type: string\n x-dcl-go-name: Locality\n description: The locality or city of the subject.\n x-kubernetes-immutable: true\n organization:\n type: string\n x-dcl-go-name: Organization\n description: The organization of the subject.\n x-kubernetes-immutable: true\n organizationalUnit:\n type: string\n x-dcl-go-name: OrganizationalUnit\n description: The organizational_unit of the subject.\n x-kubernetes-immutable: true\n postalCode:\n type: string\n x-dcl-go-name: PostalCode\n description: The postal code of the subject.\n x-kubernetes-immutable: true\n province:\n type: string\n x-dcl-go-name: Province\n description: The province, territory, or regional state of the\n subject.\n x-kubernetes-immutable: true\n streetAddress:\n type: string\n x-dcl-go-name: StreetAddress\n description: The street address of the subject.\n x-kubernetes-immutable: true\n subjectAltName:\n type: object\n x-dcl-go-name: SubjectAltName\n x-dcl-go-type: CertificateAuthorityConfigSubjectConfigSubjectAltName\n description: Optional. The subject alternative name fields.\n x-kubernetes-immutable: true\n properties:\n customSans:\n type: array\n x-dcl-go-name: CustomSans\n description: Contains additional subject alternative name values.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityConfigSubjectConfigSubjectAltNameCustomSans\n required:\n - objectId\n - value\n properties:\n critical:\n type: boolean\n x-dcl-go-name: Critical\n description: Optional. 
Indicates whether or not this extension\n is critical (i.e., if the client does not know how to\n handle this extension, the client should consider this\n to be an error).\n x-kubernetes-immutable: true\n objectId:\n type: object\n x-dcl-go-name: ObjectId\n x-dcl-go-type: CertificateAuthorityConfigSubjectConfigSubjectAltNameCustomSansObjectId\n description: Required. The OID for this X.509 extension.\n x-kubernetes-immutable: true\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The\n most significant parts of the path come first.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n value:\n type: string\n x-dcl-go-name: Value\n description: Required. The value of this X.509 extension.\n x-kubernetes-immutable: true\n dnsNames:\n type: array\n x-dcl-go-name: DnsNames\n description: Contains only valid, fully-qualified host names.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n emailAddresses:\n type: array\n x-dcl-go-name: EmailAddresses\n description: Contains only valid RFC 2822 E-mail addresses.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n ipAddresses:\n type: array\n x-dcl-go-name: IPAddresses\n description: Contains only valid 32-bit IPv4 addresses or RFC\n 4291 IPv6 addresses.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n uris:\n type: array\n x-dcl-go-name: Uris\n description: Contains only valid RFC 3986 URIs.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x509Config:\n type: object\n x-dcl-go-name: X509Config\n x-dcl-go-type: 
CertificateAuthorityConfigX509Config\n description: Required. Describes how some of the technical X.509 fields\n in a certificate should be populated.\n x-kubernetes-immutable: true\n properties:\n additionalExtensions:\n type: array\n x-dcl-go-name: AdditionalExtensions\n description: Optional. Describes custom X.509 extensions.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigAdditionalExtensions\n required:\n - objectId\n - value\n properties:\n critical:\n type: boolean\n x-dcl-go-name: Critical\n description: Optional. Indicates whether or not this extension\n is critical (i.e., if the client does not know how to handle\n this extension, the client should consider this to be an\n error).\n x-kubernetes-immutable: true\n objectId:\n type: object\n x-dcl-go-name: ObjectId\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigAdditionalExtensionsObjectId\n description: Required. The OID for this X.509 extension.\n x-kubernetes-immutable: true\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n value:\n type: string\n x-dcl-go-name: Value\n description: Required. The value of this X.509 extension.\n x-kubernetes-immutable: true\n aiaOcspServers:\n type: array\n x-dcl-go-name: AiaOcspServers\n readOnly: true\n description: Optional. 
Describes Online Certificate Status Protocol\n (OCSP) endpoint addresses that appear in the \"Authority Information\n Access\" extension in the certificate.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n caOptions:\n type: object\n x-dcl-go-name: CaOptions\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigCaOptions\n description: Optional. Describes options in this X509Parameters\n that are relevant in a CA certificate.\n x-kubernetes-immutable: true\n properties:\n isCa:\n type: boolean\n x-dcl-go-name: IsCa\n description: Optional. Refers to the \"CA\" X.509 extension, which\n is a boolean value. When this value is missing, the extension\n will be omitted from the CA certificate.\n x-kubernetes-immutable: true\n maxIssuerPathLength:\n type: integer\n format: int64\n x-dcl-go-name: MaxIssuerPathLength\n description: Optional. Refers to the path length restriction\n X.509 extension. For a CA certificate, this value describes\n the depth of subordinate CA certificates that are allowed.\n If this value is less than 0, the request will fail. If this\n value is missing, the max path length will be omitted from\n the CA certificate.\n x-kubernetes-immutable: true\n keyUsage:\n type: object\n x-dcl-go-name: KeyUsage\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigKeyUsage\n description: Optional. 
Indicates the intended use for keys that\n correspond to a certificate.\n x-kubernetes-immutable: true\n properties:\n baseKeyUsage:\n type: object\n x-dcl-go-name: BaseKeyUsage\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsage\n description: Describes high-level ways in which a key may be\n used.\n x-kubernetes-immutable: true\n properties:\n certSign:\n type: boolean\n x-dcl-go-name: CertSign\n description: The key may be used to sign certificates.\n x-kubernetes-immutable: true\n contentCommitment:\n type: boolean\n x-dcl-go-name: ContentCommitment\n description: The key may be used for cryptographic commitments.\n Note that this may also be referred to as \"non-repudiation\".\n x-kubernetes-immutable: true\n crlSign:\n type: boolean\n x-dcl-go-name: CrlSign\n description: The key may be used sign certificate revocation\n lists.\n x-kubernetes-immutable: true\n dataEncipherment:\n type: boolean\n x-dcl-go-name: DataEncipherment\n description: The key may be used to encipher data.\n x-kubernetes-immutable: true\n decipherOnly:\n type: boolean\n x-dcl-go-name: DecipherOnly\n description: The key may be used to decipher only.\n x-kubernetes-immutable: true\n digitalSignature:\n type: boolean\n x-dcl-go-name: DigitalSignature\n description: The key may be used for digital signatures.\n x-kubernetes-immutable: true\n encipherOnly:\n type: boolean\n x-dcl-go-name: EncipherOnly\n description: The key may be used to encipher only.\n x-kubernetes-immutable: true\n keyAgreement:\n type: boolean\n x-dcl-go-name: KeyAgreement\n description: The key may be used in a key agreement protocol.\n x-kubernetes-immutable: true\n keyEncipherment:\n type: boolean\n x-dcl-go-name: KeyEncipherment\n description: The key may be used to encipher other keys.\n x-kubernetes-immutable: true\n extendedKeyUsage:\n type: object\n x-dcl-go-name: ExtendedKeyUsage\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsage\n description: Detailed 
scenarios in which a key may be used.\n x-kubernetes-immutable: true\n properties:\n clientAuth:\n type: boolean\n x-dcl-go-name: ClientAuth\n description: Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially\n described as \"TLS WWW client authentication\", though regularly\n used for non-WWW TLS.\n x-kubernetes-immutable: true\n codeSigning:\n type: boolean\n x-dcl-go-name: CodeSigning\n description: Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially\n described as \"Signing of downloadable executable code\n client authentication\".\n x-kubernetes-immutable: true\n emailProtection:\n type: boolean\n x-dcl-go-name: EmailProtection\n description: Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially\n described as \"Email protection\".\n x-kubernetes-immutable: true\n ocspSigning:\n type: boolean\n x-dcl-go-name: OcspSigning\n description: Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially\n described as \"Signing OCSP responses\".\n x-kubernetes-immutable: true\n serverAuth:\n type: boolean\n x-dcl-go-name: ServerAuth\n description: Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially\n described as \"TLS WWW server authentication\", though regularly\n used for non-WWW TLS.\n x-kubernetes-immutable: true\n timeStamping:\n type: boolean\n x-dcl-go-name: TimeStamping\n description: Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially\n described as \"Binding the hash of an object to a time\".\n x-kubernetes-immutable: true\n unknownExtendedKeyUsages:\n type: array\n x-dcl-go-name: UnknownExtendedKeyUsages\n description: Used to describe extended key usages that are not\n listed in the KeyUsage.ExtendedKeyUsageOptions message.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigKeyUsageUnknownExtendedKeyUsages\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. 
The most\n significant parts of the path come first.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n policyIds:\n type: array\n x-dcl-go-name: PolicyIds\n description: Optional. Describes the X.509 certificate policy object\n identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigPolicyIds\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. The time at which this CertificateAuthority was\n created.\n x-kubernetes-immutable: true\n deleteTime:\n type: string\n format: date-time\n x-dcl-go-name: DeleteTime\n readOnly: true\n description: Output only. The time at which this CertificateAuthority was\n soft deleted, if it is in the DELETED state.\n x-kubernetes-immutable: true\n expireTime:\n type: string\n format: date-time\n x-dcl-go-name: ExpireTime\n readOnly: true\n description: Output only. The time at which this CertificateAuthority will\n be permanently purged, if it is in the DELETED state.\n x-kubernetes-immutable: true\n gcsBucket:\n type: string\n x-dcl-go-name: GcsBucket\n description: Immutable. The name of a Cloud Storage bucket where this CertificateAuthority\n will publish content, such as the CA certificate and CRLs. This must be\n a bucket name, without any prefixes (such as `gs://`) or suffixes (such\n as `.googleapis.com`). 
For example, to use a bucket named `my-bucket`,\n you would simply specify `my-bucket`. If not specified, a managed bucket\n will be created.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Storage/Bucket\n field: name\n keySpec:\n type: object\n x-dcl-go-name: KeySpec\n x-dcl-go-type: CertificateAuthorityKeySpec\n description: Required. Immutable. Used when issuing certificates for this\n CertificateAuthority. If this CertificateAuthority is a self-signed CertificateAuthority,\n this key is also used to sign the self-signed CA certificate. Otherwise,\n it is used to sign a CSR.\n x-kubernetes-immutable: true\n properties:\n algorithm:\n type: string\n x-dcl-go-name: Algorithm\n x-dcl-go-type: CertificateAuthorityKeySpecAlgorithmEnum\n description: 'The algorithm to use for creating a managed Cloud KMS\n key for a for a simplified experience. All managed keys will be have\n their ProtectionLevel as `HSM`. Possible values: RSA_PSS_2048_SHA256,\n RSA_PSS_3072_SHA256, RSA_PSS_4096_SHA256, RSA_PKCS1_2048_SHA256, RSA_PKCS1_3072_SHA256,\n RSA_PKCS1_4096_SHA256, EC_P256_SHA256, EC_P384_SHA384'\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - cloudKmsKeyVersion\n enum:\n - RSA_PSS_2048_SHA256\n - RSA_PSS_3072_SHA256\n - RSA_PSS_4096_SHA256\n - RSA_PKCS1_2048_SHA256\n - RSA_PKCS1_3072_SHA256\n - RSA_PKCS1_4096_SHA256\n - EC_P256_SHA256\n - EC_P384_SHA384\n cloudKmsKeyVersion:\n type: string\n x-dcl-go-name: CloudKmsKeyVersion\n description: The resource name for an existing Cloud KMS CryptoKeyVersion\n in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.\n This option enables full flexibility in the key's capabilities and\n properties.\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - algorithm\n x-dcl-references:\n - resource: Cloudkms/CryptoKeyVersion\n field: name\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. 
Labels with user-defined metadata.\n lifetime:\n type: string\n x-dcl-go-name: Lifetime\n description: Required. The desired lifetime of the CA certificate. Used\n to create the \"not_before_time\" and \"not_after_time\" fields inside an\n X.509 certificate.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The resource name for this CertificateAuthority in the format\n `projects/*/locations/*/caPools/*/certificateAuthorities/*`.\n x-kubernetes-immutable: true\n pemCaCertificates:\n type: array\n x-dcl-go-name: PemCaCertificates\n readOnly: true\n description: Output only. This CertificateAuthority's certificate chain,\n including the current CertificateAuthority's certificate. Ordered such\n that the root issuer is the final element (consistent with RFC 5246).\n For a self-signed CA, this will only list the current CertificateAuthority's\n certificate.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: CertificateAuthorityStateEnum\n readOnly: true\n description: 'Output only. The State for this CertificateAuthority. Possible\n values: ENABLED, DISABLED, STAGED, AWAITING_USER_ACTIVATION, DELETED'\n x-kubernetes-immutable: true\n enum:\n - ENABLED\n - DISABLED\n - STAGED\n - AWAITING_USER_ACTIVATION\n - DELETED\n subordinateConfig:\n type: object\n x-dcl-go-name: SubordinateConfig\n x-dcl-go-type: CertificateAuthoritySubordinateConfig\n readOnly: true\n description: Optional. 
If this is a subordinate CertificateAuthority, this\n field will be set with the subordinate configuration, which describes\n its issuers. This may be updated, but this CertificateAuthority must continue\n to validate.\n x-kubernetes-immutable: true\n properties:\n certificateAuthority:\n type: string\n x-dcl-go-name: CertificateAuthority\n description: Required. This can refer to a CertificateAuthority in the\n same project that was used to create a subordinate CertificateAuthority.\n This field is used for information and usability purposes only. The\n resource name is in the format `projects/*/locations/*/caPools/*/certificateAuthorities/*`.\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - pemIssuerChain\n x-dcl-references:\n - resource: Privateca/CertificateAuthority\n field: selfLink\n pemIssuerChain:\n type: object\n x-dcl-go-name: PemIssuerChain\n x-dcl-go-type: CertificateAuthoritySubordinateConfigPemIssuerChain\n description: Required. Contains the PEM certificate chain for the issuers\n of this CertificateAuthority, but not pem certificate for this CA\n itself.\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - certificateAuthority\n required:\n - pemCertificates\n properties:\n pemCertificates:\n type: array\n x-dcl-go-name: PemCertificates\n description: Required. Expected to be in leaf-to-root order according\n to RFC 5246.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n tier:\n type: string\n x-dcl-go-name: Tier\n x-dcl-go-type: CertificateAuthorityTierEnum\n readOnly: true\n description: 'Output only. The CaPool.Tier of the CaPool that includes this\n CertificateAuthority. Possible values: ENTERPRISE, DEVOPS'\n x-kubernetes-immutable: true\n enum:\n - ENTERPRISE\n - DEVOPS\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: CertificateAuthorityTypeEnum\n description: 'Required. Immutable. 
The Type of this CertificateAuthority.\n Possible values: SELF_SIGNED, SUBORDINATE'\n x-kubernetes-immutable: true\n enum:\n - SELF_SIGNED\n - SUBORDINATE\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. The time at which this CertificateAuthority was\n last updated.\n x-kubernetes-immutable: true\n") +var YAML_certificate_authority = []byte("info:\n title: Privateca/CertificateAuthority\n description: The Privateca CertificateAuthority resource\n x-dcl-struct-name: CertificateAuthority\n x-dcl-has-iam: false\npaths:\n get:\n description: The function used to get information about a CertificateAuthority\n parameters:\n - name: certificateAuthority\n required: true\n description: A full instance of a CertificateAuthority\n apply:\n description: The function used to apply information about a CertificateAuthority\n parameters:\n - name: certificateAuthority\n required: true\n description: A full instance of a CertificateAuthority\n delete:\n description: The function used to delete a CertificateAuthority\n parameters:\n - name: certificateAuthority\n required: true\n description: A full instance of a CertificateAuthority\n deleteAll:\n description: The function used to delete all CertificateAuthority\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: caPool\n required: true\n schema:\n type: string\n list:\n description: The function used to list information about many CertificateAuthority\n parameters:\n - name: project\n required: true\n schema:\n type: string\n - name: location\n required: true\n schema:\n type: string\n - name: caPool\n required: true\n schema:\n type: string\ncomponents:\n schemas:\n CertificateAuthority:\n title: CertificateAuthority\n x-dcl-id: projects/{{project}}/locations/{{location}}/caPools/{{ca_pool}}/certificateAuthorities/{{name}}\n x-dcl-parent-container: project\n 
x-dcl-labels: labels\n x-dcl-has-create: true\n x-dcl-has-iam: false\n x-dcl-read-timeout: 0\n x-dcl-apply-timeout: 0\n x-dcl-delete-timeout: 0\n type: object\n required:\n - name\n - type\n - config\n - lifetime\n - keySpec\n - project\n - location\n - caPool\n properties:\n accessUrls:\n type: object\n x-dcl-go-name: AccessUrls\n x-dcl-go-type: CertificateAuthorityAccessUrls\n readOnly: true\n description: Output only. URLs for accessing content published by this CA,\n such as the CA certificate and CRLs.\n x-kubernetes-immutable: true\n properties:\n caCertificateAccessUrl:\n type: string\n x-dcl-go-name: CaCertificateAccessUrl\n description: The URL where this CertificateAuthority's CA certificate\n is published. This will only be set for CAs that have been activated.\n x-kubernetes-immutable: true\n crlAccessUrls:\n type: array\n x-dcl-go-name: CrlAccessUrls\n description: The URLs where this CertificateAuthority's CRLs are published.\n This will only be set for CAs that have been activated.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n caCertificateDescriptions:\n type: array\n x-dcl-go-name: CaCertificateDescriptions\n readOnly: true\n description: Output only. A structured description of this CertificateAuthority's\n CA certificate and its issuers. 
Ordered as self-to-root.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptions\n properties:\n aiaIssuingCertificateUrls:\n type: array\n x-dcl-go-name: AiaIssuingCertificateUrls\n description: Describes lists of issuer CA certificate URLs that appear\n in the \"Authority Information Access\" extension in the certificate.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n authorityKeyId:\n type: object\n x-dcl-go-name: AuthorityKeyId\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsAuthorityKeyId\n description: Identifies the subject_key_id of the parent certificate,\n per https://tools.ietf.org/html/rfc5280#section-4.2.1.1\n properties:\n keyId:\n type: string\n x-dcl-go-name: KeyId\n description: Optional. The value of this KeyId encoded in lowercase\n hexadecimal. This is most likely the 160 bit SHA-1 hash of the\n public key.\n certFingerprint:\n type: object\n x-dcl-go-name: CertFingerprint\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsCertFingerprint\n description: The hash of the x.509 certificate.\n properties:\n sha256Hash:\n type: string\n x-dcl-go-name: Sha256Hash\n description: The SHA 256 hash, encoded in hexadecimal, of the\n DER x509 certificate.\n crlDistributionPoints:\n type: array\n x-dcl-go-name: CrlDistributionPoints\n description: Describes a list of locations to obtain CRL information,\n i.e. 
the DistributionPoint.fullName described by https://tools.ietf.org/html/rfc5280#section-4.2.1.13\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n publicKey:\n type: object\n x-dcl-go-name: PublicKey\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsPublicKey\n description: The public key that corresponds to an issued certificate.\n required:\n - key\n - format\n properties:\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsPublicKeyFormatEnum\n description: 'Required. The format of the public key. Possible\n values: PEM'\n enum:\n - PEM\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A public key. The padding and encoding\n must match with the `KeyFormat` value specified for the `format`\n field.\n subjectDescription:\n type: object\n x-dcl-go-name: SubjectDescription\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsSubjectDescription\n description: Describes some of the values in a certificate that are\n related to the subject and lifetime.\n properties:\n hexSerialNumber:\n type: string\n x-dcl-go-name: HexSerialNumber\n description: The serial number encoded in lowercase hexadecimal.\n lifetime:\n type: string\n x-dcl-go-name: Lifetime\n description: For convenience, the actual lifetime of an issued\n certificate.\n notAfterTime:\n type: string\n format: date-time\n x-dcl-go-name: NotAfterTime\n description: The time after which the certificate is expired.\n Per RFC 5280, the validity period for a certificate is the period\n of time from not_before_time through not_after_time, inclusive.\n Corresponds to 'not_before_time' + 'lifetime' - 1 second.\n notBeforeTime:\n type: string\n format: date-time\n x-dcl-go-name: NotBeforeTime\n description: The time at which the certificate becomes valid.\n subject:\n type: object\n x-dcl-go-name: Subject\n x-dcl-go-type: 
CertificateAuthorityCaCertificateDescriptionsSubjectDescriptionSubject\n description: Contains distinguished name fields such as the common\n name, location and organization.\n properties:\n commonName:\n type: string\n x-dcl-go-name: CommonName\n description: The \"common name\" of the subject.\n countryCode:\n type: string\n x-dcl-go-name: CountryCode\n description: The country code of the subject.\n locality:\n type: string\n x-dcl-go-name: Locality\n description: The locality or city of the subject.\n organization:\n type: string\n x-dcl-go-name: Organization\n description: The organization of the subject.\n organizationalUnit:\n type: string\n x-dcl-go-name: OrganizationalUnit\n description: The organizational_unit of the subject.\n postalCode:\n type: string\n x-dcl-go-name: PostalCode\n description: The postal code of the subject.\n province:\n type: string\n x-dcl-go-name: Province\n description: The province, territory, or regional state of\n the subject.\n streetAddress:\n type: string\n x-dcl-go-name: StreetAddress\n description: The street address of the subject.\n subjectAltName:\n type: object\n x-dcl-go-name: SubjectAltName\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsSubjectDescriptionSubjectAltName\n description: The subject alternative name fields.\n properties:\n customSans:\n type: array\n x-dcl-go-name: CustomSans\n description: Contains additional subject alternative name\n values.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsSubjectDescriptionSubjectAltNameCustomSans\n required:\n - objectId\n - critical\n - value\n properties:\n critical:\n type: boolean\n x-dcl-go-name: Critical\n description: Optional. 
Indicates whether or not this\n extension is critical (i.e., if the client does not\n know how to handle this extension, the client should\n consider this to be an error).\n objectId:\n type: object\n x-dcl-go-name: ObjectId\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsSubjectDescriptionSubjectAltNameCustomSansObjectId\n description: Required. The OID for this X.509 extension.\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path.\n The most significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n value:\n type: string\n x-dcl-go-name: Value\n description: Required. The value of this X.509 extension.\n dnsNames:\n type: array\n x-dcl-go-name: DnsNames\n description: Contains only valid, fully-qualified host names.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n emailAddresses:\n type: array\n x-dcl-go-name: EmailAddresses\n description: Contains only valid RFC 2822 E-mail addresses.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n ipAddresses:\n type: array\n x-dcl-go-name: IPAddresses\n description: Contains only valid 32-bit IPv4 addresses or\n RFC 4291 IPv6 addresses.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n uris:\n type: array\n x-dcl-go-name: Uris\n description: Contains only valid RFC 3986 URIs.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n subjectKeyId:\n type: object\n x-dcl-go-name: SubjectKeyId\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsSubjectKeyId\n description: Provides a means of identifiying certificates that contain\n a particular public key, per https://tools.ietf.org/html/rfc5280#section-4.2.1.2.\n 
properties:\n keyId:\n type: string\n x-dcl-go-name: KeyId\n description: Optional. The value of this KeyId encoded in lowercase\n hexadecimal. This is most likely the 160 bit SHA-1 hash of the\n public key.\n x509Description:\n type: object\n x-dcl-go-name: X509Description\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509Description\n description: Describes some of the technical X.509 fields in a certificate.\n properties:\n additionalExtensions:\n type: array\n x-dcl-go-name: AdditionalExtensions\n description: Optional. Describes custom X.509 extensions.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionAdditionalExtensions\n required:\n - objectId\n - value\n properties:\n critical:\n type: boolean\n x-dcl-go-name: Critical\n description: Optional. Indicates whether or not this extension\n is critical (i.e., if the client does not know how to\n handle this extension, the client should consider this\n to be an error).\n objectId:\n type: object\n x-dcl-go-name: ObjectId\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionAdditionalExtensionsObjectId\n description: Required. The OID for this X.509 extension.\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The\n most significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n value:\n type: string\n x-dcl-go-name: Value\n description: Required. The value of this X.509 extension.\n aiaOcspServers:\n type: array\n x-dcl-go-name: AiaOcspServers\n readOnly: true\n description: Optional. 
Describes Online Certificate Status Protocol\n (OCSP) endpoint addresses that appear in the \"Authority Information\n Access\" extension in the certificate.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n caOptions:\n type: object\n x-dcl-go-name: CaOptions\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionCaOptions\n description: Optional. Describes options in this X509Parameters\n that are relevant in a CA certificate.\n properties:\n isCa:\n type: boolean\n x-dcl-go-name: IsCa\n description: Optional. Refers to the \"CA\" X.509 extension,\n which is a boolean value. When this value is missing, the\n extension will be omitted from the CA certificate.\n maxIssuerPathLength:\n type: integer\n format: int64\n x-dcl-go-name: MaxIssuerPathLength\n description: Optional. Refers to the path length restriction\n X.509 extension. For a CA certificate, this value describes\n the depth of subordinate CA certificates that are allowed.\n If this value is less than 0, the request will fail. If\n this value is missing, the max path length will be omitted\n from the CA certificate.\n keyUsage:\n type: object\n x-dcl-go-name: KeyUsage\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionKeyUsage\n description: Optional. 
Indicates the intended use for keys that\n correspond to a certificate.\n properties:\n baseKeyUsage:\n type: object\n x-dcl-go-name: BaseKeyUsage\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionKeyUsageBaseKeyUsage\n description: Describes high-level ways in which a key may\n be used.\n properties:\n certSign:\n type: boolean\n x-dcl-go-name: CertSign\n description: The key may be used to sign certificates.\n contentCommitment:\n type: boolean\n x-dcl-go-name: ContentCommitment\n description: The key may be used for cryptographic commitments.\n Note that this may also be referred to as \"non-repudiation\".\n crlSign:\n type: boolean\n x-dcl-go-name: CrlSign\n description: The key may be used sign certificate revocation\n lists.\n dataEncipherment:\n type: boolean\n x-dcl-go-name: DataEncipherment\n description: The key may be used to encipher data.\n decipherOnly:\n type: boolean\n x-dcl-go-name: DecipherOnly\n description: The key may be used to decipher only.\n digitalSignature:\n type: boolean\n x-dcl-go-name: DigitalSignature\n description: The key may be used for digital signatures.\n encipherOnly:\n type: boolean\n x-dcl-go-name: EncipherOnly\n description: The key may be used to encipher only.\n keyAgreement:\n type: boolean\n x-dcl-go-name: KeyAgreement\n description: The key may be used in a key agreement protocol.\n keyEncipherment:\n type: boolean\n x-dcl-go-name: KeyEncipherment\n description: The key may be used to encipher other keys.\n extendedKeyUsage:\n type: object\n x-dcl-go-name: ExtendedKeyUsage\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionKeyUsageExtendedKeyUsage\n description: Detailed scenarios in which a key may be used.\n properties:\n clientAuth:\n type: boolean\n x-dcl-go-name: ClientAuth\n description: Corresponds to OID 1.3.6.1.5.5.7.3.2. 
Officially\n described as \"TLS WWW client authentication\", though\n regularly used for non-WWW TLS.\n codeSigning:\n type: boolean\n x-dcl-go-name: CodeSigning\n description: Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially\n described as \"Signing of downloadable executable code\n client authentication\".\n emailProtection:\n type: boolean\n x-dcl-go-name: EmailProtection\n description: Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially\n described as \"Email protection\".\n ocspSigning:\n type: boolean\n x-dcl-go-name: OcspSigning\n description: Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially\n described as \"Signing OCSP responses\".\n serverAuth:\n type: boolean\n x-dcl-go-name: ServerAuth\n description: Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially\n described as \"TLS WWW server authentication\", though\n regularly used for non-WWW TLS.\n timeStamping:\n type: boolean\n x-dcl-go-name: TimeStamping\n description: Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially\n described as \"Binding the hash of an object to a time\".\n unknownExtendedKeyUsages:\n type: array\n x-dcl-go-name: UnknownExtendedKeyUsages\n description: Used to describe extended key usages that are\n not listed in the KeyUsage.ExtendedKeyUsageOptions message.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionKeyUsageUnknownExtendedKeyUsages\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The\n most significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n policyIds:\n type: array\n x-dcl-go-name: PolicyIds\n description: Optional. 
Describes the X.509 certificate policy\n object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityCaCertificateDescriptionsX509DescriptionPolicyIds\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n caPool:\n type: string\n x-dcl-go-name: CaPool\n description: The caPool for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Privateca/CaPool\n field: name\n parent: true\n config:\n type: object\n x-dcl-go-name: Config\n x-dcl-go-type: CertificateAuthorityConfig\n description: Required. Immutable. The config used to create a self-signed\n X.509 certificate or CSR.\n x-kubernetes-immutable: true\n required:\n - subjectConfig\n - x509Config\n properties:\n publicKey:\n type: object\n x-dcl-go-name: PublicKey\n x-dcl-go-type: CertificateAuthorityConfigPublicKey\n readOnly: true\n description: Optional. The public key that corresponds to this config.\n This is, for example, used when issuing Certificates, but not when\n creating a self-signed CertificateAuthority or CertificateAuthority\n CSR.\n x-kubernetes-immutable: true\n required:\n - key\n - format\n properties:\n format:\n type: string\n x-dcl-go-name: Format\n x-dcl-go-type: CertificateAuthorityConfigPublicKeyFormatEnum\n description: 'Required. The format of the public key. Possible values:\n PEM'\n x-kubernetes-immutable: true\n enum:\n - PEM\n key:\n type: string\n x-dcl-go-name: Key\n description: Required. A public key. 
The padding and encoding must\n match with the `KeyFormat` value specified for the `format` field.\n x-kubernetes-immutable: true\n subjectConfig:\n type: object\n x-dcl-go-name: SubjectConfig\n x-dcl-go-type: CertificateAuthorityConfigSubjectConfig\n description: Required. Specifies some of the values in a certificate\n that are related to the subject.\n x-kubernetes-immutable: true\n required:\n - subject\n properties:\n subject:\n type: object\n x-dcl-go-name: Subject\n x-dcl-go-type: CertificateAuthorityConfigSubjectConfigSubject\n description: Required. Contains distinguished name fields such as\n the common name, location and organization.\n x-kubernetes-immutable: true\n properties:\n commonName:\n type: string\n x-dcl-go-name: CommonName\n description: The \"common name\" of the subject.\n x-kubernetes-immutable: true\n countryCode:\n type: string\n x-dcl-go-name: CountryCode\n description: The country code of the subject.\n x-kubernetes-immutable: true\n locality:\n type: string\n x-dcl-go-name: Locality\n description: The locality or city of the subject.\n x-kubernetes-immutable: true\n organization:\n type: string\n x-dcl-go-name: Organization\n description: The organization of the subject.\n x-kubernetes-immutable: true\n organizationalUnit:\n type: string\n x-dcl-go-name: OrganizationalUnit\n description: The organizational_unit of the subject.\n x-kubernetes-immutable: true\n postalCode:\n type: string\n x-dcl-go-name: PostalCode\n description: The postal code of the subject.\n x-kubernetes-immutable: true\n province:\n type: string\n x-dcl-go-name: Province\n description: The province, territory, or regional state of the\n subject.\n x-kubernetes-immutable: true\n streetAddress:\n type: string\n x-dcl-go-name: StreetAddress\n description: The street address of the subject.\n x-kubernetes-immutable: true\n subjectAltName:\n type: object\n x-dcl-go-name: SubjectAltName\n x-dcl-go-type: CertificateAuthorityConfigSubjectConfigSubjectAltName\n 
description: Optional. The subject alternative name fields.\n x-kubernetes-immutable: true\n properties:\n customSans:\n type: array\n x-dcl-go-name: CustomSans\n description: Contains additional subject alternative name values.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityConfigSubjectConfigSubjectAltNameCustomSans\n required:\n - objectId\n - value\n properties:\n critical:\n type: boolean\n x-dcl-go-name: Critical\n description: Optional. Indicates whether or not this extension\n is critical (i.e., if the client does not know how to\n handle this extension, the client should consider this\n to be an error).\n x-kubernetes-immutable: true\n objectId:\n type: object\n x-dcl-go-name: ObjectId\n x-dcl-go-type: CertificateAuthorityConfigSubjectConfigSubjectAltNameCustomSansObjectId\n description: Required. The OID for this X.509 extension.\n x-kubernetes-immutable: true\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The\n most significant parts of the path come first.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n value:\n type: string\n x-dcl-go-name: Value\n description: Required. 
The value of this X.509 extension.\n x-kubernetes-immutable: true\n dnsNames:\n type: array\n x-dcl-go-name: DnsNames\n description: Contains only valid, fully-qualified host names.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n emailAddresses:\n type: array\n x-dcl-go-name: EmailAddresses\n description: Contains only valid RFC 2822 E-mail addresses.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n ipAddresses:\n type: array\n x-dcl-go-name: IPAddresses\n description: Contains only valid 32-bit IPv4 addresses or RFC\n 4291 IPv6 addresses.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n uris:\n type: array\n x-dcl-go-name: Uris\n description: Contains only valid RFC 3986 URIs.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n x509Config:\n type: object\n x-dcl-go-name: X509Config\n x-dcl-go-type: CertificateAuthorityConfigX509Config\n description: Required. Describes how some of the technical X.509 fields\n in a certificate should be populated.\n x-kubernetes-immutable: true\n properties:\n additionalExtensions:\n type: array\n x-dcl-go-name: AdditionalExtensions\n description: Optional. Describes custom X.509 extensions.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigAdditionalExtensions\n required:\n - objectId\n - value\n properties:\n critical:\n type: boolean\n x-dcl-go-name: Critical\n description: Optional. 
Indicates whether or not this extension\n is critical (i.e., if the client does not know how to handle\n this extension, the client should consider this to be an\n error).\n x-kubernetes-immutable: true\n objectId:\n type: object\n x-dcl-go-name: ObjectId\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigAdditionalExtensionsObjectId\n description: Required. The OID for this X.509 extension.\n x-kubernetes-immutable: true\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n value:\n type: string\n x-dcl-go-name: Value\n description: Required. The value of this X.509 extension.\n x-kubernetes-immutable: true\n aiaOcspServers:\n type: array\n x-dcl-go-name: AiaOcspServers\n readOnly: true\n description: Optional. Describes Online Certificate Status Protocol\n (OCSP) endpoint addresses that appear in the \"Authority Information\n Access\" extension in the certificate.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n caOptions:\n type: object\n x-dcl-go-name: CaOptions\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigCaOptions\n description: Optional. Describes options in this X509Parameters\n that are relevant in a CA certificate.\n x-kubernetes-immutable: true\n properties:\n isCa:\n type: boolean\n x-dcl-go-name: IsCa\n description: Optional. Refers to the \"CA\" X.509 extension, which\n is a boolean value. When this value is missing, the extension\n will be omitted from the CA certificate.\n x-kubernetes-immutable: true\n maxIssuerPathLength:\n type: integer\n format: int64\n x-dcl-go-name: MaxIssuerPathLength\n description: Optional. Refers to the path length restriction\n X.509 extension. 
For a CA certificate, this value describes\n the depth of subordinate CA certificates that are allowed.\n If this value is less than 0, the request will fail. If this\n value is missing, the max path length will be omitted from\n the CA certificate.\n x-kubernetes-immutable: true\n zeroMaxIssuerPathLength:\n type: boolean\n x-dcl-go-name: ZeroMaxIssuerPathLength\n description: Optional. When true, the \"path length constraint\"\n in Basic Constraints extension will be set to 0. if both max_issuer_path_length\n and zero_max_issuer_path_length are unset, the max path length\n will be omitted from the CA certificate.\n x-kubernetes-immutable: true\n keyUsage:\n type: object\n x-dcl-go-name: KeyUsage\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigKeyUsage\n description: Optional. Indicates the intended use for keys that\n correspond to a certificate.\n x-kubernetes-immutable: true\n properties:\n baseKeyUsage:\n type: object\n x-dcl-go-name: BaseKeyUsage\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsage\n description: Describes high-level ways in which a key may be\n used.\n x-kubernetes-immutable: true\n properties:\n certSign:\n type: boolean\n x-dcl-go-name: CertSign\n description: The key may be used to sign certificates.\n x-kubernetes-immutable: true\n contentCommitment:\n type: boolean\n x-dcl-go-name: ContentCommitment\n description: The key may be used for cryptographic commitments.\n Note that this may also be referred to as \"non-repudiation\".\n x-kubernetes-immutable: true\n crlSign:\n type: boolean\n x-dcl-go-name: CrlSign\n description: The key may be used sign certificate revocation\n lists.\n x-kubernetes-immutable: true\n dataEncipherment:\n type: boolean\n x-dcl-go-name: DataEncipherment\n description: The key may be used to encipher data.\n x-kubernetes-immutable: true\n decipherOnly:\n type: boolean\n x-dcl-go-name: DecipherOnly\n description: The key may be used to decipher only.\n x-kubernetes-immutable: true\n 
digitalSignature:\n type: boolean\n x-dcl-go-name: DigitalSignature\n description: The key may be used for digital signatures.\n x-kubernetes-immutable: true\n encipherOnly:\n type: boolean\n x-dcl-go-name: EncipherOnly\n description: The key may be used to encipher only.\n x-kubernetes-immutable: true\n keyAgreement:\n type: boolean\n x-dcl-go-name: KeyAgreement\n description: The key may be used in a key agreement protocol.\n x-kubernetes-immutable: true\n keyEncipherment:\n type: boolean\n x-dcl-go-name: KeyEncipherment\n description: The key may be used to encipher other keys.\n x-kubernetes-immutable: true\n extendedKeyUsage:\n type: object\n x-dcl-go-name: ExtendedKeyUsage\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsage\n description: Detailed scenarios in which a key may be used.\n x-kubernetes-immutable: true\n properties:\n clientAuth:\n type: boolean\n x-dcl-go-name: ClientAuth\n description: Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially\n described as \"TLS WWW client authentication\", though regularly\n used for non-WWW TLS.\n x-kubernetes-immutable: true\n codeSigning:\n type: boolean\n x-dcl-go-name: CodeSigning\n description: Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially\n described as \"Signing of downloadable executable code\n client authentication\".\n x-kubernetes-immutable: true\n emailProtection:\n type: boolean\n x-dcl-go-name: EmailProtection\n description: Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially\n described as \"Email protection\".\n x-kubernetes-immutable: true\n ocspSigning:\n type: boolean\n x-dcl-go-name: OcspSigning\n description: Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially\n described as \"Signing OCSP responses\".\n x-kubernetes-immutable: true\n serverAuth:\n type: boolean\n x-dcl-go-name: ServerAuth\n description: Corresponds to OID 1.3.6.1.5.5.7.3.1. 
Officially\n described as \"TLS WWW server authentication\", though regularly\n used for non-WWW TLS.\n x-kubernetes-immutable: true\n timeStamping:\n type: boolean\n x-dcl-go-name: TimeStamping\n description: Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially\n described as \"Binding the hash of an object to a time\".\n x-kubernetes-immutable: true\n unknownExtendedKeyUsages:\n type: array\n x-dcl-go-name: UnknownExtendedKeyUsages\n description: Used to describe extended key usages that are not\n listed in the KeyUsage.ExtendedKeyUsageOptions message.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigKeyUsageUnknownExtendedKeyUsages\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n policyIds:\n type: array\n x-dcl-go-name: PolicyIds\n description: Optional. Describes the X.509 certificate policy object\n identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: object\n x-dcl-go-type: CertificateAuthorityConfigX509ConfigPolicyIds\n required:\n - objectIdPath\n properties:\n objectIdPath:\n type: array\n x-dcl-go-name: ObjectIdPath\n description: Required. The parts of an OID path. The most\n significant parts of the path come first.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: integer\n format: int64\n x-dcl-go-type: int64\n createTime:\n type: string\n format: date-time\n x-dcl-go-name: CreateTime\n readOnly: true\n description: Output only. 
The time at which this CertificateAuthority was\n created.\n x-kubernetes-immutable: true\n deleteTime:\n type: string\n format: date-time\n x-dcl-go-name: DeleteTime\n readOnly: true\n description: Output only. The time at which this CertificateAuthority was\n soft deleted, if it is in the DELETED state.\n x-kubernetes-immutable: true\n expireTime:\n type: string\n format: date-time\n x-dcl-go-name: ExpireTime\n readOnly: true\n description: Output only. The time at which this CertificateAuthority will\n be permanently purged, if it is in the DELETED state.\n x-kubernetes-immutable: true\n gcsBucket:\n type: string\n x-dcl-go-name: GcsBucket\n description: Immutable. The name of a Cloud Storage bucket where this CertificateAuthority\n will publish content, such as the CA certificate and CRLs. This must be\n a bucket name, without any prefixes (such as `gs://`) or suffixes (such\n as `.googleapis.com`). For example, to use a bucket named `my-bucket`,\n you would simply specify `my-bucket`. If not specified, a managed bucket\n will be created.\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Storage/Bucket\n field: name\n keySpec:\n type: object\n x-dcl-go-name: KeySpec\n x-dcl-go-type: CertificateAuthorityKeySpec\n description: Required. Immutable. Used when issuing certificates for this\n CertificateAuthority. If this CertificateAuthority is a self-signed CertificateAuthority,\n this key is also used to sign the self-signed CA certificate. Otherwise,\n it is used to sign a CSR.\n x-kubernetes-immutable: true\n properties:\n algorithm:\n type: string\n x-dcl-go-name: Algorithm\n x-dcl-go-type: CertificateAuthorityKeySpecAlgorithmEnum\n description: 'The algorithm to use for creating a managed Cloud KMS\n key for a for a simplified experience. All managed keys will be have\n their ProtectionLevel as `HSM`. 
Possible values: RSA_PSS_2048_SHA256,\n RSA_PSS_3072_SHA256, RSA_PSS_4096_SHA256, RSA_PKCS1_2048_SHA256, RSA_PKCS1_3072_SHA256,\n RSA_PKCS1_4096_SHA256, EC_P256_SHA256, EC_P384_SHA384'\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - cloudKmsKeyVersion\n enum:\n - RSA_PSS_2048_SHA256\n - RSA_PSS_3072_SHA256\n - RSA_PSS_4096_SHA256\n - RSA_PKCS1_2048_SHA256\n - RSA_PKCS1_3072_SHA256\n - RSA_PKCS1_4096_SHA256\n - EC_P256_SHA256\n - EC_P384_SHA384\n cloudKmsKeyVersion:\n type: string\n x-dcl-go-name: CloudKmsKeyVersion\n description: The resource name for an existing Cloud KMS CryptoKeyVersion\n in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.\n This option enables full flexibility in the key's capabilities and\n properties.\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - algorithm\n x-dcl-references:\n - resource: Cloudkms/CryptoKeyVersion\n field: name\n labels:\n type: object\n additionalProperties:\n type: string\n x-dcl-go-name: Labels\n description: Optional. Labels with user-defined metadata.\n lifetime:\n type: string\n x-dcl-go-name: Lifetime\n description: Required. The desired lifetime of the CA certificate. Used\n to create the \"not_before_time\" and \"not_after_time\" fields inside an\n X.509 certificate.\n x-kubernetes-immutable: true\n location:\n type: string\n x-dcl-go-name: Location\n description: The location for the resource\n x-kubernetes-immutable: true\n name:\n type: string\n x-dcl-go-name: Name\n description: The resource name for this CertificateAuthority in the format\n `projects/*/locations/*/caPools/*/certificateAuthorities/*`.\n x-kubernetes-immutable: true\n pemCaCertificates:\n type: array\n x-dcl-go-name: PemCaCertificates\n readOnly: true\n description: Output only. This CertificateAuthority's certificate chain,\n including the current CertificateAuthority's certificate. 
Ordered such\n that the root issuer is the final element (consistent with RFC 5246).\n For a self-signed CA, this will only list the current CertificateAuthority's\n certificate.\n x-kubernetes-immutable: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n project:\n type: string\n x-dcl-go-name: Project\n description: The project for the resource\n x-kubernetes-immutable: true\n x-dcl-references:\n - resource: Cloudresourcemanager/Project\n field: name\n parent: true\n state:\n type: string\n x-dcl-go-name: State\n x-dcl-go-type: CertificateAuthorityStateEnum\n readOnly: true\n description: 'Output only. The State for this CertificateAuthority. Possible\n values: ENABLED, DISABLED, STAGED, AWAITING_USER_ACTIVATION, DELETED'\n x-kubernetes-immutable: true\n enum:\n - ENABLED\n - DISABLED\n - STAGED\n - AWAITING_USER_ACTIVATION\n - DELETED\n subordinateConfig:\n type: object\n x-dcl-go-name: SubordinateConfig\n x-dcl-go-type: CertificateAuthoritySubordinateConfig\n readOnly: true\n description: Optional. If this is a subordinate CertificateAuthority, this\n field will be set with the subordinate configuration, which describes\n its issuers. This may be updated, but this CertificateAuthority must continue\n to validate.\n x-kubernetes-immutable: true\n properties:\n certificateAuthority:\n type: string\n x-dcl-go-name: CertificateAuthority\n description: Required. This can refer to a CertificateAuthority in the\n same project that was used to create a subordinate CertificateAuthority.\n This field is used for information and usability purposes only. 
The\n resource name is in the format `projects/*/locations/*/caPools/*/certificateAuthorities/*`.\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - pemIssuerChain\n x-dcl-references:\n - resource: Privateca/CertificateAuthority\n field: selfLink\n pemIssuerChain:\n type: object\n x-dcl-go-name: PemIssuerChain\n x-dcl-go-type: CertificateAuthoritySubordinateConfigPemIssuerChain\n description: Required. Contains the PEM certificate chain for the issuers\n of this CertificateAuthority, but not pem certificate for this CA\n itself.\n x-kubernetes-immutable: true\n x-dcl-conflicts:\n - certificateAuthority\n required:\n - pemCertificates\n properties:\n pemCertificates:\n type: array\n x-dcl-go-name: PemCertificates\n description: Required. Expected to be in leaf-to-root order according\n to RFC 5246.\n x-kubernetes-immutable: true\n x-dcl-send-empty: true\n x-dcl-list-type: list\n items:\n type: string\n x-dcl-go-type: string\n tier:\n type: string\n x-dcl-go-name: Tier\n x-dcl-go-type: CertificateAuthorityTierEnum\n readOnly: true\n description: 'Output only. The CaPool.Tier of the CaPool that includes this\n CertificateAuthority. Possible values: ENTERPRISE, DEVOPS'\n x-kubernetes-immutable: true\n enum:\n - ENTERPRISE\n - DEVOPS\n type:\n type: string\n x-dcl-go-name: Type\n x-dcl-go-type: CertificateAuthorityTypeEnum\n description: 'Required. Immutable. The Type of this CertificateAuthority.\n Possible values: SELF_SIGNED, SUBORDINATE'\n x-kubernetes-immutable: true\n enum:\n - SELF_SIGNED\n - SUBORDINATE\n updateTime:\n type: string\n format: date-time\n x-dcl-go-name: UpdateTime\n readOnly: true\n description: Output only. 
The time at which this CertificateAuthority was\n last updated.\n x-kubernetes-immutable: true\n") -// 60286 bytes -// MD5: eb7c5fe22b0c4f1ff2ac49c8a4718660 +// 60820 bytes +// MD5: a23e49e188d3fdb6494ba574f8d3ca1f diff --git a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/privateca_utils.go b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/privateca_utils.go index 4d2660e814..b9219c6cf4 100644 --- a/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/privateca_utils.go +++ b/terraform/providers/google/vendor/github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca/privateca_utils.go @@ -241,3 +241,103 @@ func flattenCertificateTemplateExtendedKeyUsage(_ *Client, i interface{}, res *C return r } + +func expandCaPoolIssuancePolicyBaselineValuesCAOptions(_ *Client, f *CaPoolIssuancePolicyBaselineValuesCaOptions, res *CaPool) (map[string]any, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]any) + if v := f.IsCa; !dcl.IsEmptyValueIndirect(v) { + m["isCa"] = v + } + + maxIssuerPathLength := dcl.ValueOrEmptyInt64(f.MaxIssuerPathLength) + zeroPathLength := dcl.ValueOrEmptyBool(f.ZeroMaxIssuerPathLength) + if zeroPathLength && maxIssuerPathLength > 0 { + return nil, fmt.Errorf("max_issuer_path_length and zero_max_issuer_path_length are mutually exclusive") + } + if maxIssuerPathLength > 0 || zeroPathLength { + m["maxIssuerPathLength"] = maxIssuerPathLength + } + + return m, nil +} + +func flattenCaPoolIssuancePolicyBaselineValuesCAOptions(_ *Client, i any, res *CaPool) *CaPoolIssuancePolicyBaselineValuesCaOptions { + m, ok := i.(map[string]any) + if !ok { + return nil + } + + r := &CaPoolIssuancePolicyBaselineValuesCaOptions{} + + if dcl.IsEmptyValueIndirect(i) { + 
return EmptyCaPoolIssuancePolicyBaselineValuesCaOptions + } + + isCA, ok := m["isCa"].(bool) + if ok { + r.IsCa = dcl.Bool(isCA) + } + + if _, ok := m["maxIssuerPathLength"]; ok { + pathLen := dcl.FlattenInteger(m["maxIssuerPathLength"]) + r.MaxIssuerPathLength = pathLen + if dcl.ValueOrEmptyInt64(pathLen) == 0 { + r.ZeroMaxIssuerPathLength = dcl.Bool(true) + } + } + + return r +} + +func flattenCertificateAuthorityConfigX509ConfigCAOptions(_ *Client, i any, res *CertificateAuthority) *CertificateAuthorityConfigX509ConfigCaOptions { + m, ok := i.(map[string]any) + if !ok { + return nil + } + + r := &CertificateAuthorityConfigX509ConfigCaOptions{} + + if dcl.IsEmptyValueIndirect(i) { + return EmptyCertificateAuthorityConfigX509ConfigCaOptions + } + + isCA, ok := m["isCa"].(bool) + if ok { + r.IsCa = dcl.Bool(isCA) + } + + if _, ok := m["maxIssuerPathLength"]; ok { + pathLen := dcl.FlattenInteger(m["maxIssuerPathLength"]) + r.MaxIssuerPathLength = pathLen + if dcl.ValueOrEmptyInt64(pathLen) == 0 { + r.ZeroMaxIssuerPathLength = dcl.Bool(true) + } + } + + return r +} + +func expandCertificateAuthorityConfigX509ConfigCAOptions(_ *Client, f *CertificateAuthorityConfigX509ConfigCaOptions, res *CertificateAuthority) (map[string]any, error) { + if dcl.IsEmptyValueIndirect(f) { + return nil, nil + } + + m := make(map[string]any) + if v := f.IsCa; !dcl.IsEmptyValueIndirect(v) { + m["isCa"] = v + } + + maxIssuerPathLength := dcl.ValueOrEmptyInt64(f.MaxIssuerPathLength) + zeroPathLength := dcl.ValueOrEmptyBool(f.ZeroMaxIssuerPathLength) + if zeroPathLength && maxIssuerPathLength > 0 { + return nil, fmt.Errorf("max_issuer_path_length and zero_max_issuer_path_length are mutually exclusive") + } + if maxIssuerPathLength > 0 || zeroPathLength { + m["maxIssuerPathLength"] = maxIssuerPathLength + } + + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go 
b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go index 3536641675..0185b7c186 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go @@ -34,10 +34,13 @@ type OrcaLoadReport struct { CpuUtilization float64 `protobuf:"fixed64,1,opt,name=cpu_utilization,json=cpuUtilization,proto3" json:"cpu_utilization,omitempty"` MemUtilization float64 `protobuf:"fixed64,2,opt,name=mem_utilization,json=memUtilization,proto3" json:"mem_utilization,omitempty"` // Deprecated: Do not use. - Rps uint64 `protobuf:"varint,3,opt,name=rps,proto3" json:"rps,omitempty"` - RequestCost map[string]float64 `protobuf:"bytes,4,rep,name=request_cost,json=requestCost,proto3" json:"request_cost,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` - Utilization map[string]float64 `protobuf:"bytes,5,rep,name=utilization,proto3" json:"utilization,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` - RpsFractional float64 `protobuf:"fixed64,6,opt,name=rps_fractional,json=rpsFractional,proto3" json:"rps_fractional,omitempty"` + Rps uint64 `protobuf:"varint,3,opt,name=rps,proto3" json:"rps,omitempty"` + RequestCost map[string]float64 `protobuf:"bytes,4,rep,name=request_cost,json=requestCost,proto3" json:"request_cost,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + Utilization map[string]float64 `protobuf:"bytes,5,rep,name=utilization,proto3" json:"utilization,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + RpsFractional float64 `protobuf:"fixed64,6,opt,name=rps_fractional,json=rpsFractional,proto3" json:"rps_fractional,omitempty"` + Eps float64 `protobuf:"fixed64,7,opt,name=eps,proto3" 
json:"eps,omitempty"` + NamedMetrics map[string]float64 `protobuf:"bytes,8,rep,name=named_metrics,json=namedMetrics,proto3" json:"named_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + ApplicationUtilization float64 `protobuf:"fixed64,9,opt,name=application_utilization,json=applicationUtilization,proto3" json:"application_utilization,omitempty"` } func (x *OrcaLoadReport) Reset() { @@ -115,6 +118,27 @@ func (x *OrcaLoadReport) GetRpsFractional() float64 { return 0 } +func (x *OrcaLoadReport) GetEps() float64 { + if x != nil { + return x.Eps + } + return 0 +} + +func (x *OrcaLoadReport) GetNamedMetrics() map[string]float64 { + if x != nil { + return x.NamedMetrics + } + return nil +} + +func (x *OrcaLoadReport) GetApplicationUtilization() float64 { + if x != nil { + return x.ApplicationUtilization + } + return 0 +} + var File_xds_data_orca_v3_orca_load_report_proto protoreflect.FileDescriptor var file_xds_data_orca_v3_orca_load_report_proto_rawDesc = []byte{ @@ -123,49 +147,65 @@ var file_xds_data_orca_v3_orca_load_report_proto_rawDesc = []byte{ 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbe, 0x04, 0x0a, 0x0e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, - 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x70, 0x75, 0x5f, 0x75, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb5, 0x06, 0x0a, 0x0e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, + 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x0f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, - 0x42, 0x1c, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0xfa, 0x42, 0x0b, 0x12, 0x09, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x52, 0x0e, - 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x45, - 0x0a, 0x0f, 0x6d, 0x65, 0x6d, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x42, 0x1c, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x19, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x52, 0x0e, 0x6d, 0x65, 0x6d, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x03, 0x72, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x04, 0x42, 0x02, 0x18, 0x01, 0x52, 0x03, 0x72, 0x70, 0x73, 0x12, 0x54, 0x0a, 0x0c, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, - 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, - 0x6f, 0x72, 0x74, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, - 0x74, 0x12, 0x7b, 0x0a, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x26, 0xfa, 0x42, 0x10, 0x9a, 0x01, - 0x0d, 0x2a, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0x42, - 0x10, 0x9a, 0x01, 0x0d, 0x2a, 0x0b, 0x12, 0x09, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, - 0x3f, 0x52, 0x0b, 0x75, 0x74, 0x69, 
0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, - 0x0a, 0x0e, 0x72, 0x70, 0x73, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x0d, 0x72, 0x70, 0x73, 0x46, 0x72, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x1a, 0x3e, 0x0a, 0x10, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5d, 0x0a, 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, - 0x61, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, - 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x6f, 0x72, 0x63, - 0x61, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 
0x12, 0x45, 0x0a, 0x0f, 0x6d, 0x65, 0x6d, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x42, 0x1c, 0xfa, 0x42, 0x0b, 0x12, 0x09, + 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x19, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x52, 0x0e, 0x6d, 0x65, 0x6d, 0x55, 0x74, 0x69, 0x6c, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x03, 0x72, 0x70, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x42, 0x02, 0x18, 0x01, 0x52, 0x03, 0x72, 0x70, 0x73, 0x12, 0x54, 0x0a, + 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, + 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, + 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, + 0x6f, 0x73, 0x74, 0x12, 0x7b, 0x0a, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, + 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, 0x69, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x26, 0xfa, 0x42, 0x10, + 0x9a, 0x01, 0x0d, 0x2a, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xfa, 0x42, 0x10, 0x9a, 0x01, 0x0d, 0x2a, 0x0b, 0x12, 0x09, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xf0, 0x3f, 0x52, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x35, 0x0a, 0x0e, 0x72, 0x70, 0x73, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 
0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x0d, 0x72, 0x70, 0x73, 0x46, 0x72, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x70, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x03, 0x65, 0x70, 0x73, 0x12, 0x57, 0x0a, 0x0d, 0x6e, 0x61, 0x6d, + 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, + 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x12, 0x47, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x52, 0x16, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x55, + 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x11, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5d, 0x0a, 0x1b, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x4f, 0x72, 0x63, + 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x64, + 0x61, 0x74, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -180,20 +220,22 @@ func file_xds_data_orca_v3_orca_load_report_proto_rawDescGZIP() []byte { return file_xds_data_orca_v3_orca_load_report_proto_rawDescData } -var file_xds_data_orca_v3_orca_load_report_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_xds_data_orca_v3_orca_load_report_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_xds_data_orca_v3_orca_load_report_proto_goTypes = []interface{}{ (*OrcaLoadReport)(nil), // 0: xds.data.orca.v3.OrcaLoadReport nil, // 1: xds.data.orca.v3.OrcaLoadReport.RequestCostEntry nil, // 2: xds.data.orca.v3.OrcaLoadReport.UtilizationEntry + nil, // 3: xds.data.orca.v3.OrcaLoadReport.NamedMetricsEntry } var file_xds_data_orca_v3_orca_load_report_proto_depIdxs = []int32{ 1, // 0: 
xds.data.orca.v3.OrcaLoadReport.request_cost:type_name -> xds.data.orca.v3.OrcaLoadReport.RequestCostEntry 2, // 1: xds.data.orca.v3.OrcaLoadReport.utilization:type_name -> xds.data.orca.v3.OrcaLoadReport.UtilizationEntry - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 3, // 2: xds.data.orca.v3.OrcaLoadReport.named_metrics:type_name -> xds.data.orca.v3.OrcaLoadReport.NamedMetricsEntry + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_xds_data_orca_v3_orca_load_report_proto_init() } @@ -221,7 +263,7 @@ func file_xds_data_orca_v3_orca_load_report_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_xds_data_orca_v3_orca_load_report_proto_rawDesc, NumEnums: 0, - NumMessages: 3, + NumMessages: 4, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go index 19750a09a9..2f505db727 100644 --- a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go @@ -41,10 +41,10 @@ func (m *OrcaLoadReport) Validate() error { return nil } - if val := m.GetCpuUtilization(); val < 0 || val > 1 { + if m.GetCpuUtilization() < 0 { return OrcaLoadReportValidationError{ field: "CpuUtilization", - reason: "value must be inside range [0, 1]", + reason: "value 
must be greater than or equal to 0", } } @@ -80,6 +80,22 @@ func (m *OrcaLoadReport) Validate() error { } } + if m.GetEps() < 0 { + return OrcaLoadReportValidationError{ + field: "Eps", + reason: "value must be greater than or equal to 0", + } + } + + // no validation rules for NamedMetrics + + if m.GetApplicationUtilization() < 0 { + return OrcaLoadReportValidationError{ + field: "ApplicationUtilization", + reason: "value must be greater than or equal to 0", + } + } + return nil } diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go new file mode 100644 index 0000000000..0aafd1ab67 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go @@ -0,0 +1,298 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.18.0 +// source: xds/service/orca/v3/orca.proto + +package v3 + +import ( + context "context" + v3 "github.com/cncf/xds/go/xds/data/orca/v3" + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 + +type OrcaLoadReportRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReportInterval *duration.Duration `protobuf:"bytes,1,opt,name=report_interval,json=reportInterval,proto3" json:"report_interval,omitempty"` + RequestCostNames []string `protobuf:"bytes,2,rep,name=request_cost_names,json=requestCostNames,proto3" json:"request_cost_names,omitempty"` +} + +func (x *OrcaLoadReportRequest) Reset() { + *x = OrcaLoadReportRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_service_orca_v3_orca_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OrcaLoadReportRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OrcaLoadReportRequest) ProtoMessage() {} + +func (x *OrcaLoadReportRequest) ProtoReflect() protoreflect.Message { + mi := &file_xds_service_orca_v3_orca_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OrcaLoadReportRequest.ProtoReflect.Descriptor instead. 
+func (*OrcaLoadReportRequest) Descriptor() ([]byte, []int) { + return file_xds_service_orca_v3_orca_proto_rawDescGZIP(), []int{0} +} + +func (x *OrcaLoadReportRequest) GetReportInterval() *duration.Duration { + if x != nil { + return x.ReportInterval + } + return nil +} + +func (x *OrcaLoadReportRequest) GetRequestCostNames() []string { + if x != nil { + return x.RequestCostNames + } + return nil +} + +var File_xds_service_orca_v3_orca_proto protoreflect.FileDescriptor + +var file_xds_service_orca_v3_orca_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x78, 0x64, 0x73, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6f, 0x72, + 0x63, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6f, 0x72, + 0x63, 0x61, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x78, 0x64, 0x73, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, + 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x89, + 0x01, 0x0a, 0x15, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x2c, 0x0a, 0x12, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 
0x6d, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x43, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x32, 0x75, 0x0a, 0x0e, 0x4f, 0x70, + 0x65, 0x6e, 0x52, 0x63, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x63, 0x0a, 0x11, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x72, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x12, 0x2a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, + 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x30, + 0x01, 0x42, 0x59, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6f, 0x72, 0x63, 0x61, + 0x2e, 0x76, 0x33, 0x42, 0x09, 0x4f, 0x72, 0x63, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, + 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_service_orca_v3_orca_proto_rawDescOnce sync.Once + file_xds_service_orca_v3_orca_proto_rawDescData = file_xds_service_orca_v3_orca_proto_rawDesc +) + +func file_xds_service_orca_v3_orca_proto_rawDescGZIP() []byte { + file_xds_service_orca_v3_orca_proto_rawDescOnce.Do(func() { + file_xds_service_orca_v3_orca_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_service_orca_v3_orca_proto_rawDescData) + }) + return file_xds_service_orca_v3_orca_proto_rawDescData +} + +var 
file_xds_service_orca_v3_orca_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_service_orca_v3_orca_proto_goTypes = []interface{}{ + (*OrcaLoadReportRequest)(nil), // 0: xds.service.orca.v3.OrcaLoadReportRequest + (*duration.Duration)(nil), // 1: google.protobuf.Duration + (*v3.OrcaLoadReport)(nil), // 2: xds.data.orca.v3.OrcaLoadReport +} +var file_xds_service_orca_v3_orca_proto_depIdxs = []int32{ + 1, // 0: xds.service.orca.v3.OrcaLoadReportRequest.report_interval:type_name -> google.protobuf.Duration + 0, // 1: xds.service.orca.v3.OpenRcaService.StreamCoreMetrics:input_type -> xds.service.orca.v3.OrcaLoadReportRequest + 2, // 2: xds.service.orca.v3.OpenRcaService.StreamCoreMetrics:output_type -> xds.data.orca.v3.OrcaLoadReport + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_service_orca_v3_orca_proto_init() } +func file_xds_service_orca_v3_orca_proto_init() { + if File_xds_service_orca_v3_orca_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_service_orca_v3_orca_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OrcaLoadReportRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_service_orca_v3_orca_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_xds_service_orca_v3_orca_proto_goTypes, + DependencyIndexes: file_xds_service_orca_v3_orca_proto_depIdxs, + MessageInfos: file_xds_service_orca_v3_orca_proto_msgTypes, + }.Build() + 
File_xds_service_orca_v3_orca_proto = out.File + file_xds_service_orca_v3_orca_proto_rawDesc = nil + file_xds_service_orca_v3_orca_proto_goTypes = nil + file_xds_service_orca_v3_orca_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// OpenRcaServiceClient is the client API for OpenRcaService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type OpenRcaServiceClient interface { + StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) +} + +type openRcaServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewOpenRcaServiceClient(cc grpc.ClientConnInterface) OpenRcaServiceClient { + return &openRcaServiceClient{cc} +} + +func (c *openRcaServiceClient) StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) { + stream, err := c.cc.NewStream(ctx, &_OpenRcaService_serviceDesc.Streams[0], "/xds.service.orca.v3.OpenRcaService/StreamCoreMetrics", opts...) 
+ if err != nil { + return nil, err + } + x := &openRcaServiceStreamCoreMetricsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type OpenRcaService_StreamCoreMetricsClient interface { + Recv() (*v3.OrcaLoadReport, error) + grpc.ClientStream +} + +type openRcaServiceStreamCoreMetricsClient struct { + grpc.ClientStream +} + +func (x *openRcaServiceStreamCoreMetricsClient) Recv() (*v3.OrcaLoadReport, error) { + m := new(v3.OrcaLoadReport) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// OpenRcaServiceServer is the server API for OpenRcaService service. +type OpenRcaServiceServer interface { + StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error +} + +// UnimplementedOpenRcaServiceServer can be embedded to have forward compatible implementations. +type UnimplementedOpenRcaServiceServer struct { +} + +func (*UnimplementedOpenRcaServiceServer) StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error { + return status.Errorf(codes.Unimplemented, "method StreamCoreMetrics not implemented") +} + +func RegisterOpenRcaServiceServer(s *grpc.Server, srv OpenRcaServiceServer) { + s.RegisterService(&_OpenRcaService_serviceDesc, srv) +} + +func _OpenRcaService_StreamCoreMetrics_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(OrcaLoadReportRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OpenRcaServiceServer).StreamCoreMetrics(m, &openRcaServiceStreamCoreMetricsServer{stream}) +} + +type OpenRcaService_StreamCoreMetricsServer interface { + Send(*v3.OrcaLoadReport) error + grpc.ServerStream +} + +type openRcaServiceStreamCoreMetricsServer struct { + grpc.ServerStream +} + +func (x *openRcaServiceStreamCoreMetricsServer) Send(m *v3.OrcaLoadReport) error { + return 
x.ServerStream.SendMsg(m) +} + +var _OpenRcaService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "xds.service.orca.v3.OpenRcaService", + HandlerType: (*OpenRcaServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamCoreMetrics", + Handler: _OpenRcaService_StreamCoreMetrics_Handler, + ServerStreams: true, + }, + }, + Metadata: "xds/service/orca/v3/orca.proto", +} diff --git a/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go new file mode 100644 index 0000000000..5c7c765847 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go @@ -0,0 +1,111 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/service/orca/v3/orca.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} +) + +// Validate checks the field values on OrcaLoadReportRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, an error is returned. 
+func (m *OrcaLoadReportRequest) Validate() error { + if m == nil { + return nil + } + + if v, ok := interface{}(m.GetReportInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OrcaLoadReportRequestValidationError{ + field: "ReportInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + return nil +} + +// OrcaLoadReportRequestValidationError is the validation error returned by +// OrcaLoadReportRequest.Validate if the designated constraints aren't met. +type OrcaLoadReportRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OrcaLoadReportRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OrcaLoadReportRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OrcaLoadReportRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OrcaLoadReportRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e OrcaLoadReportRequestValidationError) ErrorName() string { + return "OrcaLoadReportRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e OrcaLoadReportRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOrcaLoadReportRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OrcaLoadReportRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OrcaLoadReportRequestValidationError{} diff --git a/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/LICENSE b/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/LICENSE deleted file mode 100644 index 9a46132086..0000000000 --- a/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2015-2016 Marin Atanasov Nikolov -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer - in this position and unchanged. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
-IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/cassette/cassette.go b/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/cassette/cassette.go deleted file mode 100644 index cf9e340d30..0000000000 --- a/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/cassette/cassette.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright (c) 2015 Marin Atanasov Nikolov -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer -// in this position and unchanged. -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR -// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
-// IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, -// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package cassette - -import ( - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "sync" - - "gopkg.in/yaml.v2" -) - -// Cassette format versions -const ( - cassetteFormatV1 = 1 -) - -var ( - // ErrInteractionNotFound indicates that a requested - // interaction was not found in the cassette file - ErrInteractionNotFound = errors.New("Requested interaction not found") -) - -// Request represents a client request as recorded in the -// cassette file -type Request struct { - // Body of request - Body string `yaml:"body"` - - // Form values - Form url.Values `yaml:"form"` - - // Request headers - Headers http.Header `yaml:"headers"` - - // Request URL - URL string `yaml:"url"` - - // Request method - Method string `yaml:"method"` -} - -// Response represents a server response as recorded in the -// cassette file -type Response struct { - // Body of response - Body string `yaml:"body"` - - // Response headers - Headers http.Header `yaml:"headers"` - - // Response status message - Status string `yaml:"status"` - - // Response status code - Code int `yaml:"code"` - - // Response duration (something like "100ms" or "10s") - Duration string `yaml:"duration"` - - replayed bool -} - -// Interaction type contains a pair of request/response for a -// single HTTP interaction between a client and a server -type Interaction struct { - Request `yaml:"request"` - Response `yaml:"response"` -} - -// Matcher function returns true when the 
actual request matches -// a single HTTP interaction's request according to the function's -// own criteria. -type Matcher func(*http.Request, Request) bool - -// DefaultMatcher is used when a custom matcher is not defined -// and compares only the method and URL. -func DefaultMatcher(r *http.Request, i Request) bool { - return r.Method == i.Method && r.URL.String() == i.URL -} - -// Filter function allows modification of an interaction before saving. -type Filter func(*Interaction) error - -// Cassette type -type Cassette struct { - // Name of the cassette - Name string `yaml:"-"` - - // File name of the cassette as written on disk - File string `yaml:"-"` - - // Cassette format version - Version int `yaml:"version"` - - // Mutex to lock accessing Interactions. omitempty is set - // to prevent the mutex appearing in the recorded YAML. - Mu sync.RWMutex `yaml:"mu,omitempty"` - // Interactions between client and server - Interactions []*Interaction `yaml:"interactions"` - - // Matches actual request with interaction requests. - Matcher Matcher `yaml:"-"` - - // Filters interactions before being saved. 
- Filters []Filter `yaml:"-"` -} - -// New creates a new empty cassette -func New(name string) *Cassette { - c := &Cassette{ - Name: name, - File: fmt.Sprintf("%s.yaml", name), - Version: cassetteFormatV1, - Interactions: make([]*Interaction, 0), - Matcher: DefaultMatcher, - Filters: make([]Filter, 0), - } - - return c -} - -// Load reads a cassette file from disk -func Load(name string) (*Cassette, error) { - c := New(name) - data, err := ioutil.ReadFile(c.File) - if err != nil { - return nil, err - } - - err = yaml.Unmarshal(data, &c) - - return c, err -} - -// AddInteraction appends a new interaction to the cassette -func (c *Cassette) AddInteraction(i *Interaction) { - c.Mu.Lock() - c.Interactions = append(c.Interactions, i) - c.Mu.Unlock() -} - -// GetInteraction retrieves a recorded request/response interaction -func (c *Cassette) GetInteraction(r *http.Request) (*Interaction, error) { - c.Mu.Lock() - defer c.Mu.Unlock() - for _, i := range c.Interactions { - if !i.replayed && c.Matcher(r, i.Request) { - i.replayed = true - return i, nil - } - } - - return nil, ErrInteractionNotFound -} - -// Save writes the cassette data on disk for future re-use -func (c *Cassette) Save() error { - c.Mu.RLock() - defer c.Mu.RUnlock() - // Save cassette file only if there were any interactions made - if len(c.Interactions) == 0 { - return nil - } - - // Create directory for cassette if missing - cassetteDir := filepath.Dir(c.File) - if _, err := os.Stat(cassetteDir); os.IsNotExist(err) { - if err = os.MkdirAll(cassetteDir, 0755); err != nil { - return err - } - } - - // Marshal to YAML and save interactions - data, err := yaml.Marshal(c) - if err != nil { - return err - } - - f, err := os.Create(c.File) - if err != nil { - return err - } - - defer f.Close() - - // Honor the YAML structure specification - // http://www.yaml.org/spec/1.2/spec.html#id2760395 - _, err = f.Write([]byte("---\n")) - if err != nil { - return err - } - - _, err = f.Write(data) - if err != nil { - 
return err - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/recorder/go17_nobody.go b/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/recorder/go17_nobody.go deleted file mode 100644 index 465961abbf..0000000000 --- a/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/recorder/go17_nobody.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2015-2016 Marin Atanasov Nikolov -// Copyright (c) 2016 David Jack -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer -// in this position and unchanged. -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR -// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -// IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, -// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// +build !go1.8 - -package recorder - -import ( - "io" -) - -// isNoBody returns true iff r is an http.NoBody. 
-// http.NoBody didn't exist before Go 1.7, so the version in this file -// always returns false. -func isNoBody(r io.ReadCloser) bool { return false } diff --git a/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/recorder/go18_nobody.go b/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/recorder/go18_nobody.go deleted file mode 100644 index dac213af8f..0000000000 --- a/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/recorder/go18_nobody.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2015-2016 Marin Atanasov Nikolov -// Copyright (c) 2016 David Jack -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer -// in this position and unchanged. -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR -// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -// IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, -// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// -// +build go1.8 - -package recorder - -import ( - "io" - "net/http" -) - -// isNoBody returns true iff r is an http.NoBody. -func isNoBody(r io.ReadCloser) bool { return r == http.NoBody } diff --git a/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/recorder/recorder.go b/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/recorder/recorder.go deleted file mode 100644 index 2b88919526..0000000000 --- a/terraform/providers/google/vendor/github.com/dnaeon/go-vcr/recorder/recorder.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright (c) 2015-2016 Marin Atanasov Nikolov -// Copyright (c) 2016 David Jack -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer -// in this position and unchanged. -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR -// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -// IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, -// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package recorder - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httputil" - "os" - "strconv" - "time" - - "github.com/dnaeon/go-vcr/cassette" -) - -// Mode represents recording/playback mode -type Mode int - -// Recorder states -const ( - ModeRecording Mode = iota - ModeReplaying - ModeDisabled -) - -// Recorder represents a type used to record and replay -// client and server interactions -type Recorder struct { - // Operating mode of the recorder - mode Mode - - // Cassette used by the recorder - cassette *cassette.Cassette - - // realTransport is the underlying http.RoundTripper to make real requests - realTransport http.RoundTripper -} - -// SetTransport can be used to configure the behavior of the 'real' client used in record-mode -func (r *Recorder) SetTransport(t http.RoundTripper) { - r.realTransport = t -} - -// Proxies client requests to their original destination -func requestHandler(r *http.Request, c *cassette.Cassette, mode Mode, realTransport http.RoundTripper) (*cassette.Interaction, error) { - // Return interaction from cassette if in replay mode - if mode == ModeReplaying { - if err := r.Context().Err(); err != nil { - return nil, err - } - return c.GetInteraction(r) - } - - // Copy the original request, so we can read the form values - reqBytes, err := httputil.DumpRequestOut(r, true) - if err != nil { - return nil, err - } - - reqBuffer := bytes.NewBuffer(reqBytes) - copiedReq, err := http.ReadRequest(bufio.NewReader(reqBuffer)) - if err != nil { - return nil, err - } - - err = copiedReq.ParseForm() - if err != nil { - return nil, err - } - - reqBody := &bytes.Buffer{} - if r.Body != nil && !isNoBody(r.Body) { - // Record the request body so we can add it to the cassette - r.Body = ioutil.NopCloser(io.TeeReader(r.Body, reqBody)) - } - - // Perform client request to it's original - // destination and record interactions - resp, err := realTransport.RoundTrip(r) - if err != nil { - return nil, err - } - 
defer resp.Body.Close() - - respBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - // Add interaction to cassette - interaction := &cassette.Interaction{ - Request: cassette.Request{ - Body: reqBody.String(), - Form: copiedReq.PostForm, - Headers: r.Header, - URL: r.URL.String(), - Method: r.Method, - }, - Response: cassette.Response{ - Body: string(respBody), - Headers: resp.Header, - Status: resp.Status, - Code: resp.StatusCode, - }, - } - for _, filter := range c.Filters { - err = filter(interaction) - if err != nil { - return nil, err - } - } - c.AddInteraction(interaction) - - return interaction, nil -} - -// New creates a new recorder -func New(cassetteName string) (*Recorder, error) { - // Default mode is "replay" if file exists - return NewAsMode(cassetteName, ModeReplaying, nil) -} - -// NewAsMode creates a new recorder in the specified mode -func NewAsMode(cassetteName string, mode Mode, realTransport http.RoundTripper) (*Recorder, error) { - var c *cassette.Cassette - cassetteFile := fmt.Sprintf("%s.yaml", cassetteName) - - if mode != ModeDisabled { - // Depending on whether the cassette file exists or not we - // either create a new empty cassette or load from file - if _, err := os.Stat(cassetteFile); os.IsNotExist(err) || mode == ModeRecording { - // Create new cassette and enter in recording mode - c = cassette.New(cassetteName) - mode = ModeRecording - } else { - // Load cassette from file and enter replay mode - c, err = cassette.Load(cassetteName) - if err != nil { - return nil, err - } - mode = ModeReplaying - } - } - - if realTransport == nil { - realTransport = http.DefaultTransport - } - - r := &Recorder{ - mode: mode, - cassette: c, - realTransport: realTransport, - } - - return r, nil -} - -// Stop is used to stop the recorder and save any recorded interactions -func (r *Recorder) Stop() error { - if r.mode == ModeRecording { - if err := r.cassette.Save(); err != nil { - return err - } - } - - return nil -} - 
-// RoundTrip implements the http.RoundTripper interface -func (r *Recorder) RoundTrip(req *http.Request) (*http.Response, error) { - if r.mode == ModeDisabled { - return r.realTransport.RoundTrip(req) - } - // Pass cassette and mode to handler, so that interactions can be - // retrieved or recorded depending on the current recorder mode - interaction, err := requestHandler(req, r.cassette, r.mode, r.realTransport) - - if err != nil { - return nil, err - } - - select { - case <-req.Context().Done(): - return nil, req.Context().Err() - default: - buf := bytes.NewBuffer([]byte(interaction.Response.Body)) - // apply the duration defined in the interaction - if interaction.Response.Duration != "" { - d, err := time.ParseDuration(interaction.Duration) - if err != nil { - return nil, err - } - // block for the configured 'duration' to simulate the network latency and server processing time. - <-time.After(d) - } - - contentLength := int64(buf.Len()) - // For HTTP HEAD requests, the ContentLength should be set to the size - // of the body that would have been sent for a GET. 
- // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 - if req.Method == "HEAD" { - if hdr := interaction.Response.Headers.Get("Content-Length"); hdr != "" { - cl, err := strconv.ParseInt(hdr, 10, 64) - if err == nil { - contentLength = cl - } - } - } - return &http.Response{ - Status: interaction.Response.Status, - StatusCode: interaction.Response.Code, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Request: req, - Header: interaction.Response.Headers, - Close: true, - ContentLength: contentLength, - Body: ioutil.NopCloser(buf), - }, nil - } -} - -// CancelRequest implements the github.com/coreos/etcd/client.CancelableTransport interface -func (r *Recorder) CancelRequest(req *http.Request) { - type cancelableTransport interface { - CancelRequest(req *http.Request) - } - if ct, ok := r.realTransport.(cancelableTransport); ok { - ct.CancelRequest(req) - } -} - -// SetMatcher sets a function to match requests against recorded HTTP interactions. -func (r *Recorder) SetMatcher(matcher cassette.Matcher) { - if r.cassette != nil { - r.cassette.Matcher = matcher - } -} - -// AddFilter appends a hook to modify a request before it is recorded. -// -// Filters are useful for filtering out sensitive parameters from the recorded data. -func (r *Recorder) AddFilter(filter cassette.Filter) { - if r.cassette != nil { - r.cassette.Filters = append(r.cassette.Filters, filter) - } -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go index 01d655cc2b..d5a8b601c9 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/admin/v3/certs.proto package adminv3 @@ -22,7 +22,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to +// Proto representation of certificate details. Admin endpoint uses this wrapper for ``/certs`` to // display certificate information. See :ref:`/certs ` for more // information. type Certificates struct { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go index e8c9575f02..4294f96506 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go @@ -586,17 +586,45 @@ func (m *SubjectAlternateName) validate(all bool) error { var errors []error - switch m.Name.(type) { - + switch v := m.Name.(type) { case *SubjectAlternateName_Dns: + if v == nil { + err := SubjectAlternateNameValidationError{ + field: "Name", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for Dns - case *SubjectAlternateName_Uri: + if v == nil { + err := SubjectAlternateNameValidationError{ + field: "Name", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for Uri - case *SubjectAlternateName_IpAddress: + if v == nil { + err := SubjectAlternateNameValidationError{ + field: "Name", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for IpAddress - + 
default: + _ = v // ensures v is used } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go index 2d0d0204ce..c3277cdd31 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/admin/v3/clusters.proto package adminv3 @@ -24,7 +24,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Admin endpoint uses this wrapper for `/clusters` to display cluster status information. +// Admin endpoint uses this wrapper for ``/clusters`` to display cluster status information. // See :ref:`/clusters ` for more information. type Clusters struct { state protoimpl.MessageState @@ -75,7 +75,7 @@ func (x *Clusters) GetClusterStatuses() []*ClusterStatus { } // Details an individual cluster's current status. -// [#next-free-field: 8] +// [#next-free-field: 9] type ClusterStatus struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -88,10 +88,10 @@ type ClusterStatus struct { // The success rate threshold used in the last interval. // If // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used to calculate the threshold. + // is ``false``, all errors: externally and locally generated were used to calculate the threshold. // If // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used to calculate the threshold. 
+ // is ``true``, only externally generated errors were used to calculate the threshold. // The threshold is used to eject hosts based on their success rate. See // :ref:`Cluster outlier detection ` documentation for details. // @@ -109,7 +109,7 @@ type ClusterStatus struct { // taken into account and externally originated errors were treated as success. // This field should be interpreted only when // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. The threshold is used to eject hosts based on their success rate. + // is ``true``. The threshold is used to eject hosts based on their success rate. // See :ref:`Cluster outlier detection ` documentation for // details. // @@ -125,6 +125,8 @@ type ClusterStatus struct { CircuitBreakers *v31.CircuitBreakers `protobuf:"bytes,6,opt,name=circuit_breakers,json=circuitBreakers,proto3" json:"circuit_breakers,omitempty"` // Observability name of the cluster. ObservabilityName string `protobuf:"bytes,7,opt,name=observability_name,json=observabilityName,proto3" json:"observability_name,omitempty"` + // The :ref:`EDS service name ` if the cluster is an EDS cluster. + EdsServiceName string `protobuf:"bytes,8,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` } func (x *ClusterStatus) Reset() { @@ -208,6 +210,13 @@ func (x *ClusterStatus) GetObservabilityName() string { return "" } +func (x *ClusterStatus) GetEdsServiceName() string { + if x != nil { + return x.EdsServiceName + } + return "" +} + // Current state of a particular host. // [#next-free-field: 10] type HostStatus struct { @@ -224,10 +233,10 @@ type HostStatus struct { // Request success rate for this host over the last calculated interval. // If // :ref:`outlier_detection.split_external_local_origin_errors` - // is *false*, all errors: externally and locally generated were used in success rate + // is ``false``, all errors: externally and locally generated were used in success rate // calculation. 
If // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*, only externally generated errors were used in success rate calculation. + // is ``true``, only externally generated errors were used in success rate calculation. // See :ref:`Cluster outlier detection ` documentation for // details. // @@ -246,7 +255,7 @@ type HostStatus struct { // errors were treated as success. // This field should be interpreted only when // :ref:`outlier_detection.split_external_local_origin_errors` - // is *true*. + // is ``true``. // See :ref:`Cluster outlier detection ` documentation for // details. // @@ -501,7 +510,7 @@ var file_envoy_admin_v3_clusters_proto_rawDesc = []byte{ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x73, 0x22, 0x8c, 0x04, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x74, 0x65, 0x72, 0x73, 0x22, 0xb6, 0x04, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x61, 0x64, 0x64, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x61, 0x5f, 0x61, 0x70, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, @@ -531,83 +540,86 @@ var file_envoy_admin_v3_clusters_proto_rawDesc = []byte{ 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, - 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 
0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x22, 0x81, 0x04, 0x0a, 0x0a, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x73, - 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x69, 0x6d, 0x70, - 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, - 0x45, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, - 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, - 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, - 0x74, 
0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x12, 0x51, 0x0a, 0x19, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, - 0x6e, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x16, 0x6c, 0x6f, - 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x52, 0x61, 0x74, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, - 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, - 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x93, 0x04, 0x0a, 0x10, 0x48, 0x6f, 0x73, 0x74, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3b, 0x0a, 0x1a, - 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x17, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x61, 0x69, - 0x6c, 0x65, 0x64, 0x5f, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 
0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4f, - 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x1c, 0x66, - 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x67, - 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x19, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x44, - 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x36, 0x0a, 0x17, - 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, - 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x70, - 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x61, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, - 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x63, - 0x12, 0x42, 0x0a, 0x1e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x61, - 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x63, 0x5f, 0x66, 0x61, - 0x69, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x64, 0x56, 0x69, 0x61, 0x49, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x48, 0x63, - 0x46, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, - 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x12, 0x4e, 0x0a, 0x11, 0x65, 0x64, 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 
0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x0f, 0x65, 0x64, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, - 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x76, 0x0a, - 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0xba, 0x80, 0xc8, - 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x81, 0x04, + 0x0a, 0x0a, 0x48, 0x6f, 0x73, 0x74, 
0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x0d, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x39, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0b, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x77, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 
0x51, 0x0a, 0x19, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x75, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x3a, + 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, + 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x22, 0x93, 0x04, 0x0a, 0x10, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, + 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x66, 0x61, 0x69, 0x6c, + 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6f, 0x75, + 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x1c, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, + 0x61, 0x63, 
0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x66, 0x61, 0x69, + 0x6c, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x64, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x61, + 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x12, 0x2a, + 0x0a, 0x11, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x68, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x70, 0x65, 0x6e, 0x64, 0x69, + 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x63, 0x12, 0x42, 0x0a, 0x1e, 0x65, 0x78, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x61, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, + 0x69, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x63, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1a, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x56, 0x69, 0x61, 0x49, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x48, 0x63, 0x46, 0x61, 0x69, 0x6c, 0x12, 0x2a, + 0x0a, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x48, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4e, 0x0a, 0x11, 0x65, 0x64, + 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 
0x73, 0x52, 0x0f, 0x65, 0x64, 0x73, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, + 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x76, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go index 3c87ae291e..a147a9b757 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go @@ -321,6 +321,8 @@ func (m *ClusterStatus) validate(all bool) error { // no validation rules for ObservabilityName + // no validation rules for EdsServiceName + if len(errors) > 0 { return ClusterStatusMultiError(errors) } diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go index 5dc76d3e9d..68ca40f8bd 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/admin/v3/config_dump.proto package adminv3 @@ -9,7 +9,7 @@ package adminv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" timestamp "github.com/golang/protobuf/ptypes/timestamp" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -24,74 +24,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Resource status from the view of a xDS client, which tells the synchronization -// status between the xDS client and the xDS server. -type ClientResourceStatus int32 - -const ( - // Resource status is not available/unknown. - ClientResourceStatus_UNKNOWN ClientResourceStatus = 0 - // Client requested this resource but hasn't received any update from management - // server. The client will not fail requests, but will queue them until update - // arrives or the client times out waiting for the resource. 
- ClientResourceStatus_REQUESTED ClientResourceStatus = 1 - // This resource has been requested by the client but has either not been - // delivered by the server or was previously delivered by the server and then - // subsequently removed from resources provided by the server. For more - // information, please refer to the :ref:`"Knowing When a Requested Resource - // Does Not Exist" ` section. - ClientResourceStatus_DOES_NOT_EXIST ClientResourceStatus = 2 - // Client received this resource and replied with ACK. - ClientResourceStatus_ACKED ClientResourceStatus = 3 - // Client received this resource and replied with NACK. - ClientResourceStatus_NACKED ClientResourceStatus = 4 -) - -// Enum value maps for ClientResourceStatus. -var ( - ClientResourceStatus_name = map[int32]string{ - 0: "UNKNOWN", - 1: "REQUESTED", - 2: "DOES_NOT_EXIST", - 3: "ACKED", - 4: "NACKED", - } - ClientResourceStatus_value = map[string]int32{ - "UNKNOWN": 0, - "REQUESTED": 1, - "DOES_NOT_EXIST": 2, - "ACKED": 3, - "NACKED": 4, - } -) - -func (x ClientResourceStatus) Enum() *ClientResourceStatus { - p := new(ClientResourceStatus) - *p = x - return p -} - -func (x ClientResourceStatus) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ClientResourceStatus) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_admin_v3_config_dump_proto_enumTypes[0].Descriptor() -} - -func (ClientResourceStatus) Type() protoreflect.EnumType { - return &file_envoy_admin_v3_config_dump_proto_enumTypes[0] -} - -func (x ClientResourceStatus) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ClientResourceStatus.Descriptor instead. 
-func (ClientResourceStatus) EnumDescriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{0} -} - // The :ref:`/config_dump ` admin endpoint uses this wrapper // message to maintain and serve arbitrary configuration information from any component in Envoy. type ConfigDump struct { @@ -105,22 +37,27 @@ type ConfigDump struct { // The following configurations are currently supported and will be dumped in the order given // below: // - // * *bootstrap*: :ref:`BootstrapConfigDump ` - // * *clusters*: :ref:`ClustersConfigDump ` - // * *endpoints*: :ref:`EndpointsConfigDump ` - // * *listeners*: :ref:`ListenersConfigDump ` - // * *scoped_routes*: :ref:`ScopedRoutesConfigDump ` - // * *routes*: :ref:`RoutesConfigDump ` - // * *secrets*: :ref:`SecretsConfigDump ` + // * ``bootstrap``: :ref:`BootstrapConfigDump ` + // * ``clusters``: :ref:`ClustersConfigDump ` + // * ``ecds_filter_http``: :ref:`EcdsConfigDump ` + // * ``ecds_filter_tcp_listener``: :ref:`EcdsConfigDump ` + // * ``endpoints``: :ref:`EndpointsConfigDump ` + // * ``listeners``: :ref:`ListenersConfigDump ` + // * ``scoped_routes``: :ref:`ScopedRoutesConfigDump ` + // * ``routes``: :ref:`RoutesConfigDump ` + // * ``secrets``: :ref:`SecretsConfigDump ` // - // EDS Configuration will only be dumped by using parameter `?include_eds` + // EDS Configuration will only be dumped by using parameter ``?include_eds`` + // + // Currently ECDS is supported in HTTP and listener filters. Note, ECDS configuration for + // either HTTP or listener filter will only be dumped if it is actually configured. // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, // :ref:`/config_dump?mask={} `, // or :ref:`/config_dump?resource={},mask={} // ` for more information. 
- Configs []*any.Any `protobuf:"bytes,1,rep,name=configs,proto3" json:"configs,omitempty"` + Configs []*any1.Any `protobuf:"bytes,1,rep,name=configs,proto3" json:"configs,omitempty"` } func (x *ConfigDump) Reset() { @@ -155,90 +92,13 @@ func (*ConfigDump) Descriptor() ([]byte, []int) { return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{0} } -func (x *ConfigDump) GetConfigs() []*any.Any { +func (x *ConfigDump) GetConfigs() []*any1.Any { if x != nil { return x.Configs } return nil } -type UpdateFailureState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // What the component configuration would have been if the update had succeeded. - // This field may not be populated by xDS clients due to storage overhead. - FailedConfiguration *any.Any `protobuf:"bytes,1,opt,name=failed_configuration,json=failedConfiguration,proto3" json:"failed_configuration,omitempty"` - // Time of the latest failed update attempt. - LastUpdateAttempt *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_update_attempt,json=lastUpdateAttempt,proto3" json:"last_update_attempt,omitempty"` - // Details about the last failed update attempt. - Details string `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"` - // This is the version of the rejected resource. 
- // [#not-implemented-hide:] - VersionInfo string `protobuf:"bytes,4,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` -} - -func (x *UpdateFailureState) Reset() { - *x = UpdateFailureState{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateFailureState) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateFailureState) ProtoMessage() {} - -func (x *UpdateFailureState) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateFailureState.ProtoReflect.Descriptor instead. -func (*UpdateFailureState) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{1} -} - -func (x *UpdateFailureState) GetFailedConfiguration() *any.Any { - if x != nil { - return x.FailedConfiguration - } - return nil -} - -func (x *UpdateFailureState) GetLastUpdateAttempt() *timestamp.Timestamp { - if x != nil { - return x.LastUpdateAttempt - } - return nil -} - -func (x *UpdateFailureState) GetDetails() string { - if x != nil { - return x.Details - } - return "" -} - -func (x *UpdateFailureState) GetVersionInfo() string { - if x != nil { - return x.VersionInfo - } - return "" -} - // This message describes the bootstrap configuration that Envoy was started with. This includes // any CLI overrides that were merged. 
Bootstrap configuration information can be used to recreate // the static portions of an Envoy configuration by reusing the output as the bootstrap @@ -256,7 +116,7 @@ type BootstrapConfigDump struct { func (x *BootstrapConfigDump) Reset() { *x = BootstrapConfigDump{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[2] + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -269,7 +129,7 @@ func (x *BootstrapConfigDump) String() string { func (*BootstrapConfigDump) ProtoMessage() {} func (x *BootstrapConfigDump) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[2] + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -282,7 +142,7 @@ func (x *BootstrapConfigDump) ProtoReflect() protoreflect.Message { // Deprecated: Use BootstrapConfigDump.ProtoReflect.Descriptor instead. func (*BootstrapConfigDump) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2} + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{1} } func (x *BootstrapConfigDump) GetBootstrap() *v3.Bootstrap { @@ -299,41 +159,39 @@ func (x *BootstrapConfigDump) GetLastUpdated() *timestamp.Timestamp { return nil } -// Envoy's listener manager fills this message with all currently known listeners. Listener -// configuration information can be used to recreate an Envoy configuration by populating all -// listeners as static listeners or by returning them in a LDS response. -type ListenersConfigDump struct { +// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. 
+type SecretsConfigDump struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // This is the :ref:`version_info ` in the - // last processed LDS discovery response. If there are only static bootstrap listeners, this field - // will be "". - VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` - // The statically loaded listener configs. - StaticListeners []*ListenersConfigDump_StaticListener `protobuf:"bytes,2,rep,name=static_listeners,json=staticListeners,proto3" json:"static_listeners,omitempty"` - // State for any warming, active, or draining listeners. - DynamicListeners []*ListenersConfigDump_DynamicListener `protobuf:"bytes,3,rep,name=dynamic_listeners,json=dynamicListeners,proto3" json:"dynamic_listeners,omitempty"` + // The statically loaded secrets. + StaticSecrets []*SecretsConfigDump_StaticSecret `protobuf:"bytes,1,rep,name=static_secrets,json=staticSecrets,proto3" json:"static_secrets,omitempty"` + // The dynamically loaded active secrets. These are secrets that are available to service + // clusters or listeners. + DynamicActiveSecrets []*SecretsConfigDump_DynamicSecret `protobuf:"bytes,2,rep,name=dynamic_active_secrets,json=dynamicActiveSecrets,proto3" json:"dynamic_active_secrets,omitempty"` + // The dynamically loaded warming secrets. These are secrets that are currently undergoing + // warming in preparation to service clusters or listeners. 
+ DynamicWarmingSecrets []*SecretsConfigDump_DynamicSecret `protobuf:"bytes,3,rep,name=dynamic_warming_secrets,json=dynamicWarmingSecrets,proto3" json:"dynamic_warming_secrets,omitempty"` } -func (x *ListenersConfigDump) Reset() { - *x = ListenersConfigDump{} +func (x *SecretsConfigDump) Reset() { + *x = SecretsConfigDump{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[3] + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ListenersConfigDump) String() string { +func (x *SecretsConfigDump) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListenersConfigDump) ProtoMessage() {} +func (*SecretsConfigDump) ProtoMessage() {} -func (x *ListenersConfigDump) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[3] +func (x *SecretsConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -344,73 +202,77 @@ func (x *ListenersConfigDump) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListenersConfigDump.ProtoReflect.Descriptor instead. -func (*ListenersConfigDump) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{3} +// Deprecated: Use SecretsConfigDump.ProtoReflect.Descriptor instead. 
+func (*SecretsConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2} } -func (x *ListenersConfigDump) GetVersionInfo() string { +func (x *SecretsConfigDump) GetStaticSecrets() []*SecretsConfigDump_StaticSecret { if x != nil { - return x.VersionInfo + return x.StaticSecrets } - return "" + return nil } -func (x *ListenersConfigDump) GetStaticListeners() []*ListenersConfigDump_StaticListener { +func (x *SecretsConfigDump) GetDynamicActiveSecrets() []*SecretsConfigDump_DynamicSecret { if x != nil { - return x.StaticListeners + return x.DynamicActiveSecrets } return nil } -func (x *ListenersConfigDump) GetDynamicListeners() []*ListenersConfigDump_DynamicListener { +func (x *SecretsConfigDump) GetDynamicWarmingSecrets() []*SecretsConfigDump_DynamicSecret { if x != nil { - return x.DynamicListeners + return x.DynamicWarmingSecrets } return nil } -// Envoy's cluster manager fills this message with all currently known clusters. Cluster -// configuration information can be used to recreate an Envoy configuration by populating all -// clusters as static clusters or by returning them in a CDS response. -type ClustersConfigDump struct { +// DynamicSecret contains secret information fetched via SDS. +// [#next-free-field: 7] +type SecretsConfigDump_DynamicSecret struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // This is the :ref:`version_info ` in the - // last processed CDS discovery response. If there are only static bootstrap clusters, this field - // will be "". - VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` - // The statically loaded cluster configs. - StaticClusters []*ClustersConfigDump_StaticCluster `protobuf:"bytes,2,rep,name=static_clusters,json=staticClusters,proto3" json:"static_clusters,omitempty"` - // The dynamically loaded active clusters. 
These are clusters that are available to service - // data plane traffic. - DynamicActiveClusters []*ClustersConfigDump_DynamicCluster `protobuf:"bytes,3,rep,name=dynamic_active_clusters,json=dynamicActiveClusters,proto3" json:"dynamic_active_clusters,omitempty"` - // The dynamically loaded warming clusters. These are clusters that are currently undergoing - // warming in preparation to service data plane traffic. Note that if attempting to recreate an - // Envoy configuration from a configuration dump, the warming clusters should generally be - // discarded. - DynamicWarmingClusters []*ClustersConfigDump_DynamicCluster `protobuf:"bytes,4,rep,name=dynamic_warming_clusters,json=dynamicWarmingClusters,proto3" json:"dynamic_warming_clusters,omitempty"` + // The name assigned to the secret. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // This is the per-resource version information. + VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The timestamp when the secret was last updated. + LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // The actual secret information. + // Security sensitive information is redacted (replaced with "[redacted]") for + // private keys and passwords in TLS certificates. + Secret *any1.Any `protobuf:"bytes,4,opt,name=secret,proto3" json:"secret,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The *error_state* field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. 
+ // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` } -func (x *ClustersConfigDump) Reset() { - *x = ClustersConfigDump{} +func (x *SecretsConfigDump_DynamicSecret) Reset() { + *x = SecretsConfigDump_DynamicSecret{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[4] + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ClustersConfigDump) String() string { +func (x *SecretsConfigDump_DynamicSecret) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ClustersConfigDump) ProtoMessage() {} +func (*SecretsConfigDump_DynamicSecret) ProtoMessage() {} -func (x *ClustersConfigDump) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[4] +func (x *SecretsConfigDump_DynamicSecret) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -421,72 +283,86 @@ func (x *ClustersConfigDump) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ClustersConfigDump.ProtoReflect.Descriptor instead. -func (*ClustersConfigDump) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{4} +// Deprecated: Use SecretsConfigDump_DynamicSecret.ProtoReflect.Descriptor instead. 
+func (*SecretsConfigDump_DynamicSecret) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *SecretsConfigDump_DynamicSecret) GetName() string { + if x != nil { + return x.Name + } + return "" } -func (x *ClustersConfigDump) GetVersionInfo() string { +func (x *SecretsConfigDump_DynamicSecret) GetVersionInfo() string { if x != nil { return x.VersionInfo } return "" } -func (x *ClustersConfigDump) GetStaticClusters() []*ClustersConfigDump_StaticCluster { +func (x *SecretsConfigDump_DynamicSecret) GetLastUpdated() *timestamp.Timestamp { if x != nil { - return x.StaticClusters + return x.LastUpdated } return nil } -func (x *ClustersConfigDump) GetDynamicActiveClusters() []*ClustersConfigDump_DynamicCluster { +func (x *SecretsConfigDump_DynamicSecret) GetSecret() *any1.Any { if x != nil { - return x.DynamicActiveClusters + return x.Secret } return nil } -func (x *ClustersConfigDump) GetDynamicWarmingClusters() []*ClustersConfigDump_DynamicCluster { +func (x *SecretsConfigDump_DynamicSecret) GetErrorState() *UpdateFailureState { if x != nil { - return x.DynamicWarmingClusters + return x.ErrorState } return nil } -// Envoy's RDS implementation fills this message with all currently loaded routes, as described by -// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration -// or defined inline while configuring listeners are separated from those configured dynamically via RDS. -// Route configuration information can be used to recreate an Envoy configuration by populating all routes -// as static routes or by returning them in RDS responses. -type RoutesConfigDump struct { +func (x *SecretsConfigDump_DynamicSecret) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +// StaticSecret specifies statically loaded secret in bootstrap. 
+type SecretsConfigDump_StaticSecret struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The statically loaded route configs. - StaticRouteConfigs []*RoutesConfigDump_StaticRouteConfig `protobuf:"bytes,2,rep,name=static_route_configs,json=staticRouteConfigs,proto3" json:"static_route_configs,omitempty"` - // The dynamically loaded route configs. - DynamicRouteConfigs []*RoutesConfigDump_DynamicRouteConfig `protobuf:"bytes,3,rep,name=dynamic_route_configs,json=dynamicRouteConfigs,proto3" json:"dynamic_route_configs,omitempty"` + // The name assigned to the secret. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The timestamp when the secret was last updated. + LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // The actual secret information. + // Security sensitive information is redacted (replaced with "[redacted]") for + // private keys and passwords in TLS certificates. 
+ Secret *any1.Any `protobuf:"bytes,3,opt,name=secret,proto3" json:"secret,omitempty"` } -func (x *RoutesConfigDump) Reset() { - *x = RoutesConfigDump{} +func (x *SecretsConfigDump_StaticSecret) Reset() { + *x = SecretsConfigDump_StaticSecret{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[5] + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RoutesConfigDump) String() string { +func (x *SecretsConfigDump_StaticSecret) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RoutesConfigDump) ProtoMessage() {} +func (*SecretsConfigDump_StaticSecret) ProtoMessage() {} -func (x *RoutesConfigDump) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[5] +func (x *SecretsConfigDump_StaticSecret) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -497,1784 +373,191 @@ func (x *RoutesConfigDump) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RoutesConfigDump.ProtoReflect.Descriptor instead. -func (*RoutesConfigDump) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{5} +// Deprecated: Use SecretsConfigDump_StaticSecret.ProtoReflect.Descriptor instead. 
+func (*SecretsConfigDump_StaticSecret) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2, 1} } -func (x *RoutesConfigDump) GetStaticRouteConfigs() []*RoutesConfigDump_StaticRouteConfig { +func (x *SecretsConfigDump_StaticSecret) GetName() string { if x != nil { - return x.StaticRouteConfigs + return x.Name } - return nil + return "" } -func (x *RoutesConfigDump) GetDynamicRouteConfigs() []*RoutesConfigDump_DynamicRouteConfig { +func (x *SecretsConfigDump_StaticSecret) GetLastUpdated() *timestamp.Timestamp { if x != nil { - return x.DynamicRouteConfigs + return x.LastUpdated } return nil } -// Envoy's scoped RDS implementation fills this message with all currently loaded route -// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both -// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the -// dynamically obtained scopes via the SRDS API. -type ScopedRoutesConfigDump struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The statically loaded scoped route configs. - InlineScopedRouteConfigs []*ScopedRoutesConfigDump_InlineScopedRouteConfigs `protobuf:"bytes,1,rep,name=inline_scoped_route_configs,json=inlineScopedRouteConfigs,proto3" json:"inline_scoped_route_configs,omitempty"` - // The dynamically loaded scoped route configs. 
- DynamicScopedRouteConfigs []*ScopedRoutesConfigDump_DynamicScopedRouteConfigs `protobuf:"bytes,2,rep,name=dynamic_scoped_route_configs,json=dynamicScopedRouteConfigs,proto3" json:"dynamic_scoped_route_configs,omitempty"` -} - -func (x *ScopedRoutesConfigDump) Reset() { - *x = ScopedRoutesConfigDump{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SecretsConfigDump_StaticSecret) GetSecret() *any1.Any { + if x != nil { + return x.Secret } + return nil } -func (x *ScopedRoutesConfigDump) String() string { - return protoimpl.X.MessageStringOf(x) +var File_envoy_admin_v3_config_dump_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_config_dump_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, + 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73, + 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 
0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0xc8, 0x01, 0x0a, 0x13, 0x42, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x12, 0x42, 0x0a, 0x09, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, + 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x3a, 0x2e, 
0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x75, 0x6d, 0x70, 0x22, 0xb7, 0x07, 0x0a, 0x11, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x55, 0x0a, 0x0e, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x12, 0x65, 0x0a, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x52, 0x14, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x64, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 
0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x15, 0x64, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x1a, 0xff, 0x02, 0x0a, 0x0d, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, + 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 
0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x1a, 0xca, 0x01, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, + 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 
0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x42, 0x78, + 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (*ScopedRoutesConfigDump) ProtoMessage() {} +var ( + file_envoy_admin_v3_config_dump_proto_rawDescOnce sync.Once + file_envoy_admin_v3_config_dump_proto_rawDescData = file_envoy_admin_v3_config_dump_proto_rawDesc +) -func (x *ScopedRoutesConfigDump) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) +func file_envoy_admin_v3_config_dump_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_config_dump_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_config_dump_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_config_dump_proto_rawDescData) + }) + return file_envoy_admin_v3_config_dump_proto_rawDescData } -// Deprecated: Use ScopedRoutesConfigDump.ProtoReflect.Descriptor instead. 
-func (*ScopedRoutesConfigDump) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{6} +var file_envoy_admin_v3_config_dump_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_admin_v3_config_dump_proto_goTypes = []interface{}{ + (*ConfigDump)(nil), // 0: envoy.admin.v3.ConfigDump + (*BootstrapConfigDump)(nil), // 1: envoy.admin.v3.BootstrapConfigDump + (*SecretsConfigDump)(nil), // 2: envoy.admin.v3.SecretsConfigDump + (*SecretsConfigDump_DynamicSecret)(nil), // 3: envoy.admin.v3.SecretsConfigDump.DynamicSecret + (*SecretsConfigDump_StaticSecret)(nil), // 4: envoy.admin.v3.SecretsConfigDump.StaticSecret + (*any1.Any)(nil), // 5: google.protobuf.Any + (*v3.Bootstrap)(nil), // 6: envoy.config.bootstrap.v3.Bootstrap + (*timestamp.Timestamp)(nil), // 7: google.protobuf.Timestamp + (*UpdateFailureState)(nil), // 8: envoy.admin.v3.UpdateFailureState + (ClientResourceStatus)(0), // 9: envoy.admin.v3.ClientResourceStatus } - -func (x *ScopedRoutesConfigDump) GetInlineScopedRouteConfigs() []*ScopedRoutesConfigDump_InlineScopedRouteConfigs { - if x != nil { - return x.InlineScopedRouteConfigs - } - return nil +var file_envoy_admin_v3_config_dump_proto_depIdxs = []int32{ + 5, // 0: envoy.admin.v3.ConfigDump.configs:type_name -> google.protobuf.Any + 6, // 1: envoy.admin.v3.BootstrapConfigDump.bootstrap:type_name -> envoy.config.bootstrap.v3.Bootstrap + 7, // 2: envoy.admin.v3.BootstrapConfigDump.last_updated:type_name -> google.protobuf.Timestamp + 4, // 3: envoy.admin.v3.SecretsConfigDump.static_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.StaticSecret + 3, // 4: envoy.admin.v3.SecretsConfigDump.dynamic_active_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.DynamicSecret + 3, // 5: envoy.admin.v3.SecretsConfigDump.dynamic_warming_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.DynamicSecret + 7, // 6: envoy.admin.v3.SecretsConfigDump.DynamicSecret.last_updated:type_name -> 
google.protobuf.Timestamp + 5, // 7: envoy.admin.v3.SecretsConfigDump.DynamicSecret.secret:type_name -> google.protobuf.Any + 8, // 8: envoy.admin.v3.SecretsConfigDump.DynamicSecret.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 9, // 9: envoy.admin.v3.SecretsConfigDump.DynamicSecret.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 7, // 10: envoy.admin.v3.SecretsConfigDump.StaticSecret.last_updated:type_name -> google.protobuf.Timestamp + 5, // 11: envoy.admin.v3.SecretsConfigDump.StaticSecret.secret:type_name -> google.protobuf.Any + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name } -func (x *ScopedRoutesConfigDump) GetDynamicScopedRouteConfigs() []*ScopedRoutesConfigDump_DynamicScopedRouteConfigs { - if x != nil { - return x.DynamicScopedRouteConfigs - } - return nil -} - -// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. -type SecretsConfigDump struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The statically loaded secrets. - StaticSecrets []*SecretsConfigDump_StaticSecret `protobuf:"bytes,1,rep,name=static_secrets,json=staticSecrets,proto3" json:"static_secrets,omitempty"` - // The dynamically loaded active secrets. These are secrets that are available to service - // clusters or listeners. - DynamicActiveSecrets []*SecretsConfigDump_DynamicSecret `protobuf:"bytes,2,rep,name=dynamic_active_secrets,json=dynamicActiveSecrets,proto3" json:"dynamic_active_secrets,omitempty"` - // The dynamically loaded warming secrets. These are secrets that are currently undergoing - // warming in preparation to service clusters or listeners. 
- DynamicWarmingSecrets []*SecretsConfigDump_DynamicSecret `protobuf:"bytes,3,rep,name=dynamic_warming_secrets,json=dynamicWarmingSecrets,proto3" json:"dynamic_warming_secrets,omitempty"` -} - -func (x *SecretsConfigDump) Reset() { - *x = SecretsConfigDump{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecretsConfigDump) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecretsConfigDump) ProtoMessage() {} - -func (x *SecretsConfigDump) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecretsConfigDump.ProtoReflect.Descriptor instead. -func (*SecretsConfigDump) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{7} -} - -func (x *SecretsConfigDump) GetStaticSecrets() []*SecretsConfigDump_StaticSecret { - if x != nil { - return x.StaticSecrets - } - return nil -} - -func (x *SecretsConfigDump) GetDynamicActiveSecrets() []*SecretsConfigDump_DynamicSecret { - if x != nil { - return x.DynamicActiveSecrets - } - return nil -} - -func (x *SecretsConfigDump) GetDynamicWarmingSecrets() []*SecretsConfigDump_DynamicSecret { - if x != nil { - return x.DynamicWarmingSecrets - } - return nil -} - -// Envoy's admin fill this message with all currently known endpoints. Endpoint -// configuration information can be used to recreate an Envoy configuration by populating all -// endpoints as static endpoints or by returning them in an EDS response. 
-type EndpointsConfigDump struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The statically loaded endpoint configs. - StaticEndpointConfigs []*EndpointsConfigDump_StaticEndpointConfig `protobuf:"bytes,2,rep,name=static_endpoint_configs,json=staticEndpointConfigs,proto3" json:"static_endpoint_configs,omitempty"` - // The dynamically loaded endpoint configs. - DynamicEndpointConfigs []*EndpointsConfigDump_DynamicEndpointConfig `protobuf:"bytes,3,rep,name=dynamic_endpoint_configs,json=dynamicEndpointConfigs,proto3" json:"dynamic_endpoint_configs,omitempty"` -} - -func (x *EndpointsConfigDump) Reset() { - *x = EndpointsConfigDump{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EndpointsConfigDump) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EndpointsConfigDump) ProtoMessage() {} - -func (x *EndpointsConfigDump) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EndpointsConfigDump.ProtoReflect.Descriptor instead. -func (*EndpointsConfigDump) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{8} -} - -func (x *EndpointsConfigDump) GetStaticEndpointConfigs() []*EndpointsConfigDump_StaticEndpointConfig { - if x != nil { - return x.StaticEndpointConfigs - } - return nil -} - -func (x *EndpointsConfigDump) GetDynamicEndpointConfigs() []*EndpointsConfigDump_DynamicEndpointConfig { - if x != nil { - return x.DynamicEndpointConfigs - } - return nil -} - -// Describes a statically loaded listener. 
-type ListenersConfigDump_StaticListener struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The listener config. - Listener *any.Any `protobuf:"bytes,1,opt,name=listener,proto3" json:"listener,omitempty"` - // The timestamp when the Listener was last successfully updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` -} - -func (x *ListenersConfigDump_StaticListener) Reset() { - *x = ListenersConfigDump_StaticListener{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListenersConfigDump_StaticListener) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListenersConfigDump_StaticListener) ProtoMessage() {} - -func (x *ListenersConfigDump_StaticListener) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListenersConfigDump_StaticListener.ProtoReflect.Descriptor instead. 
-func (*ListenersConfigDump_StaticListener) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{3, 0} -} - -func (x *ListenersConfigDump_StaticListener) GetListener() *any.Any { - if x != nil { - return x.Listener - } - return nil -} - -func (x *ListenersConfigDump_StaticListener) GetLastUpdated() *timestamp.Timestamp { - if x != nil { - return x.LastUpdated - } - return nil -} - -type ListenersConfigDump_DynamicListenerState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the listener was loaded. In the future, discrete per-listener versions may be supported - // by the API. - VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` - // The listener config. - Listener *any.Any `protobuf:"bytes,2,opt,name=listener,proto3" json:"listener,omitempty"` - // The timestamp when the Listener was last successfully updated. 
- LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` -} - -func (x *ListenersConfigDump_DynamicListenerState) Reset() { - *x = ListenersConfigDump_DynamicListenerState{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListenersConfigDump_DynamicListenerState) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListenersConfigDump_DynamicListenerState) ProtoMessage() {} - -func (x *ListenersConfigDump_DynamicListenerState) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListenersConfigDump_DynamicListenerState.ProtoReflect.Descriptor instead. -func (*ListenersConfigDump_DynamicListenerState) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{3, 1} -} - -func (x *ListenersConfigDump_DynamicListenerState) GetVersionInfo() string { - if x != nil { - return x.VersionInfo - } - return "" -} - -func (x *ListenersConfigDump_DynamicListenerState) GetListener() *any.Any { - if x != nil { - return x.Listener - } - return nil -} - -func (x *ListenersConfigDump_DynamicListenerState) GetLastUpdated() *timestamp.Timestamp { - if x != nil { - return x.LastUpdated - } - return nil -} - -// Describes a dynamically loaded listener via the LDS API. -// [#next-free-field: 7] -type ListenersConfigDump_DynamicListener struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name or unique id of this listener, pulled from the DynamicListenerState config. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The listener state for any active listener by this name. - // These are listeners that are available to service data plane traffic. - ActiveState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,2,opt,name=active_state,json=activeState,proto3" json:"active_state,omitempty"` - // The listener state for any warming listener by this name. - // These are listeners that are currently undergoing warming in preparation to service data - // plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the warming listeners should generally be discarded. - WarmingState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,3,opt,name=warming_state,json=warmingState,proto3" json:"warming_state,omitempty"` - // The listener state for any draining listener by this name. - // These are listeners that are currently undergoing draining in preparation to stop servicing - // data plane traffic. Note that if attempting to recreate an Envoy configuration from a - // configuration dump, the draining listeners should generally be discarded. - DrainingState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,4,opt,name=draining_state,json=drainingState,proto3" json:"draining_state,omitempty"` - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` - // The client status of this resource. 
- // [#not-implemented-hide:] - ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` -} - -func (x *ListenersConfigDump_DynamicListener) Reset() { - *x = ListenersConfigDump_DynamicListener{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListenersConfigDump_DynamicListener) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListenersConfigDump_DynamicListener) ProtoMessage() {} - -func (x *ListenersConfigDump_DynamicListener) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListenersConfigDump_DynamicListener.ProtoReflect.Descriptor instead. 
-func (*ListenersConfigDump_DynamicListener) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{3, 2} -} - -func (x *ListenersConfigDump_DynamicListener) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ListenersConfigDump_DynamicListener) GetActiveState() *ListenersConfigDump_DynamicListenerState { - if x != nil { - return x.ActiveState - } - return nil -} - -func (x *ListenersConfigDump_DynamicListener) GetWarmingState() *ListenersConfigDump_DynamicListenerState { - if x != nil { - return x.WarmingState - } - return nil -} - -func (x *ListenersConfigDump_DynamicListener) GetDrainingState() *ListenersConfigDump_DynamicListenerState { - if x != nil { - return x.DrainingState - } - return nil -} - -func (x *ListenersConfigDump_DynamicListener) GetErrorState() *UpdateFailureState { - if x != nil { - return x.ErrorState - } - return nil -} - -func (x *ListenersConfigDump_DynamicListener) GetClientStatus() ClientResourceStatus { - if x != nil { - return x.ClientStatus - } - return ClientResourceStatus_UNKNOWN -} - -// Describes a statically loaded cluster. -type ClustersConfigDump_StaticCluster struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The cluster config. - Cluster *any.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - // The timestamp when the Cluster was last updated. 
- LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` -} - -func (x *ClustersConfigDump_StaticCluster) Reset() { - *x = ClustersConfigDump_StaticCluster{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClustersConfigDump_StaticCluster) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClustersConfigDump_StaticCluster) ProtoMessage() {} - -func (x *ClustersConfigDump_StaticCluster) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClustersConfigDump_StaticCluster.ProtoReflect.Descriptor instead. -func (*ClustersConfigDump_StaticCluster) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{4, 0} -} - -func (x *ClustersConfigDump_StaticCluster) GetCluster() *any.Any { - if x != nil { - return x.Cluster - } - return nil -} - -func (x *ClustersConfigDump_StaticCluster) GetLastUpdated() *timestamp.Timestamp { - if x != nil { - return x.LastUpdated - } - return nil -} - -// Describes a dynamically loaded cluster via the CDS API. -// [#next-free-field: 6] -type ClustersConfigDump_DynamicCluster struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time - // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by - // the API. 
- VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` - // The cluster config. - Cluster *any.Any `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` - // The timestamp when the Cluster was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` - // The client status of this resource. - // [#not-implemented-hide:] - ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` -} - -func (x *ClustersConfigDump_DynamicCluster) Reset() { - *x = ClustersConfigDump_DynamicCluster{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClustersConfigDump_DynamicCluster) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClustersConfigDump_DynamicCluster) ProtoMessage() {} - -func (x *ClustersConfigDump_DynamicCluster) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use 
ClustersConfigDump_DynamicCluster.ProtoReflect.Descriptor instead. -func (*ClustersConfigDump_DynamicCluster) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{4, 1} -} - -func (x *ClustersConfigDump_DynamicCluster) GetVersionInfo() string { - if x != nil { - return x.VersionInfo - } - return "" -} - -func (x *ClustersConfigDump_DynamicCluster) GetCluster() *any.Any { - if x != nil { - return x.Cluster - } - return nil -} - -func (x *ClustersConfigDump_DynamicCluster) GetLastUpdated() *timestamp.Timestamp { - if x != nil { - return x.LastUpdated - } - return nil -} - -func (x *ClustersConfigDump_DynamicCluster) GetErrorState() *UpdateFailureState { - if x != nil { - return x.ErrorState - } - return nil -} - -func (x *ClustersConfigDump_DynamicCluster) GetClientStatus() ClientResourceStatus { - if x != nil { - return x.ClientStatus - } - return ClientResourceStatus_UNKNOWN -} - -type RoutesConfigDump_StaticRouteConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The route config. - RouteConfig *any.Any `protobuf:"bytes,1,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"` - // The timestamp when the Route was last updated. 
- LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` -} - -func (x *RoutesConfigDump_StaticRouteConfig) Reset() { - *x = RoutesConfigDump_StaticRouteConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RoutesConfigDump_StaticRouteConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RoutesConfigDump_StaticRouteConfig) ProtoMessage() {} - -func (x *RoutesConfigDump_StaticRouteConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RoutesConfigDump_StaticRouteConfig.ProtoReflect.Descriptor instead. -func (*RoutesConfigDump_StaticRouteConfig) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{5, 0} -} - -func (x *RoutesConfigDump_StaticRouteConfig) GetRouteConfig() *any.Any { - if x != nil { - return x.RouteConfig - } - return nil -} - -func (x *RoutesConfigDump_StaticRouteConfig) GetLastUpdated() *timestamp.Timestamp { - if x != nil { - return x.LastUpdated - } - return nil -} - -// [#next-free-field: 6] -type RoutesConfigDump_DynamicRouteConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the route configuration was loaded. - VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` - // The route config. 
- RouteConfig *any.Any `protobuf:"bytes,2,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"` - // The timestamp when the Route was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` - // The client status of this resource. - // [#not-implemented-hide:] - ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` -} - -func (x *RoutesConfigDump_DynamicRouteConfig) Reset() { - *x = RoutesConfigDump_DynamicRouteConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RoutesConfigDump_DynamicRouteConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RoutesConfigDump_DynamicRouteConfig) ProtoMessage() {} - -func (x *RoutesConfigDump_DynamicRouteConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RoutesConfigDump_DynamicRouteConfig.ProtoReflect.Descriptor instead. 
-func (*RoutesConfigDump_DynamicRouteConfig) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{5, 1} -} - -func (x *RoutesConfigDump_DynamicRouteConfig) GetVersionInfo() string { - if x != nil { - return x.VersionInfo - } - return "" -} - -func (x *RoutesConfigDump_DynamicRouteConfig) GetRouteConfig() *any.Any { - if x != nil { - return x.RouteConfig - } - return nil -} - -func (x *RoutesConfigDump_DynamicRouteConfig) GetLastUpdated() *timestamp.Timestamp { - if x != nil { - return x.LastUpdated - } - return nil -} - -func (x *RoutesConfigDump_DynamicRouteConfig) GetErrorState() *UpdateFailureState { - if x != nil { - return x.ErrorState - } - return nil -} - -func (x *RoutesConfigDump_DynamicRouteConfig) GetClientStatus() ClientResourceStatus { - if x != nil { - return x.ClientStatus - } - return ClientResourceStatus_UNKNOWN -} - -type ScopedRoutesConfigDump_InlineScopedRouteConfigs struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name assigned to the scoped route configurations. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The scoped route configurations. - ScopedRouteConfigs []*any.Any `protobuf:"bytes,2,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"` - // The timestamp when the scoped route config set was last updated. 
- LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` -} - -func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) Reset() { - *x = ScopedRoutesConfigDump_InlineScopedRouteConfigs{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ScopedRoutesConfigDump_InlineScopedRouteConfigs) ProtoMessage() {} - -func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ScopedRoutesConfigDump_InlineScopedRouteConfigs.ProtoReflect.Descriptor instead. -func (*ScopedRoutesConfigDump_InlineScopedRouteConfigs) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{6, 0} -} - -func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetScopedRouteConfigs() []*any.Any { - if x != nil { - return x.ScopedRouteConfigs - } - return nil -} - -func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetLastUpdated() *timestamp.Timestamp { - if x != nil { - return x.LastUpdated - } - return nil -} - -// [#next-free-field: 7] -type ScopedRoutesConfigDump_DynamicScopedRouteConfigs struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name assigned to the scoped route configurations. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the scoped routes configuration was loaded. - VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` - // The scoped route configurations. - ScopedRouteConfigs []*any.Any `protobuf:"bytes,3,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"` - // The timestamp when the scoped route config set was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,4,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` - // The client status of this resource. 
- // [#not-implemented-hide:] - ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` -} - -func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Reset() { - *x = ScopedRoutesConfigDump_DynamicScopedRouteConfigs{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ProtoMessage() {} - -func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ScopedRoutesConfigDump_DynamicScopedRouteConfigs.ProtoReflect.Descriptor instead. 
-func (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{6, 1} -} - -func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetVersionInfo() string { - if x != nil { - return x.VersionInfo - } - return "" -} - -func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetScopedRouteConfigs() []*any.Any { - if x != nil { - return x.ScopedRouteConfigs - } - return nil -} - -func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetLastUpdated() *timestamp.Timestamp { - if x != nil { - return x.LastUpdated - } - return nil -} - -func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetErrorState() *UpdateFailureState { - if x != nil { - return x.ErrorState - } - return nil -} - -func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetClientStatus() ClientResourceStatus { - if x != nil { - return x.ClientStatus - } - return ClientResourceStatus_UNKNOWN -} - -// DynamicSecret contains secret information fetched via SDS. -// [#next-free-field: 7] -type SecretsConfigDump_DynamicSecret struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name assigned to the secret. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // This is the per-resource version information. - VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` - // The timestamp when the secret was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` - // The actual secret information. - // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. 
- Secret *any.Any `protobuf:"bytes,4,opt,name=secret,proto3" json:"secret,omitempty"` - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` - // The client status of this resource. - // [#not-implemented-hide:] - ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` -} - -func (x *SecretsConfigDump_DynamicSecret) Reset() { - *x = SecretsConfigDump_DynamicSecret{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecretsConfigDump_DynamicSecret) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecretsConfigDump_DynamicSecret) ProtoMessage() {} - -func (x *SecretsConfigDump_DynamicSecret) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecretsConfigDump_DynamicSecret.ProtoReflect.Descriptor instead. 
-func (*SecretsConfigDump_DynamicSecret) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{7, 0} -} - -func (x *SecretsConfigDump_DynamicSecret) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *SecretsConfigDump_DynamicSecret) GetVersionInfo() string { - if x != nil { - return x.VersionInfo - } - return "" -} - -func (x *SecretsConfigDump_DynamicSecret) GetLastUpdated() *timestamp.Timestamp { - if x != nil { - return x.LastUpdated - } - return nil -} - -func (x *SecretsConfigDump_DynamicSecret) GetSecret() *any.Any { - if x != nil { - return x.Secret - } - return nil -} - -func (x *SecretsConfigDump_DynamicSecret) GetErrorState() *UpdateFailureState { - if x != nil { - return x.ErrorState - } - return nil -} - -func (x *SecretsConfigDump_DynamicSecret) GetClientStatus() ClientResourceStatus { - if x != nil { - return x.ClientStatus - } - return ClientResourceStatus_UNKNOWN -} - -// StaticSecret specifies statically loaded secret in bootstrap. -type SecretsConfigDump_StaticSecret struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name assigned to the secret. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The timestamp when the secret was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` - // The actual secret information. - // Security sensitive information is redacted (replaced with "[redacted]") for - // private keys and passwords in TLS certificates. 
- Secret *any.Any `protobuf:"bytes,3,opt,name=secret,proto3" json:"secret,omitempty"` -} - -func (x *SecretsConfigDump_StaticSecret) Reset() { - *x = SecretsConfigDump_StaticSecret{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecretsConfigDump_StaticSecret) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecretsConfigDump_StaticSecret) ProtoMessage() {} - -func (x *SecretsConfigDump_StaticSecret) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecretsConfigDump_StaticSecret.ProtoReflect.Descriptor instead. -func (*SecretsConfigDump_StaticSecret) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{7, 1} -} - -func (x *SecretsConfigDump_StaticSecret) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *SecretsConfigDump_StaticSecret) GetLastUpdated() *timestamp.Timestamp { - if x != nil { - return x.LastUpdated - } - return nil -} - -func (x *SecretsConfigDump_StaticSecret) GetSecret() *any.Any { - if x != nil { - return x.Secret - } - return nil -} - -type EndpointsConfigDump_StaticEndpointConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The endpoint config. - EndpointConfig *any.Any `protobuf:"bytes,1,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"` - // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. 
- LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` -} - -func (x *EndpointsConfigDump_StaticEndpointConfig) Reset() { - *x = EndpointsConfigDump_StaticEndpointConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EndpointsConfigDump_StaticEndpointConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EndpointsConfigDump_StaticEndpointConfig) ProtoMessage() {} - -func (x *EndpointsConfigDump_StaticEndpointConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EndpointsConfigDump_StaticEndpointConfig.ProtoReflect.Descriptor instead. -func (*EndpointsConfigDump_StaticEndpointConfig) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{8, 0} -} - -func (x *EndpointsConfigDump_StaticEndpointConfig) GetEndpointConfig() *any.Any { - if x != nil { - return x.EndpointConfig - } - return nil -} - -func (x *EndpointsConfigDump_StaticEndpointConfig) GetLastUpdated() *timestamp.Timestamp { - if x != nil { - return x.LastUpdated - } - return nil -} - -// [#next-free-field: 6] -type EndpointsConfigDump_DynamicEndpointConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the - // :ref:`version_info ` field at the time that - // the endpoint configuration was loaded. 
- VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` - // The endpoint config. - EndpointConfig *any.Any `protobuf:"bytes,2,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"` - // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. - LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` - // Set if the last update failed, cleared after the next successful update. - // The *error_state* field contains the rejected version of this particular - // resource along with the reason and timestamp. For successfully updated or - // acknowledged resource, this field should be empty. - // [#not-implemented-hide:] - ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` - // The client status of this resource. - // [#not-implemented-hide:] - ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` -} - -func (x *EndpointsConfigDump_DynamicEndpointConfig) Reset() { - *x = EndpointsConfigDump_DynamicEndpointConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EndpointsConfigDump_DynamicEndpointConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EndpointsConfigDump_DynamicEndpointConfig) ProtoMessage() {} - -func (x *EndpointsConfigDump_DynamicEndpointConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return 
ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EndpointsConfigDump_DynamicEndpointConfig.ProtoReflect.Descriptor instead. -func (*EndpointsConfigDump_DynamicEndpointConfig) Descriptor() ([]byte, []int) { - return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{8, 1} -} - -func (x *EndpointsConfigDump_DynamicEndpointConfig) GetVersionInfo() string { - if x != nil { - return x.VersionInfo - } - return "" -} - -func (x *EndpointsConfigDump_DynamicEndpointConfig) GetEndpointConfig() *any.Any { - if x != nil { - return x.EndpointConfig - } - return nil -} - -func (x *EndpointsConfigDump_DynamicEndpointConfig) GetLastUpdated() *timestamp.Timestamp { - if x != nil { - return x.LastUpdated - } - return nil -} - -func (x *EndpointsConfigDump_DynamicEndpointConfig) GetErrorState() *UpdateFailureState { - if x != nil { - return x.ErrorState - } - return nil -} - -func (x *EndpointsConfigDump_DynamicEndpointConfig) GetClientStatus() ClientResourceStatus { - if x != nil { - return x.ClientStatus - } - return ClientResourceStatus_UNKNOWN -} - -var File_envoy_admin_v3_config_dump_proto protoreflect.FileDescriptor - -var file_envoy_admin_v3_config_dump_proto_rawDesc = []byte{ - 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, - 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x33, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x6f, - 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, - 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x0a, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, - 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, - 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, - 0x22, 0x95, 0x02, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, - 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x14, 0x66, 0x61, 0x69, 0x6c, 0x65, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x13, 0x66, 0x61, 0x69, - 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x4a, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, - 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x12, 0x18, 0x0a, 0x07, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, - 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, - 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x13, 0x42, 0x6f, 0x6f, - 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, - 0x12, 0x42, 0x0a, 0x09, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, - 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x74, 0x73, - 0x74, 0x72, 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x64, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 
0x61, 0x2e, - 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, - 0x75, 0x6d, 0x70, 0x22, 0xf3, 0x09, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x5d, - 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, - 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x0f, 0x73, 0x74, - 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, - 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, - 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x10, 0x64, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x1a, - 0xc0, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x65, 0x72, 0x12, 0x30, 0x0a, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x64, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, - 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x65, 0x72, 0x1a, 0xef, 0x01, 0x0a, 0x14, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, - 0x0a, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, - 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, - 0x43, 0x9a, 0xc5, 0x88, 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x1a, 0x92, 0x04, 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5b, 0x0a, 0x0c, - 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x61, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x5d, 0x0a, 0x0d, 0x77, 0x61, 0x72, - 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x77, 0x61, 0x72, 0x6d, - 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x5f, 0x0a, 0x0e, 0x64, 0x72, 0x61, 0x69, - 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 
0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x64, 0x72, 0x61, 0x69, - 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, - 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, - 0x0a, 0x37, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, - 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0xca, 0x07, 0x0a, 0x12, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, - 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x59, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0e, - 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x69, - 0x0a, 0x17, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, - 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, - 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x52, 0x15, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, - 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x6b, 0x0a, 0x18, 0x64, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 
0x65, 0x72, 0x52, 0x16, - 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x1a, 0xbb, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x69, - 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, - 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x1a, 0xf0, 0x02, 0x0a, 0x0e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, - 0x73, 0x74, 
0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, - 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, - 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, - 0x0a, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0xdd, 0x06, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 
0x75, 0x6d, 0x70, 0x12, 0x64, 0x0a, 0x14, 0x73, - 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x73, - 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x73, 0x12, 0x67, 0x0a, 0x15, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, - 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xca, 0x01, 0x0a, 0x11, 0x53, - 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x37, 0x0a, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, - 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, - 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, - 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xff, 0x02, 0x0a, 0x12, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, - 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x37, 0x0a, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, - 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, - 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x0a, 
0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, - 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, - 0x0a, 0x37, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, - 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0x8c, 0x08, 0x0a, 0x16, 0x53, 0x63, 0x6f, 0x70, 0x65, - 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, - 0x70, 0x12, 0x7e, 0x0a, 0x1b, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x63, 0x6f, 0x70, - 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x49, - 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x18, 0x69, 
0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, - 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x73, 0x12, 0x81, 0x01, 0x0a, 0x1c, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x73, 0x63, - 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, - 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x19, 0x64, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0x81, 0x02, 0x0a, 0x18, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, - 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x14, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x12, 0x73, 0x63, 0x6f, 0x70, - 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x3d, - 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 
0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x4a, 0x9a, - 0xc5, 0x88, 0x1e, 0x45, 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, - 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xb6, 0x03, 0x0a, 0x19, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x46, - 0x0a, 0x14, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, - 0x6e, 0x79, 0x52, 0x12, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x4b, 0x9a, 0xc5, 0x88, 0x1e, 0x46, 0x0a, 0x44, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x73, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, - 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0xb7, 0x07, 0x0a, 0x11, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x55, 0x0a, 0x0e, 0x73, - 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 
0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x73, 0x12, 0x65, 0x0a, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x61, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x52, 0x14, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x64, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x15, 0x64, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x73, 0x1a, 0xff, 0x02, 0x0a, 0x0d, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, - 0x61, 0x73, 
0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, - 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, - 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, - 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, - 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x1a, 0xca, 0x01, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, - 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, - 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, - 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, - 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, - 0xde, 0x05, 0x0a, 0x13, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x70, 0x0a, 0x17, 0x73, 0x74, 0x61, 0x74, 0x69, - 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 
0x53, 0x74, - 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x15, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x73, 0x0a, 0x18, 0x64, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, - 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x45, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0x94, - 0x01, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xc8, 0x02, 0x0a, 0x15, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x45, 0x6e, 0x64, 
0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, - 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, - 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x2a, 0x5d, 0x0a, 0x14, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 
0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, - 0x45, 0x44, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x4f, 0x45, 0x53, 0x5f, 0x4e, 0x4f, 0x54, - 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x43, 0x4b, 0x45, - 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x04, 0x42, - 0x78, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, - 0x0f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, - 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_envoy_admin_v3_config_dump_proto_rawDescOnce sync.Once - file_envoy_admin_v3_config_dump_proto_rawDescData = file_envoy_admin_v3_config_dump_proto_rawDesc -) - -func file_envoy_admin_v3_config_dump_proto_rawDescGZIP() []byte { - file_envoy_admin_v3_config_dump_proto_rawDescOnce.Do(func() { - file_envoy_admin_v3_config_dump_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_config_dump_proto_rawDescData) - }) - return file_envoy_admin_v3_config_dump_proto_rawDescData -} - -var file_envoy_admin_v3_config_dump_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_admin_v3_config_dump_proto_msgTypes = make([]protoimpl.MessageInfo, 22) 
-var file_envoy_admin_v3_config_dump_proto_goTypes = []interface{}{ - (ClientResourceStatus)(0), // 0: envoy.admin.v3.ClientResourceStatus - (*ConfigDump)(nil), // 1: envoy.admin.v3.ConfigDump - (*UpdateFailureState)(nil), // 2: envoy.admin.v3.UpdateFailureState - (*BootstrapConfigDump)(nil), // 3: envoy.admin.v3.BootstrapConfigDump - (*ListenersConfigDump)(nil), // 4: envoy.admin.v3.ListenersConfigDump - (*ClustersConfigDump)(nil), // 5: envoy.admin.v3.ClustersConfigDump - (*RoutesConfigDump)(nil), // 6: envoy.admin.v3.RoutesConfigDump - (*ScopedRoutesConfigDump)(nil), // 7: envoy.admin.v3.ScopedRoutesConfigDump - (*SecretsConfigDump)(nil), // 8: envoy.admin.v3.SecretsConfigDump - (*EndpointsConfigDump)(nil), // 9: envoy.admin.v3.EndpointsConfigDump - (*ListenersConfigDump_StaticListener)(nil), // 10: envoy.admin.v3.ListenersConfigDump.StaticListener - (*ListenersConfigDump_DynamicListenerState)(nil), // 11: envoy.admin.v3.ListenersConfigDump.DynamicListenerState - (*ListenersConfigDump_DynamicListener)(nil), // 12: envoy.admin.v3.ListenersConfigDump.DynamicListener - (*ClustersConfigDump_StaticCluster)(nil), // 13: envoy.admin.v3.ClustersConfigDump.StaticCluster - (*ClustersConfigDump_DynamicCluster)(nil), // 14: envoy.admin.v3.ClustersConfigDump.DynamicCluster - (*RoutesConfigDump_StaticRouteConfig)(nil), // 15: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig - (*RoutesConfigDump_DynamicRouteConfig)(nil), // 16: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig - (*ScopedRoutesConfigDump_InlineScopedRouteConfigs)(nil), // 17: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs - (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs)(nil), // 18: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs - (*SecretsConfigDump_DynamicSecret)(nil), // 19: envoy.admin.v3.SecretsConfigDump.DynamicSecret - (*SecretsConfigDump_StaticSecret)(nil), // 20: envoy.admin.v3.SecretsConfigDump.StaticSecret - (*EndpointsConfigDump_StaticEndpointConfig)(nil), // 
21: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig - (*EndpointsConfigDump_DynamicEndpointConfig)(nil), // 22: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig - (*any.Any)(nil), // 23: google.protobuf.Any - (*timestamp.Timestamp)(nil), // 24: google.protobuf.Timestamp - (*v3.Bootstrap)(nil), // 25: envoy.config.bootstrap.v3.Bootstrap -} -var file_envoy_admin_v3_config_dump_proto_depIdxs = []int32{ - 23, // 0: envoy.admin.v3.ConfigDump.configs:type_name -> google.protobuf.Any - 23, // 1: envoy.admin.v3.UpdateFailureState.failed_configuration:type_name -> google.protobuf.Any - 24, // 2: envoy.admin.v3.UpdateFailureState.last_update_attempt:type_name -> google.protobuf.Timestamp - 25, // 3: envoy.admin.v3.BootstrapConfigDump.bootstrap:type_name -> envoy.config.bootstrap.v3.Bootstrap - 24, // 4: envoy.admin.v3.BootstrapConfigDump.last_updated:type_name -> google.protobuf.Timestamp - 10, // 5: envoy.admin.v3.ListenersConfigDump.static_listeners:type_name -> envoy.admin.v3.ListenersConfigDump.StaticListener - 12, // 6: envoy.admin.v3.ListenersConfigDump.dynamic_listeners:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListener - 13, // 7: envoy.admin.v3.ClustersConfigDump.static_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.StaticCluster - 14, // 8: envoy.admin.v3.ClustersConfigDump.dynamic_active_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.DynamicCluster - 14, // 9: envoy.admin.v3.ClustersConfigDump.dynamic_warming_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.DynamicCluster - 15, // 10: envoy.admin.v3.RoutesConfigDump.static_route_configs:type_name -> envoy.admin.v3.RoutesConfigDump.StaticRouteConfig - 16, // 11: envoy.admin.v3.RoutesConfigDump.dynamic_route_configs:type_name -> envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig - 17, // 12: envoy.admin.v3.ScopedRoutesConfigDump.inline_scoped_route_configs:type_name -> envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs - 18, // 13: 
envoy.admin.v3.ScopedRoutesConfigDump.dynamic_scoped_route_configs:type_name -> envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs - 20, // 14: envoy.admin.v3.SecretsConfigDump.static_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.StaticSecret - 19, // 15: envoy.admin.v3.SecretsConfigDump.dynamic_active_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.DynamicSecret - 19, // 16: envoy.admin.v3.SecretsConfigDump.dynamic_warming_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.DynamicSecret - 21, // 17: envoy.admin.v3.EndpointsConfigDump.static_endpoint_configs:type_name -> envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig - 22, // 18: envoy.admin.v3.EndpointsConfigDump.dynamic_endpoint_configs:type_name -> envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig - 23, // 19: envoy.admin.v3.ListenersConfigDump.StaticListener.listener:type_name -> google.protobuf.Any - 24, // 20: envoy.admin.v3.ListenersConfigDump.StaticListener.last_updated:type_name -> google.protobuf.Timestamp - 23, // 21: envoy.admin.v3.ListenersConfigDump.DynamicListenerState.listener:type_name -> google.protobuf.Any - 24, // 22: envoy.admin.v3.ListenersConfigDump.DynamicListenerState.last_updated:type_name -> google.protobuf.Timestamp - 11, // 23: envoy.admin.v3.ListenersConfigDump.DynamicListener.active_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState - 11, // 24: envoy.admin.v3.ListenersConfigDump.DynamicListener.warming_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState - 11, // 25: envoy.admin.v3.ListenersConfigDump.DynamicListener.draining_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState - 2, // 26: envoy.admin.v3.ListenersConfigDump.DynamicListener.error_state:type_name -> envoy.admin.v3.UpdateFailureState - 0, // 27: envoy.admin.v3.ListenersConfigDump.DynamicListener.client_status:type_name -> envoy.admin.v3.ClientResourceStatus - 23, // 28: 
envoy.admin.v3.ClustersConfigDump.StaticCluster.cluster:type_name -> google.protobuf.Any - 24, // 29: envoy.admin.v3.ClustersConfigDump.StaticCluster.last_updated:type_name -> google.protobuf.Timestamp - 23, // 30: envoy.admin.v3.ClustersConfigDump.DynamicCluster.cluster:type_name -> google.protobuf.Any - 24, // 31: envoy.admin.v3.ClustersConfigDump.DynamicCluster.last_updated:type_name -> google.protobuf.Timestamp - 2, // 32: envoy.admin.v3.ClustersConfigDump.DynamicCluster.error_state:type_name -> envoy.admin.v3.UpdateFailureState - 0, // 33: envoy.admin.v3.ClustersConfigDump.DynamicCluster.client_status:type_name -> envoy.admin.v3.ClientResourceStatus - 23, // 34: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig.route_config:type_name -> google.protobuf.Any - 24, // 35: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig.last_updated:type_name -> google.protobuf.Timestamp - 23, // 36: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.route_config:type_name -> google.protobuf.Any - 24, // 37: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.last_updated:type_name -> google.protobuf.Timestamp - 2, // 38: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState - 0, // 39: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus - 23, // 40: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs.scoped_route_configs:type_name -> google.protobuf.Any - 24, // 41: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs.last_updated:type_name -> google.protobuf.Timestamp - 23, // 42: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.scoped_route_configs:type_name -> google.protobuf.Any - 24, // 43: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.last_updated:type_name -> google.protobuf.Timestamp - 2, // 44: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.error_state:type_name -> 
envoy.admin.v3.UpdateFailureState - 0, // 45: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.client_status:type_name -> envoy.admin.v3.ClientResourceStatus - 24, // 46: envoy.admin.v3.SecretsConfigDump.DynamicSecret.last_updated:type_name -> google.protobuf.Timestamp - 23, // 47: envoy.admin.v3.SecretsConfigDump.DynamicSecret.secret:type_name -> google.protobuf.Any - 2, // 48: envoy.admin.v3.SecretsConfigDump.DynamicSecret.error_state:type_name -> envoy.admin.v3.UpdateFailureState - 0, // 49: envoy.admin.v3.SecretsConfigDump.DynamicSecret.client_status:type_name -> envoy.admin.v3.ClientResourceStatus - 24, // 50: envoy.admin.v3.SecretsConfigDump.StaticSecret.last_updated:type_name -> google.protobuf.Timestamp - 23, // 51: envoy.admin.v3.SecretsConfigDump.StaticSecret.secret:type_name -> google.protobuf.Any - 23, // 52: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig.endpoint_config:type_name -> google.protobuf.Any - 24, // 53: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig.last_updated:type_name -> google.protobuf.Timestamp - 23, // 54: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.endpoint_config:type_name -> google.protobuf.Any - 24, // 55: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.last_updated:type_name -> google.protobuf.Timestamp - 2, // 56: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState - 0, // 57: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus - 58, // [58:58] is the sub-list for method output_type - 58, // [58:58] is the sub-list for method input_type - 58, // [58:58] is the sub-list for extension type_name - 58, // [58:58] is the sub-list for extension extendee - 0, // [0:58] is the sub-list for field type_name -} - -func init() { file_envoy_admin_v3_config_dump_proto_init() } -func file_envoy_admin_v3_config_dump_proto_init() { - if 
File_envoy_admin_v3_config_dump_proto != nil { - return +func init() { file_envoy_admin_v3_config_dump_proto_init() } +func file_envoy_admin_v3_config_dump_proto_init() { + if File_envoy_admin_v3_config_dump_proto != nil { + return } + file_envoy_admin_v3_config_dump_shared_proto_init() if !protoimpl.UnsafeEnabled { file_envoy_admin_v3_config_dump_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ConfigDump); i { @@ -2289,18 +572,6 @@ func file_envoy_admin_v3_config_dump_proto_init() { } } file_envoy_admin_v3_config_dump_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateFailureState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BootstrapConfigDump); i { case 0: return &v.state @@ -2312,55 +583,7 @@ func file_envoy_admin_v3_config_dump_proto_init() { return nil } } - file_envoy_admin_v3_config_dump_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListenersConfigDump); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClustersConfigDump); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RoutesConfigDump); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch 
v := v.(*ScopedRoutesConfigDump); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_envoy_admin_v3_config_dump_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SecretsConfigDump); i { case 0: return &v.state @@ -2372,127 +595,7 @@ func file_envoy_admin_v3_config_dump_proto_init() { return nil } } - file_envoy_admin_v3_config_dump_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EndpointsConfigDump); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListenersConfigDump_StaticListener); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListenersConfigDump_DynamicListenerState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListenersConfigDump_DynamicListener); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClustersConfigDump_StaticCluster); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_envoy_admin_v3_config_dump_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClustersConfigDump_DynamicCluster); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RoutesConfigDump_StaticRouteConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RoutesConfigDump_DynamicRouteConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ScopedRoutesConfigDump_InlineScopedRouteConfigs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ScopedRoutesConfigDump_DynamicScopedRouteConfigs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_envoy_admin_v3_config_dump_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SecretsConfigDump_DynamicSecret); i { case 0: return &v.state @@ -2504,7 +607,7 @@ func file_envoy_admin_v3_config_dump_proto_init() { return nil } } - file_envoy_admin_v3_config_dump_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + 
file_envoy_admin_v3_config_dump_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SecretsConfigDump_StaticSecret); i { case 0: return &v.state @@ -2516,44 +619,19 @@ func file_envoy_admin_v3_config_dump_proto_init() { return nil } } - file_envoy_admin_v3_config_dump_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EndpointsConfigDump_StaticEndpointConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_admin_v3_config_dump_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EndpointsConfigDump_DynamicEndpointConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_admin_v3_config_dump_proto_rawDesc, - NumEnums: 1, - NumMessages: 22, + NumEnums: 0, + NumMessages: 5, NumExtensions: 0, NumServices: 0, }, GoTypes: file_envoy_admin_v3_config_dump_proto_goTypes, DependencyIndexes: file_envoy_admin_v3_config_dump_proto_depIdxs, - EnumInfos: file_envoy_admin_v3_config_dump_proto_enumTypes, MessageInfos: file_envoy_admin_v3_config_dump_proto_msgTypes, }.Build() File_envoy_admin_v3_config_dump_proto = out.File diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go index 9e34b25a93..57d1a77f06 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go @@ -168,170 
+168,6 @@ var _ interface { ErrorName() string } = ConfigDumpValidationError{} -// Validate checks the field values on UpdateFailureState with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *UpdateFailureState) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on UpdateFailureState with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// UpdateFailureStateMultiError, or nil if none found. -func (m *UpdateFailureState) ValidateAll() error { - return m.validate(true) -} - -func (m *UpdateFailureState) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetFailedConfiguration()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, UpdateFailureStateValidationError{ - field: "FailedConfiguration", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, UpdateFailureStateValidationError{ - field: "FailedConfiguration", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetFailedConfiguration()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return UpdateFailureStateValidationError{ - field: "FailedConfiguration", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetLastUpdateAttempt()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, UpdateFailureStateValidationError{ - field: "LastUpdateAttempt", - reason: "embedded message failed 
validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, UpdateFailureStateValidationError{ - field: "LastUpdateAttempt", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLastUpdateAttempt()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return UpdateFailureStateValidationError{ - field: "LastUpdateAttempt", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for Details - - // no validation rules for VersionInfo - - if len(errors) > 0 { - return UpdateFailureStateMultiError(errors) - } - - return nil -} - -// UpdateFailureStateMultiError is an error wrapping multiple validation errors -// returned by UpdateFailureState.ValidateAll() if the designated constraints -// aren't met. -type UpdateFailureStateMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m UpdateFailureStateMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m UpdateFailureStateMultiError) AllErrors() []error { return m } - -// UpdateFailureStateValidationError is the validation error returned by -// UpdateFailureState.Validate if the designated constraints aren't met. -type UpdateFailureStateValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e UpdateFailureStateValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e UpdateFailureStateValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. 
-func (e UpdateFailureStateValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e UpdateFailureStateValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e UpdateFailureStateValidationError) ErrorName() string { - return "UpdateFailureStateValidationError" -} - -// Error satisfies the builtin error interface -func (e UpdateFailureStateValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sUpdateFailureState.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = UpdateFailureStateValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = UpdateFailureStateValidationError{} - // Validate checks the field values on BootstrapConfigDump with the rules // defined in the proto definition for this message. If any rules are // violated, the first error encountered is returned, or nil if there are no violations. @@ -492,47 +328,79 @@ var _ interface { ErrorName() string } = BootstrapConfigDumpValidationError{} -// Validate checks the field values on ListenersConfigDump with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *ListenersConfigDump) Validate() error { +// Validate checks the field values on SecretsConfigDump with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. 
+func (m *SecretsConfigDump) Validate() error { return m.validate(false) } -// ValidateAll checks the field values on ListenersConfigDump with the rules +// ValidateAll checks the field values on SecretsConfigDump with the rules // defined in the proto definition for this message. If any rules are // violated, the result is a list of violation errors wrapped in -// ListenersConfigDumpMultiError, or nil if none found. -func (m *ListenersConfigDump) ValidateAll() error { +// SecretsConfigDumpMultiError, or nil if none found. +func (m *SecretsConfigDump) ValidateAll() error { return m.validate(true) } -func (m *ListenersConfigDump) validate(all bool) error { +func (m *SecretsConfigDump) validate(all bool) error { if m == nil { return nil } var errors []error - // no validation rules for VersionInfo + for idx, item := range m.GetStaticSecrets() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("StaticSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("StaticSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDumpValidationError{ + field: fmt.Sprintf("StaticSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } - for idx, item := range m.GetStaticListeners() { + } + + for idx, item := range m.GetDynamicActiveSecrets() { _, _ = idx, item if all { switch v := interface{}(item).(type) { case interface{ ValidateAll() error }: if err := v.ValidateAll(); err != nil { - errors = 
append(errors, ListenersConfigDumpValidationError{ - field: fmt.Sprintf("StaticListeners[%v]", idx), + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx), reason: "embedded message failed validation", cause: err, }) } case interface{ Validate() error }: if err := v.Validate(); err != nil { - errors = append(errors, ListenersConfigDumpValidationError{ - field: fmt.Sprintf("StaticListeners[%v]", idx), + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx), reason: "embedded message failed validation", cause: err, }) @@ -540,8 +408,8 @@ func (m *ListenersConfigDump) validate(all bool) error { } } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { - return ListenersConfigDumpValidationError{ - field: fmt.Sprintf("StaticListeners[%v]", idx), + return SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx), reason: "embedded message failed validation", cause: err, } @@ -550,23 +418,23 @@ func (m *ListenersConfigDump) validate(all bool) error { } - for idx, item := range m.GetDynamicListeners() { + for idx, item := range m.GetDynamicWarmingSecrets() { _, _ = idx, item if all { switch v := interface{}(item).(type) { case interface{ ValidateAll() error }: if err := v.ValidateAll(); err != nil { - errors = append(errors, ListenersConfigDumpValidationError{ - field: fmt.Sprintf("DynamicListeners[%v]", idx), + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx), reason: "embedded message failed validation", cause: err, }) } case interface{ Validate() error }: if err := v.Validate(); err != nil { - errors = append(errors, ListenersConfigDumpValidationError{ - field: fmt.Sprintf("DynamicListeners[%v]", idx), + errors = append(errors, SecretsConfigDumpValidationError{ + field: 
fmt.Sprintf("DynamicWarmingSecrets[%v]", idx), reason: "embedded message failed validation", cause: err, }) @@ -574,8 +442,8 @@ func (m *ListenersConfigDump) validate(all bool) error { } } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { - return ListenersConfigDumpValidationError{ - field: fmt.Sprintf("DynamicListeners[%v]", idx), + return SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx), reason: "embedded message failed validation", cause: err, } @@ -585,19 +453,19 @@ func (m *ListenersConfigDump) validate(all bool) error { } if len(errors) > 0 { - return ListenersConfigDumpMultiError(errors) + return SecretsConfigDumpMultiError(errors) } return nil } -// ListenersConfigDumpMultiError is an error wrapping multiple validation -// errors returned by ListenersConfigDump.ValidateAll() if the designated -// constraints aren't met. -type ListenersConfigDumpMultiError []error +// SecretsConfigDumpMultiError is an error wrapping multiple validation errors +// returned by SecretsConfigDump.ValidateAll() if the designated constraints +// aren't met. +type SecretsConfigDumpMultiError []error // Error returns a concatenation of all the error messages it wraps. -func (m ListenersConfigDumpMultiError) Error() string { +func (m SecretsConfigDumpMultiError) Error() string { var msgs []string for _, err := range m { msgs = append(msgs, err.Error()) @@ -606,11 +474,11 @@ func (m ListenersConfigDumpMultiError) Error() string { } // AllErrors returns a list of validation violation errors. -func (m ListenersConfigDumpMultiError) AllErrors() []error { return m } +func (m SecretsConfigDumpMultiError) AllErrors() []error { return m } -// ListenersConfigDumpValidationError is the validation error returned by -// ListenersConfigDump.Validate if the designated constraints aren't met. 
-type ListenersConfigDumpValidationError struct { +// SecretsConfigDumpValidationError is the validation error returned by +// SecretsConfigDump.Validate if the designated constraints aren't met. +type SecretsConfigDumpValidationError struct { field string reason string cause error @@ -618,24 +486,24 @@ type ListenersConfigDumpValidationError struct { } // Field function returns field value. -func (e ListenersConfigDumpValidationError) Field() string { return e.field } +func (e SecretsConfigDumpValidationError) Field() string { return e.field } // Reason function returns reason value. -func (e ListenersConfigDumpValidationError) Reason() string { return e.reason } +func (e SecretsConfigDumpValidationError) Reason() string { return e.reason } // Cause function returns cause value. -func (e ListenersConfigDumpValidationError) Cause() error { return e.cause } +func (e SecretsConfigDumpValidationError) Cause() error { return e.cause } // Key function returns key value. -func (e ListenersConfigDumpValidationError) Key() bool { return e.key } +func (e SecretsConfigDumpValidationError) Key() bool { return e.key } // ErrorName returns error name. 
-func (e ListenersConfigDumpValidationError) ErrorName() string { - return "ListenersConfigDumpValidationError" +func (e SecretsConfigDumpValidationError) ErrorName() string { + return "SecretsConfigDumpValidationError" } // Error satisfies the builtin error interface -func (e ListenersConfigDumpValidationError) Error() string { +func (e SecretsConfigDumpValidationError) Error() string { cause := "" if e.cause != nil { cause = fmt.Sprintf(" | caused by: %v", e.cause) @@ -647,14 +515,14 @@ func (e ListenersConfigDumpValidationError) Error() string { } return fmt.Sprintf( - "invalid %sListenersConfigDump.%s: %s%s", + "invalid %sSecretsConfigDump.%s: %s%s", key, e.field, e.reason, cause) } -var _ error = ListenersConfigDumpValidationError{} +var _ error = SecretsConfigDumpValidationError{} var _ interface { Field() string @@ -662,2830 +530,59 @@ var _ interface { Key() bool Cause() error ErrorName() string -} = ListenersConfigDumpValidationError{} +} = SecretsConfigDumpValidationError{} -// Validate checks the field values on ClustersConfigDump with the rules -// defined in the proto definition for this message. If any rules are +// Validate checks the field values on SecretsConfigDump_DynamicSecret with the +// rules defined in the proto definition for this message. If any rules are // violated, the first error encountered is returned, or nil if there are no violations. -func (m *ClustersConfigDump) Validate() error { +func (m *SecretsConfigDump_DynamicSecret) Validate() error { return m.validate(false) } -// ValidateAll checks the field values on ClustersConfigDump with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// ClustersConfigDumpMultiError, or nil if none found. -func (m *ClustersConfigDump) ValidateAll() error { +// ValidateAll checks the field values on SecretsConfigDump_DynamicSecret with +// the rules defined in the proto definition for this message. 
If any rules +// are violated, the result is a list of violation errors wrapped in +// SecretsConfigDump_DynamicSecretMultiError, or nil if none found. +func (m *SecretsConfigDump_DynamicSecret) ValidateAll() error { return m.validate(true) } -func (m *ClustersConfigDump) validate(all bool) error { +func (m *SecretsConfigDump_DynamicSecret) validate(all bool) error { if m == nil { return nil } var errors []error - // no validation rules for VersionInfo + // no validation rules for Name - for idx, item := range m.GetStaticClusters() { - _, _ = idx, item + // no validation rules for VersionInfo - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ClustersConfigDumpValidationError{ - field: fmt.Sprintf("StaticClusters[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ClustersConfigDumpValidationError{ - field: fmt.Sprintf("StaticClusters[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + case interface{ Validate() error }: if err := v.Validate(); err != nil { - return ClustersConfigDumpValidationError{ - field: fmt.Sprintf("StaticClusters[%v]", idx), + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "LastUpdated", reason: "embedded message failed validation", cause: err, - } - } - } - - } - - for idx, item := range m.GetDynamicActiveClusters() { - _, _ = idx, item - - if all { 
- switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ClustersConfigDumpValidationError{ - field: fmt.Sprintf("DynamicActiveClusters[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ClustersConfigDumpValidationError{ - field: fmt.Sprintf("DynamicActiveClusters[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ClustersConfigDumpValidationError{ - field: fmt.Sprintf("DynamicActiveClusters[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - for idx, item := range m.GetDynamicWarmingClusters() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ClustersConfigDumpValidationError{ - field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ClustersConfigDumpValidationError{ - field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ClustersConfigDumpValidationError{ - field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return ClustersConfigDumpMultiError(errors) - } - - return nil -} - -// ClustersConfigDumpMultiError is an error wrapping 
multiple validation errors -// returned by ClustersConfigDump.ValidateAll() if the designated constraints -// aren't met. -type ClustersConfigDumpMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ClustersConfigDumpMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ClustersConfigDumpMultiError) AllErrors() []error { return m } - -// ClustersConfigDumpValidationError is the validation error returned by -// ClustersConfigDump.Validate if the designated constraints aren't met. -type ClustersConfigDumpValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ClustersConfigDumpValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ClustersConfigDumpValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ClustersConfigDumpValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ClustersConfigDumpValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ClustersConfigDumpValidationError) ErrorName() string { - return "ClustersConfigDumpValidationError" -} - -// Error satisfies the builtin error interface -func (e ClustersConfigDumpValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sClustersConfigDump.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ClustersConfigDumpValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ClustersConfigDumpValidationError{} - -// Validate checks the field values on RoutesConfigDump with the rules defined -// in the proto definition for this message. If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *RoutesConfigDump) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on RoutesConfigDump with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// RoutesConfigDumpMultiError, or nil if none found. 
-func (m *RoutesConfigDump) ValidateAll() error { - return m.validate(true) -} - -func (m *RoutesConfigDump) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - for idx, item := range m.GetStaticRouteConfigs() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RoutesConfigDumpValidationError{ - field: fmt.Sprintf("StaticRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RoutesConfigDumpValidationError{ - field: fmt.Sprintf("StaticRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RoutesConfigDumpValidationError{ - field: fmt.Sprintf("StaticRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - for idx, item := range m.GetDynamicRouteConfigs() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RoutesConfigDumpValidationError{ - field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RoutesConfigDumpValidationError{ - field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RoutesConfigDumpValidationError{ - field: 
fmt.Sprintf("DynamicRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return RoutesConfigDumpMultiError(errors) - } - - return nil -} - -// RoutesConfigDumpMultiError is an error wrapping multiple validation errors -// returned by RoutesConfigDump.ValidateAll() if the designated constraints -// aren't met. -type RoutesConfigDumpMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m RoutesConfigDumpMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m RoutesConfigDumpMultiError) AllErrors() []error { return m } - -// RoutesConfigDumpValidationError is the validation error returned by -// RoutesConfigDump.Validate if the designated constraints aren't met. -type RoutesConfigDumpValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RoutesConfigDumpValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RoutesConfigDumpValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RoutesConfigDumpValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e RoutesConfigDumpValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e RoutesConfigDumpValidationError) ErrorName() string { return "RoutesConfigDumpValidationError" } - -// Error satisfies the builtin error interface -func (e RoutesConfigDumpValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRoutesConfigDump.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RoutesConfigDumpValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RoutesConfigDumpValidationError{} - -// Validate checks the field values on ScopedRoutesConfigDump with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *ScopedRoutesConfigDump) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ScopedRoutesConfigDump with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// ScopedRoutesConfigDumpMultiError, or nil if none found. 
-func (m *ScopedRoutesConfigDump) ValidateAll() error { - return m.validate(true) -} - -func (m *ScopedRoutesConfigDump) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - for idx, item := range m.GetInlineScopedRouteConfigs() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ScopedRoutesConfigDumpValidationError{ - field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ScopedRoutesConfigDumpValidationError{ - field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ScopedRoutesConfigDumpValidationError{ - field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - for idx, item := range m.GetDynamicScopedRouteConfigs() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ScopedRoutesConfigDumpValidationError{ - field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ScopedRoutesConfigDumpValidationError{ - field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil 
{ - return ScopedRoutesConfigDumpValidationError{ - field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return ScopedRoutesConfigDumpMultiError(errors) - } - - return nil -} - -// ScopedRoutesConfigDumpMultiError is an error wrapping multiple validation -// errors returned by ScopedRoutesConfigDump.ValidateAll() if the designated -// constraints aren't met. -type ScopedRoutesConfigDumpMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ScopedRoutesConfigDumpMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ScopedRoutesConfigDumpMultiError) AllErrors() []error { return m } - -// ScopedRoutesConfigDumpValidationError is the validation error returned by -// ScopedRoutesConfigDump.Validate if the designated constraints aren't met. -type ScopedRoutesConfigDumpValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ScopedRoutesConfigDumpValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ScopedRoutesConfigDumpValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ScopedRoutesConfigDumpValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ScopedRoutesConfigDumpValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ScopedRoutesConfigDumpValidationError) ErrorName() string { - return "ScopedRoutesConfigDumpValidationError" -} - -// Error satisfies the builtin error interface -func (e ScopedRoutesConfigDumpValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sScopedRoutesConfigDump.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ScopedRoutesConfigDumpValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ScopedRoutesConfigDumpValidationError{} - -// Validate checks the field values on SecretsConfigDump with the rules defined -// in the proto definition for this message. If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *SecretsConfigDump) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on SecretsConfigDump with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// SecretsConfigDumpMultiError, or nil if none found. 
-func (m *SecretsConfigDump) ValidateAll() error { - return m.validate(true) -} - -func (m *SecretsConfigDump) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - for idx, item := range m.GetStaticSecrets() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, SecretsConfigDumpValidationError{ - field: fmt.Sprintf("StaticSecrets[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, SecretsConfigDumpValidationError{ - field: fmt.Sprintf("StaticSecrets[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SecretsConfigDumpValidationError{ - field: fmt.Sprintf("StaticSecrets[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - for idx, item := range m.GetDynamicActiveSecrets() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, SecretsConfigDumpValidationError{ - field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, SecretsConfigDumpValidationError{ - field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SecretsConfigDumpValidationError{ - field: fmt.Sprintf("DynamicActiveSecrets[%v]", 
idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - for idx, item := range m.GetDynamicWarmingSecrets() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, SecretsConfigDumpValidationError{ - field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, SecretsConfigDumpValidationError{ - field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SecretsConfigDumpValidationError{ - field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return SecretsConfigDumpMultiError(errors) - } - - return nil -} - -// SecretsConfigDumpMultiError is an error wrapping multiple validation errors -// returned by SecretsConfigDump.ValidateAll() if the designated constraints -// aren't met. -type SecretsConfigDumpMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m SecretsConfigDumpMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m SecretsConfigDumpMultiError) AllErrors() []error { return m } - -// SecretsConfigDumpValidationError is the validation error returned by -// SecretsConfigDump.Validate if the designated constraints aren't met. 
-type SecretsConfigDumpValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e SecretsConfigDumpValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SecretsConfigDumpValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SecretsConfigDumpValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SecretsConfigDumpValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e SecretsConfigDumpValidationError) ErrorName() string { - return "SecretsConfigDumpValidationError" -} - -// Error satisfies the builtin error interface -func (e SecretsConfigDumpValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSecretsConfigDump.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SecretsConfigDumpValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SecretsConfigDumpValidationError{} - -// Validate checks the field values on EndpointsConfigDump with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *EndpointsConfigDump) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on EndpointsConfigDump with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// EndpointsConfigDumpMultiError, or nil if none found. 
-func (m *EndpointsConfigDump) ValidateAll() error { - return m.validate(true) -} - -func (m *EndpointsConfigDump) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - for idx, item := range m.GetStaticEndpointConfigs() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, EndpointsConfigDumpValidationError{ - field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, EndpointsConfigDumpValidationError{ - field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return EndpointsConfigDumpValidationError{ - field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - for idx, item := range m.GetDynamicEndpointConfigs() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, EndpointsConfigDumpValidationError{ - field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, EndpointsConfigDumpValidationError{ - field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return 
EndpointsConfigDumpValidationError{ - field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return EndpointsConfigDumpMultiError(errors) - } - - return nil -} - -// EndpointsConfigDumpMultiError is an error wrapping multiple validation -// errors returned by EndpointsConfigDump.ValidateAll() if the designated -// constraints aren't met. -type EndpointsConfigDumpMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m EndpointsConfigDumpMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m EndpointsConfigDumpMultiError) AllErrors() []error { return m } - -// EndpointsConfigDumpValidationError is the validation error returned by -// EndpointsConfigDump.Validate if the designated constraints aren't met. -type EndpointsConfigDumpValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e EndpointsConfigDumpValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e EndpointsConfigDumpValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e EndpointsConfigDumpValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e EndpointsConfigDumpValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e EndpointsConfigDumpValidationError) ErrorName() string { - return "EndpointsConfigDumpValidationError" -} - -// Error satisfies the builtin error interface -func (e EndpointsConfigDumpValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sEndpointsConfigDump.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = EndpointsConfigDumpValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = EndpointsConfigDumpValidationError{} - -// Validate checks the field values on ListenersConfigDump_StaticListener with -// the rules defined in the proto definition for this message. If any rules -// are violated, the first error encountered is returned, or nil if there are -// no violations. -func (m *ListenersConfigDump_StaticListener) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ListenersConfigDump_StaticListener -// with the rules defined in the proto definition for this message. If any -// rules are violated, the result is a list of violation errors wrapped in -// ListenersConfigDump_StaticListenerMultiError, or nil if none found. 
-func (m *ListenersConfigDump_StaticListener) ValidateAll() error { - return m.validate(true) -} - -func (m *ListenersConfigDump_StaticListener) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetListener()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ - field: "Listener", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ - field: "Listener", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetListener()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ListenersConfigDump_StaticListenerValidationError{ - field: "Listener", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetLastUpdated()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ListenersConfigDump_StaticListenerValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return 
ListenersConfigDump_StaticListenerMultiError(errors) - } - - return nil -} - -// ListenersConfigDump_StaticListenerMultiError is an error wrapping multiple -// validation errors returned by -// ListenersConfigDump_StaticListener.ValidateAll() if the designated -// constraints aren't met. -type ListenersConfigDump_StaticListenerMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ListenersConfigDump_StaticListenerMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ListenersConfigDump_StaticListenerMultiError) AllErrors() []error { return m } - -// ListenersConfigDump_StaticListenerValidationError is the validation error -// returned by ListenersConfigDump_StaticListener.Validate if the designated -// constraints aren't met. -type ListenersConfigDump_StaticListenerValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ListenersConfigDump_StaticListenerValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ListenersConfigDump_StaticListenerValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ListenersConfigDump_StaticListenerValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ListenersConfigDump_StaticListenerValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ListenersConfigDump_StaticListenerValidationError) ErrorName() string { - return "ListenersConfigDump_StaticListenerValidationError" -} - -// Error satisfies the builtin error interface -func (e ListenersConfigDump_StaticListenerValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sListenersConfigDump_StaticListener.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ListenersConfigDump_StaticListenerValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ListenersConfigDump_StaticListenerValidationError{} - -// Validate checks the field values on ListenersConfigDump_DynamicListenerState -// with the rules defined in the proto definition for this message. If any -// rules are violated, the first error encountered is returned, or nil if -// there are no violations. -func (m *ListenersConfigDump_DynamicListenerState) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on -// ListenersConfigDump_DynamicListenerState with the rules defined in the -// proto definition for this message. If any rules are violated, the result is -// a list of violation errors wrapped in -// ListenersConfigDump_DynamicListenerStateMultiError, or nil if none found. 
-func (m *ListenersConfigDump_DynamicListenerState) ValidateAll() error { - return m.validate(true) -} - -func (m *ListenersConfigDump_DynamicListenerState) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for VersionInfo - - if all { - switch v := interface{}(m.GetListener()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ - field: "Listener", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ - field: "Listener", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetListener()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ListenersConfigDump_DynamicListenerStateValidationError{ - field: "Listener", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetLastUpdated()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ListenersConfigDump_DynamicListenerStateValidationError{ - field: "LastUpdated", - reason: "embedded message failed 
validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return ListenersConfigDump_DynamicListenerStateMultiError(errors) - } - - return nil -} - -// ListenersConfigDump_DynamicListenerStateMultiError is an error wrapping -// multiple validation errors returned by -// ListenersConfigDump_DynamicListenerState.ValidateAll() if the designated -// constraints aren't met. -type ListenersConfigDump_DynamicListenerStateMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ListenersConfigDump_DynamicListenerStateMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ListenersConfigDump_DynamicListenerStateMultiError) AllErrors() []error { return m } - -// ListenersConfigDump_DynamicListenerStateValidationError is the validation -// error returned by ListenersConfigDump_DynamicListenerState.Validate if the -// designated constraints aren't met. -type ListenersConfigDump_DynamicListenerStateValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ListenersConfigDump_DynamicListenerStateValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ListenersConfigDump_DynamicListenerStateValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ListenersConfigDump_DynamicListenerStateValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ListenersConfigDump_DynamicListenerStateValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ListenersConfigDump_DynamicListenerStateValidationError) ErrorName() string { - return "ListenersConfigDump_DynamicListenerStateValidationError" -} - -// Error satisfies the builtin error interface -func (e ListenersConfigDump_DynamicListenerStateValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sListenersConfigDump_DynamicListenerState.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ListenersConfigDump_DynamicListenerStateValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ListenersConfigDump_DynamicListenerStateValidationError{} - -// Validate checks the field values on ListenersConfigDump_DynamicListener with -// the rules defined in the proto definition for this message. If any rules -// are violated, the first error encountered is returned, or nil if there are -// no violations. -func (m *ListenersConfigDump_DynamicListener) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ListenersConfigDump_DynamicListener -// with the rules defined in the proto definition for this message. If any -// rules are violated, the result is a list of violation errors wrapped in -// ListenersConfigDump_DynamicListenerMultiError, or nil if none found. 
-func (m *ListenersConfigDump_DynamicListener) ValidateAll() error { - return m.validate(true) -} - -func (m *ListenersConfigDump_DynamicListener) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Name - - if all { - switch v := interface{}(m.GetActiveState()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ - field: "ActiveState", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ - field: "ActiveState", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetActiveState()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ListenersConfigDump_DynamicListenerValidationError{ - field: "ActiveState", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetWarmingState()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ - field: "WarmingState", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ - field: "WarmingState", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetWarmingState()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ListenersConfigDump_DynamicListenerValidationError{ - field: "WarmingState", - reason: "embedded message failed validation", - cause: err, - } - 
} - } - - if all { - switch v := interface{}(m.GetDrainingState()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ - field: "DrainingState", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ - field: "DrainingState", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetDrainingState()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ListenersConfigDump_DynamicListenerValidationError{ - field: "DrainingState", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetErrorState()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ListenersConfigDump_DynamicListenerValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for ClientStatus - - if len(errors) > 0 { - return ListenersConfigDump_DynamicListenerMultiError(errors) - } - - return nil -} - -// ListenersConfigDump_DynamicListenerMultiError is an error wrapping multiple -// validation 
errors returned by -// ListenersConfigDump_DynamicListener.ValidateAll() if the designated -// constraints aren't met. -type ListenersConfigDump_DynamicListenerMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ListenersConfigDump_DynamicListenerMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ListenersConfigDump_DynamicListenerMultiError) AllErrors() []error { return m } - -// ListenersConfigDump_DynamicListenerValidationError is the validation error -// returned by ListenersConfigDump_DynamicListener.Validate if the designated -// constraints aren't met. -type ListenersConfigDump_DynamicListenerValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ListenersConfigDump_DynamicListenerValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ListenersConfigDump_DynamicListenerValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ListenersConfigDump_DynamicListenerValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ListenersConfigDump_DynamicListenerValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ListenersConfigDump_DynamicListenerValidationError) ErrorName() string { - return "ListenersConfigDump_DynamicListenerValidationError" -} - -// Error satisfies the builtin error interface -func (e ListenersConfigDump_DynamicListenerValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sListenersConfigDump_DynamicListener.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ListenersConfigDump_DynamicListenerValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ListenersConfigDump_DynamicListenerValidationError{} - -// Validate checks the field values on ClustersConfigDump_StaticCluster with -// the rules defined in the proto definition for this message. If any rules -// are violated, the first error encountered is returned, or nil if there are -// no violations. -func (m *ClustersConfigDump_StaticCluster) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ClustersConfigDump_StaticCluster with -// the rules defined in the proto definition for this message. If any rules -// are violated, the result is a list of violation errors wrapped in -// ClustersConfigDump_StaticClusterMultiError, or nil if none found. 
-func (m *ClustersConfigDump_StaticCluster) ValidateAll() error { - return m.validate(true) -} - -func (m *ClustersConfigDump_StaticCluster) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetCluster()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ - field: "Cluster", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ - field: "Cluster", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ClustersConfigDump_StaticClusterValidationError{ - field: "Cluster", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetLastUpdated()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ClustersConfigDump_StaticClusterValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return 
ClustersConfigDump_StaticClusterMultiError(errors) - } - - return nil -} - -// ClustersConfigDump_StaticClusterMultiError is an error wrapping multiple -// validation errors returned by -// ClustersConfigDump_StaticCluster.ValidateAll() if the designated -// constraints aren't met. -type ClustersConfigDump_StaticClusterMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ClustersConfigDump_StaticClusterMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ClustersConfigDump_StaticClusterMultiError) AllErrors() []error { return m } - -// ClustersConfigDump_StaticClusterValidationError is the validation error -// returned by ClustersConfigDump_StaticCluster.Validate if the designated -// constraints aren't met. -type ClustersConfigDump_StaticClusterValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ClustersConfigDump_StaticClusterValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ClustersConfigDump_StaticClusterValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ClustersConfigDump_StaticClusterValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ClustersConfigDump_StaticClusterValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ClustersConfigDump_StaticClusterValidationError) ErrorName() string { - return "ClustersConfigDump_StaticClusterValidationError" -} - -// Error satisfies the builtin error interface -func (e ClustersConfigDump_StaticClusterValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sClustersConfigDump_StaticCluster.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ClustersConfigDump_StaticClusterValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ClustersConfigDump_StaticClusterValidationError{} - -// Validate checks the field values on ClustersConfigDump_DynamicCluster with -// the rules defined in the proto definition for this message. If any rules -// are violated, the first error encountered is returned, or nil if there are -// no violations. -func (m *ClustersConfigDump_DynamicCluster) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ClustersConfigDump_DynamicCluster -// with the rules defined in the proto definition for this message. If any -// rules are violated, the result is a list of violation errors wrapped in -// ClustersConfigDump_DynamicClusterMultiError, or nil if none found. 
-func (m *ClustersConfigDump_DynamicCluster) ValidateAll() error { - return m.validate(true) -} - -func (m *ClustersConfigDump_DynamicCluster) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for VersionInfo - - if all { - switch v := interface{}(m.GetCluster()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ - field: "Cluster", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ - field: "Cluster", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ClustersConfigDump_DynamicClusterValidationError{ - field: "Cluster", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetLastUpdated()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ClustersConfigDump_DynamicClusterValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := 
interface{}(m.GetErrorState()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ClustersConfigDump_DynamicClusterValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for ClientStatus - - if len(errors) > 0 { - return ClustersConfigDump_DynamicClusterMultiError(errors) - } - - return nil -} - -// ClustersConfigDump_DynamicClusterMultiError is an error wrapping multiple -// validation errors returned by -// ClustersConfigDump_DynamicCluster.ValidateAll() if the designated -// constraints aren't met. -type ClustersConfigDump_DynamicClusterMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ClustersConfigDump_DynamicClusterMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ClustersConfigDump_DynamicClusterMultiError) AllErrors() []error { return m } - -// ClustersConfigDump_DynamicClusterValidationError is the validation error -// returned by ClustersConfigDump_DynamicCluster.Validate if the designated -// constraints aren't met. 
-type ClustersConfigDump_DynamicClusterValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ClustersConfigDump_DynamicClusterValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ClustersConfigDump_DynamicClusterValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ClustersConfigDump_DynamicClusterValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ClustersConfigDump_DynamicClusterValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e ClustersConfigDump_DynamicClusterValidationError) ErrorName() string { - return "ClustersConfigDump_DynamicClusterValidationError" -} - -// Error satisfies the builtin error interface -func (e ClustersConfigDump_DynamicClusterValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sClustersConfigDump_DynamicCluster.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ClustersConfigDump_DynamicClusterValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ClustersConfigDump_DynamicClusterValidationError{} - -// Validate checks the field values on RoutesConfigDump_StaticRouteConfig with -// the rules defined in the proto definition for this message. If any rules -// are violated, the first error encountered is returned, or nil if there are -// no violations. -func (m *RoutesConfigDump_StaticRouteConfig) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on RoutesConfigDump_StaticRouteConfig -// with the rules defined in the proto definition for this message. 
If any -// rules are violated, the result is a list of violation errors wrapped in -// RoutesConfigDump_StaticRouteConfigMultiError, or nil if none found. -func (m *RoutesConfigDump_StaticRouteConfig) ValidateAll() error { - return m.validate(true) -} - -func (m *RoutesConfigDump_StaticRouteConfig) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetRouteConfig()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ - field: "RouteConfig", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ - field: "RouteConfig", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RoutesConfigDump_StaticRouteConfigValidationError{ - field: "RouteConfig", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetLastUpdated()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return 
RoutesConfigDump_StaticRouteConfigValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return RoutesConfigDump_StaticRouteConfigMultiError(errors) - } - - return nil -} - -// RoutesConfigDump_StaticRouteConfigMultiError is an error wrapping multiple -// validation errors returned by -// RoutesConfigDump_StaticRouteConfig.ValidateAll() if the designated -// constraints aren't met. -type RoutesConfigDump_StaticRouteConfigMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m RoutesConfigDump_StaticRouteConfigMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m RoutesConfigDump_StaticRouteConfigMultiError) AllErrors() []error { return m } - -// RoutesConfigDump_StaticRouteConfigValidationError is the validation error -// returned by RoutesConfigDump_StaticRouteConfig.Validate if the designated -// constraints aren't met. -type RoutesConfigDump_StaticRouteConfigValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RoutesConfigDump_StaticRouteConfigValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RoutesConfigDump_StaticRouteConfigValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RoutesConfigDump_StaticRouteConfigValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e RoutesConfigDump_StaticRouteConfigValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e RoutesConfigDump_StaticRouteConfigValidationError) ErrorName() string { - return "RoutesConfigDump_StaticRouteConfigValidationError" -} - -// Error satisfies the builtin error interface -func (e RoutesConfigDump_StaticRouteConfigValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRoutesConfigDump_StaticRouteConfig.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RoutesConfigDump_StaticRouteConfigValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RoutesConfigDump_StaticRouteConfigValidationError{} - -// Validate checks the field values on RoutesConfigDump_DynamicRouteConfig with -// the rules defined in the proto definition for this message. If any rules -// are violated, the first error encountered is returned, or nil if there are -// no violations. -func (m *RoutesConfigDump_DynamicRouteConfig) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on RoutesConfigDump_DynamicRouteConfig -// with the rules defined in the proto definition for this message. If any -// rules are violated, the result is a list of violation errors wrapped in -// RoutesConfigDump_DynamicRouteConfigMultiError, or nil if none found. 
-func (m *RoutesConfigDump_DynamicRouteConfig) ValidateAll() error { - return m.validate(true) -} - -func (m *RoutesConfigDump_DynamicRouteConfig) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for VersionInfo - - if all { - switch v := interface{}(m.GetRouteConfig()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ - field: "RouteConfig", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ - field: "RouteConfig", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RoutesConfigDump_DynamicRouteConfigValidationError{ - field: "RouteConfig", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetLastUpdated()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RoutesConfigDump_DynamicRouteConfigValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - } 
- } - } - - if all { - switch v := interface{}(m.GetErrorState()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RoutesConfigDump_DynamicRouteConfigValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for ClientStatus - - if len(errors) > 0 { - return RoutesConfigDump_DynamicRouteConfigMultiError(errors) - } - - return nil -} - -// RoutesConfigDump_DynamicRouteConfigMultiError is an error wrapping multiple -// validation errors returned by -// RoutesConfigDump_DynamicRouteConfig.ValidateAll() if the designated -// constraints aren't met. -type RoutesConfigDump_DynamicRouteConfigMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m RoutesConfigDump_DynamicRouteConfigMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m RoutesConfigDump_DynamicRouteConfigMultiError) AllErrors() []error { return m } - -// RoutesConfigDump_DynamicRouteConfigValidationError is the validation error -// returned by RoutesConfigDump_DynamicRouteConfig.Validate if the designated -// constraints aren't met. 
-type RoutesConfigDump_DynamicRouteConfigValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RoutesConfigDump_DynamicRouteConfigValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RoutesConfigDump_DynamicRouteConfigValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RoutesConfigDump_DynamicRouteConfigValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e RoutesConfigDump_DynamicRouteConfigValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e RoutesConfigDump_DynamicRouteConfigValidationError) ErrorName() string { - return "RoutesConfigDump_DynamicRouteConfigValidationError" -} - -// Error satisfies the builtin error interface -func (e RoutesConfigDump_DynamicRouteConfigValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRoutesConfigDump_DynamicRouteConfig.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RoutesConfigDump_DynamicRouteConfigValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RoutesConfigDump_DynamicRouteConfigValidationError{} - -// Validate checks the field values on -// ScopedRoutesConfigDump_InlineScopedRouteConfigs with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. 
-func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on -// ScopedRoutesConfigDump_InlineScopedRouteConfigs with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in -// ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError, or nil if none found. -func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) ValidateAll() error { - return m.validate(true) -} - -func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Name - - for idx, item := range m.GetScopedRouteConfigs() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ - field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ - field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ - field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if all { - switch v := interface{}(m.GetLastUpdated()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, 
ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError(errors) - } - - return nil -} - -// ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError is an error -// wrapping multiple validation errors returned by -// ScopedRoutesConfigDump_InlineScopedRouteConfigs.ValidateAll() if the -// designated constraints aren't met. -type ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError) AllErrors() []error { return m } - -// ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError is the -// validation error returned by -// ScopedRoutesConfigDump_InlineScopedRouteConfigs.Validate if the designated -// constraints aren't met. -type ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Field() string { - return e.field -} - -// Reason function returns reason value. -func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Reason() string { - return e.reason -} - -// Cause function returns cause value. -func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) ErrorName() string { - return "ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError" -} - -// Error satisfies the builtin error interface -func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sScopedRoutesConfigDump_InlineScopedRouteConfigs.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{} - -// Validate checks the field values on -// ScopedRoutesConfigDump_DynamicScopedRouteConfigs with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on -// ScopedRoutesConfigDump_DynamicScopedRouteConfigs with the rules defined in -// the proto definition for this message. 
If any rules are violated, the -// result is a list of violation errors wrapped in -// ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError, or nil if none found. -func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ValidateAll() error { - return m.validate(true) -} - -func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Name - - // no validation rules for VersionInfo - - for idx, item := range m.GetScopedRouteConfigs() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ - field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ - field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ - field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if all { - switch v := interface{}(m.GetLastUpdated()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, 
ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetErrorState()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for ClientStatus - - if len(errors) > 0 { - return ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError(errors) - } - - return nil -} - -// ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError is an error -// wrapping multiple validation errors returned by -// ScopedRoutesConfigDump_DynamicScopedRouteConfigs.ValidateAll() if the -// designated constraints aren't met. -type ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError) AllErrors() []error { return m } - -// ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError is the -// validation error returned by -// ScopedRoutesConfigDump_DynamicScopedRouteConfigs.Validate if the designated -// constraints aren't met. -type ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Field() string { - return e.field -} - -// Reason function returns reason value. -func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Reason() string { - return e.reason -} - -// Cause function returns cause value. -func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Cause() error { - return e.cause -} - -// Key function returns key value. -func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) ErrorName() string { - return "ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError" -} - -// Error satisfies the builtin error interface -func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sScopedRoutesConfigDump_DynamicScopedRouteConfigs.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{} - -// Validate checks the field values on SecretsConfigDump_DynamicSecret with the -// rules defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *SecretsConfigDump_DynamicSecret) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on SecretsConfigDump_DynamicSecret with -// the rules defined in the proto definition for this message. If any rules -// are violated, the result is a list of violation errors wrapped in -// SecretsConfigDump_DynamicSecretMultiError, or nil if none found. 
-func (m *SecretsConfigDump_DynamicSecret) ValidateAll() error { - return m.validate(true) -} - -func (m *SecretsConfigDump_DynamicSecret) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Name - - // no validation rules for VersionInfo - - if all { - switch v := interface{}(m.GetLastUpdated()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SecretsConfigDump_DynamicSecretValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetSecret()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ - field: "Secret", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ - field: "Secret", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetSecret()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SecretsConfigDump_DynamicSecretValidationError{ - field: "Secret", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { 
- switch v := interface{}(m.GetErrorState()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SecretsConfigDump_DynamicSecretValidationError{ - field: "ErrorState", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for ClientStatus - - if len(errors) > 0 { - return SecretsConfigDump_DynamicSecretMultiError(errors) - } - - return nil -} - -// SecretsConfigDump_DynamicSecretMultiError is an error wrapping multiple -// validation errors returned by SecretsConfigDump_DynamicSecret.ValidateAll() -// if the designated constraints aren't met. -type SecretsConfigDump_DynamicSecretMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m SecretsConfigDump_DynamicSecretMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m SecretsConfigDump_DynamicSecretMultiError) AllErrors() []error { return m } - -// SecretsConfigDump_DynamicSecretValidationError is the validation error -// returned by SecretsConfigDump_DynamicSecret.Validate if the designated -// constraints aren't met. 
-type SecretsConfigDump_DynamicSecretValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e SecretsConfigDump_DynamicSecretValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SecretsConfigDump_DynamicSecretValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SecretsConfigDump_DynamicSecretValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SecretsConfigDump_DynamicSecretValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e SecretsConfigDump_DynamicSecretValidationError) ErrorName() string { - return "SecretsConfigDump_DynamicSecretValidationError" -} - -// Error satisfies the builtin error interface -func (e SecretsConfigDump_DynamicSecretValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSecretsConfigDump_DynamicSecret.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SecretsConfigDump_DynamicSecretValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SecretsConfigDump_DynamicSecretValidationError{} - -// Validate checks the field values on SecretsConfigDump_StaticSecret with the -// rules defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *SecretsConfigDump_StaticSecret) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on SecretsConfigDump_StaticSecret with -// the rules defined in the proto definition for this message. 
If any rules -// are violated, the result is a list of violation errors wrapped in -// SecretsConfigDump_StaticSecretMultiError, or nil if none found. -func (m *SecretsConfigDump_StaticSecret) ValidateAll() error { - return m.validate(true) -} - -func (m *SecretsConfigDump_StaticSecret) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Name - - if all { - switch v := interface{}(m.GetLastUpdated()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SecretsConfigDump_StaticSecretValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDump_DynamicSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, } } } @@ -3494,7 +591,7 @@ func (m *SecretsConfigDump_StaticSecret) validate(all bool) error { switch v := interface{}(m.GetSecret()).(type) { case interface{ ValidateAll() error }: if err := v.ValidateAll(); err != nil { - errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ field: "Secret", reason: "embedded message failed validation", cause: err, @@ -3502,7 +599,7 @@ 
func (m *SecretsConfigDump_StaticSecret) validate(all bool) error { } case interface{ Validate() error }: if err := v.Validate(); err != nil { - errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ field: "Secret", reason: "embedded message failed validation", cause: err, @@ -3511,7 +608,7 @@ func (m *SecretsConfigDump_StaticSecret) validate(all bool) error { } } else if v, ok := interface{}(m.GetSecret()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { - return SecretsConfigDump_StaticSecretValidationError{ + return SecretsConfigDump_DynamicSecretValidationError{ field: "Secret", reason: "embedded message failed validation", cause: err, @@ -3519,184 +616,51 @@ func (m *SecretsConfigDump_StaticSecret) validate(all bool) error { } } - if len(errors) > 0 { - return SecretsConfigDump_StaticSecretMultiError(errors) - } - - return nil -} - -// SecretsConfigDump_StaticSecretMultiError is an error wrapping multiple -// validation errors returned by SecretsConfigDump_StaticSecret.ValidateAll() -// if the designated constraints aren't met. -type SecretsConfigDump_StaticSecretMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m SecretsConfigDump_StaticSecretMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m SecretsConfigDump_StaticSecretMultiError) AllErrors() []error { return m } - -// SecretsConfigDump_StaticSecretValidationError is the validation error -// returned by SecretsConfigDump_StaticSecret.Validate if the designated -// constraints aren't met. -type SecretsConfigDump_StaticSecretValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e SecretsConfigDump_StaticSecretValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SecretsConfigDump_StaticSecretValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SecretsConfigDump_StaticSecretValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SecretsConfigDump_StaticSecretValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e SecretsConfigDump_StaticSecretValidationError) ErrorName() string { - return "SecretsConfigDump_StaticSecretValidationError" -} - -// Error satisfies the builtin error interface -func (e SecretsConfigDump_StaticSecretValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSecretsConfigDump_StaticSecret.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SecretsConfigDump_StaticSecretValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SecretsConfigDump_StaticSecretValidationError{} - -// Validate checks the field values on EndpointsConfigDump_StaticEndpointConfig -// with the rules defined in the proto definition for this message. If any -// rules are violated, the first error encountered is returned, or nil if -// there are no violations. -func (m *EndpointsConfigDump_StaticEndpointConfig) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on -// EndpointsConfigDump_StaticEndpointConfig with the rules defined in the -// proto definition for this message. If any rules are violated, the result is -// a list of violation errors wrapped in -// EndpointsConfigDump_StaticEndpointConfigMultiError, or nil if none found. 
-func (m *EndpointsConfigDump_StaticEndpointConfig) ValidateAll() error { - return m.validate(true) -} - -func (m *EndpointsConfigDump_StaticEndpointConfig) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - if all { - switch v := interface{}(m.GetEndpointConfig()).(type) { + switch v := interface{}(m.GetErrorState()).(type) { case interface{ ValidateAll() error }: if err := v.ValidateAll(); err != nil { - errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ - field: "EndpointConfig", + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "ErrorState", reason: "embedded message failed validation", cause: err, }) } case interface{ Validate() error }: if err := v.Validate(); err != nil { - errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ - field: "EndpointConfig", + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "ErrorState", reason: "embedded message failed validation", cause: err, }) } } - } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok { + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { - return EndpointsConfigDump_StaticEndpointConfigValidationError{ - field: "EndpointConfig", + return SecretsConfigDump_DynamicSecretValidationError{ + field: "ErrorState", reason: "embedded message failed validation", cause: err, } } } - if all { - switch v := interface{}(m.GetLastUpdated()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, 
EndpointsConfigDump_StaticEndpointConfigValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return EndpointsConfigDump_StaticEndpointConfigValidationError{ - field: "LastUpdated", - reason: "embedded message failed validation", - cause: err, - } - } - } + // no validation rules for ClientStatus if len(errors) > 0 { - return EndpointsConfigDump_StaticEndpointConfigMultiError(errors) + return SecretsConfigDump_DynamicSecretMultiError(errors) } return nil } -// EndpointsConfigDump_StaticEndpointConfigMultiError is an error wrapping -// multiple validation errors returned by -// EndpointsConfigDump_StaticEndpointConfig.ValidateAll() if the designated -// constraints aren't met. -type EndpointsConfigDump_StaticEndpointConfigMultiError []error +// SecretsConfigDump_DynamicSecretMultiError is an error wrapping multiple +// validation errors returned by SecretsConfigDump_DynamicSecret.ValidateAll() +// if the designated constraints aren't met. +type SecretsConfigDump_DynamicSecretMultiError []error // Error returns a concatenation of all the error messages it wraps. -func (m EndpointsConfigDump_StaticEndpointConfigMultiError) Error() string { +func (m SecretsConfigDump_DynamicSecretMultiError) Error() string { var msgs []string for _, err := range m { msgs = append(msgs, err.Error()) @@ -3705,12 +669,12 @@ func (m EndpointsConfigDump_StaticEndpointConfigMultiError) Error() string { } // AllErrors returns a list of validation violation errors. 
-func (m EndpointsConfigDump_StaticEndpointConfigMultiError) AllErrors() []error { return m } +func (m SecretsConfigDump_DynamicSecretMultiError) AllErrors() []error { return m } -// EndpointsConfigDump_StaticEndpointConfigValidationError is the validation -// error returned by EndpointsConfigDump_StaticEndpointConfig.Validate if the -// designated constraints aren't met. -type EndpointsConfigDump_StaticEndpointConfigValidationError struct { +// SecretsConfigDump_DynamicSecretValidationError is the validation error +// returned by SecretsConfigDump_DynamicSecret.Validate if the designated +// constraints aren't met. +type SecretsConfigDump_DynamicSecretValidationError struct { field string reason string cause error @@ -3718,24 +682,24 @@ type EndpointsConfigDump_StaticEndpointConfigValidationError struct { } // Field function returns field value. -func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Field() string { return e.field } +func (e SecretsConfigDump_DynamicSecretValidationError) Field() string { return e.field } // Reason function returns reason value. -func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Reason() string { return e.reason } +func (e SecretsConfigDump_DynamicSecretValidationError) Reason() string { return e.reason } // Cause function returns cause value. -func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Cause() error { return e.cause } +func (e SecretsConfigDump_DynamicSecretValidationError) Cause() error { return e.cause } // Key function returns key value. -func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Key() bool { return e.key } +func (e SecretsConfigDump_DynamicSecretValidationError) Key() bool { return e.key } // ErrorName returns error name. 
-func (e EndpointsConfigDump_StaticEndpointConfigValidationError) ErrorName() string { - return "EndpointsConfigDump_StaticEndpointConfigValidationError" +func (e SecretsConfigDump_DynamicSecretValidationError) ErrorName() string { + return "SecretsConfigDump_DynamicSecretValidationError" } // Error satisfies the builtin error interface -func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Error() string { +func (e SecretsConfigDump_DynamicSecretValidationError) Error() string { cause := "" if e.cause != nil { cause = fmt.Sprintf(" | caused by: %v", e.cause) @@ -3747,14 +711,14 @@ func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Error() string } return fmt.Sprintf( - "invalid %sEndpointsConfigDump_StaticEndpointConfig.%s: %s%s", + "invalid %sSecretsConfigDump_DynamicSecret.%s: %s%s", key, e.field, e.reason, cause) } -var _ error = EndpointsConfigDump_StaticEndpointConfigValidationError{} +var _ error = SecretsConfigDump_DynamicSecretValidationError{} var _ interface { Field() string @@ -3762,68 +726,37 @@ var _ interface { Key() bool Cause() error ErrorName() string -} = EndpointsConfigDump_StaticEndpointConfigValidationError{} +} = SecretsConfigDump_DynamicSecretValidationError{} -// Validate checks the field values on -// EndpointsConfigDump_DynamicEndpointConfig with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *EndpointsConfigDump_DynamicEndpointConfig) Validate() error { +// Validate checks the field values on SecretsConfigDump_StaticSecret with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *SecretsConfigDump_StaticSecret) Validate() error { return m.validate(false) } -// ValidateAll checks the field values on -// EndpointsConfigDump_DynamicEndpointConfig with the rules defined in the -// proto definition for this message. If any rules are violated, the result is -// a list of violation errors wrapped in -// EndpointsConfigDump_DynamicEndpointConfigMultiError, or nil if none found. -func (m *EndpointsConfigDump_DynamicEndpointConfig) ValidateAll() error { +// ValidateAll checks the field values on SecretsConfigDump_StaticSecret with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// SecretsConfigDump_StaticSecretMultiError, or nil if none found. +func (m *SecretsConfigDump_StaticSecret) ValidateAll() error { return m.validate(true) } -func (m *EndpointsConfigDump_DynamicEndpointConfig) validate(all bool) error { +func (m *SecretsConfigDump_StaticSecret) validate(all bool) error { if m == nil { return nil } var errors []error - // no validation rules for VersionInfo - - if all { - switch v := interface{}(m.GetEndpointConfig()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ - field: "EndpointConfig", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ - field: "EndpointConfig", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return EndpointsConfigDump_DynamicEndpointConfigValidationError{ - field: "EndpointConfig", - reason: "embedded message failed validation", - cause: err, - } - } 
- } + // no validation rules for Name if all { switch v := interface{}(m.GetLastUpdated()).(type) { case interface{ ValidateAll() error }: if err := v.ValidateAll(); err != nil { - errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ field: "LastUpdated", reason: "embedded message failed validation", cause: err, @@ -3831,7 +764,7 @@ func (m *EndpointsConfigDump_DynamicEndpointConfig) validate(all bool) error { } case interface{ Validate() error }: if err := v.Validate(); err != nil { - errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ field: "LastUpdated", reason: "embedded message failed validation", cause: err, @@ -3840,7 +773,7 @@ func (m *EndpointsConfigDump_DynamicEndpointConfig) validate(all bool) error { } } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { - return EndpointsConfigDump_DynamicEndpointConfigValidationError{ + return SecretsConfigDump_StaticSecretValidationError{ field: "LastUpdated", reason: "embedded message failed validation", cause: err, @@ -3849,51 +782,48 @@ func (m *EndpointsConfigDump_DynamicEndpointConfig) validate(all bool) error { } if all { - switch v := interface{}(m.GetErrorState()).(type) { + switch v := interface{}(m.GetSecret()).(type) { case interface{ ValidateAll() error }: if err := v.ValidateAll(); err != nil { - errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ - field: "ErrorState", + errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ + field: "Secret", reason: "embedded message failed validation", cause: err, }) } case interface{ Validate() error }: if err := v.Validate(); err != nil { - errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ - field: "ErrorState", + 
errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ + field: "Secret", reason: "embedded message failed validation", cause: err, }) } } - } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + } else if v, ok := interface{}(m.GetSecret()).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { - return EndpointsConfigDump_DynamicEndpointConfigValidationError{ - field: "ErrorState", + return SecretsConfigDump_StaticSecretValidationError{ + field: "Secret", reason: "embedded message failed validation", cause: err, } } } - // no validation rules for ClientStatus - if len(errors) > 0 { - return EndpointsConfigDump_DynamicEndpointConfigMultiError(errors) + return SecretsConfigDump_StaticSecretMultiError(errors) } return nil } -// EndpointsConfigDump_DynamicEndpointConfigMultiError is an error wrapping -// multiple validation errors returned by -// EndpointsConfigDump_DynamicEndpointConfig.ValidateAll() if the designated -// constraints aren't met. -type EndpointsConfigDump_DynamicEndpointConfigMultiError []error +// SecretsConfigDump_StaticSecretMultiError is an error wrapping multiple +// validation errors returned by SecretsConfigDump_StaticSecret.ValidateAll() +// if the designated constraints aren't met. +type SecretsConfigDump_StaticSecretMultiError []error // Error returns a concatenation of all the error messages it wraps. -func (m EndpointsConfigDump_DynamicEndpointConfigMultiError) Error() string { +func (m SecretsConfigDump_StaticSecretMultiError) Error() string { var msgs []string for _, err := range m { msgs = append(msgs, err.Error()) @@ -3902,12 +832,12 @@ func (m EndpointsConfigDump_DynamicEndpointConfigMultiError) Error() string { } // AllErrors returns a list of validation violation errors. 
-func (m EndpointsConfigDump_DynamicEndpointConfigMultiError) AllErrors() []error { return m } +func (m SecretsConfigDump_StaticSecretMultiError) AllErrors() []error { return m } -// EndpointsConfigDump_DynamicEndpointConfigValidationError is the validation -// error returned by EndpointsConfigDump_DynamicEndpointConfig.Validate if the -// designated constraints aren't met. -type EndpointsConfigDump_DynamicEndpointConfigValidationError struct { +// SecretsConfigDump_StaticSecretValidationError is the validation error +// returned by SecretsConfigDump_StaticSecret.Validate if the designated +// constraints aren't met. +type SecretsConfigDump_StaticSecretValidationError struct { field string reason string cause error @@ -3915,24 +845,24 @@ type EndpointsConfigDump_DynamicEndpointConfigValidationError struct { } // Field function returns field value. -func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Field() string { return e.field } +func (e SecretsConfigDump_StaticSecretValidationError) Field() string { return e.field } // Reason function returns reason value. -func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Reason() string { return e.reason } +func (e SecretsConfigDump_StaticSecretValidationError) Reason() string { return e.reason } // Cause function returns cause value. -func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Cause() error { return e.cause } +func (e SecretsConfigDump_StaticSecretValidationError) Cause() error { return e.cause } // Key function returns key value. -func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Key() bool { return e.key } +func (e SecretsConfigDump_StaticSecretValidationError) Key() bool { return e.key } // ErrorName returns error name. 
-func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) ErrorName() string { - return "EndpointsConfigDump_DynamicEndpointConfigValidationError" +func (e SecretsConfigDump_StaticSecretValidationError) ErrorName() string { + return "SecretsConfigDump_StaticSecretValidationError" } // Error satisfies the builtin error interface -func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Error() string { +func (e SecretsConfigDump_StaticSecretValidationError) Error() string { cause := "" if e.cause != nil { cause = fmt.Sprintf(" | caused by: %v", e.cause) @@ -3944,14 +874,14 @@ func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Error() string } return fmt.Sprintf( - "invalid %sEndpointsConfigDump_DynamicEndpointConfig.%s: %s%s", + "invalid %sSecretsConfigDump_StaticSecret.%s: %s%s", key, e.field, e.reason, cause) } -var _ error = EndpointsConfigDump_DynamicEndpointConfigValidationError{} +var _ error = SecretsConfigDump_StaticSecretValidationError{} var _ interface { Field() string @@ -3959,4 +889,4 @@ var _ interface { Key() bool Cause() error ErrorName() string -} = EndpointsConfigDump_DynamicEndpointConfigValidationError{} +} = SecretsConfigDump_StaticSecretValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go new file mode 100644 index 0000000000..5673dd972f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go @@ -0,0 +1,2242 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: envoy/admin/v3/config_dump_shared.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + any1 "github.com/golang/protobuf/ptypes/any" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Resource status from the view of a xDS client, which tells the synchronization +// status between the xDS client and the xDS server. +type ClientResourceStatus int32 + +const ( + // Resource status is not available/unknown. + ClientResourceStatus_UNKNOWN ClientResourceStatus = 0 + // Client requested this resource but hasn't received any update from management + // server. The client will not fail requests, but will queue them until update + // arrives or the client times out waiting for the resource. + ClientResourceStatus_REQUESTED ClientResourceStatus = 1 + // This resource has been requested by the client but has either not been + // delivered by the server or was previously delivered by the server and then + // subsequently removed from resources provided by the server. For more + // information, please refer to the :ref:`"Knowing When a Requested Resource + // Does Not Exist" ` section. + ClientResourceStatus_DOES_NOT_EXIST ClientResourceStatus = 2 + // Client received this resource and replied with ACK. + ClientResourceStatus_ACKED ClientResourceStatus = 3 + // Client received this resource and replied with NACK. + ClientResourceStatus_NACKED ClientResourceStatus = 4 +) + +// Enum value maps for ClientResourceStatus. 
+var ( + ClientResourceStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "REQUESTED", + 2: "DOES_NOT_EXIST", + 3: "ACKED", + 4: "NACKED", + } + ClientResourceStatus_value = map[string]int32{ + "UNKNOWN": 0, + "REQUESTED": 1, + "DOES_NOT_EXIST": 2, + "ACKED": 3, + "NACKED": 4, + } +) + +func (x ClientResourceStatus) Enum() *ClientResourceStatus { + p := new(ClientResourceStatus) + *p = x + return p +} + +func (x ClientResourceStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientResourceStatus) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_admin_v3_config_dump_shared_proto_enumTypes[0].Descriptor() +} + +func (ClientResourceStatus) Type() protoreflect.EnumType { + return &file_envoy_admin_v3_config_dump_shared_proto_enumTypes[0] +} + +func (x ClientResourceStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientResourceStatus.Descriptor instead. +func (ClientResourceStatus) EnumDescriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{0} +} + +type UpdateFailureState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // What the component configuration would have been if the update had succeeded. + // This field may not be populated by xDS clients due to storage overhead. + FailedConfiguration *any1.Any `protobuf:"bytes,1,opt,name=failed_configuration,json=failedConfiguration,proto3" json:"failed_configuration,omitempty"` + // Time of the latest failed update attempt. + LastUpdateAttempt *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_update_attempt,json=lastUpdateAttempt,proto3" json:"last_update_attempt,omitempty"` + // Details about the last failed update attempt. + Details string `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"` + // This is the version of the rejected resource. 
+ // [#not-implemented-hide:] + VersionInfo string `protobuf:"bytes,4,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` +} + +func (x *UpdateFailureState) Reset() { + *x = UpdateFailureState{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateFailureState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateFailureState) ProtoMessage() {} + +func (x *UpdateFailureState) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateFailureState.ProtoReflect.Descriptor instead. +func (*UpdateFailureState) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{0} +} + +func (x *UpdateFailureState) GetFailedConfiguration() *any1.Any { + if x != nil { + return x.FailedConfiguration + } + return nil +} + +func (x *UpdateFailureState) GetLastUpdateAttempt() *timestamp.Timestamp { + if x != nil { + return x.LastUpdateAttempt + } + return nil +} + +func (x *UpdateFailureState) GetDetails() string { + if x != nil { + return x.Details + } + return "" +} + +func (x *UpdateFailureState) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +// Envoy's listener manager fills this message with all currently known listeners. Listener +// configuration information can be used to recreate an Envoy configuration by populating all +// listeners as static listeners or by returning them in a LDS response. 
+type ListenersConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the :ref:`version_info ` in the + // last processed LDS discovery response. If there are only static bootstrap listeners, this field + // will be "". + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The statically loaded listener configs. + StaticListeners []*ListenersConfigDump_StaticListener `protobuf:"bytes,2,rep,name=static_listeners,json=staticListeners,proto3" json:"static_listeners,omitempty"` + // State for any warming, active, or draining listeners. + DynamicListeners []*ListenersConfigDump_DynamicListener `protobuf:"bytes,3,rep,name=dynamic_listeners,json=dynamicListeners,proto3" json:"dynamic_listeners,omitempty"` +} + +func (x *ListenersConfigDump) Reset() { + *x = ListenersConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenersConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenersConfigDump) ProtoMessage() {} + +func (x *ListenersConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenersConfigDump.ProtoReflect.Descriptor instead. 
+func (*ListenersConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1} +} + +func (x *ListenersConfigDump) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ListenersConfigDump) GetStaticListeners() []*ListenersConfigDump_StaticListener { + if x != nil { + return x.StaticListeners + } + return nil +} + +func (x *ListenersConfigDump) GetDynamicListeners() []*ListenersConfigDump_DynamicListener { + if x != nil { + return x.DynamicListeners + } + return nil +} + +// Envoy's cluster manager fills this message with all currently known clusters. Cluster +// configuration information can be used to recreate an Envoy configuration by populating all +// clusters as static clusters or by returning them in a CDS response. +type ClustersConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the :ref:`version_info ` in the + // last processed CDS discovery response. If there are only static bootstrap clusters, this field + // will be "". + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The statically loaded cluster configs. + StaticClusters []*ClustersConfigDump_StaticCluster `protobuf:"bytes,2,rep,name=static_clusters,json=staticClusters,proto3" json:"static_clusters,omitempty"` + // The dynamically loaded active clusters. These are clusters that are available to service + // data plane traffic. + DynamicActiveClusters []*ClustersConfigDump_DynamicCluster `protobuf:"bytes,3,rep,name=dynamic_active_clusters,json=dynamicActiveClusters,proto3" json:"dynamic_active_clusters,omitempty"` + // The dynamically loaded warming clusters. These are clusters that are currently undergoing + // warming in preparation to service data plane traffic. 
Note that if attempting to recreate an + // Envoy configuration from a configuration dump, the warming clusters should generally be + // discarded. + DynamicWarmingClusters []*ClustersConfigDump_DynamicCluster `protobuf:"bytes,4,rep,name=dynamic_warming_clusters,json=dynamicWarmingClusters,proto3" json:"dynamic_warming_clusters,omitempty"` +} + +func (x *ClustersConfigDump) Reset() { + *x = ClustersConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClustersConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClustersConfigDump) ProtoMessage() {} + +func (x *ClustersConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClustersConfigDump.ProtoReflect.Descriptor instead. 
+func (*ClustersConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2} +} + +func (x *ClustersConfigDump) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ClustersConfigDump) GetStaticClusters() []*ClustersConfigDump_StaticCluster { + if x != nil { + return x.StaticClusters + } + return nil +} + +func (x *ClustersConfigDump) GetDynamicActiveClusters() []*ClustersConfigDump_DynamicCluster { + if x != nil { + return x.DynamicActiveClusters + } + return nil +} + +func (x *ClustersConfigDump) GetDynamicWarmingClusters() []*ClustersConfigDump_DynamicCluster { + if x != nil { + return x.DynamicWarmingClusters + } + return nil +} + +// Envoy's RDS implementation fills this message with all currently loaded routes, as described by +// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration +// or defined inline while configuring listeners are separated from those configured dynamically via RDS. +// Route configuration information can be used to recreate an Envoy configuration by populating all routes +// as static routes or by returning them in RDS responses. +type RoutesConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The statically loaded route configs. + StaticRouteConfigs []*RoutesConfigDump_StaticRouteConfig `protobuf:"bytes,2,rep,name=static_route_configs,json=staticRouteConfigs,proto3" json:"static_route_configs,omitempty"` + // The dynamically loaded route configs. 
+ DynamicRouteConfigs []*RoutesConfigDump_DynamicRouteConfig `protobuf:"bytes,3,rep,name=dynamic_route_configs,json=dynamicRouteConfigs,proto3" json:"dynamic_route_configs,omitempty"` +} + +func (x *RoutesConfigDump) Reset() { + *x = RoutesConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutesConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutesConfigDump) ProtoMessage() {} + +func (x *RoutesConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutesConfigDump.ProtoReflect.Descriptor instead. +func (*RoutesConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3} +} + +func (x *RoutesConfigDump) GetStaticRouteConfigs() []*RoutesConfigDump_StaticRouteConfig { + if x != nil { + return x.StaticRouteConfigs + } + return nil +} + +func (x *RoutesConfigDump) GetDynamicRouteConfigs() []*RoutesConfigDump_DynamicRouteConfig { + if x != nil { + return x.DynamicRouteConfigs + } + return nil +} + +// Envoy's scoped RDS implementation fills this message with all currently loaded route +// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both +// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the +// dynamically obtained scopes via the SRDS API. +type ScopedRoutesConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The statically loaded scoped route configs. 
+ InlineScopedRouteConfigs []*ScopedRoutesConfigDump_InlineScopedRouteConfigs `protobuf:"bytes,1,rep,name=inline_scoped_route_configs,json=inlineScopedRouteConfigs,proto3" json:"inline_scoped_route_configs,omitempty"` + // The dynamically loaded scoped route configs. + DynamicScopedRouteConfigs []*ScopedRoutesConfigDump_DynamicScopedRouteConfigs `protobuf:"bytes,2,rep,name=dynamic_scoped_route_configs,json=dynamicScopedRouteConfigs,proto3" json:"dynamic_scoped_route_configs,omitempty"` +} + +func (x *ScopedRoutesConfigDump) Reset() { + *x = ScopedRoutesConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutesConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutesConfigDump) ProtoMessage() {} + +func (x *ScopedRoutesConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutesConfigDump.ProtoReflect.Descriptor instead. +func (*ScopedRoutesConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4} +} + +func (x *ScopedRoutesConfigDump) GetInlineScopedRouteConfigs() []*ScopedRoutesConfigDump_InlineScopedRouteConfigs { + if x != nil { + return x.InlineScopedRouteConfigs + } + return nil +} + +func (x *ScopedRoutesConfigDump) GetDynamicScopedRouteConfigs() []*ScopedRoutesConfigDump_DynamicScopedRouteConfigs { + if x != nil { + return x.DynamicScopedRouteConfigs + } + return nil +} + +// Envoy's admin fill this message with all currently known endpoints. 
Endpoint +// configuration information can be used to recreate an Envoy configuration by populating all +// endpoints as static endpoints or by returning them in an EDS response. +type EndpointsConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The statically loaded endpoint configs. + StaticEndpointConfigs []*EndpointsConfigDump_StaticEndpointConfig `protobuf:"bytes,2,rep,name=static_endpoint_configs,json=staticEndpointConfigs,proto3" json:"static_endpoint_configs,omitempty"` + // The dynamically loaded endpoint configs. + DynamicEndpointConfigs []*EndpointsConfigDump_DynamicEndpointConfig `protobuf:"bytes,3,rep,name=dynamic_endpoint_configs,json=dynamicEndpointConfigs,proto3" json:"dynamic_endpoint_configs,omitempty"` +} + +func (x *EndpointsConfigDump) Reset() { + *x = EndpointsConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EndpointsConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EndpointsConfigDump) ProtoMessage() {} + +func (x *EndpointsConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EndpointsConfigDump.ProtoReflect.Descriptor instead. 
+func (*EndpointsConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5} +} + +func (x *EndpointsConfigDump) GetStaticEndpointConfigs() []*EndpointsConfigDump_StaticEndpointConfig { + if x != nil { + return x.StaticEndpointConfigs + } + return nil +} + +func (x *EndpointsConfigDump) GetDynamicEndpointConfigs() []*EndpointsConfigDump_DynamicEndpointConfig { + if x != nil { + return x.DynamicEndpointConfigs + } + return nil +} + +// Envoy's ECDS service fills this message with all currently extension +// configuration. Extension configuration information can be used to recreate +// an Envoy ECDS listener and HTTP filters as static filters or by returning +// them in ECDS response. +type EcdsConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ECDS filter configs. + EcdsFilters []*EcdsConfigDump_EcdsFilterConfig `protobuf:"bytes,1,rep,name=ecds_filters,json=ecdsFilters,proto3" json:"ecds_filters,omitempty"` +} + +func (x *EcdsConfigDump) Reset() { + *x = EcdsConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EcdsConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EcdsConfigDump) ProtoMessage() {} + +func (x *EcdsConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EcdsConfigDump.ProtoReflect.Descriptor instead. 
+func (*EcdsConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{6} +} + +func (x *EcdsConfigDump) GetEcdsFilters() []*EcdsConfigDump_EcdsFilterConfig { + if x != nil { + return x.EcdsFilters + } + return nil +} + +// Describes a statically loaded listener. +type ListenersConfigDump_StaticListener struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The listener config. + Listener *any1.Any `protobuf:"bytes,1,opt,name=listener,proto3" json:"listener,omitempty"` + // The timestamp when the Listener was last successfully updated. + LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *ListenersConfigDump_StaticListener) Reset() { + *x = ListenersConfigDump_StaticListener{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenersConfigDump_StaticListener) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenersConfigDump_StaticListener) ProtoMessage() {} + +func (x *ListenersConfigDump_StaticListener) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenersConfigDump_StaticListener.ProtoReflect.Descriptor instead. 
+func (*ListenersConfigDump_StaticListener) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *ListenersConfigDump_StaticListener) GetListener() *any1.Any { + if x != nil { + return x.Listener + } + return nil +} + +func (x *ListenersConfigDump_StaticListener) GetLastUpdated() *timestamp.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +type ListenersConfigDump_DynamicListenerState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time + // that the listener was loaded. In the future, discrete per-listener versions may be supported + // by the API. + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The listener config. + Listener *any1.Any `protobuf:"bytes,2,opt,name=listener,proto3" json:"listener,omitempty"` + // The timestamp when the Listener was last successfully updated. 
+ LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *ListenersConfigDump_DynamicListenerState) Reset() { + *x = ListenersConfigDump_DynamicListenerState{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenersConfigDump_DynamicListenerState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenersConfigDump_DynamicListenerState) ProtoMessage() {} + +func (x *ListenersConfigDump_DynamicListenerState) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenersConfigDump_DynamicListenerState.ProtoReflect.Descriptor instead. +func (*ListenersConfigDump_DynamicListenerState) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *ListenersConfigDump_DynamicListenerState) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ListenersConfigDump_DynamicListenerState) GetListener() *any1.Any { + if x != nil { + return x.Listener + } + return nil +} + +func (x *ListenersConfigDump_DynamicListenerState) GetLastUpdated() *timestamp.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// Describes a dynamically loaded listener via the LDS API. 
+// [#next-free-field: 7] +type ListenersConfigDump_DynamicListener struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name or unique id of this listener, pulled from the DynamicListenerState config. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The listener state for any active listener by this name. + // These are listeners that are available to service data plane traffic. + ActiveState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,2,opt,name=active_state,json=activeState,proto3" json:"active_state,omitempty"` + // The listener state for any warming listener by this name. + // These are listeners that are currently undergoing warming in preparation to service data + // plane traffic. Note that if attempting to recreate an Envoy configuration from a + // configuration dump, the warming listeners should generally be discarded. + WarmingState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,3,opt,name=warming_state,json=warmingState,proto3" json:"warming_state,omitempty"` + // The listener state for any draining listener by this name. + // These are listeners that are currently undergoing draining in preparation to stop servicing + // data plane traffic. Note that if attempting to recreate an Envoy configuration from a + // configuration dump, the draining listeners should generally be discarded. + DrainingState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,4,opt,name=draining_state,json=drainingState,proto3" json:"draining_state,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The ``error_state`` field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. 
+ ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *ListenersConfigDump_DynamicListener) Reset() { + *x = ListenersConfigDump_DynamicListener{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenersConfigDump_DynamicListener) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenersConfigDump_DynamicListener) ProtoMessage() {} + +func (x *ListenersConfigDump_DynamicListener) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenersConfigDump_DynamicListener.ProtoReflect.Descriptor instead. 
+func (*ListenersConfigDump_DynamicListener) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *ListenersConfigDump_DynamicListener) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListenersConfigDump_DynamicListener) GetActiveState() *ListenersConfigDump_DynamicListenerState { + if x != nil { + return x.ActiveState + } + return nil +} + +func (x *ListenersConfigDump_DynamicListener) GetWarmingState() *ListenersConfigDump_DynamicListenerState { + if x != nil { + return x.WarmingState + } + return nil +} + +func (x *ListenersConfigDump_DynamicListener) GetDrainingState() *ListenersConfigDump_DynamicListenerState { + if x != nil { + return x.DrainingState + } + return nil +} + +func (x *ListenersConfigDump_DynamicListener) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *ListenersConfigDump_DynamicListener) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +// Describes a statically loaded cluster. +type ClustersConfigDump_StaticCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The cluster config. + Cluster *any1.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + // The timestamp when the Cluster was last updated. 
+ LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *ClustersConfigDump_StaticCluster) Reset() { + *x = ClustersConfigDump_StaticCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClustersConfigDump_StaticCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClustersConfigDump_StaticCluster) ProtoMessage() {} + +func (x *ClustersConfigDump_StaticCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClustersConfigDump_StaticCluster.ProtoReflect.Descriptor instead. +func (*ClustersConfigDump_StaticCluster) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *ClustersConfigDump_StaticCluster) GetCluster() *any1.Any { + if x != nil { + return x.Cluster + } + return nil +} + +func (x *ClustersConfigDump_StaticCluster) GetLastUpdated() *timestamp.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// Describes a dynamically loaded cluster via the CDS API. +// [#next-free-field: 6] +type ClustersConfigDump_DynamicCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time + // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by + // the API. 
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The cluster config. + Cluster *any1.Any `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` + // The timestamp when the Cluster was last updated. + LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The ``error_state`` field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *ClustersConfigDump_DynamicCluster) Reset() { + *x = ClustersConfigDump_DynamicCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClustersConfigDump_DynamicCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClustersConfigDump_DynamicCluster) ProtoMessage() {} + +func (x *ClustersConfigDump_DynamicCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
ClustersConfigDump_DynamicCluster.ProtoReflect.Descriptor instead. +func (*ClustersConfigDump_DynamicCluster) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *ClustersConfigDump_DynamicCluster) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ClustersConfigDump_DynamicCluster) GetCluster() *any1.Any { + if x != nil { + return x.Cluster + } + return nil +} + +func (x *ClustersConfigDump_DynamicCluster) GetLastUpdated() *timestamp.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *ClustersConfigDump_DynamicCluster) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *ClustersConfigDump_DynamicCluster) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +type RoutesConfigDump_StaticRouteConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The route config. + RouteConfig *any1.Any `protobuf:"bytes,1,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"` + // The timestamp when the Route was last updated. 
+ LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *RoutesConfigDump_StaticRouteConfig) Reset() { + *x = RoutesConfigDump_StaticRouteConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutesConfigDump_StaticRouteConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutesConfigDump_StaticRouteConfig) ProtoMessage() {} + +func (x *RoutesConfigDump_StaticRouteConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutesConfigDump_StaticRouteConfig.ProtoReflect.Descriptor instead. +func (*RoutesConfigDump_StaticRouteConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *RoutesConfigDump_StaticRouteConfig) GetRouteConfig() *any1.Any { + if x != nil { + return x.RouteConfig + } + return nil +} + +func (x *RoutesConfigDump_StaticRouteConfig) GetLastUpdated() *timestamp.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// [#next-free-field: 6] +type RoutesConfigDump_DynamicRouteConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the route configuration was loaded. + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The route config. 
+ RouteConfig *any1.Any `protobuf:"bytes,2,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"` + // The timestamp when the Route was last updated. + LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The ``error_state`` field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *RoutesConfigDump_DynamicRouteConfig) Reset() { + *x = RoutesConfigDump_DynamicRouteConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutesConfigDump_DynamicRouteConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutesConfigDump_DynamicRouteConfig) ProtoMessage() {} + +func (x *RoutesConfigDump_DynamicRouteConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutesConfigDump_DynamicRouteConfig.ProtoReflect.Descriptor instead. 
+func (*RoutesConfigDump_DynamicRouteConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetRouteConfig() *any1.Any { + if x != nil { + return x.RouteConfig + } + return nil +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetLastUpdated() *timestamp.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +type ScopedRoutesConfigDump_InlineScopedRouteConfigs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name assigned to the scoped route configurations. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The scoped route configurations. + ScopedRouteConfigs []*any1.Any `protobuf:"bytes,2,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"` + // The timestamp when the scoped route config set was last updated. 
+ LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) Reset() { + *x = ScopedRoutesConfigDump_InlineScopedRouteConfigs{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutesConfigDump_InlineScopedRouteConfigs) ProtoMessage() {} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutesConfigDump_InlineScopedRouteConfigs.ProtoReflect.Descriptor instead. 
+func (*ScopedRoutesConfigDump_InlineScopedRouteConfigs) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetScopedRouteConfigs() []*any1.Any { + if x != nil { + return x.ScopedRouteConfigs + } + return nil +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetLastUpdated() *timestamp.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// [#next-free-field: 7] +type ScopedRoutesConfigDump_DynamicScopedRouteConfigs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name assigned to the scoped route configurations. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the scoped routes configuration was loaded. + VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The scoped route configurations. + ScopedRouteConfigs []*any1.Any `protobuf:"bytes,3,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"` + // The timestamp when the scoped route config set was last updated. + LastUpdated *timestamp.Timestamp `protobuf:"bytes,4,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The ``error_state`` field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. 
+ // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Reset() { + *x = ScopedRoutesConfigDump_DynamicScopedRouteConfigs{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ProtoMessage() {} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutesConfigDump_DynamicScopedRouteConfigs.ProtoReflect.Descriptor instead. 
+func (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetScopedRouteConfigs() []*any1.Any { + if x != nil { + return x.ScopedRouteConfigs + } + return nil +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetLastUpdated() *timestamp.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +type EndpointsConfigDump_StaticEndpointConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The endpoint config. + EndpointConfig *any1.Any `protobuf:"bytes,1,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"` + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. 
+ LastUpdated *timestamp.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *EndpointsConfigDump_StaticEndpointConfig) Reset() { + *x = EndpointsConfigDump_StaticEndpointConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EndpointsConfigDump_StaticEndpointConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EndpointsConfigDump_StaticEndpointConfig) ProtoMessage() {} + +func (x *EndpointsConfigDump_StaticEndpointConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EndpointsConfigDump_StaticEndpointConfig.ProtoReflect.Descriptor instead. +func (*EndpointsConfigDump_StaticEndpointConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *EndpointsConfigDump_StaticEndpointConfig) GetEndpointConfig() *any1.Any { + if x != nil { + return x.EndpointConfig + } + return nil +} + +func (x *EndpointsConfigDump_StaticEndpointConfig) GetLastUpdated() *timestamp.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// [#next-free-field: 6] +type EndpointsConfigDump_DynamicEndpointConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the endpoint configuration was loaded. 
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The endpoint config. + EndpointConfig *any1.Any `protobuf:"bytes,2,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"` + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The ``error_state`` field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) Reset() { + *x = EndpointsConfigDump_DynamicEndpointConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EndpointsConfigDump_DynamicEndpointConfig) ProtoMessage() {} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EndpointsConfigDump_DynamicEndpointConfig.ProtoReflect.Descriptor instead. +func (*EndpointsConfigDump_DynamicEndpointConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5, 1} +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetEndpointConfig() *any1.Any { + if x != nil { + return x.EndpointConfig + } + return nil +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetLastUpdated() *timestamp.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +// [#next-free-field: 6] +type EcdsConfigDump_EcdsFilterConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the per-resource version information. This version is currently + // taken from the :ref:`version_info + // ` + // field at the time that the ECDS filter was loaded. + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The ECDS filter config. + EcdsFilter *any1.Any `protobuf:"bytes,2,opt,name=ecds_filter,json=ecdsFilter,proto3" json:"ecds_filter,omitempty"` + // The timestamp when the ECDS filter was last updated. + LastUpdated *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. 
+ // The ``error_state`` field contains the rejected version of this + // particular resource along with the reason and timestamp. For successfully + // updated or acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *EcdsConfigDump_EcdsFilterConfig) Reset() { + *x = EcdsConfigDump_EcdsFilterConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EcdsConfigDump_EcdsFilterConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EcdsConfigDump_EcdsFilterConfig) ProtoMessage() {} + +func (x *EcdsConfigDump_EcdsFilterConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EcdsConfigDump_EcdsFilterConfig.ProtoReflect.Descriptor instead. 
+func (*EcdsConfigDump_EcdsFilterConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetEcdsFilter() *any1.Any { + if x != nil { + return x.EcdsFilter + } + return nil +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetLastUpdated() *timestamp.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +var File_envoy_admin_v3_config_dump_shared_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_config_dump_shared_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73, 0x68, 0x61, + 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 
0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x95, 0x02, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, + 0x0a, 0x14, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x52, 0x13, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x65, + 0x6d, 0x70, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x21, 0x0a, + 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, + 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, + 0xf3, 0x09, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x44, 0x75, 
0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x5d, 0x0a, 0x10, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x11, 0x64, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x1a, 0xc0, 0x01, 0x0a, 0x0e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x30, + 0x0a, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, + 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x1a, 0xef, + 0x01, 0x0a, 0x14, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x0a, 0x08, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, + 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, + 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x43, 0x9a, 0xc5, 0x88, + 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 
0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x1a, 0x92, 0x04, 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5b, 0x0a, 0x0c, 0x61, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, + 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x5d, 0x0a, 0x0d, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x5f, 0x0a, 0x0e, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 
0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, 0x0a, 0x37, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0xca, 0x07, 0x0a, 0x12, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 
0x66, 0x6f, 0x12, + 0x59, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, + 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x17, 0x64, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x15, + 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x6b, 0x0a, 0x18, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x16, 0x64, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x1a, 0xbb, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 
0x72, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x1a, 0xf0, 0x02, 0x0a, 0x0e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, + 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 
0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x22, 0xdd, 0x06, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x64, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x74, 0x69, + 0x63, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 
0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x69, + 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x67, 0x0a, + 0x15, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x13, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xca, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x69, + 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0c, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 
0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0xff, 0x02, 0x0a, 0x12, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x37, 0x0a, + 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 
0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, 0x0a, 0x37, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x22, 0x8c, 0x08, 0x0a, 0x16, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x7e, 0x0a, + 0x1b, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, + 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x52, 0x18, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, + 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x81, 0x01, + 0x0a, 0x1c, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, + 0x5f, 
0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x19, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x1a, 0x81, 0x02, 0x0a, 0x18, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x14, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x12, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, + 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x4a, 0x9a, 0xc5, 0x88, 0x1e, 0x45, + 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 
0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x49, 0x6e, 0x6c, + 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xb6, 0x03, 0x0a, 0x19, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x46, 0x0a, 0x14, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x12, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x65, 
0x72, 0x72, 0x6f, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x3a, 0x4b, 0x9a, 0xc5, 0x88, 0x1e, 0x46, 0x0a, 0x44, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x3a, 0x31, + 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, + 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x22, 0xde, 0x05, 0x0a, 0x13, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x70, 0x0a, 0x17, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 
0x67, 0x52, 0x15, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x73, 0x0a, 0x18, 0x64, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, + 0x1a, 0x94, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0f, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xc8, 0x02, 0x0a, 0x15, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 
0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x22, 0x89, 0x04, 0x0a, 0x0e, 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x52, 0x0a, 0x0c, 0x65, 0x63, 0x64, 0x73, 0x5f, 0x66, 
0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x63, 0x64, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x45, 0x63, 0x64, 0x73, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x65, 0x63, + 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0xf7, 0x02, 0x0a, 0x10, 0x45, 0x63, + 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, + 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x35, 0x0a, 0x0b, 0x65, 0x63, 0x64, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, 0x65, 0x63, + 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 
0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x2e, 0x45, 0x63, 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2a, 0x5d, + 0x0a, 0x14, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x4f, 0x45, 0x53, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x45, + 0x58, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, + 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x04, 0x42, 0x7e, 0x0a, + 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x15, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 
0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_config_dump_shared_proto_rawDescOnce sync.Once + file_envoy_admin_v3_config_dump_shared_proto_rawDescData = file_envoy_admin_v3_config_dump_shared_proto_rawDesc +) + +func file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_config_dump_shared_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_config_dump_shared_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_config_dump_shared_proto_rawDescData) + }) + return file_envoy_admin_v3_config_dump_shared_proto_rawDescData +} + +var file_envoy_admin_v3_config_dump_shared_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_admin_v3_config_dump_shared_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_envoy_admin_v3_config_dump_shared_proto_goTypes = []interface{}{ + (ClientResourceStatus)(0), // 0: envoy.admin.v3.ClientResourceStatus + (*UpdateFailureState)(nil), // 1: envoy.admin.v3.UpdateFailureState + (*ListenersConfigDump)(nil), // 2: envoy.admin.v3.ListenersConfigDump + (*ClustersConfigDump)(nil), // 3: envoy.admin.v3.ClustersConfigDump + (*RoutesConfigDump)(nil), // 4: envoy.admin.v3.RoutesConfigDump + (*ScopedRoutesConfigDump)(nil), // 5: envoy.admin.v3.ScopedRoutesConfigDump + (*EndpointsConfigDump)(nil), // 6: envoy.admin.v3.EndpointsConfigDump + (*EcdsConfigDump)(nil), // 7: envoy.admin.v3.EcdsConfigDump + (*ListenersConfigDump_StaticListener)(nil), // 8: envoy.admin.v3.ListenersConfigDump.StaticListener + (*ListenersConfigDump_DynamicListenerState)(nil), // 
9: envoy.admin.v3.ListenersConfigDump.DynamicListenerState + (*ListenersConfigDump_DynamicListener)(nil), // 10: envoy.admin.v3.ListenersConfigDump.DynamicListener + (*ClustersConfigDump_StaticCluster)(nil), // 11: envoy.admin.v3.ClustersConfigDump.StaticCluster + (*ClustersConfigDump_DynamicCluster)(nil), // 12: envoy.admin.v3.ClustersConfigDump.DynamicCluster + (*RoutesConfigDump_StaticRouteConfig)(nil), // 13: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig + (*RoutesConfigDump_DynamicRouteConfig)(nil), // 14: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig + (*ScopedRoutesConfigDump_InlineScopedRouteConfigs)(nil), // 15: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs + (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs)(nil), // 16: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs + (*EndpointsConfigDump_StaticEndpointConfig)(nil), // 17: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig + (*EndpointsConfigDump_DynamicEndpointConfig)(nil), // 18: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig + (*EcdsConfigDump_EcdsFilterConfig)(nil), // 19: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig + (*any1.Any)(nil), // 20: google.protobuf.Any + (*timestamp.Timestamp)(nil), // 21: google.protobuf.Timestamp +} +var file_envoy_admin_v3_config_dump_shared_proto_depIdxs = []int32{ + 20, // 0: envoy.admin.v3.UpdateFailureState.failed_configuration:type_name -> google.protobuf.Any + 21, // 1: envoy.admin.v3.UpdateFailureState.last_update_attempt:type_name -> google.protobuf.Timestamp + 8, // 2: envoy.admin.v3.ListenersConfigDump.static_listeners:type_name -> envoy.admin.v3.ListenersConfigDump.StaticListener + 10, // 3: envoy.admin.v3.ListenersConfigDump.dynamic_listeners:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListener + 11, // 4: envoy.admin.v3.ClustersConfigDump.static_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.StaticCluster + 12, // 5: 
envoy.admin.v3.ClustersConfigDump.dynamic_active_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.DynamicCluster + 12, // 6: envoy.admin.v3.ClustersConfigDump.dynamic_warming_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.DynamicCluster + 13, // 7: envoy.admin.v3.RoutesConfigDump.static_route_configs:type_name -> envoy.admin.v3.RoutesConfigDump.StaticRouteConfig + 14, // 8: envoy.admin.v3.RoutesConfigDump.dynamic_route_configs:type_name -> envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig + 15, // 9: envoy.admin.v3.ScopedRoutesConfigDump.inline_scoped_route_configs:type_name -> envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs + 16, // 10: envoy.admin.v3.ScopedRoutesConfigDump.dynamic_scoped_route_configs:type_name -> envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs + 17, // 11: envoy.admin.v3.EndpointsConfigDump.static_endpoint_configs:type_name -> envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig + 18, // 12: envoy.admin.v3.EndpointsConfigDump.dynamic_endpoint_configs:type_name -> envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig + 19, // 13: envoy.admin.v3.EcdsConfigDump.ecds_filters:type_name -> envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig + 20, // 14: envoy.admin.v3.ListenersConfigDump.StaticListener.listener:type_name -> google.protobuf.Any + 21, // 15: envoy.admin.v3.ListenersConfigDump.StaticListener.last_updated:type_name -> google.protobuf.Timestamp + 20, // 16: envoy.admin.v3.ListenersConfigDump.DynamicListenerState.listener:type_name -> google.protobuf.Any + 21, // 17: envoy.admin.v3.ListenersConfigDump.DynamicListenerState.last_updated:type_name -> google.protobuf.Timestamp + 9, // 18: envoy.admin.v3.ListenersConfigDump.DynamicListener.active_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState + 9, // 19: envoy.admin.v3.ListenersConfigDump.DynamicListener.warming_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState + 9, // 20: 
envoy.admin.v3.ListenersConfigDump.DynamicListener.draining_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState + 1, // 21: envoy.admin.v3.ListenersConfigDump.DynamicListener.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 22: envoy.admin.v3.ListenersConfigDump.DynamicListener.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 23: envoy.admin.v3.ClustersConfigDump.StaticCluster.cluster:type_name -> google.protobuf.Any + 21, // 24: envoy.admin.v3.ClustersConfigDump.StaticCluster.last_updated:type_name -> google.protobuf.Timestamp + 20, // 25: envoy.admin.v3.ClustersConfigDump.DynamicCluster.cluster:type_name -> google.protobuf.Any + 21, // 26: envoy.admin.v3.ClustersConfigDump.DynamicCluster.last_updated:type_name -> google.protobuf.Timestamp + 1, // 27: envoy.admin.v3.ClustersConfigDump.DynamicCluster.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 28: envoy.admin.v3.ClustersConfigDump.DynamicCluster.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 29: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig.route_config:type_name -> google.protobuf.Any + 21, // 30: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig.last_updated:type_name -> google.protobuf.Timestamp + 20, // 31: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.route_config:type_name -> google.protobuf.Any + 21, // 32: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.last_updated:type_name -> google.protobuf.Timestamp + 1, // 33: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 34: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 35: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs.scoped_route_configs:type_name -> google.protobuf.Any + 21, // 36: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs.last_updated:type_name -> 
google.protobuf.Timestamp + 20, // 37: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.scoped_route_configs:type_name -> google.protobuf.Any + 21, // 38: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.last_updated:type_name -> google.protobuf.Timestamp + 1, // 39: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 40: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 41: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig.endpoint_config:type_name -> google.protobuf.Any + 21, // 42: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig.last_updated:type_name -> google.protobuf.Timestamp + 20, // 43: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.endpoint_config:type_name -> google.protobuf.Any + 21, // 44: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.last_updated:type_name -> google.protobuf.Timestamp + 1, // 45: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 46: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 47: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.ecds_filter:type_name -> google.protobuf.Any + 21, // 48: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.last_updated:type_name -> google.protobuf.Timestamp + 1, // 49: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 50: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 51, // [51:51] is the sub-list for method output_type + 51, // [51:51] is the sub-list for method input_type + 51, // [51:51] is the sub-list for extension type_name + 51, // [51:51] is the sub-list for extension extendee + 0, // [0:51] is 
the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_config_dump_shared_proto_init() } +func file_envoy_admin_v3_config_dump_shared_proto_init() { + if File_envoy_admin_v3_config_dump_shared_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateFailureState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenersConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClustersConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutesConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutesConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EndpointsConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EcdsConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenersConfigDump_StaticListener); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenersConfigDump_DynamicListenerState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenersConfigDump_DynamicListener); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClustersConfigDump_StaticCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClustersConfigDump_DynamicCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutesConfigDump_StaticRouteConfig); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutesConfigDump_DynamicRouteConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutesConfigDump_InlineScopedRouteConfigs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutesConfigDump_DynamicScopedRouteConfigs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EndpointsConfigDump_StaticEndpointConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EndpointsConfigDump_DynamicEndpointConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EcdsConfigDump_EcdsFilterConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_config_dump_shared_proto_rawDesc, + NumEnums: 1, + NumMessages: 19, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_config_dump_shared_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_config_dump_shared_proto_depIdxs, + EnumInfos: file_envoy_admin_v3_config_dump_shared_proto_enumTypes, + MessageInfos: file_envoy_admin_v3_config_dump_shared_proto_msgTypes, + }.Build() + File_envoy_admin_v3_config_dump_shared_proto = out.File + file_envoy_admin_v3_config_dump_shared_proto_rawDesc = nil + file_envoy_admin_v3_config_dump_shared_proto_goTypes = nil + file_envoy_admin_v3_config_dump_shared_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go new file mode 100644 index 0000000000..3a78136a98 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go @@ -0,0 +1,3434 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/config_dump_shared.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UpdateFailureState with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpdateFailureState) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpdateFailureState with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpdateFailureStateMultiError, or nil if none found. +func (m *UpdateFailureState) ValidateAll() error { + return m.validate(true) +} + +func (m *UpdateFailureState) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetFailedConfiguration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpdateFailureStateValidationError{ + field: "FailedConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpdateFailureStateValidationError{ + field: "FailedConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFailedConfiguration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpdateFailureStateValidationError{ + field: "FailedConfiguration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdateAttempt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpdateFailureStateValidationError{ + field: "LastUpdateAttempt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpdateFailureStateValidationError{ + field: "LastUpdateAttempt", + 
reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdateAttempt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpdateFailureStateValidationError{ + field: "LastUpdateAttempt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Details + + // no validation rules for VersionInfo + + if len(errors) > 0 { + return UpdateFailureStateMultiError(errors) + } + + return nil +} + +// UpdateFailureStateMultiError is an error wrapping multiple validation errors +// returned by UpdateFailureState.ValidateAll() if the designated constraints +// aren't met. +type UpdateFailureStateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpdateFailureStateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpdateFailureStateMultiError) AllErrors() []error { return m } + +// UpdateFailureStateValidationError is the validation error returned by +// UpdateFailureState.Validate if the designated constraints aren't met. +type UpdateFailureStateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpdateFailureStateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpdateFailureStateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpdateFailureStateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpdateFailureStateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e UpdateFailureStateValidationError) ErrorName() string { + return "UpdateFailureStateValidationError" +} + +// Error satisfies the builtin error interface +func (e UpdateFailureStateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpdateFailureState.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpdateFailureStateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpdateFailureStateValidationError{} + +// Validate checks the field values on ListenersConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ListenersConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenersConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListenersConfigDumpMultiError, or nil if none found. 
+func (m *ListenersConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenersConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + for idx, item := range m.GetStaticListeners() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDumpValidationError{ + field: fmt.Sprintf("StaticListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDumpValidationError{ + field: fmt.Sprintf("StaticListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDumpValidationError{ + field: fmt.Sprintf("StaticListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicListeners() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
ListenersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListenersConfigDumpMultiError(errors) + } + + return nil +} + +// ListenersConfigDumpMultiError is an error wrapping multiple validation +// errors returned by ListenersConfigDump.ValidateAll() if the designated +// constraints aren't met. +type ListenersConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersConfigDumpMultiError) AllErrors() []error { return m } + +// ListenersConfigDumpValidationError is the validation error returned by +// ListenersConfigDump.Validate if the designated constraints aren't met. +type ListenersConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenersConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenersConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListenersConfigDumpValidationError) ErrorName() string { + return "ListenersConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenersConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenersConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersConfigDumpValidationError{} + +// Validate checks the field values on ClustersConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClustersConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClustersConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClustersConfigDumpMultiError, or nil if none found. 
+func (m *ClustersConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *ClustersConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + for idx, item := range m.GetStaticClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("StaticClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("StaticClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDumpValidationError{ + field: fmt.Sprintf("StaticClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicActiveClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicWarmingClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ClustersConfigDumpMultiError(errors) + } + + return nil +} + +// ClustersConfigDumpMultiError is an error wrapping multiple validation errors +// returned by ClustersConfigDump.ValidateAll() if the designated constraints +// aren't met. +type ClustersConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClustersConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClustersConfigDumpMultiError) AllErrors() []error { return m } + +// ClustersConfigDumpValidationError is the validation error returned by +// ClustersConfigDump.Validate if the designated constraints aren't met. 
+type ClustersConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClustersConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClustersConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClustersConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClustersConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClustersConfigDumpValidationError) ErrorName() string { + return "ClustersConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e ClustersConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClustersConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClustersConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClustersConfigDumpValidationError{} + +// Validate checks the field values on RoutesConfigDump with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *RoutesConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RoutesConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RoutesConfigDumpMultiError, or nil if none found. 
+func (m *RoutesConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *RoutesConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStaticRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDumpValidationError{ + field: fmt.Sprintf("StaticRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDumpValidationError{ + field: fmt.Sprintf("StaticRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDumpValidationError{ + field: fmt.Sprintf("StaticRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDumpValidationError{ + field: 
fmt.Sprintf("DynamicRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RoutesConfigDumpMultiError(errors) + } + + return nil +} + +// RoutesConfigDumpMultiError is an error wrapping multiple validation errors +// returned by RoutesConfigDump.ValidateAll() if the designated constraints +// aren't met. +type RoutesConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RoutesConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RoutesConfigDumpMultiError) AllErrors() []error { return m } + +// RoutesConfigDumpValidationError is the validation error returned by +// RoutesConfigDump.Validate if the designated constraints aren't met. +type RoutesConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RoutesConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RoutesConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RoutesConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RoutesConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RoutesConfigDumpValidationError) ErrorName() string { return "RoutesConfigDumpValidationError" } + +// Error satisfies the builtin error interface +func (e RoutesConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRoutesConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RoutesConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RoutesConfigDumpValidationError{} + +// Validate checks the field values on ScopedRoutesConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutesConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRoutesConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScopedRoutesConfigDumpMultiError, or nil if none found. 
+func (m *ScopedRoutesConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutesConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetInlineScopedRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicScopedRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil 
{ + return ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ScopedRoutesConfigDumpMultiError(errors) + } + + return nil +} + +// ScopedRoutesConfigDumpMultiError is an error wrapping multiple validation +// errors returned by ScopedRoutesConfigDump.ValidateAll() if the designated +// constraints aren't met. +type ScopedRoutesConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutesConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutesConfigDumpMultiError) AllErrors() []error { return m } + +// ScopedRoutesConfigDumpValidationError is the validation error returned by +// ScopedRoutesConfigDump.Validate if the designated constraints aren't met. +type ScopedRoutesConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutesConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRoutesConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRoutesConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRoutesConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ScopedRoutesConfigDumpValidationError) ErrorName() string { + return "ScopedRoutesConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutesConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutesConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutesConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutesConfigDumpValidationError{} + +// Validate checks the field values on EndpointsConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EndpointsConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EndpointsConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// EndpointsConfigDumpMultiError, or nil if none found. 
+func (m *EndpointsConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *EndpointsConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStaticEndpointConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicEndpointConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return EndpointsConfigDumpMultiError(errors) + } + + return nil +} + +// EndpointsConfigDumpMultiError is an error wrapping multiple validation +// errors returned by EndpointsConfigDump.ValidateAll() if the designated +// constraints aren't met. +type EndpointsConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EndpointsConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EndpointsConfigDumpMultiError) AllErrors() []error { return m } + +// EndpointsConfigDumpValidationError is the validation error returned by +// EndpointsConfigDump.Validate if the designated constraints aren't met. +type EndpointsConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EndpointsConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EndpointsConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EndpointsConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EndpointsConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e EndpointsConfigDumpValidationError) ErrorName() string { + return "EndpointsConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e EndpointsConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpointsConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EndpointsConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EndpointsConfigDumpValidationError{} + +// Validate checks the field values on EcdsConfigDump with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *EcdsConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EcdsConfigDump with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in EcdsConfigDumpMultiError, +// or nil if none found. 
+func (m *EcdsConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *EcdsConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetEcdsFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EcdsConfigDumpValidationError{ + field: fmt.Sprintf("EcdsFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EcdsConfigDumpValidationError{ + field: fmt.Sprintf("EcdsFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EcdsConfigDumpValidationError{ + field: fmt.Sprintf("EcdsFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return EcdsConfigDumpMultiError(errors) + } + + return nil +} + +// EcdsConfigDumpMultiError is an error wrapping multiple validation errors +// returned by EcdsConfigDump.ValidateAll() if the designated constraints +// aren't met. +type EcdsConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EcdsConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EcdsConfigDumpMultiError) AllErrors() []error { return m } + +// EcdsConfigDumpValidationError is the validation error returned by +// EcdsConfigDump.Validate if the designated constraints aren't met. 
+type EcdsConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EcdsConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EcdsConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EcdsConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EcdsConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EcdsConfigDumpValidationError) ErrorName() string { return "EcdsConfigDumpValidationError" } + +// Error satisfies the builtin error interface +func (e EcdsConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEcdsConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EcdsConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EcdsConfigDumpValidationError{} + +// Validate checks the field values on ListenersConfigDump_StaticListener with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ListenersConfigDump_StaticListener) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenersConfigDump_StaticListener +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ListenersConfigDump_StaticListenerMultiError, or nil if none found. 
+func (m *ListenersConfigDump_StaticListener) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenersConfigDump_StaticListener) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetListener()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListener()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_StaticListenerValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_StaticListenerValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return 
ListenersConfigDump_StaticListenerMultiError(errors) + } + + return nil +} + +// ListenersConfigDump_StaticListenerMultiError is an error wrapping multiple +// validation errors returned by +// ListenersConfigDump_StaticListener.ValidateAll() if the designated +// constraints aren't met. +type ListenersConfigDump_StaticListenerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersConfigDump_StaticListenerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersConfigDump_StaticListenerMultiError) AllErrors() []error { return m } + +// ListenersConfigDump_StaticListenerValidationError is the validation error +// returned by ListenersConfigDump_StaticListener.Validate if the designated +// constraints aren't met. +type ListenersConfigDump_StaticListenerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersConfigDump_StaticListenerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersConfigDump_StaticListenerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenersConfigDump_StaticListenerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenersConfigDump_StaticListenerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListenersConfigDump_StaticListenerValidationError) ErrorName() string { + return "ListenersConfigDump_StaticListenerValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenersConfigDump_StaticListenerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenersConfigDump_StaticListener.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersConfigDump_StaticListenerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersConfigDump_StaticListenerValidationError{} + +// Validate checks the field values on ListenersConfigDump_DynamicListenerState +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *ListenersConfigDump_DynamicListenerState) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ListenersConfigDump_DynamicListenerState with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// ListenersConfigDump_DynamicListenerStateMultiError, or nil if none found. 
+func (m *ListenersConfigDump_DynamicListenerState) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenersConfigDump_DynamicListenerState) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetListener()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListener()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerStateValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerStateValidationError{ + field: "LastUpdated", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ListenersConfigDump_DynamicListenerStateMultiError(errors) + } + + return nil +} + +// ListenersConfigDump_DynamicListenerStateMultiError is an error wrapping +// multiple validation errors returned by +// ListenersConfigDump_DynamicListenerState.ValidateAll() if the designated +// constraints aren't met. +type ListenersConfigDump_DynamicListenerStateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersConfigDump_DynamicListenerStateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersConfigDump_DynamicListenerStateMultiError) AllErrors() []error { return m } + +// ListenersConfigDump_DynamicListenerStateValidationError is the validation +// error returned by ListenersConfigDump_DynamicListenerState.Validate if the +// designated constraints aren't met. +type ListenersConfigDump_DynamicListenerStateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersConfigDump_DynamicListenerStateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersConfigDump_DynamicListenerStateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenersConfigDump_DynamicListenerStateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenersConfigDump_DynamicListenerStateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListenersConfigDump_DynamicListenerStateValidationError) ErrorName() string { + return "ListenersConfigDump_DynamicListenerStateValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenersConfigDump_DynamicListenerStateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenersConfigDump_DynamicListenerState.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersConfigDump_DynamicListenerStateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersConfigDump_DynamicListenerStateValidationError{} + +// Validate checks the field values on ListenersConfigDump_DynamicListener with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ListenersConfigDump_DynamicListener) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenersConfigDump_DynamicListener +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ListenersConfigDump_DynamicListenerMultiError, or nil if none found. 
+func (m *ListenersConfigDump_DynamicListener) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenersConfigDump_DynamicListener) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetActiveState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "ActiveState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "ActiveState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetActiveState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerValidationError{ + field: "ActiveState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWarmingState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "WarmingState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "WarmingState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWarmingState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerValidationError{ + field: "WarmingState", + reason: "embedded message failed validation", + cause: err, + } + 
} + } + + if all { + switch v := interface{}(m.GetDrainingState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "DrainingState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "DrainingState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDrainingState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerValidationError{ + field: "DrainingState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return ListenersConfigDump_DynamicListenerMultiError(errors) + } + + return nil +} + +// ListenersConfigDump_DynamicListenerMultiError is an error wrapping multiple +// validation 
errors returned by +// ListenersConfigDump_DynamicListener.ValidateAll() if the designated +// constraints aren't met. +type ListenersConfigDump_DynamicListenerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersConfigDump_DynamicListenerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersConfigDump_DynamicListenerMultiError) AllErrors() []error { return m } + +// ListenersConfigDump_DynamicListenerValidationError is the validation error +// returned by ListenersConfigDump_DynamicListener.Validate if the designated +// constraints aren't met. +type ListenersConfigDump_DynamicListenerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersConfigDump_DynamicListenerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersConfigDump_DynamicListenerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenersConfigDump_DynamicListenerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenersConfigDump_DynamicListenerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListenersConfigDump_DynamicListenerValidationError) ErrorName() string { + return "ListenersConfigDump_DynamicListenerValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenersConfigDump_DynamicListenerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenersConfigDump_DynamicListener.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersConfigDump_DynamicListenerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersConfigDump_DynamicListenerValidationError{} + +// Validate checks the field values on ClustersConfigDump_StaticCluster with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ClustersConfigDump_StaticCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClustersConfigDump_StaticCluster with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ClustersConfigDump_StaticClusterMultiError, or nil if none found. 
+func (m *ClustersConfigDump_StaticCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *ClustersConfigDump_StaticCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCluster()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_StaticClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_StaticClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return 
ClustersConfigDump_StaticClusterMultiError(errors) + } + + return nil +} + +// ClustersConfigDump_StaticClusterMultiError is an error wrapping multiple +// validation errors returned by +// ClustersConfigDump_StaticCluster.ValidateAll() if the designated +// constraints aren't met. +type ClustersConfigDump_StaticClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClustersConfigDump_StaticClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClustersConfigDump_StaticClusterMultiError) AllErrors() []error { return m } + +// ClustersConfigDump_StaticClusterValidationError is the validation error +// returned by ClustersConfigDump_StaticCluster.Validate if the designated +// constraints aren't met. +type ClustersConfigDump_StaticClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClustersConfigDump_StaticClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClustersConfigDump_StaticClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClustersConfigDump_StaticClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClustersConfigDump_StaticClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ClustersConfigDump_StaticClusterValidationError) ErrorName() string { + return "ClustersConfigDump_StaticClusterValidationError" +} + +// Error satisfies the builtin error interface +func (e ClustersConfigDump_StaticClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClustersConfigDump_StaticCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClustersConfigDump_StaticClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClustersConfigDump_StaticClusterValidationError{} + +// Validate checks the field values on ClustersConfigDump_DynamicCluster with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ClustersConfigDump_DynamicCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClustersConfigDump_DynamicCluster +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ClustersConfigDump_DynamicClusterMultiError, or nil if none found. 
+func (m *ClustersConfigDump_DynamicCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *ClustersConfigDump_DynamicCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetCluster()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_DynamicClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_DynamicClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := 
interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_DynamicClusterValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return ClustersConfigDump_DynamicClusterMultiError(errors) + } + + return nil +} + +// ClustersConfigDump_DynamicClusterMultiError is an error wrapping multiple +// validation errors returned by +// ClustersConfigDump_DynamicCluster.ValidateAll() if the designated +// constraints aren't met. +type ClustersConfigDump_DynamicClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClustersConfigDump_DynamicClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClustersConfigDump_DynamicClusterMultiError) AllErrors() []error { return m } + +// ClustersConfigDump_DynamicClusterValidationError is the validation error +// returned by ClustersConfigDump_DynamicCluster.Validate if the designated +// constraints aren't met. 
+type ClustersConfigDump_DynamicClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClustersConfigDump_DynamicClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClustersConfigDump_DynamicClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClustersConfigDump_DynamicClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClustersConfigDump_DynamicClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClustersConfigDump_DynamicClusterValidationError) ErrorName() string { + return "ClustersConfigDump_DynamicClusterValidationError" +} + +// Error satisfies the builtin error interface +func (e ClustersConfigDump_DynamicClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClustersConfigDump_DynamicCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClustersConfigDump_DynamicClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClustersConfigDump_DynamicClusterValidationError{} + +// Validate checks the field values on RoutesConfigDump_StaticRouteConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RoutesConfigDump_StaticRouteConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RoutesConfigDump_StaticRouteConfig +// with the rules defined in the proto definition for this message. 
If any +// rules are violated, the result is a list of violation errors wrapped in +// RoutesConfigDump_StaticRouteConfigMultiError, or nil if none found. +func (m *RoutesConfigDump_StaticRouteConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *RoutesConfigDump_StaticRouteConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRouteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_StaticRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
RoutesConfigDump_StaticRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RoutesConfigDump_StaticRouteConfigMultiError(errors) + } + + return nil +} + +// RoutesConfigDump_StaticRouteConfigMultiError is an error wrapping multiple +// validation errors returned by +// RoutesConfigDump_StaticRouteConfig.ValidateAll() if the designated +// constraints aren't met. +type RoutesConfigDump_StaticRouteConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RoutesConfigDump_StaticRouteConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RoutesConfigDump_StaticRouteConfigMultiError) AllErrors() []error { return m } + +// RoutesConfigDump_StaticRouteConfigValidationError is the validation error +// returned by RoutesConfigDump_StaticRouteConfig.Validate if the designated +// constraints aren't met. +type RoutesConfigDump_StaticRouteConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RoutesConfigDump_StaticRouteConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RoutesConfigDump_StaticRouteConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RoutesConfigDump_StaticRouteConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RoutesConfigDump_StaticRouteConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RoutesConfigDump_StaticRouteConfigValidationError) ErrorName() string { + return "RoutesConfigDump_StaticRouteConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e RoutesConfigDump_StaticRouteConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRoutesConfigDump_StaticRouteConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RoutesConfigDump_StaticRouteConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RoutesConfigDump_StaticRouteConfigValidationError{} + +// Validate checks the field values on RoutesConfigDump_DynamicRouteConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RoutesConfigDump_DynamicRouteConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RoutesConfigDump_DynamicRouteConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RoutesConfigDump_DynamicRouteConfigMultiError, or nil if none found. 
+func (m *RoutesConfigDump_DynamicRouteConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *RoutesConfigDump_DynamicRouteConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetRouteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } 
+ } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return RoutesConfigDump_DynamicRouteConfigMultiError(errors) + } + + return nil +} + +// RoutesConfigDump_DynamicRouteConfigMultiError is an error wrapping multiple +// validation errors returned by +// RoutesConfigDump_DynamicRouteConfig.ValidateAll() if the designated +// constraints aren't met. +type RoutesConfigDump_DynamicRouteConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RoutesConfigDump_DynamicRouteConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RoutesConfigDump_DynamicRouteConfigMultiError) AllErrors() []error { return m } + +// RoutesConfigDump_DynamicRouteConfigValidationError is the validation error +// returned by RoutesConfigDump_DynamicRouteConfig.Validate if the designated +// constraints aren't met. 
+type RoutesConfigDump_DynamicRouteConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) ErrorName() string { + return "RoutesConfigDump_DynamicRouteConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRoutesConfigDump_DynamicRouteConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RoutesConfigDump_DynamicRouteConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RoutesConfigDump_DynamicRouteConfigValidationError{} + +// Validate checks the field values on +// ScopedRoutesConfigDump_InlineScopedRouteConfigs with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScopedRoutesConfigDump_InlineScopedRouteConfigs with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError, or nil if none found. +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + for idx, item := range m.GetScopedRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError(errors) + } + + return nil +} + +// ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError is an error +// wrapping multiple validation errors returned by +// ScopedRoutesConfigDump_InlineScopedRouteConfigs.ValidateAll() if the +// designated constraints aren't met. +type ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError) AllErrors() []error { return m } + +// ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError is the +// validation error returned by +// ScopedRoutesConfigDump_InlineScopedRouteConfigs.Validate if the designated +// constraints aren't met. +type ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) ErrorName() string { + return "ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutesConfigDump_InlineScopedRouteConfigs.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{} + +// Validate checks the field values on +// ScopedRoutesConfigDump_DynamicScopedRouteConfigs with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScopedRoutesConfigDump_DynamicScopedRouteConfigs with the rules defined in +// the proto definition for this message. 
If any rules are violated, the +// result is a list of violation errors wrapped in +// ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError, or nil if none found. +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for VersionInfo + + for idx, item := range m.GetScopedRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError(errors) + } + + return nil +} + +// ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError is an error +// wrapping multiple validation errors returned by +// ScopedRoutesConfigDump_DynamicScopedRouteConfigs.ValidateAll() if the +// designated constraints aren't met. +type ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError) AllErrors() []error { return m } + +// ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError is the +// validation error returned by +// ScopedRoutesConfigDump_DynamicScopedRouteConfigs.Validate if the designated +// constraints aren't met. +type ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) ErrorName() string { + return "ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutesConfigDump_DynamicScopedRouteConfigs.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{} + +// Validate checks the field values on EndpointsConfigDump_StaticEndpointConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *EndpointsConfigDump_StaticEndpointConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// EndpointsConfigDump_StaticEndpointConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// EndpointsConfigDump_StaticEndpointConfigMultiError, or nil if none found. 
+func (m *EndpointsConfigDump_StaticEndpointConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetEndpointConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + 
cause: err, + } + } + } + + if len(errors) > 0 { + return EndpointsConfigDump_StaticEndpointConfigMultiError(errors) + } + + return nil +} + +// EndpointsConfigDump_StaticEndpointConfigMultiError is an error wrapping +// multiple validation errors returned by +// EndpointsConfigDump_StaticEndpointConfig.ValidateAll() if the designated +// constraints aren't met. +type EndpointsConfigDump_StaticEndpointConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EndpointsConfigDump_StaticEndpointConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EndpointsConfigDump_StaticEndpointConfigMultiError) AllErrors() []error { return m } + +// EndpointsConfigDump_StaticEndpointConfigValidationError is the validation +// error returned by EndpointsConfigDump_StaticEndpointConfig.Validate if the +// designated constraints aren't met. +type EndpointsConfigDump_StaticEndpointConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) ErrorName() string { + return "EndpointsConfigDump_StaticEndpointConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpointsConfigDump_StaticEndpointConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EndpointsConfigDump_StaticEndpointConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EndpointsConfigDump_StaticEndpointConfigValidationError{} + +// Validate checks the field values on +// EndpointsConfigDump_DynamicEndpointConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *EndpointsConfigDump_DynamicEndpointConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// EndpointsConfigDump_DynamicEndpointConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// EndpointsConfigDump_DynamicEndpointConfigMultiError, or nil if none found. 
+func (m *EndpointsConfigDump_DynamicEndpointConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetEndpointConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "LastUpdated", + 
reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return EndpointsConfigDump_DynamicEndpointConfigMultiError(errors) + } + + return nil +} + +// EndpointsConfigDump_DynamicEndpointConfigMultiError is an error wrapping +// multiple validation errors returned by +// EndpointsConfigDump_DynamicEndpointConfig.ValidateAll() if the designated +// constraints aren't met. +type EndpointsConfigDump_DynamicEndpointConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EndpointsConfigDump_DynamicEndpointConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m EndpointsConfigDump_DynamicEndpointConfigMultiError) AllErrors() []error { return m } + +// EndpointsConfigDump_DynamicEndpointConfigValidationError is the validation +// error returned by EndpointsConfigDump_DynamicEndpointConfig.Validate if the +// designated constraints aren't met. +type EndpointsConfigDump_DynamicEndpointConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) ErrorName() string { + return "EndpointsConfigDump_DynamicEndpointConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpointsConfigDump_DynamicEndpointConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EndpointsConfigDump_DynamicEndpointConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EndpointsConfigDump_DynamicEndpointConfigValidationError{} + +// Validate checks the field values on EcdsConfigDump_EcdsFilterConfig with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EcdsConfigDump_EcdsFilterConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EcdsConfigDump_EcdsFilterConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// EcdsConfigDump_EcdsFilterConfigMultiError, or nil if none found. +func (m *EcdsConfigDump_EcdsFilterConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *EcdsConfigDump_EcdsFilterConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetEcdsFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "EcdsFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "EcdsFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEcdsFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "EcdsFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err 
!= nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return EcdsConfigDump_EcdsFilterConfigMultiError(errors) + } + + return nil +} + +// EcdsConfigDump_EcdsFilterConfigMultiError is an error wrapping multiple +// validation errors returned by EcdsConfigDump_EcdsFilterConfig.ValidateAll() +// if the designated constraints aren't met. +type EcdsConfigDump_EcdsFilterConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m EcdsConfigDump_EcdsFilterConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EcdsConfigDump_EcdsFilterConfigMultiError) AllErrors() []error { return m } + +// EcdsConfigDump_EcdsFilterConfigValidationError is the validation error +// returned by EcdsConfigDump_EcdsFilterConfig.Validate if the designated +// constraints aren't met. +type EcdsConfigDump_EcdsFilterConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) ErrorName() string { + return "EcdsConfigDump_EcdsFilterConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEcdsConfigDump_EcdsFilterConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EcdsConfigDump_EcdsFilterConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EcdsConfigDump_EcdsFilterConfigValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go index 5b81a0a561..e91e784b4e 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/admin/v3/init_dump.proto package adminv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go index 5a4e8813c8..1718165ddb 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. 
DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/admin/v3/listeners.proto package adminv3 @@ -22,7 +22,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Admin endpoint uses this wrapper for `/listeners` to display listener status information. +// Admin endpoint uses this wrapper for ``/listeners`` to display listener status information. // See :ref:`/listeners ` for more information. type Listeners struct { state protoimpl.MessageState @@ -85,7 +85,6 @@ type ListenerStatus struct { LocalAddress *v3.Address `protobuf:"bytes,2,opt,name=local_address,json=localAddress,proto3" json:"local_address,omitempty"` // The additional addresses the listener is listening on as specified via the :ref:`additional_addresses ` // configuration. - // [#not-implemented-hide:] AdditionalLocalAddresses []*v3.Address `protobuf:"bytes,3,rep,name=additional_local_addresses,json=additionalLocalAddresses,proto3" json:"additional_local_addresses,omitempty"` } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go index 5283c86216..d3f7cc547f 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/admin/v3/memory.proto package adminv3 @@ -31,24 +31,24 @@ type Memory struct { unknownFields protoimpl.UnknownFields // The number of bytes allocated by the heap for Envoy. This is an alias for - // `generic.current_allocated_bytes`. + // ``generic.current_allocated_bytes``. 
Allocated uint64 `protobuf:"varint,1,opt,name=allocated,proto3" json:"allocated,omitempty"` // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for - // `generic.heap_size`. + // ``generic.heap_size``. HeapSize uint64 `protobuf:"varint,2,opt,name=heap_size,json=heapSize,proto3" json:"heap_size,omitempty"` // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards // virtual memory usage, and depending on the OS, typically do not count towards physical memory - // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`. + // usage. This is an alias for ``tcmalloc.pageheap_unmapped_bytes``. PageheapUnmapped uint64 `protobuf:"varint,3,opt,name=pageheap_unmapped,json=pageheapUnmapped,proto3" json:"pageheap_unmapped,omitempty"` // The number of bytes in free, mapped pages in the page heap. These bytes always count towards // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also - // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`. + // count towards physical memory usage. This is an alias for ``tcmalloc.pageheap_free_bytes``. PageheapFree uint64 `protobuf:"varint,4,opt,name=pageheap_free,json=pageheapFree,proto3" json:"pageheap_free,omitempty"` // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias - // for `tcmalloc.current_total_thread_cache_bytes`. + // for ``tcmalloc.current_total_thread_cache_bytes``. TotalThreadCache uint64 `protobuf:"varint,5,opt,name=total_thread_cache,json=totalThreadCache,proto3" json:"total_thread_cache,omitempty"` // The number of bytes of the physical memory usage by the allocator. This is an alias for - // `generic.total_physical_bytes`. + // ``generic.total_physical_bytes``. 
TotalPhysicalBytes uint64 `protobuf:"varint,6,opt,name=total_physical_bytes,json=totalPhysicalBytes,proto3" json:"total_physical_bytes,omitempty"` } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go index 0d626446ec..a7e0729713 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/admin/v3/metrics.proto package adminv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go index 07cbfd556d..b50b0c615b 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/admin/v3/mutex_stats.proto package adminv3 @@ -22,11 +22,11 @@ const ( ) // Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run -// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex` +// under :option:`--enable-mutex-tracing`. For more information, see the ``absl::Mutex`` // [docs](https://abseil.io/about/design/mutex#extra-features). 
// -// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not -// correspond to core clock frequency. For more information, see the `CycleClock` +// *NB*: The wait cycles below are measured by ``absl::base_internal::CycleClock``, and may not +// correspond to core clock frequency. For more information, see the ``CycleClock`` // [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). type MutexStats struct { state protoimpl.MessageState diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go index 7526f8f76b..a3a084baee 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/admin/v3/server_info.proto package adminv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go index 5fbea6b980..1111cc8e56 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/admin/v3/tap.proto package adminv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go index a1889139a0..2a73462bfb 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/annotations/deprecation.proto package annotations diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go index f8543a638e..597d6da5c9 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/annotations/resource.proto package annotations diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/address.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/address.pb.go deleted file mode 100644 index 229af066cb..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/address.pb.go +++ /dev/null @@ -1,805 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/api/v2/core/address.proto - -package core - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type SocketAddress_Protocol int32 - -const ( - SocketAddress_TCP SocketAddress_Protocol = 0 - SocketAddress_UDP SocketAddress_Protocol = 1 -) - -// Enum value maps for SocketAddress_Protocol. 
-var ( - SocketAddress_Protocol_name = map[int32]string{ - 0: "TCP", - 1: "UDP", - } - SocketAddress_Protocol_value = map[string]int32{ - "TCP": 0, - "UDP": 1, - } -) - -func (x SocketAddress_Protocol) Enum() *SocketAddress_Protocol { - p := new(SocketAddress_Protocol) - *p = x - return p -} - -func (x SocketAddress_Protocol) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (SocketAddress_Protocol) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_api_v2_core_address_proto_enumTypes[0].Descriptor() -} - -func (SocketAddress_Protocol) Type() protoreflect.EnumType { - return &file_envoy_api_v2_core_address_proto_enumTypes[0] -} - -func (x SocketAddress_Protocol) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use SocketAddress_Protocol.Descriptor instead. -func (SocketAddress_Protocol) EnumDescriptor() ([]byte, []int) { - return file_envoy_api_v2_core_address_proto_rawDescGZIP(), []int{1, 0} -} - -type Pipe struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Unix Domain Socket path. On Linux, paths starting with '@' will use the - // abstract namespace. The starting '@' is replaced by a null byte by Envoy. - // Paths starting with '@' will result in an error in environments other than - // Linux. - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - // The mode for the Pipe. Not applicable for abstract sockets. 
- Mode uint32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` -} - -func (x *Pipe) Reset() { - *x = Pipe{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_address_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Pipe) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Pipe) ProtoMessage() {} - -func (x *Pipe) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_address_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Pipe.ProtoReflect.Descriptor instead. -func (*Pipe) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_address_proto_rawDescGZIP(), []int{0} -} - -func (x *Pipe) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *Pipe) GetMode() uint32 { - if x != nil { - return x.Mode - } - return 0 -} - -// [#next-free-field: 7] -type SocketAddress struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Protocol SocketAddress_Protocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=envoy.api.v2.core.SocketAddress_Protocol" json:"protocol,omitempty"` - // The address for this socket. :ref:`Listeners ` will bind - // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` - // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: - // It is possible to distinguish a Listener address via the prefix/suffix matching - // in :ref:`FilterChainMatch `.] When used - // within an upstream :ref:`BindConfig `, the address - // controls the source address of outbound connections. 
For :ref:`clusters - // `, the cluster type determines whether the - // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS - // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized - // via :ref:`resolver_name `. - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - // Types that are assignable to PortSpecifier: - // *SocketAddress_PortValue - // *SocketAddress_NamedPort - PortSpecifier isSocketAddress_PortSpecifier `protobuf_oneof:"port_specifier"` - // The name of the custom resolver. This must have been registered with Envoy. If - // this is empty, a context dependent default applies. If the address is a concrete - // IP address, no resolution will occur. If address is a hostname this - // should be set for resolution other than DNS. Specifying a custom resolver with - // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. - ResolverName string `protobuf:"bytes,5,opt,name=resolver_name,json=resolverName,proto3" json:"resolver_name,omitempty"` - // When binding to an IPv6 address above, this enables `IPv4 compatibility - // `_. Binding to ``::`` will - // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into - // IPv6 space as ``::FFFF:``. 
- Ipv4Compat bool `protobuf:"varint,6,opt,name=ipv4_compat,json=ipv4Compat,proto3" json:"ipv4_compat,omitempty"` -} - -func (x *SocketAddress) Reset() { - *x = SocketAddress{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_address_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SocketAddress) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SocketAddress) ProtoMessage() {} - -func (x *SocketAddress) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_address_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SocketAddress.ProtoReflect.Descriptor instead. -func (*SocketAddress) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_address_proto_rawDescGZIP(), []int{1} -} - -func (x *SocketAddress) GetProtocol() SocketAddress_Protocol { - if x != nil { - return x.Protocol - } - return SocketAddress_TCP -} - -func (x *SocketAddress) GetAddress() string { - if x != nil { - return x.Address - } - return "" -} - -func (m *SocketAddress) GetPortSpecifier() isSocketAddress_PortSpecifier { - if m != nil { - return m.PortSpecifier - } - return nil -} - -func (x *SocketAddress) GetPortValue() uint32 { - if x, ok := x.GetPortSpecifier().(*SocketAddress_PortValue); ok { - return x.PortValue - } - return 0 -} - -func (x *SocketAddress) GetNamedPort() string { - if x, ok := x.GetPortSpecifier().(*SocketAddress_NamedPort); ok { - return x.NamedPort - } - return "" -} - -func (x *SocketAddress) GetResolverName() string { - if x != nil { - return x.ResolverName - } - return "" -} - -func (x *SocketAddress) GetIpv4Compat() bool { - if x != nil { - return x.Ipv4Compat - } - return false -} - -type isSocketAddress_PortSpecifier interface { - 
isSocketAddress_PortSpecifier() -} - -type SocketAddress_PortValue struct { - PortValue uint32 `protobuf:"varint,3,opt,name=port_value,json=portValue,proto3,oneof"` -} - -type SocketAddress_NamedPort struct { - // This is only valid if :ref:`resolver_name - // ` is specified below and the - // named resolver is capable of named port resolution. - NamedPort string `protobuf:"bytes,4,opt,name=named_port,json=namedPort,proto3,oneof"` -} - -func (*SocketAddress_PortValue) isSocketAddress_PortSpecifier() {} - -func (*SocketAddress_NamedPort) isSocketAddress_PortSpecifier() {} - -type TcpKeepalive struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Maximum number of keepalive probes to send without response before deciding - // the connection is dead. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 9.) - KeepaliveProbes *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=keepalive_probes,json=keepaliveProbes,proto3" json:"keepalive_probes,omitempty"` - // The number of seconds a connection needs to be idle before keep-alive probes - // start being sent. Default is to use the OS level configuration (unless - // overridden, Linux defaults to 7200s (i.e., 2 hours.) - KeepaliveTime *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=keepalive_time,json=keepaliveTime,proto3" json:"keepalive_time,omitempty"` - // The number of seconds between keep-alive probes. Default is to use the OS - // level configuration (unless overridden, Linux defaults to 75s.) 
- KeepaliveInterval *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=keepalive_interval,json=keepaliveInterval,proto3" json:"keepalive_interval,omitempty"` -} - -func (x *TcpKeepalive) Reset() { - *x = TcpKeepalive{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_address_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TcpKeepalive) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TcpKeepalive) ProtoMessage() {} - -func (x *TcpKeepalive) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_address_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TcpKeepalive.ProtoReflect.Descriptor instead. -func (*TcpKeepalive) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_address_proto_rawDescGZIP(), []int{2} -} - -func (x *TcpKeepalive) GetKeepaliveProbes() *wrappers.UInt32Value { - if x != nil { - return x.KeepaliveProbes - } - return nil -} - -func (x *TcpKeepalive) GetKeepaliveTime() *wrappers.UInt32Value { - if x != nil { - return x.KeepaliveTime - } - return nil -} - -func (x *TcpKeepalive) GetKeepaliveInterval() *wrappers.UInt32Value { - if x != nil { - return x.KeepaliveInterval - } - return nil -} - -type BindConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The address to bind to when creating a socket. - SourceAddress *SocketAddress `protobuf:"bytes,1,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"` - // Whether to set the *IP_FREEBIND* option when creating the socket. When this - // flag is set to true, allows the :ref:`source_address - // ` to be an IP address - // that is not configured on the system running Envoy. 
When this flag is set - // to false, the option *IP_FREEBIND* is disabled on the socket. When this - // flag is not set (default), the socket is not modified, i.e. the option is - // neither enabled nor disabled. - Freebind *wrappers.BoolValue `protobuf:"bytes,2,opt,name=freebind,proto3" json:"freebind,omitempty"` - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` -} - -func (x *BindConfig) Reset() { - *x = BindConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_address_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BindConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BindConfig) ProtoMessage() {} - -func (x *BindConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_address_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BindConfig.ProtoReflect.Descriptor instead. -func (*BindConfig) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_address_proto_rawDescGZIP(), []int{3} -} - -func (x *BindConfig) GetSourceAddress() *SocketAddress { - if x != nil { - return x.SourceAddress - } - return nil -} - -func (x *BindConfig) GetFreebind() *wrappers.BoolValue { - if x != nil { - return x.Freebind - } - return nil -} - -func (x *BindConfig) GetSocketOptions() []*SocketOption { - if x != nil { - return x.SocketOptions - } - return nil -} - -// Addresses specify either a logical or physical address and port, which are -// used to tell Envoy where to bind/listen, connect to upstream and find -// management servers. 
-type Address struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Address: - // *Address_SocketAddress - // *Address_Pipe - Address isAddress_Address `protobuf_oneof:"address"` -} - -func (x *Address) Reset() { - *x = Address{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_address_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Address) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Address) ProtoMessage() {} - -func (x *Address) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_address_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Address.ProtoReflect.Descriptor instead. 
-func (*Address) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_address_proto_rawDescGZIP(), []int{4} -} - -func (m *Address) GetAddress() isAddress_Address { - if m != nil { - return m.Address - } - return nil -} - -func (x *Address) GetSocketAddress() *SocketAddress { - if x, ok := x.GetAddress().(*Address_SocketAddress); ok { - return x.SocketAddress - } - return nil -} - -func (x *Address) GetPipe() *Pipe { - if x, ok := x.GetAddress().(*Address_Pipe); ok { - return x.Pipe - } - return nil -} - -type isAddress_Address interface { - isAddress_Address() -} - -type Address_SocketAddress struct { - SocketAddress *SocketAddress `protobuf:"bytes,1,opt,name=socket_address,json=socketAddress,proto3,oneof"` -} - -type Address_Pipe struct { - Pipe *Pipe `protobuf:"bytes,2,opt,name=pipe,proto3,oneof"` -} - -func (*Address_SocketAddress) isAddress_Address() {} - -func (*Address_Pipe) isAddress_Address() {} - -// CidrRange specifies an IP Address and a prefix length to construct -// the subnet mask for a `CIDR `_ range. -type CidrRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. - AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"` - // Length of prefix, e.g. 0, 32. Defaults to 0 when unset. 
- PrefixLen *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"` -} - -func (x *CidrRange) Reset() { - *x = CidrRange{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_address_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CidrRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CidrRange) ProtoMessage() {} - -func (x *CidrRange) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_address_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CidrRange.ProtoReflect.Descriptor instead. -func (*CidrRange) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_address_proto_rawDescGZIP(), []int{5} -} - -func (x *CidrRange) GetAddressPrefix() string { - if x != nil { - return x.AddressPrefix - } - return "" -} - -func (x *CidrRange) GetPrefixLen() *wrappers.UInt32Value { - if x != nil { - return x.PrefixLen - } - return nil -} - -var File_envoy_api_v2_core_address_proto protoreflect.FileDescriptor - -var file_envoy_api_v2_core_address_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, - 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, - 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, - 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x41, 0x0a, 0x04, 0x50, 0x69, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, - 0x20, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0xff, 0x03, - 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x22, 0xcb, 0x02, 0x0a, 0x0d, 0x53, 0x6f, 0x63, 0x6b, 0x65, - 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, - 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, - 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x21, 0x0a, 0x07, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x20, 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2a, 0x0a, 0x0a, - 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x0d, - 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x48, 0x00, 0x52, 0x09, 0x70, - 0x6f, 0x72, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, - 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, - 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, - 0x0a, 0x0b, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x70, 0x76, 0x34, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x22, - 0x1c, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x54, - 0x43, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x01, 0x42, 0x15, 0x0a, - 0x0e, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, - 0x03, 0xf8, 0x42, 0x01, 0x22, 0xe9, 0x01, 0x0a, 0x0c, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, - 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, - 0x76, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x6b, - 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x73, 0x12, 0x43, - 0x0a, 0x0e, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, - 0x61, 0x6c, 0x75, 0x65, 
0x52, 0x0d, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, - 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x6b, - 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x22, 0xdf, 0x01, 0x0a, 0x0a, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x51, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x6f, 0x63, 0x6b, - 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, - 0x02, 0x10, 0x01, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x12, 0x36, 0x0a, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x12, 0x46, 0x0a, 0x0e, 0x73, 0x6f, - 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x93, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 
0x72, 0x65, 0x73, 0x73, 0x12, 0x49, - 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, - 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, - 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2d, 0x0a, 0x04, 0x70, 0x69, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x69, 0x70, 0x65, - 0x48, 0x00, 0x52, 0x04, 0x70, 0x69, 0x70, 0x65, 0x42, 0x0e, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x82, 0x01, 0x0a, 0x09, 0x43, 0x69, 0x64, - 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, 0x0d, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, - 0x80, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4c, 0x65, 0x6e, 0x42, 0x8f, 0x01, - 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x42, 0x0c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 
0x01, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xf2, 0x98, 0xfe, 0x8f, 0x05, - 0x16, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_api_v2_core_address_proto_rawDescOnce sync.Once - file_envoy_api_v2_core_address_proto_rawDescData = file_envoy_api_v2_core_address_proto_rawDesc -) - -func file_envoy_api_v2_core_address_proto_rawDescGZIP() []byte { - file_envoy_api_v2_core_address_proto_rawDescOnce.Do(func() { - file_envoy_api_v2_core_address_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_api_v2_core_address_proto_rawDescData) - }) - return file_envoy_api_v2_core_address_proto_rawDescData -} - -var file_envoy_api_v2_core_address_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_api_v2_core_address_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_envoy_api_v2_core_address_proto_goTypes = []interface{}{ - (SocketAddress_Protocol)(0), // 0: envoy.api.v2.core.SocketAddress.Protocol - (*Pipe)(nil), // 1: envoy.api.v2.core.Pipe - (*SocketAddress)(nil), // 2: envoy.api.v2.core.SocketAddress - (*TcpKeepalive)(nil), // 3: envoy.api.v2.core.TcpKeepalive - (*BindConfig)(nil), // 4: envoy.api.v2.core.BindConfig - (*Address)(nil), // 5: envoy.api.v2.core.Address - (*CidrRange)(nil), // 6: envoy.api.v2.core.CidrRange - (*wrappers.UInt32Value)(nil), // 7: google.protobuf.UInt32Value - (*wrappers.BoolValue)(nil), // 8: google.protobuf.BoolValue - (*SocketOption)(nil), // 9: envoy.api.v2.core.SocketOption -} -var 
file_envoy_api_v2_core_address_proto_depIdxs = []int32{ - 0, // 0: envoy.api.v2.core.SocketAddress.protocol:type_name -> envoy.api.v2.core.SocketAddress.Protocol - 7, // 1: envoy.api.v2.core.TcpKeepalive.keepalive_probes:type_name -> google.protobuf.UInt32Value - 7, // 2: envoy.api.v2.core.TcpKeepalive.keepalive_time:type_name -> google.protobuf.UInt32Value - 7, // 3: envoy.api.v2.core.TcpKeepalive.keepalive_interval:type_name -> google.protobuf.UInt32Value - 2, // 4: envoy.api.v2.core.BindConfig.source_address:type_name -> envoy.api.v2.core.SocketAddress - 8, // 5: envoy.api.v2.core.BindConfig.freebind:type_name -> google.protobuf.BoolValue - 9, // 6: envoy.api.v2.core.BindConfig.socket_options:type_name -> envoy.api.v2.core.SocketOption - 2, // 7: envoy.api.v2.core.Address.socket_address:type_name -> envoy.api.v2.core.SocketAddress - 1, // 8: envoy.api.v2.core.Address.pipe:type_name -> envoy.api.v2.core.Pipe - 7, // 9: envoy.api.v2.core.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value - 10, // [10:10] is the sub-list for method output_type - 10, // [10:10] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name -} - -func init() { file_envoy_api_v2_core_address_proto_init() } -func file_envoy_api_v2_core_address_proto_init() { - if File_envoy_api_v2_core_address_proto != nil { - return - } - file_envoy_api_v2_core_socket_option_proto_init() - if !protoimpl.UnsafeEnabled { - file_envoy_api_v2_core_address_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Pipe); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_address_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SocketAddress); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_address_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TcpKeepalive); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_address_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BindConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_address_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_address_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CidrRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_api_v2_core_address_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*SocketAddress_PortValue)(nil), - (*SocketAddress_NamedPort)(nil), - } - file_envoy_api_v2_core_address_proto_msgTypes[4].OneofWrappers = []interface{}{ - (*Address_SocketAddress)(nil), - (*Address_Pipe)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_api_v2_core_address_proto_rawDesc, - NumEnums: 1, - NumMessages: 6, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_api_v2_core_address_proto_goTypes, - DependencyIndexes: file_envoy_api_v2_core_address_proto_depIdxs, - EnumInfos: file_envoy_api_v2_core_address_proto_enumTypes, - MessageInfos: file_envoy_api_v2_core_address_proto_msgTypes, - }.Build() - 
File_envoy_api_v2_core_address_proto = out.File - file_envoy_api_v2_core_address_proto_rawDesc = nil - file_envoy_api_v2_core_address_proto_goTypes = nil - file_envoy_api_v2_core_address_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/address.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/address.pb.validate.go deleted file mode 100644 index 2b9930480e..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/address.pb.validate.go +++ /dev/null @@ -1,999 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/api/v2/core/address.proto - -package core - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on Pipe with the rules defined in the proto -// definition for this message. If any rules are violated, the first error -// encountered is returned, or nil if there are no violations. -func (m *Pipe) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Pipe with the rules defined in the -// proto definition for this message. If any rules are violated, the result is -// a list of violation errors wrapped in PipeMultiError, or nil if none found. 
-func (m *Pipe) ValidateAll() error { - return m.validate(true) -} - -func (m *Pipe) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(m.GetPath()) < 1 { - err := PipeValidationError{ - field: "Path", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if m.GetMode() > 511 { - err := PipeValidationError{ - field: "Mode", - reason: "value must be less than or equal to 511", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(errors) > 0 { - return PipeMultiError(errors) - } - - return nil -} - -// PipeMultiError is an error wrapping multiple validation errors returned by -// Pipe.ValidateAll() if the designated constraints aren't met. -type PipeMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m PipeMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m PipeMultiError) AllErrors() []error { return m } - -// PipeValidationError is the validation error returned by Pipe.Validate if the -// designated constraints aren't met. -type PipeValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PipeValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e PipeValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PipeValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e PipeValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e PipeValidationError) ErrorName() string { return "PipeValidationError" } - -// Error satisfies the builtin error interface -func (e PipeValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPipe.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PipeValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PipeValidationError{} - -// Validate checks the field values on SocketAddress with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *SocketAddress) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on SocketAddress with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in SocketAddressMultiError, or -// nil if none found. 
-func (m *SocketAddress) ValidateAll() error { - return m.validate(true) -} - -func (m *SocketAddress) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if _, ok := SocketAddress_Protocol_name[int32(m.GetProtocol())]; !ok { - err := SocketAddressValidationError{ - field: "Protocol", - reason: "value must be one of the defined enum values", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(m.GetAddress()) < 1 { - err := SocketAddressValidationError{ - field: "Address", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - // no validation rules for ResolverName - - // no validation rules for Ipv4Compat - - switch m.PortSpecifier.(type) { - - case *SocketAddress_PortValue: - - if m.GetPortValue() > 65535 { - err := SocketAddressValidationError{ - field: "PortValue", - reason: "value must be less than or equal to 65535", - } - if !all { - return err - } - errors = append(errors, err) - } - - case *SocketAddress_NamedPort: - // no validation rules for NamedPort - - default: - err := SocketAddressValidationError{ - field: "PortSpecifier", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return SocketAddressMultiError(errors) - } - - return nil -} - -// SocketAddressMultiError is an error wrapping multiple validation errors -// returned by SocketAddress.ValidateAll() if the designated constraints -// aren't met. -type SocketAddressMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m SocketAddressMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. 
-func (m SocketAddressMultiError) AllErrors() []error { return m } - -// SocketAddressValidationError is the validation error returned by -// SocketAddress.Validate if the designated constraints aren't met. -type SocketAddressValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e SocketAddressValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SocketAddressValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SocketAddressValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SocketAddressValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e SocketAddressValidationError) ErrorName() string { return "SocketAddressValidationError" } - -// Error satisfies the builtin error interface -func (e SocketAddressValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSocketAddress.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SocketAddressValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SocketAddressValidationError{} - -// Validate checks the field values on TcpKeepalive with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *TcpKeepalive) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on TcpKeepalive with the rules defined -// in the proto definition for this message. 
If any rules are violated, the -// result is a list of violation errors wrapped in TcpKeepaliveMultiError, or -// nil if none found. -func (m *TcpKeepalive) ValidateAll() error { - return m.validate(true) -} - -func (m *TcpKeepalive) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetKeepaliveProbes()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, TcpKeepaliveValidationError{ - field: "KeepaliveProbes", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, TcpKeepaliveValidationError{ - field: "KeepaliveProbes", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetKeepaliveProbes()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return TcpKeepaliveValidationError{ - field: "KeepaliveProbes", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetKeepaliveTime()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, TcpKeepaliveValidationError{ - field: "KeepaliveTime", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, TcpKeepaliveValidationError{ - field: "KeepaliveTime", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetKeepaliveTime()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return TcpKeepaliveValidationError{ - field: "KeepaliveTime", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := 
interface{}(m.GetKeepaliveInterval()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, TcpKeepaliveValidationError{ - field: "KeepaliveInterval", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, TcpKeepaliveValidationError{ - field: "KeepaliveInterval", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetKeepaliveInterval()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return TcpKeepaliveValidationError{ - field: "KeepaliveInterval", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return TcpKeepaliveMultiError(errors) - } - - return nil -} - -// TcpKeepaliveMultiError is an error wrapping multiple validation errors -// returned by TcpKeepalive.ValidateAll() if the designated constraints aren't met. -type TcpKeepaliveMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m TcpKeepaliveMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m TcpKeepaliveMultiError) AllErrors() []error { return m } - -// TcpKeepaliveValidationError is the validation error returned by -// TcpKeepalive.Validate if the designated constraints aren't met. -type TcpKeepaliveValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e TcpKeepaliveValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e TcpKeepaliveValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. 
-func (e TcpKeepaliveValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e TcpKeepaliveValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e TcpKeepaliveValidationError) ErrorName() string { return "TcpKeepaliveValidationError" } - -// Error satisfies the builtin error interface -func (e TcpKeepaliveValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sTcpKeepalive.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = TcpKeepaliveValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = TcpKeepaliveValidationError{} - -// Validate checks the field values on BindConfig with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *BindConfig) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on BindConfig with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in BindConfigMultiError, or -// nil if none found. 
-func (m *BindConfig) ValidateAll() error { - return m.validate(true) -} - -func (m *BindConfig) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if m.GetSourceAddress() == nil { - err := BindConfigValidationError{ - field: "SourceAddress", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetSourceAddress()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, BindConfigValidationError{ - field: "SourceAddress", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, BindConfigValidationError{ - field: "SourceAddress", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetSourceAddress()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return BindConfigValidationError{ - field: "SourceAddress", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetFreebind()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, BindConfigValidationError{ - field: "Freebind", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, BindConfigValidationError{ - field: "Freebind", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetFreebind()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return BindConfigValidationError{ - field: "Freebind", - reason: "embedded message failed validation", - cause: err, - } - } - } - - for idx, item := range 
m.GetSocketOptions() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, BindConfigValidationError{ - field: fmt.Sprintf("SocketOptions[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, BindConfigValidationError{ - field: fmt.Sprintf("SocketOptions[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return BindConfigValidationError{ - field: fmt.Sprintf("SocketOptions[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return BindConfigMultiError(errors) - } - - return nil -} - -// BindConfigMultiError is an error wrapping multiple validation errors -// returned by BindConfig.ValidateAll() if the designated constraints aren't met. -type BindConfigMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m BindConfigMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m BindConfigMultiError) AllErrors() []error { return m } - -// BindConfigValidationError is the validation error returned by -// BindConfig.Validate if the designated constraints aren't met. -type BindConfigValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e BindConfigValidationError) Field() string { return e.field } - -// Reason function returns reason value. 
-func (e BindConfigValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e BindConfigValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e BindConfigValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e BindConfigValidationError) ErrorName() string { return "BindConfigValidationError" } - -// Error satisfies the builtin error interface -func (e BindConfigValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sBindConfig.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = BindConfigValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = BindConfigValidationError{} - -// Validate checks the field values on Address with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *Address) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Address with the rules defined in the -// proto definition for this message. If any rules are violated, the result is -// a list of violation errors wrapped in AddressMultiError, or nil if none found. 
-func (m *Address) ValidateAll() error { - return m.validate(true) -} - -func (m *Address) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.Address.(type) { - - case *Address_SocketAddress: - - if all { - switch v := interface{}(m.GetSocketAddress()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, AddressValidationError{ - field: "SocketAddress", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, AddressValidationError{ - field: "SocketAddress", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetSocketAddress()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return AddressValidationError{ - field: "SocketAddress", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *Address_Pipe: - - if all { - switch v := interface{}(m.GetPipe()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, AddressValidationError{ - field: "Pipe", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, AddressValidationError{ - field: "Pipe", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetPipe()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return AddressValidationError{ - field: "Pipe", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := AddressValidationError{ - field: "Address", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - 
return AddressMultiError(errors) - } - - return nil -} - -// AddressMultiError is an error wrapping multiple validation errors returned -// by Address.ValidateAll() if the designated constraints aren't met. -type AddressMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m AddressMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m AddressMultiError) AllErrors() []error { return m } - -// AddressValidationError is the validation error returned by Address.Validate -// if the designated constraints aren't met. -type AddressValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e AddressValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e AddressValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e AddressValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e AddressValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e AddressValidationError) ErrorName() string { return "AddressValidationError" } - -// Error satisfies the builtin error interface -func (e AddressValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sAddress.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = AddressValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = AddressValidationError{} - -// Validate checks the field values on CidrRange with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *CidrRange) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on CidrRange with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in CidrRangeMultiError, or nil -// if none found. 
-func (m *CidrRange) ValidateAll() error { - return m.validate(true) -} - -func (m *CidrRange) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(m.GetAddressPrefix()) < 1 { - err := CidrRangeValidationError{ - field: "AddressPrefix", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if wrapper := m.GetPrefixLen(); wrapper != nil { - - if wrapper.GetValue() > 128 { - err := CidrRangeValidationError{ - field: "PrefixLen", - reason: "value must be less than or equal to 128", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - - if len(errors) > 0 { - return CidrRangeMultiError(errors) - } - - return nil -} - -// CidrRangeMultiError is an error wrapping multiple validation errors returned -// by CidrRange.ValidateAll() if the designated constraints aren't met. -type CidrRangeMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m CidrRangeMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m CidrRangeMultiError) AllErrors() []error { return m } - -// CidrRangeValidationError is the validation error returned by -// CidrRange.Validate if the designated constraints aren't met. -type CidrRangeValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e CidrRangeValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e CidrRangeValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e CidrRangeValidationError) Cause() error { return e.cause } - -// Key function returns key value. 
-func (e CidrRangeValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e CidrRangeValidationError) ErrorName() string { return "CidrRangeValidationError" } - -// Error satisfies the builtin error interface -func (e CidrRangeValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sCidrRange.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = CidrRangeValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = CidrRangeValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/backoff.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/backoff.pb.go deleted file mode 100644 index 5d58463796..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/backoff.pb.go +++ /dev/null @@ -1,191 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/api/v2/core/backoff.proto - -package core - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Configuration defining a jittered exponential back off strategy. 
-type BackoffStrategy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The base interval to be used for the next back off computation. It should - // be greater than zero and less than or equal to :ref:`max_interval - // `. - BaseInterval *duration.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` - // Specifies the maximum interval between retries. This parameter is optional, - // but must be greater than or equal to the :ref:`base_interval - // ` if set. The default - // is 10 times the :ref:`base_interval - // `. - MaxInterval *duration.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` -} - -func (x *BackoffStrategy) Reset() { - *x = BackoffStrategy{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_backoff_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BackoffStrategy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BackoffStrategy) ProtoMessage() {} - -func (x *BackoffStrategy) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_backoff_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BackoffStrategy.ProtoReflect.Descriptor instead. 
-func (*BackoffStrategy) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_backoff_proto_rawDescGZIP(), []int{0} -} - -func (x *BackoffStrategy) GetBaseInterval() *duration.Duration { - if x != nil { - return x.BaseInterval - } - return nil -} - -func (x *BackoffStrategy) GetMaxInterval() *duration.Duration { - if x != nil { - return x.MaxInterval - } - return nil -} - -var File_envoy_api_v2_core_backoff_proto protoreflect.FileDescriptor - -var file_envoy_api_v2_core_backoff_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa9, 0x01, 0x0a, - 0x0f, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, - 0x12, 0x4e, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 
0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, - 0x84, 0x3d, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, - 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x8f, 0x01, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x0c, 0x42, 0x61, - 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, - 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, - 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_envoy_api_v2_core_backoff_proto_rawDescOnce sync.Once - file_envoy_api_v2_core_backoff_proto_rawDescData = file_envoy_api_v2_core_backoff_proto_rawDesc -) - -func file_envoy_api_v2_core_backoff_proto_rawDescGZIP() []byte { - file_envoy_api_v2_core_backoff_proto_rawDescOnce.Do(func() { - file_envoy_api_v2_core_backoff_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_envoy_api_v2_core_backoff_proto_rawDescData) - }) - return file_envoy_api_v2_core_backoff_proto_rawDescData -} - -var file_envoy_api_v2_core_backoff_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_envoy_api_v2_core_backoff_proto_goTypes = []interface{}{ - (*BackoffStrategy)(nil), // 0: envoy.api.v2.core.BackoffStrategy - (*duration.Duration)(nil), // 1: google.protobuf.Duration -} -var file_envoy_api_v2_core_backoff_proto_depIdxs = []int32{ - 1, // 0: envoy.api.v2.core.BackoffStrategy.base_interval:type_name -> google.protobuf.Duration - 1, // 1: envoy.api.v2.core.BackoffStrategy.max_interval:type_name -> google.protobuf.Duration - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_envoy_api_v2_core_backoff_proto_init() } -func file_envoy_api_v2_core_backoff_proto_init() { - if File_envoy_api_v2_core_backoff_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_envoy_api_v2_core_backoff_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BackoffStrategy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_api_v2_core_backoff_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_api_v2_core_backoff_proto_goTypes, - DependencyIndexes: file_envoy_api_v2_core_backoff_proto_depIdxs, - MessageInfos: file_envoy_api_v2_core_backoff_proto_msgTypes, - }.Build() - File_envoy_api_v2_core_backoff_proto = out.File - file_envoy_api_v2_core_backoff_proto_rawDesc = 
nil - file_envoy_api_v2_core_backoff_proto_goTypes = nil - file_envoy_api_v2_core_backoff_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/backoff.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/backoff.pb.validate.go deleted file mode 100644 index 3a4f727f3d..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/backoff.pb.validate.go +++ /dev/null @@ -1,207 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/api/v2/core/backoff.proto - -package core - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on BackoffStrategy with the rules defined -// in the proto definition for this message. If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *BackoffStrategy) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on BackoffStrategy with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// BackoffStrategyMultiError, or nil if none found. 
-func (m *BackoffStrategy) ValidateAll() error { - return m.validate(true) -} - -func (m *BackoffStrategy) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if m.GetBaseInterval() == nil { - err := BackoffStrategyValidationError{ - field: "BaseInterval", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if d := m.GetBaseInterval(); d != nil { - dur, err := d.AsDuration(), d.CheckValid() - if err != nil { - err = BackoffStrategyValidationError{ - field: "BaseInterval", - reason: "value is not a valid duration", - cause: err, - } - if !all { - return err - } - errors = append(errors, err) - } else { - - gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) - - if dur < gte { - err := BackoffStrategyValidationError{ - field: "BaseInterval", - reason: "value must be greater than or equal to 1ms", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - } - - if d := m.GetMaxInterval(); d != nil { - dur, err := d.AsDuration(), d.CheckValid() - if err != nil { - err = BackoffStrategyValidationError{ - field: "MaxInterval", - reason: "value is not a valid duration", - cause: err, - } - if !all { - return err - } - errors = append(errors, err) - } else { - - gt := time.Duration(0*time.Second + 0*time.Nanosecond) - - if dur <= gt { - err := BackoffStrategyValidationError{ - field: "MaxInterval", - reason: "value must be greater than 0s", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - } - - if len(errors) > 0 { - return BackoffStrategyMultiError(errors) - } - - return nil -} - -// BackoffStrategyMultiError is an error wrapping multiple validation errors -// returned by BackoffStrategy.ValidateAll() if the designated constraints -// aren't met. -type BackoffStrategyMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m BackoffStrategyMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m BackoffStrategyMultiError) AllErrors() []error { return m } - -// BackoffStrategyValidationError is the validation error returned by -// BackoffStrategy.Validate if the designated constraints aren't met. -type BackoffStrategyValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e BackoffStrategyValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e BackoffStrategyValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e BackoffStrategyValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e BackoffStrategyValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e BackoffStrategyValidationError) ErrorName() string { return "BackoffStrategyValidationError" } - -// Error satisfies the builtin error interface -func (e BackoffStrategyValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sBackoffStrategy.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = BackoffStrategyValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = BackoffStrategyValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/base.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/base.pb.go deleted file mode 100644 index ae88a6e889..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/base.pb.go +++ /dev/null @@ -1,2154 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/api/v2/core/base.proto - -package core - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _type "github.com/envoyproxy/go-control-plane/envoy/type" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" - _struct "github.com/golang/protobuf/ptypes/struct" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Envoy supports :ref:`upstream priority routing -// ` both at the route and the virtual -// cluster level. The current priority implementation uses different connection -// pool and circuit breaking settings for each priority level. This means that -// even for HTTP/2 requests, two physical connections will be used to an -// upstream host. In the future Envoy will likely support true HTTP/2 priority -// over a single upstream connection. -type RoutingPriority int32 - -const ( - RoutingPriority_DEFAULT RoutingPriority = 0 - RoutingPriority_HIGH RoutingPriority = 1 -) - -// Enum value maps for RoutingPriority. -var ( - RoutingPriority_name = map[int32]string{ - 0: "DEFAULT", - 1: "HIGH", - } - RoutingPriority_value = map[string]int32{ - "DEFAULT": 0, - "HIGH": 1, - } -) - -func (x RoutingPriority) Enum() *RoutingPriority { - p := new(RoutingPriority) - *p = x - return p -} - -func (x RoutingPriority) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (RoutingPriority) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_api_v2_core_base_proto_enumTypes[0].Descriptor() -} - -func (RoutingPriority) Type() protoreflect.EnumType { - return &file_envoy_api_v2_core_base_proto_enumTypes[0] -} - -func (x RoutingPriority) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use RoutingPriority.Descriptor instead. -func (RoutingPriority) EnumDescriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{0} -} - -// HTTP request method. 
-type RequestMethod int32 - -const ( - RequestMethod_METHOD_UNSPECIFIED RequestMethod = 0 - RequestMethod_GET RequestMethod = 1 - RequestMethod_HEAD RequestMethod = 2 - RequestMethod_POST RequestMethod = 3 - RequestMethod_PUT RequestMethod = 4 - RequestMethod_DELETE RequestMethod = 5 - RequestMethod_CONNECT RequestMethod = 6 - RequestMethod_OPTIONS RequestMethod = 7 - RequestMethod_TRACE RequestMethod = 8 - RequestMethod_PATCH RequestMethod = 9 -) - -// Enum value maps for RequestMethod. -var ( - RequestMethod_name = map[int32]string{ - 0: "METHOD_UNSPECIFIED", - 1: "GET", - 2: "HEAD", - 3: "POST", - 4: "PUT", - 5: "DELETE", - 6: "CONNECT", - 7: "OPTIONS", - 8: "TRACE", - 9: "PATCH", - } - RequestMethod_value = map[string]int32{ - "METHOD_UNSPECIFIED": 0, - "GET": 1, - "HEAD": 2, - "POST": 3, - "PUT": 4, - "DELETE": 5, - "CONNECT": 6, - "OPTIONS": 7, - "TRACE": 8, - "PATCH": 9, - } -) - -func (x RequestMethod) Enum() *RequestMethod { - p := new(RequestMethod) - *p = x - return p -} - -func (x RequestMethod) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (RequestMethod) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_api_v2_core_base_proto_enumTypes[1].Descriptor() -} - -func (RequestMethod) Type() protoreflect.EnumType { - return &file_envoy_api_v2_core_base_proto_enumTypes[1] -} - -func (x RequestMethod) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use RequestMethod.Descriptor instead. -func (RequestMethod) EnumDescriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{1} -} - -// Identifies the direction of the traffic relative to the local Envoy. -type TrafficDirection int32 - -const ( - // Default option is unspecified. - TrafficDirection_UNSPECIFIED TrafficDirection = 0 - // The transport is used for incoming traffic. 
- TrafficDirection_INBOUND TrafficDirection = 1 - // The transport is used for outgoing traffic. - TrafficDirection_OUTBOUND TrafficDirection = 2 -) - -// Enum value maps for TrafficDirection. -var ( - TrafficDirection_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "INBOUND", - 2: "OUTBOUND", - } - TrafficDirection_value = map[string]int32{ - "UNSPECIFIED": 0, - "INBOUND": 1, - "OUTBOUND": 2, - } -) - -func (x TrafficDirection) Enum() *TrafficDirection { - p := new(TrafficDirection) - *p = x - return p -} - -func (x TrafficDirection) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (TrafficDirection) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_api_v2_core_base_proto_enumTypes[2].Descriptor() -} - -func (TrafficDirection) Type() protoreflect.EnumType { - return &file_envoy_api_v2_core_base_proto_enumTypes[2] -} - -func (x TrafficDirection) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use TrafficDirection.Descriptor instead. -func (TrafficDirection) EnumDescriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{2} -} - -// Identifies location of where either Envoy runs or where upstream hosts run. -type Locality struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Region this :ref:`zone ` belongs to. - Region string `protobuf:"bytes,1,opt,name=region,proto3" json:"region,omitempty"` - // Defines the local service zone where Envoy is running. Though optional, it - // should be set if discovery service routing is used and the discovery - // service exposes :ref:`zone data `, - // either in this message or via :option:`--service-zone`. The meaning of zone - // is context dependent, e.g. `Availability Zone (AZ) - // `_ - // on AWS, `Zone `_ on - // GCP, etc. 
- Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - // When used for locality of upstream hosts, this field further splits zone - // into smaller chunks of sub-zones so they can be load balanced - // independently. - SubZone string `protobuf:"bytes,3,opt,name=sub_zone,json=subZone,proto3" json:"sub_zone,omitempty"` -} - -func (x *Locality) Reset() { - *x = Locality{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Locality) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Locality) ProtoMessage() {} - -func (x *Locality) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Locality.ProtoReflect.Descriptor instead. -func (*Locality) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{0} -} - -func (x *Locality) GetRegion() string { - if x != nil { - return x.Region - } - return "" -} - -func (x *Locality) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -func (x *Locality) GetSubZone() string { - if x != nil { - return x.SubZone - } - return "" -} - -// BuildVersion combines SemVer version of extension with free-form build information -// (i.e. 'alpha', 'private-build') as a set of strings. -type BuildVersion struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // SemVer version of extension. - Version *_type.SemanticVersion `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - // Free-form build information. 
- // Envoy defines several well known keys in the source/common/version/version.h file - Metadata *_struct.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` -} - -func (x *BuildVersion) Reset() { - *x = BuildVersion{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BuildVersion) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BuildVersion) ProtoMessage() {} - -func (x *BuildVersion) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BuildVersion.ProtoReflect.Descriptor instead. -func (*BuildVersion) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{1} -} - -func (x *BuildVersion) GetVersion() *_type.SemanticVersion { - if x != nil { - return x.Version - } - return nil -} - -func (x *BuildVersion) GetMetadata() *_struct.Struct { - if x != nil { - return x.Metadata - } - return nil -} - -// Version and identification for an Envoy extension. -// [#next-free-field: 6] -type Extension struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This is the name of the Envoy filter as specified in the Envoy - // configuration, e.g. envoy.filters.http.router, com.acme.widget. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Category of the extension. - // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" - // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from - // acme.com vendor. 
- // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] - Category string `protobuf:"bytes,2,opt,name=category,proto3" json:"category,omitempty"` - // [#not-implemented-hide:] Type descriptor of extension configuration proto. - // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] - // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] - TypeDescriptor string `protobuf:"bytes,3,opt,name=type_descriptor,json=typeDescriptor,proto3" json:"type_descriptor,omitempty"` - // The version is a property of the extension and maintained independently - // of other extensions and the Envoy API. - // This field is not set when extension did not provide version information. - Version *BuildVersion `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` - // Indicates that the extension is present but was disabled via dynamic configuration. - Disabled bool `protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (x *Extension) Reset() { - *x = Extension{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Extension) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Extension) ProtoMessage() {} - -func (x *Extension) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Extension.ProtoReflect.Descriptor instead. 
-func (*Extension) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{2} -} - -func (x *Extension) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Extension) GetCategory() string { - if x != nil { - return x.Category - } - return "" -} - -func (x *Extension) GetTypeDescriptor() string { - if x != nil { - return x.TypeDescriptor - } - return "" -} - -func (x *Extension) GetVersion() *BuildVersion { - if x != nil { - return x.Version - } - return nil -} - -func (x *Extension) GetDisabled() bool { - if x != nil { - return x.Disabled - } - return false -} - -// Identifies a specific Envoy instance. The node identifier is presented to the -// management server, which may use this identifier to distinguish per Envoy -// configuration for serving. -// [#next-free-field: 12] -type Node struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // An opaque node identifier for the Envoy node. This also provides the local - // service node name. It should be set if any of the following features are - // used: :ref:`statsd `, :ref:`CDS - // `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-node`. - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Defines the local service cluster name where Envoy is running. Though - // optional, it should be set if any of the following features are used: - // :ref:`statsd `, :ref:`health check cluster - // verification - // `, - // :ref:`runtime override directory `, - // :ref:`user agent addition - // `, - // :ref:`HTTP global rate limiting `, - // :ref:`CDS `, and :ref:`HTTP tracing - // `, either in this message or via - // :option:`--service-cluster`. - Cluster string `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` - // Opaque metadata extending the node identifier. 
Envoy will pass this - // directly to the management server. - Metadata *_struct.Struct `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` - // Locality specifying where the Envoy instance is running. - Locality *Locality `protobuf:"bytes,4,opt,name=locality,proto3" json:"locality,omitempty"` - // This is motivated by informing a management server during canary which - // version of Envoy is being tested in a heterogeneous fleet. This will be set - // by Envoy in management server RPCs. - // This field is deprecated in favor of the user_agent_name and user_agent_version values. - // - // Deprecated: Do not use. - BuildVersion string `protobuf:"bytes,5,opt,name=build_version,json=buildVersion,proto3" json:"build_version,omitempty"` - // Free-form string that identifies the entity requesting config. - // E.g. "envoy" or "grpc" - UserAgentName string `protobuf:"bytes,6,opt,name=user_agent_name,json=userAgentName,proto3" json:"user_agent_name,omitempty"` - // Types that are assignable to UserAgentVersionType: - // *Node_UserAgentVersion - // *Node_UserAgentBuildVersion - UserAgentVersionType isNode_UserAgentVersionType `protobuf_oneof:"user_agent_version_type"` - // List of extensions and their versions supported by the node. - Extensions []*Extension `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty"` - // Client feature support list. These are well known features described - // in the Envoy API repository for a given major version of an API. Client features - // use reverse DNS naming scheme, for example `com.acme.feature`. - // See :ref:`the list of features ` that xDS client may - // support. - ClientFeatures []string `protobuf:"bytes,10,rep,name=client_features,json=clientFeatures,proto3" json:"client_features,omitempty"` - // Known listening ports on the node as a generic hint to the management server - // for filtering :ref:`listeners ` to be returned. 
For example, - // if there is a listener bound to port 80, the list can optionally contain the - // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. - ListeningAddresses []*Address `protobuf:"bytes,11,rep,name=listening_addresses,json=listeningAddresses,proto3" json:"listening_addresses,omitempty"` -} - -func (x *Node) Reset() { - *x = Node{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Node) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Node) ProtoMessage() {} - -func (x *Node) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Node.ProtoReflect.Descriptor instead. -func (*Node) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{3} -} - -func (x *Node) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *Node) GetCluster() string { - if x != nil { - return x.Cluster - } - return "" -} - -func (x *Node) GetMetadata() *_struct.Struct { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *Node) GetLocality() *Locality { - if x != nil { - return x.Locality - } - return nil -} - -// Deprecated: Do not use. 
-func (x *Node) GetBuildVersion() string { - if x != nil { - return x.BuildVersion - } - return "" -} - -func (x *Node) GetUserAgentName() string { - if x != nil { - return x.UserAgentName - } - return "" -} - -func (m *Node) GetUserAgentVersionType() isNode_UserAgentVersionType { - if m != nil { - return m.UserAgentVersionType - } - return nil -} - -func (x *Node) GetUserAgentVersion() string { - if x, ok := x.GetUserAgentVersionType().(*Node_UserAgentVersion); ok { - return x.UserAgentVersion - } - return "" -} - -func (x *Node) GetUserAgentBuildVersion() *BuildVersion { - if x, ok := x.GetUserAgentVersionType().(*Node_UserAgentBuildVersion); ok { - return x.UserAgentBuildVersion - } - return nil -} - -func (x *Node) GetExtensions() []*Extension { - if x != nil { - return x.Extensions - } - return nil -} - -func (x *Node) GetClientFeatures() []string { - if x != nil { - return x.ClientFeatures - } - return nil -} - -func (x *Node) GetListeningAddresses() []*Address { - if x != nil { - return x.ListeningAddresses - } - return nil -} - -type isNode_UserAgentVersionType interface { - isNode_UserAgentVersionType() -} - -type Node_UserAgentVersion struct { - // Free-form string that identifies the version of the entity requesting config. - // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" - UserAgentVersion string `protobuf:"bytes,7,opt,name=user_agent_version,json=userAgentVersion,proto3,oneof"` -} - -type Node_UserAgentBuildVersion struct { - // Structured version of the entity requesting config. - UserAgentBuildVersion *BuildVersion `protobuf:"bytes,8,opt,name=user_agent_build_version,json=userAgentBuildVersion,proto3,oneof"` -} - -func (*Node_UserAgentVersion) isNode_UserAgentVersionType() {} - -func (*Node_UserAgentBuildVersion) isNode_UserAgentVersionType() {} - -// Metadata provides additional inputs to filters based on matched listeners, -// filter chains, routes and endpoints. 
It is structured as a map, usually from -// filter name (in reverse DNS format) to metadata specific to the filter. Metadata -// key-values for a filter are merged as connection and request handling occurs, -// with later values for the same key overriding earlier values. -// -// An example use of metadata is providing additional values to -// http_connection_manager in the envoy.http_connection_manager.access_log -// namespace. -// -// Another example use of metadata is to per service config info in cluster metadata, which may get -// consumed by multiple filters. -// -// For load balancing, Metadata provides a means to subset cluster endpoints. -// Endpoints have a Metadata object associated and routes contain a Metadata -// object to match against. There are some well defined metadata used today for -// this purpose: -// -// * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an -// endpoint and is also used during header processing -// (x-envoy-upstream-canary) and for stats purposes. -// [#next-major-version: move to type/metadata/v2] -type Metadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* - // namespace is reserved for Envoy's built-in filters. 
- FilterMetadata map[string]*_struct.Struct `protobuf:"bytes,1,rep,name=filter_metadata,json=filterMetadata,proto3" json:"filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *Metadata) Reset() { - *x = Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Metadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Metadata) ProtoMessage() {} - -func (x *Metadata) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. -func (*Metadata) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{4} -} - -func (x *Metadata) GetFilterMetadata() map[string]*_struct.Struct { - if x != nil { - return x.FilterMetadata - } - return nil -} - -// Runtime derived uint32 with a default when not specified. -type RuntimeUInt32 struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Default value if runtime value is not available. - DefaultValue uint32 `protobuf:"varint,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` - // Runtime key to get value for comparison. This value is used if defined. 
- RuntimeKey string `protobuf:"bytes,3,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` -} - -func (x *RuntimeUInt32) Reset() { - *x = RuntimeUInt32{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RuntimeUInt32) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RuntimeUInt32) ProtoMessage() {} - -func (x *RuntimeUInt32) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RuntimeUInt32.ProtoReflect.Descriptor instead. -func (*RuntimeUInt32) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{5} -} - -func (x *RuntimeUInt32) GetDefaultValue() uint32 { - if x != nil { - return x.DefaultValue - } - return 0 -} - -func (x *RuntimeUInt32) GetRuntimeKey() string { - if x != nil { - return x.RuntimeKey - } - return "" -} - -// Runtime derived double with a default when not specified. -type RuntimeDouble struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Default value if runtime value is not available. - DefaultValue float64 `protobuf:"fixed64,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` - // Runtime key to get value for comparison. This value is used if defined. 
- RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` -} - -func (x *RuntimeDouble) Reset() { - *x = RuntimeDouble{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RuntimeDouble) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RuntimeDouble) ProtoMessage() {} - -func (x *RuntimeDouble) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RuntimeDouble.ProtoReflect.Descriptor instead. -func (*RuntimeDouble) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{6} -} - -func (x *RuntimeDouble) GetDefaultValue() float64 { - if x != nil { - return x.DefaultValue - } - return 0 -} - -func (x *RuntimeDouble) GetRuntimeKey() string { - if x != nil { - return x.RuntimeKey - } - return "" -} - -// Runtime derived bool with a default when not specified. -type RuntimeFeatureFlag struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Default value if runtime value is not available. - DefaultValue *wrappers.BoolValue `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` - // Runtime key to get value for comparison. This value is used if defined. The boolean value must - // be represented via its - // `canonical JSON encoding `_. 
- RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` -} - -func (x *RuntimeFeatureFlag) Reset() { - *x = RuntimeFeatureFlag{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RuntimeFeatureFlag) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RuntimeFeatureFlag) ProtoMessage() {} - -func (x *RuntimeFeatureFlag) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RuntimeFeatureFlag.ProtoReflect.Descriptor instead. -func (*RuntimeFeatureFlag) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{7} -} - -func (x *RuntimeFeatureFlag) GetDefaultValue() *wrappers.BoolValue { - if x != nil { - return x.DefaultValue - } - return nil -} - -func (x *RuntimeFeatureFlag) GetRuntimeKey() string { - if x != nil { - return x.RuntimeKey - } - return "" -} - -// Header name/value pair. -type HeaderValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Header name. - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // Header value. - // - // The same :ref:`format specifier ` as used for - // :ref:`HTTP access logging ` applies here, however - // unknown header values are replaced with the empty string instead of `-`. 
- Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *HeaderValue) Reset() { - *x = HeaderValue{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HeaderValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HeaderValue) ProtoMessage() {} - -func (x *HeaderValue) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HeaderValue.ProtoReflect.Descriptor instead. -func (*HeaderValue) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{8} -} - -func (x *HeaderValue) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *HeaderValue) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -// Header name/value pair plus option to control append behavior. -type HeaderValueOption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Header name/value pair that this option applies to. - Header *HeaderValue `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // Should the value be appended? If true (default), the value is appended to - // existing values. 
- Append *wrappers.BoolValue `protobuf:"bytes,2,opt,name=append,proto3" json:"append,omitempty"` -} - -func (x *HeaderValueOption) Reset() { - *x = HeaderValueOption{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HeaderValueOption) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HeaderValueOption) ProtoMessage() {} - -func (x *HeaderValueOption) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HeaderValueOption.ProtoReflect.Descriptor instead. -func (*HeaderValueOption) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{9} -} - -func (x *HeaderValueOption) GetHeader() *HeaderValue { - if x != nil { - return x.Header - } - return nil -} - -func (x *HeaderValueOption) GetAppend() *wrappers.BoolValue { - if x != nil { - return x.Append - } - return nil -} - -// Wrapper for a set of headers. 
-type HeaderMap struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Headers []*HeaderValue `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` -} - -func (x *HeaderMap) Reset() { - *x = HeaderMap{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HeaderMap) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HeaderMap) ProtoMessage() {} - -func (x *HeaderMap) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HeaderMap.ProtoReflect.Descriptor instead. -func (*HeaderMap) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{10} -} - -func (x *HeaderMap) GetHeaders() []*HeaderValue { - if x != nil { - return x.Headers - } - return nil -} - -// Data source consisting of either a file or an inline value. 
-type DataSource struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Specifier: - // *DataSource_Filename - // *DataSource_InlineBytes - // *DataSource_InlineString - Specifier isDataSource_Specifier `protobuf_oneof:"specifier"` -} - -func (x *DataSource) Reset() { - *x = DataSource{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DataSource) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DataSource) ProtoMessage() {} - -func (x *DataSource) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DataSource.ProtoReflect.Descriptor instead. -func (*DataSource) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{11} -} - -func (m *DataSource) GetSpecifier() isDataSource_Specifier { - if m != nil { - return m.Specifier - } - return nil -} - -func (x *DataSource) GetFilename() string { - if x, ok := x.GetSpecifier().(*DataSource_Filename); ok { - return x.Filename - } - return "" -} - -func (x *DataSource) GetInlineBytes() []byte { - if x, ok := x.GetSpecifier().(*DataSource_InlineBytes); ok { - return x.InlineBytes - } - return nil -} - -func (x *DataSource) GetInlineString() string { - if x, ok := x.GetSpecifier().(*DataSource_InlineString); ok { - return x.InlineString - } - return "" -} - -type isDataSource_Specifier interface { - isDataSource_Specifier() -} - -type DataSource_Filename struct { - // Local filesystem data source. 
- Filename string `protobuf:"bytes,1,opt,name=filename,proto3,oneof"` -} - -type DataSource_InlineBytes struct { - // Bytes inlined in the configuration. - InlineBytes []byte `protobuf:"bytes,2,opt,name=inline_bytes,json=inlineBytes,proto3,oneof"` -} - -type DataSource_InlineString struct { - // String inlined in the configuration. - InlineString string `protobuf:"bytes,3,opt,name=inline_string,json=inlineString,proto3,oneof"` -} - -func (*DataSource_Filename) isDataSource_Specifier() {} - -func (*DataSource_InlineBytes) isDataSource_Specifier() {} - -func (*DataSource_InlineString) isDataSource_Specifier() {} - -// The message specifies the retry policy of remote data source when fetching fails. -type RetryPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Specifies parameters that control :ref:`retry backoff strategy `. - // This parameter is optional, in which case the default base interval is 1000 milliseconds. The - // default maximum interval is 10 times the base interval. - RetryBackOff *BackoffStrategy `protobuf:"bytes,1,opt,name=retry_back_off,json=retryBackOff,proto3" json:"retry_back_off,omitempty"` - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. 
- NumRetries *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=num_retries,json=numRetries,proto3" json:"num_retries,omitempty"` -} - -func (x *RetryPolicy) Reset() { - *x = RetryPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RetryPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RetryPolicy) ProtoMessage() {} - -func (x *RetryPolicy) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead. -func (*RetryPolicy) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{12} -} - -func (x *RetryPolicy) GetRetryBackOff() *BackoffStrategy { - if x != nil { - return x.RetryBackOff - } - return nil -} - -func (x *RetryPolicy) GetNumRetries() *wrappers.UInt32Value { - if x != nil { - return x.NumRetries - } - return nil -} - -// The message specifies how to fetch data from remote and how to verify it. -type RemoteDataSource struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The HTTP URI to fetch the remote data. - HttpUri *HttpUri `protobuf:"bytes,1,opt,name=http_uri,json=httpUri,proto3" json:"http_uri,omitempty"` - // SHA256 string for verifying data. - Sha256 string `protobuf:"bytes,2,opt,name=sha256,proto3" json:"sha256,omitempty"` - // Retry policy for fetching remote data. 
- RetryPolicy *RetryPolicy `protobuf:"bytes,3,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` -} - -func (x *RemoteDataSource) Reset() { - *x = RemoteDataSource{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoteDataSource) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoteDataSource) ProtoMessage() {} - -func (x *RemoteDataSource) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoteDataSource.ProtoReflect.Descriptor instead. -func (*RemoteDataSource) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{13} -} - -func (x *RemoteDataSource) GetHttpUri() *HttpUri { - if x != nil { - return x.HttpUri - } - return nil -} - -func (x *RemoteDataSource) GetSha256() string { - if x != nil { - return x.Sha256 - } - return "" -} - -func (x *RemoteDataSource) GetRetryPolicy() *RetryPolicy { - if x != nil { - return x.RetryPolicy - } - return nil -} - -// Async data source which support async data fetch. 
-type AsyncDataSource struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Specifier: - // *AsyncDataSource_Local - // *AsyncDataSource_Remote - Specifier isAsyncDataSource_Specifier `protobuf_oneof:"specifier"` -} - -func (x *AsyncDataSource) Reset() { - *x = AsyncDataSource{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AsyncDataSource) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AsyncDataSource) ProtoMessage() {} - -func (x *AsyncDataSource) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AsyncDataSource.ProtoReflect.Descriptor instead. -func (*AsyncDataSource) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{14} -} - -func (m *AsyncDataSource) GetSpecifier() isAsyncDataSource_Specifier { - if m != nil { - return m.Specifier - } - return nil -} - -func (x *AsyncDataSource) GetLocal() *DataSource { - if x, ok := x.GetSpecifier().(*AsyncDataSource_Local); ok { - return x.Local - } - return nil -} - -func (x *AsyncDataSource) GetRemote() *RemoteDataSource { - if x, ok := x.GetSpecifier().(*AsyncDataSource_Remote); ok { - return x.Remote - } - return nil -} - -type isAsyncDataSource_Specifier interface { - isAsyncDataSource_Specifier() -} - -type AsyncDataSource_Local struct { - // Local async data source. - Local *DataSource `protobuf:"bytes,1,opt,name=local,proto3,oneof"` -} - -type AsyncDataSource_Remote struct { - // Remote async data source. 
- Remote *RemoteDataSource `protobuf:"bytes,2,opt,name=remote,proto3,oneof"` -} - -func (*AsyncDataSource_Local) isAsyncDataSource_Specifier() {} - -func (*AsyncDataSource_Remote) isAsyncDataSource_Specifier() {} - -// Configuration for transport socket in :ref:`listeners ` and -// :ref:`clusters `. If the configuration is -// empty, a default transport socket implementation and configuration will be -// chosen based on the platform and existence of tls_context. -type TransportSocket struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of the transport socket to instantiate. The name must match a supported transport - // socket implementation. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Implementation specific configuration which depends on the implementation being instantiated. - // See the supported transport socket implementations for further documentation. - // - // Types that are assignable to ConfigType: - // *TransportSocket_Config - // *TransportSocket_TypedConfig - ConfigType isTransportSocket_ConfigType `protobuf_oneof:"config_type"` -} - -func (x *TransportSocket) Reset() { - *x = TransportSocket{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TransportSocket) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TransportSocket) ProtoMessage() {} - -func (x *TransportSocket) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TransportSocket.ProtoReflect.Descriptor instead. 
-func (*TransportSocket) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{15} -} - -func (x *TransportSocket) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (m *TransportSocket) GetConfigType() isTransportSocket_ConfigType { - if m != nil { - return m.ConfigType - } - return nil -} - -// Deprecated: Do not use. -func (x *TransportSocket) GetConfig() *_struct.Struct { - if x, ok := x.GetConfigType().(*TransportSocket_Config); ok { - return x.Config - } - return nil -} - -func (x *TransportSocket) GetTypedConfig() *any.Any { - if x, ok := x.GetConfigType().(*TransportSocket_TypedConfig); ok { - return x.TypedConfig - } - return nil -} - -type isTransportSocket_ConfigType interface { - isTransportSocket_ConfigType() -} - -type TransportSocket_Config struct { - // Deprecated: Do not use. - Config *_struct.Struct `protobuf:"bytes,2,opt,name=config,proto3,oneof"` -} - -type TransportSocket_TypedConfig struct { - TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` -} - -func (*TransportSocket_Config) isTransportSocket_ConfigType() {} - -func (*TransportSocket_TypedConfig) isTransportSocket_ConfigType() {} - -// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not -// specified via a runtime key. -// -// .. note:: -// -// Parsing of the runtime key's data is implemented such that it may be represented as a -// :ref:`FractionalPercent ` proto represented as JSON/YAML -// and may also be represented as an integer with the assumption that the value is an integral -// percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse -// as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. 
-type RuntimeFractionalPercent struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Default value if the runtime value's for the numerator/denominator keys are not available. - DefaultValue *_type.FractionalPercent `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` - // Runtime key for a YAML representation of a FractionalPercent. - RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` -} - -func (x *RuntimeFractionalPercent) Reset() { - *x = RuntimeFractionalPercent{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RuntimeFractionalPercent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RuntimeFractionalPercent) ProtoMessage() {} - -func (x *RuntimeFractionalPercent) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RuntimeFractionalPercent.ProtoReflect.Descriptor instead. -func (*RuntimeFractionalPercent) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{16} -} - -func (x *RuntimeFractionalPercent) GetDefaultValue() *_type.FractionalPercent { - if x != nil { - return x.DefaultValue - } - return nil -} - -func (x *RuntimeFractionalPercent) GetRuntimeKey() string { - if x != nil { - return x.RuntimeKey - } - return "" -} - -// Identifies a specific ControlPlane instance that Envoy is connected to. 
-type ControlPlane struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // An opaque control plane identifier that uniquely identifies an instance - // of control plane. This can be used to identify which control plane instance, - // the Envoy is connected to. - Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` -} - -func (x *ControlPlane) Reset() { - *x = ControlPlane{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ControlPlane) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ControlPlane) ProtoMessage() {} - -func (x *ControlPlane) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_base_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ControlPlane.ProtoReflect.Descriptor instead. 
-func (*ControlPlane) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_base_proto_rawDescGZIP(), []int{17} -} - -func (x *ControlPlane) GetIdentifier() string { - if x != nil { - return x.Identifier - } - return "" -} - -var File_envoy_api_v2_core_base_proto protoreflect.FileDescriptor - -var file_envoy_api_v2_core_base_proto_rawDesc = []byte{ - 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, - 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, - 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x73, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, - 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, - 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, - 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x51, 0x0a, 0x08, 0x4c, 0x6f, - 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, - 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 0x6f, - 0x6e, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x5a, 0x6f, 0x6e, 0x65, 0x22, 0x7a, 0x0a, - 0x0c, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xbb, 0x01, 0x0a, 0x09, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, - 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, - 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x79, 0x70, 0x65, 0x5f, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x74, 0x79, 0x70, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x12, 0x39, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x64, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xca, 0x04, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 
0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x37, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x27, 0x0a, 0x0d, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x0c, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x75, 0x73, 0x65, 0x72, - 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x75, 0x73, 0x65, - 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, - 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x18, 0x75, 0x73, 0x65, - 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x42, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x15, - 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 
0x2e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x13, - 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x12, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x69, 0x6e, 0x67, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x75, 0x73, 0x65, - 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x22, 0xc0, 0x01, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0x58, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x5a, 0x0a, 0x13, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 
0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5e, 0x0a, 0x0d, 0x52, 0x75, 0x6e, 0x74, 0x69, - 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, - 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x5e, 0x0a, 0x0d, 0x52, 0x75, 0x6e, 0x74, 0x69, - 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, - 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x89, 0x01, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x12, 0x49, - 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 
0x12, 0x28, 0x0a, 0x0b, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x4b, 0x65, 0x79, 0x22, 0x59, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x23, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x20, 0x01, 0x28, 0x80, 0x80, 0x01, 0xc0, 0x01, 0x01, 0xc8, - 0x01, 0x00, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x72, 0x0a, 0x28, 0x80, 0x80, - 0x01, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x89, - 0x01, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x22, 0x45, 0x0a, 0x09, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x70, 0x12, 0x38, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 
0x61, - 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x22, 0xa3, 0x01, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x25, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x48, 0x00, 0x52, 0x08, 0x66, - 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, - 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x7a, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, - 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x20, 0x01, 0x48, 0x00, 0x52, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x42, 0x10, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, - 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x48, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, - 0x65, 0x67, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, - 0x66, 0x12, 0x3d, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 
0x0a, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, - 0x22, 0xb7, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3f, 0x0a, 0x08, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, - 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, - 0x55, 0x72, 0x69, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x68, - 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x12, 0x1f, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, - 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x12, 0x41, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, - 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x99, 0x01, 0x0a, 0x0f, 0x41, - 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x35, - 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x05, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x12, 0x3d, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 
0x00, 0x52, 0x06, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x42, 0x10, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xaf, 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, - 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x39, - 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, - 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x89, 0x01, 0x0a, 0x18, 0x52, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x4c, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, - 0x65, 
0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, - 0x65, 0x4b, 0x65, 0x79, 0x22, 0x2e, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, - 0x6c, 0x61, 0x6e, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x66, 0x69, 0x65, 0x72, 0x2a, 0x28, 0x0a, 0x0f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, - 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, - 0x4c, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x49, 0x47, 0x48, 0x10, 0x01, 0x2a, 0x89, - 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x12, 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, - 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x44, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x50, - 0x4f, 0x53, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x55, 0x54, 0x10, 0x04, 0x12, 0x0a, - 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, - 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x50, 0x54, 0x49, 0x4f, - 0x4e, 0x53, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x12, - 0x09, 0x0a, 0x05, 0x50, 0x41, 0x54, 0x43, 0x48, 0x10, 0x09, 0x2a, 0x3e, 0x0a, 0x10, 0x54, 0x72, - 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, - 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, - 0x4f, 0x55, 0x54, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, 0x42, 0x8c, 0x01, 0x0a, 0x1f, 0x69, - 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 
0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x09, - 0x42, 0x61, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, - 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x50, 0x0b, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_api_v2_core_base_proto_rawDescOnce sync.Once - file_envoy_api_v2_core_base_proto_rawDescData = file_envoy_api_v2_core_base_proto_rawDesc -) - -func file_envoy_api_v2_core_base_proto_rawDescGZIP() []byte { - file_envoy_api_v2_core_base_proto_rawDescOnce.Do(func() { - file_envoy_api_v2_core_base_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_api_v2_core_base_proto_rawDescData) - }) - return file_envoy_api_v2_core_base_proto_rawDescData -} - -var file_envoy_api_v2_core_base_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_envoy_api_v2_core_base_proto_msgTypes = make([]protoimpl.MessageInfo, 19) -var file_envoy_api_v2_core_base_proto_goTypes = []interface{}{ - (RoutingPriority)(0), // 0: envoy.api.v2.core.RoutingPriority - (RequestMethod)(0), // 1: envoy.api.v2.core.RequestMethod - (TrafficDirection)(0), // 2: envoy.api.v2.core.TrafficDirection - (*Locality)(nil), // 3: envoy.api.v2.core.Locality - (*BuildVersion)(nil), // 4: envoy.api.v2.core.BuildVersion - (*Extension)(nil), // 5: envoy.api.v2.core.Extension - (*Node)(nil), // 6: envoy.api.v2.core.Node - (*Metadata)(nil), // 7: 
envoy.api.v2.core.Metadata - (*RuntimeUInt32)(nil), // 8: envoy.api.v2.core.RuntimeUInt32 - (*RuntimeDouble)(nil), // 9: envoy.api.v2.core.RuntimeDouble - (*RuntimeFeatureFlag)(nil), // 10: envoy.api.v2.core.RuntimeFeatureFlag - (*HeaderValue)(nil), // 11: envoy.api.v2.core.HeaderValue - (*HeaderValueOption)(nil), // 12: envoy.api.v2.core.HeaderValueOption - (*HeaderMap)(nil), // 13: envoy.api.v2.core.HeaderMap - (*DataSource)(nil), // 14: envoy.api.v2.core.DataSource - (*RetryPolicy)(nil), // 15: envoy.api.v2.core.RetryPolicy - (*RemoteDataSource)(nil), // 16: envoy.api.v2.core.RemoteDataSource - (*AsyncDataSource)(nil), // 17: envoy.api.v2.core.AsyncDataSource - (*TransportSocket)(nil), // 18: envoy.api.v2.core.TransportSocket - (*RuntimeFractionalPercent)(nil), // 19: envoy.api.v2.core.RuntimeFractionalPercent - (*ControlPlane)(nil), // 20: envoy.api.v2.core.ControlPlane - nil, // 21: envoy.api.v2.core.Metadata.FilterMetadataEntry - (*_type.SemanticVersion)(nil), // 22: envoy.type.SemanticVersion - (*_struct.Struct)(nil), // 23: google.protobuf.Struct - (*Address)(nil), // 24: envoy.api.v2.core.Address - (*wrappers.BoolValue)(nil), // 25: google.protobuf.BoolValue - (*BackoffStrategy)(nil), // 26: envoy.api.v2.core.BackoffStrategy - (*wrappers.UInt32Value)(nil), // 27: google.protobuf.UInt32Value - (*HttpUri)(nil), // 28: envoy.api.v2.core.HttpUri - (*any.Any)(nil), // 29: google.protobuf.Any - (*_type.FractionalPercent)(nil), // 30: envoy.type.FractionalPercent -} -var file_envoy_api_v2_core_base_proto_depIdxs = []int32{ - 22, // 0: envoy.api.v2.core.BuildVersion.version:type_name -> envoy.type.SemanticVersion - 23, // 1: envoy.api.v2.core.BuildVersion.metadata:type_name -> google.protobuf.Struct - 4, // 2: envoy.api.v2.core.Extension.version:type_name -> envoy.api.v2.core.BuildVersion - 23, // 3: envoy.api.v2.core.Node.metadata:type_name -> google.protobuf.Struct - 3, // 4: envoy.api.v2.core.Node.locality:type_name -> envoy.api.v2.core.Locality - 4, // 5: 
envoy.api.v2.core.Node.user_agent_build_version:type_name -> envoy.api.v2.core.BuildVersion - 5, // 6: envoy.api.v2.core.Node.extensions:type_name -> envoy.api.v2.core.Extension - 24, // 7: envoy.api.v2.core.Node.listening_addresses:type_name -> envoy.api.v2.core.Address - 21, // 8: envoy.api.v2.core.Metadata.filter_metadata:type_name -> envoy.api.v2.core.Metadata.FilterMetadataEntry - 25, // 9: envoy.api.v2.core.RuntimeFeatureFlag.default_value:type_name -> google.protobuf.BoolValue - 11, // 10: envoy.api.v2.core.HeaderValueOption.header:type_name -> envoy.api.v2.core.HeaderValue - 25, // 11: envoy.api.v2.core.HeaderValueOption.append:type_name -> google.protobuf.BoolValue - 11, // 12: envoy.api.v2.core.HeaderMap.headers:type_name -> envoy.api.v2.core.HeaderValue - 26, // 13: envoy.api.v2.core.RetryPolicy.retry_back_off:type_name -> envoy.api.v2.core.BackoffStrategy - 27, // 14: envoy.api.v2.core.RetryPolicy.num_retries:type_name -> google.protobuf.UInt32Value - 28, // 15: envoy.api.v2.core.RemoteDataSource.http_uri:type_name -> envoy.api.v2.core.HttpUri - 15, // 16: envoy.api.v2.core.RemoteDataSource.retry_policy:type_name -> envoy.api.v2.core.RetryPolicy - 14, // 17: envoy.api.v2.core.AsyncDataSource.local:type_name -> envoy.api.v2.core.DataSource - 16, // 18: envoy.api.v2.core.AsyncDataSource.remote:type_name -> envoy.api.v2.core.RemoteDataSource - 23, // 19: envoy.api.v2.core.TransportSocket.config:type_name -> google.protobuf.Struct - 29, // 20: envoy.api.v2.core.TransportSocket.typed_config:type_name -> google.protobuf.Any - 30, // 21: envoy.api.v2.core.RuntimeFractionalPercent.default_value:type_name -> envoy.type.FractionalPercent - 23, // 22: envoy.api.v2.core.Metadata.FilterMetadataEntry.value:type_name -> google.protobuf.Struct - 23, // [23:23] is the sub-list for method output_type - 23, // [23:23] is the sub-list for method input_type - 23, // [23:23] is the sub-list for extension type_name - 23, // [23:23] is the sub-list for extension extendee - 0, 
// [0:23] is the sub-list for field type_name -} - -func init() { file_envoy_api_v2_core_base_proto_init() } -func file_envoy_api_v2_core_base_proto_init() { - if File_envoy_api_v2_core_base_proto != nil { - return - } - file_envoy_api_v2_core_address_proto_init() - file_envoy_api_v2_core_backoff_proto_init() - file_envoy_api_v2_core_http_uri_proto_init() - file_envoy_api_v2_core_socket_option_proto_init() - if !protoimpl.UnsafeEnabled { - file_envoy_api_v2_core_base_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Locality); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BuildVersion); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Extension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Node); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RuntimeUInt32); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RuntimeDouble); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RuntimeFeatureFlag); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeaderValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeaderValueOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeaderMap); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DataSource); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RetryPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*RemoteDataSource); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AsyncDataSource); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TransportSocket); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RuntimeFractionalPercent); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_base_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ControlPlane); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_api_v2_core_base_proto_msgTypes[3].OneofWrappers = []interface{}{ - (*Node_UserAgentVersion)(nil), - (*Node_UserAgentBuildVersion)(nil), - } - file_envoy_api_v2_core_base_proto_msgTypes[11].OneofWrappers = []interface{}{ - (*DataSource_Filename)(nil), - (*DataSource_InlineBytes)(nil), - (*DataSource_InlineString)(nil), - } - file_envoy_api_v2_core_base_proto_msgTypes[14].OneofWrappers = []interface{}{ - (*AsyncDataSource_Local)(nil), - (*AsyncDataSource_Remote)(nil), - } - file_envoy_api_v2_core_base_proto_msgTypes[15].OneofWrappers = []interface{}{ - (*TransportSocket_Config)(nil), - (*TransportSocket_TypedConfig)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_api_v2_core_base_proto_rawDesc, - NumEnums: 3, - NumMessages: 19, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_api_v2_core_base_proto_goTypes, - DependencyIndexes: file_envoy_api_v2_core_base_proto_depIdxs, - EnumInfos: file_envoy_api_v2_core_base_proto_enumTypes, - MessageInfos: file_envoy_api_v2_core_base_proto_msgTypes, - }.Build() - File_envoy_api_v2_core_base_proto = out.File - file_envoy_api_v2_core_base_proto_rawDesc = nil - file_envoy_api_v2_core_base_proto_goTypes = nil - file_envoy_api_v2_core_base_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/base.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/base.pb.validate.go deleted file mode 100644 index 838b1b337c..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/base.pb.validate.go +++ /dev/null @@ -1,2766 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/api/v2/core/base.proto - -package core - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on Locality with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. 
-func (m *Locality) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Locality with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in LocalityMultiError, or nil -// if none found. -func (m *Locality) ValidateAll() error { - return m.validate(true) -} - -func (m *Locality) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Region - - // no validation rules for Zone - - // no validation rules for SubZone - - if len(errors) > 0 { - return LocalityMultiError(errors) - } - - return nil -} - -// LocalityMultiError is an error wrapping multiple validation errors returned -// by Locality.ValidateAll() if the designated constraints aren't met. -type LocalityMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m LocalityMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m LocalityMultiError) AllErrors() []error { return m } - -// LocalityValidationError is the validation error returned by -// Locality.Validate if the designated constraints aren't met. -type LocalityValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e LocalityValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e LocalityValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e LocalityValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e LocalityValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e LocalityValidationError) ErrorName() string { return "LocalityValidationError" } - -// Error satisfies the builtin error interface -func (e LocalityValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sLocality.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = LocalityValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = LocalityValidationError{} - -// Validate checks the field values on BuildVersion with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *BuildVersion) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on BuildVersion with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in BuildVersionMultiError, or -// nil if none found. 
-func (m *BuildVersion) ValidateAll() error { - return m.validate(true) -} - -func (m *BuildVersion) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetVersion()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, BuildVersionValidationError{ - field: "Version", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, BuildVersionValidationError{ - field: "Version", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetVersion()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return BuildVersionValidationError{ - field: "Version", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetMetadata()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, BuildVersionValidationError{ - field: "Metadata", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, BuildVersionValidationError{ - field: "Metadata", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return BuildVersionValidationError{ - field: "Metadata", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return BuildVersionMultiError(errors) - } - - return nil -} - -// BuildVersionMultiError is an error wrapping multiple validation errors -// returned by BuildVersion.ValidateAll() if the designated constraints aren't met. 
-type BuildVersionMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m BuildVersionMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m BuildVersionMultiError) AllErrors() []error { return m } - -// BuildVersionValidationError is the validation error returned by -// BuildVersion.Validate if the designated constraints aren't met. -type BuildVersionValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e BuildVersionValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e BuildVersionValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e BuildVersionValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e BuildVersionValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e BuildVersionValidationError) ErrorName() string { return "BuildVersionValidationError" } - -// Error satisfies the builtin error interface -func (e BuildVersionValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sBuildVersion.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = BuildVersionValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = BuildVersionValidationError{} - -// Validate checks the field values on Extension with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. 
-func (m *Extension) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Extension with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in ExtensionMultiError, or nil -// if none found. -func (m *Extension) ValidateAll() error { - return m.validate(true) -} - -func (m *Extension) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Name - - // no validation rules for Category - - // no validation rules for TypeDescriptor - - if all { - switch v := interface{}(m.GetVersion()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ExtensionValidationError{ - field: "Version", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ExtensionValidationError{ - field: "Version", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetVersion()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ExtensionValidationError{ - field: "Version", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for Disabled - - if len(errors) > 0 { - return ExtensionMultiError(errors) - } - - return nil -} - -// ExtensionMultiError is an error wrapping multiple validation errors returned -// by Extension.ValidateAll() if the designated constraints aren't met. -type ExtensionMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m ExtensionMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ExtensionMultiError) AllErrors() []error { return m } - -// ExtensionValidationError is the validation error returned by -// Extension.Validate if the designated constraints aren't met. -type ExtensionValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ExtensionValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ExtensionValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ExtensionValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ExtensionValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e ExtensionValidationError) ErrorName() string { return "ExtensionValidationError" } - -// Error satisfies the builtin error interface -func (e ExtensionValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sExtension.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ExtensionValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ExtensionValidationError{} - -// Validate checks the field values on Node with the rules defined in the proto -// definition for this message. If any rules are violated, the first error -// encountered is returned, or nil if there are no violations. 
-func (m *Node) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Node with the rules defined in the -// proto definition for this message. If any rules are violated, the result is -// a list of violation errors wrapped in NodeMultiError, or nil if none found. -func (m *Node) ValidateAll() error { - return m.validate(true) -} - -func (m *Node) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Id - - // no validation rules for Cluster - - if all { - switch v := interface{}(m.GetMetadata()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, NodeValidationError{ - field: "Metadata", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, NodeValidationError{ - field: "Metadata", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return NodeValidationError{ - field: "Metadata", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetLocality()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, NodeValidationError{ - field: "Locality", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, NodeValidationError{ - field: "Locality", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLocality()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return NodeValidationError{ - field: "Locality", - reason: 
"embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for BuildVersion - - // no validation rules for UserAgentName - - for idx, item := range m.GetExtensions() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, NodeValidationError{ - field: fmt.Sprintf("Extensions[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, NodeValidationError{ - field: fmt.Sprintf("Extensions[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return NodeValidationError{ - field: fmt.Sprintf("Extensions[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - for idx, item := range m.GetListeningAddresses() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, NodeValidationError{ - field: fmt.Sprintf("ListeningAddresses[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, NodeValidationError{ - field: fmt.Sprintf("ListeningAddresses[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return NodeValidationError{ - field: fmt.Sprintf("ListeningAddresses[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - switch m.UserAgentVersionType.(type) { - - case 
*Node_UserAgentVersion: - // no validation rules for UserAgentVersion - - case *Node_UserAgentBuildVersion: - - if all { - switch v := interface{}(m.GetUserAgentBuildVersion()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, NodeValidationError{ - field: "UserAgentBuildVersion", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, NodeValidationError{ - field: "UserAgentBuildVersion", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetUserAgentBuildVersion()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return NodeValidationError{ - field: "UserAgentBuildVersion", - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return NodeMultiError(errors) - } - - return nil -} - -// NodeMultiError is an error wrapping multiple validation errors returned by -// Node.ValidateAll() if the designated constraints aren't met. -type NodeMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m NodeMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m NodeMultiError) AllErrors() []error { return m } - -// NodeValidationError is the validation error returned by Node.Validate if the -// designated constraints aren't met. -type NodeValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e NodeValidationError) Field() string { return e.field } - -// Reason function returns reason value. 
-func (e NodeValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e NodeValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e NodeValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e NodeValidationError) ErrorName() string { return "NodeValidationError" } - -// Error satisfies the builtin error interface -func (e NodeValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sNode.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = NodeValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = NodeValidationError{} - -// Validate checks the field values on Metadata with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *Metadata) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Metadata with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in MetadataMultiError, or nil -// if none found. 
-func (m *Metadata) ValidateAll() error { - return m.validate(true) -} - -func (m *Metadata) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - { - sorted_keys := make([]string, len(m.GetFilterMetadata())) - i := 0 - for key := range m.GetFilterMetadata() { - sorted_keys[i] = key - i++ - } - sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) - for _, key := range sorted_keys { - val := m.GetFilterMetadata()[key] - _ = val - - // no validation rules for FilterMetadata[key] - - if all { - switch v := interface{}(val).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, MetadataValidationError{ - field: fmt.Sprintf("FilterMetadata[%v]", key), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, MetadataValidationError{ - field: fmt.Sprintf("FilterMetadata[%v]", key), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return MetadataValidationError{ - field: fmt.Sprintf("FilterMetadata[%v]", key), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - } - - if len(errors) > 0 { - return MetadataMultiError(errors) - } - - return nil -} - -// MetadataMultiError is an error wrapping multiple validation errors returned -// by Metadata.ValidateAll() if the designated constraints aren't met. -type MetadataMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m MetadataMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. 
-func (m MetadataMultiError) AllErrors() []error { return m } - -// MetadataValidationError is the validation error returned by -// Metadata.Validate if the designated constraints aren't met. -type MetadataValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e MetadataValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e MetadataValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e MetadataValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e MetadataValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e MetadataValidationError) ErrorName() string { return "MetadataValidationError" } - -// Error satisfies the builtin error interface -func (e MetadataValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sMetadata.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = MetadataValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = MetadataValidationError{} - -// Validate checks the field values on RuntimeUInt32 with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *RuntimeUInt32) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on RuntimeUInt32 with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in RuntimeUInt32MultiError, or -// nil if none found. 
-func (m *RuntimeUInt32) ValidateAll() error { - return m.validate(true) -} - -func (m *RuntimeUInt32) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for DefaultValue - - if len(m.GetRuntimeKey()) < 1 { - err := RuntimeUInt32ValidationError{ - field: "RuntimeKey", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(errors) > 0 { - return RuntimeUInt32MultiError(errors) - } - - return nil -} - -// RuntimeUInt32MultiError is an error wrapping multiple validation errors -// returned by RuntimeUInt32.ValidateAll() if the designated constraints -// aren't met. -type RuntimeUInt32MultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m RuntimeUInt32MultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m RuntimeUInt32MultiError) AllErrors() []error { return m } - -// RuntimeUInt32ValidationError is the validation error returned by -// RuntimeUInt32.Validate if the designated constraints aren't met. -type RuntimeUInt32ValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RuntimeUInt32ValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RuntimeUInt32ValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RuntimeUInt32ValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e RuntimeUInt32ValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e RuntimeUInt32ValidationError) ErrorName() string { return "RuntimeUInt32ValidationError" } - -// Error satisfies the builtin error interface -func (e RuntimeUInt32ValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRuntimeUInt32.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RuntimeUInt32ValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RuntimeUInt32ValidationError{} - -// Validate checks the field values on RuntimeDouble with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *RuntimeDouble) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on RuntimeDouble with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in RuntimeDoubleMultiError, or -// nil if none found. -func (m *RuntimeDouble) ValidateAll() error { - return m.validate(true) -} - -func (m *RuntimeDouble) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for DefaultValue - - if len(m.GetRuntimeKey()) < 1 { - err := RuntimeDoubleValidationError{ - field: "RuntimeKey", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(errors) > 0 { - return RuntimeDoubleMultiError(errors) - } - - return nil -} - -// RuntimeDoubleMultiError is an error wrapping multiple validation errors -// returned by RuntimeDouble.ValidateAll() if the designated constraints -// aren't met. 
-type RuntimeDoubleMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m RuntimeDoubleMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m RuntimeDoubleMultiError) AllErrors() []error { return m } - -// RuntimeDoubleValidationError is the validation error returned by -// RuntimeDouble.Validate if the designated constraints aren't met. -type RuntimeDoubleValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RuntimeDoubleValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RuntimeDoubleValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RuntimeDoubleValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e RuntimeDoubleValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e RuntimeDoubleValidationError) ErrorName() string { return "RuntimeDoubleValidationError" } - -// Error satisfies the builtin error interface -func (e RuntimeDoubleValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRuntimeDouble.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RuntimeDoubleValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RuntimeDoubleValidationError{} - -// Validate checks the field values on RuntimeFeatureFlag with the rules -// defined in the proto definition for this message. 
If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *RuntimeFeatureFlag) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on RuntimeFeatureFlag with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// RuntimeFeatureFlagMultiError, or nil if none found. -func (m *RuntimeFeatureFlag) ValidateAll() error { - return m.validate(true) -} - -func (m *RuntimeFeatureFlag) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if m.GetDefaultValue() == nil { - err := RuntimeFeatureFlagValidationError{ - field: "DefaultValue", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetDefaultValue()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RuntimeFeatureFlagValidationError{ - field: "DefaultValue", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RuntimeFeatureFlagValidationError{ - field: "DefaultValue", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RuntimeFeatureFlagValidationError{ - field: "DefaultValue", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(m.GetRuntimeKey()) < 1 { - err := RuntimeFeatureFlagValidationError{ - field: "RuntimeKey", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(errors) > 0 { - return RuntimeFeatureFlagMultiError(errors) - } - - return nil -} - -// 
RuntimeFeatureFlagMultiError is an error wrapping multiple validation errors -// returned by RuntimeFeatureFlag.ValidateAll() if the designated constraints -// aren't met. -type RuntimeFeatureFlagMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m RuntimeFeatureFlagMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m RuntimeFeatureFlagMultiError) AllErrors() []error { return m } - -// RuntimeFeatureFlagValidationError is the validation error returned by -// RuntimeFeatureFlag.Validate if the designated constraints aren't met. -type RuntimeFeatureFlagValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RuntimeFeatureFlagValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RuntimeFeatureFlagValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RuntimeFeatureFlagValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e RuntimeFeatureFlagValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e RuntimeFeatureFlagValidationError) ErrorName() string { - return "RuntimeFeatureFlagValidationError" -} - -// Error satisfies the builtin error interface -func (e RuntimeFeatureFlagValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRuntimeFeatureFlag.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RuntimeFeatureFlagValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RuntimeFeatureFlagValidationError{} - -// Validate checks the field values on HeaderValue with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *HeaderValue) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HeaderValue with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in HeaderValueMultiError, or -// nil if none found. 
-func (m *HeaderValue) ValidateAll() error { - return m.validate(true) -} - -func (m *HeaderValue) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if l := len(m.GetKey()); l < 1 || l > 16384 { - err := HeaderValueValidationError{ - field: "Key", - reason: "value length must be between 1 and 16384 bytes, inclusive", - } - if !all { - return err - } - errors = append(errors, err) - } - - if !_HeaderValue_Key_Pattern.MatchString(m.GetKey()) { - err := HeaderValueValidationError{ - field: "Key", - reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(m.GetValue()) > 16384 { - err := HeaderValueValidationError{ - field: "Value", - reason: "value length must be at most 16384 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if !_HeaderValue_Value_Pattern.MatchString(m.GetValue()) { - err := HeaderValueValidationError{ - field: "Value", - reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(errors) > 0 { - return HeaderValueMultiError(errors) - } - - return nil -} - -// HeaderValueMultiError is an error wrapping multiple validation errors -// returned by HeaderValue.ValidateAll() if the designated constraints aren't met. -type HeaderValueMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HeaderValueMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HeaderValueMultiError) AllErrors() []error { return m } - -// HeaderValueValidationError is the validation error returned by -// HeaderValue.Validate if the designated constraints aren't met. 
-type HeaderValueValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HeaderValueValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HeaderValueValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HeaderValueValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HeaderValueValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e HeaderValueValidationError) ErrorName() string { return "HeaderValueValidationError" } - -// Error satisfies the builtin error interface -func (e HeaderValueValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHeaderValue.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HeaderValueValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HeaderValueValidationError{} - -var _HeaderValue_Key_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") - -var _HeaderValue_Value_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") - -// Validate checks the field values on HeaderValueOption with the rules defined -// in the proto definition for this message. If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *HeaderValueOption) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HeaderValueOption with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// HeaderValueOptionMultiError, or nil if none found. 
-func (m *HeaderValueOption) ValidateAll() error { - return m.validate(true) -} - -func (m *HeaderValueOption) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if m.GetHeader() == nil { - err := HeaderValueOptionValidationError{ - field: "Header", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetHeader()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HeaderValueOptionValidationError{ - field: "Header", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HeaderValueOptionValidationError{ - field: "Header", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HeaderValueOptionValidationError{ - field: "Header", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetAppend()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HeaderValueOptionValidationError{ - field: "Append", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HeaderValueOptionValidationError{ - field: "Append", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetAppend()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HeaderValueOptionValidationError{ - field: "Append", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return 
HeaderValueOptionMultiError(errors) - } - - return nil -} - -// HeaderValueOptionMultiError is an error wrapping multiple validation errors -// returned by HeaderValueOption.ValidateAll() if the designated constraints -// aren't met. -type HeaderValueOptionMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HeaderValueOptionMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HeaderValueOptionMultiError) AllErrors() []error { return m } - -// HeaderValueOptionValidationError is the validation error returned by -// HeaderValueOption.Validate if the designated constraints aren't met. -type HeaderValueOptionValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HeaderValueOptionValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HeaderValueOptionValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HeaderValueOptionValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HeaderValueOptionValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e HeaderValueOptionValidationError) ErrorName() string { - return "HeaderValueOptionValidationError" -} - -// Error satisfies the builtin error interface -func (e HeaderValueOptionValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHeaderValueOption.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HeaderValueOptionValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HeaderValueOptionValidationError{} - -// Validate checks the field values on HeaderMap with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *HeaderMap) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HeaderMap with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in HeaderMapMultiError, or nil -// if none found. 
-func (m *HeaderMap) ValidateAll() error { - return m.validate(true) -} - -func (m *HeaderMap) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - for idx, item := range m.GetHeaders() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HeaderMapValidationError{ - field: fmt.Sprintf("Headers[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HeaderMapValidationError{ - field: fmt.Sprintf("Headers[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HeaderMapValidationError{ - field: fmt.Sprintf("Headers[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return HeaderMapMultiError(errors) - } - - return nil -} - -// HeaderMapMultiError is an error wrapping multiple validation errors returned -// by HeaderMap.ValidateAll() if the designated constraints aren't met. -type HeaderMapMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HeaderMapMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HeaderMapMultiError) AllErrors() []error { return m } - -// HeaderMapValidationError is the validation error returned by -// HeaderMap.Validate if the designated constraints aren't met. -type HeaderMapValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e HeaderMapValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HeaderMapValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HeaderMapValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HeaderMapValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e HeaderMapValidationError) ErrorName() string { return "HeaderMapValidationError" } - -// Error satisfies the builtin error interface -func (e HeaderMapValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHeaderMap.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HeaderMapValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HeaderMapValidationError{} - -// Validate checks the field values on DataSource with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *DataSource) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on DataSource with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in DataSourceMultiError, or -// nil if none found. 
-func (m *DataSource) ValidateAll() error { - return m.validate(true) -} - -func (m *DataSource) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.Specifier.(type) { - - case *DataSource_Filename: - - if len(m.GetFilename()) < 1 { - err := DataSourceValidationError{ - field: "Filename", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - case *DataSource_InlineBytes: - - if len(m.GetInlineBytes()) < 1 { - err := DataSourceValidationError{ - field: "InlineBytes", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - case *DataSource_InlineString: - - if len(m.GetInlineString()) < 1 { - err := DataSourceValidationError{ - field: "InlineString", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - default: - err := DataSourceValidationError{ - field: "Specifier", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return DataSourceMultiError(errors) - } - - return nil -} - -// DataSourceMultiError is an error wrapping multiple validation errors -// returned by DataSource.ValidateAll() if the designated constraints aren't met. -type DataSourceMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m DataSourceMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m DataSourceMultiError) AllErrors() []error { return m } - -// DataSourceValidationError is the validation error returned by -// DataSource.Validate if the designated constraints aren't met. 
-type DataSourceValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e DataSourceValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e DataSourceValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e DataSourceValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e DataSourceValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e DataSourceValidationError) ErrorName() string { return "DataSourceValidationError" } - -// Error satisfies the builtin error interface -func (e DataSourceValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sDataSource.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = DataSourceValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = DataSourceValidationError{} - -// Validate checks the field values on RetryPolicy with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *RetryPolicy) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on RetryPolicy with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in RetryPolicyMultiError, or -// nil if none found. 
-func (m *RetryPolicy) ValidateAll() error { - return m.validate(true) -} - -func (m *RetryPolicy) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetRetryBackOff()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RetryPolicyValidationError{ - field: "RetryBackOff", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RetryPolicyValidationError{ - field: "RetryBackOff", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetRetryBackOff()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RetryPolicyValidationError{ - field: "RetryBackOff", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetNumRetries()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RetryPolicyValidationError{ - field: "NumRetries", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RetryPolicyValidationError{ - field: "NumRetries", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetNumRetries()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RetryPolicyValidationError{ - field: "NumRetries", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return RetryPolicyMultiError(errors) - } - - return nil -} - -// RetryPolicyMultiError is an error wrapping multiple validation errors -// returned by RetryPolicy.ValidateAll() if the designated 
constraints aren't met. -type RetryPolicyMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m RetryPolicyMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m RetryPolicyMultiError) AllErrors() []error { return m } - -// RetryPolicyValidationError is the validation error returned by -// RetryPolicy.Validate if the designated constraints aren't met. -type RetryPolicyValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RetryPolicyValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RetryPolicyValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RetryPolicyValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e RetryPolicyValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e RetryPolicyValidationError) ErrorName() string { return "RetryPolicyValidationError" } - -// Error satisfies the builtin error interface -func (e RetryPolicyValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRetryPolicy.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RetryPolicyValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RetryPolicyValidationError{} - -// Validate checks the field values on RemoteDataSource with the rules defined -// in the proto definition for this message. 
If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *RemoteDataSource) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on RemoteDataSource with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// RemoteDataSourceMultiError, or nil if none found. -func (m *RemoteDataSource) ValidateAll() error { - return m.validate(true) -} - -func (m *RemoteDataSource) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if m.GetHttpUri() == nil { - err := RemoteDataSourceValidationError{ - field: "HttpUri", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetHttpUri()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RemoteDataSourceValidationError{ - field: "HttpUri", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RemoteDataSourceValidationError{ - field: "HttpUri", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetHttpUri()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RemoteDataSourceValidationError{ - field: "HttpUri", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(m.GetSha256()) < 1 { - err := RemoteDataSourceValidationError{ - field: "Sha256", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetRetryPolicy()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = 
append(errors, RemoteDataSourceValidationError{ - field: "RetryPolicy", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RemoteDataSourceValidationError{ - field: "RetryPolicy", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RemoteDataSourceValidationError{ - field: "RetryPolicy", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return RemoteDataSourceMultiError(errors) - } - - return nil -} - -// RemoteDataSourceMultiError is an error wrapping multiple validation errors -// returned by RemoteDataSource.ValidateAll() if the designated constraints -// aren't met. -type RemoteDataSourceMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m RemoteDataSourceMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m RemoteDataSourceMultiError) AllErrors() []error { return m } - -// RemoteDataSourceValidationError is the validation error returned by -// RemoteDataSource.Validate if the designated constraints aren't met. -type RemoteDataSourceValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RemoteDataSourceValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RemoteDataSourceValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RemoteDataSourceValidationError) Cause() error { return e.cause } - -// Key function returns key value. 
-func (e RemoteDataSourceValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e RemoteDataSourceValidationError) ErrorName() string { return "RemoteDataSourceValidationError" } - -// Error satisfies the builtin error interface -func (e RemoteDataSourceValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRemoteDataSource.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RemoteDataSourceValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RemoteDataSourceValidationError{} - -// Validate checks the field values on AsyncDataSource with the rules defined -// in the proto definition for this message. If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *AsyncDataSource) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on AsyncDataSource with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// AsyncDataSourceMultiError, or nil if none found. 
-func (m *AsyncDataSource) ValidateAll() error { - return m.validate(true) -} - -func (m *AsyncDataSource) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.Specifier.(type) { - - case *AsyncDataSource_Local: - - if all { - switch v := interface{}(m.GetLocal()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, AsyncDataSourceValidationError{ - field: "Local", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, AsyncDataSourceValidationError{ - field: "Local", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLocal()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return AsyncDataSourceValidationError{ - field: "Local", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *AsyncDataSource_Remote: - - if all { - switch v := interface{}(m.GetRemote()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, AsyncDataSourceValidationError{ - field: "Remote", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, AsyncDataSourceValidationError{ - field: "Remote", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetRemote()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return AsyncDataSourceValidationError{ - field: "Remote", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := AsyncDataSourceValidationError{ - field: "Specifier", - reason: "value is required", - } - if !all { - return err - } - errors = 
append(errors, err) - - } - - if len(errors) > 0 { - return AsyncDataSourceMultiError(errors) - } - - return nil -} - -// AsyncDataSourceMultiError is an error wrapping multiple validation errors -// returned by AsyncDataSource.ValidateAll() if the designated constraints -// aren't met. -type AsyncDataSourceMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m AsyncDataSourceMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m AsyncDataSourceMultiError) AllErrors() []error { return m } - -// AsyncDataSourceValidationError is the validation error returned by -// AsyncDataSource.Validate if the designated constraints aren't met. -type AsyncDataSourceValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e AsyncDataSourceValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e AsyncDataSourceValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e AsyncDataSourceValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e AsyncDataSourceValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e AsyncDataSourceValidationError) ErrorName() string { return "AsyncDataSourceValidationError" } - -// Error satisfies the builtin error interface -func (e AsyncDataSourceValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sAsyncDataSource.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = AsyncDataSourceValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = AsyncDataSourceValidationError{} - -// Validate checks the field values on TransportSocket with the rules defined -// in the proto definition for this message. If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *TransportSocket) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on TransportSocket with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// TransportSocketMultiError, or nil if none found. 
-func (m *TransportSocket) ValidateAll() error { - return m.validate(true) -} - -func (m *TransportSocket) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(m.GetName()) < 1 { - err := TransportSocketValidationError{ - field: "Name", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - switch m.ConfigType.(type) { - - case *TransportSocket_Config: - - if all { - switch v := interface{}(m.GetConfig()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, TransportSocketValidationError{ - field: "Config", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, TransportSocketValidationError{ - field: "Config", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return TransportSocketValidationError{ - field: "Config", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *TransportSocket_TypedConfig: - - if all { - switch v := interface{}(m.GetTypedConfig()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, TransportSocketValidationError{ - field: "TypedConfig", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, TransportSocketValidationError{ - field: "TypedConfig", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return TransportSocketValidationError{ - 
field: "TypedConfig", - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return TransportSocketMultiError(errors) - } - - return nil -} - -// TransportSocketMultiError is an error wrapping multiple validation errors -// returned by TransportSocket.ValidateAll() if the designated constraints -// aren't met. -type TransportSocketMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m TransportSocketMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m TransportSocketMultiError) AllErrors() []error { return m } - -// TransportSocketValidationError is the validation error returned by -// TransportSocket.Validate if the designated constraints aren't met. -type TransportSocketValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e TransportSocketValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e TransportSocketValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e TransportSocketValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e TransportSocketValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e TransportSocketValidationError) ErrorName() string { return "TransportSocketValidationError" } - -// Error satisfies the builtin error interface -func (e TransportSocketValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sTransportSocket.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = TransportSocketValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = TransportSocketValidationError{} - -// Validate checks the field values on RuntimeFractionalPercent with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *RuntimeFractionalPercent) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on RuntimeFractionalPercent with the -// rules defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// RuntimeFractionalPercentMultiError, or nil if none found. 
-func (m *RuntimeFractionalPercent) ValidateAll() error { - return m.validate(true) -} - -func (m *RuntimeFractionalPercent) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if m.GetDefaultValue() == nil { - err := RuntimeFractionalPercentValidationError{ - field: "DefaultValue", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetDefaultValue()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RuntimeFractionalPercentValidationError{ - field: "DefaultValue", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RuntimeFractionalPercentValidationError{ - field: "DefaultValue", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RuntimeFractionalPercentValidationError{ - field: "DefaultValue", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for RuntimeKey - - if len(errors) > 0 { - return RuntimeFractionalPercentMultiError(errors) - } - - return nil -} - -// RuntimeFractionalPercentMultiError is an error wrapping multiple validation -// errors returned by RuntimeFractionalPercent.ValidateAll() if the designated -// constraints aren't met. -type RuntimeFractionalPercentMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m RuntimeFractionalPercentMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. 
-func (m RuntimeFractionalPercentMultiError) AllErrors() []error { return m } - -// RuntimeFractionalPercentValidationError is the validation error returned by -// RuntimeFractionalPercent.Validate if the designated constraints aren't met. -type RuntimeFractionalPercentValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RuntimeFractionalPercentValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RuntimeFractionalPercentValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RuntimeFractionalPercentValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e RuntimeFractionalPercentValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e RuntimeFractionalPercentValidationError) ErrorName() string { - return "RuntimeFractionalPercentValidationError" -} - -// Error satisfies the builtin error interface -func (e RuntimeFractionalPercentValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRuntimeFractionalPercent.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RuntimeFractionalPercentValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RuntimeFractionalPercentValidationError{} - -// Validate checks the field values on ControlPlane with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. 
-func (m *ControlPlane) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ControlPlane with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in ControlPlaneMultiError, or -// nil if none found. -func (m *ControlPlane) ValidateAll() error { - return m.validate(true) -} - -func (m *ControlPlane) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Identifier - - if len(errors) > 0 { - return ControlPlaneMultiError(errors) - } - - return nil -} - -// ControlPlaneMultiError is an error wrapping multiple validation errors -// returned by ControlPlane.ValidateAll() if the designated constraints aren't met. -type ControlPlaneMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ControlPlaneMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ControlPlaneMultiError) AllErrors() []error { return m } - -// ControlPlaneValidationError is the validation error returned by -// ControlPlane.Validate if the designated constraints aren't met. -type ControlPlaneValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ControlPlaneValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ControlPlaneValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ControlPlaneValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ControlPlaneValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ControlPlaneValidationError) ErrorName() string { return "ControlPlaneValidationError" } - -// Error satisfies the builtin error interface -func (e ControlPlaneValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sControlPlane.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ControlPlaneValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ControlPlaneValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/config_source.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/config_source.pb.go deleted file mode 100644 index 46d19ff088..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/config_source.pb.go +++ /dev/null @@ -1,865 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/api/v2/core/config_source.proto - -package core - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/go-control-plane/envoy/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// xDS API version. 
This is used to describe both resource and transport -// protocol versions (in distinct configuration fields). -type ApiVersion int32 - -const ( - // When not specified, we assume v2, to ease migration to Envoy's stable API - // versioning. If a client does not support v2 (e.g. due to deprecation), this - // is an invalid value. - // - // Deprecated: Do not use. - ApiVersion_AUTO ApiVersion = 0 - // Use xDS v2 API. - // - // Deprecated: Do not use. - ApiVersion_V2 ApiVersion = 1 - // Use xDS v3 API. - ApiVersion_V3 ApiVersion = 2 -) - -// Enum value maps for ApiVersion. -var ( - ApiVersion_name = map[int32]string{ - 0: "AUTO", - 1: "V2", - 2: "V3", - } - ApiVersion_value = map[string]int32{ - "AUTO": 0, - "V2": 1, - "V3": 2, - } -) - -func (x ApiVersion) Enum() *ApiVersion { - p := new(ApiVersion) - *p = x - return p -} - -func (x ApiVersion) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ApiVersion) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_api_v2_core_config_source_proto_enumTypes[0].Descriptor() -} - -func (ApiVersion) Type() protoreflect.EnumType { - return &file_envoy_api_v2_core_config_source_proto_enumTypes[0] -} - -func (x ApiVersion) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ApiVersion.Descriptor instead. -func (ApiVersion) EnumDescriptor() ([]byte, []int) { - return file_envoy_api_v2_core_config_source_proto_rawDescGZIP(), []int{0} -} - -// APIs may be fetched via either REST or gRPC. -type ApiConfigSource_ApiType int32 - -const ( - // Ideally this would be 'reserved 0' but one can't reserve the default - // value. Instead we throw an exception if this is ever used. - // - // Deprecated: Do not use. - ApiConfigSource_UNSUPPORTED_REST_LEGACY ApiConfigSource_ApiType = 0 - // REST-JSON v2 API. The `canonical JSON encoding - // `_ for - // the v2 protos is used. 
- ApiConfigSource_REST ApiConfigSource_ApiType = 1 - // gRPC v2 API. - ApiConfigSource_GRPC ApiConfigSource_ApiType = 2 - // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} - // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state - // with every update, the xDS server only sends what has changed since the last update. - ApiConfigSource_DELTA_GRPC ApiConfigSource_ApiType = 3 -) - -// Enum value maps for ApiConfigSource_ApiType. -var ( - ApiConfigSource_ApiType_name = map[int32]string{ - 0: "UNSUPPORTED_REST_LEGACY", - 1: "REST", - 2: "GRPC", - 3: "DELTA_GRPC", - } - ApiConfigSource_ApiType_value = map[string]int32{ - "UNSUPPORTED_REST_LEGACY": 0, - "REST": 1, - "GRPC": 2, - "DELTA_GRPC": 3, - } -) - -func (x ApiConfigSource_ApiType) Enum() *ApiConfigSource_ApiType { - p := new(ApiConfigSource_ApiType) - *p = x - return p -} - -func (x ApiConfigSource_ApiType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ApiConfigSource_ApiType) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_api_v2_core_config_source_proto_enumTypes[1].Descriptor() -} - -func (ApiConfigSource_ApiType) Type() protoreflect.EnumType { - return &file_envoy_api_v2_core_config_source_proto_enumTypes[1] -} - -func (x ApiConfigSource_ApiType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ApiConfigSource_ApiType.Descriptor instead. -func (ApiConfigSource_ApiType) EnumDescriptor() ([]byte, []int) { - return file_envoy_api_v2_core_config_source_proto_rawDescGZIP(), []int{0, 0} -} - -// API configuration source. This identifies the API type and cluster that Envoy -// will use to fetch an xDS API. 
-// [#next-free-field: 9] -type ApiConfigSource struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // API type (gRPC, REST, delta gRPC) - ApiType ApiConfigSource_ApiType `protobuf:"varint,1,opt,name=api_type,json=apiType,proto3,enum=envoy.api.v2.core.ApiConfigSource_ApiType" json:"api_type,omitempty"` - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - TransportApiVersion ApiVersion `protobuf:"varint,8,opt,name=transport_api_version,json=transportApiVersion,proto3,enum=envoy.api.v2.core.ApiVersion" json:"transport_api_version,omitempty"` - // Cluster names should be used only with REST. If > 1 - // cluster is defined, clusters will be cycled through if any kind of failure - // occurs. - // - // .. note:: - // - // The cluster with name ``cluster_name`` must be statically defined and its - // type must not be ``EDS``. - ClusterNames []string `protobuf:"bytes,2,rep,name=cluster_names,json=clusterNames,proto3" json:"cluster_names,omitempty"` - // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, - // services will be cycled through if any kind of failure occurs. - GrpcServices []*GrpcService `protobuf:"bytes,4,rep,name=grpc_services,json=grpcServices,proto3" json:"grpc_services,omitempty"` - // For REST APIs, the delay between successive polls. - RefreshDelay *duration.Duration `protobuf:"bytes,3,opt,name=refresh_delay,json=refreshDelay,proto3" json:"refresh_delay,omitempty"` - // For REST APIs, the request timeout. If not set, a default value of 1s will be used. - RequestTimeout *duration.Duration `protobuf:"bytes,5,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` - // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be - // rate limited. 
- RateLimitSettings *RateLimitSettings `protobuf:"bytes,6,opt,name=rate_limit_settings,json=rateLimitSettings,proto3" json:"rate_limit_settings,omitempty"` - // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. - SetNodeOnFirstMessageOnly bool `protobuf:"varint,7,opt,name=set_node_on_first_message_only,json=setNodeOnFirstMessageOnly,proto3" json:"set_node_on_first_message_only,omitempty"` -} - -func (x *ApiConfigSource) Reset() { - *x = ApiConfigSource{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_config_source_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ApiConfigSource) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ApiConfigSource) ProtoMessage() {} - -func (x *ApiConfigSource) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_config_source_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ApiConfigSource.ProtoReflect.Descriptor instead. 
-func (*ApiConfigSource) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_config_source_proto_rawDescGZIP(), []int{0} -} - -func (x *ApiConfigSource) GetApiType() ApiConfigSource_ApiType { - if x != nil { - return x.ApiType - } - return ApiConfigSource_UNSUPPORTED_REST_LEGACY -} - -func (x *ApiConfigSource) GetTransportApiVersion() ApiVersion { - if x != nil { - return x.TransportApiVersion - } - return ApiVersion_AUTO -} - -func (x *ApiConfigSource) GetClusterNames() []string { - if x != nil { - return x.ClusterNames - } - return nil -} - -func (x *ApiConfigSource) GetGrpcServices() []*GrpcService { - if x != nil { - return x.GrpcServices - } - return nil -} - -func (x *ApiConfigSource) GetRefreshDelay() *duration.Duration { - if x != nil { - return x.RefreshDelay - } - return nil -} - -func (x *ApiConfigSource) GetRequestTimeout() *duration.Duration { - if x != nil { - return x.RequestTimeout - } - return nil -} - -func (x *ApiConfigSource) GetRateLimitSettings() *RateLimitSettings { - if x != nil { - return x.RateLimitSettings - } - return nil -} - -func (x *ApiConfigSource) GetSetNodeOnFirstMessageOnly() bool { - if x != nil { - return x.SetNodeOnFirstMessageOnly - } - return false -} - -// Aggregated Discovery Service (ADS) options. This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that ADS is to be used. 
-type AggregatedConfigSource struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *AggregatedConfigSource) Reset() { - *x = AggregatedConfigSource{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_config_source_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AggregatedConfigSource) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AggregatedConfigSource) ProtoMessage() {} - -func (x *AggregatedConfigSource) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_config_source_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AggregatedConfigSource.ProtoReflect.Descriptor instead. -func (*AggregatedConfigSource) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_config_source_proto_rawDescGZIP(), []int{1} -} - -// [#not-implemented-hide:] -// Self-referencing config source options. This is currently empty, but when -// set in :ref:`ConfigSource ` can be used to -// specify that other data can be obtained from the same server. -type SelfConfigSource struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // API version for xDS transport protocol. This describes the xDS gRPC/REST - // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. 
- TransportApiVersion ApiVersion `protobuf:"varint,1,opt,name=transport_api_version,json=transportApiVersion,proto3,enum=envoy.api.v2.core.ApiVersion" json:"transport_api_version,omitempty"` -} - -func (x *SelfConfigSource) Reset() { - *x = SelfConfigSource{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_config_source_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SelfConfigSource) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SelfConfigSource) ProtoMessage() {} - -func (x *SelfConfigSource) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_config_source_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SelfConfigSource.ProtoReflect.Descriptor instead. -func (*SelfConfigSource) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_config_source_proto_rawDescGZIP(), []int{2} -} - -func (x *SelfConfigSource) GetTransportApiVersion() ApiVersion { - if x != nil { - return x.TransportApiVersion - } - return ApiVersion_AUTO -} - -// Rate Limit settings to be applied for discovery requests made by Envoy. -type RateLimitSettings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a - // default value of 100 will be used. - MaxTokens *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"` - // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens - // per second will be used. 
- FillRate *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=fill_rate,json=fillRate,proto3" json:"fill_rate,omitempty"` -} - -func (x *RateLimitSettings) Reset() { - *x = RateLimitSettings{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_config_source_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RateLimitSettings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RateLimitSettings) ProtoMessage() {} - -func (x *RateLimitSettings) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_config_source_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RateLimitSettings.ProtoReflect.Descriptor instead. -func (*RateLimitSettings) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_config_source_proto_rawDescGZIP(), []int{3} -} - -func (x *RateLimitSettings) GetMaxTokens() *wrappers.UInt32Value { - if x != nil { - return x.MaxTokens - } - return nil -} - -func (x *RateLimitSettings) GetFillRate() *wrappers.DoubleValue { - if x != nil { - return x.FillRate - } - return nil -} - -// Configuration for :ref:`listeners `, :ref:`clusters -// `, :ref:`routes -// `, :ref:`endpoints -// ` etc. may either be sourced from the -// filesystem or from an xDS API source. Filesystem configs are watched with -// inotify for updates. 
-// [#next-free-field: 7] -type ConfigSource struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to ConfigSourceSpecifier: - // *ConfigSource_Path - // *ConfigSource_ApiConfigSource - // *ConfigSource_Ads - // *ConfigSource_Self - ConfigSourceSpecifier isConfigSource_ConfigSourceSpecifier `protobuf_oneof:"config_source_specifier"` - // When this timeout is specified, Envoy will wait no longer than the specified time for first - // config response on this xDS subscription during the :ref:`initialization process - // `. After reaching the timeout, Envoy will move to the next - // initialization phase, even if the first config is not delivered yet. The timer is activated - // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 - // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another - // timeout applies). The default is 15s. - InitialFetchTimeout *duration.Duration `protobuf:"bytes,4,opt,name=initial_fetch_timeout,json=initialFetchTimeout,proto3" json:"initial_fetch_timeout,omitempty"` - // API version for xDS resources. This implies the type URLs that the client - // will request for resources and the resource type that the client will in - // turn expect to be delivered. 
- ResourceApiVersion ApiVersion `protobuf:"varint,6,opt,name=resource_api_version,json=resourceApiVersion,proto3,enum=envoy.api.v2.core.ApiVersion" json:"resource_api_version,omitempty"` -} - -func (x *ConfigSource) Reset() { - *x = ConfigSource{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_config_source_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ConfigSource) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConfigSource) ProtoMessage() {} - -func (x *ConfigSource) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_config_source_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConfigSource.ProtoReflect.Descriptor instead. -func (*ConfigSource) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_config_source_proto_rawDescGZIP(), []int{4} -} - -func (m *ConfigSource) GetConfigSourceSpecifier() isConfigSource_ConfigSourceSpecifier { - if m != nil { - return m.ConfigSourceSpecifier - } - return nil -} - -func (x *ConfigSource) GetPath() string { - if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Path); ok { - return x.Path - } - return "" -} - -func (x *ConfigSource) GetApiConfigSource() *ApiConfigSource { - if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_ApiConfigSource); ok { - return x.ApiConfigSource - } - return nil -} - -func (x *ConfigSource) GetAds() *AggregatedConfigSource { - if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Ads); ok { - return x.Ads - } - return nil -} - -func (x *ConfigSource) GetSelf() *SelfConfigSource { - if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Self); ok { - return x.Self - } - return nil -} - -func (x *ConfigSource) GetInitialFetchTimeout() 
*duration.Duration { - if x != nil { - return x.InitialFetchTimeout - } - return nil -} - -func (x *ConfigSource) GetResourceApiVersion() ApiVersion { - if x != nil { - return x.ResourceApiVersion - } - return ApiVersion_AUTO -} - -type isConfigSource_ConfigSourceSpecifier interface { - isConfigSource_ConfigSourceSpecifier() -} - -type ConfigSource_Path struct { - // Path on the filesystem to source and watch for configuration updates. - // When sourcing configuration for :ref:`secret `, - // the certificate and key files are also watched for updates. - // - // .. note:: - // - // The path to the source must exist at config load time. - // - // .. note:: - // - // Envoy will only watch the file path for *moves.* This is because in general only moves - // are atomic. The same method of swapping files as is demonstrated in the - // :ref:`runtime documentation ` can be used here also. - Path string `protobuf:"bytes,1,opt,name=path,proto3,oneof"` -} - -type ConfigSource_ApiConfigSource struct { - // API configuration source. - ApiConfigSource *ApiConfigSource `protobuf:"bytes,2,opt,name=api_config_source,json=apiConfigSource,proto3,oneof"` -} - -type ConfigSource_Ads struct { - // When set, ADS will be used to fetch resources. The ADS API configuration - // source in the bootstrap configuration is used. - Ads *AggregatedConfigSource `protobuf:"bytes,3,opt,name=ads,proto3,oneof"` -} - -type ConfigSource_Self struct { - // [#not-implemented-hide:] - // When set, the client will access the resources from the same server it got the - // ConfigSource from, although not necessarily from the same stream. This is similar to the - // :ref:`ads` field, except that the client may use a - // different stream to the same server. As a result, this field can be used for things - // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) 
- // LDS to RDS on the same server without requiring the management server to know its name - // or required credentials. - // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since - // this field can implicitly mean to use the same stream in the case where the ConfigSource - // is provided via ADS and the specified data can also be obtained via ADS.] - Self *SelfConfigSource `protobuf:"bytes,5,opt,name=self,proto3,oneof"` -} - -func (*ConfigSource_Path) isConfigSource_ConfigSourceSpecifier() {} - -func (*ConfigSource_ApiConfigSource) isConfigSource_ConfigSourceSpecifier() {} - -func (*ConfigSource_Ads) isConfigSource_ConfigSourceSpecifier() {} - -func (*ConfigSource_Self) isConfigSource_ConfigSourceSpecifier() {} - -var File_envoy_api_v2_core_config_source_proto protoreflect.FileDescriptor - -var file_envoy_api_v2_core_config_source_proto_rawDesc = []byte{ - 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x67, 0x72, - 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 
0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa6, 0x05, - 0x0a, 0x0f, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0x4f, 0x0a, 0x08, 0x61, 0x70, 0x69, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x41, 0x70, 0x69, 0x54, 0x79, 0x70, 0x65, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x70, 0x69, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x5b, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, - 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x23, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 
0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0c, 0x67, 0x72, 0x70, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x66, - 0x72, 0x65, 0x73, 0x68, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x72, 0x65, 0x66, - 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x4c, 0x0a, 0x0f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x54, 0x0a, 0x13, 0x72, 0x61, 0x74, 0x65, 0x5f, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x11, 0x72, 0x61, 0x74, 0x65, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x41, 0x0a, - 0x1e, 0x73, 0x65, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x66, 
0x69, 0x72, - 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x73, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x4f, 0x6e, - 0x46, 0x69, 0x72, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x6e, 0x6c, 0x79, - 0x22, 0x54, 0x0a, 0x07, 0x41, 0x70, 0x69, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x17, 0x55, - 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x52, 0x45, 0x53, 0x54, 0x5f, - 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x00, 0x1a, 0x08, 0x08, 0x01, 0xa8, 0xf7, 0xb4, 0x8b, - 0x02, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x52, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x47, 0x52, 0x50, 0x43, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x5f, - 0x47, 0x52, 0x50, 0x43, 0x10, 0x03, 0x22, 0x18, 0x0a, 0x16, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x22, 0x6f, 0x0a, 0x10, 0x53, 0x65, 0x6c, 0x66, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x5b, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, - 0x74, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x13, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x9b, 0x01, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3b, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, - 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x6c, 0x5f, 0x72, 0x61, 0x74, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x21, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x6c, 0x52, 0x61, 0x74, 0x65, 0x22, - 0xba, 0x03, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x14, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, - 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x11, 0x61, 0x70, 0x69, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x61, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x03, 0x61, 0x64, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x48, 0x00, 0x52, 0x03, 0x61, 0x64, 0x73, 0x12, 0x39, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 
0x53, 0x65, 0x6c, 0x66, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x04, 0x73, 0x65, - 0x6c, 0x66, 0x12, 0x4d, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x66, 0x65, - 0x74, 0x63, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x69, 0x6e, - 0x69, 0x74, 0x69, 0x61, 0x6c, 0x46, 0x65, 0x74, 0x63, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x12, 0x59, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x70, - 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x1e, 0x0a, 0x17, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x2a, 0x2e, 0x0a, 0x0a, - 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0c, 0x0a, 0x04, 0x41, 0x55, - 0x54, 0x4f, 0x10, 0x00, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x0a, 0x0a, 0x02, 0x56, 0x32, 0x10, 0x01, - 0x1a, 0x02, 0x08, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x33, 0x10, 0x02, 0x42, 0x94, 0x01, 0x0a, - 0x1f, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x42, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, - 
0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xf2, - 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, - 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_api_v2_core_config_source_proto_rawDescOnce sync.Once - file_envoy_api_v2_core_config_source_proto_rawDescData = file_envoy_api_v2_core_config_source_proto_rawDesc -) - -func file_envoy_api_v2_core_config_source_proto_rawDescGZIP() []byte { - file_envoy_api_v2_core_config_source_proto_rawDescOnce.Do(func() { - file_envoy_api_v2_core_config_source_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_api_v2_core_config_source_proto_rawDescData) - }) - return file_envoy_api_v2_core_config_source_proto_rawDescData -} - -var file_envoy_api_v2_core_config_source_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_envoy_api_v2_core_config_source_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_envoy_api_v2_core_config_source_proto_goTypes = []interface{}{ - (ApiVersion)(0), // 0: envoy.api.v2.core.ApiVersion - (ApiConfigSource_ApiType)(0), // 1: envoy.api.v2.core.ApiConfigSource.ApiType - (*ApiConfigSource)(nil), // 2: envoy.api.v2.core.ApiConfigSource - (*AggregatedConfigSource)(nil), // 3: envoy.api.v2.core.AggregatedConfigSource - (*SelfConfigSource)(nil), // 4: envoy.api.v2.core.SelfConfigSource - (*RateLimitSettings)(nil), // 5: envoy.api.v2.core.RateLimitSettings - (*ConfigSource)(nil), // 6: envoy.api.v2.core.ConfigSource - (*GrpcService)(nil), // 7: envoy.api.v2.core.GrpcService 
- (*duration.Duration)(nil), // 8: google.protobuf.Duration - (*wrappers.UInt32Value)(nil), // 9: google.protobuf.UInt32Value - (*wrappers.DoubleValue)(nil), // 10: google.protobuf.DoubleValue -} -var file_envoy_api_v2_core_config_source_proto_depIdxs = []int32{ - 1, // 0: envoy.api.v2.core.ApiConfigSource.api_type:type_name -> envoy.api.v2.core.ApiConfigSource.ApiType - 0, // 1: envoy.api.v2.core.ApiConfigSource.transport_api_version:type_name -> envoy.api.v2.core.ApiVersion - 7, // 2: envoy.api.v2.core.ApiConfigSource.grpc_services:type_name -> envoy.api.v2.core.GrpcService - 8, // 3: envoy.api.v2.core.ApiConfigSource.refresh_delay:type_name -> google.protobuf.Duration - 8, // 4: envoy.api.v2.core.ApiConfigSource.request_timeout:type_name -> google.protobuf.Duration - 5, // 5: envoy.api.v2.core.ApiConfigSource.rate_limit_settings:type_name -> envoy.api.v2.core.RateLimitSettings - 0, // 6: envoy.api.v2.core.SelfConfigSource.transport_api_version:type_name -> envoy.api.v2.core.ApiVersion - 9, // 7: envoy.api.v2.core.RateLimitSettings.max_tokens:type_name -> google.protobuf.UInt32Value - 10, // 8: envoy.api.v2.core.RateLimitSettings.fill_rate:type_name -> google.protobuf.DoubleValue - 2, // 9: envoy.api.v2.core.ConfigSource.api_config_source:type_name -> envoy.api.v2.core.ApiConfigSource - 3, // 10: envoy.api.v2.core.ConfigSource.ads:type_name -> envoy.api.v2.core.AggregatedConfigSource - 4, // 11: envoy.api.v2.core.ConfigSource.self:type_name -> envoy.api.v2.core.SelfConfigSource - 8, // 12: envoy.api.v2.core.ConfigSource.initial_fetch_timeout:type_name -> google.protobuf.Duration - 0, // 13: envoy.api.v2.core.ConfigSource.resource_api_version:type_name -> envoy.api.v2.core.ApiVersion - 14, // [14:14] is the sub-list for method output_type - 14, // [14:14] is the sub-list for method input_type - 14, // [14:14] is the sub-list for extension type_name - 14, // [14:14] is the sub-list for extension extendee - 0, // [0:14] is the sub-list for field type_name -} - -func 
init() { file_envoy_api_v2_core_config_source_proto_init() } -func file_envoy_api_v2_core_config_source_proto_init() { - if File_envoy_api_v2_core_config_source_proto != nil { - return - } - file_envoy_api_v2_core_grpc_service_proto_init() - if !protoimpl.UnsafeEnabled { - file_envoy_api_v2_core_config_source_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApiConfigSource); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_config_source_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AggregatedConfigSource); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_config_source_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SelfConfigSource); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_config_source_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RateLimitSettings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_config_source_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConfigSource); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_api_v2_core_config_source_proto_msgTypes[4].OneofWrappers = []interface{}{ - (*ConfigSource_Path)(nil), - (*ConfigSource_ApiConfigSource)(nil), - (*ConfigSource_Ads)(nil), - (*ConfigSource_Self)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_api_v2_core_config_source_proto_rawDesc, - NumEnums: 2, - NumMessages: 5, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_api_v2_core_config_source_proto_goTypes, - DependencyIndexes: file_envoy_api_v2_core_config_source_proto_depIdxs, - EnumInfos: file_envoy_api_v2_core_config_source_proto_enumTypes, - MessageInfos: file_envoy_api_v2_core_config_source_proto_msgTypes, - }.Build() - File_envoy_api_v2_core_config_source_proto = out.File - file_envoy_api_v2_core_config_source_proto_rawDesc = nil - file_envoy_api_v2_core_config_source_proto_goTypes = nil - file_envoy_api_v2_core_config_source_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/config_source.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/config_source.pb.validate.go deleted file mode 100644 index 3517b6cfd1..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/config_source.pb.validate.go +++ /dev/null @@ -1,890 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/api/v2/core/config_source.proto - -package core - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on ApiConfigSource with the rules defined -// in the proto definition for this message. 
If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *ApiConfigSource) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ApiConfigSource with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// ApiConfigSourceMultiError, or nil if none found. -func (m *ApiConfigSource) ValidateAll() error { - return m.validate(true) -} - -func (m *ApiConfigSource) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if _, ok := ApiConfigSource_ApiType_name[int32(m.GetApiType())]; !ok { - err := ApiConfigSourceValidationError{ - field: "ApiType", - reason: "value must be one of the defined enum values", - } - if !all { - return err - } - errors = append(errors, err) - } - - if _, ok := ApiVersion_name[int32(m.GetTransportApiVersion())]; !ok { - err := ApiConfigSourceValidationError{ - field: "TransportApiVersion", - reason: "value must be one of the defined enum values", - } - if !all { - return err - } - errors = append(errors, err) - } - - for idx, item := range m.GetGrpcServices() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ApiConfigSourceValidationError{ - field: fmt.Sprintf("GrpcServices[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ApiConfigSourceValidationError{ - field: fmt.Sprintf("GrpcServices[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ApiConfigSourceValidationError{ - field: 
fmt.Sprintf("GrpcServices[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if all { - switch v := interface{}(m.GetRefreshDelay()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ApiConfigSourceValidationError{ - field: "RefreshDelay", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ApiConfigSourceValidationError{ - field: "RefreshDelay", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetRefreshDelay()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ApiConfigSourceValidationError{ - field: "RefreshDelay", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if d := m.GetRequestTimeout(); d != nil { - dur, err := d.AsDuration(), d.CheckValid() - if err != nil { - err = ApiConfigSourceValidationError{ - field: "RequestTimeout", - reason: "value is not a valid duration", - cause: err, - } - if !all { - return err - } - errors = append(errors, err) - } else { - - gt := time.Duration(0*time.Second + 0*time.Nanosecond) - - if dur <= gt { - err := ApiConfigSourceValidationError{ - field: "RequestTimeout", - reason: "value must be greater than 0s", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - } - - if all { - switch v := interface{}(m.GetRateLimitSettings()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ApiConfigSourceValidationError{ - field: "RateLimitSettings", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ApiConfigSourceValidationError{ - field: "RateLimitSettings", - reason: 
"embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetRateLimitSettings()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ApiConfigSourceValidationError{ - field: "RateLimitSettings", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for SetNodeOnFirstMessageOnly - - if len(errors) > 0 { - return ApiConfigSourceMultiError(errors) - } - - return nil -} - -// ApiConfigSourceMultiError is an error wrapping multiple validation errors -// returned by ApiConfigSource.ValidateAll() if the designated constraints -// aren't met. -type ApiConfigSourceMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ApiConfigSourceMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ApiConfigSourceMultiError) AllErrors() []error { return m } - -// ApiConfigSourceValidationError is the validation error returned by -// ApiConfigSource.Validate if the designated constraints aren't met. -type ApiConfigSourceValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ApiConfigSourceValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ApiConfigSourceValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ApiConfigSourceValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ApiConfigSourceValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ApiConfigSourceValidationError) ErrorName() string { return "ApiConfigSourceValidationError" } - -// Error satisfies the builtin error interface -func (e ApiConfigSourceValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sApiConfigSource.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ApiConfigSourceValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ApiConfigSourceValidationError{} - -// Validate checks the field values on AggregatedConfigSource with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *AggregatedConfigSource) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on AggregatedConfigSource with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// AggregatedConfigSourceMultiError, or nil if none found. -func (m *AggregatedConfigSource) ValidateAll() error { - return m.validate(true) -} - -func (m *AggregatedConfigSource) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(errors) > 0 { - return AggregatedConfigSourceMultiError(errors) - } - - return nil -} - -// AggregatedConfigSourceMultiError is an error wrapping multiple validation -// errors returned by AggregatedConfigSource.ValidateAll() if the designated -// constraints aren't met. -type AggregatedConfigSourceMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m AggregatedConfigSourceMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m AggregatedConfigSourceMultiError) AllErrors() []error { return m } - -// AggregatedConfigSourceValidationError is the validation error returned by -// AggregatedConfigSource.Validate if the designated constraints aren't met. -type AggregatedConfigSourceValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e AggregatedConfigSourceValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e AggregatedConfigSourceValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e AggregatedConfigSourceValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e AggregatedConfigSourceValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e AggregatedConfigSourceValidationError) ErrorName() string { - return "AggregatedConfigSourceValidationError" -} - -// Error satisfies the builtin error interface -func (e AggregatedConfigSourceValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sAggregatedConfigSource.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = AggregatedConfigSourceValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = AggregatedConfigSourceValidationError{} - -// Validate checks the field values on SelfConfigSource with the rules defined -// in the proto definition for this message. 
If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *SelfConfigSource) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on SelfConfigSource with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// SelfConfigSourceMultiError, or nil if none found. -func (m *SelfConfigSource) ValidateAll() error { - return m.validate(true) -} - -func (m *SelfConfigSource) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if _, ok := ApiVersion_name[int32(m.GetTransportApiVersion())]; !ok { - err := SelfConfigSourceValidationError{ - field: "TransportApiVersion", - reason: "value must be one of the defined enum values", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(errors) > 0 { - return SelfConfigSourceMultiError(errors) - } - - return nil -} - -// SelfConfigSourceMultiError is an error wrapping multiple validation errors -// returned by SelfConfigSource.ValidateAll() if the designated constraints -// aren't met. -type SelfConfigSourceMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m SelfConfigSourceMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m SelfConfigSourceMultiError) AllErrors() []error { return m } - -// SelfConfigSourceValidationError is the validation error returned by -// SelfConfigSource.Validate if the designated constraints aren't met. -type SelfConfigSourceValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e SelfConfigSourceValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SelfConfigSourceValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SelfConfigSourceValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SelfConfigSourceValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e SelfConfigSourceValidationError) ErrorName() string { return "SelfConfigSourceValidationError" } - -// Error satisfies the builtin error interface -func (e SelfConfigSourceValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSelfConfigSource.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SelfConfigSourceValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SelfConfigSourceValidationError{} - -// Validate checks the field values on RateLimitSettings with the rules defined -// in the proto definition for this message. If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *RateLimitSettings) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on RateLimitSettings with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// RateLimitSettingsMultiError, or nil if none found. 
-func (m *RateLimitSettings) ValidateAll() error { - return m.validate(true) -} - -func (m *RateLimitSettings) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetMaxTokens()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RateLimitSettingsValidationError{ - field: "MaxTokens", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RateLimitSettingsValidationError{ - field: "MaxTokens", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetMaxTokens()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RateLimitSettingsValidationError{ - field: "MaxTokens", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if wrapper := m.GetFillRate(); wrapper != nil { - - if wrapper.GetValue() <= 0 { - err := RateLimitSettingsValidationError{ - field: "FillRate", - reason: "value must be greater than 0", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - - if len(errors) > 0 { - return RateLimitSettingsMultiError(errors) - } - - return nil -} - -// RateLimitSettingsMultiError is an error wrapping multiple validation errors -// returned by RateLimitSettings.ValidateAll() if the designated constraints -// aren't met. -type RateLimitSettingsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m RateLimitSettingsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. 
-func (m RateLimitSettingsMultiError) AllErrors() []error { return m } - -// RateLimitSettingsValidationError is the validation error returned by -// RateLimitSettings.Validate if the designated constraints aren't met. -type RateLimitSettingsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RateLimitSettingsValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RateLimitSettingsValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RateLimitSettingsValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e RateLimitSettingsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e RateLimitSettingsValidationError) ErrorName() string { - return "RateLimitSettingsValidationError" -} - -// Error satisfies the builtin error interface -func (e RateLimitSettingsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRateLimitSettings.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RateLimitSettingsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RateLimitSettingsValidationError{} - -// Validate checks the field values on ConfigSource with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *ConfigSource) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ConfigSource with the rules defined -// in the proto definition for this message. 
If any rules are violated, the -// result is a list of violation errors wrapped in ConfigSourceMultiError, or -// nil if none found. -func (m *ConfigSource) ValidateAll() error { - return m.validate(true) -} - -func (m *ConfigSource) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetInitialFetchTimeout()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ConfigSourceValidationError{ - field: "InitialFetchTimeout", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ConfigSourceValidationError{ - field: "InitialFetchTimeout", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetInitialFetchTimeout()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ConfigSourceValidationError{ - field: "InitialFetchTimeout", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if _, ok := ApiVersion_name[int32(m.GetResourceApiVersion())]; !ok { - err := ConfigSourceValidationError{ - field: "ResourceApiVersion", - reason: "value must be one of the defined enum values", - } - if !all { - return err - } - errors = append(errors, err) - } - - switch m.ConfigSourceSpecifier.(type) { - - case *ConfigSource_Path: - // no validation rules for Path - - case *ConfigSource_ApiConfigSource: - - if all { - switch v := interface{}(m.GetApiConfigSource()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ConfigSourceValidationError{ - field: "ApiConfigSource", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, 
ConfigSourceValidationError{ - field: "ApiConfigSource", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetApiConfigSource()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ConfigSourceValidationError{ - field: "ApiConfigSource", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *ConfigSource_Ads: - - if all { - switch v := interface{}(m.GetAds()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ConfigSourceValidationError{ - field: "Ads", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ConfigSourceValidationError{ - field: "Ads", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetAds()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ConfigSourceValidationError{ - field: "Ads", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *ConfigSource_Self: - - if all { - switch v := interface{}(m.GetSelf()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ConfigSourceValidationError{ - field: "Self", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ConfigSourceValidationError{ - field: "Self", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetSelf()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ConfigSourceValidationError{ - field: "Self", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - 
err := ConfigSourceValidationError{ - field: "ConfigSourceSpecifier", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return ConfigSourceMultiError(errors) - } - - return nil -} - -// ConfigSourceMultiError is an error wrapping multiple validation errors -// returned by ConfigSource.ValidateAll() if the designated constraints aren't met. -type ConfigSourceMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ConfigSourceMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ConfigSourceMultiError) AllErrors() []error { return m } - -// ConfigSourceValidationError is the validation error returned by -// ConfigSource.Validate if the designated constraints aren't met. -type ConfigSourceValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ConfigSourceValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ConfigSourceValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ConfigSourceValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ConfigSourceValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ConfigSourceValidationError) ErrorName() string { return "ConfigSourceValidationError" } - -// Error satisfies the builtin error interface -func (e ConfigSourceValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sConfigSource.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ConfigSourceValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ConfigSourceValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/event_service_config.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/event_service_config.pb.go deleted file mode 100644 index c6d651437b..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/event_service_config.pb.go +++ /dev/null @@ -1,196 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/api/v2/core/event_service_config.proto - -package core - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// [#not-implemented-hide:] -// Configuration of the event reporting service endpoint. 
-type EventServiceConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to ConfigSourceSpecifier: - // *EventServiceConfig_GrpcService - ConfigSourceSpecifier isEventServiceConfig_ConfigSourceSpecifier `protobuf_oneof:"config_source_specifier"` -} - -func (x *EventServiceConfig) Reset() { - *x = EventServiceConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_event_service_config_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EventServiceConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EventServiceConfig) ProtoMessage() {} - -func (x *EventServiceConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_event_service_config_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EventServiceConfig.ProtoReflect.Descriptor instead. -func (*EventServiceConfig) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_event_service_config_proto_rawDescGZIP(), []int{0} -} - -func (m *EventServiceConfig) GetConfigSourceSpecifier() isEventServiceConfig_ConfigSourceSpecifier { - if m != nil { - return m.ConfigSourceSpecifier - } - return nil -} - -func (x *EventServiceConfig) GetGrpcService() *GrpcService { - if x, ok := x.GetConfigSourceSpecifier().(*EventServiceConfig_GrpcService); ok { - return x.GrpcService - } - return nil -} - -type isEventServiceConfig_ConfigSourceSpecifier interface { - isEventServiceConfig_ConfigSourceSpecifier() -} - -type EventServiceConfig_GrpcService struct { - // Specifies the gRPC service that hosts the event reporting service. 
- GrpcService *GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3,oneof"` -} - -func (*EventServiceConfig_GrpcService) isEventServiceConfig_ConfigSourceSpecifier() {} - -var File_envoy_api_v2_core_event_service_config_proto protoreflect.FileDescriptor - -var file_envoy_api_v2_core_event_service_config_proto_rawDesc = []byte{ - 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, - 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0x79, 0x0a, 0x12, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x43, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 
0x00, 0x52, 0x0b, 0x67, - 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x1e, 0x0a, 0x17, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x9a, 0x01, 0x0a, 0x1f, 0x69, - 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x17, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0xba, - 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_api_v2_core_event_service_config_proto_rawDescOnce sync.Once - file_envoy_api_v2_core_event_service_config_proto_rawDescData = file_envoy_api_v2_core_event_service_config_proto_rawDesc -) - -func file_envoy_api_v2_core_event_service_config_proto_rawDescGZIP() []byte { - file_envoy_api_v2_core_event_service_config_proto_rawDescOnce.Do(func() { - file_envoy_api_v2_core_event_service_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_api_v2_core_event_service_config_proto_rawDescData) - }) - return file_envoy_api_v2_core_event_service_config_proto_rawDescData -} - -var file_envoy_api_v2_core_event_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1) 
-var file_envoy_api_v2_core_event_service_config_proto_goTypes = []interface{}{ - (*EventServiceConfig)(nil), // 0: envoy.api.v2.core.EventServiceConfig - (*GrpcService)(nil), // 1: envoy.api.v2.core.GrpcService -} -var file_envoy_api_v2_core_event_service_config_proto_depIdxs = []int32{ - 1, // 0: envoy.api.v2.core.EventServiceConfig.grpc_service:type_name -> envoy.api.v2.core.GrpcService - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_envoy_api_v2_core_event_service_config_proto_init() } -func file_envoy_api_v2_core_event_service_config_proto_init() { - if File_envoy_api_v2_core_event_service_config_proto != nil { - return - } - file_envoy_api_v2_core_grpc_service_proto_init() - if !protoimpl.UnsafeEnabled { - file_envoy_api_v2_core_event_service_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EventServiceConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_api_v2_core_event_service_config_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*EventServiceConfig_GrpcService)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_api_v2_core_event_service_config_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_api_v2_core_event_service_config_proto_goTypes, - DependencyIndexes: file_envoy_api_v2_core_event_service_config_proto_depIdxs, - MessageInfos: file_envoy_api_v2_core_event_service_config_proto_msgTypes, - }.Build() - File_envoy_api_v2_core_event_service_config_proto = out.File - 
file_envoy_api_v2_core_event_service_config_proto_rawDesc = nil - file_envoy_api_v2_core_event_service_config_proto_goTypes = nil - file_envoy_api_v2_core_event_service_config_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/event_service_config.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/event_service_config.pb.validate.go deleted file mode 100644 index e64c93ff7e..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/event_service_config.pb.validate.go +++ /dev/null @@ -1,183 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/api/v2/core/event_service_config.proto - -package core - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on EventServiceConfig with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *EventServiceConfig) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on EventServiceConfig with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// EventServiceConfigMultiError, or nil if none found. 
-func (m *EventServiceConfig) ValidateAll() error { - return m.validate(true) -} - -func (m *EventServiceConfig) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.ConfigSourceSpecifier.(type) { - - case *EventServiceConfig_GrpcService: - - if all { - switch v := interface{}(m.GetGrpcService()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, EventServiceConfigValidationError{ - field: "GrpcService", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, EventServiceConfigValidationError{ - field: "GrpcService", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return EventServiceConfigValidationError{ - field: "GrpcService", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := EventServiceConfigValidationError{ - field: "ConfigSourceSpecifier", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return EventServiceConfigMultiError(errors) - } - - return nil -} - -// EventServiceConfigMultiError is an error wrapping multiple validation errors -// returned by EventServiceConfig.ValidateAll() if the designated constraints -// aren't met. -type EventServiceConfigMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m EventServiceConfigMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. 
-func (m EventServiceConfigMultiError) AllErrors() []error { return m } - -// EventServiceConfigValidationError is the validation error returned by -// EventServiceConfig.Validate if the designated constraints aren't met. -type EventServiceConfigValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e EventServiceConfigValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e EventServiceConfigValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e EventServiceConfigValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e EventServiceConfigValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e EventServiceConfigValidationError) ErrorName() string { - return "EventServiceConfigValidationError" -} - -// Error satisfies the builtin error interface -func (e EventServiceConfigValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sEventServiceConfig.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = EventServiceConfigValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = EventServiceConfigValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/grpc_method_list.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/grpc_method_list.pb.go deleted file mode 100644 index 318ad12668..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/grpc_method_list.pb.go +++ /dev/null @@ -1,241 +0,0 @@ -// Code generated by protoc-gen-go. 
DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/api/v2/core/grpc_method_list.proto - -package core - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// A list of gRPC methods which can be used as an allowlist, for example. -type GrpcMethodList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Services []*GrpcMethodList_Service `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` -} - -func (x *GrpcMethodList) Reset() { - *x = GrpcMethodList{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_grpc_method_list_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcMethodList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcMethodList) ProtoMessage() {} - -func (x *GrpcMethodList) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_grpc_method_list_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcMethodList.ProtoReflect.Descriptor instead. 
-func (*GrpcMethodList) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_grpc_method_list_proto_rawDescGZIP(), []int{0} -} - -func (x *GrpcMethodList) GetServices() []*GrpcMethodList_Service { - if x != nil { - return x.Services - } - return nil -} - -type GrpcMethodList_Service struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of the gRPC service. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The names of the gRPC methods in this service. - MethodNames []string `protobuf:"bytes,2,rep,name=method_names,json=methodNames,proto3" json:"method_names,omitempty"` -} - -func (x *GrpcMethodList_Service) Reset() { - *x = GrpcMethodList_Service{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_grpc_method_list_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcMethodList_Service) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcMethodList_Service) ProtoMessage() {} - -func (x *GrpcMethodList_Service) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_grpc_method_list_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcMethodList_Service.ProtoReflect.Descriptor instead. 
-func (*GrpcMethodList_Service) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_grpc_method_list_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *GrpcMethodList_Service) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *GrpcMethodList_Service) GetMethodNames() []string { - if x != nil { - return x.MethodNames - } - return nil -} - -var File_envoy_api_v2_core_grpc_method_list_proto protoreflect.FileDescriptor - -var file_envoy_api_v2_core_grpc_method_list_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, - 0x6c, 0x69, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x1e, 0x75, - 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, - 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xac, 0x01, 0x0a, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, - 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 
- 0x53, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, - 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x42, 0x96, 0x01, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x13, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, - 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_api_v2_core_grpc_method_list_proto_rawDescOnce sync.Once - file_envoy_api_v2_core_grpc_method_list_proto_rawDescData = file_envoy_api_v2_core_grpc_method_list_proto_rawDesc -) - -func file_envoy_api_v2_core_grpc_method_list_proto_rawDescGZIP() []byte { - file_envoy_api_v2_core_grpc_method_list_proto_rawDescOnce.Do(func() { - file_envoy_api_v2_core_grpc_method_list_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_api_v2_core_grpc_method_list_proto_rawDescData) - 
}) - return file_envoy_api_v2_core_grpc_method_list_proto_rawDescData -} - -var file_envoy_api_v2_core_grpc_method_list_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_envoy_api_v2_core_grpc_method_list_proto_goTypes = []interface{}{ - (*GrpcMethodList)(nil), // 0: envoy.api.v2.core.GrpcMethodList - (*GrpcMethodList_Service)(nil), // 1: envoy.api.v2.core.GrpcMethodList.Service -} -var file_envoy_api_v2_core_grpc_method_list_proto_depIdxs = []int32{ - 1, // 0: envoy.api.v2.core.GrpcMethodList.services:type_name -> envoy.api.v2.core.GrpcMethodList.Service - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_envoy_api_v2_core_grpc_method_list_proto_init() } -func file_envoy_api_v2_core_grpc_method_list_proto_init() { - if File_envoy_api_v2_core_grpc_method_list_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_envoy_api_v2_core_grpc_method_list_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcMethodList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_grpc_method_list_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcMethodList_Service); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_api_v2_core_grpc_method_list_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_api_v2_core_grpc_method_list_proto_goTypes, - 
DependencyIndexes: file_envoy_api_v2_core_grpc_method_list_proto_depIdxs, - MessageInfos: file_envoy_api_v2_core_grpc_method_list_proto_msgTypes, - }.Build() - File_envoy_api_v2_core_grpc_method_list_proto = out.File - file_envoy_api_v2_core_grpc_method_list_proto_rawDesc = nil - file_envoy_api_v2_core_grpc_method_list_proto_goTypes = nil - file_envoy_api_v2_core_grpc_method_list_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/grpc_method_list.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/grpc_method_list.pb.validate.go deleted file mode 100644 index a2ba0401dd..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/grpc_method_list.pb.validate.go +++ /dev/null @@ -1,294 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/api/v2/core/grpc_method_list.proto - -package core - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on GrpcMethodList with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *GrpcMethodList) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on GrpcMethodList with the rules defined -// in the proto definition for this message. 
If any rules are violated, the -// result is a list of violation errors wrapped in GrpcMethodListMultiError, -// or nil if none found. -func (m *GrpcMethodList) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcMethodList) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - for idx, item := range m.GetServices() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcMethodListValidationError{ - field: fmt.Sprintf("Services[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcMethodListValidationError{ - field: fmt.Sprintf("Services[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcMethodListValidationError{ - field: fmt.Sprintf("Services[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return GrpcMethodListMultiError(errors) - } - - return nil -} - -// GrpcMethodListMultiError is an error wrapping multiple validation errors -// returned by GrpcMethodList.ValidateAll() if the designated constraints -// aren't met. -type GrpcMethodListMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m GrpcMethodListMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. 
-func (m GrpcMethodListMultiError) AllErrors() []error { return m } - -// GrpcMethodListValidationError is the validation error returned by -// GrpcMethodList.Validate if the designated constraints aren't met. -type GrpcMethodListValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GrpcMethodListValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e GrpcMethodListValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e GrpcMethodListValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e GrpcMethodListValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e GrpcMethodListValidationError) ErrorName() string { return "GrpcMethodListValidationError" } - -// Error satisfies the builtin error interface -func (e GrpcMethodListValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcMethodList.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcMethodListValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcMethodListValidationError{} - -// Validate checks the field values on GrpcMethodList_Service with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *GrpcMethodList_Service) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on GrpcMethodList_Service with the rules -// defined in the proto definition for this message. 
If any rules are -// violated, the result is a list of violation errors wrapped in -// GrpcMethodList_ServiceMultiError, or nil if none found. -func (m *GrpcMethodList_Service) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcMethodList_Service) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(m.GetName()) < 1 { - err := GrpcMethodList_ServiceValidationError{ - field: "Name", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(m.GetMethodNames()) < 1 { - err := GrpcMethodList_ServiceValidationError{ - field: "MethodNames", - reason: "value must contain at least 1 item(s)", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(errors) > 0 { - return GrpcMethodList_ServiceMultiError(errors) - } - - return nil -} - -// GrpcMethodList_ServiceMultiError is an error wrapping multiple validation -// errors returned by GrpcMethodList_Service.ValidateAll() if the designated -// constraints aren't met. -type GrpcMethodList_ServiceMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m GrpcMethodList_ServiceMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m GrpcMethodList_ServiceMultiError) AllErrors() []error { return m } - -// GrpcMethodList_ServiceValidationError is the validation error returned by -// GrpcMethodList_Service.Validate if the designated constraints aren't met. -type GrpcMethodList_ServiceValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GrpcMethodList_ServiceValidationError) Field() string { return e.field } - -// Reason function returns reason value. 
-func (e GrpcMethodList_ServiceValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e GrpcMethodList_ServiceValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e GrpcMethodList_ServiceValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e GrpcMethodList_ServiceValidationError) ErrorName() string { - return "GrpcMethodList_ServiceValidationError" -} - -// Error satisfies the builtin error interface -func (e GrpcMethodList_ServiceValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcMethodList_Service.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcMethodList_ServiceValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcMethodList_ServiceValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/grpc_service.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/grpc_service.pb.go deleted file mode 100644 index 5fe0654a9f..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/grpc_service.pb.go +++ /dev/null @@ -1,1470 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/api/v2/core/grpc_service.proto - -package core - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" - duration "github.com/golang/protobuf/ptypes/duration" - _struct "github.com/golang/protobuf/ptypes/struct" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// gRPC service configuration. This is used by :ref:`ApiConfigSource -// ` and filter configurations. -// [#next-free-field: 6] -type GrpcService struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to TargetSpecifier: - // *GrpcService_EnvoyGrpc_ - // *GrpcService_GoogleGrpc_ - TargetSpecifier isGrpcService_TargetSpecifier `protobuf_oneof:"target_specifier"` - // The timeout for the gRPC request. This is the timeout for a specific - // request. - Timeout *duration.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` - // Additional metadata to include in streams initiated to the GrpcService. - // This can be used for scenarios in which additional ad hoc authorization - // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. 
- InitialMetadata []*HeaderValue `protobuf:"bytes,5,rep,name=initial_metadata,json=initialMetadata,proto3" json:"initial_metadata,omitempty"` -} - -func (x *GrpcService) Reset() { - *x = GrpcService{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcService) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcService) ProtoMessage() {} - -func (x *GrpcService) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcService.ProtoReflect.Descriptor instead. -func (*GrpcService) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_grpc_service_proto_rawDescGZIP(), []int{0} -} - -func (m *GrpcService) GetTargetSpecifier() isGrpcService_TargetSpecifier { - if m != nil { - return m.TargetSpecifier - } - return nil -} - -func (x *GrpcService) GetEnvoyGrpc() *GrpcService_EnvoyGrpc { - if x, ok := x.GetTargetSpecifier().(*GrpcService_EnvoyGrpc_); ok { - return x.EnvoyGrpc - } - return nil -} - -func (x *GrpcService) GetGoogleGrpc() *GrpcService_GoogleGrpc { - if x, ok := x.GetTargetSpecifier().(*GrpcService_GoogleGrpc_); ok { - return x.GoogleGrpc - } - return nil -} - -func (x *GrpcService) GetTimeout() *duration.Duration { - if x != nil { - return x.Timeout - } - return nil -} - -func (x *GrpcService) GetInitialMetadata() []*HeaderValue { - if x != nil { - return x.InitialMetadata - } - return nil -} - -type isGrpcService_TargetSpecifier interface { - isGrpcService_TargetSpecifier() -} - -type GrpcService_EnvoyGrpc_ struct { - // Envoy's in-built gRPC client. 
- // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - EnvoyGrpc *GrpcService_EnvoyGrpc `protobuf:"bytes,1,opt,name=envoy_grpc,json=envoyGrpc,proto3,oneof"` -} - -type GrpcService_GoogleGrpc_ struct { - // `Google C++ gRPC client `_ - // See the :ref:`gRPC services overview ` - // documentation for discussion on gRPC client selection. - GoogleGrpc *GrpcService_GoogleGrpc `protobuf:"bytes,2,opt,name=google_grpc,json=googleGrpc,proto3,oneof"` -} - -func (*GrpcService_EnvoyGrpc_) isGrpcService_TargetSpecifier() {} - -func (*GrpcService_GoogleGrpc_) isGrpcService_TargetSpecifier() {} - -type GrpcService_EnvoyGrpc struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of the upstream gRPC cluster. SSL credentials will be supplied - // in the :ref:`Cluster ` :ref:`transport_socket - // `. - ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` -} - -func (x *GrpcService_EnvoyGrpc) Reset() { - *x = GrpcService_EnvoyGrpc{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcService_EnvoyGrpc) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcService_EnvoyGrpc) ProtoMessage() {} - -func (x *GrpcService_EnvoyGrpc) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcService_EnvoyGrpc.ProtoReflect.Descriptor instead. 
-func (*GrpcService_EnvoyGrpc) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_grpc_service_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *GrpcService_EnvoyGrpc) GetClusterName() string { - if x != nil { - return x.ClusterName - } - return "" -} - -// [#next-free-field: 7] -type GrpcService_GoogleGrpc struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The target URI when using the `Google C++ gRPC client - // `_. SSL credentials will be supplied in - // :ref:`channel_credentials `. - TargetUri string `protobuf:"bytes,1,opt,name=target_uri,json=targetUri,proto3" json:"target_uri,omitempty"` - ChannelCredentials *GrpcService_GoogleGrpc_ChannelCredentials `protobuf:"bytes,2,opt,name=channel_credentials,json=channelCredentials,proto3" json:"channel_credentials,omitempty"` - // A set of call credentials that can be composed with `channel credentials - // `_. - CallCredentials []*GrpcService_GoogleGrpc_CallCredentials `protobuf:"bytes,3,rep,name=call_credentials,json=callCredentials,proto3" json:"call_credentials,omitempty"` - // The human readable prefix to use when emitting statistics for the gRPC - // service. - // - // .. csv-table:: - // :header: Name, Type, Description - // :widths: 1, 1, 2 - // - // streams_total, Counter, Total number of streams opened - // streams_closed_, Counter, Total streams closed with - StatPrefix string `protobuf:"bytes,4,opt,name=stat_prefix,json=statPrefix,proto3" json:"stat_prefix,omitempty"` - // The name of the Google gRPC credentials factory to use. This must have been registered with - // Envoy. If this is empty, a default credentials factory will be used that sets up channel - // credentials based on other configuration parameters. 
- CredentialsFactoryName string `protobuf:"bytes,5,opt,name=credentials_factory_name,json=credentialsFactoryName,proto3" json:"credentials_factory_name,omitempty"` - // Additional configuration for site-specific customizations of the Google - // gRPC library. - Config *_struct.Struct `protobuf:"bytes,6,opt,name=config,proto3" json:"config,omitempty"` -} - -func (x *GrpcService_GoogleGrpc) Reset() { - *x = GrpcService_GoogleGrpc{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcService_GoogleGrpc) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcService_GoogleGrpc) ProtoMessage() {} - -func (x *GrpcService_GoogleGrpc) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcService_GoogleGrpc.ProtoReflect.Descriptor instead. 
-func (*GrpcService_GoogleGrpc) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_grpc_service_proto_rawDescGZIP(), []int{0, 1} -} - -func (x *GrpcService_GoogleGrpc) GetTargetUri() string { - if x != nil { - return x.TargetUri - } - return "" -} - -func (x *GrpcService_GoogleGrpc) GetChannelCredentials() *GrpcService_GoogleGrpc_ChannelCredentials { - if x != nil { - return x.ChannelCredentials - } - return nil -} - -func (x *GrpcService_GoogleGrpc) GetCallCredentials() []*GrpcService_GoogleGrpc_CallCredentials { - if x != nil { - return x.CallCredentials - } - return nil -} - -func (x *GrpcService_GoogleGrpc) GetStatPrefix() string { - if x != nil { - return x.StatPrefix - } - return "" -} - -func (x *GrpcService_GoogleGrpc) GetCredentialsFactoryName() string { - if x != nil { - return x.CredentialsFactoryName - } - return "" -} - -func (x *GrpcService_GoogleGrpc) GetConfig() *_struct.Struct { - if x != nil { - return x.Config - } - return nil -} - -// See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. -type GrpcService_GoogleGrpc_SslCredentials struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // PEM encoded server root certificates. - RootCerts *DataSource `protobuf:"bytes,1,opt,name=root_certs,json=rootCerts,proto3" json:"root_certs,omitempty"` - // PEM encoded client private key. - PrivateKey *DataSource `protobuf:"bytes,2,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` - // PEM encoded client certificate chain. 
- CertChain *DataSource `protobuf:"bytes,3,opt,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"` -} - -func (x *GrpcService_GoogleGrpc_SslCredentials) Reset() { - *x = GrpcService_GoogleGrpc_SslCredentials{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcService_GoogleGrpc_SslCredentials) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcService_GoogleGrpc_SslCredentials) ProtoMessage() {} - -func (x *GrpcService_GoogleGrpc_SslCredentials) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcService_GoogleGrpc_SslCredentials.ProtoReflect.Descriptor instead. -func (*GrpcService_GoogleGrpc_SslCredentials) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_grpc_service_proto_rawDescGZIP(), []int{0, 1, 0} -} - -func (x *GrpcService_GoogleGrpc_SslCredentials) GetRootCerts() *DataSource { - if x != nil { - return x.RootCerts - } - return nil -} - -func (x *GrpcService_GoogleGrpc_SslCredentials) GetPrivateKey() *DataSource { - if x != nil { - return x.PrivateKey - } - return nil -} - -func (x *GrpcService_GoogleGrpc_SslCredentials) GetCertChain() *DataSource { - if x != nil { - return x.CertChain - } - return nil -} - -// Local channel credentials. Only UDS is supported for now. -// See https://github.com/grpc/grpc/pull/15909. 
-type GrpcService_GoogleGrpc_GoogleLocalCredentials struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) Reset() { - *x = GrpcService_GoogleGrpc_GoogleLocalCredentials{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcService_GoogleGrpc_GoogleLocalCredentials) ProtoMessage() {} - -func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcService_GoogleGrpc_GoogleLocalCredentials.ProtoReflect.Descriptor instead. -func (*GrpcService_GoogleGrpc_GoogleLocalCredentials) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_grpc_service_proto_rawDescGZIP(), []int{0, 1, 1} -} - -// See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call -// credential types. 
-type GrpcService_GoogleGrpc_ChannelCredentials struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to CredentialSpecifier: - // *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials - // *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault - // *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials - CredentialSpecifier isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier `protobuf_oneof:"credential_specifier"` -} - -func (x *GrpcService_GoogleGrpc_ChannelCredentials) Reset() { - *x = GrpcService_GoogleGrpc_ChannelCredentials{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcService_GoogleGrpc_ChannelCredentials) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcService_GoogleGrpc_ChannelCredentials) ProtoMessage() {} - -func (x *GrpcService_GoogleGrpc_ChannelCredentials) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcService_GoogleGrpc_ChannelCredentials.ProtoReflect.Descriptor instead. 
-func (*GrpcService_GoogleGrpc_ChannelCredentials) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_grpc_service_proto_rawDescGZIP(), []int{0, 1, 2} -} - -func (m *GrpcService_GoogleGrpc_ChannelCredentials) GetCredentialSpecifier() isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier { - if m != nil { - return m.CredentialSpecifier - } - return nil -} - -func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetSslCredentials() *GrpcService_GoogleGrpc_SslCredentials { - if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials); ok { - return x.SslCredentials - } - return nil -} - -func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetGoogleDefault() *emptypb.Empty { - if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault); ok { - return x.GoogleDefault - } - return nil -} - -func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetLocalCredentials() *GrpcService_GoogleGrpc_GoogleLocalCredentials { - if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials); ok { - return x.LocalCredentials - } - return nil -} - -type isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier interface { - isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() -} - -type GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials struct { - SslCredentials *GrpcService_GoogleGrpc_SslCredentials `protobuf:"bytes,1,opt,name=ssl_credentials,json=sslCredentials,proto3,oneof"` -} - -type GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault struct { - // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - GoogleDefault *emptypb.Empty `protobuf:"bytes,2,opt,name=google_default,json=googleDefault,proto3,oneof"` -} - -type GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials struct { - LocalCredentials *GrpcService_GoogleGrpc_GoogleLocalCredentials 
`protobuf:"bytes,3,opt,name=local_credentials,json=localCredentials,proto3,oneof"` -} - -func (*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() { -} - -func (*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() { -} - -func (*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() { -} - -// [#next-free-field: 8] -type GrpcService_GoogleGrpc_CallCredentials struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to CredentialSpecifier: - // *GrpcService_GoogleGrpc_CallCredentials_AccessToken - // *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine - // *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken - // *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess - // *GrpcService_GoogleGrpc_CallCredentials_GoogleIam - // *GrpcService_GoogleGrpc_CallCredentials_FromPlugin - // *GrpcService_GoogleGrpc_CallCredentials_StsService_ - CredentialSpecifier isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier `protobuf_oneof:"credential_specifier"` -} - -func (x *GrpcService_GoogleGrpc_CallCredentials) Reset() { - *x = GrpcService_GoogleGrpc_CallCredentials{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcService_GoogleGrpc_CallCredentials) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcService_GoogleGrpc_CallCredentials) ProtoMessage() {} - -func (x *GrpcService_GoogleGrpc_CallCredentials) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials.ProtoReflect.Descriptor instead. -func (*GrpcService_GoogleGrpc_CallCredentials) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3} -} - -func (m *GrpcService_GoogleGrpc_CallCredentials) GetCredentialSpecifier() isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier { - if m != nil { - return m.CredentialSpecifier - } - return nil -} - -func (x *GrpcService_GoogleGrpc_CallCredentials) GetAccessToken() string { - if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_AccessToken); ok { - return x.AccessToken - } - return "" -} - -func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleComputeEngine() *emptypb.Empty { - if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine); ok { - return x.GoogleComputeEngine - } - return nil -} - -func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleRefreshToken() string { - if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken); ok { - return x.GoogleRefreshToken - } - return "" -} - -func (x *GrpcService_GoogleGrpc_CallCredentials) GetServiceAccountJwtAccess() *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials { - if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess); ok { - return x.ServiceAccountJwtAccess - } - return nil -} - -func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleIam() *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials { - if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleIam); ok { - return x.GoogleIam - } - return nil -} - -func (x *GrpcService_GoogleGrpc_CallCredentials) 
GetFromPlugin() *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin { - if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_FromPlugin); ok { - return x.FromPlugin - } - return nil -} - -func (x *GrpcService_GoogleGrpc_CallCredentials) GetStsService() *GrpcService_GoogleGrpc_CallCredentials_StsService { - if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_StsService_); ok { - return x.StsService - } - return nil -} - -type isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier interface { - isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() -} - -type GrpcService_GoogleGrpc_CallCredentials_AccessToken struct { - // Access token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. - AccessToken string `protobuf:"bytes,1,opt,name=access_token,json=accessToken,proto3,oneof"` -} - -type GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine struct { - // Google Compute Engine credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 - GoogleComputeEngine *emptypb.Empty `protobuf:"bytes,2,opt,name=google_compute_engine,json=googleComputeEngine,proto3,oneof"` -} - -type GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken struct { - // Google refresh token credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. - GoogleRefreshToken string `protobuf:"bytes,3,opt,name=google_refresh_token,json=googleRefreshToken,proto3,oneof"` -} - -type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess struct { - // Service Account JWT Access credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. 
- ServiceAccountJwtAccess *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials `protobuf:"bytes,4,opt,name=service_account_jwt_access,json=serviceAccountJwtAccess,proto3,oneof"` -} - -type GrpcService_GoogleGrpc_CallCredentials_GoogleIam struct { - // Google IAM credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. - GoogleIam *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials `protobuf:"bytes,5,opt,name=google_iam,json=googleIam,proto3,oneof"` -} - -type GrpcService_GoogleGrpc_CallCredentials_FromPlugin struct { - // Custom authenticator credentials. - // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. - // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. - FromPlugin *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin `protobuf:"bytes,6,opt,name=from_plugin,json=fromPlugin,proto3,oneof"` -} - -type GrpcService_GoogleGrpc_CallCredentials_StsService_ struct { - // Custom security token service which implements OAuth 2.0 token exchange. - // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 - // See https://github.com/grpc/grpc/pull/19587. 
- StsService *GrpcService_GoogleGrpc_CallCredentials_StsService `protobuf:"bytes,7,opt,name=sts_service,json=stsService,proto3,oneof"` -} - -func (*GrpcService_GoogleGrpc_CallCredentials_AccessToken) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { -} - -func (*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { -} - -func (*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { -} - -func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { -} - -func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIam) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { -} - -func (*GrpcService_GoogleGrpc_CallCredentials_FromPlugin) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { -} - -func (*GrpcService_GoogleGrpc_CallCredentials_StsService_) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { -} - -type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - JsonKey string `protobuf:"bytes,1,opt,name=json_key,json=jsonKey,proto3" json:"json_key,omitempty"` - TokenLifetimeSeconds uint64 `protobuf:"varint,2,opt,name=token_lifetime_seconds,json=tokenLifetimeSeconds,proto3" json:"token_lifetime_seconds,omitempty"` -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Reset() { - *x = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) String() string { 
- return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ProtoMessage() {} - -func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.ProtoReflect.Descriptor instead. -func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 0} -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) GetJsonKey() string { - if x != nil { - return x.JsonKey - } - return "" -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) GetTokenLifetimeSeconds() uint64 { - if x != nil { - return x.TokenLifetimeSeconds - } - return 0 -} - -type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AuthorizationToken string `protobuf:"bytes,1,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"` - AuthoritySelector string `protobuf:"bytes,2,opt,name=authority_selector,json=authoritySelector,proto3" json:"authority_selector,omitempty"` -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Reset() { - *x = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 
- ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ProtoMessage() {} - -func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.ProtoReflect.Descriptor instead. -func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 1} -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) GetAuthorizationToken() string { - if x != nil { - return x.AuthorizationToken - } - return "" -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) GetAuthoritySelector() string { - if x != nil { - return x.AuthoritySelector - } - return "" -} - -type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Types that are assignable to ConfigType: - // *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_Config - // *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig - ConfigType isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType `protobuf_oneof:"config_type"` -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Reset() { - *x = 
GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ProtoMessage() {} - -func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.ProtoReflect.Descriptor instead. -func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 2} -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetConfigType() isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType { - if m != nil { - return m.ConfigType - } - return nil -} - -// Deprecated: Do not use. 
-func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetConfig() *_struct.Struct { - if x, ok := x.GetConfigType().(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_Config); ok { - return x.Config - } - return nil -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetTypedConfig() *any.Any { - if x, ok := x.GetConfigType().(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig); ok { - return x.TypedConfig - } - return nil -} - -type isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType interface { - isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType() -} - -type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_Config struct { - // Deprecated: Do not use. - Config *_struct.Struct `protobuf:"bytes,2,opt,name=config,proto3,oneof"` -} - -type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig struct { - TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` -} - -func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_Config) isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType() { -} - -func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType() { -} - -// Security token service configuration that allows Google gRPC to -// fetch security token from an OAuth 2.0 authorization server. -// See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and -// https://github.com/grpc/grpc/pull/19587. 
-// [#next-free-field: 10] -type GrpcService_GoogleGrpc_CallCredentials_StsService struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // URI of the token exchange service that handles token exchange requests. - // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by - // https://github.com/envoyproxy/protoc-gen-validate/issues/303] - TokenExchangeServiceUri string `protobuf:"bytes,1,opt,name=token_exchange_service_uri,json=tokenExchangeServiceUri,proto3" json:"token_exchange_service_uri,omitempty"` - // Location of the target service or resource where the client - // intends to use the requested security token. - Resource string `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` - // Logical name of the target service where the client intends to - // use the requested security token. - Audience string `protobuf:"bytes,3,opt,name=audience,proto3" json:"audience,omitempty"` - // The desired scope of the requested security token in the - // context of the service or resource where the token will be used. - Scope string `protobuf:"bytes,4,opt,name=scope,proto3" json:"scope,omitempty"` - // Type of the requested security token. - RequestedTokenType string `protobuf:"bytes,5,opt,name=requested_token_type,json=requestedTokenType,proto3" json:"requested_token_type,omitempty"` - // The path of subject token, a security token that represents the - // identity of the party on behalf of whom the request is being made. - SubjectTokenPath string `protobuf:"bytes,6,opt,name=subject_token_path,json=subjectTokenPath,proto3" json:"subject_token_path,omitempty"` - // Type of the subject token. - SubjectTokenType string `protobuf:"bytes,7,opt,name=subject_token_type,json=subjectTokenType,proto3" json:"subject_token_type,omitempty"` - // The path of actor token, a security token that represents the identity - // of the acting party. 
The acting party is authorized to use the - // requested security token and act on behalf of the subject. - ActorTokenPath string `protobuf:"bytes,8,opt,name=actor_token_path,json=actorTokenPath,proto3" json:"actor_token_path,omitempty"` - // Type of the actor token. - ActorTokenType string `protobuf:"bytes,9,opt,name=actor_token_type,json=actorTokenType,proto3" json:"actor_token_type,omitempty"` -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) Reset() { - *x = GrpcService_GoogleGrpc_CallCredentials_StsService{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcService_GoogleGrpc_CallCredentials_StsService) ProtoMessage() {} - -func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_grpc_service_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_StsService.ProtoReflect.Descriptor instead. 
-func (*GrpcService_GoogleGrpc_CallCredentials_StsService) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 3} -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetTokenExchangeServiceUri() string { - if x != nil { - return x.TokenExchangeServiceUri - } - return "" -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetResource() string { - if x != nil { - return x.Resource - } - return "" -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetAudience() string { - if x != nil { - return x.Audience - } - return "" -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetScope() string { - if x != nil { - return x.Scope - } - return "" -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetRequestedTokenType() string { - if x != nil { - return x.RequestedTokenType - } - return "" -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetSubjectTokenPath() string { - if x != nil { - return x.SubjectTokenPath - } - return "" -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetSubjectTokenType() string { - if x != nil { - return x.SubjectTokenType - } - return "" -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetActorTokenPath() string { - if x != nil { - return x.ActorTokenPath - } - return "" -} - -func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetActorTokenType() string { - if x != nil { - return x.ActorTokenType - } - return "" -} - -var File_envoy_api_v2_core_grpc_service_proto protoreflect.FileDescriptor - -var file_envoy_api_v2_core_grpc_service_proto_rawDesc = []byte{ - 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 
0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x62, 0x61, 0x73, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, - 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x75, - 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdb, 0x16, 0x0a, 0x0b, 0x47, 0x72, 0x70, 0x63, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x5f, 0x67, 0x72, 0x70, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 
0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x76, 0x6f, - 0x79, 0x47, 0x72, 0x70, 0x63, 0x48, 0x00, 0x52, 0x09, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x47, 0x72, - 0x70, 0x63, 0x12, 0x4c, 0x0a, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x70, - 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, - 0x70, 0x63, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, - 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x49, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x1a, 0x37, 0x0a, 0x09, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x47, 0x72, 0x70, 0x63, 0x12, 0x2a, 0x0a, - 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, 0x0b, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0xda, 0x13, 0x0a, 0x0a, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x12, 0x26, 0x0a, 0x0a, 0x74, 0x61, 
0x72, 0x67, - 0x65, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x72, 0x69, - 0x12, 0x6d, 0x0a, 0x13, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x12, 0x63, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, - 0x64, 0x0a, 0x10, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, - 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x52, 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x28, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x20, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x38, 0x0a, 0x18, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x5f, 0x66, - 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x16, 0x63, 
0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x61, - 0x63, 0x74, 0x6f, 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd4, 0x01, 0x0a, 0x0e, 0x53, - 0x73, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3c, 0x0a, - 0x0a, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x46, 0x0a, 0x0b, 0x70, - 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, - 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x1a, 0x18, 0x0a, 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, - 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xc8, 0x02, 0x0a, 0x12, - 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 
0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x73, 0x12, 0x63, 0x0a, 0x0f, 0x73, 0x73, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x73, 0x6c, 0x43, 0x72, 0x65, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x0e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x0d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x6f, 0x0a, 0x11, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x42, 0x1b, 0x0a, 0x14, 0x63, 0x72, 0x65, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xfd, 0x0b, 0x0a, 0x0f, 0x43, 0x61, 0x6c, 0x6c, 0x43, - 
0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0c, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, - 0x4c, 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, - 0x65, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x32, 0x0a, - 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x12, 0x9b, 0x01, 0x0a, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6a, 0x77, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, - 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x4a, 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 
0x4a, 0x77, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, - 0x6f, 0x0a, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x69, 0x61, 0x6d, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, - 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x09, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x61, 0x6d, - 0x12, 0x7a, 0x0a, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, - 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, - 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x48, 0x00, - 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x67, 0x0a, 0x0b, - 0x73, 0x74, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, - 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 
0x2e, 0x53, 0x74, 0x73, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x73, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x75, 0x0a, 0x22, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6a, - 0x73, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, - 0x73, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, - 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x69, 0x66, - 0x65, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x76, 0x0a, 0x14, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x74, 0x79, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x1a, 0xb4, 0x01, 0x0a, 0x1d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, - 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x06, 0x63, 0x6f, - 0x6e, 0x66, 
0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, - 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x0b, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x8b, 0x03, 0x0a, 0x0a, - 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x5f, 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x69, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 
0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, 0x10, 0x73, 0x75, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x35, - 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x20, 0x01, 0x52, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, - 0x28, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x1b, 0x0a, 0x14, 0x63, 0x72, 0x65, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x17, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, - 0x04, 0x08, 0x04, 0x10, 0x05, 0x42, 0x93, 0x01, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 
0x79, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_api_v2_core_grpc_service_proto_rawDescOnce sync.Once - file_envoy_api_v2_core_grpc_service_proto_rawDescData = file_envoy_api_v2_core_grpc_service_proto_rawDesc -) - -func file_envoy_api_v2_core_grpc_service_proto_rawDescGZIP() []byte { - file_envoy_api_v2_core_grpc_service_proto_rawDescOnce.Do(func() { - file_envoy_api_v2_core_grpc_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_api_v2_core_grpc_service_proto_rawDescData) - }) - return file_envoy_api_v2_core_grpc_service_proto_rawDescData -} - -var file_envoy_api_v2_core_grpc_service_proto_msgTypes = make([]protoimpl.MessageInfo, 11) -var file_envoy_api_v2_core_grpc_service_proto_goTypes = []interface{}{ - (*GrpcService)(nil), // 0: envoy.api.v2.core.GrpcService - (*GrpcService_EnvoyGrpc)(nil), // 1: envoy.api.v2.core.GrpcService.EnvoyGrpc - (*GrpcService_GoogleGrpc)(nil), // 2: envoy.api.v2.core.GrpcService.GoogleGrpc - (*GrpcService_GoogleGrpc_SslCredentials)(nil), // 3: envoy.api.v2.core.GrpcService.GoogleGrpc.SslCredentials - (*GrpcService_GoogleGrpc_GoogleLocalCredentials)(nil), // 4: envoy.api.v2.core.GrpcService.GoogleGrpc.GoogleLocalCredentials - (*GrpcService_GoogleGrpc_ChannelCredentials)(nil), // 5: envoy.api.v2.core.GrpcService.GoogleGrpc.ChannelCredentials - (*GrpcService_GoogleGrpc_CallCredentials)(nil), // 6: envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials - 
(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials)(nil), // 7: envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials - (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials)(nil), // 8: envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials - (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin)(nil), // 9: envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin - (*GrpcService_GoogleGrpc_CallCredentials_StsService)(nil), // 10: envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.StsService - (*duration.Duration)(nil), // 11: google.protobuf.Duration - (*HeaderValue)(nil), // 12: envoy.api.v2.core.HeaderValue - (*_struct.Struct)(nil), // 13: google.protobuf.Struct - (*DataSource)(nil), // 14: envoy.api.v2.core.DataSource - (*emptypb.Empty)(nil), // 15: google.protobuf.Empty - (*any.Any)(nil), // 16: google.protobuf.Any -} -var file_envoy_api_v2_core_grpc_service_proto_depIdxs = []int32{ - 1, // 0: envoy.api.v2.core.GrpcService.envoy_grpc:type_name -> envoy.api.v2.core.GrpcService.EnvoyGrpc - 2, // 1: envoy.api.v2.core.GrpcService.google_grpc:type_name -> envoy.api.v2.core.GrpcService.GoogleGrpc - 11, // 2: envoy.api.v2.core.GrpcService.timeout:type_name -> google.protobuf.Duration - 12, // 3: envoy.api.v2.core.GrpcService.initial_metadata:type_name -> envoy.api.v2.core.HeaderValue - 5, // 4: envoy.api.v2.core.GrpcService.GoogleGrpc.channel_credentials:type_name -> envoy.api.v2.core.GrpcService.GoogleGrpc.ChannelCredentials - 6, // 5: envoy.api.v2.core.GrpcService.GoogleGrpc.call_credentials:type_name -> envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials - 13, // 6: envoy.api.v2.core.GrpcService.GoogleGrpc.config:type_name -> google.protobuf.Struct - 14, // 7: envoy.api.v2.core.GrpcService.GoogleGrpc.SslCredentials.root_certs:type_name -> envoy.api.v2.core.DataSource - 14, // 8: 
envoy.api.v2.core.GrpcService.GoogleGrpc.SslCredentials.private_key:type_name -> envoy.api.v2.core.DataSource - 14, // 9: envoy.api.v2.core.GrpcService.GoogleGrpc.SslCredentials.cert_chain:type_name -> envoy.api.v2.core.DataSource - 3, // 10: envoy.api.v2.core.GrpcService.GoogleGrpc.ChannelCredentials.ssl_credentials:type_name -> envoy.api.v2.core.GrpcService.GoogleGrpc.SslCredentials - 15, // 11: envoy.api.v2.core.GrpcService.GoogleGrpc.ChannelCredentials.google_default:type_name -> google.protobuf.Empty - 4, // 12: envoy.api.v2.core.GrpcService.GoogleGrpc.ChannelCredentials.local_credentials:type_name -> envoy.api.v2.core.GrpcService.GoogleGrpc.GoogleLocalCredentials - 15, // 13: envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.google_compute_engine:type_name -> google.protobuf.Empty - 7, // 14: envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.service_account_jwt_access:type_name -> envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials - 8, // 15: envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.google_iam:type_name -> envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials - 9, // 16: envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.from_plugin:type_name -> envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin - 10, // 17: envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.sts_service:type_name -> envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.StsService - 13, // 18: envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin.config:type_name -> google.protobuf.Struct - 16, // 19: envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin.typed_config:type_name -> google.protobuf.Any - 20, // [20:20] is the sub-list for method output_type - 20, // [20:20] is the sub-list for method input_type - 20, // [20:20] is the sub-list for extension type_name - 20, // [20:20] is 
the sub-list for extension extendee - 0, // [0:20] is the sub-list for field type_name -} - -func init() { file_envoy_api_v2_core_grpc_service_proto_init() } -func file_envoy_api_v2_core_grpc_service_proto_init() { - if File_envoy_api_v2_core_grpc_service_proto != nil { - return - } - file_envoy_api_v2_core_base_proto_init() - if !protoimpl.UnsafeEnabled { - file_envoy_api_v2_core_grpc_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcService); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcService_EnvoyGrpc); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcService_GoogleGrpc); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcService_GoogleGrpc_SslCredentials); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcService_GoogleGrpc_GoogleLocalCredentials); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcService_GoogleGrpc_ChannelCredentials); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcService_GoogleGrpc_CallCredentials); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_StsService); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*GrpcService_EnvoyGrpc_)(nil), - (*GrpcService_GoogleGrpc_)(nil), - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[5].OneofWrappers = []interface{}{ - 
(*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials)(nil), - (*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault)(nil), - (*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials)(nil), - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[6].OneofWrappers = []interface{}{ - (*GrpcService_GoogleGrpc_CallCredentials_AccessToken)(nil), - (*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine)(nil), - (*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken)(nil), - (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess)(nil), - (*GrpcService_GoogleGrpc_CallCredentials_GoogleIam)(nil), - (*GrpcService_GoogleGrpc_CallCredentials_FromPlugin)(nil), - (*GrpcService_GoogleGrpc_CallCredentials_StsService_)(nil), - } - file_envoy_api_v2_core_grpc_service_proto_msgTypes[9].OneofWrappers = []interface{}{ - (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_Config)(nil), - (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_api_v2_core_grpc_service_proto_rawDesc, - NumEnums: 0, - NumMessages: 11, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_api_v2_core_grpc_service_proto_goTypes, - DependencyIndexes: file_envoy_api_v2_core_grpc_service_proto_depIdxs, - MessageInfos: file_envoy_api_v2_core_grpc_service_proto_msgTypes, - }.Build() - File_envoy_api_v2_core_grpc_service_proto = out.File - file_envoy_api_v2_core_grpc_service_proto_rawDesc = nil - file_envoy_api_v2_core_grpc_service_proto_goTypes = nil - file_envoy_api_v2_core_grpc_service_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/grpc_service.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/grpc_service.pb.validate.go 
deleted file mode 100644 index f729e4e0dc..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/grpc_service.pb.validate.go +++ /dev/null @@ -1,1981 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/api/v2/core/grpc_service.proto - -package core - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on GrpcService with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *GrpcService) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on GrpcService with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in GrpcServiceMultiError, or -// nil if none found. 
-func (m *GrpcService) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcService) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetTimeout()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcServiceValidationError{ - field: "Timeout", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcServiceValidationError{ - field: "Timeout", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetTimeout()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcServiceValidationError{ - field: "Timeout", - reason: "embedded message failed validation", - cause: err, - } - } - } - - for idx, item := range m.GetInitialMetadata() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcServiceValidationError{ - field: fmt.Sprintf("InitialMetadata[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcServiceValidationError{ - field: fmt.Sprintf("InitialMetadata[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcServiceValidationError{ - field: fmt.Sprintf("InitialMetadata[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - switch m.TargetSpecifier.(type) { - - case *GrpcService_EnvoyGrpc_: - - if all { - switch v := 
interface{}(m.GetEnvoyGrpc()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcServiceValidationError{ - field: "EnvoyGrpc", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcServiceValidationError{ - field: "EnvoyGrpc", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetEnvoyGrpc()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcServiceValidationError{ - field: "EnvoyGrpc", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *GrpcService_GoogleGrpc_: - - if all { - switch v := interface{}(m.GetGoogleGrpc()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcServiceValidationError{ - field: "GoogleGrpc", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcServiceValidationError{ - field: "GoogleGrpc", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetGoogleGrpc()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcServiceValidationError{ - field: "GoogleGrpc", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := GrpcServiceValidationError{ - field: "TargetSpecifier", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return GrpcServiceMultiError(errors) - } - - return nil -} - -// GrpcServiceMultiError is an error wrapping multiple validation errors -// returned by GrpcService.ValidateAll() if the designated constraints 
aren't met. -type GrpcServiceMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m GrpcServiceMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m GrpcServiceMultiError) AllErrors() []error { return m } - -// GrpcServiceValidationError is the validation error returned by -// GrpcService.Validate if the designated constraints aren't met. -type GrpcServiceValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GrpcServiceValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e GrpcServiceValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e GrpcServiceValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e GrpcServiceValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e GrpcServiceValidationError) ErrorName() string { return "GrpcServiceValidationError" } - -// Error satisfies the builtin error interface -func (e GrpcServiceValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcService.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcServiceValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcServiceValidationError{} - -// Validate checks the field values on GrpcService_EnvoyGrpc with the rules -// defined in the proto definition for this message. 
If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *GrpcService_EnvoyGrpc) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on GrpcService_EnvoyGrpc with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// GrpcService_EnvoyGrpcMultiError, or nil if none found. -func (m *GrpcService_EnvoyGrpc) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcService_EnvoyGrpc) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(m.GetClusterName()) < 1 { - err := GrpcService_EnvoyGrpcValidationError{ - field: "ClusterName", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(errors) > 0 { - return GrpcService_EnvoyGrpcMultiError(errors) - } - - return nil -} - -// GrpcService_EnvoyGrpcMultiError is an error wrapping multiple validation -// errors returned by GrpcService_EnvoyGrpc.ValidateAll() if the designated -// constraints aren't met. -type GrpcService_EnvoyGrpcMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m GrpcService_EnvoyGrpcMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m GrpcService_EnvoyGrpcMultiError) AllErrors() []error { return m } - -// GrpcService_EnvoyGrpcValidationError is the validation error returned by -// GrpcService_EnvoyGrpc.Validate if the designated constraints aren't met. -type GrpcService_EnvoyGrpcValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e GrpcService_EnvoyGrpcValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e GrpcService_EnvoyGrpcValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e GrpcService_EnvoyGrpcValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e GrpcService_EnvoyGrpcValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e GrpcService_EnvoyGrpcValidationError) ErrorName() string { - return "GrpcService_EnvoyGrpcValidationError" -} - -// Error satisfies the builtin error interface -func (e GrpcService_EnvoyGrpcValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcService_EnvoyGrpc.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcService_EnvoyGrpcValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcService_EnvoyGrpcValidationError{} - -// Validate checks the field values on GrpcService_GoogleGrpc with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *GrpcService_GoogleGrpc) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on GrpcService_GoogleGrpc with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// GrpcService_GoogleGrpcMultiError, or nil if none found. 
-func (m *GrpcService_GoogleGrpc) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcService_GoogleGrpc) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(m.GetTargetUri()) < 1 { - err := GrpcService_GoogleGrpcValidationError{ - field: "TargetUri", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetChannelCredentials()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpcValidationError{ - field: "ChannelCredentials", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpcValidationError{ - field: "ChannelCredentials", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetChannelCredentials()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpcValidationError{ - field: "ChannelCredentials", - reason: "embedded message failed validation", - cause: err, - } - } - } - - for idx, item := range m.GetCallCredentials() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpcValidationError{ - field: fmt.Sprintf("CallCredentials[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpcValidationError{ - field: fmt.Sprintf("CallCredentials[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := 
interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpcValidationError{ - field: fmt.Sprintf("CallCredentials[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(m.GetStatPrefix()) < 1 { - err := GrpcService_GoogleGrpcValidationError{ - field: "StatPrefix", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - // no validation rules for CredentialsFactoryName - - if all { - switch v := interface{}(m.GetConfig()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpcValidationError{ - field: "Config", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpcValidationError{ - field: "Config", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpcValidationError{ - field: "Config", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return GrpcService_GoogleGrpcMultiError(errors) - } - - return nil -} - -// GrpcService_GoogleGrpcMultiError is an error wrapping multiple validation -// errors returned by GrpcService_GoogleGrpc.ValidateAll() if the designated -// constraints aren't met. -type GrpcService_GoogleGrpcMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m GrpcService_GoogleGrpcMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m GrpcService_GoogleGrpcMultiError) AllErrors() []error { return m } - -// GrpcService_GoogleGrpcValidationError is the validation error returned by -// GrpcService_GoogleGrpc.Validate if the designated constraints aren't met. -type GrpcService_GoogleGrpcValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GrpcService_GoogleGrpcValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e GrpcService_GoogleGrpcValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e GrpcService_GoogleGrpcValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e GrpcService_GoogleGrpcValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e GrpcService_GoogleGrpcValidationError) ErrorName() string { - return "GrpcService_GoogleGrpcValidationError" -} - -// Error satisfies the builtin error interface -func (e GrpcService_GoogleGrpcValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcService_GoogleGrpc.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcService_GoogleGrpcValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcService_GoogleGrpcValidationError{} - -// Validate checks the field values on GrpcService_GoogleGrpc_SslCredentials -// with the rules defined in the proto definition for this message. 
If any -// rules are violated, the first error encountered is returned, or nil if -// there are no violations. -func (m *GrpcService_GoogleGrpc_SslCredentials) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on GrpcService_GoogleGrpc_SslCredentials -// with the rules defined in the proto definition for this message. If any -// rules are violated, the result is a list of violation errors wrapped in -// GrpcService_GoogleGrpc_SslCredentialsMultiError, or nil if none found. -func (m *GrpcService_GoogleGrpc_SslCredentials) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcService_GoogleGrpc_SslCredentials) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetRootCerts()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ - field: "RootCerts", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ - field: "RootCerts", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetRootCerts()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpc_SslCredentialsValidationError{ - field: "RootCerts", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetPrivateKey()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ - field: "PrivateKey", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := 
v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ - field: "PrivateKey", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetPrivateKey()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpc_SslCredentialsValidationError{ - field: "PrivateKey", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetCertChain()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ - field: "CertChain", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ - field: "CertChain", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetCertChain()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpc_SslCredentialsValidationError{ - field: "CertChain", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return GrpcService_GoogleGrpc_SslCredentialsMultiError(errors) - } - - return nil -} - -// GrpcService_GoogleGrpc_SslCredentialsMultiError is an error wrapping -// multiple validation errors returned by -// GrpcService_GoogleGrpc_SslCredentials.ValidateAll() if the designated -// constraints aren't met. -type GrpcService_GoogleGrpc_SslCredentialsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m GrpcService_GoogleGrpc_SslCredentialsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m GrpcService_GoogleGrpc_SslCredentialsMultiError) AllErrors() []error { return m } - -// GrpcService_GoogleGrpc_SslCredentialsValidationError is the validation error -// returned by GrpcService_GoogleGrpc_SslCredentials.Validate if the -// designated constraints aren't met. -type GrpcService_GoogleGrpc_SslCredentialsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) ErrorName() string { - return "GrpcService_GoogleGrpc_SslCredentialsValidationError" -} - -// Error satisfies the builtin error interface -func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcService_GoogleGrpc_SslCredentials.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcService_GoogleGrpc_SslCredentialsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcService_GoogleGrpc_SslCredentialsValidationError{} - -// Validate checks the field values on -// GrpcService_GoogleGrpc_GoogleLocalCredentials with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on -// GrpcService_GoogleGrpc_GoogleLocalCredentials with the rules defined in the -// proto definition for this message. If any rules are violated, the result is -// a list of violation errors wrapped in -// GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError, or nil if none found. 
-func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(errors) > 0 { - return GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError(errors) - } - - return nil -} - -// GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError is an error wrapping -// multiple validation errors returned by -// GrpcService_GoogleGrpc_GoogleLocalCredentials.ValidateAll() if the -// designated constraints aren't met. -type GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError) AllErrors() []error { return m } - -// GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError is the -// validation error returned by -// GrpcService_GoogleGrpc_GoogleLocalCredentials.Validate if the designated -// constraints aren't met. -type GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Reason() string { - return e.reason -} - -// Cause function returns cause value. -func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Cause() error { return e.cause } - -// Key function returns key value. 
-func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) ErrorName() string { - return "GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError" -} - -// Error satisfies the builtin error interface -func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcService_GoogleGrpc_GoogleLocalCredentials.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError{} - -// Validate checks the field values on -// GrpcService_GoogleGrpc_ChannelCredentials with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *GrpcService_GoogleGrpc_ChannelCredentials) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on -// GrpcService_GoogleGrpc_ChannelCredentials with the rules defined in the -// proto definition for this message. If any rules are violated, the result is -// a list of violation errors wrapped in -// GrpcService_GoogleGrpc_ChannelCredentialsMultiError, or nil if none found. 
-func (m *GrpcService_GoogleGrpc_ChannelCredentials) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcService_GoogleGrpc_ChannelCredentials) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.CredentialSpecifier.(type) { - - case *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials: - - if all { - switch v := interface{}(m.GetSslCredentials()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ - field: "SslCredentials", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ - field: "SslCredentials", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetSslCredentials()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ - field: "SslCredentials", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault: - - if all { - switch v := interface{}(m.GetGoogleDefault()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ - field: "GoogleDefault", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ - field: "GoogleDefault", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetGoogleDefault()).(interface{ Validate() error 
}); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ - field: "GoogleDefault", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials: - - if all { - switch v := interface{}(m.GetLocalCredentials()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ - field: "LocalCredentials", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ - field: "LocalCredentials", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetLocalCredentials()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ - field: "LocalCredentials", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ - field: "CredentialSpecifier", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return GrpcService_GoogleGrpc_ChannelCredentialsMultiError(errors) - } - - return nil -} - -// GrpcService_GoogleGrpc_ChannelCredentialsMultiError is an error wrapping -// multiple validation errors returned by -// GrpcService_GoogleGrpc_ChannelCredentials.ValidateAll() if the designated -// constraints aren't met. -type GrpcService_GoogleGrpc_ChannelCredentialsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m GrpcService_GoogleGrpc_ChannelCredentialsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m GrpcService_GoogleGrpc_ChannelCredentialsMultiError) AllErrors() []error { return m } - -// GrpcService_GoogleGrpc_ChannelCredentialsValidationError is the validation -// error returned by GrpcService_GoogleGrpc_ChannelCredentials.Validate if the -// designated constraints aren't met. -type GrpcService_GoogleGrpc_ChannelCredentialsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) ErrorName() string { - return "GrpcService_GoogleGrpc_ChannelCredentialsValidationError" -} - -// Error satisfies the builtin error interface -func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcService_GoogleGrpc_ChannelCredentials.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcService_GoogleGrpc_ChannelCredentialsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcService_GoogleGrpc_ChannelCredentialsValidationError{} - -// Validate checks the field values on GrpcService_GoogleGrpc_CallCredentials -// with the rules defined in the proto definition for this message. If any -// rules are violated, the first error encountered is returned, or nil if -// there are no violations. -func (m *GrpcService_GoogleGrpc_CallCredentials) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on -// GrpcService_GoogleGrpc_CallCredentials with the rules defined in the proto -// definition for this message. If any rules are violated, the result is a -// list of violation errors wrapped in -// GrpcService_GoogleGrpc_CallCredentialsMultiError, or nil if none found. 
-func (m *GrpcService_GoogleGrpc_CallCredentials) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcService_GoogleGrpc_CallCredentials) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.CredentialSpecifier.(type) { - - case *GrpcService_GoogleGrpc_CallCredentials_AccessToken: - // no validation rules for AccessToken - - case *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine: - - if all { - switch v := interface{}(m.GetGoogleComputeEngine()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "GoogleComputeEngine", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "GoogleComputeEngine", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetGoogleComputeEngine()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "GoogleComputeEngine", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken: - // no validation rules for GoogleRefreshToken - - case *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess: - - if all { - switch v := interface{}(m.GetServiceAccountJwtAccess()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "ServiceAccountJwtAccess", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = 
append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "ServiceAccountJwtAccess", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetServiceAccountJwtAccess()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "ServiceAccountJwtAccess", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *GrpcService_GoogleGrpc_CallCredentials_GoogleIam: - - if all { - switch v := interface{}(m.GetGoogleIam()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "GoogleIam", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "GoogleIam", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetGoogleIam()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "GoogleIam", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *GrpcService_GoogleGrpc_CallCredentials_FromPlugin: - - if all { - switch v := interface{}(m.GetFromPlugin()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "FromPlugin", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "FromPlugin", - reason: 
"embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetFromPlugin()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "FromPlugin", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *GrpcService_GoogleGrpc_CallCredentials_StsService_: - - if all { - switch v := interface{}(m.GetStsService()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "StsService", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "StsService", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetStsService()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "StsService", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ - field: "CredentialSpecifier", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return GrpcService_GoogleGrpc_CallCredentialsMultiError(errors) - } - - return nil -} - -// GrpcService_GoogleGrpc_CallCredentialsMultiError is an error wrapping -// multiple validation errors returned by -// GrpcService_GoogleGrpc_CallCredentials.ValidateAll() if the designated -// constraints aren't met. -type GrpcService_GoogleGrpc_CallCredentialsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m GrpcService_GoogleGrpc_CallCredentialsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m GrpcService_GoogleGrpc_CallCredentialsMultiError) AllErrors() []error { return m } - -// GrpcService_GoogleGrpc_CallCredentialsValidationError is the validation -// error returned by GrpcService_GoogleGrpc_CallCredentials.Validate if the -// designated constraints aren't met. -type GrpcService_GoogleGrpc_CallCredentialsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) ErrorName() string { - return "GrpcService_GoogleGrpc_CallCredentialsValidationError" -} - -// Error satisfies the builtin error interface -func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcService_GoogleGrpc_CallCredentials.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcService_GoogleGrpc_CallCredentialsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcService_GoogleGrpc_CallCredentialsValidationError{} - -// Validate checks the field values on -// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials -// with the rules defined in the proto definition for this message. If any -// rules are violated, the first error encountered is returned, or nil if -// there are no violations. -func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on -// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials -// with the rules defined in the proto definition for this message. If any -// rules are violated, the result is a list of violation errors wrapped in -// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError, -// or nil if none found. 
-func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for JsonKey - - // no validation rules for TokenLifetimeSeconds - - if len(errors) > 0 { - return GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError(errors) - } - - return nil -} - -// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError -// is an error wrapping multiple validation errors returned by -// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.ValidateAll() -// if the designated constraints aren't met. -type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError) AllErrors() []error { - return m -} - -// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError -// is the validation error returned by -// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.Validate -// if the designated constraints aren't met. -type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Field() string { - return e.field -} - -// Reason function returns reason value. -func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Reason() string { - return e.reason -} - -// Cause function returns cause value. -func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Cause() error { - return e.cause -} - -// Key function returns key value. -func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Key() bool { - return e.key -} - -// ErrorName returns error name. -func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) ErrorName() string { - return "GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError" -} - -// Error satisfies the builtin error interface -func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError{} - -// Validate checks the field values on -// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. 
-func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on -// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError, or -// nil if none found. -func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for AuthorizationToken - - // no validation rules for AuthoritySelector - - if len(errors) > 0 { - return GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError(errors) - } - - return nil -} - -// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError is an -// error wrapping multiple validation errors returned by -// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.ValidateAll() -// if the designated constraints aren't met. -type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. 
-func (m GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError) AllErrors() []error { - return m -} - -// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError -// is the validation error returned by -// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.Validate if the -// designated constraints aren't met. -type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Field() string { - return e.field -} - -// Reason function returns reason value. -func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Reason() string { - return e.reason -} - -// Cause function returns cause value. -func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Cause() error { - return e.cause -} - -// Key function returns key value. -func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Key() bool { - return e.key -} - -// ErrorName returns error name. 
-func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) ErrorName() string { - return "GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError" -} - -// Error satisfies the builtin error interface -func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError{} - -// Validate checks the field values on -// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin with -// the rules defined in the proto definition for this message. If any rules -// are violated, the first error encountered is returned, or nil if there are -// no violations. -func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on -// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin with -// the rules defined in the proto definition for this message. If any rules -// are violated, the result is a list of violation errors wrapped in -// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError, -// or nil if none found. 
-func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Name - - switch m.ConfigType.(type) { - - case *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_Config: - - if all { - switch v := interface{}(m.GetConfig()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ - field: "Config", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ - field: "Config", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ - field: "Config", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig: - - if all { - switch v := interface{}(m.GetTypedConfig()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ - field: "TypedConfig", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, 
GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ - field: "TypedConfig", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ - field: "TypedConfig", - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError(errors) - } - - return nil -} - -// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError -// is an error wrapping multiple validation errors returned by -// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.ValidateAll() -// if the designated constraints aren't met. -type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError) AllErrors() []error { - return m -} - -// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError -// is the validation error returned by -// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.Validate -// if the designated constraints aren't met. -type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Field() string { - return e.field -} - -// Reason function returns reason value. -func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Reason() string { - return e.reason -} - -// Cause function returns cause value. -func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Cause() error { - return e.cause -} - -// Key function returns key value. -func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Key() bool { - return e.key -} - -// ErrorName returns error name. -func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) ErrorName() string { - return "GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError" -} - -// Error satisfies the builtin error interface -func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{} - -// Validate checks the field values on -// GrpcService_GoogleGrpc_CallCredentials_StsService with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. 
-func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on -// GrpcService_GoogleGrpc_CallCredentials_StsService with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in -// GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError, or nil if none found. -func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for TokenExchangeServiceUri - - // no validation rules for Resource - - // no validation rules for Audience - - // no validation rules for Scope - - // no validation rules for RequestedTokenType - - if len(m.GetSubjectTokenPath()) < 1 { - err := GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{ - field: "SubjectTokenPath", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(m.GetSubjectTokenType()) < 1 { - err := GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{ - field: "SubjectTokenType", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - // no validation rules for ActorTokenPath - - // no validation rules for ActorTokenType - - if len(errors) > 0 { - return GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError(errors) - } - - return nil -} - -// GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError is an error -// wrapping multiple validation errors returned by -// GrpcService_GoogleGrpc_CallCredentials_StsService.ValidateAll() if the -// designated constraints aren't met. 
-type GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError) AllErrors() []error { return m } - -// GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError is the -// validation error returned by -// GrpcService_GoogleGrpc_CallCredentials_StsService.Validate if the -// designated constraints aren't met. -type GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Field() string { - return e.field -} - -// Reason function returns reason value. -func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Reason() string { - return e.reason -} - -// Cause function returns cause value. -func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Cause() error { - return e.cause -} - -// Key function returns key value. -func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) ErrorName() string { - return "GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError" -} - -// Error satisfies the builtin error interface -func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcService_GoogleGrpc_CallCredentials_StsService.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/health_check.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/health_check.pb.go deleted file mode 100644 index 5230f2c61e..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/health_check.pb.go +++ /dev/null @@ -1,1426 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/api/v2/core/health_check.proto - -package core - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/go-control-plane/envoy/annotations" - _type "github.com/envoyproxy/go-control-plane/envoy/type" - matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" - duration "github.com/golang/protobuf/ptypes/duration" - _struct "github.com/golang/protobuf/ptypes/struct" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Endpoint health status. -type HealthStatus int32 - -const ( - // The health status is not known. This is interpreted by Envoy as *HEALTHY*. - HealthStatus_UNKNOWN HealthStatus = 0 - // Healthy. - HealthStatus_HEALTHY HealthStatus = 1 - // Unhealthy. - HealthStatus_UNHEALTHY HealthStatus = 2 - // Connection draining in progress. E.g., - // ``_ - // or - // ``_. - // This is interpreted by Envoy as *UNHEALTHY*. - HealthStatus_DRAINING HealthStatus = 3 - // Health check timed out. This is part of HDS and is interpreted by Envoy as - // *UNHEALTHY*. - HealthStatus_TIMEOUT HealthStatus = 4 - // Degraded. - HealthStatus_DEGRADED HealthStatus = 5 -) - -// Enum value maps for HealthStatus. 
-var ( - HealthStatus_name = map[int32]string{ - 0: "UNKNOWN", - 1: "HEALTHY", - 2: "UNHEALTHY", - 3: "DRAINING", - 4: "TIMEOUT", - 5: "DEGRADED", - } - HealthStatus_value = map[string]int32{ - "UNKNOWN": 0, - "HEALTHY": 1, - "UNHEALTHY": 2, - "DRAINING": 3, - "TIMEOUT": 4, - "DEGRADED": 5, - } -) - -func (x HealthStatus) Enum() *HealthStatus { - p := new(HealthStatus) - *p = x - return p -} - -func (x HealthStatus) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (HealthStatus) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_api_v2_core_health_check_proto_enumTypes[0].Descriptor() -} - -func (HealthStatus) Type() protoreflect.EnumType { - return &file_envoy_api_v2_core_health_check_proto_enumTypes[0] -} - -func (x HealthStatus) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use HealthStatus.Descriptor instead. -func (HealthStatus) EnumDescriptor() ([]byte, []int) { - return file_envoy_api_v2_core_health_check_proto_rawDescGZIP(), []int{0} -} - -// [#next-free-field: 23] -type HealthCheck struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The time to wait for a health check response. If the timeout is reached the - // health check attempt will be considered a failure. - Timeout *duration.Duration `protobuf:"bytes,1,opt,name=timeout,proto3" json:"timeout,omitempty"` - // The interval between health checks. - Interval *duration.Duration `protobuf:"bytes,2,opt,name=interval,proto3" json:"interval,omitempty"` - // An optional jitter amount in milliseconds. If specified, Envoy will start health - // checking after for a random time in ms between 0 and initial_jitter. This only - // applies to the first health check. 
- InitialJitter *duration.Duration `protobuf:"bytes,20,opt,name=initial_jitter,json=initialJitter,proto3" json:"initial_jitter,omitempty"` - // An optional jitter amount in milliseconds. If specified, during every - // interval Envoy will add interval_jitter to the wait time. - IntervalJitter *duration.Duration `protobuf:"bytes,3,opt,name=interval_jitter,json=intervalJitter,proto3" json:"interval_jitter,omitempty"` - // An optional jitter amount as a percentage of interval_ms. If specified, - // during every interval Envoy will add interval_ms * - // interval_jitter_percent / 100 to the wait time. - // - // If interval_jitter_ms and interval_jitter_percent are both set, both of - // them will be used to increase the wait time. - IntervalJitterPercent uint32 `protobuf:"varint,18,opt,name=interval_jitter_percent,json=intervalJitterPercent,proto3" json:"interval_jitter_percent,omitempty"` - // The number of unhealthy health checks required before a host is marked - // unhealthy. Note that for *http* health checking if a host responds with 503 - // this threshold is ignored and the host is considered unhealthy immediately. - UnhealthyThreshold *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=unhealthy_threshold,json=unhealthyThreshold,proto3" json:"unhealthy_threshold,omitempty"` - // The number of healthy health checks required before a host is marked - // healthy. Note that during startup, only a single successful health check is - // required to mark a host healthy. - HealthyThreshold *wrappers.UInt32Value `protobuf:"bytes,5,opt,name=healthy_threshold,json=healthyThreshold,proto3" json:"healthy_threshold,omitempty"` - // [#not-implemented-hide:] Non-serving port for health checking. - AltPort *wrappers.UInt32Value `protobuf:"bytes,6,opt,name=alt_port,json=altPort,proto3" json:"alt_port,omitempty"` - // Reuse health check connection between health checks. Default is true. 
- ReuseConnection *wrappers.BoolValue `protobuf:"bytes,7,opt,name=reuse_connection,json=reuseConnection,proto3" json:"reuse_connection,omitempty"` - // Types that are assignable to HealthChecker: - // *HealthCheck_HttpHealthCheck_ - // *HealthCheck_TcpHealthCheck_ - // *HealthCheck_GrpcHealthCheck_ - // *HealthCheck_CustomHealthCheck_ - HealthChecker isHealthCheck_HealthChecker `protobuf_oneof:"health_checker"` - // The "no traffic interval" is a special health check interval that is used when a cluster has - // never had traffic routed to it. This lower interval allows cluster information to be kept up to - // date, without sending a potentially large amount of active health checking traffic for no - // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the - // standard health check interval that is defined. Note that this interval takes precedence over - // any other. - // - // The default value for "no traffic interval" is 60 seconds. - NoTrafficInterval *duration.Duration `protobuf:"bytes,12,opt,name=no_traffic_interval,json=noTrafficInterval,proto3" json:"no_traffic_interval,omitempty"` - // The "unhealthy interval" is a health check interval that is used for hosts that are marked as - // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the - // standard health check interval that is defined. - // - // The default value for "unhealthy interval" is the same as "interval". - UnhealthyInterval *duration.Duration `protobuf:"bytes,14,opt,name=unhealthy_interval,json=unhealthyInterval,proto3" json:"unhealthy_interval,omitempty"` - // The "unhealthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as unhealthy. For subsequent health checks - // Envoy will shift back to using either "unhealthy interval" if present or the standard health - // check interval that is defined. 
- // - // The default value for "unhealthy edge interval" is the same as "unhealthy interval". - UnhealthyEdgeInterval *duration.Duration `protobuf:"bytes,15,opt,name=unhealthy_edge_interval,json=unhealthyEdgeInterval,proto3" json:"unhealthy_edge_interval,omitempty"` - // The "healthy edge interval" is a special health check interval that is used for the first - // health check right after a host is marked as healthy. For subsequent health checks - // Envoy will shift back to using the standard health check interval that is defined. - // - // The default value for "healthy edge interval" is the same as the default interval. - HealthyEdgeInterval *duration.Duration `protobuf:"bytes,16,opt,name=healthy_edge_interval,json=healthyEdgeInterval,proto3" json:"healthy_edge_interval,omitempty"` - // Specifies the path to the :ref:`health check event log `. - // If empty, no event log will be written. - EventLogPath string `protobuf:"bytes,17,opt,name=event_log_path,json=eventLogPath,proto3" json:"event_log_path,omitempty"` - // [#not-implemented-hide:] - // The gRPC service for the health check event service. - // If empty, health check events won't be sent to a remote endpoint. - EventService *EventServiceConfig `protobuf:"bytes,22,opt,name=event_service,json=eventService,proto3" json:"event_service,omitempty"` - // If set to true, health check failure events will always be logged. If set to false, only the - // initial health check failure event will be logged. - // The default value is false. - AlwaysLogHealthCheckFailures bool `protobuf:"varint,19,opt,name=always_log_health_check_failures,json=alwaysLogHealthCheckFailures,proto3" json:"always_log_health_check_failures,omitempty"` - // This allows overriding the cluster TLS settings, just for health check connections. 
- TlsOptions *HealthCheck_TlsOptions `protobuf:"bytes,21,opt,name=tls_options,json=tlsOptions,proto3" json:"tls_options,omitempty"` -} - -func (x *HealthCheck) Reset() { - *x = HealthCheck{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheck) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheck) ProtoMessage() {} - -func (x *HealthCheck) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheck.ProtoReflect.Descriptor instead. -func (*HealthCheck) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_health_check_proto_rawDescGZIP(), []int{0} -} - -func (x *HealthCheck) GetTimeout() *duration.Duration { - if x != nil { - return x.Timeout - } - return nil -} - -func (x *HealthCheck) GetInterval() *duration.Duration { - if x != nil { - return x.Interval - } - return nil -} - -func (x *HealthCheck) GetInitialJitter() *duration.Duration { - if x != nil { - return x.InitialJitter - } - return nil -} - -func (x *HealthCheck) GetIntervalJitter() *duration.Duration { - if x != nil { - return x.IntervalJitter - } - return nil -} - -func (x *HealthCheck) GetIntervalJitterPercent() uint32 { - if x != nil { - return x.IntervalJitterPercent - } - return 0 -} - -func (x *HealthCheck) GetUnhealthyThreshold() *wrappers.UInt32Value { - if x != nil { - return x.UnhealthyThreshold - } - return nil -} - -func (x *HealthCheck) GetHealthyThreshold() *wrappers.UInt32Value { - if x != nil { - return x.HealthyThreshold - } - return nil -} - -func (x *HealthCheck) GetAltPort() *wrappers.UInt32Value { - if x != nil { - 
return x.AltPort - } - return nil -} - -func (x *HealthCheck) GetReuseConnection() *wrappers.BoolValue { - if x != nil { - return x.ReuseConnection - } - return nil -} - -func (m *HealthCheck) GetHealthChecker() isHealthCheck_HealthChecker { - if m != nil { - return m.HealthChecker - } - return nil -} - -func (x *HealthCheck) GetHttpHealthCheck() *HealthCheck_HttpHealthCheck { - if x, ok := x.GetHealthChecker().(*HealthCheck_HttpHealthCheck_); ok { - return x.HttpHealthCheck - } - return nil -} - -func (x *HealthCheck) GetTcpHealthCheck() *HealthCheck_TcpHealthCheck { - if x, ok := x.GetHealthChecker().(*HealthCheck_TcpHealthCheck_); ok { - return x.TcpHealthCheck - } - return nil -} - -func (x *HealthCheck) GetGrpcHealthCheck() *HealthCheck_GrpcHealthCheck { - if x, ok := x.GetHealthChecker().(*HealthCheck_GrpcHealthCheck_); ok { - return x.GrpcHealthCheck - } - return nil -} - -func (x *HealthCheck) GetCustomHealthCheck() *HealthCheck_CustomHealthCheck { - if x, ok := x.GetHealthChecker().(*HealthCheck_CustomHealthCheck_); ok { - return x.CustomHealthCheck - } - return nil -} - -func (x *HealthCheck) GetNoTrafficInterval() *duration.Duration { - if x != nil { - return x.NoTrafficInterval - } - return nil -} - -func (x *HealthCheck) GetUnhealthyInterval() *duration.Duration { - if x != nil { - return x.UnhealthyInterval - } - return nil -} - -func (x *HealthCheck) GetUnhealthyEdgeInterval() *duration.Duration { - if x != nil { - return x.UnhealthyEdgeInterval - } - return nil -} - -func (x *HealthCheck) GetHealthyEdgeInterval() *duration.Duration { - if x != nil { - return x.HealthyEdgeInterval - } - return nil -} - -func (x *HealthCheck) GetEventLogPath() string { - if x != nil { - return x.EventLogPath - } - return "" -} - -func (x *HealthCheck) GetEventService() *EventServiceConfig { - if x != nil { - return x.EventService - } - return nil -} - -func (x *HealthCheck) GetAlwaysLogHealthCheckFailures() bool { - if x != nil { - return 
x.AlwaysLogHealthCheckFailures - } - return false -} - -func (x *HealthCheck) GetTlsOptions() *HealthCheck_TlsOptions { - if x != nil { - return x.TlsOptions - } - return nil -} - -type isHealthCheck_HealthChecker interface { - isHealthCheck_HealthChecker() -} - -type HealthCheck_HttpHealthCheck_ struct { - // HTTP health check. - HttpHealthCheck *HealthCheck_HttpHealthCheck `protobuf:"bytes,8,opt,name=http_health_check,json=httpHealthCheck,proto3,oneof"` -} - -type HealthCheck_TcpHealthCheck_ struct { - // TCP health check. - TcpHealthCheck *HealthCheck_TcpHealthCheck `protobuf:"bytes,9,opt,name=tcp_health_check,json=tcpHealthCheck,proto3,oneof"` -} - -type HealthCheck_GrpcHealthCheck_ struct { - // gRPC health check. - GrpcHealthCheck *HealthCheck_GrpcHealthCheck `protobuf:"bytes,11,opt,name=grpc_health_check,json=grpcHealthCheck,proto3,oneof"` -} - -type HealthCheck_CustomHealthCheck_ struct { - // Custom health check. - CustomHealthCheck *HealthCheck_CustomHealthCheck `protobuf:"bytes,13,opt,name=custom_health_check,json=customHealthCheck,proto3,oneof"` -} - -func (*HealthCheck_HttpHealthCheck_) isHealthCheck_HealthChecker() {} - -func (*HealthCheck_TcpHealthCheck_) isHealthCheck_HealthChecker() {} - -func (*HealthCheck_GrpcHealthCheck_) isHealthCheck_HealthChecker() {} - -func (*HealthCheck_CustomHealthCheck_) isHealthCheck_HealthChecker() {} - -// Describes the encoding of the payload bytes in the payload. 
-type HealthCheck_Payload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Payload: - // *HealthCheck_Payload_Text - // *HealthCheck_Payload_Binary - Payload isHealthCheck_Payload_Payload `protobuf_oneof:"payload"` -} - -func (x *HealthCheck_Payload) Reset() { - *x = HealthCheck_Payload{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheck_Payload) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheck_Payload) ProtoMessage() {} - -func (x *HealthCheck_Payload) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheck_Payload.ProtoReflect.Descriptor instead. -func (*HealthCheck_Payload) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_health_check_proto_rawDescGZIP(), []int{0, 0} -} - -func (m *HealthCheck_Payload) GetPayload() isHealthCheck_Payload_Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (x *HealthCheck_Payload) GetText() string { - if x, ok := x.GetPayload().(*HealthCheck_Payload_Text); ok { - return x.Text - } - return "" -} - -func (x *HealthCheck_Payload) GetBinary() []byte { - if x, ok := x.GetPayload().(*HealthCheck_Payload_Binary); ok { - return x.Binary - } - return nil -} - -type isHealthCheck_Payload_Payload interface { - isHealthCheck_Payload_Payload() -} - -type HealthCheck_Payload_Text struct { - // Hex encoded payload. E.g., "000000FF". 
- Text string `protobuf:"bytes,1,opt,name=text,proto3,oneof"` -} - -type HealthCheck_Payload_Binary struct { - // [#not-implemented-hide:] Binary payload. - Binary []byte `protobuf:"bytes,2,opt,name=binary,proto3,oneof"` -} - -func (*HealthCheck_Payload_Text) isHealthCheck_Payload_Payload() {} - -func (*HealthCheck_Payload_Binary) isHealthCheck_Payload_Payload() {} - -// [#next-free-field: 12] -type HealthCheck_HttpHealthCheck struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The value of the host header in the HTTP health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. The host header can be customized for a specific endpoint by setting the - // :ref:`hostname ` field. - Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` - // Specifies the HTTP path that will be requested during health checking. For example - // */healthcheck*. - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` - // [#not-implemented-hide:] HTTP specific payload. - Send *HealthCheck_Payload `protobuf:"bytes,3,opt,name=send,proto3" json:"send,omitempty"` - // [#not-implemented-hide:] HTTP specific response. - Receive *HealthCheck_Payload `protobuf:"bytes,4,opt,name=receive,proto3" json:"receive,omitempty"` - // An optional service name parameter which is used to validate the identity of - // the health checked cluster. See the :ref:`architecture overview - // ` for more information. - // - // .. attention:: - // - // This field has been deprecated in favor of `service_name_matcher` for better flexibility - // over matching with service-cluster name. - // - // Deprecated: Do not use. 
- ServiceName string `protobuf:"bytes,5,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` - // Specifies a list of HTTP headers that should be added to each request that is sent to the - // health checked cluster. For more information, including details on header value syntax, see - // the documentation on :ref:`custom request headers - // `. - RequestHeadersToAdd []*HeaderValueOption `protobuf:"bytes,6,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"` - // Specifies a list of HTTP headers that should be removed from each request that is sent to the - // health checked cluster. - RequestHeadersToRemove []string `protobuf:"bytes,8,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"` - // If set, health checks will be made using http/2. - // Deprecated, use :ref:`codec_client_type - // ` instead. - // - // Deprecated: Do not use. - UseHttp2 bool `protobuf:"varint,7,opt,name=use_http2,json=useHttp2,proto3" json:"use_http2,omitempty"` - // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default - // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open - // semantics of :ref:`Int64Range `. The start and end of each - // range are required. Only statuses in the range [100, 600) are allowed. - ExpectedStatuses []*_type.Int64Range `protobuf:"bytes,9,rep,name=expected_statuses,json=expectedStatuses,proto3" json:"expected_statuses,omitempty"` - // Use specified application protocol for health checks. - CodecClientType _type.CodecClientType `protobuf:"varint,10,opt,name=codec_client_type,json=codecClientType,proto3,enum=envoy.type.CodecClientType" json:"codec_client_type,omitempty"` - // An optional service name parameter which is used to validate the identity of - // the health checked cluster using a :ref:`StringMatcher - // `. 
See the :ref:`architecture overview - // ` for more information. - ServiceNameMatcher *matcher.StringMatcher `protobuf:"bytes,11,opt,name=service_name_matcher,json=serviceNameMatcher,proto3" json:"service_name_matcher,omitempty"` -} - -func (x *HealthCheck_HttpHealthCheck) Reset() { - *x = HealthCheck_HttpHealthCheck{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheck_HttpHealthCheck) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheck_HttpHealthCheck) ProtoMessage() {} - -func (x *HealthCheck_HttpHealthCheck) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheck_HttpHealthCheck.ProtoReflect.Descriptor instead. -func (*HealthCheck_HttpHealthCheck) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_health_check_proto_rawDescGZIP(), []int{0, 1} -} - -func (x *HealthCheck_HttpHealthCheck) GetHost() string { - if x != nil { - return x.Host - } - return "" -} - -func (x *HealthCheck_HttpHealthCheck) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *HealthCheck_HttpHealthCheck) GetSend() *HealthCheck_Payload { - if x != nil { - return x.Send - } - return nil -} - -func (x *HealthCheck_HttpHealthCheck) GetReceive() *HealthCheck_Payload { - if x != nil { - return x.Receive - } - return nil -} - -// Deprecated: Do not use. 
-func (x *HealthCheck_HttpHealthCheck) GetServiceName() string { - if x != nil { - return x.ServiceName - } - return "" -} - -func (x *HealthCheck_HttpHealthCheck) GetRequestHeadersToAdd() []*HeaderValueOption { - if x != nil { - return x.RequestHeadersToAdd - } - return nil -} - -func (x *HealthCheck_HttpHealthCheck) GetRequestHeadersToRemove() []string { - if x != nil { - return x.RequestHeadersToRemove - } - return nil -} - -// Deprecated: Do not use. -func (x *HealthCheck_HttpHealthCheck) GetUseHttp2() bool { - if x != nil { - return x.UseHttp2 - } - return false -} - -func (x *HealthCheck_HttpHealthCheck) GetExpectedStatuses() []*_type.Int64Range { - if x != nil { - return x.ExpectedStatuses - } - return nil -} - -func (x *HealthCheck_HttpHealthCheck) GetCodecClientType() _type.CodecClientType { - if x != nil { - return x.CodecClientType - } - return _type.CodecClientType(0) -} - -func (x *HealthCheck_HttpHealthCheck) GetServiceNameMatcher() *matcher.StringMatcher { - if x != nil { - return x.ServiceNameMatcher - } - return nil -} - -type HealthCheck_TcpHealthCheck struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Empty payloads imply a connect-only health check. - Send *HealthCheck_Payload `protobuf:"bytes,1,opt,name=send,proto3" json:"send,omitempty"` - // When checking the response, “fuzzy” matching is performed such that each - // binary block must be found, and in the order specified, but not - // necessarily contiguous. 
- Receive []*HealthCheck_Payload `protobuf:"bytes,2,rep,name=receive,proto3" json:"receive,omitempty"` -} - -func (x *HealthCheck_TcpHealthCheck) Reset() { - *x = HealthCheck_TcpHealthCheck{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheck_TcpHealthCheck) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheck_TcpHealthCheck) ProtoMessage() {} - -func (x *HealthCheck_TcpHealthCheck) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheck_TcpHealthCheck.ProtoReflect.Descriptor instead. -func (*HealthCheck_TcpHealthCheck) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_health_check_proto_rawDescGZIP(), []int{0, 2} -} - -func (x *HealthCheck_TcpHealthCheck) GetSend() *HealthCheck_Payload { - if x != nil { - return x.Send - } - return nil -} - -func (x *HealthCheck_TcpHealthCheck) GetReceive() []*HealthCheck_Payload { - if x != nil { - return x.Receive - } - return nil -} - -type HealthCheck_RedisHealthCheck struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value - // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other - // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance - // by setting the specified key to any value and waiting for traffic to drain. 
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` -} - -func (x *HealthCheck_RedisHealthCheck) Reset() { - *x = HealthCheck_RedisHealthCheck{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheck_RedisHealthCheck) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheck_RedisHealthCheck) ProtoMessage() {} - -func (x *HealthCheck_RedisHealthCheck) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheck_RedisHealthCheck.ProtoReflect.Descriptor instead. -func (*HealthCheck_RedisHealthCheck) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_health_check_proto_rawDescGZIP(), []int{0, 3} -} - -func (x *HealthCheck_RedisHealthCheck) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -// `grpc.health.v1.Health -// `_-based -// healthcheck. See `gRPC doc `_ -// for details. -type HealthCheck_GrpcHealthCheck struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // An optional service name parameter which will be sent to gRPC service in - // `grpc.health.v1.HealthCheckRequest - // `_. - // message. See `gRPC health-checking overview - // `_ for more information. - ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` - // The value of the :authority header in the gRPC health check request. If - // left empty (default value), the name of the cluster this health check is associated - // with will be used. 
The authority header can be customized for a specific endpoint by setting - // the :ref:`hostname ` field. - Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"` -} - -func (x *HealthCheck_GrpcHealthCheck) Reset() { - *x = HealthCheck_GrpcHealthCheck{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheck_GrpcHealthCheck) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheck_GrpcHealthCheck) ProtoMessage() {} - -func (x *HealthCheck_GrpcHealthCheck) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheck_GrpcHealthCheck.ProtoReflect.Descriptor instead. -func (*HealthCheck_GrpcHealthCheck) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_health_check_proto_rawDescGZIP(), []int{0, 4} -} - -func (x *HealthCheck_GrpcHealthCheck) GetServiceName() string { - if x != nil { - return x.ServiceName - } - return "" -} - -func (x *HealthCheck_GrpcHealthCheck) GetAuthority() string { - if x != nil { - return x.Authority - } - return "" -} - -// Custom health check. -type HealthCheck_CustomHealthCheck struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The registered name of the custom health checker. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // A custom health checker specific configuration which depends on the custom health checker - // being instantiated. See :api:`envoy/config/health_checker` for reference. 
- // - // Types that are assignable to ConfigType: - // *HealthCheck_CustomHealthCheck_Config - // *HealthCheck_CustomHealthCheck_TypedConfig - ConfigType isHealthCheck_CustomHealthCheck_ConfigType `protobuf_oneof:"config_type"` -} - -func (x *HealthCheck_CustomHealthCheck) Reset() { - *x = HealthCheck_CustomHealthCheck{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheck_CustomHealthCheck) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheck_CustomHealthCheck) ProtoMessage() {} - -func (x *HealthCheck_CustomHealthCheck) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheck_CustomHealthCheck.ProtoReflect.Descriptor instead. -func (*HealthCheck_CustomHealthCheck) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_health_check_proto_rawDescGZIP(), []int{0, 5} -} - -func (x *HealthCheck_CustomHealthCheck) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (m *HealthCheck_CustomHealthCheck) GetConfigType() isHealthCheck_CustomHealthCheck_ConfigType { - if m != nil { - return m.ConfigType - } - return nil -} - -// Deprecated: Do not use. 
-func (x *HealthCheck_CustomHealthCheck) GetConfig() *_struct.Struct { - if x, ok := x.GetConfigType().(*HealthCheck_CustomHealthCheck_Config); ok { - return x.Config - } - return nil -} - -func (x *HealthCheck_CustomHealthCheck) GetTypedConfig() *any.Any { - if x, ok := x.GetConfigType().(*HealthCheck_CustomHealthCheck_TypedConfig); ok { - return x.TypedConfig - } - return nil -} - -type isHealthCheck_CustomHealthCheck_ConfigType interface { - isHealthCheck_CustomHealthCheck_ConfigType() -} - -type HealthCheck_CustomHealthCheck_Config struct { - // Deprecated: Do not use. - Config *_struct.Struct `protobuf:"bytes,2,opt,name=config,proto3,oneof"` -} - -type HealthCheck_CustomHealthCheck_TypedConfig struct { - TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` -} - -func (*HealthCheck_CustomHealthCheck_Config) isHealthCheck_CustomHealthCheck_ConfigType() {} - -func (*HealthCheck_CustomHealthCheck_TypedConfig) isHealthCheck_CustomHealthCheck_ConfigType() {} - -// Health checks occur over the transport socket specified for the cluster. This implies that if a -// cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. -// -// This allows overriding the cluster TLS settings, just for health check connections. -type HealthCheck_TlsOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Specifies the ALPN protocols for health check connections. This is useful if the - // corresponding upstream is using ALPN-based :ref:`FilterChainMatch - // ` along with different protocols for health checks - // versus data connections. If empty, no ALPN protocols will be set on health check connections. 
- AlpnProtocols []string `protobuf:"bytes,1,rep,name=alpn_protocols,json=alpnProtocols,proto3" json:"alpn_protocols,omitempty"` -} - -func (x *HealthCheck_TlsOptions) Reset() { - *x = HealthCheck_TlsOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheck_TlsOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheck_TlsOptions) ProtoMessage() {} - -func (x *HealthCheck_TlsOptions) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_health_check_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheck_TlsOptions.ProtoReflect.Descriptor instead. -func (*HealthCheck_TlsOptions) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_health_check_proto_rawDescGZIP(), []int{0, 6} -} - -func (x *HealthCheck_TlsOptions) GetAlpnProtocols() []string { - if x != nil { - return x.AlpnProtocols - } - return nil -} - -var File_envoy_api_v2_core_health_check_proto protoreflect.FileDescriptor - -var file_envoy_api_v2_core_health_check_proto_rawDesc = []byte{ - 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x62, 0x61, 0x73, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x76, 
0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 
0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe1, 0x16, 0x0a, 0x0b, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x07, 0x74, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, - 0x01, 0x2a, 0x00, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x41, 0x0a, 0x08, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, - 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, - 0x40, 0x0a, 0x0e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, - 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, - 0x72, 0x12, 0x42, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, - 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, - 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, - 
0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, - 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x57, 0x0a, - 0x13, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, - 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x12, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x68, 0x72, - 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x53, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x79, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x61, - 0x6c, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x61, 0x6c, 0x74, - 0x50, 0x6f, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x10, 0x72, 0x65, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 
0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x75, 0x73, - 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5c, 0x0a, 0x11, 0x68, - 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x68, 0x74, 0x74, 0x70, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x59, 0x0a, 0x10, 0x74, 0x63, 0x70, - 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5c, 0x0a, 0x11, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, - 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, - 0x00, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x12, 0x62, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x0d, 0x20, 
0x01, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, - 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x48, 0x00, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x53, 0x0a, 0x13, 0x6e, 0x6f, 0x5f, 0x74, 0x72, 0x61, - 0x66, 0x66, 0x69, 0x63, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x6e, 0x6f, 0x54, 0x72, 0x61, 0x66, - 0x66, 0x69, 0x63, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x12, 0x75, - 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, - 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x75, 0x6e, - 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, - 0x5b, 0x0a, 0x17, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x15, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, - 0x45, 0x64, 
0x67, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x57, 0x0a, 0x15, - 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, - 0x52, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x45, 0x64, 0x67, 0x65, 0x49, 0x6e, 0x74, - 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, - 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x4a, 0x0a, 0x0d, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x20, 0x61, 0x6c, 0x77, 0x61, 0x79, - 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x1c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x4c, 0x6f, 0x67, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x4a, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x15, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 
0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x0a, 0x74, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x52, 0x0a, 0x07, 0x50, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1d, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x48, 0x00, 0x52, - 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x42, - 0x0e, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, - 0x9c, 0x05, 0x0a, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x12, 0x3a, 0x0a, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, - 0x12, 0x40, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, - 0x76, 0x65, 0x12, 0x25, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 
0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0b, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x64, 0x0a, 0x16, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, - 0x61, 0x64, 0x64, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, - 0x39, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x25, 0x0a, 0x09, 0x75, 0x73, - 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x32, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0x18, - 0x01, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x52, 0x08, 0x75, 0x73, 0x65, 0x48, 0x74, 0x74, 0x70, - 0x32, 0x12, 0x43, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x51, 0x0a, 0x11, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x5f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x1b, 0x2e, 
0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x43, - 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x53, 0x0a, 0x14, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, 0x8e, - 0x01, 0x0a, 0x0e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x12, 0x3a, 0x0a, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, - 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, - 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x1a, - 0x24, 0x0a, 0x10, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x1a, 0x52, 0x0a, 0x0f, 0x47, 
0x72, 0x70, 0x63, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x1a, 0xb1, 0x01, 0x0a, 0x11, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, - 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x06, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, - 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, - 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x33, 0x0a, - 0x0a, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, - 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x42, 0x15, 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, - 
0x63, 0x6b, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x2a, - 0x60, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, - 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x48, - 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x52, 0x41, 0x49, - 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, - 0x54, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, - 0x05, 0x42, 0x93, 0x01, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, - 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x10, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0xba, - 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_api_v2_core_health_check_proto_rawDescOnce sync.Once - file_envoy_api_v2_core_health_check_proto_rawDescData = file_envoy_api_v2_core_health_check_proto_rawDesc -) - -func file_envoy_api_v2_core_health_check_proto_rawDescGZIP() []byte { - file_envoy_api_v2_core_health_check_proto_rawDescOnce.Do(func() { - 
file_envoy_api_v2_core_health_check_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_api_v2_core_health_check_proto_rawDescData) - }) - return file_envoy_api_v2_core_health_check_proto_rawDescData -} - -var file_envoy_api_v2_core_health_check_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_api_v2_core_health_check_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_envoy_api_v2_core_health_check_proto_goTypes = []interface{}{ - (HealthStatus)(0), // 0: envoy.api.v2.core.HealthStatus - (*HealthCheck)(nil), // 1: envoy.api.v2.core.HealthCheck - (*HealthCheck_Payload)(nil), // 2: envoy.api.v2.core.HealthCheck.Payload - (*HealthCheck_HttpHealthCheck)(nil), // 3: envoy.api.v2.core.HealthCheck.HttpHealthCheck - (*HealthCheck_TcpHealthCheck)(nil), // 4: envoy.api.v2.core.HealthCheck.TcpHealthCheck - (*HealthCheck_RedisHealthCheck)(nil), // 5: envoy.api.v2.core.HealthCheck.RedisHealthCheck - (*HealthCheck_GrpcHealthCheck)(nil), // 6: envoy.api.v2.core.HealthCheck.GrpcHealthCheck - (*HealthCheck_CustomHealthCheck)(nil), // 7: envoy.api.v2.core.HealthCheck.CustomHealthCheck - (*HealthCheck_TlsOptions)(nil), // 8: envoy.api.v2.core.HealthCheck.TlsOptions - (*duration.Duration)(nil), // 9: google.protobuf.Duration - (*wrappers.UInt32Value)(nil), // 10: google.protobuf.UInt32Value - (*wrappers.BoolValue)(nil), // 11: google.protobuf.BoolValue - (*EventServiceConfig)(nil), // 12: envoy.api.v2.core.EventServiceConfig - (*HeaderValueOption)(nil), // 13: envoy.api.v2.core.HeaderValueOption - (*_type.Int64Range)(nil), // 14: envoy.type.Int64Range - (_type.CodecClientType)(0), // 15: envoy.type.CodecClientType - (*matcher.StringMatcher)(nil), // 16: envoy.type.matcher.StringMatcher - (*_struct.Struct)(nil), // 17: google.protobuf.Struct - (*any.Any)(nil), // 18: google.protobuf.Any -} -var file_envoy_api_v2_core_health_check_proto_depIdxs = []int32{ - 9, // 0: envoy.api.v2.core.HealthCheck.timeout:type_name -> google.protobuf.Duration - 9, // 1: 
envoy.api.v2.core.HealthCheck.interval:type_name -> google.protobuf.Duration - 9, // 2: envoy.api.v2.core.HealthCheck.initial_jitter:type_name -> google.protobuf.Duration - 9, // 3: envoy.api.v2.core.HealthCheck.interval_jitter:type_name -> google.protobuf.Duration - 10, // 4: envoy.api.v2.core.HealthCheck.unhealthy_threshold:type_name -> google.protobuf.UInt32Value - 10, // 5: envoy.api.v2.core.HealthCheck.healthy_threshold:type_name -> google.protobuf.UInt32Value - 10, // 6: envoy.api.v2.core.HealthCheck.alt_port:type_name -> google.protobuf.UInt32Value - 11, // 7: envoy.api.v2.core.HealthCheck.reuse_connection:type_name -> google.protobuf.BoolValue - 3, // 8: envoy.api.v2.core.HealthCheck.http_health_check:type_name -> envoy.api.v2.core.HealthCheck.HttpHealthCheck - 4, // 9: envoy.api.v2.core.HealthCheck.tcp_health_check:type_name -> envoy.api.v2.core.HealthCheck.TcpHealthCheck - 6, // 10: envoy.api.v2.core.HealthCheck.grpc_health_check:type_name -> envoy.api.v2.core.HealthCheck.GrpcHealthCheck - 7, // 11: envoy.api.v2.core.HealthCheck.custom_health_check:type_name -> envoy.api.v2.core.HealthCheck.CustomHealthCheck - 9, // 12: envoy.api.v2.core.HealthCheck.no_traffic_interval:type_name -> google.protobuf.Duration - 9, // 13: envoy.api.v2.core.HealthCheck.unhealthy_interval:type_name -> google.protobuf.Duration - 9, // 14: envoy.api.v2.core.HealthCheck.unhealthy_edge_interval:type_name -> google.protobuf.Duration - 9, // 15: envoy.api.v2.core.HealthCheck.healthy_edge_interval:type_name -> google.protobuf.Duration - 12, // 16: envoy.api.v2.core.HealthCheck.event_service:type_name -> envoy.api.v2.core.EventServiceConfig - 8, // 17: envoy.api.v2.core.HealthCheck.tls_options:type_name -> envoy.api.v2.core.HealthCheck.TlsOptions - 2, // 18: envoy.api.v2.core.HealthCheck.HttpHealthCheck.send:type_name -> envoy.api.v2.core.HealthCheck.Payload - 2, // 19: envoy.api.v2.core.HealthCheck.HttpHealthCheck.receive:type_name -> envoy.api.v2.core.HealthCheck.Payload - 13, // 20: 
envoy.api.v2.core.HealthCheck.HttpHealthCheck.request_headers_to_add:type_name -> envoy.api.v2.core.HeaderValueOption - 14, // 21: envoy.api.v2.core.HealthCheck.HttpHealthCheck.expected_statuses:type_name -> envoy.type.Int64Range - 15, // 22: envoy.api.v2.core.HealthCheck.HttpHealthCheck.codec_client_type:type_name -> envoy.type.CodecClientType - 16, // 23: envoy.api.v2.core.HealthCheck.HttpHealthCheck.service_name_matcher:type_name -> envoy.type.matcher.StringMatcher - 2, // 24: envoy.api.v2.core.HealthCheck.TcpHealthCheck.send:type_name -> envoy.api.v2.core.HealthCheck.Payload - 2, // 25: envoy.api.v2.core.HealthCheck.TcpHealthCheck.receive:type_name -> envoy.api.v2.core.HealthCheck.Payload - 17, // 26: envoy.api.v2.core.HealthCheck.CustomHealthCheck.config:type_name -> google.protobuf.Struct - 18, // 27: envoy.api.v2.core.HealthCheck.CustomHealthCheck.typed_config:type_name -> google.protobuf.Any - 28, // [28:28] is the sub-list for method output_type - 28, // [28:28] is the sub-list for method input_type - 28, // [28:28] is the sub-list for extension type_name - 28, // [28:28] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name -} - -func init() { file_envoy_api_v2_core_health_check_proto_init() } -func file_envoy_api_v2_core_health_check_proto_init() { - if File_envoy_api_v2_core_health_check_proto != nil { - return - } - file_envoy_api_v2_core_base_proto_init() - file_envoy_api_v2_core_event_service_config_proto_init() - if !protoimpl.UnsafeEnabled { - file_envoy_api_v2_core_health_check_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheck); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_health_check_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheck_Payload); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_health_check_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheck_HttpHealthCheck); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_health_check_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheck_TcpHealthCheck); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_health_check_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheck_RedisHealthCheck); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_health_check_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheck_GrpcHealthCheck); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_health_check_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheck_CustomHealthCheck); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_health_check_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheck_TlsOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_api_v2_core_health_check_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*HealthCheck_HttpHealthCheck_)(nil), - (*HealthCheck_TcpHealthCheck_)(nil), - 
(*HealthCheck_GrpcHealthCheck_)(nil), - (*HealthCheck_CustomHealthCheck_)(nil), - } - file_envoy_api_v2_core_health_check_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*HealthCheck_Payload_Text)(nil), - (*HealthCheck_Payload_Binary)(nil), - } - file_envoy_api_v2_core_health_check_proto_msgTypes[6].OneofWrappers = []interface{}{ - (*HealthCheck_CustomHealthCheck_Config)(nil), - (*HealthCheck_CustomHealthCheck_TypedConfig)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_api_v2_core_health_check_proto_rawDesc, - NumEnums: 1, - NumMessages: 8, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_api_v2_core_health_check_proto_goTypes, - DependencyIndexes: file_envoy_api_v2_core_health_check_proto_depIdxs, - EnumInfos: file_envoy_api_v2_core_health_check_proto_enumTypes, - MessageInfos: file_envoy_api_v2_core_health_check_proto_msgTypes, - }.Build() - File_envoy_api_v2_core_health_check_proto = out.File - file_envoy_api_v2_core_health_check_proto_rawDesc = nil - file_envoy_api_v2_core_health_check_proto_goTypes = nil - file_envoy_api_v2_core_health_check_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/health_check.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/health_check.pb.validate.go deleted file mode 100644 index 3f1ca4dba6..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/health_check.pb.validate.go +++ /dev/null @@ -1,1827 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. 
-// source: envoy/api/v2/core/health_check.proto - -package core - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" - - _type "github.com/envoyproxy/go-control-plane/envoy/type" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort - - _ = _type.CodecClientType(0) -) - -// Validate checks the field values on HealthCheck with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *HealthCheck) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HealthCheck with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in HealthCheckMultiError, or -// nil if none found. 
-func (m *HealthCheck) ValidateAll() error { - return m.validate(true) -} - -func (m *HealthCheck) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if m.GetTimeout() == nil { - err := HealthCheckValidationError{ - field: "Timeout", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if d := m.GetTimeout(); d != nil { - dur, err := d.AsDuration(), d.CheckValid() - if err != nil { - err = HealthCheckValidationError{ - field: "Timeout", - reason: "value is not a valid duration", - cause: err, - } - if !all { - return err - } - errors = append(errors, err) - } else { - - gt := time.Duration(0*time.Second + 0*time.Nanosecond) - - if dur <= gt { - err := HealthCheckValidationError{ - field: "Timeout", - reason: "value must be greater than 0s", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - } - - if m.GetInterval() == nil { - err := HealthCheckValidationError{ - field: "Interval", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if d := m.GetInterval(); d != nil { - dur, err := d.AsDuration(), d.CheckValid() - if err != nil { - err = HealthCheckValidationError{ - field: "Interval", - reason: "value is not a valid duration", - cause: err, - } - if !all { - return err - } - errors = append(errors, err) - } else { - - gt := time.Duration(0*time.Second + 0*time.Nanosecond) - - if dur <= gt { - err := HealthCheckValidationError{ - field: "Interval", - reason: "value must be greater than 0s", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - } - - if all { - switch v := interface{}(m.GetInitialJitter()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "InitialJitter", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if 
err := v.Validate(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "InitialJitter", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetInitialJitter()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheckValidationError{ - field: "InitialJitter", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetIntervalJitter()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "IntervalJitter", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "IntervalJitter", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetIntervalJitter()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheckValidationError{ - field: "IntervalJitter", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for IntervalJitterPercent - - if m.GetUnhealthyThreshold() == nil { - err := HealthCheckValidationError{ - field: "UnhealthyThreshold", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetUnhealthyThreshold()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "UnhealthyThreshold", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: 
"UnhealthyThreshold", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetUnhealthyThreshold()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheckValidationError{ - field: "UnhealthyThreshold", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if m.GetHealthyThreshold() == nil { - err := HealthCheckValidationError{ - field: "HealthyThreshold", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetHealthyThreshold()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "HealthyThreshold", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "HealthyThreshold", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetHealthyThreshold()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheckValidationError{ - field: "HealthyThreshold", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetAltPort()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "AltPort", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "AltPort", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetAltPort()).(interface{ Validate() error 
}); ok { - if err := v.Validate(); err != nil { - return HealthCheckValidationError{ - field: "AltPort", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetReuseConnection()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "ReuseConnection", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "ReuseConnection", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetReuseConnection()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheckValidationError{ - field: "ReuseConnection", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if d := m.GetNoTrafficInterval(); d != nil { - dur, err := d.AsDuration(), d.CheckValid() - if err != nil { - err = HealthCheckValidationError{ - field: "NoTrafficInterval", - reason: "value is not a valid duration", - cause: err, - } - if !all { - return err - } - errors = append(errors, err) - } else { - - gt := time.Duration(0*time.Second + 0*time.Nanosecond) - - if dur <= gt { - err := HealthCheckValidationError{ - field: "NoTrafficInterval", - reason: "value must be greater than 0s", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - } - - if d := m.GetUnhealthyInterval(); d != nil { - dur, err := d.AsDuration(), d.CheckValid() - if err != nil { - err = HealthCheckValidationError{ - field: "UnhealthyInterval", - reason: "value is not a valid duration", - cause: err, - } - if !all { - return err - } - errors = append(errors, err) - } else { - - gt := time.Duration(0*time.Second + 0*time.Nanosecond) - - if dur <= gt { - err := HealthCheckValidationError{ - 
field: "UnhealthyInterval", - reason: "value must be greater than 0s", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - } - - if d := m.GetUnhealthyEdgeInterval(); d != nil { - dur, err := d.AsDuration(), d.CheckValid() - if err != nil { - err = HealthCheckValidationError{ - field: "UnhealthyEdgeInterval", - reason: "value is not a valid duration", - cause: err, - } - if !all { - return err - } - errors = append(errors, err) - } else { - - gt := time.Duration(0*time.Second + 0*time.Nanosecond) - - if dur <= gt { - err := HealthCheckValidationError{ - field: "UnhealthyEdgeInterval", - reason: "value must be greater than 0s", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - } - - if d := m.GetHealthyEdgeInterval(); d != nil { - dur, err := d.AsDuration(), d.CheckValid() - if err != nil { - err = HealthCheckValidationError{ - field: "HealthyEdgeInterval", - reason: "value is not a valid duration", - cause: err, - } - if !all { - return err - } - errors = append(errors, err) - } else { - - gt := time.Duration(0*time.Second + 0*time.Nanosecond) - - if dur <= gt { - err := HealthCheckValidationError{ - field: "HealthyEdgeInterval", - reason: "value must be greater than 0s", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - } - - // no validation rules for EventLogPath - - if all { - switch v := interface{}(m.GetEventService()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "EventService", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "EventService", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetEventService()).(interface{ Validate() error }); ok { - if err := 
v.Validate(); err != nil { - return HealthCheckValidationError{ - field: "EventService", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for AlwaysLogHealthCheckFailures - - if all { - switch v := interface{}(m.GetTlsOptions()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "TlsOptions", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "TlsOptions", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetTlsOptions()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheckValidationError{ - field: "TlsOptions", - reason: "embedded message failed validation", - cause: err, - } - } - } - - switch m.HealthChecker.(type) { - - case *HealthCheck_HttpHealthCheck_: - - if all { - switch v := interface{}(m.GetHttpHealthCheck()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "HttpHealthCheck", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "HttpHealthCheck", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetHttpHealthCheck()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheckValidationError{ - field: "HttpHealthCheck", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *HealthCheck_TcpHealthCheck_: - - if all { - switch v := 
interface{}(m.GetTcpHealthCheck()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "TcpHealthCheck", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "TcpHealthCheck", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetTcpHealthCheck()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheckValidationError{ - field: "TcpHealthCheck", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *HealthCheck_GrpcHealthCheck_: - - if all { - switch v := interface{}(m.GetGrpcHealthCheck()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "GrpcHealthCheck", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "GrpcHealthCheck", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetGrpcHealthCheck()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheckValidationError{ - field: "GrpcHealthCheck", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *HealthCheck_CustomHealthCheck_: - - if all { - switch v := interface{}(m.GetCustomHealthCheck()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "CustomHealthCheck", - reason: "embedded message failed validation", - cause: err, - }) - } - case 
interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheckValidationError{ - field: "CustomHealthCheck", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetCustomHealthCheck()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheckValidationError{ - field: "CustomHealthCheck", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := HealthCheckValidationError{ - field: "HealthChecker", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return HealthCheckMultiError(errors) - } - - return nil -} - -// HealthCheckMultiError is an error wrapping multiple validation errors -// returned by HealthCheck.ValidateAll() if the designated constraints aren't met. -type HealthCheckMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HealthCheckMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HealthCheckMultiError) AllErrors() []error { return m } - -// HealthCheckValidationError is the validation error returned by -// HealthCheck.Validate if the designated constraints aren't met. -type HealthCheckValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HealthCheckValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HealthCheckValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HealthCheckValidationError) Cause() error { return e.cause } - -// Key function returns key value. 
-func (e HealthCheckValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e HealthCheckValidationError) ErrorName() string { return "HealthCheckValidationError" } - -// Error satisfies the builtin error interface -func (e HealthCheckValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHealthCheck.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HealthCheckValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HealthCheckValidationError{} - -// Validate checks the field values on HealthCheck_Payload with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *HealthCheck_Payload) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HealthCheck_Payload with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// HealthCheck_PayloadMultiError, or nil if none found. 
-func (m *HealthCheck_Payload) ValidateAll() error { - return m.validate(true) -} - -func (m *HealthCheck_Payload) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.Payload.(type) { - - case *HealthCheck_Payload_Text: - - if len(m.GetText()) < 1 { - err := HealthCheck_PayloadValidationError{ - field: "Text", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - case *HealthCheck_Payload_Binary: - // no validation rules for Binary - - default: - err := HealthCheck_PayloadValidationError{ - field: "Payload", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return HealthCheck_PayloadMultiError(errors) - } - - return nil -} - -// HealthCheck_PayloadMultiError is an error wrapping multiple validation -// errors returned by HealthCheck_Payload.ValidateAll() if the designated -// constraints aren't met. -type HealthCheck_PayloadMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HealthCheck_PayloadMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HealthCheck_PayloadMultiError) AllErrors() []error { return m } - -// HealthCheck_PayloadValidationError is the validation error returned by -// HealthCheck_Payload.Validate if the designated constraints aren't met. -type HealthCheck_PayloadValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HealthCheck_PayloadValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HealthCheck_PayloadValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. 
-func (e HealthCheck_PayloadValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HealthCheck_PayloadValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e HealthCheck_PayloadValidationError) ErrorName() string { - return "HealthCheck_PayloadValidationError" -} - -// Error satisfies the builtin error interface -func (e HealthCheck_PayloadValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHealthCheck_Payload.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HealthCheck_PayloadValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HealthCheck_PayloadValidationError{} - -// Validate checks the field values on HealthCheck_HttpHealthCheck with the -// rules defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *HealthCheck_HttpHealthCheck) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HealthCheck_HttpHealthCheck with the -// rules defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// HealthCheck_HttpHealthCheckMultiError, or nil if none found. 
-func (m *HealthCheck_HttpHealthCheck) ValidateAll() error { - return m.validate(true) -} - -func (m *HealthCheck_HttpHealthCheck) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Host - - if len(m.GetPath()) < 1 { - err := HealthCheck_HttpHealthCheckValidationError{ - field: "Path", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetSend()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ - field: "Send", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ - field: "Send", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetSend()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheck_HttpHealthCheckValidationError{ - field: "Send", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetReceive()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ - field: "Receive", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ - field: "Receive", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetReceive()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return 
HealthCheck_HttpHealthCheckValidationError{ - field: "Receive", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for ServiceName - - if len(m.GetRequestHeadersToAdd()) > 1000 { - err := HealthCheck_HttpHealthCheckValidationError{ - field: "RequestHeadersToAdd", - reason: "value must contain no more than 1000 item(s)", - } - if !all { - return err - } - errors = append(errors, err) - } - - for idx, item := range m.GetRequestHeadersToAdd() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ - field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ - field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheck_HttpHealthCheckValidationError{ - field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - // no validation rules for UseHttp2 - - for idx, item := range m.GetExpectedStatuses() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ - field: fmt.Sprintf("ExpectedStatuses[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, 
HealthCheck_HttpHealthCheckValidationError{ - field: fmt.Sprintf("ExpectedStatuses[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheck_HttpHealthCheckValidationError{ - field: fmt.Sprintf("ExpectedStatuses[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if _, ok := _type.CodecClientType_name[int32(m.GetCodecClientType())]; !ok { - err := HealthCheck_HttpHealthCheckValidationError{ - field: "CodecClientType", - reason: "value must be one of the defined enum values", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetServiceNameMatcher()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ - field: "ServiceNameMatcher", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ - field: "ServiceNameMatcher", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetServiceNameMatcher()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheck_HttpHealthCheckValidationError{ - field: "ServiceNameMatcher", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return HealthCheck_HttpHealthCheckMultiError(errors) - } - - return nil -} - -// HealthCheck_HttpHealthCheckMultiError is an error wrapping multiple -// validation errors returned by HealthCheck_HttpHealthCheck.ValidateAll() if -// the designated constraints aren't met. 
-type HealthCheck_HttpHealthCheckMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HealthCheck_HttpHealthCheckMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HealthCheck_HttpHealthCheckMultiError) AllErrors() []error { return m } - -// HealthCheck_HttpHealthCheckValidationError is the validation error returned -// by HealthCheck_HttpHealthCheck.Validate if the designated constraints -// aren't met. -type HealthCheck_HttpHealthCheckValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HealthCheck_HttpHealthCheckValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HealthCheck_HttpHealthCheckValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HealthCheck_HttpHealthCheckValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HealthCheck_HttpHealthCheckValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e HealthCheck_HttpHealthCheckValidationError) ErrorName() string { - return "HealthCheck_HttpHealthCheckValidationError" -} - -// Error satisfies the builtin error interface -func (e HealthCheck_HttpHealthCheckValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHealthCheck_HttpHealthCheck.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HealthCheck_HttpHealthCheckValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HealthCheck_HttpHealthCheckValidationError{} - -// Validate checks the field values on HealthCheck_TcpHealthCheck with the -// rules defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *HealthCheck_TcpHealthCheck) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HealthCheck_TcpHealthCheck with the -// rules defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// HealthCheck_TcpHealthCheckMultiError, or nil if none found. 
-func (m *HealthCheck_TcpHealthCheck) ValidateAll() error { - return m.validate(true) -} - -func (m *HealthCheck_TcpHealthCheck) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetSend()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ - field: "Send", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ - field: "Send", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetSend()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheck_TcpHealthCheckValidationError{ - field: "Send", - reason: "embedded message failed validation", - cause: err, - } - } - } - - for idx, item := range m.GetReceive() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ - field: fmt.Sprintf("Receive[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ - field: fmt.Sprintf("Receive[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheck_TcpHealthCheckValidationError{ - field: fmt.Sprintf("Receive[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return 
HealthCheck_TcpHealthCheckMultiError(errors) - } - - return nil -} - -// HealthCheck_TcpHealthCheckMultiError is an error wrapping multiple -// validation errors returned by HealthCheck_TcpHealthCheck.ValidateAll() if -// the designated constraints aren't met. -type HealthCheck_TcpHealthCheckMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HealthCheck_TcpHealthCheckMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HealthCheck_TcpHealthCheckMultiError) AllErrors() []error { return m } - -// HealthCheck_TcpHealthCheckValidationError is the validation error returned -// by HealthCheck_TcpHealthCheck.Validate if the designated constraints aren't met. -type HealthCheck_TcpHealthCheckValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HealthCheck_TcpHealthCheckValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HealthCheck_TcpHealthCheckValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HealthCheck_TcpHealthCheckValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HealthCheck_TcpHealthCheckValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e HealthCheck_TcpHealthCheckValidationError) ErrorName() string { - return "HealthCheck_TcpHealthCheckValidationError" -} - -// Error satisfies the builtin error interface -func (e HealthCheck_TcpHealthCheckValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHealthCheck_TcpHealthCheck.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HealthCheck_TcpHealthCheckValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HealthCheck_TcpHealthCheckValidationError{} - -// Validate checks the field values on HealthCheck_RedisHealthCheck with the -// rules defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *HealthCheck_RedisHealthCheck) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HealthCheck_RedisHealthCheck with the -// rules defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// HealthCheck_RedisHealthCheckMultiError, or nil if none found. -func (m *HealthCheck_RedisHealthCheck) ValidateAll() error { - return m.validate(true) -} - -func (m *HealthCheck_RedisHealthCheck) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Key - - if len(errors) > 0 { - return HealthCheck_RedisHealthCheckMultiError(errors) - } - - return nil -} - -// HealthCheck_RedisHealthCheckMultiError is an error wrapping multiple -// validation errors returned by HealthCheck_RedisHealthCheck.ValidateAll() if -// the designated constraints aren't met. 
-type HealthCheck_RedisHealthCheckMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HealthCheck_RedisHealthCheckMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HealthCheck_RedisHealthCheckMultiError) AllErrors() []error { return m } - -// HealthCheck_RedisHealthCheckValidationError is the validation error returned -// by HealthCheck_RedisHealthCheck.Validate if the designated constraints -// aren't met. -type HealthCheck_RedisHealthCheckValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HealthCheck_RedisHealthCheckValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HealthCheck_RedisHealthCheckValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HealthCheck_RedisHealthCheckValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HealthCheck_RedisHealthCheckValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e HealthCheck_RedisHealthCheckValidationError) ErrorName() string { - return "HealthCheck_RedisHealthCheckValidationError" -} - -// Error satisfies the builtin error interface -func (e HealthCheck_RedisHealthCheckValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHealthCheck_RedisHealthCheck.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HealthCheck_RedisHealthCheckValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HealthCheck_RedisHealthCheckValidationError{} - -// Validate checks the field values on HealthCheck_GrpcHealthCheck with the -// rules defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *HealthCheck_GrpcHealthCheck) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HealthCheck_GrpcHealthCheck with the -// rules defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// HealthCheck_GrpcHealthCheckMultiError, or nil if none found. -func (m *HealthCheck_GrpcHealthCheck) ValidateAll() error { - return m.validate(true) -} - -func (m *HealthCheck_GrpcHealthCheck) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for ServiceName - - // no validation rules for Authority - - if len(errors) > 0 { - return HealthCheck_GrpcHealthCheckMultiError(errors) - } - - return nil -} - -// HealthCheck_GrpcHealthCheckMultiError is an error wrapping multiple -// validation errors returned by HealthCheck_GrpcHealthCheck.ValidateAll() if -// the designated constraints aren't met. 
-type HealthCheck_GrpcHealthCheckMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HealthCheck_GrpcHealthCheckMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HealthCheck_GrpcHealthCheckMultiError) AllErrors() []error { return m } - -// HealthCheck_GrpcHealthCheckValidationError is the validation error returned -// by HealthCheck_GrpcHealthCheck.Validate if the designated constraints -// aren't met. -type HealthCheck_GrpcHealthCheckValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HealthCheck_GrpcHealthCheckValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HealthCheck_GrpcHealthCheckValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HealthCheck_GrpcHealthCheckValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HealthCheck_GrpcHealthCheckValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e HealthCheck_GrpcHealthCheckValidationError) ErrorName() string { - return "HealthCheck_GrpcHealthCheckValidationError" -} - -// Error satisfies the builtin error interface -func (e HealthCheck_GrpcHealthCheckValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHealthCheck_GrpcHealthCheck.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HealthCheck_GrpcHealthCheckValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HealthCheck_GrpcHealthCheckValidationError{} - -// Validate checks the field values on HealthCheck_CustomHealthCheck with the -// rules defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *HealthCheck_CustomHealthCheck) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HealthCheck_CustomHealthCheck with -// the rules defined in the proto definition for this message. If any rules -// are violated, the result is a list of violation errors wrapped in -// HealthCheck_CustomHealthCheckMultiError, or nil if none found. 
-func (m *HealthCheck_CustomHealthCheck) ValidateAll() error { - return m.validate(true) -} - -func (m *HealthCheck_CustomHealthCheck) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(m.GetName()) < 1 { - err := HealthCheck_CustomHealthCheckValidationError{ - field: "Name", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - switch m.ConfigType.(type) { - - case *HealthCheck_CustomHealthCheck_Config: - - if all { - switch v := interface{}(m.GetConfig()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheck_CustomHealthCheckValidationError{ - field: "Config", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheck_CustomHealthCheckValidationError{ - field: "Config", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheck_CustomHealthCheckValidationError{ - field: "Config", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *HealthCheck_CustomHealthCheck_TypedConfig: - - if all { - switch v := interface{}(m.GetTypedConfig()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheck_CustomHealthCheckValidationError{ - field: "TypedConfig", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HealthCheck_CustomHealthCheckValidationError{ - field: "TypedConfig", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := 
interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheck_CustomHealthCheckValidationError{ - field: "TypedConfig", - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return HealthCheck_CustomHealthCheckMultiError(errors) - } - - return nil -} - -// HealthCheck_CustomHealthCheckMultiError is an error wrapping multiple -// validation errors returned by HealthCheck_CustomHealthCheck.ValidateAll() -// if the designated constraints aren't met. -type HealthCheck_CustomHealthCheckMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HealthCheck_CustomHealthCheckMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HealthCheck_CustomHealthCheckMultiError) AllErrors() []error { return m } - -// HealthCheck_CustomHealthCheckValidationError is the validation error -// returned by HealthCheck_CustomHealthCheck.Validate if the designated -// constraints aren't met. -type HealthCheck_CustomHealthCheckValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HealthCheck_CustomHealthCheckValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HealthCheck_CustomHealthCheckValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HealthCheck_CustomHealthCheckValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HealthCheck_CustomHealthCheckValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e HealthCheck_CustomHealthCheckValidationError) ErrorName() string { - return "HealthCheck_CustomHealthCheckValidationError" -} - -// Error satisfies the builtin error interface -func (e HealthCheck_CustomHealthCheckValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHealthCheck_CustomHealthCheck.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HealthCheck_CustomHealthCheckValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HealthCheck_CustomHealthCheckValidationError{} - -// Validate checks the field values on HealthCheck_TlsOptions with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *HealthCheck_TlsOptions) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HealthCheck_TlsOptions with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// HealthCheck_TlsOptionsMultiError, or nil if none found. -func (m *HealthCheck_TlsOptions) ValidateAll() error { - return m.validate(true) -} - -func (m *HealthCheck_TlsOptions) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(errors) > 0 { - return HealthCheck_TlsOptionsMultiError(errors) - } - - return nil -} - -// HealthCheck_TlsOptionsMultiError is an error wrapping multiple validation -// errors returned by HealthCheck_TlsOptions.ValidateAll() if the designated -// constraints aren't met. -type HealthCheck_TlsOptionsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m HealthCheck_TlsOptionsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HealthCheck_TlsOptionsMultiError) AllErrors() []error { return m } - -// HealthCheck_TlsOptionsValidationError is the validation error returned by -// HealthCheck_TlsOptions.Validate if the designated constraints aren't met. -type HealthCheck_TlsOptionsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HealthCheck_TlsOptionsValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HealthCheck_TlsOptionsValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HealthCheck_TlsOptionsValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HealthCheck_TlsOptionsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e HealthCheck_TlsOptionsValidationError) ErrorName() string { - return "HealthCheck_TlsOptionsValidationError" -} - -// Error satisfies the builtin error interface -func (e HealthCheck_TlsOptionsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHealthCheck_TlsOptions.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HealthCheck_TlsOptionsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HealthCheck_TlsOptionsValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/http_uri.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/http_uri.pb.go deleted file mode 100644 index 63c338caf0..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/http_uri.pb.go +++ /dev/null @@ -1,234 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/api/v2/core/http_uri.proto - -package core - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Envoy external URI descriptor -type HttpUri struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The HTTP server URI. It should be a full FQDN with protocol, host and path. - // - // Example: - // - // .. code-block:: yaml - // - // uri: https://www.googleapis.com/oauth2/v1/certs - // - Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` - // Specify how `uri` is to be fetched. Today, this requires an explicit - // cluster, but in the future we may support dynamic cluster creation or - // inline DNS resolution. See `issue - // `_. - // - // Types that are assignable to HttpUpstreamType: - // *HttpUri_Cluster - HttpUpstreamType isHttpUri_HttpUpstreamType `protobuf_oneof:"http_upstream_type"` - // Sets the maximum duration in milliseconds that a response can take to arrive upon request. - Timeout *duration.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` -} - -func (x *HttpUri) Reset() { - *x = HttpUri{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_http_uri_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HttpUri) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HttpUri) ProtoMessage() {} - -func (x *HttpUri) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_http_uri_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HttpUri.ProtoReflect.Descriptor instead. 
-func (*HttpUri) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_http_uri_proto_rawDescGZIP(), []int{0} -} - -func (x *HttpUri) GetUri() string { - if x != nil { - return x.Uri - } - return "" -} - -func (m *HttpUri) GetHttpUpstreamType() isHttpUri_HttpUpstreamType { - if m != nil { - return m.HttpUpstreamType - } - return nil -} - -func (x *HttpUri) GetCluster() string { - if x, ok := x.GetHttpUpstreamType().(*HttpUri_Cluster); ok { - return x.Cluster - } - return "" -} - -func (x *HttpUri) GetTimeout() *duration.Duration { - if x != nil { - return x.Timeout - } - return nil -} - -type isHttpUri_HttpUpstreamType interface { - isHttpUri_HttpUpstreamType() -} - -type HttpUri_Cluster struct { - // A cluster is created in the Envoy "cluster_manager" config - // section. This field specifies the cluster name. - // - // Example: - // - // .. code-block:: yaml - // - // cluster: jwks_cluster - // - Cluster string `protobuf:"bytes,2,opt,name=cluster,proto3,oneof"` -} - -func (*HttpUri_Cluster) isHttpUri_HttpUpstreamType() {} - -var File_envoy_api_v2_core_http_uri_proto protoreflect.FileDescriptor - -var file_envoy_api_v2_core_http_uri_proto_rawDesc = []byte{ - 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa5, 0x01, - 0x0a, 0x07, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x12, 0x19, 0x0a, 0x03, 0x75, 0x72, 0x69, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x52, - 0x03, 0x75, 0x72, 0x69, 0x12, 0x23, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x20, 0x01, 0x48, 0x00, - 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x07, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x32, - 0x00, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x19, 0x0a, 0x12, 0x68, 0x74, - 0x74, 0x70, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x8f, 0x01, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x0c, 0x48, 0x74, 0x74, 0x70, 0x55, - 0x72, 0x69, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 
0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0xba, - 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_api_v2_core_http_uri_proto_rawDescOnce sync.Once - file_envoy_api_v2_core_http_uri_proto_rawDescData = file_envoy_api_v2_core_http_uri_proto_rawDesc -) - -func file_envoy_api_v2_core_http_uri_proto_rawDescGZIP() []byte { - file_envoy_api_v2_core_http_uri_proto_rawDescOnce.Do(func() { - file_envoy_api_v2_core_http_uri_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_api_v2_core_http_uri_proto_rawDescData) - }) - return file_envoy_api_v2_core_http_uri_proto_rawDescData -} - -var file_envoy_api_v2_core_http_uri_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_envoy_api_v2_core_http_uri_proto_goTypes = []interface{}{ - (*HttpUri)(nil), // 0: envoy.api.v2.core.HttpUri - (*duration.Duration)(nil), // 1: google.protobuf.Duration -} -var file_envoy_api_v2_core_http_uri_proto_depIdxs = []int32{ - 1, // 0: envoy.api.v2.core.HttpUri.timeout:type_name -> google.protobuf.Duration - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_envoy_api_v2_core_http_uri_proto_init() } -func file_envoy_api_v2_core_http_uri_proto_init() { - if File_envoy_api_v2_core_http_uri_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_envoy_api_v2_core_http_uri_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HttpUri); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_api_v2_core_http_uri_proto_msgTypes[0].OneofWrappers = []interface{}{ - 
(*HttpUri_Cluster)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_api_v2_core_http_uri_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_api_v2_core_http_uri_proto_goTypes, - DependencyIndexes: file_envoy_api_v2_core_http_uri_proto_depIdxs, - MessageInfos: file_envoy_api_v2_core_http_uri_proto_msgTypes, - }.Build() - File_envoy_api_v2_core_http_uri_proto = out.File - file_envoy_api_v2_core_http_uri_proto_rawDesc = nil - file_envoy_api_v2_core_http_uri_proto_goTypes = nil - file_envoy_api_v2_core_http_uri_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/http_uri.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/http_uri.pb.validate.go deleted file mode 100644 index 35c1542cce..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/http_uri.pb.validate.go +++ /dev/null @@ -1,213 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/api/v2/core/http_uri.proto - -package core - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on HttpUri with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. 
-func (m *HttpUri) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HttpUri with the rules defined in the -// proto definition for this message. If any rules are violated, the result is -// a list of violation errors wrapped in HttpUriMultiError, or nil if none found. -func (m *HttpUri) ValidateAll() error { - return m.validate(true) -} - -func (m *HttpUri) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(m.GetUri()) < 1 { - err := HttpUriValidationError{ - field: "Uri", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if m.GetTimeout() == nil { - err := HttpUriValidationError{ - field: "Timeout", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if d := m.GetTimeout(); d != nil { - dur, err := d.AsDuration(), d.CheckValid() - if err != nil { - err = HttpUriValidationError{ - field: "Timeout", - reason: "value is not a valid duration", - cause: err, - } - if !all { - return err - } - errors = append(errors, err) - } else { - - gte := time.Duration(0*time.Second + 0*time.Nanosecond) - - if dur < gte { - err := HttpUriValidationError{ - field: "Timeout", - reason: "value must be greater than or equal to 0s", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - } - - switch m.HttpUpstreamType.(type) { - - case *HttpUri_Cluster: - - if len(m.GetCluster()) < 1 { - err := HttpUriValidationError{ - field: "Cluster", - reason: "value length must be at least 1 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - default: - err := HttpUriValidationError{ - field: "HttpUpstreamType", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return HttpUriMultiError(errors) - } - - return nil -} - -// HttpUriMultiError is an error wrapping 
multiple validation errors returned -// by HttpUri.ValidateAll() if the designated constraints aren't met. -type HttpUriMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HttpUriMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HttpUriMultiError) AllErrors() []error { return m } - -// HttpUriValidationError is the validation error returned by HttpUri.Validate -// if the designated constraints aren't met. -type HttpUriValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HttpUriValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HttpUriValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HttpUriValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HttpUriValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e HttpUriValidationError) ErrorName() string { return "HttpUriValidationError" } - -// Error satisfies the builtin error interface -func (e HttpUriValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHttpUri.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HttpUriValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HttpUriValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/protocol.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/protocol.pb.go deleted file mode 100644 index fd43928b89..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/protocol.pb.go +++ /dev/null @@ -1,1255 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/api/v2/core/protocol.proto - -package core - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Action to take when Envoy receives client request with header names containing underscore -// characters. 
-// Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented -// as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore -// characters. -type HttpProtocolOptions_HeadersWithUnderscoresAction int32 - -const ( - // Allow headers with underscores. This is the default behavior. - HttpProtocolOptions_ALLOW HttpProtocolOptions_HeadersWithUnderscoresAction = 0 - // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests - // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter - // is incremented for each rejected request. - HttpProtocolOptions_REJECT_REQUEST HttpProtocolOptions_HeadersWithUnderscoresAction = 1 - // Drop the header with name containing underscores. The header is dropped before the filter chain is - // invoked and as such filters will not see dropped headers. The - // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. - HttpProtocolOptions_DROP_HEADER HttpProtocolOptions_HeadersWithUnderscoresAction = 2 -) - -// Enum value maps for HttpProtocolOptions_HeadersWithUnderscoresAction. 
-var ( - HttpProtocolOptions_HeadersWithUnderscoresAction_name = map[int32]string{ - 0: "ALLOW", - 1: "REJECT_REQUEST", - 2: "DROP_HEADER", - } - HttpProtocolOptions_HeadersWithUnderscoresAction_value = map[string]int32{ - "ALLOW": 0, - "REJECT_REQUEST": 1, - "DROP_HEADER": 2, - } -) - -func (x HttpProtocolOptions_HeadersWithUnderscoresAction) Enum() *HttpProtocolOptions_HeadersWithUnderscoresAction { - p := new(HttpProtocolOptions_HeadersWithUnderscoresAction) - *p = x - return p -} - -func (x HttpProtocolOptions_HeadersWithUnderscoresAction) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (HttpProtocolOptions_HeadersWithUnderscoresAction) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_api_v2_core_protocol_proto_enumTypes[0].Descriptor() -} - -func (HttpProtocolOptions_HeadersWithUnderscoresAction) Type() protoreflect.EnumType { - return &file_envoy_api_v2_core_protocol_proto_enumTypes[0] -} - -func (x HttpProtocolOptions_HeadersWithUnderscoresAction) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use HttpProtocolOptions_HeadersWithUnderscoresAction.Descriptor instead. 
-func (HttpProtocolOptions_HeadersWithUnderscoresAction) EnumDescriptor() ([]byte, []int) { - return file_envoy_api_v2_core_protocol_proto_rawDescGZIP(), []int{2, 0} -} - -// [#not-implemented-hide:] -type TcpProtocolOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *TcpProtocolOptions) Reset() { - *x = TcpProtocolOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TcpProtocolOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TcpProtocolOptions) ProtoMessage() {} - -func (x *TcpProtocolOptions) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TcpProtocolOptions.ProtoReflect.Descriptor instead. -func (*TcpProtocolOptions) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_protocol_proto_rawDescGZIP(), []int{0} -} - -type UpstreamHttpProtocolOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Set transport socket `SNI `_ for new - // upstream connections based on the downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - AutoSni bool `protobuf:"varint,1,opt,name=auto_sni,json=autoSni,proto3" json:"auto_sni,omitempty"` - // Automatic validate upstream presented certificate for new upstream connections based on the - // downstream HTTP host/authority header, as seen by the - // :ref:`router filter `. - // This field is intended to set with `auto_sni` field. 
- AutoSanValidation bool `protobuf:"varint,2,opt,name=auto_san_validation,json=autoSanValidation,proto3" json:"auto_san_validation,omitempty"` -} - -func (x *UpstreamHttpProtocolOptions) Reset() { - *x = UpstreamHttpProtocolOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpstreamHttpProtocolOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpstreamHttpProtocolOptions) ProtoMessage() {} - -func (x *UpstreamHttpProtocolOptions) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpstreamHttpProtocolOptions.ProtoReflect.Descriptor instead. -func (*UpstreamHttpProtocolOptions) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_protocol_proto_rawDescGZIP(), []int{1} -} - -func (x *UpstreamHttpProtocolOptions) GetAutoSni() bool { - if x != nil { - return x.AutoSni - } - return false -} - -func (x *UpstreamHttpProtocolOptions) GetAutoSanValidation() bool { - if x != nil { - return x.AutoSanValidation - } - return false -} - -// [#next-free-field: 6] -type HttpProtocolOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The idle timeout for connections. The idle timeout is defined as the - // period in which there are no active requests. When the - // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 - // downstream connection a drain sequence will occur prior to closing the connection, see - // :ref:`drain_timeout - // `. 
- // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. - // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. - // - // .. warning:: - // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - // FIN packets, etc. - IdleTimeout *duration.Duration `protobuf:"bytes,1,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"` - // The maximum duration of a connection. The duration is defined as a period since a connection - // was established. If not set, there is no max duration. When max_connection_duration is reached - // the connection will be closed. Drain sequence will occur prior to closing the connection if - // if's applicable. See :ref:`drain_timeout - // `. - // Note: not implemented for upstream connections. - MaxConnectionDuration *duration.Duration `protobuf:"bytes,3,opt,name=max_connection_duration,json=maxConnectionDuration,proto3" json:"max_connection_duration,omitempty"` - // The maximum number of headers. If unconfigured, the default - // maximum number of request headers allowed is 100. Requests that exceed this limit will receive - // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. - MaxHeadersCount *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=max_headers_count,json=maxHeadersCount,proto3" json:"max_headers_count,omitempty"` - // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be - // reset independent of any other timeouts. If not specified, this value is not set. - MaxStreamDuration *duration.Duration `protobuf:"bytes,4,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"` - // Action to take when a client request with a header name containing underscore characters is received. - // If this setting is not specified, the value defaults to ALLOW. 
- // Note: upstream responses are not affected by this setting. - HeadersWithUnderscoresAction HttpProtocolOptions_HeadersWithUnderscoresAction `protobuf:"varint,5,opt,name=headers_with_underscores_action,json=headersWithUnderscoresAction,proto3,enum=envoy.api.v2.core.HttpProtocolOptions_HeadersWithUnderscoresAction" json:"headers_with_underscores_action,omitempty"` -} - -func (x *HttpProtocolOptions) Reset() { - *x = HttpProtocolOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HttpProtocolOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HttpProtocolOptions) ProtoMessage() {} - -func (x *HttpProtocolOptions) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HttpProtocolOptions.ProtoReflect.Descriptor instead. 
-func (*HttpProtocolOptions) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_protocol_proto_rawDescGZIP(), []int{2} -} - -func (x *HttpProtocolOptions) GetIdleTimeout() *duration.Duration { - if x != nil { - return x.IdleTimeout - } - return nil -} - -func (x *HttpProtocolOptions) GetMaxConnectionDuration() *duration.Duration { - if x != nil { - return x.MaxConnectionDuration - } - return nil -} - -func (x *HttpProtocolOptions) GetMaxHeadersCount() *wrappers.UInt32Value { - if x != nil { - return x.MaxHeadersCount - } - return nil -} - -func (x *HttpProtocolOptions) GetMaxStreamDuration() *duration.Duration { - if x != nil { - return x.MaxStreamDuration - } - return nil -} - -func (x *HttpProtocolOptions) GetHeadersWithUnderscoresAction() HttpProtocolOptions_HeadersWithUnderscoresAction { - if x != nil { - return x.HeadersWithUnderscoresAction - } - return HttpProtocolOptions_ALLOW -} - -// [#next-free-field: 6] -type Http1ProtocolOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Handle HTTP requests with absolute URLs in the requests. These requests - // are generally sent by clients to forward/explicit proxies. This allows clients to configure - // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the - // *http_proxy* environment variable. - AllowAbsoluteUrl *wrappers.BoolValue `protobuf:"bytes,1,opt,name=allow_absolute_url,json=allowAbsoluteUrl,proto3" json:"allow_absolute_url,omitempty"` - // Handle incoming HTTP/1.0 and HTTP 0.9 requests. - // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 - // style connect logic, dechunking, and handling lack of client host iff - // *default_host_for_http_10* is configured. - AcceptHttp_10 bool `protobuf:"varint,2,opt,name=accept_http_10,json=acceptHttp10,proto3" json:"accept_http_10,omitempty"` - // A default host for HTTP/1.0 requests. 
This is highly suggested if *accept_http_10* is true as - // Envoy does not otherwise support HTTP/1.0 without a Host header. - // This is a no-op if *accept_http_10* is not true. - DefaultHostForHttp_10 string `protobuf:"bytes,3,opt,name=default_host_for_http_10,json=defaultHostForHttp10,proto3" json:"default_host_for_http_10,omitempty"` - // Describes how the keys for response headers should be formatted. By default, all header keys - // are lower cased. - HeaderKeyFormat *Http1ProtocolOptions_HeaderKeyFormat `protobuf:"bytes,4,opt,name=header_key_format,json=headerKeyFormat,proto3" json:"header_key_format,omitempty"` - // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. - // - // .. attention:: - // - // Note that this only happens when Envoy is chunk encoding which occurs when: - // - The request is HTTP/1.1. - // - Is neither a HEAD only request nor a HTTP Upgrade. - // - Not a response to a HEAD request. - // - The content length header is not present. - EnableTrailers bool `protobuf:"varint,5,opt,name=enable_trailers,json=enableTrailers,proto3" json:"enable_trailers,omitempty"` -} - -func (x *Http1ProtocolOptions) Reset() { - *x = Http1ProtocolOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Http1ProtocolOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Http1ProtocolOptions) ProtoMessage() {} - -func (x *Http1ProtocolOptions) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Http1ProtocolOptions.ProtoReflect.Descriptor instead. 
-func (*Http1ProtocolOptions) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_protocol_proto_rawDescGZIP(), []int{3} -} - -func (x *Http1ProtocolOptions) GetAllowAbsoluteUrl() *wrappers.BoolValue { - if x != nil { - return x.AllowAbsoluteUrl - } - return nil -} - -func (x *Http1ProtocolOptions) GetAcceptHttp_10() bool { - if x != nil { - return x.AcceptHttp_10 - } - return false -} - -func (x *Http1ProtocolOptions) GetDefaultHostForHttp_10() string { - if x != nil { - return x.DefaultHostForHttp_10 - } - return "" -} - -func (x *Http1ProtocolOptions) GetHeaderKeyFormat() *Http1ProtocolOptions_HeaderKeyFormat { - if x != nil { - return x.HeaderKeyFormat - } - return nil -} - -func (x *Http1ProtocolOptions) GetEnableTrailers() bool { - if x != nil { - return x.EnableTrailers - } - return false -} - -// [#next-free-field: 14] -type Http2ProtocolOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // `Maximum table size `_ - // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values - // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header - // compression. - HpackTableSize *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=hpack_table_size,json=hpackTableSize,proto3" json:"hpack_table_size,omitempty"` - // `Maximum concurrent streams `_ - // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) - // and defaults to 2147483647. - // - // For upstream connections, this also limits how many streams Envoy will initiate concurrently - // on a single connection. If the limit is reached, Envoy may queue requests or establish - // additional connections (as allowed per circuit breaker limits). 
- MaxConcurrentStreams *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=max_concurrent_streams,json=maxConcurrentStreams,proto3" json:"max_concurrent_streams,omitempty"` - // `Initial stream-level flow-control window - // `_ size. Valid values range from 65535 - // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 - // (256 * 1024 * 1024). - // - // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default - // window size now, so it's also the minimum. - // - // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the - // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to - // stop the flow of data to the codec buffers. - InitialStreamWindowSize *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"` - // Similar to *initial_stream_window_size*, but for connection-level flow-control - // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. - InitialConnectionWindowSize *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"` - // Allows proxying Websocket and other upgrades over H2 connect. - AllowConnect bool `protobuf:"varint,5,opt,name=allow_connect,json=allowConnect,proto3" json:"allow_connect,omitempty"` - // [#not-implemented-hide:] Hiding until envoy has full metadata support. - // Still under implementation. DO NOT USE. - // - // Allows metadata. See [metadata - // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more - // information. 
- AllowMetadata bool `protobuf:"varint,6,opt,name=allow_metadata,json=allowMetadata,proto3" json:"allow_metadata,omitempty"` - // Limit the number of pending outbound downstream frames of all types (frames that are waiting to - // be written into the socket). Exceeding this limit triggers flood mitigation and connection is - // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due - // to flood mitigation. The default limit is 10000. - // [#comment:TODO: implement same limits for upstream outbound frames as well.] - MaxOutboundFrames *wrappers.UInt32Value `protobuf:"bytes,7,opt,name=max_outbound_frames,json=maxOutboundFrames,proto3" json:"max_outbound_frames,omitempty"` - // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, - // preventing high memory utilization when receiving continuous stream of these frames. Exceeding - // this limit triggers flood mitigation and connection is terminated. The - // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood - // mitigation. The default limit is 1000. - // [#comment:TODO: implement same limits for upstream outbound frames as well.] - MaxOutboundControlFrames *wrappers.UInt32Value `protobuf:"bytes,8,opt,name=max_outbound_control_frames,json=maxOutboundControlFrames,proto3" json:"max_outbound_control_frames,omitempty"` - // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an - // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but - // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood`` - // stat tracks the number of connections terminated due to flood mitigation. - // Setting this to 0 will terminate connection upon receiving first frame with an empty payload - // and no end stream flag. The default limit is 1. 
- // [#comment:TODO: implement same limits for upstream inbound frames as well.] - MaxConsecutiveInboundFramesWithEmptyPayload *wrappers.UInt32Value `protobuf:"bytes,9,opt,name=max_consecutive_inbound_frames_with_empty_payload,json=maxConsecutiveInboundFramesWithEmptyPayload,proto3" json:"max_consecutive_inbound_frames_with_empty_payload,omitempty"` - // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number - // of PRIORITY frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // max_inbound_priority_frames_per_stream * (1 + inbound_streams) - // - // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks - // the number of connections terminated due to flood mitigation. The default limit is 100. - // [#comment:TODO: implement same limits for upstream inbound frames as well.] - MaxInboundPriorityFramesPerStream *wrappers.UInt32Value `protobuf:"bytes,10,opt,name=max_inbound_priority_frames_per_stream,json=maxInboundPriorityFramesPerStream,proto3" json:"max_inbound_priority_frames_per_stream,omitempty"` - // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number - // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated - // using this formula:: - // - // 1 + 2 * (inbound_streams + - // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) - // - // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks - // the number of connections terminated due to flood mitigation. The default limit is 10. - // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, - // but more complex implementations that try to estimate available bandwidth require at least 2. - // [#comment:TODO: implement same limits for upstream inbound frames as well.] 
- MaxInboundWindowUpdateFramesPerDataFrameSent *wrappers.UInt32Value `protobuf:"bytes,11,opt,name=max_inbound_window_update_frames_per_data_frame_sent,json=maxInboundWindowUpdateFramesPerDataFrameSent,proto3" json:"max_inbound_window_update_frames_per_data_frame_sent,omitempty"` - // Allows invalid HTTP messaging and headers. When this option is disabled (default), then - // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, - // when this option is enabled, only the offending stream is terminated. - // - // See `RFC7540, sec. 8.1 `_ for details. - StreamErrorOnInvalidHttpMessaging bool `protobuf:"varint,12,opt,name=stream_error_on_invalid_http_messaging,json=streamErrorOnInvalidHttpMessaging,proto3" json:"stream_error_on_invalid_http_messaging,omitempty"` - // [#not-implemented-hide:] - // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: - // - // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by - // Envoy. - // - // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field - // 'allow_connect'. - // - // Note that custom parameters specified through this field can not also be set in the - // corresponding named parameters: - // - // .. code-block:: text - // - // ID Field Name - // ---------------- - // 0x1 hpack_table_size - // 0x3 max_concurrent_streams - // 0x4 initial_stream_window_size - // - // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies - // between custom parameters with the same identifier will trigger a failure. - // - // See `IANA HTTP/2 Settings - // `_ for - // standardized identifiers. 
- CustomSettingsParameters []*Http2ProtocolOptions_SettingsParameter `protobuf:"bytes,13,rep,name=custom_settings_parameters,json=customSettingsParameters,proto3" json:"custom_settings_parameters,omitempty"` -} - -func (x *Http2ProtocolOptions) Reset() { - *x = Http2ProtocolOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Http2ProtocolOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Http2ProtocolOptions) ProtoMessage() {} - -func (x *Http2ProtocolOptions) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Http2ProtocolOptions.ProtoReflect.Descriptor instead. 
-func (*Http2ProtocolOptions) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_protocol_proto_rawDescGZIP(), []int{4} -} - -func (x *Http2ProtocolOptions) GetHpackTableSize() *wrappers.UInt32Value { - if x != nil { - return x.HpackTableSize - } - return nil -} - -func (x *Http2ProtocolOptions) GetMaxConcurrentStreams() *wrappers.UInt32Value { - if x != nil { - return x.MaxConcurrentStreams - } - return nil -} - -func (x *Http2ProtocolOptions) GetInitialStreamWindowSize() *wrappers.UInt32Value { - if x != nil { - return x.InitialStreamWindowSize - } - return nil -} - -func (x *Http2ProtocolOptions) GetInitialConnectionWindowSize() *wrappers.UInt32Value { - if x != nil { - return x.InitialConnectionWindowSize - } - return nil -} - -func (x *Http2ProtocolOptions) GetAllowConnect() bool { - if x != nil { - return x.AllowConnect - } - return false -} - -func (x *Http2ProtocolOptions) GetAllowMetadata() bool { - if x != nil { - return x.AllowMetadata - } - return false -} - -func (x *Http2ProtocolOptions) GetMaxOutboundFrames() *wrappers.UInt32Value { - if x != nil { - return x.MaxOutboundFrames - } - return nil -} - -func (x *Http2ProtocolOptions) GetMaxOutboundControlFrames() *wrappers.UInt32Value { - if x != nil { - return x.MaxOutboundControlFrames - } - return nil -} - -func (x *Http2ProtocolOptions) GetMaxConsecutiveInboundFramesWithEmptyPayload() *wrappers.UInt32Value { - if x != nil { - return x.MaxConsecutiveInboundFramesWithEmptyPayload - } - return nil -} - -func (x *Http2ProtocolOptions) GetMaxInboundPriorityFramesPerStream() *wrappers.UInt32Value { - if x != nil { - return x.MaxInboundPriorityFramesPerStream - } - return nil -} - -func (x *Http2ProtocolOptions) GetMaxInboundWindowUpdateFramesPerDataFrameSent() *wrappers.UInt32Value { - if x != nil { - return x.MaxInboundWindowUpdateFramesPerDataFrameSent - } - return nil -} - -func (x *Http2ProtocolOptions) GetStreamErrorOnInvalidHttpMessaging() bool { - if x != nil { - return 
x.StreamErrorOnInvalidHttpMessaging - } - return false -} - -func (x *Http2ProtocolOptions) GetCustomSettingsParameters() []*Http2ProtocolOptions_SettingsParameter { - if x != nil { - return x.CustomSettingsParameters - } - return nil -} - -// [#not-implemented-hide:] -type GrpcProtocolOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Http2ProtocolOptions *Http2ProtocolOptions `protobuf:"bytes,1,opt,name=http2_protocol_options,json=http2ProtocolOptions,proto3" json:"http2_protocol_options,omitempty"` -} - -func (x *GrpcProtocolOptions) Reset() { - *x = GrpcProtocolOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcProtocolOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcProtocolOptions) ProtoMessage() {} - -func (x *GrpcProtocolOptions) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcProtocolOptions.ProtoReflect.Descriptor instead. 
-func (*GrpcProtocolOptions) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_protocol_proto_rawDescGZIP(), []int{5} -} - -func (x *GrpcProtocolOptions) GetHttp2ProtocolOptions() *Http2ProtocolOptions { - if x != nil { - return x.Http2ProtocolOptions - } - return nil -} - -type Http1ProtocolOptions_HeaderKeyFormat struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to HeaderFormat: - // *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_ - HeaderFormat isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat `protobuf_oneof:"header_format"` -} - -func (x *Http1ProtocolOptions_HeaderKeyFormat) Reset() { - *x = Http1ProtocolOptions_HeaderKeyFormat{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Http1ProtocolOptions_HeaderKeyFormat) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Http1ProtocolOptions_HeaderKeyFormat) ProtoMessage() {} - -func (x *Http1ProtocolOptions_HeaderKeyFormat) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Http1ProtocolOptions_HeaderKeyFormat.ProtoReflect.Descriptor instead. 
-func (*Http1ProtocolOptions_HeaderKeyFormat) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_protocol_proto_rawDescGZIP(), []int{3, 0} -} - -func (m *Http1ProtocolOptions_HeaderKeyFormat) GetHeaderFormat() isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat { - if m != nil { - return m.HeaderFormat - } - return nil -} - -func (x *Http1ProtocolOptions_HeaderKeyFormat) GetProperCaseWords() *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords { - if x, ok := x.GetHeaderFormat().(*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_); ok { - return x.ProperCaseWords - } - return nil -} - -type isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat interface { - isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat() -} - -type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_ struct { - // Formats the header by proper casing words: the first character and any character following - // a special character will be capitalized if it's an alpha character. For example, - // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". - // Note that while this results in most headers following conventional casing, certain headers - // are not covered. For example, the "TE" header will be formatted as "Te". 
- ProperCaseWords *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords `protobuf:"bytes,1,opt,name=proper_case_words,json=properCaseWords,proto3,oneof"` -} - -func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat() { -} - -type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Reset() { - *x = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ProtoMessage() {} - -func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.ProtoReflect.Descriptor instead. -func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_protocol_proto_rawDescGZIP(), []int{3, 0, 0} -} - -// Defines a parameter to be sent in the SETTINGS frame. -// See `RFC7540, sec. 6.5.1 `_ for details. -type Http2ProtocolOptions_SettingsParameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The 16 bit parameter identifier. 
- Identifier *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` - // The 32 bit parameter value. - Value *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *Http2ProtocolOptions_SettingsParameter) Reset() { - *x = Http2ProtocolOptions_SettingsParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Http2ProtocolOptions_SettingsParameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Http2ProtocolOptions_SettingsParameter) ProtoMessage() {} - -func (x *Http2ProtocolOptions_SettingsParameter) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_protocol_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Http2ProtocolOptions_SettingsParameter.ProtoReflect.Descriptor instead. 
-func (*Http2ProtocolOptions_SettingsParameter) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_protocol_proto_rawDescGZIP(), []int{4, 0} -} - -func (x *Http2ProtocolOptions_SettingsParameter) GetIdentifier() *wrappers.UInt32Value { - if x != nil { - return x.Identifier - } - return nil -} - -func (x *Http2ProtocolOptions_SettingsParameter) GetValue() *wrappers.UInt32Value { - if x != nil { - return x.Value - } - return nil -} - -var File_envoy_api_v2_core_protocol_proto protoreflect.FileDescriptor - -var file_envoy_api_v2_core_protocol_proto_rawDesc = []byte{ - 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x14, 0x0a, - 0x12, 0x54, 0x63, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0x68, 0x0a, 0x1b, 
0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, - 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x6e, 0x69, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x6e, 0x69, 0x12, 0x2e, 0x0a, - 0x13, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x61, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x75, 0x74, 0x6f, - 0x53, 0x61, 0x6e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa1, 0x04, - 0x0a, 0x13, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x12, 0x51, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x68, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 
0x75, 0x65, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x13, 0x6d, 0x61, 0x78, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8a, 0x01, 0x0a, 0x1f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, - 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x43, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, - 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, - 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x4e, 0x0a, 0x1c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, - 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, - 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, - 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, - 0x02, 0x22, 0x80, 
0x04, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x62, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x65, 0x5f, 0x75, 0x72, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x62, 0x73, 0x6f, 0x6c, 0x75, 0x74, - 0x65, 0x55, 0x72, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, 0x68, - 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x48, 0x74, 0x74, 0x70, 0x31, 0x30, 0x12, 0x36, 0x0a, 0x18, 0x64, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, - 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x46, 0x6f, 0x72, 0x48, 0x74, 0x74, 0x70, - 0x31, 0x30, 0x12, 0x63, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, - 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x0f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, - 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 
0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, - 0x1a, 0xb1, 0x01, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x75, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x63, - 0x61, 0x73, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, - 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, - 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x70, - 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x11, 0x0a, 0x0f, 0x50, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x42, 0x14, - 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x03, 0xf8, 0x42, 0x01, 0x22, 0xa9, 0x0b, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, - 0x10, 0x68, 0x70, 0x61, 0x63, 0x6b, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x68, 0x70, 0x61, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x61, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x2a, 0x08, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, - 0x28, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, - 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x6a, 0x0a, 0x1a, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, - 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, - 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0xff, 0xff, 0x03, 0x52, 0x17, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, - 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, - 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0xff, 0xff, 0x03, 0x52, 0x1b, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, - 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, - 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, - 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 
0x77, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x55, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, - 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, - 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x1b, 0x6d, - 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, - 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, - 0x73, 0x12, 0x84, 0x01, 0x0a, 0x31, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, - 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x2b, 0x6d, 0x61, 0x78, - 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 
0x49, 0x6e, 0x62, 0x6f, 0x75, - 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x6f, 0x0a, 0x26, 0x6d, 0x61, 0x78, 0x5f, - 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, - 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, - 0x6e, 0x64, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, - 0x50, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x91, 0x01, 0x0a, 0x34, 0x6d, 0x61, - 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, - 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, - 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x65, - 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, - 0x2c, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x57, 0x69, 0x6e, 0x64, 0x6f, - 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, - 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x51, 0x0a, - 0x26, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, - 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, - 0x73, 
0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x21, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, - 0x12, 0x77, 0x0a, 0x1a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0d, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, - 0x18, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0xa4, 0x01, 0x0a, 0x11, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, - 0x51, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x13, 0xfa, 0x42, 0x08, 0x2a, 0x06, 0x18, 0x80, 0x80, 0x04, 0x28, 0x01, 0xfa, 0x42, - 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 
0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x22, 0x74, 0x0a, 0x13, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, - 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x90, 0x01, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x0d, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, - 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_envoy_api_v2_core_protocol_proto_rawDescOnce sync.Once - file_envoy_api_v2_core_protocol_proto_rawDescData = file_envoy_api_v2_core_protocol_proto_rawDesc -) - -func file_envoy_api_v2_core_protocol_proto_rawDescGZIP() []byte { - 
file_envoy_api_v2_core_protocol_proto_rawDescOnce.Do(func() { - file_envoy_api_v2_core_protocol_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_api_v2_core_protocol_proto_rawDescData) - }) - return file_envoy_api_v2_core_protocol_proto_rawDescData -} - -var file_envoy_api_v2_core_protocol_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_api_v2_core_protocol_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_envoy_api_v2_core_protocol_proto_goTypes = []interface{}{ - (HttpProtocolOptions_HeadersWithUnderscoresAction)(0), // 0: envoy.api.v2.core.HttpProtocolOptions.HeadersWithUnderscoresAction - (*TcpProtocolOptions)(nil), // 1: envoy.api.v2.core.TcpProtocolOptions - (*UpstreamHttpProtocolOptions)(nil), // 2: envoy.api.v2.core.UpstreamHttpProtocolOptions - (*HttpProtocolOptions)(nil), // 3: envoy.api.v2.core.HttpProtocolOptions - (*Http1ProtocolOptions)(nil), // 4: envoy.api.v2.core.Http1ProtocolOptions - (*Http2ProtocolOptions)(nil), // 5: envoy.api.v2.core.Http2ProtocolOptions - (*GrpcProtocolOptions)(nil), // 6: envoy.api.v2.core.GrpcProtocolOptions - (*Http1ProtocolOptions_HeaderKeyFormat)(nil), // 7: envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat - (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords)(nil), // 8: envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords - (*Http2ProtocolOptions_SettingsParameter)(nil), // 9: envoy.api.v2.core.Http2ProtocolOptions.SettingsParameter - (*duration.Duration)(nil), // 10: google.protobuf.Duration - (*wrappers.UInt32Value)(nil), // 11: google.protobuf.UInt32Value - (*wrappers.BoolValue)(nil), // 12: google.protobuf.BoolValue -} -var file_envoy_api_v2_core_protocol_proto_depIdxs = []int32{ - 10, // 0: envoy.api.v2.core.HttpProtocolOptions.idle_timeout:type_name -> google.protobuf.Duration - 10, // 1: envoy.api.v2.core.HttpProtocolOptions.max_connection_duration:type_name -> google.protobuf.Duration - 11, // 2: 
envoy.api.v2.core.HttpProtocolOptions.max_headers_count:type_name -> google.protobuf.UInt32Value - 10, // 3: envoy.api.v2.core.HttpProtocolOptions.max_stream_duration:type_name -> google.protobuf.Duration - 0, // 4: envoy.api.v2.core.HttpProtocolOptions.headers_with_underscores_action:type_name -> envoy.api.v2.core.HttpProtocolOptions.HeadersWithUnderscoresAction - 12, // 5: envoy.api.v2.core.Http1ProtocolOptions.allow_absolute_url:type_name -> google.protobuf.BoolValue - 7, // 6: envoy.api.v2.core.Http1ProtocolOptions.header_key_format:type_name -> envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat - 11, // 7: envoy.api.v2.core.Http2ProtocolOptions.hpack_table_size:type_name -> google.protobuf.UInt32Value - 11, // 8: envoy.api.v2.core.Http2ProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value - 11, // 9: envoy.api.v2.core.Http2ProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value - 11, // 10: envoy.api.v2.core.Http2ProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value - 11, // 11: envoy.api.v2.core.Http2ProtocolOptions.max_outbound_frames:type_name -> google.protobuf.UInt32Value - 11, // 12: envoy.api.v2.core.Http2ProtocolOptions.max_outbound_control_frames:type_name -> google.protobuf.UInt32Value - 11, // 13: envoy.api.v2.core.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload:type_name -> google.protobuf.UInt32Value - 11, // 14: envoy.api.v2.core.Http2ProtocolOptions.max_inbound_priority_frames_per_stream:type_name -> google.protobuf.UInt32Value - 11, // 15: envoy.api.v2.core.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent:type_name -> google.protobuf.UInt32Value - 9, // 16: envoy.api.v2.core.Http2ProtocolOptions.custom_settings_parameters:type_name -> envoy.api.v2.core.Http2ProtocolOptions.SettingsParameter - 5, // 17: envoy.api.v2.core.GrpcProtocolOptions.http2_protocol_options:type_name -> 
envoy.api.v2.core.Http2ProtocolOptions - 8, // 18: envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat.proper_case_words:type_name -> envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords - 11, // 19: envoy.api.v2.core.Http2ProtocolOptions.SettingsParameter.identifier:type_name -> google.protobuf.UInt32Value - 11, // 20: envoy.api.v2.core.Http2ProtocolOptions.SettingsParameter.value:type_name -> google.protobuf.UInt32Value - 21, // [21:21] is the sub-list for method output_type - 21, // [21:21] is the sub-list for method input_type - 21, // [21:21] is the sub-list for extension type_name - 21, // [21:21] is the sub-list for extension extendee - 0, // [0:21] is the sub-list for field type_name -} - -func init() { file_envoy_api_v2_core_protocol_proto_init() } -func file_envoy_api_v2_core_protocol_proto_init() { - if File_envoy_api_v2_core_protocol_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_envoy_api_v2_core_protocol_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TcpProtocolOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_protocol_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpstreamHttpProtocolOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_protocol_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HttpProtocolOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_protocol_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Http1ProtocolOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - 
return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_protocol_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Http2ProtocolOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_protocol_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcProtocolOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_protocol_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Http1ProtocolOptions_HeaderKeyFormat); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_protocol_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_api_v2_core_protocol_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Http2ProtocolOptions_SettingsParameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_api_v2_core_protocol_proto_msgTypes[6].OneofWrappers = []interface{}{ - (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_api_v2_core_protocol_proto_rawDesc, - NumEnums: 1, - NumMessages: 9, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_api_v2_core_protocol_proto_goTypes, - 
DependencyIndexes: file_envoy_api_v2_core_protocol_proto_depIdxs, - EnumInfos: file_envoy_api_v2_core_protocol_proto_enumTypes, - MessageInfos: file_envoy_api_v2_core_protocol_proto_msgTypes, - }.Build() - File_envoy_api_v2_core_protocol_proto = out.File - file_envoy_api_v2_core_protocol_proto_rawDesc = nil - file_envoy_api_v2_core_protocol_proto_goTypes = nil - file_envoy_api_v2_core_protocol_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/protocol.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/protocol.pb.validate.go deleted file mode 100644 index 120ddfa2b4..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/protocol.pb.validate.go +++ /dev/null @@ -1,1501 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/api/v2/core/protocol.proto - -package core - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on TcpProtocolOptions with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *TcpProtocolOptions) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on TcpProtocolOptions with the rules -// defined in the proto definition for this message. 
If any rules are -// violated, the result is a list of violation errors wrapped in -// TcpProtocolOptionsMultiError, or nil if none found. -func (m *TcpProtocolOptions) ValidateAll() error { - return m.validate(true) -} - -func (m *TcpProtocolOptions) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(errors) > 0 { - return TcpProtocolOptionsMultiError(errors) - } - - return nil -} - -// TcpProtocolOptionsMultiError is an error wrapping multiple validation errors -// returned by TcpProtocolOptions.ValidateAll() if the designated constraints -// aren't met. -type TcpProtocolOptionsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m TcpProtocolOptionsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m TcpProtocolOptionsMultiError) AllErrors() []error { return m } - -// TcpProtocolOptionsValidationError is the validation error returned by -// TcpProtocolOptions.Validate if the designated constraints aren't met. -type TcpProtocolOptionsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e TcpProtocolOptionsValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e TcpProtocolOptionsValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e TcpProtocolOptionsValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e TcpProtocolOptionsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e TcpProtocolOptionsValidationError) ErrorName() string { - return "TcpProtocolOptionsValidationError" -} - -// Error satisfies the builtin error interface -func (e TcpProtocolOptionsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sTcpProtocolOptions.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = TcpProtocolOptionsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = TcpProtocolOptionsValidationError{} - -// Validate checks the field values on UpstreamHttpProtocolOptions with the -// rules defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *UpstreamHttpProtocolOptions) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on UpstreamHttpProtocolOptions with the -// rules defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// UpstreamHttpProtocolOptionsMultiError, or nil if none found. -func (m *UpstreamHttpProtocolOptions) ValidateAll() error { - return m.validate(true) -} - -func (m *UpstreamHttpProtocolOptions) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for AutoSni - - // no validation rules for AutoSanValidation - - if len(errors) > 0 { - return UpstreamHttpProtocolOptionsMultiError(errors) - } - - return nil -} - -// UpstreamHttpProtocolOptionsMultiError is an error wrapping multiple -// validation errors returned by UpstreamHttpProtocolOptions.ValidateAll() if -// the designated constraints aren't met. 
-type UpstreamHttpProtocolOptionsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m UpstreamHttpProtocolOptionsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m UpstreamHttpProtocolOptionsMultiError) AllErrors() []error { return m } - -// UpstreamHttpProtocolOptionsValidationError is the validation error returned -// by UpstreamHttpProtocolOptions.Validate if the designated constraints -// aren't met. -type UpstreamHttpProtocolOptionsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e UpstreamHttpProtocolOptionsValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e UpstreamHttpProtocolOptionsValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e UpstreamHttpProtocolOptionsValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e UpstreamHttpProtocolOptionsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e UpstreamHttpProtocolOptionsValidationError) ErrorName() string { - return "UpstreamHttpProtocolOptionsValidationError" -} - -// Error satisfies the builtin error interface -func (e UpstreamHttpProtocolOptionsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sUpstreamHttpProtocolOptions.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = UpstreamHttpProtocolOptionsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = UpstreamHttpProtocolOptionsValidationError{} - -// Validate checks the field values on HttpProtocolOptions with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *HttpProtocolOptions) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HttpProtocolOptions with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// HttpProtocolOptionsMultiError, or nil if none found. 
-func (m *HttpProtocolOptions) ValidateAll() error { - return m.validate(true) -} - -func (m *HttpProtocolOptions) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetIdleTimeout()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HttpProtocolOptionsValidationError{ - field: "IdleTimeout", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HttpProtocolOptionsValidationError{ - field: "IdleTimeout", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetIdleTimeout()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HttpProtocolOptionsValidationError{ - field: "IdleTimeout", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetMaxConnectionDuration()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HttpProtocolOptionsValidationError{ - field: "MaxConnectionDuration", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HttpProtocolOptionsValidationError{ - field: "MaxConnectionDuration", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetMaxConnectionDuration()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HttpProtocolOptionsValidationError{ - field: "MaxConnectionDuration", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if wrapper := m.GetMaxHeadersCount(); wrapper != nil { - - if wrapper.GetValue() < 1 { - err := 
HttpProtocolOptionsValidationError{ - field: "MaxHeadersCount", - reason: "value must be greater than or equal to 1", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - - if all { - switch v := interface{}(m.GetMaxStreamDuration()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HttpProtocolOptionsValidationError{ - field: "MaxStreamDuration", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HttpProtocolOptionsValidationError{ - field: "MaxStreamDuration", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetMaxStreamDuration()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HttpProtocolOptionsValidationError{ - field: "MaxStreamDuration", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for HeadersWithUnderscoresAction - - if len(errors) > 0 { - return HttpProtocolOptionsMultiError(errors) - } - - return nil -} - -// HttpProtocolOptionsMultiError is an error wrapping multiple validation -// errors returned by HttpProtocolOptions.ValidateAll() if the designated -// constraints aren't met. -type HttpProtocolOptionsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HttpProtocolOptionsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HttpProtocolOptionsMultiError) AllErrors() []error { return m } - -// HttpProtocolOptionsValidationError is the validation error returned by -// HttpProtocolOptions.Validate if the designated constraints aren't met. 
-type HttpProtocolOptionsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HttpProtocolOptionsValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HttpProtocolOptionsValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HttpProtocolOptionsValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HttpProtocolOptionsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e HttpProtocolOptionsValidationError) ErrorName() string { - return "HttpProtocolOptionsValidationError" -} - -// Error satisfies the builtin error interface -func (e HttpProtocolOptionsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHttpProtocolOptions.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HttpProtocolOptionsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HttpProtocolOptionsValidationError{} - -// Validate checks the field values on Http1ProtocolOptions with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *Http1ProtocolOptions) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Http1ProtocolOptions with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// Http1ProtocolOptionsMultiError, or nil if none found. 
-func (m *Http1ProtocolOptions) ValidateAll() error { - return m.validate(true) -} - -func (m *Http1ProtocolOptions) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetAllowAbsoluteUrl()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, Http1ProtocolOptionsValidationError{ - field: "AllowAbsoluteUrl", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, Http1ProtocolOptionsValidationError{ - field: "AllowAbsoluteUrl", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetAllowAbsoluteUrl()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return Http1ProtocolOptionsValidationError{ - field: "AllowAbsoluteUrl", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for AcceptHttp_10 - - // no validation rules for DefaultHostForHttp_10 - - if all { - switch v := interface{}(m.GetHeaderKeyFormat()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, Http1ProtocolOptionsValidationError{ - field: "HeaderKeyFormat", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, Http1ProtocolOptionsValidationError{ - field: "HeaderKeyFormat", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetHeaderKeyFormat()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return Http1ProtocolOptionsValidationError{ - field: "HeaderKeyFormat", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no 
validation rules for EnableTrailers - - if len(errors) > 0 { - return Http1ProtocolOptionsMultiError(errors) - } - - return nil -} - -// Http1ProtocolOptionsMultiError is an error wrapping multiple validation -// errors returned by Http1ProtocolOptions.ValidateAll() if the designated -// constraints aren't met. -type Http1ProtocolOptionsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m Http1ProtocolOptionsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m Http1ProtocolOptionsMultiError) AllErrors() []error { return m } - -// Http1ProtocolOptionsValidationError is the validation error returned by -// Http1ProtocolOptions.Validate if the designated constraints aren't met. -type Http1ProtocolOptionsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e Http1ProtocolOptionsValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e Http1ProtocolOptionsValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e Http1ProtocolOptionsValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e Http1ProtocolOptionsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e Http1ProtocolOptionsValidationError) ErrorName() string { - return "Http1ProtocolOptionsValidationError" -} - -// Error satisfies the builtin error interface -func (e Http1ProtocolOptionsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHttp1ProtocolOptions.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = Http1ProtocolOptionsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = Http1ProtocolOptionsValidationError{} - -// Validate checks the field values on Http2ProtocolOptions with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *Http2ProtocolOptions) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Http2ProtocolOptions with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// Http2ProtocolOptionsMultiError, or nil if none found. 
-func (m *Http2ProtocolOptions) ValidateAll() error { - return m.validate(true) -} - -func (m *Http2ProtocolOptions) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetHpackTableSize()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, Http2ProtocolOptionsValidationError{ - field: "HpackTableSize", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, Http2ProtocolOptionsValidationError{ - field: "HpackTableSize", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetHpackTableSize()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return Http2ProtocolOptionsValidationError{ - field: "HpackTableSize", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if wrapper := m.GetMaxConcurrentStreams(); wrapper != nil { - - if val := wrapper.GetValue(); val < 1 || val > 2147483647 { - err := Http2ProtocolOptionsValidationError{ - field: "MaxConcurrentStreams", - reason: "value must be inside range [1, 2147483647]", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - - if wrapper := m.GetInitialStreamWindowSize(); wrapper != nil { - - if val := wrapper.GetValue(); val < 65535 || val > 2147483647 { - err := Http2ProtocolOptionsValidationError{ - field: "InitialStreamWindowSize", - reason: "value must be inside range [65535, 2147483647]", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - - if wrapper := m.GetInitialConnectionWindowSize(); wrapper != nil { - - if val := wrapper.GetValue(); val < 65535 || val > 2147483647 { - err := Http2ProtocolOptionsValidationError{ - field: "InitialConnectionWindowSize", - reason: "value must be inside range [65535, 
2147483647]", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - - // no validation rules for AllowConnect - - // no validation rules for AllowMetadata - - if wrapper := m.GetMaxOutboundFrames(); wrapper != nil { - - if wrapper.GetValue() < 1 { - err := Http2ProtocolOptionsValidationError{ - field: "MaxOutboundFrames", - reason: "value must be greater than or equal to 1", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - - if wrapper := m.GetMaxOutboundControlFrames(); wrapper != nil { - - if wrapper.GetValue() < 1 { - err := Http2ProtocolOptionsValidationError{ - field: "MaxOutboundControlFrames", - reason: "value must be greater than or equal to 1", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - - if all { - switch v := interface{}(m.GetMaxConsecutiveInboundFramesWithEmptyPayload()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, Http2ProtocolOptionsValidationError{ - field: "MaxConsecutiveInboundFramesWithEmptyPayload", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, Http2ProtocolOptionsValidationError{ - field: "MaxConsecutiveInboundFramesWithEmptyPayload", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetMaxConsecutiveInboundFramesWithEmptyPayload()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return Http2ProtocolOptionsValidationError{ - field: "MaxConsecutiveInboundFramesWithEmptyPayload", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if all { - switch v := interface{}(m.GetMaxInboundPriorityFramesPerStream()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, 
Http2ProtocolOptionsValidationError{ - field: "MaxInboundPriorityFramesPerStream", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, Http2ProtocolOptionsValidationError{ - field: "MaxInboundPriorityFramesPerStream", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetMaxInboundPriorityFramesPerStream()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return Http2ProtocolOptionsValidationError{ - field: "MaxInboundPriorityFramesPerStream", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if wrapper := m.GetMaxInboundWindowUpdateFramesPerDataFrameSent(); wrapper != nil { - - if wrapper.GetValue() < 1 { - err := Http2ProtocolOptionsValidationError{ - field: "MaxInboundWindowUpdateFramesPerDataFrameSent", - reason: "value must be greater than or equal to 1", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - - // no validation rules for StreamErrorOnInvalidHttpMessaging - - for idx, item := range m.GetCustomSettingsParameters() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, Http2ProtocolOptionsValidationError{ - field: fmt.Sprintf("CustomSettingsParameters[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, Http2ProtocolOptionsValidationError{ - field: fmt.Sprintf("CustomSettingsParameters[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return Http2ProtocolOptionsValidationError{ - 
field: fmt.Sprintf("CustomSettingsParameters[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return Http2ProtocolOptionsMultiError(errors) - } - - return nil -} - -// Http2ProtocolOptionsMultiError is an error wrapping multiple validation -// errors returned by Http2ProtocolOptions.ValidateAll() if the designated -// constraints aren't met. -type Http2ProtocolOptionsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m Http2ProtocolOptionsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m Http2ProtocolOptionsMultiError) AllErrors() []error { return m } - -// Http2ProtocolOptionsValidationError is the validation error returned by -// Http2ProtocolOptions.Validate if the designated constraints aren't met. -type Http2ProtocolOptionsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e Http2ProtocolOptionsValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e Http2ProtocolOptionsValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e Http2ProtocolOptionsValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e Http2ProtocolOptionsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e Http2ProtocolOptionsValidationError) ErrorName() string { - return "Http2ProtocolOptionsValidationError" -} - -// Error satisfies the builtin error interface -func (e Http2ProtocolOptionsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHttp2ProtocolOptions.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = Http2ProtocolOptionsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = Http2ProtocolOptionsValidationError{} - -// Validate checks the field values on GrpcProtocolOptions with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *GrpcProtocolOptions) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on GrpcProtocolOptions with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// GrpcProtocolOptionsMultiError, or nil if none found. 
-func (m *GrpcProtocolOptions) ValidateAll() error { - return m.validate(true) -} - -func (m *GrpcProtocolOptions) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetHttp2ProtocolOptions()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, GrpcProtocolOptionsValidationError{ - field: "Http2ProtocolOptions", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, GrpcProtocolOptionsValidationError{ - field: "Http2ProtocolOptions", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetHttp2ProtocolOptions()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return GrpcProtocolOptionsValidationError{ - field: "Http2ProtocolOptions", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return GrpcProtocolOptionsMultiError(errors) - } - - return nil -} - -// GrpcProtocolOptionsMultiError is an error wrapping multiple validation -// errors returned by GrpcProtocolOptions.ValidateAll() if the designated -// constraints aren't met. -type GrpcProtocolOptionsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m GrpcProtocolOptionsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m GrpcProtocolOptionsMultiError) AllErrors() []error { return m } - -// GrpcProtocolOptionsValidationError is the validation error returned by -// GrpcProtocolOptions.Validate if the designated constraints aren't met. 
-type GrpcProtocolOptionsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GrpcProtocolOptionsValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e GrpcProtocolOptionsValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e GrpcProtocolOptionsValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e GrpcProtocolOptionsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e GrpcProtocolOptionsValidationError) ErrorName() string { - return "GrpcProtocolOptionsValidationError" -} - -// Error satisfies the builtin error interface -func (e GrpcProtocolOptionsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcProtocolOptions.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcProtocolOptionsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcProtocolOptionsValidationError{} - -// Validate checks the field values on Http1ProtocolOptions_HeaderKeyFormat -// with the rules defined in the proto definition for this message. If any -// rules are violated, the first error encountered is returned, or nil if -// there are no violations. -func (m *Http1ProtocolOptions_HeaderKeyFormat) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Http1ProtocolOptions_HeaderKeyFormat -// with the rules defined in the proto definition for this message. If any -// rules are violated, the result is a list of violation errors wrapped in -// Http1ProtocolOptions_HeaderKeyFormatMultiError, or nil if none found. 
-func (m *Http1ProtocolOptions_HeaderKeyFormat) ValidateAll() error { - return m.validate(true) -} - -func (m *Http1ProtocolOptions_HeaderKeyFormat) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.HeaderFormat.(type) { - - case *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_: - - if all { - switch v := interface{}(m.GetProperCaseWords()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{ - field: "ProperCaseWords", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{ - field: "ProperCaseWords", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetProperCaseWords()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return Http1ProtocolOptions_HeaderKeyFormatValidationError{ - field: "ProperCaseWords", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := Http1ProtocolOptions_HeaderKeyFormatValidationError{ - field: "HeaderFormat", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return Http1ProtocolOptions_HeaderKeyFormatMultiError(errors) - } - - return nil -} - -// Http1ProtocolOptions_HeaderKeyFormatMultiError is an error wrapping multiple -// validation errors returned by -// Http1ProtocolOptions_HeaderKeyFormat.ValidateAll() if the designated -// constraints aren't met. -type Http1ProtocolOptions_HeaderKeyFormatMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m Http1ProtocolOptions_HeaderKeyFormatMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m Http1ProtocolOptions_HeaderKeyFormatMultiError) AllErrors() []error { return m } - -// Http1ProtocolOptions_HeaderKeyFormatValidationError is the validation error -// returned by Http1ProtocolOptions_HeaderKeyFormat.Validate if the designated -// constraints aren't met. -type Http1ProtocolOptions_HeaderKeyFormatValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) ErrorName() string { - return "Http1ProtocolOptions_HeaderKeyFormatValidationError" -} - -// Error satisfies the builtin error interface -func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHttp1ProtocolOptions_HeaderKeyFormat.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = Http1ProtocolOptions_HeaderKeyFormatValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = Http1ProtocolOptions_HeaderKeyFormatValidationError{} - -// Validate checks the field values on -// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords with the rules defined -// in the proto definition for this message. If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on -// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in -// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError, or nil if -// none found. 
-func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ValidateAll() error { - return m.validate(true) -} - -func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(errors) > 0 { - return Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError(errors) - } - - return nil -} - -// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError is an error -// wrapping multiple validation errors returned by -// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.ValidateAll() if the -// designated constraints aren't met. -type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError) AllErrors() []error { return m } - -// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError is the -// validation error returned by -// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.Validate if the -// designated constraints aren't met. -type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Field() string { - return e.field -} - -// Reason function returns reason value. -func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Reason() string { - return e.reason -} - -// Cause function returns cause value. 
-func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Cause() error { - return e.cause -} - -// Key function returns key value. -func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) ErrorName() string { - return "Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError" -} - -// Error satisfies the builtin error interface -func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHttp1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError{} - -// Validate checks the field values on Http2ProtocolOptions_SettingsParameter -// with the rules defined in the proto definition for this message. If any -// rules are violated, the first error encountered is returned, or nil if -// there are no violations. -func (m *Http2ProtocolOptions_SettingsParameter) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on -// Http2ProtocolOptions_SettingsParameter with the rules defined in the proto -// definition for this message. If any rules are violated, the result is a -// list of violation errors wrapped in -// Http2ProtocolOptions_SettingsParameterMultiError, or nil if none found. 
-func (m *Http2ProtocolOptions_SettingsParameter) ValidateAll() error { - return m.validate(true) -} - -func (m *Http2ProtocolOptions_SettingsParameter) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if wrapper := m.GetIdentifier(); wrapper != nil { - - if val := wrapper.GetValue(); val < 1 || val > 65536 { - err := Http2ProtocolOptions_SettingsParameterValidationError{ - field: "Identifier", - reason: "value must be inside range [1, 65536]", - } - if !all { - return err - } - errors = append(errors, err) - } - - } else { - err := Http2ProtocolOptions_SettingsParameterValidationError{ - field: "Identifier", - reason: "value is required and must not be nil.", - } - if !all { - return err - } - errors = append(errors, err) - } - - if m.GetValue() == nil { - err := Http2ProtocolOptions_SettingsParameterValidationError{ - field: "Value", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetValue()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, Http2ProtocolOptions_SettingsParameterValidationError{ - field: "Value", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, Http2ProtocolOptions_SettingsParameterValidationError{ - field: "Value", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return Http2ProtocolOptions_SettingsParameterValidationError{ - field: "Value", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return Http2ProtocolOptions_SettingsParameterMultiError(errors) - } - - return nil -} - -// Http2ProtocolOptions_SettingsParameterMultiError 
is an error wrapping -// multiple validation errors returned by -// Http2ProtocolOptions_SettingsParameter.ValidateAll() if the designated -// constraints aren't met. -type Http2ProtocolOptions_SettingsParameterMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m Http2ProtocolOptions_SettingsParameterMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m Http2ProtocolOptions_SettingsParameterMultiError) AllErrors() []error { return m } - -// Http2ProtocolOptions_SettingsParameterValidationError is the validation -// error returned by Http2ProtocolOptions_SettingsParameter.Validate if the -// designated constraints aren't met. -type Http2ProtocolOptions_SettingsParameterValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e Http2ProtocolOptions_SettingsParameterValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e Http2ProtocolOptions_SettingsParameterValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e Http2ProtocolOptions_SettingsParameterValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e Http2ProtocolOptions_SettingsParameterValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e Http2ProtocolOptions_SettingsParameterValidationError) ErrorName() string { - return "Http2ProtocolOptions_SettingsParameterValidationError" -} - -// Error satisfies the builtin error interface -func (e Http2ProtocolOptions_SettingsParameterValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHttp2ProtocolOptions_SettingsParameter.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = Http2ProtocolOptions_SettingsParameterValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = Http2ProtocolOptions_SettingsParameterValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/socket_option.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/socket_option.pb.go deleted file mode 100644 index 74874f2b3c..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/socket_option.pb.go +++ /dev/null @@ -1,313 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/api/v2/core/socket_option.proto - -package core - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type SocketOption_SocketState int32 - -const ( - // Socket options are applied after socket creation but before binding the socket to a port - SocketOption_STATE_PREBIND SocketOption_SocketState = 0 - // Socket options are applied after binding the socket to a port but before calling listen() - SocketOption_STATE_BOUND SocketOption_SocketState = 1 - // Socket options are applied after calling listen() - SocketOption_STATE_LISTENING SocketOption_SocketState = 2 -) - -// Enum value maps for SocketOption_SocketState. -var ( - SocketOption_SocketState_name = map[int32]string{ - 0: "STATE_PREBIND", - 1: "STATE_BOUND", - 2: "STATE_LISTENING", - } - SocketOption_SocketState_value = map[string]int32{ - "STATE_PREBIND": 0, - "STATE_BOUND": 1, - "STATE_LISTENING": 2, - } -) - -func (x SocketOption_SocketState) Enum() *SocketOption_SocketState { - p := new(SocketOption_SocketState) - *p = x - return p -} - -func (x SocketOption_SocketState) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (SocketOption_SocketState) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_api_v2_core_socket_option_proto_enumTypes[0].Descriptor() -} - -func (SocketOption_SocketState) Type() protoreflect.EnumType { - return &file_envoy_api_v2_core_socket_option_proto_enumTypes[0] -} - -func (x SocketOption_SocketState) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use SocketOption_SocketState.Descriptor instead. -func (SocketOption_SocketState) EnumDescriptor() ([]byte, []int) { - return file_envoy_api_v2_core_socket_option_proto_rawDescGZIP(), []int{0, 0} -} - -// Generic socket option message. This would be used to set socket options that -// might not exist in upstream kernels or precompiled Envoy binaries. 
-// [#next-free-field: 7] -type SocketOption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // An optional name to give this socket option for debugging, etc. - // Uniqueness is not required and no special meaning is assumed. - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP - Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"` - // The numeric name as passed to setsockopt - Name int64 `protobuf:"varint,3,opt,name=name,proto3" json:"name,omitempty"` - // Types that are assignable to Value: - // *SocketOption_IntValue - // *SocketOption_BufValue - Value isSocketOption_Value `protobuf_oneof:"value"` - // The state in which the option will be applied. When used in BindConfig - // STATE_PREBIND is currently the only valid value. - State SocketOption_SocketState `protobuf:"varint,6,opt,name=state,proto3,enum=envoy.api.v2.core.SocketOption_SocketState" json:"state,omitempty"` -} - -func (x *SocketOption) Reset() { - *x = SocketOption{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_api_v2_core_socket_option_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SocketOption) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SocketOption) ProtoMessage() {} - -func (x *SocketOption) ProtoReflect() protoreflect.Message { - mi := &file_envoy_api_v2_core_socket_option_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SocketOption.ProtoReflect.Descriptor instead. 
-func (*SocketOption) Descriptor() ([]byte, []int) { - return file_envoy_api_v2_core_socket_option_proto_rawDescGZIP(), []int{0} -} - -func (x *SocketOption) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *SocketOption) GetLevel() int64 { - if x != nil { - return x.Level - } - return 0 -} - -func (x *SocketOption) GetName() int64 { - if x != nil { - return x.Name - } - return 0 -} - -func (m *SocketOption) GetValue() isSocketOption_Value { - if m != nil { - return m.Value - } - return nil -} - -func (x *SocketOption) GetIntValue() int64 { - if x, ok := x.GetValue().(*SocketOption_IntValue); ok { - return x.IntValue - } - return 0 -} - -func (x *SocketOption) GetBufValue() []byte { - if x, ok := x.GetValue().(*SocketOption_BufValue); ok { - return x.BufValue - } - return nil -} - -func (x *SocketOption) GetState() SocketOption_SocketState { - if x != nil { - return x.State - } - return SocketOption_STATE_PREBIND -} - -type isSocketOption_Value interface { - isSocketOption_Value() -} - -type SocketOption_IntValue struct { - // Because many sockopts take an int value. - IntValue int64 `protobuf:"varint,4,opt,name=int_value,json=intValue,proto3,oneof"` -} - -type SocketOption_BufValue struct { - // Otherwise it's a byte buffer. 
- BufValue []byte `protobuf:"bytes,5,opt,name=buf_value,json=bufValue,proto3,oneof"` -} - -func (*SocketOption_IntValue) isSocketOption_Value() {} - -func (*SocketOption_BufValue) isSocketOption_Value() {} - -var File_envoy_api_v2_core_socket_option_proto protoreflect.FileDescriptor - -var file_envoy_api_v2_core_socket_option_proto_rawDesc = []byte{ - 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, - 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, - 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, - 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0xbb, 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 
0x65, 0x12, 0x1d, - 0x0a, 0x09, 0x62, 0x75, 0x66, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0c, 0x48, 0x00, 0x52, 0x08, 0x62, 0x75, 0x66, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x4b, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, - 0x63, 0x6b, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, - 0x02, 0x10, 0x01, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x46, 0x0a, 0x0b, 0x53, 0x6f, - 0x63, 0x6b, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, - 0x54, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x42, 0x49, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, - 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, - 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x45, 0x4e, 0x49, 0x4e, 0x47, - 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, - 0x42, 0x94, 0x01, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x42, 0x11, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0xba, - 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_api_v2_core_socket_option_proto_rawDescOnce sync.Once - file_envoy_api_v2_core_socket_option_proto_rawDescData = file_envoy_api_v2_core_socket_option_proto_rawDesc -) - -func file_envoy_api_v2_core_socket_option_proto_rawDescGZIP() []byte { - file_envoy_api_v2_core_socket_option_proto_rawDescOnce.Do(func() { - file_envoy_api_v2_core_socket_option_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_api_v2_core_socket_option_proto_rawDescData) - }) - return file_envoy_api_v2_core_socket_option_proto_rawDescData -} - -var file_envoy_api_v2_core_socket_option_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_api_v2_core_socket_option_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_envoy_api_v2_core_socket_option_proto_goTypes = []interface{}{ - (SocketOption_SocketState)(0), // 0: envoy.api.v2.core.SocketOption.SocketState - (*SocketOption)(nil), // 1: envoy.api.v2.core.SocketOption -} -var file_envoy_api_v2_core_socket_option_proto_depIdxs = []int32{ - 0, // 0: envoy.api.v2.core.SocketOption.state:type_name -> envoy.api.v2.core.SocketOption.SocketState - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_envoy_api_v2_core_socket_option_proto_init() } -func file_envoy_api_v2_core_socket_option_proto_init() { - if File_envoy_api_v2_core_socket_option_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_envoy_api_v2_core_socket_option_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SocketOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 
2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_api_v2_core_socket_option_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*SocketOption_IntValue)(nil), - (*SocketOption_BufValue)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_api_v2_core_socket_option_proto_rawDesc, - NumEnums: 1, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_api_v2_core_socket_option_proto_goTypes, - DependencyIndexes: file_envoy_api_v2_core_socket_option_proto_depIdxs, - EnumInfos: file_envoy_api_v2_core_socket_option_proto_enumTypes, - MessageInfos: file_envoy_api_v2_core_socket_option_proto_msgTypes, - }.Build() - File_envoy_api_v2_core_socket_option_proto = out.File - file_envoy_api_v2_core_socket_option_proto_rawDesc = nil - file_envoy_api_v2_core_socket_option_proto_goTypes = nil - file_envoy_api_v2_core_socket_option_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/socket_option.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/socket_option.pb.validate.go deleted file mode 100644 index 1dac04b6a2..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/api/v2/core/socket_option.pb.validate.go +++ /dev/null @@ -1,172 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. 
-// source: envoy/api/v2/core/socket_option.proto - -package core - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on SocketOption with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *SocketOption) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on SocketOption with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in SocketOptionMultiError, or -// nil if none found. 
-func (m *SocketOption) ValidateAll() error { - return m.validate(true) -} - -func (m *SocketOption) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Description - - // no validation rules for Level - - // no validation rules for Name - - if _, ok := SocketOption_SocketState_name[int32(m.GetState())]; !ok { - err := SocketOptionValidationError{ - field: "State", - reason: "value must be one of the defined enum values", - } - if !all { - return err - } - errors = append(errors, err) - } - - switch m.Value.(type) { - - case *SocketOption_IntValue: - // no validation rules for IntValue - - case *SocketOption_BufValue: - // no validation rules for BufValue - - default: - err := SocketOptionValidationError{ - field: "Value", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return SocketOptionMultiError(errors) - } - - return nil -} - -// SocketOptionMultiError is an error wrapping multiple validation errors -// returned by SocketOption.ValidateAll() if the designated constraints aren't met. -type SocketOptionMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m SocketOptionMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m SocketOptionMultiError) AllErrors() []error { return m } - -// SocketOptionValidationError is the validation error returned by -// SocketOption.Validate if the designated constraints aren't met. -type SocketOptionValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e SocketOptionValidationError) Field() string { return e.field } - -// Reason function returns reason value. 
-func (e SocketOptionValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SocketOptionValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SocketOptionValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e SocketOptionValidationError) ErrorName() string { return "SocketOptionValidationError" } - -// Error satisfies the builtin error interface -func (e SocketOptionValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSocketOption.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SocketOptionValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SocketOptionValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go index c5254a96c1..6d0f52456b 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/accesslog/v3/accesslog.proto package accesslogv3 @@ -10,10 +10,11 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v32 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v34 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" v33 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -241,7 +242,7 @@ func (m *AccessLog) GetConfigType() isAccessLog_ConfigType { return nil } -func (x *AccessLog) GetTypedConfig() *any.Any { +func (x *AccessLog) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*AccessLog_TypedConfig); ok { return x.TypedConfig } @@ -253,12 +254,12 @@ type isAccessLog_ConfigType interface { } type AccessLog_TypedConfig struct { - TypedConfig *any.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*AccessLog_TypedConfig) isAccessLog_ConfigType() {} -// [#next-free-field: 13] +// [#next-free-field: 14] type AccessLogFilter struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -277,6 +278,7 @@ type AccessLogFilter struct { // *AccessLogFilter_GrpcStatusFilter // *AccessLogFilter_ExtensionFilter // *AccessLogFilter_MetadataFilter + // *AccessLogFilter_LogTypeFilter FilterSpecifier isAccessLogFilter_FilterSpecifier `protobuf_oneof:"filter_specifier"` } @@ -403,6 +405,13 @@ func (x 
*AccessLogFilter) GetMetadataFilter() *MetadataFilter { return nil } +func (x *AccessLogFilter) GetLogTypeFilter() *LogTypeFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_LogTypeFilter); ok { + return x.LogTypeFilter + } + return nil +} + type isAccessLogFilter_FilterSpecifier interface { isAccessLogFilter_FilterSpecifier() } @@ -468,6 +477,11 @@ type AccessLogFilter_MetadataFilter struct { MetadataFilter *MetadataFilter `protobuf:"bytes,12,opt,name=metadata_filter,json=metadataFilter,proto3,oneof"` } +type AccessLogFilter_LogTypeFilter struct { + // Log Type Filter + LogTypeFilter *LogTypeFilter `protobuf:"bytes,13,opt,name=log_type_filter,json=logTypeFilter,proto3,oneof"` +} + func (*AccessLogFilter_StatusCodeFilter) isAccessLogFilter_FilterSpecifier() {} func (*AccessLogFilter_DurationFilter) isAccessLogFilter_FilterSpecifier() {} @@ -492,6 +506,8 @@ func (*AccessLogFilter_ExtensionFilter) isAccessLogFilter_FilterSpecifier() {} func (*AccessLogFilter_MetadataFilter) isAccessLogFilter_FilterSpecifier() {} +func (*AccessLogFilter_LogTypeFilter) isAccessLogFilter_FilterSpecifier() {} + // Filter on an integer comparison. type ComparisonFilter struct { state protoimpl.MessageState @@ -599,7 +615,10 @@ func (x *StatusCodeFilter) GetComparison() *ComparisonFilter { return nil } -// Filters on total request duration in milliseconds. +// Filters based on the duration of the request or stream, in milliseconds. +// For end of stream access logs, the total duration of the stream will be used. +// For :ref:`periodic access logs`, +// the duration of the stream at the time of log recording will be used. type DurationFilter struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -735,7 +754,7 @@ type RuntimeFilter struct { unknownFields protoimpl.UnknownFields // Runtime key to get an optional overridden numerator for use in the - // *percent_sampled* field. If found in runtime, this value will replace the + // ``percent_sampled`` field. 
If found in runtime, this value will replace the // default numerator. RuntimeKey string `protobuf:"bytes,1,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` // The default sampling percentage. If not specified, defaults to 0% with @@ -747,9 +766,9 @@ type RuntimeFilter struct { // is present, the filter will consistently sample across multiple hosts based // on the runtime key value and the value extracted from // :ref:`x-request-id`. If it is - // missing, or *use_independent_randomness* is set to true, the filter will + // missing, or ``use_independent_randomness`` is set to true, the filter will // randomly sample based on the runtime key value alone. - // *use_independent_randomness* can be used for logging kill switches within + // ``use_independent_randomness`` can be used for logging kill switches within // complex nested :ref:`AndFilter // ` and :ref:`OrFilter // ` blocks that are easier to @@ -1143,6 +1162,65 @@ func (x *MetadataFilter) GetMatchIfKeyNotFound() *wrappers.BoolValue { return nil } +// Filters based on access log type. +type LogTypeFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Logs only records which their type is one of the types defined in this field. + Types []v34.AccessLogType `protobuf:"varint,1,rep,packed,name=types,proto3,enum=envoy.data.accesslog.v3.AccessLogType" json:"types,omitempty"` + // If this field is set to true, the filter will instead block all records + // with a access log type in types field, and allow all other records. 
+ Exclude bool `protobuf:"varint,2,opt,name=exclude,proto3" json:"exclude,omitempty"` +} + +func (x *LogTypeFilter) Reset() { + *x = LogTypeFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogTypeFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogTypeFilter) ProtoMessage() {} + +func (x *LogTypeFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogTypeFilter.ProtoReflect.Descriptor instead. +func (*LogTypeFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{14} +} + +func (x *LogTypeFilter) GetTypes() []v34.AccessLogType { + if x != nil { + return x.Types + } + return nil +} + +func (x *LogTypeFilter) GetExclude() bool { + if x != nil { + return x.Exclude + } + return false +} + // Extension filter is statically registered at runtime. 
type ExtensionFilter struct { state protoimpl.MessageState @@ -1162,7 +1240,7 @@ type ExtensionFilter struct { func (x *ExtensionFilter) Reset() { *x = ExtensionFilter{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14] + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1175,7 +1253,7 @@ func (x *ExtensionFilter) String() string { func (*ExtensionFilter) ProtoMessage() {} func (x *ExtensionFilter) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14] + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1188,7 +1266,7 @@ func (x *ExtensionFilter) ProtoReflect() protoreflect.Message { // Deprecated: Use ExtensionFilter.ProtoReflect.Descriptor instead. 
func (*ExtensionFilter) Descriptor() ([]byte, []int) { - return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{14} + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{15} } func (x *ExtensionFilter) GetName() string { @@ -1205,7 +1283,7 @@ func (m *ExtensionFilter) GetConfigType() isExtensionFilter_ConfigType { return nil } -func (x *ExtensionFilter) GetTypedConfig() *any.Any { +func (x *ExtensionFilter) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*ExtensionFilter_TypedConfig); ok { return x.TypedConfig } @@ -1217,7 +1295,7 @@ type isExtensionFilter_ConfigType interface { } type ExtensionFilter_TypedConfig struct { - TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*ExtensionFilter_TypedConfig) isExtensionFilter_ConfigType() {} @@ -1234,278 +1312,293 @@ var file_envoy_config_accesslog_v3_accesslog_proto_rawDesc = []byte{ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, - 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 
0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, - 0x01, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x42, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, - 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, - 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, - 0x6f, 0x67, 
0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, - 0xf6, 0x08, 0x0a, 0x0f, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, - 0x64, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x12, 0x54, 0x0a, 0x0f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x64, 0x61, 0x74, + 0x61, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, + 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x01, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, - 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x68, 0x0a, 0x17, 0x6e, 0x6f, 0x74, 0x5f, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x39, 0x0a, + 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 
0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, + 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x42, 0x0d, 0x0a, 0x0b, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, + 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xca, 0x09, 0x0a, 0x0f, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x12, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0e, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x68, 0x0a, 0x17, 0x6e, 0x6f, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x5f, 0x66, 
0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x48, 0x00, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x57, 0x0a, 0x10, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x54, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, + 0x00, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0a, 0x61, 0x6e, 0x64, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, + 0x00, 0x52, 0x09, 0x61, 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 
0x72, 0x12, 0x42, 0x0a, 0x09, + 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x4e, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x12, 0x57, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x61, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x6c, 0x61, + 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, + 0x74, 
0x65, 0x72, 0x12, 0x5b, 0x0a, 0x12, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, + 0x67, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x57, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x61, - 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0e, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x72, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0a, - 0x61, 0x6e, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x64, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 
0x52, 0x09, 0x61, 0x6e, 0x64, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0f, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x52, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x17, 0x0a, 0x10, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 
0x65, 0x72, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xf9, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, + 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x02, 0x6f, 0x70, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, 0x6f, - 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, + 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x70, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x02, 0x6f, 0x70, 0x12, 0x43, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x1c, 0x0a, 0x02, 0x4f, 0x70, 0x12, + 0x06, 0x0a, 0x02, 0x45, 0x51, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x45, 0x10, 0x01, 0x12, + 0x06, 0x0a, 0x02, 0x4c, 0x45, 0x10, 0x02, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x22, 0xa3, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, + 0x46, 0x69, 0x6c, 0x74, 
0x65, 0x72, 0x12, 0x55, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, + 0x69, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, + 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x3a, 0x38, 0x9a, + 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, + 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x9f, 0x01, 0x0a, 0x0e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0a, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x12, 0x72, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x12, 0x67, 0x72, - 0x70, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, - 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x67, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, - 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x12, 0x54, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, - 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, - 0x41, 
0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, - 0x17, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xf9, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x6d, - 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, - 0x02, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, - 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x70, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, - 0x02, 0x10, 0x01, 0x52, 0x02, 0x6f, 0x70, 0x12, 0x43, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x1c, 0x0a, 0x02, - 0x4f, 0x70, 0x12, 0x06, 0x0a, 0x02, 0x45, 0x51, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x45, - 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4c, 0x45, 0x10, 0x02, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, - 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x22, 0xa3, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, - 0x6f, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, - 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, - 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, - 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, - 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, + 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, + 0x6e, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x54, 0x0a, 0x14, 0x4e, 0x6f, 0x74, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, + 0x4a, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 
0x72, 0x2e, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x63, + 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xf9, 0x01, 0x0a, 0x0d, + 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, + 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x0f, 0x70, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x52, 0x0e, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x75, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x6e, 0x65, 0x73, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x75, 0x73, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x70, + 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x6e, 0x65, 0x73, 0x73, + 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x43, 0x6f, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x9f, 0x01, 0x0a, 0x0e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, - 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2b, 
0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, - 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, - 0x69, 0x73, 0x6f, 0x6e, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x54, 0x0a, 0x14, - 0x4e, 0x6f, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, - 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x22, 0x4a, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8e, 0x01, 0x0a, 0x09, 0x41, 0x6e, 0x64, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 
0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x07, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, - 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xf9, - 0x01, 0x0a, 0x0d, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x12, 0x28, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x0f, 0x70, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x53, 0x61, - 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x75, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x64, - 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x6e, - 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x75, 0x73, 0x65, 0x49, 0x6e, - 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x6e, - 0x65, 0x73, 0x73, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, - 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 
- 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8e, 0x01, 0x0a, 0x09, 0x41, - 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, - 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, - 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, - 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x32, 0x2e, 0x41, 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x08, - 0x4f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, - 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, - 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, - 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x32, 0x2e, 0x4f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x0c, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x06, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, - 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xea, 0x01, 0x0a, 0x12, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x12, 0x97, 0x01, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x42, 0x80, 0x01, 0xfa, 0x42, 0x7d, 0x92, 0x01, 0x7a, 0x22, 0x78, 0x72, 0x76, 0x52, 0x02, 0x4c, - 0x48, 0x52, 0x02, 0x55, 0x48, 0x52, 0x02, 0x55, 0x54, 0x52, 0x02, 0x4c, 0x52, 0x52, 0x02, 0x55, - 0x52, 0x52, 0x02, 0x55, 0x46, 0x52, 0x02, 0x55, 0x43, 0x52, 0x02, 0x55, 0x4f, 0x52, 0x02, 0x4e, - 0x52, 0x52, 0x02, 0x44, 0x49, 0x52, 0x02, 0x46, 0x49, 0x52, 0x02, 0x52, 0x4c, 0x52, 0x04, 0x55, - 0x41, 0x45, 0x58, 0x52, 0x04, 0x52, 0x4c, 0x53, 0x45, 0x52, 0x02, 0x44, 0x43, 0x52, 0x03, 0x55, - 0x52, 0x58, 0x52, 0x02, 0x53, 0x49, 0x52, 0x02, 0x49, 0x48, 0x52, 0x03, 0x44, 0x50, 0x45, 0x52, - 0x05, 0x55, 0x4d, 0x53, 0x44, 0x52, 0x52, 0x04, 0x52, 0x46, 0x43, 0x46, 0x52, 0x04, 0x4e, 0x46, - 0x43, 0x46, 0x52, 0x02, 0x44, 0x54, 0x52, 0x03, 0x55, 0x50, 0x45, 0x52, 0x02, 0x4e, 0x43, 0x52, - 0x02, 0x4f, 0x4d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, - 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 
0x73, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x80, 0x04, 0x0a, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x08, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x32, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, - 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, - 0x63, 0x6c, 0x75, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, - 0x6c, 0x75, 0x64, 0x65, 0x22, 0xb8, 0x02, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x06, 0x0a, 0x02, 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4e, 0x43, 0x45, - 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, - 0x47, 0x55, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, - 0x4c, 0x49, 0x4e, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, - 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, - 0x0a, 0x0e, 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, - 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, - 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x53, - 0x4f, 0x55, 
0x52, 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, - 0x08, 0x12, 0x17, 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x43, - 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, - 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, 0x54, 0x5f, 0x4f, - 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, - 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, - 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, - 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, 0x0a, 0x09, 0x44, - 0x41, 0x54, 0x41, 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, - 0x41, 0x55, 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x3a, - 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xda, 0x01, 0x0a, 0x0e, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x07, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e, - 0x0a, 0x16, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x49, 0x66, 0x4b, 0x65, 0x79, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x3a, 0x36, - 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41, + 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x08, 0x4f, 0x72, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x07, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 
0x65, 0x72, + 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xea, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x97, 0x01, + 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x80, 0x01, + 0xfa, 0x42, 0x7d, 0x92, 0x01, 0x7a, 0x22, 0x78, 0x72, 0x76, 0x52, 0x02, 0x4c, 0x48, 0x52, 0x02, + 0x55, 0x48, 0x52, 0x02, 0x55, 0x54, 0x52, 0x02, 0x4c, 0x52, 0x52, 0x02, 0x55, 0x52, 0x52, 0x02, + 0x55, 0x46, 0x52, 0x02, 0x55, 0x43, 0x52, 0x02, 0x55, 0x4f, 0x52, 0x02, 0x4e, 0x52, 0x52, 0x02, + 0x44, 0x49, 0x52, 0x02, 0x46, 0x49, 0x52, 0x02, 0x52, 0x4c, 0x52, 0x04, 0x55, 0x41, 0x45, 0x58, + 0x52, 0x04, 0x52, 0x4c, 0x53, 0x45, 0x52, 0x02, 0x44, 0x43, 0x52, 0x03, 0x55, 0x52, 0x58, 0x52, + 0x02, 0x53, 0x49, 0x52, 0x02, 0x49, 0x48, 0x52, 0x03, 0x44, 0x50, 0x45, 0x52, 0x05, 0x55, 0x4d, + 0x53, 0x44, 0x52, 0x52, 0x04, 0x52, 0x46, 0x43, 0x46, 0x52, 0x04, 0x4e, 0x46, 0x43, 0x46, 0x52, + 0x02, 0x44, 0x54, 0x52, 0x03, 0x55, 0x50, 0x45, 0x52, 0x02, 0x4e, 0x43, 0x52, 0x02, 0x4f, 0x4d, + 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x22, 0x80, 0x04, 0x0a, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x65, 0x73, 
0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0d, + 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x22, 0xb8, 0x02, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x06, 0x0a, 0x02, + 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, + 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, 0x4d, + 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, 0x4e, + 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, + 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x41, + 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, 0x12, + 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, + 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, + 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, + 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, 
0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, + 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, + 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, + 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41, + 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, + 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54, + 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x3a, 0x38, 0x9a, 0xc5, + 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xda, 0x01, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x16, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, + 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x66, + 0x4b, 0x65, 0x79, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x3a, 0x36, 0x9a, 0xc5, 0x88, + 
0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x22, 0x76, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x0f, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, + 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x37, + 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 
0xb6, 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, - 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, - 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, - 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, - 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, - 0x91, 0x01, 0x0a, 0x27, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, - 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x3b, - 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x76, 0x33, 0xba, 0x80, 
0xc8, 0xd1, 0x06, - 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x42, 0x91, 0x01, 0x0a, 0x27, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, + 0x42, 0x0e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x76, 0x33, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1521,7 +1614,7 @@ func file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP() []byte { } var file_envoy_config_accesslog_v3_accesslog_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_envoy_config_accesslog_v3_accesslog_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_envoy_config_accesslog_v3_accesslog_proto_msgTypes = make([]protoimpl.MessageInfo, 16) var file_envoy_config_accesslog_v3_accesslog_proto_goTypes = []interface{}{ (ComparisonFilter_Op)(0), // 0: envoy.config.accesslog.v3.ComparisonFilter.Op (GrpcStatusFilter_Status)(0), // 1: envoy.config.accesslog.v3.GrpcStatusFilter.Status @@ 
-1539,17 +1632,19 @@ var file_envoy_config_accesslog_v3_accesslog_proto_goTypes = []interface{}{ (*ResponseFlagFilter)(nil), // 13: envoy.config.accesslog.v3.ResponseFlagFilter (*GrpcStatusFilter)(nil), // 14: envoy.config.accesslog.v3.GrpcStatusFilter (*MetadataFilter)(nil), // 15: envoy.config.accesslog.v3.MetadataFilter - (*ExtensionFilter)(nil), // 16: envoy.config.accesslog.v3.ExtensionFilter - (*any.Any)(nil), // 17: google.protobuf.Any - (*v3.RuntimeUInt32)(nil), // 18: envoy.config.core.v3.RuntimeUInt32 - (*v31.FractionalPercent)(nil), // 19: envoy.type.v3.FractionalPercent - (*v32.HeaderMatcher)(nil), // 20: envoy.config.route.v3.HeaderMatcher - (*v33.MetadataMatcher)(nil), // 21: envoy.type.matcher.v3.MetadataMatcher - (*wrappers.BoolValue)(nil), // 22: google.protobuf.BoolValue + (*LogTypeFilter)(nil), // 16: envoy.config.accesslog.v3.LogTypeFilter + (*ExtensionFilter)(nil), // 17: envoy.config.accesslog.v3.ExtensionFilter + (*any1.Any)(nil), // 18: google.protobuf.Any + (*v3.RuntimeUInt32)(nil), // 19: envoy.config.core.v3.RuntimeUInt32 + (*v31.FractionalPercent)(nil), // 20: envoy.type.v3.FractionalPercent + (*v32.HeaderMatcher)(nil), // 21: envoy.config.route.v3.HeaderMatcher + (*v33.MetadataMatcher)(nil), // 22: envoy.type.matcher.v3.MetadataMatcher + (*wrappers.BoolValue)(nil), // 23: google.protobuf.BoolValue + (v34.AccessLogType)(0), // 24: envoy.data.accesslog.v3.AccessLogType } var file_envoy_config_accesslog_v3_accesslog_proto_depIdxs = []int32{ 3, // 0: envoy.config.accesslog.v3.AccessLog.filter:type_name -> envoy.config.accesslog.v3.AccessLogFilter - 17, // 1: envoy.config.accesslog.v3.AccessLog.typed_config:type_name -> google.protobuf.Any + 18, // 1: envoy.config.accesslog.v3.AccessLog.typed_config:type_name -> google.protobuf.Any 5, // 2: envoy.config.accesslog.v3.AccessLogFilter.status_code_filter:type_name -> envoy.config.accesslog.v3.StatusCodeFilter 6, // 3: envoy.config.accesslog.v3.AccessLogFilter.duration_filter:type_name -> 
envoy.config.accesslog.v3.DurationFilter 7, // 4: envoy.config.accesslog.v3.AccessLogFilter.not_health_check_filter:type_name -> envoy.config.accesslog.v3.NotHealthCheckFilter @@ -1560,25 +1655,27 @@ var file_envoy_config_accesslog_v3_accesslog_proto_depIdxs = []int32{ 12, // 9: envoy.config.accesslog.v3.AccessLogFilter.header_filter:type_name -> envoy.config.accesslog.v3.HeaderFilter 13, // 10: envoy.config.accesslog.v3.AccessLogFilter.response_flag_filter:type_name -> envoy.config.accesslog.v3.ResponseFlagFilter 14, // 11: envoy.config.accesslog.v3.AccessLogFilter.grpc_status_filter:type_name -> envoy.config.accesslog.v3.GrpcStatusFilter - 16, // 12: envoy.config.accesslog.v3.AccessLogFilter.extension_filter:type_name -> envoy.config.accesslog.v3.ExtensionFilter + 17, // 12: envoy.config.accesslog.v3.AccessLogFilter.extension_filter:type_name -> envoy.config.accesslog.v3.ExtensionFilter 15, // 13: envoy.config.accesslog.v3.AccessLogFilter.metadata_filter:type_name -> envoy.config.accesslog.v3.MetadataFilter - 0, // 14: envoy.config.accesslog.v3.ComparisonFilter.op:type_name -> envoy.config.accesslog.v3.ComparisonFilter.Op - 18, // 15: envoy.config.accesslog.v3.ComparisonFilter.value:type_name -> envoy.config.core.v3.RuntimeUInt32 - 4, // 16: envoy.config.accesslog.v3.StatusCodeFilter.comparison:type_name -> envoy.config.accesslog.v3.ComparisonFilter - 4, // 17: envoy.config.accesslog.v3.DurationFilter.comparison:type_name -> envoy.config.accesslog.v3.ComparisonFilter - 19, // 18: envoy.config.accesslog.v3.RuntimeFilter.percent_sampled:type_name -> envoy.type.v3.FractionalPercent - 3, // 19: envoy.config.accesslog.v3.AndFilter.filters:type_name -> envoy.config.accesslog.v3.AccessLogFilter - 3, // 20: envoy.config.accesslog.v3.OrFilter.filters:type_name -> envoy.config.accesslog.v3.AccessLogFilter - 20, // 21: envoy.config.accesslog.v3.HeaderFilter.header:type_name -> envoy.config.route.v3.HeaderMatcher - 1, // 22: 
envoy.config.accesslog.v3.GrpcStatusFilter.statuses:type_name -> envoy.config.accesslog.v3.GrpcStatusFilter.Status - 21, // 23: envoy.config.accesslog.v3.MetadataFilter.matcher:type_name -> envoy.type.matcher.v3.MetadataMatcher - 22, // 24: envoy.config.accesslog.v3.MetadataFilter.match_if_key_not_found:type_name -> google.protobuf.BoolValue - 17, // 25: envoy.config.accesslog.v3.ExtensionFilter.typed_config:type_name -> google.protobuf.Any - 26, // [26:26] is the sub-list for method output_type - 26, // [26:26] is the sub-list for method input_type - 26, // [26:26] is the sub-list for extension type_name - 26, // [26:26] is the sub-list for extension extendee - 0, // [0:26] is the sub-list for field type_name + 16, // 14: envoy.config.accesslog.v3.AccessLogFilter.log_type_filter:type_name -> envoy.config.accesslog.v3.LogTypeFilter + 0, // 15: envoy.config.accesslog.v3.ComparisonFilter.op:type_name -> envoy.config.accesslog.v3.ComparisonFilter.Op + 19, // 16: envoy.config.accesslog.v3.ComparisonFilter.value:type_name -> envoy.config.core.v3.RuntimeUInt32 + 4, // 17: envoy.config.accesslog.v3.StatusCodeFilter.comparison:type_name -> envoy.config.accesslog.v3.ComparisonFilter + 4, // 18: envoy.config.accesslog.v3.DurationFilter.comparison:type_name -> envoy.config.accesslog.v3.ComparisonFilter + 20, // 19: envoy.config.accesslog.v3.RuntimeFilter.percent_sampled:type_name -> envoy.type.v3.FractionalPercent + 3, // 20: envoy.config.accesslog.v3.AndFilter.filters:type_name -> envoy.config.accesslog.v3.AccessLogFilter + 3, // 21: envoy.config.accesslog.v3.OrFilter.filters:type_name -> envoy.config.accesslog.v3.AccessLogFilter + 21, // 22: envoy.config.accesslog.v3.HeaderFilter.header:type_name -> envoy.config.route.v3.HeaderMatcher + 1, // 23: envoy.config.accesslog.v3.GrpcStatusFilter.statuses:type_name -> envoy.config.accesslog.v3.GrpcStatusFilter.Status + 22, // 24: envoy.config.accesslog.v3.MetadataFilter.matcher:type_name -> envoy.type.matcher.v3.MetadataMatcher + 
23, // 25: envoy.config.accesslog.v3.MetadataFilter.match_if_key_not_found:type_name -> google.protobuf.BoolValue + 24, // 26: envoy.config.accesslog.v3.LogTypeFilter.types:type_name -> envoy.data.accesslog.v3.AccessLogType + 18, // 27: envoy.config.accesslog.v3.ExtensionFilter.typed_config:type_name -> google.protobuf.Any + 28, // [28:28] is the sub-list for method output_type + 28, // [28:28] is the sub-list for method input_type + 28, // [28:28] is the sub-list for extension type_name + 28, // [28:28] is the sub-list for extension extendee + 0, // [0:28] is the sub-list for field type_name } func init() { file_envoy_config_accesslog_v3_accesslog_proto_init() } @@ -1756,6 +1853,18 @@ func file_envoy_config_accesslog_v3_accesslog_proto_init() { } } file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogTypeFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionFilter); i { case 0: return &v.state @@ -1784,8 +1893,9 @@ func file_envoy_config_accesslog_v3_accesslog_proto_init() { (*AccessLogFilter_GrpcStatusFilter)(nil), (*AccessLogFilter_ExtensionFilter)(nil), (*AccessLogFilter_MetadataFilter)(nil), + (*AccessLogFilter_LogTypeFilter)(nil), } - file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14].OneofWrappers = []interface{}{ + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15].OneofWrappers = []interface{}{ (*ExtensionFilter_TypedConfig)(nil), } type x struct{} @@ -1794,7 +1904,7 @@ func file_envoy_config_accesslog_v3_accesslog_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_accesslog_v3_accesslog_proto_rawDesc, NumEnums: 2, - NumMessages: 15, + NumMessages: 16, NumExtensions: 0, NumServices: 0, }, diff 
--git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go index aa700c658a..e8486a5584 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go @@ -17,6 +17,8 @@ import ( "unicode/utf8" "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" ) // ensure the imports are used @@ -33,6 +35,8 @@ var ( _ = (*mail.Address)(nil) _ = anypb.Any{} _ = sort.Sort + + _ = v3.AccessLogType(0) ) // Validate checks the field values on AccessLog with the rules defined in the @@ -88,9 +92,18 @@ func (m *AccessLog) validate(all bool) error { } } - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *AccessLog_TypedConfig: + if v == nil { + err := AccessLogValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -121,6 +134,8 @@ func (m *AccessLog) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -222,9 +237,20 @@ func (m *AccessLogFilter) validate(all bool) error { var errors []error - switch m.FilterSpecifier.(type) { - + oneofFilterSpecifierPresent := false + switch v := m.FilterSpecifier.(type) { case *AccessLogFilter_StatusCodeFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true if all { switch v := 
interface{}(m.GetStatusCodeFilter()).(type) { @@ -256,6 +282,17 @@ func (m *AccessLogFilter) validate(all bool) error { } case *AccessLogFilter_DurationFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true if all { switch v := interface{}(m.GetDurationFilter()).(type) { @@ -287,6 +324,17 @@ func (m *AccessLogFilter) validate(all bool) error { } case *AccessLogFilter_NotHealthCheckFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true if all { switch v := interface{}(m.GetNotHealthCheckFilter()).(type) { @@ -318,6 +366,17 @@ func (m *AccessLogFilter) validate(all bool) error { } case *AccessLogFilter_TraceableFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true if all { switch v := interface{}(m.GetTraceableFilter()).(type) { @@ -349,6 +408,17 @@ func (m *AccessLogFilter) validate(all bool) error { } case *AccessLogFilter_RuntimeFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true if all { switch v := interface{}(m.GetRuntimeFilter()).(type) { @@ -380,6 +450,17 @@ func (m *AccessLogFilter) validate(all bool) error { } case *AccessLogFilter_AndFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + 
errors = append(errors, err) + } + oneofFilterSpecifierPresent = true if all { switch v := interface{}(m.GetAndFilter()).(type) { @@ -411,6 +492,17 @@ func (m *AccessLogFilter) validate(all bool) error { } case *AccessLogFilter_OrFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true if all { switch v := interface{}(m.GetOrFilter()).(type) { @@ -442,6 +534,17 @@ func (m *AccessLogFilter) validate(all bool) error { } case *AccessLogFilter_HeaderFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true if all { switch v := interface{}(m.GetHeaderFilter()).(type) { @@ -473,6 +576,17 @@ func (m *AccessLogFilter) validate(all bool) error { } case *AccessLogFilter_ResponseFlagFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true if all { switch v := interface{}(m.GetResponseFlagFilter()).(type) { @@ -504,6 +618,17 @@ func (m *AccessLogFilter) validate(all bool) error { } case *AccessLogFilter_GrpcStatusFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true if all { switch v := interface{}(m.GetGrpcStatusFilter()).(type) { @@ -535,6 +660,17 @@ func (m *AccessLogFilter) validate(all bool) error { } case *AccessLogFilter_ExtensionFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: 
"oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true if all { switch v := interface{}(m.GetExtensionFilter()).(type) { @@ -566,6 +702,17 @@ func (m *AccessLogFilter) validate(all bool) error { } case *AccessLogFilter_MetadataFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true if all { switch v := interface{}(m.GetMetadataFilter()).(type) { @@ -596,7 +743,52 @@ func (m *AccessLogFilter) validate(all bool) error { } } + case *AccessLogFilter_LogTypeFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetLogTypeFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "LogTypeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "LogTypeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLogTypeFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "LogTypeFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + default: + _ = v // ensures v is used + } + if !oneofFilterSpecifierPresent { err := AccessLogFilterValidationError{ field: "FilterSpecifier", reason: "value is required", @@ -605,7 +797,6 @@ func (m 
*AccessLogFilter) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -2311,6 +2502,124 @@ var _ interface { ErrorName() string } = MetadataFilterValidationError{} +// Validate checks the field values on LogTypeFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LogTypeFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LogTypeFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LogTypeFilterMultiError, or +// nil if none found. +func (m *LogTypeFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *LogTypeFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetTypes() { + _, _ = idx, item + + if _, ok := v3.AccessLogType_name[int32(item)]; !ok { + err := LogTypeFilterValidationError{ + field: fmt.Sprintf("Types[%v]", idx), + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for Exclude + + if len(errors) > 0 { + return LogTypeFilterMultiError(errors) + } + + return nil +} + +// LogTypeFilterMultiError is an error wrapping multiple validation errors +// returned by LogTypeFilter.ValidateAll() if the designated constraints +// aren't met. +type LogTypeFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LogTypeFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m LogTypeFilterMultiError) AllErrors() []error { return m } + +// LogTypeFilterValidationError is the validation error returned by +// LogTypeFilter.Validate if the designated constraints aren't met. +type LogTypeFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LogTypeFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LogTypeFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LogTypeFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LogTypeFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LogTypeFilterValidationError) ErrorName() string { return "LogTypeFilterValidationError" } + +// Error satisfies the builtin error interface +func (e LogTypeFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLogTypeFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LogTypeFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LogTypeFilterValidationError{} + // Validate checks the field values on ExtensionFilter with the rules defined // in the proto definition for this message. If any rules are violated, the // first error encountered is returned, or nil if there are no violations. 
@@ -2335,9 +2644,18 @@ func (m *ExtensionFilter) validate(all bool) error { // no validation rules for Name - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *ExtensionFilter_TypedConfig: + if v == nil { + err := ExtensionFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -2368,6 +2686,8 @@ func (m *ExtensionFilter) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go index 6cbbc4e642..522d6ea508 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/bootstrap/v3/bootstrap.proto package bootstrapv3 @@ -149,7 +149,7 @@ func (CustomInlineHeader_InlineHeaderType) EnumDescriptor() ([]byte, []int) { } // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 35] +// [#next-free-field: 38] type Bootstrap struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -210,7 +210,7 @@ type Bootstrap struct { // Optional duration between flushes to configured stats sinks. For // performance reasons Envoy latches counters and only flushes counters and // gauges at a periodic interval. If not specified the default is 5000ms (5 - // seconds). 
Only one of `stats_flush_interval` or `stats_flush_on_admin` + // seconds). Only one of ``stats_flush_interval`` or ``stats_flush_on_admin`` // can be set. // Duration must be at least 1ms and at most 5 min. StatsFlushInterval *duration.Duration `protobuf:"bytes,7,opt,name=stats_flush_interval,json=statsFlushInterval,proto3" json:"stats_flush_interval,omitempty"` @@ -219,7 +219,7 @@ type Bootstrap struct { StatsFlush isBootstrap_StatsFlush `protobuf_oneof:"stats_flush"` // Optional watchdog configuration. // This is for a single watchdog configuration for the entire system. - // Deprecated in favor of *watchdogs* which has finer granularity. + // Deprecated in favor of ``watchdogs`` which has finer granularity. // // Deprecated: Do not use. Watchdog *Watchdog `protobuf:"bytes,8,opt,name=watchdog,proto3" json:"watchdog,omitempty"` @@ -269,7 +269,7 @@ type Bootstrap struct { // when :ref:`dns_resolvers ` and // :ref:`use_tcp_for_dns_lookups ` are // specified. - // This field is deprecated in favor of *dns_resolution_config* + // This field is deprecated in favor of ``dns_resolution_config`` // which aggregates all of the DNS resolver configuration in a single message. // // Deprecated: Do not use. @@ -287,12 +287,12 @@ type Bootstrap struct { // or any other DNS resolver types and the related parameters. // For example, an object of // :ref:`CaresDnsResolverConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration replaces the + // can be packed into this ``typed_dns_resolver_config``. This configuration replaces the // :ref:`dns_resolution_config ` // configuration. - // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // when *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. 
+ // During the transition period when both ``dns_resolution_config`` and ``typed_dns_resolver_config`` exists, + // when ``typed_dns_resolver_config`` is in place, Envoy will use it and ignore ``dns_resolution_config``. + // When ``typed_dns_resolver_config`` is missing, the default behavior is in place. // [#extension-category: envoy.network.dns_resolver] TypedDnsResolverConfig *v3.TypedExtensionConfig `protobuf:"bytes,31,opt,name=typed_dns_resolver_config,json=typedDnsResolverConfig,proto3" json:"typed_dns_resolver_config,omitempty"` // Specifies optional bootstrap extensions to be instantiated at startup time. @@ -306,18 +306,18 @@ type Bootstrap struct { // xdstp:// URL authority resolution. The algorithm is as // follows: // 1. The authority field is taken from the xdstp:// URL, call - // this *resource_authority*. - // 2. *resource_authority* is compared against the authorities in any peer - // *ConfigSource*. The peer *ConfigSource* is the configuration source + // this ``resource_authority``. + // 2. ``resource_authority`` is compared against the authorities in any peer + // ``ConfigSource``. The peer ``ConfigSource`` is the configuration source // message which would have been used unconditionally for resolution // with opaque resource names. If there is a match with an authority, the - // peer *ConfigSource* message is used. - // 3. *resource_authority* is compared sequentially with the authorities in - // each configuration source in *config_sources*. The first *ConfigSource* + // peer ``ConfigSource`` message is used. + // 3. ``resource_authority`` is compared sequentially with the authorities in + // each configuration source in ``config_sources``. The first ``ConfigSource`` // to match wins. // 4. As a fallback, if no configuration source matches, then - // *default_config_source* is used. - // 5. If *default_config_source* is not specified, resolution fails. + // ``default_config_source`` is used. + // 5. 
If ``default_config_source`` is not specified, resolution fails. // [#not-implemented-hide:] ConfigSources []*v3.ConfigSource `protobuf:"bytes,22,rep,name=config_sources,json=configSources,proto3" json:"config_sources,omitempty"` // Default configuration source for xdstp:// URLs if all @@ -346,6 +346,28 @@ type Bootstrap struct { // If the value is not specified, Google RE2 will be used by default. // [#extension-category: envoy.regex_engines] DefaultRegexEngine *v3.TypedExtensionConfig `protobuf:"bytes,34,opt,name=default_regex_engine,json=defaultRegexEngine,proto3" json:"default_regex_engine,omitempty"` + // Optional XdsResourcesDelegate configuration, which allows plugging custom logic into both + // fetch and load events during xDS processing. + // If a value is not specified, no XdsResourcesDelegate will be used. + // TODO(abeyad): Add public-facing documentation. + // [#not-implemented-hide:] + XdsDelegateExtension *v3.TypedExtensionConfig `protobuf:"bytes,35,opt,name=xds_delegate_extension,json=xdsDelegateExtension,proto3" json:"xds_delegate_extension,omitempty"` + // Optional XdsConfigTracker configuration, which allows tracking xDS responses in external components, + // e.g., external tracer or monitor. It provides the process point when receive, ingest, or fail to + // process xDS resources and messages. If a value is not specified, no XdsConfigTracker will be used. + // + // .. note:: + // + // There are no in-repo extensions currently, and the :repo:`XdsConfigTracker ` + // interface should be implemented before using. + // See :repo:`xds_config_tracker_integration_test ` + // for an example usage of the interface. + XdsConfigTrackerExtension *v3.TypedExtensionConfig `protobuf:"bytes,36,opt,name=xds_config_tracker_extension,json=xdsConfigTrackerExtension,proto3" json:"xds_config_tracker_extension,omitempty"` + // [#not-implemented-hide:] + // This controls the type of listener manager configured for Envoy. 
Currently + // Envoy only supports ListenerManager for this field and Envoy Mobile + // supports ApiListenerManager. + ListenerManager *v3.TypedExtensionConfig `protobuf:"bytes,37,opt,name=listener_manager,json=listenerManager,proto3" json:"listener_manager,omitempty"` } func (x *Bootstrap) Reset() { @@ -615,14 +637,35 @@ func (x *Bootstrap) GetDefaultRegexEngine() *v3.TypedExtensionConfig { return nil } +func (x *Bootstrap) GetXdsDelegateExtension() *v3.TypedExtensionConfig { + if x != nil { + return x.XdsDelegateExtension + } + return nil +} + +func (x *Bootstrap) GetXdsConfigTrackerExtension() *v3.TypedExtensionConfig { + if x != nil { + return x.XdsConfigTrackerExtension + } + return nil +} + +func (x *Bootstrap) GetListenerManager() *v3.TypedExtensionConfig { + if x != nil { + return x.ListenerManager + } + return nil +} + type isBootstrap_StatsFlush interface { isBootstrap_StatsFlush() } type Bootstrap_StatsFlushOnAdmin struct { // Flush stats to sinks only when queried for on the admin interface. If set, - // a flush timer is not created. Only one of `stats_flush_on_admin` or - // `stats_flush_interval` can be set. + // a flush timer is not created. Only one of ``stats_flush_on_admin`` or + // ``stats_flush_interval`` can be set. StatsFlushOnAdmin bool `protobuf:"varint,29,opt,name=stats_flush_on_admin,json=statsFlushOnAdmin,proto3,oneof"` } @@ -642,7 +685,7 @@ type Admin struct { // The path to write the access log for the administration server. If no // access log is desired specify ‘/dev/null’. This is only required if // :ref:`address ` is set. - // Deprecated in favor of *access_log* which offers more options. + // Deprecated in favor of ``access_log`` which offers more options. // // Deprecated: Do not use. 
AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"` @@ -744,7 +787,7 @@ type ClusterManager struct { // Name of the local cluster (i.e., the cluster that owns the Envoy running // this configuration). In order to enable :ref:`zone aware routing // ` this option must be set. - // If *local_cluster_name* is defined then :ref:`clusters + // If ``local_cluster_name`` is defined then :ref:`clusters // ` must be defined in the :ref:`Bootstrap // static cluster resources // `. This is unrelated to @@ -757,7 +800,7 @@ type ClusterManager struct { // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. UpstreamBindConfig *v3.BindConfig `protobuf:"bytes,3,opt,name=upstream_bind_config,json=upstreamBindConfig,proto3" json:"upstream_bind_config,omitempty"` // A management server endpoint to stream load stats to via - // *StreamLoadStats*. This must have :ref:`api_type + // ``StreamLoadStats``. This must have :ref:`api_type // ` :ref:`GRPC // `. LoadStatsConfig *v3.ApiConfigSource `protobuf:"bytes,4,opt,name=load_stats_config,json=loadStatsConfig,proto3" json:"load_stats_config,omitempty"` @@ -893,31 +936,31 @@ type Watchdog struct { unknownFields protoimpl.UnknownFields // Register actions that will fire on given WatchDog events. - // See *WatchDogAction* for priority of events. + // See ``WatchDogAction`` for priority of events. Actions []*Watchdog_WatchdogAction `protobuf:"bytes,7,rep,name=actions,proto3" json:"actions,omitempty"` // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_miss* statistic. If not specified the default is 200ms. + // ``watchdog_miss`` statistic. If not specified the default is 200ms. 
MissTimeout *duration.Duration `protobuf:"bytes,1,opt,name=miss_timeout,json=missTimeout,proto3" json:"miss_timeout,omitempty"` // The duration after which Envoy counts a nonresponsive thread in the - // *watchdog_mega_miss* statistic. If not specified the default is + // ``watchdog_mega_miss`` statistic. If not specified the default is // 1000ms. MegamissTimeout *duration.Duration `protobuf:"bytes,2,opt,name=megamiss_timeout,json=megamissTimeout,proto3" json:"megamiss_timeout,omitempty"` // If a watched thread has been nonresponsive for this duration, assume a // programming error and kill the entire Envoy process. Set to 0 to disable // kill behavior. If not specified the default is 0 (disabled). KillTimeout *duration.Duration `protobuf:"bytes,3,opt,name=kill_timeout,json=killTimeout,proto3" json:"kill_timeout,omitempty"` - // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is + // Defines the maximum jitter used to adjust the ``kill_timeout`` if ``kill_timeout`` is // enabled. Enabling this feature would help to reduce risk of synchronized // watchdog kill events across proxies due to external triggers. Set to 0 to // disable. If not specified the default is 0 (disabled). MaxKillTimeoutJitter *duration.Duration `protobuf:"bytes,6,opt,name=max_kill_timeout_jitter,json=maxKillTimeoutJitter,proto3" json:"max_kill_timeout_jitter,omitempty"` - // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) + // If ``max(2, ceil(registered_threads * Fraction(*multikill_threshold*)))`` // threads have been nonresponsive for at least this duration kill the entire // Envoy process. Set to 0 to disable this behavior. If not specified the // default is 0 (disabled). 
MultikillTimeout *duration.Duration `protobuf:"bytes,4,opt,name=multikill_timeout,json=multikillTimeout,proto3" json:"multikill_timeout,omitempty"` - // Sets the threshold for *multikill_timeout* in terms of the percentage of - // nonresponsive threads required for the *multikill_timeout*. + // Sets the threshold for ``multikill_timeout`` in terms of the percentage of + // nonresponsive threads required for the ``multikill_timeout``. // If not specified the default is 0. MultikillThreshold *v35.Percent `protobuf:"bytes,5,opt,name=multikill_threshold,json=multikillThreshold,proto3" json:"multikill_threshold,omitempty"` } @@ -1009,7 +1052,7 @@ func (x *Watchdog) GetMultikillThreshold() *v35.Percent { // have an out of band system to terminate the process. // // The interface for the extension is ``Envoy::Server::Configuration::FatalAction``. -// *FatalAction* extensions live in the ``envoy.extensions.fatal_actions`` API +// ``FatalAction`` extensions live in the ``envoy.extensions.fatal_actions`` API // namespace. type FatalAction struct { state protoimpl.MessageState @@ -1810,7 +1853,7 @@ type RuntimeLayer_RtdsLayer struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Resource to subscribe to at *rtds_config* for the RTDS layer. + // Resource to subscribe to at ``rtds_config`` for the RTDS layer. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // RTDS configuration source. 
RtdsConfig *v3.ConfigSource `protobuf:"bytes,2,opt,name=rtds_config,json=rtdsConfig,proto3" json:"rtds_config,omitempty"` @@ -1925,7 +1968,7 @@ var file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc = []byte{ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0xbc, 0x1a, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x12, + 0x6f, 0x22, 0xe2, 0x1c, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, @@ -2081,297 +2124,316 @@ var file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc = []byte{ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x65, 0x67, 0x65, 0x78, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x1a, 0x9a, 0x02, 0x0a, 0x0f, 0x53, - 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x40, - 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, - 0x12, 0x3c, 0x0a, 0x08, 0x63, 0x6c, 0x75, 
0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4b, - 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, - 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, - 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x89, 0x03, 0x0a, 0x10, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0a, - 0x6c, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x6c, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x32, 0x0a, 0x15, 0x6c, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, - 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 
0x6f, 0x63, 0x61, - 0x74, 0x6f, 0x72, 0x12, 0x41, 0x0a, 0x0a, 0x63, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x64, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x63, 0x64, 0x73, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x63, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x61, 0x64, - 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x61, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4a, 0x04, 0x08, - 0x04, 0x10, 0x05, 0x1a, 0x7b, 0x0a, 0x21, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x40, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x42, 0x0d, 0x0a, 0x0b, - 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x4a, 0x04, 0x08, 0x0a, 0x10, - 0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x52, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x22, 0x89, 0x03, 0x0a, 0x05, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, - 0x33, 0x0a, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, - 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, - 0x50, 0x61, 0x74, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x12, 0x49, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, - 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x69, - 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, - 0x6e, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, - 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, - 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x22, 0xcb, 0x04, 0x0a, - 0x0e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, - 0x2c, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x67, 0x0a, - 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 
0x61, - 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x69, 0x6e, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x51, 0x0a, 0x11, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0f, 0x6c, 0x6f, - 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xc9, 0x01, - 0x0a, 0x10, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, - 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 
0x33, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x40, 0x9a, 0xc5, 0x88, 0x1e, 0x3b, 0x0a, 0x39, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, - 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, - 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, - 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, - 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x22, 0xb0, 0x01, 0x0a, 0x09, 0x57, - 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x12, 0x55, 0x0a, 0x14, 0x6d, 0x61, 0x69, 0x6e, - 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, - 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x52, 0x12, 0x6d, 0x61, 0x69, - 0x6e, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x12, - 0x4c, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, - 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, - 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x52, 0x0e, 0x77, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 
0x6f, 0x67, 0x22, 0xba, 0x06, - 0x0a, 0x08, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x12, 0x4c, 0x0a, 0x07, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, - 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, - 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x6d, 0x69, 0x73, 0x73, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6d, 0x69, 0x73, 0x73, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x44, 0x0a, 0x10, 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, - 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x6d, 0x65, 0x67, - 0x61, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, - 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6b, - 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x5a, 0x0a, 0x17, 0x6d, 0x61, - 0x78, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6a, - 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x32, 0x00, - 0x52, 0x14, 0x6d, 0x61, 0x78, 0x4b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x11, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, - 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x6d, 0x75, - 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x47, - 0x0a, 0x13, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x68, 0x72, 0x65, - 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, - 0x65, 0x6e, 0x74, 0x52, 0x12, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x68, - 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x1a, 0x85, 0x02, 0x0a, 0x0e, 0x57, 0x61, 0x74, 0x63, - 0x68, 0x64, 0x6f, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x06, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x65, 0x67, 0x65, 0x78, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x60, 0x0a, 0x16, 0x78, 0x64, + 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x60, - 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x40, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, - 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, - 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x22, 0x4d, 0x0a, 0x0d, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, - 0x0a, 0x04, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x55, 0x4c, 0x54, - 0x49, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x45, 0x47, 0x41, 0x4d, - 0x49, 0x53, 0x53, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x4d, 0x49, 0x53, 0x53, 0x10, 0x04, 0x3a, - 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x14, 0x78, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x6b, 0x0a, 0x1c, + 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, + 0x65, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x19, + 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x54, 0x72, 0x61, 0x63, 0x6b, 
0x65, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x55, 0x0a, 0x10, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x25, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x1a, 0x9a, 0x02, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x09, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x3c, 0x0a, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x3a, 0x3a, 0x9a, 
0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x89, 0x03, + 0x0a, 0x10, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0a, 0x6c, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x6c, 0x64, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x41, 0x0a, 0x0a, 0x63, 0x64, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x09, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, 0x15, + 0x63, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x63, 0x64, 0x73, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, + 0x12, 0x44, 0x0a, 0x0a, 0x61, 0x64, 0x73, 0x5f, 0x63, 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x61, 0x64, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x1a, 0x7b, 0x0a, 0x21, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x40, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x42, 0x0d, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, + 0x68, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x52, 0x07, 0x72, + 
0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x89, 0x03, 0x0a, 0x05, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x33, 0x0a, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x0d, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x37, 0x0a, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x49, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x67, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 
0x6e, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, + 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, + 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x64, 0x6d, + 0x69, 0x6e, 0x22, 0xcb, 0x04, 0x0a, 0x0e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x67, 0x0a, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, + 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, + 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x6c, + 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x14, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 
0x6e, 0x66, 0x69, 0x67, + 0x12, 0x51, 0x0a, 0x11, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0xc9, 0x01, 0x0a, 0x10, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, + 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x4d, + 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x40, 0x9a, + 0xc5, 0x88, 0x1e, 0x3b, 0x0a, 0x39, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4f, + 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a, + 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, - 0x32, 0x2e, 
0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x22, 0x51, 0x0a, 0x0b, 0x46, 0x61, - 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xdc, 0x01, - 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x79, 0x6d, - 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x22, 0x0a, 0x0c, - 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, - 0x12, 0x33, 0x0a, 0x15, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x75, 0x62, - 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x2b, 0x0a, 0x04, 0x62, 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x62, 0x61, - 0x73, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, - 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0xdb, 0x06, 0x0a, - 0x0c, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 
0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x1b, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0c, 0x73, 0x74, - 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, - 0x74, 0x69, 0x63, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x6b, - 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, + 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x22, 0xb0, 0x01, 0x0a, 0x09, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x12, 0x55, + 0x0a, 0x14, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x61, + 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, - 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, - 0x00, 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, + 0x67, 0x52, 0x12, 0x6d, 0x61, 0x69, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x57, 0x61, 0x74, + 0x63, 0x68, 0x64, 0x6f, 0x67, 0x12, 0x4c, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, + 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x23, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x64, 0x6f, 0x67, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x64, 0x6f, 0x67, 0x22, 0xba, 0x06, 0x0a, 0x08, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, + 0x12, 0x4c, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, - 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, - 0x79, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x72, 0x74, 0x64, 0x73, 0x5f, 0x6c, 0x61, 0x79, 0x65, - 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, + 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, + 0x0a, 0x0c, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0b, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x44, 0x0a, 0x10, + 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0f, 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x12, 0x5a, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0xaa, 0x01, 0x02, 0x32, 0x00, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x4b, 0x69, 0x6c, 0x6c, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x11, + 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x12, 0x47, 0x0a, 0x13, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, + 0x6c, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x12, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 
0x6c, 0x64, 0x1a, 0x85, 0x02, + 0x0a, 0x0e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, + 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, + 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x0d, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, + 0x6f, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x02, 0x12, 0x0c, 0x0a, + 0x08, 0x4d, 0x45, 0x47, 0x41, 0x4d, 0x49, 0x53, 0x53, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x4d, + 0x49, 0x53, 0x53, 0x10, 0x04, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, + 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, + 0x22, 0x51, 0x0a, 0x0b, 0x46, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x42, 
0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x22, 0xdc, 0x01, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, + 0x6f, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x33, 0x0a, 0x15, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, + 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x2b, 0x0a, 0x04, 0x62, + 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x52, 0x04, 0x62, 0x61, 0x73, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, + 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x22, 0xdb, 0x06, 0x0a, 0x0c, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, + 0x79, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 
0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x3c, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52, + 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b, + 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, + 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, - 0x2e, 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x72, 0x74, - 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0xc1, 0x01, 0x0a, 0x09, 0x44, 0x69, 0x73, 0x6b, - 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, - 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, - 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x34, 
0x0a, 0x16, - 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x61, 0x70, - 0x70, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, - 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, - 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0x46, 0x0a, 0x0a, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, - 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, - 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, - 0x79, 0x65, 0x72, 0x1a, 0x9d, 0x01, 0x0a, 0x09, 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, - 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x0b, 0x72, 0x74, 0x64, 0x73, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0a, - 0x72, 0x74, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, - 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 
0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, - 0x79, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, - 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, - 0x65, 0x72, 0x42, 0x16, 0x0a, 0x0f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x82, 0x01, 0x0a, 0x0e, 0x4c, - 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, - 0x06, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x72, 0x74, 0x64, + 0x73, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, - 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x3a, 0x2f, - 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, - 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, - 0xb1, 0x02, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x12, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 
0xc0, 0x01, 0x01, 0xc8, 0x01, - 0x00, 0x52, 0x10, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x76, 0x0a, 0x12, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, - 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x49, - 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x6c, 0x69, 0x6e, - 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x22, 0x66, 0x0a, 0x10, 0x49, - 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, - 0x52, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x54, - 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x53, 0x50, - 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, - 0x10, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, - 0x52, 0x10, 0x03, 0x42, 0x91, 0x01, 0x0a, 0x27, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x42, - 0x0e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, - 0x76, 
0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, - 0x2f, 0x76, 0x33, 0x3b, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x76, 0x33, 0xba, - 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x09, 0x72, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0xc1, 0x01, + 0x0a, 0x09, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x22, + 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, + 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, + 0x72, 0x1a, 0x46, 0x0a, 0x0a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a, + 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 
0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x41, + 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0x9d, 0x01, 0x0a, 0x09, 0x52, 0x74, + 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x0b, 0x72, + 0x74, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x0a, 0x72, 0x74, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, + 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x42, 0x16, 0x0a, 0x0f, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x22, 0x82, 0x01, 0x0a, 0x0e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 
0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x61, + 0x79, 0x65, 0x72, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0xb1, 0x02, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x12, + 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, + 0x01, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x10, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x76, 0x0a, 0x12, 0x69, 0x6e, 0x6c, + 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x10, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, + 0x65, 0x22, 0x66, 0x0a, 0x10, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, + 0x5f, 0x48, 0x45, 0x41, 
0x44, 0x45, 0x52, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x51, + 0x55, 0x45, 0x53, 0x54, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x01, 0x12, 0x13, + 0x0a, 0x0f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, + 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, + 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x03, 0x42, 0x91, 0x01, 0x0a, 0x27, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, 0x6f, 0x6f, + 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x3b, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2456,45 +2518,48 @@ var file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs = []int32{ 14, // 21: envoy.config.bootstrap.v3.Bootstrap.certificate_provider_instances:type_name -> envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry 11, // 22: envoy.config.bootstrap.v3.Bootstrap.inline_headers:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader 29, // 23: envoy.config.bootstrap.v3.Bootstrap.default_regex_engine:type_name -> envoy.config.core.v3.TypedExtensionConfig - 31, // 24: envoy.config.bootstrap.v3.Admin.access_log:type_name -> envoy.config.accesslog.v3.AccessLog - 32, // 
25: envoy.config.bootstrap.v3.Admin.address:type_name -> envoy.config.core.v3.Address - 33, // 26: envoy.config.bootstrap.v3.Admin.socket_options:type_name -> envoy.config.core.v3.SocketOption - 15, // 27: envoy.config.bootstrap.v3.ClusterManager.outlier_detection:type_name -> envoy.config.bootstrap.v3.ClusterManager.OutlierDetection - 34, // 28: envoy.config.bootstrap.v3.ClusterManager.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig - 21, // 29: envoy.config.bootstrap.v3.ClusterManager.load_stats_config:type_name -> envoy.config.core.v3.ApiConfigSource - 6, // 30: envoy.config.bootstrap.v3.Watchdogs.main_thread_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog - 6, // 31: envoy.config.bootstrap.v3.Watchdogs.worker_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog - 16, // 32: envoy.config.bootstrap.v3.Watchdog.actions:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction - 24, // 33: envoy.config.bootstrap.v3.Watchdog.miss_timeout:type_name -> google.protobuf.Duration - 24, // 34: envoy.config.bootstrap.v3.Watchdog.megamiss_timeout:type_name -> google.protobuf.Duration - 24, // 35: envoy.config.bootstrap.v3.Watchdog.kill_timeout:type_name -> google.protobuf.Duration - 24, // 36: envoy.config.bootstrap.v3.Watchdog.max_kill_timeout_jitter:type_name -> google.protobuf.Duration - 24, // 37: envoy.config.bootstrap.v3.Watchdog.multikill_timeout:type_name -> google.protobuf.Duration - 35, // 38: envoy.config.bootstrap.v3.Watchdog.multikill_threshold:type_name -> envoy.type.v3.Percent - 29, // 39: envoy.config.bootstrap.v3.FatalAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 36, // 40: envoy.config.bootstrap.v3.Runtime.base:type_name -> google.protobuf.Struct - 36, // 41: envoy.config.bootstrap.v3.RuntimeLayer.static_layer:type_name -> google.protobuf.Struct - 17, // 42: envoy.config.bootstrap.v3.RuntimeLayer.disk_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer - 18, // 43: 
envoy.config.bootstrap.v3.RuntimeLayer.admin_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer - 19, // 44: envoy.config.bootstrap.v3.RuntimeLayer.rtds_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer - 9, // 45: envoy.config.bootstrap.v3.LayeredRuntime.layers:type_name -> envoy.config.bootstrap.v3.RuntimeLayer - 1, // 46: envoy.config.bootstrap.v3.CustomInlineHeader.inline_header_type:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader.InlineHeaderType - 37, // 47: envoy.config.bootstrap.v3.Bootstrap.StaticResources.listeners:type_name -> envoy.config.listener.v3.Listener - 38, // 48: envoy.config.bootstrap.v3.Bootstrap.StaticResources.clusters:type_name -> envoy.config.cluster.v3.Cluster - 39, // 49: envoy.config.bootstrap.v3.Bootstrap.StaticResources.secrets:type_name -> envoy.extensions.transport_sockets.tls.v3.Secret - 30, // 50: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.lds_config:type_name -> envoy.config.core.v3.ConfigSource - 30, // 51: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.cds_config:type_name -> envoy.config.core.v3.ConfigSource - 21, // 52: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.ads_config:type_name -> envoy.config.core.v3.ApiConfigSource - 29, // 53: envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry.value:type_name -> envoy.config.core.v3.TypedExtensionConfig - 40, // 54: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection.event_service:type_name -> envoy.config.core.v3.EventServiceConfig - 29, // 55: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 0, // 56: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.event:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction.WatchdogEvent - 30, // 57: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer.rtds_config:type_name -> envoy.config.core.v3.ConfigSource - 58, // [58:58] is the sub-list for method output_type - 58, // [58:58] 
is the sub-list for method input_type - 58, // [58:58] is the sub-list for extension type_name - 58, // [58:58] is the sub-list for extension extendee - 0, // [0:58] is the sub-list for field type_name + 29, // 24: envoy.config.bootstrap.v3.Bootstrap.xds_delegate_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 29, // 25: envoy.config.bootstrap.v3.Bootstrap.xds_config_tracker_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 29, // 26: envoy.config.bootstrap.v3.Bootstrap.listener_manager:type_name -> envoy.config.core.v3.TypedExtensionConfig + 31, // 27: envoy.config.bootstrap.v3.Admin.access_log:type_name -> envoy.config.accesslog.v3.AccessLog + 32, // 28: envoy.config.bootstrap.v3.Admin.address:type_name -> envoy.config.core.v3.Address + 33, // 29: envoy.config.bootstrap.v3.Admin.socket_options:type_name -> envoy.config.core.v3.SocketOption + 15, // 30: envoy.config.bootstrap.v3.ClusterManager.outlier_detection:type_name -> envoy.config.bootstrap.v3.ClusterManager.OutlierDetection + 34, // 31: envoy.config.bootstrap.v3.ClusterManager.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig + 21, // 32: envoy.config.bootstrap.v3.ClusterManager.load_stats_config:type_name -> envoy.config.core.v3.ApiConfigSource + 6, // 33: envoy.config.bootstrap.v3.Watchdogs.main_thread_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog + 6, // 34: envoy.config.bootstrap.v3.Watchdogs.worker_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog + 16, // 35: envoy.config.bootstrap.v3.Watchdog.actions:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction + 24, // 36: envoy.config.bootstrap.v3.Watchdog.miss_timeout:type_name -> google.protobuf.Duration + 24, // 37: envoy.config.bootstrap.v3.Watchdog.megamiss_timeout:type_name -> google.protobuf.Duration + 24, // 38: envoy.config.bootstrap.v3.Watchdog.kill_timeout:type_name -> google.protobuf.Duration + 24, // 39: 
envoy.config.bootstrap.v3.Watchdog.max_kill_timeout_jitter:type_name -> google.protobuf.Duration + 24, // 40: envoy.config.bootstrap.v3.Watchdog.multikill_timeout:type_name -> google.protobuf.Duration + 35, // 41: envoy.config.bootstrap.v3.Watchdog.multikill_threshold:type_name -> envoy.type.v3.Percent + 29, // 42: envoy.config.bootstrap.v3.FatalAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 36, // 43: envoy.config.bootstrap.v3.Runtime.base:type_name -> google.protobuf.Struct + 36, // 44: envoy.config.bootstrap.v3.RuntimeLayer.static_layer:type_name -> google.protobuf.Struct + 17, // 45: envoy.config.bootstrap.v3.RuntimeLayer.disk_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer + 18, // 46: envoy.config.bootstrap.v3.RuntimeLayer.admin_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer + 19, // 47: envoy.config.bootstrap.v3.RuntimeLayer.rtds_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer + 9, // 48: envoy.config.bootstrap.v3.LayeredRuntime.layers:type_name -> envoy.config.bootstrap.v3.RuntimeLayer + 1, // 49: envoy.config.bootstrap.v3.CustomInlineHeader.inline_header_type:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader.InlineHeaderType + 37, // 50: envoy.config.bootstrap.v3.Bootstrap.StaticResources.listeners:type_name -> envoy.config.listener.v3.Listener + 38, // 51: envoy.config.bootstrap.v3.Bootstrap.StaticResources.clusters:type_name -> envoy.config.cluster.v3.Cluster + 39, // 52: envoy.config.bootstrap.v3.Bootstrap.StaticResources.secrets:type_name -> envoy.extensions.transport_sockets.tls.v3.Secret + 30, // 53: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.lds_config:type_name -> envoy.config.core.v3.ConfigSource + 30, // 54: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.cds_config:type_name -> envoy.config.core.v3.ConfigSource + 21, // 55: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.ads_config:type_name -> envoy.config.core.v3.ApiConfigSource + 
29, // 56: envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry.value:type_name -> envoy.config.core.v3.TypedExtensionConfig + 40, // 57: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection.event_service:type_name -> envoy.config.core.v3.EventServiceConfig + 29, // 58: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 0, // 59: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.event:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction.WatchdogEvent + 30, // 60: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer.rtds_config:type_name -> envoy.config.core.v3.ConfigSource + 61, // [61:61] is the sub-list for method output_type + 61, // [61:61] is the sub-list for method input_type + 61, // [61:61] is the sub-list for extension type_name + 61, // [61:61] is the sub-list for extension extendee + 0, // [0:61] is the sub-list for field type_name } func init() { file_envoy_config_bootstrap_v3_bootstrap_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go index c3e62bdf18..0537932c0f 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go @@ -809,9 +809,105 @@ func (m *Bootstrap) validate(all bool) error { } } - switch m.StatsFlush.(type) { + if all { + switch v := interface{}(m.GetXdsDelegateExtension()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "XdsDelegateExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + 
case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "XdsDelegateExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetXdsDelegateExtension()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "XdsDelegateExtension", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetXdsConfigTrackerExtension()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "XdsConfigTrackerExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "XdsConfigTrackerExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetXdsConfigTrackerExtension()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "XdsConfigTrackerExtension", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetListenerManager()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ListenerManager", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ListenerManager", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListenerManager()).(interface{ Validate() error 
}); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "ListenerManager", + reason: "embedded message failed validation", + cause: err, + } + } + } + switch v := m.StatsFlush.(type) { case *Bootstrap_StatsFlushOnAdmin: + if v == nil { + err := BootstrapValidationError{ + field: "StatsFlush", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if m.GetStatsFlushOnAdmin() != true { err := BootstrapValidationError{ @@ -824,6 +920,8 @@ func (m *Bootstrap) validate(all bool) error { errors = append(errors, err) } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -2052,9 +2150,20 @@ func (m *RuntimeLayer) validate(all bool) error { errors = append(errors, err) } - switch m.LayerSpecifier.(type) { - + oneofLayerSpecifierPresent := false + switch v := m.LayerSpecifier.(type) { case *RuntimeLayer_StaticLayer: + if v == nil { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLayerSpecifierPresent = true if all { switch v := interface{}(m.GetStaticLayer()).(type) { @@ -2086,6 +2195,17 @@ func (m *RuntimeLayer) validate(all bool) error { } case *RuntimeLayer_DiskLayer_: + if v == nil { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLayerSpecifierPresent = true if all { switch v := interface{}(m.GetDiskLayer()).(type) { @@ -2117,6 +2237,17 @@ func (m *RuntimeLayer) validate(all bool) error { } case *RuntimeLayer_AdminLayer_: + if v == nil { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLayerSpecifierPresent = true if all { switch v := 
interface{}(m.GetAdminLayer()).(type) { @@ -2148,6 +2279,17 @@ func (m *RuntimeLayer) validate(all bool) error { } case *RuntimeLayer_RtdsLayer_: + if v == nil { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLayerSpecifierPresent = true if all { switch v := interface{}(m.GetRtdsLayer()).(type) { @@ -2179,6 +2321,9 @@ func (m *RuntimeLayer) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofLayerSpecifierPresent { err := RuntimeLayerValidationError{ field: "LayerSpecifier", reason: "value is required", @@ -2187,7 +2332,6 @@ func (m *RuntimeLayer) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go index 0ba499b0e7..264ea82efa 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/cluster/v3/circuit_breaker.proto package clusterv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go index 1156a18184..4e68a99cfc 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/cluster/v3/cluster.proto package clusterv3 @@ -14,7 +14,7 @@ import ( v31 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" duration "github.com/golang/protobuf/ptypes/duration" _struct "github.com/golang/protobuf/ptypes/struct" wrappers "github.com/golang/protobuf/ptypes/wrappers" @@ -366,6 +366,92 @@ func (Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) EnumDescriptor() ([]byte, [ return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3, 0} } +type Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy int32 + +const ( + // No fallback. Route metadata will be used as-is. + Cluster_LbSubsetConfig_METADATA_NO_FALLBACK Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy = 0 + // A special metadata key ``fallback_list`` will be used to provide variants of metadata to try. + // Value of ``fallback_list`` key has to be a list. 
Every list element has to be a struct - it will + // be merged with route metadata, overriding keys that appear in both places. + // ``fallback_list`` entries will be used in order until a host is found. + // + // ``fallback_list`` key itself is removed from metadata before subset load balancing is performed. + // + // Example: + // + // for metadata: + // + // .. code-block:: yaml + // + // version: 1.0 + // fallback_list: + // - version: 2.0 + // hardware: c64 + // - hardware: c32 + // - version: 3.0 + // + // at first, metadata: + // + // .. code-block:: json + // + // {"version": "2.0", "hardware": "c64"} + // + // will be used for load balancing. If no host is found, metadata: + // + // .. code-block:: json + // + // {"version": "1.0", "hardware": "c32"} + // + // is next to try. If it still results in no host, finally metadata: + // + // .. code-block:: json + // + // {"version": "3.0"} + // + // is used. + Cluster_LbSubsetConfig_FALLBACK_LIST Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy = 1 +) + +// Enum value maps for Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy. 
+var ( + Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy_name = map[int32]string{ + 0: "METADATA_NO_FALLBACK", + 1: "FALLBACK_LIST", + } + Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy_value = map[string]int32{ + "METADATA_NO_FALLBACK": 0, + "FALLBACK_LIST": 1, + } +) + +func (x Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Enum() *Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy { + p := new(Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) + *p = x + return p +} + +func (x Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[5].Descriptor() +} + +func (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[5] +} + +func (x Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy.Descriptor instead. +func (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3, 1} +} + // Allows to override top level fallback policy per selector. 
type Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy int32 @@ -417,11 +503,11 @@ func (x Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) } func (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_config_cluster_v3_cluster_proto_enumTypes[5].Descriptor() + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[6].Descriptor() } func (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Type() protoreflect.EnumType { - return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[5] + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[6] } func (x Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Number() protoreflect.EnumNumber { @@ -468,11 +554,11 @@ func (x Cluster_RingHashLbConfig_HashFunction) String() string { } func (Cluster_RingHashLbConfig_HashFunction) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_config_cluster_v3_cluster_proto_enumTypes[6].Descriptor() + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[7].Descriptor() } func (Cluster_RingHashLbConfig_HashFunction) Type() protoreflect.EnumType { - return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[6] + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[7] } func (x Cluster_RingHashLbConfig_HashFunction) Number() protoreflect.EnumNumber { @@ -484,7 +570,7 @@ func (Cluster_RingHashLbConfig_HashFunction) EnumDescriptor() ([]byte, []int) { return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 7, 0} } -// Cluster list collections. Entries are *Cluster* resources or references. +// Cluster list collections. Entries are ``Cluster`` resources or references. 
// [#not-implemented-hide:] type ClusterCollection struct { state protoimpl.MessageState @@ -541,7 +627,7 @@ type Cluster struct { unknownFields protoimpl.UnknownFields // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket_match* in the + // The entry of ``envoy.transport_socket_match`` in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. The first // :ref:`match ` is used. @@ -561,16 +647,16 @@ type Cluster struct { // transport_socket: // name: envoy.transport_sockets.raw_buffer // - // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* + // Connections to the endpoints whose metadata value under ``envoy.transport_socket_match`` // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. // // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. // - // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any - // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or - // *transport_socket* specified in this cluster. + // If an endpoint metadata's value under ``envoy.transport_socket_match`` does not match any + // ``TransportSocketMatch``, socket configuration fallbacks to use the ``tls_context`` or + // ``transport_socket`` specified in this cluster. // // This field allows gradual and flexible transport socket configuration changes. // @@ -581,8 +667,8 @@ type Cluster struct { // // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding - // *TransportSocketMatch* in this field. 
Other client Envoys receive CDS without - // *transport_socket_match* set, and still send plain text traffic to the same cluster. + // ``TransportSocketMatch`` in this field. Other client Envoys receive CDS without + // ``transport_socket_match`` set, and still send plain text traffic to the same cluster. // // This field can be used to specify custom transport socket configurations for health // checks by adding matching key/value pairs in a health check's @@ -623,7 +709,7 @@ type Cluster struct { // :ref:`STATIC`, // :ref:`STRICT_DNS` // or :ref:`LOGICAL_DNS` clusters. - // This field supersedes the *hosts* field in the v2 API. + // This field supersedes the ``hosts`` field in the v2 API. // // .. attention:: // @@ -689,7 +775,7 @@ type Cluster struct { // set so that Envoy will assume that the upstream supports HTTP/2 when // making new HTTP connection pool connections. Currently, Envoy only // supports prior knowledge for upstream connections. Even if TLS is used - // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 + // with ALPN, ``http2_protocol_options`` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. // This has been deprecated in favor of http2_protocol_options fields in the // :ref:`http_protocol_options ` @@ -706,7 +792,7 @@ type Cluster struct { // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on // specific options. // [#next-major-version: make this a list of typed extensions.] 
- TypedExtensionProtocolOptions map[string]*any.Any `protobuf:"bytes,36,rep,name=typed_extension_protocol_options,json=typedExtensionProtocolOptions,proto3" json:"typed_extension_protocol_options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypedExtensionProtocolOptions map[string]*any1.Any `protobuf:"bytes,36,rep,name=typed_extension_protocol_options,json=typedExtensionProtocolOptions,proto3" json:"typed_extension_protocol_options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // If the DNS refresh rate is specified and the cluster type is either // :ref:`STRICT_DNS`, // or :ref:`LOGICAL_DNS`, @@ -744,13 +830,13 @@ type Cluster struct { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. - // This field is deprecated in favor of *dns_resolution_config* + // This field is deprecated in favor of ``dns_resolution_config`` // which aggregates all of the DNS resolver configuration in a single message. // // Deprecated: Do not use. DnsResolvers []*v32.Address `protobuf:"bytes,18,rep,name=dns_resolvers,json=dnsResolvers,proto3" json:"dns_resolvers,omitempty"` // Always use TCP queries instead of UDP queries for DNS lookups. - // This field is deprecated in favor of *dns_resolution_config* + // This field is deprecated in favor of ``dns_resolution_config`` // which aggregates all of the DNS resolver configuration in a single message. // // Deprecated: Do not use. @@ -765,17 +851,18 @@ type Cluster struct { // or any other DNS resolver types and the related parameters. // For example, an object of // :ref:`CaresDnsResolverConfig ` - // can be packed into this *typed_dns_resolver_config*. This configuration replaces the + // can be packed into this ``typed_dns_resolver_config``. This configuration replaces the // :ref:`dns_resolution_config ` // configuration. 
- // During the transition period when both *dns_resolution_config* and *typed_dns_resolver_config* exists, - // when *typed_dns_resolver_config* is in place, Envoy will use it and ignore *dns_resolution_config*. - // When *typed_dns_resolver_config* is missing, the default behavior is in place. + // During the transition period when both ``dns_resolution_config`` and ``typed_dns_resolver_config`` exists, + // when ``typed_dns_resolver_config`` is in place, Envoy will use it and ignore ``dns_resolution_config``. + // When ``typed_dns_resolver_config`` is missing, the default behavior is in place. // [#extension-category: envoy.network.dns_resolver] TypedDnsResolverConfig *v32.TypedExtensionConfig `protobuf:"bytes,55,opt,name=typed_dns_resolver_config,json=typedDnsResolverConfig,proto3" json:"typed_dns_resolver_config,omitempty"` // Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`. + // or :ref:`LOGICAL_DNS`, + // or :ref:`Redis Cluster`. // If true, cluster readiness blocks on warm-up. If false, the cluster will complete // initialization whether or not warm-up has completed. Defaults to true. WaitForWarmOnInit *wrappers.BoolValue `protobuf:"bytes,54,opt,name=wait_for_warm_on_init,json=waitForWarmOnInit,proto3" json:"wait_for_warm_on_init,omitempty"` @@ -822,8 +909,8 @@ type Cluster struct { // Common configuration for all load balancer implementations. CommonLbConfig *Cluster_CommonLbConfig `protobuf:"bytes,27,opt,name=common_lb_config,json=commonLbConfig,proto3" json:"common_lb_config,omitempty"` // Optional custom transport socket implementation to use for upstream connections. - // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and - // :ref:`UpstreamTlsContexts ` in the `typed_config`. + // To setup TLS, set a transport socket with name ``envoy.transport_sockets.tls`` and + // :ref:`UpstreamTlsContexts ` in the ``typed_config``. 
// If no transport socket configuration is specified, new connections // will be set up with plaintext. TransportSocket *v32.TransportSocket `protobuf:"bytes,24,opt,name=transport_socket,json=transportSocket,proto3" json:"transport_socket,omitempty"` @@ -831,7 +918,7 @@ type Cluster struct { // cluster. It can be used for stats, logging, and varying filter behavior. // Fields should use reverse DNS notation to denote which entity within Envoy // will need the information. For instance, if the metadata is intended for - // the Router filter, the filter name should be specified as *envoy.filters.http.router*. + // the Router filter, the filter name should be specified as ``envoy.filters.http.router``. Metadata *v32.Metadata `protobuf:"bytes,25,opt,name=metadata,proto3" json:"metadata,omitempty"` // Determines how Envoy selects the protocol used to speak to upstream hosts. // This has been deprecated in favor of setting explicit protocol selection @@ -892,7 +979,7 @@ type Cluster struct { // // .. attention:: // - // This field has been deprecated in favor of `timeout_budgets`, part of + // This field has been deprecated in favor of ``timeout_budgets``, part of // :ref:`track_cluster_stats `. // // Deprecated: Do not use. @@ -903,7 +990,7 @@ type Cluster struct { // TCP upstreams. // // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream - // HTTP, using the http connection pool and the codec from `http2_protocol_options` + // HTTP, using the http connection pool and the codec from ``http2_protocol_options`` // // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. 
@@ -920,7 +1007,7 @@ type Cluster struct { TrackClusterStats *TrackClusterStats `protobuf:"bytes,49,opt,name=track_cluster_stats,json=trackClusterStats,proto3" json:"track_cluster_stats,omitempty"` // Preconnect configuration for this cluster. PreconnectPolicy *Cluster_PreconnectPolicy `protobuf:"bytes,50,opt,name=preconnect_policy,json=preconnectPolicy,proto3" json:"preconnect_policy,omitempty"` - // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate + // If ``connection_pool_per_downstream_connection`` is true, the cluster will use a separate // connection pool for every downstream connection ConnectionPoolPerDownstreamConnection bool `protobuf:"varint,51,opt,name=connection_pool_per_downstream_connection,json=connectionPoolPerDownstreamConnection,proto3" json:"connection_pool_per_downstream_connection,omitempty"` } @@ -1088,7 +1175,7 @@ func (x *Cluster) GetHttp2ProtocolOptions() *v32.Http2ProtocolOptions { return nil } -func (x *Cluster) GetTypedExtensionProtocolOptions() map[string]*any.Any { +func (x *Cluster) GetTypedExtensionProtocolOptions() map[string]*any1.Any { if x != nil { return x.TypedExtensionProtocolOptions } @@ -1465,56 +1552,6 @@ func (x *LoadBalancingPolicy) GetPolicies() []*LoadBalancingPolicy_Policy { return nil } -// An extensible structure containing the address Envoy should bind to when -// establishing upstream connections. -type UpstreamBindConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The address Envoy should bind to when establishing upstream connections. 
- SourceAddress *v32.Address `protobuf:"bytes,1,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"` -} - -func (x *UpstreamBindConfig) Reset() { - *x = UpstreamBindConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpstreamBindConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpstreamBindConfig) ProtoMessage() {} - -func (x *UpstreamBindConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpstreamBindConfig.ProtoReflect.Descriptor instead. -func (*UpstreamBindConfig) Descriptor() ([]byte, []int) { - return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{3} -} - -func (x *UpstreamBindConfig) GetSourceAddress() *v32.Address { - if x != nil { - return x.SourceAddress - } - return nil -} - type UpstreamConnectionOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1531,7 +1568,7 @@ type UpstreamConnectionOptions struct { func (x *UpstreamConnectionOptions) Reset() { *x = UpstreamConnectionOptions{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[4] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1544,7 +1581,7 @@ func (x *UpstreamConnectionOptions) String() string { func (*UpstreamConnectionOptions) ProtoMessage() {} func (x *UpstreamConnectionOptions) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[4] + mi := 
&file_envoy_config_cluster_v3_cluster_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1557,7 +1594,7 @@ func (x *UpstreamConnectionOptions) ProtoReflect() protoreflect.Message { // Deprecated: Use UpstreamConnectionOptions.ProtoReflect.Descriptor instead. func (*UpstreamConnectionOptions) Descriptor() ([]byte, []int) { - return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{4} + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{3} } func (x *UpstreamConnectionOptions) GetTcpKeepalive() *v32.TcpKeepalive { @@ -1594,7 +1631,7 @@ type TrackClusterStats struct { func (x *TrackClusterStats) Reset() { *x = TrackClusterStats{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[5] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1607,7 +1644,7 @@ func (x *TrackClusterStats) String() string { func (*TrackClusterStats) ProtoMessage() {} func (x *TrackClusterStats) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[5] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1620,7 +1657,7 @@ func (x *TrackClusterStats) ProtoReflect() protoreflect.Message { // Deprecated: Use TrackClusterStats.ProtoReflect.Descriptor instead. func (*TrackClusterStats) Descriptor() ([]byte, []int) { - return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{5} + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{4} } func (x *TrackClusterStats) GetTimeoutBudgets() bool { @@ -1649,7 +1686,7 @@ type Cluster_TransportSocketMatch struct { // Optional endpoint metadata match criteria. 
// The connection to the endpoint with metadata matching what is set in this field // will use the transport socket configuration specified here. - // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match + // The endpoint's metadata entry in ``envoy.transport_socket_match`` is used to match // against the values specified in this field. Match *_struct.Struct `protobuf:"bytes,2,opt,name=match,proto3" json:"match,omitempty"` // The configuration of the transport socket. @@ -1660,7 +1697,7 @@ type Cluster_TransportSocketMatch struct { func (x *Cluster_TransportSocketMatch) Reset() { *x = Cluster_TransportSocketMatch{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[6] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1673,7 +1710,7 @@ func (x *Cluster_TransportSocketMatch) String() string { func (*Cluster_TransportSocketMatch) ProtoMessage() {} func (x *Cluster_TransportSocketMatch) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[6] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1721,13 +1758,13 @@ type Cluster_CustomClusterType struct { // Cluster specific configuration which depends on the cluster being instantiated. // See the supported cluster for further documentation. 
// [#extension-category: envoy.clusters] - TypedConfig *any.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` + TypedConfig *any1.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` } func (x *Cluster_CustomClusterType) Reset() { *x = Cluster_CustomClusterType{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[7] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1740,7 +1777,7 @@ func (x *Cluster_CustomClusterType) String() string { func (*Cluster_CustomClusterType) ProtoMessage() {} func (x *Cluster_CustomClusterType) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[7] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1763,7 +1800,7 @@ func (x *Cluster_CustomClusterType) GetName() string { return "" } -func (x *Cluster_CustomClusterType) GetTypedConfig() *any.Any { +func (x *Cluster_CustomClusterType) GetTypedConfig() *any1.Any { if x != nil { return x.TypedConfig } @@ -1787,7 +1824,7 @@ type Cluster_EdsClusterConfig struct { func (x *Cluster_EdsClusterConfig) Reset() { *x = Cluster_EdsClusterConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[8] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1800,7 +1837,7 @@ func (x *Cluster_EdsClusterConfig) String() string { func (*Cluster_EdsClusterConfig) ProtoMessage() {} func (x *Cluster_EdsClusterConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[8] + mi := 
&file_envoy_config_cluster_v3_cluster_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1832,7 +1869,7 @@ func (x *Cluster_EdsClusterConfig) GetServiceName() string { // Optionally divide the endpoints in this cluster into subsets defined by // endpoint metadata and selected by route and weighted cluster metadata. -// [#next-free-field: 8] +// [#next-free-field: 9] type Cluster_LbSubsetConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1846,13 +1883,13 @@ type Cluster_LbSubsetConfig struct { // fallback_policy is // :ref:`DEFAULT_SUBSET`. // Each field in default_subset is - // compared to the matching LbEndpoint.Metadata under the *envoy.lb* + // compared to the matching LbEndpoint.Metadata under the ``envoy.lb`` // namespace. It is valid for no hosts to match, in which case the behavior // is the same as a fallback_policy of // :ref:`NO_FALLBACK`. DefaultSubset *_struct.Struct `protobuf:"bytes,2,opt,name=default_subset,json=defaultSubset,proto3" json:"default_subset,omitempty"` // For each entry, LbEndpoint.Metadata's - // *envoy.lb* namespace is traversed and a subset is created for each unique + // ``envoy.lb`` namespace is traversed and a subset is created for each unique // combination of key and value. For example: // // .. code-block:: json @@ -1893,12 +1930,20 @@ type Cluster_LbSubsetConfig struct { // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value // and any of the elements in the list matches the criteria. ListAsAny bool `protobuf:"varint,7,opt,name=list_as_any,json=listAsAny,proto3" json:"list_as_any,omitempty"` + // Fallback mechanism that allows to try different route metadata until a host is found. 
+ // If load balancing process, including all its mechanisms (like + // :ref:`fallback_policy`) + // fails to select a host, this policy decides if and how the process is repeated using another metadata. + // + // The value defaults to + // :ref:`METADATA_NO_FALLBACK`. + MetadataFallbackPolicy Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy `protobuf:"varint,8,opt,name=metadata_fallback_policy,json=metadataFallbackPolicy,proto3,enum=envoy.config.cluster.v3.Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy" json:"metadata_fallback_policy,omitempty"` } func (x *Cluster_LbSubsetConfig) Reset() { *x = Cluster_LbSubsetConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[9] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1911,7 +1956,7 @@ func (x *Cluster_LbSubsetConfig) String() string { func (*Cluster_LbSubsetConfig) ProtoMessage() {} func (x *Cluster_LbSubsetConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[9] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1976,6 +2021,13 @@ func (x *Cluster_LbSubsetConfig) GetListAsAny() bool { return false } +func (x *Cluster_LbSubsetConfig) GetMetadataFallbackPolicy() Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy { + if x != nil { + return x.MetadataFallbackPolicy + } + return Cluster_LbSubsetConfig_METADATA_NO_FALLBACK +} + // Configuration for :ref:`slow start mode `. type Cluster_SlowStartConfig struct { state protoimpl.MessageState @@ -1993,8 +2045,8 @@ type Cluster_SlowStartConfig struct { // By tuning the parameter, is possible to achieve polynomial or exponential shape of ramp-up curve. 
// // During slow start window, effective weight of an endpoint would be scaled with time factor and aggression: - // `new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))`, - // where `time_factor=(time_since_start_seconds / slow_start_time_seconds)`. + // ``new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))``, + // where ``time_factor=(time_since_start_seconds / slow_start_time_seconds)``. // // As time progresses, more and more traffic would be sent to endpoint, which is in slow start window. // Once host exits slow start, time_factor and aggression no longer affect its weight. @@ -2008,7 +2060,7 @@ type Cluster_SlowStartConfig struct { func (x *Cluster_SlowStartConfig) Reset() { *x = Cluster_SlowStartConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[10] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2021,7 +2073,7 @@ func (x *Cluster_SlowStartConfig) String() string { func (*Cluster_SlowStartConfig) ProtoMessage() {} func (x *Cluster_SlowStartConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[10] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2072,7 +2124,7 @@ type Cluster_RoundRobinLbConfig struct { func (x *Cluster_RoundRobinLbConfig) Reset() { *x = Cluster_RoundRobinLbConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[11] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2085,7 +2137,7 @@ func (x *Cluster_RoundRobinLbConfig) String() string { func (*Cluster_RoundRobinLbConfig) ProtoMessage() {} func (x 
*Cluster_RoundRobinLbConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[11] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2120,18 +2172,18 @@ type Cluster_LeastRequestLbConfig struct { // The following formula is used to calculate the dynamic weights when hosts have different load // balancing weights: // - // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias` + // ``weight = load_balancing_weight / (active_requests + 1)^active_request_bias`` // // The larger the active request bias is, the more aggressively active requests will lower the // effective weight when all host weights are not equal. // - // `active_request_bias` must be greater than or equal to 0.0. + // ``active_request_bias`` must be greater than or equal to 0.0. // - // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number + // When ``active_request_bias == 0.0`` the Least Request Load Balancer doesn't consider the number // of active requests at the time it picks a host and behaves like the Round Robin Load // Balancer. // - // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing + // When ``active_request_bias > 0.0`` the Least Request Load Balancer scales the load balancing // weight by the number of active requests at the time it does a pick. 
// // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's @@ -2149,7 +2201,7 @@ type Cluster_LeastRequestLbConfig struct { func (x *Cluster_LeastRequestLbConfig) Reset() { *x = Cluster_LeastRequestLbConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[12] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2162,7 +2214,7 @@ func (x *Cluster_LeastRequestLbConfig) String() string { func (*Cluster_LeastRequestLbConfig) ProtoMessage() {} func (x *Cluster_LeastRequestLbConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[12] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2223,7 +2275,7 @@ type Cluster_RingHashLbConfig struct { func (x *Cluster_RingHashLbConfig) Reset() { *x = Cluster_RingHashLbConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[13] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2236,7 +2288,7 @@ func (x *Cluster_RingHashLbConfig) String() string { func (*Cluster_RingHashLbConfig) ProtoMessage() {} func (x *Cluster_RingHashLbConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[13] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2280,8 +2332,8 @@ type Cluster_MaglevLbConfig struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The table size for Maglev hashing. 
The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee. - // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same + // The table size for Maglev hashing. Maglev aims for "minimal disruption" rather than an absolute guarantee. + // Minimal disruption means that when the set of upstream hosts change, a connection will likely be sent to the same // upstream as it was before. Increasing the table size reduces the amount of disruption. // The table size must be prime number limited to 5000011. If it is not specified, the default is 65537. TableSize *wrappers.UInt64Value `protobuf:"bytes,1,opt,name=table_size,json=tableSize,proto3" json:"table_size,omitempty"` @@ -2290,7 +2342,7 @@ type Cluster_MaglevLbConfig struct { func (x *Cluster_MaglevLbConfig) Reset() { *x = Cluster_MaglevLbConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[14] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2303,7 +2355,7 @@ func (x *Cluster_MaglevLbConfig) String() string { func (*Cluster_MaglevLbConfig) ProtoMessage() {} func (x *Cluster_MaglevLbConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[14] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2329,6 +2381,7 @@ func (x *Cluster_MaglevLbConfig) GetTableSize() *wrappers.UInt64Value { // Specific configuration for the // :ref:`Original Destination ` // load balancing policy. 
+// [#extension: envoy.clusters.original_dst] type Cluster_OriginalDstLbConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2350,12 +2403,15 @@ type Cluster_OriginalDstLbConfig struct { // The http header to override destination address if :ref:`use_http_header `. // is set to true. If the value is empty, :ref:`x-envoy-original-dst-host ` will be used. HttpHeaderName string `protobuf:"bytes,2,opt,name=http_header_name,json=httpHeaderName,proto3" json:"http_header_name,omitempty"` + // The port to override for the original dst address. This port + // will take precedence over filter state and header override ports + UpstreamPortOverride *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=upstream_port_override,json=upstreamPortOverride,proto3" json:"upstream_port_override,omitempty"` } func (x *Cluster_OriginalDstLbConfig) Reset() { *x = Cluster_OriginalDstLbConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[15] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2368,7 +2424,7 @@ func (x *Cluster_OriginalDstLbConfig) String() string { func (*Cluster_OriginalDstLbConfig) ProtoMessage() {} func (x *Cluster_OriginalDstLbConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[15] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2398,6 +2454,13 @@ func (x *Cluster_OriginalDstLbConfig) GetHttpHeaderName() string { return "" } +func (x *Cluster_OriginalDstLbConfig) GetUpstreamPortOverride() *wrappers.UInt32Value { + if x != nil { + return x.UpstreamPortOverride + } + return nil +} + // Common configuration for all load balancer implementations. 
// [#next-free-field: 9] type Cluster_CommonLbConfig struct { @@ -2435,7 +2498,7 @@ type Cluster_CommonLbConfig struct { // when computing load balancing weights until they have been health checked for the first time. // This will have no effect unless active health checking is also configured. IgnoreNewHostsUntilFirstHc bool `protobuf:"varint,5,opt,name=ignore_new_hosts_until_first_hc,json=ignoreNewHostsUntilFirstHc,proto3" json:"ignore_new_hosts_until_first_hc,omitempty"` - // If set to `true`, the cluster manager will drain all existing + // If set to ``true``, the cluster manager will drain all existing // connections to upstream hosts whenever hosts are added or removed from the cluster. CloseConnectionsOnHostSetChange bool `protobuf:"varint,6,opt,name=close_connections_on_host_set_change,json=closeConnectionsOnHostSetChange,proto3" json:"close_connections_on_host_set_change,omitempty"` // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) @@ -2452,7 +2515,7 @@ type Cluster_CommonLbConfig struct { func (x *Cluster_CommonLbConfig) Reset() { *x = Cluster_CommonLbConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[16] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2465,7 +2528,7 @@ func (x *Cluster_CommonLbConfig) String() string { func (*Cluster_CommonLbConfig) ProtoMessage() {} func (x *Cluster_CommonLbConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[16] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2581,7 +2644,7 @@ type Cluster_RefreshRate struct { func (x *Cluster_RefreshRate) Reset() { *x = Cluster_RefreshRate{} if protoimpl.UnsafeEnabled { - mi := 
&file_envoy_config_cluster_v3_cluster_proto_msgTypes[17] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2594,7 +2657,7 @@ func (x *Cluster_RefreshRate) String() string { func (*Cluster_RefreshRate) ProtoMessage() {} func (x *Cluster_RefreshRate) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[17] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2654,10 +2717,10 @@ type Cluster_PreconnectPolicy struct { // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can // harm latency more than the preconnecting helps. PerUpstreamPreconnectRatio *wrappers.DoubleValue `protobuf:"bytes,1,opt,name=per_upstream_preconnect_ratio,json=perUpstreamPreconnectRatio,proto3" json:"per_upstream_preconnect_ratio,omitempty"` - // Indicates how many many streams (rounded up) can be anticipated across a cluster for each + // Indicates how many streams (rounded up) can be anticipated across a cluster for each // stream, useful for low QPS services. This is currently supported for a subset of // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). - // Unlike *per_upstream_preconnect_ratio* this preconnects across the upstream instances in a + // Unlike ``per_upstream_preconnect_ratio`` this preconnects across the upstream instances in a // cluster, doing best effort predictions of what upstream would be picked next and // pre-establishing a connection. 
// @@ -2682,7 +2745,7 @@ type Cluster_PreconnectPolicy struct { func (x *Cluster_PreconnectPolicy) Reset() { *x = Cluster_PreconnectPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[18] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2695,7 +2758,7 @@ func (x *Cluster_PreconnectPolicy) String() string { func (*Cluster_PreconnectPolicy) ProtoMessage() {} func (x *Cluster_PreconnectPolicy) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[18] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2736,12 +2799,9 @@ type Cluster_LbSubsetConfig_LbSubsetSelector struct { // Selects a mode of operation in which each subset has only one host. This mode uses the same rules for // choosing a host, but updating hosts is faster, especially for large numbers of hosts. // - // If a match is found to a host, that host will be used regardless of priority levels, unless the host is unhealthy. + // If a match is found to a host, that host will be used regardless of priority levels. // - // Currently, this mode is only supported if `subset_selectors` has only one entry, and `keys` contains - // only one entry. - // - // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in `keys` + // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in ``keys`` // will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge // :ref:`lb_subsets_single_host_per_subset_duplicate` indicates how many duplicates are // present in the current configuration. 
@@ -2757,14 +2817,14 @@ type Cluster_LbSubsetConfig_LbSubsetSelector struct { // For any other fallback policy the parameter is not used and should not be set. // Only values also present in // :ref:`keys` are allowed, but - // `fallback_keys_subset` cannot be equal to `keys`. + // ``fallback_keys_subset`` cannot be equal to ``keys``. FallbackKeysSubset []string `protobuf:"bytes,3,rep,name=fallback_keys_subset,json=fallbackKeysSubset,proto3" json:"fallback_keys_subset,omitempty"` } func (x *Cluster_LbSubsetConfig_LbSubsetSelector) Reset() { *x = Cluster_LbSubsetConfig_LbSubsetSelector{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[20] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2777,7 +2837,7 @@ func (x *Cluster_LbSubsetConfig_LbSubsetSelector) String() string { func (*Cluster_LbSubsetConfig_LbSubsetSelector) ProtoMessage() {} func (x *Cluster_LbSubsetConfig_LbSubsetSelector) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[20] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2849,7 +2909,7 @@ type Cluster_CommonLbConfig_ZoneAwareLbConfig struct { func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) Reset() { *x = Cluster_CommonLbConfig_ZoneAwareLbConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[21] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2862,7 +2922,7 @@ func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) String() string { func (*Cluster_CommonLbConfig_ZoneAwareLbConfig) ProtoMessage() {} func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) ProtoReflect() 
protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[21] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2910,7 +2970,7 @@ type Cluster_CommonLbConfig_LocalityWeightedLbConfig struct { func (x *Cluster_CommonLbConfig_LocalityWeightedLbConfig) Reset() { *x = Cluster_CommonLbConfig_LocalityWeightedLbConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[22] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2923,7 +2983,7 @@ func (x *Cluster_CommonLbConfig_LocalityWeightedLbConfig) String() string { func (*Cluster_CommonLbConfig_LocalityWeightedLbConfig) ProtoMessage() {} func (x *Cluster_CommonLbConfig_LocalityWeightedLbConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[22] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2945,7 +3005,7 @@ type Cluster_CommonLbConfig_ConsistentHashingLbConfig struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // If set to `true`, the cluster will use hostname instead of the resolved + // If set to ``true``, the cluster will use hostname instead of the resolved // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. UseHostnameForHashing bool `protobuf:"varint,1,opt,name=use_hostname_for_hashing,json=useHostnameForHashing,proto3" json:"use_hostname_for_hashing,omitempty"` // Configures percentage of average cluster load to bound per upstream host. 
For example, with a value of 150 @@ -2956,7 +3016,7 @@ type Cluster_CommonLbConfig_ConsistentHashingLbConfig struct { // Applies to both Ring Hash and Maglev load balancers. // // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified - // `hash_balance_factor`, requests to any upstream host are capped at `hash_balance_factor/100` times the average number of requests + // ``hash_balance_factor``, requests to any upstream host are capped at ``hash_balance_factor/100`` times the average number of requests // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing // is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the @@ -2964,7 +3024,7 @@ type Cluster_CommonLbConfig_ConsistentHashingLbConfig struct { // // If weights are specified on the hosts, they are respected. // - // This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor` results in more hosts + // This is an O(N) algorithm, unlike other load balancers. Using a lower ``hash_balance_factor`` results in more hosts // being probed, so use a higher value if you require better performance. 
HashBalanceFactor *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` } @@ -2972,7 +3032,7 @@ type Cluster_CommonLbConfig_ConsistentHashingLbConfig struct { func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) Reset() { *x = Cluster_CommonLbConfig_ConsistentHashingLbConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[23] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2985,7 +3045,7 @@ func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) String() string { func (*Cluster_CommonLbConfig_ConsistentHashingLbConfig) ProtoMessage() {} func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[23] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3020,13 +3080,14 @@ type LoadBalancingPolicy_Policy struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // [#extension-category: envoy.load_balancing_policies] TypedExtensionConfig *v32.TypedExtensionConfig `protobuf:"bytes,4,opt,name=typed_extension_config,json=typedExtensionConfig,proto3" json:"typed_extension_config,omitempty"` } func (x *LoadBalancingPolicy_Policy) Reset() { *x = LoadBalancingPolicy_Policy{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[24] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3039,7 +3100,7 @@ func (x *LoadBalancingPolicy_Policy) String() string { func (*LoadBalancingPolicy_Policy) ProtoMessage() {} func (x *LoadBalancingPolicy_Policy) 
ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[24] + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3125,7 +3186,7 @@ var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{ 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, - 0x8c, 0x50, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x18, 0x74, + 0xcf, 0x52, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x18, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x18, 0x2b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, @@ -3449,7 +3510,7 @@ var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{ 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x45, 0x64, 0x73, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xc0, 0x08, 0x0a, 0x0e, 0x4c, + 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xa4, 0x0a, 0x0a, 0x0e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x79, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 
0x79, 0x2e, 0x63, @@ -3480,355 +3541,367 @@ var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{ 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x1e, 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x41, 0x73, - 0x41, 0x6e, 0x79, 0x1a, 0xda, 0x03, 0x0a, 0x10, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, - 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x33, 0x0a, 0x16, - 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x5f, - 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x73, 0x69, - 0x6e, 0x67, 0x6c, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x65, - 0x74, 0x12, 0x92, 0x01, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5f, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, - 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, - 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4c, 0x62, - 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x61, - 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, - 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, - 0x63, 0x6b, 0x5f, 0x6b, 0x65, 
0x79, 0x73, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4b, 0x65, - 0x79, 0x73, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x22, 0x79, 0x0a, 0x1e, 0x4c, 0x62, 0x53, 0x75, - 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x61, 0x6c, 0x6c, - 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, - 0x54, 0x5f, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, - 0x4f, 0x5f, 0x46, 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, - 0x41, 0x4e, 0x59, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x12, - 0x0a, 0x0e, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, - 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x4b, 0x45, 0x59, 0x53, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, - 0x54, 0x10, 0x04, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x22, 0x4f, 0x0a, 0x16, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x46, 0x61, 0x6c, 0x6c, - 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, - 0x5f, 0x46, 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x41, - 0x4e, 0x59, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x12, 0x0a, - 0x0e, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, - 0x02, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 
0x74, 0x65, 0x72, 0x2e, 0x4c, - 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe3, 0x01, - 0x0a, 0x0f, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x45, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, - 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x67, 0x67, 0x72, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, - 0x65, 0x52, 0x0a, 0x61, 0x67, 0x67, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, - 0x12, 0x6d, 0x69, 0x6e, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, - 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x52, 0x10, 0x6d, 0x69, 0x6e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x50, 0x65, 0x72, 0x63, - 0x65, 0x6e, 0x74, 0x1a, 0x72, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, - 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, - 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, - 0x6c, 
0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xc5, 0x02, 0x0a, 0x14, 0x4c, 0x65, 0x61, 0x73, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x02, 0x52, 0x0b, 0x63, - 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x13, 0x61, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x69, 0x61, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x11, 0x61, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x69, 0x61, 0x73, 0x12, - 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x6c, 0x6f, - 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, - 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, - 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 
0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x91, 0x03, 0x0a, 0x10, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x54, 0x0a, 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, - 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, - 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, - 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x6d, 0x0a, 0x0d, 0x68, 0x61, - 0x73, 0x68, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x68, 0x61, 0x73, - 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x11, 0x6d, 0x61, 0x78, - 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, 0x80, 0x80, 
0x04, 0x52, 0x0f, - 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, - 0x2e, 0x0a, 0x0c, 0x48, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x0b, 0x0a, 0x07, 0x58, 0x58, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, - 0x4d, 0x55, 0x52, 0x4d, 0x55, 0x52, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x32, 0x10, 0x01, 0x3a, - 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x6e, - 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, - 0x02, 0x10, 0x03, 0x1a, 0x59, 0x0a, 0x0e, 0x4d, 0x61, 0x67, 0x6c, 0x65, 0x76, 0x4c, 0x62, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0xcb, - 0x96, 0xb1, 0x02, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x98, - 0x01, 0x0a, 0x13, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, 0x74, 0x4c, 0x62, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x26, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x74, - 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0d, 0x75, 0x73, 0x65, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x28, - 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, - 0x28, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, - 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd5, 0x0b, 0x0a, 0x0e, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x17, - 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x68, - 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x41, 0x6e, 0x79, 0x12, 0x92, 0x01, 0x0a, 0x18, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x5f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x16, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, + 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0xda, 0x03, 0x0a, 0x10, 0x4c, 0x62, 0x53, + 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, + 0x73, 0x12, 0x33, 0x0a, 0x16, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x13, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 
0x48, 0x6f, 0x73, 0x74, 0x50, 0x65, 0x72, + 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x12, 0x92, 0x01, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, + 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x5f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0e, 0x66, 0x61, 0x6c, + 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x66, + 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x5f, 0x73, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x66, 0x61, 0x6c, 0x6c, 0x62, + 0x61, 0x63, 0x6b, 0x4b, 0x65, 0x79, 0x73, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x22, 0x79, 0x0a, + 0x1e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x54, 0x5f, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x46, 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, + 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4e, 0x59, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, + 0x54, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x53, + 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x4b, 0x45, 0x59, 0x53, 
0x5f, + 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x04, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, + 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x4f, 0x0a, 0x16, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x46, 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x00, + 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4e, 0x59, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, + 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x53, 0x55, + 0x42, 0x53, 0x45, 0x54, 0x10, 0x02, 0x22, 0x4d, 0x0a, 0x1e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, + 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, + 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x4d, 0x45, 0x54, 0x41, + 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4e, 0x4f, 0x5f, 0x46, 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, + 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x4c, + 0x49, 0x53, 0x54, 0x10, 0x01, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x1a, 0xe3, 0x01, 0x0a, 0x0f, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x73, 0x6c, 0x6f, + 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x0a, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, + 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x0a, 0x61, 0x67, 0x67, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x44, 0x0a, 0x12, 0x6d, 0x69, 0x6e, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x15, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x50, 0x61, - 0x6e, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x74, 0x0a, 0x14, - 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x61, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, - 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, - 0x11, 0x7a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, - 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 
0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x10, 0x6d, 0x69, 0x6e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x72, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x6e, 0x64, + 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, + 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xc5, 0x02, 0x0a, 0x14, + 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, + 0x02, 0x52, 0x0b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, + 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x62, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 
0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, + 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, + 0x69, 0x61, 0x73, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, + 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0x91, 0x03, 0x0a, 0x10, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, + 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x54, 0x0a, 0x11, 0x6d, 0x69, 0x6e, 0x69, + 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x6d, + 0x0a, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 
0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, + 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x0c, 0x68, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, + 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, + 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, 0x80, + 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, + 0x69, 0x7a, 0x65, 0x22, 0x2e, 0x0a, 0x0c, 0x48, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x58, 0x58, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x00, + 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x55, 0x52, 0x4d, 0x55, 0x52, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, + 0x32, 0x10, 0x01, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x1a, 0x59, 0x0a, 0x0e, 0x4d, 0x61, 0x67, 0x6c, 0x65, + 0x76, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, + 0x32, 0x05, 0x18, 0xcb, 0x96, 0xb1, 0x02, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x1a, 0xf7, 0x01, 0x0a, 0x13, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, + 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x26, 0x0a, 0x0f, 0x75, 0x73, + 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x75, 0x73, 0x65, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, + 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x5d, 0x0a, 0x16, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, + 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, + 0x6f, 0x72, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, + 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, + 0x6c, 0x44, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd5, 0x0b, 0x0a, + 0x0e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x4e, 0x0a, 0x17, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, + 0x5f, 0x74, 0x68, 0x72, 
0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x15, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x79, 0x50, 0x61, 0x6e, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, + 0x74, 0x0a, 0x14, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x61, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x6c, 0x62, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x5a, + 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x48, 0x00, 0x52, 0x11, 0x7a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x49, 0x0a, 0x13, 0x75, 0x70, 0x64, 0x61, 0x74, 
0x65, 0x5f, 0x6d, 0x65, 0x72, 0x67, + 0x65, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x1f, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, + 0x5f, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x68, 0x63, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4e, 0x65, 0x77, + 0x48, 0x6f, 0x73, 0x74, 0x73, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x46, 0x69, 0x72, 0x73, 0x74, 0x48, + 0x63, 0x12, 0x4d, 0x0a, 0x24, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, + 0x65, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x1f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x4f, 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x8a, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 
0x69, 0x67, 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, + 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, + 0x65, 0x74, 0x52, 0x12, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x48, 0x6f, 0x73, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x8d, 0x02, 0x0a, 0x11, 0x5a, 0x6f, 0x6e, 0x65, 0x41, + 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0f, + 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x46, 0x0a, + 0x10, 0x6d, 0x69, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x74, 0x72, + 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, + 0x63, 0x4f, 0x6e, 0x50, 0x61, 0x6e, 
0x69, 0x63, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, + 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x5f, 0x0a, 0x18, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x48, 0x00, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x49, - 0x0a, 0x13, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, 0x77, - 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x65, - 0x72, 0x67, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x1f, 0x69, 0x67, 0x6e, - 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x5f, 0x75, 0x6e, - 0x74, 0x69, 0x6c, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x68, 0x63, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4e, 0x65, 0x77, 0x48, 0x6f, 0x73, - 0x74, 0x73, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x46, 0x69, 0x72, 0x73, 0x74, 0x48, 0x63, 0x12, 0x4d, - 0x0a, 0x24, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x5f, - 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x63, 0x6c, - 0x6f, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x4f, 0x6e, - 0x48, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x8a, 0x01, - 0x0a, 0x1c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, - 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, - 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x14, 0x6f, 0x76, - 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x74, 0x52, - 0x12, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x1a, 0x8d, 0x02, 0x0a, 0x11, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, - 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 
0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x46, 0x0a, 0x10, 0x6d, 0x69, - 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x69, - 0x7a, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, - 0x69, 0x63, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x4f, 0x6e, - 0x50, 0x61, 0x6e, 0x69, 0x63, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, 0x6e, + 0x69, 0x67, 0x3a, 0x43, 0x9a, 0xc5, 0x88, 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xf1, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x73, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, + 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, + 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x55, + 0x0a, 0x13, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, + 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, + 0x28, 0x64, 0x52, 0x11, 0x68, 0x61, 0x73, 0x68, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, + 0x61, 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x44, 0x9a, 0xc5, 0x88, 0x1e, 0x3f, 0x0a, 0x3d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x1a, 0x5f, 0x0a, 0x18, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, - 0x43, 0x9a, 0xc5, 0x88, 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, - 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xf1, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x37, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, - 0x65, 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x55, 0x0a, 0x13, 0x68, - 0x61, 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, 0x74, - 
0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, 0x52, - 0x11, 0x68, 0x61, 0x73, 0x68, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, 0x74, - 0x6f, 0x72, 0x3a, 0x44, 0x9a, 0xc5, 0x88, 0x1e, 0x3f, 0x0a, 0x3d, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, - 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, - 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, - 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x42, 0x1b, 0x0a, 0x19, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x1a, 0xd2, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, - 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, - 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x2a, 0x04, 0x10, - 0xc0, 0x84, 0x3d, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, - 0x6c, 0x12, 0x4a, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, - 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x2a, 0x04, 0x10, 0xc0, 0x84, 0x3d, - 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x27, 0x9a, - 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, - 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x02, 0x0a, 0x10, 0x50, 0x72, 0x65, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x78, 0x0a, 0x1d, 0x70, - 0x65, 0x72, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, 0x72, 0x65, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40, - 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x55, 0x70, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x75, 0x0a, 0x1b, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, - 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40, 0x29, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0xf0, - 0x3f, 0x52, 0x19, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x69, 0x76, 0x65, 0x50, 0x72, 0x65, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x66, 0x0a, 0x22, - 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, - 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, 0x10, - 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x5f, 0x44, 0x4e, 0x53, 0x10, - 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x4e, 0x53, - 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x44, 0x53, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x4f, - 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x53, 0x54, 0x10, 0x04, 0x22, 0xa4, 0x01, - 0x0a, 0x08, 0x4c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, - 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, - 0x45, 0x41, 0x53, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x02, 0x12, 0x0a, 0x0a, - 0x06, 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, 0x47, - 0x4c, 0x45, 0x56, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, - 0x5f, 0x50, 
0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x44, 0x10, 0x06, 0x12, 0x20, 0x0a, 0x1c, 0x4c, - 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x4f, - 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, 0x07, 0x22, 0x04, 0x08, - 0x04, 0x10, 0x04, 0x2a, 0x0f, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x53, - 0x54, 0x5f, 0x4c, 0x42, 0x22, 0x50, 0x0a, 0x0f, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, - 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, 0x4f, 0x10, - 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x34, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x0b, - 0x0a, 0x07, 0x56, 0x36, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x56, - 0x34, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x45, 0x52, 0x52, 0x45, 0x44, 0x10, 0x03, 0x12, 0x07, 0x0a, - 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x04, 0x22, 0x54, 0x0a, 0x18, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, - 0x55, 0x52, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x00, 0x12, - 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x53, 0x54, 0x52, 0x45, 0x41, - 0x4d, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x01, 0x3a, 0x1b, 0x9a, 0xc5, - 0x88, 0x1e, 0x16, 0x0a, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x07, - 0x10, 0x08, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 
0x4a, 0x04, 0x08, 0x23, 0x10, 0x24, 0x52, 0x05, - 0x68, 0x6f, 0x73, 0x74, 0x73, 0x52, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x52, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xda, - 0x02, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x60, 0x0a, 0x16, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x14, - 0x74, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x01, 
0x10, 0x02, - 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x82, 0x01, 0x0a, 0x12, - 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x44, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, - 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x22, 0xf9, 0x01, 0x0a, 0x19, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x47, - 0x0a, 0x0d, 0x74, 0x63, 0x70, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x63, 0x70, - 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x52, 0x0c, 0x74, 0x63, 0x70, 0x4b, 0x65, - 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x64, 0x0a, 0x30, 0x73, 0x65, 0x74, 0x5f, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 
0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x2a, 0x73, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x66, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4f, 0x6e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2d, 0x9a, - 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x72, 0x0a, 0x11, - 0x54, 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x75, 0x64, - 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, - 0x42, 0x89, 0x01, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, - 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, + 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x1b, 0x0a, 0x19, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x1a, 0xd2, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, + 0x52, 0x61, 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, + 0x2a, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x12, 0x4a, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 
0x06, 0x2a, 0x04, 0x10, + 0xc0, 0x84, 0x3d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, + 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x02, 0x0a, 0x10, 0x50, 0x72, + 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x78, + 0x0a, 0x1d, 0x70, 0x65, 0x72, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, + 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x52, 0x1a, 0x70, 0x65, + 0x72, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x75, 0x0a, 0x1b, 0x70, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, + 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xf0, 0x3f, 0x52, 0x19, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, + 0x66, 0x0a, 
0x22, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54, + 0x49, 0x43, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x5f, 0x44, + 0x4e, 0x53, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, + 0x44, 0x4e, 0x53, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x44, 0x53, 0x10, 0x03, 0x12, 0x10, + 0x0a, 0x0c, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x53, 0x54, 0x10, 0x04, + 0x22, 0xa4, 0x01, 0x0a, 0x08, 0x4c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, + 0x0b, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x11, + 0x0a, 0x0d, 0x4c, 0x45, 0x41, 0x53, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, + 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x02, + 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, + 0x4d, 0x41, 0x47, 0x4c, 0x45, 0x56, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4c, 0x55, 0x53, + 0x54, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x44, 0x10, 0x06, 0x12, 0x20, + 0x0a, 0x1c, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x49, 0x4e, 0x47, + 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 
0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, 0x07, + 0x22, 0x04, 0x08, 0x04, 0x10, 0x04, 0x2a, 0x0f, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, + 0x5f, 0x44, 0x53, 0x54, 0x5f, 0x4c, 0x42, 0x22, 0x50, 0x0a, 0x0f, 0x44, 0x6e, 0x73, 0x4c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, + 0x54, 0x4f, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x34, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x36, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x10, + 0x0a, 0x0c, 0x56, 0x34, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x45, 0x52, 0x52, 0x45, 0x44, 0x10, 0x03, + 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x04, 0x22, 0x54, 0x0a, 0x18, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x43, 0x4f, 0x4e, + 0x46, 0x49, 0x47, 0x55, 0x52, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, + 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x53, 0x54, + 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x01, 0x3a, + 0x1b, 0x9a, 0xc5, 0x88, 0x1e, 0x16, 0x0a, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, + 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x23, 0x10, + 0x24, 0x52, 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x52, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 
0x6f, 0x6e, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0xda, 0x02, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x06, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x60, 0x0a, 0x16, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, + 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xf9, + 0x01, 0x0a, 0x19, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x0d, + 0x74, 0x63, 0x70, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x63, 0x70, 0x4b, 0x65, + 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x52, 0x0c, 0x74, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, + 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x64, 0x0a, 0x30, 0x73, 0x65, 0x74, 0x5f, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x2a, 0x73, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, + 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4f, 0x6e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, + 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x72, 0x0a, 0x11, 0x54, 0x72, + 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, + 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 
0x5f, 0x62, 0x75, 0x64, 0x67, 0x65, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x42, 0x89, + 0x01, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -3843,20 +3916,20 @@ func file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP() []byte { return file_envoy_config_cluster_v3_cluster_proto_rawDescData } -var file_envoy_config_cluster_v3_cluster_proto_enumTypes = make([]protoimpl.EnumInfo, 7) -var file_envoy_config_cluster_v3_cluster_proto_msgTypes = make([]protoimpl.MessageInfo, 25) +var file_envoy_config_cluster_v3_cluster_proto_enumTypes = make([]protoimpl.EnumInfo, 8) +var file_envoy_config_cluster_v3_cluster_proto_msgTypes = make([]protoimpl.MessageInfo, 24) var 
file_envoy_config_cluster_v3_cluster_proto_goTypes = []interface{}{ (Cluster_DiscoveryType)(0), // 0: envoy.config.cluster.v3.Cluster.DiscoveryType (Cluster_LbPolicy)(0), // 1: envoy.config.cluster.v3.Cluster.LbPolicy (Cluster_DnsLookupFamily)(0), // 2: envoy.config.cluster.v3.Cluster.DnsLookupFamily (Cluster_ClusterProtocolSelection)(0), // 3: envoy.config.cluster.v3.Cluster.ClusterProtocolSelection (Cluster_LbSubsetConfig_LbSubsetFallbackPolicy)(0), // 4: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy - (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy)(0), // 5: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy - (Cluster_RingHashLbConfig_HashFunction)(0), // 6: envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunction - (*ClusterCollection)(nil), // 7: envoy.config.cluster.v3.ClusterCollection - (*Cluster)(nil), // 8: envoy.config.cluster.v3.Cluster - (*LoadBalancingPolicy)(nil), // 9: envoy.config.cluster.v3.LoadBalancingPolicy - (*UpstreamBindConfig)(nil), // 10: envoy.config.cluster.v3.UpstreamBindConfig + (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy)(0), // 5: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy + (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy)(0), // 6: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy + (Cluster_RingHashLbConfig_HashFunction)(0), // 7: envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunction + (*ClusterCollection)(nil), // 8: envoy.config.cluster.v3.ClusterCollection + (*Cluster)(nil), // 9: envoy.config.cluster.v3.Cluster + (*LoadBalancingPolicy)(nil), // 10: envoy.config.cluster.v3.LoadBalancingPolicy (*UpstreamConnectionOptions)(nil), // 11: envoy.config.cluster.v3.UpstreamConnectionOptions (*TrackClusterStats)(nil), // 12: envoy.config.cluster.v3.TrackClusterStats (*Cluster_TransportSocketMatch)(nil), // 13: 
envoy.config.cluster.v3.Cluster.TransportSocketMatch @@ -3900,7 +3973,7 @@ var file_envoy_config_cluster_v3_cluster_proto_goTypes = []interface{}{ (*v32.ConfigSource)(nil), // 51: envoy.config.core.v3.ConfigSource (*v32.TcpKeepalive)(nil), // 52: envoy.config.core.v3.TcpKeepalive (*_struct.Struct)(nil), // 53: google.protobuf.Struct - (*any.Any)(nil), // 54: google.protobuf.Any + (*any1.Any)(nil), // 54: google.protobuf.Any (*v32.RuntimeDouble)(nil), // 55: envoy.config.core.v3.RuntimeDouble (*v33.Percent)(nil), // 56: envoy.type.v3.Percent (*wrappers.UInt64Value)(nil), // 57: google.protobuf.UInt64Value @@ -3947,21 +4020,21 @@ var file_envoy_config_cluster_v3_cluster_proto_depIdxs = []int32{ 3, // 36: envoy.config.cluster.v3.Cluster.protocol_selection:type_name -> envoy.config.cluster.v3.Cluster.ClusterProtocolSelection 11, // 37: envoy.config.cluster.v3.Cluster.upstream_connection_options:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions 50, // 38: envoy.config.cluster.v3.Cluster.filters:type_name -> envoy.config.cluster.v3.Filter - 9, // 39: envoy.config.cluster.v3.Cluster.load_balancing_policy:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy + 10, // 39: envoy.config.cluster.v3.Cluster.load_balancing_policy:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy 51, // 40: envoy.config.cluster.v3.Cluster.lrs_server:type_name -> envoy.config.core.v3.ConfigSource 44, // 41: envoy.config.cluster.v3.Cluster.upstream_config:type_name -> envoy.config.core.v3.TypedExtensionConfig 12, // 42: envoy.config.cluster.v3.Cluster.track_cluster_stats:type_name -> envoy.config.cluster.v3.TrackClusterStats 25, // 43: envoy.config.cluster.v3.Cluster.preconnect_policy:type_name -> envoy.config.cluster.v3.Cluster.PreconnectPolicy 31, // 44: envoy.config.cluster.v3.LoadBalancingPolicy.policies:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy.Policy - 42, // 45: envoy.config.cluster.v3.UpstreamBindConfig.source_address:type_name -> 
envoy.config.core.v3.Address - 52, // 46: envoy.config.cluster.v3.UpstreamConnectionOptions.tcp_keepalive:type_name -> envoy.config.core.v3.TcpKeepalive - 53, // 47: envoy.config.cluster.v3.Cluster.TransportSocketMatch.match:type_name -> google.protobuf.Struct - 48, // 48: envoy.config.cluster.v3.Cluster.TransportSocketMatch.transport_socket:type_name -> envoy.config.core.v3.TransportSocket - 54, // 49: envoy.config.cluster.v3.Cluster.CustomClusterType.typed_config:type_name -> google.protobuf.Any - 51, // 50: envoy.config.cluster.v3.Cluster.EdsClusterConfig.eds_config:type_name -> envoy.config.core.v3.ConfigSource - 4, // 51: envoy.config.cluster.v3.Cluster.LbSubsetConfig.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy - 53, // 52: envoy.config.cluster.v3.Cluster.LbSubsetConfig.default_subset:type_name -> google.protobuf.Struct - 27, // 53: envoy.config.cluster.v3.Cluster.LbSubsetConfig.subset_selectors:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector + 52, // 45: envoy.config.cluster.v3.UpstreamConnectionOptions.tcp_keepalive:type_name -> envoy.config.core.v3.TcpKeepalive + 53, // 46: envoy.config.cluster.v3.Cluster.TransportSocketMatch.match:type_name -> google.protobuf.Struct + 48, // 47: envoy.config.cluster.v3.Cluster.TransportSocketMatch.transport_socket:type_name -> envoy.config.core.v3.TransportSocket + 54, // 48: envoy.config.cluster.v3.Cluster.CustomClusterType.typed_config:type_name -> google.protobuf.Any + 51, // 49: envoy.config.cluster.v3.Cluster.EdsClusterConfig.eds_config:type_name -> envoy.config.core.v3.ConfigSource + 4, // 50: envoy.config.cluster.v3.Cluster.LbSubsetConfig.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy + 53, // 51: envoy.config.cluster.v3.Cluster.LbSubsetConfig.default_subset:type_name -> google.protobuf.Struct + 27, // 52: envoy.config.cluster.v3.Cluster.LbSubsetConfig.subset_selectors:type_name -> 
envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector + 5, // 53: envoy.config.cluster.v3.Cluster.LbSubsetConfig.metadata_fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy 33, // 54: envoy.config.cluster.v3.Cluster.SlowStartConfig.slow_start_window:type_name -> google.protobuf.Duration 55, // 55: envoy.config.cluster.v3.Cluster.SlowStartConfig.aggression:type_name -> envoy.config.core.v3.RuntimeDouble 56, // 56: envoy.config.cluster.v3.Cluster.SlowStartConfig.min_weight_percent:type_name -> envoy.type.v3.Percent @@ -3970,30 +4043,31 @@ var file_envoy_config_cluster_v3_cluster_proto_depIdxs = []int32{ 55, // 59: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.active_request_bias:type_name -> envoy.config.core.v3.RuntimeDouble 17, // 60: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.slow_start_config:type_name -> envoy.config.cluster.v3.Cluster.SlowStartConfig 57, // 61: envoy.config.cluster.v3.Cluster.RingHashLbConfig.minimum_ring_size:type_name -> google.protobuf.UInt64Value - 6, // 62: envoy.config.cluster.v3.Cluster.RingHashLbConfig.hash_function:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunction + 7, // 62: envoy.config.cluster.v3.Cluster.RingHashLbConfig.hash_function:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunction 57, // 63: envoy.config.cluster.v3.Cluster.RingHashLbConfig.maximum_ring_size:type_name -> google.protobuf.UInt64Value 57, // 64: envoy.config.cluster.v3.Cluster.MaglevLbConfig.table_size:type_name -> google.protobuf.UInt64Value - 56, // 65: envoy.config.cluster.v3.Cluster.CommonLbConfig.healthy_panic_threshold:type_name -> envoy.type.v3.Percent - 28, // 66: envoy.config.cluster.v3.Cluster.CommonLbConfig.zone_aware_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig - 29, // 67: envoy.config.cluster.v3.Cluster.CommonLbConfig.locality_weighted_lb_config:type_name -> 
envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig - 33, // 68: envoy.config.cluster.v3.Cluster.CommonLbConfig.update_merge_window:type_name -> google.protobuf.Duration - 30, // 69: envoy.config.cluster.v3.Cluster.CommonLbConfig.consistent_hashing_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig - 58, // 70: envoy.config.cluster.v3.Cluster.CommonLbConfig.override_host_status:type_name -> envoy.config.core.v3.HealthStatusSet - 33, // 71: envoy.config.cluster.v3.Cluster.RefreshRate.base_interval:type_name -> google.protobuf.Duration - 33, // 72: envoy.config.cluster.v3.Cluster.RefreshRate.max_interval:type_name -> google.protobuf.Duration - 59, // 73: envoy.config.cluster.v3.Cluster.PreconnectPolicy.per_upstream_preconnect_ratio:type_name -> google.protobuf.DoubleValue - 59, // 74: envoy.config.cluster.v3.Cluster.PreconnectPolicy.predictive_preconnect_ratio:type_name -> google.protobuf.DoubleValue - 54, // 75: envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry.value:type_name -> google.protobuf.Any - 5, // 76: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy - 56, // 77: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.routing_enabled:type_name -> envoy.type.v3.Percent - 57, // 78: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.min_cluster_size:type_name -> google.protobuf.UInt64Value - 34, // 79: envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig.hash_balance_factor:type_name -> google.protobuf.UInt32Value - 44, // 80: envoy.config.cluster.v3.LoadBalancingPolicy.Policy.typed_extension_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 81, // [81:81] is the sub-list for method output_type - 81, // [81:81] is the sub-list for method input_type - 81, // [81:81] is the sub-list for 
extension type_name - 81, // [81:81] is the sub-list for extension extendee - 0, // [0:81] is the sub-list for field type_name + 34, // 65: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig.upstream_port_override:type_name -> google.protobuf.UInt32Value + 56, // 66: envoy.config.cluster.v3.Cluster.CommonLbConfig.healthy_panic_threshold:type_name -> envoy.type.v3.Percent + 28, // 67: envoy.config.cluster.v3.Cluster.CommonLbConfig.zone_aware_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig + 29, // 68: envoy.config.cluster.v3.Cluster.CommonLbConfig.locality_weighted_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig + 33, // 69: envoy.config.cluster.v3.Cluster.CommonLbConfig.update_merge_window:type_name -> google.protobuf.Duration + 30, // 70: envoy.config.cluster.v3.Cluster.CommonLbConfig.consistent_hashing_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig + 58, // 71: envoy.config.cluster.v3.Cluster.CommonLbConfig.override_host_status:type_name -> envoy.config.core.v3.HealthStatusSet + 33, // 72: envoy.config.cluster.v3.Cluster.RefreshRate.base_interval:type_name -> google.protobuf.Duration + 33, // 73: envoy.config.cluster.v3.Cluster.RefreshRate.max_interval:type_name -> google.protobuf.Duration + 59, // 74: envoy.config.cluster.v3.Cluster.PreconnectPolicy.per_upstream_preconnect_ratio:type_name -> google.protobuf.DoubleValue + 59, // 75: envoy.config.cluster.v3.Cluster.PreconnectPolicy.predictive_preconnect_ratio:type_name -> google.protobuf.DoubleValue + 54, // 76: envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry.value:type_name -> google.protobuf.Any + 6, // 77: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy + 56, // 78: 
envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.routing_enabled:type_name -> envoy.type.v3.Percent + 57, // 79: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.min_cluster_size:type_name -> google.protobuf.UInt64Value + 34, // 80: envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig.hash_balance_factor:type_name -> google.protobuf.UInt32Value + 44, // 81: envoy.config.cluster.v3.LoadBalancingPolicy.Policy.typed_extension_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 82, // [82:82] is the sub-list for method output_type + 82, // [82:82] is the sub-list for method input_type + 82, // [82:82] is the sub-list for extension type_name + 82, // [82:82] is the sub-list for extension extendee + 0, // [0:82] is the sub-list for field type_name } func init() { file_envoy_config_cluster_v3_cluster_proto_init() } @@ -4042,18 +4116,6 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { } } file_envoy_config_cluster_v3_cluster_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpstreamBindConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpstreamConnectionOptions); i { case 0: return &v.state @@ -4065,7 +4127,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TrackClusterStats); i { case 0: return &v.state @@ -4077,7 +4139,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[6].Exporter = func(v interface{}, i int) 
interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_TransportSocketMatch); i { case 0: return &v.state @@ -4089,7 +4151,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_CustomClusterType); i { case 0: return &v.state @@ -4101,7 +4163,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_EdsClusterConfig); i { case 0: return &v.state @@ -4113,7 +4175,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_LbSubsetConfig); i { case 0: return &v.state @@ -4125,7 +4187,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_SlowStartConfig); i { case 0: return &v.state @@ -4137,7 +4199,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[10].Exporter 
= func(v interface{}, i int) interface{} { switch v := v.(*Cluster_RoundRobinLbConfig); i { case 0: return &v.state @@ -4149,7 +4211,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_LeastRequestLbConfig); i { case 0: return &v.state @@ -4161,7 +4223,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_RingHashLbConfig); i { case 0: return &v.state @@ -4173,7 +4235,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_MaglevLbConfig); i { case 0: return &v.state @@ -4185,7 +4247,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_OriginalDstLbConfig); i { case 0: return &v.state @@ -4197,7 +4259,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*Cluster_CommonLbConfig); i { case 0: return &v.state @@ -4209,7 +4271,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_RefreshRate); i { case 0: return &v.state @@ -4221,7 +4283,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_PreconnectPolicy); i { case 0: return &v.state @@ -4233,7 +4295,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_LbSubsetConfig_LbSubsetSelector); i { case 0: return &v.state @@ -4245,7 +4307,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_CommonLbConfig_ZoneAwareLbConfig); i { case 0: return &v.state @@ -4257,7 +4319,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*Cluster_CommonLbConfig_LocalityWeightedLbConfig); i { case 0: return &v.state @@ -4269,7 +4331,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Cluster_CommonLbConfig_ConsistentHashingLbConfig); i { case 0: return &v.state @@ -4281,7 +4343,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { return nil } } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LoadBalancingPolicy_Policy); i { case 0: return &v.state @@ -4303,7 +4365,7 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { (*Cluster_LeastRequestLbConfig_)(nil), (*Cluster_RoundRobinLbConfig_)(nil), } - file_envoy_config_cluster_v3_cluster_proto_msgTypes[16].OneofWrappers = []interface{}{ + file_envoy_config_cluster_v3_cluster_proto_msgTypes[15].OneofWrappers = []interface{}{ (*Cluster_CommonLbConfig_ZoneAwareLbConfig_)(nil), (*Cluster_CommonLbConfig_LocalityWeightedLbConfig_)(nil), } @@ -4312,8 +4374,8 @@ func file_envoy_config_cluster_v3_cluster_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_cluster_v3_cluster_proto_rawDesc, - NumEnums: 7, - NumMessages: 25, + NumEnums: 8, + NumMessages: 24, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go index 2ad9dfb843..85d0367721 100644 --- 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go @@ -1233,9 +1233,18 @@ func (m *Cluster) validate(all bool) error { // no validation rules for ConnectionPoolPerDownstreamConnection - switch m.ClusterDiscoveryType.(type) { - + switch v := m.ClusterDiscoveryType.(type) { case *Cluster_Type: + if v == nil { + err := ClusterValidationError{ + field: "ClusterDiscoveryType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if _, ok := Cluster_DiscoveryType_name[int32(m.GetType())]; !ok { err := ClusterValidationError{ @@ -1249,6 +1258,16 @@ func (m *Cluster) validate(all bool) error { } case *Cluster_ClusterType: + if v == nil { + err := ClusterValidationError{ + field: "ClusterDiscoveryType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetClusterType()).(type) { @@ -1279,11 +1298,21 @@ func (m *Cluster) validate(all bool) error { } } + default: + _ = v // ensures v is used } - - switch m.LbConfig.(type) { - + switch v := m.LbConfig.(type) { case *Cluster_RingHashLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetRingHashLbConfig()).(type) { @@ -1315,6 +1344,16 @@ func (m *Cluster) validate(all bool) error { } case *Cluster_MaglevLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetMaglevLbConfig()).(type) { @@ -1346,6 +1385,16 @@ func (m *Cluster) 
validate(all bool) error { } case *Cluster_OriginalDstLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetOriginalDstLbConfig()).(type) { @@ -1377,6 +1426,16 @@ func (m *Cluster) validate(all bool) error { } case *Cluster_LeastRequestLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetLeastRequestLbConfig()).(type) { @@ -1408,6 +1467,16 @@ func (m *Cluster) validate(all bool) error { } case *Cluster_RoundRobinLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetRoundRobinLbConfig()).(type) { @@ -1438,6 +1507,8 @@ func (m *Cluster) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -1653,137 +1724,6 @@ var _ interface { ErrorName() string } = LoadBalancingPolicyValidationError{} -// Validate checks the field values on UpstreamBindConfig with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *UpstreamBindConfig) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on UpstreamBindConfig with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// UpstreamBindConfigMultiError, or nil if none found. 
-func (m *UpstreamBindConfig) ValidateAll() error { - return m.validate(true) -} - -func (m *UpstreamBindConfig) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetSourceAddress()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, UpstreamBindConfigValidationError{ - field: "SourceAddress", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, UpstreamBindConfigValidationError{ - field: "SourceAddress", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetSourceAddress()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return UpstreamBindConfigValidationError{ - field: "SourceAddress", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return UpstreamBindConfigMultiError(errors) - } - - return nil -} - -// UpstreamBindConfigMultiError is an error wrapping multiple validation errors -// returned by UpstreamBindConfig.ValidateAll() if the designated constraints -// aren't met. -type UpstreamBindConfigMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m UpstreamBindConfigMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m UpstreamBindConfigMultiError) AllErrors() []error { return m } - -// UpstreamBindConfigValidationError is the validation error returned by -// UpstreamBindConfig.Validate if the designated constraints aren't met. 
-type UpstreamBindConfigValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e UpstreamBindConfigValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e UpstreamBindConfigValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e UpstreamBindConfigValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e UpstreamBindConfigValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e UpstreamBindConfigValidationError) ErrorName() string { - return "UpstreamBindConfigValidationError" -} - -// Error satisfies the builtin error interface -func (e UpstreamBindConfigValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sUpstreamBindConfig.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = UpstreamBindConfigValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = UpstreamBindConfigValidationError{} - // Validate checks the field values on UpstreamConnectionOptions with the rules // defined in the proto definition for this message. If any rules are // violated, the first error encountered is returned, or nil if there are no violations. 
@@ -2574,6 +2514,17 @@ func (m *Cluster_LbSubsetConfig) validate(all bool) error { // no validation rules for ListAsAny + if _, ok := Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy_name[int32(m.GetMetadataFallbackPolicy())]; !ok { + err := Cluster_LbSubsetConfigValidationError{ + field: "MetadataFallbackPolicy", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + if len(errors) > 0 { return Cluster_LbSubsetConfigMultiError(errors) } @@ -3436,6 +3387,21 @@ func (m *Cluster_OriginalDstLbConfig) validate(all bool) error { // no validation rules for HttpHeaderName + if wrapper := m.GetUpstreamPortOverride(); wrapper != nil { + + if wrapper.GetValue() > 65535 { + err := Cluster_OriginalDstLbConfigValidationError{ + field: "UpstreamPortOverride", + reason: "value must be less than or equal to 65535", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + if len(errors) > 0 { return Cluster_OriginalDstLbConfigMultiError(errors) } @@ -3659,9 +3625,18 @@ func (m *Cluster_CommonLbConfig) validate(all bool) error { } } - switch m.LocalityConfigSpecifier.(type) { - + switch v := m.LocalityConfigSpecifier.(type) { case *Cluster_CommonLbConfig_ZoneAwareLbConfig_: + if v == nil { + err := Cluster_CommonLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetZoneAwareLbConfig()).(type) { @@ -3693,6 +3668,16 @@ func (m *Cluster_CommonLbConfig) validate(all bool) error { } case *Cluster_CommonLbConfig_LocalityWeightedLbConfig_: + if v == nil { + err := Cluster_CommonLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetLocalityWeightedLbConfig()).(type) { @@ -3723,6 
+3708,8 @@ func (m *Cluster_CommonLbConfig) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go index 8ec63a3c68..04718a1170 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/cluster/v3/filter.proto package clusterv3 @@ -9,7 +9,7 @@ package clusterv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -34,7 +34,7 @@ type Filter struct { // instantiated. See the supported filters for further documentation. // Note that Envoy's :ref:`downstream network // filters ` are not valid upstream filters. 
- TypedConfig *any.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` + TypedConfig *any1.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` } func (x *Filter) Reset() { @@ -76,7 +76,7 @@ func (x *Filter) GetName() string { return "" } -func (x *Filter) GetTypedConfig() *any.Any { +func (x *Filter) GetTypedConfig() *any1.Any { if x != nil { return x.TypedConfig } @@ -131,8 +131,8 @@ func file_envoy_config_cluster_v3_filter_proto_rawDescGZIP() []byte { var file_envoy_config_cluster_v3_filter_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_config_cluster_v3_filter_proto_goTypes = []interface{}{ - (*Filter)(nil), // 0: envoy.config.cluster.v3.Filter - (*any.Any)(nil), // 1: google.protobuf.Any + (*Filter)(nil), // 0: envoy.config.cluster.v3.Filter + (*any1.Any)(nil), // 1: google.protobuf.Any } var file_envoy_config_cluster_v3_filter_proto_depIdxs = []int32{ 1, // 0: envoy.config.cluster.v3.Filter.typed_config:type_name -> google.protobuf.Any diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go index e8aa21613b..32333d3d1a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/cluster/v3/outlier_detection.proto package clusterv3 @@ -32,9 +32,9 @@ type OutlierDetection struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The number of consecutive 5xx responses or local origin errors that are mapped - // to 5xx error codes before a consecutive 5xx ejection - // occurs. Defaults to 5. + // The number of consecutive server-side error responses (for HTTP traffic, + // 5xx responses; for TCP traffic, connection failures; for Redis, failure to + // respond PONG; etc.) before a consecutive 5xx ejection occurs. Defaults to 5. Consecutive_5Xx *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=consecutive_5xx,json=consecutive5xx,proto3" json:"consecutive_5xx,omitempty"` // The time interval between ejection analysis sweeps. This can result in // both new ejections as well as hosts being returned to service. Defaults diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go index daac929407..2e656bb70b 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/common/matcher/v3/matcher.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go index 953a15afef..a60724bbca 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go @@ -85,9 +85,20 @@ func (m *Matcher) validate(all bool) error { } } - switch m.MatcherType.(type) { - + oneofMatcherTypePresent := false + switch v := m.MatcherType.(type) { case *Matcher_MatcherList_: + if v == nil { + err := MatcherValidationError{ + field: "MatcherType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherTypePresent = true if all { switch v := interface{}(m.GetMatcherList()).(type) { @@ -119,6 +130,17 @@ func (m *Matcher) validate(all bool) error { } case *Matcher_MatcherTree_: + if v == nil { + err := MatcherValidationError{ + field: "MatcherType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherTypePresent = true if all { switch v := interface{}(m.GetMatcherTree()).(type) { @@ -150,6 +172,9 @@ func (m *Matcher) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofMatcherTypePresent { err := MatcherValidationError{ field: "MatcherType", reason: "value is required", @@ -158,7 +183,6 @@ func (m *Matcher) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -260,9 +284,20 @@ func (m 
*MatchPredicate) validate(all bool) error { var errors []error - switch m.Rule.(type) { - + oneofRulePresent := false + switch v := m.Rule.(type) { case *MatchPredicate_OrMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetOrMatch()).(type) { @@ -294,6 +329,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_AndMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetAndMatch()).(type) { @@ -325,6 +371,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_NotMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetNotMatch()).(type) { @@ -356,6 +413,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_AnyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if m.GetAnyMatch() != true { err := MatchPredicateValidationError{ @@ -369,6 +437,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_HttpRequestHeadersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := 
interface{}(m.GetHttpRequestHeadersMatch()).(type) { @@ -400,6 +479,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_HttpRequestTrailersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetHttpRequestTrailersMatch()).(type) { @@ -431,6 +521,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_HttpResponseHeadersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetHttpResponseHeadersMatch()).(type) { @@ -462,6 +563,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_HttpResponseTrailersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetHttpResponseTrailersMatch()).(type) { @@ -493,6 +605,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_HttpRequestGenericBodyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetHttpRequestGenericBodyMatch()).(type) { @@ -524,6 +647,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_HttpResponseGenericBodyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = 
append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetHttpResponseGenericBodyMatch()).(type) { @@ -555,6 +689,9 @@ func (m *MatchPredicate) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofRulePresent { err := MatchPredicateValidationError{ field: "Rule", reason: "value is required", @@ -563,7 +700,6 @@ func (m *MatchPredicate) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -949,9 +1085,20 @@ func (m *Matcher_OnMatch) validate(all bool) error { var errors []error - switch m.OnMatch.(type) { - + oneofOnMatchPresent := false + switch v := m.OnMatch.(type) { case *Matcher_OnMatch_Matcher: + if v == nil { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOnMatchPresent = true if all { switch v := interface{}(m.GetMatcher()).(type) { @@ -983,6 +1130,17 @@ func (m *Matcher_OnMatch) validate(all bool) error { } case *Matcher_OnMatch_Action: + if v == nil { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOnMatchPresent = true if all { switch v := interface{}(m.GetAction()).(type) { @@ -1014,6 +1172,9 @@ func (m *Matcher_OnMatch) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofOnMatchPresent { err := Matcher_OnMatchValidationError{ field: "OnMatch", reason: "value is required", @@ -1022,7 +1183,6 @@ func (m *Matcher_OnMatch) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -1312,9 +1472,20 @@ func (m *Matcher_MatcherTree) validate(all bool) error { } } - switch m.TreeType.(type) { - + oneofTreeTypePresent := false + switch v := m.TreeType.(type) { case *Matcher_MatcherTree_ExactMatchMap: + if v == nil { + err := 
Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true if all { switch v := interface{}(m.GetExactMatchMap()).(type) { @@ -1346,6 +1517,17 @@ func (m *Matcher_MatcherTree) validate(all bool) error { } case *Matcher_MatcherTree_PrefixMatchMap: + if v == nil { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true if all { switch v := interface{}(m.GetPrefixMatchMap()).(type) { @@ -1377,6 +1559,17 @@ func (m *Matcher_MatcherTree) validate(all bool) error { } case *Matcher_MatcherTree_CustomMatch: + if v == nil { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true if all { switch v := interface{}(m.GetCustomMatch()).(type) { @@ -1408,6 +1601,9 @@ func (m *Matcher_MatcherTree) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofTreeTypePresent { err := Matcher_MatcherTreeValidationError{ field: "TreeType", reason: "value is required", @@ -1416,7 +1612,6 @@ func (m *Matcher_MatcherTree) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -1521,9 +1716,20 @@ func (m *Matcher_MatcherList_Predicate) validate(all bool) error { var errors []error - switch m.MatchType.(type) { - + oneofMatchTypePresent := false + switch v := m.MatchType.(type) { case *Matcher_MatcherList_Predicate_SinglePredicate_: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true if all { switch 
v := interface{}(m.GetSinglePredicate()).(type) { @@ -1555,6 +1761,17 @@ func (m *Matcher_MatcherList_Predicate) validate(all bool) error { } case *Matcher_MatcherList_Predicate_OrMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true if all { switch v := interface{}(m.GetOrMatcher()).(type) { @@ -1586,6 +1803,17 @@ func (m *Matcher_MatcherList_Predicate) validate(all bool) error { } case *Matcher_MatcherList_Predicate_AndMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true if all { switch v := interface{}(m.GetAndMatcher()).(type) { @@ -1617,6 +1845,17 @@ func (m *Matcher_MatcherList_Predicate) validate(all bool) error { } case *Matcher_MatcherList_Predicate_NotMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true if all { switch v := interface{}(m.GetNotMatcher()).(type) { @@ -1648,6 +1887,9 @@ func (m *Matcher_MatcherList_Predicate) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofMatchTypePresent { err := Matcher_MatcherList_PredicateValidationError{ field: "MatchType", reason: "value is required", @@ -1656,7 +1898,6 @@ func (m *Matcher_MatcherList_Predicate) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -1989,9 +2230,20 @@ func (m *Matcher_MatcherList_Predicate_SinglePredicate) validate(all bool) error } } - switch m.Matcher.(type) { - + oneofMatcherPresent := false + switch v := m.Matcher.(type) { case 
*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch: + if v == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherPresent = true if all { switch v := interface{}(m.GetValueMatch()).(type) { @@ -2023,6 +2275,17 @@ func (m *Matcher_MatcherList_Predicate_SinglePredicate) validate(all bool) error } case *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch: + if v == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherPresent = true if all { switch v := interface{}(m.GetCustomMatch()).(type) { @@ -2054,6 +2317,9 @@ func (m *Matcher_MatcherList_Predicate_SinglePredicate) validate(all bool) error } default: + _ = v // ensures v is used + } + if !oneofMatcherPresent { err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ field: "Matcher", reason: "value is required", @@ -2062,7 +2328,6 @@ func (m *Matcher_MatcherList_Predicate_SinglePredicate) validate(all bool) error return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -2631,9 +2896,20 @@ func (m *HttpGenericBodyMatch_GenericTextMatch) validate(all bool) error { var errors []error - switch m.Rule.(type) { - + oneofRulePresent := false + switch v := m.Rule.(type) { case *HttpGenericBodyMatch_GenericTextMatch_StringMatch: + if v == nil { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if utf8.RuneCountInString(m.GetStringMatch()) < 1 { err := HttpGenericBodyMatch_GenericTextMatchValidationError{ @@ -2647,6 +2923,17 @@ func (m *HttpGenericBodyMatch_GenericTextMatch) 
validate(all bool) error { } case *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch: + if v == nil { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if len(m.GetBinaryMatch()) < 1 { err := HttpGenericBodyMatch_GenericTextMatchValidationError{ @@ -2660,6 +2947,9 @@ func (m *HttpGenericBodyMatch_GenericTextMatch) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofRulePresent { err := HttpGenericBodyMatch_GenericTextMatchValidationError{ field: "Rule", reason: "value is required", @@ -2668,7 +2958,6 @@ func (m *HttpGenericBodyMatch_GenericTextMatch) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go index 6fa1ca8ece..7f61c14b8a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go @@ -1,13 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/address.proto package corev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -130,8 +131,7 @@ func (x *Pipe) GetMode() uint32 { } // The address represents an envoy internal listener. 
-// [#comment: TODO(lambdai): Make this address available for listener and endpoint. -// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.] +// [#comment: TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.] type EnvoyInternalAddress struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -140,6 +140,10 @@ type EnvoyInternalAddress struct { // Types that are assignable to AddressNameSpecifier: // *EnvoyInternalAddress_ServerListenerName AddressNameSpecifier isEnvoyInternalAddress_AddressNameSpecifier `protobuf_oneof:"address_name_specifier"` + // Specifies an endpoint identifier to distinguish between multiple endpoints for the same internal listener in a + // single upstream pool. Only used in the upstream addresses for tracking changes to individual endpoints. This, for + // example, may be set to the final destination IP for the target internal listener. + EndpointId string `protobuf:"bytes,2,opt,name=endpoint_id,json=endpointId,proto3" json:"endpoint_id,omitempty"` } func (x *EnvoyInternalAddress) Reset() { @@ -188,12 +192,20 @@ func (x *EnvoyInternalAddress) GetServerListenerName() string { return "" } +func (x *EnvoyInternalAddress) GetEndpointId() string { + if x != nil { + return x.EndpointId + } + return "" +} + type isEnvoyInternalAddress_AddressNameSpecifier interface { isEnvoyInternalAddress_AddressNameSpecifier() } type EnvoyInternalAddress_ServerListenerName struct { - // [#not-implemented-hide:] The :ref:`listener name ` of the destination internal listener. + // Specifies the :ref:`name ` of the + // internal listener. ServerListenerName string `protobuf:"bytes,1,opt,name=server_listener_name,json=serverListenerName,proto3,oneof"` } @@ -214,8 +226,8 @@ type SocketAddress struct { // within an upstream :ref:`BindConfig `, the address // controls the source address of outbound connections. 
For :ref:`clusters // `, the cluster type determines whether the - // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS - // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized + // address must be an IP (``STATIC`` or ``EDS`` clusters) or a hostname resolved by DNS + // (``STRICT_DNS`` or ``LOGICAL_DNS`` clusters). Address resolution can be customized // via :ref:`resolver_name `. Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` // Types that are assignable to PortSpecifier: @@ -226,7 +238,7 @@ type SocketAddress struct { // this is empty, a context dependent default applies. If the address is a concrete // IP address, no resolution will occur. If address is a hostname this // should be set for resolution other than DNS. Specifying a custom resolver with - // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. + // ``STRICT_DNS`` or ``LOGICAL_DNS`` will generate an error at runtime. ResolverName string `protobuf:"bytes,5,opt,name=resolver_name,json=resolverName,proto3" json:"resolver_name,omitempty"` // When binding to an IPv6 address above, this enables `IPv4 compatibility // `_. Binding to ``::`` will @@ -406,6 +418,70 @@ func (x *TcpKeepalive) GetKeepaliveInterval() *wrappers.UInt32Value { return nil } +type ExtraSourceAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The additional address to bind. + Address *SocketAddress `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. If specified, this will override the + // :ref:`socket_options ` + // in the BindConfig. If specified with no + // :ref:`socket_options ` + // or an empty list of :ref:`socket_options `, + // it means no socket option will apply. 
+ SocketOptions *SocketOptionsOverride `protobuf:"bytes,2,opt,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` +} + +func (x *ExtraSourceAddress) Reset() { + *x = ExtraSourceAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtraSourceAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtraSourceAddress) ProtoMessage() {} + +func (x *ExtraSourceAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtraSourceAddress.ProtoReflect.Descriptor instead. +func (*ExtraSourceAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{4} +} + +func (x *ExtraSourceAddress) GetAddress() *SocketAddress { + if x != nil { + return x.Address + } + return nil +} + +func (x *ExtraSourceAddress) GetSocketOptions() *SocketOptionsOverride { + if x != nil { + return x.SocketOptions + } + return nil +} + +// [#next-free-field: 6] type BindConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -413,23 +489,37 @@ type BindConfig struct { // The address to bind to when creating a socket. SourceAddress *SocketAddress `protobuf:"bytes,1,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"` - // Whether to set the *IP_FREEBIND* option when creating the socket. When this + // Whether to set the ``IP_FREEBIND`` option when creating the socket. 
When this // flag is set to true, allows the :ref:`source_address - // ` to be an IP address + // ` to be an IP address // that is not configured on the system running Envoy. When this flag is set - // to false, the option *IP_FREEBIND* is disabled on the socket. When this + // to false, the option ``IP_FREEBIND`` is disabled on the socket. When this // flag is not set (default), the socket is not modified, i.e. the option is // neither enabled nor disabled. Freebind *wrappers.BoolValue `protobuf:"bytes,2,opt,name=freebind,proto3" json:"freebind,omitempty"` // Additional socket options that may not be present in Envoy source code or // precompiled binaries. SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` + // Extra source addresses appended to the address specified in the `source_address` + // field. This enables to specify multiple source addresses. Currently, only one extra + // address can be supported, and the extra address should have a different IP version + // with the address in the `source_address` field. The address which has the same IP + // version with the target host's address IP version will be used as bind address. If more + // than one extra address specified, only the first address matched IP version will be + // returned. If there is no same IP version address found, the address in the `source_address` + // will be returned. + ExtraSourceAddresses []*ExtraSourceAddress `protobuf:"bytes,5,rep,name=extra_source_addresses,json=extraSourceAddresses,proto3" json:"extra_source_addresses,omitempty"` + // Deprecated by + // :ref:`extra_source_addresses ` + // + // Deprecated: Do not use. 
+ AdditionalSourceAddresses []*SocketAddress `protobuf:"bytes,4,rep,name=additional_source_addresses,json=additionalSourceAddresses,proto3" json:"additional_source_addresses,omitempty"` } func (x *BindConfig) Reset() { *x = BindConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_address_proto_msgTypes[4] + mi := &file_envoy_config_core_v3_address_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -442,7 +532,7 @@ func (x *BindConfig) String() string { func (*BindConfig) ProtoMessage() {} func (x *BindConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_address_proto_msgTypes[4] + mi := &file_envoy_config_core_v3_address_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -455,7 +545,7 @@ func (x *BindConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use BindConfig.ProtoReflect.Descriptor instead. func (*BindConfig) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{4} + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{5} } func (x *BindConfig) GetSourceAddress() *SocketAddress { @@ -479,6 +569,21 @@ func (x *BindConfig) GetSocketOptions() []*SocketOption { return nil } +func (x *BindConfig) GetExtraSourceAddresses() []*ExtraSourceAddress { + if x != nil { + return x.ExtraSourceAddresses + } + return nil +} + +// Deprecated: Do not use. +func (x *BindConfig) GetAdditionalSourceAddresses() []*SocketAddress { + if x != nil { + return x.AdditionalSourceAddresses + } + return nil +} + // Addresses specify either a logical or physical address and port, which are // used to tell Envoy where to bind/listen, connect to upstream and find // management servers. 
@@ -497,7 +602,7 @@ type Address struct { func (x *Address) Reset() { *x = Address{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_address_proto_msgTypes[5] + mi := &file_envoy_config_core_v3_address_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -510,7 +615,7 @@ func (x *Address) String() string { func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_address_proto_msgTypes[5] + mi := &file_envoy_config_core_v3_address_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -523,7 +628,7 @@ func (x *Address) ProtoReflect() protoreflect.Message { // Deprecated: Use Address.ProtoReflect.Descriptor instead. func (*Address) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{5} + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{6} } func (m *Address) GetAddress() isAddress_Address { @@ -567,7 +672,8 @@ type Address_Pipe struct { } type Address_EnvoyInternalAddress struct { - // [#not-implemented-hide:] + // Specifies a user-space address handled by :ref:`internal listeners + // `. 
EnvoyInternalAddress *EnvoyInternalAddress `protobuf:"bytes,3,opt,name=envoy_internal_address,json=envoyInternalAddress,proto3,oneof"` } @@ -593,7 +699,7 @@ type CidrRange struct { func (x *CidrRange) Reset() { *x = CidrRange{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_address_proto_msgTypes[6] + mi := &file_envoy_config_core_v3_address_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -606,7 +712,7 @@ func (x *CidrRange) String() string { func (*CidrRange) ProtoMessage() {} func (x *CidrRange) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_address_proto_msgTypes[6] + mi := &file_envoy_config_core_v3_address_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -619,7 +725,7 @@ func (x *CidrRange) ProtoReflect() protoreflect.Message { // Deprecated: Use CidrRange.ProtoReflect.Descriptor instead. 
func (*CidrRange) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{6} + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{7} } func (x *CidrRange) GetAddressPrefix() string { @@ -647,120 +753,148 @@ var file_envoy_config_core_v3_address_proto_rawDesc = []byte{ 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0x60, 0x0a, 0x04, 0x50, 0x69, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0xff, 0x03, 0x52, 0x04, 0x6d, 0x6f, - 0x64, 0x65, 0x3a, 0x1d, 0x9a, 0xc5, 0x88, 0x1e, 0x18, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x69, 0x70, - 0x65, 0x22, 0x69, 0x0a, 0x14, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x73, 0x65, 0x72, - 0x76, 
0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x1d, 0x0a, - 0x16, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xf6, 0x02, 0x0a, - 0x0d, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x12, 0x21, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2a, 0x0a, 0x0a, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, - 0x18, 0xff, 0xff, 0x03, 0x48, 0x00, 0x52, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x6f, - 0x72, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x6c, - 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 
0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x70, 0x76, 0x34, 0x5f, - 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x70, - 0x76, 0x34, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x22, 0x1c, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x55, 0x44, 0x50, 0x10, 0x01, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x15, - 0x0a, 0x0e, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x90, 0x02, 0x0a, 0x0c, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, - 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, - 0x69, 0x76, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, - 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x73, 0x12, - 0x43, 0x0a, 0x0e, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 
0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x04, 0x50, 0x69, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x04, 0x6d, 0x6f, 0x64, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0xff, + 0x03, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x3a, 0x1d, 0x9a, 0xc5, 0x88, 0x1e, 0x18, 0x0a, 0x16, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x50, 0x69, 0x70, 0x65, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x45, 0x6e, 0x76, 0x6f, 0x79, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x32, 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x49, 0x64, 0x42, 0x1d, 0x0a, 0x16, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, + 0xf8, 0x42, 0x01, 0x22, 0xf6, 0x02, 0x0a, 0x0d, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x21, 0x0a, 0x07, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2a, 0x0a, 0x0a, + 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x48, 0x00, 0x52, 0x09, 0x70, + 0x6f, 0x72, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, + 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, + 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, + 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x70, 0x76, 0x34, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x22, + 0x1c, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x54, + 0x43, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x01, 0x3a, 0x26, 0x9a, + 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x15, 0x0a, 0x0e, 
0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x90, 0x02, 0x0a, + 0x0c, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x47, 0x0a, + 0x10, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, + 0x50, 0x72, 0x6f, 0x62, 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0e, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, + 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x6b, 0x65, + 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x6b, + 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, - 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 
- 0x6c, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x63, 0x70, 0x4b, - 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x22, 0x8a, 0x02, 0x0a, 0x0a, 0x42, 0x69, 0x6e, - 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x54, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0d, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x36, 0x0a, - 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x66, 0x72, 0x65, - 0x65, 0x62, 0x69, 0x6e, 0x64, 0x12, 0x49, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x9f, 0x02, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x73, 
0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, - 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, - 0x30, 0x0a, 0x04, 0x70, 0x69, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x69, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x70, 0x69, 0x70, - 0x65, 0x12, 0x62, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x49, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, - 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x3a, 0x20, 0x9a, 0xc5, 0x88, 0x1e, 0x1b, 0x0a, 0x19, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x0e, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xa6, 0x01, 0x0a, 0x09, 0x43, 0x69, 0x64, 0x72, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, 0x61, 0x64, 0x64, 0x72, 
0x65, 0x73, 0x73, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, - 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, - 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4c, 0x65, 0x6e, 0x3a, 0x22, 0x9a, 0xc5, - 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x42, 0x80, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, - 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, - 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, - 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, + 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x22, + 0xb1, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x74, 
0x72, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x47, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x52, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x76, 0x65, 0x72, + 0x72, 0x69, 0x64, 0x65, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x22, 0xd2, 0x03, 0x0a, 0x0a, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x4a, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, + 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x36, + 0x0a, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x66, 0x72, + 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x12, 0x49, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 
0x6b, 0x65, 0x74, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5e, 0x0a, 0x16, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x14, 0x65, 0x78, 0x74, + 0x72, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x12, 0x70, 0x0a, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x0b, 0x18, 0x01, 0x92, + 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x19, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, + 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x9f, 0x02, 0x0a, 0x07, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 
0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x30, 0x0a, 0x04, 0x70, 0x69, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x69, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, + 0x70, 0x69, 0x70, 0x65, 0x12, 0x62, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x76, 0x6f, + 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x48, 0x00, 0x52, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x3a, 0x20, 0x9a, 0xc5, 0x88, 0x1e, 0x1b, 0x0a, + 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x0e, 0x0a, 0x07, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xa6, 0x01, 0x0a, 0x09, 0x43, + 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 
0x52, 0x0d, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, + 0x03, 0x18, 0x80, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4c, 0x65, 0x6e, 0x3a, + 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x42, 0x80, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -776,37 +910,43 @@ func file_envoy_config_core_v3_address_proto_rawDescGZIP() []byte { } var file_envoy_config_core_v3_address_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_config_core_v3_address_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_envoy_config_core_v3_address_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var 
file_envoy_config_core_v3_address_proto_goTypes = []interface{}{ - (SocketAddress_Protocol)(0), // 0: envoy.config.core.v3.SocketAddress.Protocol - (*Pipe)(nil), // 1: envoy.config.core.v3.Pipe - (*EnvoyInternalAddress)(nil), // 2: envoy.config.core.v3.EnvoyInternalAddress - (*SocketAddress)(nil), // 3: envoy.config.core.v3.SocketAddress - (*TcpKeepalive)(nil), // 4: envoy.config.core.v3.TcpKeepalive - (*BindConfig)(nil), // 5: envoy.config.core.v3.BindConfig - (*Address)(nil), // 6: envoy.config.core.v3.Address - (*CidrRange)(nil), // 7: envoy.config.core.v3.CidrRange - (*wrappers.UInt32Value)(nil), // 8: google.protobuf.UInt32Value - (*wrappers.BoolValue)(nil), // 9: google.protobuf.BoolValue - (*SocketOption)(nil), // 10: envoy.config.core.v3.SocketOption + (SocketAddress_Protocol)(0), // 0: envoy.config.core.v3.SocketAddress.Protocol + (*Pipe)(nil), // 1: envoy.config.core.v3.Pipe + (*EnvoyInternalAddress)(nil), // 2: envoy.config.core.v3.EnvoyInternalAddress + (*SocketAddress)(nil), // 3: envoy.config.core.v3.SocketAddress + (*TcpKeepalive)(nil), // 4: envoy.config.core.v3.TcpKeepalive + (*ExtraSourceAddress)(nil), // 5: envoy.config.core.v3.ExtraSourceAddress + (*BindConfig)(nil), // 6: envoy.config.core.v3.BindConfig + (*Address)(nil), // 7: envoy.config.core.v3.Address + (*CidrRange)(nil), // 8: envoy.config.core.v3.CidrRange + (*wrappers.UInt32Value)(nil), // 9: google.protobuf.UInt32Value + (*SocketOptionsOverride)(nil), // 10: envoy.config.core.v3.SocketOptionsOverride + (*wrappers.BoolValue)(nil), // 11: google.protobuf.BoolValue + (*SocketOption)(nil), // 12: envoy.config.core.v3.SocketOption } var file_envoy_config_core_v3_address_proto_depIdxs = []int32{ 0, // 0: envoy.config.core.v3.SocketAddress.protocol:type_name -> envoy.config.core.v3.SocketAddress.Protocol - 8, // 1: envoy.config.core.v3.TcpKeepalive.keepalive_probes:type_name -> google.protobuf.UInt32Value - 8, // 2: envoy.config.core.v3.TcpKeepalive.keepalive_time:type_name -> 
google.protobuf.UInt32Value - 8, // 3: envoy.config.core.v3.TcpKeepalive.keepalive_interval:type_name -> google.protobuf.UInt32Value - 3, // 4: envoy.config.core.v3.BindConfig.source_address:type_name -> envoy.config.core.v3.SocketAddress - 9, // 5: envoy.config.core.v3.BindConfig.freebind:type_name -> google.protobuf.BoolValue - 10, // 6: envoy.config.core.v3.BindConfig.socket_options:type_name -> envoy.config.core.v3.SocketOption - 3, // 7: envoy.config.core.v3.Address.socket_address:type_name -> envoy.config.core.v3.SocketAddress - 1, // 8: envoy.config.core.v3.Address.pipe:type_name -> envoy.config.core.v3.Pipe - 2, // 9: envoy.config.core.v3.Address.envoy_internal_address:type_name -> envoy.config.core.v3.EnvoyInternalAddress - 8, // 10: envoy.config.core.v3.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value - 11, // [11:11] is the sub-list for method output_type - 11, // [11:11] is the sub-list for method input_type - 11, // [11:11] is the sub-list for extension type_name - 11, // [11:11] is the sub-list for extension extendee - 0, // [0:11] is the sub-list for field type_name + 9, // 1: envoy.config.core.v3.TcpKeepalive.keepalive_probes:type_name -> google.protobuf.UInt32Value + 9, // 2: envoy.config.core.v3.TcpKeepalive.keepalive_time:type_name -> google.protobuf.UInt32Value + 9, // 3: envoy.config.core.v3.TcpKeepalive.keepalive_interval:type_name -> google.protobuf.UInt32Value + 3, // 4: envoy.config.core.v3.ExtraSourceAddress.address:type_name -> envoy.config.core.v3.SocketAddress + 10, // 5: envoy.config.core.v3.ExtraSourceAddress.socket_options:type_name -> envoy.config.core.v3.SocketOptionsOverride + 3, // 6: envoy.config.core.v3.BindConfig.source_address:type_name -> envoy.config.core.v3.SocketAddress + 11, // 7: envoy.config.core.v3.BindConfig.freebind:type_name -> google.protobuf.BoolValue + 12, // 8: envoy.config.core.v3.BindConfig.socket_options:type_name -> envoy.config.core.v3.SocketOption + 5, // 9: 
envoy.config.core.v3.BindConfig.extra_source_addresses:type_name -> envoy.config.core.v3.ExtraSourceAddress + 3, // 10: envoy.config.core.v3.BindConfig.additional_source_addresses:type_name -> envoy.config.core.v3.SocketAddress + 3, // 11: envoy.config.core.v3.Address.socket_address:type_name -> envoy.config.core.v3.SocketAddress + 1, // 12: envoy.config.core.v3.Address.pipe:type_name -> envoy.config.core.v3.Pipe + 2, // 13: envoy.config.core.v3.Address.envoy_internal_address:type_name -> envoy.config.core.v3.EnvoyInternalAddress + 9, // 14: envoy.config.core.v3.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_envoy_config_core_v3_address_proto_init() } @@ -865,7 +1005,7 @@ func file_envoy_config_core_v3_address_proto_init() { } } file_envoy_config_core_v3_address_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BindConfig); i { + switch v := v.(*ExtraSourceAddress); i { case 0: return &v.state case 1: @@ -877,7 +1017,7 @@ func file_envoy_config_core_v3_address_proto_init() { } } file_envoy_config_core_v3_address_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Address); i { + switch v := v.(*BindConfig); i { case 0: return &v.state case 1: @@ -889,6 +1029,18 @@ func file_envoy_config_core_v3_address_proto_init() { } } file_envoy_config_core_v3_address_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { 
switch v := v.(*CidrRange); i { case 0: return &v.state @@ -908,7 +1060,7 @@ func file_envoy_config_core_v3_address_proto_init() { (*SocketAddress_PortValue)(nil), (*SocketAddress_NamedPort)(nil), } - file_envoy_config_core_v3_address_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_envoy_config_core_v3_address_proto_msgTypes[6].OneofWrappers = []interface{}{ (*Address_SocketAddress)(nil), (*Address_Pipe)(nil), (*Address_EnvoyInternalAddress)(nil), @@ -919,7 +1071,7 @@ func file_envoy_config_core_v3_address_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_core_v3_address_proto_rawDesc, NumEnums: 1, - NumMessages: 7, + NumMessages: 8, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go index 862ed83270..25610ab041 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go @@ -177,12 +177,27 @@ func (m *EnvoyInternalAddress) validate(all bool) error { var errors []error - switch m.AddressNameSpecifier.(type) { + // no validation rules for EndpointId + oneofAddressNameSpecifierPresent := false + switch v := m.AddressNameSpecifier.(type) { case *EnvoyInternalAddress_ServerListenerName: + if v == nil { + err := EnvoyInternalAddressValidationError{ + field: "AddressNameSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofAddressNameSpecifierPresent = true // no validation rules for ServerListenerName - default: + _ = v // ensures v is used + } + if !oneofAddressNameSpecifierPresent { err := 
EnvoyInternalAddressValidationError{ field: "AddressNameSpecifier", reason: "value is required", @@ -191,7 +206,6 @@ func (m *EnvoyInternalAddress) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -322,9 +336,20 @@ func (m *SocketAddress) validate(all bool) error { // no validation rules for Ipv4Compat - switch m.PortSpecifier.(type) { - + oneofPortSpecifierPresent := false + switch v := m.PortSpecifier.(type) { case *SocketAddress_PortValue: + if v == nil { + err := SocketAddressValidationError{ + field: "PortSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPortSpecifierPresent = true if m.GetPortValue() > 65535 { err := SocketAddressValidationError{ @@ -338,9 +363,22 @@ func (m *SocketAddress) validate(all bool) error { } case *SocketAddress_NamedPort: + if v == nil { + err := SocketAddressValidationError{ + field: "PortSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPortSpecifierPresent = true // no validation rules for NamedPort - default: + _ = v // ensures v is used + } + if !oneofPortSpecifierPresent { err := SocketAddressValidationError{ field: "PortSpecifier", reason: "value is required", @@ -349,7 +387,6 @@ func (m *SocketAddress) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -616,6 +653,177 @@ var _ interface { ErrorName() string } = TcpKeepaliveValidationError{} +// Validate checks the field values on ExtraSourceAddress with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *ExtraSourceAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ExtraSourceAddress with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ExtraSourceAddressMultiError, or nil if none found. +func (m *ExtraSourceAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *ExtraSourceAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetAddress() == nil { + err := ExtraSourceAddressValidationError{ + field: "Address", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExtraSourceAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExtraSourceAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExtraSourceAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSocketOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExtraSourceAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExtraSourceAddressValidationError{ + field: 
"SocketOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSocketOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExtraSourceAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ExtraSourceAddressMultiError(errors) + } + + return nil +} + +// ExtraSourceAddressMultiError is an error wrapping multiple validation errors +// returned by ExtraSourceAddress.ValidateAll() if the designated constraints +// aren't met. +type ExtraSourceAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ExtraSourceAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ExtraSourceAddressMultiError) AllErrors() []error { return m } + +// ExtraSourceAddressValidationError is the validation error returned by +// ExtraSourceAddress.Validate if the designated constraints aren't met. +type ExtraSourceAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ExtraSourceAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ExtraSourceAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ExtraSourceAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ExtraSourceAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ExtraSourceAddressValidationError) ErrorName() string { + return "ExtraSourceAddressValidationError" +} + +// Error satisfies the builtin error interface +func (e ExtraSourceAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sExtraSourceAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ExtraSourceAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ExtraSourceAddressValidationError{} + // Validate checks the field values on BindConfig with the rules defined in the // proto definition for this message. If any rules are violated, the first // error encountered is returned, or nil if there are no violations. @@ -638,17 +846,6 @@ func (m *BindConfig) validate(all bool) error { var errors []error - if m.GetSourceAddress() == nil { - err := BindConfigValidationError{ - field: "SourceAddress", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - if all { switch v := interface{}(m.GetSourceAddress()).(type) { case interface{ ValidateAll() error }: @@ -741,6 +938,74 @@ func (m *BindConfig) validate(all bool) error { } + for idx, item := range m.GetExtraSourceAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("ExtraSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("ExtraSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + 
}) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: fmt.Sprintf("ExtraSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetAdditionalSourceAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("AdditionalSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("AdditionalSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: fmt.Sprintf("AdditionalSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + if len(errors) > 0 { return BindConfigMultiError(errors) } @@ -839,9 +1104,20 @@ func (m *Address) validate(all bool) error { var errors []error - switch m.Address.(type) { - + oneofAddressPresent := false + switch v := m.Address.(type) { case *Address_SocketAddress: + if v == nil { + err := AddressValidationError{ + field: "Address", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofAddressPresent = true if all { switch v := interface{}(m.GetSocketAddress()).(type) { @@ -873,6 +1149,17 @@ func (m *Address) validate(all bool) error { } case *Address_Pipe: + if v == nil { + err := AddressValidationError{ + field: "Address", + reason: "oneof value cannot be a 
typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofAddressPresent = true if all { switch v := interface{}(m.GetPipe()).(type) { @@ -904,6 +1191,17 @@ func (m *Address) validate(all bool) error { } case *Address_EnvoyInternalAddress: + if v == nil { + err := AddressValidationError{ + field: "Address", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofAddressPresent = true if all { switch v := interface{}(m.GetEnvoyInternalAddress()).(type) { @@ -935,6 +1233,9 @@ func (m *Address) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofAddressPresent { err := AddressValidationError{ field: "Address", reason: "value is required", @@ -943,7 +1244,6 @@ func (m *Address) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go index d749f29dee..63066573a3 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/backoff.proto package corev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go index 7f9b3ab9f3..d9ec4f4f84 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/base.proto package corev3 @@ -12,7 +12,7 @@ import ( _ "github.com/envoyproxy/go-control-plane/envoy/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" _struct "github.com/golang/protobuf/ptypes/struct" wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -205,7 +205,7 @@ func (TrafficDirection) EnumDescriptor() ([]byte, []int) { return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{2} } -// [#not-implemented-hide:] Describes the supported actions types for header append action. +// Describes the supported actions types for header append action. type HeaderValueOption_HeaderAppendAction int32 const ( @@ -557,14 +557,14 @@ type Node struct { Extensions []*Extension `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty"` // Client feature support list. These are well known features described // in the Envoy API repository for a given major version of an API. 
Client features - // use reverse DNS naming scheme, for example `com.acme.feature`. + // use reverse DNS naming scheme, for example ``com.acme.feature``. // See :ref:`the list of features ` that xDS client may // support. ClientFeatures []string `protobuf:"bytes,10,rep,name=client_features,json=clientFeatures,proto3" json:"client_features,omitempty"` // Known listening ports on the node as a generic hint to the management server // for filtering :ref:`listeners ` to be returned. For example, // if there is a listener bound to port 80, the list can optionally contain the - // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. + // SocketAddress ``(0.0.0.0,80)``. The field is optional and just a hint. // // Deprecated: Do not use. ListeningAddresses []*Address `protobuf:"bytes,11,rep,name=listening_addresses,json=listeningAddresses,proto3" json:"listening_addresses,omitempty"` @@ -733,20 +733,20 @@ type Metadata struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* + // Key is the reverse DNS filter name, e.g. com.acme.widget. The ``envoy.*`` // namespace is reserved for Envoy's built-in filters. - // If both *filter_metadata* and + // If both ``filter_metadata`` and // :ref:`typed_filter_metadata ` // fields are present in the metadata with same keys, - // only *typed_filter_metadata* field will be parsed. + // only ``typed_filter_metadata`` field will be parsed. FilterMetadata map[string]*_struct.Struct `protobuf:"bytes,1,rep,name=filter_metadata,json=filterMetadata,proto3" json:"filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* + // Key is the reverse DNS filter name, e.g. com.acme.widget. The ``envoy.*`` // namespace is reserved for Envoy's built-in filters. // The value is encoded as google.protobuf.Any. 
// If both :ref:`filter_metadata ` - // and *typed_filter_metadata* fields are present in the metadata with same keys, - // only *typed_filter_metadata* field will be parsed. - TypedFilterMetadata map[string]*any.Any `protobuf:"bytes,2,rep,name=typed_filter_metadata,json=typedFilterMetadata,proto3" json:"typed_filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // and ``typed_filter_metadata`` fields are present in the metadata with same keys, + // only ``typed_filter_metadata`` field will be parsed. + TypedFilterMetadata map[string]*any1.Any `protobuf:"bytes,2,rep,name=typed_filter_metadata,json=typedFilterMetadata,proto3" json:"typed_filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Metadata) Reset() { @@ -788,7 +788,7 @@ func (x *Metadata) GetFilterMetadata() map[string]*_struct.Struct { return nil } -func (x *Metadata) GetTypedFilterMetadata() map[string]*any.Any { +func (x *Metadata) GetTypedFilterMetadata() map[string]*any1.Any { if x != nil { return x.TypedFilterMetadata } @@ -1099,7 +1099,7 @@ type HeaderValue struct { // // The same :ref:`format specifier ` as used for // :ref:`HTTP access logging ` applies here, however - // unknown header values are replaced with the empty string instead of `-`. + // unknown header values are replaced with the empty string instead of ``-``. Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } @@ -1159,9 +1159,20 @@ type HeaderValueOption struct { Header *HeaderValue `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` // Should the value be appended? If true (default), the value is appended to // existing values. Otherwise it replaces any existing values. + // This field is deprecated and please use + // :ref:`append_action ` as replacement. + // + // .. 
note:: + // The :ref:`external authorization service ` and + // :ref:`external processor service ` have + // default value (``false``) for this field. + // + // Deprecated: Do not use. Append *wrappers.BoolValue `protobuf:"bytes,2,opt,name=append,proto3" json:"append,omitempty"` - // [#not-implemented-hide:] Describes the action taken to append/overwrite the given value for an existing header - // or to only add this header if it's absent. Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD`. + // Describes the action taken to append/overwrite the given value for an existing header + // or to only add this header if it's absent. + // Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD + // `. AppendAction HeaderValueOption_HeaderAppendAction `protobuf:"varint,3,opt,name=append_action,json=appendAction,proto3,enum=envoy.config.core.v3.HeaderValueOption_HeaderAppendAction" json:"append_action,omitempty"` // Is the header value allowed to be empty? If false (default), custom headers with empty values are dropped, // otherwise they are added. @@ -1207,6 +1218,7 @@ func (x *HeaderValueOption) GetHeader() *HeaderValue { return nil } +// Deprecated: Do not use. 
func (x *HeaderValueOption) GetAppend() *wrappers.BoolValue { if x != nil { return x.Append @@ -1716,7 +1728,7 @@ func (m *TransportSocket) GetConfigType() isTransportSocket_ConfigType { return nil } -func (x *TransportSocket) GetTypedConfig() *any.Any { +func (x *TransportSocket) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*TransportSocket_TypedConfig); ok { return x.TypedConfig } @@ -1728,7 +1740,7 @@ type isTransportSocket_ConfigType interface { } type TransportSocket_TypedConfig struct { - TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*TransportSocket_TypedConfig) isTransportSocket_ConfigType() {} @@ -1742,7 +1754,7 @@ func (*TransportSocket_TypedConfig) isTransportSocket_ConfigType() {} // :ref:`FractionalPercent ` proto represented as JSON/YAML // and may also be represented as an integer with the assumption that the value is an integral // percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse -// as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. +// as a ``FractionalPercent`` whose numerator is 42 and denominator is HUNDRED. 
type RuntimeFractionalPercent struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2048,153 +2060,153 @@ var file_envoy_config_core_v3_base_proto_rawDesc = []byte{ 0x01, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0xb3, 0x03, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, + 0xc0, 0x03, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x06, 0x61, 0x70, + 0x10, 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x69, - 0x0a, 0x0d, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 
0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x61, 0x70, 0x70, - 0x65, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x65, - 0x70, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0e, 0x6b, 0x65, 0x65, 0x70, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x22, 0x64, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x70, 0x70, - 0x65, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x41, 0x50, 0x50, - 0x45, 0x4e, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, - 0x5f, 0x41, 0x44, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x44, 0x44, 0x5f, 0x49, 0x46, - 0x5f, 0x41, 0x42, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x4f, 0x56, 0x45, - 0x52, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, - 0x5f, 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44, 0x10, 0x02, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, - 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6c, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, - 0x61, 0x70, 0x12, 0x3b, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, - 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x4d, 
0x61, 0x70, 0x22, 0x2f, 0x0a, 0x10, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x22, 0xf4, 0x01, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, - 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x69, 0x6e, - 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, - 0x25, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x3c, 0x0a, 0x14, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, - 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, - 0x13, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, - 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x44, - 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, 0x0a, 0x09, 0x73, 0x70, 0x65, - 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xd4, 0x01, 0x0a, 0x0b, - 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4b, 0x0a, 0x0e, 0x72, - 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, 0x61, 
0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x6f, - 0x66, 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x52, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x5f, - 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, + 0x33, 0x2e, 0x30, 0x52, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x69, 0x0a, 0x0d, 0x61, + 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x65, + 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0e, 0x6b, 0x65, 0x65, 0x70, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0x64, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, + 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f, 0x41, 0x44, + 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x44, 0x44, 0x5f, 0x49, 0x46, 
0x5f, 0x41, 0x42, + 0x53, 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, + 0x49, 0x54, 0x45, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, + 0x5f, 0x41, 0x44, 0x44, 0x10, 0x02, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x6c, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x70, 0x12, + 0x3b, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x22, 0x9a, 0xc5, + 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x70, + 0x22, 0x2f, 0x0a, 0x10, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x22, 0xf4, 0x01, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x25, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x08, 0x66, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, + 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, + 0x0b, 0x69, 0x6e, 
0x6c, 0x69, 0x6e, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0d, + 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x12, 0x3c, 0x0a, 0x14, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, + 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x13, 0x65, 0x6e, + 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xd4, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x53, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, + 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x52, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, + 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 
0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x6e, + 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, + 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, + 0xe8, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, + 0x74, 0x70, 0x55, 0x72, 0x69, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x07, 0x68, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x12, 0x1f, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x32, + 0x35, 0x36, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, + 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x0f, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x38, + 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 
0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, + 0x00, 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x12, 0x40, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, + 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xb0, 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xf2, 0x98, 0xfe, - 0x8f, 0x05, 0x0d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, - 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x3a, 0x24, 0x9a, 0xc5, - 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x63, 0x6f, 0x72, 
0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x22, 0xe8, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, - 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x74, 0x74, 0x70, 0x5f, - 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x07, 0x68, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x12, 0x1f, 0x0a, 0x06, 0x73, - 0x68, 0x61, 0x32, 0x35, 0x36, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x12, 0x44, 0x0a, 0x0c, - 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xc9, 0x01, - 0x0a, 0x0f, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0x38, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x48, 0x00, 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 
0x12, 0x40, 0x0a, 0x06, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x3a, 0x28, 0x9a, - 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, 0x74, - 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x69, - 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xb0, 0x01, 0x0a, 0x0f, 0x54, 0x72, - 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, - 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x42, - 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, - 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbf, 0x01, 0x0a, - 0x18, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 
0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x4f, 0x0a, 0x0d, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, - 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x31, 0x9a, 0xc5, 0x88, - 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x22, 0x55, - 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x12, 0x1e, - 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x3a, 0x25, - 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x2a, 0x28, 0x0a, 0x0f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, - 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x49, 0x47, 0x48, 0x10, 0x01, 0x2a, - 0x89, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x12, 0x16, 0x0a, 0x12, 0x4d, 0x45, 
0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, - 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x44, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, - 0x50, 0x4f, 0x53, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x55, 0x54, 0x10, 0x04, 0x12, - 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x43, - 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x50, 0x54, 0x49, - 0x4f, 0x4e, 0x53, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, - 0x12, 0x09, 0x0a, 0x05, 0x50, 0x41, 0x54, 0x43, 0x48, 0x10, 0x09, 0x2a, 0x3e, 0x0a, 0x10, 0x54, - 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, - 0x08, 0x4f, 0x55, 0x54, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, 0x42, 0x7d, 0x0a, 0x22, 0x69, - 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x42, 0x09, 0x42, 0x61, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, - 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 
0x64, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x0d, 0x0a, 0x0b, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, + 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbf, 0x01, 0x0a, 0x18, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x4f, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, + 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x22, 0x55, 0x0a, 0x0c, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x3a, 0x25, 0x9a, 0xc5, 0x88, + 0x1e, 
0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, + 0x6e, 0x65, 0x2a, 0x28, 0x0a, 0x0f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, + 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x49, 0x47, 0x48, 0x10, 0x01, 0x2a, 0x89, 0x01, 0x0a, + 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x16, + 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, + 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x44, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, + 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x55, 0x54, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, + 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x4e, 0x4e, + 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x53, + 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x12, 0x09, 0x0a, + 0x05, 0x50, 0x41, 0x54, 0x43, 0x48, 0x10, 0x09, 0x2a, 0x3e, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, + 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, + 0x07, 0x49, 0x4e, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x55, + 0x54, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, 0x42, 0x7d, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x09, + 0x42, 0x61, 0x73, 0x65, 0x50, 0x72, 0x6f, 
0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2248,7 +2260,7 @@ var file_envoy_config_core_v3_base_proto_goTypes = []interface{}{ (*BackoffStrategy)(nil), // 33: envoy.config.core.v3.BackoffStrategy (*wrappers.UInt32Value)(nil), // 34: google.protobuf.UInt32Value (*HttpUri)(nil), // 35: envoy.config.core.v3.HttpUri - (*any.Any)(nil), // 36: google.protobuf.Any + (*any1.Any)(nil), // 36: google.protobuf.Any (*v3.FractionalPercent)(nil), // 37: envoy.type.v3.FractionalPercent (*v31.ContextParams)(nil), // 38: xds.core.v3.ContextParams } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go index 47d09142d6..696a394630 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go @@ -632,12 +632,30 @@ func (m *Node) validate(all bool) error { } - switch m.UserAgentVersionType.(type) { - + switch v := m.UserAgentVersionType.(type) { case *Node_UserAgentVersion: + if v == nil { + err := NodeValidationError{ + field: "UserAgentVersionType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for UserAgentVersion - case 
*Node_UserAgentBuildVersion: + if v == nil { + err := NodeValidationError{ + field: "UserAgentVersionType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetUserAgentBuildVersion()).(type) { @@ -668,6 +686,8 @@ func (m *Node) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -2178,9 +2198,20 @@ func (m *DataSource) validate(all bool) error { var errors []error - switch m.Specifier.(type) { - + oneofSpecifierPresent := false + switch v := m.Specifier.(type) { case *DataSource_Filename: + if v == nil { + err := DataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true if utf8.RuneCountInString(m.GetFilename()) < 1 { err := DataSourceValidationError{ @@ -2194,12 +2225,43 @@ func (m *DataSource) validate(all bool) error { } case *DataSource_InlineBytes: + if v == nil { + err := DataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true // no validation rules for InlineBytes - case *DataSource_InlineString: + if v == nil { + err := DataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true // no validation rules for InlineString - case *DataSource_EnvironmentVariable: + if v == nil { + err := DataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true if utf8.RuneCountInString(m.GetEnvironmentVariable()) < 1 { err := DataSourceValidationError{ @@ -2213,6 +2275,9 @@ func (m *DataSource) 
validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofSpecifierPresent { err := DataSourceValidationError{ field: "Specifier", reason: "value is required", @@ -2221,7 +2286,6 @@ func (m *DataSource) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -2660,9 +2724,20 @@ func (m *AsyncDataSource) validate(all bool) error { var errors []error - switch m.Specifier.(type) { - + oneofSpecifierPresent := false + switch v := m.Specifier.(type) { case *AsyncDataSource_Local: + if v == nil { + err := AsyncDataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true if all { switch v := interface{}(m.GetLocal()).(type) { @@ -2694,6 +2769,17 @@ func (m *AsyncDataSource) validate(all bool) error { } case *AsyncDataSource_Remote: + if v == nil { + err := AsyncDataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true if all { switch v := interface{}(m.GetRemote()).(type) { @@ -2725,6 +2811,9 @@ func (m *AsyncDataSource) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofSpecifierPresent { err := AsyncDataSourceValidationError{ field: "Specifier", reason: "value is required", @@ -2733,7 +2822,6 @@ func (m *AsyncDataSource) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -2847,9 +2935,18 @@ func (m *TransportSocket) validate(all bool) error { errors = append(errors, err) } - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *TransportSocket_TypedConfig: + if v == nil { + err := TransportSocketValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + 
} if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -2880,6 +2977,8 @@ func (m *TransportSocket) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go index 41c7d793c6..f0584ffed2 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/config_source.proto package corev3 @@ -11,7 +11,7 @@ import ( v3 "github.com/cncf/xds/go/xds/core/v3" _ "github.com/envoyproxy/go-control-plane/envoy/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" duration "github.com/golang/protobuf/ptypes/duration" wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -200,7 +200,7 @@ type ApiConfigSource struct { // A list of config validators that will be executed when a new update is // received from the ApiConfigSource. Note that each validator handles a // specific xDS service type, and only the validators corresponding to the - // type url (in `:ref: DiscoveryResponse` or `:ref: DeltaDiscoveryResponse`) + // type url (in ``:ref: DiscoveryResponse`` or ``:ref: DeltaDiscoveryResponse``) // will be invoked. // If the validator returns false or throws an exception, the config will be rejected by // the client, and a NACK will be sent. 
@@ -473,23 +473,23 @@ type PathConfigSource struct { // // .. note:: // - // If `watched_directory` is *not* configured, Envoy will watch the file path for *moves.* + // If ``watched_directory`` is *not* configured, Envoy will watch the file path for *moves*. // This is because in general only moves are atomic. The same method of swapping files as is // demonstrated in the :ref:`runtime documentation ` can be - // used here also. If `watched_directory` is configured, no watch will be placed directly on - // this path. Instead, the configured `watched_directory` will be used to trigger reloads of + // used here also. If ``watched_directory`` is configured, no watch will be placed directly on + // this path. Instead, the configured ``watched_directory`` will be used to trigger reloads of // this path. This is required in certain deployment scenarios. See below for more information. Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - // If configured, this directory will be watched for *moves.* When an entry in this directory is - // moved to, the `path` will be reloaded. This is required in certain deployment scenarios. + // If configured, this directory will be watched for *moves*. When an entry in this directory is + // moved to, the ``path`` will be reloaded. This is required in certain deployment scenarios. // // Specifically, if trying to load an xDS resource using a // `Kubernetes ConfigMap `_, the // following configuration might be used: // 1. Store xds.yaml inside a ConfigMap. - // 2. Mount the ConfigMap to `/config_map/xds` - // 3. Configure path `/config_map/xds/xds.yaml` - // 4. Configure watched directory `/config_map/xds` + // 2. Mount the ConfigMap to ``/config_map/xds`` + // 3. Configure path ``/config_map/xds/xds.yaml`` + // 4. 
Configure watched directory ``/config_map/xds`` // // The above configuration will ensure that Envoy watches the owning directory for moves which is // required due to how Kubernetes manages ConfigMap symbolic links during atomic updates. @@ -555,7 +555,7 @@ type ConfigSource struct { unknownFields protoimpl.UnknownFields // Authorities that this config source may be used for. An authority specified in a xdstp:// URL - // is resolved to a *ConfigSource* prior to configuration fetch. This field provides the + // is resolved to a ``ConfigSource`` prior to configuration fetch. This field provides the // association between authority name and configuration source. // [#not-implemented-hide:] Authorities []*v3.Authority `protobuf:"bytes,7,rep,name=authorities,proto3" json:"authorities,omitempty"` @@ -681,7 +681,7 @@ type isConfigSource_ConfigSourceSpecifier interface { } type ConfigSource_Path struct { - // Deprecated in favor of `path_config_source`. Use that field instead. + // Deprecated in favor of ``path_config_source``. Use that field instead. // // Deprecated: Do not use. Path string `protobuf:"bytes,1,opt,name=path,proto3,oneof"` @@ -747,8 +747,8 @@ type ExtensionConfigSource struct { ConfigSource *ConfigSource `protobuf:"bytes,1,opt,name=config_source,json=configSource,proto3" json:"config_source,omitempty"` // Optional default configuration to use as the initial configuration if // there is a failure to receive the initial extension configuration or if - // `apply_default_config_without_warming` flag is set. - DefaultConfig *any.Any `protobuf:"bytes,2,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"` + // ``apply_default_config_without_warming`` flag is set. + DefaultConfig *any1.Any `protobuf:"bytes,2,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"` // Use the default config as the initial configuration without warming and // waiting for the first discovery response. 
Requires the default configuration // to be supplied. @@ -797,7 +797,7 @@ func (x *ExtensionConfigSource) GetConfigSource() *ConfigSource { return nil } -func (x *ExtensionConfigSource) GetDefaultConfig() *any.Any { +func (x *ExtensionConfigSource) GetDefaultConfig() *any1.Any { if x != nil { return x.DefaultConfig } @@ -1044,7 +1044,7 @@ var file_envoy_config_core_v3_config_source_proto_goTypes = []interface{}{ (*wrappers.DoubleValue)(nil), // 13: google.protobuf.DoubleValue (*WatchedDirectory)(nil), // 14: envoy.config.core.v3.WatchedDirectory (*v3.Authority)(nil), // 15: xds.core.v3.Authority - (*any.Any)(nil), // 16: google.protobuf.Any + (*any1.Any)(nil), // 16: google.protobuf.Any } var file_envoy_config_core_v3_config_source_proto_depIdxs = []int32{ 1, // 0: envoy.config.core.v3.ApiConfigSource.api_type:type_name -> envoy.config.core.v3.ApiConfigSource.ApiType diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go index 33367e727a..edab151352 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go @@ -910,12 +910,33 @@ func (m *ConfigSource) validate(all bool) error { errors = append(errors, err) } - switch m.ConfigSourceSpecifier.(type) { - + oneofConfigSourceSpecifierPresent := false + switch v := m.ConfigSourceSpecifier.(type) { case *ConfigSource_Path: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true // no validation rules for Path - case 
*ConfigSource_PathConfigSource: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true if all { switch v := interface{}(m.GetPathConfigSource()).(type) { @@ -947,6 +968,17 @@ func (m *ConfigSource) validate(all bool) error { } case *ConfigSource_ApiConfigSource: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true if all { switch v := interface{}(m.GetApiConfigSource()).(type) { @@ -978,6 +1010,17 @@ func (m *ConfigSource) validate(all bool) error { } case *ConfigSource_Ads: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true if all { switch v := interface{}(m.GetAds()).(type) { @@ -1009,6 +1052,17 @@ func (m *ConfigSource) validate(all bool) error { } case *ConfigSource_Self: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true if all { switch v := interface{}(m.GetSelf()).(type) { @@ -1040,6 +1094,9 @@ func (m *ConfigSource) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofConfigSourceSpecifierPresent { err := ConfigSourceValidationError{ field: "ConfigSourceSpecifier", reason: "value is required", @@ -1048,7 +1105,6 @@ func (m *ConfigSource) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go index 04420fec87..56dab5b987 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/event_service_config.proto package corev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go index 971a6d6018..a55672c275 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go @@ -57,9 +57,20 @@ func (m *EventServiceConfig) validate(all bool) error { var errors []error - switch m.ConfigSourceSpecifier.(type) { - + oneofConfigSourceSpecifierPresent := false + switch v := m.ConfigSourceSpecifier.(type) { case *EventServiceConfig_GrpcService: + if v == nil { + err := EventServiceConfigValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true if all { switch v := interface{}(m.GetGrpcService()).(type) { @@ -91,6 +102,9 @@ func (m *EventServiceConfig) 
validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofConfigSourceSpecifierPresent { err := EventServiceConfigValidationError{ field: "ConfigSourceSpecifier", reason: "value is required", @@ -99,7 +113,6 @@ func (m *EventServiceConfig) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go index 4b7e6ba0e5..ecf3e6b5cf 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/extension.proto package corev3 @@ -9,7 +9,7 @@ package corev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -34,12 +34,12 @@ type TypedExtensionConfig struct { // it serves the role of an opaque identifier. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The typed config for the extension. The type URL will be used to identify - // the extension. In the case that the type URL is *xds.type.v3.TypedStruct* - // (or, for historical reasons, *udpa.type.v1.TypedStruct*), the inner type - // URL of *TypedStruct* will be utilized. See the + // the extension. 
In the case that the type URL is ``xds.type.v3.TypedStruct`` + // (or, for historical reasons, ``udpa.type.v1.TypedStruct``), the inner type + // URL of ``TypedStruct`` will be utilized. See the // :ref:`extension configuration overview // ` for further details. - TypedConfig *any.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` + TypedConfig *any1.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` } func (x *TypedExtensionConfig) Reset() { @@ -81,7 +81,7 @@ func (x *TypedExtensionConfig) GetName() string { return "" } -func (x *TypedExtensionConfig) GetTypedConfig() *any.Any { +func (x *TypedExtensionConfig) GetTypedConfig() *any1.Any { if x != nil { return x.TypedConfig } @@ -134,7 +134,7 @@ func file_envoy_config_core_v3_extension_proto_rawDescGZIP() []byte { var file_envoy_config_core_v3_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_config_core_v3_extension_proto_goTypes = []interface{}{ (*TypedExtensionConfig)(nil), // 0: envoy.config.core.v3.TypedExtensionConfig - (*any.Any)(nil), // 1: google.protobuf.Any + (*any1.Any)(nil), // 1: google.protobuf.Any } var file_envoy_config_core_v3_extension_proto_depIdxs = []int32{ 1, // 0: envoy.config.core.v3.TypedExtensionConfig.typed_config:type_name -> google.protobuf.Any diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go index 8cf0785a02..5e0e91178a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/grpc_method_list.proto package corev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go index 2a2dd78c1e..1bab4b9e83 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/grpc_service.proto package corev3 @@ -9,7 +9,7 @@ package corev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" duration "github.com/golang/protobuf/ptypes/duration" _struct "github.com/golang/protobuf/ptypes/struct" wrappers "github.com/golang/protobuf/ptypes/wrappers" @@ -148,9 +148,14 @@ type GrpcService_EnvoyGrpc struct { // in the :ref:`Cluster ` :ref:`transport_socket // `. ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` - // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. + // The ``:authority`` header in the grpc request. If this field is not set, the authority header value will be ``cluster_name``. // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. 
Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"` + // Indicates the retry policy for re-establishing the gRPC stream + // This field is optional. If max interval is not provided, it will be set to ten times the provided base interval. + // Currently only supported for xDS gRPC streams. + // If not set, xDS gRPC streams default base interval:500ms, maximum interval:30s will be applied. + RetryPolicy *RetryPolicy `protobuf:"bytes,3,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` } func (x *GrpcService_EnvoyGrpc) Reset() { @@ -199,6 +204,13 @@ func (x *GrpcService_EnvoyGrpc) GetAuthority() string { return "" } +func (x *GrpcService_EnvoyGrpc) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + // [#next-free-field: 9] type GrpcService_GoogleGrpc struct { state protoimpl.MessageState @@ -924,7 +936,7 @@ func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) G return nil } -func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetTypedConfig() *any.Any { +func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig); ok { return x.TypedConfig } @@ -936,7 +948,7 @@ type isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_Conf } type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig struct { - TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType() { @@ -954,7 +966,7 @@ type 
GrpcService_GoogleGrpc_CallCredentials_StsService struct { // URI of the token exchange service that handles token exchange requests. // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by - // https://github.com/envoyproxy/protoc-gen-validate/issues/303] + // https://github.com/bufbuild/protoc-gen-validate/issues/303] TokenExchangeServiceUri string `protobuf:"bytes,1,opt,name=token_exchange_service_uri,json=tokenExchangeServiceUri,proto3" json:"token_exchange_service_uri,omitempty"` // Location of the target service or resource where the client // intends to use the requested security token. @@ -1186,7 +1198,7 @@ var file_envoy_config_core_v3_grpc_service_proto_rawDesc = []byte{ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x8c, 0x21, 0x0a, 0x0b, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x6f, 0x74, 0x6f, 0x22, 0xd2, 0x21, 0x0a, 0x0b, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x0a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, @@ -1205,262 +1217,266 @@ var file_envoy_config_core_v3_grpc_service_proto_rawDesc = []byte{ 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x98, 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x47, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 
0xde, 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x47, 0x72, 0x70, 0x63, 0x12, 0x2a, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x00, 0x28, 0x80, 0x80, 0x01, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, - 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x47, 0x72, 0x70, 0x63, - 0x1a, 0xfa, 0x1c, 0x0a, 0x0a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x12, - 0x26, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x55, 0x72, 0x69, 0x12, 0x70, 0x0a, 0x13, 0x63, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, - 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x67, 0x0a, 0x10, 0x63, 
0x61, 0x6c, - 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x76, + 0x6f, 0x79, 0x47, 0x72, 0x70, 0x63, 0x1a, 0xfa, 0x1c, 0x0a, 0x0a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x47, 0x72, 0x70, 0x63, 0x12, 0x26, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, + 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x72, 0x69, 0x12, 0x70, 0x0a, + 0x13, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x12, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, + 0x67, 0x0a, 0x10, 0x63, 
0x61, 0x6c, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x28, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x12, 0x38, 0x0a, 0x18, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5e, 0x0a, + 0x1d, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x62, 0x75, 0x66, 0x66, + 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x19, 0x70, 0x65, 0x72, 0x53, 0x74, 0x72, 
0x65, 0x61, 0x6d, 0x42, 0x75, 0x66, + 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x5b, 0x0a, + 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, - 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x73, 0x52, 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x73, 0x12, 0x28, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, - 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x38, 0x0a, 0x18, - 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x5f, 0x66, 0x61, 0x63, 0x74, - 0x6f, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, - 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x61, 0x63, 0x74, 0x6f, - 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, - 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5e, 0x0a, 0x1d, 0x70, 0x65, 0x72, 0x5f, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 
0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x19, 0x70, 0x65, - 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x5b, 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, - 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x41, 0x72, 0x67, 0x73, 0x1a, 0x9d, 0x02, 0x0a, 0x0e, 0x53, 0x73, 0x6c, 0x43, 0x72, 0x65, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x0a, 0x72, 0x6f, 0x6f, 0x74, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, - 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x49, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x52, 0x0b, 0x63, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x9d, 0x02, 0x0a, 0x0e, 0x53, + 0x73, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, + 0x0a, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 
0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x49, + 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x0a, 0x70, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x0a, 0x63, 0x65, 0x72, + 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, - 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, - 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x65, 0x72, 0x74, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, 0x0a, 0x37, 0x65, 0x6e, 0x76, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x09, 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, + 0x39, 0x0a, 0x37, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 
0x6c, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x60, 0x0a, 0x16, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x3a, 0x46, 0x9a, 0xc5, 0x88, 0x1e, 0x41, 0x0a, 0x3f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x60, 0x0a, 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, - 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x3a, 0x46, - 0x9a, 0xc5, 0x88, 0x1e, 0x41, 0x0a, 0x3f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x92, 0x03, 0x0a, 0x12, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x66, 0x0a, - 0x0f, 0x73, 0x73, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, - 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x73, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x73, 
0x12, 0x3f, 0x0a, 0x0e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x0d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x44, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x72, 0x0a, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, - 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x3a, 0x42, 0x9a, 0xc5, 0x88, 0x1e, - 0x3d, 0x0a, 0x3b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, - 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x42, 0x1b, - 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x70, 0x65, - 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x88, 0x0f, 0x0a, 0x0f, - 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, - 0x23, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x63, 
0x63, 0x65, 0x73, 0x73, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x4c, 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x63, - 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x02, 0x20, + 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x92, 0x03, 0x0a, + 0x12, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x12, 0x66, 0x0a, 0x0f, 0x73, 0x73, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, 0x72, + 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x73, 0x6c, + 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x0e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x13, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x45, 0x6e, 0x67, 0x69, - 0x6e, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x66, - 0x72, 0x65, 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, - 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x9e, 0x01, 0x0a, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 
0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6a, 0x77, 0x74, 0x5f, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5f, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x17, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x77, - 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x72, 0x0a, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x5f, 0x69, 0x61, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x51, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, - 0x52, 0x09, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x61, 0x6d, 0x12, 0x7d, 0x0a, 0x0b, 0x66, - 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x5a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, - 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x48, 0x00, 0x52, 0x0a, - 0x66, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x6a, 0x0a, 0x0b, 0x73, 0x74, - 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, - 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x74, - 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x73, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xd9, 0x01, 0x0a, 0x22, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x19, 0x0a, - 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6a, 0x73, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x5f, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, - 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x4c, - 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x3a, 0x62, - 0x9a, 0xc5, 0x88, 0x1e, 0x5d, 0x0a, 0x5b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 
0x53, 0x65, 0x72, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x0d, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x72, 0x0a, 0x11, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, + 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x10, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x3a, 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, + 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x42, 0x1b, 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x1a, 0x88, 0x0f, 0x0a, 0x0f, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x4c, 0x0a, 0x15, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x75, + 0x74, 0x65, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x9e, 0x01, 0x0a, + 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x5f, 0x6a, 0x77, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x5f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x73, 0x1a, 0xcc, 0x01, 0x0a, 0x14, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, - 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x61, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2d, 0x0a, 0x12, - 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x73, 
0x65, 0x6c, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x54, 0x9a, 0xc5, 0x88, - 0x1e, 0x4f, 0x0a, 0x4d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, - 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x73, 0x1a, 0xea, 0x01, 0x0a, 0x1d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x3a, 0x5d, 0x9a, 0xc5, 0x88, 0x1e, 0x58, 0x0a, 0x56, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, + 0x6c, 0x73, 0x48, 0x00, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x77, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x72, 0x0a, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x69, 0x61, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x51, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 
0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, + 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x09, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x61, + 0x6d, 0x12, 0x7d, 0x0a, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x12, 0x6a, 0x0a, 0x0b, 0x73, 0x74, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd7, - 0x03, 0x0a, 0x0a, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3b, 0x0a, - 0x1a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x17, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x69, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, - 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, - 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, - 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x35, 0x0a, 0x12, 0x73, 0x75, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, - 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x74, - 0x68, 0x12, 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 
0x74, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f, - 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, - 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, - 0x74, 0x6f, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x4a, 0x9a, 0xc5, - 0x88, 0x1e, 0x45, 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, - 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x74, - 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x3f, 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, - 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, + 0x52, 0x0a, 0x73, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xd9, 0x01, 0x0a, + 0x22, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, + 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x73, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x34, + 0x0a, 0x16, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, + 0x74, 0x6f, 0x6b, 0x65, 
0x6e, 0x4c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x3a, 0x62, 0x9a, 0xc5, 0x88, 0x1e, 0x5d, 0x0a, 0x5b, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, + 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xcc, 0x01, 0x0a, 0x14, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x3a, 0x54, 0x9a, 0xc5, 0x88, 0x1e, 0x4f, 0x0a, 0x4d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, + 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xea, 0x01, 
0x0a, 0x1d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, + 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, + 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x5d, 0x9a, 0xc5, 0x88, 0x1e, 0x58, 0x0a, + 0x56, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x42, 0x1b, 0x0a, 0x14, 0x63, 0x72, 0x65, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xc3, 0x02, 0x0a, 0x0b, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x56, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, - 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x2e, 0x41, - 0x72, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, - 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, - 0x67, 
0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, - 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x16, 0x0a, 0x0f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, - 0xf8, 0x42, 0x01, 0x1a, 0x77, 0x0a, 0x09, 0x41, 0x72, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, + 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, + 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd7, 0x03, 0x0a, 0x0a, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x65, 0x78, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, + 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x78, + 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x69, + 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, + 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x30, + 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, + 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x3a, 0x4a, 0x9a, 0xc5, 0x88, 0x1e, 0x45, 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 
0x47, 0x72, 0x70, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, + 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x3f, + 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, - 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x2e, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x2f, 0x9a, 0xc5, - 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x3a, 0x24, 0x9a, - 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x42, 0x17, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x42, 0x84, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, - 0x72, 0x6f, 0x78, 
0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, - 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x42, + 0x1b, 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xc3, 0x02, 0x0a, + 0x0b, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x56, 0x0a, 0x04, + 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x41, 0x72, 0x67, 0x73, 0x2e, 0x41, 0x72, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, + 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, + 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x16, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x77, 0x0a, 0x09, 0x41, 0x72, 0x67, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 
0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, + 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, + 0x73, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, + 0x72, 0x70, 0x63, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x17, 0x0a, 0x10, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x42, 0x84, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 
0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1493,41 +1509,43 @@ var file_envoy_config_core_v3_grpc_service_proto_goTypes = []interface{}{ nil, // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry (*duration.Duration)(nil), // 14: google.protobuf.Duration (*HeaderValue)(nil), // 15: envoy.config.core.v3.HeaderValue - (*_struct.Struct)(nil), // 16: google.protobuf.Struct - (*wrappers.UInt32Value)(nil), // 17: google.protobuf.UInt32Value - (*DataSource)(nil), // 18: envoy.config.core.v3.DataSource - (*emptypb.Empty)(nil), // 19: google.protobuf.Empty - (*any.Any)(nil), // 20: google.protobuf.Any + (*RetryPolicy)(nil), // 16: envoy.config.core.v3.RetryPolicy + (*_struct.Struct)(nil), // 17: google.protobuf.Struct + (*wrappers.UInt32Value)(nil), // 18: google.protobuf.UInt32Value + (*DataSource)(nil), // 19: envoy.config.core.v3.DataSource + (*emptypb.Empty)(nil), // 20: google.protobuf.Empty + (*any1.Any)(nil), // 21: google.protobuf.Any } var file_envoy_config_core_v3_grpc_service_proto_depIdxs = []int32{ 1, // 0: envoy.config.core.v3.GrpcService.envoy_grpc:type_name -> envoy.config.core.v3.GrpcService.EnvoyGrpc 2, // 1: envoy.config.core.v3.GrpcService.google_grpc:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc 14, // 2: envoy.config.core.v3.GrpcService.timeout:type_name -> google.protobuf.Duration 15, // 3: envoy.config.core.v3.GrpcService.initial_metadata:type_name -> envoy.config.core.v3.HeaderValue - 5, // 4: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials - 6, // 5: envoy.config.core.v3.GrpcService.GoogleGrpc.call_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials - 16, // 6: 
envoy.config.core.v3.GrpcService.GoogleGrpc.config:type_name -> google.protobuf.Struct - 17, // 7: envoy.config.core.v3.GrpcService.GoogleGrpc.per_stream_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value - 7, // 8: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs - 18, // 9: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.root_certs:type_name -> envoy.config.core.v3.DataSource - 18, // 10: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.private_key:type_name -> envoy.config.core.v3.DataSource - 18, // 11: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.cert_chain:type_name -> envoy.config.core.v3.DataSource - 3, // 12: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.ssl_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials - 19, // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.google_default:type_name -> google.protobuf.Empty - 4, // 14: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.local_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials - 19, // 15: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_compute_engine:type_name -> google.protobuf.Empty - 8, // 16: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.service_account_jwt_access:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials - 9, // 17: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_iam:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials - 10, // 18: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.from_plugin:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin - 11, // 19: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.sts_service:type_name -> 
envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService - 13, // 20: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry - 20, // 21: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin.typed_config:type_name -> google.protobuf.Any - 12, // 22: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry.value:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value - 23, // [23:23] is the sub-list for method output_type - 23, // [23:23] is the sub-list for method input_type - 23, // [23:23] is the sub-list for extension type_name - 23, // [23:23] is the sub-list for extension extendee - 0, // [0:23] is the sub-list for field type_name + 16, // 4: envoy.config.core.v3.GrpcService.EnvoyGrpc.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy + 5, // 5: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials + 6, // 6: envoy.config.core.v3.GrpcService.GoogleGrpc.call_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials + 17, // 7: envoy.config.core.v3.GrpcService.GoogleGrpc.config:type_name -> google.protobuf.Struct + 18, // 8: envoy.config.core.v3.GrpcService.GoogleGrpc.per_stream_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 7, // 9: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs + 19, // 10: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.root_certs:type_name -> envoy.config.core.v3.DataSource + 19, // 11: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.private_key:type_name -> envoy.config.core.v3.DataSource + 19, // 12: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.cert_chain:type_name -> envoy.config.core.v3.DataSource + 3, // 13: 
envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.ssl_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials + 20, // 14: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.google_default:type_name -> google.protobuf.Empty + 4, // 15: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.local_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials + 20, // 16: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_compute_engine:type_name -> google.protobuf.Empty + 8, // 17: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.service_account_jwt_access:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials + 9, // 18: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_iam:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials + 10, // 19: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.from_plugin:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin + 11, // 20: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.sts_service:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService + 13, // 21: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry + 21, // 22: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin.typed_config:type_name -> google.protobuf.Any + 12, // 23: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry.value:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value + 24, // [24:24] is the sub-list for method output_type + 24, // [24:24] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension 
extendee + 0, // [0:24] is the sub-list for field type_name } func init() { file_envoy_config_core_v3_grpc_service_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go index 56d2507221..8fd2523759 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go @@ -120,9 +120,20 @@ func (m *GrpcService) validate(all bool) error { } - switch m.TargetSpecifier.(type) { - + oneofTargetSpecifierPresent := false + switch v := m.TargetSpecifier.(type) { case *GrpcService_EnvoyGrpc_: + if v == nil { + err := GrpcServiceValidationError{ + field: "TargetSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTargetSpecifierPresent = true if all { switch v := interface{}(m.GetEnvoyGrpc()).(type) { @@ -154,6 +165,17 @@ func (m *GrpcService) validate(all bool) error { } case *GrpcService_GoogleGrpc_: + if v == nil { + err := GrpcServiceValidationError{ + field: "TargetSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTargetSpecifierPresent = true if all { switch v := interface{}(m.GetGoogleGrpc()).(type) { @@ -185,6 +207,9 @@ func (m *GrpcService) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofTargetSpecifierPresent { err := GrpcServiceValidationError{ field: "TargetSpecifier", reason: "value is required", @@ -193,7 +218,6 @@ func (m *GrpcService) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -339,6 +363,35 @@ func (m 
*GrpcService_EnvoyGrpc) validate(all bool) error { errors = append(errors, err) } + if all { + switch v := interface{}(m.GetRetryPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_EnvoyGrpcValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_EnvoyGrpcValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_EnvoyGrpcValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return GrpcService_EnvoyGrpcMultiError(errors) } @@ -1022,9 +1075,20 @@ func (m *GrpcService_GoogleGrpc_ChannelCredentials) validate(all bool) error { var errors []error - switch m.CredentialSpecifier.(type) { - + oneofCredentialSpecifierPresent := false + switch v := m.CredentialSpecifier.(type) { case *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true if all { switch v := interface{}(m.GetSslCredentials()).(type) { @@ -1056,6 +1120,17 @@ func (m *GrpcService_GoogleGrpc_ChannelCredentials) validate(all bool) error { } case *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors 
= append(errors, err) + } + oneofCredentialSpecifierPresent = true if all { switch v := interface{}(m.GetGoogleDefault()).(type) { @@ -1087,6 +1162,17 @@ func (m *GrpcService_GoogleGrpc_ChannelCredentials) validate(all bool) error { } case *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true if all { switch v := interface{}(m.GetLocalCredentials()).(type) { @@ -1118,6 +1204,9 @@ func (m *GrpcService_GoogleGrpc_ChannelCredentials) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofCredentialSpecifierPresent { err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ field: "CredentialSpecifier", reason: "value is required", @@ -1126,7 +1215,6 @@ func (m *GrpcService_GoogleGrpc_ChannelCredentials) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -1235,12 +1323,33 @@ func (m *GrpcService_GoogleGrpc_CallCredentials) validate(all bool) error { var errors []error - switch m.CredentialSpecifier.(type) { - + oneofCredentialSpecifierPresent := false + switch v := m.CredentialSpecifier.(type) { case *GrpcService_GoogleGrpc_CallCredentials_AccessToken: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true // no validation rules for AccessToken - case *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors 
= append(errors, err) + } + oneofCredentialSpecifierPresent = true if all { switch v := interface{}(m.GetGoogleComputeEngine()).(type) { @@ -1272,9 +1381,30 @@ func (m *GrpcService_GoogleGrpc_CallCredentials) validate(all bool) error { } case *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true // no validation rules for GoogleRefreshToken - case *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true if all { switch v := interface{}(m.GetServiceAccountJwtAccess()).(type) { @@ -1306,6 +1436,17 @@ func (m *GrpcService_GoogleGrpc_CallCredentials) validate(all bool) error { } case *GrpcService_GoogleGrpc_CallCredentials_GoogleIam: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true if all { switch v := interface{}(m.GetGoogleIam()).(type) { @@ -1337,6 +1478,17 @@ func (m *GrpcService_GoogleGrpc_CallCredentials) validate(all bool) error { } case *GrpcService_GoogleGrpc_CallCredentials_FromPlugin: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true if all { switch v := 
interface{}(m.GetFromPlugin()).(type) { @@ -1368,6 +1520,17 @@ func (m *GrpcService_GoogleGrpc_CallCredentials) validate(all bool) error { } case *GrpcService_GoogleGrpc_CallCredentials_StsService_: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true if all { switch v := interface{}(m.GetStsService()).(type) { @@ -1399,6 +1562,9 @@ func (m *GrpcService_GoogleGrpc_CallCredentials) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofCredentialSpecifierPresent { err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ field: "CredentialSpecifier", reason: "value is required", @@ -1407,7 +1573,6 @@ func (m *GrpcService_GoogleGrpc_CallCredentials) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -1916,9 +2081,18 @@ func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) v // no validation rules for Name - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -1949,6 +2123,8 @@ func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) v } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -2217,15 +2393,38 @@ func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) validate(all bool) error { var errors []error - switch m.ValueSpecifier.(type) { - + oneofValueSpecifierPresent := false + switch v := 
m.ValueSpecifier.(type) { case *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{ + field: "ValueSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofValueSpecifierPresent = true // no validation rules for StringValue - case *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{ + field: "ValueSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofValueSpecifierPresent = true // no validation rules for IntValue - default: + _ = v // ensures v is used + } + if !oneofValueSpecifierPresent { err := GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{ field: "ValueSpecifier", reason: "value is required", @@ -2234,7 +2433,6 @@ func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go index be63a5a26b..8578ce93fe 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/health_check.proto package corev3 @@ -11,7 +11,7 @@ import ( v31 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" duration "github.com/golang/protobuf/ptypes/duration" _struct "github.com/golang/protobuf/ptypes/struct" wrappers "github.com/golang/protobuf/ptypes/wrappers" @@ -32,7 +32,7 @@ const ( type HealthStatus int32 const ( - // The health status is not known. This is interpreted by Envoy as *HEALTHY*. + // The health status is not known. This is interpreted by Envoy as ``HEALTHY``. HealthStatus_UNKNOWN HealthStatus = 0 // Healthy. HealthStatus_HEALTHY HealthStatus = 1 @@ -42,10 +42,10 @@ const ( // ``_ // or // ``_. - // This is interpreted by Envoy as *UNHEALTHY*. + // This is interpreted by Envoy as ``UNHEALTHY``. HealthStatus_DRAINING HealthStatus = 3 // Health check timed out. This is part of HDS and is interpreted by Envoy as - // *UNHEALTHY*. + // ``UNHEALTHY``. HealthStatus_TIMEOUT HealthStatus = 4 // Degraded. HealthStatus_DEGRADED HealthStatus = 5 @@ -165,14 +165,14 @@ type HealthCheck struct { // interval Envoy will add interval_jitter to the wait time. IntervalJitter *duration.Duration `protobuf:"bytes,3,opt,name=interval_jitter,json=intervalJitter,proto3" json:"interval_jitter,omitempty"` // An optional jitter amount as a percentage of interval_ms. If specified, - // during every interval Envoy will add interval_ms * - // interval_jitter_percent / 100 to the wait time. + // during every interval Envoy will add ``interval_ms`` * + // ``interval_jitter_percent`` / 100 to the wait time. 
// // If interval_jitter_ms and interval_jitter_percent are both set, both of // them will be used to increase the wait time. IntervalJitterPercent uint32 `protobuf:"varint,18,opt,name=interval_jitter_percent,json=intervalJitterPercent,proto3" json:"interval_jitter_percent,omitempty"` // The number of unhealthy health checks required before a host is marked - // unhealthy. Note that for *http* health checking if a host responds with a code not in + // unhealthy. Note that for ``http`` health checking if a host responds with a code not in // :ref:`expected_statuses ` // or :ref:`retriable_statuses `, // this threshold is ignored and the host is considered immediately unhealthy. @@ -205,7 +205,7 @@ type HealthCheck struct { // (including new hosts) when the cluster has received no traffic. // // This is useful for when we want to send frequent health checks with - // `no_traffic_interval` but then revert to lower frequency `no_traffic_healthy_interval` once + // ``no_traffic_interval`` but then revert to lower frequency ``no_traffic_healthy_interval`` once // a host in the cluster is marked as healthy. // // Once a cluster has been used for traffic routing, Envoy will shift back to using the @@ -267,7 +267,7 @@ type HealthCheck struct { // name: envoy.transport_sockets.tls // config: { ... } # tls socket configuration // - // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the + // If this field is set, then for health checks it will supersede an entry of ``envoy.transport_socket`` in the // :ref:`LbEndpoint.Metadata `. // This allows using different transport socket capabilities for health checking versus proxying to the // endpoint. @@ -586,7 +586,7 @@ type HealthCheck_Payload_Text struct { } type HealthCheck_Payload_Binary struct { - // [#not-implemented-hide:] Binary payload. + // Binary payload. 
Binary []byte `protobuf:"bytes,2,opt,name=binary,proto3,oneof"` } @@ -594,7 +594,7 @@ func (*HealthCheck_Payload_Text) isHealthCheck_Payload_Payload() {} func (*HealthCheck_Payload_Binary) isHealthCheck_Payload_Payload() {} -// [#next-free-field: 13] +// [#next-free-field: 15] type HealthCheck_HttpHealthCheck struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -606,12 +606,23 @@ type HealthCheck_HttpHealthCheck struct { // :ref:`hostname ` field. Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` // Specifies the HTTP path that will be requested during health checking. For example - // */healthcheck*. + // ``/healthcheck``. Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` // [#not-implemented-hide:] HTTP specific payload. Send *HealthCheck_Payload `protobuf:"bytes,3,opt,name=send,proto3" json:"send,omitempty"` - // [#not-implemented-hide:] HTTP specific response. - Receive *HealthCheck_Payload `protobuf:"bytes,4,opt,name=receive,proto3" json:"receive,omitempty"` + // Specifies a list of HTTP expected responses to match in the first ``response_buffer_size`` bytes of the response body. + // If it is set, both the expected response check and status code determine the health check. + // When checking the response, “fuzzy” matching is performed such that each payload block must be found, + // and in the order specified, but not necessarily contiguous. + // + // .. note:: + // + // It is recommended to set ``response_buffer_size`` based on the total Payload size for efficiency. + // The default buffer size is 1024 bytes when it is not set. + Receive []*HealthCheck_Payload `protobuf:"bytes,4,rep,name=receive,proto3" json:"receive,omitempty"` + // Specifies the size of response buffer in bytes that is used to Payload match. + // The default value is 1024. Setting to 0 implies that the Payload will be matched against the entire response. 
+ ResponseBufferSize *wrappers.UInt64Value `protobuf:"bytes,14,opt,name=response_buffer_size,json=responseBufferSize,proto3" json:"response_buffer_size,omitempty"` // Specifies a list of HTTP headers that should be added to each request that is sent to the // health checked cluster. For more information, including details on header value syntax, see // the documentation on :ref:`custom request headers @@ -643,6 +654,11 @@ type HealthCheck_HttpHealthCheck struct { // `. See the :ref:`architecture overview // ` for more information. ServiceNameMatcher *v31.StringMatcher `protobuf:"bytes,11,opt,name=service_name_matcher,json=serviceNameMatcher,proto3" json:"service_name_matcher,omitempty"` + // HTTP Method that will be used for health checking, default is "GET". + // GET, HEAD, POST, PUT, DELETE, OPTIONS, TRACE, PATCH methods are supported, but making request body is not supported. + // CONNECT method is disallowed because it is not appropriate for health check request. + // If a non-200 response is expected by the method, it needs to be set in :ref:`expected_statuses `. 
+ Method RequestMethod `protobuf:"varint,13,opt,name=method,proto3,enum=envoy.config.core.v3.RequestMethod" json:"method,omitempty"` } func (x *HealthCheck_HttpHealthCheck) Reset() { @@ -698,13 +714,20 @@ func (x *HealthCheck_HttpHealthCheck) GetSend() *HealthCheck_Payload { return nil } -func (x *HealthCheck_HttpHealthCheck) GetReceive() *HealthCheck_Payload { +func (x *HealthCheck_HttpHealthCheck) GetReceive() []*HealthCheck_Payload { if x != nil { return x.Receive } return nil } +func (x *HealthCheck_HttpHealthCheck) GetResponseBufferSize() *wrappers.UInt64Value { + if x != nil { + return x.ResponseBufferSize + } + return nil +} + func (x *HealthCheck_HttpHealthCheck) GetRequestHeadersToAdd() []*HeaderValueOption { if x != nil { return x.RequestHeadersToAdd @@ -747,6 +770,13 @@ func (x *HealthCheck_HttpHealthCheck) GetServiceNameMatcher() *v31.StringMatcher return nil } +func (x *HealthCheck_HttpHealthCheck) GetMethod() RequestMethod { + if x != nil { + return x.Method + } + return RequestMethod_METHOD_UNSPECIFIED +} + type HealthCheck_TcpHealthCheck struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -755,7 +785,7 @@ type HealthCheck_TcpHealthCheck struct { // Empty payloads imply a connect-only health check. Send *HealthCheck_Payload `protobuf:"bytes,1,opt,name=send,proto3" json:"send,omitempty"` // When checking the response, “fuzzy” matching is performed such that each - // binary block must be found, and in the order specified, but not + // payload block must be found, and in the order specified, but not // necessarily contiguous. 
Receive []*HealthCheck_Payload `protobuf:"bytes,2,rep,name=receive,proto3" json:"receive,omitempty"` } @@ -1000,7 +1030,7 @@ func (m *HealthCheck_CustomHealthCheck) GetConfigType() isHealthCheck_CustomHeal return nil } -func (x *HealthCheck_CustomHealthCheck) GetTypedConfig() *any.Any { +func (x *HealthCheck_CustomHealthCheck) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*HealthCheck_CustomHealthCheck_TypedConfig); ok { return x.TypedConfig } @@ -1012,7 +1042,7 @@ type isHealthCheck_CustomHealthCheck_ConfigType interface { } type HealthCheck_CustomHealthCheck_TypedConfig struct { - TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*HealthCheck_CustomHealthCheck_TypedConfig) isHealthCheck_CustomHealthCheck_ConfigType() {} @@ -1109,7 +1139,7 @@ var file_envoy_config_core_v3_health_check_proto_rawDesc = []byte{ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, - 0x22, 0xef, 0x1c, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x22, 0x91, 0x1e, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, @@ -1232,7 +1262,7 @@ var file_envoy_config_core_v3_health_check_proto_rawDesc = []byte{ 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 
0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x0e, 0x0a, 0x07, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xaa, 0x06, 0x0a, 0x0f, 0x48, 0x74, + 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xcc, 0x07, 0x0a, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1f, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x21, @@ -1242,120 +1272,130 @@ var file_envoy_config_core_v3_health_check_proto_rawDesc = []byte{ 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, - 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, - 0x63, 0x65, 0x69, 0x76, 0x65, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, - 0xfa, 0x42, 0x06, 
0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, - 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, - 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, - 0xc8, 0x01, 0x00, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x46, 0x0a, 0x11, 0x65, - 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x63, 0x65, 0x69, 0x76, 0x65, 0x12, 0x57, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, 0x00, 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x67, + 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 
0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, + 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, + 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x16, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x46, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x11, 0x72, 0x65, 0x74, 0x72, - 0x69, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x54, 0x0a, - 0x11, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x0f, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x56, 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 
- 0x61, 0x6d, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x3a, 0x34, 0x9a, 0xc5, 0x88, - 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x52, 0x0c, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x09, 0x75, 0x73, 0x65, - 0x5f, 0x68, 0x74, 0x74, 0x70, 0x32, 0x1a, 0xc9, 0x01, 0x0a, 0x0e, 0x54, 0x63, 0x70, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x65, 0x6e, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, - 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x10, 0x65, 0x78, 0x70, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x12, + 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x52, 0x11, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x11, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x5f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x63, 0x6f, 0x64, + 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x56, 0x0a, 0x14, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, + 0x04, 0x10, 0x01, 0x20, 0x06, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x3a, 0x34, 0x9a, + 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 
0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x52, + 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x09, 0x75, + 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x32, 0x1a, 0xc9, 0x01, 0x0a, 0x0e, 0x54, 0x63, 0x70, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3d, 0x0a, 0x04, 0x73, + 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x3a, 0x33, 0x9a, - 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x1a, 0x5b, 0x0a, 0x10, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, - 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, - 0xf4, 0x01, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, - 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x12, 0x5d, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, + 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, + 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, - 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0xc0, 0x01, 0x0a, 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, - 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x0d, 0x0a, 0x0b, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, - 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x64, 0x0a, 0x0a, 0x54, 0x6c, 0x73, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x3a, 0x2f, - 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, - 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x3a, + 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x15, 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x0a, - 
0x10, 0x0b, 0x2a, 0x60, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x55, 0x4e, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, - 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x49, 0x4d, - 0x45, 0x4f, 0x55, 0x54, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, - 0x45, 0x44, 0x10, 0x05, 0x42, 0x84, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, - 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x1a, 0x5b, 0x0a, 0x10, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, + 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 
0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x2e, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x1a, 0xf4, 0x01, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, + 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x5d, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, + 0x07, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0xc0, 0x01, 0x0a, 0x11, 0x43, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 
0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x0d, + 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, + 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x64, 0x0a, 0x0a, 0x54, + 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x70, + 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x15, 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, + 0x08, 0x0a, 0x10, 0x0b, 0x2a, 0x60, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 
0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x55, 0x4e, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, + 0x08, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x54, + 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, + 0x41, 0x44, 0x45, 0x44, 0x10, 0x05, 0x42, 0x84, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, + 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1388,11 +1428,13 @@ var file_envoy_config_core_v3_health_check_proto_goTypes = []interface{}{ (*wrappers.BoolValue)(nil), // 12: google.protobuf.BoolValue (*EventServiceConfig)(nil), // 13: envoy.config.core.v3.EventServiceConfig (*_struct.Struct)(nil), // 14: google.protobuf.Struct - (*HeaderValueOption)(nil), // 15: envoy.config.core.v3.HeaderValueOption - (*v3.Int64Range)(nil), // 16: envoy.type.v3.Int64Range - (v3.CodecClientType)(0), // 17: envoy.type.v3.CodecClientType - (*v31.StringMatcher)(nil), // 18: envoy.type.matcher.v3.StringMatcher - (*any.Any)(nil), // 19: 
google.protobuf.Any + (*wrappers.UInt64Value)(nil), // 15: google.protobuf.UInt64Value + (*HeaderValueOption)(nil), // 16: envoy.config.core.v3.HeaderValueOption + (*v3.Int64Range)(nil), // 17: envoy.type.v3.Int64Range + (v3.CodecClientType)(0), // 18: envoy.type.v3.CodecClientType + (*v31.StringMatcher)(nil), // 19: envoy.type.matcher.v3.StringMatcher + (RequestMethod)(0), // 20: envoy.config.core.v3.RequestMethod + (*any1.Any)(nil), // 21: google.protobuf.Any } var file_envoy_config_core_v3_health_check_proto_depIdxs = []int32{ 0, // 0: envoy.config.core.v3.HealthStatusSet.statuses:type_name -> envoy.config.core.v3.HealthStatus @@ -1418,20 +1460,22 @@ var file_envoy_config_core_v3_health_check_proto_depIdxs = []int32{ 14, // 20: envoy.config.core.v3.HealthCheck.transport_socket_match_criteria:type_name -> google.protobuf.Struct 3, // 21: envoy.config.core.v3.HealthCheck.HttpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload 3, // 22: envoy.config.core.v3.HealthCheck.HttpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload - 15, // 23: envoy.config.core.v3.HealthCheck.HttpHealthCheck.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption - 16, // 24: envoy.config.core.v3.HealthCheck.HttpHealthCheck.expected_statuses:type_name -> envoy.type.v3.Int64Range - 16, // 25: envoy.config.core.v3.HealthCheck.HttpHealthCheck.retriable_statuses:type_name -> envoy.type.v3.Int64Range - 17, // 26: envoy.config.core.v3.HealthCheck.HttpHealthCheck.codec_client_type:type_name -> envoy.type.v3.CodecClientType - 18, // 27: envoy.config.core.v3.HealthCheck.HttpHealthCheck.service_name_matcher:type_name -> envoy.type.matcher.v3.StringMatcher - 3, // 28: envoy.config.core.v3.HealthCheck.TcpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload - 3, // 29: envoy.config.core.v3.HealthCheck.TcpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload - 15, // 30: 
envoy.config.core.v3.HealthCheck.GrpcHealthCheck.initial_metadata:type_name -> envoy.config.core.v3.HeaderValueOption - 19, // 31: envoy.config.core.v3.HealthCheck.CustomHealthCheck.typed_config:type_name -> google.protobuf.Any - 32, // [32:32] is the sub-list for method output_type - 32, // [32:32] is the sub-list for method input_type - 32, // [32:32] is the sub-list for extension type_name - 32, // [32:32] is the sub-list for extension extendee - 0, // [0:32] is the sub-list for field type_name + 15, // 23: envoy.config.core.v3.HealthCheck.HttpHealthCheck.response_buffer_size:type_name -> google.protobuf.UInt64Value + 16, // 24: envoy.config.core.v3.HealthCheck.HttpHealthCheck.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 17, // 25: envoy.config.core.v3.HealthCheck.HttpHealthCheck.expected_statuses:type_name -> envoy.type.v3.Int64Range + 17, // 26: envoy.config.core.v3.HealthCheck.HttpHealthCheck.retriable_statuses:type_name -> envoy.type.v3.Int64Range + 18, // 27: envoy.config.core.v3.HealthCheck.HttpHealthCheck.codec_client_type:type_name -> envoy.type.v3.CodecClientType + 19, // 28: envoy.config.core.v3.HealthCheck.HttpHealthCheck.service_name_matcher:type_name -> envoy.type.matcher.v3.StringMatcher + 20, // 29: envoy.config.core.v3.HealthCheck.HttpHealthCheck.method:type_name -> envoy.config.core.v3.RequestMethod + 3, // 30: envoy.config.core.v3.HealthCheck.TcpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload + 3, // 31: envoy.config.core.v3.HealthCheck.TcpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload + 16, // 32: envoy.config.core.v3.HealthCheck.GrpcHealthCheck.initial_metadata:type_name -> envoy.config.core.v3.HeaderValueOption + 21, // 33: envoy.config.core.v3.HealthCheck.CustomHealthCheck.typed_config:type_name -> google.protobuf.Any + 34, // [34:34] is the sub-list for method output_type + 34, // [34:34] is the sub-list for method input_type + 34, // [34:34] is the sub-list 
for extension type_name + 34, // [34:34] is the sub-list for extension extendee + 0, // [0:34] is the sub-list for field type_name } func init() { file_envoy_config_core_v3_health_check_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go index 937a738577..5abf42ff31 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go @@ -698,9 +698,20 @@ func (m *HealthCheck) validate(all bool) error { } } - switch m.HealthChecker.(type) { - + oneofHealthCheckerPresent := false + switch v := m.HealthChecker.(type) { case *HealthCheck_HttpHealthCheck_: + if v == nil { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHealthCheckerPresent = true if all { switch v := interface{}(m.GetHttpHealthCheck()).(type) { @@ -732,6 +743,17 @@ func (m *HealthCheck) validate(all bool) error { } case *HealthCheck_TcpHealthCheck_: + if v == nil { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHealthCheckerPresent = true if all { switch v := interface{}(m.GetTcpHealthCheck()).(type) { @@ -763,6 +785,17 @@ func (m *HealthCheck) validate(all bool) error { } case *HealthCheck_GrpcHealthCheck_: + if v == nil { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHealthCheckerPresent 
= true if all { switch v := interface{}(m.GetGrpcHealthCheck()).(type) { @@ -794,6 +827,17 @@ func (m *HealthCheck) validate(all bool) error { } case *HealthCheck_CustomHealthCheck_: + if v == nil { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHealthCheckerPresent = true if all { switch v := interface{}(m.GetCustomHealthCheck()).(type) { @@ -825,6 +869,9 @@ func (m *HealthCheck) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofHealthCheckerPresent { err := HealthCheckValidationError{ field: "HealthChecker", reason: "value is required", @@ -833,7 +880,6 @@ func (m *HealthCheck) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -935,9 +981,20 @@ func (m *HealthCheck_Payload) validate(all bool) error { var errors []error - switch m.Payload.(type) { - + oneofPayloadPresent := false + switch v := m.Payload.(type) { case *HealthCheck_Payload_Text: + if v == nil { + err := HealthCheck_PayloadValidationError{ + field: "Payload", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPayloadPresent = true if utf8.RuneCountInString(m.GetText()) < 1 { err := HealthCheck_PayloadValidationError{ @@ -951,9 +1008,22 @@ func (m *HealthCheck_Payload) validate(all bool) error { } case *HealthCheck_Payload_Binary: + if v == nil { + err := HealthCheck_PayloadValidationError{ + field: "Payload", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPayloadPresent = true // no validation rules for Binary - default: + _ = v // ensures v is used + } + if !oneofPayloadPresent { err := HealthCheck_PayloadValidationError{ field: "Payload", reason: "value is required", @@ -962,7 +1032,6 @@ func (m *HealthCheck_Payload) validate(all bool) error 
{ return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -1129,33 +1198,53 @@ func (m *HealthCheck_HttpHealthCheck) validate(all bool) error { } } - if all { - switch v := interface{}(m.GetReceive()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ - field: "Receive", - reason: "embedded message failed validation", - cause: err, - }) + for idx, item := range m.GetReceive() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } } - case interface{ Validate() error }: + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { if err := v.Validate(); err != nil { - errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ - field: "Receive", + return HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), reason: "embedded message failed validation", cause: err, - }) + } } } - } else if v, ok := interface{}(m.GetReceive()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HealthCheck_HttpHealthCheckValidationError{ - field: "Receive", - reason: "embedded message failed validation", - cause: err, + + } + + if wrapper := m.GetResponseBufferSize(); wrapper != nil { + + if wrapper.GetValue() < 0 { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "ResponseBufferSize", + reason: "value must be greater than 
or equal to 0", + } + if !all { + return err } + errors = append(errors, err) } + } if len(m.GetRequestHeadersToAdd()) > 1000 { @@ -1327,6 +1416,28 @@ func (m *HealthCheck_HttpHealthCheck) validate(all bool) error { } } + if _, ok := _HealthCheck_HttpHealthCheck_Method_NotInLookup[m.GetMethod()]; ok { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "Method", + reason: "value must not be in list [CONNECT]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := RequestMethod_name[int32(m.GetMethod())]; !ok { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "Method", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + if len(errors) > 0 { return HealthCheck_HttpHealthCheckMultiError(errors) } @@ -1414,6 +1525,10 @@ var _HealthCheck_HttpHealthCheck_Path_Pattern = regexp.MustCompile("^[^\x00\n\r] var _HealthCheck_HttpHealthCheck_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") +var _HealthCheck_HttpHealthCheck_Method_NotInLookup = map[RequestMethod]struct{}{ + 6: {}, +} + // Validate checks the field values on HealthCheck_TcpHealthCheck with the // rules defined in the proto definition for this message. If any rules are // violated, the first error encountered is returned, or nil if there are no violations. 
@@ -1880,9 +1995,18 @@ func (m *HealthCheck_CustomHealthCheck) validate(all bool) error { errors = append(errors, err) } - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *HealthCheck_CustomHealthCheck_TypedConfig: + if v == nil { + err := HealthCheck_CustomHealthCheckValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -1913,6 +2037,8 @@ func (m *HealthCheck_CustomHealthCheck) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go index 347f453704..47dc67144b 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/http_uri.proto package corev3 @@ -38,7 +38,7 @@ type HttpUri struct { // uri: https://www.googleapis.com/oauth2/v1/certs // Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` - // Specify how `uri` is to be fetched. Today, this requires an explicit + // Specify how ``uri`` is to be fetched. Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or // inline DNS resolution. See `issue // `_. 
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go index c9af6f7dab..0e3ab815ee 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go @@ -108,9 +108,20 @@ func (m *HttpUri) validate(all bool) error { } } - switch m.HttpUpstreamType.(type) { - + oneofHttpUpstreamTypePresent := false + switch v := m.HttpUpstreamType.(type) { case *HttpUri_Cluster: + if v == nil { + err := HttpUriValidationError{ + field: "HttpUpstreamType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHttpUpstreamTypePresent = true if utf8.RuneCountInString(m.GetCluster()) < 1 { err := HttpUriValidationError{ @@ -124,6 +135,9 @@ func (m *HttpUri) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofHttpUpstreamTypePresent { err := HttpUriValidationError{ field: "HttpUpstreamType", reason: "value is required", @@ -132,7 +146,6 @@ func (m *HttpUri) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go index e28426ab11..16a1411437 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/protocol.proto package corev3 @@ -217,9 +217,9 @@ type QuicProtocolOptions struct { // QUIC stream send and receive buffers. Once the buffer reaches this pointer, watermark callbacks will fire to // stop the flow of data to the stream buffers. InitialStreamWindowSize *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"` - // Similar to *initial_stream_window_size*, but for connection-level + // Similar to ``initial_stream_window_size``, but for connection-level // flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults to 65536 (2^16). - // window. Currently, this has the same minimum/default as *initial_stream_window_size*. + // window. Currently, this has the same minimum/default as ``initial_stream_window_size``. // // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default // window size now, so it's also the minimum. @@ -310,19 +310,22 @@ type UpstreamHttpProtocolOptions struct { // upstream connections based on the downstream HTTP host/authority header or any other arbitrary // header when :ref:`override_auto_sni_header ` // is set, as seen by the :ref:`router filter `. + // Does nothing if a filter before the http router filter sets the corresponding metadata. AutoSni bool `protobuf:"varint,1,opt,name=auto_sni,json=autoSni,proto3" json:"auto_sni,omitempty"` // Automatic validate upstream presented certificate for new upstream connections based on the // downstream HTTP host/authority header or any other arbitrary header when :ref:`override_auto_sni_header ` // is set, as seen by the :ref:`router filter `. - // This field is intended to be set with `auto_sni` field. 
+ // This field is intended to be set with ``auto_sni`` field. + // Does nothing if a filter before the http router filter sets the corresponding metadata. AutoSanValidation bool `protobuf:"varint,2,opt,name=auto_san_validation,json=autoSanValidation,proto3" json:"auto_san_validation,omitempty"` // An optional alternative to the host/authority header to be used for setting the SNI value. // It should be a valid downstream HTTP header, as seen by the // :ref:`router filter `. // If unset, host/authority header will be used for populating the SNI. If the specified header // is not found or the value is empty, host/authority header will be used instead. - // This field is intended to be set with `auto_sni` and/or `auto_san_validation` fields. + // This field is intended to be set with ``auto_sni`` and/or ``auto_san_validation`` fields. // If none of these fields are set then setting this would be a no-op. + // Does nothing if a filter before the http router filter sets the corresponding metadata. OverrideAutoSniHeader string `protobuf:"bytes,3,opt,name=override_auto_sni_header,json=overrideAutoSniHeader,proto3" json:"override_auto_sni_header,omitempty"` } @@ -383,6 +386,7 @@ func (x *UpstreamHttpProtocolOptions) GetOverrideAutoSniHeader() string { // make an HTTP connection to an origin server. See https://tools.ietf.org/html/rfc7838 for // HTTP Alternative Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 // for the "HTTPS" DNS resource record. +// [#next-free-field: 6] type AlternateProtocolsCacheOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -410,6 +414,16 @@ type AlternateProtocolsCacheOptions struct { KeyValueStoreConfig *TypedExtensionConfig `protobuf:"bytes,3,opt,name=key_value_store_config,json=keyValueStoreConfig,proto3" json:"key_value_store_config,omitempty"` // Allows pre-populating the cache with entries, as described above. 
PrepopulatedEntries []*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry `protobuf:"bytes,4,rep,name=prepopulated_entries,json=prepopulatedEntries,proto3" json:"prepopulated_entries,omitempty"` + // Optional list of hostnames suffixes for which Alt-Svc entries can be shared. For example, if + // this list contained the value ``.c.example.com``, then an Alt-Svc entry for ``foo.c.example.com`` + // could be shared with ``bar.c.example.com`` but would not be shared with ``baz.example.com``. On + // the other hand, if the list contained the value ``.example.com`` then all three hosts could share + // Alt-Svc entries. Each entry must start with ``.``. If a hostname matches multiple suffixes, the + // first listed suffix will be used. + // + // Since lookup in this list is O(n), it is recommended that the number of suffixes be limited. + // [#not-implemented-hide:] + CanonicalSuffixes []string `protobuf:"bytes,5,rep,name=canonical_suffixes,json=canonicalSuffixes,proto3" json:"canonical_suffixes,omitempty"` } func (x *AlternateProtocolsCacheOptions) Reset() { @@ -472,6 +486,13 @@ func (x *AlternateProtocolsCacheOptions) GetPrepopulatedEntries() []*AlternatePr return nil } +func (x *AlternateProtocolsCacheOptions) GetCanonicalSuffixes() []string { + if x != nil { + return x.CanonicalSuffixes + } + return nil +} + // [#next-free-field: 7] type HttpProtocolOptions struct { state protoimpl.MessageState @@ -596,7 +617,7 @@ func (x *HttpProtocolOptions) GetMaxRequestsPerConnection() *wrappers.UInt32Valu return nil } -// [#next-free-field: 9] +// [#next-free-field: 11] type Http1ProtocolOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -605,16 +626,16 @@ type Http1ProtocolOptions struct { // Handle HTTP requests with absolute URLs in the requests. These requests // are generally sent by clients to forward/explicit proxies. This allows clients to configure // envoy as their HTTP proxy. 
In Unix, for example, this is typically done by setting the - // *http_proxy* environment variable. + // ``http_proxy`` environment variable. AllowAbsoluteUrl *wrappers.BoolValue `protobuf:"bytes,1,opt,name=allow_absolute_url,json=allowAbsoluteUrl,proto3" json:"allow_absolute_url,omitempty"` // Handle incoming HTTP/1.0 and HTTP 0.9 requests. // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 // style connect logic, dechunking, and handling lack of client host iff - // *default_host_for_http_10* is configured. + // ``default_host_for_http_10`` is configured. AcceptHttp_10 bool `protobuf:"varint,2,opt,name=accept_http_10,json=acceptHttp10,proto3" json:"accept_http_10,omitempty"` - // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as + // A default host for HTTP/1.0 requests. This is highly suggested if ``accept_http_10`` is true as // Envoy does not otherwise support HTTP/1.0 without a Host header. - // This is a no-op if *accept_http_10* is not true. + // This is a no-op if ``accept_http_10`` is not true. DefaultHostForHttp_10 string `protobuf:"bytes,3,opt,name=default_host_for_http_10,json=defaultHostForHttp10,proto3" json:"default_host_for_http_10,omitempty"` // Describes how the keys for response headers should be formatted. By default, all header keys // are lower cased. @@ -629,7 +650,7 @@ type Http1ProtocolOptions struct { // - Not a response to a HEAD request. // - The content length header is not present. EnableTrailers bool `protobuf:"varint,5,opt,name=enable_trailers,json=enableTrailers,proto3" json:"enable_trailers,omitempty"` - // Allows Envoy to process requests/responses with both `Content-Length` and `Transfer-Encoding` + // Allows Envoy to process requests/responses with both ``Content-Length`` and ``Transfer-Encoding`` // headers set. By default such messages are rejected, but if option is enabled - Envoy will // remove Content-Length header and process message. 
// See `RFC7230, sec. 3.3.3 `_ for details. @@ -654,6 +675,24 @@ type Http1ProtocolOptions struct { // (inferred if not present), host (from the host/:authority header) and path // (from first line or :path header). SendFullyQualifiedUrl bool `protobuf:"varint,8,opt,name=send_fully_qualified_url,json=sendFullyQualifiedUrl,proto3" json:"send_fully_qualified_url,omitempty"` + // [#not-implemented-hide:] Hiding so that field can be removed after BalsaParser is rolled out. + // If set, force HTTP/1 parser: BalsaParser if true, http-parser if false. + // If unset, HTTP/1 parser is selected based on + // envoy.reloadable_features.http1_use_balsa_parser. + // See issue #21245. + UseBalsaParser *wrappers.BoolValue `protobuf:"bytes,9,opt,name=use_balsa_parser,json=useBalsaParser,proto3" json:"use_balsa_parser,omitempty"` + // [#not-implemented-hide:] Hiding so that field can be removed. + // If true, and BalsaParser is used (either `use_balsa_parser` above is true, + // or `envoy.reloadable_features.http1_use_balsa_parser` is true and + // `use_balsa_parser` is unset), then every non-empty method with only valid + // characters is accepted. Otherwise, methods not on the hard-coded list are + // rejected. + // Once UHV is enabled, this field should be removed, and BalsaParser should + // allow any method. UHV validates the method, rejecting empty string or + // invalid characters, and provides :ref:`restrict_http_methods + // ` + // to reject custom methods. 
+ AllowCustomMethods bool `protobuf:"varint,10,opt,name=allow_custom_methods,json=allowCustomMethods,proto3" json:"allow_custom_methods,omitempty"` } func (x *Http1ProtocolOptions) Reset() { @@ -744,6 +783,20 @@ func (x *Http1ProtocolOptions) GetSendFullyQualifiedUrl() bool { return false } +func (x *Http1ProtocolOptions) GetUseBalsaParser() *wrappers.BoolValue { + if x != nil { + return x.UseBalsaParser + } + return nil +} + +func (x *Http1ProtocolOptions) GetAllowCustomMethods() bool { + if x != nil { + return x.AllowCustomMethods + } + return false +} + type KeepaliveSettings struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -831,7 +884,7 @@ func (x *KeepaliveSettings) GetConnectionIdleInterval() *duration.Duration { return nil } -// [#next-free-field: 16] +// [#next-free-field: 17] type Http2ProtocolOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -866,8 +919,8 @@ type Http2ProtocolOptions struct { // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to // stop the flow of data to the codec buffers. InitialStreamWindowSize *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"` - // Similar to *initial_stream_window_size*, but for connection-level flow-control - // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. + // Similar to ``initial_stream_window_size``, but for connection-level flow-control + // window. Currently, this has the same minimum/maximum/default as ``initial_stream_window_size``. InitialConnectionWindowSize *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"` // Allows proxying Websocket and other upgrades over H2 connect. 
AllowConnect bool `protobuf:"varint,5,opt,name=allow_connect,json=allowConnect,proto3" json:"allow_connect,omitempty"` @@ -900,11 +953,11 @@ type Http2ProtocolOptions struct { // of PRIORITY frames received over the lifetime of connection exceeds the value calculated // using this formula:: // - // max_inbound_priority_frames_per_stream * (1 + opened_streams) + // ``max_inbound_priority_frames_per_stream`` * (1 + ``opened_streams``) // - // the connection is terminated. For downstream connections the `opened_streams` is incremented when + // the connection is terminated. For downstream connections the ``opened_streams`` is incremented when // Envoy receives complete response headers from the upstream server. For upstream connection the - // `opened_streams` is incremented when Envoy send the HEADERS frame for a new stream. The + // ``opened_streams`` is incremented when Envoy send the HEADERS frame for a new stream. The // ``http2.inbound_priority_frames_flood`` stat tracks // the number of connections terminated due to flood mitigation. The default limit is 100. MaxInboundPriorityFramesPerStream *wrappers.UInt32Value `protobuf:"bytes,10,opt,name=max_inbound_priority_frames_per_stream,json=maxInboundPriorityFramesPerStream,proto3" json:"max_inbound_priority_frames_per_stream,omitempty"` @@ -912,12 +965,12 @@ type Http2ProtocolOptions struct { // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated // using this formula:: // - // 5 + 2 * (opened_streams + - // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) + // 5 + 2 * (``opened_streams`` + + // ``max_inbound_window_update_frames_per_data_frame_sent`` * ``outbound_data_frames``) // - // the connection is terminated. For downstream connections the `opened_streams` is incremented when + // the connection is terminated. 
For downstream connections the ``opened_streams`` is incremented when // Envoy receives complete response headers from the upstream server. For upstream connections the - // `opened_streams` is incremented when Envoy sends the HEADERS frame for a new stream. The + // ``opened_streams`` is incremented when Envoy sends the HEADERS frame for a new stream. The // ``http2.inbound_priority_frames_flood`` stat tracks the number of connections terminated due to // flood mitigation. The default max_inbound_window_update_frames_per_data_frame_sent value is 10. // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, @@ -977,6 +1030,10 @@ type Http2ProtocolOptions struct { // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer // does not respond within the configured timeout, the connection will be aborted. ConnectionKeepalive *KeepaliveSettings `protobuf:"bytes,15,opt,name=connection_keepalive,json=connectionKeepalive,proto3" json:"connection_keepalive,omitempty"` + // [#not-implemented-hide:] Hiding so that the field can be removed after oghttp2 is rolled out. + // If set, force use of a particular HTTP/2 codec: oghttp2 if true, nghttp2 if false. + // If unset, HTTP/2 codec is selected based on envoy.reloadable_features.http2_use_oghttp2. 
+ UseOghttp2Codec *wrappers.BoolValue `protobuf:"bytes,16,opt,name=use_oghttp2_codec,json=useOghttp2Codec,proto3" json:"use_oghttp2_codec,omitempty"` } func (x *Http2ProtocolOptions) Reset() { @@ -1117,6 +1174,13 @@ func (x *Http2ProtocolOptions) GetConnectionKeepalive() *KeepaliveSettings { return nil } +func (x *Http2ProtocolOptions) GetUseOghttp2Codec() *wrappers.BoolValue { + if x != nil { + return x.UseOghttp2Codec + } + return nil +} + // [#not-implemented-hide:] type GrpcProtocolOptions struct { state protoimpl.MessageState @@ -1653,8 +1717,8 @@ var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{ 0x6e, 0x69, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd7, - 0x03, 0x0a, 0x1e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86, + 0x04, 0x0a, 0x1e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, @@ -1676,300 +1740,317 @@ var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{ 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x70, 0x72, 0x65, 0x70, 0x6f, - 0x70, 0x75, 0x6c, 0x61, 0x74, 
0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x68, - 0x0a, 0x1c, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x27, - 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xd0, 0x01, 0x01, 0x52, 0x08, 0x68, - 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x2a, 0x06, 0x10, 0xff, 0xff, 0x03, - 0x20, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0xaf, 0x05, 0x0a, 0x13, 0x48, 0x74, 0x74, - 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x51, - 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x51, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2e, 0x55, - 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, - 0x02, 0x28, 0x01, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, - 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x8d, 0x01, 0x0a, 0x1f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, - 0x5f, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, - 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, - 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x5b, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, - 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, - 0x75, 
0x65, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4e, 0x0a, 0x1c, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, - 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, - 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, - 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, - 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x3a, 0x2c, 0x9a, 0xc5, - 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x87, 0x08, 0x0a, 0x14, 0x48, - 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x62, 0x73, - 0x6f, 0x6c, 0x75, 0x74, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x41, 0x62, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x24, 0x0a, - 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x48, 0x74, 0x74, - 0x70, 0x31, 0x30, 0x12, 0x36, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, - 0x6f, 0x73, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 
0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, - 0x73, 0x74, 0x46, 0x6f, 0x72, 0x48, 0x74, 0x74, 0x70, 0x31, 0x30, 0x12, 0x66, 0x0a, 0x11, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, + 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, + 0x0a, 0x12, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x66, 0x66, + 0x69, 0x78, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x63, 0x61, 0x6e, 0x6f, + 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x1a, 0x68, 0x0a, + 0x1c, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x27, 0x0a, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xd0, 0x01, 0x01, 0x52, 0x08, 0x68, 0x6f, + 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x2a, 0x06, 0x10, 0xff, 0xff, 0x03, 0x20, + 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0xaf, 0x05, 0x0a, 0x13, 0x48, 0x74, 0x74, 0x70, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 
0x12, 0x51, 0x0a, + 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x51, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, + 0x28, 0x01, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8d, + 0x01, 0x0a, 0x1f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, + 0x75, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 
0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, + 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, + 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, + 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, + 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4e, 0x0a, 0x1c, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, + 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41, + 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, + 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52, + 0x4f, 0x50, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, + 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x09, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x52, 0x0f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 
0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x72, - 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x14, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x5f, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, - 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x7a, - 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, - 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, - 0x6e, 0x64, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x65, - 0x6e, 0x64, 0x46, 0x75, 0x6c, 0x6c, 0x79, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x55, 0x72, 0x6c, 0x1a, 0x9f, 0x03, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, - 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x78, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, - 
0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x48, 0x00, - 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, - 0x73, 0x12, 0x5b, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x5f, 0x66, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x6e, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x62, 0x73, 0x6f, + 0x6c, 0x75, 0x74, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x41, 0x62, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x24, 0x0a, 0x0e, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x48, 0x74, 0x74, 0x70, + 0x31, 0x30, 0x12, 0x36, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, + 0x74, 0x46, 0x6f, 0x72, 0x48, 0x74, 0x74, 0x70, 0x31, 0x30, 0x12, 0x66, 0x0a, 0x11, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, + 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x52, 0x0f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x72, 0x61, + 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x7a, 0x0a, + 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, + 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x6e, + 0x64, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 
0x15, 0x73, 0x65, 0x6e, + 0x64, 0x46, 0x75, 0x6c, 0x6c, 0x79, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x55, + 0x72, 0x6c, 0x12, 0x4e, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x62, 0x61, 0x6c, 0x73, 0x61, 0x5f, + 0x70, 0x61, 0x72, 0x73, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, + 0x08, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x42, 0x61, 0x6c, 0x73, 0x61, 0x50, 0x61, 0x72, 0x73, + 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x1a, 0x9f, + 0x03, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x12, 0x78, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x73, + 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x11, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x66, 0x75, 0x6c, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x1a, 0x60, - 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, - 0x73, 0x3a, 0x4d, 0x9a, 0xc5, 0x88, 0x1e, 0x48, 0x0a, 0x46, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, - 0x31, 0x50, 
0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, - 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x5b, 0x0a, 0x12, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, + 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x1a, 0x60, 0x0a, 0x0f, 0x50, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x4d, 0x9a, 0xc5, + 0x88, 0x1e, 0x48, 0x0a, 0x46, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 
0x72, 0x64, 0x73, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, + 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x14, 0x0a, 0x0d, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, - 0x14, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xc1, 0x02, 0x0a, 0x11, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, - 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, - 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, - 0x43, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, - 0xaa, 0x01, 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x07, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, - 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x18, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xc1, 0x02, 0x0a, 0x11, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, - 0x52, 0x16, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x6c, 0x65, - 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x81, 0x0e, 0x0a, 0x14, 0x48, 0x74, 0x74, - 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x46, 0x0a, 0x10, 0x68, 0x70, 0x61, 0x63, 0x6b, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, - 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x68, 0x70, 0x61, 0x63, 0x6b, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x61, 0x0a, 0x16, 0x6d, 0x61, 0x78, - 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x2a, 0x08, 0x18, 0xff, - 0xff, 0xff, 0xff, 0x07, 0x28, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x6a, 0x0a, 0x1a, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, - 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, - 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0xff, 0xff, 0x03, 0x52, - 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, - 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, - 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 
0x75, 0x65, 0x42, 0x0f, - 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0xff, 0xff, 0x03, 0x52, - 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x55, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, - 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x11, 0x6d, 0x61, - 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, - 0x64, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x08, + 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x07, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, + 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, + 0x3f, 0x0a, 
0x0f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, + 0x12, 0x61, 0x0a, 0x18, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, + 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, + 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x16, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x6c, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x22, 0xd3, 0x0e, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x10, + 0x68, 0x70, 0x61, 0x63, 0x6b, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x68, 0x70, 0x61, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x61, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 
0x28, 0x01, 0x52, 0x18, 0x6d, 0x61, 0x78, - 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x46, - 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x84, 0x01, 0x0a, 0x31, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, - 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, - 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x65, 0x6d, - 0x70, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x2b, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x49, - 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x6f, 0x0a, 0x26, - 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6f, - 0x72, 0x69, 0x74, 0x79, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, 0x6d, 0x61, 0x78, 0x49, - 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x91, 0x01, - 0x0a, 0x34, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x77, 0x69, - 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x72, 0x61, 0x6d, - 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x72, 0x61, 
0x6d, - 0x65, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, - 0x02, 0x28, 0x01, 0x52, 0x2c, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x57, - 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x72, 0x61, 0x6d, 0x65, - 0x73, 0x50, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x6e, - 0x74, 0x12, 0x5e, 0x0a, 0x26, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, - 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x21, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, - 0x67, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x7a, 0x0a, - 0x1a, 0x63, 0x75, 0x73, 0x74, 
0x6f, 0x6d, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, - 0x18, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x5a, 0x0a, 0x14, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, - 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4b, - 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x65, 0x70, - 0x61, 0x6c, 0x69, 0x76, 0x65, 0x1a, 0xe5, 0x01, 0x0a, 0x11, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0a, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x75, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x2a, 0x08, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, + 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x6a, 0x0a, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, + 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0xff, 0xff, 0x03, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, + 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0xff, 0xff, 0x03, 0x52, 0x1b, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e, + 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x55, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 
0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, + 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x1b, 0x6d, 0x61, + 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xfa, - 0x42, 0x08, 0x2a, 0x06, 0x18, 0xff, 0xff, 0x03, 0x28, 0x00, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x3c, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x3f, 0x9a, 0xc5, - 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x74, 0x74, - 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x3a, 0x2d, 0x9a, - 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa5, 0x01, 0x0a, - 0x13, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x60, 0x0a, 0x16, 
0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, - 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xb1, 0x02, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x33, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, - 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x71, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x7a, 0x0a, 0x2d, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, + 0x12, 0x84, 0x01, 0x0a, 0x31, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 
0x63, 0x75, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x2b, 0x6d, 0x61, 0x78, 0x43, + 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, + 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x6f, 0x0a, 0x26, 0x6d, 0x61, 0x78, 0x5f, 0x69, + 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, + 0x64, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, + 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x91, 0x01, 0x0a, 0x34, 0x6d, 0x61, 0x78, + 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x6e, + 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 
0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x2c, + 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x44, + 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x5e, 0x0a, 0x26, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, + 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x18, 0x01, + 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x21, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, + 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, - 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x16, 0x61, 0x6c, 0x6c, 0x6f, - 0x77, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 
0xa4, 0xe1, 0x06, 0x02, - 0x08, 0x01, 0x52, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, - 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x22, 0x74, 0x0a, 0x1a, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, - 0x5f, 0x74, 0x6f, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x72, 0x0d, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, - 0x52, 0x05, 0x68, 0x74, 0x74, 0x70, 0x73, 0x48, 0x00, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x65, 0x54, 0x6f, 0x4f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0x10, 0x0a, 0x0e, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x81, - 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, - 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, - 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, - 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x7a, 0x0a, 0x1a, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0d, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x18, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x5a, 0x0a, 0x14, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, + 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x13, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, + 0x12, 0x50, 0x0a, 0x11, 0x75, 0x73, 0x65, 0x5f, 0x6f, 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, + 0x63, 0x6f, 0x64, 0x65, 0x63, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, + 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, + 0x01, 0x52, 0x0f, 0x75, 0x73, 0x65, 0x4f, 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x43, 0x6f, 0x64, + 0x65, 0x63, 0x1a, 0xe5, 0x01, 0x0a, 0x11, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xfa, 0x42, 0x08, 0x2a, + 0x06, 0x18, 0xff, 0xff, 0x03, 0x28, 0x00, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x3f, 0x9a, 0xc5, 0x88, 0x1e, 0x3a, + 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, + 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa5, 0x01, 0x0a, 0x13, 0x47, 0x72, + 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x60, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68, + 0x74, 0x74, 0x70, 0x32, 
0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0xb1, 0x02, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75, + 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x71, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, + 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x16, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, 
0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, + 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x22, 0x74, 0x0a, 0x1a, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x74, 0x6f, + 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x72, 0x0d, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x52, 0x05, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x48, 0x00, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x54, 0x6f, + 0x4f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0x10, 0x0a, 0x0e, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x81, 0x01, 0x0a, 0x22, + 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x42, 0x0d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2030,34 +2111,36 @@ var file_envoy_config_core_v3_protocol_proto_depIdxs = []int32{ 20, // 16: 
envoy.config.core.v3.Http1ProtocolOptions.allow_absolute_url:type_name -> google.protobuf.BoolValue 14, // 17: envoy.config.core.v3.Http1ProtocolOptions.header_key_format:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat 20, // 18: envoy.config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue - 17, // 19: envoy.config.core.v3.KeepaliveSettings.interval:type_name -> google.protobuf.Duration - 17, // 20: envoy.config.core.v3.KeepaliveSettings.timeout:type_name -> google.protobuf.Duration - 21, // 21: envoy.config.core.v3.KeepaliveSettings.interval_jitter:type_name -> envoy.type.v3.Percent - 17, // 22: envoy.config.core.v3.KeepaliveSettings.connection_idle_interval:type_name -> google.protobuf.Duration - 18, // 23: envoy.config.core.v3.Http2ProtocolOptions.hpack_table_size:type_name -> google.protobuf.UInt32Value - 18, // 24: envoy.config.core.v3.Http2ProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value - 18, // 25: envoy.config.core.v3.Http2ProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value - 18, // 26: envoy.config.core.v3.Http2ProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value - 18, // 27: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_frames:type_name -> google.protobuf.UInt32Value - 18, // 28: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_control_frames:type_name -> google.protobuf.UInt32Value - 18, // 29: envoy.config.core.v3.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload:type_name -> google.protobuf.UInt32Value - 18, // 30: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_priority_frames_per_stream:type_name -> google.protobuf.UInt32Value - 18, // 31: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent:type_name -> google.protobuf.UInt32Value - 20, // 32: 
envoy.config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue - 16, // 33: envoy.config.core.v3.Http2ProtocolOptions.custom_settings_parameters:type_name -> envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter - 8, // 34: envoy.config.core.v3.Http2ProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.KeepaliveSettings - 9, // 35: envoy.config.core.v3.GrpcProtocolOptions.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions - 3, // 36: envoy.config.core.v3.Http3ProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions - 20, // 37: envoy.config.core.v3.Http3ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue - 15, // 38: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.proper_case_words:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords - 19, // 39: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.stateful_formatter:type_name -> envoy.config.core.v3.TypedExtensionConfig - 18, // 40: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.identifier:type_name -> google.protobuf.UInt32Value - 18, // 41: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.value:type_name -> google.protobuf.UInt32Value - 42, // [42:42] is the sub-list for method output_type - 42, // [42:42] is the sub-list for method input_type - 42, // [42:42] is the sub-list for extension type_name - 42, // [42:42] is the sub-list for extension extendee - 0, // [0:42] is the sub-list for field type_name + 20, // 19: envoy.config.core.v3.Http1ProtocolOptions.use_balsa_parser:type_name -> google.protobuf.BoolValue + 17, // 20: envoy.config.core.v3.KeepaliveSettings.interval:type_name -> google.protobuf.Duration + 17, // 21: envoy.config.core.v3.KeepaliveSettings.timeout:type_name -> google.protobuf.Duration + 21, // 22: 
envoy.config.core.v3.KeepaliveSettings.interval_jitter:type_name -> envoy.type.v3.Percent + 17, // 23: envoy.config.core.v3.KeepaliveSettings.connection_idle_interval:type_name -> google.protobuf.Duration + 18, // 24: envoy.config.core.v3.Http2ProtocolOptions.hpack_table_size:type_name -> google.protobuf.UInt32Value + 18, // 25: envoy.config.core.v3.Http2ProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value + 18, // 26: envoy.config.core.v3.Http2ProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value + 18, // 27: envoy.config.core.v3.Http2ProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value + 18, // 28: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_frames:type_name -> google.protobuf.UInt32Value + 18, // 29: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_control_frames:type_name -> google.protobuf.UInt32Value + 18, // 30: envoy.config.core.v3.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload:type_name -> google.protobuf.UInt32Value + 18, // 31: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_priority_frames_per_stream:type_name -> google.protobuf.UInt32Value + 18, // 32: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent:type_name -> google.protobuf.UInt32Value + 20, // 33: envoy.config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 16, // 34: envoy.config.core.v3.Http2ProtocolOptions.custom_settings_parameters:type_name -> envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter + 8, // 35: envoy.config.core.v3.Http2ProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.KeepaliveSettings + 20, // 36: envoy.config.core.v3.Http2ProtocolOptions.use_oghttp2_codec:type_name -> google.protobuf.BoolValue + 9, // 37: envoy.config.core.v3.GrpcProtocolOptions.http2_protocol_options:type_name -> 
envoy.config.core.v3.Http2ProtocolOptions + 3, // 38: envoy.config.core.v3.Http3ProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions + 20, // 39: envoy.config.core.v3.Http3ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 15, // 40: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.proper_case_words:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords + 19, // 41: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.stateful_formatter:type_name -> envoy.config.core.v3.TypedExtensionConfig + 18, // 42: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.identifier:type_name -> google.protobuf.UInt32Value + 18, // 43: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.value:type_name -> google.protobuf.UInt32Value + 44, // [44:44] is the sub-list for method output_type + 44, // [44:44] is the sub-list for method input_type + 44, // [44:44] is the sub-list for extension type_name + 44, // [44:44] is the sub-list for extension extendee + 0, // [0:44] is the sub-list for field type_name } func init() { file_envoy_config_core_v3_protocol_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go index b95b3675de..9e9a9155e9 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go @@ -1162,6 +1162,37 @@ func (m *Http1ProtocolOptions) validate(all bool) error { // no validation rules for SendFullyQualifiedUrl + if all { + switch v := interface{}(m.GetUseBalsaParser()).(type) { + case interface{ ValidateAll() error }: + if 
err := v.ValidateAll(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "UseBalsaParser", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "UseBalsaParser", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseBalsaParser()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http1ProtocolOptionsValidationError{ + field: "UseBalsaParser", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AllowCustomMethods + if len(errors) > 0 { return Http1ProtocolOptionsMultiError(errors) } @@ -1771,6 +1802,35 @@ func (m *Http2ProtocolOptions) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetUseOghttp2Codec()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "UseOghttp2Codec", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "UseOghttp2Codec", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseOghttp2Codec()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: "UseOghttp2Codec", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return Http2ProtocolOptionsMultiError(errors) } @@ -2166,9 +2226,18 @@ func (m *SchemeHeaderTransformation) validate(all bool) error { var errors []error - switch m.Transformation.(type) { - + switch v := 
m.Transformation.(type) { case *SchemeHeaderTransformation_SchemeToOverwrite: + if v == nil { + err := SchemeHeaderTransformationValidationError{ + field: "Transformation", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if _, ok := _SchemeHeaderTransformation_SchemeToOverwrite_InLookup[m.GetSchemeToOverwrite()]; !ok { err := SchemeHeaderTransformationValidationError{ @@ -2181,6 +2250,8 @@ func (m *SchemeHeaderTransformation) validate(all bool) error { errors = append(errors, err) } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -2437,9 +2508,20 @@ func (m *Http1ProtocolOptions_HeaderKeyFormat) validate(all bool) error { var errors []error - switch m.HeaderFormat.(type) { - + oneofHeaderFormatPresent := false + switch v := m.HeaderFormat.(type) { case *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_: + if v == nil { + err := Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "HeaderFormat", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHeaderFormatPresent = true if all { switch v := interface{}(m.GetProperCaseWords()).(type) { @@ -2471,6 +2553,17 @@ func (m *Http1ProtocolOptions_HeaderKeyFormat) validate(all bool) error { } case *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter: + if v == nil { + err := Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "HeaderFormat", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHeaderFormatPresent = true if all { switch v := interface{}(m.GetStatefulFormatter()).(type) { @@ -2502,6 +2595,9 @@ func (m *Http1ProtocolOptions_HeaderKeyFormat) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofHeaderFormatPresent { err := Http1ProtocolOptions_HeaderKeyFormatValidationError{ field: "HeaderFormat", reason: "value is required", @@ -2510,7 
+2606,6 @@ func (m *Http1ProtocolOptions_HeaderKeyFormat) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go index 32fb4cd675..90b7aaebe6 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go @@ -1,13 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/proxy_protocol.proto package corev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -21,6 +22,54 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type ProxyProtocolPassThroughTLVs_PassTLVsMatchType int32 + +const ( + // Pass all TLVs. + ProxyProtocolPassThroughTLVs_INCLUDE_ALL ProxyProtocolPassThroughTLVs_PassTLVsMatchType = 0 + // Pass specific TLVs defined in tlv_type. + ProxyProtocolPassThroughTLVs_INCLUDE ProxyProtocolPassThroughTLVs_PassTLVsMatchType = 1 +) + +// Enum value maps for ProxyProtocolPassThroughTLVs_PassTLVsMatchType. 
+var ( + ProxyProtocolPassThroughTLVs_PassTLVsMatchType_name = map[int32]string{ + 0: "INCLUDE_ALL", + 1: "INCLUDE", + } + ProxyProtocolPassThroughTLVs_PassTLVsMatchType_value = map[string]int32{ + "INCLUDE_ALL": 0, + "INCLUDE": 1, + } +) + +func (x ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Enum() *ProxyProtocolPassThroughTLVs_PassTLVsMatchType { + p := new(ProxyProtocolPassThroughTLVs_PassTLVsMatchType) + *p = x + return p +} + +func (x ProxyProtocolPassThroughTLVs_PassTLVsMatchType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[0].Descriptor() +} + +func (ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[0] +} + +func (x ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ProxyProtocolPassThroughTLVs_PassTLVsMatchType.Descriptor instead. 
+func (ProxyProtocolPassThroughTLVs_PassTLVsMatchType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{0, 0} +} + type ProxyProtocolConfig_Version int32 const ( @@ -53,11 +102,11 @@ func (x ProxyProtocolConfig_Version) String() string { } func (ProxyProtocolConfig_Version) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[0].Descriptor() + return file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[1].Descriptor() } func (ProxyProtocolConfig_Version) Type() protoreflect.EnumType { - return &file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[0] + return &file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[1] } func (x ProxyProtocolConfig_Version) Number() protoreflect.EnumNumber { @@ -66,7 +115,67 @@ func (x ProxyProtocolConfig_Version) Number() protoreflect.EnumNumber { // Deprecated: Use ProxyProtocolConfig_Version.Descriptor instead. func (ProxyProtocolConfig_Version) EnumDescriptor() ([]byte, []int) { - return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{0, 0} + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{1, 0} +} + +type ProxyProtocolPassThroughTLVs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The strategy to pass through TLVs. Default is INCLUDE_ALL. + // If INCLUDE_ALL is set, all TLVs will be passed through no matter the tlv_type field. + MatchType ProxyProtocolPassThroughTLVs_PassTLVsMatchType `protobuf:"varint,1,opt,name=match_type,json=matchType,proto3,enum=envoy.config.core.v3.ProxyProtocolPassThroughTLVs_PassTLVsMatchType" json:"match_type,omitempty"` + // The TLV types that are applied based on match_type. + // TLV type is defined as uint8_t in proxy protocol. See `the spec + // `_ for details. 
+ TlvType []uint32 `protobuf:"varint,2,rep,packed,name=tlv_type,json=tlvType,proto3" json:"tlv_type,omitempty"` +} + +func (x *ProxyProtocolPassThroughTLVs) Reset() { + *x = ProxyProtocolPassThroughTLVs{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProxyProtocolPassThroughTLVs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProxyProtocolPassThroughTLVs) ProtoMessage() {} + +func (x *ProxyProtocolPassThroughTLVs) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProxyProtocolPassThroughTLVs.ProtoReflect.Descriptor instead. +func (*ProxyProtocolPassThroughTLVs) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{0} +} + +func (x *ProxyProtocolPassThroughTLVs) GetMatchType() ProxyProtocolPassThroughTLVs_PassTLVsMatchType { + if x != nil { + return x.MatchType + } + return ProxyProtocolPassThroughTLVs_INCLUDE_ALL +} + +func (x *ProxyProtocolPassThroughTLVs) GetTlvType() []uint32 { + if x != nil { + return x.TlvType + } + return nil } type ProxyProtocolConfig struct { @@ -76,12 +185,15 @@ type ProxyProtocolConfig struct { // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details Version ProxyProtocolConfig_Version `protobuf:"varint,1,opt,name=version,proto3,enum=envoy.config.core.v3.ProxyProtocolConfig_Version" json:"version,omitempty"` + // This config controls which TLVs can be passed to upstream if it is Proxy Protocol + // V2 header. 
If there is no setting for this field, no TLVs will be passed through. + PassThroughTlvs *ProxyProtocolPassThroughTLVs `protobuf:"bytes,2,opt,name=pass_through_tlvs,json=passThroughTlvs,proto3" json:"pass_through_tlvs,omitempty"` } func (x *ProxyProtocolConfig) Reset() { *x = ProxyProtocolConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0] + mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -94,7 +206,7 @@ func (x *ProxyProtocolConfig) String() string { func (*ProxyProtocolConfig) ProtoMessage() {} func (x *ProxyProtocolConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0] + mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -107,7 +219,7 @@ func (x *ProxyProtocolConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ProxyProtocolConfig.ProtoReflect.Descriptor instead. 
func (*ProxyProtocolConfig) Descriptor() ([]byte, []int) { - return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{0} + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{1} } func (x *ProxyProtocolConfig) GetVersion() ProxyProtocolConfig_Version { @@ -117,6 +229,13 @@ func (x *ProxyProtocolConfig) GetVersion() ProxyProtocolConfig_Version { return ProxyProtocolConfig_V1 } +func (x *ProxyProtocolConfig) GetPassThroughTlvs() *ProxyProtocolPassThroughTLVs { + if x != nil { + return x.PassThroughTlvs + } + return nil +} + var File_envoy_config_core_v3_proxy_protocol_proto protoreflect.FileDescriptor var file_envoy_config_core_v3_proxy_protocol_proto_rawDesc = []byte{ @@ -126,23 +245,45 @@ var file_envoy_config_core_v3_proxy_protocol_proto_rawDesc = []byte{ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0x7d, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x19, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x06, 0x0a, 0x02, 0x56, 0x31, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x32, 0x10, 0x01, 0x42, - 0x86, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, - 0x79, 0x2e, 0x65, 
0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, - 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, - 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, - 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x01, 0x0a, 0x1c, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x61, 0x73, 0x73, 0x54, + 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x4c, 0x56, 0x73, 0x12, 0x63, 0x0a, 0x0a, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x44, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x50, 0x61, 0x73, 0x73, 0x54, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x4c, + 0x56, 0x73, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x54, 0x4c, 0x56, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x28, 0x0a, 0x08, 0x74, 0x6c, 0x76, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0d, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x2a, 0x03, 0x10, 0x80, 0x02, + 0x52, 0x07, 0x74, 0x6c, 0x76, 0x54, 0x79, 0x70, 0x65, 
0x22, 0x31, 0x0a, 0x11, 0x50, 0x61, 0x73, + 0x73, 0x54, 0x4c, 0x56, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x49, 0x4e, 0x43, 0x4c, 0x55, 0x44, 0x45, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, + 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x43, 0x4c, 0x55, 0x44, 0x45, 0x10, 0x01, 0x22, 0xdd, 0x01, 0x0a, + 0x13, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x5e, 0x0a, 0x11, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x75, 0x67, + 0x68, 0x5f, 0x74, 0x6c, 0x76, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x50, 0x61, 0x73, 0x73, 0x54, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x4c, 0x56, 0x73, + 0x52, 0x0f, 0x70, 0x61, 0x73, 0x73, 0x54, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x6c, 0x76, + 0x73, 0x22, 0x19, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, + 0x56, 0x31, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x32, 0x10, 0x01, 0x42, 0x86, 0x01, 0x0a, + 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x42, 0x12, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 
0x63, + 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -157,19 +298,23 @@ func file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP() []byte { return file_envoy_config_core_v3_proxy_protocol_proto_rawDescData } -var file_envoy_config_core_v3_proxy_protocol_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_config_core_v3_proxy_protocol_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_proxy_protocol_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_config_core_v3_proxy_protocol_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_envoy_config_core_v3_proxy_protocol_proto_goTypes = []interface{}{ - (ProxyProtocolConfig_Version)(0), // 0: envoy.config.core.v3.ProxyProtocolConfig.Version - (*ProxyProtocolConfig)(nil), // 1: envoy.config.core.v3.ProxyProtocolConfig + (ProxyProtocolPassThroughTLVs_PassTLVsMatchType)(0), // 0: envoy.config.core.v3.ProxyProtocolPassThroughTLVs.PassTLVsMatchType + (ProxyProtocolConfig_Version)(0), // 1: envoy.config.core.v3.ProxyProtocolConfig.Version + (*ProxyProtocolPassThroughTLVs)(nil), // 2: envoy.config.core.v3.ProxyProtocolPassThroughTLVs + (*ProxyProtocolConfig)(nil), // 3: envoy.config.core.v3.ProxyProtocolConfig } var file_envoy_config_core_v3_proxy_protocol_proto_depIdxs = []int32{ - 0, // 0: envoy.config.core.v3.ProxyProtocolConfig.version:type_name -> envoy.config.core.v3.ProxyProtocolConfig.Version - 1, // [1:1] is the sub-list for 
method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 0, // 0: envoy.config.core.v3.ProxyProtocolPassThroughTLVs.match_type:type_name -> envoy.config.core.v3.ProxyProtocolPassThroughTLVs.PassTLVsMatchType + 1, // 1: envoy.config.core.v3.ProxyProtocolConfig.version:type_name -> envoy.config.core.v3.ProxyProtocolConfig.Version + 2, // 2: envoy.config.core.v3.ProxyProtocolConfig.pass_through_tlvs:type_name -> envoy.config.core.v3.ProxyProtocolPassThroughTLVs + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_envoy_config_core_v3_proxy_protocol_proto_init() } @@ -179,6 +324,18 @@ func file_envoy_config_core_v3_proxy_protocol_proto_init() { } if !protoimpl.UnsafeEnabled { file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyProtocolPassThroughTLVs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProxyProtocolConfig); i { case 0: return &v.state @@ -196,8 +353,8 @@ func file_envoy_config_core_v3_proxy_protocol_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_core_v3_proxy_protocol_proto_rawDesc, - NumEnums: 1, - NumMessages: 1, + NumEnums: 2, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go index c25e882e51..2edd9b1165 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go @@ -35,6 +35,127 @@ var ( _ = sort.Sort ) +// Validate checks the field values on ProxyProtocolPassThroughTLVs with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ProxyProtocolPassThroughTLVs) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ProxyProtocolPassThroughTLVs with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ProxyProtocolPassThroughTLVsMultiError, or nil if none found. 
+func (m *ProxyProtocolPassThroughTLVs) ValidateAll() error { + return m.validate(true) +} + +func (m *ProxyProtocolPassThroughTLVs) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for MatchType + + for idx, item := range m.GetTlvType() { + _, _ = idx, item + + if item >= 256 { + err := ProxyProtocolPassThroughTLVsValidationError{ + field: fmt.Sprintf("TlvType[%v]", idx), + reason: "value must be less than 256", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return ProxyProtocolPassThroughTLVsMultiError(errors) + } + + return nil +} + +// ProxyProtocolPassThroughTLVsMultiError is an error wrapping multiple +// validation errors returned by ProxyProtocolPassThroughTLVs.ValidateAll() if +// the designated constraints aren't met. +type ProxyProtocolPassThroughTLVsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ProxyProtocolPassThroughTLVsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ProxyProtocolPassThroughTLVsMultiError) AllErrors() []error { return m } + +// ProxyProtocolPassThroughTLVsValidationError is the validation error returned +// by ProxyProtocolPassThroughTLVs.Validate if the designated constraints +// aren't met. +type ProxyProtocolPassThroughTLVsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ProxyProtocolPassThroughTLVsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ProxyProtocolPassThroughTLVsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ProxyProtocolPassThroughTLVsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ProxyProtocolPassThroughTLVsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ProxyProtocolPassThroughTLVsValidationError) ErrorName() string { + return "ProxyProtocolPassThroughTLVsValidationError" +} + +// Error satisfies the builtin error interface +func (e ProxyProtocolPassThroughTLVsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sProxyProtocolPassThroughTLVs.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ProxyProtocolPassThroughTLVsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ProxyProtocolPassThroughTLVsValidationError{} + // Validate checks the field values on ProxyProtocolConfig with the rules // defined in the proto definition for this message. If any rules are // violated, the first error encountered is returned, or nil if there are no violations. 
@@ -59,6 +180,35 @@ func (m *ProxyProtocolConfig) validate(all bool) error { // no validation rules for Version + if all { + switch v := interface{}(m.GetPassThroughTlvs()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ProxyProtocolConfigValidationError{ + field: "PassThroughTlvs", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ProxyProtocolConfigValidationError{ + field: "PassThroughTlvs", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPassThroughTlvs()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ProxyProtocolConfigValidationError{ + field: "PassThroughTlvs", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return ProxyProtocolConfigMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go index eb68582ff7..9094e47f74 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/resolver.proto package corev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go index 03f8e48327..5ade94fcad 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/socket_option.proto package corev3 @@ -217,6 +217,53 @@ func (*SocketOption_IntValue) isSocketOption_Value() {} func (*SocketOption_BufValue) isSocketOption_Value() {} +type SocketOptionsOverride struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SocketOptions []*SocketOption `protobuf:"bytes,1,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` +} + +func (x *SocketOptionsOverride) Reset() { + *x = SocketOptionsOverride{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOptionsOverride) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOptionsOverride) ProtoMessage() {} + +func (x *SocketOptionsOverride) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOptionsOverride.ProtoReflect.Descriptor instead. +func (*SocketOptionsOverride) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{1} +} + +func (x *SocketOptionsOverride) GetSocketOptions() []*SocketOption { + if x != nil { + return x.SocketOptions + } + return nil +} + var File_envoy_config_core_v3_socket_option_proto protoreflect.FileDescriptor var file_envoy_config_core_v3_socket_option_proto_rawDesc = []byte{ @@ -252,16 +299,22 @@ var file_envoy_config_core_v3_socket_option_proto_rawDesc = []byte{ 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x03, - 0xf8, 0x42, 0x01, 0x42, 0x85, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x53, 0x6f, 0x63, 0x6b, - 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, - 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0xf8, 0x42, 0x01, 0x22, 0x62, 0x0a, 0x15, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 
0x6e, 0x73, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x49, 0x0a, 0x0e, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x11, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -277,18 +330,20 @@ func file_envoy_config_core_v3_socket_option_proto_rawDescGZIP() []byte { } var file_envoy_config_core_v3_socket_option_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_config_core_v3_socket_option_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_socket_option_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_envoy_config_core_v3_socket_option_proto_goTypes = []interface{}{ (SocketOption_SocketState)(0), // 0: envoy.config.core.v3.SocketOption.SocketState (*SocketOption)(nil), // 1: 
envoy.config.core.v3.SocketOption + (*SocketOptionsOverride)(nil), // 2: envoy.config.core.v3.SocketOptionsOverride } var file_envoy_config_core_v3_socket_option_proto_depIdxs = []int32{ 0, // 0: envoy.config.core.v3.SocketOption.state:type_name -> envoy.config.core.v3.SocketOption.SocketState - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 1, // 1: envoy.config.core.v3.SocketOptionsOverride.socket_options:type_name -> envoy.config.core.v3.SocketOption + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_envoy_config_core_v3_socket_option_proto_init() } @@ -309,6 +364,18 @@ func file_envoy_config_core_v3_socket_option_proto_init() { return nil } } + file_envoy_config_core_v3_socket_option_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOptionsOverride); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_envoy_config_core_v3_socket_option_proto_msgTypes[0].OneofWrappers = []interface{}{ (*SocketOption_IntValue)(nil), @@ -320,7 +387,7 @@ func file_envoy_config_core_v3_socket_option_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_core_v3_socket_option_proto_rawDesc, NumEnums: 1, - NumMessages: 1, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go index 336c7f404d..dc0b53f551 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go @@ -74,15 +74,38 @@ func (m *SocketOption) validate(all bool) error { errors = append(errors, err) } - switch m.Value.(type) { - + oneofValuePresent := false + switch v := m.Value.(type) { case *SocketOption_IntValue: + if v == nil { + err := SocketOptionValidationError{ + field: "Value", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofValuePresent = true // no validation rules for IntValue - case *SocketOption_BufValue: + if v == nil { + err := SocketOptionValidationError{ + field: "Value", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofValuePresent = true // no validation rules for BufValue - default: + _ = v // ensures v is used + } + if !oneofValuePresent { err := SocketOptionValidationError{ field: "Value", reason: "value is required", @@ -91,7 +114,6 @@ func (m *SocketOption) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -170,3 +192,139 @@ var _ interface { Cause() error ErrorName() string } = SocketOptionValidationError{} + +// Validate checks the field values on SocketOptionsOverride with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *SocketOptionsOverride) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOptionsOverride with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SocketOptionsOverrideMultiError, or nil if none found. +func (m *SocketOptionsOverride) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOptionsOverride) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetSocketOptions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SocketOptionsOverrideValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SocketOptionsOverrideValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SocketOptionsOverrideValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return SocketOptionsOverrideMultiError(errors) + } + + return nil +} + +// SocketOptionsOverrideMultiError is an error wrapping multiple validation +// errors returned by SocketOptionsOverride.ValidateAll() if the designated +// constraints aren't met. +type SocketOptionsOverrideMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m SocketOptionsOverrideMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOptionsOverrideMultiError) AllErrors() []error { return m } + +// SocketOptionsOverrideValidationError is the validation error returned by +// SocketOptionsOverride.Validate if the designated constraints aren't met. +type SocketOptionsOverrideValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketOptionsOverrideValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOptionsOverrideValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOptionsOverrideValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SocketOptionsOverrideValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SocketOptionsOverrideValidationError) ErrorName() string { + return "SocketOptionsOverrideValidationError" +} + +// Error satisfies the builtin error interface +func (e SocketOptionsOverrideValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOptionsOverride.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOptionsOverrideValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOptionsOverrideValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go index a2e6cae983..745b4c1860 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/substitution_format_string.proto package corev3 @@ -43,9 +43,9 @@ type SubstitutionFormatString struct { // empty string, so that empty values are omitted entirely. // * for ``json_format`` the keys with null values are omitted in the output structure. OmitEmptyValues bool `protobuf:"varint,3,opt,name=omit_empty_values,json=omitEmptyValues,proto3" json:"omit_empty_values,omitempty"` - // Specify a *content_type* field. 
- // If this field is not set then ``text/plain`` is used for *text_format* and - // ``application/json`` is used for *json_format*. + // Specify a ``content_type`` field. + // If this field is not set then ``text/plain`` is used for ``text_format`` and + // ``application/json`` is used for ``json_format``. // // .. validated-code-block:: yaml // :type-name: envoy.config.core.v3.SubstitutionFormatString diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go index 3d5d2ed386..2f3615c0b9 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go @@ -104,12 +104,33 @@ func (m *SubstitutionFormatString) validate(all bool) error { } - switch m.Format.(type) { - + oneofFormatPresent := false + switch v := m.Format.(type) { case *SubstitutionFormatString_TextFormat: + if v == nil { + err := SubstitutionFormatStringValidationError{ + field: "Format", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFormatPresent = true // no validation rules for TextFormat - case *SubstitutionFormatString_JsonFormat: + if v == nil { + err := SubstitutionFormatStringValidationError{ + field: "Format", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFormatPresent = true if m.GetJsonFormat() == nil { err := SubstitutionFormatStringValidationError{ @@ -152,6 +173,17 @@ func (m *SubstitutionFormatString) validate(all bool) error { } case *SubstitutionFormatString_TextFormatSource: + if v == nil 
{ + err := SubstitutionFormatStringValidationError{ + field: "Format", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFormatPresent = true if all { switch v := interface{}(m.GetTextFormatSource()).(type) { @@ -183,6 +215,9 @@ func (m *SubstitutionFormatString) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofFormatPresent { err := SubstitutionFormatStringValidationError{ field: "Format", reason: "value is required", @@ -191,7 +226,6 @@ func (m *SubstitutionFormatString) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go index a02832f6d5..d5ff6d382f 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/core/v3/udp_socket_config.proto package corev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go index 1253a722da..24f1baab7a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/endpoint/v3/endpoint.proto package endpointv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go index b163e4fb5f..0c8f3c0990 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/endpoint/v3/endpoint_components.proto package endpointv3 @@ -125,8 +125,8 @@ type LbEndpoint struct { HealthStatus v3.HealthStatus `protobuf:"varint,2,opt,name=health_status,json=healthStatus,proto3,enum=envoy.config.core.v3.HealthStatus" json:"health_status,omitempty"` // The endpoint metadata specifies values that may be used by the load // balancer to select endpoints in a cluster for a given request. The filter - // name should be specified as *envoy.lb*. An example boolean key-value pair - // is *canary*, providing the optional canary status of the upstream host. + // name should be specified as ``envoy.lb``. An example boolean key-value pair + // is ``canary``, providing the optional canary status of the upstream host. // This may be matched against in a route's // :ref:`RouteAction ` metadata_match field // to subset the endpoints considered in cluster load balancing. @@ -307,7 +307,7 @@ type LocalityLbEndpoints struct { Locality *v3.Locality `protobuf:"bytes,1,opt,name=locality,proto3" json:"locality,omitempty"` // The group of endpoints belonging to the locality specified. // [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be - // deprecated and replaced by *load_balancer_endpoints*.] + // deprecated and replaced by ``load_balancer_endpoints``.] LbEndpoints []*LbEndpoint `protobuf:"bytes,2,rep,name=lb_endpoints,json=lbEndpoints,proto3" json:"lb_endpoints,omitempty"` // [#not-implemented-hide:] // @@ -440,7 +440,7 @@ type isLocalityLbEndpoints_LbConfig interface { type LocalityLbEndpoints_LoadBalancerEndpoints struct { // The group of endpoints belonging to the locality. - // [#comment:TODO(adisuissa): Once LEDS is implemented the *lb_endpoints* field + // [#comment:TODO(adisuissa): Once LEDS is implemented the ``lb_endpoints`` field // needs to be deprecated.] 
LoadBalancerEndpoints *LocalityLbEndpoints_LbEndpointList `protobuf:"bytes,7,opt,name=load_balancer_endpoints,json=loadBalancerEndpoints,proto3,oneof"` } @@ -473,6 +473,15 @@ type Endpoint_HealthCheckConfig struct { // to a non-empty value allows overriding the cluster level configuration for a specific // endpoint. Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"` + // Optional alternative health check host address. + // + // .. attention:: + // + // The form of the health check host address is expected to be a direct IP address. + Address *v3.Address `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + // Optional flag to control if perform active health check for this endpoint. + // Active health check is enabled by default if there is a health checker. + DisableActiveHealthCheck bool `protobuf:"varint,4,opt,name=disable_active_health_check,json=disableActiveHealthCheck,proto3" json:"disable_active_health_check,omitempty"` } func (x *Endpoint_HealthCheckConfig) Reset() { @@ -521,6 +530,20 @@ func (x *Endpoint_HealthCheckConfig) GetHostname() string { return "" } +func (x *Endpoint_HealthCheckConfig) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *Endpoint_HealthCheckConfig) GetDisableActiveHealthCheck() bool { + if x != nil { + return x.DisableActiveHealthCheck + } + return false +} + // [#not-implemented-hide:] // A list of endpoints of a specific locality. 
type LocalityLbEndpoints_LbEndpointList struct { @@ -595,7 +618,7 @@ var file_envoy_config_endpoint_v3_endpoint_components_proto_rawDesc = []byte{ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x81, 0x03, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x37, + 0x6f, 0x22, 0xf9, 0x03, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, @@ -607,110 +630,117 @@ var file_envoy_config_endpoint_v3_endpoint_components_proto_rawDesc = []byte{ 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x92, 0x01, 0x0a, 0x11, 0x48, 0x65, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x8a, 0x02, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x0a, 0x0a, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, - 0x74, 0x6e, 
0x61, 0x6d, 0x65, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x25, - 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x91, 0x03, 0x0a, 0x0a, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, - 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x08, 0x65, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x0c, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, - 0x0d, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 
0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x59, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x3a, 0x27, 0x9a, - 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x4c, 0x62, 0x45, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x42, 0x11, 0x0a, 0x0f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, 0x92, 0x01, 0x0a, 0x19, 0x4c, 0x65, - 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, - 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x43, 0x0a, 0x0b, 0x6c, 0x65, 0x64, 0x73, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3d, + 0x0a, 0x1b, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 
0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x18, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x3a, 0x37, 0x9a, + 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x91, 0x03, + 0x0a, 0x0a, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x08, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x48, 0x00, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x25, + 0x0a, 0x0d, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x0a, 0x6c, 0x65, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x14, - 0x6c, 0x65, 0x64, 0x73, 
0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x65, 0x64, 0x73, - 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xe1, - 0x05, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, - 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, - 0x74, 0x79, 0x12, 0x47, 0x0a, 0x0c, 0x6c, 0x62, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0b, - 0x6c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x76, 0x0a, 0x17, 0x6c, - 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, - 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x2e, 0x4c, 0x62, 0x45, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x15, 0x6c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x73, 0x12, 0x76, 0x0a, 0x1c, 0x6c, 0x65, 0x64, 
0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x65, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, - 0x52, 0x19, 0x6c, 0x65, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x15, 0x6c, - 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, - 0x01, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x24, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, - 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, - 0x80, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x09, - 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, - 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x69, 0x74, 0x79, 0x1a, 0x59, 0x0a, 0x0e, 0x4c, 0x62, 0x45, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0c, 0x6c, 0x62, - 
0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3a, + 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x59, 0x0a, 0x15, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, + 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x57, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x42, 0x11, + 0x0a, 0x0f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x22, 0x92, 0x01, 0x0a, 0x19, 0x4c, 0x65, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x43, 0x0a, 0x0b, 0x6c, 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 
0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0a, 0x6c, 0x65, 0x64, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x6c, 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x6c, 0x65, 0x64, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xe1, 0x05, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, + 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x47, 0x0a, 0x0c, 0x6c, 0x62, + 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0b, 0x6c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x73, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x42, 0x97, 0x01, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 
0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x17, 0x45, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, - 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x74, 0x73, 0x12, 0x76, 0x0a, 0x17, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x73, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x69, + 0x73, 0x74, 0x48, 0x00, 0x52, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x76, 0x0a, 0x1c, 0x6c, + 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x65, 
0x64, + 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x19, 0x6c, 0x65, 0x64, 0x73, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x24, + 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x69, 0x74, + 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x69, 0x74, 0x79, + 0x1a, 0x59, 0x0a, 0x0e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0c, 0x6c, 0x62, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0b, + 0x6c, 0x62, 0x45, 0x6e, 0x64, 
0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x3a, 0x30, 0x9a, 0xc5, 0x88, + 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x42, 0x0b, 0x0a, + 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x97, 0x01, 0x0a, 0x26, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x17, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, + 0x33, 0x3b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -754,12 +784,13 @@ var file_envoy_config_endpoint_v3_endpoint_components_proto_depIdxs = []int32{ 2, // 10: envoy.config.endpoint.v3.LocalityLbEndpoints.leds_cluster_locality_config:type_name -> envoy.config.endpoint.v3.LedsClusterLocalityConfig 9, // 11: envoy.config.endpoint.v3.LocalityLbEndpoints.load_balancing_weight:type_name -> google.protobuf.UInt32Value 9, // 12: envoy.config.endpoint.v3.LocalityLbEndpoints.proximity:type_name -> google.protobuf.UInt32Value - 1, // 13: envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointList.lb_endpoints:type_name -> 
envoy.config.endpoint.v3.LbEndpoint - 14, // [14:14] is the sub-list for method output_type - 14, // [14:14] is the sub-list for method input_type - 14, // [14:14] is the sub-list for extension type_name - 14, // [14:14] is the sub-list for extension extendee - 0, // [0:14] is the sub-list for field type_name + 6, // 13: envoy.config.endpoint.v3.Endpoint.HealthCheckConfig.address:type_name -> envoy.config.core.v3.Address + 1, // 14: envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointList.lb_endpoints:type_name -> envoy.config.endpoint.v3.LbEndpoint + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_envoy_config_endpoint_v3_endpoint_components_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go index 2cc83c4d13..e2162fbff6 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go @@ -266,9 +266,18 @@ func (m *LbEndpoint) validate(all bool) error { } - switch m.HostIdentifier.(type) { - + switch v := m.HostIdentifier.(type) { case *LbEndpoint_Endpoint: + if v == nil { + err := LbEndpointValidationError{ + field: "HostIdentifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetEndpoint()).(type) { @@ -300,8 +309,19 @@ func (m *LbEndpoint) validate(all bool) 
error { } case *LbEndpoint_EndpointName: + if v == nil { + err := LbEndpointValidationError{ + field: "HostIdentifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for EndpointName - + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -654,9 +674,18 @@ func (m *LocalityLbEndpoints) validate(all bool) error { } } - switch m.LbConfig.(type) { - + switch v := m.LbConfig.(type) { case *LocalityLbEndpoints_LoadBalancerEndpoints: + if v == nil { + err := LocalityLbEndpointsValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetLoadBalancerEndpoints()).(type) { @@ -688,6 +717,16 @@ func (m *LocalityLbEndpoints) validate(all bool) error { } case *LocalityLbEndpoints_LedsClusterLocalityConfig: + if v == nil { + err := LocalityLbEndpointsValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetLedsClusterLocalityConfig()).(type) { @@ -718,6 +757,8 @@ func (m *LocalityLbEndpoints) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -835,6 +876,37 @@ func (m *Endpoint_HealthCheckConfig) validate(all bool) error { // no validation rules for Hostname + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Endpoint_HealthCheckConfigValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Endpoint_HealthCheckConfigValidationError{ + field: "Address", + reason: "embedded message failed validation", + 
cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Endpoint_HealthCheckConfigValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DisableActiveHealthCheck + if len(errors) > 0 { return Endpoint_HealthCheckConfigMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go index d6ef6a73c4..c0aae02a49 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/endpoint/v3/load_report.proto package endpointv3 @@ -361,9 +361,9 @@ type ClusterStats struct { // in the DropOverload policy. DroppedRequests []*ClusterStats_DroppedRequests `protobuf:"bytes,5,rep,name=dropped_requests,json=droppedRequests,proto3" json:"dropped_requests,omitempty"` // Period over which the actual load report occurred. This will be guaranteed to include every - // request reported. Due to system load and delays between the *LoadStatsRequest* sent from Envoy - // and the *LoadStatsResponse* message sent from the management server, this may be longer than - // the requested load reporting interval in the *LoadStatsResponse*. + // request reported. 
Due to system load and delays between the ``LoadStatsRequest`` sent from Envoy + // and the ``LoadStatsResponse`` message sent from the management server, this may be longer than + // the requested load reporting interval in the ``LoadStatsResponse``. LoadReportInterval *duration.Duration `protobuf:"bytes,4,opt,name=load_report_interval,json=loadReportInterval,proto3" json:"load_report_interval,omitempty"` } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go index 42675c9e9c..e01516c2e6 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go @@ -1,14 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/listener/v3/api_listener.proto package listenerv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,7 +38,7 @@ type ApiListener struct { // it would have caused circular dependencies for go protos: lds.proto depends on this file, // and http_connection_manager.proto depends on rds.proto, which is in the same directory as // lds.proto, so lds.proto cannot depend on this file.] 
- ApiListener *any.Any `protobuf:"bytes,1,opt,name=api_listener,json=apiListener,proto3" json:"api_listener,omitempty"` + ApiListener *any1.Any `protobuf:"bytes,1,opt,name=api_listener,json=apiListener,proto3" json:"api_listener,omitempty"` } func (x *ApiListener) Reset() { @@ -73,7 +73,7 @@ func (*ApiListener) Descriptor() ([]byte, []int) { return file_envoy_config_listener_v3_api_listener_proto_rawDescGZIP(), []int{0} } -func (x *ApiListener) GetApiListener() *any.Any { +func (x *ApiListener) GetApiListener() *any1.Any { if x != nil { return x.ApiListener } @@ -128,7 +128,7 @@ func file_envoy_config_listener_v3_api_listener_proto_rawDescGZIP() []byte { var file_envoy_config_listener_v3_api_listener_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_envoy_config_listener_v3_api_listener_proto_goTypes = []interface{}{ (*ApiListener)(nil), // 0: envoy.config.listener.v3.ApiListener - (*any.Any)(nil), // 1: google.protobuf.Any + (*any1.Any)(nil), // 1: google.protobuf.Any } var file_envoy_config_listener_v3_api_listener_proto_depIdxs = []int32{ 1, // 0: envoy.config.listener.v3.ApiListener.api_listener:type_name -> google.protobuf.Any diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go index f9a42ce2a1..ca07e648cb 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/listener/v3/listener.proto package listenerv3 @@ -82,13 +82,20 @@ func (Listener_DrainType) EnumDescriptor() ([]byte, []int) { } // The additional address the listener is listening on. -// [#not-implemented-hide:] type AdditionalAddress struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Address *v3.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. If specified, this will override the + // :ref:`socket_options ` + // in the listener. If specified with no + // :ref:`socket_options ` + // or an empty list of :ref:`socket_options `, + // it means no socket option will apply. + SocketOptions *v3.SocketOptionsOverride `protobuf:"bytes,2,opt,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` } func (x *AdditionalAddress) Reset() { @@ -130,7 +137,14 @@ func (x *AdditionalAddress) GetAddress() *v3.Address { return nil } -// Listener list collections. Entries are *Listener* resources or references. +func (x *AdditionalAddress) GetSocketOptions() *v3.SocketOptionsOverride { + if x != nil { + return x.SocketOptions + } + return nil +} + +// Listener list collections. Entries are ``Listener`` resources or references. // [#not-implemented-hide:] type ListenerCollection struct { state protoimpl.MessageState @@ -192,16 +206,15 @@ type Listener struct { // The address that the listener should listen on. In general, the address must be unique, though // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on // Linux as the actual port will be allocated by the OS. - // Required unless *api_listener* or *listener_specifier* is populated. 
+ // Required unless ``api_listener`` or ``listener_specifier`` is populated. Address *v3.Address `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` // The additional addresses the listener should listen on. The addresses must be unique across all // listeners. Multiple addresses with port 0 can be supplied. When using multiple addresses in a single listener, // all addresses use the same protocol, and multiple internal addresses are not supported. - // [#not-implemented-hide:] AdditionalAddresses []*AdditionalAddress `protobuf:"bytes,33,rep,name=additional_addresses,json=additionalAddresses,proto3" json:"additional_addresses,omitempty"` // Optional prefix to use on listener stats. If empty, the stats will be rooted at - // `listener.
.`. If non-empty, stats will be rooted at - // `listener..`. + // ``listener.
.``. If non-empty, stats will be rooted at + // ``listener..``. StatPrefix string `protobuf:"bytes,28,opt,name=stat_prefix,json=statPrefix,proto3" json:"stat_prefix,omitempty"` // A list of filter chains to consider for this listener. The // :ref:`FilterChain ` with the most specific @@ -228,7 +241,7 @@ type Listener struct { // filter chain is removed or structurally modified, then the drain for its // connections is initiated. FilterChainMatcher *v32.Matcher `protobuf:"bytes,32,opt,name=filter_chain_matcher,json=filterChainMatcher,proto3" json:"filter_chain_matcher,omitempty"` - // If a connection is redirected using *iptables*, the port on which the proxy + // If a connection is redirected using ``iptables``, the port on which the proxy // receives it might be different from the original destination address. When this flag is set to // true, the listener hands off redirected connections to the listener associated with the // original destination address. If there is no listener associated with the original destination @@ -259,7 +272,7 @@ type Listener struct { ListenerFilters []*ListenerFilter `protobuf:"bytes,9,rep,name=listener_filters,json=listenerFilters,proto3" json:"listener_filters,omitempty"` // The timeout to wait for all listener filters to complete operation. If the timeout is reached, // the accepted socket is closed without a connection being created unless - // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the + // ``continue_on_listener_filters_timeout`` is set to true. Specify 0 to disable the // timeout. If not specified, a default timeout of 15s is used. ListenerFiltersTimeout *duration.Duration `protobuf:"bytes,15,opt,name=listener_filters_timeout,json=listenerFiltersTimeout,proto3" json:"listener_filters_timeout,omitempty"` // Whether a connection should be created when listener filters timeout. Default is false. 
@@ -272,28 +285,31 @@ type Listener struct { ContinueOnListenerFiltersTimeout bool `protobuf:"varint,17,opt,name=continue_on_listener_filters_timeout,json=continueOnListenerFiltersTimeout,proto3" json:"continue_on_listener_filters_timeout,omitempty"` // Whether the listener should be set as a transparent socket. // When this flag is set to true, connections can be redirected to the listener using an - // *iptables* *TPROXY* target, in which case the original source and destination addresses and + // ``iptables`` ``TPROXY`` target, in which case the original source and destination addresses and // ports are preserved on accepted connections. This flag should be used in combination with // :ref:`an original_dst ` :ref:`listener filter // ` to mark the connections' local addresses as // "restored." This can be used to hand off each redirected connection to another listener // associated with the connection's destination address. Direct connections to the socket without - // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are + // using ``TPROXY`` cannot be distinguished from connections redirected using ``TPROXY`` and are // therefore treated as if they were redirected. // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. - // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. + // Setting this flag requires Envoy to run with the ``CAP_NET_ADMIN`` capability. // When this flag is not set (default), the socket is not modified, i.e. the transparent option // is neither set nor reset. Transparent *wrappers.BoolValue `protobuf:"bytes,10,opt,name=transparent,proto3" json:"transparent,omitempty"` - // Whether the listener should set the *IP_FREEBIND* socket option. When this + // Whether the listener should set the ``IP_FREEBIND`` socket option. 
When this // flag is set to true, listeners can be bound to an IP address that is not // configured on the system running Envoy. When this flag is set to false, the - // option *IP_FREEBIND* is disabled on the socket. When this flag is not set + // option ``IP_FREEBIND`` is disabled on the socket. When this flag is not set // (default), the socket is not modified, i.e. the option is neither enabled // nor disabled. Freebind *wrappers.BoolValue `protobuf:"bytes,11,opt,name=freebind,proto3" json:"freebind,omitempty"` // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. + // precompiled binaries. The socket options can be updated for a listener when + // :ref:`enable_reuse_port ` + // is `true`. Otherwise, if socket options change during a listener update the update will be rejected + // to make it clear that the options were not updated. SocketOptions []*v3.SocketOption `protobuf:"bytes,13,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` // Whether the listener should accept TCP Fast Open (TFO) connections. // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on @@ -346,15 +362,17 @@ type Listener struct { // it is recommended to disable the balance config in listener X to avoid the cost of balancing, and // enable the balance config in Y1 and Y2 to balance the connections among the workers. ConnectionBalanceConfig *Listener_ConnectionBalanceConfig `protobuf:"bytes,20,opt,name=connection_balance_config,json=connectionBalanceConfig,proto3" json:"connection_balance_config,omitempty"` - // Deprecated. Use `enable_reuse_port` instead. + // Deprecated. Use ``enable_reuse_port`` instead. // // Deprecated: Do not use. 
ReusePort bool `protobuf:"varint,21,opt,name=reuse_port,json=reusePort,proto3" json:"reuse_port,omitempty"` - // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and + // When this flag is set to true, listeners set the ``SO_REUSEPORT`` socket option and // create one socket for each worker thread. This makes inbound connections // distribute among worker threads roughly evenly in cases where there are a high number // of connections. When this flag is set to false, all worker threads share one socket. This field - // defaults to true. + // defaults to true. The change of field will be rejected during an listener update when the + // runtime flag ``envoy.reloadable_features.enable_update_listener_socket_options`` is enabled. + // Otherwise, the update of this field will be ignored quietly. // // .. attention:: // @@ -383,9 +401,6 @@ type Listener struct { // to true. Default is true. BindToPort *wrappers.BoolValue `protobuf:"bytes,26,opt,name=bind_to_port,json=bindToPort,proto3" json:"bind_to_port,omitempty"` // The exclusive listener type and the corresponding config. - // TODO(lambdai): https://github.com/envoyproxy/envoy/issues/15372 - // Will create and add TcpListenerConfig. Will add UdpListenerConfig and ApiListener. - // [#not-implemented-hide:] // // Types that are assignable to ListenerSpecifier: // *Listener_InternalListener @@ -663,23 +678,146 @@ type isListener_ListenerSpecifier interface { type Listener_InternalListener struct { // Used to represent an internal listener which does not listen on OSI L4 address but can be used by the // :ref:`envoy cluster ` to create a user space connection to. - // The internal listener acts as a tcp listener. It supports listener filters and network filter chains. - // The internal listener require :ref:`address ` has - // field `envoy_internal_address`. + // The internal listener acts as a TCP listener. It supports listener filters and network filter chains. 
+ // Upstream clusters refer to the internal listeners by their :ref:`name + // `. :ref:`Address + // ` must not be set on the internal listeners. // - // There are some limitations are derived from the implementation. The known limitations include + // There are some limitations that are derived from the implementation. The known limitations include: // // * :ref:`ConnectionBalanceConfig ` is not - // allowed because both cluster connection and listener connection must be owned by the same dispatcher. + // allowed because both the cluster connection and the listener connection must be owned by the same dispatcher. // * :ref:`tcp_backlog_size ` // * :ref:`freebind ` // * :ref:`transparent ` - // [#not-implemented-hide:] InternalListener *Listener_InternalListenerConfig `protobuf:"bytes,27,opt,name=internal_listener,json=internalListener,proto3,oneof"` } func (*Listener_InternalListener) isListener_ListenerSpecifier() {} +// A placeholder proto so that users can explicitly configure the standard +// Listener Manager via the bootstrap's :ref:`listener_manager `. 
+// [#not-implemented-hide:] +type ListenerManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ListenerManager) Reset() { + *x = ListenerManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerManager) ProtoMessage() {} + +func (x *ListenerManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerManager.ProtoReflect.Descriptor instead. +func (*ListenerManager) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{3} +} + +// A placeholder proto so that users can explicitly configure the standard +// Validation Listener Manager via the bootstrap's :ref:`listener_manager `. 
+// [#not-implemented-hide:] +type ValidationListenerManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidationListenerManager) Reset() { + *x = ValidationListenerManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidationListenerManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidationListenerManager) ProtoMessage() {} + +func (x *ValidationListenerManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidationListenerManager.ProtoReflect.Descriptor instead. +func (*ValidationListenerManager) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{4} +} + +// A placeholder proto so that users can explicitly configure the API +// Listener Manager via the bootstrap's :ref:`listener_manager `. 
+// [#not-implemented-hide:] +type ApiListenerManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ApiListenerManager) Reset() { + *x = ApiListenerManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApiListenerManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApiListenerManager) ProtoMessage() {} + +func (x *ApiListenerManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApiListenerManager.ProtoReflect.Descriptor instead. +func (*ApiListenerManager) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{5} +} + // [#not-implemented-hide:] type Listener_DeprecatedV1 struct { state protoimpl.MessageState @@ -698,7 +836,7 @@ type Listener_DeprecatedV1 struct { func (x *Listener_DeprecatedV1) Reset() { *x = Listener_DeprecatedV1{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[3] + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -711,7 +849,7 @@ func (x *Listener_DeprecatedV1) String() string { func (*Listener_DeprecatedV1) ProtoMessage() {} func (x *Listener_DeprecatedV1) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[3] + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -749,7 +887,7 @@ type Listener_ConnectionBalanceConfig struct { func (x *Listener_ConnectionBalanceConfig) Reset() { *x = Listener_ConnectionBalanceConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[4] + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -762,7 +900,7 @@ func (x *Listener_ConnectionBalanceConfig) String() string { func (*Listener_ConnectionBalanceConfig) ProtoMessage() {} func (x *Listener_ConnectionBalanceConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[4] + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -822,7 +960,6 @@ func (*Listener_ConnectionBalanceConfig_ExtendBalance) isListener_ConnectionBala } // Configuration for envoy internal listener. All the future internal listener features should be added here. 
-// [#not-implemented-hide:] type Listener_InternalListenerConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -832,7 +969,7 @@ type Listener_InternalListenerConfig struct { func (x *Listener_InternalListenerConfig) Reset() { *x = Listener_InternalListenerConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[5] + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -845,7 +982,7 @@ func (x *Listener_InternalListenerConfig) String() string { func (*Listener_InternalListenerConfig) ProtoMessage() {} func (x *Listener_InternalListenerConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[5] + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -876,7 +1013,7 @@ type Listener_ConnectionBalanceConfig_ExactBalance struct { func (x *Listener_ConnectionBalanceConfig_ExactBalance) Reset() { *x = Listener_ConnectionBalanceConfig_ExactBalance{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[6] + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -889,7 +1026,7 @@ func (x *Listener_ConnectionBalanceConfig_ExactBalance) String() string { func (*Listener_ConnectionBalanceConfig_ExactBalance) ProtoMessage() {} func (x *Listener_ConnectionBalanceConfig_ExactBalance) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[6] + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -954,210 +1091,220 
@@ var file_envoy_config_listener_v3_listener_proto_rawDesc = []byte{ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x4c, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, - 0x4c, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0x85, 0x17, - 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, - 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x5e, 0x0a, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 
0x73, 0x65, 0x73, 0x18, - 0x21, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x52, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x5f, - 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, - 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x4a, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, - 0x61, 0x69, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x20, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x12, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x44, - 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x64, - 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, - 0x61, 0x6c, 
0x75, 0x65, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, - 0x6c, 0x44, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x19, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x6f, 0x0a, - 0x21, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, - 0x1d, 0x70, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, - 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3a, - 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x61, 0x0a, 0x0d, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x76, 0x31, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 
0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x56, 0x31, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, - 0x0c, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x12, 0x4b, 0x0a, - 0x0a, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x09, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x53, 0x0a, 0x10, 0x6c, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x09, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x6f, 0x22, 0xa0, 0x01, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x52, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 
0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4c, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, 0x65, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x22, 0x85, 0x17, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x5e, 0x0a, 0x14, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x18, 0x21, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x1c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x4a, 0x0a, + 0x0d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 
0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0f, - 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, - 0x53, 0x0a, 0x18, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x6c, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4e, 0x0a, 0x24, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, - 0x5f, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x4f, 0x6e, 0x4c, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 
0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x12, 0x49, 0x0a, 0x0e, 0x73, 0x6f, - 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x1a, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x61, 0x73, - 0x74, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6c, 0x65, 0x6e, - 0x67, 0x74, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, 0x74, 0x63, 0x70, 0x46, 0x61, 0x73, 0x74, - 0x4f, 0x70, 0x65, 0x6e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, - 0x53, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x13, 0x75, 0x64, 0x70, 0x5f, 0x6c, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x64, 0x70, - 0x4c, 
0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, - 0x75, 0x64, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x61, 0x70, 0x69, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, - 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x0c, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, + 0x12, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x4f, 0x72, + 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x14, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x0b, - 0x61, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 
0x6e, 0x65, 0x72, 0x12, 0x76, 0x0a, 0x19, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x17, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x0a, 0x72, 0x65, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x6f, 0x72, - 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, - 0x03, 0x33, 0x2e, 0x30, 0x52, 0x09, 0x72, 0x65, 0x75, 0x73, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, - 0x46, 0x0a, 0x11, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x75, 0x73, 0x65, 0x5f, - 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, - 0x75, 0x73, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, - 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x46, 0x0a, 0x10, - 0x74, 0x63, 0x70, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6c, 0x6f, 0x67, 0x5f, 0x73, 0x69, 
0x7a, 0x65, - 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x42, 0x61, 0x63, 0x6b, 0x6c, 0x6f, 0x67, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x3c, 0x0a, 0x0c, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x74, 0x6f, 0x5f, - 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x62, 0x69, 0x6e, 0x64, 0x54, 0x6f, 0x50, 0x6f, - 0x72, 0x74, 0x12, 0x68, 0x0a, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6c, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, - 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x70, 0x74, 0x63, 0x70, 0x18, 0x1e, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0b, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x70, 0x74, 0x63, 0x70, 0x12, - 0x37, 0x0a, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, - 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x1f, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x15, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, - 0x6f, 0x6e, 0x6e, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0x77, 0x0a, 0x0c, 0x44, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 
0x65, 0x64, 0x56, 0x31, 0x12, 0x3c, 0x0a, 0x0c, 0x62, 0x69, 0x6e, 0x64, - 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x12, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x12, 0x6f, 0x0a, 0x21, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x8a, 0x93, 0xb7, + 0x2a, 0x02, 0x08, 0x01, 0x52, 0x1d, 0x70, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x61, 0x0a, 0x0d, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x76, 0x31, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x72, 
0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x56, 0x31, 0x12, 0x4b, 0x0a, 0x0a, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x53, 0x0a, 0x10, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x52, 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x53, 0x0a, 0x18, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4e, 0x0a, 0x24, 0x63, 0x6f, 0x6e, + 0x74, 0x69, 0x6e, 0x75, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, + 0x65, 0x4f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, + 
0x72, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0b, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x62, 0x69, 0x6e, 0x64, - 0x54, 0x6f, 0x50, 0x6f, 0x72, 0x74, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, - 0x31, 0x1a, 0xfc, 0x02, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, - 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, + 0x69, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x12, + 0x49, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x1a, 0x74, 0x63, + 0x70, 0x5f, 0x66, 0x61, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x75, + 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, 0x74, 0x63, + 0x70, 0x46, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x6e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x53, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x13, 0x75, 0x64, 0x70, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x64, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x11, 0x75, 0x64, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x61, 0x70, 0x69, 0x5f, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 
0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x52, 0x0b, 0x61, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x12, 0x76, 0x0a, 0x19, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x14, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x17, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x0a, 0x72, 0x65, 0x75, 0x73, + 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x18, 0x01, + 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x09, 0x72, 0x65, 0x75, 0x73, 0x65, + 0x50, 0x6f, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x11, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, + 0x65, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x52, 0x65, 0x75, 0x73, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x43, 0x0a, 0x0a, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 
0x4c, 0x6f, 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, + 0x67, 0x12, 0x46, 0x0a, 0x10, 0x74, 0x63, 0x70, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6c, 0x6f, 0x67, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x42, 0x61, + 0x63, 0x6b, 0x6c, 0x6f, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x3c, 0x0a, 0x0c, 0x62, 0x69, 0x6e, + 0x64, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x62, 0x69, 0x6e, + 0x64, 0x54, 0x6f, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x68, 0x0a, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, + 0x10, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x70, 0x74, 0x63, + 0x70, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, + 0x70, 0x74, 0x63, 0x70, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x67, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 
0x67, 0x6e, 0x6f, 0x72, 0x65, 0x47, 0x6c, + 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0x77, 0x0a, + 0x0c, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x12, 0x3c, 0x0a, + 0x0c, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0a, 0x62, 0x69, 0x6e, 0x64, 0x54, 0x6f, 0x50, 0x6f, 0x72, 0x74, 0x3a, 0x29, 0x9a, 0xc5, 0x88, + 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x1a, 0xfc, 0x02, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x12, 0x53, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 
0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x1a, 0x51, 0x0a, 0x0c, 0x45, 0x78, 0x61, 0x63, 0x74, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x3a, 0x41, 0x9a, 0xc5, 0x88, 0x1e, 0x3c, 0x0a, 0x3a, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x78, + 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, + 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x45, 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, - 0x0c, 0x65, 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x53, 0x0a, - 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x65, 0x1a, 0x51, 0x0a, 0x0c, 0x45, 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x65, 0x3a, 0x41, 0x9a, 0xc5, 0x88, 0x1e, 0x3c, 0x0a, 0x3a, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 
0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, - 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x13, 0x0a, 0x0c, 0x62, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, - 0x1a, 0x18, 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x29, 0x0a, 0x09, 0x44, 0x72, - 0x61, 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, - 0x4c, 0x54, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x59, 0x5f, 0x4f, - 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x12, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, - 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x0f, 0x4a, - 0x04, 0x08, 0x17, 0x10, 0x18, 0x42, 0x8d, 0x01, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x42, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 
0x6d, 0x2f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, - 0x76, 0x33, 0x3b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0xba, 0x80, 0xc8, - 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x42, 0x13, 0x0a, 0x0c, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x18, 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, + 0x29, 0x0a, 0x09, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, + 0x49, 0x46, 0x59, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, + 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x12, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, + 0x08, 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x22, 0x11, 0x0a, 0x0f, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x22, 0x1b, 0x0a, + 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x22, 0x14, 0x0a, 0x12, 0x41, 0x70, + 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x42, 0x8d, 0x01, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1173,68 +1320,73 @@ func file_envoy_config_listener_v3_listener_proto_rawDescGZIP() []byte { } var file_envoy_config_listener_v3_listener_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_config_listener_v3_listener_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_envoy_config_listener_v3_listener_proto_msgTypes = make([]protoimpl.MessageInfo, 10) var file_envoy_config_listener_v3_listener_proto_goTypes = []interface{}{ (Listener_DrainType)(0), // 0: envoy.config.listener.v3.Listener.DrainType (*AdditionalAddress)(nil), // 1: envoy.config.listener.v3.AdditionalAddress (*ListenerCollection)(nil), // 2: envoy.config.listener.v3.ListenerCollection (*Listener)(nil), // 3: envoy.config.listener.v3.Listener - (*Listener_DeprecatedV1)(nil), // 4: envoy.config.listener.v3.Listener.DeprecatedV1 - (*Listener_ConnectionBalanceConfig)(nil), // 5: envoy.config.listener.v3.Listener.ConnectionBalanceConfig - (*Listener_InternalListenerConfig)(nil), // 6: envoy.config.listener.v3.Listener.InternalListenerConfig - (*Listener_ConnectionBalanceConfig_ExactBalance)(nil), // 7: envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance - 
(*v3.Address)(nil), // 8: envoy.config.core.v3.Address - (*v31.CollectionEntry)(nil), // 9: xds.core.v3.CollectionEntry - (*FilterChain)(nil), // 10: envoy.config.listener.v3.FilterChain - (*v32.Matcher)(nil), // 11: xds.type.matcher.v3.Matcher - (*wrappers.BoolValue)(nil), // 12: google.protobuf.BoolValue - (*wrappers.UInt32Value)(nil), // 13: google.protobuf.UInt32Value - (*v3.Metadata)(nil), // 14: envoy.config.core.v3.Metadata - (*ListenerFilter)(nil), // 15: envoy.config.listener.v3.ListenerFilter - (*duration.Duration)(nil), // 16: google.protobuf.Duration - (*v3.SocketOption)(nil), // 17: envoy.config.core.v3.SocketOption - (v3.TrafficDirection)(0), // 18: envoy.config.core.v3.TrafficDirection - (*UdpListenerConfig)(nil), // 19: envoy.config.listener.v3.UdpListenerConfig - (*ApiListener)(nil), // 20: envoy.config.listener.v3.ApiListener - (*v33.AccessLog)(nil), // 21: envoy.config.accesslog.v3.AccessLog - (*v3.TypedExtensionConfig)(nil), // 22: envoy.config.core.v3.TypedExtensionConfig + (*ListenerManager)(nil), // 4: envoy.config.listener.v3.ListenerManager + (*ValidationListenerManager)(nil), // 5: envoy.config.listener.v3.ValidationListenerManager + (*ApiListenerManager)(nil), // 6: envoy.config.listener.v3.ApiListenerManager + (*Listener_DeprecatedV1)(nil), // 7: envoy.config.listener.v3.Listener.DeprecatedV1 + (*Listener_ConnectionBalanceConfig)(nil), // 8: envoy.config.listener.v3.Listener.ConnectionBalanceConfig + (*Listener_InternalListenerConfig)(nil), // 9: envoy.config.listener.v3.Listener.InternalListenerConfig + (*Listener_ConnectionBalanceConfig_ExactBalance)(nil), // 10: envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance + (*v3.Address)(nil), // 11: envoy.config.core.v3.Address + (*v3.SocketOptionsOverride)(nil), // 12: envoy.config.core.v3.SocketOptionsOverride + (*v31.CollectionEntry)(nil), // 13: xds.core.v3.CollectionEntry + (*FilterChain)(nil), // 14: envoy.config.listener.v3.FilterChain + (*v32.Matcher)(nil), // 15: 
xds.type.matcher.v3.Matcher + (*wrappers.BoolValue)(nil), // 16: google.protobuf.BoolValue + (*wrappers.UInt32Value)(nil), // 17: google.protobuf.UInt32Value + (*v3.Metadata)(nil), // 18: envoy.config.core.v3.Metadata + (*ListenerFilter)(nil), // 19: envoy.config.listener.v3.ListenerFilter + (*duration.Duration)(nil), // 20: google.protobuf.Duration + (*v3.SocketOption)(nil), // 21: envoy.config.core.v3.SocketOption + (v3.TrafficDirection)(0), // 22: envoy.config.core.v3.TrafficDirection + (*UdpListenerConfig)(nil), // 23: envoy.config.listener.v3.UdpListenerConfig + (*ApiListener)(nil), // 24: envoy.config.listener.v3.ApiListener + (*v33.AccessLog)(nil), // 25: envoy.config.accesslog.v3.AccessLog + (*v3.TypedExtensionConfig)(nil), // 26: envoy.config.core.v3.TypedExtensionConfig } var file_envoy_config_listener_v3_listener_proto_depIdxs = []int32{ - 8, // 0: envoy.config.listener.v3.AdditionalAddress.address:type_name -> envoy.config.core.v3.Address - 9, // 1: envoy.config.listener.v3.ListenerCollection.entries:type_name -> xds.core.v3.CollectionEntry - 8, // 2: envoy.config.listener.v3.Listener.address:type_name -> envoy.config.core.v3.Address - 1, // 3: envoy.config.listener.v3.Listener.additional_addresses:type_name -> envoy.config.listener.v3.AdditionalAddress - 10, // 4: envoy.config.listener.v3.Listener.filter_chains:type_name -> envoy.config.listener.v3.FilterChain - 11, // 5: envoy.config.listener.v3.Listener.filter_chain_matcher:type_name -> xds.type.matcher.v3.Matcher - 12, // 6: envoy.config.listener.v3.Listener.use_original_dst:type_name -> google.protobuf.BoolValue - 10, // 7: envoy.config.listener.v3.Listener.default_filter_chain:type_name -> envoy.config.listener.v3.FilterChain - 13, // 8: envoy.config.listener.v3.Listener.per_connection_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value - 14, // 9: envoy.config.listener.v3.Listener.metadata:type_name -> envoy.config.core.v3.Metadata - 4, // 10: 
envoy.config.listener.v3.Listener.deprecated_v1:type_name -> envoy.config.listener.v3.Listener.DeprecatedV1 - 0, // 11: envoy.config.listener.v3.Listener.drain_type:type_name -> envoy.config.listener.v3.Listener.DrainType - 15, // 12: envoy.config.listener.v3.Listener.listener_filters:type_name -> envoy.config.listener.v3.ListenerFilter - 16, // 13: envoy.config.listener.v3.Listener.listener_filters_timeout:type_name -> google.protobuf.Duration - 12, // 14: envoy.config.listener.v3.Listener.transparent:type_name -> google.protobuf.BoolValue - 12, // 15: envoy.config.listener.v3.Listener.freebind:type_name -> google.protobuf.BoolValue - 17, // 16: envoy.config.listener.v3.Listener.socket_options:type_name -> envoy.config.core.v3.SocketOption - 13, // 17: envoy.config.listener.v3.Listener.tcp_fast_open_queue_length:type_name -> google.protobuf.UInt32Value - 18, // 18: envoy.config.listener.v3.Listener.traffic_direction:type_name -> envoy.config.core.v3.TrafficDirection - 19, // 19: envoy.config.listener.v3.Listener.udp_listener_config:type_name -> envoy.config.listener.v3.UdpListenerConfig - 20, // 20: envoy.config.listener.v3.Listener.api_listener:type_name -> envoy.config.listener.v3.ApiListener - 5, // 21: envoy.config.listener.v3.Listener.connection_balance_config:type_name -> envoy.config.listener.v3.Listener.ConnectionBalanceConfig - 12, // 22: envoy.config.listener.v3.Listener.enable_reuse_port:type_name -> google.protobuf.BoolValue - 21, // 23: envoy.config.listener.v3.Listener.access_log:type_name -> envoy.config.accesslog.v3.AccessLog - 13, // 24: envoy.config.listener.v3.Listener.tcp_backlog_size:type_name -> google.protobuf.UInt32Value - 12, // 25: envoy.config.listener.v3.Listener.bind_to_port:type_name -> google.protobuf.BoolValue - 6, // 26: envoy.config.listener.v3.Listener.internal_listener:type_name -> envoy.config.listener.v3.Listener.InternalListenerConfig - 12, // 27: envoy.config.listener.v3.Listener.DeprecatedV1.bind_to_port:type_name -> 
google.protobuf.BoolValue - 7, // 28: envoy.config.listener.v3.Listener.ConnectionBalanceConfig.exact_balance:type_name -> envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance - 22, // 29: envoy.config.listener.v3.Listener.ConnectionBalanceConfig.extend_balance:type_name -> envoy.config.core.v3.TypedExtensionConfig - 30, // [30:30] is the sub-list for method output_type - 30, // [30:30] is the sub-list for method input_type - 30, // [30:30] is the sub-list for extension type_name - 30, // [30:30] is the sub-list for extension extendee - 0, // [0:30] is the sub-list for field type_name + 11, // 0: envoy.config.listener.v3.AdditionalAddress.address:type_name -> envoy.config.core.v3.Address + 12, // 1: envoy.config.listener.v3.AdditionalAddress.socket_options:type_name -> envoy.config.core.v3.SocketOptionsOverride + 13, // 2: envoy.config.listener.v3.ListenerCollection.entries:type_name -> xds.core.v3.CollectionEntry + 11, // 3: envoy.config.listener.v3.Listener.address:type_name -> envoy.config.core.v3.Address + 1, // 4: envoy.config.listener.v3.Listener.additional_addresses:type_name -> envoy.config.listener.v3.AdditionalAddress + 14, // 5: envoy.config.listener.v3.Listener.filter_chains:type_name -> envoy.config.listener.v3.FilterChain + 15, // 6: envoy.config.listener.v3.Listener.filter_chain_matcher:type_name -> xds.type.matcher.v3.Matcher + 16, // 7: envoy.config.listener.v3.Listener.use_original_dst:type_name -> google.protobuf.BoolValue + 14, // 8: envoy.config.listener.v3.Listener.default_filter_chain:type_name -> envoy.config.listener.v3.FilterChain + 17, // 9: envoy.config.listener.v3.Listener.per_connection_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 18, // 10: envoy.config.listener.v3.Listener.metadata:type_name -> envoy.config.core.v3.Metadata + 7, // 11: envoy.config.listener.v3.Listener.deprecated_v1:type_name -> envoy.config.listener.v3.Listener.DeprecatedV1 + 0, // 12: 
envoy.config.listener.v3.Listener.drain_type:type_name -> envoy.config.listener.v3.Listener.DrainType + 19, // 13: envoy.config.listener.v3.Listener.listener_filters:type_name -> envoy.config.listener.v3.ListenerFilter + 20, // 14: envoy.config.listener.v3.Listener.listener_filters_timeout:type_name -> google.protobuf.Duration + 16, // 15: envoy.config.listener.v3.Listener.transparent:type_name -> google.protobuf.BoolValue + 16, // 16: envoy.config.listener.v3.Listener.freebind:type_name -> google.protobuf.BoolValue + 21, // 17: envoy.config.listener.v3.Listener.socket_options:type_name -> envoy.config.core.v3.SocketOption + 17, // 18: envoy.config.listener.v3.Listener.tcp_fast_open_queue_length:type_name -> google.protobuf.UInt32Value + 22, // 19: envoy.config.listener.v3.Listener.traffic_direction:type_name -> envoy.config.core.v3.TrafficDirection + 23, // 20: envoy.config.listener.v3.Listener.udp_listener_config:type_name -> envoy.config.listener.v3.UdpListenerConfig + 24, // 21: envoy.config.listener.v3.Listener.api_listener:type_name -> envoy.config.listener.v3.ApiListener + 8, // 22: envoy.config.listener.v3.Listener.connection_balance_config:type_name -> envoy.config.listener.v3.Listener.ConnectionBalanceConfig + 16, // 23: envoy.config.listener.v3.Listener.enable_reuse_port:type_name -> google.protobuf.BoolValue + 25, // 24: envoy.config.listener.v3.Listener.access_log:type_name -> envoy.config.accesslog.v3.AccessLog + 17, // 25: envoy.config.listener.v3.Listener.tcp_backlog_size:type_name -> google.protobuf.UInt32Value + 16, // 26: envoy.config.listener.v3.Listener.bind_to_port:type_name -> google.protobuf.BoolValue + 9, // 27: envoy.config.listener.v3.Listener.internal_listener:type_name -> envoy.config.listener.v3.Listener.InternalListenerConfig + 16, // 28: envoy.config.listener.v3.Listener.DeprecatedV1.bind_to_port:type_name -> google.protobuf.BoolValue + 10, // 29: envoy.config.listener.v3.Listener.ConnectionBalanceConfig.exact_balance:type_name -> 
envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance + 26, // 30: envoy.config.listener.v3.Listener.ConnectionBalanceConfig.extend_balance:type_name -> envoy.config.core.v3.TypedExtensionConfig + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name } func init() { file_envoy_config_listener_v3_listener_proto_init() } @@ -1283,7 +1435,7 @@ func file_envoy_config_listener_v3_listener_proto_init() { } } file_envoy_config_listener_v3_listener_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Listener_DeprecatedV1); i { + switch v := v.(*ListenerManager); i { case 0: return &v.state case 1: @@ -1295,7 +1447,7 @@ func file_envoy_config_listener_v3_listener_proto_init() { } } file_envoy_config_listener_v3_listener_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Listener_ConnectionBalanceConfig); i { + switch v := v.(*ValidationListenerManager); i { case 0: return &v.state case 1: @@ -1307,7 +1459,7 @@ func file_envoy_config_listener_v3_listener_proto_init() { } } file_envoy_config_listener_v3_listener_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Listener_InternalListenerConfig); i { + switch v := v.(*ApiListenerManager); i { case 0: return &v.state case 1: @@ -1319,6 +1471,42 @@ func file_envoy_config_listener_v3_listener_proto_init() { } } file_envoy_config_listener_v3_listener_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listener_DeprecatedV1); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[7].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*Listener_ConnectionBalanceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listener_InternalListenerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Listener_ConnectionBalanceConfig_ExactBalance); i { case 0: return &v.state @@ -1334,7 +1522,7 @@ func file_envoy_config_listener_v3_listener_proto_init() { file_envoy_config_listener_v3_listener_proto_msgTypes[2].OneofWrappers = []interface{}{ (*Listener_InternalListener)(nil), } - file_envoy_config_listener_v3_listener_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_envoy_config_listener_v3_listener_proto_msgTypes[7].OneofWrappers = []interface{}{ (*Listener_ConnectionBalanceConfig_ExactBalance_)(nil), (*Listener_ConnectionBalanceConfig_ExtendBalance)(nil), } @@ -1344,7 +1532,7 @@ func file_envoy_config_listener_v3_listener_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_listener_v3_listener_proto_rawDesc, NumEnums: 1, - NumMessages: 7, + NumMessages: 10, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go index a2e17122b0..6550642a17 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go +++ 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go @@ -90,6 +90,35 @@ func (m *AdditionalAddress) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetSocketOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AdditionalAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AdditionalAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSocketOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AdditionalAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return AdditionalAddressMultiError(errors) } @@ -1007,9 +1036,18 @@ func (m *Listener) validate(all bool) error { // no validation rules for IgnoreGlobalConnLimit - switch m.ListenerSpecifier.(type) { - + switch v := m.ListenerSpecifier.(type) { case *Listener_InternalListener: + if v == nil { + err := ListenerValidationError{ + field: "ListenerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetInternalListener()).(type) { @@ -1040,6 +1078,8 @@ func (m *Listener) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -1119,6 +1159,310 @@ var _ interface { ErrorName() string } = ListenerValidationError{} +// Validate checks the field values on ListenerManager with the rules defined +// in the proto definition for this message. 
If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ListenerManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenerManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListenerManagerMultiError, or nil if none found. +func (m *ListenerManager) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenerManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ListenerManagerMultiError(errors) + } + + return nil +} + +// ListenerManagerMultiError is an error wrapping multiple validation errors +// returned by ListenerManager.ValidateAll() if the designated constraints +// aren't met. +type ListenerManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenerManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenerManagerMultiError) AllErrors() []error { return m } + +// ListenerManagerValidationError is the validation error returned by +// ListenerManager.Validate if the designated constraints aren't met. +type ListenerManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e ListenerManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenerManagerValidationError) ErrorName() string { return "ListenerManagerValidationError" } + +// Error satisfies the builtin error interface +func (e ListenerManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenerManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerManagerValidationError{} + +// Validate checks the field values on ValidationListenerManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ValidationListenerManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ValidationListenerManager with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ValidationListenerManagerMultiError, or nil if none found. +func (m *ValidationListenerManager) ValidateAll() error { + return m.validate(true) +} + +func (m *ValidationListenerManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ValidationListenerManagerMultiError(errors) + } + + return nil +} + +// ValidationListenerManagerMultiError is an error wrapping multiple validation +// errors returned by ValidationListenerManager.ValidateAll() if the +// designated constraints aren't met. +type ValidationListenerManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ValidationListenerManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ValidationListenerManagerMultiError) AllErrors() []error { return m } + +// ValidationListenerManagerValidationError is the validation error returned by +// ValidationListenerManager.Validate if the designated constraints aren't met. +type ValidationListenerManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ValidationListenerManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ValidationListenerManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ValidationListenerManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ValidationListenerManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ValidationListenerManagerValidationError) ErrorName() string { + return "ValidationListenerManagerValidationError" +} + +// Error satisfies the builtin error interface +func (e ValidationListenerManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sValidationListenerManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ValidationListenerManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ValidationListenerManagerValidationError{} + +// Validate checks the field values on ApiListenerManager with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ApiListenerManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ApiListenerManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ApiListenerManagerMultiError, or nil if none found. +func (m *ApiListenerManager) ValidateAll() error { + return m.validate(true) +} + +func (m *ApiListenerManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ApiListenerManagerMultiError(errors) + } + + return nil +} + +// ApiListenerManagerMultiError is an error wrapping multiple validation errors +// returned by ApiListenerManager.ValidateAll() if the designated constraints +// aren't met. +type ApiListenerManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ApiListenerManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ApiListenerManagerMultiError) AllErrors() []error { return m } + +// ApiListenerManagerValidationError is the validation error returned by +// ApiListenerManager.Validate if the designated constraints aren't met. +type ApiListenerManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ApiListenerManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ApiListenerManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ApiListenerManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ApiListenerManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ApiListenerManagerValidationError) ErrorName() string { + return "ApiListenerManagerValidationError" +} + +// Error satisfies the builtin error interface +func (e ApiListenerManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sApiListenerManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ApiListenerManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ApiListenerManagerValidationError{} + // Validate checks the field values on Listener_DeprecatedV1 with the rules // defined in the proto definition for this message. If any rules are // violated, the first error encountered is returned, or nil if there are no violations. 
@@ -1273,9 +1617,20 @@ func (m *Listener_ConnectionBalanceConfig) validate(all bool) error { var errors []error - switch m.BalanceType.(type) { - + oneofBalanceTypePresent := false + switch v := m.BalanceType.(type) { case *Listener_ConnectionBalanceConfig_ExactBalance_: + if v == nil { + err := Listener_ConnectionBalanceConfigValidationError{ + field: "BalanceType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofBalanceTypePresent = true if all { switch v := interface{}(m.GetExactBalance()).(type) { @@ -1307,6 +1662,17 @@ func (m *Listener_ConnectionBalanceConfig) validate(all bool) error { } case *Listener_ConnectionBalanceConfig_ExtendBalance: + if v == nil { + err := Listener_ConnectionBalanceConfigValidationError{ + field: "BalanceType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofBalanceTypePresent = true if all { switch v := interface{}(m.GetExtendBalance()).(type) { @@ -1338,6 +1704,9 @@ func (m *Listener_ConnectionBalanceConfig) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofBalanceTypePresent { err := Listener_ConnectionBalanceConfigValidationError{ field: "BalanceType", reason: "value is required", @@ -1346,7 +1715,6 @@ func (m *Listener_ConnectionBalanceConfig) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go index 2a4907a97b..413254a43d 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go +++ 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/listener/v3/listener_components.proto package listenerv3 @@ -12,7 +12,7 @@ import ( v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" duration "github.com/golang/protobuf/ptypes/duration" wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -140,7 +140,7 @@ func (m *Filter) GetConfigType() isFilter_ConfigType { return nil } -func (x *Filter) GetTypedConfig() *any.Any { +func (x *Filter) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*Filter_TypedConfig); ok { return x.TypedConfig } @@ -162,7 +162,7 @@ type Filter_TypedConfig struct { // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. // [#extension-category: envoy.filters.network] - TypedConfig *any.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` } type Filter_ConfigDiscovery struct { @@ -261,6 +261,7 @@ type FilterChainMatch struct { // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. // // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. + // The value ``*`` is also not supported, and ``server_names`` should be omitted instead. // // .. 
attention:: // @@ -422,6 +423,12 @@ type FilterChain struct { // connections established with the listener. Order matters as the filters are // processed sequentially as connection events happen. Note: If the filter // list is empty, the connection will close by default. + // + // For QUIC listeners, network filters other than HTTP Connection Manager (HCM) + // can be created, but due to differences in the connection implementation compared + // to TCP, the onData() method will never be called. Therefore, network filters + // for QUIC listeners should only expect to do work at the start of a new connection + // (i.e. in onNewConnection()). HCM must be the last (or only) filter in the chain. Filters []*Filter `protobuf:"bytes,3,rep,name=filters,proto3" json:"filters,omitempty"` // Whether the listener should expect a PROXY protocol V1 header on new // connections. If this option is enabled, the listener will assume that that @@ -439,8 +446,8 @@ type FilterChain struct { // [#not-implemented-hide:] filter chain metadata. Metadata *v3.Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` // Optional custom transport socket implementation to use for downstream connections. - // To setup TLS, set a transport socket with name `envoy.transport_sockets.tls` and - // :ref:`DownstreamTlsContext ` in the `typed_config`. + // To setup TLS, set a transport socket with name ``envoy.transport_sockets.tls`` and + // :ref:`DownstreamTlsContext ` in the ``typed_config``. // If no transport socket configuration is specified, new connections // will be set up with plaintext. 
// [#extension-category: envoy.transport_sockets.downstream] @@ -771,7 +778,7 @@ func (m *ListenerFilter) GetConfigType() isListenerFilter_ConfigType { return nil } -func (x *ListenerFilter) GetTypedConfig() *any.Any { +func (x *ListenerFilter) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*ListenerFilter_TypedConfig); ok { return x.TypedConfig } @@ -800,7 +807,7 @@ type ListenerFilter_TypedConfig struct { // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. // [#extension-category: envoy.filters.listener,envoy.filters.udp_listener] - TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } type ListenerFilter_ConfigDiscovery struct { @@ -1174,7 +1181,7 @@ var file_envoy_config_listener_v3_listener_components_proto_goTypes = []interfac (*ListenerFilter)(nil), // 5: envoy.config.listener.v3.ListenerFilter (*FilterChain_OnDemandConfiguration)(nil), // 6: envoy.config.listener.v3.FilterChain.OnDemandConfiguration (*ListenerFilterChainMatchPredicate_MatchSet)(nil), // 7: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet - (*any.Any)(nil), // 8: google.protobuf.Any + (*any1.Any)(nil), // 8: google.protobuf.Any (*v3.ExtensionConfigSource)(nil), // 9: envoy.config.core.v3.ExtensionConfigSource (*wrappers.UInt32Value)(nil), // 10: google.protobuf.UInt32Value (*v3.CidrRange)(nil), // 11: envoy.config.core.v3.CidrRange diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go index 535d04e819..3241ede8c7 100644 --- 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go @@ -67,9 +67,18 @@ func (m *Filter) validate(all bool) error { errors = append(errors, err) } - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *Filter_TypedConfig: + if v == nil { + err := FilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -101,6 +110,16 @@ func (m *Filter) validate(all bool) error { } case *Filter_ConfigDiscovery: + if v == nil { + err := FilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetConfigDiscovery()).(type) { @@ -131,6 +150,8 @@ func (m *Filter) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -819,9 +840,20 @@ func (m *ListenerFilterChainMatchPredicate) validate(all bool) error { var errors []error - switch m.Rule.(type) { - + oneofRulePresent := false + switch v := m.Rule.(type) { case *ListenerFilterChainMatchPredicate_OrMatch: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetOrMatch()).(type) { @@ -853,6 +885,17 @@ func (m *ListenerFilterChainMatchPredicate) validate(all bool) error { } case *ListenerFilterChainMatchPredicate_AndMatch: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a 
typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetAndMatch()).(type) { @@ -884,6 +927,17 @@ func (m *ListenerFilterChainMatchPredicate) validate(all bool) error { } case *ListenerFilterChainMatchPredicate_NotMatch: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetNotMatch()).(type) { @@ -915,6 +969,17 @@ func (m *ListenerFilterChainMatchPredicate) validate(all bool) error { } case *ListenerFilterChainMatchPredicate_AnyMatch: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if m.GetAnyMatch() != true { err := ListenerFilterChainMatchPredicateValidationError{ @@ -928,6 +993,17 @@ func (m *ListenerFilterChainMatchPredicate) validate(all bool) error { } case *ListenerFilterChainMatchPredicate_DestinationPortRange: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetDestinationPortRange()).(type) { @@ -959,6 +1035,9 @@ func (m *ListenerFilterChainMatchPredicate) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofRulePresent { err := ListenerFilterChainMatchPredicateValidationError{ field: "Rule", reason: "value is required", @@ -967,7 +1046,6 @@ func (m *ListenerFilterChainMatchPredicate) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -1114,9 +1192,18 @@ func (m *ListenerFilter) 
validate(all bool) error { } } - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *ListenerFilter_TypedConfig: + if v == nil { + err := ListenerFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -1148,6 +1235,16 @@ func (m *ListenerFilter) validate(all bool) error { } case *ListenerFilter_ConfigDiscovery: + if v == nil { + err := ListenerFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetConfigDiscovery()).(type) { @@ -1178,6 +1275,8 @@ func (m *ListenerFilter) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go index cda301d400..55614afa25 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go @@ -1,13 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/listener/v3/quic_config.proto package listenerv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" duration "github.com/golang/protobuf/ptypes/duration" @@ -26,7 +27,7 @@ const ( ) // Configuration specific to the UDP QUIC listener. -// [#next-free-field: 8] +// [#next-free-field: 10] type QuicProtocolOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -53,14 +54,23 @@ type QuicProtocolOptions struct { // The actual number of packets to read in total by the UDP listener is also // bound by 6000, regardless of this field or how many connections there are. PacketsToReadToConnectionCountRatio *wrappers.UInt32Value `protobuf:"bytes,5,opt,name=packets_to_read_to_connection_count_ratio,json=packetsToReadToConnectionCountRatio,proto3" json:"packets_to_read_to_connection_count_ratio,omitempty"` - // Configure which implementation of `quic::QuicCryptoClientStreamBase` to be used for this listener. + // Configure which implementation of ``quic::QuicCryptoClientStreamBase`` to be used for this listener. // If not specified the :ref:`QUICHE default one configured by ` will be used. // [#extension-category: envoy.quic.server.crypto_stream] CryptoStreamConfig *v3.TypedExtensionConfig `protobuf:"bytes,6,opt,name=crypto_stream_config,json=cryptoStreamConfig,proto3" json:"crypto_stream_config,omitempty"` - // Configure which implementation of `quic::ProofSource` to be used for this listener. + // Configure which implementation of ``quic::ProofSource`` to be used for this listener. // If not specified the :ref:`default one configured by ` will be used. 
// [#extension-category: envoy.quic.proof_source] ProofSourceConfig *v3.TypedExtensionConfig `protobuf:"bytes,7,opt,name=proof_source_config,json=proofSourceConfig,proto3" json:"proof_source_config,omitempty"` + // Config which implementation of ``quic::ConnectionIdGeneratorInterface`` to be used for this listener. + // If not specified the :ref:`default one configured by ` will be used. + // [#extension-category: envoy.quic.connection_id_generator] + ConnectionIdGeneratorConfig *v3.TypedExtensionConfig `protobuf:"bytes,8,opt,name=connection_id_generator_config,json=connectionIdGeneratorConfig,proto3" json:"connection_id_generator_config,omitempty"` + // Configure the server's preferred address to advertise so that client can migrate to it. See :ref:`example ` which configures a pair of v4 and v6 preferred addresses. + // The current QUICHE implementation will advertise only one of the preferred IPv4 and IPv6 addresses based on the address family the client initially connects with, and only if the client is also QUICHE-based. + // If not specified, Envoy will not advertise any server's preferred address. 
+ // [#extension-category: envoy.quic.server_preferred_address] + ServerPreferredAddressConfig *v3.TypedExtensionConfig `protobuf:"bytes,9,opt,name=server_preferred_address_config,json=serverPreferredAddressConfig,proto3" json:"server_preferred_address_config,omitempty"` } func (x *QuicProtocolOptions) Reset() { @@ -144,6 +154,20 @@ func (x *QuicProtocolOptions) GetProofSourceConfig() *v3.TypedExtensionConfig { return nil } +func (x *QuicProtocolOptions) GetConnectionIdGeneratorConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.ConnectionIdGeneratorConfig + } + return nil +} + +func (x *QuicProtocolOptions) GetServerPreferredAddressConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.ServerPreferredAddressConfig + } + return nil +} + var File_envoy_config_listener_v3_quic_config_proto protoreflect.FileDescriptor var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{ @@ -162,65 +186,82 @@ var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{ 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb6, 0x05, - 0x0a, 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, - 0x74, 0x69, 
0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, 0x63, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x13, 0x71, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x12, 0x53, 0x0a, 0x18, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x68, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x16, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x42, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, - 0x61, 0x67, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x7d, 0x0a, 0x29, 0x70, - 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x5f, 0x74, 
0x6f, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, - 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x23, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x54, 0x6f, - 0x52, 0x65, 0x61, 0x64, 0x54, 0x6f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x5c, 0x0a, 0x14, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5a, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x6f, - 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 
0x65, 0x6e, - 0x65, 0x72, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x8f, 0x01, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, - 0x33, 0x42, 0x0f, 0x51, 0x75, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, - 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, + 0x07, 0x0a, 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, + 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x13, 0x71, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x12, 0x53, 0x0a, 0x18, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x68, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x16, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x42, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, + 0x6c, 0x61, 0x67, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x7d, 0x0a, 0x29, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x5f, 0x74, 0x6f, 
0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, + 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x23, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x54, + 0x6f, 0x52, 0x65, 0x61, 0x64, 0x54, 0x6f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x5c, 0x0a, 0x14, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5a, 0x0a, 0x13, 0x70, 0x72, 0x6f, + 0x6f, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6f, 0x0a, 0x1e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, + 
0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1b, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xd2, 0xc6, 0xa4, + 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x8f, 0x01, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x42, 0x0f, 0x51, 0x75, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -252,11 +293,13 @@ var file_envoy_config_listener_v3_quic_config_proto_depIdxs = []int32{ 4, // 4: envoy.config.listener.v3.QuicProtocolOptions.packets_to_read_to_connection_count_ratio:type_name -> google.protobuf.UInt32Value 5, // 5: envoy.config.listener.v3.QuicProtocolOptions.crypto_stream_config:type_name -> envoy.config.core.v3.TypedExtensionConfig 5, // 6: envoy.config.listener.v3.QuicProtocolOptions.proof_source_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 7, // [7:7] is the sub-list for method output_type - 7, // [7:7] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 5, // 7: envoy.config.listener.v3.QuicProtocolOptions.connection_id_generator_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 5, // 8: envoy.config.listener.v3.QuicProtocolOptions.server_preferred_address_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name } func init() { file_envoy_config_listener_v3_quic_config_proto_init() } diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go index 4eb1a95fa7..ba21cb6cbd 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go @@ -246,6 +246,64 @@ func (m *QuicProtocolOptions) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetConnectionIdGeneratorConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionIdGeneratorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionIdGeneratorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionIdGeneratorConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "ConnectionIdGeneratorConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetServerPreferredAddressConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ServerPreferredAddressConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
QuicProtocolOptionsValidationError{ + field: "ServerPreferredAddressConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetServerPreferredAddressConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "ServerPreferredAddressConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return QuicProtocolOptionsMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go index 8c0e03e14d..4bcd978e06 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/listener/v3/udp_listener_config.proto package listenerv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go index 2e08c98319..ace02175b8 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/metrics/v3/metrics_service.proto package metricsv3 @@ -24,7 +24,60 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink +// HistogramEmitMode is used to configure which metric types should be emitted for histograms. +type HistogramEmitMode int32 + +const ( + // Emit Histogram and Summary metric types. + HistogramEmitMode_SUMMARY_AND_HISTOGRAM HistogramEmitMode = 0 + // Emit only Summary metric types. + HistogramEmitMode_SUMMARY HistogramEmitMode = 1 + // Emit only Histogram metric types. + HistogramEmitMode_HISTOGRAM HistogramEmitMode = 2 +) + +// Enum value maps for HistogramEmitMode. +var ( + HistogramEmitMode_name = map[int32]string{ + 0: "SUMMARY_AND_HISTOGRAM", + 1: "SUMMARY", + 2: "HISTOGRAM", + } + HistogramEmitMode_value = map[string]int32{ + "SUMMARY_AND_HISTOGRAM": 0, + "SUMMARY": 1, + "HISTOGRAM": 2, + } +) + +func (x HistogramEmitMode) Enum() *HistogramEmitMode { + p := new(HistogramEmitMode) + *p = x + return p +} + +func (x HistogramEmitMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HistogramEmitMode) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_metrics_v3_metrics_service_proto_enumTypes[0].Descriptor() +} + +func (HistogramEmitMode) Type() protoreflect.EnumType { + return &file_envoy_config_metrics_v3_metrics_service_proto_enumTypes[0] +} + +func (x HistogramEmitMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HistogramEmitMode.Descriptor instead. 
+func (HistogramEmitMode) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_metrics_service_proto_rawDescGZIP(), []int{0} +} + +// Metrics Service is configured as a built-in ``envoy.stat_sinks.metrics_service`` :ref:`StatsSink // `. This opaque configuration will be used to create // Metrics Service. // @@ -39,6 +92,7 @@ const ( // transport_api_version: V3 // // [#extension: envoy.stat_sinks.metrics_service] +// [#next-free-field: 6] type MetricsServiceConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -58,6 +112,8 @@ type MetricsServiceConfig struct { // and the tag extracted name will be used instead of the full name, which may contain values used by the tag // extractor or additional tags added during stats creation. EmitTagsAsLabels bool `protobuf:"varint,4,opt,name=emit_tags_as_labels,json=emitTagsAsLabels,proto3" json:"emit_tags_as_labels,omitempty"` + // Specify which metrics types to emit for histograms. Defaults to SUMMARY_AND_HISTOGRAM. 
+ HistogramEmitMode HistogramEmitMode `protobuf:"varint,5,opt,name=histogram_emit_mode,json=histogramEmitMode,proto3,enum=envoy.config.metrics.v3.HistogramEmitMode" json:"histogram_emit_mode,omitempty"` } func (x *MetricsServiceConfig) Reset() { @@ -120,6 +176,13 @@ func (x *MetricsServiceConfig) GetEmitTagsAsLabels() bool { return false } +func (x *MetricsServiceConfig) GetHistogramEmitMode() HistogramEmitMode { + if x != nil { + return x.HistogramEmitMode + } + return HistogramEmitMode_SUMMARY_AND_HISTOGRAM +} + var File_envoy_config_metrics_v3_metrics_service_proto protoreflect.FileDescriptor var file_envoy_config_metrics_v3_metrics_service_proto_rawDesc = []byte{ @@ -140,7 +203,7 @@ var file_envoy_config_metrics_v3_metrics_service_proto_rawDesc = []byte{ 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x81, 0x03, 0x0a, 0x14, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe7, 0x03, 0x0a, 0x14, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, @@ -161,20 +224,31 @@ var file_envoy_config_metrics_v3_metrics_service_proto_rawDesc = []byte{ 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x13, 0x65, 0x6d, 0x69, 0x74, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x6d, 0x69, 0x74, 0x54, 0x61, 0x67, 0x73, 0x41, 0x73, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 
0x0a, 0x2c, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x90, 0x01, 0x0a, 0x25, 0x69, - 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x64, 0x0a, 0x13, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, + 0x61, 0x6d, 0x5f, 0x65, 0x6d, 0x69, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x45, 0x6d, 0x69, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x11, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, + 0x72, 0x61, 0x6d, 0x45, 0x6d, 0x69, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x33, 0x9a, 0xc5, 0x88, + 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, + 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x45, 0x6d, 0x69, + 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, + 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x00, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, + 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x02, 0x42, 0x90, 0x01, 0x0a, + 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -189,22 +263,25 @@ func file_envoy_config_metrics_v3_metrics_service_proto_rawDescGZIP() []byte { return file_envoy_config_metrics_v3_metrics_service_proto_rawDescData } +var file_envoy_config_metrics_v3_metrics_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_envoy_config_metrics_v3_metrics_service_proto_msgTypes = 
make([]protoimpl.MessageInfo, 1) var file_envoy_config_metrics_v3_metrics_service_proto_goTypes = []interface{}{ - (*MetricsServiceConfig)(nil), // 0: envoy.config.metrics.v3.MetricsServiceConfig - (*v3.GrpcService)(nil), // 1: envoy.config.core.v3.GrpcService - (v3.ApiVersion)(0), // 2: envoy.config.core.v3.ApiVersion - (*wrappers.BoolValue)(nil), // 3: google.protobuf.BoolValue + (HistogramEmitMode)(0), // 0: envoy.config.metrics.v3.HistogramEmitMode + (*MetricsServiceConfig)(nil), // 1: envoy.config.metrics.v3.MetricsServiceConfig + (*v3.GrpcService)(nil), // 2: envoy.config.core.v3.GrpcService + (v3.ApiVersion)(0), // 3: envoy.config.core.v3.ApiVersion + (*wrappers.BoolValue)(nil), // 4: google.protobuf.BoolValue } var file_envoy_config_metrics_v3_metrics_service_proto_depIdxs = []int32{ - 1, // 0: envoy.config.metrics.v3.MetricsServiceConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService - 2, // 1: envoy.config.metrics.v3.MetricsServiceConfig.transport_api_version:type_name -> envoy.config.core.v3.ApiVersion - 3, // 2: envoy.config.metrics.v3.MetricsServiceConfig.report_counters_as_deltas:type_name -> google.protobuf.BoolValue - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 2, // 0: envoy.config.metrics.v3.MetricsServiceConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService + 3, // 1: envoy.config.metrics.v3.MetricsServiceConfig.transport_api_version:type_name -> envoy.config.core.v3.ApiVersion + 4, // 2: envoy.config.metrics.v3.MetricsServiceConfig.report_counters_as_deltas:type_name -> google.protobuf.BoolValue + 0, // 3: envoy.config.metrics.v3.MetricsServiceConfig.histogram_emit_mode:type_name -> envoy.config.metrics.v3.HistogramEmitMode + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list 
for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_envoy_config_metrics_v3_metrics_service_proto_init() } @@ -231,13 +308,14 @@ func file_envoy_config_metrics_v3_metrics_service_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_metrics_v3_metrics_service_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 1, NumExtensions: 0, NumServices: 0, }, GoTypes: file_envoy_config_metrics_v3_metrics_service_proto_goTypes, DependencyIndexes: file_envoy_config_metrics_v3_metrics_service_proto_depIdxs, + EnumInfos: file_envoy_config_metrics_v3_metrics_service_proto_enumTypes, MessageInfos: file_envoy_config_metrics_v3_metrics_service_proto_msgTypes, }.Build() File_envoy_config_metrics_v3_metrics_service_proto = out.File diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go index ea40f23c0b..b91adeefdb 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go @@ -143,6 +143,17 @@ func (m *MetricsServiceConfig) validate(all bool) error { // no validation rules for EmitTagsAsLabels + if _, ok := HistogramEmitMode_name[int32(m.GetHistogramEmitMode())]; !ok { + err := MetricsServiceConfigValidationError{ + field: "HistogramEmitMode", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + if len(errors) > 0 { return MetricsServiceConfigMultiError(errors) } diff 
--git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go index 75ce5985c8..6f9d035d3a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/metrics/v3/stats.proto package metricsv3 @@ -11,7 +11,7 @@ import ( v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -92,7 +92,7 @@ func (m *StatsSink) GetConfigType() isStatsSink_ConfigType { return nil } -func (x *StatsSink) GetTypedConfig() *any.Any { +func (x *StatsSink) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*StatsSink_TypedConfig); ok { return x.TypedConfig } @@ -104,7 +104,7 @@ type isStatsSink_ConfigType interface { } type StatsSink_TypedConfig struct { - TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*StatsSink_TypedConfig) isStatsSink_ConfigType() {} @@ -115,10 +115,11 @@ type StatsConfig struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Each stat name is iteratively processed 
through these tag specifiers. - // When a tag is matched, the first capture group is removed from the name so - // later :ref:`TagSpecifiers ` cannot match that - // same portion of the match. + // Each stat name is independently processed through these tag specifiers. When a tag is + // matched, the first capture group is not immediately removed from the name, so later + // :ref:`TagSpecifiers ` can also match that + // same portion of the match. After all tag matching is complete, a tag-extracted version of + // the name is produced and is used in stats sinks that represent tags, such as Prometheus. StatsTags []*TagSpecifier `protobuf:"bytes,1,rep,name=stats_tags,json=statsTags,proto3" json:"stats_tags,omitempty"` // Use all default tag regexes specified in Envoy. These can be combined with // custom tags specified in :ref:`stats_tags @@ -312,7 +313,7 @@ type isStatsMatcher_StatsMatcher interface { } type StatsMatcher_RejectAll struct { - // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all + // If ``reject_all`` is true, then all stats are disabled. If ``reject_all`` is false, then all // stats are enabled. RejectAll bool `protobuf:"varint,1,opt,name=reject_all,json=rejectAll,proto3,oneof"` } @@ -355,7 +356,11 @@ type TagSpecifier struct { // // .. note:: // - // It is invalid to specify the same tag name twice in a config. + // A stat name may be spelled in such a way that it matches two different + // tag extractors for the same tag name. In that case, all but one of the + // tag values will be dropped. It is not specified which tag value will be + // retained. The extraction will only occur for one of the extractors, and + // only the matched extraction will be removed from the tag name. 
TagName string `protobuf:"bytes,1,opt,name=tag_name,json=tagName,proto3" json:"tag_name,omitempty"` // Types that are assignable to TagValue: // *TagSpecifier_Regex @@ -470,18 +475,18 @@ type TagSpecifier_Regex struct { // } // ] // - // The two regexes of the specifiers will be processed in the definition order. + // The two regexes of the specifiers will be processed from the elaborated + // stat name. // - // The first regex will remove ``ios.``, leaving the tag extracted name - // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag - // ``envoy.http_user_agent`` will be added with tag value ``ios``. + // The first regex will save ``ios.`` as the tag value for ``envoy.http_user_agent``. It will + // leave it in the name for potential matching with additional tag specifiers. After all tag + // specifiers are processed the tags will be removed from the name. // - // The second regex will remove ``connection_manager_1.`` from the tag - // extracted name produced by the first regex - // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving - // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag - // ``envoy.http_conn_manager_prefix`` will be added with the tag value - // ``connection_manager_1``. + // The second regex will populate tag ``envoy.http_conn_manager_prefix`` with value + // ``connection_manager_1.``, based on the original stat name. + // + // As a final step, the matched tags are removed, leaving + // ``http.user_agent.downstream_cx_total`` as the tag extracted name. Regex string `protobuf:"bytes,2,opt,name=regex,proto3,oneof"` } @@ -501,7 +506,7 @@ type HistogramBucketSettings struct { unknownFields protoimpl.UnknownFields // The stats that this rule applies to. The match is applied to the original stat name - // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. + // before tag-extraction, for example ``cluster.exampleclustername.upstream_cx_length_ms``. 
Match *v3.StringMatcher `protobuf:"bytes,1,opt,name=match,proto3" json:"match,omitempty"` // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. // The order of the buckets does not matter. @@ -554,7 +559,7 @@ func (x *HistogramBucketSettings) GetBuckets() []float64 { return nil } -// Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support +// Stats configuration proto schema for built-in ``envoy.stat_sinks.statsd`` sink. This sink does not support // tagged metrics. // [#extension: envoy.stat_sinks.statsd] type StatsdSink struct { @@ -677,7 +682,7 @@ func (*StatsdSink_Address) isStatsdSink_StatsdSpecifier() {} func (*StatsdSink_TcpClusterName) isStatsdSink_StatsdSpecifier() {} -// Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. +// Stats configuration proto schema for built-in ``envoy.stat_sinks.dog_statsd`` sink. // The sink emits stats with `DogStatsD `_ // compatible tags. Tags are configurable via :ref:`StatsConfig // `. @@ -774,7 +779,7 @@ type DogStatsdSink_Address struct { func (*DogStatsdSink_Address) isDogStatsdSink_DogStatsdSpecifier() {} -// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. +// Stats configuration proto schema for built-in ``envoy.stat_sinks.hystrix`` sink. // The sink emits stats in `text/event-stream // `_ // formatted stream for use by `Hystrix dashboard @@ -796,7 +801,7 @@ type HystrixSink struct { // in the process). The sink then outputs the aggregate statistics across the // current rolling window to the event stream(s). // - // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets + // ``rolling_window(ms)`` = ``stats_flush_interval(ms)`` * ``num_of_buckets`` // // More detailed explanation can be found in `Hystrix wiki // `_. 
@@ -1005,7 +1010,7 @@ var file_envoy_config_metrics_v3_stats_proto_goTypes = []interface{}{ (*StatsdSink)(nil), // 5: envoy.config.metrics.v3.StatsdSink (*DogStatsdSink)(nil), // 6: envoy.config.metrics.v3.DogStatsdSink (*HystrixSink)(nil), // 7: envoy.config.metrics.v3.HystrixSink - (*any.Any)(nil), // 8: google.protobuf.Any + (*any1.Any)(nil), // 8: google.protobuf.Any (*wrappers.BoolValue)(nil), // 9: google.protobuf.BoolValue (*v3.ListStringMatcher)(nil), // 10: envoy.type.matcher.v3.ListStringMatcher (*v3.StringMatcher)(nil), // 11: envoy.type.matcher.v3.StringMatcher diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go index 94ddc9b15c..a0fdd84c98 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go @@ -59,9 +59,18 @@ func (m *StatsSink) validate(all bool) error { // no validation rules for Name - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *StatsSink_TypedConfig: + if v == nil { + err := StatsSinkValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -92,6 +101,8 @@ func (m *StatsSink) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -418,12 +429,33 @@ func (m *StatsMatcher) validate(all bool) error { var errors []error - switch m.StatsMatcher.(type) { - + oneofStatsMatcherPresent := false + switch v := m.StatsMatcher.(type) { case *StatsMatcher_RejectAll: + if v == nil { + err := StatsMatcherValidationError{ + field: 
"StatsMatcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsMatcherPresent = true // no validation rules for RejectAll - case *StatsMatcher_ExclusionList: + if v == nil { + err := StatsMatcherValidationError{ + field: "StatsMatcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsMatcherPresent = true if all { switch v := interface{}(m.GetExclusionList()).(type) { @@ -455,6 +487,17 @@ func (m *StatsMatcher) validate(all bool) error { } case *StatsMatcher_InclusionList: + if v == nil { + err := StatsMatcherValidationError{ + field: "StatsMatcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsMatcherPresent = true if all { switch v := interface{}(m.GetInclusionList()).(type) { @@ -486,6 +529,9 @@ func (m *StatsMatcher) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofStatsMatcherPresent { err := StatsMatcherValidationError{ field: "StatsMatcher", reason: "value is required", @@ -494,7 +540,6 @@ func (m *StatsMatcher) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -598,9 +643,18 @@ func (m *TagSpecifier) validate(all bool) error { // no validation rules for TagName - switch m.TagValue.(type) { - + switch v := m.TagValue.(type) { case *TagSpecifier_Regex: + if v == nil { + err := TagSpecifierValidationError{ + field: "TagValue", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if len(m.GetRegex()) > 1024 { err := TagSpecifierValidationError{ @@ -614,8 +668,19 @@ func (m *TagSpecifier) validate(all bool) error { } case *TagSpecifier_FixedValue: + if v == nil { + err := TagSpecifierValidationError{ + field: "TagValue", + reason: "oneof value cannot be a typed-nil", + } + if !all { + 
return err + } + errors = append(errors, err) + } // no validation rules for FixedValue - + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -903,9 +968,20 @@ func (m *StatsdSink) validate(all bool) error { // no validation rules for Prefix - switch m.StatsdSpecifier.(type) { - + oneofStatsdSpecifierPresent := false + switch v := m.StatsdSpecifier.(type) { case *StatsdSink_Address: + if v == nil { + err := StatsdSinkValidationError{ + field: "StatsdSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsdSpecifierPresent = true if all { switch v := interface{}(m.GetAddress()).(type) { @@ -937,9 +1013,22 @@ func (m *StatsdSink) validate(all bool) error { } case *StatsdSink_TcpClusterName: + if v == nil { + err := StatsdSinkValidationError{ + field: "StatsdSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsdSpecifierPresent = true // no validation rules for TcpClusterName - default: + _ = v // ensures v is used + } + if !oneofStatsdSpecifierPresent { err := StatsdSinkValidationError{ field: "StatsdSpecifier", reason: "value is required", @@ -948,7 +1037,6 @@ func (m *StatsdSink) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -1067,9 +1155,20 @@ func (m *DogStatsdSink) validate(all bool) error { } - switch m.DogStatsdSpecifier.(type) { - + oneofDogStatsdSpecifierPresent := false + switch v := m.DogStatsdSpecifier.(type) { case *DogStatsdSink_Address: + if v == nil { + err := DogStatsdSinkValidationError{ + field: "DogStatsdSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofDogStatsdSpecifierPresent = true if all { switch v := interface{}(m.GetAddress()).(type) { @@ -1101,6 +1200,9 @@ func (m *DogStatsdSink) validate(all bool) error { } default: + _ = v // 
ensures v is used + } + if !oneofDogStatsdSpecifierPresent { err := DogStatsdSinkValidationError{ field: "DogStatsdSpecifier", reason: "value is required", @@ -1109,7 +1211,6 @@ func (m *DogStatsdSink) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go index 0151996295..670801abb4 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/overload/v3/overload.proto package overloadv3 @@ -10,7 +10,7 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" duration "github.com/golang/protobuf/ptypes/duration" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -152,7 +152,7 @@ func (m *ResourceMonitor) GetConfigType() isResourceMonitor_ConfigType { return nil } -func (x *ResourceMonitor) GetTypedConfig() *any.Any { +func (x *ResourceMonitor) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*ResourceMonitor_TypedConfig); ok { return x.TypedConfig } @@ -164,7 +164,7 @@ type isResourceMonitor_ConfigType interface { } type ResourceMonitor_TypedConfig struct { - TypedConfig *any.Any 
`protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*ResourceMonitor_TypedConfig) isResourceMonitor_ConfigType() {} @@ -225,7 +225,7 @@ type ScaledTrigger struct { // If the resource pressure is greater than this value, the trigger will be in the // :ref:`scaling ` state with value - // `(pressure - scaling_threshold) / (saturation_threshold - scaling_threshold)`. + // ``(pressure - scaling_threshold) / (saturation_threshold - scaling_threshold)``. ScalingThreshold float64 `protobuf:"fixed64,1,opt,name=scaling_threshold,json=scalingThreshold,proto3" json:"scaling_threshold,omitempty"` // If the resource pressure is greater than this value, the trigger will enter saturation. SaturationThreshold float64 `protobuf:"fixed64,2,opt,name=saturation_threshold,json=saturationThreshold,proto3" json:"saturation_threshold,omitempty"` @@ -427,11 +427,15 @@ type OverloadAction struct { // DNS to ensure uniqueness. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // A set of triggers for this action. The state of the action is the maximum - // state of all triggers, which can be scaling between 0 and 1 or saturated. Listeners - // are notified when the overload action changes state. + // state of all triggers, which can be scalar values between 0 and 1 or + // saturated. Listeners are notified when the overload action changes state. + // An overload manager action can only have one trigger for a given resource + // e.g. :ref:`Trigger.name + // ` must be unique + // in this list. Triggers []*Trigger `protobuf:"bytes,2,rep,name=triggers,proto3" json:"triggers,omitempty"` // Configuration for the action being instantiated. 
- TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` + TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` } func (x *OverloadAction) Reset() { @@ -480,13 +484,81 @@ func (x *OverloadAction) GetTriggers() []*Trigger { return nil } -func (x *OverloadAction) GetTypedConfig() *any.Any { +func (x *OverloadAction) GetTypedConfig() *any1.Any { if x != nil { return x.TypedConfig } return nil } +// A point within the connection or request lifecycle that provides context on +// whether to shed load at that given stage for the current entity at the +// point. +type LoadShedPoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is just a well-known string for the LoadShedPoint. + // Deployment specific LoadShedPoints e.g. within a custom extension should + // be prefixed by the company / deployment name to avoid colliding with any + // open source LoadShedPoints. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A set of triggers for this LoadShedPoint. The LoadShedPoint will use the + // the maximum state of all triggers, which can be scalar values between 0 and + // 1 or saturated. A LoadShedPoint can only have one trigger for a given + // resource e.g. :ref:`Trigger.name + // ` must be unique in + // this list. 
+ Triggers []*Trigger `protobuf:"bytes,2,rep,name=triggers,proto3" json:"triggers,omitempty"` +} + +func (x *LoadShedPoint) Reset() { + *x = LoadShedPoint{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadShedPoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadShedPoint) ProtoMessage() {} + +func (x *LoadShedPoint) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadShedPoint.ProtoReflect.Descriptor instead. +func (*LoadShedPoint) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{6} +} + +func (x *LoadShedPoint) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *LoadShedPoint) GetTriggers() []*Trigger { + if x != nil { + return x.Triggers + } + return nil +} + // Configuration for which accounts the WatermarkBuffer Factories should // track. 
type BufferFactoryConfig struct { @@ -514,7 +586,7 @@ type BufferFactoryConfig struct { func (x *BufferFactoryConfig) Reset() { *x = BufferFactoryConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[6] + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -527,7 +599,7 @@ func (x *BufferFactoryConfig) String() string { func (*BufferFactoryConfig) ProtoMessage() {} func (x *BufferFactoryConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[6] + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -540,7 +612,7 @@ func (x *BufferFactoryConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use BufferFactoryConfig.ProtoReflect.Descriptor instead. func (*BufferFactoryConfig) Descriptor() ([]byte, []int) { - return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{6} + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{7} } func (x *BufferFactoryConfig) GetMinimumAccountToTrackPowerOfTwo() uint32 { @@ -550,6 +622,7 @@ func (x *BufferFactoryConfig) GetMinimumAccountToTrackPowerOfTwo() uint32 { return 0 } +// [#next-free-field: 6] type OverloadManager struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -561,6 +634,8 @@ type OverloadManager struct { ResourceMonitors []*ResourceMonitor `protobuf:"bytes,2,rep,name=resource_monitors,json=resourceMonitors,proto3" json:"resource_monitors,omitempty"` // The set of overload actions. Actions []*OverloadAction `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"` + // The set of load shed points. 
+ LoadshedPoints []*LoadShedPoint `protobuf:"bytes,5,rep,name=loadshed_points,json=loadshedPoints,proto3" json:"loadshed_points,omitempty"` // Configuration for buffer factory. BufferFactoryConfig *BufferFactoryConfig `protobuf:"bytes,4,opt,name=buffer_factory_config,json=bufferFactoryConfig,proto3" json:"buffer_factory_config,omitempty"` } @@ -568,7 +643,7 @@ type OverloadManager struct { func (x *OverloadManager) Reset() { *x = OverloadManager{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[7] + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -581,7 +656,7 @@ func (x *OverloadManager) String() string { func (*OverloadManager) ProtoMessage() {} func (x *OverloadManager) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[7] + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -594,7 +669,7 @@ func (x *OverloadManager) ProtoReflect() protoreflect.Message { // Deprecated: Use OverloadManager.ProtoReflect.Descriptor instead. 
func (*OverloadManager) Descriptor() ([]byte, []int) { - return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{7} + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{8} } func (x *OverloadManager) GetRefreshInterval() *duration.Duration { @@ -618,6 +693,13 @@ func (x *OverloadManager) GetActions() []*OverloadAction { return nil } +func (x *OverloadManager) GetLoadshedPoints() []*LoadShedPoint { + if x != nil { + return x.LoadshedPoints + } + return nil +} + func (x *OverloadManager) GetBufferFactoryConfig() *BufferFactoryConfig { if x != nil { return x.BufferFactoryConfig @@ -641,7 +723,7 @@ type ScaleTimersOverloadActionConfig_ScaleTimer struct { func (x *ScaleTimersOverloadActionConfig_ScaleTimer) Reset() { *x = ScaleTimersOverloadActionConfig_ScaleTimer{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[8] + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -654,7 +736,7 @@ func (x *ScaleTimersOverloadActionConfig_ScaleTimer) String() string { func (*ScaleTimersOverloadActionConfig_ScaleTimer) ProtoMessage() {} func (x *ScaleTimersOverloadActionConfig_ScaleTimer) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[8] + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -831,49 +913,61 @@ var file_envoy_config_overload_v3_overload_proto_rawDesc = []byte{ 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x70, 0x0a, 0x13, 0x42, 0x75, 0x66, 0x66, 0x65, 
0x72, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, - 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x25, 0x6d, 0x69, 0x6e, 0x69, 0x6d, - 0x75, 0x6d, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x6f, 0x5f, 0x74, 0x72, - 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x77, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x77, 0x6f, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0x38, 0x28, - 0x0a, 0x52, 0x1f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x54, 0x6f, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x4f, 0x66, 0x54, - 0x77, 0x6f, 0x22, 0x96, 0x03, 0x0a, 0x0f, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, - 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x72, 0x65, 0x66, - 0x72, 0x65, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x60, 0x0a, 0x11, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, - 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, - 0x6f, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x10, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x42, - 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 
0x6f, - 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, - 0x6f, 0x61, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x61, 0x0a, 0x15, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x66, 0x61, 0x63, - 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x75, 0x66, - 0x66, 0x65, 0x72, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x13, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, - 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4f, 0x76, 0x65, 0x72, - 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x42, 0x8d, 0x01, 0x0a, 0x26, - 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, - 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, - 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6f, 0x76, 0x65, - 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2f, 0x76, 0x33, 0x3b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, - 0x64, 0x76, 0x33, 0xba, 0x80, 
0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x22, 0x75, 0x0a, 0x0d, 0x4c, 0x6f, 0x61, 0x64, 0x53, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x69, 0x6e, + 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, + 0x0a, 0x08, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x69, 0x67, + 0x67, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x22, 0x70, 0x0a, 0x13, 0x42, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, + 0x0a, 0x25, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x74, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x77, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, + 0x42, 0x06, 0x2a, 0x04, 0x18, 0x38, 0x28, 0x0a, 0x52, 0x1f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x6f, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x50, + 0x6f, 0x77, 0x65, 0x72, 0x4f, 0x66, 0x54, 0x77, 0x6f, 0x22, 0xe8, 0x03, 0x0a, 0x0f, 0x4f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x44, 0x0a, + 0x10, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x72, 
0x65, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x12, 0x60, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, + 0x02, 0x08, 0x01, 0x52, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x42, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, + 0x33, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x50, 0x0a, 0x0f, 0x6c, 0x6f, 0x61, + 0x64, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, + 0x61, 0x64, 0x53, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x6c, 0x6f, 0x61, + 0x64, 0x73, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x15, 0x62, + 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x46, 0x61, 
0x63, 0x74, + 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x62, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x34, + 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x32, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x42, 0x8d, 0x01, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x42, + 0x0d, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2f, 0x76, + 0x33, 0x3b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -889,7 +983,7 @@ func file_envoy_config_overload_v3_overload_proto_rawDescGZIP() []byte { } var file_envoy_config_overload_v3_overload_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_config_overload_v3_overload_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_envoy_config_overload_v3_overload_proto_msgTypes = make([]protoimpl.MessageInfo, 10) var file_envoy_config_overload_v3_overload_proto_goTypes = []interface{}{ (ScaleTimersOverloadActionConfig_TimerType)(0), // 0: 
envoy.config.overload.v3.ScaleTimersOverloadActionConfig.TimerType (*ResourceMonitor)(nil), // 1: envoy.config.overload.v3.ResourceMonitor @@ -898,32 +992,35 @@ var file_envoy_config_overload_v3_overload_proto_goTypes = []interface{}{ (*Trigger)(nil), // 4: envoy.config.overload.v3.Trigger (*ScaleTimersOverloadActionConfig)(nil), // 5: envoy.config.overload.v3.ScaleTimersOverloadActionConfig (*OverloadAction)(nil), // 6: envoy.config.overload.v3.OverloadAction - (*BufferFactoryConfig)(nil), // 7: envoy.config.overload.v3.BufferFactoryConfig - (*OverloadManager)(nil), // 8: envoy.config.overload.v3.OverloadManager - (*ScaleTimersOverloadActionConfig_ScaleTimer)(nil), // 9: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer - (*any.Any)(nil), // 10: google.protobuf.Any - (*duration.Duration)(nil), // 11: google.protobuf.Duration - (*v3.Percent)(nil), // 12: envoy.type.v3.Percent + (*LoadShedPoint)(nil), // 7: envoy.config.overload.v3.LoadShedPoint + (*BufferFactoryConfig)(nil), // 8: envoy.config.overload.v3.BufferFactoryConfig + (*OverloadManager)(nil), // 9: envoy.config.overload.v3.OverloadManager + (*ScaleTimersOverloadActionConfig_ScaleTimer)(nil), // 10: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer + (*any1.Any)(nil), // 11: google.protobuf.Any + (*duration.Duration)(nil), // 12: google.protobuf.Duration + (*v3.Percent)(nil), // 13: envoy.type.v3.Percent } var file_envoy_config_overload_v3_overload_proto_depIdxs = []int32{ - 10, // 0: envoy.config.overload.v3.ResourceMonitor.typed_config:type_name -> google.protobuf.Any + 11, // 0: envoy.config.overload.v3.ResourceMonitor.typed_config:type_name -> google.protobuf.Any 2, // 1: envoy.config.overload.v3.Trigger.threshold:type_name -> envoy.config.overload.v3.ThresholdTrigger 3, // 2: envoy.config.overload.v3.Trigger.scaled:type_name -> envoy.config.overload.v3.ScaledTrigger - 9, // 3: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.timer_scale_factors:type_name -> 
envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer + 10, // 3: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.timer_scale_factors:type_name -> envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer 4, // 4: envoy.config.overload.v3.OverloadAction.triggers:type_name -> envoy.config.overload.v3.Trigger - 10, // 5: envoy.config.overload.v3.OverloadAction.typed_config:type_name -> google.protobuf.Any - 11, // 6: envoy.config.overload.v3.OverloadManager.refresh_interval:type_name -> google.protobuf.Duration - 1, // 7: envoy.config.overload.v3.OverloadManager.resource_monitors:type_name -> envoy.config.overload.v3.ResourceMonitor - 6, // 8: envoy.config.overload.v3.OverloadManager.actions:type_name -> envoy.config.overload.v3.OverloadAction - 7, // 9: envoy.config.overload.v3.OverloadManager.buffer_factory_config:type_name -> envoy.config.overload.v3.BufferFactoryConfig - 0, // 10: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer.timer:type_name -> envoy.config.overload.v3.ScaleTimersOverloadActionConfig.TimerType - 11, // 11: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer.min_timeout:type_name -> google.protobuf.Duration - 12, // 12: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer.min_scale:type_name -> envoy.type.v3.Percent - 13, // [13:13] is the sub-list for method output_type - 13, // [13:13] is the sub-list for method input_type - 13, // [13:13] is the sub-list for extension type_name - 13, // [13:13] is the sub-list for extension extendee - 0, // [0:13] is the sub-list for field type_name + 11, // 5: envoy.config.overload.v3.OverloadAction.typed_config:type_name -> google.protobuf.Any + 4, // 6: envoy.config.overload.v3.LoadShedPoint.triggers:type_name -> envoy.config.overload.v3.Trigger + 12, // 7: envoy.config.overload.v3.OverloadManager.refresh_interval:type_name -> google.protobuf.Duration + 1, // 8: 
envoy.config.overload.v3.OverloadManager.resource_monitors:type_name -> envoy.config.overload.v3.ResourceMonitor + 6, // 9: envoy.config.overload.v3.OverloadManager.actions:type_name -> envoy.config.overload.v3.OverloadAction + 7, // 10: envoy.config.overload.v3.OverloadManager.loadshed_points:type_name -> envoy.config.overload.v3.LoadShedPoint + 8, // 11: envoy.config.overload.v3.OverloadManager.buffer_factory_config:type_name -> envoy.config.overload.v3.BufferFactoryConfig + 0, // 12: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer.timer:type_name -> envoy.config.overload.v3.ScaleTimersOverloadActionConfig.TimerType + 12, // 13: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer.min_timeout:type_name -> google.protobuf.Duration + 13, // 14: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer.min_scale:type_name -> envoy.type.v3.Percent + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_envoy_config_overload_v3_overload_proto_init() } @@ -1005,7 +1102,7 @@ func file_envoy_config_overload_v3_overload_proto_init() { } } file_envoy_config_overload_v3_overload_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BufferFactoryConfig); i { + switch v := v.(*LoadShedPoint); i { case 0: return &v.state case 1: @@ -1017,7 +1114,7 @@ func file_envoy_config_overload_v3_overload_proto_init() { } } file_envoy_config_overload_v3_overload_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OverloadManager); i { + switch v := v.(*BufferFactoryConfig); i { case 0: return &v.state case 1: @@ -1029,6 +1126,18 @@ func file_envoy_config_overload_v3_overload_proto_init() { } } 
file_envoy_config_overload_v3_overload_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OverloadManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ScaleTimersOverloadActionConfig_ScaleTimer); i { case 0: return &v.state @@ -1048,7 +1157,7 @@ func file_envoy_config_overload_v3_overload_proto_init() { (*Trigger_Threshold)(nil), (*Trigger_Scaled)(nil), } - file_envoy_config_overload_v3_overload_proto_msgTypes[8].OneofWrappers = []interface{}{ + file_envoy_config_overload_v3_overload_proto_msgTypes[9].OneofWrappers = []interface{}{ (*ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout)(nil), (*ScaleTimersOverloadActionConfig_ScaleTimer_MinScale)(nil), } @@ -1058,7 +1167,7 @@ func file_envoy_config_overload_v3_overload_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_overload_v3_overload_proto_rawDesc, NumEnums: 1, - NumMessages: 9, + NumMessages: 10, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go index c351a2ae08..ea01c9c108 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go @@ -68,9 +68,18 @@ func (m *ResourceMonitor) validate(all bool) error { errors = append(errors, err) } - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *ResourceMonitor_TypedConfig: + if v == nil { + err := 
ResourceMonitorValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -101,6 +110,8 @@ func (m *ResourceMonitor) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -446,9 +457,20 @@ func (m *Trigger) validate(all bool) error { errors = append(errors, err) } - switch m.TriggerOneof.(type) { - + oneofTriggerOneofPresent := false + switch v := m.TriggerOneof.(type) { case *Trigger_Threshold: + if v == nil { + err := TriggerValidationError{ + field: "TriggerOneof", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTriggerOneofPresent = true if all { switch v := interface{}(m.GetThreshold()).(type) { @@ -480,6 +502,17 @@ func (m *Trigger) validate(all bool) error { } case *Trigger_Scaled: + if v == nil { + err := TriggerValidationError{ + field: "TriggerOneof", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTriggerOneofPresent = true if all { switch v := interface{}(m.GetScaled()).(type) { @@ -511,6 +544,9 @@ func (m *Trigger) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofTriggerOneofPresent { err := TriggerValidationError{ field: "TriggerOneof", reason: "value is required", @@ -519,7 +555,6 @@ func (m *Trigger) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -932,6 +967,162 @@ var _ interface { ErrorName() string } = OverloadActionValidationError{} +// Validate checks the field values on LoadShedPoint with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *LoadShedPoint) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LoadShedPoint with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LoadShedPointMultiError, or +// nil if none found. +func (m *LoadShedPoint) ValidateAll() error { + return m.validate(true) +} + +func (m *LoadShedPoint) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := LoadShedPointValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetTriggers()) < 1 { + err := LoadShedPointValidationError{ + field: "Triggers", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetTriggers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LoadShedPointValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LoadShedPointValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LoadShedPointValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return LoadShedPointMultiError(errors) + } + + return nil +} + +// LoadShedPointMultiError is an error wrapping 
multiple validation errors +// returned by LoadShedPoint.ValidateAll() if the designated constraints +// aren't met. +type LoadShedPointMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LoadShedPointMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LoadShedPointMultiError) AllErrors() []error { return m } + +// LoadShedPointValidationError is the validation error returned by +// LoadShedPoint.Validate if the designated constraints aren't met. +type LoadShedPointValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LoadShedPointValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LoadShedPointValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LoadShedPointValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LoadShedPointValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LoadShedPointValidationError) ErrorName() string { return "LoadShedPointValidationError" } + +// Error satisfies the builtin error interface +func (e LoadShedPointValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLoadShedPoint.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LoadShedPointValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LoadShedPointValidationError{} + // Validate checks the field values on BufferFactoryConfig with the rules // defined in the proto definition for this message. If any rules are // violated, the first error encountered is returned, or nil if there are no violations. @@ -1175,6 +1366,40 @@ func (m *OverloadManager) validate(all bool) error { } + for idx, item := range m.GetLoadshedPoints() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("LoadshedPoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("LoadshedPoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverloadManagerValidationError{ + field: fmt.Sprintf("LoadshedPoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + if all { switch v := interface{}(m.GetBufferFactoryConfig()).(type) { case interface{ ValidateAll() error }: @@ 
-1309,7 +1534,7 @@ func (m *ScaleTimersOverloadActionConfig_ScaleTimer) validate(all bool) error { if _, ok := _ScaleTimersOverloadActionConfig_ScaleTimer_Timer_NotInLookup[m.GetTimer()]; ok { err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ field: "Timer", - reason: "value must not be in list [0]", + reason: "value must not be in list [UNSPECIFIED]", } if !all { return err @@ -1328,9 +1553,20 @@ func (m *ScaleTimersOverloadActionConfig_ScaleTimer) validate(all bool) error { errors = append(errors, err) } - switch m.OverloadAdjust.(type) { - + oneofOverloadAdjustPresent := false + switch v := m.OverloadAdjust.(type) { case *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout: + if v == nil { + err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "OverloadAdjust", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOverloadAdjustPresent = true if all { switch v := interface{}(m.GetMinTimeout()).(type) { @@ -1362,6 +1598,17 @@ func (m *ScaleTimersOverloadActionConfig_ScaleTimer) validate(all bool) error { } case *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale: + if v == nil { + err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "OverloadAdjust", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOverloadAdjustPresent = true if all { switch v := interface{}(m.GetMinScale()).(type) { @@ -1393,6 +1640,9 @@ func (m *ScaleTimersOverloadActionConfig_ScaleTimer) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofOverloadAdjustPresent { err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ field: "OverloadAdjust", reason: "value is required", @@ -1401,7 +1651,6 @@ func (m *ScaleTimersOverloadActionConfig_ScaleTimer) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go index ecb01f161c..dd98cec635 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/rbac/v3/rbac.proto package rbacv3 @@ -38,7 +38,7 @@ const ( // The policies deny access to principals. The rest are allowed. This is block-list style // access control. RBAC_DENY RBAC_Action = 1 - // The policies set the `access_log_hint` dynamic metadata key based on if requests match. + // The policies set the ``access_log_hint`` dynamic metadata key based on if requests match. // All requests are allowed. RBAC_LOG RBAC_Action = 2 ) @@ -84,22 +84,79 @@ func (RBAC_Action) EnumDescriptor() ([]byte, []int) { return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0, 0} } +// Deny and allow here refer to RBAC decisions, not actions. +type RBAC_AuditLoggingOptions_AuditCondition int32 + +const ( + // Never audit. + RBAC_AuditLoggingOptions_NONE RBAC_AuditLoggingOptions_AuditCondition = 0 + // Audit when RBAC denies the request. + RBAC_AuditLoggingOptions_ON_DENY RBAC_AuditLoggingOptions_AuditCondition = 1 + // Audit when RBAC allows the request. + RBAC_AuditLoggingOptions_ON_ALLOW RBAC_AuditLoggingOptions_AuditCondition = 2 + // Audit whether RBAC allows or denies the request. + RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW RBAC_AuditLoggingOptions_AuditCondition = 3 +) + +// Enum value maps for RBAC_AuditLoggingOptions_AuditCondition. 
+var ( + RBAC_AuditLoggingOptions_AuditCondition_name = map[int32]string{ + 0: "NONE", + 1: "ON_DENY", + 2: "ON_ALLOW", + 3: "ON_DENY_AND_ALLOW", + } + RBAC_AuditLoggingOptions_AuditCondition_value = map[string]int32{ + "NONE": 0, + "ON_DENY": 1, + "ON_ALLOW": 2, + "ON_DENY_AND_ALLOW": 3, + } +) + +func (x RBAC_AuditLoggingOptions_AuditCondition) Enum() *RBAC_AuditLoggingOptions_AuditCondition { + p := new(RBAC_AuditLoggingOptions_AuditCondition) + *p = x + return p +} + +func (x RBAC_AuditLoggingOptions_AuditCondition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RBAC_AuditLoggingOptions_AuditCondition) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_rbac_v3_rbac_proto_enumTypes[1].Descriptor() +} + +func (RBAC_AuditLoggingOptions_AuditCondition) Type() protoreflect.EnumType { + return &file_envoy_config_rbac_v3_rbac_proto_enumTypes[1] +} + +func (x RBAC_AuditLoggingOptions_AuditCondition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RBAC_AuditLoggingOptions_AuditCondition.Descriptor instead. +func (RBAC_AuditLoggingOptions_AuditCondition) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0, 0, 0} +} + // Role Based Access Control (RBAC) provides service-level and method-level access control for a -// service. Requests are allowed or denied based on the `action` and whether a matching policy is +// service. Requests are allowed or denied based on the ``action`` and whether a matching policy is // found. For instance, if the action is ALLOW and a matching policy is found the request should be // allowed. // // RBAC can also be used to make access logging decisions by communicating with access loggers // through dynamic metadata. 
When the action is LOG and at least one policy matches, the -// `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating +// ``access_log_hint`` value in the shared key namespace 'envoy.common' is set to ``true`` indicating // the request should be logged. // // Here is an example of RBAC configuration. It has two policies: // -// * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so +// * Service account ``cluster.local/ns/default/sa/admin`` has full access to the service, and so // does "cluster.local/ns/default/sa/superuser". // -// * Any user can read ("GET") the service at paths with prefix "/products", so long as the +// * Any user can read (``GET``) the service at paths with prefix ``/products``, so long as the // destination port is either 80 or 443. // // .. code-block:: yaml @@ -143,19 +200,24 @@ type RBAC struct { // // Actions: // - // * ALLOW: Allows the request if and only if there is a policy that matches + // * ``ALLOW``: Allows the request if and only if there is a policy that matches // the request. - // * DENY: Allows the request if and only if there are no policies that + // * ``DENY``: Allows the request if and only if there are no policies that // match the request. - // * LOG: Allows all requests. If at least one policy matches, the dynamic - // metadata key `access_log_hint` is set to the value `true` under the shared - // key namespace 'envoy.common'. If no policies match, it is set to `false`. + // * ``LOG``: Allows all requests. If at least one policy matches, the dynamic + // metadata key ``access_log_hint`` is set to the value ``true`` under the shared + // key namespace ``envoy.common``. If no policies match, it is set to ``false``. // Other actions do not modify this key. // Action RBAC_Action `protobuf:"varint,1,opt,name=action,proto3,enum=envoy.config.rbac.v3.RBAC_Action" json:"action,omitempty"` // Maps from policy name to policy. 
A match occurs when at least one policy matches the request. // The policies are evaluated in lexicographic order of the policy name. Policies map[string]*Policy `protobuf:"bytes,2,rep,name=policies,proto3" json:"policies,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Audit logging options that include the condition for audit logging to happen + // and audit logger configurations. + // + // [#not-implemented-hide:] + AuditLoggingOptions *RBAC_AuditLoggingOptions `protobuf:"bytes,3,opt,name=audit_logging_options,json=auditLoggingOptions,proto3" json:"audit_logging_options,omitempty"` } func (x *RBAC) Reset() { @@ -204,6 +266,13 @@ func (x *RBAC) GetPolicies() map[string]*Policy { return nil } +func (x *RBAC) GetAuditLoggingOptions() *RBAC_AuditLoggingOptions { + if x != nil { + return x.AuditLoggingOptions + } + return nil +} + // Policy specifies a role and the principals that are assigned/denied the role. // A policy matches if and only if at least one of its permissions match the // action taking place AND at least one of its principals match the downstream @@ -215,11 +284,11 @@ type Policy struct { // Required. The set of permissions that define a role. Each permission is // matched with OR semantics. To match all actions for this policy, a single - // Permission with the `any` field set to true should be used. + // Permission with the ``any`` field set to true should be used. Permissions []*Permission `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` // Required. The set of principals that are assigned/denied the role based on // “action”. Each principal is matched with OR semantics. To match all - // downstreams for this policy, a single Principal with the `any` field set to + // downstreams for this policy, a single Principal with the ``any`` field set to // true should be used. 
Principals []*Principal `protobuf:"bytes,2,rep,name=principals,proto3" json:"principals,omitempty"` // An optional symbolic expression specifying an access control @@ -461,7 +530,7 @@ type Permission_Any struct { type Permission_Header struct { // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. - // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` + // Note: the pseudo-header :path includes the query and fragment string. Use the ``url_path`` // field if you want to match the URL path without the query and fragment string. Header *v3.HeaderMatcher `protobuf:"bytes,4,opt,name=header,proto3,oneof"` } @@ -493,8 +562,8 @@ type Permission_Metadata struct { type Permission_NotRule struct { // Negates matching the provided permission. For instance, if the value of - // `not_rule` would match, this permission would not match. Conversely, if - // the value of `not_rule` would not match, this permission would match. + // ``not_rule`` would match, this permission would not match. Conversely, if + // the value of ``not_rule`` would not match, this permission would match. NotRule *Permission `protobuf:"bytes,8,opt,name=not_rule,json=notRule,proto3,oneof"` } @@ -508,7 +577,7 @@ type Permission_RequestedServerName struct { // as explained below. // // * If the :ref:`TLS Inspector ` - // filter is not added, and if a `FilterChainMatch` is not defined for + // filter is not added, and if a ``FilterChainMatch`` is not defined for // the :ref:`server name // `, // a TLS connection's requested SNI server name will be treated as if it @@ -554,7 +623,7 @@ func (*Permission_Matcher) isPermission_Rule() {} // Principal defines an identity or a group of identities for a downstream // subject. 
-// [#next-free-field: 12] +// [#next-free-field: 13] type Principal struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -571,6 +640,7 @@ type Principal struct { // *Principal_Header // *Principal_UrlPath // *Principal_Metadata + // *Principal_FilterState // *Principal_NotId Identifier isPrincipal_Identifier `protobuf_oneof:"identifier"` } @@ -685,6 +755,13 @@ func (x *Principal) GetMetadata() *v31.MetadataMatcher { return nil } +func (x *Principal) GetFilterState() *v31.FilterStateMatcher { + if x, ok := x.GetIdentifier().(*Principal_FilterState); ok { + return x.FilterState + } + return nil +} + func (x *Principal) GetNotId() *Principal { if x, ok := x.GetIdentifier().(*Principal_NotId); ok { return x.NotId @@ -722,6 +799,11 @@ type Principal_SourceIp struct { // A CIDR block that describes the downstream IP. // This address will honor proxy protocol, but will not honor XFF. // + // This field is deprecated; either use :ref:`remote_ip + // ` for the same + // behavior, or use + // :ref:`direct_remote_ip `. + // // Deprecated: Do not use. SourceIp *v32.CidrRange `protobuf:"bytes,5,opt,name=source_ip,json=sourceIp,proto3,oneof"` } @@ -748,7 +830,7 @@ type Principal_RemoteIp struct { type Principal_Header struct { // A header (or pseudo-header such as :path or :method) on the incoming HTTP // request. Only available for HTTP request. Note: the pseudo-header :path - // includes the query and fragment string. Use the `url_path` field if you + // includes the query and fragment string. Use the ``url_path`` field if you // want to match the URL path without the query and fragment string. Header *v3.HeaderMatcher `protobuf:"bytes,6,opt,name=header,proto3,oneof"` } @@ -763,10 +845,15 @@ type Principal_Metadata struct { Metadata *v31.MetadataMatcher `protobuf:"bytes,7,opt,name=metadata,proto3,oneof"` } +type Principal_FilterState struct { + // Identifies the principal using a filter state object. 
+ FilterState *v31.FilterStateMatcher `protobuf:"bytes,12,opt,name=filter_state,json=filterState,proto3,oneof"` +} + type Principal_NotId struct { // Negates matching the provided principal. For instance, if the value of - // `not_id` would match, this principal would not match. Conversely, if the - // value of `not_id` would not match, this principal would match. + // ``not_id`` would match, this principal would not match. Conversely, if the + // value of ``not_id`` would not match, this principal would match. NotId *Principal `protobuf:"bytes,8,opt,name=not_id,json=notId,proto3,oneof"` } @@ -790,6 +877,8 @@ func (*Principal_UrlPath) isPrincipal_Identifier() {} func (*Principal_Metadata) isPrincipal_Identifier() {} +func (*Principal_FilterState) isPrincipal_Identifier() {} + func (*Principal_NotId) isPrincipal_Identifier() {} // Action defines the result of allowance or denial when a request matches the matcher. @@ -805,17 +894,17 @@ type Action struct { // // Actions: // - // * ALLOW: If the request gets matched on ALLOW, it is permitted. - // * DENY: If the request gets matched on DENY, it is not permitted. - // * LOG: If the request gets matched on LOG, it is permitted. Besides, the - // dynamic metadata key `access_log_hint` under the shared key namespace - // 'envoy.common' will be set to the value `true`. - // * If the request cannot get matched, it will fallback to DENY. + // * ``ALLOW``: If the request gets matched on ALLOW, it is permitted. + // * ``DENY``: If the request gets matched on DENY, it is not permitted. + // * ``LOG``: If the request gets matched on LOG, it is permitted. Besides, the + // dynamic metadata key ``access_log_hint`` under the shared key namespace + // ``envoy.common`` will be set to the value ``true``. + // * If the request cannot get matched, it will fallback to ``DENY``. 
// // Log behavior: // // If the RBAC matcher contains at least one LOG action, the dynamic - // metadata key `access_log_hint` will be set based on if the request + // metadata key ``access_log_hint`` will be set based on if the request // get matched on the LOG action. // Action RBAC_Action `protobuf:"varint,2,opt,name=action,proto3,enum=envoy.config.rbac.v3.RBAC_Action" json:"action,omitempty"` @@ -867,7 +956,129 @@ func (x *Action) GetAction() RBAC_Action { return RBAC_ALLOW } -// Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, +type RBAC_AuditLoggingOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Condition for the audit logging to happen. + // If this condition is met, all the audit loggers configured here will be invoked. + // + // [#not-implemented-hide:] + AuditCondition RBAC_AuditLoggingOptions_AuditCondition `protobuf:"varint,1,opt,name=audit_condition,json=auditCondition,proto3,enum=envoy.config.rbac.v3.RBAC_AuditLoggingOptions_AuditCondition" json:"audit_condition,omitempty"` + // Configurations for RBAC-based authorization audit loggers. 
+ // + // [#not-implemented-hide:] + LoggerConfigs []*RBAC_AuditLoggingOptions_AuditLoggerConfig `protobuf:"bytes,2,rep,name=logger_configs,json=loggerConfigs,proto3" json:"logger_configs,omitempty"` +} + +func (x *RBAC_AuditLoggingOptions) Reset() { + *x = RBAC_AuditLoggingOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RBAC_AuditLoggingOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RBAC_AuditLoggingOptions) ProtoMessage() {} + +func (x *RBAC_AuditLoggingOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RBAC_AuditLoggingOptions.ProtoReflect.Descriptor instead. +func (*RBAC_AuditLoggingOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *RBAC_AuditLoggingOptions) GetAuditCondition() RBAC_AuditLoggingOptions_AuditCondition { + if x != nil { + return x.AuditCondition + } + return RBAC_AuditLoggingOptions_NONE +} + +func (x *RBAC_AuditLoggingOptions) GetLoggerConfigs() []*RBAC_AuditLoggingOptions_AuditLoggerConfig { + if x != nil { + return x.LoggerConfigs + } + return nil +} + +// [#not-implemented-hide:] +type RBAC_AuditLoggingOptions_AuditLoggerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Typed logger configuration. 
+ // + // [#extension-category: envoy.rbac.audit_loggers] + AuditLogger *v32.TypedExtensionConfig `protobuf:"bytes,1,opt,name=audit_logger,json=auditLogger,proto3" json:"audit_logger,omitempty"` + // If true, when the logger is not supported, the data plane will not NACK but simply ignore it. + IsOptional bool `protobuf:"varint,2,opt,name=is_optional,json=isOptional,proto3" json:"is_optional,omitempty"` +} + +func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) Reset() { + *x = RBAC_AuditLoggingOptions_AuditLoggerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RBAC_AuditLoggingOptions_AuditLoggerConfig) ProtoMessage() {} + +func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RBAC_AuditLoggingOptions_AuditLoggerConfig.ProtoReflect.Descriptor instead. +func (*RBAC_AuditLoggingOptions_AuditLoggerConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) GetAuditLogger() *v32.TypedExtensionConfig { + if x != nil { + return x.AuditLogger + } + return nil +} + +func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) GetIsOptional() bool { + if x != nil { + return x.IsOptional + } + return false +} + +// Used in the ``and_rules`` and ``or_rules`` fields in the ``rule`` oneof. Depending on the context, // each are applied with the associated behavior. 
type Permission_Set struct { state protoimpl.MessageState @@ -880,7 +1091,7 @@ type Permission_Set struct { func (x *Permission_Set) Reset() { *x = Permission_Set{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[6] + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -893,7 +1104,7 @@ func (x *Permission_Set) String() string { func (*Permission_Set) ProtoMessage() {} func (x *Permission_Set) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[6] + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -916,7 +1127,7 @@ func (x *Permission_Set) GetRules() []*Permission { return nil } -// Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. +// Used in the ``and_ids`` and ``or_ids`` fields in the ``identifier`` oneof. // Depending on the context, each are applied with the associated behavior. 
type Principal_Set struct { state protoimpl.MessageState @@ -929,7 +1140,7 @@ type Principal_Set struct { func (x *Principal_Set) Reset() { *x = Principal_Set{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[7] + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -942,7 +1153,7 @@ func (x *Principal_Set) String() string { func (*Principal_Set) ProtoMessage() {} func (x *Principal_Set) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[7] + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -980,7 +1191,7 @@ type Principal_Authenticated struct { func (x *Principal_Authenticated) Reset() { *x = Principal_Authenticated{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[8] + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -993,7 +1204,7 @@ func (x *Principal_Authenticated) String() string { func (*Principal_Authenticated) ProtoMessage() {} func (x *Principal_Authenticated) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[8] + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1030,217 +1241,259 @@ var file_envoy_config_rbac_v3_rbac_proto_rawDesc = []byte{ 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x24, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x61, 0x74, - 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, - 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, - 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, - 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, - 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x52, 0x42, 0x41, 0x43, 0x12, 0x43, - 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, - 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, - 0x43, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x1a, 0x59, 0x0a, 0x0d, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, + 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 
0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, + 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 
0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xe1, 0x06, 0x0a, 0x04, 0x52, 0x42, 0x41, 0x43, 0x12, 0x43, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, + 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x12, 0x62, 0x0a, 0x15, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, + 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, + 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x13, 0x61, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xc4, 0x03, 0x0a, 0x13, 0x41, 0x75, 0x64, 0x69, + 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x70, 0x0a, 0x0f, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 
0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, + 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x0e, 0x61, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x67, 0x0a, 0x0e, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, + 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, + 0x6f, 0x67, 0x67, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x6c, 0x6f, 0x67, + 0x67, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0x83, 0x01, 0x0a, 0x11, 0x41, + 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x4d, 0x0a, 0x0c, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0b, 0x61, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, + 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x22, 0x4c, 0x0a, 0x0e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 
0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, + 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x4e, 0x5f, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x4f, 0x4e, 0x5f, 0x44, 0x45, + 0x4e, 0x59, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x03, 0x1a, 0x59, + 0x0a, 0x0d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x32, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x26, 0x0a, 0x06, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x08, + 0x0a, 0x04, 0x44, 0x45, 0x4e, 0x59, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x4c, 0x4f, 0x47, 0x10, + 0x02, 0x3a, 0x20, 0x9a, 0xc5, 0x88, 0x1e, 0x1b, 0x0a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x52, + 0x42, 0x41, 0x43, 0x22, 0x93, 0x03, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4c, + 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, + 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x0a, + 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 
0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, + 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0a, 0x70, 0x72, 0x69, + 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x73, 0x12, 0x5a, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x42, 0x1c, 0xf2, 0x98, 0xfe, 0x8f, + 0x05, 0x16, 0x12, 0x14, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x70, 0x0a, 0x11, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, + 0x64, 0x45, 0x78, 0x70, 0x72, 0x42, 0x1c, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14, 0x65, + 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, - 0x76, 0x33, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x26, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 
0x09, - 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x45, 0x4e, - 0x59, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x4c, 0x4f, 0x47, 0x10, 0x02, 0x3a, 0x20, 0x9a, 0xc5, - 0x88, 0x1e, 0x1b, 0x0a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x22, 0x93, - 0x03, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4c, 0x0a, 0x0b, 0x70, 0x65, 0x72, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, - 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x6e, 0x63, - 0x69, 0x70, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, + 0x76, 0x32, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xda, 0x07, 0x0a, 0x0a, 0x50, 0x65, + 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x09, 0x61, 0x6e, 0x64, 0x5f, + 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, - 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x42, 0x08, 0xfa, 0x42, - 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, - 0x6c, 0x73, 0x12, 0x5a, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x45, 0x78, 0x70, 0x72, 
0x42, 0x1c, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14, 0x65, - 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x70, - 0x0a, 0x11, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, - 0x42, 0x1c, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x10, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x22, 0xda, 0x07, 0x0a, 0x0a, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x09, 0x61, 0x6e, 0x64, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x08, - 0x61, 0x6e, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x08, 0x6f, 0x72, 0x5f, 0x72, - 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, - 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 
0x6e, 0x2e, 0x53, 0x65, 0x74, - 0x48, 0x00, 0x52, 0x07, 0x6f, 0x72, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x61, - 0x6e, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x6a, 0x02, 0x08, - 0x01, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, 0x79, 0x12, 0x3e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, - 0x70, 0x61, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x07, 0x75, 0x72, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x12, 0x48, 0x0a, 0x0e, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x70, 0x12, 0x36, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, - 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x48, 0x00, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x51, 0x0a, 0x16, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, - 0x72, 
0x61, 0x6e, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, - 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x3d, 0x0a, 0x08, 0x6e, 0x6f, 0x74, 0x5f, 0x72, 0x75, 0x6c, 0x65, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x07, 0x6e, 0x6f, 0x74, 0x52, - 0x75, 0x6c, 0x65, 0x12, 0x5a, 0x0a, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x46, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 
0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x07, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, 0x73, 0x0a, 0x03, 0x53, 0x65, 0x74, 0x12, 0x40, - 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, - 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, - 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x3a, 0x26, 0x9a, 0xc5, - 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x12, 0x03, 0xf8, 0x42, - 0x01, 0x22, 0x9b, 0x08, 0x0a, 0x09, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, - 0x3e, 0x0a, 0x07, 0x61, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, - 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x06, 0x61, 0x6e, 0x64, 0x49, 0x64, 0x73, 0x12, - 0x3c, 0x0a, 0x06, 0x6f, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, - 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 
0x70, 0x61, 0x6c, - 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x05, 0x6f, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1b, 0x0a, - 0x03, 0x61, 0x6e, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x6a, - 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, 0x79, 0x12, 0x55, 0x0a, 0x0d, 0x61, 0x75, - 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, - 0x61, 0x6c, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x48, 0x00, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x4b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, - 0x2e, 0x30, 0x48, 0x00, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x4b, - 0x0a, 0x10, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, - 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x0e, 0x64, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, 0x70, 0x12, 0x3e, 0x0a, 0x09, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 
0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, - 0x00, 0x52, 0x08, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, 0x70, 0x12, 0x3e, 0x0a, 0x06, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x08, 0x75, - 0x72, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, + 0x74, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x41, 0x0a, + 0x08, 0x6f, 0x72, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x07, 0x6f, 0x72, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x1b, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x6a, 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, 0x79, 0x12, 0x3e, 0x0a, + 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3f, 0x0a, + 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 
0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x12, 0x48, + 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x70, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, + 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x70, 0x12, 0x36, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0d, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x48, 0x00, 0x52, + 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x51, 0x0a, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x14, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3d, 0x0a, 0x08, 0x6e, 0x6f, 
0x74, + 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, + 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, + 0x07, 0x6e, 0x6f, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x5a, 0x0a, 0x15, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, 0x73, 0x0a, 0x03, + 0x53, 0x65, 0x74, 0x12, 0x40, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x05, + 0x72, 0x75, 0x6c, 0x65, 0x73, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, + 0x76, 0x32, 0x2e, 0x50, 0x65, 
0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, + 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, + 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, + 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xeb, 0x08, 0x0a, 0x09, 0x50, 0x72, 0x69, 0x6e, 0x63, + 0x69, 0x70, 0x61, 0x6c, 0x12, 0x3e, 0x0a, 0x07, 0x61, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, + 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x06, 0x61, 0x6e, + 0x64, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x06, 0x6f, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, 0x6e, + 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x05, 0x6f, 0x72, 0x49, + 0x64, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x6a, 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, 0x79, 0x12, + 0x55, 0x0a, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, + 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x4b, 0x0a, 0x09, 0x73, 
0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x69, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x48, 0x00, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x49, 0x70, 0x12, 0x4b, 0x0a, 0x10, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, 0x70, + 0x12, 0x3e, 0x0a, 0x09, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, 0x70, + 0x12, 0x3e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x3f, 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, + 0x61, 
0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x50, 0x61, 0x74, + 0x68, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x48, 0x00, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x12, 0x44, 0x0a, 0x08, - 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x38, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, - 0x70, 0x61, 0x6c, 0x48, 0x00, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x64, 0x1a, 0x6d, 0x0a, 0x03, - 0x53, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 
0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, - 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x69, 0x64, 0x73, - 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, - 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x1a, 0x97, 0x01, 0x0a, 0x0d, - 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x4b, 0x0a, - 0x0e, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d, 0x70, 0x72, 0x69, - 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, - 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, + 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x48, 0x00, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, + 0x64, 0x1a, 0x6d, 0x0a, 0x03, 0x53, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 
0x72, 0x69, + 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, + 0x52, 0x03, 0x69, 0x64, 0x73, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, + 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, 0x74, + 0x1a, 0x97, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x12, 0x4b, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x52, 0x0d, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x3a, + 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, + 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, + 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, - 0x6c, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, + 0x6c, 0x42, 0x11, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, + 0x03, 0xf8, 0x42, 0x01, 0x22, 0x60, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, + 0x0a, 0x04, 0x6e, 0x61, 
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, - 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x42, 0x11, 0x0a, 0x0a, - 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, - 0x60, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, - 0x41, 0x43, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x7d, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x52, 0x62, 0x61, 0x63, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2f, 0x76, - 0x33, 0x3b, 0x72, 0x62, 0x61, 0x63, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x76, 0x33, 
0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x7d, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x52, 0x62, + 0x61, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, + 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x62, 0x61, 0x63, 0x76, 0x33, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1255,66 +1508,75 @@ func file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP() []byte { return file_envoy_config_rbac_v3_rbac_proto_rawDescData } -var file_envoy_config_rbac_v3_rbac_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_config_rbac_v3_rbac_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_envoy_config_rbac_v3_rbac_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_config_rbac_v3_rbac_proto_msgTypes = make([]protoimpl.MessageInfo, 11) var file_envoy_config_rbac_v3_rbac_proto_goTypes = []interface{}{ - (RBAC_Action)(0), // 0: envoy.config.rbac.v3.RBAC.Action - (*RBAC)(nil), // 1: envoy.config.rbac.v3.RBAC - (*Policy)(nil), // 2: envoy.config.rbac.v3.Policy - (*Permission)(nil), // 3: envoy.config.rbac.v3.Permission - (*Principal)(nil), // 4: envoy.config.rbac.v3.Principal - (*Action)(nil), // 5: envoy.config.rbac.v3.Action - nil, // 6: envoy.config.rbac.v3.RBAC.PoliciesEntry - (*Permission_Set)(nil), // 7: envoy.config.rbac.v3.Permission.Set - (*Principal_Set)(nil), // 8: 
envoy.config.rbac.v3.Principal.Set - (*Principal_Authenticated)(nil), // 9: envoy.config.rbac.v3.Principal.Authenticated - (*v1alpha1.Expr)(nil), // 10: google.api.expr.v1alpha1.Expr - (*v1alpha1.CheckedExpr)(nil), // 11: google.api.expr.v1alpha1.CheckedExpr - (*v3.HeaderMatcher)(nil), // 12: envoy.config.route.v3.HeaderMatcher - (*v31.PathMatcher)(nil), // 13: envoy.type.matcher.v3.PathMatcher - (*v32.CidrRange)(nil), // 14: envoy.config.core.v3.CidrRange - (*v33.Int32Range)(nil), // 15: envoy.type.v3.Int32Range - (*v31.MetadataMatcher)(nil), // 16: envoy.type.matcher.v3.MetadataMatcher - (*v31.StringMatcher)(nil), // 17: envoy.type.matcher.v3.StringMatcher - (*v32.TypedExtensionConfig)(nil), // 18: envoy.config.core.v3.TypedExtensionConfig + (RBAC_Action)(0), // 0: envoy.config.rbac.v3.RBAC.Action + (RBAC_AuditLoggingOptions_AuditCondition)(0), // 1: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditCondition + (*RBAC)(nil), // 2: envoy.config.rbac.v3.RBAC + (*Policy)(nil), // 3: envoy.config.rbac.v3.Policy + (*Permission)(nil), // 4: envoy.config.rbac.v3.Permission + (*Principal)(nil), // 5: envoy.config.rbac.v3.Principal + (*Action)(nil), // 6: envoy.config.rbac.v3.Action + (*RBAC_AuditLoggingOptions)(nil), // 7: envoy.config.rbac.v3.RBAC.AuditLoggingOptions + nil, // 8: envoy.config.rbac.v3.RBAC.PoliciesEntry + (*RBAC_AuditLoggingOptions_AuditLoggerConfig)(nil), // 9: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditLoggerConfig + (*Permission_Set)(nil), // 10: envoy.config.rbac.v3.Permission.Set + (*Principal_Set)(nil), // 11: envoy.config.rbac.v3.Principal.Set + (*Principal_Authenticated)(nil), // 12: envoy.config.rbac.v3.Principal.Authenticated + (*v1alpha1.Expr)(nil), // 13: google.api.expr.v1alpha1.Expr + (*v1alpha1.CheckedExpr)(nil), // 14: google.api.expr.v1alpha1.CheckedExpr + (*v3.HeaderMatcher)(nil), // 15: envoy.config.route.v3.HeaderMatcher + (*v31.PathMatcher)(nil), // 16: envoy.type.matcher.v3.PathMatcher + (*v32.CidrRange)(nil), // 17: 
envoy.config.core.v3.CidrRange + (*v33.Int32Range)(nil), // 18: envoy.type.v3.Int32Range + (*v31.MetadataMatcher)(nil), // 19: envoy.type.matcher.v3.MetadataMatcher + (*v31.StringMatcher)(nil), // 20: envoy.type.matcher.v3.StringMatcher + (*v32.TypedExtensionConfig)(nil), // 21: envoy.config.core.v3.TypedExtensionConfig + (*v31.FilterStateMatcher)(nil), // 22: envoy.type.matcher.v3.FilterStateMatcher } var file_envoy_config_rbac_v3_rbac_proto_depIdxs = []int32{ 0, // 0: envoy.config.rbac.v3.RBAC.action:type_name -> envoy.config.rbac.v3.RBAC.Action - 6, // 1: envoy.config.rbac.v3.RBAC.policies:type_name -> envoy.config.rbac.v3.RBAC.PoliciesEntry - 3, // 2: envoy.config.rbac.v3.Policy.permissions:type_name -> envoy.config.rbac.v3.Permission - 4, // 3: envoy.config.rbac.v3.Policy.principals:type_name -> envoy.config.rbac.v3.Principal - 10, // 4: envoy.config.rbac.v3.Policy.condition:type_name -> google.api.expr.v1alpha1.Expr - 11, // 5: envoy.config.rbac.v3.Policy.checked_condition:type_name -> google.api.expr.v1alpha1.CheckedExpr - 7, // 6: envoy.config.rbac.v3.Permission.and_rules:type_name -> envoy.config.rbac.v3.Permission.Set - 7, // 7: envoy.config.rbac.v3.Permission.or_rules:type_name -> envoy.config.rbac.v3.Permission.Set - 12, // 8: envoy.config.rbac.v3.Permission.header:type_name -> envoy.config.route.v3.HeaderMatcher - 13, // 9: envoy.config.rbac.v3.Permission.url_path:type_name -> envoy.type.matcher.v3.PathMatcher - 14, // 10: envoy.config.rbac.v3.Permission.destination_ip:type_name -> envoy.config.core.v3.CidrRange - 15, // 11: envoy.config.rbac.v3.Permission.destination_port_range:type_name -> envoy.type.v3.Int32Range - 16, // 12: envoy.config.rbac.v3.Permission.metadata:type_name -> envoy.type.matcher.v3.MetadataMatcher - 3, // 13: envoy.config.rbac.v3.Permission.not_rule:type_name -> envoy.config.rbac.v3.Permission - 17, // 14: envoy.config.rbac.v3.Permission.requested_server_name:type_name -> envoy.type.matcher.v3.StringMatcher - 18, // 15: 
envoy.config.rbac.v3.Permission.matcher:type_name -> envoy.config.core.v3.TypedExtensionConfig - 8, // 16: envoy.config.rbac.v3.Principal.and_ids:type_name -> envoy.config.rbac.v3.Principal.Set - 8, // 17: envoy.config.rbac.v3.Principal.or_ids:type_name -> envoy.config.rbac.v3.Principal.Set - 9, // 18: envoy.config.rbac.v3.Principal.authenticated:type_name -> envoy.config.rbac.v3.Principal.Authenticated - 14, // 19: envoy.config.rbac.v3.Principal.source_ip:type_name -> envoy.config.core.v3.CidrRange - 14, // 20: envoy.config.rbac.v3.Principal.direct_remote_ip:type_name -> envoy.config.core.v3.CidrRange - 14, // 21: envoy.config.rbac.v3.Principal.remote_ip:type_name -> envoy.config.core.v3.CidrRange - 12, // 22: envoy.config.rbac.v3.Principal.header:type_name -> envoy.config.route.v3.HeaderMatcher - 13, // 23: envoy.config.rbac.v3.Principal.url_path:type_name -> envoy.type.matcher.v3.PathMatcher - 16, // 24: envoy.config.rbac.v3.Principal.metadata:type_name -> envoy.type.matcher.v3.MetadataMatcher - 4, // 25: envoy.config.rbac.v3.Principal.not_id:type_name -> envoy.config.rbac.v3.Principal - 0, // 26: envoy.config.rbac.v3.Action.action:type_name -> envoy.config.rbac.v3.RBAC.Action - 2, // 27: envoy.config.rbac.v3.RBAC.PoliciesEntry.value:type_name -> envoy.config.rbac.v3.Policy - 3, // 28: envoy.config.rbac.v3.Permission.Set.rules:type_name -> envoy.config.rbac.v3.Permission - 4, // 29: envoy.config.rbac.v3.Principal.Set.ids:type_name -> envoy.config.rbac.v3.Principal - 17, // 30: envoy.config.rbac.v3.Principal.Authenticated.principal_name:type_name -> envoy.type.matcher.v3.StringMatcher - 31, // [31:31] is the sub-list for method output_type - 31, // [31:31] is the sub-list for method input_type - 31, // [31:31] is the sub-list for extension type_name - 31, // [31:31] is the sub-list for extension extendee - 0, // [0:31] is the sub-list for field type_name + 8, // 1: envoy.config.rbac.v3.RBAC.policies:type_name -> envoy.config.rbac.v3.RBAC.PoliciesEntry + 7, // 2: 
envoy.config.rbac.v3.RBAC.audit_logging_options:type_name -> envoy.config.rbac.v3.RBAC.AuditLoggingOptions + 4, // 3: envoy.config.rbac.v3.Policy.permissions:type_name -> envoy.config.rbac.v3.Permission + 5, // 4: envoy.config.rbac.v3.Policy.principals:type_name -> envoy.config.rbac.v3.Principal + 13, // 5: envoy.config.rbac.v3.Policy.condition:type_name -> google.api.expr.v1alpha1.Expr + 14, // 6: envoy.config.rbac.v3.Policy.checked_condition:type_name -> google.api.expr.v1alpha1.CheckedExpr + 10, // 7: envoy.config.rbac.v3.Permission.and_rules:type_name -> envoy.config.rbac.v3.Permission.Set + 10, // 8: envoy.config.rbac.v3.Permission.or_rules:type_name -> envoy.config.rbac.v3.Permission.Set + 15, // 9: envoy.config.rbac.v3.Permission.header:type_name -> envoy.config.route.v3.HeaderMatcher + 16, // 10: envoy.config.rbac.v3.Permission.url_path:type_name -> envoy.type.matcher.v3.PathMatcher + 17, // 11: envoy.config.rbac.v3.Permission.destination_ip:type_name -> envoy.config.core.v3.CidrRange + 18, // 12: envoy.config.rbac.v3.Permission.destination_port_range:type_name -> envoy.type.v3.Int32Range + 19, // 13: envoy.config.rbac.v3.Permission.metadata:type_name -> envoy.type.matcher.v3.MetadataMatcher + 4, // 14: envoy.config.rbac.v3.Permission.not_rule:type_name -> envoy.config.rbac.v3.Permission + 20, // 15: envoy.config.rbac.v3.Permission.requested_server_name:type_name -> envoy.type.matcher.v3.StringMatcher + 21, // 16: envoy.config.rbac.v3.Permission.matcher:type_name -> envoy.config.core.v3.TypedExtensionConfig + 11, // 17: envoy.config.rbac.v3.Principal.and_ids:type_name -> envoy.config.rbac.v3.Principal.Set + 11, // 18: envoy.config.rbac.v3.Principal.or_ids:type_name -> envoy.config.rbac.v3.Principal.Set + 12, // 19: envoy.config.rbac.v3.Principal.authenticated:type_name -> envoy.config.rbac.v3.Principal.Authenticated + 17, // 20: envoy.config.rbac.v3.Principal.source_ip:type_name -> envoy.config.core.v3.CidrRange + 17, // 21: 
envoy.config.rbac.v3.Principal.direct_remote_ip:type_name -> envoy.config.core.v3.CidrRange + 17, // 22: envoy.config.rbac.v3.Principal.remote_ip:type_name -> envoy.config.core.v3.CidrRange + 15, // 23: envoy.config.rbac.v3.Principal.header:type_name -> envoy.config.route.v3.HeaderMatcher + 16, // 24: envoy.config.rbac.v3.Principal.url_path:type_name -> envoy.type.matcher.v3.PathMatcher + 19, // 25: envoy.config.rbac.v3.Principal.metadata:type_name -> envoy.type.matcher.v3.MetadataMatcher + 22, // 26: envoy.config.rbac.v3.Principal.filter_state:type_name -> envoy.type.matcher.v3.FilterStateMatcher + 5, // 27: envoy.config.rbac.v3.Principal.not_id:type_name -> envoy.config.rbac.v3.Principal + 0, // 28: envoy.config.rbac.v3.Action.action:type_name -> envoy.config.rbac.v3.RBAC.Action + 1, // 29: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.audit_condition:type_name -> envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditCondition + 9, // 30: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.logger_configs:type_name -> envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditLoggerConfig + 3, // 31: envoy.config.rbac.v3.RBAC.PoliciesEntry.value:type_name -> envoy.config.rbac.v3.Policy + 21, // 32: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditLoggerConfig.audit_logger:type_name -> envoy.config.core.v3.TypedExtensionConfig + 4, // 33: envoy.config.rbac.v3.Permission.Set.rules:type_name -> envoy.config.rbac.v3.Permission + 5, // 34: envoy.config.rbac.v3.Principal.Set.ids:type_name -> envoy.config.rbac.v3.Principal + 20, // 35: envoy.config.rbac.v3.Principal.Authenticated.principal_name:type_name -> envoy.type.matcher.v3.StringMatcher + 36, // [36:36] is the sub-list for method output_type + 36, // [36:36] is the sub-list for method input_type + 36, // [36:36] is the sub-list for extension type_name + 36, // [36:36] is the sub-list for extension extendee + 0, // [0:36] is the sub-list for field type_name } func init() { file_envoy_config_rbac_v3_rbac_proto_init() } @@ -1383,8 
+1645,8 @@ func file_envoy_config_rbac_v3_rbac_proto_init() { return nil } } - file_envoy_config_rbac_v3_rbac_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Permission_Set); i { + file_envoy_config_rbac_v3_rbac_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RBAC_AuditLoggingOptions); i { case 0: return &v.state case 1: @@ -1396,7 +1658,7 @@ func file_envoy_config_rbac_v3_rbac_proto_init() { } } file_envoy_config_rbac_v3_rbac_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Principal_Set); i { + switch v := v.(*RBAC_AuditLoggingOptions_AuditLoggerConfig); i { case 0: return &v.state case 1: @@ -1408,6 +1670,30 @@ func file_envoy_config_rbac_v3_rbac_proto_init() { } } file_envoy_config_rbac_v3_rbac_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Permission_Set); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Principal_Set); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Principal_Authenticated); i { case 0: return &v.state @@ -1445,6 +1731,7 @@ func file_envoy_config_rbac_v3_rbac_proto_init() { (*Principal_Header)(nil), (*Principal_UrlPath)(nil), (*Principal_Metadata)(nil), + (*Principal_FilterState)(nil), (*Principal_NotId)(nil), } type x struct{} @@ -1452,8 +1739,8 @@ func file_envoy_config_rbac_v3_rbac_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_rbac_v3_rbac_proto_rawDesc, - NumEnums: 1, - NumMessages: 9, + NumEnums: 2, + 
NumMessages: 11, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go index 5cce0a84ee..f034cc682b 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go @@ -113,6 +113,35 @@ func (m *RBAC) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetAuditLoggingOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACValidationError{ + field: "AuditLoggingOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACValidationError{ + field: "AuditLoggingOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAuditLoggingOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACValidationError{ + field: "AuditLoggingOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return RBACMultiError(errors) } @@ -458,9 +487,20 @@ func (m *Permission) validate(all bool) error { var errors []error - switch m.Rule.(type) { - + oneofRulePresent := false + switch v := m.Rule.(type) { case *Permission_AndRules: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetAndRules()).(type) { @@ -492,6 +532,17 @@ 
func (m *Permission) validate(all bool) error { } case *Permission_OrRules: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetOrRules()).(type) { @@ -523,6 +574,17 @@ func (m *Permission) validate(all bool) error { } case *Permission_Any: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if m.GetAny() != true { err := PermissionValidationError{ @@ -536,6 +598,17 @@ func (m *Permission) validate(all bool) error { } case *Permission_Header: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetHeader()).(type) { @@ -567,6 +640,17 @@ func (m *Permission) validate(all bool) error { } case *Permission_UrlPath: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetUrlPath()).(type) { @@ -598,6 +682,17 @@ func (m *Permission) validate(all bool) error { } case *Permission_DestinationIp: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetDestinationIp()).(type) { @@ -629,6 +724,17 @@ func (m *Permission) validate(all bool) error { } case *Permission_DestinationPort: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: 
"oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if m.GetDestinationPort() > 65535 { err := PermissionValidationError{ @@ -642,6 +748,17 @@ func (m *Permission) validate(all bool) error { } case *Permission_DestinationPortRange: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetDestinationPortRange()).(type) { @@ -673,6 +790,17 @@ func (m *Permission) validate(all bool) error { } case *Permission_Metadata: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetMetadata()).(type) { @@ -704,6 +832,17 @@ func (m *Permission) validate(all bool) error { } case *Permission_NotRule: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetNotRule()).(type) { @@ -735,6 +874,17 @@ func (m *Permission) validate(all bool) error { } case *Permission_RequestedServerName: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetRequestedServerName()).(type) { @@ -766,6 +916,17 @@ func (m *Permission) validate(all bool) error { } case *Permission_Matcher: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + 
oneofRulePresent = true if all { switch v := interface{}(m.GetMatcher()).(type) { @@ -797,6 +958,9 @@ func (m *Permission) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofRulePresent { err := PermissionValidationError{ field: "Rule", reason: "value is required", @@ -805,7 +969,6 @@ func (m *Permission) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -907,9 +1070,20 @@ func (m *Principal) validate(all bool) error { var errors []error - switch m.Identifier.(type) { - + oneofIdentifierPresent := false + switch v := m.Identifier.(type) { case *Principal_AndIds: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true if all { switch v := interface{}(m.GetAndIds()).(type) { @@ -941,6 +1115,17 @@ func (m *Principal) validate(all bool) error { } case *Principal_OrIds: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true if all { switch v := interface{}(m.GetOrIds()).(type) { @@ -972,6 +1157,17 @@ func (m *Principal) validate(all bool) error { } case *Principal_Any: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true if m.GetAny() != true { err := PrincipalValidationError{ @@ -985,6 +1181,17 @@ func (m *Principal) validate(all bool) error { } case *Principal_Authenticated_: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true if all { 
switch v := interface{}(m.GetAuthenticated()).(type) { @@ -1016,6 +1223,17 @@ func (m *Principal) validate(all bool) error { } case *Principal_SourceIp: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true if all { switch v := interface{}(m.GetSourceIp()).(type) { @@ -1047,6 +1265,17 @@ func (m *Principal) validate(all bool) error { } case *Principal_DirectRemoteIp: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true if all { switch v := interface{}(m.GetDirectRemoteIp()).(type) { @@ -1078,6 +1307,17 @@ func (m *Principal) validate(all bool) error { } case *Principal_RemoteIp: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true if all { switch v := interface{}(m.GetRemoteIp()).(type) { @@ -1109,6 +1349,17 @@ func (m *Principal) validate(all bool) error { } case *Principal_Header: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true if all { switch v := interface{}(m.GetHeader()).(type) { @@ -1140,6 +1391,17 @@ func (m *Principal) validate(all bool) error { } case *Principal_UrlPath: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true if all { switch v := interface{}(m.GetUrlPath()).(type) { @@ -1171,6 +1433,17 @@ func (m *Principal) 
validate(all bool) error { } case *Principal_Metadata: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true if all { switch v := interface{}(m.GetMetadata()).(type) { @@ -1201,7 +1474,60 @@ func (m *Principal) validate(all bool) error { } } + case *Principal_FilterState: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetFilterState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + } + } + } + case *Principal_NotId: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true if all { switch v := interface{}(m.GetNotId()).(type) { @@ -1233,6 +1559,9 @@ func (m *Principal) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofIdentifierPresent { err := PrincipalValidationError{ field: "Identifier", reason: "value is required", @@ -1241,7 +1570,6 
@@ func (m *Principal) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -1432,6 +1760,290 @@ var _ interface { ErrorName() string } = ActionValidationError{} +// Validate checks the field values on RBAC_AuditLoggingOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RBAC_AuditLoggingOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RBAC_AuditLoggingOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RBAC_AuditLoggingOptionsMultiError, or nil if none found. +func (m *RBAC_AuditLoggingOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *RBAC_AuditLoggingOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := RBAC_AuditLoggingOptions_AuditCondition_name[int32(m.GetAuditCondition())]; !ok { + err := RBAC_AuditLoggingOptionsValidationError{ + field: "AuditCondition", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetLoggerConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBAC_AuditLoggingOptionsValidationError{ + field: fmt.Sprintf("LoggerConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBAC_AuditLoggingOptionsValidationError{ + field: fmt.Sprintf("LoggerConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, 
ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBAC_AuditLoggingOptionsValidationError{ + field: fmt.Sprintf("LoggerConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RBAC_AuditLoggingOptionsMultiError(errors) + } + + return nil +} + +// RBAC_AuditLoggingOptionsMultiError is an error wrapping multiple validation +// errors returned by RBAC_AuditLoggingOptions.ValidateAll() if the designated +// constraints aren't met. +type RBAC_AuditLoggingOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RBAC_AuditLoggingOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RBAC_AuditLoggingOptionsMultiError) AllErrors() []error { return m } + +// RBAC_AuditLoggingOptionsValidationError is the validation error returned by +// RBAC_AuditLoggingOptions.Validate if the designated constraints aren't met. +type RBAC_AuditLoggingOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RBAC_AuditLoggingOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RBAC_AuditLoggingOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RBAC_AuditLoggingOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RBAC_AuditLoggingOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RBAC_AuditLoggingOptionsValidationError) ErrorName() string { + return "RBAC_AuditLoggingOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e RBAC_AuditLoggingOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRBAC_AuditLoggingOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RBAC_AuditLoggingOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RBAC_AuditLoggingOptionsValidationError{} + +// Validate checks the field values on +// RBAC_AuditLoggingOptions_AuditLoggerConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// RBAC_AuditLoggingOptions_AuditLoggerConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError, or nil if none found. 
+func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAuditLogger()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{ + field: "AuditLogger", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{ + field: "AuditLogger", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAuditLogger()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{ + field: "AuditLogger", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IsOptional + + if len(errors) > 0 { + return RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError(errors) + } + + return nil +} + +// RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError is an error wrapping +// multiple validation errors returned by +// RBAC_AuditLoggingOptions_AuditLoggerConfig.ValidateAll() if the designated +// constraints aren't met. +type RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError) AllErrors() []error { return m } + +// RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError is the validation +// error returned by RBAC_AuditLoggingOptions_AuditLoggerConfig.Validate if +// the designated constraints aren't met. +type RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) ErrorName() string { + return "RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRBAC_AuditLoggingOptions_AuditLoggerConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{} + // Validate checks the field values on Permission_Set with the rules defined in // the proto definition for this message. 
If any rules are violated, the first // error encountered is returned, or nil if there are no violations. diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go index e6798d7726..d87450fb96 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/route/v3/route.proto package routev3 @@ -10,6 +10,7 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" + any1 "github.com/golang/protobuf/ptypes/any" wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -24,7 +25,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// [#next-free-field: 16] +// [#next-free-field: 17] type RouteConfiguration struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -38,10 +39,10 @@ type RouteConfiguration struct { // An array of virtual hosts that make up the route table. VirtualHosts []*VirtualHost `protobuf:"bytes,2,rep,name=virtual_hosts,json=virtualHosts,proto3" json:"virtual_hosts,omitempty"` // An array of virtual hosts will be dynamically loaded via the VHDS API. - // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used - // for a base routing table or for infrequently changing virtual hosts. 
*vhds* is used for + // Both ``virtual_hosts`` and ``vhds`` fields will be used when present. ``virtual_hosts`` can be used + // for a base routing table or for infrequently changing virtual hosts. ``vhds`` is used for // on-demand discovery of virtual hosts. The contents of these two fields will be merged to - // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration + // generate a routing table for a given RouteConfiguration, with ``vhds`` derived configuration // taking precedence. Vhds *Vhds `protobuf:"bytes,9,opt,name=vhds,proto3" json:"vhds,omitempty"` // Optionally specifies a list of HTTP headers that the connection manager @@ -107,7 +108,7 @@ type RouteConfiguration struct { MaxDirectResponseBodySizeBytes *wrappers.UInt32Value `protobuf:"bytes,11,opt,name=max_direct_response_body_size_bytes,json=maxDirectResponseBodySizeBytes,proto3" json:"max_direct_response_body_size_bytes,omitempty"` // A list of plugins and their configurations which may be used by a // :ref:`cluster specifier plugin name ` - // within the route. All *extension.name* fields in this list must be unique. + // within the route. All ``extension.name`` fields in this list must be unique. ClusterSpecifierPlugins []*ClusterSpecifierPlugin `protobuf:"bytes,12,rep,name=cluster_specifier_plugins,json=clusterSpecifierPlugins,proto3" json:"cluster_specifier_plugins,omitempty"` // Specify a set of default request mirroring policies which apply to all routes under its virtual hosts. // Note that policies are not merged, the most specific non-empty one becomes the mirror policies. @@ -122,6 +123,19 @@ type RouteConfiguration struct { // Envoy by default takes ":path" as ";". // For users who want to only match path on the "" portion, this option should be true. 
IgnorePathParametersInPathMatching bool `protobuf:"varint,15,opt,name=ignore_path_parameters_in_path_matching,json=ignorePathParametersInPathMatching,proto3" json:"ignore_path_parameters_in_path_matching,omitempty"` + // The typed_per_filter_config field can be used to provide RouteConfiguration level per filter config. + // The key should match the :ref:`filter config name + // `. + // The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also + // be used for the backwards compatibility. If there is no entry referred by the filter config name, the + // entry referred by the canonical filter name will be provided to the filters as fallback. + // + // Use of this field is filter specific; + // see the :ref:`HTTP filter documentation ` for if and how it is utilized. + // [#comment: An entry's value may be wrapped in a + // :ref:`FilterConfig` + // message to specify additional options.] + TypedPerFilterConfig map[string]*any1.Any `protobuf:"bytes,16,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *RouteConfiguration) Reset() { @@ -261,6 +275,13 @@ func (x *RouteConfiguration) GetIgnorePathParametersInPathMatching() bool { return false } +func (x *RouteConfiguration) GetTypedPerFilterConfig() map[string]*any1.Any { + if x != nil { + return x.TypedPerFilterConfig + } + return nil +} + type Vhds struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -323,110 +344,125 @@ var file_envoy_config_route_v3_route_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf2, - 0x09, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0d, 0x76, 0x69, 0x72, - 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, - 0x48, 0x6f, 0x73, 0x74, 0x52, 0x0c, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, - 0x74, 0x73, 0x12, 0x2f, 0x0a, 0x04, 0x76, 0x68, 0x64, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x68, 0x64, 0x73, 0x52, 0x04, 0x76, - 0x68, 0x64, 0x73, 0x12, 0x44, 0x0a, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, - 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 
0x06, 0xc0, 0x01, - 0x01, 0xc8, 0x01, 0x00, 0x52, 0x13, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4f, 0x6e, - 0x6c, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, - 0x5f, 0x61, 0x64, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x14, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, - 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, - 0x22, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, - 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, 0x19, - 0x72, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, - 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x42, - 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, - 0x00, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x4c, 0x0a, 0x23, 0x6d, 0x6f, 0x73, - 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x77, 0x69, 0x6e, 0x73, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x6d, 0x6f, 0x73, 0x74, 0x53, 0x70, 0x65, 0x63, - 0x69, 0x66, 0x69, 0x63, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x57, 0x69, 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x12, 0x69, 0x0a, 0x23, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x0b, 0x0a, 0x12, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0d, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x52, + 0x0c, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x2f, 0x0a, + 0x04, 0x76, 0x68, 0x64, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x68, 0x64, 0x73, 0x52, 0x04, 0x76, 0x68, 0x64, 0x73, 0x12, 0x44, + 0x0a, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, + 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, + 0x13, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4f, 0x6e, 0x6c, 0x79, 0x48, 0x65, 
0x61, + 0x64, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, + 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, + 0x4d, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, + 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x67, + 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, + 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x18, 
0x08, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, + 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x16, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x4c, 0x0a, 0x23, 0x6d, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x75, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x77, 0x69, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1f, 0x6d, 0x6f, 0x73, 0x74, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x57, 0x69, + 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1e, 0x6d, 0x61, 0x78, - 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, - 0x64, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x69, 0x0a, 0x19, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x17, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 
0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x23, 0x6d, + 0x61, 0x78, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1e, 0x6d, 0x61, 0x78, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x53, 0x69, 0x7a, + 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x69, 0x0a, 0x19, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x17, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x72, + 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 
0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, + 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x12, 0x3e, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, + 0x5f, 0x69, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, + 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x50, + 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, + 0x67, 0x12, 0x53, 0x0a, 0x27, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x22, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x74, 0x68, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x7a, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, - 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x3e, 0x0a, 
0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, - 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x53, 0x0a, 0x27, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, - 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, - 0x67, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x22, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x50, - 0x61, 0x74, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x6e, 0x50, - 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x3a, 0x26, 0x9a, 0xc5, 0x88, - 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0x73, 0x0a, 0x04, 0x56, 0x68, 0x64, 0x73, 0x12, 0x51, 0x0a, 0x0d, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, - 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x18, - 0x9a, 0xc5, 0x88, 0x1e, 0x13, 0x0a, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x68, 0x64, 0x73, 0x42, 0x81, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 
0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x42, 0x0a, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, + 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x1a, 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x73, 0x0a, 0x04, 0x56, 0x68, 0x64, + 0x73, 
0x12, 0x51, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x3a, 0x18, 0x9a, 0xc5, 0x88, 0x1e, 0x13, 0x0a, 0x11, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x68, 0x64, 0x73, 0x42, 0x81, + 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, + 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -441,33 +477,37 @@ func file_envoy_config_route_v3_route_proto_rawDescGZIP() []byte { return file_envoy_config_route_v3_route_proto_rawDescData } -var file_envoy_config_route_v3_route_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_route_v3_route_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_envoy_config_route_v3_route_proto_goTypes = []interface{}{ (*RouteConfiguration)(nil), // 0: envoy.config.route.v3.RouteConfiguration 
(*Vhds)(nil), // 1: envoy.config.route.v3.Vhds - (*VirtualHost)(nil), // 2: envoy.config.route.v3.VirtualHost - (*v3.HeaderValueOption)(nil), // 3: envoy.config.core.v3.HeaderValueOption - (*wrappers.BoolValue)(nil), // 4: google.protobuf.BoolValue - (*wrappers.UInt32Value)(nil), // 5: google.protobuf.UInt32Value - (*ClusterSpecifierPlugin)(nil), // 6: envoy.config.route.v3.ClusterSpecifierPlugin - (*RouteAction_RequestMirrorPolicy)(nil), // 7: envoy.config.route.v3.RouteAction.RequestMirrorPolicy - (*v3.ConfigSource)(nil), // 8: envoy.config.core.v3.ConfigSource + nil, // 2: envoy.config.route.v3.RouteConfiguration.TypedPerFilterConfigEntry + (*VirtualHost)(nil), // 3: envoy.config.route.v3.VirtualHost + (*v3.HeaderValueOption)(nil), // 4: envoy.config.core.v3.HeaderValueOption + (*wrappers.BoolValue)(nil), // 5: google.protobuf.BoolValue + (*wrappers.UInt32Value)(nil), // 6: google.protobuf.UInt32Value + (*ClusterSpecifierPlugin)(nil), // 7: envoy.config.route.v3.ClusterSpecifierPlugin + (*RouteAction_RequestMirrorPolicy)(nil), // 8: envoy.config.route.v3.RouteAction.RequestMirrorPolicy + (*v3.ConfigSource)(nil), // 9: envoy.config.core.v3.ConfigSource + (*any1.Any)(nil), // 10: google.protobuf.Any } var file_envoy_config_route_v3_route_proto_depIdxs = []int32{ - 2, // 0: envoy.config.route.v3.RouteConfiguration.virtual_hosts:type_name -> envoy.config.route.v3.VirtualHost - 1, // 1: envoy.config.route.v3.RouteConfiguration.vhds:type_name -> envoy.config.route.v3.Vhds - 3, // 2: envoy.config.route.v3.RouteConfiguration.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption - 3, // 3: envoy.config.route.v3.RouteConfiguration.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption - 4, // 4: envoy.config.route.v3.RouteConfiguration.validate_clusters:type_name -> google.protobuf.BoolValue - 5, // 5: envoy.config.route.v3.RouteConfiguration.max_direct_response_body_size_bytes:type_name -> google.protobuf.UInt32Value - 6, // 6: 
envoy.config.route.v3.RouteConfiguration.cluster_specifier_plugins:type_name -> envoy.config.route.v3.ClusterSpecifierPlugin - 7, // 7: envoy.config.route.v3.RouteConfiguration.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy - 8, // 8: envoy.config.route.v3.Vhds.config_source:type_name -> envoy.config.core.v3.ConfigSource - 9, // [9:9] is the sub-list for method output_type - 9, // [9:9] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name + 3, // 0: envoy.config.route.v3.RouteConfiguration.virtual_hosts:type_name -> envoy.config.route.v3.VirtualHost + 1, // 1: envoy.config.route.v3.RouteConfiguration.vhds:type_name -> envoy.config.route.v3.Vhds + 4, // 2: envoy.config.route.v3.RouteConfiguration.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 4, // 3: envoy.config.route.v3.RouteConfiguration.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 5, // 4: envoy.config.route.v3.RouteConfiguration.validate_clusters:type_name -> google.protobuf.BoolValue + 6, // 5: envoy.config.route.v3.RouteConfiguration.max_direct_response_body_size_bytes:type_name -> google.protobuf.UInt32Value + 7, // 6: envoy.config.route.v3.RouteConfiguration.cluster_specifier_plugins:type_name -> envoy.config.route.v3.ClusterSpecifierPlugin + 8, // 7: envoy.config.route.v3.RouteConfiguration.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy + 2, // 8: envoy.config.route.v3.RouteConfiguration.typed_per_filter_config:type_name -> envoy.config.route.v3.RouteConfiguration.TypedPerFilterConfigEntry + 9, // 9: envoy.config.route.v3.Vhds.config_source:type_name -> envoy.config.core.v3.ConfigSource + 10, // 10: envoy.config.route.v3.RouteConfiguration.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 11, // 
[11:11] is the sub-list for method output_type + 11, // [11:11] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name } func init() { file_envoy_config_route_v3_route_proto_init() } @@ -508,7 +548,7 @@ func file_envoy_config_route_v3_route_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_route_v3_route_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 3, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go index 1c52471305..ce8a399e8d 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go @@ -392,6 +392,52 @@ func (m *RouteConfiguration) validate(all bool) error { // no validation rules for IgnorePathParametersInPathMatching + { + sorted_keys := make([]string, len(m.GetTypedPerFilterConfig())) + i := 0 + for key := range m.GetTypedPerFilterConfig() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetTypedPerFilterConfig()[key] + _ = val + + // no validation rules for TypedPerFilterConfig[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case 
interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + if len(errors) > 0 { return RouteConfigurationMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go index f13e510f34..8c0220a94b 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/route/v3/route_components.proto package routev3 @@ -17,7 +17,7 @@ import ( v34 "github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3" v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" duration "github.com/golang/protobuf/ptypes/duration" wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -94,6 +94,8 @@ const ( RouteAction_SERVICE_UNAVAILABLE RouteAction_ClusterNotFoundResponseCode = 0 // HTTP status code - 404 Not Found. RouteAction_NOT_FOUND RouteAction_ClusterNotFoundResponseCode = 1 + // HTTP status code - 500 Internal Server Error. + RouteAction_INTERNAL_SERVER_ERROR RouteAction_ClusterNotFoundResponseCode = 2 ) // Enum value maps for RouteAction_ClusterNotFoundResponseCode. @@ -101,10 +103,12 @@ var ( RouteAction_ClusterNotFoundResponseCode_name = map[int32]string{ 0: "SERVICE_UNAVAILABLE", 1: "NOT_FOUND", + 2: "INTERNAL_SERVER_ERROR", } RouteAction_ClusterNotFoundResponseCode_value = map[string]int32{ - "SERVICE_UNAVAILABLE": 0, - "NOT_FOUND": 1, + "SERVICE_UNAVAILABLE": 0, + "NOT_FOUND": 1, + "INTERNAL_SERVER_ERROR": 2, } ) @@ -132,7 +136,7 @@ func (x RouteAction_ClusterNotFoundResponseCode) Number() protoreflect.EnumNumbe // Deprecated: Use RouteAction_ClusterNotFoundResponseCode.Descriptor instead. func (RouteAction_ClusterNotFoundResponseCode) EnumDescriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7, 0} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 0} } // Configures :ref:`internal redirect ` behavior. 
@@ -182,7 +186,7 @@ func (x RouteAction_InternalRedirectAction) Number() protoreflect.EnumNumber { // Deprecated: Use RouteAction_InternalRedirectAction.Descriptor instead. func (RouteAction_InternalRedirectAction) EnumDescriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7, 1} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1} } type RetryPolicy_ResetHeaderFormat int32 @@ -228,7 +232,7 @@ func (x RetryPolicy_ResetHeaderFormat) Number() protoreflect.EnumNumber { // Deprecated: Use RetryPolicy_ResetHeaderFormat.Descriptor instead. func (RetryPolicy_ResetHeaderFormat) EnumDescriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 0} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 0} } type RedirectAction_RedirectResponseCode int32 @@ -288,7 +292,7 @@ func (x RedirectAction_RedirectResponseCode) Number() protoreflect.EnumNumber { // Deprecated: Use RedirectAction_RedirectResponseCode.Descriptor instead. func (RedirectAction_RedirectResponseCode) EnumDescriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{10, 0} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{11, 0} } type RateLimit_Action_MetaData_Source int32 @@ -336,7 +340,7 @@ func (x RateLimit_Action_MetaData_Source) Number() protoreflect.EnumNumber { // Deprecated: Use RateLimit_Action_MetaData_Source.Descriptor instead. func (RateLimit_Action_MetaData_Source) EnumDescriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16, 0, 8, 0} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 8, 0} } // The top level element in the routing configuration is a virtual host. 
Each virtual host has @@ -344,7 +348,7 @@ func (RateLimit_Action_MetaData_Source) EnumDescriptor() ([]byte, []int) { // host header. This allows a single listener to service multiple top level domain path trees. Once // a virtual host is selected based on the domain, the routes are processed in order to see which // upstream cluster to route to or whether to perform a redirect. -// [#next-free-field: 23] +// [#next-free-field: 24] type VirtualHost struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -374,10 +378,10 @@ type VirtualHost struct { Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"` // The list of routes that will be matched, in order, for incoming requests. // The first route that matches will be used. - // Only one of this and `matcher` can be specified. + // Only one of this and ``matcher`` can be specified. Routes []*Route `protobuf:"bytes,3,rep,name=routes,proto3" json:"routes,omitempty"` // [#next-major-version: This should be included in a oneof with routes wrapped in a message.] - // The match tree to use when resolving route actions for incoming requests. Only one of this and `routes` + // The match tree to use when resolving route actions for incoming requests. Only one of this and ``routes`` // can be specified. Matcher *v3.Matcher `protobuf:"bytes,21,opt,name=matcher,proto3" json:"matcher,omitempty"` // Specifies the type of TLS enforcement the virtual host expects. If this option is not @@ -409,17 +413,31 @@ type VirtualHost struct { // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. ResponseHeadersToRemove []string `protobuf:"bytes,11,rep,name=response_headers_to_remove,json=responseHeadersToRemove,proto3" json:"response_headers_to_remove,omitempty"` - // Indicates that the virtual host has a CORS policy. + // Indicates that the virtual host has a CORS policy. 
This field is ignored if related cors policy is + // found in the + // :ref:`VirtualHost.typed_per_filter_config`. + // + // .. attention:: + // + // This option has been deprecated. Please use + // :ref:`VirtualHost.typed_per_filter_config` + // to configure the CORS HTTP filter. + // + // Deprecated: Do not use. Cors *CorsPolicy `protobuf:"bytes,8,opt,name=cors,proto3" json:"cors,omitempty"` - // The per_filter_config field can be used to provide virtual host-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. + // The per_filter_config field can be used to provide virtual host-specific configurations for filters. + // The key should match the :ref:`filter config name + // `. + // The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also + // be used for the backwards compatibility. If there is no entry referred by the filter config name, the + // entry referred by the canonical filter name will be provided to the filters as fallback. + // + // Use of this field is filter specific; + // see the :ref:`HTTP filter documentation ` for if and how it is utilized. // [#comment: An entry's value may be wrapped in a // :ref:`FilterConfig` // message to specify additional options.] 
- TypedPerFilterConfig map[string]*any.Any `protobuf:"bytes,15,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypedPerFilterConfig map[string]*any1.Any `protobuf:"bytes,15,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Decides whether the :ref:`x-envoy-attempt-count // ` header should be included // in the upstream request. Setting this option will cause it to override any existing header @@ -449,11 +467,14 @@ type VirtualHost struct { // will take precedence over this config and it'll be treated independently (e.g.: values are not // inherited). :ref:`Retry policy ` should not be // set if this field is used. - RetryPolicyTypedConfig *any.Any `protobuf:"bytes,20,opt,name=retry_policy_typed_config,json=retryPolicyTypedConfig,proto3" json:"retry_policy_typed_config,omitempty"` + RetryPolicyTypedConfig *any1.Any `protobuf:"bytes,20,opt,name=retry_policy_typed_config,json=retryPolicyTypedConfig,proto3" json:"retry_policy_typed_config,omitempty"` // Indicates the hedge policy for all routes in this virtual host. Note that setting a // route level entry will take precedence over this config and it'll be treated // independently (e.g.: values are not inherited). HedgePolicy *HedgePolicy `protobuf:"bytes,17,opt,name=hedge_policy,json=hedgePolicy,proto3" json:"hedge_policy,omitempty"` + // Decides whether to include the :ref:`x-envoy-is-timeout-retry ` + // request header in retries initiated by per try timeouts. + IncludeIsTimeoutRetryHeader bool `protobuf:"varint,23,opt,name=include_is_timeout_retry_header,json=includeIsTimeoutRetryHeader,proto3" json:"include_is_timeout_retry_header,omitempty"` // The maximum bytes which will be buffered for retries and shadowing. 
// If set and a route-specific limit is not set, the bytes actually buffered will be the minimum // value of this and the listener per_connection_buffer_limit_bytes. @@ -573,6 +594,7 @@ func (x *VirtualHost) GetResponseHeadersToRemove() []string { return nil } +// Deprecated: Do not use. func (x *VirtualHost) GetCors() *CorsPolicy { if x != nil { return x.Cors @@ -580,7 +602,7 @@ func (x *VirtualHost) GetCors() *CorsPolicy { return nil } -func (x *VirtualHost) GetTypedPerFilterConfig() map[string]*any.Any { +func (x *VirtualHost) GetTypedPerFilterConfig() map[string]*any1.Any { if x != nil { return x.TypedPerFilterConfig } @@ -608,7 +630,7 @@ func (x *VirtualHost) GetRetryPolicy() *RetryPolicy { return nil } -func (x *VirtualHost) GetRetryPolicyTypedConfig() *any.Any { +func (x *VirtualHost) GetRetryPolicyTypedConfig() *any1.Any { if x != nil { return x.RetryPolicyTypedConfig } @@ -622,6 +644,13 @@ func (x *VirtualHost) GetHedgePolicy() *HedgePolicy { return nil } +func (x *VirtualHost) GetIncludeIsTimeoutRetryHeader() bool { + if x != nil { + return x.IncludeIsTimeoutRetryHeader + } + return false +} + func (x *VirtualHost) GetPerRequestBufferLimitBytes() *wrappers.UInt32Value { if x != nil { return x.PerRequestBufferLimitBytes @@ -642,7 +671,7 @@ type FilterAction struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Action *any.Any `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` + Action *any1.Any `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` } func (x *FilterAction) Reset() { @@ -677,13 +706,63 @@ func (*FilterAction) Descriptor() ([]byte, []int) { return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{1} } -func (x *FilterAction) GetAction() *any.Any { +func (x *FilterAction) GetAction() *any1.Any { if x != nil { return x.Action } return nil } +// This can be used in route matcher :ref:`VirtualHost.matcher `. +// When the matcher matches, routes will be matched and run. 
+type RouteList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of routes that will be matched and run, in order. The first route that matches will be used. + Routes []*Route `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty"` +} + +func (x *RouteList) Reset() { + *x = RouteList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteList) ProtoMessage() {} + +func (x *RouteList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteList.ProtoReflect.Descriptor instead. +func (*RouteList) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{2} +} + +func (x *RouteList) GetRoutes() []*Route { + if x != nil { + return x.Routes + } + return nil +} + // A route is both a specification of how to match a request as well as an indication of what to do // next (e.g., redirect, forward, rewrite, etc.). // @@ -712,19 +791,23 @@ type Route struct { // about the route. It can be used for configuration, stats, and logging. // The metadata should go under the filter namespace that will need it. // For instance, if the metadata is intended for the Router filter, - // the filter name should be specified as *envoy.filters.http.router*. + // the filter name should be specified as ``envoy.filters.http.router``. 
Metadata *v31.Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` // Decorator for the matched route. Decorator *Decorator `protobuf:"bytes,5,opt,name=decorator,proto3" json:"decorator,omitempty"` - // The typed_per_filter_config field can be used to provide route-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` for - // if and how it is utilized. + // The per_filter_config field can be used to provide route-specific configurations for filters. + // The key should match the :ref:`filter config name + // `. + // The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also + // be used for the backwards compatibility. If there is no entry referred by the filter config name, the + // entry referred by the canonical filter name will be provided to the filters as fallback. + // + // Use of this field is filter specific; + // see the :ref:`HTTP filter documentation ` for if and how it is utilized. // [#comment: An entry's value may be wrapped in a // :ref:`FilterConfig` // message to specify additional options.] - TypedPerFilterConfig map[string]*any.Any `protobuf:"bytes,13,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypedPerFilterConfig map[string]*any1.Any `protobuf:"bytes,13,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Specifies a set of headers that will be added to requests matching this // route. 
Headers specified at this level are applied before headers from the // enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and @@ -771,7 +854,7 @@ type Route struct { func (x *Route) Reset() { *x = Route{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[2] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -784,7 +867,7 @@ func (x *Route) String() string { func (*Route) ProtoMessage() {} func (x *Route) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[2] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -797,7 +880,7 @@ func (x *Route) ProtoReflect() protoreflect.Message { // Deprecated: Use Route.ProtoReflect.Descriptor instead. func (*Route) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{2} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{3} } func (x *Route) GetName() string { @@ -870,7 +953,7 @@ func (x *Route) GetDecorator() *Decorator { return nil } -func (x *Route) GetTypedPerFilterConfig() map[string]*any.Any { +func (x *Route) GetTypedPerFilterConfig() map[string]*any1.Any { if x != nil { return x.TypedPerFilterConfig } @@ -986,13 +1069,17 @@ type WeightedCluster struct { // Specifies one or more upstream clusters associated with the route. Clusters []*WeightedCluster_ClusterWeight `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` // Specifies the total weight across all clusters. The sum of all cluster weights must equal this - // value, which must be greater than 0. Defaults to 100. + // value, if this is greater than 0. 
+ // This field is now deprecated, and the client will use the sum of all + // cluster weights. It is up to the management server to supply the correct weights. + // + // Deprecated: Do not use. TotalWeight *wrappers.UInt32Value `protobuf:"bytes,3,opt,name=total_weight,json=totalWeight,proto3" json:"total_weight,omitempty"` // Specifies the runtime key prefix that should be used to construct the - // runtime keys associated with each cluster. When the *runtime_key_prefix* is + // runtime keys associated with each cluster. When the ``runtime_key_prefix`` is // specified, the router will look for weights associated with each upstream - // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where - // *cluster[i]* denotes an entry in the clusters array field. If the runtime + // cluster under the key ``runtime_key_prefix`` + ``.`` + ``cluster[i].name`` where + // ``cluster[i]`` denotes an entry in the clusters array field. If the runtime // key for the cluster does not exist, the value specified in the // configuration file will be used as the default weight. See the :ref:`runtime documentation // ` for how key names map to the underlying implementation. 
@@ -1005,7 +1092,7 @@ type WeightedCluster struct { func (x *WeightedCluster) Reset() { *x = WeightedCluster{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[3] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1018,7 +1105,7 @@ func (x *WeightedCluster) String() string { func (*WeightedCluster) ProtoMessage() {} func (x *WeightedCluster) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[3] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1031,7 +1118,7 @@ func (x *WeightedCluster) ProtoReflect() protoreflect.Message { // Deprecated: Use WeightedCluster.ProtoReflect.Descriptor instead. func (*WeightedCluster) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{3} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{4} } func (x *WeightedCluster) GetClusters() []*WeightedCluster_ClusterWeight { @@ -1041,6 +1128,7 @@ func (x *WeightedCluster) GetClusters() []*WeightedCluster_ClusterWeight { return nil } +// Deprecated: Do not use. 
func (x *WeightedCluster) GetTotalWeight() *wrappers.UInt32Value { if x != nil { return x.TotalWeight @@ -1103,7 +1191,7 @@ type ClusterSpecifierPlugin struct { func (x *ClusterSpecifierPlugin) Reset() { *x = ClusterSpecifierPlugin{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[4] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1116,7 +1204,7 @@ func (x *ClusterSpecifierPlugin) String() string { func (*ClusterSpecifierPlugin) ProtoMessage() {} func (x *ClusterSpecifierPlugin) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[4] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1129,7 +1217,7 @@ func (x *ClusterSpecifierPlugin) ProtoReflect() protoreflect.Message { // Deprecated: Use ClusterSpecifierPlugin.ProtoReflect.Descriptor instead. func (*ClusterSpecifierPlugin) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{4} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{5} } func (x *ClusterSpecifierPlugin) GetExtension() *v31.TypedExtensionConfig { @@ -1158,7 +1246,7 @@ type RouteMatch struct { // *RouteMatch_SafeRegex // *RouteMatch_ConnectMatcher_ // *RouteMatch_PathSeparatedPrefix - // *RouteMatch_PathTemplate + // *RouteMatch_PathMatchPolicy PathSpecifier isRouteMatch_PathSpecifier `protobuf_oneof:"path_specifier"` // Indicates that prefix/path matching should be case sensitive. The default // is true. Ignored for safe_regex matching. @@ -1187,9 +1275,9 @@ type RouteMatch struct { // is not in the config). 
Headers []*HeaderMatcher `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty"` // Specifies a set of URL query parameters on which the route should - // match. The router will check the query string from the *path* header + // match. The router will check the query string from the ``path`` header // against all the specified query parameters. If the number of specified - // query parameters is nonzero, they all must match the *path* header's + // query parameters is nonzero, they all must match the ``path`` header's // query string for a match to occur. // // .. note:: @@ -1219,7 +1307,7 @@ type RouteMatch struct { func (x *RouteMatch) Reset() { *x = RouteMatch{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[5] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1232,7 +1320,7 @@ func (x *RouteMatch) String() string { func (*RouteMatch) ProtoMessage() {} func (x *RouteMatch) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[5] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1245,7 +1333,7 @@ func (x *RouteMatch) ProtoReflect() protoreflect.Message { // Deprecated: Use RouteMatch.ProtoReflect.Descriptor instead. 
func (*RouteMatch) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{5} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6} } func (m *RouteMatch) GetPathSpecifier() isRouteMatch_PathSpecifier { @@ -1290,11 +1378,11 @@ func (x *RouteMatch) GetPathSeparatedPrefix() string { return "" } -func (x *RouteMatch) GetPathTemplate() string { - if x, ok := x.GetPathSpecifier().(*RouteMatch_PathTemplate); ok { - return x.PathTemplate +func (x *RouteMatch) GetPathMatchPolicy() *v31.TypedExtensionConfig { + if x, ok := x.GetPathSpecifier().(*RouteMatch_PathMatchPolicy); ok { + return x.PathMatchPolicy } - return "" + return nil } func (x *RouteMatch) GetCaseSensitive() *wrappers.BoolValue { @@ -1352,21 +1440,21 @@ type isRouteMatch_PathSpecifier interface { type RouteMatch_Prefix struct { // If specified, the route is a prefix rule meaning that the prefix must - // match the beginning of the *:path* header. + // match the beginning of the ``:path`` header. Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3,oneof"` } type RouteMatch_Path struct { // If specified, the route is an exact path rule meaning that the path must - // exactly match the *:path* header once the query string is removed. + // exactly match the ``:path`` header once the query string is removed. Path string `protobuf:"bytes,2,opt,name=path,proto3,oneof"` } type RouteMatch_SafeRegex struct { // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path + // regex must match the ``:path`` header once the query string is removed. The entire path // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. + // subsequence of the ``:path`` header matches the regex. 
// // [#next-major-version: In the v3 API we should redo how path specification works such // that we utilize StringMatcher, and additionally have consistent options around whether we @@ -1404,34 +1492,9 @@ type RouteMatch_PathSeparatedPrefix struct { PathSeparatedPrefix string `protobuf:"bytes,14,opt,name=path_separated_prefix,json=pathSeparatedPrefix,proto3,oneof"` } -type RouteMatch_PathTemplate struct { - // If specified, the route is a template match rule meaning that the - // ``:path`` header (without the query string) must match the given - // ``path_template`` pattern. - // - // Path template matching types: - // - // * ``*`` : Matches a single path component, up to the next path separator: / - // - // * ``**`` : Matches zero or more path segments. If present, must be the last operator. - // - // * ``{name} or {name=*}`` : A named variable matching one path segment up to the next path separator: /. - // - // * ``{name=videos/*}`` : A named variable matching more than one path segment. - // The path component matching videos/* is captured as the named variable. - // - // * ``{name=**}`` : A named variable matching zero or more path segments. 
- // - // - // For example: - // - // * ``/videos/*/*/*.m4s`` would match ``videos/123414/hls/1080p5000_00001.m4s`` - // - // * ``/videos/{file}`` would match ``/videos/1080p5000_00001.m4s`` - // - // * ``/**.mpd`` would match ``/content/123/india/dash/55/manifest.mpd`` - // [#not-implemented-hide:] - PathTemplate string `protobuf:"bytes,15,opt,name=path_template,json=pathTemplate,proto3,oneof"` +type RouteMatch_PathMatchPolicy struct { + // [#extension-category: envoy.path.match] + PathMatchPolicy *v31.TypedExtensionConfig `protobuf:"bytes,15,opt,name=path_match_policy,json=pathMatchPolicy,proto3,oneof"` } func (*RouteMatch_Prefix) isRouteMatch_PathSpecifier() {} @@ -1444,9 +1507,17 @@ func (*RouteMatch_ConnectMatcher_) isRouteMatch_PathSpecifier() {} func (*RouteMatch_PathSeparatedPrefix) isRouteMatch_PathSpecifier() {} -func (*RouteMatch_PathTemplate) isRouteMatch_PathSpecifier() {} +func (*RouteMatch_PathMatchPolicy) isRouteMatch_PathSpecifier() {} -// [#next-free-field: 12] +// Cors policy configuration. +// +// .. attention:: +// +// This message has been deprecated. Please use +// :ref:`CorsPolicy in filter extension ` +// as as alternative. +// +// [#next-free-field: 13] type CorsPolicy struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1455,13 +1526,13 @@ type CorsPolicy struct { // Specifies string patterns that match allowed origins. An origin is allowed if any of the // string matchers match. AllowOriginStringMatch []*v32.StringMatcher `protobuf:"bytes,11,rep,name=allow_origin_string_match,json=allowOriginStringMatch,proto3" json:"allow_origin_string_match,omitempty"` - // Specifies the content for the *access-control-allow-methods* header. + // Specifies the content for the ``access-control-allow-methods`` header. AllowMethods string `protobuf:"bytes,2,opt,name=allow_methods,json=allowMethods,proto3" json:"allow_methods,omitempty"` - // Specifies the content for the *access-control-allow-headers* header. 
+ // Specifies the content for the ``access-control-allow-headers`` header. AllowHeaders string `protobuf:"bytes,3,opt,name=allow_headers,json=allowHeaders,proto3" json:"allow_headers,omitempty"` - // Specifies the content for the *access-control-expose-headers* header. + // Specifies the content for the ``access-control-expose-headers`` header. ExposeHeaders string `protobuf:"bytes,4,opt,name=expose_headers,json=exposeHeaders,proto3" json:"expose_headers,omitempty"` - // Specifies the content for the *access-control-max-age* header. + // Specifies the content for the ``access-control-max-age`` header. MaxAge string `protobuf:"bytes,5,opt,name=max_age,json=maxAge,proto3" json:"max_age,omitempty"` // Specifies whether the resource allows credentials. AllowCredentials *wrappers.BoolValue `protobuf:"bytes,6,opt,name=allow_credentials,json=allowCredentials,proto3" json:"allow_credentials,omitempty"` @@ -1476,14 +1547,19 @@ type CorsPolicy struct { // // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - // and track the request's *Origin* to determine if it's valid but will not enforce any policies. + // and track the request's ``Origin`` to determine if it's valid but will not enforce any policies. ShadowEnabled *v31.RuntimeFractionalPercent `protobuf:"bytes,10,opt,name=shadow_enabled,json=shadowEnabled,proto3" json:"shadow_enabled,omitempty"` + // Specify whether allow requests whose target server's IP address is more private than that from + // which the request initiator was fetched. + // + // More details refer to https://developer.chrome.com/blog/private-network-access-preflight. 
+ AllowPrivateNetworkAccess *wrappers.BoolValue `protobuf:"bytes,12,opt,name=allow_private_network_access,json=allowPrivateNetworkAccess,proto3" json:"allow_private_network_access,omitempty"` } func (x *CorsPolicy) Reset() { *x = CorsPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[6] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1496,7 +1572,7 @@ func (x *CorsPolicy) String() string { func (*CorsPolicy) ProtoMessage() {} func (x *CorsPolicy) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[6] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1509,7 +1585,7 @@ func (x *CorsPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use CorsPolicy.ProtoReflect.Descriptor instead. func (*CorsPolicy) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7} } func (x *CorsPolicy) GetAllowOriginStringMatch() []*v32.StringMatcher { @@ -1575,6 +1651,13 @@ func (x *CorsPolicy) GetShadowEnabled() *v31.RuntimeFractionalPercent { return nil } +func (x *CorsPolicy) GetAllowPrivateNetworkAccess() *wrappers.BoolValue { + if x != nil { + return x.AllowPrivateNetworkAccess + } + return nil +} + type isCorsPolicy_EnabledSpecifier interface { isCorsPolicy_EnabledSpecifier() } @@ -1612,7 +1695,7 @@ type RouteAction struct { // in the upstream cluster with metadata matching what's set in this field will be considered // for load balancing. If using :ref:`weighted_clusters // `, metadata will be merged, with values - // provided there taking precedence. 
The filter name should be specified as *envoy.lb*. + // provided there taking precedence. The filter name should be specified as ``envoy.lb``. MetadataMatch *v31.Metadata `protobuf:"bytes,4,opt,name=metadata_match,json=metadataMatch,proto3" json:"metadata_match,omitempty"` // Indicates that during forwarding, the matched prefix (or path) should be // swapped with this value. This option allows application URLs to be rooted @@ -1621,15 +1704,15 @@ type RouteAction struct { // ` header. // // Only one of :ref:`regex_rewrite ` - // [#comment:TODO(silverstar194) add the following once path_template_rewrite is implemented: :ref:`path_template_rewrite `] - // or *prefix_rewrite* may be specified. + // :ref:`path_rewrite_policy `, + // or :ref:`prefix_rewrite ` may be specified. // // .. attention:: // // Pay careful attention to the use of trailing slashes in the // :ref:`route's match ` prefix value. // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, - // rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single + // rewriting ``/prefix`` to ``/`` and ``/prefix/etc`` to ``/etc`` cannot be done in a single // :ref:`Route `, as shown by the below config entries: // // .. code-block:: yaml @@ -1643,8 +1726,8 @@ type RouteAction struct { // route: // prefix_rewrite: "/" // - // Having above entries in the config, requests to */prefix* will be stripped to */*, while - // requests to */prefix/etc* will be stripped to */etc*. + // Having above entries in the config, requests to ``/prefix`` will be stripped to ``/``, while + // requests to ``/prefix/etc`` will be stripped to ``/etc``. 
PrefixRewrite string `protobuf:"bytes,5,opt,name=prefix_rewrite,json=prefixRewrite,proto3" json:"prefix_rewrite,omitempty"` // Indicates that during forwarding, portions of the path that match the // pattern should be rewritten, even allowing the substitution of capture @@ -1655,9 +1738,10 @@ type RouteAction struct { // before the rewrite into the :ref:`x-envoy-original-path // ` header. // - // Only one of :ref:`prefix_rewrite ` - // [#comment:TODO(silverstar194) add the following once path_template_rewrite is implemented: :ref:`path_template_rewrite `,] - // or *regex_rewrite* may be specified. + // Only one of :ref:`regex_rewrite `, + // :ref:`prefix_rewrite `, or + // :ref:`path_rewrite_policy `] + // may be specified. // // Examples using Google's `RE2 `_ engine: // @@ -1676,47 +1760,8 @@ type RouteAction struct { // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to // ``/aaa/yyy/bbb``. RegexRewrite *v32.RegexMatchAndSubstitute `protobuf:"bytes,32,opt,name=regex_rewrite,json=regexRewrite,proto3" json:"regex_rewrite,omitempty"` - // Indicates that during forwarding, portions of the path that match the - // pattern should be rewritten, even allowing the substitution of variables - // from the match pattern into the new path as specified by the rewrite template. - // This is useful to allow application paths to be - // rewritten in a way that is aware of segments with variable content like - // identifiers. The router filter will place the original path as it was - // before the rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // Only one of :ref:`prefix_rewrite `, - // :ref:`regex_rewrite `, - // or *path_template_rewrite* may be specified. - // - // Template pattern matching types: - // - // * ``*`` : Matches a single path component, up to the next path separator: / - // - // * ``**`` : Matches zero or more path segments. If present, must be the last operator. 
- // - // * ``{name} or {name=*}`` : A named variable matching one path segment up to the next path separator: /. - // - // * ``{name=videos/*}`` : A named variable matching more than one path segment. - // The path component matching videos/* is captured as the named variable. - // - // * ``{name=**}`` : A named variable matching zero or more path segments. - // - // Only named matches can be used to perform rewrites. - // - // Examples using path_template_rewrite: - // - // * The pattern ``/{one}/{two}`` paired with a substitution string of ``/{two}/{one}`` would - // transform ``/cat/dog`` into ``/dog/cat``. - // - // * The pattern ``/videos/{language=lang/*}/*`` paired with a substitution string of - // ``/{language}`` would transform ``/videos/lang/en/video.m4s`` into ``lang/en``. - // - // * The path pattern ``/content/{format}/{lang}/{id}/{file}.vtt`` paired with a substitution - // string of ``/{lang}/{format}/{file}.vtt`` would transform ``/content/hls/en-us/12345/en_193913.vtt`` - // into ``/en-us/hls/en_193913.vtt``. - // [#not-implemented-hide:] - PathTemplateRewrite string `protobuf:"bytes,41,opt,name=path_template_rewrite,json=pathTemplateRewrite,proto3" json:"path_template_rewrite,omitempty"` + // [#extension-category: envoy.path.rewrite] + PathRewritePolicy *v31.TypedExtensionConfig `protobuf:"bytes,41,opt,name=path_rewrite_policy,json=pathRewritePolicy,proto3" json:"path_rewrite_policy,omitempty"` // Types that are assignable to HostRewriteSpecifier: // *RouteAction_HostRewriteLiteral // *RouteAction_AutoHostRewrite @@ -1729,7 +1774,8 @@ type RouteAction struct { // :ref:`host_rewrite_header `, or // :ref:`host_rewrite_path_regex `) // causes the original value of the host header, if any, to be appended to the - // :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header. + // :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header if it is different to the last value appended. 
+ // This can be disabled by setting the runtime guard `envoy_reloadable_features_append_xfh_idempotent` to false. AppendXForwardedHost bool `protobuf:"varint,38,opt,name=append_x_forwarded_host,json=appendXForwardedHost,proto3" json:"append_x_forwarded_host,omitempty"` // Specifies the upstream timeout for the route. If not specified, the default is 15s. This // spans between the point at which the entire downstream request (i.e. end-of-stream) has been @@ -1779,7 +1825,7 @@ type RouteAction struct { // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, // most internal one becomes the enforced policy). :ref:`Retry policy ` // should not be set if this field is used. - RetryPolicyTypedConfig *any.Any `protobuf:"bytes,33,opt,name=retry_policy_typed_config,json=retryPolicyTypedConfig,proto3" json:"retry_policy_typed_config,omitempty"` + RetryPolicyTypedConfig *any1.Any `protobuf:"bytes,33,opt,name=retry_policy_typed_config,json=retryPolicyTypedConfig,proto3" json:"retry_policy_typed_config,omitempty"` // Specify a set of route request mirroring policies. // It takes precedence over the virtual host and route config mirror policy entirely. // That is, policies are not merged, the most specific non-empty one becomes the mirror policies. @@ -1811,7 +1857,18 @@ type RouteAction struct { // there is already a hash generated, the hash is returned immediately, // ignoring the rest of the hash policy list. HashPolicy []*RouteAction_HashPolicy `protobuf:"bytes,15,rep,name=hash_policy,json=hashPolicy,proto3" json:"hash_policy,omitempty"` - // Indicates that the route has a CORS policy. + // Indicates that the route has a CORS policy. This field is ignored if related cors policy is + // found in the :ref:`Route.typed_per_filter_config` or + // :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config`. + // + // .. attention:: + // + // This option has been deprecated. 
Please use + // :ref:`Route.typed_per_filter_config` or + // :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` + // to configure the CORS HTTP filter. + // + // Deprecated: Do not use. Cors *CorsPolicy `protobuf:"bytes,17,opt,name=cors,proto3" json:"cors,omitempty"` // Deprecated by :ref:`grpc_timeout_header_max ` // If present, and the request is a gRPC request, use the @@ -1819,7 +1876,7 @@ type RouteAction struct { // or its default value (infinity) instead of // :ref:`timeout `, but limit the applied timeout // to the maximum value specified here. If configured as 0, the maximum allowed timeout for - // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used + // gRPC requests is infinity. If not configured at all, the ``grpc-timeout`` header is not used // and gRPC requests time out like any other requests using // :ref:`timeout ` or its default. // This can be used to prevent unexpected upstream request timeouts due to potentially long @@ -1837,7 +1894,7 @@ type RouteAction struct { // Deprecated: Do not use. MaxGrpcTimeout *duration.Duration `protobuf:"bytes,23,opt,name=max_grpc_timeout,json=maxGrpcTimeout,proto3" json:"max_grpc_timeout,omitempty"` // Deprecated by :ref:`grpc_timeout_header_offset `. - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting + // If present, Envoy will adjust the timeout provided by the ``grpc-timeout`` header by subtracting // the provided duration from the header. This is useful in allowing Envoy to set its global // timeout to be less than that of the deadline imposed by the calling client, which makes it more // likely that Envoy will handle the timeout instead of having the call canceled by the client. 
@@ -1883,7 +1940,7 @@ type RouteAction struct { func (x *RouteAction) Reset() { *x = RouteAction{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[7] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1896,7 +1953,7 @@ func (x *RouteAction) String() string { func (*RouteAction) ProtoMessage() {} func (x *RouteAction) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[7] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1909,7 +1966,7 @@ func (x *RouteAction) ProtoReflect() protoreflect.Message { // Deprecated: Use RouteAction.ProtoReflect.Descriptor instead. func (*RouteAction) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8} } func (m *RouteAction) GetClusterSpecifier() isRouteAction_ClusterSpecifier { @@ -1982,11 +2039,11 @@ func (x *RouteAction) GetRegexRewrite() *v32.RegexMatchAndSubstitute { return nil } -func (x *RouteAction) GetPathTemplateRewrite() string { +func (x *RouteAction) GetPathRewritePolicy() *v31.TypedExtensionConfig { if x != nil { - return x.PathTemplateRewrite + return x.PathRewritePolicy } - return "" + return nil } func (m *RouteAction) GetHostRewriteSpecifier() isRouteAction_HostRewriteSpecifier { @@ -2059,7 +2116,7 @@ func (x *RouteAction) GetRetryPolicy() *RetryPolicy { return nil } -func (x *RouteAction) GetRetryPolicyTypedConfig() *any.Any { +func (x *RouteAction) GetRetryPolicyTypedConfig() *any1.Any { if x != nil { return x.RetryPolicyTypedConfig } @@ -2102,6 +2159,7 @@ func (x *RouteAction) GetHashPolicy() []*RouteAction_HashPolicy 
{ return nil } +// Deprecated: Do not use. func (x *RouteAction) GetCors() *CorsPolicy { if x != nil { return x.Cors @@ -2187,8 +2245,8 @@ type RouteAction_ClusterHeader struct { // // .. attention:: // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + // Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + // ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. // // .. note:: // @@ -2246,7 +2304,7 @@ type RouteAction_AutoHostRewrite struct { // Indicates that during forwarding, the host header will be swapped with // the hostname of the upstream host chosen by the cluster manager. This // option is applicable only when the destination cluster for a route is of - // type *strict_dns* or *logical_dns*. Setting this to true with other cluster types + // type ``strict_dns`` or ``logical_dns``. Setting this to true with other cluster types // has no effect. Using this option will append the // :ref:`config_http_conn_man_headers_x-forwarded-host` header if // :ref:`append_x_forwarded_host ` @@ -2292,7 +2350,7 @@ type RouteAction_HostRewritePathRegex struct { // regex: "^/(.+)/.+$" // substitution: \1 // - // Would rewrite the host header to `envoyproxy.io` given the path `/envoyproxy.io/some/path`. + // Would rewrite the host header to ``envoyproxy.io`` given the path ``/envoyproxy.io/some/path``. HostRewritePathRegex *v32.RegexMatchAndSubstitute `protobuf:"bytes,35,opt,name=host_rewrite_path_regex,json=hostRewritePathRegex,proto3,oneof"` } @@ -2372,7 +2430,7 @@ type RetryPolicy struct { RetriableStatusCodes []uint32 `protobuf:"varint,7,rep,packed,name=retriable_status_codes,json=retriableStatusCodes,proto3" json:"retriable_status_codes,omitempty"` // Specifies parameters that control exponential retry back off. 
This parameter is optional, in which case the // default base interval is 25 milliseconds or, if set, the current value of the - // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times + // ``upstream.base_retry_backoff_ms`` runtime parameter. The default maximum interval is 10 times // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` // describes Envoy's back-off algorithm. RetryBackOff *RetryPolicy_RetryBackOff `protobuf:"bytes,8,opt,name=retry_back_off,json=retryBackOff,proto3" json:"retry_back_off,omitempty"` @@ -2381,7 +2439,7 @@ type RetryPolicy struct { // return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to // provide feedback to the client on how long to wait before retrying. If // configured, this back-off strategy will be used instead of the - // default exponential back off strategy (configured using `retry_back_off`) + // default exponential back off strategy (configured using ``retry_back_off``) // whenever a response includes the matching headers. RateLimitedRetryBackOff *RetryPolicy_RateLimitedRetryBackOff `protobuf:"bytes,11,opt,name=rate_limited_retry_back_off,json=rateLimitedRetryBackOff,proto3" json:"rate_limited_retry_back_off,omitempty"` // HTTP response headers that trigger a retry if present in the response. 
A retry will be @@ -2395,7 +2453,7 @@ type RetryPolicy struct { func (x *RetryPolicy) Reset() { *x = RetryPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[8] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2408,7 +2466,7 @@ func (x *RetryPolicy) String() string { func (*RetryPolicy) ProtoMessage() {} func (x *RetryPolicy) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[8] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2421,7 +2479,7 @@ func (x *RetryPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead. func (*RetryPolicy) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9} } func (x *RetryPolicy) GetRetryOn() string { @@ -2550,7 +2608,7 @@ type HedgePolicy struct { func (x *HedgePolicy) Reset() { *x = HedgePolicy{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[9] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2563,7 +2621,7 @@ func (x *HedgePolicy) String() string { func (*HedgePolicy) ProtoMessage() {} func (x *HedgePolicy) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[9] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ 
-2576,7 +2634,7 @@ func (x *HedgePolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use HedgePolicy.ProtoReflect.Descriptor instead. func (*HedgePolicy) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{10} } func (x *HedgePolicy) GetInitialRequests() *wrappers.UInt32Value { @@ -2607,10 +2665,10 @@ type RedirectAction struct { unknownFields protoimpl.UnknownFields // When the scheme redirection take place, the following rules apply: - // 1. If the source URI scheme is `http` and the port is explicitly - // set to `:80`, the port will be removed after the redirection - // 2. If the source URI scheme is `https` and the port is explicitly - // set to `:443`, the port will be removed after the redirection + // 1. If the source URI scheme is ``http`` and the port is explicitly + // set to ``:80``, the port will be removed after the redirection + // 2. 
If the source URI scheme is ``https`` and the port is explicitly + // set to ``:443``, the port will be removed after the redirection // // Types that are assignable to SchemeRewriteSpecifier: // *RedirectAction_HttpsRedirect @@ -2636,7 +2694,7 @@ type RedirectAction struct { func (x *RedirectAction) Reset() { *x = RedirectAction{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[10] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2649,7 +2707,7 @@ func (x *RedirectAction) String() string { func (*RedirectAction) ProtoMessage() {} func (x *RedirectAction) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[10] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2662,7 +2720,7 @@ func (x *RedirectAction) ProtoReflect() protoreflect.Message { // Deprecated: Use RedirectAction.ProtoReflect.Descriptor instead. func (*RedirectAction) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{10} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{11} } func (m *RedirectAction) GetSchemeRewriteSpecifier() isRedirectAction_SchemeRewriteSpecifier { @@ -2841,7 +2899,7 @@ type DirectResponseAction struct { // // .. note:: // - // Headers can be specified using *response_headers_to_add* in the enclosing + // Headers can be specified using ``response_headers_to_add`` in the enclosing // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` or // :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`. 
Body *v31.DataSource `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` @@ -2850,7 +2908,7 @@ type DirectResponseAction struct { func (x *DirectResponseAction) Reset() { *x = DirectResponseAction{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[11] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2863,7 +2921,7 @@ func (x *DirectResponseAction) String() string { func (*DirectResponseAction) ProtoMessage() {} func (x *DirectResponseAction) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[11] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2876,7 +2934,7 @@ func (x *DirectResponseAction) ProtoReflect() protoreflect.Message { // Deprecated: Use DirectResponseAction.ProtoReflect.Descriptor instead. 
func (*DirectResponseAction) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{11} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{12} } func (x *DirectResponseAction) GetStatus() uint32 { @@ -2903,7 +2961,7 @@ type NonForwardingAction struct { func (x *NonForwardingAction) Reset() { *x = NonForwardingAction{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[12] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2916,7 +2974,7 @@ func (x *NonForwardingAction) String() string { func (*NonForwardingAction) ProtoMessage() {} func (x *NonForwardingAction) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[12] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2929,7 +2987,7 @@ func (x *NonForwardingAction) ProtoReflect() protoreflect.Message { // Deprecated: Use NonForwardingAction.ProtoReflect.Descriptor instead. 
func (*NonForwardingAction) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{12} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{13} } type Decorator struct { @@ -2953,7 +3011,7 @@ type Decorator struct { func (x *Decorator) Reset() { *x = Decorator{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[13] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2966,7 +3024,7 @@ func (x *Decorator) String() string { func (*Decorator) ProtoMessage() {} func (x *Decorator) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[13] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2979,7 +3037,7 @@ func (x *Decorator) ProtoReflect() protoreflect.Message { // Deprecated: Use Decorator.ProtoReflect.Descriptor instead. func (*Decorator) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{13} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{14} } func (x *Decorator) GetOperation() string { @@ -3004,7 +3062,7 @@ type Tracing struct { // Target percentage of requests managed by this HTTP connection manager that will be force // traced if the :ref:`x-client-trace-id ` // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager + // 'tracing.client_enabled' in the :ref:`HTTP Connection Manager // `. 
// Default: 100% ClientSampling *v33.FractionalPercent `protobuf:"bytes,1,opt,name=client_sampling,json=clientSampling,proto3" json:"client_sampling,omitempty"` @@ -3035,7 +3093,7 @@ type Tracing struct { func (x *Tracing) Reset() { *x = Tracing{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[14] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3048,7 +3106,7 @@ func (x *Tracing) String() string { func (*Tracing) ProtoMessage() {} func (x *Tracing) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[14] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3061,7 +3119,7 @@ func (x *Tracing) ProtoReflect() protoreflect.Message { // Deprecated: Use Tracing.ProtoReflect.Descriptor instead. func (*Tracing) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{14} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{15} } func (x *Tracing) GetClientSampling() *v33.FractionalPercent { @@ -3115,7 +3173,7 @@ type VirtualCluster struct { unknownFields protoimpl.UnknownFields // Specifies a list of header matchers to use for matching requests. Each specified header must - // match. The pseudo-headers `:path` and `:method` can be used to match the request path and + // match. The pseudo-headers ``:path`` and ``:method`` can be used to match the request path and // method, respectively. Headers []*HeaderMatcher `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty"` // Specifies the name of the virtual cluster. 
The virtual cluster name as well @@ -3127,7 +3185,7 @@ type VirtualCluster struct { func (x *VirtualCluster) Reset() { *x = VirtualCluster{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[15] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3140,7 +3198,7 @@ func (x *VirtualCluster) String() string { func (*VirtualCluster) ProtoMessage() {} func (x *VirtualCluster) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[15] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3153,7 +3211,7 @@ func (x *VirtualCluster) ProtoReflect() protoreflect.Message { // Deprecated: Use VirtualCluster.ProtoReflect.Descriptor instead. func (*VirtualCluster) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{15} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16} } func (x *VirtualCluster) GetHeaders() []*HeaderMatcher { @@ -3204,7 +3262,7 @@ type RateLimit struct { func (x *RateLimit) Reset() { *x = RateLimit{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[16] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3217,7 +3275,7 @@ func (x *RateLimit) String() string { func (*RateLimit) ProtoMessage() {} func (x *RateLimit) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[16] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3230,7 +3288,7 @@ func (x *RateLimit) ProtoReflect() protoreflect.Message { // Deprecated: Use RateLimit.ProtoReflect.Descriptor instead. func (*RateLimit) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17} } func (x *RateLimit) GetStage() *wrappers.UInt32Value { @@ -3263,19 +3321,21 @@ func (x *RateLimit) GetLimit() *RateLimit_Override { // .. attention:: // -// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* -// header. Thus, if attempting to match on *Host*, match on *:authority* instead. +// Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 ``Host`` +// header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. // // .. attention:: // -// To route on HTTP method, use the special HTTP/2 *:method* header. This works for both +// To route on HTTP method, use the special HTTP/2 ``:method`` header. This works for both // HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., // // .. code-block:: json // // { // "name": ":method", -// "exact_match": "POST" +// "string_match": { +// "exact": "POST" +// } // } // // .. attention:: @@ -3285,7 +3345,7 @@ func (x *RateLimit) GetLimit() *RateLimit_Override { // value. // // [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] -// [#next-free-field: 14] +// [#next-free-field: 15] type HeaderMatcher struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3309,15 +3369,42 @@ type HeaderMatcher struct { // // Examples: // - // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. + // * The regex ``\d{3}`` does not match the value ``1234``, so it will match when inverted. 
// * The range [-10,0) will match the value -1, so it will not match when inverted. InvertMatch bool `protobuf:"varint,8,opt,name=invert_match,json=invertMatch,proto3" json:"invert_match,omitempty"` + // If specified, for any header match rule, if the header match rule specified header + // does not exist, this header value will be treated as empty. Defaults to false. + // + // Examples: + // + // * The header match rule specified header "header1" to range match of [0, 10], + // :ref:`invert_match ` + // is set to true and :ref:`treat_missing_header_as_empty ` + // is set to true; The "header1" header is not present. The match rule will + // treat the "header1" as an empty header. The empty header does not match the range, + // so it will match when inverted. + // * The header match rule specified header "header2" to range match of [0, 10], + // :ref:`invert_match ` + // is set to true and :ref:`treat_missing_header_as_empty ` + // is set to false; The "header2" header is not present and the header + // matcher rule for "header2" will be ignored so it will not match. + // * The header match rule specified header "header3" to a string regex match + // ``^$`` which means an empty string, and + // :ref:`treat_missing_header_as_empty ` + // is set to true; The "header3" header is not present. + // The match rule will treat the "header3" header as an empty header so it will match. + // * The header match rule specified header "header4" to a string regex match + // ``^$`` which means an empty string, and + // :ref:`treat_missing_header_as_empty ` + // is set to false; The "header4" header is not present. + // The match rule for "header4" will be ignored so it will not match. 
+ TreatMissingHeaderAsEmpty bool `protobuf:"varint,14,opt,name=treat_missing_header_as_empty,json=treatMissingHeaderAsEmpty,proto3" json:"treat_missing_header_as_empty,omitempty"` } func (x *HeaderMatcher) Reset() { *x = HeaderMatcher{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[17] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3330,7 +3417,7 @@ func (x *HeaderMatcher) String() string { func (*HeaderMatcher) ProtoMessage() {} func (x *HeaderMatcher) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[17] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3343,7 +3430,7 @@ func (x *HeaderMatcher) ProtoReflect() protoreflect.Message { // Deprecated: Use HeaderMatcher.ProtoReflect.Descriptor instead. 
func (*HeaderMatcher) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{18} } func (x *HeaderMatcher) GetName() string { @@ -3428,6 +3515,13 @@ func (x *HeaderMatcher) GetInvertMatch() bool { return false } +func (x *HeaderMatcher) GetTreatMissingHeaderAsEmpty() bool { + if x != nil { + return x.TreatMissingHeaderAsEmpty + } + return false +} + type isHeaderMatcher_HeaderMatchSpecifier interface { isHeaderMatcher_HeaderMatchSpecifier() } @@ -3460,8 +3554,8 @@ type HeaderMatcher_RangeMatch struct { // // Examples: // - // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, - // "-1somestring" + // * For range [-10,0), route will match for header value -1, but not for 0, ``somestring``, 10.9, + // ``-1somestring`` RangeMatch *v33.Int64Range `protobuf:"bytes,6,opt,name=range_match,json=rangeMatch,proto3,oneof"` } @@ -3478,7 +3572,7 @@ type HeaderMatcher_PrefixMatch struct { // // Examples: // - // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. + // * The prefix ``abcd`` matches the value ``abcdxyz``, but not for ``abcxyz``. // // Deprecated: Do not use. PrefixMatch string `protobuf:"bytes,9,opt,name=prefix_match,json=prefixMatch,proto3,oneof"` @@ -3491,7 +3585,7 @@ type HeaderMatcher_SuffixMatch struct { // // Examples: // - // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. + // * The suffix ``abcd`` matches the value ``xyzabcd``, but not for ``xyzbcd``. // // Deprecated: Do not use. SuffixMatch string `protobuf:"bytes,10,opt,name=suffix_match,json=suffixMatch,proto3,oneof"` @@ -3505,7 +3599,7 @@ type HeaderMatcher_ContainsMatch struct { // // Examples: // - // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*. + // * The value ``abcd`` matches the value ``xyzabcdpqr``, but not for ``xyzbcdpqr``. 
// // Deprecated: Do not use. ContainsMatch string `protobuf:"bytes,12,opt,name=contains_match,json=containsMatch,proto3,oneof"` @@ -3541,7 +3635,7 @@ type QueryParameterMatcher struct { unknownFields protoimpl.UnknownFields // Specifies the name of a key that must be present in the requested - // *path*'s query string. + // ``path``'s query string. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Types that are assignable to QueryParameterMatchSpecifier: // *QueryParameterMatcher_StringMatch @@ -3552,7 +3646,7 @@ type QueryParameterMatcher struct { func (x *QueryParameterMatcher) Reset() { *x = QueryParameterMatcher{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[18] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3565,7 +3659,7 @@ func (x *QueryParameterMatcher) String() string { func (*QueryParameterMatcher) ProtoMessage() {} func (x *QueryParameterMatcher) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[18] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3578,7 +3672,7 @@ func (x *QueryParameterMatcher) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryParameterMatcher.ProtoReflect.Descriptor instead. 
func (*QueryParameterMatcher) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{18} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{19} } func (x *QueryParameterMatcher) GetName() string { @@ -3659,7 +3753,7 @@ type InternalRedirectPolicy struct { func (x *InternalRedirectPolicy) Reset() { *x = InternalRedirectPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[19] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3672,7 +3766,7 @@ func (x *InternalRedirectPolicy) String() string { func (*InternalRedirectPolicy) ProtoMessage() {} func (x *InternalRedirectPolicy) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[19] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3685,7 +3779,7 @@ func (x *InternalRedirectPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use InternalRedirectPolicy.ProtoReflect.Descriptor instead. func (*InternalRedirectPolicy) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{19} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{20} } func (x *InternalRedirectPolicy) GetMaxInternalRedirects() *wrappers.UInt32Value { @@ -3729,17 +3823,30 @@ type FilterConfig struct { unknownFields protoimpl.UnknownFields // The filter config. 
- Config *any.Any `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Config *any1.Any `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` // If true, the filter is optional, meaning that if the client does // not support the specified filter, it may ignore the map entry rather // than rejecting the config. IsOptional bool `protobuf:"varint,2,opt,name=is_optional,json=isOptional,proto3" json:"is_optional,omitempty"` + // If true, the filter is disabled in the route or virtual host and the ``config`` field is ignored. + // + // .. note:: + // + // This field will take effect when the request arrive and filter chain is created for the request. + // If initial route is selected for the request and a filter is disabled in the initial route, then + // the filter will not be added to the filter chain. + // And if the request is mutated later and re-match to another route, the disabled filter by the + // initial route will not be added back to the filter chain because the filter chain is already + // created and it is too late to change the chain. + // + // This field only make sense for the downstream HTTP filters for now. 
+ Disabled bool `protobuf:"varint,3,opt,name=disabled,proto3" json:"disabled,omitempty"` } func (x *FilterConfig) Reset() { *x = FilterConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[20] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3752,7 +3859,7 @@ func (x *FilterConfig) String() string { func (*FilterConfig) ProtoMessage() {} func (x *FilterConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[20] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3765,10 +3872,10 @@ func (x *FilterConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use FilterConfig.ProtoReflect.Descriptor instead. func (*FilterConfig) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{20} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{21} } -func (x *FilterConfig) GetConfig() *any.Any { +func (x *FilterConfig) GetConfig() *any1.Any { if x != nil { return x.Config } @@ -3782,18 +3889,25 @@ func (x *FilterConfig) GetIsOptional() bool { return false } +func (x *FilterConfig) GetDisabled() bool { + if x != nil { + return x.Disabled + } + return false +} + // [#next-free-field: 13] type WeightedCluster_ClusterWeight struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Only one of *name* and *cluster_header* may be specified. + // Only one of ``name`` and ``cluster_header`` may be specified. // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] // Name of the upstream cluster. 
The cluster must exist in the // :ref:`cluster manager configuration `. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Only one of *name* and *cluster_header* may be specified. + // Only one of ``name`` and ``cluster_header`` may be specified. // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1 }] // Envoy will determine the cluster to route to by reading the value of the // HTTP header named by cluster_header from the request headers. If the @@ -3802,23 +3916,24 @@ type WeightedCluster_ClusterWeight struct { // // .. attention:: // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + // Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + // ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. // // .. note:: // // If the header appears multiple times only the first value is used. ClusterHeader string `protobuf:"bytes,12,opt,name=cluster_header,json=clusterHeader,proto3" json:"cluster_header,omitempty"` - // An integer between 0 and :ref:`total_weight - // `. When a request matches the route, - // the choice of an upstream cluster is determined by its weight. The sum of weights across all - // entries in the clusters array must add up to the total_weight, which defaults to 100. + // The weight of the cluster. This value is relative to the other clusters' + // weights. When a request matches the route, the choice of an upstream cluster + // is determined by its weight. The sum of weights across all + // entries in the clusters array must be greater than 0, and must not exceed + // uint32_t maximal value (4294967295). 
Weight *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=weight,proto3" json:"weight,omitempty"` // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in // the upstream cluster with metadata matching what is set in this field will be considered for // load balancing. Note that this will be merged with what's provided in // :ref:`RouteAction.metadata_match `, with - // values here taking precedence. The filter name should be specified as *envoy.lb*. + // values here taking precedence. The filter name should be specified as ``envoy.lb``. MetadataMatch *v31.Metadata `protobuf:"bytes,3,opt,name=metadata_match,json=metadataMatch,proto3" json:"metadata_match,omitempty"` // Specifies a list of headers to be added to requests when this cluster is selected // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. @@ -3842,15 +3957,20 @@ type WeightedCluster_ClusterWeight struct { // Specifies a list of headers to be removed from responses when this cluster is selected // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. ResponseHeadersToRemove []string `protobuf:"bytes,6,rep,name=response_headers_to_remove,json=responseHeadersToRemove,proto3" json:"response_headers_to_remove,omitempty"` - // The per_filter_config field can be used to provide weighted cluster-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. + // The per_filter_config field can be used to provide weighted cluster-specific configurations + // for filters. + // The key should match the :ref:`filter config name + // `. + // The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also + // be used for the backwards compatibility. 
If there is no entry referred by the filter config name, the + // entry referred by the canonical filter name will be provided to the filters as fallback. + // + // Use of this field is filter specific; + // see the :ref:`HTTP filter documentation ` for if and how it is utilized. // [#comment: An entry's value may be wrapped in a // :ref:`FilterConfig` // message to specify additional options.] - TypedPerFilterConfig map[string]*any.Any `protobuf:"bytes,10,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypedPerFilterConfig map[string]*any1.Any `protobuf:"bytes,10,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Types that are assignable to HostRewriteSpecifier: // *WeightedCluster_ClusterWeight_HostRewriteLiteral HostRewriteSpecifier isWeightedCluster_ClusterWeight_HostRewriteSpecifier `protobuf_oneof:"host_rewrite_specifier"` @@ -3859,7 +3979,7 @@ type WeightedCluster_ClusterWeight struct { func (x *WeightedCluster_ClusterWeight) Reset() { *x = WeightedCluster_ClusterWeight{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[23] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3872,7 +3992,7 @@ func (x *WeightedCluster_ClusterWeight) String() string { func (*WeightedCluster_ClusterWeight) ProtoMessage() {} func (x *WeightedCluster_ClusterWeight) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[23] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3885,7 +4005,7 @@ func (x *WeightedCluster_ClusterWeight) ProtoReflect() protoreflect.Message { // Deprecated: Use WeightedCluster_ClusterWeight.ProtoReflect.Descriptor instead. func (*WeightedCluster_ClusterWeight) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{3, 0} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{4, 0} } func (x *WeightedCluster_ClusterWeight) GetName() string { @@ -3944,7 +4064,7 @@ func (x *WeightedCluster_ClusterWeight) GetResponseHeadersToRemove() []string { return nil } -func (x *WeightedCluster_ClusterWeight) GetTypedPerFilterConfig() map[string]*any.Any { +func (x *WeightedCluster_ClusterWeight) GetTypedPerFilterConfig() map[string]*any1.Any { if x != nil { return x.TypedPerFilterConfig } @@ -3987,7 +4107,7 @@ type RouteMatch_GrpcRouteMatchOptions struct { func (x *RouteMatch_GrpcRouteMatchOptions) Reset() { *x = RouteMatch_GrpcRouteMatchOptions{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[25] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4000,7 +4120,7 @@ func (x *RouteMatch_GrpcRouteMatchOptions) String() string { func (*RouteMatch_GrpcRouteMatchOptions) ProtoMessage() {} func (x *RouteMatch_GrpcRouteMatchOptions) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[25] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4013,7 +4133,7 @@ func (x *RouteMatch_GrpcRouteMatchOptions) ProtoReflect() protoreflect.Message { // Deprecated: Use RouteMatch_GrpcRouteMatchOptions.ProtoReflect.Descriptor 
instead. func (*RouteMatch_GrpcRouteMatchOptions) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{5, 0} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6, 0} } type RouteMatch_TlsContextMatchOptions struct { @@ -4032,7 +4152,7 @@ type RouteMatch_TlsContextMatchOptions struct { func (x *RouteMatch_TlsContextMatchOptions) Reset() { *x = RouteMatch_TlsContextMatchOptions{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[26] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4045,7 +4165,7 @@ func (x *RouteMatch_TlsContextMatchOptions) String() string { func (*RouteMatch_TlsContextMatchOptions) ProtoMessage() {} func (x *RouteMatch_TlsContextMatchOptions) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[26] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4058,7 +4178,7 @@ func (x *RouteMatch_TlsContextMatchOptions) ProtoReflect() protoreflect.Message // Deprecated: Use RouteMatch_TlsContextMatchOptions.ProtoReflect.Descriptor instead. 
func (*RouteMatch_TlsContextMatchOptions) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{5, 1} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6, 1} } func (x *RouteMatch_TlsContextMatchOptions) GetPresented() *wrappers.BoolValue { @@ -4085,7 +4205,7 @@ type RouteMatch_ConnectMatcher struct { func (x *RouteMatch_ConnectMatcher) Reset() { *x = RouteMatch_ConnectMatcher{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[27] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4098,7 +4218,7 @@ func (x *RouteMatch_ConnectMatcher) String() string { func (*RouteMatch_ConnectMatcher) ProtoMessage() {} func (x *RouteMatch_ConnectMatcher) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[27] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4111,7 +4231,7 @@ func (x *RouteMatch_ConnectMatcher) ProtoReflect() protoreflect.Message { // Deprecated: Use RouteMatch_ConnectMatcher.ProtoReflect.Descriptor instead. func (*RouteMatch_ConnectMatcher) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{5, 2} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6, 2} } // The router is capable of shadowing traffic from one cluster to another. The current @@ -4119,24 +4239,28 @@ func (*RouteMatch_ConnectMatcher) Descriptor() ([]byte, []int) { // respond before returning the response from the primary cluster. All normal statistics are // collected for the shadow cluster making this feature useful for testing. 
// -// During shadowing, the host/authority header is altered such that *-shadow* is appended. This is -// useful for logging. For example, *cluster1* becomes *cluster1-shadow*. +// During shadowing, the host/authority header is altered such that ``-shadow`` is appended. This is +// useful for logging. For example, ``cluster1`` becomes ``cluster1-shadow``. // // .. note:: // // Shadowing will not be triggered if the primary cluster does not exist. +// +// .. note:: +// +// Shadowing doesn't support Http CONNECT and upgrades. // [#next-free-field: 6] type RouteAction_RequestMirrorPolicy struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Only one of *cluster* and *cluster_header* can be specified. + // Only one of ``cluster`` and ``cluster_header`` can be specified. // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] // Specifies the cluster that requests will be mirrored to. The cluster must // exist in the cluster manager configuration. Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - // Only one of *cluster* and *cluster_header* can be specified. + // Only one of ``cluster`` and ``cluster_header`` can be specified. // Envoy will determine the cluster to route to by reading the value of the // HTTP header named by cluster_header from the request headers. Only the first value in header is used, // and no shadow request will happen if the value is not found in headers. Envoy will not wait for @@ -4144,8 +4268,8 @@ type RouteAction_RequestMirrorPolicy struct { // // .. attention:: // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + // Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + // ``Host`` header. 
Thus, if attempting to match on ``Host``, match on ``:authority`` instead. // // .. note:: // @@ -4153,7 +4277,7 @@ type RouteAction_RequestMirrorPolicy struct { ClusterHeader string `protobuf:"bytes,5,opt,name=cluster_header,json=clusterHeader,proto3" json:"cluster_header,omitempty"` // If not specified, all requests to the target cluster will be mirrored. // - // If specified, this field takes precedence over the `runtime_key` field and requests must also + // If specified, this field takes precedence over the ``runtime_key`` field and requests must also // fall under the percentage of matches indicated by this field. // // For some fraction N/D, a random number in the range [0,D) is selected. If the @@ -4167,7 +4291,7 @@ type RouteAction_RequestMirrorPolicy struct { func (x *RouteAction_RequestMirrorPolicy) Reset() { *x = RouteAction_RequestMirrorPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[28] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4180,7 +4304,7 @@ func (x *RouteAction_RequestMirrorPolicy) String() string { func (*RouteAction_RequestMirrorPolicy) ProtoMessage() {} func (x *RouteAction_RequestMirrorPolicy) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[28] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4193,7 +4317,7 @@ func (x *RouteAction_RequestMirrorPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use RouteAction_RequestMirrorPolicy.ProtoReflect.Descriptor instead. 
func (*RouteAction_RequestMirrorPolicy) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7, 0} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 0} } func (x *RouteAction_RequestMirrorPolicy) GetCluster() string { @@ -4264,7 +4388,7 @@ type RouteAction_HashPolicy struct { func (x *RouteAction_HashPolicy) Reset() { *x = RouteAction_HashPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[29] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4277,7 +4401,7 @@ func (x *RouteAction_HashPolicy) String() string { func (*RouteAction_HashPolicy) ProtoMessage() {} func (x *RouteAction_HashPolicy) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[29] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4290,7 +4414,7 @@ func (x *RouteAction_HashPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use RouteAction_HashPolicy.ProtoReflect.Descriptor instead. 
func (*RouteAction_HashPolicy) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7, 1} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1} } func (m *RouteAction_HashPolicy) GetPolicySpecifier() isRouteAction_HashPolicy_PolicySpecifier { @@ -4408,7 +4532,7 @@ type RouteAction_UpgradeConfig struct { func (x *RouteAction_UpgradeConfig) Reset() { *x = RouteAction_UpgradeConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[30] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4421,7 +4545,7 @@ func (x *RouteAction_UpgradeConfig) String() string { func (*RouteAction_UpgradeConfig) ProtoMessage() {} func (x *RouteAction_UpgradeConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[30] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4434,7 +4558,7 @@ func (x *RouteAction_UpgradeConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use RouteAction_UpgradeConfig.ProtoReflect.Descriptor instead. 
func (*RouteAction_UpgradeConfig) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7, 2} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 2} } func (x *RouteAction_UpgradeConfig) GetUpgradeType() string { @@ -4474,10 +4598,10 @@ type RouteAction_MaxStreamDuration struct { MaxStreamDuration *duration.Duration `protobuf:"bytes,1,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"` // If present, and the request contains a `grpc-timeout header // `_, use that value as the - // *max_stream_duration*, but limit the applied timeout to the maximum value specified here. - // If set to 0, the `grpc-timeout` header is used without modification. + // ``max_stream_duration``, but limit the applied timeout to the maximum value specified here. + // If set to 0, the ``grpc-timeout`` header is used without modification. GrpcTimeoutHeaderMax *duration.Duration `protobuf:"bytes,2,opt,name=grpc_timeout_header_max,json=grpcTimeoutHeaderMax,proto3" json:"grpc_timeout_header_max,omitempty"` - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by + // If present, Envoy will adjust the timeout provided by the ``grpc-timeout`` header by // subtracting the provided duration from the header. 
This is useful for allowing Envoy to set // its global timeout to be less than that of the deadline imposed by the calling client, which // makes it more likely that Envoy will handle the timeout instead of having the call canceled @@ -4489,7 +4613,7 @@ type RouteAction_MaxStreamDuration struct { func (x *RouteAction_MaxStreamDuration) Reset() { *x = RouteAction_MaxStreamDuration{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[31] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4502,7 +4626,7 @@ func (x *RouteAction_MaxStreamDuration) String() string { func (*RouteAction_MaxStreamDuration) ProtoMessage() {} func (x *RouteAction_MaxStreamDuration) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[31] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4515,7 +4639,7 @@ func (x *RouteAction_MaxStreamDuration) ProtoReflect() protoreflect.Message { // Deprecated: Use RouteAction_MaxStreamDuration.ProtoReflect.Descriptor instead. 
func (*RouteAction_MaxStreamDuration) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7, 3} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 3} } func (x *RouteAction_MaxStreamDuration) GetMaxStreamDuration() *duration.Duration { @@ -4555,7 +4679,7 @@ type RouteAction_HashPolicy_Header struct { func (x *RouteAction_HashPolicy_Header) Reset() { *x = RouteAction_HashPolicy_Header{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[32] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4568,7 +4692,7 @@ func (x *RouteAction_HashPolicy_Header) String() string { func (*RouteAction_HashPolicy_Header) ProtoMessage() {} func (x *RouteAction_HashPolicy_Header) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[32] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4581,7 +4705,7 @@ func (x *RouteAction_HashPolicy_Header) ProtoReflect() protoreflect.Message { // Deprecated: Use RouteAction_HashPolicy_Header.ProtoReflect.Descriptor instead. 
func (*RouteAction_HashPolicy_Header) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7, 1, 0} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 0} } func (x *RouteAction_HashPolicy_Header) GetHeaderName() string { @@ -4633,7 +4757,7 @@ type RouteAction_HashPolicy_Cookie struct { func (x *RouteAction_HashPolicy_Cookie) Reset() { *x = RouteAction_HashPolicy_Cookie{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[33] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4646,7 +4770,7 @@ func (x *RouteAction_HashPolicy_Cookie) String() string { func (*RouteAction_HashPolicy_Cookie) ProtoMessage() {} func (x *RouteAction_HashPolicy_Cookie) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[33] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4659,7 +4783,7 @@ func (x *RouteAction_HashPolicy_Cookie) ProtoReflect() protoreflect.Message { // Deprecated: Use RouteAction_HashPolicy_Cookie.ProtoReflect.Descriptor instead. 
func (*RouteAction_HashPolicy_Cookie) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7, 1, 1} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 1} } func (x *RouteAction_HashPolicy_Cookie) GetName() string { @@ -4695,7 +4819,7 @@ type RouteAction_HashPolicy_ConnectionProperties struct { func (x *RouteAction_HashPolicy_ConnectionProperties) Reset() { *x = RouteAction_HashPolicy_ConnectionProperties{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[34] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4708,7 +4832,7 @@ func (x *RouteAction_HashPolicy_ConnectionProperties) String() string { func (*RouteAction_HashPolicy_ConnectionProperties) ProtoMessage() {} func (x *RouteAction_HashPolicy_ConnectionProperties) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[34] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4721,7 +4845,7 @@ func (x *RouteAction_HashPolicy_ConnectionProperties) ProtoReflect() protoreflec // Deprecated: Use RouteAction_HashPolicy_ConnectionProperties.ProtoReflect.Descriptor instead. 
func (*RouteAction_HashPolicy_ConnectionProperties) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7, 1, 2} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 2} } func (x *RouteAction_HashPolicy_ConnectionProperties) GetSourceIp() bool { @@ -4745,7 +4869,7 @@ type RouteAction_HashPolicy_QueryParameter struct { func (x *RouteAction_HashPolicy_QueryParameter) Reset() { *x = RouteAction_HashPolicy_QueryParameter{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[35] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4758,7 +4882,7 @@ func (x *RouteAction_HashPolicy_QueryParameter) String() string { func (*RouteAction_HashPolicy_QueryParameter) ProtoMessage() {} func (x *RouteAction_HashPolicy_QueryParameter) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[35] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4771,7 +4895,7 @@ func (x *RouteAction_HashPolicy_QueryParameter) ProtoReflect() protoreflect.Mess // Deprecated: Use RouteAction_HashPolicy_QueryParameter.ProtoReflect.Descriptor instead. 
func (*RouteAction_HashPolicy_QueryParameter) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7, 1, 3} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 3} } func (x *RouteAction_HashPolicy_QueryParameter) GetName() string { @@ -4795,7 +4919,7 @@ type RouteAction_HashPolicy_FilterState struct { func (x *RouteAction_HashPolicy_FilterState) Reset() { *x = RouteAction_HashPolicy_FilterState{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[36] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4808,7 +4932,7 @@ func (x *RouteAction_HashPolicy_FilterState) String() string { func (*RouteAction_HashPolicy_FilterState) ProtoMessage() {} func (x *RouteAction_HashPolicy_FilterState) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[36] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4821,7 +4945,7 @@ func (x *RouteAction_HashPolicy_FilterState) ProtoReflect() protoreflect.Message // Deprecated: Use RouteAction_HashPolicy_FilterState.ProtoReflect.Descriptor instead. 
func (*RouteAction_HashPolicy_FilterState) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7, 1, 4} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 4} } func (x *RouteAction_HashPolicy_FilterState) GetKey() string { @@ -4847,7 +4971,7 @@ type RouteAction_UpgradeConfig_ConnectConfig struct { func (x *RouteAction_UpgradeConfig_ConnectConfig) Reset() { *x = RouteAction_UpgradeConfig_ConnectConfig{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[37] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4860,7 +4984,7 @@ func (x *RouteAction_UpgradeConfig_ConnectConfig) String() string { func (*RouteAction_UpgradeConfig_ConnectConfig) ProtoMessage() {} func (x *RouteAction_UpgradeConfig_ConnectConfig) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[37] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4873,7 +4997,7 @@ func (x *RouteAction_UpgradeConfig_ConnectConfig) ProtoReflect() protoreflect.Me // Deprecated: Use RouteAction_UpgradeConfig_ConnectConfig.ProtoReflect.Descriptor instead. 
func (*RouteAction_UpgradeConfig_ConnectConfig) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7, 2, 0} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 2, 0} } func (x *RouteAction_UpgradeConfig_ConnectConfig) GetProxyProtocolConfig() *v31.ProxyProtocolConfig { @@ -4906,7 +5030,7 @@ type RetryPolicy_RetryPriority struct { func (x *RetryPolicy_RetryPriority) Reset() { *x = RetryPolicy_RetryPriority{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[38] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4919,7 +5043,7 @@ func (x *RetryPolicy_RetryPriority) String() string { func (*RetryPolicy_RetryPriority) ProtoMessage() {} func (x *RetryPolicy_RetryPriority) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[38] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4932,7 +5056,7 @@ func (x *RetryPolicy_RetryPriority) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryPolicy_RetryPriority.ProtoReflect.Descriptor instead. 
func (*RetryPolicy_RetryPriority) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 0} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 0} } func (x *RetryPolicy_RetryPriority) GetName() string { @@ -4949,7 +5073,7 @@ func (m *RetryPolicy_RetryPriority) GetConfigType() isRetryPolicy_RetryPriority_ return nil } -func (x *RetryPolicy_RetryPriority) GetTypedConfig() *any.Any { +func (x *RetryPolicy_RetryPriority) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*RetryPolicy_RetryPriority_TypedConfig); ok { return x.TypedConfig } @@ -4961,7 +5085,7 @@ type isRetryPolicy_RetryPriority_ConfigType interface { } type RetryPolicy_RetryPriority_TypedConfig struct { - TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*RetryPolicy_RetryPriority_TypedConfig) isRetryPolicy_RetryPriority_ConfigType() {} @@ -4982,7 +5106,7 @@ type RetryPolicy_RetryHostPredicate struct { func (x *RetryPolicy_RetryHostPredicate) Reset() { *x = RetryPolicy_RetryHostPredicate{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[39] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4995,7 +5119,7 @@ func (x *RetryPolicy_RetryHostPredicate) String() string { func (*RetryPolicy_RetryHostPredicate) ProtoMessage() {} func (x *RetryPolicy_RetryHostPredicate) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[39] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5008,7 +5132,7 @@ func (x 
*RetryPolicy_RetryHostPredicate) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryPolicy_RetryHostPredicate.ProtoReflect.Descriptor instead. func (*RetryPolicy_RetryHostPredicate) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 1} } func (x *RetryPolicy_RetryHostPredicate) GetName() string { @@ -5025,7 +5149,7 @@ func (m *RetryPolicy_RetryHostPredicate) GetConfigType() isRetryPolicy_RetryHost return nil } -func (x *RetryPolicy_RetryHostPredicate) GetTypedConfig() *any.Any { +func (x *RetryPolicy_RetryHostPredicate) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*RetryPolicy_RetryHostPredicate_TypedConfig); ok { return x.TypedConfig } @@ -5037,7 +5161,7 @@ type isRetryPolicy_RetryHostPredicate_ConfigType interface { } type RetryPolicy_RetryHostPredicate_TypedConfig struct { - TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*RetryPolicy_RetryHostPredicate_TypedConfig) isRetryPolicy_RetryHostPredicate_ConfigType() {} @@ -5053,8 +5177,8 @@ type RetryPolicy_RetryBackOff struct { // back-off algorithm. BaseInterval *duration.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` // Specifies the maximum interval between retries. This parameter is optional, but must be - // greater than or equal to the `base_interval` if set. The default is 10 times the - // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + // greater than or equal to the ``base_interval`` if set. The default is 10 times the + // ``base_interval``. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion // of Envoy's back-off algorithm. 
MaxInterval *duration.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` } @@ -5062,7 +5186,7 @@ type RetryPolicy_RetryBackOff struct { func (x *RetryPolicy_RetryBackOff) Reset() { *x = RetryPolicy_RetryBackOff{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[40] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5075,7 +5199,7 @@ func (x *RetryPolicy_RetryBackOff) String() string { func (*RetryPolicy_RetryBackOff) ProtoMessage() {} func (x *RetryPolicy_RetryBackOff) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[40] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5088,7 +5212,7 @@ func (x *RetryPolicy_RetryBackOff) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryPolicy_RetryBackOff.ProtoReflect.Descriptor instead. 
func (*RetryPolicy_RetryBackOff) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 2} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 2} } func (x *RetryPolicy_RetryBackOff) GetBaseInterval() *duration.Duration { @@ -5123,7 +5247,7 @@ type RetryPolicy_ResetHeader struct { func (x *RetryPolicy_ResetHeader) Reset() { *x = RetryPolicy_ResetHeader{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[41] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5136,7 +5260,7 @@ func (x *RetryPolicy_ResetHeader) String() string { func (*RetryPolicy_ResetHeader) ProtoMessage() {} func (x *RetryPolicy_ResetHeader) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[41] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5149,7 +5273,7 @@ func (x *RetryPolicy_ResetHeader) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryPolicy_ResetHeader.ProtoReflect.Descriptor instead. 
func (*RetryPolicy_ResetHeader) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 3} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 3} } func (x *RetryPolicy_ResetHeader) GetName() string { @@ -5228,7 +5352,7 @@ type RetryPolicy_RateLimitedRetryBackOff struct { func (x *RetryPolicy_RateLimitedRetryBackOff) Reset() { *x = RetryPolicy_RateLimitedRetryBackOff{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[42] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5241,7 +5365,7 @@ func (x *RetryPolicy_RateLimitedRetryBackOff) String() string { func (*RetryPolicy_RateLimitedRetryBackOff) ProtoMessage() {} func (x *RetryPolicy_RateLimitedRetryBackOff) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[42] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5254,7 +5378,7 @@ func (x *RetryPolicy_RateLimitedRetryBackOff) ProtoReflect() protoreflect.Messag // Deprecated: Use RetryPolicy_RateLimitedRetryBackOff.ProtoReflect.Descriptor instead. 
func (*RetryPolicy_RateLimitedRetryBackOff) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 4} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 4} } func (x *RetryPolicy_RateLimitedRetryBackOff) GetResetHeaders() []*RetryPolicy_ResetHeader { @@ -5271,7 +5395,7 @@ func (x *RetryPolicy_RateLimitedRetryBackOff) GetMaxInterval() *duration.Duratio return nil } -// [#next-free-field: 11] +// [#next-free-field: 12] type RateLimit_Action struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -5288,13 +5412,14 @@ type RateLimit_Action struct { // *RateLimit_Action_Metadata // *RateLimit_Action_Extension // *RateLimit_Action_MaskedRemoteAddress_ + // *RateLimit_Action_QueryParameterValueMatch_ ActionSpecifier isRateLimit_Action_ActionSpecifier `protobuf_oneof:"action_specifier"` } func (x *RateLimit_Action) Reset() { *x = RateLimit_Action{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[43] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5307,7 +5432,7 @@ func (x *RateLimit_Action) String() string { func (*RateLimit_Action) ProtoMessage() {} func (x *RateLimit_Action) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[43] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5320,7 +5445,7 @@ func (x *RateLimit_Action) ProtoReflect() protoreflect.Message { // Deprecated: Use RateLimit_Action.ProtoReflect.Descriptor instead. 
func (*RateLimit_Action) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16, 0} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0} } func (m *RateLimit_Action) GetActionSpecifier() isRateLimit_Action_ActionSpecifier { @@ -5401,6 +5526,13 @@ func (x *RateLimit_Action) GetMaskedRemoteAddress() *RateLimit_Action_MaskedRemo return nil } +func (x *RateLimit_Action) GetQueryParameterValueMatch() *RateLimit_Action_QueryParameterValueMatch { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_QueryParameterValueMatch_); ok { + return x.QueryParameterValueMatch + } + return nil +} + type isRateLimit_Action_ActionSpecifier interface { isRateLimit_Action_ActionSpecifier() } @@ -5467,6 +5599,11 @@ type RateLimit_Action_MaskedRemoteAddress_ struct { MaskedRemoteAddress *RateLimit_Action_MaskedRemoteAddress `protobuf:"bytes,10,opt,name=masked_remote_address,json=maskedRemoteAddress,proto3,oneof"` } +type RateLimit_Action_QueryParameterValueMatch_ struct { + // Rate limit on the existence of query parameters. 
+ QueryParameterValueMatch *RateLimit_Action_QueryParameterValueMatch `protobuf:"bytes,11,opt,name=query_parameter_value_match,json=queryParameterValueMatch,proto3,oneof"` +} + func (*RateLimit_Action_SourceCluster_) isRateLimit_Action_ActionSpecifier() {} func (*RateLimit_Action_DestinationCluster_) isRateLimit_Action_ActionSpecifier() {} @@ -5487,6 +5624,8 @@ func (*RateLimit_Action_Extension) isRateLimit_Action_ActionSpecifier() {} func (*RateLimit_Action_MaskedRemoteAddress_) isRateLimit_Action_ActionSpecifier() {} +func (*RateLimit_Action_QueryParameterValueMatch_) isRateLimit_Action_ActionSpecifier() {} + type RateLimit_Override struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -5500,7 +5639,7 @@ type RateLimit_Override struct { func (x *RateLimit_Override) Reset() { *x = RateLimit_Override{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[44] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5513,7 +5652,7 @@ func (x *RateLimit_Override) String() string { func (*RateLimit_Override) ProtoMessage() {} func (x *RateLimit_Override) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[44] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5526,7 +5665,7 @@ func (x *RateLimit_Override) ProtoReflect() protoreflect.Message { // Deprecated: Use RateLimit_Override.ProtoReflect.Descriptor instead. 
func (*RateLimit_Override) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16, 1} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 1} } func (m *RateLimit_Override) GetOverrideSpecifier() isRateLimit_Override_OverrideSpecifier { @@ -5570,7 +5709,7 @@ type RateLimit_Action_SourceCluster struct { func (x *RateLimit_Action_SourceCluster) Reset() { *x = RateLimit_Action_SourceCluster{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[45] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5583,7 +5722,7 @@ func (x *RateLimit_Action_SourceCluster) String() string { func (*RateLimit_Action_SourceCluster) ProtoMessage() {} func (x *RateLimit_Action_SourceCluster) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[45] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5596,7 +5735,7 @@ func (x *RateLimit_Action_SourceCluster) ProtoReflect() protoreflect.Message { // Deprecated: Use RateLimit_Action_SourceCluster.ProtoReflect.Descriptor instead. 
func (*RateLimit_Action_SourceCluster) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16, 0, 0} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 0} } // The following descriptor entry is appended to the descriptor: @@ -5624,7 +5763,7 @@ type RateLimit_Action_DestinationCluster struct { func (x *RateLimit_Action_DestinationCluster) Reset() { *x = RateLimit_Action_DestinationCluster{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[46] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5637,7 +5776,7 @@ func (x *RateLimit_Action_DestinationCluster) String() string { func (*RateLimit_Action_DestinationCluster) ProtoMessage() {} func (x *RateLimit_Action_DestinationCluster) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[46] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5650,11 +5789,11 @@ func (x *RateLimit_Action_DestinationCluster) ProtoReflect() protoreflect.Messag // Deprecated: Use RateLimit_Action_DestinationCluster.ProtoReflect.Descriptor instead. func (*RateLimit_Action_DestinationCluster) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16, 0, 1} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 1} } // The following descriptor entry is appended when a header contains a key that matches the -// *header_name*: +// ``header_name``: // // .. 
code-block:: cpp // @@ -5679,7 +5818,7 @@ type RateLimit_Action_RequestHeaders struct { func (x *RateLimit_Action_RequestHeaders) Reset() { *x = RateLimit_Action_RequestHeaders{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[47] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5692,7 +5831,7 @@ func (x *RateLimit_Action_RequestHeaders) String() string { func (*RateLimit_Action_RequestHeaders) ProtoMessage() {} func (x *RateLimit_Action_RequestHeaders) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[47] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5705,7 +5844,7 @@ func (x *RateLimit_Action_RequestHeaders) ProtoReflect() protoreflect.Message { // Deprecated: Use RateLimit_Action_RequestHeaders.ProtoReflect.Descriptor instead. 
func (*RateLimit_Action_RequestHeaders) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16, 0, 2} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 2} } func (x *RateLimit_Action_RequestHeaders) GetHeaderName() string { @@ -5744,7 +5883,7 @@ type RateLimit_Action_RemoteAddress struct { func (x *RateLimit_Action_RemoteAddress) Reset() { *x = RateLimit_Action_RemoteAddress{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[48] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5757,7 +5896,7 @@ func (x *RateLimit_Action_RemoteAddress) String() string { func (*RateLimit_Action_RemoteAddress) ProtoMessage() {} func (x *RateLimit_Action_RemoteAddress) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[48] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5770,7 +5909,7 @@ func (x *RateLimit_Action_RemoteAddress) ProtoReflect() protoreflect.Message { // Deprecated: Use RateLimit_Action_RemoteAddress.ProtoReflect.Descriptor instead. func (*RateLimit_Action_RemoteAddress) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16, 0, 3} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 3} } // The following descriptor entry is appended to the descriptor and is populated using the @@ -5786,13 +5925,13 @@ type RateLimit_Action_MaskedRemoteAddress struct { // Length of prefix mask len for IPv4 (e.g. 0, 32). // Defaults to 32 when unset. 
- // For example, trusted address from x-forwarded-for is `192.168.1.1`, + // For example, trusted address from x-forwarded-for is ``192.168.1.1``, // the descriptor entry is ("masked_remote_address", "192.168.1.1/32"); // if mask len is 24, the descriptor entry is ("masked_remote_address", "192.168.1.0/24"). V4PrefixMaskLen *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=v4_prefix_mask_len,json=v4PrefixMaskLen,proto3" json:"v4_prefix_mask_len,omitempty"` // Length of prefix mask len for IPv6 (e.g. 0, 128). // Defaults to 128 when unset. - // For example, trusted address from x-forwarded-for is `2001:abcd:ef01:2345:6789:abcd:ef01:234`, + // For example, trusted address from x-forwarded-for is ``2001:abcd:ef01:2345:6789:abcd:ef01:234``, // the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345:6789:abcd:ef01:234/128"); // if mask len is 64, the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345::/64"). V6PrefixMaskLen *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=v6_prefix_mask_len,json=v6PrefixMaskLen,proto3" json:"v6_prefix_mask_len,omitempty"` @@ -5801,7 +5940,7 @@ type RateLimit_Action_MaskedRemoteAddress struct { func (x *RateLimit_Action_MaskedRemoteAddress) Reset() { *x = RateLimit_Action_MaskedRemoteAddress{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[49] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5814,7 +5953,7 @@ func (x *RateLimit_Action_MaskedRemoteAddress) String() string { func (*RateLimit_Action_MaskedRemoteAddress) ProtoMessage() {} func (x *RateLimit_Action_MaskedRemoteAddress) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[49] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5827,7 +5966,7 @@ func (x *RateLimit_Action_MaskedRemoteAddress) ProtoReflect() protoreflect.Messa // Deprecated: Use RateLimit_Action_MaskedRemoteAddress.ProtoReflect.Descriptor instead. func (*RateLimit_Action_MaskedRemoteAddress) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16, 0, 4} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 4} } func (x *RateLimit_Action_MaskedRemoteAddress) GetV4PrefixMaskLen() *wrappers.UInt32Value { @@ -5864,7 +6003,7 @@ type RateLimit_Action_GenericKey struct { func (x *RateLimit_Action_GenericKey) Reset() { *x = RateLimit_Action_GenericKey{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[50] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5877,7 +6016,7 @@ func (x *RateLimit_Action_GenericKey) String() string { func (*RateLimit_Action_GenericKey) ProtoMessage() {} func (x *RateLimit_Action_GenericKey) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[50] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5890,7 +6029,7 @@ func (x *RateLimit_Action_GenericKey) ProtoReflect() protoreflect.Message { // Deprecated: Use RateLimit_Action_GenericKey.ProtoReflect.Descriptor instead. 
func (*RateLimit_Action_GenericKey) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16, 0, 5} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 5} } func (x *RateLimit_Action_GenericKey) GetDescriptorValue() string { @@ -5917,7 +6056,7 @@ type RateLimit_Action_HeaderValueMatch struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The key to use in the descriptor entry. Defaults to `header_match`. + // The key to use in the descriptor entry. Defaults to ``header_match``. DescriptorKey string `protobuf:"bytes,4,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` // The value to use in the descriptor entry. DescriptorValue string `protobuf:"bytes,1,opt,name=descriptor_value,json=descriptorValue,proto3" json:"descriptor_value,omitempty"` @@ -5937,7 +6076,7 @@ type RateLimit_Action_HeaderValueMatch struct { func (x *RateLimit_Action_HeaderValueMatch) Reset() { *x = RateLimit_Action_HeaderValueMatch{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[51] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5950,7 +6089,7 @@ func (x *RateLimit_Action_HeaderValueMatch) String() string { func (*RateLimit_Action_HeaderValueMatch) ProtoMessage() {} func (x *RateLimit_Action_HeaderValueMatch) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[51] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5963,7 +6102,7 @@ func (x *RateLimit_Action_HeaderValueMatch) ProtoReflect() protoreflect.Message // Deprecated: Use 
RateLimit_Action_HeaderValueMatch.ProtoReflect.Descriptor instead. func (*RateLimit_Action_HeaderValueMatch) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16, 0, 6} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 6} } func (x *RateLimit_Action_HeaderValueMatch) GetDescriptorKey() string { @@ -6013,7 +6152,7 @@ type RateLimit_Action_DynamicMetaData struct { // Metadata struct that defines the key and path to retrieve the string value. A match will // only happen if the value in the dynamic metadata is of type string. MetadataKey *v35.MetadataKey `protobuf:"bytes,2,opt,name=metadata_key,json=metadataKey,proto3" json:"metadata_key,omitempty"` - // An optional value to use if *metadata_key* is empty. If not set and + // An optional value to use if ``metadata_key`` is empty. If not set and // no value is present under the metadata_key then no descriptor is generated. DefaultValue string `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` } @@ -6021,7 +6160,7 @@ type RateLimit_Action_DynamicMetaData struct { func (x *RateLimit_Action_DynamicMetaData) Reset() { *x = RateLimit_Action_DynamicMetaData{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[52] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6034,7 +6173,7 @@ func (x *RateLimit_Action_DynamicMetaData) String() string { func (*RateLimit_Action_DynamicMetaData) ProtoMessage() {} func (x *RateLimit_Action_DynamicMetaData) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[52] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { @@ -6047,7 +6186,7 @@ func (x *RateLimit_Action_DynamicMetaData) ProtoReflect() protoreflect.Message { // Deprecated: Use RateLimit_Action_DynamicMetaData.ProtoReflect.Descriptor instead. func (*RateLimit_Action_DynamicMetaData) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16, 0, 7} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 7} } func (x *RateLimit_Action_DynamicMetaData) GetDescriptorKey() string { @@ -6076,6 +6215,7 @@ func (x *RateLimit_Action_DynamicMetaData) GetDefaultValue() string { // .. code-block:: cpp // // ("", "") +// [#next-free-field: 6] type RateLimit_Action_MetaData struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -6086,17 +6226,22 @@ type RateLimit_Action_MetaData struct { // Metadata struct that defines the key and path to retrieve the string value. A match will // only happen if the value in the metadata is of type string. MetadataKey *v35.MetadataKey `protobuf:"bytes,2,opt,name=metadata_key,json=metadataKey,proto3" json:"metadata_key,omitempty"` - // An optional value to use if *metadata_key* is empty. If not set and - // no value is present under the metadata_key then no descriptor is generated. + // An optional value to use if ``metadata_key`` is empty. If not set and + // no value is present under the metadata_key then ``skip_if_absent`` is followed to + // skip calling the rate limiting service or skip the descriptor. DefaultValue string `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` // Source of metadata Source RateLimit_Action_MetaData_Source `protobuf:"varint,4,opt,name=source,proto3,enum=envoy.config.route.v3.RateLimit_Action_MetaData_Source" json:"source,omitempty"` + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when ``metadata_key`` is empty and ``default_value`` is not set. 
By default it skips calling the + // rate limiting service in that case. + SkipIfAbsent bool `protobuf:"varint,5,opt,name=skip_if_absent,json=skipIfAbsent,proto3" json:"skip_if_absent,omitempty"` } func (x *RateLimit_Action_MetaData) Reset() { *x = RateLimit_Action_MetaData{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[53] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6109,7 +6254,7 @@ func (x *RateLimit_Action_MetaData) String() string { func (*RateLimit_Action_MetaData) ProtoMessage() {} func (x *RateLimit_Action_MetaData) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[53] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6122,7 +6267,7 @@ func (x *RateLimit_Action_MetaData) ProtoReflect() protoreflect.Message { // Deprecated: Use RateLimit_Action_MetaData.ProtoReflect.Descriptor instead. func (*RateLimit_Action_MetaData) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16, 0, 8} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 8} } func (x *RateLimit_Action_MetaData) GetDescriptorKey() string { @@ -6153,6 +6298,100 @@ func (x *RateLimit_Action_MetaData) GetSource() RateLimit_Action_MetaData_Source return RateLimit_Action_MetaData_DYNAMIC } +func (x *RateLimit_Action_MetaData) GetSkipIfAbsent() bool { + if x != nil { + return x.SkipIfAbsent + } + return false +} + +// The following descriptor entry is appended to the descriptor: +// +// .. 
code-block:: cpp +// +// ("query_match", "") +type RateLimit_Action_QueryParameterValueMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key to use in the descriptor entry. Defaults to ``query_match``. + DescriptorKey string `protobuf:"bytes,4,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` + // The value to use in the descriptor entry. + DescriptorValue string `protobuf:"bytes,1,opt,name=descriptor_value,json=descriptorValue,proto3" json:"descriptor_value,omitempty"` + // If set to true, the action will append a descriptor entry when the + // request matches the headers. If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + ExpectMatch *wrappers.BoolValue `protobuf:"bytes,2,opt,name=expect_match,json=expectMatch,proto3" json:"expect_match,omitempty"` + // Specifies a set of query parameters that the rate limit action should match + // on. The action will check the request’s query parameters against all the + // specified query parameters in the config. A match will happen if all the + // query parameters in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). 
+ QueryParameters []*QueryParameterMatcher `protobuf:"bytes,3,rep,name=query_parameters,json=queryParameters,proto3" json:"query_parameters,omitempty"` +} + +func (x *RateLimit_Action_QueryParameterValueMatch) Reset() { + *x = RateLimit_Action_QueryParameterValueMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_QueryParameterValueMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_QueryParameterValueMatch) ProtoMessage() {} + +func (x *RateLimit_Action_QueryParameterValueMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_QueryParameterValueMatch.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_QueryParameterValueMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 9} +} + +func (x *RateLimit_Action_QueryParameterValueMatch) GetDescriptorKey() string { + if x != nil { + return x.DescriptorKey + } + return "" +} + +func (x *RateLimit_Action_QueryParameterValueMatch) GetDescriptorValue() string { + if x != nil { + return x.DescriptorValue + } + return "" +} + +func (x *RateLimit_Action_QueryParameterValueMatch) GetExpectMatch() *wrappers.BoolValue { + if x != nil { + return x.ExpectMatch + } + return nil +} + +func (x *RateLimit_Action_QueryParameterValueMatch) GetQueryParameters() []*QueryParameterMatcher { + if x != nil { + return x.QueryParameters + } + return nil +} + // Fetches the override from the dynamic metadata. 
type RateLimit_Override_DynamicMetadata struct { state protoimpl.MessageState @@ -6169,7 +6408,7 @@ type RateLimit_Override_DynamicMetadata struct { func (x *RateLimit_Override_DynamicMetadata) Reset() { *x = RateLimit_Override_DynamicMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[54] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6182,7 +6421,7 @@ func (x *RateLimit_Override_DynamicMetadata) String() string { func (*RateLimit_Override_DynamicMetadata) ProtoMessage() {} func (x *RateLimit_Override_DynamicMetadata) ProtoReflect() protoreflect.Message { - mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[54] + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6195,7 +6434,7 @@ func (x *RateLimit_Override_DynamicMetadata) ProtoReflect() protoreflect.Message // Deprecated: Use RateLimit_Override_DynamicMetadata.ProtoReflect.Descriptor instead. 
func (*RateLimit_Override_DynamicMetadata) Descriptor() ([]byte, []int) { - return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16, 1, 0} + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 1, 0} } func (x *RateLimit_Override_DynamicMetadata) GetMetadataKey() *v35.MetadataKey { @@ -6254,7 +6493,7 @@ var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{ 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x86, 0x0e, 0x0a, 0x0b, 0x56, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x0e, 0x0a, 0x0b, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, @@ -6306,1052 +6545,1082 @@ var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{ 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, + 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x42, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, - 0x6f, 0x72, 0x73, 
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, - 0x73, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, - 0x48, 0x6f, 0x73, 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, - 0x74, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x6e, 0x63, - 0x6c, 0x75, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x74, 0x74, 0x65, 0x6d, - 0x70, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x21, 0x69, 0x6e, 0x63, 0x6c, 0x75, - 0x64, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x5f, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x13, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x41, 0x74, 0x74, 0x65, 0x6d, - 0x70, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x45, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 
0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, - 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, - 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x0c, 0x68, 0x65, 0x64, - 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, + 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x73, 0x0a, 0x17, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, + 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x41, 0x0a, 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 
0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x21, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x6e, + 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, + 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x16, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x0c, 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x44, 0x0a, + 0x1f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x69, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x5f, 0x72, 
0x65, 0x74, 0x72, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, + 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x60, 0x0a, 0x1e, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x1a, 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, + 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, 0x0a, 0x12, 0x54, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, + 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, + 0x4e, 0x45, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, + 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x56, 0x69, 0x72, 0x74, + 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, + 0x0c, 0x10, 0x0d, 0x52, 0x11, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x64, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x41, 0x0a, 0x09, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x06, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, + 0xaf, 
0x0b, 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, + 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x3a, 0x0a, 0x05, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x05, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x08, + 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x12, 0x56, 0x0a, 0x0f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 
0x00, 0x52, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x60, 0x0a, 0x15, 0x6e, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x6e, + 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x13, 0x6e, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, + 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x3e, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x64, 0x65, 0x63, 0x6f, 0x72, 0x61, + 0x74, 0x6f, 0x72, 0x12, 0x6d, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, 
0x65, 0x72, + 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0d, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, + 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x09, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, + 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, 0x19, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, + 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, + 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x01, 0xc8, + 0x01, 0x00, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, + 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, + 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4f, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, + 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x17, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, + 0x67, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x60, 0x0a, 0x1e, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x5f, 0x6d, 0x69, - 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x16, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, - 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, - 0x65, 0x73, 0x1a, 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0x3a, 0x0a, 0x12, 0x54, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, - 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x4f, 0x4e, - 0x4c, 0x59, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x3a, 0x25, 0x9a, - 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, - 0x48, 0x6f, 0x73, 0x74, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, - 0x52, 0x11, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 
0x67, 0x22, 0x64, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xaf, 0x0b, 0x0a, 0x05, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, - 0x02, 0x10, 0x01, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x05, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, - 0x05, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, - 0x00, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 
0x65, 0x63, 0x74, 0x12, 0x56, 0x0a, 0x0f, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x48, 0x00, 0x52, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, - 0x00, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x60, 0x0a, 0x15, 0x6e, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, - 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, - 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x13, 0x6e, 0x6f, - 0x6e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 
0x3e, 0x0a, - 0x09, 0x64, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, - 0x6f, 0x72, 0x52, 0x09, 0x64, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x6d, 0x0a, - 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x67, 0x0a, 0x16, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, - 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, - 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, - 0x22, 0x0a, 0x72, 0x08, 
0x10, 0x01, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x16, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, - 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, - 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, - 0x4f, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0b, 0x20, - 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, - 0x01, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x12, 0x38, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, - 0x67, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x60, 0x0a, 0x1e, 0x70, 0x65, - 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, - 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x1a, 0x70, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x75, 0x66, 0x66, - 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, - 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x13, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0x5d, 0x0a, - 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x1f, 0x9a, 0xc5, - 0x88, 0x1e, 0x1a, 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x42, 0x0d, 0x0a, - 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x06, - 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x52, 0x11, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xef, 0x0a, 0x0a, 0x0f, - 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, - 0x5a, 0x0a, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, - 
0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, - 0x01, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x48, 0x0a, 0x0c, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x57, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, - 0x01, 0x01, 0xc8, 0x01, 0x00, 0x48, 0x00, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x1a, 0x92, 0x08, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x19, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x42, - 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, - 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 
0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x12, 0x34, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x45, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, - 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x67, - 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, - 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, - 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x16, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 
0x54, 0x6f, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, - 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, - 0x4d, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, - 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x85, - 0x01, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x4e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, - 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, + 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, + 0x66, 0x69, 
0x78, 0x1a, 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x14, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, - 0x00, 0x48, 0x00, 0x52, 0x12, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x1a, 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, - 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, - 0x18, 0x0a, 0x16, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, - 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, - 0x04, 0x08, 0x08, 0x10, 0x09, 0x52, 0x11, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 
0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, - 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, 0x8d, 0x01, - 0x0a, 0x16, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x3a, 0x1f, 0x9a, 0xc5, 0x88, 0x1e, 0x1a, 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x42, 0x0d, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x52, 0x11, + 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0xf3, 0x0a, 0x0a, 0x0f, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 
0x2e, + 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x12, 0x4c, 0x0a, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x52, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, + 0x2c, 0x0a, 0x12, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2e, 0x0a, + 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x48, + 0x00, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x92, 0x08, + 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, + 0x2d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x19, 0xf2, + 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4b, + 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, + 0xc8, 0x01, 0x00, 0xf2, 0x98, 
0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0d, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x06, 0x77, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x12, 0x45, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, + 0x64, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, + 0x64, 0x12, 0x4b, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, + 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x16, 0x72, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, + 0x0a, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, + 0x10, 0xe8, 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, 0x1a, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, + 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, + 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, + 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x17, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, 0x65, + 0x64, 
0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x3f, 0x0a, 0x14, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x48, 0x00, 0x52, 0x12, 0x68, + 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, + 0x6c, 0x1a, 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x57, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, 0x18, 0x0a, 0x16, 0x68, 0x6f, 0x73, + 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x52, + 0x11, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x57, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, + 0x16, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, 0x8d, 0x01, 0x0a, 0x16, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xc5, 0x0a, 0x0a, 0x0a, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x18, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x14, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x4e, 0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, + 0x65, 0x67, 0x65, 0x78, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, + 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x5b, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 
0x6e, 0x65, 0x63, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x15, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x73, 0x65, 0x70, 0x61, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x15, 0xfa, 0x42, 0x12, 0x72, 0x10, 0x32, 0x0e, 0x5e, 0x5b, 0x5e, 0x3f, 0x23, + 0x5d, 0x2b, 0x5b, 0x5e, 0x3f, 0x23, 0x2f, 0x5d, 0x24, 0x48, 0x00, 0x52, 0x13, 0x70, 0x61, 0x74, + 0x68, 0x53, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x58, 0x0a, 0x11, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, - 0x01, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, - 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xa1, 0x0a, - 0x0a, 0x0a, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x18, 0x0a, 0x06, - 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, - 0x70, 0x72, 0x65, 0x66, 
0x69, 0x78, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x4e, 0x0a, 0x0a, - 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, - 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x5b, 0x0a, 0x0f, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x15, 0x70, 0x61, 0x74, - 0x68, 0x5f, 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x66, - 0x69, 0x78, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x15, 0xfa, 0x42, 0x12, 0x72, 0x10, 0x32, - 0x0e, 0x5e, 0x5b, 0x5e, 0x3f, 0x23, 0x5d, 0x2b, 0x5b, 0x5e, 0x3f, 0x23, 0x2f, 0x5d, 0x24, 0x48, - 0x00, 0x52, 0x13, 0x70, 0x61, 0x74, 0x68, 0x53, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x65, 0x64, - 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x34, 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, - 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x02, 0xd0, 0x01, 0x01, 0x48, 0x00, 0x52, 0x0c, - 0x70, 0x61, 0x74, 0x68, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 
0x74, 0x65, 0x12, 0x41, 0x0a, 0x0e, - 0x63, 0x61, 0x73, 0x65, 0x5f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x63, 0x61, + 0x73, 0x65, 0x5f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, + 0x63, 0x61, 0x73, 0x65, 0x53, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x59, 0x0a, + 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, + 
0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x52, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x12, 0x4b, 0x0a, 0x04, 0x67, 0x72, 0x70, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x04, 0x67, 0x72, 0x70, 0x63, 0x12, 0x59, + 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x74, + 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x51, 0x0a, 0x10, 0x64, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0d, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x64, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x53, 0x0a, 0x15, + 0x47, 0x72, 0x70, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 
0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x1a, 0xc9, 0x01, 0x0a, 0x16, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x38, 0x0a, 0x09, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, + 0x73, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x10, 0x0a, + 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x3a, + 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x15, 0x0a, 0x0e, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 
0x4a, 0x04, 0x08, 0x05, + 0x10, 0x06, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, + 0xc5, 0x05, 0x0a, 0x0a, 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5f, + 0x0a, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x16, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4f, 0x72, + 0x69, 0x67, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x70, + 0x6f, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x47, 0x0a, 0x11, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x0d, 0x63, 
0x61, 0x73, 0x65, 0x53, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, - 0x59, 0x0a, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x72, 0x75, 0x6e, 0x74, 0x69, - 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x52, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, 0x04, 0x67, 0x72, 0x70, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x52, 0x04, 0x67, 0x72, 0x70, 0x63, - 0x12, 0x59, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x0a, 0x74, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x51, 0x0a, 0x10, 0x64, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x64, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x53, - 0x0a, 0x15, 0x47, 0x72, 0x70, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, 0x72, - 0x70, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x1a, 0xc9, 0x01, 0x0a, 0x16, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x38, - 0x0a, 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, - 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, - 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x64, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, - 0x10, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x15, 0x0a, 0x0e, 0x70, 0x61, 0x74, 0x68, 0x5f, - 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, - 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, - 0x78, 0x22, 0xe8, 0x04, 0x0a, 0x0a, 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x5f, 0x0a, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, - 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x4d, 0x61, 0x74, 
0x63, 0x68, 0x65, 0x72, 0x52, 0x16, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, - 0x78, 0x70, 0x6f, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x47, 0x0a, 0x11, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x12, 0x57, 0x0a, 0x0e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0d, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 
0x65, 0x64, 0x12, 0x55, 0x0a, - 0x0e, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, - 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x13, 0x0a, 0x11, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x07, 0x10, - 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x52, - 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x72, 0x65, - 0x67, 0x65, 0x78, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xdb, 0x2a, 0x0a, - 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x07, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x12, 0x36, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, - 0x10, 0x01, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x11, 0x77, 0x65, 0x69, - 0x67, 
0x68, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, - 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x12, 0x3a, 0x0a, 0x18, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x25, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, - 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x76, 0x0a, 0x1f, - 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, - 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, - 0x27, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x48, 0x00, 0x52, 0x1c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x12, 0x8e, 0x01, 0x0a, 0x1f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 
0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, - 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x1b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x12, 0x57, 0x0a, 0x0e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0d, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x55, 0x0a, 0x0e, 0x73, + 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x5b, 0x0a, 0x1c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x61, 
0x63, 0x63, 0x65, + 0x73, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x3a, + 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x13, 0x0a, 0x11, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x52, 0x0c, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x07, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x9c, 0x2b, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x0e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x01, + 0xc8, 0x01, 0x00, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x11, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, + 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x65, 
0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x18, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x16, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x76, 0x0a, 0x1f, 0x69, 0x6e, 0x6c, 0x69, 0x6e, + 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x27, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x48, + 0x00, 0x52, 0x1c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, + 0x8e, 0x01, 0x0a, 0x1f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 
0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x1b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, + 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x45, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x32, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x0d, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, + 0x65, 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x20, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, + 0x74, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x12, 0x5a, 0x0a, 0x13, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x6d, 
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x32, 0x0a, 0x0e, - 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, - 0x00, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x70, 0x61, 0x74, 0x68, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3f, 0x0a, 0x14, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x74, + 0x65, 0x72, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, + 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x48, 0x01, 0x52, 0x12, 0x68, 0x6f, 0x73, 0x74, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x48, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x48, 0x01, 0x52, 0x0f, 0x61, 0x75, 0x74, 0x6f, 0x48, 0x6f, 0x73, 0x74, + 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x13, 0x68, 0x6f, 0x73, 0x74, 0x5f, + 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x1d, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, + 0x00, 0x48, 0x01, 0x52, 0x11, 0x68, 
0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x17, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, - 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x15, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x29, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x02, - 0xd0, 0x01, 0x01, 0x52, 0x13, 0x70, 0x61, 0x74, 0x68, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x3f, 0x0a, 0x14, 0x68, 0x6f, 0x73, 0x74, - 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, - 0xc8, 0x01, 0x00, 0x48, 0x01, 0x52, 0x12, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x48, 0x0a, 0x11, 0x61, 0x75, 0x74, - 0x6f, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x48, 0x01, 0x52, 0x0f, 0x61, 0x75, 0x74, 0x6f, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x13, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 
0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x48, 0x01, 0x52, - 0x11, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x12, 0x67, 0x0a, 0x17, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x23, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, - 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, - 0x75, 0x74, 0x65, 0x48, 0x01, 0x52, 0x14, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x35, 0x0a, 0x17, 0x61, - 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x78, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, - 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x26, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x61, 0x70, - 0x70, 0x65, 0x6e, 0x64, 0x58, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x48, 0x6f, - 0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 
0x75, 0x74, 0x12, 0x56, 0x0a, 0x11, 0x65, 0x61, 0x72, 0x6c, 0x79, 0x5f, 0x64, - 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x28, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x65, 0x61, - 0x72, 0x6c, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x45, 0x0a, - 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, - 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x72, - 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, - 0x18, 0x1e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, - 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x48, 0x01, 0x52, 0x14, 0x68, 0x6f, 0x73, 0x74, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, + 0x35, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x78, 0x5f, 0x66, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x26, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x58, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, + 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x69, + 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 
0x64, + 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x11, 0x65, 0x61, 0x72, + 0x6c, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x28, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x0f, 0x65, 0x61, 0x72, 0x6c, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x45, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, 0x74, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x5f, 0x76, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x13, - 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x68, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x73, 0x12, 0x4e, 0x0a, 0x0b, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, - 0x68, 0x50, 0x6f, 0x6c, 0x69, 
0x63, 0x79, 0x52, 0x0a, 0x68, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x35, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x6d, 0x61, - 0x78, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x17, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x0e, 0x6d, 0x61, - 0x78, 0x47, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x13, - 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, - 0x30, 0x52, 0x11, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4f, 0x66, - 0x66, 0x73, 0x65, 0x74, 0x12, 0x59, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x73, 0x12, - 0x67, 0x0a, 0x18, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x22, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x80, 0x01, 0x0a, 0x18, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x65, 0x6e, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x18, 0x1e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, + 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, + 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x08, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, - 0x33, 0x2e, 0x30, 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x16, 0x6d, - 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, - 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, - 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 
0x65, 0x63, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x0c, - 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x64, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x81, 0x03, 0x0a, 0x13, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x19, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x07, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, - 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0xf2, 0x98, 0xfe, 0x8f, 0x05, - 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, - 0x66, 0x69, 0x65, 0x72, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 
0x65, 0x61, - 0x64, 0x65, 0x72, 0x12, 0x59, 0x0a, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, - 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x72, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, - 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x3a, - 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, - 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, - 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x1a, 0x96, 0x0a, - 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4e, 0x0a, 0x06, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x16, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x73, 0x18, 
0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x52, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x68, 0x52, 0x61, 0x74, + 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x4e, 0x0a, 0x0b, 0x68, 0x61, 0x73, 0x68, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x68, 0x61, 0x73, + 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, + 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, + 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x6d, + 0x61, 0x78, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, + 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x0e, 0x6d, + 0x61, 0x78, 0x47, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, + 0x13, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6f, 0x66, + 0x66, 0x73, 0x65, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x52, 0x11, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4f, + 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x59, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, + 0x12, 0x67, 0x0a, 0x18, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x22, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x80, 0x01, 0x0a, 0x18, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 
0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x06, - 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, + 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x16, + 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x45, 0x0a, + 0x0c, 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, + 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x64, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 
0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x81, 0x03, 0x0a, 0x13, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x19, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x07, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x24, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0xf2, 0x98, 0xfe, 0x8f, + 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x59, 0x0a, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x3f, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, + 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, + 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, + 0x03, 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x1a, 0x96, + 0x0a, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4e, 0x0a, + 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, + 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, + 0x6b, 0x69, 0x65, 0x48, 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x79, 0x0a, + 0x15, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 
0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, - 0x69, 0x65, 0x48, 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x79, 0x0a, 0x15, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x48, - 0x00, 0x52, 0x14, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, - 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x67, 0x0a, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, + 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x48, 0x00, 0x52, 0x14, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x67, 0x0a, 0x0f, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, + 0x00, 0x52, 0x0e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x12, 0x5e, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x1a, 0xc6, 0x01, + 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, + 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x0a, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x52, + 
0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x3a, 0x37, 0x9a, + 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x0e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, - 0x12, 0x5e, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x9f, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, + 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x3a, + 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x1a, 0x7a, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 
0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x3a, 0x45, 0x9a, + 0xc5, 0x88, 0x1e, 0x40, 0x0a, 0x3e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x1a, 0x6e, 0x0a, 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x3a, 0x3f, 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x48, 0x00, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x1a, 0xc6, 0x01, 0x0a, - 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, - 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x0a, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, 0x67, 
0x65, 0x78, - 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x52, 0x0c, - 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x3a, 0x37, 0x9a, 0xc5, - 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x9f, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, - 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, - 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x3a, 0x37, - 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x1a, 0x66, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x3c, + 0x9a, 0xc5, 0x88, 0x1e, 
0x37, 0x0a, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x1a, 0x7a, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, - 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x3a, 0x45, 0x9a, 0xc5, - 0x88, 0x1e, 0x40, 0x0a, 0x3e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x30, 0x9a, 0xc5, + 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, - 0x69, 0x65, 0x73, 0x1a, 0x6e, 0x0a, 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x3a, 0x3f, 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x1a, 0x66, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x74, 
0x65, 0x72, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x19, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x3c, 0x9a, - 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, - 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x17, 0x0a, - 0x10, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xa3, 0x03, 0x0a, 0x0d, 0x55, 0x70, 0x67, 0x72, 0x61, - 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, - 0x61, 0x64, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, - 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x0b, 0x75, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, - 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x12, 0x65, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, - 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x8d, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x15, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, - 0x77, 0x5f, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x50, 0x6f, 0x73, 0x74, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x88, 0x02, 0x0a, - 0x11, 0x4d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 
0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x17, + 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xa3, 0x03, 0x0a, 0x0d, 0x55, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x0c, 0x75, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x0b, + 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x12, 0x65, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x8d, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x15, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 
0x2e, 0x76, 0x33, + 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x6f, 0x73, 0x74, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, + 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x88, 0x02, + 0x0a, 0x11, 0x4d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, + 0x0a, 0x17, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, - 0x17, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, 0x65, - 0x61, 0x64, 0x65, 
0x72, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x67, 0x72, 0x70, 0x63, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x78, + 0x12, 0x56, 0x0a, 0x1a, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x17, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x60, 0x0a, 0x1b, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x45, 0x52, 0x56, 0x49, + 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, + 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, + 0x19, 0x0a, 0x15, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x22, 0x5e, 0x0a, 0x16, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x41, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x52, + 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, + 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x48, 0x41, 0x4e, 0x44, + 0x4c, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, + 0x52, 0x45, 0x43, 0x54, 0x10, 0x01, 0x1a, 0x02, 0x18, 
0x01, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, + 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x18, 0x0a, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x68, + 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x12, 0x10, + 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, 0x04, 0x08, + 0x16, 0x10, 0x17, 0x4a, 0x04, 0x08, 0x15, 0x10, 0x16, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, + 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xbf, 0x10, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4f, + 0x6e, 0x12, 0x52, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x0a, 0x0b, 0x6d, 0x61, + 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x52, 0x65, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
- 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x67, 0x72, 0x70, 0x63, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x78, 0x12, - 0x56, 0x0a, 0x1a, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x17, - 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x45, 0x0a, 0x1b, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, - 0x45, 0x5f, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x22, 0x5e, - 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x41, 0x53, 0x53, - 0x5f, 0x54, 0x48, 0x52, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, - 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, - 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, - 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x01, 0x1a, 0x02, 0x18, 0x01, 0x3a, 0x25, - 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 
0x18, 0x0a, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, - 0x18, 0x0a, 0x16, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, - 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, - 0x04, 0x08, 0x12, 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x10, 0x10, - 0x11, 0x4a, 0x04, 0x08, 0x16, 0x10, 0x17, 0x4a, 0x04, 0x08, 0x15, 0x10, 0x16, 0x4a, 0x04, 0x08, - 0x0a, 0x10, 0x0b, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x72, - 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xbf, 0x10, 0x0a, 0x0b, 0x52, - 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x65, - 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, - 0x74, 0x72, 0x79, 0x4f, 0x6e, 0x12, 0x52, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x74, - 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, - 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x6e, - 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x70, 0x65, 0x72, - 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x70, - 0x65, 0x72, 0x54, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4a, 0x0a, 0x14, - 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x6c, 0x65, 
0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x70, 0x65, 0x72, 0x54, 0x72, 0x79, 0x49, 0x64, 0x6c, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, - 0x74, 0x79, 0x52, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x12, 0x67, 0x0a, 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, - 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, - 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x12, 0x72, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, - 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x64, 0x0a, 0x18, 0x72, 0x65, - 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x64, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, - 0x12, 0x48, 0x0a, 0x21, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x74, 0x74, - 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x68, 0x6f, 0x73, - 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, - 0x61, 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, - 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, - 0x6f, 0x64, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x14, 0x72, 0x65, 0x74, 0x72, - 0x69, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, - 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, - 0x66, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x78, 0x0a, 0x1b, 0x72, 0x61, 0x74, 0x65, 0x5f, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, 0x61, - 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x54, 0x72, + 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4a, 0x0a, 0x14, 0x70, 0x65, 0x72, 0x5f, + 0x74, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x6c, 0x65, 
0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x11, 0x70, 0x65, 0x72, 0x54, 0x72, 0x79, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x52, 0x65, 0x74, 0x72, - 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x17, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x65, 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, - 0x66, 0x12, 0x51, 0x0a, 0x11, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x52, 0x10, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x17, 
0x72, - 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x74, 0x72, 0x79, - 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, - 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, - 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x1a, 0xc3, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, - 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, - 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x3a, 0x38, 0x9a, 0xc5, 
0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, - 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, - 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd6, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x4a, 0x0a, 0x0d, 0x62, 0x61, 0x73, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, - 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, - 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, - 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x32, 0x9a, - 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, - 0x66, 0x1a, 0x88, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x12, 0x21, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x42, - 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0d, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, + 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x52, 0x12, 0x72, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x64, 0x0a, 0x18, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x21, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, + 0x73, 
0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x41, 0x74, + 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x14, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0e, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, - 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0xc0, 0x01, 0x0a, - 0x17, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x52, 0x65, 0x74, 0x72, - 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x5d, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, - 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, - 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, - 0x34, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x53, 0x10, - 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x49, 0x58, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, - 0x41, 0x4d, 0x50, 0x10, 0x01, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, + 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, + 0x4f, 0x66, 0x66, 0x12, 0x78, 0x0a, 0x1b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, + 0x66, 0x66, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x61, 0x74, + 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, + 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x17, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x51, 0x0a, + 0x11, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 
0x76, 0x33, + 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x10, + 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x12, 0x60, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x17, 0x72, 0x65, 0x74, 0x72, 0x69, + 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, + 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x33, 0x9a, 0xc5, + 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xc3, + 0x01, 0x0a, 0x12, 0x52, 
0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, + 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x38, 0x9a, + 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, + 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd6, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, + 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x4a, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, + 0x01, 0x2a, 0x00, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 
0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, + 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, + 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x1a, 0x88, 0x01, + 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x56, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0xc0, 0x01, 0x0a, 0x17, 0x52, 0x61, 0x74, + 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, + 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x5d, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, + 
0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, + 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x34, 0x0a, 0x11, 0x52, + 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x53, 0x10, 0x00, 0x12, 0x12, 0x0a, + 0x0e, 0x55, 0x4e, 0x49, 0x58, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, + 0x01, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9c, 0x02, 0x0a, 0x0b, 0x48, 0x65, 0x64, + 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x50, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x19, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 
0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, + 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x18, 0x68, 0x65, 0x64, 0x67, + 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x68, 0x65, 0x64, 0x67, + 0x65, 0x4f, 0x6e, 0x50, 0x65, 0x72, 0x54, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x48, 0x65, 0x64, 0x67, + 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xe1, 0x05, 0x0a, 0x0e, 0x52, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0e, 0x68, 0x74, + 0x74, 0x70, 0x73, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x68, 0x74, 0x74, 0x70, 0x73, 0x52, 0x65, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x12, 0x29, 0x0a, 0x0f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x30, + 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, + 0x01, 0x00, 0x52, 0x0c, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x70, 0x6f, 0x72, 
0x74, 0x52, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x32, 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, + 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x48, 0x01, 0x52, 0x0c, 0x70, 0x61, 0x74, + 0x68, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x34, 0x0a, 0x0e, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x48, 0x01, + 0x52, 0x0d, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, + 0x55, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, + 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x48, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x69, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x18, 0x06, 
0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x69, 0x70, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x22, 0x77, 0x0a, 0x14, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x4f, + 0x56, 0x45, 0x44, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x4c, 0x59, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x53, 0x45, 0x45, 0x5f, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, + 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x45, 0x52, 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, + 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x04, 0x3a, 0x28, 0x9a, 0xc5, 0x88, + 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x1a, 0x0a, 0x18, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, + 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x42, 0x18, 0x0a, 0x16, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x14, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x2a, 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, + 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 
0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x3a, + 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x15, 0x0a, 0x13, 0x4e, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x44, 0x65, 0x63, 0x6f, 0x72, + 0x61, 0x74, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x09, 0x70, + 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x70, + 0x61, 0x67, 0x61, 0x74, 0x65, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9c, 0x02, 0x0a, - 0x0b, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x50, 0x0a, 0x10, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x0f, 
0x69, - 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x5c, - 0x0a, 0x19, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, - 0x65, 0x6e, 0x74, 0x52, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x18, - 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, - 0x68, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x50, 0x65, 0x72, 0x54, 0x72, 0x79, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, - 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xe1, 0x05, 0x0a, 0x0e, - 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, - 0x0a, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x68, 0x74, 0x74, 0x70, 0x73, 0x52, - 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x29, 0x0a, 0x0f, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x12, 0x30, 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, - 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x0c, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x65, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x70, 0x6f, 0x72, - 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x32, 0x0a, 0x0d, 0x70, 0x61, 0x74, - 0x68, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x48, 0x01, 0x52, - 0x0c, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x34, 0x0a, - 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, - 0x01, 0x00, 0x48, 0x01, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x52, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, - 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, - 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x48, 0x01, 0x52, 0x0c, 0x72, 0x65, - 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x69, 0x0a, 0x0d, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 
0x64, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x69, - 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x22, 0x77, 0x0a, 0x14, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, - 0x0a, 0x11, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x41, 0x4e, 0x45, 0x4e, - 0x54, 0x4c, 0x59, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x5f, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x10, 0x02, 0x12, - 0x16, 0x0a, 0x12, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x52, 0x45, 0x44, - 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x45, 0x52, 0x4d, 0x41, - 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x04, 0x3a, - 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x1a, 0x0a, 0x18, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, - 0xa1, 0x01, 0x0a, 0x14, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 
0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x2a, 0x06, 0x10, - 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, - 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x04, 0x62, - 0x6f, 0x64, 0x79, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x44, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x4e, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, - 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x44, - 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x38, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, - 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, 0x65, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, - 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x22, 0xd2, - 0x02, 0x0a, 0x07, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x0f, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x61, 
0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6d, - 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x0f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x5f, - 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, - 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, - 0x52, 0x0e, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, - 0x12, 0x4b, 0x0a, 0x10, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x6c, 0x5f, 0x73, 0x61, 0x6d, 0x70, - 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, + 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x22, 0xd2, 0x02, 0x0a, 0x07, 0x54, + 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, + 0x67, 0x12, 0x49, 0x0a, 0x0f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x5f, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 
0x6f, 0x76, - 0x65, 0x72, 0x61, 0x6c, 0x6c, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, - 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x54, 0x61, 0x67, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x73, - 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x63, - 0x69, 0x6e, 0x67, 0x22, 0xb4, 0x01, 0x0a, 0x0e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x56, - 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4a, 0x04, 0x08, - 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, - 0x72, 0x6e, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0x85, 0x19, 0x0a, 0x09, 0x52, - 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x3b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x67, - 0x65, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x0a, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x69, 0x73, 0x61, - 0x62, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x4b, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x3f, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x61, + 0x6e, 0x64, 0x6f, 0x6d, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x4b, 0x0a, 0x10, + 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x6c, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, + 0x6c, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 
0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, + 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x3a, 0x21, 0x9a, 0xc5, + 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x22, + 0xb4, 0x01, 0x0a, 0x0e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, + 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, + 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, + 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x52, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0xc9, 0x1c, 0x0a, 0x09, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x12, 0x3b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x0a, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, + 
0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4b, + 0x65, 0x79, 0x12, 0x4b, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x3f, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x1a, 0xb5, 0x18, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x0e, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x6d, 0x0a, 0x13, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x0f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x5e, 0x0a, + 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0d, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x55, 0x0a, + 0x0b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 
0x52, 0x05, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x1a, 0xf1, 0x14, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x5e, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, - 0x6d, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x61, - 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x48, - 0x00, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x12, 0x5e, 
0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x4b, 0x65, 0x79, 0x12, 0x68, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x10, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x77, + 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, + 0x61, 0x42, 0x11, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, + 0xf2, 0xd2, 0x05, 0x01, 0x48, 0x00, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 
0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x68, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, - 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, - 0x52, 0x10, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x12, 0x77, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, + 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4a, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 
0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x15, 0x6d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x73, 0x6b, + 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, + 0x00, 0x52, 0x13, 0x6d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x81, 0x01, 0x0a, 0x1b, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, - 0x61, 0x44, 0x61, 0x74, 0x61, 0x42, 0x11, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, - 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x48, 0x00, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x48, - 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4a, 0x0a, 0x09, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x09, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x15, 0x6d, 0x61, 0x73, 0x6b, 0x65, - 0x64, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x4d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x1a, 0x49, 0x0a, 0x0d, 0x53, 0x6f, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, + 0x52, 0x18, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x49, 0x0a, 0x0d, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x38, 0x9a, 
0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, @@ -7432,7 +7701,7 @@ var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{ 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xb4, 0x02, 0x0a, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xda, 0x02, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, @@ -7449,10 +7718,30 @@ var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{ 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, - 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x26, 0x0a, 0x06, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, - 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x52, - 0x59, 0x10, 0x01, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x73, + 0x6b, 0x69, 0x70, 0x5f, 0x69, 0x66, 0x5f, 0x61, 0x62, 
0x73, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x49, 0x66, 0x41, 0x62, 0x73, 0x65, 0x6e, + 0x74, 0x22, 0x26, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, + 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x54, + 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x01, 0x1a, 0x97, 0x02, 0x0a, 0x18, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x32, 0x0a, + 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x61, 0x0a, 0x10, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 
0x02, + 0x08, 0x01, 0x52, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x17, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, @@ -7474,7 +7763,7 @@ var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{ 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x22, 0xa4, 0x05, 0x0a, 0x0d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, + 0x69, 0x74, 0x22, 0xe6, 0x05, 0x0a, 0x0d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x0b, 0x65, 0x78, 0x61, 0x63, 0x74, @@ -7510,66 +7799,72 @@ var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{ 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x6e, 0x76, 0x65, - 0x72, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 
0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x42, 0x18, 0x0a, 0x16, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, - 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x0b, 0x72, 0x65, - 0x67, 0x65, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xa1, 0x02, 0x0a, 0x15, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x28, 0x80, 0x08, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x53, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, - 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x73, - 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, - 0x00, 0x52, 0x0c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, - 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x42, 0x21, 0x0a, 0x1f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, - 
0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0xb9, 0x02, - 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x52, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x40, 0x0a, 0x17, - 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x92, 0x01, 0x02, 0x10, 0x05, 0x52, 0x15, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x4a, - 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, - 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, - 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x18, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 
0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x22, 0x5d, 0x0a, 0x0c, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x0a, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x8b, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x42, 0x14, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, - 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0xba, 0x80, - 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x40, 0x0a, 0x1d, 0x74, 0x72, 0x65, 0x61, 0x74, + 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, + 0x61, 0x73, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, + 0x74, 0x72, 0x65, 0x61, 0x74, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x48, 0x65, 
0x61, 0x64, + 0x65, 0x72, 0x41, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x02, + 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x0b, + 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xa1, 0x02, 0x0a, 0x15, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x28, 0x80, 0x08, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x53, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, 0x70, 0x72, + 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x50, 0x61, 
0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x42, 0x21, 0x0a, 0x1f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, + 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, + 0xb9, 0x02, 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x52, 0x0a, 0x16, 0x6d, 0x61, + 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x40, + 0x0a, 0x17, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x10, 0x05, 0x52, 0x15, 0x72, 0x65, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, + 0x12, 0x4a, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 
0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x22, 0x79, 0x0a, 0x0c, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x0a, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x8b, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, + 
0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -7585,7 +7880,7 @@ func file_envoy_config_route_v3_route_components_proto_rawDescGZIP() []byte { } var file_envoy_config_route_v3_route_components_proto_enumTypes = make([]protoimpl.EnumInfo, 6) -var file_envoy_config_route_v3_route_components_proto_msgTypes = make([]protoimpl.MessageInfo, 55) +var file_envoy_config_route_v3_route_components_proto_msgTypes = make([]protoimpl.MessageInfo, 57) var file_envoy_config_route_v3_route_components_proto_goTypes = []interface{}{ (VirtualHost_TlsRequirementType)(0), // 0: envoy.config.route.v3.VirtualHost.TlsRequirementType (RouteAction_ClusterNotFoundResponseCode)(0), // 1: envoy.config.route.v3.RouteAction.ClusterNotFoundResponseCode @@ -7595,238 +7890,247 @@ var file_envoy_config_route_v3_route_components_proto_goTypes = []interface{}{ (RateLimit_Action_MetaData_Source)(0), // 5: envoy.config.route.v3.RateLimit.Action.MetaData.Source (*VirtualHost)(nil), // 6: envoy.config.route.v3.VirtualHost (*FilterAction)(nil), // 7: envoy.config.route.v3.FilterAction - (*Route)(nil), // 8: envoy.config.route.v3.Route - (*WeightedCluster)(nil), // 9: envoy.config.route.v3.WeightedCluster - (*ClusterSpecifierPlugin)(nil), // 10: envoy.config.route.v3.ClusterSpecifierPlugin - (*RouteMatch)(nil), // 11: envoy.config.route.v3.RouteMatch - (*CorsPolicy)(nil), // 12: envoy.config.route.v3.CorsPolicy - (*RouteAction)(nil), // 13: envoy.config.route.v3.RouteAction - (*RetryPolicy)(nil), // 14: envoy.config.route.v3.RetryPolicy - (*HedgePolicy)(nil), // 15: envoy.config.route.v3.HedgePolicy - (*RedirectAction)(nil), // 16: envoy.config.route.v3.RedirectAction - (*DirectResponseAction)(nil), // 17: envoy.config.route.v3.DirectResponseAction - (*NonForwardingAction)(nil), // 18: envoy.config.route.v3.NonForwardingAction - (*Decorator)(nil), // 19: envoy.config.route.v3.Decorator - (*Tracing)(nil), // 20: envoy.config.route.v3.Tracing - (*VirtualCluster)(nil), // 
21: envoy.config.route.v3.VirtualCluster - (*RateLimit)(nil), // 22: envoy.config.route.v3.RateLimit - (*HeaderMatcher)(nil), // 23: envoy.config.route.v3.HeaderMatcher - (*QueryParameterMatcher)(nil), // 24: envoy.config.route.v3.QueryParameterMatcher - (*InternalRedirectPolicy)(nil), // 25: envoy.config.route.v3.InternalRedirectPolicy - (*FilterConfig)(nil), // 26: envoy.config.route.v3.FilterConfig - nil, // 27: envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry - nil, // 28: envoy.config.route.v3.Route.TypedPerFilterConfigEntry - (*WeightedCluster_ClusterWeight)(nil), // 29: envoy.config.route.v3.WeightedCluster.ClusterWeight - nil, // 30: envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry - (*RouteMatch_GrpcRouteMatchOptions)(nil), // 31: envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions - (*RouteMatch_TlsContextMatchOptions)(nil), // 32: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions - (*RouteMatch_ConnectMatcher)(nil), // 33: envoy.config.route.v3.RouteMatch.ConnectMatcher - (*RouteAction_RequestMirrorPolicy)(nil), // 34: envoy.config.route.v3.RouteAction.RequestMirrorPolicy - (*RouteAction_HashPolicy)(nil), // 35: envoy.config.route.v3.RouteAction.HashPolicy - (*RouteAction_UpgradeConfig)(nil), // 36: envoy.config.route.v3.RouteAction.UpgradeConfig - (*RouteAction_MaxStreamDuration)(nil), // 37: envoy.config.route.v3.RouteAction.MaxStreamDuration - (*RouteAction_HashPolicy_Header)(nil), // 38: envoy.config.route.v3.RouteAction.HashPolicy.Header - (*RouteAction_HashPolicy_Cookie)(nil), // 39: envoy.config.route.v3.RouteAction.HashPolicy.Cookie - (*RouteAction_HashPolicy_ConnectionProperties)(nil), // 40: envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties - (*RouteAction_HashPolicy_QueryParameter)(nil), // 41: envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter - (*RouteAction_HashPolicy_FilterState)(nil), // 42: envoy.config.route.v3.RouteAction.HashPolicy.FilterState - 
(*RouteAction_UpgradeConfig_ConnectConfig)(nil), // 43: envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig - (*RetryPolicy_RetryPriority)(nil), // 44: envoy.config.route.v3.RetryPolicy.RetryPriority - (*RetryPolicy_RetryHostPredicate)(nil), // 45: envoy.config.route.v3.RetryPolicy.RetryHostPredicate - (*RetryPolicy_RetryBackOff)(nil), // 46: envoy.config.route.v3.RetryPolicy.RetryBackOff - (*RetryPolicy_ResetHeader)(nil), // 47: envoy.config.route.v3.RetryPolicy.ResetHeader - (*RetryPolicy_RateLimitedRetryBackOff)(nil), // 48: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff - (*RateLimit_Action)(nil), // 49: envoy.config.route.v3.RateLimit.Action - (*RateLimit_Override)(nil), // 50: envoy.config.route.v3.RateLimit.Override - (*RateLimit_Action_SourceCluster)(nil), // 51: envoy.config.route.v3.RateLimit.Action.SourceCluster - (*RateLimit_Action_DestinationCluster)(nil), // 52: envoy.config.route.v3.RateLimit.Action.DestinationCluster - (*RateLimit_Action_RequestHeaders)(nil), // 53: envoy.config.route.v3.RateLimit.Action.RequestHeaders - (*RateLimit_Action_RemoteAddress)(nil), // 54: envoy.config.route.v3.RateLimit.Action.RemoteAddress - (*RateLimit_Action_MaskedRemoteAddress)(nil), // 55: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress - (*RateLimit_Action_GenericKey)(nil), // 56: envoy.config.route.v3.RateLimit.Action.GenericKey - (*RateLimit_Action_HeaderValueMatch)(nil), // 57: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch - (*RateLimit_Action_DynamicMetaData)(nil), // 58: envoy.config.route.v3.RateLimit.Action.DynamicMetaData - (*RateLimit_Action_MetaData)(nil), // 59: envoy.config.route.v3.RateLimit.Action.MetaData - (*RateLimit_Override_DynamicMetadata)(nil), // 60: envoy.config.route.v3.RateLimit.Override.DynamicMetadata - (*v3.Matcher)(nil), // 61: xds.type.matcher.v3.Matcher - (*v31.HeaderValueOption)(nil), // 62: envoy.config.core.v3.HeaderValueOption - (*any.Any)(nil), // 63: google.protobuf.Any - 
(*wrappers.UInt32Value)(nil), // 64: google.protobuf.UInt32Value - (*v31.Metadata)(nil), // 65: envoy.config.core.v3.Metadata - (*v31.TypedExtensionConfig)(nil), // 66: envoy.config.core.v3.TypedExtensionConfig - (*v32.RegexMatcher)(nil), // 67: envoy.type.matcher.v3.RegexMatcher - (*wrappers.BoolValue)(nil), // 68: google.protobuf.BoolValue - (*v31.RuntimeFractionalPercent)(nil), // 69: envoy.config.core.v3.RuntimeFractionalPercent - (*v32.MetadataMatcher)(nil), // 70: envoy.type.matcher.v3.MetadataMatcher - (*v32.StringMatcher)(nil), // 71: envoy.type.matcher.v3.StringMatcher - (*v32.RegexMatchAndSubstitute)(nil), // 72: envoy.type.matcher.v3.RegexMatchAndSubstitute - (*duration.Duration)(nil), // 73: google.protobuf.Duration - (v31.RoutingPriority)(0), // 74: envoy.config.core.v3.RoutingPriority - (*v33.FractionalPercent)(nil), // 75: envoy.type.v3.FractionalPercent - (*v31.DataSource)(nil), // 76: envoy.config.core.v3.DataSource - (*v34.CustomTag)(nil), // 77: envoy.type.tracing.v3.CustomTag - (*v33.Int64Range)(nil), // 78: envoy.type.v3.Int64Range - (*v31.ProxyProtocolConfig)(nil), // 79: envoy.config.core.v3.ProxyProtocolConfig - (*v35.MetadataKey)(nil), // 80: envoy.type.metadata.v3.MetadataKey + (*RouteList)(nil), // 8: envoy.config.route.v3.RouteList + (*Route)(nil), // 9: envoy.config.route.v3.Route + (*WeightedCluster)(nil), // 10: envoy.config.route.v3.WeightedCluster + (*ClusterSpecifierPlugin)(nil), // 11: envoy.config.route.v3.ClusterSpecifierPlugin + (*RouteMatch)(nil), // 12: envoy.config.route.v3.RouteMatch + (*CorsPolicy)(nil), // 13: envoy.config.route.v3.CorsPolicy + (*RouteAction)(nil), // 14: envoy.config.route.v3.RouteAction + (*RetryPolicy)(nil), // 15: envoy.config.route.v3.RetryPolicy + (*HedgePolicy)(nil), // 16: envoy.config.route.v3.HedgePolicy + (*RedirectAction)(nil), // 17: envoy.config.route.v3.RedirectAction + (*DirectResponseAction)(nil), // 18: envoy.config.route.v3.DirectResponseAction + (*NonForwardingAction)(nil), // 19: 
envoy.config.route.v3.NonForwardingAction + (*Decorator)(nil), // 20: envoy.config.route.v3.Decorator + (*Tracing)(nil), // 21: envoy.config.route.v3.Tracing + (*VirtualCluster)(nil), // 22: envoy.config.route.v3.VirtualCluster + (*RateLimit)(nil), // 23: envoy.config.route.v3.RateLimit + (*HeaderMatcher)(nil), // 24: envoy.config.route.v3.HeaderMatcher + (*QueryParameterMatcher)(nil), // 25: envoy.config.route.v3.QueryParameterMatcher + (*InternalRedirectPolicy)(nil), // 26: envoy.config.route.v3.InternalRedirectPolicy + (*FilterConfig)(nil), // 27: envoy.config.route.v3.FilterConfig + nil, // 28: envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry + nil, // 29: envoy.config.route.v3.Route.TypedPerFilterConfigEntry + (*WeightedCluster_ClusterWeight)(nil), // 30: envoy.config.route.v3.WeightedCluster.ClusterWeight + nil, // 31: envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry + (*RouteMatch_GrpcRouteMatchOptions)(nil), // 32: envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions + (*RouteMatch_TlsContextMatchOptions)(nil), // 33: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions + (*RouteMatch_ConnectMatcher)(nil), // 34: envoy.config.route.v3.RouteMatch.ConnectMatcher + (*RouteAction_RequestMirrorPolicy)(nil), // 35: envoy.config.route.v3.RouteAction.RequestMirrorPolicy + (*RouteAction_HashPolicy)(nil), // 36: envoy.config.route.v3.RouteAction.HashPolicy + (*RouteAction_UpgradeConfig)(nil), // 37: envoy.config.route.v3.RouteAction.UpgradeConfig + (*RouteAction_MaxStreamDuration)(nil), // 38: envoy.config.route.v3.RouteAction.MaxStreamDuration + (*RouteAction_HashPolicy_Header)(nil), // 39: envoy.config.route.v3.RouteAction.HashPolicy.Header + (*RouteAction_HashPolicy_Cookie)(nil), // 40: envoy.config.route.v3.RouteAction.HashPolicy.Cookie + (*RouteAction_HashPolicy_ConnectionProperties)(nil), // 41: envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties + (*RouteAction_HashPolicy_QueryParameter)(nil), // 42: 
envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter + (*RouteAction_HashPolicy_FilterState)(nil), // 43: envoy.config.route.v3.RouteAction.HashPolicy.FilterState + (*RouteAction_UpgradeConfig_ConnectConfig)(nil), // 44: envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig + (*RetryPolicy_RetryPriority)(nil), // 45: envoy.config.route.v3.RetryPolicy.RetryPriority + (*RetryPolicy_RetryHostPredicate)(nil), // 46: envoy.config.route.v3.RetryPolicy.RetryHostPredicate + (*RetryPolicy_RetryBackOff)(nil), // 47: envoy.config.route.v3.RetryPolicy.RetryBackOff + (*RetryPolicy_ResetHeader)(nil), // 48: envoy.config.route.v3.RetryPolicy.ResetHeader + (*RetryPolicy_RateLimitedRetryBackOff)(nil), // 49: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff + (*RateLimit_Action)(nil), // 50: envoy.config.route.v3.RateLimit.Action + (*RateLimit_Override)(nil), // 51: envoy.config.route.v3.RateLimit.Override + (*RateLimit_Action_SourceCluster)(nil), // 52: envoy.config.route.v3.RateLimit.Action.SourceCluster + (*RateLimit_Action_DestinationCluster)(nil), // 53: envoy.config.route.v3.RateLimit.Action.DestinationCluster + (*RateLimit_Action_RequestHeaders)(nil), // 54: envoy.config.route.v3.RateLimit.Action.RequestHeaders + (*RateLimit_Action_RemoteAddress)(nil), // 55: envoy.config.route.v3.RateLimit.Action.RemoteAddress + (*RateLimit_Action_MaskedRemoteAddress)(nil), // 56: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress + (*RateLimit_Action_GenericKey)(nil), // 57: envoy.config.route.v3.RateLimit.Action.GenericKey + (*RateLimit_Action_HeaderValueMatch)(nil), // 58: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch + (*RateLimit_Action_DynamicMetaData)(nil), // 59: envoy.config.route.v3.RateLimit.Action.DynamicMetaData + (*RateLimit_Action_MetaData)(nil), // 60: envoy.config.route.v3.RateLimit.Action.MetaData + (*RateLimit_Action_QueryParameterValueMatch)(nil), // 61: envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch + 
(*RateLimit_Override_DynamicMetadata)(nil), // 62: envoy.config.route.v3.RateLimit.Override.DynamicMetadata + (*v3.Matcher)(nil), // 63: xds.type.matcher.v3.Matcher + (*v31.HeaderValueOption)(nil), // 64: envoy.config.core.v3.HeaderValueOption + (*any1.Any)(nil), // 65: google.protobuf.Any + (*wrappers.UInt32Value)(nil), // 66: google.protobuf.UInt32Value + (*v31.Metadata)(nil), // 67: envoy.config.core.v3.Metadata + (*v31.TypedExtensionConfig)(nil), // 68: envoy.config.core.v3.TypedExtensionConfig + (*v32.RegexMatcher)(nil), // 69: envoy.type.matcher.v3.RegexMatcher + (*wrappers.BoolValue)(nil), // 70: google.protobuf.BoolValue + (*v31.RuntimeFractionalPercent)(nil), // 71: envoy.config.core.v3.RuntimeFractionalPercent + (*v32.MetadataMatcher)(nil), // 72: envoy.type.matcher.v3.MetadataMatcher + (*v32.StringMatcher)(nil), // 73: envoy.type.matcher.v3.StringMatcher + (*v32.RegexMatchAndSubstitute)(nil), // 74: envoy.type.matcher.v3.RegexMatchAndSubstitute + (*duration.Duration)(nil), // 75: google.protobuf.Duration + (v31.RoutingPriority)(0), // 76: envoy.config.core.v3.RoutingPriority + (*v33.FractionalPercent)(nil), // 77: envoy.type.v3.FractionalPercent + (*v31.DataSource)(nil), // 78: envoy.config.core.v3.DataSource + (*v34.CustomTag)(nil), // 79: envoy.type.tracing.v3.CustomTag + (*v33.Int64Range)(nil), // 80: envoy.type.v3.Int64Range + (*v31.ProxyProtocolConfig)(nil), // 81: envoy.config.core.v3.ProxyProtocolConfig + (*v35.MetadataKey)(nil), // 82: envoy.type.metadata.v3.MetadataKey } var file_envoy_config_route_v3_route_components_proto_depIdxs = []int32{ - 8, // 0: envoy.config.route.v3.VirtualHost.routes:type_name -> envoy.config.route.v3.Route - 61, // 1: envoy.config.route.v3.VirtualHost.matcher:type_name -> xds.type.matcher.v3.Matcher + 9, // 0: envoy.config.route.v3.VirtualHost.routes:type_name -> envoy.config.route.v3.Route + 63, // 1: envoy.config.route.v3.VirtualHost.matcher:type_name -> xds.type.matcher.v3.Matcher 0, // 2: 
envoy.config.route.v3.VirtualHost.require_tls:type_name -> envoy.config.route.v3.VirtualHost.TlsRequirementType - 21, // 3: envoy.config.route.v3.VirtualHost.virtual_clusters:type_name -> envoy.config.route.v3.VirtualCluster - 22, // 4: envoy.config.route.v3.VirtualHost.rate_limits:type_name -> envoy.config.route.v3.RateLimit - 62, // 5: envoy.config.route.v3.VirtualHost.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption - 62, // 6: envoy.config.route.v3.VirtualHost.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption - 12, // 7: envoy.config.route.v3.VirtualHost.cors:type_name -> envoy.config.route.v3.CorsPolicy - 27, // 8: envoy.config.route.v3.VirtualHost.typed_per_filter_config:type_name -> envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry - 14, // 9: envoy.config.route.v3.VirtualHost.retry_policy:type_name -> envoy.config.route.v3.RetryPolicy - 63, // 10: envoy.config.route.v3.VirtualHost.retry_policy_typed_config:type_name -> google.protobuf.Any - 15, // 11: envoy.config.route.v3.VirtualHost.hedge_policy:type_name -> envoy.config.route.v3.HedgePolicy - 64, // 12: envoy.config.route.v3.VirtualHost.per_request_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value - 34, // 13: envoy.config.route.v3.VirtualHost.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy - 63, // 14: envoy.config.route.v3.FilterAction.action:type_name -> google.protobuf.Any - 11, // 15: envoy.config.route.v3.Route.match:type_name -> envoy.config.route.v3.RouteMatch - 13, // 16: envoy.config.route.v3.Route.route:type_name -> envoy.config.route.v3.RouteAction - 16, // 17: envoy.config.route.v3.Route.redirect:type_name -> envoy.config.route.v3.RedirectAction - 17, // 18: envoy.config.route.v3.Route.direct_response:type_name -> envoy.config.route.v3.DirectResponseAction - 7, // 19: envoy.config.route.v3.Route.filter_action:type_name -> envoy.config.route.v3.FilterAction - 18, // 20: 
envoy.config.route.v3.Route.non_forwarding_action:type_name -> envoy.config.route.v3.NonForwardingAction - 65, // 21: envoy.config.route.v3.Route.metadata:type_name -> envoy.config.core.v3.Metadata - 19, // 22: envoy.config.route.v3.Route.decorator:type_name -> envoy.config.route.v3.Decorator - 28, // 23: envoy.config.route.v3.Route.typed_per_filter_config:type_name -> envoy.config.route.v3.Route.TypedPerFilterConfigEntry - 62, // 24: envoy.config.route.v3.Route.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption - 62, // 25: envoy.config.route.v3.Route.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption - 20, // 26: envoy.config.route.v3.Route.tracing:type_name -> envoy.config.route.v3.Tracing - 64, // 27: envoy.config.route.v3.Route.per_request_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value - 29, // 28: envoy.config.route.v3.WeightedCluster.clusters:type_name -> envoy.config.route.v3.WeightedCluster.ClusterWeight - 64, // 29: envoy.config.route.v3.WeightedCluster.total_weight:type_name -> google.protobuf.UInt32Value - 66, // 30: envoy.config.route.v3.ClusterSpecifierPlugin.extension:type_name -> envoy.config.core.v3.TypedExtensionConfig - 67, // 31: envoy.config.route.v3.RouteMatch.safe_regex:type_name -> envoy.type.matcher.v3.RegexMatcher - 33, // 32: envoy.config.route.v3.RouteMatch.connect_matcher:type_name -> envoy.config.route.v3.RouteMatch.ConnectMatcher - 68, // 33: envoy.config.route.v3.RouteMatch.case_sensitive:type_name -> google.protobuf.BoolValue - 69, // 34: envoy.config.route.v3.RouteMatch.runtime_fraction:type_name -> envoy.config.core.v3.RuntimeFractionalPercent - 23, // 35: envoy.config.route.v3.RouteMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher - 24, // 36: envoy.config.route.v3.RouteMatch.query_parameters:type_name -> envoy.config.route.v3.QueryParameterMatcher - 31, // 37: envoy.config.route.v3.RouteMatch.grpc:type_name -> 
envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions - 32, // 38: envoy.config.route.v3.RouteMatch.tls_context:type_name -> envoy.config.route.v3.RouteMatch.TlsContextMatchOptions - 70, // 39: envoy.config.route.v3.RouteMatch.dynamic_metadata:type_name -> envoy.type.matcher.v3.MetadataMatcher - 71, // 40: envoy.config.route.v3.CorsPolicy.allow_origin_string_match:type_name -> envoy.type.matcher.v3.StringMatcher - 68, // 41: envoy.config.route.v3.CorsPolicy.allow_credentials:type_name -> google.protobuf.BoolValue - 69, // 42: envoy.config.route.v3.CorsPolicy.filter_enabled:type_name -> envoy.config.core.v3.RuntimeFractionalPercent - 69, // 43: envoy.config.route.v3.CorsPolicy.shadow_enabled:type_name -> envoy.config.core.v3.RuntimeFractionalPercent - 9, // 44: envoy.config.route.v3.RouteAction.weighted_clusters:type_name -> envoy.config.route.v3.WeightedCluster - 10, // 45: envoy.config.route.v3.RouteAction.inline_cluster_specifier_plugin:type_name -> envoy.config.route.v3.ClusterSpecifierPlugin - 1, // 46: envoy.config.route.v3.RouteAction.cluster_not_found_response_code:type_name -> envoy.config.route.v3.RouteAction.ClusterNotFoundResponseCode - 65, // 47: envoy.config.route.v3.RouteAction.metadata_match:type_name -> envoy.config.core.v3.Metadata - 72, // 48: envoy.config.route.v3.RouteAction.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute - 68, // 49: envoy.config.route.v3.RouteAction.auto_host_rewrite:type_name -> google.protobuf.BoolValue - 72, // 50: envoy.config.route.v3.RouteAction.host_rewrite_path_regex:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute - 73, // 51: envoy.config.route.v3.RouteAction.timeout:type_name -> google.protobuf.Duration - 73, // 52: envoy.config.route.v3.RouteAction.idle_timeout:type_name -> google.protobuf.Duration - 66, // 53: envoy.config.route.v3.RouteAction.early_data_policy:type_name -> envoy.config.core.v3.TypedExtensionConfig - 14, // 54: 
envoy.config.route.v3.RouteAction.retry_policy:type_name -> envoy.config.route.v3.RetryPolicy - 63, // 55: envoy.config.route.v3.RouteAction.retry_policy_typed_config:type_name -> google.protobuf.Any - 34, // 56: envoy.config.route.v3.RouteAction.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy - 74, // 57: envoy.config.route.v3.RouteAction.priority:type_name -> envoy.config.core.v3.RoutingPriority - 22, // 58: envoy.config.route.v3.RouteAction.rate_limits:type_name -> envoy.config.route.v3.RateLimit - 68, // 59: envoy.config.route.v3.RouteAction.include_vh_rate_limits:type_name -> google.protobuf.BoolValue - 35, // 60: envoy.config.route.v3.RouteAction.hash_policy:type_name -> envoy.config.route.v3.RouteAction.HashPolicy - 12, // 61: envoy.config.route.v3.RouteAction.cors:type_name -> envoy.config.route.v3.CorsPolicy - 73, // 62: envoy.config.route.v3.RouteAction.max_grpc_timeout:type_name -> google.protobuf.Duration - 73, // 63: envoy.config.route.v3.RouteAction.grpc_timeout_offset:type_name -> google.protobuf.Duration - 36, // 64: envoy.config.route.v3.RouteAction.upgrade_configs:type_name -> envoy.config.route.v3.RouteAction.UpgradeConfig - 25, // 65: envoy.config.route.v3.RouteAction.internal_redirect_policy:type_name -> envoy.config.route.v3.InternalRedirectPolicy - 2, // 66: envoy.config.route.v3.RouteAction.internal_redirect_action:type_name -> envoy.config.route.v3.RouteAction.InternalRedirectAction - 64, // 67: envoy.config.route.v3.RouteAction.max_internal_redirects:type_name -> google.protobuf.UInt32Value - 15, // 68: envoy.config.route.v3.RouteAction.hedge_policy:type_name -> envoy.config.route.v3.HedgePolicy - 37, // 69: envoy.config.route.v3.RouteAction.max_stream_duration:type_name -> envoy.config.route.v3.RouteAction.MaxStreamDuration - 64, // 70: envoy.config.route.v3.RetryPolicy.num_retries:type_name -> google.protobuf.UInt32Value - 73, // 71: envoy.config.route.v3.RetryPolicy.per_try_timeout:type_name -> 
google.protobuf.Duration - 73, // 72: envoy.config.route.v3.RetryPolicy.per_try_idle_timeout:type_name -> google.protobuf.Duration - 44, // 73: envoy.config.route.v3.RetryPolicy.retry_priority:type_name -> envoy.config.route.v3.RetryPolicy.RetryPriority - 45, // 74: envoy.config.route.v3.RetryPolicy.retry_host_predicate:type_name -> envoy.config.route.v3.RetryPolicy.RetryHostPredicate - 66, // 75: envoy.config.route.v3.RetryPolicy.retry_options_predicates:type_name -> envoy.config.core.v3.TypedExtensionConfig - 46, // 76: envoy.config.route.v3.RetryPolicy.retry_back_off:type_name -> envoy.config.route.v3.RetryPolicy.RetryBackOff - 48, // 77: envoy.config.route.v3.RetryPolicy.rate_limited_retry_back_off:type_name -> envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff - 23, // 78: envoy.config.route.v3.RetryPolicy.retriable_headers:type_name -> envoy.config.route.v3.HeaderMatcher - 23, // 79: envoy.config.route.v3.RetryPolicy.retriable_request_headers:type_name -> envoy.config.route.v3.HeaderMatcher - 64, // 80: envoy.config.route.v3.HedgePolicy.initial_requests:type_name -> google.protobuf.UInt32Value - 75, // 81: envoy.config.route.v3.HedgePolicy.additional_request_chance:type_name -> envoy.type.v3.FractionalPercent - 72, // 82: envoy.config.route.v3.RedirectAction.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute - 4, // 83: envoy.config.route.v3.RedirectAction.response_code:type_name -> envoy.config.route.v3.RedirectAction.RedirectResponseCode - 76, // 84: envoy.config.route.v3.DirectResponseAction.body:type_name -> envoy.config.core.v3.DataSource - 68, // 85: envoy.config.route.v3.Decorator.propagate:type_name -> google.protobuf.BoolValue - 75, // 86: envoy.config.route.v3.Tracing.client_sampling:type_name -> envoy.type.v3.FractionalPercent - 75, // 87: envoy.config.route.v3.Tracing.random_sampling:type_name -> envoy.type.v3.FractionalPercent - 75, // 88: envoy.config.route.v3.Tracing.overall_sampling:type_name -> 
envoy.type.v3.FractionalPercent - 77, // 89: envoy.config.route.v3.Tracing.custom_tags:type_name -> envoy.type.tracing.v3.CustomTag - 23, // 90: envoy.config.route.v3.VirtualCluster.headers:type_name -> envoy.config.route.v3.HeaderMatcher - 64, // 91: envoy.config.route.v3.RateLimit.stage:type_name -> google.protobuf.UInt32Value - 49, // 92: envoy.config.route.v3.RateLimit.actions:type_name -> envoy.config.route.v3.RateLimit.Action - 50, // 93: envoy.config.route.v3.RateLimit.limit:type_name -> envoy.config.route.v3.RateLimit.Override - 67, // 94: envoy.config.route.v3.HeaderMatcher.safe_regex_match:type_name -> envoy.type.matcher.v3.RegexMatcher - 78, // 95: envoy.config.route.v3.HeaderMatcher.range_match:type_name -> envoy.type.v3.Int64Range - 71, // 96: envoy.config.route.v3.HeaderMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher - 71, // 97: envoy.config.route.v3.QueryParameterMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher - 64, // 98: envoy.config.route.v3.InternalRedirectPolicy.max_internal_redirects:type_name -> google.protobuf.UInt32Value - 66, // 99: envoy.config.route.v3.InternalRedirectPolicy.predicates:type_name -> envoy.config.core.v3.TypedExtensionConfig - 63, // 100: envoy.config.route.v3.FilterConfig.config:type_name -> google.protobuf.Any - 63, // 101: envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any - 63, // 102: envoy.config.route.v3.Route.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any - 64, // 103: envoy.config.route.v3.WeightedCluster.ClusterWeight.weight:type_name -> google.protobuf.UInt32Value - 65, // 104: envoy.config.route.v3.WeightedCluster.ClusterWeight.metadata_match:type_name -> envoy.config.core.v3.Metadata - 62, // 105: envoy.config.route.v3.WeightedCluster.ClusterWeight.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption - 62, // 106: 
envoy.config.route.v3.WeightedCluster.ClusterWeight.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption - 30, // 107: envoy.config.route.v3.WeightedCluster.ClusterWeight.typed_per_filter_config:type_name -> envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry - 63, // 108: envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any - 68, // 109: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions.presented:type_name -> google.protobuf.BoolValue - 68, // 110: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions.validated:type_name -> google.protobuf.BoolValue - 69, // 111: envoy.config.route.v3.RouteAction.RequestMirrorPolicy.runtime_fraction:type_name -> envoy.config.core.v3.RuntimeFractionalPercent - 68, // 112: envoy.config.route.v3.RouteAction.RequestMirrorPolicy.trace_sampled:type_name -> google.protobuf.BoolValue - 38, // 113: envoy.config.route.v3.RouteAction.HashPolicy.header:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.Header - 39, // 114: envoy.config.route.v3.RouteAction.HashPolicy.cookie:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.Cookie - 40, // 115: envoy.config.route.v3.RouteAction.HashPolicy.connection_properties:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties - 41, // 116: envoy.config.route.v3.RouteAction.HashPolicy.query_parameter:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter - 42, // 117: envoy.config.route.v3.RouteAction.HashPolicy.filter_state:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.FilterState - 68, // 118: envoy.config.route.v3.RouteAction.UpgradeConfig.enabled:type_name -> google.protobuf.BoolValue - 43, // 119: envoy.config.route.v3.RouteAction.UpgradeConfig.connect_config:type_name -> envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig - 73, // 120: 
envoy.config.route.v3.RouteAction.MaxStreamDuration.max_stream_duration:type_name -> google.protobuf.Duration - 73, // 121: envoy.config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_max:type_name -> google.protobuf.Duration - 73, // 122: envoy.config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_offset:type_name -> google.protobuf.Duration - 72, // 123: envoy.config.route.v3.RouteAction.HashPolicy.Header.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute - 73, // 124: envoy.config.route.v3.RouteAction.HashPolicy.Cookie.ttl:type_name -> google.protobuf.Duration - 79, // 125: envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig.proxy_protocol_config:type_name -> envoy.config.core.v3.ProxyProtocolConfig - 63, // 126: envoy.config.route.v3.RetryPolicy.RetryPriority.typed_config:type_name -> google.protobuf.Any - 63, // 127: envoy.config.route.v3.RetryPolicy.RetryHostPredicate.typed_config:type_name -> google.protobuf.Any - 73, // 128: envoy.config.route.v3.RetryPolicy.RetryBackOff.base_interval:type_name -> google.protobuf.Duration - 73, // 129: envoy.config.route.v3.RetryPolicy.RetryBackOff.max_interval:type_name -> google.protobuf.Duration - 3, // 130: envoy.config.route.v3.RetryPolicy.ResetHeader.format:type_name -> envoy.config.route.v3.RetryPolicy.ResetHeaderFormat - 47, // 131: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff.reset_headers:type_name -> envoy.config.route.v3.RetryPolicy.ResetHeader - 73, // 132: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff.max_interval:type_name -> google.protobuf.Duration - 51, // 133: envoy.config.route.v3.RateLimit.Action.source_cluster:type_name -> envoy.config.route.v3.RateLimit.Action.SourceCluster - 52, // 134: envoy.config.route.v3.RateLimit.Action.destination_cluster:type_name -> envoy.config.route.v3.RateLimit.Action.DestinationCluster - 53, // 135: envoy.config.route.v3.RateLimit.Action.request_headers:type_name -> 
envoy.config.route.v3.RateLimit.Action.RequestHeaders - 54, // 136: envoy.config.route.v3.RateLimit.Action.remote_address:type_name -> envoy.config.route.v3.RateLimit.Action.RemoteAddress - 56, // 137: envoy.config.route.v3.RateLimit.Action.generic_key:type_name -> envoy.config.route.v3.RateLimit.Action.GenericKey - 57, // 138: envoy.config.route.v3.RateLimit.Action.header_value_match:type_name -> envoy.config.route.v3.RateLimit.Action.HeaderValueMatch - 58, // 139: envoy.config.route.v3.RateLimit.Action.dynamic_metadata:type_name -> envoy.config.route.v3.RateLimit.Action.DynamicMetaData - 59, // 140: envoy.config.route.v3.RateLimit.Action.metadata:type_name -> envoy.config.route.v3.RateLimit.Action.MetaData - 66, // 141: envoy.config.route.v3.RateLimit.Action.extension:type_name -> envoy.config.core.v3.TypedExtensionConfig - 55, // 142: envoy.config.route.v3.RateLimit.Action.masked_remote_address:type_name -> envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress - 60, // 143: envoy.config.route.v3.RateLimit.Override.dynamic_metadata:type_name -> envoy.config.route.v3.RateLimit.Override.DynamicMetadata - 64, // 144: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress.v4_prefix_mask_len:type_name -> google.protobuf.UInt32Value - 64, // 145: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress.v6_prefix_mask_len:type_name -> google.protobuf.UInt32Value - 68, // 146: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch.expect_match:type_name -> google.protobuf.BoolValue - 23, // 147: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher - 80, // 148: envoy.config.route.v3.RateLimit.Action.DynamicMetaData.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey - 80, // 149: envoy.config.route.v3.RateLimit.Action.MetaData.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey - 5, // 150: envoy.config.route.v3.RateLimit.Action.MetaData.source:type_name -> 
envoy.config.route.v3.RateLimit.Action.MetaData.Source - 80, // 151: envoy.config.route.v3.RateLimit.Override.DynamicMetadata.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey - 152, // [152:152] is the sub-list for method output_type - 152, // [152:152] is the sub-list for method input_type - 152, // [152:152] is the sub-list for extension type_name - 152, // [152:152] is the sub-list for extension extendee - 0, // [0:152] is the sub-list for field type_name + 22, // 3: envoy.config.route.v3.VirtualHost.virtual_clusters:type_name -> envoy.config.route.v3.VirtualCluster + 23, // 4: envoy.config.route.v3.VirtualHost.rate_limits:type_name -> envoy.config.route.v3.RateLimit + 64, // 5: envoy.config.route.v3.VirtualHost.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 64, // 6: envoy.config.route.v3.VirtualHost.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 13, // 7: envoy.config.route.v3.VirtualHost.cors:type_name -> envoy.config.route.v3.CorsPolicy + 28, // 8: envoy.config.route.v3.VirtualHost.typed_per_filter_config:type_name -> envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry + 15, // 9: envoy.config.route.v3.VirtualHost.retry_policy:type_name -> envoy.config.route.v3.RetryPolicy + 65, // 10: envoy.config.route.v3.VirtualHost.retry_policy_typed_config:type_name -> google.protobuf.Any + 16, // 11: envoy.config.route.v3.VirtualHost.hedge_policy:type_name -> envoy.config.route.v3.HedgePolicy + 66, // 12: envoy.config.route.v3.VirtualHost.per_request_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 35, // 13: envoy.config.route.v3.VirtualHost.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy + 65, // 14: envoy.config.route.v3.FilterAction.action:type_name -> google.protobuf.Any + 9, // 15: envoy.config.route.v3.RouteList.routes:type_name -> envoy.config.route.v3.Route + 12, // 16: envoy.config.route.v3.Route.match:type_name -> 
envoy.config.route.v3.RouteMatch + 14, // 17: envoy.config.route.v3.Route.route:type_name -> envoy.config.route.v3.RouteAction + 17, // 18: envoy.config.route.v3.Route.redirect:type_name -> envoy.config.route.v3.RedirectAction + 18, // 19: envoy.config.route.v3.Route.direct_response:type_name -> envoy.config.route.v3.DirectResponseAction + 7, // 20: envoy.config.route.v3.Route.filter_action:type_name -> envoy.config.route.v3.FilterAction + 19, // 21: envoy.config.route.v3.Route.non_forwarding_action:type_name -> envoy.config.route.v3.NonForwardingAction + 67, // 22: envoy.config.route.v3.Route.metadata:type_name -> envoy.config.core.v3.Metadata + 20, // 23: envoy.config.route.v3.Route.decorator:type_name -> envoy.config.route.v3.Decorator + 29, // 24: envoy.config.route.v3.Route.typed_per_filter_config:type_name -> envoy.config.route.v3.Route.TypedPerFilterConfigEntry + 64, // 25: envoy.config.route.v3.Route.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 64, // 26: envoy.config.route.v3.Route.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 21, // 27: envoy.config.route.v3.Route.tracing:type_name -> envoy.config.route.v3.Tracing + 66, // 28: envoy.config.route.v3.Route.per_request_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 30, // 29: envoy.config.route.v3.WeightedCluster.clusters:type_name -> envoy.config.route.v3.WeightedCluster.ClusterWeight + 66, // 30: envoy.config.route.v3.WeightedCluster.total_weight:type_name -> google.protobuf.UInt32Value + 68, // 31: envoy.config.route.v3.ClusterSpecifierPlugin.extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 69, // 32: envoy.config.route.v3.RouteMatch.safe_regex:type_name -> envoy.type.matcher.v3.RegexMatcher + 34, // 33: envoy.config.route.v3.RouteMatch.connect_matcher:type_name -> envoy.config.route.v3.RouteMatch.ConnectMatcher + 68, // 34: envoy.config.route.v3.RouteMatch.path_match_policy:type_name -> 
envoy.config.core.v3.TypedExtensionConfig + 70, // 35: envoy.config.route.v3.RouteMatch.case_sensitive:type_name -> google.protobuf.BoolValue + 71, // 36: envoy.config.route.v3.RouteMatch.runtime_fraction:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 24, // 37: envoy.config.route.v3.RouteMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 25, // 38: envoy.config.route.v3.RouteMatch.query_parameters:type_name -> envoy.config.route.v3.QueryParameterMatcher + 32, // 39: envoy.config.route.v3.RouteMatch.grpc:type_name -> envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions + 33, // 40: envoy.config.route.v3.RouteMatch.tls_context:type_name -> envoy.config.route.v3.RouteMatch.TlsContextMatchOptions + 72, // 41: envoy.config.route.v3.RouteMatch.dynamic_metadata:type_name -> envoy.type.matcher.v3.MetadataMatcher + 73, // 42: envoy.config.route.v3.CorsPolicy.allow_origin_string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 70, // 43: envoy.config.route.v3.CorsPolicy.allow_credentials:type_name -> google.protobuf.BoolValue + 71, // 44: envoy.config.route.v3.CorsPolicy.filter_enabled:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 71, // 45: envoy.config.route.v3.CorsPolicy.shadow_enabled:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 70, // 46: envoy.config.route.v3.CorsPolicy.allow_private_network_access:type_name -> google.protobuf.BoolValue + 10, // 47: envoy.config.route.v3.RouteAction.weighted_clusters:type_name -> envoy.config.route.v3.WeightedCluster + 11, // 48: envoy.config.route.v3.RouteAction.inline_cluster_specifier_plugin:type_name -> envoy.config.route.v3.ClusterSpecifierPlugin + 1, // 49: envoy.config.route.v3.RouteAction.cluster_not_found_response_code:type_name -> envoy.config.route.v3.RouteAction.ClusterNotFoundResponseCode + 67, // 50: envoy.config.route.v3.RouteAction.metadata_match:type_name -> envoy.config.core.v3.Metadata + 74, // 51: 
envoy.config.route.v3.RouteAction.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 68, // 52: envoy.config.route.v3.RouteAction.path_rewrite_policy:type_name -> envoy.config.core.v3.TypedExtensionConfig + 70, // 53: envoy.config.route.v3.RouteAction.auto_host_rewrite:type_name -> google.protobuf.BoolValue + 74, // 54: envoy.config.route.v3.RouteAction.host_rewrite_path_regex:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 75, // 55: envoy.config.route.v3.RouteAction.timeout:type_name -> google.protobuf.Duration + 75, // 56: envoy.config.route.v3.RouteAction.idle_timeout:type_name -> google.protobuf.Duration + 68, // 57: envoy.config.route.v3.RouteAction.early_data_policy:type_name -> envoy.config.core.v3.TypedExtensionConfig + 15, // 58: envoy.config.route.v3.RouteAction.retry_policy:type_name -> envoy.config.route.v3.RetryPolicy + 65, // 59: envoy.config.route.v3.RouteAction.retry_policy_typed_config:type_name -> google.protobuf.Any + 35, // 60: envoy.config.route.v3.RouteAction.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy + 76, // 61: envoy.config.route.v3.RouteAction.priority:type_name -> envoy.config.core.v3.RoutingPriority + 23, // 62: envoy.config.route.v3.RouteAction.rate_limits:type_name -> envoy.config.route.v3.RateLimit + 70, // 63: envoy.config.route.v3.RouteAction.include_vh_rate_limits:type_name -> google.protobuf.BoolValue + 36, // 64: envoy.config.route.v3.RouteAction.hash_policy:type_name -> envoy.config.route.v3.RouteAction.HashPolicy + 13, // 65: envoy.config.route.v3.RouteAction.cors:type_name -> envoy.config.route.v3.CorsPolicy + 75, // 66: envoy.config.route.v3.RouteAction.max_grpc_timeout:type_name -> google.protobuf.Duration + 75, // 67: envoy.config.route.v3.RouteAction.grpc_timeout_offset:type_name -> google.protobuf.Duration + 37, // 68: envoy.config.route.v3.RouteAction.upgrade_configs:type_name -> envoy.config.route.v3.RouteAction.UpgradeConfig + 26, // 
69: envoy.config.route.v3.RouteAction.internal_redirect_policy:type_name -> envoy.config.route.v3.InternalRedirectPolicy + 2, // 70: envoy.config.route.v3.RouteAction.internal_redirect_action:type_name -> envoy.config.route.v3.RouteAction.InternalRedirectAction + 66, // 71: envoy.config.route.v3.RouteAction.max_internal_redirects:type_name -> google.protobuf.UInt32Value + 16, // 72: envoy.config.route.v3.RouteAction.hedge_policy:type_name -> envoy.config.route.v3.HedgePolicy + 38, // 73: envoy.config.route.v3.RouteAction.max_stream_duration:type_name -> envoy.config.route.v3.RouteAction.MaxStreamDuration + 66, // 74: envoy.config.route.v3.RetryPolicy.num_retries:type_name -> google.protobuf.UInt32Value + 75, // 75: envoy.config.route.v3.RetryPolicy.per_try_timeout:type_name -> google.protobuf.Duration + 75, // 76: envoy.config.route.v3.RetryPolicy.per_try_idle_timeout:type_name -> google.protobuf.Duration + 45, // 77: envoy.config.route.v3.RetryPolicy.retry_priority:type_name -> envoy.config.route.v3.RetryPolicy.RetryPriority + 46, // 78: envoy.config.route.v3.RetryPolicy.retry_host_predicate:type_name -> envoy.config.route.v3.RetryPolicy.RetryHostPredicate + 68, // 79: envoy.config.route.v3.RetryPolicy.retry_options_predicates:type_name -> envoy.config.core.v3.TypedExtensionConfig + 47, // 80: envoy.config.route.v3.RetryPolicy.retry_back_off:type_name -> envoy.config.route.v3.RetryPolicy.RetryBackOff + 49, // 81: envoy.config.route.v3.RetryPolicy.rate_limited_retry_back_off:type_name -> envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff + 24, // 82: envoy.config.route.v3.RetryPolicy.retriable_headers:type_name -> envoy.config.route.v3.HeaderMatcher + 24, // 83: envoy.config.route.v3.RetryPolicy.retriable_request_headers:type_name -> envoy.config.route.v3.HeaderMatcher + 66, // 84: envoy.config.route.v3.HedgePolicy.initial_requests:type_name -> google.protobuf.UInt32Value + 77, // 85: envoy.config.route.v3.HedgePolicy.additional_request_chance:type_name -> 
envoy.type.v3.FractionalPercent + 74, // 86: envoy.config.route.v3.RedirectAction.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 4, // 87: envoy.config.route.v3.RedirectAction.response_code:type_name -> envoy.config.route.v3.RedirectAction.RedirectResponseCode + 78, // 88: envoy.config.route.v3.DirectResponseAction.body:type_name -> envoy.config.core.v3.DataSource + 70, // 89: envoy.config.route.v3.Decorator.propagate:type_name -> google.protobuf.BoolValue + 77, // 90: envoy.config.route.v3.Tracing.client_sampling:type_name -> envoy.type.v3.FractionalPercent + 77, // 91: envoy.config.route.v3.Tracing.random_sampling:type_name -> envoy.type.v3.FractionalPercent + 77, // 92: envoy.config.route.v3.Tracing.overall_sampling:type_name -> envoy.type.v3.FractionalPercent + 79, // 93: envoy.config.route.v3.Tracing.custom_tags:type_name -> envoy.type.tracing.v3.CustomTag + 24, // 94: envoy.config.route.v3.VirtualCluster.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 66, // 95: envoy.config.route.v3.RateLimit.stage:type_name -> google.protobuf.UInt32Value + 50, // 96: envoy.config.route.v3.RateLimit.actions:type_name -> envoy.config.route.v3.RateLimit.Action + 51, // 97: envoy.config.route.v3.RateLimit.limit:type_name -> envoy.config.route.v3.RateLimit.Override + 69, // 98: envoy.config.route.v3.HeaderMatcher.safe_regex_match:type_name -> envoy.type.matcher.v3.RegexMatcher + 80, // 99: envoy.config.route.v3.HeaderMatcher.range_match:type_name -> envoy.type.v3.Int64Range + 73, // 100: envoy.config.route.v3.HeaderMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 73, // 101: envoy.config.route.v3.QueryParameterMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 66, // 102: envoy.config.route.v3.InternalRedirectPolicy.max_internal_redirects:type_name -> google.protobuf.UInt32Value + 68, // 103: envoy.config.route.v3.InternalRedirectPolicy.predicates:type_name -> 
envoy.config.core.v3.TypedExtensionConfig + 65, // 104: envoy.config.route.v3.FilterConfig.config:type_name -> google.protobuf.Any + 65, // 105: envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 65, // 106: envoy.config.route.v3.Route.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 66, // 107: envoy.config.route.v3.WeightedCluster.ClusterWeight.weight:type_name -> google.protobuf.UInt32Value + 67, // 108: envoy.config.route.v3.WeightedCluster.ClusterWeight.metadata_match:type_name -> envoy.config.core.v3.Metadata + 64, // 109: envoy.config.route.v3.WeightedCluster.ClusterWeight.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 64, // 110: envoy.config.route.v3.WeightedCluster.ClusterWeight.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 31, // 111: envoy.config.route.v3.WeightedCluster.ClusterWeight.typed_per_filter_config:type_name -> envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry + 65, // 112: envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 70, // 113: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions.presented:type_name -> google.protobuf.BoolValue + 70, // 114: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions.validated:type_name -> google.protobuf.BoolValue + 71, // 115: envoy.config.route.v3.RouteAction.RequestMirrorPolicy.runtime_fraction:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 70, // 116: envoy.config.route.v3.RouteAction.RequestMirrorPolicy.trace_sampled:type_name -> google.protobuf.BoolValue + 39, // 117: envoy.config.route.v3.RouteAction.HashPolicy.header:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.Header + 40, // 118: envoy.config.route.v3.RouteAction.HashPolicy.cookie:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.Cookie + 41, // 119: 
envoy.config.route.v3.RouteAction.HashPolicy.connection_properties:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties + 42, // 120: envoy.config.route.v3.RouteAction.HashPolicy.query_parameter:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter + 43, // 121: envoy.config.route.v3.RouteAction.HashPolicy.filter_state:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.FilterState + 70, // 122: envoy.config.route.v3.RouteAction.UpgradeConfig.enabled:type_name -> google.protobuf.BoolValue + 44, // 123: envoy.config.route.v3.RouteAction.UpgradeConfig.connect_config:type_name -> envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig + 75, // 124: envoy.config.route.v3.RouteAction.MaxStreamDuration.max_stream_duration:type_name -> google.protobuf.Duration + 75, // 125: envoy.config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_max:type_name -> google.protobuf.Duration + 75, // 126: envoy.config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_offset:type_name -> google.protobuf.Duration + 74, // 127: envoy.config.route.v3.RouteAction.HashPolicy.Header.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 75, // 128: envoy.config.route.v3.RouteAction.HashPolicy.Cookie.ttl:type_name -> google.protobuf.Duration + 81, // 129: envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig.proxy_protocol_config:type_name -> envoy.config.core.v3.ProxyProtocolConfig + 65, // 130: envoy.config.route.v3.RetryPolicy.RetryPriority.typed_config:type_name -> google.protobuf.Any + 65, // 131: envoy.config.route.v3.RetryPolicy.RetryHostPredicate.typed_config:type_name -> google.protobuf.Any + 75, // 132: envoy.config.route.v3.RetryPolicy.RetryBackOff.base_interval:type_name -> google.protobuf.Duration + 75, // 133: envoy.config.route.v3.RetryPolicy.RetryBackOff.max_interval:type_name -> google.protobuf.Duration + 3, // 134: 
envoy.config.route.v3.RetryPolicy.ResetHeader.format:type_name -> envoy.config.route.v3.RetryPolicy.ResetHeaderFormat + 48, // 135: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff.reset_headers:type_name -> envoy.config.route.v3.RetryPolicy.ResetHeader + 75, // 136: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff.max_interval:type_name -> google.protobuf.Duration + 52, // 137: envoy.config.route.v3.RateLimit.Action.source_cluster:type_name -> envoy.config.route.v3.RateLimit.Action.SourceCluster + 53, // 138: envoy.config.route.v3.RateLimit.Action.destination_cluster:type_name -> envoy.config.route.v3.RateLimit.Action.DestinationCluster + 54, // 139: envoy.config.route.v3.RateLimit.Action.request_headers:type_name -> envoy.config.route.v3.RateLimit.Action.RequestHeaders + 55, // 140: envoy.config.route.v3.RateLimit.Action.remote_address:type_name -> envoy.config.route.v3.RateLimit.Action.RemoteAddress + 57, // 141: envoy.config.route.v3.RateLimit.Action.generic_key:type_name -> envoy.config.route.v3.RateLimit.Action.GenericKey + 58, // 142: envoy.config.route.v3.RateLimit.Action.header_value_match:type_name -> envoy.config.route.v3.RateLimit.Action.HeaderValueMatch + 59, // 143: envoy.config.route.v3.RateLimit.Action.dynamic_metadata:type_name -> envoy.config.route.v3.RateLimit.Action.DynamicMetaData + 60, // 144: envoy.config.route.v3.RateLimit.Action.metadata:type_name -> envoy.config.route.v3.RateLimit.Action.MetaData + 68, // 145: envoy.config.route.v3.RateLimit.Action.extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 56, // 146: envoy.config.route.v3.RateLimit.Action.masked_remote_address:type_name -> envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress + 61, // 147: envoy.config.route.v3.RateLimit.Action.query_parameter_value_match:type_name -> envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch + 62, // 148: envoy.config.route.v3.RateLimit.Override.dynamic_metadata:type_name -> 
envoy.config.route.v3.RateLimit.Override.DynamicMetadata + 66, // 149: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress.v4_prefix_mask_len:type_name -> google.protobuf.UInt32Value + 66, // 150: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress.v6_prefix_mask_len:type_name -> google.protobuf.UInt32Value + 70, // 151: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch.expect_match:type_name -> google.protobuf.BoolValue + 24, // 152: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 82, // 153: envoy.config.route.v3.RateLimit.Action.DynamicMetaData.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 82, // 154: envoy.config.route.v3.RateLimit.Action.MetaData.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 5, // 155: envoy.config.route.v3.RateLimit.Action.MetaData.source:type_name -> envoy.config.route.v3.RateLimit.Action.MetaData.Source + 70, // 156: envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch.expect_match:type_name -> google.protobuf.BoolValue + 25, // 157: envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch.query_parameters:type_name -> envoy.config.route.v3.QueryParameterMatcher + 82, // 158: envoy.config.route.v3.RateLimit.Override.DynamicMetadata.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 159, // [159:159] is the sub-list for method output_type + 159, // [159:159] is the sub-list for method input_type + 159, // [159:159] is the sub-list for extension type_name + 159, // [159:159] is the sub-list for extension extendee + 0, // [0:159] is the sub-list for field type_name } func init() { file_envoy_config_route_v3_route_components_proto_init() } @@ -7860,7 +8164,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Route); i { + switch v := 
v.(*RouteList); i { case 0: return &v.state case 1: @@ -7872,7 +8176,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WeightedCluster); i { + switch v := v.(*Route); i { case 0: return &v.state case 1: @@ -7884,7 +8188,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClusterSpecifierPlugin); i { + switch v := v.(*WeightedCluster); i { case 0: return &v.state case 1: @@ -7896,7 +8200,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RouteMatch); i { + switch v := v.(*ClusterSpecifierPlugin); i { case 0: return &v.state case 1: @@ -7908,7 +8212,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CorsPolicy); i { + switch v := v.(*RouteMatch); i { case 0: return &v.state case 1: @@ -7920,7 +8224,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RouteAction); i { + switch v := v.(*CorsPolicy); i { case 0: return &v.state case 1: @@ -7932,7 +8236,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RetryPolicy); i { + switch v := v.(*RouteAction); i { case 0: return &v.state case 1: @@ -7944,7 +8248,7 @@ func 
file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HedgePolicy); i { + switch v := v.(*RetryPolicy); i { case 0: return &v.state case 1: @@ -7956,7 +8260,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RedirectAction); i { + switch v := v.(*HedgePolicy); i { case 0: return &v.state case 1: @@ -7968,7 +8272,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DirectResponseAction); i { + switch v := v.(*RedirectAction); i { case 0: return &v.state case 1: @@ -7980,7 +8284,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NonForwardingAction); i { + switch v := v.(*DirectResponseAction); i { case 0: return &v.state case 1: @@ -7992,7 +8296,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Decorator); i { + switch v := v.(*NonForwardingAction); i { case 0: return &v.state case 1: @@ -8004,7 +8308,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Tracing); i { + switch v := v.(*Decorator); i { case 0: return &v.state case 1: @@ -8016,7 +8320,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } 
file_envoy_config_route_v3_route_components_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VirtualCluster); i { + switch v := v.(*Tracing); i { case 0: return &v.state case 1: @@ -8028,7 +8332,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RateLimit); i { + switch v := v.(*VirtualCluster); i { case 0: return &v.state case 1: @@ -8040,7 +8344,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeaderMatcher); i { + switch v := v.(*RateLimit); i { case 0: return &v.state case 1: @@ -8052,7 +8356,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryParameterMatcher); i { + switch v := v.(*HeaderMatcher); i { case 0: return &v.state case 1: @@ -8064,7 +8368,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InternalRedirectPolicy); i { + switch v := v.(*QueryParameterMatcher); i { case 0: return &v.state case 1: @@ -8076,6 +8380,18 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } file_envoy_config_route_v3_route_components_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InternalRedirectPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*FilterConfig); i { case 0: return &v.state @@ -8087,7 +8403,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WeightedCluster_ClusterWeight); i { case 0: return &v.state @@ -8099,7 +8415,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RouteMatch_GrpcRouteMatchOptions); i { case 0: return &v.state @@ -8111,7 +8427,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RouteMatch_TlsContextMatchOptions); i { case 0: return &v.state @@ -8123,7 +8439,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RouteMatch_ConnectMatcher); i { case 0: return &v.state @@ -8135,7 +8451,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[29].Exporter = func(v 
interface{}, i int) interface{} { switch v := v.(*RouteAction_RequestMirrorPolicy); i { case 0: return &v.state @@ -8147,7 +8463,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RouteAction_HashPolicy); i { case 0: return &v.state @@ -8159,7 +8475,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RouteAction_UpgradeConfig); i { case 0: return &v.state @@ -8171,7 +8487,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RouteAction_MaxStreamDuration); i { case 0: return &v.state @@ -8183,7 +8499,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RouteAction_HashPolicy_Header); i { case 0: return &v.state @@ -8195,7 +8511,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + 
file_envoy_config_route_v3_route_components_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RouteAction_HashPolicy_Cookie); i { case 0: return &v.state @@ -8207,7 +8523,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RouteAction_HashPolicy_ConnectionProperties); i { case 0: return &v.state @@ -8219,7 +8535,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RouteAction_HashPolicy_QueryParameter); i { case 0: return &v.state @@ -8231,7 +8547,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RouteAction_HashPolicy_FilterState); i { case 0: return &v.state @@ -8243,7 +8559,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RouteAction_UpgradeConfig_ConnectConfig); i { case 0: return &v.state @@ -8255,7 +8571,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - 
file_envoy_config_route_v3_route_components_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RetryPolicy_RetryPriority); i { case 0: return &v.state @@ -8267,7 +8583,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RetryPolicy_RetryHostPredicate); i { case 0: return &v.state @@ -8279,7 +8595,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RetryPolicy_RetryBackOff); i { case 0: return &v.state @@ -8291,7 +8607,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RetryPolicy_ResetHeader); i { case 0: return &v.state @@ -8303,7 +8619,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RetryPolicy_RateLimitedRetryBackOff); i { case 0: return &v.state @@ -8315,7 +8631,7 @@ func 
file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RateLimit_Action); i { case 0: return &v.state @@ -8327,7 +8643,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RateLimit_Override); i { case 0: return &v.state @@ -8339,7 +8655,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RateLimit_Action_SourceCluster); i { case 0: return &v.state @@ -8351,7 +8667,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RateLimit_Action_DestinationCluster); i { case 0: return &v.state @@ -8363,7 +8679,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RateLimit_Action_RequestHeaders); i { case 0: 
return &v.state @@ -8375,7 +8691,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RateLimit_Action_RemoteAddress); i { case 0: return &v.state @@ -8387,7 +8703,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RateLimit_Action_MaskedRemoteAddress); i { case 0: return &v.state @@ -8399,7 +8715,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RateLimit_Action_GenericKey); i { case 0: return &v.state @@ -8411,7 +8727,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RateLimit_Action_HeaderValueMatch); i { case 0: return &v.state @@ -8423,7 +8739,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[53].Exporter = func(v interface{}, i int) 
interface{} { switch v := v.(*RateLimit_Action_DynamicMetaData); i { case 0: return &v.state @@ -8435,7 +8751,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RateLimit_Action_MetaData); i { case 0: return &v.state @@ -8447,7 +8763,19 @@ func file_envoy_config_route_v3_route_components_proto_init() { return nil } } - file_envoy_config_route_v3_route_components_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + file_envoy_config_route_v3_route_components_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_QueryParameterValueMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RateLimit_Override_DynamicMetadata); i { case 0: return &v.state @@ -8460,28 +8788,28 @@ func file_envoy_config_route_v3_route_components_proto_init() { } } } - file_envoy_config_route_v3_route_components_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_envoy_config_route_v3_route_components_proto_msgTypes[3].OneofWrappers = []interface{}{ (*Route_Route)(nil), (*Route_Redirect)(nil), (*Route_DirectResponse)(nil), (*Route_FilterAction)(nil), (*Route_NonForwardingAction)(nil), } - file_envoy_config_route_v3_route_components_proto_msgTypes[3].OneofWrappers = []interface{}{ + file_envoy_config_route_v3_route_components_proto_msgTypes[4].OneofWrappers = []interface{}{ (*WeightedCluster_HeaderName)(nil), } - file_envoy_config_route_v3_route_components_proto_msgTypes[5].OneofWrappers = []interface{}{ + 
file_envoy_config_route_v3_route_components_proto_msgTypes[6].OneofWrappers = []interface{}{ (*RouteMatch_Prefix)(nil), (*RouteMatch_Path)(nil), (*RouteMatch_SafeRegex)(nil), (*RouteMatch_ConnectMatcher_)(nil), (*RouteMatch_PathSeparatedPrefix)(nil), - (*RouteMatch_PathTemplate)(nil), + (*RouteMatch_PathMatchPolicy)(nil), } - file_envoy_config_route_v3_route_components_proto_msgTypes[6].OneofWrappers = []interface{}{ + file_envoy_config_route_v3_route_components_proto_msgTypes[7].OneofWrappers = []interface{}{ (*CorsPolicy_FilterEnabled)(nil), } - file_envoy_config_route_v3_route_components_proto_msgTypes[7].OneofWrappers = []interface{}{ + file_envoy_config_route_v3_route_components_proto_msgTypes[8].OneofWrappers = []interface{}{ (*RouteAction_Cluster)(nil), (*RouteAction_ClusterHeader)(nil), (*RouteAction_WeightedClusters)(nil), @@ -8492,14 +8820,14 @@ func file_envoy_config_route_v3_route_components_proto_init() { (*RouteAction_HostRewriteHeader)(nil), (*RouteAction_HostRewritePathRegex)(nil), } - file_envoy_config_route_v3_route_components_proto_msgTypes[10].OneofWrappers = []interface{}{ + file_envoy_config_route_v3_route_components_proto_msgTypes[11].OneofWrappers = []interface{}{ (*RedirectAction_HttpsRedirect)(nil), (*RedirectAction_SchemeRedirect)(nil), (*RedirectAction_PathRedirect)(nil), (*RedirectAction_PrefixRewrite)(nil), (*RedirectAction_RegexRewrite)(nil), } - file_envoy_config_route_v3_route_components_proto_msgTypes[17].OneofWrappers = []interface{}{ + file_envoy_config_route_v3_route_components_proto_msgTypes[18].OneofWrappers = []interface{}{ (*HeaderMatcher_ExactMatch)(nil), (*HeaderMatcher_SafeRegexMatch)(nil), (*HeaderMatcher_RangeMatch)(nil), @@ -8509,27 +8837,27 @@ func file_envoy_config_route_v3_route_components_proto_init() { (*HeaderMatcher_ContainsMatch)(nil), (*HeaderMatcher_StringMatch)(nil), } - file_envoy_config_route_v3_route_components_proto_msgTypes[18].OneofWrappers = []interface{}{ + 
file_envoy_config_route_v3_route_components_proto_msgTypes[19].OneofWrappers = []interface{}{ (*QueryParameterMatcher_StringMatch)(nil), (*QueryParameterMatcher_PresentMatch)(nil), } - file_envoy_config_route_v3_route_components_proto_msgTypes[23].OneofWrappers = []interface{}{ + file_envoy_config_route_v3_route_components_proto_msgTypes[24].OneofWrappers = []interface{}{ (*WeightedCluster_ClusterWeight_HostRewriteLiteral)(nil), } - file_envoy_config_route_v3_route_components_proto_msgTypes[29].OneofWrappers = []interface{}{ + file_envoy_config_route_v3_route_components_proto_msgTypes[30].OneofWrappers = []interface{}{ (*RouteAction_HashPolicy_Header_)(nil), (*RouteAction_HashPolicy_Cookie_)(nil), (*RouteAction_HashPolicy_ConnectionProperties_)(nil), (*RouteAction_HashPolicy_QueryParameter_)(nil), (*RouteAction_HashPolicy_FilterState_)(nil), } - file_envoy_config_route_v3_route_components_proto_msgTypes[38].OneofWrappers = []interface{}{ + file_envoy_config_route_v3_route_components_proto_msgTypes[39].OneofWrappers = []interface{}{ (*RetryPolicy_RetryPriority_TypedConfig)(nil), } - file_envoy_config_route_v3_route_components_proto_msgTypes[39].OneofWrappers = []interface{}{ + file_envoy_config_route_v3_route_components_proto_msgTypes[40].OneofWrappers = []interface{}{ (*RetryPolicy_RetryHostPredicate_TypedConfig)(nil), } - file_envoy_config_route_v3_route_components_proto_msgTypes[43].OneofWrappers = []interface{}{ + file_envoy_config_route_v3_route_components_proto_msgTypes[44].OneofWrappers = []interface{}{ (*RateLimit_Action_SourceCluster_)(nil), (*RateLimit_Action_DestinationCluster_)(nil), (*RateLimit_Action_RequestHeaders_)(nil), @@ -8540,8 +8868,9 @@ func file_envoy_config_route_v3_route_components_proto_init() { (*RateLimit_Action_Metadata)(nil), (*RateLimit_Action_Extension)(nil), (*RateLimit_Action_MaskedRemoteAddress_)(nil), + (*RateLimit_Action_QueryParameterValueMatch_)(nil), } - 
file_envoy_config_route_v3_route_components_proto_msgTypes[44].OneofWrappers = []interface{}{ + file_envoy_config_route_v3_route_components_proto_msgTypes[45].OneofWrappers = []interface{}{ (*RateLimit_Override_DynamicMetadata_)(nil), } type x struct{} @@ -8550,7 +8879,7 @@ func file_envoy_config_route_v3_route_components_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_route_v3_route_components_proto_rawDesc, NumEnums: 6, - NumMessages: 55, + NumMessages: 57, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go index a43bb1adf2..74d5e793a5 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go @@ -551,6 +551,8 @@ func (m *VirtualHost) validate(all bool) error { } } + // no validation rules for IncludeIsTimeoutRetryHeader + if all { switch v := interface{}(m.GetPerRequestBufferLimitBytes()).(type) { case interface{ ValidateAll() error }: @@ -825,6 +827,139 @@ var _ interface { ErrorName() string } = FilterActionValidationError{} +// Validate checks the field values on RouteList with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RouteList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteList with the rules defined in +// the proto definition for this message. 
If any rules are violated, the +// result is a list of violation errors wrapped in RouteListMultiError, or nil +// if none found. +func (m *RouteList) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetRoutes() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteListValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteListValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteListValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RouteListMultiError(errors) + } + + return nil +} + +// RouteListMultiError is an error wrapping multiple validation errors returned +// by RouteList.ValidateAll() if the designated constraints aren't met. +type RouteListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteListMultiError) AllErrors() []error { return m } + +// RouteListValidationError is the validation error returned by +// RouteList.Validate if the designated constraints aren't met. 
+type RouteListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteListValidationError) ErrorName() string { return "RouteListValidationError" } + +// Error satisfies the builtin error interface +func (e RouteListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteListValidationError{} + // Validate checks the field values on Route with the rules defined in the // proto definition for this message. If any rules are violated, the first // error encountered is returned, or nil if there are no violations. 
@@ -1196,9 +1331,20 @@ func (m *Route) validate(all bool) error { // no validation rules for StatPrefix - switch m.Action.(type) { - + oneofActionPresent := false + switch v := m.Action.(type) { case *Route_Route: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true if all { switch v := interface{}(m.GetRoute()).(type) { @@ -1230,6 +1376,17 @@ func (m *Route) validate(all bool) error { } case *Route_Redirect: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true if all { switch v := interface{}(m.GetRedirect()).(type) { @@ -1261,6 +1418,17 @@ func (m *Route) validate(all bool) error { } case *Route_DirectResponse: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true if all { switch v := interface{}(m.GetDirectResponse()).(type) { @@ -1292,6 +1460,17 @@ func (m *Route) validate(all bool) error { } case *Route_FilterAction: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true if all { switch v := interface{}(m.GetFilterAction()).(type) { @@ -1323,6 +1502,17 @@ func (m *Route) validate(all bool) error { } case *Route_NonForwardingAction: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true if all { switch v := interface{}(m.GetNonForwardingAction()).(type) { @@ -1354,6 +1544,9 @@ func (m *Route) 
validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofActionPresent { err := RouteValidationError{ field: "Action", reason: "value is required", @@ -1362,7 +1555,6 @@ func (m *Route) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -1513,26 +1705,49 @@ func (m *WeightedCluster) validate(all bool) error { } - if wrapper := m.GetTotalWeight(); wrapper != nil { - - if wrapper.GetValue() < 1 { - err := WeightedClusterValidationError{ - field: "TotalWeight", - reason: "value must be greater than or equal to 1", + if all { + switch v := interface{}(m.GetTotalWeight()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedClusterValidationError{ + field: "TotalWeight", + reason: "embedded message failed validation", + cause: err, + }) } - if !all { - return err + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedClusterValidationError{ + field: "TotalWeight", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTotalWeight()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedClusterValidationError{ + field: "TotalWeight", + reason: "embedded message failed validation", + cause: err, } - errors = append(errors, err) } - } // no validation rules for RuntimeKeyPrefix - switch m.RandomValueSpecifier.(type) { - + switch v := m.RandomValueSpecifier.(type) { case *WeightedCluster_HeaderName: + if v == nil { + err := WeightedClusterValidationError{ + field: "RandomValueSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if !_WeightedCluster_HeaderName_Pattern.MatchString(m.GetHeaderName()) { err := WeightedClusterValidationError{ @@ -1545,6 +1760,8 @@ func (m *WeightedCluster) validate(all bool) error 
{ errors = append(errors, err) } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -2011,15 +2228,46 @@ func (m *RouteMatch) validate(all bool) error { } - switch m.PathSpecifier.(type) { - + oneofPathSpecifierPresent := false + switch v := m.PathSpecifier.(type) { case *RouteMatch_Prefix: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true // no validation rules for Prefix - case *RouteMatch_Path: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true // no validation rules for Path - case *RouteMatch_SafeRegex: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true if m.GetSafeRegex() == nil { err := RouteMatchValidationError{ @@ -2062,6 +2310,17 @@ func (m *RouteMatch) validate(all bool) error { } case *RouteMatch_ConnectMatcher_: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true if all { switch v := interface{}(m.GetConnectMatcher()).(type) { @@ -2093,6 +2352,17 @@ func (m *RouteMatch) validate(all bool) error { } case *RouteMatch_PathSeparatedPrefix: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true if !_RouteMatch_PathSeparatedPrefix_Pattern.MatchString(m.GetPathSeparatedPrefix()) { 
err := RouteMatchValidationError{ @@ -2105,24 +2375,52 @@ func (m *RouteMatch) validate(all bool) error { errors = append(errors, err) } - case *RouteMatch_PathTemplate: - - if m.GetPathTemplate() != "" { + case *RouteMatch_PathMatchPolicy: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true - if l := utf8.RuneCountInString(m.GetPathTemplate()); l < 1 || l > 256 { - err := RouteMatchValidationError{ - field: "PathTemplate", - reason: "value length must be between 1 and 256 runes, inclusive", + if all { + switch v := interface{}(m.GetPathMatchPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "PathMatchPolicy", + reason: "embedded message failed validation", + cause: err, + }) } - if !all { - return err + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "PathMatchPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPathMatchPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "PathMatchPolicy", + reason: "embedded message failed validation", + cause: err, } - errors = append(errors, err) } - } default: + _ = v // ensures v is used + } + if !oneofPathSpecifierPresent { err := RouteMatchValidationError{ field: "PathSpecifier", reason: "value is required", @@ -2131,7 +2429,6 @@ func (m *RouteMatch) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -2335,9 +2632,47 @@ func (m *CorsPolicy) validate(all bool) error { } } - switch m.EnabledSpecifier.(type) { + if all { + switch v := 
interface{}(m.GetAllowPrivateNetworkAccess()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "AllowPrivateNetworkAccess", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "AllowPrivateNetworkAccess", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAllowPrivateNetworkAccess()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: "AllowPrivateNetworkAccess", + reason: "embedded message failed validation", + cause: err, + } + } + } + switch v := m.EnabledSpecifier.(type) { case *CorsPolicy_FilterEnabled: + if v == nil { + err := CorsPolicyValidationError{ + field: "EnabledSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetFilterEnabled()).(type) { @@ -2368,6 +2703,8 @@ func (m *CorsPolicy) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -2549,19 +2886,33 @@ func (m *RouteAction) validate(all bool) error { } } - if m.GetPathTemplateRewrite() != "" { - - if l := utf8.RuneCountInString(m.GetPathTemplateRewrite()); l < 1 || l > 256 { - err := RouteActionValidationError{ - field: "PathTemplateRewrite", - reason: "value length must be between 1 and 256 runes, inclusive", + if all { + switch v := interface{}(m.GetPathRewritePolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "PathRewritePolicy", + reason: "embedded message failed validation", + cause: err, + }) } - if !all { - return err + case interface{ 
Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "PathRewritePolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPathRewritePolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "PathRewritePolicy", + reason: "embedded message failed validation", + cause: err, } - errors = append(errors, err) } - } // no validation rules for AppendXForwardedHost @@ -3092,9 +3443,20 @@ func (m *RouteAction) validate(all bool) error { } } - switch m.ClusterSpecifier.(type) { - + oneofClusterSpecifierPresent := false + switch v := m.ClusterSpecifier.(type) { case *RouteAction_Cluster: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true if utf8.RuneCountInString(m.GetCluster()) < 1 { err := RouteActionValidationError{ @@ -3108,6 +3470,17 @@ func (m *RouteAction) validate(all bool) error { } case *RouteAction_ClusterHeader: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true if utf8.RuneCountInString(m.GetClusterHeader()) < 1 { err := RouteActionValidationError{ @@ -3132,6 +3505,17 @@ func (m *RouteAction) validate(all bool) error { } case *RouteAction_WeightedClusters: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true if all { switch v := interface{}(m.GetWeightedClusters()).(type) { @@ -3163,9 +3547,30 @@ func (m *RouteAction) 
validate(all bool) error { } case *RouteAction_ClusterSpecifierPlugin: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true // no validation rules for ClusterSpecifierPlugin - case *RouteAction_InlineClusterSpecifierPlugin: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true if all { switch v := interface{}(m.GetInlineClusterSpecifierPlugin()).(type) { @@ -3197,6 +3602,9 @@ func (m *RouteAction) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofClusterSpecifierPresent { err := RouteActionValidationError{ field: "ClusterSpecifier", reason: "value is required", @@ -3205,12 +3613,19 @@ func (m *RouteAction) validate(all bool) error { return err } errors = append(errors, err) - } - - switch m.HostRewriteSpecifier.(type) { - + switch v := m.HostRewriteSpecifier.(type) { case *RouteAction_HostRewriteLiteral: + if v == nil { + err := RouteActionValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if !_RouteAction_HostRewriteLiteral_Pattern.MatchString(m.GetHostRewriteLiteral()) { err := RouteActionValidationError{ @@ -3224,6 +3639,16 @@ func (m *RouteAction) validate(all bool) error { } case *RouteAction_AutoHostRewrite: + if v == nil { + err := RouteActionValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetAutoHostRewrite()).(type) { @@ -3255,6 +3680,16 @@ func (m *RouteAction) validate(all bool) error { } case 
*RouteAction_HostRewriteHeader: + if v == nil { + err := RouteActionValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if !_RouteAction_HostRewriteHeader_Pattern.MatchString(m.GetHostRewriteHeader()) { err := RouteActionValidationError{ @@ -3268,6 +3703,16 @@ func (m *RouteAction) validate(all bool) error { } case *RouteAction_HostRewritePathRegex: + if v == nil { + err := RouteActionValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetHostRewritePathRegex()).(type) { @@ -3298,6 +3743,8 @@ func (m *RouteAction) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -3991,19 +4438,46 @@ func (m *RedirectAction) validate(all bool) error { // no validation rules for StripQuery - switch m.SchemeRewriteSpecifier.(type) { - + switch v := m.SchemeRewriteSpecifier.(type) { case *RedirectAction_HttpsRedirect: + if v == nil { + err := RedirectActionValidationError{ + field: "SchemeRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for HttpsRedirect - case *RedirectAction_SchemeRedirect: + if v == nil { + err := RedirectActionValidationError{ + field: "SchemeRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for SchemeRedirect - + default: + _ = v // ensures v is used } - - switch m.PathRewriteSpecifier.(type) { - + switch v := m.PathRewriteSpecifier.(type) { case *RedirectAction_PathRedirect: + if v == nil { + err := RedirectActionValidationError{ + field: "PathRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors 
= append(errors, err) + } if !_RedirectAction_PathRedirect_Pattern.MatchString(m.GetPathRedirect()) { err := RedirectActionValidationError{ @@ -4017,6 +4491,16 @@ func (m *RedirectAction) validate(all bool) error { } case *RedirectAction_PrefixRewrite: + if v == nil { + err := RedirectActionValidationError{ + field: "PathRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if !_RedirectAction_PrefixRewrite_Pattern.MatchString(m.GetPrefixRewrite()) { err := RedirectActionValidationError{ @@ -4030,6 +4514,16 @@ func (m *RedirectAction) validate(all bool) error { } case *RedirectAction_RegexRewrite: + if v == nil { + err := RedirectActionValidationError{ + field: "PathRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetRegexRewrite()).(type) { @@ -4060,6 +4554,8 @@ func (m *RedirectAction) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -5129,12 +5625,32 @@ func (m *HeaderMatcher) validate(all bool) error { // no validation rules for InvertMatch - switch m.HeaderMatchSpecifier.(type) { + // no validation rules for TreatMissingHeaderAsEmpty + switch v := m.HeaderMatchSpecifier.(type) { case *HeaderMatcher_ExactMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for ExactMatch - case *HeaderMatcher_SafeRegexMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetSafeRegexMatch()).(type) { @@ -5166,6 +5682,16 @@ func (m *HeaderMatcher) validate(all bool) error 
{ } case *HeaderMatcher_RangeMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetRangeMatch()).(type) { @@ -5197,9 +5723,28 @@ func (m *HeaderMatcher) validate(all bool) error { } case *HeaderMatcher_PresentMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for PresentMatch - case *HeaderMatcher_PrefixMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if utf8.RuneCountInString(m.GetPrefixMatch()) < 1 { err := HeaderMatcherValidationError{ @@ -5213,6 +5758,16 @@ func (m *HeaderMatcher) validate(all bool) error { } case *HeaderMatcher_SuffixMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if utf8.RuneCountInString(m.GetSuffixMatch()) < 1 { err := HeaderMatcherValidationError{ @@ -5226,6 +5781,16 @@ func (m *HeaderMatcher) validate(all bool) error { } case *HeaderMatcher_ContainsMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if utf8.RuneCountInString(m.GetContainsMatch()) < 1 { err := HeaderMatcherValidationError{ @@ -5239,6 +5804,16 @@ func (m *HeaderMatcher) validate(all bool) error { } case *HeaderMatcher_StringMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof 
value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetStringMatch()).(type) { @@ -5269,6 +5844,8 @@ func (m *HeaderMatcher) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -5395,9 +5972,18 @@ func (m *QueryParameterMatcher) validate(all bool) error { errors = append(errors, err) } - switch m.QueryParameterMatchSpecifier.(type) { - + switch v := m.QueryParameterMatchSpecifier.(type) { case *QueryParameterMatcher_StringMatch: + if v == nil { + err := QueryParameterMatcherValidationError{ + field: "QueryParameterMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if m.GetStringMatch() == nil { err := QueryParameterMatcherValidationError{ @@ -5440,8 +6026,19 @@ func (m *QueryParameterMatcher) validate(all bool) error { } case *QueryParameterMatcher_PresentMatch: + if v == nil { + err := QueryParameterMatcherValidationError{ + field: "QueryParameterMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for PresentMatch - + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -5755,6 +6352,8 @@ func (m *FilterConfig) validate(all bool) error { // no validation rules for IsOptional + // no validation rules for Disabled + if len(errors) > 0 { return FilterConfigMultiError(errors) } @@ -6093,9 +6692,18 @@ func (m *WeightedCluster_ClusterWeight) validate(all bool) error { } } - switch m.HostRewriteSpecifier.(type) { - + switch v := m.HostRewriteSpecifier.(type) { case *WeightedCluster_ClusterWeight_HostRewriteLiteral: + if v == nil { + err := WeightedCluster_ClusterWeightValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if 
!_WeightedCluster_ClusterWeight_HostRewriteLiteral_Pattern.MatchString(m.GetHostRewriteLiteral()) { err := WeightedCluster_ClusterWeightValidationError{ @@ -6108,6 +6716,8 @@ func (m *WeightedCluster_ClusterWeight) validate(all bool) error { errors = append(errors, err) } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -6769,9 +7379,20 @@ func (m *RouteAction_HashPolicy) validate(all bool) error { // no validation rules for Terminal - switch m.PolicySpecifier.(type) { - + oneofPolicySpecifierPresent := false + switch v := m.PolicySpecifier.(type) { case *RouteAction_HashPolicy_Header_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true if all { switch v := interface{}(m.GetHeader()).(type) { @@ -6803,6 +7424,17 @@ func (m *RouteAction_HashPolicy) validate(all bool) error { } case *RouteAction_HashPolicy_Cookie_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true if all { switch v := interface{}(m.GetCookie()).(type) { @@ -6834,6 +7466,17 @@ func (m *RouteAction_HashPolicy) validate(all bool) error { } case *RouteAction_HashPolicy_ConnectionProperties_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true if all { switch v := interface{}(m.GetConnectionProperties()).(type) { @@ -6865,6 +7508,17 @@ func (m *RouteAction_HashPolicy) validate(all bool) error { } case *RouteAction_HashPolicy_QueryParameter_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: 
"PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true if all { switch v := interface{}(m.GetQueryParameter()).(type) { @@ -6896,6 +7550,17 @@ func (m *RouteAction_HashPolicy) validate(all bool) error { } case *RouteAction_HashPolicy_FilterState_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true if all { switch v := interface{}(m.GetFilterState()).(type) { @@ -6927,6 +7592,9 @@ func (m *RouteAction_HashPolicy) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofPolicySpecifierPresent { err := RouteAction_HashPolicyValidationError{ field: "PolicySpecifier", reason: "value is required", @@ -6935,7 +7603,6 @@ func (m *RouteAction_HashPolicy) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -8203,9 +8870,18 @@ func (m *RetryPolicy_RetryPriority) validate(all bool) error { errors = append(errors, err) } - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *RetryPolicy_RetryPriority_TypedConfig: + if v == nil { + err := RetryPolicy_RetryPriorityValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -8236,6 +8912,8 @@ func (m *RetryPolicy_RetryPriority) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -8351,9 +9029,18 @@ func (m *RetryPolicy_RetryHostPredicate) validate(all bool) error { errors = append(errors, err) } - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *RetryPolicy_RetryHostPredicate_TypedConfig: + if v == nil { + err := 
RetryPolicy_RetryHostPredicateValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -8384,6 +9071,8 @@ func (m *RetryPolicy_RetryHostPredicate) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -8979,9 +9668,20 @@ func (m *RateLimit_Action) validate(all bool) error { var errors []error - switch m.ActionSpecifier.(type) { - + oneofActionSpecifierPresent := false + switch v := m.ActionSpecifier.(type) { case *RateLimit_Action_SourceCluster_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true if all { switch v := interface{}(m.GetSourceCluster()).(type) { @@ -9013,6 +9713,17 @@ func (m *RateLimit_Action) validate(all bool) error { } case *RateLimit_Action_DestinationCluster_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true if all { switch v := interface{}(m.GetDestinationCluster()).(type) { @@ -9044,6 +9755,17 @@ func (m *RateLimit_Action) validate(all bool) error { } case *RateLimit_Action_RequestHeaders_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true if all { switch v := interface{}(m.GetRequestHeaders()).(type) { @@ -9075,6 +9797,17 @@ func (m *RateLimit_Action) validate(all bool) error { } case *RateLimit_Action_RemoteAddress_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: 
"ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true if all { switch v := interface{}(m.GetRemoteAddress()).(type) { @@ -9106,6 +9839,17 @@ func (m *RateLimit_Action) validate(all bool) error { } case *RateLimit_Action_GenericKey_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true if all { switch v := interface{}(m.GetGenericKey()).(type) { @@ -9137,6 +9881,17 @@ func (m *RateLimit_Action) validate(all bool) error { } case *RateLimit_Action_HeaderValueMatch_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true if all { switch v := interface{}(m.GetHeaderValueMatch()).(type) { @@ -9168,6 +9923,17 @@ func (m *RateLimit_Action) validate(all bool) error { } case *RateLimit_Action_DynamicMetadata: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true if all { switch v := interface{}(m.GetDynamicMetadata()).(type) { @@ -9199,6 +9965,17 @@ func (m *RateLimit_Action) validate(all bool) error { } case *RateLimit_Action_Metadata: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true if all { switch v := interface{}(m.GetMetadata()).(type) { @@ -9230,6 +10007,17 @@ func (m *RateLimit_Action) validate(all bool) error { } case 
*RateLimit_Action_Extension: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true if all { switch v := interface{}(m.GetExtension()).(type) { @@ -9261,6 +10049,17 @@ func (m *RateLimit_Action) validate(all bool) error { } case *RateLimit_Action_MaskedRemoteAddress_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true if all { switch v := interface{}(m.GetMaskedRemoteAddress()).(type) { @@ -9291,7 +10090,52 @@ func (m *RateLimit_Action) validate(all bool) error { } } + case *RateLimit_Action_QueryParameterValueMatch_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetQueryParameterValueMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "QueryParameterValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "QueryParameterValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetQueryParameterValueMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "QueryParameterValueMatch", + reason: "embedded message failed validation", + cause: err, + } 
+ } + } + default: + _ = v // ensures v is used + } + if !oneofActionSpecifierPresent { err := RateLimit_ActionValidationError{ field: "ActionSpecifier", reason: "value is required", @@ -9300,7 +10144,6 @@ func (m *RateLimit_Action) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -9403,9 +10246,20 @@ func (m *RateLimit_Override) validate(all bool) error { var errors []error - switch m.OverrideSpecifier.(type) { - + oneofOverrideSpecifierPresent := false + switch v := m.OverrideSpecifier.(type) { case *RateLimit_Override_DynamicMetadata_: + if v == nil { + err := RateLimit_OverrideValidationError{ + field: "OverrideSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOverrideSpecifierPresent = true if all { switch v := interface{}(m.GetDynamicMetadata()).(type) { @@ -9437,6 +10291,9 @@ func (m *RateLimit_Override) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofOverrideSpecifierPresent { err := RateLimit_OverrideValidationError{ field: "OverrideSpecifier", reason: "value is required", @@ -9445,7 +10302,6 @@ func (m *RateLimit_Override) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -10666,6 +11522,8 @@ func (m *RateLimit_Action_MetaData) validate(all bool) error { errors = append(errors, err) } + // no validation rules for SkipIfAbsent + if len(errors) > 0 { return RateLimit_Action_MetaDataMultiError(errors) } @@ -10746,6 +11604,199 @@ var _ interface { ErrorName() string } = RateLimit_Action_MetaDataValidationError{} +// Validate checks the field values on +// RateLimit_Action_QueryParameterValueMatch with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *RateLimit_Action_QueryParameterValueMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// RateLimit_Action_QueryParameterValueMatch with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// RateLimit_Action_QueryParameterValueMatchMultiError, or nil if none found. +func (m *RateLimit_Action_QueryParameterValueMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_QueryParameterValueMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for DescriptorKey + + if utf8.RuneCountInString(m.GetDescriptorValue()) < 1 { + err := RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "DescriptorValue", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetExpectMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExpectMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetQueryParameters()) < 1 { + err := RateLimit_Action_QueryParameterValueMatchValidationError{ + field: 
"QueryParameters", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetQueryParameters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_QueryParameterValueMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_QueryParameterValueMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_QueryParameterValueMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RateLimit_Action_QueryParameterValueMatchMultiError(errors) + } + + return nil +} + +// RateLimit_Action_QueryParameterValueMatchMultiError is an error wrapping +// multiple validation errors returned by +// RateLimit_Action_QueryParameterValueMatch.ValidateAll() if the designated +// constraints aren't met. +type RateLimit_Action_QueryParameterValueMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_QueryParameterValueMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RateLimit_Action_QueryParameterValueMatchMultiError) AllErrors() []error { return m } + +// RateLimit_Action_QueryParameterValueMatchValidationError is the validation +// error returned by RateLimit_Action_QueryParameterValueMatch.Validate if the +// designated constraints aren't met. +type RateLimit_Action_QueryParameterValueMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) ErrorName() string { + return "RateLimit_Action_QueryParameterValueMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_QueryParameterValueMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_QueryParameterValueMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_QueryParameterValueMatchValidationError{} + // Validate checks the field values on RateLimit_Override_DynamicMetadata with // the rules defined in the proto definition for this message. 
If any rules // are violated, the first error encountered is returned, or nil if there are diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go index 400d130035..829d7f162a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/route/v3/scoped_route.proto package routev3 @@ -81,7 +81,7 @@ const ( // Host: foo.com // X-Route-Selector: vip=172.10.10.20 // -// would result in the routing table defined by the `route-config1` +// would result in the routing table defined by the ``route-config1`` // RouteConfiguration being assigned to the HTTP request/stream. 
// // [#next-free-field: 6] diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go index e2d46c2ffb..5e52729bb8 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go @@ -392,12 +392,25 @@ func (m *ScopedRouteConfiguration_Key_Fragment) validate(all bool) error { var errors []error - switch m.Type.(type) { - + oneofTypePresent := false + switch v := m.Type.(type) { case *ScopedRouteConfiguration_Key_Fragment_StringKey: + if v == nil { + err := ScopedRouteConfiguration_Key_FragmentValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true // no validation rules for StringKey - default: + _ = v // ensures v is used + } + if !oneofTypePresent { err := ScopedRouteConfiguration_Key_FragmentValidationError{ field: "Type", reason: "value is required", @@ -406,7 +419,6 @@ func (m *ScopedRouteConfiguration_Key_Fragment) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go index 3a052b6640..d65d0999cc 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. 
DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/tap/v3/common.proto package tapv3 @@ -796,7 +796,7 @@ func (*StreamingAdminSink) Descriptor() ([]byte, []int) { // BufferedAdminSink configures a tap output to collect traces without returning them until // one of multiple criteria are satisfied. // Similar to StreamingAdminSink, it is only allowed to specify the buffered admin output -// sink if the tap is being configured from the `/tap` admin endpoint. +// sink if the tap is being configured from the ``/tap`` admin endpoint. type BufferedAdminSink struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go index 978e2ec31e..ed89bdc02d 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go @@ -283,9 +283,20 @@ func (m *MatchPredicate) validate(all bool) error { var errors []error - switch m.Rule.(type) { - + oneofRulePresent := false + switch v := m.Rule.(type) { case *MatchPredicate_OrMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetOrMatch()).(type) { @@ -317,6 +328,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_AndMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = 
append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetAndMatch()).(type) { @@ -348,6 +370,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_NotMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetNotMatch()).(type) { @@ -379,6 +412,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_AnyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if m.GetAnyMatch() != true { err := MatchPredicateValidationError{ @@ -392,6 +436,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_HttpRequestHeadersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetHttpRequestHeadersMatch()).(type) { @@ -423,6 +478,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_HttpRequestTrailersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetHttpRequestTrailersMatch()).(type) { @@ -454,6 +520,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_HttpResponseHeadersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, 
err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetHttpResponseHeadersMatch()).(type) { @@ -485,6 +562,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_HttpResponseTrailersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetHttpResponseTrailersMatch()).(type) { @@ -516,6 +604,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_HttpRequestGenericBodyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetHttpRequestGenericBodyMatch()).(type) { @@ -547,6 +646,17 @@ func (m *MatchPredicate) validate(all bool) error { } case *MatchPredicate_HttpResponseGenericBodyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if all { switch v := interface{}(m.GetHttpResponseGenericBodyMatch()).(type) { @@ -578,6 +688,9 @@ func (m *MatchPredicate) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofRulePresent { err := MatchPredicateValidationError{ field: "Rule", reason: "value is required", @@ -586,7 +699,6 @@ func (m *MatchPredicate) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -1187,9 +1299,20 @@ func (m *OutputSink) validate(all bool) error { errors = append(errors, err) } - switch m.OutputSinkType.(type) { - + oneofOutputSinkTypePresent := false + switch v := m.OutputSinkType.(type) { case *OutputSink_StreamingAdmin: + if v == nil { + err := 
OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true if all { switch v := interface{}(m.GetStreamingAdmin()).(type) { @@ -1221,6 +1344,17 @@ func (m *OutputSink) validate(all bool) error { } case *OutputSink_FilePerTap: + if v == nil { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true if all { switch v := interface{}(m.GetFilePerTap()).(type) { @@ -1252,6 +1386,17 @@ func (m *OutputSink) validate(all bool) error { } case *OutputSink_StreamingGrpc: + if v == nil { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true if all { switch v := interface{}(m.GetStreamingGrpc()).(type) { @@ -1283,6 +1428,17 @@ func (m *OutputSink) validate(all bool) error { } case *OutputSink_BufferedAdmin: + if v == nil { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true if all { switch v := interface{}(m.GetBufferedAdmin()).(type) { @@ -1314,6 +1470,9 @@ func (m *OutputSink) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofOutputSinkTypePresent { err := OutputSinkValidationError{ field: "OutputSinkType", reason: "value is required", @@ -1322,7 +1481,6 @@ func (m *OutputSink) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -2071,9 +2229,20 @@ func (m *HttpGenericBodyMatch_GenericTextMatch) validate(all bool) error { var errors []error - switch m.Rule.(type) { - + oneofRulePresent 
:= false + switch v := m.Rule.(type) { case *HttpGenericBodyMatch_GenericTextMatch_StringMatch: + if v == nil { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if utf8.RuneCountInString(m.GetStringMatch()) < 1 { err := HttpGenericBodyMatch_GenericTextMatchValidationError{ @@ -2087,6 +2256,17 @@ func (m *HttpGenericBodyMatch_GenericTextMatch) validate(all bool) error { } case *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch: + if v == nil { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if len(m.GetBinaryMatch()) < 1 { err := HttpGenericBodyMatch_GenericTextMatchValidationError{ @@ -2100,6 +2280,9 @@ func (m *HttpGenericBodyMatch_GenericTextMatch) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofRulePresent { err := HttpGenericBodyMatch_GenericTextMatchValidationError{ field: "Rule", reason: "value is required", @@ -2108,7 +2291,6 @@ func (m *HttpGenericBodyMatch_GenericTextMatch) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go index 528eddbf44..0501e193fc 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/trace/v3/datadog.proto package tracev3 @@ -33,6 +33,9 @@ type DatadogConfig struct { CollectorCluster string `protobuf:"bytes,1,opt,name=collector_cluster,json=collectorCluster,proto3" json:"collector_cluster,omitempty"` // The name used for the service when traces are generated by envoy. ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // Optional hostname to use when sending spans to the collector_cluster. Useful for collectors + // that require a specific hostname. Defaults to :ref:`collector_cluster ` above. + CollectorHostname string `protobuf:"bytes,3,opt,name=collector_hostname,json=collectorHostname,proto3" json:"collector_hostname,omitempty"` } func (x *DatadogConfig) Reset() { @@ -81,6 +84,13 @@ func (x *DatadogConfig) GetServiceName() string { return "" } +func (x *DatadogConfig) GetCollectorHostname() string { + if x != nil { + return x.CollectorHostname + } + return "" +} + var File_envoy_config_trace_v3_datadog_proto protoreflect.FileDescriptor var file_envoy_config_trace_v3_datadog_proto_rawDesc = []byte{ @@ -95,29 +105,32 @@ var file_envoy_config_trace_v3_datadog_proto_rawDesc = []byte{ 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x01, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcc, 0x01, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, - 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, - 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xb3, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, - 0x0c, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x76, 0x33, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2a, 0x12, 0x28, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x76, 0x34, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x63, + 0x6f, 0x6c, 0x6c, 
0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, + 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xb3, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, + 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x76, 0x33, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2a, 0x12, 0x28, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x72, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x76, 0x34, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go index 0b54b35674..9aacd2f990 100644 --- 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go @@ -79,6 +79,8 @@ func (m *DatadogConfig) validate(all bool) error { errors = append(errors, err) } + // no validation rules for CollectorHostname + if len(errors) > 0 { return DatadogConfigMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go index 5453e0dcd0..0b50e85655 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/trace/v3/dynamic_ot.proto package tracev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go index 7b8a69f182..f003194918 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/trace/v3/http_tracer.proto package tracev3 @@ -9,7 +9,7 @@ package tracev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -149,7 +149,7 @@ func (m *Tracing_Http) GetConfigType() isTracing_Http_ConfigType { return nil } -func (x *Tracing_Http) GetTypedConfig() *any.Any { +func (x *Tracing_Http) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*Tracing_Http_TypedConfig); ok { return x.TypedConfig } @@ -161,7 +161,7 @@ type isTracing_Http_ConfigType interface { } type Tracing_Http_TypedConfig struct { - TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*Tracing_Http_TypedConfig) isTracing_Http_ConfigType() {} @@ -225,7 +225,7 @@ var file_envoy_config_trace_v3_http_tracer_proto_msgTypes = make([]protoimpl.Mes var file_envoy_config_trace_v3_http_tracer_proto_goTypes = []interface{}{ (*Tracing)(nil), // 0: envoy.config.trace.v3.Tracing (*Tracing_Http)(nil), // 1: envoy.config.trace.v3.Tracing.Http - (*any.Any)(nil), // 2: google.protobuf.Any + (*any1.Any)(nil), // 2: google.protobuf.Any } var file_envoy_config_trace_v3_http_tracer_proto_depIdxs = []int32{ 1, // 0: envoy.config.trace.v3.Tracing.http:type_name -> envoy.config.trace.v3.Tracing.Http diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go 
index 01ab45a2be..64a37da88a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go @@ -195,9 +195,18 @@ func (m *Tracing_Http) validate(all bool) error { errors = append(errors, err) } - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *Tracing_Http_TypedConfig: + if v == nil { + err := Tracing_HttpValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -228,6 +237,8 @@ func (m *Tracing_Http) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go index 1197fe7556..79a83f7813 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/trace/v3/lightstep.proto package tracev3 @@ -83,6 +83,7 @@ func (LightstepConfig_PropagationMode) EnumDescriptor() ([]byte, []int) { // Configuration for the LightStep tracer. 
// [#extension: envoy.tracers.lightstep] +// [#not-implemented-hide:] type LightstepConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go index 3ee9ed972c..c32240fa11 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/trace/v3/opencensus.proto package tracev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go index 65bfcfa496..9ee7e93672 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/trace/v3/opentelemetry.proto package tracev3 @@ -9,7 +9,6 @@ package tracev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - _ "github.com/envoyproxy/protoc-gen-validate/validate" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -32,7 +31,11 @@ type OpenTelemetryConfig struct { // The upstream gRPC cluster that will receive OTLP traces. // Note that the tracer drops traces if the server does not read data fast enough. + // This field can be left empty to disable reporting traces to the collector. GrpcService *v3.GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3" json:"grpc_service,omitempty"` + // The name for the service. This will be populated in the ResourceSpan Resource attributes. + // If it is not provided, it will default to "unknown_service:envoy". 
+ ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` } func (x *OpenTelemetryConfig) Reset() { @@ -74,6 +77,13 @@ func (x *OpenTelemetryConfig) GetGrpcService() *v3.GrpcService { return nil } +func (x *OpenTelemetryConfig) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + var File_envoy_config_trace_v3_opentelemetry_proto protoreflect.FileDescriptor var file_envoy_config_trace_v3_opentelemetry_proto_rawDesc = []byte{ @@ -85,15 +95,15 @@ var file_envoy_config_trace_v3_opentelemetry_proto_rawDesc = []byte{ 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x65, 0x0a, 0x13, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, - 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0c, 0x67, 0x72, - 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x67, - 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x89, 0x01, 0x0a, 0x23, 0x69, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7e, 0x0a, 0x13, 0x4f, 0x70, + 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x44, 
0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, + 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0b, 0x67, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x89, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x4f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go index 17a7b1f6b1..283cacfbd6 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go @@ -57,17 +57,6 @@ func (m *OpenTelemetryConfig) validate(all bool) error { var errors []error - if m.GetGrpcService() == nil { - err := OpenTelemetryConfigValidationError{ - field: "GrpcService", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - if all { switch v := interface{}(m.GetGrpcService()).(type) { case interface{ ValidateAll() error }: @@ -97,6 +86,8 @@ func (m *OpenTelemetryConfig) validate(all bool) error { } } + // no 
validation rules for ServiceName + if len(errors) > 0 { return OpenTelemetryConfigMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go index 2ef874e28b..e9460cf1ea 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/trace/v3/service.proto package tracev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go index 06d132259d..2c30cc78a6 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/trace/v3/skywalking.proto package tracev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go index de795fce6d..34d38cf15e 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go @@ -259,11 +259,21 @@ func (m *ClientConfig) validate(all bool) error { } } - switch m.BackendTokenSpecifier.(type) { - + switch v := m.BackendTokenSpecifier.(type) { case *ClientConfig_BackendToken: + if v == nil { + err := ClientConfigValidationError{ + field: "BackendTokenSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for BackendToken - + default: + _ = v // ensures v is used } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go index 0552c45664..aa069f64cd 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/trace/v3/trace.proto package tracev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go index 223e7f6f5a..700016f6da 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/trace/v3/xray.proto package tracev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go index 542cfae325..44c37dcabc 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/config/trace/v3/zipkin.proto package tracev3 @@ -91,7 +91,7 @@ func (ZipkinConfig_CollectorEndpointVersion) EnumDescriptor() ([]byte, []int) { // Configuration for the Zipkin tracer. 
// [#extension: envoy.tracers.zipkin] -// [#next-free-field: 7] +// [#next-free-field: 8] type ZipkinConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -113,6 +113,20 @@ type ZipkinConfig struct { // Optional hostname to use when sending spans to the collector_cluster. Useful for collectors // that require a specific hostname. Defaults to :ref:`collector_cluster ` above. CollectorHostname string `protobuf:"bytes,6,opt,name=collector_hostname,json=collectorHostname,proto3" json:"collector_hostname,omitempty"` + // If this is set to true, then Envoy will be treated as an independent hop in trace chain. A complete span pair will be created for a single + // request. Server span will be created for the downstream request and client span will be created for the related upstream request. + // This should be set to true in the following cases: + // + // * The Envoy Proxy is used as gateway or ingress. + // * The Envoy Proxy is used as sidecar but inbound traffic capturing or outbound traffic capturing is disabled. + // * Any case that the `start_child_span of router ` is set to true. + // + // .. attention:: + // + // If this is set to true, then the + // :ref:`start_child_span of router ` + // SHOULD be set to true also to ensure the correctness of trace chain. 
+ SplitSpansForRequest bool `protobuf:"varint,7,opt,name=split_spans_for_request,json=splitSpansForRequest,proto3" json:"split_spans_for_request,omitempty"` } func (x *ZipkinConfig) Reset() { @@ -189,6 +203,13 @@ func (x *ZipkinConfig) GetCollectorHostname() string { return "" } +func (x *ZipkinConfig) GetSplitSpansForRequest() bool { + if x != nil { + return x.SplitSpansForRequest + } + return false +} + var File_envoy_config_trace_v3_zipkin_proto protoreflect.FileDescriptor var file_envoy_config_trace_v3_zipkin_proto_rawDesc = []byte{ @@ -207,7 +228,7 @@ var file_envoy_config_trace_v3_zipkin_proto_rawDesc = []byte{ 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc0, 0x04, 0x0a, 0x0c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf7, 0x04, 0x0a, 0x0c, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, @@ -233,29 +254,33 @@ var file_envoy_config_trace_v3_zipkin_proto_rawDesc = []byte{ 0x69, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x78, 0x0a, 0x18, 0x43, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 
0x12, 0x33, 0x0a, 0x25, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, - 0x54, 0x45, 0x44, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, - 0x42, 0x4c, 0x45, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, - 0x1a, 0x08, 0x08, 0x01, 0xa8, 0xf7, 0xb4, 0x8b, 0x02, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x54, - 0x54, 0x50, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x48, 0x54, 0x54, - 0x50, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x52, 0x50, - 0x43, 0x10, 0x03, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xb1, - 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0xf2, 0x98, 0xfe, 0x8f, 0x05, - 0x29, 0x12, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x7a, 0x69, 0x70, 0x6b, - 0x69, 0x6e, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, - 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 
0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x70, 0x6c, + 0x69, 0x74, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x70, 0x6c, 0x69, + 0x74, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x22, 0x78, 0x0a, 0x18, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x25, + 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x55, + 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, + 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x1a, 0x08, 0x08, 0x01, 0xa8, 0xf7, 0xb4, 0x8b, 0x02, + 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x01, + 0x12, 0x0e, 0x0a, 0x0a, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x10, 0x02, + 0x12, 0x08, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, 0x10, 0x03, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, + 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xb1, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x5a, + 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x76, 0x33, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x29, 0x12, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x72, 0x73, 0x2e, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go index dac52cea8c..52834f4ed1 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go @@ -114,6 +114,8 @@ func (m *ZipkinConfig) validate(all bool) error { // no validation rules for CollectorHostname + // no validation rules for SplitSpansForRequest + if len(errors) > 0 { return ZipkinConfigMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go new file mode 100644 index 0000000000..77e75ea19a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go @@ -0,0 +1,2562 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: envoy/data/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + any1 "github.com/golang/protobuf/ptypes/any" + duration "github.com/golang/protobuf/ptypes/duration" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AccessLogType int32 + +const ( + AccessLogType_NotSet AccessLogType = 0 + AccessLogType_TcpUpstreamConnected AccessLogType = 1 + AccessLogType_TcpPeriodic AccessLogType = 2 + AccessLogType_TcpConnectionEnd AccessLogType = 3 + AccessLogType_DownstreamStart AccessLogType = 4 + AccessLogType_DownstreamPeriodic AccessLogType = 5 + AccessLogType_DownstreamEnd AccessLogType = 6 + AccessLogType_UpstreamPoolReady AccessLogType = 7 + AccessLogType_UpstreamPeriodic AccessLogType = 8 + AccessLogType_UpstreamEnd AccessLogType = 9 + AccessLogType_DownstreamTunnelSuccessfullyEstablished AccessLogType = 10 +) + +// Enum value maps for AccessLogType. 
+var ( + AccessLogType_name = map[int32]string{ + 0: "NotSet", + 1: "TcpUpstreamConnected", + 2: "TcpPeriodic", + 3: "TcpConnectionEnd", + 4: "DownstreamStart", + 5: "DownstreamPeriodic", + 6: "DownstreamEnd", + 7: "UpstreamPoolReady", + 8: "UpstreamPeriodic", + 9: "UpstreamEnd", + 10: "DownstreamTunnelSuccessfullyEstablished", + } + AccessLogType_value = map[string]int32{ + "NotSet": 0, + "TcpUpstreamConnected": 1, + "TcpPeriodic": 2, + "TcpConnectionEnd": 3, + "DownstreamStart": 4, + "DownstreamPeriodic": 5, + "DownstreamEnd": 6, + "UpstreamPoolReady": 7, + "UpstreamPeriodic": 8, + "UpstreamEnd": 9, + "DownstreamTunnelSuccessfullyEstablished": 10, + } +) + +func (x AccessLogType) Enum() *AccessLogType { + p := new(AccessLogType) + *p = x + return p +} + +func (x AccessLogType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AccessLogType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[0].Descriptor() +} + +func (AccessLogType) Type() protoreflect.EnumType { + return &file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[0] +} + +func (x AccessLogType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AccessLogType.Descriptor instead. +func (AccessLogType) EnumDescriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{0} +} + +// HTTP version +type HTTPAccessLogEntry_HTTPVersion int32 + +const ( + HTTPAccessLogEntry_PROTOCOL_UNSPECIFIED HTTPAccessLogEntry_HTTPVersion = 0 + HTTPAccessLogEntry_HTTP10 HTTPAccessLogEntry_HTTPVersion = 1 + HTTPAccessLogEntry_HTTP11 HTTPAccessLogEntry_HTTPVersion = 2 + HTTPAccessLogEntry_HTTP2 HTTPAccessLogEntry_HTTPVersion = 3 + HTTPAccessLogEntry_HTTP3 HTTPAccessLogEntry_HTTPVersion = 4 +) + +// Enum value maps for HTTPAccessLogEntry_HTTPVersion. 
+var ( + HTTPAccessLogEntry_HTTPVersion_name = map[int32]string{ + 0: "PROTOCOL_UNSPECIFIED", + 1: "HTTP10", + 2: "HTTP11", + 3: "HTTP2", + 4: "HTTP3", + } + HTTPAccessLogEntry_HTTPVersion_value = map[string]int32{ + "PROTOCOL_UNSPECIFIED": 0, + "HTTP10": 1, + "HTTP11": 2, + "HTTP2": 3, + "HTTP3": 4, + } +) + +func (x HTTPAccessLogEntry_HTTPVersion) Enum() *HTTPAccessLogEntry_HTTPVersion { + p := new(HTTPAccessLogEntry_HTTPVersion) + *p = x + return p +} + +func (x HTTPAccessLogEntry_HTTPVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HTTPAccessLogEntry_HTTPVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[1].Descriptor() +} + +func (HTTPAccessLogEntry_HTTPVersion) Type() protoreflect.EnumType { + return &file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[1] +} + +func (x HTTPAccessLogEntry_HTTPVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HTTPAccessLogEntry_HTTPVersion.Descriptor instead. +func (HTTPAccessLogEntry_HTTPVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{1, 0} +} + +// Reasons why the request was unauthorized +type ResponseFlags_Unauthorized_Reason int32 + +const ( + ResponseFlags_Unauthorized_REASON_UNSPECIFIED ResponseFlags_Unauthorized_Reason = 0 + // The request was denied by the external authorization service. + ResponseFlags_Unauthorized_EXTERNAL_SERVICE ResponseFlags_Unauthorized_Reason = 1 +) + +// Enum value maps for ResponseFlags_Unauthorized_Reason. 
+var ( + ResponseFlags_Unauthorized_Reason_name = map[int32]string{ + 0: "REASON_UNSPECIFIED", + 1: "EXTERNAL_SERVICE", + } + ResponseFlags_Unauthorized_Reason_value = map[string]int32{ + "REASON_UNSPECIFIED": 0, + "EXTERNAL_SERVICE": 1, + } +) + +func (x ResponseFlags_Unauthorized_Reason) Enum() *ResponseFlags_Unauthorized_Reason { + p := new(ResponseFlags_Unauthorized_Reason) + *p = x + return p +} + +func (x ResponseFlags_Unauthorized_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResponseFlags_Unauthorized_Reason) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[2].Descriptor() +} + +func (ResponseFlags_Unauthorized_Reason) Type() protoreflect.EnumType { + return &file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[2] +} + +func (x ResponseFlags_Unauthorized_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResponseFlags_Unauthorized_Reason.Descriptor instead. +func (ResponseFlags_Unauthorized_Reason) EnumDescriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{4, 0, 0} +} + +type TLSProperties_TLSVersion int32 + +const ( + TLSProperties_VERSION_UNSPECIFIED TLSProperties_TLSVersion = 0 + TLSProperties_TLSv1 TLSProperties_TLSVersion = 1 + TLSProperties_TLSv1_1 TLSProperties_TLSVersion = 2 + TLSProperties_TLSv1_2 TLSProperties_TLSVersion = 3 + TLSProperties_TLSv1_3 TLSProperties_TLSVersion = 4 +) + +// Enum value maps for TLSProperties_TLSVersion. 
+var ( + TLSProperties_TLSVersion_name = map[int32]string{ + 0: "VERSION_UNSPECIFIED", + 1: "TLSv1", + 2: "TLSv1_1", + 3: "TLSv1_2", + 4: "TLSv1_3", + } + TLSProperties_TLSVersion_value = map[string]int32{ + "VERSION_UNSPECIFIED": 0, + "TLSv1": 1, + "TLSv1_1": 2, + "TLSv1_2": 3, + "TLSv1_3": 4, + } +) + +func (x TLSProperties_TLSVersion) Enum() *TLSProperties_TLSVersion { + p := new(TLSProperties_TLSVersion) + *p = x + return p +} + +func (x TLSProperties_TLSVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TLSProperties_TLSVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[3].Descriptor() +} + +func (TLSProperties_TLSVersion) Type() protoreflect.EnumType { + return &file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[3] +} + +func (x TLSProperties_TLSVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TLSProperties_TLSVersion.Descriptor instead. +func (TLSProperties_TLSVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5, 0} +} + +type TCPAccessLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Common properties shared by all Envoy access logs. + CommonProperties *AccessLogCommon `protobuf:"bytes,1,opt,name=common_properties,json=commonProperties,proto3" json:"common_properties,omitempty"` + // Properties of the TCP connection. 
+ ConnectionProperties *ConnectionProperties `protobuf:"bytes,2,opt,name=connection_properties,json=connectionProperties,proto3" json:"connection_properties,omitempty"` +} + +func (x *TCPAccessLogEntry) Reset() { + *x = TCPAccessLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TCPAccessLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TCPAccessLogEntry) ProtoMessage() {} + +func (x *TCPAccessLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TCPAccessLogEntry.ProtoReflect.Descriptor instead. +func (*TCPAccessLogEntry) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{0} +} + +func (x *TCPAccessLogEntry) GetCommonProperties() *AccessLogCommon { + if x != nil { + return x.CommonProperties + } + return nil +} + +func (x *TCPAccessLogEntry) GetConnectionProperties() *ConnectionProperties { + if x != nil { + return x.ConnectionProperties + } + return nil +} + +type HTTPAccessLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Common properties shared by all Envoy access logs. 
+ CommonProperties *AccessLogCommon `protobuf:"bytes,1,opt,name=common_properties,json=commonProperties,proto3" json:"common_properties,omitempty"` + ProtocolVersion HTTPAccessLogEntry_HTTPVersion `protobuf:"varint,2,opt,name=protocol_version,json=protocolVersion,proto3,enum=envoy.data.accesslog.v3.HTTPAccessLogEntry_HTTPVersion" json:"protocol_version,omitempty"` + // Description of the incoming HTTP request. + Request *HTTPRequestProperties `protobuf:"bytes,3,opt,name=request,proto3" json:"request,omitempty"` + // Description of the outgoing HTTP response. + Response *HTTPResponseProperties `protobuf:"bytes,4,opt,name=response,proto3" json:"response,omitempty"` +} + +func (x *HTTPAccessLogEntry) Reset() { + *x = HTTPAccessLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPAccessLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPAccessLogEntry) ProtoMessage() {} + +func (x *HTTPAccessLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPAccessLogEntry.ProtoReflect.Descriptor instead. 
+func (*HTTPAccessLogEntry) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{1} +} + +func (x *HTTPAccessLogEntry) GetCommonProperties() *AccessLogCommon { + if x != nil { + return x.CommonProperties + } + return nil +} + +func (x *HTTPAccessLogEntry) GetProtocolVersion() HTTPAccessLogEntry_HTTPVersion { + if x != nil { + return x.ProtocolVersion + } + return HTTPAccessLogEntry_PROTOCOL_UNSPECIFIED +} + +func (x *HTTPAccessLogEntry) GetRequest() *HTTPRequestProperties { + if x != nil { + return x.Request + } + return nil +} + +func (x *HTTPAccessLogEntry) GetResponse() *HTTPResponseProperties { + if x != nil { + return x.Response + } + return nil +} + +// Defines fields for a connection +type ConnectionProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Number of bytes received from downstream. + ReceivedBytes uint64 `protobuf:"varint,1,opt,name=received_bytes,json=receivedBytes,proto3" json:"received_bytes,omitempty"` + // Number of bytes sent to downstream. 
+ SentBytes uint64 `protobuf:"varint,2,opt,name=sent_bytes,json=sentBytes,proto3" json:"sent_bytes,omitempty"` +} + +func (x *ConnectionProperties) Reset() { + *x = ConnectionProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectionProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectionProperties) ProtoMessage() {} + +func (x *ConnectionProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectionProperties.ProtoReflect.Descriptor instead. +func (*ConnectionProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{2} +} + +func (x *ConnectionProperties) GetReceivedBytes() uint64 { + if x != nil { + return x.ReceivedBytes + } + return 0 +} + +func (x *ConnectionProperties) GetSentBytes() uint64 { + if x != nil { + return x.SentBytes + } + return 0 +} + +// Defines fields that are shared by all Envoy access logs. +// [#next-free-field: 34] +type AccessLogCommon struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [#not-implemented-hide:] + // This field indicates the rate at which this log entry was sampled. + // Valid range is (0.0, 1.0]. + SampleRate float64 `protobuf:"fixed64,1,opt,name=sample_rate,json=sampleRate,proto3" json:"sample_rate,omitempty"` + // This field is the remote/origin address on which the request from the user was received. + // Note: This may not be the physical peer. 
E.g, if the remote address is inferred from for + // example the x-forwarder-for header, proxy protocol, etc. + DownstreamRemoteAddress *v3.Address `protobuf:"bytes,2,opt,name=downstream_remote_address,json=downstreamRemoteAddress,proto3" json:"downstream_remote_address,omitempty"` + // This field is the local/destination address on which the request from the user was received. + DownstreamLocalAddress *v3.Address `protobuf:"bytes,3,opt,name=downstream_local_address,json=downstreamLocalAddress,proto3" json:"downstream_local_address,omitempty"` + // If the connection is secure,S this field will contain TLS properties. + TlsProperties *TLSProperties `protobuf:"bytes,4,opt,name=tls_properties,json=tlsProperties,proto3" json:"tls_properties,omitempty"` + // The time that Envoy started servicing this request. This is effectively the time that the first + // downstream byte is received. + StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // Interval between the first downstream byte received and the last + // downstream byte received (i.e. time it takes to receive a request). + TimeToLastRxByte *duration.Duration `protobuf:"bytes,6,opt,name=time_to_last_rx_byte,json=timeToLastRxByte,proto3" json:"time_to_last_rx_byte,omitempty"` + // Interval between the first downstream byte received and the first upstream byte sent. There may + // by considerable delta between ``time_to_last_rx_byte`` and this value due to filters. + // Additionally, the same caveats apply as documented in ``time_to_last_downstream_tx_byte`` about + // not accounting for kernel socket buffer time, etc. + TimeToFirstUpstreamTxByte *duration.Duration `protobuf:"bytes,7,opt,name=time_to_first_upstream_tx_byte,json=timeToFirstUpstreamTxByte,proto3" json:"time_to_first_upstream_tx_byte,omitempty"` + // Interval between the first downstream byte received and the last upstream byte sent. 
There may + // by considerable delta between ``time_to_last_rx_byte`` and this value due to filters. + // Additionally, the same caveats apply as documented in ``time_to_last_downstream_tx_byte`` about + // not accounting for kernel socket buffer time, etc. + TimeToLastUpstreamTxByte *duration.Duration `protobuf:"bytes,8,opt,name=time_to_last_upstream_tx_byte,json=timeToLastUpstreamTxByte,proto3" json:"time_to_last_upstream_tx_byte,omitempty"` + // Interval between the first downstream byte received and the first upstream + // byte received (i.e. time it takes to start receiving a response). + TimeToFirstUpstreamRxByte *duration.Duration `protobuf:"bytes,9,opt,name=time_to_first_upstream_rx_byte,json=timeToFirstUpstreamRxByte,proto3" json:"time_to_first_upstream_rx_byte,omitempty"` + // Interval between the first downstream byte received and the last upstream + // byte received (i.e. time it takes to receive a complete response). + TimeToLastUpstreamRxByte *duration.Duration `protobuf:"bytes,10,opt,name=time_to_last_upstream_rx_byte,json=timeToLastUpstreamRxByte,proto3" json:"time_to_last_upstream_rx_byte,omitempty"` + // Interval between the first downstream byte received and the first downstream byte sent. + // There may be a considerable delta between the ``time_to_first_upstream_rx_byte`` and this field + // due to filters. Additionally, the same caveats apply as documented in + // ``time_to_last_downstream_tx_byte`` about not accounting for kernel socket buffer time, etc. + TimeToFirstDownstreamTxByte *duration.Duration `protobuf:"bytes,11,opt,name=time_to_first_downstream_tx_byte,json=timeToFirstDownstreamTxByte,proto3" json:"time_to_first_downstream_tx_byte,omitempty"` + // Interval between the first downstream byte received and the last downstream byte sent. + // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta + // between ``time_to_last_upstream_rx_byte`` and this field. 
Note also that this is an approximate + // time. In the current implementation it does not include kernel socket buffer time. In the + // current implementation it also does not include send window buffering inside the HTTP/2 codec. + // In the future it is likely that work will be done to make this duration more accurate. + TimeToLastDownstreamTxByte *duration.Duration `protobuf:"bytes,12,opt,name=time_to_last_downstream_tx_byte,json=timeToLastDownstreamTxByte,proto3" json:"time_to_last_downstream_tx_byte,omitempty"` + // The upstream remote/destination address that handles this exchange. This does not include + // retries. + UpstreamRemoteAddress *v3.Address `protobuf:"bytes,13,opt,name=upstream_remote_address,json=upstreamRemoteAddress,proto3" json:"upstream_remote_address,omitempty"` + // The upstream local/origin address that handles this exchange. This does not include retries. + UpstreamLocalAddress *v3.Address `protobuf:"bytes,14,opt,name=upstream_local_address,json=upstreamLocalAddress,proto3" json:"upstream_local_address,omitempty"` + // The upstream cluster that ``upstream_remote_address`` belongs to. + UpstreamCluster string `protobuf:"bytes,15,opt,name=upstream_cluster,json=upstreamCluster,proto3" json:"upstream_cluster,omitempty"` + // Flags indicating occurrences during request/response processing. + ResponseFlags *ResponseFlags `protobuf:"bytes,16,opt,name=response_flags,json=responseFlags,proto3" json:"response_flags,omitempty"` + // All metadata encountered during request processing, including endpoint + // selection. + // + // This can be used to associate IDs attached to the various configurations + // used to process this request with the access log entry. For example, a + // route created from a higher level forwarding rule with some ID can place + // that ID in this field and cross reference later. It can also be used to + // determine if a canary endpoint was used or not. 
+ Metadata *v3.Metadata `protobuf:"bytes,17,opt,name=metadata,proto3" json:"metadata,omitempty"` + // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the + // failure reason from the transport socket. The format of this field depends on the configured + // upstream transport socket. Common TLS failures are in + // :ref:`TLS trouble shooting `. + UpstreamTransportFailureReason string `protobuf:"bytes,18,opt,name=upstream_transport_failure_reason,json=upstreamTransportFailureReason,proto3" json:"upstream_transport_failure_reason,omitempty"` + // The name of the route + RouteName string `protobuf:"bytes,19,opt,name=route_name,json=routeName,proto3" json:"route_name,omitempty"` + // This field is the downstream direct remote address on which the request from the user was + // received. Note: This is always the physical peer, even if the remote address is inferred from + // for example the x-forwarder-for header, proxy protocol, etc. + DownstreamDirectRemoteAddress *v3.Address `protobuf:"bytes,20,opt,name=downstream_direct_remote_address,json=downstreamDirectRemoteAddress,proto3" json:"downstream_direct_remote_address,omitempty"` + // Map of filter state in stream info that have been configured to be logged. If the filter + // state serialized to any message other than ``google.protobuf.Any`` it will be packed into + // ``google.protobuf.Any``. + FilterStateObjects map[string]*any1.Any `protobuf:"bytes,21,rep,name=filter_state_objects,json=filterStateObjects,proto3" json:"filter_state_objects,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // A list of custom tags, which annotate logs with additional information. + // To configure this value, users should configure + // :ref:`custom_tags `. 
+ CustomTags map[string]string `protobuf:"bytes,22,rep,name=custom_tags,json=customTags,proto3" json:"custom_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // For HTTP: Total duration in milliseconds of the request from the start time to the last byte out. + // For TCP: Total duration in milliseconds of the downstream connection. + // This is the total duration of the request (i.e., when the request's ActiveStream is destroyed) + // and may be longer than ``time_to_last_downstream_tx_byte``. + Duration *duration.Duration `protobuf:"bytes,23,opt,name=duration,proto3" json:"duration,omitempty"` + // For HTTP: Number of times the request is attempted upstream. Note that the field is omitted when the request was never attempted upstream. + // For TCP: Number of times the connection request is attempted upstream. Note that the field is omitted when the connect request was never attempted upstream. + UpstreamRequestAttemptCount uint32 `protobuf:"varint,24,opt,name=upstream_request_attempt_count,json=upstreamRequestAttemptCount,proto3" json:"upstream_request_attempt_count,omitempty"` + // Connection termination details may provide additional information about why the connection was terminated by Envoy for L4 reasons. + ConnectionTerminationDetails string `protobuf:"bytes,25,opt,name=connection_termination_details,json=connectionTerminationDetails,proto3" json:"connection_termination_details,omitempty"` + // Optional unique id of stream (TCP connection, long-live HTTP2 stream, HTTP request) for logging and tracing. + // This could be any format string that could be used to identify one stream. + StreamId string `protobuf:"bytes,26,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` + // If this log entry is final log entry that flushed after the stream completed or + // intermediate log entry that flushed periodically during the stream. 
+ // There may be multiple intermediate log entries and only one final log entry for each + // long-live stream (TCP connection, long-live HTTP2 stream). + // And if it is necessary, unique ID or identifier can be added to the log entry + // :ref:`stream_id ` to + // correlate all these intermediate log entries and final log entry. + // + // .. attention:: + // + // This field is deprecated in favor of ``access_log_type`` for better indication of the + // type of the access log record. + // + // Deprecated: Do not use. + IntermediateLogEntry bool `protobuf:"varint,27,opt,name=intermediate_log_entry,json=intermediateLogEntry,proto3" json:"intermediate_log_entry,omitempty"` + // If downstream connection in listener failed due to transport socket (e.g. TLS handshake), provides the + // failure reason from the transport socket. The format of this field depends on the configured downstream + // transport socket. Common TLS failures are in :ref:`TLS trouble shooting `. + DownstreamTransportFailureReason string `protobuf:"bytes,28,opt,name=downstream_transport_failure_reason,json=downstreamTransportFailureReason,proto3" json:"downstream_transport_failure_reason,omitempty"` + // For HTTP: Total number of bytes sent to the downstream by the http stream. + // For TCP: Total number of bytes sent to the downstream by the tcp proxy. + DownstreamWireBytesSent uint64 `protobuf:"varint,29,opt,name=downstream_wire_bytes_sent,json=downstreamWireBytesSent,proto3" json:"downstream_wire_bytes_sent,omitempty"` + // For HTTP: Total number of bytes received from the downstream by the http stream. Envoy over counts sizes of received HTTP/1.1 pipelined requests by adding up bytes of requests in the pipeline to the one currently being processed. + // For TCP: Total number of bytes received from the downstream by the tcp proxy. 
+ DownstreamWireBytesReceived uint64 `protobuf:"varint,30,opt,name=downstream_wire_bytes_received,json=downstreamWireBytesReceived,proto3" json:"downstream_wire_bytes_received,omitempty"` + // For HTTP: Total number of bytes sent to the upstream by the http stream. This value accumulates during upstream retries. + // For TCP: Total number of bytes sent to the upstream by the tcp proxy. + UpstreamWireBytesSent uint64 `protobuf:"varint,31,opt,name=upstream_wire_bytes_sent,json=upstreamWireBytesSent,proto3" json:"upstream_wire_bytes_sent,omitempty"` + // For HTTP: Total number of bytes received from the upstream by the http stream. + // For TCP: Total number of bytes sent to the upstream by the tcp proxy. + UpstreamWireBytesReceived uint64 `protobuf:"varint,32,opt,name=upstream_wire_bytes_received,json=upstreamWireBytesReceived,proto3" json:"upstream_wire_bytes_received,omitempty"` + // The type of the access log, which indicates when the log was recorded. + // See :ref:`ACCESS_LOG_TYPE ` for the available values. + // In case the access log was recorded by a flow which does not correspond to one of the supported + // values, then the default value will be ``NotSet``. + // For more information about how access log behaves and when it is being recorded, + // please refer to :ref:`access logging `. 
+ AccessLogType AccessLogType `protobuf:"varint,33,opt,name=access_log_type,json=accessLogType,proto3,enum=envoy.data.accesslog.v3.AccessLogType" json:"access_log_type,omitempty"` +} + +func (x *AccessLogCommon) Reset() { + *x = AccessLogCommon{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AccessLogCommon) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccessLogCommon) ProtoMessage() {} + +func (x *AccessLogCommon) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AccessLogCommon.ProtoReflect.Descriptor instead. +func (*AccessLogCommon) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{3} +} + +func (x *AccessLogCommon) GetSampleRate() float64 { + if x != nil { + return x.SampleRate + } + return 0 +} + +func (x *AccessLogCommon) GetDownstreamRemoteAddress() *v3.Address { + if x != nil { + return x.DownstreamRemoteAddress + } + return nil +} + +func (x *AccessLogCommon) GetDownstreamLocalAddress() *v3.Address { + if x != nil { + return x.DownstreamLocalAddress + } + return nil +} + +func (x *AccessLogCommon) GetTlsProperties() *TLSProperties { + if x != nil { + return x.TlsProperties + } + return nil +} + +func (x *AccessLogCommon) GetStartTime() *timestamp.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +func (x *AccessLogCommon) GetTimeToLastRxByte() *duration.Duration { + if x != nil { + return x.TimeToLastRxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToFirstUpstreamTxByte() *duration.Duration { + if x != nil { + 
return x.TimeToFirstUpstreamTxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToLastUpstreamTxByte() *duration.Duration { + if x != nil { + return x.TimeToLastUpstreamTxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToFirstUpstreamRxByte() *duration.Duration { + if x != nil { + return x.TimeToFirstUpstreamRxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToLastUpstreamRxByte() *duration.Duration { + if x != nil { + return x.TimeToLastUpstreamRxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToFirstDownstreamTxByte() *duration.Duration { + if x != nil { + return x.TimeToFirstDownstreamTxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToLastDownstreamTxByte() *duration.Duration { + if x != nil { + return x.TimeToLastDownstreamTxByte + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamRemoteAddress() *v3.Address { + if x != nil { + return x.UpstreamRemoteAddress + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamLocalAddress() *v3.Address { + if x != nil { + return x.UpstreamLocalAddress + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamCluster() string { + if x != nil { + return x.UpstreamCluster + } + return "" +} + +func (x *AccessLogCommon) GetResponseFlags() *ResponseFlags { + if x != nil { + return x.ResponseFlags + } + return nil +} + +func (x *AccessLogCommon) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamTransportFailureReason() string { + if x != nil { + return x.UpstreamTransportFailureReason + } + return "" +} + +func (x *AccessLogCommon) GetRouteName() string { + if x != nil { + return x.RouteName + } + return "" +} + +func (x *AccessLogCommon) GetDownstreamDirectRemoteAddress() *v3.Address { + if x != nil { + return x.DownstreamDirectRemoteAddress + } + return nil +} + +func (x *AccessLogCommon) GetFilterStateObjects() map[string]*any1.Any { + if x != nil { + return 
x.FilterStateObjects + } + return nil +} + +func (x *AccessLogCommon) GetCustomTags() map[string]string { + if x != nil { + return x.CustomTags + } + return nil +} + +func (x *AccessLogCommon) GetDuration() *duration.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamRequestAttemptCount() uint32 { + if x != nil { + return x.UpstreamRequestAttemptCount + } + return 0 +} + +func (x *AccessLogCommon) GetConnectionTerminationDetails() string { + if x != nil { + return x.ConnectionTerminationDetails + } + return "" +} + +func (x *AccessLogCommon) GetStreamId() string { + if x != nil { + return x.StreamId + } + return "" +} + +// Deprecated: Do not use. +func (x *AccessLogCommon) GetIntermediateLogEntry() bool { + if x != nil { + return x.IntermediateLogEntry + } + return false +} + +func (x *AccessLogCommon) GetDownstreamTransportFailureReason() string { + if x != nil { + return x.DownstreamTransportFailureReason + } + return "" +} + +func (x *AccessLogCommon) GetDownstreamWireBytesSent() uint64 { + if x != nil { + return x.DownstreamWireBytesSent + } + return 0 +} + +func (x *AccessLogCommon) GetDownstreamWireBytesReceived() uint64 { + if x != nil { + return x.DownstreamWireBytesReceived + } + return 0 +} + +func (x *AccessLogCommon) GetUpstreamWireBytesSent() uint64 { + if x != nil { + return x.UpstreamWireBytesSent + } + return 0 +} + +func (x *AccessLogCommon) GetUpstreamWireBytesReceived() uint64 { + if x != nil { + return x.UpstreamWireBytesReceived + } + return 0 +} + +func (x *AccessLogCommon) GetAccessLogType() AccessLogType { + if x != nil { + return x.AccessLogType + } + return AccessLogType_NotSet +} + +// Flags indicating occurrences during request/response processing. +// [#next-free-field: 28] +type ResponseFlags struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Indicates local server healthcheck failed. 
+ FailedLocalHealthcheck bool `protobuf:"varint,1,opt,name=failed_local_healthcheck,json=failedLocalHealthcheck,proto3" json:"failed_local_healthcheck,omitempty"` + // Indicates there was no healthy upstream. + NoHealthyUpstream bool `protobuf:"varint,2,opt,name=no_healthy_upstream,json=noHealthyUpstream,proto3" json:"no_healthy_upstream,omitempty"` + // Indicates an there was an upstream request timeout. + UpstreamRequestTimeout bool `protobuf:"varint,3,opt,name=upstream_request_timeout,json=upstreamRequestTimeout,proto3" json:"upstream_request_timeout,omitempty"` + // Indicates local codec level reset was sent on the stream. + LocalReset bool `protobuf:"varint,4,opt,name=local_reset,json=localReset,proto3" json:"local_reset,omitempty"` + // Indicates remote codec level reset was received on the stream. + UpstreamRemoteReset bool `protobuf:"varint,5,opt,name=upstream_remote_reset,json=upstreamRemoteReset,proto3" json:"upstream_remote_reset,omitempty"` + // Indicates there was a local reset by a connection pool due to an initial connection failure. + UpstreamConnectionFailure bool `protobuf:"varint,6,opt,name=upstream_connection_failure,json=upstreamConnectionFailure,proto3" json:"upstream_connection_failure,omitempty"` + // Indicates the stream was reset due to an upstream connection termination. + UpstreamConnectionTermination bool `protobuf:"varint,7,opt,name=upstream_connection_termination,json=upstreamConnectionTermination,proto3" json:"upstream_connection_termination,omitempty"` + // Indicates the stream was reset because of a resource overflow. + UpstreamOverflow bool `protobuf:"varint,8,opt,name=upstream_overflow,json=upstreamOverflow,proto3" json:"upstream_overflow,omitempty"` + // Indicates no route was found for the request. + NoRouteFound bool `protobuf:"varint,9,opt,name=no_route_found,json=noRouteFound,proto3" json:"no_route_found,omitempty"` + // Indicates that the request was delayed before proxying. 
+ DelayInjected bool `protobuf:"varint,10,opt,name=delay_injected,json=delayInjected,proto3" json:"delay_injected,omitempty"` + // Indicates that the request was aborted with an injected error code. + FaultInjected bool `protobuf:"varint,11,opt,name=fault_injected,json=faultInjected,proto3" json:"fault_injected,omitempty"` + // Indicates that the request was rate-limited locally. + RateLimited bool `protobuf:"varint,12,opt,name=rate_limited,json=rateLimited,proto3" json:"rate_limited,omitempty"` + // Indicates if the request was deemed unauthorized and the reason for it. + UnauthorizedDetails *ResponseFlags_Unauthorized `protobuf:"bytes,13,opt,name=unauthorized_details,json=unauthorizedDetails,proto3" json:"unauthorized_details,omitempty"` + // Indicates that the request was rejected because there was an error in rate limit service. + RateLimitServiceError bool `protobuf:"varint,14,opt,name=rate_limit_service_error,json=rateLimitServiceError,proto3" json:"rate_limit_service_error,omitempty"` + // Indicates the stream was reset due to a downstream connection termination. + DownstreamConnectionTermination bool `protobuf:"varint,15,opt,name=downstream_connection_termination,json=downstreamConnectionTermination,proto3" json:"downstream_connection_termination,omitempty"` + // Indicates that the upstream retry limit was exceeded, resulting in a downstream error. + UpstreamRetryLimitExceeded bool `protobuf:"varint,16,opt,name=upstream_retry_limit_exceeded,json=upstreamRetryLimitExceeded,proto3" json:"upstream_retry_limit_exceeded,omitempty"` + // Indicates that the stream idle timeout was hit, resulting in a downstream 408. + StreamIdleTimeout bool `protobuf:"varint,17,opt,name=stream_idle_timeout,json=streamIdleTimeout,proto3" json:"stream_idle_timeout,omitempty"` + // Indicates that the request was rejected because an envoy request header failed strict + // validation. 
+ InvalidEnvoyRequestHeaders bool `protobuf:"varint,18,opt,name=invalid_envoy_request_headers,json=invalidEnvoyRequestHeaders,proto3" json:"invalid_envoy_request_headers,omitempty"` + // Indicates there was an HTTP protocol error on the downstream request. + DownstreamProtocolError bool `protobuf:"varint,19,opt,name=downstream_protocol_error,json=downstreamProtocolError,proto3" json:"downstream_protocol_error,omitempty"` + // Indicates there was a max stream duration reached on the upstream request. + UpstreamMaxStreamDurationReached bool `protobuf:"varint,20,opt,name=upstream_max_stream_duration_reached,json=upstreamMaxStreamDurationReached,proto3" json:"upstream_max_stream_duration_reached,omitempty"` + // Indicates the response was served from a cache filter. + ResponseFromCacheFilter bool `protobuf:"varint,21,opt,name=response_from_cache_filter,json=responseFromCacheFilter,proto3" json:"response_from_cache_filter,omitempty"` + // Indicates that a filter configuration is not available. + NoFilterConfigFound bool `protobuf:"varint,22,opt,name=no_filter_config_found,json=noFilterConfigFound,proto3" json:"no_filter_config_found,omitempty"` + // Indicates that request or connection exceeded the downstream connection duration. + DurationTimeout bool `protobuf:"varint,23,opt,name=duration_timeout,json=durationTimeout,proto3" json:"duration_timeout,omitempty"` + // Indicates there was an HTTP protocol error in the upstream response. + UpstreamProtocolError bool `protobuf:"varint,24,opt,name=upstream_protocol_error,json=upstreamProtocolError,proto3" json:"upstream_protocol_error,omitempty"` + // Indicates no cluster was found for the request. + NoClusterFound bool `protobuf:"varint,25,opt,name=no_cluster_found,json=noClusterFound,proto3" json:"no_cluster_found,omitempty"` + // Indicates overload manager terminated the request. 
+ OverloadManager bool `protobuf:"varint,26,opt,name=overload_manager,json=overloadManager,proto3" json:"overload_manager,omitempty"` + // Indicates a DNS resolution failed. + DnsResolutionFailure bool `protobuf:"varint,27,opt,name=dns_resolution_failure,json=dnsResolutionFailure,proto3" json:"dns_resolution_failure,omitempty"` +} + +func (x *ResponseFlags) Reset() { + *x = ResponseFlags{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseFlags) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseFlags) ProtoMessage() {} + +func (x *ResponseFlags) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseFlags.ProtoReflect.Descriptor instead. 
+func (*ResponseFlags) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{4} +} + +func (x *ResponseFlags) GetFailedLocalHealthcheck() bool { + if x != nil { + return x.FailedLocalHealthcheck + } + return false +} + +func (x *ResponseFlags) GetNoHealthyUpstream() bool { + if x != nil { + return x.NoHealthyUpstream + } + return false +} + +func (x *ResponseFlags) GetUpstreamRequestTimeout() bool { + if x != nil { + return x.UpstreamRequestTimeout + } + return false +} + +func (x *ResponseFlags) GetLocalReset() bool { + if x != nil { + return x.LocalReset + } + return false +} + +func (x *ResponseFlags) GetUpstreamRemoteReset() bool { + if x != nil { + return x.UpstreamRemoteReset + } + return false +} + +func (x *ResponseFlags) GetUpstreamConnectionFailure() bool { + if x != nil { + return x.UpstreamConnectionFailure + } + return false +} + +func (x *ResponseFlags) GetUpstreamConnectionTermination() bool { + if x != nil { + return x.UpstreamConnectionTermination + } + return false +} + +func (x *ResponseFlags) GetUpstreamOverflow() bool { + if x != nil { + return x.UpstreamOverflow + } + return false +} + +func (x *ResponseFlags) GetNoRouteFound() bool { + if x != nil { + return x.NoRouteFound + } + return false +} + +func (x *ResponseFlags) GetDelayInjected() bool { + if x != nil { + return x.DelayInjected + } + return false +} + +func (x *ResponseFlags) GetFaultInjected() bool { + if x != nil { + return x.FaultInjected + } + return false +} + +func (x *ResponseFlags) GetRateLimited() bool { + if x != nil { + return x.RateLimited + } + return false +} + +func (x *ResponseFlags) GetUnauthorizedDetails() *ResponseFlags_Unauthorized { + if x != nil { + return x.UnauthorizedDetails + } + return nil +} + +func (x *ResponseFlags) GetRateLimitServiceError() bool { + if x != nil { + return x.RateLimitServiceError + } + return false +} + +func (x *ResponseFlags) GetDownstreamConnectionTermination() bool { + if x != nil 
{ + return x.DownstreamConnectionTermination + } + return false +} + +func (x *ResponseFlags) GetUpstreamRetryLimitExceeded() bool { + if x != nil { + return x.UpstreamRetryLimitExceeded + } + return false +} + +func (x *ResponseFlags) GetStreamIdleTimeout() bool { + if x != nil { + return x.StreamIdleTimeout + } + return false +} + +func (x *ResponseFlags) GetInvalidEnvoyRequestHeaders() bool { + if x != nil { + return x.InvalidEnvoyRequestHeaders + } + return false +} + +func (x *ResponseFlags) GetDownstreamProtocolError() bool { + if x != nil { + return x.DownstreamProtocolError + } + return false +} + +func (x *ResponseFlags) GetUpstreamMaxStreamDurationReached() bool { + if x != nil { + return x.UpstreamMaxStreamDurationReached + } + return false +} + +func (x *ResponseFlags) GetResponseFromCacheFilter() bool { + if x != nil { + return x.ResponseFromCacheFilter + } + return false +} + +func (x *ResponseFlags) GetNoFilterConfigFound() bool { + if x != nil { + return x.NoFilterConfigFound + } + return false +} + +func (x *ResponseFlags) GetDurationTimeout() bool { + if x != nil { + return x.DurationTimeout + } + return false +} + +func (x *ResponseFlags) GetUpstreamProtocolError() bool { + if x != nil { + return x.UpstreamProtocolError + } + return false +} + +func (x *ResponseFlags) GetNoClusterFound() bool { + if x != nil { + return x.NoClusterFound + } + return false +} + +func (x *ResponseFlags) GetOverloadManager() bool { + if x != nil { + return x.OverloadManager + } + return false +} + +func (x *ResponseFlags) GetDnsResolutionFailure() bool { + if x != nil { + return x.DnsResolutionFailure + } + return false +} + +// Properties of a negotiated TLS connection. +// [#next-free-field: 8] +type TLSProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Version of TLS that was negotiated. 
+ TlsVersion TLSProperties_TLSVersion `protobuf:"varint,1,opt,name=tls_version,json=tlsVersion,proto3,enum=envoy.data.accesslog.v3.TLSProperties_TLSVersion" json:"tls_version,omitempty"` + // TLS cipher suite negotiated during handshake. The value is a + // four-digit hex code defined by the IANA TLS Cipher Suite Registry + // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). + // + // Here it is expressed as an integer. + TlsCipherSuite *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=tls_cipher_suite,json=tlsCipherSuite,proto3" json:"tls_cipher_suite,omitempty"` + // SNI hostname from handshake. + TlsSniHostname string `protobuf:"bytes,3,opt,name=tls_sni_hostname,json=tlsSniHostname,proto3" json:"tls_sni_hostname,omitempty"` + // Properties of the local certificate used to negotiate TLS. + LocalCertificateProperties *TLSProperties_CertificateProperties `protobuf:"bytes,4,opt,name=local_certificate_properties,json=localCertificateProperties,proto3" json:"local_certificate_properties,omitempty"` + // Properties of the peer certificate used to negotiate TLS. + PeerCertificateProperties *TLSProperties_CertificateProperties `protobuf:"bytes,5,opt,name=peer_certificate_properties,json=peerCertificateProperties,proto3" json:"peer_certificate_properties,omitempty"` + // The TLS session ID. + TlsSessionId string `protobuf:"bytes,6,opt,name=tls_session_id,json=tlsSessionId,proto3" json:"tls_session_id,omitempty"` + // The ``JA3`` fingerprint when ``JA3`` fingerprinting is enabled. 
+ Ja3Fingerprint string `protobuf:"bytes,7,opt,name=ja3_fingerprint,json=ja3Fingerprint,proto3" json:"ja3_fingerprint,omitempty"` +} + +func (x *TLSProperties) Reset() { + *x = TLSProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TLSProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TLSProperties) ProtoMessage() {} + +func (x *TLSProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TLSProperties.ProtoReflect.Descriptor instead. +func (*TLSProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5} +} + +func (x *TLSProperties) GetTlsVersion() TLSProperties_TLSVersion { + if x != nil { + return x.TlsVersion + } + return TLSProperties_VERSION_UNSPECIFIED +} + +func (x *TLSProperties) GetTlsCipherSuite() *wrappers.UInt32Value { + if x != nil { + return x.TlsCipherSuite + } + return nil +} + +func (x *TLSProperties) GetTlsSniHostname() string { + if x != nil { + return x.TlsSniHostname + } + return "" +} + +func (x *TLSProperties) GetLocalCertificateProperties() *TLSProperties_CertificateProperties { + if x != nil { + return x.LocalCertificateProperties + } + return nil +} + +func (x *TLSProperties) GetPeerCertificateProperties() *TLSProperties_CertificateProperties { + if x != nil { + return x.PeerCertificateProperties + } + return nil +} + +func (x *TLSProperties) GetTlsSessionId() string { + if x != nil { + return x.TlsSessionId + } + return "" +} + +func (x *TLSProperties) GetJa3Fingerprint() string { + if x != nil { + return 
x.Ja3Fingerprint + } + return "" +} + +// [#next-free-field: 16] +type HTTPRequestProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The request method (RFC 7231/2616). + RequestMethod v3.RequestMethod `protobuf:"varint,1,opt,name=request_method,json=requestMethod,proto3,enum=envoy.config.core.v3.RequestMethod" json:"request_method,omitempty"` + // The scheme portion of the incoming request URI. + Scheme string `protobuf:"bytes,2,opt,name=scheme,proto3" json:"scheme,omitempty"` + // HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value. + Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + // The port of the incoming request URI + // (unused currently, as port is composed onto authority). + Port *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=port,proto3" json:"port,omitempty"` + // The path portion from the incoming request URI. + Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` + // Value of the ``User-Agent`` request header. + UserAgent string `protobuf:"bytes,6,opt,name=user_agent,json=userAgent,proto3" json:"user_agent,omitempty"` + // Value of the ``Referer`` request header. + Referer string `protobuf:"bytes,7,opt,name=referer,proto3" json:"referer,omitempty"` + // Value of the ``X-Forwarded-For`` request header. + ForwardedFor string `protobuf:"bytes,8,opt,name=forwarded_for,json=forwardedFor,proto3" json:"forwarded_for,omitempty"` + // Value of the ``X-Request-Id`` request header + // + // This header is used by Envoy to uniquely identify a request. + // It will be generated for all external requests and internal requests that + // do not already have a request ID. + RequestId string `protobuf:"bytes,9,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Value of the ``X-Envoy-Original-Path`` request header. 
+ OriginalPath string `protobuf:"bytes,10,opt,name=original_path,json=originalPath,proto3" json:"original_path,omitempty"` + // Size of the HTTP request headers in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + RequestHeadersBytes uint64 `protobuf:"varint,11,opt,name=request_headers_bytes,json=requestHeadersBytes,proto3" json:"request_headers_bytes,omitempty"` + // Size of the HTTP request body in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + RequestBodyBytes uint64 `protobuf:"varint,12,opt,name=request_body_bytes,json=requestBodyBytes,proto3" json:"request_body_bytes,omitempty"` + // Map of additional headers that have been configured to be logged. + RequestHeaders map[string]string `protobuf:"bytes,13,rep,name=request_headers,json=requestHeaders,proto3" json:"request_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Number of header bytes sent to the upstream by the http stream, including protocol overhead. + // + // This value accumulates during upstream retries. + UpstreamHeaderBytesSent uint64 `protobuf:"varint,14,opt,name=upstream_header_bytes_sent,json=upstreamHeaderBytesSent,proto3" json:"upstream_header_bytes_sent,omitempty"` + // Number of header bytes received from the downstream by the http stream, including protocol overhead. 
+ DownstreamHeaderBytesReceived uint64 `protobuf:"varint,15,opt,name=downstream_header_bytes_received,json=downstreamHeaderBytesReceived,proto3" json:"downstream_header_bytes_received,omitempty"` +} + +func (x *HTTPRequestProperties) Reset() { + *x = HTTPRequestProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPRequestProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPRequestProperties) ProtoMessage() {} + +func (x *HTTPRequestProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPRequestProperties.ProtoReflect.Descriptor instead. 
+func (*HTTPRequestProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{6} +} + +func (x *HTTPRequestProperties) GetRequestMethod() v3.RequestMethod { + if x != nil { + return x.RequestMethod + } + return v3.RequestMethod(0) +} + +func (x *HTTPRequestProperties) GetScheme() string { + if x != nil { + return x.Scheme + } + return "" +} + +func (x *HTTPRequestProperties) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *HTTPRequestProperties) GetPort() *wrappers.UInt32Value { + if x != nil { + return x.Port + } + return nil +} + +func (x *HTTPRequestProperties) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *HTTPRequestProperties) GetUserAgent() string { + if x != nil { + return x.UserAgent + } + return "" +} + +func (x *HTTPRequestProperties) GetReferer() string { + if x != nil { + return x.Referer + } + return "" +} + +func (x *HTTPRequestProperties) GetForwardedFor() string { + if x != nil { + return x.ForwardedFor + } + return "" +} + +func (x *HTTPRequestProperties) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *HTTPRequestProperties) GetOriginalPath() string { + if x != nil { + return x.OriginalPath + } + return "" +} + +func (x *HTTPRequestProperties) GetRequestHeadersBytes() uint64 { + if x != nil { + return x.RequestHeadersBytes + } + return 0 +} + +func (x *HTTPRequestProperties) GetRequestBodyBytes() uint64 { + if x != nil { + return x.RequestBodyBytes + } + return 0 +} + +func (x *HTTPRequestProperties) GetRequestHeaders() map[string]string { + if x != nil { + return x.RequestHeaders + } + return nil +} + +func (x *HTTPRequestProperties) GetUpstreamHeaderBytesSent() uint64 { + if x != nil { + return x.UpstreamHeaderBytesSent + } + return 0 +} + +func (x *HTTPRequestProperties) GetDownstreamHeaderBytesReceived() uint64 { + if x != nil { + return 
x.DownstreamHeaderBytesReceived + } + return 0 +} + +// [#next-free-field: 9] +type HTTPResponseProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP response code returned by Envoy. + ResponseCode *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` + // Size of the HTTP response headers in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include protocol overhead or overhead from framing or encoding at other networking layers. + ResponseHeadersBytes uint64 `protobuf:"varint,2,opt,name=response_headers_bytes,json=responseHeadersBytes,proto3" json:"response_headers_bytes,omitempty"` + // Size of the HTTP response body in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + ResponseBodyBytes uint64 `protobuf:"varint,3,opt,name=response_body_bytes,json=responseBodyBytes,proto3" json:"response_body_bytes,omitempty"` + // Map of additional headers configured to be logged. + ResponseHeaders map[string]string `protobuf:"bytes,4,rep,name=response_headers,json=responseHeaders,proto3" json:"response_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Map of trailers configured to be logged. + ResponseTrailers map[string]string `protobuf:"bytes,5,rep,name=response_trailers,json=responseTrailers,proto3" json:"response_trailers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The HTTP response code details. 
+ ResponseCodeDetails string `protobuf:"bytes,6,opt,name=response_code_details,json=responseCodeDetails,proto3" json:"response_code_details,omitempty"` + // Number of header bytes received from the upstream by the http stream, including protocol overhead. + UpstreamHeaderBytesReceived uint64 `protobuf:"varint,7,opt,name=upstream_header_bytes_received,json=upstreamHeaderBytesReceived,proto3" json:"upstream_header_bytes_received,omitempty"` + // Number of header bytes sent to the downstream by the http stream, including protocol overhead. + DownstreamHeaderBytesSent uint64 `protobuf:"varint,8,opt,name=downstream_header_bytes_sent,json=downstreamHeaderBytesSent,proto3" json:"downstream_header_bytes_sent,omitempty"` +} + +func (x *HTTPResponseProperties) Reset() { + *x = HTTPResponseProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPResponseProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPResponseProperties) ProtoMessage() {} + +func (x *HTTPResponseProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPResponseProperties.ProtoReflect.Descriptor instead. 
+func (*HTTPResponseProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{7} +} + +func (x *HTTPResponseProperties) GetResponseCode() *wrappers.UInt32Value { + if x != nil { + return x.ResponseCode + } + return nil +} + +func (x *HTTPResponseProperties) GetResponseHeadersBytes() uint64 { + if x != nil { + return x.ResponseHeadersBytes + } + return 0 +} + +func (x *HTTPResponseProperties) GetResponseBodyBytes() uint64 { + if x != nil { + return x.ResponseBodyBytes + } + return 0 +} + +func (x *HTTPResponseProperties) GetResponseHeaders() map[string]string { + if x != nil { + return x.ResponseHeaders + } + return nil +} + +func (x *HTTPResponseProperties) GetResponseTrailers() map[string]string { + if x != nil { + return x.ResponseTrailers + } + return nil +} + +func (x *HTTPResponseProperties) GetResponseCodeDetails() string { + if x != nil { + return x.ResponseCodeDetails + } + return "" +} + +func (x *HTTPResponseProperties) GetUpstreamHeaderBytesReceived() uint64 { + if x != nil { + return x.UpstreamHeaderBytesReceived + } + return 0 +} + +func (x *HTTPResponseProperties) GetDownstreamHeaderBytesSent() uint64 { + if x != nil { + return x.DownstreamHeaderBytesSent + } + return 0 +} + +type ResponseFlags_Unauthorized struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Reason ResponseFlags_Unauthorized_Reason `protobuf:"varint,1,opt,name=reason,proto3,enum=envoy.data.accesslog.v3.ResponseFlags_Unauthorized_Reason" json:"reason,omitempty"` +} + +func (x *ResponseFlags_Unauthorized) Reset() { + *x = ResponseFlags_Unauthorized{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseFlags_Unauthorized) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*ResponseFlags_Unauthorized) ProtoMessage() {} + +func (x *ResponseFlags_Unauthorized) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseFlags_Unauthorized.ProtoReflect.Descriptor instead. +func (*ResponseFlags_Unauthorized) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *ResponseFlags_Unauthorized) GetReason() ResponseFlags_Unauthorized_Reason { + if x != nil { + return x.Reason + } + return ResponseFlags_Unauthorized_REASON_UNSPECIFIED +} + +type TLSProperties_CertificateProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // SANs present in the certificate. + SubjectAltName []*TLSProperties_CertificateProperties_SubjectAltName `protobuf:"bytes,1,rep,name=subject_alt_name,json=subjectAltName,proto3" json:"subject_alt_name,omitempty"` + // The subject field of the certificate. 
+ Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` +} + +func (x *TLSProperties_CertificateProperties) Reset() { + *x = TLSProperties_CertificateProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TLSProperties_CertificateProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TLSProperties_CertificateProperties) ProtoMessage() {} + +func (x *TLSProperties_CertificateProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TLSProperties_CertificateProperties.ProtoReflect.Descriptor instead. 
+func (*TLSProperties_CertificateProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *TLSProperties_CertificateProperties) GetSubjectAltName() []*TLSProperties_CertificateProperties_SubjectAltName { + if x != nil { + return x.SubjectAltName + } + return nil +} + +func (x *TLSProperties_CertificateProperties) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +type TLSProperties_CertificateProperties_SubjectAltName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to San: + // *TLSProperties_CertificateProperties_SubjectAltName_Uri + // *TLSProperties_CertificateProperties_SubjectAltName_Dns + San isTLSProperties_CertificateProperties_SubjectAltName_San `protobuf_oneof:"san"` +} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) Reset() { + *x = TLSProperties_CertificateProperties_SubjectAltName{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TLSProperties_CertificateProperties_SubjectAltName) ProtoMessage() {} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TLSProperties_CertificateProperties_SubjectAltName.ProtoReflect.Descriptor instead. 
+func (*TLSProperties_CertificateProperties_SubjectAltName) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5, 0, 0} +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) GetSan() isTLSProperties_CertificateProperties_SubjectAltName_San { + if m != nil { + return m.San + } + return nil +} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) GetUri() string { + if x, ok := x.GetSan().(*TLSProperties_CertificateProperties_SubjectAltName_Uri); ok { + return x.Uri + } + return "" +} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) GetDns() string { + if x, ok := x.GetSan().(*TLSProperties_CertificateProperties_SubjectAltName_Dns); ok { + return x.Dns + } + return "" +} + +type isTLSProperties_CertificateProperties_SubjectAltName_San interface { + isTLSProperties_CertificateProperties_SubjectAltName_San() +} + +type TLSProperties_CertificateProperties_SubjectAltName_Uri struct { + Uri string `protobuf:"bytes,1,opt,name=uri,proto3,oneof"` +} + +type TLSProperties_CertificateProperties_SubjectAltName_Dns struct { + // [#not-implemented-hide:] + Dns string `protobuf:"bytes,2,opt,name=dns,proto3,oneof"` +} + +func (*TLSProperties_CertificateProperties_SubjectAltName_Uri) isTLSProperties_CertificateProperties_SubjectAltName_San() { +} + +func (*TLSProperties_CertificateProperties_SubjectAltName_Dns) isTLSProperties_CertificateProperties_SubjectAltName_San() { +} + +var File_envoy_data_accesslog_v3_accesslog_proto protoreflect.FileDescriptor + +var file_envoy_data_accesslog_v3_accesslog_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 
0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 
0x22, 0x80, 0x02, 0x0a, 0x11, 0x54, 0x43, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x55, 0x0a, 0x11, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x52, 0x10, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x62, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x14, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x43, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, + 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0xf0, 0x03, 0x0a, 0x12, 0x48, 0x54, 0x54, 0x50, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x55, 0x0a, + 0x11, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 
0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x52, 0x10, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x12, 0x62, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x48, 0x54, 0x54, 0x50, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x55, 0x0a, 0x0b, 0x48, 0x54, 0x54, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x14, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 
0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x54, 0x54, 0x50, + 0x31, 0x30, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x54, 0x54, 0x50, 0x31, 0x31, 0x10, 0x02, + 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x48, + 0x54, 0x54, 0x50, 0x33, 0x10, 0x04, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x91, 0x01, 0x0a, 0x14, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x72, 0x65, 0x63, 0x65, + 0x69, 0x76, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x6e, + 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, + 0x65, 0x6e, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, + 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x96, 0x15, + 0x0a, 0x0f, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x12, 0x38, 0x0a, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + 0x0a, 0x73, 0x61, 0x6d, 0x70, 
0x6c, 0x65, 0x52, 0x61, 0x74, 0x65, 0x12, 0x59, 0x0a, 0x19, 0x64, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x17, 0x64, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x57, 0x0a, 0x18, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x16, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x4d, 0x0a, 0x0e, 0x74, 0x6c, 0x73, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, + 0x0d, 0x74, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x39, + 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x49, 
0x0a, 0x14, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x4c, 0x61, 0x73, 0x74, 0x52, 0x78, + 0x42, 0x79, 0x74, 0x65, 0x12, 0x5c, 0x0a, 0x1e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, + 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, + 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x46, + 0x69, 0x72, 0x73, 0x74, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x78, 0x42, 0x79, + 0x74, 0x65, 0x12, 0x5a, 0x0a, 0x1d, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x78, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x4c, 0x61, 0x73, 0x74, + 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x5c, + 0x0a, 0x1e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 
0x52, 0x19, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x46, 0x69, 0x72, 0x73, 0x74, 0x55, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x5a, 0x0a, 0x1d, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, + 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x52, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x60, 0x0a, 0x20, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1b, 0x74, + 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x46, 0x69, 0x72, 0x73, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x54, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x5e, 0x0a, 0x1f, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1a, + 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x4c, 0x61, 0x73, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x54, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x17, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 
0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x53, 0x0a, 0x16, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x52, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x12, 0x4d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x6c, + 0x61, 0x67, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, + 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 
0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x49, 0x0a, 0x21, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x66, 0x0a, 0x20, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, + 0x1d, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x72, + 0x0a, 0x14, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, + 0x66, 0x69, 0x6c, 
0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x12, 0x59, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, + 0x73, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x12, 0x35, 0x0a, + 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x1e, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1b, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x74, 0x74, + 0x65, 0x6d, 0x70, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x44, 0x0a, 0x1e, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x1c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x72, + 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x1a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x72, 0x65, 0x61, 
0x6d, 0x49, 0x64, 0x12, 0x41, 0x0a, 0x16, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x67, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x18, 0x01, + 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x14, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x4d, 0x0a, 0x23, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, + 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x20, 0x64, 0x6f, + 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3b, + 0x0a, 0x1a, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x72, + 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x1d, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x17, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, + 0x72, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x43, 0x0a, 0x1e, 0x64, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x1e, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x1b, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, + 0x69, 0x72, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, + 0x12, 0x37, 0x0a, 0x18, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x72, + 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, 0x72, 
0x65, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x3f, 0x0a, 0x1c, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x20, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x19, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, 0x72, 0x65, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x12, 0x4e, 0x0a, 0x0f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x21, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x5b, 0x0a, 0x17, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 
0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xe9, 0x0d, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x66, 0x61, 0x69, 0x6c, + 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x66, 0x61, 0x69, 0x6c, + 0x65, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x12, 0x2e, 0x0a, 0x13, 0x6e, 0x6f, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, + 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x11, 0x6e, 0x6f, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x12, 0x38, 0x0a, 0x18, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x65, 0x74, 0x12, 0x32, 0x0a, + 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x72, 0x65, 0x73, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, + 0x74, 0x12, 0x3e, 0x0a, 0x1b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 
0x69, 0x6c, 0x75, 0x72, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x12, 0x46, 0x0a, 0x1f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x76, + 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x5f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x6e, 0x6f, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x25, 0x0a, 0x0e, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x49, 0x6e, 0x6a, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x69, 0x6e, 0x6a, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x61, + 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x12, 0x66, 0x0a, + 0x14, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x64, 0x65, + 0x74, 
0x61, 0x69, 0x6c, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, + 0x61, 0x67, 0x73, 0x2e, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, + 0x52, 0x13, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x44, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x4a, + 0x0a, 0x21, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x64, 0x6f, 0x77, 0x6e, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x1d, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x5f, 0x65, 0x78, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1a, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x79, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x45, 0x78, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x12, 0x2e, 0x0a, + 0x13, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x49, 0x64, 0x6c, 0x65, 0x54, 
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x41, 0x0a, + 0x1d, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x45, 0x6e, 0x76, + 0x6f, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x12, 0x3a, 0x0a, 0x19, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x13, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x17, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x4e, 0x0a, 0x24, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x61, + 0x63, 0x68, 0x65, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x4d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x63, 0x68, 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x1a, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x33, 0x0a, 0x16, 0x6e, 0x6f, 0x5f, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x66, 0x6f, + 0x75, 0x6e, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x6e, 0x6f, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, 0x75, 0x6e, 
0x64, 0x12, 0x29, + 0x0a, 0x10, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x28, 0x0a, 0x10, 0x6e, 0x6f, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x6e, 0x6f, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x6f, + 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, + 0x1a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, + 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x1a, 0xd5, 0x01, 0x0a, + 0x0c, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x12, 0x52, 0x0a, + 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x46, 0x6c, 0x61, 0x67, 0x73, 0x2e, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x65, 0x64, 0x2e, 
0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x22, 0x36, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x52, + 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x01, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, + 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x2e, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x65, 0x64, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, + 0x67, 0x73, 0x22, 0xad, 0x08, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, + 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 
0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, + 0x12, 0x28, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x73, 0x6e, 0x69, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x53, + 0x6e, 0x69, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x7e, 0x0a, 0x1c, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x1a, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x1b, 0x70, 0x65, + 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x19, 0x70, + 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x6c, 0x73, 
0x5f, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x74, 0x6c, 0x73, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x27, + 0x0a, 0x0f, 0x6a, 0x61, 0x33, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, + 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6a, 0x61, 0x33, 0x46, 0x69, 0x6e, 0x67, + 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x1a, 0x81, 0x03, 0x0a, 0x15, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x12, 0x75, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x0e, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x1a, 0x92, 0x01, 0x0a, 0x0e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, + 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x12, 0x0a, 0x03, 0x64, 0x6e, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x3a, 0x51, 0x9a, + 0xc5, 0x88, 0x1e, 0x4c, 0x0a, 0x4a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x61, 0x63, 0x63, 0x65, 
0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, + 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x42, 0x05, 0x0a, 0x03, 0x73, 0x61, 0x6e, 0x3a, 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x57, 0x0a, 0x0a, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x13, 0x56, 0x45, 0x52, + 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, + 0x53, 0x76, 0x31, 0x5f, 0x32, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, + 0x5f, 0x33, 0x10, 0x04, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x22, 0xd9, 0x06, 0x0a, 0x15, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x0e, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 
0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x30, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1d, + 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x6f, 0x72, 0x77, 0x61, + 0x72, 0x64, 0x65, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, + 0x28, 
0x09, 0x52, 0x0c, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, + 0x12, 0x32, 0x0a, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, + 0x3b, 0x0a, 0x1a, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x17, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x20, + 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 
0x1d, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x63, + 0x65, 0x69, 0x76, 0x65, 0x64, 0x1a, 0x41, 0x0a, 0x13, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, + 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa0, + 0x06, 0x0a, 0x16, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0d, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x34, 0x0a, 0x16, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x62, + 0x6f, 0x64, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x04, 0x52, + 0x11, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x6f, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x12, 0x72, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, + 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, + 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x43, 0x0a, 0x1e, 0x75, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x62, + 0x79, 0x74, 0x65, 
0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x1b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, + 0x12, 0x3f, 0x0a, 0x1c, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x6e, 0x74, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x19, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x65, 0x6e, + 0x74, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, + 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x2a, 0x87, 0x02, 0x0a, 0x0d, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x6f, 
0x74, 0x53, 0x65, 0x74, 0x10, 0x00, 0x12, + 0x18, 0x0a, 0x14, 0x54, 0x63, 0x70, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x63, 0x70, + 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x63, + 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x10, 0x03, + 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x10, 0x05, 0x12, 0x11, 0x0a, + 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x10, 0x06, + 0x12, 0x15, 0x0a, 0x11, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x6f, 0x6f, 0x6c, + 0x52, 0x65, 0x61, 0x64, 0x79, 0x10, 0x07, 0x12, 0x14, 0x0a, 0x10, 0x55, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x10, 0x08, 0x12, 0x0f, 0x0a, + 0x0b, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x10, 0x09, 0x12, 0x2b, + 0x0a, 0x27, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x75, 0x6e, 0x6e, + 0x65, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x45, 0x73, + 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x10, 0x0a, 0x42, 0x8d, 0x01, 0x0a, 0x25, + 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 
0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_data_accesslog_v3_accesslog_proto_rawDescOnce sync.Once + file_envoy_data_accesslog_v3_accesslog_proto_rawDescData = file_envoy_data_accesslog_v3_accesslog_proto_rawDesc +) + +func file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP() []byte { + file_envoy_data_accesslog_v3_accesslog_proto_rawDescOnce.Do(func() { + file_envoy_data_accesslog_v3_accesslog_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_data_accesslog_v3_accesslog_proto_rawDescData) + }) + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescData +} + +var file_envoy_data_accesslog_v3_accesslog_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_envoy_data_accesslog_v3_accesslog_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_envoy_data_accesslog_v3_accesslog_proto_goTypes = []interface{}{ + (AccessLogType)(0), // 0: envoy.data.accesslog.v3.AccessLogType + (HTTPAccessLogEntry_HTTPVersion)(0), // 1: envoy.data.accesslog.v3.HTTPAccessLogEntry.HTTPVersion + (ResponseFlags_Unauthorized_Reason)(0), // 2: envoy.data.accesslog.v3.ResponseFlags.Unauthorized.Reason + (TLSProperties_TLSVersion)(0), // 3: envoy.data.accesslog.v3.TLSProperties.TLSVersion + (*TCPAccessLogEntry)(nil), // 4: envoy.data.accesslog.v3.TCPAccessLogEntry + (*HTTPAccessLogEntry)(nil), // 5: envoy.data.accesslog.v3.HTTPAccessLogEntry + (*ConnectionProperties)(nil), // 6: envoy.data.accesslog.v3.ConnectionProperties + (*AccessLogCommon)(nil), // 7: envoy.data.accesslog.v3.AccessLogCommon + (*ResponseFlags)(nil), // 8: envoy.data.accesslog.v3.ResponseFlags + 
(*TLSProperties)(nil), // 9: envoy.data.accesslog.v3.TLSProperties + (*HTTPRequestProperties)(nil), // 10: envoy.data.accesslog.v3.HTTPRequestProperties + (*HTTPResponseProperties)(nil), // 11: envoy.data.accesslog.v3.HTTPResponseProperties + nil, // 12: envoy.data.accesslog.v3.AccessLogCommon.FilterStateObjectsEntry + nil, // 13: envoy.data.accesslog.v3.AccessLogCommon.CustomTagsEntry + (*ResponseFlags_Unauthorized)(nil), // 14: envoy.data.accesslog.v3.ResponseFlags.Unauthorized + (*TLSProperties_CertificateProperties)(nil), // 15: envoy.data.accesslog.v3.TLSProperties.CertificateProperties + (*TLSProperties_CertificateProperties_SubjectAltName)(nil), // 16: envoy.data.accesslog.v3.TLSProperties.CertificateProperties.SubjectAltName + nil, // 17: envoy.data.accesslog.v3.HTTPRequestProperties.RequestHeadersEntry + nil, // 18: envoy.data.accesslog.v3.HTTPResponseProperties.ResponseHeadersEntry + nil, // 19: envoy.data.accesslog.v3.HTTPResponseProperties.ResponseTrailersEntry + (*v3.Address)(nil), // 20: envoy.config.core.v3.Address + (*timestamp.Timestamp)(nil), // 21: google.protobuf.Timestamp + (*duration.Duration)(nil), // 22: google.protobuf.Duration + (*v3.Metadata)(nil), // 23: envoy.config.core.v3.Metadata + (*wrappers.UInt32Value)(nil), // 24: google.protobuf.UInt32Value + (v3.RequestMethod)(0), // 25: envoy.config.core.v3.RequestMethod + (*any1.Any)(nil), // 26: google.protobuf.Any +} +var file_envoy_data_accesslog_v3_accesslog_proto_depIdxs = []int32{ + 7, // 0: envoy.data.accesslog.v3.TCPAccessLogEntry.common_properties:type_name -> envoy.data.accesslog.v3.AccessLogCommon + 6, // 1: envoy.data.accesslog.v3.TCPAccessLogEntry.connection_properties:type_name -> envoy.data.accesslog.v3.ConnectionProperties + 7, // 2: envoy.data.accesslog.v3.HTTPAccessLogEntry.common_properties:type_name -> envoy.data.accesslog.v3.AccessLogCommon + 1, // 3: envoy.data.accesslog.v3.HTTPAccessLogEntry.protocol_version:type_name -> 
envoy.data.accesslog.v3.HTTPAccessLogEntry.HTTPVersion + 10, // 4: envoy.data.accesslog.v3.HTTPAccessLogEntry.request:type_name -> envoy.data.accesslog.v3.HTTPRequestProperties + 11, // 5: envoy.data.accesslog.v3.HTTPAccessLogEntry.response:type_name -> envoy.data.accesslog.v3.HTTPResponseProperties + 20, // 6: envoy.data.accesslog.v3.AccessLogCommon.downstream_remote_address:type_name -> envoy.config.core.v3.Address + 20, // 7: envoy.data.accesslog.v3.AccessLogCommon.downstream_local_address:type_name -> envoy.config.core.v3.Address + 9, // 8: envoy.data.accesslog.v3.AccessLogCommon.tls_properties:type_name -> envoy.data.accesslog.v3.TLSProperties + 21, // 9: envoy.data.accesslog.v3.AccessLogCommon.start_time:type_name -> google.protobuf.Timestamp + 22, // 10: envoy.data.accesslog.v3.AccessLogCommon.time_to_last_rx_byte:type_name -> google.protobuf.Duration + 22, // 11: envoy.data.accesslog.v3.AccessLogCommon.time_to_first_upstream_tx_byte:type_name -> google.protobuf.Duration + 22, // 12: envoy.data.accesslog.v3.AccessLogCommon.time_to_last_upstream_tx_byte:type_name -> google.protobuf.Duration + 22, // 13: envoy.data.accesslog.v3.AccessLogCommon.time_to_first_upstream_rx_byte:type_name -> google.protobuf.Duration + 22, // 14: envoy.data.accesslog.v3.AccessLogCommon.time_to_last_upstream_rx_byte:type_name -> google.protobuf.Duration + 22, // 15: envoy.data.accesslog.v3.AccessLogCommon.time_to_first_downstream_tx_byte:type_name -> google.protobuf.Duration + 22, // 16: envoy.data.accesslog.v3.AccessLogCommon.time_to_last_downstream_tx_byte:type_name -> google.protobuf.Duration + 20, // 17: envoy.data.accesslog.v3.AccessLogCommon.upstream_remote_address:type_name -> envoy.config.core.v3.Address + 20, // 18: envoy.data.accesslog.v3.AccessLogCommon.upstream_local_address:type_name -> envoy.config.core.v3.Address + 8, // 19: envoy.data.accesslog.v3.AccessLogCommon.response_flags:type_name -> envoy.data.accesslog.v3.ResponseFlags + 23, // 20: 
envoy.data.accesslog.v3.AccessLogCommon.metadata:type_name -> envoy.config.core.v3.Metadata + 20, // 21: envoy.data.accesslog.v3.AccessLogCommon.downstream_direct_remote_address:type_name -> envoy.config.core.v3.Address + 12, // 22: envoy.data.accesslog.v3.AccessLogCommon.filter_state_objects:type_name -> envoy.data.accesslog.v3.AccessLogCommon.FilterStateObjectsEntry + 13, // 23: envoy.data.accesslog.v3.AccessLogCommon.custom_tags:type_name -> envoy.data.accesslog.v3.AccessLogCommon.CustomTagsEntry + 22, // 24: envoy.data.accesslog.v3.AccessLogCommon.duration:type_name -> google.protobuf.Duration + 0, // 25: envoy.data.accesslog.v3.AccessLogCommon.access_log_type:type_name -> envoy.data.accesslog.v3.AccessLogType + 14, // 26: envoy.data.accesslog.v3.ResponseFlags.unauthorized_details:type_name -> envoy.data.accesslog.v3.ResponseFlags.Unauthorized + 3, // 27: envoy.data.accesslog.v3.TLSProperties.tls_version:type_name -> envoy.data.accesslog.v3.TLSProperties.TLSVersion + 24, // 28: envoy.data.accesslog.v3.TLSProperties.tls_cipher_suite:type_name -> google.protobuf.UInt32Value + 15, // 29: envoy.data.accesslog.v3.TLSProperties.local_certificate_properties:type_name -> envoy.data.accesslog.v3.TLSProperties.CertificateProperties + 15, // 30: envoy.data.accesslog.v3.TLSProperties.peer_certificate_properties:type_name -> envoy.data.accesslog.v3.TLSProperties.CertificateProperties + 25, // 31: envoy.data.accesslog.v3.HTTPRequestProperties.request_method:type_name -> envoy.config.core.v3.RequestMethod + 24, // 32: envoy.data.accesslog.v3.HTTPRequestProperties.port:type_name -> google.protobuf.UInt32Value + 17, // 33: envoy.data.accesslog.v3.HTTPRequestProperties.request_headers:type_name -> envoy.data.accesslog.v3.HTTPRequestProperties.RequestHeadersEntry + 24, // 34: envoy.data.accesslog.v3.HTTPResponseProperties.response_code:type_name -> google.protobuf.UInt32Value + 18, // 35: envoy.data.accesslog.v3.HTTPResponseProperties.response_headers:type_name -> 
envoy.data.accesslog.v3.HTTPResponseProperties.ResponseHeadersEntry + 19, // 36: envoy.data.accesslog.v3.HTTPResponseProperties.response_trailers:type_name -> envoy.data.accesslog.v3.HTTPResponseProperties.ResponseTrailersEntry + 26, // 37: envoy.data.accesslog.v3.AccessLogCommon.FilterStateObjectsEntry.value:type_name -> google.protobuf.Any + 2, // 38: envoy.data.accesslog.v3.ResponseFlags.Unauthorized.reason:type_name -> envoy.data.accesslog.v3.ResponseFlags.Unauthorized.Reason + 16, // 39: envoy.data.accesslog.v3.TLSProperties.CertificateProperties.subject_alt_name:type_name -> envoy.data.accesslog.v3.TLSProperties.CertificateProperties.SubjectAltName + 40, // [40:40] is the sub-list for method output_type + 40, // [40:40] is the sub-list for method input_type + 40, // [40:40] is the sub-list for extension type_name + 40, // [40:40] is the sub-list for extension extendee + 0, // [0:40] is the sub-list for field type_name +} + +func init() { file_envoy_data_accesslog_v3_accesslog_proto_init() } +func file_envoy_data_accesslog_v3_accesslog_proto_init() { + if File_envoy_data_accesslog_v3_accesslog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TCPAccessLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPAccessLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectionProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccessLogCommon); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseFlags); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TLSProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPRequestProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPResponseProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseFlags_Unauthorized); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TLSProperties_CertificateProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TLSProperties_CertificateProperties_SubjectAltName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[12].OneofWrappers = []interface{}{ + (*TLSProperties_CertificateProperties_SubjectAltName_Uri)(nil), + (*TLSProperties_CertificateProperties_SubjectAltName_Dns)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_data_accesslog_v3_accesslog_proto_rawDesc, + NumEnums: 4, + NumMessages: 16, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_data_accesslog_v3_accesslog_proto_goTypes, + DependencyIndexes: file_envoy_data_accesslog_v3_accesslog_proto_depIdxs, + EnumInfos: file_envoy_data_accesslog_v3_accesslog_proto_enumTypes, + MessageInfos: file_envoy_data_accesslog_v3_accesslog_proto_msgTypes, + }.Build() + File_envoy_data_accesslog_v3_accesslog_proto = out.File + file_envoy_data_accesslog_v3_accesslog_proto_rawDesc = nil + file_envoy_data_accesslog_v3_accesslog_proto_goTypes = nil + file_envoy_data_accesslog_v3_accesslog_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go new file mode 100644 index 0000000000..42ca35d399 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go @@ -0,0 +1,2252 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/data/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.RequestMethod(0) +) + +// Validate checks the field values on TCPAccessLogEntry with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TCPAccessLogEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TCPAccessLogEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TCPAccessLogEntryMultiError, or nil if none found. 
+func (m *TCPAccessLogEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *TCPAccessLogEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCommonProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TCPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TCPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TCPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConnectionProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TCPAccessLogEntryValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TCPAccessLogEntryValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TCPAccessLogEntryValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TCPAccessLogEntryMultiError(errors) + } + + return nil +} + +// 
TCPAccessLogEntryMultiError is an error wrapping multiple validation errors +// returned by TCPAccessLogEntry.ValidateAll() if the designated constraints +// aren't met. +type TCPAccessLogEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TCPAccessLogEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TCPAccessLogEntryMultiError) AllErrors() []error { return m } + +// TCPAccessLogEntryValidationError is the validation error returned by +// TCPAccessLogEntry.Validate if the designated constraints aren't met. +type TCPAccessLogEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TCPAccessLogEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TCPAccessLogEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TCPAccessLogEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TCPAccessLogEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TCPAccessLogEntryValidationError) ErrorName() string { + return "TCPAccessLogEntryValidationError" +} + +// Error satisfies the builtin error interface +func (e TCPAccessLogEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTCPAccessLogEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TCPAccessLogEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TCPAccessLogEntryValidationError{} + +// Validate checks the field values on HTTPAccessLogEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HTTPAccessLogEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HTTPAccessLogEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HTTPAccessLogEntryMultiError, or nil if none found. 
+func (m *HTTPAccessLogEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *HTTPAccessLogEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCommonProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ProtocolVersion + + if all { + switch v := interface{}(m.GetRequest()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequest()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPAccessLogEntryValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetResponse()).(type) { + case interface{ ValidateAll() error }: + if err := 
v.ValidateAll(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "Response", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "Response", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResponse()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPAccessLogEntryValidationError{ + field: "Response", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HTTPAccessLogEntryMultiError(errors) + } + + return nil +} + +// HTTPAccessLogEntryMultiError is an error wrapping multiple validation errors +// returned by HTTPAccessLogEntry.ValidateAll() if the designated constraints +// aren't met. +type HTTPAccessLogEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HTTPAccessLogEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HTTPAccessLogEntryMultiError) AllErrors() []error { return m } + +// HTTPAccessLogEntryValidationError is the validation error returned by +// HTTPAccessLogEntry.Validate if the designated constraints aren't met. +type HTTPAccessLogEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HTTPAccessLogEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HTTPAccessLogEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e HTTPAccessLogEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HTTPAccessLogEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HTTPAccessLogEntryValidationError) ErrorName() string { + return "HTTPAccessLogEntryValidationError" +} + +// Error satisfies the builtin error interface +func (e HTTPAccessLogEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHTTPAccessLogEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HTTPAccessLogEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HTTPAccessLogEntryValidationError{} + +// Validate checks the field values on ConnectionProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ConnectionProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ConnectionProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ConnectionPropertiesMultiError, or nil if none found. 
+func (m *ConnectionProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *ConnectionProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ReceivedBytes + + // no validation rules for SentBytes + + if len(errors) > 0 { + return ConnectionPropertiesMultiError(errors) + } + + return nil +} + +// ConnectionPropertiesMultiError is an error wrapping multiple validation +// errors returned by ConnectionProperties.ValidateAll() if the designated +// constraints aren't met. +type ConnectionPropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ConnectionPropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ConnectionPropertiesMultiError) AllErrors() []error { return m } + +// ConnectionPropertiesValidationError is the validation error returned by +// ConnectionProperties.Validate if the designated constraints aren't met. +type ConnectionPropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ConnectionPropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ConnectionPropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ConnectionPropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ConnectionPropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ConnectionPropertiesValidationError) ErrorName() string { + return "ConnectionPropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e ConnectionPropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sConnectionProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ConnectionPropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ConnectionPropertiesValidationError{} + +// Validate checks the field values on AccessLogCommon with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *AccessLogCommon) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AccessLogCommon with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// AccessLogCommonMultiError, or nil if none found. 
+func (m *AccessLogCommon) ValidateAll() error { + return m.validate(true) +} + +func (m *AccessLogCommon) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if val := m.GetSampleRate(); val <= 0 || val > 1 { + err := AccessLogCommonValidationError{ + field: "SampleRate", + reason: "value must be inside range (0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetDownstreamRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDownstreamRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "DownstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDownstreamLocalAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDownstreamLocalAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != 
nil { + return AccessLogCommonValidationError{ + field: "DownstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTlsProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TlsProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TlsProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TlsProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetStartTime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "StartTime", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "StartTime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStartTime()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "StartTime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToLastRxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: 
"TimeToLastRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToLastRxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToLastRxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToFirstUpstreamTxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToFirstUpstreamTxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToLastUpstreamTxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastUpstreamTxByte", + reason: 
"embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToLastUpstreamTxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToLastUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToFirstUpstreamRxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToFirstUpstreamRxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToLastUpstreamRxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToLastUpstreamRxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + 
field: "TimeToLastUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToFirstDownstreamTxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToFirstDownstreamTxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToFirstDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToLastDownstreamTxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToLastDownstreamTxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToLastDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpstreamRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + 
if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "UpstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "UpstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "UpstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpstreamLocalAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "UpstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "UpstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamLocalAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "UpstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UpstreamCluster + + if all { + switch v := interface{}(m.GetResponseFlags()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "ResponseFlags", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := 
v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "ResponseFlags", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResponseFlags()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "ResponseFlags", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UpstreamTransportFailureReason + + // no validation rules for RouteName + + if all { + switch v := interface{}(m.GetDownstreamDirectRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamDirectRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamDirectRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetDownstreamDirectRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "DownstreamDirectRemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + { + sorted_keys := make([]string, len(m.GetFilterStateObjects())) + i := 0 + for key := range m.GetFilterStateObjects() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetFilterStateObjects()[key] + _ = val + + // no validation rules for FilterStateObjects[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: fmt.Sprintf("FilterStateObjects[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: fmt.Sprintf("FilterStateObjects[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: fmt.Sprintf("FilterStateObjects[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + // no validation rules for CustomTags + + if all { + switch v := interface{}(m.GetDuration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "Duration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
AccessLogCommonValidationError{ + field: "Duration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDuration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "Duration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UpstreamRequestAttemptCount + + // no validation rules for ConnectionTerminationDetails + + // no validation rules for StreamId + + // no validation rules for IntermediateLogEntry + + // no validation rules for DownstreamTransportFailureReason + + // no validation rules for DownstreamWireBytesSent + + // no validation rules for DownstreamWireBytesReceived + + // no validation rules for UpstreamWireBytesSent + + // no validation rules for UpstreamWireBytesReceived + + // no validation rules for AccessLogType + + if len(errors) > 0 { + return AccessLogCommonMultiError(errors) + } + + return nil +} + +// AccessLogCommonMultiError is an error wrapping multiple validation errors +// returned by AccessLogCommon.ValidateAll() if the designated constraints +// aren't met. +type AccessLogCommonMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AccessLogCommonMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AccessLogCommonMultiError) AllErrors() []error { return m } + +// AccessLogCommonValidationError is the validation error returned by +// AccessLogCommon.Validate if the designated constraints aren't met. +type AccessLogCommonValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e AccessLogCommonValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AccessLogCommonValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AccessLogCommonValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AccessLogCommonValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AccessLogCommonValidationError) ErrorName() string { return "AccessLogCommonValidationError" } + +// Error satisfies the builtin error interface +func (e AccessLogCommonValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAccessLogCommon.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AccessLogCommonValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AccessLogCommonValidationError{} + +// Validate checks the field values on ResponseFlags with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ResponseFlags) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResponseFlags with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResponseFlagsMultiError, or +// nil if none found. 
+func (m *ResponseFlags) ValidateAll() error { + return m.validate(true) +} + +func (m *ResponseFlags) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for FailedLocalHealthcheck + + // no validation rules for NoHealthyUpstream + + // no validation rules for UpstreamRequestTimeout + + // no validation rules for LocalReset + + // no validation rules for UpstreamRemoteReset + + // no validation rules for UpstreamConnectionFailure + + // no validation rules for UpstreamConnectionTermination + + // no validation rules for UpstreamOverflow + + // no validation rules for NoRouteFound + + // no validation rules for DelayInjected + + // no validation rules for FaultInjected + + // no validation rules for RateLimited + + if all { + switch v := interface{}(m.GetUnauthorizedDetails()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResponseFlagsValidationError{ + field: "UnauthorizedDetails", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResponseFlagsValidationError{ + field: "UnauthorizedDetails", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUnauthorizedDetails()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResponseFlagsValidationError{ + field: "UnauthorizedDetails", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RateLimitServiceError + + // no validation rules for DownstreamConnectionTermination + + // no validation rules for UpstreamRetryLimitExceeded + + // no validation rules for StreamIdleTimeout + + // no validation rules for InvalidEnvoyRequestHeaders + + // no validation rules for DownstreamProtocolError + + // no validation rules for 
UpstreamMaxStreamDurationReached + + // no validation rules for ResponseFromCacheFilter + + // no validation rules for NoFilterConfigFound + + // no validation rules for DurationTimeout + + // no validation rules for UpstreamProtocolError + + // no validation rules for NoClusterFound + + // no validation rules for OverloadManager + + // no validation rules for DnsResolutionFailure + + if len(errors) > 0 { + return ResponseFlagsMultiError(errors) + } + + return nil +} + +// ResponseFlagsMultiError is an error wrapping multiple validation errors +// returned by ResponseFlags.ValidateAll() if the designated constraints +// aren't met. +type ResponseFlagsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResponseFlagsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResponseFlagsMultiError) AllErrors() []error { return m } + +// ResponseFlagsValidationError is the validation error returned by +// ResponseFlags.Validate if the designated constraints aren't met. +type ResponseFlagsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResponseFlagsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResponseFlagsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResponseFlagsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResponseFlagsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ResponseFlagsValidationError) ErrorName() string { return "ResponseFlagsValidationError" } + +// Error satisfies the builtin error interface +func (e ResponseFlagsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResponseFlags.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResponseFlagsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResponseFlagsValidationError{} + +// Validate checks the field values on TLSProperties with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TLSProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TLSProperties with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TLSPropertiesMultiError, or +// nil if none found. 
+func (m *TLSProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *TLSProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TlsVersion + + if all { + switch v := interface{}(m.GetTlsCipherSuite()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "TlsCipherSuite", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "TlsCipherSuite", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsCipherSuite()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TLSPropertiesValidationError{ + field: "TlsCipherSuite", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TlsSniHostname + + if all { + switch v := interface{}(m.GetLocalCertificateProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "LocalCertificateProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "LocalCertificateProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalCertificateProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TLSPropertiesValidationError{ + field: "LocalCertificateProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := 
interface{}(m.GetPeerCertificateProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "PeerCertificateProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "PeerCertificateProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPeerCertificateProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TLSPropertiesValidationError{ + field: "PeerCertificateProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TlsSessionId + + // no validation rules for Ja3Fingerprint + + if len(errors) > 0 { + return TLSPropertiesMultiError(errors) + } + + return nil +} + +// TLSPropertiesMultiError is an error wrapping multiple validation errors +// returned by TLSProperties.ValidateAll() if the designated constraints +// aren't met. +type TLSPropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TLSPropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TLSPropertiesMultiError) AllErrors() []error { return m } + +// TLSPropertiesValidationError is the validation error returned by +// TLSProperties.Validate if the designated constraints aren't met. +type TLSPropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e TLSPropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TLSPropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TLSPropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TLSPropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TLSPropertiesValidationError) ErrorName() string { return "TLSPropertiesValidationError" } + +// Error satisfies the builtin error interface +func (e TLSPropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTLSProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TLSPropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TLSPropertiesValidationError{} + +// Validate checks the field values on HTTPRequestProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HTTPRequestProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HTTPRequestProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HTTPRequestPropertiesMultiError, or nil if none found. 
+func (m *HTTPRequestProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *HTTPRequestProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := v3.RequestMethod_name[int32(m.GetRequestMethod())]; !ok { + err := HTTPRequestPropertiesValidationError{ + field: "RequestMethod", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Scheme + + // no validation rules for Authority + + if all { + switch v := interface{}(m.GetPort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPRequestPropertiesValidationError{ + field: "Port", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPRequestPropertiesValidationError{ + field: "Port", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPRequestPropertiesValidationError{ + field: "Port", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Path + + // no validation rules for UserAgent + + // no validation rules for Referer + + // no validation rules for ForwardedFor + + // no validation rules for RequestId + + // no validation rules for OriginalPath + + // no validation rules for RequestHeadersBytes + + // no validation rules for RequestBodyBytes + + // no validation rules for RequestHeaders + + // no validation rules for UpstreamHeaderBytesSent + + // no validation rules for DownstreamHeaderBytesReceived + + if len(errors) > 0 { + return HTTPRequestPropertiesMultiError(errors) + } + + return nil +} + +// HTTPRequestPropertiesMultiError is an error 
wrapping multiple validation +// errors returned by HTTPRequestProperties.ValidateAll() if the designated +// constraints aren't met. +type HTTPRequestPropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HTTPRequestPropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HTTPRequestPropertiesMultiError) AllErrors() []error { return m } + +// HTTPRequestPropertiesValidationError is the validation error returned by +// HTTPRequestProperties.Validate if the designated constraints aren't met. +type HTTPRequestPropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HTTPRequestPropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HTTPRequestPropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HTTPRequestPropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HTTPRequestPropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HTTPRequestPropertiesValidationError) ErrorName() string { + return "HTTPRequestPropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e HTTPRequestPropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHTTPRequestProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HTTPRequestPropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HTTPRequestPropertiesValidationError{} + +// Validate checks the field values on HTTPResponseProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HTTPResponseProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HTTPResponseProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HTTPResponsePropertiesMultiError, or nil if none found. 
+func (m *HTTPResponseProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *HTTPResponseProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetResponseCode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPResponsePropertiesValidationError{ + field: "ResponseCode", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPResponsePropertiesValidationError{ + field: "ResponseCode", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResponseCode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPResponsePropertiesValidationError{ + field: "ResponseCode", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ResponseHeadersBytes + + // no validation rules for ResponseBodyBytes + + // no validation rules for ResponseHeaders + + // no validation rules for ResponseTrailers + + // no validation rules for ResponseCodeDetails + + // no validation rules for UpstreamHeaderBytesReceived + + // no validation rules for DownstreamHeaderBytesSent + + if len(errors) > 0 { + return HTTPResponsePropertiesMultiError(errors) + } + + return nil +} + +// HTTPResponsePropertiesMultiError is an error wrapping multiple validation +// errors returned by HTTPResponseProperties.ValidateAll() if the designated +// constraints aren't met. +type HTTPResponsePropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m HTTPResponsePropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HTTPResponsePropertiesMultiError) AllErrors() []error { return m } + +// HTTPResponsePropertiesValidationError is the validation error returned by +// HTTPResponseProperties.Validate if the designated constraints aren't met. +type HTTPResponsePropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HTTPResponsePropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HTTPResponsePropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HTTPResponsePropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HTTPResponsePropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HTTPResponsePropertiesValidationError) ErrorName() string { + return "HTTPResponsePropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e HTTPResponsePropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHTTPResponseProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HTTPResponsePropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HTTPResponsePropertiesValidationError{} + +// Validate checks the field values on ResponseFlags_Unauthorized with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ResponseFlags_Unauthorized) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResponseFlags_Unauthorized with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResponseFlags_UnauthorizedMultiError, or nil if none found. +func (m *ResponseFlags_Unauthorized) ValidateAll() error { + return m.validate(true) +} + +func (m *ResponseFlags_Unauthorized) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Reason + + if len(errors) > 0 { + return ResponseFlags_UnauthorizedMultiError(errors) + } + + return nil +} + +// ResponseFlags_UnauthorizedMultiError is an error wrapping multiple +// validation errors returned by ResponseFlags_Unauthorized.ValidateAll() if +// the designated constraints aren't met. +type ResponseFlags_UnauthorizedMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResponseFlags_UnauthorizedMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResponseFlags_UnauthorizedMultiError) AllErrors() []error { return m } + +// ResponseFlags_UnauthorizedValidationError is the validation error returned +// by ResponseFlags_Unauthorized.Validate if the designated constraints aren't met. +type ResponseFlags_UnauthorizedValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResponseFlags_UnauthorizedValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e ResponseFlags_UnauthorizedValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResponseFlags_UnauthorizedValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResponseFlags_UnauthorizedValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResponseFlags_UnauthorizedValidationError) ErrorName() string { + return "ResponseFlags_UnauthorizedValidationError" +} + +// Error satisfies the builtin error interface +func (e ResponseFlags_UnauthorizedValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResponseFlags_Unauthorized.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResponseFlags_UnauthorizedValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResponseFlags_UnauthorizedValidationError{} + +// Validate checks the field values on TLSProperties_CertificateProperties with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *TLSProperties_CertificateProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TLSProperties_CertificateProperties +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// TLSProperties_CertificatePropertiesMultiError, or nil if none found. 
+func (m *TLSProperties_CertificateProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *TLSProperties_CertificateProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetSubjectAltName() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TLSProperties_CertificatePropertiesValidationError{ + field: fmt.Sprintf("SubjectAltName[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TLSProperties_CertificatePropertiesValidationError{ + field: fmt.Sprintf("SubjectAltName[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TLSProperties_CertificatePropertiesValidationError{ + field: fmt.Sprintf("SubjectAltName[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for Subject + + if len(errors) > 0 { + return TLSProperties_CertificatePropertiesMultiError(errors) + } + + return nil +} + +// TLSProperties_CertificatePropertiesMultiError is an error wrapping multiple +// validation errors returned by +// TLSProperties_CertificateProperties.ValidateAll() if the designated +// constraints aren't met. +type TLSProperties_CertificatePropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TLSProperties_CertificatePropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m TLSProperties_CertificatePropertiesMultiError) AllErrors() []error { return m } + +// TLSProperties_CertificatePropertiesValidationError is the validation error +// returned by TLSProperties_CertificateProperties.Validate if the designated +// constraints aren't met. +type TLSProperties_CertificatePropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TLSProperties_CertificatePropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TLSProperties_CertificatePropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TLSProperties_CertificatePropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TLSProperties_CertificatePropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TLSProperties_CertificatePropertiesValidationError) ErrorName() string { + return "TLSProperties_CertificatePropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e TLSProperties_CertificatePropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTLSProperties_CertificateProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TLSProperties_CertificatePropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TLSProperties_CertificatePropertiesValidationError{} + +// Validate checks the field values on +// TLSProperties_CertificateProperties_SubjectAltName with the rules defined +// in the proto definition for this message. 
If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TLSProperties_CertificateProperties_SubjectAltName) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// TLSProperties_CertificateProperties_SubjectAltName with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// TLSProperties_CertificateProperties_SubjectAltNameMultiError, or nil if +// none found. +func (m *TLSProperties_CertificateProperties_SubjectAltName) ValidateAll() error { + return m.validate(true) +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + switch v := m.San.(type) { + case *TLSProperties_CertificateProperties_SubjectAltName_Uri: + if v == nil { + err := TLSProperties_CertificateProperties_SubjectAltNameValidationError{ + field: "San", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Uri + case *TLSProperties_CertificateProperties_SubjectAltName_Dns: + if v == nil { + err := TLSProperties_CertificateProperties_SubjectAltNameValidationError{ + field: "San", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Dns + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return TLSProperties_CertificateProperties_SubjectAltNameMultiError(errors) + } + + return nil +} + +// TLSProperties_CertificateProperties_SubjectAltNameMultiError is an error +// wrapping multiple validation errors returned by +// TLSProperties_CertificateProperties_SubjectAltName.ValidateAll() if the +// designated constraints aren't met. 
+type TLSProperties_CertificateProperties_SubjectAltNameMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TLSProperties_CertificateProperties_SubjectAltNameMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TLSProperties_CertificateProperties_SubjectAltNameMultiError) AllErrors() []error { return m } + +// TLSProperties_CertificateProperties_SubjectAltNameValidationError is the +// validation error returned by +// TLSProperties_CertificateProperties_SubjectAltName.Validate if the +// designated constraints aren't met. +type TLSProperties_CertificateProperties_SubjectAltNameValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) ErrorName() string { + return "TLSProperties_CertificateProperties_SubjectAltNameValidationError" +} + +// Error satisfies the builtin error interface +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTLSProperties_CertificateProperties_SubjectAltName.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TLSProperties_CertificateProperties_SubjectAltNameValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TLSProperties_CertificateProperties_SubjectAltNameValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go index 2d9b2e0ce4..5ff4c5de1d 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/extensions/clusters/aggregate/v3/cluster.proto package aggregatev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go index 9c7e19eb01..1442b756b0 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/extensions/filters/common/fault/v3/fault.proto package faultv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go index 9579d32625..06015e74d7 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go @@ -86,9 +86,20 @@ func (m *FaultDelay) validate(all bool) error { } } - switch m.FaultDelaySecifier.(type) { - + oneofFaultDelaySecifierPresent := false + switch v := m.FaultDelaySecifier.(type) { case *FaultDelay_FixedDelay: + if v == nil { + err := FaultDelayValidationError{ + field: "FaultDelaySecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + 
errors = append(errors, err) + } + oneofFaultDelaySecifierPresent = true if d := m.GetFixedDelay(); d != nil { dur, err := d.AsDuration(), d.CheckValid() @@ -121,6 +132,17 @@ func (m *FaultDelay) validate(all bool) error { } case *FaultDelay_HeaderDelay_: + if v == nil { + err := FaultDelayValidationError{ + field: "FaultDelaySecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFaultDelaySecifierPresent = true if all { switch v := interface{}(m.GetHeaderDelay()).(type) { @@ -152,6 +174,9 @@ func (m *FaultDelay) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofFaultDelaySecifierPresent { err := FaultDelayValidationError{ field: "FaultDelaySecifier", reason: "value is required", @@ -160,7 +185,6 @@ func (m *FaultDelay) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -291,9 +315,20 @@ func (m *FaultRateLimit) validate(all bool) error { } } - switch m.LimitType.(type) { - + oneofLimitTypePresent := false + switch v := m.LimitType.(type) { case *FaultRateLimit_FixedLimit_: + if v == nil { + err := FaultRateLimitValidationError{ + field: "LimitType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLimitTypePresent = true if all { switch v := interface{}(m.GetFixedLimit()).(type) { @@ -325,6 +360,17 @@ func (m *FaultRateLimit) validate(all bool) error { } case *FaultRateLimit_HeaderLimit_: + if v == nil { + err := FaultRateLimitValidationError{ + field: "LimitType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLimitTypePresent = true if all { switch v := interface{}(m.GetHeaderLimit()).(type) { @@ -356,6 +402,9 @@ func (m *FaultRateLimit) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofLimitTypePresent { err := 
FaultRateLimitValidationError{ field: "LimitType", reason: "value is required", @@ -364,7 +413,6 @@ func (m *FaultRateLimit) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go index c09591a8eb..7d88eb8206 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/extensions/filters/http/fault/v3/fault.proto package faultv3 @@ -12,6 +12,7 @@ import ( v31 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" + _struct "github.com/golang/protobuf/ptypes/struct" wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -134,7 +135,7 @@ func (*FaultAbort_GrpcStatus) isFaultAbort_ErrorType() {} func (*FaultAbort_HeaderAbort_) isFaultAbort_ErrorType() {} -// [#next-free-field: 16] +// [#next-free-field: 17] type HTTPFault struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -144,7 +145,7 @@ type HTTPFault struct { // object. Delay *v31.FaultDelay `protobuf:"bytes,1,opt,name=delay,proto3" json:"delay,omitempty"` // If specified, the filter will abort requests based on the values in - // the object. 
At least *abort* or *delay* must be specified. + // the object. At least ``abort`` or ``delay`` must be specified. Abort *FaultAbort `protobuf:"bytes,2,opt,name=abort,proto3" json:"abort,omitempty"` // Specifies the name of the (destination) upstream cluster that the // filter should match on. Fault injection will be restricted to requests @@ -158,7 +159,7 @@ type HTTPFault struct { // The filter will check the request's headers against all the specified // headers in the filter config. A match will happen if all the headers in the // config are present in the request with the same values (or based on - // presence if the *value* field is not in the config). + // presence if the ``value`` field is not in the config). Headers []*v32.HeaderMatcher `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty"` // Faults are injected for the specified list of downstream hosts. If this // setting is not set, faults are injected for all downstream nodes. @@ -171,9 +172,9 @@ type HTTPFault struct { // filter. Note that because this setting can be overridden at the route level, it's possible // for the number of active faults to be greater than this value (if injected via a different // route). If not specified, defaults to unlimited. This setting can be overridden via - // `runtime ` and any faults that are not injected - // due to overflow will be indicated via the `faults_overflow - // ` stat. + // ``runtime `` and any faults that are not injected + // due to overflow will be indicated via the ``faults_overflow + // `` stat. // // .. attention:: // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy @@ -214,6 +215,12 @@ type HTTPFault struct { // If set to false, dynamic stats storage will be allocated for the downstream cluster name. // Default value is false. 
DisableDownstreamClusterStats bool `protobuf:"varint,15,opt,name=disable_downstream_cluster_stats,json=disableDownstreamClusterStats,proto3" json:"disable_downstream_cluster_stats,omitempty"` + // When an abort or delay fault is executed, the metadata struct provided here will be added to the + // request's dynamic metadata under the namespace corresponding to the name of the fault filter. + // This data can be logged as part of Access Logs using the :ref:`command operator + // ` %DYNAMIC_METADATA(NAMESPACE)%, where NAMESPACE is the name of + // the fault filter. + FilterMetadata *_struct.Struct `protobuf:"bytes,16,opt,name=filter_metadata,json=filterMetadata,proto3" json:"filter_metadata,omitempty"` } func (x *HTTPFault) Reset() { @@ -353,6 +360,13 @@ func (x *HTTPFault) GetDisableDownstreamClusterStats() bool { return false } +func (x *HTTPFault) GetFilterMetadata() *_struct.Struct { + if x != nil { + return x.FilterMetadata + } + return nil +} + // Fault aborts are controlled via an HTTP header (if applicable). See the // :ref:`HTTP fault filter ` documentation for // more information. 
@@ -410,103 +424,109 @@ var file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDesc = []byte{ 0x74, 0x65, 0x72, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, - 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, - 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, - 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, - 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, 0x03, 0x0a, 0x0a, 0x46, 0x61, 0x75, - 0x6c, 0x74, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, 0x42, - 0x08, 0x2a, 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x68, 0x74, 0x74, - 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0b, 0x67, 0x72, 0x70, 0x63, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x0a, - 0x67, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x63, 0x0a, 0x0c, 0x68, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, - 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, - 0x62, 0x6f, 0x72, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, - 0x48, 0x00, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x12, - 0x40, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, - 0x65, 0x1a, 0x4e, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, - 0x3a, 0x3f, 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, - 0x70, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, - 0x41, 0x62, 0x6f, 0x72, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, - 0x74, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, - 0x74, 0x70, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, 0x6c, - 0x74, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x42, 0x11, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, - 0x85, 0x08, 0x0a, 0x09, 0x48, 0x54, 0x54, 0x50, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x4a, 0x0a, - 0x05, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x34, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x44, 0x65, 0x6c, - 0x61, 0x79, 0x52, 0x05, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x48, 0x0a, 0x05, 0x61, 0x62, 0x6f, - 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, - 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x52, 0x05, 0x61, 0x62, - 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x75, - 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3e, - 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x29, - 0x0a, 0x10, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6e, 0x6f, 0x64, - 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x11, 0x6d, 0x61, 0x78, - 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x61, 0x75, - 0x6c, 0x74, 0x73, 0x12, 0x68, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, - 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, - 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x11, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x32, 0x0a, - 0x15, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x72, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x64, 0x65, - 0x6c, 0x61, 0x79, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, - 0x65, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, - 0x6e, 0x74, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x13, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x61, - 0x62, 0x6f, 0x72, 0x74, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 
0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, - 0x61, 0x62, 0x6f, 0x72, 0x74, 0x48, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x72, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x41, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, - 0x65, 0x12, 0x4c, 0x0a, 0x23, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x72, 0x61, - 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, - 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, - 0x39, 0x0a, 0x19, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x16, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x20, 0x64, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x0f, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x6f, 0x77, - 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 
0x1c, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, 0x03, 0x0a, 0x0a, 0x46, 0x61, 0x75, 0x6c, 0x74, + 0x41, 0x62, 0x6f, 0x72, 0x74, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x2a, + 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0b, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x72, + 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x63, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x5f, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x62, 0x6f, + 0x72, 0x74, 0x2e, 0x48, 
0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x48, 0x00, + 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x12, 0x40, 0x0a, + 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x1a, + 0x4e, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x3a, 0x3f, + 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x62, + 0x6f, 0x72, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x3a, + 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, + 0x62, 0x6f, 0x72, 0x74, 0x42, 0x11, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xc7, 0x08, + 0x0a, 0x09, 0x48, 0x54, 0x54, 0x50, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x4a, 0x0a, 0x05, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 
0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, + 0x52, 0x05, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x48, 0x0a, 0x05, 0x61, 0x62, 0x6f, 0x72, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, + 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x52, 0x05, 0x61, 0x62, 0x6f, 0x72, + 0x74, 0x12, 0x29, 0x0a, 0x10, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, + 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x12, 0x68, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x72, 0x61, + 
0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x52, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x11, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x64, 0x65, 0x6c, 0x61, + 0x79, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, + 0x32, 0x0a, 0x15, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, + 0x61, 0x62, 0x6f, 0x72, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x61, 0x62, 0x6f, + 0x72, 0x74, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x61, 0x62, + 0x6f, 0x72, 0x74, 0x48, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x66, 0x61, 0x75, 
0x6c, 0x74, 0x73, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x41, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, + 0x4c, 0x0a, 0x23, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, + 0x19, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x16, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x20, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1d, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x12, 0x40, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 
0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x42, 0xa3, 0x01, 0x0a, 0x34, 0x69, 0x6f, 0x2e, 0x65, @@ -545,6 +565,7 @@ var file_envoy_extensions_filters_http_fault_v3_fault_proto_goTypes = []interfac (*v32.HeaderMatcher)(nil), // 5: envoy.config.route.v3.HeaderMatcher (*wrappers.UInt32Value)(nil), // 6: google.protobuf.UInt32Value (*v31.FaultRateLimit)(nil), // 7: envoy.extensions.filters.common.fault.v3.FaultRateLimit + (*_struct.Struct)(nil), // 8: google.protobuf.Struct } var file_envoy_extensions_filters_http_fault_v3_fault_proto_depIdxs = []int32{ 2, // 0: envoy.extensions.filters.http.fault.v3.FaultAbort.header_abort:type_name -> envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort @@ -554,11 +575,12 @@ var file_envoy_extensions_filters_http_fault_v3_fault_proto_depIdxs = []int32{ 5, // 4: envoy.extensions.filters.http.fault.v3.HTTPFault.headers:type_name -> envoy.config.route.v3.HeaderMatcher 6, // 5: envoy.extensions.filters.http.fault.v3.HTTPFault.max_active_faults:type_name -> google.protobuf.UInt32Value 7, // 6: envoy.extensions.filters.http.fault.v3.HTTPFault.response_rate_limit:type_name -> envoy.extensions.filters.common.fault.v3.FaultRateLimit - 7, // [7:7] is the sub-list for method output_type - 7, // [7:7] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 8, // 7: envoy.extensions.filters.http.fault.v3.HTTPFault.filter_metadata:type_name -> google.protobuf.Struct + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] 
is the sub-list for field type_name } func init() { file_envoy_extensions_filters_http_fault_v3_fault_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go index d212eeda0d..2521f6fa4c 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go @@ -86,9 +86,20 @@ func (m *FaultAbort) validate(all bool) error { } } - switch m.ErrorType.(type) { - + oneofErrorTypePresent := false + switch v := m.ErrorType.(type) { case *FaultAbort_HttpStatus: + if v == nil { + err := FaultAbortValidationError{ + field: "ErrorType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofErrorTypePresent = true if val := m.GetHttpStatus(); val < 200 || val >= 600 { err := FaultAbortValidationError{ @@ -102,9 +113,30 @@ func (m *FaultAbort) validate(all bool) error { } case *FaultAbort_GrpcStatus: + if v == nil { + err := FaultAbortValidationError{ + field: "ErrorType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofErrorTypePresent = true // no validation rules for GrpcStatus - case *FaultAbort_HeaderAbort_: + if v == nil { + err := FaultAbortValidationError{ + field: "ErrorType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofErrorTypePresent = true if all { switch v := interface{}(m.GetHeaderAbort()).(type) { @@ -136,6 +168,9 @@ func (m *FaultAbort) validate(all bool) error { } default: + _ = v // ensures v is 
used + } + if !oneofErrorTypePresent { err := FaultAbortValidationError{ field: "ErrorType", reason: "value is required", @@ -144,7 +179,6 @@ func (m *FaultAbort) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -414,6 +448,35 @@ func (m *HTTPFault) validate(all bool) error { // no validation rules for DisableDownstreamClusterStats + if all { + switch v := interface{}(m.GetFilterMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "FilterMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "FilterMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPFaultValidationError{ + field: "FilterMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return HTTPFaultMultiError(errors) } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go index 981f917623..ece90bfbb5 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/extensions/filters/http/rbac/v3/rbac.proto package rbacv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go index 386973fced..2280a4c76e 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/extensions/filters/http/router/v3/router.proto package routerv3 @@ -9,7 +9,9 @@ package routerv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" + duration "github.com/golang/protobuf/ptypes/duration" wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -24,7 +26,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// [#next-free-field: 8] +// [#next-free-field: 10] type Router struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -43,10 +45,12 @@ type Router struct { // an upstream request. Presuming retries are configured, multiple upstream // requests may be made for each downstream (inbound) request. 
UpstreamLog []*v3.AccessLog `protobuf:"bytes,3,rep,name=upstream_log,json=upstreamLog,proto3" json:"upstream_log,omitempty"` - // Do not add any additional *x-envoy-* headers to requests or responses. This - // only affects the :ref:`router filter generated *x-envoy-* headers + // Additional upstream access log options. + UpstreamLogOptions *Router_UpstreamAccessLogOptions `protobuf:"bytes,9,opt,name=upstream_log_options,json=upstreamLogOptions,proto3" json:"upstream_log_options,omitempty"` + // Do not add any additional ``x-envoy-`` headers to requests or responses. This + // only affects the :ref:`router filter generated x-envoy- headers // `, other Envoy filters and the HTTP - // connection manager may continue to set *x-envoy-* headers. + // connection manager may continue to set ``x-envoy-`` headers. SuppressEnvoyHeaders bool `protobuf:"varint,4,opt,name=suppress_envoy_headers,json=suppressEnvoyHeaders,proto3" json:"suppress_envoy_headers,omitempty"` // Specifies a list of HTTP headers to strictly validate. Envoy will reject a // request and respond with HTTP status 400 if the request contains an invalid @@ -78,6 +82,21 @@ type Router struct { // :ref:`gRPC stats filter` documentation // for more details. SuppressGrpcRequestFailureCodeStats bool `protobuf:"varint,7,opt,name=suppress_grpc_request_failure_code_stats,json=suppressGrpcRequestFailureCodeStats,proto3" json:"suppress_grpc_request_failure_code_stats,omitempty"` + // .. note:: + // Upstream HTTP filters are currently in alpha. + // + // Optional HTTP filters for the upstream filter chain. + // + // These filters will be applied for all requests that pass through the router. + // They will also be applied to shadowed requests. + // Upstream filters cannot change route or cluster. + // Upstream filters specified on the cluster will override these filters. 
+ // + // If using upstream filters, please be aware that local errors sent by + // upstream filters will not trigger retries, and local errors sent by + // upstream filters will count as a final response if hedging is configured. + // [#extension-category: envoy.filters.http.upstream] + UpstreamHttpFilters []*v31.HttpFilter `protobuf:"bytes,8,rep,name=upstream_http_filters,json=upstreamHttpFilters,proto3" json:"upstream_http_filters,omitempty"` } func (x *Router) Reset() { @@ -133,6 +152,13 @@ func (x *Router) GetUpstreamLog() []*v3.AccessLog { return nil } +func (x *Router) GetUpstreamLogOptions() *Router_UpstreamAccessLogOptions { + if x != nil { + return x.UpstreamLogOptions + } + return nil +} + func (x *Router) GetSuppressEnvoyHeaders() bool { if x != nil { return x.SuppressEnvoyHeaders @@ -161,6 +187,78 @@ func (x *Router) GetSuppressGrpcRequestFailureCodeStats() bool { return false } +func (x *Router) GetUpstreamHttpFilters() []*v31.HttpFilter { + if x != nil { + return x.UpstreamHttpFilters + } + return nil +} + +type Router_UpstreamAccessLogOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set to true, an upstream access log will be recorded when an upstream stream is + // associated to an http request. Note: Each HTTP request received for an already established + // connection will result in an upstream access log record. This includes, for example, + // consecutive HTTP requests over the same connection or a request that is retried. + // In case a retry is applied, an upstream access log will be recorded for each retry. + FlushUpstreamLogOnUpstreamStream bool `protobuf:"varint,1,opt,name=flush_upstream_log_on_upstream_stream,json=flushUpstreamLogOnUpstreamStream,proto3" json:"flush_upstream_log_on_upstream_stream,omitempty"` + // The interval to flush the upstream access logs. 
By default, the router will flush an upstream + // access log on stream close, when the HTTP request is complete. If this field is set, the router + // will flush access logs periodically at the specified interval. This is especially useful in the + // case of long-lived requests, such as CONNECT and Websockets. + // The interval must be at least 1 millisecond. + UpstreamLogFlushInterval *duration.Duration `protobuf:"bytes,2,opt,name=upstream_log_flush_interval,json=upstreamLogFlushInterval,proto3" json:"upstream_log_flush_interval,omitempty"` +} + +func (x *Router_UpstreamAccessLogOptions) Reset() { + *x = Router_UpstreamAccessLogOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Router_UpstreamAccessLogOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Router_UpstreamAccessLogOptions) ProtoMessage() {} + +func (x *Router_UpstreamAccessLogOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Router_UpstreamAccessLogOptions.ProtoReflect.Descriptor instead. 
+func (*Router_UpstreamAccessLogOptions) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_router_v3_router_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Router_UpstreamAccessLogOptions) GetFlushUpstreamLogOnUpstreamStream() bool { + if x != nil { + return x.FlushUpstreamLogOnUpstreamStream + } + return false +} + +func (x *Router_UpstreamAccessLogOptions) GetUpstreamLogFlushInterval() *duration.Duration { + if x != nil { + return x.UpstreamLogFlushInterval + } + return nil +} + var File_envoy_extensions_filters_http_router_v3_router_proto protoreflect.FileDescriptor var file_envoy_extensions_filters_http_router_v3_router_proto_rawDesc = []byte{ @@ -172,66 +270,103 @@ var file_envoy_extensions_filters_http_router_v3_router_proto_rawDesc = []byte{ 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, - 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, - 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x05, 0x0a, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, - 0x12, 0x3f, 0x0a, 0x0d, 0x64, 0x79, 0x6e, 
0x61, 0x6d, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, - 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x47, 0x0a, 0x0c, 0x75, - 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x0b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x4c, 0x6f, 0x67, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, - 0x5f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x45, 0x6e, - 0x76, 0x6f, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0xc7, 0x01, 0x0a, 0x14, 0x73, - 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x42, 0x94, 0x01, 0xfa, 0x42, 0x90, 0x01, - 0x92, 0x01, 0x8c, 0x01, 0x22, 0x89, 0x01, 0x72, 0x86, 0x01, 0x52, 0x1e, 0x78, 0x2d, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2d, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2d, 0x72, 0x71, 0x2d, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2d, 0x6d, 0x73, 0x52, 0x26, 0x78, 0x2d, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2d, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2d, 
0x72, 0x71, 0x2d, - 0x70, 0x65, 0x72, 0x2d, 0x74, 0x72, 0x79, 0x2d, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2d, - 0x6d, 0x73, 0x52, 0x13, 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x6d, 0x61, 0x78, 0x2d, - 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x15, 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x2d, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x6f, 0x6e, 0x52, 0x10, - 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x2d, 0x6f, 0x6e, - 0x52, 0x12, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x72, 0x65, 0x73, 0x70, 0x65, 0x63, 0x74, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x71, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x72, 0x65, 0x73, 0x70, 0x65, - 0x63, 0x74, 0x45, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x71, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x12, 0x55, 0x0a, 0x28, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x5f, - 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x66, 0x61, 0x69, - 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x23, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x47, - 0x72, 0x70, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, - 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x42, 0xa7, 0x01, 0x0a, - 0x35, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 
0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x57, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x76, 0x33, 0xba, 0x80, - 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x59, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x73, 0x2f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 
0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xd3, 0x08, 0x0a, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0d, 0x64, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, + 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x28, 0x0a, 0x10, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x73, 0x70, 0x61, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x43, 0x68, 0x69, + 0x6c, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x47, 0x0a, 0x0c, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, + 0x6f, 0x67, 0x52, 0x0b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x67, 0x12, + 0x7a, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 
0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2e, 0x55, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x73, + 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x75, 0x70, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x12, 0xc7, 0x01, 0x0a, 0x14, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, + 0x42, 0x94, 0x01, 0xfa, 0x42, 0x90, 0x01, 0x92, 0x01, 0x8c, 0x01, 0x22, 0x89, 0x01, 0x72, 0x86, + 0x01, 0x52, 0x1e, 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x75, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x2d, 0x72, 0x71, 0x2d, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2d, 0x6d, + 0x73, 0x52, 0x26, 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x75, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x2d, 0x72, 0x71, 0x2d, 0x70, 0x65, 0x72, 0x2d, 0x74, 0x72, 0x79, 0x2d, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2d, 0x6d, 0x73, 0x52, 0x13, 0x78, 0x2d, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2d, 0x6d, 0x61, 0x78, 0x2d, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x15, + 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x2d, 0x67, 0x72, + 0x70, 0x63, 0x2d, 0x6f, 0x6e, 0x52, 0x10, 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x2d, 0x6f, 0x6e, 0x52, 0x12, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x48, 0x65, 0x61, 
0x64, 0x65, 0x72, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x72, + 0x65, 0x73, 0x70, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, + 0x72, 0x71, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x18, 0x72, 0x65, 0x73, 0x70, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x52, 0x71, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x55, 0x0a, 0x28, 0x73, 0x75, + 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x23, 0x73, 0x75, + 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x47, 0x72, 0x70, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x12, 0x7b, 0x0a, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, 0x74, + 0x74, 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x13, 0x75, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0xd3, + 0x01, 0x0a, 0x18, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x25, 0x66, + 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 
0x5f, 0x6c, 0x6f, + 0x67, 0x5f, 0x6f, 0x6e, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x66, 0x6c, 0x75, 0x73, + 0x68, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, 0x55, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x66, 0x0a, 0x1b, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x75, + 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, + 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x18, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x67, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x42, 0xa7, 0x01, 0x0a, 0x35, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x42, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x57, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 
0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -246,20 +381,26 @@ func file_envoy_extensions_filters_http_router_v3_router_proto_rawDescGZIP() []b return file_envoy_extensions_filters_http_router_v3_router_proto_rawDescData } -var file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_envoy_extensions_filters_http_router_v3_router_proto_goTypes = []interface{}{ - (*Router)(nil), // 0: envoy.extensions.filters.http.router.v3.Router - (*wrappers.BoolValue)(nil), // 1: google.protobuf.BoolValue - (*v3.AccessLog)(nil), // 2: envoy.config.accesslog.v3.AccessLog + (*Router)(nil), // 0: envoy.extensions.filters.http.router.v3.Router + (*Router_UpstreamAccessLogOptions)(nil), // 1: envoy.extensions.filters.http.router.v3.Router.UpstreamAccessLogOptions + (*wrappers.BoolValue)(nil), // 2: google.protobuf.BoolValue + (*v3.AccessLog)(nil), // 3: envoy.config.accesslog.v3.AccessLog + (*v31.HttpFilter)(nil), // 4: envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter + (*duration.Duration)(nil), // 5: google.protobuf.Duration } var file_envoy_extensions_filters_http_router_v3_router_proto_depIdxs = []int32{ - 1, // 0: envoy.extensions.filters.http.router.v3.Router.dynamic_stats:type_name -> google.protobuf.BoolValue - 2, // 1: envoy.extensions.filters.http.router.v3.Router.upstream_log:type_name -> envoy.config.accesslog.v3.AccessLog - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for 
method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 2, // 0: envoy.extensions.filters.http.router.v3.Router.dynamic_stats:type_name -> google.protobuf.BoolValue + 3, // 1: envoy.extensions.filters.http.router.v3.Router.upstream_log:type_name -> envoy.config.accesslog.v3.AccessLog + 1, // 2: envoy.extensions.filters.http.router.v3.Router.upstream_log_options:type_name -> envoy.extensions.filters.http.router.v3.Router.UpstreamAccessLogOptions + 4, // 3: envoy.extensions.filters.http.router.v3.Router.upstream_http_filters:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter + 5, // 4: envoy.extensions.filters.http.router.v3.Router.UpstreamAccessLogOptions.upstream_log_flush_interval:type_name -> google.protobuf.Duration + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name } func init() { file_envoy_extensions_filters_http_router_v3_router_proto_init() } @@ -280,6 +421,18 @@ func file_envoy_extensions_filters_http_router_v3_router_proto_init() { return nil } } + file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Router_UpstreamAccessLogOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -287,7 +440,7 @@ func file_envoy_extensions_filters_http_router_v3_router_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_extensions_filters_http_router_v3_router_proto_rawDesc, NumEnums: 0, - NumMessages: 1, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, 
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go index 8f061af69f..bb99e4b66b 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go @@ -121,6 +121,35 @@ func (m *Router) validate(all bool) error { } + if all { + switch v := interface{}(m.GetUpstreamLogOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouterValidationError{ + field: "UpstreamLogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouterValidationError{ + field: "UpstreamLogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamLogOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouterValidationError{ + field: "UpstreamLogOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + // no validation rules for SuppressEnvoyHeaders for idx, item := range m.GetStrictCheckHeaders() { @@ -143,6 +172,40 @@ func (m *Router) validate(all bool) error { // no validation rules for SuppressGrpcRequestFailureCodeStats + for idx, item := range m.GetUpstreamHttpFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouterValidationError{ + field: 
fmt.Sprintf("UpstreamHttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouterValidationError{ + field: fmt.Sprintf("UpstreamHttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouterValidationError{ + field: fmt.Sprintf("UpstreamHttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + if len(errors) > 0 { return RouterMultiError(errors) } @@ -227,3 +290,138 @@ var _Router_StrictCheckHeaders_InLookup = map[string]struct{}{ "x-envoy-retry-grpc-on": {}, "x-envoy-retry-on": {}, } + +// Validate checks the field values on Router_UpstreamAccessLogOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Router_UpstreamAccessLogOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Router_UpstreamAccessLogOptions with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Router_UpstreamAccessLogOptionsMultiError, or nil if none found. 
+func (m *Router_UpstreamAccessLogOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *Router_UpstreamAccessLogOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for FlushUpstreamLogOnUpstreamStream + + if d := m.GetUpstreamLogFlushInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = Router_UpstreamAccessLogOptionsValidationError{ + field: "UpstreamLogFlushInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := Router_UpstreamAccessLogOptionsValidationError{ + field: "UpstreamLogFlushInterval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return Router_UpstreamAccessLogOptionsMultiError(errors) + } + + return nil +} + +// Router_UpstreamAccessLogOptionsMultiError is an error wrapping multiple +// validation errors returned by Router_UpstreamAccessLogOptions.ValidateAll() +// if the designated constraints aren't met. +type Router_UpstreamAccessLogOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Router_UpstreamAccessLogOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Router_UpstreamAccessLogOptionsMultiError) AllErrors() []error { return m } + +// Router_UpstreamAccessLogOptionsValidationError is the validation error +// returned by Router_UpstreamAccessLogOptions.Validate if the designated +// constraints aren't met. 
+type Router_UpstreamAccessLogOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Router_UpstreamAccessLogOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Router_UpstreamAccessLogOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Router_UpstreamAccessLogOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Router_UpstreamAccessLogOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Router_UpstreamAccessLogOptionsValidationError) ErrorName() string { + return "Router_UpstreamAccessLogOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e Router_UpstreamAccessLogOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouter_UpstreamAccessLogOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Router_UpstreamAccessLogOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Router_UpstreamAccessLogOptionsValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go index 2ed874ab25..f8a0f1b266 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go +++ 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go @@ -1,13 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto package http_connection_managerv3 import ( _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" v31 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v32 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" @@ -16,7 +17,7 @@ import ( v34 "github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3" v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" duration "github.com/golang/protobuf/ptypes/duration" wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -337,7 +338,7 @@ func (HttpConnectionManager_Tracing_OperationName) EnumDescriptor() ([]byte, []i return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 0, 0} } -// [#next-free-field: 51] +// [#next-free-field: 57] type HttpConnectionManager struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -381,7 +382,7 @@ type HttpConnectionManager struct { // [#not-implemented-hide:] Http3ProtocolOptions *v3.Http3ProtocolOptions `protobuf:"bytes,44,opt,name=http3_protocol_options,json=http3ProtocolOptions,proto3" json:"http3_protocol_options,omitempty"` // An optional override that the connection manager will write to the server - // header in 
responses. If not set, the default is *envoy*. + // header in responses. If not set, the default is ``envoy``. ServerName string `protobuf:"bytes,10,opt,name=server_name,json=serverName,proto3" json:"server_name,omitempty"` // Defines the action to be applied to the Server header on the response path. // By default, Envoy will overwrite the header with the value specified in @@ -488,6 +489,28 @@ type HttpConnectionManager struct { // Configuration for :ref:`HTTP access logs ` // emitted by the connection manager. AccessLog []*v31.AccessLog `protobuf:"bytes,13,rep,name=access_log,json=accessLog,proto3" json:"access_log,omitempty"` + // .. attention:: + // This field is deprecated in favor of + // :ref:`access_log_flush_interval + // `. + // Note that if both this field and :ref:`access_log_flush_interval + // ` + // are specified, the former (deprecated field) is ignored. + // + // Deprecated: Do not use. + AccessLogFlushInterval *duration.Duration `protobuf:"bytes,54,opt,name=access_log_flush_interval,json=accessLogFlushInterval,proto3" json:"access_log_flush_interval,omitempty"` + // .. attention:: + // This field is deprecated in favor of + // :ref:`flush_access_log_on_new_request + // `. + // Note that if both this field and :ref:`flush_access_log_on_new_request + // ` + // are specified, the former (deprecated field) is ignored. + // + // Deprecated: Do not use. + FlushAccessLogOnNewRequest bool `protobuf:"varint,55,opt,name=flush_access_log_on_new_request,json=flushAccessLogOnNewRequest,proto3" json:"flush_access_log_on_new_request,omitempty"` + // Additional access log options for HTTP connection manager. 
+ AccessLogOptions *HttpConnectionManager_HcmAccessLogOptions `protobuf:"bytes,56,opt,name=access_log_options,json=accessLogOptions,proto3" json:"access_log_options,omitempty"` // If set to true, the connection manager will use the real remote address // of the client connection when determining internal versus external origin and manipulating // various headers. If set to false or absent, the connection manager will use the @@ -520,6 +543,14 @@ type HttpConnectionManager struct { // // [#extension-category: envoy.http.original_ip_detection] OriginalIpDetectionExtensions []*v3.TypedExtensionConfig `protobuf:"bytes,46,rep,name=original_ip_detection_extensions,json=originalIpDetectionExtensions,proto3" json:"original_ip_detection_extensions,omitempty"` + // The configuration for the early header mutation extensions. + // + // When configured the extensions will be called before any routing, tracing, or any filter processing. + // Each extension will be applied in the order they are configured. + // If the same header is mutated by multiple extensions, then the last extension will win. + // + // [#extension-category: envoy.http.early_header_mutation] + EarlyHeaderMutationExtensions []*v3.TypedExtensionConfig `protobuf:"bytes,52,rep,name=early_header_mutation_extensions,json=earlyHeaderMutationExtensions,proto3" json:"early_header_mutation_extensions,omitempty"` // Configures what network addresses are considered internal for stats and header sanitation // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more @@ -531,7 +562,7 @@ type HttpConnectionManager struct { // has mutated the request headers. 
While :ref:`use_remote_address // ` // will also suppress XFF addition, it has consequences for logging and other - // Envoy uses of the remote address, so *skip_xff_append* should be used + // Envoy uses of the remote address, so ``skip_xff_append`` should be used // when only an elision of XFF addition is intended. SkipXffAppend bool `protobuf:"varint,21,opt,name=skip_xff_append,json=skipXffAppend,proto3" json:"skip_xff_append,omitempty"` // Via header value to append to request and response headers. If this is @@ -558,8 +589,8 @@ type HttpConnectionManager struct { // ` // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in // the client certificate to be forwarded. Note that in the - // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and - // *By* is always set when the client certificate presents the URI type Subject Alternative Name + // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, ``Hash`` is always set, and + // ``By`` is always set when the client certificate presents the URI type Subject Alternative Name // value. SetCurrentClientCertDetails *HttpConnectionManager_SetCurrentClientCertDetails `protobuf:"bytes,17,opt,name=set_current_client_cert_details,json=setCurrentClientCertDetails,proto3" json:"set_current_client_cert_details,omitempty"` // If proxy_100_continue is true, Envoy will proxy incoming "Expect: @@ -571,7 +602,7 @@ type HttpConnectionManager struct { // :ref:`use_remote_address // ` // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is - // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. + // an IPv4 address, the address will be mapped to IPv6 before it is appended to ``x-forwarded-for``. // This is useful for testing compatibility of upstream services that parse the header value. For // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. 
See `IPv4-Mapped IPv6 Addresses // `_ for details. This will also affect the @@ -583,7 +614,7 @@ type HttpConnectionManager struct { RepresentIpv4RemoteAddressAsIpv4MappedIpv6 bool `protobuf:"varint,20,opt,name=represent_ipv4_remote_address_as_ipv4_mapped_ipv6,json=representIpv4RemoteAddressAsIpv4MappedIpv6,proto3" json:"represent_ipv4_remote_address_as_ipv4_mapped_ipv6,omitempty"` UpgradeConfigs []*HttpConnectionManager_UpgradeConfig `protobuf:"bytes,23,rep,name=upgrade_configs,json=upgradeConfigs,proto3" json:"upgrade_configs,omitempty"` // Should paths be normalized according to RFC 3986 before any processing of - // requests by HTTP filters or routing? This affects the upstream *:path* header + // requests by HTTP filters or routing? This affects the upstream ``:path`` header // as well. For paths that fail this check, Envoy will respond with 400 to // paths that are malformed. This defaults to false currently but will default // true in the future. When not specified, this value may be overridden by the @@ -598,9 +629,9 @@ type HttpConnectionManager struct { // is present.] NormalizePath *wrappers.BoolValue `protobuf:"bytes,30,opt,name=normalize_path,json=normalizePath,proto3" json:"normalize_path,omitempty"` // Determines if adjacent slashes in the path are merged into one before any processing of - // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without - // setting this option, incoming requests with path `//dir///file` will not match against route - // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of + // requests by HTTP filters or routing. This affects the upstream ``:path`` header as well. Without + // setting this option, incoming requests with path ``//dir///file`` will not match against route + // with ``prefix`` match set to ``/dir``. Defaults to ``false``. Note that slash merging is not part of // `HTTP spec `_ and is provided for convenience. 
// [#comment:TODO: This field is ignored when the // :ref:`header validation configuration ` @@ -641,10 +672,10 @@ type HttpConnectionManager struct { // local port. This affects the upstream host header unless the method is // CONNECT in which case if no filter adds a port the original port will be restored before headers are // sent upstream. - // Without setting this option, incoming requests with host `example:443` will not match against - // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part + // Without setting this option, incoming requests with host ``example:443`` will not match against + // route with :ref:`domains` match set to ``example``. Defaults to ``false``. Note that port removal is not part // of `HTTP spec `_ and is provided for convenience. - // Only one of `strip_matching_host_port` or `strip_any_host_port` can be set. + // Only one of ``strip_matching_host_port`` or ``strip_any_host_port`` can be set. StripMatchingHostPort bool `protobuf:"varint,39,opt,name=strip_matching_host_port,json=stripMatchingHostPort,proto3" json:"strip_matching_host_port,omitempty"` // Types that are assignable to StripPortMode: // *HttpConnectionManager_StripAnyHostPort @@ -663,13 +694,13 @@ type HttpConnectionManager struct { // ` or the new HTTP/2 option // :ref:`override_stream_error_on_invalid_http_message // ` - // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging + // ``not`` the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging // ` StreamErrorOnInvalidHttpMessage *wrappers.BoolValue `protobuf:"bytes,40,opt,name=stream_error_on_invalid_http_message,json=streamErrorOnInvalidHttpMessage,proto3" json:"stream_error_on_invalid_http_message,omitempty"` // [#not-implemented-hide:] Path normalization configuration. This includes // configurations for transformations (e.g. RFC 3986 normalization or merge // adjacent slashes) and the policy to apply them. 
The policy determines - // whether transformations affect the forwarded *:path* header. RFC 3986 path + // whether transformations affect the forwarded ``:path`` header. RFC 3986 path // normalization is enabled by default and the default policy is that the // normalized header will be forwarded. See :ref:`PathNormalizationOptions // ` @@ -678,11 +709,11 @@ type HttpConnectionManager struct { // Determines if trailing dot of the host should be removed from host/authority header before any // processing of request by HTTP filters or routing. // This affects the upstream host header. - // Without setting this option, incoming requests with host `example.com.` will not match against - // route with :ref:`domains` match set to `example.com`. Defaults to `false`. + // Without setting this option, incoming requests with host ``example.com.`` will not match against + // route with :ref:`domains` match set to ``example.com``. Defaults to ``false``. // When the incoming request contains a host/authority header that includes a port number, // setting this option will strip a trailing dot, if present, from the host section, - // leaving the port as is (e.g. host value `example.com.:443` will be updated to `example.com:443`). + // leaving the port as is (e.g. host value ``example.com.:443`` will be updated to ``example.com:443``). StripTrailingHostDot bool `protobuf:"varint,47,opt,name=strip_trailing_host_dot,json=stripTrailingHostDot,proto3" json:"strip_trailing_host_dot,omitempty"` // Proxy-Status HTTP response header configuration. // If this config is set, the Proxy-Status HTTP response header field is @@ -693,7 +724,7 @@ type HttpConnectionManager struct { // normalization for request attributes, such as URI path. // If the typed_header_validation_config is present it overrides the following options: // ``normalize_path``, ``merge_slashes``, ``path_with_escaped_slashes_action`` - // ``http_protocol_options.allow_chunked_length``. 
+ // ``http_protocol_options.allow_chunked_length``, ``common_http_protocol_options.headers_with_underscores_action``. // // The default UHV checks the following: // @@ -711,6 +742,13 @@ type HttpConnectionManager struct { // [#not-implemented-hide:] // [#extension-category: envoy.http.header_validators] TypedHeaderValidationConfig *v3.TypedExtensionConfig `protobuf:"bytes,50,opt,name=typed_header_validation_config,json=typedHeaderValidationConfig,proto3" json:"typed_header_validation_config,omitempty"` + // Append the `x-forwarded-port` header with the port value client used to connect to Envoy. It + // will be ignored if the `x-forwarded-port` header has been set by any trusted proxy in front of Envoy. + AppendXForwardedPort bool `protobuf:"varint,51,opt,name=append_x_forwarded_port,json=appendXForwardedPort,proto3" json:"append_x_forwarded_port,omitempty"` + // Whether the HCM will add ProxyProtocolFilterState to the Connection lifetime filter state. Defaults to `true`. + // This should be set to `false` in cases where Envoy's view of the downstream address may not correspond to the + // actual client address, for example, if there's another proxy in front of the Envoy. + AddProxyProtocolConnectionState *wrappers.BoolValue `protobuf:"bytes,53,opt,name=add_proxy_protocol_connection_state,json=addProxyProtocolConnectionState,proto3" json:"add_proxy_protocol_connection_state,omitempty"` } func (x *HttpConnectionManager) Reset() { @@ -906,6 +944,29 @@ func (x *HttpConnectionManager) GetAccessLog() []*v31.AccessLog { return nil } +// Deprecated: Do not use. +func (x *HttpConnectionManager) GetAccessLogFlushInterval() *duration.Duration { + if x != nil { + return x.AccessLogFlushInterval + } + return nil +} + +// Deprecated: Do not use. 
+func (x *HttpConnectionManager) GetFlushAccessLogOnNewRequest() bool { + if x != nil { + return x.FlushAccessLogOnNewRequest + } + return false +} + +func (x *HttpConnectionManager) GetAccessLogOptions() *HttpConnectionManager_HcmAccessLogOptions { + if x != nil { + return x.AccessLogOptions + } + return nil +} + func (x *HttpConnectionManager) GetUseRemoteAddress() *wrappers.BoolValue { if x != nil { return x.UseRemoteAddress @@ -927,6 +988,13 @@ func (x *HttpConnectionManager) GetOriginalIpDetectionExtensions() []*v3.TypedEx return nil } +func (x *HttpConnectionManager) GetEarlyHeaderMutationExtensions() []*v3.TypedExtensionConfig { + if x != nil { + return x.EarlyHeaderMutationExtensions + } + return nil +} + func (x *HttpConnectionManager) GetInternalAddressConfig() *HttpConnectionManager_InternalAddressConfig { if x != nil { return x.InternalAddressConfig @@ -1095,6 +1163,20 @@ func (x *HttpConnectionManager) GetTypedHeaderValidationConfig() *v3.TypedExtens return nil } +func (x *HttpConnectionManager) GetAppendXForwardedPort() bool { + if x != nil { + return x.AppendXForwardedPort + } + return false +} + +func (x *HttpConnectionManager) GetAddProxyProtocolConnectionState() *wrappers.BoolValue { + if x != nil { + return x.AddProxyProtocolConnectionState + } + return nil +} + type isHttpConnectionManager_RouteSpecifier interface { isHttpConnectionManager_RouteSpecifier() } @@ -1131,10 +1213,10 @@ type HttpConnectionManager_StripAnyHostPort struct { // of request by HTTP filters or routing. // This affects the upstream host header unless the method is CONNECT in // which case if no filter adds a port the original port will be restored before headers are sent upstream. - // Without setting this option, incoming requests with host `example:443` will not match against - // route with :ref:`domains` match set to `example`. Defaults to `false`. 
Note that port removal is not part + // Without setting this option, incoming requests with host ``example:443`` will not match against + // route with :ref:`domains` match set to ``example``. Defaults to ``false``. Note that port removal is not part // of `HTTP spec `_ and is provided for convenience. - // Only one of `strip_matching_host_port` or `strip_any_host_port` can be set. + // Only one of ``strip_matching_host_port`` or ``strip_any_host_port`` can be set. StripAnyHostPort bool `protobuf:"varint,42,opt,name=strip_any_host_port,json=stripAnyHostPort,proto3,oneof"` } @@ -1247,10 +1329,10 @@ type ResponseMapper struct { Filter *v31.AccessLogFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` // The new response status code if specified. StatusCode *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` - // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%` - // command operator in the `body_format`. + // The new local reply body text if specified. It will be used in the ``%LOCAL_REPLY_BODY%`` + // command operator in the ``body_format``. Body *v3.DataSource `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` - // A per mapper `body_format` to override the :ref:`body_format `. + // A per mapper ``body_format`` to override the :ref:`body_format `. // It will be used when this mapper is matched. BodyFormatOverride *v3.SubstitutionFormatString `protobuf:"bytes,4,opt,name=body_format_override,json=bodyFormatOverride,proto3" json:"body_format_override,omitempty"` // HTTP headers to add to a local reply. 
This allows the response mapper to append, to add @@ -1676,7 +1758,7 @@ func (m *HttpFilter) GetConfigType() isHttpFilter_ConfigType { return nil } -func (x *HttpFilter) GetTypedConfig() *any.Any { +func (x *HttpFilter) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*HttpFilter_TypedConfig); ok { return x.TypedConfig } @@ -1709,7 +1791,7 @@ type HttpFilter_TypedConfig struct { // :ref:`ExtensionWithMatcher ` // with the desired HTTP filter. // [#extension-category: envoy.filters.http] - TypedConfig *any.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` } type HttpFilter_ConfigDiscovery struct { @@ -1734,7 +1816,7 @@ type RequestIDExtension struct { unknownFields protoimpl.UnknownFields // Request ID extension specific configuration. - TypedConfig *any.Any `protobuf:"bytes,1,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` + TypedConfig *any1.Any `protobuf:"bytes,1,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` } func (x *RequestIDExtension) Reset() { @@ -1769,7 +1851,7 @@ func (*RequestIDExtension) Descriptor() ([]byte, []int) { return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{8} } -func (x *RequestIDExtension) GetTypedConfig() *any.Any { +func (x *RequestIDExtension) GetTypedConfig() *any1.Any { if x != nil { return x.TypedConfig } @@ -1837,7 +1919,7 @@ type HttpConnectionManager_Tracing struct { // Target percentage of requests managed by this HTTP connection manager that will be force // traced if the :ref:`x-client-trace-id ` // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager + // 'tracing.client_enabled' in the :ref:`HTTP Connection Manager // `. 
// Default: 100% ClientSampling *v33.Percent `protobuf:"bytes,3,opt,name=client_sampling,json=clientSampling,proto3" json:"client_sampling,omitempty"` @@ -1869,7 +1951,7 @@ type HttpConnectionManager_Tracing struct { // If not specified, no tracing will be performed. // // .. attention:: - // Please be aware that *envoy.tracers.opencensus* provider can only be configured once + // Please be aware that ``envoy.tracers.opencensus`` provider can only be configured once // in Envoy lifetime. // Any attempts to reconfigure it or to use different configurations for different HCM filters // will be rejected. @@ -2200,17 +2282,17 @@ func (x *HttpConnectionManager_UpgradeConfig) GetEnabled() *wrappers.BoolValue { // path will be visible internally if a transformation is enabled. Any path rewrites that the // router performs (e.g. :ref:`regex_rewrite // ` or :ref:`prefix_rewrite -// `) will apply to the *:path* header +// `) will apply to the ``:path`` header // destined for the upstream. // -// Note: access logging and tracing will show the original *:path* header. +// Note: access logging and tracing will show the original ``:path`` header. type HttpConnectionManager_PathNormalizationOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // [#not-implemented-hide:] Normalization applies internally before any processing of requests by - // HTTP filters, routing, and matching *and* will affect the forwarded *:path* header. Defaults + // HTTP filters, routing, and matching *and* will affect the forwarded ``:path`` header. Defaults // to :ref:`NormalizePathRFC3986 // `. 
When not // specified, this value may be overridden by the runtime variable @@ -2220,7 +2302,7 @@ type HttpConnectionManager_PathNormalizationOptions struct { ForwardingTransformation *v36.PathTransformation `protobuf:"bytes,1,opt,name=forwarding_transformation,json=forwardingTransformation,proto3" json:"forwarding_transformation,omitempty"` // [#not-implemented-hide:] Normalization only applies internally before any processing of // requests by HTTP filters, routing, and matching. These will be applied after full - // transformation is applied. The *:path* header before this transformation will be restored in + // transformation is applied. The ``:path`` header before this transformation will be restored in // the router filter and sent upstream unless it was mutated by a filter. Defaults to no // transformations. // Multiple actions can be applied in the same Transformation, forming a sequential @@ -2293,25 +2375,25 @@ type HttpConnectionManager_ProxyStatusConfig struct { unknownFields protoimpl.UnknownFields // If true, the details field of the Proxy-Status header is not populated with stream_info.response_code_details. - // This value defaults to `false`, i.e. the `details` field is populated by default. + // This value defaults to ``false``, i.e. the ``details`` field is populated by default. RemoveDetails bool `protobuf:"varint,1,opt,name=remove_details,json=removeDetails,proto3" json:"remove_details,omitempty"` // If true, the details field of the Proxy-Status header will not contain - // connection termination details. This value defaults to `false`, i.e. the - // `details` field will contain connection termination details by default. + // connection termination details. This value defaults to ``false``, i.e. the + // ``details`` field will contain connection termination details by default. 
RemoveConnectionTerminationDetails bool `protobuf:"varint,2,opt,name=remove_connection_termination_details,json=removeConnectionTerminationDetails,proto3" json:"remove_connection_termination_details,omitempty"` // If true, the details field of the Proxy-Status header will not contain an - // enumeration of the Envoy ResponseFlags. This value defaults to `false`, - // i.e. the `details` field will contain a list of ResponseFlags by default. + // enumeration of the Envoy ResponseFlags. This value defaults to ``false``, + // i.e. the ``details`` field will contain a list of ResponseFlags by default. RemoveResponseFlags bool `protobuf:"varint,3,opt,name=remove_response_flags,json=removeResponseFlags,proto3" json:"remove_response_flags,omitempty"` // If true, overwrites the existing Status header with the response code // recommended by the Proxy-Status spec. - // This value defaults to `false`, i.e. the HTTP response code is not + // This value defaults to ``false``, i.e. the HTTP response code is not // overwritten. SetRecommendedResponseCode bool `protobuf:"varint,4,opt,name=set_recommended_response_code,json=setRecommendedResponseCode,proto3" json:"set_recommended_response_code,omitempty"` // The name of the proxy as it appears at the start of the Proxy-Status // header. // - // If neither of these values are set, this value defaults to `server_name`, + // If neither of these values are set, this value defaults to ``server_name``, // which itself defaults to "envoy". // // Types that are assignable to ProxyName: @@ -2406,13 +2488,13 @@ type isHttpConnectionManager_ProxyStatusConfig_ProxyName interface { } type HttpConnectionManager_ProxyStatusConfig_UseNodeId struct { - // If `use_node_id` is set, Proxy-Status headers will use the Envoy's node + // If ``use_node_id`` is set, Proxy-Status headers will use the Envoy's node // ID as the name of the proxy. 
UseNodeId bool `protobuf:"varint,5,opt,name=use_node_id,json=useNodeId,proto3,oneof"` } type HttpConnectionManager_ProxyStatusConfig_LiteralProxyName struct { - // If `literal_proxy_name` is set, Proxy-Status headers will use this + // If ``literal_proxy_name`` is set, Proxy-Status headers will use this // value as the name of the proxy. LiteralProxyName string `protobuf:"bytes,6,opt,name=literal_proxy_name,json=literalProxyName,proto3,oneof"` } @@ -2423,6 +2505,83 @@ func (*HttpConnectionManager_ProxyStatusConfig_UseNodeId) isHttpConnectionManage func (*HttpConnectionManager_ProxyStatusConfig_LiteralProxyName) isHttpConnectionManager_ProxyStatusConfig_ProxyName() { } +type HttpConnectionManager_HcmAccessLogOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The interval to flush the above access logs. By default, the HCM will flush exactly one access log + // on stream close, when the HTTP request is complete. If this field is set, the HCM will flush access + // logs periodically at the specified interval. This is especially useful in the case of long-lived + // requests, such as CONNECT and Websockets. Final access logs can be detected via the + // `requestComplete()` method of `StreamInfo` in access log filters, or thru the `%DURATION%` substitution + // string. + // The interval must be at least 1 millisecond. + AccessLogFlushInterval *duration.Duration `protobuf:"bytes,1,opt,name=access_log_flush_interval,json=accessLogFlushInterval,proto3" json:"access_log_flush_interval,omitempty"` + // If set to true, HCM will flush an access log when a new HTTP request is received, after request + // headers have been evaluated, before iterating through the HTTP filter chain. + // This log record, if enabled, does not depend on periodic log records or request completion log. + // Details related to upstream cluster, such as upstream host, will not be available for this log. 
+ FlushAccessLogOnNewRequest bool `protobuf:"varint,2,opt,name=flush_access_log_on_new_request,json=flushAccessLogOnNewRequest,proto3" json:"flush_access_log_on_new_request,omitempty"` + // If true, the HCM will flush an access log when a tunnel is successfully established. For example, + // this could be when an upstream has successfully returned 101 Switching Protocols, or when the proxy + // has returned 200 to a CONNECT request. + FlushLogOnTunnelSuccessfullyEstablished bool `protobuf:"varint,3,opt,name=flush_log_on_tunnel_successfully_established,json=flushLogOnTunnelSuccessfullyEstablished,proto3" json:"flush_log_on_tunnel_successfully_established,omitempty"` +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) Reset() { + *x = HttpConnectionManager_HcmAccessLogOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_HcmAccessLogOptions) ProtoMessage() {} + +func (x *HttpConnectionManager_HcmAccessLogOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_HcmAccessLogOptions.ProtoReflect.Descriptor instead. 
+func (*HttpConnectionManager_HcmAccessLogOptions) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 6} +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) GetAccessLogFlushInterval() *duration.Duration { + if x != nil { + return x.AccessLogFlushInterval + } + return nil +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) GetFlushAccessLogOnNewRequest() bool { + if x != nil { + return x.FlushAccessLogOnNewRequest + } + return false +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) GetFlushLogOnTunnelSuccessfullyEstablished() bool { + if x != nil { + return x.FlushLogOnTunnelSuccessfullyEstablished + } + return false +} + // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These // keys are matched against a set of :ref:`Key` // objects assembled from :ref:`ScopedRouteConfiguration` @@ -2446,7 +2605,7 @@ type ScopedRoutes_ScopeKeyBuilder struct { func (x *ScopedRoutes_ScopeKeyBuilder) Reset() { *x = ScopedRoutes_ScopeKeyBuilder{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[16] + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2459,7 +2618,7 @@ func (x *ScopedRoutes_ScopeKeyBuilder) String() string { func (*ScopedRoutes_ScopeKeyBuilder) ProtoMessage() {} func (x *ScopedRoutes_ScopeKeyBuilder) ProtoReflect() protoreflect.Message { - mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[16] + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { @@ -2496,7 +2655,7 @@ type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder struct { func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) Reset() { *x = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[17] + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2509,7 +2668,7 @@ func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) String() string { func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) ProtoMessage() {} func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) ProtoReflect() protoreflect.Message { - mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[17] + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2593,7 +2752,7 @@ type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor struct { func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) Reset() { *x = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18] + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2606,7 +2765,7 @@ func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) Stri func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) ProtoMessage() {} func 
(x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) ProtoReflect() protoreflect.Message { - mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18] + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2698,7 +2857,7 @@ type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) Reset() { *x = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement{} if protoimpl.UnsafeEnabled { - mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19] + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2711,7 +2870,7 @@ func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvEle func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) ProtoMessage() {} func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) ProtoReflect() protoreflect.Message { - mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19] + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2791,733 +2950,792 @@ var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connec 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, - 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, - 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc4, 0x39, 0x0a, 0x15, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x85, - 0x01, 0x0a, 0x0a, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x5c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd2, 0x40, 0x0a, 0x15, 0x48, 0x74, + 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x12, 0x85, 0x01, 0x0a, 0x0a, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, + 0x65, 0x63, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x09, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x73, + 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 
0x74, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x54, 0x0a, 0x03, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x64, 0x73, 0x48, 0x00, 0x52, 0x03, 0x72, 0x64, 0x73, 0x12, 0x4e, 0x0a, 0x0c, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0b, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x70, 0x0a, 0x0d, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x1f, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, + 0x0c, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x6a, 0x0a, + 0x0c, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 
0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, - 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x54, 0x79, 0x70, - 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x63, 0x6f, 0x64, - 0x65, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x12, 0x54, 0x0a, 0x03, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x64, 0x73, 0x48, - 0x00, 0x52, 0x03, 0x72, 0x64, 0x73, 0x12, 0x4e, 0x0a, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x70, 0x0a, 0x0d, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x1f, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x49, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, - 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x63, 0x6f, 0x70, - 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x6a, 0x0a, 0x0c, 0x68, 0x74, 0x74, 0x70, - 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x47, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, - 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0e, 0x61, 0x64, 0x64, 0x5f, 0x75, 0x73, 0x65, 0x72, - 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, - 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x61, 0x64, 0x64, 0x55, 0x73, 0x65, - 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x74, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, - 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0b, 0x68, 0x74, + 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0e, 0x61, 0x64, 0x64, + 0x5f, 
0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x61, + 0x64, 0x64, 0x55, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x74, 0x0a, 0x07, 0x74, + 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, + 0x67, 0x12, 0x73, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x19, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5e, 0x0a, 0x15, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 
0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, + 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x13, 0x68, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x69, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, + 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x14, 0x68, 0x74, 0x74, + 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x60, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x33, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x33, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68, + 0x74, 0x74, 0x70, 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x2c, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, + 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0xb9, 0x01, 0x0a, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 
0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x72, 0x0a, + 0x1c, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x30, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x5d, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x6b, 0x62, 0x18, 0x1d, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x0a, 0xfa, 0x42, 0x07, 0x2a, 0x05, 0x18, 0x80, 0x40, 0x20, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x78, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4b, 0x62, + 0x12, 0x52, 0x0a, 0x13, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, + 0x01, 0x52, 0x11, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4b, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, + 0x01, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x62, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x29, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0xfa, + 0x42, 0x05, 0xaa, 0x01, 0x02, 0x32, 0x00, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x15, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 
0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4d, 0x0a, 0x15, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, + 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x1a, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x13, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, + 0x6f, 0x67, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x09, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x6d, 0x0a, 0x19, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x36, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x17, 0x18, 0x01, 0xfa, 0x42, 0x09, 0xaa, 0x01, + 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x52, 0x16, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x6c, 0x75, 0x73, 0x68, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x50, 0x0a, 0x1f, 0x66, 0x6c, 0x75, 
0x73, + 0x68, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x6e, 0x5f, + 0x6e, 0x65, 0x77, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x37, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x1a, + 0x66, 0x6c, 0x75, 0x73, 0x68, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, + 0x4e, 0x65, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x94, 0x01, 0x0a, 0x12, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x38, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x66, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x54, 0x72, 0x61, 0x63, - 0x69, 0x6e, 0x67, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x73, 0x0a, 0x1c, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x23, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x07, 0x8a, - 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x48, 0x74, - 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x5e, 0x0a, 0x15, 0x68, 
0x74, 0x74, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x68, 0x74, - 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x69, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x07, 0x8a, - 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x60, 0x0a, 0x16, - 0x68, 0x74, 0x74, 0x70, 0x33, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x33, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2c, - 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 
0x02, 0xc8, 0x01, 0x00, - 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0xb9, 0x01, 0x0a, - 0x1c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x22, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x6d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, - 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x1a, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x72, 0x0a, 0x1c, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x30, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x1a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, - 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5d, 0x0a, 0x16, - 0x6d, 0x61, 
0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x5f, 0x6b, 0x62, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x2a, - 0x05, 0x18, 0x80, 0x40, 0x20, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4b, 0x62, 0x12, 0x52, 0x0a, 0x13, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x11, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, - 0x4b, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x0e, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x62, 0x0a, 0x17, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x48, 0x63, 0x6d, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x10, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 
0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x51, 0x0a, 0x12, 0x75, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, - 0x32, 0x00, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x12, 0x3e, 0x0a, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0c, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x12, 0x4d, 0x0a, 0x15, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x6f, 0x73, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x64, 0x65, 0x6c, 0x61, - 0x79, 0x65, 0x64, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, - 0x43, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x0d, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x51, 0x0a, 0x12, 0x75, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x6d, 
0x6f, - 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x8a, 0x93, - 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x10, 0x75, 0x73, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x78, 0x66, 0x66, 0x5f, 0x6e, - 0x75, 0x6d, 0x5f, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x70, 0x73, 0x18, - 0x13, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x78, 0x66, 0x66, 0x4e, 0x75, 0x6d, 0x54, 0x72, 0x75, - 0x73, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x70, 0x73, 0x12, 0x73, 0x0a, 0x20, 0x6f, 0x72, 0x69, 0x67, - 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x70, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2e, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1d, - 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x49, 0x70, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xa0, 0x01, - 0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x68, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 
0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, - 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x78, 0x66, 0x66, 0x5f, 0x61, 0x70, 0x70, - 0x65, 0x6e, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x58, - 0x66, 0x66, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x1d, 0x0a, 0x03, 0x76, 0x69, 0x61, 0x18, - 0x16, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, - 0x01, 0x00, 0x52, 0x03, 0x76, 0x69, 0x61, 0x12, 0x4a, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x0f, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x49, 0x64, 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x20, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x70, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x49, 0x64, 0x12, 0x47, 0x0a, 0x21, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x5f, 0x73, - 0x65, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x5f, 0x69, 0x6e, - 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x25, 
0x20, 0x01, 0x28, 0x08, 0x52, - 0x1c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x49, 0x64, 0x49, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xb4, 0x01, - 0x0a, 0x1b, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x10, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x6b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, - 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x18, 0x66, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x12, 0xb4, 0x01, 0x0a, 0x1f, 0x73, 0x65, 0x74, 0x5f, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, - 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x6e, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, - 0x70, 
0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x2e, 0x53, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x1b, - 0x73, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x31, 0x30, 0x30, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, - 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x31, 0x30, - 0x30, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x12, 0x65, 0x0a, 0x31, 0x72, 0x65, 0x70, - 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x69, 0x70, - 0x76, 0x34, 0x5f, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x69, 0x70, 0x76, 0x36, 0x18, 0x14, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x2a, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x49, - 0x70, 0x76, 0x34, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x41, 0x73, 0x49, 0x70, 0x76, 0x34, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x49, 0x70, 0x76, 0x36, - 0x12, 0x89, 0x01, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x60, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, - 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 
0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x75, 0x70, - 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x41, 0x0a, 0x0e, - 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x1e, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x0d, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, - 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, 0x73, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x6c, 0x61, - 0x73, 0x68, 0x65, 0x73, 0x12, 0xb7, 0x01, 0x0a, 0x20, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x77, 0x69, - 0x74, 0x68, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x6c, 0x61, 0x73, 0x68, - 0x65, 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, - 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, 0x63, 0x61, - 0x70, 0x65, 0x64, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x1c, 0x70, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, - 0x64, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 
0x6e, 0x12, 0x81, - 0x01, 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x49, 0x44, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x12, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x70, 0x6c, - 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4d, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x50, 0x0a, 0x18, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, - 0x67, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x27, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x17, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x11, 0x12, 0x0f, 0x73, 0x74, 0x72, 0x69, 0x70, - 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x52, 0x15, 0x73, 0x74, 0x72, 0x69, - 0x70, 0x4d, 0x61, 
0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x6f, 0x72, - 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x61, 0x6e, 0x79, 0x5f, 0x68, - 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, - 0x52, 0x10, 0x73, 0x74, 0x72, 0x69, 0x70, 0x41, 0x6e, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x6f, - 0x72, 0x74, 0x12, 0x69, 0x0a, 0x24, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, - 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x28, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1f, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0xa9, 0x01, - 0x0a, 0x1a, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x6b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4e, 0x6f, 0x72, 0x6d, 0x61, - 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x18, 0x70, 0x61, 0x74, 0x68, 0x4e, 0x6f, 0x72, 0x6d, 
0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x74, 0x72, - 0x69, 0x70, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x6f, 0x73, 0x74, - 0x5f, 0x64, 0x6f, 0x74, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x74, 0x72, 0x69, - 0x70, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x48, 0x6f, 0x73, 0x74, 0x44, 0x6f, 0x74, - 0x12, 0x94, 0x01, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x31, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x64, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, - 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6f, 0x0a, 0x1e, 0x74, 0x79, 0x70, 0x65, 0x64, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, + 0x08, 0x01, 0x52, 0x10, 0x75, 0x73, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x78, 0x66, 0x66, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, + 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x70, 0x73, 0x18, 0x13, 0x20, 
0x01, + 0x28, 0x0d, 0x52, 0x11, 0x78, 0x66, 0x66, 0x4e, 0x75, 0x6d, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, + 0x64, 0x48, 0x6f, 0x70, 0x73, 0x12, 0x73, 0x0a, 0x20, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, + 0x6c, 0x5f, 0x69, 0x70, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1b, 0x74, 0x79, 0x70, - 0x65, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xf6, 0x04, 0x0a, 0x07, 0x54, 0x72, 0x61, - 0x63, 0x69, 0x6e, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6d, - 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x5f, - 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, - 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x53, 0x61, - 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x10, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, - 0x6c, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 
0x6e, 0x74, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, - 0x6c, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, - 0x62, 0x6f, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76, 0x65, 0x72, 0x62, - 0x6f, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, - 0x74, 0x61, 0x67, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, - 0x6d, 0x61, 0x78, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x67, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, - 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, - 0x61, 0x67, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, - 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x22, 0x28, 0x0a, 0x0d, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, - 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x3a, 0x5b, - 0x9a, 0xc5, 0x88, 0x1e, 0x56, 0x0a, 0x54, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 
0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x74, - 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, - 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x74, 0x61, 0x67, - 0x73, 0x1a, 0xe7, 0x01, 0x0a, 0x15, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x75, - 0x6e, 0x69, 0x78, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x78, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x40, - 0x0a, 0x0b, 0x63, 0x69, 0x64, 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x63, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, - 0x3a, 0x69, 0x9a, 0xc5, 0x88, 0x1e, 0x64, 0x0a, 0x62, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1d, 0x6f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x49, 0x70, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x73, 0x0a, 0x20, 0x65, 0x61, + 0x72, 0x6c, 
0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x34, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x1d, 0x65, 0x61, 0x72, 0x6c, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x75, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0xa0, 0x01, 0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x19, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x68, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x98, 0x02, 0x0a, 0x1b, - 0x53, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x73, - 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, - 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x04, 0x63, 0x65, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x64, - 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x10, 0x0a, - 0x03, 0x75, 0x72, 0x69, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x75, 0x72, 0x69, 0x3a, - 0x6f, 0x9a, 0xc5, 0x88, 0x1e, 0x6a, 0x0a, 0x68, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x78, 0x66, 0x66, 0x5f, 0x61, + 0x70, 0x70, 0x65, 0x6e, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, + 0x70, 0x58, 0x66, 0x66, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x1d, 0x0a, 0x03, 0x76, 0x69, + 0x61, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, + 0x02, 0xc8, 0x01, 0x00, 0x52, 0x03, 0x76, 0x69, 0x61, 0x12, 0x4a, 0x0a, 0x13, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x72, 
0x76, + 0x65, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x20, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x70, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x47, 0x0a, 0x21, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, + 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x5f, + 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x49, 0x64, 0x49, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0xb4, 0x01, 0x0a, 0x1b, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, + 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, + 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x18, 0x66, 0x6f, + 0x72, 0x77, 0x61, 0x72, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0xb4, 0x01, 0x0a, 0x1f, 0x73, 0x65, 0x74, 0x5f, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 
0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x6e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x1a, 0xae, 0x02, 0x0a, 0x0d, 0x55, 0x70, 0x67, 0x72, 0x61, - 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, - 0x61, 0x64, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x61, 0x0a, 0x07, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, + 0x52, 0x1b, 0x73, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x2c, 0x0a, + 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x31, 0x30, 0x30, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x69, + 0x6e, 0x75, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x31, 0x30, 0x30, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x12, 0x65, 0x0a, 0x31, 0x72, + 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x70, 0x76, 0x34, 
0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x61, 0x73, 0x5f, + 0x69, 0x70, 0x76, 0x34, 0x5f, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x69, 0x70, 0x76, 0x36, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x2a, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, + 0x74, 0x49, 0x70, 0x76, 0x34, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x41, 0x73, 0x49, 0x70, 0x76, 0x34, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x49, 0x70, + 0x76, 0x36, 0x12, 0x89, 0x01, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x60, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x34, - 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x3a, 0x61, 0x9a, 0xc5, 0x88, 0x1e, 0x5c, 0x0a, 0x5a, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 
0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe5, 0x01, 0x0a, 0x18, 0x50, 0x61, 0x74, 0x68, - 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x63, 0x0a, 0x19, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, - 0x6e, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, - 0x68, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x18, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x1a, 0x68, 0x74, 0x74, - 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, - 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x68, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, - 0xe4, 0x02, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x51, 0x0a, 0x25, - 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 
0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x22, 0x72, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x72, - 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, - 0x32, 0x0a, 0x15, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, - 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, - 0x61, 0x67, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x73, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, - 0x63, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x73, 0x65, 0x74, 0x52, - 0x65, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x5f, 0x6e, 0x6f, - 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x75, - 0x73, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x6c, 0x69, 0x74, 0x65, - 0x72, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x50, - 0x72, 0x6f, 0x78, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x78, - 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x36, 0x0a, 0x09, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, 0x4f, 0x10, 0x00, 0x12, 0x09, 0x0a, - 0x05, 0x48, 0x54, 0x54, 0x50, 0x31, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, - 
0x32, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x33, 0x10, 0x03, 0x22, 0x53, - 0x0a, 0x1a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, - 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0d, 0x0a, 0x09, - 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, - 0x50, 0x50, 0x45, 0x4e, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x41, 0x42, 0x53, 0x45, 0x4e, 0x54, 0x10, - 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x41, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x52, 0x4f, 0x55, 0x47, - 0x48, 0x10, 0x02, 0x22, 0x79, 0x0a, 0x18, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x41, 0x4e, 0x49, 0x54, 0x49, 0x5a, 0x45, 0x10, 0x00, 0x12, 0x10, 0x0a, - 0x0c, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, - 0x12, 0x0a, 0x0e, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, - 0x44, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x41, 0x4e, 0x49, 0x54, 0x49, 0x5a, 0x45, 0x5f, - 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, - 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x22, 0xa0, - 0x01, 0x0a, 0x1c, 0x50, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, 0x63, 0x61, 0x70, - 0x65, 0x64, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x23, 0x0a, 0x1f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x41, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x43, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, - 0x4c, 0x54, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x4b, 0x45, 0x45, 0x50, 0x5f, 0x55, 0x4e, 0x43, - 0x48, 0x41, 0x4e, 0x47, 0x45, 0x44, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, - 0x43, 0x54, 0x5f, 0x52, 0x45, 0x51, 
0x55, 0x45, 0x53, 0x54, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, - 0x55, 0x4e, 0x45, 0x53, 0x43, 0x41, 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x44, - 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x18, 0x0a, 0x14, 0x55, 0x4e, 0x45, 0x53, 0x43, - 0x41, 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x10, - 0x04, 0x3a, 0x53, 0x9a, 0xc5, 0x88, 0x1e, 0x4e, 0x0a, 0x4c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x42, 0x16, 0x0a, 0x0f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, - 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x11, - 0x0a, 0x0f, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x6f, 0x64, - 0x65, 0x4a, 0x04, 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x52, 0x0c, 0x69, - 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xca, 0x01, 0x0a, 0x10, - 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x65, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x4b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, + 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x73, 0x12, 0x41, + 0x0a, 0x0e, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0d, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x61, 0x74, + 0x68, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, 0x73, 0x6c, 0x61, 0x73, 0x68, + 0x65, 0x73, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, + 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0xb7, 0x01, 0x0a, 0x20, 0x70, 0x61, 0x74, 0x68, 0x5f, + 0x77, 0x69, 0x74, 0x68, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x6c, 0x61, + 0x73, 0x68, 0x65, 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x2d, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07, - 0x6d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x12, 0x4f, 0x0a, 0x0b, 0x62, 0x6f, 0x64, 0x79, 0x5f, - 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x62, 0x6f, - 0x64, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x9c, 0x03, 0x0a, 0x0e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x4c, 0x0a, 0x06, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, - 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, - 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4a, 0x0a, 0x0b, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0xfa, 0x42, - 0x08, 0x2a, 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x60, 0x0a, 0x14, 0x62, - 0x6f, 0x64, 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, - 0x69, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x12, 0x62, 0x6f, 0x64, 0x79, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x58, 0x0a, - 0x0e, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 
0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, - 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x22, 0xc7, 0x01, 0x0a, 0x03, 0x52, 0x64, 0x73, 0x12, - 0x51, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, - 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x41, - 0x9a, 0xc5, 0x88, 0x1e, 0x3c, 0x0a, 0x3a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x64, - 0x73, 0x22, 0xf7, 0x01, 0x0a, 0x1d, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, - 0x69, 0x73, 0x74, 0x12, 0x79, 0x0a, 0x1b, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 
0x6f, - 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, - 0x02, 0x08, 0x01, 0x52, 0x19, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x5b, - 0x9a, 0xc5, 0x88, 0x1e, 0x56, 0x0a, 0x54, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, + 0x63, 0x61, 0x70, 0x65, 0x64, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x1c, 0x70, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, 0x63, 0x61, + 0x70, 0x65, 0x64, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x81, 0x01, 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, - 0x6f, 0x70, 0x65, 0x64, 0x52, 
0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x22, 0xdf, 0x0e, 0x0a, 0x0c, - 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x11, 0x73, 0x63, - 0x6f, 0x70, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x59, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, - 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, - 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x73, 0x63, 0x6f, 0x70, - 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x11, 0x72, - 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x64, 0x73, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xa5, 0x01, 0x0a, 0x20, - 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 
0x5f, 0x6c, 0x69, 0x73, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x69, - 0x73, 0x74, 0x48, 0x00, 0x52, 0x1d, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, - 0x69, 0x73, 0x74, 0x12, 0x67, 0x0a, 0x0a, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x64, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x48, - 0x00, 0x52, 0x09, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x1a, 0xd9, 0x09, 0x0a, - 0x0f, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, - 0x12, 0x91, 0x01, 0x0a, 0x09, 0x66, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x69, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 
0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x72, 0x65, + 0x70, 0x6c, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x4d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x50, 0x0a, 0x18, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x27, 0x20, + 0x01, 0x28, 0x08, 0x42, 0x17, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x11, 0x12, 0x0f, 0x73, 0x74, 0x72, + 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x52, 0x15, 0x73, 0x74, + 0x72, 0x69, 0x70, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x48, 0x6f, 0x73, 0x74, 0x50, + 0x6f, 0x72, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x61, 0x6e, 0x79, + 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, + 0x48, 0x01, 0x52, 0x10, 0x73, 0x74, 0x72, 0x69, 0x70, 0x41, 0x6e, 0x79, 0x48, 0x6f, 0x73, 0x74, + 0x50, 0x6f, 0x72, 0x74, 0x12, 0x69, 0x0a, 0x24, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x28, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0xa9, 0x01, 0x0a, 0x1a, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x6b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, - 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, - 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x09, 0x66, 0x72, 0x61, 0x67, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x1a, 0xd5, 0x07, 0x0a, 0x0f, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, - 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0xb6, 0x01, 0x0a, 0x16, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, - 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x7e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, - 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 
0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, - 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, - 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x14, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, - 0x72, 0x1a, 0x8f, 0x05, 0x0a, 0x14, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, - 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x70, 0x61, 0x72, - 0x61, 0x74, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0xa5, 0x01, 0x0a, - 0x07, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x88, - 0x01, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4e, 0x6f, 0x72, + 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x18, 0x70, 0x61, 0x74, 0x68, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x73, + 0x74, 0x72, 0x69, 0x70, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x74, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x74, + 0x72, 0x69, 0x70, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x48, 0x6f, 0x73, 0x74, 0x44, + 0x6f, 0x74, 0x12, 0x94, 0x01, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x31, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x64, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6f, 0x0a, 0x1e, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x32, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1b, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x12, 0x35, 0x0a, 0x17, 0x61, 0x70, + 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x78, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, + 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x61, 0x70, 0x70, + 0x65, 0x6e, 0x64, 0x58, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x50, 0x6f, 0x72, + 0x74, 0x12, 0x68, 0x0a, 0x23, 0x61, 0x64, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1f, 0x61, 0x64, 0x64, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0xf6, 0x04, 0x0a, 0x07, + 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, 0x61, 0x6e, 0x64, + 0x6f, 0x6d, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x61, 0x6e, 0x64, 0x6f, + 0x6d, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x10, 0x6f, 0x76, 0x65, + 0x72, 0x61, 0x6c, 0x6c, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, + 
0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x6f, 0x76, 0x65, + 0x72, 0x61, 0x6c, 0x6c, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x10, 0x6d, 0x61, 0x78, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x67, 0x4c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, + 0x67, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x08, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x22, 0x28, 0x0a, 0x0d, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, + 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, + 0x01, 0x3a, 0x5b, 0x9a, 0xc5, 0x88, 
0x1e, 0x56, 0x0a, 0x54, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, + 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x4a, 0x04, + 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x66, 0x6f, 0x72, 0x5f, + 0x74, 0x61, 0x67, 0x73, 0x1a, 0xe7, 0x01, 0x0a, 0x15, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, + 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x78, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x69, 0x64, 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, + 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x63, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x73, 0x3a, 0x69, 0x9a, 0xc5, 0x88, 0x1e, 0x64, 0x0a, 0x62, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 
0x67, 0x65, 0x72, 0x2e, + 0x76, 0x32, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x98, + 0x02, 0x0a, 0x1b, 0x53, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x34, + 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x73, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x04, 0x63, 0x65, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x64, 0x6e, 0x73, + 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x75, + 0x72, 0x69, 0x3a, 0x6f, 0x9a, 0xc5, 0x88, 0x1e, 0x6a, 0x0a, 0x68, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, + 0x32, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 
0x73, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x1a, 0xae, 0x02, 0x0a, 0x0d, 0x55, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x75, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x61, + 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, - 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, - 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, - 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x4b, 0x76, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x07, 0x65, 0x6c, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0xdb, 0x01, 0x0a, 0x09, 0x4b, 0x76, 0x45, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x09, 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, - 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x8b, 0x01, 0x9a, 0xc5, 0x88, 0x1e, 0x85, 0x01, 0x0a, 0x82, 0x01, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 
0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, + 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3a, 0x61, 0x9a, 0xc5, 0x88, 0x1e, 0x5c, 0x0a, 0x5a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, - 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, - 0x64, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, - 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4b, 0x76, 0x45, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x3a, 0x7f, 0x9a, 0xc5, 0x88, 0x1e, 0x7a, 0x0a, 0x78, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, - 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x46, - 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x48, - 
0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, - 0x74, 0x6f, 0x72, 0x42, 0x0e, 0x0a, 0x0c, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x3a, 0x6a, 0x9a, 0xc5, 0x88, 0x1e, 0x65, 0x0a, 0x63, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe5, 0x01, 0x0a, 0x18, 0x50, + 0x61, 0x74, 0x68, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x63, 0x0a, 0x19, 0x66, 0x6f, 0x72, 0x77, 0x61, + 0x72, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x2e, + 0x50, 0x61, 0x74, 0x68, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x18, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x1a, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, + 0x74, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x68, 0x74, 0x74, 0x70, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x54, 0x72, 
0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0xe4, 0x02, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, + 0x51, 0x0a, 0x25, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x22, + 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x13, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x73, 0x65, 0x74, 0x5f, 0x72, 0x65, + 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x73, + 0x65, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x75, 0x73, 0x65, + 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, + 0x52, 0x09, 0x75, 0x73, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x6c, + 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 
0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x6c, 0x69, 0x74, 0x65, 0x72, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x9d, 0x02, 0x0a, 0x13, 0x48, 0x63, + 0x6d, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, + 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x16, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x1f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x6e, 0x5f, 0x6e, 0x65, 0x77, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, + 0x66, 0x6c, 0x75, 0x73, 0x68, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, + 0x4e, 0x65, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5d, 0x0a, 0x2c, 0x66, 0x6c, + 0x75, 0x73, 0x68, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x6e, 0x5f, 0x74, 0x75, 0x6e, 0x6e, 0x65, + 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x5f, 0x65, + 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x27, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, 0x54, 0x75, 0x6e, 0x6e, + 0x65, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x45, 0x73, + 0x74, 0x61, 
0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x22, 0x36, 0x0a, 0x09, 0x43, 0x6f, 0x64, + 0x65, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, 0x4f, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x31, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, + 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x33, 0x10, + 0x03, 0x22, 0x53, 0x0a, 0x1a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x0d, 0x0a, 0x09, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x00, 0x12, 0x14, + 0x0a, 0x10, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x41, 0x42, 0x53, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x41, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x52, + 0x4f, 0x55, 0x47, 0x48, 0x10, 0x02, 0x22, 0x79, 0x0a, 0x18, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, + 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x41, 0x4e, 0x49, 0x54, 0x49, 0x5a, 0x45, 0x10, 0x00, + 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, + 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x4f, 0x52, + 0x57, 0x41, 0x52, 0x44, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x41, 0x4e, 0x49, 0x54, 0x49, + 0x5a, 0x45, 0x5f, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x57, 0x41, + 0x59, 0x53, 0x5f, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x04, 0x22, 0xa0, 0x01, 0x0a, 0x1c, 0x50, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, + 0x63, 0x61, 0x70, 0x65, 0x64, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x1f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x50, 0x45, 
0x43, 0x49, 0x46, 0x49, 0x43, 0x5f, 0x44, 0x45, + 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x4b, 0x45, 0x45, 0x50, 0x5f, + 0x55, 0x4e, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x44, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x52, + 0x45, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x02, 0x12, + 0x19, 0x0a, 0x15, 0x55, 0x4e, 0x45, 0x53, 0x43, 0x41, 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, + 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x18, 0x0a, 0x14, 0x55, 0x4e, + 0x45, 0x53, 0x43, 0x41, 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x57, 0x41, + 0x52, 0x44, 0x10, 0x04, 0x3a, 0x53, 0x9a, 0xc5, 0x88, 0x1e, 0x4e, 0x0a, 0x4c, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x42, 0x16, 0x0a, 0x0f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x6d, 0x6f, 0x64, 0x65, 0x4a, 0x04, 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, + 0x52, 0x0c, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xca, + 0x01, 0x0a, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x65, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 
0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, - 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, - 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x42, - 0x0b, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x5a, 0x9a, 0xc5, - 0x88, 0x1e, 0x55, 0x0a, 0x53, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, - 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, - 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x3a, 0x4a, 0x9a, 0xc5, 0x88, 0x1e, 0x45, 0x0a, - 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, - 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x73, 0x42, 0x17, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, - 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xf1, 0x01, - 0x0a, 0x09, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x12, 0x65, 0x0a, 0x18, 0x73, - 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 
0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x15, 0x73, 0x63, 0x6f, - 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x72, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x14, 0x73, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x3a, 0x47, 0x9a, 0xc5, 0x88, 0x1e, 0x42, 0x0a, - 0x40, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x12, 0x4f, 0x0a, 0x0b, 0x62, 0x6f, + 0x64, 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, + 0x0a, 0x62, 0x6f, 0x64, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x9c, 0x03, 0x0a, 0x0e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x4c, + 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4a, 0x0a, 0x0b, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x0b, 0xfa, 0x42, 0x08, 0x2a, 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x52, 0x0a, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x60, + 0x0a, 0x14, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x6f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x12, 0x62, 0x6f, + 0x64, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, + 0x12, 0x58, 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, + 0x64, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x48, 
0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x0c, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x22, 0xc7, 0x01, 0x0a, 0x03, 0x52, + 0x64, 0x73, 0x12, 0x51, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4e, 0x61, 0x6d, + 0x65, 0x3a, 0x41, 0x9a, 0xc5, 0x88, 0x1e, 0x3c, 0x0a, 0x3a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x64, 0x73, 0x22, 0xf7, 0x01, 0x0a, 0x1d, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x79, 0x0a, 0x1b, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, + 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x19, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x3a, 0x5b, 0x9a, 0xc5, 0x88, 0x1e, 0x56, 0x0a, 0x54, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, + 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x22, 0xdf, + 0x0e, 0x0a, 0x0c, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8f, 0x01, 0x0a, + 0x11, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x59, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 
0x75, 0x69, 0x6c, + 0x64, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x4e, + 0x0a, 0x11, 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0f, 0x72, + 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xa5, + 0x01, 0x0a, 0x20, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6c, + 0x69, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, - 0x73, 0x22, 0xcc, 0x02, 0x0a, 0x0a, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, - 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, - 0x65, 0x64, 0x43, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x58, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, - 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, - 0x72, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x3a, 0x48, 0x9a, 0xc5, 0x88, 0x1e, 0x43, 0x0a, 0x41, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, - 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x0d, 0x0a, - 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x03, - 0x10, 0x04, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x22, 0x9f, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x3a, 0x50, 0x9a, 0xc5, 0x88, 0x1e, 0x4b, 0x0a, 0x49, 
0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0x8e, 0x01, 0x0a, 0x20, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x4d, 0x6f, 0x62, 0x69, - 0x6c, 0x65, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x6a, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x52, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x1d, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x67, 0x0a, 0x0a, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, + 0x5f, 0x72, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, + 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, + 0x64, 0x73, 0x48, 0x00, 0x52, 0x09, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x1a, + 0xd9, 0x09, 0x0a, 0x0f, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 
0x6c, + 0x64, 0x65, 0x72, 0x12, 0x91, 0x01, 0x0a, 0x09, 0x66, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x69, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x42, 0xef, 0x01, 0x0a, 0x49, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, - 0x33, 0x42, 0x1a, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x7c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, - 0x2f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x2f, 0x76, 0x33, 0x3b, 0x68, 0x74, 
0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x76, 0x33, 0xba, 0x80, 0xc8, - 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, + 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, + 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x09, 0x66, 0x72, + 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0xd5, 0x07, 0x0a, 0x0f, 0x46, 0x72, 0x61, 0x67, + 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0xb6, 0x01, 0x0a, 0x16, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x65, 0x78, 0x74, + 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x7e, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, + 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, + 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, + 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x14, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x6f, 0x72, 0x1a, 0x8f, 0x05, 0x0a, 0x14, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 
0x56, + 0x61, 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1b, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, + 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0xa5, 0x01, 0x0a, 0x07, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x88, 0x01, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, + 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x4b, 0x76, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x07, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0xdb, 0x01, 0x0a, 0x09, 0x4b, 0x76, 0x45, 0x6c, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x09, 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x09, 0x73, 0x65, 
0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x8b, 0x01, 0x9a, 0xc5, 0x88, 0x1e, 0x85, 0x01, + 0x0a, 0x82, 0x01, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, + 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4b, 0x76, 0x45, 0x6c, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x7f, 0x9a, 0xc5, 0x88, 0x1e, 0x7a, 0x0a, 0x78, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, + 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, + 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, + 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x0e, 0x0a, 0x0c, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x3a, 0x6a, 0x9a, 0xc5, 0x88, 
0x1e, 0x65, 0x0a, 0x63, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, + 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, + 0x65, 0x72, 0x42, 0x0b, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, + 0x5a, 0x9a, 0xc5, 0x88, 0x1e, 0x55, 0x0a, 0x53, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x3a, 0x4a, 0x9a, 0xc5, 0x88, + 0x1e, 0x45, 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, + 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x42, 0x17, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x22, 0xf1, 0x01, 0x0a, 0x09, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x12, 0x65, + 0x0a, 
0x18, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x15, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x72, 0x64, 0x73, 0x5f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x3a, 0x47, 0x9a, 0xc5, 0x88, + 0x1e, 0x42, 0x0a, 0x40, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, + 0x64, 0x52, 0x64, 0x73, 0x22, 0xcc, 0x02, 0x0a, 0x0a, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x12, 0x58, 0x0a, 0x10, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x69, 0x73, 0x63, + 0x6f, 0x76, 0x65, 0x72, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x3a, 0x48, 0x9a, 0xc5, 0x88, 0x1e, 0x43, 0x0a, 0x41, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, + 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x22, 0x9f, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x44, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x79, + 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x3a, 0x50, 0x9a, 0xc5, 0x88, 0x1e, 0x4b, 0x0a, 0x49, 0x65, 
0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8e, 0x01, 0x0a, 0x20, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x4d, + 0x6f, 0x62, 0x69, 0x6c, 0x65, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x6a, 0x0a, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x52, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, + 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xef, 0x01, 0x0a, 0x49, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x42, 0x1a, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 
0x7c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x2f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x76, 0x33, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3533,7 +3751,7 @@ func file_envoy_extensions_filters_network_http_connection_manager_v3_http_conne } var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes = make([]protoimpl.MessageInfo, 21) var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_goTypes = []interface{}{ (HttpConnectionManager_CodecType)(0), // 0: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.CodecType (HttpConnectionManager_ServerHeaderTransformation)(0), // 1: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ServerHeaderTransformation @@ -3556,109 +3774,115 @@ var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connec (*HttpConnectionManager_UpgradeConfig)(nil), // 18: 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig (*HttpConnectionManager_PathNormalizationOptions)(nil), // 19: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions (*HttpConnectionManager_ProxyStatusConfig)(nil), // 20: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ProxyStatusConfig - (*ScopedRoutes_ScopeKeyBuilder)(nil), // 21: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder - (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder)(nil), // 22: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder - (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor)(nil), // 23: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor - (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement)(nil), // 24: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement - (*v32.RouteConfiguration)(nil), // 25: envoy.config.route.v3.RouteConfiguration - (*wrappers.BoolValue)(nil), // 26: google.protobuf.BoolValue - (*v3.HttpProtocolOptions)(nil), // 27: envoy.config.core.v3.HttpProtocolOptions - (*v3.Http1ProtocolOptions)(nil), // 28: envoy.config.core.v3.Http1ProtocolOptions - (*v3.Http2ProtocolOptions)(nil), // 29: envoy.config.core.v3.Http2ProtocolOptions - (*v3.Http3ProtocolOptions)(nil), // 30: envoy.config.core.v3.Http3ProtocolOptions - (*v3.SchemeHeaderTransformation)(nil), // 31: envoy.config.core.v3.SchemeHeaderTransformation - (*wrappers.UInt32Value)(nil), // 32: google.protobuf.UInt32Value - (*duration.Duration)(nil), // 33: google.protobuf.Duration - (*v31.AccessLog)(nil), // 34: envoy.config.accesslog.v3.AccessLog - (*v3.TypedExtensionConfig)(nil), // 35: 
envoy.config.core.v3.TypedExtensionConfig - (*v3.SubstitutionFormatString)(nil), // 36: envoy.config.core.v3.SubstitutionFormatString - (*v31.AccessLogFilter)(nil), // 37: envoy.config.accesslog.v3.AccessLogFilter - (*v3.DataSource)(nil), // 38: envoy.config.core.v3.DataSource - (*v3.HeaderValueOption)(nil), // 39: envoy.config.core.v3.HeaderValueOption - (*v3.ConfigSource)(nil), // 40: envoy.config.core.v3.ConfigSource - (*v32.ScopedRouteConfiguration)(nil), // 41: envoy.config.route.v3.ScopedRouteConfiguration - (*any.Any)(nil), // 42: google.protobuf.Any - (*v3.ExtensionConfigSource)(nil), // 43: envoy.config.core.v3.ExtensionConfigSource - (*v33.Percent)(nil), // 44: envoy.type.v3.Percent - (*v34.CustomTag)(nil), // 45: envoy.type.tracing.v3.CustomTag - (*v35.Tracing_Http)(nil), // 46: envoy.config.trace.v3.Tracing.Http - (*v3.CidrRange)(nil), // 47: envoy.config.core.v3.CidrRange - (*v36.PathTransformation)(nil), // 48: envoy.type.http.v3.PathTransformation + (*HttpConnectionManager_HcmAccessLogOptions)(nil), // 21: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.HcmAccessLogOptions + (*ScopedRoutes_ScopeKeyBuilder)(nil), // 22: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder + (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder)(nil), // 23: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder + (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor)(nil), // 24: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor + (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement)(nil), // 25: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement + (*v32.RouteConfiguration)(nil), // 26: envoy.config.route.v3.RouteConfiguration + (*wrappers.BoolValue)(nil), // 
27: google.protobuf.BoolValue + (*v3.HttpProtocolOptions)(nil), // 28: envoy.config.core.v3.HttpProtocolOptions + (*v3.Http1ProtocolOptions)(nil), // 29: envoy.config.core.v3.Http1ProtocolOptions + (*v3.Http2ProtocolOptions)(nil), // 30: envoy.config.core.v3.Http2ProtocolOptions + (*v3.Http3ProtocolOptions)(nil), // 31: envoy.config.core.v3.Http3ProtocolOptions + (*v3.SchemeHeaderTransformation)(nil), // 32: envoy.config.core.v3.SchemeHeaderTransformation + (*wrappers.UInt32Value)(nil), // 33: google.protobuf.UInt32Value + (*duration.Duration)(nil), // 34: google.protobuf.Duration + (*v31.AccessLog)(nil), // 35: envoy.config.accesslog.v3.AccessLog + (*v3.TypedExtensionConfig)(nil), // 36: envoy.config.core.v3.TypedExtensionConfig + (*v3.SubstitutionFormatString)(nil), // 37: envoy.config.core.v3.SubstitutionFormatString + (*v31.AccessLogFilter)(nil), // 38: envoy.config.accesslog.v3.AccessLogFilter + (*v3.DataSource)(nil), // 39: envoy.config.core.v3.DataSource + (*v3.HeaderValueOption)(nil), // 40: envoy.config.core.v3.HeaderValueOption + (*v3.ConfigSource)(nil), // 41: envoy.config.core.v3.ConfigSource + (*v32.ScopedRouteConfiguration)(nil), // 42: envoy.config.route.v3.ScopedRouteConfiguration + (*any1.Any)(nil), // 43: google.protobuf.Any + (*v3.ExtensionConfigSource)(nil), // 44: envoy.config.core.v3.ExtensionConfigSource + (*v33.Percent)(nil), // 45: envoy.type.v3.Percent + (*v34.CustomTag)(nil), // 46: envoy.type.tracing.v3.CustomTag + (*v35.Tracing_Http)(nil), // 47: envoy.config.trace.v3.Tracing.Http + (*v3.CidrRange)(nil), // 48: envoy.config.core.v3.CidrRange + (*v36.PathTransformation)(nil), // 49: envoy.type.http.v3.PathTransformation } var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_depIdxs = []int32{ 0, // 0: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.codec_type:type_name -> 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.CodecType 8, // 1: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.rds:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.Rds - 25, // 2: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.route_config:type_name -> envoy.config.route.v3.RouteConfiguration + 26, // 2: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.route_config:type_name -> envoy.config.route.v3.RouteConfiguration 10, // 3: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.scoped_routes:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes 12, // 4: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http_filters:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter - 26, // 5: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.add_user_agent:type_name -> google.protobuf.BoolValue + 27, // 5: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.add_user_agent:type_name -> google.protobuf.BoolValue 15, // 6: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.tracing:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing - 27, // 7: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.common_http_protocol_options:type_name -> envoy.config.core.v3.HttpProtocolOptions - 28, // 8: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http_protocol_options:type_name -> envoy.config.core.v3.Http1ProtocolOptions - 29, // 9: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions - 30, // 10: 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http3_protocol_options:type_name -> envoy.config.core.v3.Http3ProtocolOptions + 28, // 7: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.common_http_protocol_options:type_name -> envoy.config.core.v3.HttpProtocolOptions + 29, // 8: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http_protocol_options:type_name -> envoy.config.core.v3.Http1ProtocolOptions + 30, // 9: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions + 31, // 10: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http3_protocol_options:type_name -> envoy.config.core.v3.Http3ProtocolOptions 1, // 11: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.server_header_transformation:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ServerHeaderTransformation - 31, // 12: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.scheme_header_transformation:type_name -> envoy.config.core.v3.SchemeHeaderTransformation - 32, // 13: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.max_request_headers_kb:type_name -> google.protobuf.UInt32Value - 33, // 14: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout:type_name -> google.protobuf.Duration - 33, // 15: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_timeout:type_name -> google.protobuf.Duration - 33, // 16: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_headers_timeout:type_name -> google.protobuf.Duration - 33, // 17: 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.drain_timeout:type_name -> google.protobuf.Duration - 33, // 18: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.delayed_close_timeout:type_name -> google.protobuf.Duration - 34, // 19: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.access_log:type_name -> envoy.config.accesslog.v3.AccessLog - 26, // 20: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address:type_name -> google.protobuf.BoolValue - 35, // 21: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.original_ip_detection_extensions:type_name -> envoy.config.core.v3.TypedExtensionConfig - 16, // 22: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.internal_address_config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.InternalAddressConfig - 26, // 23: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.generate_request_id:type_name -> google.protobuf.BoolValue - 2, // 24: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.forward_client_cert_details:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ForwardClientCertDetails - 17, // 25: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.set_current_client_cert_details:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.SetCurrentClientCertDetails - 18, // 26: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.upgrade_configs:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig - 26, // 27: 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.normalize_path:type_name -> google.protobuf.BoolValue - 3, // 28: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.path_with_escaped_slashes_action:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathWithEscapedSlashesAction - 13, // 29: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_id_extension:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension - 6, // 30: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.local_reply_config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig - 26, // 31: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue - 19, // 32: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.path_normalization_options:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions - 20, // 33: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.proxy_status_config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ProxyStatusConfig - 35, // 34: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.typed_header_validation_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 7, // 35: envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.mappers:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper - 36, // 36: envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.body_format:type_name -> envoy.config.core.v3.SubstitutionFormatString - 37, // 37: 
envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.filter:type_name -> envoy.config.accesslog.v3.AccessLogFilter - 32, // 38: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.status_code:type_name -> google.protobuf.UInt32Value - 38, // 39: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.body:type_name -> envoy.config.core.v3.DataSource - 36, // 40: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.body_format_override:type_name -> envoy.config.core.v3.SubstitutionFormatString - 39, // 41: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption - 40, // 42: envoy.extensions.filters.network.http_connection_manager.v3.Rds.config_source:type_name -> envoy.config.core.v3.ConfigSource - 41, // 43: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList.scoped_route_configurations:type_name -> envoy.config.route.v3.ScopedRouteConfiguration - 21, // 44: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scope_key_builder:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder - 40, // 45: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.rds_config_source:type_name -> envoy.config.core.v3.ConfigSource - 9, // 46: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scoped_route_configurations_list:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList - 11, // 47: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scoped_rds:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds - 40, // 48: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds.scoped_rds_config_source:type_name -> envoy.config.core.v3.ConfigSource - 42, // 49: 
envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter.typed_config:type_name -> google.protobuf.Any - 43, // 50: envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter.config_discovery:type_name -> envoy.config.core.v3.ExtensionConfigSource - 42, // 51: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension.typed_config:type_name -> google.protobuf.Any - 5, // 52: envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager.config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - 44, // 53: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.client_sampling:type_name -> envoy.type.v3.Percent - 44, // 54: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.random_sampling:type_name -> envoy.type.v3.Percent - 44, // 55: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.overall_sampling:type_name -> envoy.type.v3.Percent - 32, // 56: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.max_path_tag_length:type_name -> google.protobuf.UInt32Value - 45, // 57: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.custom_tags:type_name -> envoy.type.tracing.v3.CustomTag - 46, // 58: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.provider:type_name -> envoy.config.trace.v3.Tracing.Http - 47, // 59: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.InternalAddressConfig.cidr_ranges:type_name -> envoy.config.core.v3.CidrRange - 26, // 60: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.SetCurrentClientCertDetails.subject:type_name -> google.protobuf.BoolValue - 12, // 61: 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig.filters:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter - 26, // 62: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig.enabled:type_name -> google.protobuf.BoolValue - 48, // 63: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions.forwarding_transformation:type_name -> envoy.type.http.v3.PathTransformation - 48, // 64: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions.http_filter_transformation:type_name -> envoy.type.http.v3.PathTransformation - 22, // 65: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.fragments:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder - 23, // 66: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.header_value_extractor:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor - 24, // 67: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.element:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement - 68, // [68:68] is the sub-list for method output_type - 68, // [68:68] is the sub-list for method input_type - 68, // [68:68] is the sub-list for extension type_name - 68, // [68:68] is the sub-list for extension extendee - 0, // [0:68] is the sub-list for field type_name + 32, // 12: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.scheme_header_transformation:type_name -> 
envoy.config.core.v3.SchemeHeaderTransformation + 33, // 13: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.max_request_headers_kb:type_name -> google.protobuf.UInt32Value + 34, // 14: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout:type_name -> google.protobuf.Duration + 34, // 15: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_timeout:type_name -> google.protobuf.Duration + 34, // 16: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_headers_timeout:type_name -> google.protobuf.Duration + 34, // 17: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.drain_timeout:type_name -> google.protobuf.Duration + 34, // 18: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.delayed_close_timeout:type_name -> google.protobuf.Duration + 35, // 19: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.access_log:type_name -> envoy.config.accesslog.v3.AccessLog + 34, // 20: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.access_log_flush_interval:type_name -> google.protobuf.Duration + 21, // 21: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.access_log_options:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.HcmAccessLogOptions + 27, // 22: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address:type_name -> google.protobuf.BoolValue + 36, // 23: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.original_ip_detection_extensions:type_name -> envoy.config.core.v3.TypedExtensionConfig + 36, // 24: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.early_header_mutation_extensions:type_name -> 
envoy.config.core.v3.TypedExtensionConfig + 16, // 25: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.internal_address_config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.InternalAddressConfig + 27, // 26: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.generate_request_id:type_name -> google.protobuf.BoolValue + 2, // 27: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.forward_client_cert_details:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ForwardClientCertDetails + 17, // 28: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.set_current_client_cert_details:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.SetCurrentClientCertDetails + 18, // 29: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.upgrade_configs:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig + 27, // 30: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.normalize_path:type_name -> google.protobuf.BoolValue + 3, // 31: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.path_with_escaped_slashes_action:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathWithEscapedSlashesAction + 13, // 32: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_id_extension:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension + 6, // 33: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.local_reply_config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig + 27, // 34: 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 19, // 35: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.path_normalization_options:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions + 20, // 36: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.proxy_status_config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ProxyStatusConfig + 36, // 37: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.typed_header_validation_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 27, // 38: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.add_proxy_protocol_connection_state:type_name -> google.protobuf.BoolValue + 7, // 39: envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.mappers:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper + 37, // 40: envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.body_format:type_name -> envoy.config.core.v3.SubstitutionFormatString + 38, // 41: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.filter:type_name -> envoy.config.accesslog.v3.AccessLogFilter + 33, // 42: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.status_code:type_name -> google.protobuf.UInt32Value + 39, // 43: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.body:type_name -> envoy.config.core.v3.DataSource + 37, // 44: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.body_format_override:type_name -> envoy.config.core.v3.SubstitutionFormatString + 40, // 45: 
envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 41, // 46: envoy.extensions.filters.network.http_connection_manager.v3.Rds.config_source:type_name -> envoy.config.core.v3.ConfigSource + 42, // 47: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList.scoped_route_configurations:type_name -> envoy.config.route.v3.ScopedRouteConfiguration + 22, // 48: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scope_key_builder:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder + 41, // 49: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.rds_config_source:type_name -> envoy.config.core.v3.ConfigSource + 9, // 50: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scoped_route_configurations_list:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList + 11, // 51: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scoped_rds:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds + 41, // 52: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds.scoped_rds_config_source:type_name -> envoy.config.core.v3.ConfigSource + 43, // 53: envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter.typed_config:type_name -> google.protobuf.Any + 44, // 54: envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter.config_discovery:type_name -> envoy.config.core.v3.ExtensionConfigSource + 43, // 55: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension.typed_config:type_name -> google.protobuf.Any + 5, // 56: envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager.config:type_name -> 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + 45, // 57: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.client_sampling:type_name -> envoy.type.v3.Percent + 45, // 58: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.random_sampling:type_name -> envoy.type.v3.Percent + 45, // 59: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.overall_sampling:type_name -> envoy.type.v3.Percent + 33, // 60: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.max_path_tag_length:type_name -> google.protobuf.UInt32Value + 46, // 61: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.custom_tags:type_name -> envoy.type.tracing.v3.CustomTag + 47, // 62: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.provider:type_name -> envoy.config.trace.v3.Tracing.Http + 48, // 63: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.InternalAddressConfig.cidr_ranges:type_name -> envoy.config.core.v3.CidrRange + 27, // 64: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.SetCurrentClientCertDetails.subject:type_name -> google.protobuf.BoolValue + 12, // 65: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig.filters:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter + 27, // 66: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig.enabled:type_name -> google.protobuf.BoolValue + 49, // 67: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions.forwarding_transformation:type_name -> envoy.type.http.v3.PathTransformation + 49, // 68: 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions.http_filter_transformation:type_name -> envoy.type.http.v3.PathTransformation + 34, // 69: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.HcmAccessLogOptions.access_log_flush_interval:type_name -> google.protobuf.Duration + 23, // 70: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.fragments:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder + 24, // 71: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.header_value_extractor:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor + 25, // 72: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.element:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement + 73, // [73:73] is the sub-list for method output_type + 73, // [73:73] is the sub-list for method input_type + 73, // [73:73] is the sub-list for extension type_name + 73, // [73:73] is the sub-list for extension extendee + 0, // [0:73] is the sub-list for field type_name } func init() { @@ -3862,7 +4086,7 @@ func file_envoy_extensions_filters_network_http_connection_manager_v3_http_conne } } file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ScopedRoutes_ScopeKeyBuilder); i { + switch v := v.(*HttpConnectionManager_HcmAccessLogOptions); i { case 0: return &v.state case 1: @@ -3874,7 +4098,7 @@ func file_envoy_extensions_filters_network_http_connection_manager_v3_http_conne } } 
file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder); i { + switch v := v.(*ScopedRoutes_ScopeKeyBuilder); i { case 0: return &v.state case 1: @@ -3886,7 +4110,7 @@ func file_envoy_extensions_filters_network_http_connection_manager_v3_http_conne } } file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor); i { + switch v := v.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder); i { case 0: return &v.state case 1: @@ -3898,6 +4122,18 @@ func file_envoy_extensions_filters_network_http_connection_manager_v3_http_conne } } file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement); i { case 0: return &v.state @@ -3928,10 +4164,10 @@ func file_envoy_extensions_filters_network_http_connection_manager_v3_http_conne (*HttpConnectionManager_ProxyStatusConfig_UseNodeId)(nil), (*HttpConnectionManager_ProxyStatusConfig_LiteralProxyName)(nil), } - file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[17].OneofWrappers = []interface{}{ + 
file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18].OneofWrappers = []interface{}{ (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_)(nil), } - file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18].OneofWrappers = []interface{}{ + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19].OneofWrappers = []interface{}{ (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index)(nil), (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element)(nil), } @@ -3941,7 +4177,7 @@ func file_envoy_extensions_filters_network_http_connection_manager_v3_http_conne GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDesc, NumEnums: 5, - NumMessages: 20, + NumMessages: 21, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go index 859586a632..7c81d81b2a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go @@ -533,6 +533,67 @@ func (m *HttpConnectionManager) validate(all bool) error { } + if d := m.GetAccessLogFlushInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = 
HttpConnectionManagerValidationError{ + field: "AccessLogFlushInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := HttpConnectionManagerValidationError{ + field: "AccessLogFlushInterval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + // no validation rules for FlushAccessLogOnNewRequest + + if all { + switch v := interface{}(m.GetAccessLogOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AccessLogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AccessLogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAccessLogOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "AccessLogOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + if all { switch v := interface{}(m.GetUseRemoteAddress()).(type) { case interface{ ValidateAll() error }: @@ -598,6 +659,40 @@ func (m *HttpConnectionManager) validate(all bool) error { } + for idx, item := range m.GetEarlyHeaderMutationExtensions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("EarlyHeaderMutationExtensions[%v]", idx), + reason: "embedded message failed validation", 
+ cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("EarlyHeaderMutationExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: fmt.Sprintf("EarlyHeaderMutationExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + if all { switch v := interface{}(m.GetInternalAddressConfig()).(type) { case interface{ ValidateAll() error }: @@ -962,9 +1057,51 @@ func (m *HttpConnectionManager) validate(all bool) error { } } - switch m.RouteSpecifier.(type) { + // no validation rules for AppendXForwardedPort + if all { + switch v := interface{}(m.GetAddProxyProtocolConnectionState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AddProxyProtocolConnectionState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AddProxyProtocolConnectionState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddProxyProtocolConnectionState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "AddProxyProtocolConnectionState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofRouteSpecifierPresent := false + switch v := m.RouteSpecifier.(type) { case *HttpConnectionManager_Rds: + if v == nil { + err := HttpConnectionManagerValidationError{ + 
field: "RouteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRouteSpecifierPresent = true if all { switch v := interface{}(m.GetRds()).(type) { @@ -996,6 +1133,17 @@ func (m *HttpConnectionManager) validate(all bool) error { } case *HttpConnectionManager_RouteConfig: + if v == nil { + err := HttpConnectionManagerValidationError{ + field: "RouteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRouteSpecifierPresent = true if all { switch v := interface{}(m.GetRouteConfig()).(type) { @@ -1027,6 +1175,17 @@ func (m *HttpConnectionManager) validate(all bool) error { } case *HttpConnectionManager_ScopedRoutes: + if v == nil { + err := HttpConnectionManagerValidationError{ + field: "RouteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRouteSpecifierPresent = true if all { switch v := interface{}(m.GetScopedRoutes()).(type) { @@ -1058,6 +1217,9 @@ func (m *HttpConnectionManager) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofRouteSpecifierPresent { err := HttpConnectionManagerValidationError{ field: "RouteSpecifier", reason: "value is required", @@ -1066,14 +1228,22 @@ func (m *HttpConnectionManager) validate(all bool) error { return err } errors = append(errors, err) - } - - switch m.StripPortMode.(type) { - + switch v := m.StripPortMode.(type) { case *HttpConnectionManager_StripAnyHostPort: + if v == nil { + err := HttpConnectionManagerValidationError{ + field: "StripPortMode", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for StripAnyHostPort - + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -1971,9 +2141,20 @@ func (m *ScopedRoutes) validate(all bool) error { } } - switch 
m.ConfigSpecifier.(type) { - + oneofConfigSpecifierPresent := false + switch v := m.ConfigSpecifier.(type) { case *ScopedRoutes_ScopedRouteConfigurationsList: + if v == nil { + err := ScopedRoutesValidationError{ + field: "ConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSpecifierPresent = true if all { switch v := interface{}(m.GetScopedRouteConfigurationsList()).(type) { @@ -2005,6 +2186,17 @@ func (m *ScopedRoutes) validate(all bool) error { } case *ScopedRoutes_ScopedRds: + if v == nil { + err := ScopedRoutesValidationError{ + field: "ConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSpecifierPresent = true if all { switch v := interface{}(m.GetScopedRds()).(type) { @@ -2036,6 +2228,9 @@ func (m *ScopedRoutes) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofConfigSpecifierPresent { err := ScopedRoutesValidationError{ field: "ConfigSpecifier", reason: "value is required", @@ -2044,7 +2239,6 @@ func (m *ScopedRoutes) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -2300,9 +2494,18 @@ func (m *HttpFilter) validate(all bool) error { // no validation rules for IsOptional - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *HttpFilter_TypedConfig: + if v == nil { + err := HttpFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -2334,6 +2537,16 @@ func (m *HttpFilter) validate(all bool) error { } case *HttpFilter_ConfigDiscovery: + if v == nil { + err := HttpFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = 
append(errors, err) + } if all { switch v := interface{}(m.GetConfigDiscovery()).(type) { @@ -2364,6 +2577,8 @@ func (m *HttpFilter) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -3653,14 +3868,33 @@ func (m *HttpConnectionManager_ProxyStatusConfig) validate(all bool) error { // no validation rules for SetRecommendedResponseCode - switch m.ProxyName.(type) { - + switch v := m.ProxyName.(type) { case *HttpConnectionManager_ProxyStatusConfig_UseNodeId: + if v == nil { + err := HttpConnectionManager_ProxyStatusConfigValidationError{ + field: "ProxyName", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for UseNodeId - case *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName: + if v == nil { + err := HttpConnectionManager_ProxyStatusConfigValidationError{ + field: "ProxyName", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for LiteralProxyName - + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -3745,6 +3979,146 @@ var _ interface { ErrorName() string } = HttpConnectionManager_ProxyStatusConfigValidationError{} +// Validate checks the field values on +// HttpConnectionManager_HcmAccessLogOptions with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpConnectionManager_HcmAccessLogOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// HttpConnectionManager_HcmAccessLogOptions with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// HttpConnectionManager_HcmAccessLogOptionsMultiError, or nil if none found. 
+func (m *HttpConnectionManager_HcmAccessLogOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_HcmAccessLogOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if d := m.GetAccessLogFlushInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HttpConnectionManager_HcmAccessLogOptionsValidationError{ + field: "AccessLogFlushInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := HttpConnectionManager_HcmAccessLogOptionsValidationError{ + field: "AccessLogFlushInterval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + // no validation rules for FlushAccessLogOnNewRequest + + // no validation rules for FlushLogOnTunnelSuccessfullyEstablished + + if len(errors) > 0 { + return HttpConnectionManager_HcmAccessLogOptionsMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_HcmAccessLogOptionsMultiError is an error wrapping +// multiple validation errors returned by +// HttpConnectionManager_HcmAccessLogOptions.ValidateAll() if the designated +// constraints aren't met. +type HttpConnectionManager_HcmAccessLogOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManager_HcmAccessLogOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m HttpConnectionManager_HcmAccessLogOptionsMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_HcmAccessLogOptionsValidationError is the validation +// error returned by HttpConnectionManager_HcmAccessLogOptions.Validate if the +// designated constraints aren't met. +type HttpConnectionManager_HcmAccessLogOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) ErrorName() string { + return "HttpConnectionManager_HcmAccessLogOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_HcmAccessLogOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_HcmAccessLogOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_HcmAccessLogOptionsValidationError{} + // Validate checks the field values on ScopedRoutes_ScopeKeyBuilder with the // rules defined in the proto definition for this message. 
If any rules are // violated, the first error encountered is returned, or nil if there are no violations. @@ -3917,9 +4291,20 @@ func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) validate(all bool) error var errors []error - switch m.Type.(type) { - + oneofTypePresent := false + switch v := m.Type.(type) { case *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_: + if v == nil { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true if all { switch v := interface{}(m.GetHeaderValueExtractor()).(type) { @@ -3951,6 +4336,9 @@ func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) validate(all bool) error } default: + _ = v // ensures v is used + } + if !oneofTypePresent { err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{ field: "Type", reason: "value is required", @@ -3959,7 +4347,6 @@ func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) validate(all bool) error return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -4083,12 +4470,30 @@ func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) vali // no validation rules for ElementSeparator - switch m.ExtractType.(type) { - + switch v := m.ExtractType.(type) { case *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index: + if v == nil { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "ExtractType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for Index - case *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element: + if v == nil { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "ExtractType", + reason: "oneof value cannot be a typed-nil", + } + 
if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetElement()).(type) { @@ -4119,6 +4524,8 @@ func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) vali } } + default: + _ = v // ensures v is used } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go new file mode 100644 index 0000000000..6f37c7c264 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go @@ -0,0 +1,301 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto + +package client_side_weighted_round_robinv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + duration "github.com/golang/protobuf/ptypes/duration" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the client_side_weighted_round_robin LB policy. 
+// +// This policy differs from the built-in ROUND_ROBIN policy in terms of +// how the endpoint weights are determined. In the ROUND_ROBIN policy, +// the endpoint weights are sent by the control plane via EDS. However, +// in this policy, the endpoint weights are instead determined via +// qps (queries per second), eps (errors per second), and CPU utilization +// metrics sent by the endpoint using the Open Request Cost Aggregation (ORCA) +// protocol. A query counts towards qps when successful, otherwise towards both +// qps and eps. What counts as an error is up to the endpoint to define. +// A config parameter error_utilization_penalty controls the penalty to adjust +// endpoint weights using eps and qps. The weight of a given endpoint is +// computed as: qps / (cpu_utilization + eps/qps * error_utilization_penalty) +// +// See the :ref:`load balancing architecture overview` for more information. +// +// [#next-free-field: 7] +type ClientSideWeightedRoundRobin struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether to enable out-of-band utilization reporting collection from + // the endpoints. By default, per-request utilization reporting is used. + EnableOobLoadReport *wrappers.BoolValue `protobuf:"bytes,1,opt,name=enable_oob_load_report,json=enableOobLoadReport,proto3" json:"enable_oob_load_report,omitempty"` + // Load reporting interval to request from the server. Note that the + // server may not provide reports as frequently as the client requests. + // Used only when enable_oob_load_report is true. Default is 10 seconds. + OobReportingPeriod *duration.Duration `protobuf:"bytes,2,opt,name=oob_reporting_period,json=oobReportingPeriod,proto3" json:"oob_reporting_period,omitempty"` + // A given endpoint must report load metrics continuously for at least + // this long before the endpoint weight will be used. This avoids + // churn when the set of endpoint addresses changes. 
Takes effect + // both immediately after we establish a connection to an endpoint and + // after weight_expiration_period has caused us to stop using the most + // recent load metrics. Default is 10 seconds. + BlackoutPeriod *duration.Duration `protobuf:"bytes,3,opt,name=blackout_period,json=blackoutPeriod,proto3" json:"blackout_period,omitempty"` + // If a given endpoint has not reported load metrics in this long, + // then we stop using the reported weight. This ensures that we do + // not continue to use very stale weights. Once we stop using a stale + // value, if we later start seeing fresh reports again, the + // blackout_period applies. Defaults to 3 minutes. + WeightExpirationPeriod *duration.Duration `protobuf:"bytes,4,opt,name=weight_expiration_period,json=weightExpirationPeriod,proto3" json:"weight_expiration_period,omitempty"` + // How often endpoint weights are recalculated. Values less than 100ms are + // capped at 100ms. Default is 1 second. + WeightUpdatePeriod *duration.Duration `protobuf:"bytes,5,opt,name=weight_update_period,json=weightUpdatePeriod,proto3" json:"weight_update_period,omitempty"` + // The multiplier used to adjust endpoint weights with the error rate + // calculated as eps/qps. Configuration is rejected if this value is negative. + // Default is 1.0. 
+ ErrorUtilizationPenalty *wrappers.FloatValue `protobuf:"bytes,6,opt,name=error_utilization_penalty,json=errorUtilizationPenalty,proto3" json:"error_utilization_penalty,omitempty"` +} + +func (x *ClientSideWeightedRoundRobin) Reset() { + *x = ClientSideWeightedRoundRobin{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientSideWeightedRoundRobin) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientSideWeightedRoundRobin) ProtoMessage() {} + +func (x *ClientSideWeightedRoundRobin) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientSideWeightedRoundRobin.ProtoReflect.Descriptor instead. 
+func (*ClientSideWeightedRoundRobin) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescGZIP(), []int{0} +} + +func (x *ClientSideWeightedRoundRobin) GetEnableOobLoadReport() *wrappers.BoolValue { + if x != nil { + return x.EnableOobLoadReport + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetOobReportingPeriod() *duration.Duration { + if x != nil { + return x.OobReportingPeriod + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetBlackoutPeriod() *duration.Duration { + if x != nil { + return x.BlackoutPeriod + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetWeightExpirationPeriod() *duration.Duration { + if x != nil { + return x.WeightExpirationPeriod + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetWeightUpdatePeriod() *duration.Duration { + if x != nil { + return x.WeightUpdatePeriod + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetErrorUtilizationPenalty() *wrappers.FloatValue { + if x != nil { + return x.ErrorUtilizationPenalty + } + return nil +} + +var File_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc = []byte{ + 0x0a, 0x73, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, + 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x2f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 
0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x4c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x04, 0x0a, 0x1c, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x64, 0x65, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x4f, 0x0a, 0x16, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6f, 0x6f, 0x62, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x65, 0x6e, 0x61, 
0x62, 0x6c, 0x65, + 0x4f, 0x6f, 0x62, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x4b, 0x0a, + 0x14, 0x6f, 0x6f, 0x62, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x6f, 0x6f, 0x62, 0x52, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x42, 0x0a, 0x0f, 0x62, 0x6c, + 0x61, 0x63, 0x6b, 0x6f, 0x75, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, + 0x62, 0x6c, 0x61, 0x63, 0x6b, 0x6f, 0x75, 0x74, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x53, + 0x0a, 0x18, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x77, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x12, 0x4b, 0x0a, 0x14, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x77, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, + 0x12, 0x63, 0x0a, 
0x19, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x0a, 0x05, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x52, 0x17, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, + 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x42, 0xa2, 0x02, 0x0a, 0x5a, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, + 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x21, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x64, 0x65, + 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, + 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x96, 0x01, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 
0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, + 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, + 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescData = file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_goTypes = []interface{}{ + (*ClientSideWeightedRoundRobin)(nil), // 0: 
envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin + (*wrappers.BoolValue)(nil), // 1: google.protobuf.BoolValue + (*duration.Duration)(nil), // 2: google.protobuf.Duration + (*wrappers.FloatValue)(nil), // 3: google.protobuf.FloatValue +} +var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_depIdxs = []int32{ + 1, // 0: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.enable_oob_load_report:type_name -> google.protobuf.BoolValue + 2, // 1: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.oob_reporting_period:type_name -> google.protobuf.Duration + 2, // 2: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.blackout_period:type_name -> google.protobuf.Duration + 2, // 3: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.weight_expiration_period:type_name -> google.protobuf.Duration + 2, // 4: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.weight_update_period:type_name -> google.protobuf.Duration + 3, // 5: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.error_utilization_penalty:type_name -> google.protobuf.FloatValue + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_init() +} +func 
file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_init() { + if File_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientSideWeightedRoundRobin); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_depIdxs, + MessageInfos: file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto = out.File + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_goTypes = nil + 
file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go new file mode 100644 index 0000000000..6a1f64e5ab --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go @@ -0,0 +1,299 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto + +package client_side_weighted_round_robinv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ClientSideWeightedRoundRobin with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *ClientSideWeightedRoundRobin) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClientSideWeightedRoundRobin with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClientSideWeightedRoundRobinMultiError, or nil if none found. +func (m *ClientSideWeightedRoundRobin) ValidateAll() error { + return m.validate(true) +} + +func (m *ClientSideWeightedRoundRobin) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetEnableOobLoadReport()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "EnableOobLoadReport", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "EnableOobLoadReport", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnableOobLoadReport()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "EnableOobLoadReport", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOobReportingPeriod()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "OobReportingPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "OobReportingPeriod", + 
reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOobReportingPeriod()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "OobReportingPeriod", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetBlackoutPeriod()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "BlackoutPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "BlackoutPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBlackoutPeriod()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "BlackoutPeriod", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWeightExpirationPeriod()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "WeightExpirationPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "WeightExpirationPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWeightExpirationPeriod()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
ClientSideWeightedRoundRobinValidationError{ + field: "WeightExpirationPeriod", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWeightUpdatePeriod()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "WeightUpdatePeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "WeightUpdatePeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWeightUpdatePeriod()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "WeightUpdatePeriod", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetErrorUtilizationPenalty(); wrapper != nil { + + if wrapper.GetValue() < 0 { + err := ClientSideWeightedRoundRobinValidationError{ + field: "ErrorUtilizationPenalty", + reason: "value must be greater than or equal to 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return ClientSideWeightedRoundRobinMultiError(errors) + } + + return nil +} + +// ClientSideWeightedRoundRobinMultiError is an error wrapping multiple +// validation errors returned by ClientSideWeightedRoundRobin.ValidateAll() if +// the designated constraints aren't met. +type ClientSideWeightedRoundRobinMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ClientSideWeightedRoundRobinMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClientSideWeightedRoundRobinMultiError) AllErrors() []error { return m } + +// ClientSideWeightedRoundRobinValidationError is the validation error returned +// by ClientSideWeightedRoundRobin.Validate if the designated constraints +// aren't met. +type ClientSideWeightedRoundRobinValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClientSideWeightedRoundRobinValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClientSideWeightedRoundRobinValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClientSideWeightedRoundRobinValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClientSideWeightedRoundRobinValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ClientSideWeightedRoundRobinValidationError) ErrorName() string { + return "ClientSideWeightedRoundRobinValidationError" +} + +// Error satisfies the builtin error interface +func (e ClientSideWeightedRoundRobinValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClientSideWeightedRoundRobin.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClientSideWeightedRoundRobinValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClientSideWeightedRoundRobinValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go new file mode 100644 index 0000000000..0684dda0b6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go @@ -0,0 +1,616 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: envoy/extensions/load_balancing_policies/common/v3/common.proto + +package commonv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + duration "github.com/golang/protobuf/ptypes/duration" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type LocalityLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to LocalityConfigSpecifier: + // *LocalityLbConfig_ZoneAwareLbConfig_ + // *LocalityLbConfig_LocalityWeightedLbConfig_ + LocalityConfigSpecifier isLocalityLbConfig_LocalityConfigSpecifier `protobuf_oneof:"locality_config_specifier"` +} + +func (x *LocalityLbConfig) Reset() { + *x = LocalityLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalityLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalityLbConfig) ProtoMessage() {} + +func (x *LocalityLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalityLbConfig.ProtoReflect.Descriptor instead. +func (*LocalityLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{0} +} + +func (m *LocalityLbConfig) GetLocalityConfigSpecifier() isLocalityLbConfig_LocalityConfigSpecifier { + if m != nil { + return m.LocalityConfigSpecifier + } + return nil +} + +func (x *LocalityLbConfig) GetZoneAwareLbConfig() *LocalityLbConfig_ZoneAwareLbConfig { + if x, ok := x.GetLocalityConfigSpecifier().(*LocalityLbConfig_ZoneAwareLbConfig_); ok { + return x.ZoneAwareLbConfig + } + return nil +} + +func (x *LocalityLbConfig) GetLocalityWeightedLbConfig() *LocalityLbConfig_LocalityWeightedLbConfig { + if x, ok := x.GetLocalityConfigSpecifier().(*LocalityLbConfig_LocalityWeightedLbConfig_); ok { + return x.LocalityWeightedLbConfig + } + return nil +} + +type isLocalityLbConfig_LocalityConfigSpecifier interface { + isLocalityLbConfig_LocalityConfigSpecifier() +} + +type LocalityLbConfig_ZoneAwareLbConfig_ struct { + // Configuration for local zone aware load balancing. + ZoneAwareLbConfig *LocalityLbConfig_ZoneAwareLbConfig `protobuf:"bytes,1,opt,name=zone_aware_lb_config,json=zoneAwareLbConfig,proto3,oneof"` +} + +type LocalityLbConfig_LocalityWeightedLbConfig_ struct { + // Enable locality weighted load balancing. + LocalityWeightedLbConfig *LocalityLbConfig_LocalityWeightedLbConfig `protobuf:"bytes,2,opt,name=locality_weighted_lb_config,json=localityWeightedLbConfig,proto3,oneof"` +} + +func (*LocalityLbConfig_ZoneAwareLbConfig_) isLocalityLbConfig_LocalityConfigSpecifier() {} + +func (*LocalityLbConfig_LocalityWeightedLbConfig_) isLocalityLbConfig_LocalityConfigSpecifier() {} + +// Configuration for :ref:`slow start mode `. 
+type SlowStartConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represents the size of slow start window. + // If set, the newly created host remains in slow start mode starting from its creation time + // for the duration of slow start window. + SlowStartWindow *duration.Duration `protobuf:"bytes,1,opt,name=slow_start_window,json=slowStartWindow,proto3" json:"slow_start_window,omitempty"` + // This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0, + // so that endpoint would get linearly increasing amount of traffic. + // When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly. + // The value of aggression parameter should be greater than 0.0. + // By tuning the parameter, is possible to achieve polynomial or exponential shape of ramp-up curve. + // + // During slow start window, effective weight of an endpoint would be scaled with time factor and aggression: + // ``new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))``, + // where ``time_factor=(time_since_start_seconds / slow_start_time_seconds)``. + // + // As time progresses, more and more traffic would be sent to endpoint, which is in slow start window. + // Once host exits slow start, time_factor and aggression no longer affect its weight. + Aggression *v3.RuntimeDouble `protobuf:"bytes,2,opt,name=aggression,proto3" json:"aggression,omitempty"` + // Configures the minimum percentage of origin weight that avoids too small new weight, + // which may cause endpoints in slow start mode receive no traffic in slow start window. + // If not specified, the default is 10%. 
+ MinWeightPercent *v31.Percent `protobuf:"bytes,3,opt,name=min_weight_percent,json=minWeightPercent,proto3" json:"min_weight_percent,omitempty"` +} + +func (x *SlowStartConfig) Reset() { + *x = SlowStartConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SlowStartConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SlowStartConfig) ProtoMessage() {} + +func (x *SlowStartConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SlowStartConfig.ProtoReflect.Descriptor instead. +func (*SlowStartConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{1} +} + +func (x *SlowStartConfig) GetSlowStartWindow() *duration.Duration { + if x != nil { + return x.SlowStartWindow + } + return nil +} + +func (x *SlowStartConfig) GetAggression() *v3.RuntimeDouble { + if x != nil { + return x.Aggression + } + return nil +} + +func (x *SlowStartConfig) GetMinWeightPercent() *v31.Percent { + if x != nil { + return x.MinWeightPercent + } + return nil +} + +// Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) +type ConsistentHashingLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set to ``true``, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. 
Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + UseHostnameForHashing bool `protobuf:"varint,1,opt,name=use_hostname_for_hashing,json=useHostnameForHashing,proto3" json:"use_hostname_for_hashing,omitempty"` + // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + // Minimum is 100. + // + // Applies to both Ring Hash and Maglev load balancers. + // + // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + // ``hash_balance_factor``, requests to any upstream host are capped at ``hash_balance_factor/100`` times the average number of requests + // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + // is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + // cascading overflow effect when choosing the next host in the ring/table). + // + // If weights are specified on the hosts, they are respected. + // + // This is an O(N) algorithm, unlike other load balancers. Using a lower ``hash_balance_factor`` results in more hosts + // being probed, so use a higher value if you require better performance. 
+ HashBalanceFactor *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` +} + +func (x *ConsistentHashingLbConfig) Reset() { + *x = ConsistentHashingLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConsistentHashingLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConsistentHashingLbConfig) ProtoMessage() {} + +func (x *ConsistentHashingLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConsistentHashingLbConfig.ProtoReflect.Descriptor instead. +func (*ConsistentHashingLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{2} +} + +func (x *ConsistentHashingLbConfig) GetUseHostnameForHashing() bool { + if x != nil { + return x.UseHostnameForHashing + } + return false +} + +func (x *ConsistentHashingLbConfig) GetHashBalanceFactor() *wrappers.UInt32Value { + if x != nil { + return x.HashBalanceFactor + } + return nil +} + +// Configuration for :ref:`zone aware routing +// `. +type LocalityLbConfig_ZoneAwareLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configures percentage of requests that will be considered for zone aware routing + // if zone aware routing is configured. If not specified, the default is 100%. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. 
+ RoutingEnabled *v31.Percent `protobuf:"bytes,1,opt,name=routing_enabled,json=routingEnabled,proto3" json:"routing_enabled,omitempty"` + // Configures minimum upstream cluster size required for zone aware routing + // If upstream cluster size is less than specified, zone aware routing is not performed + // even if zone aware routing is configured. If not specified, the default is 6. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + MinClusterSize *wrappers.UInt64Value `protobuf:"bytes,2,opt,name=min_cluster_size,json=minClusterSize,proto3" json:"min_cluster_size,omitempty"` + // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + // mode`. Instead, the cluster will fail all + // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + // failing service. + FailTrafficOnPanic bool `protobuf:"varint,3,opt,name=fail_traffic_on_panic,json=failTrafficOnPanic,proto3" json:"fail_traffic_on_panic,omitempty"` +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) Reset() { + *x = LocalityLbConfig_ZoneAwareLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalityLbConfig_ZoneAwareLbConfig) ProtoMessage() {} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalityLbConfig_ZoneAwareLbConfig.ProtoReflect.Descriptor instead. 
+func (*LocalityLbConfig_ZoneAwareLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) GetRoutingEnabled() *v31.Percent { + if x != nil { + return x.RoutingEnabled + } + return nil +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) GetMinClusterSize() *wrappers.UInt64Value { + if x != nil { + return x.MinClusterSize + } + return nil +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) GetFailTrafficOnPanic() bool { + if x != nil { + return x.FailTrafficOnPanic + } + return false +} + +// Configuration for :ref:`locality weighted load balancing +// ` +type LocalityLbConfig_LocalityWeightedLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *LocalityLbConfig_LocalityWeightedLbConfig) Reset() { + *x = LocalityLbConfig_LocalityWeightedLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalityLbConfig_LocalityWeightedLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalityLbConfig_LocalityWeightedLbConfig) ProtoMessage() {} + +func (x *LocalityLbConfig_LocalityWeightedLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalityLbConfig_LocalityWeightedLbConfig.ProtoReflect.Descriptor instead. 
+func (*LocalityLbConfig_LocalityWeightedLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{0, 1} +} + +var File_envoy_extensions_load_balancing_policies_common_v3_common_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDesc = []byte{ + 0x0a, 0x3f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 
0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x04, 0x0a, 0x10, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x89, 0x01, 0x0a, 0x14, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x61, 0x77, 0x61, 0x72, 0x65, 0x5f, + 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x56, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x11, 0x7a, 0x6f, 0x6e, 0x65, 0x41, + 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x9e, 0x01, 0x0a, + 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x5d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 
0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xcf, 0x01, + 0x0a, 0x11, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x12, 0x46, 0x0a, 0x10, 0x6d, 0x69, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x69, + 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x31, 0x0a, 0x15, + 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x6f, 0x6e, 0x5f, + 0x70, 0x61, 0x6e, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, + 0x6c, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x4f, 0x6e, 0x50, 0x61, 0x6e, 0x69, 0x63, 0x1a, + 0x1a, 0x0a, 0x18, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x20, 0x0a, 0x19, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xe3, 0x01, + 0x0a, 0x0f, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x45, 
0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x67, 0x67, 0x72, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, + 0x65, 0x52, 0x0a, 0x61, 0x67, 0x67, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, + 0x12, 0x6d, 0x69, 0x6e, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x52, 0x10, 0x6d, 0x69, 0x6e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x22, 0xab, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x37, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x55, 0x0a, 0x13, 0x68, 0x61, + 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, 0x52, 0x11, + 0x68, 0x61, 0x73, 0x68, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x42, 0xbd, 0x01, 0x0a, 0x40, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x62, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x33, + 0x3b, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescData = file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP() []byte { + 
file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_goTypes = []interface{}{ + (*LocalityLbConfig)(nil), // 0: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig + (*SlowStartConfig)(nil), // 1: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig + (*ConsistentHashingLbConfig)(nil), // 2: envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig + (*LocalityLbConfig_ZoneAwareLbConfig)(nil), // 3: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig + (*LocalityLbConfig_LocalityWeightedLbConfig)(nil), // 4: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig + (*duration.Duration)(nil), // 5: google.protobuf.Duration + (*v3.RuntimeDouble)(nil), // 6: envoy.config.core.v3.RuntimeDouble + (*v31.Percent)(nil), // 7: envoy.type.v3.Percent + (*wrappers.UInt32Value)(nil), // 8: google.protobuf.UInt32Value + (*wrappers.UInt64Value)(nil), // 9: google.protobuf.UInt64Value +} +var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_depIdxs = []int32{ + 3, // 0: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.zone_aware_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig + 4, // 1: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.locality_weighted_lb_config:type_name -> 
envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig + 5, // 2: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig.slow_start_window:type_name -> google.protobuf.Duration + 6, // 3: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig.aggression:type_name -> envoy.config.core.v3.RuntimeDouble + 7, // 4: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig.min_weight_percent:type_name -> envoy.type.v3.Percent + 8, // 5: envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig.hash_balance_factor:type_name -> google.protobuf.UInt32Value + 7, // 6: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig.routing_enabled:type_name -> envoy.type.v3.Percent + 9, // 7: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig.min_cluster_size:type_name -> google.protobuf.UInt64Value + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_load_balancing_policies_common_v3_common_proto_init() } +func file_envoy_extensions_load_balancing_policies_common_v3_common_proto_init() { + if File_envoy_extensions_load_balancing_policies_common_v3_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalityLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SlowStartConfig); i { 
+ case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConsistentHashingLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalityLbConfig_ZoneAwareLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalityLbConfig_LocalityWeightedLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*LocalityLbConfig_ZoneAwareLbConfig_)(nil), + (*LocalityLbConfig_LocalityWeightedLbConfig_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_common_v3_common_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_common_v3_common_proto_depIdxs, + MessageInfos: file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_common_v3_common_proto = out.File + 
file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go new file mode 100644 index 0000000000..f291a355ca --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go @@ -0,0 +1,813 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/common/v3/common.proto + +package commonv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on LocalityLbConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *LocalityLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LocalityLbConfig with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// LocalityLbConfigMultiError, or nil if none found. +func (m *LocalityLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalityLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofLocalityConfigSpecifierPresent := false + switch v := m.LocalityConfigSpecifier.(type) { + case *LocalityLbConfig_ZoneAwareLbConfig_: + if v == nil { + err := LocalityLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLocalityConfigSpecifierPresent = true + + if all { + switch v := interface{}(m.GetZoneAwareLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetZoneAwareLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *LocalityLbConfig_LocalityWeightedLbConfig_: + if v == nil { + err := LocalityLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLocalityConfigSpecifierPresent = true + + if all { + switch v := interface{}(m.GetLocalityWeightedLbConfig()).(type) { + case interface{ ValidateAll() error }: + 
if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbConfigValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbConfigValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalityWeightedLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbConfigValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofLocalityConfigSpecifierPresent { + err := LocalityLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return LocalityLbConfigMultiError(errors) + } + + return nil +} + +// LocalityLbConfigMultiError is an error wrapping multiple validation errors +// returned by LocalityLbConfig.ValidateAll() if the designated constraints +// aren't met. +type LocalityLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LocalityLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalityLbConfigMultiError) AllErrors() []error { return m } + +// LocalityLbConfigValidationError is the validation error returned by +// LocalityLbConfig.Validate if the designated constraints aren't met. +type LocalityLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e LocalityLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LocalityLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityLbConfigValidationError) ErrorName() string { return "LocalityLbConfigValidationError" } + +// Error satisfies the builtin error interface +func (e LocalityLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalityLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityLbConfigValidationError{} + +// Validate checks the field values on SlowStartConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SlowStartConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SlowStartConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SlowStartConfigMultiError, or nil if none found. 
+func (m *SlowStartConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *SlowStartConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSlowStartWindow()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSlowStartWindow()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAggression()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAggression()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMinWeightPercent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = 
append(errors, SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinWeightPercent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SlowStartConfigMultiError(errors) + } + + return nil +} + +// SlowStartConfigMultiError is an error wrapping multiple validation errors +// returned by SlowStartConfig.ValidateAll() if the designated constraints +// aren't met. +type SlowStartConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SlowStartConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SlowStartConfigMultiError) AllErrors() []error { return m } + +// SlowStartConfigValidationError is the validation error returned by +// SlowStartConfig.Validate if the designated constraints aren't met. +type SlowStartConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SlowStartConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SlowStartConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SlowStartConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e SlowStartConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SlowStartConfigValidationError) ErrorName() string { return "SlowStartConfigValidationError" } + +// Error satisfies the builtin error interface +func (e SlowStartConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSlowStartConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SlowStartConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SlowStartConfigValidationError{} + +// Validate checks the field values on ConsistentHashingLbConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ConsistentHashingLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ConsistentHashingLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ConsistentHashingLbConfigMultiError, or nil if none found. 
+func (m *ConsistentHashingLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ConsistentHashingLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for UseHostnameForHashing + + if wrapper := m.GetHashBalanceFactor(); wrapper != nil { + + if wrapper.GetValue() < 100 { + err := ConsistentHashingLbConfigValidationError{ + field: "HashBalanceFactor", + reason: "value must be greater than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return ConsistentHashingLbConfigMultiError(errors) + } + + return nil +} + +// ConsistentHashingLbConfigMultiError is an error wrapping multiple validation +// errors returned by ConsistentHashingLbConfig.ValidateAll() if the +// designated constraints aren't met. +type ConsistentHashingLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ConsistentHashingLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ConsistentHashingLbConfigMultiError) AllErrors() []error { return m } + +// ConsistentHashingLbConfigValidationError is the validation error returned by +// ConsistentHashingLbConfig.Validate if the designated constraints aren't met. +type ConsistentHashingLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ConsistentHashingLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ConsistentHashingLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ConsistentHashingLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ConsistentHashingLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ConsistentHashingLbConfigValidationError) ErrorName() string { + return "ConsistentHashingLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e ConsistentHashingLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sConsistentHashingLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ConsistentHashingLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ConsistentHashingLbConfigValidationError{} + +// Validate checks the field values on LocalityLbConfig_ZoneAwareLbConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *LocalityLbConfig_ZoneAwareLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LocalityLbConfig_ZoneAwareLbConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// LocalityLbConfig_ZoneAwareLbConfigMultiError, or nil if none found. 
+func (m *LocalityLbConfig_ZoneAwareLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRoutingEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRoutingEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMinClusterSize()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinClusterSize()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no 
validation rules for FailTrafficOnPanic + + if len(errors) > 0 { + return LocalityLbConfig_ZoneAwareLbConfigMultiError(errors) + } + + return nil +} + +// LocalityLbConfig_ZoneAwareLbConfigMultiError is an error wrapping multiple +// validation errors returned by +// LocalityLbConfig_ZoneAwareLbConfig.ValidateAll() if the designated +// constraints aren't met. +type LocalityLbConfig_ZoneAwareLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LocalityLbConfig_ZoneAwareLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalityLbConfig_ZoneAwareLbConfigMultiError) AllErrors() []error { return m } + +// LocalityLbConfig_ZoneAwareLbConfigValidationError is the validation error +// returned by LocalityLbConfig_ZoneAwareLbConfig.Validate if the designated +// constraints aren't met. +type LocalityLbConfig_ZoneAwareLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) ErrorName() string { + return "LocalityLbConfig_ZoneAwareLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalityLbConfig_ZoneAwareLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityLbConfig_ZoneAwareLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityLbConfig_ZoneAwareLbConfigValidationError{} + +// Validate checks the field values on +// LocalityLbConfig_LocalityWeightedLbConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LocalityLbConfig_LocalityWeightedLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// LocalityLbConfig_LocalityWeightedLbConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// LocalityLbConfig_LocalityWeightedLbConfigMultiError, or nil if none found. 
+func (m *LocalityLbConfig_LocalityWeightedLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return LocalityLbConfig_LocalityWeightedLbConfigMultiError(errors) + } + + return nil +} + +// LocalityLbConfig_LocalityWeightedLbConfigMultiError is an error wrapping +// multiple validation errors returned by +// LocalityLbConfig_LocalityWeightedLbConfig.ValidateAll() if the designated +// constraints aren't met. +type LocalityLbConfig_LocalityWeightedLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LocalityLbConfig_LocalityWeightedLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalityLbConfig_LocalityWeightedLbConfigMultiError) AllErrors() []error { return m } + +// LocalityLbConfig_LocalityWeightedLbConfigValidationError is the validation +// error returned by LocalityLbConfig_LocalityWeightedLbConfig.Validate if the +// designated constraints aren't met. +type LocalityLbConfig_LocalityWeightedLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) ErrorName() string { + return "LocalityLbConfig_LocalityWeightedLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalityLbConfig_LocalityWeightedLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityLbConfig_LocalityWeightedLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityLbConfig_LocalityWeightedLbConfigValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go new file mode 100644 index 0000000000..61328f766c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go @@ -0,0 +1,169 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto + +package pick_firstv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This configuration allows the built-in PICK_FIRST LB policy to be configured +// via the LB policy extension point. +type PickFirst struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set to true, instructs the LB policy to shuffle the list of addresses + // received from the name resolver before attempting to connect to them. + ShuffleAddressList bool `protobuf:"varint,1,opt,name=shuffle_address_list,json=shuffleAddressList,proto3" json:"shuffle_address_list,omitempty"` +} + +func (x *PickFirst) Reset() { + *x = PickFirst{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PickFirst) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PickFirst) ProtoMessage() {} + +func (x *PickFirst) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PickFirst.ProtoReflect.Descriptor instead. 
+func (*PickFirst) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescGZIP(), []int{0} +} + +func (x *PickFirst) GetShuffleAddressList() bool { + if x != nil { + return x.ShuffleAddressList + } + return false +} + +var File_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDesc = []byte{ + 0x0a, 0x47, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x70, 0x69, 0x63, 0x6b, 0x5f, + 0x66, 0x69, 0x72, 0x73, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, + 0x72, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2e, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x2e, 0x76, + 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x3d, 0x0a, 0x09, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x12, 0x30, 0x0a, + 0x14, 0x73, 0x68, 0x75, 0x66, 0x66, 0x6c, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x68, 0x75, + 0x66, 0x66, 0x6c, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x42, + 0xcc, 0x01, 0x0a, 0x44, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x70, 0x69, 0x63, 0x6b, 0x5f, + 0x66, 0x69, 0x72, 0x73, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, + 0x72, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x6a, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x70, 0x69, 0x63, 0x6b, + 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, + 0x69, 0x72, 0x73, 0x74, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescData = file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescData) + }) + return 
file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_goTypes = []interface{}{ + (*PickFirst)(nil), // 0: envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst +} +var file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_init() } +func file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_init() { + if File_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PickFirst); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_depIdxs, + MessageInfos: 
file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto = out.File + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go new file mode 100644 index 0000000000..a3ea2dd588 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go @@ -0,0 +1,137 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto + +package pick_firstv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on PickFirst with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *PickFirst) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PickFirst with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in PickFirstMultiError, or nil +// if none found. +func (m *PickFirst) ValidateAll() error { + return m.validate(true) +} + +func (m *PickFirst) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ShuffleAddressList + + if len(errors) > 0 { + return PickFirstMultiError(errors) + } + + return nil +} + +// PickFirstMultiError is an error wrapping multiple validation errors returned +// by PickFirst.ValidateAll() if the designated constraints aren't met. +type PickFirstMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PickFirstMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PickFirstMultiError) AllErrors() []error { return m } + +// PickFirstValidationError is the validation error returned by +// PickFirst.Validate if the designated constraints aren't met. +type PickFirstValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PickFirstValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PickFirstValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PickFirstValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PickFirstValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e PickFirstValidationError) ErrorName() string { return "PickFirstValidationError" } + +// Error satisfies the builtin error interface +func (e PickFirstValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPickFirst.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PickFirstValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PickFirstValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go new file mode 100644 index 0000000000..b715687f4e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go @@ -0,0 +1,390 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto + +package ring_hashv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The hash function used to hash hosts onto the ketama ring. +type RingHash_HashFunction int32 + +const ( + // Currently defaults to XX_HASH. + RingHash_DEFAULT_HASH RingHash_HashFunction = 0 + // Use `xxHash `_. + RingHash_XX_HASH RingHash_HashFunction = 1 + // Use `MurmurHash2 `_, this is compatible with + // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled + // on Linux and not macOS. + RingHash_MURMUR_HASH_2 RingHash_HashFunction = 2 +) + +// Enum value maps for RingHash_HashFunction. +var ( + RingHash_HashFunction_name = map[int32]string{ + 0: "DEFAULT_HASH", + 1: "XX_HASH", + 2: "MURMUR_HASH_2", + } + RingHash_HashFunction_value = map[string]int32{ + "DEFAULT_HASH": 0, + "XX_HASH": 1, + "MURMUR_HASH_2": 2, + } +) + +func (x RingHash_HashFunction) Enum() *RingHash_HashFunction { + p := new(RingHash_HashFunction) + *p = x + return p +} + +func (x RingHash_HashFunction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RingHash_HashFunction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_enumTypes[0].Descriptor() +} + +func (RingHash_HashFunction) Type() protoreflect.EnumType { + return &file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_enumTypes[0] +} + +func (x RingHash_HashFunction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RingHash_HashFunction.Descriptor instead. 
+func (RingHash_HashFunction) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescGZIP(), []int{0, 0} +} + +// This configuration allows the built-in RING_HASH LB policy to be configured via the LB policy +// extension point. See the :ref:`load balancing architecture overview +// ` for more information. +// [#next-free-field: 8] +type RingHash struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The hash function used to hash hosts onto the ketama ring. The value defaults to + // :ref:`XX_HASH`. + HashFunction RingHash_HashFunction `protobuf:"varint,1,opt,name=hash_function,json=hashFunction,proto3,enum=envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash_HashFunction" json:"hash_function,omitempty"` + // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each + // provided host) the better the request distribution will reflect the desired weights. Defaults + // to 1024 entries, and limited to 8M entries. See also + // :ref:`maximum_ring_size`. + MinimumRingSize *wrappers.UInt64Value `protobuf:"bytes,2,opt,name=minimum_ring_size,json=minimumRingSize,proto3" json:"minimum_ring_size,omitempty"` + // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered + // to further constrain resource use. See also + // :ref:`minimum_ring_size`. + MaximumRingSize *wrappers.UInt64Value `protobuf:"bytes,3,opt,name=maximum_ring_size,json=maximumRingSize,proto3" json:"maximum_ring_size,omitempty"` + // If set to `true`, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + // + // ..note:: + // This is deprecated and please use :ref:`consistent_hashing_lb_config + // ` instead. + // + // Deprecated: Do not use. 
+ UseHostnameForHashing bool `protobuf:"varint,4,opt,name=use_hostname_for_hashing,json=useHostnameForHashing,proto3" json:"use_hostname_for_hashing,omitempty"` + // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + // Minimum is 100. + // + // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + // `hash_balance_factor`, requests to any upstream host are capped at `hash_balance_factor/100` times the average number of requests + // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + // is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + // cascading overflow effect when choosing the next host in the ring/table). + // + // If weights are specified on the hosts, they are respected. + // + // This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor` results in more hosts + // being probed, so use a higher value if you require better performance. + // + // ..note:: + // This is deprecated and please use :ref:`consistent_hashing_lb_config + // ` instead. + // + // Deprecated: Do not use. + HashBalanceFactor *wrappers.UInt32Value `protobuf:"bytes,5,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` + // Common configuration for hashing-based load balancing policies. 
+ ConsistentHashingLbConfig *v3.ConsistentHashingLbConfig `protobuf:"bytes,6,opt,name=consistent_hashing_lb_config,json=consistentHashingLbConfig,proto3" json:"consistent_hashing_lb_config,omitempty"` + // Enable locality weighted load balancing for ring hash lb explicitly. + LocalityWeightedLbConfig *v3.LocalityLbConfig_LocalityWeightedLbConfig `protobuf:"bytes,7,opt,name=locality_weighted_lb_config,json=localityWeightedLbConfig,proto3" json:"locality_weighted_lb_config,omitempty"` +} + +func (x *RingHash) Reset() { + *x = RingHash{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RingHash) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RingHash) ProtoMessage() {} + +func (x *RingHash) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RingHash.ProtoReflect.Descriptor instead. +func (*RingHash) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescGZIP(), []int{0} +} + +func (x *RingHash) GetHashFunction() RingHash_HashFunction { + if x != nil { + return x.HashFunction + } + return RingHash_DEFAULT_HASH +} + +func (x *RingHash) GetMinimumRingSize() *wrappers.UInt64Value { + if x != nil { + return x.MinimumRingSize + } + return nil +} + +func (x *RingHash) GetMaximumRingSize() *wrappers.UInt64Value { + if x != nil { + return x.MaximumRingSize + } + return nil +} + +// Deprecated: Do not use. 
+func (x *RingHash) GetUseHostnameForHashing() bool { + if x != nil { + return x.UseHostnameForHashing + } + return false +} + +// Deprecated: Do not use. +func (x *RingHash) GetHashBalanceFactor() *wrappers.UInt32Value { + if x != nil { + return x.HashBalanceFactor + } + return nil +} + +func (x *RingHash) GetConsistentHashingLbConfig() *v3.ConsistentHashingLbConfig { + if x != nil { + return x.ConsistentHashingLbConfig + } + return nil +} + +func (x *RingHash) GetLocalityWeightedLbConfig() *v3.LocalityLbConfig_LocalityWeightedLbConfig { + if x != nil { + return x.LocalityWeightedLbConfig + } + return nil +} + +var File_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDesc = []byte{ + 0x0a, 0x45, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2e, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2e, 0x76, 0x33, 0x1a, 0x3f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, + 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x1a, + 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x06, 0x0a, + 0x08, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x12, 0x7b, 0x0a, 0x0d, 0x68, 0x61, 0x73, + 0x68, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x72, 0x69, 0x6e, 0x67, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, + 0x68, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x68, 0x46, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x0a, 0xfa, 0x42, 0x07, 0x32, 
0x05, 0x18, 0x80, 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x54, 0x0a, 0x11, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, 0x80, 0x80, + 0x04, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x44, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x6f, + 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x60, 0x0a, 0x13, 0x68, 0x61, 0x73, 0x68, + 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x12, 0x18, 0x01, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x11, 0x68, 0x61, 0x73, 0x68, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x8e, 0x01, 0x0a, 0x1c, 0x63, + 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, + 0x67, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x4d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 
0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x9c, 0x01, 0x0a, 0x1b, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, + 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x5d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x40, 0x0a, 0x0c, 0x48, 0x61, + 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x45, + 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, + 0x58, 0x58, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x55, 0x52, + 0x4d, 0x55, 0x52, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x32, 0x10, 0x02, 0x42, 0xc8, 0x01, 0x0a, + 0x43, 
0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x68, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x76, 0x33, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescData = file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescData) + }) + return 
file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_goTypes = []interface{}{ + (RingHash_HashFunction)(0), // 0: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.HashFunction + (*RingHash)(nil), // 1: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash + (*wrappers.UInt64Value)(nil), // 2: google.protobuf.UInt64Value + (*wrappers.UInt32Value)(nil), // 3: google.protobuf.UInt32Value + (*v3.ConsistentHashingLbConfig)(nil), // 4: envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig + (*v3.LocalityLbConfig_LocalityWeightedLbConfig)(nil), // 5: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig +} +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_depIdxs = []int32{ + 0, // 0: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.hash_function:type_name -> envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.HashFunction + 2, // 1: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.minimum_ring_size:type_name -> google.protobuf.UInt64Value + 2, // 2: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.maximum_ring_size:type_name -> google.protobuf.UInt64Value + 3, // 3: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.hash_balance_factor:type_name -> google.protobuf.UInt32Value + 4, // 4: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.consistent_hashing_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig + 5, // 5: 
envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.locality_weighted_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_init() } +func file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_init() { + if File_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RingHash); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_depIdxs, + EnumInfos: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_enumTypes, + MessageInfos: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto = out.File + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDesc = nil + 
file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go new file mode 100644 index 0000000000..4f431eeade --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go @@ -0,0 +1,251 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto + +package ring_hashv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RingHash with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RingHash) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RingHash with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RingHashMultiError, or nil +// if none found. 
+func (m *RingHash) ValidateAll() error { + return m.validate(true) +} + +func (m *RingHash) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := RingHash_HashFunction_name[int32(m.GetHashFunction())]; !ok { + err := RingHashValidationError{ + field: "HashFunction", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if wrapper := m.GetMinimumRingSize(); wrapper != nil { + + if wrapper.GetValue() > 8388608 { + err := RingHashValidationError{ + field: "MinimumRingSize", + reason: "value must be less than or equal to 8388608", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetMaximumRingSize(); wrapper != nil { + + if wrapper.GetValue() > 8388608 { + err := RingHashValidationError{ + field: "MaximumRingSize", + reason: "value must be less than or equal to 8388608", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for UseHostnameForHashing + + if wrapper := m.GetHashBalanceFactor(); wrapper != nil { + + if wrapper.GetValue() < 100 { + err := RingHashValidationError{ + field: "HashBalanceFactor", + reason: "value must be greater than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetConsistentHashingLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RingHashValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RingHashValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetConsistentHashingLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RingHashValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLocalityWeightedLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RingHashValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RingHashValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalityWeightedLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RingHashValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RingHashMultiError(errors) + } + + return nil +} + +// RingHashMultiError is an error wrapping multiple validation errors returned +// by RingHash.ValidateAll() if the designated constraints aren't met. +type RingHashMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RingHashMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RingHashMultiError) AllErrors() []error { return m } + +// RingHashValidationError is the validation error returned by +// RingHash.Validate if the designated constraints aren't met. 
+type RingHashValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RingHashValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RingHashValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RingHashValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RingHashValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RingHashValidationError) ErrorName() string { return "RingHashValidationError" } + +// Error satisfies the builtin error interface +func (e RingHashValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRingHash.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RingHashValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RingHashValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go new file mode 100644 index 0000000000..ccaa74aabc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go @@ -0,0 +1,181 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto + +package wrr_localityv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the wrr_locality LB policy. See the :ref:`load balancing architecture overview +// ` for more information. +type WrrLocality struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The child LB policy to create for endpoint-picking within the chosen locality. 
+ EndpointPickingPolicy *v3.LoadBalancingPolicy `protobuf:"bytes,1,opt,name=endpoint_picking_policy,json=endpointPickingPolicy,proto3" json:"endpoint_picking_policy,omitempty"` +} + +func (x *WrrLocality) Reset() { + *x = WrrLocality{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WrrLocality) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WrrLocality) ProtoMessage() {} + +func (x *WrrLocality) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WrrLocality.ProtoReflect.Descriptor instead. 
+func (*WrrLocality) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescGZIP(), []int{0} +} + +func (x *WrrLocality) GetEndpointPickingPolicy() *v3.LoadBalancingPolicy { + if x != nil { + return x.EndpointPickingPolicy + } + return nil +} + +var File_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDesc = []byte{ + 0x0a, 0x4b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x72, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x77, 0x72, 0x72, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x38, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x77, 0x72, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x76, 0x33, 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7d, 0x0a, 0x0b, 0x57, 0x72, 0x72, 0x4c, 
0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x6e, 0x0a, 0x17, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x15, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0xd4, 0x01, 0x0a, 0x46, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2e, 0x77, 0x72, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x76, 0x33, + 0x42, 0x10, 0x57, 0x72, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x6e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x2f, 0x76, 0x33, 0x3b, 0x77, 0x72, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x76, 0x33, 
0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescData = file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_goTypes = []interface{}{ + (*WrrLocality)(nil), // 0: envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality + (*v3.LoadBalancingPolicy)(nil), // 1: envoy.config.cluster.v3.LoadBalancingPolicy +} +var file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_depIdxs = []int32{ + 1, // 0: envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality.endpoint_picking_policy:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { 
file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_init() } +func file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_init() { + if File_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WrrLocality); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_depIdxs, + MessageInfos: file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto = out.File + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go new file mode 100644 index 0000000000..ec7470e88b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go @@ -0,0 +1,175 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto + +package wrr_localityv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on WrrLocality with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *WrrLocality) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on WrrLocality with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in WrrLocalityMultiError, or +// nil if none found. 
+func (m *WrrLocality) ValidateAll() error { + return m.validate(true) +} + +func (m *WrrLocality) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetEndpointPickingPolicy() == nil { + err := WrrLocalityValidationError{ + field: "EndpointPickingPolicy", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetEndpointPickingPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WrrLocalityValidationError{ + field: "EndpointPickingPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WrrLocalityValidationError{ + field: "EndpointPickingPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEndpointPickingPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WrrLocalityValidationError{ + field: "EndpointPickingPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return WrrLocalityMultiError(errors) + } + + return nil +} + +// WrrLocalityMultiError is an error wrapping multiple validation errors +// returned by WrrLocality.ValidateAll() if the designated constraints aren't met. +type WrrLocalityMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WrrLocalityMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m WrrLocalityMultiError) AllErrors() []error { return m } + +// WrrLocalityValidationError is the validation error returned by +// WrrLocality.Validate if the designated constraints aren't met. +type WrrLocalityValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WrrLocalityValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WrrLocalityValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WrrLocalityValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WrrLocalityValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e WrrLocalityValidationError) ErrorName() string { return "WrrLocalityValidationError" } + +// Error satisfies the builtin error interface +func (e WrrLocalityValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWrrLocality.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WrrLocalityValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WrrLocalityValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go new file mode 100644 index 0000000000..55bec6a611 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go @@ -0,0 +1,153 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: envoy/extensions/rbac/audit_loggers/stream/v3/stream.proto + +package streamv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Custom configuration for the RBAC audit logger that writes log entries +// directly to the operating system's standard output. +// The logger outputs in JSON format and is currently not configurable. +type StdoutAuditLog struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StdoutAuditLog) Reset() { + *x = StdoutAuditLog{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StdoutAuditLog) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StdoutAuditLog) ProtoMessage() {} + +func (x *StdoutAuditLog) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StdoutAuditLog.ProtoReflect.Descriptor instead. 
+func (*StdoutAuditLog) Descriptor() ([]byte, []int) { + return file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescGZIP(), []int{0} +} + +var File_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto protoreflect.FileDescriptor + +var file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDesc = []byte{ + 0x0a, 0x3a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2f, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, + 0x67, 0x67, 0x65, 0x72, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2f, 0x76, 0x33, 0x2f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x2d, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x72, + 0x62, 0x61, 0x63, 0x2e, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, + 0x73, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x10, 0x0a, 0x0e, 0x53, 0x74, + 0x64, 0x6f, 0x75, 0x74, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x42, 0xb3, 0x01, 0x0a, + 0x3b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x72, 0x62, 0x61, 0x63, 0x2e, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, + 0x72, 0x73, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x5d, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 
0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2f, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, + 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2f, 0x76, + 0x33, 0x3b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescOnce sync.Once + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescData = file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDesc +) + +func file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescGZIP() []byte { + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescOnce.Do(func() { + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescData) + }) + return file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescData +} + +var file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_goTypes = []interface{}{ + (*StdoutAuditLog)(nil), // 0: envoy.extensions.rbac.audit_loggers.stream.v3.StdoutAuditLog +} +var file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_init() } +func 
file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_init() { + if File_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StdoutAuditLog); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_goTypes, + DependencyIndexes: file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_depIdxs, + MessageInfos: file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes, + }.Build() + File_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto = out.File + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDesc = nil + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_goTypes = nil + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go new file mode 100644 index 0000000000..015fdd8e63 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go @@ -0,0 +1,136 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/extensions/rbac/audit_loggers/stream/v3/stream.proto + +package streamv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on StdoutAuditLog with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StdoutAuditLog) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StdoutAuditLog with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StdoutAuditLogMultiError, +// or nil if none found. +func (m *StdoutAuditLog) ValidateAll() error { + return m.validate(true) +} + +func (m *StdoutAuditLog) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return StdoutAuditLogMultiError(errors) + } + + return nil +} + +// StdoutAuditLogMultiError is an error wrapping multiple validation errors +// returned by StdoutAuditLog.ValidateAll() if the designated constraints +// aren't met. +type StdoutAuditLogMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StdoutAuditLogMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m StdoutAuditLogMultiError) AllErrors() []error { return m } + +// StdoutAuditLogValidationError is the validation error returned by +// StdoutAuditLog.Validate if the designated constraints aren't met. +type StdoutAuditLogValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StdoutAuditLogValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StdoutAuditLogValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StdoutAuditLogValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StdoutAuditLogValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StdoutAuditLogValidationError) ErrorName() string { return "StdoutAuditLogValidationError" } + +// Error satisfies the builtin error interface +func (e StdoutAuditLogValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStdoutAuditLog.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StdoutAuditLogValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StdoutAuditLogValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go index 420d4939f2..50e6f3745a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go +++ 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/extensions/transport_sockets/tls/v3/cert.proto package tlsv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go index 34e325eb57..3eee2a308f 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/extensions/transport_sockets/tls/v3/common.proto package tlsv3 @@ -12,7 +12,7 @@ import ( v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v31 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -195,12 +195,20 @@ func (CertificateValidationContext_TrustChainVerification) EnumDescriptor() ([]b return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{6, 0} } +// [#next-free-field: 6] type TlsParameters struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for both clients and servers. + // + // TLS protocol versions below TLSv1_2 require setting compatible ciphers with the + // ``cipher_suites`` setting as the default ciphers no longer include compatible ciphers. + // + // .. attention:: + // + // Using TLS protocol versions below TLSv1_2 has serious security considerations and risks. TlsMinimumProtocolVersion TlsParameters_TlsProtocol `protobuf:"varint,1,opt,name=tls_minimum_protocol_version,json=tlsMinimumProtocolVersion,proto3,enum=envoy.extensions.transport_sockets.tls.v3.TlsParameters_TlsProtocol" json:"tls_minimum_protocol_version,omitempty"` // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for // servers. 
@@ -266,6 +274,41 @@ type TlsParameters struct { // // P-256 EcdhCurves []string `protobuf:"bytes,4,rep,name=ecdh_curves,json=ecdhCurves,proto3" json:"ecdh_curves,omitempty"` + // If specified, the TLS connection will only support the specified signature algorithms. + // The list is ordered by preference. + // If not specified, the default signature algorithms defined by BoringSSL will be used. + // + // Default signature algorithms selected by BoringSSL (may be out of date): + // + // .. code-block:: none + // + // ecdsa_secp256r1_sha256 + // rsa_pss_rsae_sha256 + // rsa_pkcs1_sha256 + // ecdsa_secp384r1_sha384 + // rsa_pss_rsae_sha384 + // rsa_pkcs1_sha384 + // rsa_pss_rsae_sha512 + // rsa_pkcs1_sha512 + // rsa_pkcs1_sha1 + // + // Signature algorithms supported by BoringSSL (may be out of date): + // + // .. code-block:: none + // + // rsa_pkcs1_sha256 + // rsa_pkcs1_sha384 + // rsa_pkcs1_sha512 + // ecdsa_secp256r1_sha256 + // ecdsa_secp384r1_sha384 + // ecdsa_secp521r1_sha512 + // rsa_pss_rsae_sha256 + // rsa_pss_rsae_sha384 + // rsa_pss_rsae_sha512 + // ed25519 + // rsa_pkcs1_sha1 + // ecdsa_sha1 + SignatureAlgorithms []string `protobuf:"bytes,5,rep,name=signature_algorithms,json=signatureAlgorithms,proto3" json:"signature_algorithms,omitempty"` } func (x *TlsParameters) Reset() { @@ -328,6 +371,13 @@ func (x *TlsParameters) GetEcdhCurves() []string { return nil } +func (x *TlsParameters) GetSignatureAlgorithms() []string { + if x != nil { + return x.SignatureAlgorithms + } + return nil +} + // BoringSSL private key method configuration. The private key methods are used for external // (potentially asynchronous) signing and decryption operations. Some use cases for private key // methods would be TPM support and TLS acceleration. 
@@ -392,7 +442,7 @@ func (m *PrivateKeyProvider) GetConfigType() isPrivateKeyProvider_ConfigType { return nil } -func (x *PrivateKeyProvider) GetTypedConfig() *any.Any { +func (x *PrivateKeyProvider) GetTypedConfig() *any1.Any { if x, ok := x.GetConfigType().(*PrivateKeyProvider_TypedConfig); ok { return x.TypedConfig } @@ -404,7 +454,7 @@ type isPrivateKeyProvider_ConfigType interface { } type PrivateKeyProvider_TypedConfig struct { - TypedConfig *any.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` + TypedConfig *any1.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` } func (*PrivateKeyProvider_TypedConfig) isPrivateKeyProvider_ConfigType() {} @@ -417,22 +467,22 @@ type TlsCertificate struct { // The TLS certificate chain. // - // If *certificate_chain* is a filesystem path, a watch will be added to the + // If ``certificate_chain`` is a filesystem path, a watch will be added to the // parent directory for any file moves to support rotation. This currently - // only applies to dynamic secrets, when the *TlsCertificate* is delivered via + // only applies to dynamic secrets, when the ``TlsCertificate`` is delivered via // SDS. CertificateChain *v3.DataSource `protobuf:"bytes,1,opt,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` // The TLS private key. // - // If *private_key* is a filesystem path, a watch will be added to the parent + // If ``private_key`` is a filesystem path, a watch will be added to the parent // directory for any file moves to support rotation. This currently only - // applies to dynamic secrets, when the *TlsCertificate* is delivered via SDS. + // applies to dynamic secrets, when the ``TlsCertificate`` is delivered via SDS. PrivateKey *v3.DataSource `protobuf:"bytes,2,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` - // `Pkcs12` data containing TLS certificate, chain, and private key. 
+ // ``Pkcs12`` data containing TLS certificate, chain, and private key. // - // If *pkcs12* is a filesystem path, the file will be read, but no watch will - // be added to the parent directory, since *pkcs12* isn't used by SDS. - // This field is mutually exclusive with *certificate_chain*, *private_key* and *private_key_provider*. + // If ``pkcs12`` is a filesystem path, the file will be read, but no watch will + // be added to the parent directory, since ``pkcs12`` isn't used by SDS. + // This field is mutually exclusive with ``certificate_chain``, ``private_key`` and ``private_key_provider``. // This can't be marked as ``oneof`` due to API compatibility reasons. Setting // both :ref:`private_key `, // :ref:`certificate_chain `, @@ -440,16 +490,16 @@ type TlsCertificate struct { // and :ref:`pkcs12 ` // fields will result in an error. Use :ref:`password // ` - // to specify the password to unprotect the `PKCS12` data, if necessary. + // to specify the password to unprotect the ``PKCS12`` data, if necessary. Pkcs12 *v3.DataSource `protobuf:"bytes,8,opt,name=pkcs12,proto3" json:"pkcs12,omitempty"` - // If specified, updates of file-based *certificate_chain* and *private_key* + // If specified, updates of file-based ``certificate_chain`` and ``private_key`` // sources will be triggered by this watch. The certificate/key pair will be // read together and validated for atomic read consistency (i.e. no // intervening modification occurred between cert/key read, verified by file // hash comparisons). This allows explicit control over the path watched, by // default the parent directories of the filesystem paths in - // *certificate_chain* and *private_key* are watched if this field is not - // specified. This only applies when a *TlsCertificate* is delivered by SDS + // ``certificate_chain`` and ``private_key`` are watched if this field is not + // specified. This only applies when a ``TlsCertificate`` is delivered by SDS // with references to filesystem paths. 
See the :ref:`SDS key rotation // ` documentation for further details. WatchedDirectory *v3.WatchedDirectory `protobuf:"bytes,7,opt,name=watched_directory,json=watchedDirectory,proto3" json:"watched_directory,omitempty"` @@ -782,33 +832,37 @@ type CertificateValidationContext struct { // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be // provided for all certificate authorities in that chain. Failure to do so will result in // verification failure for both revoked and unrevoked certificates from that chain. - // The behavior of requiring all certificates to contain CRLs if any do can be altered by + // The behavior of requiring all certificates to contain CRLs can be altered by // setting :ref:`only_verify_leaf_cert_crl ` // true. If set to true, only the final certificate in the chain undergoes CRL verification. // // See :ref:`the TLS overview ` for a list of common // system CA locations. // - // If *trusted_ca* is a filesystem path, a watch will be added to the parent + // If ``trusted_ca`` is a filesystem path, a watch will be added to the parent // directory for any file moves to support rotation. This currently only - // applies to dynamic secrets, when the *CertificateValidationContext* is + // applies to dynamic secrets, when the ``CertificateValidationContext`` is // delivered via SDS. // - // Only one of *trusted_ca* and *ca_certificate_provider_instance* may be specified. + // X509_V_FLAG_PARTIAL_CHAIN is set by default, so non-root/intermediate ca certificate in ``trusted_ca`` + // can be treated as trust anchor as well. It allows verification with building valid partial chain instead + // of a full chain. + // + // Only one of ``trusted_ca`` and ``ca_certificate_provider_instance`` may be specified. // // [#next-major-version: This field and watched_directory below should ideally be moved into a // separate sub-message, since there's no point in specifying the latter field without this one.] 
TrustedCa *v3.DataSource `protobuf:"bytes,1,opt,name=trusted_ca,json=trustedCa,proto3" json:"trusted_ca,omitempty"` // Certificate provider instance for fetching TLS certificates. // - // Only one of *trusted_ca* and *ca_certificate_provider_instance* may be specified. + // Only one of ``trusted_ca`` and ``ca_certificate_provider_instance`` may be specified. // [#not-implemented-hide:] CaCertificateProviderInstance *CertificateProviderPluginInstance `protobuf:"bytes,13,opt,name=ca_certificate_provider_instance,json=caCertificateProviderInstance,proto3" json:"ca_certificate_provider_instance,omitempty"` - // If specified, updates of a file-based *trusted_ca* source will be triggered + // If specified, updates of a file-based ``trusted_ca`` source will be triggered // by this watch. This allows explicit control over the path watched, by - // default the parent directory of the filesystem path in *trusted_ca* is + // default the parent directory of the filesystem path in ``trusted_ca`` is // watched if this field is not specified. This only applies when a - // *CertificateValidationContext* is delivered by SDS with references to + // ``CertificateValidationContext`` is delivered by SDS with references to // filesystem paths. See the :ref:`SDS key rotation ` // documentation for further details. WatchedDirectory *v3.WatchedDirectory `protobuf:"bytes,11,opt,name=watched_directory,json=watchedDirectory,proto3" json:"watched_directory,omitempty"` @@ -929,11 +983,14 @@ type CertificateValidationContext struct { // If this option is set to true, only the certificate at the end of the // certificate chain will be subject to validation by :ref:`CRL `. OnlyVerifyLeafCertCrl bool `protobuf:"varint,14,opt,name=only_verify_leaf_cert_crl,json=onlyVerifyLeafCertCrl,proto3" json:"only_verify_leaf_cert_crl,omitempty"` - // Config for the max number of intermediate certificates in chain that are parsed during verification. - // This does not include the leaf certificate. 
If configured, and the certificate chain is longer than allowed, the certificates - // above the limit are ignored, and certificate validation will fail. The default limit is 100, - // though this can be system-dependent. - // https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_set_verify_depth.html + // Defines maximum depth of a certificate chain accepted in verification, the default limit is 100, though this can be system-dependent. + // This number does not include the leaf, so a depth of 1 allows the leaf and one CA certificate. If a trusted issuer appears in the chain, + // but in a depth larger than configured, the certificate validation will fail. + // See `BoringSSL SSL_CTX_set_verify_depth ` + // If you use OpenSSL, its behavior is different from BoringSSL, this will define a limit on the number of certificates between the end-entity and trust-anchor certificates. + // Neither the end-entity nor the trust-anchor certificates count against depth. + // See `OpenSSL SSL set_verify_depth `_. 
+ // Trusted issues are specified by setting :ref:`trusted_ca ` MaxVerifyDepth *wrappers.UInt32Value `protobuf:"bytes,16,opt,name=max_verify_depth,json=maxVerifyDepth,proto3" json:"max_verify_depth,omitempty"` } @@ -1099,7 +1156,7 @@ var file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc = []byte 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf2, 0x03, 0x0a, 0x0d, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa5, 0x04, 0x0a, 0x0d, 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x8f, 0x01, 0x0a, 0x1c, 0x74, 0x6c, 0x73, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, @@ -1123,215 +1180,218 @@ var file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc = []byte 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x63, 0x64, 0x68, 0x5f, 0x63, 0x75, 0x72, 0x76, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x63, 0x64, - 0x68, 0x43, 0x75, 0x72, 0x76, 0x65, 0x73, 0x22, 0x4f, 0x0a, 0x0b, 0x54, 0x6c, 0x73, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4c, 0x53, 0x5f, 0x41, 0x55, - 0x54, 0x4f, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x30, 0x10, - 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x0b, - 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x32, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x54, - 0x4c, 0x53, 0x76, 0x31, 
0x5f, 0x33, 0x10, 0x04, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, - 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, - 0x74, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x22, 0xcf, 0x01, 0x0a, 0x12, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, - 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, - 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, - 0x74, 0x68, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x22, 0xc8, 0x05, 0x0a, 0x0e, 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x4d, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 
0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x12, 0x49, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, - 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, - 0xa4, 0x02, 0x01, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, - 0x40, 0x0a, 0x06, 0x70, 0x6b, 0x63, 0x73, 0x31, 0x32, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x06, 0x70, 0x6b, 0x63, 0x73, 0x31, - 0x32, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, + 0x68, 0x43, 0x75, 0x72, 0x76, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x73, 0x22, 0x4f, 0x0a, 0x0b, 0x54, 0x6c, + 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4c, 0x53, + 0x5f, 0x41, 0x55, 0x54, 0x4f, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, + 0x5f, 0x30, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x31, 0x10, + 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x32, 0x10, 0x03, 0x12, 0x0b, + 
0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x33, 0x10, 0x04, 0x3a, 0x26, 0x9a, 0xc5, 0x88, + 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x22, 0xcf, 0x01, 0x0a, 0x12, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, + 0x65, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x0d, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x48, 0x00, 0x52, 0x0b, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, + 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xc8, 0x05, 0x0a, 0x0e, 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x4d, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 
0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x49, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x6f, 0x0a, 0x14, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, - 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, - 0x2e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x52, 0x12, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, - 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, - 0xa4, 0x02, 0x01, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x41, 0x0a, - 0x0b, 0x6f, 0x63, 0x73, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x42, 0x06, + 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, + 0x65, 0x79, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x6b, 0x63, 0x73, 0x31, 0x32, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x52, 0x0a, 0x6f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, 0x65, - 0x12, 0x62, 0x0a, 0x1c, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x1a, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x54, - 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x8b, 0x01, - 0x0a, 0x14, 0x54, 0x6c, 0x73, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x44, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, - 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x3a, 0x2d, 0x9a, 0xc5, - 0x88, 0x1e, 0x28, 0x0a, 
0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x73, 0x0a, 0x21, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x22, 0xa4, 0x02, 0x0a, 0x15, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, - 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x08, 0x73, 0x61, - 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x48, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, - 0x61, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, - 0x20, 0x00, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x48, 0x0a, 0x07, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 
0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x22, 0x50, 0x0a, 0x07, 0x53, 0x61, 0x6e, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x18, 0x0a, 0x14, 0x53, 0x41, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, - 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x10, 0x02, 0x12, 0x07, - 0x0a, 0x03, 0x55, 0x52, 0x49, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x50, 0x5f, 0x41, 0x44, - 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x04, 0x22, 0x90, 0x0c, 0x0a, 0x1c, 0x43, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x57, 0x0a, 0x0a, 0x74, 0x72, 0x75, 0x73, - 0x74, 0x65, 0x64, 0x5f, 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x16, - 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x43, - 0x61, 0x12, 0xad, 0x01, 0x0a, 0x20, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 
0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x16, 0xf2, 0x98, 0xfe, 0x8f, - 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x1d, 0x63, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x06, 0x70, 0x6b, + 0x63, 0x73, 0x31, 0x32, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x6f, 0x0a, 0x14, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 
0x52, 0x12, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, + 0x65, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x08, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x46, 0x0a, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, - 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x6b, - 0x69, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, - 0x06, 0x72, 0x04, 0x10, 0x2c, 0x28, 0x2c, 0x52, 0x15, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x70, 0x6b, 0x69, 0x12, 0x46, - 0x0a, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, - 0x0e, 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, 0x72, 0x04, 0x10, 0x40, 0x28, 0x5f, 0x52, - 0x15, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x82, 0x01, 0x0a, 0x1d, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, - 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, 
0x62, 0x6a, 0x65, - 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x52, 0x19, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x68, 0x0a, 0x17, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, - 0x14, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x24, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x21, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x12, 0x32, 0x0a, 0x03, 0x63, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, + 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x12, 0x41, 0x0a, 0x0b, 0x6f, 0x63, 0x73, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x18, + 0x04, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0a, 0x6f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, + 0x70, 0x6c, 0x65, 0x12, 0x62, 0x0a, 0x1c, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x1a, 0x73, 0x69, 0x67, + 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, + 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x22, 0x8b, 0x01, 0x0a, 0x14, 0x54, 0x6c, 0x73, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, + 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x44, 0x0a, 0x04, 0x6b, 0x65, 0x79, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0xfa, 0x42, 0x05, 0x92, 0x01, + 0x02, 0x08, 0x01, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x3a, + 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 
0x74, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x73, + 0x0a, 0x21, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x22, 0xa4, 0x02, 0x0a, 0x15, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x6f, 0x0a, + 0x08, 0x73, 0x61, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x48, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x53, 0x61, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, + 0x04, 0x10, 0x01, 0x20, 0x00, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x48, + 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 
0x52, + 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x22, 0x50, 0x0a, 0x07, 0x53, 0x61, 0x6e, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x41, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, + 0x05, 0x45, 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x10, + 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x52, 0x49, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x50, + 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x04, 0x22, 0x90, 0x0c, 0x0a, 0x1c, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x57, 0x0a, 0x0a, 0x74, + 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x03, 0x63, 0x72, 0x6c, 0x12, 0x3a, 0x0a, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, - 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x18, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, - 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, - 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x54, - 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, - 0x16, 0x74, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x62, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x38, 0x0a, 0x19, 0x6f, - 0x6e, 0x6c, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x72, 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, - 0x6f, 0x6e, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x65, - 0x72, 0x74, 0x43, 0x72, 0x6c, 0x12, 0x4f, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x44, 0x65, 0x70, 0x74, 0x68, 0x22, 0x46, 0x0a, 0x16, 0x54, 0x72, 
0x75, 0x73, 0x74, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x12, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x5f, 0x54, 0x52, 0x55, 0x53, 0x54, - 0x5f, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x43, 0x43, 0x45, - 0x50, 0x54, 0x5f, 0x55, 0x4e, 0x54, 0x52, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x01, 0x3a, 0x35, - 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, - 0x06, 0x52, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0xa8, 0x01, 0x0a, 0x37, 0x69, - 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, - 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0xba, 0x80, 0xc8, - 0xd1, 0x06, 
0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x42, 0x16, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x74, 0x72, 0x75, 0x73, 0x74, + 0x65, 0x64, 0x43, 0x61, 0x12, 0xad, 0x01, 0x0a, 0x20, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x50, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x16, 0xf2, + 0x98, 0xfe, 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x1d, 0x63, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x46, 0x0a, 0x17, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 
0x63, 0x61, 0x74, 0x65, 0x5f, + 0x73, 0x70, 0x6b, 0x69, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x92, + 0x01, 0x08, 0x22, 0x06, 0x72, 0x04, 0x10, 0x2c, 0x28, 0x2c, 0x52, 0x15, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x70, 0x6b, + 0x69, 0x12, 0x46, 0x0a, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, 0x72, 0x04, 0x10, 0x40, + 0x28, 0x5f, 0x52, 0x15, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x82, 0x01, 0x0a, 0x1d, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x52, 0x19, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x68, + 0x0a, 0x17, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, + 0x74, 
0x63, 0x68, 0x65, 0x72, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x52, 0x14, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x24, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x21, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x65, + 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x32, 0x0a, 0x03, 0x63, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x03, 0x63, 0x72, 0x6c, 0x12, 0x3a, 0x0a, 0x19, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x18, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 
0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x16, 0x74, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x62, 0x0a, 0x17, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x38, + 0x0a, 0x19, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x6c, 0x65, + 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x72, 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x15, 0x6f, 0x6e, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4c, 0x65, 0x61, + 0x66, 0x43, 0x65, 0x72, 0x74, 0x43, 0x72, 0x6c, 0x12, 0x4f, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x0e, 0x6d, 0x61, 
0x78, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x44, 0x65, 0x70, 0x74, 0x68, 0x22, 0x46, 0x0a, 0x16, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x5f, 0x54, 0x52, + 0x55, 0x53, 0x54, 0x5f, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, + 0x43, 0x43, 0x45, 0x50, 0x54, 0x5f, 0x55, 0x4e, 0x54, 0x52, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, + 0x01, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, + 0x08, 0x05, 0x10, 0x06, 0x52, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x73, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0xa8, 0x01, + 0x0a, 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x73, 
0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1359,7 +1419,7 @@ var file_envoy_extensions_transport_sockets_tls_v3_common_proto_goTypes = []inte (*CertificateProviderPluginInstance)(nil), // 7: envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance (*SubjectAltNameMatcher)(nil), // 8: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher (*CertificateValidationContext)(nil), // 9: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext - (*any.Any)(nil), // 10: google.protobuf.Any + (*any1.Any)(nil), // 10: google.protobuf.Any (*v3.DataSource)(nil), // 11: envoy.config.core.v3.DataSource (*v3.WatchedDirectory)(nil), // 12: envoy.config.core.v3.WatchedDirectory (*v31.StringMatcher)(nil), // 13: envoy.type.matcher.v3.StringMatcher diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go index a015061171..a9ddc8f57e 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go @@ -190,9 +190,18 @@ func (m *PrivateKeyProvider) validate(all bool) error { errors = append(errors, err) } - switch m.ConfigType.(type) { - + switch v := m.ConfigType.(type) { case *PrivateKeyProvider_TypedConfig: + if v == nil { + err := PrivateKeyProviderValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := 
interface{}(m.GetTypedConfig()).(type) { @@ -223,6 +232,8 @@ func (m *PrivateKeyProvider) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -923,7 +934,7 @@ func (m *SubjectAltNameMatcher) validate(all bool) error { if _, ok := _SubjectAltNameMatcher_SanType_NotInLookup[m.GetSanType()]; ok { err := SubjectAltNameMatcherValidationError{ field: "SanType", - reason: "value must not be in list [0]", + reason: "value must not be in list [SAN_TYPE_UNSPECIFIED]", } if !all { return err diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go index 75ee6ef953..9bf97896d9 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/extensions/transport_sockets/tls/v3/secret.proto package tlsv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go index 8733bec97c..c34909177a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go @@ -327,9 +327,18 @@ func (m *Secret) validate(all bool) error { // no validation rules for Name - switch m.Type.(type) { - + switch v := m.Type.(type) { case *Secret_TlsCertificate: + if v == nil { + err := SecretValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetTlsCertificate()).(type) { @@ -361,6 +370,16 @@ func (m *Secret) validate(all bool) error { } case *Secret_SessionTicketKeys: + if v == nil { + err := SecretValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetSessionTicketKeys()).(type) { @@ -392,6 +411,16 @@ func (m *Secret) validate(all bool) error { } case *Secret_ValidationContext: + if v == nil { + err := SecretValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetValidationContext()).(type) { @@ -423,6 +452,16 @@ func (m *Secret) validate(all bool) error { } case 
*Secret_GenericSecret: + if v == nil { + err := SecretValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetGenericSecret()).(type) { @@ -453,6 +492,8 @@ func (m *Secret) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go index ece1af481b..d6a43aaaf1 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/extensions/transport_sockets/tls/v3/tls.proto package tlsv3 @@ -177,7 +177,7 @@ func (x *UpstreamTlsContext) GetMaxSessionKeys() *wrappers.UInt32Value { return nil } -// [#next-free-field: 9] +// [#next-free-field: 10] type DownstreamTlsContext struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -204,6 +204,10 @@ type DownstreamTlsContext struct { // an accompanying OCSP response or if the response expires at runtime. // Defaults to LENIENT_STAPLING OcspStaplePolicy DownstreamTlsContext_OcspStaplePolicy `protobuf:"varint,8,opt,name=ocsp_staple_policy,json=ocspStaplePolicy,proto3,enum=envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext_OcspStaplePolicy" json:"ocsp_staple_policy,omitempty"` + // Multiple certificates are allowed in Downstream transport socket to serve different SNI. 
+ // If the client provides SNI but no such cert matched, it will decide to full scan certificates or not based on this config. + // Defaults to false. See more details in :ref:`Multiple TLS certificates `. + FullScanCertsOnSniMismatch *wrappers.BoolValue `protobuf:"bytes,9,opt,name=full_scan_certs_on_sni_mismatch,json=fullScanCertsOnSniMismatch,proto3" json:"full_scan_certs_on_sni_mismatch,omitempty"` } func (x *DownstreamTlsContext) Reset() { @@ -301,6 +305,13 @@ func (x *DownstreamTlsContext) GetOcspStaplePolicy() DownstreamTlsContext_OcspSt return DownstreamTlsContext_LENIENT_STAPLING } +func (x *DownstreamTlsContext) GetFullScanCertsOnSniMismatch() *wrappers.BoolValue { + if x != nil { + return x.FullScanCertsOnSniMismatch + } + return nil +} + type isDownstreamTlsContext_SessionTicketKeysType interface { isDownstreamTlsContext_SessionTicketKeysType() } @@ -414,15 +425,12 @@ type CommonTlsContext struct { // TLS protocol versions, cipher suites etc. TlsParams *TlsParameters `protobuf:"bytes,1,opt,name=tls_params,json=tlsParams,proto3" json:"tls_params,omitempty"` + // Only a single TLS certificate is supported in client contexts. In server contexts, // :ref:`Multiple TLS certificates ` can be associated with the - // same context to allow both RSA and ECDSA certificates. + // same context to allow both RSA and ECDSA certificates and support SNI-based selection. // - // Only a single TLS certificate is supported in client contexts. In server contexts, the first - // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - // used for clients that support ECDSA. - // - // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, - // and *tls_certificate_provider_instance* may be used. + // Only one of ``tls_certificates``, ``tls_certificate_sds_secret_configs``, + // and ``tls_certificate_provider_instance`` may be used. 
// [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's // not legal to put a repeated field in a oneof. In the next major version, we should rework // this to avoid this problem.] @@ -433,16 +441,16 @@ type CommonTlsContext struct { // The same number and types of certificates as :ref:`tls_certificates ` // are valid in the the certificates fetched through this setting. // - // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, - // and *tls_certificate_provider_instance* may be used. + // Only one of ``tls_certificates``, ``tls_certificate_sds_secret_configs``, + // and ``tls_certificate_provider_instance`` may be used. // [#next-major-version: These mutually exclusive fields should ideally be in a oneof, but it's // not legal to put a repeated field in a oneof. In the next major version, we should rework // this to avoid this problem.] TlsCertificateSdsSecretConfigs []*SdsSecretConfig `protobuf:"bytes,6,rep,name=tls_certificate_sds_secret_configs,json=tlsCertificateSdsSecretConfigs,proto3" json:"tls_certificate_sds_secret_configs,omitempty"` // Certificate provider instance for fetching TLS certs. // - // Only one of *tls_certificates*, *tls_certificate_sds_secret_configs*, - // and *tls_certificate_provider_instance* may be used. + // Only one of ``tls_certificates``, ``tls_certificate_sds_secret_configs``, + // and ``tls_certificate_provider_instance`` may be used. // [#not-implemented-hide:] TlsCertificateProviderInstance *CertificateProviderPluginInstance `protobuf:"bytes,14,opt,name=tls_certificate_provider_instance,json=tlsCertificateProviderInstance,proto3" json:"tls_certificate_provider_instance,omitempty"` // Certificate provider for fetching TLS certificates. @@ -853,13 +861,13 @@ type CommonTlsContext_CombinedCertificateValidationContext struct { // fetched/refreshed over the network asynchronously with respect to the TLS handshake. 
ValidationContextSdsSecretConfig *SdsSecretConfig `protobuf:"bytes,2,opt,name=validation_context_sds_secret_config,json=validationContextSdsSecretConfig,proto3" json:"validation_context_sds_secret_config,omitempty"` // Certificate provider for fetching CA certs. This will populate the - // *default_validation_context.trusted_ca* field. + // ``default_validation_context.trusted_ca`` field. // [#not-implemented-hide:] // // Deprecated: Do not use. ValidationContextCertificateProvider *CommonTlsContext_CertificateProvider `protobuf:"bytes,3,opt,name=validation_context_certificate_provider,json=validationContextCertificateProvider,proto3" json:"validation_context_certificate_provider,omitempty"` // Certificate provider instance for fetching CA certs. This will populate the - // *default_validation_context.trusted_ca* field. + // ``default_validation_context.trusted_ca`` field. // [#not-implemented-hide:] // // Deprecated: Do not use. @@ -980,8 +988,8 @@ var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDesc = []byte{ 0x65, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x55, 0x70, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0xea, - 0x07, 0x0a, 0x14, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, + 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0xcb, + 0x08, 0x0a, 0x14, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x69, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 
0x74, 0x65, @@ -1034,232 +1042,237 @@ var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDesc = []byte{ 0x65, 0x78, 0x74, 0x2e, 0x4f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, 0x6f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x22, 0x4e, 0x0a, 0x10, 0x4f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x14, 0x0a, 0x10, 0x4c, 0x45, 0x4e, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 0x53, 0x54, 0x41, 0x50, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, - 0x52, 0x49, 0x43, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, - 0x0f, 0x0a, 0x0b, 0x4d, 0x55, 0x53, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, 0x45, 0x10, 0x02, - 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, - 0x1a, 0x0a, 0x18, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, - 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0xcc, 0x01, 0x0a, 0x09, - 0x54, 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, - 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x4f, 0x0a, 0x13, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, - 0x61, 0x6e, 0x67, 0x65, 
0x52, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x51, 0x0a, 0x14, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, - 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xf4, 0x17, 0x0a, 0x10, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, - 0x57, 0x0a, 0x0a, 0x74, 0x6c, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x12, 0x5f, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x73, 0x6e, 0x69, 0x5f, 0x6d, 0x69, 0x73, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x66, 0x75, 0x6c, 0x6c, 0x53, 0x63, 0x61, 0x6e, 0x43, + 0x65, 0x72, 0x74, 0x73, 0x4f, 0x6e, 0x53, 0x6e, 0x69, 0x4d, 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x22, 0x4e, 0x0a, 0x10, 0x4f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x14, 0x0a, 0x10, 0x4c, 0x45, 0x4e, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x53, + 0x54, 0x52, 0x49, 0x43, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x01, + 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x55, 0x53, 0x54, 0x5f, 0x53, 
0x54, 0x41, 0x50, 0x4c, 0x45, 0x10, + 0x02, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x44, 0x6f, 0x77, 0x6e, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x42, 0x1a, 0x0a, 0x18, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, + 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0xcc, 0x01, 0x0a, + 0x09, 0x54, 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x4f, 0x0a, 0x13, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x51, 0x0a, 0x14, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, + 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xea, 0x17, 0x0a, 0x10, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x12, 0x57, 0x0a, 0x0a, 0x74, 0x6c, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x01, + 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x09, + 0x74, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x64, 0x0a, 0x10, 0x74, 0x6c, 0x73, + 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, - 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x09, 0x74, - 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x64, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, - 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x54, - 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x74, - 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x90, - 0x01, 0x0a, 0x22, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 
0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x10, - 0x02, 0x52, 0x1e, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x73, 0x12, 0x97, 0x01, 0x0a, 0x21, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x1e, 0x74, 0x6c, 0x73, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0xad, 0x01, 0x0a, 0x24, - 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, 0x76, + 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x0f, + 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 
0x65, 0x73, 0x12, + 0x86, 0x01, 0x0a, 0x22, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1e, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x97, 0x01, 0x0a, 0x21, 0x74, 0x6c, 0x73, + 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x52, 0x1e, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x12, 0xad, 0x01, 0x0a, 0x24, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x5f, 
0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, + 0x21, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x12, 0xc6, 0x01, 0x0a, 0x2d, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, 0x0b, 0x18, 0x01, 0x92, - 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x21, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 
0x12, 0xc6, 0x01, 0x0a, 0x2d, - 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x52, 0x29, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x78, 0x0a, 0x12, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x48, 0x00, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x8c, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x64, 0x73, + 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0xa2, 0x01, 0x0a, 0x1b, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, + 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x60, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, + 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x48, 0x00, 0x52, 0x19, + 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0xb5, 0x01, 0x0a, 0x27, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 
0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, + 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, 0x0b, 0x18, 0x01, + 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x48, 0x00, 0x52, 0x24, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x12, 0xce, 0x01, 0x0a, 0x30, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, + 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x48, 0x00, 0x52, 0x2c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 
0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x57, 0x0a, 0x11, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x18, 0x01, - 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x29, 0x74, 0x6c, 0x73, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x12, 0x78, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 
0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, - 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x48, 0x00, 0x52, 0x11, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x8c, - 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53, 0x64, - 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xa2, 0x01, - 0x0a, 0x1b, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x60, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x54, 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4c, 0x6f, + 0x67, 0x1a, 0x92, 0x01, 0x0a, 0x13, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 
0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6d, 0x0a, 0x1b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0xa4, 0x06, 0x0a, 0x24, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, + 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x8f, + 0x01, 0x0a, 0x1a, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 
0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x2e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x48, 0x00, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, - 0x64, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x12, 0xb5, 0x01, 0x0a, 0x27, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, - 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, - 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, - 0x2e, 0x30, 0x48, 0x00, 0x52, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0xce, 0x01, 0x0a, 0x30, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 
0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, - 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, - 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x0b, - 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x48, 0x00, 0x52, 0x2c, 0x76, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x61, - 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x12, 0x57, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x63, 0x75, 0x73, 
0x74, 0x6f, - 0x6d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x07, 0x6b, - 0x65, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x4c, - 0x6f, 0x67, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x1a, 0x92, 0x01, 0x0a, 0x13, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x4f, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x42, 0x0d, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, - 0x6d, 0x0a, 0x1b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x5f, 0x6e, 
0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0xa4, - 0x06, 0x0a, 0x24, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x8f, 0x01, 0x0a, 0x1a, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, - 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x94, 0x01, 0x0a, 0x24, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x12, 0x94, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xb3, 0x01, 0x0a, 0x27, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, + 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0xcc, 0x01, + 0x0a, 0x30, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 
0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, - 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x20, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x2c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0xb3, 0x01, 0x0a, 0x27, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, - 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, - 0x52, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0xcc, 0x01, 0x0a, 0x30, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x57, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, - 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, - 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x2c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x3a, 0x4e, 0x9a, 0xc5, 0x88, 0x1e, 0x49, 0x0a, 0x47, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x2e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 
0x22, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x42, 0x19, 0x0a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x05, 0x10, - 0x06, 0x42, 0xa5, 0x01, 0x0a, 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, - 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, - 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x54, - 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, - 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x3a, 0x4e, 0x9a, 0xc5, + 0x88, 0x1e, 0x49, 0x0a, 0x47, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, + 0x43, 0x65, 
0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x3a, 0x29, 0x9a, 0xc5, + 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x19, 0x0a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0xa5, 0x01, 0x0a, 0x37, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, + 0x73, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x54, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, + 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1307,31 +1320,32 @@ var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_depIdxs = []int32{ 11, // 6: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_ticket_keys_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig 12, 
// 7: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_timeout:type_name -> google.protobuf.Duration 0, // 8: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.ocsp_staple_policy:type_name -> envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.OcspStaplePolicy - 13, // 9: envoy.extensions.transport_sockets.tls.v3.TlsKeyLog.local_address_range:type_name -> envoy.config.core.v3.CidrRange - 13, // 10: envoy.extensions.transport_sockets.tls.v3.TlsKeyLog.remote_address_range:type_name -> envoy.config.core.v3.CidrRange - 14, // 11: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_params:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsParameters - 15, // 12: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificates:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsCertificate - 11, // 13: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_sds_secret_configs:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig - 16, // 14: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance - 5, // 15: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider - 6, // 16: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance - 17, // 17: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext - 11, // 18: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_sds_secret_config:type_name -> 
envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig - 7, // 19: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.combined_validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext - 5, // 20: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider - 6, // 21: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance - 18, // 22: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.custom_handshaker:type_name -> envoy.config.core.v3.TypedExtensionConfig - 3, // 23: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.key_log:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsKeyLog - 18, // 24: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider.typed_config:type_name -> envoy.config.core.v3.TypedExtensionConfig - 17, // 25: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.default_validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext - 11, // 26: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig - 5, // 27: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider - 6, // 28: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_certificate_provider_instance:type_name -> 
envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance - 29, // [29:29] is the sub-list for method output_type - 29, // [29:29] is the sub-list for method input_type - 29, // [29:29] is the sub-list for extension type_name - 29, // [29:29] is the sub-list for extension extendee - 0, // [0:29] is the sub-list for field type_name + 9, // 9: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.full_scan_certs_on_sni_mismatch:type_name -> google.protobuf.BoolValue + 13, // 10: envoy.extensions.transport_sockets.tls.v3.TlsKeyLog.local_address_range:type_name -> envoy.config.core.v3.CidrRange + 13, // 11: envoy.extensions.transport_sockets.tls.v3.TlsKeyLog.remote_address_range:type_name -> envoy.config.core.v3.CidrRange + 14, // 12: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_params:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsParameters + 15, // 13: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificates:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsCertificate + 11, // 14: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_sds_secret_configs:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + 16, // 15: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance + 5, // 16: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + 6, // 17: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + 17, // 18: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context:type_name -> 
envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext + 11, // 19: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + 7, // 20: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.combined_validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext + 5, // 21: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + 6, // 22: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + 18, // 23: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.custom_handshaker:type_name -> envoy.config.core.v3.TypedExtensionConfig + 3, // 24: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.key_log:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsKeyLog + 18, // 25: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider.typed_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 17, // 26: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.default_validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext + 11, // 27: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + 5, // 28: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_certificate_provider:type_name -> 
envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + 6, // 29: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name } func init() { file_envoy_extensions_transport_sockets_tls_v3_tls_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go index 6656de555b..8fdde47658 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go @@ -359,9 +359,47 @@ func (m *DownstreamTlsContext) validate(all bool) error { errors = append(errors, err) } - switch m.SessionTicketKeysType.(type) { + if all { + switch v := interface{}(m.GetFullScanCertsOnSniMismatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "FullScanCertsOnSniMismatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "FullScanCertsOnSniMismatch", + reason: "embedded message failed 
validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFullScanCertsOnSniMismatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DownstreamTlsContextValidationError{ + field: "FullScanCertsOnSniMismatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + switch v := m.SessionTicketKeysType.(type) { case *DownstreamTlsContext_SessionTicketKeys: + if v == nil { + err := DownstreamTlsContextValidationError{ + field: "SessionTicketKeysType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetSessionTicketKeys()).(type) { @@ -393,6 +431,16 @@ func (m *DownstreamTlsContext) validate(all bool) error { } case *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig: + if v == nil { + err := DownstreamTlsContextValidationError{ + field: "SessionTicketKeysType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetSessionTicketKeysSdsSecretConfig()).(type) { @@ -424,8 +472,19 @@ func (m *DownstreamTlsContext) validate(all bool) error { } case *DownstreamTlsContext_DisableStatelessSessionResumption: + if v == nil { + err := DownstreamTlsContextValidationError{ + field: "SessionTicketKeysType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } // no validation rules for DisableStatelessSessionResumption - + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -771,17 +830,6 @@ func (m *CommonTlsContext) validate(all bool) error { } - if len(m.GetTlsCertificateSdsSecretConfigs()) > 2 { - err := CommonTlsContextValidationError{ - field: "TlsCertificateSdsSecretConfigs", - reason: "value must contain no more than 2 item(s)", - } - if !all { - return err - } - errors = append(errors, err) - } - for idx, item := range 
m.GetTlsCertificateSdsSecretConfigs() { _, _ = idx, item @@ -961,9 +1009,18 @@ func (m *CommonTlsContext) validate(all bool) error { } } - switch m.ValidationContextType.(type) { - + switch v := m.ValidationContextType.(type) { case *CommonTlsContext_ValidationContext: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetValidationContext()).(type) { @@ -995,6 +1052,16 @@ func (m *CommonTlsContext) validate(all bool) error { } case *CommonTlsContext_ValidationContextSdsSecretConfig: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetValidationContextSdsSecretConfig()).(type) { @@ -1026,6 +1093,16 @@ func (m *CommonTlsContext) validate(all bool) error { } case *CommonTlsContext_CombinedValidationContext: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetCombinedValidationContext()).(type) { @@ -1057,6 +1134,16 @@ func (m *CommonTlsContext) validate(all bool) error { } case *CommonTlsContext_ValidationContextCertificateProvider: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetValidationContextCertificateProvider()).(type) { @@ -1088,6 +1175,16 @@ func (m *CommonTlsContext) validate(all bool) error { } case *CommonTlsContext_ValidationContextCertificateProviderInstance: + if v == nil { + err := 
CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetValidationContextCertificateProviderInstance()).(type) { @@ -1118,6 +1215,8 @@ func (m *CommonTlsContext) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -1232,9 +1331,20 @@ func (m *CommonTlsContext_CertificateProvider) validate(all bool) error { errors = append(errors, err) } - switch m.Config.(type) { - + oneofConfigPresent := false + switch v := m.Config.(type) { case *CommonTlsContext_CertificateProvider_TypedConfig: + if v == nil { + err := CommonTlsContext_CertificateProviderValidationError{ + field: "Config", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigPresent = true if all { switch v := interface{}(m.GetTypedConfig()).(type) { @@ -1266,6 +1376,9 @@ func (m *CommonTlsContext_CertificateProvider) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofConfigPresent { err := CommonTlsContext_CertificateProviderValidationError{ field: "Config", reason: "value is required", @@ -1274,7 +1387,6 @@ func (m *CommonTlsContext_CertificateProvider) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go index 124f8c26a5..4e9cfbea30 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go +++ 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto package tlsv3 @@ -42,9 +42,9 @@ const ( // trust_bundle: // filename: "envoy.pem" // -// In this example, a presented peer certificate whose SAN matches `spiffe//foo.com/**` is validated against +// In this example, a presented peer certificate whose SAN matches ``spiffe://foo.com/**`` is validated against // the "foo.pem" x.509 certificate. All the trust bundles are isolated from each other, so no trust domain can mint -// a SVID belonging to another trust domain. That means, in this example, a SVID signed by `envoy.com`'s CA with `spiffe//foo.com/**` +// a SVID belonging to another trust domain. That means, in this example, a SVID signed by ``envoy.com``'s CA with ``spiffe://foo.com/**`` // SAN would be rejected since Envoy selects the trust bundle according to the presented SAN before validate the certificate. // // Note that SPIFFE validator inherits and uses the following options from :ref:`CertificateValidationContext `. @@ -105,7 +105,7 @@ type SPIFFECertValidatorConfig_TrustDomain struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Name of the trust domain, `example.com`, `foo.bar.gov` for example. + // Name of the trust domain, ``example.com``, ``foo.bar.gov`` for example. // Note that this must *not* have "spiffe://" prefix. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Specify a data source holding x.509 trust bundle used for validating incoming SVID(s) in this trust domain. 
diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go index 1af5b5dca2..72cbf33d4e 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/service/discovery/v3/ads.proto package discoveryv3 @@ -101,7 +101,7 @@ var file_envoy_service_discovery_v3_ads_proto_rawDesc = []byte{ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, - 0x01, 0x42, 0x90, 0x01, 0x0a, 0x28, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x01, 0x42, 0x8d, 0x01, 0x0a, 0x28, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x41, 0x64, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4d, 0x67, 0x69, 0x74, 0x68, @@ -109,8 +109,8 @@ var file_envoy_service_discovery_v3_ads_proto_rawDesc = []byte{ 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x3b, 0x64, 0x69, - 0x73, 0x63, 0x6f, 0x76, 0x65, 
0x72, 0x79, 0x76, 0x33, 0x88, 0x01, 0x01, 0xba, 0x80, 0xc8, 0xd1, - 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go index 5eac6df0f6..9586ffd58d 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/service/discovery/v3/discovery.proto package discoveryv3 @@ -10,7 +10,7 @@ import ( _ "github.com/cncf/xds/go/udpa/annotations" v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" duration "github.com/golang/protobuf/ptypes/duration" status "google.golang.org/genproto/googleapis/rpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -171,12 +171,12 @@ type DiscoveryRequest struct { // which will be explicitly enumerated in resource_names. ResourceNames []string `protobuf:"bytes,3,rep,name=resource_names,json=resourceNames,proto3" json:"resource_names,omitempty"` // [#not-implemented-hide:] - // Alternative to *resource_names* field that allows specifying dynamic + // Alternative to ``resource_names`` field that allows specifying dynamic // parameters along with each resource name. 
Clients that populate this // field must be able to handle responses from the server where resources // are wrapped in a Resource message. // Note that it is legal for a request to have some resources listed - // in *resource_names* and others in *resource_locators*. + // in ``resource_names`` and others in ``resource_locators``. ResourceLocators []*ResourceLocator `protobuf:"bytes,7,rep,name=resource_locators,json=resourceLocators,proto3" json:"resource_locators,omitempty"` // Type of the resource that is being requested, e.g. // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit @@ -190,7 +190,7 @@ type DiscoveryRequest struct { // delta, where it is populated only for new explicit ACKs). ResponseNonce string `protobuf:"bytes,5,opt,name=response_nonce,json=responseNonce,proto3" json:"response_nonce,omitempty"` // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* provides the Envoy + // failed to update configuration. The ``message`` field in ``error_details`` provides the Envoy // internal exception related to the failure. It is only intended for consumption during manual // debugging, the string provided is not guaranteed to be stable across Envoy versions. ErrorDetail *status.Status `protobuf:"bytes,6,opt,name=error_detail,json=errorDetail,proto3" json:"error_detail,omitempty"` @@ -286,7 +286,7 @@ type DiscoveryResponse struct { // The version of the response data. VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` // The response resources. These resources are typed and depend on the API being called. 
- Resources []*any.Any `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"` + Resources []*any1.Any `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"` // [#not-implemented-hide:] // Canary is used to support two Envoy command line flags: // @@ -357,7 +357,7 @@ func (x *DiscoveryResponse) GetVersionInfo() string { return "" } -func (x *DiscoveryResponse) GetResources() []*any.Any { +func (x *DiscoveryResponse) GetResources() []*any1.Any { if x != nil { return x.Resources } @@ -433,9 +433,9 @@ type DeltaDiscoveryRequest struct { // The node making the request. Node *v3.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` // Type of the resource that is being requested, e.g. - // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This does not need to be set if - // resources are only referenced via *xds_resource_subscribe* and - // *xds_resources_unsubscribe*. + // ``type.googleapis.com/envoy.api.v2.ClusterLoadAssignment``. This does not need to be set if + // resources are only referenced via ``xds_resource_subscribe`` and + // ``xds_resources_unsubscribe``. TypeUrl string `protobuf:"bytes,2,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // DeltaDiscoveryRequests allow the client to add or remove individual // resources to the set of tracked resources in the context of a stream. @@ -462,16 +462,16 @@ type DeltaDiscoveryRequest struct { // A list of Resource names to remove from the list of tracked resources. ResourceNamesUnsubscribe []string `protobuf:"bytes,4,rep,name=resource_names_unsubscribe,json=resourceNamesUnsubscribe,proto3" json:"resource_names_unsubscribe,omitempty"` // [#not-implemented-hide:] - // Alternative to *resource_names_subscribe* field that allows specifying dynamic parameters + // Alternative to ``resource_names_subscribe`` field that allows specifying dynamic parameters // along with each resource name. 
// Note that it is legal for a request to have some resources listed - // in *resource_names_subscribe* and others in *resource_locators_subscribe*. + // in ``resource_names_subscribe`` and others in ``resource_locators_subscribe``. ResourceLocatorsSubscribe []*ResourceLocator `protobuf:"bytes,8,rep,name=resource_locators_subscribe,json=resourceLocatorsSubscribe,proto3" json:"resource_locators_subscribe,omitempty"` // [#not-implemented-hide:] - // Alternative to *resource_names_unsubscribe* field that allows specifying dynamic parameters + // Alternative to ``resource_names_unsubscribe`` field that allows specifying dynamic parameters // along with each resource name. // Note that it is legal for a request to have some resources listed - // in *resource_names_unsubscribe* and others in *resource_locators_unsubscribe*. + // in ``resource_names_unsubscribe`` and others in ``resource_locators_unsubscribe``. ResourceLocatorsUnsubscribe []*ResourceLocator `protobuf:"bytes,9,rep,name=resource_locators_unsubscribe,json=resourceLocatorsUnsubscribe,proto3" json:"resource_locators_unsubscribe,omitempty"` // Informs the server of the versions of the resources the xDS client knows of, to enable the // client to continue the same logical xDS session even in the face of gRPC stream reconnection. @@ -488,7 +488,7 @@ type DeltaDiscoveryRequest struct { // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted. ResponseNonce string `protobuf:"bytes,6,opt,name=response_nonce,json=responseNonce,proto3" json:"response_nonce,omitempty"` // This is populated when the previous :ref:`DiscoveryResponse ` - // failed to update configuration. The *message* field in *error_details* + // failed to update configuration. The ``message`` field in ``error_details`` // provides the Envoy internal exception related to the failure. 
ErrorDetail *status.Status `protobuf:"bytes,7,opt,name=error_detail,json=errorDetail,proto3" json:"error_detail,omitempty"` } @@ -817,19 +817,19 @@ func (*DynamicParameterConstraints_AndConstraints) isDynamicParameterConstraints func (*DynamicParameterConstraints_NotConstraints) isDynamicParameterConstraints_Type() {} -// [#next-free-field: 9] +// [#next-free-field: 10] type Resource struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The resource's name, to distinguish it from others of the same type of resource. - // Only one of *name* or *resource_name* may be set. + // Only one of ``name`` or ``resource_name`` may be set. Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - // Alternative to the *name* field, to be used when the server supports + // Alternative to the ``name`` field, to be used when the server supports // multiple variants of the named resource that are differentiated by // dynamic parameter constraints. - // Only one of *name* or *resource_name* may be set. + // Only one of ``name`` or ``resource_name`` may be set. ResourceName *ResourceName `protobuf:"bytes,8,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` // The aliases are a list of other names that this resource can go by. Aliases []string `protobuf:"bytes,4,rep,name=aliases,proto3" json:"aliases,omitempty"` @@ -837,7 +837,7 @@ type Resource struct { // resources. Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // The resource being tracked. - Resource *any.Any `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` + Resource *any1.Any `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` // Time-to-live value for the resource. For each resource, a timer is started. The timer is // reset each time the resource is received with a new TTL. 
If the resource is received with // no TTL set, the timer is removed for the resource. Upon expiration of the timer, the @@ -855,6 +855,9 @@ type Resource struct { // Cache control properties for the resource. // [#not-implemented-hide:] CacheControl *Resource_CacheControl `protobuf:"bytes,7,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"` + // The Metadata field can be used to provide additional information for the resource. + // E.g. the trace data for debugging. + Metadata *v3.Metadata `protobuf:"bytes,9,opt,name=metadata,proto3" json:"metadata,omitempty"` } func (x *Resource) Reset() { @@ -917,7 +920,7 @@ func (x *Resource) GetVersion() string { return "" } -func (x *Resource) GetResource() *any.Any { +func (x *Resource) GetResource() *any1.Any { if x != nil { return x.Resource } @@ -938,6 +941,13 @@ func (x *Resource) GetCacheControl() *Resource_CacheControl { return nil } +func (x *Resource) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + // A single constraint for a given key. 
type DynamicParameterConstraints_SingleConstraint struct { state protoimpl.MessageState @@ -1386,7 +1396,7 @@ var file_envoy_service_discovery_v3_discovery_proto_rawDesc = []byte{ 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, - 0x61, 0x69, 0x6e, 0x74, 0x73, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xa8, 0x03, + 0x61, 0x69, 0x6e, 0x74, 0x73, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xe4, 0x03, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, @@ -1408,22 +1418,26 @@ var file_envoy_service_discovery_v3_discovery_proto_rawDesc = []byte{ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, - 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x1a, 0x30, 0x0a, 0x0c, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x0c, 0x64, 0x6f, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x64, 0x6f, 0x4e, 0x6f, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, - 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x93, 0x01, 0x0a, 0x28, 0x69, 0x6f, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 
0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, - 0x72, 0x79, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, - 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x64, 0x69, - 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x3b, 0x64, 0x69, 0x73, 0x63, 0x6f, - 0x76, 0x65, 0x72, 0x79, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x1a, 0x30, 0x0a, 0x0c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x0c, 0x64, 0x6f, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x6f, 0x4e, 0x6f, + 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x93, 0x01, 0x0a, 0x28, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 
0x72, 0x79, 0x2e, 0x76, + 0x33, 0x42, 0x0e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x4d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x3b, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -1456,9 +1470,10 @@ var file_envoy_service_discovery_v3_discovery_proto_goTypes = []interface{}{ (*Resource_CacheControl)(nil), // 13: envoy.service.discovery.v3.Resource.CacheControl (*v3.Node)(nil), // 14: envoy.config.core.v3.Node (*status.Status)(nil), // 15: google.rpc.Status - (*any.Any)(nil), // 16: google.protobuf.Any + (*any1.Any)(nil), // 16: google.protobuf.Any (*v3.ControlPlane)(nil), // 17: envoy.config.core.v3.ControlPlane (*duration.Duration)(nil), // 18: google.protobuf.Duration + (*v3.Metadata)(nil), // 19: envoy.config.core.v3.Metadata } var file_envoy_service_discovery_v3_discovery_proto_depIdxs = []int32{ 8, // 0: envoy.service.discovery.v3.ResourceLocator.dynamic_parameters:type_name -> envoy.service.discovery.v3.ResourceLocator.DynamicParametersEntry @@ -1484,13 +1499,14 @@ var file_envoy_service_discovery_v3_discovery_proto_depIdxs = []int32{ 16, // 20: envoy.service.discovery.v3.Resource.resource:type_name -> google.protobuf.Any 18, // 21: envoy.service.discovery.v3.Resource.ttl:type_name -> google.protobuf.Duration 13, // 22: envoy.service.discovery.v3.Resource.cache_control:type_name -> envoy.service.discovery.v3.Resource.CacheControl - 12, // 23: 
envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint.exists:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint.Exists - 6, // 24: envoy.service.discovery.v3.DynamicParameterConstraints.ConstraintList.constraints:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints - 25, // [25:25] is the sub-list for method output_type - 25, // [25:25] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name + 19, // 23: envoy.service.discovery.v3.Resource.metadata:type_name -> envoy.config.core.v3.Metadata + 12, // 24: envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint.exists:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint.Exists + 6, // 25: envoy.service.discovery.v3.DynamicParameterConstraints.ConstraintList.constraints:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints + 26, // [26:26] is the sub-list for method output_type + 26, // [26:26] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name } func init() { file_envoy_service_discovery_v3_discovery_proto_init() } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go index 1e99a32da4..913cdb6094 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go @@ -1101,9 +1101,18 @@ func (m 
*DynamicParameterConstraints) validate(all bool) error { var errors []error - switch m.Type.(type) { - + switch v := m.Type.(type) { case *DynamicParameterConstraints_Constraint: + if v == nil { + err := DynamicParameterConstraintsValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetConstraint()).(type) { @@ -1135,6 +1144,16 @@ func (m *DynamicParameterConstraints) validate(all bool) error { } case *DynamicParameterConstraints_OrConstraints: + if v == nil { + err := DynamicParameterConstraintsValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetOrConstraints()).(type) { @@ -1166,6 +1185,16 @@ func (m *DynamicParameterConstraints) validate(all bool) error { } case *DynamicParameterConstraints_AndConstraints: + if v == nil { + err := DynamicParameterConstraintsValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetAndConstraints()).(type) { @@ -1197,6 +1226,16 @@ func (m *DynamicParameterConstraints) validate(all bool) error { } case *DynamicParameterConstraints_NotConstraints: + if v == nil { + err := DynamicParameterConstraintsValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetNotConstraints()).(type) { @@ -1227,6 +1266,8 @@ func (m *DynamicParameterConstraints) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -1452,6 +1493,35 @@ func (m *Resource) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := 
v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return ResourceMultiError(errors) } @@ -1555,12 +1625,33 @@ func (m *DynamicParameterConstraints_SingleConstraint) validate(all bool) error // no validation rules for Key - switch m.ConstraintType.(type) { - + oneofConstraintTypePresent := false + switch v := m.ConstraintType.(type) { case *DynamicParameterConstraints_SingleConstraint_Value: + if v == nil { + err := DynamicParameterConstraints_SingleConstraintValidationError{ + field: "ConstraintType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConstraintTypePresent = true // no validation rules for Value - case *DynamicParameterConstraints_SingleConstraint_Exists_: + if v == nil { + err := DynamicParameterConstraints_SingleConstraintValidationError{ + field: "ConstraintType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConstraintTypePresent = true if all { switch v := interface{}(m.GetExists()).(type) { @@ -1592,6 +1683,9 @@ func (m *DynamicParameterConstraints_SingleConstraint) validate(all bool) error } default: + _ = v // ensures v is used + } + if !oneofConstraintTypePresent { err := DynamicParameterConstraints_SingleConstraintValidationError{ field: "ConstraintType", reason: "value is 
required", @@ -1600,7 +1694,6 @@ func (m *DynamicParameterConstraints_SingleConstraint) validate(all bool) error return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go index bd8225db4d..75cfaa0efc 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/service/load_stats/v3/lrs.proto package load_statsv3 @@ -94,7 +94,7 @@ type LoadStatsResponse struct { unknownFields protoimpl.UnknownFields // Clusters to report stats for. - // Not populated if *send_all_clusters* is true. + // Not populated if ``send_all_clusters`` is true. Clusters []string `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` // If true, the client should send all clusters it knows about. // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their @@ -103,13 +103,13 @@ type LoadStatsResponse struct { // The minimum interval of time to collect stats over. This is only a minimum for two reasons: // // 1. There may be some delay from when the timer fires until stats sampling occurs. - // 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic - // that is observed in between the corresponding previous *LoadStatsRequest* and this - // *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period + // 2. 
For clusters that were already feature in the previous ``LoadStatsResponse``, any traffic + // that is observed in between the corresponding previous ``LoadStatsRequest`` and this + // ``LoadStatsResponse`` will also be accumulated and billed to the cluster. This avoids a period // of inobservability that might otherwise exists between the messages. New clusters are not // subject to this consideration. LoadReportingInterval *duration.Duration `protobuf:"bytes,2,opt,name=load_reporting_interval,json=loadReportingInterval,proto3" json:"load_reporting_interval,omitempty"` - // Set to *true* if the management server supports endpoint granularity + // Set to ``true`` if the management server supports endpoint granularity // report. ReportEndpointGranularity bool `protobuf:"varint,3,opt,name=report_endpoint_granularity,json=reportEndpointGranularity,proto3" json:"report_endpoint_granularity,omitempty"` } @@ -232,7 +232,7 @@ var file_envoy_service_load_stats_v3_lrs_proto_rawDesc = []byte{ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, - 0x30, 0x01, 0x42, 0x93, 0x01, 0x0a, 0x29, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x30, 0x01, 0x42, 0x90, 0x01, 0x0a, 0x29, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x4c, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4f, 0x67, 0x69, @@ -240,8 +240,8 @@ var file_envoy_service_load_stats_v3_lrs_proto_rawDesc = []byte{ 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2f, 0x76, 0x33, - 0x3b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x76, 0x33, 0x88, 0x01, 0x01, - 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x3b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x76, 0x33, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go index 85ee5b719e..b41fd65803 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/service/status/v3/csds.proto package statusv3 @@ -13,7 +13,7 @@ import ( _ "github.com/envoyproxy/go-control-plane/envoy/annotations" v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" - any "github.com/golang/protobuf/ptypes/any" + any1 "github.com/golang/protobuf/ptypes/any" timestamp "github.com/golang/protobuf/ptypes/timestamp" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" @@ -507,7 +507,7 @@ type ClientConfig_GenericXdsConfig struct { // static bootstrap listeners, this field will be "" VersionInfo string `protobuf:"bytes,3,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` // The xDS resource config. 
Actual content depends on the type - XdsConfig *any.Any `protobuf:"bytes,4,opt,name=xds_config,json=xdsConfig,proto3" json:"xds_config,omitempty"` + XdsConfig *any1.Any `protobuf:"bytes,4,opt,name=xds_config,json=xdsConfig,proto3" json:"xds_config,omitempty"` // Timestamp when the xDS resource was last updated LastUpdated *timestamp.Timestamp `protobuf:"bytes,5,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` // Per xDS resource config status. It is generated by management servers. @@ -580,7 +580,7 @@ func (x *ClientConfig_GenericXdsConfig) GetVersionInfo() string { return "" } -func (x *ClientConfig_GenericXdsConfig) GetXdsConfig() *any.Any { +func (x *ClientConfig_GenericXdsConfig) GetXdsConfig() *any1.Any { if x != nil { return x.XdsConfig } @@ -628,175 +628,176 @@ var file_envoy_service_status_v3_csds_proto_rawDesc = []byte{ 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x73, 0x64, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x65, + 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, - 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xc2, 0x01, 0x0a, 0x13, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0d, 0x6e, - 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 
0x04, - 0x6e, 0x6f, 0x64, 0x65, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xf9, 0x04, 0x0a, 0x0c, 0x50, 0x65, 0x72, - 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc2, 0x01, 0x0a, 0x13, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0c, 0x6e, + 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x04, 0x6e, + 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x3a, 0x32, 0x9a, 0xc5, 0x88, + 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0xf9, 0x04, 0x0a, 0x0c, 0x50, 0x65, 0x72, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x3d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 
0x61, 0x74, 0x75, 0x73, 0x12, + 0x5d, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x4e, + 0x0a, 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0e, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, + 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x0c, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, + 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 
0x67, 0x12, 0x58, 0x0a, 0x13, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x11, 0x73, 0x63, 0x6f, 0x70, 0x65, + 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0f, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2b, 0x9a, 0xc5, + 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x65, 0x72, + 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x10, 0x0a, 0x0e, 0x70, 0x65, 0x72, + 0x5f, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x8b, 0x06, 0x0a, 0x0c, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2e, 0x0a, 0x04, + 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x51, 0x0a, 0x0a, + 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 
0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x58, 0x64, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x52, 0x09, 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x66, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x78, 0x64, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x58, 0x64, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xe2, 0x03, 0x0a, 0x10, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x33, + 0x0a, 0x0a, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x09, 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 
0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x12, 0x4a, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x5d, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0b, 0x18, 0x01, - 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x4e, 0x0a, 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, - 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, - 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0b, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x58, 0x0a, 0x13, 0x73, - 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, - 0x48, 0x00, 0x52, 0x11, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, - 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, - 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x65, 0x72, 0x58, 
0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x42, 0x10, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x5f, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x22, 0x8b, 0x06, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, - 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x51, 0x0a, 0x0a, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x09, 0x78, - 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x66, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, - 0x1a, 0xe2, 0x03, 0x0a, 0x10, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, - 
0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, - 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x33, 0x0a, 0x0a, 0x78, 0x64, 0x73, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x09, 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, - 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, - 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x4a, 0x0a, 0x0d, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, + 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x49, + 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 
0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 
0x74, 0x61, 0x74, - 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, - 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, - 0x4b, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, - 0x53, 0x59, 0x4e, 0x43, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4e, 0x4f, 0x54, 0x5f, - 0x53, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, - 0x03, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x2a, 0x63, 0x0a, 0x12, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, - 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, - 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x02, 0x12, 0x11, - 0x0a, 0x0d, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4e, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, - 0x03, 0x32, 0xb8, 0x02, 0x0a, 0x1c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x77, 0x0a, 0x12, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6c, 0x69, 0x65, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 
0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2c, + 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x53, 0x74, + 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x2b, 0x9a, 0xc5, + 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x4b, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x59, 0x4e, 0x43, 0x45, 0x44, 0x10, 0x01, 0x12, + 0x0c, 0x0a, 0x08, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x09, 0x0a, + 0x05, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x04, 0x2a, 0x63, 0x0a, 0x12, 0x43, 0x6c, 
0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, + 0x10, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x41, 0x43, + 0x4b, 0x45, 0x44, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, + 0x4e, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x03, 0x32, 0xb8, 0x02, 0x0a, 0x1c, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x77, 0x0a, 0x12, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, + 0x30, 0x01, 0x12, 0x9e, 0x01, 0x0a, 0x11, 0x46, 0x65, 0x74, 0x63, 0x68, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x9e, 0x01, 0x0a, 0x11, - 0x46, 0x65, 0x74, 0x63, 0x68, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x22, 0x1b, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69, 0x73, 0x63, - 0x6f, 0x76, 0x65, 0x72, 0x79, 0x3a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x03, 0x3a, 0x01, 0x2a, 0x42, 0x88, 0x01, 0x0a, - 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x43, 0x73, 0x64, 0x73, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x47, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x2f, 0x76, 0x33, 0x3b, 0x73, 0x74, 0x61, 0x74, 
0x75, 0x73, 0x76, 0x33, 0x88, 0x01, 0x01, 0xba, - 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x22, 0x1b, 0x2f, + 0x76, 0x33, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x3a, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x03, + 0x3a, 0x01, 0x2a, 0x42, 0x85, 0x01, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x43, + 0x73, 0x64, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x47, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -828,7 +829,7 @@ var file_envoy_service_status_v3_csds_proto_goTypes = []interface{}{ (*v32.RoutesConfigDump)(nil), // 11: envoy.admin.v3.RoutesConfigDump (*v32.ScopedRoutesConfigDump)(nil), // 12: envoy.admin.v3.ScopedRoutesConfigDump (*v32.EndpointsConfigDump)(nil), // 13: envoy.admin.v3.EndpointsConfigDump - (*any.Any)(nil), // 14: google.protobuf.Any + (*any1.Any)(nil), // 14: google.protobuf.Any (*timestamp.Timestamp)(nil), // 15: google.protobuf.Timestamp (v32.ClientResourceStatus)(0), // 16: envoy.admin.v3.ClientResourceStatus (*v32.UpdateFailureState)(nil), // 17: envoy.admin.v3.UpdateFailureState diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go index 76f3a158ba..846934a596 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go @@ -230,9 +230,18 @@ func (m *PerXdsConfig) validate(all bool) error { // no validation rules for ClientStatus - switch m.PerXdsConfig.(type) { - + switch v := m.PerXdsConfig.(type) { case *PerXdsConfig_ListenerConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetListenerConfig()).(type) { @@ -264,6 +273,16 @@ func (m *PerXdsConfig) validate(all bool) error { } case *PerXdsConfig_ClusterConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetClusterConfig()).(type) { @@ -295,6 +314,16 @@ func (m *PerXdsConfig) validate(all bool) error { } case *PerXdsConfig_RouteConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetRouteConfig()).(type) { @@ -326,6 +355,16 @@ func (m *PerXdsConfig) validate(all bool) error { } case *PerXdsConfig_ScopedRouteConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + 
errors = append(errors, err) + } if all { switch v := interface{}(m.GetScopedRouteConfig()).(type) { @@ -357,6 +396,16 @@ func (m *PerXdsConfig) validate(all bool) error { } case *PerXdsConfig_EndpointConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } if all { switch v := interface{}(m.GetEndpointConfig()).(type) { @@ -387,6 +436,8 @@ func (m *PerXdsConfig) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/hash_policy.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/hash_policy.pb.go deleted file mode 100644 index a6d1ca0d45..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/hash_policy.pb.go +++ /dev/null @@ -1,236 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/hash_policy.proto - -package _type - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Specifies the hash policy -type HashPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to PolicySpecifier: - // *HashPolicy_SourceIp_ - PolicySpecifier isHashPolicy_PolicySpecifier `protobuf_oneof:"policy_specifier"` -} - -func (x *HashPolicy) Reset() { - *x = HashPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_hash_policy_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HashPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HashPolicy) ProtoMessage() {} - -func (x *HashPolicy) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_hash_policy_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HashPolicy.ProtoReflect.Descriptor instead. -func (*HashPolicy) Descriptor() ([]byte, []int) { - return file_envoy_type_hash_policy_proto_rawDescGZIP(), []int{0} -} - -func (m *HashPolicy) GetPolicySpecifier() isHashPolicy_PolicySpecifier { - if m != nil { - return m.PolicySpecifier - } - return nil -} - -func (x *HashPolicy) GetSourceIp() *HashPolicy_SourceIp { - if x, ok := x.GetPolicySpecifier().(*HashPolicy_SourceIp_); ok { - return x.SourceIp - } - return nil -} - -type isHashPolicy_PolicySpecifier interface { - isHashPolicy_PolicySpecifier() -} - -type HashPolicy_SourceIp_ struct { - SourceIp *HashPolicy_SourceIp `protobuf:"bytes,1,opt,name=source_ip,json=sourceIp,proto3,oneof"` -} - -func (*HashPolicy_SourceIp_) isHashPolicy_PolicySpecifier() {} - -// The source IP will be used to compute the hash used by hash-based load balancing -// algorithms. 
-type HashPolicy_SourceIp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *HashPolicy_SourceIp) Reset() { - *x = HashPolicy_SourceIp{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_hash_policy_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HashPolicy_SourceIp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HashPolicy_SourceIp) ProtoMessage() {} - -func (x *HashPolicy_SourceIp) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_hash_policy_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HashPolicy_SourceIp.ProtoReflect.Descriptor instead. -func (*HashPolicy_SourceIp) Descriptor() ([]byte, []int) { - return file_envoy_type_hash_policy_proto_rawDescGZIP(), []int{0, 0} -} - -var File_envoy_type_hash_policy_proto protoreflect.FileDescriptor - -var file_envoy_type_hash_policy_proto_rawDesc = []byte{ - 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x68, 0x61, 0x73, - 0x68, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, - 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x71, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x3e, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, - 0x01, 
0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x70, 0x48, 0x00, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, - 0x1a, 0x0a, 0x0a, 0x08, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x42, 0x17, 0x0a, 0x10, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x68, 0x0a, 0x18, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x42, 0x0f, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_type_hash_policy_proto_rawDescOnce sync.Once - file_envoy_type_hash_policy_proto_rawDescData = file_envoy_type_hash_policy_proto_rawDesc -) - -func file_envoy_type_hash_policy_proto_rawDescGZIP() []byte { - file_envoy_type_hash_policy_proto_rawDescOnce.Do(func() { - file_envoy_type_hash_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_hash_policy_proto_rawDescData) - }) - return file_envoy_type_hash_policy_proto_rawDescData -} - -var file_envoy_type_hash_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_envoy_type_hash_policy_proto_goTypes = []interface{}{ - (*HashPolicy)(nil), // 0: envoy.type.HashPolicy - (*HashPolicy_SourceIp)(nil), // 1: envoy.type.HashPolicy.SourceIp -} -var 
file_envoy_type_hash_policy_proto_depIdxs = []int32{ - 1, // 0: envoy.type.HashPolicy.source_ip:type_name -> envoy.type.HashPolicy.SourceIp - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_envoy_type_hash_policy_proto_init() } -func file_envoy_type_hash_policy_proto_init() { - if File_envoy_type_hash_policy_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_envoy_type_hash_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HashPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_type_hash_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HashPolicy_SourceIp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_type_hash_policy_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*HashPolicy_SourceIp_)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_hash_policy_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_hash_policy_proto_goTypes, - DependencyIndexes: file_envoy_type_hash_policy_proto_depIdxs, - MessageInfos: file_envoy_type_hash_policy_proto_msgTypes, - }.Build() - File_envoy_type_hash_policy_proto = out.File - file_envoy_type_hash_policy_proto_rawDesc = nil - file_envoy_type_hash_policy_proto_goTypes = nil - file_envoy_type_hash_policy_proto_depIdxs = nil -} diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/hash_policy.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/hash_policy.pb.validate.go deleted file mode 100644 index 535f1f77e7..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/hash_policy.pb.validate.go +++ /dev/null @@ -1,282 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/type/hash_policy.proto - -package _type - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on HashPolicy with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *HashPolicy) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HashPolicy with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in HashPolicyMultiError, or -// nil if none found. 
-func (m *HashPolicy) ValidateAll() error { - return m.validate(true) -} - -func (m *HashPolicy) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.PolicySpecifier.(type) { - - case *HashPolicy_SourceIp_: - - if all { - switch v := interface{}(m.GetSourceIp()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, HashPolicyValidationError{ - field: "SourceIp", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, HashPolicyValidationError{ - field: "SourceIp", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetSourceIp()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return HashPolicyValidationError{ - field: "SourceIp", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := HashPolicyValidationError{ - field: "PolicySpecifier", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return HashPolicyMultiError(errors) - } - - return nil -} - -// HashPolicyMultiError is an error wrapping multiple validation errors -// returned by HashPolicy.ValidateAll() if the designated constraints aren't met. -type HashPolicyMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HashPolicyMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HashPolicyMultiError) AllErrors() []error { return m } - -// HashPolicyValidationError is the validation error returned by -// HashPolicy.Validate if the designated constraints aren't met. 
-type HashPolicyValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HashPolicyValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HashPolicyValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HashPolicyValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HashPolicyValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e HashPolicyValidationError) ErrorName() string { return "HashPolicyValidationError" } - -// Error satisfies the builtin error interface -func (e HashPolicyValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHashPolicy.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HashPolicyValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HashPolicyValidationError{} - -// Validate checks the field values on HashPolicy_SourceIp with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *HashPolicy_SourceIp) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HashPolicy_SourceIp with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// HashPolicy_SourceIpMultiError, or nil if none found. 
-func (m *HashPolicy_SourceIp) ValidateAll() error { - return m.validate(true) -} - -func (m *HashPolicy_SourceIp) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(errors) > 0 { - return HashPolicy_SourceIpMultiError(errors) - } - - return nil -} - -// HashPolicy_SourceIpMultiError is an error wrapping multiple validation -// errors returned by HashPolicy_SourceIp.ValidateAll() if the designated -// constraints aren't met. -type HashPolicy_SourceIpMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HashPolicy_SourceIpMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HashPolicy_SourceIpMultiError) AllErrors() []error { return m } - -// HashPolicy_SourceIpValidationError is the validation error returned by -// HashPolicy_SourceIp.Validate if the designated constraints aren't met. -type HashPolicy_SourceIpValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HashPolicy_SourceIpValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HashPolicy_SourceIpValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HashPolicy_SourceIpValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HashPolicy_SourceIpValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e HashPolicy_SourceIpValidationError) ErrorName() string { - return "HashPolicy_SourceIpValidationError" -} - -// Error satisfies the builtin error interface -func (e HashPolicy_SourceIpValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHashPolicy_SourceIp.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HashPolicy_SourceIpValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HashPolicy_SourceIpValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http.pb.go deleted file mode 100644 index 042997781b..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http.pb.go +++ /dev/null @@ -1,143 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/http.proto - -package _type - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type CodecClientType int32 - -const ( - CodecClientType_HTTP1 CodecClientType = 0 - CodecClientType_HTTP2 CodecClientType = 1 - // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - // caution to prevent accidental execution of QUIC code. I.e. 
`!= HTTP2` is no longer sufficient - // to distinguish HTTP1 and HTTP2 traffic. - CodecClientType_HTTP3 CodecClientType = 2 -) - -// Enum value maps for CodecClientType. -var ( - CodecClientType_name = map[int32]string{ - 0: "HTTP1", - 1: "HTTP2", - 2: "HTTP3", - } - CodecClientType_value = map[string]int32{ - "HTTP1": 0, - "HTTP2": 1, - "HTTP3": 2, - } -) - -func (x CodecClientType) Enum() *CodecClientType { - p := new(CodecClientType) - *p = x - return p -} - -func (x CodecClientType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (CodecClientType) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_type_http_proto_enumTypes[0].Descriptor() -} - -func (CodecClientType) Type() protoreflect.EnumType { - return &file_envoy_type_http_proto_enumTypes[0] -} - -func (x CodecClientType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use CodecClientType.Descriptor instead. -func (CodecClientType) EnumDescriptor() ([]byte, []int) { - return file_envoy_type_http_proto_rawDescGZIP(), []int{0} -} - -var File_envoy_type_http_proto protoreflect.FileDescriptor - -var file_envoy_type_http_proto_rawDesc = []byte{ - 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x68, 0x74, 0x74, - 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2a, 0x32, 0x0a, 0x0f, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x31, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, - 0x54, 0x54, 0x50, 0x33, 0x10, 0x02, 0x42, 0x62, 0x0a, 0x18, 0x69, 0x6f, 0x2e, 
0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, - 0x70, 0x65, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_envoy_type_http_proto_rawDescOnce sync.Once - file_envoy_type_http_proto_rawDescData = file_envoy_type_http_proto_rawDesc -) - -func file_envoy_type_http_proto_rawDescGZIP() []byte { - file_envoy_type_http_proto_rawDescOnce.Do(func() { - file_envoy_type_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_http_proto_rawDescData) - }) - return file_envoy_type_http_proto_rawDescData -} - -var file_envoy_type_http_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_type_http_proto_goTypes = []interface{}{ - (CodecClientType)(0), // 0: envoy.type.CodecClientType -} -var file_envoy_type_http_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_envoy_type_http_proto_init() } -func file_envoy_type_http_proto_init() { - if File_envoy_type_http_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_http_proto_rawDesc, - NumEnums: 1, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: 
file_envoy_type_http_proto_goTypes, - DependencyIndexes: file_envoy_type_http_proto_depIdxs, - EnumInfos: file_envoy_type_http_proto_enumTypes, - }.Build() - File_envoy_type_http_proto = out.File - file_envoy_type_http_proto_rawDesc = nil - file_envoy_type_http_proto_goTypes = nil - file_envoy_type_http_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http.pb.validate.go deleted file mode 100644 index 69e27cb1e5..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http.pb.validate.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/type/http.proto - -package _type - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go index f57367f81f..19ebac5799 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/http/v3/cookie.proto package httpv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go index 08c18ec8a5..71b820739f 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/http/v3/path_transformation.proto package httpv3 @@ -201,10 +201,10 @@ func (*PathTransformation_Operation_NormalizePathRFC3986) Descriptor() ([]byte, } // Determines if adjacent slashes are merged into one. A common use case is for a request path -// header. Using this option in `:ref: PathNormalizationOptions -// ` -// will allow incoming requests with path `//dir///file` to match against route with `prefix` -// match set to `/dir`. When using for header transformations, note that slash merging is not +// header. Using this option in ``:ref: PathNormalizationOptions +// `` +// will allow incoming requests with path ``//dir///file`` to match against route with ``prefix`` +// match set to ``/dir``. When using for header transformations, note that slash merging is not // part of `HTTP spec `_ and is provided for convenience. 
type PathTransformation_Operation_MergeSlashes struct { state protoimpl.MessageState diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go index 370f5c2b5e..ce91984f20 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go @@ -193,9 +193,20 @@ func (m *PathTransformation_Operation) validate(all bool) error { var errors []error - switch m.OperationSpecifier.(type) { - + oneofOperationSpecifierPresent := false + switch v := m.OperationSpecifier.(type) { case *PathTransformation_Operation_NormalizePathRfc_3986: + if v == nil { + err := PathTransformation_OperationValidationError{ + field: "OperationSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOperationSpecifierPresent = true if all { switch v := interface{}(m.GetNormalizePathRfc_3986()).(type) { @@ -227,6 +238,17 @@ func (m *PathTransformation_Operation) validate(all bool) error { } case *PathTransformation_Operation_MergeSlashes_: + if v == nil { + err := PathTransformation_OperationValidationError{ + field: "OperationSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOperationSpecifierPresent = true if all { switch v := interface{}(m.GetMergeSlashes()).(type) { @@ -258,6 +280,9 @@ func (m *PathTransformation_Operation) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofOperationSpecifierPresent { err := PathTransformation_OperationValidationError{ field: "OperationSpecifier", reason: "value 
is required", @@ -266,7 +291,6 @@ func (m *PathTransformation_Operation) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http_status.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http_status.pb.go deleted file mode 100644 index 5709341e04..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http_status.pb.go +++ /dev/null @@ -1,453 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/http_status.proto - -package _type - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// HTTP response codes supported in Envoy. -// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml -type StatusCode int32 - -const ( - // Empty - This code not part of the HTTP status code specification, but it is needed for proto - // `enum` type. 
- StatusCode_Empty StatusCode = 0 - StatusCode_Continue StatusCode = 100 - StatusCode_OK StatusCode = 200 - StatusCode_Created StatusCode = 201 - StatusCode_Accepted StatusCode = 202 - StatusCode_NonAuthoritativeInformation StatusCode = 203 - StatusCode_NoContent StatusCode = 204 - StatusCode_ResetContent StatusCode = 205 - StatusCode_PartialContent StatusCode = 206 - StatusCode_MultiStatus StatusCode = 207 - StatusCode_AlreadyReported StatusCode = 208 - StatusCode_IMUsed StatusCode = 226 - StatusCode_MultipleChoices StatusCode = 300 - StatusCode_MovedPermanently StatusCode = 301 - StatusCode_Found StatusCode = 302 - StatusCode_SeeOther StatusCode = 303 - StatusCode_NotModified StatusCode = 304 - StatusCode_UseProxy StatusCode = 305 - StatusCode_TemporaryRedirect StatusCode = 307 - StatusCode_PermanentRedirect StatusCode = 308 - StatusCode_BadRequest StatusCode = 400 - StatusCode_Unauthorized StatusCode = 401 - StatusCode_PaymentRequired StatusCode = 402 - StatusCode_Forbidden StatusCode = 403 - StatusCode_NotFound StatusCode = 404 - StatusCode_MethodNotAllowed StatusCode = 405 - StatusCode_NotAcceptable StatusCode = 406 - StatusCode_ProxyAuthenticationRequired StatusCode = 407 - StatusCode_RequestTimeout StatusCode = 408 - StatusCode_Conflict StatusCode = 409 - StatusCode_Gone StatusCode = 410 - StatusCode_LengthRequired StatusCode = 411 - StatusCode_PreconditionFailed StatusCode = 412 - StatusCode_PayloadTooLarge StatusCode = 413 - StatusCode_URITooLong StatusCode = 414 - StatusCode_UnsupportedMediaType StatusCode = 415 - StatusCode_RangeNotSatisfiable StatusCode = 416 - StatusCode_ExpectationFailed StatusCode = 417 - StatusCode_MisdirectedRequest StatusCode = 421 - StatusCode_UnprocessableEntity StatusCode = 422 - StatusCode_Locked StatusCode = 423 - StatusCode_FailedDependency StatusCode = 424 - StatusCode_UpgradeRequired StatusCode = 426 - StatusCode_PreconditionRequired StatusCode = 428 - StatusCode_TooManyRequests StatusCode = 429 - 
StatusCode_RequestHeaderFieldsTooLarge StatusCode = 431 - StatusCode_InternalServerError StatusCode = 500 - StatusCode_NotImplemented StatusCode = 501 - StatusCode_BadGateway StatusCode = 502 - StatusCode_ServiceUnavailable StatusCode = 503 - StatusCode_GatewayTimeout StatusCode = 504 - StatusCode_HTTPVersionNotSupported StatusCode = 505 - StatusCode_VariantAlsoNegotiates StatusCode = 506 - StatusCode_InsufficientStorage StatusCode = 507 - StatusCode_LoopDetected StatusCode = 508 - StatusCode_NotExtended StatusCode = 510 - StatusCode_NetworkAuthenticationRequired StatusCode = 511 -) - -// Enum value maps for StatusCode. -var ( - StatusCode_name = map[int32]string{ - 0: "Empty", - 100: "Continue", - 200: "OK", - 201: "Created", - 202: "Accepted", - 203: "NonAuthoritativeInformation", - 204: "NoContent", - 205: "ResetContent", - 206: "PartialContent", - 207: "MultiStatus", - 208: "AlreadyReported", - 226: "IMUsed", - 300: "MultipleChoices", - 301: "MovedPermanently", - 302: "Found", - 303: "SeeOther", - 304: "NotModified", - 305: "UseProxy", - 307: "TemporaryRedirect", - 308: "PermanentRedirect", - 400: "BadRequest", - 401: "Unauthorized", - 402: "PaymentRequired", - 403: "Forbidden", - 404: "NotFound", - 405: "MethodNotAllowed", - 406: "NotAcceptable", - 407: "ProxyAuthenticationRequired", - 408: "RequestTimeout", - 409: "Conflict", - 410: "Gone", - 411: "LengthRequired", - 412: "PreconditionFailed", - 413: "PayloadTooLarge", - 414: "URITooLong", - 415: "UnsupportedMediaType", - 416: "RangeNotSatisfiable", - 417: "ExpectationFailed", - 421: "MisdirectedRequest", - 422: "UnprocessableEntity", - 423: "Locked", - 424: "FailedDependency", - 426: "UpgradeRequired", - 428: "PreconditionRequired", - 429: "TooManyRequests", - 431: "RequestHeaderFieldsTooLarge", - 500: "InternalServerError", - 501: "NotImplemented", - 502: "BadGateway", - 503: "ServiceUnavailable", - 504: "GatewayTimeout", - 505: "HTTPVersionNotSupported", - 506: "VariantAlsoNegotiates", - 507: 
"InsufficientStorage", - 508: "LoopDetected", - 510: "NotExtended", - 511: "NetworkAuthenticationRequired", - } - StatusCode_value = map[string]int32{ - "Empty": 0, - "Continue": 100, - "OK": 200, - "Created": 201, - "Accepted": 202, - "NonAuthoritativeInformation": 203, - "NoContent": 204, - "ResetContent": 205, - "PartialContent": 206, - "MultiStatus": 207, - "AlreadyReported": 208, - "IMUsed": 226, - "MultipleChoices": 300, - "MovedPermanently": 301, - "Found": 302, - "SeeOther": 303, - "NotModified": 304, - "UseProxy": 305, - "TemporaryRedirect": 307, - "PermanentRedirect": 308, - "BadRequest": 400, - "Unauthorized": 401, - "PaymentRequired": 402, - "Forbidden": 403, - "NotFound": 404, - "MethodNotAllowed": 405, - "NotAcceptable": 406, - "ProxyAuthenticationRequired": 407, - "RequestTimeout": 408, - "Conflict": 409, - "Gone": 410, - "LengthRequired": 411, - "PreconditionFailed": 412, - "PayloadTooLarge": 413, - "URITooLong": 414, - "UnsupportedMediaType": 415, - "RangeNotSatisfiable": 416, - "ExpectationFailed": 417, - "MisdirectedRequest": 421, - "UnprocessableEntity": 422, - "Locked": 423, - "FailedDependency": 424, - "UpgradeRequired": 426, - "PreconditionRequired": 428, - "TooManyRequests": 429, - "RequestHeaderFieldsTooLarge": 431, - "InternalServerError": 500, - "NotImplemented": 501, - "BadGateway": 502, - "ServiceUnavailable": 503, - "GatewayTimeout": 504, - "HTTPVersionNotSupported": 505, - "VariantAlsoNegotiates": 506, - "InsufficientStorage": 507, - "LoopDetected": 508, - "NotExtended": 510, - "NetworkAuthenticationRequired": 511, - } -) - -func (x StatusCode) Enum() *StatusCode { - p := new(StatusCode) - *p = x - return p -} - -func (x StatusCode) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (StatusCode) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_type_http_status_proto_enumTypes[0].Descriptor() -} - -func (StatusCode) Type() protoreflect.EnumType { - return 
&file_envoy_type_http_status_proto_enumTypes[0] -} - -func (x StatusCode) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use StatusCode.Descriptor instead. -func (StatusCode) EnumDescriptor() ([]byte, []int) { - return file_envoy_type_http_status_proto_rawDescGZIP(), []int{0} -} - -// HTTP status. -type HttpStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Supplies HTTP response code. - Code StatusCode `protobuf:"varint,1,opt,name=code,proto3,enum=envoy.type.StatusCode" json:"code,omitempty"` -} - -func (x *HttpStatus) Reset() { - *x = HttpStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_http_status_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HttpStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HttpStatus) ProtoMessage() {} - -func (x *HttpStatus) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_http_status_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HttpStatus.ProtoReflect.Descriptor instead. 
-func (*HttpStatus) Descriptor() ([]byte, []int) { - return file_envoy_type_http_status_proto_rawDescGZIP(), []int{0} -} - -func (x *HttpStatus) GetCode() StatusCode { - if x != nil { - return x.Code - } - return StatusCode_Empty -} - -var File_envoy_type_http_status_proto protoreflect.FileDescriptor - -var file_envoy_type_http_status_proto_rawDesc = []byte{ - 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x68, 0x74, 0x74, - 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, - 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x44, 0x0a, 0x0a, 0x48, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x36, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, - 0x20, 0x00, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x2a, 0xb5, 0x09, 0x0a, 0x0a, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x10, 0x64, - 0x12, 0x07, 0x0a, 0x02, 0x4f, 0x4b, 0x10, 0xc8, 0x01, 0x12, 0x0c, 0x0a, 0x07, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x64, 0x10, 0xc9, 0x01, 0x12, 0x0d, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x65, 0x64, 0x10, 0xca, 0x01, 0x12, 0x20, 0x0a, 0x1b, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 
0x6e, 0x66, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x10, 0xcb, 0x01, 0x12, 0x0e, 0x0a, 0x09, 0x4e, 0x6f, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x10, 0xcc, 0x01, 0x12, 0x11, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x65, - 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x10, 0xcd, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x50, - 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x10, 0xce, 0x01, - 0x12, 0x10, 0x0a, 0x0b, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x10, - 0xcf, 0x01, 0x12, 0x14, 0x0a, 0x0f, 0x41, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x52, 0x65, 0x70, - 0x6f, 0x72, 0x74, 0x65, 0x64, 0x10, 0xd0, 0x01, 0x12, 0x0b, 0x0a, 0x06, 0x49, 0x4d, 0x55, 0x73, - 0x65, 0x64, 0x10, 0xe2, 0x01, 0x12, 0x14, 0x0a, 0x0f, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, - 0x65, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x73, 0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, 0x10, 0x4d, - 0x6f, 0x76, 0x65, 0x64, 0x50, 0x65, 0x72, 0x6d, 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x10, - 0xad, 0x02, 0x12, 0x0a, 0x0a, 0x05, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x10, 0xae, 0x02, 0x12, 0x0d, - 0x0a, 0x08, 0x53, 0x65, 0x65, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x10, 0xaf, 0x02, 0x12, 0x10, 0x0a, - 0x0b, 0x4e, 0x6f, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x10, 0xb0, 0x02, 0x12, - 0x0d, 0x0a, 0x08, 0x55, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x10, 0xb1, 0x02, 0x12, 0x16, - 0x0a, 0x11, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x52, 0x65, 0x64, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x10, 0xb3, 0x02, 0x12, 0x16, 0x0a, 0x11, 0x50, 0x65, 0x72, 0x6d, 0x61, 0x6e, - 0x65, 0x6e, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x10, 0xb4, 0x02, 0x12, 0x0f, - 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x10, 0x90, 0x03, 0x12, - 0x11, 0x0a, 0x0c, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x10, - 0x91, 0x03, 0x12, 0x14, 0x0a, 0x0f, 0x50, 0x61, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, - 0x75, 
0x69, 0x72, 0x65, 0x64, 0x10, 0x92, 0x03, 0x12, 0x0e, 0x0a, 0x09, 0x46, 0x6f, 0x72, 0x62, - 0x69, 0x64, 0x64, 0x65, 0x6e, 0x10, 0x93, 0x03, 0x12, 0x0d, 0x0a, 0x08, 0x4e, 0x6f, 0x74, 0x46, - 0x6f, 0x75, 0x6e, 0x64, 0x10, 0x94, 0x03, 0x12, 0x15, 0x0a, 0x10, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x4e, 0x6f, 0x74, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x10, 0x95, 0x03, 0x12, 0x12, - 0x0a, 0x0d, 0x4e, 0x6f, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x10, - 0x96, 0x03, 0x12, 0x20, 0x0a, 0x1b, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x41, 0x75, 0x74, 0x68, 0x65, - 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x64, 0x10, 0x97, 0x03, 0x12, 0x13, 0x0a, 0x0e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x10, 0x98, 0x03, 0x12, 0x0d, 0x0a, 0x08, 0x43, 0x6f, 0x6e, - 0x66, 0x6c, 0x69, 0x63, 0x74, 0x10, 0x99, 0x03, 0x12, 0x09, 0x0a, 0x04, 0x47, 0x6f, 0x6e, 0x65, - 0x10, 0x9a, 0x03, 0x12, 0x13, 0x0a, 0x0e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x52, 0x65, 0x71, - 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, 0x9b, 0x03, 0x12, 0x17, 0x0a, 0x12, 0x50, 0x72, 0x65, 0x63, - 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x9c, - 0x03, 0x12, 0x14, 0x0a, 0x0f, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x6f, 0x6f, 0x4c, - 0x61, 0x72, 0x67, 0x65, 0x10, 0x9d, 0x03, 0x12, 0x0f, 0x0a, 0x0a, 0x55, 0x52, 0x49, 0x54, 0x6f, - 0x6f, 0x4c, 0x6f, 0x6e, 0x67, 0x10, 0x9e, 0x03, 0x12, 0x19, 0x0a, 0x14, 0x55, 0x6e, 0x73, 0x75, - 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, - 0x10, 0x9f, 0x03, 0x12, 0x18, 0x0a, 0x13, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4e, 0x6f, 0x74, 0x53, - 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x10, 0xa0, 0x03, 0x12, 0x16, 0x0a, - 0x11, 0x45, 0x78, 0x70, 0x65, 0x63, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, - 0x65, 0x64, 0x10, 0xa1, 0x03, 0x12, 0x17, 
0x0a, 0x12, 0x4d, 0x69, 0x73, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x10, 0xa5, 0x03, 0x12, 0x18, - 0x0a, 0x13, 0x55, 0x6e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x45, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x10, 0xa6, 0x03, 0x12, 0x0b, 0x0a, 0x06, 0x4c, 0x6f, 0x63, 0x6b, - 0x65, 0x64, 0x10, 0xa7, 0x03, 0x12, 0x15, 0x0a, 0x10, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x44, - 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x10, 0xa8, 0x03, 0x12, 0x14, 0x0a, 0x0f, - 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, - 0xaa, 0x03, 0x12, 0x19, 0x0a, 0x14, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, 0xac, 0x03, 0x12, 0x14, 0x0a, - 0x0f, 0x54, 0x6f, 0x6f, 0x4d, 0x61, 0x6e, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, - 0x10, 0xad, 0x03, 0x12, 0x20, 0x0a, 0x1b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x54, 0x6f, 0x6f, 0x4c, 0x61, 0x72, - 0x67, 0x65, 0x10, 0xaf, 0x03, 0x12, 0x18, 0x0a, 0x13, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0xf4, 0x03, 0x12, - 0x13, 0x0a, 0x0e, 0x4e, 0x6f, 0x74, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x65, - 0x64, 0x10, 0xf5, 0x03, 0x12, 0x0f, 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x47, 0x61, 0x74, 0x65, 0x77, - 0x61, 0x79, 0x10, 0xf6, 0x03, 0x12, 0x17, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x10, 0xf7, 0x03, 0x12, 0x13, - 0x0a, 0x0e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x10, 0xf8, 0x03, 0x12, 0x1c, 0x0a, 0x17, 0x48, 0x54, 0x54, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 
0x64, 0x10, 0xf9, - 0x03, 0x12, 0x1a, 0x0a, 0x15, 0x56, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x41, 0x6c, 0x73, 0x6f, - 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x73, 0x10, 0xfa, 0x03, 0x12, 0x18, 0x0a, - 0x13, 0x49, 0x6e, 0x73, 0x75, 0x66, 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x10, 0xfb, 0x03, 0x12, 0x11, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x70, 0x44, - 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x10, 0xfc, 0x03, 0x12, 0x10, 0x0a, 0x0b, 0x4e, 0x6f, - 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x10, 0xfe, 0x03, 0x12, 0x22, 0x0a, 0x1d, - 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, 0xff, 0x03, - 0x42, 0x68, 0x0a, 0x18, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, - 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x42, 0x0f, 0x48, 0x74, - 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, - 0x70, 0x65, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_envoy_type_http_status_proto_rawDescOnce sync.Once - file_envoy_type_http_status_proto_rawDescData = file_envoy_type_http_status_proto_rawDesc -) - -func file_envoy_type_http_status_proto_rawDescGZIP() []byte { - file_envoy_type_http_status_proto_rawDescOnce.Do(func() { - file_envoy_type_http_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_http_status_proto_rawDescData) - }) - return file_envoy_type_http_status_proto_rawDescData -} - -var 
file_envoy_type_http_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_type_http_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_envoy_type_http_status_proto_goTypes = []interface{}{ - (StatusCode)(0), // 0: envoy.type.StatusCode - (*HttpStatus)(nil), // 1: envoy.type.HttpStatus -} -var file_envoy_type_http_status_proto_depIdxs = []int32{ - 0, // 0: envoy.type.HttpStatus.code:type_name -> envoy.type.StatusCode - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_envoy_type_http_status_proto_init() } -func file_envoy_type_http_status_proto_init() { - if File_envoy_type_http_status_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_envoy_type_http_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HttpStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_http_status_proto_rawDesc, - NumEnums: 1, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_http_status_proto_goTypes, - DependencyIndexes: file_envoy_type_http_status_proto_depIdxs, - EnumInfos: file_envoy_type_http_status_proto_enumTypes, - MessageInfos: file_envoy_type_http_status_proto_msgTypes, - }.Build() - File_envoy_type_http_status_proto = out.File - file_envoy_type_http_status_proto_rawDesc = nil - file_envoy_type_http_status_proto_goTypes = nil - file_envoy_type_http_status_proto_depIdxs = nil -} diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http_status.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http_status.pb.validate.go deleted file mode 100644 index 30bb5e1300..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http_status.pb.validate.go +++ /dev/null @@ -1,161 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/type/http_status.proto - -package _type - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on HttpStatus with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *HttpStatus) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on HttpStatus with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in HttpStatusMultiError, or -// nil if none found. 
-func (m *HttpStatus) ValidateAll() error { - return m.validate(true) -} - -func (m *HttpStatus) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if _, ok := _HttpStatus_Code_NotInLookup[m.GetCode()]; ok { - err := HttpStatusValidationError{ - field: "Code", - reason: "value must not be in list [0]", - } - if !all { - return err - } - errors = append(errors, err) - } - - if _, ok := StatusCode_name[int32(m.GetCode())]; !ok { - err := HttpStatusValidationError{ - field: "Code", - reason: "value must be one of the defined enum values", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(errors) > 0 { - return HttpStatusMultiError(errors) - } - - return nil -} - -// HttpStatusMultiError is an error wrapping multiple validation errors -// returned by HttpStatus.ValidateAll() if the designated constraints aren't met. -type HttpStatusMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m HttpStatusMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m HttpStatusMultiError) AllErrors() []error { return m } - -// HttpStatusValidationError is the validation error returned by -// HttpStatus.Validate if the designated constraints aren't met. -type HttpStatusValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e HttpStatusValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HttpStatusValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HttpStatusValidationError) Cause() error { return e.cause } - -// Key function returns key value. 
-func (e HttpStatusValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e HttpStatusValidationError) ErrorName() string { return "HttpStatusValidationError" } - -// Error satisfies the builtin error interface -func (e HttpStatusValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHttpStatus.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HttpStatusValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HttpStatusValidationError{} - -var _HttpStatus_Code_NotInLookup = map[StatusCode]struct{}{ - 0: {}, -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/metadata.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/metadata.pb.go deleted file mode 100644 index f7662b3602..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/metadata.pb.go +++ /dev/null @@ -1,282 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/matcher/metadata.proto - -package matcher - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// [#next-major-version: MetadataMatcher should use StructMatcher] -type MetadataMatcher struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The filter name to retrieve the Struct from the Metadata. - Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` - // The path to retrieve the Value from the Struct. - Path []*MetadataMatcher_PathSegment `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` - // The MetadataMatcher is matched if the value retrieved by path is matched to this value. - Value *ValueMatcher `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *MetadataMatcher) Reset() { - *x = MetadataMatcher{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_metadata_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MetadataMatcher) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MetadataMatcher) ProtoMessage() {} - -func (x *MetadataMatcher) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_metadata_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MetadataMatcher.ProtoReflect.Descriptor instead. 
-func (*MetadataMatcher) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_metadata_proto_rawDescGZIP(), []int{0} -} - -func (x *MetadataMatcher) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *MetadataMatcher) GetPath() []*MetadataMatcher_PathSegment { - if x != nil { - return x.Path - } - return nil -} - -func (x *MetadataMatcher) GetValue() *ValueMatcher { - if x != nil { - return x.Value - } - return nil -} - -// Specifies the segment in a path to retrieve value from Metadata. -// Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that -// if the segment key refers to a list, it has to be the last segment in a path. -type MetadataMatcher_PathSegment struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Segment: - // *MetadataMatcher_PathSegment_Key - Segment isMetadataMatcher_PathSegment_Segment `protobuf_oneof:"segment"` -} - -func (x *MetadataMatcher_PathSegment) Reset() { - *x = MetadataMatcher_PathSegment{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_metadata_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MetadataMatcher_PathSegment) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MetadataMatcher_PathSegment) ProtoMessage() {} - -func (x *MetadataMatcher_PathSegment) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_metadata_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MetadataMatcher_PathSegment.ProtoReflect.Descriptor instead. 
-func (*MetadataMatcher_PathSegment) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_metadata_proto_rawDescGZIP(), []int{0, 0} -} - -func (m *MetadataMatcher_PathSegment) GetSegment() isMetadataMatcher_PathSegment_Segment { - if m != nil { - return m.Segment - } - return nil -} - -func (x *MetadataMatcher_PathSegment) GetKey() string { - if x, ok := x.GetSegment().(*MetadataMatcher_PathSegment_Key); ok { - return x.Key - } - return "" -} - -type isMetadataMatcher_PathSegment_Segment interface { - isMetadataMatcher_PathSegment_Segment() -} - -type MetadataMatcher_PathSegment_Key struct { - // If specified, use the key to retrieve the value in a Struct. - Key string `protobuf:"bytes,1,opt,name=key,proto3,oneof"` -} - -func (*MetadataMatcher_PathSegment_Key) isMetadataMatcher_PathSegment_Segment() {} - -var File_envoy_type_matcher_metadata_proto protoreflect.FileDescriptor - -var file_envoy_type_matcher_metadata_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, - 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xff, 0x01, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x06, 0x66, 0x69, 
0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, - 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x12, 0x40, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x3a, 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, - 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x42, 0x0e, 0x0a, 0x07, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x03, 0xf8, 0x42, - 0x01, 0x42, 0x76, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, - 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 
0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_envoy_type_matcher_metadata_proto_rawDescOnce sync.Once - file_envoy_type_matcher_metadata_proto_rawDescData = file_envoy_type_matcher_metadata_proto_rawDesc -) - -func file_envoy_type_matcher_metadata_proto_rawDescGZIP() []byte { - file_envoy_type_matcher_metadata_proto_rawDescOnce.Do(func() { - file_envoy_type_matcher_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_metadata_proto_rawDescData) - }) - return file_envoy_type_matcher_metadata_proto_rawDescData -} - -var file_envoy_type_matcher_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_envoy_type_matcher_metadata_proto_goTypes = []interface{}{ - (*MetadataMatcher)(nil), // 0: envoy.type.matcher.MetadataMatcher - (*MetadataMatcher_PathSegment)(nil), // 1: envoy.type.matcher.MetadataMatcher.PathSegment - (*ValueMatcher)(nil), // 2: envoy.type.matcher.ValueMatcher -} -var file_envoy_type_matcher_metadata_proto_depIdxs = []int32{ - 1, // 0: envoy.type.matcher.MetadataMatcher.path:type_name -> envoy.type.matcher.MetadataMatcher.PathSegment - 2, // 1: envoy.type.matcher.MetadataMatcher.value:type_name -> envoy.type.matcher.ValueMatcher - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_envoy_type_matcher_metadata_proto_init() } -func file_envoy_type_matcher_metadata_proto_init() { - if File_envoy_type_matcher_metadata_proto != nil { - return - } - file_envoy_type_matcher_value_proto_init() - if !protoimpl.UnsafeEnabled { - file_envoy_type_matcher_metadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*MetadataMatcher); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_type_matcher_metadata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetadataMatcher_PathSegment); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_type_matcher_metadata_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*MetadataMatcher_PathSegment_Key)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_matcher_metadata_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_matcher_metadata_proto_goTypes, - DependencyIndexes: file_envoy_type_matcher_metadata_proto_depIdxs, - MessageInfos: file_envoy_type_matcher_metadata_proto_msgTypes, - }.Build() - File_envoy_type_matcher_metadata_proto = out.File - file_envoy_type_matcher_metadata_proto_rawDesc = nil - file_envoy_type_matcher_metadata_proto_goTypes = nil - file_envoy_type_matcher_metadata_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/metadata.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/metadata.pb.validate.go deleted file mode 100644 index d003f6befe..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/metadata.pb.validate.go +++ /dev/null @@ -1,362 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. 
-// source: envoy/type/matcher/metadata.proto - -package matcher - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on MetadataMatcher with the rules defined -// in the proto definition for this message. If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *MetadataMatcher) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on MetadataMatcher with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// MetadataMatcherMultiError, or nil if none found. 
-func (m *MetadataMatcher) ValidateAll() error { - return m.validate(true) -} - -func (m *MetadataMatcher) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if utf8.RuneCountInString(m.GetFilter()) < 1 { - err := MetadataMatcherValidationError{ - field: "Filter", - reason: "value length must be at least 1 runes", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(m.GetPath()) < 1 { - err := MetadataMatcherValidationError{ - field: "Path", - reason: "value must contain at least 1 item(s)", - } - if !all { - return err - } - errors = append(errors, err) - } - - for idx, item := range m.GetPath() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, MetadataMatcherValidationError{ - field: fmt.Sprintf("Path[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, MetadataMatcherValidationError{ - field: fmt.Sprintf("Path[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return MetadataMatcherValidationError{ - field: fmt.Sprintf("Path[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if m.GetValue() == nil { - err := MetadataMatcherValidationError{ - field: "Value", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetValue()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, MetadataMatcherValidationError{ - field: "Value", - reason: "embedded message failed validation", - cause: err, - }) - } - 
case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, MetadataMatcherValidationError{ - field: "Value", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return MetadataMatcherValidationError{ - field: "Value", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return MetadataMatcherMultiError(errors) - } - - return nil -} - -// MetadataMatcherMultiError is an error wrapping multiple validation errors -// returned by MetadataMatcher.ValidateAll() if the designated constraints -// aren't met. -type MetadataMatcherMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m MetadataMatcherMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m MetadataMatcherMultiError) AllErrors() []error { return m } - -// MetadataMatcherValidationError is the validation error returned by -// MetadataMatcher.Validate if the designated constraints aren't met. -type MetadataMatcherValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e MetadataMatcherValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e MetadataMatcherValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e MetadataMatcherValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e MetadataMatcherValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e MetadataMatcherValidationError) ErrorName() string { return "MetadataMatcherValidationError" } - -// Error satisfies the builtin error interface -func (e MetadataMatcherValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sMetadataMatcher.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = MetadataMatcherValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = MetadataMatcherValidationError{} - -// Validate checks the field values on MetadataMatcher_PathSegment with the -// rules defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *MetadataMatcher_PathSegment) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on MetadataMatcher_PathSegment with the -// rules defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// MetadataMatcher_PathSegmentMultiError, or nil if none found. 
-func (m *MetadataMatcher_PathSegment) ValidateAll() error { - return m.validate(true) -} - -func (m *MetadataMatcher_PathSegment) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.Segment.(type) { - - case *MetadataMatcher_PathSegment_Key: - - if utf8.RuneCountInString(m.GetKey()) < 1 { - err := MetadataMatcher_PathSegmentValidationError{ - field: "Key", - reason: "value length must be at least 1 runes", - } - if !all { - return err - } - errors = append(errors, err) - } - - default: - err := MetadataMatcher_PathSegmentValidationError{ - field: "Segment", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return MetadataMatcher_PathSegmentMultiError(errors) - } - - return nil -} - -// MetadataMatcher_PathSegmentMultiError is an error wrapping multiple -// validation errors returned by MetadataMatcher_PathSegment.ValidateAll() if -// the designated constraints aren't met. -type MetadataMatcher_PathSegmentMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m MetadataMatcher_PathSegmentMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m MetadataMatcher_PathSegmentMultiError) AllErrors() []error { return m } - -// MetadataMatcher_PathSegmentValidationError is the validation error returned -// by MetadataMatcher_PathSegment.Validate if the designated constraints -// aren't met. -type MetadataMatcher_PathSegmentValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e MetadataMatcher_PathSegmentValidationError) Field() string { return e.field } - -// Reason function returns reason value. 
-func (e MetadataMatcher_PathSegmentValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e MetadataMatcher_PathSegmentValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e MetadataMatcher_PathSegmentValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e MetadataMatcher_PathSegmentValidationError) ErrorName() string { - return "MetadataMatcher_PathSegmentValidationError" -} - -// Error satisfies the builtin error interface -func (e MetadataMatcher_PathSegmentValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sMetadataMatcher_PathSegment.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = MetadataMatcher_PathSegmentValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = MetadataMatcher_PathSegmentValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/node.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/node.pb.go deleted file mode 100644 index dfaac49b7d..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/node.pb.go +++ /dev/null @@ -1,182 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/matcher/node.proto - -package matcher - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Specifies the way to match a Node. -// The match follows AND semantics. -type NodeMatcher struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Specifies match criteria on the node id. - NodeId *StringMatcher `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` - // Specifies match criteria on the node metadata. - NodeMetadatas []*StructMatcher `protobuf:"bytes,2,rep,name=node_metadatas,json=nodeMetadatas,proto3" json:"node_metadatas,omitempty"` -} - -func (x *NodeMatcher) Reset() { - *x = NodeMatcher{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_node_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NodeMatcher) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NodeMatcher) ProtoMessage() {} - -func (x *NodeMatcher) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_node_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NodeMatcher.ProtoReflect.Descriptor instead. 
-func (*NodeMatcher) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_node_proto_rawDescGZIP(), []int{0} -} - -func (x *NodeMatcher) GetNodeId() *StringMatcher { - if x != nil { - return x.NodeId - } - return nil -} - -func (x *NodeMatcher) GetNodeMetadatas() []*StructMatcher { - if x != nil { - return x.NodeMetadatas - } - return nil -} - -var File_envoy_type_matcher_node_proto protoreflect.FileDescriptor - -var file_envoy_type_matcher_node_proto_rawDesc = []byte{ - 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x93, 0x01, 0x0a, 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, - 0x12, 0x48, 0x0a, 0x0e, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d, 0x6e, 0x6f, 0x64, - 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x73, 0x42, 0x72, 0x0a, 0x20, 0x69, 0x6f, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x09, - 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_type_matcher_node_proto_rawDescOnce sync.Once - file_envoy_type_matcher_node_proto_rawDescData = file_envoy_type_matcher_node_proto_rawDesc -) - -func file_envoy_type_matcher_node_proto_rawDescGZIP() []byte { - file_envoy_type_matcher_node_proto_rawDescOnce.Do(func() { - file_envoy_type_matcher_node_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_node_proto_rawDescData) - }) - return file_envoy_type_matcher_node_proto_rawDescData -} - -var file_envoy_type_matcher_node_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_envoy_type_matcher_node_proto_goTypes = []interface{}{ - (*NodeMatcher)(nil), // 0: envoy.type.matcher.NodeMatcher - (*StringMatcher)(nil), // 1: envoy.type.matcher.StringMatcher - (*StructMatcher)(nil), // 2: envoy.type.matcher.StructMatcher -} -var file_envoy_type_matcher_node_proto_depIdxs = []int32{ - 1, // 0: 
envoy.type.matcher.NodeMatcher.node_id:type_name -> envoy.type.matcher.StringMatcher - 2, // 1: envoy.type.matcher.NodeMatcher.node_metadatas:type_name -> envoy.type.matcher.StructMatcher - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_envoy_type_matcher_node_proto_init() } -func file_envoy_type_matcher_node_proto_init() { - if File_envoy_type_matcher_node_proto != nil { - return - } - file_envoy_type_matcher_string_proto_init() - file_envoy_type_matcher_struct_proto_init() - if !protoimpl.UnsafeEnabled { - file_envoy_type_matcher_node_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NodeMatcher); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_matcher_node_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_matcher_node_proto_goTypes, - DependencyIndexes: file_envoy_type_matcher_node_proto_depIdxs, - MessageInfos: file_envoy_type_matcher_node_proto_msgTypes, - }.Build() - File_envoy_type_matcher_node_proto = out.File - file_envoy_type_matcher_node_proto_rawDesc = nil - file_envoy_type_matcher_node_proto_goTypes = nil - file_envoy_type_matcher_node_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/node.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/node.pb.validate.go deleted file mode 100644 index a789aba138..0000000000 --- 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/node.pb.validate.go +++ /dev/null @@ -1,198 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/type/matcher/node.proto - -package matcher - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on NodeMatcher with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *NodeMatcher) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on NodeMatcher with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in NodeMatcherMultiError, or -// nil if none found. 
-func (m *NodeMatcher) ValidateAll() error { - return m.validate(true) -} - -func (m *NodeMatcher) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetNodeId()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, NodeMatcherValidationError{ - field: "NodeId", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, NodeMatcherValidationError{ - field: "NodeId", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetNodeId()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return NodeMatcherValidationError{ - field: "NodeId", - reason: "embedded message failed validation", - cause: err, - } - } - } - - for idx, item := range m.GetNodeMetadatas() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, NodeMatcherValidationError{ - field: fmt.Sprintf("NodeMetadatas[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, NodeMatcherValidationError{ - field: fmt.Sprintf("NodeMetadatas[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return NodeMatcherValidationError{ - field: fmt.Sprintf("NodeMetadatas[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return NodeMatcherMultiError(errors) - } - - return nil -} - -// NodeMatcherMultiError is an error 
wrapping multiple validation errors -// returned by NodeMatcher.ValidateAll() if the designated constraints aren't met. -type NodeMatcherMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m NodeMatcherMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m NodeMatcherMultiError) AllErrors() []error { return m } - -// NodeMatcherValidationError is the validation error returned by -// NodeMatcher.Validate if the designated constraints aren't met. -type NodeMatcherValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e NodeMatcherValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e NodeMatcherValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e NodeMatcherValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e NodeMatcherValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e NodeMatcherValidationError) ErrorName() string { return "NodeMatcherValidationError" } - -// Error satisfies the builtin error interface -func (e NodeMatcherValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sNodeMatcher.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = NodeMatcherValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = NodeMatcherValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/number.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/number.pb.go deleted file mode 100644 index a201c248ec..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/number.pb.go +++ /dev/null @@ -1,206 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/matcher/number.proto - -package matcher - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _type "github.com/envoyproxy/go-control-plane/envoy/type" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Specifies the way to match a double value. 
-type DoubleMatcher struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to MatchPattern: - // *DoubleMatcher_Range - // *DoubleMatcher_Exact - MatchPattern isDoubleMatcher_MatchPattern `protobuf_oneof:"match_pattern"` -} - -func (x *DoubleMatcher) Reset() { - *x = DoubleMatcher{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_number_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DoubleMatcher) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DoubleMatcher) ProtoMessage() {} - -func (x *DoubleMatcher) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_number_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DoubleMatcher.ProtoReflect.Descriptor instead. -func (*DoubleMatcher) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_number_proto_rawDescGZIP(), []int{0} -} - -func (m *DoubleMatcher) GetMatchPattern() isDoubleMatcher_MatchPattern { - if m != nil { - return m.MatchPattern - } - return nil -} - -func (x *DoubleMatcher) GetRange() *_type.DoubleRange { - if x, ok := x.GetMatchPattern().(*DoubleMatcher_Range); ok { - return x.Range - } - return nil -} - -func (x *DoubleMatcher) GetExact() float64 { - if x, ok := x.GetMatchPattern().(*DoubleMatcher_Exact); ok { - return x.Exact - } - return 0 -} - -type isDoubleMatcher_MatchPattern interface { - isDoubleMatcher_MatchPattern() -} - -type DoubleMatcher_Range struct { - // If specified, the input double value must be in the range specified here. - // Note: The range is using half-open interval semantics [start, end). 
- Range *_type.DoubleRange `protobuf:"bytes,1,opt,name=range,proto3,oneof"` -} - -type DoubleMatcher_Exact struct { - // If specified, the input double value must be equal to the value specified here. - Exact float64 `protobuf:"fixed64,2,opt,name=exact,proto3,oneof"` -} - -func (*DoubleMatcher_Range) isDoubleMatcher_MatchPattern() {} - -func (*DoubleMatcher_Exact) isDoubleMatcher_MatchPattern() {} - -var File_envoy_type_matcher_number_proto protoreflect.FileDescriptor - -var file_envoy_type_matcher_number_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x2f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, - 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6e, 0x0a, 0x0d, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, - 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x42, - 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 
0x6e, - 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x74, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x0b, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, - 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, - 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_type_matcher_number_proto_rawDescOnce sync.Once - file_envoy_type_matcher_number_proto_rawDescData = file_envoy_type_matcher_number_proto_rawDesc -) - -func file_envoy_type_matcher_number_proto_rawDescGZIP() []byte { - file_envoy_type_matcher_number_proto_rawDescOnce.Do(func() { - file_envoy_type_matcher_number_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_number_proto_rawDescData) - }) - return file_envoy_type_matcher_number_proto_rawDescData -} - -var file_envoy_type_matcher_number_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_envoy_type_matcher_number_proto_goTypes = []interface{}{ - (*DoubleMatcher)(nil), // 0: envoy.type.matcher.DoubleMatcher - (*_type.DoubleRange)(nil), // 1: envoy.type.DoubleRange -} -var file_envoy_type_matcher_number_proto_depIdxs = []int32{ - 1, // 0: envoy.type.matcher.DoubleMatcher.range:type_name -> envoy.type.DoubleRange - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name 
-} - -func init() { file_envoy_type_matcher_number_proto_init() } -func file_envoy_type_matcher_number_proto_init() { - if File_envoy_type_matcher_number_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_envoy_type_matcher_number_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DoubleMatcher); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_type_matcher_number_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*DoubleMatcher_Range)(nil), - (*DoubleMatcher_Exact)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_matcher_number_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_matcher_number_proto_goTypes, - DependencyIndexes: file_envoy_type_matcher_number_proto_depIdxs, - MessageInfos: file_envoy_type_matcher_number_proto_msgTypes, - }.Build() - File_envoy_type_matcher_number_proto = out.File - file_envoy_type_matcher_number_proto_rawDesc = nil - file_envoy_type_matcher_number_proto_goTypes = nil - file_envoy_type_matcher_number_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/number.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/number.pb.validate.go deleted file mode 100644 index 4001fa4c4f..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/number.pb.validate.go +++ /dev/null @@ -1,184 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. 
-// source: envoy/type/matcher/number.proto - -package matcher - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on DoubleMatcher with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *DoubleMatcher) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on DoubleMatcher with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in DoubleMatcherMultiError, or -// nil if none found. 
-func (m *DoubleMatcher) ValidateAll() error { - return m.validate(true) -} - -func (m *DoubleMatcher) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.MatchPattern.(type) { - - case *DoubleMatcher_Range: - - if all { - switch v := interface{}(m.GetRange()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, DoubleMatcherValidationError{ - field: "Range", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, DoubleMatcherValidationError{ - field: "Range", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetRange()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return DoubleMatcherValidationError{ - field: "Range", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *DoubleMatcher_Exact: - // no validation rules for Exact - - default: - err := DoubleMatcherValidationError{ - field: "MatchPattern", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return DoubleMatcherMultiError(errors) - } - - return nil -} - -// DoubleMatcherMultiError is an error wrapping multiple validation errors -// returned by DoubleMatcher.ValidateAll() if the designated constraints -// aren't met. -type DoubleMatcherMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m DoubleMatcherMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. 
-func (m DoubleMatcherMultiError) AllErrors() []error { return m } - -// DoubleMatcherValidationError is the validation error returned by -// DoubleMatcher.Validate if the designated constraints aren't met. -type DoubleMatcherValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e DoubleMatcherValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e DoubleMatcherValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e DoubleMatcherValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e DoubleMatcherValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e DoubleMatcherValidationError) ErrorName() string { return "DoubleMatcherValidationError" } - -// Error satisfies the builtin error interface -func (e DoubleMatcherValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sDoubleMatcher.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = DoubleMatcherValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = DoubleMatcherValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/path.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/path.pb.go deleted file mode 100644 index 95665ff9ff..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/path.pb.go +++ /dev/null @@ -1,190 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/matcher/path.proto - -package matcher - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Specifies the way to match a path on HTTP request. -type PathMatcher struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Rule: - // *PathMatcher_Path - Rule isPathMatcher_Rule `protobuf_oneof:"rule"` -} - -func (x *PathMatcher) Reset() { - *x = PathMatcher{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_path_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PathMatcher) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PathMatcher) ProtoMessage() {} - -func (x *PathMatcher) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_path_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PathMatcher.ProtoReflect.Descriptor instead. 
-func (*PathMatcher) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_path_proto_rawDescGZIP(), []int{0} -} - -func (m *PathMatcher) GetRule() isPathMatcher_Rule { - if m != nil { - return m.Rule - } - return nil -} - -func (x *PathMatcher) GetPath() *StringMatcher { - if x, ok := x.GetRule().(*PathMatcher_Path); ok { - return x.Path - } - return nil -} - -type isPathMatcher_Rule interface { - isPathMatcher_Rule() -} - -type PathMatcher_Path struct { - // The `path` must match the URL path portion of the :path header. The query and fragment - // string (if present) are removed in the URL path portion. - // For example, the path */data* will match the *:path* header */data#fragment?param=value*. - Path *StringMatcher `protobuf:"bytes,1,opt,name=path,proto3,oneof"` -} - -func (*PathMatcher_Path) isPathMatcher_Rule() {} - -var File_envoy_type_matcher_path_proto protoreflect.FileDescriptor - -var file_envoy_type_matcher_path_proto_rawDesc = []byte{ - 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x2f, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5d, 0x0a, 0x0b, - 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, - 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x0b, - 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x72, 0x0a, 0x20, 0x69, - 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, - 0x09, 0x50, 0x61, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, - 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, - 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_type_matcher_path_proto_rawDescOnce sync.Once - file_envoy_type_matcher_path_proto_rawDescData = file_envoy_type_matcher_path_proto_rawDesc -) - -func file_envoy_type_matcher_path_proto_rawDescGZIP() []byte { - file_envoy_type_matcher_path_proto_rawDescOnce.Do(func() { - file_envoy_type_matcher_path_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_path_proto_rawDescData) - }) - return file_envoy_type_matcher_path_proto_rawDescData -} - -var file_envoy_type_matcher_path_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_envoy_type_matcher_path_proto_goTypes = []interface{}{ - (*PathMatcher)(nil), // 0: envoy.type.matcher.PathMatcher - (*StringMatcher)(nil), // 1: envoy.type.matcher.StringMatcher -} -var file_envoy_type_matcher_path_proto_depIdxs = []int32{ - 
1, // 0: envoy.type.matcher.PathMatcher.path:type_name -> envoy.type.matcher.StringMatcher - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_envoy_type_matcher_path_proto_init() } -func file_envoy_type_matcher_path_proto_init() { - if File_envoy_type_matcher_path_proto != nil { - return - } - file_envoy_type_matcher_string_proto_init() - if !protoimpl.UnsafeEnabled { - file_envoy_type_matcher_path_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathMatcher); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_type_matcher_path_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*PathMatcher_Path)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_matcher_path_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_matcher_path_proto_goTypes, - DependencyIndexes: file_envoy_type_matcher_path_proto_depIdxs, - MessageInfos: file_envoy_type_matcher_path_proto_msgTypes, - }.Build() - File_envoy_type_matcher_path_proto = out.File - file_envoy_type_matcher_path_proto_rawDesc = nil - file_envoy_type_matcher_path_proto_goTypes = nil - file_envoy_type_matcher_path_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/path.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/path.pb.validate.go deleted file mode 100644 index 1b547779ba..0000000000 --- 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/path.pb.validate.go +++ /dev/null @@ -1,191 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/type/matcher/path.proto - -package matcher - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on PathMatcher with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *PathMatcher) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on PathMatcher with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in PathMatcherMultiError, or -// nil if none found. 
-func (m *PathMatcher) ValidateAll() error { - return m.validate(true) -} - -func (m *PathMatcher) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.Rule.(type) { - - case *PathMatcher_Path: - - if m.GetPath() == nil { - err := PathMatcherValidationError{ - field: "Path", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetPath()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, PathMatcherValidationError{ - field: "Path", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, PathMatcherValidationError{ - field: "Path", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetPath()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PathMatcherValidationError{ - field: "Path", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := PathMatcherValidationError{ - field: "Rule", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return PathMatcherMultiError(errors) - } - - return nil -} - -// PathMatcherMultiError is an error wrapping multiple validation errors -// returned by PathMatcher.ValidateAll() if the designated constraints aren't met. -type PathMatcherMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m PathMatcherMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. 
-func (m PathMatcherMultiError) AllErrors() []error { return m } - -// PathMatcherValidationError is the validation error returned by -// PathMatcher.Validate if the designated constraints aren't met. -type PathMatcherValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PathMatcherValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e PathMatcherValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PathMatcherValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e PathMatcherValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e PathMatcherValidationError) ErrorName() string { return "PathMatcherValidationError" } - -// Error satisfies the builtin error interface -func (e PathMatcherValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPathMatcher.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PathMatcherValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PathMatcherValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/regex.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/regex.pb.go deleted file mode 100644 index 7eddca0e58..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/regex.pb.go +++ /dev/null @@ -1,387 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/matcher/regex.proto - -package matcher - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// A regex matcher designed for safety when used with untrusted input. -type RegexMatcher struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to EngineType: - // *RegexMatcher_GoogleRe2 - EngineType isRegexMatcher_EngineType `protobuf_oneof:"engine_type"` - // The regex match string. The string must be supported by the configured engine. - Regex string `protobuf:"bytes,2,opt,name=regex,proto3" json:"regex,omitempty"` -} - -func (x *RegexMatcher) Reset() { - *x = RegexMatcher{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_regex_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RegexMatcher) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RegexMatcher) ProtoMessage() {} - -func (x *RegexMatcher) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_regex_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RegexMatcher.ProtoReflect.Descriptor instead. 
-func (*RegexMatcher) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_regex_proto_rawDescGZIP(), []int{0} -} - -func (m *RegexMatcher) GetEngineType() isRegexMatcher_EngineType { - if m != nil { - return m.EngineType - } - return nil -} - -func (x *RegexMatcher) GetGoogleRe2() *RegexMatcher_GoogleRE2 { - if x, ok := x.GetEngineType().(*RegexMatcher_GoogleRe2); ok { - return x.GoogleRe2 - } - return nil -} - -func (x *RegexMatcher) GetRegex() string { - if x != nil { - return x.Regex - } - return "" -} - -type isRegexMatcher_EngineType interface { - isRegexMatcher_EngineType() -} - -type RegexMatcher_GoogleRe2 struct { - // Google's RE2 regex engine. - GoogleRe2 *RegexMatcher_GoogleRE2 `protobuf:"bytes,1,opt,name=google_re2,json=googleRe2,proto3,oneof"` -} - -func (*RegexMatcher_GoogleRe2) isRegexMatcher_EngineType() {} - -// Describes how to match a string and then produce a new string using a regular -// expression and a substitution string. -type RegexMatchAndSubstitute struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The regular expression used to find portions of a string (hereafter called - // the "subject string") that should be replaced. When a new string is - // produced during the substitution operation, the new string is initially - // the same as the subject string, but then all matches in the subject string - // are replaced by the substitution string. If replacing all matches isn't - // desired, regular expression anchors can be used to ensure a single match, - // so as to replace just one occurrence of a pattern. Capture groups can be - // used in the pattern to extract portions of the subject string, and then - // referenced in the substitution string. 
- Pattern *RegexMatcher `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"` - // The string that should be substituted into matching portions of the - // subject string during a substitution operation to produce a new string. - // Capture groups in the pattern can be referenced in the substitution - // string. Note, however, that the syntax for referring to capture groups is - // defined by the chosen regular expression engine. Google's `RE2 - // `_ regular expression engine uses a - // backslash followed by the capture group number to denote a numbered - // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers - // to capture group 2. - Substitution string `protobuf:"bytes,2,opt,name=substitution,proto3" json:"substitution,omitempty"` -} - -func (x *RegexMatchAndSubstitute) Reset() { - *x = RegexMatchAndSubstitute{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_regex_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RegexMatchAndSubstitute) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RegexMatchAndSubstitute) ProtoMessage() {} - -func (x *RegexMatchAndSubstitute) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_regex_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RegexMatchAndSubstitute.ProtoReflect.Descriptor instead. 
-func (*RegexMatchAndSubstitute) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_regex_proto_rawDescGZIP(), []int{1} -} - -func (x *RegexMatchAndSubstitute) GetPattern() *RegexMatcher { - if x != nil { - return x.Pattern - } - return nil -} - -func (x *RegexMatchAndSubstitute) GetSubstitution() string { - if x != nil { - return x.Substitution - } - return "" -} - -// Google's `RE2 `_ regex engine. The regex string must adhere to -// the documented `syntax `_. The engine is designed -// to complete execution in linear time as well as limit the amount of memory used. -// -// Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` -// and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or -// complexity that a compiled regex can have before an exception is thrown or a warning is -// logged, respectively. `re2.max_program_size.error_level` defaults to 100, and -// `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). -// -// Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, -// which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented -// each time the program size exceeds the warn level threshold. -type RegexMatcher_GoogleRE2 struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This field controls the RE2 "program size" which is a rough estimate of how complex a - // compiled regex is to evaluate. A regex that has a program size greater than the configured - // value will fail to compile. In this case, the configured max program size can be increased - // or the regex can be simplified. If not specified, the default is 100. - // - // This field is deprecated; regexp validation should be performed on the management server - // instead of being done by each individual client. 
- // - // Deprecated: Do not use. - MaxProgramSize *wrappers.UInt32Value `protobuf:"bytes,1,opt,name=max_program_size,json=maxProgramSize,proto3" json:"max_program_size,omitempty"` -} - -func (x *RegexMatcher_GoogleRE2) Reset() { - *x = RegexMatcher_GoogleRE2{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_regex_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RegexMatcher_GoogleRE2) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RegexMatcher_GoogleRE2) ProtoMessage() {} - -func (x *RegexMatcher_GoogleRE2) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_regex_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RegexMatcher_GoogleRE2.ProtoReflect.Descriptor instead. -func (*RegexMatcher_GoogleRE2) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_regex_proto_rawDescGZIP(), []int{0, 0} -} - -// Deprecated: Do not use. 
-func (x *RegexMatcher_GoogleRE2) GetMaxProgramSize() *wrappers.UInt32Value { - if x != nil { - return x.MaxProgramSize - } - return nil -} - -var File_envoy_type_matcher_regex_proto protoreflect.FileDescriptor - -var file_envoy_type_matcher_regex_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x12, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf1, 0x01, 0x0a, - 0x0c, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x55, 0x0a, - 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x32, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45, 0x32, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x52, 0x65, 0x32, 0x12, 0x1d, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x05, 0x72, 0x65, - 0x67, 0x65, 0x78, 0x1a, 0x57, 0x0a, 0x09, 0x47, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x52, 0x45, 0x32, - 0x12, 0x4a, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0e, 0x6d, 0x61, - 0x78, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x12, 0x0a, 0x0b, - 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, - 0x22, 0x79, 0x0a, 0x17, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, - 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x70, - 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, - 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x74, - 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, - 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x73, 0x0a, 0x20, 0x69, - 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, - 0x0a, 0x52, 0x65, 0x67, 0x65, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, - 
0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_type_matcher_regex_proto_rawDescOnce sync.Once - file_envoy_type_matcher_regex_proto_rawDescData = file_envoy_type_matcher_regex_proto_rawDesc -) - -func file_envoy_type_matcher_regex_proto_rawDescGZIP() []byte { - file_envoy_type_matcher_regex_proto_rawDescOnce.Do(func() { - file_envoy_type_matcher_regex_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_regex_proto_rawDescData) - }) - return file_envoy_type_matcher_regex_proto_rawDescData -} - -var file_envoy_type_matcher_regex_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_envoy_type_matcher_regex_proto_goTypes = []interface{}{ - (*RegexMatcher)(nil), // 0: envoy.type.matcher.RegexMatcher - (*RegexMatchAndSubstitute)(nil), // 1: envoy.type.matcher.RegexMatchAndSubstitute - (*RegexMatcher_GoogleRE2)(nil), // 2: envoy.type.matcher.RegexMatcher.GoogleRE2 - (*wrappers.UInt32Value)(nil), // 3: google.protobuf.UInt32Value -} -var file_envoy_type_matcher_regex_proto_depIdxs = []int32{ - 2, // 0: envoy.type.matcher.RegexMatcher.google_re2:type_name -> envoy.type.matcher.RegexMatcher.GoogleRE2 - 0, // 1: envoy.type.matcher.RegexMatchAndSubstitute.pattern:type_name -> envoy.type.matcher.RegexMatcher - 3, // 2: envoy.type.matcher.RegexMatcher.GoogleRE2.max_program_size:type_name -> google.protobuf.UInt32Value - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_envoy_type_matcher_regex_proto_init() } -func file_envoy_type_matcher_regex_proto_init() { - if File_envoy_type_matcher_regex_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - 
file_envoy_type_matcher_regex_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RegexMatcher); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_type_matcher_regex_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RegexMatchAndSubstitute); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_type_matcher_regex_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RegexMatcher_GoogleRE2); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_type_matcher_regex_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*RegexMatcher_GoogleRe2)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_matcher_regex_proto_rawDesc, - NumEnums: 0, - NumMessages: 3, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_matcher_regex_proto_goTypes, - DependencyIndexes: file_envoy_type_matcher_regex_proto_depIdxs, - MessageInfos: file_envoy_type_matcher_regex_proto_msgTypes, - }.Build() - File_envoy_type_matcher_regex_proto = out.File - file_envoy_type_matcher_regex_proto_rawDesc = nil - file_envoy_type_matcher_regex_proto_goTypes = nil - file_envoy_type_matcher_regex_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/regex.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/regex.pb.validate.go deleted file mode 100644 index b5d063bfa0..0000000000 --- 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/regex.pb.validate.go +++ /dev/null @@ -1,466 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/type/matcher/regex.proto - -package matcher - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on RegexMatcher with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *RegexMatcher) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on RegexMatcher with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in RegexMatcherMultiError, or -// nil if none found. 
-func (m *RegexMatcher) ValidateAll() error { - return m.validate(true) -} - -func (m *RegexMatcher) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if utf8.RuneCountInString(m.GetRegex()) < 1 { - err := RegexMatcherValidationError{ - field: "Regex", - reason: "value length must be at least 1 runes", - } - if !all { - return err - } - errors = append(errors, err) - } - - switch m.EngineType.(type) { - - case *RegexMatcher_GoogleRe2: - - if m.GetGoogleRe2() == nil { - err := RegexMatcherValidationError{ - field: "GoogleRe2", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetGoogleRe2()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RegexMatcherValidationError{ - field: "GoogleRe2", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RegexMatcherValidationError{ - field: "GoogleRe2", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetGoogleRe2()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RegexMatcherValidationError{ - field: "GoogleRe2", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := RegexMatcherValidationError{ - field: "EngineType", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return RegexMatcherMultiError(errors) - } - - return nil -} - -// RegexMatcherMultiError is an error wrapping multiple validation errors -// returned by RegexMatcher.ValidateAll() if the designated constraints aren't met. -type RegexMatcherMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m RegexMatcherMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m RegexMatcherMultiError) AllErrors() []error { return m } - -// RegexMatcherValidationError is the validation error returned by -// RegexMatcher.Validate if the designated constraints aren't met. -type RegexMatcherValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RegexMatcherValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RegexMatcherValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RegexMatcherValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e RegexMatcherValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e RegexMatcherValidationError) ErrorName() string { return "RegexMatcherValidationError" } - -// Error satisfies the builtin error interface -func (e RegexMatcherValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRegexMatcher.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RegexMatcherValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RegexMatcherValidationError{} - -// Validate checks the field values on RegexMatchAndSubstitute with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. 
-func (m *RegexMatchAndSubstitute) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on RegexMatchAndSubstitute with the -// rules defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// RegexMatchAndSubstituteMultiError, or nil if none found. -func (m *RegexMatchAndSubstitute) ValidateAll() error { - return m.validate(true) -} - -func (m *RegexMatchAndSubstitute) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetPattern()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RegexMatchAndSubstituteValidationError{ - field: "Pattern", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RegexMatchAndSubstituteValidationError{ - field: "Pattern", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetPattern()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RegexMatchAndSubstituteValidationError{ - field: "Pattern", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for Substitution - - if len(errors) > 0 { - return RegexMatchAndSubstituteMultiError(errors) - } - - return nil -} - -// RegexMatchAndSubstituteMultiError is an error wrapping multiple validation -// errors returned by RegexMatchAndSubstitute.ValidateAll() if the designated -// constraints aren't met. -type RegexMatchAndSubstituteMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m RegexMatchAndSubstituteMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m RegexMatchAndSubstituteMultiError) AllErrors() []error { return m } - -// RegexMatchAndSubstituteValidationError is the validation error returned by -// RegexMatchAndSubstitute.Validate if the designated constraints aren't met. -type RegexMatchAndSubstituteValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RegexMatchAndSubstituteValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RegexMatchAndSubstituteValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RegexMatchAndSubstituteValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e RegexMatchAndSubstituteValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e RegexMatchAndSubstituteValidationError) ErrorName() string { - return "RegexMatchAndSubstituteValidationError" -} - -// Error satisfies the builtin error interface -func (e RegexMatchAndSubstituteValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRegexMatchAndSubstitute.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RegexMatchAndSubstituteValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RegexMatchAndSubstituteValidationError{} - -// Validate checks the field values on RegexMatcher_GoogleRE2 with the rules -// defined in the proto definition for this message. 
If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *RegexMatcher_GoogleRE2) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on RegexMatcher_GoogleRE2 with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// RegexMatcher_GoogleRE2MultiError, or nil if none found. -func (m *RegexMatcher_GoogleRE2) ValidateAll() error { - return m.validate(true) -} - -func (m *RegexMatcher_GoogleRE2) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if all { - switch v := interface{}(m.GetMaxProgramSize()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, RegexMatcher_GoogleRE2ValidationError{ - field: "MaxProgramSize", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, RegexMatcher_GoogleRE2ValidationError{ - field: "MaxProgramSize", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetMaxProgramSize()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RegexMatcher_GoogleRE2ValidationError{ - field: "MaxProgramSize", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return RegexMatcher_GoogleRE2MultiError(errors) - } - - return nil -} - -// RegexMatcher_GoogleRE2MultiError is an error wrapping multiple validation -// errors returned by RegexMatcher_GoogleRE2.ValidateAll() if the designated -// constraints aren't met. -type RegexMatcher_GoogleRE2MultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m RegexMatcher_GoogleRE2MultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m RegexMatcher_GoogleRE2MultiError) AllErrors() []error { return m } - -// RegexMatcher_GoogleRE2ValidationError is the validation error returned by -// RegexMatcher_GoogleRE2.Validate if the designated constraints aren't met. -type RegexMatcher_GoogleRE2ValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RegexMatcher_GoogleRE2ValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RegexMatcher_GoogleRE2ValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RegexMatcher_GoogleRE2ValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e RegexMatcher_GoogleRE2ValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e RegexMatcher_GoogleRE2ValidationError) ErrorName() string { - return "RegexMatcher_GoogleRE2ValidationError" -} - -// Error satisfies the builtin error interface -func (e RegexMatcher_GoogleRE2ValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRegexMatcher_GoogleRE2.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RegexMatcher_GoogleRE2ValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RegexMatcher_GoogleRE2ValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/string.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/string.pb.go deleted file mode 100644 index 4a1a2dc112..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/string.pb.go +++ /dev/null @@ -1,376 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/matcher/string.proto - -package matcher - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/go-control-plane/envoy/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Specifies the way to match a string. 
-// [#next-free-field: 7] -type StringMatcher struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to MatchPattern: - // *StringMatcher_Exact - // *StringMatcher_Prefix - // *StringMatcher_Suffix - // *StringMatcher_Regex - // *StringMatcher_SafeRegex - MatchPattern isStringMatcher_MatchPattern `protobuf_oneof:"match_pattern"` - // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no - // effect for the safe_regex match. - // For example, the matcher *data* will match both input string *Data* and *data* if set to true. - IgnoreCase bool `protobuf:"varint,6,opt,name=ignore_case,json=ignoreCase,proto3" json:"ignore_case,omitempty"` -} - -func (x *StringMatcher) Reset() { - *x = StringMatcher{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_string_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StringMatcher) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StringMatcher) ProtoMessage() {} - -func (x *StringMatcher) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_string_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StringMatcher.ProtoReflect.Descriptor instead. 
-func (*StringMatcher) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_string_proto_rawDescGZIP(), []int{0} -} - -func (m *StringMatcher) GetMatchPattern() isStringMatcher_MatchPattern { - if m != nil { - return m.MatchPattern - } - return nil -} - -func (x *StringMatcher) GetExact() string { - if x, ok := x.GetMatchPattern().(*StringMatcher_Exact); ok { - return x.Exact - } - return "" -} - -func (x *StringMatcher) GetPrefix() string { - if x, ok := x.GetMatchPattern().(*StringMatcher_Prefix); ok { - return x.Prefix - } - return "" -} - -func (x *StringMatcher) GetSuffix() string { - if x, ok := x.GetMatchPattern().(*StringMatcher_Suffix); ok { - return x.Suffix - } - return "" -} - -// Deprecated: Do not use. -func (x *StringMatcher) GetRegex() string { - if x, ok := x.GetMatchPattern().(*StringMatcher_Regex); ok { - return x.Regex - } - return "" -} - -func (x *StringMatcher) GetSafeRegex() *RegexMatcher { - if x, ok := x.GetMatchPattern().(*StringMatcher_SafeRegex); ok { - return x.SafeRegex - } - return nil -} - -func (x *StringMatcher) GetIgnoreCase() bool { - if x != nil { - return x.IgnoreCase - } - return false -} - -type isStringMatcher_MatchPattern interface { - isStringMatcher_MatchPattern() -} - -type StringMatcher_Exact struct { - // The input string must match exactly the string specified here. - // - // Examples: - // - // * *abc* only matches the value *abc*. - Exact string `protobuf:"bytes,1,opt,name=exact,proto3,oneof"` -} - -type StringMatcher_Prefix struct { - // The input string must have the prefix specified here. - // Note: empty prefix is not allowed, please use regex instead. - // - // Examples: - // - // * *abc* matches the value *abc.xyz* - Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3,oneof"` -} - -type StringMatcher_Suffix struct { - // The input string must have the suffix specified here. - // Note: empty prefix is not allowed, please use regex instead. 
- // - // Examples: - // - // * *abc* matches the value *xyz.abc* - Suffix string `protobuf:"bytes,3,opt,name=suffix,proto3,oneof"` -} - -type StringMatcher_Regex struct { - // The input string must match the regular expression specified here. - // The regex grammar is defined `here - // `_. - // - // Examples: - // - // * The regex ``\d{3}`` matches the value *123* - // * The regex ``\d{3}`` does not match the value *1234* - // * The regex ``\d{3}`` does not match the value *123.456* - // - // .. attention:: - // This field has been deprecated in favor of `safe_regex` as it is not safe for use with - // untrusted input in all cases. - // - // Deprecated: Do not use. - Regex string `protobuf:"bytes,4,opt,name=regex,proto3,oneof"` -} - -type StringMatcher_SafeRegex struct { - // The input string must match the regular expression specified here. - SafeRegex *RegexMatcher `protobuf:"bytes,5,opt,name=safe_regex,json=safeRegex,proto3,oneof"` -} - -func (*StringMatcher_Exact) isStringMatcher_MatchPattern() {} - -func (*StringMatcher_Prefix) isStringMatcher_MatchPattern() {} - -func (*StringMatcher_Suffix) isStringMatcher_MatchPattern() {} - -func (*StringMatcher_Regex) isStringMatcher_MatchPattern() {} - -func (*StringMatcher_SafeRegex) isStringMatcher_MatchPattern() {} - -// Specifies a list of ways to match a string. 
-type ListStringMatcher struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Patterns []*StringMatcher `protobuf:"bytes,1,rep,name=patterns,proto3" json:"patterns,omitempty"` -} - -func (x *ListStringMatcher) Reset() { - *x = ListStringMatcher{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_string_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListStringMatcher) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListStringMatcher) ProtoMessage() {} - -func (x *ListStringMatcher) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_string_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListStringMatcher.ProtoReflect.Descriptor instead. 
-func (*ListStringMatcher) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_string_proto_rawDescGZIP(), []int{1} -} - -func (x *ListStringMatcher) GetPatterns() []*StringMatcher { - if x != nil { - return x.Patterns - } - return nil -} - -var File_envoy_type_matcher_string_proto protoreflect.FileDescriptor - -var file_envoy_type_matcher_string_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, - 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x9b, 0x02, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x12, 0x21, 0x0a, 0x06, - 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x21, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 
0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, - 0x69, 0x78, 0x12, 0x28, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x10, 0x18, 0x01, 0xfa, 0x42, 0x05, 0x72, 0x03, 0x28, 0x80, 0x08, 0xb8, 0xee, 0xf2, - 0xd2, 0x05, 0x01, 0x48, 0x00, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x12, 0x4b, 0x0a, 0x0a, - 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, - 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x67, 0x6e, - 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, 0x73, 0x65, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, - 0x22, 0x5c, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, - 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x42, 0x74, - 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 
0x74, 0x63, 0x68, - 0x65, 0x72, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, - 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0xba, 0x80, 0xc8, 0xd1, - 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_type_matcher_string_proto_rawDescOnce sync.Once - file_envoy_type_matcher_string_proto_rawDescData = file_envoy_type_matcher_string_proto_rawDesc -) - -func file_envoy_type_matcher_string_proto_rawDescGZIP() []byte { - file_envoy_type_matcher_string_proto_rawDescOnce.Do(func() { - file_envoy_type_matcher_string_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_string_proto_rawDescData) - }) - return file_envoy_type_matcher_string_proto_rawDescData -} - -var file_envoy_type_matcher_string_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_envoy_type_matcher_string_proto_goTypes = []interface{}{ - (*StringMatcher)(nil), // 0: envoy.type.matcher.StringMatcher - (*ListStringMatcher)(nil), // 1: envoy.type.matcher.ListStringMatcher - (*RegexMatcher)(nil), // 2: envoy.type.matcher.RegexMatcher -} -var file_envoy_type_matcher_string_proto_depIdxs = []int32{ - 2, // 0: envoy.type.matcher.StringMatcher.safe_regex:type_name -> envoy.type.matcher.RegexMatcher - 0, // 1: envoy.type.matcher.ListStringMatcher.patterns:type_name -> envoy.type.matcher.StringMatcher - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { 
file_envoy_type_matcher_string_proto_init() } -func file_envoy_type_matcher_string_proto_init() { - if File_envoy_type_matcher_string_proto != nil { - return - } - file_envoy_type_matcher_regex_proto_init() - if !protoimpl.UnsafeEnabled { - file_envoy_type_matcher_string_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StringMatcher); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_type_matcher_string_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListStringMatcher); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_type_matcher_string_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*StringMatcher_Exact)(nil), - (*StringMatcher_Prefix)(nil), - (*StringMatcher_Suffix)(nil), - (*StringMatcher_Regex)(nil), - (*StringMatcher_SafeRegex)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_matcher_string_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_matcher_string_proto_goTypes, - DependencyIndexes: file_envoy_type_matcher_string_proto_depIdxs, - MessageInfos: file_envoy_type_matcher_string_proto_msgTypes, - }.Build() - File_envoy_type_matcher_string_proto = out.File - file_envoy_type_matcher_string_proto_rawDesc = nil - file_envoy_type_matcher_string_proto_goTypes = nil - file_envoy_type_matcher_string_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/string.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/string.pb.validate.go deleted file mode 100644 index 
fe94c88d82..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/string.pb.validate.go +++ /dev/null @@ -1,383 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/type/matcher/string.proto - -package matcher - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on StringMatcher with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *StringMatcher) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on StringMatcher with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in StringMatcherMultiError, or -// nil if none found. 
-func (m *StringMatcher) ValidateAll() error { - return m.validate(true) -} - -func (m *StringMatcher) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for IgnoreCase - - switch m.MatchPattern.(type) { - - case *StringMatcher_Exact: - // no validation rules for Exact - - case *StringMatcher_Prefix: - - if utf8.RuneCountInString(m.GetPrefix()) < 1 { - err := StringMatcherValidationError{ - field: "Prefix", - reason: "value length must be at least 1 runes", - } - if !all { - return err - } - errors = append(errors, err) - } - - case *StringMatcher_Suffix: - - if utf8.RuneCountInString(m.GetSuffix()) < 1 { - err := StringMatcherValidationError{ - field: "Suffix", - reason: "value length must be at least 1 runes", - } - if !all { - return err - } - errors = append(errors, err) - } - - case *StringMatcher_Regex: - - if len(m.GetRegex()) > 1024 { - err := StringMatcherValidationError{ - field: "Regex", - reason: "value length must be at most 1024 bytes", - } - if !all { - return err - } - errors = append(errors, err) - } - - case *StringMatcher_SafeRegex: - - if m.GetSafeRegex() == nil { - err := StringMatcherValidationError{ - field: "SafeRegex", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetSafeRegex()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, StringMatcherValidationError{ - field: "SafeRegex", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, StringMatcherValidationError{ - field: "SafeRegex", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetSafeRegex()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return 
StringMatcherValidationError{ - field: "SafeRegex", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := StringMatcherValidationError{ - field: "MatchPattern", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return StringMatcherMultiError(errors) - } - - return nil -} - -// StringMatcherMultiError is an error wrapping multiple validation errors -// returned by StringMatcher.ValidateAll() if the designated constraints -// aren't met. -type StringMatcherMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m StringMatcherMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m StringMatcherMultiError) AllErrors() []error { return m } - -// StringMatcherValidationError is the validation error returned by -// StringMatcher.Validate if the designated constraints aren't met. -type StringMatcherValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e StringMatcherValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e StringMatcherValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e StringMatcherValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e StringMatcherValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e StringMatcherValidationError) ErrorName() string { return "StringMatcherValidationError" } - -// Error satisfies the builtin error interface -func (e StringMatcherValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sStringMatcher.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = StringMatcherValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = StringMatcherValidationError{} - -// Validate checks the field values on ListStringMatcher with the rules defined -// in the proto definition for this message. If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *ListStringMatcher) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ListStringMatcher with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// ListStringMatcherMultiError, or nil if none found. 
-func (m *ListStringMatcher) ValidateAll() error { - return m.validate(true) -} - -func (m *ListStringMatcher) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(m.GetPatterns()) < 1 { - err := ListStringMatcherValidationError{ - field: "Patterns", - reason: "value must contain at least 1 item(s)", - } - if !all { - return err - } - errors = append(errors, err) - } - - for idx, item := range m.GetPatterns() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ListStringMatcherValidationError{ - field: fmt.Sprintf("Patterns[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ListStringMatcherValidationError{ - field: fmt.Sprintf("Patterns[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ListStringMatcherValidationError{ - field: fmt.Sprintf("Patterns[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return ListStringMatcherMultiError(errors) - } - - return nil -} - -// ListStringMatcherMultiError is an error wrapping multiple validation errors -// returned by ListStringMatcher.ValidateAll() if the designated constraints -// aren't met. -type ListStringMatcherMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ListStringMatcherMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. 
-func (m ListStringMatcherMultiError) AllErrors() []error { return m } - -// ListStringMatcherValidationError is the validation error returned by -// ListStringMatcher.Validate if the designated constraints aren't met. -type ListStringMatcherValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ListStringMatcherValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ListStringMatcherValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ListStringMatcherValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ListStringMatcherValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e ListStringMatcherValidationError) ErrorName() string { - return "ListStringMatcherValidationError" -} - -// Error satisfies the builtin error interface -func (e ListStringMatcherValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sListStringMatcher.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ListStringMatcherValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ListStringMatcherValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/struct.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/struct.pb.go deleted file mode 100644 index 5ed9532804..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/struct.pb.go +++ /dev/null @@ -1,318 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/matcher/struct.proto - -package matcher - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// StructMatcher provides a general interface to check if a given value is matched in -// google.protobuf.Struct. It uses `path` to retrieve the value -// from the struct and then check if it's matched to the specified value. -// -// For example, for the following Struct: -// -// .. code-block:: yaml -// -// fields: -// a: -// struct_value: -// fields: -// b: -// struct_value: -// fields: -// c: -// string_value: pro -// t: -// list_value: -// values: -// - string_value: m -// - string_value: n -// -// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" -// from the Metadata which is matched to the specified prefix match. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: b -// - key: c -// value: -// string_match: -// prefix: pr -// -// The following StructMatcher is matched as the code will match one of the string values in the -// list at the path [a, t]. -// -// .. code-block:: yaml -// -// path: -// - key: a -// - key: t -// value: -// list_match: -// one_of: -// string_match: -// exact: m -// -// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. 
-type StructMatcher struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The path to retrieve the Value from the Struct. - Path []*StructMatcher_PathSegment `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` - // The StructMatcher is matched if the value retrieved by path is matched to this value. - Value *ValueMatcher `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *StructMatcher) Reset() { - *x = StructMatcher{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_struct_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StructMatcher) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StructMatcher) ProtoMessage() {} - -func (x *StructMatcher) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_struct_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StructMatcher.ProtoReflect.Descriptor instead. -func (*StructMatcher) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_struct_proto_rawDescGZIP(), []int{0} -} - -func (x *StructMatcher) GetPath() []*StructMatcher_PathSegment { - if x != nil { - return x.Path - } - return nil -} - -func (x *StructMatcher) GetValue() *ValueMatcher { - if x != nil { - return x.Value - } - return nil -} - -// Specifies the segment in a path to retrieve value from Struct. 
-type StructMatcher_PathSegment struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Segment: - // *StructMatcher_PathSegment_Key - Segment isStructMatcher_PathSegment_Segment `protobuf_oneof:"segment"` -} - -func (x *StructMatcher_PathSegment) Reset() { - *x = StructMatcher_PathSegment{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_struct_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StructMatcher_PathSegment) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StructMatcher_PathSegment) ProtoMessage() {} - -func (x *StructMatcher_PathSegment) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_struct_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StructMatcher_PathSegment.ProtoReflect.Descriptor instead. -func (*StructMatcher_PathSegment) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_struct_proto_rawDescGZIP(), []int{0, 0} -} - -func (m *StructMatcher_PathSegment) GetSegment() isStructMatcher_PathSegment_Segment { - if m != nil { - return m.Segment - } - return nil -} - -func (x *StructMatcher_PathSegment) GetKey() string { - if x, ok := x.GetSegment().(*StructMatcher_PathSegment_Key); ok { - return x.Key - } - return "" -} - -type isStructMatcher_PathSegment_Segment interface { - isStructMatcher_PathSegment_Segment() -} - -type StructMatcher_PathSegment_Key struct { - // If specified, use the key to retrieve the value in a Struct. 
- Key string `protobuf:"bytes,1,opt,name=key,proto3,oneof"` -} - -func (*StructMatcher_PathSegment_Key) isStructMatcher_PathSegment_Segment() {} - -var File_envoy_type_matcher_struct_proto protoreflect.FileDescriptor - -var file_envoy_type_matcher_struct_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, - 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, - 0x4b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x2e, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, - 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x40, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 
0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x3a, - 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x0e, 0x0a, 0x07, 0x73, 0x65, - 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x74, 0x0a, 0x20, 0x69, 0x6f, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x0b, - 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_type_matcher_struct_proto_rawDescOnce sync.Once - file_envoy_type_matcher_struct_proto_rawDescData = file_envoy_type_matcher_struct_proto_rawDesc -) - -func file_envoy_type_matcher_struct_proto_rawDescGZIP() []byte { - file_envoy_type_matcher_struct_proto_rawDescOnce.Do(func() { - file_envoy_type_matcher_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_struct_proto_rawDescData) - }) - return file_envoy_type_matcher_struct_proto_rawDescData -} - -var file_envoy_type_matcher_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_envoy_type_matcher_struct_proto_goTypes = []interface{}{ - (*StructMatcher)(nil), // 0: envoy.type.matcher.StructMatcher - 
(*StructMatcher_PathSegment)(nil), // 1: envoy.type.matcher.StructMatcher.PathSegment - (*ValueMatcher)(nil), // 2: envoy.type.matcher.ValueMatcher -} -var file_envoy_type_matcher_struct_proto_depIdxs = []int32{ - 1, // 0: envoy.type.matcher.StructMatcher.path:type_name -> envoy.type.matcher.StructMatcher.PathSegment - 2, // 1: envoy.type.matcher.StructMatcher.value:type_name -> envoy.type.matcher.ValueMatcher - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_envoy_type_matcher_struct_proto_init() } -func file_envoy_type_matcher_struct_proto_init() { - if File_envoy_type_matcher_struct_proto != nil { - return - } - file_envoy_type_matcher_value_proto_init() - if !protoimpl.UnsafeEnabled { - file_envoy_type_matcher_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StructMatcher); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_type_matcher_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StructMatcher_PathSegment); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_type_matcher_struct_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*StructMatcher_PathSegment_Key)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_matcher_struct_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_matcher_struct_proto_goTypes, - DependencyIndexes: 
file_envoy_type_matcher_struct_proto_depIdxs, - MessageInfos: file_envoy_type_matcher_struct_proto_msgTypes, - }.Build() - File_envoy_type_matcher_struct_proto = out.File - file_envoy_type_matcher_struct_proto_rawDesc = nil - file_envoy_type_matcher_struct_proto_goTypes = nil - file_envoy_type_matcher_struct_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/struct.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/struct.pb.validate.go deleted file mode 100644 index 3fb899aad2..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/struct.pb.validate.go +++ /dev/null @@ -1,350 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/type/matcher/struct.proto - -package matcher - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on StructMatcher with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *StructMatcher) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on StructMatcher with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in StructMatcherMultiError, or -// nil if none found. 
-func (m *StructMatcher) ValidateAll() error { - return m.validate(true) -} - -func (m *StructMatcher) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(m.GetPath()) < 1 { - err := StructMatcherValidationError{ - field: "Path", - reason: "value must contain at least 1 item(s)", - } - if !all { - return err - } - errors = append(errors, err) - } - - for idx, item := range m.GetPath() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, StructMatcherValidationError{ - field: fmt.Sprintf("Path[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, StructMatcherValidationError{ - field: fmt.Sprintf("Path[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return StructMatcherValidationError{ - field: fmt.Sprintf("Path[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if m.GetValue() == nil { - err := StructMatcherValidationError{ - field: "Value", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if all { - switch v := interface{}(m.GetValue()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, StructMatcherValidationError{ - field: "Value", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, StructMatcherValidationError{ - field: "Value", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := 
interface{}(m.GetValue()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return StructMatcherValidationError{ - field: "Value", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if len(errors) > 0 { - return StructMatcherMultiError(errors) - } - - return nil -} - -// StructMatcherMultiError is an error wrapping multiple validation errors -// returned by StructMatcher.ValidateAll() if the designated constraints -// aren't met. -type StructMatcherMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m StructMatcherMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m StructMatcherMultiError) AllErrors() []error { return m } - -// StructMatcherValidationError is the validation error returned by -// StructMatcher.Validate if the designated constraints aren't met. -type StructMatcherValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e StructMatcherValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e StructMatcherValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e StructMatcherValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e StructMatcherValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e StructMatcherValidationError) ErrorName() string { return "StructMatcherValidationError" } - -// Error satisfies the builtin error interface -func (e StructMatcherValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sStructMatcher.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = StructMatcherValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = StructMatcherValidationError{} - -// Validate checks the field values on StructMatcher_PathSegment with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *StructMatcher_PathSegment) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on StructMatcher_PathSegment with the -// rules defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// StructMatcher_PathSegmentMultiError, or nil if none found. 
-func (m *StructMatcher_PathSegment) ValidateAll() error { - return m.validate(true) -} - -func (m *StructMatcher_PathSegment) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.Segment.(type) { - - case *StructMatcher_PathSegment_Key: - - if utf8.RuneCountInString(m.GetKey()) < 1 { - err := StructMatcher_PathSegmentValidationError{ - field: "Key", - reason: "value length must be at least 1 runes", - } - if !all { - return err - } - errors = append(errors, err) - } - - default: - err := StructMatcher_PathSegmentValidationError{ - field: "Segment", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return StructMatcher_PathSegmentMultiError(errors) - } - - return nil -} - -// StructMatcher_PathSegmentMultiError is an error wrapping multiple validation -// errors returned by StructMatcher_PathSegment.ValidateAll() if the -// designated constraints aren't met. -type StructMatcher_PathSegmentMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m StructMatcher_PathSegmentMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m StructMatcher_PathSegmentMultiError) AllErrors() []error { return m } - -// StructMatcher_PathSegmentValidationError is the validation error returned by -// StructMatcher_PathSegment.Validate if the designated constraints aren't met. -type StructMatcher_PathSegmentValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e StructMatcher_PathSegmentValidationError) Field() string { return e.field } - -// Reason function returns reason value. 
-func (e StructMatcher_PathSegmentValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e StructMatcher_PathSegmentValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e StructMatcher_PathSegmentValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e StructMatcher_PathSegmentValidationError) ErrorName() string { - return "StructMatcher_PathSegmentValidationError" -} - -// Error satisfies the builtin error interface -func (e StructMatcher_PathSegmentValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sStructMatcher_PathSegment.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = StructMatcher_PathSegmentValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = StructMatcher_PathSegmentValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go new file mode 100644 index 0000000000..4ad1d1ce3f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go @@ -0,0 +1,202 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: envoy/type/matcher/v3/filter_state.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// FilterStateMatcher provides a general interface for matching the filter state objects. +type FilterStateMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The filter state key to retrieve the object. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Types that are assignable to Matcher: + // *FilterStateMatcher_StringMatch + Matcher isFilterStateMatcher_Matcher `protobuf_oneof:"matcher"` +} + +func (x *FilterStateMatcher) Reset() { + *x = FilterStateMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_filter_state_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterStateMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterStateMatcher) ProtoMessage() {} + +func (x *FilterStateMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_filter_state_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterStateMatcher.ProtoReflect.Descriptor instead. 
+func (*FilterStateMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_filter_state_proto_rawDescGZIP(), []int{0} +} + +func (x *FilterStateMatcher) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (m *FilterStateMatcher) GetMatcher() isFilterStateMatcher_Matcher { + if m != nil { + return m.Matcher + } + return nil +} + +func (x *FilterStateMatcher) GetStringMatch() *StringMatcher { + if x, ok := x.GetMatcher().(*FilterStateMatcher_StringMatch); ok { + return x.StringMatch + } + return nil +} + +type isFilterStateMatcher_Matcher interface { + isFilterStateMatcher_Matcher() +} + +type FilterStateMatcher_StringMatch struct { + // Matches the filter state object as a string value. + StringMatch *StringMatcher `protobuf:"bytes,2,opt,name=string_match,json=stringMatch,proto3,oneof"` +} + +func (*FilterStateMatcher_StringMatch) isFilterStateMatcher_Matcher() {} + +var File_envoy_type_matcher_v3_filter_state_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_filter_state_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8a, 0x01, + 0x0a, 0x12, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x49, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x89, 0x01, 0x0a, 0x23, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x42, 0x10, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_filter_state_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_filter_state_proto_rawDescData = 
file_envoy_type_matcher_v3_filter_state_proto_rawDesc +) + +func file_envoy_type_matcher_v3_filter_state_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_filter_state_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_filter_state_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_filter_state_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_filter_state_proto_rawDescData +} + +var file_envoy_type_matcher_v3_filter_state_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_matcher_v3_filter_state_proto_goTypes = []interface{}{ + (*FilterStateMatcher)(nil), // 0: envoy.type.matcher.v3.FilterStateMatcher + (*StringMatcher)(nil), // 1: envoy.type.matcher.v3.StringMatcher +} +var file_envoy_type_matcher_v3_filter_state_proto_depIdxs = []int32{ + 1, // 0: envoy.type.matcher.v3.FilterStateMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_filter_state_proto_init() } +func file_envoy_type_matcher_v3_filter_state_proto_init() { + if File_envoy_type_matcher_v3_filter_state_proto != nil { + return + } + file_envoy_type_matcher_v3_string_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_filter_state_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterStateMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_filter_state_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*FilterStateMatcher_StringMatch)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_filter_state_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_filter_state_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_filter_state_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_filter_state_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_filter_state_proto = out.File + file_envoy_type_matcher_v3_filter_state_proto_rawDesc = nil + file_envoy_type_matcher_v3_filter_state_proto_goTypes = nil + file_envoy_type_matcher_v3_filter_state_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go new file mode 100644 index 0000000000..0152061d5b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go @@ -0,0 +1,207 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/filter_state.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on FilterStateMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *FilterStateMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterStateMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FilterStateMatcherMultiError, or nil if none found. +func (m *FilterStateMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterStateMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := FilterStateMatcherValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofMatcherPresent := false + switch v := m.Matcher.(type) { + case *FilterStateMatcher_StringMatch: + if v == nil { + err := FilterStateMatcherValidationError{ + field: "Matcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherPresent = true + + if all { + switch v := interface{}(m.GetStringMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterStateMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterStateMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStringMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterStateMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatcherPresent { + err 
:= FilterStateMatcherValidationError{ + field: "Matcher", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FilterStateMatcherMultiError(errors) + } + + return nil +} + +// FilterStateMatcherMultiError is an error wrapping multiple validation errors +// returned by FilterStateMatcher.ValidateAll() if the designated constraints +// aren't met. +type FilterStateMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterStateMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterStateMatcherMultiError) AllErrors() []error { return m } + +// FilterStateMatcherValidationError is the validation error returned by +// FilterStateMatcher.Validate if the designated constraints aren't met. +type FilterStateMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterStateMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterStateMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterStateMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterStateMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FilterStateMatcherValidationError) ErrorName() string { + return "FilterStateMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e FilterStateMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterStateMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterStateMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterStateMatcherValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go index 7608e52a6a..be7272b47e 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/matcher/v3/http_inputs.proto package matcherv3 @@ -238,6 +238,58 @@ func (x *HttpResponseTrailerMatchInput) GetHeaderName() string { return "" } +// Match input indicates that matching should be done on a specific query parameter. +// The resulting input string will be the first query parameter for the value +// 'query_param'. +// [#extension: envoy.matching.inputs.query_params] +type HttpRequestQueryParamMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The query parameter to match on. 
+ QueryParam string `protobuf:"bytes,1,opt,name=query_param,json=queryParam,proto3" json:"query_param,omitempty"` +} + +func (x *HttpRequestQueryParamMatchInput) Reset() { + *x = HttpRequestQueryParamMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpRequestQueryParamMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpRequestQueryParamMatchInput) ProtoMessage() {} + +func (x *HttpRequestQueryParamMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpRequestQueryParamMatchInput.ProtoReflect.Descriptor instead. 
+func (*HttpRequestQueryParamMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{4} +} + +func (x *HttpRequestQueryParamMatchInput) GetQueryParam() string { + if x != nil { + return x.QueryParam + } + return "" +} + var File_envoy_type_matcher_v3_http_inputs_proto protoreflect.FileDescriptor var file_envoy_type_matcher_v3_http_inputs_proto_rawDesc = []byte{ @@ -268,16 +320,20 @@ var file_envoy_type_matcher_v3_http_inputs_proto_rawDesc = []byte{ 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, - 0x42, 0x88, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x49, 0x6e, - 0x70, 0x75, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x22, 0x4b, 0x0a, 0x1f, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x28, 0x0a, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 
+ 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x42, 0x88, 0x01, + 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -292,12 +348,13 @@ func file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP() []byte { return file_envoy_type_matcher_v3_http_inputs_proto_rawDescData } -var file_envoy_type_matcher_v3_http_inputs_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_envoy_type_matcher_v3_http_inputs_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_envoy_type_matcher_v3_http_inputs_proto_goTypes = []interface{}{ - (*HttpRequestHeaderMatchInput)(nil), // 0: envoy.type.matcher.v3.HttpRequestHeaderMatchInput - (*HttpRequestTrailerMatchInput)(nil), // 1: envoy.type.matcher.v3.HttpRequestTrailerMatchInput - (*HttpResponseHeaderMatchInput)(nil), // 2: envoy.type.matcher.v3.HttpResponseHeaderMatchInput - (*HttpResponseTrailerMatchInput)(nil), // 3: envoy.type.matcher.v3.HttpResponseTrailerMatchInput + (*HttpRequestHeaderMatchInput)(nil), // 0: envoy.type.matcher.v3.HttpRequestHeaderMatchInput + 
(*HttpRequestTrailerMatchInput)(nil), // 1: envoy.type.matcher.v3.HttpRequestTrailerMatchInput + (*HttpResponseHeaderMatchInput)(nil), // 2: envoy.type.matcher.v3.HttpResponseHeaderMatchInput + (*HttpResponseTrailerMatchInput)(nil), // 3: envoy.type.matcher.v3.HttpResponseTrailerMatchInput + (*HttpRequestQueryParamMatchInput)(nil), // 4: envoy.type.matcher.v3.HttpRequestQueryParamMatchInput } var file_envoy_type_matcher_v3_http_inputs_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for method output_type @@ -361,6 +418,18 @@ func file_envoy_type_matcher_v3_http_inputs_proto_init() { return nil } } + file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpRequestQueryParamMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -368,7 +437,7 @@ func file_envoy_type_matcher_v3_http_inputs_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_type_matcher_v3_http_inputs_proto_rawDesc, NumEnums: 0, - NumMessages: 4, + NumMessages: 5, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go index fbb0c51d1c..24950b97ab 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go @@ -498,3 +498,117 @@ var _ interface { } = HttpResponseTrailerMatchInputValidationError{} var _HttpResponseTrailerMatchInput_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field 
values on HttpRequestQueryParamMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpRequestQueryParamMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpRequestQueryParamMatchInput with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// HttpRequestQueryParamMatchInputMultiError, or nil if none found. +func (m *HttpRequestQueryParamMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpRequestQueryParamMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetQueryParam()) < 1 { + err := HttpRequestQueryParamMatchInputValidationError{ + field: "QueryParam", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpRequestQueryParamMatchInputMultiError(errors) + } + + return nil +} + +// HttpRequestQueryParamMatchInputMultiError is an error wrapping multiple +// validation errors returned by HttpRequestQueryParamMatchInput.ValidateAll() +// if the designated constraints aren't met. +type HttpRequestQueryParamMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpRequestQueryParamMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m HttpRequestQueryParamMatchInputMultiError) AllErrors() []error { return m } + +// HttpRequestQueryParamMatchInputValidationError is the validation error +// returned by HttpRequestQueryParamMatchInput.Validate if the designated +// constraints aren't met. +type HttpRequestQueryParamMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpRequestQueryParamMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpRequestQueryParamMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpRequestQueryParamMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpRequestQueryParamMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpRequestQueryParamMatchInputValidationError) ErrorName() string { + return "HttpRequestQueryParamMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpRequestQueryParamMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpRequestQueryParamMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpRequestQueryParamMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpRequestQueryParamMatchInputValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go index ca38a1139e..ef57a33bdc 100644 --- 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/matcher/v3/metadata.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go index a8a6e84894..0a00e4faa3 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go @@ -255,9 +255,20 @@ func (m *MetadataMatcher_PathSegment) validate(all bool) error { var errors []error - switch m.Segment.(type) { - + oneofSegmentPresent := false + switch v := m.Segment.(type) { case *MetadataMatcher_PathSegment_Key: + if v == nil { + err := MetadataMatcher_PathSegmentValidationError{ + field: "Segment", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSegmentPresent = true if utf8.RuneCountInString(m.GetKey()) < 1 { err := MetadataMatcher_PathSegmentValidationError{ @@ -271,6 +282,9 @@ func (m *MetadataMatcher_PathSegment) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofSegmentPresent { err := MetadataMatcher_PathSegmentValidationError{ field: "Segment", reason: "value is required", @@ -279,7 +293,6 @@ func (m *MetadataMatcher_PathSegment) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go index 500c848f21..b2e12c84a8 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/matcher/v3/node.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go index c86720adab..251d139a12 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/matcher/v3/number.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go index 743399dfe6..d656d7f445 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go @@ -57,9 +57,20 @@ func (m *DoubleMatcher) validate(all bool) error { var errors []error - switch m.MatchPattern.(type) { - + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { case *DoubleMatcher_Range: + if v == nil { + err := DoubleMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true if all { switch v := interface{}(m.GetRange()).(type) { @@ -91,9 +102,22 @@ func (m *DoubleMatcher) validate(all bool) error { } case *DoubleMatcher_Exact: + if v == nil { + err := DoubleMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true // no validation rules for Exact - default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { err := DoubleMatcherValidationError{ field: "MatchPattern", reason: "value is required", @@ -102,7 +126,6 @@ func (m *DoubleMatcher) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go index 7fd71791f1..ddadfb0cad 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/matcher/v3/path.proto package matcherv3 @@ -84,9 +84,9 @@ type isPathMatcher_Rule interface { } type PathMatcher_Path struct { - // The `path` must match the URL path portion of the :path header. The query and fragment + // The ``path`` must match the URL path portion of the :path header. The query and fragment // string (if present) are removed in the URL path portion. - // For example, the path */data* will match the *:path* header */data#fragment?param=value*. + // For example, the path ``/data`` will match the ``:path`` header ``/data#fragment?param=value``. 
Path *StringMatcher `protobuf:"bytes,1,opt,name=path,proto3,oneof"` } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go index ad61109181..524fae95af 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go @@ -57,9 +57,20 @@ func (m *PathMatcher) validate(all bool) error { var errors []error - switch m.Rule.(type) { - + oneofRulePresent := false + switch v := m.Rule.(type) { case *PathMatcher_Path: + if v == nil { + err := PathMatcherValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true if m.GetPath() == nil { err := PathMatcherValidationError{ @@ -102,6 +113,9 @@ func (m *PathMatcher) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofRulePresent { err := PathMatcherValidationError{ field: "Rule", reason: "value is required", @@ -110,7 +124,6 @@ func (m *PathMatcher) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go index 0d820026ab..38a3bce81c 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/matcher/v3/regex.proto package matcherv3 @@ -184,14 +184,14 @@ func (x *RegexMatchAndSubstitute) GetSubstitution() string { // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. // -// Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` -// and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or +// Envoy supports program size checking via runtime. The runtime keys ``re2.max_program_size.error_level`` +// and ``re2.max_program_size.warn_level`` can be set to integers as the maximum program size or // complexity that a compiled regex can have before an exception is thrown or a warning is -// logged, respectively. `re2.max_program_size.error_level` defaults to 100, and -// `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). +// logged, respectively. ``re2.max_program_size.error_level`` defaults to 100, and +// ``re2.max_program_size.warn_level`` has no default if unset (will not check/log a warning). // -// Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, -// which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented +// Envoy emits two stats for tracking the program size of regexes: the histogram ``re2.program_size``, +// which records the program size, and the counter ``re2.exceeded_warn_level``, which is incremented // each time the program size exceeds the warn level threshold. 
type RegexMatcher_GoogleRE2 struct { state protoimpl.MessageState @@ -272,50 +272,50 @@ var file_envoy_type_matcher_v3_regex_proto_rawDesc = []byte{ 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xde, 0x02, 0x0a, 0x0c, 0x52, - 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x63, 0x0a, 0x0a, 0x67, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd6, 0x02, 0x0a, 0x0c, 0x52, + 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x32, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45, 0x32, 0x42, 0x13, - 0x18, 0x01, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, - 0x33, 0x2e, 0x30, 0x48, 0x00, 0x52, 0x09, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x32, - 0x12, 0x1d, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x1a, - 0x92, 0x01, 0x0a, 0x09, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45, 0x32, 0x12, 0x53, 0x0a, - 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, - 0x2e, 0x30, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x53, 0x69, - 0x7a, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x52, 0x65, - 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x52, 0x45, 0x32, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, - 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, - 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0xb9, 0x01, 0x0a, 0x17, - 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, - 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, - 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, - 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, - 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, - 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 
0x65, 0x42, 0x83, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, - 0x0a, 0x52, 0x65, 0x67, 0x65, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45, 0x32, 0x42, 0x0b, + 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x48, 0x00, 0x52, 0x09, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x32, 0x12, 0x1d, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x1a, 0x92, 0x01, 0x0a, 0x09, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x52, 0x45, 0x32, 0x12, 0x53, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, + 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x18, + 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x50, + 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x53, 0x69, 0x7a, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, + 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 
0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45, 0x32, 0x3a, 0x26, 0x9a, 0xc5, + 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x22, 0xc6, 0x01, 0x0a, 0x17, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x12, + 0x47, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x2f, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, + 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x0c, 0x73, 0x75, 0x62, + 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, + 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, + 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x42, 0x83, 0x01, 0x0a, + 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 
0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x65, 0x67, 0x65, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go index 351b2dd0b6..efd4f41446 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go @@ -68,14 +68,12 @@ func (m *RegexMatcher) validate(all bool) error { errors = append(errors, err) } - switch m.EngineType.(type) { - + switch v := m.EngineType.(type) { case *RegexMatcher_GoogleRe2: - - if m.GetGoogleRe2() == nil { + if v == nil { err := RegexMatcherValidationError{ - field: "GoogleRe2", - reason: "value is required", + field: "EngineType", + reason: "oneof value cannot be a typed-nil", } if !all { return err @@ -112,6 +110,8 @@ func (m *RegexMatcher) validate(all bool) error { } } + default: + _ = v // ensures v is used } if len(errors) > 0 { @@ -253,7 +253,16 @@ func (m *RegexMatchAndSubstitute) validate(all bool) error { } } - // no validation rules for Substitution + if !_RegexMatchAndSubstitute_Substitution_Pattern.MatchString(m.GetSubstitution()) { + err := 
RegexMatchAndSubstituteValidationError{ + field: "Substitution", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } if len(errors) > 0 { return RegexMatchAndSubstituteMultiError(errors) @@ -335,6 +344,8 @@ var _ interface { ErrorName() string } = RegexMatchAndSubstituteValidationError{} +var _RegexMatchAndSubstitute_Substitution_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + // Validate checks the field values on RegexMatcher_GoogleRE2 with the rules // defined in the proto definition for this message. If any rules are // violated, the first error encountered is returned, or nil if there are no violations. diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go new file mode 100644 index 0000000000..2209a7d5c6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go @@ -0,0 +1,204 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: envoy/type/matcher/v3/status_code_input.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Match input indicates that matching should be done on the response status +// code. 
+type HttpResponseStatusCodeMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HttpResponseStatusCodeMatchInput) Reset() { + *x = HttpResponseStatusCodeMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpResponseStatusCodeMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpResponseStatusCodeMatchInput) ProtoMessage() {} + +func (x *HttpResponseStatusCodeMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpResponseStatusCodeMatchInput.ProtoReflect.Descriptor instead. +func (*HttpResponseStatusCodeMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_status_code_input_proto_rawDescGZIP(), []int{0} +} + +// Match input indicates that the matching should be done on the class of the +// response status code. For eg: 1xx, 2xx, 3xx, 4xx or 5xx. 
+type HttpResponseStatusCodeClassMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HttpResponseStatusCodeClassMatchInput) Reset() { + *x = HttpResponseStatusCodeClassMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpResponseStatusCodeClassMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpResponseStatusCodeClassMatchInput) ProtoMessage() {} + +func (x *HttpResponseStatusCodeClassMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpResponseStatusCodeClassMatchInput.ProtoReflect.Descriptor instead. 
+func (*HttpResponseStatusCodeClassMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_status_code_input_proto_rawDescGZIP(), []int{1} +} + +var File_envoy_type_matcher_v3_status_code_input_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_status_code_input_proto_rawDesc = []byte{ + 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x22, 0x0a, 0x20, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x27, 0x0a, 0x25, 0x48, 0x74, 0x74, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x64, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x42, 0x8d, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 
0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_status_code_input_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_status_code_input_proto_rawDescData = file_envoy_type_matcher_v3_status_code_input_proto_rawDesc +) + +func file_envoy_type_matcher_v3_status_code_input_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_status_code_input_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_status_code_input_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_status_code_input_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_status_code_input_proto_rawDescData +} + +var file_envoy_type_matcher_v3_status_code_input_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_matcher_v3_status_code_input_proto_goTypes = []interface{}{ + (*HttpResponseStatusCodeMatchInput)(nil), // 0: envoy.type.matcher.v3.HttpResponseStatusCodeMatchInput + (*HttpResponseStatusCodeClassMatchInput)(nil), // 1: envoy.type.matcher.v3.HttpResponseStatusCodeClassMatchInput +} +var file_envoy_type_matcher_v3_status_code_input_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_status_code_input_proto_init() } +func file_envoy_type_matcher_v3_status_code_input_proto_init() { + if File_envoy_type_matcher_v3_status_code_input_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpResponseStatusCodeMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpResponseStatusCodeClassMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_status_code_input_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_status_code_input_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_status_code_input_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_status_code_input_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_status_code_input_proto = out.File + file_envoy_type_matcher_v3_status_code_input_proto_rawDesc = nil + file_envoy_type_matcher_v3_status_code_input_proto_goTypes = nil + file_envoy_type_matcher_v3_status_code_input_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go new file mode 100644 index 0000000000..763fa9f334 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go @@ -0,0 +1,246 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/type/matcher/v3/status_code_input.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpResponseStatusCodeMatchInput with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *HttpResponseStatusCodeMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpResponseStatusCodeMatchInput with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// HttpResponseStatusCodeMatchInputMultiError, or nil if none found. +func (m *HttpResponseStatusCodeMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpResponseStatusCodeMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return HttpResponseStatusCodeMatchInputMultiError(errors) + } + + return nil +} + +// HttpResponseStatusCodeMatchInputMultiError is an error wrapping multiple +// validation errors returned by +// HttpResponseStatusCodeMatchInput.ValidateAll() if the designated +// constraints aren't met. +type HttpResponseStatusCodeMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m HttpResponseStatusCodeMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpResponseStatusCodeMatchInputMultiError) AllErrors() []error { return m } + +// HttpResponseStatusCodeMatchInputValidationError is the validation error +// returned by HttpResponseStatusCodeMatchInput.Validate if the designated +// constraints aren't met. +type HttpResponseStatusCodeMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpResponseStatusCodeMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpResponseStatusCodeMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpResponseStatusCodeMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpResponseStatusCodeMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpResponseStatusCodeMatchInputValidationError) ErrorName() string { + return "HttpResponseStatusCodeMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpResponseStatusCodeMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpResponseStatusCodeMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpResponseStatusCodeMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpResponseStatusCodeMatchInputValidationError{} + +// Validate checks the field values on HttpResponseStatusCodeClassMatchInput +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *HttpResponseStatusCodeClassMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpResponseStatusCodeClassMatchInput +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// HttpResponseStatusCodeClassMatchInputMultiError, or nil if none found. +func (m *HttpResponseStatusCodeClassMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpResponseStatusCodeClassMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return HttpResponseStatusCodeClassMatchInputMultiError(errors) + } + + return nil +} + +// HttpResponseStatusCodeClassMatchInputMultiError is an error wrapping +// multiple validation errors returned by +// HttpResponseStatusCodeClassMatchInput.ValidateAll() if the designated +// constraints aren't met. 
+type HttpResponseStatusCodeClassMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpResponseStatusCodeClassMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpResponseStatusCodeClassMatchInputMultiError) AllErrors() []error { return m } + +// HttpResponseStatusCodeClassMatchInputValidationError is the validation error +// returned by HttpResponseStatusCodeClassMatchInput.Validate if the +// designated constraints aren't met. +type HttpResponseStatusCodeClassMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpResponseStatusCodeClassMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpResponseStatusCodeClassMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpResponseStatusCodeClassMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpResponseStatusCodeClassMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpResponseStatusCodeClassMatchInputValidationError) ErrorName() string { + return "HttpResponseStatusCodeClassMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpResponseStatusCodeClassMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpResponseStatusCodeClassMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpResponseStatusCodeClassMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpResponseStatusCodeClassMatchInputValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go index e625871145..65d589aa6a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/matcher/v3/string.proto package matcherv3 @@ -38,7 +38,7 @@ type StringMatcher struct { MatchPattern isStringMatcher_MatchPattern `protobuf_oneof:"match_pattern"` // If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. This // has no effect for the safe_regex match. - // For example, the matcher *data* will match both input string *Data* and *data* if set to true. + // For example, the matcher ``data`` will match both input string ``Data`` and ``data`` if set to true. 
IgnoreCase bool `protobuf:"varint,6,opt,name=ignore_case,json=ignoreCase,proto3" json:"ignore_case,omitempty"` } @@ -132,7 +132,7 @@ type StringMatcher_Exact struct { // // Examples: // - // * *abc* only matches the value *abc*. + // * ``abc`` only matches the value ``abc``. Exact string `protobuf:"bytes,1,opt,name=exact,proto3,oneof"` } @@ -142,7 +142,7 @@ type StringMatcher_Prefix struct { // // Examples: // - // * *abc* matches the value *abc.xyz* + // * ``abc`` matches the value ``abc.xyz`` Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3,oneof"` } @@ -152,7 +152,7 @@ type StringMatcher_Suffix struct { // // Examples: // - // * *abc* matches the value *xyz.abc* + // * ``abc`` matches the value ``xyz.abc`` Suffix string `protobuf:"bytes,3,opt,name=suffix,proto3,oneof"` } @@ -167,7 +167,7 @@ type StringMatcher_Contains struct { // // Examples: // - // * *abc* matches the value *xyz.abc.def* + // * ``abc`` matches the value ``xyz.abc.def`` Contains string `protobuf:"bytes,7,opt,name=contains,proto3,oneof"` } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go index f3c2490f6e..9a67d92a62 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go @@ -59,12 +59,33 @@ func (m *StringMatcher) validate(all bool) error { // no validation rules for IgnoreCase - switch m.MatchPattern.(type) { - + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { case *StringMatcher_Exact: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) 
+ } + oneofMatchPatternPresent = true // no validation rules for Exact - case *StringMatcher_Prefix: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true if utf8.RuneCountInString(m.GetPrefix()) < 1 { err := StringMatcherValidationError{ @@ -78,6 +99,17 @@ func (m *StringMatcher) validate(all bool) error { } case *StringMatcher_Suffix: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true if utf8.RuneCountInString(m.GetSuffix()) < 1 { err := StringMatcherValidationError{ @@ -91,6 +123,17 @@ func (m *StringMatcher) validate(all bool) error { } case *StringMatcher_SafeRegex: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true if m.GetSafeRegex() == nil { err := StringMatcherValidationError{ @@ -133,6 +176,17 @@ func (m *StringMatcher) validate(all bool) error { } case *StringMatcher_Contains: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true if utf8.RuneCountInString(m.GetContains()) < 1 { err := StringMatcherValidationError{ @@ -146,6 +200,9 @@ func (m *StringMatcher) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { err := StringMatcherValidationError{ field: "MatchPattern", reason: "value is required", @@ -154,7 +211,6 @@ func (m *StringMatcher) validate(all bool) error { return err } errors = append(errors, err) - } 
if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go index 64930cdf8e..294fa99d84 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/matcher/v3/struct.proto package matcherv3 @@ -23,7 +23,7 @@ const ( ) // StructMatcher provides a general interface to check if a given value is matched in -// google.protobuf.Struct. It uses `path` to retrieve the value +// google.protobuf.Struct. It uses ``path`` to retrieve the value // from the struct and then check if it's matched to the specified value. 
// // For example, for the following Struct: diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go index 84939bb091..47d7eeb50a 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go @@ -242,9 +242,20 @@ func (m *StructMatcher_PathSegment) validate(all bool) error { var errors []error - switch m.Segment.(type) { - + oneofSegmentPresent := false + switch v := m.Segment.(type) { case *StructMatcher_PathSegment_Key: + if v == nil { + err := StructMatcher_PathSegmentValidationError{ + field: "Segment", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSegmentPresent = true if utf8.RuneCountInString(m.GetKey()) < 1 { err := StructMatcher_PathSegmentValidationError{ @@ -258,6 +269,9 @@ func (m *StructMatcher_PathSegment) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofSegmentPresent { err := StructMatcher_PathSegmentValidationError{ field: "Segment", reason: "value is required", @@ -266,7 +280,6 @@ func (m *StructMatcher_PathSegment) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go index eab3dd4800..0a694d0853 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go +++ 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/matcher/v3/value.proto package matcherv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go index d135b1175e..cff13d9b99 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go @@ -57,9 +57,20 @@ func (m *ValueMatcher) validate(all bool) error { var errors []error - switch m.MatchPattern.(type) { - + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { case *ValueMatcher_NullMatch_: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true if all { switch v := interface{}(m.GetNullMatch()).(type) { @@ -91,6 +102,17 @@ func (m *ValueMatcher) validate(all bool) error { } case *ValueMatcher_DoubleMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true if all { switch v := interface{}(m.GetDoubleMatch()).(type) { @@ -122,6 +144,17 @@ func (m *ValueMatcher) validate(all bool) error { } case *ValueMatcher_StringMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a 
typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true if all { switch v := interface{}(m.GetStringMatch()).(type) { @@ -153,12 +186,43 @@ func (m *ValueMatcher) validate(all bool) error { } case *ValueMatcher_BoolMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true // no validation rules for BoolMatch - case *ValueMatcher_PresentMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true // no validation rules for PresentMatch - case *ValueMatcher_ListMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true if all { switch v := interface{}(m.GetListMatch()).(type) { @@ -190,6 +254,9 @@ func (m *ValueMatcher) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { err := ValueMatcherValidationError{ field: "MatchPattern", reason: "value is required", @@ -198,7 +265,6 @@ func (m *ValueMatcher) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -300,9 +366,20 @@ func (m *ListMatcher) validate(all bool) error { var errors []error - switch m.MatchPattern.(type) { - + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { case *ListMatcher_OneOf: + if v == nil { + err := ListMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true if all { 
switch v := interface{}(m.GetOneOf()).(type) { @@ -334,6 +411,9 @@ func (m *ListMatcher) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { err := ListMatcherValidationError{ field: "MatchPattern", reason: "value is required", @@ -342,7 +422,6 @@ func (m *ListMatcher) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/value.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/value.pb.go deleted file mode 100644 index 3aecd6b166..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/value.pb.go +++ /dev/null @@ -1,448 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/matcher/value.proto - -package matcher - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. -// StructValue is not supported and is always not matched. -// [#next-free-field: 7] -type ValueMatcher struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Specifies how to match a value. 
- // - // Types that are assignable to MatchPattern: - // *ValueMatcher_NullMatch_ - // *ValueMatcher_DoubleMatch - // *ValueMatcher_StringMatch - // *ValueMatcher_BoolMatch - // *ValueMatcher_PresentMatch - // *ValueMatcher_ListMatch - MatchPattern isValueMatcher_MatchPattern `protobuf_oneof:"match_pattern"` -} - -func (x *ValueMatcher) Reset() { - *x = ValueMatcher{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_value_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValueMatcher) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValueMatcher) ProtoMessage() {} - -func (x *ValueMatcher) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_value_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValueMatcher.ProtoReflect.Descriptor instead. 
-func (*ValueMatcher) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_value_proto_rawDescGZIP(), []int{0} -} - -func (m *ValueMatcher) GetMatchPattern() isValueMatcher_MatchPattern { - if m != nil { - return m.MatchPattern - } - return nil -} - -func (x *ValueMatcher) GetNullMatch() *ValueMatcher_NullMatch { - if x, ok := x.GetMatchPattern().(*ValueMatcher_NullMatch_); ok { - return x.NullMatch - } - return nil -} - -func (x *ValueMatcher) GetDoubleMatch() *DoubleMatcher { - if x, ok := x.GetMatchPattern().(*ValueMatcher_DoubleMatch); ok { - return x.DoubleMatch - } - return nil -} - -func (x *ValueMatcher) GetStringMatch() *StringMatcher { - if x, ok := x.GetMatchPattern().(*ValueMatcher_StringMatch); ok { - return x.StringMatch - } - return nil -} - -func (x *ValueMatcher) GetBoolMatch() bool { - if x, ok := x.GetMatchPattern().(*ValueMatcher_BoolMatch); ok { - return x.BoolMatch - } - return false -} - -func (x *ValueMatcher) GetPresentMatch() bool { - if x, ok := x.GetMatchPattern().(*ValueMatcher_PresentMatch); ok { - return x.PresentMatch - } - return false -} - -func (x *ValueMatcher) GetListMatch() *ListMatcher { - if x, ok := x.GetMatchPattern().(*ValueMatcher_ListMatch); ok { - return x.ListMatch - } - return nil -} - -type isValueMatcher_MatchPattern interface { - isValueMatcher_MatchPattern() -} - -type ValueMatcher_NullMatch_ struct { - // If specified, a match occurs if and only if the target value is a NullValue. - NullMatch *ValueMatcher_NullMatch `protobuf:"bytes,1,opt,name=null_match,json=nullMatch,proto3,oneof"` -} - -type ValueMatcher_DoubleMatch struct { - // If specified, a match occurs if and only if the target value is a double value and is - // matched to this field. 
- DoubleMatch *DoubleMatcher `protobuf:"bytes,2,opt,name=double_match,json=doubleMatch,proto3,oneof"` -} - -type ValueMatcher_StringMatch struct { - // If specified, a match occurs if and only if the target value is a string value and is - // matched to this field. - StringMatch *StringMatcher `protobuf:"bytes,3,opt,name=string_match,json=stringMatch,proto3,oneof"` -} - -type ValueMatcher_BoolMatch struct { - // If specified, a match occurs if and only if the target value is a bool value and is equal - // to this field. - BoolMatch bool `protobuf:"varint,4,opt,name=bool_match,json=boolMatch,proto3,oneof"` -} - -type ValueMatcher_PresentMatch struct { - // If specified, value match will be performed based on whether the path is referring to a - // valid primitive value in the metadata. If the path is referring to a non-primitive value, - // the result is always not matched. - PresentMatch bool `protobuf:"varint,5,opt,name=present_match,json=presentMatch,proto3,oneof"` -} - -type ValueMatcher_ListMatch struct { - // If specified, a match occurs if and only if the target value is a list value and - // is matched to this field. - ListMatch *ListMatcher `protobuf:"bytes,6,opt,name=list_match,json=listMatch,proto3,oneof"` -} - -func (*ValueMatcher_NullMatch_) isValueMatcher_MatchPattern() {} - -func (*ValueMatcher_DoubleMatch) isValueMatcher_MatchPattern() {} - -func (*ValueMatcher_StringMatch) isValueMatcher_MatchPattern() {} - -func (*ValueMatcher_BoolMatch) isValueMatcher_MatchPattern() {} - -func (*ValueMatcher_PresentMatch) isValueMatcher_MatchPattern() {} - -func (*ValueMatcher_ListMatch) isValueMatcher_MatchPattern() {} - -// Specifies the way to match a list value. 
-type ListMatcher struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to MatchPattern: - // *ListMatcher_OneOf - MatchPattern isListMatcher_MatchPattern `protobuf_oneof:"match_pattern"` -} - -func (x *ListMatcher) Reset() { - *x = ListMatcher{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_value_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListMatcher) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListMatcher) ProtoMessage() {} - -func (x *ListMatcher) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_value_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListMatcher.ProtoReflect.Descriptor instead. -func (*ListMatcher) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_value_proto_rawDescGZIP(), []int{1} -} - -func (m *ListMatcher) GetMatchPattern() isListMatcher_MatchPattern { - if m != nil { - return m.MatchPattern - } - return nil -} - -func (x *ListMatcher) GetOneOf() *ValueMatcher { - if x, ok := x.GetMatchPattern().(*ListMatcher_OneOf); ok { - return x.OneOf - } - return nil -} - -type isListMatcher_MatchPattern interface { - isListMatcher_MatchPattern() -} - -type ListMatcher_OneOf struct { - // If specified, at least one of the values in the list must match the value specified. - OneOf *ValueMatcher `protobuf:"bytes,1,opt,name=one_of,json=oneOf,proto3,oneof"` -} - -func (*ListMatcher_OneOf) isListMatcher_MatchPattern() {} - -// NullMatch is an empty message to specify a null value. 
-type ValueMatcher_NullMatch struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ValueMatcher_NullMatch) Reset() { - *x = ValueMatcher_NullMatch{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_matcher_value_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValueMatcher_NullMatch) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValueMatcher_NullMatch) ProtoMessage() {} - -func (x *ValueMatcher_NullMatch) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_matcher_value_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValueMatcher_NullMatch.ProtoReflect.Descriptor instead. -func (*ValueMatcher_NullMatch) Descriptor() ([]byte, []int) { - return file_envoy_type_matcher_value_proto_rawDescGZIP(), []int{0, 0} -} - -var File_envoy_type_matcher_value_proto protoreflect.FileDescriptor - -var file_envoy_type_matcher_value_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x12, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 
0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x98, - 0x03, 0x0a, 0x0c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, - 0x4b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, - 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, 0x0a, 0x0c, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, - 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1f, 0x0a, 0x0a, - 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, - 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, - 0x0d, 0x70, 
0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x12, 0x40, 0x0a, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x0b, 0x0a, 0x09, 0x4e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74, - 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x5e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x39, 0x0a, 0x06, 0x6f, 0x6e, 0x65, 0x5f, - 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x05, 0x6f, 0x6e, - 0x65, 0x4f, 0x66, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74, - 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x73, 0x0a, 0x20, 0x69, 0x6f, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x0a, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, - 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 
0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_type_matcher_value_proto_rawDescOnce sync.Once - file_envoy_type_matcher_value_proto_rawDescData = file_envoy_type_matcher_value_proto_rawDesc -) - -func file_envoy_type_matcher_value_proto_rawDescGZIP() []byte { - file_envoy_type_matcher_value_proto_rawDescOnce.Do(func() { - file_envoy_type_matcher_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_value_proto_rawDescData) - }) - return file_envoy_type_matcher_value_proto_rawDescData -} - -var file_envoy_type_matcher_value_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_envoy_type_matcher_value_proto_goTypes = []interface{}{ - (*ValueMatcher)(nil), // 0: envoy.type.matcher.ValueMatcher - (*ListMatcher)(nil), // 1: envoy.type.matcher.ListMatcher - (*ValueMatcher_NullMatch)(nil), // 2: envoy.type.matcher.ValueMatcher.NullMatch - (*DoubleMatcher)(nil), // 3: envoy.type.matcher.DoubleMatcher - (*StringMatcher)(nil), // 4: envoy.type.matcher.StringMatcher -} -var file_envoy_type_matcher_value_proto_depIdxs = []int32{ - 2, // 0: envoy.type.matcher.ValueMatcher.null_match:type_name -> envoy.type.matcher.ValueMatcher.NullMatch - 3, // 1: envoy.type.matcher.ValueMatcher.double_match:type_name -> envoy.type.matcher.DoubleMatcher - 4, // 2: envoy.type.matcher.ValueMatcher.string_match:type_name -> envoy.type.matcher.StringMatcher - 1, // 3: envoy.type.matcher.ValueMatcher.list_match:type_name -> envoy.type.matcher.ListMatcher - 0, // 4: envoy.type.matcher.ListMatcher.one_of:type_name -> envoy.type.matcher.ValueMatcher - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name -} - 
-func init() { file_envoy_type_matcher_value_proto_init() } -func file_envoy_type_matcher_value_proto_init() { - if File_envoy_type_matcher_value_proto != nil { - return - } - file_envoy_type_matcher_number_proto_init() - file_envoy_type_matcher_string_proto_init() - if !protoimpl.UnsafeEnabled { - file_envoy_type_matcher_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValueMatcher); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_type_matcher_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListMatcher); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_type_matcher_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValueMatcher_NullMatch); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_envoy_type_matcher_value_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*ValueMatcher_NullMatch_)(nil), - (*ValueMatcher_DoubleMatch)(nil), - (*ValueMatcher_StringMatch)(nil), - (*ValueMatcher_BoolMatch)(nil), - (*ValueMatcher_PresentMatch)(nil), - (*ValueMatcher_ListMatch)(nil), - } - file_envoy_type_matcher_value_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*ListMatcher_OneOf)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_matcher_value_proto_rawDesc, - NumEnums: 0, - NumMessages: 3, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_matcher_value_proto_goTypes, - DependencyIndexes: file_envoy_type_matcher_value_proto_depIdxs, - MessageInfos: file_envoy_type_matcher_value_proto_msgTypes, - }.Build() - 
File_envoy_type_matcher_value_proto = out.File - file_envoy_type_matcher_value_proto_rawDesc = nil - file_envoy_type_matcher_value_proto_goTypes = nil - file_envoy_type_matcher_value_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/value.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/value.pb.validate.go deleted file mode 100644 index 7811b37230..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/value.pb.validate.go +++ /dev/null @@ -1,525 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/type/matcher/value.proto - -package matcher - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on ValueMatcher with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *ValueMatcher) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ValueMatcher with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in ValueMatcherMultiError, or -// nil if none found. 
-func (m *ValueMatcher) ValidateAll() error { - return m.validate(true) -} - -func (m *ValueMatcher) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.MatchPattern.(type) { - - case *ValueMatcher_NullMatch_: - - if all { - switch v := interface{}(m.GetNullMatch()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ValueMatcherValidationError{ - field: "NullMatch", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ValueMatcherValidationError{ - field: "NullMatch", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetNullMatch()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ValueMatcherValidationError{ - field: "NullMatch", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *ValueMatcher_DoubleMatch: - - if all { - switch v := interface{}(m.GetDoubleMatch()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ValueMatcherValidationError{ - field: "DoubleMatch", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ValueMatcherValidationError{ - field: "DoubleMatch", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetDoubleMatch()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ValueMatcherValidationError{ - field: "DoubleMatch", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *ValueMatcher_StringMatch: - - if all { - switch v := interface{}(m.GetStringMatch()).(type) { - case interface{ 
ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ValueMatcherValidationError{ - field: "StringMatch", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ValueMatcherValidationError{ - field: "StringMatch", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetStringMatch()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ValueMatcherValidationError{ - field: "StringMatch", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *ValueMatcher_BoolMatch: - // no validation rules for BoolMatch - - case *ValueMatcher_PresentMatch: - // no validation rules for PresentMatch - - case *ValueMatcher_ListMatch: - - if all { - switch v := interface{}(m.GetListMatch()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ValueMatcherValidationError{ - field: "ListMatch", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ValueMatcherValidationError{ - field: "ListMatch", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetListMatch()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ValueMatcherValidationError{ - field: "ListMatch", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := ValueMatcherValidationError{ - field: "MatchPattern", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return ValueMatcherMultiError(errors) - } - - return nil -} - -// ValueMatcherMultiError is an error wrapping 
multiple validation errors -// returned by ValueMatcher.ValidateAll() if the designated constraints aren't met. -type ValueMatcherMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ValueMatcherMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ValueMatcherMultiError) AllErrors() []error { return m } - -// ValueMatcherValidationError is the validation error returned by -// ValueMatcher.Validate if the designated constraints aren't met. -type ValueMatcherValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ValueMatcherValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ValueMatcherValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ValueMatcherValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ValueMatcherValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ValueMatcherValidationError) ErrorName() string { return "ValueMatcherValidationError" } - -// Error satisfies the builtin error interface -func (e ValueMatcherValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sValueMatcher.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ValueMatcherValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ValueMatcherValidationError{} - -// Validate checks the field values on ListMatcher with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *ListMatcher) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ListMatcher with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in ListMatcherMultiError, or -// nil if none found. 
-func (m *ListMatcher) ValidateAll() error { - return m.validate(true) -} - -func (m *ListMatcher) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - switch m.MatchPattern.(type) { - - case *ListMatcher_OneOf: - - if all { - switch v := interface{}(m.GetOneOf()).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, ListMatcherValidationError{ - field: "OneOf", - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, ListMatcherValidationError{ - field: "OneOf", - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(m.GetOneOf()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ListMatcherValidationError{ - field: "OneOf", - reason: "embedded message failed validation", - cause: err, - } - } - } - - default: - err := ListMatcherValidationError{ - field: "MatchPattern", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - - } - - if len(errors) > 0 { - return ListMatcherMultiError(errors) - } - - return nil -} - -// ListMatcherMultiError is an error wrapping multiple validation errors -// returned by ListMatcher.ValidateAll() if the designated constraints aren't met. -type ListMatcherMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ListMatcherMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ListMatcherMultiError) AllErrors() []error { return m } - -// ListMatcherValidationError is the validation error returned by -// ListMatcher.Validate if the designated constraints aren't met. 
-type ListMatcherValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ListMatcherValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ListMatcherValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ListMatcherValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ListMatcherValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e ListMatcherValidationError) ErrorName() string { return "ListMatcherValidationError" } - -// Error satisfies the builtin error interface -func (e ListMatcherValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sListMatcher.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ListMatcherValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ListMatcherValidationError{} - -// Validate checks the field values on ValueMatcher_NullMatch with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *ValueMatcher_NullMatch) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ValueMatcher_NullMatch with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// ValueMatcher_NullMatchMultiError, or nil if none found. 
-func (m *ValueMatcher_NullMatch) ValidateAll() error { - return m.validate(true) -} - -func (m *ValueMatcher_NullMatch) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if len(errors) > 0 { - return ValueMatcher_NullMatchMultiError(errors) - } - - return nil -} - -// ValueMatcher_NullMatchMultiError is an error wrapping multiple validation -// errors returned by ValueMatcher_NullMatch.ValidateAll() if the designated -// constraints aren't met. -type ValueMatcher_NullMatchMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ValueMatcher_NullMatchMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ValueMatcher_NullMatchMultiError) AllErrors() []error { return m } - -// ValueMatcher_NullMatchValidationError is the validation error returned by -// ValueMatcher_NullMatch.Validate if the designated constraints aren't met. -type ValueMatcher_NullMatchValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ValueMatcher_NullMatchValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ValueMatcher_NullMatchValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ValueMatcher_NullMatchValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ValueMatcher_NullMatchValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ValueMatcher_NullMatchValidationError) ErrorName() string { - return "ValueMatcher_NullMatchValidationError" -} - -// Error satisfies the builtin error interface -func (e ValueMatcher_NullMatchValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sValueMatcher_NullMatch.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ValueMatcher_NullMatchValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ValueMatcher_NullMatchValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go index 4f37e6cf3e..a1eafc3cbd 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/metadata/v3/metadata.proto package metadatav3 @@ -22,7 +22,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// MetadataKey provides a general interface using `key` and `path` to retrieve value from +// MetadataKey provides a general interface using ``key`` and ``path`` to retrieve value from // :ref:`Metadata `. 
// // For example, for the following Metadata: diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go index daa9b35f2a..37e45c4f0e 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go @@ -212,9 +212,20 @@ func (m *MetadataKind) validate(all bool) error { var errors []error - switch m.Kind.(type) { - + oneofKindPresent := false + switch v := m.Kind.(type) { case *MetadataKind_Request_: + if v == nil { + err := MetadataKindValidationError{ + field: "Kind", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofKindPresent = true if all { switch v := interface{}(m.GetRequest()).(type) { @@ -246,6 +257,17 @@ func (m *MetadataKind) validate(all bool) error { } case *MetadataKind_Route_: + if v == nil { + err := MetadataKindValidationError{ + field: "Kind", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofKindPresent = true if all { switch v := interface{}(m.GetRoute()).(type) { @@ -277,6 +299,17 @@ func (m *MetadataKind) validate(all bool) error { } case *MetadataKind_Cluster_: + if v == nil { + err := MetadataKindValidationError{ + field: "Kind", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofKindPresent = true if all { switch v := interface{}(m.GetCluster()).(type) { @@ -308,6 +341,17 @@ func (m *MetadataKind) validate(all bool) error { } case *MetadataKind_Host_: + if v == nil { + err := MetadataKindValidationError{ + field: "Kind", + reason: "oneof value 
cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofKindPresent = true if all { switch v := interface{}(m.GetHost()).(type) { @@ -339,6 +383,9 @@ func (m *MetadataKind) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofKindPresent { err := MetadataKindValidationError{ field: "Kind", reason: "value is required", @@ -347,7 +394,6 @@ func (m *MetadataKind) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { @@ -449,9 +495,20 @@ func (m *MetadataKey_PathSegment) validate(all bool) error { var errors []error - switch m.Segment.(type) { - + oneofSegmentPresent := false + switch v := m.Segment.(type) { case *MetadataKey_PathSegment_Key: + if v == nil { + err := MetadataKey_PathSegmentValidationError{ + field: "Segment", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSegmentPresent = true if utf8.RuneCountInString(m.GetKey()) < 1 { err := MetadataKey_PathSegmentValidationError{ @@ -465,6 +522,9 @@ func (m *MetadataKey_PathSegment) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofSegmentPresent { err := MetadataKey_PathSegmentValidationError{ field: "Segment", reason: "value is required", @@ -473,7 +533,6 @@ func (m *MetadataKey_PathSegment) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/percent.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/percent.pb.go deleted file mode 100644 index c034099258..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/percent.pb.go +++ /dev/null @@ -1,309 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/percent.proto - -package _type - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Fraction percentages support several fixed denominator values. -type FractionalPercent_DenominatorType int32 - -const ( - // 100. - // - // **Example**: 1/100 = 1%. - FractionalPercent_HUNDRED FractionalPercent_DenominatorType = 0 - // 10,000. - // - // **Example**: 1/10000 = 0.01%. - FractionalPercent_TEN_THOUSAND FractionalPercent_DenominatorType = 1 - // 1,000,000. - // - // **Example**: 1/1000000 = 0.0001%. - FractionalPercent_MILLION FractionalPercent_DenominatorType = 2 -) - -// Enum value maps for FractionalPercent_DenominatorType. 
-var ( - FractionalPercent_DenominatorType_name = map[int32]string{ - 0: "HUNDRED", - 1: "TEN_THOUSAND", - 2: "MILLION", - } - FractionalPercent_DenominatorType_value = map[string]int32{ - "HUNDRED": 0, - "TEN_THOUSAND": 1, - "MILLION": 2, - } -) - -func (x FractionalPercent_DenominatorType) Enum() *FractionalPercent_DenominatorType { - p := new(FractionalPercent_DenominatorType) - *p = x - return p -} - -func (x FractionalPercent_DenominatorType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FractionalPercent_DenominatorType) Descriptor() protoreflect.EnumDescriptor { - return file_envoy_type_percent_proto_enumTypes[0].Descriptor() -} - -func (FractionalPercent_DenominatorType) Type() protoreflect.EnumType { - return &file_envoy_type_percent_proto_enumTypes[0] -} - -func (x FractionalPercent_DenominatorType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use FractionalPercent_DenominatorType.Descriptor instead. -func (FractionalPercent_DenominatorType) EnumDescriptor() ([]byte, []int) { - return file_envoy_type_percent_proto_rawDescGZIP(), []int{1, 0} -} - -// Identifies a percentage, in the range [0.0, 100.0]. 
-type Percent struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *Percent) Reset() { - *x = Percent{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_percent_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Percent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Percent) ProtoMessage() {} - -func (x *Percent) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_percent_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Percent.ProtoReflect.Descriptor instead. -func (*Percent) Descriptor() ([]byte, []int) { - return file_envoy_type_percent_proto_rawDescGZIP(), []int{0} -} - -func (x *Percent) GetValue() float64 { - if x != nil { - return x.Value - } - return 0 -} - -// A fractional percentage is used in cases in which for performance reasons performing floating -// point to integer conversions during randomness calculations is undesirable. The message includes -// both a numerator and denominator that together determine the final fractional value. -// -// * **Example**: 1/100 = 1%. -// * **Example**: 3/10000 = 0.03%. -type FractionalPercent struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Specifies the numerator. Defaults to 0. - Numerator uint32 `protobuf:"varint,1,opt,name=numerator,proto3" json:"numerator,omitempty"` - // Specifies the denominator. If the denominator specified is less than the numerator, the final - // fractional percentage is capped at 1 (100%). 
- Denominator FractionalPercent_DenominatorType `protobuf:"varint,2,opt,name=denominator,proto3,enum=envoy.type.FractionalPercent_DenominatorType" json:"denominator,omitempty"` -} - -func (x *FractionalPercent) Reset() { - *x = FractionalPercent{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_percent_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FractionalPercent) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FractionalPercent) ProtoMessage() {} - -func (x *FractionalPercent) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_percent_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FractionalPercent.ProtoReflect.Descriptor instead. -func (*FractionalPercent) Descriptor() ([]byte, []int) { - return file_envoy_type_percent_proto_rawDescGZIP(), []int{1} -} - -func (x *FractionalPercent) GetNumerator() uint32 { - if x != nil { - return x.Numerator - } - return 0 -} - -func (x *FractionalPercent) GetDenominator() FractionalPercent_DenominatorType { - if x != nil { - return x.Denominator - } - return FractionalPercent_HUNDRED -} - -var File_envoy_type_percent_proto protoreflect.FileDescriptor - -var file_envoy_type_percent_proto_rawDesc = []byte{ - 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x70, 0x65, 0x72, - 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, - 0x76, 0x61, 0x6c, 0x69, 
0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x38, - 0x0a, 0x07, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xcb, 0x01, 0x0a, 0x11, 0x46, 0x72, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x1c, - 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x09, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x59, 0x0a, 0x0b, - 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x46, - 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, - 0x2e, 0x44, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x64, 0x65, 0x6e, 0x6f, - 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x3d, 0x0a, 0x0f, 0x44, 0x65, 0x6e, 0x6f, 0x6d, - 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x55, - 0x4e, 0x44, 0x52, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x45, 0x4e, 0x5f, 0x54, - 0x48, 0x4f, 0x55, 0x53, 0x41, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x49, 0x4c, - 0x4c, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x42, 0x65, 0x0a, 0x18, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x42, 0x0c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2f, 0x74, 0x79, 0x70, 0x65, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_type_percent_proto_rawDescOnce sync.Once - file_envoy_type_percent_proto_rawDescData = file_envoy_type_percent_proto_rawDesc -) - -func file_envoy_type_percent_proto_rawDescGZIP() []byte { - file_envoy_type_percent_proto_rawDescOnce.Do(func() { - file_envoy_type_percent_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_percent_proto_rawDescData) - }) - return file_envoy_type_percent_proto_rawDescData -} - -var file_envoy_type_percent_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_envoy_type_percent_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_envoy_type_percent_proto_goTypes = []interface{}{ - (FractionalPercent_DenominatorType)(0), // 0: envoy.type.FractionalPercent.DenominatorType - (*Percent)(nil), // 1: envoy.type.Percent - (*FractionalPercent)(nil), // 2: envoy.type.FractionalPercent -} -var file_envoy_type_percent_proto_depIdxs = []int32{ - 0, // 0: envoy.type.FractionalPercent.denominator:type_name -> envoy.type.FractionalPercent.DenominatorType - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_envoy_type_percent_proto_init() } -func file_envoy_type_percent_proto_init() { - if File_envoy_type_percent_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_envoy_type_percent_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Percent); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_type_percent_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FractionalPercent); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_percent_proto_rawDesc, - NumEnums: 1, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_percent_proto_goTypes, - DependencyIndexes: file_envoy_type_percent_proto_depIdxs, - EnumInfos: file_envoy_type_percent_proto_enumTypes, - MessageInfos: file_envoy_type_percent_proto_msgTypes, - }.Build() - File_envoy_type_percent_proto = out.File - file_envoy_type_percent_proto_rawDesc = nil - file_envoy_type_percent_proto_goTypes = nil - file_envoy_type_percent_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/percent.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/percent.pb.validate.go deleted file mode 100644 index 01d6e3b967..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/percent.pb.validate.go +++ /dev/null @@ -1,260 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. 
-// source: envoy/type/percent.proto - -package _type - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on Percent with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *Percent) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Percent with the rules defined in the -// proto definition for this message. If any rules are violated, the result is -// a list of violation errors wrapped in PercentMultiError, or nil if none found. -func (m *Percent) ValidateAll() error { - return m.validate(true) -} - -func (m *Percent) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if val := m.GetValue(); val < 0 || val > 100 { - err := PercentValidationError{ - field: "Value", - reason: "value must be inside range [0, 100]", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(errors) > 0 { - return PercentMultiError(errors) - } - - return nil -} - -// PercentMultiError is an error wrapping multiple validation errors returned -// by Percent.ValidateAll() if the designated constraints aren't met. -type PercentMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m PercentMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m PercentMultiError) AllErrors() []error { return m } - -// PercentValidationError is the validation error returned by Percent.Validate -// if the designated constraints aren't met. -type PercentValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PercentValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e PercentValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PercentValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e PercentValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e PercentValidationError) ErrorName() string { return "PercentValidationError" } - -// Error satisfies the builtin error interface -func (e PercentValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPercent.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PercentValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PercentValidationError{} - -// Validate checks the field values on FractionalPercent with the rules defined -// in the proto definition for this message. If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. 
-func (m *FractionalPercent) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on FractionalPercent with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// FractionalPercentMultiError, or nil if none found. -func (m *FractionalPercent) ValidateAll() error { - return m.validate(true) -} - -func (m *FractionalPercent) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Numerator - - if _, ok := FractionalPercent_DenominatorType_name[int32(m.GetDenominator())]; !ok { - err := FractionalPercentValidationError{ - field: "Denominator", - reason: "value must be one of the defined enum values", - } - if !all { - return err - } - errors = append(errors, err) - } - - if len(errors) > 0 { - return FractionalPercentMultiError(errors) - } - - return nil -} - -// FractionalPercentMultiError is an error wrapping multiple validation errors -// returned by FractionalPercent.ValidateAll() if the designated constraints -// aren't met. -type FractionalPercentMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m FractionalPercentMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m FractionalPercentMultiError) AllErrors() []error { return m } - -// FractionalPercentValidationError is the validation error returned by -// FractionalPercent.Validate if the designated constraints aren't met. -type FractionalPercentValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e FractionalPercentValidationError) Field() string { return e.field } - -// Reason function returns reason value. 
-func (e FractionalPercentValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e FractionalPercentValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e FractionalPercentValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e FractionalPercentValidationError) ErrorName() string { - return "FractionalPercentValidationError" -} - -// Error satisfies the builtin error interface -func (e FractionalPercentValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sFractionalPercent.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = FractionalPercentValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = FractionalPercentValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/range.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/range.pb.go deleted file mode 100644 index 46f8702314..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/range.pb.go +++ /dev/null @@ -1,315 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/range.proto - -package _type - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Specifies the int64 start and end of the range using half-open interval semantics [start, -// end). -type Int64Range struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // start of the range (inclusive) - Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` - // end of the range (exclusive) - End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` -} - -func (x *Int64Range) Reset() { - *x = Int64Range{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_range_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Int64Range) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Int64Range) ProtoMessage() {} - -func (x *Int64Range) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_range_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Int64Range.ProtoReflect.Descriptor instead. -func (*Int64Range) Descriptor() ([]byte, []int) { - return file_envoy_type_range_proto_rawDescGZIP(), []int{0} -} - -func (x *Int64Range) GetStart() int64 { - if x != nil { - return x.Start - } - return 0 -} - -func (x *Int64Range) GetEnd() int64 { - if x != nil { - return x.End - } - return 0 -} - -// Specifies the int32 start and end of the range using half-open interval semantics [start, -// end). 
-type Int32Range struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // start of the range (inclusive) - Start int32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` - // end of the range (exclusive) - End int32 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` -} - -func (x *Int32Range) Reset() { - *x = Int32Range{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_range_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Int32Range) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Int32Range) ProtoMessage() {} - -func (x *Int32Range) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_range_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Int32Range.ProtoReflect.Descriptor instead. -func (*Int32Range) Descriptor() ([]byte, []int) { - return file_envoy_type_range_proto_rawDescGZIP(), []int{1} -} - -func (x *Int32Range) GetStart() int32 { - if x != nil { - return x.Start - } - return 0 -} - -func (x *Int32Range) GetEnd() int32 { - if x != nil { - return x.End - } - return 0 -} - -// Specifies the double start and end of the range using half-open interval semantics [start, -// end). 
-type DoubleRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // start of the range (inclusive) - Start float64 `protobuf:"fixed64,1,opt,name=start,proto3" json:"start,omitempty"` - // end of the range (exclusive) - End float64 `protobuf:"fixed64,2,opt,name=end,proto3" json:"end,omitempty"` -} - -func (x *DoubleRange) Reset() { - *x = DoubleRange{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_range_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DoubleRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DoubleRange) ProtoMessage() {} - -func (x *DoubleRange) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_range_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DoubleRange.ProtoReflect.Descriptor instead. 
-func (*DoubleRange) Descriptor() ([]byte, []int) { - return file_envoy_type_range_proto_rawDescGZIP(), []int{2} -} - -func (x *DoubleRange) GetStart() float64 { - if x != nil { - return x.Start - } - return 0 -} - -func (x *DoubleRange) GetEnd() float64 { - if x != nil { - return x.End - } - return 0 -} - -var File_envoy_type_range_proto protoreflect.FileDescriptor - -var file_envoy_type_range_proto_rawDesc = []byte{ - 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x72, 0x61, 0x6e, - 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x34, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x0a, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, - 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, - 0x35, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x63, 0x0a, 0x18, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, - 0x70, 
0x65, 0x42, 0x0a, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, - 0x79, 0x70, 0x65, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_type_range_proto_rawDescOnce sync.Once - file_envoy_type_range_proto_rawDescData = file_envoy_type_range_proto_rawDesc -) - -func file_envoy_type_range_proto_rawDescGZIP() []byte { - file_envoy_type_range_proto_rawDescOnce.Do(func() { - file_envoy_type_range_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_range_proto_rawDescData) - }) - return file_envoy_type_range_proto_rawDescData -} - -var file_envoy_type_range_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_envoy_type_range_proto_goTypes = []interface{}{ - (*Int64Range)(nil), // 0: envoy.type.Int64Range - (*Int32Range)(nil), // 1: envoy.type.Int32Range - (*DoubleRange)(nil), // 2: envoy.type.DoubleRange -} -var file_envoy_type_range_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_envoy_type_range_proto_init() } -func file_envoy_type_range_proto_init() { - if File_envoy_type_range_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_envoy_type_range_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Int64Range); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_envoy_type_range_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Int32Range); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envoy_type_range_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DoubleRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_range_proto_rawDesc, - NumEnums: 0, - NumMessages: 3, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_range_proto_goTypes, - DependencyIndexes: file_envoy_type_range_proto_depIdxs, - MessageInfos: file_envoy_type_range_proto_msgTypes, - }.Build() - File_envoy_type_range_proto = out.File - file_envoy_type_range_proto_rawDesc = nil - file_envoy_type_range_proto_goTypes = nil - file_envoy_type_range_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/range.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/range.pb.validate.go deleted file mode 100644 index 3364e65ae2..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/range.pb.validate.go +++ /dev/null @@ -1,345 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. 
-// source: envoy/type/range.proto - -package _type - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on Int64Range with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *Int64Range) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Int64Range with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in Int64RangeMultiError, or -// nil if none found. -func (m *Int64Range) ValidateAll() error { - return m.validate(true) -} - -func (m *Int64Range) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Start - - // no validation rules for End - - if len(errors) > 0 { - return Int64RangeMultiError(errors) - } - - return nil -} - -// Int64RangeMultiError is an error wrapping multiple validation errors -// returned by Int64Range.ValidateAll() if the designated constraints aren't met. -type Int64RangeMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m Int64RangeMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. 
-func (m Int64RangeMultiError) AllErrors() []error { return m } - -// Int64RangeValidationError is the validation error returned by -// Int64Range.Validate if the designated constraints aren't met. -type Int64RangeValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e Int64RangeValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e Int64RangeValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e Int64RangeValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e Int64RangeValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e Int64RangeValidationError) ErrorName() string { return "Int64RangeValidationError" } - -// Error satisfies the builtin error interface -func (e Int64RangeValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sInt64Range.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = Int64RangeValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = Int64RangeValidationError{} - -// Validate checks the field values on Int32Range with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *Int32Range) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Int32Range with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in Int32RangeMultiError, or -// nil if none found. 
-func (m *Int32Range) ValidateAll() error { - return m.validate(true) -} - -func (m *Int32Range) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Start - - // no validation rules for End - - if len(errors) > 0 { - return Int32RangeMultiError(errors) - } - - return nil -} - -// Int32RangeMultiError is an error wrapping multiple validation errors -// returned by Int32Range.ValidateAll() if the designated constraints aren't met. -type Int32RangeMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m Int32RangeMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m Int32RangeMultiError) AllErrors() []error { return m } - -// Int32RangeValidationError is the validation error returned by -// Int32Range.Validate if the designated constraints aren't met. -type Int32RangeValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e Int32RangeValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e Int32RangeValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e Int32RangeValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e Int32RangeValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e Int32RangeValidationError) ErrorName() string { return "Int32RangeValidationError" } - -// Error satisfies the builtin error interface -func (e Int32RangeValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sInt32Range.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = Int32RangeValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = Int32RangeValidationError{} - -// Validate checks the field values on DoubleRange with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *DoubleRange) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on DoubleRange with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in DoubleRangeMultiError, or -// nil if none found. -func (m *DoubleRange) ValidateAll() error { - return m.validate(true) -} - -func (m *DoubleRange) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Start - - // no validation rules for End - - if len(errors) > 0 { - return DoubleRangeMultiError(errors) - } - - return nil -} - -// DoubleRangeMultiError is an error wrapping multiple validation errors -// returned by DoubleRange.ValidateAll() if the designated constraints aren't met. -type DoubleRangeMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m DoubleRangeMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m DoubleRangeMultiError) AllErrors() []error { return m } - -// DoubleRangeValidationError is the validation error returned by -// DoubleRange.Validate if the designated constraints aren't met. -type DoubleRangeValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e DoubleRangeValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e DoubleRangeValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e DoubleRangeValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e DoubleRangeValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e DoubleRangeValidationError) ErrorName() string { return "DoubleRangeValidationError" } - -// Error satisfies the builtin error interface -func (e DoubleRangeValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sDoubleRange.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = DoubleRangeValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = DoubleRangeValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/semantic_version.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/semantic_version.pb.go deleted file mode 100644 index e4f185f749..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/semantic_version.pb.go +++ /dev/null @@ -1,176 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/semantic_version.proto - -package _type - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Envoy uses SemVer (https://semver.org/). Major/minor versions indicate -// expected behaviors and APIs, the patch version field is used only -// for security fixes and can be generally ignored. 
-type SemanticVersion struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - MajorNumber uint32 `protobuf:"varint,1,opt,name=major_number,json=majorNumber,proto3" json:"major_number,omitempty"` - MinorNumber uint32 `protobuf:"varint,2,opt,name=minor_number,json=minorNumber,proto3" json:"minor_number,omitempty"` - Patch uint32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` -} - -func (x *SemanticVersion) Reset() { - *x = SemanticVersion{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_semantic_version_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SemanticVersion) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SemanticVersion) ProtoMessage() {} - -func (x *SemanticVersion) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_semantic_version_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SemanticVersion.ProtoReflect.Descriptor instead. 
-func (*SemanticVersion) Descriptor() ([]byte, []int) { - return file_envoy_type_semantic_version_proto_rawDescGZIP(), []int{0} -} - -func (x *SemanticVersion) GetMajorNumber() uint32 { - if x != nil { - return x.MajorNumber - } - return 0 -} - -func (x *SemanticVersion) GetMinorNumber() uint32 { - if x != nil { - return x.MinorNumber - } - return 0 -} - -func (x *SemanticVersion) GetPatch() uint32 { - if x != nil { - return x.Patch - } - return 0 -} - -var File_envoy_type_semantic_version_proto protoreflect.FileDescriptor - -var file_envoy_type_semantic_version_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x73, 0x65, 0x6d, - 0x61, 0x6e, 0x74, 0x69, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x1a, - 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6d, - 0x0a, 0x0f, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x4e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x6f, - 0x72, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x42, 0x6d, 0x0a, - 0x18, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x42, 0x14, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x56, 0x65, 
0x72, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, - 0x74, 0x79, 0x70, 0x65, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x01, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_type_semantic_version_proto_rawDescOnce sync.Once - file_envoy_type_semantic_version_proto_rawDescData = file_envoy_type_semantic_version_proto_rawDesc -) - -func file_envoy_type_semantic_version_proto_rawDescGZIP() []byte { - file_envoy_type_semantic_version_proto_rawDescOnce.Do(func() { - file_envoy_type_semantic_version_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_semantic_version_proto_rawDescData) - }) - return file_envoy_type_semantic_version_proto_rawDescData -} - -var file_envoy_type_semantic_version_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_envoy_type_semantic_version_proto_goTypes = []interface{}{ - (*SemanticVersion)(nil), // 0: envoy.type.SemanticVersion -} -var file_envoy_type_semantic_version_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_envoy_type_semantic_version_proto_init() } -func file_envoy_type_semantic_version_proto_init() { - if File_envoy_type_semantic_version_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_envoy_type_semantic_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SemanticVersion); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_semantic_version_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_semantic_version_proto_goTypes, - DependencyIndexes: file_envoy_type_semantic_version_proto_depIdxs, - MessageInfos: file_envoy_type_semantic_version_proto_msgTypes, - }.Build() - File_envoy_type_semantic_version_proto = out.File - file_envoy_type_semantic_version_proto_rawDesc = nil - file_envoy_type_semantic_version_proto_goTypes = nil - file_envoy_type_semantic_version_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/semantic_version.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/semantic_version.pb.validate.go deleted file mode 100644 index eb0d03af3e..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/semantic_version.pb.validate.go +++ /dev/null @@ -1,142 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/type/semantic_version.proto - -package _type - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on SemanticVersion with the rules defined -// in the proto definition for this message. 
If any rules are violated, the -// first error encountered is returned, or nil if there are no violations. -func (m *SemanticVersion) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on SemanticVersion with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// SemanticVersionMultiError, or nil if none found. -func (m *SemanticVersion) ValidateAll() error { - return m.validate(true) -} - -func (m *SemanticVersion) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for MajorNumber - - // no validation rules for MinorNumber - - // no validation rules for Patch - - if len(errors) > 0 { - return SemanticVersionMultiError(errors) - } - - return nil -} - -// SemanticVersionMultiError is an error wrapping multiple validation errors -// returned by SemanticVersion.ValidateAll() if the designated constraints -// aren't met. -type SemanticVersionMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m SemanticVersionMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m SemanticVersionMultiError) AllErrors() []error { return m } - -// SemanticVersionValidationError is the validation error returned by -// SemanticVersion.Validate if the designated constraints aren't met. -type SemanticVersionValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e SemanticVersionValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SemanticVersionValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. 
-func (e SemanticVersionValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SemanticVersionValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e SemanticVersionValidationError) ErrorName() string { return "SemanticVersionValidationError" } - -// Error satisfies the builtin error interface -func (e SemanticVersionValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSemanticVersion.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SemanticVersionValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SemanticVersionValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/token_bucket.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/token_bucket.pb.go deleted file mode 100644 index a350bcda9f..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/token_bucket.pb.go +++ /dev/null @@ -1,199 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: envoy/type/token_bucket.proto - -package _type - -import ( - _ "github.com/cncf/xds/go/udpa/annotations" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - duration "github.com/golang/protobuf/ptypes/duration" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Configures a token bucket, typically used for rate limiting. -type TokenBucket struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket - // initially contains. - MaxTokens uint32 `protobuf:"varint,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"` - // The number of tokens added to the bucket during each fill interval. If not specified, defaults - // to a single token. - TokensPerFill *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=tokens_per_fill,json=tokensPerFill,proto3" json:"tokens_per_fill,omitempty"` - // The fill interval that tokens are added to the bucket. During each fill interval - // `tokens_per_fill` are added to the bucket. The bucket will never contain more than - // `max_tokens` tokens. - FillInterval *duration.Duration `protobuf:"bytes,3,opt,name=fill_interval,json=fillInterval,proto3" json:"fill_interval,omitempty"` -} - -func (x *TokenBucket) Reset() { - *x = TokenBucket{} - if protoimpl.UnsafeEnabled { - mi := &file_envoy_type_token_bucket_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TokenBucket) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TokenBucket) ProtoMessage() {} - -func (x *TokenBucket) ProtoReflect() protoreflect.Message { - mi := &file_envoy_type_token_bucket_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TokenBucket.ProtoReflect.Descriptor instead. 
-func (*TokenBucket) Descriptor() ([]byte, []int) { - return file_envoy_type_token_bucket_proto_rawDescGZIP(), []int{0} -} - -func (x *TokenBucket) GetMaxTokens() uint32 { - if x != nil { - return x.MaxTokens - } - return 0 -} - -func (x *TokenBucket) GetTokensPerFill() *wrappers.UInt32Value { - if x != nil { - return x.TokensPerFill - } - return nil -} - -func (x *TokenBucket) GetFillInterval() *duration.Duration { - if x != nil { - return x.FillInterval - } - return nil -} - -var File_envoy_type_token_bucket_proto protoreflect.FileDescriptor - -var file_envoy_type_token_bucket_proto_rawDesc = []byte{ - 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x0a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, - 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, - 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xd0, 0x01, 0x0a, 0x0b, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x20, 0x00, - 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x0f, 0x74, - 0x6f, 0x6b, 
0x65, 0x6e, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x20, 0x00, 0x52, 0x0d, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x73, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x6c, 0x12, 0x4a, 0x0a, 0x0d, 0x66, 0x69, - 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, - 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x6c, 0x49, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x69, 0x0a, 0x18, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x42, 0x10, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, - 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, - 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_envoy_type_token_bucket_proto_rawDescOnce sync.Once - file_envoy_type_token_bucket_proto_rawDescData = file_envoy_type_token_bucket_proto_rawDesc -) - -func file_envoy_type_token_bucket_proto_rawDescGZIP() []byte { - file_envoy_type_token_bucket_proto_rawDescOnce.Do(func() { - file_envoy_type_token_bucket_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_envoy_type_token_bucket_proto_rawDescData) - }) - return file_envoy_type_token_bucket_proto_rawDescData -} - -var file_envoy_type_token_bucket_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_envoy_type_token_bucket_proto_goTypes = []interface{}{ - (*TokenBucket)(nil), // 0: envoy.type.TokenBucket - (*wrappers.UInt32Value)(nil), // 1: google.protobuf.UInt32Value - (*duration.Duration)(nil), // 2: google.protobuf.Duration -} -var file_envoy_type_token_bucket_proto_depIdxs = []int32{ - 1, // 0: envoy.type.TokenBucket.tokens_per_fill:type_name -> google.protobuf.UInt32Value - 2, // 1: envoy.type.TokenBucket.fill_interval:type_name -> google.protobuf.Duration - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_envoy_type_token_bucket_proto_init() } -func file_envoy_type_token_bucket_proto_init() { - if File_envoy_type_token_bucket_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_envoy_type_token_bucket_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TokenBucket); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_envoy_type_token_bucket_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_envoy_type_token_bucket_proto_goTypes, - DependencyIndexes: file_envoy_type_token_bucket_proto_depIdxs, - MessageInfos: file_envoy_type_token_bucket_proto_msgTypes, - }.Build() - File_envoy_type_token_bucket_proto = out.File - file_envoy_type_token_bucket_proto_rawDesc = 
nil - file_envoy_type_token_bucket_proto_goTypes = nil - file_envoy_type_token_bucket_proto_depIdxs = nil -} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/token_bucket.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/token_bucket.pb.validate.go deleted file mode 100644 index 9e04de8bc3..0000000000 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/token_bucket.pb.validate.go +++ /dev/null @@ -1,202 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: envoy/type/token_bucket.proto - -package _type - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on TokenBucket with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *TokenBucket) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on TokenBucket with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in TokenBucketMultiError, or -// nil if none found. 
-func (m *TokenBucket) ValidateAll() error { - return m.validate(true) -} - -func (m *TokenBucket) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - if m.GetMaxTokens() <= 0 { - err := TokenBucketValidationError{ - field: "MaxTokens", - reason: "value must be greater than 0", - } - if !all { - return err - } - errors = append(errors, err) - } - - if wrapper := m.GetTokensPerFill(); wrapper != nil { - - if wrapper.GetValue() <= 0 { - err := TokenBucketValidationError{ - field: "TokensPerFill", - reason: "value must be greater than 0", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - - if m.GetFillInterval() == nil { - err := TokenBucketValidationError{ - field: "FillInterval", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - - if d := m.GetFillInterval(); d != nil { - dur, err := d.AsDuration(), d.CheckValid() - if err != nil { - err = TokenBucketValidationError{ - field: "FillInterval", - reason: "value is not a valid duration", - cause: err, - } - if !all { - return err - } - errors = append(errors, err) - } else { - - gt := time.Duration(0*time.Second + 0*time.Nanosecond) - - if dur <= gt { - err := TokenBucketValidationError{ - field: "FillInterval", - reason: "value must be greater than 0s", - } - if !all { - return err - } - errors = append(errors, err) - } - - } - } - - if len(errors) > 0 { - return TokenBucketMultiError(errors) - } - - return nil -} - -// TokenBucketMultiError is an error wrapping multiple validation errors -// returned by TokenBucket.ValidateAll() if the designated constraints aren't met. -type TokenBucketMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m TokenBucketMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. 
-func (m TokenBucketMultiError) AllErrors() []error { return m } - -// TokenBucketValidationError is the validation error returned by -// TokenBucket.Validate if the designated constraints aren't met. -type TokenBucketValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e TokenBucketValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e TokenBucketValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e TokenBucketValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e TokenBucketValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e TokenBucketValidationError) ErrorName() string { return "TokenBucketValidationError" } - -// Error satisfies the builtin error interface -func (e TokenBucketValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sTokenBucket.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = TokenBucketValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = TokenBucketValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go index be8bfb8db6..ef150cd344 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/tracing/v3/custom_tag.proto package tracingv3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go index 9b1628829e..eeebc9efde 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go @@ -68,9 +68,20 @@ func (m *CustomTag) validate(all bool) error { errors = append(errors, err) } - switch m.Type.(type) { - + oneofTypePresent := false + switch v := m.Type.(type) { case *CustomTag_Literal_: + if v == nil { + err := CustomTagValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true if all { switch v := interface{}(m.GetLiteral()).(type) { @@ -102,6 +113,17 @@ func (m *CustomTag) validate(all bool) error { } case *CustomTag_Environment_: + if v == nil { + err := CustomTagValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true if all { switch v := interface{}(m.GetEnvironment()).(type) { @@ -133,6 +155,17 @@ func (m *CustomTag) validate(all bool) error { } case *CustomTag_RequestHeader: + if v == nil { + err := CustomTagValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true if all { switch v := interface{}(m.GetRequestHeader()).(type) { @@ -164,6 +197,17 @@ func (m *CustomTag) validate(all bool) 
error { } case *CustomTag_Metadata_: + if v == nil { + err := CustomTagValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true if all { switch v := interface{}(m.GetMetadata()).(type) { @@ -195,6 +239,9 @@ func (m *CustomTag) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofTypePresent { err := CustomTagValidationError{ field: "Type", reason: "value is required", @@ -203,7 +250,6 @@ func (m *CustomTag) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go index 0d3db96097..c03381ddca 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/v3/hash_policy.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go index 3ada0a062b..5cb102e806 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go @@ -57,9 +57,20 @@ func (m *HashPolicy) validate(all bool) error { var errors []error - switch m.PolicySpecifier.(type) { - + oneofPolicySpecifierPresent := false + switch v := m.PolicySpecifier.(type) { case *HashPolicy_SourceIp_: + if v == nil { + err := HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true if all { switch v := interface{}(m.GetSourceIp()).(type) { @@ -91,6 +102,17 @@ func (m *HashPolicy) validate(all bool) error { } case *HashPolicy_FilterState_: + if v == nil { + err := HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true if all { switch v := interface{}(m.GetFilterState()).(type) { @@ -122,6 +144,9 @@ func (m *HashPolicy) validate(all bool) error { } default: + _ = v // ensures v is used + } + if !oneofPolicySpecifierPresent { err := HashPolicyValidationError{ field: "PolicySpecifier", reason: "value is required", @@ -130,7 +155,6 @@ func (m *HashPolicy) validate(all bool) error { return err } errors = append(errors, err) - } if len(errors) > 0 { diff --git 
a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go index e7db0fb94b..7d21a85f66 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/v3/http.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go index 2bb3c98f28..52cd8b77db 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/v3/http_status.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go index 1c79cca0bf..985e8d94ce 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go @@ -60,7 +60,7 @@ func (m *HttpStatus) validate(all bool) error { if _, ok := _HttpStatus_Code_NotInLookup[m.GetCode()]; ok { err := HttpStatusValidationError{ field: "Code", - reason: "value must not be in list [0]", + reason: "value must not be in list [Empty]", } if !all { return err diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go index b40883d616..22f1ff4939 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/v3/percent.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go index dda58db89b..80323c8625 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/v3/range.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go new file mode 100644 index 0000000000..adab81a8c9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go @@ -0,0 +1,406 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: envoy/type/v3/ratelimit_strategy.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Choose between allow all and deny all. +type RateLimitStrategy_BlanketRule int32 + +const ( + RateLimitStrategy_ALLOW_ALL RateLimitStrategy_BlanketRule = 0 + RateLimitStrategy_DENY_ALL RateLimitStrategy_BlanketRule = 1 +) + +// Enum value maps for RateLimitStrategy_BlanketRule. +var ( + RateLimitStrategy_BlanketRule_name = map[int32]string{ + 0: "ALLOW_ALL", + 1: "DENY_ALL", + } + RateLimitStrategy_BlanketRule_value = map[string]int32{ + "ALLOW_ALL": 0, + "DENY_ALL": 1, + } +) + +func (x RateLimitStrategy_BlanketRule) Enum() *RateLimitStrategy_BlanketRule { + p := new(RateLimitStrategy_BlanketRule) + *p = x + return p +} + +func (x RateLimitStrategy_BlanketRule) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RateLimitStrategy_BlanketRule) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_type_v3_ratelimit_strategy_proto_enumTypes[0].Descriptor() +} + +func (RateLimitStrategy_BlanketRule) Type() protoreflect.EnumType { + return &file_envoy_type_v3_ratelimit_strategy_proto_enumTypes[0] +} + +func (x RateLimitStrategy_BlanketRule) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RateLimitStrategy_BlanketRule.Descriptor instead. 
+func (RateLimitStrategy_BlanketRule) EnumDescriptor() ([]byte, []int) { + return file_envoy_type_v3_ratelimit_strategy_proto_rawDescGZIP(), []int{0, 0} +} + +type RateLimitStrategy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Strategy: + // *RateLimitStrategy_BlanketRule_ + // *RateLimitStrategy_RequestsPerTimeUnit_ + // *RateLimitStrategy_TokenBucket + Strategy isRateLimitStrategy_Strategy `protobuf_oneof:"strategy"` +} + +func (x *RateLimitStrategy) Reset() { + *x = RateLimitStrategy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimitStrategy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitStrategy) ProtoMessage() {} + +func (x *RateLimitStrategy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimitStrategy.ProtoReflect.Descriptor instead. 
+func (*RateLimitStrategy) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_ratelimit_strategy_proto_rawDescGZIP(), []int{0} +} + +func (m *RateLimitStrategy) GetStrategy() isRateLimitStrategy_Strategy { + if m != nil { + return m.Strategy + } + return nil +} + +func (x *RateLimitStrategy) GetBlanketRule() RateLimitStrategy_BlanketRule { + if x, ok := x.GetStrategy().(*RateLimitStrategy_BlanketRule_); ok { + return x.BlanketRule + } + return RateLimitStrategy_ALLOW_ALL +} + +func (x *RateLimitStrategy) GetRequestsPerTimeUnit() *RateLimitStrategy_RequestsPerTimeUnit { + if x, ok := x.GetStrategy().(*RateLimitStrategy_RequestsPerTimeUnit_); ok { + return x.RequestsPerTimeUnit + } + return nil +} + +func (x *RateLimitStrategy) GetTokenBucket() *TokenBucket { + if x, ok := x.GetStrategy().(*RateLimitStrategy_TokenBucket); ok { + return x.TokenBucket + } + return nil +} + +type isRateLimitStrategy_Strategy interface { + isRateLimitStrategy_Strategy() +} + +type RateLimitStrategy_BlanketRule_ struct { + // Allow or Deny the requests. + // If unset, allow all. + BlanketRule RateLimitStrategy_BlanketRule `protobuf:"varint,1,opt,name=blanket_rule,json=blanketRule,proto3,enum=envoy.type.v3.RateLimitStrategy_BlanketRule,oneof"` +} + +type RateLimitStrategy_RequestsPerTimeUnit_ struct { + // Best-effort limit of the number of requests per time unit, f.e. requests per second. + // Does not prescribe any specific rate limiting algorithm, see :ref:`RequestsPerTimeUnit + // ` for details. + RequestsPerTimeUnit *RateLimitStrategy_RequestsPerTimeUnit `protobuf:"bytes,2,opt,name=requests_per_time_unit,json=requestsPerTimeUnit,proto3,oneof"` +} + +type RateLimitStrategy_TokenBucket struct { + // Limit the requests by consuming tokens from the Token Bucket. + // Allow the same number of requests as the number of tokens available in + // the token bucket. 
+ TokenBucket *TokenBucket `protobuf:"bytes,3,opt,name=token_bucket,json=tokenBucket,proto3,oneof"` +} + +func (*RateLimitStrategy_BlanketRule_) isRateLimitStrategy_Strategy() {} + +func (*RateLimitStrategy_RequestsPerTimeUnit_) isRateLimitStrategy_Strategy() {} + +func (*RateLimitStrategy_TokenBucket) isRateLimitStrategy_Strategy() {} + +// Best-effort limit of the number of requests per time unit. +// +// Allows to specify the desired requests per second (RPS, QPS), requests per minute (QPM, RPM), +// etc., without specifying a rate limiting algorithm implementation. +// +// ``RequestsPerTimeUnit`` strategy does not demand any specific rate limiting algorithm to be +// used (in contrast to the :ref:`TokenBucket `, +// for example). It implies that the implementation details of rate limiting algorithm are +// irrelevant as long as the configured number of "requests per time unit" is achieved. +// +// Note that the ``TokenBucket`` is still a valid implementation of the ``RequestsPerTimeUnit`` +// strategy, and may be chosen to enforce the rate limit. However, there's no guarantee it will be +// the ``TokenBucket`` in particular, and not the Leaky Bucket, the Sliding Window, or any other +// rate limiting algorithm that fulfills the requirements. +type RateLimitStrategy_RequestsPerTimeUnit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The desired number of requests per :ref:`time_unit + // ` to allow. + // If set to ``0``, deny all (equivalent to ``BlanketRule.DENY_ALL``). + // + // .. note:: + // Note that the algorithm implementation determines the course of action for the requests + // over the limit. As long as the ``requests_per_time_unit`` converges on the desired value, + // it's allowed to treat this field as a soft-limit: allow bursts, redistribute the allowance + // over time, etc. 
+ // + RequestsPerTimeUnit uint64 `protobuf:"varint,1,opt,name=requests_per_time_unit,json=requestsPerTimeUnit,proto3" json:"requests_per_time_unit,omitempty"` + // The unit of time. Ignored when :ref:`requests_per_time_unit + // ` + // is ``0`` (deny all). + TimeUnit RateLimitUnit `protobuf:"varint,2,opt,name=time_unit,json=timeUnit,proto3,enum=envoy.type.v3.RateLimitUnit" json:"time_unit,omitempty"` +} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) Reset() { + *x = RateLimitStrategy_RequestsPerTimeUnit{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitStrategy_RequestsPerTimeUnit) ProtoMessage() {} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimitStrategy_RequestsPerTimeUnit.ProtoReflect.Descriptor instead. 
+func (*RateLimitStrategy_RequestsPerTimeUnit) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_ratelimit_strategy_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) GetRequestsPerTimeUnit() uint64 { + if x != nil { + return x.RequestsPerTimeUnit + } + return 0 +} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) GetTimeUnit() RateLimitUnit { + if x != nil { + return x.TimeUnit + } + return RateLimitUnit_UNKNOWN +} + +var File_envoy_type_v3_ratelimit_strategy_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_ratelimit_strategy_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, + 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xed, 0x03, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 
0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x5b, 0x0a, 0x0c, + 0x62, 0x6c, 0x61, 0x6e, 0x6b, 0x65, 0x74, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, + 0x74, 0x65, 0x67, 0x79, 0x2e, 0x42, 0x6c, 0x61, 0x6e, 0x6b, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x6c, + 0x61, 0x6e, 0x6b, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x6b, 0x0a, 0x16, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, + 0x6e, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x48, + 0x00, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, + 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x12, 0x3f, 0x0a, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x8f, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x12, + 0x33, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 
0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, + 0x55, 0x6e, 0x69, 0x74, 0x12, 0x43, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x55, 0x6e, 0x69, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x22, 0x2a, 0x0a, 0x0b, 0x42, 0x6c, 0x61, + 0x6e, 0x6b, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, 0x4c, 0x4f, + 0x57, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x4e, 0x59, 0x5f, + 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x42, 0x0f, 0x0a, 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, + 0x79, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x84, 0x01, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x52, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_ratelimit_strategy_proto_rawDescOnce sync.Once + file_envoy_type_v3_ratelimit_strategy_proto_rawDescData = 
file_envoy_type_v3_ratelimit_strategy_proto_rawDesc +) + +func file_envoy_type_v3_ratelimit_strategy_proto_rawDescGZIP() []byte { + file_envoy_type_v3_ratelimit_strategy_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_ratelimit_strategy_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_ratelimit_strategy_proto_rawDescData) + }) + return file_envoy_type_v3_ratelimit_strategy_proto_rawDescData +} + +var file_envoy_type_v3_ratelimit_strategy_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_type_v3_ratelimit_strategy_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_v3_ratelimit_strategy_proto_goTypes = []interface{}{ + (RateLimitStrategy_BlanketRule)(0), // 0: envoy.type.v3.RateLimitStrategy.BlanketRule + (*RateLimitStrategy)(nil), // 1: envoy.type.v3.RateLimitStrategy + (*RateLimitStrategy_RequestsPerTimeUnit)(nil), // 2: envoy.type.v3.RateLimitStrategy.RequestsPerTimeUnit + (*TokenBucket)(nil), // 3: envoy.type.v3.TokenBucket + (RateLimitUnit)(0), // 4: envoy.type.v3.RateLimitUnit +} +var file_envoy_type_v3_ratelimit_strategy_proto_depIdxs = []int32{ + 0, // 0: envoy.type.v3.RateLimitStrategy.blanket_rule:type_name -> envoy.type.v3.RateLimitStrategy.BlanketRule + 2, // 1: envoy.type.v3.RateLimitStrategy.requests_per_time_unit:type_name -> envoy.type.v3.RateLimitStrategy.RequestsPerTimeUnit + 3, // 2: envoy.type.v3.RateLimitStrategy.token_bucket:type_name -> envoy.type.v3.TokenBucket + 4, // 3: envoy.type.v3.RateLimitStrategy.RequestsPerTimeUnit.time_unit:type_name -> envoy.type.v3.RateLimitUnit + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_ratelimit_strategy_proto_init() } +func file_envoy_type_v3_ratelimit_strategy_proto_init() { + if 
File_envoy_type_v3_ratelimit_strategy_proto != nil { + return + } + file_envoy_type_v3_ratelimit_unit_proto_init() + file_envoy_type_v3_token_bucket_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimitStrategy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimitStrategy_RequestsPerTimeUnit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*RateLimitStrategy_BlanketRule_)(nil), + (*RateLimitStrategy_RequestsPerTimeUnit_)(nil), + (*RateLimitStrategy_TokenBucket)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_ratelimit_strategy_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_ratelimit_strategy_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_ratelimit_strategy_proto_depIdxs, + EnumInfos: file_envoy_type_v3_ratelimit_strategy_proto_enumTypes, + MessageInfos: file_envoy_type_v3_ratelimit_strategy_proto_msgTypes, + }.Build() + File_envoy_type_v3_ratelimit_strategy_proto = out.File + file_envoy_type_v3_ratelimit_strategy_proto_rawDesc = nil + file_envoy_type_v3_ratelimit_strategy_proto_goTypes = nil + file_envoy_type_v3_ratelimit_strategy_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go new file mode 100644 index 0000000000..1d22adb098 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go @@ -0,0 +1,380 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/v3/ratelimit_strategy.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RateLimitStrategy with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *RateLimitStrategy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimitStrategy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RateLimitStrategyMultiError, or nil if none found. 
+func (m *RateLimitStrategy) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimitStrategy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofStrategyPresent := false + switch v := m.Strategy.(type) { + case *RateLimitStrategy_BlanketRule_: + if v == nil { + err := RateLimitStrategyValidationError{ + field: "Strategy", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStrategyPresent = true + + if _, ok := RateLimitStrategy_BlanketRule_name[int32(m.GetBlanketRule())]; !ok { + err := RateLimitStrategyValidationError{ + field: "BlanketRule", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RateLimitStrategy_RequestsPerTimeUnit_: + if v == nil { + err := RateLimitStrategyValidationError{ + field: "Strategy", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStrategyPresent = true + + if all { + switch v := interface{}(m.GetRequestsPerTimeUnit()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimitStrategyValidationError{ + field: "RequestsPerTimeUnit", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimitStrategyValidationError{ + field: "RequestsPerTimeUnit", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestsPerTimeUnit()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimitStrategyValidationError{ + field: "RequestsPerTimeUnit", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimitStrategy_TokenBucket: + if v == nil { + err := 
RateLimitStrategyValidationError{ + field: "Strategy", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStrategyPresent = true + + if all { + switch v := interface{}(m.GetTokenBucket()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimitStrategyValidationError{ + field: "TokenBucket", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimitStrategyValidationError{ + field: "TokenBucket", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTokenBucket()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimitStrategyValidationError{ + field: "TokenBucket", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofStrategyPresent { + err := RateLimitStrategyValidationError{ + field: "Strategy", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RateLimitStrategyMultiError(errors) + } + + return nil +} + +// RateLimitStrategyMultiError is an error wrapping multiple validation errors +// returned by RateLimitStrategy.ValidateAll() if the designated constraints +// aren't met. +type RateLimitStrategyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimitStrategyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RateLimitStrategyMultiError) AllErrors() []error { return m } + +// RateLimitStrategyValidationError is the validation error returned by +// RateLimitStrategy.Validate if the designated constraints aren't met. +type RateLimitStrategyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimitStrategyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimitStrategyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimitStrategyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimitStrategyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimitStrategyValidationError) ErrorName() string { + return "RateLimitStrategyValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimitStrategyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimitStrategy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimitStrategyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimitStrategyValidationError{} + +// Validate checks the field values on RateLimitStrategy_RequestsPerTimeUnit +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. 
+func (m *RateLimitStrategy_RequestsPerTimeUnit) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimitStrategy_RequestsPerTimeUnit +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RateLimitStrategy_RequestsPerTimeUnitMultiError, or nil if none found. +func (m *RateLimitStrategy_RequestsPerTimeUnit) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimitStrategy_RequestsPerTimeUnit) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for RequestsPerTimeUnit + + if _, ok := RateLimitUnit_name[int32(m.GetTimeUnit())]; !ok { + err := RateLimitStrategy_RequestsPerTimeUnitValidationError{ + field: "TimeUnit", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RateLimitStrategy_RequestsPerTimeUnitMultiError(errors) + } + + return nil +} + +// RateLimitStrategy_RequestsPerTimeUnitMultiError is an error wrapping +// multiple validation errors returned by +// RateLimitStrategy_RequestsPerTimeUnit.ValidateAll() if the designated +// constraints aren't met. +type RateLimitStrategy_RequestsPerTimeUnitMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimitStrategy_RequestsPerTimeUnitMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimitStrategy_RequestsPerTimeUnitMultiError) AllErrors() []error { return m } + +// RateLimitStrategy_RequestsPerTimeUnitValidationError is the validation error +// returned by RateLimitStrategy_RequestsPerTimeUnit.Validate if the +// designated constraints aren't met. 
+type RateLimitStrategy_RequestsPerTimeUnitValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) ErrorName() string { + return "RateLimitStrategy_RequestsPerTimeUnitValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimitStrategy_RequestsPerTimeUnit.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimitStrategy_RequestsPerTimeUnitValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimitStrategy_RequestsPerTimeUnitValidationError{} diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go index f859e44fcb..e98d9c5989 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go @@ 
-1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/v3/ratelimit_unit.proto package typev3 @@ -35,6 +35,10 @@ const ( RateLimitUnit_HOUR RateLimitUnit = 3 // The time unit representing a day. RateLimitUnit_DAY RateLimitUnit = 4 + // The time unit representing a month. + RateLimitUnit_MONTH RateLimitUnit = 5 + // The time unit representing a year. + RateLimitUnit_YEAR RateLimitUnit = 6 ) // Enum value maps for RateLimitUnit. @@ -45,6 +49,8 @@ var ( 2: "MINUTE", 3: "HOUR", 4: "DAY", + 5: "MONTH", + 6: "YEAR", } RateLimitUnit_value = map[string]int32{ "UNKNOWN": 0, @@ -52,6 +58,8 @@ var ( "MINUTE": 2, "HOUR": 3, "DAY": 4, + "MONTH": 5, + "YEAR": 6, } ) @@ -90,19 +98,21 @@ var file_envoy_type_v3_ratelimit_unit_proto_rawDesc = []byte{ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2a, 0x47, 0x0a, 0x0d, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x55, + 0x74, 0x6f, 0x2a, 0x5c, 0x0a, 0x0d, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x55, 0x6e, 0x69, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x49, 0x4e, 0x55, 0x54, 0x45, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x55, 0x52, - 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x04, 0x42, 0x78, 0x0a, 0x1b, 0x69, - 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x52, 0x61, 0x74, 0x65, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x55, 0x6e, 0x69, 0x74, 
0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, - 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, - 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, + 0x4f, 0x4e, 0x54, 0x48, 0x10, 0x05, 0x12, 0x08, 0x0a, 0x04, 0x59, 0x45, 0x41, 0x52, 0x10, 0x06, + 0x42, 0x78, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x12, 0x52, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x55, 0x6e, 0x69, 0x74, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, 0x70, 0x65, + 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go index 87c616bba2..1909162f83 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go +++ 
b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/v3/semantic_version.proto package typev3 diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go index 8cb80c9ba3..dd629581c4 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.12 // source: envoy/type/v3/token_bucket.proto package typev3 @@ -37,8 +37,8 @@ type TokenBucket struct { // to a single token. TokensPerFill *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=tokens_per_fill,json=tokensPerFill,proto3" json:"tokens_per_fill,omitempty"` // The fill interval that tokens are added to the bucket. During each fill interval - // `tokens_per_fill` are added to the bucket. The bucket will never contain more than - // `max_tokens` tokens. + // ``tokens_per_fill`` are added to the bucket. The bucket will never contain more than + // ``max_tokens`` tokens. 
FillInterval *duration.Duration `protobuf:"bytes,3,opt,name=fill_interval,json=fillInterval,proto3" json:"fill_interval,omitempty"` } diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h b/terraform/providers/google/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h index 190272e37d..d6cf6c9d90 100644 --- a/terraform/providers/google/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h +++ b/terraform/providers/google/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h @@ -28,7 +28,6 @@ #endif #include "google/protobuf/message.h" -#include "google/protobuf/stubs/strutil.h" // for UTF8Len namespace pgv { using std::string; @@ -151,13 +150,25 @@ static inline bool IsHostname(const string& to_validate) { return true; } -static inline size_t Utf8Len(const string& narrow_string) { +namespace { + +inline int OneCharLen(const char* src) { + return "\1\1\1\1\1\1\1\1\1\1\1\1\2\2\3\4"[(*src & 0xFF) >> 4]; +} + +inline int UTF8FirstLetterNumBytes(const char *utf8_str, int str_len) { + if (str_len == 0) + return 0; + return OneCharLen(utf8_str); +} + +inline size_t Utf8Len(const string& narrow_string) { const char* str_char = narrow_string.c_str(); ptrdiff_t byte_len = narrow_string.length(); size_t unicode_len = 0; int char_len = 1; while (byte_len > 0 && char_len > 0) { - char_len = google::protobuf::UTF8FirstLetterNumBytes(str_char, byte_len); + char_len = UTF8FirstLetterNumBytes(str_char, byte_len); str_char += char_len; byte_len -= char_len; ++unicode_len; @@ -165,6 +176,8 @@ static inline size_t Utf8Len(const string& narrow_string) { return unicode_len; } +} // namespace + } // namespace pgv #endif // _VALIDATE_H diff --git a/terraform/providers/google/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go b/terraform/providers/google/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go index 825774fe4f..a31b2e1a3f 100644 --- 
a/terraform/providers/google/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go +++ b/terraform/providers/google/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.30.0 +// protoc v4.22.2 // source: validate/validate.proto package validate diff --git a/terraform/providers/google/vendor/github.com/golang/glog/glog.go b/terraform/providers/google/vendor/github.com/golang/glog/glog.go index 718c34f886..e108ae8b4f 100644 --- a/terraform/providers/google/vendor/github.com/golang/glog/glog.go +++ b/terraform/providers/google/vendor/github.com/golang/glog/glog.go @@ -1,6 +1,6 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// Go support for leveled logs, analogous to https://github.com/google/glog. // -// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2023 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -50,15 +50,15 @@ // Log files will be written to this directory instead of the // default temporary directory. // -// Other flags provide aids to debugging. +// Other flags provide aids to debugging. // // -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as +// A comma-separated list of file and line numbers holding a logging +// statement, such as // -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) +// A stack trace will be written to the Info log whenever execution +// hits one of these statements. (Unlike with -vmodule, the ".go" +// must bepresent.) // -v=0 // Enable V-leveled logging at the specified level. 
// -vmodule="" @@ -66,100 +66,47 @@ // where pattern is a literal file name (minus the ".go" suffix) or // "glob" pattern and N is a V level. For instance, // -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". -// +// sets the V level to 3 in all Go files whose names begin with "gopher", +// and +// -vmodule=/path/to/glog/glog_test=1 +// sets the V level to 1 in the Go file /path/to/glog/glog_test.go. +// If a glob pattern contains a slash, it is matched against the full path, +// and the file name. Otherwise, the pattern is +// matched only against the file's basename. When both -vmodule and -v +// are specified, the -vmodule values take precedence for the specified +// modules. package glog +// This file contains the parts of the log package that are shared among all +// implementations (file, envelope, and appengine). + import ( - "bufio" "bytes" "errors" - "flag" "fmt" - "io" stdLog "log" "os" - "path/filepath" + "reflect" "runtime" + "runtime/pprof" "strconv" - "strings" "sync" "sync/atomic" + "syscall" "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 + "github.com/golang/glog/internal/logsink" + "github.com/golang/glog/internal/stackdump" ) -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. 
-func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} +var timeNow = time.Now // Stubbed out for testing. -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? - if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} +// ErrNoLog is the error we return if no log file has yet been created +// for the specified log type. +var ErrNoLog = errors.New("log file not yet created") // OutputStats tracks the number of output lines and bytes written. type OutputStats struct { @@ -183,724 +130,99 @@ var Stats struct { Info, Warning, Error OutputStats } -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, +var severityStats = [...]*OutputStats{ + logsink.Info: &Stats.Info, + logsink.Warning: &Stats.Warning, + logsink.Error: &Stats.Error, + logsink.Fatal: nil, } -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. 
-// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. +// Level specifies a level of verbosity for V logs. The -v flag is of type +// Level and should be modified only through the flag.Value interface. type Level int32 -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. -func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} +var metaPool sync.Pool // Pool of *logsink.Meta. -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. 
-func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) +// metaPoolGet returns a *logsink.Meta from metaPool as both an interface and a +// pointer, allocating a new one if necessary. (Returning the interface value +// directly avoids an allocation if there was an existing pointer in the pool.) +func metaPoolGet() (any, *logsink.Meta) { + if metai := metaPool.Get(); metai != nil { + return metai, metai.(*logsink.Meta) } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported. -func (m *moduleSpec) Get() interface{} { - return nil + meta := new(logsink.Meta) + return meta, meta } -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? 
- filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} +type stack bool -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. -func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. 
- t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - - logging.setVState(0, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. 
- - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. -// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. 
- if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. - // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. +const ( + noStack = stack(false) + withStack = stack(true) +) -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) +func appendBacktrace(depth int, format string, args []any) (string, []any) { + // Capture a backtrace as a stackdump.Stack (both text and PC slice). 
+ // Structured log sinks can extract the backtrace in whichever format they + // prefer (PCs or text), and Text sinks will include it as just another part + // of the log message. + // + // Use depth instead of depth+1 so that the backtrace always includes the + // log function itself - otherwise the reason for the trace appearing in the + // log may not be obvious to the reader. + dump := stackdump.Caller(depth) + + // Add an arg and an entry in the format string for the stack dump. + // + // Copy the "args" slice to avoid a rare but serious aliasing bug + // (corrupting the caller's slice if they passed it to a non-Fatal call + // using "..."). + format = format + "\n\n%v\n" + args = append(append([]any(nil), args...), dump) + + return format, args +} + +// logf writes a log message for a log function call (or log function wrapper) +// at the given depth in the current goroutine's stack. +func logf(depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) { + now := timeNow() + _, file, line, ok := runtime.Caller(depth + 1) if !ok { file = "???" line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } } - return l.formatHeader(s, file, line), file, line -} -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits + if stack == withStack || backtraceAt(file, line) { + format, args = appendBacktrace(depth+1, format, args) } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. 
- _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } + metai, meta := metaPoolGet() + *meta = logsink.Meta{ + Time: now, + File: file, + Line: line, + Depth: depth + 1, + Severity: severity, + Verbose: verbose, + Thread: int64(pid), } - return copy(buf.tmp[i:], buf.tmp[j:]) + sinkf(meta, format, args...) 
+ metaPool.Put(metai) } -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. 
-func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if !flag.Parsed() { - os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) - os.Stderr.Write(data) - } else if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. 
- } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { +func sinkf(meta *logsink.Meta, format string, args ...any) { + meta.Depth++ + n, err := logsink.Printf(meta, format, args...) + if stats := severityStats[meta.Severity]; stats != nil { atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. 
-func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. -type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } + atomic.AddInt64(&stats.bytes, int64(n)) } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. 
- var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. 
- for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } + logsink.Printf(meta, "glog: exiting because of error: %s", err) + sinks.file.Flush() + os.Exit(2) } } @@ -912,9 +234,9 @@ func (l *loggingT) flushAll() { // Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not // recognized, CopyStandardLogTo panics. func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + sev, err := logsink.ParseSeverity(name) + if err != nil { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): %v", name, err)) } // Set a log format that captures the user's file and line: // d.go:23: message @@ -922,9 +244,22 @@ func CopyStandardLogTo(name string) { stdLog.SetOutput(logBridge(sev)) } +// NewStandardLogger returns a Logger that writes to the Google logs for the +// named and lower severities. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, NewStandardLogger panics. +func NewStandardLogger(name string) *stdLog.Logger { + sev, err := logsink.ParseSeverity(name) + if err != nil { + panic(fmt.Sprintf("log.NewStandardLogger(%q): %v", name, err)) + } + return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) +} + // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. -type logBridge severity +type logBridge logsink.Severity // Write parses the standard logging line and passes its components to the // logger for severity(lb). @@ -946,36 +281,72 @@ func (lb logBridge) Write(b []byte) (n int, err error) { line = 1 } } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. 
- logging.printWithFileLine(severity(lb), file, line, true, text) + + // The depth below hard-codes details of how stdlog gets here. The alternative would be to walk + // up the stack looking for src/log/log.go but that seems like it would be + // unfortunately slow. + const stdLogDepth = 4 + + metai, meta := metaPoolGet() + *meta = logsink.Meta{ + Time: timeNow(), + File: file, + Line: line, + Depth: stdLogDepth, + Severity: logsink.Severity(lb), + Thread: int64(pid), + } + + format := "%s" + args := []any{text} + if backtraceAt(file, line) { + format, args = appendBacktrace(meta.Depth, format, args) + } + + sinkf(meta, format, args...) + metaPool.Put(metai) + return len(b), nil } -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] +// defaultFormat returns a fmt.Printf format specifier that formats its +// arguments as if they were passed to fmt.Print. +func defaultFormat(args []any) string { + n := len(args) + switch n { + case 0: + return "" + case 1: + return "%v" + } + + b := make([]byte, 0, n*3-1) + wasString := true // Suppress leading space. + for _, arg := range args { + isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String + if wasString || isString { + b = append(b, "%v"...) + } else { + b = append(b, " %v"...) + } + wasString = isString } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] + return string(b) +} + +// lnFormat returns a fmt.Printf format specifier that formats its arguments +// as if they were passed to fmt.Println. 
+func lnFormat(args []any) string { + if len(args) == 0 { + return "\n" } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } + + b := make([]byte, 0, len(args)*3) + for range args { + b = append(b, "%v "...) } - l.vmap[pc] = 0 - return 0 + b[len(b)-1] = '\n' // Replace the last space with a newline. + return string(b) } // Verbose is a boolean type that implements Infof (like Printf) etc. @@ -986,9 +357,13 @@ type Verbose bool // The returned value is a boolean of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either +// // if glog.V(2) { glog.Info("log this") } +// // or +// // glog.V(2).Info("log this") +// // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // @@ -997,184 +372,250 @@ type Verbose bool // V is at most the value of -v, or of -vmodule for the source file containing the // call, the V call will log. func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. + return VDepth(1, level) +} - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } +// VDepth acts as V but uses depth to determine which call frame to check vmodule for. +// VDepth(0, level) is the same as V(level). +func VDepth(depth int, level Level) Verbose { + return Verbose(verboseEnabled(depth+1, level)) +} - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. - if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. 
- logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...any) { + v.InfoDepth(1, args...) +} + +// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoDepth(depth int, args ...any) { + if v { + logf(depth+1, logsink.Info, true, noStack, defaultFormat(args), args...) } - return Verbose(false) } -// Info is equivalent to the global Info function, guarded by the value of v. +// InfoDepthf is equivalent to the global InfoDepthf function, guarded by the value of v. // See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { +func (v Verbose) InfoDepthf(depth int, format string, args ...any) { if v { - logging.print(infoLog, args...) + logf(depth+1, logsink.Info, true, noStack, format, args...) } } // Infoln is equivalent to the global Infoln function, guarded by the value of v. // See the documentation of V for usage. -func (v Verbose) Infoln(args ...interface{}) { +func (v Verbose) Infoln(args ...any) { if v { - logging.println(infoLog, args...) + logf(1, logsink.Info, true, noStack, lnFormat(args), args...) } } // Infof is equivalent to the global Infof function, guarded by the value of v. // See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { +func (v Verbose) Infof(format string, args ...any) { if v { - logging.printf(infoLog, format, args...) + logf(1, logsink.Info, true, noStack, format, args...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. 
-func Info(args ...interface{}) { - logging.print(infoLog, args...) +func Info(args ...any) { + InfoDepth(1, args...) +} + +// InfoDepth calls Info from a different depth in the call stack. +// This enables a callee to emit logs that use the callsite information of its caller +// or any other callers in the stack. When depth == 0, the original callee's line +// information is emitted. When depth > 0, depth frames are skipped in the call stack +// and the final frame is treated like the original callee to Info. +func InfoDepth(depth int, args ...any) { + logf(depth+1, logsink.Info, false, noStack, defaultFormat(args), args...) } -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) +// InfoDepthf acts as InfoDepth but with format string. +func InfoDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Info, false, noStack, format, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) +func Infoln(args ...any) { + logf(1, logsink.Info, false, noStack, lnFormat(args), args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) +func Infof(format string, args ...any) { + logf(1, logsink.Info, false, noStack, format, args...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) +func Warning(args ...any) { + WarningDepth(1, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. 
// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) +func WarningDepth(depth int, args ...any) { + logf(depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...) +} + +// WarningDepthf acts as Warningf but uses depth to determine which call frame to log. +// WarningDepthf(0, "msg") is the same as Warningf("msg"). +func WarningDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Warning, false, noStack, format, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) +func Warningln(args ...any) { + logf(1, logsink.Warning, false, noStack, lnFormat(args), args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) +func Warningf(format string, args ...any) { + logf(1, logsink.Warning, false, noStack, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) +func Error(args ...any) { + ErrorDepth(1, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) +func ErrorDepth(depth int, args ...any) { + logf(depth+1, logsink.Error, false, noStack, defaultFormat(args), args...) +} + +// ErrorDepthf acts as Errorf but uses depth to determine which call frame to log. +// ErrorDepthf(0, "msg") is the same as Errorf("msg"). 
+func ErrorDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Error, false, noStack, format, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) +func Errorln(args ...any) { + logf(1, logsink.Error, false, noStack, lnFormat(args), args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) +func Errorf(format string, args ...any) { + logf(1, logsink.Error, false, noStack, format, args...) +} + +func fatalf(depth int, format string, args ...any) { + logf(depth+1, logsink.Fatal, false, withStack, format, args...) + sinks.file.Flush() + + err := abortProcess() // Should not return. + + // Failed to abort the process using signals. Dump a stack trace and exit. + Errorf("abortProcess returned unexpectedly: %v", err) + sinks.file.Flush() + pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) + os.Exit(2) // Exit with the same code as the default SIGABRT handler. +} + +// abortProcess attempts to kill the current process in a way that will dump the +// currently-running goroutines someplace useful (Coroner or stderr). +// +// It does this by sending SIGABRT to the current process. Unfortunately, the +// signal may or may not be delivered to the current thread; in order to do that +// portably, we would need to add a cgo dependency and call pthread_kill. +// +// If successful, abortProcess does not return. +func abortProcess() error { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err + } + if err := p.Signal(syscall.SIGABRT); err != nil { + return err + } + + // Sent the signal. 
Now we wait for it to arrive and any SIGABRT handlers to + // run (and eventually terminate the process themselves). + // + // We could just "select{}" here, but there's an outside chance that would + // trigger the runtime's deadlock detector if there happen not to be any + // background goroutines running. So we'll sleep a while first to give + // the signal some time. + time.Sleep(10 * time.Second) + select {} } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls os.Exit(2). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) +func Fatal(args ...any) { + FatalDepth(1, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) +func FatalDepth(depth int, args ...any) { + fatalf(depth+1, defaultFormat(args), args...) +} + +// FatalDepthf acts as Fatalf but uses depth to determine which call frame to log. +// FatalDepthf(0, "msg") is the same as Fatalf("msg"). +func FatalDepthf(depth int, format string, args ...any) { + fatalf(depth+1, format, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls os.Exit(2). // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) +func Fatalln(args ...any) { + fatalf(1, lnFormat(args), args...) 
} // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls os.Exit(2). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) +func Fatalf(format string, args ...any) { + fatalf(1, format, args...) } -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 +func exitf(depth int, format string, args ...any) { + logf(depth+1, logsink.Fatal, false, noStack, format, args...) + sinks.file.Flush() + os.Exit(1) +} // Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) +func Exit(args ...any) { + ExitDepth(1, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) +func ExitDepth(depth int, args ...any) { + exitf(depth+1, defaultFormat(args), args...) +} + +// ExitDepthf acts as Exitf but uses depth to determine which call frame to log. +// ExitDepthf(0, "msg") is the same as Exitf("msg"). +func ExitDepthf(depth int, format string, args ...any) { + exitf(depth+1, format, args...) } // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) +func Exitln(args ...any) { + exitf(1, lnFormat(args), args...) 
} // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) +func Exitf(format string, args ...any) { + exitf(1, format, args...) } diff --git a/terraform/providers/google/vendor/github.com/golang/glog/glog_file.go b/terraform/providers/google/vendor/github.com/golang/glog/glog_file.go index 65075d2811..af1c934b82 100644 --- a/terraform/providers/google/vendor/github.com/golang/glog/glog_file.go +++ b/terraform/providers/google/vendor/github.com/golang/glog/glog_file.go @@ -1,6 +1,6 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// Go support for leveled logs, analogous to https://github.com/google/glog. // -// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2023 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,26 +19,34 @@ package glog import ( + "bufio" + "bytes" "errors" "flag" "fmt" + "io" "os" "os/user" "path/filepath" + "runtime" "strings" "sync" "time" -) -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 + "github.com/golang/glog/internal/logsink" +) // logDirs lists the candidate directories for new log files. var logDirs []string -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. -var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") +var ( + // If non-empty, overrides the choice of directory in which to write logs. + // See createLogDirs for the full list of possible destinations. 
+ logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + logLink = flag.String("log_link", "", "If non-empty, add symbolic links in this directory to the log files") + logBufLevel = flag.Int("logbuflevel", int(logsink.Info), "Buffer log messages logged at this level or lower"+ + " (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.") +) func createLogDirs() { if *logDir != "" { @@ -64,9 +72,17 @@ func init() { if err == nil { userName = current.Username } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) + // Sanitize userName since it is used to construct file paths. + userName = strings.Map(func(r rune) rune { + switch { + case r >= 'a' && r <= 'z': + case r >= 'A' && r <= 'Z': + case r >= '0' && r <= '9': + default: + return '_' + } + return r + }, userName) } // shortHostname returns its argument, truncating at the first period. @@ -122,3 +138,270 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) { } return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) } + +// flushSyncWriter is the interface satisfied by logging destinations. +type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer + filenames() []string +} + +var sinks struct { + stderr stderrSink + file fileSink +} + +func init() { + sinks.stderr.w = os.Stderr + + // Register stderr first: that way if we crash during file-writing at least + // the log will have gone somewhere. + logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr, &sinks.file) + + sinks.file.flushChan = make(chan logsink.Severity, 1) + go sinks.file.flushDaemon() +} + +// stderrSink is a logsink.Text that writes log entries to stderr +// if they meet certain conditions. +type stderrSink struct { + mu sync.Mutex + w io.Writer +} + +// Enabled implements logsink.Text.Enabled. 
It returns true if any of the +// various stderr flags are enabled for logs of the given severity, if the log +// message is from the standard "log" package, or if google.Init has not yet run +// (and hence file logging is not yet initialized). +func (s *stderrSink) Enabled(m *logsink.Meta) bool { + return toStderr || alsoToStderr || m.Severity >= stderrThreshold.get() +} + +// Emit implements logsink.Text.Emit. +func (s *stderrSink) Emit(m *logsink.Meta, data []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + dn, err := s.w.Write(data) + n += dn + return n, err +} + +// severityWriters is an array of flushSyncWriter with a value for each +// logsink.Severity. +type severityWriters [4]flushSyncWriter + +// fileSink is a logsink.Text that prints to a set of Google log files. +type fileSink struct { + mu sync.Mutex + // file holds writer for each of the log types. + file severityWriters + flushChan chan logsink.Severity +} + +// Enabled implements logsink.Text.Enabled. It returns true if google.Init +// has run and both --disable_log_to_disk and --logtostderr are false. +func (s *fileSink) Enabled(m *logsink.Meta) bool { + return !toStderr +} + +// Emit implements logsink.Text.Emit +func (s *fileSink) Emit(m *logsink.Meta, data []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + if err = s.createMissingFiles(m.Severity); err != nil { + return 0, err + } + for sev := m.Severity; sev >= logsink.Info; sev-- { + if _, fErr := s.file[sev].Write(data); fErr != nil && err == nil { + err = fErr // Take the first error. + } + } + n = len(data) + if int(m.Severity) > *logBufLevel { + select { + case s.flushChan <- m.Severity: + default: + } + } + + return n, err +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. 
+// s.mu is held for all its methods. +type syncBuffer struct { + sink *fileSink + *bufio.Writer + file *os.File + names []string + sev logsink.Severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + return 0, err + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + return n, err +} + +func (sb *syncBuffer) filenames() []string { + return sb.names +} + +const footer = "\nCONTINUED IN NEXT FILE\n" + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + var err error + pn := "" + file, name, err := create(sb.sev.String(), now) + + if sb.file != nil { + // The current log file becomes the previous log at the end of + // this block, so save its name for use in the header of the next + // file. + pn = sb.file.Name() + sb.Flush() + // If there's an existing file, write a footer with the name of + // the next file in the chain, followed by the constant string + // \nCONTINUED IN NEXT FILE\n to make continuation detection simple. + sb.file.Write([]byte("Next log: ")) + sb.file.Write([]byte(name)) + sb.file.Write([]byte(footer)) + sb.file.Close() + } + + sb.file = file + sb.names = append(sb.names, name) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. 
+ var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Previous log: %s\n", pn) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createMissingFiles creates all the log files for severity from infoLog up to +// upTo that have not already been created. +// s.mu is held. +func (s *fileSink) createMissingFiles(upTo logsink.Severity) error { + if s.file[upTo] != nil { + return nil + } + now := time.Now() + // Files are created in increasing severity order, so we can be assured that + // if a high severity logfile exists, then so do all of lower severity. + for sev := logsink.Info; sev <= upTo; sev++ { + if s.file[sev] != nil { + continue + } + sb := &syncBuffer{ + sink: s, + sev: sev, + } + if err := sb.rotateFile(now); err != nil { + return err + } + s.file[sev] = sb + } + return nil +} + +// flushDaemon periodically flushes the log file buffers. +func (s *fileSink) flushDaemon() { + tick := time.NewTicker(30 * time.Second) + defer tick.Stop() + for { + select { + case <-tick.C: + s.Flush() + case sev := <-s.flushChan: + s.flush(sev) + } + } +} + +// Flush flushes all pending log I/O. +func Flush() { + sinks.file.Flush() +} + +// Flush flushes all the logs and attempts to "sync" their data to disk. +func (s *fileSink) Flush() error { + return s.flush(logsink.Info) +} + +// flush flushes all logs of severity threshold or greater. 
+func (s *fileSink) flush(threshold logsink.Severity) error { + s.mu.Lock() + defer s.mu.Unlock() + + var firstErr error + updateErr := func(err error) { + if err != nil && firstErr == nil { + firstErr = err + } + } + + // Flush from fatal down, in case there's trouble flushing. + for sev := logsink.Fatal; sev >= threshold; sev-- { + file := s.file[sev] + if file != nil { + updateErr(file.Flush()) + updateErr(file.Sync()) + } + } + + return firstErr +} + +// Names returns the names of the log files holding the FATAL, ERROR, +// WARNING, or INFO logs. Returns ErrNoLog if the log for the given +// level doesn't exist (e.g. because no messages of that level have been +// written). This may return multiple names if the log type requested +// has rolled over. +func Names(s string) ([]string, error) { + severity, err := logsink.ParseSeverity(s) + if err != nil { + return nil, err + } + + sinks.file.mu.Lock() + defer sinks.file.mu.Unlock() + f := sinks.file.file[severity] + if f == nil { + return nil, ErrNoLog + } + + return f.filenames(), nil +} diff --git a/terraform/providers/google/vendor/github.com/golang/glog/glog_flags.go b/terraform/providers/google/vendor/github.com/golang/glog/glog_flags.go new file mode 100644 index 0000000000..3060e54d9d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/golang/glog/glog_flags.go @@ -0,0 +1,395 @@ +// Go support for leveled logs, analogous to https://github.com/google/glog. +// +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package glog + +import ( + "bytes" + "errors" + "flag" + "fmt" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/golang/glog/internal/logsink" +) + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + full bool // The pattern wants to match the full path + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(full, file string) bool { + if m.literal { + if m.full { + return full == m.pattern + } + return file == m.pattern + } + if m.full { + match, _ := filepath.Match(m.pattern, full) + return match + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// isFull reports whether the pattern matches the full file path, that is, +// whether it contains /. +func isFull(pattern string) bool { + return strings.ContainsRune(pattern, '/') +} + +// verboseFlags represents the setting of the -v and -vmodule flags. +type verboseFlags struct { + // moduleLevelCache is a sync.Map storing the -vmodule Level for each V() + // call site, identified by PC. If there is no matching -vmodule filter, + // the cached value is exactly v. moduleLevelCache is replaced with a new + // Map whenever the -vmodule or -v flag changes state. + moduleLevelCache atomic.Value + + // mu guards all fields below. + mu sync.Mutex + + // v stores the value of the -v flag. 
It may be read safely using + // sync.LoadInt32, but is only modified under mu. + v Level + + // module stores the parsed -vmodule flag. + module []modulePat + + // moduleLength caches len(module). If greater than zero, it + // means vmodule is enabled. It may be read safely using sync.LoadInt32, but + // is only modified under mu. + moduleLength int32 +} + +// NOTE: For compatibility with the open-sourced v1 version of this +// package (github.com/golang/glog) we need to retain that flag.Level +// implements the flag.Value interface. See also go/log-vs-glog. + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(l.Get().(Level)), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() any { + if l == &vflags.v { + // l is the value registered for the -v flag. + return Level(atomic.LoadInt32((*int32)(l))) + } + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + if l == &vflags.v { + // l is the value registered for the -v flag. + vflags.mu.Lock() + defer vflags.mu.Unlock() + vflags.moduleLevelCache.Store(&sync.Map{}) + atomic.StoreInt32((*int32)(l), int32(v)) + return nil + } + *l = Level(v) + return nil +} + +// vModuleFlag is the flag.Value for the --vmodule flag. +type vModuleFlag struct{ *verboseFlags } + +func (f vModuleFlag) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var b bytes.Buffer + for i, f := range f.module { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get returns nil for this flag type since the struct is not exported. 
+func (f vModuleFlag) Get() any { return nil } + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,foo/bar/baz=1,gfs*=3 +func (f vModuleFlag) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), isFull(pattern), Level(v)}) + } + + f.mu.Lock() + defer f.mu.Unlock() + f.module = filter + atomic.StoreInt32((*int32)(&f.moduleLength), int32(len(f.module))) + f.moduleLevelCache.Store(&sync.Map{}) + return nil +} + +func (f *verboseFlags) levelForPC(pc uintptr) Level { + if level, ok := f.moduleLevelCache.Load().(*sync.Map).Load(pc); ok { + return level.(Level) + } + + f.mu.Lock() + defer f.mu.Unlock() + level := Level(f.v) + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d for + // regular matches, /a/b/c/d for full matches. + if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + full := file + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range f.module { + if filter.match(full, file) { + level = filter.level + break // Use the first matching level. + } + } + f.moduleLevelCache.Load().(*sync.Map).Store(pc, level) + return level +} + +func (f *verboseFlags) enabled(callerDepth int, level Level) bool { + if atomic.LoadInt32(&f.moduleLength) == 0 { + // No vmodule values specified, so compare against v level. 
+ return Level(atomic.LoadInt32((*int32)(&f.v))) >= level + } + + pcs := [1]uintptr{} + if runtime.Callers(callerDepth+2, pcs[:]) < 1 { + return false + } + frame, _ := runtime.CallersFrames(pcs[:]).Next() + return f.levelForPC(frame.Entry) >= level +} + +// traceLocation represents an entry in the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +func parseTraceLocation(value string) (traceLocation, error) { + fields := strings.Split(value, ":") + if len(fields) != 2 { + return traceLocation{}, errTraceSyntax + } + file, lineStr := fields[0], fields[1] + if !strings.Contains(file, ".") { + return traceLocation{}, errTraceSyntax + } + line, err := strconv.Atoi(lineStr) + if err != nil { + return traceLocation{}, errTraceSyntax + } + if line < 0 { + return traceLocation{}, errors.New("negative value for line") + } + return traceLocation{file, line}, nil +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +func (t traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t traceLocation) String() string { + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// traceLocations represents the -log_backtrace_at flag. +// Syntax: -log_backtrace_at=recordio.go:234,sstable.go:456 +// Note that unlike vmodule the file extension is included here. +type traceLocations struct { + mu sync.Mutex + locsLen int32 // Safe for atomic read without mu. 
+ locs []traceLocation +} + +func (t *traceLocations) String() string { + t.mu.Lock() + defer t.mu.Unlock() + + var buf bytes.Buffer + for i, tl := range t.locs { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString(tl.String()) + } + return buf.String() +} + +// Get always returns nil for this flag type since the struct is not exported +func (t *traceLocations) Get() any { return nil } + +func (t *traceLocations) Set(value string) error { + var locs []traceLocation + for _, s := range strings.Split(value, ",") { + if s == "" { + continue + } + loc, err := parseTraceLocation(s) + if err != nil { + return err + } + locs = append(locs, loc) + } + + t.mu.Lock() + defer t.mu.Unlock() + atomic.StoreInt32(&t.locsLen, int32(len(locs))) + t.locs = locs + return nil +} + +func (t *traceLocations) match(file string, line int) bool { + if atomic.LoadInt32(&t.locsLen) == 0 { + return false + } + + t.mu.Lock() + defer t.mu.Unlock() + for _, tl := range t.locs { + if tl.match(file, line) { + return true + } + } + return false +} + +// severityFlag is an atomic flag.Value implementation for logsink.Severity. +type severityFlag int32 + +func (s *severityFlag) get() logsink.Severity { + return logsink.Severity(atomic.LoadInt32((*int32)(s))) +} +func (s *severityFlag) String() string { return strconv.FormatInt(int64(*s), 10) } +func (s *severityFlag) Get() any { return s.get() } +func (s *severityFlag) Set(value string) error { + threshold, err := logsink.ParseSeverity(value) + if err != nil { + // Not a severity name. Try a raw number. + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = logsink.Severity(v) + if threshold < logsink.Info || threshold > logsink.Fatal { + return fmt.Errorf("Severity %d out of range (min %d, max %d).", v, logsink.Info, logsink.Fatal) + } + } + atomic.StoreInt32((*int32)(s), int32(threshold)) + return nil +} + +var ( + vflags verboseFlags // The -v and -vmodule flags. 
+ + logBacktraceAt traceLocations // The -log_backtrace_at flag. + + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + stderrThreshold severityFlag // The -stderrthreshold flag. +) + +// verboseEnabled returns whether the caller at the given depth should emit +// verbose logs at the given level, with depth 0 identifying the caller of +// verboseEnabled. +func verboseEnabled(callerDepth int, level Level) bool { + return vflags.enabled(callerDepth+1, level) +} + +// backtraceAt returns whether the logging call at the given function and line +// should also emit a backtrace of the current call stack. +func backtraceAt(file string, line int) bool { + return logBacktraceAt.match(file, line) +} + +func init() { + vflags.moduleLevelCache.Store(&sync.Map{}) + + flag.Var(&vflags.v, "v", "log level for V logs") + flag.Var(vModuleFlag{&vflags}, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + + flag.Var(&logBacktraceAt, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + stderrThreshold = severityFlag(logsink.Error) + + flag.BoolVar(&toStderr, "logtostderr", false, "log to standard error instead of files") + flag.BoolVar(&alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flag.Var(&stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") +} diff --git a/terraform/providers/google/vendor/github.com/golang/glog/internal/logsink/logsink.go b/terraform/providers/google/vendor/github.com/golang/glog/internal/logsink/logsink.go new file mode 100644 index 0000000000..53758e1c9f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/golang/glog/internal/logsink/logsink.go @@ -0,0 +1,387 @@ 
+// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logsink + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/golang/glog/internal/stackdump" +) + +// MaxLogMessageLen is the limit on length of a formatted log message, including +// the standard line prefix and trailing newline. +// +// Chosen to match C++ glog. +const MaxLogMessageLen = 15000 + +// A Severity is a severity at which a message can be logged. +type Severity int8 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + Info Severity = iota + Warning + Error + + // Fatal contains logs written immediately before the process terminates. + // + // Sink implementations should not terminate the process themselves: the log + // package will perform any necessary cleanup and terminate the process as + // appropriate. + Fatal +) + +func (s Severity) String() string { + switch s { + case Info: + return "INFO" + case Warning: + return "WARNING" + case Error: + return "ERROR" + case Fatal: + return "FATAL" + } + return fmt.Sprintf("%T(%d)", s, s) +} + +// ParseSeverity returns the case-insensitive Severity value for the given string. 
+func ParseSeverity(name string) (Severity, error) { + name = strings.ToUpper(name) + for s := Info; s <= Fatal; s++ { + if s.String() == name { + return s, nil + } + } + return -1, fmt.Errorf("logsink: invalid severity %q", name) +} + +// Meta is metadata about a logging call. +type Meta struct { + // Time is the time at which the log call was made. + Time time.Time + + // File is the source file from which the log entry originates. + File string + // Line is the line offset within the source file. + Line int + // Depth is the number of stack frames between the logsink and the log call. + Depth int + + Severity Severity + + // Verbose indicates whether the call was made via "log.V". Log entries below + // the current verbosity threshold are not sent to the sink. + Verbose bool + + // Thread ID. This can be populated with a thread ID from another source, + // such as a system we are importing logs from. In the normal case, this + // will be set to the process ID (PID), since Go doesn't have threads. + Thread int64 + + // Stack trace starting in the logging function. May be nil. + // A logsink should implement the StackWanter interface to request this. + // + // Even if WantStack returns false, this field may be set (e.g. if another + // sink wants a stack trace). + Stack *stackdump.Stack +} + +// Structured is a logging destination that accepts structured data as input. +type Structured interface { + // Printf formats according to a fmt.Printf format specifier and writes a log + // entry. The precise result of formatting depends on the sink, but should + // aim for consistency with fmt.Printf. + // + // Printf returns the number of bytes occupied by the log entry, which + // may not be equal to the total number of bytes written. + // + // Printf returns any error encountered *if* it is severe enough that the log + // package should terminate the process. 
+ // + // The sink must not modify the *Meta parameter, nor reference it after + // Printf has returned: it may be reused in subsequent calls. + Printf(meta *Meta, format string, a ...any) (n int, err error) +} + +// StackWanter can be implemented by a logsink.Structured to indicate that it +// wants a stack trace to accompany at least some of the log messages it receives. +type StackWanter interface { + // WantStack returns true if the sink requires a stack trace for a log message + // with this metadata. + // + // NOTE: Returning true implies that meta.Stack will be non-nil. Returning + // false does NOT imply that meta.Stack will be nil. + WantStack(meta *Meta) bool +} + +// Text is a logging destination that accepts pre-formatted log lines (instead of +// structured data). +type Text interface { + // Enabled returns whether this sink should output messages for the given + // Meta. If the sink returns false for a given Meta, the Printf function will + // not call Emit on it for the corresponding log message. + Enabled(*Meta) bool + + // Emit writes a pre-formatted text log entry (including any applicable + // header) to the log. It returns the number of bytes occupied by the entry + // (which may differ from the length of the passed-in slice). + // + // Emit returns any error encountered *if* it is severe enough that the log + // package should terminate the process. + // + // The sink must not modify the *Meta parameter, nor reference it after + // Printf has returned: it may be reused in subsequent calls. + // + // NOTE: When developing a text sink, keep in mind the surface in which the + // logs will be displayed, and whether it's important that the sink be + // resistent to tampering in the style of b/211428300. Standard text sinks + // (like `stderrSink`) do not protect against this (e.g. by escaping + // characters) because the cases where they would show user-influenced bytes + // are vanishingly small. 
+ Emit(*Meta, []byte) (n int, err error) +} + +// bufs is a pool of *bytes.Buffer used in formatting log entries. +var bufs sync.Pool // Pool of *bytes.Buffer. + +// textPrintf formats a text log entry and emits it to all specified Text sinks. +// +// The returned n is the maximum across all Emit calls. +// The returned err is the first non-nil error encountered. +// Sinks that are disabled by configuration should return (0, nil). +func textPrintf(m *Meta, textSinks []Text, format string, args ...any) (n int, err error) { + // We expect at most file, stderr, and perhaps syslog. If there are more, + // we'll end up allocating - no big deal. + const maxExpectedTextSinks = 3 + var noAllocSinks [maxExpectedTextSinks]Text + + sinks := noAllocSinks[:0] + for _, s := range textSinks { + if s.Enabled(m) { + sinks = append(sinks, s) + } + } + if len(sinks) == 0 && m.Severity != Fatal { + return 0, nil // No TextSinks specified; don't bother formatting. + } + + bufi := bufs.Get() + var buf *bytes.Buffer + if bufi == nil { + buf = bytes.NewBuffer(nil) + bufi = buf + } else { + buf = bufi.(*bytes.Buffer) + buf.Reset() + } + + // Lmmdd hh:mm:ss.uuuuuu PID/GID file:line] + // + // The "PID" entry arguably ought to be TID for consistency with other + // environments, but TID is not meaningful in a Go program due to the + // multiplexing of goroutines across threads. + // + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. 
+ const severityChar = "IWEF" + buf.WriteByte(severityChar[m.Severity]) + + _, month, day := m.Time.Date() + hour, minute, second := m.Time.Clock() + twoDigits(buf, int(month)) + twoDigits(buf, day) + buf.WriteByte(' ') + twoDigits(buf, hour) + buf.WriteByte(':') + twoDigits(buf, minute) + buf.WriteByte(':') + twoDigits(buf, second) + buf.WriteByte('.') + nDigits(buf, 6, uint64(m.Time.Nanosecond()/1000), '0') + buf.WriteByte(' ') + + nDigits(buf, 7, uint64(m.Thread), ' ') + buf.WriteByte(' ') + + { + file := m.File + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + buf.WriteString(file) + } + + buf.WriteByte(':') + { + var tmp [19]byte + buf.Write(strconv.AppendInt(tmp[:0], int64(m.Line), 10)) + } + buf.WriteString("] ") + + msgStart := buf.Len() + fmt.Fprintf(buf, format, args...) + if buf.Len() > MaxLogMessageLen-1 { + buf.Truncate(MaxLogMessageLen - 1) + } + msgEnd := buf.Len() + if b := buf.Bytes(); b[len(b)-1] != '\n' { + buf.WriteByte('\n') + } + + for _, s := range sinks { + sn, sErr := s.Emit(m, buf.Bytes()) + if sn > n { + n = sn + } + if sErr != nil && err == nil { + err = sErr + } + } + + if m.Severity == Fatal { + savedM := *m + fatalMessageStore(savedEntry{ + meta: &savedM, + msg: buf.Bytes()[msgStart:msgEnd], + }) + } else { + bufs.Put(bufi) + } + return n, err +} + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer to buf. +func twoDigits(buf *bytes.Buffer, d int) { + buf.WriteByte(digits[(d/10)%10]) + buf.WriteByte(digits[d%10]) +} + +// nDigits formats an n-digit integer to buf, padding with pad on the left. It +// assumes d != 0. 
+func nDigits(buf *bytes.Buffer, n int, d uint64, pad byte) { + var tmp [20]byte + + cutoff := len(tmp) - n + j := len(tmp) - 1 + for ; d > 0; j-- { + tmp[j] = digits[d%10] + d /= 10 + } + for ; j >= cutoff; j-- { + tmp[j] = pad + } + j++ + buf.Write(tmp[j:]) +} + +// Printf writes a log entry to all registered TextSinks in this package, then +// to all registered StructuredSinks. +// +// The returned n is the maximum across all Emit and Printf calls. +// The returned err is the first non-nil error encountered. +// Sinks that are disabled by configuration should return (0, nil). +func Printf(m *Meta, format string, args ...any) (n int, err error) { + m.Depth++ + n, err = textPrintf(m, TextSinks, format, args...) + + for _, sink := range StructuredSinks { + // TODO: Support TextSinks that implement StackWanter? + if sw, ok := sink.(StackWanter); ok && sw.WantStack(m) { + if m.Stack == nil { + // First, try to find a stacktrace in args, otherwise generate one. + for _, arg := range args { + if stack, ok := arg.(stackdump.Stack); ok { + m.Stack = &stack + break + } + } + if m.Stack == nil { + stack := stackdump.Caller( /* skipDepth = */ m.Depth) + m.Stack = &stack + } + } + } + sn, sErr := sink.Printf(m, format, args...) + if sn > n { + n = sn + } + if sErr != nil && err == nil { + err = sErr + } + } + return n, err +} + +// The sets of sinks to which logs should be written. +// +// These must only be modified during package init, and are read-only thereafter. +var ( + // StructuredSinks is the set of Structured sink instances to which logs + // should be written. + StructuredSinks []Structured + + // TextSinks is the set of Text sink instances to which logs should be + // written. + // + // These are registered separately from Structured sink implementations to + // avoid the need to repeat the work of formatting a message for each Text + // sink that writes it. 
The package-level Printf function writes to both sets + // independenty, so a given log destination should only register a Structured + // *or* a Text sink (not both). + TextSinks []Text +) + +type savedEntry struct { + meta *Meta + msg []byte +} + +// StructuredTextWrapper is a Structured sink which forwards logs to a set of Text sinks. +// +// The purpose of this sink is to allow applications to intercept logging calls before they are +// serialized and sent to Text sinks. For example, if one needs to redact PII from logging +// arguments before they reach STDERR, one solution would be to do the redacting in a Structured +// sink that forwards logs to a StructuredTextWrapper instance, and make STDERR a child of that +// StructuredTextWrapper instance. This is how one could set this up in their application: +// +// func init() { +// +// wrapper := logsink.StructuredTextWrapper{TextSinks: logsink.TextSinks} +// // sanitizersink will intercept logs and remove PII +// sanitizer := sanitizersink{Sink: &wrapper} +// logsink.StructuredSinks = append(logsink.StructuredSinks, &sanitizer) +// logsink.TextSinks = nil +// +// } +type StructuredTextWrapper struct { + // TextSinks is the set of Text sinks that should receive logs from this + // StructuredTextWrapper instance. + TextSinks []Text +} + +// Printf forwards logs to all Text sinks registered in the StructuredTextWrapper. +func (w *StructuredTextWrapper) Printf(meta *Meta, format string, args ...any) (n int, err error) { + return textPrintf(meta, w.TextSinks, format, args...) 
+} diff --git a/terraform/providers/google/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go b/terraform/providers/google/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go new file mode 100644 index 0000000000..3dc269abc2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go @@ -0,0 +1,35 @@ +package logsink + +import ( + "sync/atomic" + "unsafe" +) + +func fatalMessageStore(e savedEntry) { + // Only put a new one in if we haven't assigned before. + atomic.CompareAndSwapPointer(&fatalMessage, nil, unsafe.Pointer(&e)) +} + +var fatalMessage unsafe.Pointer // savedEntry stored with CompareAndSwapPointer + +// FatalMessage returns the Meta and message contents of the first message +// logged with Fatal severity, or false if none has occurred. +func FatalMessage() (*Meta, []byte, bool) { + e := (*savedEntry)(atomic.LoadPointer(&fatalMessage)) + if e == nil { + return nil, nil, false + } + return e.meta, e.msg, true +} + +// DoNotUseRacyFatalMessage is FatalMessage, but worse. +// +//go:norace +//go:nosplit +func DoNotUseRacyFatalMessage() (*Meta, []byte, bool) { + e := (*savedEntry)(fatalMessage) + if e == nil { + return nil, nil, false + } + return e.meta, e.msg, true +} diff --git a/terraform/providers/google/vendor/github.com/golang/glog/internal/stackdump/stackdump.go b/terraform/providers/google/vendor/github.com/golang/glog/internal/stackdump/stackdump.go new file mode 100644 index 0000000000..3427c9d6bd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/golang/glog/internal/stackdump/stackdump.go @@ -0,0 +1,127 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package stackdump provides wrappers for runtime.Stack and runtime.Callers +// with uniform support for skipping caller frames. +// +// ⚠ Unlike the functions in the runtime package, these may allocate a +// non-trivial quantity of memory: use them with care. ⚠ +package stackdump + +import ( + "bytes" + "runtime" +) + +// runtimeStackSelfFrames is 1 if runtime.Stack includes the call to +// runtime.Stack itself or 0 if it does not. +// +// As of 2016-04-27, the gccgo compiler includes runtime.Stack but the gc +// compiler does not. +var runtimeStackSelfFrames = func() int { + for n := 1 << 10; n < 1<<20; n *= 2 { + buf := make([]byte, n) + n := runtime.Stack(buf, false) + if bytes.Contains(buf[:n], []byte("runtime.Stack")) { + return 1 + } else if n < len(buf) || bytes.Count(buf, []byte("\n")) >= 3 { + return 0 + } + } + return 0 +}() + +// Stack is a stack dump for a single goroutine. +type Stack struct { + // Text is a representation of the stack dump in a human-readable format. + Text []byte + + // PC is a representation of the stack dump using raw program counter values. + PC []uintptr +} + +func (s Stack) String() string { return string(s.Text) } + +// Caller returns the Stack dump for the calling goroutine, starting skipDepth +// frames before the caller of Caller. (Caller(0) provides a dump starting at +// the caller of this function.) 
+func Caller(skipDepth int) Stack { + return Stack{ + Text: CallerText(skipDepth + 1), + PC: CallerPC(skipDepth + 1), + } +} + +// CallerText returns a textual dump of the stack starting skipDepth frames before +// the caller. (CallerText(0) provides a dump starting at the caller of this +// function.) +func CallerText(skipDepth int) []byte { + for n := 1 << 10; ; n *= 2 { + buf := make([]byte, n) + n := runtime.Stack(buf, false) + if n < len(buf) { + return pruneFrames(skipDepth+1+runtimeStackSelfFrames, buf[:n]) + } + } +} + +// CallerPC returns a dump of the program counters of the stack starting +// skipDepth frames before the caller. (CallerPC(0) provides a dump starting at +// the caller of this function.) +func CallerPC(skipDepth int) []uintptr { + for n := 1 << 8; ; n *= 2 { + buf := make([]uintptr, n) + n := runtime.Callers(skipDepth+2, buf) + if n < len(buf) { + return buf[:n] + } + } +} + +// pruneFrames removes the topmost skipDepth frames of the first goroutine in a +// textual stack dump. It overwrites the passed-in slice. +// +// If there are fewer than skipDepth frames in the first goroutine's stack, +// pruneFrames prunes it to an empty stack and leaves the remaining contents +// intact. +func pruneFrames(skipDepth int, stack []byte) []byte { + headerLen := 0 + for i, c := range stack { + if c == '\n' { + headerLen = i + 1 + break + } + } + if headerLen == 0 { + return stack // No header line - not a well-formed stack trace. 
+ } + + skipLen := headerLen + skipNewlines := skipDepth * 2 + for ; skipLen < len(stack) && skipNewlines > 0; skipLen++ { + c := stack[skipLen] + if c != '\n' { + continue + } + skipNewlines-- + skipLen++ + if skipNewlines == 0 || skipLen == len(stack) || stack[skipLen] == '\n' { + break + } + } + + pruned := stack[skipLen-headerLen:] + copy(pruned, stack[:headerLen]) + return pruned +} diff --git a/terraform/providers/google/vendor/github.com/golang/protobuf/jsonpb/decode.go b/terraform/providers/google/vendor/github.com/golang/protobuf/jsonpb/decode.go index 60e82caa9a..6c16c255ff 100644 --- a/terraform/providers/google/vendor/github.com/golang/protobuf/jsonpb/decode.go +++ b/terraform/providers/google/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -386,8 +386,14 @@ func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error } func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if fd.Cardinality() == protoreflect.Repeated { + return false + } if md := fd.Message(); md != nil { - return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated + return md.FullName() == "google.protobuf.Value" + } + if ed := fd.Enum(); ed != nil { + return ed.FullName() == "google.protobuf.NullValue" } return false } diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/.gitignore b/terraform/providers/google/vendor/github.com/google/s2a-go/.gitignore new file mode 100644 index 0000000000..01764d1cdf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/.gitignore @@ -0,0 +1,6 @@ +# Ignore binaries without extension +//example/client/client +//example/server/server +//internal/v2/fakes2av2_server/fakes2av2_server + +.idea/ \ No newline at end of file diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md b/terraform/providers/google/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md new file mode 100644 index 
0000000000..dc079b4d66 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md @@ -0,0 +1,93 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of +experience, education, socio-economic status, nationality, personal appearance, +race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when the Project +Steward has a reasonable belief that an individual's behavior may have a +negative impact on the project or its community. + +## Conflict Resolution + +We do not believe that all conflict is bad; healthy debate and disagreement +often yield positive results. However, it is never okay to be disrespectful or +to engage in behavior that violates the project’s code of conduct. + +If you see someone violating the code of conduct, you are encouraged to address +the behavior directly with those involved. Many issues can be resolved quickly +and easily, and this gives people more control over the outcome of their +dispute. If you are unable to resolve the matter for any reason, or if the +behavior is threatening or harassing, report it. We are dedicated to providing +an environment where participants feel welcome and safe. + +Reports should be directed to *[PROJECT STEWARD NAME(s) AND EMAIL(s)]*, the +Project Steward(s) for *[PROJECT NAME]*. It is the Project Steward’s duty to +receive and address reported violations of the code of conduct. 
They will then +work with a committee consisting of representatives from the Open Source +Programs Office and the Google Open Source Strategy team. If for any reason you +are uncomfortable reaching out to the Project Steward, please email +opensource@google.com. + +We will investigate every complaint, but you may not receive a direct response. +We will use our discretion in determining when and how to follow up on reported +incidents, which may range from not taking action to permanent expulsion from +the project and project-sponsored spaces. We will notify the accused of the +report and provide them an opportunity to discuss it before any action is taken. +The identity of the reporter will be omitted from the details of the report +supplied to the accused. In potentially harmful situations, such as ongoing +harassment or threats to anyone's safety, we may take action without notice. + +## Attribution + +This Code of Conduct is adapted from the Contributor Covenant, version 1.4, +available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/CONTRIBUTING.md b/terraform/providers/google/vendor/github.com/google/s2a-go/CONTRIBUTING.md new file mode 100644 index 0000000000..22b241cb73 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/CONTRIBUTING.md @@ -0,0 +1,29 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement (CLA). You (or your employer) retain the copyright to your +contribution; this simply gives us permission to use and redistribute your +contributions as part of the project. Head over to + to see your current agreements on file or +to sign a new one. 
+ +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +## Community Guidelines + +This project follows +[Google's Open Source Community Guidelines](https://opensource.google/conduct/). diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/LICENSE.md b/terraform/providers/google/vendor/github.com/google/s2a-go/LICENSE.md new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/LICENSE.md @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/README.md b/terraform/providers/google/vendor/github.com/google/s2a-go/README.md new file mode 100644 index 0000000000..d566950f38 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/README.md @@ -0,0 +1,17 @@ +# Secure Session Agent Client Libraries + +The Secure Session Agent is a service that enables a workload to offload select +operations from the mTLS handshake and protects a workload's private key +material from exfiltration. Specifically, the workload asks the Secure Session +Agent for the TLS configuration to use during the handshake, to perform private +key operations, and to validate the peer certificate chain. The Secure Session +Agent's client libraries enable applications to communicate with the Secure +Session Agent during the TLS handshake, and to encrypt traffic to the peer +after the TLS handshake is complete. + +This repository contains the source code for the Secure Session Agent's Go +client libraries, which allow gRPC-Go applications to use the Secure Session +Agent. This repository supports the Bazel and Golang build systems. + +All code in this repository is experimental and subject to change. We do not +guarantee API stability at this time. 
diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go b/terraform/providers/google/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go new file mode 100644 index 0000000000..034d1b912c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package fallback provides default implementations of fallback options when S2A fails. +package fallback + +import ( + "context" + "crypto/tls" + "fmt" + "net" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +const ( + alpnProtoStrH2 = "h2" + alpnProtoStrHTTP = "http/1.1" + defaultHTTPSPort = "443" +) + +// FallbackTLSConfigGRPC is a tls.Config used by the DefaultFallbackClientHandshakeFunc function. +// It supports GRPC use case, thus the alpn is set to 'h2'. +var FallbackTLSConfigGRPC = tls.Config{ + MinVersion: tls.VersionTLS13, + ClientSessionCache: nil, + NextProtos: []string{alpnProtoStrH2}, +} + +// FallbackTLSConfigHTTP is a tls.Config used by the DefaultFallbackDialerAndAddress func. +// It supports the HTTP use case and the alpn is set to both 'http/1.1' and 'h2'. 
+var FallbackTLSConfigHTTP = tls.Config{ + MinVersion: tls.VersionTLS13, + ClientSessionCache: nil, + NextProtos: []string{alpnProtoStrH2, alpnProtoStrHTTP}, +} + +// ClientHandshake establishes a TLS connection and returns it, plus its auth info. +// Inputs: +// +// targetServer: the server attempted with S2A. +// conn: the tcp connection to the server at address targetServer that was passed into S2A's ClientHandshake func. +// If fallback is successful, the `conn` should be closed. +// err: the error encountered when performing the client-side TLS handshake with S2A. +type ClientHandshake func(ctx context.Context, targetServer string, conn net.Conn, err error) (net.Conn, credentials.AuthInfo, error) + +// DefaultFallbackClientHandshakeFunc returns a ClientHandshake function, +// which establishes a TLS connection to the provided fallbackAddr, returns the new connection and its auth info. +// Example use: +// +// transportCreds, _ = s2a.NewClientCreds(&s2a.ClientOptions{ +// S2AAddress: s2aAddress, +// FallbackOpts: &s2a.FallbackOptions{ // optional +// FallbackClientHandshakeFunc: fallback.DefaultFallbackClientHandshakeFunc(fallbackAddr), +// }, +// }) +// +// The fallback server's certificate must be verifiable using OS root store. +// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, +// it uses default port 443. +// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, +// and min TLS version is set to 1.3. 
+func DefaultFallbackClientHandshakeFunc(fallbackAddr string) (ClientHandshake, error) { + var fallbackDialer = tls.Dialer{Config: &FallbackTLSConfigGRPC} + return defaultFallbackClientHandshakeFuncInternal(fallbackAddr, fallbackDialer.DialContext) +} + +func defaultFallbackClientHandshakeFuncInternal(fallbackAddr string, dialContextFunc func(context.Context, string, string) (net.Conn, error)) (ClientHandshake, error) { + fallbackServerAddr, err := processFallbackAddr(fallbackAddr) + if err != nil { + if grpclog.V(1) { + grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) + } + return nil, err + } + return func(ctx context.Context, targetServer string, conn net.Conn, s2aErr error) (net.Conn, credentials.AuthInfo, error) { + fbConn, fbErr := dialContextFunc(ctx, "tcp", fallbackServerAddr) + if fbErr != nil { + grpclog.Infof("dialing to fallback server %s failed: %v", fallbackServerAddr, fbErr) + return nil, nil, fmt.Errorf("dialing to fallback server %s failed: %v; S2A client handshake with %s error: %w", fallbackServerAddr, fbErr, targetServer, s2aErr) + } + + tc, success := fbConn.(*tls.Conn) + if !success { + grpclog.Infof("the connection with fallback server is expected to be tls but isn't") + return nil, nil, fmt.Errorf("the connection with fallback server is expected to be tls but isn't; S2A client handshake with %s error: %w", targetServer, s2aErr) + } + + tlsInfo := credentials.TLSInfo{ + State: tc.ConnectionState(), + CommonAuthInfo: credentials.CommonAuthInfo{ + SecurityLevel: credentials.PrivacyAndIntegrity, + }, + } + if grpclog.V(1) { + grpclog.Infof("ConnectionState.NegotiatedProtocol: %v", tc.ConnectionState().NegotiatedProtocol) + grpclog.Infof("ConnectionState.HandshakeComplete: %v", tc.ConnectionState().HandshakeComplete) + grpclog.Infof("ConnectionState.ServerName: %v", tc.ConnectionState().ServerName) + } + conn.Close() + return fbConn, tlsInfo, nil + }, nil +} + +// DefaultFallbackDialerAndAddress returns a TLS dialer 
and the network address to dial. +// Example use: +// +// fallbackDialer, fallbackServerAddr := fallback.DefaultFallbackDialerAndAddress(fallbackAddr) +// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ +// S2AAddress: s2aAddress, // required +// FallbackOpts: &s2a.FallbackOptions{ +// FallbackDialer: &s2a.FallbackDialer{ +// Dialer: fallbackDialer, +// ServerAddr: fallbackServerAddr, +// }, +// }, +// }) +// +// The fallback server's certificate should be verifiable using OS root store. +// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, +// it uses default port 443. +// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, +// and min TLS version is set to 1.3. +func DefaultFallbackDialerAndAddress(fallbackAddr string) (*tls.Dialer, string, error) { + fallbackServerAddr, err := processFallbackAddr(fallbackAddr) + if err != nil { + if grpclog.V(1) { + grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) + } + return nil, "", err + } + return &tls.Dialer{Config: &FallbackTLSConfigHTTP}, fallbackServerAddr, nil +} + +func processFallbackAddr(fallbackAddr string) (string, error) { + var fallbackServerAddr string + var err error + + if fallbackAddr == "" { + return "", fmt.Errorf("empty fallback address") + } + _, _, err = net.SplitHostPort(fallbackAddr) + if err != nil { + // fallbackAddr does not have port suffix + fallbackServerAddr = net.JoinHostPort(fallbackAddr, defaultHTTPSPort) + } else { + // FallbackServerAddr already has port suffix + fallbackServerAddr = fallbackAddr + } + return fallbackServerAddr, nil +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go new file mode 100644 index 0000000000..aa3967f9d1 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package authinfo provides authentication and authorization information that +// results from the TLS handshake. +package authinfo + +import ( + "errors" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + contextpb "github.com/google/s2a-go/internal/proto/s2a_context_go_proto" + grpcpb "github.com/google/s2a-go/internal/proto/s2a_go_proto" + "google.golang.org/grpc/credentials" +) + +var _ credentials.AuthInfo = (*S2AAuthInfo)(nil) + +const s2aAuthType = "s2a" + +// S2AAuthInfo exposes authentication and authorization information from the +// S2A session result to the gRPC stack. +type S2AAuthInfo struct { + s2aContext *contextpb.S2AContext + commonAuthInfo credentials.CommonAuthInfo +} + +// NewS2AAuthInfo returns a new S2AAuthInfo object from the S2A session result. 
+func NewS2AAuthInfo(result *grpcpb.SessionResult) (credentials.AuthInfo, error) { + return newS2AAuthInfo(result) +} + +func newS2AAuthInfo(result *grpcpb.SessionResult) (*S2AAuthInfo, error) { + if result == nil { + return nil, errors.New("NewS2aAuthInfo given nil session result") + } + return &S2AAuthInfo{ + s2aContext: &contextpb.S2AContext{ + ApplicationProtocol: result.GetApplicationProtocol(), + TlsVersion: result.GetState().GetTlsVersion(), + Ciphersuite: result.GetState().GetTlsCiphersuite(), + PeerIdentity: result.GetPeerIdentity(), + LocalIdentity: result.GetLocalIdentity(), + PeerCertFingerprint: result.GetPeerCertFingerprint(), + LocalCertFingerprint: result.GetLocalCertFingerprint(), + IsHandshakeResumed: result.GetState().GetIsHandshakeResumed(), + }, + commonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, + }, nil +} + +// AuthType returns the authentication type. +func (s *S2AAuthInfo) AuthType() string { + return s2aAuthType +} + +// ApplicationProtocol returns the application protocol, e.g. "grpc". +func (s *S2AAuthInfo) ApplicationProtocol() string { + return s.s2aContext.GetApplicationProtocol() +} + +// TLSVersion returns the TLS version negotiated during the handshake. +func (s *S2AAuthInfo) TLSVersion() commonpb.TLSVersion { + return s.s2aContext.GetTlsVersion() +} + +// Ciphersuite returns the ciphersuite negotiated during the handshake. +func (s *S2AAuthInfo) Ciphersuite() commonpb.Ciphersuite { + return s.s2aContext.GetCiphersuite() +} + +// PeerIdentity returns the authenticated identity of the peer. +func (s *S2AAuthInfo) PeerIdentity() *commonpb.Identity { + return s.s2aContext.GetPeerIdentity() +} + +// LocalIdentity returns the local identity of the application used during +// session setup. +func (s *S2AAuthInfo) LocalIdentity() *commonpb.Identity { + return s.s2aContext.GetLocalIdentity() +} + +// PeerCertFingerprint returns the SHA256 hash of the peer certificate used in +// the S2A handshake. 
+func (s *S2AAuthInfo) PeerCertFingerprint() []byte { + return s.s2aContext.GetPeerCertFingerprint() +} + +// LocalCertFingerprint returns the SHA256 hash of the local certificate used +// in the S2A handshake. +func (s *S2AAuthInfo) LocalCertFingerprint() []byte { + return s.s2aContext.GetLocalCertFingerprint() +} + +// IsHandshakeResumed returns true if a cached session was used to resume +// the handshake. +func (s *S2AAuthInfo) IsHandshakeResumed() bool { + return s.s2aContext.GetIsHandshakeResumed() +} + +// SecurityLevel returns the security level of the connection. +func (s *S2AAuthInfo) SecurityLevel() credentials.SecurityLevel { + return s.commonAuthInfo.SecurityLevel +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go new file mode 100644 index 0000000000..8297c9a974 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go @@ -0,0 +1,438 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package handshaker communicates with the S2A handshaker service. 
+package handshaker + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "sync" + + "github.com/google/s2a-go/internal/authinfo" + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" + "github.com/google/s2a-go/internal/record" + "github.com/google/s2a-go/internal/tokenmanager" + grpc "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +var ( + // appProtocol contains the application protocol accepted by the handshaker. + appProtocol = "grpc" + // frameLimit is the maximum size of a frame in bytes. + frameLimit = 1024 * 64 + // peerNotRespondingError is the error thrown when the peer doesn't respond. + errPeerNotResponding = errors.New("peer is not responding and re-connection should be attempted") +) + +// Handshaker defines a handshaker interface. +type Handshaker interface { + // ClientHandshake starts and completes a TLS handshake from the client side, + // and returns a secure connection along with additional auth information. + ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // ServerHandshake starts and completes a TLS handshake from the server side, + // and returns a secure connection along with additional auth information. + ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // Close terminates the Handshaker. It should be called when the handshake + // is complete. + Close() error +} + +// ClientHandshakerOptions contains the options needed to configure the S2A +// handshaker service on the client-side. +type ClientHandshakerOptions struct { + // MinTLSVersion specifies the min TLS version supported by the client. + MinTLSVersion commonpb.TLSVersion + // MaxTLSVersion specifies the max TLS version supported by the client. 
+ MaxTLSVersion commonpb.TLSVersion + // TLSCiphersuites is the ordered list of ciphersuites supported by the + // client. + TLSCiphersuites []commonpb.Ciphersuite + // TargetIdentities contains a list of allowed server identities. One of the + // target identities should match the peer identity in the handshake + // result; otherwise, the handshake fails. + TargetIdentities []*commonpb.Identity + // LocalIdentity is the local identity of the client application. If none is + // provided, then the S2A will choose the default identity. + LocalIdentity *commonpb.Identity + // TargetName is the allowed server name, which may be used for server + // authorization check by the S2A if it is provided. + TargetName string + // EnsureProcessSessionTickets allows users to wait and ensure that all + // available session tickets are sent to S2A before a process completes. + EnsureProcessSessionTickets *sync.WaitGroup +} + +// ServerHandshakerOptions contains the options needed to configure the S2A +// handshaker service on the server-side. +type ServerHandshakerOptions struct { + // MinTLSVersion specifies the min TLS version supported by the server. + MinTLSVersion commonpb.TLSVersion + // MaxTLSVersion specifies the max TLS version supported by the server. + MaxTLSVersion commonpb.TLSVersion + // TLSCiphersuites is the ordered list of ciphersuites supported by the + // server. + TLSCiphersuites []commonpb.Ciphersuite + // LocalIdentities is the list of local identities that may be assumed by + // the server. If no local identity is specified, then the S2A chooses a + // default local identity. + LocalIdentities []*commonpb.Identity +} + +// s2aHandshaker performs a TLS handshake using the S2A handshaker service. +type s2aHandshaker struct { + // stream is used to communicate with the S2A handshaker service. + stream s2apb.S2AService_SetUpSessionClient + // conn is the connection to the peer. + conn net.Conn + // clientOpts should be non-nil iff the handshaker is client-side. 
+ clientOpts *ClientHandshakerOptions + // serverOpts should be non-nil iff the handshaker is server-side. + serverOpts *ServerHandshakerOptions + // isClient determines if the handshaker is client or server side. + isClient bool + // hsAddr stores the address of the S2A handshaker service. + hsAddr string + // tokenManager manages access tokens for authenticating to S2A. + tokenManager tokenmanager.AccessTokenManager + // localIdentities is the set of local identities for whom the + // tokenManager should fetch a token when preparing a request to be + // sent to S2A. + localIdentities []*commonpb.Identity +} + +// NewClientHandshaker creates an s2aHandshaker instance that performs a +// client-side TLS handshake using the S2A handshaker service. +func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ClientHandshakerOptions) (Handshaker, error) { + stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + grpclog.Infof("failed to create single token access token manager: %v", err) + } + return newClientHandshaker(stream, c, hsAddr, opts, tokenManager), nil +} + +func newClientHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ClientHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { + var localIdentities []*commonpb.Identity + if opts != nil { + localIdentities = []*commonpb.Identity{opts.LocalIdentity} + } + return &s2aHandshaker{ + stream: stream, + conn: c, + clientOpts: opts, + isClient: true, + hsAddr: hsAddr, + tokenManager: tokenManager, + localIdentities: localIdentities, + } +} + +// NewServerHandshaker creates an s2aHandshaker instance that performs a +// server-side TLS handshake using the S2A handshaker service. 
+func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ServerHandshakerOptions) (Handshaker, error) { + stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + grpclog.Infof("failed to create single token access token manager: %v", err) + } + return newServerHandshaker(stream, c, hsAddr, opts, tokenManager), nil +} + +func newServerHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ServerHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { + var localIdentities []*commonpb.Identity + if opts != nil { + localIdentities = opts.LocalIdentities + } + return &s2aHandshaker{ + stream: stream, + conn: c, + serverOpts: opts, + isClient: false, + hsAddr: hsAddr, + tokenManager: tokenManager, + localIdentities: localIdentities, + } +} + +// ClientHandshake performs a client-side TLS handshake using the S2A handshaker +// service. When complete, returns a TLS connection. +func (h *s2aHandshaker) ClientHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { + if !h.isClient { + return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client-side handshake") + } + // Extract the hostname from the target name. The target name is assumed to be an authority. + hostname, _, err := net.SplitHostPort(h.clientOpts.TargetName) + if err != nil { + // If the target name had no host port or could not be parsed, use it as is. + hostname = h.clientOpts.TargetName + } + + // Prepare a client start message to send to the S2A handshaker service. 
+ req := &s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_ClientStart{ + ClientStart: &s2apb.ClientSessionStartReq{ + ApplicationProtocols: []string{appProtocol}, + MinTlsVersion: h.clientOpts.MinTLSVersion, + MaxTlsVersion: h.clientOpts.MaxTLSVersion, + TlsCiphersuites: h.clientOpts.TLSCiphersuites, + TargetIdentities: h.clientOpts.TargetIdentities, + LocalIdentity: h.clientOpts.LocalIdentity, + TargetName: hostname, + }, + }, + AuthMechanisms: h.getAuthMechanisms(), + } + conn, result, err := h.setUpSession(req) + if err != nil { + return nil, nil, err + } + authInfo, err := authinfo.NewS2AAuthInfo(result) + if err != nil { + return nil, nil, err + } + return conn, authInfo, nil +} + +// ServerHandshake performs a server-side TLS handshake using the S2A handshaker +// service. When complete, returns a TLS connection. +func (h *s2aHandshaker) ServerHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { + if h.isClient { + return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server-side handshake") + } + p := make([]byte, frameLimit) + n, err := h.conn.Read(p) + if err != nil { + return nil, nil, err + } + // Prepare a server start message to send to the S2A handshaker service. + req := &s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_ServerStart{ + ServerStart: &s2apb.ServerSessionStartReq{ + ApplicationProtocols: []string{appProtocol}, + MinTlsVersion: h.serverOpts.MinTLSVersion, + MaxTlsVersion: h.serverOpts.MaxTLSVersion, + TlsCiphersuites: h.serverOpts.TLSCiphersuites, + LocalIdentities: h.serverOpts.LocalIdentities, + InBytes: p[:n], + }, + }, + AuthMechanisms: h.getAuthMechanisms(), + } + conn, result, err := h.setUpSession(req) + if err != nil { + return nil, nil, err + } + authInfo, err := authinfo.NewS2AAuthInfo(result) + if err != nil { + return nil, nil, err + } + return conn, authInfo, nil +} + +// setUpSession proxies messages between the peer and the S2A handshaker +// service. 
+func (h *s2aHandshaker) setUpSession(req *s2apb.SessionReq) (net.Conn, *s2apb.SessionResult, error) { + resp, err := h.accessHandshakerService(req) + if err != nil { + return nil, nil, err + } + // Check if the returned status is an error. + if resp.GetStatus() != nil { + if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { + return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) + } + } + // Calculate the extra unread bytes from the Session. Attempting to consume + // more than the bytes sent will throw an error. + var extra []byte + if req.GetServerStart() != nil { + if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) { + return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") + } + extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] + } + result, extra, err := h.processUntilDone(resp, extra) + if err != nil { + return nil, nil, err + } + if result.GetLocalIdentity() == nil { + return nil, nil, errors.New("local identity must be populated in session result") + } + + // Create a new TLS record protocol using the Session Result. 
+ newConn, err := record.NewConn(&record.ConnParameters{ + NetConn: h.conn, + Ciphersuite: result.GetState().GetTlsCiphersuite(), + TLSVersion: result.GetState().GetTlsVersion(), + InTrafficSecret: result.GetState().GetInKey(), + OutTrafficSecret: result.GetState().GetOutKey(), + UnusedBuf: extra, + InSequence: result.GetState().GetInSequence(), + OutSequence: result.GetState().GetOutSequence(), + HSAddr: h.hsAddr, + ConnectionID: result.GetState().GetConnectionId(), + LocalIdentity: result.GetLocalIdentity(), + EnsureProcessSessionTickets: h.ensureProcessSessionTickets(), + }) + if err != nil { + return nil, nil, err + } + return newConn, result, nil +} + +func (h *s2aHandshaker) ensureProcessSessionTickets() *sync.WaitGroup { + if h.clientOpts == nil { + return nil + } + return h.clientOpts.EnsureProcessSessionTickets +} + +// accessHandshakerService sends the session request to the S2A handshaker +// service and returns the session response. +func (h *s2aHandshaker) accessHandshakerService(req *s2apb.SessionReq) (*s2apb.SessionResp, error) { + if err := h.stream.Send(req); err != nil { + return nil, err + } + resp, err := h.stream.Recv() + if err != nil { + return nil, err + } + return resp, nil +} + +// processUntilDone continues proxying messages between the peer and the S2A +// handshaker service until the handshaker service returns the SessionResult at +// the end of the handshake or an error occurs. 
+func (h *s2aHandshaker) processUntilDone(resp *s2apb.SessionResp, unusedBytes []byte) (*s2apb.SessionResult, []byte, error) { + for { + if len(resp.OutFrames) > 0 { + if _, err := h.conn.Write(resp.OutFrames); err != nil { + return nil, nil, err + } + } + if resp.Result != nil { + return resp.Result, unusedBytes, nil + } + buf := make([]byte, frameLimit) + n, err := h.conn.Read(buf) + if err != nil && err != io.EOF { + return nil, nil, err + } + // If there is nothing to send to the handshaker service and nothing is + // received from the peer, then we are stuck. This covers the case when + // the peer is not responding. Note that handshaker service connection + // issues are caught in accessHandshakerService before we even get + // here. + if len(resp.OutFrames) == 0 && n == 0 { + return nil, nil, errPeerNotResponding + } + // Append extra bytes from the previous interaction with the handshaker + // service with the current buffer read from conn. + p := append(unusedBytes, buf[:n]...) + // From here on, p and unusedBytes point to the same slice. + resp, err = h.accessHandshakerService(&s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_Next{ + Next: &s2apb.SessionNextReq{ + InBytes: p, + }, + }, + AuthMechanisms: h.getAuthMechanisms(), + }) + if err != nil { + return nil, nil, err + } + + // Cache the local identity returned by S2A, if it is populated. This + // overwrites any existing local identities. This is done because, once the + // S2A has selected a local identity, then only that local identity should + // be asserted in future requests until the end of the current handshake. + if resp.GetLocalIdentity() != nil { + h.localIdentities = []*commonpb.Identity{resp.GetLocalIdentity()} + } + + // Set unusedBytes based on the handshaker service response. 
+ if resp.GetBytesConsumed() > uint32(len(p)) { + return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") + } + unusedBytes = p[resp.GetBytesConsumed():] + } +} + +// Close shuts down the handshaker and the stream to the S2A handshaker service +// when the handshake is complete. It should be called when the caller obtains +// the secure connection at the end of the handshake. +func (h *s2aHandshaker) Close() error { + return h.stream.CloseSend() +} + +func (h *s2aHandshaker) getAuthMechanisms() []*s2apb.AuthenticationMechanism { + if h.tokenManager == nil { + return nil + } + // First handle the special case when no local identities have been provided + // by the application. In this case, an AuthenticationMechanism with no local + // identity will be sent. + if len(h.localIdentities) == 0 { + token, err := h.tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("unable to get token for empty local identity: %v", err) + return nil + } + return []*s2apb.AuthenticationMechanism{ + { + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } + } + + // Next, handle the case where the application (or the S2A) has provided + // one or more local identities. 
+ var authMechanisms []*s2apb.AuthenticationMechanism + for _, localIdentity := range h.localIdentities { + token, err := h.tokenManager.Token(localIdentity) + if err != nil { + grpclog.Infof("unable to get token for local identity %v: %v", localIdentity, err) + continue + } + + authMechanism := &s2apb.AuthenticationMechanism{ + Identity: localIdentity, + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + } + authMechanisms = append(authMechanisms, authMechanism) + } + return authMechanisms +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go new file mode 100644 index 0000000000..49573af887 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go @@ -0,0 +1,99 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package service is a utility for calling the S2A handshaker service. +package service + +import ( + "context" + "net" + "os" + "strings" + "sync" + "time" + + "google.golang.org/appengine" + "google.golang.org/appengine/socket" + grpc "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" +) + +// An environment variable, if true, opportunistically use AppEngine-specific dialer to call S2A. 
+const enableAppEngineDialerEnv = "S2A_ENABLE_APP_ENGINE_DIALER" + +var ( + // appEngineDialerHook is an AppEngine-specific dial option that is set + // during init time. If nil, then the application is not running on Google + // AppEngine. + appEngineDialerHook func(context.Context) grpc.DialOption + // mu guards hsConnMap and hsDialer. + mu sync.Mutex + // hsConnMap represents a mapping from an S2A handshaker service address + // to a corresponding connection to an S2A handshaker service instance. + hsConnMap = make(map[string]*grpc.ClientConn) + // hsDialer will be reassigned in tests. + hsDialer = grpc.Dial +) + +func init() { + if !appengine.IsAppEngine() && !appengine.IsDevAppServer() { + return + } + appEngineDialerHook = func(ctx context.Context) grpc.DialOption { + return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return socket.DialTimeout(ctx, "tcp", addr, timeout) + }) + } +} + +// Dial dials the S2A handshaker service. If a connection has already been +// established, this function returns it. Otherwise, a new connection is +// created. +func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { + mu.Lock() + defer mu.Unlock() + + hsConn, ok := hsConnMap[handshakerServiceAddress] + if !ok { + // Create a new connection to the S2A handshaker service. Note that + // this connection stays open until the application is closed. + grpcOpts := []grpc.DialOption{ + grpc.WithInsecure(), + } + if enableAppEngineDialer() && appEngineDialerHook != nil { + if grpclog.V(1) { + grpclog.Info("Using AppEngine-specific dialer to talk to S2A.") + } + grpcOpts = append(grpcOpts, appEngineDialerHook(context.Background())) + } + var err error + hsConn, err = hsDialer(handshakerServiceAddress, grpcOpts...) 
+ if err != nil { + return nil, err + } + hsConnMap[handshakerServiceAddress] = hsConn + } + return hsConn, nil +} + +func enableAppEngineDialer() bool { + if strings.ToLower(os.Getenv(enableAppEngineDialerEnv)) == "true" { + return true + } + return false +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go new file mode 100644 index 0000000000..16278a1d99 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go @@ -0,0 +1,389 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/common/common.proto + +package common_go_proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The ciphersuites supported by S2A. 
The name determines the confidentiality, +// and authentication ciphers as well as the hash algorithm used for PRF in +// TLS 1.2 or HKDF in TLS 1.3. Thus, the components of the name are: +// - AEAD -- for encryption and authentication, e.g., AES_128_GCM. +// - Hash algorithm -- used in PRF or HKDF, e.g., SHA256. +type Ciphersuite int32 + +const ( + Ciphersuite_AES_128_GCM_SHA256 Ciphersuite = 0 + Ciphersuite_AES_256_GCM_SHA384 Ciphersuite = 1 + Ciphersuite_CHACHA20_POLY1305_SHA256 Ciphersuite = 2 +) + +// Enum value maps for Ciphersuite. +var ( + Ciphersuite_name = map[int32]string{ + 0: "AES_128_GCM_SHA256", + 1: "AES_256_GCM_SHA384", + 2: "CHACHA20_POLY1305_SHA256", + } + Ciphersuite_value = map[string]int32{ + "AES_128_GCM_SHA256": 0, + "AES_256_GCM_SHA384": 1, + "CHACHA20_POLY1305_SHA256": 2, + } +) + +func (x Ciphersuite) Enum() *Ciphersuite { + p := new(Ciphersuite) + *p = x + return p +} + +func (x Ciphersuite) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_common_common_proto_enumTypes[0].Descriptor() +} + +func (Ciphersuite) Type() protoreflect.EnumType { + return &file_internal_proto_common_common_proto_enumTypes[0] +} + +func (x Ciphersuite) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Ciphersuite.Descriptor instead. +func (Ciphersuite) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} +} + +// The TLS versions supported by S2A's handshaker module. +type TLSVersion int32 + +const ( + TLSVersion_TLS1_2 TLSVersion = 0 + TLSVersion_TLS1_3 TLSVersion = 1 +) + +// Enum value maps for TLSVersion. 
+var ( + TLSVersion_name = map[int32]string{ + 0: "TLS1_2", + 1: "TLS1_3", + } + TLSVersion_value = map[string]int32{ + "TLS1_2": 0, + "TLS1_3": 1, + } +) + +func (x TLSVersion) Enum() *TLSVersion { + p := new(TLSVersion) + *p = x + return p +} + +func (x TLSVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_common_common_proto_enumTypes[1].Descriptor() +} + +func (TLSVersion) Type() protoreflect.EnumType { + return &file_internal_proto_common_common_proto_enumTypes[1] +} + +func (x TLSVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TLSVersion.Descriptor instead. +func (TLSVersion) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_common_common_proto_rawDescGZIP(), []int{1} +} + +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to IdentityOneof: + // + // *Identity_SpiffeId + // *Identity_Hostname + // *Identity_Uid + // *Identity_MdbUsername + // *Identity_GaiaId + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional identity-specific attributes. 
+ Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_common_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_common_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. +func (*Identity) Descriptor() ([]byte, []int) { + return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} +} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (x *Identity) GetSpiffeId() string { + if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { + return x.SpiffeId + } + return "" +} + +func (x *Identity) GetHostname() string { + if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (x *Identity) GetUid() string { + if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { + return x.Uid + } + return "" +} + +func (x *Identity) GetMdbUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { + return x.MdbUsername + } + return "" +} + +func (x *Identity) GetGaiaId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { + return x.GaiaId + } + return "" +} + +func (x *Identity) GetAttributes() map[string]string { + if x != nil { + return 
x.Attributes + } + return nil +} + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_SpiffeId struct { + // The SPIFFE ID of a connection endpoint. + SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` +} + +type Identity_Hostname struct { + // The hostname of a connection endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +type Identity_Uid struct { + // The UID of a connection endpoint. + Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` +} + +type Identity_MdbUsername struct { + // The MDB username of a connection endpoint. + MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` +} + +type Identity_GaiaId struct { + // The Gaia ID of a connection endpoint. + GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` +} + +func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (*Identity_Uid) isIdentity_IdentityOneof() {} + +func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} + +func (*Identity_GaiaId) isIdentity_IdentityOneof() {} + +var File_internal_proto_common_common_proto protoreflect.FileDescriptor + +var file_internal_proto_common_common_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x09, 0x48, 0x00, 0x52, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, + 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, + 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, + 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, + 0x4f, 0x4c, 
0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, + 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, + 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, + 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_common_common_proto_rawDescOnce sync.Once + file_internal_proto_common_common_proto_rawDescData = file_internal_proto_common_common_proto_rawDesc +) + +func file_internal_proto_common_common_proto_rawDescGZIP() []byte { + file_internal_proto_common_common_proto_rawDescOnce.Do(func() { + file_internal_proto_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_common_common_proto_rawDescData) + }) + return file_internal_proto_common_common_proto_rawDescData +} + +var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_internal_proto_common_common_proto_goTypes = []interface{}{ + (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite + (TLSVersion)(0), // 1: s2a.proto.TLSVersion + (*Identity)(nil), // 2: s2a.proto.Identity + nil, // 3: s2a.proto.Identity.AttributesEntry +} +var file_internal_proto_common_common_proto_depIdxs = []int32{ + 3, // 0: s2a.proto.Identity.attributes:type_name -> s2a.proto.Identity.AttributesEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is 
the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_internal_proto_common_common_proto_init() } +func file_internal_proto_common_common_proto_init() { + if File_internal_proto_common_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Identity_SpiffeId)(nil), + (*Identity_Hostname)(nil), + (*Identity_Uid)(nil), + (*Identity_MdbUsername)(nil), + (*Identity_GaiaId)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_common_common_proto_rawDesc, + NumEnums: 2, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_common_common_proto_goTypes, + DependencyIndexes: file_internal_proto_common_common_proto_depIdxs, + EnumInfos: file_internal_proto_common_common_proto_enumTypes, + MessageInfos: file_internal_proto_common_common_proto_msgTypes, + }.Build() + File_internal_proto_common_common_proto = out.File + file_internal_proto_common_common_proto_rawDesc = nil + file_internal_proto_common_common_proto_goTypes = nil + file_internal_proto_common_common_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go new file mode 100644 index 0000000000..f4f763ae10 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go @@ -0,0 +1,267 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/s2a_context/s2a_context.proto + +package s2a_context_go_proto + +import ( + common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type S2AContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocol negotiated for this connection, e.g., 'grpc'. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The TLS version number that the S2A's handshaker module used to set up the + // session. 
+ TlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` + // The TLS ciphersuite negotiated by the S2A's handshaker module. + Ciphersuite common_go_proto.Ciphersuite `protobuf:"varint,3,opt,name=ciphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"ciphersuite,omitempty"` + // The authenticated identity of the peer. + PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` + // The local identity used during session setup. This could be: + // - The local identity that the client specifies in ClientSessionStartReq. + // - One of the local identities that the server specifies in + // ServerSessionStartReq. + // - If neither client or server specifies local identities, the S2A picks the + // default one. In this case, this field will contain that identity. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The SHA256 hash of the peer certificate used in the handshake. + PeerCertFingerprint []byte `protobuf:"bytes,6,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` + // The SHA256 hash of the local certificate used in the handshake. + LocalCertFingerprint []byte `protobuf:"bytes,7,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` + // Set to true if a cached session was reused to resume the handshake. 
+ IsHandshakeResumed bool `protobuf:"varint,8,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` +} + +func (x *S2AContext) Reset() { + *x = S2AContext{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S2AContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S2AContext) ProtoMessage() {} + +func (x *S2AContext) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. +func (*S2AContext) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} +} + +func (x *S2AContext) GetApplicationProtocol() string { + if x != nil { + return x.ApplicationProtocol + } + return "" +} + +func (x *S2AContext) GetTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.TlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *S2AContext) GetCiphersuite() common_go_proto.Ciphersuite { + if x != nil { + return x.Ciphersuite + } + return common_go_proto.Ciphersuite(0) +} + +func (x *S2AContext) GetPeerIdentity() *common_go_proto.Identity { + if x != nil { + return x.PeerIdentity + } + return nil +} + +func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *S2AContext) GetPeerCertFingerprint() []byte { + if x != nil { + return x.PeerCertFingerprint + } + return nil +} + +func (x *S2AContext) GetLocalCertFingerprint() []byte { + if x != nil { + return x.LocalCertFingerprint + } + 
return nil +} + +func (x *S2AContext) GetIsHandshakeResumed() bool { + if x != nil { + return x.IsHandshakeResumed + } + return false +} + +var File_internal_proto_s2a_context_s2a_context_proto protoreflect.FileDescriptor + +var file_internal_proto_s2a_context_s2a_context_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x03, + 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x31, 0x0a, 0x14, + 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, + 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, + 0x65, 0x12, 0x38, 0x0a, 0x0d, 
0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, + 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, 0x72, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, + 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, + 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x65, 0x64, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, 0x61, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 
0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce sync.Once + file_internal_proto_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_s2a_context_s2a_context_proto_rawDesc +) + +func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { + file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { + file_internal_proto_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_context_s2a_context_proto_rawDescData) + }) + return file_internal_proto_s2a_context_s2a_context_proto_rawDescData +} + +var file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ + (*S2AContext)(nil), // 0: s2a.proto.S2AContext + (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion + (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite + (*common_go_proto.Identity)(nil), // 3: s2a.proto.Identity +} +var file_internal_proto_s2a_context_s2a_context_proto_depIdxs = []int32{ + 1, // 0: s2a.proto.S2AContext.tls_version:type_name -> s2a.proto.TLSVersion + 2, // 1: s2a.proto.S2AContext.ciphersuite:type_name -> s2a.proto.Ciphersuite + 3, // 2: s2a.proto.S2AContext.peer_identity:type_name -> s2a.proto.Identity + 3, // 3: s2a.proto.S2AContext.local_identity:type_name -> s2a.proto.Identity + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_internal_proto_s2a_context_s2a_context_proto_init() } +func file_internal_proto_s2a_context_s2a_context_proto_init() { + if File_internal_proto_s2a_context_s2a_context_proto != nil { + return + } + if 
!protoimpl.UnsafeEnabled { + file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S2AContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_s2a_context_s2a_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_s2a_context_s2a_context_proto_goTypes, + DependencyIndexes: file_internal_proto_s2a_context_s2a_context_proto_depIdxs, + MessageInfos: file_internal_proto_s2a_context_s2a_context_proto_msgTypes, + }.Build() + File_internal_proto_s2a_context_s2a_context_proto = out.File + file_internal_proto_s2a_context_s2a_context_proto_rawDesc = nil + file_internal_proto_s2a_context_s2a_context_proto_goTypes = nil + file_internal_proto_s2a_context_s2a_context_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go new file mode 100644 index 0000000000..0a86ebee59 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go @@ -0,0 +1,1377 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/s2a/s2a.proto + +package s2a_go_proto + +import ( + common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AuthenticationMechanism struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // (Optional) Application may specify an identity associated to an + // authentication mechanism. Otherwise, S2A assumes that the authentication + // mechanism is associated with the default identity. If the default identity + // cannot be determined, session setup fails. 
+ Identity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + // Types that are assignable to MechanismOneof: + // + // *AuthenticationMechanism_Token + MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` +} + +func (x *AuthenticationMechanism) Reset() { + *x = AuthenticationMechanism{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthenticationMechanism) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthenticationMechanism) ProtoMessage() {} + +func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. +func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{0} +} + +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { + if x != nil { + return x.Identity + } + return nil +} + +func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { + if m != nil { + return m.MechanismOneof + } + return nil +} + +func (x *AuthenticationMechanism) GetToken() string { + if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { + return x.Token + } + return "" +} + +type isAuthenticationMechanism_MechanismOneof interface { + isAuthenticationMechanism_MechanismOneof() +} + +type AuthenticationMechanism_Token struct { + // A token that the application uses to authenticate itself to the S2A. 
+ Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` +} + +func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} + +type ClientSessionStartReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocols supported by the client, e.g., "grpc". + ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // (Optional) The minimum TLS version number that the S2A's handshaker module + // will use to set up the session. If this field is not provided, S2A will use + // the minimum version it supports. + MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` + // (Optional) The maximum TLS version number that the S2A's handshaker module + // will use to set up the session. If this field is not provided, S2A will use + // the maximum version it supports. + MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"` + // The TLS ciphersuites that the client is willing to support. + TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"` + // (Optional) Describes which server identities are acceptable by the client. + // If target identities are provided and none of them matches the peer + // identity of the server, session setup fails. + TargetIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` + // (Optional) Application may specify a local identity. 
Otherwise, S2A chooses + // the default local identity. If the default identity cannot be determined, + // session setup fails. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,6,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The target name that is used by S2A to configure SNI in the TLS handshake. + // It is also used to perform server authorization check if avaiable. This + // check is intended to verify that the peer authenticated identity is + // authorized to run a service with the target name. + // This field MUST only contain the host portion of the server address. It + // MUST not contain the scheme or the port number. For example, if the server + // address is dns://www.example.com:443, the value of this field should be + // set to www.example.com. + TargetName string `protobuf:"bytes,7,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` +} + +func (x *ClientSessionStartReq) Reset() { + *x = ClientSessionStartReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientSessionStartReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientSessionStartReq) ProtoMessage() {} + +func (x *ClientSessionStartReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientSessionStartReq.ProtoReflect.Descriptor instead. 
+func (*ClientSessionStartReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientSessionStartReq) GetApplicationProtocols() []string { + if x != nil { + return x.ApplicationProtocols + } + return nil +} + +func (x *ClientSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ClientSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ClientSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.TlsCiphersuites + } + return nil +} + +func (x *ClientSessionStartReq) GetTargetIdentities() []*common_go_proto.Identity { + if x != nil { + return x.TargetIdentities + } + return nil +} + +func (x *ClientSessionStartReq) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *ClientSessionStartReq) GetTargetName() string { + if x != nil { + return x.TargetName + } + return "" +} + +type ServerSessionStartReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocols supported by the server, e.g., "grpc". + ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // (Optional) The minimum TLS version number that the S2A's handshaker module + // will use to set up the session. If this field is not provided, S2A will use + // the minimum version it supports. 
+ MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` + // (Optional) The maximum TLS version number that the S2A's handshaker module + // will use to set up the session. If this field is not provided, S2A will use + // the maximum version it supports. + MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"` + // The TLS ciphersuites that the server is willing to support. + TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"` + // (Optional) A list of local identities supported by the server, if + // specified. Otherwise, S2A chooses the default local identity. If the + // default identity cannot be determined, session setup fails. + LocalIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` + // The byte representation of the first handshake message received from the + // client peer. It is possible that this first message is split into multiple + // chunks. In this case, the first chunk is sent using this field and the + // following chunks are sent using the in_bytes field of SessionNextReq + // Specifically, if the client peer is using S2A, this field contains the + // bytes in the out_frames field of SessionResp message that the client peer + // received from its S2A after initiating the handshake. 
+ InBytes []byte `protobuf:"bytes,6,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` +} + +func (x *ServerSessionStartReq) Reset() { + *x = ServerSessionStartReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerSessionStartReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerSessionStartReq) ProtoMessage() {} + +func (x *ServerSessionStartReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerSessionStartReq.ProtoReflect.Descriptor instead. +func (*ServerSessionStartReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerSessionStartReq) GetApplicationProtocols() []string { + if x != nil { + return x.ApplicationProtocols + } + return nil +} + +func (x *ServerSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ServerSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ServerSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.TlsCiphersuites + } + return nil +} + +func (x *ServerSessionStartReq) GetLocalIdentities() []*common_go_proto.Identity { + if x != nil { + return x.LocalIdentities + } + return nil +} + +func (x *ServerSessionStartReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +type SessionNextReq struct { + state 
protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The byte representation of session setup, i.e., handshake messages. + // Specifically: + // - All handshake messages sent from the server to the client. + // - All, except for the first, handshake messages sent from the client to + // the server. Note that the first message is communicated to S2A using the + // in_bytes field of ServerSessionStartReq. + // + // If the peer is using S2A, this field contains the bytes in the out_frames + // field of SessionResp message that the peer received from its S2A. + InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` +} + +func (x *SessionNextReq) Reset() { + *x = SessionNextReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionNextReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionNextReq) ProtoMessage() {} + +func (x *SessionNextReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionNextReq.ProtoReflect.Descriptor instead. +func (*SessionNextReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{3} +} + +func (x *SessionNextReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +type ResumptionTicketReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The byte representation of a NewSessionTicket message received from the + // server. 
+ InBytes [][]byte `protobuf:"bytes,1,rep,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + // A connection identifier that was created and sent by S2A at the end of a + // handshake. + ConnectionId uint64 `protobuf:"varint,2,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + // The local identity that was used by S2A during session setup and included + // in |SessionResult|. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` +} + +func (x *ResumptionTicketReq) Reset() { + *x = ResumptionTicketReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResumptionTicketReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResumptionTicketReq) ProtoMessage() {} + +func (x *ResumptionTicketReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResumptionTicketReq.ProtoReflect.Descriptor instead. 
+func (*ResumptionTicketReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{4} +} + +func (x *ResumptionTicketReq) GetInBytes() [][]byte { + if x != nil { + return x.InBytes + } + return nil +} + +func (x *ResumptionTicketReq) GetConnectionId() uint64 { + if x != nil { + return x.ConnectionId + } + return 0 +} + +func (x *ResumptionTicketReq) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +type SessionReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ReqOneof: + // + // *SessionReq_ClientStart + // *SessionReq_ServerStart + // *SessionReq_Next + // *SessionReq_ResumptionTicket + ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` + // (Optional) The authentication mechanisms that the client wishes to use to + // authenticate to the S2A, ordered by preference. The S2A will always use the + // first authentication mechanism that appears in the list and is supported by + // the S2A. 
+ AuthMechanisms []*AuthenticationMechanism `protobuf:"bytes,5,rep,name=auth_mechanisms,json=authMechanisms,proto3" json:"auth_mechanisms,omitempty"` +} + +func (x *SessionReq) Reset() { + *x = SessionReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionReq) ProtoMessage() {} + +func (x *SessionReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. +func (*SessionReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{5} +} + +func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { + if m != nil { + return m.ReqOneof + } + return nil +} + +func (x *SessionReq) GetClientStart() *ClientSessionStartReq { + if x, ok := x.GetReqOneof().(*SessionReq_ClientStart); ok { + return x.ClientStart + } + return nil +} + +func (x *SessionReq) GetServerStart() *ServerSessionStartReq { + if x, ok := x.GetReqOneof().(*SessionReq_ServerStart); ok { + return x.ServerStart + } + return nil +} + +func (x *SessionReq) GetNext() *SessionNextReq { + if x, ok := x.GetReqOneof().(*SessionReq_Next); ok { + return x.Next + } + return nil +} + +func (x *SessionReq) GetResumptionTicket() *ResumptionTicketReq { + if x, ok := x.GetReqOneof().(*SessionReq_ResumptionTicket); ok { + return x.ResumptionTicket + } + return nil +} + +func (x *SessionReq) GetAuthMechanisms() []*AuthenticationMechanism { + if x != nil { + return x.AuthMechanisms + } + return nil +} + +type isSessionReq_ReqOneof interface { + 
isSessionReq_ReqOneof() +} + +type SessionReq_ClientStart struct { + // The client session setup request message. + ClientStart *ClientSessionStartReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` +} + +type SessionReq_ServerStart struct { + // The server session setup request message. + ServerStart *ServerSessionStartReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` +} + +type SessionReq_Next struct { + // The next session setup message request message. + Next *SessionNextReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` +} + +type SessionReq_ResumptionTicket struct { + // The resumption ticket that is received from the server. This message is + // only accepted by S2A if it is running as a client and if it is received + // after session setup is complete. If S2A is running as a server and it + // receives this message, the session is terminated. + ResumptionTicket *ResumptionTicketReq `protobuf:"bytes,4,opt,name=resumption_ticket,json=resumptionTicket,proto3,oneof"` +} + +func (*SessionReq_ClientStart) isSessionReq_ReqOneof() {} + +func (*SessionReq_ServerStart) isSessionReq_ReqOneof() {} + +func (*SessionReq_Next) isSessionReq_ReqOneof() {} + +func (*SessionReq_ResumptionTicket) isSessionReq_ReqOneof() {} + +type SessionState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The TLS version number that the S2A's handshaker module used to set up the + // session. + TlsVersion common_go_proto.TLSVersion `protobuf:"varint,1,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` + // The TLS ciphersuite negotiated by the S2A's handshaker module. + TlsCiphersuite common_go_proto.Ciphersuite `protobuf:"varint,2,opt,name=tls_ciphersuite,json=tlsCiphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuite,omitempty"` + // The sequence number of the next, incoming, TLS record. 
+ InSequence uint64 `protobuf:"varint,3,opt,name=in_sequence,json=inSequence,proto3" json:"in_sequence,omitempty"` + // The sequence number of the next, outgoing, TLS record. + OutSequence uint64 `protobuf:"varint,4,opt,name=out_sequence,json=outSequence,proto3" json:"out_sequence,omitempty"` + // The key for the inbound direction. + InKey []byte `protobuf:"bytes,5,opt,name=in_key,json=inKey,proto3" json:"in_key,omitempty"` + // The key for the outbound direction. + OutKey []byte `protobuf:"bytes,6,opt,name=out_key,json=outKey,proto3" json:"out_key,omitempty"` + // The constant part of the record nonce for the outbound direction. + InFixedNonce []byte `protobuf:"bytes,7,opt,name=in_fixed_nonce,json=inFixedNonce,proto3" json:"in_fixed_nonce,omitempty"` + // The constant part of the record nonce for the inbound direction. + OutFixedNonce []byte `protobuf:"bytes,8,opt,name=out_fixed_nonce,json=outFixedNonce,proto3" json:"out_fixed_nonce,omitempty"` + // A connection identifier that can be provided to S2A to perform operations + // related to this connection. This identifier will be stored by the record + // protocol, and included in the |ResumptionTicketReq| message that is later + // sent back to S2A. This field is set only for client-side connections. + ConnectionId uint64 `protobuf:"varint,9,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + // Set to true if a cached session was reused to do an abbreviated handshake. 
+ IsHandshakeResumed bool `protobuf:"varint,10,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` +} + +func (x *SessionState) Reset() { + *x = SessionState{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionState) ProtoMessage() {} + +func (x *SessionState) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionState.ProtoReflect.Descriptor instead. +func (*SessionState) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{6} +} + +func (x *SessionState) GetTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.TlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *SessionState) GetTlsCiphersuite() common_go_proto.Ciphersuite { + if x != nil { + return x.TlsCiphersuite + } + return common_go_proto.Ciphersuite(0) +} + +func (x *SessionState) GetInSequence() uint64 { + if x != nil { + return x.InSequence + } + return 0 +} + +func (x *SessionState) GetOutSequence() uint64 { + if x != nil { + return x.OutSequence + } + return 0 +} + +func (x *SessionState) GetInKey() []byte { + if x != nil { + return x.InKey + } + return nil +} + +func (x *SessionState) GetOutKey() []byte { + if x != nil { + return x.OutKey + } + return nil +} + +func (x *SessionState) GetInFixedNonce() []byte { + if x != nil { + return x.InFixedNonce + } + return nil +} + +func (x *SessionState) GetOutFixedNonce() []byte { + if x != nil { + return x.OutFixedNonce + } + return nil +} + 
+func (x *SessionState) GetConnectionId() uint64 { + if x != nil { + return x.ConnectionId + } + return 0 +} + +func (x *SessionState) GetIsHandshakeResumed() bool { + if x != nil { + return x.IsHandshakeResumed + } + return false +} + +type SessionResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocol negotiated for this session. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The session state at the end. This state contains all cryptographic + // material required to initialize the record protocol object. + State *SessionState `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + // The authenticated identity of the peer. + PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` + // The local identity used during session setup. This could be: + // - The local identity that the client specifies in ClientSessionStartReq. + // - One of the local identities that the server specifies in + // ServerSessionStartReq. + // - If neither client or server specifies local identities, the S2A picks the + // default one. In this case, this field will contain that identity. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The SHA256 hash of the local certificate used in the handshake. + LocalCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` + // The SHA256 hash of the peer certificate used in the handshake. 
+ PeerCertFingerprint []byte `protobuf:"bytes,7,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` +} + +func (x *SessionResult) Reset() { + *x = SessionResult{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionResult) ProtoMessage() {} + +func (x *SessionResult) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionResult.ProtoReflect.Descriptor instead. +func (*SessionResult) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{7} +} + +func (x *SessionResult) GetApplicationProtocol() string { + if x != nil { + return x.ApplicationProtocol + } + return "" +} + +func (x *SessionResult) GetState() *SessionState { + if x != nil { + return x.State + } + return nil +} + +func (x *SessionResult) GetPeerIdentity() *common_go_proto.Identity { + if x != nil { + return x.PeerIdentity + } + return nil +} + +func (x *SessionResult) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *SessionResult) GetLocalCertFingerprint() []byte { + if x != nil { + return x.LocalCertFingerprint + } + return nil +} + +func (x *SessionResult) GetPeerCertFingerprint() []byte { + if x != nil { + return x.PeerCertFingerprint + } + return nil +} + +type SessionStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code that is specific to the application 
and the implementation + // of S2A, e.g., gRPC status code. + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // The status details. + Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *SessionStatus) Reset() { + *x = SessionStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionStatus) ProtoMessage() {} + +func (x *SessionStatus) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionStatus.ProtoReflect.Descriptor instead. +func (*SessionStatus) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{8} +} + +func (x *SessionStatus) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *SessionStatus) GetDetails() string { + if x != nil { + return x.Details + } + return "" +} + +type SessionResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The local identity used during session setup. This could be: + // - The local identity that the client specifies in ClientSessionStartReq. + // - One of the local identities that the server specifies in + // ServerSessionStartReq. + // - If neither client or server specifies local identities, the S2A picks the + // default one. In this case, this field will contain that identity. 
+ // + // If the SessionResult is populated, then this must coincide with the local + // identity specified in the SessionResult; otherwise, the handshake must + // fail. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The byte representation of the frames that should be sent to the peer. May + // be empty if nothing needs to be sent to the peer or if in_bytes in the + // SessionReq is incomplete. All bytes in a non-empty out_frames must be sent + // to the peer even if the session setup status is not OK as these frames may + // contain appropriate alerts. + OutFrames []byte `protobuf:"bytes,2,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` + // Number of bytes in the in_bytes field that are consumed by S2A. It is + // possible that part of in_bytes is unrelated to the session setup process. + BytesConsumed uint32 `protobuf:"varint,3,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` + // This is set if the session is successfully set up. out_frames may + // still be set to frames that needs to be forwarded to the peer. + Result *SessionResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"` + // Status of session setup at the current stage. 
+ Status *SessionStatus `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *SessionResp) Reset() { + *x = SessionResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionResp) ProtoMessage() {} + +func (x *SessionResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead. +func (*SessionResp) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{9} +} + +func (x *SessionResp) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *SessionResp) GetOutFrames() []byte { + if x != nil { + return x.OutFrames + } + return nil +} + +func (x *SessionResp) GetBytesConsumed() uint32 { + if x != nil { + return x.BytesConsumed + } + return 0 +} + +func (x *SessionResp) GetResult() *SessionResult { + if x != nil { + return x.Result + } + return nil +} + +func (x *SessionResp) GetStatus() *SessionStatus { + if x != nil { + return x.Status + } + return nil +} + +var File_internal_proto_s2a_s2a_proto protoreflect.FileDescriptor + +var file_internal_proto_s2a_s2a_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x75, 0x0a, + 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, + 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xac, 0x03, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, + 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 
0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x11, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x22, 0xe8, 0x02, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, + 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x73, 
0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2b, + 0x0a, 0x0e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x71, + 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 
0x74, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x13, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, + 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, + 0xf4, 0x02, 0x0a, 0x0a, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, + 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2f, 0x0a, 0x04, + 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 
0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, + 0x78, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x12, 0x4d, 0x0a, + 0x11, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x10, 0x72, 0x65, 0x73, 0x75, + 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x4b, 0x0a, 0x0f, + 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x4d, + 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, + 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa0, 0x03, 0x0a, 0x0c, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x3f, 0x0a, 0x0f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, + 0x52, 0x0e, 0x74, 0x6c, 
0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x69, 0x6e, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6f, + 0x75, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, + 0x74, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, + 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e, + 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6f, 0x75, + 0x74, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6f, 0x75, 0x74, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, + 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x22, 0xd1, 0x02, 0x0a, 0x0d, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2d, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, + 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, + 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, + 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, + 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, + 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, 0x3d, 0x0a, + 0x0d, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, + 
0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, + 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3a, 0x0a, 0x0e, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, + 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x30, + 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x30, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x32, 0x51, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x43, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x2e, 0x73, 0x32, 0x61, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, + 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_internal_proto_s2a_s2a_proto_rawDescOnce sync.Once + file_internal_proto_s2a_s2a_proto_rawDescData = file_internal_proto_s2a_s2a_proto_rawDesc +) + +func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { + file_internal_proto_s2a_s2a_proto_rawDescOnce.Do(func() { + file_internal_proto_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_s2a_proto_rawDescData) + }) + return file_internal_proto_s2a_s2a_proto_rawDescData +} + +var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ + (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism + (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq + (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq + (*SessionNextReq)(nil), // 3: s2a.proto.SessionNextReq + (*ResumptionTicketReq)(nil), // 4: s2a.proto.ResumptionTicketReq + (*SessionReq)(nil), // 5: s2a.proto.SessionReq + (*SessionState)(nil), // 6: s2a.proto.SessionState + (*SessionResult)(nil), // 7: s2a.proto.SessionResult + (*SessionStatus)(nil), // 8: s2a.proto.SessionStatus + (*SessionResp)(nil), // 9: s2a.proto.SessionResp + (*common_go_proto.Identity)(nil), // 10: s2a.proto.Identity + (common_go_proto.TLSVersion)(0), // 11: 
s2a.proto.TLSVersion + (common_go_proto.Ciphersuite)(0), // 12: s2a.proto.Ciphersuite +} +var file_internal_proto_s2a_s2a_proto_depIdxs = []int32{ + 10, // 0: s2a.proto.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity + 11, // 1: s2a.proto.ClientSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion + 11, // 2: s2a.proto.ClientSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion + 12, // 3: s2a.proto.ClientSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite + 10, // 4: s2a.proto.ClientSessionStartReq.target_identities:type_name -> s2a.proto.Identity + 10, // 5: s2a.proto.ClientSessionStartReq.local_identity:type_name -> s2a.proto.Identity + 11, // 6: s2a.proto.ServerSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion + 11, // 7: s2a.proto.ServerSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion + 12, // 8: s2a.proto.ServerSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite + 10, // 9: s2a.proto.ServerSessionStartReq.local_identities:type_name -> s2a.proto.Identity + 10, // 10: s2a.proto.ResumptionTicketReq.local_identity:type_name -> s2a.proto.Identity + 1, // 11: s2a.proto.SessionReq.client_start:type_name -> s2a.proto.ClientSessionStartReq + 2, // 12: s2a.proto.SessionReq.server_start:type_name -> s2a.proto.ServerSessionStartReq + 3, // 13: s2a.proto.SessionReq.next:type_name -> s2a.proto.SessionNextReq + 4, // 14: s2a.proto.SessionReq.resumption_ticket:type_name -> s2a.proto.ResumptionTicketReq + 0, // 15: s2a.proto.SessionReq.auth_mechanisms:type_name -> s2a.proto.AuthenticationMechanism + 11, // 16: s2a.proto.SessionState.tls_version:type_name -> s2a.proto.TLSVersion + 12, // 17: s2a.proto.SessionState.tls_ciphersuite:type_name -> s2a.proto.Ciphersuite + 6, // 18: s2a.proto.SessionResult.state:type_name -> s2a.proto.SessionState + 10, // 19: s2a.proto.SessionResult.peer_identity:type_name -> s2a.proto.Identity + 10, // 20: 
s2a.proto.SessionResult.local_identity:type_name -> s2a.proto.Identity + 10, // 21: s2a.proto.SessionResp.local_identity:type_name -> s2a.proto.Identity + 7, // 22: s2a.proto.SessionResp.result:type_name -> s2a.proto.SessionResult + 8, // 23: s2a.proto.SessionResp.status:type_name -> s2a.proto.SessionStatus + 5, // 24: s2a.proto.S2AService.SetUpSession:input_type -> s2a.proto.SessionReq + 9, // 25: s2a.proto.S2AService.SetUpSession:output_type -> s2a.proto.SessionResp + 25, // [25:26] is the sub-list for method output_type + 24, // [24:25] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_internal_proto_s2a_s2a_proto_init() } +func file_internal_proto_s2a_s2a_proto_init() { + if File_internal_proto_s2a_s2a_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticationMechanism); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientSessionStartReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerSessionStartReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionNextReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResumptionTicketReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AuthenticationMechanism_Token)(nil), + } + file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*SessionReq_ClientStart)(nil), + (*SessionReq_ServerStart)(nil), + (*SessionReq_Next)(nil), + (*SessionReq_ResumptionTicket)(nil), + } + type x 
struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_s2a_s2a_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_internal_proto_s2a_s2a_proto_goTypes, + DependencyIndexes: file_internal_proto_s2a_s2a_proto_depIdxs, + MessageInfos: file_internal_proto_s2a_s2a_proto_msgTypes, + }.Build() + File_internal_proto_s2a_s2a_proto = out.File + file_internal_proto_s2a_s2a_proto_rawDesc = nil + file_internal_proto_s2a_s2a_proto_goTypes = nil + file_internal_proto_s2a_s2a_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go new file mode 100644 index 0000000000..0fa582fc87 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go @@ -0,0 +1,173 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.12 +// source: internal/proto/s2a/s2a.proto + +package s2a_go_proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" +) + +// S2AServiceClient is the client API for S2AService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type S2AServiceClient interface { + // S2A service accepts a stream of session setup requests and returns a stream + // of session setup responses. The client of this service is expected to send + // exactly one client_start or server_start message followed by at least one + // next message. Applications running TLS clients can send requests with + // resumption_ticket messages only after the session is successfully set up. + // + // Every time S2A client sends a request, this service sends a response. + // However, clients do not have to wait for service response before sending + // the next request. + SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) +} + +type s2AServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { + return &s2AServiceClient{cc} +} + +func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &s2AServiceSetUpSessionClient{stream} + return x, nil +} + +type S2AService_SetUpSessionClient interface { + Send(*SessionReq) error + Recv() (*SessionResp, error) + grpc.ClientStream +} + +type s2AServiceSetUpSessionClient struct { + grpc.ClientStream +} + +func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { + return x.ClientStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { + m := new(SessionResp) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AServiceServer is the server API for S2AService service. +// All implementations must embed UnimplementedS2AServiceServer +// for forward compatibility +type S2AServiceServer interface { + // S2A service accepts a stream of session setup requests and returns a stream + // of session setup responses. The client of this service is expected to send + // exactly one client_start or server_start message followed by at least one + // next message. Applications running TLS clients can send requests with + // resumption_ticket messages only after the session is successfully set up. + // + // Every time S2A client sends a request, this service sends a response. + // However, clients do not have to wait for service response before sending + // the next request. + SetUpSession(S2AService_SetUpSessionServer) error + mustEmbedUnimplementedS2AServiceServer() +} + +// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. +type UnimplementedS2AServiceServer struct { +} + +func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { + return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") +} +func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} + +// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. 
+// Use of this interface is not recommended, as added methods to S2AServiceServer will +// result in compilation errors. +type UnsafeS2AServiceServer interface { + mustEmbedUnimplementedS2AServiceServer() +} + +func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { + s.RegisterService(&S2AService_ServiceDesc, srv) +} + +func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) +} + +type S2AService_SetUpSessionServer interface { + Send(*SessionResp) error + Recv() (*SessionReq, error) + grpc.ServerStream +} + +type s2AServiceSetUpSessionServer struct { + grpc.ServerStream +} + +func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { + return x.ServerStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { + m := new(SessionReq) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var S2AService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "s2a.proto.S2AService", + HandlerType: (*S2AServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SetUpSession", + Handler: _S2AService_SetUpSession_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "internal/proto/s2a/s2a.proto", +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go new file mode 100644 index 0000000000..c84bed9774 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go @@ -0,0 +1,367 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/v2/common/common.proto + +package common_go_proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The TLS 1.0-1.2 ciphersuites that the application can negotiate when using +// S2A. +type Ciphersuite int32 + +const ( + Ciphersuite_CIPHERSUITE_UNSPECIFIED Ciphersuite = 0 + Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 1 + Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 2 + Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 3 + Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 4 + Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 5 + Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 6 +) + +// Enum value maps for Ciphersuite. +var ( + Ciphersuite_name = map[int32]string{ + 0: "CIPHERSUITE_UNSPECIFIED", + 1: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + 2: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + 3: "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + 4: "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + 5: "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + 6: "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + } + Ciphersuite_value = map[string]int32{ + "CIPHERSUITE_UNSPECIFIED": 0, + "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": 1, + "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": 2, + "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": 3, + "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256": 4, + "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384": 5, + "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": 6, + } +) + +func (x Ciphersuite) Enum() *Ciphersuite { + p := new(Ciphersuite) + *p = x + return p +} + +func (x Ciphersuite) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Ciphersuite) Descriptor() 
protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[0].Descriptor() +} + +func (Ciphersuite) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[0] +} + +func (x Ciphersuite) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Ciphersuite.Descriptor instead. +func (Ciphersuite) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} +} + +// The TLS versions supported by S2A's handshaker module. +type TLSVersion int32 + +const ( + TLSVersion_TLS_VERSION_UNSPECIFIED TLSVersion = 0 + TLSVersion_TLS_VERSION_1_0 TLSVersion = 1 + TLSVersion_TLS_VERSION_1_1 TLSVersion = 2 + TLSVersion_TLS_VERSION_1_2 TLSVersion = 3 + TLSVersion_TLS_VERSION_1_3 TLSVersion = 4 +) + +// Enum value maps for TLSVersion. +var ( + TLSVersion_name = map[int32]string{ + 0: "TLS_VERSION_UNSPECIFIED", + 1: "TLS_VERSION_1_0", + 2: "TLS_VERSION_1_1", + 3: "TLS_VERSION_1_2", + 4: "TLS_VERSION_1_3", + } + TLSVersion_value = map[string]int32{ + "TLS_VERSION_UNSPECIFIED": 0, + "TLS_VERSION_1_0": 1, + "TLS_VERSION_1_1": 2, + "TLS_VERSION_1_2": 3, + "TLS_VERSION_1_3": 4, + } +) + +func (x TLSVersion) Enum() *TLSVersion { + p := new(TLSVersion) + *p = x + return p +} + +func (x TLSVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[1].Descriptor() +} + +func (TLSVersion) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[1] +} + +func (x TLSVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TLSVersion.Descriptor instead. 
+func (TLSVersion) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{1} +} + +// The side in the TLS connection. +type ConnectionSide int32 + +const ( + ConnectionSide_CONNECTION_SIDE_UNSPECIFIED ConnectionSide = 0 + ConnectionSide_CONNECTION_SIDE_CLIENT ConnectionSide = 1 + ConnectionSide_CONNECTION_SIDE_SERVER ConnectionSide = 2 +) + +// Enum value maps for ConnectionSide. +var ( + ConnectionSide_name = map[int32]string{ + 0: "CONNECTION_SIDE_UNSPECIFIED", + 1: "CONNECTION_SIDE_CLIENT", + 2: "CONNECTION_SIDE_SERVER", + } + ConnectionSide_value = map[string]int32{ + "CONNECTION_SIDE_UNSPECIFIED": 0, + "CONNECTION_SIDE_CLIENT": 1, + "CONNECTION_SIDE_SERVER": 2, + } +) + +func (x ConnectionSide) Enum() *ConnectionSide { + p := new(ConnectionSide) + *p = x + return p +} + +func (x ConnectionSide) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConnectionSide) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[2].Descriptor() +} + +func (ConnectionSide) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[2] +} + +func (x ConnectionSide) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConnectionSide.Descriptor instead. +func (ConnectionSide) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{2} +} + +// The ALPN protocols that the application can negotiate during a TLS handshake. +type AlpnProtocol int32 + +const ( + AlpnProtocol_ALPN_PROTOCOL_UNSPECIFIED AlpnProtocol = 0 + AlpnProtocol_ALPN_PROTOCOL_GRPC AlpnProtocol = 1 + AlpnProtocol_ALPN_PROTOCOL_HTTP2 AlpnProtocol = 2 + AlpnProtocol_ALPN_PROTOCOL_HTTP1_1 AlpnProtocol = 3 +) + +// Enum value maps for AlpnProtocol. 
+var ( + AlpnProtocol_name = map[int32]string{ + 0: "ALPN_PROTOCOL_UNSPECIFIED", + 1: "ALPN_PROTOCOL_GRPC", + 2: "ALPN_PROTOCOL_HTTP2", + 3: "ALPN_PROTOCOL_HTTP1_1", + } + AlpnProtocol_value = map[string]int32{ + "ALPN_PROTOCOL_UNSPECIFIED": 0, + "ALPN_PROTOCOL_GRPC": 1, + "ALPN_PROTOCOL_HTTP2": 2, + "ALPN_PROTOCOL_HTTP1_1": 3, + } +) + +func (x AlpnProtocol) Enum() *AlpnProtocol { + p := new(AlpnProtocol) + *p = x + return p +} + +func (x AlpnProtocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AlpnProtocol) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[3].Descriptor() +} + +func (AlpnProtocol) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[3] +} + +func (x AlpnProtocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AlpnProtocol.Descriptor instead. +func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} +} + +var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor + +var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, + 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, + 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 
0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, + 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, + 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, + 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, + 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, + 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, + 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, + 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, + 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, + 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, + 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, + 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, + 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, + 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, + 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 
0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, + 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, + 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, + 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, + 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, + 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, + 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, + 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, + 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, + 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, + 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, + 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, + 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, + 0x4c, 0x5f, 0x48, 0x54, 
0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_v2_common_common_proto_rawDescOnce sync.Once + file_internal_proto_v2_common_common_proto_rawDescData = file_internal_proto_v2_common_common_proto_rawDesc +) + +func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { + file_internal_proto_v2_common_common_proto_rawDescOnce.Do(func() { + file_internal_proto_v2_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_common_common_proto_rawDescData) + }) + return file_internal_proto_v2_common_common_proto_rawDescData +} + +var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ + (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite + (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion + (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide + (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol +} +var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_internal_proto_v2_common_common_proto_init() } +func file_internal_proto_v2_common_common_proto_init() { + if File_internal_proto_v2_common_common_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, + NumEnums: 4, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_v2_common_common_proto_goTypes, + DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, + EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, + }.Build() + File_internal_proto_v2_common_common_proto = out.File + file_internal_proto_v2_common_common_proto_rawDesc = nil + file_internal_proto_v2_common_common_proto_goTypes = nil + file_internal_proto_v2_common_common_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go new file mode 100644 index 0000000000..b7fd871c7a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go @@ -0,0 +1,248 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/v2/s2a_context/s2a_context.proto + +package s2a_context_go_proto + +import ( + common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type S2AContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The SPIFFE ID from the peer leaf certificate, if present. + // + // This field is only populated if the leaf certificate is a valid SPIFFE + // SVID; in particular, there is a unique URI SAN and this URI SAN is a valid + // SPIFFE ID. + LeafCertSpiffeId string `protobuf:"bytes,1,opt,name=leaf_cert_spiffe_id,json=leafCertSpiffeId,proto3" json:"leaf_cert_spiffe_id,omitempty"` + // The URIs that are present in the SubjectAltName extension of the peer leaf + // certificate. + // + // Note that the extracted URIs are not validated and may not be properly + // formatted. + LeafCertUris []string `protobuf:"bytes,2,rep,name=leaf_cert_uris,json=leafCertUris,proto3" json:"leaf_cert_uris,omitempty"` + // The DNSNames that are present in the SubjectAltName extension of the peer + // leaf certificate. + LeafCertDnsnames []string `protobuf:"bytes,3,rep,name=leaf_cert_dnsnames,json=leafCertDnsnames,proto3" json:"leaf_cert_dnsnames,omitempty"` + // The (ordered) list of fingerprints in the certificate chain used to verify + // the given leaf certificate. The order MUST be from leaf certificate + // fingerprint to root certificate fingerprint. 
+ // + // A fingerprint is the base-64 encoding of the SHA256 hash of the + // DER-encoding of a certificate. The list MAY be populated even if the peer + // certificate chain was NOT validated successfully. + PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` + // The local identity used during session setup. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The SHA256 hash of the DER-encoding of the local leaf certificate used in + // the handshake. + LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` +} + +func (x *S2AContext) Reset() { + *x = S2AContext{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S2AContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S2AContext) ProtoMessage() {} + +func (x *S2AContext) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. 
+func (*S2AContext) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} +} + +func (x *S2AContext) GetLeafCertSpiffeId() string { + if x != nil { + return x.LeafCertSpiffeId + } + return "" +} + +func (x *S2AContext) GetLeafCertUris() []string { + if x != nil { + return x.LeafCertUris + } + return nil +} + +func (x *S2AContext) GetLeafCertDnsnames() []string { + if x != nil { + return x.LeafCertDnsnames + } + return nil +} + +func (x *S2AContext) GetPeerCertificateChainFingerprints() []string { + if x != nil { + return x.PeerCertificateChainFingerprints + } + return nil +} + +func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *S2AContext) GetLocalLeafCertFingerprint() []byte { + if x != nil { + return x.LocalLeafCertFingerprint + } + return nil +} + +var File_internal_proto_v2_s2a_context_s2a_context_proto protoreflect.FileDescriptor + +var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, + 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, + 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 
0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, + 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, + 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, + 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, + 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, + 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, + 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 
0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce sync.Once + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc +) + +func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData) + }) + return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData +} + +var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ + (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext + (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity +} +var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ + 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_internal_proto_v2_s2a_context_s2a_context_proto_init() } +func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { + if File_internal_proto_v2_s2a_context_s2a_context_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*S2AContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes, + DependencyIndexes: file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs, + MessageInfos: file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes, + }.Build() + File_internal_proto_v2_s2a_context_s2a_context_proto = out.File + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = nil + file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = nil + file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go new file mode 100644 index 0000000000..e843450c7e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go @@ -0,0 +1,2494 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/v2/s2a/s2a.proto + +package s2a_go_proto + +import ( + common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" + common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" + s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SignatureAlgorithm int32 + +const ( + SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED SignatureAlgorithm = 0 + // RSA Public-Key Cryptography Standards #1. + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256 SignatureAlgorithm = 1 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384 SignatureAlgorithm = 2 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512 SignatureAlgorithm = 3 + // ECDSA. + SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256 SignatureAlgorithm = 4 + SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384 SignatureAlgorithm = 5 + SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512 SignatureAlgorithm = 6 + // RSA Probabilistic Signature Scheme. + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256 SignatureAlgorithm = 7 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384 SignatureAlgorithm = 8 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512 SignatureAlgorithm = 9 + // ED25519. + SignatureAlgorithm_S2A_SSL_SIGN_ED25519 SignatureAlgorithm = 10 +) + +// Enum value maps for SignatureAlgorithm. 
+var ( + SignatureAlgorithm_name = map[int32]string{ + 0: "S2A_SSL_SIGN_UNSPECIFIED", + 1: "S2A_SSL_SIGN_RSA_PKCS1_SHA256", + 2: "S2A_SSL_SIGN_RSA_PKCS1_SHA384", + 3: "S2A_SSL_SIGN_RSA_PKCS1_SHA512", + 4: "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256", + 5: "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384", + 6: "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512", + 7: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256", + 8: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384", + 9: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512", + 10: "S2A_SSL_SIGN_ED25519", + } + SignatureAlgorithm_value = map[string]int32{ + "S2A_SSL_SIGN_UNSPECIFIED": 0, + "S2A_SSL_SIGN_RSA_PKCS1_SHA256": 1, + "S2A_SSL_SIGN_RSA_PKCS1_SHA384": 2, + "S2A_SSL_SIGN_RSA_PKCS1_SHA512": 3, + "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256": 4, + "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384": 5, + "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512": 6, + "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256": 7, + "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384": 8, + "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512": 9, + "S2A_SSL_SIGN_ED25519": 10, + } +) + +func (x SignatureAlgorithm) Enum() *SignatureAlgorithm { + p := new(SignatureAlgorithm) + *p = x + return p +} + +func (x SignatureAlgorithm) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SignatureAlgorithm) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[0].Descriptor() +} + +func (SignatureAlgorithm) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[0] +} + +func (x SignatureAlgorithm) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SignatureAlgorithm.Descriptor instead. 
+func (SignatureAlgorithm) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} +} + +type GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate int32 + +const ( + GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 0 + GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 1 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 2 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 3 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 4 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 5 +) + +// Enum value maps for GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate. 
+var ( + GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "DONT_REQUEST_CLIENT_CERTIFICATE", + 2: "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", + 3: "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY", + 4: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", + 5: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY", + } + GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_value = map[string]int32{ + "UNSPECIFIED": 0, + "DONT_REQUEST_CLIENT_CERTIFICATE": 1, + "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 2, + "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY": 3, + "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 4, + "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY": 5, + } +) + +func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Enum() *GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { + p := new(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) + *p = x + return p +} + +func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[1].Descriptor() +} + +func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[1] +} + +func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate.Descriptor instead. 
+func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1, 0} +} + +type OffloadPrivateKeyOperationReq_PrivateKeyOperation int32 + +const ( + OffloadPrivateKeyOperationReq_UNSPECIFIED OffloadPrivateKeyOperationReq_PrivateKeyOperation = 0 + // When performing a TLS 1.2 or 1.3 handshake, the (partial) transcript of + // the TLS handshake must be signed to prove possession of the private key. + // + // See https://www.rfc-editor.org/rfc/rfc8446.html#section-4.4.3. + OffloadPrivateKeyOperationReq_SIGN OffloadPrivateKeyOperationReq_PrivateKeyOperation = 1 + // When performing a TLS 1.2 handshake using an RSA algorithm, the key + // exchange algorithm involves the client generating a premaster secret, + // encrypting it using the server's public key, and sending this encrypted + // blob to the server in a ClientKeyExchange message. + // + // See https://www.rfc-editor.org/rfc/rfc4346#section-7.4.7.1. + OffloadPrivateKeyOperationReq_DECRYPT OffloadPrivateKeyOperationReq_PrivateKeyOperation = 2 +) + +// Enum value maps for OffloadPrivateKeyOperationReq_PrivateKeyOperation. 
+var ( + OffloadPrivateKeyOperationReq_PrivateKeyOperation_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SIGN", + 2: "DECRYPT", + } + OffloadPrivateKeyOperationReq_PrivateKeyOperation_value = map[string]int32{ + "UNSPECIFIED": 0, + "SIGN": 1, + "DECRYPT": 2, + } +) + +func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Enum() *OffloadPrivateKeyOperationReq_PrivateKeyOperation { + p := new(OffloadPrivateKeyOperationReq_PrivateKeyOperation) + *p = x + return p +} + +func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[2].Descriptor() +} + +func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[2] +} + +func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use OffloadPrivateKeyOperationReq_PrivateKeyOperation.Descriptor instead. +func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5, 0} +} + +type OffloadResumptionKeyOperationReq_ResumptionKeyOperation int32 + +const ( + OffloadResumptionKeyOperationReq_UNSPECIFIED OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 0 + OffloadResumptionKeyOperationReq_ENCRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 1 + OffloadResumptionKeyOperationReq_DECRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 2 +) + +// Enum value maps for OffloadResumptionKeyOperationReq_ResumptionKeyOperation. 
+var ( + OffloadResumptionKeyOperationReq_ResumptionKeyOperation_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "ENCRYPT", + 2: "DECRYPT", + } + OffloadResumptionKeyOperationReq_ResumptionKeyOperation_value = map[string]int32{ + "UNSPECIFIED": 0, + "ENCRYPT": 1, + "DECRYPT": 2, + } +) + +func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Enum() *OffloadResumptionKeyOperationReq_ResumptionKeyOperation { + p := new(OffloadResumptionKeyOperationReq_ResumptionKeyOperation) + *p = x + return p +} + +func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[3].Descriptor() +} + +func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[3] +} + +func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use OffloadResumptionKeyOperationReq_ResumptionKeyOperation.Descriptor instead. +func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7, 0} +} + +type ValidatePeerCertificateChainReq_VerificationMode int32 + +const ( + // The default verification mode supported by S2A. + ValidatePeerCertificateChainReq_UNSPECIFIED ValidatePeerCertificateChainReq_VerificationMode = 0 + // The SPIFFE verification mode selects the set of trusted certificates to + // use for path building based on the SPIFFE trust domain in the peer's leaf + // certificate. 
+ ValidatePeerCertificateChainReq_SPIFFE ValidatePeerCertificateChainReq_VerificationMode = 1 + // The connect-to-Google verification mode uses the trust bundle for + // connecting to Google, e.g. *.mtls.googleapis.com endpoints. + ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 +) + +// Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. +var ( + ValidatePeerCertificateChainReq_VerificationMode_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SPIFFE", + 2: "CONNECT_TO_GOOGLE", + } + ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ + "UNSPECIFIED": 0, + "SPIFFE": 1, + "CONNECT_TO_GOOGLE": 2, + } +) + +func (x ValidatePeerCertificateChainReq_VerificationMode) Enum() *ValidatePeerCertificateChainReq_VerificationMode { + p := new(ValidatePeerCertificateChainReq_VerificationMode) + *p = x + return p +} + +func (x ValidatePeerCertificateChainReq_VerificationMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ValidatePeerCertificateChainReq_VerificationMode) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[4].Descriptor() +} + +func (ValidatePeerCertificateChainReq_VerificationMode) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[4] +} + +func (x ValidatePeerCertificateChainReq_VerificationMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq_VerificationMode.Descriptor instead. 
+func (ValidatePeerCertificateChainReq_VerificationMode) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} +} + +type ValidatePeerCertificateChainResp_ValidationResult int32 + +const ( + ValidatePeerCertificateChainResp_UNSPECIFIED ValidatePeerCertificateChainResp_ValidationResult = 0 + ValidatePeerCertificateChainResp_SUCCESS ValidatePeerCertificateChainResp_ValidationResult = 1 + ValidatePeerCertificateChainResp_FAILURE ValidatePeerCertificateChainResp_ValidationResult = 2 +) + +// Enum value maps for ValidatePeerCertificateChainResp_ValidationResult. +var ( + ValidatePeerCertificateChainResp_ValidationResult_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SUCCESS", + 2: "FAILURE", + } + ValidatePeerCertificateChainResp_ValidationResult_value = map[string]int32{ + "UNSPECIFIED": 0, + "SUCCESS": 1, + "FAILURE": 2, + } +) + +func (x ValidatePeerCertificateChainResp_ValidationResult) Enum() *ValidatePeerCertificateChainResp_ValidationResult { + p := new(ValidatePeerCertificateChainResp_ValidationResult) + *p = x + return p +} + +func (x ValidatePeerCertificateChainResp_ValidationResult) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ValidatePeerCertificateChainResp_ValidationResult) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[5].Descriptor() +} + +func (ValidatePeerCertificateChainResp_ValidationResult) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[5] +} + +func (x ValidatePeerCertificateChainResp_ValidationResult) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ValidatePeerCertificateChainResp_ValidationResult.Descriptor instead. 
+func (ValidatePeerCertificateChainResp_ValidationResult) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10, 0} +} + +type AlpnPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If true, the application MUST perform ALPN negotiation. + EnableAlpnNegotiation bool `protobuf:"varint,1,opt,name=enable_alpn_negotiation,json=enableAlpnNegotiation,proto3" json:"enable_alpn_negotiation,omitempty"` + // The ordered list of ALPN protocols that specify how the application SHOULD + // negotiate ALPN during the TLS handshake. + // + // The application MAY ignore any ALPN protocols in this list that are not + // supported by the application. + AlpnProtocols []common_go_proto.AlpnProtocol `protobuf:"varint,2,rep,packed,name=alpn_protocols,json=alpnProtocols,proto3,enum=s2a.proto.v2.AlpnProtocol" json:"alpn_protocols,omitempty"` +} + +func (x *AlpnPolicy) Reset() { + *x = AlpnPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlpnPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlpnPolicy) ProtoMessage() {} + +func (x *AlpnPolicy) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlpnPolicy.ProtoReflect.Descriptor instead. 
+func (*AlpnPolicy) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} +} + +func (x *AlpnPolicy) GetEnableAlpnNegotiation() bool { + if x != nil { + return x.EnableAlpnNegotiation + } + return false +} + +func (x *AlpnPolicy) GetAlpnProtocols() []common_go_proto.AlpnProtocol { + if x != nil { + return x.AlpnProtocols + } + return nil +} + +type AuthenticationMechanism struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Applications may specify an identity associated to an authentication + // mechanism. Otherwise, S2A assumes that the authentication mechanism is + // associated with the default identity. If the default identity cannot be + // determined, the request is rejected. + Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + // Types that are assignable to MechanismOneof: + // + // *AuthenticationMechanism_Token + MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` +} + +func (x *AuthenticationMechanism) Reset() { + *x = AuthenticationMechanism{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthenticationMechanism) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthenticationMechanism) ProtoMessage() {} + +func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. 
+func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} +} + +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { + if x != nil { + return x.Identity + } + return nil +} + +func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { + if m != nil { + return m.MechanismOneof + } + return nil +} + +func (x *AuthenticationMechanism) GetToken() string { + if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { + return x.Token + } + return "" +} + +type isAuthenticationMechanism_MechanismOneof interface { + isAuthenticationMechanism_MechanismOneof() +} + +type AuthenticationMechanism_Token struct { + // A token that the application uses to authenticate itself to S2A. + Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` +} + +func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} + +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code that is specific to the application and the implementation + // of S2A, e.g., gRPC status code. + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // The status details. 
+ Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. +func (*Status) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{2} +} + +func (x *Status) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetDetails() string { + if x != nil { + return x.Details + } + return "" +} + +type GetTlsConfigurationReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The role of the application in the TLS connection. + ConnectionSide common_go_proto.ConnectionSide `protobuf:"varint,1,opt,name=connection_side,json=connectionSide,proto3,enum=s2a.proto.v2.ConnectionSide" json:"connection_side,omitempty"` + // The server name indication (SNI) extension, which MAY be populated when a + // server is offloading to S2A. The SNI is used to determine the server + // identity if the local identity in the request is empty. 
+ Sni string `protobuf:"bytes,2,opt,name=sni,proto3" json:"sni,omitempty"` +} + +func (x *GetTlsConfigurationReq) Reset() { + *x = GetTlsConfigurationReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationReq) ProtoMessage() {} + +func (x *GetTlsConfigurationReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationReq.ProtoReflect.Descriptor instead. +func (*GetTlsConfigurationReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{3} +} + +func (x *GetTlsConfigurationReq) GetConnectionSide() common_go_proto.ConnectionSide { + if x != nil { + return x.ConnectionSide + } + return common_go_proto.ConnectionSide(0) +} + +func (x *GetTlsConfigurationReq) GetSni() string { + if x != nil { + return x.Sni + } + return "" +} + +type GetTlsConfigurationResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to TlsConfiguration: + // + // *GetTlsConfigurationResp_ClientTlsConfiguration_ + // *GetTlsConfigurationResp_ServerTlsConfiguration_ + TlsConfiguration isGetTlsConfigurationResp_TlsConfiguration `protobuf_oneof:"tls_configuration"` +} + +func (x *GetTlsConfigurationResp) Reset() { + *x = GetTlsConfigurationResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + 
+func (x *GetTlsConfigurationResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationResp) ProtoMessage() {} + +func (x *GetTlsConfigurationResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationResp.ProtoReflect.Descriptor instead. +func (*GetTlsConfigurationResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4} +} + +func (m *GetTlsConfigurationResp) GetTlsConfiguration() isGetTlsConfigurationResp_TlsConfiguration { + if m != nil { + return m.TlsConfiguration + } + return nil +} + +func (x *GetTlsConfigurationResp) GetClientTlsConfiguration() *GetTlsConfigurationResp_ClientTlsConfiguration { + if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ClientTlsConfiguration_); ok { + return x.ClientTlsConfiguration + } + return nil +} + +func (x *GetTlsConfigurationResp) GetServerTlsConfiguration() *GetTlsConfigurationResp_ServerTlsConfiguration { + if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ServerTlsConfiguration_); ok { + return x.ServerTlsConfiguration + } + return nil +} + +type isGetTlsConfigurationResp_TlsConfiguration interface { + isGetTlsConfigurationResp_TlsConfiguration() +} + +type GetTlsConfigurationResp_ClientTlsConfiguration_ struct { + ClientTlsConfiguration *GetTlsConfigurationResp_ClientTlsConfiguration `protobuf:"bytes,1,opt,name=client_tls_configuration,json=clientTlsConfiguration,proto3,oneof"` +} + +type GetTlsConfigurationResp_ServerTlsConfiguration_ struct { + ServerTlsConfiguration *GetTlsConfigurationResp_ServerTlsConfiguration `protobuf:"bytes,2,opt,name=server_tls_configuration,json=serverTlsConfiguration,proto3,oneof"` +} + +func 
(*GetTlsConfigurationResp_ClientTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { +} + +func (*GetTlsConfigurationResp_ServerTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { +} + +type OffloadPrivateKeyOperationReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The operation the private key is used for. + Operation OffloadPrivateKeyOperationReq_PrivateKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadPrivateKeyOperationReq_PrivateKeyOperation" json:"operation,omitempty"` + // The signature algorithm to be used for signing operations. + SignatureAlgorithm SignatureAlgorithm `protobuf:"varint,2,opt,name=signature_algorithm,json=signatureAlgorithm,proto3,enum=s2a.proto.v2.SignatureAlgorithm" json:"signature_algorithm,omitempty"` + // The input bytes to be signed or decrypted. + // + // Types that are assignable to InBytes: + // + // *OffloadPrivateKeyOperationReq_RawBytes + // *OffloadPrivateKeyOperationReq_Sha256Digest + // *OffloadPrivateKeyOperationReq_Sha384Digest + // *OffloadPrivateKeyOperationReq_Sha512Digest + InBytes isOffloadPrivateKeyOperationReq_InBytes `protobuf_oneof:"in_bytes"` +} + +func (x *OffloadPrivateKeyOperationReq) Reset() { + *x = OffloadPrivateKeyOperationReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadPrivateKeyOperationReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadPrivateKeyOperationReq) ProtoMessage() {} + +func (x *OffloadPrivateKeyOperationReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms 
+ } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadPrivateKeyOperationReq.ProtoReflect.Descriptor instead. +func (*OffloadPrivateKeyOperationReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5} +} + +func (x *OffloadPrivateKeyOperationReq) GetOperation() OffloadPrivateKeyOperationReq_PrivateKeyOperation { + if x != nil { + return x.Operation + } + return OffloadPrivateKeyOperationReq_UNSPECIFIED +} + +func (x *OffloadPrivateKeyOperationReq) GetSignatureAlgorithm() SignatureAlgorithm { + if x != nil { + return x.SignatureAlgorithm + } + return SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED +} + +func (m *OffloadPrivateKeyOperationReq) GetInBytes() isOffloadPrivateKeyOperationReq_InBytes { + if m != nil { + return m.InBytes + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetRawBytes() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_RawBytes); ok { + return x.RawBytes + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetSha256Digest() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha256Digest); ok { + return x.Sha256Digest + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetSha384Digest() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha384Digest); ok { + return x.Sha384Digest + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetSha512Digest() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha512Digest); ok { + return x.Sha512Digest + } + return nil +} + +type isOffloadPrivateKeyOperationReq_InBytes interface { + isOffloadPrivateKeyOperationReq_InBytes() +} + +type OffloadPrivateKeyOperationReq_RawBytes struct { + // Raw bytes to be hashed and signed, or decrypted. + RawBytes []byte `protobuf:"bytes,4,opt,name=raw_bytes,json=rawBytes,proto3,oneof"` +} + +type OffloadPrivateKeyOperationReq_Sha256Digest struct { + // A SHA256 hash to be signed. Must be 32 bytes. 
+ Sha256Digest []byte `protobuf:"bytes,5,opt,name=sha256_digest,json=sha256Digest,proto3,oneof"` +} + +type OffloadPrivateKeyOperationReq_Sha384Digest struct { + // A SHA384 hash to be signed. Must be 48 bytes. + Sha384Digest []byte `protobuf:"bytes,6,opt,name=sha384_digest,json=sha384Digest,proto3,oneof"` +} + +type OffloadPrivateKeyOperationReq_Sha512Digest struct { + // A SHA512 hash to be signed. Must be 64 bytes. + Sha512Digest []byte `protobuf:"bytes,7,opt,name=sha512_digest,json=sha512Digest,proto3,oneof"` +} + +func (*OffloadPrivateKeyOperationReq_RawBytes) isOffloadPrivateKeyOperationReq_InBytes() {} + +func (*OffloadPrivateKeyOperationReq_Sha256Digest) isOffloadPrivateKeyOperationReq_InBytes() {} + +func (*OffloadPrivateKeyOperationReq_Sha384Digest) isOffloadPrivateKeyOperationReq_InBytes() {} + +func (*OffloadPrivateKeyOperationReq_Sha512Digest) isOffloadPrivateKeyOperationReq_InBytes() {} + +type OffloadPrivateKeyOperationResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The signed or decrypted output bytes. 
+ OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` +} + +func (x *OffloadPrivateKeyOperationResp) Reset() { + *x = OffloadPrivateKeyOperationResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadPrivateKeyOperationResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadPrivateKeyOperationResp) ProtoMessage() {} + +func (x *OffloadPrivateKeyOperationResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadPrivateKeyOperationResp.ProtoReflect.Descriptor instead. +func (*OffloadPrivateKeyOperationResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{6} +} + +func (x *OffloadPrivateKeyOperationResp) GetOutBytes() []byte { + if x != nil { + return x.OutBytes + } + return nil +} + +type OffloadResumptionKeyOperationReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The operation the resumption key is used for. + Operation OffloadResumptionKeyOperationReq_ResumptionKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadResumptionKeyOperationReq_ResumptionKeyOperation" json:"operation,omitempty"` + // The bytes to be encrypted or decrypted. 
+ InBytes []byte `protobuf:"bytes,2,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` +} + +func (x *OffloadResumptionKeyOperationReq) Reset() { + *x = OffloadResumptionKeyOperationReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadResumptionKeyOperationReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadResumptionKeyOperationReq) ProtoMessage() {} + +func (x *OffloadResumptionKeyOperationReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadResumptionKeyOperationReq.ProtoReflect.Descriptor instead. +func (*OffloadResumptionKeyOperationReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7} +} + +func (x *OffloadResumptionKeyOperationReq) GetOperation() OffloadResumptionKeyOperationReq_ResumptionKeyOperation { + if x != nil { + return x.Operation + } + return OffloadResumptionKeyOperationReq_UNSPECIFIED +} + +func (x *OffloadResumptionKeyOperationReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +type OffloadResumptionKeyOperationResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encrypted or decrypted bytes. 
+ OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` +} + +func (x *OffloadResumptionKeyOperationResp) Reset() { + *x = OffloadResumptionKeyOperationResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadResumptionKeyOperationResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadResumptionKeyOperationResp) ProtoMessage() {} + +func (x *OffloadResumptionKeyOperationResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadResumptionKeyOperationResp.ProtoReflect.Descriptor instead. +func (*OffloadResumptionKeyOperationResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{8} +} + +func (x *OffloadResumptionKeyOperationResp) GetOutBytes() []byte { + if x != nil { + return x.OutBytes + } + return nil +} + +type ValidatePeerCertificateChainReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The verification mode that S2A MUST use to validate the peer certificate + // chain. 
+ Mode ValidatePeerCertificateChainReq_VerificationMode `protobuf:"varint,1,opt,name=mode,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainReq_VerificationMode" json:"mode,omitempty"` + // Types that are assignable to PeerOneof: + // + // *ValidatePeerCertificateChainReq_ClientPeer_ + // *ValidatePeerCertificateChainReq_ServerPeer_ + PeerOneof isValidatePeerCertificateChainReq_PeerOneof `protobuf_oneof:"peer_oneof"` +} + +func (x *ValidatePeerCertificateChainReq) Reset() { + *x = ValidatePeerCertificateChainReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainReq) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq.ProtoReflect.Descriptor instead. 
+func (*ValidatePeerCertificateChainReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9} +} + +func (x *ValidatePeerCertificateChainReq) GetMode() ValidatePeerCertificateChainReq_VerificationMode { + if x != nil { + return x.Mode + } + return ValidatePeerCertificateChainReq_UNSPECIFIED +} + +func (m *ValidatePeerCertificateChainReq) GetPeerOneof() isValidatePeerCertificateChainReq_PeerOneof { + if m != nil { + return m.PeerOneof + } + return nil +} + +func (x *ValidatePeerCertificateChainReq) GetClientPeer() *ValidatePeerCertificateChainReq_ClientPeer { + if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ClientPeer_); ok { + return x.ClientPeer + } + return nil +} + +func (x *ValidatePeerCertificateChainReq) GetServerPeer() *ValidatePeerCertificateChainReq_ServerPeer { + if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ServerPeer_); ok { + return x.ServerPeer + } + return nil +} + +type isValidatePeerCertificateChainReq_PeerOneof interface { + isValidatePeerCertificateChainReq_PeerOneof() +} + +type ValidatePeerCertificateChainReq_ClientPeer_ struct { + ClientPeer *ValidatePeerCertificateChainReq_ClientPeer `protobuf:"bytes,2,opt,name=client_peer,json=clientPeer,proto3,oneof"` +} + +type ValidatePeerCertificateChainReq_ServerPeer_ struct { + ServerPeer *ValidatePeerCertificateChainReq_ServerPeer `protobuf:"bytes,3,opt,name=server_peer,json=serverPeer,proto3,oneof"` +} + +func (*ValidatePeerCertificateChainReq_ClientPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} + +func (*ValidatePeerCertificateChainReq_ServerPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} + +type ValidatePeerCertificateChainResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The result of validating the peer certificate chain. 
+ ValidationResult ValidatePeerCertificateChainResp_ValidationResult `protobuf:"varint,1,opt,name=validation_result,json=validationResult,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainResp_ValidationResult" json:"validation_result,omitempty"` + // The validation details. This field is only populated when the validation + // result is NOT SUCCESS. + ValidationDetails string `protobuf:"bytes,2,opt,name=validation_details,json=validationDetails,proto3" json:"validation_details,omitempty"` + // The S2A context contains information from the peer certificate chain. + // + // The S2A context MAY be populated even if validation of the peer certificate + // chain fails. + Context *s2a_context_go_proto.S2AContext `protobuf:"bytes,3,opt,name=context,proto3" json:"context,omitempty"` +} + +func (x *ValidatePeerCertificateChainResp) Reset() { + *x = ValidatePeerCertificateChainResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainResp) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainResp.ProtoReflect.Descriptor instead. 
+func (*ValidatePeerCertificateChainResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10} +} + +func (x *ValidatePeerCertificateChainResp) GetValidationResult() ValidatePeerCertificateChainResp_ValidationResult { + if x != nil { + return x.ValidationResult + } + return ValidatePeerCertificateChainResp_UNSPECIFIED +} + +func (x *ValidatePeerCertificateChainResp) GetValidationDetails() string { + if x != nil { + return x.ValidationDetails + } + return "" +} + +func (x *ValidatePeerCertificateChainResp) GetContext() *s2a_context_go_proto.S2AContext { + if x != nil { + return x.Context + } + return nil +} + +type SessionReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The identity corresponding to the TLS configurations that MUST be used for + // the TLS handshake. + // + // If a managed identity already exists, the local identity and authentication + // mechanisms are ignored. If a managed identity doesn't exist and the local + // identity is not populated, S2A will try to deduce the managed identity to + // use from the SNI extension. If that also fails, S2A uses the default + // identity (if one exists). + LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The authentication mechanisms that the application wishes to use to + // authenticate to S2A, ordered by preference. S2A will always use the first + // authentication mechanism that matches the managed identity. 
+ AuthenticationMechanisms []*AuthenticationMechanism `protobuf:"bytes,2,rep,name=authentication_mechanisms,json=authenticationMechanisms,proto3" json:"authentication_mechanisms,omitempty"` + // Types that are assignable to ReqOneof: + // + // *SessionReq_GetTlsConfigurationReq + // *SessionReq_OffloadPrivateKeyOperationReq + // *SessionReq_OffloadResumptionKeyOperationReq + // *SessionReq_ValidatePeerCertificateChainReq + ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` +} + +func (x *SessionReq) Reset() { + *x = SessionReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionReq) ProtoMessage() {} + +func (x *SessionReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. 
+func (*SessionReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} +} + +func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *SessionReq) GetAuthenticationMechanisms() []*AuthenticationMechanism { + if x != nil { + return x.AuthenticationMechanisms + } + return nil +} + +func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { + if m != nil { + return m.ReqOneof + } + return nil +} + +func (x *SessionReq) GetGetTlsConfigurationReq() *GetTlsConfigurationReq { + if x, ok := x.GetReqOneof().(*SessionReq_GetTlsConfigurationReq); ok { + return x.GetTlsConfigurationReq + } + return nil +} + +func (x *SessionReq) GetOffloadPrivateKeyOperationReq() *OffloadPrivateKeyOperationReq { + if x, ok := x.GetReqOneof().(*SessionReq_OffloadPrivateKeyOperationReq); ok { + return x.OffloadPrivateKeyOperationReq + } + return nil +} + +func (x *SessionReq) GetOffloadResumptionKeyOperationReq() *OffloadResumptionKeyOperationReq { + if x, ok := x.GetReqOneof().(*SessionReq_OffloadResumptionKeyOperationReq); ok { + return x.OffloadResumptionKeyOperationReq + } + return nil +} + +func (x *SessionReq) GetValidatePeerCertificateChainReq() *ValidatePeerCertificateChainReq { + if x, ok := x.GetReqOneof().(*SessionReq_ValidatePeerCertificateChainReq); ok { + return x.ValidatePeerCertificateChainReq + } + return nil +} + +type isSessionReq_ReqOneof interface { + isSessionReq_ReqOneof() +} + +type SessionReq_GetTlsConfigurationReq struct { + // Requests the certificate chain and TLS configuration corresponding to the + // local identity, which the application MUST use to negotiate the TLS + // handshake. 
+ GetTlsConfigurationReq *GetTlsConfigurationReq `protobuf:"bytes,3,opt,name=get_tls_configuration_req,json=getTlsConfigurationReq,proto3,oneof"` +} + +type SessionReq_OffloadPrivateKeyOperationReq struct { + // Signs or decrypts the input bytes using a private key corresponding to + // the local identity in the request. + // + // WARNING: More than one OffloadPrivateKeyOperationReq may be sent to the + // S2Av2 by a server during a TLS 1.2 handshake. + OffloadPrivateKeyOperationReq *OffloadPrivateKeyOperationReq `protobuf:"bytes,4,opt,name=offload_private_key_operation_req,json=offloadPrivateKeyOperationReq,proto3,oneof"` +} + +type SessionReq_OffloadResumptionKeyOperationReq struct { + // Encrypts or decrypts the input bytes using a resumption key corresponding + // to the local identity in the request. + OffloadResumptionKeyOperationReq *OffloadResumptionKeyOperationReq `protobuf:"bytes,5,opt,name=offload_resumption_key_operation_req,json=offloadResumptionKeyOperationReq,proto3,oneof"` +} + +type SessionReq_ValidatePeerCertificateChainReq struct { + // Verifies the peer's certificate chain using + // (a) trust bundles corresponding to the local identity in the request, and + // (b) the verification mode in the request. + ValidatePeerCertificateChainReq *ValidatePeerCertificateChainReq `protobuf:"bytes,6,opt,name=validate_peer_certificate_chain_req,json=validatePeerCertificateChainReq,proto3,oneof"` +} + +func (*SessionReq_GetTlsConfigurationReq) isSessionReq_ReqOneof() {} + +func (*SessionReq_OffloadPrivateKeyOperationReq) isSessionReq_ReqOneof() {} + +func (*SessionReq_OffloadResumptionKeyOperationReq) isSessionReq_ReqOneof() {} + +func (*SessionReq_ValidatePeerCertificateChainReq) isSessionReq_ReqOneof() {} + +type SessionResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Status of the session response. 
+ // + // The status field is populated so that if an error occurs when making an + // individual request, then communication with the S2A may continue. If an + // error is returned directly (e.g. at the gRPC layer), then it may result + // that the bidirectional stream being closed. + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // Types that are assignable to RespOneof: + // + // *SessionResp_GetTlsConfigurationResp + // *SessionResp_OffloadPrivateKeyOperationResp + // *SessionResp_OffloadResumptionKeyOperationResp + // *SessionResp_ValidatePeerCertificateChainResp + RespOneof isSessionResp_RespOneof `protobuf_oneof:"resp_oneof"` +} + +func (x *SessionResp) Reset() { + *x = SessionResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionResp) ProtoMessage() {} + +func (x *SessionResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead. 
+func (*SessionResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{12} +} + +func (x *SessionResp) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +func (m *SessionResp) GetRespOneof() isSessionResp_RespOneof { + if m != nil { + return m.RespOneof + } + return nil +} + +func (x *SessionResp) GetGetTlsConfigurationResp() *GetTlsConfigurationResp { + if x, ok := x.GetRespOneof().(*SessionResp_GetTlsConfigurationResp); ok { + return x.GetTlsConfigurationResp + } + return nil +} + +func (x *SessionResp) GetOffloadPrivateKeyOperationResp() *OffloadPrivateKeyOperationResp { + if x, ok := x.GetRespOneof().(*SessionResp_OffloadPrivateKeyOperationResp); ok { + return x.OffloadPrivateKeyOperationResp + } + return nil +} + +func (x *SessionResp) GetOffloadResumptionKeyOperationResp() *OffloadResumptionKeyOperationResp { + if x, ok := x.GetRespOneof().(*SessionResp_OffloadResumptionKeyOperationResp); ok { + return x.OffloadResumptionKeyOperationResp + } + return nil +} + +func (x *SessionResp) GetValidatePeerCertificateChainResp() *ValidatePeerCertificateChainResp { + if x, ok := x.GetRespOneof().(*SessionResp_ValidatePeerCertificateChainResp); ok { + return x.ValidatePeerCertificateChainResp + } + return nil +} + +type isSessionResp_RespOneof interface { + isSessionResp_RespOneof() +} + +type SessionResp_GetTlsConfigurationResp struct { + // Contains the certificate chain and TLS configurations corresponding to + // the local identity. + GetTlsConfigurationResp *GetTlsConfigurationResp `protobuf:"bytes,2,opt,name=get_tls_configuration_resp,json=getTlsConfigurationResp,proto3,oneof"` +} + +type SessionResp_OffloadPrivateKeyOperationResp struct { + // Contains the signed or encrypted output bytes using the private key + // corresponding to the local identity. 
+ OffloadPrivateKeyOperationResp *OffloadPrivateKeyOperationResp `protobuf:"bytes,3,opt,name=offload_private_key_operation_resp,json=offloadPrivateKeyOperationResp,proto3,oneof"` +} + +type SessionResp_OffloadResumptionKeyOperationResp struct { + // Contains the encrypted or decrypted output bytes using the resumption key + // corresponding to the local identity. + OffloadResumptionKeyOperationResp *OffloadResumptionKeyOperationResp `protobuf:"bytes,4,opt,name=offload_resumption_key_operation_resp,json=offloadResumptionKeyOperationResp,proto3,oneof"` +} + +type SessionResp_ValidatePeerCertificateChainResp struct { + // Contains the validation result, peer identity and fingerprints of peer + // certificates. + ValidatePeerCertificateChainResp *ValidatePeerCertificateChainResp `protobuf:"bytes,5,opt,name=validate_peer_certificate_chain_resp,json=validatePeerCertificateChainResp,proto3,oneof"` +} + +func (*SessionResp_GetTlsConfigurationResp) isSessionResp_RespOneof() {} + +func (*SessionResp_OffloadPrivateKeyOperationResp) isSessionResp_RespOneof() {} + +func (*SessionResp_OffloadResumptionKeyOperationResp) isSessionResp_RespOneof() {} + +func (*SessionResp_ValidatePeerCertificateChainResp) isSessionResp_RespOneof() {} + +// Next ID: 8 +type GetTlsConfigurationResp_ClientTlsConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain that the client MUST use for the TLS handshake. + // It's a list of PEM-encoded certificates, ordered from leaf to root, + // excluding the root. + CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The minimum TLS version number that the client MUST use for the TLS + // handshake. If this field is not provided, the client MUST use the default + // minimum version of the client's TLS library. 
+ MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` + // The maximum TLS version number that the client MUST use for the TLS + // handshake. If this field is not provided, the client MUST use the default + // maximum version of the client's TLS library. + MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` + // The ordered list of TLS 1.0-1.2 ciphersuites that the client MAY offer to + // negotiate in the TLS handshake. + Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,6,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` + // The policy that dictates how the client negotiates ALPN during the TLS + // handshake. + AlpnPolicy *AlpnPolicy `protobuf:"bytes,7,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) Reset() { + *x = GetTlsConfigurationResp_ClientTlsConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationResp_ClientTlsConfiguration) ProtoMessage() {} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
GetTlsConfigurationResp_ClientTlsConfiguration.ProtoReflect.Descriptor instead. +func (*GetTlsConfigurationResp_ClientTlsConfiguration) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCertificateChain() []string { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.Ciphersuites + } + return nil +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { + if x != nil { + return x.AlpnPolicy + } + return nil +} + +// Next ID: 12 +type GetTlsConfigurationResp_ServerTlsConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain that the server MUST use for the TLS handshake. + // It's a list of PEM-encoded certificates, ordered from leaf to root, + // excluding the root. + CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The minimum TLS version number that the server MUST use for the TLS + // handshake. If this field is not provided, the server MUST use the default + // minimum version of the server's TLS library. 
+ MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` + // The maximum TLS version number that the server MUST use for the TLS + // handshake. If this field is not provided, the server MUST use the default + // maximum version of the server's TLS library. + MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` + // The ordered list of TLS 1.0-1.2 ciphersuites that the server MAY offer to + // negotiate in the TLS handshake. + Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,10,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` + // Whether to enable TLS resumption. + TlsResumptionEnabled bool `protobuf:"varint,6,opt,name=tls_resumption_enabled,json=tlsResumptionEnabled,proto3" json:"tls_resumption_enabled,omitempty"` + // Whether the server MUST request a client certificate (i.e. to negotiate + // TLS vs. mTLS). + RequestClientCertificate GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate `protobuf:"varint,7,opt,name=request_client_certificate,json=requestClientCertificate,proto3,enum=s2a.proto.v2.GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate" json:"request_client_certificate,omitempty"` + // Returns the maximum number of extra bytes that + // |OffloadResumptionKeyOperation| can add to the number of unencrypted + // bytes to form the encrypted bytes. + MaxOverheadOfTicketAead uint32 `protobuf:"varint,9,opt,name=max_overhead_of_ticket_aead,json=maxOverheadOfTicketAead,proto3" json:"max_overhead_of_ticket_aead,omitempty"` + // The policy that dictates how the server negotiates ALPN during the TLS + // handshake. 
+ AlpnPolicy *AlpnPolicy `protobuf:"bytes,11,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) Reset() { + *x = GetTlsConfigurationResp_ServerTlsConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationResp_ServerTlsConfiguration) ProtoMessage() {} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration.ProtoReflect.Descriptor instead. 
+func (*GetTlsConfigurationResp_ServerTlsConfiguration) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCertificateChain() []string { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.Ciphersuites + } + return nil +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetTlsResumptionEnabled() bool { + if x != nil { + return x.TlsResumptionEnabled + } + return false +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetRequestClientCertificate() GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { + if x != nil { + return x.RequestClientCertificate + } + return GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxOverheadOfTicketAead() uint32 { + if x != nil { + return x.MaxOverheadOfTicketAead + } + return 0 +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { + if x != nil { + return x.AlpnPolicy + } + return nil +} + +type ValidatePeerCertificateChainReq_ClientPeer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain to be verified. The chain MUST be a list of + // DER-encoded certificates, ordered from leaf to root, excluding the root. 
+ CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` +} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) Reset() { + *x = ValidatePeerCertificateChainReq_ClientPeer{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainReq_ClientPeer) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq_ClientPeer.ProtoReflect.Descriptor instead. +func (*ValidatePeerCertificateChainReq_ClientPeer) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} +} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) GetCertificateChain() [][]byte { + if x != nil { + return x.CertificateChain + } + return nil +} + +type ValidatePeerCertificateChainReq_ServerPeer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain to be verified. The chain MUST be a list of + // DER-encoded certificates, ordered from leaf to root, excluding the root. + CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The expected hostname of the server. 
+ ServerHostname string `protobuf:"bytes,2,opt,name=server_hostname,json=serverHostname,proto3" json:"server_hostname,omitempty"` + // The UnrestrictedClientPolicy specified by the user. + SerializedUnrestrictedClientPolicy []byte `protobuf:"bytes,3,opt,name=serialized_unrestricted_client_policy,json=serializedUnrestrictedClientPolicy,proto3" json:"serialized_unrestricted_client_policy,omitempty"` +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) Reset() { + *x = ValidatePeerCertificateChainReq_ServerPeer{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainReq_ServerPeer) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq_ServerPeer.ProtoReflect.Descriptor instead. 
+func (*ValidatePeerCertificateChainReq_ServerPeer) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 1} +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) GetCertificateChain() [][]byte { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) GetServerHostname() string { + if x != nil { + return x.ServerHostname + } + return "" +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) GetSerializedUnrestrictedClientPolicy() []byte { + if x != nil { + return x.SerializedUnrestrictedClientPolicy + } + return nil +} + +var File_internal_proto_v2_s2a_s2a_proto protoreflect.FileDescriptor + +var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, + 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, + 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 
0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, + 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, + 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, + 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 
0x73, 0x69, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, + 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, + 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, + 0x16, 0x73, 0x65, 
0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, + 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, + 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 
0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, + 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 
0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, + 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, + 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, + 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, + 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 
0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, + 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, + 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, + 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, + 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, + 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, + 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, + 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, + 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, + 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, + 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, + 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, + 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 
0x65, 0x71, 0x12, 0x5d, 0x0a, + 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, + 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, + 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, + 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, + 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, + 0x65, 
0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, + 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, + 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, + 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, + 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, + 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, + 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, + 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, + 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 
0x72, 0x12, 0x5b, + 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, + 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, + 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, + 0x65, 0x64, 0x43, 
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, + 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, + 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, + 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, + 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 
0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, + 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, + 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, + 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, + 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 
0x65, + 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, + 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, + 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, + 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 
0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, + 0x72, 
0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, + 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, + 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, + 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, + 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, + 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, + 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, + 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, + 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, + 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, + 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, + 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, + 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, + 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 
0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, + 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, + 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, + 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, + 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, + 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, + 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, + 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_v2_s2a_s2a_proto_rawDescOnce sync.Once + file_internal_proto_v2_s2a_s2a_proto_rawDescData = file_internal_proto_v2_s2a_s2a_proto_rawDesc +) + +func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { + file_internal_proto_v2_s2a_s2a_proto_rawDescOnce.Do(func() { + file_internal_proto_v2_s2a_s2a_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_s2a_proto_rawDescData) + }) + return file_internal_proto_v2_s2a_s2a_proto_rawDescData +} + +var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ + (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm + (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate + (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation + (OffloadResumptionKeyOperationReq_ResumptionKeyOperation)(0), // 3: s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation + (ValidatePeerCertificateChainReq_VerificationMode)(0), // 4: s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode + (ValidatePeerCertificateChainResp_ValidationResult)(0), // 5: s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult + (*AlpnPolicy)(nil), // 6: s2a.proto.v2.AlpnPolicy + (*AuthenticationMechanism)(nil), // 7: s2a.proto.v2.AuthenticationMechanism + (*Status)(nil), // 8: s2a.proto.v2.Status + (*GetTlsConfigurationReq)(nil), // 9: s2a.proto.v2.GetTlsConfigurationReq + (*GetTlsConfigurationResp)(nil), // 10: s2a.proto.v2.GetTlsConfigurationResp + (*OffloadPrivateKeyOperationReq)(nil), // 11: s2a.proto.v2.OffloadPrivateKeyOperationReq + (*OffloadPrivateKeyOperationResp)(nil), // 12: s2a.proto.v2.OffloadPrivateKeyOperationResp + (*OffloadResumptionKeyOperationReq)(nil), // 13: s2a.proto.v2.OffloadResumptionKeyOperationReq + (*OffloadResumptionKeyOperationResp)(nil), // 14: s2a.proto.v2.OffloadResumptionKeyOperationResp + (*ValidatePeerCertificateChainReq)(nil), // 15: s2a.proto.v2.ValidatePeerCertificateChainReq + (*ValidatePeerCertificateChainResp)(nil), // 16: 
s2a.proto.v2.ValidatePeerCertificateChainResp + (*SessionReq)(nil), // 17: s2a.proto.v2.SessionReq + (*SessionResp)(nil), // 18: s2a.proto.v2.SessionResp + (*GetTlsConfigurationResp_ClientTlsConfiguration)(nil), // 19: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration + (*GetTlsConfigurationResp_ServerTlsConfiguration)(nil), // 20: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration + (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer + (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer + (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol + (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity + (common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide + (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext + (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion + (common_go_proto.Ciphersuite)(0), // 28: s2a.proto.v2.Ciphersuite +} +var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ + 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol + 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity + 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide + 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration + 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration + 2, // 5: s2a.proto.v2.OffloadPrivateKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation + 0, // 6: s2a.proto.v2.OffloadPrivateKeyOperationReq.signature_algorithm:type_name -> s2a.proto.v2.SignatureAlgorithm + 3, // 7: 
s2a.proto.v2.OffloadResumptionKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation + 4, // 8: s2a.proto.v2.ValidatePeerCertificateChainReq.mode:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode + 21, // 9: s2a.proto.v2.ValidatePeerCertificateChainReq.client_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer + 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer + 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult + 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext + 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity + 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism + 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq + 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq + 13, // 17: s2a.proto.v2.SessionReq.offload_resumption_key_operation_req:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq + 15, // 18: s2a.proto.v2.SessionReq.validate_peer_certificate_chain_req:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq + 8, // 19: s2a.proto.v2.SessionResp.status:type_name -> s2a.proto.v2.Status + 10, // 20: s2a.proto.v2.SessionResp.get_tls_configuration_resp:type_name -> s2a.proto.v2.GetTlsConfigurationResp + 12, // 21: s2a.proto.v2.SessionResp.offload_private_key_operation_resp:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationResp + 14, // 22: s2a.proto.v2.SessionResp.offload_resumption_key_operation_resp:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationResp + 16, // 23: 
s2a.proto.v2.SessionResp.validate_peer_certificate_chain_resp:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp + 27, // 24: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion + 27, // 25: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion + 28, // 26: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite + 6, // 27: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy + 27, // 28: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion + 27, // 29: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion + 28, // 30: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite + 1, // 31: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.request_client_certificate:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate + 6, // 32: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy + 17, // 33: s2a.proto.v2.S2AService.SetUpSession:input_type -> s2a.proto.v2.SessionReq + 18, // 34: s2a.proto.v2.S2AService.SetUpSession:output_type -> s2a.proto.v2.SessionResp + 34, // [34:35] is the sub-list for method output_type + 33, // [33:34] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name +} + +func init() { file_internal_proto_v2_s2a_s2a_proto_init() } +func file_internal_proto_v2_s2a_s2a_proto_init() { + if File_internal_proto_v2_s2a_s2a_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AlpnPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticationMechanism); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffloadPrivateKeyOperationReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffloadPrivateKeyOperationResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*OffloadResumptionKeyOperationReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffloadResumptionKeyOperationResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*AuthenticationMechanism_Token)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil), + (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*OffloadPrivateKeyOperationReq_RawBytes)(nil), + (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil), + (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil), + (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{ + (*ValidatePeerCertificateChainReq_ClientPeer_)(nil), + (*ValidatePeerCertificateChainReq_ServerPeer_)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{ + (*SessionReq_GetTlsConfigurationReq)(nil), + (*SessionReq_OffloadPrivateKeyOperationReq)(nil), + (*SessionReq_OffloadResumptionKeyOperationReq)(nil), + (*SessionReq_ValidatePeerCertificateChainReq)(nil), + } + 
file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{ + (*SessionResp_GetTlsConfigurationResp)(nil), + (*SessionResp_OffloadPrivateKeyOperationResp)(nil), + (*SessionResp_OffloadResumptionKeyOperationResp)(nil), + (*SessionResp_ValidatePeerCertificateChainResp)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_v2_s2a_s2a_proto_rawDesc, + NumEnums: 6, + NumMessages: 17, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_internal_proto_v2_s2a_s2a_proto_goTypes, + DependencyIndexes: file_internal_proto_v2_s2a_s2a_proto_depIdxs, + EnumInfos: file_internal_proto_v2_s2a_s2a_proto_enumTypes, + MessageInfos: file_internal_proto_v2_s2a_s2a_proto_msgTypes, + }.Build() + File_internal_proto_v2_s2a_s2a_proto = out.File + file_internal_proto_v2_s2a_s2a_proto_rawDesc = nil + file_internal_proto_v2_s2a_s2a_proto_goTypes = nil + file_internal_proto_v2_s2a_s2a_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go new file mode 100644 index 0000000000..2566df6c30 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go @@ -0,0 +1,159 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.12 +// source: internal/proto/v2/s2a/s2a.proto + +package s2a_go_proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession" +) + +// S2AServiceClient is the client API for S2AService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type S2AServiceClient interface { + // SetUpSession is a bidirectional stream used by applications to offload + // operations from the TLS handshake. + SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) +} + +type s2AServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { + return &s2AServiceClient{cc} +} + +func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &s2AServiceSetUpSessionClient{stream} + return x, nil +} + +type S2AService_SetUpSessionClient interface { + Send(*SessionReq) error + Recv() (*SessionResp, error) + grpc.ClientStream +} + +type s2AServiceSetUpSessionClient struct { + grpc.ClientStream +} + +func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { + return x.ClientStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { + m := new(SessionResp) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AServiceServer is the server API for S2AService service. +// All implementations must embed UnimplementedS2AServiceServer +// for forward compatibility +type S2AServiceServer interface { + // SetUpSession is a bidirectional stream used by applications to offload + // operations from the TLS handshake. + SetUpSession(S2AService_SetUpSessionServer) error + mustEmbedUnimplementedS2AServiceServer() +} + +// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. +type UnimplementedS2AServiceServer struct { +} + +func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { + return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") +} +func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} + +// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to S2AServiceServer will +// result in compilation errors. 
+type UnsafeS2AServiceServer interface { + mustEmbedUnimplementedS2AServiceServer() +} + +func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { + s.RegisterService(&S2AService_ServiceDesc, srv) +} + +func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) +} + +type S2AService_SetUpSessionServer interface { + Send(*SessionResp) error + Recv() (*SessionReq, error) + grpc.ServerStream +} + +type s2AServiceSetUpSessionServer struct { + grpc.ServerStream +} + +func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { + return x.ServerStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { + m := new(SessionReq) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var S2AService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "s2a.proto.v2.S2AService", + HandlerType: (*S2AServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SetUpSession", + Handler: _S2AService_SetUpSession_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "internal/proto/v2/s2a/s2a.proto", +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go new file mode 100644 index 0000000000..486f4ec4f2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package aeadcrypter provides the interface for AEAD cipher implementations +// used by S2A's record protocol. +package aeadcrypter + +// S2AAEADCrypter is the interface for an AEAD cipher used by the S2A record +// protocol. +type S2AAEADCrypter interface { + // Encrypt encrypts the plaintext and computes the tag of dst and plaintext. + // dst and plaintext may fully overlap or not at all. + Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) + // Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may + // fully overlap or not at all. + Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) + // TagSize returns the tag size in bytes. + TagSize() int +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go new file mode 100644 index 0000000000..85c4e595d7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package aeadcrypter + +import ( + "crypto/aes" + "crypto/cipher" + "fmt" +) + +// Supported key sizes in bytes. +const ( + AES128GCMKeySize = 16 + AES256GCMKeySize = 32 +) + +// aesgcm is the struct that holds an AES-GCM cipher for the S2A AEAD crypter. +type aesgcm struct { + aead cipher.AEAD +} + +// NewAESGCM creates an AES-GCM crypter instance. Note that the key must be +// either 128 bits or 256 bits. +func NewAESGCM(key []byte) (S2AAEADCrypter, error) { + if len(key) != AES128GCMKeySize && len(key) != AES256GCMKeySize { + return nil, fmt.Errorf("%d or %d bytes, given: %d", AES128GCMKeySize, AES256GCMKeySize, len(key)) + } + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + a, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + return &aesgcm{aead: a}, nil +} + +// Encrypt is the encryption function. dst can contain bytes at the beginning of +// the ciphertext that will not be encrypted but will be authenticated. If dst +// has enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext may +// fully overlap or not at all. 
+func (s *aesgcm) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { + return encrypt(s.aead, dst, plaintext, nonce, aad) +} + +func (s *aesgcm) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { + return decrypt(s.aead, dst, ciphertext, nonce, aad) +} + +func (s *aesgcm) TagSize() int { + return TagSize +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go new file mode 100644 index 0000000000..214df4ca41 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package aeadcrypter + +import ( + "crypto/cipher" + "fmt" + + "golang.org/x/crypto/chacha20poly1305" +) + +// Supported key size in bytes. +const ( + Chacha20Poly1305KeySize = 32 +) + +// chachapoly is the struct that holds a CHACHA-POLY cipher for the S2A AEAD +// crypter. +type chachapoly struct { + aead cipher.AEAD +} + +// NewChachaPoly creates a Chacha-Poly crypter instance. Note that the key must +// be Chacha20Poly1305KeySize bytes in length. 
+func NewChachaPoly(key []byte) (S2AAEADCrypter, error) { + if len(key) != Chacha20Poly1305KeySize { + return nil, fmt.Errorf("%d bytes, given: %d", Chacha20Poly1305KeySize, len(key)) + } + c, err := chacha20poly1305.New(key) + if err != nil { + return nil, err + } + return &chachapoly{aead: c}, nil +} + +// Encrypt is the encryption function. dst can contain bytes at the beginning of +// the ciphertext that will not be encrypted but will be authenticated. If dst +// has enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext may +// fully overlap or not at all. +func (s *chachapoly) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { + return encrypt(s.aead, dst, plaintext, nonce, aad) +} + +func (s *chachapoly) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { + return decrypt(s.aead, dst, ciphertext, nonce, aad) +} + +func (s *chachapoly) TagSize() int { + return TagSize +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go new file mode 100644 index 0000000000..b3c36ad95d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go @@ -0,0 +1,92 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package aeadcrypter + +import ( + "crypto/cipher" + "fmt" +) + +const ( + // TagSize is the tag size in bytes for AES-128-GCM-SHA256, + // AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. + TagSize = 16 + // NonceSize is the size of the nonce in number of bytes for + // AES-128-GCM-SHA256, AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. + NonceSize = 12 + // SHA256DigestSize is the digest size of sha256 in bytes. + SHA256DigestSize = 32 + // SHA384DigestSize is the digest size of sha384 in bytes. + SHA384DigestSize = 48 +) + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return head, tail +} + +// encrypt is the encryption function for an AEAD crypter. aead determines +// the type of AEAD crypter. dst can contain bytes at the beginning of the +// ciphertext that will not be encrypted but will be authenticated. If dst has +// enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext may +// fully overlap or not at all. +func encrypt(aead cipher.AEAD, dst, plaintext, nonce, aad []byte) ([]byte, error) { + if len(nonce) != NonceSize { + return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) + } + // If we need to allocate an output buffer, we want to include space for + // the tag to avoid forcing the caller to reallocate as well. 
+ dlen := len(dst) + dst, out := sliceForAppend(dst, len(plaintext)+TagSize) + data := out[:len(plaintext)] + copy(data, plaintext) // data may fully overlap plaintext + + // Seal appends the ciphertext and the tag to its first argument and + // returns the updated slice. However, sliceForAppend above ensures that + // dst has enough capacity to avoid a reallocation and copy due to the + // append. + dst = aead.Seal(dst[:dlen], nonce, data, aad) + return dst, nil +} + +// decrypt is the decryption function for an AEAD crypter, where aead determines +// the type of AEAD crypter, and dst the destination bytes for the decrypted +// ciphertext. The dst buffer may fully overlap with plaintext or not at all. +func decrypt(aead cipher.AEAD, dst, ciphertext, nonce, aad []byte) ([]byte, error) { + if len(nonce) != NonceSize { + return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) + } + // If dst is equal to ciphertext[:0], ciphertext storage is reused. + plaintext, err := aead.Open(dst, nonce, ciphertext, aad) + if err != nil { + return nil, fmt.Errorf("message auth failed: %v", err) + } + return plaintext, nil +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go new file mode 100644 index 0000000000..ddeaa6d77d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package halfconn + +import ( + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + + s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" + "github.com/google/s2a-go/internal/record/internal/aeadcrypter" +) + +// ciphersuite is the interface for retrieving ciphersuite-specific information +// and utilities. +type ciphersuite interface { + // keySize returns the key size in bytes. This refers to the key used by + // the AEAD crypter. This is derived by calling HKDF expand on the traffic + // secret. + keySize() int + // nonceSize returns the nonce size in bytes. + nonceSize() int + // trafficSecretSize returns the traffic secret size in bytes. This refers + // to the secret used to derive the traffic key and nonce, as specified in + // https://tools.ietf.org/html/rfc8446#section-7. + trafficSecretSize() int + // hashFunction returns the hash function for the ciphersuite. + hashFunction() func() hash.Hash + // aeadCrypter takes a key and creates an AEAD crypter for the ciphersuite + // using that key. 
+ aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) +} + +func newCiphersuite(ciphersuite s2apb.Ciphersuite) (ciphersuite, error) { + switch ciphersuite { + case s2apb.Ciphersuite_AES_128_GCM_SHA256: + return &aesgcm128sha256{}, nil + case s2apb.Ciphersuite_AES_256_GCM_SHA384: + return &aesgcm256sha384{}, nil + case s2apb.Ciphersuite_CHACHA20_POLY1305_SHA256: + return &chachapolysha256{}, nil + default: + return nil, fmt.Errorf("unrecognized ciphersuite: %v", ciphersuite) + } +} + +// aesgcm128sha256 is the AES-128-GCM-SHA256 implementation of the ciphersuite +// interface. +type aesgcm128sha256 struct{} + +func (aesgcm128sha256) keySize() int { return aeadcrypter.AES128GCMKeySize } +func (aesgcm128sha256) nonceSize() int { return aeadcrypter.NonceSize } +func (aesgcm128sha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } +func (aesgcm128sha256) hashFunction() func() hash.Hash { return sha256.New } +func (aesgcm128sha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { + return aeadcrypter.NewAESGCM(key) +} + +// aesgcm256sha384 is the AES-256-GCM-SHA384 implementation of the ciphersuite +// interface. +type aesgcm256sha384 struct{} + +func (aesgcm256sha384) keySize() int { return aeadcrypter.AES256GCMKeySize } +func (aesgcm256sha384) nonceSize() int { return aeadcrypter.NonceSize } +func (aesgcm256sha384) trafficSecretSize() int { return aeadcrypter.SHA384DigestSize } +func (aesgcm256sha384) hashFunction() func() hash.Hash { return sha512.New384 } +func (aesgcm256sha384) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { + return aeadcrypter.NewAESGCM(key) +} + +// chachapolysha256 is the ChaChaPoly-SHA256 implementation of the ciphersuite +// interface. 
+type chachapolysha256 struct{} + +func (chachapolysha256) keySize() int { return aeadcrypter.Chacha20Poly1305KeySize } +func (chachapolysha256) nonceSize() int { return aeadcrypter.NonceSize } +func (chachapolysha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } +func (chachapolysha256) hashFunction() func() hash.Hash { return sha256.New } +func (chachapolysha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { + return aeadcrypter.NewChachaPoly(key) +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go new file mode 100644 index 0000000000..9499cdca75 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go @@ -0,0 +1,60 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package halfconn + +import "errors" + +// counter is a 64-bit counter. +type counter struct { + val uint64 + hasOverflowed bool +} + +// newCounter creates a new counter with the initial value set to val. +func newCounter(val uint64) counter { + return counter{val: val} +} + +// value returns the current value of the counter. 
+func (c *counter) value() (uint64, error) { + if c.hasOverflowed { + return 0, errors.New("counter has overflowed") + } + return c.val, nil +} + +// increment increments the counter and checks for overflow. +func (c *counter) increment() { + // If the counter is already invalid due to overflow, there is no need to + // increase it. We check for the hasOverflowed flag in the call to value(). + if c.hasOverflowed { + return + } + c.val++ + if c.val == 0 { + c.hasOverflowed = true + } +} + +// reset sets the counter value to zero and sets the hasOverflowed flag to +// false. +func (c *counter) reset() { + c.val = 0 + c.hasOverflowed = false +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go new file mode 100644 index 0000000000..e05f2c36a6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go @@ -0,0 +1,59 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package halfconn + +import ( + "fmt" + "hash" + + "golang.org/x/crypto/hkdf" +) + +// hkdfExpander is the interface for the HKDF expansion function; see +// https://tools.ietf.org/html/rfc5869 for details. 
its use in TLS 1.3 is +// specified in https://tools.ietf.org/html/rfc8446#section-7.2 +type hkdfExpander interface { + // expand takes a secret, a label, and the output length in bytes, and + // returns the resulting expanded key. + expand(secret, label []byte, length int) ([]byte, error) +} + +// defaultHKDFExpander is the default HKDF expander which uses Go's crypto/hkdf +// for HKDF expansion. +type defaultHKDFExpander struct { + h func() hash.Hash +} + +// newDefaultHKDFExpander creates an instance of the default HKDF expander +// using the given hash function. +func newDefaultHKDFExpander(h func() hash.Hash) hkdfExpander { + return &defaultHKDFExpander{h: h} +} + +func (d *defaultHKDFExpander) expand(secret, label []byte, length int) ([]byte, error) { + outBuf := make([]byte, length) + n, err := hkdf.Expand(d.h, secret, label).Read(outBuf) + if err != nil { + return nil, fmt.Errorf("hkdf.Expand.Read failed with error: %v", err) + } + if n < length { + return nil, fmt.Errorf("hkdf.Expand.Read returned unexpected length, got %d, want %d", n, length) + } + return outBuf, nil +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go new file mode 100644 index 0000000000..dff99ff594 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go @@ -0,0 +1,193 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package halfconn manages the inbound or outbound traffic of a TLS 1.3 +// connection. +package halfconn + +import ( + "fmt" + "sync" + + s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" + "github.com/google/s2a-go/internal/record/internal/aeadcrypter" + "golang.org/x/crypto/cryptobyte" +) + +// The constants below were taken from Section 7.2 and 7.3 in +// https://tools.ietf.org/html/rfc8446#section-7. They are used as the label +// in HKDF-Expand-Label. +const ( + tls13Key = "tls13 key" + tls13Nonce = "tls13 iv" + tls13Update = "tls13 traffic upd" +) + +// S2AHalfConnection stores the state of the TLS 1.3 connection in the +// inbound or outbound direction. +type S2AHalfConnection struct { + cs ciphersuite + expander hkdfExpander + // mutex guards sequence, aeadCrypter, trafficSecret, and nonce. + mutex sync.Mutex + aeadCrypter aeadcrypter.S2AAEADCrypter + sequence counter + trafficSecret []byte + nonce []byte +} + +// New creates a new instance of S2AHalfConnection given a ciphersuite and a +// traffic secret. 
+func New(ciphersuite s2apb.Ciphersuite, trafficSecret []byte, sequence uint64) (*S2AHalfConnection, error) { + cs, err := newCiphersuite(ciphersuite) + if err != nil { + return nil, fmt.Errorf("failed to create new ciphersuite: %v", ciphersuite) + } + if cs.trafficSecretSize() != len(trafficSecret) { + return nil, fmt.Errorf("supplied traffic secret must be %v bytes, given: %v bytes", cs.trafficSecretSize(), len(trafficSecret)) + } + + hc := &S2AHalfConnection{cs: cs, expander: newDefaultHKDFExpander(cs.hashFunction()), sequence: newCounter(sequence), trafficSecret: trafficSecret} + if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil { + return nil, fmt.Errorf("failed to create half connection using traffic secret: %v", err) + } + + return hc, nil +} + +// Encrypt encrypts the plaintext and computes the tag of dst and plaintext. +// dst and plaintext may fully overlap or not at all. Note that the sequence +// number will still be incremented on failure, unless the sequence has +// overflowed. +func (hc *S2AHalfConnection) Encrypt(dst, plaintext, aad []byte) ([]byte, error) { + hc.mutex.Lock() + sequence, err := hc.getAndIncrementSequence() + if err != nil { + hc.mutex.Unlock() + return nil, err + } + nonce := hc.maskedNonce(sequence) + crypter := hc.aeadCrypter + hc.mutex.Unlock() + return crypter.Encrypt(dst, plaintext, nonce, aad) +} + +// Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may +// fully overlap or not at all. Note that the sequence number will still be +// incremented on failure, unless the sequence has overflowed. 
+func (hc *S2AHalfConnection) Decrypt(dst, ciphertext, aad []byte) ([]byte, error) { + hc.mutex.Lock() + sequence, err := hc.getAndIncrementSequence() + if err != nil { + hc.mutex.Unlock() + return nil, err + } + nonce := hc.maskedNonce(sequence) + crypter := hc.aeadCrypter + hc.mutex.Unlock() + return crypter.Decrypt(dst, ciphertext, nonce, aad) +} + +// UpdateKey advances the traffic secret key, as specified in +// https://tools.ietf.org/html/rfc8446#section-7.2. In addition, it derives +// a new key and nonce, and resets the sequence number. +func (hc *S2AHalfConnection) UpdateKey() error { + hc.mutex.Lock() + defer hc.mutex.Unlock() + + var err error + hc.trafficSecret, err = hc.deriveSecret(hc.trafficSecret, []byte(tls13Update), hc.cs.trafficSecretSize()) + if err != nil { + return fmt.Errorf("failed to derive traffic secret: %v", err) + } + + if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil { + return fmt.Errorf("failed to update half connection: %v", err) + } + + hc.sequence.reset() + return nil +} + +// TagSize returns the tag size in bytes of the underlying AEAD crypter. +func (hc *S2AHalfConnection) TagSize() int { + return hc.aeadCrypter.TagSize() +} + +// updateCrypterAndNonce takes a new traffic secret and updates the crypter +// and nonce. Note that the mutex must be held while calling this function. +func (hc *S2AHalfConnection) updateCrypterAndNonce(newTrafficSecret []byte) error { + key, err := hc.deriveSecret(newTrafficSecret, []byte(tls13Key), hc.cs.keySize()) + if err != nil { + return fmt.Errorf("failed to update key: %v", err) + } + + hc.nonce, err = hc.deriveSecret(newTrafficSecret, []byte(tls13Nonce), hc.cs.nonceSize()) + if err != nil { + return fmt.Errorf("failed to update nonce: %v", err) + } + + hc.aeadCrypter, err = hc.cs.aeadCrypter(key) + if err != nil { + return fmt.Errorf("failed to update AEAD crypter: %v", err) + } + return nil +} + +// getAndIncrement returns the current sequence number and increments it. 
Note +// that the mutex must be held while calling this function. +func (hc *S2AHalfConnection) getAndIncrementSequence() (uint64, error) { + sequence, err := hc.sequence.value() + if err != nil { + return 0, err + } + hc.sequence.increment() + return sequence, nil +} + +// maskedNonce creates a copy of the nonce that is masked with the sequence +// number. Note that the mutex must be held while calling this function. +func (hc *S2AHalfConnection) maskedNonce(sequence uint64) []byte { + const uint64Size = 8 + nonce := make([]byte, len(hc.nonce)) + copy(nonce, hc.nonce) + for i := 0; i < uint64Size; i++ { + nonce[aeadcrypter.NonceSize-uint64Size+i] ^= byte(sequence >> uint64(56-uint64Size*i)) + } + return nonce +} + +// deriveSecret implements the Derive-Secret function, as specified in +// https://tools.ietf.org/html/rfc8446#section-7.1. +func (hc *S2AHalfConnection) deriveSecret(secret, label []byte, length int) ([]byte, error) { + var hkdfLabel cryptobyte.Builder + hkdfLabel.AddUint16(uint16(length)) + hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(label) + }) + // Append an empty `Context` field to the label, as specified in the RFC. + // The half connection does not use the `Context` field. 
+ hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte("")) + }) + hkdfLabelBytes, err := hkdfLabel.Bytes() + if err != nil { + return nil, fmt.Errorf("deriveSecret failed: %v", err) + } + return hc.expander.expand(secret, hkdfLabelBytes, length) +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/record.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/record.go new file mode 100644 index 0000000000..c60515510a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/record.go @@ -0,0 +1,757 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package record implements the TLS 1.3 record protocol used by the S2A +// transport credentials. +package record + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "net" + "sync" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + "github.com/google/s2a-go/internal/record/internal/halfconn" + "github.com/google/s2a-go/internal/tokenmanager" + "google.golang.org/grpc/grpclog" +) + +// recordType is the `ContentType` as described in +// https://tools.ietf.org/html/rfc8446#section-5.1. 
+type recordType byte + +const ( + alert recordType = 21 + handshake recordType = 22 + applicationData recordType = 23 +) + +// keyUpdateRequest is the `KeyUpdateRequest` as described in +// https://tools.ietf.org/html/rfc8446#section-4.6.3. +type keyUpdateRequest byte + +const ( + updateNotRequested keyUpdateRequest = 0 + updateRequested keyUpdateRequest = 1 +) + +// alertDescription is the `AlertDescription` as described in +// https://tools.ietf.org/html/rfc8446#section-6. +type alertDescription byte + +const ( + closeNotify alertDescription = 0 +) + +// sessionTicketState is used to determine whether session tickets have not yet +// been received, are in the process of being received, or have finished +// receiving. +type sessionTicketState byte + +const ( + ticketsNotYetReceived sessionTicketState = 0 + receivingTickets sessionTicketState = 1 + notReceivingTickets sessionTicketState = 2 +) + +const ( + // The TLS 1.3-specific constants below (tlsRecordMaxPlaintextSize, + // tlsRecordHeaderSize, tlsRecordTypeSize) were taken from + // https://tools.ietf.org/html/rfc8446#section-5.1. + + // tlsRecordMaxPlaintextSize is the maximum size in bytes of the plaintext + // in a single TLS 1.3 record. + tlsRecordMaxPlaintextSize = 16384 // 2^14 + // tlsRecordTypeSize is the size in bytes of the TLS 1.3 record type. + tlsRecordTypeSize = 1 + // tlsTagSize is the size in bytes of the tag of the following three + // ciphersuites: AES-128-GCM-SHA256, AES-256-GCM-SHA384, + // CHACHA20-POLY1305-SHA256. + tlsTagSize = 16 + // tlsRecordMaxPayloadSize is the maximum size in bytes of the payload in a + // single TLS 1.3 record. This is the maximum size of the plaintext plus the + // record type byte and 16 bytes of the tag. + tlsRecordMaxPayloadSize = tlsRecordMaxPlaintextSize + tlsRecordTypeSize + tlsTagSize + // tlsRecordHeaderTypeSize is the size in bytes of the TLS 1.3 record + // header type. 
+ tlsRecordHeaderTypeSize = 1 + // tlsRecordHeaderLegacyRecordVersionSize is the size in bytes of the TLS + // 1.3 record header legacy record version. + tlsRecordHeaderLegacyRecordVersionSize = 2 + // tlsRecordHeaderPayloadLengthSize is the size in bytes of the TLS 1.3 + // record header payload length. + tlsRecordHeaderPayloadLengthSize = 2 + // tlsRecordHeaderSize is the size in bytes of the TLS 1.3 record header. + tlsRecordHeaderSize = tlsRecordHeaderTypeSize + tlsRecordHeaderLegacyRecordVersionSize + tlsRecordHeaderPayloadLengthSize + // tlsRecordMaxSize + tlsRecordMaxSize = tlsRecordMaxPayloadSize + tlsRecordHeaderSize + // tlsApplicationData is the application data type of the TLS 1.3 record + // header. + tlsApplicationData = 23 + // tlsLegacyRecordVersion is the legacy record version of the TLS record. + tlsLegacyRecordVersion = 3 + // tlsAlertSize is the size in bytes of an alert of TLS 1.3. + tlsAlertSize = 2 +) + +const ( + // These are TLS 1.3 handshake-specific constants. + + // tlsHandshakeNewSessionTicketType is the prefix of a handshake new session + // ticket message of TLS 1.3. + tlsHandshakeNewSessionTicketType = 4 + // tlsHandshakeKeyUpdateType is the prefix of a handshake key update message + // of TLS 1.3. + tlsHandshakeKeyUpdateType = 24 + // tlsHandshakeMsgTypeSize is the size in bytes of the TLS 1.3 handshake + // message type field. + tlsHandshakeMsgTypeSize = 1 + // tlsHandshakeLengthSize is the size in bytes of the TLS 1.3 handshake + // message length field. + tlsHandshakeLengthSize = 3 + // tlsHandshakeKeyUpdateMsgSize is the size in bytes of the TLS 1.3 + // handshake key update message. + tlsHandshakeKeyUpdateMsgSize = 1 + // tlsHandshakePrefixSize is the size in bytes of the prefix of the TLS 1.3 + // handshake message. + tlsHandshakePrefixSize = 4 + // tlsMaxSessionTicketSize is the maximum size of a NewSessionTicket message + // in TLS 1.3. 
This is the sum of the max sizes of all the fields in the + // NewSessionTicket struct specified in + // https://tools.ietf.org/html/rfc8446#section-4.6.1. + tlsMaxSessionTicketSize = 131338 +) + +const ( + // outBufMaxRecords is the maximum number of records that can fit in the + // ourRecordsBuf buffer. + outBufMaxRecords = 16 + // outBufMaxSize is the maximum size (in bytes) of the outRecordsBuf buffer. + outBufMaxSize = outBufMaxRecords * tlsRecordMaxSize + // maxAllowedTickets is the maximum number of session tickets that are + // allowed. The number of tickets are limited to ensure that the size of the + // ticket queue does not grow indefinitely. S2A also keeps a limit on the + // number of tickets that it caches. + maxAllowedTickets = 5 +) + +// preConstructedKeyUpdateMsg holds the key update message. This is needed as an +// optimization so that the same message does not need to be constructed every +// time a key update message is sent. +var preConstructedKeyUpdateMsg = buildKeyUpdateRequest() + +// conn represents a secured TLS connection. It implements the net.Conn +// interface. +type conn struct { + net.Conn + // inConn is the half connection responsible for decrypting incoming bytes. + inConn *halfconn.S2AHalfConnection + // outConn is the half connection responsible for encrypting outgoing bytes. + outConn *halfconn.S2AHalfConnection + // pendingApplicationData holds data that has been read from the connection + // and decrypted, but has not yet been returned by Read. + pendingApplicationData []byte + // unusedBuf holds data read from the network that has not yet been + // decrypted. This data might not consist of a complete record. It may + // consist of several records, the last of which could be incomplete. + unusedBuf []byte + // outRecordsBuf is a buffer used to store outgoing TLS records before + // they are written to the network. + outRecordsBuf []byte + // nextRecord stores the next record info in the unusedBuf buffer. 
+ nextRecord []byte + // overheadSize is the overhead size in bytes of each TLS 1.3 record, which + // is computed as overheadSize = header size + record type byte + tag size. + // Note that there is no padding by zeros in the overhead calculation. + overheadSize int + // readMutex guards against concurrent calls to Read. This is required since + // Close may be called during a Read. + readMutex sync.Mutex + // writeMutex guards against concurrent calls to Write. This is required + // since Close may be called during a Write, and also because a key update + // message may be written during a Read. + writeMutex sync.Mutex + // handshakeBuf holds handshake messages while they are being processed. + handshakeBuf []byte + // ticketState is the current processing state of the session tickets. + ticketState sessionTicketState + // sessionTickets holds the completed session tickets until they are sent to + // the handshaker service for processing. + sessionTickets [][]byte + // ticketSender sends session tickets to the S2A handshaker service. + ticketSender s2aTicketSender + // callComplete is a channel that blocks closing the record protocol until a + // pending call to the S2A completes. + callComplete chan bool +} + +// ConnParameters holds the parameters used for creating a new conn object. +type ConnParameters struct { + // NetConn is the TCP connection to the peer. This parameter is required. + NetConn net.Conn + // Ciphersuite is the TLS ciphersuite negotiated by the S2A handshaker + // service. This parameter is required. + Ciphersuite commonpb.Ciphersuite + // TLSVersion is the TLS version number negotiated by the S2A handshaker + // service. This parameter is required. + TLSVersion commonpb.TLSVersion + // InTrafficSecret is the traffic secret used to derive the session key for + // the inbound direction. This parameter is required. 
+ InTrafficSecret []byte + // OutTrafficSecret is the traffic secret used to derive the session key + // for the outbound direction. This parameter is required. + OutTrafficSecret []byte + // UnusedBuf is the data read from the network that has not yet been + // decrypted. This parameter is optional. If not provided, then no + // application data was sent in the same flight of messages as the final + // handshake message. + UnusedBuf []byte + // InSequence is the sequence number of the next, incoming, TLS record. + // This parameter is required. + InSequence uint64 + // OutSequence is the sequence number of the next, outgoing, TLS record. + // This parameter is required. + OutSequence uint64 + // HSAddr stores the address of the S2A handshaker service. This parameter + // is optional. If not provided, then TLS resumption is disabled. + HSAddr string + // ConnectionId is the connection identifier that was created and sent by + // S2A at the end of a handshake. + ConnectionID uint64 + // LocalIdentity is the local identity that was used by S2A during session + // setup and included in the session result. + LocalIdentity *commonpb.Identity + // EnsureProcessSessionTickets allows users to wait and ensure that all + // available session tickets are sent to S2A before a process completes. + EnsureProcessSessionTickets *sync.WaitGroup +} + +// NewConn creates a TLS record protocol that wraps the TCP connection. 
+func NewConn(o *ConnParameters) (net.Conn, error) { + if o == nil { + return nil, errors.New("conn options must not be nil") + } + if o.TLSVersion != commonpb.TLSVersion_TLS1_3 { + return nil, errors.New("TLS version must be TLS 1.3") + } + + inConn, err := halfconn.New(o.Ciphersuite, o.InTrafficSecret, o.InSequence) + if err != nil { + return nil, fmt.Errorf("failed to create inbound half connection: %v", err) + } + outConn, err := halfconn.New(o.Ciphersuite, o.OutTrafficSecret, o.OutSequence) + if err != nil { + return nil, fmt.Errorf("failed to create outbound half connection: %v", err) + } + + // The tag size for the in/out connections should be the same. + overheadSize := tlsRecordHeaderSize + tlsRecordTypeSize + inConn.TagSize() + var unusedBuf []byte + if o.UnusedBuf == nil { + // We pre-allocate unusedBuf to be of size + // 2*tlsRecordMaxSize-1 during initialization. We only read from the + // network into unusedBuf when unusedBuf does not contain a complete + // record and the incomplete record is at most tlsRecordMaxSize-1 + // (bytes). And we read at most tlsRecordMaxSize bytes of data from the + // network into unusedBuf at one time. Therefore, 2*tlsRecordMaxSize-1 + // is large enough to buffer data read from the network. + unusedBuf = make([]byte, 0, 2*tlsRecordMaxSize-1) + } else { + unusedBuf = make([]byte, len(o.UnusedBuf)) + copy(unusedBuf, o.UnusedBuf) + } + + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + grpclog.Infof("failed to create single token access token manager: %v", err) + } + + s2aConn := &conn{ + Conn: o.NetConn, + inConn: inConn, + outConn: outConn, + unusedBuf: unusedBuf, + outRecordsBuf: make([]byte, tlsRecordMaxSize), + nextRecord: unusedBuf, + overheadSize: overheadSize, + ticketState: ticketsNotYetReceived, + // Pre-allocate the buffer for one session ticket message and the max + // plaintext size. This is the largest size that handshakeBuf will need + // to hold. 
The largest incomplete handshake message is the + // [handshake header size] + [max session ticket size] - 1. + // Then, tlsRecordMaxPlaintextSize is the maximum size that will be + // appended to the handshakeBuf before the handshake message is + // completed. Therefore, the buffer size below should be large enough to + // buffer any handshake messages. + handshakeBuf: make([]byte, 0, tlsHandshakePrefixSize+tlsMaxSessionTicketSize+tlsRecordMaxPlaintextSize-1), + ticketSender: &ticketSender{ + hsAddr: o.HSAddr, + connectionID: o.ConnectionID, + localIdentity: o.LocalIdentity, + tokenManager: tokenManager, + ensureProcessSessionTickets: o.EnsureProcessSessionTickets, + }, + callComplete: make(chan bool), + } + return s2aConn, nil +} + +// Read reads and decrypts a TLS 1.3 record from the underlying connection, and +// copies any application data received from the peer into b. If the size of the +// payload is greater than len(b), Read retains the remaining bytes in an +// internal buffer, and subsequent calls to Read will read from this buffer +// until it is exhausted. At most 1 TLS record worth of application data is +// written to b for each call to Read. +// +// Note that for the user to efficiently call this method, the user should +// ensure that the buffer b is allocated such that the buffer does not have any +// unused segments. This can be done by calling Read via io.ReadFull, which +// continually calls Read until the specified buffer has been filled. Also note +// that the user should close the connection via Close() if an error is thrown +// by a call to Read. +func (p *conn) Read(b []byte) (n int, err error) { + p.readMutex.Lock() + defer p.readMutex.Unlock() + // Check if p.pendingApplication data has leftover application data from + // the previous call to Read. + if len(p.pendingApplicationData) == 0 { + // Read a full record from the wire. 
+ record, err := p.readFullRecord() + if err != nil { + return 0, err + } + // Now we have a complete record, so split the header and validate it + // The TLS record is split into 2 pieces: the record header and the + // payload. The payload has the following form: + // [payload] = [ciphertext of application data] + // + [ciphertext of record type byte] + // + [(optionally) ciphertext of padding by zeros] + // + [tag] + header, payload, err := splitAndValidateHeader(record) + if err != nil { + return 0, err + } + // Decrypt the ciphertext. + p.pendingApplicationData, err = p.inConn.Decrypt(payload[:0], payload, header) + if err != nil { + return 0, err + } + // Remove the padding by zeros and the record type byte from the + // p.pendingApplicationData buffer. + msgType, err := p.stripPaddingAndType() + if err != nil { + return 0, err + } + // Check that the length of the plaintext after stripping the padding + // and record type byte is under the maximum plaintext size. + if len(p.pendingApplicationData) > tlsRecordMaxPlaintextSize { + return 0, errors.New("plaintext size larger than maximum") + } + // The expected message types are application data, alert, and + // handshake. For application data, the bytes are directly copied into + // b. For an alert, the type of the alert is checked and the connection + // is closed on a close notify alert. For a handshake message, the + // handshake message type is checked. The handshake message type can be + // a key update type, for which we advance the traffic secret, and a + // new session ticket type, for which we send the received ticket to S2A + // for processing. 
+ switch msgType { + case applicationData: + if len(p.handshakeBuf) > 0 { + return 0, errors.New("application data received while processing fragmented handshake messages") + } + if p.ticketState == receivingTickets { + p.ticketState = notReceivingTickets + grpclog.Infof("Sending session tickets to S2A.") + p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) + } + case alert: + return 0, p.handleAlertMessage() + case handshake: + if err = p.handleHandshakeMessage(); err != nil { + return 0, err + } + return 0, nil + default: + return 0, errors.New("unknown record type") + } + } + // Write as much application data as possible to b, the output buffer. + n = copy(b, p.pendingApplicationData) + p.pendingApplicationData = p.pendingApplicationData[n:] + return n, nil +} + +// Write divides b into segments of size tlsRecordMaxPlaintextSize, builds a +// TLS 1.3 record (of type "application data") from each segment, and sends +// the record to the peer. It returns the number of plaintext bytes that were +// successfully sent to the peer. +func (p *conn) Write(b []byte) (n int, err error) { + p.writeMutex.Lock() + defer p.writeMutex.Unlock() + return p.writeTLSRecord(b, tlsApplicationData) +} + +// writeTLSRecord divides b into segments of size maxPlaintextBytesPerRecord, +// builds a TLS 1.3 record (of type recordType) from each segment, and sends +// the record to the peer. It returns the number of plaintext bytes that were +// successfully sent to the peer. +func (p *conn) writeTLSRecord(b []byte, recordType byte) (n int, err error) { + // Create a record of only header, record type, and tag if given empty + // byte array. + if len(b) == 0 { + recordEndIndex, _, err := p.buildRecord(b, recordType, 0) + if err != nil { + return 0, err + } + + // Write the bytes stored in outRecordsBuf to p.Conn. Since we return + // the number of plaintext bytes written without overhead, we will + // always return 0 while p.Conn.Write returns the entire record length. 
+ _, err = p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) + return 0, err + } + + numRecords := int(math.Ceil(float64(len(b)) / float64(tlsRecordMaxPlaintextSize))) + totalRecordsSize := len(b) + numRecords*p.overheadSize + partialBSize := len(b) + if totalRecordsSize > outBufMaxSize { + totalRecordsSize = outBufMaxSize + partialBSize = outBufMaxRecords * tlsRecordMaxPlaintextSize + } + if len(p.outRecordsBuf) < totalRecordsSize { + p.outRecordsBuf = make([]byte, totalRecordsSize) + } + for bStart := 0; bStart < len(b); bStart += partialBSize { + bEnd := bStart + partialBSize + if bEnd > len(b) { + bEnd = len(b) + } + partialB := b[bStart:bEnd] + recordEndIndex := 0 + for len(partialB) > 0 { + recordEndIndex, partialB, err = p.buildRecord(partialB, recordType, recordEndIndex) + if err != nil { + // Return the amount of bytes written prior to the error. + return bStart, err + } + } + // Write the bytes stored in outRecordsBuf to p.Conn. If there is an + // error, calculate the total number of plaintext bytes of complete + // records successfully written to the peer and return it. + nn, err := p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) + if err != nil { + numberOfCompletedRecords := int(math.Floor(float64(nn) / float64(tlsRecordMaxSize))) + return bStart + numberOfCompletedRecords*tlsRecordMaxPlaintextSize, err + } + } + return len(b), nil +} + +// buildRecord builds a TLS 1.3 record of type recordType from plaintext, +// and writes the record to outRecordsBuf at recordStartIndex. The record will +// have at most tlsRecordMaxPlaintextSize bytes of payload. It returns the +// index of outRecordsBuf where the current record ends, as well as any +// remaining plaintext bytes. +func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex int) (n int, remainingPlaintext []byte, err error) { + // Construct the payload, which consists of application data and record type. 
+ dataLen := len(plaintext) + if dataLen > tlsRecordMaxPlaintextSize { + dataLen = tlsRecordMaxPlaintextSize + } + remainingPlaintext = plaintext[dataLen:] + newRecordBuf := p.outRecordsBuf[recordStartIndex:] + + copy(newRecordBuf[tlsRecordHeaderSize:], plaintext[:dataLen]) + newRecordBuf[tlsRecordHeaderSize+dataLen] = recordType + payload := newRecordBuf[tlsRecordHeaderSize : tlsRecordHeaderSize+dataLen+1] // 1 is for the recordType. + // Construct the header. + newRecordBuf[0] = tlsApplicationData + newRecordBuf[1] = tlsLegacyRecordVersion + newRecordBuf[2] = tlsLegacyRecordVersion + binary.BigEndian.PutUint16(newRecordBuf[3:], uint16(len(payload)+tlsTagSize)) + header := newRecordBuf[:tlsRecordHeaderSize] + + // Encrypt the payload using header as aad. + encryptedPayload, err := p.outConn.Encrypt(newRecordBuf[tlsRecordHeaderSize:][:0], payload, header) + if err != nil { + return 0, plaintext, err + } + recordStartIndex += len(header) + len(encryptedPayload) + return recordStartIndex, remainingPlaintext, nil +} + +func (p *conn) Close() error { + p.readMutex.Lock() + defer p.readMutex.Unlock() + p.writeMutex.Lock() + defer p.writeMutex.Unlock() + // If p.ticketState is equal to notReceivingTickets, then S2A has + // been sent a flight of session tickets, and we must wait for the + // call to S2A to complete before closing the record protocol. + if p.ticketState == notReceivingTickets { + <-p.callComplete + grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") + } + return p.Conn.Close() +} + +// stripPaddingAndType strips the padding by zeros and record type from +// p.pendingApplicationData and returns the record type. 
Note that +// p.pendingApplicationData should be of the form: +// [application data] + [record type byte] + [trailing zeros] +func (p *conn) stripPaddingAndType() (recordType, error) { + if len(p.pendingApplicationData) == 0 { + return 0, errors.New("application data had length 0") + } + i := len(p.pendingApplicationData) - 1 + // Search for the index of the record type byte. + for i > 0 { + if p.pendingApplicationData[i] != 0 { + break + } + i-- + } + rt := recordType(p.pendingApplicationData[i]) + p.pendingApplicationData = p.pendingApplicationData[:i] + return rt, nil +} + +// readFullRecord reads from the wire until a record is completed and returns +// the full record. +func (p *conn) readFullRecord() (fullRecord []byte, err error) { + fullRecord, p.nextRecord, err = parseReadBuffer(p.nextRecord, tlsRecordMaxPayloadSize) + if err != nil { + return nil, err + } + // Check whether the next record to be decrypted has been completely + // received. + if len(fullRecord) == 0 { + copy(p.unusedBuf, p.nextRecord) + p.unusedBuf = p.unusedBuf[:len(p.nextRecord)] + // Always copy next incomplete record to the beginning of the + // unusedBuf buffer and reset nextRecord to it. + p.nextRecord = p.unusedBuf + } + // Keep reading from the wire until we have a complete record. + for len(fullRecord) == 0 { + if len(p.unusedBuf) == cap(p.unusedBuf) { + tmp := make([]byte, len(p.unusedBuf), cap(p.unusedBuf)+tlsRecordMaxSize) + copy(tmp, p.unusedBuf) + p.unusedBuf = tmp + } + n, err := p.Conn.Read(p.unusedBuf[len(p.unusedBuf):min(cap(p.unusedBuf), len(p.unusedBuf)+tlsRecordMaxSize)]) + if err != nil { + return nil, err + } + p.unusedBuf = p.unusedBuf[:len(p.unusedBuf)+n] + fullRecord, p.nextRecord, err = parseReadBuffer(p.unusedBuf, tlsRecordMaxPayloadSize) + if err != nil { + return nil, err + } + } + return fullRecord, nil +} + +// parseReadBuffer parses the provided buffer and returns a full record and any +// remaining bytes in that buffer. 
If the record is incomplete, nil is returned +// for the first return value and the given byte buffer is returned for the +// second return value. The length of the payload specified by the header should +// not be greater than maxLen, otherwise an error is returned. Note that this +// function does not allocate or copy any buffers. +func parseReadBuffer(b []byte, maxLen uint16) (fullRecord, remaining []byte, err error) { + // If the header is not complete, return the provided buffer as remaining + // buffer. + if len(b) < tlsRecordHeaderSize { + return nil, b, nil + } + msgLenField := b[tlsRecordHeaderTypeSize+tlsRecordHeaderLegacyRecordVersionSize : tlsRecordHeaderSize] + length := binary.BigEndian.Uint16(msgLenField) + if length > maxLen { + return nil, nil, fmt.Errorf("record length larger than the limit %d", maxLen) + } + if len(b) < int(length)+tlsRecordHeaderSize { + // Record is not complete yet. + return nil, b, nil + } + return b[:tlsRecordHeaderSize+length], b[tlsRecordHeaderSize+length:], nil +} + +// splitAndValidateHeader splits the header from the payload in the TLS 1.3 +// record and returns them. Note that the header is checked for validity, and an +// error is returned when an invalid header is parsed. Also note that this +// function does not allocate or copy any buffers. +func splitAndValidateHeader(record []byte) (header, payload []byte, err error) { + if len(record) < tlsRecordHeaderSize { + return nil, nil, fmt.Errorf("record was smaller than the header size") + } + header = record[:tlsRecordHeaderSize] + payload = record[tlsRecordHeaderSize:] + if header[0] != tlsApplicationData { + return nil, nil, fmt.Errorf("incorrect type in the header") + } + // Check the legacy record version, which should be 0x03, 0x03. + if header[1] != 0x03 || header[2] != 0x03 { + return nil, nil, fmt.Errorf("incorrect legacy record version in the header") + } + return header, payload, nil +} + +// handleAlertMessage handles an alert message. 
+func (p *conn) handleAlertMessage() error { + if len(p.pendingApplicationData) != tlsAlertSize { + return errors.New("invalid alert message size") + } + alertType := p.pendingApplicationData[1] + // Clear the body of the alert message. + p.pendingApplicationData = p.pendingApplicationData[:0] + if alertType == byte(closeNotify) { + return errors.New("received a close notify alert") + } + // TODO(matthewstevenson88): Add support for more alert types. + return fmt.Errorf("received an unrecognized alert type: %v", alertType) +} + +// parseHandshakeHeader parses a handshake message from the handshake buffer. +// It returns the message type, the message length, the message, the raw message +// that includes the type and length bytes and a flag indicating whether the +// handshake message has been fully parsed. i.e. whether the entire handshake +// message was in the handshake buffer. +func (p *conn) parseHandshakeMsg() (msgType byte, msgLen uint32, msg []byte, rawMsg []byte, ok bool) { + // Handle the case where the 4 byte handshake header is fragmented. + if len(p.handshakeBuf) < tlsHandshakePrefixSize { + return 0, 0, nil, nil, false + } + msgType = p.handshakeBuf[0] + msgLen = bigEndianInt24(p.handshakeBuf[tlsHandshakeMsgTypeSize : tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize]) + if msgLen > uint32(len(p.handshakeBuf)-tlsHandshakePrefixSize) { + return 0, 0, nil, nil, false + } + msg = p.handshakeBuf[tlsHandshakePrefixSize : tlsHandshakePrefixSize+msgLen] + rawMsg = p.handshakeBuf[:tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize+msgLen] + p.handshakeBuf = p.handshakeBuf[tlsHandshakePrefixSize+msgLen:] + return msgType, msgLen, msg, rawMsg, true +} + +// handleHandshakeMessage handles a handshake message. Note that the first +// complete handshake message from the handshake buffer is removed, if it +// exists. +func (p *conn) handleHandshakeMessage() error { + // Copy the pending application data to the handshake buffer. 
At this point, + // we are guaranteed that the pending application data contains only parts + // of a handshake message. + p.handshakeBuf = append(p.handshakeBuf, p.pendingApplicationData...) + p.pendingApplicationData = p.pendingApplicationData[:0] + // Several handshake messages may be coalesced into a single record. + // Continue reading them until the handshake buffer is empty. + for len(p.handshakeBuf) > 0 { + handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg() + if !ok { + // The handshake could not be fully parsed, so read in another + // record and try again later. + break + } + switch handshakeMsgType { + case tlsHandshakeKeyUpdateType: + if msgLen != tlsHandshakeKeyUpdateMsgSize { + return errors.New("invalid handshake key update message length") + } + if len(p.handshakeBuf) != 0 { + return errors.New("key update message must be the last message of a handshake record") + } + if err := p.handleKeyUpdateMsg(msg); err != nil { + return err + } + case tlsHandshakeNewSessionTicketType: + // Ignore tickets that are received after a batch of tickets has + // been sent to S2A. + if p.ticketState == notReceivingTickets { + continue + } + if p.ticketState == ticketsNotYetReceived { + p.ticketState = receivingTickets + } + p.sessionTickets = append(p.sessionTickets, rawMsg) + if len(p.sessionTickets) == maxAllowedTickets { + p.ticketState = notReceivingTickets + grpclog.Infof("Sending session tickets to S2A.") + p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) + } + default: + return errors.New("unknown handshake message type") + } + } + return nil +} + +func buildKeyUpdateRequest() []byte { + b := make([]byte, tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize) + b[0] = tlsHandshakeKeyUpdateType + b[1] = 0 + b[2] = 0 + b[3] = tlsHandshakeKeyUpdateMsgSize + b[4] = byte(updateNotRequested) + return b +} + +// handleKeyUpdateMsg handles a key update message. 
+func (p *conn) handleKeyUpdateMsg(msg []byte) error { + keyUpdateRequest := msg[0] + if keyUpdateRequest != byte(updateNotRequested) && + keyUpdateRequest != byte(updateRequested) { + return errors.New("invalid handshake key update message") + } + if err := p.inConn.UpdateKey(); err != nil { + return err + } + // Send a key update message back to the peer if requested. + if keyUpdateRequest == byte(updateRequested) { + p.writeMutex.Lock() + defer p.writeMutex.Unlock() + n, err := p.writeTLSRecord(preConstructedKeyUpdateMsg, byte(handshake)) + if err != nil { + return err + } + if n != tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize { + return errors.New("key update request message wrote less bytes than expected") + } + if err = p.outConn.UpdateKey(); err != nil { + return err + } + } + return nil +} + +// bidEndianInt24 converts the given byte buffer of at least size 3 and +// outputs the resulting 24 bit integer as a uint32. This is needed because +// TLS 1.3 requires 3 byte integers, and the binary.BigEndian package does +// not provide a way to transform a byte buffer into a 3 byte integer. +func bigEndianInt24(b []byte) uint32 { + _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16 +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/ticketsender.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/ticketsender.go new file mode 100644 index 0000000000..33fa3c55d4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/record/ticketsender.go @@ -0,0 +1,176 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package record + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/google/s2a-go/internal/handshaker/service" + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" + "github.com/google/s2a-go/internal/tokenmanager" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" +) + +// sessionTimeout is the timeout for creating a session with the S2A handshaker +// service. +const sessionTimeout = time.Second * 5 + +// s2aTicketSender sends session tickets to the S2A handshaker service. +type s2aTicketSender interface { + // sendTicketsToS2A sends the given session tickets to the S2A handshaker + // service. + sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) +} + +// ticketStream is the stream used to send and receive session information. +type ticketStream interface { + Send(*s2apb.SessionReq) error + Recv() (*s2apb.SessionResp, error) +} + +type ticketSender struct { + // hsAddr stores the address of the S2A handshaker service. + hsAddr string + // connectionID is the connection identifier that was created and sent by + // S2A at the end of a handshake. + connectionID uint64 + // localIdentity is the local identity that was used by S2A during session + // setup and included in the session result. + localIdentity *commonpb.Identity + // tokenManager manages access tokens for authenticating to S2A. 
+ tokenManager tokenmanager.AccessTokenManager + // ensureProcessSessionTickets allows users to wait and ensure that all + // available session tickets are sent to S2A before a process completes. + ensureProcessSessionTickets *sync.WaitGroup +} + +// sendTicketsToS2A sends the given sessionTickets to the S2A handshaker +// service. This is done asynchronously and writes to the error logs if an error +// occurs. +func (t *ticketSender) sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) { + // Note that the goroutine is in the function rather than at the caller + // because the fake ticket sender used for testing must run synchronously + // so that the session tickets can be accessed from it after the tests have + // been run. + if t.ensureProcessSessionTickets != nil { + t.ensureProcessSessionTickets.Add(1) + } + go func() { + if err := func() error { + defer func() { + if t.ensureProcessSessionTickets != nil { + t.ensureProcessSessionTickets.Done() + } + }() + hsConn, err := service.Dial(t.hsAddr) + if err != nil { + return err + } + client := s2apb.NewS2AServiceClient(hsConn) + ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) + defer cancel() + session, err := client.SetUpSession(ctx) + if err != nil { + return err + } + defer func() { + if err := session.CloseSend(); err != nil { + grpclog.Error(err) + } + }() + return t.writeTicketsToStream(session, sessionTickets) + }(); err != nil { + grpclog.Errorf("failed to send resumption tickets to S2A with identity: %v, %v", + t.localIdentity, err) + } + callComplete <- true + close(callComplete) + }() +} + +// writeTicketsToStream writes the given session tickets to the given stream. 
+func (t *ticketSender) writeTicketsToStream(stream ticketStream, sessionTickets [][]byte) error { + if err := stream.Send( + &s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_ResumptionTicket{ + ResumptionTicket: &s2apb.ResumptionTicketReq{ + InBytes: sessionTickets, + ConnectionId: t.connectionID, + LocalIdentity: t.localIdentity, + }, + }, + AuthMechanisms: t.getAuthMechanisms(), + }, + ); err != nil { + return err + } + sessionResp, err := stream.Recv() + if err != nil { + return err + } + if sessionResp.GetStatus().GetCode() != uint32(codes.OK) { + return fmt.Errorf("s2a session ticket response had error status: %v, %v", + sessionResp.GetStatus().GetCode(), sessionResp.GetStatus().GetDetails()) + } + return nil +} + +func (t *ticketSender) getAuthMechanisms() []*s2apb.AuthenticationMechanism { + if t.tokenManager == nil { + return nil + } + // First handle the special case when no local identity has been provided + // by the application. In this case, an AuthenticationMechanism with no local + // identity will be sent. + if t.localIdentity == nil { + token, err := t.tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("unable to get token for empty local identity: %v", err) + return nil + } + return []*s2apb.AuthenticationMechanism{ + { + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } + } + + // Next, handle the case where the application (or the S2A) has specified + // a local identity. 
+ token, err := t.tokenManager.Token(t.localIdentity) + if err != nil { + grpclog.Infof("unable to get token for local identity %v: %v", t.localIdentity, err) + return nil + } + return []*s2apb.AuthenticationMechanism{ + { + Identity: t.localIdentity, + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go new file mode 100644 index 0000000000..ec96ba3b6a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tokenmanager provides tokens for authenticating to S2A. +package tokenmanager + +import ( + "fmt" + "os" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" +) + +const ( + s2aAccessTokenEnvironmentVariable = "S2A_ACCESS_TOKEN" +) + +// AccessTokenManager manages tokens for authenticating to S2A. +type AccessTokenManager interface { + // DefaultToken returns a token that an application with no specified local + // identity must use to authenticate to S2A. 
+ DefaultToken() (token string, err error) + // Token returns a token that an application with local identity equal to + // identity must use to authenticate to S2A. + Token(identity *commonpb.Identity) (token string, err error) +} + +type singleTokenAccessTokenManager struct { + token string +} + +// NewSingleTokenAccessTokenManager returns a new AccessTokenManager instance +// that will always manage the same token. +// +// The token to be managed is read from the s2aAccessTokenEnvironmentVariable +// environment variable. If this environment variable is not set, then this +// function returns an error. +func NewSingleTokenAccessTokenManager() (AccessTokenManager, error) { + token, variableExists := os.LookupEnv(s2aAccessTokenEnvironmentVariable) + if !variableExists { + return nil, fmt.Errorf("%s environment variable is not set", s2aAccessTokenEnvironmentVariable) + } + return &singleTokenAccessTokenManager{token: token}, nil +} + +// DefaultToken always returns the token managed by the +// singleTokenAccessTokenManager. +func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { + return m.token, nil +} + +// Token always returns the token managed by the singleTokenAccessTokenManager. 
+func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { + return m.token, nil +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/README.md b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/README.md new file mode 100644 index 0000000000..3806d1e9cc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/README.md @@ -0,0 +1 @@ +**This directory has the implementation of the S2Av2's gRPC-Go client libraries** diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go new file mode 100644 index 0000000000..cc811879b5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go @@ -0,0 +1,122 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package certverifier offloads verifications to S2Av2. +package certverifier + +import ( + "crypto/x509" + "fmt" + + "github.com/google/s2a-go/stream" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +// VerifyClientCertificateChain builds a SessionReq, sends it to S2Av2 and +// receives a SessionResp. 
+func VerifyClientCertificateChain(verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + // Offload verification to S2Av2. + if grpclog.V(1) { + grpclog.Infof("Sending request to S2Av2 for client peer cert chain validation.") + } + if err := s2AStream.Send(&s2av2pb.SessionReq{ + ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ + ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ + Mode: verificationMode, + PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer_{ + ClientPeer: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer{ + CertificateChain: rawCerts, + }, + }, + }, + }, + }); err != nil { + grpclog.Infof("Failed to send request to S2Av2 for client peer cert chain validation.") + return err + } + + // Get the response from S2Av2. + resp, err := s2AStream.Recv() + if err != nil { + grpclog.Infof("Failed to receive client peer cert chain validation response from S2Av2.") + return err + } + + // Parse the response. + if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return fmt.Errorf("failed to offload client cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + + } + + if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { + return fmt.Errorf("client cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) + } + + return nil + } +} + +// VerifyServerCertificateChain builds a SessionReq, sends it to S2Av2 and +// receives a SessionResp. 
+func VerifyServerCertificateChain(hostname string, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream, serverAuthorizationPolicy []byte) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + // Offload verification to S2Av2. + if grpclog.V(1) { + grpclog.Infof("Sending request to S2Av2 for server peer cert chain validation.") + } + if err := s2AStream.Send(&s2av2pb.SessionReq{ + ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ + ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ + Mode: verificationMode, + PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer_{ + ServerPeer: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer{ + CertificateChain: rawCerts, + ServerHostname: hostname, + SerializedUnrestrictedClientPolicy: serverAuthorizationPolicy, + }, + }, + }, + }, + }); err != nil { + grpclog.Infof("Failed to send request to S2Av2 for server peer cert chain validation.") + return err + } + + // Get the response from S2Av2. + resp, err := s2AStream.Recv() + if err != nil { + grpclog.Infof("Failed to receive server peer cert chain validation response from S2Av2.") + return err + } + + // Parse the response. 
+ if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return fmt.Errorf("failed to offload server cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + } + + if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { + return fmt.Errorf("server cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) + } + + return nil + } +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der new file mode 100644 index 0000000000..958f3cfadd Binary files /dev/null and b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der differ diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der new file mode 100644 index 0000000000..d2817641ba Binary files /dev/null and b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der differ diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der new file mode 100644 index 0000000000..d8c3710c85 Binary files /dev/null and b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der differ diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der 
b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der new file mode 100644 index 0000000000..dae619c097 Binary files /dev/null and b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der differ diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der new file mode 100644 index 0000000000..ce7f8d31d6 Binary files /dev/null and b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der differ diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der new file mode 100644 index 0000000000..04b0d73600 Binary files /dev/null and b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der differ diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go new file mode 100644 index 0000000000..e7478d43fb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go @@ -0,0 +1,186 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package remotesigner offloads private key operations to S2Av2. +package remotesigner + +import ( + "crypto" + "crypto/rsa" + "crypto/x509" + "fmt" + "io" + + "github.com/google/s2a-go/stream" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +// remoteSigner implementes the crypto.Signer interface. +type remoteSigner struct { + leafCert *x509.Certificate + s2AStream stream.S2AStream +} + +// New returns an instance of RemoteSigner, an implementation of the +// crypto.Signer interface. 
+func New(leafCert *x509.Certificate, s2AStream stream.S2AStream) crypto.Signer { + return &remoteSigner{leafCert, s2AStream} +} + +func (s *remoteSigner) Public() crypto.PublicKey { + return s.leafCert.PublicKey +} + +func (s *remoteSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + signatureAlgorithm, err := getSignatureAlgorithm(opts, s.leafCert) + if err != nil { + return nil, err + } + + req, err := getSignReq(signatureAlgorithm, digest) + if err != nil { + return nil, err + } + if grpclog.V(1) { + grpclog.Infof("Sending request to S2Av2 for signing operation.") + } + if err := s.s2AStream.Send(&s2av2pb.SessionReq{ + ReqOneof: &s2av2pb.SessionReq_OffloadPrivateKeyOperationReq{ + OffloadPrivateKeyOperationReq: req, + }, + }); err != nil { + grpclog.Infof("Failed to send request to S2Av2 for signing operation.") + return nil, err + } + + resp, err := s.s2AStream.Recv() + if err != nil { + grpclog.Infof("Failed to receive signing operation response from S2Av2.") + return nil, err + } + + if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return nil, fmt.Errorf("failed to offload signing with private key to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + } + + return resp.GetOffloadPrivateKeyOperationResp().GetOutBytes(), nil +} + +// getCert returns the leafCert field in s. +func (s *remoteSigner) getCert() *x509.Certificate { + return s.leafCert +} + +// getStream returns the s2AStream field in s. 
+func (s *remoteSigner) getStream() stream.S2AStream { + return s.s2AStream +} + +func getSignReq(signatureAlgorithm s2av2pb.SignatureAlgorithm, digest []byte) (*s2av2pb.OffloadPrivateKeyOperationReq, error) { + if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256) { + return &s2av2pb.OffloadPrivateKeyOperationReq{ + Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, + SignatureAlgorithm: signatureAlgorithm, + InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha256Digest{ + Sha256Digest: digest, + }, + }, nil + } else if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384) { + return &s2av2pb.OffloadPrivateKeyOperationReq{ + Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, + SignatureAlgorithm: signatureAlgorithm, + InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha384Digest{ + Sha384Digest: digest, + }, + }, nil + } else if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ED25519) { + return &s2av2pb.OffloadPrivateKeyOperationReq{ + Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, + SignatureAlgorithm: signatureAlgorithm, + InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha512Digest{ + Sha512Digest: digest, + }, + }, nil + } else { + return nil, fmt.Errorf("unknown signature algorithm: %v", signatureAlgorithm) + } +} + +// getSignatureAlgorithm returns the signature 
algorithm that S2A must use when +// performing a signing operation that has been offloaded by an application +// using the crypto/tls libraries. +func getSignatureAlgorithm(opts crypto.SignerOpts, leafCert *x509.Certificate) (s2av2pb.SignatureAlgorithm, error) { + if opts == nil || leafCert == nil { + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") + } + switch leafCert.PublicKeyAlgorithm { + case x509.RSA: + if rsaPSSOpts, ok := opts.(*rsa.PSSOptions); ok { + return rsaPSSAlgorithm(rsaPSSOpts) + } + return rsaPPKCS1Algorithm(opts) + case x509.ECDSA: + return ecdsaAlgorithm(opts) + case x509.Ed25519: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ED25519, nil + default: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm: %q", leafCert.PublicKeyAlgorithm) + } +} + +func rsaPSSAlgorithm(opts *rsa.PSSOptions) (s2av2pb.SignatureAlgorithm, error) { + switch opts.HashFunc() { + case crypto.SHA256: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256, nil + case crypto.SHA384: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384, nil + case crypto.SHA512: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512, nil + default: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") + } +} + +func rsaPPKCS1Algorithm(opts crypto.SignerOpts) (s2av2pb.SignatureAlgorithm, error) { + switch opts.HashFunc() { + case crypto.SHA256: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256, nil + case crypto.SHA384: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384, nil + case crypto.SHA512: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512, nil + default: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") + } +} + +func ecdsaAlgorithm(opts crypto.SignerOpts) (s2av2pb.SignatureAlgorithm, 
error) { + switch opts.HashFunc() { + case crypto.SHA256: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256, nil + case crypto.SHA384: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384, nil + case crypto.SHA512: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512, nil + default: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") + } +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der new file mode 100644 index 0000000000..d8c3710c85 Binary files /dev/null and b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der differ diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem new file mode 100644 index 0000000000..493a5a2648 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 
+a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 +OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 +RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK +P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 +HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu +0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 +EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 +/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA +QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ +nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD +X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco +pKklVz0= +-----END CERTIFICATE----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem new file mode 100644 index 0000000000..55a7f10c74 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF +l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj ++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G +4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA +xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh +68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ +/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL +Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA +VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 +9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH 
+MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt +aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq +xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx +2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv +EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z +aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq +udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs +VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm +56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT +GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V +Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm +HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q +BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH +qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh +GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= +-----END RSA PRIVATE KEY----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der new file mode 100644 index 0000000000..04b0d73600 Binary files /dev/null and b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der differ diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem new file mode 100644 index 0000000000..0f98322c72 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL 
+BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT +fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ +qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE +xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es +Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 +Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM +ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR +e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X +POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl +AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg +odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ +PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN +Dhm6uZM= +-----END CERTIFICATE----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem new file mode 100644 index 0000000000..81afea783d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs +8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO 
+QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk +XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA +Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc +gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf +LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl +jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 +4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q +Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P +nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 +drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE +duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 +L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG +06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm +eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD +uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 +lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL +a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb +hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ +7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j +r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 +eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD +B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz +7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== +-----END RSA PRIVATE KEY----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/s2av2.go new file mode 100644 index 0000000000..ff172883f2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -0,0 +1,354 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * 
you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package v2 provides the S2Av2 transport credentials used by a gRPC +// application. +package v2 + +import ( + "context" + "crypto/tls" + "errors" + "net" + "os" + "time" + + "github.com/golang/protobuf/proto" + "github.com/google/s2a-go/fallback" + "github.com/google/s2a-go/internal/handshaker/service" + "github.com/google/s2a-go/internal/tokenmanager" + "github.com/google/s2a-go/internal/v2/tlsconfigstore" + "github.com/google/s2a-go/stream" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +const ( + s2aSecurityProtocol = "tls" + defaultS2ATimeout = 3 * time.Second +) + +// An environment variable, which sets the timeout enforced on the connection to the S2A service for handshake. +const s2aTimeoutEnv = "S2A_TIMEOUT" + +type s2av2TransportCreds struct { + info *credentials.ProtocolInfo + isClient bool + serverName string + s2av2Address string + tokenManager *tokenmanager.AccessTokenManager + // localIdentity should only be used by the client. + localIdentity *commonpbv1.Identity + // localIdentities should only be used by the server. 
+ localIdentities []*commonpbv1.Identity + verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode + fallbackClientHandshake fallback.ClientHandshake + getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + serverAuthorizationPolicy []byte +} + +// NewClientCreds returns a client-side transport credentials object that uses +// the S2Av2 to establish a secure connection with a server. +func NewClientCreds(s2av2Address string, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { + // Create an AccessTokenManager instance to use to authenticate to S2Av2. + accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + + creds := &s2av2TransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + isClient: true, + serverName: "", + s2av2Address: s2av2Address, + localIdentity: localIdentity, + verificationMode: verificationMode, + fallbackClientHandshake: fallbackClientHandshakeFunc, + getS2AStream: getS2AStream, + serverAuthorizationPolicy: serverAuthorizationPolicy, + } + if err != nil { + creds.tokenManager = nil + } else { + creds.tokenManager = &accessTokenManager + } + if grpclog.V(1) { + grpclog.Info("Created client S2Av2 transport credentials.") + } + return creds, nil +} + +// NewServerCreds returns a server-side transport credentials object that uses +// the S2Av2 to establish a secure connection with a client. 
+func NewServerCreds(s2av2Address string, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { + // Create an AccessTokenManager instance to use to authenticate to S2Av2. + accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + creds := &s2av2TransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + isClient: false, + s2av2Address: s2av2Address, + localIdentities: localIdentities, + verificationMode: verificationMode, + getS2AStream: getS2AStream, + } + if err != nil { + creds.tokenManager = nil + } else { + creds.tokenManager = &accessTokenManager + } + if grpclog.V(1) { + grpclog.Info("Created server S2Av2 transport credentials.") + } + return creds, nil +} + +// ClientHandshake performs a client-side mTLS handshake using the S2Av2. +func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if !c.isClient { + return nil, nil, errors.New("client handshake called using server transport credentials") + } + // Remove the port from serverAuthority. 
+ serverName := removeServerNamePort(serverAuthority) + timeoutCtx, cancel := context.WithTimeout(ctx, GetS2ATimeout()) + defer cancel() + s2AStream, err := createStream(timeoutCtx, c.s2av2Address, c.getS2AStream) + if err != nil { + grpclog.Infof("Failed to connect to S2Av2: %v", err) + if c.fallbackClientHandshake != nil { + return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) + } + return nil, nil, err + } + defer s2AStream.CloseSend() + if grpclog.V(1) { + grpclog.Infof("Connected to S2Av2.") + } + var config *tls.Config + + var tokenManager tokenmanager.AccessTokenManager + if c.tokenManager == nil { + tokenManager = nil + } else { + tokenManager = *c.tokenManager + } + + if c.serverName == "" { + config, err = tlsconfigstore.GetTLSConfigurationForClient(serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) + if err != nil { + grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) + if c.fallbackClientHandshake != nil { + return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) + } + return nil, nil, err + } + } else { + config, err = tlsconfigstore.GetTLSConfigurationForClient(c.serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) + if err != nil { + grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) + if c.fallbackClientHandshake != nil { + return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) + } + return nil, nil, err + } + } + if grpclog.V(1) { + grpclog.Infof("Got client TLS config from S2Av2.") + } + creds := credentials.NewTLS(config) + + conn, authInfo, err := creds.ClientHandshake(ctx, serverName, rawConn) + if err != nil { + grpclog.Infof("Failed to do client handshake using S2Av2: %v", err) + if c.fallbackClientHandshake != nil { + return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) + } + return nil, nil, err + } + grpclog.Infof("Successfully done client 
handshake using S2Av2 to: %s", serverName) + + return conn, authInfo, err +} + +// ServerHandshake performs a server-side mTLS handshake using the S2Av2. +func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if c.isClient { + return nil, nil, errors.New("server handshake called using client transport credentials") + } + ctx, cancel := context.WithTimeout(context.Background(), GetS2ATimeout()) + defer cancel() + s2AStream, err := createStream(ctx, c.s2av2Address, c.getS2AStream) + if err != nil { + grpclog.Infof("Failed to connect to S2Av2: %v", err) + return nil, nil, err + } + defer s2AStream.CloseSend() + if grpclog.V(1) { + grpclog.Infof("Connected to S2Av2.") + } + + var tokenManager tokenmanager.AccessTokenManager + if c.tokenManager == nil { + tokenManager = nil + } else { + tokenManager = *c.tokenManager + } + + config, err := tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode) + if err != nil { + grpclog.Infof("Failed to get server TLS config from S2Av2: %v", err) + return nil, nil, err + } + if grpclog.V(1) { + grpclog.Infof("Got server TLS config from S2Av2.") + } + creds := credentials.NewTLS(config) + return creds.ServerHandshake(rawConn) +} + +// Info returns protocol info of s2av2TransportCreds. +func (c *s2av2TransportCreds) Info() credentials.ProtocolInfo { + return *c.info +} + +// Clone makes a deep copy of s2av2TransportCreds. 
+func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials { + info := *c.info + serverName := c.serverName + fallbackClientHandshake := c.fallbackClientHandshake + + s2av2Address := c.s2av2Address + var tokenManager tokenmanager.AccessTokenManager + if c.tokenManager == nil { + tokenManager = nil + } else { + tokenManager = *c.tokenManager + } + verificationMode := c.verificationMode + var localIdentity *commonpbv1.Identity + if c.localIdentity != nil { + localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) + } + var localIdentities []*commonpbv1.Identity + if c.localIdentities != nil { + localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) + for i, localIdentity := range c.localIdentities { + localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) + } + } + creds := &s2av2TransportCreds{ + info: &info, + isClient: c.isClient, + serverName: serverName, + fallbackClientHandshake: fallbackClientHandshake, + s2av2Address: s2av2Address, + localIdentity: localIdentity, + localIdentities: localIdentities, + verificationMode: verificationMode, + } + if c.tokenManager == nil { + creds.tokenManager = nil + } else { + creds.tokenManager = &tokenManager + } + return creds +} + +// NewClientTLSConfig returns a tls.Config instance that uses S2Av2 to establish a TLS connection as +// a client. The tls.Config MUST only be used to establish a single TLS connection. 
+func NewClientTLSConfig( + ctx context.Context, + s2av2Address string, + tokenManager tokenmanager.AccessTokenManager, + verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, + serverName string, + serverAuthorizationPolicy []byte) (*tls.Config, error) { + s2AStream, err := createStream(ctx, s2av2Address, nil) + if err != nil { + grpclog.Infof("Failed to connect to S2Av2: %v", err) + return nil, err + } + + return tlsconfigstore.GetTLSConfigurationForClient(removeServerNamePort(serverName), s2AStream, tokenManager, nil, verificationMode, serverAuthorizationPolicy) +} + +// OverrideServerName sets the ServerName in the s2av2TransportCreds protocol +// info. The ServerName MUST be a hostname. +func (c *s2av2TransportCreds) OverrideServerName(serverNameOverride string) error { + serverName := removeServerNamePort(serverNameOverride) + c.info.ServerName = serverName + c.serverName = serverName + return nil +} + +// Remove the trailing port from server name. +func removeServerNamePort(serverName string) string { + name, _, err := net.SplitHostPort(serverName) + if err != nil { + name = serverName + } + return name +} + +type s2AGrpcStream struct { + stream s2av2pb.S2AService_SetUpSessionClient +} + +func (x s2AGrpcStream) Send(m *s2av2pb.SessionReq) error { + return x.stream.Send(m) +} + +func (x s2AGrpcStream) Recv() (*s2av2pb.SessionResp, error) { + return x.stream.Recv() +} + +func (x s2AGrpcStream) CloseSend() error { + return x.stream.CloseSend() +} + +func createStream(ctx context.Context, s2av2Address string, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { + if getS2AStream != nil { + return getS2AStream(ctx, s2av2Address) + } + // TODO(rmehta19): Consider whether to close the connection to S2Av2. 
+ conn, err := service.Dial(s2av2Address) + if err != nil { + return nil, err + } + client := s2av2pb.NewS2AServiceClient(conn) + gRPCStream, err := client.SetUpSession(ctx, []grpc.CallOption{}...) + if err != nil { + return nil, err + } + return &s2AGrpcStream{ + stream: gRPCStream, + }, nil +} + +// GetS2ATimeout returns the timeout enforced on the connection to the S2A service for handshake. +func GetS2ATimeout() time.Duration { + timeout, err := time.ParseDuration(os.Getenv(s2aTimeoutEnv)) + if err != nil { + return defaultS2ATimeout + } + return timeout +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem new file mode 100644 index 0000000000..493a5a2648 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 +a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 +OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 +RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK +P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 +HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu +0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl 
+MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 +EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 +/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA +QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ +nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD +X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco +pKklVz0= +-----END CERTIFICATE----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem new file mode 100644 index 0000000000..55a7f10c74 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF +l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj ++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G +4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA +xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh +68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ +/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL +Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA +VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 +9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH +MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt +aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq +xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx +2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv +EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z +aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq +udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs 
+VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm +56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT +GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V +Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm +HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q +BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH +qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh +GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= +-----END RSA PRIVATE KEY----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem new file mode 100644 index 0000000000..0f98322c72 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT +fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ +qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE +xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es +Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 +Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM +ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX 
+MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR +e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X +POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl +AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg +odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ +PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN +Dhm6uZM= +-----END CERTIFICATE----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem new file mode 100644 index 0000000000..81afea783d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs +8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO +QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk +XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA +Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc +gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf +LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl +jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 +4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q +Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P +nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 +drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE +duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 +L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG +06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm +eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD +uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 
+lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL +a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb +hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ +7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j +r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 +eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD +B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz +7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== +-----END RSA PRIVATE KEY----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem new file mode 100644 index 0000000000..493a5a2648 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 +a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 +OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 +RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK +P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 +HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu 
+0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 +EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 +/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA +QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ +nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD +X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco +pKklVz0= +-----END CERTIFICATE----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem new file mode 100644 index 0000000000..55a7f10c74 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF +l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj ++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G +4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA +xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh +68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ +/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL +Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA +VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 +9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH +MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt +aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq +xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx +2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv +EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z 
+aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq +udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs +VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm +56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT +GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V +Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm +HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q +BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH +qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh +GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= +-----END RSA PRIVATE KEY----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem new file mode 100644 index 0000000000..0f98322c72 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT +fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ +qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE +xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es 
+Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 +Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM +ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR +e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X +POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl +AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg +odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ +PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN +Dhm6uZM= +-----END CERTIFICATE----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem new file mode 100644 index 0000000000..81afea783d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs +8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO +QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk +XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA +Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc +gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf +LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl +jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 +4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q +Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P +nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 +drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE +duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 
+L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG +06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm +eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD +uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 +lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL +a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb +hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ +7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j +r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 +eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD +B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz +7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== +-----END RSA PRIVATE KEY----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go new file mode 100644 index 0000000000..4d91913229 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go @@ -0,0 +1,404 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tlsconfigstore offloads operations to S2Av2. 
+package tlsconfigstore + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + + "github.com/google/s2a-go/internal/tokenmanager" + "github.com/google/s2a-go/internal/v2/certverifier" + "github.com/google/s2a-go/internal/v2/remotesigner" + "github.com/google/s2a-go/stream" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +const ( + // HTTP/2 + h2 = "h2" +) + +// GetTLSConfigurationForClient returns a tls.Config instance for use by a client application. +func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { + authMechanisms := getAuthMechanisms(tokenManager, []*commonpbv1.Identity{localIdentity}) + + if grpclog.V(1) { + grpclog.Infof("Sending request to S2Av2 for client TLS config.") + } + // Send request to S2Av2 for config. + if err := s2AStream.Send(&s2av2pb.SessionReq{ + LocalIdentity: localIdentity, + AuthenticationMechanisms: authMechanisms, + ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{ + GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{ + ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_CLIENT, + }, + }, + }); err != nil { + grpclog.Infof("Failed to send request to S2Av2 for client TLS config") + return nil, err + } + + // Get the response containing config from S2Av2. + resp, err := s2AStream.Recv() + if err != nil { + grpclog.Infof("Failed to receive client TLS config response from S2Av2.") + return nil, err + } + + // TODO(rmehta19): Add unit test for this if statement. 
+ if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + } + + // Extract TLS configiguration from SessionResp. + tlsConfig := resp.GetGetTlsConfigurationResp().GetClientTlsConfiguration() + + var cert tls.Certificate + for i, v := range tlsConfig.CertificateChain { + // Populate Certificates field. + block, _ := pem.Decode([]byte(v)) + if block == nil { + return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty") + } + x509Cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + cert.Certificate = append(cert.Certificate, x509Cert.Raw) + if i == 0 { + cert.Leaf = x509Cert + } + } + + if len(tlsConfig.CertificateChain) > 0 { + cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) + if cert.PrivateKey == nil { + return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") + } + } + + minVersion, maxVersion, err := getTLSMinMaxVersionsClient(tlsConfig) + if err != nil { + return nil, err + } + + // Create mTLS credentials for client. + config := &tls.Config{ + VerifyPeerCertificate: certverifier.VerifyServerCertificateChain(serverHostname, verificationMode, s2AStream, serverAuthorizationPolicy), + ServerName: serverHostname, + InsecureSkipVerify: true, // NOLINT + ClientSessionCache: nil, + SessionTicketsDisabled: true, + MinVersion: minVersion, + MaxVersion: maxVersion, + NextProtos: []string{h2}, + } + if len(tlsConfig.CertificateChain) > 0 { + config.Certificates = []tls.Certificate{cert} + } + return config, nil +} + +// GetTLSConfigurationForServer returns a tls.Config instance for use by a server application. 
+func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { + return &tls.Config{ + GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream), + }, nil +} + +// ClientConfig builds a TLS config for a server to establish a secure +// connection with a client, based on SNI communicated during ClientHello. +// Ensures that server presents the correct certificate to establish a TLS +// connection. +func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { + return func(chi *tls.ClientHelloInfo) (*tls.Config, error) { + tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream) + if err != nil { + return nil, err + } + + var cert tls.Certificate + for i, v := range tlsConfig.CertificateChain { + // Populate Certificates field. 
+ block, _ := pem.Decode([]byte(v)) + if block == nil { + return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty") + } + x509Cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + cert.Certificate = append(cert.Certificate, x509Cert.Raw) + if i == 0 { + cert.Leaf = x509Cert + } + } + + cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) + if cert.PrivateKey == nil { + return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") + } + + minVersion, maxVersion, err := getTLSMinMaxVersionsServer(tlsConfig) + if err != nil { + return nil, err + } + + clientAuth := getTLSClientAuthType(tlsConfig) + + var cipherSuites []uint16 + cipherSuites = getCipherSuites(tlsConfig.Ciphersuites) + + // Create mTLS credentials for server. + return &tls.Config{ + Certificates: []tls.Certificate{cert}, + VerifyPeerCertificate: certverifier.VerifyClientCertificateChain(verificationMode, s2AStream), + ClientAuth: clientAuth, + CipherSuites: cipherSuites, + SessionTicketsDisabled: true, + MinVersion: minVersion, + MaxVersion: maxVersion, + NextProtos: []string{h2}, + }, nil + } +} + +func getCipherSuites(tlsConfigCipherSuites []commonpb.Ciphersuite) []uint16 { + var tlsGoCipherSuites []uint16 + for _, v := range tlsConfigCipherSuites { + s := getTLSCipherSuite(v) + if s != 0xffff { + tlsGoCipherSuites = append(tlsGoCipherSuites, s) + } + } + return tlsGoCipherSuites +} + +func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 { + switch tlsCipherSuite { + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + return tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: + return tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: + return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 + case 
commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256: + return tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384: + return tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: + return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + default: + return 0xffff + } +} + +func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { + authMechanisms := getAuthMechanisms(tokenManager, localIdentities) + var locID *commonpbv1.Identity + if localIdentities != nil { + locID = localIdentities[0] + } + + if err := s2AStream.Send(&s2av2pb.SessionReq{ + LocalIdentity: locID, + AuthenticationMechanisms: authMechanisms, + ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{ + GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{ + ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_SERVER, + Sni: sni, + }, + }, + }); err != nil { + return nil, err + } + + resp, err := s2AStream.Recv() + if err != nil { + return nil, err + } + + // TODO(rmehta19): Add unit test for this if statement. 
+ if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + } + + return resp.GetGetTlsConfigurationResp().GetServerTlsConfiguration(), nil +} + +func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) tls.ClientAuthType { + var clientAuth tls.ClientAuthType + switch x := tlsConfig.RequestClientCertificate; x { + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE: + clientAuth = tls.NoClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: + clientAuth = tls.RequestClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY: + // This case actually maps to tls.VerifyClientCertIfGiven. However this + // mapping triggers normal verification, followed by custom verification, + // specified in VerifyPeerCertificate. To bypass normal verification, and + // only do custom verification we set clientAuth to RequireAnyClientCert or + // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full + // discussion. + clientAuth = tls.RequireAnyClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: + clientAuth = tls.RequireAnyClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY: + // This case actually maps to tls.RequireAndVerifyClientCert. However this + // mapping triggers normal verification, followed by custom verification, + // specified in VerifyPeerCertificate. To bypass normal verification, and + // only do custom verification we set clientAuth to RequireAnyClientCert or + // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full + // discussion. 
+ clientAuth = tls.RequireAnyClientCert + default: + clientAuth = tls.RequireAnyClientCert + } + return clientAuth +} + +func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism { + if tokenManager == nil { + return nil + } + if len(localIdentities) == 0 { + token, err := tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("Unable to get token for empty local identity: %v", err) + return nil + } + return []*s2av2pb.AuthenticationMechanism{ + { + MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } + } + var authMechanisms []*s2av2pb.AuthenticationMechanism + for _, localIdentity := range localIdentities { + if localIdentity == nil { + token, err := tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("Unable to get default token for local identity %v: %v", localIdentity, err) + continue + } + authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ + Identity: localIdentity, + MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ + Token: token, + }, + }) + } else { + token, err := tokenManager.Token(localIdentity) + if err != nil { + grpclog.Infof("Unable to get token for local identity %v: %v", localIdentity, err) + continue + } + authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ + Identity: localIdentity, + MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ + Token: token, + }, + }) + } + } + return authMechanisms +} + +// TODO(rmehta19): refactor switch statements into a helper function. +func getTLSMinMaxVersionsClient(tlsConfig *s2av2pb.GetTlsConfigurationResp_ClientTlsConfiguration) (uint16, uint16, error) { + // Map S2Av2 TLSVersion to consts defined in tls package. 
+ var minVersion uint16 + var maxVersion uint16 + switch x := tlsConfig.MinTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + minVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + minVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + minVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + minVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) + } + + switch x := tlsConfig.MaxTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + maxVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + maxVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + maxVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + maxVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) + } + if minVersion > maxVersion { + return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") + } + return minVersion, maxVersion, nil +} + +func getTLSMinMaxVersionsServer(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) (uint16, uint16, error) { + // Map S2Av2 TLSVersion to consts defined in tls package. 
+ var minVersion uint16 + var maxVersion uint16 + switch x := tlsConfig.MinTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + minVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + minVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + minVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + minVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) + } + + switch x := tlsConfig.MaxTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + maxVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + maxVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + maxVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + maxVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) + } + if minVersion > maxVersion { + return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") + } + return minVersion, maxVersion, nil +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/s2a.go b/terraform/providers/google/vendor/github.com/google/s2a-go/s2a.go new file mode 100644 index 0000000000..1c1349de4a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/s2a.go @@ -0,0 +1,412 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package s2a provides the S2A transport credentials used by a gRPC +// application. +package s2a + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "github.com/google/s2a-go/fallback" + "github.com/google/s2a-go/internal/handshaker" + "github.com/google/s2a-go/internal/handshaker/service" + "github.com/google/s2a-go/internal/tokenmanager" + "github.com/google/s2a-go/internal/v2" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +const ( + s2aSecurityProtocol = "tls" + // defaultTimeout specifies the default server handshake timeout. + defaultTimeout = 30.0 * time.Second +) + +// s2aTransportCreds are the transport credentials required for establishing +// a secure connection using the S2A. They implement the +// credentials.TransportCredentials interface. +type s2aTransportCreds struct { + info *credentials.ProtocolInfo + minTLSVersion commonpb.TLSVersion + maxTLSVersion commonpb.TLSVersion + // tlsCiphersuites contains the ciphersuites used in the S2A connection. + // Note that these are currently unconfigurable. + tlsCiphersuites []commonpb.Ciphersuite + // localIdentity should only be used by the client. + localIdentity *commonpb.Identity + // localIdentities should only be used by the server. + localIdentities []*commonpb.Identity + // targetIdentities should only be used by the client. + targetIdentities []*commonpb.Identity + isClient bool + s2aAddr string + ensureProcessSessionTickets *sync.WaitGroup +} + +// NewClientCreds returns a client-side transport credentials object that uses +// the S2A to establish a secure connection with a server. 
+func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, error) { + if opts == nil { + return nil, errors.New("nil client options") + } + var targetIdentities []*commonpb.Identity + for _, targetIdentity := range opts.TargetIdentities { + protoTargetIdentity, err := toProtoIdentity(targetIdentity) + if err != nil { + return nil, err + } + targetIdentities = append(targetIdentities, protoTargetIdentity) + } + localIdentity, err := toProtoIdentity(opts.LocalIdentity) + if err != nil { + return nil, err + } + if opts.EnableLegacyMode { + return &s2aTransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + minTLSVersion: commonpb.TLSVersion_TLS1_3, + maxTLSVersion: commonpb.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpb.Ciphersuite{ + commonpb.Ciphersuite_AES_128_GCM_SHA256, + commonpb.Ciphersuite_AES_256_GCM_SHA384, + commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + }, + localIdentity: localIdentity, + targetIdentities: targetIdentities, + isClient: true, + s2aAddr: opts.S2AAddress, + ensureProcessSessionTickets: opts.EnsureProcessSessionTickets, + }, nil + } + verificationMode := getVerificationMode(opts.VerificationMode) + var fallbackFunc fallback.ClientHandshake + if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { + fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc + } + return v2.NewClientCreds(opts.S2AAddress, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) +} + +// NewServerCreds returns a server-side transport credentials object that uses +// the S2A to establish a secure connection with a client. 
+func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, error) { + if opts == nil { + return nil, errors.New("nil server options") + } + var localIdentities []*commonpb.Identity + for _, localIdentity := range opts.LocalIdentities { + protoLocalIdentity, err := toProtoIdentity(localIdentity) + if err != nil { + return nil, err + } + localIdentities = append(localIdentities, protoLocalIdentity) + } + if opts.EnableLegacyMode { + return &s2aTransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + minTLSVersion: commonpb.TLSVersion_TLS1_3, + maxTLSVersion: commonpb.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpb.Ciphersuite{ + commonpb.Ciphersuite_AES_128_GCM_SHA256, + commonpb.Ciphersuite_AES_256_GCM_SHA384, + commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + }, + localIdentities: localIdentities, + isClient: false, + s2aAddr: opts.S2AAddress, + }, nil + } + verificationMode := getVerificationMode(opts.VerificationMode) + return v2.NewServerCreds(opts.S2AAddress, localIdentities, verificationMode, opts.getS2AStream) +} + +// ClientHandshake initiates a client-side TLS handshake using the S2A. +func (c *s2aTransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if !c.isClient { + return nil, nil, errors.New("client handshake called using server transport credentials") + } + + // Connect to the S2A. 
+ hsConn, err := service.Dial(c.s2aAddr) + if err != nil { + grpclog.Infof("Failed to connect to S2A: %v", err) + return nil, nil, err + } + + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + + opts := &handshaker.ClientHandshakerOptions{ + MinTLSVersion: c.minTLSVersion, + MaxTLSVersion: c.maxTLSVersion, + TLSCiphersuites: c.tlsCiphersuites, + TargetIdentities: c.targetIdentities, + LocalIdentity: c.localIdentity, + TargetName: serverAuthority, + EnsureProcessSessionTickets: c.ensureProcessSessionTickets, + } + chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts) + if err != nil { + grpclog.Infof("Call to handshaker.NewClientHandshaker failed: %v", err) + return nil, nil, err + } + defer func() { + if err != nil { + if closeErr := chs.Close(); closeErr != nil { + grpclog.Infof("Close failed unexpectedly: %v", err) + err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr) + } + } + }() + + secConn, authInfo, err := chs.ClientHandshake(context.Background()) + if err != nil { + grpclog.Infof("Handshake failed: %v", err) + return nil, nil, err + } + return secConn, authInfo, nil +} + +// ServerHandshake initiates a server-side TLS handshake using the S2A. +func (c *s2aTransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if c.isClient { + return nil, nil, errors.New("server handshake called using client transport credentials") + } + + // Connect to the S2A. 
+ hsConn, err := service.Dial(c.s2aAddr) + if err != nil { + grpclog.Infof("Failed to connect to S2A: %v", err) + return nil, nil, err + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + + opts := &handshaker.ServerHandshakerOptions{ + MinTLSVersion: c.minTLSVersion, + MaxTLSVersion: c.maxTLSVersion, + TLSCiphersuites: c.tlsCiphersuites, + LocalIdentities: c.localIdentities, + } + shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts) + if err != nil { + grpclog.Infof("Call to handshaker.NewServerHandshaker failed: %v", err) + return nil, nil, err + } + defer func() { + if err != nil { + if closeErr := shs.Close(); closeErr != nil { + grpclog.Infof("Close failed unexpectedly: %v", err) + err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr) + } + } + }() + + secConn, authInfo, err := shs.ServerHandshake(context.Background()) + if err != nil { + grpclog.Infof("Handshake failed: %v", err) + return nil, nil, err + } + return secConn, authInfo, nil +} + +func (c *s2aTransportCreds) Info() credentials.ProtocolInfo { + return *c.info +} + +func (c *s2aTransportCreds) Clone() credentials.TransportCredentials { + info := *c.info + var localIdentity *commonpb.Identity + if c.localIdentity != nil { + localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) + } + var localIdentities []*commonpb.Identity + if c.localIdentities != nil { + localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) + for i, localIdentity := range c.localIdentities { + localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) + } + } + var targetIdentities []*commonpb.Identity + if c.targetIdentities != nil { + targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities)) + for i, targetIdentity := range c.targetIdentities { + targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity) + } + } + return &s2aTransportCreds{ + info: &info, + minTLSVersion: 
c.minTLSVersion, + maxTLSVersion: c.maxTLSVersion, + tlsCiphersuites: c.tlsCiphersuites, + localIdentity: localIdentity, + localIdentities: localIdentities, + targetIdentities: targetIdentities, + isClient: c.isClient, + s2aAddr: c.s2aAddr, + } +} + +func (c *s2aTransportCreds) OverrideServerName(serverNameOverride string) error { + c.info.ServerName = serverNameOverride + return nil +} + +// TLSClientConfigOptions specifies parameters for creating client TLS config. +type TLSClientConfigOptions struct { + // ServerName is required by s2a as the expected name when verifying the hostname found in server's certificate. + // tlsConfig, _ := factory.Build(ctx, &s2a.TLSClientConfigOptions{ + // ServerName: "example.com", + // }) + ServerName string +} + +// TLSClientConfigFactory defines the interface for a client TLS config factory. +type TLSClientConfigFactory interface { + Build(ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) +} + +// NewTLSClientConfigFactory returns an instance of s2aTLSClientConfigFactory. +func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, error) { + if opts == nil { + return nil, fmt.Errorf("opts must be non-nil") + } + if opts.EnableLegacyMode { + return nil, fmt.Errorf("NewTLSClientConfigFactory only supports S2Av2") + } + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + // The only possible error is: access token not set in the environment, + // which is okay in environments other than serverless. 
+ grpclog.Infof("Access token manager not initialized: %v", err) + return &s2aTLSClientConfigFactory{ + s2av2Address: opts.S2AAddress, + tokenManager: nil, + verificationMode: getVerificationMode(opts.VerificationMode), + serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + }, nil + } + return &s2aTLSClientConfigFactory{ + s2av2Address: opts.S2AAddress, + tokenManager: tokenManager, + verificationMode: getVerificationMode(opts.VerificationMode), + serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + }, nil +} + +type s2aTLSClientConfigFactory struct { + s2av2Address string + tokenManager tokenmanager.AccessTokenManager + verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode + serverAuthorizationPolicy []byte +} + +func (f *s2aTLSClientConfigFactory) Build( + ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) { + serverName := "" + if opts != nil && opts.ServerName != "" { + serverName = opts.ServerName + } + return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) +} + +func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { + switch verificationMode { + case ConnectToGoogle: + return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE + case Spiffe: + return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE + default: + return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED + } +} + +// NewS2ADialTLSContextFunc returns a dialer which establishes an MTLS connection using S2A. 
+// Example use with http.RoundTripper: +// +// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ +// S2AAddress: s2aAddress, // required +// }) +// transport := http.DefaultTransport +// transport.DialTLSContext = dialTLSContext +func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, network, addr string) (net.Conn, error) { + + return func(ctx context.Context, network, addr string) (net.Conn, error) { + + fallback := func(err error) (net.Conn, error) { + if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackDialer != nil && + opts.FallbackOpts.FallbackDialer.Dialer != nil && opts.FallbackOpts.FallbackDialer.ServerAddr != "" { + fbDialer := opts.FallbackOpts.FallbackDialer + grpclog.Infof("fall back to dial: %s", fbDialer.ServerAddr) + fbConn, fbErr := fbDialer.Dialer.DialContext(ctx, network, fbDialer.ServerAddr) + if fbErr != nil { + return nil, fmt.Errorf("error fallback to %s: %v; S2A error: %w", fbDialer.ServerAddr, fbErr, err) + } + return fbConn, nil + } + return nil, err + } + + factory, err := NewTLSClientConfigFactory(opts) + if err != nil { + grpclog.Infof("error creating S2A client config factory: %v", err) + return fallback(err) + } + + serverName, _, err := net.SplitHostPort(addr) + if err != nil { + serverName = addr + } + timeoutCtx, cancel := context.WithTimeout(ctx, v2.GetS2ATimeout()) + defer cancel() + s2aTLSConfig, err := factory.Build(timeoutCtx, &TLSClientConfigOptions{ + ServerName: serverName, + }) + if err != nil { + grpclog.Infof("error building S2A TLS config: %v", err) + return fallback(err) + } + + s2aDialer := &tls.Dialer{ + Config: s2aTLSConfig, + } + c, err := s2aDialer.DialContext(ctx, network, addr) + if err != nil { + grpclog.Infof("error dialing with S2A to %s: %v", addr, err) + return fallback(err) + } + grpclog.Infof("success dialing MTLS to %s with S2A", addr) + return c, nil + } +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/s2a_options.go 
b/terraform/providers/google/vendor/github.com/google/s2a-go/s2a_options.go new file mode 100644 index 0000000000..94feafb9cf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/s2a_options.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package s2a + +import ( + "context" + "crypto/tls" + "errors" + "sync" + + "github.com/google/s2a-go/fallback" + "github.com/google/s2a-go/stream" + + s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" +) + +// Identity is the interface for S2A identities. +type Identity interface { + // Name returns the name of the identity. + Name() string +} + +type spiffeID struct { + spiffeID string +} + +func (s *spiffeID) Name() string { return s.spiffeID } + +// NewSpiffeID creates a SPIFFE ID from id. +func NewSpiffeID(id string) Identity { + return &spiffeID{spiffeID: id} +} + +type hostname struct { + hostname string +} + +func (h *hostname) Name() string { return h.hostname } + +// NewHostname creates a hostname from name. +func NewHostname(name string) Identity { + return &hostname{hostname: name} +} + +type uid struct { + uid string +} + +func (h *uid) Name() string { return h.uid } + +// NewUID creates a UID from name. +func NewUID(name string) Identity { + return &uid{uid: name} +} + +// VerificationModeType specifies the mode that S2A must use to verify the peer +// certificate chain. 
+type VerificationModeType int + +// Three types of verification modes. +const ( + Unspecified = iota + ConnectToGoogle + Spiffe +) + +// ClientOptions contains the client-side options used to establish a secure +// channel using the S2A handshaker service. +type ClientOptions struct { + // TargetIdentities contains a list of allowed server identities. One of the + // target identities should match the peer identity in the handshake + // result; otherwise, the handshake fails. + TargetIdentities []Identity + // LocalIdentity is the local identity of the client application. If none is + // provided, then the S2A will choose the default identity, if one exists. + LocalIdentity Identity + // S2AAddress is the address of the S2A. + S2AAddress string + // EnsureProcessSessionTickets waits for all session tickets to be sent to + // S2A before a process completes. + // + // This functionality is crucial for processes that complete very soon after + // using S2A to establish a TLS connection, but it can be ignored for longer + // lived processes. + // + // Usage example: + // func main() { + // var ensureProcessSessionTickets sync.WaitGroup + // clientOpts := &s2a.ClientOptions{ + // EnsureProcessSessionTickets: &ensureProcessSessionTickets, + // // Set other members. + // } + // creds, _ := s2a.NewClientCreds(clientOpts) + // conn, _ := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds)) + // defer conn.Close() + // + // // Make RPC call. + // + // // The process terminates right after the RPC call ends. + // // ensureProcessSessionTickets can be used to ensure resumption + // // tickets are fully processed. If the process is long-lived, using + // // ensureProcessSessionTickets is not necessary. + // ensureProcessSessionTickets.Wait() + // } + EnsureProcessSessionTickets *sync.WaitGroup + // If true, enables the use of legacy S2Av1. + EnableLegacyMode bool + // VerificationMode specifies the mode that S2A must use to verify the + // peer certificate chain. 
+ VerificationMode VerificationModeType + + // Optional fallback after dialing with S2A fails. + FallbackOpts *FallbackOptions + + // Generates an S2AStream interface for talking to the S2A server. + getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + + // Serialized user specified policy for server authorization. + serverAuthorizationPolicy []byte +} + +// FallbackOptions prescribes the fallback logic that should be taken if the application fails to connect with S2A. +type FallbackOptions struct { + // FallbackClientHandshakeFunc is used to specify fallback behavior when calling s2a.NewClientCreds(). + // It will be called by ClientHandshake function, after handshake with S2A fails. + // s2a.NewClientCreds() ignores the other FallbackDialer field. + FallbackClientHandshakeFunc fallback.ClientHandshake + + // FallbackDialer is used to specify fallback behavior when calling s2a.NewS2aDialTLSContextFunc(). + // It passes in a custom fallback dialer and server address to use after dialing with S2A fails. + // s2a.NewS2aDialTLSContextFunc() ignores the other FallbackClientHandshakeFunc field. + FallbackDialer *FallbackDialer +} + +// FallbackDialer contains a fallback tls.Dialer and a server address to connect to. +type FallbackDialer struct { + // Dialer specifies a fallback tls.Dialer. + Dialer *tls.Dialer + // ServerAddr is used by Dialer to establish fallback connection. + ServerAddr string +} + +// DefaultClientOptions returns the default client options. +func DefaultClientOptions(s2aAddress string) *ClientOptions { + return &ClientOptions{ + S2AAddress: s2aAddress, + VerificationMode: ConnectToGoogle, + } +} + +// ServerOptions contains the server-side options used to establish a secure +// channel using the S2A handshaker service. +type ServerOptions struct { + // LocalIdentities is the list of local identities that may be assumed by + // the server. 
If no local identity is specified, then the S2A chooses a + // default local identity, if one exists. + LocalIdentities []Identity + // S2AAddress is the address of the S2A. + S2AAddress string + // If true, enables the use of legacy S2Av1. + EnableLegacyMode bool + // VerificationMode specifies the mode that S2A must use to verify the + // peer certificate chain. + VerificationMode VerificationModeType + + // Generates an S2AStream interface for talking to the S2A server. + getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) +} + +// DefaultServerOptions returns the default server options. +func DefaultServerOptions(s2aAddress string) *ServerOptions { + return &ServerOptions{ + S2AAddress: s2aAddress, + VerificationMode: ConnectToGoogle, + } +} + +func toProtoIdentity(identity Identity) (*s2apb.Identity, error) { + if identity == nil { + return nil, nil + } + switch id := identity.(type) { + case *spiffeID: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + case *hostname: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil + case *uid: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil + default: + return nil, errors.New("unrecognized identity type") + } +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/s2a_utils.go b/terraform/providers/google/vendor/github.com/google/s2a-go/s2a_utils.go new file mode 100644 index 0000000000..d649cc4614 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/s2a_utils.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package s2a + +import ( + "context" + "errors" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/peer" +) + +// AuthInfo exposes security information from the S2A to the application. +type AuthInfo interface { + // AuthType returns the authentication type. + AuthType() string + // ApplicationProtocol returns the application protocol, e.g. "grpc". + ApplicationProtocol() string + // TLSVersion returns the TLS version negotiated during the handshake. + TLSVersion() commonpb.TLSVersion + // Ciphersuite returns the ciphersuite negotiated during the handshake. + Ciphersuite() commonpb.Ciphersuite + // PeerIdentity returns the authenticated identity of the peer. + PeerIdentity() *commonpb.Identity + // LocalIdentity returns the local identity of the application used during + // session setup. + LocalIdentity() *commonpb.Identity + // PeerCertFingerprint returns the SHA256 hash of the peer certificate used in + // the S2A handshake. + PeerCertFingerprint() []byte + // LocalCertFingerprint returns the SHA256 hash of the local certificate used + // in the S2A handshake. + LocalCertFingerprint() []byte + // IsHandshakeResumed returns true if a cached session was used to resume + // the handshake. + IsHandshakeResumed() bool + // SecurityLevel returns the security level of the connection. + SecurityLevel() credentials.SecurityLevel +} + +// AuthInfoFromPeer extracts the authinfo.S2AAuthInfo object from the given +// peer, if it exists. 
This API should be used by gRPC clients after +// obtaining a peer object using the grpc.Peer() CallOption. +func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { + s2aAuthInfo, ok := p.AuthInfo.(AuthInfo) + if !ok { + return nil, errors.New("no S2AAuthInfo found in Peer") + } + return s2aAuthInfo, nil +} + +// AuthInfoFromContext extracts the authinfo.S2AAuthInfo object from the given +// context, if it exists. This API should be used by gRPC server RPC handlers +// to get information about the peer. On the client-side, use the grpc.Peer() +// CallOption and the AuthInfoFromPeer function. +func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("no Peer found in Context") + } + return AuthInfoFromPeer(p) +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/terraform/providers/google/vendor/github.com/google/s2a-go/stream/s2a_stream.go new file mode 100644 index 0000000000..584bf32b1c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/stream/s2a_stream.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stream provides an interface for bidirectional streaming to the S2A server. 
+package stream + +import ( + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +// S2AStream defines the operation for communicating with the S2A server over a bidirectional stream. +type S2AStream interface { + // Send sends the message to the S2A server. + Send(*s2av2pb.SessionReq) error + // Recv receives the message from the S2A server. + Recv() (*s2av2pb.SessionResp, error) + // Closes the channel to the S2A server. + CloseSend() error +} diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/testdata/client_cert.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/testdata/client_cert.pem new file mode 100644 index 0000000000..493a5a2648 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/testdata/client_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 +a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 +OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 +RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK +P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 +HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu +0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 +EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 
+/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA +QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ +nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD +X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco +pKklVz0= +-----END CERTIFICATE----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/testdata/client_key.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/testdata/client_key.pem new file mode 100644 index 0000000000..55a7f10c74 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/testdata/client_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF +l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj ++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G +4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA +xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh +68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ +/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL +Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA +VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 +9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH +MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt +aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq +xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx +2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv +EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z +aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq +udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs +VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm +56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT 
+GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V +Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm +HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q +BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH +qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh +GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= +-----END RSA PRIVATE KEY----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/testdata/server_cert.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/testdata/server_cert.pem new file mode 100644 index 0000000000..0f98322c72 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/testdata/server_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT +fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ +qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE +xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es +Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 +Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM +ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR +e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X +POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl 
+AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg +odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ +PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN +Dhm6uZM= +-----END CERTIFICATE----- diff --git a/terraform/providers/google/vendor/github.com/google/s2a-go/testdata/server_key.pem b/terraform/providers/google/vendor/github.com/google/s2a-go/testdata/server_key.pem new file mode 100644 index 0000000000..81afea783d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/google/s2a-go/testdata/server_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs +8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO +QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk +XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA +Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc +gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf +LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl +jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 +4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q +Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P +nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 +drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE +duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 +L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG +06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm +eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD +uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 +lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL +a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb +hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ 
+7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j +r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 +eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD +B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz +7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== +-----END RSA PRIVATE KEY----- diff --git a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 10295639c5..91d60a809f 100644 --- a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.7.1" + "v2": "2.11.0" } diff --git a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index 41a7ca94d4..e17b196f6c 100644 --- a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,50 @@ # Changelog +## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) + + +### Features + +* **v2:** add GoVersion package variable ([#283](https://github.com/googleapis/gax-go/issues/283)) ([26553cc](https://github.com/googleapis/gax-go/commit/26553ccadb4016b189881f52e6c253b68bb3e3d5)) + + +### Bug Fixes + +* **v2:** handle space in non-devel go version ([#288](https://github.com/googleapis/gax-go/issues/288)) ([fd7bca0](https://github.com/googleapis/gax-go/commit/fd7bca029a1c5e63def8f0a5fd1ec3f725d92f75)) + +## [2.10.0](https://github.com/googleapis/gax-go/compare/v2.9.1...v2.10.0) (2023-05-30) + + +### Features + +* update dependencies ([#280](https://github.com/googleapis/gax-go/issues/280)) 
([4514281](https://github.com/googleapis/gax-go/commit/4514281058590f3637c36bfd49baa65c4d3cfb21)) + +## [2.9.1](https://github.com/googleapis/gax-go/compare/v2.9.0...v2.9.1) (2023-05-23) + + +### Bug Fixes + +* **v2:** drop cloud lro test dep ([#276](https://github.com/googleapis/gax-go/issues/276)) ([c67eeba](https://github.com/googleapis/gax-go/commit/c67eeba0f10a3294b1d93c1b8fbe40211a55ae5f)), refs [#270](https://github.com/googleapis/gax-go/issues/270) + +## [2.9.0](https://github.com/googleapis/gax-go/compare/v2.8.0...v2.9.0) (2023-05-22) + + +### Features + +* **apierror:** add method to return HTTP status code conditionally ([#274](https://github.com/googleapis/gax-go/issues/274)) ([5874431](https://github.com/googleapis/gax-go/commit/587443169acd10f7f86d1989dc8aaf189e645e98)), refs [#229](https://github.com/googleapis/gax-go/issues/229) + + +### Documentation + +* add ref to usage with clients ([#272](https://github.com/googleapis/gax-go/issues/272)) ([ea4d72d](https://github.com/googleapis/gax-go/commit/ea4d72d514beba4de450868b5fb028601a29164e)), refs [#228](https://github.com/googleapis/gax-go/issues/228) + +## [2.8.0](https://github.com/googleapis/gax-go/compare/v2.7.1...v2.8.0) (2023-03-15) + + +### Features + +* **v2:** add WithTimeout option ([#259](https://github.com/googleapis/gax-go/issues/259)) ([9a8da43](https://github.com/googleapis/gax-go/commit/9a8da43693002448b1e8758023699387481866d1)) + ## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06) diff --git a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index ed862c8b39..d785a065ca 100644 --- a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -29,6 +29,10 @@ // Package apierror implements a wrapper error 
for parsing error details from // API calls. Both HTTP & gRPC status errors are supported. +// +// For examples of how to use [APIError] with client libraries please reference +// [Inspecting errors](https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors) +// in the client library documentation. package apierror import ( @@ -345,3 +349,13 @@ func parseHTTPDetails(gae *googleapi.Error) ErrDetails { return parseDetails(details) } + +// HTTPCode returns the underlying HTTP response status code. This method returns +// `-1` if the underlying error is a [google.golang.org/grpc/status.Status]. To +// check gRPC error codes use [google.golang.org/grpc/status.Code]. +func (a *APIError) HTTPCode() int { + if a.httpErr == nil { + return -1 + } + return a.httpErr.Code +} diff --git a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/call_option.go b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/call_option.go index e092005563..c52e03f643 100644 --- a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/call_option.go +++ b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -218,6 +218,14 @@ func (p pathOpt) Resolve(s *CallSettings) { s.Path = p.p } +type timeoutOpt struct { + t time.Duration +} + +func (t timeoutOpt) Resolve(s *CallSettings) { + s.timeout = t.t +} + // WithPath applies a Path override to the HTTP-based APICall. // // This is for internal use only. @@ -230,6 +238,15 @@ func WithGRPCOptions(opt ...grpc.CallOption) CallOption { return grpcOpt(append([]grpc.CallOption(nil), opt...)) } +// WithTimeout is a convenience option for setting a context.WithTimeout on the +// singular context.Context used for **all** APICall attempts. Calculated from +// the start of the first APICall attempt. +// If the context.Context provided to Invoke already has a Deadline set, that +// will always be respected over the deadline calculated using this option. 
+func WithTimeout(t time.Duration) CallOption { + return &timeoutOpt{t: t} +} + // CallSettings allow fine-grained control over how calls are made. type CallSettings struct { // Retry returns a Retryer to be used to control retry logic of a method call. @@ -241,4 +258,8 @@ type CallSettings struct { // Path is an HTTP override for an APICall. Path string + + // Timeout defines the amount of time that Invoke has to complete. + // Unexported so it cannot be changed by the code in an APICall. + timeout time.Duration } diff --git a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/header.go b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/header.go index 139371a0bf..6488461f4d 100644 --- a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/header.go @@ -29,7 +29,73 @@ package gax -import "bytes" +import ( + "bytes" + "runtime" + "strings" + "unicode" +) + +var ( + // GoVersion is a header-safe representation of the current runtime + // environment's Go version. This is for GAX consumers that need to + // report the Go runtime version in API calls. + GoVersion string + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +func init() { + GoVersion = goVersion() +} + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. 
+func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. + if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} // XGoogHeader is for use by the Google Cloud Libraries only. // diff --git a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 936873ec4f..374dcdb115 100644 --- a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "2.7.1" +const Version = "2.11.0" diff --git a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/invoke.go b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/invoke.go index 9fcc29959b..721d1af551 100644 --- a/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/invoke.go +++ b/terraform/providers/google/vendor/github.com/googleapis/gax-go/v2/invoke.go @@ -68,6 +68,16 @@ type sleeper func(ctx context.Context, d time.Duration) error // invoke implements Invoke, taking an additional sleeper argument for testing. func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { var retryer Retryer + + // Only use the value provided via WithTimeout if the context doesn't + // already have a deadline. This is important for backwards compatibility if + // the user already set a deadline on the context given to Invoke. + if _, ok := ctx.Deadline(); !ok && settings.timeout != 0 { + c, cc := context.WithTimeout(ctx, settings.timeout) + defer cc() + ctx = c + } + for { err := call(ctx, settings) if err == nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/LICENSE b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/LICENSE new file mode 100644 index 0000000000..0f5a4e378a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/LICENSE @@ -0,0 +1,356 @@ +Copyright (c) 2022 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. 
“Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
“Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. 
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. 
+ Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. 
+ diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag/diag.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag/diag.go new file mode 100644 index 0000000000..66db1417b2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag/diag.go @@ -0,0 +1,72 @@ +package validatordiag + +import ( + "fmt" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" +) + +// InvalidAttributeValueDiagnostic returns an error Diagnostic to be used when an attribute has an invalid value. +func InvalidAttributeValueDiagnostic(path path.Path, description string, value string) diag.Diagnostic { + return diag.NewAttributeErrorDiagnostic( + path, + "Invalid Attribute Value", + fmt.Sprintf("Attribute %s %s, got: %s", path, description, value), + ) +} + +// InvalidAttributeValueLengthDiagnostic returns an error Diagnostic to be used when an attribute's value has an invalid length. +func InvalidAttributeValueLengthDiagnostic(path path.Path, description string, value string) diag.Diagnostic { + return diag.NewAttributeErrorDiagnostic( + path, + "Invalid Attribute Value Length", + fmt.Sprintf("Attribute %s %s, got: %s", path, description, value), + ) +} + +// InvalidAttributeValueMatchDiagnostic returns an error Diagnostic to be used when an attribute's value has an invalid match. +func InvalidAttributeValueMatchDiagnostic(path path.Path, description string, value string) diag.Diagnostic { + return diag.NewAttributeErrorDiagnostic( + path, + "Invalid Attribute Value Match", + fmt.Sprintf("Attribute %s %s, got: %s", path, description, value), + ) +} + +// InvalidAttributeCombinationDiagnostic returns an error Diagnostic to be used when a schemavalidator of attributes is invalid. 
+func InvalidAttributeCombinationDiagnostic(path path.Path, description string) diag.Diagnostic { + return diag.NewAttributeErrorDiagnostic( + path, + "Invalid Attribute Combination", + capitalize(description), + ) +} + +// InvalidAttributeTypeDiagnostic returns an error Diagnostic to be used when an attribute has an invalid type. +func InvalidAttributeTypeDiagnostic(path path.Path, description string, value string) diag.Diagnostic { + return diag.NewAttributeErrorDiagnostic( + path, + "Invalid Attribute Type", + fmt.Sprintf("Attribute %s %s, got: %s", path, description, value), + ) +} + +func BugInProviderDiagnostic(summary string) diag.Diagnostic { + return diag.NewErrorDiagnostic(summary, + "This is a bug in the provider, which should be reported in the provider's own issue tracker", + ) +} + +// capitalize will uppercase the first letter in a UTF-8 string. +func capitalize(str string) string { + if str == "" { + return "" + } + + firstRune, size := utf8.DecodeRuneInString(str) + + return string(unicode.ToUpper(firstRune)) + str[size:] +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag/doc.go new file mode 100644 index 0000000000..c31457a032 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag/doc.go @@ -0,0 +1,2 @@ +// Package validatordiag provides diagnostics helpers for validator implementations. 
+package validatordiag diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/also_requires.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/also_requires.go new file mode 100644 index 0000000000..6696f7a7f0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/also_requires.go @@ -0,0 +1,225 @@ +package schemavalidator + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// This type of validator must satisfy all types. +var ( + _ validator.Bool = AlsoRequiresValidator{} + _ validator.Float64 = AlsoRequiresValidator{} + _ validator.Int64 = AlsoRequiresValidator{} + _ validator.List = AlsoRequiresValidator{} + _ validator.Map = AlsoRequiresValidator{} + _ validator.Number = AlsoRequiresValidator{} + _ validator.Object = AlsoRequiresValidator{} + _ validator.Set = AlsoRequiresValidator{} + _ validator.String = AlsoRequiresValidator{} +) + +// AlsoRequiresValidator is the underlying struct implementing AlsoRequires. 
+type AlsoRequiresValidator struct { + PathExpressions path.Expressions +} + +type AlsoRequiresValidatorRequest struct { + Config tfsdk.Config + ConfigValue attr.Value + Path path.Path + PathExpression path.Expression +} + +type AlsoRequiresValidatorResponse struct { + Diagnostics diag.Diagnostics +} + +func (av AlsoRequiresValidator) Description(ctx context.Context) string { + return av.MarkdownDescription(ctx) +} + +func (av AlsoRequiresValidator) MarkdownDescription(_ context.Context) string { + return fmt.Sprintf("Ensure that if an attribute is set, also these are set: %q", av.PathExpressions) +} + +func (av AlsoRequiresValidator) Validate(ctx context.Context, req AlsoRequiresValidatorRequest, res *AlsoRequiresValidatorResponse) { + // If attribute configuration is null, there is nothing else to validate + if req.ConfigValue.IsNull() { + return + } + + expressions := req.PathExpression.MergeExpressions(av.PathExpressions...) + + for _, expression := range expressions { + matchedPaths, diags := req.Config.PathMatches(ctx, expression) + + res.Diagnostics.Append(diags...) + + // Collect all errors + if diags.HasError() { + continue + } + + for _, mp := range matchedPaths { + // If the user specifies the same attribute this validator is applied to, + // also as part of the input, skip it + if mp.Equal(req.Path) { + continue + } + + var mpVal attr.Value + diags := req.Config.GetAttribute(ctx, mp, &mpVal) + res.Diagnostics.Append(diags...) 
+ + // Collect all errors + if diags.HasError() { + continue + } + + // Delay validation until all involved attribute have a known value + if mpVal.IsUnknown() { + return + } + + if mpVal.IsNull() { + res.Diagnostics.Append(validatordiag.InvalidAttributeCombinationDiagnostic( + req.Path, + fmt.Sprintf("Attribute %q must be specified when %q is specified", mp, req.Path), + )) + } + } + } +} + +func (av AlsoRequiresValidator) ValidateBool(ctx context.Context, req validator.BoolRequest, resp *validator.BoolResponse) { + validateReq := AlsoRequiresValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AlsoRequiresValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av AlsoRequiresValidator) ValidateFloat64(ctx context.Context, req validator.Float64Request, resp *validator.Float64Response) { + validateReq := AlsoRequiresValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AlsoRequiresValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av AlsoRequiresValidator) ValidateInt64(ctx context.Context, req validator.Int64Request, resp *validator.Int64Response) { + validateReq := AlsoRequiresValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AlsoRequiresValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) 
+} + +func (av AlsoRequiresValidator) ValidateList(ctx context.Context, req validator.ListRequest, resp *validator.ListResponse) { + validateReq := AlsoRequiresValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AlsoRequiresValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av AlsoRequiresValidator) ValidateMap(ctx context.Context, req validator.MapRequest, resp *validator.MapResponse) { + validateReq := AlsoRequiresValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AlsoRequiresValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av AlsoRequiresValidator) ValidateNumber(ctx context.Context, req validator.NumberRequest, resp *validator.NumberResponse) { + validateReq := AlsoRequiresValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AlsoRequiresValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av AlsoRequiresValidator) ValidateObject(ctx context.Context, req validator.ObjectRequest, resp *validator.ObjectResponse) { + validateReq := AlsoRequiresValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AlsoRequiresValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) 
+} + +func (av AlsoRequiresValidator) ValidateSet(ctx context.Context, req validator.SetRequest, resp *validator.SetResponse) { + validateReq := AlsoRequiresValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AlsoRequiresValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av AlsoRequiresValidator) ValidateString(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + validateReq := AlsoRequiresValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AlsoRequiresValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/at_least_one_of.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/at_least_one_of.go new file mode 100644 index 0000000000..d98bc87950 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/at_least_one_of.go @@ -0,0 +1,221 @@ +package schemavalidator + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// This type of validator must satisfy all types. 
+var ( + _ validator.Bool = AtLeastOneOfValidator{} + _ validator.Float64 = AtLeastOneOfValidator{} + _ validator.Int64 = AtLeastOneOfValidator{} + _ validator.List = AtLeastOneOfValidator{} + _ validator.Map = AtLeastOneOfValidator{} + _ validator.Number = AtLeastOneOfValidator{} + _ validator.Object = AtLeastOneOfValidator{} + _ validator.Set = AtLeastOneOfValidator{} + _ validator.String = AtLeastOneOfValidator{} +) + +// AtLeastOneOfValidator is the underlying struct implementing AtLeastOneOf. +type AtLeastOneOfValidator struct { + PathExpressions path.Expressions +} + +type AtLeastOneOfValidatorRequest struct { + Config tfsdk.Config + ConfigValue attr.Value + Path path.Path + PathExpression path.Expression +} + +type AtLeastOneOfValidatorResponse struct { + Diagnostics diag.Diagnostics +} + +func (av AtLeastOneOfValidator) Description(ctx context.Context) string { + return av.MarkdownDescription(ctx) +} + +func (av AtLeastOneOfValidator) MarkdownDescription(_ context.Context) string { + return fmt.Sprintf("Ensure that at least one attribute from this collection is set: %s", av.PathExpressions) +} + +func (av AtLeastOneOfValidator) Validate(ctx context.Context, req AtLeastOneOfValidatorRequest, res *AtLeastOneOfValidatorResponse) { + // If attribute configuration is not null, validator already succeeded. + if !req.ConfigValue.IsNull() { + return + } + + expressions := req.PathExpression.MergeExpressions(av.PathExpressions...) + + for _, expression := range expressions { + matchedPaths, diags := req.Config.PathMatches(ctx, expression) + + res.Diagnostics.Append(diags...) + + // Collect all errors + if diags.HasError() { + continue + } + + for _, mp := range matchedPaths { + var mpVal attr.Value + diags := req.Config.GetAttribute(ctx, mp, &mpVal) + res.Diagnostics.Append(diags...) 
+ + // Collect all errors + if diags.HasError() { + continue + } + + // Delay validation until all involved attribute have a known value + if mpVal.IsUnknown() { + return + } + + if !mpVal.IsNull() { + return + } + } + } + + res.Diagnostics.Append(validatordiag.InvalidAttributeCombinationDiagnostic( + req.Path, + fmt.Sprintf("At least one attribute out of %s must be specified", expressions), + )) +} + +func (av AtLeastOneOfValidator) ValidateBool(ctx context.Context, req validator.BoolRequest, resp *validator.BoolResponse) { + validateReq := AtLeastOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AtLeastOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av AtLeastOneOfValidator) ValidateFloat64(ctx context.Context, req validator.Float64Request, resp *validator.Float64Response) { + validateReq := AtLeastOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AtLeastOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av AtLeastOneOfValidator) ValidateInt64(ctx context.Context, req validator.Int64Request, resp *validator.Int64Response) { + validateReq := AtLeastOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AtLeastOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) 
+} + +func (av AtLeastOneOfValidator) ValidateList(ctx context.Context, req validator.ListRequest, resp *validator.ListResponse) { + validateReq := AtLeastOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AtLeastOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av AtLeastOneOfValidator) ValidateMap(ctx context.Context, req validator.MapRequest, resp *validator.MapResponse) { + validateReq := AtLeastOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AtLeastOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av AtLeastOneOfValidator) ValidateNumber(ctx context.Context, req validator.NumberRequest, resp *validator.NumberResponse) { + validateReq := AtLeastOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AtLeastOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av AtLeastOneOfValidator) ValidateObject(ctx context.Context, req validator.ObjectRequest, resp *validator.ObjectResponse) { + validateReq := AtLeastOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AtLeastOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) 
+} + +func (av AtLeastOneOfValidator) ValidateSet(ctx context.Context, req validator.SetRequest, resp *validator.SetResponse) { + validateReq := AtLeastOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AtLeastOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av AtLeastOneOfValidator) ValidateString(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + validateReq := AtLeastOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &AtLeastOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/conflicts_with.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/conflicts_with.go new file mode 100644 index 0000000000..dd06138660 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/conflicts_with.go @@ -0,0 +1,225 @@ +package schemavalidator + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// This type of validator must satisfy all types. 
+var ( + _ validator.Bool = ConflictsWithValidator{} + _ validator.Float64 = ConflictsWithValidator{} + _ validator.Int64 = ConflictsWithValidator{} + _ validator.List = ConflictsWithValidator{} + _ validator.Map = ConflictsWithValidator{} + _ validator.Number = ConflictsWithValidator{} + _ validator.Object = ConflictsWithValidator{} + _ validator.Set = ConflictsWithValidator{} + _ validator.String = ConflictsWithValidator{} +) + +// ConflictsWithValidator is the underlying struct implementing ConflictsWith. +type ConflictsWithValidator struct { + PathExpressions path.Expressions +} + +type ConflictsWithValidatorRequest struct { + Config tfsdk.Config + ConfigValue attr.Value + Path path.Path + PathExpression path.Expression +} + +type ConflictsWithValidatorResponse struct { + Diagnostics diag.Diagnostics +} + +func (av ConflictsWithValidator) Description(ctx context.Context) string { + return av.MarkdownDescription(ctx) +} + +func (av ConflictsWithValidator) MarkdownDescription(_ context.Context) string { + return fmt.Sprintf("Ensure that if an attribute is set, these are not set: %q", av.PathExpressions) +} + +func (av ConflictsWithValidator) Validate(ctx context.Context, req ConflictsWithValidatorRequest, res *ConflictsWithValidatorResponse) { + // If attribute configuration is null, it cannot conflict with others + if req.ConfigValue.IsNull() { + return + } + + expressions := req.PathExpression.MergeExpressions(av.PathExpressions...) + + for _, expression := range expressions { + matchedPaths, diags := req.Config.PathMatches(ctx, expression) + + res.Diagnostics.Append(diags...) + + // Collect all errors + if diags.HasError() { + continue + } + + for _, mp := range matchedPaths { + // If the user specifies the same attribute this validator is applied to, + // also as part of the input, skip it + if mp.Equal(req.Path) { + continue + } + + var mpVal attr.Value + diags := req.Config.GetAttribute(ctx, mp, &mpVal) + res.Diagnostics.Append(diags...) 
+ + // Collect all errors + if diags.HasError() { + continue + } + + // Delay validation until all involved attribute have a known value + if mpVal.IsUnknown() { + return + } + + if !mpVal.IsNull() { + res.Diagnostics.Append(validatordiag.InvalidAttributeCombinationDiagnostic( + req.Path, + fmt.Sprintf("Attribute %q cannot be specified when %q is specified", mp, req.Path), + )) + } + } + } +} + +func (av ConflictsWithValidator) ValidateBool(ctx context.Context, req validator.BoolRequest, resp *validator.BoolResponse) { + validateReq := ConflictsWithValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ConflictsWithValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av ConflictsWithValidator) ValidateFloat64(ctx context.Context, req validator.Float64Request, resp *validator.Float64Response) { + validateReq := ConflictsWithValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ConflictsWithValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av ConflictsWithValidator) ValidateInt64(ctx context.Context, req validator.Int64Request, resp *validator.Int64Response) { + validateReq := ConflictsWithValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ConflictsWithValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) 
+} + +func (av ConflictsWithValidator) ValidateList(ctx context.Context, req validator.ListRequest, resp *validator.ListResponse) { + validateReq := ConflictsWithValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ConflictsWithValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av ConflictsWithValidator) ValidateMap(ctx context.Context, req validator.MapRequest, resp *validator.MapResponse) { + validateReq := ConflictsWithValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ConflictsWithValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av ConflictsWithValidator) ValidateNumber(ctx context.Context, req validator.NumberRequest, resp *validator.NumberResponse) { + validateReq := ConflictsWithValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ConflictsWithValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av ConflictsWithValidator) ValidateObject(ctx context.Context, req validator.ObjectRequest, resp *validator.ObjectResponse) { + validateReq := ConflictsWithValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ConflictsWithValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) 
+} + +func (av ConflictsWithValidator) ValidateSet(ctx context.Context, req validator.SetRequest, resp *validator.SetResponse) { + validateReq := ConflictsWithValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ConflictsWithValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av ConflictsWithValidator) ValidateString(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + validateReq := ConflictsWithValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ConflictsWithValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/doc.go new file mode 100644 index 0000000000..31c085b079 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/doc.go @@ -0,0 +1,11 @@ +// Package schemavalidator provides validators to express relationships between +// multiple attributes within the schema of a resource, data source, or provider. +// For example, checking that an attribute is present when another is present, or vice-versa. +// +// These validators are implemented on a starting attribute, where +// relationships can be expressed as absolute paths to others or relative to +// the starting attribute. 
For multiple attribute validators that are defined +// outside the schema, which may be easier to implement in provider code +// generation situations or suit provider code preferences differently, refer +// to the datasourcevalidator, providervalidator, or resourcevalidator package. +package schemavalidator diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/exactly_one_of.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/exactly_one_of.go new file mode 100644 index 0000000000..38d1f0295f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator/exactly_one_of.go @@ -0,0 +1,245 @@ +package schemavalidator + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// This type of validator must satisfy all types. +var ( + _ validator.Bool = ExactlyOneOfValidator{} + _ validator.Float64 = ExactlyOneOfValidator{} + _ validator.Int64 = ExactlyOneOfValidator{} + _ validator.List = ExactlyOneOfValidator{} + _ validator.Map = ExactlyOneOfValidator{} + _ validator.Number = ExactlyOneOfValidator{} + _ validator.Object = ExactlyOneOfValidator{} + _ validator.Set = ExactlyOneOfValidator{} + _ validator.String = ExactlyOneOfValidator{} +) + +// ExactlyOneOfValidator is the underlying struct implementing ExactlyOneOf. 
+type ExactlyOneOfValidator struct { + PathExpressions path.Expressions +} + +type ExactlyOneOfValidatorRequest struct { + Config tfsdk.Config + ConfigValue attr.Value + Path path.Path + PathExpression path.Expression +} + +type ExactlyOneOfValidatorResponse struct { + Diagnostics diag.Diagnostics +} + +func (av ExactlyOneOfValidator) Description(ctx context.Context) string { + return av.MarkdownDescription(ctx) +} + +func (av ExactlyOneOfValidator) MarkdownDescription(_ context.Context) string { + return fmt.Sprintf("Ensure that one and only one attribute from this collection is set: %q", av.PathExpressions) +} + +func (av ExactlyOneOfValidator) Validate(ctx context.Context, req ExactlyOneOfValidatorRequest, res *ExactlyOneOfValidatorResponse) { + count := 0 + expressions := req.PathExpression.MergeExpressions(av.PathExpressions...) + + // If current attribute is unknown, delay validation + if req.ConfigValue.IsUnknown() { + return + } + + // Now that we know the current attribute is known, check whether it is + // null to determine if it should contribute to the count. Later logic + // will remove a duplicate matching path, should it be included in the + // given expressions. + if !req.ConfigValue.IsNull() { + count++ + } + + for _, expression := range expressions { + matchedPaths, diags := req.Config.PathMatches(ctx, expression) + + res.Diagnostics.Append(diags...) + + // Collect all errors + if diags.HasError() { + continue + } + + for _, mp := range matchedPaths { + // If the user specifies the same attribute this validator is applied to, + // also as part of the input, skip it + if mp.Equal(req.Path) { + continue + } + + var mpVal attr.Value + diags := req.Config.GetAttribute(ctx, mp, &mpVal) + res.Diagnostics.Append(diags...) 
+ + // Collect all errors + if diags.HasError() { + continue + } + + // Delay validation until all involved attribute have a known value + if mpVal.IsUnknown() { + return + } + + if !mpVal.IsNull() { + count++ + } + } + } + + if count == 0 { + res.Diagnostics.Append(validatordiag.InvalidAttributeCombinationDiagnostic( + req.Path, + fmt.Sprintf("No attribute specified when one (and only one) of %s is required", expressions), + )) + } + + if count > 1 { + res.Diagnostics.Append(validatordiag.InvalidAttributeCombinationDiagnostic( + req.Path, + fmt.Sprintf("%d attributes specified when one (and only one) of %s is required", count, expressions), + )) + } +} + +func (av ExactlyOneOfValidator) ValidateBool(ctx context.Context, req validator.BoolRequest, resp *validator.BoolResponse) { + validateReq := ExactlyOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ExactlyOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av ExactlyOneOfValidator) ValidateFloat64(ctx context.Context, req validator.Float64Request, resp *validator.Float64Response) { + validateReq := ExactlyOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ExactlyOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) 
+} + +func (av ExactlyOneOfValidator) ValidateInt64(ctx context.Context, req validator.Int64Request, resp *validator.Int64Response) { + validateReq := ExactlyOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ExactlyOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av ExactlyOneOfValidator) ValidateList(ctx context.Context, req validator.ListRequest, resp *validator.ListResponse) { + validateReq := ExactlyOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ExactlyOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av ExactlyOneOfValidator) ValidateMap(ctx context.Context, req validator.MapRequest, resp *validator.MapResponse) { + validateReq := ExactlyOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ExactlyOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av ExactlyOneOfValidator) ValidateNumber(ctx context.Context, req validator.NumberRequest, resp *validator.NumberResponse) { + validateReq := ExactlyOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ExactlyOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) 
+} + +func (av ExactlyOneOfValidator) ValidateObject(ctx context.Context, req validator.ObjectRequest, resp *validator.ObjectResponse) { + validateReq := ExactlyOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ExactlyOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av ExactlyOneOfValidator) ValidateSet(ctx context.Context, req validator.SetRequest, resp *validator.SetResponse) { + validateReq := ExactlyOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ExactlyOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) +} + +func (av ExactlyOneOfValidator) ValidateString(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + validateReq := ExactlyOneOfValidatorRequest{ + Config: req.Config, + ConfigValue: req.ConfigValue, + Path: req.Path, + PathExpression: req.PathExpression, + } + validateResp := &ExactlyOneOfValidatorResponse{} + + av.Validate(ctx, validateReq, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) 
+} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/all.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/all.go new file mode 100644 index 0000000000..661b8e085d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/all.go @@ -0,0 +1,54 @@ +package stringvalidator + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +// All returns a validator which ensures that any configured attribute value +// attribute value validates against all the given validators. +// +// Use of All is only necessary when used in conjunction with Any or AnyWithAllWarnings +// as the Validators field automatically applies a logical AND. +func All(validators ...validator.String) validator.String { + return allValidator{ + validators: validators, + } +} + +var _ validator.String = allValidator{} + +// allValidator implements the validator. +type allValidator struct { + validators []validator.String +} + +// Description describes the validation in plain text formatting. +func (v allValidator) Description(ctx context.Context) string { + var descriptions []string + + for _, subValidator := range v.validators { + descriptions = append(descriptions, subValidator.Description(ctx)) + } + + return fmt.Sprintf("Value must satisfy all of the validations: %s", strings.Join(descriptions, " + ")) +} + +// MarkdownDescription describes the validation in Markdown formatting. +func (v allValidator) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// ValidateString performs the validation. 
+func (v allValidator) ValidateString(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + for _, subValidator := range v.validators { + validateResp := &validator.StringResponse{} + + subValidator.ValidateString(ctx, req, validateResp) + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/also_requires.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/also_requires.go new file mode 100644 index 0000000000..1204b78196 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/also_requires.go @@ -0,0 +1,23 @@ +package stringvalidator + +import ( + "github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +// AlsoRequires checks that a set of path.Expression has a non-null value, +// if the current attribute or block also has a non-null value. +// +// This implements the validation logic declaratively within the schema. +// Refer to [datasourcevalidator.RequiredTogether], +// [providervalidator.RequiredTogether], or [resourcevalidator.RequiredTogether] +// for declaring this type of validation outside the schema definition. +// +// Relative path.Expression will be resolved using the attribute or block +// being validated. 
+func AlsoRequires(expressions ...path.Expression) validator.String { + return schemavalidator.AlsoRequiresValidator{ + PathExpressions: expressions, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/any.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/any.go new file mode 100644 index 0000000000..189bf3a8ad --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/any.go @@ -0,0 +1,62 @@ +package stringvalidator + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +// Any returns a validator which ensures that any configured attribute value +// passes at least one of the given validators. +// +// To prevent practitioner confusion should non-passing validators have +// conflicting logic, only warnings from the passing validator are returned. +// Use AnyWithAllWarnings() to return warnings from non-passing validators +// as well. +func Any(validators ...validator.String) validator.String { + return anyValidator{ + validators: validators, + } +} + +var _ validator.String = anyValidator{} + +// anyValidator implements the validator. +type anyValidator struct { + validators []validator.String +} + +// Description describes the validation in plain text formatting. +func (v anyValidator) Description(ctx context.Context) string { + var descriptions []string + + for _, subValidator := range v.validators { + descriptions = append(descriptions, subValidator.Description(ctx)) + } + + return fmt.Sprintf("Value must satisfy at least one of the validations: %s", strings.Join(descriptions, " + ")) +} + +// MarkdownDescription describes the validation in Markdown formatting. 
+func (v anyValidator) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// ValidateString performs the validation. +func (v anyValidator) ValidateString(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + for _, subValidator := range v.validators { + validateResp := &validator.StringResponse{} + + subValidator.ValidateString(ctx, req, validateResp) + + if !validateResp.Diagnostics.HasError() { + resp.Diagnostics = validateResp.Diagnostics + + return + } + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/any_with_all_warnings.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/any_with_all_warnings.go new file mode 100644 index 0000000000..48e8c63dbb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/any_with_all_warnings.go @@ -0,0 +1,64 @@ +package stringvalidator + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +// AnyWithAllWarnings returns a validator which ensures that any configured +// attribute value passes at least one of the given validators. This validator +// returns all warnings, including failed validators. +// +// Use Any() to return warnings only from the passing validator. +func AnyWithAllWarnings(validators ...validator.String) validator.String { + return anyWithAllWarningsValidator{ + validators: validators, + } +} + +var _ validator.String = anyWithAllWarningsValidator{} + +// anyWithAllWarningsValidator implements the validator. +type anyWithAllWarningsValidator struct { + validators []validator.String +} + +// Description describes the validation in plain text formatting. 
+func (v anyWithAllWarningsValidator) Description(ctx context.Context) string { + var descriptions []string + + for _, subValidator := range v.validators { + descriptions = append(descriptions, subValidator.Description(ctx)) + } + + return fmt.Sprintf("Value must satisfy at least one of the validations: %s", strings.Join(descriptions, " + ")) +} + +// MarkdownDescription describes the validation in Markdown formatting. +func (v anyWithAllWarningsValidator) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// ValidateString performs the validation. +func (v anyWithAllWarningsValidator) ValidateString(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + anyValid := false + + for _, subValidator := range v.validators { + validateResp := &validator.StringResponse{} + + subValidator.ValidateString(ctx, req, validateResp) + + if !validateResp.Diagnostics.HasError() { + anyValid = true + } + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } + + if anyValid { + resp.Diagnostics = resp.Diagnostics.Warnings() + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/at_least_one_of.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/at_least_one_of.go new file mode 100644 index 0000000000..e731aba4b0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/at_least_one_of.go @@ -0,0 +1,24 @@ +package stringvalidator + +import ( + "github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +// AtLeastOneOf checks that of a set of path.Expression, +// including the attribute this validator is applied to, +// at least one has a non-null value. 
+// +// This implements the validation logic declaratively within the tfsdk.Schema. +// Refer to [datasourcevalidator.AtLeastOneOf], +// [providervalidator.AtLeastOneOf], or [resourcevalidator.AtLeastOneOf] +// for declaring this type of validation outside the schema definition. +// +// Any relative path.Expression will be resolved using the attribute being +// validated. +func AtLeastOneOf(expressions ...path.Expression) validator.String { + return schemavalidator.AtLeastOneOfValidator{ + PathExpressions: expressions, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/conflicts_with.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/conflicts_with.go new file mode 100644 index 0000000000..2565d1c009 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/conflicts_with.go @@ -0,0 +1,24 @@ +package stringvalidator + +import ( + "github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +// ConflictsWith checks that a set of path.Expression, +// including the attribute the validator is applied to, +// do not have a value simultaneously. +// +// This implements the validation logic declaratively within the schema. +// Refer to [datasourcevalidator.Conflicting], +// [providervalidator.Conflicting], or [resourcevalidator.Conflicting] +// for declaring this type of validation outside the schema definition. +// +// Relative path.Expression will be resolved using the attribute being +// validated. 
+func ConflictsWith(expressions ...path.Expression) validator.String { + return schemavalidator.ConflictsWithValidator{ + PathExpressions: expressions, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/doc.go new file mode 100644 index 0000000000..1ba12e16a0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/doc.go @@ -0,0 +1,2 @@ +// Package stringvalidator provides validators for types.String attributes. +package stringvalidator diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/exactly_one_of.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/exactly_one_of.go new file mode 100644 index 0000000000..c54565b5a6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/exactly_one_of.go @@ -0,0 +1,25 @@ +package stringvalidator + +import ( + "github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +// ExactlyOneOf checks that of a set of path.Expression, +// including the attribute the validator is applied to, +// one and only one attribute has a value. +// It will also cause a validation error if none are specified. +// +// This implements the validation logic declaratively within the schema. +// Refer to [datasourcevalidator.ExactlyOneOf], +// [providervalidator.ExactlyOneOf], or [resourcevalidator.ExactlyOneOf] +// for declaring this type of validation outside the schema definition. 
+// +// Relative path.Expression will be resolved using the attribute being +// validated. +func ExactlyOneOf(expressions ...path.Expression) validator.String { + return schemavalidator.ExactlyOneOfValidator{ + PathExpressions: expressions, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/length_at_least.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/length_at_least.go new file mode 100644 index 0000000000..dfd07ce6ee --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/length_at_least.go @@ -0,0 +1,62 @@ +package stringvalidator + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" +) + +var _ validator.String = lengthAtLeastValidator{} + +// stringLenAtLeastValidator validates that a string Attribute's length is at least a certain value. +type lengthAtLeastValidator struct { + minLength int +} + +// Description describes the validation in plain text formatting. +func (validator lengthAtLeastValidator) Description(_ context.Context) string { + return fmt.Sprintf("string length must be at least %d", validator.minLength) +} + +// MarkdownDescription describes the validation in Markdown formatting. +func (validator lengthAtLeastValidator) MarkdownDescription(ctx context.Context) string { + return validator.Description(ctx) +} + +// Validate performs the validation. 
+func (v lengthAtLeastValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue.ValueString() + + if l := len(value); l < v.minLength { + response.Diagnostics.Append(validatordiag.InvalidAttributeValueLengthDiagnostic( + request.Path, + v.Description(ctx), + fmt.Sprintf("%d", l), + )) + + return + } +} + +// LengthAtLeast returns an validator which ensures that any configured +// attribute value is of single-byte character length greater than or equal +// to the given minimum. Null (unconfigured) and unknown (known after apply) +// values are skipped. +// +// Use UTF8LengthAtLeast for checking multiple-byte characters. +func LengthAtLeast(minLength int) validator.String { + if minLength < 0 { + return nil + } + + return lengthAtLeastValidator{ + minLength: minLength, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/length_at_most.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/length_at_most.go new file mode 100644 index 0000000000..b7c4415c1a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/length_at_most.go @@ -0,0 +1,61 @@ +package stringvalidator + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +var _ validator.String = lengthAtMostValidator{} + +// lengthAtMostValidator validates that a string Attribute's length is at most a certain value. +type lengthAtMostValidator struct { + maxLength int +} + +// Description describes the validation in plain text formatting. 
+func (validator lengthAtMostValidator) Description(_ context.Context) string { + return fmt.Sprintf("string length must be at most %d", validator.maxLength) +} + +// MarkdownDescription describes the validation in Markdown formatting. +func (validator lengthAtMostValidator) MarkdownDescription(ctx context.Context) string { + return validator.Description(ctx) +} + +// Validate performs the validation. +func (v lengthAtMostValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue.ValueString() + + if l := len(value); l > v.maxLength { + response.Diagnostics.Append(validatordiag.InvalidAttributeValueLengthDiagnostic( + request.Path, + v.Description(ctx), + fmt.Sprintf("%d", l), + )) + + return + } +} + +// LengthAtMost returns an validator which ensures that any configured +// attribute value is of single-byte character length less than or equal +// to the given maximum. Null (unconfigured) and unknown (known after apply) +// values are skipped. +// +// Use UTF8LengthAtMost for checking multiple-byte characters. 
+func LengthAtMost(maxLength int) validator.String { + if maxLength < 0 { + return nil + } + + return lengthAtMostValidator{ + maxLength: maxLength, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/length_between.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/length_between.go new file mode 100644 index 0000000000..bdb5701dc5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/length_between.go @@ -0,0 +1,62 @@ +package stringvalidator + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +var _ validator.String = lengthBetweenValidator{} + +// stringLenBetweenValidator validates that a string Attribute's length is in a range. +type lengthBetweenValidator struct { + minLength, maxLength int +} + +// Description describes the validation in plain text formatting. +func (validator lengthBetweenValidator) Description(_ context.Context) string { + return fmt.Sprintf("string length must be between %d and %d", validator.minLength, validator.maxLength) +} + +// MarkdownDescription describes the validation in Markdown formatting. +func (validator lengthBetweenValidator) MarkdownDescription(ctx context.Context) string { + return validator.Description(ctx) +} + +// Validate performs the validation. 
+func (v lengthBetweenValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue.ValueString() + + if l := len(value); l < v.minLength || l > v.maxLength { + response.Diagnostics.Append(validatordiag.InvalidAttributeValueLengthDiagnostic( + request.Path, + v.Description(ctx), + fmt.Sprintf("%d", l), + )) + + return + } +} + +// LengthBetween returns an validator which ensures that any configured +// attribute value is of single-byte character length greater than the given +// minimum and less than the given maximum. Null (unconfigured) and unknown +// (known after apply) values are skipped. +// +// Use UTF8LengthBetween for checking multiple-byte characters. +func LengthBetween(minLength, maxLength int) validator.String { + if minLength < 0 || maxLength < 0 || minLength > maxLength { + return nil + } + + return lengthBetweenValidator{ + minLength: minLength, + maxLength: maxLength, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/none_of.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/none_of.go new file mode 100644 index 0000000000..bdd547c178 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/none_of.go @@ -0,0 +1,62 @@ +package stringvalidator + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" +) + +var _ validator.String = noneOfValidator{} + +// noneOfValidator validates that the value does not match one of the values. 
+type noneOfValidator struct { + values []types.String +} + +func (v noneOfValidator) Description(ctx context.Context) string { + return v.MarkdownDescription(ctx) +} + +func (v noneOfValidator) MarkdownDescription(_ context.Context) string { + return fmt.Sprintf("value must be none of: %q", v.values) +} + +func (v noneOfValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue + + for _, otherValue := range v.values { + if !value.Equal(otherValue) { + continue + } + + response.Diagnostics.Append(validatordiag.InvalidAttributeValueMatchDiagnostic( + request.Path, + v.Description(ctx), + value.String(), + )) + + break + } +} + +// NoneOf checks that the String held in the attribute +// is none of the given `values`. +func NoneOf(values ...string) validator.String { + frameworkValues := make([]types.String, 0, len(values)) + + for _, value := range values { + frameworkValues = append(frameworkValues, types.StringValue(value)) + } + + return noneOfValidator{ + values: frameworkValues, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/none_of_case_insensitive.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/none_of_case_insensitive.go new file mode 100644 index 0000000000..461944da61 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/none_of_case_insensitive.go @@ -0,0 +1,61 @@ +package stringvalidator + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" +) + +var _ 
validator.String = noneOfCaseInsensitiveValidator{} + +// noneOfCaseInsensitiveValidator validates that the value matches one of expected values. +type noneOfCaseInsensitiveValidator struct { + values []types.String +} + +func (v noneOfCaseInsensitiveValidator) Description(ctx context.Context) string { + return v.MarkdownDescription(ctx) +} + +func (v noneOfCaseInsensitiveValidator) MarkdownDescription(_ context.Context) string { + return fmt.Sprintf("value must be none of: %q", v.values) +} + +func (v noneOfCaseInsensitiveValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue + + for _, otherValue := range v.values { + if strings.EqualFold(value.ValueString(), otherValue.ValueString()) { + response.Diagnostics.Append(validatordiag.InvalidAttributeValueMatchDiagnostic( + request.Path, + v.Description(ctx), + value.String(), + )) + + return + } + } +} + +// NoneOfCaseInsensitive checks that the String held in the attribute +// is none of the given `values`. 
+func NoneOfCaseInsensitive(values ...string) validator.String { + frameworkValues := make([]types.String, 0, len(values)) + + for _, value := range values { + frameworkValues = append(frameworkValues, types.StringValue(value)) + } + + return noneOfCaseInsensitiveValidator{ + values: frameworkValues, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/one_of.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/one_of.go new file mode 100644 index 0000000000..3eef1082c5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/one_of.go @@ -0,0 +1,60 @@ +package stringvalidator + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" +) + +var _ validator.String = oneOfValidator{} + +// oneOfValidator validates that the value matches one of expected values. 
+type oneOfValidator struct { + values []types.String +} + +func (v oneOfValidator) Description(ctx context.Context) string { + return v.MarkdownDescription(ctx) +} + +func (v oneOfValidator) MarkdownDescription(_ context.Context) string { + return fmt.Sprintf("value must be one of: %q", v.values) +} + +func (v oneOfValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue + + for _, otherValue := range v.values { + if value.Equal(otherValue) { + return + } + } + + response.Diagnostics.Append(validatordiag.InvalidAttributeValueMatchDiagnostic( + request.Path, + v.Description(ctx), + value.String(), + )) +} + +// OneOf checks that the String held in the attribute +// is none of the given `values`. +func OneOf(values ...string) validator.String { + frameworkValues := make([]types.String, 0, len(values)) + + for _, value := range values { + frameworkValues = append(frameworkValues, types.StringValue(value)) + } + + return oneOfValidator{ + values: frameworkValues, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/one_of_case_insensitive.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/one_of_case_insensitive.go new file mode 100644 index 0000000000..567e5dd3d7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/one_of_case_insensitive.go @@ -0,0 +1,61 @@ +package stringvalidator + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" +) + +var _ validator.String = 
oneOfCaseInsensitiveValidator{} + +// oneOfCaseInsensitiveValidator validates that the value matches one of expected values. +type oneOfCaseInsensitiveValidator struct { + values []types.String +} + +func (v oneOfCaseInsensitiveValidator) Description(ctx context.Context) string { + return v.MarkdownDescription(ctx) +} + +func (v oneOfCaseInsensitiveValidator) MarkdownDescription(_ context.Context) string { + return fmt.Sprintf("value must be one of: %q", v.values) +} + +func (v oneOfCaseInsensitiveValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue + + for _, otherValue := range v.values { + if strings.EqualFold(value.ValueString(), otherValue.ValueString()) { + return + } + } + + response.Diagnostics.Append(validatordiag.InvalidAttributeValueMatchDiagnostic( + request.Path, + v.Description(ctx), + value.String(), + )) +} + +// OneOfCaseInsensitive checks that the String held in the attribute +// is none of the given `values`. 
+func OneOfCaseInsensitive(values ...string) validator.String { + frameworkValues := make([]types.String, 0, len(values)) + + for _, value := range values { + frameworkValues = append(frameworkValues, types.StringValue(value)) + } + + return oneOfCaseInsensitiveValidator{ + values: frameworkValues, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/regex_matches.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/regex_matches.go new file mode 100644 index 0000000000..5ab9803a92 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/regex_matches.go @@ -0,0 +1,64 @@ +package stringvalidator + +import ( + "context" + "fmt" + "regexp" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +var _ validator.String = regexMatchesValidator{} + +// regexMatchesValidator validates that a string Attribute's value matches the specified regular expression. +type regexMatchesValidator struct { + regexp *regexp.Regexp + message string +} + +// Description describes the validation in plain text formatting. +func (validator regexMatchesValidator) Description(_ context.Context) string { + if validator.message != "" { + return validator.message + } + return fmt.Sprintf("value must match regular expression '%s'", validator.regexp) +} + +// MarkdownDescription describes the validation in Markdown formatting. +func (validator regexMatchesValidator) MarkdownDescription(ctx context.Context) string { + return validator.Description(ctx) +} + +// Validate performs the validation. 
+func (v regexMatchesValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue.ValueString() + + if !v.regexp.MatchString(value) { + response.Diagnostics.Append(validatordiag.InvalidAttributeValueMatchDiagnostic( + request.Path, + v.Description(ctx), + value, + )) + } +} + +// RegexMatches returns an AttributeValidator which ensures that any configured +// attribute value: +// +// - Is a string. +// - Matches the given regular expression https://github.com/google/re2/wiki/Syntax. +// +// Null (unconfigured) and unknown (known after apply) values are skipped. +// Optionally an error message can be provided to return something friendlier +// than "value must match regular expression 'regexp'". +func RegexMatches(regexp *regexp.Regexp, message string) validator.String { + return regexMatchesValidator{ + regexp: regexp, + message: message, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/utf8_length_at_least.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/utf8_length_at_least.go new file mode 100644 index 0000000000..8cca7cd182 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/utf8_length_at_least.go @@ -0,0 +1,65 @@ +package stringvalidator + +import ( + "context" + "fmt" + "unicode/utf8" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" +) + +var _ validator.String = utf8LengthAtLeastValidator{} + +// utf8LengthAtLeastValidator implements the validator. 
+type utf8LengthAtLeastValidator struct { + minLength int +} + +// Description describes the validation in plain text formatting. +func (validator utf8LengthAtLeastValidator) Description(_ context.Context) string { + return fmt.Sprintf("UTF-8 character count must be at least %d", validator.minLength) +} + +// MarkdownDescription describes the validation in Markdown formatting. +func (validator utf8LengthAtLeastValidator) MarkdownDescription(ctx context.Context) string { + return validator.Description(ctx) +} + +// Validate performs the validation. +func (v utf8LengthAtLeastValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue.ValueString() + + count := utf8.RuneCountInString(value) + + if count < v.minLength { + response.Diagnostics.Append(validatordiag.InvalidAttributeValueLengthDiagnostic( + request.Path, + v.Description(ctx), + fmt.Sprintf("%d", count), + )) + + return + } +} + +// UTF8LengthAtLeast returns an validator which ensures that any configured +// attribute value is of UTF-8 character count greater than or equal to the +// given minimum. Null (unconfigured) and unknown (known after apply) values +// are skipped. +// +// Use LengthAtLeast for checking single-byte character counts. 
+func UTF8LengthAtLeast(minLength int) validator.String { + if minLength < 0 { + return nil + } + + return utf8LengthAtLeastValidator{ + minLength: minLength, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/utf8_length_at_most.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/utf8_length_at_most.go new file mode 100644 index 0000000000..60364fc7e7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/utf8_length_at_most.go @@ -0,0 +1,65 @@ +package stringvalidator + +import ( + "context" + "fmt" + "unicode/utf8" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" +) + +var _ validator.String = utf8LengthAtMostValidator{} + +// utf8LengthAtMostValidator implements the validator. +type utf8LengthAtMostValidator struct { + maxLength int +} + +// Description describes the validation in plain text formatting. +func (validator utf8LengthAtMostValidator) Description(_ context.Context) string { + return fmt.Sprintf("UTF-8 character count must be at most %d", validator.maxLength) +} + +// MarkdownDescription describes the validation in Markdown formatting. +func (validator utf8LengthAtMostValidator) MarkdownDescription(ctx context.Context) string { + return validator.Description(ctx) +} + +// Validate performs the validation. 
+func (v utf8LengthAtMostValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue.ValueString() + + count := utf8.RuneCountInString(value) + + if count > v.maxLength { + response.Diagnostics.Append(validatordiag.InvalidAttributeValueLengthDiagnostic( + request.Path, + v.Description(ctx), + fmt.Sprintf("%d", count), + )) + + return + } +} + +// UTF8LengthAtMost returns an validator which ensures that any configured +// attribute value is of UTF-8 character count less than or equal to the +// given maximum. Null (unconfigured) and unknown (known after apply) values +// are skipped. +// +// Use LengthAtMost for checking single-byte character counts. +func UTF8LengthAtMost(maxLength int) validator.String { + if maxLength < 0 { + return nil + } + + return utf8LengthAtMostValidator{ + maxLength: maxLength, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/utf8_length_between.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/utf8_length_between.go new file mode 100644 index 0000000000..adfe1a1c9b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator/utf8_length_between.go @@ -0,0 +1,67 @@ +package stringvalidator + +import ( + "context" + "fmt" + "unicode/utf8" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" +) + +var _ validator.String = utf8LengthBetweenValidator{} + +// utf8LengthBetweenValidator implements the validator. +type utf8LengthBetweenValidator struct { + maxLength int + minLength int +} + +// Description describes the validation in plain text formatting. 
+func (v utf8LengthBetweenValidator) Description(_ context.Context) string { + return fmt.Sprintf("UTF-8 character count must be between %d and %d", v.minLength, v.maxLength) +} + +// MarkdownDescription describes the validation in Markdown formatting. +func (v utf8LengthBetweenValidator) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// Validate performs the validation. +func (v utf8LengthBetweenValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue.ValueString() + + count := utf8.RuneCountInString(value) + + if count < v.minLength || count > v.maxLength { + response.Diagnostics.Append(validatordiag.InvalidAttributeValueLengthDiagnostic( + request.Path, + v.Description(ctx), + fmt.Sprintf("%d", count), + )) + + return + } +} + +// UTF8LengthBetween returns an validator which ensures that any configured +// attribute value is of UTF-8 character count greater than or equal to the +// given minimum and less than or equal to the given maximum. Null +// (unconfigured) and unknown (known after apply) values are skipped. +// +// Use LengthBetween for checking single-byte character counts. 
+func UTF8LengthBetween(minLength int, maxLength int) validator.String { + if minLength < 0 || maxLength < 0 || minLength > maxLength { + return nil + } + + return utf8LengthBetweenValidator{ + maxLength: maxLength, + minLength: minLength, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/attr/xattr/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/attr/xattr/doc.go new file mode 100644 index 0000000000..2405695725 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/attr/xattr/doc.go @@ -0,0 +1,3 @@ +// Package xattr contains additional interfaces for attr types. This package +// is separate from the core attr package to prevent import cycles. +package xattr diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/attr/xattr/type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/attr/xattr/type.go new file mode 100644 index 0000000000..79be3604d7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/attr/xattr/type.go @@ -0,0 +1,22 @@ +package xattr + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// TypeWithValidate extends the attr.Type interface to include a Validate +// method, used to bundle consistent validation logic with the Type. +type TypeWithValidate interface { + attr.Type + + // Validate returns any warnings or errors about the value that is + // being used to populate the Type. It is generally used to check the + // data format and ensure that it complies with the requirements of the + // Type. 
+ Validate(context.Context, tftypes.Value, path.Path) diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/config_validator.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/config_validator.go new file mode 100644 index 0000000000..33918cbf9c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/config_validator.go @@ -0,0 +1,25 @@ +package datasource + +import "context" + +// ConfigValidator describes reusable data source configuration validation functionality. +type ConfigValidator interface { + // Description describes the validation in plain text formatting. + // + // This information may be automatically added to data source plain text + // descriptions by external tooling. + Description(context.Context) string + + // MarkdownDescription describes the validation in Markdown formatting. + // + // This information may be automatically added to data source Markdown + // descriptions by external tooling. + MarkdownDescription(context.Context) string + + // ValidateDataSource performs the validation. + // + // This method name is separate from the provider.ConfigValidator + // interface ValidateProvider method name and resource.ConfigValidator + // interface ValidateResource method name to allow generic validators. 
+ ValidateDataSource(context.Context, ValidateConfigRequest, *ValidateConfigResponse) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/configure.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/configure.go new file mode 100644 index 0000000000..d2212e9661 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/configure.go @@ -0,0 +1,31 @@ +package datasource + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +// ConfigureRequest represents a request for the provider to configure a data +// source, i.e., set provider-level data or clients. An instance of this +// request struct is supplied as an argument to the DataSource type Configure +// method. +type ConfigureRequest struct { + // ProviderData is the data set in the + // [provider.ConfigureResponse.DataSourceData] field. This data is + // provider-specifc and therefore can contain any necessary remote system + // clients, custom provider data, or anything else pertinent to the + // functionality of the DataSource. + // + // This data is only set after the ConfigureProvider RPC has been called + // by Terraform. + ProviderData any +} + +// ConfigureResponse represents a response to a ConfigureRequest. An +// instance of this response struct is supplied as an argument to the +// DataSource type Configure method. +type ConfigureResponse struct { + // Diagnostics report errors or warnings related to configuring of the + // Datasource. An empty slice indicates a successful operation with no + // warnings or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/data_source.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/data_source.go new file mode 100644 index 0000000000..8229c54a95 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/data_source.go @@ -0,0 +1,74 @@ +package datasource + +import ( + "context" +) + +// DataSource represents an instance of a data source type. This is the core +// interface that all data sources must implement. +// +// Data sources can optionally implement these additional concepts: +// +// - Configure: Include provider-level data or clients. +// - Validation: Schema-based or entire configuration +// via DataSourceWithConfigValidators or DataSourceWithValidateConfig. +type DataSource interface { + // Metadata should return the full name of the data source, such as + // examplecloud_thing. + Metadata(context.Context, MetadataRequest, *MetadataResponse) + + // Schema should return the schema for this data source. + Schema(context.Context, SchemaRequest, *SchemaResponse) + + // Read is called when the provider must read data source values in + // order to update state. Config values should be read from the + // ReadRequest and new state values set on the ReadResponse. + Read(context.Context, ReadRequest, *ReadResponse) +} + +// DataSourceWithConfigure is an interface type that extends DataSource to +// include a method which the framework will automatically call so provider +// developers have the opportunity to setup any necessary provider-level data +// or clients in the DataSource type. +// +// This method is intended to replace the provider.DataSourceType type +// NewDataSource method in a future release. 
+type DataSourceWithConfigure interface { + DataSource + + // Configure enables provider-level data or clients to be set in the + // provider-defined DataSource type. It is separately executed for each + // ReadDataSource RPC. + Configure(context.Context, ConfigureRequest, *ConfigureResponse) +} + +// DataSourceWithConfigValidators is an interface type that extends DataSource to include declarative validations. +// +// Declaring validation using this methodology simplifies implmentation of +// reusable functionality. These also include descriptions, which can be used +// for automating documentation. +// +// Validation will include ConfigValidators and ValidateConfig, if both are +// implemented, in addition to any Attribute or Type validation. +type DataSourceWithConfigValidators interface { + DataSource + + // ConfigValidators returns a list of ConfigValidators. Each ConfigValidator's Validate method will be called when validating the data source. + ConfigValidators(context.Context) []ConfigValidator +} + +// DataSourceWithValidateConfig is an interface type that extends DataSource to include imperative validation. +// +// Declaring validation using this methodology simplifies one-off +// functionality that typically applies to a single data source. Any +// documentation of this functionality must be manually added into schema +// descriptions. +// +// Validation will include ConfigValidators and ValidateConfig, if both are +// implemented, in addition to any Attribute or Type validation. +type DataSourceWithValidateConfig interface { + DataSource + + // ValidateConfig performs the validation. 
+ ValidateConfig(context.Context, ValidateConfigRequest, *ValidateConfigResponse) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/doc.go new file mode 100644 index 0000000000..9c92917910 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/doc.go @@ -0,0 +1,16 @@ +// Package datasource contains all interfaces, request types, and response +// types for a data source implementation. +// +// In Terraform, a data source is a concept which enables provider developers +// to offer practitioners a read-only source of information, which is saved +// into the Terraform state and can be referenced by other parts of a +// configuration. Data sources are defined by a data source type/name, such as +// "examplecloud_thing", a schema representing the structure and data types of +// configuration and state, and read logic. +// +// The main starting point for implementations in this package is the +// DataSource type which represents an instance of a data source type that has +// its own configuration, read logic, and state. The DataSource implementations +// are referenced by a [provider.Provider] type DataSources method, which +// enables the data source for practitioner and testing usage. +package datasource diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/metadata.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/metadata.go new file mode 100644 index 0000000000..37aef828d6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/metadata.go @@ -0,0 +1,21 @@ +package datasource + +// MetadataRequest represents a request for the DataSource to return metadata, +// such as its type name. 
An instance of this request struct is supplied as an +// argument to the DataSource type Metadata method. +type MetadataRequest struct { + // ProviderTypeName is the string returned from + // [provider.MetadataResponse.TypeName], if the Provider type implements + // the Metadata method. This string should prefix the DataSource type name + // with an underscore in the response. + ProviderTypeName string +} + +// MetadataResponse represents a response to a MetadataRequest. An +// instance of this response struct is supplied as an argument to the +// DataSource type Metadata method. +type MetadataResponse struct { + // TypeName should be the full data source type, including the provider + // type prefix and an underscore. For example, examplecloud_thing. + TypeName string +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/read.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/read.go new file mode 100644 index 0000000000..30b67ce3e9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/read.go @@ -0,0 +1,37 @@ +package datasource + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ReadRequest represents a request for the provider to read a data +// source, i.e., update values in state according to the real state of the +// data source. An instance of this request struct is supplied as an argument +// to the data source's Read function. +type ReadRequest struct { + // Config is the configuration the user supplied for the data source. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config tfsdk.Config + + // ProviderMeta is metadata from the provider_meta block of the module. 
+ ProviderMeta tfsdk.Config +} + +// ReadResponse represents a response to a ReadRequest. An +// instance of this response struct is supplied as an argument to the data +// source's Read function, in which the provider should set values on the +// ReadResponse as appropriate. +type ReadResponse struct { + // State is the state of the data source following the Read operation. + // This field should be set during the resource's Read operation. + State tfsdk.State + + // Diagnostics report errors or warnings related to reading the data + // source. An empty slice indicates a successful operation with no + // warnings or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema.go new file mode 100644 index 0000000000..c9ce9a39be --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema.go @@ -0,0 +1,24 @@ +package datasource + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +// SchemaRequest represents a request for the DataSource to return its schema. +// An instance of this request struct is supplied as an argument to the +// DataSource type Schema method. +type SchemaRequest struct{} + +// SchemaResponse represents a response to a SchemaRequest. An instance of this +// response struct is supplied as an argument to the DataSource type Schema +// method. +type SchemaResponse struct { + // Schema is the schema of the data source. + Schema schema.Schema + + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/attribute.go new file mode 100644 index 0000000000..b7d1e91272 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/attribute.go @@ -0,0 +1,33 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" +) + +// Attribute define a value field inside the Schema. Implementations in this +// package include: +// - BoolAttribute +// - Float64Attribute +// - Int64Attribute +// - ListAttribute +// - MapAttribute +// - NumberAttribute +// - ObjectAttribute +// - SetAttribute +// - StringAttribute +// +// Additionally, the NestedAttribute interface extends Attribute with nested +// attributes. Only supported in protocol version 6. Implementations in this +// package include: +// - ListNestedAttribute +// - MapNestedAttribute +// - SetNestedAttribute +// - SingleNestedAttribute +// +// In practitioner configurations, an equals sign (=) is required to set +// the value. 
[Configuration Reference] +// +// [Configuration Reference]: https://developer.hashicorp.com/terraform/language/syntax/configuration +type Attribute interface { + fwschema.Attribute +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/block.go new file mode 100644 index 0000000000..f6e27e6426 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/block.go @@ -0,0 +1,27 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" +) + +// Block defines a structural field inside a Schema. Implementations in this +// package include: +// - ListNestedBlock +// - SetNestedBlock +// - SingleNestedBlock +// +// In practitioner configurations, an equals sign (=) cannot be used to set the +// value. Blocks are instead repeated as necessary, or require the use of +// [Dynamic Block Expressions]. +// +// Prefer NestedAttribute over Block. Blocks should typically be used for +// configuration compatibility with previously existing schemas from an older +// Terraform Plugin SDK. Efforts should be made to convert from Block to +// NestedAttribute as a breaking change for practitioners. 
+// +// [Dynamic Block Expressions]: https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks +// +// [Configuration Reference]: https://developer.hashicorp.com/terraform/language/syntax/configuration +type Block interface { + fwschema.Block +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/bool_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/bool_attribute.go new file mode 100644 index 0000000000..4e9f5df9a7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/bool_attribute.go @@ -0,0 +1,184 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = BoolAttribute{} + _ fwxschema.AttributeWithBoolValidators = BoolAttribute{} +) + +// BoolAttribute represents a schema attribute that is a boolean. When +// retrieving the value for this attribute, use types.Bool as the value type +// unless the CustomType field is set. +// +// Terraform configurations configure this attribute using expressions that +// return a boolean or directly via the true/false keywords. +// +// example_attribute = true +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type BoolAttribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.BoolType. 
When retrieving data, the basetypes.BoolValuable + // associated with this custom type must be used in place of types.Bool. + CustomType basetypes.BoolTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. 
The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. 
+ // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Bool +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a BoolAttribute. +func (a BoolAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// BoolValidators returns the Validators field value. +func (a BoolAttribute) BoolValidators() []validator.Bool { + return a.Validators +} + +// Equal returns true if the given Attribute is a BoolAttribute +// and all fields are equal. +func (a BoolAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(BoolAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a BoolAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a BoolAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a BoolAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.StringType or the CustomType field value if defined. +func (a BoolAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.BoolType +} + +// IsComputed returns the Computed field value. +func (a BoolAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a BoolAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. 
+func (a BoolAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a BoolAttribute) IsSensitive() bool { + return a.Sensitive +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/doc.go new file mode 100644 index 0000000000..f5542260f4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/doc.go @@ -0,0 +1,5 @@ +// Package schema contains all available schema functionality for data sources. +// Data source schemas define the structure and value types for configuration +// and state data. Schemas are implemented via the datasource.DataSource type +// Schema method. +package schema diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/float64_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/float64_attribute.go new file mode 100644 index 0000000000..cdb6e5c22d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/float64_attribute.go @@ -0,0 +1,187 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. 
+var ( + _ Attribute = Float64Attribute{} + _ fwxschema.AttributeWithFloat64Validators = Float64Attribute{} +) + +// Float64Attribute represents a schema attribute that is a 64-bit floating +// point number. When retrieving the value for this attribute, use +// types.Float64 as the value type unless the CustomType field is set. +// +// Use Int64Attribute for 64-bit integer attributes or NumberAttribute for +// 512-bit generic number attributes. +// +// Terraform configurations configure this attribute using expressions that +// return a number or directly via a floating point value. +// +// example_attribute = 123.45 +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type Float64Attribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.Float64Type. When retrieving data, the basetypes.Float64Valuable + // associated with this custom type must be used in place of types.Float64. + CustomType basetypes.Float64Typable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. 
Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). 
+ // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Float64 +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a Float64Attribute. +func (a Float64Attribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a Float64Attribute +// and all fields are equal. +func (a Float64Attribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(Float64Attribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// Float64Validators returns the Validators field value. +func (a Float64Attribute) Float64Validators() []validator.Float64 { + return a.Validators +} + +// GetDeprecationMessage returns the DeprecationMessage field value. 
+func (a Float64Attribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a Float64Attribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a Float64Attribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.Float64Type or the CustomType field value if defined. +func (a Float64Attribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.Float64Type +} + +// IsComputed returns the Computed field value. +func (a Float64Attribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a Float64Attribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a Float64Attribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. 
+func (a Float64Attribute) IsSensitive() bool { + return a.Sensitive +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/int64_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/int64_attribute.go new file mode 100644 index 0000000000..b3efd6169f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/int64_attribute.go @@ -0,0 +1,187 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = Int64Attribute{} + _ fwxschema.AttributeWithInt64Validators = Int64Attribute{} +) + +// Int64Attribute represents a schema attribute that is a 64-bit integer. +// When retrieving the value for this attribute, use types.Int64 as the value +// type unless the CustomType field is set. +// +// Use Float64Attribute for 64-bit floating point number attributes or +// NumberAttribute for 512-bit generic number attributes. +// +// Terraform configurations configure this attribute using expressions that +// return a number or directly via an integer value. +// +// example_attribute = 123 +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type Int64Attribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.Int64Type. 
When retrieving data, the basetypes.Int64Valuable + // associated with this custom type must be used in place of types.Int64. + CustomType basetypes.Int64Typable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. 
The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. 
+ // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Int64 +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a Int64Attribute. +func (a Int64Attribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a Int64Attribute +// and all fields are equal. +func (a Int64Attribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(Int64Attribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a Int64Attribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a Int64Attribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a Int64Attribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.Int64Type or the CustomType field value if defined. +func (a Int64Attribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.Int64Type +} + +// Int64Validators returns the Validators field value. +func (a Int64Attribute) Int64Validators() []validator.Int64 { + return a.Validators +} + +// IsComputed returns the Computed field value. +func (a Int64Attribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a Int64Attribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. 
+func (a Int64Attribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a Int64Attribute) IsSensitive() bool { + return a.Sensitive +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/list_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/list_attribute.go new file mode 100644 index 0000000000..01a323c433 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/list_attribute.go @@ -0,0 +1,197 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = ListAttribute{} + _ fwxschema.AttributeWithListValidators = ListAttribute{} +) + +// ListAttribute represents a schema attribute that is a list with a single +// element type. When retrieving the value for this attribute, use types.List +// as the value type unless the CustomType field is set. The ElementType field +// must be set. +// +// Use ListNestedAttribute if the underlying elements should be objects and +// require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a list or directly via square brace syntax. 
+// +// # list of strings +// example_attribute = ["first", "second"] +// +// Terraform configurations reference this attribute using expressions that +// accept a list or an element directly via square brace 0-based index syntax: +// +// # first known element +// .example_attribute[0] +type ListAttribute struct { + // ElementType is the type for all elements of the list. This field must be + // set. + ElementType attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ListType. When retrieving data, the basetypes.ListValuable + // associated with this custom type must be used in place of types.List. + CustomType basetypes.ListTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. 
It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. 
+ // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.List +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into a list +// index or an error. +func (a ListAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a ListAttribute +// and all fields are equal. +func (a ListAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(ListAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a ListAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a ListAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a ListAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.ListType or the CustomType field value if defined. 
+func (a ListAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.ListType{ + ElemType: a.ElementType, + } +} + +// IsComputed returns the Computed field value. +func (a ListAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a ListAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a ListAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a ListAttribute) IsSensitive() bool { + return a.Sensitive +} + +// ListValidators returns the Validators field value. +func (a ListAttribute) ListValidators() []validator.List { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/list_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/list_nested_attribute.go new file mode 100644 index 0000000000..a45379db25 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/list_nested_attribute.go @@ -0,0 +1,224 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. 
+var ( + _ NestedAttribute = ListNestedAttribute{} + _ fwxschema.AttributeWithListValidators = ListNestedAttribute{} +) + +// ListNestedAttribute represents an attribute that is a list of objects where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.List +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use ListAttribute if the underlying elements are of a single type and do +// not require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a list of objects or directly via square and curly brace syntax. +// +// # list of objects +// example_attribute = [ +// { +// nested_attribute = #... +// }, +// ] +// +// Terraform configurations reference this attribute using expressions that +// accept a list of objects or an element directly via square brace 0-based +// index syntax: +// +// # first known object +// .example_attribute[0] +// # first known object nested_attribute value +// .example_attribute[0].nested_attribute +type ListNestedAttribute struct { + // NestedObject is the underlying object that contains nested attributes. + // This field must be set. + NestedObject NestedAttributeObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.ListType of types.ObjectType. When retrieving data, the + // basetypes.ListValuable associated with this custom type must be used in + // place of types.List. + CustomType basetypes.ListTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. 
+ Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." 
+ // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.List +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is ElementKeyInt, otherwise returns an error. 
+func (a ListNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyInt) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to ListNestedAttribute", step) + } + + return a.NestedObject, nil +} + +// Equal returns true if the given Attribute is a ListNestedAttribute +// and all fields are equal. +func (a ListNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(ListNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a ListNestedAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a ListNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a ListNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (a ListNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return a.NestedObject +} + +// GetNestingMode always returns NestingModeList. +func (a ListNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeList +} + +// GetType returns ListType of ObjectType or CustomType. +func (a ListNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.ListType{ + ElemType: a.NestedObject.Type(), + } +} + +// IsComputed returns the Computed field value. +func (a ListNestedAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a ListNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. 
+func (a ListNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a ListNestedAttribute) IsSensitive() bool { + return a.Sensitive +} + +// ListValidators returns the Validators field value. +func (a ListNestedAttribute) ListValidators() []validator.List { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/list_nested_block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/list_nested_block.go new file mode 100644 index 0000000000..30c5e77922 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/list_nested_block.go @@ -0,0 +1,185 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Block = ListNestedBlock{} + _ fwxschema.BlockWithListValidators = ListNestedBlock{} +) + +// ListNestedBlock represents a block that is a list of objects where +// the object attributes can be fully defined, including further attributes +// or blocks. When retrieving the value for this block, use types.List +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. +// +// Prefer ListNestedAttribute over ListNestedBlock if the provider is +// using protocol version 6. Nested attributes allow practitioners to configure +// values directly with expressions. 
+// +// Terraform configurations configure this block repeatedly using curly brace +// syntax without an equals (=) sign or [Dynamic Block Expressions]. +// +// # list of blocks with two elements +// example_block { +// nested_attribute = #... +// } +// example_block { +// nested_attribute = #... +// } +// +// Terraform configurations reference this block using expressions that +// accept a list of objects or an element directly via square brace 0-based +// index syntax: +// +// # first known object +// .example_block[0] +// # first known object nested_attribute value +// .example_block[0].nested_attribute +// +// [Dynamic Block Expressions]: https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks +type ListNestedBlock struct { + // NestedObject is the underlying object that contains nested attributes or + // blocks. This field must be set. + NestedObject NestedBlockObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.ListType of types.ObjectType. When retrieving data, the + // basetypes.ListValuable associated with this custom type must be used in + // place of types.List. + CustomType basetypes.ListTypable + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. 
The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. 
+ // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.List +} + +// ApplyTerraform5AttributePathStep returns the NestedObject field value if step +// is ElementKeyInt, otherwise returns an error. +func (b ListNestedBlock) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyInt) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to ListNestedBlock", step) + } + + return b.NestedObject, nil +} + +// Equal returns true if the given Block is ListNestedBlock +// and all fields are equal. +func (b ListNestedBlock) Equal(o fwschema.Block) bool { + if _, ok := o.(ListNestedBlock); !ok { + return false + } + + return fwschema.BlocksEqual(b, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (b ListNestedBlock) GetDeprecationMessage() string { + return b.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (b ListNestedBlock) GetDescription() string { + return b.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (b ListNestedBlock) GetMarkdownDescription() string { + return b.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (b ListNestedBlock) GetNestedObject() fwschema.NestedBlockObject { + return b.NestedObject +} + +// GetNestingMode always returns BlockNestingModeList. +func (b ListNestedBlock) GetNestingMode() fwschema.BlockNestingMode { + return fwschema.BlockNestingModeList +} + +// ListValidators returns the Validators field value. +func (b ListNestedBlock) ListValidators() []validator.List { + return b.Validators +} + +// Type returns ListType of ObjectType or CustomType. 
+func (b ListNestedBlock) Type() attr.Type { + if b.CustomType != nil { + return b.CustomType + } + + return types.ListType{ + ElemType: b.NestedObject.Type(), + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/map_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/map_attribute.go new file mode 100644 index 0000000000..d1cd782d0e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/map_attribute.go @@ -0,0 +1,200 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = MapAttribute{} + _ fwxschema.AttributeWithMapValidators = MapAttribute{} +) + +// MapAttribute represents a schema attribute that is a list with a single +// element type. When retrieving the value for this attribute, use types.Map +// as the value type unless the CustomType field is set. The ElementType field +// must be set. +// +// Use MapNestedAttribute if the underlying elements should be objects and +// require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a list or directly via curly brace syntax. 
+// +// # map of strings +// example_attribute = { +// key1 = "first", +// key2 = "second", +// } +// +// Terraform configurations reference this attribute using expressions that +// accept a map or an element directly via square brace string syntax: +// +// # key1 known element +// .example_attribute["key1"] +type MapAttribute struct { + // ElementType is the type for all elements of the map. This field must be + // set. + ElementType attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.MapType. When retrieving data, the basetypes.MapValuable + // associated with this custom type must be used in place of types.Map. + CustomType basetypes.MapTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. 
It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. 
+ // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Map +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into a map +// index or an error. +func (a MapAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a MapAttribute +// and all fields are equal. +func (a MapAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(MapAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a MapAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a MapAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a MapAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.MapType or the CustomType field value if defined. 
+func (a MapAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.MapType{ + ElemType: a.ElementType, + } +} + +// IsComputed returns the Computed field value. +func (a MapAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a MapAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a MapAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a MapAttribute) IsSensitive() bool { + return a.Sensitive +} + +// MapValidators returns the Validators field value. +func (a MapAttribute) MapValidators() []validator.Map { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/map_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/map_nested_attribute.go new file mode 100644 index 0000000000..2ca7cd89b5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/map_nested_attribute.go @@ -0,0 +1,225 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +// Ensure the implementation satisifies the desired interfaces. 
+var ( + _ NestedAttribute = MapNestedAttribute{} + _ fwxschema.AttributeWithMapValidators = MapNestedAttribute{} +) + +// MapNestedAttribute represents an attribute that is a set of objects where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.Map +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use MapAttribute if the underlying elements are of a single type and do +// not require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a set of objects or directly via curly brace syntax. +// +// # map of objects +// example_attribute = { +// key = { +// nested_attribute = #... +// }, +// ] +// +// Terraform configurations reference this attribute using expressions that +// accept a map of objects or an element directly via square brace string +// syntax: +// +// # known object at key +// .example_attribute["key"] +// # known object nested_attribute value at key +// .example_attribute["key"].nested_attribute +type MapNestedAttribute struct { + // NestedObject is the underlying object that contains nested attributes. + // This field must be set. + NestedObject NestedAttributeObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.MapType of types.ObjectType. When retrieving data, the + // basetypes.MapValuable associated with this custom type must be used in + // place of types.Map. + CustomType basetypes.MapTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. 
Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." 
+ // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Map +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is ElementKeyString, otherwise returns an error. 
+func (a MapNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyString) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to MapNestedAttribute", step) + } + + return a.NestedObject, nil +} + +// Equal returns true if the given Attribute is a MapNestedAttribute +// and all fields are equal. +func (a MapNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(MapNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a MapNestedAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a MapNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a MapNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (a MapNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return a.NestedObject +} + +// GetNestingMode always returns NestingModeList. +func (a MapNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeMap +} + +// GetType returns MapType of ObjectType or CustomType. +func (a MapNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.MapType{ + ElemType: a.NestedObject.Type(), + } +} + +// IsComputed returns the Computed field value. +func (a MapNestedAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a MapNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. 
+func (a MapNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a MapNestedAttribute) IsSensitive() bool { + return a.Sensitive +} + +// MapValidators returns the Validators field value. +func (a MapNestedAttribute) MapValidators() []validator.Map { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/nested_attribute.go new file mode 100644 index 0000000000..5429975dae --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/nested_attribute.go @@ -0,0 +1,11 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" +) + +// Nested attributes are only compatible with protocol version 6. +type NestedAttribute interface { + Attribute + fwschema.NestedAttribute +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/nested_attribute_object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/nested_attribute_object.go new file mode 100644 index 0000000000..ae41a1873b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/nested_attribute_object.go @@ -0,0 +1,79 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. 
+var _ fwxschema.NestedAttributeObjectWithValidators = NestedAttributeObject{} + +// NestedAttributeObject is the object containing the underlying attributes +// for a ListNestedAttribute, MapNestedAttribute, SetNestedAttribute, or +// SingleNestedAttribute (automatically generated). When retrieving the value +// for this attribute, use types.Object as the value type unless the CustomType +// field is set. The Attributes field must be set. Nested attributes are only +// compatible with protocol version 6. +// +// This object enables customizing and simplifying details within its parent +// NestedAttribute, therefore it cannot have Terraform schema fields such as +// Required, Description, etc. +type NestedAttributeObject struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. This field must be set. + Attributes map[string]Attribute + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object +} + +// ApplyTerraform5AttributePathStep performs an AttributeName step on the +// underlying attributes or returns an error. 
+func (o NestedAttributeObject) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (any, error) { + return fwschema.NestedAttributeObjectApplyTerraform5AttributePathStep(o, step) +} + +// Equal returns true if the given NestedAttributeObject is equivalent. +func (o NestedAttributeObject) Equal(other fwschema.NestedAttributeObject) bool { + if _, ok := other.(NestedAttributeObject); !ok { + return false + } + + return fwschema.NestedAttributeObjectEqual(o, other) +} + +// GetAttributes returns the Attributes field value. +func (o NestedAttributeObject) GetAttributes() fwschema.UnderlyingAttributes { + return schemaAttributes(o.Attributes) +} + +// ObjectValidators returns the Validators field value. +func (o NestedAttributeObject) ObjectValidators() []validator.Object { + return o.Validators +} + +// Type returns the framework type of the NestedAttributeObject. +func (o NestedAttributeObject) Type() basetypes.ObjectTypable { + if o.CustomType != nil { + return o.CustomType + } + + return fwschema.NestedAttributeObjectType(o) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/nested_block_object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/nested_block_object.go new file mode 100644 index 0000000000..ea0cf61d6a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/nested_block_object.go @@ -0,0 +1,91 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. 
+var _ fwxschema.NestedBlockObjectWithValidators = NestedBlockObject{} + +// NestedBlockObject is the object containing the underlying attributes and +// blocks for a ListNestedBlock or SetNestedBlock. When retrieving the value +// for this attribute, use types.Object as the value type unless the CustomType +// field is set. +// +// This object enables customizing and simplifying details within its parent +// Block, therefore it cannot have Terraform schema fields such as Description, +// etc. +type NestedBlockObject struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Blocks names. + Attributes map[string]Attribute + + // Blocks is the mapping of underlying block names to block definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Attributes names. + Blocks map[string]Block + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object +} + +// ApplyTerraform5AttributePathStep performs an AttributeName step on the +// underlying attributes or returns an error. 
+func (o NestedBlockObject) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (any, error) { + return fwschema.NestedBlockObjectApplyTerraform5AttributePathStep(o, step) +} + +// Equal returns true if the given NestedBlockObject is equivalent. +func (o NestedBlockObject) Equal(other fwschema.NestedBlockObject) bool { + if _, ok := other.(NestedBlockObject); !ok { + return false + } + + return fwschema.NestedBlockObjectEqual(o, other) +} + +// GetAttributes returns the Attributes field value. +func (o NestedBlockObject) GetAttributes() fwschema.UnderlyingAttributes { + return schemaAttributes(o.Attributes) +} + +// GetAttributes returns the Blocks field value. +func (o NestedBlockObject) GetBlocks() map[string]fwschema.Block { + return schemaBlocks(o.Blocks) +} + +// ObjectValidators returns the Validators field value. +func (o NestedBlockObject) ObjectValidators() []validator.Object { + return o.Validators +} + +// Type returns the framework type of the NestedBlockObject. 
+func (o NestedBlockObject) Type() basetypes.ObjectTypable { + if o.CustomType != nil { + return o.CustomType + } + + return fwschema.NestedBlockObjectType(o) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/number_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/number_attribute.go new file mode 100644 index 0000000000..5e2b57fe78 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/number_attribute.go @@ -0,0 +1,188 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = NumberAttribute{} + _ fwxschema.AttributeWithNumberValidators = NumberAttribute{} +) + +// NumberAttribute represents a schema attribute that is a generic number with +// up to 512 bits of floating point or integer precision. When retrieving the +// value for this attribute, use types.Number as the value type unless the +// CustomType field is set. +// +// Use Float64Attribute for 64-bit floating point number attributes or +// Int64Attribute for 64-bit integer number attributes. +// +// Terraform configurations configure this attribute using expressions that +// return a number or directly via a floating point or integer value. +// +// example_attribute = 123 +// +// Terraform configurations reference this attribute using the attribute name. 
+// +// .example_attribute +type NumberAttribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.NumberType. When retrieving data, the basetypes.NumberValuable + // associated with this custom type must be used in place of types.Number. + CustomType basetypes.NumberTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. 
+ MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. 
+ // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Number +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a NumberAttribute. +func (a NumberAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a NumberAttribute +// and all fields are equal. +func (a NumberAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(NumberAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a NumberAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a NumberAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a NumberAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.NumberType or the CustomType field value if defined. +func (a NumberAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.NumberType +} + +// IsComputed returns the Computed field value. +func (a NumberAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a NumberAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. 
+func (a NumberAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a NumberAttribute) IsSensitive() bool { + return a.Sensitive +} + +// NumberValidators returns the Validators field value. +func (a NumberAttribute) NumberValidators() []validator.Number { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/object_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/object_attribute.go new file mode 100644 index 0000000000..9d52f5d067 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/object_attribute.go @@ -0,0 +1,199 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = ObjectAttribute{} + _ fwxschema.AttributeWithObjectValidators = ObjectAttribute{} +) + +// ObjectAttribute represents a schema attribute that is an object with only +// type information for underlying attributes. When retrieving the value for +// this attribute, use types.Object as the value type unless the CustomType +// field is set. The AttributeTypes field must be set. +// +// Prefer SingleNestedAttribute over ObjectAttribute if the provider is +// using protocol version 6 and full attribute functionality is needed. 
+// +// Terraform configurations configure this attribute using expressions that +// return an object or directly via curly brace syntax. +// +// # object with one attribute +// example_attribute = { +// underlying_attribute = #... +// } +// +// Terraform configurations reference this attribute using expressions that +// accept an object or an attribute directly via period syntax: +// +// # underlying attribute +// .example_attribute.underlying_attribute +type ObjectAttribute struct { + // AttributeTypes is the mapping of underlying attribute names to attribute + // types. This field must be set. + AttributeTypes map[string]attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. 
+ Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). 
+ // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into an +// attribute name or an error. +func (a ObjectAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a ObjectAttribute +// and all fields are equal. +func (a ObjectAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(ObjectAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a ObjectAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. 
+func (a ObjectAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a ObjectAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.ObjectType or the CustomType field value if defined. +func (a ObjectAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.ObjectType{ + AttrTypes: a.AttributeTypes, + } +} + +// IsComputed returns the Computed field value. +func (a ObjectAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a ObjectAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a ObjectAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a ObjectAttribute) IsSensitive() bool { + return a.Sensitive +} + +// ObjectValidators returns the Validators field value. +func (a ObjectAttribute) ObjectValidators() []validator.Object { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/schema.go new file mode 100644 index 0000000000..d071c68eb4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/schema.go @@ -0,0 +1,272 @@ +package schema + +import ( + "context" + "fmt" + "regexp" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/path" +) + +// Schema must satify the fwschema.Schema interface. 
+var _ fwschema.Schema = Schema{} + +// Schema defines the structure and value types of data source data. This type +// is used as the datasource.SchemaResponse type Schema field, which is +// implemented by the datasource.DataSource type Schema method. +type Schema struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Blocks names. + Attributes map[string]Attribute + + // Blocks is the mapping of underlying block names to block definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Attributes names. + Blocks map[string]Block + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this data source is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this data source is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this data source. The warning diagnostic + // summary is automatically set to "Data Source Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Use examplecloud_other data source instead. This data source + // will be removed in the next major version of the provider." + // - "Remove this data source as it no longer is valid and + // will be removed in the next major version of the provider." 
+ // + DeprecationMessage string +} + +// ApplyTerraform5AttributePathStep applies the given AttributePathStep to the +// schema. +func (s Schema) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (any, error) { + return fwschema.SchemaApplyTerraform5AttributePathStep(s, step) +} + +// AttributeAtPath returns the Attribute at the passed path. If the path points +// to an element or attribute of a complex type, rather than to an Attribute, +// it will return an ErrPathInsideAtomicAttribute error. +func (s Schema) AttributeAtPath(ctx context.Context, p path.Path) (fwschema.Attribute, diag.Diagnostics) { + return fwschema.SchemaAttributeAtPath(ctx, s, p) +} + +// AttributeAtPath returns the Attribute at the passed path. If the path points +// to an element or attribute of a complex type, rather than to an Attribute, +// it will return an ErrPathInsideAtomicAttribute error. +func (s Schema) AttributeAtTerraformPath(ctx context.Context, p *tftypes.AttributePath) (fwschema.Attribute, error) { + return fwschema.SchemaAttributeAtTerraformPath(ctx, s, p) +} + +// GetAttributes returns the Attributes field value. +func (s Schema) GetAttributes() map[string]fwschema.Attribute { + return schemaAttributes(s.Attributes) +} + +// GetBlocks returns the Blocks field value. +func (s Schema) GetBlocks() map[string]fwschema.Block { + return schemaBlocks(s.Blocks) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (s Schema) GetDeprecationMessage() string { + return s.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (s Schema) GetDescription() string { + return s.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (s Schema) GetMarkdownDescription() string { + return s.MarkdownDescription +} + +// GetVersion always returns 0 as data source schemas cannot be versioned. 
+func (s Schema) GetVersion() int64 { + return 0 +} + +// Type returns the framework type of the schema. +func (s Schema) Type() attr.Type { + return fwschema.SchemaType(s) +} + +// TypeAtPath returns the framework type at the given schema path. +func (s Schema) TypeAtPath(ctx context.Context, p path.Path) (attr.Type, diag.Diagnostics) { + return fwschema.SchemaTypeAtPath(ctx, s, p) +} + +// TypeAtTerraformPath returns the framework type at the given tftypes path. +func (s Schema) TypeAtTerraformPath(ctx context.Context, p *tftypes.AttributePath) (attr.Type, error) { + return fwschema.SchemaTypeAtTerraformPath(ctx, s, p) +} + +// Validate verifies that the schema is not using a reserved field name for a top-level attribute. +func (s Schema) Validate() diag.Diagnostics { + var diags diag.Diagnostics + + // Raise error diagnostics when data source configuration uses reserved + // field names for root-level attributes. + reservedFieldNames := map[string]struct{}{ + "connection": {}, + "count": {}, + "depends_on": {}, + "lifecycle": {}, + "provider": {}, + "provisioner": {}, + } + + attributes := s.GetAttributes() + + for k, v := range attributes { + if _, ok := reservedFieldNames[k]; ok { + diags.AddAttributeError( + path.Root(k), + "Schema Using Reserved Field Name", + fmt.Sprintf("%q is a reserved field name", k), + ) + } + + d := validateAttributeFieldName(path.Root(k), k, v) + + diags.Append(d...) + } + + blocks := s.GetBlocks() + + for k, v := range blocks { + if _, ok := reservedFieldNames[k]; ok { + diags.AddAttributeError( + path.Root(k), + "Schema Using Reserved Field Name", + fmt.Sprintf("%q is a reserved field name", k), + ) + } + + d := validateBlockFieldName(path.Root(k), k, v) + + diags.Append(d...) + } + + return diags +} + +// validFieldNameRegex is used to verify that name used for attributes and blocks +// comply with the defined regular expression. 
+var validFieldNameRegex = regexp.MustCompile("^[a-z0-9_]+$") + +// validateAttributeFieldName verifies that the name used for an attribute complies with the regular +// expression defined in validFieldNameRegex. +func validateAttributeFieldName(path path.Path, name string, attr fwschema.Attribute) diag.Diagnostics { + var diags diag.Diagnostics + + if !validFieldNameRegex.MatchString(name) { + diags.AddAttributeError( + path, + "Invalid Schema Field Name", + fmt.Sprintf("Field name %q is invalid, the only allowed characters are a-z, 0-9 and _. This is always a problem with the provider and should be reported to the provider developer.", name), + ) + } + + if na, ok := attr.(fwschema.NestedAttribute); ok { + nestedObject := na.GetNestedObject() + + if nestedObject == nil { + return diags + } + + attributes := nestedObject.GetAttributes() + + for k, v := range attributes { + d := validateAttributeFieldName(path.AtName(k), k, v) + + diags.Append(d...) + } + } + + return diags +} + +// validateBlockFieldName verifies that the name used for a block complies with the regular +// expression defined in validFieldNameRegex. +func validateBlockFieldName(path path.Path, name string, b fwschema.Block) diag.Diagnostics { + var diags diag.Diagnostics + + if !validFieldNameRegex.MatchString(name) { + diags.AddAttributeError( + path, + "Invalid Schema Field Name", + fmt.Sprintf("Field name %q is invalid, the only allowed characters are a-z, 0-9 and _. This is always a problem with the provider and should be reported to the provider developer.", name), + ) + } + + nestedObject := b.GetNestedObject() + + if nestedObject == nil { + return diags + } + + blocks := nestedObject.GetBlocks() + + for k, v := range blocks { + d := validateBlockFieldName(path.AtName(k), k, v) + + diags.Append(d...) + } + + attributes := nestedObject.GetAttributes() + + for k, v := range attributes { + d := validateAttributeFieldName(path.AtName(k), k, v) + + diags.Append(d...) 
+ } + + return diags +} + +// schemaAttributes is a datasource to fwschema type conversion function. +func schemaAttributes(attributes map[string]Attribute) map[string]fwschema.Attribute { + result := make(map[string]fwschema.Attribute, len(attributes)) + + for name, attribute := range attributes { + result[name] = attribute + } + + return result +} + +// schemaBlocks is a datasource to fwschema type conversion function. +func schemaBlocks(blocks map[string]Block) map[string]fwschema.Block { + result := make(map[string]fwschema.Block, len(blocks)) + + for name, block := range blocks { + result[name] = block + } + + return result +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/set_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/set_attribute.go new file mode 100644 index 0000000000..2eed086774 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/set_attribute.go @@ -0,0 +1,195 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = SetAttribute{} + _ fwxschema.AttributeWithSetValidators = SetAttribute{} +) + +// SetAttribute represents a schema attribute that is a set with a single +// element type. When retrieving the value for this attribute, use types.Set +// as the value type unless the CustomType field is set. The ElementType field +// must be set. 
+// +// Use SetNestedAttribute if the underlying elements should be objects and +// require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a set or directly via square brace syntax. +// +// # set of strings +// example_attribute = ["first", "second"] +// +// Terraform configurations reference this attribute using expressions that +// accept a set. Sets cannot be indexed in Terraform, therefore an expression +// is required to access an explicit element. +type SetAttribute struct { + // ElementType is the type for all elements of the set. This field must be + // set. + ElementType attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.SetType. When retrieving data, the basetypes.SetValuable + // associated with this custom type must be used in place of types.Set. + CustomType basetypes.SetTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. 
Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). 
+ // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Set +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into a set +// index or an error. +func (a SetAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a SetAttribute +// and all fields are equal. +func (a SetAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(SetAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a SetAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a SetAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. 
+func (a SetAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.SetType or the CustomType field value if defined. +func (a SetAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.SetType{ + ElemType: a.ElementType, + } +} + +// IsComputed returns the Computed field value. +func (a SetAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a SetAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a SetAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a SetAttribute) IsSensitive() bool { + return a.Sensitive +} + +// SetValidators returns the Validators field value. +func (a SetAttribute) SetValidators() []validator.Set { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/set_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/set_nested_attribute.go new file mode 100644 index 0000000000..40dc28051d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/set_nested_attribute.go @@ -0,0 +1,220 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +// Ensure the implementation satisifies the desired interfaces. 
+var ( + _ NestedAttribute = SetNestedAttribute{} + _ fwxschema.AttributeWithSetValidators = SetNestedAttribute{} +) + +// SetNestedAttribute represents an attribute that is a set of objects where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.Set +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use SetAttribute if the underlying elements are of a single type and do +// not require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a set of objects or directly via square and curly brace syntax. +// +// # set of objects +// example_attribute = [ +// { +// nested_attribute = #... +// }, +// ] +// +// Terraform configurations reference this attribute using expressions that +// accept a set of objects. Sets cannot be indexed in Terraform, therefore +// an expression is required to access an explicit element. +type SetNestedAttribute struct { + // NestedObject is the underlying object that contains nested attributes. + // This field must be set. + NestedObject NestedAttributeObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.SetType of types.ObjectType. When retrieving data, the + // basetypes.SetValuable associated with this custom type must be used in + // place of types.Set. + CustomType basetypes.SetTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. 
+ Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." 
+	//
+	// In Terraform 1.2.7 and later, this warning diagnostic is displayed any
+	// time a practitioner attempts to configure a value for this attribute and
+	// certain scenarios where this attribute is referenced.
+	//
+	// In Terraform 1.2.6 and earlier, this warning diagnostic is only
+	// displayed when the Attribute is Required or Optional, and if the
+	// practitioner configuration sets the value to a known or unknown value
+	// (which may eventually be null). It has no effect when the Attribute is
+	// Computed-only (read-only; not Required or Optional).
+	//
+	// Across any Terraform version, there are no warnings raised for
+	// practitioner configuration values set directly to null, as there is no
+	// way for the framework to differentiate between an unset and null
+	// configuration due to how Terraform sends configuration information
+	// across the protocol.
+	//
+	// Additional information about deprecation enhancements for read-only
+	// attributes can be found in:
+	//
+	//  - https://github.com/hashicorp/terraform/issues/7569
+	//
+	DeprecationMessage string
+
+	// Validators define value validation functionality for the attribute. All
+	// elements of the slice of AttributeValidator are run, regardless of any
+	// previous error diagnostics.
+	//
+	// Many common use case validators can be found in the
+	// github.com/hashicorp/terraform-plugin-framework-validators Go module.
+	//
+	// If the Type field points to a custom type that implements the
+	// xattr.TypeWithValidate interface, the validators defined in this field
+	// are run in addition to the validation defined by the type.
+	Validators []validator.Set
+}
+
+// ApplyTerraform5AttributePathStep returns the NestedObject field value if step
+// is ElementKeyValue, otherwise returns an error.
+func (a SetNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) {
+	_, ok := step.(tftypes.ElementKeyValue)
+
+	if !ok {
+		return nil, fmt.Errorf("cannot apply step %T to SetNestedAttribute", step)
+	}
+
+	return a.NestedObject, nil
+}
+
+// Equal returns true if the given Attribute is a SetNestedAttribute
+// and all fields are equal.
+func (a SetNestedAttribute) Equal(o fwschema.Attribute) bool {
+	if _, ok := o.(SetNestedAttribute); !ok {
+		return false
+	}
+
+	return fwschema.AttributesEqual(a, o)
+}
+
+// GetDeprecationMessage returns the DeprecationMessage field value.
+func (a SetNestedAttribute) GetDeprecationMessage() string {
+	return a.DeprecationMessage
+}
+
+// GetDescription returns the Description field value.
+func (a SetNestedAttribute) GetDescription() string {
+	return a.Description
+}
+
+// GetMarkdownDescription returns the MarkdownDescription field value.
+func (a SetNestedAttribute) GetMarkdownDescription() string {
+	return a.MarkdownDescription
+}
+
+// GetNestedObject returns the NestedObject field value.
+func (a SetNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject {
+	return a.NestedObject
+}
+
+// GetNestingMode always returns NestingModeSet.
+func (a SetNestedAttribute) GetNestingMode() fwschema.NestingMode {
+	return fwschema.NestingModeSet
+}
+
+// GetType returns SetType of ObjectType or CustomType.
+func (a SetNestedAttribute) GetType() attr.Type {
+	if a.CustomType != nil {
+		return a.CustomType
+	}
+
+	return types.SetType{
+		ElemType: a.NestedObject.Type(),
+	}
+}
+
+// IsComputed returns the Computed field value.
+func (a SetNestedAttribute) IsComputed() bool {
+	return a.Computed
+}
+
+// IsOptional returns the Optional field value.
+func (a SetNestedAttribute) IsOptional() bool {
+	return a.Optional
+}
+
+// IsRequired returns the Required field value.
+func (a SetNestedAttribute) IsRequired() bool {
+	return a.Required
+}
+
+// IsSensitive returns the Sensitive field value.
+func (a SetNestedAttribute) IsSensitive() bool {
+	return a.Sensitive
+}
+
+// SetValidators returns the Validators field value.
+func (a SetNestedAttribute) SetValidators() []validator.Set {
+	return a.Validators
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/set_nested_block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/set_nested_block.go
new file mode 100644
index 0000000000..63992226d5
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/set_nested_block.go
@@ -0,0 +1,185 @@
+package schema
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/internal/fwschema"
+	"github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema"
+	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+	"github.com/hashicorp/terraform-plugin-go/tftypes"
+)
+
+// Ensure the implementation satisfies the desired interfaces.
+var (
+	_ Block = SetNestedBlock{}
+	_ fwxschema.BlockWithSetValidators = SetNestedBlock{}
+)
+
+// SetNestedBlock represents a block that is a set of objects where
+// the object attributes can be fully defined, including further attributes
+// or blocks. When retrieving the value for this block, use types.Set
+// as the value type unless the CustomType field is set. The NestedObject field
+// must be set.
+//
+// Prefer SetNestedAttribute over SetNestedBlock if the provider is
+// using protocol version 6. Nested attributes allow practitioners to configure
+// values directly with expressions.
+// +// Terraform configurations configure this block repeatedly using curly brace +// syntax without an equals (=) sign or [Dynamic Block Expressions]. +// +// # set of blocks with two elements +// example_block { +// nested_attribute = #... +// } +// example_block { +// nested_attribute = #... +// } +// +// Terraform configurations reference this block using expressions that +// accept a set of objects or an element directly via square brace 0-based +// index syntax: +// +// # first known object +// .example_block[0] +// # first known object nested_attribute value +// .example_block[0].nested_attribute +// +// [Dynamic Block Expressions]: https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks +type SetNestedBlock struct { + // NestedObject is the underlying object that contains nested attributes or + // blocks. This field must be set. + NestedObject NestedBlockObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.SetType of types.ObjectType. When retrieving data, the + // basetypes.SetValuable associated with this custom type must be used in + // place of types.Set. + CustomType basetypes.SetTypable + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. 
The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. 
+ // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Set +} + +// ApplyTerraform5AttributePathStep returns the NestedObject field value if step +// is ElementKeyValue, otherwise returns an error. +func (b SetNestedBlock) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyValue) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to SetNestedBlock", step) + } + + return b.NestedObject, nil +} + +// Equal returns true if the given Block is SetNestedBlock +// and all fields are equal. +func (b SetNestedBlock) Equal(o fwschema.Block) bool { + if _, ok := o.(SetNestedBlock); !ok { + return false + } + + return fwschema.BlocksEqual(b, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (b SetNestedBlock) GetDeprecationMessage() string { + return b.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (b SetNestedBlock) GetDescription() string { + return b.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (b SetNestedBlock) GetMarkdownDescription() string { + return b.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (b SetNestedBlock) GetNestedObject() fwschema.NestedBlockObject { + return b.NestedObject +} + +// GetNestingMode always returns BlockNestingModeSet. +func (b SetNestedBlock) GetNestingMode() fwschema.BlockNestingMode { + return fwschema.BlockNestingModeSet +} + +// SetValidators returns the Validators field value. +func (b SetNestedBlock) SetValidators() []validator.Set { + return b.Validators +} + +// Type returns SetType of ObjectType or CustomType. 
+func (b SetNestedBlock) Type() attr.Type { + if b.CustomType != nil { + return b.CustomType + } + + return types.SetType{ + ElemType: b.NestedObject.Type(), + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/single_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/single_nested_attribute.go new file mode 100644 index 0000000000..77726bb63e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/single_nested_attribute.go @@ -0,0 +1,241 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ NestedAttribute = SingleNestedAttribute{} + _ fwxschema.AttributeWithObjectValidators = SingleNestedAttribute{} +) + +// SingleNestedAttribute represents an attribute that is a single object where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.Object +// as the value type unless the CustomType field is set. The Attributes field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use ObjectAttribute if the underlying attributes do not require definition +// beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return an object or directly via curly brace syntax. 
+// +// # single object +// example_attribute = { +// nested_attribute = #... +// } +// +// Terraform configurations reference this attribute using expressions that +// accept an object or an attribute name directly via period syntax: +// +// # object nested_attribute value +// .example_attribute.nested_attribute +type SingleNestedAttribute struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. This field must be set. + Attributes map[string]Attribute + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. 
+ Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). 
+ // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is AttributeName, otherwise returns an error. +func (a SingleNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + name, ok := step.(tftypes.AttributeName) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to SingleNestedAttribute", step) + } + + attribute, ok := a.Attributes[string(name)] + + if !ok { + return nil, fmt.Errorf("no attribute %q on SingleNestedAttribute", name) + } + + return attribute, nil +} + +// Equal returns true if the given Attribute is a SingleNestedAttribute +// and all fields are equal. +func (a SingleNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(SingleNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetAttributes returns the Attributes field value. 
+func (a SingleNestedAttribute) GetAttributes() fwschema.UnderlyingAttributes { + return schemaAttributes(a.Attributes) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a SingleNestedAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a SingleNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a SingleNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns a generated NestedAttributeObject from the +// Attributes, CustomType, and Validators field values. +func (a SingleNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return NestedAttributeObject{ + Attributes: a.Attributes, + CustomType: a.CustomType, + Validators: a.Validators, + } +} + +// GetNestingMode always returns NestingModeList. +func (a SingleNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeSingle +} + +// GetType returns ListType of ObjectType or CustomType. +func (a SingleNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + attrTypes := make(map[string]attr.Type, len(a.Attributes)) + + for name, attribute := range a.Attributes { + attrTypes[name] = attribute.GetType() + } + + return types.ObjectType{ + AttrTypes: attrTypes, + } +} + +// IsComputed returns the Computed field value. +func (a SingleNestedAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a SingleNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a SingleNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. 
+func (a SingleNestedAttribute) IsSensitive() bool { + return a.Sensitive +} + +// ObjectValidators returns the Validators field value. +func (a SingleNestedAttribute) ObjectValidators() []validator.Object { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/single_nested_block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/single_nested_block.go new file mode 100644 index 0000000000..4a8b9734cb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/single_nested_block.go @@ -0,0 +1,210 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Block = SingleNestedBlock{} + _ fwxschema.BlockWithObjectValidators = SingleNestedBlock{} +) + +// SingleNestedBlock represents a block that is a single object where +// the object attributes can be fully defined, including further attributes +// or blocks. When retrieving the value for this block, use types.Object +// as the value type unless the CustomType field is set. +// +// Prefer SingleNestedAttribute over SingleNestedBlock if the provider is +// using protocol version 6. Nested attributes allow practitioners to configure +// values directly with expressions. 
+// +// Terraform configurations configure this block only once using curly brace +// syntax without an equals (=) sign or [Dynamic Block Expressions]. +// +// # single block +// example_block { +// nested_attribute = #... +// } +// +// Terraform configurations reference this block using expressions that +// accept an object or an attribute name directly via period syntax: +// +// # object nested_attribute value +// .example_block.nested_attribute +// +// [Dynamic Block Expressions]: https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks +type SingleNestedBlock struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Blocks names. + Attributes map[string]Attribute + + // Blocks is the mapping of underlying block names to block definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Attributes names. + Blocks map[string]Block + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. 
+ MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. 
+ // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is AttributeName, otherwise returns an error. +func (b SingleNestedBlock) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + name, ok := step.(tftypes.AttributeName) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to SingleNestedBlock", step) + } + + if attribute, ok := b.Attributes[string(name)]; ok { + return attribute, nil + } + + if block, ok := b.Blocks[string(name)]; ok { + return block, nil + } + + return nil, fmt.Errorf("no attribute or block %q on SingleNestedBlock", name) +} + +// Equal returns true if the given Attribute is b SingleNestedBlock +// and all fields are equal. +func (b SingleNestedBlock) Equal(o fwschema.Block) bool { + if _, ok := o.(SingleNestedBlock); !ok { + return false + } + + return fwschema.BlocksEqual(b, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (b SingleNestedBlock) GetDeprecationMessage() string { + return b.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (b SingleNestedBlock) GetDescription() string { + return b.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (b SingleNestedBlock) GetMarkdownDescription() string { + return b.MarkdownDescription +} + +// GetNestedObject returns a generated NestedBlockObject from the +// Attributes, CustomType, and Validators field values. 
+func (b SingleNestedBlock) GetNestedObject() fwschema.NestedBlockObject { + return NestedBlockObject{ + Attributes: b.Attributes, + Blocks: b.Blocks, + CustomType: b.CustomType, + Validators: b.Validators, + } +} + +// GetNestingMode always returns BlockNestingModeSingle. +func (b SingleNestedBlock) GetNestingMode() fwschema.BlockNestingMode { + return fwschema.BlockNestingModeSingle +} + +// ObjectValidators returns the Validators field value. +func (b SingleNestedBlock) ObjectValidators() []validator.Object { + return b.Validators +} + +// Type returns ObjectType or CustomType. +func (b SingleNestedBlock) Type() attr.Type { + if b.CustomType != nil { + return b.CustomType + } + + attrTypes := make(map[string]attr.Type, len(b.Attributes)+len(b.Blocks)) + + for name, attribute := range b.Attributes { + attrTypes[name] = attribute.GetType() + } + + for name, block := range b.Blocks { + attrTypes[name] = block.Type() + } + + return types.ObjectType{ + AttrTypes: attrTypes, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/string_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/string_attribute.go new file mode 100644 index 0000000000..ee6ee38302 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/schema/string_attribute.go @@ -0,0 +1,184 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired 
interfaces. +var ( + _ Attribute = StringAttribute{} + _ fwxschema.AttributeWithStringValidators = StringAttribute{} +) + +// StringAttribute represents a schema attribute that is a string. When +// retrieving the value for this attribute, use types.String as the value type +// unless the CustomType field is set. +// +// Terraform configurations configure this attribute using expressions that +// return a string or directly via double quote syntax. +// +// example_attribute = "value" +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type StringAttribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.StringType. When retrieving data, the basetypes.StringValuable + // associated with this custom type must be used in place of types.String. + CustomType basetypes.StringTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. 
+ Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). 
+ // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.String +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a StringAttribute. +func (a StringAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a StringAttribute +// and all fields are equal. +func (a StringAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(StringAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a StringAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. 
+func (a StringAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a StringAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.StringType or the CustomType field value if defined. +func (a StringAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.StringType +} + +// IsComputed returns the Computed field value. +func (a StringAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a StringAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a StringAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a StringAttribute) IsSensitive() bool { + return a.Sensitive +} + +// StringValidators returns the Validators field value. +func (a StringAttribute) StringValidators() []validator.String { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/validate_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/validate_config.go new file mode 100644 index 0000000000..d01936ad45 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/datasource/validate_config.go @@ -0,0 +1,30 @@ +package datasource + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ValidateConfigRequest represents a request to validate the +// configuration of a data source. An instance of this request struct is +// supplied as an argument to the DataSource ValidateConfig receiver method +// or automatically passed through to each ConfigValidator. 
+type ValidateConfigRequest struct { + // Config is the configuration the user supplied for the data source. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config tfsdk.Config +} + +// ValidateConfigResponse represents a response to a +// ValidateConfigRequest. An instance of this response struct is +// supplied as an argument to the DataSource ValidateConfig receiver method +// or automatically passed through to each ConfigValidator. +type ValidateConfigResponse struct { + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/applyresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/applyresourcechange.go new file mode 100644 index 0000000000..c1f8d5b9ad --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/applyresourcechange.go @@ -0,0 +1,74 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +// ApplyResourceChangeRequest returns the *fwserver.ApplyResourceChangeRequest +// equivalent of a *tfprotov5.ApplyResourceChangeRequest. 
+func ApplyResourceChangeRequest(ctx context.Context, proto5 *tfprotov5.ApplyResourceChangeRequest, resource resource.Resource, resourceSchema fwschema.Schema, providerMetaSchema fwschema.Schema) (*fwserver.ApplyResourceChangeRequest, diag.Diagnostics) { + if proto5 == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if resourceSchema == nil { + diags.AddError( + "Missing Resource Schema", + "An unexpected error was encountered when handling the request. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + fw := &fwserver.ApplyResourceChangeRequest{ + ResourceSchema: resourceSchema, + Resource: resource, + } + + config, configDiags := Config(ctx, proto5.Config, resourceSchema) + + diags.Append(configDiags...) + + fw.Config = config + + plannedState, plannedStateDiags := Plan(ctx, proto5.PlannedState, resourceSchema) + + diags.Append(plannedStateDiags...) + + fw.PlannedState = plannedState + + priorState, priorStateDiags := State(ctx, proto5.PriorState, resourceSchema) + + diags.Append(priorStateDiags...) + + fw.PriorState = priorState + + providerMeta, providerMetaDiags := ProviderMeta(ctx, proto5.ProviderMeta, providerMetaSchema) + + diags.Append(providerMetaDiags...) + + fw.ProviderMeta = providerMeta + + privateData, privateDataDiags := privatestate.NewData(ctx, proto5.PlannedPrivate) + + diags.Append(privateDataDiags...) 
+ + fw.PlannedPrivate = privateData + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/config.go new file mode 100644 index 0000000000..eb0977e54b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/config.go @@ -0,0 +1,50 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// Config returns the *tfsdk.Config for a *tfprotov5.DynamicValue and +// fwschema.Schema. +func Config(ctx context.Context, proto5DynamicValue *tfprotov5.DynamicValue, schema fwschema.Schema) (*tfsdk.Config, diag.Diagnostics) { + if proto5DynamicValue == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if schema == nil { + diags.AddError( + "Unable to Convert Configuration", + "An unexpected error was encountered when converting the configuration from the protocol type. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + data, dynamicValueDiags := DynamicValue(ctx, proto5DynamicValue, schema, fwschemadata.DataDescriptionConfiguration) + + diags.Append(dynamicValueDiags...) 
+ + if diags.HasError() { + return nil, diags + } + + fw := &tfsdk.Config{ + Raw: data.TerraformValue, + Schema: schema, + } + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/configureprovider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/configureprovider.go new file mode 100644 index 0000000000..9ce200f3a3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/configureprovider.go @@ -0,0 +1,30 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ConfigureProviderRequest returns the *fwserver.ConfigureProviderRequest +// equivalent of a *tfprotov5.ConfigureProviderRequest. 
+func ConfigureProviderRequest(ctx context.Context, proto5 *tfprotov5.ConfigureProviderRequest, providerSchema fwschema.Schema) (*provider.ConfigureRequest, diag.Diagnostics) { + if proto5 == nil { + return nil, nil + } + + fw := &provider.ConfigureRequest{ + TerraformVersion: proto5.TerraformVersion, + } + + config, diags := Config(ctx, proto5.Config, providerSchema) + + if config != nil { + fw.Config = *config + } + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/doc.go new file mode 100644 index 0000000000..283efb17ef --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/doc.go @@ -0,0 +1,3 @@ +// Package fromproto5 contains functions to convert from protocol version 5 +// (tfprotov5) types to framework types. +package fromproto5 diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/dynamic_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/dynamic_value.go new file mode 100644 index 0000000000..bbe51366d8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/dynamic_value.go @@ -0,0 +1,51 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// DynamicValue returns the fwschemadata.Data for a given +// *tfprotov5.DynamicValue. 
+// +// If necessary, the underlying data is modified to convert list and set block +// values from an empty collection to a null collection. This is to prevent +// developers from needing to understand Terraform's differences between +// block and attribute values where blocks are technically never null, but from +// a developer perspective this distinction introduces unnecessary complexity. +func DynamicValue(ctx context.Context, proto5 *tfprotov5.DynamicValue, schema fwschema.Schema, description fwschemadata.DataDescription) (fwschemadata.Data, diag.Diagnostics) { + var diags diag.Diagnostics + + data := &fwschemadata.Data{ + Description: description, + Schema: schema, + } + + if proto5 == nil { + return *data, diags + } + + proto5Value, err := proto5.Unmarshal(schema.Type().TerraformType(ctx)) + + if err != nil { + diags.AddError( + "Unable to Convert "+description.Title(), + "An unexpected error was encountered when converting the "+description.String()+" from the protocol type. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Unable to unmarshal DynamicValue: "+err.Error(), + ) + + return *data, diags + } + + data.TerraformValue = proto5Value + + diags.Append(data.NullifyCollectionBlocks(ctx)...) 
+ + return *data, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/getproviderschema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/getproviderschema.go new file mode 100644 index 0000000000..d84f8cc4c1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/getproviderschema.go @@ -0,0 +1,20 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// GetProviderSchemaRequest returns the *fwserver.GetProviderSchemaRequest +// equivalent of a *tfprotov5.GetProviderSchemaRequest. +func GetProviderSchemaRequest(ctx context.Context, proto5 *tfprotov5.GetProviderSchemaRequest) *fwserver.GetProviderSchemaRequest { + if proto5 == nil { + return nil + } + + fw := &fwserver.GetProviderSchemaRequest{} + + return fw +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/importresourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/importresourcestate.go new file mode 100644 index 0000000000..4323d82dc5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/importresourcestate.go @@ -0,0 +1,49 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// 
ImportResourceStateRequest returns the *fwserver.ImportResourceStateRequest +// equivalent of a *tfprotov5.ImportResourceStateRequest. +func ImportResourceStateRequest(ctx context.Context, proto5 *tfprotov5.ImportResourceStateRequest, resource resource.Resource, resourceSchema fwschema.Schema) (*fwserver.ImportResourceStateRequest, diag.Diagnostics) { + if proto5 == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if resourceSchema == nil { + diags.AddError( + "Unable to Create Empty State", + "An unexpected error was encountered when creating the empty state. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + fw := &fwserver.ImportResourceStateRequest{ + EmptyState: tfsdk.State{ + Raw: tftypes.NewValue(resourceSchema.Type().TerraformType(ctx), nil), + Schema: resourceSchema, + }, + ID: proto5.ID, + Resource: resource, + TypeName: proto5.TypeName, + } + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/plan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/plan.go new file mode 100644 index 0000000000..c3c9bb13f2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/plan.go @@ -0,0 +1,50 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + 
"github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// Plan returns the *tfsdk.Plan for a *tfprotov5.DynamicValue and +// fwschema.Schema. +func Plan(ctx context.Context, proto5DynamicValue *tfprotov5.DynamicValue, schema fwschema.Schema) (*tfsdk.Plan, diag.Diagnostics) { + if proto5DynamicValue == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if schema == nil { + diags.AddError( + "Unable to Convert Plan", + "An unexpected error was encountered when converting the plan from the protocol type. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + data, dynamicValueDiags := DynamicValue(ctx, proto5DynamicValue, schema, fwschemadata.DataDescriptionPlan) + + diags.Append(dynamicValueDiags...) 
+ + if diags.HasError() { + return nil, diags + } + + fw := &tfsdk.Plan{ + Raw: data.TerraformValue, + Schema: schema, + } + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/planresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/planresourcechange.go new file mode 100644 index 0000000000..487337f079 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/planresourcechange.go @@ -0,0 +1,74 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +// PlanResourceChangeRequest returns the *fwserver.PlanResourceChangeRequest +// equivalent of a *tfprotov5.PlanResourceChangeRequest. +func PlanResourceChangeRequest(ctx context.Context, proto5 *tfprotov5.PlanResourceChangeRequest, resource resource.Resource, resourceSchema fwschema.Schema, providerMetaSchema fwschema.Schema) (*fwserver.PlanResourceChangeRequest, diag.Diagnostics) { + if proto5 == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if resourceSchema == nil { + diags.AddError( + "Missing Resource Schema", + "An unexpected error was encountered when handling the request. 
"+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + fw := &fwserver.PlanResourceChangeRequest{ + ResourceSchema: resourceSchema, + Resource: resource, + } + + config, configDiags := Config(ctx, proto5.Config, resourceSchema) + + diags.Append(configDiags...) + + fw.Config = config + + priorState, priorStateDiags := State(ctx, proto5.PriorState, resourceSchema) + + diags.Append(priorStateDiags...) + + fw.PriorState = priorState + + proposedNewState, proposedNewStateDiags := Plan(ctx, proto5.ProposedNewState, resourceSchema) + + diags.Append(proposedNewStateDiags...) + + fw.ProposedNewState = proposedNewState + + providerMeta, providerMetaDiags := ProviderMeta(ctx, proto5.ProviderMeta, providerMetaSchema) + + diags.Append(providerMetaDiags...) + + fw.ProviderMeta = providerMeta + + privateData, privateDataDiags := privatestate.NewData(ctx, proto5.PriorPrivate) + + diags.Append(privateDataDiags...) 
+ + fw.PriorPrivate = privateData + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/prepareproviderconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/prepareproviderconfig.go new file mode 100644 index 0000000000..f85b9352bf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/prepareproviderconfig.go @@ -0,0 +1,26 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// PrepareProviderConfigRequest returns the *fwserver.ValidateProviderConfigRequest +// equivalent of a *tfprotov5.PrepareProviderConfigRequest. +func PrepareProviderConfigRequest(ctx context.Context, proto5 *tfprotov5.PrepareProviderConfigRequest, providerSchema fwschema.Schema) (*fwserver.ValidateProviderConfigRequest, diag.Diagnostics) { + if proto5 == nil { + return nil, nil + } + + fw := &fwserver.ValidateProviderConfigRequest{} + + config, diags := Config(ctx, proto5.Config, providerSchema) + + fw.Config = config + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/providermeta.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/providermeta.go new file mode 100644 index 0000000000..7dc47934fb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/providermeta.go @@ -0,0 +1,51 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + 
"github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// ProviderMeta returns the *tfsdk.Config for a *tfprotov5.DynamicValue and +// fwschema.Schema. This data handling is different than Config to simplify +// implementors, in that: +// +// - Missing Schema will return nil, rather than an error +// - Missing DynamicValue will return nil typed Value, rather than an error +func ProviderMeta(ctx context.Context, proto5DynamicValue *tfprotov5.DynamicValue, schema fwschema.Schema) (*tfsdk.Config, diag.Diagnostics) { + if schema == nil { + return nil, nil + } + + var diags diag.Diagnostics + + fw := &tfsdk.Config{ + Raw: tftypes.NewValue(schema.Type().TerraformType(ctx), nil), + Schema: schema, + } + + if proto5DynamicValue == nil { + return fw, nil + } + + proto5Value, err := proto5DynamicValue.Unmarshal(schema.Type().TerraformType(ctx)) + + if err != nil { + diags.AddError( + "Unable to Convert Provider Meta Configuration", + "An unexpected error was encountered when converting the provider meta configuration from the protocol type. 
"+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+err.Error(), + ) + + return nil, diags + } + + fw.Raw = proto5Value + + return fw, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/readdatasource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/readdatasource.go new file mode 100644 index 0000000000..b183a1c15a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/readdatasource.go @@ -0,0 +1,54 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ReadDataSourceRequest returns the *fwserver.ReadDataSourceRequest +// equivalent of a *tfprotov5.ReadDataSourceRequest. +func ReadDataSourceRequest(ctx context.Context, proto5 *tfprotov5.ReadDataSourceRequest, dataSource datasource.DataSource, dataSourceSchema fwschema.Schema, providerMetaSchema fwschema.Schema) (*fwserver.ReadDataSourceRequest, diag.Diagnostics) { + if proto5 == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if dataSourceSchema == nil { + diags.AddError( + "Missing DataSource Schema", + "An unexpected error was encountered when handling the request. 
"+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + fw := &fwserver.ReadDataSourceRequest{ + DataSource: dataSource, + DataSourceSchema: dataSourceSchema, + } + + config, configDiags := Config(ctx, proto5.Config, dataSourceSchema) + + diags.Append(configDiags...) + + fw.Config = config + + providerMeta, providerMetaDiags := ProviderMeta(ctx, proto5.ProviderMeta, providerMetaSchema) + + diags.Append(providerMetaDiags...) + + fw.ProviderMeta = providerMeta + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/readresource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/readresource.go new file mode 100644 index 0000000000..e4ddd83d53 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/readresource.go @@ -0,0 +1,47 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +// ReadResourceRequest returns the *fwserver.ReadResourceRequest +// equivalent of a *tfprotov5.ReadResourceRequest. 
+func ReadResourceRequest(ctx context.Context, proto5 *tfprotov5.ReadResourceRequest, resource resource.Resource, resourceSchema fwschema.Schema, providerMetaSchema fwschema.Schema) (*fwserver.ReadResourceRequest, diag.Diagnostics) { + if proto5 == nil { + return nil, nil + } + + var diags diag.Diagnostics + + fw := &fwserver.ReadResourceRequest{ + Resource: resource, + } + + currentState, currentStateDiags := State(ctx, proto5.CurrentState, resourceSchema) + + diags.Append(currentStateDiags...) + + fw.CurrentState = currentState + + providerMeta, providerMetaDiags := ProviderMeta(ctx, proto5.ProviderMeta, providerMetaSchema) + + diags.Append(providerMetaDiags...) + + fw.ProviderMeta = providerMeta + + privateData, privateDataDiags := privatestate.NewData(ctx, proto5.Private) + + diags.Append(privateDataDiags...) + + fw.Private = privateData + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/state.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/state.go new file mode 100644 index 0000000000..b5769ff948 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/state.go @@ -0,0 +1,50 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// State returns the *tfsdk.State for a *tfprotov5.DynamicValue and +// fwschema.Schema. 
+func State(ctx context.Context, proto5DynamicValue *tfprotov5.DynamicValue, schema fwschema.Schema) (*tfsdk.State, diag.Diagnostics) { + if proto5DynamicValue == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if schema == nil { + diags.AddError( + "Unable to Convert State", + "An unexpected error was encountered when converting the state from the protocol type. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + data, dynamicValueDiags := DynamicValue(ctx, proto5DynamicValue, schema, fwschemadata.DataDescriptionState) + + diags.Append(dynamicValueDiags...) + + if diags.HasError() { + return nil, diags + } + + fw := &tfsdk.State{ + Raw: data.TerraformValue, + Schema: schema, + } + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/upgraderesourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/upgraderesourcestate.go new file mode 100644 index 0000000000..081b28ceec --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/upgraderesourcestate.go @@ -0,0 +1,45 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// UpgradeResourceStateRequest returns the 
*fwserver.UpgradeResourceStateRequest +// equivalent of a *tfprotov5.UpgradeResourceStateRequest. +func UpgradeResourceStateRequest(ctx context.Context, proto5 *tfprotov5.UpgradeResourceStateRequest, resource resource.Resource, resourceSchema fwschema.Schema) (*fwserver.UpgradeResourceStateRequest, diag.Diagnostics) { + if proto5 == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if resourceSchema == nil { + diags.AddError( + "Unable to Create Empty State", + "An unexpected error was encountered when creating the empty state. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + fw := &fwserver.UpgradeResourceStateRequest{ + RawState: (*tfprotov6.RawState)(proto5.RawState), + ResourceSchema: resourceSchema, + Resource: resource, + Version: proto5.Version, + } + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/validatedatasourceconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/validatedatasourceconfig.go new file mode 100644 index 0000000000..ce87987ccc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/validatedatasourceconfig.go @@ -0,0 +1,28 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// 
ValidateDataSourceConfigRequest returns the *fwserver.ValidateDataSourceConfigRequest +// equivalent of a *tfprotov5.ValidateDataSourceConfigRequest. +func ValidateDataSourceConfigRequest(ctx context.Context, proto5 *tfprotov5.ValidateDataSourceConfigRequest, dataSource datasource.DataSource, dataSourceSchema fwschema.Schema) (*fwserver.ValidateDataSourceConfigRequest, diag.Diagnostics) { + if proto5 == nil { + return nil, nil + } + + fw := &fwserver.ValidateDataSourceConfigRequest{} + + config, diags := Config(ctx, proto5.Config, dataSourceSchema) + + fw.Config = config + fw.DataSource = dataSource + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/validateresourcetypeconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/validateresourcetypeconfig.go new file mode 100644 index 0000000000..ab454cc5c0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto5/validateresourcetypeconfig.go @@ -0,0 +1,28 @@ +package fromproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ValidateResourceTypeConfigRequest returns the *fwserver.ValidateResourceConfigRequest +// equivalent of a *tfprotov5.ValidateResourceTypeConfigRequest. 
+func ValidateResourceTypeConfigRequest(ctx context.Context, proto5 *tfprotov5.ValidateResourceTypeConfigRequest, resource resource.Resource, resourceSchema fwschema.Schema) (*fwserver.ValidateResourceConfigRequest, diag.Diagnostics) { + if proto5 == nil { + return nil, nil + } + + fw := &fwserver.ValidateResourceConfigRequest{} + + config, diags := Config(ctx, proto5.Config, resourceSchema) + + fw.Config = config + fw.Resource = resource + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/applyresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/applyresourcechange.go new file mode 100644 index 0000000000..45f7e7acff --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/applyresourcechange.go @@ -0,0 +1,74 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +// ApplyResourceChangeRequest returns the *fwserver.ApplyResourceChangeRequest +// equivalent of a *tfprotov6.ApplyResourceChangeRequest. +func ApplyResourceChangeRequest(ctx context.Context, proto6 *tfprotov6.ApplyResourceChangeRequest, resource resource.Resource, resourceSchema fwschema.Schema, providerMetaSchema fwschema.Schema) (*fwserver.ApplyResourceChangeRequest, diag.Diagnostics) { + if proto6 == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. 
+ if resourceSchema == nil { + diags.AddError( + "Missing Resource Schema", + "An unexpected error was encountered when handling the request. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + fw := &fwserver.ApplyResourceChangeRequest{ + ResourceSchema: resourceSchema, + Resource: resource, + } + + config, configDiags := Config(ctx, proto6.Config, resourceSchema) + + diags.Append(configDiags...) + + fw.Config = config + + plannedState, plannedStateDiags := Plan(ctx, proto6.PlannedState, resourceSchema) + + diags.Append(plannedStateDiags...) + + fw.PlannedState = plannedState + + priorState, priorStateDiags := State(ctx, proto6.PriorState, resourceSchema) + + diags.Append(priorStateDiags...) + + fw.PriorState = priorState + + providerMeta, providerMetaDiags := ProviderMeta(ctx, proto6.ProviderMeta, providerMetaSchema) + + diags.Append(providerMetaDiags...) + + fw.ProviderMeta = providerMeta + + privateData, privateDataDiags := privatestate.NewData(ctx, proto6.PlannedPrivate) + + diags.Append(privateDataDiags...) 
+ + fw.PlannedPrivate = privateData + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/config.go new file mode 100644 index 0000000000..ca0165f369 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/config.go @@ -0,0 +1,50 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// Config returns the *tfsdk.Config for a *tfprotov6.DynamicValue and +// fwschema.Schema. +func Config(ctx context.Context, proto6DynamicValue *tfprotov6.DynamicValue, schema fwschema.Schema) (*tfsdk.Config, diag.Diagnostics) { + if proto6DynamicValue == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if schema == nil { + diags.AddError( + "Unable to Convert Configuration", + "An unexpected error was encountered when converting the configuration from the protocol type. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + data, dynamicValueDiags := DynamicValue(ctx, proto6DynamicValue, schema, fwschemadata.DataDescriptionConfiguration) + + diags.Append(dynamicValueDiags...) 
+ + if diags.HasError() { + return nil, diags + } + + fw := &tfsdk.Config{ + Raw: data.TerraformValue, + Schema: schema, + } + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/configureprovider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/configureprovider.go new file mode 100644 index 0000000000..8efcd764e9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/configureprovider.go @@ -0,0 +1,30 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ConfigureProviderRequest returns the *fwserver.ConfigureProviderRequest +// equivalent of a *tfprotov6.ConfigureProviderRequest. 
+func ConfigureProviderRequest(ctx context.Context, proto6 *tfprotov6.ConfigureProviderRequest, providerSchema fwschema.Schema) (*provider.ConfigureRequest, diag.Diagnostics) { + if proto6 == nil { + return nil, nil + } + + fw := &provider.ConfigureRequest{ + TerraformVersion: proto6.TerraformVersion, + } + + config, diags := Config(ctx, proto6.Config, providerSchema) + + if config != nil { + fw.Config = *config + } + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/doc.go new file mode 100644 index 0000000000..8330b89e70 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/doc.go @@ -0,0 +1,3 @@ +// Package fromproto6 contains functions to convert from protocol version 6 +// (tfprotov6) types to framework types. +package fromproto6 diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/dynamic_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/dynamic_value.go new file mode 100644 index 0000000000..3b5400bd40 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/dynamic_value.go @@ -0,0 +1,51 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// DynamicValue returns the fwschemadata.Data for a given +// *tfprotov6.DynamicValue. 
+// +// If necessary, the underlying data is modified to convert list and set block +// values from an empty collection to a null collection. This is to prevent +// developers from needing to understand Terraform's differences between +// block and attribute values where blocks are technically never null, but from +// a developer perspective this distinction introduces unnecessary complexity. +func DynamicValue(ctx context.Context, proto6 *tfprotov6.DynamicValue, schema fwschema.Schema, description fwschemadata.DataDescription) (fwschemadata.Data, diag.Diagnostics) { + var diags diag.Diagnostics + + data := &fwschemadata.Data{ + Description: description, + Schema: schema, + } + + if proto6 == nil { + return *data, diags + } + + proto6Value, err := proto6.Unmarshal(schema.Type().TerraformType(ctx)) + + if err != nil { + diags.AddError( + "Unable to Convert "+description.Title(), + "An unexpected error was encountered when converting the "+description.String()+" from the protocol type. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Unable to unmarshal DynamicValue: "+err.Error(), + ) + + return *data, diags + } + + data.TerraformValue = proto6Value + + diags.Append(data.NullifyCollectionBlocks(ctx)...) 
+ + return *data, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/getproviderschema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/getproviderschema.go new file mode 100644 index 0000000000..8ebe7b21f1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/getproviderschema.go @@ -0,0 +1,20 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// GetProviderSchemaRequest returns the *fwserver.GetProviderSchemaRequest +// equivalent of a *tfprotov6.GetProviderSchemaRequest. +func GetProviderSchemaRequest(ctx context.Context, proto6 *tfprotov6.GetProviderSchemaRequest) *fwserver.GetProviderSchemaRequest { + if proto6 == nil { + return nil + } + + fw := &fwserver.GetProviderSchemaRequest{} + + return fw +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/importresourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/importresourcestate.go new file mode 100644 index 0000000000..79c4f8d37b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/importresourcestate.go @@ -0,0 +1,49 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// 
ImportResourceStateRequest returns the *fwserver.ImportResourceStateRequest +// equivalent of a *tfprotov6.ImportResourceStateRequest. +func ImportResourceStateRequest(ctx context.Context, proto6 *tfprotov6.ImportResourceStateRequest, resource resource.Resource, resourceSchema fwschema.Schema) (*fwserver.ImportResourceStateRequest, diag.Diagnostics) { + if proto6 == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if resourceSchema == nil { + diags.AddError( + "Unable to Create Empty State", + "An unexpected error was encountered when creating the empty state. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + fw := &fwserver.ImportResourceStateRequest{ + EmptyState: tfsdk.State{ + Raw: tftypes.NewValue(resourceSchema.Type().TerraformType(ctx), nil), + Schema: resourceSchema, + }, + ID: proto6.ID, + Resource: resource, + TypeName: proto6.TypeName, + } + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/plan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/plan.go new file mode 100644 index 0000000000..09e0520904 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/plan.go @@ -0,0 +1,50 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + 
"github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// Plan returns the *tfsdk.Plan for a *tfprotov6.DynamicValue and +// fwschema.Schema. +func Plan(ctx context.Context, proto6DynamicValue *tfprotov6.DynamicValue, schema fwschema.Schema) (*tfsdk.Plan, diag.Diagnostics) { + if proto6DynamicValue == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if schema == nil { + diags.AddError( + "Unable to Convert Plan", + "An unexpected error was encountered when converting the plan from the protocol type. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + data, dynamicValueDiags := DynamicValue(ctx, proto6DynamicValue, schema, fwschemadata.DataDescriptionPlan) + + diags.Append(dynamicValueDiags...) 
+ + if diags.HasError() { + return nil, diags + } + + fw := &tfsdk.Plan{ + Raw: data.TerraformValue, + Schema: schema, + } + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/planresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/planresourcechange.go new file mode 100644 index 0000000000..3db72042b8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/planresourcechange.go @@ -0,0 +1,74 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +// PlanResourceChangeRequest returns the *fwserver.PlanResourceChangeRequest +// equivalent of a *tfprotov6.PlanResourceChangeRequest. +func PlanResourceChangeRequest(ctx context.Context, proto6 *tfprotov6.PlanResourceChangeRequest, resource resource.Resource, resourceSchema fwschema.Schema, providerMetaSchema fwschema.Schema) (*fwserver.PlanResourceChangeRequest, diag.Diagnostics) { + if proto6 == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if resourceSchema == nil { + diags.AddError( + "Missing Resource Schema", + "An unexpected error was encountered when handling the request. 
"+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + fw := &fwserver.PlanResourceChangeRequest{ + ResourceSchema: resourceSchema, + Resource: resource, + } + + config, configDiags := Config(ctx, proto6.Config, resourceSchema) + + diags.Append(configDiags...) + + fw.Config = config + + priorState, priorStateDiags := State(ctx, proto6.PriorState, resourceSchema) + + diags.Append(priorStateDiags...) + + fw.PriorState = priorState + + proposedNewState, proposedNewStateDiags := Plan(ctx, proto6.ProposedNewState, resourceSchema) + + diags.Append(proposedNewStateDiags...) + + fw.ProposedNewState = proposedNewState + + providerMeta, providerMetaDiags := ProviderMeta(ctx, proto6.ProviderMeta, providerMetaSchema) + + diags.Append(providerMetaDiags...) + + fw.ProviderMeta = providerMeta + + privateData, privateDataDiags := privatestate.NewData(ctx, proto6.PriorPrivate) + + diags.Append(privateDataDiags...) 
+ + fw.PriorPrivate = privateData + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/providermeta.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/providermeta.go new file mode 100644 index 0000000000..1f30c7083e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/providermeta.go @@ -0,0 +1,51 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// ProviderMeta returns the *tfsdk.Config for a *tfprotov6.DynamicValue and +// fwschema.Schema. This data handling is different than Config to simplify +// implementors, in that: +// +// - Missing Schema will return nil, rather than an error +// - Missing DynamicValue will return nil typed Value, rather than an error +func ProviderMeta(ctx context.Context, proto6DynamicValue *tfprotov6.DynamicValue, schema fwschema.Schema) (*tfsdk.Config, diag.Diagnostics) { + if schema == nil { + return nil, nil + } + + var diags diag.Diagnostics + + fw := &tfsdk.Config{ + Raw: tftypes.NewValue(schema.Type().TerraformType(ctx), nil), + Schema: schema, + } + + if proto6DynamicValue == nil { + return fw, nil + } + + proto6Value, err := proto6DynamicValue.Unmarshal(schema.Type().TerraformType(ctx)) + + if err != nil { + diags.AddError( + "Unable to Convert Provider Meta Configuration", + "An unexpected error was encountered when converting the provider meta configuration from the protocol type. 
"+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+err.Error(), + ) + + return nil, diags + } + + fw.Raw = proto6Value + + return fw, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/readdatasource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/readdatasource.go new file mode 100644 index 0000000000..776c5864ce --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/readdatasource.go @@ -0,0 +1,54 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ReadDataSourceRequest returns the *fwserver.ReadDataSourceRequest +// equivalent of a *tfprotov6.ReadDataSourceRequest. +func ReadDataSourceRequest(ctx context.Context, proto6 *tfprotov6.ReadDataSourceRequest, dataSource datasource.DataSource, dataSourceSchema fwschema.Schema, providerMetaSchema fwschema.Schema) (*fwserver.ReadDataSourceRequest, diag.Diagnostics) { + if proto6 == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if dataSourceSchema == nil { + diags.AddError( + "Missing DataSource Schema", + "An unexpected error was encountered when handling the request. 
"+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + fw := &fwserver.ReadDataSourceRequest{ + DataSourceSchema: dataSourceSchema, + DataSource: dataSource, + } + + config, configDiags := Config(ctx, proto6.Config, dataSourceSchema) + + diags.Append(configDiags...) + + fw.Config = config + + providerMeta, providerMetaDiags := ProviderMeta(ctx, proto6.ProviderMeta, providerMetaSchema) + + diags.Append(providerMetaDiags...) + + fw.ProviderMeta = providerMeta + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/readresource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/readresource.go new file mode 100644 index 0000000000..a9f5f9a71b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/readresource.go @@ -0,0 +1,47 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +// ReadResourceRequest returns the *fwserver.ReadResourceRequest +// equivalent of a *tfprotov6.ReadResourceRequest. 
+func ReadResourceRequest(ctx context.Context, proto6 *tfprotov6.ReadResourceRequest, resource resource.Resource, resourceSchema fwschema.Schema, providerMetaSchema fwschema.Schema) (*fwserver.ReadResourceRequest, diag.Diagnostics) { + if proto6 == nil { + return nil, nil + } + + var diags diag.Diagnostics + + fw := &fwserver.ReadResourceRequest{ + Resource: resource, + } + + currentState, currentStateDiags := State(ctx, proto6.CurrentState, resourceSchema) + + diags.Append(currentStateDiags...) + + fw.CurrentState = currentState + + providerMeta, providerMetaDiags := ProviderMeta(ctx, proto6.ProviderMeta, providerMetaSchema) + + diags.Append(providerMetaDiags...) + + fw.ProviderMeta = providerMeta + + privateData, privateDataDiags := privatestate.NewData(ctx, proto6.Private) + + diags.Append(privateDataDiags...) + + fw.Private = privateData + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/state.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/state.go new file mode 100644 index 0000000000..2c05d5383c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/state.go @@ -0,0 +1,50 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// State returns the *tfsdk.State for a *tfprotov6.DynamicValue and +// fwschema.Schema. 
+func State(ctx context.Context, proto6DynamicValue *tfprotov6.DynamicValue, schema fwschema.Schema) (*tfsdk.State, diag.Diagnostics) { + if proto6DynamicValue == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if schema == nil { + diags.AddError( + "Unable to Convert State", + "An unexpected error was encountered when converting the state from the protocol type. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + data, dynamicValueDiags := DynamicValue(ctx, proto6DynamicValue, schema, fwschemadata.DataDescriptionState) + + diags.Append(dynamicValueDiags...) + + if diags.HasError() { + return nil, diags + } + + fw := &tfsdk.State{ + Raw: data.TerraformValue, + Schema: schema, + } + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/upgraderesourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/upgraderesourcestate.go new file mode 100644 index 0000000000..f137cd27bd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/upgraderesourcestate.go @@ -0,0 +1,44 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// UpgradeResourceStateRequest returns the *fwserver.UpgradeResourceStateRequest +// equivalent of a 
*tfprotov6.UpgradeResourceStateRequest. +func UpgradeResourceStateRequest(ctx context.Context, proto6 *tfprotov6.UpgradeResourceStateRequest, resource resource.Resource, resourceSchema fwschema.Schema) (*fwserver.UpgradeResourceStateRequest, diag.Diagnostics) { + if proto6 == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Panic prevention here to simplify the calling implementations. + // This should not happen, but just in case. + if resourceSchema == nil { + diags.AddError( + "Unable to Create Empty State", + "An unexpected error was encountered when creating the empty state. "+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Missing schema.", + ) + + return nil, diags + } + + fw := &fwserver.UpgradeResourceStateRequest{ + RawState: proto6.RawState, + ResourceSchema: resourceSchema, + Resource: resource, + Version: proto6.Version, + } + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/validatedatasourceconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/validatedatasourceconfig.go new file mode 100644 index 0000000000..55b6656cac --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/validatedatasourceconfig.go @@ -0,0 +1,28 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ValidateDataSourceConfigRequest returns the *fwserver.ValidateDataSourceConfigRequest +// 
equivalent of a *tfprotov6.ValidateDataSourceConfigRequest. +func ValidateDataSourceConfigRequest(ctx context.Context, proto6 *tfprotov6.ValidateDataResourceConfigRequest, dataSource datasource.DataSource, dataSourceSchema fwschema.Schema) (*fwserver.ValidateDataSourceConfigRequest, diag.Diagnostics) { + if proto6 == nil { + return nil, nil + } + + fw := &fwserver.ValidateDataSourceConfigRequest{} + + config, diags := Config(ctx, proto6.Config, dataSourceSchema) + + fw.Config = config + fw.DataSource = dataSource + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/validateproviderconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/validateproviderconfig.go new file mode 100644 index 0000000000..09bd0e1209 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/validateproviderconfig.go @@ -0,0 +1,26 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ValidateProviderConfigRequest returns the *fwserver.ValidateProviderConfigRequest +// equivalent of a *tfprotov6.ValidateProviderConfigRequest. 
+func ValidateProviderConfigRequest(ctx context.Context, proto6 *tfprotov6.ValidateProviderConfigRequest, providerSchema fwschema.Schema) (*fwserver.ValidateProviderConfigRequest, diag.Diagnostics) { + if proto6 == nil { + return nil, nil + } + + fw := &fwserver.ValidateProviderConfigRequest{} + + config, diags := Config(ctx, proto6.Config, providerSchema) + + fw.Config = config + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/validateresourceconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/validateresourceconfig.go new file mode 100644 index 0000000000..19a5cdd5b1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromproto6/validateresourceconfig.go @@ -0,0 +1,28 @@ +package fromproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ValidateResourceConfigRequest returns the *fwserver.ValidateResourceConfigRequest +// equivalent of a *tfprotov6.ValidateResourceConfigRequest. 
+func ValidateResourceConfigRequest(ctx context.Context, proto6 *tfprotov6.ValidateResourceConfigRequest, resource resource.Resource, resourceSchema fwschema.Schema) (*fwserver.ValidateResourceConfigRequest, diag.Diagnostics) { + if proto6 == nil { + return nil, nil + } + + fw := &fwserver.ValidateResourceConfigRequest{} + + config, diags := Config(ctx, proto6.Config, resourceSchema) + + fw.Config = config + fw.Resource = resource + + return fw, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes/attribute_path.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes/attribute_path.go new file mode 100644 index 0000000000..d3e848676e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes/attribute_path.go @@ -0,0 +1,88 @@ +package fromtftypes + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// AttributePath returns the path.Path equivalent of a *tftypes.AttributePath. +func AttributePath(ctx context.Context, tfType *tftypes.AttributePath, schema fwschema.Schema) (path.Path, diag.Diagnostics) { + fwPath := path.Empty() + + for tfTypeStepIndex, tfTypeStep := range tfType.Steps() { + currentTfTypeSteps := tfType.Steps()[:tfTypeStepIndex+1] + currentTfTypePath := tftypes.NewAttributePathWithSteps(currentTfTypeSteps) + attrType, err := schema.TypeAtTerraformPath(ctx, currentTfTypePath) + + if err != nil { + return path.Empty(), diag.Diagnostics{ + diag.NewErrorDiagnostic( + "Unable to Convert Attribute Path", + "An unexpected error occurred while trying to convert an attribute path. "+ + "This is an error in terraform-plugin-framework used by the provider. 
"+ + "Please report the following to the provider developers.\n\n"+ + // Since this is an error with the attribute path + // conversion, we cannot return a protocol path-based + // diagnostic. Returning a framework human-readable + // representation seems like the next best thing to do. + fmt.Sprintf("Attribute Path: %s\n", currentTfTypePath.String())+ + fmt.Sprintf("Original Error: %s", err), + ), + } + } + + fwStep, err := AttributePathStep(ctx, tfTypeStep, attrType) + + if err != nil { + return path.Empty(), diag.Diagnostics{ + diag.NewErrorDiagnostic( + "Unable to Convert Attribute Path", + "An unexpected error occurred while trying to convert an attribute path. "+ + "This is either an error in terraform-plugin-framework or a custom attribute type used by the provider. "+ + "Please report the following to the provider developers.\n\n"+ + // Since this is an error with the attribute path + // conversion, we cannot return a protocol path-based + // diagnostic. Returning a framework human-readable + // representation seems like the next best thing to do. + fmt.Sprintf("Attribute Path: %s\n", currentTfTypePath.String())+ + fmt.Sprintf("Original Error: %s", err), + ), + } + } + + // In lieu of creating a path.NewPathFromSteps function, this path + // building logic is inlined to not expand the path package API. + switch fwStep := fwStep.(type) { + case path.PathStepAttributeName: + fwPath = fwPath.AtName(string(fwStep)) + case path.PathStepElementKeyInt: + fwPath = fwPath.AtListIndex(int(fwStep)) + case path.PathStepElementKeyString: + fwPath = fwPath.AtMapKey(string(fwStep)) + case path.PathStepElementKeyValue: + fwPath = fwPath.AtSetValue(fwStep.Value) + default: + return fwPath, diag.Diagnostics{ + diag.NewErrorDiagnostic( + "Unable to Convert Attribute Path", + "An unexpected error occurred while trying to convert an attribute path. "+ + "This is an error in terraform-plugin-framework used by the provider. 
"+ + "Please report the following to the provider developers.\n\n"+ + // Since this is an error with the attribute path + // conversion, we cannot return a protocol path-based + // diagnostic. Returning a framework human-readable + // representation seems like the next best thing to do. + fmt.Sprintf("Attribute Path: %s\n", currentTfTypePath.String())+ + fmt.Sprintf("Original Error: unknown path.PathStep type: %#v", fwStep), + ), + } + } + } + + return fwPath, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes/attribute_path_step.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes/attribute_path_step.go new file mode 100644 index 0000000000..aef594bd4a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes/attribute_path_step.go @@ -0,0 +1,35 @@ +package fromtftypes + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// AttributePathStep returns the path.PathStep equivalent of a +// tftypes.AttributePathStep. An error is returned instead of diag.Diagnostics +// so callers can include appropriate logical context about when the error +// occurred. 
+func AttributePathStep(ctx context.Context, tfType tftypes.AttributePathStep, attrType attr.Type) (path.PathStep, error) { + switch tfType := tfType.(type) { + case tftypes.AttributeName: + return path.PathStepAttributeName(string(tfType)), nil + case tftypes.ElementKeyInt: + return path.PathStepElementKeyInt(int64(tfType)), nil + case tftypes.ElementKeyString: + return path.PathStepElementKeyString(string(tfType)), nil + case tftypes.ElementKeyValue: + attrValue, err := Value(ctx, tftypes.Value(tfType), attrType) + + if err != nil { + return nil, fmt.Errorf("unable to create PathStepElementKeyValue from tftypes.Value: %w", err) + } + + return path.PathStepElementKeyValue{Value: attrValue}, nil + default: + return nil, fmt.Errorf("unknown tftypes.AttributePathStep: %#v", tfType) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes/doc.go new file mode 100644 index 0000000000..c97dd11495 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes/doc.go @@ -0,0 +1,3 @@ +// Package fromtftypes contains functions to convert from terraform-plugin-go +// tftypes types to framework types. 
+package fromtftypes diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes/value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes/value.go new file mode 100644 index 0000000000..4fc20875db --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes/value.go @@ -0,0 +1,24 @@ +package fromtftypes + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Value returns the attr.Value equivalent to the tftypes.Value. +func Value(ctx context.Context, tfType tftypes.Value, attrType attr.Type) (attr.Value, error) { + if attrType == nil { + return nil, fmt.Errorf("unable to convert tftypes.Value (%s) to attr.Value: missing attr.Type", tfType.String()) + } + + attrValue, err := attrType.ValueFromTerraform(ctx, tfType) + + if err != nil { + return nil, fmt.Errorf("unable to convert tftypes.Value (%s) to attr.Value: %w", tfType.String(), err) + } + + return attrValue, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/attribute.go new file mode 100644 index 0000000000..580a4af320 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/attribute.go @@ -0,0 +1,102 @@ +package fwschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Attribute is the core interface required for implementing Terraform +// schema functionality that can accept a value. Refer to NestedAttribute for +// the additional interface that defines nested attributes. 
+// +// Refer to the internal/fwschema/fwxschema package for optional interfaces +// that define framework-specific functionality, such a plan modification and +// validation. +type Attribute interface { + // Implementations should include the tftypes.AttributePathStepper + // interface methods for proper path and data handling. + tftypes.AttributePathStepper + + // Equal should return true if the other attribute is exactly equivalent. + Equal(o Attribute) bool + + // GetDeprecationMessage should return a non-empty string if an attribute + // is deprecated. This is named differently than DeprecationMessage to + // prevent a conflict with the tfsdk.Attribute field name. + GetDeprecationMessage() string + + // GetDescription should return a non-empty string if an attribute + // has a plaintext description. This is named differently than Description + // to prevent a conflict with the tfsdk.Attribute field name. + GetDescription() string + + // GetMarkdownDescription should return a non-empty string if an attribute + // has a Markdown description. This is named differently than + // MarkdownDescription to prevent a conflict with the tfsdk.Attribute field + // name. + GetMarkdownDescription() string + + // GetType should return the framework type of an attribute. This is named + // differently than Type to prevent a conflict with the tfsdk.Attribute + // field name. + GetType() attr.Type + + // IsComputed should return true if the attribute configuration value is + // computed. This is named differently than Computed to prevent a conflict + // with the tfsdk.Attribute field name. + IsComputed() bool + + // IsOptional should return true if the attribute configuration value is + // optional. This is named differently than Optional to prevent a conflict + // with the tfsdk.Attribute field name. + IsOptional() bool + + // IsRequired should return true if the attribute configuration value is + // required. 
This is named differently than Required to prevent a conflict + // with the tfsdk.Attribute field name. + IsRequired() bool + + // IsSensitive should return true if the attribute configuration value is + // sensitive. This is named differently than Sensitive to prevent a + // conflict with the tfsdk.Attribute field name. + IsSensitive() bool +} + +// AttributesEqual is a helper function to perform equality testing on two +// Attribute. Attribute Equal implementations should still compare the concrete +// types in addition to using this helper. +func AttributesEqual(a, b Attribute) bool { + if !a.GetType().Equal(b.GetType()) { + return false + } + + if a.GetDeprecationMessage() != b.GetDeprecationMessage() { + return false + } + + if a.GetDescription() != b.GetDescription() { + return false + } + + if a.GetMarkdownDescription() != b.GetMarkdownDescription() { + return false + } + + if a.IsRequired() != b.IsRequired() { + return false + } + + if a.IsOptional() != b.IsOptional() { + return false + } + + if a.IsComputed() != b.IsComputed() { + return false + } + + if a.IsSensitive() != b.IsSensitive() { + return false + } + + return true +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/attribute_nesting_mode.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/attribute_nesting_mode.go new file mode 100644 index 0000000000..47e40cd03a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/attribute_nesting_mode.go @@ -0,0 +1,32 @@ +package fwschema + +// NestingMode is an enum type of the ways nested attributes can be nested in +// an attribute. They can be a list, a set, a map (with string +// keys), or they can be nested directly, like an object. 
+type NestingMode uint8 + +const ( + // NestingModeUnknown is an invalid nesting mode, used to catch when a + // nesting mode is expected and not set. + NestingModeUnknown NestingMode = 0 + + // NestingModeSingle is for attributes that represent a struct or + // object, a single instance of those attributes directly nested under + // another attribute. + NestingModeSingle NestingMode = 1 + + // NestingModeList is for attributes that represent a list of objects, + // with multiple instances of those attributes nested inside a list + // under another attribute. + NestingModeList NestingMode = 2 + + // NestingModeSet is for attributes that represent a set of objects, + // with multiple, unique instances of those attributes nested inside a + // set under another attribute. + NestingModeSet NestingMode = 3 + + // NestingModeMap is for attributes that represent a map of objects, + // with multiple instances of those attributes, each associated with a + // unique string key, nested inside a map under another attribute. + NestingModeMap NestingMode = 4 +) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/block.go new file mode 100644 index 0000000000..36bd6ec3c3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/block.go @@ -0,0 +1,109 @@ +package fwschema + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Block is the core interface required for implementing Terraform schema +// functionality that structurally holds attributes and blocks. 
This is +// intended to be the first abstraction of tfsdk.Block functionality into +// data source, provider, and resource specific functionality. +// +// Refer to the internal/fwschema/fwxschema package for optional interfaces +// that define framework-specific functionality, such a plan modification and +// validation. +// +// Note that MaxItems and MinItems support, while defined in the Terraform +// protocol, is intentially not present. Terraform can only perform limited +// static analysis of blocks and errors generated occur before the provider +// is called for configuration validation, which means that practitioners do +// not get all configuration errors at the same time. Provider developers can +// implement validators to achieve the same validation functionality. +type Block interface { + // Implementations should include the tftypes.AttributePathStepper + // interface methods for proper path and data handling. + tftypes.AttributePathStepper + + // Equal should return true if the other block is exactly equivalent. + Equal(o Block) bool + + // GetDeprecationMessage should return a non-empty string if an attribute + // is deprecated. This is named differently than DeprecationMessage to + // prevent a conflict with the tfsdk.Attribute field name. + GetDeprecationMessage() string + + // GetDescription should return a non-empty string if an attribute + // has a plaintext description. This is named differently than Description + // to prevent a conflict with the tfsdk.Attribute field name. + GetDescription() string + + // GetMarkdownDescription should return a non-empty string if an attribute + // has a Markdown description. This is named differently than + // MarkdownDescription to prevent a conflict with the tfsdk.Attribute field + // name. + GetMarkdownDescription() string + + // GetNestedObject should return the object underneath the block. + // For single nesting mode, the NestedBlockObject can be generated from + // the Block. 
+ GetNestedObject() NestedBlockObject + + // GetNestingMode should return the nesting mode of a block. This is named + // differently than NestingMode to prevent a conflict with the tfsdk.Block + // field name. + GetNestingMode() BlockNestingMode + + // Type should return the framework type of a block. + Type() attr.Type +} + +// BlocksEqual is a helper function to perform equality testing on two +// Block. Attribute Equal implementations should still compare the concrete +// types in addition to using this helper. +func BlocksEqual(a, b Block) bool { + if !a.Type().Equal(b.Type()) { + return false + } + + if a.GetDeprecationMessage() != b.GetDeprecationMessage() { + return false + } + + if a.GetDescription() != b.GetDescription() { + return false + } + + if a.GetMarkdownDescription() != b.GetMarkdownDescription() { + return false + } + + return true +} + +// BlockPathExpressions recursively returns a slice of the current path +// expression and all underlying path expressions which represent a Block. +func BlockPathExpressions(ctx context.Context, block Block, pathExpression path.Expression) path.Expressions { + result := path.Expressions{pathExpression} + + for name, nestedBlock := range block.GetNestedObject().GetBlocks() { + nestingMode := block.GetNestingMode() + + switch nestingMode { + case BlockNestingModeList: + result = append(result, BlockPathExpressions(ctx, nestedBlock, pathExpression.AtAnyListIndex().AtName(name))...) + case BlockNestingModeSet: + result = append(result, BlockPathExpressions(ctx, nestedBlock, pathExpression.AtAnySetValue().AtName(name))...) + case BlockNestingModeSingle: + result = append(result, BlockPathExpressions(ctx, nestedBlock, pathExpression.AtName(name))...) 
+ default: + panic(fmt.Sprintf("unhandled BlockNestingMode: %T", nestingMode)) + } + } + + return result +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/block_nested_mode.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/block_nested_mode.go new file mode 100644 index 0000000000..d92e622b35 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/block_nested_mode.go @@ -0,0 +1,36 @@ +package fwschema + +// BlockNestingMode is an enum type of the ways attributes and blocks can be +// nested in a block. They can be a list or a set. +// +// While the protocol and theoretically Terraform itself support map and group +// nesting modes, this framework intentionally only supports list, set, and +// single blocks as those other modes were not typically implemented or +// tested with Terraform since the older Terraform Plugin SDK did not support +// them. +type BlockNestingMode uint8 + +const ( + // BlockNestingModeUnknown is an invalid nesting mode, used to catch when a + // nesting mode is expected and not set. + BlockNestingModeUnknown BlockNestingMode = 0 + + // BlockNestingModeList is for attributes that represent a list of objects, + // with multiple instances of those attributes nested inside a list + // under another attribute. + BlockNestingModeList BlockNestingMode = 1 + + // BlockNestingModeSet is for attributes that represent a set of objects, + // with multiple, unique instances of those attributes nested inside a + // set under another attribute. + BlockNestingModeSet BlockNestingMode = 2 + + // BlockNestingModeSingle is for attributes that represent a single object. + // The object cannot be repeated in the practitioner configuration. 
+ // + // While the framework implements support for this block nesting mode, it + // is not thoroughly tested in production Terraform environments beyond the + // resource timeouts block from the older Terraform Plugin SDK. Use single + // nested attributes for new implementations instead. + BlockNestingModeSingle BlockNestingMode = 3 +) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/doc.go new file mode 100644 index 0000000000..35f544904c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/doc.go @@ -0,0 +1,7 @@ +// Package fwschema implements shared logic for describing the structure, +// data types, and behaviors of framework data for data sources, providers, +// and resources. +// +// Refer to the internal/fwschemadata package for logic built on values based +// on this schema information. +package fwschema diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/errors.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/errors.go new file mode 100644 index 0000000000..d1be30e441 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/errors.go @@ -0,0 +1,15 @@ +package fwschema + +import "errors" + +var ( + // ErrPathInsideAtomicAttribute is used with AttributeAtPath is called + // on a path that doesn't have a schema associated with it, because + // it's an element, attribute, or block of a complex type, not a nested + // attribute. 
+ ErrPathInsideAtomicAttribute = errors.New("path leads to element, attribute, or block of a schema.Attribute that has no schema associated with it") + + // ErrPathIsBlock is used with AttributeAtPath is called on a path is a + // block, not an attribute. Use blockAtPath on the path instead. + ErrPathIsBlock = errors.New("path leads to block, not an attribute") +) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/attribute_plan_modification.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/attribute_plan_modification.go new file mode 100644 index 0000000000..813b729ea5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/attribute_plan_modification.go @@ -0,0 +1,87 @@ +package fwxschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" +) + +// AttributeWithBoolPlanModifiers is an optional interface on Attribute which +// enables Bool plan modifier support. +type AttributeWithBoolPlanModifiers interface { + fwschema.Attribute + + // BoolPlanModifiers should return a list of Bool plan modifiers. + BoolPlanModifiers() []planmodifier.Bool +} + +// AttributeWithFloat64PlanModifiers is an optional interface on Attribute which +// enables Float64 plan modifier support. +type AttributeWithFloat64PlanModifiers interface { + fwschema.Attribute + + // Float64PlanModifiers should return a list of Float64 plan modifiers. + Float64PlanModifiers() []planmodifier.Float64 +} + +// AttributeWithInt64PlanModifiers is an optional interface on Attribute which +// enables Int64 plan modifier support. +type AttributeWithInt64PlanModifiers interface { + fwschema.Attribute + + // Int64PlanModifiers should return a list of Int64 plan modifiers. 
+ Int64PlanModifiers() []planmodifier.Int64 +} + +// AttributeWithListPlanModifiers is an optional interface on Attribute which +// enables List plan modifier support. +type AttributeWithListPlanModifiers interface { + fwschema.Attribute + + // ListPlanModifiers should return a list of List plan modifiers. + ListPlanModifiers() []planmodifier.List +} + +// AttributeWithMapPlanModifiers is an optional interface on Attribute which +// enables Map plan modifier support. +type AttributeWithMapPlanModifiers interface { + fwschema.Attribute + + // MapPlanModifiers should return a list of Map plan modifiers. + MapPlanModifiers() []planmodifier.Map +} + +// AttributeWithNumberPlanModifiers is an optional interface on Attribute which +// enables Number plan modifier support. +type AttributeWithNumberPlanModifiers interface { + fwschema.Attribute + + // NumberPlanModifiers should return a list of Number plan modifiers. + NumberPlanModifiers() []planmodifier.Number +} + +// AttributeWithObjectPlanModifiers is an optional interface on Attribute which +// enables Object plan modifier support. +type AttributeWithObjectPlanModifiers interface { + fwschema.Attribute + + // ObjectPlanModifiers should return a list of Object plan modifiers. + ObjectPlanModifiers() []planmodifier.Object +} + +// AttributeWithSetPlanModifiers is an optional interface on Attribute which +// enables Set plan modifier support. +type AttributeWithSetPlanModifiers interface { + fwschema.Attribute + + // SetPlanModifiers should return a list of Set plan modifiers. + SetPlanModifiers() []planmodifier.Set +} + +// AttributeWithStringPlanModifiers is an optional interface on Attribute which +// enables String plan modifier support. +type AttributeWithStringPlanModifiers interface { + fwschema.Attribute + + // StringPlanModifiers should return a list of String plan modifiers. 
+ StringPlanModifiers() []planmodifier.String +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/attribute_validation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/attribute_validation.go new file mode 100644 index 0000000000..458b665c06 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/attribute_validation.go @@ -0,0 +1,87 @@ +package fwxschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +// AttributeWithBoolValidators is an optional interface on Attribute which +// enables Bool validation support. +type AttributeWithBoolValidators interface { + fwschema.Attribute + + // BoolValidators should return a list of Bool validators. + BoolValidators() []validator.Bool +} + +// AttributeWithFloat64Validators is an optional interface on Attribute which +// enables Float64 validation support. +type AttributeWithFloat64Validators interface { + fwschema.Attribute + + // Float64Validators should return a list of Float64 validators. + Float64Validators() []validator.Float64 +} + +// AttributeWithInt64Validators is an optional interface on Attribute which +// enables Int64 validation support. +type AttributeWithInt64Validators interface { + fwschema.Attribute + + // Int64Validators should return a list of Int64 validators. + Int64Validators() []validator.Int64 +} + +// AttributeWithListValidators is an optional interface on Attribute which +// enables List validation support. +type AttributeWithListValidators interface { + fwschema.Attribute + + // ListValidators should return a list of List validators. 
+ ListValidators() []validator.List +} + +// AttributeWithMapValidators is an optional interface on Attribute which +// enables Map validation support. +type AttributeWithMapValidators interface { + fwschema.Attribute + + // MapValidators should return a list of Map validators. + MapValidators() []validator.Map +} + +// AttributeWithNumberValidators is an optional interface on Attribute which +// enables Number validation support. +type AttributeWithNumberValidators interface { + fwschema.Attribute + + // NumberValidators should return a list of Number validators. + NumberValidators() []validator.Number +} + +// AttributeWithObjectValidators is an optional interface on Attribute which +// enables Object validation support. +type AttributeWithObjectValidators interface { + fwschema.Attribute + + // ObjectValidators should return a list of Object validators. + ObjectValidators() []validator.Object +} + +// AttributeWithSetValidators is an optional interface on Attribute which +// enables Set validation support. +type AttributeWithSetValidators interface { + fwschema.Attribute + + // SetValidators should return a list of Set validators. + SetValidators() []validator.Set +} + +// AttributeWithStringValidators is an optional interface on Attribute which +// enables String validation support. +type AttributeWithStringValidators interface { + fwschema.Attribute + + // StringValidators should return a list of String validators. 
+ StringValidators() []validator.String +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/block_plan_modification.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/block_plan_modification.go new file mode 100644 index 0000000000..4a09099418 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/block_plan_modification.go @@ -0,0 +1,33 @@ +package fwxschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" +) + +// BlockWithListPlanModifiers is an optional interface on Block which +// enables List plan modifier support. +type BlockWithListPlanModifiers interface { + fwschema.Block + + // ListPlanModifiers should return a list of List plan modifiers. + ListPlanModifiers() []planmodifier.List +} + +// BlockWithObjectPlanModifiers is an optional interface on Block which +// enables Object plan modifier support. +type BlockWithObjectPlanModifiers interface { + fwschema.Block + + // ObjectPlanModifiers should return a list of Object plan modifiers. + ObjectPlanModifiers() []planmodifier.Object +} + +// BlockWithSetPlanModifiers is an optional interface on Block which +// enables Set plan modifier support. +type BlockWithSetPlanModifiers interface { + fwschema.Block + + // SetPlanModifiers should return a list of Set plan modifiers. 
+ SetPlanModifiers() []planmodifier.Set +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/block_validation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/block_validation.go new file mode 100644 index 0000000000..22f664cccc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/block_validation.go @@ -0,0 +1,33 @@ +package fwxschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +// BlockWithListValidators is an optional interface on Block which +// enables List validation support. +type BlockWithListValidators interface { + fwschema.Block + + // ListValidators should return a list of List validators. + ListValidators() []validator.List +} + +// BlockWithObjectValidators is an optional interface on Block which +// enables Object validation support. +type BlockWithObjectValidators interface { + fwschema.Block + + // ObjectValidators should return a list of Object validators. + ObjectValidators() []validator.Object +} + +// BlockWithSetValidators is an optional interface on Block which +// enables Set validation support. +type BlockWithSetValidators interface { + fwschema.Block + + // SetValidators should return a list of Set validators. 
+ SetValidators() []validator.Set +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/doc.go new file mode 100644 index 0000000000..ade502876c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/doc.go @@ -0,0 +1,6 @@ +// Package fwxschema implements extra framework-based schema +// functionality on top of base Terraform attribute functionality. +// +// This package is separated from fwschema to prevent import cycles +// with existing tfsdk functionality. +package fwxschema diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/nested_attribute_object_plan_modification.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/nested_attribute_object_plan_modification.go new file mode 100644 index 0000000000..e40597469c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/nested_attribute_object_plan_modification.go @@ -0,0 +1,15 @@ +package fwxschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" +) + +// NestedAttributeObjectWithPlanModifiers is an optional interface on +// NestedAttributeObject which enables Object plan modification support. +type NestedAttributeObjectWithPlanModifiers interface { + fwschema.NestedAttributeObject + + // ObjectPlanModifiers should return a list of Object plan modifiers. 
+ ObjectPlanModifiers() []planmodifier.Object +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/nested_attribute_object_validation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/nested_attribute_object_validation.go new file mode 100644 index 0000000000..78dd234536 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/nested_attribute_object_validation.go @@ -0,0 +1,15 @@ +package fwxschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +// NestedAttributeObjectWithValidators is an optional interface on +// NestedAttributeObject which enables Object validation support. +type NestedAttributeObjectWithValidators interface { + fwschema.NestedAttributeObject + + // ObjectValidators should return a list of Object validators. + ObjectValidators() []validator.Object +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/nested_block_object_plan_modification.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/nested_block_object_plan_modification.go new file mode 100644 index 0000000000..2cf9782c66 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/nested_block_object_plan_modification.go @@ -0,0 +1,15 @@ +package fwxschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" +) + +// NestedBlockObjectWithPlanModifiers is an optional interface on +// NestedBlockObject which enables Object plan modification support. 
+type NestedBlockObjectWithPlanModifiers interface { + fwschema.NestedBlockObject + + // ObjectPlanModifiers should return a list of Object plan modifiers. + ObjectPlanModifiers() []planmodifier.Object +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/nested_block_object_validators.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/nested_block_object_validators.go new file mode 100644 index 0000000000..48e0b05072 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema/nested_block_object_validators.go @@ -0,0 +1,15 @@ +package fwxschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +// NestedBlockObjectWithValidators is an optional interface on +// NestedBlockObject which enables Object validation support. +type NestedBlockObjectWithValidators interface { + fwschema.NestedBlockObject + + // ObjectValidators should return a list of Object validators. + ObjectValidators() []validator.Object +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/nested_attribute.go new file mode 100644 index 0000000000..f2bcb1f5f2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/nested_attribute.go @@ -0,0 +1,16 @@ +package fwschema + +// NestedAttribute defines a schema attribute that contains nested attributes. +type NestedAttribute interface { + Attribute + + // GetNestedObject should return the object underneath the nested + // attribute. 
For single nesting mode, the NestedAttributeObject can be + // generated from the Attribute. + GetNestedObject() NestedAttributeObject + + // GetNestingMode should return the nesting mode (list, map, set, or + // single) of the nested attributes or left unset if this Attribute + // does not represent nested attributes. + GetNestingMode() NestingMode +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/nested_attribute_object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/nested_attribute_object.go new file mode 100644 index 0000000000..cc6f5bae71 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/nested_attribute_object.go @@ -0,0 +1,90 @@ +package fwschema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// NestedAttributeObject represents the Object inside a NestedAttribute. +// Refer to the fwxschema package for validation and plan modification +// extensions to this interface. +type NestedAttributeObject interface { + tftypes.AttributePathStepper + + // Equal should return true if given NestedAttributeObject is equivalent. + Equal(NestedAttributeObject) bool + + // GetAttributes should return the nested attributes of an attribute. + GetAttributes() UnderlyingAttributes + + // Type should return the framework type of the object. + Type() basetypes.ObjectTypable +} + +// NestedAttributeObjectApplyTerraform5AttributePathStep is a helper function +// to perform base tftypes.AttributePathStepper handling using the +// GetAttributes method. NestedAttributeObject implementations should still +// include custom type functionality in addition to using this helper. 
+func NestedAttributeObjectApplyTerraform5AttributePathStep(o NestedAttributeObject, step tftypes.AttributePathStep) (any, error) { + name, ok := step.(tftypes.AttributeName) + + if !ok { + return nil, fmt.Errorf("cannot apply AttributePathStep %T to NestedAttributeObject", step) + } + + attribute, ok := o.GetAttributes()[string(name)] + + if ok { + return attribute, nil + } + + return nil, fmt.Errorf("no attribute %q on NestedAttributeObject", name) +} + +// NestedAttributeObjectEqual is a helper function to perform base equality testing +// on two NestedAttributeObject. NestedAttributeObject implementations should still +// compare the concrete types and other custom functionality in addition to +// using this helper. +func NestedAttributeObjectEqual(a, b NestedAttributeObject) bool { + if !a.Type().Equal(b.Type()) { + return false + } + + if len(a.GetAttributes()) != len(b.GetAttributes()) { + return false + } + + for name, aAttribute := range a.GetAttributes() { + bAttribute, ok := b.GetAttributes()[name] + + if !ok { + return false + } + + if !aAttribute.Equal(bAttribute) { + return false + } + } + + return true +} + +// NestedAttributeObjectType is a helper function to perform base type handling +// using the GetAttributes and GetBlocks methods. NestedAttributeObject +// implementations should still include custom type functionality in addition +// to using this helper. 
+func NestedAttributeObjectType(o NestedAttributeObject) basetypes.ObjectTypable { + attrTypes := make(map[string]attr.Type, len(o.GetAttributes())) + + for name, attribute := range o.GetAttributes() { + attrTypes[name] = attribute.GetType() + } + + return types.ObjectType{ + AttrTypes: attrTypes, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/nested_block_object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/nested_block_object.go new file mode 100644 index 0000000000..8e2bc17e31 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/nested_block_object.go @@ -0,0 +1,119 @@ +package fwschema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// NestedBlockObject represents the Object inside a Block. +// Refer to the fwxschema package for validation and plan modification +// extensions to this interface. +type NestedBlockObject interface { + tftypes.AttributePathStepper + + // Equal should return true if given NestedBlockObject is equivalent. + Equal(NestedBlockObject) bool + + // GetAttributes should return the nested attributes of the object. + GetAttributes() UnderlyingAttributes + + // GetBlocks should return the nested attributes of the object. + GetBlocks() map[string]Block + + // Type should return the framework type of the object. + Type() basetypes.ObjectTypable +} + +// NestedBlockObjectApplyTerraform5AttributePathStep is a helper function to +// perform base tftypes.AttributePathStepper handling using the GetAttributes +// and GetBlocks methods. 
NestedBlockObject implementations should still +// include custom type functionality in addition to using this helper. +func NestedBlockObjectApplyTerraform5AttributePathStep(o NestedBlockObject, step tftypes.AttributePathStep) (any, error) { + name, ok := step.(tftypes.AttributeName) + + if !ok { + return nil, fmt.Errorf("cannot apply AttributePathStep %T to NestedBlockObject", step) + } + + attribute, ok := o.GetAttributes()[string(name)] + + if ok { + return attribute, nil + } + + block, ok := o.GetBlocks()[string(name)] + + if ok { + return block, nil + } + + return nil, fmt.Errorf("no attribute or block %q on NestedBlockObject", name) +} + +// NestedBlockObjectEqual is a helper function to perform base equality testing +// on two NestedBlockObject. NestedBlockObject implementations should still +// compare the concrete types and other custom functionality in addition to +// using this helper. +func NestedBlockObjectEqual(a, b NestedBlockObject) bool { + if !a.Type().Equal(b.Type()) { + return false + } + + if len(a.GetAttributes()) != len(b.GetAttributes()) { + return false + } + + for name, aAttribute := range a.GetAttributes() { + bAttribute, ok := b.GetAttributes()[name] + + if !ok { + return false + } + + if !aAttribute.Equal(bAttribute) { + return false + } + } + + if len(a.GetBlocks()) != len(b.GetBlocks()) { + return false + } + + for name, aBlock := range a.GetBlocks() { + bBlock, ok := b.GetBlocks()[name] + + if !ok { + return false + } + + if !aBlock.Equal(bBlock) { + return false + } + } + + return true +} + +// NestedBlockObjectType is a helper function to perform base type handling +// using the GetAttributes and GetBlocks methods. NestedBlockObject +// implementations should still include custom type functionality in addition +// to using this helper. 
+func NestedBlockObjectType(o NestedBlockObject) basetypes.ObjectTypable { + attrTypes := make(map[string]attr.Type, len(o.GetAttributes())+len(o.GetBlocks())) + + for name, attribute := range o.GetAttributes() { + attrTypes[name] = attribute.GetType() + } + + for name, block := range o.GetBlocks() { + attrTypes[name] = block.Type() + } + + return types.ObjectType{ + AttrTypes: attrTypes, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/schema.go new file mode 100644 index 0000000000..93bfae4268 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/schema.go @@ -0,0 +1,237 @@ +package fwschema + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/totftypes" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Schema is the core interface required for data sources, providers, and +// resources. +type Schema interface { + // Implementations should include the tftypes.AttributePathStepper + // interface methods for proper path and data handling. + tftypes.AttributePathStepper + + // AttributeAtPath should return the Attribute at the given path or return + // an error. + AttributeAtPath(context.Context, path.Path) (Attribute, diag.Diagnostics) + + // AttributeAtTerraformPath should return the Attribute at the given + // Terraform path or return an error. + AttributeAtTerraformPath(context.Context, *tftypes.AttributePath) (Attribute, error) + + // GetAttributes should return the attributes of a schema. 
This is named + // differently than Attributes to prevent a conflict with the tfsdk.Schema + // field name. + GetAttributes() map[string]Attribute + + // GetBlocks should return the blocks of a schema. This is named + // differently than Blocks to prevent a conflict with the tfsdk.Schema + // field name. + GetBlocks() map[string]Block + + // GetDeprecationMessage should return a non-empty string if a schema + // is deprecated. This is named differently than DeprecationMessage to + // prevent a conflict with the tfsdk.Schema field name. + GetDeprecationMessage() string + + // GetDescription should return a non-empty string if a schema has a + // plaintext description. This is named differently than Description + // to prevent a conflict with the tfsdk.Schema field name. + GetDescription() string + + // GetMarkdownDescription should return a non-empty string if a schema has + // a Markdown description. This is named differently than + // MarkdownDescription to prevent a conflict with the tfsdk.Schema field + // name. + GetMarkdownDescription() string + + // GetVersion should return the version of a schema. This is named + // differently than Version to prevent a conflict with the tfsdk.Schema + // field name. + GetVersion() int64 + + // Type should return the framework type of the schema. + Type() attr.Type + + // TypeAtPath should return the framework type of the Attribute at the + // the given path or return an error. + TypeAtPath(context.Context, path.Path) (attr.Type, diag.Diagnostics) + + // AttributeTypeAtPath should return the framework type of the Attribute at + // the given Terraform path or return an error. + TypeAtTerraformPath(context.Context, *tftypes.AttributePath) (attr.Type, error) +} + +// SchemaApplyTerraform5AttributePathStep is a helper function to perform base +// tftypes.AttributePathStepper handling using the GetAttributes and GetBlocks +// methods. 
+func SchemaApplyTerraform5AttributePathStep(s Schema, step tftypes.AttributePathStep) (any, error) { + name, ok := step.(tftypes.AttributeName) + + if !ok { + return nil, fmt.Errorf("cannot apply AttributePathStep %T to schema", step) + } + + if attr, ok := s.GetAttributes()[string(name)]; ok { + return attr, nil + } + + if block, ok := s.GetBlocks()[string(name)]; ok { + return block, nil + } + + return nil, fmt.Errorf("could not find attribute or block %q in schema", name) +} + +// SchemaAttributeAtPath is a helper function to perform base type handling using +// the AttributeAtTerraformPath method. +func SchemaAttributeAtPath(ctx context.Context, s Schema, p path.Path) (Attribute, diag.Diagnostics) { + var diags diag.Diagnostics + + tftypesPath, tftypesDiags := totftypes.AttributePath(ctx, p) + + diags.Append(tftypesDiags...) + + if diags.HasError() { + return nil, diags + } + + attribute, err := s.AttributeAtTerraformPath(ctx, tftypesPath) + + if err != nil { + diags.AddAttributeError( + p, + "Invalid Schema Path", + "When attempting to get the framework attribute associated with a schema path, an unexpected error was returned. "+ + "This is always an issue with the provider. Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Path: %s\n", p)+ + fmt.Sprintf("Original Error: %s", err), + ) + return nil, diags + } + + return attribute, diags +} + +// SchemaAttributeAtTerraformPath is a helper function to perform base type +// handling using the tftypes.AttributePathStepper interface. 
+func SchemaAttributeAtTerraformPath(ctx context.Context, s Schema, p *tftypes.AttributePath) (Attribute, error) { + rawType, remaining, err := tftypes.WalkAttributePath(s, p) + + if err != nil { + return nil, fmt.Errorf("%v still remains in the path: %w", remaining, err) + } + + switch typ := rawType.(type) { + case attr.Type: + return nil, ErrPathInsideAtomicAttribute + case Attribute: + return typ, nil + case Block: + return nil, ErrPathIsBlock + case NestedAttributeObject: + return nil, ErrPathInsideAtomicAttribute + case NestedBlockObject: + return nil, ErrPathInsideAtomicAttribute + case UnderlyingAttributes: + return nil, ErrPathInsideAtomicAttribute + default: + return nil, fmt.Errorf("got unexpected type %T", rawType) + } +} + +// SchemaBlockPathExpressions returns a slice of all path expressions which +// represent a Block according to the Schema. +func SchemaBlockPathExpressions(ctx context.Context, s Schema) path.Expressions { + result := path.Expressions{} + + for name, block := range s.GetBlocks() { + result = append(result, BlockPathExpressions(ctx, block, path.MatchRoot(name))...) + } + + return result +} + +// SchemaTypeAtPath is a helper function to perform base type handling using +// the TypeAtTerraformPath method. +func SchemaTypeAtPath(ctx context.Context, s Schema, p path.Path) (attr.Type, diag.Diagnostics) { + var diags diag.Diagnostics + + tftypesPath, tftypesDiags := totftypes.AttributePath(ctx, p) + + diags.Append(tftypesDiags...) + + if diags.HasError() { + return nil, diags + } + + attrType, err := s.TypeAtTerraformPath(ctx, tftypesPath) + + if err != nil { + diags.AddAttributeError( + p, + "Invalid Schema Path", + "When attempting to get the framework type associated with a schema path, an unexpected error was returned. "+ + "This is always an issue with the provider. 
Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Path: %s\n", p)+ + fmt.Sprintf("Original Error: %s", err), + ) + return nil, diags + } + + return attrType, diags +} + +// SchemaTypeAtTerraformPath is a helper function to perform base type handling +// using the tftypes.AttributePathStepper interface. +func SchemaTypeAtTerraformPath(ctx context.Context, s Schema, p *tftypes.AttributePath) (attr.Type, error) { + rawType, remaining, err := tftypes.WalkAttributePath(s, p) + + if err != nil { + return nil, fmt.Errorf("%v still remains in the path: %w", remaining, err) + } + + switch typ := rawType.(type) { + case attr.Type: + return typ, nil + case Attribute: + return typ.GetType(), nil + case Block: + return typ.Type(), nil + case NestedAttributeObject: + return typ.Type(), nil + case NestedBlockObject: + return typ.Type(), nil + case Schema: + return typ.Type(), nil + case UnderlyingAttributes: + return typ.Type(), nil + default: + return nil, fmt.Errorf("got unexpected type %T", rawType) + } +} + +// SchemaType is a helper function to perform base type handling using the +// GetAttributes and GetBlocks methods. 
+func SchemaType(s Schema) attr.Type { + attrTypes := map[string]attr.Type{} + + for name, attr := range s.GetAttributes() { + attrTypes[name] = attr.GetType() + } + + for name, block := range s.GetBlocks() { + attrTypes[name] = block.Type() + } + + return types.ObjectType{AttrTypes: attrTypes} +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/underlying_attributes.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/underlying_attributes.go new file mode 100644 index 0000000000..1210b47bfe --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschema/underlying_attributes.go @@ -0,0 +1,67 @@ +package fwschema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure UnderlyingAttributes satisfies the expected interfaces. +var _ tftypes.AttributePathStepper = UnderlyingAttributes{} + +// UnderlyingAttributes represents attributes under a nested attribute. +type UnderlyingAttributes map[string]Attribute + +// ApplyTerraform5AttributePathStep performs an AttributeName step on the +// underlying attributes or returns an error. +func (u UnderlyingAttributes) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (any, error) { + name, ok := step.(tftypes.AttributeName) + + if !ok { + return nil, fmt.Errorf("can't apply %T to Attributes", step) + } + + attribute, ok := u[string(name)] + + if !ok { + return nil, fmt.Errorf("no attribute %q on Attributes", name) + } + + return attribute, nil +} + +// Equal returns true if all underlying attributes are equal. 
+func (u UnderlyingAttributes) Equal(o UnderlyingAttributes) bool { + if len(u) != len(o) { + return false + } + + for name, uAttribute := range u { + oAttribute, ok := o[name] + + if !ok { + return false + } + + if !uAttribute.Equal(oAttribute) { + return false + } + } + + return true +} + +// Type returns the framework type of the underlying attributes. +func (u UnderlyingAttributes) Type() basetypes.ObjectTypable { + attrTypes := make(map[string]attr.Type, len(u)) + + for name, attr := range u { + attrTypes[name] = attr.GetType() + } + + return basetypes.ObjectType{ + AttrTypes: attrTypes, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data.go new file mode 100644 index 0000000000..8d941f2100 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data.go @@ -0,0 +1,24 @@ +package fwschemadata + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Data is the shared storage implementation for schema-based values, such as +// configuration, plan, and state. +type Data struct { + // Description contains the human friendly type of the data. Used in error + // diagnostics. + Description DataDescription + + // Schema contains the data structure and types for the value. + Schema fwschema.Schema + + // TerraformValue contains the terraform-plugin-go value implementation. + // + // TODO: In the future this may be migrated to attr.Value, or more + // succinctly, types.Object. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/172 + TerraformValue tftypes.Value +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_description.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_description.go new file mode 100644 index 0000000000..5eb6384518 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_description.go @@ -0,0 +1,43 @@ +package fwschemadata + +const ( + // DataDescriptionConfiguration is used for Data that represents + // a configuration-based value. + DataDescriptionConfiguration DataDescription = "configuration" + + // DataDescriptionPlan is used for Data that represents + // a plan-based value. + DataDescriptionPlan DataDescription = "plan" + + // DataDescriptionState is used for Data that represents + // a state-based value. + DataDescriptionState DataDescription = "state" +) + +// DataDescription is a human friendly type for Data. Used in error +// diagnostics. +type DataDescription string + +// String returns the lowercase string of the description. +func (d DataDescription) String() string { + switch d { + case "": + return "data" + default: + return string(d) + } +} + +// Title returns the titlecase string of the description. 
+func (d DataDescription) Title() string { + switch d { + case DataDescriptionConfiguration: + return "Configuration" + case DataDescriptionPlan: + return "Plan" + case DataDescriptionState: + return "State" + default: + return "Data" + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_get.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_get.go new file mode 100644 index 0000000000..1e0b88d688 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_get.go @@ -0,0 +1,14 @@ +package fwschemadata + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/reflect" + "github.com/hashicorp/terraform-plugin-framework/path" +) + +// Get populates the struct passed as `target` with the entire state. +func (d Data) Get(ctx context.Context, target any) diag.Diagnostics { + return reflect.Into(ctx, d.Schema.Type(), d.TerraformValue, target, reflect.Options{}, path.Empty()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_get_at_path.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_get_at_path.go new file mode 100644 index 0000000000..752c99f26f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_get_at_path.go @@ -0,0 +1,57 @@ +package fwschemadata + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/reflect" + 
"github.com/hashicorp/terraform-plugin-framework/path" +) + +// GetAtPath retrieves the attribute found at `path` and populates the +// `target` with the value. +func (d Data) GetAtPath(ctx context.Context, schemaPath path.Path, target any) diag.Diagnostics { + ctx = logging.FrameworkWithAttributePath(ctx, schemaPath.String()) + + attrValue, diags := d.ValueAtPath(ctx, schemaPath) + + if diags.HasError() { + return diags + } + + if attrValue == nil { + diags.AddAttributeError( + schemaPath, + d.Description.Title()+" Read Error", + "An unexpected error was encountered trying to read an attribute from the "+d.Description.String()+". This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + "Missing attribute value, however no error was returned. Preventing the panic from this situation.", + ) + return diags + } + + if reflect.IsGenericAttrValue(ctx, target) { + *(target.(*attr.Value)) = attrValue + return nil + } + + raw, err := attrValue.ToTerraformValue(ctx) + + if err != nil { + diags.AddAttributeError( + schemaPath, + d.Description.Title()+" Value Conversion Error", + fmt.Sprintf("An unexpected error was encountered converting a %T to its equivalent Terraform representation. This is always a bug in the provider.\n\n"+ + "Error: %s", attrValue, err), + ) + return diags + } + + reflectDiags := reflect.Into(ctx, attrValue.Type(ctx), raw, target, reflect.Options{}, schemaPath) + + diags.Append(reflectDiags...) 
+ + return diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_nullify_collection_blocks.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_nullify_collection_blocks.go new file mode 100644 index 0000000000..3913c4bce3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_nullify_collection_blocks.go @@ -0,0 +1,80 @@ +package fwschemadata + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// NullifyCollectionBlocks converts list and set block empty values to null +// values. The reverse conversion is ReifyNullCollectionBlocks. +func (d *Data) NullifyCollectionBlocks(ctx context.Context) diag.Diagnostics { + var diags diag.Diagnostics + + blockPathExpressions := fwschema.SchemaBlockPathExpressions(ctx, d.Schema) + + // Errors are handled as richer diag.Diagnostics instead. + d.TerraformValue, _ = tftypes.Transform(d.TerraformValue, func(tfTypePath *tftypes.AttributePath, tfTypeValue tftypes.Value) (tftypes.Value, error) { + // Do not transform if value is already null or is not fully known. + if tfTypeValue.IsNull() || !tfTypeValue.IsFullyKnown() { + return tfTypeValue, nil + } + + fwPath, fwPathDiags := fromtftypes.AttributePath(ctx, tfTypePath, d.Schema) + + diags.Append(fwPathDiags...) + + // Do not transform if path cannot be converted. + // Checking against fwPathDiags will capture all errors. + if fwPathDiags.HasError() { + return tfTypeValue, nil + } + + // Do not transform if path is not a block. 
+ if !blockPathExpressions.Matches(fwPath) { + return tfTypeValue, nil + } + + var elements []tftypes.Value + + switch tfTypeValue.Type().(type) { + case tftypes.List, tftypes.Set: + err := tfTypeValue.As(&elements) + + // If this occurs, it likely is an upstream issue in Terraform + // or terraform-plugin-go. + if err != nil { + diags.AddAttributeError( + fwPath, + d.Description.Title()+" Data Transformation Error", + "An unexpected error occurred while transforming "+d.Description.String()+" data. "+ + "This is always an issue with terraform-plugin-framework and should be reported to the provider developers.\n\n"+ + "Path: "+fwPath.String()+"\n"+ + "Error: (tftypes.Value).As() error: "+err.Error(), + ) + + return tfTypeValue, nil //nolint:nilerr // Using richer diag.Diagnostics instead. + } + default: + return tfTypeValue, nil + } + + // Do not transform if there are any elements. + if len(elements) > 0 { + return tfTypeValue, nil + } + + // Transform to null value. + logging.FrameworkTrace(ctx, "Transforming empty block to null block", map[string]any{ + logging.KeyAttributePath: fwPath.String(), + logging.KeyDescription: d.Description.String(), + }) + return tftypes.NewValue(tfTypeValue.Type(), nil), nil + }) + + return diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_path_exists.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_path_exists.go new file mode 100644 index 0000000000..46f6fa2f4b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_path_exists.go @@ -0,0 +1,44 @@ +package fwschemadata + +import ( + "context" + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/totftypes" + "github.com/hashicorp/terraform-plugin-framework/path" + 
"github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// PathExists returns true if the path can be reached. The value at the path +// may be null or unknown. +func (d Data) PathExists(ctx context.Context, path path.Path) (bool, diag.Diagnostics) { + var diags diag.Diagnostics + + tftypesPath, tftypesPathDiags := totftypes.AttributePath(ctx, path) + + diags.Append(tftypesPathDiags...) + + if diags.HasError() { + return false, diags + } + + _, remaining, err := tftypes.WalkAttributePath(d.TerraformValue, tftypesPath) + + if err != nil { + if errors.Is(err, tftypes.ErrInvalidStep) { + return false, diags + } + + diags.AddAttributeError( + path, + d.Description.Title()+" Read Error", + "An unexpected error was encountered trying to read an attribute from the "+d.Description.String()+". This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Cannot walk attribute path in %s: %s", d.Description, err), + ) + return false, diags + } + + return len(remaining.Steps()) == 0, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_path_matches.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_path_matches.go new file mode 100644 index 0000000000..656d38f8b5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_path_matches.go @@ -0,0 +1,77 @@ +package fwschemadata + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// PathMatches returns all matching path.Paths from the given path.Expression. 
+// +// If a parent path is null or unknown, which would prevent a full expression +// from matching, the parent path is returned rather than no match to prevent +// false positives. +func (d Data) PathMatches(ctx context.Context, pathExpr path.Expression) (path.Paths, diag.Diagnostics) { + var diags diag.Diagnostics + var paths path.Paths + + if !d.ValidPathExpression(ctx, pathExpr) { + diags.AddError( + "Invalid Path Expression for Schema", + "The Terraform Provider unexpectedly provided a path expression that does not match the current schema. "+ + "This can happen if the path expression does not correctly follow the schema in structure or types. "+ + "Please report this to the provider developers.\n\n"+ + "Path Expression: "+pathExpr.String(), + ) + + return paths, diags + } + + _ = tftypes.Walk(d.TerraformValue, func(tfTypePath *tftypes.AttributePath, tfTypeValue tftypes.Value) (bool, error) { + fwPath, fwPathDiags := fromtftypes.AttributePath(ctx, tfTypePath, d.Schema) + + diags.Append(fwPathDiags...) + + if diags.HasError() { + // If there was an error with conversion of the path at this level, + // no need to traverse further since a deeper path will error. + return false, nil + } + + if pathExpr.Matches(fwPath) { + paths.Append(fwPath) + + // If we matched, there is no need to traverse further since a + // deeper path will never match. + return false, nil + } + + // If current path cannot be parent path, there is no need to traverse + // further since a deeper path will never match. + if !pathExpr.MatchesParent(fwPath) { + return false, nil + } + + // If value at current path (now known to be a parent path of the + // expression) is null or unknown, return it as a valid path match + // since Walk will stop traversing deeper anyways and we want + // consumers to know about the path with the null or unknown value. 
+ // + // This behavior may be confusing for consumers as fetching the value + // at this parent path will return a potentially unexpected type, + // however this is an implementation tradeoff to prevent false + // positives of missing null or unknown values. + if tfTypeValue.IsNull() || !tfTypeValue.IsKnown() { + paths.Append(fwPath) + + return false, nil + } + + return true, nil + }) + + return paths, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_reify_null_collection_blocks.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_reify_null_collection_blocks.go new file mode 100644 index 0000000000..b9d049c7dc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_reify_null_collection_blocks.go @@ -0,0 +1,56 @@ +package fwschemadata + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// ReifyNullCollectionBlocks converts list and set block null values to empty +// values. This is the reverse conversion of NullifyCollectionBlocks. +func (d *Data) ReifyNullCollectionBlocks(ctx context.Context) diag.Diagnostics { + var diags diag.Diagnostics + + blockPathExpressions := fwschema.SchemaBlockPathExpressions(ctx, d.Schema) + + // Errors are handled as richer diag.Diagnostics instead. + d.TerraformValue, _ = tftypes.Transform(d.TerraformValue, func(tfTypePath *tftypes.AttributePath, tfTypeValue tftypes.Value) (tftypes.Value, error) { + // Only transform null values. 
+ if !tfTypeValue.IsNull() { + return tfTypeValue, nil + } + + fwPath, fwPathDiags := fromtftypes.AttributePath(ctx, tfTypePath, d.Schema) + + diags.Append(fwPathDiags...) + + // Do not transform if path cannot be converted. + // Checking against fwPathDiags will capture all errors. + if fwPathDiags.HasError() { + return tfTypeValue, nil + } + + // Do not transform if path is not a block. + if !blockPathExpressions.Matches(fwPath) { + return tfTypeValue, nil + } + + // Transform to empty value. + switch tfTypeValue.Type().(type) { + case tftypes.List, tftypes.Set: + logging.FrameworkTrace(ctx, "Transforming null block to empty block", map[string]any{ + logging.KeyAttributePath: fwPath.String(), + logging.KeyDescription: d.Description.String(), + }) + return tftypes.NewValue(tfTypeValue.Type(), []tftypes.Value{}), nil + default: + return tfTypeValue, nil + } + }) + + return diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_set.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_set.go new file mode 100644 index 0000000000..3bdcb848b7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_set.go @@ -0,0 +1,35 @@ +package fwschemadata + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/reflect" + "github.com/hashicorp/terraform-plugin-framework/path" +) + +// Set replaces the entire value. The value should be a struct whose fields +// have one of the attr.Value types. Each field must have the tfsdk field tag. 
+func (d *Data) Set(ctx context.Context, val any) diag.Diagnostics { + attrValue, diags := reflect.FromValue(ctx, d.Schema.Type(), val, path.Empty()) + + if diags.HasError() { + return diags + } + + tfValue, err := attrValue.ToTerraformValue(ctx) + + if err != nil { + diags.AddError( + d.Description.Title()+" Write Error", + "An unexpected error was encountered trying to write the "+d.Description.String()+". This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Error: Unable to run ToTerraformValue on new value: %s", err), + ) + return diags + } + + d.TerraformValue = tfValue + + return diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_set_at_path.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_set_at_path.go new file mode 100644 index 0000000000..c51fa4f02e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_set_at_path.go @@ -0,0 +1,199 @@ +package fwschemadata + +import ( + "context" + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/reflect" + "github.com/hashicorp/terraform-plugin-framework/internal/totftypes" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// SetAtPath sets the attribute at `path` using the supplied Go value. +// +// The attribute path and value must be valid with the current schema. If the +// attribute path already has a value, it will be overwritten. If the attribute +// path does not have a value, it will be added, including any parent attribute +// paths as necessary. 
+// +// Lists can only have the next element added according to the current length. +func (d *Data) SetAtPath(ctx context.Context, path path.Path, val interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + ctx = logging.FrameworkWithAttributePath(ctx, path.String()) + + tftypesPath, tftypesPathDiags := totftypes.AttributePath(ctx, path) + + diags.Append(tftypesPathDiags...) + + if diags.HasError() { + return diags + } + + attrType, err := d.Schema.TypeAtTerraformPath(ctx, tftypesPath) + + if err != nil { + diags.AddAttributeError( + path, + d.Description.Title()+" Write Error", + "An unexpected error was encountered trying to retrieve type information at a given path. This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + "Error: "+err.Error(), + ) + return diags + } + + newVal, newValDiags := reflect.FromValue(ctx, attrType, val, path) + diags.Append(newValDiags...) + + if diags.HasError() { + return diags + } + + tfVal, err := newVal.ToTerraformValue(ctx) + + if err != nil { + diags.AddAttributeError( + path, + d.Description.Title()+" Write Error", + "An unexpected error was encountered trying to write an attribute to the "+d.Description.String()+". This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + "Error: Cannot run ToTerraformValue on new data value: "+err.Error(), + ) + return diags + } + + if attrTypeWithValidate, ok := attrType.(xattr.TypeWithValidate); ok { + logging.FrameworkTrace(ctx, "Type implements TypeWithValidate") + logging.FrameworkDebug(ctx, "Calling provider defined Type Validate") + diags.Append(attrTypeWithValidate.Validate(ctx, tfVal, path)...) + logging.FrameworkDebug(ctx, "Called provider defined Type Validate") + + if diags.HasError() { + return diags + } + } + + transformFunc, transformFuncDiags := d.SetAtPathTransformFunc(ctx, path, tfVal, nil) + diags.Append(transformFuncDiags...) 
+ + if diags.HasError() { + return diags + } + + d.TerraformValue, err = tftypes.Transform(d.TerraformValue, transformFunc) + + if err != nil { + diags.AddAttributeError( + path, + d.Description.Title()+" Write Error", + "An unexpected error was encountered trying to write an attribute to the "+d.Description.String()+". This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + "Error: Cannot transform data: "+err.Error(), + ) + return diags + } + + return diags +} + +// SetAttributeTransformFunc recursively creates a value based on the current +// Plan values along the path. If the value at the path does not yet exist, +// this will perform recursion to add the child value to a parent value, +// creating the parent value if necessary. +func (d Data) SetAtPathTransformFunc(ctx context.Context, path path.Path, tfVal tftypes.Value, diags diag.Diagnostics) (func(*tftypes.AttributePath, tftypes.Value) (tftypes.Value, error), diag.Diagnostics) { + exists, pathExistsDiags := d.PathExists(ctx, path) + diags.Append(pathExistsDiags...) + + if diags.HasError() { + return nil, diags + } + + tftypesPath, tftypesPathDiags := totftypes.AttributePath(ctx, path) + + diags.Append(tftypesPathDiags...) + + if diags.HasError() { + return nil, diags + } + + if exists { + // Overwrite existing value + return func(p *tftypes.AttributePath, v tftypes.Value) (tftypes.Value, error) { + if p.Equal(tftypesPath) { + return tfVal, nil + } + return v, nil + }, diags + } + + parentPath := path.ParentPath() + parentTftypesPath := tftypesPath.WithoutLastStep() + parentAttrType, err := d.Schema.TypeAtTerraformPath(ctx, parentTftypesPath) + + if err != nil { + err = fmt.Errorf("error getting parent attribute type in schema: %w", err) + diags.AddAttributeError( + parentPath, + d.Description.Title()+" Write Error", + "An unexpected error was encountered trying to write an attribute to the "+d.Description.String()+". This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + + parentValue, err := d.TerraformValueAtTerraformPath(ctx, parentTftypesPath) + + if err != nil && !errors.Is(err, tftypes.ErrInvalidStep) { + diags.AddAttributeError( + parentPath, + d.Description.Title()+" Read Error", + "An unexpected error was encountered trying to read an attribute from the "+d.Description.String()+". This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + + if parentValue.IsNull() || !parentValue.IsKnown() { + // TODO: This will break when DynamicPsuedoType is introduced. + // tftypes.Type should implement AttributePathStepper, but it currently does not. + // When it does, we should use: tftypes.WalkAttributePath(p.Raw.Type(), parentPath) + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/110 + parentType := parentAttrType.TerraformType(ctx) + var childValue interface{} + + if !parentValue.IsKnown() { + childValue = tftypes.UnknownValue + } + + var parentValueDiags diag.Diagnostics + parentValue, parentValueDiags = CreateParentTerraformValue(ctx, parentPath, parentType, childValue) + diags.Append(parentValueDiags...) + + if diags.HasError() { + return nil, diags + } + } + + var childValueDiags diag.Diagnostics + childStep, _ := path.Steps().LastStep() + parentValue, childValueDiags = UpsertChildTerraformValue(ctx, parentPath, parentValue, childStep, tfVal) + diags.Append(childValueDiags...) + + if diags.HasError() { + return nil, diags + } + + if attrTypeWithValidate, ok := parentAttrType.(xattr.TypeWithValidate); ok { + logging.FrameworkTrace(ctx, "Type implements TypeWithValidate") + logging.FrameworkDebug(ctx, "Calling provider defined Type Validate") + diags.Append(attrTypeWithValidate.Validate(ctx, parentValue, parentPath)...) 
+ logging.FrameworkDebug(ctx, "Called provider defined Type Validate") + + if diags.HasError() { + return nil, diags + } + } + + return d.SetAtPathTransformFunc(ctx, parentPath, parentValue, diags) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_terraform_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_terraform_value.go new file mode 100644 index 0000000000..47de0cc2af --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_terraform_value.go @@ -0,0 +1,26 @@ +package fwschemadata + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// TerraformValueAtTerraformPath returns the tftypes.Value at a given +// tftypes.AttributePath or an error. +func (d Data) TerraformValueAtTerraformPath(_ context.Context, path *tftypes.AttributePath) (tftypes.Value, error) { + rawValue, remaining, err := tftypes.WalkAttributePath(d.TerraformValue, path) + + if err != nil { + return tftypes.Value{}, fmt.Errorf("%v still remains in the path: %w", remaining, err) + } + + attrValue, ok := rawValue.(tftypes.Value) + + if !ok { + return tftypes.Value{}, fmt.Errorf("got non-tftypes.Value result %v", rawValue) + } + + return attrValue, err +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_valid_path_expression.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_valid_path_expression.go new file mode 100644 index 0000000000..e759c3abfd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_valid_path_expression.go @@ -0,0 +1,99 @@ +package fwschemadata + +import ( + "context" + "fmt" + + 
"github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// ValidPathExpression returns true if the given expression is valid for the +// schema underlying the Data. This can be used to determine if there was an +// expression implementation error versus an expression returning no path +// matches based on implementation details of the underlying data storage. +func (d Data) ValidPathExpression(ctx context.Context, expression path.Expression) bool { + expressionSteps := expression.Resolve().Steps() + + if len(expressionSteps) == 0 { + return false + } + + return validatePathExpressionSteps(ctx, d.Schema.Type(), expressionSteps) +} + +// validatePathExpressionSteps is a recursive function which returns true if +// the path expression steps can be applied to the type. +func validatePathExpressionSteps(ctx context.Context, currentType attr.Type, currentExpressionSteps path.ExpressionSteps) bool { + currentExpressionStep, nextSteps := currentExpressionSteps.NextStep() + + // Generate a tftypes step based on the expression. For type definitions, + // any value should be acceptable for element steps. + var currentTfStep tftypes.AttributePathStep + + switch step := currentExpressionStep.(type) { + case nil: + // There are no more expression steps. 
+ return true + case path.ExpressionStepAttributeNameExact: + currentTfStep = tftypes.AttributeName(step) + case path.ExpressionStepElementKeyIntAny: + currentTfStep = tftypes.ElementKeyInt(0) + case path.ExpressionStepElementKeyIntExact: + currentTfStep = tftypes.ElementKeyInt(step) + case path.ExpressionStepElementKeyStringAny: + currentTfStep = tftypes.ElementKeyString("") + case path.ExpressionStepElementKeyStringExact: + currentTfStep = tftypes.ElementKeyString(step) + case path.ExpressionStepElementKeyValueAny: + tfValue := tftypes.NewValue( + currentType.TerraformType(ctx), + nil, + ) + currentTfStep = tftypes.ElementKeyValue(tfValue) + case path.ExpressionStepElementKeyValueExact: + // Best effort + tfValue, err := step.Value.ToTerraformValue(ctx) + + if err != nil { + tfValue = tftypes.NewValue( + currentType.TerraformType(ctx), + nil, + ) + } + + currentTfStep = tftypes.ElementKeyValue(tfValue) + default: + // If new, resolved path.ExpressionStep are introduced, they must be + // added as cases to this switch statement. + panic(fmt.Sprintf("unimplemented path.ExpressionStep type: %T", currentExpressionStep)) + } + + nextTypeIface, err := currentType.ApplyTerraform5AttributePathStep(currentTfStep) + + if err != nil { + // Debug, not error, log entry for troubleshooting as validation may + // be running in a scenario where invalid expressions are okay. + logging.FrameworkDebug( + ctx, + fmt.Sprintf("Returning false due to error while calling %T ApplyTerraform5AttributePathStep with %T", currentType, currentTfStep), + map[string]any{ + logging.KeyError: err, + }, + ) + + return false + } + + nextType, ok := nextTypeIface.(attr.Type) + + if !ok { + // Raise a more descriptive panic message instead of the type assertion + // panic. 
+ panic(fmt.Sprintf("%T returned unexpected type %T from ApplyTerraform5AttributePathStep", currentType, nextTypeIface)) + } + + return validatePathExpressionSteps(ctx, nextType, nextSteps) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_value.go new file mode 100644 index 0000000000..fb34ce5109 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/data_value.go @@ -0,0 +1,100 @@ +package fwschemadata + +import ( + "context" + "errors" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/totftypes" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// ValueAtPath retrieves the attribute found at `path` and returns it as an +// attr.Value. Consumers should assert the type of the returned value with the +// desired attr.Type. +func (d Data) ValueAtPath(ctx context.Context, schemaPath path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + tftypesPath, tftypesPathDiags := totftypes.AttributePath(ctx, schemaPath) + + diags.Append(tftypesPathDiags...) + + if diags.HasError() { + return nil, diags + } + + attrType, err := d.Schema.TypeAtTerraformPath(ctx, tftypesPath) + + if err != nil { + diags.AddAttributeError( + schemaPath, + d.Description.Title()+" Read Error", + "An unexpected error was encountered trying to retrieve type information at a given path. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+ + "Error: "+err.Error(), + ) + return nil, diags + } + + // if the data is null, return a null value of the type + if d.TerraformValue.IsNull() { + attrValue, err := attrType.ValueFromTerraform(ctx, tftypes.NewValue(attrType.TerraformType(ctx), nil)) + + if err != nil { + diags.AddAttributeError( + schemaPath, + d.Description.Title()+" Read Error", + "An unexpected error was encountered trying to create a null attribute value from the given path. "+ + "Please report the following to the provider developer:\n\n"+ + "Type: "+attrType.String()+"\n"+ + "Error:"+err.Error(), + ) + } + + return attrValue, diags + } + + tfValue, err := d.TerraformValueAtTerraformPath(ctx, tftypesPath) + + // Ignoring ErrInvalidStep will allow this method to return a null value of the type. + if err != nil && !errors.Is(err, tftypes.ErrInvalidStep) { + diags.AddAttributeError( + schemaPath, + d.Description.Title()+" Read Error", + "An unexpected error was encountered trying to retrieve an attribute value from the given path. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + + // TODO: If ErrInvalidStep, check parent paths for unknown value. + // If found, convert this value to an unknown value. + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/186 + + if attrTypeWithValidate, ok := attrType.(xattr.TypeWithValidate); ok { + logging.FrameworkTrace(ctx, "Type implements TypeWithValidate") + logging.FrameworkDebug(ctx, "Calling provider defined Type Validate") + diags.Append(attrTypeWithValidate.Validate(ctx, tfValue, schemaPath)...) 
+ logging.FrameworkDebug(ctx, "Called provider defined Type Validate") + + if diags.HasError() { + return nil, diags + } + } + + attrValue, err := attrType.ValueFromTerraform(ctx, tfValue) + + if err != nil { + diags.AddAttributeError( + schemaPath, + d.Description.Title()+" Read Error", + "An unexpected error was encountered trying to convert an attribute value from the "+d.Description.String()+". This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + "Error: "+err.Error(), + ) + return nil, diags + } + + return attrValue, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/doc.go new file mode 100644 index 0000000000..ddb98c2f0b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/doc.go @@ -0,0 +1,3 @@ +// Package fwschemadata implements the shared schema-based data implementation +// for configuration, plan, and state values. +package fwschemadata diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/tftypes_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/tftypes_value.go new file mode 100644 index 0000000000..41d2ee4f2a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata/tftypes_value.go @@ -0,0 +1,207 @@ +package fwschemadata + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// CreateParentTerraformValue ensures that the given parent value can have children +// values upserted. 
If the parent value is known and not null, it is returned +// without modification. A null Object or Tuple is converted to known with null +// children. An unknown Object or Tuple is converted to known with unknown +// children. List, Map, and Set are created with empty elements. +func CreateParentTerraformValue(_ context.Context, parentPath path.Path, parentType tftypes.Type, childValue interface{}) (tftypes.Value, diag.Diagnostics) { + var diags diag.Diagnostics + var parentValue tftypes.Value + + switch parentType := parentType.(type) { + case tftypes.List: + parentValue = tftypes.NewValue(parentType, []tftypes.Value{}) + case tftypes.Set: + parentValue = tftypes.NewValue(parentType, []tftypes.Value{}) + case tftypes.Map: + parentValue = tftypes.NewValue(parentType, map[string]tftypes.Value{}) + case tftypes.Object: + vals := map[string]tftypes.Value{} + + for name, t := range parentType.AttributeTypes { + vals[name] = tftypes.NewValue(t, childValue) + } + + parentValue = tftypes.NewValue(parentType, vals) + case tftypes.Tuple: + vals := []tftypes.Value{} + + for _, elementType := range parentType.ElementTypes { + vals = append(vals, tftypes.NewValue(elementType, childValue)) + } + + parentValue = tftypes.NewValue(parentType, vals) + default: + diags.AddAttributeError( + parentPath, + "Value Conversion Error", + "An unexpected error was encountered trying to create a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Unknown parent type %s to create value.", parentType), + ) + return parentValue, diags + } + + return parentValue, diags +} + +// UpsertChildTerraformValue will upsert a child value into a parent value. If the +// path step already has a value, it will be overwritten. Otherwise, the child +// value will be added. +// +// Lists can only have the next element added according to the current length. 
+func UpsertChildTerraformValue(_ context.Context, parentPath path.Path, parentValue tftypes.Value, childStep path.PathStep, childValue tftypes.Value) (tftypes.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + // TODO: Add Tuple support + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/54 + switch childStep := childStep.(type) { + case path.PathStepAttributeName: + // Set in Object + if !parentValue.Type().Is(tftypes.Object{}) { + diags.AddAttributeError( + parentPath, + "Value Conversion Error", + "An unexpected error was encountered trying to create a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Cannot add attribute into parent type: %s", parentValue.Type()), + ) + return parentValue, diags + } + + var parentAttrs map[string]tftypes.Value + err := parentValue.Copy().As(&parentAttrs) + + if err != nil { + diags.AddAttributeError( + parentPath, + "Value Conversion Error", + "An unexpected error was encountered trying to create a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Unable to extract object elements from parent value: %s", err), + ) + return parentValue, diags + } + + parentAttrs[string(childStep)] = childValue + parentValue = tftypes.NewValue(parentValue.Type(), parentAttrs) + case path.PathStepElementKeyInt: + // Upsert List element, except past length + 1 + if !parentValue.Type().Is(tftypes.List{}) { + diags.AddAttributeError( + parentPath, + "Value Conversion Error", + "An unexpected error was encountered trying to create a value. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Cannot add list element into parent type: %s", parentValue.Type()), + ) + return parentValue, diags + } + + var parentElems []tftypes.Value + err := parentValue.Copy().As(&parentElems) + + if err != nil { + diags.AddAttributeError( + parentPath, + "Value Conversion Error", + "An unexpected error was encountered trying to create a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Unable to extract list elements from parent value: %s", err), + ) + return parentValue, diags + } + + if int(childStep) > len(parentElems) { + diags.AddAttributeError( + parentPath, + "Value Conversion Error", + "An unexpected error was encountered trying to create a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Cannot add list element %d as list currently has %d length. To prevent ambiguity, only the next element can be added to a list. Add empty elements into the list prior to this call, if appropriate.", int(childStep)+1, len(parentElems)), + ) + return parentValue, diags + } + + if int(childStep) == len(parentElems) { + parentElems = append(parentElems, childValue) + } else { + parentElems[int(childStep)] = childValue + } + + parentValue = tftypes.NewValue(parentValue.Type(), parentElems) + case path.PathStepElementKeyString: + // Upsert Map element + if !parentValue.Type().Is(tftypes.Map{}) { + diags.AddAttributeError( + parentPath, + "Value Conversion Error", + "An unexpected error was encountered trying to create a value. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Cannot add map value into parent type: %s", parentValue.Type()), + ) + return parentValue, diags + } + + var parentElems map[string]tftypes.Value + err := parentValue.Copy().As(&parentElems) + + if err != nil { + diags.AddAttributeError( + parentPath, + "Value Conversion Error", + "An unexpected error was encountered trying to create a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Unable to extract map elements from parent value: %s", err), + ) + return parentValue, diags + } + + parentElems[string(childStep)] = childValue + parentValue = tftypes.NewValue(parentValue.Type(), parentElems) + case path.PathStepElementKeyValue: + // Upsert Set element + if !parentValue.Type().Is(tftypes.Set{}) { + diags.AddAttributeError( + parentPath, + "Value Conversion Error", + "An unexpected error was encountered trying to create a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Cannot add set element into parent type: %s", parentValue.Type()), + ) + return parentValue, diags + } + + var parentElems []tftypes.Value + err := parentValue.Copy().As(&parentElems) + + if err != nil { + diags.AddAttributeError( + parentPath, + "Value Conversion Error", + "An unexpected error was encountered trying to create a value. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Unable to extract set elements from parent value: %s", err), + ) + return parentValue, diags + } + + // Prevent duplicates + var found bool + + for _, parentElem := range parentElems { + if parentElem.Equal(childValue) { + found = true + break + } + } + + if !found { + parentElems = append(parentElems, childValue) + } + + parentValue = tftypes.NewValue(parentValue.Type(), parentElems) + } + + return parentValue, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/attr_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/attr_value.go new file mode 100644 index 0000000000..36072f1f05 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/attr_value.go @@ -0,0 +1,181 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +func coerceListValue(ctx context.Context, schemaPath path.Path, value attr.Value) (types.List, diag.Diagnostics) { + listVal, ok := value.(basetypes.ListValuable) + + if !ok { + return types.ListNull(nil), diag.Diagnostics{ + attributePlanModificationWalkError(schemaPath, value), + } + } + + return listVal.ToListValue(ctx) +} + +func coerceMapValue(ctx context.Context, schemaPath path.Path, value attr.Value) (types.Map, diag.Diagnostics) { + mapVal, ok := value.(basetypes.MapValuable) + + if !ok { + return types.MapNull(nil), diag.Diagnostics{ + 
attributePlanModificationWalkError(schemaPath, value), + } + } + + return mapVal.ToMapValue(ctx) +} + +func coerceObjectValue(ctx context.Context, schemaPath path.Path, value attr.Value) (types.Object, diag.Diagnostics) { + objectVal, ok := value.(basetypes.ObjectValuable) + + if !ok { + return types.ObjectNull(nil), diag.Diagnostics{ + attributePlanModificationWalkError(schemaPath, value), + } + } + + return objectVal.ToObjectValue(ctx) +} + +func coerceSetValue(ctx context.Context, schemaPath path.Path, value attr.Value) (types.Set, diag.Diagnostics) { + setVal, ok := value.(basetypes.SetValuable) + + if !ok { + return types.SetNull(nil), diag.Diagnostics{ + attributePlanModificationWalkError(schemaPath, value), + } + } + + return setVal.ToSetValue(ctx) +} + +func listElemObject(ctx context.Context, schemaPath path.Path, list types.List, index int, description fwschemadata.DataDescription) (types.Object, diag.Diagnostics) { + if list.IsNull() { + return listElemObjectFromTerraformValue(ctx, schemaPath, list, description, nil) + } + + if list.IsUnknown() { + return listElemObjectFromTerraformValue(ctx, schemaPath, list, description, tftypes.UnknownValue) + } + + if index >= len(list.Elements()) { + return listElemObjectFromTerraformValue(ctx, schemaPath, list, description, nil) + } + + return coerceObjectValue(ctx, schemaPath, list.Elements()[index]) +} + +func listElemObjectFromTerraformValue(ctx context.Context, schemaPath path.Path, list types.List, description fwschemadata.DataDescription, tfValue any) (types.Object, diag.Diagnostics) { + elemType := list.ElementType(ctx) + elemValue, err := elemType.ValueFromTerraform(ctx, tftypes.NewValue(elemType.TerraformType(ctx), tfValue)) + + if err != nil { + return types.ObjectNull(nil), diag.Diagnostics{ + attributePlanModificationValueError(ctx, list, description, err), + } + } + + return coerceObjectValue(ctx, schemaPath, elemValue) +} + +func mapElemObject(ctx context.Context, schemaPath path.Path, m types.Map, 
key string, description fwschemadata.DataDescription) (types.Object, diag.Diagnostics) { + if m.IsNull() { + return mapElemObjectFromTerraformValue(ctx, schemaPath, m, description, nil) + } + + if m.IsUnknown() { + return mapElemObjectFromTerraformValue(ctx, schemaPath, m, description, tftypes.UnknownValue) + } + + elemValue, ok := m.Elements()[key] + + if !ok { + return mapElemObjectFromTerraformValue(ctx, schemaPath, m, description, nil) + } + + return coerceObjectValue(ctx, schemaPath, elemValue) +} + +func mapElemObjectFromTerraformValue(ctx context.Context, schemaPath path.Path, m types.Map, description fwschemadata.DataDescription, tfValue any) (types.Object, diag.Diagnostics) { + elemType := m.ElementType(ctx) + elemValue, err := elemType.ValueFromTerraform(ctx, tftypes.NewValue(elemType.TerraformType(ctx), tfValue)) + + if err != nil { + return types.ObjectNull(nil), diag.Diagnostics{ + attributePlanModificationValueError(ctx, m, description, err), + } + } + + return coerceObjectValue(ctx, schemaPath, elemValue) +} + +func objectAttributeValue(ctx context.Context, object types.Object, attributeName string, description fwschemadata.DataDescription) (attr.Value, diag.Diagnostics) { + if object.IsNull() { + return objectAttributeValueFromTerraformValue(ctx, object, attributeName, description, nil) + } + + if object.IsUnknown() { + return objectAttributeValueFromTerraformValue(ctx, object, attributeName, description, tftypes.UnknownValue) + } + + // A panic here indicates a bug somewhere else in the framework or an + // invalid test case. + return object.Attributes()[attributeName], nil +} + +func objectAttributeValueFromTerraformValue(ctx context.Context, object types.Object, attributeName string, description fwschemadata.DataDescription, tfValue any) (attr.Value, diag.Diagnostics) { + // A panic here indicates a bug somewhere else in the framework or an + // invalid test case. 
+ attrType := object.AttributeTypes(ctx)[attributeName] + + elemValue, err := attrType.ValueFromTerraform(ctx, tftypes.NewValue(attrType.TerraformType(ctx), tfValue)) + + if err != nil { + return nil, diag.Diagnostics{ + attributePlanModificationValueError(ctx, object, description, err), + } + } + + return elemValue, nil +} + +func setElemObject(ctx context.Context, schemaPath path.Path, set types.Set, index int, description fwschemadata.DataDescription) (types.Object, diag.Diagnostics) { + if set.IsNull() { + return setElemObjectFromTerraformValue(ctx, schemaPath, set, description, nil) + } + + if set.IsUnknown() { + return setElemObjectFromTerraformValue(ctx, schemaPath, set, description, tftypes.UnknownValue) + } + + if index >= len(set.Elements()) { + return setElemObjectFromTerraformValue(ctx, schemaPath, set, description, nil) + } + + return coerceObjectValue(ctx, schemaPath, set.Elements()[index]) +} + +func setElemObjectFromTerraformValue(ctx context.Context, schemaPath path.Path, set types.Set, description fwschemadata.DataDescription, tfValue any) (types.Object, diag.Diagnostics) { + elemType := set.ElementType(ctx) + elemValue, err := elemType.ValueFromTerraform(ctx, tftypes.NewValue(elemType.TerraformType(ctx), tfValue)) + + if err != nil { + return types.ObjectNull(nil), diag.Diagnostics{ + attributePlanModificationValueError(ctx, set, description, err), + } + } + + return coerceObjectValue(ctx, schemaPath, elemValue) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/attribute_plan_modification.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/attribute_plan_modification.go new file mode 100644 index 0000000000..1afbf2c9a9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/attribute_plan_modification.go @@ -0,0 +1,1778 @@ +package fwserver + +import ( + "context" + 
"fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +// ModifyAttributePlanRequest represents a request for the provider to modify an +// attribute value, or mark it as requiring replacement, at plan time. An +// instance of this request struct is supplied as an argument to the Modify +// function of an attribute's plan modifier(s). +type ModifyAttributePlanRequest struct { + // AttributePath is the path of the attribute. Use this path for any + // response diagnostics. + AttributePath path.Path + + // AttributePathExpression is the expression matching the exact path of the + // attribute. + AttributePathExpression path.Expression + + // Config is the configuration the user supplied for the resource. + Config tfsdk.Config + + // State is the current state of the resource. + State tfsdk.State + + // Plan is the planned new state for the resource. + Plan tfsdk.Plan + + // AttributeConfig is the configuration the user supplied for the attribute. + AttributeConfig attr.Value + + // AttributeState is the current state of the attribute. + AttributeState attr.Value + + // AttributePlan is the planned new state for the attribute. 
+ AttributePlan attr.Value + + // ProviderMeta is metadata from the provider_meta block of the module. + ProviderMeta tfsdk.Config + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. This data is opaque to Terraform and does + // not affect plan output. Any existing data is copied to + // ModifyAttributePlanResponse.Private to prevent accidental private state data loss. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + // + // Use the GetKey method to read data. Use the SetKey method on + // ModifyAttributePlanResponse.Private to update or remove a value. + Private *privatestate.ProviderData +} + +type ModifyAttributePlanResponse struct { + AttributePlan attr.Value + Diagnostics diag.Diagnostics + RequiresReplace path.Paths + Private *privatestate.ProviderData +} + +// AttributeModifyPlan runs all AttributePlanModifiers +// +// TODO: Clean up this abstraction back into an internal Attribute type method. +// The extra Attribute parameter is a carry-over of creating the proto6server +// package from the tfsdk package and not wanting to export the method. 
+// Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/365 +func AttributeModifyPlan(ctx context.Context, a fwschema.Attribute, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + ctx = logging.FrameworkWithAttributePath(ctx, req.AttributePath.String()) + + if req.Private != nil { + resp.Private = req.Private + } + + switch attributeWithPlanModifiers := a.(type) { + case fwxschema.AttributeWithBoolPlanModifiers: + AttributePlanModifyBool(ctx, attributeWithPlanModifiers, req, resp) + case fwxschema.AttributeWithFloat64PlanModifiers: + AttributePlanModifyFloat64(ctx, attributeWithPlanModifiers, req, resp) + case fwxschema.AttributeWithInt64PlanModifiers: + AttributePlanModifyInt64(ctx, attributeWithPlanModifiers, req, resp) + case fwxschema.AttributeWithListPlanModifiers: + AttributePlanModifyList(ctx, attributeWithPlanModifiers, req, resp) + case fwxschema.AttributeWithMapPlanModifiers: + AttributePlanModifyMap(ctx, attributeWithPlanModifiers, req, resp) + case fwxschema.AttributeWithNumberPlanModifiers: + AttributePlanModifyNumber(ctx, attributeWithPlanModifiers, req, resp) + case fwxschema.AttributeWithObjectPlanModifiers: + AttributePlanModifyObject(ctx, attributeWithPlanModifiers, req, resp) + case fwxschema.AttributeWithSetPlanModifiers: + AttributePlanModifySet(ctx, attributeWithPlanModifiers, req, resp) + case fwxschema.AttributeWithStringPlanModifiers: + AttributePlanModifyString(ctx, attributeWithPlanModifiers, req, resp) + } + + if resp.Diagnostics.HasError() { + return + } + + // Null and unknown values should not have nested schema to modify. 
+ if resp.AttributePlan.IsNull() || resp.AttributePlan.IsUnknown() { + return + } + + nestedAttribute, ok := a.(fwschema.NestedAttribute) + + if !ok { + return + } + + nestedAttributeObject := nestedAttribute.GetNestedObject() + + nm := nestedAttribute.GetNestingMode() + switch nm { + case fwschema.NestingModeList: + configList, diags := coerceListValue(ctx, req.AttributePath, req.AttributeConfig) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planList, diags := coerceListValue(ctx, req.AttributePath, req.AttributePlan) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + stateList, diags := coerceListValue(ctx, req.AttributePath, req.AttributeState) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planElements := planList.Elements() + + for idx, planElem := range planElements { + attrPath := req.AttributePath.AtListIndex(idx) + + configObject, diags := listElemObject(ctx, attrPath, configList, idx, fwschemadata.DataDescriptionConfiguration) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planObject, diags := coerceObjectValue(ctx, attrPath, planElem) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + stateObject, diags := listElemObject(ctx, attrPath, stateList, idx, fwschemadata.DataDescriptionState) + + resp.Diagnostics.Append(diags...) 
+ + if resp.Diagnostics.HasError() { + return + } + + objectReq := planmodifier.ObjectRequest{ + Config: req.Config, + ConfigValue: configObject, + Path: attrPath, + PathExpression: attrPath.Expression(), + Plan: req.Plan, + PlanValue: planObject, + Private: resp.Private, + State: req.State, + StateValue: stateObject, + } + objectResp := &ModifyAttributePlanResponse{ + AttributePlan: objectReq.PlanValue, + Private: objectReq.Private, + } + + NestedAttributeObjectPlanModify(ctx, nestedAttributeObject, objectReq, objectResp) + + planElements[idx] = objectResp.AttributePlan + resp.Diagnostics.Append(objectResp.Diagnostics...) + resp.Private = objectResp.Private + resp.RequiresReplace.Append(objectResp.RequiresReplace...) + } + + resp.AttributePlan, diags = types.ListValue(planList.ElementType(ctx), planElements) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + case fwschema.NestingModeSet: + configSet, diags := coerceSetValue(ctx, req.AttributePath, req.AttributeConfig) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planSet, diags := coerceSetValue(ctx, req.AttributePath, req.AttributePlan) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + stateSet, diags := coerceSetValue(ctx, req.AttributePath, req.AttributeState) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planElements := planSet.Elements() + + for idx, planElem := range planElements { + attrPath := req.AttributePath.AtSetValue(planElem) + + configObject, diags := setElemObject(ctx, attrPath, configSet, idx, fwschemadata.DataDescriptionConfiguration) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planObject, diags := coerceObjectValue(ctx, attrPath, planElem) + + resp.Diagnostics.Append(diags...) 
+ + if resp.Diagnostics.HasError() { + return + } + + stateObject, diags := setElemObject(ctx, attrPath, stateSet, idx, fwschemadata.DataDescriptionState) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + objectReq := planmodifier.ObjectRequest{ + Config: req.Config, + ConfigValue: configObject, + Path: attrPath, + PathExpression: attrPath.Expression(), + Plan: req.Plan, + PlanValue: planObject, + Private: resp.Private, + State: req.State, + StateValue: stateObject, + } + objectResp := &ModifyAttributePlanResponse{ + AttributePlan: objectReq.PlanValue, + Private: objectReq.Private, + } + + NestedAttributeObjectPlanModify(ctx, nestedAttributeObject, objectReq, objectResp) + + planElements[idx] = objectResp.AttributePlan + resp.Diagnostics.Append(objectResp.Diagnostics...) + resp.Private = objectResp.Private + resp.RequiresReplace.Append(objectResp.RequiresReplace...) + } + + resp.AttributePlan, diags = types.SetValue(planSet.ElementType(ctx), planElements) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + case fwschema.NestingModeMap: + configMap, diags := coerceMapValue(ctx, req.AttributePath, req.AttributeConfig) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planMap, diags := coerceMapValue(ctx, req.AttributePath, req.AttributePlan) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + stateMap, diags := coerceMapValue(ctx, req.AttributePath, req.AttributeState) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planElements := planMap.Elements() + + for key, planElem := range planElements { + attrPath := req.AttributePath.AtMapKey(key) + + configObject, diags := mapElemObject(ctx, attrPath, configMap, key, fwschemadata.DataDescriptionConfiguration) + + resp.Diagnostics.Append(diags...) 
+ + if resp.Diagnostics.HasError() { + return + } + + planObject, diags := coerceObjectValue(ctx, attrPath, planElem) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + stateObject, diags := mapElemObject(ctx, attrPath, stateMap, key, fwschemadata.DataDescriptionState) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + objectReq := planmodifier.ObjectRequest{ + Config: req.Config, + ConfigValue: configObject, + Path: attrPath, + PathExpression: attrPath.Expression(), + Plan: req.Plan, + PlanValue: planObject, + Private: resp.Private, + State: req.State, + StateValue: stateObject, + } + objectResp := &ModifyAttributePlanResponse{ + AttributePlan: objectReq.PlanValue, + Private: objectReq.Private, + } + + NestedAttributeObjectPlanModify(ctx, nestedAttributeObject, objectReq, objectResp) + + planElements[key] = objectResp.AttributePlan + resp.Diagnostics.Append(objectResp.Diagnostics...) + resp.Private = objectResp.Private + resp.RequiresReplace.Append(objectResp.RequiresReplace...) + } + + resp.AttributePlan, diags = types.MapValue(planMap.ElementType(ctx), planElements) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + case fwschema.NestingModeSingle: + configObject, diags := coerceObjectValue(ctx, req.AttributePath, req.AttributeConfig) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planObject, diags := coerceObjectValue(ctx, req.AttributePath, req.AttributePlan) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + stateObject, diags := coerceObjectValue(ctx, req.AttributePath, req.AttributeState) + + resp.Diagnostics.Append(diags...) 
+ + if resp.Diagnostics.HasError() { + return + } + + objectReq := planmodifier.ObjectRequest{ + Config: req.Config, + ConfigValue: configObject, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planObject, + Private: resp.Private, + State: req.State, + StateValue: stateObject, + } + objectResp := &ModifyAttributePlanResponse{ + AttributePlan: objectReq.PlanValue, + Private: objectReq.Private, + } + + NestedAttributeObjectPlanModify(ctx, nestedAttributeObject, objectReq, objectResp) + + resp.AttributePlan = objectResp.AttributePlan + resp.Diagnostics.Append(objectResp.Diagnostics...) + resp.Private = objectResp.Private + resp.RequiresReplace.Append(objectResp.RequiresReplace...) + default: + err := fmt.Errorf("unknown attribute nesting mode (%T: %v) at path: %s", nm, nm, req.AttributePath) + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Attribute Plan Modification Error", + "Attribute plan modifier cannot walk schema. Report this to the provider developer:\n\n"+err.Error(), + ) + + return + } +} + +// AttributePlanModifyBool performs all types.Bool plan modification. +func AttributePlanModifyBool(ctx context.Context, attribute fwxschema.AttributeWithBoolPlanModifiers, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + // Use basetypes.BoolValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.BoolValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Bool Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Bool attribute plan modification. "+ + "The value type must implement the basetypes.BoolValuable interface. 
"+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToBoolValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planValuable, ok := req.AttributePlan.(basetypes.BoolValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Bool Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Bool attribute plan modification. "+ + "The value type must implement the basetypes.BoolValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributePlan), + ) + + return + } + + planValue, diags := planValuable.ToBoolValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + stateValuable, ok := req.AttributeState.(basetypes.BoolValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Bool Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Bool attribute plan modification. "+ + "The value type must implement the basetypes.BoolValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeState), + ) + + return + } + + stateValue, diags := stateValuable.ToBoolValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. 
+ if diags.HasError() { + return + } + + planModifyReq := planmodifier.BoolRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planValue, + Private: req.Private, + State: req.State, + StateValue: stateValue, + } + + for _, planModifier := range attribute.BoolPlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. + planModifyResp := &planmodifier.BoolResponse{ + PlanValue: planModifyReq.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.Bool", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifier.PlanModifyBool(ctx, planModifyReq, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.Bool", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifyReq.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) + resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.AttributePath) + } + + // Only on new errors. + if planModifyResp.Diagnostics.HasError() { + return + } + } +} + +// AttributePlanModifyFloat64 performs all types.Float64 plan modification. +func AttributePlanModifyFloat64(ctx context.Context, attribute fwxschema.AttributeWithFloat64PlanModifiers, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + // Use basetypes.Float64Valuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. 
+ configValuable, ok := req.AttributeConfig.(basetypes.Float64Valuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Float64 Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Float64 attribute plan modification. "+ + "The value type must implement the basetypes.Float64Valuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToFloat64Value(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planValuable, ok := req.AttributePlan.(basetypes.Float64Valuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Float64 Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Float64 attribute plan modification. "+ + "The value type must implement the basetypes.Float64Valuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributePlan), + ) + + return + } + + planValue, diags := planValuable.ToFloat64Value(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + stateValuable, ok := req.AttributeState.(basetypes.Float64Valuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Float64 Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Float64 attribute plan modification. "+ + "The value type must implement the basetypes.Float64Valuable interface. 
"+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeState), + ) + + return + } + + stateValue, diags := stateValuable.ToFloat64Value(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planModifyReq := planmodifier.Float64Request{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planValue, + Private: req.Private, + State: req.State, + StateValue: stateValue, + } + + for _, planModifier := range attribute.Float64PlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. + planModifyResp := &planmodifier.Float64Response{ + PlanValue: planModifyReq.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.Float64", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifier.PlanModifyFloat64(ctx, planModifyReq, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.Float64", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifyReq.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) + resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.AttributePath) + } + + // Only on new errors. + if planModifyResp.Diagnostics.HasError() { + return + } + } +} + +// AttributePlanModifyInt64 performs all types.Int64 plan modification. 
+func AttributePlanModifyInt64(ctx context.Context, attribute fwxschema.AttributeWithInt64PlanModifiers, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + // Use basetypes.Int64Valuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.Int64Valuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Int64 Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Int64 attribute plan modification. "+ + "The value type must implement the basetypes.Int64Valuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToInt64Value(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planValuable, ok := req.AttributePlan.(basetypes.Int64Valuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Int64 Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Int64 attribute plan modification. "+ + "The value type must implement the basetypes.Int64Valuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributePlan), + ) + + return + } + + planValue, diags := planValuable.ToInt64Value(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. 
+ if diags.HasError() { + return + } + + stateValuable, ok := req.AttributeState.(basetypes.Int64Valuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Int64 Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Int64 attribute plan modification. "+ + "The value type must implement the basetypes.Int64Valuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeState), + ) + + return + } + + stateValue, diags := stateValuable.ToInt64Value(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planModifyReq := planmodifier.Int64Request{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planValue, + Private: req.Private, + State: req.State, + StateValue: stateValue, + } + + for _, planModifier := range attribute.Int64PlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. + planModifyResp := &planmodifier.Int64Response{ + PlanValue: planModifyReq.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.Int64", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifier.PlanModifyInt64(ctx, planModifyReq, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.Int64", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifyReq.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) 
+ resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.AttributePath) + } + + // Only on new errors. + if planModifyResp.Diagnostics.HasError() { + return + } + } +} + +// AttributePlanModifyList performs all types.List plan modification. +func AttributePlanModifyList(ctx context.Context, attribute fwxschema.AttributeWithListPlanModifiers, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + // Use basetypes.ListValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.ListValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid List Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform List attribute plan modification. "+ + "The value type must implement the basetypes.ListValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToListValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planValuable, ok := req.AttributePlan.(basetypes.ListValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid List Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform List attribute plan modification. "+ + "The value type must implement the basetypes.ListValuable interface. 
"+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributePlan), + ) + + return + } + + planValue, diags := planValuable.ToListValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + stateValuable, ok := req.AttributeState.(basetypes.ListValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid List Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform List attribute plan modification. "+ + "The value type must implement the basetypes.ListValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeState), + ) + + return + } + + stateValue, diags := stateValuable.ToListValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planModifyReq := planmodifier.ListRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planValue, + Private: req.Private, + State: req.State, + StateValue: stateValue, + } + + for _, planModifier := range attribute.ListPlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. 
+ planModifyResp := &planmodifier.ListResponse{ + PlanValue: planModifyReq.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.List", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifier.PlanModifyList(ctx, planModifyReq, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.List", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifyReq.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) + resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.AttributePath) + } + + // Only on new errors. + if planModifyResp.Diagnostics.HasError() { + return + } + } +} + +// AttributePlanModifyMap performs all types.Map plan modification. +func AttributePlanModifyMap(ctx context.Context, attribute fwxschema.AttributeWithMapPlanModifiers, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + // Use basetypes.MapValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.MapValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Map Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Map attribute plan modification. "+ + "The value type must implement the basetypes.MapValuable interface. 
"+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToMapValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planValuable, ok := req.AttributePlan.(basetypes.MapValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Map Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Map attribute plan modification. "+ + "The value type must implement the basetypes.MapValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributePlan), + ) + + return + } + + planValue, diags := planValuable.ToMapValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + stateValuable, ok := req.AttributeState.(basetypes.MapValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Map Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Map attribute plan modification. "+ + "The value type must implement the basetypes.MapValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeState), + ) + + return + } + + stateValue, diags := stateValuable.ToMapValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. 
+ if diags.HasError() { + return + } + + planModifyReq := planmodifier.MapRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planValue, + Private: req.Private, + State: req.State, + StateValue: stateValue, + } + + for _, planModifier := range attribute.MapPlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. + planModifyResp := &planmodifier.MapResponse{ + PlanValue: planModifyReq.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.Map", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifier.PlanModifyMap(ctx, planModifyReq, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.Map", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifyReq.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) + resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.AttributePath) + } + + // Only on new errors. + if planModifyResp.Diagnostics.HasError() { + return + } + } +} + +// AttributePlanModifyNumber performs all types.Number plan modification. +func AttributePlanModifyNumber(ctx context.Context, attribute fwxschema.AttributeWithNumberPlanModifiers, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + // Use basetypes.NumberValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. 
+ configValuable, ok := req.AttributeConfig.(basetypes.NumberValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Number Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Number attribute plan modification. "+ + "The value type must implement the basetypes.NumberValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToNumberValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planValuable, ok := req.AttributePlan.(basetypes.NumberValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Number Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Number attribute plan modification. "+ + "The value type must implement the basetypes.NumberValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributePlan), + ) + + return + } + + planValue, diags := planValuable.ToNumberValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + stateValuable, ok := req.AttributeState.(basetypes.NumberValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Number Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Number attribute plan modification. "+ + "The value type must implement the basetypes.NumberValuable interface. 
"+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeState), + ) + + return + } + + stateValue, diags := stateValuable.ToNumberValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planModifyReq := planmodifier.NumberRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planValue, + Private: req.Private, + State: req.State, + StateValue: stateValue, + } + + for _, planModifier := range attribute.NumberPlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. + planModifyResp := &planmodifier.NumberResponse{ + PlanValue: planModifyReq.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.Number", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifier.PlanModifyNumber(ctx, planModifyReq, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.Number", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifyReq.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) + resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.AttributePath) + } + + // Only on new errors. + if planModifyResp.Diagnostics.HasError() { + return + } + } +} + +// AttributePlanModifyObject performs all types.Object plan modification. 
+func AttributePlanModifyObject(ctx context.Context, attribute fwxschema.AttributeWithObjectPlanModifiers, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + // Use basetypes.ObjectValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.ObjectValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Object Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Object attribute plan modification. "+ + "The value type must implement the basetypes.ObjectValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToObjectValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planValuable, ok := req.AttributePlan.(basetypes.ObjectValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Object Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Object attribute plan modification. "+ + "The value type must implement the basetypes.ObjectValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributePlan), + ) + + return + } + + planValue, diags := planValuable.ToObjectValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. 
+ if diags.HasError() { + return + } + + stateValuable, ok := req.AttributeState.(basetypes.ObjectValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Object Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Object attribute plan modification. "+ + "The value type must implement the basetypes.ObjectValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeState), + ) + + return + } + + stateValue, diags := stateValuable.ToObjectValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planModifyReq := planmodifier.ObjectRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planValue, + Private: req.Private, + State: req.State, + StateValue: stateValue, + } + + for _, planModifier := range attribute.ObjectPlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. + planModifyResp := &planmodifier.ObjectResponse{ + PlanValue: planModifyReq.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.Object", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifier.PlanModifyObject(ctx, planModifyReq, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.Object", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifyReq.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) 
+ resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.AttributePath) + } + + // Only on new errors. + if planModifyResp.Diagnostics.HasError() { + return + } + } +} + +// AttributePlanModifySet performs all types.Set plan modification. +func AttributePlanModifySet(ctx context.Context, attribute fwxschema.AttributeWithSetPlanModifiers, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + // Use basetypes.SetValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.SetValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Set Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Set attribute plan modification. "+ + "The value type must implement the basetypes.SetValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToSetValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planValuable, ok := req.AttributePlan.(basetypes.SetValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Set Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Set attribute plan modification. "+ + "The value type must implement the basetypes.SetValuable interface. 
"+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributePlan), + ) + + return + } + + planValue, diags := planValuable.ToSetValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + stateValuable, ok := req.AttributeState.(basetypes.SetValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Set Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Set attribute plan modification. "+ + "The value type must implement the basetypes.SetValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeState), + ) + + return + } + + stateValue, diags := stateValuable.ToSetValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planModifyReq := planmodifier.SetRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planValue, + Private: req.Private, + State: req.State, + StateValue: stateValue, + } + + for _, planModifier := range attribute.SetPlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. 
+ planModifyResp := &planmodifier.SetResponse{ + PlanValue: planModifyReq.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.Set", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifier.PlanModifySet(ctx, planModifyReq, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.Set", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifyReq.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) + resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.AttributePath) + } + + // Only on new errors. + if planModifyResp.Diagnostics.HasError() { + return + } + } +} + +// AttributePlanModifyString performs all types.String plan modification. +func AttributePlanModifyString(ctx context.Context, attribute fwxschema.AttributeWithStringPlanModifiers, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + // Use basetypes.StringValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.StringValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid String Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform String attribute plan modification. "+ + "The value type must implement the basetypes.StringValuable interface. 
"+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToStringValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planValuable, ok := req.AttributePlan.(basetypes.StringValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid String Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform String attribute plan modification. "+ + "The value type must implement the basetypes.StringValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributePlan), + ) + + return + } + + planValue, diags := planValuable.ToStringValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + stateValuable, ok := req.AttributeState.(basetypes.StringValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid String Attribute Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform String attribute plan modification. "+ + "The value type must implement the basetypes.StringValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeState), + ) + + return + } + + stateValue, diags := stateValuable.ToStringValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. 
+ if diags.HasError() { + return + } + + planModifyReq := planmodifier.StringRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planValue, + Private: req.Private, + State: req.State, + StateValue: stateValue, + } + + for _, planModifier := range attribute.StringPlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. + planModifyResp := &planmodifier.StringResponse{ + PlanValue: planModifyReq.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.String", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifier.PlanModifyString(ctx, planModifyReq, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.String", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifyReq.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) + resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.AttributePath) + } + + // Only on new errors. + if planModifyResp.Diagnostics.HasError() { + return + } + } +} + +func NestedAttributeObjectPlanModify(ctx context.Context, o fwschema.NestedAttributeObject, req planmodifier.ObjectRequest, resp *ModifyAttributePlanResponse) { + if objectWithPlanModifiers, ok := o.(fwxschema.NestedAttributeObjectWithPlanModifiers); ok { + for _, objectValidator := range objectWithPlanModifiers.ObjectPlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. 
+ planModifyResp := &planmodifier.ObjectResponse{ + PlanValue: req.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.Object", + map[string]interface{}{ + logging.KeyDescription: objectValidator.Description(ctx), + }, + ) + + objectValidator.PlanModifyObject(ctx, req, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.Object", + map[string]interface{}{ + logging.KeyDescription: objectValidator.Description(ctx), + }, + ) + + req.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) + resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.Path) + } + + // only on new errors + if planModifyResp.Diagnostics.HasError() { + return + } + } + } + + newPlanValueAttributes := req.PlanValue.Attributes() + + for nestedName, nestedAttr := range o.GetAttributes() { + nestedAttrConfig, diags := objectAttributeValue(ctx, req.ConfigValue, nestedName, fwschemadata.DataDescriptionConfiguration) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + nestedAttrPlan, diags := objectAttributeValue(ctx, req.PlanValue, nestedName, fwschemadata.DataDescriptionPlan) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + nestedAttrState, diags := objectAttributeValue(ctx, req.StateValue, nestedName, fwschemadata.DataDescriptionState) + + resp.Diagnostics.Append(diags...) 
+ + if diags.HasError() { + return + } + + nestedAttrReq := ModifyAttributePlanRequest{ + AttributeConfig: nestedAttrConfig, + AttributePath: req.Path.AtName(nestedName), + AttributePathExpression: req.PathExpression.AtName(nestedName), + AttributePlan: nestedAttrPlan, + AttributeState: nestedAttrState, + Config: req.Config, + Plan: req.Plan, + Private: resp.Private, + State: req.State, + } + nestedAttrResp := &ModifyAttributePlanResponse{ + AttributePlan: nestedAttrReq.AttributePlan, + RequiresReplace: resp.RequiresReplace, + Private: nestedAttrReq.Private, + } + + AttributeModifyPlan(ctx, nestedAttr, nestedAttrReq, nestedAttrResp) + + newPlanValueAttributes[nestedName] = nestedAttrResp.AttributePlan + resp.Diagnostics.Append(nestedAttrResp.Diagnostics...) + resp.Private = nestedAttrResp.Private + resp.RequiresReplace.Append(nestedAttrResp.RequiresReplace...) + } + + newPlanValue, diags := types.ObjectValue(req.PlanValue.AttributeTypes(ctx), newPlanValueAttributes) + + resp.Diagnostics.Append(diags...) + + resp.AttributePlan = newPlanValue +} + +func attributePlanModificationValueError(ctx context.Context, value attr.Value, description fwschemadata.DataDescription, err error) diag.Diagnostic { + return diag.NewErrorDiagnostic( + "Attribute Plan Modification "+description.Title()+" Value Error", + "An unexpected error occurred while fetching a "+value.Type(ctx).String()+" element value in the "+description.String()+". "+ + "This is an issue with the provider and should be reported to the provider developers.\n\n"+ + "Original Error: "+err.Error(), + ) +} + +func attributePlanModificationWalkError(schemaPath path.Path, value attr.Value) diag.Diagnostic { + return diag.NewAttributeErrorDiagnostic( + schemaPath, + "Attribute Plan Modification Walk Error", + "An unexpected error occurred while walking the schema for attribute plan modification. 
"+ + "This is an issue with terraform-plugin-framework and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("unknown attribute value type (%T) at path: %s", value, schemaPath), + ) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/attribute_validation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/attribute_validation.go new file mode 100644 index 0000000000..1425c56920 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/attribute_validation.go @@ -0,0 +1,967 @@ +package fwserver + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +// ValidateAttributeRequest repesents a request for attribute validation. +type ValidateAttributeRequest struct { + // AttributePath contains the path of the attribute. Use this path for any + // response diagnostics. + AttributePath path.Path + + // AttributePathExpression contains the expression matching the exact path + // of the attribute. + AttributePathExpression path.Expression + + // AttributeConfig contains the value of the attribute in the configuration. + AttributeConfig attr.Value + + // Config contains the entire configuration of the data source, provider, or resource. 
+ Config tfsdk.Config +} + +// ValidateAttributeResponse represents a response to a +// ValidateAttributeRequest. An instance of this response struct is +// automatically passed through to each AttributeValidator. +type ValidateAttributeResponse struct { + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. + Diagnostics diag.Diagnostics +} + +// AttributeValidate performs all Attribute validation. +// +// TODO: Clean up this abstraction back into an internal Attribute type method. +// The extra Attribute parameter is a carry-over of creating the proto6server +// package from the tfsdk package and not wanting to export the method. +// Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/365 +func AttributeValidate(ctx context.Context, a fwschema.Attribute, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + ctx = logging.FrameworkWithAttributePath(ctx, req.AttributePath.String()) + + if !a.IsRequired() && !a.IsOptional() && !a.IsComputed() { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Attribute Definition", + "Attribute missing Required, Optional, or Computed definition. This is always a problem with the provider and should be reported to the provider developer.", + ) + + return + } + + configData := &fwschemadata.Data{ + Description: fwschemadata.DataDescriptionConfiguration, + Schema: req.Config.Schema, + TerraformValue: req.Config.Raw, + } + + attributeConfig, diags := configData.ValueAtPath(ctx, req.AttributePath) + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + // Terraform CLI does not automatically perform certain configuration + // checks yet. If it eventually does, this logic should remain at least + // until Terraform CLI versions 0.12 through the release containing the + // checks are considered end-of-life. 
+ // Reference: https://github.com/hashicorp/terraform/issues/30669 + if a.IsComputed() && !a.IsOptional() && !attributeConfig.IsNull() { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Configuration for Read-Only Attribute", + "Cannot set value for this attribute as the provider has marked it as read-only. Remove the configuration line setting the value.\n\n"+ + "Refer to the provider documentation or contact the provider developers for additional information about configurable and read-only attributes that are supported.", + ) + } + + // Terraform CLI does not automatically perform certain configuration + // checks yet. If it eventually does, this logic should remain at least + // until Terraform CLI versions 0.12 through the release containing the + // checks are considered end-of-life. + // Reference: https://github.com/hashicorp/terraform/issues/30669 + if a.IsRequired() && attributeConfig.IsNull() { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Missing Configuration for Required Attribute", + fmt.Sprintf("Must set a configuration value for the %s attribute as the provider has marked it as required.\n\n", req.AttributePath.String())+ + "Refer to the provider documentation or contact the provider developers for additional information about configurable attributes that are required.", + ) + } + + req.AttributeConfig = attributeConfig + + switch attributeWithValidators := a.(type) { + case fwxschema.AttributeWithBoolValidators: + AttributeValidateBool(ctx, attributeWithValidators, req, resp) + case fwxschema.AttributeWithFloat64Validators: + AttributeValidateFloat64(ctx, attributeWithValidators, req, resp) + case fwxschema.AttributeWithInt64Validators: + AttributeValidateInt64(ctx, attributeWithValidators, req, resp) + case fwxschema.AttributeWithListValidators: + AttributeValidateList(ctx, attributeWithValidators, req, resp) + case fwxschema.AttributeWithMapValidators: + AttributeValidateMap(ctx, attributeWithValidators, 
req, resp) + case fwxschema.AttributeWithNumberValidators: + AttributeValidateNumber(ctx, attributeWithValidators, req, resp) + case fwxschema.AttributeWithObjectValidators: + AttributeValidateObject(ctx, attributeWithValidators, req, resp) + case fwxschema.AttributeWithSetValidators: + AttributeValidateSet(ctx, attributeWithValidators, req, resp) + case fwxschema.AttributeWithStringValidators: + AttributeValidateString(ctx, attributeWithValidators, req, resp) + } + + AttributeValidateNestedAttributes(ctx, a, req, resp) + + // Show deprecation warnings only for known values. + if a.GetDeprecationMessage() != "" && !attributeConfig.IsNull() && !attributeConfig.IsUnknown() { + resp.Diagnostics.AddAttributeWarning( + req.AttributePath, + "Attribute Deprecated", + a.GetDeprecationMessage(), + ) + } +} + +// AttributeValidateBool performs all types.Bool validation. +func AttributeValidateBool(ctx context.Context, attribute fwxschema.AttributeWithBoolValidators, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + // Use basetypes.BoolValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.BoolValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Bool Attribute Validator Value Type", + "An unexpected value type was encountered while attempting to perform Bool attribute validation. "+ + "The value type must implement the basetypes.BoolValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToBoolValue(ctx) + + resp.Diagnostics.Append(diags...) 
+ + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + validateReq := validator.BoolRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, attributeValidator := range attribute.BoolValidators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateResp := &validator.BoolResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.Bool", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + attributeValidator.ValidateBool(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.Bool", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } +} + +// AttributeValidateFloat64 performs all types.Float64 validation. +func AttributeValidateFloat64(ctx context.Context, attribute fwxschema.AttributeWithFloat64Validators, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + // Use basetypes.Float64Valuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.Float64Valuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Float64 Attribute Validator Value Type", + "An unexpected value type was encountered while attempting to perform Float64 attribute validation. "+ + "The value type must implement the basetypes.Float64Valuable interface. 
"+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToFloat64Value(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + validateReq := validator.Float64Request{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, attributeValidator := range attribute.Float64Validators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateResp := &validator.Float64Response{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.Float64", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + attributeValidator.ValidateFloat64(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.Float64", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } +} + +// AttributeValidateInt64 performs all types.Int64 validation. +func AttributeValidateInt64(ctx context.Context, attribute fwxschema.AttributeWithInt64Validators, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + // Use basetypes.Int64Valuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. 
+ configValuable, ok := req.AttributeConfig.(basetypes.Int64Valuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Int64 Attribute Validator Value Type", + "An unexpected value type was encountered while attempting to perform Int64 attribute validation. "+ + "The value type must implement the basetypes.Int64Valuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToInt64Value(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + validateReq := validator.Int64Request{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, attributeValidator := range attribute.Int64Validators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateResp := &validator.Int64Response{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.Int64", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + attributeValidator.ValidateInt64(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.Int64", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } +} + +// AttributeValidateList performs all types.List validation. +func AttributeValidateList(ctx context.Context, attribute fwxschema.AttributeWithListValidators, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + // Use basetypes.ListValuable until custom types cannot re-implement + // ValueFromTerraform. 
Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.ListValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid List Attribute Validator Value Type", + "An unexpected value type was encountered while attempting to perform List attribute validation. "+ + "The value type must implement the basetypes.ListValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToListValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + validateReq := validator.ListRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, attributeValidator := range attribute.ListValidators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateResp := &validator.ListResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.List", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + attributeValidator.ValidateList(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.List", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } +} + +// AttributeValidateMap performs all types.Map validation. 
+func AttributeValidateMap(ctx context.Context, attribute fwxschema.AttributeWithMapValidators, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + // Use basetypes.MapValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.MapValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Map Attribute Validator Value Type", + "An unexpected value type was encountered while attempting to perform Map attribute validation. "+ + "The value type must implement the basetypes.MapValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToMapValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + validateReq := validator.MapRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, attributeValidator := range attribute.MapValidators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. 
+ validateResp := &validator.MapResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.Map", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + attributeValidator.ValidateMap(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.Map", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } +} + +// AttributeValidateNumber performs all types.Number validation. +func AttributeValidateNumber(ctx context.Context, attribute fwxschema.AttributeWithNumberValidators, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + // Use basetypes.NumberValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.NumberValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Number Attribute Validator Value Type", + "An unexpected value type was encountered while attempting to perform Number attribute validation. "+ + "The value type must implement the basetypes.NumberValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToNumberValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. 
+ if diags.HasError() { + return + } + + validateReq := validator.NumberRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, attributeValidator := range attribute.NumberValidators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateResp := &validator.NumberResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.Number", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + attributeValidator.ValidateNumber(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.Number", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } +} + +// AttributeValidateObject performs all types.Object validation. +func AttributeValidateObject(ctx context.Context, attribute fwxschema.AttributeWithObjectValidators, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + // Use basetypes.ObjectValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.ObjectValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Object Attribute Validator Value Type", + "An unexpected value type was encountered while attempting to perform Object attribute validation. "+ + "The value type must implement the basetypes.ObjectValuable interface. 
"+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToObjectValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + validateReq := validator.ObjectRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, attributeValidator := range attribute.ObjectValidators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateResp := &validator.ObjectResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.Object", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + attributeValidator.ValidateObject(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.Object", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } +} + +// AttributeValidateSet performs all types.Set validation. +func AttributeValidateSet(ctx context.Context, attribute fwxschema.AttributeWithSetValidators, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + // Use basetypes.SetValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. 
+ configValuable, ok := req.AttributeConfig.(basetypes.SetValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Set Attribute Validator Value Type", + "An unexpected value type was encountered while attempting to perform Set attribute validation. "+ + "The value type must implement the basetypes.SetValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToSetValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + validateReq := validator.SetRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, attributeValidator := range attribute.SetValidators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateResp := &validator.SetResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.Set", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + attributeValidator.ValidateSet(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.Set", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } +} + +// AttributeValidateString performs all types.String validation. +func AttributeValidateString(ctx context.Context, attribute fwxschema.AttributeWithStringValidators, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + // Use basetypes.StringValuable until custom types cannot re-implement + // ValueFromTerraform. 
Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.StringValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid String Attribute Validator Value Type", + "An unexpected value type was encountered while attempting to perform String attribute validation. "+ + "The value type must implement the basetypes.StringValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToStringValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + validateReq := validator.StringRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, attributeValidator := range attribute.StringValidators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateResp := &validator.StringResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.String", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + attributeValidator.ValidateString(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.String", + map[string]interface{}{ + logging.KeyDescription: attributeValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } +} + +// AttributeValidateNestedAttributes performs all nested Attributes validation. +// +// TODO: Clean up this abstraction back into an internal Attribute type method. 
+// The extra Attribute parameter is a carry-over of creating the proto6server +// package from the tfsdk package and not wanting to export the method. +// Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/365 +func AttributeValidateNestedAttributes(ctx context.Context, a fwschema.Attribute, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + nestedAttribute, ok := a.(fwschema.NestedAttribute) + + if !ok { + return + } + + nestedAttributeObject := nestedAttribute.GetNestedObject() + + nm := nestedAttribute.GetNestingMode() + switch nm { + case fwschema.NestingModeList: + listVal, ok := req.AttributeConfig.(basetypes.ListValuable) + + if !ok { + err := fmt.Errorf("unknown attribute value type (%T) for nesting mode (%T) at path: %s", req.AttributeConfig, nm, req.AttributePath) + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Attribute Validation Error Invalid Value Type", + "A type that implements basetypes.ListValuable is expected here. Report this to the provider developer:\n\n"+err.Error(), + ) + + return + } + + l, diags := listVal.ToListValue(ctx) + + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + for idx, value := range l.Elements() { + nestedAttributeObjectReq := ValidateAttributeRequest{ + AttributeConfig: value, + AttributePath: req.AttributePath.AtListIndex(idx), + AttributePathExpression: req.AttributePathExpression.AtListIndex(idx), + Config: req.Config, + } + nestedAttributeObjectResp := &ValidateAttributeResponse{} + + NestedAttributeObjectValidate(ctx, nestedAttributeObject, nestedAttributeObjectReq, nestedAttributeObjectResp) + + resp.Diagnostics.Append(nestedAttributeObjectResp.Diagnostics...) 
+ } + case fwschema.NestingModeSet: + setVal, ok := req.AttributeConfig.(basetypes.SetValuable) + + if !ok { + err := fmt.Errorf("unknown attribute value type (%T) for nesting mode (%T) at path: %s", req.AttributeConfig, nm, req.AttributePath) + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Attribute Validation Error Invalid Value Type", + "A type that implements basetypes.SetValuable is expected here. Report this to the provider developer:\n\n"+err.Error(), + ) + + return + } + + s, diags := setVal.ToSetValue(ctx) + + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + for _, value := range s.Elements() { + nestedAttributeObjectReq := ValidateAttributeRequest{ + AttributeConfig: value, + AttributePath: req.AttributePath.AtSetValue(value), + AttributePathExpression: req.AttributePathExpression.AtSetValue(value), + Config: req.Config, + } + nestedAttributeObjectResp := &ValidateAttributeResponse{} + + NestedAttributeObjectValidate(ctx, nestedAttributeObject, nestedAttributeObjectReq, nestedAttributeObjectResp) + + resp.Diagnostics.Append(nestedAttributeObjectResp.Diagnostics...) + } + case fwschema.NestingModeMap: + mapVal, ok := req.AttributeConfig.(basetypes.MapValuable) + + if !ok { + err := fmt.Errorf("unknown attribute value type (%T) for nesting mode (%T) at path: %s", req.AttributeConfig, nm, req.AttributePath) + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Attribute Validation Error Invalid Value Type", + "A type that implements basetypes.MapValuable is expected here. Report this to the provider developer:\n\n"+err.Error(), + ) + + return + } + + m, diags := mapVal.ToMapValue(ctx) + + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + for key, value := range m.Elements() { + nestedAttributeObjectReq := ValidateAttributeRequest{ + AttributeConfig: value, + AttributePath: req.AttributePath.AtMapKey(key), + AttributePathExpression: req.AttributePathExpression.AtMapKey(key), + Config: req.Config, + } + nestedAttributeObjectResp := &ValidateAttributeResponse{} + + NestedAttributeObjectValidate(ctx, nestedAttributeObject, nestedAttributeObjectReq, nestedAttributeObjectResp) + + resp.Diagnostics.Append(nestedAttributeObjectResp.Diagnostics...) + } + case fwschema.NestingModeSingle: + objectVal, ok := req.AttributeConfig.(basetypes.ObjectValuable) + + if !ok { + err := fmt.Errorf("unknown attribute value type (%T) for nesting mode (%T) at path: %s", req.AttributeConfig, nm, req.AttributePath) + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Attribute Validation Error Invalid Value Type", + "A type that implements basetypes.ObjectValuable is expected here. Report this to the provider developer:\n\n"+err.Error(), + ) + + return + } + + o, diags := objectVal.ToObjectValue(ctx) + + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + if o.IsNull() || o.IsUnknown() { + return + } + + nestedAttributeObjectReq := ValidateAttributeRequest{ + AttributeConfig: o, + AttributePath: req.AttributePath, + AttributePathExpression: req.AttributePathExpression, + Config: req.Config, + } + nestedAttributeObjectResp := &ValidateAttributeResponse{} + + NestedAttributeObjectValidate(ctx, nestedAttributeObject, nestedAttributeObjectReq, nestedAttributeObjectResp) + + resp.Diagnostics.Append(nestedAttributeObjectResp.Diagnostics...) + default: + err := fmt.Errorf("unknown attribute validation nesting mode (%T: %v) at path: %s", nm, nm, req.AttributePath) + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Attribute Validation Error", + "Attribute validation cannot walk schema. 
Report this to the provider developer:\n\n"+err.Error(), + ) + + return + } +} + +func NestedAttributeObjectValidate(ctx context.Context, o fwschema.NestedAttributeObject, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + objectWithValidators, ok := o.(fwxschema.NestedAttributeObjectWithValidators) + + if ok { + objectVal, ok := req.AttributeConfig.(basetypes.ObjectValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Attribute Validation Walk Error", + "An unexpected error occurred while walking the schema for attribute validation. "+ + "This is an issue with terraform-plugin-framework and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Unknown attribute value type (%T) at path: %s", req.AttributeConfig, req.AttributePath), + ) + + return + } + + object, diags := objectVal.ToObjectValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have + // errors from other attributes. + if diags.HasError() { + return + } + + validateReq := validator.ObjectRequest{ + Config: req.Config, + ConfigValue: object, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, objectValidator := range objectWithValidators.ObjectValidators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateResp := &validator.ObjectResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.Object", + map[string]interface{}{ + logging.KeyDescription: objectValidator.Description(ctx), + }, + ) + + objectValidator.ValidateObject(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.Object", + map[string]interface{}{ + logging.KeyDescription: objectValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) 
+ } + } + + for nestedName, nestedAttr := range o.GetAttributes() { + nestedAttrReq := ValidateAttributeRequest{ + AttributePath: req.AttributePath.AtName(nestedName), + AttributePathExpression: req.AttributePathExpression.AtName(nestedName), + Config: req.Config, + } + nestedAttrResp := &ValidateAttributeResponse{} + + AttributeValidate(ctx, nestedAttr, nestedAttrReq, nestedAttrResp) + + resp.Diagnostics.Append(nestedAttrResp.Diagnostics...) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/block_plan_modification.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/block_plan_modification.go new file mode 100644 index 0000000000..91dafc524b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/block_plan_modification.go @@ -0,0 +1,834 @@ +package fwserver + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +// BlockModifyPlan performs all Block plan modification. +// +// TODO: Clean up this abstraction back into an internal Block type method. +// The extra Block parameter is a carry-over of creating the proto6server +// package from the tfsdk package and not wanting to export the method. 
+// Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/365 +func BlockModifyPlan(ctx context.Context, b fwschema.Block, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + if req.Private != nil { + resp.Private = req.Private + } + + switch blockWithPlanModifiers := b.(type) { + case fwxschema.BlockWithListPlanModifiers: + BlockPlanModifyList(ctx, blockWithPlanModifiers, req, resp) + case fwxschema.BlockWithObjectPlanModifiers: + BlockPlanModifyObject(ctx, blockWithPlanModifiers, req, resp) + case fwxschema.BlockWithSetPlanModifiers: + BlockPlanModifySet(ctx, blockWithPlanModifiers, req, resp) + } + + if resp.Diagnostics.HasError() { + return + } + + // Null and unknown values should not have nested schema to modify. + if resp.AttributePlan.IsNull() || resp.AttributePlan.IsUnknown() { + return + } + + nestedBlockObject := b.GetNestedObject() + + nm := b.GetNestingMode() + switch nm { + case fwschema.BlockNestingModeList: + configList, diags := coerceListValue(ctx, req.AttributePath, req.AttributeConfig) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planList, diags := coerceListValue(ctx, req.AttributePath, req.AttributePlan) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + stateList, diags := coerceListValue(ctx, req.AttributePath, req.AttributeState) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planElements := planList.Elements() + + for idx, planElem := range planElements { + attrPath := req.AttributePath.AtListIndex(idx) + + configObject, diags := listElemObject(ctx, attrPath, configList, idx, fwschemadata.DataDescriptionConfiguration) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planObject, diags := coerceObjectValue(ctx, attrPath, planElem) + + resp.Diagnostics.Append(diags...) 
+ + if resp.Diagnostics.HasError() { + return + } + + stateObject, diags := listElemObject(ctx, attrPath, stateList, idx, fwschemadata.DataDescriptionState) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + objectReq := planmodifier.ObjectRequest{ + Config: req.Config, + ConfigValue: configObject, + Path: attrPath, + PathExpression: attrPath.Expression(), + Plan: req.Plan, + PlanValue: planObject, + Private: resp.Private, + State: req.State, + StateValue: stateObject, + } + objectResp := &ModifyAttributePlanResponse{ + AttributePlan: objectReq.PlanValue, + Private: objectReq.Private, + } + + NestedBlockObjectPlanModify(ctx, nestedBlockObject, objectReq, objectResp) + + planElements[idx] = objectResp.AttributePlan + resp.Diagnostics.Append(objectResp.Diagnostics...) + resp.Private = objectResp.Private + resp.RequiresReplace.Append(objectResp.RequiresReplace...) + } + + resp.AttributePlan, diags = types.ListValue(planList.ElementType(ctx), planElements) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + case fwschema.BlockNestingModeSet: + configSet, diags := coerceSetValue(ctx, req.AttributePath, req.AttributeConfig) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planSet, diags := coerceSetValue(ctx, req.AttributePath, req.AttributePlan) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + stateSet, diags := coerceSetValue(ctx, req.AttributePath, req.AttributeState) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planElements := planSet.Elements() + + for idx, planElem := range planElements { + attrPath := req.AttributePath.AtSetValue(planElem) + + configObject, diags := setElemObject(ctx, attrPath, configSet, idx, fwschemadata.DataDescriptionConfiguration) + + resp.Diagnostics.Append(diags...) 
+ + if resp.Diagnostics.HasError() { + return + } + + planObject, diags := coerceObjectValue(ctx, attrPath, planElem) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + stateObject, diags := setElemObject(ctx, attrPath, stateSet, idx, fwschemadata.DataDescriptionState) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + objectReq := planmodifier.ObjectRequest{ + Config: req.Config, + ConfigValue: configObject, + Path: attrPath, + PathExpression: attrPath.Expression(), + Plan: req.Plan, + PlanValue: planObject, + Private: resp.Private, + State: req.State, + StateValue: stateObject, + } + objectResp := &ModifyAttributePlanResponse{ + AttributePlan: objectReq.PlanValue, + Private: objectReq.Private, + } + + NestedBlockObjectPlanModify(ctx, nestedBlockObject, objectReq, objectResp) + + planElements[idx] = objectResp.AttributePlan + resp.Diagnostics.Append(objectResp.Diagnostics...) + resp.Private = objectResp.Private + resp.RequiresReplace.Append(objectResp.RequiresReplace...) + } + + resp.AttributePlan, diags = types.SetValue(planSet.ElementType(ctx), planElements) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + case fwschema.BlockNestingModeSingle: + configObject, diags := coerceObjectValue(ctx, req.AttributePath, req.AttributeConfig) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + planObject, diags := coerceObjectValue(ctx, req.AttributePath, req.AttributePlan) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + stateObject, diags := coerceObjectValue(ctx, req.AttributePath, req.AttributeState) + + resp.Diagnostics.Append(diags...) 
+ + if resp.Diagnostics.HasError() { + return + } + + objectReq := planmodifier.ObjectRequest{ + Config: req.Config, + ConfigValue: configObject, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planObject, + Private: resp.Private, + State: req.State, + StateValue: stateObject, + } + objectResp := &ModifyAttributePlanResponse{ + AttributePlan: objectReq.PlanValue, + Private: objectReq.Private, + } + + NestedBlockObjectPlanModify(ctx, nestedBlockObject, objectReq, objectResp) + + resp.AttributePlan = objectResp.AttributePlan + resp.Diagnostics.Append(objectResp.Diagnostics...) + resp.Private = objectResp.Private + resp.RequiresReplace.Append(objectResp.RequiresReplace...) + default: + err := fmt.Errorf("unknown block plan modification nesting mode (%T: %v) at path: %s", nm, nm, req.AttributePath) + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Block Plan Modification Error", + "Block plan modification cannot walk schema. Report this to the provider developer:\n\n"+err.Error(), + ) + + return + } +} + +// BlockPlanModifyList performs all types.List plan modification. +func BlockPlanModifyList(ctx context.Context, block fwxschema.BlockWithListPlanModifiers, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + // Use basetypes.ListValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.ListValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid List Block Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform List block plan modification. "+ + "The value type must implement the basetypes.ListValuable interface. 
"+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToListValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planValuable, ok := req.AttributePlan.(basetypes.ListValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid List Block Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform List block plan modification. "+ + "The value type must implement the basetypes.ListValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributePlan), + ) + + return + } + + planValue, diags := planValuable.ToListValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + stateValuable, ok := req.AttributeState.(basetypes.ListValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid List Block Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform List block plan modification. "+ + "The value type must implement the basetypes.ListValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeState), + ) + + return + } + + stateValue, diags := stateValuable.ToListValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. 
+ if diags.HasError() { + return + } + + planModifyReq := planmodifier.ListRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planValue, + Private: req.Private, + State: req.State, + StateValue: stateValue, + } + + for _, planModifier := range block.ListPlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. + planModifyResp := &planmodifier.ListResponse{ + PlanValue: planModifyReq.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.List", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifier.PlanModifyList(ctx, planModifyReq, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.List", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifyReq.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) + resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.AttributePath) + } + + // Only on new errors. + if planModifyResp.Diagnostics.HasError() { + return + } + } +} + +// BlockPlanModifyObject performs all types.Object plan modification. +func BlockPlanModifyObject(ctx context.Context, block fwxschema.BlockWithObjectPlanModifiers, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + // Use basetypes.ObjectValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. 
+ configValuable, ok := req.AttributeConfig.(basetypes.ObjectValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Object Block Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Object block plan modification. "+ + "The value type must implement the basetypes.ObjectValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToObjectValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planValuable, ok := req.AttributePlan.(basetypes.ObjectValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Object Block Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Object block plan modification. "+ + "The value type must implement the basetypes.ObjectValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributePlan), + ) + + return + } + + planValue, diags := planValuable.ToObjectValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + stateValuable, ok := req.AttributeState.(basetypes.ObjectValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Object Block Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Object block plan modification. "+ + "The value type must implement the basetypes.ObjectValuable interface. 
"+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeState), + ) + + return + } + + stateValue, diags := stateValuable.ToObjectValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planModifyReq := planmodifier.ObjectRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planValue, + Private: req.Private, + State: req.State, + StateValue: stateValue, + } + + for _, planModifier := range block.ObjectPlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. + planModifyResp := &planmodifier.ObjectResponse{ + PlanValue: planModifyReq.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.Object", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifier.PlanModifyObject(ctx, planModifyReq, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.Object", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifyReq.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) + resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.AttributePath) + } + + // Only on new errors. + if planModifyResp.Diagnostics.HasError() { + return + } + } +} + +// BlockPlanModifySet performs all types.Set plan modification. 
+func BlockPlanModifySet(ctx context.Context, block fwxschema.BlockWithSetPlanModifiers, req ModifyAttributePlanRequest, resp *ModifyAttributePlanResponse) { + // Use basetypes.SetValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.SetValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Set Block Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Set block plan modification. "+ + "The value type must implement the basetypes.SetValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToSetValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planValuable, ok := req.AttributePlan.(basetypes.SetValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Set Block Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Set block plan modification. "+ + "The value type must implement the basetypes.SetValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributePlan), + ) + + return + } + + planValue, diags := planValuable.ToSetValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. 
+ if diags.HasError() { + return + } + + stateValuable, ok := req.AttributeState.(basetypes.SetValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Set Block Plan Modifier Value Type", + "An unexpected value type was encountered while attempting to perform Set block plan modification. "+ + "The value type must implement the basetypes.SetValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeState), + ) + + return + } + + stateValue, diags := stateValuable.ToSetValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + planModifyReq := planmodifier.SetRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + Plan: req.Plan, + PlanValue: planValue, + Private: req.Private, + State: req.State, + StateValue: stateValue, + } + + for _, planModifier := range block.SetPlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. + planModifyResp := &planmodifier.SetResponse{ + PlanValue: planModifyReq.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.Set", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifier.PlanModifySet(ctx, planModifyReq, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.Set", + map[string]interface{}{ + logging.KeyDescription: planModifier.Description(ctx), + }, + ) + + planModifyReq.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) 
+ resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.AttributePath) + } + + // Only on new errors. + if planModifyResp.Diagnostics.HasError() { + return + } + } +} + +func NestedBlockObjectPlanModify(ctx context.Context, o fwschema.NestedBlockObject, req planmodifier.ObjectRequest, resp *ModifyAttributePlanResponse) { + if objectWithPlanModifiers, ok := o.(fwxschema.NestedBlockObjectWithPlanModifiers); ok { + for _, objectValidator := range objectWithPlanModifiers.ObjectPlanModifiers() { + // Instantiate a new response for each request to prevent plan modifiers + // from modifying or removing diagnostics. + planModifyResp := &planmodifier.ObjectResponse{ + PlanValue: req.PlanValue, + Private: resp.Private, + } + + logging.FrameworkDebug( + ctx, + "Calling provider defined planmodifier.Object", + map[string]interface{}{ + logging.KeyDescription: objectValidator.Description(ctx), + }, + ) + + objectValidator.PlanModifyObject(ctx, req, planModifyResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined planmodifier.Object", + map[string]interface{}{ + logging.KeyDescription: objectValidator.Description(ctx), + }, + ) + + req.PlanValue = planModifyResp.PlanValue + resp.AttributePlan = planModifyResp.PlanValue + resp.Diagnostics.Append(planModifyResp.Diagnostics...) + resp.Private = planModifyResp.Private + + if planModifyResp.RequiresReplace { + resp.RequiresReplace.Append(req.Path) + } + + // only on new errors + if planModifyResp.Diagnostics.HasError() { + return + } + } + } + + newPlanValueAttributes := req.PlanValue.Attributes() + + for nestedName, nestedAttr := range o.GetAttributes() { + nestedAttrConfig, diags := objectAttributeValue(ctx, req.ConfigValue, nestedName, fwschemadata.DataDescriptionConfiguration) + + resp.Diagnostics.Append(diags...) 
+ + if diags.HasError() { + return + } + + nestedAttrPlan, diags := objectAttributeValue(ctx, req.PlanValue, nestedName, fwschemadata.DataDescriptionPlan) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + nestedAttrState, diags := objectAttributeValue(ctx, req.StateValue, nestedName, fwschemadata.DataDescriptionState) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + nestedAttrReq := ModifyAttributePlanRequest{ + AttributeConfig: nestedAttrConfig, + AttributePath: req.Path.AtName(nestedName), + AttributePathExpression: req.PathExpression.AtName(nestedName), + AttributePlan: nestedAttrPlan, + AttributeState: nestedAttrState, + Config: req.Config, + Plan: req.Plan, + Private: resp.Private, + State: req.State, + } + nestedAttrResp := &ModifyAttributePlanResponse{ + AttributePlan: nestedAttrReq.AttributePlan, + RequiresReplace: resp.RequiresReplace, + Private: nestedAttrReq.Private, + } + + AttributeModifyPlan(ctx, nestedAttr, nestedAttrReq, nestedAttrResp) + + newPlanValueAttributes[nestedName] = nestedAttrResp.AttributePlan + resp.Diagnostics.Append(nestedAttrResp.Diagnostics...) + resp.Private = nestedAttrResp.Private + resp.RequiresReplace.Append(nestedAttrResp.RequiresReplace...) + } + + for nestedName, nestedBlock := range o.GetBlocks() { + nestedBlockConfig, diags := objectAttributeValue(ctx, req.ConfigValue, nestedName, fwschemadata.DataDescriptionConfiguration) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + nestedBlockPlan, diags := objectAttributeValue(ctx, req.PlanValue, nestedName, fwschemadata.DataDescriptionPlan) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + nestedBlockState, diags := objectAttributeValue(ctx, req.StateValue, nestedName, fwschemadata.DataDescriptionState) + + resp.Diagnostics.Append(diags...) 
+ + if diags.HasError() { + return + } + + nestedBlockReq := ModifyAttributePlanRequest{ + AttributeConfig: nestedBlockConfig, + AttributePath: req.Path.AtName(nestedName), + AttributePathExpression: req.PathExpression.AtName(nestedName), + AttributePlan: nestedBlockPlan, + AttributeState: nestedBlockState, + Config: req.Config, + Plan: req.Plan, + Private: resp.Private, + State: req.State, + } + nestedBlockResp := &ModifyAttributePlanResponse{ + AttributePlan: nestedBlockReq.AttributePlan, + RequiresReplace: resp.RequiresReplace, + Private: nestedBlockReq.Private, + } + + BlockModifyPlan(ctx, nestedBlock, nestedBlockReq, nestedBlockResp) + + newPlanValueAttributes[nestedName] = nestedBlockResp.AttributePlan + resp.Diagnostics.Append(nestedBlockResp.Diagnostics...) + resp.Private = nestedBlockResp.Private + resp.RequiresReplace.Append(nestedBlockResp.RequiresReplace...) + } + + newPlanValue, diags := types.ObjectValue(req.PlanValue.AttributeTypes(ctx), newPlanValueAttributes) + + resp.Diagnostics.Append(diags...) 
+ + resp.AttributePlan = newPlanValue +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/block_validation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/block_validation.go new file mode 100644 index 0000000000..25eb965019 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/block_validation.go @@ -0,0 +1,453 @@ +package fwserver + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +// BlockValidate performs all Block validation. +// +// TODO: Clean up this abstraction back into an internal Block type method. +// The extra Block parameter is a carry-over of creating the proto6server +// package from the tfsdk package and not wanting to export the method. +// Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/365 +func BlockValidate(ctx context.Context, b fwschema.Block, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + configData := &fwschemadata.Data{ + Description: fwschemadata.DataDescriptionConfiguration, + Schema: req.Config.Schema, + TerraformValue: req.Config.Raw, + } + + attributeConfig, diags := configData.ValueAtPath(ctx, req.AttributePath) + resp.Diagnostics.Append(diags...) 
+ + if diags.HasError() { + return + } + + req.AttributeConfig = attributeConfig + + switch blockWithValidators := b.(type) { + case fwxschema.BlockWithListValidators: + BlockValidateList(ctx, blockWithValidators, req, resp) + case fwxschema.BlockWithObjectValidators: + BlockValidateObject(ctx, blockWithValidators, req, resp) + case fwxschema.BlockWithSetValidators: + BlockValidateSet(ctx, blockWithValidators, req, resp) + } + + nestedBlockObject := b.GetNestedObject() + + nm := b.GetNestingMode() + switch nm { + case fwschema.BlockNestingModeList: + listVal, ok := req.AttributeConfig.(basetypes.ListValuable) + + if !ok { + err := fmt.Errorf("unknown block value type (%T) for nesting mode (%T) at path: %s", req.AttributeConfig, nm, req.AttributePath) + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Block Validation Error Invalid Value Type", + "A type that implements basetypes.ListValuable is expected here. Report this to the provider developer:\n\n"+err.Error(), + ) + + return + } + + l, diags := listVal.ToListValue(ctx) + + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + for idx, value := range l.Elements() { + nestedBlockObjectReq := ValidateAttributeRequest{ + AttributeConfig: value, + AttributePath: req.AttributePath.AtListIndex(idx), + AttributePathExpression: req.AttributePathExpression.AtListIndex(idx), + Config: req.Config, + } + nestedBlockObjectResp := &ValidateAttributeResponse{} + + NestedBlockObjectValidate(ctx, nestedBlockObject, nestedBlockObjectReq, nestedBlockObjectResp) + + resp.Diagnostics.Append(nestedBlockObjectResp.Diagnostics...) 
+ } + case fwschema.BlockNestingModeSet: + setVal, ok := req.AttributeConfig.(basetypes.SetValuable) + + if !ok { + err := fmt.Errorf("unknown block value type (%T) for nesting mode (%T) at path: %s", req.AttributeConfig, nm, req.AttributePath) + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Block Validation Error Invalid Value Type", + "A type that implements basetypes.SetValuable is expected here. Report this to the provider developer:\n\n"+err.Error(), + ) + + return + } + + s, diags := setVal.ToSetValue(ctx) + + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + for _, value := range s.Elements() { + nestedBlockObjectReq := ValidateAttributeRequest{ + AttributeConfig: value, + AttributePath: req.AttributePath.AtSetValue(value), + AttributePathExpression: req.AttributePathExpression.AtSetValue(value), + Config: req.Config, + } + nestedBlockObjectResp := &ValidateAttributeResponse{} + + NestedBlockObjectValidate(ctx, nestedBlockObject, nestedBlockObjectReq, nestedBlockObjectResp) + + resp.Diagnostics.Append(nestedBlockObjectResp.Diagnostics...) + } + case fwschema.BlockNestingModeSingle: + objectVal, ok := req.AttributeConfig.(basetypes.ObjectValuable) + + if !ok { + err := fmt.Errorf("unknown block value type (%T) for nesting mode (%T) at path: %s", req.AttributeConfig, nm, req.AttributePath) + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Block Validation Error Invalid Value Type", + "A type that implements basetypes.ObjectValuable is expected here. Report this to the provider developer:\n\n"+err.Error(), + ) + + return + } + + o, diags := objectVal.ToObjectValue(ctx) + + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + nestedBlockObjectReq := ValidateAttributeRequest{ + AttributeConfig: o, + AttributePath: req.AttributePath, + AttributePathExpression: req.AttributePathExpression, + Config: req.Config, + } + nestedBlockObjectResp := &ValidateAttributeResponse{} + + NestedBlockObjectValidate(ctx, nestedBlockObject, nestedBlockObjectReq, nestedBlockObjectResp) + + resp.Diagnostics.Append(nestedBlockObjectResp.Diagnostics...) + default: + err := fmt.Errorf("unknown block validation nesting mode (%T: %v) at path: %s", nm, nm, req.AttributePath) + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Block Validation Error", + "Block validation cannot walk schema. Report this to the provider developer:\n\n"+err.Error(), + ) + + return + } + + // Show deprecation warning only on known values. + if b.GetDeprecationMessage() != "" && !attributeConfig.IsNull() && !attributeConfig.IsUnknown() { + resp.Diagnostics.AddAttributeWarning( + req.AttributePath, + "Block Deprecated", + b.GetDeprecationMessage(), + ) + } +} + +// BlockValidateList performs all types.List validation. +func BlockValidateList(ctx context.Context, block fwxschema.BlockWithListValidators, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + // Use basetypes.ListValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.ListValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid List Attribute Validator Value Type", + "An unexpected value type was encountered while attempting to perform List attribute validation. "+ + "The value type must implement the basetypes.ListValuable interface. 
"+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToListValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + validateReq := validator.ListRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, blockValidator := range block.ListValidators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateResp := &validator.ListResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.List", + map[string]interface{}{ + logging.KeyDescription: blockValidator.Description(ctx), + }, + ) + + blockValidator.ValidateList(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.List", + map[string]interface{}{ + logging.KeyDescription: blockValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } +} + +// BlockValidateObject performs all types.Object validation. +func BlockValidateObject(ctx context.Context, block fwxschema.BlockWithObjectValidators, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + // Use basetypes.ObjectValuable until custom types cannot re-implement + // ValueFromTerraform. Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. 
+ configValuable, ok := req.AttributeConfig.(basetypes.ObjectValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Object Attribute Validator Value Type", + "An unexpected value type was encountered while attempting to perform Object attribute validation. "+ + "The value type must implement the basetypes.ObjectValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToObjectValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + validateReq := validator.ObjectRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, blockValidator := range block.ObjectValidators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateResp := &validator.ObjectResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.Object", + map[string]interface{}{ + logging.KeyDescription: blockValidator.Description(ctx), + }, + ) + + blockValidator.ValidateObject(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.Object", + map[string]interface{}{ + logging.KeyDescription: blockValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) + } +} + +// BlockValidateSet performs all types.Set validation. +func BlockValidateSet(ctx context.Context, block fwxschema.BlockWithSetValidators, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + // Use basetypes.SetValuable until custom types cannot re-implement + // ValueFromTerraform. 
Until then, custom types are not technically + // required to implement this interface. This opts to enforce the + // requirement before compatibility promises would interfere. + configValuable, ok := req.AttributeConfig.(basetypes.SetValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Invalid Set Attribute Validator Value Type", + "An unexpected value type was encountered while attempting to perform Set attribute validation. "+ + "The value type must implement the basetypes.SetValuable interface. "+ + "Please report this to the provider developers.\n\n"+ + fmt.Sprintf("Incoming Value Type: %T", req.AttributeConfig), + ) + + return + } + + configValue, diags := configValuable.ToSetValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have errors + // from other attributes. + if diags.HasError() { + return + } + + validateReq := validator.SetRequest{ + Config: req.Config, + ConfigValue: configValue, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, blockValidator := range block.SetValidators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateResp := &validator.SetResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.Set", + map[string]interface{}{ + logging.KeyDescription: blockValidator.Description(ctx), + }, + ) + + blockValidator.ValidateSet(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.Set", + map[string]interface{}{ + logging.KeyDescription: blockValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) 
+ } +} + +func NestedBlockObjectValidate(ctx context.Context, o fwschema.NestedBlockObject, req ValidateAttributeRequest, resp *ValidateAttributeResponse) { + objectWithValidators, ok := o.(fwxschema.NestedBlockObjectWithValidators) + + if ok { + objectVal, ok := req.AttributeConfig.(basetypes.ObjectValuable) + + if !ok { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + "Block Validation Walk Error", + "An unexpected error occurred while walking the schema for block validation. "+ + "This is an issue with terraform-plugin-framework and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Unknown block value type (%T) at path: %s", req.AttributeConfig, req.AttributePath), + ) + + return + } + + object, diags := objectVal.ToObjectValue(ctx) + + resp.Diagnostics.Append(diags...) + + // Only return early on new errors as the resp.Diagnostics may have + // errors from other attributes. + if diags.HasError() { + return + } + + validateReq := validator.ObjectRequest{ + Config: req.Config, + ConfigValue: object, + Path: req.AttributePath, + PathExpression: req.AttributePathExpression, + } + + for _, objectValidator := range objectWithValidators.ObjectValidators() { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateResp := &validator.ObjectResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined validator.Object", + map[string]interface{}{ + logging.KeyDescription: objectValidator.Description(ctx), + }, + ) + + objectValidator.ValidateObject(ctx, validateReq, validateResp) + + logging.FrameworkDebug( + ctx, + "Called provider defined validator.Object", + map[string]interface{}{ + logging.KeyDescription: objectValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(validateResp.Diagnostics...) 
+ } + } + + for nestedName, nestedAttr := range o.GetAttributes() { + nestedAttrReq := ValidateAttributeRequest{ + AttributePath: req.AttributePath.AtName(nestedName), + AttributePathExpression: req.AttributePathExpression.AtName(nestedName), + Config: req.Config, + } + nestedAttrResp := &ValidateAttributeResponse{} + + AttributeValidate(ctx, nestedAttr, nestedAttrReq, nestedAttrResp) + + resp.Diagnostics.Append(nestedAttrResp.Diagnostics...) + } + + for nestedName, nestedBlock := range o.GetBlocks() { + nestedBlockReq := ValidateAttributeRequest{ + AttributePath: req.AttributePath.AtName(nestedName), + AttributePathExpression: req.AttributePathExpression.AtName(nestedName), + Config: req.Config, + } + nestedBlockResp := &ValidateAttributeResponse{} + + BlockValidate(ctx, nestedBlock, nestedBlockReq, nestedBlockResp) + + resp.Diagnostics.Append(nestedBlockResp.Diagnostics...) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/doc.go new file mode 100644 index 0000000000..577051167b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/doc.go @@ -0,0 +1,5 @@ +// Package fwserver contains the framework provider server implementation. +// This package should only ever contain framework-native types, while specific +// protocol version compatible implementations, such as proto6server, are +// implemented on top of this abstraction. 
+package fwserver diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/schema_plan_modification.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/schema_plan_modification.go new file mode 100644 index 0000000000..d9e910c675 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/schema_plan_modification.go @@ -0,0 +1,194 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ModifySchemaPlanRequest represents a request for a schema to run all +// attribute plan modification functions. +type ModifySchemaPlanRequest struct { + // Config is the configuration the user supplied for the resource. + Config tfsdk.Config + + // State is the current state of the resource. + State tfsdk.State + + // Plan is the planned new state for the resource. + Plan tfsdk.Plan + + // ProviderMeta is metadata from the provider_meta block of the module. + ProviderMeta tfsdk.Config + + // Private is provider private state data. + Private *privatestate.ProviderData +} + +// ModifySchemaPlanResponse represents a response to a ModifySchemaPlanRequest. +type ModifySchemaPlanResponse struct { + // Plan is the planned new state for the resource. + Plan tfsdk.Plan + + // RequiresReplace is a list of attribute paths that require the + // resource to be replaced. They should point to the specific field + // that changed that requires the resource to be destroyed and + // recreated. 
+ RequiresReplace path.Paths + + // Private is provider private state data following potential modifications. + Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to running all attribute + // plan modifiers. Returning an empty slice indicates a successful + // plan modification with no warnings or errors generated. + Diagnostics diag.Diagnostics +} + +// SchemaModifyPlan runs all AttributePlanModifiers in all schema attributes +// and blocks. +// +// TODO: Clean up this abstraction back into an internal Schema type method. +// The extra Schema parameter is a carry-over of creating the proto6server +// package from the tfsdk package and not wanting to export the method. +// Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/365 +func SchemaModifyPlan(ctx context.Context, s fwschema.Schema, req ModifySchemaPlanRequest, resp *ModifySchemaPlanResponse) { + var diags diag.Diagnostics + + configData := &fwschemadata.Data{ + Description: fwschemadata.DataDescriptionConfiguration, + Schema: req.Config.Schema, + TerraformValue: req.Config.Raw, + } + + planData := &fwschemadata.Data{ + Description: fwschemadata.DataDescriptionPlan, + Schema: req.Plan.Schema, + TerraformValue: req.Plan.Raw, + } + + stateData := &fwschemadata.Data{ + Description: fwschemadata.DataDescriptionState, + Schema: req.State.Schema, + TerraformValue: req.State.Raw, + } + + for name, attribute := range s.GetAttributes() { + attrReq := ModifyAttributePlanRequest{ + AttributePath: path.Root(name), + Config: req.Config, + State: req.State, + Plan: req.Plan, + ProviderMeta: req.ProviderMeta, + Private: req.Private, + } + + attrReq.AttributeConfig, diags = configData.ValueAtPath(ctx, attrReq.AttributePath) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + attrReq.AttributePlan, diags = planData.ValueAtPath(ctx, attrReq.AttributePath) + + resp.Diagnostics.Append(diags...) 
+ + if diags.HasError() { + return + } + + attrReq.AttributeState, diags = stateData.ValueAtPath(ctx, attrReq.AttributePath) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + attrResp := ModifyAttributePlanResponse{ + AttributePlan: attrReq.AttributePlan, + Private: attrReq.Private, + } + + AttributeModifyPlan(ctx, attribute, attrReq, &attrResp) + + resp.Diagnostics.Append(attrResp.Diagnostics...) + + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.Plan.SetAttribute(ctx, attrReq.AttributePath, attrResp.AttributePlan)...) + + if resp.Diagnostics.HasError() { + return + } + + resp.RequiresReplace = append(resp.RequiresReplace, attrResp.RequiresReplace...) + resp.Private = attrResp.Private + } + + for name, block := range s.GetBlocks() { + blockReq := ModifyAttributePlanRequest{ + AttributePath: path.Root(name), + Config: req.Config, + State: req.State, + Plan: req.Plan, + ProviderMeta: req.ProviderMeta, + Private: req.Private, + } + + blockReq.AttributeConfig, diags = configData.ValueAtPath(ctx, blockReq.AttributePath) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + blockReq.AttributePlan, diags = planData.ValueAtPath(ctx, blockReq.AttributePath) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + blockReq.AttributeState, diags = stateData.ValueAtPath(ctx, blockReq.AttributePath) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + blockResp := ModifyAttributePlanResponse{ + AttributePlan: blockReq.AttributePlan, + Private: blockReq.Private, + } + + BlockModifyPlan(ctx, block, blockReq, &blockResp) + + resp.Diagnostics.Append(blockResp.Diagnostics...) + + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.Plan.SetAttribute(ctx, blockReq.AttributePath, blockResp.AttributePlan)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + resp.RequiresReplace = append(resp.RequiresReplace, blockResp.RequiresReplace...) + resp.Private = blockResp.Private + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/schema_validation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/schema_validation.go new file mode 100644 index 0000000000..2830d1196e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/schema_validation.go @@ -0,0 +1,74 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ValidateSchemaRequest repesents a request for validating a Schema. +type ValidateSchemaRequest struct { + // Config contains the entire configuration of the data source, provider, or resource. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config tfsdk.Config +} + +// ValidateSchemaResponse represents a response to a +// ValidateSchemaRequest. +type ValidateSchemaResponse struct { + // Diagnostics report errors or warnings related to validating the schema. + // An empty slice indicates success, with no warnings or errors generated. + Diagnostics diag.Diagnostics +} + +// SchemaValidate performs all Attribute and Block validation. +// +// TODO: Clean up this abstraction back into an internal Schema type method. +// The extra Schema parameter is a carry-over of creating the proto6server +// package from the tfsdk package and not wanting to export the method. 
+// Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/365 +func SchemaValidate(ctx context.Context, s fwschema.Schema, req ValidateSchemaRequest, resp *ValidateSchemaResponse) { + for name, attribute := range s.GetAttributes() { + + attributeReq := ValidateAttributeRequest{ + AttributePath: path.Root(name), + AttributePathExpression: path.MatchRoot(name), + Config: req.Config, + } + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + attributeResp := &ValidateAttributeResponse{} + + AttributeValidate(ctx, attribute, attributeReq, attributeResp) + + resp.Diagnostics.Append(attributeResp.Diagnostics...) + } + + for name, block := range s.GetBlocks() { + attributeReq := ValidateAttributeRequest{ + AttributePath: path.Root(name), + AttributePathExpression: path.MatchRoot(name), + Config: req.Config, + } + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + attributeResp := &ValidateAttributeResponse{} + + BlockValidate(ctx, block, attributeReq, attributeResp) + + resp.Diagnostics.Append(attributeResp.Diagnostics...) 
+ } + + if s.GetDeprecationMessage() != "" { + resp.Diagnostics.AddWarning( + "Deprecated", + s.GetDeprecationMessage(), + ) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server.go new file mode 100644 index 0000000000..4911757b44 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server.go @@ -0,0 +1,453 @@ +package fwserver + +import ( + "context" + "fmt" + "sync" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +// Server implements the framework provider server. Protocol specific +// implementations wrap this handling along with calling all request and +// response type conversions. +type Server struct { + Provider provider.Provider + + // DataSourceConfigureData is the + // [provider.ConfigureResponse.DataSourceData] field value which is passed + // to [datasource.ConfigureRequest.ProviderData]. + DataSourceConfigureData any + + // ResourceConfigureData is the + // [provider.ConfigureResponse.ResourceData] field value which is passed + // to [resource.ConfigureRequest.ProviderData]. + ResourceConfigureData any + + // dataSourceSchemas is the cached DataSource Schemas for RPCs that need to + // convert configuration data from the protocol. If not found, it will be + // fetched from the DataSourceType.GetSchema() method. + dataSourceSchemas map[string]fwschema.Schema + + // dataSourceSchemasDiags is the cached Diagnostics obtained while populating + // dataSourceSchemas. 
This is to ensure any warnings or errors are also + // returned appropriately when fetching dataSourceSchemas. + dataSourceSchemasDiags diag.Diagnostics + + // dataSourceSchemasMutex is a mutex to protect concurrent dataSourceSchemas + // access from race conditions. + dataSourceSchemasMutex sync.Mutex + + // dataSourceFuncs is the cached DataSource functions for RPCs that need to + // access data sources. If not found, it will be fetched from the + // Provider.DataSources() method. + dataSourceFuncs map[string]func() datasource.DataSource + + // dataSourceTypesDiags is the cached Diagnostics obtained while populating + // dataSourceTypes. This is to ensure any warnings or errors are also + // returned appropriately when fetching dataSourceTypes. + dataSourceTypesDiags diag.Diagnostics + + // dataSourceTypesMutex is a mutex to protect concurrent dataSourceTypes + // access from race conditions. + dataSourceTypesMutex sync.Mutex + + // providerSchema is the cached Provider Schema for RPCs that need to + // convert configuration data from the protocol. If not found, it will be + // fetched from the Provider.GetSchema() method. + providerSchema fwschema.Schema + + // providerSchemaDiags is the cached Diagnostics obtained while populating + // providerSchema. This is to ensure any warnings or errors are also + // returned appropriately when fetching providerSchema. + providerSchemaDiags diag.Diagnostics + + // providerSchemaMutex is a mutex to protect concurrent providerSchema + // access from race conditions. + providerSchemaMutex sync.Mutex + + // providerMetaSchema is the cached Provider Meta Schema for RPCs that need + // to convert configuration data from the protocol. If not found, it will + // be fetched from the Provider.GetMetaSchema() method. + providerMetaSchema fwschema.Schema + + // providerMetaSchemaDiags is the cached Diagnostics obtained while populating + // providerMetaSchema. 
This is to ensure any warnings or errors are also + // returned appropriately when fetching providerMetaSchema. + providerMetaSchemaDiags diag.Diagnostics + + // providerMetaSchemaMutex is a mutex to protect concurrent providerMetaSchema + // access from race conditions. + providerMetaSchemaMutex sync.Mutex + + // providerTypeName is the type name of the provider, if the provider + // implemented the Metadata method. + providerTypeName string + + // resourceSchemas is the cached Resource Schemas for RPCs that need to + // convert configuration data from the protocol. If not found, it will be + // fetched from the ResourceType.GetSchema() method. + resourceSchemas map[string]fwschema.Schema + + // resourceSchemasDiags is the cached Diagnostics obtained while populating + // resourceSchemas. This is to ensure any warnings or errors are also + // returned appropriately when fetching resourceSchemas. + resourceSchemasDiags diag.Diagnostics + + // resourceSchemasMutex is a mutex to protect concurrent resourceSchemas + // access from race conditions. + resourceSchemasMutex sync.Mutex + + // resourceFuncs is the cached Resource functions for RPCs that need to + // access resources. If not found, it will be fetched from the + // Provider.Resources() method. + resourceFuncs map[string]func() resource.Resource + + // resourceTypesDiags is the cached Diagnostics obtained while populating + // resourceTypes. This is to ensure any warnings or errors are also + // returned appropriately when fetching resourceTypes. + resourceTypesDiags diag.Diagnostics + + // resourceTypesMutex is a mutex to protect concurrent resourceTypes + // access from race conditions. + resourceTypesMutex sync.Mutex +} + +// DataSource returns the DataSource for a given type name. 
+func (s *Server) DataSource(ctx context.Context, typeName string) (datasource.DataSource, diag.Diagnostics) { + dataSourceFuncs, diags := s.DataSourceFuncs(ctx) + + dataSourceFunc, ok := dataSourceFuncs[typeName] + + if !ok { + diags.AddError( + "Data Source Type Not Found", + fmt.Sprintf("No data source type named %q was found in the provider.", typeName), + ) + + return nil, diags + } + + return dataSourceFunc(), diags +} + +// DataSourceFuncs returns a map of DataSource functions. The results are cached +// on first use. +func (s *Server) DataSourceFuncs(ctx context.Context) (map[string]func() datasource.DataSource, diag.Diagnostics) { + logging.FrameworkTrace(ctx, "Checking DataSourceTypes lock") + s.dataSourceTypesMutex.Lock() + defer s.dataSourceTypesMutex.Unlock() + + if s.dataSourceFuncs != nil { + return s.dataSourceFuncs, s.dataSourceTypesDiags + } + + s.dataSourceFuncs = make(map[string]func() datasource.DataSource) + + logging.FrameworkDebug(ctx, "Calling provider defined Provider DataSources") + dataSourceFuncsSlice := s.Provider.DataSources(ctx) + logging.FrameworkDebug(ctx, "Called provider defined Provider DataSources") + + for _, dataSourceFunc := range dataSourceFuncsSlice { + dataSource := dataSourceFunc() + + dataSourceTypeNameReq := datasource.MetadataRequest{ + ProviderTypeName: s.providerTypeName, + } + dataSourceTypeNameResp := datasource.MetadataResponse{} + + dataSource.Metadata(ctx, dataSourceTypeNameReq, &dataSourceTypeNameResp) + + if dataSourceTypeNameResp.TypeName == "" { + s.dataSourceTypesDiags.AddError( + "Data Source Type Name Missing", + fmt.Sprintf("The %T DataSource returned an empty string from the Metadata method. 
", dataSource)+ + "This is always an issue with the provider and should be reported to the provider developers.", + ) + continue + } + + logging.FrameworkTrace(ctx, "Found data source type", map[string]interface{}{logging.KeyDataSourceType: dataSourceTypeNameResp.TypeName}) + + if _, ok := s.dataSourceFuncs[dataSourceTypeNameResp.TypeName]; ok { + s.dataSourceTypesDiags.AddError( + "Duplicate Data Source Type Defined", + fmt.Sprintf("The %s data source type name was returned for multiple data sources. ", dataSourceTypeNameResp.TypeName)+ + "Data source type names must be unique. "+ + "This is always an issue with the provider and should be reported to the provider developers.", + ) + continue + } + + s.dataSourceFuncs[dataSourceTypeNameResp.TypeName] = dataSourceFunc + } + + return s.dataSourceFuncs, s.dataSourceTypesDiags +} + +// DataSourceSchema returns the Schema associated with the DataSourceType for +// the given type name. +func (s *Server) DataSourceSchema(ctx context.Context, typeName string) (fwschema.Schema, diag.Diagnostics) { + dataSourceSchemas, diags := s.DataSourceSchemas(ctx) + + dataSourceSchema, ok := dataSourceSchemas[typeName] + + if !ok { + diags.AddError( + "Data Source Schema Not Found", + fmt.Sprintf("No data source type named %q was found in the provider to fetch the schema. ", typeName)+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.", + ) + + return nil, diags + } + + return dataSourceSchema, diags +} + +// DataSourceSchemas returns the map of DataSourceType Schemas. The results are +// cached on first use. 
+func (s *Server) DataSourceSchemas(ctx context.Context) (map[string]fwschema.Schema, diag.Diagnostics) { + logging.FrameworkTrace(ctx, "Checking DataSourceSchemas lock") + s.dataSourceSchemasMutex.Lock() + defer s.dataSourceSchemasMutex.Unlock() + + if s.dataSourceSchemas != nil { + return s.dataSourceSchemas, s.dataSourceSchemasDiags + } + + s.dataSourceSchemas = map[string]fwschema.Schema{} + + dataSourceFuncs, diags := s.DataSourceFuncs(ctx) + + s.dataSourceSchemasDiags = diags + + for dataSourceTypeName, dataSourceFunc := range dataSourceFuncs { + dataSource := dataSourceFunc() + + schemaReq := datasource.SchemaRequest{} + schemaResp := datasource.SchemaResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined DataSource Schema", map[string]interface{}{logging.KeyDataSourceType: dataSourceTypeName}) + dataSource.Schema(ctx, schemaReq, &schemaResp) + logging.FrameworkDebug(ctx, "Called provider defined DataSource Schema", map[string]interface{}{logging.KeyDataSourceType: dataSourceTypeName}) + + s.dataSourceSchemasDiags.Append(schemaResp.Diagnostics...) + + if s.dataSourceSchemasDiags.HasError() { + return s.dataSourceSchemas, s.dataSourceSchemasDiags + } + + s.dataSourceSchemasDiags.Append(schemaResp.Schema.Validate()...) + + if s.dataSourceSchemasDiags.HasError() { + return s.dataSourceSchemas, s.dataSourceSchemasDiags + } + + s.dataSourceSchemas[dataSourceTypeName] = schemaResp.Schema + } + + return s.dataSourceSchemas, s.dataSourceSchemasDiags +} + +// ProviderSchema returns the Schema associated with the Provider. The Schema +// and Diagnostics are cached on first use. 
+func (s *Server) ProviderSchema(ctx context.Context) (fwschema.Schema, diag.Diagnostics) { + logging.FrameworkTrace(ctx, "Checking ProviderSchema lock") + s.providerSchemaMutex.Lock() + defer s.providerSchemaMutex.Unlock() + + if s.providerSchema != nil { + return s.providerSchema, s.providerSchemaDiags + } + + schemaReq := provider.SchemaRequest{} + schemaResp := provider.SchemaResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Provider Schema") + s.Provider.Schema(ctx, schemaReq, &schemaResp) + logging.FrameworkDebug(ctx, "Called provider defined Provider Schema") + + s.providerSchema = schemaResp.Schema + s.providerSchemaDiags = schemaResp.Diagnostics + + s.providerSchemaDiags.Append(schemaResp.Schema.Validate()...) + + return s.providerSchema, s.providerSchemaDiags +} + +// ProviderMetaSchema returns the Meta Schema associated with the Provider, if +// it implements the ProviderWithMetaSchema interface. The Schema and +// Diagnostics are cached on first use. +func (s *Server) ProviderMetaSchema(ctx context.Context) (fwschema.Schema, diag.Diagnostics) { + providerWithMetaSchema, ok := s.Provider.(provider.ProviderWithMetaSchema) + + if !ok { + return nil, nil + } + + logging.FrameworkTrace(ctx, "Provider implements ProviderWithMetaSchema") + logging.FrameworkTrace(ctx, "Checking ProviderMetaSchema lock") + s.providerMetaSchemaMutex.Lock() + defer s.providerMetaSchemaMutex.Unlock() + + if s.providerMetaSchema != nil { + return s.providerMetaSchema, s.providerMetaSchemaDiags + } + + req := provider.MetaSchemaRequest{} + resp := &provider.MetaSchemaResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Provider MetaSchema") + providerWithMetaSchema.MetaSchema(ctx, req, resp) + logging.FrameworkDebug(ctx, "Called provider defined Provider MetaSchema") + + s.providerMetaSchema = resp.Schema + s.providerMetaSchemaDiags = resp.Diagnostics + + s.providerMetaSchemaDiags.Append(resp.Schema.Validate()...) 
+ + return s.providerMetaSchema, s.providerMetaSchemaDiags +} + +// Resource returns the Resource for a given type name. +func (s *Server) Resource(ctx context.Context, typeName string) (resource.Resource, diag.Diagnostics) { + resourceFuncs, diags := s.ResourceFuncs(ctx) + + resourceFunc, ok := resourceFuncs[typeName] + + if !ok { + diags.AddError( + "Resource Type Not Found", + fmt.Sprintf("No resource type named %q was found in the provider.", typeName), + ) + + return nil, diags + } + + return resourceFunc(), diags +} + +// ResourceFuncs returns a map of Resource functions. The results are cached +// on first use. +func (s *Server) ResourceFuncs(ctx context.Context) (map[string]func() resource.Resource, diag.Diagnostics) { + logging.FrameworkTrace(ctx, "Checking ResourceTypes lock") + s.resourceTypesMutex.Lock() + defer s.resourceTypesMutex.Unlock() + + if s.resourceFuncs != nil { + return s.resourceFuncs, s.resourceTypesDiags + } + + s.resourceFuncs = make(map[string]func() resource.Resource) + + logging.FrameworkDebug(ctx, "Calling provider defined Provider Resources") + resourceFuncsSlice := s.Provider.Resources(ctx) + logging.FrameworkDebug(ctx, "Called provider defined Provider Resources") + + for _, resourceFunc := range resourceFuncsSlice { + res := resourceFunc() + + resourceTypeNameReq := resource.MetadataRequest{ + ProviderTypeName: s.providerTypeName, + } + resourceTypeNameResp := resource.MetadataResponse{} + + res.Metadata(ctx, resourceTypeNameReq, &resourceTypeNameResp) + + if resourceTypeNameResp.TypeName == "" { + s.resourceTypesDiags.AddError( + "Resource Type Name Missing", + fmt.Sprintf("The %T Resource returned an empty string from the Metadata method. 
", res)+ + "This is always an issue with the provider and should be reported to the provider developers.", + ) + continue + } + + logging.FrameworkTrace(ctx, "Found resource type", map[string]interface{}{logging.KeyResourceType: resourceTypeNameResp.TypeName}) + + if _, ok := s.resourceFuncs[resourceTypeNameResp.TypeName]; ok { + s.resourceTypesDiags.AddError( + "Duplicate Resource Type Defined", + fmt.Sprintf("The %s resource type name was returned for multiple resources. ", resourceTypeNameResp.TypeName)+ + "Resource type names must be unique. "+ + "This is always an issue with the provider and should be reported to the provider developers.", + ) + continue + } + + s.resourceFuncs[resourceTypeNameResp.TypeName] = resourceFunc + } + + return s.resourceFuncs, s.resourceTypesDiags +} + +// ResourceSchema returns the Schema associated with the ResourceType for +// the given type name. +func (s *Server) ResourceSchema(ctx context.Context, typeName string) (fwschema.Schema, diag.Diagnostics) { + resourceSchemas, diags := s.ResourceSchemas(ctx) + + resourceSchema, ok := resourceSchemas[typeName] + + if !ok { + diags.AddError( + "Resource Schema Not Found", + fmt.Sprintf("No resource type named %q was found in the provider to fetch the schema. ", typeName)+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.", + ) + + return nil, diags + } + + return resourceSchema, diags +} + +// ResourceSchemas returns the map of ResourceType Schemas. The results are +// cached on first use. 
+func (s *Server) ResourceSchemas(ctx context.Context) (map[string]fwschema.Schema, diag.Diagnostics) { + logging.FrameworkTrace(ctx, "Checking ResourceSchemas lock") + s.resourceSchemasMutex.Lock() + defer s.resourceSchemasMutex.Unlock() + + if s.resourceSchemas != nil { + return s.resourceSchemas, s.resourceSchemasDiags + } + + s.resourceSchemas = map[string]fwschema.Schema{} + + resourceFuncs, diags := s.ResourceFuncs(ctx) + + s.resourceSchemasDiags = diags + + for resourceTypeName, resourceFunc := range resourceFuncs { + res := resourceFunc() + + schemaReq := resource.SchemaRequest{} + schemaResp := resource.SchemaResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Resource Schema", map[string]interface{}{logging.KeyResourceType: resourceTypeName}) + res.Schema(ctx, schemaReq, &schemaResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource Schema", map[string]interface{}{logging.KeyResourceType: resourceTypeName}) + + s.resourceSchemasDiags.Append(schemaResp.Diagnostics...) + + if s.resourceSchemasDiags.HasError() { + return s.resourceSchemas, s.resourceSchemasDiags + } + + s.resourceSchemasDiags.Append(schemaResp.Schema.Validate()...) 
+ + if s.resourceSchemasDiags.HasError() { + return s.resourceSchemas, s.resourceSchemasDiags + } + + s.resourceSchemas[resourceTypeName] = schemaResp.Schema + } + + return s.resourceSchemas, s.resourceSchemasDiags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_applyresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_applyresourcechange.go new file mode 100644 index 0000000000..ef9c5ab4b5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_applyresourcechange.go @@ -0,0 +1,104 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ApplyResourceChangeRequest is the framework server request for the +// ApplyResourceChange RPC. +type ApplyResourceChangeRequest struct { + Config *tfsdk.Config + PlannedPrivate *privatestate.Data + PlannedState *tfsdk.Plan + PriorState *tfsdk.State + ProviderMeta *tfsdk.Config + ResourceSchema fwschema.Schema + Resource resource.Resource +} + +// ApplyResourceChangeResponse is the framework server response for the +// ApplyResourceChange RPC. +type ApplyResourceChangeResponse struct { + Diagnostics diag.Diagnostics + NewState *tfsdk.State + Private *privatestate.Data +} + +// ApplyResourceChange implements the framework server ApplyResourceChange RPC. 
+func (s *Server) ApplyResourceChange(ctx context.Context, req *ApplyResourceChangeRequest, resp *ApplyResourceChangeResponse) { + if req == nil { + return + } + + // If PriorState is missing/null, its a Create request. + if req.PriorState == nil || req.PriorState.Raw.IsNull() { + logging.FrameworkTrace(ctx, "ApplyResourceChange received no PriorState, running CreateResource") + + createReq := &CreateResourceRequest{ + Config: req.Config, + PlannedPrivate: req.PlannedPrivate, + PlannedState: req.PlannedState, + ProviderMeta: req.ProviderMeta, + ResourceSchema: req.ResourceSchema, + Resource: req.Resource, + } + createResp := &CreateResourceResponse{} + + s.CreateResource(ctx, createReq, createResp) + + resp.Diagnostics = createResp.Diagnostics + resp.NewState = createResp.NewState + resp.Private = createResp.Private + + return + } + + // If PlannedState is missing/null, its a Delete request. + if req.PlannedState == nil || req.PlannedState.Raw.IsNull() { + logging.FrameworkTrace(ctx, "ApplyResourceChange received no PlannedState, running DeleteResource") + + deleteReq := &DeleteResourceRequest{ + PlannedPrivate: req.PlannedPrivate, + PriorState: req.PriorState, + ProviderMeta: req.ProviderMeta, + ResourceSchema: req.ResourceSchema, + Resource: req.Resource, + } + deleteResp := &DeleteResourceResponse{} + + s.DeleteResource(ctx, deleteReq, deleteResp) + + resp.Diagnostics = deleteResp.Diagnostics + resp.NewState = deleteResp.NewState + resp.Private = deleteResp.Private + + return + } + + // Otherwise, assume its an Update request. 
+ logging.FrameworkTrace(ctx, "ApplyResourceChange running UpdateResource") + + updateReq := &UpdateResourceRequest{ + Config: req.Config, + PlannedPrivate: req.PlannedPrivate, + PlannedState: req.PlannedState, + PriorState: req.PriorState, + ProviderMeta: req.ProviderMeta, + ResourceSchema: req.ResourceSchema, + Resource: req.Resource, + } + updateResp := &UpdateResourceResponse{} + + s.UpdateResource(ctx, updateReq, updateResp) + + resp.Diagnostics = updateResp.Diagnostics + resp.NewState = updateResp.NewState + resp.Private = updateResp.Private +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_capabilities.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_capabilities.go new file mode 100644 index 0000000000..eef3fc6f07 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_capabilities.go @@ -0,0 +1,14 @@ +package fwserver + +// ServerCapabilities is a combination of tfprotov5.ServerCapabilities and +// tfprotov6.ServerCapabilties, which may diverge over time. If that happens, +// the toproto5 conversion logic will handle the appropriate filtering and the +// proto5server/fwserver logic will need to account for missing features. +type ServerCapabilities struct { + // PlanDestroy signals that the provider is ready for the + // PlanResourceChange RPC on resource destruction. + // + // This should always be enabled in framework providers and requires + // Terraform 1.3 or later. 
+ PlanDestroy bool +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_configureprovider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_configureprovider.go new file mode 100644 index 0000000000..059300e8e8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_configureprovider.go @@ -0,0 +1,24 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/provider" +) + +// ConfigureProvider implements the framework server ConfigureProvider RPC. +func (s *Server) ConfigureProvider(ctx context.Context, req *provider.ConfigureRequest, resp *provider.ConfigureResponse) { + logging.FrameworkDebug(ctx, "Calling provider defined Provider Configure") + + if req != nil { + s.Provider.Configure(ctx, *req, resp) + } else { + s.Provider.Configure(ctx, provider.ConfigureRequest{}, resp) + } + + logging.FrameworkDebug(ctx, "Called provider defined Provider Configure") + + s.DataSourceConfigureData = resp.DataSourceData + s.ResourceConfigureData = resp.ResourceData +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_createresource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_createresource.go new file mode 100644 index 0000000000..85d3973d58 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_createresource.go @@ -0,0 +1,126 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + 
"github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// CreateResourceRequest is the framework server request for a create request +// with the ApplyResourceChange RPC. +type CreateResourceRequest struct { + Config *tfsdk.Config + PlannedPrivate *privatestate.Data + PlannedState *tfsdk.Plan + ProviderMeta *tfsdk.Config + ResourceSchema fwschema.Schema + Resource resource.Resource +} + +// CreateResourceResponse is the framework server response for a create request +// with the ApplyResourceChange RPC. +type CreateResourceResponse struct { + Diagnostics diag.Diagnostics + NewState *tfsdk.State + Private *privatestate.Data +} + +// CreateResource implements the framework server create request logic for the +// ApplyResourceChange RPC. +func (s *Server) CreateResource(ctx context.Context, req *CreateResourceRequest, resp *CreateResourceResponse) { + if req == nil { + return + } + + if _, ok := req.Resource.(resource.ResourceWithConfigure); ok { + logging.FrameworkTrace(ctx, "Resource implements ResourceWithConfigure") + + configureReq := resource.ConfigureRequest{ + ProviderData: s.ResourceConfigureData, + } + configureResp := resource.ConfigureResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Resource Configure") + req.Resource.(resource.ResourceWithConfigure).Configure(ctx, configureReq, &configureResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource Configure") + + resp.Diagnostics.Append(configureResp.Diagnostics...) 
+ + if resp.Diagnostics.HasError() { + return + } + } + + nullSchemaData := tftypes.NewValue(req.ResourceSchema.Type().TerraformType(ctx), nil) + + createReq := resource.CreateRequest{ + Config: tfsdk.Config{ + Schema: req.ResourceSchema, + Raw: nullSchemaData, + }, + Plan: tfsdk.Plan{ + Schema: req.ResourceSchema, + Raw: nullSchemaData, + }, + } + + privateProviderData := privatestate.EmptyProviderData(ctx) + + createResp := resource.CreateResponse{ + State: tfsdk.State{ + Schema: req.ResourceSchema, + Raw: nullSchemaData, + }, + Private: privateProviderData, + } + + if req.Config != nil { + createReq.Config = *req.Config + } + + if req.PlannedState != nil { + createReq.Plan = *req.PlannedState + } + + if req.ProviderMeta != nil { + createReq.ProviderMeta = *req.ProviderMeta + } + + logging.FrameworkDebug(ctx, "Calling provider defined Resource Create") + req.Resource.Create(ctx, createReq, &createResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource Create") + + resp.Diagnostics = createResp.Diagnostics + resp.NewState = &createResp.State + + if !resp.Diagnostics.HasError() && createResp.State.Raw.Equal(nullSchemaData) { + detail := "The Terraform Provider unexpectedly returned no resource state after having no errors in the resource creation. " + + "This is always an issue in the Terraform Provider and should be reported to the provider developers.\n\n" + + "The resource may have been successfully created, but Terraform is not tracking it. " + + "Applying the configuration again with no other action may result in duplicate resource errors." + + if _, ok := req.Resource.(resource.ResourceWithImportState); ok { + detail += " Import the resource if the resource was actually created and Terraform should be tracking it." 
+ } + + resp.Diagnostics.AddError( + "Missing Resource State After Create", + detail, + ) + } + + if createResp.Private != nil { + if resp.Private == nil { + resp.Private = &privatestate.Data{} + } + + resp.Private.Provider = createResp.Private + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_deleteresource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_deleteresource.go new file mode 100644 index 0000000000..1dbcb5a6b9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_deleteresource.go @@ -0,0 +1,97 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// DeleteResourceRequest is the framework server request for a delete request +// with the ApplyResourceChange RPC. +type DeleteResourceRequest struct { + PlannedPrivate *privatestate.Data + PriorState *tfsdk.State + ProviderMeta *tfsdk.Config + ResourceSchema fwschema.Schema + Resource resource.Resource +} + +// DeleteResourceResponse is the framework server response for a delete request +// with the ApplyResourceChange RPC. +type DeleteResourceResponse struct { + Diagnostics diag.Diagnostics + NewState *tfsdk.State + Private *privatestate.Data +} + +// DeleteResource implements the framework server delete request logic for the +// ApplyResourceChange RPC. 
+func (s *Server) DeleteResource(ctx context.Context, req *DeleteResourceRequest, resp *DeleteResourceResponse) { + if req == nil { + return + } + + if _, ok := req.Resource.(resource.ResourceWithConfigure); ok { + logging.FrameworkTrace(ctx, "Resource implements ResourceWithConfigure") + + configureReq := resource.ConfigureRequest{ + ProviderData: s.ResourceConfigureData, + } + configureResp := resource.ConfigureResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Resource Configure") + req.Resource.(resource.ResourceWithConfigure).Configure(ctx, configureReq, &configureResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource Configure") + + resp.Diagnostics.Append(configureResp.Diagnostics...) + + if resp.Diagnostics.HasError() { + return + } + } + + deleteReq := resource.DeleteRequest{ + State: tfsdk.State{ + Schema: req.ResourceSchema, + Raw: tftypes.NewValue(req.ResourceSchema.Type().TerraformType(ctx), nil), + }, + } + deleteResp := resource.DeleteResponse{ + State: tfsdk.State{ + Schema: req.ResourceSchema, + Raw: tftypes.NewValue(req.ResourceSchema.Type().TerraformType(ctx), nil), + }, + } + + if req.PriorState != nil { + deleteReq.State = *req.PriorState + deleteResp.State = *req.PriorState + } + + if req.ProviderMeta != nil { + deleteReq.ProviderMeta = *req.ProviderMeta + } + + if req.PlannedPrivate != nil { + deleteReq.Private = req.PlannedPrivate.Provider + } + + logging.FrameworkDebug(ctx, "Calling provider defined Resource Delete") + req.Resource.Delete(ctx, deleteReq, &deleteResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource Delete") + + if !deleteResp.Diagnostics.HasError() { + logging.FrameworkTrace(ctx, "No provider defined Delete errors detected, ensuring State is cleared") + deleteResp.State.RemoveResource(ctx) + } + + resp.Diagnostics = deleteResp.Diagnostics + resp.NewState = &deleteResp.State +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_getproviderschema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_getproviderschema.go new file mode 100644 index 0000000000..88c870ff96 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_getproviderschema.go @@ -0,0 +1,81 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/provider" +) + +// GetProviderSchemaRequest is the framework server request for the +// GetProviderSchema RPC. +type GetProviderSchemaRequest struct{} + +// GetProviderSchemaResponse is the framework server response for the +// GetProviderSchema RPC. +type GetProviderSchemaResponse struct { + ServerCapabilities *ServerCapabilities + Provider fwschema.Schema + ProviderMeta fwschema.Schema + ResourceSchemas map[string]fwschema.Schema + DataSourceSchemas map[string]fwschema.Schema + Diagnostics diag.Diagnostics +} + +// GetProviderSchema implements the framework server GetProviderSchema RPC. +func (s *Server) GetProviderSchema(ctx context.Context, req *GetProviderSchemaRequest, resp *GetProviderSchemaResponse) { + resp.ServerCapabilities = &ServerCapabilities{ + PlanDestroy: true, + } + + metadataReq := provider.MetadataRequest{} + metadataResp := provider.MetadataResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Provider Metadata") + s.Provider.Metadata(ctx, metadataReq, &metadataResp) + logging.FrameworkDebug(ctx, "Called provider defined Provider Metadata") + + s.providerTypeName = metadataResp.TypeName + + providerSchema, diags := s.ProviderSchema(ctx) + + resp.Diagnostics.Append(diags...) 
+ + if diags.HasError() { + return + } + + resp.Provider = providerSchema + + providerMetaSchema, diags := s.ProviderMetaSchema(ctx) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + resp.ProviderMeta = providerMetaSchema + + resourceSchemas, diags := s.ResourceSchemas(ctx) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + resp.ResourceSchemas = resourceSchemas + + dataSourceSchemas, diags := s.DataSourceSchemas(ctx) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + resp.DataSourceSchemas = dataSourceSchemas +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_importresourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_importresourcestate.go new file mode 100644 index 0000000000..0c70c7a3a4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_importresourcestate.go @@ -0,0 +1,135 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ImportedResource represents a resource that was imported. +type ImportedResource struct { + Private *privatestate.Data + State tfsdk.State + TypeName string +} + +// ImportResourceStateRequest is the framework server request for the +// ImportResourceState RPC. +type ImportResourceStateRequest struct { + ID string + Resource resource.Resource + + // EmptyState is an empty State for the resource schema. 
This is used to + // initialize the ImportedResource State of the ImportResourceStateResponse + // and allow the framework server to verify that the provider updated the + // state after the provider defined logic. + EmptyState tfsdk.State + + // TypeName is the resource type name, which is necessary for populating + // the ImportedResource TypeName of the ImportResourceStateResponse. + TypeName string +} + +// ImportResourceStateResponse is the framework server response for the +// ImportResourceState RPC. +type ImportResourceStateResponse struct { + Diagnostics diag.Diagnostics + ImportedResources []ImportedResource +} + +// ImportResourceState implements the framework server ImportResourceState RPC. +func (s *Server) ImportResourceState(ctx context.Context, req *ImportResourceStateRequest, resp *ImportResourceStateResponse) { + if req == nil { + return + } + + if _, ok := req.Resource.(resource.ResourceWithConfigure); ok { + logging.FrameworkTrace(ctx, "Resource implements ResourceWithConfigure") + + configureReq := resource.ConfigureRequest{ + ProviderData: s.ResourceConfigureData, + } + configureResp := resource.ConfigureResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Resource Configure") + req.Resource.(resource.ResourceWithConfigure).Configure(ctx, configureReq, &configureResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource Configure") + + resp.Diagnostics.Append(configureResp.Diagnostics...) + + if resp.Diagnostics.HasError() { + return + } + } + + resourceWithImportState, ok := req.Resource.(resource.ResourceWithImportState) + + if !ok { + // If there is a feature request for customizing this messaging, + // provider developers can implement a ImportState method that + // immediately returns a custom error diagnostic. 
+ // + // However, implementing the ImportState method could cause issues + // with automated documentation generation, which likely would check + // if the resource implements the ResourceWithImportState interface. + // Instead, a separate "ResourceWithoutImportState" interface could be + // created with a method such as: + // ImportNotImplementedMessage(context.Context) string. + resp.Diagnostics.AddError( + "Resource Import Not Implemented", + "This resource does not support import. Please contact the provider developer for additional information.", + ) + return + } + + importReq := resource.ImportStateRequest{ + ID: req.ID, + } + + privateProviderData := privatestate.EmptyProviderData(ctx) + + importResp := resource.ImportStateResponse{ + State: tfsdk.State{ + Raw: req.EmptyState.Raw.Copy(), + Schema: req.EmptyState.Schema, + }, + Private: privateProviderData, + } + + logging.FrameworkDebug(ctx, "Calling provider defined Resource ImportState") + resourceWithImportState.ImportState(ctx, importReq, &importResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource ImportState") + + resp.Diagnostics.Append(importResp.Diagnostics...) + + if resp.Diagnostics.HasError() { + return + } + + if importResp.State.Raw.Equal(req.EmptyState.Raw) { + resp.Diagnostics.AddError( + "Missing Resource Import State", + "An unexpected error was encountered when importing the resource. This is always a problem with the provider. Please give the following information to the provider developer:\n\n"+ + "Resource ImportState method returned no State in response. 
If import is intentionally not supported, remove the Resource type ImportState method or return an error.", + ) + return + } + + private := &privatestate.Data{} + + if importResp.Private != nil { + private.Provider = importResp.Private + } + + resp.ImportedResources = []ImportedResource{ + { + State: importResp.State, + TypeName: req.TypeName, + Private: private, + }, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_planresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_planresourcechange.go new file mode 100644 index 0000000000..bc8aff65e8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_planresourcechange.go @@ -0,0 +1,360 @@ +package fwserver + +import ( + "context" + "errors" + "fmt" + "sort" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// PlanResourceChangeRequest is the framework server request for the +// PlanResourceChange RPC. +type PlanResourceChangeRequest struct { + Config *tfsdk.Config + PriorPrivate *privatestate.Data + PriorState *tfsdk.State + ProposedNewState *tfsdk.Plan + ProviderMeta *tfsdk.Config + ResourceSchema fwschema.Schema + Resource resource.Resource +} + +// PlanResourceChangeResponse is the framework server response for the +// PlanResourceChange RPC. 
+type PlanResourceChangeResponse struct { + Diagnostics diag.Diagnostics + PlannedPrivate *privatestate.Data + PlannedState *tfsdk.State + RequiresReplace path.Paths +} + +// PlanResourceChange implements the framework server PlanResourceChange RPC. +func (s *Server) PlanResourceChange(ctx context.Context, req *PlanResourceChangeRequest, resp *PlanResourceChangeResponse) { + if req == nil { + return + } + + if _, ok := req.Resource.(resource.ResourceWithConfigure); ok { + logging.FrameworkTrace(ctx, "Resource implements ResourceWithConfigure") + + configureReq := resource.ConfigureRequest{ + ProviderData: s.ResourceConfigureData, + } + configureResp := resource.ConfigureResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Resource Configure") + req.Resource.(resource.ResourceWithConfigure).Configure(ctx, configureReq, &configureResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource Configure") + + resp.Diagnostics.Append(configureResp.Diagnostics...) + + if resp.Diagnostics.HasError() { + return + } + } + + nullTfValue := tftypes.NewValue(req.ResourceSchema.Type().TerraformType(ctx), nil) + + // Prevent potential panics by ensuring incoming Config/Plan/State are null + // instead of nil. + if req.Config == nil { + req.Config = &tfsdk.Config{ + Raw: nullTfValue, + Schema: req.ResourceSchema, + } + } + + if req.ProposedNewState == nil { + req.ProposedNewState = &tfsdk.Plan{ + Raw: nullTfValue, + Schema: req.ResourceSchema, + } + } + + if req.PriorState == nil { + req.PriorState = &tfsdk.State{ + Raw: nullTfValue, + Schema: req.ResourceSchema, + } + } + + // Ensure that resp.PlannedPrivate is never nil. + resp.PlannedPrivate = privatestate.EmptyData(ctx) + + if req.PriorPrivate != nil { + // Overwrite resp.PlannedPrivate with req.PriorPrivate providing + // it is not nil. + resp.PlannedPrivate = req.PriorPrivate + + // Ensure that resp.PlannedPrivate.Provider is never nil. 
+ if resp.PlannedPrivate.Provider == nil { + resp.PlannedPrivate.Provider = privatestate.EmptyProviderData(ctx) + } + } + + resp.PlannedState = planToState(*req.ProposedNewState) + + // Execute any AttributePlanModifiers. + // + // This pass is before any Computed-only attributes are marked as unknown + // to ensure any plan changes will trigger that behavior. These plan + // modifiers are run again after that marking to allow setting values + // and preventing extraneous plan differences. + // + // We only do this if there's a plan to modify; otherwise, it + // represents a resource being deleted and there's no point. + // + // TODO: Enabling this pass will generate the following test error: + // + // --- FAIL: TestServerPlanResourceChange/two_modifyplan_add_list_elem (0.00s) + // serve_test.go:3303: An unexpected error was encountered trying to read an attribute from the configuration. This is always an error in the provider. Please report the following to the provider developer: + // + // ElementKeyInt(1).AttributeName("name") still remains in the path: step cannot be applied to this value + // + // To fix this, (Config).GetAttribute() should return nil instead of the error. + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/183 + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/150 + // See also: https://github.com/hashicorp/terraform-plugin-framework/pull/167 + + // Execute any resource-level ModifyPlan method. + // + // This pass is before any Computed-only attributes are marked as unknown + // to ensure any plan changes will trigger that behavior. These plan + // modifiers be run again after that marking to allow setting values and + // preventing extraneous plan differences. 
+ // + // TODO: Enabling this pass will generate the following test error: + // + // --- FAIL: TestServerPlanResourceChange/two_modifyplan_add_list_elem (0.00s) + // serve_test.go:3303: An unexpected error was encountered trying to read an attribute from the configuration. This is always an error in the provider. Please report the following to the provider developer: + // + // ElementKeyInt(1).AttributeName("name") still remains in the path: step cannot be applied to this value + // + // To fix this, (Config).GetAttribute() should return nil instead of the error. + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/183 + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/150 + // See also: https://github.com/hashicorp/terraform-plugin-framework/pull/167 + + // After ensuring there are proposed changes, mark any computed attributes + // that are null in the config as unknown in the plan, so providers have + // the choice to update them. + // + // Later attribute and resource plan modifier passes can override the + // unknown with a known value using any plan modifiers. + // + // We only do this if there's a plan to modify; otherwise, it + // represents a resource being deleted and there's no point. + if !resp.PlannedState.Raw.IsNull() && !resp.PlannedState.Raw.Equal(req.PriorState.Raw) { + logging.FrameworkTrace(ctx, "Marking Computed null Config values as unknown in Plan") + + modifiedPlan, err := tftypes.Transform(resp.PlannedState.Raw, MarkComputedNilsAsUnknown(ctx, req.Config.Raw, req.ResourceSchema)) + + if err != nil { + resp.Diagnostics.AddError( + "Error modifying plan", + "There was an unexpected error updating the plan. This is always a problem with the provider. 
Please report the following to the provider developer:\n\n"+err.Error(), + ) + + return + } + + if !resp.PlannedState.Raw.Equal(modifiedPlan) { + logging.FrameworkTrace(ctx, "At least one Computed null Config value was changed to unknown") + } + + resp.PlannedState.Raw = modifiedPlan + } + + // Execute any AttributePlanModifiers again. This allows overwriting + // any unknown values. + // + // We only do this if there's a plan to modify; otherwise, it + // represents a resource being deleted and there's no point. + if !resp.PlannedState.Raw.IsNull() { + modifySchemaPlanReq := ModifySchemaPlanRequest{ + Config: *req.Config, + Plan: stateToPlan(*resp.PlannedState), + State: *req.PriorState, + Private: resp.PlannedPrivate.Provider, + } + + if req.ProviderMeta != nil { + modifySchemaPlanReq.ProviderMeta = *req.ProviderMeta + } + + modifySchemaPlanResp := ModifySchemaPlanResponse{ + Diagnostics: resp.Diagnostics, + Plan: modifySchemaPlanReq.Plan, + Private: modifySchemaPlanReq.Private, + } + + SchemaModifyPlan(ctx, req.ResourceSchema, modifySchemaPlanReq, &modifySchemaPlanResp) + + resp.Diagnostics = modifySchemaPlanResp.Diagnostics + resp.PlannedState = planToState(modifySchemaPlanResp.Plan) + resp.RequiresReplace = append(resp.RequiresReplace, modifySchemaPlanResp.RequiresReplace...) + resp.PlannedPrivate.Provider = modifySchemaPlanResp.Private + + if resp.Diagnostics.HasError() { + return + } + } + + // Execute any resource-level ModifyPlan method again. This allows + // overwriting any unknown values. + // + // We do this regardless of whether the plan is null or not, because we + // want resources to be able to return diagnostics when planning to + // delete resources, e.g. 
to inform practitioners that the resource + // _can't_ be deleted in the API and will just be removed from + // Terraform's state + if resourceWithModifyPlan, ok := req.Resource.(resource.ResourceWithModifyPlan); ok { + logging.FrameworkTrace(ctx, "Resource implements ResourceWithModifyPlan") + + modifyPlanReq := resource.ModifyPlanRequest{ + Config: *req.Config, + Plan: stateToPlan(*resp.PlannedState), + State: *req.PriorState, + Private: resp.PlannedPrivate.Provider, + } + + if req.ProviderMeta != nil { + modifyPlanReq.ProviderMeta = *req.ProviderMeta + } + + modifyPlanResp := resource.ModifyPlanResponse{ + Diagnostics: resp.Diagnostics, + Plan: modifyPlanReq.Plan, + RequiresReplace: path.Paths{}, + Private: modifyPlanReq.Private, + } + + logging.FrameworkDebug(ctx, "Calling provider defined Resource ModifyPlan") + resourceWithModifyPlan.ModifyPlan(ctx, modifyPlanReq, &modifyPlanResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource ModifyPlan") + + resp.Diagnostics = modifyPlanResp.Diagnostics + resp.PlannedState = planToState(modifyPlanResp.Plan) + resp.RequiresReplace = append(resp.RequiresReplace, modifyPlanResp.RequiresReplace...) + resp.PlannedPrivate.Provider = modifyPlanResp.Private + } + + // Ensure deterministic RequiresReplace by sorting and deduplicating + resp.RequiresReplace = NormaliseRequiresReplace(ctx, resp.RequiresReplace) + + // If this was a destroy resource plan, ensure the plan remained null. + if req.ProposedNewState.Raw.IsNull() && !resp.PlannedState.Raw.IsNull() { + resp.Diagnostics.AddError( + "Unexpected Planned Resource State on Destroy", + "The Terraform Provider unexpectedly returned resource state data when the resource was planned for destruction. 
"+ + "This is always an issue in the Terraform Provider and should be reported to the provider developers.\n\n"+ + "Ensure all resource plan modifiers do not attempt to change resource plan data from being a null value if the request plan is a null value.", + ) + } +} + +func MarkComputedNilsAsUnknown(ctx context.Context, config tftypes.Value, resourceSchema fwschema.Schema) func(*tftypes.AttributePath, tftypes.Value) (tftypes.Value, error) { + return func(path *tftypes.AttributePath, val tftypes.Value) (tftypes.Value, error) { + ctx = logging.FrameworkWithAttributePath(ctx, path.String()) + + // we are only modifying attributes, not the entire resource + if len(path.Steps()) < 1 { + return val, nil + } + + configVal, _, err := tftypes.WalkAttributePath(config, path) + + if err != tftypes.ErrInvalidStep && err != nil { + logging.FrameworkError(ctx, "error walking attribute path") + return val, err + } else if err != tftypes.ErrInvalidStep && !configVal.(tftypes.Value).IsNull() { + logging.FrameworkTrace(ctx, "attribute not null in config, not marking unknown") + return val, nil + } + + attribute, err := resourceSchema.AttributeAtTerraformPath(ctx, path) + + if err != nil { + if errors.Is(err, fwschema.ErrPathInsideAtomicAttribute) { + // ignore attributes/elements inside schema.Attributes, they have no schema of their own + logging.FrameworkTrace(ctx, "attribute is a non-schema attribute, not marking unknown") + return val, nil + } + + if errors.Is(err, fwschema.ErrPathIsBlock) { + // ignore blocks, they do not have a computed field + logging.FrameworkTrace(ctx, "attribute is a block, not marking unknown") + return val, nil + } + + logging.FrameworkError(ctx, "couldn't find attribute in resource schema") + + return tftypes.Value{}, fmt.Errorf("couldn't find attribute in resource schema: %w", err) + } + if !attribute.IsComputed() { + logging.FrameworkTrace(ctx, "attribute is not computed in schema, not marking unknown") + + return val, nil + } + + 
logging.FrameworkDebug(ctx, "marking computed attribute that is null in the config as unknown") + + return tftypes.NewValue(val.Type(), tftypes.UnknownValue), nil + } +} + +// NormaliseRequiresReplace sorts and deduplicates the slice of AttributePaths +// used in the RequiresReplace response field. +// Sorting is lexical based on the string representation of each AttributePath. +func NormaliseRequiresReplace(ctx context.Context, rs path.Paths) path.Paths { + if len(rs) < 2 { + return rs + } + + sort.Slice(rs, func(i, j int) bool { + return rs[i].String() < rs[j].String() + }) + + ret := make(path.Paths, len(rs)) + ret[0] = rs[0] + + // deduplicate + j := 1 + + for i := 1; i < len(rs); i++ { + if rs[i].Equal(ret[j-1]) { + logging.FrameworkDebug(ctx, "attribute found multiple times in RequiresReplace, removing duplicate", map[string]interface{}{logging.KeyAttributePath: rs[i]}) + continue + } + ret[j] = rs[i] + j++ + } + + return ret[:j] +} + +// planToState returns a *tfsdk.State with a copied value from a tfsdk.Plan. +func planToState(plan tfsdk.Plan) *tfsdk.State { + return &tfsdk.State{ + Raw: plan.Raw.Copy(), + Schema: plan.Schema, + } +} + +// stateToPlan returns a tfsdk.Plan with a copied value from a tfsdk.State. 
+func stateToPlan(state tfsdk.State) tfsdk.Plan { + return tfsdk.Plan{ + Raw: state.Raw.Copy(), + Schema: state.Schema, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_readdatasource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_readdatasource.go new file mode 100644 index 0000000000..4c9e8a8193 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_readdatasource.go @@ -0,0 +1,80 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ReadDataSourceRequest is the framework server request for the +// ReadDataSource RPC. +type ReadDataSourceRequest struct { + Config *tfsdk.Config + DataSourceSchema fwschema.Schema + DataSource datasource.DataSource + ProviderMeta *tfsdk.Config +} + +// ReadDataSourceResponse is the framework server response for the +// ReadDataSource RPC. +type ReadDataSourceResponse struct { + Diagnostics diag.Diagnostics + State *tfsdk.State +} + +// ReadDataSource implements the framework server ReadDataSource RPC. 
+func (s *Server) ReadDataSource(ctx context.Context, req *ReadDataSourceRequest, resp *ReadDataSourceResponse) { + if req == nil { + return + } + + if _, ok := req.DataSource.(datasource.DataSourceWithConfigure); ok { + logging.FrameworkTrace(ctx, "DataSource implements DataSourceWithConfigure") + + configureReq := datasource.ConfigureRequest{ + ProviderData: s.DataSourceConfigureData, + } + configureResp := datasource.ConfigureResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined DataSource Configure") + req.DataSource.(datasource.DataSourceWithConfigure).Configure(ctx, configureReq, &configureResp) + logging.FrameworkDebug(ctx, "Called provider defined DataSource Configure") + + resp.Diagnostics.Append(configureResp.Diagnostics...) + + if resp.Diagnostics.HasError() { + return + } + } + + readReq := datasource.ReadRequest{ + Config: tfsdk.Config{ + Schema: req.DataSourceSchema, + }, + } + readResp := datasource.ReadResponse{ + State: tfsdk.State{ + Schema: req.DataSourceSchema, + }, + } + + if req.Config != nil { + readReq.Config = *req.Config + readResp.State.Raw = req.Config.Raw.Copy() + } + + if req.ProviderMeta != nil { + readReq.ProviderMeta = *req.ProviderMeta + } + + logging.FrameworkDebug(ctx, "Calling provider defined DataSource Read") + req.DataSource.Read(ctx, readReq, &readResp) + logging.FrameworkDebug(ctx, "Called provider defined DataSource Read") + + resp.Diagnostics = readResp.Diagnostics + resp.State = &readResp.State +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_readresource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_readresource.go new file mode 100644 index 0000000000..ab94e447df --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_readresource.go @@ -0,0 +1,110 @@ +package fwserver + +import ( + "context" + + 
"github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ReadResourceRequest is the framework server request for the +// ReadResource RPC. +type ReadResourceRequest struct { + CurrentState *tfsdk.State + Resource resource.Resource + Private *privatestate.Data + ProviderMeta *tfsdk.Config +} + +// ReadResourceResponse is the framework server response for the +// ReadResource RPC. +type ReadResourceResponse struct { + Diagnostics diag.Diagnostics + NewState *tfsdk.State + Private *privatestate.Data +} + +// ReadResource implements the framework server ReadResource RPC. +func (s *Server) ReadResource(ctx context.Context, req *ReadResourceRequest, resp *ReadResourceResponse) { + if req == nil { + return + } + + if req.CurrentState == nil { + resp.Diagnostics.AddError( + "Unexpected Read Request", + "An unexpected error was encountered when reading the resource. The current state was missing.\n\n"+ + "This is always a problem with Terraform or terraform-plugin-framework. Please report this to the provider developer.", + ) + + return + } + + if _, ok := req.Resource.(resource.ResourceWithConfigure); ok { + logging.FrameworkTrace(ctx, "Resource implements ResourceWithConfigure") + + configureReq := resource.ConfigureRequest{ + ProviderData: s.ResourceConfigureData, + } + configureResp := resource.ConfigureResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Resource Configure") + req.Resource.(resource.ResourceWithConfigure).Configure(ctx, configureReq, &configureResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource Configure") + + resp.Diagnostics.Append(configureResp.Diagnostics...) 
+ + if resp.Diagnostics.HasError() { + return + } + } + + readReq := resource.ReadRequest{ + State: tfsdk.State{ + Schema: req.CurrentState.Schema, + Raw: req.CurrentState.Raw.Copy(), + }, + } + readResp := resource.ReadResponse{ + State: tfsdk.State{ + Schema: req.CurrentState.Schema, + Raw: req.CurrentState.Raw.Copy(), + }, + } + + if req.ProviderMeta != nil { + readReq.ProviderMeta = *req.ProviderMeta + } + + privateProviderData := privatestate.EmptyProviderData(ctx) + + readReq.Private = privateProviderData + readResp.Private = privateProviderData + + if req.Private != nil { + if req.Private.Provider != nil { + readReq.Private = req.Private.Provider + readResp.Private = req.Private.Provider + } + + resp.Private = req.Private + } + + logging.FrameworkDebug(ctx, "Calling provider defined Resource Read") + req.Resource.Read(ctx, readReq, &readResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource Read") + + resp.Diagnostics = readResp.Diagnostics + resp.NewState = &readResp.State + + if readResp.Private != nil { + if resp.Private == nil { + resp.Private = &privatestate.Data{} + } + + resp.Private.Provider = readResp.Private + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_updateresource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_updateresource.go new file mode 100644 index 0000000000..323b31fc2d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_updateresource.go @@ -0,0 +1,139 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + 
"github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// UpdateResourceRequest is the framework server request for an update request +// with the ApplyResourceChange RPC. +type UpdateResourceRequest struct { + Config *tfsdk.Config + PlannedPrivate *privatestate.Data + PlannedState *tfsdk.Plan + PriorState *tfsdk.State + ProviderMeta *tfsdk.Config + ResourceSchema fwschema.Schema + Resource resource.Resource +} + +// UpdateResourceResponse is the framework server response for an update request +// with the ApplyResourceChange RPC. +type UpdateResourceResponse struct { + Diagnostics diag.Diagnostics + NewState *tfsdk.State + Private *privatestate.Data +} + +// UpdateResource implements the framework server update request logic for the +// ApplyResourceChange RPC. +func (s *Server) UpdateResource(ctx context.Context, req *UpdateResourceRequest, resp *UpdateResourceResponse) { + if req == nil { + return + } + + if _, ok := req.Resource.(resource.ResourceWithConfigure); ok { + logging.FrameworkTrace(ctx, "Resource implements ResourceWithConfigure") + + configureReq := resource.ConfigureRequest{ + ProviderData: s.ResourceConfigureData, + } + configureResp := resource.ConfigureResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Resource Configure") + req.Resource.(resource.ResourceWithConfigure).Configure(ctx, configureReq, &configureResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource Configure") + + resp.Diagnostics.Append(configureResp.Diagnostics...) 
+ + if resp.Diagnostics.HasError() { + return + } + } + + nullSchemaData := tftypes.NewValue(req.ResourceSchema.Type().TerraformType(ctx), nil) + + updateReq := resource.UpdateRequest{ + Config: tfsdk.Config{ + Schema: req.ResourceSchema, + Raw: nullSchemaData, + }, + Plan: tfsdk.Plan{ + Schema: req.ResourceSchema, + Raw: nullSchemaData, + }, + State: tfsdk.State{ + Schema: req.ResourceSchema, + Raw: nullSchemaData, + }, + } + updateResp := resource.UpdateResponse{ + State: tfsdk.State{ + Schema: req.ResourceSchema, + Raw: nullSchemaData, + }, + } + + if req.Config != nil { + updateReq.Config = *req.Config + } + + if req.PlannedState != nil { + updateReq.Plan = *req.PlannedState + } + + if req.PriorState != nil { + updateReq.State = *req.PriorState + // Require explicit provider updates for tracking successful updates. + updateResp.State = *req.PriorState + } + + if req.ProviderMeta != nil { + updateReq.ProviderMeta = *req.ProviderMeta + } + + privateProviderData := privatestate.EmptyProviderData(ctx) + + updateReq.Private = privateProviderData + updateResp.Private = privateProviderData + + if req.PlannedPrivate != nil { + if req.PlannedPrivate.Provider != nil { + updateReq.Private = req.PlannedPrivate.Provider + updateResp.Private = req.PlannedPrivate.Provider + } + + resp.Private = req.PlannedPrivate + } + + logging.FrameworkDebug(ctx, "Calling provider defined Resource Update") + req.Resource.Update(ctx, updateReq, &updateResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource Update") + + resp.Diagnostics = updateResp.Diagnostics + resp.NewState = &updateResp.State + + if !resp.Diagnostics.HasError() && updateResp.State.Raw.Equal(nullSchemaData) { + resp.Diagnostics.AddError( + "Missing Resource State After Update", + "The Terraform Provider unexpectedly returned no resource state after having no errors in the resource update. 
"+ + "This is always an issue in the Terraform Provider and should be reported to the provider developers.", + ) + } + + if updateResp.Private != nil { + if resp.Private == nil { + resp.Private = &privatestate.Data{} + } + + resp.Private.Provider = updateResp.Private + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_upgraderesourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_upgraderesourcestate.go new file mode 100644 index 0000000000..8d31cae7b7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_upgraderesourcestate.go @@ -0,0 +1,237 @@ +package fwserver + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// UpgradeResourceStateRequest is the framework server request for the +// UpgradeResourceState RPC. +type UpgradeResourceStateRequest struct { + // TODO: Create framework defined type that is not protocol specific. + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/340 + RawState *tfprotov6.RawState + + ResourceSchema fwschema.Schema + Resource resource.Resource + Version int64 +} + +// UpgradeResourceStateResponse is the framework server response for the +// UpgradeResourceState RPC. +type UpgradeResourceStateResponse struct { + Diagnostics diag.Diagnostics + UpgradedState *tfsdk.State +} + +// UpgradeResourceState implements the framework server UpgradeResourceState RPC. 
+func (s *Server) UpgradeResourceState(ctx context.Context, req *UpgradeResourceStateRequest, resp *UpgradeResourceStateResponse) { + if req == nil { + return + } + + // No UpgradedState to return. This could return an error diagnostic about + // the odd scenario, but seems best to allow Terraform CLI to handle the + // situation itself in case it might be expected behavior. + if req.RawState == nil { + return + } + + // Define options to be used when unmarshalling raw state. + // IgnoreUndefinedAttributes will silently skip over fields in the JSON + // that do not have a matching entry in the schema. + unmarshalOpts := tfprotov6.UnmarshalOpts{ + ValueFromJSONOpts: tftypes.ValueFromJSONOpts{ + IgnoreUndefinedAttributes: true, + }, + } + + // Terraform CLI can call UpgradeResourceState even if the stored state + // version matches the current schema. Presumably this is to account for + // the previous terraform-plugin-sdk implementation, which handled some + // state fixups on behalf of Terraform CLI. When this happens, we do not + // want to return errors for a missing ResourceWithUpgradeState + // implementation or an undefined version within an existing + // ResourceWithUpgradeState implementation as that would be confusing + // detail for provider developers. Instead, the framework will attempt to + // roundtrip the prior RawState to a State matching the current Schema. + // + // TODO: To prevent provider developers from accidentally implementing + // ResourceWithUpgradeState with a version matching the current schema + // version which would never get called, the framework can introduce a + // unit test helper. + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/113 + // + // UnmarshalWithOpts allows optionally ignoring instances in which elements being + // do not have a corresponding attribute within the schema. 
+ if req.Version == req.ResourceSchema.GetVersion() { + logging.FrameworkTrace(ctx, "UpgradeResourceState request version matches current Schema version, using framework defined passthrough implementation") + + resourceSchemaType := req.ResourceSchema.Type().TerraformType(ctx) + + rawStateValue, err := req.RawState.UnmarshalWithOpts(resourceSchemaType, unmarshalOpts) + + if err != nil { + resp.Diagnostics.AddError( + "Unable to Read Previously Saved State for UpgradeResourceState", + "There was an error reading the saved resource state using the current resource schema.\n\n"+ + "If this resource state was last refreshed with Terraform CLI 0.11 and earlier, it must be refreshed or applied with an older provider version first. "+ + "If you manually modified the resource state, you will need to manually modify it to match the current resource schema. "+ + "Otherwise, please report this to the provider developer:\n\n"+err.Error(), + ) + return + } + + resp.UpgradedState = &tfsdk.State{ + Schema: req.ResourceSchema, + Raw: rawStateValue, + } + + return + } + + if _, ok := req.Resource.(resource.ResourceWithConfigure); ok { + logging.FrameworkTrace(ctx, "Resource implements ResourceWithConfigure") + + configureReq := resource.ConfigureRequest{ + ProviderData: s.ResourceConfigureData, + } + configureResp := resource.ConfigureResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Resource Configure") + req.Resource.(resource.ResourceWithConfigure).Configure(ctx, configureReq, &configureResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource Configure") + + resp.Diagnostics.Append(configureResp.Diagnostics...) 
+ + if resp.Diagnostics.HasError() { + return + } + } + + resourceWithUpgradeState, ok := req.Resource.(resource.ResourceWithUpgradeState) + + if !ok { + resp.Diagnostics.AddError( + "Unable to Upgrade Resource State", + "This resource was implemented without an UpgradeState() method, "+ + fmt.Sprintf("however Terraform was expecting an implementation for version %d upgrade.\n\n", req.Version)+ + "This is always an issue with the Terraform Provider and should be reported to the provider developer.", + ) + return + } + + logging.FrameworkTrace(ctx, "Resource implements ResourceWithUpgradeState") + + logging.FrameworkDebug(ctx, "Calling provider defined Resource UpgradeState") + resourceStateUpgraders := resourceWithUpgradeState.UpgradeState(ctx) + logging.FrameworkDebug(ctx, "Called provider defined Resource UpgradeState") + + // Panic prevention + if resourceStateUpgraders == nil { + resourceStateUpgraders = make(map[int64]resource.StateUpgrader, 0) + } + + resourceStateUpgrader, ok := resourceStateUpgraders[req.Version] + + if !ok { + resp.Diagnostics.AddError( + "Unable to Upgrade Resource State", + "This resource was implemented with an UpgradeState() method, "+ + fmt.Sprintf("however Terraform was expecting an implementation for version %d upgrade.\n\n", req.Version)+ + "This is always an issue with the Terraform Provider and should be reported to the provider developer.", + ) + return + } + + upgradeResourceStateRequest := resource.UpgradeStateRequest{ + RawState: req.RawState, + } + + if resourceStateUpgrader.PriorSchema != nil { + logging.FrameworkTrace(ctx, "Initializing populated UpgradeResourceStateRequest state from provider defined prior schema and request RawState") + + priorSchemaType := resourceStateUpgrader.PriorSchema.Type().TerraformType(ctx) + + rawStateValue, err := req.RawState.UnmarshalWithOpts(priorSchemaType, unmarshalOpts) + + if err != nil { + resp.Diagnostics.AddError( + "Unable to Read Previously Saved State for UpgradeResourceState", + 
fmt.Sprintf("There was an error reading the saved resource state using the prior resource schema defined for version %d upgrade.\n\n", req.Version)+ + "Please report this to the provider developer:\n\n"+err.Error(), + ) + return + } + + upgradeResourceStateRequest.State = &tfsdk.State{ + Raw: rawStateValue, + Schema: *resourceStateUpgrader.PriorSchema, + } + } + + upgradeResourceStateResponse := resource.UpgradeStateResponse{ + State: tfsdk.State{ + Schema: req.ResourceSchema, + // Raw is intentionally not set. + }, + } + + // To simplify provider logic, this could perform a best effort attempt + // to populate the response State by looping through all Attribute/Block + // by calling the equivalent of SetAttribute(GetAttribute()) and skipping + // any errors. + + logging.FrameworkDebug(ctx, "Calling provider defined StateUpgrader") + resourceStateUpgrader.StateUpgrader(ctx, upgradeResourceStateRequest, &upgradeResourceStateResponse) + logging.FrameworkDebug(ctx, "Called provider defined StateUpgrader") + + resp.Diagnostics.Append(upgradeResourceStateResponse.Diagnostics...) 
+ + if resp.Diagnostics.HasError() { + return + } + + if upgradeResourceStateResponse.DynamicValue != nil { + logging.FrameworkTrace(ctx, "UpgradeResourceStateResponse DynamicValue set, overriding State") + + upgradedStateValue, err := upgradeResourceStateResponse.DynamicValue.Unmarshal(req.ResourceSchema.Type().TerraformType(ctx)) + + if err != nil { + resp.Diagnostics.AddError( + "Unable to Upgrade Resource State", + fmt.Sprintf("After attempting a resource state upgrade to version %d, the provider returned state data that was not compatible with the current schema.\n\n", req.Version)+ + "This is always an issue with the Terraform Provider and should be reported to the provider developer:\n\n"+err.Error(), + ) + return + } + + resp.UpgradedState = &tfsdk.State{ + Schema: req.ResourceSchema, + Raw: upgradedStateValue, + } + + return + } + + if upgradeResourceStateResponse.State.Raw.Type() == nil || upgradeResourceStateResponse.State.Raw.IsNull() { + resp.Diagnostics.AddError( + "Missing Upgraded Resource State", + fmt.Sprintf("After attempting a resource state upgrade to version %d, the provider did not return any state data. ", req.Version)+ + "Preventing the unexpected loss of resource state data. 
"+ + "This is always an issue with the Terraform Provider and should be reported to the provider developer.", + ) + return + } + + resp.UpgradedState = &upgradeResourceStateResponse.State +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_validatedatasourceconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_validatedatasourceconfig.go new file mode 100644 index 0000000000..34b51d2d10 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_validatedatasourceconfig.go @@ -0,0 +1,106 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ValidateDataSourceConfigRequest is the framework server request for the +// ValidateDataSourceConfig RPC. +type ValidateDataSourceConfigRequest struct { + Config *tfsdk.Config + DataSource datasource.DataSource +} + +// ValidateDataSourceConfigResponse is the framework server response for the +// ValidateDataSourceConfig RPC. +type ValidateDataSourceConfigResponse struct { + Diagnostics diag.Diagnostics +} + +// ValidateDataSourceConfig implements the framework server ValidateDataSourceConfig RPC. 
+func (s *Server) ValidateDataSourceConfig(ctx context.Context, req *ValidateDataSourceConfigRequest, resp *ValidateDataSourceConfigResponse) { + if req == nil || req.Config == nil { + return + } + + if _, ok := req.DataSource.(datasource.DataSourceWithConfigure); ok { + logging.FrameworkTrace(ctx, "DataSource implements DataSourceWithConfigure") + + configureReq := datasource.ConfigureRequest{ + ProviderData: s.DataSourceConfigureData, + } + configureResp := datasource.ConfigureResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined DataSource Configure") + req.DataSource.(datasource.DataSourceWithConfigure).Configure(ctx, configureReq, &configureResp) + logging.FrameworkDebug(ctx, "Called provider defined DataSource Configure") + + resp.Diagnostics.Append(configureResp.Diagnostics...) + + if resp.Diagnostics.HasError() { + return + } + } + + vdscReq := datasource.ValidateConfigRequest{ + Config: *req.Config, + } + + if dataSource, ok := req.DataSource.(datasource.DataSourceWithConfigValidators); ok { + logging.FrameworkTrace(ctx, "DataSource implements DataSourceWithConfigValidators") + + for _, configValidator := range dataSource.ConfigValidators(ctx) { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + vdscResp := &datasource.ValidateConfigResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined ConfigValidator", + map[string]interface{}{ + logging.KeyDescription: configValidator.Description(ctx), + }, + ) + configValidator.ValidateDataSource(ctx, vdscReq, vdscResp) + logging.FrameworkDebug( + ctx, + "Called provider defined ConfigValidator", + map[string]interface{}{ + logging.KeyDescription: configValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(vdscResp.Diagnostics...) 
+ } + } + + if dataSource, ok := req.DataSource.(datasource.DataSourceWithValidateConfig); ok { + logging.FrameworkTrace(ctx, "DataSource implements DataSourceWithValidateConfig") + + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + vdscResp := &datasource.ValidateConfigResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined DataSource ValidateConfig") + dataSource.ValidateConfig(ctx, vdscReq, vdscResp) + logging.FrameworkDebug(ctx, "Called provider defined DataSource ValidateConfig") + + resp.Diagnostics.Append(vdscResp.Diagnostics...) + } + + validateSchemaReq := ValidateSchemaRequest{ + Config: *req.Config, + } + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateSchemaResp := ValidateSchemaResponse{} + + SchemaValidate(ctx, req.Config.Schema, validateSchemaReq, &validateSchemaResp) + + resp.Diagnostics.Append(validateSchemaResp.Diagnostics...) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_validateproviderconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_validateproviderconfig.go new file mode 100644 index 0000000000..abc760252f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_validateproviderconfig.go @@ -0,0 +1,97 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ValidateProviderConfigRequest is the framework server request for the +// ValidateProviderConfig RPC. 
+type ValidateProviderConfigRequest struct { + Config *tfsdk.Config +} + +// ValidateProviderConfigResponse is the framework server response for the +// ValidateProviderConfig RPC. +type ValidateProviderConfigResponse struct { + PreparedConfig *tfsdk.Config + Diagnostics diag.Diagnostics +} + +// ValidateProviderConfig implements the framework server ValidateProviderConfig RPC. +func (s *Server) ValidateProviderConfig(ctx context.Context, req *ValidateProviderConfigRequest, resp *ValidateProviderConfigResponse) { + if req == nil || req.Config == nil { + return + } + + vpcReq := provider.ValidateConfigRequest{ + Config: *req.Config, + } + + if providerWithConfigValidators, ok := s.Provider.(provider.ProviderWithConfigValidators); ok { + logging.FrameworkTrace(ctx, "Provider implements ProviderWithConfigValidators") + + for _, configValidator := range providerWithConfigValidators.ConfigValidators(ctx) { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + vpcRes := &provider.ValidateConfigResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined ConfigValidator", + map[string]interface{}{ + logging.KeyDescription: configValidator.Description(ctx), + }, + ) + configValidator.ValidateProvider(ctx, vpcReq, vpcRes) + logging.FrameworkDebug( + ctx, + "Called provider defined ConfigValidator", + map[string]interface{}{ + logging.KeyDescription: configValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(vpcRes.Diagnostics...) + } + } + + if providerWithValidateConfig, ok := s.Provider.(provider.ProviderWithValidateConfig); ok { + logging.FrameworkTrace(ctx, "Provider implements ProviderWithValidateConfig") + + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. 
+ vpcRes := &provider.ValidateConfigResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Provider ValidateConfig") + providerWithValidateConfig.ValidateConfig(ctx, vpcReq, vpcRes) + logging.FrameworkDebug(ctx, "Called provider defined Provider ValidateConfig") + + resp.Diagnostics.Append(vpcRes.Diagnostics...) + } + + validateSchemaReq := ValidateSchemaRequest{ + Config: *req.Config, + } + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateSchemaResp := ValidateSchemaResponse{} + + SchemaValidate(ctx, req.Config.Schema, validateSchemaReq, &validateSchemaResp) + + resp.Diagnostics.Append(validateSchemaResp.Diagnostics...) + + // This RPC allows a modified configuration to be returned. This was + // previously used to allow a "required" provider attribute (as defined + // by a schema) to still be "optional" with a default value, typically + // through an environment variable. Other tooling based on the provider + // schema information could not determine this implementation detail. + // To ensure accuracy going forward, this implementation is opinionated + // towards accurate provider schema definitions and optional values + // can be filled in or return errors during ConfigureProvider(). 
+ resp.PreparedConfig = req.Config +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_validateresourceconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_validateresourceconfig.go new file mode 100644 index 0000000000..0dfb28bf1a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/fwserver/server_validateresourceconfig.go @@ -0,0 +1,106 @@ +package fwserver + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ValidateResourceConfigRequest is the framework server request for the +// ValidateResourceConfig RPC. +type ValidateResourceConfigRequest struct { + Config *tfsdk.Config + Resource resource.Resource +} + +// ValidateResourceConfigResponse is the framework server response for the +// ValidateResourceConfig RPC. +type ValidateResourceConfigResponse struct { + Diagnostics diag.Diagnostics +} + +// ValidateResourceConfig implements the framework server ValidateResourceConfig RPC. 
+func (s *Server) ValidateResourceConfig(ctx context.Context, req *ValidateResourceConfigRequest, resp *ValidateResourceConfigResponse) { + if req == nil || req.Config == nil { + return + } + + if _, ok := req.Resource.(resource.ResourceWithConfigure); ok { + logging.FrameworkTrace(ctx, "Resource implements ResourceWithConfigure") + + configureReq := resource.ConfigureRequest{ + ProviderData: s.ResourceConfigureData, + } + configureResp := resource.ConfigureResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Resource Configure") + req.Resource.(resource.ResourceWithConfigure).Configure(ctx, configureReq, &configureResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource Configure") + + resp.Diagnostics.Append(configureResp.Diagnostics...) + + if resp.Diagnostics.HasError() { + return + } + } + + vdscReq := resource.ValidateConfigRequest{ + Config: *req.Config, + } + + if resourceWithConfigValidators, ok := req.Resource.(resource.ResourceWithConfigValidators); ok { + logging.FrameworkTrace(ctx, "Resource implements ResourceWithConfigValidators") + + for _, configValidator := range resourceWithConfigValidators.ConfigValidators(ctx) { + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + vdscResp := &resource.ValidateConfigResponse{} + + logging.FrameworkDebug( + ctx, + "Calling provider defined ResourceConfigValidator", + map[string]interface{}{ + logging.KeyDescription: configValidator.Description(ctx), + }, + ) + configValidator.ValidateResource(ctx, vdscReq, vdscResp) + logging.FrameworkDebug( + ctx, + "Called provider defined ResourceConfigValidator", + map[string]interface{}{ + logging.KeyDescription: configValidator.Description(ctx), + }, + ) + + resp.Diagnostics.Append(vdscResp.Diagnostics...) 
+ } + } + + if resourceWithValidateConfig, ok := req.Resource.(resource.ResourceWithValidateConfig); ok { + logging.FrameworkTrace(ctx, "Resource implements ResourceWithValidateConfig") + + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + vdscResp := &resource.ValidateConfigResponse{} + + logging.FrameworkDebug(ctx, "Calling provider defined Resource ValidateConfig") + resourceWithValidateConfig.ValidateConfig(ctx, vdscReq, vdscResp) + logging.FrameworkDebug(ctx, "Called provider defined Resource ValidateConfig") + + resp.Diagnostics.Append(vdscResp.Diagnostics...) + } + + validateSchemaReq := ValidateSchemaRequest{ + Config: *req.Config, + } + // Instantiate a new response for each request to prevent validators + // from modifying or removing diagnostics. + validateSchemaResp := ValidateSchemaResponse{} + + SchemaValidate(ctx, req.Config.Schema, validateSchemaReq, &validateSchemaResp) + + resp.Diagnostics.Append(validateSchemaResp.Diagnostics...) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/context.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/context.go new file mode 100644 index 0000000000..8f286cd675 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/context.go @@ -0,0 +1,22 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" +) + +// InitContext creates SDK logger contexts. The incoming context will +// already have the root SDK logger and root provider logger setup from +// terraform-plugin-go tf6server RPC handlers. 
+func InitContext(ctx context.Context) context.Context { + ctx = tfsdklog.NewSubsystem(ctx, SubsystemFramework, + // All calls are through the Framework* helper functions + tfsdklog.WithAdditionalLocationOffset(1), + tfsdklog.WithLevelFromEnv(EnvTfLogSdkFramework), + // Propagate tf_req_id, tf_rpc, etc. fields + tfsdklog.WithRootFields(), + ) + + return ctx +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/doc.go new file mode 100644 index 0000000000..88596bfc4f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/doc.go @@ -0,0 +1,3 @@ +// Package logging contains framework internal helpers for consistent logger +// and log entry handling. +package logging diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/environment_variables.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/environment_variables.go new file mode 100644 index 0000000000..295e888ab0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/environment_variables.go @@ -0,0 +1,9 @@ +package logging + +// Environment variables. +const ( + // EnvTfLogSdkFramework is an environment variable that sets the logging + // level of SDK framework loggers. Infers root SDK logging level, if + // unset. 
+ EnvTfLogSdkFramework = "TF_LOG_SDK_FRAMEWORK" +) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/framework.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/framework.go new file mode 100644 index 0000000000..eb3e8da649 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/framework.go @@ -0,0 +1,40 @@ +package logging + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tfsdklog" +) + +const ( + // SubsystemFramework is the tfsdklog subsystem name for framework. + SubsystemFramework = "framework" +) + +// FrameworkDebug emits a framework subsystem log at DEBUG level. +func FrameworkDebug(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemDebug(ctx, SubsystemFramework, msg, additionalFields...) +} + +// FrameworkError emits a framework subsystem log at ERROR level. +func FrameworkError(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemError(ctx, SubsystemFramework, msg, additionalFields...) +} + +// FrameworkTrace emits a framework subsystem log at TRACE level. +func FrameworkTrace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemTrace(ctx, SubsystemFramework, msg, additionalFields...) +} + +// FrameworkWarn emits a framework subsystem log at WARN level. +func FrameworkWarn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) { + tfsdklog.SubsystemWarn(ctx, SubsystemFramework, msg, additionalFields...) +} + +// FrameworkWithAttributePath returns a new Context with KeyAttributePath set. +// The attribute path is expected to be string, so the logging package does not +// need to import path handling code. 
+func FrameworkWithAttributePath(ctx context.Context, attributePath string) context.Context { + ctx = tfsdklog.SubsystemSetField(ctx, SubsystemFramework, KeyAttributePath, attributePath) + return ctx +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/keys.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/keys.go new file mode 100644 index 0000000000..5c65214ded --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/logging/keys.go @@ -0,0 +1,27 @@ +package logging + +// Structured logging keys. +// +// Practitioners or tooling reading logs may be depending on these keys, so be +// conscious of that when changing them. +// +// Refer to the terraform-plugin-go logging keys as well, which should be +// equivalent to these when possible. +const ( + // Attribute path representation, which is typically in flatmap form such + // as parent.0.child in this project. + KeyAttributePath = "tf_attribute_path" + + // The type of data source being operated on, such as "archive_file" + KeyDataSourceType = "tf_data_source_type" + + // Human readable string when calling a provider defined type that must + // implement the Description() method, such as validators. + KeyDescription = "description" + + // Underlying Go error string when logging an error. 
+ KeyError = "error" + + // The type of resource being operated on, such as "random_pet" + KeyResourceType = "tf_resource_type" +) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/privatestate/data.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/privatestate/data.go new file mode 100644 index 0000000000..6fb38a6082 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/privatestate/data.go @@ -0,0 +1,403 @@ +package privatestate + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + "unicode/utf8" + + "github.com/hashicorp/terraform-plugin-log/tflog" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" +) + +// Data contains private state data for the framework and providers. +type Data struct { + // Potential future usage: + // Framework contains private state data for framework usage. + Framework map[string][]byte + + // Provider contains private state data for provider usage. + Provider *ProviderData +} + +// Bytes returns a JSON encoded slice of bytes containing the merged +// framework and provider private state data. +func (d *Data) Bytes(ctx context.Context) ([]byte, diag.Diagnostics) { + var diags diag.Diagnostics + + if d == nil { + return nil, nil + } + + if (d.Provider == nil || len(d.Provider.data) == 0) && len(d.Framework) == 0 { + return nil, nil + } + + var providerData map[string][]byte + + if d.Provider != nil { + providerData = d.Provider.data + } + + mergedMap := make(map[string][]byte, len(d.Framework)+len(providerData)) + + for _, m := range []map[string][]byte{d.Framework, providerData} { + for k, v := range m { + if len(v) == 0 { + continue + } + + // Values in FrameworkData and ProviderData should never be invalid UTF-8, but let's make sure. 
+ if !utf8.Valid(v) { + diags.AddError( + "Error Encoding Private State", + "An error was encountered when validating private state value."+ + fmt.Sprintf("The value associated with key %q is is not valid UTF-8.\n\n", k)+ + "This is always a problem with Terraform or terraform-plugin-framework. Please report this to the provider developer.", + ) + + tflog.Error(ctx, "error encoding private state: invalid UTF-8 value", map[string]interface{}{"key": k, "value": v}) + + continue + } + + // Values in FrameworkData and ProviderData should never be invalid JSON, but let's make sure. + if !json.Valid(v) { + diags.AddError( + "Error Encoding Private State", + fmt.Sprintf("An error was encountered when validating private state value."+ + fmt.Sprintf("The value associated with key %q is is not valid JSON.\n\n", k)+ + "This is always a problem with Terraform or terraform-plugin-framework. Please report this to the provider developer."), + ) + + tflog.Error(ctx, "error encoding private state: invalid JSON value", map[string]interface{}{"key": k, "value": v}) + + continue + } + + mergedMap[k] = v + } + } + + if diags.HasError() { + return nil, diags + } + + bytes, err := json.Marshal(mergedMap) + if err != nil { + diags.AddError( + "Error Encoding Private State", + fmt.Sprintf("An error was encountered when encoding private state: %s.\n\n"+ + "This is always a problem with Terraform or terraform-plugin-framework. Please report this to the provider developer.", err), + ) + + return nil, diags + } + + return bytes, diags +} + +// NewData creates a new Data based on the given slice of bytes. +// It must be a JSON encoded slice of bytes, that is map[string][]byte. 
+func NewData(ctx context.Context, data []byte) (*Data, diag.Diagnostics) { + var ( + dataMap map[string][]byte + diags diag.Diagnostics + ) + + if len(data) == 0 { + return nil, nil + } + + err := json.Unmarshal(data, &dataMap) + if err != nil { + // terraform-plugin-sdk stored private state by marshalling its data + // as map[string]any, which is slightly incompatible with trying to + // unmarshal it as map[string][]byte. If unmarshalling with + // map[string]any works, we can ignore it for now, as provider + // developers did not have access to managing the private state data. + // + // TODO: We can extract the terraform-plugin-sdk resource timeouts key + // here to extract its prior data, if necessary. + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/400 + if anyErr := json.Unmarshal(data, new(map[string]any)); anyErr == nil { + logging.FrameworkWarn(ctx, "Discarding incompatible resource private state data", map[string]any{logging.KeyError: err.Error()}) + return nil, nil + } + + diags.AddError( + "Error Decoding Private State", + fmt.Sprintf("An error was encountered when decoding private state: %s.\n\n"+ + "This is always a problem with Terraform or terraform-plugin-framework. Please report this to the provider developer.", err), + ) + + return nil, diags + } + + output := Data{ + Framework: make(map[string][]byte), + Provider: &ProviderData{ + make(map[string][]byte), + }, + } + + for k, v := range dataMap { + if !utf8.Valid(v) { + diags.AddError( + "Error Decoding Private State", + "An error was encountered when validating private state value.\n"+ + fmt.Sprintf("The value being supplied for key %q is is not valid UTF-8.\n\n", k)+ + "This is always a problem with Terraform or terraform-plugin-framework. 
Please report this to the provider developer.", + ) + + tflog.Error(ctx, "error decoding private state: invalid UTF-8 value", map[string]interface{}{"key": k, "value": v}) + + continue + } + + if !json.Valid(v) { + diags.AddError( + "Error Decoding Private State", + "An error was encountered when validating private state value.\n"+ + fmt.Sprintf("The value being supplied for key %q is is not valid JSON.\n\n", k)+ + "This is always a problem with Terraform or terraform-plugin-framework. Please report this to the provider developer.", + ) + + tflog.Error(ctx, "error decoding private state: invalid JSON value", map[string]interface{}{"key": k, "value": v}) + + continue + } + + if isInvalidProviderDataKey(ctx, k) { + output.Framework[k] = v + continue + } + + output.Provider.data[k] = v + } + + if diags.HasError() { + return nil, diags + } + + return &output, diags +} + +// EmptyData creates an initialised but empty Data. +func EmptyData(ctx context.Context) *Data { + return &Data{ + Provider: EmptyProviderData(ctx), + } +} + +// NewProviderData creates a new ProviderData based on the given slice of bytes. +// It must be a JSON encoded slice of bytes, that is map[string][]byte. +func NewProviderData(ctx context.Context, data []byte) (*ProviderData, diag.Diagnostics) { + providerData := EmptyProviderData(ctx) + + if len(data) == 0 { + return providerData, nil + } + + var ( + dataMap map[string][]byte + diags diag.Diagnostics + ) + + err := json.Unmarshal(data, &dataMap) + if err != nil { + diags.AddError( + "Error Decoding Provider Data", + fmt.Sprintf("An error was encountered when decoding provider data: %s.\n\n"+ + "Please check that the data you are supplying is a byte representation of valid JSON.", err), + ) + + return nil, diags + } + + for k, v := range dataMap { + diags.Append(providerData.SetKey(ctx, k, v)...) 
+ } + + if diags.HasError() { + return nil, diags + } + + return providerData, diags +} + +// EmptyProviderData creates a ProviderData containing initialised but empty data. +func EmptyProviderData(ctx context.Context) *ProviderData { + return &ProviderData{ + data: make(map[string][]byte), + } +} + +// ProviderData contains private state data for provider usage. +type ProviderData struct { + data map[string][]byte +} + +// Equal returns true if the given ProviderData is exactly equivalent. The +// internal data is compared byte-for-byte, not accounting for semantic +// equivalency such as JSON whitespace or property reordering. +func (d *ProviderData) Equal(o *ProviderData) bool { + if d == nil && o == nil { + return true + } + + if d == nil || o == nil { + return false + } + + if !reflect.DeepEqual(d.data, o.data) { + return false + } + + return true +} + +// GetKey returns the private state data associated with the given key. +// +// If the key is reserved for framework usage, an error diagnostic +// is returned. If the key is valid, but private state data is not found, +// nil is returned. +// +// The naming of keys only matters in context of a single resource, +// however care should be taken that any historical keys are not reused +// without accounting for older resource instances that may still have +// older data at the key. +func (d *ProviderData) GetKey(ctx context.Context, key string) ([]byte, diag.Diagnostics) { + if d == nil || d.data == nil { + return nil, nil + } + + diags := ValidateProviderDataKey(ctx, key) + + if diags.HasError() { + return nil, diags + } + + value, ok := d.data[key] + if !ok { + return nil, nil + } + + return value, nil +} + +// SetKey sets the private state data at the given key. +// +// If the key is reserved for framework usage, an error diagnostic +// is returned. The data must be valid JSON and UTF-8 safe or an error +// diagnostic is returned. 
+// +// The naming of keys only matters in context of a single resource, +// however care should be taken that any historical keys are not reused +// without accounting for older resource instances that may still have +// older data at the key. +func (d *ProviderData) SetKey(ctx context.Context, key string, value []byte) diag.Diagnostics { + var diags diag.Diagnostics + + if d == nil { + tflog.Error(ctx, "error calling SetKey on uninitialized ProviderData") + + diags.AddError("Uninitialized ProviderData", + "ProviderData must be initialized before it is used.\n\n"+ + "Call privatestate.NewProviderData to obtain an initialized instance of ProviderData.", + ) + + return diags + } + + if d.data == nil { + d.data = make(map[string][]byte) + } + + diags.Append(ValidateProviderDataKey(ctx, key)...) + + if diags.HasError() { + return diags + } + + if !utf8.Valid(value) { + tflog.Error(ctx, "invalid UTF-8 value", map[string]interface{}{"key": key, "value": value}) + + diags.AddError("UTF-8 Invalid", + "Values stored in private state must be valid UTF-8.\n\n"+ + fmt.Sprintf("The value being supplied for key %q is invalid. Please verify that the value is valid UTF-8.", key), + ) + + return diags + } + + if !json.Valid(value) { + tflog.Error(ctx, "invalid JSON value", map[string]interface{}{"key": key, "value": value}) + + diags.AddError("JSON Invalid", + "Values stored in private state must be valid JSON.\n\n"+ + fmt.Sprintf("The value being supplied for key %q is invalid. Please verify that the value is valid JSON.", key), + ) + + return diags + } + + d.data[key] = value + + return nil +} + +// ValidateProviderDataKey determines whether the key supplied is allowed on the basis of any +// restrictions that are in place, such as key prefixes that are reserved for use with +// framework private state data. 
+func ValidateProviderDataKey(ctx context.Context, key string) diag.Diagnostics { + if isInvalidProviderDataKey(ctx, key) { + return diag.Diagnostics{ + diag.NewErrorDiagnostic( + "Restricted Resource Private State Namespace", + "Using a period ('.') as a prefix for a key used in private state is not allowed.\n\n"+ + fmt.Sprintf("The key %q is invalid. Please check the key you are supplying does not use a a period ('.') as a prefix.", key), + ), + } + } + + return nil +} + +// isInvalidProviderDataKey determines whether the supplied key has a prefix that is reserved for +// keys in Data.Framework +func isInvalidProviderDataKey(_ context.Context, key string) bool { + return strings.HasPrefix(key, ".") +} + +// MustMarshalToJson is for use in tests and panics if input cannot be marshalled to JSON. +func MustMarshalToJson(input map[string][]byte) []byte { + output, err := json.Marshal(input) + if err != nil { + panic(err) + } + + return output +} + +// MustProviderData is for use in tests and panics if the underlying call to NewProviderData +// returns diag.Diagnostics that contains any errors. 
+func MustProviderData(ctx context.Context, data []byte) *ProviderData { + providerData, diags := NewProviderData(ctx, data) + + if diags.HasError() { + var diagMsgs []string + + for _, v := range diags { + diagMsgs = append(diagMsgs, fmt.Sprintf("%s: %s", v.Summary(), v.Detail())) + } + + panic(fmt.Sprintf("error creating new provider data: %s", strings.Join(diagMsgs, ", "))) + } + + return providerData +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/privatestate/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/privatestate/doc.go new file mode 100644 index 0000000000..db2c56cc9a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/privatestate/doc.go @@ -0,0 +1,3 @@ +// Package privatestate contains the type used for handling private resource +// state data. +package privatestate diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/doc.go new file mode 100644 index 0000000000..ac6a3f0d60 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/doc.go @@ -0,0 +1,3 @@ +// Package proto5server contains the provider server implementation compatible +// with protocol version 5 (tfprotov5.ProviderServer). 
+package proto5server diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/serve.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/serve.go new file mode 100644 index 0000000000..5d4e586a61 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/serve.go @@ -0,0 +1,43 @@ +package proto5server + +import ( + "context" + "sync" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +var _ tfprotov5.ProviderServer = &Server{} + +// Provider server implementation. +type Server struct { + FrameworkServer fwserver.Server + + contextCancels []context.CancelFunc + contextCancelsMu sync.Mutex +} + +func (s *Server) registerContext(in context.Context) context.Context { + ctx, cancel := context.WithCancel(in) + s.contextCancelsMu.Lock() + defer s.contextCancelsMu.Unlock() + s.contextCancels = append(s.contextCancels, cancel) + return ctx +} + +func (s *Server) cancelRegisteredContexts(_ context.Context) { + s.contextCancelsMu.Lock() + defer s.contextCancelsMu.Unlock() + for _, cancel := range s.contextCancels { + cancel() + } + s.contextCancels = nil +} + +// StopProvider satisfies the tfprotov5.ProviderServer interface. 
+func (s *Server) StopProvider(ctx context.Context, _ *tfprotov5.StopProviderRequest) (*tfprotov5.StopProviderResponse, error) { + s.cancelRegisteredContexts(ctx) + + return &tfprotov5.StopProviderResponse{}, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_applyresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_applyresourcechange.go new file mode 100644 index 0000000000..7dfe24d845 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_applyresourcechange.go @@ -0,0 +1,55 @@ +package proto5server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto5" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ApplyResourceChange satisfies the tfprotov5.ProviderServer interface. +func (s *Server) ApplyResourceChange(ctx context.Context, proto5Req *tfprotov5.ApplyResourceChangeRequest) (*tfprotov5.ApplyResourceChangeResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ApplyResourceChangeResponse{} + + resource, diags := s.FrameworkServer.Resource(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ApplyResourceChangeResponse(ctx, fwResp), nil + } + + resourceSchema, diags := s.FrameworkServer.ResourceSchema(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto5.ApplyResourceChangeResponse(ctx, fwResp), nil + } + + providerMetaSchema, diags := s.FrameworkServer.ProviderMetaSchema(ctx) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ApplyResourceChangeResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto5.ApplyResourceChangeRequest(ctx, proto5Req, resource, resourceSchema, providerMetaSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ApplyResourceChangeResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ApplyResourceChange(ctx, fwReq, fwResp) + + return toproto5.ApplyResourceChangeResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_configureprovider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_configureprovider.go new file mode 100644 index 0000000000..115cb68def --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_configureprovider.go @@ -0,0 +1,39 @@ +package proto5server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto5" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto5" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ConfigureProvider satisfies the tfprotov5.ProviderServer interface. 
+func (s *Server) ConfigureProvider(ctx context.Context, proto5Req *tfprotov5.ConfigureProviderRequest) (*tfprotov5.ConfigureProviderResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &provider.ConfigureResponse{} + + providerSchema, diags := s.FrameworkServer.ProviderSchema(ctx) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ConfigureProviderResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto5.ConfigureProviderRequest(ctx, proto5Req, providerSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ConfigureProviderResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ConfigureProvider(ctx, fwReq, fwResp) + + return toproto5.ConfigureProviderResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_getproviderschema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_getproviderschema.go new file mode 100644 index 0000000000..285b771bf7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_getproviderschema.go @@ -0,0 +1,24 @@ +package proto5server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto5" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// GetProviderSchema satisfies the tfprotov5.ProviderServer interface. 
+func (s *Server) GetProviderSchema(ctx context.Context, proto5Req *tfprotov5.GetProviderSchemaRequest) (*tfprotov5.GetProviderSchemaResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwReq := fromproto5.GetProviderSchemaRequest(ctx, proto5Req) + fwResp := &fwserver.GetProviderSchemaResponse{} + + s.FrameworkServer.GetProviderSchema(ctx, fwReq, fwResp) + + return toproto5.GetProviderSchemaResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_importresourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_importresourcestate.go new file mode 100644 index 0000000000..76cf7de43e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_importresourcestate.go @@ -0,0 +1,47 @@ +package proto5server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto5" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ImportResourceState satisfies the tfprotov5.ProviderServer interface. +func (s *Server) ImportResourceState(ctx context.Context, proto5Req *tfprotov5.ImportResourceStateRequest) (*tfprotov5.ImportResourceStateResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ImportResourceStateResponse{} + + resource, diags := s.FrameworkServer.Resource(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto5.ImportResourceStateResponse(ctx, fwResp), nil + } + + resourceSchema, diags := s.FrameworkServer.ResourceSchema(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ImportResourceStateResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto5.ImportResourceStateRequest(ctx, proto5Req, resource, resourceSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ImportResourceStateResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ImportResourceState(ctx, fwReq, fwResp) + + return toproto5.ImportResourceStateResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_planresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_planresourcechange.go new file mode 100644 index 0000000000..60e71c4455 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_planresourcechange.go @@ -0,0 +1,55 @@ +package proto5server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto5" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// PlanResourceChange satisfies the tfprotov5.ProviderServer interface. 
+func (s *Server) PlanResourceChange(ctx context.Context, proto5Req *tfprotov5.PlanResourceChangeRequest) (*tfprotov5.PlanResourceChangeResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.PlanResourceChangeResponse{} + + resource, diags := s.FrameworkServer.Resource(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.PlanResourceChangeResponse(ctx, fwResp), nil + } + + resourceSchema, diags := s.FrameworkServer.ResourceSchema(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.PlanResourceChangeResponse(ctx, fwResp), nil + } + + providerMetaSchema, diags := s.FrameworkServer.ProviderMetaSchema(ctx) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.PlanResourceChangeResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto5.PlanResourceChangeRequest(ctx, proto5Req, resource, resourceSchema, providerMetaSchema) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto5.PlanResourceChangeResponse(ctx, fwResp), nil + } + + s.FrameworkServer.PlanResourceChange(ctx, fwReq, fwResp) + + return toproto5.PlanResourceChangeResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_prepareproviderconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_prepareproviderconfig.go new file mode 100644 index 0000000000..a04bcda073 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_prepareproviderconfig.go @@ -0,0 +1,39 @@ +package proto5server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto5" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// PrepareProviderConfig satisfies the tfprotov5.ProviderServer interface. +func (s *Server) PrepareProviderConfig(ctx context.Context, proto5Req *tfprotov5.PrepareProviderConfigRequest) (*tfprotov5.PrepareProviderConfigResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ValidateProviderConfigResponse{} + + providerSchema, diags := s.FrameworkServer.ProviderSchema(ctx) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.PrepareProviderConfigResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto5.PrepareProviderConfigRequest(ctx, proto5Req, providerSchema) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto5.PrepareProviderConfigResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ValidateProviderConfig(ctx, fwReq, fwResp) + + return toproto5.PrepareProviderConfigResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_readdatasource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_readdatasource.go new file mode 100644 index 0000000000..22d1bff335 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_readdatasource.go @@ -0,0 +1,55 @@ +package proto5server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto5" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ReadDataSource satisfies the tfprotov5.ProviderServer interface. +func (s *Server) ReadDataSource(ctx context.Context, proto5Req *tfprotov5.ReadDataSourceRequest) (*tfprotov5.ReadDataSourceResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ReadDataSourceResponse{} + + dataSource, diags := s.FrameworkServer.DataSource(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ReadDataSourceResponse(ctx, fwResp), nil + } + + dataSourceSchema, diags := s.FrameworkServer.DataSourceSchema(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto5.ReadDataSourceResponse(ctx, fwResp), nil + } + + providerMetaSchema, diags := s.FrameworkServer.ProviderMetaSchema(ctx) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ReadDataSourceResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto5.ReadDataSourceRequest(ctx, proto5Req, dataSource, dataSourceSchema, providerMetaSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ReadDataSourceResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ReadDataSource(ctx, fwReq, fwResp) + + return toproto5.ReadDataSourceResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_readresource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_readresource.go new file mode 100644 index 0000000000..73c1b0f1f3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_readresource.go @@ -0,0 +1,56 @@ +package proto5server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto5" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto5" +) + +// ReadResource satisfies the tfprotov5.ProviderServer interface. 
+func (s *Server) ReadResource(ctx context.Context, proto5Req *tfprotov5.ReadResourceRequest) (*tfprotov5.ReadResourceResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ReadResourceResponse{} + + resource, diags := s.FrameworkServer.Resource(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ReadResourceResponse(ctx, fwResp), nil + } + + resourceSchema, diags := s.FrameworkServer.ResourceSchema(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ReadResourceResponse(ctx, fwResp), nil + } + + providerMetaSchema, diags := s.FrameworkServer.ProviderMetaSchema(ctx) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ReadResourceResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto5.ReadResourceRequest(ctx, proto5Req, resource, resourceSchema, providerMetaSchema) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto5.ReadResourceResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ReadResource(ctx, fwReq, fwResp) + + return toproto5.ReadResourceResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_upgraderesourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_upgraderesourcestate.go new file mode 100644 index 0000000000..f998e74cf0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_upgraderesourcestate.go @@ -0,0 +1,51 @@ +package proto5server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto5" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// UpgradeResourceState satisfies the tfprotov5.ProviderServer interface. +func (s *Server) UpgradeResourceState(ctx context.Context, proto5Req *tfprotov5.UpgradeResourceStateRequest) (*tfprotov5.UpgradeResourceStateResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.UpgradeResourceStateResponse{} + + if proto5Req == nil { + return toproto5.UpgradeResourceStateResponse(ctx, fwResp), nil + } + + resource, diags := s.FrameworkServer.Resource(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.UpgradeResourceStateResponse(ctx, fwResp), nil + } + + resourceSchema, diags := s.FrameworkServer.ResourceSchema(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto5.UpgradeResourceStateResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto5.UpgradeResourceStateRequest(ctx, proto5Req, resource, resourceSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.UpgradeResourceStateResponse(ctx, fwResp), nil + } + + s.FrameworkServer.UpgradeResourceState(ctx, fwReq, fwResp) + + return toproto5.UpgradeResourceStateResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_validatedatasourceconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_validatedatasourceconfig.go new file mode 100644 index 0000000000..179503a109 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_validatedatasourceconfig.go @@ -0,0 +1,47 @@ +package proto5server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto5" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ValidateDataSourceConfig satisfies the tfprotov5.ProviderServer interface. +func (s *Server) ValidateDataSourceConfig(ctx context.Context, proto5Req *tfprotov5.ValidateDataSourceConfigRequest) (*tfprotov5.ValidateDataSourceConfigResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ValidateDataSourceConfigResponse{} + + dataSource, diags := s.FrameworkServer.DataSource(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto5.ValidateDataSourceConfigResponse(ctx, fwResp), nil + } + + dataSourceSchema, diags := s.FrameworkServer.DataSourceSchema(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ValidateDataSourceConfigResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto5.ValidateDataSourceConfigRequest(ctx, proto5Req, dataSource, dataSourceSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ValidateDataSourceConfigResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ValidateDataSourceConfig(ctx, fwReq, fwResp) + + return toproto5.ValidateDataSourceConfigResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_validateresourcetypeconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_validateresourcetypeconfig.go new file mode 100644 index 0000000000..9572d22f0b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto5server/server_validateresourcetypeconfig.go @@ -0,0 +1,47 @@ +package proto5server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto5" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ValidateResourceTypeConfig satisfies the tfprotov5.ProviderServer interface. 
+func (s *Server) ValidateResourceTypeConfig(ctx context.Context, proto5Req *tfprotov5.ValidateResourceTypeConfigRequest) (*tfprotov5.ValidateResourceTypeConfigResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ValidateResourceConfigResponse{} + + resource, diags := s.FrameworkServer.Resource(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ValidateResourceTypeConfigResponse(ctx, fwResp), nil + } + + resourceSchema, diags := s.FrameworkServer.ResourceSchema(ctx, proto5Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ValidateResourceTypeConfigResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto5.ValidateResourceTypeConfigRequest(ctx, proto5Req, resource, resourceSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto5.ValidateResourceTypeConfigResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ValidateResourceConfig(ctx, fwReq, fwResp) + + return toproto5.ValidateResourceTypeConfigResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/doc.go new file mode 100644 index 0000000000..94aa9305d3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/doc.go @@ -0,0 +1,3 @@ +// Package proto6server contains the provider server implementation compatible +// with protocol version 6 (tfprotov6.ProviderServer). 
+package proto6server diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/serve.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/serve.go new file mode 100644 index 0000000000..2c106c3d0c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/serve.go @@ -0,0 +1,43 @@ +package proto6server + +import ( + "context" + "sync" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +var _ tfprotov6.ProviderServer = &Server{} + +// Provider server implementation. +type Server struct { + FrameworkServer fwserver.Server + + contextCancels []context.CancelFunc + contextCancelsMu sync.Mutex +} + +func (s *Server) registerContext(in context.Context) context.Context { + ctx, cancel := context.WithCancel(in) + s.contextCancelsMu.Lock() + defer s.contextCancelsMu.Unlock() + s.contextCancels = append(s.contextCancels, cancel) + return ctx +} + +func (s *Server) cancelRegisteredContexts(_ context.Context) { + s.contextCancelsMu.Lock() + defer s.contextCancelsMu.Unlock() + for _, cancel := range s.contextCancels { + cancel() + } + s.contextCancels = nil +} + +// StopProvider satisfies the tfprotov6.ProviderServer interface. 
+func (s *Server) StopProvider(ctx context.Context, _ *tfprotov6.StopProviderRequest) (*tfprotov6.StopProviderResponse, error) { + s.cancelRegisteredContexts(ctx) + + return &tfprotov6.StopProviderResponse{}, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_applyresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_applyresourcechange.go new file mode 100644 index 0000000000..101603c1b2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_applyresourcechange.go @@ -0,0 +1,55 @@ +package proto6server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto6" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ApplyResourceChange satisfies the tfprotov6.ProviderServer interface. +func (s *Server) ApplyResourceChange(ctx context.Context, proto6Req *tfprotov6.ApplyResourceChangeRequest) (*tfprotov6.ApplyResourceChangeResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ApplyResourceChangeResponse{} + + resource, diags := s.FrameworkServer.Resource(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ApplyResourceChangeResponse(ctx, fwResp), nil + } + + resourceSchema, diags := s.FrameworkServer.ResourceSchema(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto6.ApplyResourceChangeResponse(ctx, fwResp), nil + } + + providerMetaSchema, diags := s.FrameworkServer.ProviderMetaSchema(ctx) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ApplyResourceChangeResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto6.ApplyResourceChangeRequest(ctx, proto6Req, resource, resourceSchema, providerMetaSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ApplyResourceChangeResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ApplyResourceChange(ctx, fwReq, fwResp) + + return toproto6.ApplyResourceChangeResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_configureprovider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_configureprovider.go new file mode 100644 index 0000000000..448d2d6e69 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_configureprovider.go @@ -0,0 +1,39 @@ +package proto6server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto6" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto6" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ConfigureProvider satisfies the tfprotov6.ProviderServer interface. 
+func (s *Server) ConfigureProvider(ctx context.Context, proto6Req *tfprotov6.ConfigureProviderRequest) (*tfprotov6.ConfigureProviderResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &provider.ConfigureResponse{} + + providerSchema, diags := s.FrameworkServer.ProviderSchema(ctx) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ConfigureProviderResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto6.ConfigureProviderRequest(ctx, proto6Req, providerSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ConfigureProviderResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ConfigureProvider(ctx, fwReq, fwResp) + + return toproto6.ConfigureProviderResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_getproviderschema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_getproviderschema.go new file mode 100644 index 0000000000..7229351baf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_getproviderschema.go @@ -0,0 +1,24 @@ +package proto6server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto6" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// GetProviderSchema satisfies the tfprotov6.ProviderServer interface. 
+func (s *Server) GetProviderSchema(ctx context.Context, proto6Req *tfprotov6.GetProviderSchemaRequest) (*tfprotov6.GetProviderSchemaResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwReq := fromproto6.GetProviderSchemaRequest(ctx, proto6Req) + fwResp := &fwserver.GetProviderSchemaResponse{} + + s.FrameworkServer.GetProviderSchema(ctx, fwReq, fwResp) + + return toproto6.GetProviderSchemaResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_importresourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_importresourcestate.go new file mode 100644 index 0000000000..8743508f2a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_importresourcestate.go @@ -0,0 +1,47 @@ +package proto6server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto6" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ImportResourceState satisfies the tfprotov6.ProviderServer interface. +func (s *Server) ImportResourceState(ctx context.Context, proto6Req *tfprotov6.ImportResourceStateRequest) (*tfprotov6.ImportResourceStateResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ImportResourceStateResponse{} + + resource, diags := s.FrameworkServer.Resource(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto6.ImportResourceStateResponse(ctx, fwResp), nil + } + + resourceSchema, diags := s.FrameworkServer.ResourceSchema(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ImportResourceStateResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto6.ImportResourceStateRequest(ctx, proto6Req, resource, resourceSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ImportResourceStateResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ImportResourceState(ctx, fwReq, fwResp) + + return toproto6.ImportResourceStateResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_planresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_planresourcechange.go new file mode 100644 index 0000000000..73a8123871 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_planresourcechange.go @@ -0,0 +1,55 @@ +package proto6server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto6" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// PlanResourceChange satisfies the tfprotov6.ProviderServer interface. 
+func (s *Server) PlanResourceChange(ctx context.Context, proto6Req *tfprotov6.PlanResourceChangeRequest) (*tfprotov6.PlanResourceChangeResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.PlanResourceChangeResponse{} + + resource, diags := s.FrameworkServer.Resource(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.PlanResourceChangeResponse(ctx, fwResp), nil + } + + resourceSchema, diags := s.FrameworkServer.ResourceSchema(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.PlanResourceChangeResponse(ctx, fwResp), nil + } + + providerMetaSchema, diags := s.FrameworkServer.ProviderMetaSchema(ctx) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.PlanResourceChangeResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto6.PlanResourceChangeRequest(ctx, proto6Req, resource, resourceSchema, providerMetaSchema) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto6.PlanResourceChangeResponse(ctx, fwResp), nil + } + + s.FrameworkServer.PlanResourceChange(ctx, fwReq, fwResp) + + return toproto6.PlanResourceChangeResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_readdatasource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_readdatasource.go new file mode 100644 index 0000000000..7af936df1b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_readdatasource.go @@ -0,0 +1,55 @@ +package proto6server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto6" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ReadDataSource satisfies the tfprotov6.ProviderServer interface. +func (s *Server) ReadDataSource(ctx context.Context, proto6Req *tfprotov6.ReadDataSourceRequest) (*tfprotov6.ReadDataSourceResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ReadDataSourceResponse{} + + dataSource, diags := s.FrameworkServer.DataSource(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ReadDataSourceResponse(ctx, fwResp), nil + } + + dataSourceSchema, diags := s.FrameworkServer.DataSourceSchema(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto6.ReadDataSourceResponse(ctx, fwResp), nil + } + + providerMetaSchema, diags := s.FrameworkServer.ProviderMetaSchema(ctx) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ReadDataSourceResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto6.ReadDataSourceRequest(ctx, proto6Req, dataSource, dataSourceSchema, providerMetaSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ReadDataSourceResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ReadDataSource(ctx, fwReq, fwResp) + + return toproto6.ReadDataSourceResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_readresource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_readresource.go new file mode 100644 index 0000000000..441eec9849 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_readresource.go @@ -0,0 +1,55 @@ +package proto6server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto6" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ReadResource satisfies the tfprotov6.ProviderServer interface. 
+func (s *Server) ReadResource(ctx context.Context, proto6Req *tfprotov6.ReadResourceRequest) (*tfprotov6.ReadResourceResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ReadResourceResponse{} + + resource, diags := s.FrameworkServer.Resource(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ReadResourceResponse(ctx, fwResp), nil + } + + resourceSchema, diags := s.FrameworkServer.ResourceSchema(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ReadResourceResponse(ctx, fwResp), nil + } + + providerMetaSchema, diags := s.FrameworkServer.ProviderMetaSchema(ctx) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ReadResourceResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto6.ReadResourceRequest(ctx, proto6Req, resource, resourceSchema, providerMetaSchema) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto6.ReadResourceResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ReadResource(ctx, fwReq, fwResp) + + return toproto6.ReadResourceResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_upgraderesourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_upgraderesourcestate.go new file mode 100644 index 0000000000..1e6049c027 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_upgraderesourcestate.go @@ -0,0 +1,51 @@ +package proto6server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto6" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// UpgradeResourceState satisfies the tfprotov6.ProviderServer interface. +func (s *Server) UpgradeResourceState(ctx context.Context, proto6Req *tfprotov6.UpgradeResourceStateRequest) (*tfprotov6.UpgradeResourceStateResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.UpgradeResourceStateResponse{} + + if proto6Req == nil { + return toproto6.UpgradeResourceStateResponse(ctx, fwResp), nil + } + + resource, diags := s.FrameworkServer.Resource(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.UpgradeResourceStateResponse(ctx, fwResp), nil + } + + resourceSchema, diags := s.FrameworkServer.ResourceSchema(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto6.UpgradeResourceStateResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto6.UpgradeResourceStateRequest(ctx, proto6Req, resource, resourceSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.UpgradeResourceStateResponse(ctx, fwResp), nil + } + + s.FrameworkServer.UpgradeResourceState(ctx, fwReq, fwResp) + + return toproto6.UpgradeResourceStateResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_validatedataresourceconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_validatedataresourceconfig.go new file mode 100644 index 0000000000..8a26e47867 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_validatedataresourceconfig.go @@ -0,0 +1,47 @@ +package proto6server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto6" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ValidateDataResourceConfig satisfies the tfprotov6.ProviderServer interface. +func (s *Server) ValidateDataResourceConfig(ctx context.Context, proto6Req *tfprotov6.ValidateDataResourceConfigRequest) (*tfprotov6.ValidateDataResourceConfigResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ValidateDataSourceConfigResponse{} + + dataSource, diags := s.FrameworkServer.DataSource(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) 
+ + if fwResp.Diagnostics.HasError() { + return toproto6.ValidateDataSourceConfigResponse(ctx, fwResp), nil + } + + dataSourceSchema, diags := s.FrameworkServer.DataSourceSchema(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ValidateDataSourceConfigResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto6.ValidateDataSourceConfigRequest(ctx, proto6Req, dataSource, dataSourceSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ValidateDataSourceConfigResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ValidateDataSourceConfig(ctx, fwReq, fwResp) + + return toproto6.ValidateDataSourceConfigResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_validateproviderconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_validateproviderconfig.go new file mode 100644 index 0000000000..c42b7c38bd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_validateproviderconfig.go @@ -0,0 +1,39 @@ +package proto6server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto6" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ValidateProviderConfig satisfies the tfprotov6.ProviderServer interface. 
+func (s *Server) ValidateProviderConfig(ctx context.Context, proto6Req *tfprotov6.ValidateProviderConfigRequest) (*tfprotov6.ValidateProviderConfigResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ValidateProviderConfigResponse{} + + providerSchema, diags := s.FrameworkServer.ProviderSchema(ctx) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ValidateProviderConfigResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto6.ValidateProviderConfigRequest(ctx, proto6Req, providerSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ValidateProviderConfigResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ValidateProviderConfig(ctx, fwReq, fwResp) + + return toproto6.ValidateProviderConfigResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_validateresourceconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_validateresourceconfig.go new file mode 100644 index 0000000000..583f4560f4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/proto6server/server_validateresourceconfig.go @@ -0,0 +1,47 @@ +package proto6server + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fromproto6" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/logging" + "github.com/hashicorp/terraform-plugin-framework/internal/toproto6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ValidateResourceConfig satisfies the tfprotov6.ProviderServer interface. 
+func (s *Server) ValidateResourceConfig(ctx context.Context, proto6Req *tfprotov6.ValidateResourceConfigRequest) (*tfprotov6.ValidateResourceConfigResponse, error) { + ctx = s.registerContext(ctx) + ctx = logging.InitContext(ctx) + + fwResp := &fwserver.ValidateResourceConfigResponse{} + + resource, diags := s.FrameworkServer.Resource(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ValidateResourceConfigResponse(ctx, fwResp), nil + } + + resourceSchema, diags := s.FrameworkServer.ResourceSchema(ctx, proto6Req.TypeName) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ValidateResourceConfigResponse(ctx, fwResp), nil + } + + fwReq, diags := fromproto6.ValidateResourceConfigRequest(ctx, proto6Req, resource, resourceSchema) + + fwResp.Diagnostics.Append(diags...) + + if fwResp.Diagnostics.HasError() { + return toproto6.ValidateResourceConfigResponse(ctx, fwResp), nil + } + + s.FrameworkServer.ValidateResourceConfig(ctx, fwReq, fwResp) + + return toproto6.ValidateResourceConfigResponse(ctx, fwResp), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/diags.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/diags.go new file mode 100644 index 0000000000..748222c631 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/diags.go @@ -0,0 +1,113 @@ +package reflect + +import ( + "fmt" + "reflect" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +func toTerraform5ValueErrorDiag(err error, path path.Path) diag.DiagnosticWithPath { + return diag.NewAttributeErrorDiagnostic( + path, + "Value 
Conversion Error", + "An unexpected error was encountered trying to convert into a Terraform value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) +} + +func toTerraformValueErrorDiag(err error, path path.Path) diag.DiagnosticWithPath { + return diag.NewAttributeErrorDiagnostic( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert the Attribute value into a Terraform value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) +} + +func validateValueErrorDiag(err error, path path.Path) diag.DiagnosticWithPath { + return diag.NewAttributeErrorDiagnostic( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to validate the Terraform value type. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) +} + +func valueFromTerraformErrorDiag(err error, path path.Path) diag.DiagnosticWithPath { + return diag.NewAttributeErrorDiagnostic( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert the Terraform value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) +} + +type DiagIntoIncompatibleType struct { + Val tftypes.Value + TargetType reflect.Type + Err error +} + +func (d DiagIntoIncompatibleType) Severity() diag.Severity { + return diag.SeverityError +} + +func (d DiagIntoIncompatibleType) Summary() string { + return "Value Conversion Error" +} + +func (d DiagIntoIncompatibleType) Detail() string { + return fmt.Sprintf("An unexpected error was encountered trying to convert %T into %s. This is always an error in the provider. 
Please report the following to the provider developer:\n\n%s", d.Val, d.TargetType, d.Err.Error()) +} + +func (d DiagIntoIncompatibleType) Equal(o diag.Diagnostic) bool { + od, ok := o.(DiagIntoIncompatibleType) + if !ok { + return false + } + if !d.Val.Equal(od.Val) { + return false + } + if d.TargetType != od.TargetType { + return false + } + if d.Err.Error() != od.Err.Error() { + return false + } + return true +} + +type DiagNewAttributeValueIntoWrongType struct { + ValType reflect.Type + TargetType reflect.Type + SchemaType attr.Type +} + +func (d DiagNewAttributeValueIntoWrongType) Severity() diag.Severity { + return diag.SeverityError +} + +func (d DiagNewAttributeValueIntoWrongType) Summary() string { + return "Value Conversion Error" +} + +func (d DiagNewAttributeValueIntoWrongType) Detail() string { + return fmt.Sprintf("An unexpected error was encountered trying to convert into a Terraform value. This is always an error in the provider. Please report the following to the provider developer:\n\nCannot use attr.Value %s, only %s is supported because %T is the type in the schema", d.TargetType, d.ValType, d.SchemaType) +} + +func (d DiagNewAttributeValueIntoWrongType) Equal(o diag.Diagnostic) bool { + od, ok := o.(DiagNewAttributeValueIntoWrongType) + if !ok { + return false + } + if d.ValType != od.ValType { + return false + } + if d.TargetType != od.TargetType { + return false + } + if !d.SchemaType.Equal(od.SchemaType) { + return false + } + return true +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/doc.go new file mode 100644 index 0000000000..82abdb6826 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/doc.go @@ -0,0 +1,3 @@ +// Package reflect contains the implementation for converting framework-defined +// data 
into and from provider-defined Go types. +package reflect diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/generic_attr_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/generic_attr_value.go new file mode 100644 index 0000000000..8d700fadac --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/generic_attr_value.go @@ -0,0 +1,12 @@ +package reflect + +import ( + "context" + "reflect" + + "github.com/hashicorp/terraform-plugin-framework/attr" +) + +func IsGenericAttrValue(ctx context.Context, target interface{}) bool { + return reflect.TypeOf((*attr.Value)(nil)) == reflect.TypeOf(target) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/helpers.go new file mode 100644 index 0000000000..ff0c8da53d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/helpers.go @@ -0,0 +1,96 @@ +package reflect + +import ( + "context" + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/path" +) + +// trueReflectValue returns the reflect.Value for `in` after derefencing all +// the pointers and unwrapping all the interfaces. It's the concrete value +// beneath it all. +func trueReflectValue(val reflect.Value) reflect.Value { + kind := val.Type().Kind() + for kind == reflect.Interface || kind == reflect.Ptr { + innerVal := val.Elem() + if !innerVal.IsValid() { + break + } + val = innerVal + kind = val.Type().Kind() + } + return val +} + +// commaSeparatedString returns an English joining of the strings in `in`, +// using "and" and commas as appropriate. 
+func commaSeparatedString(in []string) string { + switch len(in) { + case 0: + return "" + case 1: + return in[0] + case 2: + return strings.Join(in, " and ") + default: + in[len(in)-1] = "and " + in[len(in)-1] + return strings.Join(in, ", ") + } +} + +// getStructTags returns a map of Terraform field names to their position in +// the tags of the struct `in`. `in` must be a struct. +func getStructTags(_ context.Context, in reflect.Value, path path.Path) (map[string]int, error) { + tags := map[string]int{} + typ := trueReflectValue(in).Type() + if typ.Kind() != reflect.Struct { + return nil, fmt.Errorf("%s: can't get struct tags of %s, is not a struct", path, in.Type()) + } + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + if field.PkgPath != "" { + // skip unexported fields + continue + } + tag := field.Tag.Get(`tfsdk`) + if tag == "-" { + // skip explicitly excluded fields + continue + } + if tag == "" { + return nil, fmt.Errorf(`%s: need a struct tag for "tfsdk" on %s`, path, field.Name) + } + path := path.AtName(tag) + if !isValidFieldName(tag) { + return nil, fmt.Errorf("%s: invalid field name, must only use lowercase letters, underscores, and numbers, and must start with a letter", path) + } + if other, ok := tags[tag]; ok { + return nil, fmt.Errorf("%s: can't use field name for both %s and %s", path, typ.Field(other).Name, field.Name) + } + tags[tag] = i + } + return tags, nil +} + +// isValidFieldName returns true if `name` can be used as a field name in a +// Terraform resource or data source. 
+func isValidFieldName(name string) bool { + re := regexp.MustCompile("^[a-z][a-z0-9_]*$") + return re.MatchString(name) +} + +// canBeNil returns true if `target`'s type can hold a nil value +func canBeNil(target reflect.Value) bool { + switch target.Kind() { + case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface: + // these types can all hold nils + return true + default: + // nothing else can be set to nil + return false + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/interfaces.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/interfaces.go new file mode 100644 index 0000000000..6db4006fdc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/interfaces.go @@ -0,0 +1,329 @@ +package reflect + +import ( + "context" + "fmt" + "reflect" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Unknownable is an interface for types that can be explicitly set to known or +// unknown. +type Unknownable interface { + SetUnknown(context.Context, bool) error + SetValue(context.Context, interface{}) error + GetUnknown(context.Context) bool + GetValue(context.Context) interface{} +} + +// NewUnknownable creates a zero value of `target` (or the concrete type it's +// referencing, if it's a pointer) and calls its SetUnknown method. +// +// It is meant to be called through Into, not directly. 
+func NewUnknownable(ctx context.Context, typ attr.Type, val tftypes.Value, target reflect.Value, opts Options, path path.Path) (reflect.Value, diag.Diagnostics) { + var diags diag.Diagnostics + receiver := pointerSafeZeroValue(ctx, target) + method := receiver.MethodByName("SetUnknown") + if !method.IsValid() { + err := fmt.Errorf("cannot find SetUnknown method on type %s", receiver.Type().String()) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return target, diags + } + results := method.Call([]reflect.Value{ + reflect.ValueOf(ctx), + reflect.ValueOf(!val.IsKnown()), + }) + err := results[0].Interface() + if err != nil { + var underlyingErr error + switch e := err.(type) { + case error: + underlyingErr = e + default: + underlyingErr = fmt.Errorf("unknown error type %T: %v", e, e) + } + underlyingErr = fmt.Errorf("reflection error: %w", underlyingErr) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert into a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+underlyingErr.Error(), + ) + return target, diags + } + return receiver, diags +} + +// FromUnknownable creates an attr.Value from the data in an Unknownable. +// +// It is meant to be called through FromValue, not directly. +func FromUnknownable(ctx context.Context, typ attr.Type, val Unknownable, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + if val.GetUnknown(ctx) { + tfVal := tftypes.NewValue(typ.TerraformType(ctx), tftypes.UnknownValue) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfVal, path)...) 
+ + if diags.HasError() { + return nil, diags + } + } + + res, err := typ.ValueFromTerraform(ctx, tfVal) + if err != nil { + return nil, append(diags, valueFromTerraformErrorDiag(err, path)) + } + return res, nil + } + err := tftypes.ValidateValue(typ.TerraformType(ctx), val.GetValue(ctx)) + if err != nil { + return nil, append(diags, validateValueErrorDiag(err, path)) + } + + tfVal := tftypes.NewValue(typ.TerraformType(ctx), val.GetValue(ctx)) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfVal, path)...) + + if diags.HasError() { + return nil, diags + } + } + + res, err := typ.ValueFromTerraform(ctx, tfVal) + if err != nil { + return nil, append(diags, valueFromTerraformErrorDiag(err, path)) + } + return res, nil +} + +// Nullable is an interface for types that can be explicitly set to null. +type Nullable interface { + SetNull(context.Context, bool) error + SetValue(context.Context, interface{}) error + GetNull(context.Context) bool + GetValue(context.Context) interface{} +} + +// NewNullable creates a zero value of `target` (or the concrete type it's +// referencing, if it's a pointer) and calls its SetNull method. +// +// It is meant to be called through Into, not directly. +func NewNullable(ctx context.Context, typ attr.Type, val tftypes.Value, target reflect.Value, opts Options, path path.Path) (reflect.Value, diag.Diagnostics) { + var diags diag.Diagnostics + receiver := pointerSafeZeroValue(ctx, target) + method := receiver.MethodByName("SetNull") + if !method.IsValid() { + err := fmt.Errorf("cannot find SetNull method on type %s", receiver.Type().String()) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert value. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+err.Error(), + ) + return target, diags + } + results := method.Call([]reflect.Value{ + reflect.ValueOf(ctx), + reflect.ValueOf(val.IsNull()), + }) + err := results[0].Interface() + if err != nil { + var underlyingErr error + switch e := err.(type) { + case error: + underlyingErr = e + default: + underlyingErr = fmt.Errorf("unknown error type: %T", e) + } + underlyingErr = fmt.Errorf("reflection error: %w", underlyingErr) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert into a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+underlyingErr.Error(), + ) + return target, diags + } + return receiver, diags +} + +// FromNullable creates an attr.Value from the data in a Nullable. +// +// It is meant to be called through FromValue, not directly. +func FromNullable(ctx context.Context, typ attr.Type, val Nullable, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + if val.GetNull(ctx) { + tfVal := tftypes.NewValue(typ.TerraformType(ctx), nil) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfVal, path)...) + + if diags.HasError() { + return nil, diags + } + } + + res, err := typ.ValueFromTerraform(ctx, tfVal) + if err != nil { + return nil, append(diags, valueFromTerraformErrorDiag(err, path)) + } + return res, nil + } + err := tftypes.ValidateValue(typ.TerraformType(ctx), val.GetValue(ctx)) + if err != nil { + return nil, append(diags, validateValueErrorDiag(err, path)) + } + + tfVal := tftypes.NewValue(typ.TerraformType(ctx), val.GetValue(ctx)) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfVal, path)...) 
+ + if diags.HasError() { + return nil, diags + } + } + + res, err := typ.ValueFromTerraform(ctx, tfVal) + if err != nil { + return nil, append(diags, valueFromTerraformErrorDiag(err, path)) + } + return res, diags +} + +// NewValueConverter creates a zero value of `target` (or the concrete type +// it's referencing, if it's a pointer) and calls its FromTerraform5Value +// method. +// +// It is meant to be called through Into, not directly. +func NewValueConverter(ctx context.Context, typ attr.Type, val tftypes.Value, target reflect.Value, opts Options, path path.Path) (reflect.Value, diag.Diagnostics) { + var diags diag.Diagnostics + receiver := pointerSafeZeroValue(ctx, target) + method := receiver.MethodByName("FromTerraform5Value") + if !method.IsValid() { + err := fmt.Errorf("could not find FromTerraform5Type method on type %s", receiver.Type().String()) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert into a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return target, diags + } + results := method.Call([]reflect.Value{reflect.ValueOf(val)}) + err := results[0].Interface() + if err != nil { + var underlyingErr error + switch e := err.(type) { + case error: + underlyingErr = e + default: + underlyingErr = fmt.Errorf("unknown error type: %T", e) + } + underlyingErr = fmt.Errorf("reflection error: %w", underlyingErr) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert into a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+underlyingErr.Error(), + ) + return target, diags + } + return receiver, diags +} + +// FromValueCreator creates an attr.Value from the data in a +// tftypes.ValueCreator, calling its ToTerraform5Value method and converting +// the result to an attr.Value using `typ`. 
+// +// It is meant to be called from FromValue, not directly. +func FromValueCreator(ctx context.Context, typ attr.Type, val tftypes.ValueCreator, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + raw, err := val.ToTerraform5Value() + if err != nil { + return nil, append(diags, toTerraform5ValueErrorDiag(err, path)) + } + err = tftypes.ValidateValue(typ.TerraformType(ctx), raw) + if err != nil { + return nil, append(diags, validateValueErrorDiag(err, path)) + } + tfVal := tftypes.NewValue(typ.TerraformType(ctx), raw) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfVal, path)...) + + if diags.HasError() { + return nil, diags + } + } + + res, err := typ.ValueFromTerraform(ctx, tfVal) + if err != nil { + return nil, append(diags, valueFromTerraformErrorDiag(err, path)) + } + return res, diags +} + +// NewAttributeValue creates a new reflect.Value by calling the +// ValueFromTerraform method on `typ`. It will return an error if the returned +// `attr.Value` is not the same type as `target`. +// +// It is meant to be called through Into, not directly. +func NewAttributeValue(ctx context.Context, typ attr.Type, val tftypes.Value, target reflect.Value, opts Options, path path.Path) (reflect.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, val, path)...) 
+ + if diags.HasError() { + return target, diags + } + } + + res, err := typ.ValueFromTerraform(ctx, val) + if err != nil { + return target, append(diags, valueFromTerraformErrorDiag(err, path)) + } + if reflect.TypeOf(res) != target.Type() { + diags.Append(diag.WithPath(path, DiagNewAttributeValueIntoWrongType{ + ValType: reflect.TypeOf(res), + TargetType: target.Type(), + SchemaType: typ, + })) + return target, diags + } + return reflect.ValueOf(res), diags +} + +// FromAttributeValue creates an attr.Value from an attr.Value. It just returns +// the attr.Value it is passed, but reserves the right in the future to do some +// validation on that attr.Value to make sure it matches the type produced by +// `typ`. +// +// It is meant to be called through FromValue, not directly. +func FromAttributeValue(ctx context.Context, typ attr.Type, val attr.Value, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + tfVal, err := val.ToTerraformValue(ctx) + if err != nil { + return val, append(diags, toTerraformValueErrorDiag(err, path)) + } + + diags.Append(typeWithValidate.Validate(ctx, tfVal, path)...) 
+ + if diags.HasError() { + return val, diags + } + } + + return val, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/into.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/into.go new file mode 100644 index 0000000000..d5dbf52c4b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/into.go @@ -0,0 +1,195 @@ +package reflect + +import ( + "context" + "fmt" + "math/big" + "reflect" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" +) + +// Into uses the data in `val` to populate `target`, using the reflection +// package to recursively reflect into structs and slices. If `target` is an +// attr.Value, its assignment method will be used instead of reflecting. If +// `target` is a tftypes.ValueConverter, the FromTerraformValue method will be +// used instead of using reflection. Primitives are set using the val.As +// method. Structs use reflection: each exported struct field must have a +// "tfsdk" tag with the name of the field in the tftypes.Value, and all fields +// in the tftypes.Value must have a corresponding property in the struct. Into +// will be called for each struct field. Slices will have Into called for each +// element. +func Into(ctx context.Context, typ attr.Type, val tftypes.Value, target interface{}, opts Options, path path.Path) diag.Diagnostics { + var diags diag.Diagnostics + + v := reflect.ValueOf(target) + if v.Kind() != reflect.Ptr { + err := fmt.Errorf("target must be a pointer, got %T, which is a %s", target, v.Kind()) + diags.AddAttributeError( + path, + "Value Conversion Error", + fmt.Sprintf("An unexpected error was encountered trying to convert the value. 
This is always an error in the provider. Please report the following to the provider developer:\n\nPath: %s\nError: %s", path.String(), err.Error()), + ) + return diags + } + result, diags := BuildValue(ctx, typ, val, v.Elem(), opts, path) + if diags.HasError() { + return diags + } + v.Elem().Set(result) + return diags +} + +// BuildValue constructs a reflect.Value of the same type as `target`, +// populated with the data in `val`. It will defensively instantiate new values +// to set, making it safe for use with pointer types which may be nil. It tries +// to give consumers the ability to override its default behaviors wherever +// possible. +func BuildValue(ctx context.Context, typ attr.Type, val tftypes.Value, target reflect.Value, opts Options, path path.Path) (reflect.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + // if this isn't a valid reflect.Value, bail before we accidentally + // panic + if !target.IsValid() { + err := fmt.Errorf("invalid target") + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to build a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return target, diags + } + // if this is an attr.Value, build the type from that + if target.Type().Implements(reflect.TypeOf((*attr.Value)(nil)).Elem()) { + return NewAttributeValue(ctx, typ, val, target, opts, path) + } + // if this tells tftypes how to build an instance of it out of a + // tftypes.Value, well, that's what we want, so do that instead of our + // default logic. 
+ if target.Type().Implements(reflect.TypeOf((*tftypes.ValueConverter)(nil)).Elem()) { + return NewValueConverter(ctx, typ, val, target, opts, path) + } + // if this can explicitly be set to unknown, do that + if target.Type().Implements(reflect.TypeOf((*Unknownable)(nil)).Elem()) { + res, unknownableDiags := NewUnknownable(ctx, typ, val, target, opts, path) + diags.Append(unknownableDiags...) + if diags.HasError() { + return target, diags + } + target = res + // only return if it's unknown; we want to call SetUnknown + // either way, but if the value is unknown, there's nothing + // else to do, so bail + if !val.IsKnown() { + return target, nil + } + } + // if this can explicitly be set to null, do that + if target.Type().Implements(reflect.TypeOf((*Nullable)(nil)).Elem()) { + res, nullableDiags := NewNullable(ctx, typ, val, target, opts, path) + diags.Append(nullableDiags...) + if diags.HasError() { + return target, diags + } + target = res + // only return if it's null; we want to call SetNull either + // way, but if the value is null, there's nothing else to do, + // so bail + if val.IsNull() { + return target, nil + } + } + if !val.IsKnown() { + // we already handled unknown the only ways we can + // we checked that target doesn't have a SetUnknown method we + // can call + // we checked that target isn't an attr.Value + // all that's left to us now is to set it as an empty value or + // throw an error, depending on what's in opts + if !opts.UnhandledUnknownAsEmpty { + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to build a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + "Received unknown value, however the target type cannot handle unknown values. 
Use the corresponding `types` package type or a custom type that handles unknown values.\n\n"+ + fmt.Sprintf("Path: %s\nTarget Type: %s\nSuggested Type: %s", path.String(), target.Type(), reflect.TypeOf(typ.ValueType(ctx))), + ) + return target, diags + } + // we want to set unhandled unknowns to the empty value + return reflect.Zero(target.Type()), diags + } + + if val.IsNull() { + // we already handled null the only ways we can + // we checked that target doesn't have a SetNull method we can + // call + // we checked that target isn't an attr.Value + // all that's left to us now is to set it as an empty value or + // throw an error, depending on what's in opts + if canBeNil(target) || opts.UnhandledNullAsEmpty { + return reflect.Zero(target.Type()), nil + } + + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to build a value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + "Received null value, however the target type cannot handle null values. Use the corresponding `types` package type, a pointer type or a custom type that handles null values.\n\n"+ + fmt.Sprintf("Path: %s\nTarget Type: %s\nSuggested `types` Type: %s\nSuggested Pointer Type: *%s", path.String(), target.Type(), reflect.TypeOf(typ.ValueType(ctx)), target.Type()), + ) + + return target, diags + } + // *big.Float and *big.Int are technically pointers, but we want them + // handled as numbers + if target.Type() == reflect.TypeOf(big.NewFloat(0)) || target.Type() == reflect.TypeOf(big.NewInt(0)) { + return Number(ctx, typ, val, target, opts, path) + } + switch target.Kind() { + case reflect.Struct: + val, valDiags := Struct(ctx, typ, val, target, opts, path) + diags.Append(valDiags...) + return val, diags + case reflect.Bool, reflect.String: + val, valDiags := Primitive(ctx, typ, val, target, path) + diags.Append(valDiags...) 
+ return val, diags + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64: + // numbers are the wooooorst and need their own special handling + // because we can't just hand them off to tftypes and also + // because we can't just make people use *big.Floats, because a + // nil *big.Float will crash everything if we don't handle it + // as a special case, so let's just special case numbers and + // let people use the types they want + val, valDiags := Number(ctx, typ, val, target, opts, path) + diags.Append(valDiags...) + return val, diags + case reflect.Slice: + val, valDiags := reflectSlice(ctx, typ, val, target, opts, path) + diags.Append(valDiags...) + return val, diags + case reflect.Map: + val, valDiags := Map(ctx, typ, val, target, opts, path) + diags.Append(valDiags...) + return val, diags + case reflect.Ptr: + val, valDiags := Pointer(ctx, typ, val, target, opts, path) + diags.Append(valDiags...) + return val, diags + default: + err := fmt.Errorf("don't know how to reflect %s into %s", val.Type(), target.Type()) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to build a value. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+err.Error(), + ) + return target, diags + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/map.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/map.go new file mode 100644 index 0000000000..db2b6fe63b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/map.go @@ -0,0 +1,186 @@ +package reflect + +import ( + "context" + "fmt" + "reflect" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Map creates a map value that matches the type of `target`, and populates it +// with the contents of `val`. +func Map(ctx context.Context, typ attr.Type, val tftypes.Value, target reflect.Value, opts Options, path path.Path) (reflect.Value, diag.Diagnostics) { + var diags diag.Diagnostics + underlyingValue := trueReflectValue(target) + + // this only works with maps, so check that out first + if underlyingValue.Kind() != reflect.Map { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: val, + TargetType: target.Type(), + Err: fmt.Errorf("expected a map type, got %s", target.Type()), + })) + return target, diags + } + if !val.Type().Is(tftypes.Map{}) { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: val, + TargetType: target.Type(), + Err: fmt.Errorf("cannot reflect %s into a map, must be a map", val.Type().String()), + })) + return target, diags + } + elemTyper, ok := typ.(attr.TypeWithElementType) + if !ok { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: val, + TargetType: target.Type(), + Err: fmt.Errorf("cannot reflect map using type 
information provided by %T, %T must be an attr.TypeWithElementType", typ, typ), + })) + return target, diags + } + + // we need our value to become a map of values so we can iterate over + // them and handle them individually + values := map[string]tftypes.Value{} + err := val.As(&values) + if err != nil { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: val, + TargetType: target.Type(), + Err: err, + })) + return target, diags + } + + // we need to know the type the slice is wrapping + elemType := underlyingValue.Type().Elem() + elemAttrType := elemTyper.ElementType() + + // we want an empty version of the map + m := reflect.MakeMapWithSize(underlyingValue.Type(), len(values)) + + // go over each of the values passed in, create a Go value of the right + // type for them, and add it to our new map + for key, value := range values { + // create a new Go value of the type that can go in the map + targetValue := reflect.Zero(elemType) + + // update our path so we can have nice errors + path := path.AtMapKey(key) + + // reflect the value into our new target + result, elemDiags := BuildValue(ctx, elemAttrType, value, targetValue, opts, path) + diags.Append(elemDiags...) + + if diags.HasError() { + return target, diags + } + + m.SetMapIndex(reflect.ValueOf(key), result) + } + + return m, diags +} + +// FromMap returns an attr.Value representing the data contained in `val`. +// `val` must be a map type with keys that are a string type. The attr.Value +// will be of the type produced by `typ`. +// +// It is meant to be called through FromValue, not directly. +func FromMap(ctx context.Context, typ attr.TypeWithElementType, val reflect.Value, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + tfType := typ.TerraformType(ctx) + + if val.IsNil() { + tfVal := tftypes.NewValue(tfType, nil) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfVal, path)...) 
+ + if diags.HasError() { + return nil, diags + } + } + + attrVal, err := typ.ValueFromTerraform(ctx, tfVal) + + if err != nil { + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert from map value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + + return attrVal, diags + } + + elemType := typ.ElementType() + tfElems := map[string]tftypes.Value{} + for _, key := range val.MapKeys() { + if key.Kind() != reflect.String { + err := fmt.Errorf("map keys must be strings, got %s", key.Type()) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert into a Terraform value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + val, valDiags := FromValue(ctx, elemType, val.MapIndex(key).Interface(), path.AtMapKey(key.String())) + diags.Append(valDiags...) + + if diags.HasError() { + return nil, diags + } + tfVal, err := val.ToTerraformValue(ctx) + if err != nil { + return nil, append(diags, toTerraformValueErrorDiag(err, path)) + } + + if typeWithValidate, ok := elemType.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfVal, path.AtMapKey(key.String()))...) + + if diags.HasError() { + return nil, diags + } + } + + tfElems[key.String()] = tfVal + } + + err := tftypes.ValidateValue(tfType, tfElems) + if err != nil { + return nil, append(diags, validateValueErrorDiag(err, path)) + } + + tfVal := tftypes.NewValue(tfType, tfElems) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfVal, path)...) 
+ + if diags.HasError() { + return nil, diags + } + } + + attrVal, err := typ.ValueFromTerraform(ctx, tfVal) + + if err != nil { + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert to map value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + + return attrVal, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/number.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/number.go new file mode 100644 index 0000000000..17b5cbcb9c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/number.go @@ -0,0 +1,372 @@ +package reflect + +import ( + "context" + "fmt" + "math" + "math/big" + "reflect" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Number creates a *big.Float and populates it with the data in `val`. It then +// gets converted to the type of `target`, as long as `target` is a valid +// number type (any of the built-in int, uint, or float types, *big.Float, and +// *big.Int). +// +// Number will loudly fail when a number cannot be losslessly represented using +// the requested type, unless opts.AllowRoundingNumbers is set to true. This +// setting is mildly dangerous, because Terraform does not like when you round +// things, as a general rule of thumb. +// +// It is meant to be called through Into, not directly. 
+func Number(ctx context.Context, typ attr.Type, val tftypes.Value, target reflect.Value, opts Options, path path.Path) (reflect.Value, diag.Diagnostics) { + var diags diag.Diagnostics + result := big.NewFloat(0) + err := val.As(&result) + if err != nil { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Err: err, + TargetType: target.Type(), + Val: val, + })) + return target, diags + } + roundingError := fmt.Errorf("cannot store %s in %s", result.String(), target.Type()) + roundingErrorDiag := diag.NewAttributeErrorDiagnostic( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert to number. This is always an error in the provider. Please report the following to the provider developer:\n\n"+roundingError.Error(), + ) + + switch target.Type() { + case reflect.TypeOf(big.NewFloat(0)): + return reflect.ValueOf(result), diags + case reflect.TypeOf(big.NewInt(0)): + intResult, acc := result.Int(nil) + if acc != big.Exact && !opts.AllowRoundingNumbers { + return reflect.ValueOf(result), append(diags, roundingErrorDiag) + } + return reflect.ValueOf(intResult), diags + } + switch target.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + intResult, acc := result.Int64() + if acc != big.Exact && !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + switch target.Kind() { + case reflect.Int: + if strconv.IntSize == 32 && intResult > math.MaxInt32 { + if !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + intResult = math.MaxInt32 + } + if strconv.IntSize == 32 && intResult < math.MinInt32 { + if !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + intResult = math.MinInt32 + } + return reflect.ValueOf(int(intResult)), diags + case reflect.Int8: + if intResult > math.MaxInt8 { + if !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + intResult = math.MaxInt8 + 
} + if intResult < math.MinInt8 { + if !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + intResult = math.MinInt8 + } + return reflect.ValueOf(int8(intResult)), diags + case reflect.Int16: + if intResult > math.MaxInt16 { + if !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + intResult = math.MaxInt16 + } + if intResult < math.MinInt16 { + if !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + intResult = math.MinInt16 + } + return reflect.ValueOf(int16(intResult)), diags + case reflect.Int32: + if intResult > math.MaxInt32 { + if !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + intResult = math.MaxInt32 + } + if intResult < math.MinInt32 { + if !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + intResult = math.MinInt32 + } + return reflect.ValueOf(int32(intResult)), diags + case reflect.Int64: + return reflect.ValueOf(intResult), diags + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + uintResult, acc := result.Uint64() + if acc != big.Exact && !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + switch target.Kind() { + case reflect.Uint: + if strconv.IntSize == 32 && uintResult > math.MaxUint32 { + if !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + uintResult = math.MaxUint32 + } + return reflect.ValueOf(uint(uintResult)), diags + case reflect.Uint8: + if uintResult > math.MaxUint8 { + if !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + uintResult = math.MaxUint8 + } + return reflect.ValueOf(uint8(uintResult)), diags + case reflect.Uint16: + if uintResult > math.MaxUint16 { + if !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + uintResult = math.MaxUint16 + } + return reflect.ValueOf(uint16(uintResult)), diags + 
case reflect.Uint32: + if uintResult > math.MaxUint32 { + if !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + uintResult = math.MaxUint32 + } + return reflect.ValueOf(uint32(uintResult)), diags + case reflect.Uint64: + return reflect.ValueOf(uintResult), diags + } + case reflect.Float32: + floatResult, acc := result.Float32() + if acc != big.Exact && !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } else if acc == big.Above { + floatResult = math.MaxFloat32 + } else if acc == big.Below { + floatResult = math.SmallestNonzeroFloat32 + } else if acc != big.Exact { + err := fmt.Errorf("unsure how to round %s and %f", acc, floatResult) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert to number. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return target, diags + } + return reflect.ValueOf(floatResult), diags + case reflect.Float64: + floatResult, acc := result.Float64() + if acc != big.Exact && !opts.AllowRoundingNumbers { + return target, append(diags, roundingErrorDiag) + } + if acc == big.Above { + if floatResult == math.Inf(1) || floatResult == math.MaxFloat64 { + floatResult = math.MaxFloat64 + } else if floatResult == 0.0 || floatResult == math.SmallestNonzeroFloat64 { + floatResult = -math.SmallestNonzeroFloat64 + } else { + err := fmt.Errorf("not sure how to round %s and %f", acc, floatResult) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert to number. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+err.Error(), + ) + return target, diags + } + } else if acc == big.Below { + if floatResult == math.Inf(-1) || floatResult == -math.MaxFloat64 { + floatResult = -math.MaxFloat64 + } else if floatResult == -0.0 || floatResult == -math.SmallestNonzeroFloat64 { //nolint:staticcheck + floatResult = math.SmallestNonzeroFloat64 + } else { + err := fmt.Errorf("not sure how to round %s and %f", acc, floatResult) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert to number. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return target, diags + } + } else if acc != big.Exact { + err := fmt.Errorf("not sure how to round %s and %f", acc, floatResult) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert to number. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return target, diags + } + return reflect.ValueOf(floatResult), diags + } + err = fmt.Errorf("cannot convert number to %s", target.Type()) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert to number. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return target, diags +} + +// FromInt creates an attr.Value using `typ` from an int64. +// +// It is meant to be called through FromValue, not directly. 
+func FromInt(ctx context.Context, typ attr.Type, val int64, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + err := tftypes.ValidateValue(tftypes.Number, val) + if err != nil { + return nil, append(diags, validateValueErrorDiag(err, path)) + } + tfNum := tftypes.NewValue(tftypes.Number, val) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfNum, path)...) + + if diags.HasError() { + return nil, diags + } + } + + num, err := typ.ValueFromTerraform(ctx, tfNum) + if err != nil { + return nil, append(diags, valueFromTerraformErrorDiag(err, path)) + } + + return num, diags +} + +// FromUint creates an attr.Value using `typ` from a uint64. +// +// It is meant to be called through FromValue, not directly. +func FromUint(ctx context.Context, typ attr.Type, val uint64, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + err := tftypes.ValidateValue(tftypes.Number, val) + if err != nil { + return nil, append(diags, validateValueErrorDiag(err, path)) + } + tfNum := tftypes.NewValue(tftypes.Number, val) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfNum, path)...) + + if diags.HasError() { + return nil, diags + } + } + + num, err := typ.ValueFromTerraform(ctx, tfNum) + if err != nil { + return nil, append(diags, valueFromTerraformErrorDiag(err, path)) + } + + return num, diags +} + +// FromFloat creates an attr.Value using `typ` from a float64. +// +// It is meant to be called through FromValue, not directly. 
+func FromFloat(ctx context.Context, typ attr.Type, val float64, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + err := tftypes.ValidateValue(tftypes.Number, val) + if err != nil { + return nil, append(diags, validateValueErrorDiag(err, path)) + } + tfNum := tftypes.NewValue(tftypes.Number, val) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfNum, path)...) + + if diags.HasError() { + return nil, diags + } + } + + num, err := typ.ValueFromTerraform(ctx, tfNum) + if err != nil { + return nil, append(diags, valueFromTerraformErrorDiag(err, path)) + } + + return num, diags +} + +// FromBigFloat creates an attr.Value using `typ` from a *big.Float. +// +// It is meant to be called through FromValue, not directly. +func FromBigFloat(ctx context.Context, typ attr.Type, val *big.Float, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + err := tftypes.ValidateValue(tftypes.Number, val) + if err != nil { + return nil, append(diags, validateValueErrorDiag(err, path)) + } + tfNum := tftypes.NewValue(tftypes.Number, val) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfNum, path)...) + + if diags.HasError() { + return nil, diags + } + } + + num, err := typ.ValueFromTerraform(ctx, tfNum) + if err != nil { + return nil, append(diags, valueFromTerraformErrorDiag(err, path)) + } + + return num, diags +} + +// FromBigInt creates an attr.Value using `typ` from a *big.Int. +// +// It is meant to be called through FromValue, not directly. 
+func FromBigInt(ctx context.Context, typ attr.Type, val *big.Int, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + fl := big.NewFloat(0).SetInt(val) + err := tftypes.ValidateValue(tftypes.Number, fl) + if err != nil { + return nil, append(diags, validateValueErrorDiag(err, path)) + } + tfNum := tftypes.NewValue(tftypes.Number, fl) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfNum, path)...) + + if diags.HasError() { + return nil, diags + } + } + + num, err := typ.ValueFromTerraform(ctx, tfNum) + if err != nil { + return nil, append(diags, valueFromTerraformErrorDiag(err, path)) + } + + return num, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/options.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/options.go new file mode 100644 index 0000000000..0179e27d82 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/options.go @@ -0,0 +1,20 @@ +package reflect + +// Options provides configuration settings for how the reflection behavior +// works, letting callers tweak different behaviors based on their needs. +type Options struct { + // UnhandledNullAsEmpty controls whether null values should be + // translated into empty values without provider interaction, or if + // they must be explicitly handled. + UnhandledNullAsEmpty bool + + // UnhandledUnknownAsEmpty controls whether unknown values should be + // translated into empty values without provider interaction, or if + // they must be explicitly handled. + UnhandledUnknownAsEmpty bool + + // AllowRoundingNumbers silently rounds numbers that don't fit + // perfectly in the types they're being stored in, rather than + // returning errors. Numbers will always be rounded towards 0. 
+ AllowRoundingNumbers bool +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/outof.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/outof.go new file mode 100644 index 0000000000..179cafcf0a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/outof.go @@ -0,0 +1,92 @@ +package reflect + +import ( + "context" + "fmt" + "math/big" + "reflect" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// FromValue is the inverse of Into, taking a Go value (`val`) and transforming it +// into an attr.Value using the attr.Type supplied. `val` will first be +// transformed into a tftypes.Value, then passed to `typ`'s ValueFromTerraform +// method. 
+func FromValue(ctx context.Context, typ attr.Type, val interface{}, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + if v, ok := val.(attr.Value); ok { + return FromAttributeValue(ctx, typ, v, path) + } + if v, ok := val.(tftypes.ValueCreator); ok { + return FromValueCreator(ctx, typ, v, path) + } + if v, ok := val.(Unknownable); ok { + return FromUnknownable(ctx, typ, v, path) + } + if v, ok := val.(Nullable); ok { + return FromNullable(ctx, typ, v, path) + } + if bf, ok := val.(*big.Float); ok { + return FromBigFloat(ctx, typ, bf, path) + } + if bi, ok := val.(*big.Int); ok { + return FromBigInt(ctx, typ, bi, path) + } + value := reflect.ValueOf(val) + kind := value.Kind() + switch kind { + case reflect.Struct: + t, ok := typ.(attr.TypeWithAttributeTypes) + if !ok { + err := fmt.Errorf("cannot use type %T as schema type %T; %T must be an attr.TypeWithAttributeTypes to hold %T", val, typ, typ, val) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert from value. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + return FromStruct(ctx, t, value, path) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + return FromInt(ctx, typ, value.Int(), path) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return FromUint(ctx, typ, value.Uint(), path) + case reflect.Float32, reflect.Float64: + return FromFloat(ctx, typ, value.Float(), path) + case reflect.Bool: + return FromBool(ctx, typ, value.Bool(), path) + case reflect.String: + return FromString(ctx, typ, value.String(), path) + case reflect.Slice: + return FromSlice(ctx, typ, value, path) + case reflect.Map: + t, ok := typ.(attr.TypeWithElementType) + if !ok { + err := fmt.Errorf("cannot use type %T as schema type %T; %T must be an attr.TypeWithElementType to hold %T", val, typ, typ, val) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert from value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + return FromMap(ctx, t, value, path) + case reflect.Ptr: + return FromPointer(ctx, typ, value, path) + default: + err := fmt.Errorf("cannot construct attr.Type from %T (%s)", val, kind) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert from value. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/pointer.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/pointer.go new file mode 100644 index 0000000000..04aa9cc8a3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/pointer.go @@ -0,0 +1,119 @@ +package reflect + +import ( + "context" + "fmt" + "reflect" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Pointer builds a new zero value of the concrete type that `target` +// references, populates it with BuildValue, and takes a pointer to it. +// +// It is meant to be called through Into, not directly. +func Pointer(ctx context.Context, typ attr.Type, val tftypes.Value, target reflect.Value, opts Options, path path.Path) (reflect.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + if target.Kind() != reflect.Ptr { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: val, + TargetType: target.Type(), + Err: fmt.Errorf("cannot dereference pointer, not a pointer, is a %s (%s)", target.Type(), target.Kind()), + })) + return target, diags + } + // we may have gotten a nil pointer, so we need to create our own that + // we can set + pointer := reflect.New(target.Type().Elem()) + // build out whatever the pointer is pointing to + pointed, pointedDiags := BuildValue(ctx, typ, val, pointer.Elem(), opts, path) + diags.Append(pointedDiags...) 
+ + if diags.HasError() { + return target, diags + } + // to be able to set the pointer to our new pointer, we need to create + // a pointer to the pointer + pointerPointer := reflect.New(pointer.Type()) + // we set the pointer we created on the pointer to the pointer + pointerPointer.Elem().Set(pointer) + // then it's settable, so we can now set the concrete value we created + // on the pointer + pointerPointer.Elem().Elem().Set(pointed) + // return the pointer we created + return pointerPointer.Elem(), diags +} + +// create a zero value of concrete type underlying any number of pointers, then +// wrap it in that number of pointers again. The end result is to wind up with +// the same exact type, except now you can be sure it's pointing to actual data +// and will not give you a nil pointer dereference panic unexpectedly. +func pointerSafeZeroValue(_ context.Context, target reflect.Value) reflect.Value { + pointer := target.Type() + var pointers int + for pointer.Kind() == reflect.Ptr { + pointer = pointer.Elem() + pointers++ + } + receiver := reflect.Zero(pointer) + for i := 0; i < pointers; i++ { + newReceiver := reflect.New(receiver.Type()) + newReceiver.Elem().Set(receiver) + receiver = newReceiver + } + return receiver +} + +// FromPointer turns a pointer into an attr.Value using `typ`. If the pointer +// is nil, the attr.Value will use its null representation. If it is not nil, +// it will recurse into FromValue to find the attr.Value of the type the value +// the pointer is referencing. +// +// It is meant to be called through FromValue, not directly. +func FromPointer(ctx context.Context, typ attr.Type, value reflect.Value, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + if value.Kind() != reflect.Ptr { + err := fmt.Errorf("cannot use type %s as a pointer", value.Type()) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert from pointer value. 
This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + if value.IsNil() { + tfVal := tftypes.NewValue(typ.TerraformType(ctx), nil) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfVal, path)...) + + if diags.HasError() { + return nil, diags + } + } + + attrVal, err := typ.ValueFromTerraform(ctx, tfVal) + + if err != nil { + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert from pointer value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + + return attrVal, diags + } + + attrVal, attrValDiags := FromValue(ctx, typ, value.Elem().Interface(), path) + diags.Append(attrValDiags...) + + return attrVal, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/primitive.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/primitive.go new file mode 100644 index 0000000000..a2491eafe9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/primitive.go @@ -0,0 +1,109 @@ +package reflect + +import ( + "context" + "errors" + "reflect" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Primitive builds a string or boolean, depending on the type of `target`, and +// populates it with the data in `val`. +// +// It is meant to be called through `Into`, not directly. 
+func Primitive(ctx context.Context, typ attr.Type, val tftypes.Value, target reflect.Value, path path.Path) (reflect.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + switch target.Kind() { + case reflect.Bool: + var b bool + err := val.As(&b) + if err != nil { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: val, + TargetType: target.Type(), + Err: err, + })) + return target, diags + } + return reflect.ValueOf(b).Convert(target.Type()), nil + case reflect.String: + var s string + err := val.As(&s) + if err != nil { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: val, + TargetType: target.Type(), + Err: err, + })) + return target, diags + } + return reflect.ValueOf(s).Convert(target.Type()), nil + default: + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: val, + TargetType: target.Type(), + Err: errors.New("unknown type"), + })) + return target, diags + } +} + +// FromString returns an attr.Value as produced by `typ` from a string. +// +// It is meant to be called through FromValue, not directly. +func FromString(ctx context.Context, typ attr.Type, val string, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + err := tftypes.ValidateValue(tftypes.String, val) + if err != nil { + return nil, append(diags, validateValueErrorDiag(err, path)) + } + tfStr := tftypes.NewValue(tftypes.String, val) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfStr, path)...) + + if diags.HasError() { + return nil, diags + } + } + + str, err := typ.ValueFromTerraform(ctx, tfStr) + if err != nil { + return nil, append(diags, valueFromTerraformErrorDiag(err, path)) + } + + return str, diags +} + +// FromBool returns an attr.Value as produced by `typ` from a bool. +// +// It is meant to be called through FromValue, not directly. 
+func FromBool(ctx context.Context, typ attr.Type, val bool, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + err := tftypes.ValidateValue(tftypes.Bool, val) + if err != nil { + return nil, append(diags, validateValueErrorDiag(err, path)) + } + tfBool := tftypes.NewValue(tftypes.Bool, val) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfBool, path)...) + + if diags.HasError() { + return nil, diags + } + } + + b, err := typ.ValueFromTerraform(ctx, tfBool) + if err != nil { + return nil, append(diags, valueFromTerraformErrorDiag(err, path)) + } + + return b, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/slice.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/slice.go new file mode 100644 index 0000000000..73b0061527 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/slice.go @@ -0,0 +1,212 @@ +package reflect + +import ( + "context" + "fmt" + "reflect" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// build a slice of elements, matching the type of `target`, and fill it with +// the data in `val`. 
+func reflectSlice(ctx context.Context, typ attr.Type, val tftypes.Value, target reflect.Value, opts Options, path path.Path) (reflect.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + // this only works with slices, so check that out first + if target.Kind() != reflect.Slice { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: val, + TargetType: target.Type(), + Err: fmt.Errorf("expected a slice type, got %s", target.Type()), + })) + return target, diags + } + // TODO: check that the val is a list or set or tuple + elemTyper, ok := typ.(attr.TypeWithElementType) + if !ok { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: val, + TargetType: target.Type(), + Err: fmt.Errorf("cannot reflect %s using type information provided by %T, %T must be an attr.TypeWithElementType", val.Type(), typ, typ), + })) + return target, diags + } + + // we need our value to become a list of values so we can iterate over + // them and handle them individually + var values []tftypes.Value + err := val.As(&values) + if err != nil { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: val, + TargetType: target.Type(), + Err: err, + })) + return target, diags + } + + // we need to know the type the slice is wrapping + elemType := target.Type().Elem() + elemAttrType := elemTyper.ElementType() + + // we want an empty version of the slice + slice := reflect.MakeSlice(target.Type(), 0, len(values)) + + // go over each of the values passed in, create a Go value of the right + // type for them, and add it to our new slice + for pos, value := range values { + // create a new Go value of the type that can go in the slice + targetValue := reflect.Zero(elemType) + + // update our path so we can have nice errors + valPath := path.AtListIndex(pos) + + if typ.TerraformType(ctx).Is(tftypes.Set{}) { + attrVal, err := elemAttrType.ValueFromTerraform(ctx, value) + + if err != nil { + diags.AddAttributeError( + path, + "Value Conversion Error", + 
"An unexpected error was encountered trying to convert to slice value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return target, diags + } + + valPath = path.AtSetValue(attrVal) + } + + // reflect the value into our new target + val, valDiags := BuildValue(ctx, elemAttrType, value, targetValue, opts, valPath) + diags.Append(valDiags...) + + if diags.HasError() { + return target, diags + } + + // add the new target to our slice + slice = reflect.Append(slice, val) + } + + return slice, diags +} + +// FromSlice returns an attr.Value as produced by `typ` using the data in +// `val`. `val` must be a slice. `typ` must be an attr.TypeWithElementType or +// attr.TypeWithElementTypes. If the slice is nil, the representation of null +// for `typ` will be returned. Otherwise, FromSlice will recurse into FromValue +// for each element in the slice, using the element type or types defined on +// `typ` to construct values for them. +// +// It is meant to be called through FromValue, not directly. +func FromSlice(ctx context.Context, typ attr.Type, val reflect.Value, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + // TODO: support tuples, which are attr.TypeWithElementTypes + tfType := typ.TerraformType(ctx) + + if val.IsNil() { + tfVal := tftypes.NewValue(tfType, nil) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfVal, path)...) + + if diags.HasError() { + return nil, diags + } + } + + attrVal, err := typ.ValueFromTerraform(ctx, tfVal) + + if err != nil { + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert from slice value. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + + return attrVal, diags + } + + t, ok := typ.(attr.TypeWithElementType) + if !ok { + err := fmt.Errorf("cannot use type %T as schema type %T; %T must be an attr.TypeWithElementType to hold %T", val, typ, typ, val) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert from slice value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + + elemType := t.ElementType() + tfElems := make([]tftypes.Value, 0, val.Len()) + for i := 0; i < val.Len(); i++ { + // The underlying reflect.Slice is fetched by Index(). For set types, + // the path is value-based instead of index-based. Since there is only + // the index until the value is retrieved, this will pass the + // technically incorrect index-based path at first for framework + // debugging purposes, then correct the path afterwards. + valPath := path.AtListIndex(i) + + val, valDiags := FromValue(ctx, elemType, val.Index(i).Interface(), valPath) + diags.Append(valDiags...) + + if diags.HasError() { + return nil, diags + } + + tfVal, err := val.ToTerraformValue(ctx) + if err != nil { + return nil, append(diags, toTerraformValueErrorDiag(err, path)) + } + + if tfType.Is(tftypes.Set{}) { + valPath = path.AtSetValue(val) + } + + if typeWithValidate, ok := elemType.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfVal, valPath)...) + if diags.HasError() { + return nil, diags + } + } + + tfElems = append(tfElems, tfVal) + } + + err := tftypes.ValidateValue(tfType, tfElems) + if err != nil { + return nil, append(diags, validateValueErrorDiag(err, path)) + } + + tfVal := tftypes.NewValue(tfType, tfElems) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfVal, path)...) 
+ + if diags.HasError() { + return nil, diags + } + } + + attrVal, err := typ.ValueFromTerraform(ctx, tfVal) + + if err != nil { + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert from slice value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + + return attrVal, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/struct.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/struct.go new file mode 100644 index 0000000000..53801a2338 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/reflect/struct.go @@ -0,0 +1,226 @@ +package reflect + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Struct builds a new struct using the data in `object`, as long as `object` +// is a `tftypes.Object`. It will take the struct type from `target`, which +// must be a struct type. +// +// The properties on `target` must be tagged with a "tfsdk" label containing +// the field name to map to that property. Every property must be tagged, and +// every property must be present in the type of `object`, and all the +// attributes in the type of `object` must have a corresponding property. +// Properties that don't map to object attributes must have a `tfsdk:"-"` tag, +// explicitly defining them as not part of the object. This is to catch typos +// and other mistakes early. +// +// Struct is meant to be called from Into, not directly. 
+func Struct(ctx context.Context, typ attr.Type, object tftypes.Value, target reflect.Value, opts Options, path path.Path) (reflect.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + // this only works with object values, so make sure that constraint is + // met + if target.Kind() != reflect.Struct { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: object, + TargetType: target.Type(), + Err: fmt.Errorf("expected a struct type, got %s", target.Type()), + })) + return target, diags + } + if !object.Type().Is(tftypes.Object{}) { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: object, + TargetType: target.Type(), + Err: fmt.Errorf("cannot reflect %s into a struct, must be an object", object.Type().String()), + })) + return target, diags + } + attrsType, ok := typ.(attr.TypeWithAttributeTypes) + if !ok { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: object, + TargetType: target.Type(), + Err: fmt.Errorf("cannot reflect object using type information provided by %T, %T must be an attr.TypeWithAttributeTypes", typ, typ), + })) + return target, diags + } + + // collect a map of fields that are in the object passed in + var objectFields map[string]tftypes.Value + err := object.As(&objectFields) + if err != nil { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: object, + TargetType: target.Type(), + Err: err, + })) + return target, diags + } + + // collect a map of fields that are defined in the tags of the struct + // passed in + targetFields, err := getStructTags(ctx, target, path) + if err != nil { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: object, + TargetType: target.Type(), + Err: fmt.Errorf("error retrieving field names from struct tags: %w", err), + })) + return target, diags + } + + // we require an exact, 1:1 match of these fields to avoid typos + // leading to surprises, so let's ensure they have the exact same + // fields defined + var objectMissing, 
targetMissing []string + for field := range targetFields { + if _, ok := objectFields[field]; !ok { + objectMissing = append(objectMissing, field) + } + } + for field := range objectFields { + if _, ok := targetFields[field]; !ok { + targetMissing = append(targetMissing, field) + } + } + if len(objectMissing) > 0 || len(targetMissing) > 0 { + var missing []string + if len(objectMissing) > 0 { + missing = append(missing, fmt.Sprintf("Struct defines fields not found in object: %s.", commaSeparatedString(objectMissing))) + } + if len(targetMissing) > 0 { + missing = append(missing, fmt.Sprintf("Object defines fields not found in struct: %s.", commaSeparatedString(targetMissing))) + } + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: object, + TargetType: target.Type(), + Err: fmt.Errorf("mismatch between struct and object: %s", strings.Join(missing, " ")), + })) + return target, diags + } + + attrTypes := attrsType.AttributeTypes() + + // now that we know they match perfectly, fill the struct with the + // values in the object + result := reflect.New(target.Type()).Elem() + for field, structFieldPos := range targetFields { + attrType, ok := attrTypes[field] + if !ok { + diags.Append(diag.WithPath(path, DiagIntoIncompatibleType{ + Val: object, + TargetType: target.Type(), + Err: fmt.Errorf("could not find type information for attribute in supplied attr.Type %T", typ), + })) + return target, diags + } + structField := result.Field(structFieldPos) + fieldVal, fieldValDiags := BuildValue(ctx, attrType, objectFields[field], structField, opts, path.AtName(field)) + diags.Append(fieldValDiags...) + + if diags.HasError() { + return target, diags + } + structField.Set(fieldVal) + } + return result, diags +} + +// FromStruct builds an attr.Value as produced by `typ` from the data in `val`. +// `val` must be a struct type, and must have all its properties tagged and be +// a 1:1 match with the attributes reported by `typ`. 
FromStruct will recurse +// into FromValue for each attribute, using the type of the attribute as +// reported by `typ`. +// +// It is meant to be called through FromValue, not directly. +func FromStruct(ctx context.Context, typ attr.TypeWithAttributeTypes, val reflect.Value, path path.Path) (attr.Value, diag.Diagnostics) { + var diags diag.Diagnostics + objTypes := map[string]tftypes.Type{} + objValues := map[string]tftypes.Value{} + + // collect a map of fields that are defined in the tags of the struct + // passed in + targetFields, err := getStructTags(ctx, val, path) + if err != nil { + err = fmt.Errorf("error retrieving field names from struct tags: %w", err) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert from struct value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + + attrTypes := typ.AttributeTypes() + for name, fieldNo := range targetFields { + path := path.AtName(name) + fieldValue := val.Field(fieldNo) + + attrVal, attrValDiags := FromValue(ctx, attrTypes[name], fieldValue.Interface(), path) + diags.Append(attrValDiags...) + + if diags.HasError() { + return nil, diags + } + + attrType, ok := attrTypes[name] + if !ok || attrType == nil { + err := fmt.Errorf("couldn't find type information for attribute in supplied attr.Type %T", typ) + diags.AddAttributeError( + path, + "Value Conversion Error", + "An unexpected error was encountered trying to convert from struct value. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+err.Error(), + ) + return nil, diags + } + + objTypes[name] = attrType.TerraformType(ctx) + + tfObjVal, err := attrVal.ToTerraformValue(ctx) + if err != nil { + return nil, append(diags, toTerraformValueErrorDiag(err, path)) + } + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfObjVal, path)...) + + if diags.HasError() { + return nil, diags + } + } + + objValues[name] = tfObjVal + } + + tfVal := tftypes.NewValue(tftypes.Object{ + AttributeTypes: objTypes, + }, objValues) + + if typeWithValidate, ok := typ.(xattr.TypeWithValidate); ok { + diags.Append(typeWithValidate.Validate(ctx, tfVal, path)...) + + if diags.HasError() { + return nil, diags + } + } + + retType := typ.WithAttributeTypes(attrTypes) + ret, err := retType.ValueFromTerraform(ctx, tfVal) + if err != nil { + return nil, append(diags, valueFromTerraformErrorDiag(err, path)) + } + + return ret, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/applyresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/applyresourcechange.go new file mode 100644 index 0000000000..86013295a5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/applyresourcechange.go @@ -0,0 +1,33 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" +) + +// ApplyResourceChangeResponse returns the *tfprotov5.ApplyResourceChangeResponse +// equivalent of a *fwserver.ApplyResourceChangeResponse. 
+func ApplyResourceChangeResponse(ctx context.Context, fw *fwserver.ApplyResourceChangeResponse) *tfprotov5.ApplyResourceChangeResponse { + if fw == nil { + return nil + } + + proto5 := &tfprotov5.ApplyResourceChangeResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + newState, diags := State(ctx, fw.NewState) + + proto5.Diagnostics = append(proto5.Diagnostics, Diagnostics(ctx, diags)...) + proto5.NewState = newState + + newPrivate, diags := fw.Private.Bytes(ctx) + + proto5.Diagnostics = append(proto5.Diagnostics, Diagnostics(ctx, diags)...) + proto5.Private = newPrivate + + return proto5 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/block.go new file mode 100644 index 0000000000..18d380bb84 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/block.go @@ -0,0 +1,94 @@ +package toproto5 + +import ( + "context" + "sort" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Block returns the *tfprotov5.SchemaNestedBlock equivalent of a Block. +// Errors will be tftypes.AttributePathErrors based on `path`. `name` is the +// name of the attribute. 
+func Block(ctx context.Context, name string, path *tftypes.AttributePath, b fwschema.Block) (*tfprotov5.SchemaNestedBlock, error) { + schemaNestedBlock := &tfprotov5.SchemaNestedBlock{ + Block: &tfprotov5.SchemaBlock{ + Deprecated: b.GetDeprecationMessage() != "", + }, + TypeName: name, + } + + if b.GetDescription() != "" { + schemaNestedBlock.Block.Description = b.GetDescription() + schemaNestedBlock.Block.DescriptionKind = tfprotov5.StringKindPlain + } + + if b.GetMarkdownDescription() != "" { + schemaNestedBlock.Block.Description = b.GetMarkdownDescription() + schemaNestedBlock.Block.DescriptionKind = tfprotov5.StringKindMarkdown + } + + nm := b.GetNestingMode() + switch nm { + case fwschema.BlockNestingModeList: + schemaNestedBlock.Nesting = tfprotov5.SchemaNestedBlockNestingModeList + case fwschema.BlockNestingModeSet: + schemaNestedBlock.Nesting = tfprotov5.SchemaNestedBlockNestingModeSet + case fwschema.BlockNestingModeSingle: + schemaNestedBlock.Nesting = tfprotov5.SchemaNestedBlockNestingModeSingle + default: + return nil, path.NewErrorf("unrecognized nesting mode %v", nm) + } + + nestedBlockObject := b.GetNestedObject() + + for attrName, attr := range nestedBlockObject.GetAttributes() { + attrPath := path.WithAttributeName(attrName) + attrProto5, err := SchemaAttribute(ctx, attrName, attrPath, attr) + + if err != nil { + return nil, err + } + + schemaNestedBlock.Block.Attributes = append(schemaNestedBlock.Block.Attributes, attrProto5) + } + + for blockName, block := range nestedBlockObject.GetBlocks() { + blockPath := path.WithAttributeName(blockName) + blockProto5, err := Block(ctx, blockName, blockPath, block) + + if err != nil { + return nil, err + } + + schemaNestedBlock.Block.BlockTypes = append(schemaNestedBlock.Block.BlockTypes, blockProto5) + } + + sort.Slice(schemaNestedBlock.Block.Attributes, func(i, j int) bool { + if schemaNestedBlock.Block.Attributes[i] == nil { + return true + } + + if schemaNestedBlock.Block.Attributes[j] == nil { + return 
false + } + + return schemaNestedBlock.Block.Attributes[i].Name < schemaNestedBlock.Block.Attributes[j].Name + }) + + sort.Slice(schemaNestedBlock.Block.BlockTypes, func(i, j int) bool { + if schemaNestedBlock.Block.BlockTypes[i] == nil { + return true + } + + if schemaNestedBlock.Block.BlockTypes[j] == nil { + return false + } + + return schemaNestedBlock.Block.BlockTypes[i].TypeName < schemaNestedBlock.Block.BlockTypes[j].TypeName + }) + + return schemaNestedBlock, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/config.go new file mode 100644 index 0000000000..db8ae26c81 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/config.go @@ -0,0 +1,25 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// Config returns the *tfprotov5.DynamicValue for a *tfsdk.Config. 
+func Config(ctx context.Context, fw *tfsdk.Config) (*tfprotov5.DynamicValue, diag.Diagnostics) { + if fw == nil { + return nil, nil + } + + data := &fwschemadata.Data{ + Description: fwschemadata.DataDescriptionConfiguration, + Schema: fw.Schema, + TerraformValue: fw.Raw, + } + + return DynamicValue(ctx, data) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/configureprovider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/configureprovider.go new file mode 100644 index 0000000000..1c8b8cb657 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/configureprovider.go @@ -0,0 +1,22 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ConfigureProviderResponse returns the *tfprotov5.ConfigureProviderResponse +// equivalent of a *fwserver.ConfigureProviderResponse. 
+func ConfigureProviderResponse(ctx context.Context, fw *provider.ConfigureResponse) *tfprotov5.ConfigureProviderResponse { + if fw == nil { + return nil + } + + proto5 := &tfprotov5.ConfigureProviderResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + return proto5 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/diagnostics.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/diagnostics.go new file mode 100644 index 0000000000..c3e0ab6f09 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/diagnostics.go @@ -0,0 +1,48 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/totftypes" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// DiagnosticSeverity converts diag.Severity into tfprotov5.DiagnosticSeverity. +func DiagnosticSeverity(s diag.Severity) tfprotov5.DiagnosticSeverity { + switch s { + case diag.SeverityError: + return tfprotov5.DiagnosticSeverityError + case diag.SeverityWarning: + return tfprotov5.DiagnosticSeverityWarning + default: + return tfprotov5.DiagnosticSeverityInvalid + } +} + +// Diagnostics converts the diagnostics into the tfprotov5 collection type. 
+func Diagnostics(ctx context.Context, diagnostics diag.Diagnostics) []*tfprotov5.Diagnostic { + var results []*tfprotov5.Diagnostic + + for _, diagnostic := range diagnostics { + tfprotov5Diagnostic := &tfprotov5.Diagnostic{ + Detail: diagnostic.Detail(), + Severity: DiagnosticSeverity(diagnostic.Severity()), + Summary: diagnostic.Summary(), + } + + if diagWithPath, ok := diagnostic.(diag.DiagnosticWithPath); ok { + var diags diag.Diagnostics + + tfprotov5Diagnostic.Attribute, diags = totftypes.AttributePath(ctx, diagWithPath.Path()) + + if diags.HasError() { + results = append(results, Diagnostics(ctx, diags)...) + } + } + + results = append(results, tfprotov5Diagnostic) + } + + return results +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/doc.go new file mode 100644 index 0000000000..1eff714db2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/doc.go @@ -0,0 +1,3 @@ +// Package toproto5 contains functions to convert from framework types to +// protocol version 5 (tfprotov5) types. 
+package toproto5 diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/dynamic_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/dynamic_value.go new file mode 100644 index 0000000000..18a0e1022d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/dynamic_value.go @@ -0,0 +1,44 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// DynamicValue returns the *tfprotov5.DynamicValue for a given +// fwschemadata.Data. +// +// If necessary, the underlying data is modified to convert list and set block +// values from a null collection to an empty collection. This is to prevent +// developers from needing to understand Terraform's differences between +// block and attribute values where blocks are technically never null, but from +// a developer perspective this distinction introduces unnecessary complexity. +func DynamicValue(ctx context.Context, data *fwschemadata.Data) (*tfprotov5.DynamicValue, diag.Diagnostics) { + if data == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Prevent Terraform core errors for null list/set blocks. + diags.Append(data.ReifyNullCollectionBlocks(ctx)...) + + proto5, err := tfprotov5.NewDynamicValue(data.Schema.Type().TerraformType(ctx), data.TerraformValue) + + if err != nil { + diags.AddError( + "Unable to Convert "+data.Description.Title(), + "An unexpected error was encountered when converting the "+data.Description.String()+" to the protocol type. 
"+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Unable to create DynamicValue: "+err.Error(), + ) + + return nil, diags + } + + return &proto5, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/getproviderschema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/getproviderschema.go new file mode 100644 index 0000000000..d808225e78 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/getproviderschema.go @@ -0,0 +1,79 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// GetProviderSchemaResponse returns the *tfprotov5.GetProviderSchemaResponse +// equivalent of a *fwserver.GetProviderSchemaResponse. +func GetProviderSchemaResponse(ctx context.Context, fw *fwserver.GetProviderSchemaResponse) *tfprotov5.GetProviderSchemaResponse { + if fw == nil { + return nil + } + + protov5 := &tfprotov5.GetProviderSchemaResponse{ + DataSourceSchemas: map[string]*tfprotov5.Schema{}, + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + ResourceSchemas: map[string]*tfprotov5.Schema{}, + ServerCapabilities: ServerCapabilities(ctx, fw.ServerCapabilities), + } + + var err error + + protov5.Provider, err = Schema(ctx, fw.Provider) + + if err != nil { + protov5.Diagnostics = append(protov5.Diagnostics, &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, + Summary: "Error converting provider schema", + Detail: "The provider schema couldn't be converted into a usable type. This is always a problem with the provider. 
Please report the following to the provider developer:\n\n" + err.Error(), + }) + + return protov5 + } + + protov5.ProviderMeta, err = Schema(ctx, fw.ProviderMeta) + + if err != nil { + protov5.Diagnostics = append(protov5.Diagnostics, &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, + Summary: "Error converting provider_meta schema", + Detail: "The provider_meta schema couldn't be converted into a usable type. This is always a problem with the provider. Please report the following to the provider developer:\n\n" + err.Error(), + }) + + return protov5 + } + + for dataSourceType, dataSourceSchema := range fw.DataSourceSchemas { + protov5.DataSourceSchemas[dataSourceType], err = Schema(ctx, dataSourceSchema) + + if err != nil { + protov5.Diagnostics = append(protov5.Diagnostics, &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, + Summary: "Error converting data source schema", + Detail: "The schema for the data source \"" + dataSourceType + "\" couldn't be converted into a usable type. This is always a problem with the provider. Please report the following to the provider developer:\n\n" + err.Error(), + }) + + return protov5 + } + } + + for resourceType, resourceSchema := range fw.ResourceSchemas { + protov5.ResourceSchemas[resourceType], err = Schema(ctx, resourceSchema) + + if err != nil { + protov5.Diagnostics = append(protov5.Diagnostics, &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, + Summary: "Error converting resource schema", + Detail: "The schema for the resource \"" + resourceType + "\" couldn't be converted into a usable type. This is always a problem with the provider. 
Please report the following to the provider developer:\n\n" + err.Error(), + }) + + return protov5 + } + } + + return protov5 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/importedresource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/importedresource.go new file mode 100644 index 0000000000..fb9085cd86 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/importedresource.go @@ -0,0 +1,33 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" +) + +// ImportedResource returns the *tfprotov5.ImportedResource equivalent of a +// *fwserver.ImportedResource. +func ImportedResource(ctx context.Context, fw *fwserver.ImportedResource) (*tfprotov5.ImportedResource, diag.Diagnostics) { + if fw == nil { + return nil, nil + } + + proto5 := &tfprotov5.ImportedResource{ + TypeName: fw.TypeName, + } + + state, diags := State(ctx, &fw.State) + + proto5.State = state + + newPrivate, privateDiags := fw.Private.Bytes(ctx) + + diags = append(diags, privateDiags...) 
+ proto5.Private = newPrivate + + return proto5, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/importresourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/importresourcestate.go new file mode 100644 index 0000000000..bfaa9303dd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/importresourcestate.go @@ -0,0 +1,34 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ImportResourceStateResponse returns the *tfprotov5.ImportResourceStateResponse +// equivalent of a *fwserver.ImportResourceStateResponse. +func ImportResourceStateResponse(ctx context.Context, fw *fwserver.ImportResourceStateResponse) *tfprotov5.ImportResourceStateResponse { + if fw == nil { + return nil + } + + proto5 := &tfprotov5.ImportResourceStateResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + for _, fwImportedResource := range fw.ImportedResources { + proto5ImportedResource, diags := ImportedResource(ctx, &fwImportedResource) + + proto5.Diagnostics = append(proto5.Diagnostics, Diagnostics(ctx, diags)...) 
+ + if diags.HasError() { + continue + } + + proto5.ImportedResources = append(proto5.ImportedResources, proto5ImportedResource) + } + + return proto5 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/planresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/planresourcechange.go new file mode 100644 index 0000000000..20a116980f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/planresourcechange.go @@ -0,0 +1,39 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/totftypes" +) + +// PlanResourceChangeResponse returns the *tfprotov5.PlanResourceChangeResponse +// equivalent of a *fwserver.PlanResourceChangeResponse. +func PlanResourceChangeResponse(ctx context.Context, fw *fwserver.PlanResourceChangeResponse) *tfprotov5.PlanResourceChangeResponse { + if fw == nil { + return nil + } + + proto5 := &tfprotov5.PlanResourceChangeResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + plannedState, diags := State(ctx, fw.PlannedState) + + proto5.Diagnostics = append(proto5.Diagnostics, Diagnostics(ctx, diags)...) + proto5.PlannedState = plannedState + + requiresReplace, diags := totftypes.AttributePaths(ctx, fw.RequiresReplace) + + proto5.Diagnostics = append(proto5.Diagnostics, Diagnostics(ctx, diags)...) + proto5.RequiresReplace = requiresReplace + + plannedPrivate, diags := fw.PlannedPrivate.Bytes(ctx) + + proto5.Diagnostics = append(proto5.Diagnostics, Diagnostics(ctx, diags)...) 
+ proto5.PlannedPrivate = plannedPrivate + + return proto5 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/prepareproviderconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/prepareproviderconfig.go new file mode 100644 index 0000000000..0fd98bd187 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/prepareproviderconfig.go @@ -0,0 +1,27 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// PrepareProviderConfigResponse returns the *tfprotov5.PrepareProviderConfigResponse +// equivalent of a *fwserver.ValidateProviderConfigResponse. +func PrepareProviderConfigResponse(ctx context.Context, fw *fwserver.ValidateProviderConfigResponse) *tfprotov5.PrepareProviderConfigResponse { + if fw == nil { + return nil + } + + proto5 := &tfprotov5.PrepareProviderConfigResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + preparedConfig, diags := Config(ctx, fw.PreparedConfig) + + proto5.Diagnostics = append(proto5.Diagnostics, Diagnostics(ctx, diags)...) 
+ proto5.PreparedConfig = preparedConfig + + return proto5 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/readdatasource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/readdatasource.go new file mode 100644 index 0000000000..b08f2a2125 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/readdatasource.go @@ -0,0 +1,27 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ReadDataSourceResponse returns the *tfprotov5.ReadDataSourceResponse +// equivalent of a *fwserver.ReadDataSourceResponse. +func ReadDataSourceResponse(ctx context.Context, fw *fwserver.ReadDataSourceResponse) *tfprotov5.ReadDataSourceResponse { + if fw == nil { + return nil + } + + proto5 := &tfprotov5.ReadDataSourceResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + state, diags := State(ctx, fw.State) + + proto5.Diagnostics = append(proto5.Diagnostics, Diagnostics(ctx, diags)...) + proto5.State = state + + return proto5 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/readresource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/readresource.go new file mode 100644 index 0000000000..7a8ac372cb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/readresource.go @@ -0,0 +1,33 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" +) + +// ReadResourceResponse returns the *tfprotov5.ReadResourceResponse +// equivalent of a *fwserver.ReadResourceResponse. 
+func ReadResourceResponse(ctx context.Context, fw *fwserver.ReadResourceResponse) *tfprotov5.ReadResourceResponse { + if fw == nil { + return nil + } + + proto5 := &tfprotov5.ReadResourceResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + newState, diags := State(ctx, fw.NewState) + + proto5.Diagnostics = append(proto5.Diagnostics, Diagnostics(ctx, diags)...) + proto5.NewState = newState + + newPrivate, diags := fw.Private.Bytes(ctx) + + proto5.Diagnostics = append(proto5.Diagnostics, Diagnostics(ctx, diags)...) + proto5.Private = newPrivate + + return proto5 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/schema.go new file mode 100644 index 0000000000..1ca1530f62 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/schema.go @@ -0,0 +1,88 @@ +package toproto5 + +import ( + "context" + "sort" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Schema returns the *tfprotov5.Schema equivalent of a Schema. 
+func Schema(ctx context.Context, s fwschema.Schema) (*tfprotov5.Schema, error) { + if s == nil { + return nil, nil + } + + result := &tfprotov5.Schema{ + Version: s.GetVersion(), + } + + var attrs []*tfprotov5.SchemaAttribute + var blocks []*tfprotov5.SchemaNestedBlock + + for name, attr := range s.GetAttributes() { + a, err := SchemaAttribute(ctx, name, tftypes.NewAttributePath().WithAttributeName(name), attr) + + if err != nil { + return nil, err + } + + attrs = append(attrs, a) + } + + for name, block := range s.GetBlocks() { + proto5, err := Block(ctx, name, tftypes.NewAttributePath().WithAttributeName(name), block) + + if err != nil { + return nil, err + } + + blocks = append(blocks, proto5) + } + + sort.Slice(attrs, func(i, j int) bool { + if attrs[i] == nil { + return true + } + + if attrs[j] == nil { + return false + } + + return attrs[i].Name < attrs[j].Name + }) + + sort.Slice(blocks, func(i, j int) bool { + if blocks[i] == nil { + return true + } + + if blocks[j] == nil { + return false + } + + return blocks[i].TypeName < blocks[j].TypeName + }) + + result.Block = &tfprotov5.SchemaBlock{ + // core doesn't do anything with version, as far as I can tell, + // so let's not set it. 
+ Attributes: attrs, + BlockTypes: blocks, + Deprecated: s.GetDeprecationMessage() != "", + } + + if s.GetDescription() != "" { + result.Block.Description = s.GetDescription() + result.Block.DescriptionKind = tfprotov5.StringKindPlain + } + + if s.GetMarkdownDescription() != "" { + result.Block.Description = s.GetMarkdownDescription() + result.Block.DescriptionKind = tfprotov5.StringKindMarkdown + } + + return result, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/schema_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/schema_attribute.go new file mode 100644 index 0000000000..a409229970 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/schema_attribute.go @@ -0,0 +1,51 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// SchemaAttribute returns the *tfprotov5.SchemaAttribute equivalent of an +// Attribute. Errors will be tftypes.AttributePathErrors based on `path`. +// `name` is the name of the attribute. 
+func SchemaAttribute(ctx context.Context, name string, path *tftypes.AttributePath, a fwschema.Attribute) (*tfprotov5.SchemaAttribute, error) { + if _, ok := a.(fwschema.NestedAttribute); ok { + return nil, path.NewErrorf("protocol version 5 cannot have Attributes set") + } + + if a.GetType() == nil { + return nil, path.NewErrorf("must have Type set") + } + + if !a.IsRequired() && !a.IsOptional() && !a.IsComputed() { + return nil, path.NewErrorf("must have Required, Optional, or Computed set") + } + + schemaAttribute := &tfprotov5.SchemaAttribute{ + Name: name, + Required: a.IsRequired(), + Optional: a.IsOptional(), + Computed: a.IsComputed(), + Sensitive: a.IsSensitive(), + Type: a.GetType().TerraformType(ctx), + } + + if a.GetDeprecationMessage() != "" { + schemaAttribute.Deprecated = true + } + + if a.GetDescription() != "" { + schemaAttribute.Description = a.GetDescription() + schemaAttribute.DescriptionKind = tfprotov5.StringKindPlain + } + + if a.GetMarkdownDescription() != "" { + schemaAttribute.Description = a.GetMarkdownDescription() + schemaAttribute.DescriptionKind = tfprotov5.StringKindMarkdown + } + + return schemaAttribute, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/server_capabilities.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/server_capabilities.go new file mode 100644 index 0000000000..f7b8e9f368 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/server_capabilities.go @@ -0,0 +1,20 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ServerCapabilities returns the *tfprotov5.ServerCapabilities for a +// *fwserver.ServerCapabilities. 
+func ServerCapabilities(ctx context.Context, fw *fwserver.ServerCapabilities) *tfprotov5.ServerCapabilities { + if fw == nil { + return nil + } + + return &tfprotov5.ServerCapabilities{ + PlanDestroy: fw.PlanDestroy, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/state.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/state.go new file mode 100644 index 0000000000..639a693f03 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/state.go @@ -0,0 +1,25 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// State returns the *tfprotov5.DynamicValue for a *tfsdk.State. 
+func State(ctx context.Context, fw *tfsdk.State) (*tfprotov5.DynamicValue, diag.Diagnostics) { + if fw == nil { + return nil, nil + } + + data := &fwschemadata.Data{ + Description: fwschemadata.DataDescriptionState, + Schema: fw.Schema, + TerraformValue: fw.Raw, + } + + return DynamicValue(ctx, data) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/upgraderesourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/upgraderesourcestate.go new file mode 100644 index 0000000000..a27b9ad789 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/upgraderesourcestate.go @@ -0,0 +1,27 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// UpgradeResourceStateResponse returns the *tfprotov5.UpgradeResourceStateResponse +// equivalent of a *fwserver.UpgradeResourceStateResponse. +func UpgradeResourceStateResponse(ctx context.Context, fw *fwserver.UpgradeResourceStateResponse) *tfprotov5.UpgradeResourceStateResponse { + if fw == nil { + return nil + } + + proto5 := &tfprotov5.UpgradeResourceStateResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + upgradedState, diags := State(ctx, fw.UpgradedState) + + proto5.Diagnostics = append(proto5.Diagnostics, Diagnostics(ctx, diags)...) 
+ proto5.UpgradedState = upgradedState + + return proto5 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/validatedatasourceconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/validatedatasourceconfig.go new file mode 100644 index 0000000000..333653c5d5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/validatedatasourceconfig.go @@ -0,0 +1,22 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ValidateDataSourceConfigResponse returns the *tfprotov5.ValidateDataSourceConfigResponse +// equivalent of a *fwserver.ValidateDataSourceConfigResponse. +func ValidateDataSourceConfigResponse(ctx context.Context, fw *fwserver.ValidateDataSourceConfigResponse) *tfprotov5.ValidateDataSourceConfigResponse { + if fw == nil { + return nil + } + + proto5 := &tfprotov5.ValidateDataSourceConfigResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + return proto5 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/validateresourcetypeconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/validateresourcetypeconfig.go new file mode 100644 index 0000000000..174d61b511 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto5/validateresourcetypeconfig.go @@ -0,0 +1,22 @@ +package toproto5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" +) + +// ValidateResourceTypeConfigResponse returns the *tfprotov5.ValidateResourceTypeConfigResponse +// equivalent of a 
*fwserver.ValidateResourceConfigResponse. +func ValidateResourceTypeConfigResponse(ctx context.Context, fw *fwserver.ValidateResourceConfigResponse) *tfprotov5.ValidateResourceTypeConfigResponse { + if fw == nil { + return nil + } + + proto5 := &tfprotov5.ValidateResourceTypeConfigResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + return proto5 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/applyresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/applyresourcechange.go new file mode 100644 index 0000000000..c47c58f5ba --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/applyresourcechange.go @@ -0,0 +1,33 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" +) + +// ApplyResourceChangeResponse returns the *tfprotov6.ApplyResourceChangeResponse +// equivalent of a *fwserver.ApplyResourceChangeResponse. +func ApplyResourceChangeResponse(ctx context.Context, fw *fwserver.ApplyResourceChangeResponse) *tfprotov6.ApplyResourceChangeResponse { + if fw == nil { + return nil + } + + proto6 := &tfprotov6.ApplyResourceChangeResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + newState, diags := State(ctx, fw.NewState) + + proto6.Diagnostics = append(proto6.Diagnostics, Diagnostics(ctx, diags)...) + proto6.NewState = newState + + newPrivate, diags := fw.Private.Bytes(ctx) + + proto6.Diagnostics = append(proto6.Diagnostics, Diagnostics(ctx, diags)...) 
+ proto6.Private = newPrivate + + return proto6 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/block.go new file mode 100644 index 0000000000..3da25a2007 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/block.go @@ -0,0 +1,94 @@ +package toproto6 + +import ( + "context" + "sort" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Block returns the *tfprotov6.SchemaNestedBlock equivalent of a Block. +// Errors will be tftypes.AttributePathErrors based on `path`. `name` is the +// name of the attribute. +func Block(ctx context.Context, name string, path *tftypes.AttributePath, b fwschema.Block) (*tfprotov6.SchemaNestedBlock, error) { + schemaNestedBlock := &tfprotov6.SchemaNestedBlock{ + Block: &tfprotov6.SchemaBlock{ + Deprecated: b.GetDeprecationMessage() != "", + }, + TypeName: name, + } + + if b.GetDescription() != "" { + schemaNestedBlock.Block.Description = b.GetDescription() + schemaNestedBlock.Block.DescriptionKind = tfprotov6.StringKindPlain + } + + if b.GetMarkdownDescription() != "" { + schemaNestedBlock.Block.Description = b.GetMarkdownDescription() + schemaNestedBlock.Block.DescriptionKind = tfprotov6.StringKindMarkdown + } + + nm := b.GetNestingMode() + switch nm { + case fwschema.BlockNestingModeList: + schemaNestedBlock.Nesting = tfprotov6.SchemaNestedBlockNestingModeList + case fwschema.BlockNestingModeSet: + schemaNestedBlock.Nesting = tfprotov6.SchemaNestedBlockNestingModeSet + case fwschema.BlockNestingModeSingle: + schemaNestedBlock.Nesting = tfprotov6.SchemaNestedBlockNestingModeSingle + default: + return nil, path.NewErrorf("unrecognized nesting mode %v", nm) + } 
+ + nestedBlockObject := b.GetNestedObject() + + for attrName, attr := range nestedBlockObject.GetAttributes() { + attrPath := path.WithAttributeName(attrName) + attrProto6, err := SchemaAttribute(ctx, attrName, attrPath, attr) + + if err != nil { + return nil, err + } + + schemaNestedBlock.Block.Attributes = append(schemaNestedBlock.Block.Attributes, attrProto6) + } + + for blockName, block := range nestedBlockObject.GetBlocks() { + blockPath := path.WithAttributeName(blockName) + blockProto6, err := Block(ctx, blockName, blockPath, block) + + if err != nil { + return nil, err + } + + schemaNestedBlock.Block.BlockTypes = append(schemaNestedBlock.Block.BlockTypes, blockProto6) + } + + sort.Slice(schemaNestedBlock.Block.Attributes, func(i, j int) bool { + if schemaNestedBlock.Block.Attributes[i] == nil { + return true + } + + if schemaNestedBlock.Block.Attributes[j] == nil { + return false + } + + return schemaNestedBlock.Block.Attributes[i].Name < schemaNestedBlock.Block.Attributes[j].Name + }) + + sort.Slice(schemaNestedBlock.Block.BlockTypes, func(i, j int) bool { + if schemaNestedBlock.Block.BlockTypes[i] == nil { + return true + } + + if schemaNestedBlock.Block.BlockTypes[j] == nil { + return false + } + + return schemaNestedBlock.Block.BlockTypes[i].TypeName < schemaNestedBlock.Block.BlockTypes[j].TypeName + }) + + return schemaNestedBlock, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/config.go new file mode 100644 index 0000000000..313656f9ca --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/config.go @@ -0,0 +1,25 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + 
"github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// Config returns the *tfprotov6.DynamicValue for a *tfsdk.Config. +func Config(ctx context.Context, fw *tfsdk.Config) (*tfprotov6.DynamicValue, diag.Diagnostics) { + if fw == nil { + return nil, nil + } + + data := &fwschemadata.Data{ + Description: fwschemadata.DataDescriptionConfiguration, + Schema: fw.Schema, + TerraformValue: fw.Raw, + } + + return DynamicValue(ctx, data) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/configureprovider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/configureprovider.go new file mode 100644 index 0000000000..66c2a35555 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/configureprovider.go @@ -0,0 +1,22 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ConfigureProviderResponse returns the *tfprotov6.ConfigureProviderResponse +// equivalent of a *fwserver.ConfigureProviderResponse. 
+func ConfigureProviderResponse(ctx context.Context, fw *provider.ConfigureResponse) *tfprotov6.ConfigureProviderResponse { + if fw == nil { + return nil + } + + proto6 := &tfprotov6.ConfigureProviderResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + return proto6 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/diagnostics.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/diagnostics.go new file mode 100644 index 0000000000..d13b94d749 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/diagnostics.go @@ -0,0 +1,48 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/totftypes" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// DiagnosticSeverity converts diag.Severity into tfprotov6.DiagnosticSeverity. +func DiagnosticSeverity(s diag.Severity) tfprotov6.DiagnosticSeverity { + switch s { + case diag.SeverityError: + return tfprotov6.DiagnosticSeverityError + case diag.SeverityWarning: + return tfprotov6.DiagnosticSeverityWarning + default: + return tfprotov6.DiagnosticSeverityInvalid + } +} + +// Diagnostics converts the diagnostics into the tfprotov6 collection type. 
+func Diagnostics(ctx context.Context, diagnostics diag.Diagnostics) []*tfprotov6.Diagnostic { + var results []*tfprotov6.Diagnostic + + for _, diagnostic := range diagnostics { + tfprotov6Diagnostic := &tfprotov6.Diagnostic{ + Detail: diagnostic.Detail(), + Severity: DiagnosticSeverity(diagnostic.Severity()), + Summary: diagnostic.Summary(), + } + + if diagWithPath, ok := diagnostic.(diag.DiagnosticWithPath); ok { + var diags diag.Diagnostics + + tfprotov6Diagnostic.Attribute, diags = totftypes.AttributePath(ctx, diagWithPath.Path()) + + if diags.HasError() { + results = append(results, Diagnostics(ctx, diags)...) + } + } + + results = append(results, tfprotov6Diagnostic) + } + + return results +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/doc.go new file mode 100644 index 0000000000..512ea57597 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/doc.go @@ -0,0 +1,3 @@ +// Package toproto6 contains functions to convert from framework types to +// protocol version 6 (tfprotov6) types. 
+package toproto6 diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/dynamic_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/dynamic_value.go new file mode 100644 index 0000000000..83946ea23a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/dynamic_value.go @@ -0,0 +1,44 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// DynamicValue returns the *tfprotov6.DynamicValue for a given +// fwschemadata.Data. +// +// If necessary, the underlying data is modified to convert list and set block +// values from a null collection to an empty collection. This is to prevent +// developers from needing to understand Terraform's differences between +// block and attribute values where blocks are technically never null, but from +// a developer perspective this distinction introduces unnecessary complexity. +func DynamicValue(ctx context.Context, data *fwschemadata.Data) (*tfprotov6.DynamicValue, diag.Diagnostics) { + if data == nil { + return nil, nil + } + + var diags diag.Diagnostics + + // Prevent Terraform core errors for null list/set blocks. + diags.Append(data.ReifyNullCollectionBlocks(ctx)...) + + proto6, err := tfprotov6.NewDynamicValue(data.Schema.Type().TerraformType(ctx), data.TerraformValue) + + if err != nil { + diags.AddError( + "Unable to Convert "+data.Description.Title(), + "An unexpected error was encountered when converting the "+data.Description.String()+" to the protocol type. 
"+ + "This is always an issue in terraform-plugin-framework used to implement the provider and should be reported to the provider developers.\n\n"+ + "Please report this to the provider developer:\n\n"+ + "Unable to create DynamicValue: "+err.Error(), + ) + + return nil, diags + } + + return &proto6, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/getproviderschema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/getproviderschema.go new file mode 100644 index 0000000000..174151656c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/getproviderschema.go @@ -0,0 +1,79 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// GetProviderSchemaResponse returns the *tfprotov6.GetProviderSchemaResponse +// equivalent of a *fwserver.GetProviderSchemaResponse. +func GetProviderSchemaResponse(ctx context.Context, fw *fwserver.GetProviderSchemaResponse) *tfprotov6.GetProviderSchemaResponse { + if fw == nil { + return nil + } + + protov6 := &tfprotov6.GetProviderSchemaResponse{ + DataSourceSchemas: map[string]*tfprotov6.Schema{}, + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + ResourceSchemas: map[string]*tfprotov6.Schema{}, + ServerCapabilities: ServerCapabilities(ctx, fw.ServerCapabilities), + } + + var err error + + protov6.Provider, err = Schema(ctx, fw.Provider) + + if err != nil { + protov6.Diagnostics = append(protov6.Diagnostics, &tfprotov6.Diagnostic{ + Severity: tfprotov6.DiagnosticSeverityError, + Summary: "Error converting provider schema", + Detail: "The provider schema couldn't be converted into a usable type. This is always a problem with the provider. 
Please report the following to the provider developer:\n\n" + err.Error(), + }) + + return protov6 + } + + protov6.ProviderMeta, err = Schema(ctx, fw.ProviderMeta) + + if err != nil { + protov6.Diagnostics = append(protov6.Diagnostics, &tfprotov6.Diagnostic{ + Severity: tfprotov6.DiagnosticSeverityError, + Summary: "Error converting provider_meta schema", + Detail: "The provider_meta schema couldn't be converted into a usable type. This is always a problem with the provider. Please report the following to the provider developer:\n\n" + err.Error(), + }) + + return protov6 + } + + for dataSourceType, dataSourceSchema := range fw.DataSourceSchemas { + protov6.DataSourceSchemas[dataSourceType], err = Schema(ctx, dataSourceSchema) + + if err != nil { + protov6.Diagnostics = append(protov6.Diagnostics, &tfprotov6.Diagnostic{ + Severity: tfprotov6.DiagnosticSeverityError, + Summary: "Error converting data source schema", + Detail: "The schema for the data source \"" + dataSourceType + "\" couldn't be converted into a usable type. This is always a problem with the provider. Please report the following to the provider developer:\n\n" + err.Error(), + }) + + return protov6 + } + } + + for resourceType, resourceSchema := range fw.ResourceSchemas { + protov6.ResourceSchemas[resourceType], err = Schema(ctx, resourceSchema) + + if err != nil { + protov6.Diagnostics = append(protov6.Diagnostics, &tfprotov6.Diagnostic{ + Severity: tfprotov6.DiagnosticSeverityError, + Summary: "Error converting resource schema", + Detail: "The schema for the resource \"" + resourceType + "\" couldn't be converted into a usable type. This is always a problem with the provider. 
Please report the following to the provider developer:\n\n" + err.Error(), + }) + + return protov6 + } + } + + return protov6 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/importedresource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/importedresource.go new file mode 100644 index 0000000000..6ddffbd280 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/importedresource.go @@ -0,0 +1,33 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" +) + +// ImportedResource returns the *tfprotov6.ImportedResource equivalent of a +// *fwserver.ImportedResource. +func ImportedResource(ctx context.Context, fw *fwserver.ImportedResource) (*tfprotov6.ImportedResource, diag.Diagnostics) { + if fw == nil { + return nil, nil + } + + proto6 := &tfprotov6.ImportedResource{ + TypeName: fw.TypeName, + } + + state, diags := State(ctx, &fw.State) + + proto6.State = state + + newPrivate, privateDiags := fw.Private.Bytes(ctx) + + diags = append(diags, privateDiags...) 
+ proto6.Private = newPrivate + + return proto6, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/importresourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/importresourcestate.go new file mode 100644 index 0000000000..2e5294010c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/importresourcestate.go @@ -0,0 +1,34 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ImportResourceStateResponse returns the *tfprotov6.ImportResourceStateResponse +// equivalent of a *fwserver.ImportResourceStateResponse. +func ImportResourceStateResponse(ctx context.Context, fw *fwserver.ImportResourceStateResponse) *tfprotov6.ImportResourceStateResponse { + if fw == nil { + return nil + } + + proto6 := &tfprotov6.ImportResourceStateResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + for _, fwImportedResource := range fw.ImportedResources { + proto6ImportedResource, diags := ImportedResource(ctx, &fwImportedResource) + + proto6.Diagnostics = append(proto6.Diagnostics, Diagnostics(ctx, diags)...) 
+ + if diags.HasError() { + continue + } + + proto6.ImportedResources = append(proto6.ImportedResources, proto6ImportedResource) + } + + return proto6 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/planresourcechange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/planresourcechange.go new file mode 100644 index 0000000000..908abccf09 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/planresourcechange.go @@ -0,0 +1,39 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/totftypes" +) + +// PlanResourceChangeResponse returns the *tfprotov6.PlanResourceChangeResponse +// equivalent of a *fwserver.PlanResourceChangeResponse. +func PlanResourceChangeResponse(ctx context.Context, fw *fwserver.PlanResourceChangeResponse) *tfprotov6.PlanResourceChangeResponse { + if fw == nil { + return nil + } + + proto6 := &tfprotov6.PlanResourceChangeResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + plannedState, diags := State(ctx, fw.PlannedState) + + proto6.Diagnostics = append(proto6.Diagnostics, Diagnostics(ctx, diags)...) + proto6.PlannedState = plannedState + + requiresReplace, diags := totftypes.AttributePaths(ctx, fw.RequiresReplace) + + proto6.Diagnostics = append(proto6.Diagnostics, Diagnostics(ctx, diags)...) + proto6.RequiresReplace = requiresReplace + + plannedPrivate, diags := fw.PlannedPrivate.Bytes(ctx) + + proto6.Diagnostics = append(proto6.Diagnostics, Diagnostics(ctx, diags)...) 
+ proto6.PlannedPrivate = plannedPrivate + + return proto6 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/readdatasource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/readdatasource.go new file mode 100644 index 0000000000..7c1c70f6c1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/readdatasource.go @@ -0,0 +1,27 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ReadDataSourceResponse returns the *tfprotov6.ReadDataSourceResponse +// equivalent of a *fwserver.ReadDataSourceResponse. +func ReadDataSourceResponse(ctx context.Context, fw *fwserver.ReadDataSourceResponse) *tfprotov6.ReadDataSourceResponse { + if fw == nil { + return nil + } + + proto6 := &tfprotov6.ReadDataSourceResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + state, diags := State(ctx, fw.State) + + proto6.Diagnostics = append(proto6.Diagnostics, Diagnostics(ctx, diags)...) + proto6.State = state + + return proto6 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/readresource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/readresource.go new file mode 100644 index 0000000000..5e983d45c7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/readresource.go @@ -0,0 +1,33 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" +) + +// ReadResourceResponse returns the *tfprotov6.ReadResourceResponse +// equivalent of a *fwserver.ReadResourceResponse. 
+func ReadResourceResponse(ctx context.Context, fw *fwserver.ReadResourceResponse) *tfprotov6.ReadResourceResponse { + if fw == nil { + return nil + } + + proto6 := &tfprotov6.ReadResourceResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + newState, diags := State(ctx, fw.NewState) + + proto6.Diagnostics = append(proto6.Diagnostics, Diagnostics(ctx, diags)...) + proto6.NewState = newState + + newPrivate, diags := fw.Private.Bytes(ctx) + + proto6.Diagnostics = append(proto6.Diagnostics, Diagnostics(ctx, diags)...) + proto6.Private = newPrivate + + return proto6 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/schema.go new file mode 100644 index 0000000000..da1ce1e73e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/schema.go @@ -0,0 +1,88 @@ +package toproto6 + +import ( + "context" + "sort" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Schema returns the *tfprotov6.Schema equivalent of a Schema. 
+func Schema(ctx context.Context, s fwschema.Schema) (*tfprotov6.Schema, error) { + if s == nil { + return nil, nil + } + + result := &tfprotov6.Schema{ + Version: s.GetVersion(), + } + + var attrs []*tfprotov6.SchemaAttribute + var blocks []*tfprotov6.SchemaNestedBlock + + for name, attr := range s.GetAttributes() { + a, err := SchemaAttribute(ctx, name, tftypes.NewAttributePath().WithAttributeName(name), attr) + + if err != nil { + return nil, err + } + + attrs = append(attrs, a) + } + + for name, block := range s.GetBlocks() { + proto6, err := Block(ctx, name, tftypes.NewAttributePath().WithAttributeName(name), block) + + if err != nil { + return nil, err + } + + blocks = append(blocks, proto6) + } + + sort.Slice(attrs, func(i, j int) bool { + if attrs[i] == nil { + return true + } + + if attrs[j] == nil { + return false + } + + return attrs[i].Name < attrs[j].Name + }) + + sort.Slice(blocks, func(i, j int) bool { + if blocks[i] == nil { + return true + } + + if blocks[j] == nil { + return false + } + + return blocks[i].TypeName < blocks[j].TypeName + }) + + result.Block = &tfprotov6.SchemaBlock{ + // core doesn't do anything with version, as far as I can tell, + // so let's not set it. 
+ Attributes: attrs, + BlockTypes: blocks, + Deprecated: s.GetDeprecationMessage() != "", + } + + if s.GetDescription() != "" { + result.Block.Description = s.GetDescription() + result.Block.DescriptionKind = tfprotov6.StringKindPlain + } + + if s.GetMarkdownDescription() != "" { + result.Block.Description = s.GetMarkdownDescription() + result.Block.DescriptionKind = tfprotov6.StringKindMarkdown + } + + return result, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/schema_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/schema_attribute.go new file mode 100644 index 0000000000..f7d25b6318 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/schema_attribute.go @@ -0,0 +1,90 @@ +package toproto6 + +import ( + "context" + "sort" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// SchemaAttribute returns the *tfprotov6.SchemaAttribute equivalent of an +// Attribute. Errors will be tftypes.AttributePathErrors based on `path`. +// `name` is the name of the attribute. 
+func SchemaAttribute(ctx context.Context, name string, path *tftypes.AttributePath, a fwschema.Attribute) (*tfprotov6.SchemaAttribute, error) { + if !a.IsRequired() && !a.IsOptional() && !a.IsComputed() { + return nil, path.NewErrorf("must have Required, Optional, or Computed set") + } + + schemaAttribute := &tfprotov6.SchemaAttribute{ + Name: name, + Required: a.IsRequired(), + Optional: a.IsOptional(), + Computed: a.IsComputed(), + Sensitive: a.IsSensitive(), + Type: a.GetType().TerraformType(ctx), + } + + if a.GetDeprecationMessage() != "" { + schemaAttribute.Deprecated = true + } + + if a.GetDescription() != "" { + schemaAttribute.Description = a.GetDescription() + schemaAttribute.DescriptionKind = tfprotov6.StringKindPlain + } + + if a.GetMarkdownDescription() != "" { + schemaAttribute.Description = a.GetMarkdownDescription() + schemaAttribute.DescriptionKind = tfprotov6.StringKindMarkdown + } + + nestedAttribute, ok := a.(fwschema.NestedAttribute) + + if !ok { + return schemaAttribute, nil + } + + object := &tfprotov6.SchemaObject{} + nm := nestedAttribute.GetNestingMode() + switch nm { + case fwschema.NestingModeSingle: + object.Nesting = tfprotov6.SchemaObjectNestingModeSingle + case fwschema.NestingModeList: + object.Nesting = tfprotov6.SchemaObjectNestingModeList + case fwschema.NestingModeSet: + object.Nesting = tfprotov6.SchemaObjectNestingModeSet + case fwschema.NestingModeMap: + object.Nesting = tfprotov6.SchemaObjectNestingModeMap + default: + return nil, path.NewErrorf("unrecognized nesting mode %v", nm) + } + + for nestedName, nestedA := range nestedAttribute.GetNestedObject().GetAttributes() { + nestedSchemaAttribute, err := SchemaAttribute(ctx, nestedName, path.WithAttributeName(nestedName), nestedA) + + if err != nil { + return nil, err + } + + object.Attributes = append(object.Attributes, nestedSchemaAttribute) + } + + sort.Slice(object.Attributes, func(i, j int) bool { + if object.Attributes[i] == nil { + return true + } + + if 
object.Attributes[j] == nil { + return false + } + + return object.Attributes[i].Name < object.Attributes[j].Name + }) + + schemaAttribute.NestedType = object + schemaAttribute.Type = nil + + return schemaAttribute, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/server_capabilities.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/server_capabilities.go new file mode 100644 index 0000000000..8794b5f0c3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/server_capabilities.go @@ -0,0 +1,20 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ServerCapabilities returns the *tfprotov6.ServerCapabilities for a +// *fwserver.ServerCapabilities. +func ServerCapabilities(ctx context.Context, fw *fwserver.ServerCapabilities) *tfprotov6.ServerCapabilities { + if fw == nil { + return nil + } + + return &tfprotov6.ServerCapabilities{ + PlanDestroy: fw.PlanDestroy, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/state.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/state.go new file mode 100644 index 0000000000..d91aec53eb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/state.go @@ -0,0 +1,25 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// State returns the *tfprotov6.DynamicValue for a *tfsdk.State. 
+func State(ctx context.Context, fw *tfsdk.State) (*tfprotov6.DynamicValue, diag.Diagnostics) { + if fw == nil { + return nil, nil + } + + data := &fwschemadata.Data{ + Description: fwschemadata.DataDescriptionState, + Schema: fw.Schema, + TerraformValue: fw.Raw, + } + + return DynamicValue(ctx, data) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/upgraderesourcestate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/upgraderesourcestate.go new file mode 100644 index 0000000000..829e8df50a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/upgraderesourcestate.go @@ -0,0 +1,27 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// UpgradeResourceStateResponse returns the *tfprotov6.UpgradeResourceStateResponse +// equivalent of a *fwserver.UpgradeResourceStateResponse. +func UpgradeResourceStateResponse(ctx context.Context, fw *fwserver.UpgradeResourceStateResponse) *tfprotov6.UpgradeResourceStateResponse { + if fw == nil { + return nil + } + + proto6 := &tfprotov6.UpgradeResourceStateResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + upgradedState, diags := State(ctx, fw.UpgradedState) + + proto6.Diagnostics = append(proto6.Diagnostics, Diagnostics(ctx, diags)...) 
+ proto6.UpgradedState = upgradedState + + return proto6 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/validatedatasourceconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/validatedatasourceconfig.go new file mode 100644 index 0000000000..c93641c2d7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/validatedatasourceconfig.go @@ -0,0 +1,22 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ValidateDataSourceConfigResponse returns the *tfprotov6.ValidateDataSourceConfigResponse +// equivalent of a *fwserver.ValidateDataSourceConfigResponse. +func ValidateDataSourceConfigResponse(ctx context.Context, fw *fwserver.ValidateDataSourceConfigResponse) *tfprotov6.ValidateDataResourceConfigResponse { + if fw == nil { + return nil + } + + proto6 := &tfprotov6.ValidateDataResourceConfigResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + return proto6 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/validateproviderconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/validateproviderconfig.go new file mode 100644 index 0000000000..701b899025 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/validateproviderconfig.go @@ -0,0 +1,27 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ValidateProviderConfigResponse returns the *tfprotov6.ValidateProviderConfigResponse +// equivalent of a 
*fwserver.ValidateProviderConfigResponse. +func ValidateProviderConfigResponse(ctx context.Context, fw *fwserver.ValidateProviderConfigResponse) *tfprotov6.ValidateProviderConfigResponse { + if fw == nil { + return nil + } + + proto6 := &tfprotov6.ValidateProviderConfigResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + preparedConfig, diags := Config(ctx, fw.PreparedConfig) + + proto6.Diagnostics = append(proto6.Diagnostics, Diagnostics(ctx, diags)...) + proto6.PreparedConfig = preparedConfig + + return proto6 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/validateresourceconfig.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/validateresourceconfig.go new file mode 100644 index 0000000000..76117c8ba6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/toproto6/validateresourceconfig.go @@ -0,0 +1,22 @@ +package toproto6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// ValidateResourceConfigResponse returns the *tfprotov6.ValidateResourceConfigResponse +// equivalent of a *fwserver.ValidateResourceConfigResponse. 
+func ValidateResourceConfigResponse(ctx context.Context, fw *fwserver.ValidateResourceConfigResponse) *tfprotov6.ValidateResourceConfigResponse { + if fw == nil { + return nil + } + + proto6 := &tfprotov6.ValidateResourceConfigResponse{ + Diagnostics: Diagnostics(ctx, fw.Diagnostics), + } + + return proto6 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/totftypes/attribute_path.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/totftypes/attribute_path.go new file mode 100644 index 0000000000..5ef94d83d3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/totftypes/attribute_path.go @@ -0,0 +1,40 @@ +package totftypes + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// AttributePath returns the *tftypes.AttributePath equivalent of a path.Path. +func AttributePath(ctx context.Context, fw path.Path) (*tftypes.AttributePath, diag.Diagnostics) { + var tfTypeSteps []tftypes.AttributePathStep + + for _, step := range fw.Steps() { + tfTypeStep, err := AttributePathStep(ctx, step) + + if err != nil { + return nil, diag.Diagnostics{ + diag.NewErrorDiagnostic( + "Unable to Convert Attribute Path", + "An unexpected error occurred while trying to convert an attribute path. "+ + "This is either an error in terraform-plugin-framework or a custom attribute type used by the provider. "+ + "Please report the following to the provider developers.\n\n"+ + // Since this is an error with the attribute path + // conversion, we cannot return a protocol path-based + // diagnostic. Returning a framework human-readable + // representation seems like the next best thing to do. 
+ fmt.Sprintf("Attribute Path: %s\n", fw.String())+ + fmt.Sprintf("Original Error: %s", err), + ), + } + } + + tfTypeSteps = append(tfTypeSteps, tfTypeStep) + } + + return tftypes.NewAttributePathWithSteps(tfTypeSteps), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/totftypes/attribute_path_step.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/totftypes/attribute_path_step.go new file mode 100644 index 0000000000..d360b43205 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/totftypes/attribute_path_step.go @@ -0,0 +1,33 @@ +package totftypes + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// AttributePathStep returns the tftypes.AttributePathStep equivalent of an +// path.PathStep. An error is returned instead of diag.Diagnostics so callers +// can include appropriate logical context about when the error occurred. 
+func AttributePathStep(ctx context.Context, fw path.PathStep) (tftypes.AttributePathStep, error) { + switch fw := fw.(type) { + case path.PathStepAttributeName: + return tftypes.AttributeName(string(fw)), nil + case path.PathStepElementKeyInt: + return tftypes.ElementKeyInt(int64(fw)), nil + case path.PathStepElementKeyString: + return tftypes.ElementKeyString(string(fw)), nil + case path.PathStepElementKeyValue: + tfTypesValue, err := fw.Value.ToTerraformValue(ctx) + + if err != nil { + return nil, fmt.Errorf("unable to convert attr.Value (%s) to tftypes.Value: %w", fw.Value.String(), err) + } + + return tftypes.ElementKeyValue(tfTypesValue), nil + default: + return nil, fmt.Errorf("unknown path.PathStep: %#v", fw) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/totftypes/attribute_paths.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/totftypes/attribute_paths.go new file mode 100644 index 0000000000..19d5c1c9ee --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/totftypes/attribute_paths.go @@ -0,0 +1,30 @@ +package totftypes + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// AttributePaths returns the []*tftypes.AttributePath equivalent of a path.Paths. 
+func AttributePaths(ctx context.Context, fw path.Paths) ([]*tftypes.AttributePath, diag.Diagnostics) { + if fw == nil { + return nil, nil + } + + result := make([]*tftypes.AttributePath, 0, len(fw)) + + for _, path := range fw { + tfType, diags := AttributePath(ctx, path) + + if diags.HasError() { + return result, diags + } + + result = append(result, tfType) + } + + return result, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/totftypes/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/totftypes/doc.go new file mode 100644 index 0000000000..011a662a36 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/internal/totftypes/doc.go @@ -0,0 +1,3 @@ +// Package totftypes contains functions to convert from framework types to +// terraform-plugin-go tftypes types. +package totftypes diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/config_validator.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/config_validator.go new file mode 100644 index 0000000000..3c30044768 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/config_validator.go @@ -0,0 +1,25 @@ +package provider + +import "context" + +// ConfigValidator describes reusable Provider configuration validation functionality. +type ConfigValidator interface { + // Description describes the validation in plain text formatting. + // + // This information may be automatically added to provider plain text + // descriptions by external tooling. + Description(context.Context) string + + // MarkdownDescription describes the validation in Markdown formatting. + // + // This information may be automatically added to provider Markdown + // descriptions by external tooling. 
+ MarkdownDescription(context.Context) string + + // ValidateProvider performs the validation. + // + // This method name is separate from the ConfigValidator + // interface ValidateDataSource method name and ResourceConfigValidator + // interface ValidateResource method name to allow generic validators. + ValidateProvider(context.Context, ValidateConfigRequest, *ValidateConfigResponse) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/configure.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/configure.go new file mode 100644 index 0000000000..d489c6209e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/configure.go @@ -0,0 +1,45 @@ +package provider + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ConfigureRequest represents a request containing the values the user +// specified for the provider configuration block, along with other runtime +// information from Terraform or the Plugin SDK. An instance of this request +// struct is supplied as an argument to the provider's Configure function. +type ConfigureRequest struct { + // TerraformVersion is the version of Terraform executing the request. + // This is supplied for logging, analytics, and User-Agent purposes + // only. Providers should not try to gate provider behavior on + // Terraform versions. + TerraformVersion string + + // Config is the configuration the user supplied for the provider. This + // information should usually be persisted to the underlying type + // that's implementing the Provider interface, for use in later + // resource CRUD operations. + Config tfsdk.Config +} + +// ConfigureResponse represents a response to a +// ConfigureRequest. 
An instance of this response struct is supplied as +// an argument to the provider's Configure function, in which the provider +// should set values on the ConfigureResponse as appropriate. +type ConfigureResponse struct { + // DataSourceData is provider-defined data, clients, etc. that is passed + // to [datasource.ConfigureRequest.ProviderData] for each DataSource type + // that implements the Configure method. + DataSourceData any + + // Diagnostics report errors or warnings related to configuring the + // provider. An empty slice indicates success, with no warnings or + // errors generated. + Diagnostics diag.Diagnostics + + // ResourceData is provider-defined data, clients, etc. that is passed + // to [resource.ConfigureRequest.ProviderData] for each Resource type + // that implements the Configure method. + ResourceData any +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/doc.go new file mode 100644 index 0000000000..954f7afc76 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/doc.go @@ -0,0 +1,22 @@ +// Package provider contains all interfaces, request types, and response +// types for a provider implementation. +// +// In Terraform, a provider is a concept which enables provider developers +// to offer practitioners data sources and managed resources. Those concepts +// are described in more detail in their respective datasource and resource +// packages. +// +// Providers generally store any infrastructure clients or shared data that is +// applicable across data sources and managed resources. Providers are +// generally configured early in Terraform operations, such as plan and apply, +// before data source and managed resource logic is called. 
However, this early +// provider configuration is not guaranteed in the case there are unknown +// Terraform configuration values, so additional logic checks may be required +// throughout an implementation to handle this case. Providers may contain a +// schema representing the structure and data types of Terraform-based +// configuration. +// +// The main starting point for implementations in this package is the +// Provider type which represents an instance of a provider that has +// its own configuration. +package provider diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metadata.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metadata.go new file mode 100644 index 0000000000..cbab966f7b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metadata.go @@ -0,0 +1,21 @@ +package provider + +// MetadataRequest represents a request for the Provider to return its type +// name. An instance of this request struct is supplied as an argument to the +// Provider type Metadata method. +type MetadataRequest struct{} + +// MetadataResponse represents a response to a MetadataRequest. An +// instance of this response struct is supplied as an argument to the +// Provider type Metadata method. +type MetadataResponse struct { + // TypeName should be the provider type. For example, examplecloud, if + // the intended resource or data source types are examplecloud_thing, etc. + TypeName string + + // Version should be the provider version, such as 1.2.3. + // + // This is not connected to any framework functionality currently, but may + // be in the future. 
+ Version string +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema.go new file mode 100644 index 0000000000..9ce9080fb9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema.go @@ -0,0 +1,24 @@ +package provider + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/provider/metaschema" +) + +// MetaSchemaRequest represents a request for the Provider to return its schema. +// An instance of this request struct is supplied as an argument to the +// Provider type Schema method. +type MetaSchemaRequest struct{} + +// MetaSchemaResponse represents a response to a MetaSchemaRequest. An instance of this +// response struct is supplied as an argument to the Provider type Schema +// method. +type MetaSchemaResponse struct { + // Schema is the meta schema of the provider. + Schema metaschema.Schema + + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/attribute.go new file mode 100644 index 0000000000..ed2fe0399e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/attribute.go @@ -0,0 +1,33 @@ +package metaschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" +) + +// Attribute define a value field inside the Schema. 
Implementations in this +// package include: +// - BoolAttribute +// - Float64Attribute +// - Int64Attribute +// - ListAttribute +// - MapAttribute +// - NumberAttribute +// - ObjectAttribute +// - SetAttribute +// - StringAttribute +// +// Additionally, the NestedAttribute interface extends Attribute with nested +// attributes. Only supported in protocol version 6. Implementations in this +// package include: +// - ListNestedAttribute +// - MapNestedAttribute +// - SetNestedAttribute +// - SingleNestedAttribute +// +// In practitioner configurations, an equals sign (=) is required to set +// the value. [Configuration Reference] +// +// [Configuration Reference]: https://developer.hashicorp.com/terraform/language/syntax/configuration +type Attribute interface { + fwschema.Attribute +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/bool_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/bool_attribute.go new file mode 100644 index 0000000000..f3c45d452f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/bool_attribute.go @@ -0,0 +1,116 @@ +package metaschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = BoolAttribute{} +) + +// BoolAttribute represents a schema attribute that is a boolean. When +// retrieving the value for this attribute, use types.Bool as the value type +// unless the CustomType field is set. 
+// +// Terraform configurations configure this attribute using expressions that +// return a boolean or directly via the true/false keywords. +// +// example_attribute = true +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type BoolAttribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.BoolType. When retrieving data, the basetypes.BoolValuable + // associated with this custom type must be used in place of types.Bool. + CustomType basetypes.BoolTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a BoolAttribute. +func (a BoolAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a BoolAttribute +// and all fields are equal. 
+func (a BoolAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(BoolAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for provider meta schemas. +func (a BoolAttribute) GetDeprecationMessage() string { + return "" +} + +// GetDescription returns the Description field value. +func (a BoolAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a BoolAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.StringType or the CustomType field value if defined. +func (a BoolAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.BoolType +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a BoolAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a BoolAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a BoolAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive always returns false as there is no plan for provider meta +// schema data. +func (a BoolAttribute) IsSensitive() bool { + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/doc.go new file mode 100644 index 0000000000..72d8f0c1a6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/doc.go @@ -0,0 +1,5 @@ +// Package metaschema contains all available meta schema functionality for +// providers. 
Provider meta schemas define the structure and value types for +// provider_meta configuration data. Meta schemas are implemented via the +// provider.ProviderWithMetaSchema type MetaSchema method. +package metaschema diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/float64_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/float64_attribute.go new file mode 100644 index 0000000000..77c556f7e7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/float64_attribute.go @@ -0,0 +1,119 @@ +package metaschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = Float64Attribute{} +) + +// Float64Attribute represents a schema attribute that is a 64-bit floating +// point number. When retrieving the value for this attribute, use +// types.Float64 as the value type unless the CustomType field is set. +// +// Use Int64Attribute for 64-bit integer attributes or NumberAttribute for +// 512-bit generic number attributes. +// +// Terraform configurations configure this attribute using expressions that +// return a number or directly via a floating point value. +// +// example_attribute = 123.45 +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type Float64Attribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.Float64Type. 
When retrieving data, the basetypes.Float64Valuable + // associated with this custom type must be used in place of types.Float64. + CustomType basetypes.Float64Typable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a Float64Attribute. +func (a Float64Attribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a Float64Attribute +// and all fields are equal. +func (a Float64Attribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(Float64Attribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for provider meta schemas. +func (a Float64Attribute) GetDeprecationMessage() string { + return "" +} + +// GetDescription returns the Description field value. 
+func (a Float64Attribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a Float64Attribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.Float64Type or the CustomType field value if defined. +func (a Float64Attribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.Float64Type +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a Float64Attribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a Float64Attribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a Float64Attribute) IsRequired() bool { + return a.Required +} + +// IsSensitive always returns false as there is no plan for provider meta +// schema data. +func (a Float64Attribute) IsSensitive() bool { + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/int64_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/int64_attribute.go new file mode 100644 index 0000000000..2ad6216f7c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/int64_attribute.go @@ -0,0 +1,119 @@ +package metaschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. 
+var ( + _ Attribute = Int64Attribute{} +) + +// Int64Attribute represents a schema attribute that is a 64-bit integer. +// When retrieving the value for this attribute, use types.Int64 as the value +// type unless the CustomType field is set. +// +// Use Float64Attribute for 64-bit floating point number attributes or +// NumberAttribute for 512-bit generic number attributes. +// +// Terraform configurations configure this attribute using expressions that +// return a number or directly via an integer value. +// +// example_attribute = 123 +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type Int64Attribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.Int64Type. When retrieving data, the basetypes.Int64Valuable + // associated with this custom type must be used in place of types.Int64. + CustomType basetypes.Int64Typable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. 
+ MarkdownDescription string +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a Int64Attribute. +func (a Int64Attribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a Int64Attribute +// and all fields are equal. +func (a Int64Attribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(Int64Attribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for provider meta schemas. +func (a Int64Attribute) GetDeprecationMessage() string { + return "" +} + +// GetDescription returns the Description field value. +func (a Int64Attribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a Int64Attribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.Int64Type or the CustomType field value if defined. +func (a Int64Attribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.Int64Type +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a Int64Attribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a Int64Attribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a Int64Attribute) IsRequired() bool { + return a.Required +} + +// IsSensitive always returns false as there is no plan for provider meta +// schema data. 
+func (a Int64Attribute) IsSensitive() bool { + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/list_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/list_attribute.go new file mode 100644 index 0000000000..3fc662627e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/list_attribute.go @@ -0,0 +1,129 @@ +package metaschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = ListAttribute{} +) + +// ListAttribute represents a schema attribute that is a list with a single +// element type. When retrieving the value for this attribute, use types.List +// as the value type unless the CustomType field is set. The ElementType field +// must be set. +// +// Use ListNestedAttribute if the underlying elements should be objects and +// require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a list or directly via square brace syntax. +// +// # list of strings +// example_attribute = ["first", "second"] +// +// Terraform configurations reference this attribute using expressions that +// accept a list or an element directly via square brace 0-based index syntax: +// +// # first known element +// .example_attribute[0] +type ListAttribute struct { + // ElementType is the type for all elements of the list. This field must be + // set. 
+ ElementType attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ListType. When retrieving data, the basetypes.ListValuable + // associated with this custom type must be used in place of types.List. + CustomType basetypes.ListTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into a list +// index or an error. +func (a ListAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a ListAttribute +// and all fields are equal. +func (a ListAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(ListAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for provider meta schemas. 
+func (a ListAttribute) GetDeprecationMessage() string { + return "" +} + +// GetDescription returns the Description field value. +func (a ListAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a ListAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.ListType or the CustomType field value if defined. +func (a ListAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.ListType{ + ElemType: a.ElementType, + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a ListAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a ListAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a ListAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive always returns false as there is no plan for provider meta +// schema data. 
+func (a ListAttribute) IsSensitive() bool { + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/list_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/list_nested_attribute.go new file mode 100644 index 0000000000..29146a0a1e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/list_nested_attribute.go @@ -0,0 +1,156 @@ +package metaschema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ NestedAttribute = ListNestedAttribute{} +) + +// ListNestedAttribute represents an attribute that is a list of objects where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.List +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use ListAttribute if the underlying elements are of a single type and do +// not require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a list of objects or directly via square and curly brace syntax. +// +// # list of objects +// example_attribute = [ +// { +// nested_attribute = #... 
+// }, +// ] +// +// Terraform configurations reference this attribute using expressions that +// accept a list of objects or an element directly via square brace 0-based +// index syntax: +// +// # first known object +// .example_attribute[0] +// # first known object nested_attribute value +// .example_attribute[0].nested_attribute +type ListNestedAttribute struct { + // NestedObject is the underlying object that contains nested attributes. + // This field must be set. + NestedObject NestedAttributeObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.ListType of types.ObjectType. When retrieving data, the + // basetypes.ListValuable associated with this custom type must be used in + // place of types.List. + CustomType basetypes.ListTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is ElementKeyInt, otherwise returns an error. 
+func (a ListNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyInt) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to ListNestedAttribute", step) + } + + return a.NestedObject, nil +} + +// Equal returns true if the given Attribute is a ListNestedAttribute +// and all fields are equal. +func (a ListNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(ListNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for provider meta schemas. +func (a ListNestedAttribute) GetDeprecationMessage() string { + return "" +} + +// GetDescription returns the Description field value. +func (a ListNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a ListNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (a ListNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return a.NestedObject +} + +// GetNestingMode always returns NestingModeList. +func (a ListNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeList +} + +// GetType returns ListType of ObjectType or CustomType. +func (a ListNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.ListType{ + ElemType: a.NestedObject.Type(), + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a ListNestedAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a ListNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. 
+func (a ListNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive always returns false as there is no plan for provider meta +// schema data. +func (a ListNestedAttribute) IsSensitive() bool { + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/map_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/map_attribute.go new file mode 100644 index 0000000000..839ea9e4d3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/map_attribute.go @@ -0,0 +1,132 @@ +package metaschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = MapAttribute{} +) + +// MapAttribute represents a schema attribute that is a list with a single +// element type. When retrieving the value for this attribute, use types.Map +// as the value type unless the CustomType field is set. The ElementType field +// must be set. +// +// Use MapNestedAttribute if the underlying elements should be objects and +// require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a list or directly via curly brace syntax. 
+// +// # map of strings +// example_attribute = { +// key1 = "first", +// key2 = "second", +// } +// +// Terraform configurations reference this attribute using expressions that +// accept a map or an element directly via square brace string syntax: +// +// # key1 known element +// .example_attribute["key1"] +type MapAttribute struct { + // ElementType is the type for all elements of the map. This field must be + // set. + ElementType attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.MapType. When retrieving data, the basetypes.MapValuable + // associated with this custom type must be used in place of types.Map. + CustomType basetypes.MapTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into a map +// index or an error. 
+func (a MapAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a MapAttribute +// and all fields are equal. +func (a MapAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(MapAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for provider meta schemas. +func (a MapAttribute) GetDeprecationMessage() string { + return "" +} + +// GetDescription returns the Description field value. +func (a MapAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a MapAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.MapType or the CustomType field value if defined. +func (a MapAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.MapType{ + ElemType: a.ElementType, + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a MapAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a MapAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a MapAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive always returns false as there is no plan for provider meta +// schema data. 
+func (a MapAttribute) IsSensitive() bool { + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/map_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/map_nested_attribute.go new file mode 100644 index 0000000000..518ec0c82d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/map_nested_attribute.go @@ -0,0 +1,156 @@ +package metaschema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ NestedAttribute = MapNestedAttribute{} +) + +// MapNestedAttribute represents an attribute that is a set of objects where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.Map +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use MapAttribute if the underlying elements are of a single type and do +// not require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a set of objects or directly via curly brace syntax. +// +// # map of objects +// example_attribute = { +// key = { +// nested_attribute = #... 
+// }, +// ] +// +// Terraform configurations reference this attribute using expressions that +// accept a map of objects or an element directly via square brace string +// syntax: +// +// # known object at key +// .example_attribute["key"] +// # known object nested_attribute value at key +// .example_attribute["key"].nested_attribute +type MapNestedAttribute struct { + // NestedObject is the underlying object that contains nested attributes. + // This field must be set. + NestedObject NestedAttributeObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.MapType of types.ObjectType. When retrieving data, the + // basetypes.MapValuable associated with this custom type must be used in + // place of types.Map. + CustomType basetypes.MapTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is ElementKeyString, otherwise returns an error. 
+func (a MapNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyString) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to MapNestedAttribute", step) + } + + return a.NestedObject, nil +} + +// Equal returns true if the given Attribute is a MapNestedAttribute +// and all fields are equal. +func (a MapNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(MapNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for provider meta schemas. +func (a MapNestedAttribute) GetDeprecationMessage() string { + return "" +} + +// GetDescription returns the Description field value. +func (a MapNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a MapNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (a MapNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return a.NestedObject +} + +// GetNestingMode always returns NestingModeList. +func (a MapNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeMap +} + +// GetType returns MapType of ObjectType or CustomType. +func (a MapNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.MapType{ + ElemType: a.NestedObject.Type(), + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a MapNestedAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a MapNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. 
+func (a MapNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive always returns false as there is no plan for provider meta +// schema data. +func (a MapNestedAttribute) IsSensitive() bool { + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/nested_attribute.go new file mode 100644 index 0000000000..cd96146d12 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/nested_attribute.go @@ -0,0 +1,11 @@ +package metaschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" +) + +// Nested attributes are only compatible with protocol version 6. +type NestedAttribute interface { + Attribute + fwschema.NestedAttribute +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/nested_attribute_object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/nested_attribute_object.go new file mode 100644 index 0000000000..4755b01878 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/nested_attribute_object.go @@ -0,0 +1,60 @@ +package metaschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. 
+var _ fwschema.NestedAttributeObject = NestedAttributeObject{} + +// NestedAttributeObject is the object containing the underlying attributes +// for a ListNestedAttribute, MapNestedAttribute, SetNestedAttribute, or +// SingleNestedAttribute (automatically generated). When retrieving the value +// for this attribute, use types.Object as the value type unless the CustomType +// field is set. The Attributes field must be set. Nested attributes are only +// compatible with protocol version 6. +// +// This object enables customizing and simplifying details within its parent +// NestedAttribute, therefore it cannot have Terraform schema fields such as +// Required, Description, etc. +type NestedAttributeObject struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. This field must be set. + Attributes map[string]Attribute + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable +} + +// ApplyTerraform5AttributePathStep performs an AttributeName step on the +// underlying attributes or returns an error. +func (o NestedAttributeObject) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (any, error) { + return fwschema.NestedAttributeObjectApplyTerraform5AttributePathStep(o, step) +} + +// Equal returns true if the given NestedAttributeObject is equivalent. +func (o NestedAttributeObject) Equal(other fwschema.NestedAttributeObject) bool { + if _, ok := other.(NestedAttributeObject); !ok { + return false + } + + return fwschema.NestedAttributeObjectEqual(o, other) +} + +// GetAttributes returns the Attributes field value. 
+func (o NestedAttributeObject) GetAttributes() fwschema.UnderlyingAttributes { + return schemaAttributes(o.Attributes) +} + +// Type returns the framework type of the NestedAttributeObject. +func (o NestedAttributeObject) Type() basetypes.ObjectTypable { + if o.CustomType != nil { + return o.CustomType + } + + return fwschema.NestedAttributeObjectType(o) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/number_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/number_attribute.go new file mode 100644 index 0000000000..039cfdc0e8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/number_attribute.go @@ -0,0 +1,120 @@ +package metaschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = NumberAttribute{} +) + +// NumberAttribute represents a schema attribute that is a generic number with +// up to 512 bits of floating point or integer precision. When retrieving the +// value for this attribute, use types.Number as the value type unless the +// CustomType field is set. +// +// Use Float64Attribute for 64-bit floating point number attributes or +// Int64Attribute for 64-bit integer number attributes. +// +// Terraform configurations configure this attribute using expressions that +// return a number or directly via a floating point or integer value. +// +// example_attribute = 123 +// +// Terraform configurations reference this attribute using the attribute name. 
+// +// .example_attribute +type NumberAttribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.NumberType. When retrieving data, the basetypes.NumberValuable + // associated with this custom type must be used in place of types.Number. + CustomType basetypes.NumberTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a NumberAttribute. +func (a NumberAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a NumberAttribute +// and all fields are equal. +func (a NumberAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(NumberAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for provider meta schemas. 
+func (a NumberAttribute) GetDeprecationMessage() string { + return "" +} + +// GetDescription returns the Description field value. +func (a NumberAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a NumberAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.NumberType or the CustomType field value if defined. +func (a NumberAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.NumberType +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a NumberAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a NumberAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a NumberAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive always returns false as there is no plan for provider meta +// schema data. 
+func (a NumberAttribute) IsSensitive() bool { + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/object_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/object_attribute.go new file mode 100644 index 0000000000..dfb9f91cba --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/object_attribute.go @@ -0,0 +1,131 @@ +package metaschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = ObjectAttribute{} +) + +// ObjectAttribute represents a schema attribute that is an object with only +// type information for underlying attributes. When retrieving the value for +// this attribute, use types.Object as the value type unless the CustomType +// field is set. The AttributeTypes field must be set. +// +// Prefer SingleNestedAttribute over ObjectAttribute if the provider is +// using protocol version 6 and full attribute functionality is needed. +// +// Terraform configurations configure this attribute using expressions that +// return an object or directly via curly brace syntax. +// +// # object with one attribute +// example_attribute = { +// underlying_attribute = #... 
+// } +// +// Terraform configurations reference this attribute using expressions that +// accept an object or an attribute directly via period syntax: +// +// # underlying attribute +// .example_attribute.underlying_attribute +type ObjectAttribute struct { + // AttributeTypes is the mapping of underlying attribute names to attribute + // types. This field must be set. + AttributeTypes map[string]attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into an +// attribute name or an error. 
+func (a ObjectAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a ObjectAttribute +// and all fields are equal. +func (a ObjectAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(ObjectAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for provider meta schemas. +func (a ObjectAttribute) GetDeprecationMessage() string { + return "" +} + +// GetDescription returns the Description field value. +func (a ObjectAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a ObjectAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.ObjectType or the CustomType field value if defined. +func (a ObjectAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.ObjectType{ + AttrTypes: a.AttributeTypes, + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a ObjectAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a ObjectAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a ObjectAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive always returns false as there is no plan for provider meta +// schema data. 
+func (a ObjectAttribute) IsSensitive() bool { + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/schema.go new file mode 100644 index 0000000000..bc827b2b4b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/schema.go @@ -0,0 +1,162 @@ +package metaschema + +import ( + "context" + "fmt" + "regexp" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/path" +) + +// Schema must satify the fwschema.Schema interface. +var _ fwschema.Schema = Schema{} + +// Schema defines the structure and value types of provider_meta configuration +// data. This type is used as the provider.MetaSchemaResponse type Schema +// field, which is implemented by the provider.ProviderWithMetaSchema type +// MetaSchema method. +type Schema struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Blocks names. + Attributes map[string]Attribute +} + +// ApplyTerraform5AttributePathStep applies the given AttributePathStep to the +// schema. +func (s Schema) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (any, error) { + return fwschema.SchemaApplyTerraform5AttributePathStep(s, step) +} + +// AttributeAtPath returns the Attribute at the passed path. If the path points +// to an element or attribute of a complex type, rather than to an Attribute, +// it will return an ErrPathInsideAtomicAttribute error. 
+func (s Schema) AttributeAtPath(ctx context.Context, p path.Path) (fwschema.Attribute, diag.Diagnostics) { + return fwschema.SchemaAttributeAtPath(ctx, s, p) +} + +// AttributeAtPath returns the Attribute at the passed path. If the path points +// to an element or attribute of a complex type, rather than to an Attribute, +// it will return an ErrPathInsideAtomicAttribute error. +func (s Schema) AttributeAtTerraformPath(ctx context.Context, p *tftypes.AttributePath) (fwschema.Attribute, error) { + return fwschema.SchemaAttributeAtTerraformPath(ctx, s, p) +} + +// GetAttributes returns the Attributes field value. +func (s Schema) GetAttributes() map[string]fwschema.Attribute { + return schemaAttributes(s.Attributes) +} + +// GetBlocks always returns nil as meta schemas cannot contain blocks. +func (s Schema) GetBlocks() map[string]fwschema.Block { + return nil +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for meta schemas. +func (s Schema) GetDeprecationMessage() string { + return "" +} + +// GetDescription always returns an empty string as there is no purpose for +// a meta schema description. The provider schema description should describe +// the provider itself. +func (s Schema) GetDescription() string { + return "" +} + +// GetMarkdownDescription always returns an empty string as there is no purpose +// for a meta schema description. The provider schema description should +// describe the provider itself. +func (s Schema) GetMarkdownDescription() string { + return "" +} + +// GetVersion always returns 0 as provider meta schemas cannot be versioned. +func (s Schema) GetVersion() int64 { + return 0 +} + +// Type returns the framework type of the schema. +func (s Schema) Type() attr.Type { + return fwschema.SchemaType(s) +} + +// TypeAtPath returns the framework type at the given schema path. 
+func (s Schema) TypeAtPath(ctx context.Context, p path.Path) (attr.Type, diag.Diagnostics) { + return fwschema.SchemaTypeAtPath(ctx, s, p) +} + +// TypeAtTerraformPath returns the framework type at the given tftypes path. +func (s Schema) TypeAtTerraformPath(ctx context.Context, p *tftypes.AttributePath) (attr.Type, error) { + return fwschema.SchemaTypeAtTerraformPath(ctx, s, p) +} + +// Validate verifies that the schema is not using a reserved field name for a top-level attribute. +func (s Schema) Validate() diag.Diagnostics { + var diags diag.Diagnostics + + attributes := s.GetAttributes() + + for k, v := range attributes { + d := validateAttributeFieldName(path.Root(k), k, v) + + diags.Append(d...) + } + + return diags +} + +// validFieldNameRegex is used to verify that name used for attributes and blocks +// comply with the defined regular expression. +var validFieldNameRegex = regexp.MustCompile("^[a-z0-9_]+$") + +// validateAttributeFieldName verifies that the name used for an attribute complies with the regular +// expression defined in validFieldNameRegex. +func validateAttributeFieldName(path path.Path, name string, attr fwschema.Attribute) diag.Diagnostics { + var diags diag.Diagnostics + + if !validFieldNameRegex.MatchString(name) { + diags.AddAttributeError( + path, + "Invalid Schema Field Name", + fmt.Sprintf("Field name %q is invalid, the only allowed characters are a-z, 0-9 and _. This is always a problem with the provider and should be reported to the provider developer.", name), + ) + } + + if na, ok := attr.(fwschema.NestedAttribute); ok { + nestedObject := na.GetNestedObject() + + if nestedObject == nil { + return diags + } + + attributes := nestedObject.GetAttributes() + + for k, v := range attributes { + d := validateAttributeFieldName(path.AtName(k), k, v) + + diags.Append(d...) + } + } + + return diags +} + +// schemaAttributes is a provider to fwschema type conversion function. 
+func schemaAttributes(attributes map[string]Attribute) map[string]fwschema.Attribute { + result := make(map[string]fwschema.Attribute, len(attributes)) + + for name, attribute := range attributes { + result[name] = attribute + } + + return result +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/set_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/set_attribute.go new file mode 100644 index 0000000000..661ca85bba --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/set_attribute.go @@ -0,0 +1,127 @@ +package metaschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = SetAttribute{} +) + +// SetAttribute represents a schema attribute that is a set with a single +// element type. When retrieving the value for this attribute, use types.Set +// as the value type unless the CustomType field is set. The ElementType field +// must be set. +// +// Use SetNestedAttribute if the underlying elements should be objects and +// require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a set or directly via square brace syntax. +// +// # set of strings +// example_attribute = ["first", "second"] +// +// Terraform configurations reference this attribute using expressions that +// accept a set. Sets cannot be indexed in Terraform, therefore an expression +// is required to access an explicit element. 
+type SetAttribute struct { + // ElementType is the type for all elements of the set. This field must be + // set. + ElementType attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.SetType. When retrieving data, the basetypes.SetValuable + // associated with this custom type must be used in place of types.Set. + CustomType basetypes.SetTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into a set +// index or an error. +func (a SetAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a SetAttribute +// and all fields are equal. 
+func (a SetAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(SetAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for provider meta schemas. +func (a SetAttribute) GetDeprecationMessage() string { + return "" +} + +// GetDescription returns the Description field value. +func (a SetAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a SetAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.SetType or the CustomType field value if defined. +func (a SetAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.SetType{ + ElemType: a.ElementType, + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a SetAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a SetAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a SetAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive always returns false as there is no plan for provider meta +// schema data. 
+func (a SetAttribute) IsSensitive() bool { + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/set_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/set_nested_attribute.go new file mode 100644 index 0000000000..9f9f2b9cb8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/set_nested_attribute.go @@ -0,0 +1,151 @@ +package metaschema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ NestedAttribute = SetNestedAttribute{} +) + +// SetNestedAttribute represents an attribute that is a set of objects where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.Set +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use SetAttribute if the underlying elements are of a single type and do +// not require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a set of objects or directly via square and curly brace syntax. +// +// # set of objects +// example_attribute = [ +// { +// nested_attribute = #... +// }, +// ] +// +// Terraform configurations reference this attribute using expressions that +// accept a set of objects. 
Sets cannot be indexed in Terraform, therefore +// an expression is required to access an explicit element. +type SetNestedAttribute struct { + // NestedObject is the underlying object that contains nested attributes. + // This field must be set. + NestedObject NestedAttributeObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.SetType of types.ObjectType. When retrieving data, the + // basetypes.SetValuable associated with this custom type must be used in + // place of types.Set. + CustomType basetypes.SetTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is ElementKeyValue, otherwise returns an error. 
+func (a SetNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyValue) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to SetNestedAttribute", step) + } + + return a.NestedObject, nil +} + +// Equal returns true if the given Attribute is a SetNestedAttribute +// and all fields are equal. +func (a SetNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(SetNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for provider meta schemas. +func (a SetNestedAttribute) GetDeprecationMessage() string { + return "" +} + +// GetDescription returns the Description field value. +func (a SetNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a SetNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (a SetNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return a.NestedObject +} + +// GetNestingMode always returns NestingModeList. +func (a SetNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeSet +} + +// GetType returns SetType of ObjectType or CustomType. +func (a SetNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.SetType{ + ElemType: a.NestedObject.Type(), + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a SetNestedAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a SetNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. 
+func (a SetNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive always returns false as there is no plan for provider meta +// schema data. +func (a SetNestedAttribute) IsSensitive() bool { + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/single_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/single_nested_attribute.go new file mode 100644 index 0000000000..c204a2ad97 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/single_nested_attribute.go @@ -0,0 +1,171 @@ +package metaschema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ NestedAttribute = SingleNestedAttribute{} +) + +// SingleNestedAttribute represents an attribute that is a single object where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.Object +// as the value type unless the CustomType field is set. The Attributes field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use ObjectAttribute if the underlying attributes do not require definition +// beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return an object or directly via curly brace syntax. +// +// # single object +// example_attribute = { +// nested_attribute = #... 
+// } +// +// Terraform configurations reference this attribute using expressions that +// accept an object or an attribute name directly via period syntax: +// +// # object nested_attribute value +// .example_attribute.nested_attribute +type SingleNestedAttribute struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. This field must be set. + Attributes map[string]Attribute + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is AttributeName, otherwise returns an error. 
+func (a SingleNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + name, ok := step.(tftypes.AttributeName) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to SingleNestedAttribute", step) + } + + attribute, ok := a.Attributes[string(name)] + + if !ok { + return nil, fmt.Errorf("no attribute %q on SingleNestedAttribute", name) + } + + return attribute, nil +} + +// Equal returns true if the given Attribute is a SingleNestedAttribute +// and all fields are equal. +func (a SingleNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(SingleNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetAttributes returns the Attributes field value. +func (a SingleNestedAttribute) GetAttributes() fwschema.UnderlyingAttributes { + return schemaAttributes(a.Attributes) +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for provider meta schemas. +func (a SingleNestedAttribute) GetDeprecationMessage() string { + return "" +} + +// GetDescription returns the Description field value. +func (a SingleNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a SingleNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns a generated NestedAttributeObject from the +// Attributes and CustomType field values. +func (a SingleNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return NestedAttributeObject{ + Attributes: a.Attributes, + CustomType: a.CustomType, + } +} + +// GetNestingMode always returns NestingModeList. +func (a SingleNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeSingle +} + +// GetType returns ListType of ObjectType or CustomType. 
+func (a SingleNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + attrTypes := make(map[string]attr.Type, len(a.Attributes)) + + for name, attribute := range a.Attributes { + attrTypes[name] = attribute.GetType() + } + + return types.ObjectType{ + AttrTypes: attrTypes, + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a SingleNestedAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a SingleNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a SingleNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive always returns false as there is no plan for provider meta +// schema data. +func (a SingleNestedAttribute) IsSensitive() bool { + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/string_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/string_attribute.go new file mode 100644 index 0000000000..293b4d211f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/metaschema/string_attribute.go @@ -0,0 +1,116 @@ +package metaschema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = StringAttribute{} +) + +// StringAttribute represents a schema attribute that is a string. 
When +// retrieving the value for this attribute, use types.String as the value type +// unless the CustomType field is set. +// +// Terraform configurations configure this attribute using expressions that +// return a string or directly via double quote syntax. +// +// example_attribute = "value" +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type StringAttribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.StringType. When retrieving data, the basetypes.StringValuable + // associated with this custom type must be used in place of types.String. + CustomType basetypes.StringTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a StringAttribute. 
+func (a StringAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a StringAttribute +// and all fields are equal. +func (a StringAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(StringAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage always returns an empty string as there is no +// deprecation validation support for provider meta schemas. +func (a StringAttribute) GetDeprecationMessage() string { + return "" +} + +// GetDescription returns the Description field value. +func (a StringAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a StringAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.StringType or the CustomType field value if defined. +func (a StringAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.StringType +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a StringAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a StringAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a StringAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive always returns false as there is no plan for provider meta +// schema data. 
+func (a StringAttribute) IsSensitive() bool { + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/provider.go new file mode 100644 index 0000000000..60b1893ff7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/provider.go @@ -0,0 +1,102 @@ +package provider + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +// Provider is the core interface that all Terraform providers must implement. +// +// Providers can optionally implement these additional concepts: +// +// - Validation: Schema-based or entire configuration +// via ProviderWithConfigValidators or ProviderWithValidateConfig. +// - Meta Schema: ProviderWithMetaSchema +type Provider interface { + // Metadata should return the metadata for the provider, such as + // a type name and version data. + // + // Implementing the MetadataResponse.TypeName will populate the + // datasource.MetadataRequest.ProviderTypeName and + // resource.MetadataRequest.ProviderTypeName fields automatically. + Metadata(context.Context, MetadataRequest, *MetadataResponse) + + // Schema should return the schema for this provider. + Schema(context.Context, SchemaRequest, *SchemaResponse) + + // Configure is called at the beginning of the provider lifecycle, when + // Terraform sends to the provider the values the user specified in the + // provider configuration block. These are supplied in the + // ConfigureProviderRequest argument. + // Values from provider configuration are often used to initialise an + // API client, which should be stored on the struct implementing the + // Provider interface. 
+ Configure(context.Context, ConfigureRequest, *ConfigureResponse) + + // DataSources returns a slice of functions to instantiate each DataSource + // implementation. + // + // The data source type name is determined by the DataSource implementing + // the Metadata method. All data sources must have unique names. + DataSources(context.Context) []func() datasource.DataSource + + // Resources returns a slice of functions to instantiate each Resource + // implementation. + // + // The resource type name is determined by the Resource implementing + // the Metadata method. All resources must have unique names. + Resources(context.Context) []func() resource.Resource +} + +// ProviderWithConfigValidators is an interface type that extends Provider to include declarative validations. +// +// Declaring validation using this methodology simplifies implementation of +// reusable functionality. These also include descriptions, which can be used +// for automating documentation. +// +// Validation will include ConfigValidators and ValidateConfig, if both are +// implemented, in addition to any Attribute or Type validation. +type ProviderWithConfigValidators interface { + Provider + + // ConfigValidators returns a list of functions which will all be performed during validation. + ConfigValidators(context.Context) []ConfigValidator +} + +// ProviderWithMetaSchema is a provider with a provider meta schema, which +// is configured by practitioners via the provider_meta configuration block +// and the configuration data is included with certain data source and resource +// operations. The intended use case is to enable Terraform module authors +// within the same organization of the provider to track module usage in +// requests. Other use cases are explicitly not supported. All provider +// instances (aliases) receive the same data. +// +// This functionality is currently experimental and subject to change or break +// without warning. 
It is not protected by version compatibility guarantees. +type ProviderWithMetaSchema interface { + Provider + + // MetaSchema should return the meta schema for this provider. + // + // This functionality is currently experimental and subject to change or + // break without warning. It is not protected by version compatibility + // guarantees. + MetaSchema(context.Context, MetaSchemaRequest, *MetaSchemaResponse) +} + +// ProviderWithValidateConfig is an interface type that extends Provider to include imperative validation. +// +// Declaring validation using this methodology simplifies one-off +// functionality that typically applies to a single provider. Any documentation +// of this functionality must be manually added into schema descriptions. +// +// Validation will include ConfigValidators and ValidateConfig, if both are +// implemented, in addition to any Attribute or Type validation. +type ProviderWithValidateConfig interface { + Provider + + // ValidateConfig performs the validation. + ValidateConfig(context.Context, ValidateConfigRequest, *ValidateConfigResponse) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema.go new file mode 100644 index 0000000000..ce8337f859 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema.go @@ -0,0 +1,24 @@ +package provider + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/provider/schema" +) + +// SchemaRequest represents a request for the Provider to return its schema. +// An instance of this request struct is supplied as an argument to the +// Provider type Schema method. +type SchemaRequest struct{} + +// SchemaResponse represents a response to a SchemaRequest. 
An instance of this +// response struct is supplied as an argument to the Provider type Schema +// method. +type SchemaResponse struct { + // Schema is the schema of the provider. + Schema schema.Schema + + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/attribute.go new file mode 100644 index 0000000000..b7d1e91272 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/attribute.go @@ -0,0 +1,33 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" +) + +// Attribute define a value field inside the Schema. Implementations in this +// package include: +// - BoolAttribute +// - Float64Attribute +// - Int64Attribute +// - ListAttribute +// - MapAttribute +// - NumberAttribute +// - ObjectAttribute +// - SetAttribute +// - StringAttribute +// +// Additionally, the NestedAttribute interface extends Attribute with nested +// attributes. Only supported in protocol version 6. Implementations in this +// package include: +// - ListNestedAttribute +// - MapNestedAttribute +// - SetNestedAttribute +// - SingleNestedAttribute +// +// In practitioner configurations, an equals sign (=) is required to set +// the value. 
[Configuration Reference] +// +// [Configuration Reference]: https://developer.hashicorp.com/terraform/language/syntax/configuration +type Attribute interface { + fwschema.Attribute +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/block.go new file mode 100644 index 0000000000..f6e27e6426 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/block.go @@ -0,0 +1,27 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" +) + +// Block defines a structural field inside a Schema. Implementations in this +// package include: +// - ListNestedBlock +// - SetNestedBlock +// - SingleNestedBlock +// +// In practitioner configurations, an equals sign (=) cannot be used to set the +// value. Blocks are instead repeated as necessary, or require the use of +// [Dynamic Block Expressions]. +// +// Prefer NestedAttribute over Block. Blocks should typically be used for +// configuration compatibility with previously existing schemas from an older +// Terraform Plugin SDK. Efforts should be made to convert from Block to +// NestedAttribute as a breaking change for practitioners. 
+// +// [Dynamic Block Expressions]: https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks +// +// [Configuration Reference]: https://developer.hashicorp.com/terraform/language/syntax/configuration +type Block interface { + fwschema.Block +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/bool_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/bool_attribute.go new file mode 100644 index 0000000000..1855638601 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/bool_attribute.go @@ -0,0 +1,177 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = BoolAttribute{} + _ fwxschema.AttributeWithBoolValidators = BoolAttribute{} +) + +// BoolAttribute represents a schema attribute that is a boolean. When +// retrieving the value for this attribute, use types.Bool as the value type +// unless the CustomType field is set. +// +// Terraform configurations configure this attribute using expressions that +// return a boolean or directly via the true/false keywords. +// +// example_attribute = true +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type BoolAttribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.BoolType. 
When retrieving data, the basetypes.BoolValuable + // associated with this custom type must be used in place of types.Bool. + CustomType basetypes.BoolTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." 
+ // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Bool +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a BoolAttribute. 
+func (a BoolAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// BoolValidators returns the Validators field value. +func (a BoolAttribute) BoolValidators() []validator.Bool { + return a.Validators +} + +// Equal returns true if the given Attribute is a BoolAttribute +// and all fields are equal. +func (a BoolAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(BoolAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a BoolAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a BoolAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a BoolAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.StringType or the CustomType field value if defined. +func (a BoolAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.BoolType +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a BoolAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a BoolAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a BoolAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. 
+func (a BoolAttribute) IsSensitive() bool { + return a.Sensitive +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/doc.go new file mode 100644 index 0000000000..f5542260f4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/doc.go @@ -0,0 +1,5 @@ +// Package schema contains all available schema functionality for data sources. +// Data source schemas define the structure and value types for configuration +// and state data. Schemas are implemented via the datasource.DataSource type +// Schema method. +package schema diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/float64_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/float64_attribute.go new file mode 100644 index 0000000000..1f89f928e3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/float64_attribute.go @@ -0,0 +1,180 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = Float64Attribute{} + _ fwxschema.AttributeWithFloat64Validators = Float64Attribute{} +) + +// Float64Attribute represents a schema attribute that is a 64-bit floating +// point number. 
When retrieving the value for this attribute, use +// types.Float64 as the value type unless the CustomType field is set. +// +// Use Int64Attribute for 64-bit integer attributes or NumberAttribute for +// 512-bit generic number attributes. +// +// Terraform configurations configure this attribute using expressions that +// return a number or directly via a floating point value. +// +// example_attribute = 123.45 +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type Float64Attribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.Float64Type. When retrieving data, the basetypes.Float64Valuable + // associated with this custom type must be used in place of types.Float64. + CustomType basetypes.Float64Typable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. 
+ Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. 
+ // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Float64 +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a Float64Attribute. +func (a Float64Attribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a Float64Attribute +// and all fields are equal. +func (a Float64Attribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(Float64Attribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// Float64Validators returns the Validators field value. +func (a Float64Attribute) Float64Validators() []validator.Float64 { + return a.Validators +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a Float64Attribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a Float64Attribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. 
+func (a Float64Attribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.Float64Type or the CustomType field value if defined. +func (a Float64Attribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.Float64Type +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a Float64Attribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a Float64Attribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a Float64Attribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a Float64Attribute) IsSensitive() bool { + return a.Sensitive +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/int64_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/int64_attribute.go new file mode 100644 index 0000000000..b9741700b8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/int64_attribute.go @@ -0,0 +1,180 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. 
+var ( + _ Attribute = Int64Attribute{} + _ fwxschema.AttributeWithInt64Validators = Int64Attribute{} +) + +// Int64Attribute represents a schema attribute that is a 64-bit integer. +// When retrieving the value for this attribute, use types.Int64 as the value +// type unless the CustomType field is set. +// +// Use Float64Attribute for 64-bit floating point number attributes or +// NumberAttribute for 512-bit generic number attributes. +// +// Terraform configurations configure this attribute using expressions that +// return a number or directly via an integer value. +// +// example_attribute = 123 +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type Int64Attribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.Int64Type. When retrieving data, the basetypes.Int64Valuable + // associated with this custom type must be used in place of types.Int64. + CustomType basetypes.Int64Typable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. 
+ Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. 
+ // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Int64 +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a Int64Attribute. +func (a Int64Attribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a Int64Attribute +// and all fields are equal. +func (a Int64Attribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(Int64Attribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a Int64Attribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a Int64Attribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a Int64Attribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.Int64Type or the CustomType field value if defined. 
+func (a Int64Attribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.Int64Type +} + +// Int64Validators returns the Validators field value. +func (a Int64Attribute) Int64Validators() []validator.Int64 { + return a.Validators +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a Int64Attribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a Int64Attribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a Int64Attribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a Int64Attribute) IsSensitive() bool { + return a.Sensitive +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/list_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/list_attribute.go new file mode 100644 index 0000000000..b439efa620 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/list_attribute.go @@ -0,0 +1,190 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = ListAttribute{} + _ fwxschema.AttributeWithListValidators = ListAttribute{} +) + +// ListAttribute represents a schema attribute that is a list with a single +// element type. 
When retrieving the value for this attribute, use types.List +// as the value type unless the CustomType field is set. The ElementType field +// must be set. +// +// Use ListNestedAttribute if the underlying elements should be objects and +// require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a list or directly via square brace syntax. +// +// # list of strings +// example_attribute = ["first", "second"] +// +// Terraform configurations reference this attribute using expressions that +// accept a list or an element directly via square brace 0-based index syntax: +// +// # first known element +// .example_attribute[0] +type ListAttribute struct { + // ElementType is the type for all elements of the list. This field must be + // set. + ElementType attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ListType. When retrieving data, the basetypes.ListValuable + // associated with this custom type must be used in place of types.List. + CustomType basetypes.ListTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. 
+ Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). 
+ // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.List +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into a list +// index or an error. +func (a ListAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a ListAttribute +// and all fields are equal. +func (a ListAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(ListAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a ListAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. 
+func (a ListAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a ListAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.ListType or the CustomType field value if defined. +func (a ListAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.ListType{ + ElemType: a.ElementType, + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a ListAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a ListAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a ListAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a ListAttribute) IsSensitive() bool { + return a.Sensitive +} + +// ListValidators returns the Validators field value. 
+func (a ListAttribute) ListValidators() []validator.List { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/list_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/list_nested_attribute.go new file mode 100644 index 0000000000..fa8c577b4c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/list_nested_attribute.go @@ -0,0 +1,217 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ NestedAttribute = ListNestedAttribute{} + _ fwxschema.AttributeWithListValidators = ListNestedAttribute{} +) + +// ListNestedAttribute represents an attribute that is a list of objects where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.List +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use ListAttribute if the underlying elements are of a single type and do +// not require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a list of objects or directly via square and curly brace syntax. 
+// +// # list of objects +// example_attribute = [ +// { +// nested_attribute = #... +// }, +// ] +// +// Terraform configurations reference this attribute using expressions that +// accept a list of objects or an element directly via square brace 0-based +// index syntax: +// +// # first known object +// .example_attribute[0] +// # first known object nested_attribute value +// .example_attribute[0].nested_attribute +type ListNestedAttribute struct { + // NestedObject is the underlying object that contains nested attributes. + // This field must be set. + NestedObject NestedAttributeObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.ListType of types.ObjectType. When retrieving data, the + // basetypes.ListValuable associated with this custom type must be used in + // place of types.List. + CustomType basetypes.ListTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. 
+ Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. 
+ // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.List +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is ElementKeyInt, otherwise returns an error. +func (a ListNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyInt) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to ListNestedAttribute", step) + } + + return a.NestedObject, nil +} + +// Equal returns true if the given Attribute is a ListNestedAttribute +// and all fields are equal. +func (a ListNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(ListNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a ListNestedAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a ListNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. 
+func (a ListNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (a ListNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return a.NestedObject +} + +// GetNestingMode always returns NestingModeList. +func (a ListNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeList +} + +// GetType returns ListType of ObjectType or CustomType. +func (a ListNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.ListType{ + ElemType: a.NestedObject.Type(), + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a ListNestedAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a ListNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a ListNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a ListNestedAttribute) IsSensitive() bool { + return a.Sensitive +} + +// ListValidators returns the Validators field value. 
+func (a ListNestedAttribute) ListValidators() []validator.List { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/list_nested_block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/list_nested_block.go new file mode 100644 index 0000000000..30c5e77922 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/list_nested_block.go @@ -0,0 +1,185 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Block = ListNestedBlock{} + _ fwxschema.BlockWithListValidators = ListNestedBlock{} +) + +// ListNestedBlock represents a block that is a list of objects where +// the object attributes can be fully defined, including further attributes +// or blocks. When retrieving the value for this block, use types.List +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. +// +// Prefer ListNestedAttribute over ListNestedBlock if the provider is +// using protocol version 6. Nested attributes allow practitioners to configure +// values directly with expressions. +// +// Terraform configurations configure this block repeatedly using curly brace +// syntax without an equals (=) sign or [Dynamic Block Expressions]. +// +// # list of blocks with two elements +// example_block { +// nested_attribute = #... 
+// } +// example_block { +// nested_attribute = #... +// } +// +// Terraform configurations reference this block using expressions that +// accept a list of objects or an element directly via square brace 0-based +// index syntax: +// +// # first known object +// .example_block[0] +// # first known object nested_attribute value +// .example_block[0].nested_attribute +// +// [Dynamic Block Expressions]: https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks +type ListNestedBlock struct { + // NestedObject is the underlying object that contains nested attributes or + // blocks. This field must be set. + NestedObject NestedBlockObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.ListType of types.ObjectType. When retrieving data, the + // basetypes.ListValuable associated with this custom type must be used in + // place of types.List. + CustomType basetypes.ListTypable + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." 
+ // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.List +} + +// ApplyTerraform5AttributePathStep returns the NestedObject field value if step +// is ElementKeyInt, otherwise returns an error. 
+func (b ListNestedBlock) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyInt) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to ListNestedBlock", step) + } + + return b.NestedObject, nil +} + +// Equal returns true if the given Block is ListNestedBlock +// and all fields are equal. +func (b ListNestedBlock) Equal(o fwschema.Block) bool { + if _, ok := o.(ListNestedBlock); !ok { + return false + } + + return fwschema.BlocksEqual(b, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (b ListNestedBlock) GetDeprecationMessage() string { + return b.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (b ListNestedBlock) GetDescription() string { + return b.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (b ListNestedBlock) GetMarkdownDescription() string { + return b.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (b ListNestedBlock) GetNestedObject() fwschema.NestedBlockObject { + return b.NestedObject +} + +// GetNestingMode always returns BlockNestingModeList. +func (b ListNestedBlock) GetNestingMode() fwschema.BlockNestingMode { + return fwschema.BlockNestingModeList +} + +// ListValidators returns the Validators field value. +func (b ListNestedBlock) ListValidators() []validator.List { + return b.Validators +} + +// Type returns ListType of ObjectType or CustomType. 
+func (b ListNestedBlock) Type() attr.Type {
+	if b.CustomType != nil {
+		return b.CustomType
+	}
+
+	return types.ListType{
+		ElemType: b.NestedObject.Type(),
+	}
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/map_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/map_attribute.go
new file mode 100644
index 0000000000..13100e5069
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/map_attribute.go
@@ -0,0 +1,193 @@
+package schema
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/internal/fwschema"
+	"github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema"
+	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+	"github.com/hashicorp/terraform-plugin-go/tftypes"
+)
+
+// Ensure the implementation satisfies the desired interfaces.
+var (
+	_ Attribute                            = MapAttribute{}
+	_ fwxschema.AttributeWithMapValidators = MapAttribute{}
+)
+
+// MapAttribute represents a schema attribute that is a map with a single
+// element type. When retrieving the value for this attribute, use types.Map
+// as the value type unless the CustomType field is set. The ElementType field
+// must be set.
+//
+// Use MapNestedAttribute if the underlying elements should be objects and
+// require definition beyond type information.
+//
+// Terraform configurations configure this attribute using expressions that
+// return a map or directly via curly brace syntax.
+// +// # map of strings +// example_attribute = { +// key1 = "first", +// key2 = "second", +// } +// +// Terraform configurations reference this attribute using expressions that +// accept a map or an element directly via square brace string syntax: +// +// # key1 known element +// .example_attribute["key1"] +type MapAttribute struct { + // ElementType is the type for all elements of the map. This field must be + // set. + ElementType attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.MapType. When retrieving data, the basetypes.MapValuable + // associated with this custom type must be used in place of types.Map. + CustomType basetypes.MapTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. 
+ MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. 
+ // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Map +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into a map +// index or an error. +func (a MapAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a MapAttribute +// and all fields are equal. +func (a MapAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(MapAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a MapAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a MapAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a MapAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.MapType or the CustomType field value if defined. +func (a MapAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.MapType{ + ElemType: a.ElementType, + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a MapAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a MapAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. 
+func (a MapAttribute) IsRequired() bool {
+	return a.Required
+}
+
+// IsSensitive returns the Sensitive field value.
+func (a MapAttribute) IsSensitive() bool {
+	return a.Sensitive
+}
+
+// MapValidators returns the Validators field value.
+func (a MapAttribute) MapValidators() []validator.Map {
+	return a.Validators
+}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/map_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/map_nested_attribute.go
new file mode 100644
index 0000000000..196c2e1ab6
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/map_nested_attribute.go
@@ -0,0 +1,218 @@
+package schema
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-go/tftypes"
+
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/internal/fwschema"
+	"github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema"
+	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+)
+
+// Ensure the implementation satisfies the desired interfaces.
+var (
+	_ NestedAttribute                      = MapNestedAttribute{}
+	_ fwxschema.AttributeWithMapValidators = MapNestedAttribute{}
+)
+
+// MapNestedAttribute represents an attribute that is a map of objects where
+// the object attributes can be fully defined, including further nested
+// attributes. When retrieving the value for this attribute, use types.Map
+// as the value type unless the CustomType field is set. The NestedObject field
+// must be set. Nested attributes are only compatible with protocol version 6.
+//
+// Use MapAttribute if the underlying elements are of a single type and do
+// not require definition beyond type information.
+//
+// Terraform configurations configure this attribute using expressions that
+// return a map of objects or directly via curly brace syntax.
+//
+//	# map of objects
+//	example_attribute = {
+//		key = {
+//			nested_attribute = #...
+//		},
+//	}
+//
+// Terraform configurations reference this attribute using expressions that
+// accept a map of objects or an element directly via square brace string
+// syntax:
+//
+//	# known object at key
+//	.example_attribute["key"]
+//	# known object nested_attribute value at key
+//	.example_attribute["key"].nested_attribute
+type MapNestedAttribute struct {
+	// NestedObject is the underlying object that contains nested attributes.
+	// This field must be set.
+	NestedObject NestedAttributeObject
+
+	// CustomType enables the use of a custom attribute type in place of the
+	// default types.MapType of types.ObjectType. When retrieving data, the
+	// basetypes.MapValuable associated with this custom type must be used in
+	// place of types.Map.
+	CustomType basetypes.MapTypable
+
+	// Required indicates whether the practitioner must enter a value for
+	// this attribute or not. Required and Optional cannot both be true,
+	// and Required and Computed cannot both be true.
+	Required bool
+
+	// Optional indicates whether the practitioner can choose to enter a value
+	// for this attribute or not. Optional and Required cannot both be true.
+	Optional bool
+
+	// Sensitive indicates whether the value of this attribute should be
+	// considered sensitive data. Setting it to true will obscure the value
+	// in CLI output. Sensitive does not impact how values are stored, and
+	// practitioners are encouraged to store their state as if the entire
+	// file is sensitive.
+	Sensitive bool
+
+	// Description is used in various tooling, like the language server, to
+	// give practitioners more information about what this attribute is,
+	// what it's for, and how it should be used.
It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. 
+ // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Map +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is ElementKeyString, otherwise returns an error. +func (a MapNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyString) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to MapNestedAttribute", step) + } + + return a.NestedObject, nil +} + +// Equal returns true if the given Attribute is a MapNestedAttribute +// and all fields are equal. +func (a MapNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(MapNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a MapNestedAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a MapNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. 
+func (a MapNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (a MapNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return a.NestedObject +} + +// GetNestingMode always returns NestingModeList. +func (a MapNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeMap +} + +// GetType returns MapType of ObjectType or CustomType. +func (a MapNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.MapType{ + ElemType: a.NestedObject.Type(), + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a MapNestedAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a MapNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a MapNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a MapNestedAttribute) IsSensitive() bool { + return a.Sensitive +} + +// MapValidators returns the Validators field value. +func (a MapNestedAttribute) MapValidators() []validator.Map { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/nested_attribute.go new file mode 100644 index 0000000000..5429975dae --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/nested_attribute.go @@ -0,0 +1,11 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" +) + +// Nested attributes are only compatible with protocol version 6. 
+type NestedAttribute interface { + Attribute + fwschema.NestedAttribute +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/nested_attribute_object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/nested_attribute_object.go new file mode 100644 index 0000000000..ae41a1873b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/nested_attribute_object.go @@ -0,0 +1,79 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var _ fwxschema.NestedAttributeObjectWithValidators = NestedAttributeObject{} + +// NestedAttributeObject is the object containing the underlying attributes +// for a ListNestedAttribute, MapNestedAttribute, SetNestedAttribute, or +// SingleNestedAttribute (automatically generated). When retrieving the value +// for this attribute, use types.Object as the value type unless the CustomType +// field is set. The Attributes field must be set. Nested attributes are only +// compatible with protocol version 6. +// +// This object enables customizing and simplifying details within its parent +// NestedAttribute, therefore it cannot have Terraform schema fields such as +// Required, Description, etc. +type NestedAttributeObject struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. This field must be set. + Attributes map[string]Attribute + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. 
When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object +} + +// ApplyTerraform5AttributePathStep performs an AttributeName step on the +// underlying attributes or returns an error. +func (o NestedAttributeObject) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (any, error) { + return fwschema.NestedAttributeObjectApplyTerraform5AttributePathStep(o, step) +} + +// Equal returns true if the given NestedAttributeObject is equivalent. +func (o NestedAttributeObject) Equal(other fwschema.NestedAttributeObject) bool { + if _, ok := other.(NestedAttributeObject); !ok { + return false + } + + return fwschema.NestedAttributeObjectEqual(o, other) +} + +// GetAttributes returns the Attributes field value. +func (o NestedAttributeObject) GetAttributes() fwschema.UnderlyingAttributes { + return schemaAttributes(o.Attributes) +} + +// ObjectValidators returns the Validators field value. +func (o NestedAttributeObject) ObjectValidators() []validator.Object { + return o.Validators +} + +// Type returns the framework type of the NestedAttributeObject. 
+func (o NestedAttributeObject) Type() basetypes.ObjectTypable { + if o.CustomType != nil { + return o.CustomType + } + + return fwschema.NestedAttributeObjectType(o) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/nested_block_object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/nested_block_object.go new file mode 100644 index 0000000000..ea0cf61d6a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/nested_block_object.go @@ -0,0 +1,91 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var _ fwxschema.NestedBlockObjectWithValidators = NestedBlockObject{} + +// NestedBlockObject is the object containing the underlying attributes and +// blocks for a ListNestedBlock or SetNestedBlock. When retrieving the value +// for this attribute, use types.Object as the value type unless the CustomType +// field is set. +// +// This object enables customizing and simplifying details within its parent +// Block, therefore it cannot have Terraform schema fields such as Description, +// etc. +type NestedBlockObject struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Blocks names. + Attributes map[string]Attribute + + // Blocks is the mapping of underlying block names to block definitions. 
+ // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Attributes names. + Blocks map[string]Block + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object +} + +// ApplyTerraform5AttributePathStep performs an AttributeName step on the +// underlying attributes or returns an error. +func (o NestedBlockObject) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (any, error) { + return fwschema.NestedBlockObjectApplyTerraform5AttributePathStep(o, step) +} + +// Equal returns true if the given NestedBlockObject is equivalent. +func (o NestedBlockObject) Equal(other fwschema.NestedBlockObject) bool { + if _, ok := other.(NestedBlockObject); !ok { + return false + } + + return fwschema.NestedBlockObjectEqual(o, other) +} + +// GetAttributes returns the Attributes field value. +func (o NestedBlockObject) GetAttributes() fwschema.UnderlyingAttributes { + return schemaAttributes(o.Attributes) +} + +// GetAttributes returns the Blocks field value. +func (o NestedBlockObject) GetBlocks() map[string]fwschema.Block { + return schemaBlocks(o.Blocks) +} + +// ObjectValidators returns the Validators field value. 
+func (o NestedBlockObject) ObjectValidators() []validator.Object { + return o.Validators +} + +// Type returns the framework type of the NestedBlockObject. +func (o NestedBlockObject) Type() basetypes.ObjectTypable { + if o.CustomType != nil { + return o.CustomType + } + + return fwschema.NestedBlockObjectType(o) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/number_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/number_attribute.go new file mode 100644 index 0000000000..7281519e15 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/number_attribute.go @@ -0,0 +1,181 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = NumberAttribute{} + _ fwxschema.AttributeWithNumberValidators = NumberAttribute{} +) + +// NumberAttribute represents a schema attribute that is a generic number with +// up to 512 bits of floating point or integer precision. When retrieving the +// value for this attribute, use types.Number as the value type unless the +// CustomType field is set. +// +// Use Float64Attribute for 64-bit floating point number attributes or +// Int64Attribute for 64-bit integer number attributes. +// +// Terraform configurations configure this attribute using expressions that +// return a number or directly via a floating point or integer value. 
+// +// example_attribute = 123 +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type NumberAttribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.NumberType. When retrieving data, the basetypes.NumberValuable + // associated with this custom type must be used in place of types.Number. + CustomType basetypes.NumberTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. 
The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. 
+ // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Number +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a NumberAttribute. +func (a NumberAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a NumberAttribute +// and all fields are equal. +func (a NumberAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(NumberAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a NumberAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a NumberAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a NumberAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.NumberType or the CustomType field value if defined. +func (a NumberAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.NumberType +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a NumberAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a NumberAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a NumberAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. 
+func (a NumberAttribute) IsSensitive() bool { + return a.Sensitive +} + +// NumberValidators returns the Validators field value. +func (a NumberAttribute) NumberValidators() []validator.Number { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/object_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/object_attribute.go new file mode 100644 index 0000000000..76ad803a82 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/object_attribute.go @@ -0,0 +1,192 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = ObjectAttribute{} + _ fwxschema.AttributeWithObjectValidators = ObjectAttribute{} +) + +// ObjectAttribute represents a schema attribute that is an object with only +// type information for underlying attributes. When retrieving the value for +// this attribute, use types.Object as the value type unless the CustomType +// field is set. The AttributeTypes field must be set. +// +// Prefer SingleNestedAttribute over ObjectAttribute if the provider is +// using protocol version 6 and full attribute functionality is needed. +// +// Terraform configurations configure this attribute using expressions that +// return an object or directly via curly brace syntax. 
+// +// # object with one attribute +// example_attribute = { +// underlying_attribute = #... +// } +// +// Terraform configurations reference this attribute using expressions that +// accept an object or an attribute directly via period syntax: +// +// # underlying attribute +// .example_attribute.underlying_attribute +type ObjectAttribute struct { + // AttributeTypes is the mapping of underlying attribute names to attribute + // types. This field must be set. + AttributeTypes map[string]attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. 
It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. 
+ // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into an +// attribute name or an error. +func (a ObjectAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a ObjectAttribute +// and all fields are equal. +func (a ObjectAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(ObjectAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a ObjectAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a ObjectAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a ObjectAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.ObjectType or the CustomType field value if defined. +func (a ObjectAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.ObjectType{ + AttrTypes: a.AttributeTypes, + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a ObjectAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a ObjectAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. 
+func (a ObjectAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a ObjectAttribute) IsSensitive() bool { + return a.Sensitive +} + +// ObjectValidators returns the Validators field value. +func (a ObjectAttribute) ObjectValidators() []validator.Object { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/schema.go new file mode 100644 index 0000000000..11b1135b40 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/schema.go @@ -0,0 +1,266 @@ +package schema + +import ( + "context" + "fmt" + "regexp" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/path" +) + +// Schema must satify the fwschema.Schema interface. +var _ fwschema.Schema = Schema{} + +// Schema defines the structure and value types of provider configuration data. +// This type is used as the provider.SchemaResponse type Schema field, which is +// implemented by the provider.Provider type Schema method. +type Schema struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Blocks names. + Attributes map[string]Attribute + + // Blocks is the mapping of underlying block names to block definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Attributes names. 
+ Blocks map[string]Block + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this provider is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this provider is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this provider. The warning diagnostic + // summary is automatically set to "Provider Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Use examplenewcloud provider instead." + // - "Remove this provider as it no longer is valid." + // + DeprecationMessage string +} + +// ApplyTerraform5AttributePathStep applies the given AttributePathStep to the +// schema. +func (s Schema) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (any, error) { + return fwschema.SchemaApplyTerraform5AttributePathStep(s, step) +} + +// AttributeAtPath returns the Attribute at the passed path. If the path points +// to an element or attribute of a complex type, rather than to an Attribute, +// it will return an ErrPathInsideAtomicAttribute error. +func (s Schema) AttributeAtPath(ctx context.Context, p path.Path) (fwschema.Attribute, diag.Diagnostics) { + return fwschema.SchemaAttributeAtPath(ctx, s, p) +} + +// AttributeAtPath returns the Attribute at the passed path. If the path points +// to an element or attribute of a complex type, rather than to an Attribute, +// it will return an ErrPathInsideAtomicAttribute error. 
+func (s Schema) AttributeAtTerraformPath(ctx context.Context, p *tftypes.AttributePath) (fwschema.Attribute, error) { + return fwschema.SchemaAttributeAtTerraformPath(ctx, s, p) +} + +// GetAttributes returns the Attributes field value. +func (s Schema) GetAttributes() map[string]fwschema.Attribute { + return schemaAttributes(s.Attributes) +} + +// GetBlocks returns the Blocks field value. +func (s Schema) GetBlocks() map[string]fwschema.Block { + return schemaBlocks(s.Blocks) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (s Schema) GetDeprecationMessage() string { + return s.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (s Schema) GetDescription() string { + return s.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (s Schema) GetMarkdownDescription() string { + return s.MarkdownDescription +} + +// GetVersion always returns 0 as provider schemas cannot be versioned. +func (s Schema) GetVersion() int64 { + return 0 +} + +// Type returns the framework type of the schema. +func (s Schema) Type() attr.Type { + return fwschema.SchemaType(s) +} + +// TypeAtPath returns the framework type at the given schema path. +func (s Schema) TypeAtPath(ctx context.Context, p path.Path) (attr.Type, diag.Diagnostics) { + return fwschema.SchemaTypeAtPath(ctx, s, p) +} + +// TypeAtTerraformPath returns the framework type at the given tftypes path. +func (s Schema) TypeAtTerraformPath(ctx context.Context, p *tftypes.AttributePath) (attr.Type, error) { + return fwschema.SchemaTypeAtTerraformPath(ctx, s, p) +} + +// Validate verifies that the schema is not using a reserved field name for a top-level attribute. +func (s Schema) Validate() diag.Diagnostics { + var diags diag.Diagnostics + + // Raise error diagnostics when data source configuration uses reserved + // field names for root-level attributes. 
+ reservedFieldNames := map[string]struct{}{ + "alias": {}, + "version": {}, + } + + attributes := s.GetAttributes() + + for k, v := range attributes { + if _, ok := reservedFieldNames[k]; ok { + diags.AddAttributeError( + path.Root(k), + "Schema Using Reserved Field Name", + fmt.Sprintf("%q is a reserved field name", k), + ) + } + + d := validateAttributeFieldName(path.Root(k), k, v) + + diags.Append(d...) + } + + blocks := s.GetBlocks() + + for k, v := range blocks { + if _, ok := reservedFieldNames[k]; ok { + diags.AddAttributeError( + path.Root(k), + "Schema Using Reserved Field Name", + fmt.Sprintf("%q is a reserved field name", k), + ) + } + + d := validateBlockFieldName(path.Root(k), k, v) + + diags.Append(d...) + } + + return diags +} + +// validFieldNameRegex is used to verify that name used for attributes and blocks +// comply with the defined regular expression. +var validFieldNameRegex = regexp.MustCompile("^[a-z0-9_]+$") + +// validateAttributeFieldName verifies that the name used for an attribute complies with the regular +// expression defined in validFieldNameRegex. +func validateAttributeFieldName(path path.Path, name string, attr fwschema.Attribute) diag.Diagnostics { + var diags diag.Diagnostics + + if !validFieldNameRegex.MatchString(name) { + diags.AddAttributeError( + path, + "Invalid Schema Field Name", + fmt.Sprintf("Field name %q is invalid, the only allowed characters are a-z, 0-9 and _. This is always a problem with the provider and should be reported to the provider developer.", name), + ) + } + + if na, ok := attr.(fwschema.NestedAttribute); ok { + nestedObject := na.GetNestedObject() + + if nestedObject == nil { + return diags + } + + attributes := nestedObject.GetAttributes() + + for k, v := range attributes { + d := validateAttributeFieldName(path.AtName(k), k, v) + + diags.Append(d...) 
+ } + } + + return diags +} + +// validateBlockFieldName verifies that the name used for a block complies with the regular +// expression defined in validFieldNameRegex. +func validateBlockFieldName(path path.Path, name string, b fwschema.Block) diag.Diagnostics { + var diags diag.Diagnostics + + if !validFieldNameRegex.MatchString(name) { + diags.AddAttributeError( + path, + "Invalid Schema Field Name", + fmt.Sprintf("Field name %q is invalid, the only allowed characters are a-z, 0-9 and _. This is always a problem with the provider and should be reported to the provider developer.", name), + ) + } + + nestedObject := b.GetNestedObject() + + if nestedObject == nil { + return diags + } + + blocks := nestedObject.GetBlocks() + + for k, v := range blocks { + d := validateBlockFieldName(path.AtName(k), k, v) + + diags.Append(d...) + } + + attributes := nestedObject.GetAttributes() + + for k, v := range attributes { + d := validateAttributeFieldName(path.AtName(k), k, v) + + diags.Append(d...) + } + + return diags +} + +// schemaAttributes is a provider to fwschema type conversion function. +func schemaAttributes(attributes map[string]Attribute) map[string]fwschema.Attribute { + result := make(map[string]fwschema.Attribute, len(attributes)) + + for name, attribute := range attributes { + result[name] = attribute + } + + return result +} + +// schemaBlocks is a provider to fwschema type conversion function. 
+func schemaBlocks(blocks map[string]Block) map[string]fwschema.Block { + result := make(map[string]fwschema.Block, len(blocks)) + + for name, block := range blocks { + result[name] = block + } + + return result +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/set_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/set_attribute.go new file mode 100644 index 0000000000..3478eafc4f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/set_attribute.go @@ -0,0 +1,188 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = SetAttribute{} + _ fwxschema.AttributeWithSetValidators = SetAttribute{} +) + +// SetAttribute represents a schema attribute that is a set with a single +// element type. When retrieving the value for this attribute, use types.Set +// as the value type unless the CustomType field is set. The ElementType field +// must be set. +// +// Use SetNestedAttribute if the underlying elements should be objects and +// require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a set or directly via square brace syntax. +// +// # set of strings +// example_attribute = ["first", "second"] +// +// Terraform configurations reference this attribute using expressions that +// accept a set. 
Sets cannot be indexed in Terraform, therefore an expression +// is required to access an explicit element. +type SetAttribute struct { + // ElementType is the type for all elements of the set. This field must be + // set. + ElementType attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.SetType. When retrieving data, the basetypes.SetValuable + // associated with this custom type must be used in place of types.Set. + CustomType basetypes.SetTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. 
The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. 
+ // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Set +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into a set +// index or an error. +func (a SetAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a SetAttribute +// and all fields are equal. +func (a SetAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(SetAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a SetAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a SetAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a SetAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.SetType or the CustomType field value if defined. +func (a SetAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.SetType{ + ElemType: a.ElementType, + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a SetAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a SetAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a SetAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. 
+func (a SetAttribute) IsSensitive() bool { + return a.Sensitive +} + +// SetValidators returns the Validators field value. +func (a SetAttribute) SetValidators() []validator.Set { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/set_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/set_nested_attribute.go new file mode 100644 index 0000000000..d04b6f8383 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/set_nested_attribute.go @@ -0,0 +1,213 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ NestedAttribute = SetNestedAttribute{} + _ fwxschema.AttributeWithSetValidators = SetNestedAttribute{} +) + +// SetNestedAttribute represents an attribute that is a set of objects where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.Set +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use SetAttribute if the underlying elements are of a single type and do +// not require definition beyond type information. 
+// +// Terraform configurations configure this attribute using expressions that +// return a set of objects or directly via square and curly brace syntax. +// +// # set of objects +// example_attribute = [ +// { +// nested_attribute = #... +// }, +// ] +// +// Terraform configurations reference this attribute using expressions that +// accept a set of objects. Sets cannot be indexed in Terraform, therefore +// an expression is required to access an explicit element. +type SetNestedAttribute struct { + // NestedObject is the underlying object that contains nested attributes. + // This field must be set. + NestedObject NestedAttributeObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.SetType of types.ObjectType. When retrieving data, the + // basetypes.SetValuable associated with this custom type must be used in + // place of types.Set. + CustomType basetypes.SetTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. 
+ Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. 
+ // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Set +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is ElementKeyValue, otherwise returns an error. +func (a SetNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyValue) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to SetNestedAttribute", step) + } + + return a.NestedObject, nil +} + +// Equal returns true if the given Attribute is a SetNestedAttribute +// and all fields are equal. +func (a SetNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(SetNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a SetNestedAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a SetNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. 
+func (a SetNestedAttribute) GetMarkdownDescription() string {
+	return a.MarkdownDescription
+}
+
+// GetNestedObject returns the NestedObject field value.
+func (a SetNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject {
+	return a.NestedObject
+}
+
+// GetNestingMode always returns NestingModeSet.
+func (a SetNestedAttribute) GetNestingMode() fwschema.NestingMode {
+	return fwschema.NestingModeSet
+}
+
+// GetType returns SetType of ObjectType or CustomType.
+func (a SetNestedAttribute) GetType() attr.Type {
+	if a.CustomType != nil {
+		return a.CustomType
+	}
+
+	return types.SetType{
+		ElemType: a.NestedObject.Type(),
+	}
+}
+
+// IsComputed always returns false as provider schemas cannot be Computed.
+func (a SetNestedAttribute) IsComputed() bool {
+	return false
+}
+
+// IsOptional returns the Optional field value.
+func (a SetNestedAttribute) IsOptional() bool {
+	return a.Optional
+}
+
+// IsRequired returns the Required field value.
+func (a SetNestedAttribute) IsRequired() bool {
+	return a.Required
+}
+
+// IsSensitive returns the Sensitive field value.
+func (a SetNestedAttribute) IsSensitive() bool {
+	return a.Sensitive
+}
+
+// SetValidators returns the Validators field value.
+func (a SetNestedAttribute) SetValidators() []validator.Set { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/set_nested_block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/set_nested_block.go new file mode 100644 index 0000000000..63992226d5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/set_nested_block.go @@ -0,0 +1,185 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Block = SetNestedBlock{} + _ fwxschema.BlockWithSetValidators = SetNestedBlock{} +) + +// SetNestedBlock represents a block that is a set of objects where +// the object attributes can be fully defined, including further attributes +// or blocks. When retrieving the value for this block, use types.Set +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. +// +// Prefer SetNestedAttribute over SetNestedBlock if the provider is +// using protocol version 6. Nested attributes allow practitioners to configure +// values directly with expressions. +// +// Terraform configurations configure this block repeatedly using curly brace +// syntax without an equals (=) sign or [Dynamic Block Expressions]. +// +// # set of blocks with two elements +// example_block { +// nested_attribute = #... 
+// } +// example_block { +// nested_attribute = #... +// } +// +// Terraform configurations reference this block using expressions that +// accept a set of objects or an element directly via square brace 0-based +// index syntax: +// +// # first known object +// .example_block[0] +// # first known object nested_attribute value +// .example_block[0].nested_attribute +// +// [Dynamic Block Expressions]: https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks +type SetNestedBlock struct { + // NestedObject is the underlying object that contains nested attributes or + // blocks. This field must be set. + NestedObject NestedBlockObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.SetType of types.ObjectType. When retrieving data, the + // basetypes.SetValuable associated with this custom type must be used in + // place of types.Set. + CustomType basetypes.SetTypable + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." 
+ // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Set +} + +// ApplyTerraform5AttributePathStep returns the NestedObject field value if step +// is ElementKeyValue, otherwise returns an error. 
+func (b SetNestedBlock) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyValue) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to SetNestedBlock", step) + } + + return b.NestedObject, nil +} + +// Equal returns true if the given Block is SetNestedBlock +// and all fields are equal. +func (b SetNestedBlock) Equal(o fwschema.Block) bool { + if _, ok := o.(SetNestedBlock); !ok { + return false + } + + return fwschema.BlocksEqual(b, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (b SetNestedBlock) GetDeprecationMessage() string { + return b.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (b SetNestedBlock) GetDescription() string { + return b.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (b SetNestedBlock) GetMarkdownDescription() string { + return b.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (b SetNestedBlock) GetNestedObject() fwschema.NestedBlockObject { + return b.NestedObject +} + +// GetNestingMode always returns BlockNestingModeSet. +func (b SetNestedBlock) GetNestingMode() fwschema.BlockNestingMode { + return fwschema.BlockNestingModeSet +} + +// SetValidators returns the Validators field value. +func (b SetNestedBlock) SetValidators() []validator.Set { + return b.Validators +} + +// Type returns SetType of ObjectType or CustomType. 
+func (b SetNestedBlock) Type() attr.Type { + if b.CustomType != nil { + return b.CustomType + } + + return types.SetType{ + ElemType: b.NestedObject.Type(), + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/single_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/single_nested_attribute.go new file mode 100644 index 0000000000..de92fb990c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/single_nested_attribute.go @@ -0,0 +1,234 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ NestedAttribute = SingleNestedAttribute{} + _ fwxschema.AttributeWithObjectValidators = SingleNestedAttribute{} +) + +// SingleNestedAttribute represents an attribute that is a single object where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.Object +// as the value type unless the CustomType field is set. The Attributes field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use ObjectAttribute if the underlying attributes do not require definition +// beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return an object or directly via curly brace syntax. 
+// +// # single object +// example_attribute = { +// nested_attribute = #... +// } +// +// Terraform configurations reference this attribute using expressions that +// accept an object or an attribute name directly via period syntax: +// +// # object nested_attribute value +// .example_attribute.nested_attribute +type SingleNestedAttribute struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. This field must be set. + Attributes map[string]Attribute + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. 
It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. 
+ // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is AttributeName, otherwise returns an error. +func (a SingleNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + name, ok := step.(tftypes.AttributeName) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to SingleNestedAttribute", step) + } + + attribute, ok := a.Attributes[string(name)] + + if !ok { + return nil, fmt.Errorf("no attribute %q on SingleNestedAttribute", name) + } + + return attribute, nil +} + +// Equal returns true if the given Attribute is a SingleNestedAttribute +// and all fields are equal. +func (a SingleNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(SingleNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetAttributes returns the Attributes field value. +func (a SingleNestedAttribute) GetAttributes() fwschema.UnderlyingAttributes { + return schemaAttributes(a.Attributes) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a SingleNestedAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a SingleNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. 
+func (a SingleNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns a generated NestedAttributeObject from the +// Attributes, CustomType, and Validators field values. +func (a SingleNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return NestedAttributeObject{ + Attributes: a.Attributes, + CustomType: a.CustomType, + Validators: a.Validators, + } +} + +// GetNestingMode always returns NestingModeList. +func (a SingleNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeSingle +} + +// GetType returns ListType of ObjectType or CustomType. +func (a SingleNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + attrTypes := make(map[string]attr.Type, len(a.Attributes)) + + for name, attribute := range a.Attributes { + attrTypes[name] = attribute.GetType() + } + + return types.ObjectType{ + AttrTypes: attrTypes, + } +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a SingleNestedAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a SingleNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a SingleNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a SingleNestedAttribute) IsSensitive() bool { + return a.Sensitive +} + +// ObjectValidators returns the Validators field value. 
+func (a SingleNestedAttribute) ObjectValidators() []validator.Object { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/single_nested_block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/single_nested_block.go new file mode 100644 index 0000000000..4a8b9734cb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/single_nested_block.go @@ -0,0 +1,210 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Block = SingleNestedBlock{} + _ fwxschema.BlockWithObjectValidators = SingleNestedBlock{} +) + +// SingleNestedBlock represents a block that is a single object where +// the object attributes can be fully defined, including further attributes +// or blocks. When retrieving the value for this block, use types.Object +// as the value type unless the CustomType field is set. +// +// Prefer SingleNestedAttribute over SingleNestedBlock if the provider is +// using protocol version 6. Nested attributes allow practitioners to configure +// values directly with expressions. +// +// Terraform configurations configure this block only once using curly brace +// syntax without an equals (=) sign or [Dynamic Block Expressions]. +// +// # single block +// example_block { +// nested_attribute = #... 
+// } +// +// Terraform configurations reference this block using expressions that +// accept an object or an attribute name directly via period syntax: +// +// # object nested_attribute value +// .example_block.nested_attribute +// +// [Dynamic Block Expressions]: https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks +type SingleNestedBlock struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Blocks names. + Attributes map[string]Attribute + + // Blocks is the mapping of underlying block names to block definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Attributes names. + Blocks map[string]Block + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. 
+ // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. 
+ Validators []validator.Object +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is AttributeName, otherwise returns an error. +func (b SingleNestedBlock) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + name, ok := step.(tftypes.AttributeName) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to SingleNestedBlock", step) + } + + if attribute, ok := b.Attributes[string(name)]; ok { + return attribute, nil + } + + if block, ok := b.Blocks[string(name)]; ok { + return block, nil + } + + return nil, fmt.Errorf("no attribute or block %q on SingleNestedBlock", name) +} + +// Equal returns true if the given Attribute is b SingleNestedBlock +// and all fields are equal. +func (b SingleNestedBlock) Equal(o fwschema.Block) bool { + if _, ok := o.(SingleNestedBlock); !ok { + return false + } + + return fwschema.BlocksEqual(b, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (b SingleNestedBlock) GetDeprecationMessage() string { + return b.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (b SingleNestedBlock) GetDescription() string { + return b.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (b SingleNestedBlock) GetMarkdownDescription() string { + return b.MarkdownDescription +} + +// GetNestedObject returns a generated NestedBlockObject from the +// Attributes, CustomType, and Validators field values. +func (b SingleNestedBlock) GetNestedObject() fwschema.NestedBlockObject { + return NestedBlockObject{ + Attributes: b.Attributes, + Blocks: b.Blocks, + CustomType: b.CustomType, + Validators: b.Validators, + } +} + +// GetNestingMode always returns BlockNestingModeSingle. +func (b SingleNestedBlock) GetNestingMode() fwschema.BlockNestingMode { + return fwschema.BlockNestingModeSingle +} + +// ObjectValidators returns the Validators field value. 
+func (b SingleNestedBlock) ObjectValidators() []validator.Object { + return b.Validators +} + +// Type returns ObjectType or CustomType. +func (b SingleNestedBlock) Type() attr.Type { + if b.CustomType != nil { + return b.CustomType + } + + attrTypes := make(map[string]attr.Type, len(b.Attributes)+len(b.Blocks)) + + for name, attribute := range b.Attributes { + attrTypes[name] = attribute.GetType() + } + + for name, block := range b.Blocks { + attrTypes[name] = block.Type() + } + + return types.ObjectType{ + AttrTypes: attrTypes, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/string_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/string_attribute.go new file mode 100644 index 0000000000..eef7da7e9e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/schema/string_attribute.go @@ -0,0 +1,177 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = StringAttribute{} + _ fwxschema.AttributeWithStringValidators = StringAttribute{} +) + +// StringAttribute represents a schema attribute that is a string. When +// retrieving the value for this attribute, use types.String as the value type +// unless the CustomType field is set. +// +// Terraform configurations configure this attribute using expressions that +// return a string or directly via double quote syntax. 
+// +// example_attribute = "value" +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type StringAttribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.StringType. When retrieving data, the basetypes.StringValuable + // associated with this custom type must be used in place of types.String. + CustomType basetypes.StringTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. 
The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. 
+ // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.String +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a StringAttribute. +func (a StringAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a StringAttribute +// and all fields are equal. +func (a StringAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(StringAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a StringAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a StringAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a StringAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.StringType or the CustomType field value if defined. +func (a StringAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.StringType +} + +// IsComputed always returns false as provider schemas cannot be Computed. +func (a StringAttribute) IsComputed() bool { + return false +} + +// IsOptional returns the Optional field value. +func (a StringAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a StringAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. 
+func (a StringAttribute) IsSensitive() bool { + return a.Sensitive +} + +// StringValidators returns the Validators field value. +func (a StringAttribute) StringValidators() []validator.String { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/validate_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/validate_config.go new file mode 100644 index 0000000000..2102f3f136 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/provider/validate_config.go @@ -0,0 +1,30 @@ +package provider + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ValidateConfigRequest represents a request to validate the +// configuration of a provider. An instance of this request struct is +// supplied as an argument to the Provider ValidateConfig receiver method +// or automatically passed through to each ConfigValidator. +type ValidateConfigRequest struct { + // Config is the configuration the user supplied for the provider. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config tfsdk.Config +} + +// ValidateConfigResponse represents a response to a +// ValidateConfigRequest. An instance of this response struct is +// supplied as an argument to the Provider ValidateConfig receiver method +// or automatically passed through to each ConfigValidator. +type ValidateConfigResponse struct { + // Diagnostics report errors or warnings related to validating the provider + // configuration. An empty slice indicates success, with no warnings or + // errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/providerserver/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/providerserver/doc.go new file mode 100644 index 0000000000..ebea40f15a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/providerserver/doc.go @@ -0,0 +1,14 @@ +// Package providerserver implements functionality for serving a provider, +// such as directly starting a server in a production binary and conversion +// functions for testing. +// +// For production usage, call the Serve function from binary startup, such as +// from the provider codebase main package. If multiplexing the provider server +// via terraform-plugin-mux functionality, use the NewProtocol* functions and +// call the Serve function from that Go module. For testing usage, call the +// NewProtocol* functions. +// +// All functionality in this package requires the provider.Provider type, which +// contains the provider implementation including all managed resources and +// data sources. 
+package providerserver diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/providerserver/providerserver.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/providerserver/providerserver.go new file mode 100644 index 0000000000..920bebef90 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/providerserver/providerserver.go @@ -0,0 +1,125 @@ +package providerserver + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/internal/fwserver" + "github.com/hashicorp/terraform-plugin-framework/internal/proto5server" + "github.com/hashicorp/terraform-plugin-framework/internal/proto6server" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server" +) + +// NewProtocol5 returns a protocol version 5 ProviderServer implementation +// based on the given Provider and suitable for usage with the +// github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server.Serve() +// function and various terraform-plugin-mux functions. +func NewProtocol5(p provider.Provider) func() tfprotov5.ProviderServer { + return func() tfprotov5.ProviderServer { + return &proto5server.Server{ + FrameworkServer: fwserver.Server{ + Provider: p, + }, + } + } +} + +// NewProtocol5WithError returns a protocol version 5 ProviderServer +// implementation based on the given Provider and suitable for usage with +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource.TestCase.ProtoV5ProviderFactories. +// +// The error return is not currently used, but it may be in the future. 
+func NewProtocol5WithError(p provider.Provider) func() (tfprotov5.ProviderServer, error) { + return func() (tfprotov5.ProviderServer, error) { + return &proto5server.Server{ + FrameworkServer: fwserver.Server{ + Provider: p, + }, + }, nil + } +} + +// NewProtocol6 returns a protocol version 6 ProviderServer implementation +// based on the given Provider and suitable for usage with the +// github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server.Serve() +// function and various terraform-plugin-mux functions. +func NewProtocol6(p provider.Provider) func() tfprotov6.ProviderServer { + return func() tfprotov6.ProviderServer { + return &proto6server.Server{ + FrameworkServer: fwserver.Server{ + Provider: p, + }, + } + } +} + +// NewProtocol6WithError returns a protocol version 6 ProviderServer +// implementation based on the given Provider and suitable for usage with +// github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource.TestCase.ProtoV6ProviderFactories. +// +// The error return is not currently used, but it may be in the future. +func NewProtocol6WithError(p provider.Provider) func() (tfprotov6.ProviderServer, error) { + return func() (tfprotov6.ProviderServer, error) { + return &proto6server.Server{ + FrameworkServer: fwserver.Server{ + Provider: p, + }, + }, nil + } +} + +// Serve serves a provider, blocking until the context is canceled. 
+func Serve(ctx context.Context, providerFunc func() provider.Provider, opts ServeOpts) error { + err := opts.validate(ctx) + + if err != nil { + return fmt.Errorf("unable to validate ServeOpts: %w", err) + } + + switch opts.ProtocolVersion { + case 5: + var tf5serverOpts []tf5server.ServeOpt + + if opts.Debug { + tf5serverOpts = append(tf5serverOpts, tf5server.WithManagedDebug()) + } + + return tf5server.Serve( + opts.Address, + func() tfprotov5.ProviderServer { + provider := providerFunc() + + return &proto5server.Server{ + FrameworkServer: fwserver.Server{ + Provider: provider, + }, + } + }, + tf5serverOpts..., + ) + default: + var tf6serverOpts []tf6server.ServeOpt + + if opts.Debug { + tf6serverOpts = append(tf6serverOpts, tf6server.WithManagedDebug()) + } + + return tf6server.Serve( + opts.Address, + func() tfprotov6.ProviderServer { + provider := providerFunc() + + return &proto6server.Server{ + FrameworkServer: fwserver.Server{ + Provider: provider, + }, + } + }, + tf6serverOpts..., + ) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/providerserver/serve_opts.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/providerserver/serve_opts.go new file mode 100644 index 0000000000..487c09b429 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/providerserver/serve_opts.go @@ -0,0 +1,84 @@ +package providerserver + +import ( + "context" + "fmt" + "strings" +) + +// ServeOpts are options for serving the provider. +type ServeOpts struct { + // Address is the full address of the provider. Full address form has three + // parts separated by forward slashes (/): Hostname, namespace, and + // provider type ("name"). + // + // For example: registry.terraform.io/hashicorp/random. 
+ Address string + + // Debug runs the provider in a mode acceptable for debugging and testing + // processes, such as delve, by managing the process lifecycle. Information + // needed for Terraform CLI to connect to the provider is output to stdout. + // os.Interrupt (Ctrl-c) can be used to stop the provider. + Debug bool + + // ProtocolVersion is the protocol version that should be used when serving + // the provider. Either protocol version 5 or protocol version 6 can be + // used. Defaults to protocol version 6. + // + // Protocol version 5 has the following functionality limitations, which + // will raise an error during the GetProviderSchema or other RPCs: + // + // - tfsdk.Attribute cannot use Attributes field (nested attributes). + // + ProtocolVersion int +} + +// Validate a given provider address. This is only used for the Address field +// to preserve backwards compatibility for the Name field. +// +// This logic is manually implemented over importing +// github.com/hashicorp/terraform-registry-address as its functionality such as +// ParseAndInferProviderSourceString and ParseRawProviderSourceString allow +// shorter address formats, which would then require post-validation anyways. +func (opts ServeOpts) validateAddress(_ context.Context) error { + addressParts := strings.Split(opts.Address, "/") + formatErr := fmt.Errorf("expected hostname/namespace/type format, got: %s", opts.Address) + + if len(addressParts) != 3 { + return formatErr + } + + if addressParts[0] == "" || addressParts[1] == "" || addressParts[2] == "" { + return formatErr + } + + return nil +} + +// Validation checks for provider defined ServeOpts. 
+// +// Current checks which return errors: +// +// - If Address is not set +// - Address is a valid full provider address +// - ProtocolVersion, if set, is 5 or 6 +func (opts ServeOpts) validate(ctx context.Context) error { + if opts.Address == "" { + return fmt.Errorf("Address must be provided") + } + + err := opts.validateAddress(ctx) + + if err != nil { + return fmt.Errorf("unable to validate Address: %w", err) + } + + switch opts.ProtocolVersion { + // 0 represents unset, which Serve will use default. + case 0, 5, 6: + default: + return fmt.Errorf("ProtocolVersion, if set, must be 5 or 6") + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/config_validator.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/config_validator.go new file mode 100644 index 0000000000..217a54c0e8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/config_validator.go @@ -0,0 +1,25 @@ +package resource + +import "context" + +// ConfigValidator describes reusable Resource configuration validation functionality. +type ConfigValidator interface { + // Description describes the validation in plain text formatting. + // + // This information may be automatically added to resource plain text + // descriptions by external tooling. + Description(context.Context) string + + // MarkdownDescription describes the validation in Markdown formatting. + // + // This information may be automatically added to resource Markdown + // descriptions by external tooling. + MarkdownDescription(context.Context) string + + // ValidateResource performs the validation. + // + // This method name is separate from the datasource.ConfigValidator + // interface ValidateDataSource method name and provider.ConfigValidator + // interface ValidateProvider method name to allow generic validators. 
+ ValidateResource(context.Context, ValidateConfigRequest, *ValidateConfigResponse) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/configure.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/configure.go new file mode 100644 index 0000000000..1cb349b348 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/configure.go @@ -0,0 +1,31 @@ +package resource + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +// ConfigureRequest represents a request for the provider to configure a +// resource, i.e., set provider-level data or clients. An instance of this +// request struct is supplied as an argument to the Resource type Configure +// method. +type ConfigureRequest struct { + // ProviderData is the data set in the + // [provider.ConfigureResponse.ResourceData] field. This data is + // provider-specifc and therefore can contain any necessary remote system + // clients, custom provider data, or anything else pertinent to the + // functionality of the Resource. + // + // This data is only set after the ConfigureProvider RPC has been called + // by Terraform. + ProviderData any +} + +// ConfigureResponse represents a response to a ConfigureRequest. An +// instance of this response struct is supplied as an argument to the +// Resource type Configure method. +type ConfigureResponse struct { + // Diagnostics report errors or warnings related to configuring of the + // Datasource. An empty slice indicates a successful operation with no + // warnings or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/create.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/create.go new file mode 100644 index 0000000000..0fce340135 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/create.go @@ -0,0 +1,46 @@ +package resource + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// CreateRequest represents a request for the provider to create a +// resource. An instance of this request struct is supplied as an argument to +// the resource's Create function. +type CreateRequest struct { + // Config is the configuration the user supplied for the resource. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config tfsdk.Config + + // Plan is the planned state for the resource. + Plan tfsdk.Plan + + // ProviderMeta is metadata from the provider_meta block of the module. + ProviderMeta tfsdk.Config +} + +// CreateResponse represents a response to a CreateRequest. An +// instance of this response struct is supplied as +// an argument to the resource's Create function, in which the provider +// should set values on the CreateResponse as appropriate. +type CreateResponse struct { + // State is the state of the resource following the Create operation. + // This field is pre-populated from CreateRequest.Plan and + // should be set during the resource's Create operation. + State tfsdk.State + + // Private is the private state resource data following the Create operation. 
+ // This field is not pre-populated as there is no pre-existing private state + // data during the resource's Create operation. + Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to creating the + // resource. An empty slice indicates a successful operation with no + // warnings or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/delete.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/delete.go new file mode 100644 index 0000000000..26023b1560 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/delete.go @@ -0,0 +1,41 @@ +package resource + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// DeleteRequest represents a request for the provider to delete a +// resource. An instance of this request struct is supplied as an argument to +// the resource's Delete function. +type DeleteRequest struct { + // State is the current state of the resource prior to the Delete + // operation. + State tfsdk.State + + // ProviderMeta is metadata from the provider_meta block of the module. + ProviderMeta tfsdk.Config + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. + // + // Use the GetKey method to read data. + Private *privatestate.ProviderData +} + +// DeleteResponse represents a response to a DeleteRequest. An +// instance of this response struct is supplied as +// an argument to the resource's Delete function, in which the provider +// should set values on the DeleteResponse as appropriate. +type DeleteResponse struct { + // State is the state of the resource following the Delete operation. 
+ // This field is pre-populated from UpdateResourceRequest.Plan and + // should be set during the resource's Update operation. + State tfsdk.State + + // Diagnostics report errors or warnings related to deleting the + // resource. An empty slice indicates a successful operation with no + // warnings or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/doc.go new file mode 100644 index 0000000000..1d8077e079 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/doc.go @@ -0,0 +1,21 @@ +// Package resource contains all interfaces, request types, and response types +// for a managed resource implementation. +// +// In Terraform, a managed resource is a concept which enables provider +// developers to offer practitioners full lifecycle management (create, read, +// update, and delete) of a infrastructure component. Managed resources can +// also stand in for one-time infrastructure operations that require tracking, +// by implementing create logic, while omitting update and delete logic. +// +// Resources are saved into the Terraform state and can be referenced by other +// parts of a configuration. Resources are defined by a resource type/name, +// such as "examplecloud_thing", a schema representing the structure and data +// types of configuration, plan, and state, and lifecycle logic. +// +// The main starting point for implementations in this package is the +// Resource type which represents an instance of a resource type that has +// its own configuration, plan, state, and lifecycle logic. The +// [resource.Resource] implementations are referenced by the +// [provider.Provider] type Resources method, which enables the resource +// practitioner and testing usage. 
+package resource diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/import_state.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/import_state.go new file mode 100644 index 0000000000..7b647cc827 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/import_state.go @@ -0,0 +1,59 @@ +package resource + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ImportStateRequest represents a request for the provider to import a +// resource. An instance of this request struct is supplied as an argument to +// the Resource's ImportState method. +type ImportStateRequest struct { + // ID represents the import identifier supplied by the practitioner when + // calling the import command. In many cases, this may align with the + // unique identifier for the resource, which can optionally be stored + // as an Attribute. However, this identifier can also be treated as + // its own type of value and parsed during import. This value + // is not stored in the state unless the provider explicitly stores it. + ID string +} + +// ImportStateResponse represents a response to a ImportStateRequest. +// An instance of this response struct is supplied as an argument to the +// Resource's ImportState method, in which the provider should set values on +// the ImportStateResponse as appropriate. +type ImportStateResponse struct { + // Diagnostics report errors or warnings related to importing the + // resource. An empty slice indicates a successful operation with no + // warnings or errors generated. + Diagnostics diag.Diagnostics + + // State is the state of the resource following the import operation. 
+ // It must contain enough information so Terraform can successfully + // refresh the resource, e.g. call the Resource Read method. + State tfsdk.State + + // Private is the private state resource data following the Import operation. + // This field is not pre-populated as there is no pre-existing private state + // data during the resource's Import operation. + Private *privatestate.ProviderData +} + +// ImportStatePassthroughID is a helper function to set the import +// identifier to a given state attribute path. The attribute must accept a +// string value. +func ImportStatePassthroughID(ctx context.Context, attrPath path.Path, req ImportStateRequest, resp *ImportStateResponse) { + if attrPath.Equal(path.Empty()) { + resp.Diagnostics.AddError( + "Resource Import Passthrough Missing Attribute Path", + "This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + "Resource ImportState method call to ImportStatePassthroughID path must be set to a valid attribute path that can accept a string value.", + ) + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, attrPath, req.ID)...) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/metadata.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/metadata.go new file mode 100644 index 0000000000..683292113a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/metadata.go @@ -0,0 +1,21 @@ +package resource + +// MetadataRequest represents a request for the Resource to return metadata, +// such as its type name. An instance of this request struct is supplied as +// an argument to the Resource type Metadata method. +type MetadataRequest struct { + // ProviderTypeName is the string returned from + // [provider.MetadataResponse.TypeName], if the Provider type implements + // the Metadata method. 
This string should prefix the Resource type name + // with an underscore in the response. + ProviderTypeName string +} + +// MetadataResponse represents a response to a MetadataRequest. An +// instance of this response struct is supplied as an argument to the +// Resource type Metadata method. +type MetadataResponse struct { + // TypeName should be the full resource type, including the provider + // type prefix and an underscore. For example, examplecloud_thing. + TypeName string +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/modify_plan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/modify_plan.go new file mode 100644 index 0000000000..4fcd92d331 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/modify_plan.go @@ -0,0 +1,65 @@ +package resource + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ModifyPlanRequest represents a request for the provider to modify the +// planned new state that Terraform has generated for the resource. +type ModifyPlanRequest struct { + // Config is the configuration the user supplied for the resource. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config tfsdk.Config + + // State is the current state of the resource. + State tfsdk.State + + // Plan is the planned new state for the resource. Terraform 1.3 and later + // supports resource destroy planning, in which this will contain a null + // value. + Plan tfsdk.Plan + + // ProviderMeta is metadata from the provider_meta block of the module. 
+ ProviderMeta tfsdk.Config + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. This data is opaque to Terraform and does + // not affect plan output. Any existing data is copied to + // ModifyPlanResponse.Private to prevent accidental private state data loss. + // + // Use the GetKey method to read data. Use the SetKey method on + // ModifyPlanResponse.Private to update or remove a value. + Private *privatestate.ProviderData +} + +// ModifyPlanResponse represents a response to a +// ModifyPlanRequest. An instance of this response struct is supplied +// as an argument to the resource's ModifyPlan function, in which the provider +// should modify the Plan and populate the RequiresReplace field as appropriate. +type ModifyPlanResponse struct { + // Plan is the planned new state for the resource. + Plan tfsdk.Plan + + // RequiresReplace is a list of attribute paths that require the + // resource to be replaced. They should point to the specific field + // that changed that requires the resource to be destroyed and + // recreated. + RequiresReplace path.Paths + + // Private is the private state resource data following the ModifyPlan operation. + // This field is pre-populated from ModifyPlanRequest.Private and + // can be modified during the resource's ModifyPlan operation. + Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to determining the + // planned state of the requested resource. Returning an empty slice + // indicates a successful plan modification with no warnings or errors + // generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/read.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/read.go new file mode 100644 index 0000000000..943a03cd77 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/read.go @@ -0,0 +1,50 @@ +package resource + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ReadRequest represents a request for the provider to read a +// resource, i.e., update values in state according to the real state of the +// resource. An instance of this request struct is supplied as an argument to +// the resource's Read function. +type ReadRequest struct { + // State is the current state of the resource prior to the Read + // operation. + State tfsdk.State + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. This data is opaque to Terraform and does + // not affect plan output. Any existing data is copied to + // ReadResourceResponse.Private to prevent accidental private state data loss. + // + // Use the GetKey method to read data. Use the SetKey method on + // ReadResourceResponse.Private to update or remove a value. + Private *privatestate.ProviderData + + // ProviderMeta is metadata from the provider_meta block of the module. + ProviderMeta tfsdk.Config +} + +// ReadResponse represents a response to a ReadRequest. An +// instance of this response struct is supplied as +// an argument to the resource's Read function, in which the provider +// should set values on the ReadResponse as appropriate. +type ReadResponse struct { + // State is the state of the resource following the Read operation. 
+ // This field is pre-populated from ReadRequest.State and + // should be set during the resource's Read operation. + State tfsdk.State + + // Private is the private state resource data following the Read operation. + // This field is pre-populated from ReadResourceRequest.Private and + // can be modified during the resource's Read operation. + Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to reading the + // resource. An empty slice indicates a successful operation with no + // warnings or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/resource.go new file mode 100644 index 0000000000..9000fda5df --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/resource.go @@ -0,0 +1,166 @@ +package resource + +import ( + "context" +) + +// Resource represents an instance of a managed resource type. This is the core +// interface that all resources must implement. +// +// Resources can optionally implement these additional concepts: +// +// - Configure: Include provider-level data or clients. +// - Import: ResourceWithImportState +// - Validation: Schema-based or entire configuration +// via ResourceWithConfigValidators or ResourceWithValidateConfig. +// - Plan Modification: Schema-based or entire plan +// via ResourceWithModifyPlan. +// - State Upgrades: ResourceWithUpgradeState +// +// Although not required, it is conventional for resources to implement the +// ResourceWithImportState interface. +type Resource interface { + // Metadata should return the full name of the resource, such as + // examplecloud_thing. + Metadata(context.Context, MetadataRequest, *MetadataResponse) + + // Schema should return the schema for this resource. 
+ Schema(context.Context, SchemaRequest, *SchemaResponse) + + // Create is called when the provider must create a new resource. Config + // and planned state values should be read from the + // CreateRequest and new state values set on the CreateResponse. + Create(context.Context, CreateRequest, *CreateResponse) + + // Read is called when the provider must read resource values in order + // to update state. Planned state values should be read from the + // ReadRequest and new state values set on the ReadResponse. + Read(context.Context, ReadRequest, *ReadResponse) + + // Update is called to update the state of the resource. Config, planned + // state, and prior state values should be read from the + // UpdateRequest and new state values set on the UpdateResponse. + Update(context.Context, UpdateRequest, *UpdateResponse) + + // Delete is called when the provider must delete the resource. Config + // values may be read from the DeleteRequest. + // + // If execution completes without error, the framework will automatically + // call DeleteResponse.State.RemoveResource(), so it can be omitted + // from provider logic. + Delete(context.Context, DeleteRequest, *DeleteResponse) +} + +// ResourceWithConfigure is an interface type that extends Resource to +// include a method which the framework will automatically call so provider +// developers have the opportunity to setup any necessary provider-level data +// or clients in the Resource type. +// +// This method is intended to replace the provider.ResourceType type +// NewResource method in a future release. +type ResourceWithConfigure interface { + Resource + + // Configure enables provider-level data or clients to be set in the + // provider-defined DataSource type. It is separately executed for each + // ReadDataSource RPC. + Configure(context.Context, ConfigureRequest, *ConfigureResponse) +} + +// ResourceWithConfigValidators is an interface type that extends Resource to include declarative validations. 
+// +// Declaring validation using this methodology simplifies implmentation of +// reusable functionality. These also include descriptions, which can be used +// for automating documentation. +// +// Validation will include ConfigValidators and ValidateConfig, if both are +// implemented, in addition to any Attribute or Type validation. +type ResourceWithConfigValidators interface { + Resource + + // ConfigValidators returns a list of functions which will all be performed during validation. + ConfigValidators(context.Context) []ConfigValidator +} + +// Optional interface on top of Resource that enables provider control over +// the ImportResourceState RPC. This RPC is called by Terraform when the +// `terraform import` command is executed. Afterwards, the ReadResource RPC +// is executed to allow providers to fully populate the resource state. +type ResourceWithImportState interface { + Resource + + // ImportState is called when the provider must import the state of a + // resource instance. This method must return enough state so the Read + // method can properly refresh the full resource. + // + // If setting an attribute with the import identifier, it is recommended + // to use the ImportStatePassthroughID() call in this method. + ImportState(context.Context, ImportStateRequest, *ImportStateResponse) +} + +// ResourceWithModifyPlan represents a resource instance with a ModifyPlan +// function. +type ResourceWithModifyPlan interface { + Resource + + // ModifyPlan is called when the provider has an opportunity to modify + // the plan: once during the plan phase when Terraform is determining + // the diff that should be shown to the user for approval, and once + // during the apply phase with any unknown values from configuration + // filled in with their final values. + // + // The planned new state is represented by + // ModifyPlanResponse.Plan. It must meet the following + // constraints: + // 1. 
Any non-Computed attribute set in config must preserve the exact + // config value or return the corresponding attribute value from the + // prior state (ModifyPlanRequest.State). + // 2. Any attribute with a known value must not have its value changed + // in subsequent calls to ModifyPlan or Create/Read/Update. + // 3. Any attribute with an unknown value may either remain unknown + // or take on any value of the expected type. + // + // Any errors will prevent further resource-level plan modifications. + ModifyPlan(context.Context, ModifyPlanRequest, *ModifyPlanResponse) +} + +// Optional interface on top of Resource that enables provider control over +// the UpgradeResourceState RPC. This RPC is automatically called by Terraform +// when the current Schema type Version field is greater than the stored state. +// Terraform does not store previous Schema information, so any breaking +// changes to state data types must be handled by providers. +// +// Terraform CLI can execute the UpgradeResourceState RPC even when the prior +// state version matches the current schema version. The framework will +// automatically intercept this request and attempt to respond with the +// existing state. In this situation the framework will not execute any +// provider defined logic, so declaring it for this version is extraneous. +type ResourceWithUpgradeState interface { + Resource + + // A mapping of prior state version to current schema version state upgrade + // implementations. Only the specified state upgrader for the prior state + // version is called, rather than each version in between, so it must + // encapsulate all logic to convert the prior state to the current schema + // version. + // + // Version keys begin at 0, which is the default schema version when + // undefined. The framework will return an error diagnostic should the + // requested state version not be implemented. 
+ UpgradeState(context.Context) map[int64]StateUpgrader +} + +// ResourceWithValidateConfig is an interface type that extends Resource to include imperative validation. +// +// Declaring validation using this methodology simplifies one-off +// functionality that typically applies to a single resource. Any documentation +// of this functionality must be manually added into schema descriptions. +// +// Validation will include ConfigValidators and ValidateConfig, if both are +// implemented, in addition to any Attribute or Type validation. +type ResourceWithValidateConfig interface { + Resource + + // ValidateConfig performs the validation. + ValidateConfig(context.Context, ValidateConfigRequest, *ValidateConfigResponse) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema.go new file mode 100644 index 0000000000..f1e59dce22 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema.go @@ -0,0 +1,24 @@ +package resource + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" +) + +// SchemaRequest represents a request for the Resource to return its schema. +// An instance of this request struct is supplied as an argument to the +// Resource type Schema method. +type SchemaRequest struct{} + +// SchemaResponse represents a response to a SchemaRequest. An instance of this +// response struct is supplied as an argument to the Resource type Schema +// method. +type SchemaResponse struct { + // Schema is the schema of the data source. + Schema schema.Schema + + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/attribute.go new file mode 100644 index 0000000000..b7d1e91272 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/attribute.go @@ -0,0 +1,33 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" +) + +// Attribute define a value field inside the Schema. Implementations in this +// package include: +// - BoolAttribute +// - Float64Attribute +// - Int64Attribute +// - ListAttribute +// - MapAttribute +// - NumberAttribute +// - ObjectAttribute +// - SetAttribute +// - StringAttribute +// +// Additionally, the NestedAttribute interface extends Attribute with nested +// attributes. Only supported in protocol version 6. Implementations in this +// package include: +// - ListNestedAttribute +// - MapNestedAttribute +// - SetNestedAttribute +// - SingleNestedAttribute +// +// In practitioner configurations, an equals sign (=) is required to set +// the value. 
[Configuration Reference] +// +// [Configuration Reference]: https://developer.hashicorp.com/terraform/language/syntax/configuration +type Attribute interface { + fwschema.Attribute +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/block.go new file mode 100644 index 0000000000..f6e27e6426 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/block.go @@ -0,0 +1,27 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" +) + +// Block defines a structural field inside a Schema. Implementations in this +// package include: +// - ListNestedBlock +// - SetNestedBlock +// - SingleNestedBlock +// +// In practitioner configurations, an equals sign (=) cannot be used to set the +// value. Blocks are instead repeated as necessary, or require the use of +// [Dynamic Block Expressions]. +// +// Prefer NestedAttribute over Block. Blocks should typically be used for +// configuration compatibility with previously existing schemas from an older +// Terraform Plugin SDK. Efforts should be made to convert from Block to +// NestedAttribute as a breaking change for practitioners. 
+// +// [Dynamic Block Expressions]: https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks +// +// [Configuration Reference]: https://developer.hashicorp.com/terraform/language/syntax/configuration +type Block interface { + fwschema.Block +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/bool_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/bool_attribute.go new file mode 100644 index 0000000000..d99233e864 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/bool_attribute.go @@ -0,0 +1,208 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = BoolAttribute{} + _ fwxschema.AttributeWithBoolPlanModifiers = BoolAttribute{} + _ fwxschema.AttributeWithBoolValidators = BoolAttribute{} +) + +// BoolAttribute represents a schema attribute that is a boolean. When +// retrieving the value for this attribute, use types.Bool as the value type +// unless the CustomType field is set. +// +// Terraform configurations configure this attribute using expressions that +// return a boolean or directly via the true/false keywords. +// +// example_attribute = true +// +// Terraform configurations reference this attribute using the attribute name. 
+// +// .example_attribute +type BoolAttribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.BoolType. When retrieving data, the basetypes.BoolValuable + // associated with this custom type must be used in place of types.Bool. + CustomType basetypes.BoolTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. 
+ MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. 
+ // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Bool + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. + PlanModifiers []planmodifier.Bool +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a BoolAttribute. +func (a BoolAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// BoolPlanModifiers returns the PlanModifiers field value. +func (a BoolAttribute) BoolPlanModifiers() []planmodifier.Bool { + return a.PlanModifiers +} + +// BoolValidators returns the Validators field value. +func (a BoolAttribute) BoolValidators() []validator.Bool { + return a.Validators +} + +// Equal returns true if the given Attribute is a BoolAttribute +// and all fields are equal. 
+func (a BoolAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(BoolAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a BoolAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a BoolAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a BoolAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.StringType or the CustomType field value if defined. +func (a BoolAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.BoolType +} + +// IsComputed returns the Computed field value. +func (a BoolAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a BoolAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a BoolAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a BoolAttribute) IsSensitive() bool { + return a.Sensitive +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/doc.go new file mode 100644 index 0000000000..14ee29ae7f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/doc.go @@ -0,0 +1,5 @@ +// Package schema contains all available schema functionality for resources. +// Resource schemas define the structure and value types for configuration, +// plan, and state data. Schemas are implemented via the resource.Resource type +// Schema method. 
+package schema diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/float64_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/float64_attribute.go new file mode 100644 index 0000000000..0ab00417ba --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/float64_attribute.go @@ -0,0 +1,211 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = Float64Attribute{} + _ fwxschema.AttributeWithFloat64PlanModifiers = Float64Attribute{} + _ fwxschema.AttributeWithFloat64Validators = Float64Attribute{} +) + +// Float64Attribute represents a schema attribute that is a 64-bit floating +// point number. When retrieving the value for this attribute, use +// types.Float64 as the value type unless the CustomType field is set. +// +// Use Int64Attribute for 64-bit integer attributes or NumberAttribute for +// 512-bit generic number attributes. +// +// Terraform configurations configure this attribute using expressions that +// return a number or directly via a floating point value. +// +// example_attribute = 123.45 +// +// Terraform configurations reference this attribute using the attribute name. 
+// +// .example_attribute +type Float64Attribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.Float64Type. When retrieving data, the basetypes.Float64Valuable + // associated with this custom type must be used in place of types.Float64. + CustomType basetypes.Float64Typable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. 
+ MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. 
+ // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Float64 + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. + PlanModifiers []planmodifier.Float64 +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a Float64Attribute. +func (a Float64Attribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a Float64Attribute +// and all fields are equal. +func (a Float64Attribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(Float64Attribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// Float64PlanModifiers returns the PlanModifiers field value. +func (a Float64Attribute) Float64PlanModifiers() []planmodifier.Float64 { + return a.PlanModifiers +} + +// Float64Validators returns the Validators field value. 
+func (a Float64Attribute) Float64Validators() []validator.Float64 { + return a.Validators +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a Float64Attribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a Float64Attribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a Float64Attribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.Float64Type or the CustomType field value if defined. +func (a Float64Attribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.Float64Type +} + +// IsComputed returns the Computed field value. +func (a Float64Attribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a Float64Attribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a Float64Attribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. 
+func (a Float64Attribute) IsSensitive() bool { + return a.Sensitive +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/int64_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/int64_attribute.go new file mode 100644 index 0000000000..0dd4b93e05 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/int64_attribute.go @@ -0,0 +1,211 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = Int64Attribute{} + _ fwxschema.AttributeWithInt64PlanModifiers = Int64Attribute{} + _ fwxschema.AttributeWithInt64Validators = Int64Attribute{} +) + +// Int64Attribute represents a schema attribute that is a 64-bit integer. +// When retrieving the value for this attribute, use types.Int64 as the value +// type unless the CustomType field is set. +// +// Use Float64Attribute for 64-bit floating point number attributes or +// NumberAttribute for 512-bit generic number attributes. +// +// Terraform configurations configure this attribute using expressions that +// return a number or directly via an integer value. +// +// example_attribute = 123 +// +// Terraform configurations reference this attribute using the attribute name. 
+// +// .example_attribute +type Int64Attribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.Int64Type. When retrieving data, the basetypes.Int64Valuable + // associated with this custom type must be used in place of types.Int64. + CustomType basetypes.Int64Typable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. 
+ MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. 
+ // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Int64 + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. + PlanModifiers []planmodifier.Int64 +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a Int64Attribute. +func (a Int64Attribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a Int64Attribute +// and all fields are equal. +func (a Int64Attribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(Int64Attribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a Int64Attribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. 
+func (a Int64Attribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a Int64Attribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.Int64Type or the CustomType field value if defined. +func (a Int64Attribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.Int64Type +} + +// Int64PlanModifiers returns the PlanModifiers field value. +func (a Int64Attribute) Int64PlanModifiers() []planmodifier.Int64 { + return a.PlanModifiers +} + +// Int64Validators returns the Validators field value. +func (a Int64Attribute) Int64Validators() []validator.Int64 { + return a.Validators +} + +// IsComputed returns the Computed field value. +func (a Int64Attribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a Int64Attribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a Int64Attribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. 
+func (a Int64Attribute) IsSensitive() bool { + return a.Sensitive +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/list_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/list_attribute.go new file mode 100644 index 0000000000..f65535174d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/list_attribute.go @@ -0,0 +1,221 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = ListAttribute{} + _ fwxschema.AttributeWithListPlanModifiers = ListAttribute{} + _ fwxschema.AttributeWithListValidators = ListAttribute{} +) + +// ListAttribute represents a schema attribute that is a list with a single +// element type. When retrieving the value for this attribute, use types.List +// as the value type unless the CustomType field is set. The ElementType field +// must be set. +// +// Use ListNestedAttribute if the underlying elements should be objects and +// require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a list or directly via square brace syntax. 
+// +// # list of strings +// example_attribute = ["first", "second"] +// +// Terraform configurations reference this attribute using expressions that +// accept a list or an element directly via square brace 0-based index syntax: +// +// # first known element +// .example_attribute[0] +type ListAttribute struct { + // ElementType is the type for all elements of the list. This field must be + // set. + ElementType attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ListType. When retrieving data, the basetypes.ListValuable + // associated with this custom type must be used in place of types.List. + CustomType basetypes.ListTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. 
It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. 
+ // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.List + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. + PlanModifiers []planmodifier.List +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into a list +// index or an error. +func (a ListAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a ListAttribute +// and all fields are equal. 
+func (a ListAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(ListAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a ListAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a ListAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a ListAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.ListType or the CustomType field value if defined. +func (a ListAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.ListType{ + ElemType: a.ElementType, + } +} + +// IsComputed returns the Computed field value. +func (a ListAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a ListAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a ListAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a ListAttribute) IsSensitive() bool { + return a.Sensitive +} + +// ListPlanModifiers returns the PlanModifiers field value. +func (a ListAttribute) ListPlanModifiers() []planmodifier.List { + return a.PlanModifiers +} + +// ListValidators returns the Validators field value. 
+func (a ListAttribute) ListValidators() []validator.List { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/list_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/list_nested_attribute.go new file mode 100644 index 0000000000..fdd6a32738 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/list_nested_attribute.go @@ -0,0 +1,248 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ NestedAttribute = ListNestedAttribute{} + _ fwxschema.AttributeWithListPlanModifiers = ListNestedAttribute{} + _ fwxschema.AttributeWithListValidators = ListNestedAttribute{} +) + +// ListNestedAttribute represents an attribute that is a list of objects where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.List +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use ListAttribute if the underlying elements are of a single type and do +// not require definition beyond type information. 
+// +// Terraform configurations configure this attribute using expressions that +// return a list of objects or directly via square and curly brace syntax. +// +// # list of objects +// example_attribute = [ +// { +// nested_attribute = #... +// }, +// ] +// +// Terraform configurations reference this attribute using expressions that +// accept a list of objects or an element directly via square brace 0-based +// index syntax: +// +// # first known object +// .example_attribute[0] +// # first known object nested_attribute value +// .example_attribute[0].nested_attribute +type ListNestedAttribute struct { + // NestedObject is the underlying object that contains nested attributes. + // This field must be set. + NestedObject NestedAttributeObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.ListType of types.ObjectType. When retrieving data, the + // basetypes.ListValuable associated with this custom type must be used in + // place of types.List. + CustomType basetypes.ListTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. 
Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). 
+ // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.List + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. 
+ PlanModifiers []planmodifier.List +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is ElementKeyInt, otherwise returns an error. +func (a ListNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyInt) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to ListNestedAttribute", step) + } + + return a.NestedObject, nil +} + +// Equal returns true if the given Attribute is a ListNestedAttribute +// and all fields are equal. +func (a ListNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(ListNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a ListNestedAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a ListNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a ListNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (a ListNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return a.NestedObject +} + +// GetNestingMode always returns NestingModeList. +func (a ListNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeList +} + +// GetType returns ListType of ObjectType or CustomType. +func (a ListNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.ListType{ + ElemType: a.NestedObject.Type(), + } +} + +// IsComputed returns the Computed field value. +func (a ListNestedAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. 
+func (a ListNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a ListNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a ListNestedAttribute) IsSensitive() bool { + return a.Sensitive +} + +// ListPlanModifiers returns the PlanModifiers field value. +func (a ListNestedAttribute) ListPlanModifiers() []planmodifier.List { + return a.PlanModifiers +} + +// ListValidators returns the Validators field value. +func (a ListNestedAttribute) ListValidators() []validator.List { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/list_nested_block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/list_nested_block.go new file mode 100644 index 0000000000..15896ae030 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/list_nested_block.go @@ -0,0 +1,209 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. 
+var ( + _ Block = ListNestedBlock{} + _ fwxschema.BlockWithListPlanModifiers = ListNestedBlock{} + _ fwxschema.BlockWithListValidators = ListNestedBlock{} +) + +// ListNestedBlock represents a block that is a list of objects where +// the object attributes can be fully defined, including further attributes +// or blocks. When retrieving the value for this block, use types.List +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. +// +// Prefer ListNestedAttribute over ListNestedBlock if the provider is +// using protocol version 6. Nested attributes allow practitioners to configure +// values directly with expressions. +// +// Terraform configurations configure this block repeatedly using curly brace +// syntax without an equals (=) sign or [Dynamic Block Expressions]. +// +// # list of blocks with two elements +// example_block { +// nested_attribute = #... +// } +// example_block { +// nested_attribute = #... +// } +// +// Terraform configurations reference this block using expressions that +// accept a list of objects or an element directly via square brace 0-based +// index syntax: +// +// # first known object +// .example_block[0] +// # first known object nested_attribute value +// .example_block[0].nested_attribute +// +// [Dynamic Block Expressions]: https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks +type ListNestedBlock struct { + // NestedObject is the underlying object that contains nested attributes or + // blocks. This field must be set. + NestedObject NestedBlockObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.ListType of types.ObjectType. When retrieving data, the + // basetypes.ListValuable associated with this custom type must be used in + // place of types.List. 
+ CustomType basetypes.ListTypable + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). 
+ // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.List + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. 
+ PlanModifiers []planmodifier.List +} + +// ApplyTerraform5AttributePathStep returns the NestedObject field value if step +// is ElementKeyInt, otherwise returns an error. +func (b ListNestedBlock) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyInt) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to ListNestedBlock", step) + } + + return b.NestedObject, nil +} + +// Equal returns true if the given Block is ListNestedBlock +// and all fields are equal. +func (b ListNestedBlock) Equal(o fwschema.Block) bool { + if _, ok := o.(ListNestedBlock); !ok { + return false + } + + return fwschema.BlocksEqual(b, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (b ListNestedBlock) GetDeprecationMessage() string { + return b.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (b ListNestedBlock) GetDescription() string { + return b.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (b ListNestedBlock) GetMarkdownDescription() string { + return b.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (b ListNestedBlock) GetNestedObject() fwschema.NestedBlockObject { + return b.NestedObject +} + +// GetNestingMode always returns BlockNestingModeList. +func (b ListNestedBlock) GetNestingMode() fwschema.BlockNestingMode { + return fwschema.BlockNestingModeList +} + +// ListPlanModifiers returns the PlanModifiers field value. +func (b ListNestedBlock) ListPlanModifiers() []planmodifier.List { + return b.PlanModifiers +} + +// ListValidators returns the Validators field value. +func (b ListNestedBlock) ListValidators() []validator.List { + return b.Validators +} + +// Type returns ListType of ObjectType or CustomType. 
+func (b ListNestedBlock) Type() attr.Type { + if b.CustomType != nil { + return b.CustomType + } + + return types.ListType{ + ElemType: b.NestedObject.Type(), + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/map_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/map_attribute.go new file mode 100644 index 0000000000..d9b8ee294a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/map_attribute.go @@ -0,0 +1,224 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = MapAttribute{} + _ fwxschema.AttributeWithMapPlanModifiers = MapAttribute{} + _ fwxschema.AttributeWithMapValidators = MapAttribute{} +) + +// MapAttribute represents a schema attribute that is a list with a single +// element type. When retrieving the value for this attribute, use types.Map +// as the value type unless the CustomType field is set. The ElementType field +// must be set. +// +// Use MapNestedAttribute if the underlying elements should be objects and +// require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a list or directly via curly brace syntax. 
+// +// # map of strings +// example_attribute = { +// key1 = "first", +// key2 = "second", +// } +// +// Terraform configurations reference this attribute using expressions that +// accept a map or an element directly via square brace string syntax: +// +// # key1 known element +// .example_attribute["key1"] +type MapAttribute struct { + // ElementType is the type for all elements of the map. This field must be + // set. + ElementType attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.MapType. When retrieving data, the basetypes.MapValuable + // associated with this custom type must be used in place of types.Map. + CustomType basetypes.MapTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. 
It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. 
+ // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Map + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. + PlanModifiers []planmodifier.Map +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into a map +// index or an error. +func (a MapAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a MapAttribute +// and all fields are equal. 
+func (a MapAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(MapAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a MapAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a MapAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a MapAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.MapType or the CustomType field value if defined. +func (a MapAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.MapType{ + ElemType: a.ElementType, + } +} + +// IsComputed returns the Computed field value. +func (a MapAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a MapAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a MapAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a MapAttribute) IsSensitive() bool { + return a.Sensitive +} + +// MapPlanModifiers returns the PlanModifiers field value. +func (a MapAttribute) MapPlanModifiers() []planmodifier.Map { + return a.PlanModifiers +} + +// MapValidators returns the Validators field value. 
+func (a MapAttribute) MapValidators() []validator.Map { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/map_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/map_nested_attribute.go new file mode 100644 index 0000000000..e46197d1c1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/map_nested_attribute.go @@ -0,0 +1,249 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ NestedAttribute = MapNestedAttribute{} + _ fwxschema.AttributeWithMapPlanModifiers = MapNestedAttribute{} + _ fwxschema.AttributeWithMapValidators = MapNestedAttribute{} +) + +// MapNestedAttribute represents an attribute that is a set of objects where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.Map +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use MapAttribute if the underlying elements are of a single type and do +// not require definition beyond type information. 
+// +// Terraform configurations configure this attribute using expressions that +// return a set of objects or directly via curly brace syntax. +// +// # map of objects +// example_attribute = { +// key = { +// nested_attribute = #... +// }, +// ] +// +// Terraform configurations reference this attribute using expressions that +// accept a map of objects or an element directly via square brace string +// syntax: +// +// # known object at key +// .example_attribute["key"] +// # known object nested_attribute value at key +// .example_attribute["key"].nested_attribute +type MapNestedAttribute struct { + // NestedObject is the underlying object that contains nested attributes. + // This field must be set. + NestedObject NestedAttributeObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.MapType of types.ObjectType. When retrieving data, the + // basetypes.MapValuable associated with this custom type must be used in + // place of types.Map. + CustomType basetypes.MapTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. 
Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). 
+ // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Map + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. 
+ PlanModifiers []planmodifier.Map +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is ElementKeyString, otherwise returns an error. +func (a MapNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyString) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to MapNestedAttribute", step) + } + + return a.NestedObject, nil +} + +// Equal returns true if the given Attribute is a MapNestedAttribute +// and all fields are equal. +func (a MapNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(MapNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a MapNestedAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a MapNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a MapNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (a MapNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return a.NestedObject +} + +// GetNestingMode always returns NestingModeList. +func (a MapNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeMap +} + +// GetType returns MapType of ObjectType or CustomType. +func (a MapNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.MapType{ + ElemType: a.NestedObject.Type(), + } +} + +// IsComputed returns the Computed field value. +func (a MapNestedAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. 
+func (a MapNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a MapNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a MapNestedAttribute) IsSensitive() bool { + return a.Sensitive +} + +// MapPlanModifiers returns the PlanModifiers field value. +func (a MapNestedAttribute) MapPlanModifiers() []planmodifier.Map { + return a.PlanModifiers +} + +// MapValidators returns the Validators field value. +func (a MapNestedAttribute) MapValidators() []validator.Map { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/nested_attribute.go new file mode 100644 index 0000000000..5429975dae --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/nested_attribute.go @@ -0,0 +1,11 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" +) + +// Nested attributes are only compatible with protocol version 6. 
+type NestedAttribute interface { + Attribute + fwschema.NestedAttribute +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/nested_attribute_object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/nested_attribute_object.go new file mode 100644 index 0000000000..e49d019888 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/nested_attribute_object.go @@ -0,0 +1,105 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ fwxschema.NestedAttributeObjectWithPlanModifiers = NestedAttributeObject{} + _ fwxschema.NestedAttributeObjectWithValidators = NestedAttributeObject{} +) + +// NestedAttributeObject is the object containing the underlying attributes +// for a ListNestedAttribute, MapNestedAttribute, SetNestedAttribute, or +// SingleNestedAttribute (automatically generated). When retrieving the value +// for this attribute, use types.Object as the value type unless the CustomType +// field is set. The Attributes field must be set. Nested attributes are only +// compatible with protocol version 6. +// +// This object enables customizing and simplifying details within its parent +// NestedAttribute, therefore it cannot have Terraform schema fields such as +// Required, Description, etc. +type NestedAttributeObject struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. 
This field must be set. + Attributes map[string]Attribute + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. + PlanModifiers []planmodifier.Object +} + +// ApplyTerraform5AttributePathStep performs an AttributeName step on the +// underlying attributes or returns an error. 
+func (o NestedAttributeObject) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (any, error) { + return fwschema.NestedAttributeObjectApplyTerraform5AttributePathStep(o, step) +} + +// Equal returns true if the given NestedAttributeObject is equivalent. +func (o NestedAttributeObject) Equal(other fwschema.NestedAttributeObject) bool { + if _, ok := other.(NestedAttributeObject); !ok { + return false + } + + return fwschema.NestedAttributeObjectEqual(o, other) +} + +// GetAttributes returns the Attributes field value. +func (o NestedAttributeObject) GetAttributes() fwschema.UnderlyingAttributes { + return schemaAttributes(o.Attributes) +} + +// ObjectPlanModifiers returns the PlanModifiers field value. +func (o NestedAttributeObject) ObjectPlanModifiers() []planmodifier.Object { + return o.PlanModifiers +} + +// ObjectValidators returns the Validators field value. +func (o NestedAttributeObject) ObjectValidators() []validator.Object { + return o.Validators +} + +// Type returns the framework type of the NestedAttributeObject. 
+func (o NestedAttributeObject) Type() basetypes.ObjectTypable { + if o.CustomType != nil { + return o.CustomType + } + + return fwschema.NestedAttributeObjectType(o) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/nested_block_object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/nested_block_object.go new file mode 100644 index 0000000000..4f2e449b2d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/nested_block_object.go @@ -0,0 +1,117 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ fwxschema.NestedBlockObjectWithPlanModifiers = NestedBlockObject{} + _ fwxschema.NestedBlockObjectWithValidators = NestedBlockObject{} +) + +// NestedBlockObject is the object containing the underlying attributes and +// blocks for a ListNestedBlock or SetNestedBlock. When retrieving the value +// for this attribute, use types.Object as the value type unless the CustomType +// field is set. +// +// This object enables customizing and simplifying details within its parent +// Block, therefore it cannot have Terraform schema fields such as Description, +// etc. +type NestedBlockObject struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Blocks names. 
+ Attributes map[string]Attribute + + // Blocks is the mapping of underlying block names to block definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Attributes names. + Blocks map[string]Block + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. 
+ PlanModifiers []planmodifier.Object +} + +// ApplyTerraform5AttributePathStep performs an AttributeName step on the +// underlying attributes or returns an error. +func (o NestedBlockObject) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (any, error) { + return fwschema.NestedBlockObjectApplyTerraform5AttributePathStep(o, step) +} + +// Equal returns true if the given NestedBlockObject is equivalent. +func (o NestedBlockObject) Equal(other fwschema.NestedBlockObject) bool { + if _, ok := other.(NestedBlockObject); !ok { + return false + } + + return fwschema.NestedBlockObjectEqual(o, other) +} + +// GetAttributes returns the Attributes field value. +func (o NestedBlockObject) GetAttributes() fwschema.UnderlyingAttributes { + return schemaAttributes(o.Attributes) +} + +// GetAttributes returns the Blocks field value. +func (o NestedBlockObject) GetBlocks() map[string]fwschema.Block { + return schemaBlocks(o.Blocks) +} + +// ObjectPlanModifiers returns the PlanModifiers field value. +func (o NestedBlockObject) ObjectPlanModifiers() []planmodifier.Object { + return o.PlanModifiers +} + +// ObjectValidators returns the Validators field value. +func (o NestedBlockObject) ObjectValidators() []validator.Object { + return o.Validators +} + +// Type returns the framework type of the NestedBlockObject. 
+func (o NestedBlockObject) Type() basetypes.ObjectTypable { + if o.CustomType != nil { + return o.CustomType + } + + return fwschema.NestedBlockObjectType(o) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/number_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/number_attribute.go new file mode 100644 index 0000000000..ae55d7e1cd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/number_attribute.go @@ -0,0 +1,212 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = NumberAttribute{} + _ fwxschema.AttributeWithNumberPlanModifiers = NumberAttribute{} + _ fwxschema.AttributeWithNumberValidators = NumberAttribute{} +) + +// NumberAttribute represents a schema attribute that is a generic number with +// up to 512 bits of floating point or integer precision. When retrieving the +// value for this attribute, use types.Number as the value type unless the +// CustomType field is set. +// +// Use Float64Attribute for 64-bit floating point number attributes or +// Int64Attribute for 64-bit integer number attributes. +// +// Terraform configurations configure this attribute using expressions that +// return a number or directly via a floating point or integer value. 
+// +// example_attribute = 123 +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type NumberAttribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.NumberType. When retrieving data, the basetypes.NumberValuable + // associated with this custom type must be used in place of types.Number. + CustomType basetypes.NumberTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. 
+ Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. 
+ // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Number + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. + PlanModifiers []planmodifier.Number +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a NumberAttribute. +func (a NumberAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a NumberAttribute +// and all fields are equal. 
+func (a NumberAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(NumberAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a NumberAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a NumberAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a NumberAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.NumberType or the CustomType field value if defined. +func (a NumberAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.NumberType +} + +// IsComputed returns the Computed field value. +func (a NumberAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a NumberAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a NumberAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a NumberAttribute) IsSensitive() bool { + return a.Sensitive +} + +// NumberPlanModifiers returns the PlanModifiers field value. +func (a NumberAttribute) NumberPlanModifiers() []planmodifier.Number { + return a.PlanModifiers +} + +// NumberValidators returns the Validators field value. 
+func (a NumberAttribute) NumberValidators() []validator.Number { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/object_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/object_attribute.go new file mode 100644 index 0000000000..9740dd08e2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/object_attribute.go @@ -0,0 +1,223 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = ObjectAttribute{} + _ fwxschema.AttributeWithObjectPlanModifiers = ObjectAttribute{} + _ fwxschema.AttributeWithObjectValidators = ObjectAttribute{} +) + +// ObjectAttribute represents a schema attribute that is an object with only +// type information for underlying attributes. When retrieving the value for +// this attribute, use types.Object as the value type unless the CustomType +// field is set. The AttributeTypes field must be set. +// +// Prefer SingleNestedAttribute over ObjectAttribute if the provider is +// using protocol version 6 and full attribute functionality is needed. +// +// Terraform configurations configure this attribute using expressions that +// return an object or directly via curly brace syntax. 
+// +// # object with one attribute +// example_attribute = { +// underlying_attribute = #... +// } +// +// Terraform configurations reference this attribute using expressions that +// accept an object or an attribute directly via period syntax: +// +// # underlying attribute +// .example_attribute.underlying_attribute +type ObjectAttribute struct { + // AttributeTypes is the mapping of underlying attribute names to attribute + // types. This field must be set. + AttributeTypes map[string]attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. 
+ Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). 
+ // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. 
+ PlanModifiers []planmodifier.Object +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into an +// attribute name or an error. +func (a ObjectAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a ObjectAttribute +// and all fields are equal. +func (a ObjectAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(ObjectAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a ObjectAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a ObjectAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a ObjectAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.ObjectType or the CustomType field value if defined. +func (a ObjectAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.ObjectType{ + AttrTypes: a.AttributeTypes, + } +} + +// IsComputed returns the Computed field value. +func (a ObjectAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a ObjectAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a ObjectAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a ObjectAttribute) IsSensitive() bool { + return a.Sensitive +} + +// ObjectPlanModifiers returns the PlanModifiers field value. 
+func (a ObjectAttribute) ObjectPlanModifiers() []planmodifier.Object { + return a.PlanModifiers +} + +// ObjectValidators returns the Validators field value. +func (a ObjectAttribute) ObjectValidators() []validator.Object { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/bool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/bool.go new file mode 100644 index 0000000000..8fe7d007e8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/bool.go @@ -0,0 +1,85 @@ +package planmodifier + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Bool is a schema validator for types.Bool attributes. +type Bool interface { + Describer + + // PlanModifyBool should perform the modification. + PlanModifyBool(context.Context, BoolRequest, *BoolResponse) +} + +// BoolRequest is a request for types.Bool schema plan modification. +type BoolRequest struct { + // Path contains the path of the attribute for modification. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for modification. + PathExpression path.Expression + + // Config contains the entire configuration of the resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for modification from the configuration. + ConfigValue types.Bool + + // Plan contains the entire proposed new state of the resource. 
+ Plan tfsdk.Plan + + // PlanValue contains the value of the attribute for modification from the proposed new state. + PlanValue types.Bool + + // State contains the entire prior state of the resource. + State tfsdk.State + + // StateValue contains the value of the attribute for modification from the prior state. + StateValue types.Bool + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. This data is opaque to Terraform and does + // not affect plan output. Any existing data is copied to + // BoolResponse.Private to prevent accidental private state data loss. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + // + // Use the GetKey method to read data. Use the SetKey method on + // BoolResponse.Private to update or remove a value. + Private *privatestate.ProviderData +} + +// BoolResponse is a response to a BoolRequest. +type BoolResponse struct { + // PlanValue is the planned new state for the attribute. + PlanValue types.Bool + + // RequiresReplace indicates whether a change in the attribute + // requires replacement of the whole resource. + RequiresReplace bool + + // Private is the private state resource data following the PlanModifyBool operation. + // This field is pre-populated from BoolRequest.Private and + // can be modified during the resource's PlanModifyBool operation. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/describer.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/describer.go new file mode 100644 index 0000000000..c29b6005ec --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/describer.go @@ -0,0 +1,29 @@ +package planmodifier + +import ( + "context" +) + +// Describer is the common documentation interface for extensible schema +// plan modifier functionality. +type Describer interface { + // Description should describe the plan modifier in plain text formatting. + // This information is used by provider logging and provider tooling such + // as documentation generation. + // + // The description should: + // - Begin with a lowercase or other character suitable for the middle of + // a sentence. + // - End without punctuation. + Description(context.Context) string + + // MarkdownDescription should describe the plan modifier in Markdown + // formatting. This information is used by provider logging and provider + // tooling such as documentation generation. + // + // The description should: + // - Begin with a lowercase or other character suitable for the middle of + // a sentence. + // - End without punctuation. 
+ MarkdownDescription(context.Context) string +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/doc.go new file mode 100644 index 0000000000..182964a46d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/doc.go @@ -0,0 +1,33 @@ +// Package planmodifier contains schema plan modifier interfaces and +// request/response implementations. These plan modifier interfaces +// are used by resource/schema and internally in the framework. +// Refer to the typed plan modifier packages, such as stringplanmodifier, +// for framework-defined plan modifiers that can be used in +// provider-defined schemas. +// +// Each attr.Type has a corresponding {TYPE}PlanModifer interface which +// implements concretely typed Modify{TYPE} methods, such as +// StringPlanModifer and ModifyString. +// +// The framework has to choose between plan modifier developers handling a +// concrete framework value type, such as types.Bool, or the framework +// interface for custom value basetypes. such as basetypes.BoolValuable. +// +// In the framework type model, the developer can immediately use the value. +// If the value was associated with a custom type and using the custom value +// type is desired, the developer must use the type's ValueFrom{TYPE} method. +// +// In the custom type model, the developer must always convert to a concreate +// type before using the value unless checking for null or unknown. Since any +// custom type may be passed due to the schema, it is possible, if not likely, +// that unknown concrete types will be passed to the plan modifier. +// +// The framework chooses to pass the framework value type. 
This prevents the +// potential for unexpected runtime panics and simplifies development for +// easier use cases where the framework type is sufficient. More advanced +// developers can choose to call the type's ValueFrom{TYPE} method to get the +// desired custom type in a plan modifier. +// +// PlanModifers that are not type dependent need to implement all interfaces, +// but can use shared logic to reduce implementation code. +package planmodifier diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/float64.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/float64.go new file mode 100644 index 0000000000..cbdd2fb675 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/float64.go @@ -0,0 +1,85 @@ +package planmodifier + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Float64 is a schema validator for types.Float64 attributes. +type Float64 interface { + Describer + + // PlanModifyFloat64 should perform the modification. + PlanModifyFloat64(context.Context, Float64Request, *Float64Response) +} + +// Float64Request is a request for types.Float64 schema plan modification. +type Float64Request struct { + // Path contains the path of the attribute for modification. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for modification. + PathExpression path.Expression + + // Config contains the entire configuration of the resource. 
+ Config tfsdk.Config + + // ConfigValue contains the value of the attribute for modification from the configuration. + ConfigValue types.Float64 + + // Plan contains the entire proposed new state of the resource. + Plan tfsdk.Plan + + // PlanValue contains the value of the attribute for modification from the proposed new state. + PlanValue types.Float64 + + // State contains the entire prior state of the resource. + State tfsdk.State + + // StateValue contains the value of the attribute for modification from the prior state. + StateValue types.Float64 + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. This data is opaque to Terraform and does + // not affect plan output. Any existing data is copied to + // Float64Response.Private to prevent accidental private state data loss. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + // + // Use the GetKey method to read data. Use the SetKey method on + // Float64Response.Private to update or remove a value. + Private *privatestate.ProviderData +} + +// Float64Response is a response to a Float64Request. +type Float64Response struct { + // PlanValue is the planned new state for the attribute. + PlanValue types.Float64 + + // RequiresReplace indicates whether a change in the attribute + // requires replacement of the whole resource. + RequiresReplace bool + + // Private is the private state resource data following the PlanModifyFloat64 operation. + // This field is pre-populated from Float64Request.Private and + // can be modified during the resource's PlanModifyFloat64 operation. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. 
+ Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/int64.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/int64.go new file mode 100644 index 0000000000..70d78725f6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/int64.go @@ -0,0 +1,85 @@ +package planmodifier + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Int64 is a schema validator for types.Int64 attributes. +type Int64 interface { + Describer + + // PlanModifyInt64 should perform the modification. + PlanModifyInt64(context.Context, Int64Request, *Int64Response) +} + +// Int64Request is a request for types.Int64 schema plan modification. +type Int64Request struct { + // Path contains the path of the attribute for modification. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for modification. + PathExpression path.Expression + + // Config contains the entire configuration of the resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for modification from the configuration. + ConfigValue types.Int64 + + // Plan contains the entire proposed new state of the resource. 
+ Plan tfsdk.Plan + + // PlanValue contains the value of the attribute for modification from the proposed new state. + PlanValue types.Int64 + + // State contains the entire prior state of the resource. + State tfsdk.State + + // StateValue contains the value of the attribute for modification from the prior state. + StateValue types.Int64 + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. This data is opaque to Terraform and does + // not affect plan output. Any existing data is copied to + // Int64Response.Private to prevent accidental private state data loss. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + // + // Use the GetKey method to read data. Use the SetKey method on + // Int64Response.Private to update or remove a value. + Private *privatestate.ProviderData +} + +// Int64Response is a response to a Int64Request. +type Int64Response struct { + // PlanValue is the planned new state for the attribute. + PlanValue types.Int64 + + // RequiresReplace indicates whether a change in the attribute + // requires replacement of the whole resource. + RequiresReplace bool + + // Private is the private state resource data following the PlanModifyInt64 operation. + // This field is pre-populated from Int64Request.Private and + // can be modified during the resource's PlanModifyInt64 operation. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/list.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/list.go new file mode 100644 index 0000000000..971a09c0b0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/list.go @@ -0,0 +1,85 @@ +package planmodifier + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// List is a schema validator for types.List attributes. +type List interface { + Describer + + // PlanModifyList should perform the modification. + PlanModifyList(context.Context, ListRequest, *ListResponse) +} + +// ListRequest is a request for types.List schema plan modification. +type ListRequest struct { + // Path contains the path of the attribute for modification. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for modification. + PathExpression path.Expression + + // Config contains the entire configuration of the resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for modification from the configuration. + ConfigValue types.List + + // Plan contains the entire proposed new state of the resource. + Plan tfsdk.Plan + + // PlanValue contains the value of the attribute for modification from the proposed new state. + PlanValue types.List + + // State contains the entire prior state of the resource. 
+ State tfsdk.State + + // StateValue contains the value of the attribute for modification from the prior state. + StateValue types.List + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. This data is opaque to Terraform and does + // not affect plan output. Any existing data is copied to + // ListResponse.Private to prevent accidental private state data loss. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + // + // Use the GetKey method to read data. Use the SetKey method on + // ListResponse.Private to update or remove a value. + Private *privatestate.ProviderData +} + +// ListResponse is a response to a ListRequest. +type ListResponse struct { + // PlanValue is the planned new state for the attribute. + PlanValue types.List + + // RequiresReplace indicates whether a change in the attribute + // requires replacement of the whole resource. + RequiresReplace bool + + // Private is the private state resource data following the PlanModifyList operation. + // This field is pre-populated from ListRequest.Private and + // can be modified during the resource's PlanModifyList operation. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/map.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/map.go new file mode 100644 index 0000000000..9ddd519439 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/map.go @@ -0,0 +1,85 @@ +package planmodifier + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Map is a schema validator for types.Map attributes. +type Map interface { + Describer + + // PlanModifyMap should perform the modification. + PlanModifyMap(context.Context, MapRequest, *MapResponse) +} + +// MapRequest is a request for types.Map schema plan modification. +type MapRequest struct { + // Path contains the path of the attribute for modification. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for modification. + PathExpression path.Expression + + // Config contains the entire configuration of the resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for modification from the configuration. + ConfigValue types.Map + + // Plan contains the entire proposed new state of the resource. + Plan tfsdk.Plan + + // PlanValue contains the value of the attribute for modification from the proposed new state. + PlanValue types.Map + + // State contains the entire prior state of the resource. 
+ State tfsdk.State + + // StateValue contains the value of the attribute for modification from the prior state. + StateValue types.Map + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. This data is opaque to Terraform and does + // not affect plan output. Any existing data is copied to + // MapResponse.Private to prevent accidental private state data loss. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + // + // Use the GetKey method to read data. Use the SetKey method on + // MapResponse.Private to update or remove a value. + Private *privatestate.ProviderData +} + +// MapResponse is a response to a MapRequest. +type MapResponse struct { + // PlanValue is the planned new state for the attribute. + PlanValue types.Map + + // RequiresReplace indicates whether a change in the attribute + // requires replacement of the whole resource. + RequiresReplace bool + + // Private is the private state resource data following the PlanModifyMap operation. + // This field is pre-populated from MapRequest.Private and + // can be modified during the resource's PlanModifyMap operation. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/number.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/number.go new file mode 100644 index 0000000000..210a5762f1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/number.go @@ -0,0 +1,85 @@ +package planmodifier + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Number is a schema validator for types.Number attributes. +type Number interface { + Describer + + // PlanModifyNumber should perform the modification. + PlanModifyNumber(context.Context, NumberRequest, *NumberResponse) +} + +// NumberRequest is a request for types.Number schema plan modification. +type NumberRequest struct { + // Path contains the path of the attribute for modification. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for modification. + PathExpression path.Expression + + // Config contains the entire configuration of the resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for modification from the configuration. + ConfigValue types.Number + + // Plan contains the entire proposed new state of the resource. + Plan tfsdk.Plan + + // PlanValue contains the value of the attribute for modification from the proposed new state. + PlanValue types.Number + + // State contains the entire prior state of the resource. 
+ State tfsdk.State + + // StateValue contains the value of the attribute for modification from the prior state. + StateValue types.Number + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. This data is opaque to Terraform and does + // not affect plan output. Any existing data is copied to + // NumberResponse.Private to prevent accidental private state data loss. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + // + // Use the GetKey method to read data. Use the SetKey method on + // NumberResponse.Private to update or remove a value. + Private *privatestate.ProviderData +} + +// NumberResponse is a response to a NumberRequest. +type NumberResponse struct { + // PlanValue is the planned new state for the attribute. + PlanValue types.Number + + // RequiresReplace indicates whether a change in the attribute + // requires replacement of the whole resource. + RequiresReplace bool + + // Private is the private state resource data following the PlanModifyNumber operation. + // This field is pre-populated from NumberRequest.Private and + // can be modified during the resource's PlanModifyNumber operation. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/object.go new file mode 100644 index 0000000000..1d2260bab4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/object.go @@ -0,0 +1,85 @@ +package planmodifier + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Object is a schema validator for types.Object attributes. +type Object interface { + Describer + + // PlanModifyObject should perform the modification. + PlanModifyObject(context.Context, ObjectRequest, *ObjectResponse) +} + +// ObjectRequest is a request for types.Object schema plan modification. +type ObjectRequest struct { + // Path contains the path of the attribute for modification. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for modification. + PathExpression path.Expression + + // Config contains the entire configuration of the resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for modification from the configuration. + ConfigValue types.Object + + // Plan contains the entire proposed new state of the resource. + Plan tfsdk.Plan + + // PlanValue contains the value of the attribute for modification from the proposed new state. + PlanValue types.Object + + // State contains the entire prior state of the resource. 
+ State tfsdk.State + + // StateValue contains the value of the attribute for modification from the prior state. + StateValue types.Object + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. This data is opaque to Terraform and does + // not affect plan output. Any existing data is copied to + // ObjectResponse.Private to prevent accidental private state data loss. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + // + // Use the GetKey method to read data. Use the SetKey method on + // ObjectResponse.Private to update or remove a value. + Private *privatestate.ProviderData +} + +// ObjectResponse is a response to a ObjectRequest. +type ObjectResponse struct { + // PlanValue is the planned new state for the attribute. + PlanValue types.Object + + // RequiresReplace indicates whether a change in the attribute + // requires replacement of the whole resource. + RequiresReplace bool + + // Private is the private state resource data following the PlanModifyObject operation. + // This field is pre-populated from ObjectRequest.Private and + // can be modified during the resource's PlanModifyObject operation. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/set.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/set.go new file mode 100644 index 0000000000..93fcf5667c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/set.go @@ -0,0 +1,85 @@ +package planmodifier + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Set is a schema validator for types.Set attributes. +type Set interface { + Describer + + // PlanModifySet should perform the modification. + PlanModifySet(context.Context, SetRequest, *SetResponse) +} + +// SetRequest is a request for types.Set schema plan modification. +type SetRequest struct { + // Path contains the path of the attribute for modification. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for modification. + PathExpression path.Expression + + // Config contains the entire configuration of the resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for modification from the configuration. + ConfigValue types.Set + + // Plan contains the entire proposed new state of the resource. + Plan tfsdk.Plan + + // PlanValue contains the value of the attribute for modification from the proposed new state. + PlanValue types.Set + + // State contains the entire prior state of the resource. 
+ State tfsdk.State + + // StateValue contains the value of the attribute for modification from the prior state. + StateValue types.Set + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. This data is opaque to Terraform and does + // not affect plan output. Any existing data is copied to + // SetResponse.Private to prevent accidental private state data loss. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + // + // Use the GetKey method to read data. Use the SetKey method on + // SetResponse.Private to update or remove a value. + Private *privatestate.ProviderData +} + +// SetResponse is a response to a SetRequest. +type SetResponse struct { + // PlanValue is the planned new state for the attribute. + PlanValue types.Set + + // RequiresReplace indicates whether a change in the attribute + // requires replacement of the whole resource. + RequiresReplace bool + + // Private is the private state resource data following the PlanModifySet operation. + // This field is pre-populated from SetRequest.Private and + // can be modified during the resource's PlanModifySet operation. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/string.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/string.go new file mode 100644 index 0000000000..78cce9496c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier/string.go @@ -0,0 +1,85 @@ +package planmodifier + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// String is a schema validator for types.String attributes. +type String interface { + Describer + + // PlanModifyString should perform the modification. + PlanModifyString(context.Context, StringRequest, *StringResponse) +} + +// StringRequest is a request for types.String schema plan modification. +type StringRequest struct { + // Path contains the path of the attribute for modification. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for modification. + PathExpression path.Expression + + // Config contains the entire configuration of the resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for modification from the configuration. + ConfigValue types.String + + // Plan contains the entire proposed new state of the resource. + Plan tfsdk.Plan + + // PlanValue contains the value of the attribute for modification from the proposed new state. + PlanValue types.String + + // State contains the entire prior state of the resource. 
+ State tfsdk.State + + // StateValue contains the value of the attribute for modification from the prior state. + StateValue types.String + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. This data is opaque to Terraform and does + // not affect plan output. Any existing data is copied to + // StringResponse.Private to prevent accidental private state data loss. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + // + // Use the GetKey method to read data. Use the SetKey method on + // StringResponse.Private to update or remove a value. + Private *privatestate.ProviderData +} + +// StringResponse is a response to a StringRequest. +type StringResponse struct { + // PlanValue is the planned new state for the attribute. + PlanValue types.String + + // RequiresReplace indicates whether a change in the attribute + // requires replacement of the whole resource. + RequiresReplace bool + + // Private is the private state resource data following the PlanModifyString operation. + // This field is pre-populated from StringRequest.Private and + // can be modified during the resource's PlanModifyString operation. + // + // The private state data is always the original data when the schema-based plan + // modification began or, is updated as the logic traverses deeper into underlying + // attributes. + Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/schema.go new file mode 100644 index 0000000000..91813203de --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/schema.go @@ -0,0 +1,282 @@ +package schema + +import ( + "context" + "fmt" + "regexp" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/path" +) + +// Schema must satify the fwschema.Schema interface. +var _ fwschema.Schema = Schema{} + +// Schema defines the structure and value types of resource data. This type +// is used as the resource.SchemaResponse type Schema field, which is +// implemented by the resource.DataSource type Schema method. +type Schema struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Blocks names. + Attributes map[string]Attribute + + // Blocks is the mapping of underlying block names to block definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Attributes names. + Blocks map[string]Block + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this resource is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. 
+ Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this resource is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this resource. The warning diagnostic + // summary is automatically set to "Resource Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Use examplecloud_other resource instead. This resource + // will be removed in the next major version of the provider." + // - "Remove this resource as it no longer is valid and + // will be removed in the next major version of the provider." + // + DeprecationMessage string + + // Version indicates the current version of the resource schema. Resource + // schema versioning enables state upgrades in conjunction with the + // [resource.ResourceWithStateUpgrades] interface. Versioning is only + // required if there is a breaking change involving existing state data, + // such as changing an attribute or block type in a manner that is + // incompatible with the Terraform type. + // + // Versions are conventionally only incremented by one each release. + Version int64 +} + +// ApplyTerraform5AttributePathStep applies the given AttributePathStep to the +// schema. +func (s Schema) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (any, error) { + return fwschema.SchemaApplyTerraform5AttributePathStep(s, step) +} + +// AttributeAtPath returns the Attribute at the passed path. If the path points +// to an element or attribute of a complex type, rather than to an Attribute, +// it will return an ErrPathInsideAtomicAttribute error. 
+func (s Schema) AttributeAtPath(ctx context.Context, p path.Path) (fwschema.Attribute, diag.Diagnostics) { + return fwschema.SchemaAttributeAtPath(ctx, s, p) +} + +// AttributeAtPath returns the Attribute at the passed path. If the path points +// to an element or attribute of a complex type, rather than to an Attribute, +// it will return an ErrPathInsideAtomicAttribute error. +func (s Schema) AttributeAtTerraformPath(ctx context.Context, p *tftypes.AttributePath) (fwschema.Attribute, error) { + return fwschema.SchemaAttributeAtTerraformPath(ctx, s, p) +} + +// GetAttributes returns the Attributes field value. +func (s Schema) GetAttributes() map[string]fwschema.Attribute { + return schemaAttributes(s.Attributes) +} + +// GetBlocks returns the Blocks field value. +func (s Schema) GetBlocks() map[string]fwschema.Block { + return schemaBlocks(s.Blocks) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (s Schema) GetDeprecationMessage() string { + return s.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (s Schema) GetDescription() string { + return s.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (s Schema) GetMarkdownDescription() string { + return s.MarkdownDescription +} + +// GetVersion returns the Version field value. +func (s Schema) GetVersion() int64 { + return s.Version +} + +// Type returns the framework type of the schema. +func (s Schema) Type() attr.Type { + return fwschema.SchemaType(s) +} + +// TypeAtPath returns the framework type at the given schema path. +func (s Schema) TypeAtPath(ctx context.Context, p path.Path) (attr.Type, diag.Diagnostics) { + return fwschema.SchemaTypeAtPath(ctx, s, p) +} + +// TypeAtTerraformPath returns the framework type at the given tftypes path. 
+func (s Schema) TypeAtTerraformPath(ctx context.Context, p *tftypes.AttributePath) (attr.Type, error) { + return fwschema.SchemaTypeAtTerraformPath(ctx, s, p) +} + +// Validate verifies that the schema is not using a reserved field name for a top-level attribute. +func (s Schema) Validate() diag.Diagnostics { + var diags diag.Diagnostics + + // Raise error diagnostics when data source configuration uses reserved + // field names for root-level attributes. + reservedFieldNames := map[string]struct{}{ + "connection": {}, + "count": {}, + "depends_on": {}, + "lifecycle": {}, + "provider": {}, + "provisioner": {}, + } + + attributes := s.GetAttributes() + + for k, v := range attributes { + if _, ok := reservedFieldNames[k]; ok { + diags.AddAttributeError( + path.Root(k), + "Schema Using Reserved Field Name", + fmt.Sprintf("%q is a reserved field name", k), + ) + } + + d := validateAttributeFieldName(path.Root(k), k, v) + + diags.Append(d...) + } + + blocks := s.GetBlocks() + + for k, v := range blocks { + if _, ok := reservedFieldNames[k]; ok { + diags.AddAttributeError( + path.Root(k), + "Schema Using Reserved Field Name", + fmt.Sprintf("%q is a reserved field name", k), + ) + } + + d := validateBlockFieldName(path.Root(k), k, v) + + diags.Append(d...) + } + + return diags +} + +// validFieldNameRegex is used to verify that name used for attributes and blocks +// comply with the defined regular expression. +var validFieldNameRegex = regexp.MustCompile("^[a-z0-9_]+$") + +// validateAttributeFieldName verifies that the name used for an attribute complies with the regular +// expression defined in validFieldNameRegex. +func validateAttributeFieldName(path path.Path, name string, attr fwschema.Attribute) diag.Diagnostics { + var diags diag.Diagnostics + + if !validFieldNameRegex.MatchString(name) { + diags.AddAttributeError( + path, + "Invalid Schema Field Name", + fmt.Sprintf("Field name %q is invalid, the only allowed characters are a-z, 0-9 and _. 
This is always a problem with the provider and should be reported to the provider developer.", name), + ) + } + + if na, ok := attr.(fwschema.NestedAttribute); ok { + nestedObject := na.GetNestedObject() + + if nestedObject == nil { + return diags + } + + attributes := nestedObject.GetAttributes() + + for k, v := range attributes { + d := validateAttributeFieldName(path.AtName(k), k, v) + + diags.Append(d...) + } + } + + return diags +} + +// validateBlockFieldName verifies that the name used for a block complies with the regular +// expression defined in validFieldNameRegex. +func validateBlockFieldName(path path.Path, name string, b fwschema.Block) diag.Diagnostics { + var diags diag.Diagnostics + + if !validFieldNameRegex.MatchString(name) { + diags.AddAttributeError( + path, + "Invalid Schema Field Name", + fmt.Sprintf("Field name %q is invalid, the only allowed characters are a-z, 0-9 and _. This is always a problem with the provider and should be reported to the provider developer.", name), + ) + } + + nestedObject := b.GetNestedObject() + + if nestedObject == nil { + return diags + } + + blocks := nestedObject.GetBlocks() + + for k, v := range blocks { + d := validateBlockFieldName(path.AtName(k), k, v) + + diags.Append(d...) + } + + attributes := nestedObject.GetAttributes() + + for k, v := range attributes { + d := validateAttributeFieldName(path.AtName(k), k, v) + + diags.Append(d...) + } + + return diags +} + +// schemaAttributes is a resource to fwschema type conversion function. +func schemaAttributes(attributes map[string]Attribute) map[string]fwschema.Attribute { + result := make(map[string]fwschema.Attribute, len(attributes)) + + for name, attribute := range attributes { + result[name] = attribute + } + + return result +} + +// schemaBlocks is a resource to fwschema type conversion function. 
+func schemaBlocks(blocks map[string]Block) map[string]fwschema.Block { + result := make(map[string]fwschema.Block, len(blocks)) + + for name, block := range blocks { + result[name] = block + } + + return result +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/set_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/set_attribute.go new file mode 100644 index 0000000000..2a73823e79 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/set_attribute.go @@ -0,0 +1,219 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = SetAttribute{} + _ fwxschema.AttributeWithSetPlanModifiers = SetAttribute{} + _ fwxschema.AttributeWithSetValidators = SetAttribute{} +) + +// SetAttribute represents a schema attribute that is a set with a single +// element type. When retrieving the value for this attribute, use types.Set +// as the value type unless the CustomType field is set. The ElementType field +// must be set. +// +// Use SetNestedAttribute if the underlying elements should be objects and +// require definition beyond type information. +// +// Terraform configurations configure this attribute using expressions that +// return a set or directly via square brace syntax. 
+// +// # set of strings +// example_attribute = ["first", "second"] +// +// Terraform configurations reference this attribute using expressions that +// accept a set. Sets cannot be indexed in Terraform, therefore an expression +// is required to access an explicit element. +type SetAttribute struct { + // ElementType is the type for all elements of the set. This field must be + // set. + ElementType attr.Type + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.SetType. When retrieving data, the basetypes.SetValuable + // associated with this custom type must be used in place of types.Set. + CustomType basetypes.SetTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. 
It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. 
+ // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Set + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. + PlanModifiers []planmodifier.Set +} + +// ApplyTerraform5AttributePathStep returns the result of stepping into a set +// index or an error. +func (a SetAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a SetAttribute +// and all fields are equal. 
+func (a SetAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(SetAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a SetAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a SetAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a SetAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.SetType or the CustomType field value if defined. +func (a SetAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.SetType{ + ElemType: a.ElementType, + } +} + +// IsComputed returns the Computed field value. +func (a SetAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a SetAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a SetAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a SetAttribute) IsSensitive() bool { + return a.Sensitive +} + +// SetPlanModifiers returns the PlanModifiers field value. +func (a SetAttribute) SetPlanModifiers() []planmodifier.Set { + return a.PlanModifiers +} + +// SetValidators returns the Validators field value. 
+func (a SetAttribute) SetValidators() []validator.Set { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/set_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/set_nested_attribute.go new file mode 100644 index 0000000000..17a6a65b20 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/set_nested_attribute.go @@ -0,0 +1,244 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ NestedAttribute = SetNestedAttribute{} + _ fwxschema.AttributeWithSetPlanModifiers = SetNestedAttribute{} + _ fwxschema.AttributeWithSetValidators = SetNestedAttribute{} +) + +// SetNestedAttribute represents an attribute that is a set of objects where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.Set +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use SetAttribute if the underlying elements are of a single type and do +// not require definition beyond type information. 
+// +// Terraform configurations configure this attribute using expressions that +// return a set of objects or directly via square and curly brace syntax. +// +// # set of objects +// example_attribute = [ +// { +// nested_attribute = #... +// }, +// ] +// +// Terraform configurations reference this attribute using expressions that +// accept a set of objects. Sets cannot be indexed in Terraform, therefore +// an expression is required to access an explicit element. +type SetNestedAttribute struct { + // NestedObject is the underlying object that contains nested attributes. + // This field must be set. + NestedObject NestedAttributeObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.SetType of types.ObjectType. When retrieving data, the + // basetypes.SetValuable associated with this custom type must be used in + // place of types.Set. + CustomType basetypes.SetTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. 
+ Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). 
+ // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Set + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. 
+ PlanModifiers []planmodifier.Set +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is ElementKeyValue, otherwise returns an error. +func (a SetNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyValue) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to SetNestedAttribute", step) + } + + return a.NestedObject, nil +} + +// Equal returns true if the given Attribute is a SetNestedAttribute +// and all fields are equal. +func (a SetNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(SetNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a SetNestedAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a SetNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a SetNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (a SetNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return a.NestedObject +} + +// GetNestingMode always returns NestingModeList. +func (a SetNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeSet +} + +// GetType returns SetType of ObjectType or CustomType. +func (a SetNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.SetType{ + ElemType: a.NestedObject.Type(), + } +} + +// IsComputed returns the Computed field value. +func (a SetNestedAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. 
+func (a SetNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a SetNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a SetNestedAttribute) IsSensitive() bool { + return a.Sensitive +} + +// SetPlanModifiers returns the PlanModifiers field value. +func (a SetNestedAttribute) SetPlanModifiers() []planmodifier.Set { + return a.PlanModifiers +} + +// SetValidators returns the Validators field value. +func (a SetNestedAttribute) SetValidators() []validator.Set { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/set_nested_block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/set_nested_block.go new file mode 100644 index 0000000000..cde1bbc117 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/set_nested_block.go @@ -0,0 +1,209 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. 
+var ( + _ Block = SetNestedBlock{} + _ fwxschema.BlockWithSetPlanModifiers = SetNestedBlock{} + _ fwxschema.BlockWithSetValidators = SetNestedBlock{} +) + +// SetNestedBlock represents a block that is a set of objects where +// the object attributes can be fully defined, including further attributes +// or blocks. When retrieving the value for this block, use types.Set +// as the value type unless the CustomType field is set. The NestedObject field +// must be set. +// +// Prefer SetNestedAttribute over SetNestedBlock if the provider is +// using protocol version 6. Nested attributes allow practitioners to configure +// values directly with expressions. +// +// Terraform configurations configure this block repeatedly using curly brace +// syntax without an equals (=) sign or [Dynamic Block Expressions]. +// +// # set of blocks with two elements +// example_block { +// nested_attribute = #... +// } +// example_block { +// nested_attribute = #... +// } +// +// Terraform configurations reference this block using expressions that +// accept a set of objects or an element directly via square brace 0-based +// index syntax: +// +// # first known object +// .example_block[0] +// # first known object nested_attribute value +// .example_block[0].nested_attribute +// +// [Dynamic Block Expressions]: https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks +type SetNestedBlock struct { + // NestedObject is the underlying object that contains nested attributes or + // blocks. This field must be set. + NestedObject NestedBlockObject + + // CustomType enables the use of a custom attribute type in place of the + // default types.SetType of types.ObjectType. When retrieving data, the + // basetypes.SetValuable associated with this custom type must be used in + // place of types.Set. 
+ CustomType basetypes.SetTypable + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). 
+ // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Set + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. 
+ PlanModifiers []planmodifier.Set +} + +// ApplyTerraform5AttributePathStep returns the NestedObject field value if step +// is ElementKeyValue, otherwise returns an error. +func (b SetNestedBlock) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + _, ok := step.(tftypes.ElementKeyValue) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to SetNestedBlock", step) + } + + return b.NestedObject, nil +} + +// Equal returns true if the given Block is SetNestedBlock +// and all fields are equal. +func (b SetNestedBlock) Equal(o fwschema.Block) bool { + if _, ok := o.(SetNestedBlock); !ok { + return false + } + + return fwschema.BlocksEqual(b, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (b SetNestedBlock) GetDeprecationMessage() string { + return b.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (b SetNestedBlock) GetDescription() string { + return b.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (b SetNestedBlock) GetMarkdownDescription() string { + return b.MarkdownDescription +} + +// GetNestedObject returns the NestedObject field value. +func (b SetNestedBlock) GetNestedObject() fwschema.NestedBlockObject { + return b.NestedObject +} + +// GetNestingMode always returns BlockNestingModeSet. +func (b SetNestedBlock) GetNestingMode() fwschema.BlockNestingMode { + return fwschema.BlockNestingModeSet +} + +// SetPlanModifiers returns the PlanModifiers field value. +func (b SetNestedBlock) SetPlanModifiers() []planmodifier.Set { + return b.PlanModifiers +} + +// SetValidators returns the Validators field value. +func (b SetNestedBlock) SetValidators() []validator.Set { + return b.Validators +} + +// Type returns SetType of ObjectType or CustomType. 
+func (b SetNestedBlock) Type() attr.Type { + if b.CustomType != nil { + return b.CustomType + } + + return types.SetType{ + ElemType: b.NestedObject.Type(), + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/single_nested_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/single_nested_attribute.go new file mode 100644 index 0000000000..c49903205b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/single_nested_attribute.go @@ -0,0 +1,265 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ NestedAttribute = SingleNestedAttribute{} + _ fwxschema.AttributeWithObjectPlanModifiers = SingleNestedAttribute{} + _ fwxschema.AttributeWithObjectValidators = SingleNestedAttribute{} +) + +// SingleNestedAttribute represents an attribute that is a single object where +// the object attributes can be fully defined, including further nested +// attributes. When retrieving the value for this attribute, use types.Object +// as the value type unless the CustomType field is set. The Attributes field +// must be set. Nested attributes are only compatible with protocol version 6. +// +// Use ObjectAttribute if the underlying attributes do not require definition +// beyond type information. 
+// +// Terraform configurations configure this attribute using expressions that +// return an object or directly via curly brace syntax. +// +// # single object +// example_attribute = { +// nested_attribute = #... +// } +// +// Terraform configurations reference this attribute using expressions that +// accept an object or an attribute name directly via period syntax: +// +// # object nested_attribute value +// .example_attribute.nested_attribute +type SingleNestedAttribute struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. This field must be set. + Attributes map[string]Attribute + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. 
+ Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). 
+ // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. 
+ PlanModifiers []planmodifier.Object +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is AttributeName, otherwise returns an error. +func (a SingleNestedAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + name, ok := step.(tftypes.AttributeName) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to SingleNestedAttribute", step) + } + + attribute, ok := a.Attributes[string(name)] + + if !ok { + return nil, fmt.Errorf("no attribute %q on SingleNestedAttribute", name) + } + + return attribute, nil +} + +// Equal returns true if the given Attribute is a SingleNestedAttribute +// and all fields are equal. +func (a SingleNestedAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(SingleNestedAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetAttributes returns the Attributes field value. +func (a SingleNestedAttribute) GetAttributes() fwschema.UnderlyingAttributes { + return schemaAttributes(a.Attributes) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a SingleNestedAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a SingleNestedAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a SingleNestedAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetNestedObject returns a generated NestedAttributeObject from the +// Attributes, CustomType, and Validators field values. +func (a SingleNestedAttribute) GetNestedObject() fwschema.NestedAttributeObject { + return NestedAttributeObject{ + Attributes: a.Attributes, + CustomType: a.CustomType, + Validators: a.Validators, + } +} + +// GetNestingMode always returns NestingModeList. 
+func (a SingleNestedAttribute) GetNestingMode() fwschema.NestingMode { + return fwschema.NestingModeSingle +} + +// GetType returns ListType of ObjectType or CustomType. +func (a SingleNestedAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + attrTypes := make(map[string]attr.Type, len(a.Attributes)) + + for name, attribute := range a.Attributes { + attrTypes[name] = attribute.GetType() + } + + return types.ObjectType{ + AttrTypes: attrTypes, + } +} + +// IsComputed returns the Computed field value. +func (a SingleNestedAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a SingleNestedAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a SingleNestedAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a SingleNestedAttribute) IsSensitive() bool { + return a.Sensitive +} + +// ObjectPlanModifiers returns the PlanModifiers field value. +func (a SingleNestedAttribute) ObjectPlanModifiers() []planmodifier.Object { + return a.PlanModifiers +} + +// ObjectValidators returns the Validators field value. 
+func (a SingleNestedAttribute) ObjectValidators() []validator.Object { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/single_nested_block.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/single_nested_block.go new file mode 100644 index 0000000000..3f2ac51ec4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/single_nested_block.go @@ -0,0 +1,234 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Block = SingleNestedBlock{} + _ fwxschema.BlockWithObjectPlanModifiers = SingleNestedBlock{} + _ fwxschema.BlockWithObjectValidators = SingleNestedBlock{} +) + +// SingleNestedBlock represents a block that is a single object where +// the object attributes can be fully defined, including further attributes +// or blocks. When retrieving the value for this block, use types.Object +// as the value type unless the CustomType field is set. +// +// Prefer SingleNestedAttribute over SingleNestedBlock if the provider is +// using protocol version 6. Nested attributes allow practitioners to configure +// values directly with expressions. 
+// +// Terraform configurations configure this block only once using curly brace +// syntax without an equals (=) sign or [Dynamic Block Expressions]. +// +// # single block +// example_block { +// nested_attribute = #... +// } +// +// Terraform configurations reference this block using expressions that +// accept an object or an attribute name directly via period syntax: +// +// # object nested_attribute value +// .example_block.nested_attribute +// +// [Dynamic Block Expressions]: https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks +type SingleNestedBlock struct { + // Attributes is the mapping of underlying attribute names to attribute + // definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Blocks names. + Attributes map[string]Attribute + + // Blocks is the mapping of underlying block names to block definitions. + // + // Names must only contain lowercase letters, numbers, and underscores. + // Names must not collide with any Attributes names. + Blocks map[string]Block + + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.ObjectType. When retrieving data, the basetypes.ObjectValuable + // associated with this custom type must be used in place of types.Object. + CustomType basetypes.ObjectTypable + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. + Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. 
+ MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. + // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. 
+ // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.Object + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. + PlanModifiers []planmodifier.Object +} + +// ApplyTerraform5AttributePathStep returns the Attributes field value if step +// is AttributeName, otherwise returns an error. +func (b SingleNestedBlock) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + name, ok := step.(tftypes.AttributeName) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to SingleNestedBlock", step) + } + + if attribute, ok := b.Attributes[string(name)]; ok { + return attribute, nil + } + + if block, ok := b.Blocks[string(name)]; ok { + return block, nil + } + + return nil, fmt.Errorf("no attribute or block %q on SingleNestedBlock", name) +} + +// Equal returns true if the given Attribute is b SingleNestedBlock +// and all fields are equal. 
+func (b SingleNestedBlock) Equal(o fwschema.Block) bool { + if _, ok := o.(SingleNestedBlock); !ok { + return false + } + + return fwschema.BlocksEqual(b, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (b SingleNestedBlock) GetDeprecationMessage() string { + return b.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (b SingleNestedBlock) GetDescription() string { + return b.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (b SingleNestedBlock) GetMarkdownDescription() string { + return b.MarkdownDescription +} + +// GetNestedObject returns a generated NestedBlockObject from the +// Attributes, CustomType, and Validators field values. +func (b SingleNestedBlock) GetNestedObject() fwschema.NestedBlockObject { + return NestedBlockObject{ + Attributes: b.Attributes, + Blocks: b.Blocks, + CustomType: b.CustomType, + Validators: b.Validators, + } +} + +// GetNestingMode always returns BlockNestingModeSingle. +func (b SingleNestedBlock) GetNestingMode() fwschema.BlockNestingMode { + return fwschema.BlockNestingModeSingle +} + +// ObjectPlanModifiers returns the PlanModifiers field value. +func (b SingleNestedBlock) ObjectPlanModifiers() []planmodifier.Object { + return b.PlanModifiers +} + +// ObjectValidators returns the Validators field value. +func (b SingleNestedBlock) ObjectValidators() []validator.Object { + return b.Validators +} + +// Type returns ObjectType or CustomType. 
+func (b SingleNestedBlock) Type() attr.Type { + if b.CustomType != nil { + return b.CustomType + } + + attrTypes := make(map[string]attr.Type, len(b.Attributes)+len(b.Blocks)) + + for name, attribute := range b.Attributes { + attrTypes[name] = attribute.GetType() + } + + for name, block := range b.Blocks { + attrTypes[name] = block.Type() + } + + return types.ObjectType{ + AttrTypes: attrTypes, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/string_attribute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/string_attribute.go new file mode 100644 index 0000000000..2d20b63fa0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/schema/string_attribute.go @@ -0,0 +1,208 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Ensure the implementation satisifies the desired interfaces. +var ( + _ Attribute = StringAttribute{} + _ fwxschema.AttributeWithStringPlanModifiers = StringAttribute{} + _ fwxschema.AttributeWithStringValidators = StringAttribute{} +) + +// StringAttribute represents a schema attribute that is a string. When +// retrieving the value for this attribute, use types.String as the value type +// unless the CustomType field is set. 
+// +// Terraform configurations configure this attribute using expressions that +// return a string or directly via double quote syntax. +// +// example_attribute = "value" +// +// Terraform configurations reference this attribute using the attribute name. +// +// .example_attribute +type StringAttribute struct { + // CustomType enables the use of a custom attribute type in place of the + // default basetypes.StringType. When retrieving data, the basetypes.StringValuable + // associated with this custom type must be used in place of types.String. + CustomType basetypes.StringTypable + + // Required indicates whether the practitioner must enter a value for + // this attribute or not. Required and Optional cannot both be true, + // and Required and Computed cannot both be true. + Required bool + + // Optional indicates whether the practitioner can choose to enter a value + // for this attribute or not. Optional and Required cannot both be true. + Optional bool + + // Computed indicates whether the provider may return its own value for + // this Attribute or not. Required and Computed cannot both be true. If + // Required and Optional are both false, Computed must be true, and the + // attribute will be considered "read only" for the practitioner, with + // only the provider able to set its value. + Computed bool + + // Sensitive indicates whether the value of this attribute should be + // considered sensitive data. Setting it to true will obscure the value + // in CLI output. Sensitive does not impact how values are stored, and + // practitioners are encouraged to store their state as if the entire + // file is sensitive. + Sensitive bool + + // Description is used in various tooling, like the language server, to + // give practitioners more information about what this attribute is, + // what it's for, and how it should be used. It should be written as + // plain text, with no special formatting. 
+ Description string + + // MarkdownDescription is used in various tooling, like the + // documentation generator, to give practitioners more information + // about what this attribute is, what it's for, and how it should be + // used. It should be formatted using Markdown. + MarkdownDescription string + + // DeprecationMessage defines warning diagnostic details to display when + // practitioner configurations use this Attribute. The warning diagnostic + // summary is automatically set to "Attribute Deprecated" along with + // configuration source file and line information. + // + // Set this field to a practitioner actionable message such as: + // + // - "Configure other_attribute instead. This attribute will be removed + // in the next major version of the provider." + // - "Remove this attribute's configuration as it no longer is used and + // the attribute will be removed in the next major version of the + // provider." + // + // In Terraform 1.2.7 and later, this warning diagnostic is displayed any + // time a practitioner attempts to configure a value for this attribute and + // certain scenarios where this attribute is referenced. + // + // In Terraform 1.2.6 and earlier, this warning diagnostic is only + // displayed when the Attribute is Required or Optional, and if the + // practitioner configuration sets the value to a known or unknown value + // (which may eventually be null). It has no effect when the Attribute is + // Computed-only (read-only; not Required or Optional). + // + // Across any Terraform version, there are no warnings raised for + // practitioner configuration values set directly to null, as there is no + // way for the framework to differentiate between an unset and null + // configuration due to how Terraform sends configuration information + // across the protocol. 
+ // + // Additional information about deprecation enhancements for read-only + // attributes can be found in: + // + // - https://github.com/hashicorp/terraform/issues/7569 + // + DeprecationMessage string + + // Validators define value validation functionality for the attribute. All + // elements of the slice of AttributeValidator are run, regardless of any + // previous error diagnostics. + // + // Many common use case validators can be found in the + // github.com/hashicorp/terraform-plugin-framework-validators Go module. + // + // If the Type field points to a custom type that implements the + // xattr.TypeWithValidate interface, the validators defined in this field + // are run in addition to the validation defined by the type. + Validators []validator.String + + // PlanModifiers defines a sequence of modifiers for this attribute at + // plan time. Schema-based plan modifications occur before any + // resource-level plan modifications. + // + // Schema-based plan modifications can adjust Terraform's plan by: + // + // - Requiring resource recreation. Typically used for configuration + // updates which cannot be done in-place. + // - Setting the planned value. Typically used for enhancing the plan + // to replace unknown values. Computed must be true or Terraform will + // return an error. If the plan value is known due to a known + // configuration value, the plan value cannot be changed or Terraform + // will return an error. + // + // Any errors will prevent further execution of this sequence or modifiers. + PlanModifiers []planmodifier.String +} + +// ApplyTerraform5AttributePathStep always returns an error as it is not +// possible to step further into a StringAttribute. +func (a StringAttribute) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return a.GetType().ApplyTerraform5AttributePathStep(step) +} + +// Equal returns true if the given Attribute is a StringAttribute +// and all fields are equal. 
+func (a StringAttribute) Equal(o fwschema.Attribute) bool { + if _, ok := o.(StringAttribute); !ok { + return false + } + + return fwschema.AttributesEqual(a, o) +} + +// GetDeprecationMessage returns the DeprecationMessage field value. +func (a StringAttribute) GetDeprecationMessage() string { + return a.DeprecationMessage +} + +// GetDescription returns the Description field value. +func (a StringAttribute) GetDescription() string { + return a.Description +} + +// GetMarkdownDescription returns the MarkdownDescription field value. +func (a StringAttribute) GetMarkdownDescription() string { + return a.MarkdownDescription +} + +// GetType returns types.StringType or the CustomType field value if defined. +func (a StringAttribute) GetType() attr.Type { + if a.CustomType != nil { + return a.CustomType + } + + return types.StringType +} + +// IsComputed returns the Computed field value. +func (a StringAttribute) IsComputed() bool { + return a.Computed +} + +// IsOptional returns the Optional field value. +func (a StringAttribute) IsOptional() bool { + return a.Optional +} + +// IsRequired returns the Required field value. +func (a StringAttribute) IsRequired() bool { + return a.Required +} + +// IsSensitive returns the Sensitive field value. +func (a StringAttribute) IsSensitive() bool { + return a.Sensitive +} + +// StringPlanModifiers returns the PlanModifiers field value. +func (a StringAttribute) StringPlanModifiers() []planmodifier.String { + return a.PlanModifiers +} + +// StringValidators returns the Validators field value. 
+func (a StringAttribute) StringValidators() []validator.String { + return a.Validators +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/state_upgrader.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/state_upgrader.go new file mode 100644 index 0000000000..3b724581ee --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/state_upgrader.go @@ -0,0 +1,36 @@ +package resource + +import ( + "context" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" +) + +// Implementation handler for a UpgradeState operation. +// +// This is used to encapsulate all upgrade logic from a prior state to the +// current schema version when a Resource implements the +// ResourceWithUpgradeState interface. +type StateUpgrader struct { + // Schema information for the prior state version. While not required, + // setting this will populate the UpgradeStateRequest type State + // field similar to other Resource data types. This allows for easier data + // handling such as calling Get() or GetAttribute(). + // + // If not set, prior state data is available in the + // UpgradeResourceStateRequest type RawState field. + PriorSchema *schema.Schema + + // Provider defined logic for upgrading a resource state from the prior + // state version to the current schema version. + // + // The context.Context parameter contains framework-defined loggers and + // supports request cancellation. + // + // The UpgradeStateRequest parameter contains the prior state data. + // If PriorSchema was set, the State field will be available. Otherwise, + // the RawState must be used. + // + // The UpgradeStateResponse parameter should contain the upgraded + // state data and can be used to signal any logic warnings or errors. 
+ StateUpgrader func(context.Context, UpgradeStateRequest, *UpgradeStateResponse) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/update.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/update.go new file mode 100644 index 0000000000..78ca95ef0f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/update.go @@ -0,0 +1,58 @@ +package resource + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/privatestate" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// UpdateRequest represents a request for the provider to update a +// resource. An instance of this request struct is supplied as an argument to +// the resource's Update function. +type UpdateRequest struct { + // Config is the configuration the user supplied for the resource. + // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config tfsdk.Config + + // Plan is the planned state for the resource. + Plan tfsdk.Plan + + // State is the current state of the resource prior to the Update + // operation. + State tfsdk.State + + // ProviderMeta is metadata from the provider_meta block of the module. + ProviderMeta tfsdk.Config + + // Private is provider-defined resource private state data which was previously + // stored with the resource state. Any existing data is copied to + // UpdateResponse.Private to prevent accidental private state data loss. + // + // Use the GetKey method to read data. Use the SetKey method on + // UpdateResponse.Private to update or remove a value. + Private *privatestate.ProviderData +} + +// UpdateResponse represents a response to an UpdateRequest. 
An +// instance of this response struct is supplied as +// an argument to the resource's Update function, in which the provider +// should set values on the UpdateResponse as appropriate. +type UpdateResponse struct { + // State is the state of the resource following the Update operation. + // This field is pre-populated from UpdateResourceRequest.Plan and + // should be set during the resource's Update operation. + State tfsdk.State + + // Private is the private state resource data following the Update operation. + // This field is pre-populated from UpdateRequest.Private and + // can be modified during the resource's Update operation. + Private *privatestate.ProviderData + + // Diagnostics report errors or warnings related to updating the + // resource. An empty slice indicates a successful operation with no + // warnings or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/upgrade_state.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/upgrade_state.go new file mode 100644 index 0000000000..ea4510f87b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/upgrade_state.go @@ -0,0 +1,71 @@ +package resource + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +// Request information for the provider logic to update a resource state +// from a prior state version to the current schema version. An instance of +// this is supplied as a parameter to a StateUpgrader, which ultimately comes +// from a Resource's UpgradeState method. 
+type UpgradeStateRequest struct { + // Previous state of the resource in JSON (Terraform CLI 0.12 and later) + // or flatmap format, depending on which version of Terraform CLI last + // wrote the resource state. This data is always available, regardless + // whether the wrapping StateUpgrader type PriorSchema field was + // present. + // + // This is advanced functionality for providers wanting to skip the full + // redeclaration of older schemas and instead use lower level handlers to + // transform data. A typical implementation for working with this data will + // call the Unmarshal() method. + // + // TODO: Create framework defined type that is not protocol specific. + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/340 + RawState *tfprotov6.RawState + + // Previous state of the resource if the wrapping StateUpgrader + // type PriorSchema field was present. When available, this allows for + // easier data handling such as calling Get() or GetAttribute(). + State *tfsdk.State +} + +// Response information for the provider logic to update a resource state +// from a prior state version to the current schema version. An instance of +// this is supplied as a parameter to a StateUpgrader, which ultimately came +// from a Resource's UpgradeState method. +type UpgradeStateResponse struct { + // Diagnostics report errors or warnings related to upgrading the resource + // state. An empty slice indicates a successful operation with no warnings + // or errors generated. + Diagnostics diag.Diagnostics + + // Upgraded state of the resource, which should match the current schema + // version. If set, this will override State. + // + // This field is intended only for advanced provider functionality, such as + // skipping the full redeclaration of older schemas or using lower level + // handlers to transform data. Call tfprotov6.NewDynamicValue() to set this + // value. 
+ // + // All data must be populated to prevent data loss during the upgrade + // operation. No prior state data is copied automatically. + // + // TODO: Remove in preference of requiring State, rather than using either + // a new framework defined type or keeping this protocol specific type. + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/340 + DynamicValue *tfprotov6.DynamicValue + + // Upgraded state of the resource, which should match the current schema + // version. If DynamicValue is set, it will override this value. + // + // This field allows for easier data handling such as calling Set() or + // SetAttribute(). It is generally recommended over working with the lower + // level types and functionality required for DynamicValue. + // + // All data must be populated to prevent data loss during the upgrade + // operation. No prior state data is copied automatically. + State tfsdk.State +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/validate_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/validate_config.go new file mode 100644 index 0000000000..43e508a48f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/resource/validate_config.go @@ -0,0 +1,30 @@ +package resource + +import ( + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// ValidateConfigRequest represents a request to validate the +// configuration of a resource. An instance of this request struct is +// supplied as an argument to the Resource ValidateConfig receiver method +// or automatically passed through to each ConfigValidator. +type ValidateConfigRequest struct { + // Config is the configuration the user supplied for the resource. 
+ // + // This configuration may contain unknown values if a user uses + // interpolation or other functionality that would prevent Terraform + // from knowing the value at request time. + Config tfsdk.Config +} + +// ValidateConfigResponse represents a response to a +// ValidateConfigRequest. An instance of this response struct is +// supplied as an argument to the Resource ValidateConfig receiver method +// or automatically passed through to each ConfigValidator. +type ValidateConfigResponse struct { + // Diagnostics report errors or warnings related to validating the resource + // configuration. An empty slice indicates success, with no warnings or + // errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/bool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/bool.go new file mode 100644 index 0000000000..0a0f0d6ddc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/bool.go @@ -0,0 +1,43 @@ +package validator + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Bool is a schema validator for types.Bool attributes. +type Bool interface { + Describer + + // ValidateBool should perform the validation. + ValidateBool(context.Context, BoolRequest, *BoolResponse) +} + +// BoolRequest is a request for types.Bool schema validation. +type BoolRequest struct { + // Path contains the path of the attribute for validation. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for validation. 
+ PathExpression path.Expression + + // Config contains the entire configuration of the data source, provider, or resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for validation from the configuration. + ConfigValue types.Bool +} + +// BoolResponse is a response to a BoolRequest. +type BoolResponse struct { + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/describer.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/describer.go new file mode 100644 index 0000000000..2c1b1e99d5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/describer.go @@ -0,0 +1,37 @@ +package validator + +import ( + "context" +) + +// Describer is the common documentation interface for extensible schema +// validation functionality. +type Describer interface { + // Description should describe the validation in plain text formatting. + // This information is used by provider logging and provider tooling such + // as documentation generation. + // + // The description should: + // - Begin with a lowercase or other character suitable for the middle of + // a sentence. + // - End without punctuation. + // - Use actionable language, such as "must" or "cannot". + // - Avoid newlines. Prefer separate validators instead. + // + // For example, "size must be less than 50 elements". + Description(context.Context) string + + // MarkdownDescription should describe the validation in Markdown + // formatting. This information is used by provider logging and provider + // tooling such as documentation generation. 
+ // + // The description should: + // - Begin with a lowercase or other character suitable for the middle of + // a sentence. + // - End without punctuation. + // - Use actionable language, such as "must" or "cannot". + // - Avoid newlines. Prefer separate validators instead. + // + // For example, "value must be `one` or `two`". + MarkdownDescription(context.Context) string +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/doc.go new file mode 100644 index 0000000000..c90e9901d0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/doc.go @@ -0,0 +1,33 @@ +// Package validator contains common schema validator interfaces and +// implementations. These validators are used by concept specific packages +// such as datasource/schema, provider/schema, and resource/schema. +// +// Each attr.Type has a corresponding {TYPE}Validator interface which +// implements concretely typed Validate{TYPE} methods, such as +// StringValidator and ValidateString. Custom attr.Type can also consider +// implementing native type validation via the attr/xattr.TypeWithValidate +// interface instead of schema validators. +// +// The framework has to choose between validator developers handling a concrete +// framework value type, such as types.Bool, or the framework interface for +// custom value basetypes. such as basetypes.BoolValuable. +// +// In the framework type model, the developer can immediately use the value. +// If the value was associated with a custom type and using the custom value +// type is desired, the developer must use the type's ValueFrom{TYPE} method. +// +// In the custom type model, the developer must always convert to a concreate +// type before using the value unless checking for null or unknown. 
Since any +// custom type may be passed due to the schema, it is possible, if not likely, +// that unknown concrete types will be passed to the validator. +// +// The framework chooses to pass the framework value type. This prevents the +// potential for unexpected runtime panics and simplifies development for +// easier use cases where the framework type is sufficient. More advanced +// developers can choose to implement native type validation for custom +// types or call the type's ValueFrom{TYPE} method to get the desired +// desired custom type in a validator. +// +// Validators that are not type dependent need to implement all interfaces, +// but can use shared logic to reduce implementation code. +package validator diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/float64.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/float64.go new file mode 100644 index 0000000000..7097509e8d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/float64.go @@ -0,0 +1,43 @@ +package validator + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Float64 is a schema validator for types.Float64 attributes. +type Float64 interface { + Describer + + // ValidateFloat64 should perform the validation. + ValidateFloat64(context.Context, Float64Request, *Float64Response) +} + +// Float64Request is a request for types.Float64 schema validation. +type Float64Request struct { + // Path contains the path of the attribute for validation. Use this path + // for any response diagnostics. 
+ Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for validation. + PathExpression path.Expression + + // Config contains the entire configuration of the data source, provider, or resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for validation from the configuration. + ConfigValue types.Float64 +} + +// Float64Response is a response to a Float64Request. +type Float64Response struct { + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/int64.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/int64.go new file mode 100644 index 0000000000..85b44d768f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/int64.go @@ -0,0 +1,43 @@ +package validator + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Int64 is a schema validator for types.Int64 attributes. +type Int64 interface { + Describer + + // ValidateInt64 should perform the validation. + ValidateInt64(context.Context, Int64Request, *Int64Response) +} + +// Int64Request is a request for types.Int64 schema validation. +type Int64Request struct { + // Path contains the path of the attribute for validation. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for validation. 
+ PathExpression path.Expression + + // Config contains the entire configuration of the data source, provider, or resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for validation from the configuration. + ConfigValue types.Int64 +} + +// Int64Response is a response to a Int64Request. +type Int64Response struct { + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/list.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/list.go new file mode 100644 index 0000000000..cd417317b4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/list.go @@ -0,0 +1,43 @@ +package validator + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// List is a schema validator for types.List attributes. +type List interface { + Describer + + // ValidateList should perform the validation. + ValidateList(context.Context, ListRequest, *ListResponse) +} + +// ListRequest is a request for types.List schema validation. +type ListRequest struct { + // Path contains the path of the attribute for validation. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for validation. + PathExpression path.Expression + + // Config contains the entire configuration of the data source, provider, or resource. 
+ Config tfsdk.Config + + // ConfigValue contains the value of the attribute for validation from the configuration. + ConfigValue types.List +} + +// ListResponse is a response to a ListRequest. +type ListResponse struct { + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/map.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/map.go new file mode 100644 index 0000000000..bee8f3a707 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/map.go @@ -0,0 +1,43 @@ +package validator + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Map is a schema validator for types.Map attributes. +type Map interface { + Describer + + // ValidateMap should perform the validation. + ValidateMap(context.Context, MapRequest, *MapResponse) +} + +// MapRequest is a request for types.Map schema validation. +type MapRequest struct { + // Path contains the path of the attribute for validation. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for validation. + PathExpression path.Expression + + // Config contains the entire configuration of the data source, provider, or resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for validation from the configuration. + ConfigValue types.Map +} + +// MapResponse is a response to a MapRequest. 
+type MapResponse struct { + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/number.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/number.go new file mode 100644 index 0000000000..5630f5d4b3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/number.go @@ -0,0 +1,43 @@ +package validator + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Number is a schema validator for types.Number attributes. +type Number interface { + Describer + + // ValidateNumber should perform the validation. + ValidateNumber(context.Context, NumberRequest, *NumberResponse) +} + +// NumberRequest is a request for types.Number schema validation. +type NumberRequest struct { + // Path contains the path of the attribute for validation. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for validation. + PathExpression path.Expression + + // Config contains the entire configuration of the data source, provider, or resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for validation from the configuration. + ConfigValue types.Number +} + +// NumberResponse is a response to a NumberRequest. +type NumberResponse struct { + // Diagnostics report errors or warnings related to validating the data + // source configuration. 
An empty slice indicates success, with no warnings + // or errors generated. + Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/object.go new file mode 100644 index 0000000000..bab1d3a685 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/object.go @@ -0,0 +1,43 @@ +package validator + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Object is a schema validator for types.Object attributes. +type Object interface { + Describer + + // ValidateObject should perform the validation. + ValidateObject(context.Context, ObjectRequest, *ObjectResponse) +} + +// ObjectRequest is a request for types.Object schema validation. +type ObjectRequest struct { + // Path contains the path of the attribute for validation. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for validation. + PathExpression path.Expression + + // Config contains the entire configuration of the data source, provider, or resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for validation from the configuration. + ConfigValue types.Object +} + +// ObjectResponse is a response to a ObjectRequest. +type ObjectResponse struct { + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/set.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/set.go new file mode 100644 index 0000000000..d064659d41 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/set.go @@ -0,0 +1,43 @@ +package validator + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Set is a schema validator for types.Set attributes. +type Set interface { + Describer + + // ValidateSet should perform the validation. + ValidateSet(context.Context, SetRequest, *SetResponse) +} + +// SetRequest is a request for types.Set schema validation. +type SetRequest struct { + // Path contains the path of the attribute for validation. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for validation. + PathExpression path.Expression + + // Config contains the entire configuration of the data source, provider, or resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for validation from the configuration. + ConfigValue types.Set +} + +// SetResponse is a response to a SetRequest. +type SetResponse struct { + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/string.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/string.go new file mode 100644 index 0000000000..725e2c3c01 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/schema/validator/string.go @@ -0,0 +1,43 @@ +package validator + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// String is a schema validator for types.String attributes. +type String interface { + Describer + + // ValidateString should perform the validation. + ValidateString(context.Context, StringRequest, *StringResponse) +} + +// StringRequest is a request for types.String schema validation. +type StringRequest struct { + // Path contains the path of the attribute for validation. Use this path + // for any response diagnostics. + Path path.Path + + // PathExpression contains the expression matching the exact path + // of the attribute for validation. + PathExpression path.Expression + + // Config contains the entire configuration of the data source, provider, or resource. + Config tfsdk.Config + + // ConfigValue contains the value of the attribute for validation from the configuration. + ConfigValue types.String +} + +// StringResponse is a response to a StringRequest. +type StringResponse struct { + // Diagnostics report errors or warnings related to validating the data + // source configuration. An empty slice indicates success, with no warnings + // or errors generated. 
+ Diagnostics diag.Diagnostics +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/config.go new file mode 100644 index 0000000000..4bc5129c4f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/config.go @@ -0,0 +1,50 @@ +package tfsdk + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Config represents a Terraform config. +type Config struct { + Raw tftypes.Value + Schema fwschema.Schema +} + +// Get populates the struct passed as `target` with the entire config. +func (c Config) Get(ctx context.Context, target interface{}) diag.Diagnostics { + return c.data().Get(ctx, target) +} + +// GetAttribute retrieves the attribute or block found at `path` and populates +// the `target` with the value. This method is intended for top level schema +// attributes or blocks. Use `types` package methods or custom types to step +// into collections. +// +// Attributes or elements under null or unknown collections return null +// values, however this behavior is not protected by compatibility promises. +func (c Config) GetAttribute(ctx context.Context, path path.Path, target interface{}) diag.Diagnostics { + return c.data().GetAtPath(ctx, path, target) +} + +// PathMatches returns all matching path.Paths from the given path.Expression. +// +// If a parent path is null or unknown, which would prevent a full expression +// from matching, the parent path is returned rather than no match to prevent +// false positives. 
+func (c Config) PathMatches(ctx context.Context, pathExpr path.Expression) (path.Paths, diag.Diagnostics) { + return c.data().PathMatches(ctx, pathExpr) +} + +func (c Config) data() fwschemadata.Data { + return fwschemadata.Data{ + Description: fwschemadata.DataDescriptionConfiguration, + Schema: c.Schema, + TerraformValue: c.Raw, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/convert.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/convert.go new file mode 100644 index 0000000000..bfbcc60fdc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/convert.go @@ -0,0 +1,28 @@ +package tfsdk + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +// ConvertValue creates a new attr.Value of the attr.Type `typ`, using the data +// in `val`, which can be of any attr.Type so long as its TerraformType method +// returns a tftypes.Type that `typ`'s ValueFromTerraform method can accept. +func ConvertValue(ctx context.Context, val attr.Value, typ attr.Type) (attr.Value, diag.Diagnostics) { + newVal, err := val.ToTerraformValue(ctx) + if err != nil { + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Error converting value", + fmt.Sprintf("An unexpected error was encountered converting a %T to a %s. This is always a problem with the provider. Please tell the provider developers that %T ran into the following error during ToTerraformValue: %s", val, typ, val, err), + )} + } + res, err := typ.ValueFromTerraform(ctx, newVal) + if err != nil { + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Error converting value", + fmt.Sprintf("An unexpected error was encountered converting a %T to a %s. This is always a problem with the provider. 
Please tell the provider developers that %s returned the following error when calling ValueFromTerraform: %s", val, typ, typ, err), + )} + } + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/doc.go new file mode 100644 index 0000000000..5711119d72 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/doc.go @@ -0,0 +1,2 @@ +// Package tfsdk contains core framework functionality for schema data. +package tfsdk diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/plan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/plan.go new file mode 100644 index 0000000000..c2916c7eb2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/plan.go @@ -0,0 +1,91 @@ +package tfsdk + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Plan represents a Terraform plan. +type Plan struct { + Raw tftypes.Value + Schema fwschema.Schema +} + +// Get populates the struct passed as `target` with the entire plan. +func (p Plan) Get(ctx context.Context, target interface{}) diag.Diagnostics { + return p.data().Get(ctx, target) +} + +// GetAttribute retrieves the attribute or block found at `path` and populates +// the `target` with the value. This method is intended for top level schema +// attributes or blocks. Use `types` package methods or custom types to step +// into collections. 
+// +// Attributes or elements under null or unknown collections return null +// values, however this behavior is not protected by compatibility promises. +func (p Plan) GetAttribute(ctx context.Context, path path.Path, target interface{}) diag.Diagnostics { + return p.data().GetAtPath(ctx, path, target) +} + +// PathMatches returns all matching path.Paths from the given path.Expression. +// +// If a parent path is null or unknown, which would prevent a full expression +// from matching, the parent path is returned rather than no match to prevent +// false positives. +func (p Plan) PathMatches(ctx context.Context, pathExpr path.Expression) (path.Paths, diag.Diagnostics) { + return p.data().PathMatches(ctx, pathExpr) +} + +// Set populates the entire plan using the supplied Go value. The value `val` +// should be a struct whose values have one of the attr.Value types. Each field +// must be tagged with the corresponding schema field. +func (p *Plan) Set(ctx context.Context, val interface{}) diag.Diagnostics { + data := p.data() + diags := data.Set(ctx, val) + + if diags.HasError() { + return diags + } + + p.Raw = data.TerraformValue + + return diags +} + +// SetAttribute sets the attribute at `path` using the supplied Go value. +// +// The attribute path and value must be valid with the current schema. If the +// attribute path already has a value, it will be overwritten. If the attribute +// path does not have a value, it will be added, including any parent attribute +// paths as necessary. +// +// The value must not be an untyped nil. Use a typed nil or types package null +// value function instead. For example with a types.StringType attribute, +// use (*string)(nil) or types.StringNull(). +// +// Lists can only have the next element added according to the current length. 
+func (p *Plan) SetAttribute(ctx context.Context, path path.Path, val interface{}) diag.Diagnostics { + data := p.data() + diags := data.SetAtPath(ctx, path, val) + + if diags.HasError() { + return diags + } + + p.Raw = data.TerraformValue + + return diags +} + +func (p Plan) data() *fwschemadata.Data { + return &fwschemadata.Data{ + Description: fwschemadata.DataDescriptionPlan, + Schema: p.Schema, + TerraformValue: p.Raw, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/state.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/state.go new file mode 100644 index 0000000000..aa744753bd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/state.go @@ -0,0 +1,110 @@ +package tfsdk + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschema" + "github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// State represents a Terraform state. +type State struct { + Raw tftypes.Value + Schema fwschema.Schema +} + +// Get populates the struct passed as `target` with the entire state. +func (s State) Get(ctx context.Context, target interface{}) diag.Diagnostics { + return s.data().Get(ctx, target) +} + +// GetAttribute retrieves the attribute or block found at `path` and populates +// the `target` with the value. This method is intended for top level schema +// attributes or blocks. Use `types` package methods or custom types to step +// into collections. +// +// Attributes or elements under null or unknown collections return null +// values, however this behavior is not protected by compatibility promises. 
+func (s State) GetAttribute(ctx context.Context, path path.Path, target interface{}) diag.Diagnostics { + return s.data().GetAtPath(ctx, path, target) +} + +// PathMatches returns all matching path.Paths from the given path.Expression. +// +// If a parent path is null or unknown, which would prevent a full expression +// from matching, the parent path is returned rather than no match to prevent +// false positives. +func (s State) PathMatches(ctx context.Context, pathExpr path.Expression) (path.Paths, diag.Diagnostics) { + return s.data().PathMatches(ctx, pathExpr) +} + +// Set populates the entire state using the supplied Go value. The value `val` +// should be a struct whose values have one of the attr.Value types. Each field +// must be tagged with the corresponding schema field. +func (s *State) Set(ctx context.Context, val interface{}) diag.Diagnostics { + if val == nil { + err := fmt.Errorf("cannot set nil as entire state; to remove a resource from state, call State.RemoveResource, instead") + return diag.Diagnostics{ + diag.NewErrorDiagnostic( + "State Read Error", + "An unexpected error was encountered trying to write the state. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ), + } + } + + data := s.data() + diags := data.Set(ctx, val) + + if diags.HasError() { + return diags + } + + s.Raw = data.TerraformValue + + return diags +} + +// SetAttribute sets the attribute at `path` using the supplied Go value. +// +// The attribute path and value must be valid with the current schema. If the +// attribute path already has a value, it will be overwritten. If the attribute +// path does not have a value, it will be added, including any parent attribute +// paths as necessary. +// +// The value must not be an untyped nil. Use a typed nil or types package null +// value function instead. For example with a types.StringType attribute, +// use (*string)(nil) or types.StringNull(). 
+// +// Lists can only have the next element added according to the current length. +func (s *State) SetAttribute(ctx context.Context, path path.Path, val interface{}) diag.Diagnostics { + data := s.data() + diags := data.SetAtPath(ctx, path, val) + + if diags.HasError() { + return diags + } + + s.Raw = data.TerraformValue + + return diags +} + +// RemoveResource removes the entire resource from state. +// +// If a Resource type Delete method is completed without error, this is +// automatically called on the DeleteResourceResponse.State. +func (s *State) RemoveResource(ctx context.Context) { + s.Raw = tftypes.NewValue(s.Schema.Type().TerraformType(ctx), nil) +} + +func (s State) data() fwschemadata.Data { + return fwschemadata.Data{ + Description: fwschemadata.DataDescriptionState, + Schema: s.Schema, + TerraformValue: s.Raw, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/value_as.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/value_as.go new file mode 100644 index 0000000000..0f75adf486 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/value_as.go @@ -0,0 +1,27 @@ +package tfsdk + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/reflect" + "github.com/hashicorp/terraform-plugin-framework/path" +) + +// ValueAs takes the attr.Value `val` and populates the Go value `target` with its content. +// +// This is achieved using reflection rules provided by the internal/reflect package. 
+func ValueAs(ctx context.Context, val attr.Value, target interface{}) diag.Diagnostics { + if reflect.IsGenericAttrValue(ctx, target) { + *(target.(*attr.Value)) = val + return nil + } + raw, err := val.ToTerraformValue(ctx) + if err != nil { + return diag.Diagnostics{diag.NewErrorDiagnostic("Error converting value", + fmt.Sprintf("An unexpected error was encountered converting a %T to its equivalent Terraform representation. This is always a bug in the provider.\n\nError: %s", val, err))} + } + return reflect.Into(ctx, val.Type(ctx), raw, target, reflect.Options{}, path.Empty()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/value_from.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/value_from.go new file mode 100644 index 0000000000..0c0bcbbf1d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/tfsdk/value_from.go @@ -0,0 +1,23 @@ +package tfsdk + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/reflect" + "github.com/hashicorp/terraform-plugin-framework/path" +) + +// ValueFrom takes the Go value `val` and populates `target` with an attr.Value, +// based on the type definition provided in `targetType`. +// +// This is achieved using reflection rules provided by the internal/reflect package. 
+func ValueFrom(ctx context.Context, val interface{}, targetType attr.Type, target interface{}) diag.Diagnostics { + v, diags := reflect.FromValue(ctx, targetType, val, path.Empty()) + if diags.HasError() { + return diags + } + + return ValueAs(ctx, v, target) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/bool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/bool.go new file mode 100644 index 0000000000..d0e3a29107 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/bool.go @@ -0,0 +1,137 @@ +package basetypes + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +var ( + _ BoolValuable = BoolValue{} +) + +// BoolValuable extends attr.Value for boolean value types. +// Implement this interface to create a custom Bool value type. +type BoolValuable interface { + attr.Value + + // ToBoolValue should convert the value type to a Bool. + ToBoolValue(ctx context.Context) (BoolValue, diag.Diagnostics) +} + +// NewBoolNull creates a Bool with a null value. Determine whether the value is +// null via the Bool type IsNull method. +func NewBoolNull() BoolValue { + return BoolValue{ + state: attr.ValueStateNull, + } +} + +// NewBoolUnknown creates a Bool with an unknown value. Determine whether the +// value is unknown via the Bool type IsUnknown method. +func NewBoolUnknown() BoolValue { + return BoolValue{ + state: attr.ValueStateUnknown, + } +} + +// NewBoolValue creates a Bool with a known value. Access the value via the Bool +// type ValueBool method. +func NewBoolValue(value bool) BoolValue { + return BoolValue{ + state: attr.ValueStateKnown, + value: value, + } +} + +// BoolValue represents a boolean value. 
+type BoolValue struct { + // state represents whether the value is null, unknown, or known. The + // zero-value is null. + state attr.ValueState + + // value contains the known value, if not null or unknown. + value bool +} + +// Type returns a BoolType. +func (b BoolValue) Type(_ context.Context) attr.Type { + return BoolType{} +} + +// ToTerraformValue returns the data contained in the Bool as a tftypes.Value. +func (b BoolValue) ToTerraformValue(_ context.Context) (tftypes.Value, error) { + switch b.state { + case attr.ValueStateKnown: + if err := tftypes.ValidateValue(tftypes.Bool, b.value); err != nil { + return tftypes.NewValue(tftypes.Bool, tftypes.UnknownValue), err + } + + return tftypes.NewValue(tftypes.Bool, b.value), nil + case attr.ValueStateNull: + return tftypes.NewValue(tftypes.Bool, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(tftypes.Bool, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Bool state in ToTerraformValue: %s", b.state)) + } +} + +// Equal returns true if `other` is a *Bool and has the same value as `b`. +func (b BoolValue) Equal(other attr.Value) bool { + o, ok := other.(BoolValue) + + if !ok { + return false + } + + if b.state != o.state { + return false + } + + if b.state != attr.ValueStateKnown { + return true + } + + return b.value == o.value +} + +// IsNull returns true if the Bool represents a null value. +func (b BoolValue) IsNull() bool { + return b.state == attr.ValueStateNull +} + +// IsUnknown returns true if the Bool represents a currently unknown value. +func (b BoolValue) IsUnknown() bool { + return b.state == attr.ValueStateUnknown +} + +// String returns a human-readable representation of the Bool value. +// The string returned here is not protected by any compatibility guarantees, +// and is intended for logging and error reporting. 
+func (b BoolValue) String() string { + if b.IsUnknown() { + return attr.UnknownValueString + } + + if b.IsNull() { + return attr.NullValueString + } + + return fmt.Sprintf("%t", b.value) +} + +// ValueBool returns the known bool value. If Bool is null or unknown, returns +// false. +func (b BoolValue) ValueBool() bool { + return b.value +} + +// ToBoolValue returns Bool. +func (b BoolValue) ToBoolValue(context.Context) (BoolValue, diag.Diagnostics) { + return b, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/bool_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/bool_type.go new file mode 100644 index 0000000000..5891ce9a24 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/bool_type.go @@ -0,0 +1,83 @@ +package basetypes + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// BoolTypable extends attr.Type for bool types. +// Implement this interface to create a custom BoolType type. +type BoolTypable interface { + attr.Type + + // ValueFromBool should convert the Bool to a BoolValuable type. + ValueFromBool(context.Context, BoolValue) (BoolValuable, diag.Diagnostics) +} + +var _ BoolTypable = BoolType{} + +// BoolType is the base framework type for a boolean. BoolValue is the +// associated value type. +type BoolType struct{} + +// ApplyTerraform5AttributePathStep applies the given AttributePathStep to the +// type. +func (t BoolType) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return nil, fmt.Errorf("cannot apply AttributePathStep %T to %s", step, t.String()) +} + +// Equal returns true if the given type is equivalent. 
+func (t BoolType) Equal(o attr.Type) bool { + _, ok := o.(BoolType) + + return ok +} + +// String returns a human readable string of the type name. +func (t BoolType) String() string { + return "basetypes.BoolType" +} + +// TerraformType returns the tftypes.Type that should be used to represent this +// framework type. +func (t BoolType) TerraformType(_ context.Context) tftypes.Type { + return tftypes.Bool +} + +// ValueFromBool returns a BoolValuable type given a BoolValue. +func (t BoolType) ValueFromBool(_ context.Context, v BoolValue) (BoolValuable, diag.Diagnostics) { + return v, nil +} + +// ValueFromTerraform returns a Value given a tftypes.Value. This is meant to +// convert the tftypes.Value into a more convenient Go type for the provider to +// consume the data with. +func (t BoolType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if !in.IsKnown() { + return NewBoolUnknown(), nil + } + + if in.IsNull() { + return NewBoolNull(), nil + } + + var v bool + + err := in.As(&v) + + if err != nil { + return nil, err + } + + return NewBoolValue(v), nil +} + +// ValueType returns the Value type. +func (t BoolType) ValueType(_ context.Context) attr.Value { + // This Value does not need to be valid. + return BoolValue{} +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/doc.go new file mode 100644 index 0000000000..25ce0e1e10 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/doc.go @@ -0,0 +1,4 @@ +// Package basetypes contains the implementations for framework-defined data +// types and values, such as boolean, floating point, integer, list, map, +// object, set, and string. Embed these implementations to create custom types. 
+package basetypes diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/float64.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/float64.go new file mode 100644 index 0000000000..d59b9cf991 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/float64.go @@ -0,0 +1,143 @@ +package basetypes + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +var ( + _ Float64Valuable = Float64Value{} +) + +// Float64Valuable extends attr.Value for float64 value types. +// Implement this interface to create a custom Float64 value type. +type Float64Valuable interface { + attr.Value + + // ToFloat64Value should convert the value type to a Float64. + ToFloat64Value(ctx context.Context) (Float64Value, diag.Diagnostics) +} + +// Float64Null creates a Float64 with a null value. Determine whether the value is +// null via the Float64 type IsNull method. +func NewFloat64Null() Float64Value { + return Float64Value{ + state: attr.ValueStateNull, + } +} + +// Float64Unknown creates a Float64 with an unknown value. Determine whether the +// value is unknown via the Float64 type IsUnknown method. +// +// Setting the deprecated Float64 type Null, Unknown, or Value fields after +// creating a Float64 with this function has no effect. +func NewFloat64Unknown() Float64Value { + return Float64Value{ + state: attr.ValueStateUnknown, + } +} + +// Float64Value creates a Float64 with a known value. Access the value via the Float64 +// type ValueFloat64 method. +// +// Setting the deprecated Float64 type Null, Unknown, or Value fields after +// creating a Float64 with this function has no effect. 
+func NewFloat64Value(value float64) Float64Value { + return Float64Value{ + state: attr.ValueStateKnown, + value: value, + } +} + +// Float64Value represents a 64-bit floating point value, exposed as a float64. +type Float64Value struct { + // state represents whether the value is null, unknown, or known. The + // zero-value is null. + state attr.ValueState + + // value contains the known value, if not null or unknown. + value float64 +} + +// Equal returns true if `other` is a Float64 and has the same value as `f`. +func (f Float64Value) Equal(other attr.Value) bool { + o, ok := other.(Float64Value) + + if !ok { + return false + } + + if f.state != o.state { + return false + } + + if f.state != attr.ValueStateKnown { + return true + } + + return f.value == o.value +} + +// ToTerraformValue returns the data contained in the Float64 as a tftypes.Value. +func (f Float64Value) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + switch f.state { + case attr.ValueStateKnown: + if err := tftypes.ValidateValue(tftypes.Number, f.value); err != nil { + return tftypes.NewValue(tftypes.Number, tftypes.UnknownValue), err + } + + return tftypes.NewValue(tftypes.Number, f.value), nil + case attr.ValueStateNull: + return tftypes.NewValue(tftypes.Number, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(tftypes.Number, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Float64 state in ToTerraformValue: %s", f.state)) + } +} + +// Type returns a Float64Type. +func (f Float64Value) Type(ctx context.Context) attr.Type { + return Float64Type{} +} + +// IsNull returns true if the Float64 represents a null value. +func (f Float64Value) IsNull() bool { + return f.state == attr.ValueStateNull +} + +// IsUnknown returns true if the Float64 represents a currently unknown value. +func (f Float64Value) IsUnknown() bool { + return f.state == attr.ValueStateUnknown +} + +// String returns a human-readable representation of the Float64 value. 
+// The string returned here is not protected by any compatibility guarantees, +// and is intended for logging and error reporting. +func (f Float64Value) String() string { + if f.IsUnknown() { + return attr.UnknownValueString + } + + if f.IsNull() { + return attr.NullValueString + } + + return fmt.Sprintf("%f", f.value) +} + +// ValueFloat64 returns the known float64 value. If Float64 is null or unknown, returns +// 0.0. +func (f Float64Value) ValueFloat64() float64 { + return f.value +} + +// ToFloat64Value returns Float64. +func (f Float64Value) ToFloat64Value(context.Context) (Float64Value, diag.Diagnostics) { + return f, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/float64_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/float64_type.go new file mode 100644 index 0000000000..76494dc904 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/float64_type.go @@ -0,0 +1,154 @@ +package basetypes + +import ( + "context" + "fmt" + "math" + "math/big" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Float64Typable extends attr.Type for float64 types. +// Implement this interface to create a custom Float64Type type. +type Float64Typable interface { + xattr.TypeWithValidate + + // ValueFromFloat64 should convert the Float64 to a Float64Valuable type. + ValueFromFloat64(context.Context, Float64Value) (Float64Valuable, diag.Diagnostics) +} + +var _ Float64Typable = Float64Type{} + +// Float64Type is the base framework type for a floating point number. +// Float64Value is the associated value type. 
+type Float64Type struct{} + +// ApplyTerraform5AttributePathStep applies the given AttributePathStep to the +// type. +func (t Float64Type) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return nil, fmt.Errorf("cannot apply AttributePathStep %T to %s", step, t.String()) +} + +// Equal returns true if the given type is equivalent. +func (t Float64Type) Equal(o attr.Type) bool { + _, ok := o.(Float64Type) + + return ok +} + +// String returns a human readable string of the type name. +func (t Float64Type) String() string { + return "basetypes.Float64Type" +} + +// TerraformType returns the tftypes.Type that should be used to represent this +// framework type. +func (t Float64Type) TerraformType(_ context.Context) tftypes.Type { + return tftypes.Number +} + +// Validate implements type validation. +func (t Float64Type) Validate(ctx context.Context, in tftypes.Value, path path.Path) diag.Diagnostics { + var diags diag.Diagnostics + + if in.Type() == nil { + return diags + } + + if !in.Type().Equal(tftypes.Number) { + diags.AddAttributeError( + path, + "Float64 Type Validation Error", + "An unexpected error was encountered trying to validate an attribute value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Expected Number value, received %T with value: %v", in, in), + ) + return diags + } + + if !in.IsKnown() || in.IsNull() { + return diags + } + + var value *big.Float + err := in.As(&value) + + if err != nil { + diags.AddAttributeError( + path, + "Float64 Type Validation Error", + "An unexpected error was encountered trying to validate an attribute value. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Cannot convert value to big.Float: %s", err), + ) + return diags + } + + float64Value, accuracy := value.Float64() + + // Underflow + // Reference: https://pkg.go.dev/math/big#Float.Float64 + if float64Value == 0 && accuracy != big.Exact { + diags.AddAttributeError( + path, + "Float64 Type Validation Error", + fmt.Sprintf("Value %s cannot be represented as a 64-bit floating point.", value), + ) + return diags + } + + // Overflow + // Reference: https://pkg.go.dev/math/big#Float.Float64 + if math.IsInf(float64Value, 0) { + diags.AddAttributeError( + path, + "Float64 Type Validation Error", + fmt.Sprintf("Value %s cannot be represented as a 64-bit floating point.", value), + ) + return diags + } + + return diags +} + +// ValueFromFloat64 returns a Float64Valuable type given a Float64Value. +func (t Float64Type) ValueFromFloat64(_ context.Context, v Float64Value) (Float64Valuable, diag.Diagnostics) { + return v, nil +} + +// ValueFromTerraform returns a Value given a tftypes.Value. This is meant to +// convert the tftypes.Value into a more convenient Go type for the provider to +// consume the data with. +func (t Float64Type) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if !in.IsKnown() { + return NewFloat64Unknown(), nil + } + + if in.IsNull() { + return NewFloat64Null(), nil + } + + var bigF *big.Float + err := in.As(&bigF) + + if err != nil { + return nil, err + } + + f, accuracy := bigF.Float64() + + if accuracy != 0 { + return nil, fmt.Errorf("Value %s cannot be represented as a 64-bit floating point.", bigF) + } + + return NewFloat64Value(f), nil +} + +// ValueType returns the Value type. +func (t Float64Type) ValueType(_ context.Context) attr.Value { + // This Value does not need to be valid. 
+ return Float64Value{} +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/int64.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/int64.go new file mode 100644 index 0000000000..d924c5b971 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/int64.go @@ -0,0 +1,137 @@ +package basetypes + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +var ( + _ Int64Valuable = Int64Value{} +) + +// Int64Valuable extends attr.Value for int64 value types. +// Implement this interface to create a custom Int64 value type. +type Int64Valuable interface { + attr.Value + + // ToInt64Value should convert the value type to an Int64. + ToInt64Value(ctx context.Context) (Int64Value, diag.Diagnostics) +} + +// NewInt64Null creates a Int64 with a null value. Determine whether the value is +// null via the Int64 type IsNull method. +func NewInt64Null() Int64Value { + return Int64Value{ + state: attr.ValueStateNull, + } +} + +// NewInt64Unknown creates a Int64 with an unknown value. Determine whether the +// value is unknown via the Int64 type IsUnknown method. +func NewInt64Unknown() Int64Value { + return Int64Value{ + state: attr.ValueStateUnknown, + } +} + +// NewInt64Value creates a Int64 with a known value. Access the value via the Int64 +// type ValueInt64 method. +func NewInt64Value(value int64) Int64Value { + return Int64Value{ + state: attr.ValueStateKnown, + value: value, + } +} + +// Int64Value represents a 64-bit integer value, exposed as an int64. +type Int64Value struct { + // state represents whether the value is null, unknown, or known. The + // zero-value is null. 
+ state attr.ValueState + + // value contains the known value, if not null or unknown. + value int64 +} + +// Equal returns true if `other` is an Int64 and has the same value as `i`. +func (i Int64Value) Equal(other attr.Value) bool { + o, ok := other.(Int64Value) + + if !ok { + return false + } + + if i.state != o.state { + return false + } + + if i.state != attr.ValueStateKnown { + return true + } + + return i.value == o.value +} + +// ToTerraformValue returns the data contained in the Int64 as a tftypes.Value. +func (i Int64Value) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + switch i.state { + case attr.ValueStateKnown: + if err := tftypes.ValidateValue(tftypes.Number, i.value); err != nil { + return tftypes.NewValue(tftypes.Number, tftypes.UnknownValue), err + } + + return tftypes.NewValue(tftypes.Number, i.value), nil + case attr.ValueStateNull: + return tftypes.NewValue(tftypes.Number, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(tftypes.Number, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Int64 state in ToTerraformValue: %s", i.state)) + } +} + +// Type returns a Int64Type. +func (i Int64Value) Type(ctx context.Context) attr.Type { + return Int64Type{} +} + +// IsNull returns true if the Int64 represents a null value. +func (i Int64Value) IsNull() bool { + return i.state == attr.ValueStateNull +} + +// IsUnknown returns true if the Int64 represents a currently unknown value. +func (i Int64Value) IsUnknown() bool { + return i.state == attr.ValueStateUnknown +} + +// String returns a human-readable representation of the Int64 value. +// The string returned here is not protected by any compatibility guarantees, +// and is intended for logging and error reporting. 
+func (i Int64Value) String() string { + if i.IsUnknown() { + return attr.UnknownValueString + } + + if i.IsNull() { + return attr.NullValueString + } + + return fmt.Sprintf("%d", i.value) +} + +// ValueInt64 returns the known float64 value. If Int64 is null or unknown, returns +// 0.0. +func (i Int64Value) ValueInt64() int64 { + return i.value +} + +// ToInt64Value returns Int64. +func (i Int64Value) ToInt64Value(context.Context) (Int64Value, diag.Diagnostics) { + return i, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/int64_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/int64_type.go new file mode 100644 index 0000000000..91c9fdceb5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/int64_type.go @@ -0,0 +1,153 @@ +package basetypes + +import ( + "context" + "fmt" + "math/big" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Int64Typable extends attr.Type for int64 types. +// Implement this interface to create a custom Int64Type type. +type Int64Typable interface { + xattr.TypeWithValidate + + // ValueFromInt64 should convert the Int64 to a Int64Valuable type. + ValueFromInt64(context.Context, Int64Value) (Int64Valuable, diag.Diagnostics) +} + +var _ Int64Typable = Int64Type{} + +// Int64Type is the base framework type for an integer number. +// Int64Value is the associated value type. +type Int64Type struct{} + +// ApplyTerraform5AttributePathStep applies the given AttributePathStep to the +// type. 
+func (t Int64Type) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return nil, fmt.Errorf("cannot apply AttributePathStep %T to %s", step, t.String()) +} + +// Equal returns true if the given type is equivalent. +func (t Int64Type) Equal(o attr.Type) bool { + _, ok := o.(Int64Type) + + return ok +} + +// String returns a human readable string of the type name. +func (t Int64Type) String() string { + return "basetypes.Int64Type" +} + +// TerraformType returns the tftypes.Type that should be used to represent this +// framework type. +func (t Int64Type) TerraformType(_ context.Context) tftypes.Type { + return tftypes.Number +} + +// Validate implements type validation. +func (t Int64Type) Validate(ctx context.Context, in tftypes.Value, path path.Path) diag.Diagnostics { + var diags diag.Diagnostics + + if in.Type() == nil { + return diags + } + + if !in.Type().Equal(tftypes.Number) { + diags.AddAttributeError( + path, + "Int64 Type Validation Error", + "An unexpected error was encountered trying to validate an attribute value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Expected Number value, received %T with value: %v", in, in), + ) + return diags + } + + if !in.IsKnown() || in.IsNull() { + return diags + } + + var value *big.Float + err := in.As(&value) + + if err != nil { + diags.AddAttributeError( + path, + "Int64 Type Validation Error", + "An unexpected error was encountered trying to validate an attribute value. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Cannot convert value to big.Float: %s", err), + ) + return diags + } + + if !value.IsInt() { + diags.AddAttributeError( + path, + "Int64 Type Validation Error", + fmt.Sprintf("Value %s is not an integer.", value), + ) + return diags + } + + _, accuracy := value.Int64() + + if accuracy != 0 { + diags.AddAttributeError( + path, + "Int64 Type Validation Error", + fmt.Sprintf("Value %s cannot be represented as a 64-bit integer.", value), + ) + return diags + } + + return diags +} + +// ValueFromInt64 returns a Int64Valuable type given a Int64Value. +func (t Int64Type) ValueFromInt64(_ context.Context, v Int64Value) (Int64Valuable, diag.Diagnostics) { + return v, nil +} + +// ValueFromTerraform returns a Value given a tftypes.Value. This is meant to +// convert the tftypes.Value into a more convenient Go type for the provider to +// consume the data with. +func (t Int64Type) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if !in.IsKnown() { + return NewInt64Unknown(), nil + } + + if in.IsNull() { + return NewInt64Null(), nil + } + + var bigF *big.Float + err := in.As(&bigF) + + if err != nil { + return nil, err + } + + if !bigF.IsInt() { + return nil, fmt.Errorf("Value %s is not an integer.", bigF) + } + + i, accuracy := bigF.Int64() + + if accuracy != 0 { + return nil, fmt.Errorf("Value %s cannot be represented as a 64-bit integer.", bigF) + } + + return NewInt64Value(i), nil +} + +// ValueType returns the Value type. +func (t Int64Type) ValueType(_ context.Context) attr.Value { + // This Value does not need to be valid. 
+ return Int64Value{} +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/list.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/list.go new file mode 100644 index 0000000000..7acc4c15e1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/list.go @@ -0,0 +1,460 @@ +package basetypes + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/reflect" + "github.com/hashicorp/terraform-plugin-framework/path" +) + +var ( + _ ListTypable = ListType{} + _ ListValuable = &ListValue{} +) + +// ListTypable extends attr.Type for list types. +// Implement this interface to create a custom ListType type. +type ListTypable interface { + attr.Type + + // ValueFromList should convert the List to a ListValuable type. + ValueFromList(context.Context, ListValue) (ListValuable, diag.Diagnostics) +} + +// ListValuable extends attr.Value for list value types. +// Implement this interface to create a custom List value type. +type ListValuable interface { + attr.Value + + // ToListValue should convert the value type to a List. + ToListValue(ctx context.Context) (ListValue, diag.Diagnostics) +} + +// ListType is an AttributeType representing a list of values. All values must +// be of the same type, which the provider must specify as the ElemType +// property. +type ListType struct { + ElemType attr.Type +} + +// ElementType returns the attr.Type elements will be created from. 
+func (l ListType) ElementType() attr.Type { + return l.ElemType +} + +// WithElementType returns a ListType that is identical to `l`, but with the +// element type set to `typ`. +func (l ListType) WithElementType(typ attr.Type) attr.TypeWithElementType { + return ListType{ElemType: typ} +} + +// TerraformType returns the tftypes.Type that should be used to +// represent this type. This constrains what user input will be +// accepted and what kind of data can be set in state. The framework +// will use this to translate the AttributeType to something Terraform +// can understand. +func (l ListType) TerraformType(ctx context.Context) tftypes.Type { + return tftypes.List{ + ElementType: l.ElemType.TerraformType(ctx), + } +} + +// ValueFromTerraform returns an attr.Value given a tftypes.Value. +// This is meant to convert the tftypes.Value into a more convenient Go +// type for the provider to consume the data with. +func (l ListType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewListNull(l.ElemType), nil + } + if !in.Type().Equal(l.TerraformType(ctx)) { + return nil, fmt.Errorf("can't use %s as value of List with ElementType %T, can only use %s values", in.String(), l.ElemType, l.ElemType.TerraformType(ctx).String()) + } + if !in.IsKnown() { + return NewListUnknown(l.ElemType), nil + } + if in.IsNull() { + return NewListNull(l.ElemType), nil + } + val := []tftypes.Value{} + err := in.As(&val) + if err != nil { + return nil, err + } + elems := make([]attr.Value, 0, len(val)) + for _, elem := range val { + av, err := l.ElemType.ValueFromTerraform(ctx, elem) + if err != nil { + return nil, err + } + elems = append(elems, av) + } + // ValueFromTerraform above on each element should make this safe. + // Otherwise, this will need to do some Diagnostics to error conversion. + return NewListValueMust(l.ElemType, elems), nil +} + +// Equal returns true if `o` is also a ListType and has the same ElemType. 
+func (l ListType) Equal(o attr.Type) bool { + if l.ElemType == nil { + return false + } + other, ok := o.(ListType) + if !ok { + return false + } + return l.ElemType.Equal(other.ElemType) +} + +// ApplyTerraform5AttributePathStep applies the given AttributePathStep to the +// list. +func (l ListType) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + if _, ok := step.(tftypes.ElementKeyInt); !ok { + return nil, fmt.Errorf("cannot apply step %T to ListType", step) + } + + return l.ElemType, nil +} + +// String returns a human-friendly description of the ListType. +func (l ListType) String() string { + return "types.ListType[" + l.ElemType.String() + "]" +} + +// Validate validates all elements of the list that are of type +// xattr.TypeWithValidate. +func (l ListType) Validate(ctx context.Context, in tftypes.Value, path path.Path) diag.Diagnostics { + var diags diag.Diagnostics + + if in.Type() == nil { + return diags + } + + if !in.Type().Is(tftypes.List{}) { + err := fmt.Errorf("expected List value, received %T with value: %v", in, in) + diags.AddAttributeError( + path, + "List Type Validation Error", + "An unexpected error was encountered trying to validate an attribute value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return diags + } + + if !in.IsKnown() || in.IsNull() { + return diags + } + + var elems []tftypes.Value + + if err := in.As(&elems); err != nil { + diags.AddAttributeError( + path, + "List Type Validation Error", + "An unexpected error was encountered trying to validate an attribute value. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+err.Error(), + ) + return diags + } + + validatableType, isValidatable := l.ElemType.(xattr.TypeWithValidate) + if !isValidatable { + return diags + } + + for index, elem := range elems { + if !elem.IsFullyKnown() { + continue + } + diags = append(diags, validatableType.Validate(ctx, elem, path.AtListIndex(index))...) + } + + return diags +} + +// ValueType returns the Value type. +func (l ListType) ValueType(_ context.Context) attr.Value { + return ListValue{ + elementType: l.ElemType, + } +} + +// ValueFromList returns a ListValuable type given a List. +func (l ListType) ValueFromList(_ context.Context, list ListValue) (ListValuable, diag.Diagnostics) { + return list, nil +} + +// NewListNull creates a List with a null value. Determine whether the value is +// null via the List type IsNull method. +func NewListNull(elementType attr.Type) ListValue { + return ListValue{ + elementType: elementType, + state: attr.ValueStateNull, + } +} + +// NewListUnknown creates a List with an unknown value. Determine whether the +// value is unknown via the List type IsUnknown method. +func NewListUnknown(elementType attr.Type) ListValue { + return ListValue{ + elementType: elementType, + state: attr.ValueStateUnknown, + } +} + +// NewListValue creates a List with a known value. Access the value via the List +// type Elements or ElementsAs methods. +func NewListValue(elementType attr.Type, elements []attr.Value) (ListValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for idx, element := range elements { + if !elementType.Equal(element.Type(ctx)) { + diags.AddError( + "Invalid List Element Type", + "While creating a List value, an invalid element was detected. "+ + "A List must use the single, given element type. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("List Element Type: %s\n", elementType.String())+ + fmt.Sprintf("List Index (%d) Element Type: %s", idx, element.Type(ctx)), + ) + } + } + + if diags.HasError() { + return NewListUnknown(elementType), diags + } + + return ListValue{ + elementType: elementType, + elements: elements, + state: attr.ValueStateKnown, + }, nil +} + +// NewListValueFrom creates a List with a known value, using reflection rules. +// The elements must be a slice which can convert into the given element type. +// Access the value via the List type Elements or ElementsAs methods. +func NewListValueFrom(ctx context.Context, elementType attr.Type, elements any) (ListValue, diag.Diagnostics) { + attrValue, diags := reflect.FromValue( + ctx, + ListType{ElemType: elementType}, + elements, + path.Empty(), + ) + + if diags.HasError() { + return NewListUnknown(elementType), diags + } + + list, ok := attrValue.(ListValue) + + // This should not happen, but ensure there is an error if it does. + if !ok { + diags.AddError( + "Unable to Convert List Value", + "An unexpected result occurred when creating a List using NewListValueFrom. "+ + "This is an issue with terraform-plugin-framework and should be reported to the provider developers.", + ) + } + + return list, diags +} + +// NewListValueMust creates a List with a known value, converting any diagnostics +// into a panic at runtime. Access the value via the List +// type Elements or ElementsAs methods. +// +// This creation function is only recommended to create List values which will +// not potentially affect practitioners, such as testing, or exhaustively +// tested provider logic. +func NewListValueMust(elementType attr.Type, elements []attr.Value) ListValue { + list, diags := NewListValue(elementType, elements) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewListValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return list +} + +// ListValue represents a list of attr.Values, all of the same type, indicated +// by ElemType. +type ListValue struct { + // elements is the collection of known values in the List. + elements []attr.Value + + // elementType is the type of the elements in the List. + elementType attr.Type + + // state represents whether the value is null, unknown, or known. The + // zero-value is null. + state attr.ValueState +} + +// Elements returns a copy of the collection of elements for the List. +func (l ListValue) Elements() []attr.Value { + // Ensure callers cannot mutate the internal elements + result := make([]attr.Value, 0, len(l.elements)) + result = append(result, l.elements...) + + return result +} + +// ElementsAs populates `target` with the elements of the ListValue, throwing an +// error if the elements cannot be stored in `target`. +func (l ListValue) ElementsAs(ctx context.Context, target interface{}, allowUnhandled bool) diag.Diagnostics { + // we need a tftypes.Value for this List to be able to use it with our + // reflection code + values, err := l.ToTerraformValue(ctx) + if err != nil { + return diag.Diagnostics{ + diag.NewErrorDiagnostic( + "List Element Conversion Error", + "An unexpected error was encountered trying to convert list elements. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ), + } + } + return reflect.Into(ctx, ListType{ElemType: l.elementType}, values, target, reflect.Options{ + UnhandledNullAsEmpty: allowUnhandled, + UnhandledUnknownAsEmpty: allowUnhandled, + }, path.Empty()) +} + +// ElementType returns the element type for the List. 
+func (l ListValue) ElementType(_ context.Context) attr.Type { + return l.elementType +} + +// Type returns a ListType with the same element type as `l`. +func (l ListValue) Type(ctx context.Context) attr.Type { + return ListType{ElemType: l.ElementType(ctx)} +} + +// ToTerraformValue returns the data contained in the List as a tftypes.Value. +func (l ListValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + listType := tftypes.List{ElementType: l.ElementType(ctx).TerraformType(ctx)} + + switch l.state { + case attr.ValueStateKnown: + vals := make([]tftypes.Value, 0, len(l.elements)) + + for _, elem := range l.elements { + val, err := elem.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(listType, tftypes.UnknownValue), err + } + + vals = append(vals, val) + } + + if err := tftypes.ValidateValue(listType, vals); err != nil { + return tftypes.NewValue(listType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(listType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(listType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(listType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled List state in ToTerraformValue: %s", l.state)) + } +} + +// Equal returns true if the List is considered semantically equal +// (same type and same value) to the attr.Value passed as an argument. +func (l ListValue) Equal(o attr.Value) bool { + other, ok := o.(ListValue) + + if !ok { + return false + } + + if !l.elementType.Equal(other.elementType) { + return false + } + + if l.state != other.state { + return false + } + + if l.state != attr.ValueStateKnown { + return true + } + + if len(l.elements) != len(other.elements) { + return false + } + + for idx, lElem := range l.elements { + otherElem := other.elements[idx] + + if !lElem.Equal(otherElem) { + return false + } + } + + return true +} + +// IsNull returns true if the List represents a null value. 
+func (l ListValue) IsNull() bool { + return l.state == attr.ValueStateNull +} + +// IsUnknown returns true if the List represents a currently unknown value. +// Returns false if the List has a known number of elements, even if all are +// unknown values. +func (l ListValue) IsUnknown() bool { + return l.state == attr.ValueStateUnknown +} + +// String returns a human-readable representation of the List value. +// The string returned here is not protected by any compatibility guarantees, +// and is intended for logging and error reporting. +func (l ListValue) String() string { + if l.IsUnknown() { + return attr.UnknownValueString + } + + if l.IsNull() { + return attr.NullValueString + } + + var res strings.Builder + + res.WriteString("[") + for i, e := range l.Elements() { + if i != 0 { + res.WriteString(",") + } + res.WriteString(e.String()) + } + res.WriteString("]") + + return res.String() +} + +// ToListValue returns the List. +func (l ListValue) ToListValue(context.Context) (ListValue, diag.Diagnostics) { + return l, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/map.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/map.go new file mode 100644 index 0000000000..4dc73607c5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/map.go @@ -0,0 +1,477 @@ +package basetypes + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/reflect" + "github.com/hashicorp/terraform-plugin-framework/path" +) + +var ( + _ MapTypable = MapType{} + _ MapValuable = &MapValue{} +) + +// MapTypable extends 
attr.Type for map types. +// Implement this interface to create a custom MapType type. +type MapTypable interface { + attr.Type + + // ValueFromMap should convert the Map to a MapValuable type. + ValueFromMap(context.Context, MapValue) (MapValuable, diag.Diagnostics) +} + +// MapValuable extends attr.Value for map value types. +// Implement this interface to create a custom Map value type. +type MapValuable interface { + attr.Value + + // ToMapValue should convert the value type to a Map. + ToMapValue(ctx context.Context) (MapValue, diag.Diagnostics) +} + +// MapType is an AttributeType representing a map of values. All values must +// be of the same type, which the provider must specify as the ElemType +// property. Keys will always be strings. +type MapType struct { + ElemType attr.Type +} + +// WithElementType returns a new copy of the type with its element type set. +func (m MapType) WithElementType(typ attr.Type) attr.TypeWithElementType { + return MapType{ + ElemType: typ, + } +} + +// ElementType returns the type's element type. +func (m MapType) ElementType() attr.Type { + return m.ElemType +} + +// TerraformType returns the tftypes.Type that should be used to represent this +// type. This constrains what user input will be accepted and what kind of data +// can be set in state. The framework will use this to translate the +// AttributeType to something Terraform can understand. +func (m MapType) TerraformType(ctx context.Context) tftypes.Type { + return tftypes.Map{ + ElementType: m.ElemType.TerraformType(ctx), + } +} + +// ValueFromTerraform returns an attr.Value given a tftypes.Value. This is +// meant to convert the tftypes.Value into a more convenient Go type for the +// provider to consume the data with. 
+func (m MapType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewMapNull(m.ElemType), nil + } + if !in.Type().Is(tftypes.Map{}) { + return nil, fmt.Errorf("can't use %s as value of MapValue, can only use tftypes.Map values", in.String()) + } + if !in.Type().Equal(tftypes.Map{ElementType: m.ElemType.TerraformType(ctx)}) { + return nil, fmt.Errorf("can't use %s as value of Map with ElementType %T, can only use %s values", in.String(), m.ElemType, m.ElemType.TerraformType(ctx).String()) + } + if !in.IsKnown() { + return NewMapUnknown(m.ElemType), nil + } + if in.IsNull() { + return NewMapNull(m.ElemType), nil + } + val := map[string]tftypes.Value{} + err := in.As(&val) + if err != nil { + return nil, err + } + elems := make(map[string]attr.Value, len(val)) + for key, elem := range val { + av, err := m.ElemType.ValueFromTerraform(ctx, elem) + if err != nil { + return nil, err + } + elems[key] = av + } + // ValueFromTerraform above on each element should make this safe. + // Otherwise, this will need to do some Diagnostics to error conversion. + return NewMapValueMust(m.ElemType, elems), nil +} + +// Equal returns true if `o` is also a MapType and has the same ElemType. +func (m MapType) Equal(o attr.Type) bool { + if m.ElemType == nil { + return false + } + other, ok := o.(MapType) + if !ok { + return false + } + return m.ElemType.Equal(other.ElemType) +} + +// ApplyTerraform5AttributePathStep applies the given AttributePathStep to the +// map. +func (m MapType) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + if _, ok := step.(tftypes.ElementKeyString); !ok { + return nil, fmt.Errorf("cannot apply step %T to MapType", step) + } + + return m.ElemType, nil +} + +// String returns a human-friendly description of the MapType. 
+func (m MapType) String() string { + return "types.MapType[" + m.ElemType.String() + "]" +} + +// Validate validates all elements of the map that are of type +// xattr.TypeWithValidate. +func (m MapType) Validate(ctx context.Context, in tftypes.Value, path path.Path) diag.Diagnostics { + var diags diag.Diagnostics + + if in.Type() == nil { + return diags + } + + if !in.Type().Is(tftypes.Map{}) { + err := fmt.Errorf("expected Map value, received %T with value: %v", in, in) + diags.AddAttributeError( + path, + "Map Type Validation Error", + "An unexpected error was encountered trying to validate an attribute value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return diags + } + + if !in.IsKnown() || in.IsNull() { + return diags + } + + var elems map[string]tftypes.Value + + if err := in.As(&elems); err != nil { + diags.AddAttributeError( + path, + "Map Type Validation Error", + "An unexpected error was encountered trying to validate an attribute value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return diags + } + + validatableType, isValidatable := m.ElemType.(xattr.TypeWithValidate) + if !isValidatable { + return diags + } + + for index, elem := range elems { + if !elem.IsFullyKnown() { + continue + } + diags = append(diags, validatableType.Validate(ctx, elem, path.AtMapKey(index))...) + } + + return diags +} + +// ValueType returns the Value type. +func (m MapType) ValueType(_ context.Context) attr.Value { + return MapValue{ + elementType: m.ElemType, + } +} + +// ValueFromMap returns a MapValuable type given a Map. +func (m MapType) ValueFromMap(_ context.Context, ma MapValue) (MapValuable, diag.Diagnostics) { + return ma, nil +} + +// NewMapNull creates a Map with a null value. Determine whether the value is +// null via the Map type IsNull method. 
+func NewMapNull(elementType attr.Type) MapValue { + return MapValue{ + elementType: elementType, + state: attr.ValueStateNull, + } +} + +// NewMapUnknown creates a Map with an unknown value. Determine whether the +// value is unknown via the Map type IsUnknown method. +func NewMapUnknown(elementType attr.Type) MapValue { + return MapValue{ + elementType: elementType, + state: attr.ValueStateUnknown, + } +} + +// NewMapValue creates a Map with a known value. Access the value via the Map +// type Elements or ElementsAs methods. +func NewMapValue(elementType attr.Type, elements map[string]attr.Value) (MapValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for key, element := range elements { + if !elementType.Equal(element.Type(ctx)) { + diags.AddError( + "Invalid Map Element Type", + "While creating a Map value, an invalid element was detected. "+ + "A Map must use the single, given element type. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Map Element Type: %s\n", elementType.String())+ + fmt.Sprintf("Map Key (%s) Element Type: %s", key, element.Type(ctx)), + ) + } + } + + if diags.HasError() { + return NewMapUnknown(elementType), diags + } + + return MapValue{ + elementType: elementType, + elements: elements, + state: attr.ValueStateKnown, + }, nil +} + +// NewMapValueFrom creates a Map with a known value, using reflection rules. +// The elements must be a map of string keys to values which can convert into +// the given element type. Access the value via the Map type Elements or +// ElementsAs methods. 
+func NewMapValueFrom(ctx context.Context, elementType attr.Type, elements any) (MapValue, diag.Diagnostics) { + attrValue, diags := reflect.FromValue( + ctx, + MapType{ElemType: elementType}, + elements, + path.Empty(), + ) + + if diags.HasError() { + return NewMapUnknown(elementType), diags + } + + m, ok := attrValue.(MapValue) + + // This should not happen, but ensure there is an error if it does. + if !ok { + diags.AddError( + "Unable to Convert Map Value", + "An unexpected result occurred when creating a Map using MapValueFrom. "+ + "This is an issue with terraform-plugin-framework and should be reported to the provider developers.", + ) + } + + return m, diags +} + +// NewMapValueMust creates a Map with a known value, converting any diagnostics +// into a panic at runtime. Access the value via the Map +// type Elements or ElementsAs methods. +// +// This creation function is only recommended to create Map values which will +// not potentially effect practitioners, such as testing, or exhaustively +// tested provider logic. +func NewMapValueMust(elementType attr.Type, elements map[string]attr.Value) MapValue { + m, diags := NewMapValue(elementType, elements) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("MapValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return m +} + +// MapValue represents a mapping of string keys to attr.Value values of a single +// type. +type MapValue struct { + // elements is the mapping of known values in the Map. + elements map[string]attr.Value + + // elementType is the type of the elements in the Map. + elementType attr.Type + + // state represents whether the value is null, unknown, or known. The + // zero-value is null. 
+ state attr.ValueState +} + +// Elements returns a copy of the mapping of elements for the Map. +func (m MapValue) Elements() map[string]attr.Value { + // Ensure callers cannot mutate the internal elements + result := make(map[string]attr.Value, len(m.elements)) + + for key, value := range m.elements { + result[key] = value + } + + return result +} + +// ElementsAs populates `target` with the elements of the MapValue, throwing an +// error if the elements cannot be stored in `target`. +func (m MapValue) ElementsAs(ctx context.Context, target interface{}, allowUnhandled bool) diag.Diagnostics { + // we need a tftypes.Value for this Map to be able to use it with our + // reflection code + val, err := m.ToTerraformValue(ctx) + if err != nil { + err := fmt.Errorf("error getting Terraform value for map: %w", err) + return diag.Diagnostics{ + diag.NewErrorDiagnostic( + "Map Conversion Error", + "An unexpected error was encountered trying to convert the map into an equivalent Terraform value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ), + } + } + + return reflect.Into(ctx, MapType{ElemType: m.elementType}, val, target, reflect.Options{ + UnhandledNullAsEmpty: allowUnhandled, + UnhandledUnknownAsEmpty: allowUnhandled, + }, path.Empty()) +} + +// ElementType returns the element type for the Map. +func (m MapValue) ElementType(_ context.Context) attr.Type { + return m.elementType +} + +// Type returns a MapType with the same element type as `m`. +func (m MapValue) Type(ctx context.Context) attr.Type { + return MapType{ElemType: m.ElementType(ctx)} +} + +// ToTerraformValue returns the data contained in the Map as a tftypes.Value. 
+func (m MapValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + mapType := tftypes.Map{ElementType: m.ElementType(ctx).TerraformType(ctx)} + + switch m.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, len(m.elements)) + + for key, elem := range m.elements { + val, err := elem.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(mapType, tftypes.UnknownValue), err + } + + vals[key] = val + } + + if err := tftypes.ValidateValue(mapType, vals); err != nil { + return tftypes.NewValue(mapType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(mapType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(mapType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(mapType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Map state in ToTerraformValue: %s", m.state)) + } +} + +// Equal returns true if the Map is considered semantically equal +// (same type and same value) to the attr.Value passed as an argument. +func (m MapValue) Equal(o attr.Value) bool { + other, ok := o.(MapValue) + + if !ok { + return false + } + + if !m.elementType.Equal(other.elementType) { + return false + } + + if m.state != other.state { + return false + } + + if m.state != attr.ValueStateKnown { + return true + } + + if len(m.elements) != len(other.elements) { + return false + } + + for key, mElem := range m.elements { + otherElem := other.elements[key] + + if !mElem.Equal(otherElem) { + return false + } + } + + return true +} + +// IsNull returns true if the Map represents a null value. +func (m MapValue) IsNull() bool { + return m.state == attr.ValueStateNull +} + +// IsUnknown returns true if the Map represents a currently unknown value. +// Returns false if the Map has a known number of elements, even if all are +// unknown values. 
+func (m MapValue) IsUnknown() bool { + return m.state == attr.ValueStateUnknown +} + +// String returns a human-readable representation of the Map value. +// The string returned here is not protected by any compatibility guarantees, +// and is intended for logging and error reporting. +func (m MapValue) String() string { + if m.IsUnknown() { + return attr.UnknownValueString + } + + if m.IsNull() { + return attr.NullValueString + } + + // We want the output to be consistent, so we sort the output by key + keys := make([]string, 0, len(m.Elements())) + for k := range m.Elements() { + keys = append(keys, k) + } + sort.Strings(keys) + + var res strings.Builder + + res.WriteString("{") + for i, k := range keys { + if i != 0 { + res.WriteString(",") + } + res.WriteString(fmt.Sprintf("%q:%s", k, m.Elements()[k].String())) + } + res.WriteString("}") + + return res.String() +} + +// ToMapValue returns the Map. +func (m MapValue) ToMapValue(context.Context) (MapValue, diag.Diagnostics) { + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/number.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/number.go new file mode 100644 index 0000000000..a67905f8f3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/number.go @@ -0,0 +1,147 @@ +package basetypes + +import ( + "context" + "fmt" + "math/big" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +var ( + _ NumberValuable = NumberValue{} +) + +// NumberValuable extends attr.Value for number value types. +// Implement this interface to create a custom Number value type. +type NumberValuable interface { + attr.Value + + // ToNumberValue should convert the value type to a Number. 
+ ToNumberValue(ctx context.Context) (NumberValue, diag.Diagnostics) +} + +// NewNumberNull creates a Number with a null value. Determine whether the value is +// null via the Number type IsNull method. +func NewNumberNull() NumberValue { + return NumberValue{ + state: attr.ValueStateNull, + } +} + +// NewNumberUnknown creates a Number with an unknown value. Determine whether the +// value is unknown via the Number type IsUnknown method. +func NewNumberUnknown() NumberValue { + return NumberValue{ + state: attr.ValueStateUnknown, + } +} + +// NewNumberValue creates a Number with a known value. Access the value via the Number +// type ValueBigFloat method. If the given value is nil, a null Number is created. +func NewNumberValue(value *big.Float) NumberValue { + if value == nil { + return NewNumberNull() + } + + return NumberValue{ + state: attr.ValueStateKnown, + value: value, + } +} + +// NumberValue represents a number value, exposed as a *big.Float. Numbers can be +// floats or integers. +type NumberValue struct { + // state represents whether the value is null, unknown, or known. The + // zero-value is null. + state attr.ValueState + + // value contains the known value, if not null or unknown. + value *big.Float +} + +// Type returns a NumberType. +func (n NumberValue) Type(_ context.Context) attr.Type { + return NumberType{} +} + +// ToTerraformValue returns the data contained in the Number as a tftypes.Value. 
+func (n NumberValue) ToTerraformValue(_ context.Context) (tftypes.Value, error) { + switch n.state { + case attr.ValueStateKnown: + if n.value == nil { + return tftypes.NewValue(tftypes.Number, nil), nil + } + + if err := tftypes.ValidateValue(tftypes.Number, n.value); err != nil { + return tftypes.NewValue(tftypes.Number, tftypes.UnknownValue), err + } + + return tftypes.NewValue(tftypes.Number, n.value), nil + case attr.ValueStateNull: + return tftypes.NewValue(tftypes.Number, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(tftypes.Number, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Number state in ToTerraformValue: %s", n.state)) + } +} + +// Equal returns true if `other` is a Number and has the same value as `n`. +func (n NumberValue) Equal(other attr.Value) bool { + o, ok := other.(NumberValue) + + if !ok { + return false + } + + if n.state != o.state { + return false + } + + if n.state != attr.ValueStateKnown { + return true + } + + return n.value.Cmp(o.value) == 0 +} + +// IsNull returns true if the Number represents a null value. +func (n NumberValue) IsNull() bool { + return n.state == attr.ValueStateNull +} + +// IsUnknown returns true if the Number represents a currently unknown value. +func (n NumberValue) IsUnknown() bool { + return n.state == attr.ValueStateUnknown +} + +// String returns a human-readable representation of the Number value. +// The string returned here is not protected by any compatibility guarantees, +// and is intended for logging and error reporting. +func (n NumberValue) String() string { + if n.IsUnknown() { + return attr.UnknownValueString + } + + if n.IsNull() { + return attr.NullValueString + } + + return n.value.String() +} + +// ValueBigFloat returns the known *big.Float value. If Number is null or unknown, returns +// 0.0. +func (n NumberValue) ValueBigFloat() *big.Float { + return n.value +} + +// ToNumberValue returns Number. 
+func (n NumberValue) ToNumberValue(context.Context) (NumberValue, diag.Diagnostics) { + return n, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/number_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/number_type.go new file mode 100644 index 0000000000..75d70664f0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/number_type.go @@ -0,0 +1,84 @@ +package basetypes + +import ( + "context" + "fmt" + "math/big" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// NumberTypable extends attr.Type for number types. +// Implement this interface to create a custom NumberType type. +type NumberTypable interface { + attr.Type + + // ValueFromNumber should convert the Number to a NumberValuable type. + ValueFromNumber(context.Context, NumberValue) (NumberValuable, diag.Diagnostics) +} + +var _ NumberTypable = NumberType{} + +// NumberType is the base framework type for a floating point number. +// NumberValue is the associated value type. +type NumberType struct{} + +// ApplyTerraform5AttributePathStep applies the given AttributePathStep to the +// type. +func (t NumberType) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return nil, fmt.Errorf("cannot apply AttributePathStep %T to %s", step, t.String()) +} + +// Equal returns true if the given type is equivalent. +func (t NumberType) Equal(o attr.Type) bool { + _, ok := o.(NumberType) + + return ok +} + +// String returns a human readable string of the type name. +func (t NumberType) String() string { + return "basetypes.NumberType" +} + +// TerraformType returns the tftypes.Type that should be used to represent this +// framework type. 
+func (t NumberType) TerraformType(_ context.Context) tftypes.Type { + return tftypes.Number +} + +// ValueFromNumber returns a NumberValuable type given a NumberValue. +func (t NumberType) ValueFromNumber(_ context.Context, v NumberValue) (NumberValuable, diag.Diagnostics) { + return v, nil +} + +// ValueFromTerraform returns a Value given a tftypes.Value. This is meant to +// convert the tftypes.Value into a more convenient Go type for the provider to +// consume the data with. +func (t NumberType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if !in.IsKnown() { + return NewNumberUnknown(), nil + } + + if in.IsNull() { + return NewNumberNull(), nil + } + + n := big.NewFloat(0) + + err := in.As(&n) + + if err != nil { + return nil, err + } + + return NewNumberValue(n), nil +} + +// ValueType returns the Value type. +func (t NumberType) ValueType(_ context.Context) attr.Value { + // This Value does not need to be valid. + return NumberValue{} +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/object.go new file mode 100644 index 0000000000..44035e1364 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/object.go @@ -0,0 +1,538 @@ +package basetypes + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/reflect" + "github.com/hashicorp/terraform-plugin-framework/path" + + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +var ( + _ ObjectTypable = ObjectType{} + _ ObjectValuable = &ObjectValue{} +) + +// ObjectTypable extends attr.Type for object types. +// Implement this interface to create a custom ObjectType type. 
+type ObjectTypable interface { + attr.Type + + // ValueFromObject should convert the Object to an ObjectValuable type. + ValueFromObject(context.Context, ObjectValue) (ObjectValuable, diag.Diagnostics) +} + +// ObjectValuable extends attr.Value for object value types. +// Implement this interface to create a custom Object value type. +type ObjectValuable interface { + attr.Value + + // ToObjectValue should convert the value type to an Object. + ToObjectValue(ctx context.Context) (ObjectValue, diag.Diagnostics) +} + +// ObjectType is an AttributeType representing an object. +type ObjectType struct { + AttrTypes map[string]attr.Type +} + +// WithAttributeTypes returns a new copy of the type with its attribute types +// set. +func (o ObjectType) WithAttributeTypes(typs map[string]attr.Type) attr.TypeWithAttributeTypes { + return ObjectType{ + AttrTypes: typs, + } +} + +// AttributeTypes returns a copy of the type's attribute types. +func (o ObjectType) AttributeTypes() map[string]attr.Type { + // Ensure callers cannot mutate the value + result := make(map[string]attr.Type, len(o.AttrTypes)) + + for key, value := range o.AttrTypes { + result[key] = value + } + + return result +} + +// TerraformType returns the tftypes.Type that should be used to +// represent this type. This constrains what user input will be +// accepted and what kind of data can be set in state. The framework +// will use this to translate the AttributeType to something Terraform +// can understand. +func (o ObjectType) TerraformType(ctx context.Context) tftypes.Type { + attributeTypes := map[string]tftypes.Type{} + for k, v := range o.AttrTypes { + attributeTypes[k] = v.TerraformType(ctx) + } + return tftypes.Object{ + AttributeTypes: attributeTypes, + } +} + +// ValueFromTerraform returns an attr.Value given a tftypes.Value. +// This is meant to convert the tftypes.Value into a more convenient Go +// type for the provider to consume the data with. 
+func (o ObjectType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewObjectNull(o.AttrTypes), nil + } + if !in.Type().Equal(o.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", o.TerraformType(ctx), in.Type()) + } + if !in.IsKnown() { + return NewObjectUnknown(o.AttrTypes), nil + } + if in.IsNull() { + return NewObjectNull(o.AttrTypes), nil + } + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + err := in.As(&val) + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := o.AttrTypes[k].ValueFromTerraform(ctx, v) + if err != nil { + return nil, err + } + attributes[k] = a + } + // ValueFromTerraform above on each attribute should make this safe. + // Otherwise, this will need to do some Diagnostics to error conversion. + return NewObjectValueMust(o.AttrTypes, attributes), nil +} + +// Equal returns true if `candidate` is also an ObjectType and has the same +// AttributeTypes. +func (o ObjectType) Equal(candidate attr.Type) bool { + other, ok := candidate.(ObjectType) + if !ok { + return false + } + if len(other.AttrTypes) != len(o.AttrTypes) { + return false + } + for k, v := range o.AttrTypes { + attr, ok := other.AttrTypes[k] + if !ok { + return false + } + if !v.Equal(attr) { + return false + } + } + return true +} + +// ApplyTerraform5AttributePathStep applies the given AttributePathStep to the +// object. +func (o ObjectType) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + attrName, ok := step.(tftypes.AttributeName) + + if !ok { + return nil, fmt.Errorf("cannot apply step %T to ObjectType", step) + } + + attrType, ok := o.AttrTypes[string(attrName)] + + if !ok { + return nil, fmt.Errorf("undefined attribute name %s in ObjectType", attrName) + } + + return attrType, nil +} + +// String returns a human-friendly description of the ObjectType. 
+func (o ObjectType) String() string { + var res strings.Builder + res.WriteString("types.ObjectType[") + keys := make([]string, 0, len(o.AttrTypes)) + for k := range o.AttrTypes { + keys = append(keys, k) + } + sort.Strings(keys) + for pos, key := range keys { + if pos != 0 { + res.WriteString(", ") + } + res.WriteString(`"` + key + `":`) + res.WriteString(o.AttrTypes[key].String()) + } + res.WriteString("]") + return res.String() +} + +// ValueType returns the Value type. +func (o ObjectType) ValueType(_ context.Context) attr.Value { + return ObjectValue{ + attributeTypes: o.AttrTypes, + } +} + +// ValueFromObject returns an ObjectValuable type given an Object. +func (o ObjectType) ValueFromObject(_ context.Context, obj ObjectValue) (ObjectValuable, diag.Diagnostics) { + return obj, nil +} + +// NewObjectNull creates a Object with a null value. Determine whether the value is +// null via the Object type IsNull method. +func NewObjectNull(attributeTypes map[string]attr.Type) ObjectValue { + return ObjectValue{ + attributeTypes: attributeTypes, + state: attr.ValueStateNull, + } +} + +// NewObjectUnknown creates a Object with an unknown value. Determine whether the +// value is unknown via the Object type IsUnknown method. +func NewObjectUnknown(attributeTypes map[string]attr.Type) ObjectValue { + return ObjectValue{ + attributeTypes: attributeTypes, + state: attr.ValueStateUnknown, + } +} + +// NewObjectValue creates a Object with a known value. Access the value via the Object +// type ElementsAs method. 
+func NewObjectValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing Object Attribute Value", + "While creating a Object value, a missing attribute value was detected. "+ + "A Object must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Object Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid Object Attribute Type", + "While creating a Object value, an invalid attribute value was detected. "+ + "A Object must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Object Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("Object Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra Object Attribute Value", + "While creating a Object value, an extra attribute value was detected. "+ + "A Object must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra Object Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewObjectUnknown(attributeTypes), diags + } + + return ObjectValue{ + attributeTypes: attributeTypes, + attributes: attributes, + state: attr.ValueStateKnown, + }, nil +} + +// NewObjectValueFrom creates a Object with a known value, using reflection rules. +// The attributes must be a map of string attribute names to attribute values +// which can convert into the given attribute type or a struct with tfsdk field +// tags. Access the value via the Object type Elements or ElementsAs methods. +func NewObjectValueFrom(ctx context.Context, attributeTypes map[string]attr.Type, attributes any) (ObjectValue, diag.Diagnostics) { + attrValue, diags := reflect.FromValue( + ctx, + ObjectType{AttrTypes: attributeTypes}, + attributes, + path.Empty(), + ) + + if diags.HasError() { + return NewObjectUnknown(attributeTypes), diags + } + + m, ok := attrValue.(ObjectValue) + + // This should not happen, but ensure there is an error if it does. + if !ok { + diags.AddError( + "Unable to Convert Object Value", + "An unexpected result occurred when creating a Object using ObjectValueFrom. "+ + "This is an issue with terraform-plugin-framework and should be reported to the provider developers.", + ) + } + + return m, diags +} + +// NewObjectValueMust creates a Object with a known value, converting any diagnostics +// into a panic at runtime. Access the value via the Object +// type Elements or ElementsAs methods. +// +// This creation function is only recommended to create Object values which will +// not potentially effect practitioners, such as testing, or exhaustively +// tested provider logic. 
+func NewObjectValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) ObjectValue { + object, diags := NewObjectValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("ObjectValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +// ObjectValue represents an object +type ObjectValue struct { + // attributes is the mapping of known attribute values in the Object. + attributes map[string]attr.Value + + // attributeTypes is the type of the attributes in the Object. + attributeTypes map[string]attr.Type + + // state represents whether the value is null, unknown, or known. The + // zero-value is null. + state attr.ValueState +} + +// ObjectAsOptions is a collection of toggles to control the behavior of +// Object.As. +type ObjectAsOptions struct { + // UnhandledNullAsEmpty controls what happens when As needs to put a + // null value in a type that has no way to preserve that distinction. + // When set to true, the type's empty value will be used. When set to + // false, an error will be returned. + UnhandledNullAsEmpty bool + + // UnhandledUnknownAsEmpty controls what happens when As needs to put + // an unknown value in a type that has no way to preserve that + // distinction. When set to true, the type's empty value will be used. + // When set to false, an error will be returned. + UnhandledUnknownAsEmpty bool +} + +// As populates `target` with the data in the ObjectValue, throwing an error if the +// data cannot be stored in `target`. 
+func (o ObjectValue) As(ctx context.Context, target interface{}, opts ObjectAsOptions) diag.Diagnostics { + // we need a tftypes.Value for this Object to be able to use it with + // our reflection code + obj := ObjectType{AttrTypes: o.attributeTypes} + val, err := o.ToTerraformValue(ctx) + if err != nil { + return diag.Diagnostics{ + diag.NewErrorDiagnostic( + "Object Conversion Error", + "An unexpected error was encountered trying to convert object. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ), + } + } + return reflect.Into(ctx, obj, val, target, reflect.Options{ + UnhandledNullAsEmpty: opts.UnhandledNullAsEmpty, + UnhandledUnknownAsEmpty: opts.UnhandledUnknownAsEmpty, + }, path.Empty()) +} + +// Attributes returns a copy of the mapping of known attribute values for the Object. +func (o ObjectValue) Attributes() map[string]attr.Value { + // Ensure callers cannot mutate the internal attributes + result := make(map[string]attr.Value, len(o.attributes)) + + for name, value := range o.attributes { + result[name] = value + } + + return result +} + +// AttributeTypes returns a copy of the mapping of attribute types for the Object. +func (o ObjectValue) AttributeTypes(_ context.Context) map[string]attr.Type { + // Ensure callers cannot mutate the internal attribute types + result := make(map[string]attr.Type, len(o.attributeTypes)) + + for name, typ := range o.attributeTypes { + result[name] = typ + } + + return result +} + +// Type returns an ObjectType with the same attribute types as `o`. +func (o ObjectValue) Type(ctx context.Context) attr.Type { + return ObjectType{AttrTypes: o.AttributeTypes(ctx)} +} + +// ToTerraformValue returns the data contained in the attr.Value as +// a tftypes.Value. 
+func (o ObjectValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := map[string]tftypes.Type{} + for attr, typ := range o.AttributeTypes(ctx) { + attrTypes[attr] = typ.TerraformType(ctx) + } + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch o.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, len(o.attributes)) + + for name, v := range o.attributes { + val, err := v.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals[name] = val + } + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", o.state)) + } +} + +// Equal returns true if the Object is considered semantically equal +// (same type and same value) to the attr.Value passed as an argument. 
+func (o ObjectValue) Equal(c attr.Value) bool { + other, ok := c.(ObjectValue) + + if !ok { + return false + } + + if o.state != other.state { + return false + } + + if o.state != attr.ValueStateKnown { + return true + } + + if len(o.attributeTypes) != len(other.attributeTypes) { + return false + } + + for name, oAttributeType := range o.attributeTypes { + otherAttributeType, ok := other.attributeTypes[name] + + if !ok { + return false + } + + if !oAttributeType.Equal(otherAttributeType) { + return false + } + } + + if len(o.attributes) != len(other.attributes) { + return false + } + + for name, oAttribute := range o.attributes { + otherAttribute, ok := other.attributes[name] + + if !ok { + return false + } + + if !oAttribute.Equal(otherAttribute) { + return false + } + } + + return true +} + +// IsNull returns true if the Object represents a null value. +func (o ObjectValue) IsNull() bool { + return o.state == attr.ValueStateNull +} + +// IsUnknown returns true if the Object represents a currently unknown value. +func (o ObjectValue) IsUnknown() bool { + return o.state == attr.ValueStateUnknown +} + +// String returns a human-readable representation of the Object value. +// The string returned here is not protected by any compatibility guarantees, +// and is intended for logging and error reporting. +func (o ObjectValue) String() string { + if o.IsUnknown() { + return attr.UnknownValueString + } + + if o.IsNull() { + return attr.NullValueString + } + + // We want the output to be consistent, so we sort the output by key + keys := make([]string, 0, len(o.Attributes())) + for k := range o.Attributes() { + keys = append(keys, k) + } + sort.Strings(keys) + + var res strings.Builder + + res.WriteString("{") + for i, k := range keys { + if i != 0 { + res.WriteString(",") + } + res.WriteString(fmt.Sprintf(`"%s":%s`, k, o.Attributes()[k].String())) + } + res.WriteString("}") + + return res.String() +} + +// ToObjectValue returns the Object. 
+func (o ObjectValue) ToObjectValue(context.Context) (ObjectValue, diag.Diagnostics) { + return o, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/set.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/set.go new file mode 100644 index 0000000000..3167172ebf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/set.go @@ -0,0 +1,500 @@ +package basetypes + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/attr/xattr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/internal/reflect" + "github.com/hashicorp/terraform-plugin-framework/path" +) + +var ( + _ SetTypable = SetType{} + _ xattr.TypeWithValidate = SetType{} + _ SetValuable = &SetValue{} +) + +// SetTypable extends attr.Type for set types. +// Implement this interface to create a custom SetType type. +type SetTypable interface { + attr.Type + + // ValueFromSet should convert the Set to a SetValuable type. + ValueFromSet(context.Context, SetValue) (SetValuable, diag.Diagnostics) +} + +// SetValuable extends attr.Value for set value types. +// Implement this interface to create a custom Set value type. +type SetValuable interface { + attr.Value + + // ToSetValue should convert the value type to a Set. + ToSetValue(ctx context.Context) (SetValue, diag.Diagnostics) +} + +// SetType is an AttributeType representing a set of values. All values must +// be of the same type, which the provider must specify as the ElemType +// property. +type SetType struct { + ElemType attr.Type +} + +// ElementType returns the attr.Type elements will be created from. 
+func (st SetType) ElementType() attr.Type { + return st.ElemType +} + +// WithElementType returns a SetType that is identical to `l`, but with the +// element type set to `typ`. +func (st SetType) WithElementType(typ attr.Type) attr.TypeWithElementType { + return SetType{ElemType: typ} +} + +// TerraformType returns the tftypes.Type that should be used to +// represent this type. This constrains what user input will be +// accepted and what kind of data can be set in state. The framework +// will use this to translate the AttributeType to something Terraform +// can understand. +func (st SetType) TerraformType(ctx context.Context) tftypes.Type { + return tftypes.Set{ + ElementType: st.ElemType.TerraformType(ctx), + } +} + +// ValueFromTerraform returns an attr.Value given a tftypes.Value. +// This is meant to convert the tftypes.Value into a more convenient Go +// type for the provider to consume the data with. +func (st SetType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewSetNull(st.ElemType), nil + } + if !in.Type().Equal(st.TerraformType(ctx)) { + return nil, fmt.Errorf("can't use %s as value of Set with ElementType %T, can only use %s values", in.String(), st.ElemType, st.ElemType.TerraformType(ctx).String()) + } + if !in.IsKnown() { + return NewSetUnknown(st.ElemType), nil + } + if in.IsNull() { + return NewSetNull(st.ElemType), nil + } + val := []tftypes.Value{} + err := in.As(&val) + if err != nil { + return nil, err + } + elems := make([]attr.Value, 0, len(val)) + for _, elem := range val { + av, err := st.ElemType.ValueFromTerraform(ctx, elem) + if err != nil { + return nil, err + } + elems = append(elems, av) + } + // ValueFromTerraform above on each element should make this safe. + // Otherwise, this will need to do some Diagnostics to error conversion. + return NewSetValueMust(st.ElemType, elems), nil +} + +// Equal returns true if `o` is also a SetType and has the same ElemType. 
+func (st SetType) Equal(o attr.Type) bool { + if st.ElemType == nil { + return false + } + other, ok := o.(SetType) + if !ok { + return false + } + return st.ElemType.Equal(other.ElemType) +} + +// ApplyTerraform5AttributePathStep applies the given AttributePathStep to the +// set. +func (st SetType) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + if _, ok := step.(tftypes.ElementKeyValue); !ok { + return nil, fmt.Errorf("cannot apply step %T to SetType", step) + } + + return st.ElemType, nil +} + +// String returns a human-friendly description of the SetType. +func (st SetType) String() string { + return "types.SetType[" + st.ElemType.String() + "]" +} + +// Validate implements type validation. This type requires all elements to be +// unique. +func (st SetType) Validate(ctx context.Context, in tftypes.Value, path path.Path) diag.Diagnostics { + var diags diag.Diagnostics + + if in.Type() == nil { + return diags + } + + if !in.Type().Is(tftypes.Set{}) { + err := fmt.Errorf("expected Set value, received %T with value: %v", in, in) + diags.AddAttributeError( + path, + "Set Type Validation Error", + "An unexpected error was encountered trying to validate an attribute value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return diags + } + + if !in.IsKnown() || in.IsNull() { + return diags + } + + var elems []tftypes.Value + + if err := in.As(&elems); err != nil { + diags.AddAttributeError( + path, + "Set Type Validation Error", + "An unexpected error was encountered trying to validate an attribute value. This is always an error in the provider. 
Please report the following to the provider developer:\n\n"+err.Error(), + ) + return diags + } + + validatableType, isValidatable := st.ElemType.(xattr.TypeWithValidate) + + // Attempting to use map[tftypes.Value]struct{} for duplicate detection yields: + // panic: runtime error: hash of unhashable type tftypes.primitive + // Instead, use for loops. + for indexOuter, elemOuter := range elems { + // Only evaluate fully known values for duplicates and validation. + if !elemOuter.IsFullyKnown() { + continue + } + + // Validate the element first + if isValidatable { + elemValue, err := st.ElemType.ValueFromTerraform(ctx, elemOuter) + if err != nil { + diags.AddAttributeError( + path, + "Set Type Validation Error", + "An unexpected error was encountered trying to validate an attribute value. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ) + return diags + } + diags = append(diags, validatableType.Validate(ctx, elemOuter, path.AtSetValue(elemValue))...) + } + + // Then check for duplicates + for indexInner := indexOuter + 1; indexInner < len(elems); indexInner++ { + elemInner := elems[indexInner] + + if !elemInner.Equal(elemOuter) { + continue + } + + // TODO: Point at element attr.Value when Validate method is converted to attr.Value + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/172 + diags.AddAttributeError( + path, + "Duplicate Set Element", + fmt.Sprintf("This attribute contains duplicate values of: %s", elemInner), + ) + } + } + + return diags +} + +// ValueType returns the Value type. +func (st SetType) ValueType(_ context.Context) attr.Value { + return SetValue{ + elementType: st.ElemType, + } +} + +// ValueFromSet returns a SetValuable type given a Set. +func (st SetType) ValueFromSet(_ context.Context, set SetValue) (SetValuable, diag.Diagnostics) { + return set, nil +} + +// NewSetNull creates a Set with a null value. 
Determine whether the value is +// null via the Set type IsNull method. +func NewSetNull(elementType attr.Type) SetValue { + return SetValue{ + elementType: elementType, + state: attr.ValueStateNull, + } +} + +// NewSetUnknown creates a Set with an unknown value. Determine whether the +// value is unknown via the Set type IsUnknown method. +func NewSetUnknown(elementType attr.Type) SetValue { + return SetValue{ + elementType: elementType, + state: attr.ValueStateUnknown, + } +} + +// NewSetValue creates a Set with a known value. Access the value via the Set +// type Elements or ElementsAs methods. +func NewSetValue(elementType attr.Type, elements []attr.Value) (SetValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for idx, element := range elements { + if !elementType.Equal(element.Type(ctx)) { + diags.AddError( + "Invalid Set Element Type", + "While creating a Set value, an invalid element was detected. "+ + "A Set must use the single, given element type. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Set Element Type: %s\n", elementType.String())+ + fmt.Sprintf("Set Index (%d) Element Type: %s", idx, element.Type(ctx)), + ) + } + } + + if diags.HasError() { + return NewSetUnknown(elementType), diags + } + + return SetValue{ + elementType: elementType, + elements: elements, + state: attr.ValueStateKnown, + }, nil +} + +// NewSetValueFrom creates a Set with a known value, using reflection rules. +// The elements must be a slice which can convert into the given element type. +// Access the value via the Set type Elements or ElementsAs methods. 
+func NewSetValueFrom(ctx context.Context, elementType attr.Type, elements any) (SetValue, diag.Diagnostics) { + attrValue, diags := reflect.FromValue( + ctx, + SetType{ElemType: elementType}, + elements, + path.Empty(), + ) + + if diags.HasError() { + return NewSetUnknown(elementType), diags + } + + set, ok := attrValue.(SetValue) + + // This should not happen, but ensure there is an error if it does. + if !ok { + diags.AddError( + "Unable to Convert Set Value", + "An unexpected result occurred when creating a Set using SetValueFrom. "+ + "This is an issue with terraform-plugin-framework and should be reported to the provider developers.", + ) + } + + return set, diags +} + +// NewSetValueMust creates a Set with a known value, converting any diagnostics +// into a panic at runtime. Access the value via the Set +// type Elements or ElementsAs methods. +// +// This creation function is only recommended to create Set values which will +// not potentially effect practitioners, such as testing, or exhaustively +// tested provider logic. +func NewSetValueMust(elementType attr.Type, elements []attr.Value) SetValue { + set, diags := NewSetValue(elementType, elements) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("SetValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return set +} + +// SetValue represents a set of attr.Value, all of the same type, +// indicated by ElemType. +type SetValue struct { + // elements is the collection of known values in the Set. + elements []attr.Value + + // elementType is the type of the elements in the Set. + elementType attr.Type + + // state represents whether the value is null, unknown, or known. The + // zero-value is null. 
+ state attr.ValueState +} + +// Elements returns a copy of the collection of elements for the Set. +func (s SetValue) Elements() []attr.Value { + // Ensure callers cannot mutate the internal elements + result := make([]attr.Value, 0, len(s.elements)) + result = append(result, s.elements...) + + return result +} + +// ElementsAs populates `target` with the elements of the SetValue, throwing an +// error if the elements cannot be stored in `target`. +func (s SetValue) ElementsAs(ctx context.Context, target interface{}, allowUnhandled bool) diag.Diagnostics { + // we need a tftypes.Value for this Set to be able to use it with our + // reflection code + val, err := s.ToTerraformValue(ctx) + if err != nil { + return diag.Diagnostics{ + diag.NewErrorDiagnostic( + "Set Element Conversion Error", + "An unexpected error was encountered trying to convert set elements. This is always an error in the provider. Please report the following to the provider developer:\n\n"+err.Error(), + ), + } + } + return reflect.Into(ctx, s.Type(ctx), val, target, reflect.Options{ + UnhandledNullAsEmpty: allowUnhandled, + UnhandledUnknownAsEmpty: allowUnhandled, + }, path.Empty()) +} + +// ElementType returns the element type for the Set. +func (s SetValue) ElementType(_ context.Context) attr.Type { + return s.elementType +} + +// Type returns a SetType with the same element type as `s`. +func (s SetValue) Type(ctx context.Context) attr.Type { + return SetType{ElemType: s.ElementType(ctx)} +} + +// ToTerraformValue returns the data contained in the Set as a tftypes.Value. 
+func (s SetValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + setType := tftypes.Set{ElementType: s.ElementType(ctx).TerraformType(ctx)} + + switch s.state { + case attr.ValueStateKnown: + vals := make([]tftypes.Value, 0, len(s.elements)) + + for _, elem := range s.elements { + val, err := elem.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(setType, tftypes.UnknownValue), err + } + + vals = append(vals, val) + } + + if err := tftypes.ValidateValue(setType, vals); err != nil { + return tftypes.NewValue(setType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(setType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(setType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(setType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Set state in ToTerraformValue: %s", s.state)) + } +} + +// Equal returns true if the Set is considered semantically equal +// (same type and same value) to the attr.Value passed as an argument. +func (s SetValue) Equal(o attr.Value) bool { + other, ok := o.(SetValue) + + if !ok { + return false + } + + if !s.elementType.Equal(other.elementType) { + return false + } + + if s.state != other.state { + return false + } + + if s.state != attr.ValueStateKnown { + return true + } + + if len(s.elements) != len(other.elements) { + return false + } + + for _, elem := range s.elements { + if !other.contains(elem) { + return false + } + } + + return true +} + +func (s SetValue) contains(v attr.Value) bool { + for _, elem := range s.Elements() { + if elem.Equal(v) { + return true + } + } + + return false +} + +// IsNull returns true if the Set represents a null value. +func (s SetValue) IsNull() bool { + return s.state == attr.ValueStateNull +} + +// IsUnknown returns true if the Set represents a currently unknown value. +// Returns false if the Set has a known number of elements, even if all are +// unknown values. 
+func (s SetValue) IsUnknown() bool { + return s.state == attr.ValueStateUnknown +} + +// String returns a human-readable representation of the Set value. +// The string returned here is not protected by any compatibility guarantees, +// and is intended for logging and error reporting. +func (s SetValue) String() string { + if s.IsUnknown() { + return attr.UnknownValueString + } + + if s.IsNull() { + return attr.NullValueString + } + + var res strings.Builder + + res.WriteString("[") + for i, e := range s.Elements() { + if i != 0 { + res.WriteString(",") + } + res.WriteString(e.String()) + } + res.WriteString("]") + + return res.String() +} + +// ToSetValue returns the Set. +func (s SetValue) ToSetValue(context.Context) (SetValue, diag.Diagnostics) { + return s, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/string.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/string.go new file mode 100644 index 0000000000..a9c7b8cb5f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/string.go @@ -0,0 +1,148 @@ +package basetypes + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +var ( + _ StringValuable = StringValue{} +) + +// StringValuable extends attr.Value for string value types. +// Implement this interface to create a custom String value type. +type StringValuable interface { + attr.Value + + // ToStringValue should convert the value type to a String. + ToStringValue(ctx context.Context) (StringValue, diag.Diagnostics) +} + +// NewStringNull creates a String with a null value. Determine whether the value is +// null via the String type IsNull method. 
+// +// Setting the deprecated String type Null, Unknown, or Value fields after +// creating a String with this function has no effect. +func NewStringNull() StringValue { + return StringValue{ + state: attr.ValueStateNull, + } +} + +// NewStringUnknown creates a String with an unknown value. Determine whether the +// value is unknown via the String type IsUnknown method. +// +// Setting the deprecated String type Null, Unknown, or Value fields after +// creating a String with this function has no effect. +func NewStringUnknown() StringValue { + return StringValue{ + state: attr.ValueStateUnknown, + } +} + +// NewStringValue creates a String with a known value. Access the value via the String +// type ValueString method. +// +// Setting the deprecated String type Null, Unknown, or Value fields after +// creating a String with this function has no effect. +func NewStringValue(value string) StringValue { + return StringValue{ + state: attr.ValueStateKnown, + value: value, + } +} + +// StringValue represents a UTF-8 string value. +type StringValue struct { + // state represents whether the value is null, unknown, or known. The + // zero-value is null. + state attr.ValueState + + // value contains the known value, if not null or unknown. + value string +} + +// Type returns a StringType. +func (s StringValue) Type(_ context.Context) attr.Type { + return StringType{} +} + +// ToTerraformValue returns the data contained in the *String as a tftypes.Value. 
+func (s StringValue) ToTerraformValue(_ context.Context) (tftypes.Value, error) { + switch s.state { + case attr.ValueStateKnown: + if err := tftypes.ValidateValue(tftypes.String, s.value); err != nil { + return tftypes.NewValue(tftypes.String, tftypes.UnknownValue), err + } + + return tftypes.NewValue(tftypes.String, s.value), nil + case attr.ValueStateNull: + return tftypes.NewValue(tftypes.String, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(tftypes.String, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled String state in ToTerraformValue: %s", s.state)) + } +} + +// Equal returns true if `other` is a String and has the same value as `s`. +func (s StringValue) Equal(other attr.Value) bool { + o, ok := other.(StringValue) + + if !ok { + return false + } + + if s.state != o.state { + return false + } + + if s.state != attr.ValueStateKnown { + return true + } + + return s.value == o.value +} + +// IsNull returns true if the String represents a null value. +func (s StringValue) IsNull() bool { + return s.state == attr.ValueStateNull +} + +// IsUnknown returns true if the String represents a currently unknown value. +func (s StringValue) IsUnknown() bool { + return s.state == attr.ValueStateUnknown +} + +// String returns a human-readable representation of the String value. Use +// the ValueString method for Terraform data handling instead. +// +// The string returned here is not protected by any compatibility guarantees, +// and is intended for logging and error reporting. +func (s StringValue) String() string { + if s.IsUnknown() { + return attr.UnknownValueString + } + + if s.IsNull() { + return attr.NullValueString + } + + return fmt.Sprintf("%q", s.value) +} + +// ValueString returns the known string value. If String is null or unknown, returns +// "". +func (s StringValue) ValueString() string { + return s.value +} + +// ToStringValue returns String. 
+func (s StringValue) ToStringValue(context.Context) (StringValue, diag.Diagnostics) { + return s, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/string_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/string_type.go new file mode 100644 index 0000000000..95c0920c80 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/basetypes/string_type.go @@ -0,0 +1,83 @@ +package basetypes + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// StringTypable extends attr.Type for string types. +// Implement this interface to create a custom StringType type. +type StringTypable interface { + attr.Type + + // ValueFromString should convert the String to a StringValuable type. + ValueFromString(context.Context, StringValue) (StringValuable, diag.Diagnostics) +} + +var _ StringTypable = StringType{} + +// StringType is the base framework type for a string. StringValue is the +// associated value type. +type StringType struct{} + +// ApplyTerraform5AttributePathStep applies the given AttributePathStep to the +// type. +func (t StringType) ApplyTerraform5AttributePathStep(step tftypes.AttributePathStep) (interface{}, error) { + return nil, fmt.Errorf("cannot apply AttributePathStep %T to %s", step, t.String()) +} + +// Equal returns true if the given type is equivalent. +func (t StringType) Equal(o attr.Type) bool { + _, ok := o.(StringType) + + return ok +} + +// String returns a human readable string of the type name. +func (t StringType) String() string { + return "basetypes.StringType" +} + +// TerraformType returns the tftypes.Type that should be used to represent this +// framework type. 
+func (t StringType) TerraformType(_ context.Context) tftypes.Type { + return tftypes.String +} + +// ValueFromString returns a StringValuable type given a StringValue. +func (t StringType) ValueFromString(_ context.Context, v StringValue) (StringValuable, diag.Diagnostics) { + return v, nil +} + +// ValueFromTerraform returns a Value given a tftypes.Value. This is meant to +// convert the tftypes.Value into a more convenient Go type for the provider to +// consume the data with. +func (t StringType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if !in.IsKnown() { + return NewStringUnknown(), nil + } + + if in.IsNull() { + return NewStringNull(), nil + } + + var s string + + err := in.As(&s) + + if err != nil { + return nil, err + } + + return NewStringValue(s), nil +} + +// ValueType returns the Value type. +func (t StringType) ValueType(_ context.Context) attr.Value { + // This Value does not need to be valid. + return StringValue{} +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/bool_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/bool_type.go new file mode 100644 index 0000000000..03b67c74ae --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/bool_type.go @@ -0,0 +1,5 @@ +package types + +import "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + +var BoolType = basetypes.BoolType{} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/bool_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/bool_value.go new file mode 100644 index 0000000000..fd6591a66e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/bool_value.go @@ -0,0 +1,23 @@ +package types + +import 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes" + +type Bool = basetypes.BoolValue + +// BoolNull creates a Bool with a null value. Determine whether the value is +// null via the Bool type IsNull method. +func BoolNull() basetypes.BoolValue { + return basetypes.NewBoolNull() +} + +// BoolUnknown creates a Bool with an unknown value. Determine whether the +// value is unknown via the Bool type IsUnknown method. +func BoolUnknown() basetypes.BoolValue { + return basetypes.NewBoolUnknown() +} + +// BoolValue creates a Bool with a known value. Access the value via the Bool +// type ValueBool method. +func BoolValue(value bool) basetypes.BoolValue { + return basetypes.NewBoolValue(value) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/doc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/doc.go new file mode 100644 index 0000000000..d72ca0a17c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/doc.go @@ -0,0 +1,8 @@ +// Package types contains the framework-defined data types and values, such as +// boolean, floating point, integer, list, map, object, set, and string. +// +// This package contains creation functions and type aliases for most provider +// use cases. The actual schema-ready type and value type implementations are +// under the basetypes package. Embed those basetypes implementations to create +// custom types. 
+package types diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/float64_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/float64_type.go new file mode 100644 index 0000000000..717d99848e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/float64_type.go @@ -0,0 +1,5 @@ +package types + +import "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + +var Float64Type = basetypes.Float64Type{} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/float64_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/float64_value.go new file mode 100644 index 0000000000..cd6bb6c672 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/float64_value.go @@ -0,0 +1,23 @@ +package types + +import "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + +type Float64 = basetypes.Float64Value + +// Float64Null creates a Float64 with a null value. Determine whether the value is +// null via the Float64 type IsNull method. +func Float64Null() basetypes.Float64Value { + return basetypes.NewFloat64Null() +} + +// Float64Unknown creates a Float64 with an unknown value. Determine whether the +// value is unknown via the Float64 type IsUnknown method. +func Float64Unknown() basetypes.Float64Value { + return basetypes.NewFloat64Unknown() +} + +// Float64Value creates a Float64 with a known value. Access the value via the Float64 +// type ValueFloat64 method. 
+func Float64Value(value float64) basetypes.Float64Value { + return basetypes.NewFloat64Value(value) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/int64_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/int64_type.go new file mode 100644 index 0000000000..52f20876d4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/int64_type.go @@ -0,0 +1,5 @@ +package types + +import "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + +var Int64Type = basetypes.Int64Type{} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/int64_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/int64_value.go new file mode 100644 index 0000000000..fdb146f523 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/int64_value.go @@ -0,0 +1,23 @@ +package types + +import "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + +type Int64 = basetypes.Int64Value + +// Int64Null creates a Int64 with a null value. Determine whether the value is +// null via the Int64 type IsNull method. +func Int64Null() basetypes.Int64Value { + return basetypes.NewInt64Null() +} + +// Int64Unknown creates a Int64 with an unknown value. Determine whether the +// value is unknown via the Int64 type IsUnknown method. +func Int64Unknown() basetypes.Int64Value { + return basetypes.NewInt64Unknown() +} + +// Int64Value creates a Int64 with a known value. Access the value via the +// Int64 type ValueInt64 method. 
+func Int64Value(value int64) basetypes.Int64Value { + return basetypes.NewInt64Value(value) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/list_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/list_type.go new file mode 100644 index 0000000000..8cd6d3df7b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/list_type.go @@ -0,0 +1,5 @@ +package types + +import "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + +type ListType = basetypes.ListType diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/list_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/list_value.go new file mode 100644 index 0000000000..012963e264 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/list_value.go @@ -0,0 +1,47 @@ +package types + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +type List = basetypes.ListValue + +// ListNull creates a List with a null value. Determine whether the value is +// null via the List type IsNull method. +func ListNull(elementType attr.Type) basetypes.ListValue { + return basetypes.NewListNull(elementType) +} + +// ListUnknown creates a List with an unknown value. Determine whether the +// value is unknown via the List type IsUnknown method. +func ListUnknown(elementType attr.Type) basetypes.ListValue { + return basetypes.NewListUnknown(elementType) +} + +// ListValue creates a List with a known value. Access the value via the List +// type Elements or ElementsAs methods. 
+func ListValue(elementType attr.Type, elements []attr.Value) (basetypes.ListValue, diag.Diagnostics) { + return basetypes.NewListValue(elementType, elements) +} + +// ListValueFrom creates a List with a known value, using reflection rules. +// The elements must be a slice which can convert into the given element type. +// Access the value via the List type Elements or ElementsAs methods. +func ListValueFrom(ctx context.Context, elementType attr.Type, elements any) (basetypes.ListValue, diag.Diagnostics) { + return basetypes.NewListValueFrom(ctx, elementType, elements) +} + +// ListValueMust creates a List with a known value, converting any diagnostics +// into a panic at runtime. Access the value via the List +// type Elements or ElementsAs methods. +// +// This creation function is only recommended to create List values which will +// not potentially affect practitioners, such as testing, or exhaustively +// tested provider logic. +func ListValueMust(elementType attr.Type, elements []attr.Value) basetypes.ListValue { + return basetypes.NewListValueMust(elementType, elements) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/map_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/map_type.go new file mode 100644 index 0000000000..20bcdc5879 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/map_type.go @@ -0,0 +1,5 @@ +package types + +import "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + +type MapType = basetypes.MapType diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/map_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/map_value.go new file mode 100644 index 0000000000..444d79a5f7 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/map_value.go @@ -0,0 +1,47 @@ +package types + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +type Map = basetypes.MapValue + +// MapNull creates a Map with a null value. Determine whether the value is +// null via the Map type IsNull method. +func MapNull(elementType attr.Type) basetypes.MapValue { + return basetypes.NewMapNull(elementType) +} + +// MapUnknown creates a Map with an unknown value. Determine whether the +// value is unknown via the Map type IsUnknown method. +func MapUnknown(elementType attr.Type) basetypes.MapValue { + return basetypes.NewMapUnknown(elementType) +} + +// MapValue creates a Map with a known value. Access the value via the Map +// type Elements or ElementsAs methods. +func MapValue(elementType attr.Type, elements map[string]attr.Value) (basetypes.MapValue, diag.Diagnostics) { + return basetypes.NewMapValue(elementType, elements) +} + +// MapValueFrom creates a Map with a known value, using reflection rules. +// The elements must be a map which can convert into the given element type. +// Access the value via the Map type Elements or ElementsAs methods. +func MapValueFrom(ctx context.Context, elementType attr.Type, elements any) (basetypes.MapValue, diag.Diagnostics) { + return basetypes.NewMapValueFrom(ctx, elementType, elements) +} + +// MapValueMust creates a Map with a known value, converting any diagnostics +// into a panic at runtime. Access the value via the Map +// type Elements or ElementsAs methods. +// +// This creation function is only recommended to create Map values which will +// not potentially affect practitioners, such as testing, or exhaustively +// tested provider logic. 
+func MapValueMust(elementType attr.Type, elements map[string]attr.Value) basetypes.MapValue { + return basetypes.NewMapValueMust(elementType, elements) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/number_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/number_type.go new file mode 100644 index 0000000000..e74c1e1acb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/number_type.go @@ -0,0 +1,5 @@ +package types + +import "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + +var NumberType = basetypes.NumberType{} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/number_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/number_value.go new file mode 100644 index 0000000000..86e26bbdf2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/number_value.go @@ -0,0 +1,27 @@ +package types + +import ( + "math/big" + + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +type Number = basetypes.NumberValue + +// NumberNull creates a Number with a null value. Determine whether the value is +// null via the Number type IsNull method. +func NumberNull() basetypes.NumberValue { + return basetypes.NewNumberNull() +} + +// NumberUnknown creates a Number with an unknown value. Determine whether the +// value is unknown via the Number type IsUnknown method. +func NumberUnknown() basetypes.NumberValue { + return basetypes.NewNumberUnknown() +} + +// NumberValue creates a Number with a known value. Access the value via the Number +// type ValueBigFloat method. If the given value is nil, a null Number is created. 
+func NumberValue(value *big.Float) basetypes.NumberValue { + return basetypes.NewNumberValue(value) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/object_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/object_type.go new file mode 100644 index 0000000000..688594f9ae --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/object_type.go @@ -0,0 +1,5 @@ +package types + +import "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + +type ObjectType = basetypes.ObjectType diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/object_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/object_value.go new file mode 100644 index 0000000000..453bf3fa9b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/object_value.go @@ -0,0 +1,47 @@ +package types + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +type Object = basetypes.ObjectValue + +// ObjectNull creates a Object with a null value. Determine whether the value is +// null via the Object type IsNull method. +func ObjectNull(attributeTypes map[string]attr.Type) basetypes.ObjectValue { + return basetypes.NewObjectNull(attributeTypes) +} + +// ObjectUnknown creates a Object with an unknown value. Determine whether the +// value is unknown via the Object type IsUnknown method. +func ObjectUnknown(attributeTypes map[string]attr.Type) basetypes.ObjectValue { + return basetypes.NewObjectUnknown(attributeTypes) +} + +// ObjectValue creates a Object with a known value. Access the value via the Object +// type Attributes or As methods. 
+func ObjectValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (basetypes.ObjectValue, diag.Diagnostics) { + return basetypes.NewObjectValue(attributeTypes, attributes) +} + +// ObjectValueFrom creates a Object with a known value, using reflection rules. +// The attributes must be a struct which can convert into the given attribute types. +// Access the value via the Object type Attributes or As methods. +func ObjectValueFrom(ctx context.Context, attributeTypes map[string]attr.Type, attributes any) (basetypes.ObjectValue, diag.Diagnostics) { + return basetypes.NewObjectValueFrom(ctx, attributeTypes, attributes) +} + +// ObjectValueMust creates a Object with a known value, converting any diagnostics +// into a panic at runtime. Access the value via the Object +// type Attributes or As methods. +// +// This creation function is only recommended to create Object values which will +// not potentially affect practitioners, such as testing, or exhaustively +// tested provider logic. 
+func ObjectValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) basetypes.ObjectValue { + return basetypes.NewObjectValueMust(attributeTypes, attributes) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/set_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/set_type.go new file mode 100644 index 0000000000..990b143140 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/set_type.go @@ -0,0 +1,5 @@ +package types + +import "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + +type SetType = basetypes.SetType diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/set_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/set_value.go new file mode 100644 index 0000000000..31d5a62500 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/set_value.go @@ -0,0 +1,47 @@ +package types + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +type Set = basetypes.SetValue + +// SetNull creates a Set with a null value. Determine whether the value is +// null via the Set type IsNull method. +func SetNull(elementType attr.Type) basetypes.SetValue { + return basetypes.NewSetNull(elementType) +} + +// SetUnknown creates a Set with an unknown value. Determine whether the +// value is unknown via the Set type IsUnknown method. +func SetUnknown(elementType attr.Type) basetypes.SetValue { + return basetypes.NewSetUnknown(elementType) +} + +// SetValue creates a Set with a known value. Access the value via the Set +// type Elements or ElementsAs methods. 
+func SetValue(elementType attr.Type, elements []attr.Value) (basetypes.SetValue, diag.Diagnostics) { + return basetypes.NewSetValue(elementType, elements) +} + +// SetValueFrom creates a Set with a known value, using reflection rules. +// The elements must be a slice which can convert into the given element type. +// Access the value via the Set type Elements or ElementsAs methods. +func SetValueFrom(ctx context.Context, elementType attr.Type, elements any) (basetypes.SetValue, diag.Diagnostics) { + return basetypes.NewSetValueFrom(ctx, elementType, elements) +} + +// SetValueMust creates a Set with a known value, converting any diagnostics +// into a panic at runtime. Access the value via the Set +// type Elements or ElementsAs methods. +// +// This creation function is only recommended to create Set values which will +// not potentially affect practitioners, such as testing, or exhaustively +// tested provider logic. +func SetValueMust(elementType attr.Type, elements []attr.Value) basetypes.SetValue { + return basetypes.NewSetValueMust(elementType, elements) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/string_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/string_type.go new file mode 100644 index 0000000000..1f5c251991 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/string_type.go @@ -0,0 +1,5 @@ +package types + +import "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + +var StringType = basetypes.StringType{} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/string_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/string_value.go new file mode 100644 index 0000000000..b7934199e6 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-framework/types/string_value.go @@ -0,0 +1,23 @@ +package types + +import "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + +type String = basetypes.StringValue + +// StringNull creates a String with a null value. Determine whether the value is +// null via the String type IsNull method. +func StringNull() basetypes.StringValue { + return basetypes.NewStringNull() +} + +// StringUnknown creates a String with an unknown value. Determine whether the +// value is unknown via the String type IsUnknown method. +func StringUnknown() basetypes.StringValue { + return basetypes.NewStringUnknown() +} + +// StringValue creates a String with a known value. Access the value via the String +// type ValueString method. +func StringValue(value string) basetypes.StringValue { + return basetypes.NewStringValue(value) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go deleted file mode 100644 index 263a1ff576..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go +++ /dev/null @@ -1,173 +0,0 @@ -package acctest - -import ( - "bytes" - crand "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "math/big" - "math/rand" - "net" - "strings" - "time" - - "github.com/apparentlymart/go-cidr/cidr" - "golang.org/x/crypto/ssh" -) - -func init() { - rand.Seed(time.Now().UTC().UnixNano()) -} - -// Helpers for generating random tidbits for use in identifiers to prevent -// collisions in acceptance tests. 
- -// RandInt generates a random integer -func RandInt() int { - return rand.Int() -} - -// RandomWithPrefix is used to generate a unique name with a prefix, for -// randomizing names in acceptance tests -func RandomWithPrefix(name string) string { - return fmt.Sprintf("%s-%d", name, RandInt()) -} - -// RandIntRange returns a random integer between min (inclusive) and max (exclusive) -func RandIntRange(min int, max int) int { - return rand.Intn(max-min) + min -} - -// RandString generates a random alphanumeric string of the length specified -func RandString(strlen int) string { - return RandStringFromCharSet(strlen, CharSetAlphaNum) -} - -// RandStringFromCharSet generates a random string by selecting characters from -// the charset provided -func RandStringFromCharSet(strlen int, charSet string) string { - result := make([]byte, strlen) - for i := 0; i < strlen; i++ { - result[i] = charSet[RandIntRange(0, len(charSet))] - } - return string(result) -} - -// RandSSHKeyPair generates a public and private SSH key pair. The public key is -// returned in OpenSSH format, and the private key is PEM encoded. -func RandSSHKeyPair(comment string) (string, string, error) { - privateKey, privateKeyPEM, err := genPrivateKey() - if err != nil { - return "", "", err - } - - publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey) - if err != nil { - return "", "", err - } - keyMaterial := strings.TrimSpace(string(ssh.MarshalAuthorizedKey(publicKey))) - return fmt.Sprintf("%s %s", keyMaterial, comment), privateKeyPEM, nil -} - -// RandTLSCert generates a self-signed TLS certificate with a newly created -// private key, and returns both the cert and the private key PEM encoded. 
-func RandTLSCert(orgName string) (string, string, error) { - template := &x509.Certificate{ - SerialNumber: big.NewInt(int64(RandInt())), - Subject: pkix.Name{ - Organization: []string{orgName}, - }, - NotBefore: time.Now(), - NotAfter: time.Now().Add(24 * time.Hour), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - } - - privateKey, privateKeyPEM, err := genPrivateKey() - if err != nil { - return "", "", err - } - - cert, err := x509.CreateCertificate(crand.Reader, template, template, &privateKey.PublicKey, privateKey) - if err != nil { - return "", "", err - } - - certPEM, err := pemEncode(cert, "CERTIFICATE") - if err != nil { - return "", "", err - } - - return certPEM, privateKeyPEM, nil -} - -// RandIpAddress returns a random IP address in the specified CIDR block. -// The prefix length must be less than 31. -func RandIpAddress(s string) (string, error) { - _, network, err := net.ParseCIDR(s) - if err != nil { - return "", err - } - - firstIp, lastIp := cidr.AddressRange(network) - first := &big.Int{} - first.SetBytes([]byte(firstIp)) - last := &big.Int{} - last.SetBytes([]byte(lastIp)) - r := &big.Int{} - r.Sub(last, first) - if bitLen := r.BitLen(); bitLen > 31 { - return "", fmt.Errorf("CIDR range is too large: %d", bitLen) - } - - max := int(r.Int64()) - if max == 0 { - // panic: invalid argument to Int31n - return firstIp.String(), nil - } - - host, err := cidr.Host(network, RandIntRange(0, max)) - if err != nil { - return "", err - } - - return host.String(), nil -} - -func genPrivateKey() (*rsa.PrivateKey, string, error) { - privateKey, err := rsa.GenerateKey(crand.Reader, 1024) - if err != nil { - return nil, "", err - } - - privateKeyPEM, err := pemEncode(x509.MarshalPKCS1PrivateKey(privateKey), "RSA PRIVATE KEY") - if err != nil { - return nil, "", err - } - - return privateKey, privateKeyPEM, nil -} - -func pemEncode(b []byte, 
block string) (string, error) { - var buf bytes.Buffer - pb := &pem.Block{Type: block, Bytes: b} - if err := pem.Encode(&buf, pb); err != nil { - return "", err - } - - return buf.String(), nil -} - -const ( - // CharSetAlphaNum is the alphanumeric character set for use with - // RandStringFromCharSet - CharSetAlphaNum = "abcdefghijklmnopqrstuvwxyz012346789" - - // CharSetAlpha is the alphabetical character set for use with - // RandStringFromCharSet - CharSetAlpha = "abcdefghijklmnopqrstuvwxyz" -) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/.copywrite.hcl b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/.copywrite.hcl new file mode 100644 index 0000000000..932daa1fc5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/.copywrite.hcl @@ -0,0 +1,27 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2017 + + # (OPTIONAL) A list of globs that should not have copyright/license headers. + # Supports doublestar glob patterns for more flexibility in defining which + # files or folders should be ignored + header_ignore = [ + # Some ignores here are not strictly needed, but protects us if we change the types of files we put in those folders + # See here for file extensions altered by copywrite CLI (all other extensions are ignored) + # https://github.com/hashicorp/copywrite/blob/4af928579f5aa8f1dece9de1bb3098218903053d/addlicense/main.go#L357-L394 + ".release/**", + ".changelog/**", + "examples/**", + "scripts/**", + "google/test-fixtures/**", + "META.d/*.yml", + ".golangci.yml", + ".goreleaser.yml", + ] + + # (OPTIONAL) Links to an upstream repo for determining repo relationships + # This is for special cases and should not normally be set. 
+ upstream = "GoogleCloudPlatform/magic-modules" +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/.go-version b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/.go-version index ec6d649be6..91c48c058d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/.go-version +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/.go-version @@ -1 +1 @@ -1.18.1 +1.19.9 diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/CHANGELOG.md b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/CHANGELOG.md index 4fbe82f962..c315a65b69 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/CHANGELOG.md +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/CHANGELOG.md @@ -1,4 +1,423 @@ -## 4.59.0 (Unreleased) +## 4.75.0 (Unreleased) + +## 4.74.0 (July 18, 2023) + +FEATURES: +* **New Resource:** `google_cloudbuildv2_connection` ([#15098](https://github.com/hashicorp/terraform-provider-google/pull/15098)) +* **New Resource:** `google_cloudbuildv2_repository` ([#15098](https://github.com/hashicorp/terraform-provider-google/pull/15098)) +* **New Resource:** `google_gkeonprem_bare_metal_admin_cluster` ([#15099](https://github.com/hashicorp/terraform-provider-google/pull/15099)) +* **New Resource:** `google_network_security_address_group` ([#15111](https://github.com/hashicorp/terraform-provider-google/pull/15111)) +* **New Resource:** `google_network_security_gateway_security_policy_rule` ([#15112](https://github.com/hashicorp/terraform-provider-google/pull/15112)) +* **New Resource:** `google_network_security_gateway_security_policy` ([#15112](https://github.com/hashicorp/terraform-provider-google/pull/15112)) +* **New Resource:** `google_network_security_url_lists` 
([#15112](https://github.com/hashicorp/terraform-provider-google/pull/15112)) +* **New Resource:** `google_network_services_gateway` ([#15112](https://github.com/hashicorp/terraform-provider-google/pull/15112)) + +IMPROVEMENTS: +* bigquery: added `storage_billing_model` argument to `google_bigquery_dataset` ([#15115](https://github.com/hashicorp/terraform-provider-google/pull/15115)) +* bigquery: added `external_data_configuration.metadata_cache_mode` and `external_data_configuration.object_metadata` to `google_bigquery_table` ([#15096](https://github.com/hashicorp/terraform-provider-google/pull/15096)) +* bigquery: made `external_data_configuration.source_fomat` optional in `google_bigquery_table` ([#15096](https://github.com/hashicorp/terraform-provider-google/pull/15096)) +* certificatemanager: added `issuance_config` field to `google_certificate_manager_certificate` resource ([#15101](https://github.com/hashicorp/terraform-provider-google/pull/15101)) +* cloudbuild: added `repository_event_config` field to `google_cloudbuild_trigger` resource ([#15098](https://github.com/hashicorp/terraform-provider-google/pull/15098)) +* compute: added field `http_keep_alive_timeout_sec` to resource `google_compute_target_http_proxy` ([#15109](https://github.com/hashicorp/terraform-provider-google/pull/15109)) +* compute: added field `http_keep_alive_timeout_sec` to resource `google_compute_target_https_proxy` ([#15109](https://github.com/hashicorp/terraform-provider-google/pull/15109)) +* compute: added support for updating labels in `google_compute_external_vpn_gateway` ([#15134](https://github.com/hashicorp/terraform-provider-google/pull/15134)) +* container: made `monitoring_config.enable_components` optional on `google_container_cluster` ([#15131](https://github.com/hashicorp/terraform-provider-google/pull/15131)) +* container: added field `tpu_topology` under `placement_policy` in resource `google_container_node_pool` 
([#15130](https://github.com/hashicorp/terraform-provider-google/pull/15130)) +* gkehub: promoted the `google_gke_hub_feature` resource's `fleetobservability` block to GA. ([#15105](https://github.com/hashicorp/terraform-provider-google/pull/15105)) +* iamworkforcepool: added `oidc.client_secret` field to `google_iam_workforce_pool_provider` and new enum values `CODE` and `MERGE_ID_TOKEN_OVER_USER_INFO_CLAIMS` to `oidc.web_sso_config.response_type` and `oidc.web_sso_config.assertion_claims_behavior` respectively ([#15069](https://github.com/hashicorp/terraform-provider-google/pull/15069)) +* sql: added `settings.data_cache_config` to `sql_database_instance` resource. ([#15127](https://github.com/hashicorp/terraform-provider-google/pull/15127)) +* sql: added `settings.edition` field to `sql_database_instance` resource. ([#15127](https://github.com/hashicorp/terraform-provider-google/pull/15127)) +* vertexai: supported `shard_size` in `google_vertex_ai_index` ([#15133](https://github.com/hashicorp/terraform-provider-google/pull/15133)) + +BUG FIXES: +* compute: made `google_compute_router_peer.peer_ip_address` optional ([#15095](https://github.com/hashicorp/terraform-provider-google/pull/15095)) +* redis: fixed issue with `google_redis_instance` populating output-only field `maintenance_schedule`. 
([#15063](https://github.com/hashicorp/terraform-provider-google/pull/15063)) +* orgpolicy: fixed forcing recreation on imported state for `google_org_policy_policy` ([#15132](https://github.com/hashicorp/terraform-provider-google/pull/15132)) +* osconfig: fixed validation of file resource `state` fields in `google_os_config_os_policy_assignment` ([#15107](https://github.com/hashicorp/terraform-provider-google/pull/15107)) + +## 4.73.2 (July 17, 2023) + +BUG FIXES: +* monitoring: fixed an issue which occurred when `name` field of `google_monitoring_monitored_project` was long-form + +## 4.73.1 (July 13, 2023) + +BUG FIXES: +* monitoring: fixed an issue causing `google_monitoring_monitored_project` to appear to be deleted + +## 4.73.0 (July 10, 2023) + +FEATURES: +* **New Resource:** `google_firebase_extensions_instance` ([#15013](https://github.com/hashicorp/terraform-provider-google/pull/15013)) + +IMPROVEMENTS: +* compute: added the `no_automate_dns_zone` field to `google_compute_forwarding_rule`. ([#15028](https://github.com/hashicorp/terraform-provider-google/pull/15028)) +* compute: promoted `google_compute_disk_async_replication` resource to GA. ([#15029](https://github.com/hashicorp/terraform-provider-google/pull/15029)) +* compute: promoted `async_primary_disk` field in `google_compute_disk` resource to GA. ([#15029](https://github.com/hashicorp/terraform-provider-google/pull/15029)) +* compute: promoted `async_primary_disk` field in `google_compute_region_disk` resource to GA. ([#15029](https://github.com/hashicorp/terraform-provider-google/pull/15029)) +* compute: promoted `disk_consistency_group_policy` field in `google_compute_resource_policy` resource to GA. 
([#15029](https://github.com/hashicorp/terraform-provider-google/pull/15029)) +* resourcemanager: fixed handling of `google_service_account_id_token` when authenticated with GCE metadata credentials ([#15003](https://github.com/hashicorp/terraform-provider-google/pull/15003)) + +BUG FIXES: +* networkservices: increased default timeout for `google_network_services_edge_cache_keyset` to 90m ([#15024](https://github.com/hashicorp/terraform-provider-google/pull/15024)) + +## 4.72.1 (July 6, 2023) + +BUG FIXES: +* compute: fixed an issue in `google_compute_instance_template` where initialize params stopped the `disk.disk_size_gb` field being used ([#15054](https://github.com/hashicorp/terraform-provider-google/pull/15054)) + +## 4.72.0 (July 3, 2023) + +FEATURES: +* **New Resource:** `google_public_ca_external_account_key` ([#14983](https://github.com/hashicorp/terraform-provider-google/pull/14983)) + +IMPROVEMENTS: +* compute: added `provisioned_throughput` field to `google_compute_disk` used by `hyperdisk-throughput` pd type ([#14985](https://github.com/hashicorp/terraform-provider-google/pull/14985)) +* container: added field `security_posture_config` to resource `google_container_cluster` ([#14999](https://github.com/hashicorp/terraform-provider-google/pull/14999)) +* logging: added support for `locked` to `google_logging_project_bucket_config` ([#14977](https://github.com/hashicorp/terraform-provider-google/pull/14977)) + +BUG FIXES: +* bigquery: fixed an issue where api default value for `edition` field of `google_bigquery_reservation` was not handled ([#14961](https://github.com/hashicorp/terraform-provider-google/pull/14961)) +* cloudfunction2: fixed permadiffs of some fields of `service_config` in `google_cloudfunctions2_function` resource ([#14975](https://github.com/hashicorp/terraform-provider-google/pull/14975)) +* compute: fixed an issue with setting project field to long form in `google_compute_forwarding_rule` and `google_compute_global_forwarding_rule` 
([#14996](https://github.com/hashicorp/terraform-provider-google/pull/14996)) +* gkehub: fixed an issue with setting project field to long form in `google_gke_hub_feature` ([#14996](https://github.com/hashicorp/terraform-provider-google/pull/14996)) + +## 4.71.0 (June 27, 2023) + +FEATURES: +* **New Resource:** `google_gke_hub_feature_iam_*` ([#14912](https://github.com/hashicorp/terraform-provider-google/pull/14912)) +* **New Resource:** `google_gke_hub_feature` ([#14912](https://github.com/hashicorp/terraform-provider-google/pull/14912)) +* **New Resource:** `google_vmwareengine_cluster` ([#14917](https://github.com/hashicorp/terraform-provider-google/pull/14917)) +* **New Resource:** `google_vmwareengine_private_cloud` ([#14917](https://github.com/hashicorp/terraform-provider-google/pull/14917)) + +IMPROVEMENTS: +* apigee: added output-only field `apigee_project_id` to resource `google_apigee_organization` ([#14911](https://github.com/hashicorp/terraform-provider-google/pull/14911)) +* bigtable: increased default timeout for instance operations to 1 hour in resoure `google_bigtable_instance` ([#14909](https://github.com/hashicorp/terraform-provider-google/pull/14909)) +* cloudrunv2: added fields `annotations` and `template.annotations` to resource `google_cloud_run_v2_job` ([#14948](https://github.com/hashicorp/terraform-provider-google/pull/14948)) +* composer: added field `resilience_mode` to resource `google_composer_environment` ([#14939](https://github.com/hashicorp/terraform-provider-google/pull/14939)) +* compute: added support for `params.resource_manager_tags` and `boot_disk.initialize_params.resource_manager_tags` to resource `google_compute_instance` ([#14924](https://github.com/hashicorp/terraform-provider-google/pull/14924)) +* bigquerydatatransfer: made field `service_account_name` mutable in resource `google_bigquery_data_transfer_config` ([#14907](https://github.com/hashicorp/terraform-provider-google/pull/14907)) +* iambeta: added field 
`jwks_json` to resource `google_iam_workload_identity_pool_provider` ([#14938](https://github.com/hashicorp/terraform-provider-google/pull/14938)) + +BUG FIXES: +* bigtable: validated that `cluster_id` values are unique within resource `google_bigtable_instance` ([#14908](https://github.com/hashicorp/terraform-provider-google/pull/14908)) +* storage: fixed a bug that caused a permadiff when the `autoclass.enabled` field was explicitly set to false in resource `google_storage_bucket` ([#14902](https://github.com/hashicorp/terraform-provider-google/pull/14902)) + +## 4.70.0 (June 20, 2023) + +FEATURES: +* **New Resource:** `google_compute_network_endpoints` ([#14869](https://github.com/hashicorp/terraform-provider-google/pull/14869)) +* **New Resource:** `vertex_ai_index_endpoint` ([#14842](https://github.com/hashicorp/terraform-provider-google/pull/14842)) + +IMPROVEMENTS: +* bigtable: added 20 minutes timeout support to `google_bigtable_gc_policy` ([#14861](https://github.com/hashicorp/terraform-provider-google/pull/14861)) +* cloudfunctions2: added `url` output field to `google_cloudfunctions2_function` ([#14851](https://github.com/hashicorp/terraform-provider-google/pull/14851)) +* compute: added field `network_attachment` to `google_compute_instance_template` ([#14874](https://github.com/hashicorp/terraform-provider-google/pull/14874)) +* compute: surfaced additional information about quota exceeded errors for compute resources. ([#14879](https://github.com/hashicorp/terraform-provider-google/pull/14879)) +* compute: added `path_template_match` and `path_template_rewrite` to `google_compute_url_map`. 
([#14873](https://github.com/hashicorp/terraform-provider-google/pull/14873)) +* compute: added ability to update Hyperdisk PD IOPS without recreation to `google_compute_disk` ([#14844](https://github.com/hashicorp/terraform-provider-google/pull/14844)) +* container: added `sole_tenant_config` to `node_config` in `google_container_node_pool` and `google_container_cluster` ([#14897](https://github.com/hashicorp/terraform-provider-google/pull/14897)) +* dataform: added field `workspace_compilation_overrides` to resource `google_dataform_repository` (beta) ([#14839](https://github.com/hashicorp/terraform-provider-google/pull/14839)) +* dlp: added `crypto_hash_config` to `google_data_loss_prevention_deidentify_template` ([#14870](https://github.com/hashicorp/terraform-provider-google/pull/14870)) +* dlp: added `trigger_id` field to `google_data_loss_prevention_job_trigger` ([#14892](https://github.com/hashicorp/terraform-provider-google/pull/14892)) +* dlp: added missing file types `POWERPOINT` and `EXCEL` in `inspect_job.storage_config.cloud_storage_options.file_types` enum to `google_data_loss_prevention_job_trigger` resource ([#14856](https://github.com/hashicorp/terraform-provider-google/pull/14856)) +* dlp: added multiple `sensitivity_score` field to `google_data_loss_prevention_deidentify_template` resource ([#14880](https://github.com/hashicorp/terraform-provider-google/pull/14880)) +* dlp: added multiple `sensitivity_score` field to `google_data_loss_prevention_inspect_template` resource ([#14871](https://github.com/hashicorp/terraform-provider-google/pull/14871)) +* dlp: added multiple `sensitivity_score` field to `google_data_loss_prevention_job_trigger` resource ([#14881](https://github.com/hashicorp/terraform-provider-google/pull/14881)) +* dlp: changed `inspect_template_name` field from required to optional in `google_data_loss_prevention_job_trigger` resource ([#14845](https://github.com/hashicorp/terraform-provider-google/pull/14845)) +* pubsub: allowed 
`definition` field of `google_pubsub_schema` updatable. (https://cloud.google.com/pubsub/docs/schemas#commit-schema-revision) ([#14857](https://github.com/hashicorp/terraform-provider-google/pull/14857)) +* sql: added `POSTGRES_15` to version docs for `database_version` field to `google_sql_database_instance` ([#14891](https://github.com/hashicorp/terraform-provider-google/pull/14891)) +* vpcaccess: added `connected_projects` field to resource `google_vpc_access_connector`. ([#14835](https://github.com/hashicorp/terraform-provider-google/pull/14835)) + +BUG FIXES: +* provider: fixed an issue on multiple resources where non-retryable quota errors were considered retryable ([#14850](https://github.com/hashicorp/terraform-provider-google/pull/14850)) +* vertexai: made `google_vertex_ai_featurestore_entitytype_feature` always use region corresponding to parent's region ([#14843](https://github.com/hashicorp/terraform-provider-google/pull/14843)) + +## 4.69.1 (June 12, 2023) + +NOTE: +* Added a new user guide to the provider documentation ([#14886](https://github.com/hashicorp/terraform-provider-google/pull/14886)) + +## 4.69.0 (June 12, 2023) + +FEATURES: +* **New Data Source:** `google_vmwareengine_network` ([#14821](https://github.com/hashicorp/terraform-provider-google/pull/14821)) +* **New Resource:** `google_access_context_manager_service_perimeter_egress_policy` ([#14817](https://github.com/hashicorp/terraform-provider-google/pull/14817)) +* **New Resource:** `google_access_context_manager_service_perimeter_ingress_policy` ([#14817](https://github.com/hashicorp/terraform-provider-google/pull/14817)) +* **New Resource:** `google_certificate_manager_certificate_issuance_config` ([#14798](https://github.com/hashicorp/terraform-provider-google/pull/14798)) +* **New Resource:** `google_dataplex_datascan` ([#14798](https://github.com/hashicorp/terraform-provider-google/pull/14798)) +* **New Resource:** `google_dataplex_datascan_iam_*` 
([#14828](https://github.com/hashicorp/terraform-provider-google/pull/14828)) +* **New Resource:** `google_vmwareengine_network` ([#14821](https://github.com/hashicorp/terraform-provider-google/pull/14821)) + +IMPROVEMENTS: +* billing: added `lookup_projects` to `google_billing_account` datasource that skips reading the list of associated projects ([#14815](https://github.com/hashicorp/terraform-provider-google/pull/14815)) +* dlp: added `info_type_transformations` block in the `record_transformations` field to `google_data_loss_prevention_deidentify_template` resource. ([#14827](https://github.com/hashicorp/terraform-provider-google/pull/14827)) +* dlp: added `redact_config`, `fixed_size_bucketing_config`, `bucketing_config`, `time_part_config` and `date_shift_config` fields to `google_data_loss_prevention_deidentify_template` resource ([#14797](https://github.com/hashicorp/terraform-provider-google/pull/14797)) +* dlp: added `stored_info_type_id` field to `google_data_loss_prevention_stored_info_type` resource ([#14791](https://github.com/hashicorp/terraform-provider-google/pull/14791)) +* dlp: added `template_id` field to `google_data_loss_prevention_deidentify_template` and `google_data_loss_prevention_inspect_template` ([#14823](https://github.com/hashicorp/terraform-provider-google/pull/14823)) +* dlp: changed `actions` field from required to optional in `google_data_loss_prevention_job_trigger` resource ([#14803](https://github.com/hashicorp/terraform-provider-google/pull/14803)) +* kms: removed validation for `purpose` in `google_kms_crypto_key` to allow newly added values for the field ([#14799](https://github.com/hashicorp/terraform-provider-google/pull/14799)) +* pubsub: allowed `schema_settings` of `google_pubsub_topic` to change without deleting and recreating the resource ([#14819](https://github.com/hashicorp/terraform-provider-google/pull/14819)) + +BUG FIXES: +* tags: fixed providing `projects/ "/24" - // We should only compare the mask portion for 
this case. - if len(new) > 0 && new[0] == '/' { - oldNetmaskStartPos := strings.LastIndex(old, "/") - - if oldNetmaskStartPos != -1 { - oldNetmask := old[strings.LastIndex(old, "/"):] - if oldNetmask == new { - return true - } - } - } - - return false -} - -// sha256DiffSuppress -// if old is the hex-encoded sha256 sum of new, treat them as equal -func sha256DiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - return hex.EncodeToString(sha256.New().Sum([]byte(old))) == new -} - -func caseDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - return strings.ToUpper(old) == strings.ToUpper(new) -} - -// Port range '80' and '80-80' is equivalent. -// `old` is read from the server and always has the full range format (e.g. '80-80', '1024-2048'). -// `new` can be either a single port or a port range. -func portRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - return old == new+"-"+new -} - -// Single-digit hour is equivalent to hour with leading zero e.g. suppress diff 1:00 => 01:00. -// Assume either value could be in either format. -func rfc3339TimeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - if (len(old) == 4 && "0"+old == new) || (len(new) == 4 && "0"+new == old) { - return true - } - return false -} - -// Suppress diffs for blocks where one version is completely unset and the other is set -// to an empty block. This might occur in situations where removing a block completely -// is impossible (if it's computed or part of an AtLeastOneOf), so instead the user sets -// its values to empty. -// NOTE: Using Optional + Computed is *strongly* preferred to this DSF, as it's -// more well understood and resilient to API changes. 
-func emptyOrUnsetBlockDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - o, n := d.GetChange(strings.TrimSuffix(k, ".#")) - return emptyOrUnsetBlockDiffSuppressLogic(k, old, new, o, n) -} - -// The core logic for emptyOrUnsetBlockDiffSuppress, in a format that is more conducive -// to unit testing. -func emptyOrUnsetBlockDiffSuppressLogic(k, old, new string, o, n interface{}) bool { - if !strings.HasSuffix(k, ".#") { - return false - } - var l []interface{} - if old == "0" && new == "1" { - l = n.([]interface{}) - } else if new == "0" && old == "1" { - l = o.([]interface{}) - } else { - // we don't have one set and one unset, so don't suppress the diff - return false - } - - contents, ok := l[0].(map[string]interface{}) - if !ok { - return false - } - for _, v := range contents { - if !isEmptyValue(reflect.ValueOf(v)) { - return false - } - } - return true -} - -// Suppress diffs for values that are equivalent except for their use of the words "location" -// compared to "region" or "zone" -func locationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - return locationDiffSuppressHelper(old, new) || locationDiffSuppressHelper(new, old) -} - -func locationDiffSuppressHelper(a, b string) bool { - return strings.Replace(a, "/locations/", "/regions/", 1) == b || - strings.Replace(a, "/locations/", "/zones/", 1) == b -} - -// For managed SSL certs, if new is an absolute FQDN (trailing '.') but old isn't, treat them as equals. 
-func absoluteDomainSuppress(k, old, new string, _ *schema.ResourceData) bool { - if strings.HasPrefix(k, "managed.0.domains.") { - return old == strings.TrimRight(new, ".") || new == strings.TrimRight(old, ".") - } - return false -} - -func timestampDiffSuppress(format string) schema.SchemaDiffSuppressFunc { - return func(_, old, new string, _ *schema.ResourceData) bool { - oldT, err := time.Parse(format, old) - if err != nil { - return false - } - - newT, err := time.Parse(format, new) - if err != nil { - return false - } - - return oldT == newT - } -} - -// suppress diff when saved is Ipv4 format while new is required a reference -// this happens for an internal ip for Private Services Connect -func internalIpDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - return (net.ParseIP(old) != nil) && (net.ParseIP(new) == nil) -} - -// Suppress diffs for duration format. ex "60.0s" and "60s" same -// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#duration -func durationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - oDuration, err := time.ParseDuration(old) - if err != nil { - return false - } - nDuration, err := time.ParseDuration(new) - if err != nil { - return false - } - return oDuration == nDuration -} - -// Use this method when the field accepts either an IP address or a -// self_link referencing a resource (such as google_compute_route's -// next_hop_ilb) -func compareIpAddressOrSelfLinkOrResourceName(_, old, new string, _ *schema.ResourceData) bool { - // if we can parse `new` as an IP address, then compare as strings - if net.ParseIP(new) != nil { - return new == old - } - - // otherwise compare as self links - return compareSelfLinkOrResourceName("", old, new, nil) -} - -// Use this method when subnet is optioanl and auto_create_subnetworks = true -// API sometimes choose a subnet so the diff needs to be ignored -func compareOptionalSubnet(_, old, new string, _ *schema.ResourceData) bool { - if 
isEmptyValue(reflect.ValueOf(new)) { - return true - } - // otherwise compare as self links - return compareSelfLinkOrResourceName("", old, new, nil) -} - -// Suppress diffs in below cases -// "https://hello-rehvs75zla-uc.a.run.app/" -> "https://hello-rehvs75zla-uc.a.run.app" -// "https://hello-rehvs75zla-uc.a.run.app" -> "https://hello-rehvs75zla-uc.a.run.app/" -func lastSlashDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - if last := len(new) - 1; last >= 0 && new[last] == '/' { - new = new[:last] - } - - if last := len(old) - 1; last >= 0 && old[last] == '/' { - old = old[:last] - } - return new == old -} - -// Suppress diffs when the value read from api -// has the project number instead of the project name -func projectNumberDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - var a2, b2 string - reN := regexp.MustCompile("projects/\\d+") - re := regexp.MustCompile("projects/[^/]+") - replacement := []byte("projects/equal") - a2 = string(reN.ReplaceAll([]byte(old), replacement)) - b2 = string(re.ReplaceAll([]byte(new), replacement)) - return a2 == b2 -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/composer_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/composer_operation.go deleted file mode 100644 index 65f516a1cd..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/composer_operation.go +++ /dev/null @@ -1,30 +0,0 @@ -package google - -import ( - "fmt" - "time" - - "google.golang.org/api/composer/v1" -) - -type ComposerOperationWaiter struct { - Service *composer.ProjectsLocationsService - CommonOperationWaiter -} - -func (w *ComposerOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - return w.Service.Operations.Get(w.Op.Name).Do() -} - -func ComposerOperationWaitTime(config 
*Config, op *composer.Operation, project, activity, userAgent string, timeout time.Duration) error { - w := &ComposerOperationWaiter{ - Service: config.NewComposerClient(userAgent).Projects.Locations, - } - if err := w.SetOp(op); err != nil { - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/config.go deleted file mode 100644 index 168778872f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/config.go +++ /dev/null @@ -1,1360 +0,0 @@ -package google - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "log" - "net/http" - "regexp" - "strconv" - "strings" - "time" - - grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus" - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" - "github.com/sirupsen/logrus" - "google.golang.org/api/option" - - "golang.org/x/oauth2" - googleoauth "golang.org/x/oauth2/google" - appengine "google.golang.org/api/appengine/v1" - "google.golang.org/api/bigquery/v2" - "google.golang.org/api/bigtableadmin/v2" - "google.golang.org/api/cloudbilling/v1" - "google.golang.org/api/cloudbuild/v1" - "google.golang.org/api/cloudfunctions/v1" - "google.golang.org/api/cloudidentity/v1" - "google.golang.org/api/cloudiot/v1" - "google.golang.org/api/cloudkms/v1" - "google.golang.org/api/cloudresourcemanager/v1" - resourceManagerV3 "google.golang.org/api/cloudresourcemanager/v3" - "google.golang.org/api/composer/v1" - "google.golang.org/api/compute/v1" - "google.golang.org/api/container/v1" - dataflow "google.golang.org/api/dataflow/v1b3" - "google.golang.org/api/dataproc/v1" - "google.golang.org/api/dns/v1" - healthcare "google.golang.org/api/healthcare/v1" - "google.golang.org/api/iam/v1" - 
iamcredentials "google.golang.org/api/iamcredentials/v1" - cloudlogging "google.golang.org/api/logging/v2" - "google.golang.org/api/pubsub/v1" - runadminv2 "google.golang.org/api/run/v2" - "google.golang.org/api/servicemanagement/v1" - "google.golang.org/api/servicenetworking/v1" - "google.golang.org/api/serviceusage/v1" - "google.golang.org/api/sourcerepo/v1" - "google.golang.org/api/spanner/v1" - sqladmin "google.golang.org/api/sqladmin/v1beta4" - "google.golang.org/api/storage/v1" - "google.golang.org/api/storagetransfer/v1" - "google.golang.org/api/transport" - "google.golang.org/grpc" -) - -type providerMeta struct { - ModuleName string `cty:"module_name"` -} - -type Formatter struct { - TimestampFormat string - LogFormat string -} - -// Borrowed logic from https://github.com/sirupsen/logrus/blob/master/json_formatter.go and https://github.com/t-tomalak/logrus-easy-formatter/blob/master/formatter.go -func (f *Formatter) Format(entry *logrus.Entry) ([]byte, error) { - // Suppress logs if TF_LOG is not DEBUG or TRACE - if !logging.IsDebugOrHigher() { - return nil, nil - } - - // Also suppress based on log content - // - frequent transport spam - // - ListenSocket logs from gRPC - isTransportSpam := strings.Contains(entry.Message, "transport is closing") - listenSocketRegex := regexp.MustCompile(`\[Server #\d+( ListenSocket #\d+)*\]`) // Match patterns like `[Server #00]` or `[Server #00 ListenSocket #00]` - isListenSocketLog := listenSocketRegex.MatchString(entry.Message) - if isTransportSpam || isListenSocketLog { - return nil, nil - } - - output := f.LogFormat - entry.Level = logrus.DebugLevel // Force Entries to be Debug - - timestampFormat := f.TimestampFormat - - output = strings.Replace(output, "%time%", entry.Time.Format(timestampFormat), 1) - - output = strings.Replace(output, "%msg%", entry.Message, 1) - - level := strings.ToUpper(entry.Level.String()) - output = strings.Replace(output, "%lvl%", level, 1) - - var gRPCMessageFlag bool - for k, val := 
range entry.Data { - switch v := val.(type) { - case string: - output = strings.Replace(output, "%"+k+"%", v, 1) - case int: - s := strconv.Itoa(v) - output = strings.Replace(output, "%"+k+"%", s, 1) - case bool: - s := strconv.FormatBool(v) - output = strings.Replace(output, "%"+k+"%", s, 1) - } - - if k != "system" { - gRPCMessageFlag = true - } - } - - if gRPCMessageFlag { - data := make(logrus.Fields, len(entry.Data)+4) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - - var b *bytes.Buffer - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - encoder := json.NewEncoder(b) - encoder.SetIndent("", " ") - if err := encoder.Encode(data); err != nil { - return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) - } - - finalOutput := append([]byte(output), b.Bytes()...) - return finalOutput, nil - } - - return []byte(output), nil -} - -// Config is the configuration structure used to instantiate the Google -// provider. 
-type Config struct { - DCLConfig - AccessToken string - Credentials string - ImpersonateServiceAccount string - ImpersonateServiceAccountDelegates []string - Project string - Region string - BillingProject string - Zone string - Scopes []string - BatchingConfig *batchingConfig - UserProjectOverride bool - RequestReason string - RequestTimeout time.Duration - // PollInterval is passed to resource.StateChangeConf in common_operation.go - // It controls the interval at which we poll for successful operations - PollInterval time.Duration - - Client *http.Client - context context.Context - UserAgent string - gRPCLoggingOptions []option.ClientOption - - tokenSource oauth2.TokenSource - - AccessApprovalBasePath string - AccessContextManagerBasePath string - ActiveDirectoryBasePath string - AlloydbBasePath string - ApigeeBasePath string - AppEngineBasePath string - ArtifactRegistryBasePath string - BeyondcorpBasePath string - BigQueryBasePath string - BigqueryAnalyticsHubBasePath string - BigqueryConnectionBasePath string - BigqueryDatapolicyBasePath string - BigqueryDataTransferBasePath string - BigqueryReservationBasePath string - BigtableBasePath string - BillingBasePath string - BinaryAuthorizationBasePath string - CertificateManagerBasePath string - CloudAssetBasePath string - CloudBuildBasePath string - CloudFunctionsBasePath string - Cloudfunctions2BasePath string - CloudIdentityBasePath string - CloudIdsBasePath string - CloudIotBasePath string - CloudRunBasePath string - CloudRunV2BasePath string - CloudSchedulerBasePath string - CloudTasksBasePath string - ComputeBasePath string - ContainerAnalysisBasePath string - ContainerAttachedBasePath string - DataCatalogBasePath string - DataFusionBasePath string - DataLossPreventionBasePath string - DataplexBasePath string - DataprocBasePath string - DataprocMetastoreBasePath string - DatastoreBasePath string - DatastreamBasePath string - DeploymentManagerBasePath string - DialogflowBasePath string - DialogflowCXBasePath 
string - DNSBasePath string - DocumentAIBasePath string - EssentialContactsBasePath string - FilestoreBasePath string - FirestoreBasePath string - GameServicesBasePath string - GKEBackupBasePath string - GKEHubBasePath string - HealthcareBasePath string - IAM2BasePath string - IAMBetaBasePath string - IAMWorkforcePoolBasePath string - IapBasePath string - IdentityPlatformBasePath string - KMSBasePath string - LoggingBasePath string - MemcacheBasePath string - MLEngineBasePath string - MonitoringBasePath string - NetworkManagementBasePath string - NetworkServicesBasePath string - NotebooksBasePath string - OSConfigBasePath string - OSLoginBasePath string - PrivatecaBasePath string - PubsubBasePath string - PubsubLiteBasePath string - RedisBasePath string - ResourceManagerBasePath string - SecretManagerBasePath string - SecurityCenterBasePath string - ServiceManagementBasePath string - ServiceUsageBasePath string - SourceRepoBasePath string - SpannerBasePath string - SQLBasePath string - StorageBasePath string - StorageTransferBasePath string - TagsBasePath string - TPUBasePath string - VertexAIBasePath string - VPCAccessBasePath string - WorkflowsBasePath string - - CloudBillingBasePath string - ComposerBasePath string - ContainerBasePath string - DataflowBasePath string - IamCredentialsBasePath string - ResourceManagerV3BasePath string - IAMBasePath string - CloudIoTBasePath string - ServiceNetworkingBasePath string - BigtableAdminBasePath string - TagsLocationBasePath string - - // dcl - ContainerAwsBasePath string - ContainerAzureBasePath string - - RequestBatcherServiceUsage *RequestBatcher - requestBatcherIam *RequestBatcher -} - -const AccessApprovalBasePathKey = "AccessApproval" -const AccessContextManagerBasePathKey = "AccessContextManager" -const ActiveDirectoryBasePathKey = "ActiveDirectory" -const AlloydbBasePathKey = "Alloydb" -const ApigeeBasePathKey = "Apigee" -const AppEngineBasePathKey = "AppEngine" -const ArtifactRegistryBasePathKey = 
"ArtifactRegistry" -const BeyondcorpBasePathKey = "Beyondcorp" -const BigQueryBasePathKey = "BigQuery" -const BigqueryAnalyticsHubBasePathKey = "BigqueryAnalyticsHub" -const BigqueryConnectionBasePathKey = "BigqueryConnection" -const BigqueryDatapolicyBasePathKey = "BigqueryDatapolicy" -const BigqueryDataTransferBasePathKey = "BigqueryDataTransfer" -const BigqueryReservationBasePathKey = "BigqueryReservation" -const BigtableBasePathKey = "Bigtable" -const BillingBasePathKey = "Billing" -const BinaryAuthorizationBasePathKey = "BinaryAuthorization" -const CertificateManagerBasePathKey = "CertificateManager" -const CloudAssetBasePathKey = "CloudAsset" -const CloudBuildBasePathKey = "CloudBuild" -const CloudFunctionsBasePathKey = "CloudFunctions" -const Cloudfunctions2BasePathKey = "Cloudfunctions2" -const CloudIdentityBasePathKey = "CloudIdentity" -const CloudIdsBasePathKey = "CloudIds" -const CloudIotBasePathKey = "CloudIot" -const CloudRunBasePathKey = "CloudRun" -const CloudRunV2BasePathKey = "CloudRunV2" -const CloudSchedulerBasePathKey = "CloudScheduler" -const CloudTasksBasePathKey = "CloudTasks" -const ComputeBasePathKey = "Compute" -const ContainerAnalysisBasePathKey = "ContainerAnalysis" -const ContainerAttachedBasePathKey = "ContainerAttached" -const DataCatalogBasePathKey = "DataCatalog" -const DataFusionBasePathKey = "DataFusion" -const DataLossPreventionBasePathKey = "DataLossPrevention" -const DataplexBasePathKey = "Dataplex" -const DataprocBasePathKey = "Dataproc" -const DataprocMetastoreBasePathKey = "DataprocMetastore" -const DatastoreBasePathKey = "Datastore" -const DatastreamBasePathKey = "Datastream" -const DeploymentManagerBasePathKey = "DeploymentManager" -const DialogflowBasePathKey = "Dialogflow" -const DialogflowCXBasePathKey = "DialogflowCX" -const DNSBasePathKey = "DNS" -const DocumentAIBasePathKey = "DocumentAI" -const EssentialContactsBasePathKey = "EssentialContacts" -const FilestoreBasePathKey = "Filestore" -const FirestoreBasePathKey = 
"Firestore" -const GameServicesBasePathKey = "GameServices" -const GKEBackupBasePathKey = "GKEBackup" -const GKEHubBasePathKey = "GKEHub" -const HealthcareBasePathKey = "Healthcare" -const IAM2BasePathKey = "IAM2" -const IAMBetaBasePathKey = "IAMBeta" -const IAMWorkforcePoolBasePathKey = "IAMWorkforcePool" -const IapBasePathKey = "Iap" -const IdentityPlatformBasePathKey = "IdentityPlatform" -const KMSBasePathKey = "KMS" -const LoggingBasePathKey = "Logging" -const MemcacheBasePathKey = "Memcache" -const MLEngineBasePathKey = "MLEngine" -const MonitoringBasePathKey = "Monitoring" -const NetworkManagementBasePathKey = "NetworkManagement" -const NetworkServicesBasePathKey = "NetworkServices" -const NotebooksBasePathKey = "Notebooks" -const OSConfigBasePathKey = "OSConfig" -const OSLoginBasePathKey = "OSLogin" -const PrivatecaBasePathKey = "Privateca" -const PubsubBasePathKey = "Pubsub" -const PubsubLiteBasePathKey = "PubsubLite" -const RedisBasePathKey = "Redis" -const ResourceManagerBasePathKey = "ResourceManager" -const SecretManagerBasePathKey = "SecretManager" -const SecurityCenterBasePathKey = "SecurityCenter" -const ServiceManagementBasePathKey = "ServiceManagement" -const ServiceUsageBasePathKey = "ServiceUsage" -const SourceRepoBasePathKey = "SourceRepo" -const SpannerBasePathKey = "Spanner" -const SQLBasePathKey = "SQL" -const StorageBasePathKey = "Storage" -const StorageTransferBasePathKey = "StorageTransfer" -const TagsBasePathKey = "Tags" -const TPUBasePathKey = "TPU" -const VertexAIBasePathKey = "VertexAI" -const VPCAccessBasePathKey = "VPCAccess" -const WorkflowsBasePathKey = "Workflows" -const CloudBillingBasePathKey = "CloudBilling" -const ComposerBasePathKey = "Composer" -const ContainerBasePathKey = "Container" -const DataflowBasePathKey = "Dataflow" -const IAMBasePathKey = "IAM" -const IamCredentialsBasePathKey = "IamCredentials" -const ResourceManagerV3BasePathKey = "ResourceManagerV3" -const ServiceNetworkingBasePathKey = "ServiceNetworking" 
-const BigtableAdminBasePathKey = "BigtableAdmin" -const ContainerAwsBasePathKey = "ContainerAws" -const ContainerAzureBasePathKey = "ContainerAzure" -const TagsLocationBasePathKey = "TagsLocation" - -// Generated product base paths -var DefaultBasePaths = map[string]string{ - AccessApprovalBasePathKey: "https://accessapproval.googleapis.com/v1/", - AccessContextManagerBasePathKey: "https://accesscontextmanager.googleapis.com/v1/", - ActiveDirectoryBasePathKey: "https://managedidentities.googleapis.com/v1/", - AlloydbBasePathKey: "https://alloydb.googleapis.com/v1/", - ApigeeBasePathKey: "https://apigee.googleapis.com/v1/", - AppEngineBasePathKey: "https://appengine.googleapis.com/v1/", - ArtifactRegistryBasePathKey: "https://artifactregistry.googleapis.com/v1/", - BeyondcorpBasePathKey: "https://beyondcorp.googleapis.com/v1/", - BigQueryBasePathKey: "https://bigquery.googleapis.com/bigquery/v2/", - BigqueryAnalyticsHubBasePathKey: "https://analyticshub.googleapis.com/v1/", - BigqueryConnectionBasePathKey: "https://bigqueryconnection.googleapis.com/v1/", - BigqueryDatapolicyBasePathKey: "https://bigquerydatapolicy.googleapis.com/v1/", - BigqueryDataTransferBasePathKey: "https://bigquerydatatransfer.googleapis.com/v1/", - BigqueryReservationBasePathKey: "https://bigqueryreservation.googleapis.com/v1/", - BigtableBasePathKey: "https://bigtableadmin.googleapis.com/v2/", - BillingBasePathKey: "https://billingbudgets.googleapis.com/v1/", - BinaryAuthorizationBasePathKey: "https://binaryauthorization.googleapis.com/v1/", - CertificateManagerBasePathKey: "https://certificatemanager.googleapis.com/v1/", - CloudAssetBasePathKey: "https://cloudasset.googleapis.com/v1/", - CloudBuildBasePathKey: "https://cloudbuild.googleapis.com/v1/", - CloudFunctionsBasePathKey: "https://cloudfunctions.googleapis.com/v1/", - Cloudfunctions2BasePathKey: "https://cloudfunctions.googleapis.com/v2/", - CloudIdentityBasePathKey: "https://cloudidentity.googleapis.com/v1/", - CloudIdsBasePathKey: 
"https://ids.googleapis.com/v1/", - CloudIotBasePathKey: "https://cloudiot.googleapis.com/v1/", - CloudRunBasePathKey: "https://{{location}}-run.googleapis.com/", - CloudRunV2BasePathKey: "https://run.googleapis.com/v2/", - CloudSchedulerBasePathKey: "https://cloudscheduler.googleapis.com/v1/", - CloudTasksBasePathKey: "https://cloudtasks.googleapis.com/v2/", - ComputeBasePathKey: "https://compute.googleapis.com/compute/v1/", - ContainerAnalysisBasePathKey: "https://containeranalysis.googleapis.com/v1/", - ContainerAttachedBasePathKey: "https://{{location}}-gkemulticloud.googleapis.com/v1/", - DataCatalogBasePathKey: "https://datacatalog.googleapis.com/v1/", - DataFusionBasePathKey: "https://datafusion.googleapis.com/v1/", - DataLossPreventionBasePathKey: "https://dlp.googleapis.com/v2/", - DataplexBasePathKey: "https://dataplex.googleapis.com/v1/", - DataprocBasePathKey: "https://dataproc.googleapis.com/v1/", - DataprocMetastoreBasePathKey: "https://metastore.googleapis.com/v1/", - DatastoreBasePathKey: "https://datastore.googleapis.com/v1/", - DatastreamBasePathKey: "https://datastream.googleapis.com/v1/", - DeploymentManagerBasePathKey: "https://www.googleapis.com/deploymentmanager/v2/", - DialogflowBasePathKey: "https://dialogflow.googleapis.com/v2/", - DialogflowCXBasePathKey: "https://{{location}}-dialogflow.googleapis.com/v3/", - DNSBasePathKey: "https://dns.googleapis.com/dns/v1/", - DocumentAIBasePathKey: "https://{{location}}-documentai.googleapis.com/v1/", - EssentialContactsBasePathKey: "https://essentialcontacts.googleapis.com/v1/", - FilestoreBasePathKey: "https://file.googleapis.com/v1/", - FirestoreBasePathKey: "https://firestore.googleapis.com/v1/", - GameServicesBasePathKey: "https://gameservices.googleapis.com/v1/", - GKEBackupBasePathKey: "https://gkebackup.googleapis.com/v1/", - GKEHubBasePathKey: "https://gkehub.googleapis.com/v1/", - HealthcareBasePathKey: "https://healthcare.googleapis.com/v1/", - IAM2BasePathKey: 
"https://iam.googleapis.com/v2/", - IAMBetaBasePathKey: "https://iam.googleapis.com/v1/", - IAMWorkforcePoolBasePathKey: "https://iam.googleapis.com/v1/", - IapBasePathKey: "https://iap.googleapis.com/v1/", - IdentityPlatformBasePathKey: "https://identitytoolkit.googleapis.com/v2/", - KMSBasePathKey: "https://cloudkms.googleapis.com/v1/", - LoggingBasePathKey: "https://logging.googleapis.com/v2/", - MemcacheBasePathKey: "https://memcache.googleapis.com/v1/", - MLEngineBasePathKey: "https://ml.googleapis.com/v1/", - MonitoringBasePathKey: "https://monitoring.googleapis.com/", - NetworkManagementBasePathKey: "https://networkmanagement.googleapis.com/v1/", - NetworkServicesBasePathKey: "https://networkservices.googleapis.com/v1/", - NotebooksBasePathKey: "https://notebooks.googleapis.com/v1/", - OSConfigBasePathKey: "https://osconfig.googleapis.com/v1/", - OSLoginBasePathKey: "https://oslogin.googleapis.com/v1/", - PrivatecaBasePathKey: "https://privateca.googleapis.com/v1/", - PubsubBasePathKey: "https://pubsub.googleapis.com/v1/", - PubsubLiteBasePathKey: "https://{{region}}-pubsublite.googleapis.com/v1/admin/", - RedisBasePathKey: "https://redis.googleapis.com/v1/", - ResourceManagerBasePathKey: "https://cloudresourcemanager.googleapis.com/v1/", - SecretManagerBasePathKey: "https://secretmanager.googleapis.com/v1/", - SecurityCenterBasePathKey: "https://securitycenter.googleapis.com/v1/", - ServiceManagementBasePathKey: "https://servicemanagement.googleapis.com/v1/", - ServiceUsageBasePathKey: "https://serviceusage.googleapis.com/v1/", - SourceRepoBasePathKey: "https://sourcerepo.googleapis.com/v1/", - SpannerBasePathKey: "https://spanner.googleapis.com/v1/", - SQLBasePathKey: "https://sqladmin.googleapis.com/sql/v1beta4/", - StorageBasePathKey: "https://storage.googleapis.com/storage/v1/", - StorageTransferBasePathKey: "https://storagetransfer.googleapis.com/v1/", - TagsBasePathKey: "https://cloudresourcemanager.googleapis.com/v3/", - TPUBasePathKey: 
"https://tpu.googleapis.com/v1/", - VertexAIBasePathKey: "https://{{region}}-aiplatform.googleapis.com/v1/", - VPCAccessBasePathKey: "https://vpcaccess.googleapis.com/v1/", - WorkflowsBasePathKey: "https://workflows.googleapis.com/v1/", - CloudBillingBasePathKey: "https://cloudbilling.googleapis.com/v1/", - ComposerBasePathKey: "https://composer.googleapis.com/v1/", - ContainerBasePathKey: "https://container.googleapis.com/v1/", - DataflowBasePathKey: "https://dataflow.googleapis.com/v1b3/", - IAMBasePathKey: "https://iam.googleapis.com/v1/", - IamCredentialsBasePathKey: "https://iamcredentials.googleapis.com/v1/", - ResourceManagerV3BasePathKey: "https://cloudresourcemanager.googleapis.com/v3/", - ServiceNetworkingBasePathKey: "https://servicenetworking.googleapis.com/v1/", - BigtableAdminBasePathKey: "https://bigtableadmin.googleapis.com/v2/", - ContainerAwsBasePathKey: "https://{{location}}-gkemulticloud.googleapis.com/v1/", - ContainerAzureBasePathKey: "https://{{location}}-gkemulticloud.googleapis.com/v1/", - TagsLocationBasePathKey: "https://{{location}}-cloudresourcemanager.googleapis.com/v3/", -} - -var DefaultClientScopes = []string{ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/userinfo.email", -} - -func (c *Config) LoadAndValidate(ctx context.Context) error { - if len(c.Scopes) == 0 { - c.Scopes = DefaultClientScopes - } - - c.context = ctx - - tokenSource, err := c.getTokenSource(c.Scopes, false) - if err != nil { - return err - } - - c.tokenSource = tokenSource - - cleanCtx := context.WithValue(ctx, oauth2.HTTPClient, cleanhttp.DefaultClient()) - - // 1. MTLS TRANSPORT/CLIENT - sets up proper auth headers - client, _, err := transport.NewHTTPClient(cleanCtx, option.WithTokenSource(tokenSource)) - if err != nil { - return err - } - - // Userinfo is fetched before request logging is enabled to reduce additional noise. - err = c.logGoogleIdentities() - if err != nil { - return err - } - - // 2. 
Logging Transport - ensure we log HTTP requests to GCP APIs. - loggingTransport := logging.NewTransport("Google", client.Transport) - - // 3. Retry Transport - retries common temporary errors - // Keep order for wrapping logging so we log each retried request as well. - // This value should be used if needed to create shallow copies with additional retry predicates. - // See ClientWithAdditionalRetries - retryTransport := NewTransportWithDefaultRetries(loggingTransport) - - // 4. Header Transport - outer wrapper to inject additional headers we want to apply - // before making requests - headerTransport := newTransportWithHeaders(retryTransport) - if c.RequestReason != "" { - headerTransport.Set("X-Goog-Request-Reason", c.RequestReason) - } - - // Ensure $userProject is set for all HTTP requests using the client if specified by the provider config - // See https://cloud.google.com/apis/docs/system-parameters - if c.UserProjectOverride && c.BillingProject != "" { - headerTransport.Set("X-Goog-User-Project", c.BillingProject) - } - - // Set final transport value. - client.Transport = headerTransport - - // This timeout is a timeout per HTTP request, not per logical operation. 
- client.Timeout = c.synchronousTimeout() - - c.Client = client - c.context = ctx - c.Region = GetRegionFromRegionSelfLink(c.Region) - c.RequestBatcherServiceUsage = NewRequestBatcher("Service Usage", ctx, c.BatchingConfig) - c.requestBatcherIam = NewRequestBatcher("IAM", ctx, c.BatchingConfig) - c.PollInterval = 10 * time.Second - - // gRPC Logging setup - logger := logrus.StandardLogger() - - logrus.SetLevel(logrus.DebugLevel) - logrus.SetFormatter(&Formatter{ - TimestampFormat: "2006/01/02 15:04:05", - LogFormat: "%time% [%lvl%] %msg% \n", - }) - - alwaysLoggingDeciderClient := func(ctx context.Context, fullMethodName string) bool { return true } - grpc_logrus.ReplaceGrpcLogger(logrus.NewEntry(logger)) - - c.gRPCLoggingOptions = append( - c.gRPCLoggingOptions, option.WithGRPCDialOption(grpc.WithUnaryInterceptor( - grpc_logrus.PayloadUnaryClientInterceptor(logrus.NewEntry(logger), alwaysLoggingDeciderClient))), - option.WithGRPCDialOption(grpc.WithStreamInterceptor( - grpc_logrus.PayloadStreamClientInterceptor(logrus.NewEntry(logger), alwaysLoggingDeciderClient))), - ) - - return nil -} - -func ExpandProviderBatchingConfig(v interface{}) (*batchingConfig, error) { - config := &batchingConfig{ - SendAfter: time.Second * DefaultBatchSendIntervalSec, - EnableBatching: true, - } - - if v == nil { - return config, nil - } - ls := v.([]interface{}) - if len(ls) == 0 || ls[0] == nil { - return config, nil - } - - cfgV := ls[0].(map[string]interface{}) - if sendAfterV, ok := cfgV["send_after"]; ok { - SendAfter, err := time.ParseDuration(sendAfterV.(string)) - if err != nil { - return nil, fmt.Errorf("unable to parse duration from 'send_after' value %q", sendAfterV) - } - config.SendAfter = SendAfter - } - - if enable, ok := cfgV["enable_batching"]; ok { - config.EnableBatching = enable.(bool) - } - - return config, nil -} - -func (c *Config) synchronousTimeout() time.Duration { - if c.RequestTimeout == 0 { - return 120 * time.Second - } - return c.RequestTimeout -} - 
-// Print Identities executing terraform API Calls. -func (c *Config) logGoogleIdentities() error { - if c.ImpersonateServiceAccount == "" { - - tokenSource, err := c.getTokenSource(c.Scopes, true) - if err != nil { - return err - } - c.Client = oauth2.NewClient(c.context, tokenSource) // c.Client isn't initialised fully when this code is called. - - email, err := GetCurrentUserEmail(c, c.UserAgent) - if err != nil { - log.Printf("[INFO] error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) - } - - log.Printf("[INFO] Terraform is using this identity: %s", email) - - return nil - - } - - // Drop Impersonated ClientOption from OAuth2 TokenSource to infer original identity - - tokenSource, err := c.getTokenSource(c.Scopes, true) - if err != nil { - return err - } - c.Client = oauth2.NewClient(c.context, tokenSource) // c.Client isn't initialised fully when this code is called. - - email, err := GetCurrentUserEmail(c, c.UserAgent) - if err != nil { - log.Printf("[INFO] error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) - } - - log.Printf("[INFO] Terraform is configured with service account impersonation, original identity: %s, impersonated identity: %s", email, c.ImpersonateServiceAccount) - - // Add the Impersonated ClientOption back in to the OAuth2 TokenSource - - tokenSource, err = c.getTokenSource(c.Scopes, false) - if err != nil { - return err - } - c.Client = oauth2.NewClient(c.context, tokenSource) // c.Client isn't initialised fully when this code is called. - - return nil -} - -// Get a TokenSource based on the Google Credentials configured. -// If initialCredentialsOnly is true, don't follow the impersonation settings and return the initial set of creds. 
-func (c *Config) getTokenSource(clientScopes []string, initialCredentialsOnly bool) (oauth2.TokenSource, error) { - creds, err := c.GetCredentials(clientScopes, initialCredentialsOnly) - if err != nil { - return nil, fmt.Errorf("%s", err) - } - return creds.TokenSource, nil -} - -// Methods to create new services from config -// Some base paths below need the version and possibly more of the path -// set on them. The client libraries are inconsistent about which values they need; -// while most only want the host URL, some older ones also want the version and some -// of those "projects" as well. You can find out if this is required by looking at -// the basePath value in the client library file. -func (c *Config) NewComputeClient(userAgent string) *compute.Service { - log.Printf("[INFO] Instantiating GCE client for path %s", c.ComputeBasePath) - clientCompute, err := compute.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client compute: %s", err) - return nil - } - clientCompute.UserAgent = userAgent - clientCompute.BasePath = c.ComputeBasePath - - return clientCompute -} - -func (c *Config) NewContainerClient(userAgent string) *container.Service { - containerClientBasePath := RemoveBasePathVersion(c.ContainerBasePath) - log.Printf("[INFO] Instantiating GKE client for path %s", containerClientBasePath) - clientContainer, err := container.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client container: %s", err) - return nil - } - clientContainer.UserAgent = userAgent - clientContainer.BasePath = containerClientBasePath - - return clientContainer -} - -func (c *Config) NewDnsClient(userAgent string) *dns.Service { - dnsClientBasePath := RemoveBasePathVersion(c.DNSBasePath) - dnsClientBasePath = strings.ReplaceAll(dnsClientBasePath, "/dns/", "") - log.Printf("[INFO] Instantiating Google Cloud DNS client for path %s", dnsClientBasePath) - 
clientDns, err := dns.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client dns: %s", err) - return nil - } - clientDns.UserAgent = userAgent - clientDns.BasePath = dnsClientBasePath - - return clientDns -} - -func (c *Config) NewKmsClientWithCtx(ctx context.Context, userAgent string) *cloudkms.Service { - kmsClientBasePath := RemoveBasePathVersion(c.KMSBasePath) - log.Printf("[INFO] Instantiating Google Cloud KMS client for path %s", kmsClientBasePath) - clientKms, err := cloudkms.NewService(ctx, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client kms: %s", err) - return nil - } - clientKms.UserAgent = userAgent - clientKms.BasePath = kmsClientBasePath - - return clientKms -} - -func (c *Config) NewKmsClient(userAgent string) *cloudkms.Service { - return c.NewKmsClientWithCtx(c.context, userAgent) -} - -func (c *Config) NewLoggingClient(userAgent string) *cloudlogging.Service { - loggingClientBasePath := RemoveBasePathVersion(c.LoggingBasePath) - log.Printf("[INFO] Instantiating Google Stackdriver Logging client for path %s", loggingClientBasePath) - clientLogging, err := cloudlogging.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client logging: %s", err) - return nil - } - clientLogging.UserAgent = userAgent - clientLogging.BasePath = loggingClientBasePath - - return clientLogging -} - -func (c *Config) NewStorageClient(userAgent string) *storage.Service { - storageClientBasePath := c.StorageBasePath - log.Printf("[INFO] Instantiating Google Storage client for path %s", storageClientBasePath) - clientStorage, err := storage.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client storage: %s", err) - return nil - } - clientStorage.UserAgent = userAgent - clientStorage.BasePath = storageClientBasePath - - return clientStorage -} - -// For 
object uploads, we need to override the specific timeout because they are long, synchronous operations. -func (c *Config) NewStorageClientWithTimeoutOverride(userAgent string, timeout time.Duration) *storage.Service { - storageClientBasePath := c.StorageBasePath - log.Printf("[INFO] Instantiating Google Storage client for path %s", storageClientBasePath) - // Copy the existing HTTP client (which has no unexported fields [as of Oct 2021 at least], so this is safe). - // We have to do this because otherwise we will accidentally change the timeout for all other - // synchronous operations, which would not be desirable. - httpClient := &http.Client{ - Transport: c.Client.Transport, - CheckRedirect: c.Client.CheckRedirect, - Jar: c.Client.Jar, - Timeout: timeout, - } - clientStorage, err := storage.NewService(c.context, option.WithHTTPClient(httpClient)) - if err != nil { - log.Printf("[WARN] Error creating client storage: %s", err) - return nil - } - clientStorage.UserAgent = userAgent - clientStorage.BasePath = storageClientBasePath - - return clientStorage -} - -func (c *Config) NewSqlAdminClient(userAgent string) *sqladmin.Service { - sqlClientBasePath := RemoveBasePathVersion(RemoveBasePathVersion(c.SQLBasePath)) - log.Printf("[INFO] Instantiating Google SqlAdmin client for path %s", sqlClientBasePath) - clientSqlAdmin, err := sqladmin.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client storage: %s", err) - return nil - } - clientSqlAdmin.UserAgent = userAgent - clientSqlAdmin.BasePath = sqlClientBasePath - - return clientSqlAdmin -} - -func (c *Config) NewPubsubClient(userAgent string) *pubsub.Service { - pubsubClientBasePath := RemoveBasePathVersion(c.PubsubBasePath) - log.Printf("[INFO] Instantiating Google Pubsub client for path %s", pubsubClientBasePath) - wrappedPubsubClient := ClientWithAdditionalRetries(c.Client, pubsubTopicProjectNotReady) - clientPubsub, err := pubsub.NewService(c.context, 
option.WithHTTPClient(wrappedPubsubClient)) - if err != nil { - log.Printf("[WARN] Error creating client pubsub: %s", err) - return nil - } - clientPubsub.UserAgent = userAgent - clientPubsub.BasePath = pubsubClientBasePath - - return clientPubsub -} - -func (c *Config) NewDataflowClient(userAgent string) *dataflow.Service { - dataflowClientBasePath := RemoveBasePathVersion(c.DataflowBasePath) - log.Printf("[INFO] Instantiating Google Dataflow client for path %s", dataflowClientBasePath) - clientDataflow, err := dataflow.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client dataflow: %s", err) - return nil - } - clientDataflow.UserAgent = userAgent - clientDataflow.BasePath = dataflowClientBasePath - - return clientDataflow -} - -func (c *Config) NewResourceManagerClient(userAgent string) *cloudresourcemanager.Service { - resourceManagerBasePath := RemoveBasePathVersion(c.ResourceManagerBasePath) - log.Printf("[INFO] Instantiating Google Cloud ResourceManager client for path %s", resourceManagerBasePath) - clientResourceManager, err := cloudresourcemanager.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client resource manager: %s", err) - return nil - } - clientResourceManager.UserAgent = userAgent - clientResourceManager.BasePath = resourceManagerBasePath - - return clientResourceManager -} - -func (c *Config) NewResourceManagerV3Client(userAgent string) *resourceManagerV3.Service { - resourceManagerV3BasePath := RemoveBasePathVersion(c.ResourceManagerV3BasePath) - log.Printf("[INFO] Instantiating Google Cloud ResourceManager V3 client for path %s", resourceManagerV3BasePath) - clientResourceManagerV3, err := resourceManagerV3.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client resource manager v3: %s", err) - return nil - } - clientResourceManagerV3.UserAgent = userAgent - 
clientResourceManagerV3.BasePath = resourceManagerV3BasePath - - return clientResourceManagerV3 -} - -func (c *Config) NewIamClient(userAgent string) *iam.Service { - iamClientBasePath := RemoveBasePathVersion(c.IAMBasePath) - log.Printf("[INFO] Instantiating Google Cloud IAM client for path %s", iamClientBasePath) - clientIAM, err := iam.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client iam: %s", err) - return nil - } - clientIAM.UserAgent = userAgent - clientIAM.BasePath = iamClientBasePath - - return clientIAM -} - -func (c *Config) NewIamCredentialsClient(userAgent string) *iamcredentials.Service { - iamCredentialsClientBasePath := RemoveBasePathVersion(c.IamCredentialsBasePath) - log.Printf("[INFO] Instantiating Google Cloud IAMCredentials client for path %s", iamCredentialsClientBasePath) - clientIamCredentials, err := iamcredentials.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client iam credentials: %s", err) - return nil - } - clientIamCredentials.UserAgent = userAgent - clientIamCredentials.BasePath = iamCredentialsClientBasePath - - return clientIamCredentials -} - -func (c *Config) NewServiceManClient(userAgent string) *servicemanagement.APIService { - serviceManagementClientBasePath := RemoveBasePathVersion(c.ServiceManagementBasePath) - log.Printf("[INFO] Instantiating Google Cloud Service Management client for path %s", serviceManagementClientBasePath) - clientServiceMan, err := servicemanagement.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client service management: %s", err) - return nil - } - clientServiceMan.UserAgent = userAgent - clientServiceMan.BasePath = serviceManagementClientBasePath - - return clientServiceMan -} - -func (c *Config) NewServiceUsageClient(userAgent string) *serviceusage.Service { - serviceUsageClientBasePath := 
RemoveBasePathVersion(c.ServiceUsageBasePath) - log.Printf("[INFO] Instantiating Google Cloud Service Usage client for path %s", serviceUsageClientBasePath) - clientServiceUsage, err := serviceusage.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client service usage: %s", err) - return nil - } - clientServiceUsage.UserAgent = userAgent - clientServiceUsage.BasePath = serviceUsageClientBasePath - - return clientServiceUsage -} - -func (c *Config) NewBillingClient(userAgent string) *cloudbilling.APIService { - cloudBillingClientBasePath := RemoveBasePathVersion(c.CloudBillingBasePath) - log.Printf("[INFO] Instantiating Google Cloud Billing client for path %s", cloudBillingClientBasePath) - clientBilling, err := cloudbilling.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client billing: %s", err) - return nil - } - clientBilling.UserAgent = userAgent - clientBilling.BasePath = cloudBillingClientBasePath - - return clientBilling -} - -func (c *Config) NewBuildClient(userAgent string) *cloudbuild.Service { - cloudBuildClientBasePath := RemoveBasePathVersion(c.CloudBuildBasePath) - log.Printf("[INFO] Instantiating Google Cloud Build client for path %s", cloudBuildClientBasePath) - clientBuild, err := cloudbuild.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client build: %s", err) - return nil - } - clientBuild.UserAgent = userAgent - clientBuild.BasePath = cloudBuildClientBasePath - - return clientBuild -} - -func (c *Config) NewCloudFunctionsClient(userAgent string) *cloudfunctions.Service { - cloudFunctionsClientBasePath := RemoveBasePathVersion(c.CloudFunctionsBasePath) - log.Printf("[INFO] Instantiating Google Cloud CloudFunctions Client for path %s", cloudFunctionsClientBasePath) - clientCloudFunctions, err := cloudfunctions.NewService(c.context, option.WithHTTPClient(c.Client)) - 
if err != nil { - log.Printf("[WARN] Error creating client cloud functions: %s", err) - return nil - } - clientCloudFunctions.UserAgent = userAgent - clientCloudFunctions.BasePath = cloudFunctionsClientBasePath - - return clientCloudFunctions -} - -func (c *Config) NewSourceRepoClient(userAgent string) *sourcerepo.Service { - sourceRepoClientBasePath := RemoveBasePathVersion(c.SourceRepoBasePath) - log.Printf("[INFO] Instantiating Google Cloud Source Repo client for path %s", sourceRepoClientBasePath) - clientSourceRepo, err := sourcerepo.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client source repo: %s", err) - return nil - } - clientSourceRepo.UserAgent = userAgent - clientSourceRepo.BasePath = sourceRepoClientBasePath - - return clientSourceRepo -} - -func (c *Config) NewBigQueryClient(userAgent string) *bigquery.Service { - bigQueryClientBasePath := c.BigQueryBasePath - log.Printf("[INFO] Instantiating Google Cloud BigQuery client for path %s", bigQueryClientBasePath) - wrappedBigQueryClient := ClientWithAdditionalRetries(c.Client, iamMemberMissing) - clientBigQuery, err := bigquery.NewService(c.context, option.WithHTTPClient(wrappedBigQueryClient)) - if err != nil { - log.Printf("[WARN] Error creating client big query: %s", err) - return nil - } - clientBigQuery.UserAgent = userAgent - clientBigQuery.BasePath = bigQueryClientBasePath - - return clientBigQuery -} - -func (c *Config) NewSpannerClient(userAgent string) *spanner.Service { - spannerClientBasePath := RemoveBasePathVersion(c.SpannerBasePath) - log.Printf("[INFO] Instantiating Google Cloud Spanner client for path %s", spannerClientBasePath) - clientSpanner, err := spanner.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client source repo: %s", err) - return nil - } - clientSpanner.UserAgent = userAgent - clientSpanner.BasePath = spannerClientBasePath - - return clientSpanner 
-} - -func (c *Config) NewDataprocClient(userAgent string) *dataproc.Service { - dataprocClientBasePath := RemoveBasePathVersion(c.DataprocBasePath) - log.Printf("[INFO] Instantiating Google Cloud Dataproc client for path %s", dataprocClientBasePath) - clientDataproc, err := dataproc.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client dataproc: %s", err) - return nil - } - clientDataproc.UserAgent = userAgent - clientDataproc.BasePath = dataprocClientBasePath - - return clientDataproc -} - -func (c *Config) NewCloudIoTClient(userAgent string) *cloudiot.Service { - cloudIoTClientBasePath := RemoveBasePathVersion(c.CloudIoTBasePath) - log.Printf("[INFO] Instantiating Google Cloud IoT Core client for path %s", cloudIoTClientBasePath) - clientCloudIoT, err := cloudiot.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client cloud iot: %s", err) - return nil - } - clientCloudIoT.UserAgent = userAgent - clientCloudIoT.BasePath = cloudIoTClientBasePath - - return clientCloudIoT -} - -func (c *Config) NewAppEngineClient(userAgent string) *appengine.APIService { - appEngineClientBasePath := RemoveBasePathVersion(c.AppEngineBasePath) - log.Printf("[INFO] Instantiating App Engine client for path %s", appEngineClientBasePath) - clientAppEngine, err := appengine.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client appengine: %s", err) - return nil - } - clientAppEngine.UserAgent = userAgent - clientAppEngine.BasePath = appEngineClientBasePath - - return clientAppEngine -} - -func (c *Config) NewComposerClient(userAgent string) *composer.Service { - composerClientBasePath := RemoveBasePathVersion(c.ComposerBasePath) - log.Printf("[INFO] Instantiating Cloud Composer client for path %s", composerClientBasePath) - clientComposer, err := composer.NewService(c.context, option.WithHTTPClient(c.Client)) 
- if err != nil { - log.Printf("[WARN] Error creating client composer: %s", err) - return nil - } - clientComposer.UserAgent = userAgent - clientComposer.BasePath = composerClientBasePath - - return clientComposer -} - -func (c *Config) NewServiceNetworkingClient(userAgent string) *servicenetworking.APIService { - serviceNetworkingClientBasePath := RemoveBasePathVersion(c.ServiceNetworkingBasePath) - log.Printf("[INFO] Instantiating Service Networking client for path %s", serviceNetworkingClientBasePath) - clientServiceNetworking, err := servicenetworking.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client service networking: %s", err) - return nil - } - clientServiceNetworking.UserAgent = userAgent - clientServiceNetworking.BasePath = serviceNetworkingClientBasePath - - return clientServiceNetworking -} - -func (c *Config) NewStorageTransferClient(userAgent string) *storagetransfer.Service { - storageTransferClientBasePath := RemoveBasePathVersion(c.StorageTransferBasePath) - log.Printf("[INFO] Instantiating Google Cloud Storage Transfer client for path %s", storageTransferClientBasePath) - clientStorageTransfer, err := storagetransfer.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client storage transfer: %s", err) - return nil - } - clientStorageTransfer.UserAgent = userAgent - clientStorageTransfer.BasePath = storageTransferClientBasePath - - return clientStorageTransfer -} - -func (c *Config) NewHealthcareClient(userAgent string) *healthcare.Service { - healthcareClientBasePath := RemoveBasePathVersion(c.HealthcareBasePath) - log.Printf("[INFO] Instantiating Google Cloud Healthcare client for path %s", healthcareClientBasePath) - clientHealthcare, err := healthcare.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client healthcare: %s", err) - return nil - } - 
clientHealthcare.UserAgent = userAgent - clientHealthcare.BasePath = healthcareClientBasePath - - return clientHealthcare -} - -func (c *Config) NewCloudIdentityClient(userAgent string) *cloudidentity.Service { - cloudidentityClientBasePath := RemoveBasePathVersion(c.CloudIdentityBasePath) - log.Printf("[INFO] Instantiating Google Cloud CloudIdentity client for path %s", cloudidentityClientBasePath) - clientCloudIdentity, err := cloudidentity.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client cloud identity: %s", err) - return nil - } - clientCloudIdentity.UserAgent = userAgent - clientCloudIdentity.BasePath = cloudidentityClientBasePath - - return clientCloudIdentity -} - -func (c *Config) BigTableClientFactory(userAgent string) *BigtableClientFactory { - bigtableClientFactory := &BigtableClientFactory{ - UserAgent: userAgent, - TokenSource: c.tokenSource, - gRPCLoggingOptions: c.gRPCLoggingOptions, - BillingProject: c.BillingProject, - UserProjectOverride: c.UserProjectOverride, - } - - return bigtableClientFactory -} - -// Unlike other clients, the Bigtable Admin client doesn't use a single -// service. Instead, there are several distinct services created off -// the base service object. To imitate most other handwritten clients, -// we expose those directly instead of providing the `Service` object -// as a factory. 
-func (c *Config) NewBigTableProjectsInstancesClient(userAgent string) *bigtableadmin.ProjectsInstancesService { - bigtableAdminBasePath := RemoveBasePathVersion(c.BigtableAdminBasePath) - log.Printf("[INFO] Instantiating Google Cloud BigtableAdmin for path %s", bigtableAdminBasePath) - clientBigtable, err := bigtableadmin.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client big table projects instances: %s", err) - return nil - } - clientBigtable.UserAgent = userAgent - clientBigtable.BasePath = bigtableAdminBasePath - clientBigtableProjectsInstances := bigtableadmin.NewProjectsInstancesService(clientBigtable) - - return clientBigtableProjectsInstances -} - -func (c *Config) NewBigTableProjectsInstancesTablesClient(userAgent string) *bigtableadmin.ProjectsInstancesTablesService { - bigtableAdminBasePath := RemoveBasePathVersion(c.BigtableAdminBasePath) - log.Printf("[INFO] Instantiating Google Cloud BigtableAdmin for path %s", bigtableAdminBasePath) - clientBigtable, err := bigtableadmin.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client projects instances tables: %s", err) - return nil - } - clientBigtable.UserAgent = userAgent - clientBigtable.BasePath = bigtableAdminBasePath - clientBigtableProjectsInstancesTables := bigtableadmin.NewProjectsInstancesTablesService(clientBigtable) - - return clientBigtableProjectsInstancesTables -} - -func (c *Config) NewCloudRunV2Client(userAgent string) *runadminv2.Service { - runAdminV2ClientBasePath := RemoveBasePathVersion(RemoveBasePathVersion(c.CloudRunV2BasePath)) - log.Printf("[INFO] Instantiating Google Cloud Run Admin v2 client for path %s", runAdminV2ClientBasePath) - clientRunAdminV2, err := runadminv2.NewService(c.context, option.WithHTTPClient(c.Client)) - if err != nil { - log.Printf("[WARN] Error creating client run admin: %s", err) - return nil - } - clientRunAdminV2.UserAgent = 
userAgent - clientRunAdminV2.BasePath = runAdminV2ClientBasePath - - return clientRunAdminV2 -} - -// staticTokenSource is used to be able to identify static token sources without reflection. -type staticTokenSource struct { - oauth2.TokenSource -} - -// Get a set of credentials with a given scope (clientScopes) based on the Config object. -// If initialCredentialsOnly is true, don't follow the impersonation settings and return the initial set of creds -// instead. -func (c *Config) GetCredentials(clientScopes []string, initialCredentialsOnly bool) (googleoauth.Credentials, error) { - if c.AccessToken != "" { - contents, _, err := pathOrContents(c.AccessToken) - if err != nil { - return googleoauth.Credentials{}, fmt.Errorf("Error loading access token: %s", err) - } - - token := &oauth2.Token{AccessToken: contents} - if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { - opts := []option.ClientOption{option.WithTokenSource(oauth2.StaticTokenSource(token)), option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...), option.WithScopes(clientScopes...)} - creds, err := transport.Creds(context.TODO(), opts...) 
- if err != nil { - return googleoauth.Credentials{}, err - } - return *creds, nil - } - - log.Printf("[INFO] Authenticating using configured Google JSON 'access_token'...") - log.Printf("[INFO] -- Scopes: %s", clientScopes) - return googleoauth.Credentials{ - TokenSource: staticTokenSource{oauth2.StaticTokenSource(token)}, - }, nil - } - - if c.Credentials != "" { - contents, _, err := pathOrContents(c.Credentials) - if err != nil { - return googleoauth.Credentials{}, fmt.Errorf("error loading credentials: %s", err) - } - - if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { - opts := []option.ClientOption{option.WithCredentialsJSON([]byte(contents)), option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...), option.WithScopes(clientScopes...)} - creds, err := transport.Creds(context.TODO(), opts...) - if err != nil { - return googleoauth.Credentials{}, err - } - return *creds, nil - } - - creds, err := googleoauth.CredentialsFromJSON(c.context, []byte(contents), clientScopes...) - if err != nil { - return googleoauth.Credentials{}, fmt.Errorf("unable to parse credentials from '%s': %s", contents, err) - } - - log.Printf("[INFO] Authenticating using configured Google JSON 'credentials'...") - log.Printf("[INFO] -- Scopes: %s", clientScopes) - return *creds, nil - } - - if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { - opts := option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...) - creds, err := transport.Creds(context.TODO(), opts, option.WithScopes(clientScopes...)) - if err != nil { - return googleoauth.Credentials{}, err - } - - return *creds, nil - } - - log.Printf("[INFO] Authenticating using DefaultClient...") - log.Printf("[INFO] -- Scopes: %s", clientScopes) - defaultTS, err := googleoauth.DefaultTokenSource(context.Background(), clientScopes...) 
- if err != nil { - return googleoauth.Credentials{}, fmt.Errorf("Attempted to load application default credentials since neither `credentials` nor `access_token` was set in the provider block. No credentials loaded. To use your gcloud credentials, run 'gcloud auth application-default login'. Original error: %w", err) - } - - return googleoauth.Credentials{ - TokenSource: defaultTS, - }, err -} - -// Remove the `/{{version}}/` from a base path if present. -func RemoveBasePathVersion(url string) string { - re := regexp.MustCompile(`(?Phttp[s]://.*)(?P/[^/]+?/$)`) - return re.ReplaceAllString(url, "$1/") -} - -// For a consumer of config.go that isn't a full fledged provider and doesn't -// have its own endpoint mechanism such as sweepers, init {{service}}BasePath -// values to a default. After using this, you should call config.LoadAndValidate. -func ConfigureBasePaths(c *Config) { - // Generated Products - c.AccessApprovalBasePath = DefaultBasePaths[AccessApprovalBasePathKey] - c.AccessContextManagerBasePath = DefaultBasePaths[AccessContextManagerBasePathKey] - c.ActiveDirectoryBasePath = DefaultBasePaths[ActiveDirectoryBasePathKey] - c.AlloydbBasePath = DefaultBasePaths[AlloydbBasePathKey] - c.ApigeeBasePath = DefaultBasePaths[ApigeeBasePathKey] - c.AppEngineBasePath = DefaultBasePaths[AppEngineBasePathKey] - c.ArtifactRegistryBasePath = DefaultBasePaths[ArtifactRegistryBasePathKey] - c.BeyondcorpBasePath = DefaultBasePaths[BeyondcorpBasePathKey] - c.BigQueryBasePath = DefaultBasePaths[BigQueryBasePathKey] - c.BigqueryAnalyticsHubBasePath = DefaultBasePaths[BigqueryAnalyticsHubBasePathKey] - c.BigqueryConnectionBasePath = DefaultBasePaths[BigqueryConnectionBasePathKey] - c.BigqueryDatapolicyBasePath = DefaultBasePaths[BigqueryDatapolicyBasePathKey] - c.BigqueryDataTransferBasePath = DefaultBasePaths[BigqueryDataTransferBasePathKey] - c.BigqueryReservationBasePath = DefaultBasePaths[BigqueryReservationBasePathKey] - c.BigtableBasePath = 
DefaultBasePaths[BigtableBasePathKey] - c.BillingBasePath = DefaultBasePaths[BillingBasePathKey] - c.BinaryAuthorizationBasePath = DefaultBasePaths[BinaryAuthorizationBasePathKey] - c.CertificateManagerBasePath = DefaultBasePaths[CertificateManagerBasePathKey] - c.CloudAssetBasePath = DefaultBasePaths[CloudAssetBasePathKey] - c.CloudBuildBasePath = DefaultBasePaths[CloudBuildBasePathKey] - c.CloudFunctionsBasePath = DefaultBasePaths[CloudFunctionsBasePathKey] - c.Cloudfunctions2BasePath = DefaultBasePaths[Cloudfunctions2BasePathKey] - c.CloudIdentityBasePath = DefaultBasePaths[CloudIdentityBasePathKey] - c.CloudIdsBasePath = DefaultBasePaths[CloudIdsBasePathKey] - c.CloudIotBasePath = DefaultBasePaths[CloudIotBasePathKey] - c.CloudRunBasePath = DefaultBasePaths[CloudRunBasePathKey] - c.CloudRunV2BasePath = DefaultBasePaths[CloudRunV2BasePathKey] - c.CloudSchedulerBasePath = DefaultBasePaths[CloudSchedulerBasePathKey] - c.CloudTasksBasePath = DefaultBasePaths[CloudTasksBasePathKey] - c.ComputeBasePath = DefaultBasePaths[ComputeBasePathKey] - c.ContainerAnalysisBasePath = DefaultBasePaths[ContainerAnalysisBasePathKey] - c.ContainerAttachedBasePath = DefaultBasePaths[ContainerAttachedBasePathKey] - c.DataCatalogBasePath = DefaultBasePaths[DataCatalogBasePathKey] - c.DataFusionBasePath = DefaultBasePaths[DataFusionBasePathKey] - c.DataLossPreventionBasePath = DefaultBasePaths[DataLossPreventionBasePathKey] - c.DataplexBasePath = DefaultBasePaths[DataplexBasePathKey] - c.DataprocBasePath = DefaultBasePaths[DataprocBasePathKey] - c.DataprocMetastoreBasePath = DefaultBasePaths[DataprocMetastoreBasePathKey] - c.DatastoreBasePath = DefaultBasePaths[DatastoreBasePathKey] - c.DatastreamBasePath = DefaultBasePaths[DatastreamBasePathKey] - c.DeploymentManagerBasePath = DefaultBasePaths[DeploymentManagerBasePathKey] - c.DialogflowBasePath = DefaultBasePaths[DialogflowBasePathKey] - c.DialogflowCXBasePath = DefaultBasePaths[DialogflowCXBasePathKey] - c.DNSBasePath = 
DefaultBasePaths[DNSBasePathKey] - c.DocumentAIBasePath = DefaultBasePaths[DocumentAIBasePathKey] - c.EssentialContactsBasePath = DefaultBasePaths[EssentialContactsBasePathKey] - c.FilestoreBasePath = DefaultBasePaths[FilestoreBasePathKey] - c.FirestoreBasePath = DefaultBasePaths[FirestoreBasePathKey] - c.GameServicesBasePath = DefaultBasePaths[GameServicesBasePathKey] - c.GKEBackupBasePath = DefaultBasePaths[GKEBackupBasePathKey] - c.GKEHubBasePath = DefaultBasePaths[GKEHubBasePathKey] - c.HealthcareBasePath = DefaultBasePaths[HealthcareBasePathKey] - c.IAM2BasePath = DefaultBasePaths[IAM2BasePathKey] - c.IAMBetaBasePath = DefaultBasePaths[IAMBetaBasePathKey] - c.IAMWorkforcePoolBasePath = DefaultBasePaths[IAMWorkforcePoolBasePathKey] - c.IapBasePath = DefaultBasePaths[IapBasePathKey] - c.IdentityPlatformBasePath = DefaultBasePaths[IdentityPlatformBasePathKey] - c.KMSBasePath = DefaultBasePaths[KMSBasePathKey] - c.LoggingBasePath = DefaultBasePaths[LoggingBasePathKey] - c.MemcacheBasePath = DefaultBasePaths[MemcacheBasePathKey] - c.MLEngineBasePath = DefaultBasePaths[MLEngineBasePathKey] - c.MonitoringBasePath = DefaultBasePaths[MonitoringBasePathKey] - c.NetworkManagementBasePath = DefaultBasePaths[NetworkManagementBasePathKey] - c.NetworkServicesBasePath = DefaultBasePaths[NetworkServicesBasePathKey] - c.NotebooksBasePath = DefaultBasePaths[NotebooksBasePathKey] - c.OSConfigBasePath = DefaultBasePaths[OSConfigBasePathKey] - c.OSLoginBasePath = DefaultBasePaths[OSLoginBasePathKey] - c.PrivatecaBasePath = DefaultBasePaths[PrivatecaBasePathKey] - c.PubsubBasePath = DefaultBasePaths[PubsubBasePathKey] - c.PubsubLiteBasePath = DefaultBasePaths[PubsubLiteBasePathKey] - c.RedisBasePath = DefaultBasePaths[RedisBasePathKey] - c.ResourceManagerBasePath = DefaultBasePaths[ResourceManagerBasePathKey] - c.SecretManagerBasePath = DefaultBasePaths[SecretManagerBasePathKey] - c.SecurityCenterBasePath = DefaultBasePaths[SecurityCenterBasePathKey] - c.ServiceManagementBasePath = 
DefaultBasePaths[ServiceManagementBasePathKey] - c.ServiceUsageBasePath = DefaultBasePaths[ServiceUsageBasePathKey] - c.SourceRepoBasePath = DefaultBasePaths[SourceRepoBasePathKey] - c.SpannerBasePath = DefaultBasePaths[SpannerBasePathKey] - c.SQLBasePath = DefaultBasePaths[SQLBasePathKey] - c.StorageBasePath = DefaultBasePaths[StorageBasePathKey] - c.StorageTransferBasePath = DefaultBasePaths[StorageTransferBasePathKey] - c.TagsBasePath = DefaultBasePaths[TagsBasePathKey] - c.TPUBasePath = DefaultBasePaths[TPUBasePathKey] - c.VertexAIBasePath = DefaultBasePaths[VertexAIBasePathKey] - c.VPCAccessBasePath = DefaultBasePaths[VPCAccessBasePathKey] - c.WorkflowsBasePath = DefaultBasePaths[WorkflowsBasePathKey] - - // Handwritten Products / Versioned / Atypical Entries - c.CloudBillingBasePath = DefaultBasePaths[CloudBillingBasePathKey] - c.ComposerBasePath = DefaultBasePaths[ComposerBasePathKey] - c.ContainerBasePath = DefaultBasePaths[ContainerBasePathKey] - c.DataprocBasePath = DefaultBasePaths[DataprocBasePathKey] - c.DataflowBasePath = DefaultBasePaths[DataflowBasePathKey] - c.IamCredentialsBasePath = DefaultBasePaths[IamCredentialsBasePathKey] - c.ResourceManagerV3BasePath = DefaultBasePaths[ResourceManagerV3BasePathKey] - c.IAMBasePath = DefaultBasePaths[IAMBasePathKey] - c.ServiceNetworkingBasePath = DefaultBasePaths[ServiceNetworkingBasePathKey] - c.BigQueryBasePath = DefaultBasePaths[BigQueryBasePathKey] - c.BigtableAdminBasePath = DefaultBasePaths[BigtableAdminBasePathKey] - c.TagsLocationBasePath = DefaultBasePaths[TagsLocationBasePathKey] -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/config_test_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/config_test_utils.go deleted file mode 100644 index 23498f5cc3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/config_test_utils.go +++ /dev/null @@ 
-1,121 +0,0 @@ -package google - -import ( - "net/http/httptest" - "strings" -) - -// NewTestConfig create a config using the http test server. -func NewTestConfig(server *httptest.Server) *Config { - cfg := &Config{} - cfg.Client = server.Client() - configureTestBasePaths(cfg, server.URL) - return cfg -} - -func configureTestBasePaths(c *Config, url string) { - if !strings.HasSuffix(url, "/") { - url = url + "/" - } - // Generated Products - c.AccessApprovalBasePath = url - c.AccessContextManagerBasePath = url - c.ActiveDirectoryBasePath = url - c.AlloydbBasePath = url - c.ApigeeBasePath = url - c.AppEngineBasePath = url - c.ArtifactRegistryBasePath = url - c.BeyondcorpBasePath = url - c.BigQueryBasePath = url - c.BigqueryAnalyticsHubBasePath = url - c.BigqueryConnectionBasePath = url - c.BigqueryDatapolicyBasePath = url - c.BigqueryDataTransferBasePath = url - c.BigqueryReservationBasePath = url - c.BigtableBasePath = url - c.BillingBasePath = url - c.BinaryAuthorizationBasePath = url - c.CertificateManagerBasePath = url - c.CloudAssetBasePath = url - c.CloudBuildBasePath = url - c.CloudFunctionsBasePath = url - c.Cloudfunctions2BasePath = url - c.CloudIdentityBasePath = url - c.CloudIdsBasePath = url - c.CloudIotBasePath = url - c.CloudRunBasePath = url - c.CloudRunV2BasePath = url - c.CloudSchedulerBasePath = url - c.CloudTasksBasePath = url - c.ComputeBasePath = url - c.ContainerAnalysisBasePath = url - c.ContainerAttachedBasePath = url - c.DataCatalogBasePath = url - c.DataFusionBasePath = url - c.DataLossPreventionBasePath = url - c.DataplexBasePath = url - c.DataprocBasePath = url - c.DataprocMetastoreBasePath = url - c.DatastoreBasePath = url - c.DatastreamBasePath = url - c.DeploymentManagerBasePath = url - c.DialogflowBasePath = url - c.DialogflowCXBasePath = url - c.DNSBasePath = url - c.DocumentAIBasePath = url - c.EssentialContactsBasePath = url - c.FilestoreBasePath = url - c.FirestoreBasePath = url - c.GameServicesBasePath = url - 
c.GKEBackupBasePath = url - c.GKEHubBasePath = url - c.HealthcareBasePath = url - c.IAM2BasePath = url - c.IAMBetaBasePath = url - c.IAMWorkforcePoolBasePath = url - c.IapBasePath = url - c.IdentityPlatformBasePath = url - c.KMSBasePath = url - c.LoggingBasePath = url - c.MemcacheBasePath = url - c.MLEngineBasePath = url - c.MonitoringBasePath = url - c.NetworkManagementBasePath = url - c.NetworkServicesBasePath = url - c.NotebooksBasePath = url - c.OSConfigBasePath = url - c.OSLoginBasePath = url - c.PrivatecaBasePath = url - c.PubsubBasePath = url - c.PubsubLiteBasePath = url - c.RedisBasePath = url - c.ResourceManagerBasePath = url - c.SecretManagerBasePath = url - c.SecurityCenterBasePath = url - c.ServiceManagementBasePath = url - c.ServiceUsageBasePath = url - c.SourceRepoBasePath = url - c.SpannerBasePath = url - c.SQLBasePath = url - c.StorageBasePath = url - c.StorageTransferBasePath = url - c.TagsBasePath = url - c.TPUBasePath = url - c.VertexAIBasePath = url - c.VPCAccessBasePath = url - c.WorkflowsBasePath = url - - // Handwritten Products / Versioned / Atypical Entries - c.CloudBillingBasePath = url - c.ComposerBasePath = url - c.ContainerBasePath = url - c.DataprocBasePath = url - c.DataflowBasePath = url - c.IamCredentialsBasePath = url - c.ResourceManagerV3BasePath = url - c.IAMBasePath = url - c.ServiceNetworkingBasePath = url - c.BigQueryBasePath = url - c.StorageTransferBasePath = url - c.BigtableAdminBasePath = url -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/container_attached_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/container_attached_operation.go deleted file mode 100644 index a321e1714b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/container_attached_operation.go +++ /dev/null @@ -1,64 +0,0 @@ -package google - -import ( - "encoding/json" - "fmt" - "time" -) - 
-type ContainerAttachedOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *ContainerAttachedOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - region := GetRegionFromRegionalSelfLink(w.CommonOperationWaiter.Op.Name) - - // Returns the proper get. - url := fmt.Sprintf("https://%s-gkemulticloud.googleapis.com/v1/%s", region, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createContainerAttachedWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*ContainerAttachedOperationWaiter, error) { - w := &ContainerAttachedOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func ContainerAttachedOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createContainerAttachedWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func ContainerAttachedOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createContainerAttachedWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/container_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/container_operation.go deleted file mode 100644 index 359b6fa77d..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/container_operation.go +++ /dev/null @@ -1,120 +0,0 @@ -package google - -import ( - "context" - "errors" - "fmt" - "log" - "time" - - "google.golang.org/api/container/v1" -) - -type ContainerOperationWaiter struct { - Service *container.Service - Context context.Context - Op *container.Operation - Project string - Location string - UserProjectOverride bool -} - -func (w *ContainerOperationWaiter) State() string { - if w == nil || w.Op == nil { - return "" - } - return w.Op.Status -} - -func (w *ContainerOperationWaiter) Error() error { - if w == nil || w.Op == nil { - return nil - } - - // Error gets called during operation polling to see if there is an error. - // Since container's operation doesn't have an "error" field, we must wait - // until it's done and check the status message - for _, pending := range w.PendingStates() { - if w.Op.Status == pending { - return nil - } - } - - if w.Op.StatusMessage != "" { - return fmt.Errorf(w.Op.StatusMessage) - } - - return nil -} - -func (w *ContainerOperationWaiter) IsRetryable(error) bool { - return false -} - -func (w *ContainerOperationWaiter) SetOp(op interface{}) error { - var ok bool - w.Op, ok = op.(*container.Operation) - if !ok { - return fmt.Errorf("Unable to set operation. 
Bad type!") - } - return nil -} - -func (w *ContainerOperationWaiter) QueryOp() (interface{}, error) { - if w == nil || w.Op == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - name := fmt.Sprintf("projects/%s/locations/%s/operations/%s", - w.Project, w.Location, w.Op.Name) - - var op *container.Operation - select { - case <-w.Context.Done(): - log.Println("[WARN] request has been cancelled early") - return op, errors.New("unable to finish polling, context has been cancelled") - default: - // default must be here to keep the previous case from blocking - } - err := RetryTimeDuration(func() (opErr error) { - opGetCall := w.Service.Projects.Locations.Operations.Get(name) - if w.UserProjectOverride { - opGetCall.Header().Add("X-Goog-User-Project", w.Project) - } - op, opErr = opGetCall.Do() - return opErr - }, DefaultRequestTimeout) - - return op, err -} - -func (w *ContainerOperationWaiter) OpName() string { - if w == nil || w.Op == nil { - return "" - } - return w.Op.Name -} - -func (w *ContainerOperationWaiter) PendingStates() []string { - return []string{"PENDING", "RUNNING"} -} - -func (w *ContainerOperationWaiter) TargetStates() []string { - return []string{"DONE"} -} - -func containerOperationWait(config *Config, op *container.Operation, project, location, activity, userAgent string, timeout time.Duration) error { - w := &ContainerOperationWaiter{ - Service: config.NewContainerClient(userAgent), - Context: config.context, - Op: op, - Project: project, - Location: location, - UserProjectOverride: config.UserProjectOverride, - } - - if err := w.SetOp(op); err != nil { - return err - } - - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/convert.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/convert.go deleted file mode 100644 index 75e4985258..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/convert.go +++ /dev/null @@ -1,97 +0,0 @@ -package google - -import ( - "encoding/json" - "reflect" -) - -// Convert between two types by converting to/from JSON. Intended to switch -// between multiple API versions, as they are strict supersets of one another. -// item and out are pointers to structs -func Convert(item, out interface{}) error { - bytes, err := json.Marshal(item) - if err != nil { - return err - } - - err = json.Unmarshal(bytes, out) - if err != nil { - return err - } - - // Converting between maps and structs only occurs when autogenerated resources convert the result - // of an HTTP request. Those results do not contain omitted fields, so no need to set them. - if _, ok := item.(map[string]interface{}); !ok { - setOmittedFields(item, out) - } - - return nil -} - -// When converting to a map, we can't use setOmittedFields because FieldByName -// fails. Luckily, we don't use the omitted fields anymore with generated -// resources, and this function is used to bridge from handwritten -> generated. -// Since this is a known type, we can create it inline instead of needing to -// pass an object in. -func ConvertToMap(item interface{}) (map[string]interface{}, error) { - out := make(map[string]interface{}) - bytes, err := json.Marshal(item) - if err != nil { - return nil, err - } - - err = json.Unmarshal(bytes, &out) - if err != nil { - return nil, err - } - - return out, nil -} - -func setOmittedFields(item, out interface{}) { - // Both inputs must be pointers, see https://blog.golang.org/laws-of-reflection: - // "To modify a reflection object, the value must be settable." 
- iVal := reflect.ValueOf(item).Elem() - oVal := reflect.ValueOf(out).Elem() - - // Loop through all the fields of the struct to look for omitted fields and nested fields - for i := 0; i < iVal.NumField(); i++ { - iField := iVal.Field(i) - if isEmptyValue(iField) { - continue - } - - fieldInfo := iVal.Type().Field(i) - oField := oVal.FieldByName(fieldInfo.Name) - - // Only look at fields that exist in the output struct - if !oField.IsValid() { - continue - } - - // If the field contains a 'json:"="' tag, then it was omitted from the Marshal/Unmarshal - // call and needs to be added back in. - if fieldInfo.Tag.Get("json") == "-" { - oField.Set(iField) - } - - // If this field is a struct, *struct, []struct, or []*struct, recurse. - if iField.Kind() == reflect.Struct { - setOmittedFields(iField.Addr().Interface(), oField.Addr().Interface()) - } - if iField.Kind() == reflect.Ptr && iField.Type().Elem().Kind() == reflect.Struct { - setOmittedFields(iField.Interface(), oField.Interface()) - } - if iField.Kind() == reflect.Slice && iField.Type().Elem().Kind() == reflect.Struct { - for j := 0; j < iField.Len(); j++ { - setOmittedFields(iField.Index(j).Addr().Interface(), oField.Index(j).Addr().Interface()) - } - } - if iField.Kind() == reflect.Slice && iField.Type().Elem().Kind() == reflect.Ptr && - iField.Type().Elem().Elem().Kind() == reflect.Struct { - for j := 0; j < iField.Len(); j++ { - setOmittedFields(iField.Index(j).Interface(), oField.Index(j).Interface()) - } - } - } -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_fusion_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_fusion_operation.go deleted file mode 100644 index c7a7e78b8a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_fusion_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// 
---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type DataFusionOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *DataFusionOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. - url := fmt.Sprintf("%s%s", w.Config.DataFusionBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createDataFusionWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*DataFusionOperationWaiter, error) { - w := &DataFusionOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func DataFusionOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createDataFusionWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func DataFusionOperationWaitTime(config *Config, op 
map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createDataFusionWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_google_game_services_game_server_deployment_rollout.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_google_game_services_game_server_deployment_rollout.go deleted file mode 100644 index d57fcaaf07..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_google_game_services_game_server_deployment_rollout.go +++ /dev/null @@ -1,31 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGameServicesGameServerDeploymentRollout() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceGameServicesGameServerDeploymentRollout().Schema) - addRequiredFieldsToSchema(dsSchema, "deployment_id") - - return &schema.Resource{ - Read: dataSourceGameServicesGameServerDeploymentRolloutRead, - Schema: dsSchema, - } -} - -func dataSourceGameServicesGameServerDeploymentRolloutRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - - d.SetId(id) - - return resourceGameServicesGameServerDeploymentRolloutRead(d, meta) -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_access_approval_folder_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_access_approval_folder_service_account.go deleted file mode 100644 index 1e83b7e45e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_access_approval_folder_service_account.go +++ /dev/null @@ -1,63 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceAccessApprovalFolderServiceAccount() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAccessApprovalFolderServiceAccountRead, - Schema: map[string]*schema.Schema{ - "folder_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "account_email": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAccessApprovalFolderServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}folders/{{folder_id}}/serviceAccount") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AccessApprovalFolderServiceAccount %q", d.Id())) - } - - if err := d.Set("name", res["name"]); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("account_email", res["accountEmail"]); err != nil { - return 
fmt.Errorf("Error setting account_email: %s", err) - } - d.SetId(res["name"].(string)) - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_access_approval_organization_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_access_approval_organization_service_account.go deleted file mode 100644 index 0d9f90e6ed..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_access_approval_organization_service_account.go +++ /dev/null @@ -1,63 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceAccessApprovalOrganizationServiceAccount() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAccessApprovalOrganizationServiceAccountRead, - Schema: map[string]*schema.Schema{ - "organization_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "account_email": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAccessApprovalOrganizationServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}organizations/{{organization_id}}/serviceAccount") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AccessApprovalOrganizationServiceAccount %q", d.Id())) - } - - if err := 
d.Set("name", res["name"]); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("account_email", res["accountEmail"]); err != nil { - return fmt.Errorf("Error setting account_email: %s", err) - } - d.SetId(res["name"].(string)) - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_access_approval_project_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_access_approval_project_service_account.go deleted file mode 100644 index 3aaec00c0d..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_access_approval_project_service_account.go +++ /dev/null @@ -1,63 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceAccessApprovalProjectServiceAccount() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAccessApprovalProjectServiceAccountRead, - Schema: map[string]*schema.Schema{ - "project_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "account_email": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAccessApprovalProjectServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/serviceAccount") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil 
{ - return handleNotFoundError(err, d, fmt.Sprintf("AccessApprovalProjectServiceAccount %q", d.Id())) - } - - if err := d.Set("name", res["name"]); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("account_email", res["accountEmail"]); err != nil { - return fmt.Errorf("Error setting account_email: %s", err) - } - d.SetId(res["name"].(string)) - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_artifact_registry_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_artifact_registry_repository.go deleted file mode 100644 index 1eb7135930..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_artifact_registry_repository.go +++ /dev/null @@ -1,47 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceArtifactRegistryRepository() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceArtifactRegistryRepository().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "repository_id", "location") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceArtifactRegistryRepositoryRead, - Schema: dsSchema, - } -} - -func dataSourceArtifactRegistryRepositoryRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - location, err := getLocation(d, config) - if err != nil { - return err - } - - repository_id := d.Get("repository_id").(string) - d.SetId(fmt.Sprintf("projects/%s/locations/%s/repositories/%s", project, location, repository_id)) - - err = resourceArtifactRegistryRepositoryRead(d, 
meta) - if err != nil { - return err - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_certificate_authority.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_certificate_authority.go deleted file mode 100644 index 891a530ec2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_certificate_authority.go +++ /dev/null @@ -1,76 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourcePrivatecaCertificateAuthority() *schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(ResourcePrivatecaCertificateAuthority().Schema) - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "location") - addOptionalFieldsToSchema(dsSchema, "pool") - addOptionalFieldsToSchema(dsSchema, "certificate_authority_id") - - dsSchema["pem_csr"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - - return &schema.Resource{ - Read: dataSourcePrivatecaCertificateAuthorityRead, - Schema: dsSchema, - } -} - -func dataSourcePrivatecaCertificateAuthorityRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return fmt.Errorf("Error generating user agent: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - - d.SetId(id) - - err = resourcePrivatecaCertificateAuthorityRead(d, meta) - if err != nil { - return err - } - - // pem_csr is only applicable for SUBORDINATE CertificateAuthorities when their state is AWAITING_USER_ACTIVATION - if d.Get("type") == "SUBORDINATE" && 
d.Get("state") == "AWAITING_USER_ACTIVATION" { - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:fetch") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for CertificateAuthority: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("PrivatecaCertificateAuthority %q", d.Id())) - } - if err := d.Set("pem_csr", res["pemCsr"]); err != nil { - return fmt.Errorf("Error fetching CertificateAuthority: %s", err) - } - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_cloud_identity_group_memberships.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_cloud_identity_group_memberships.go deleted file mode 100644 index 226f41b581..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_cloud_identity_group_memberships.go +++ /dev/null @@ -1,95 +0,0 @@ -package google - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudidentity/v1" -) - -func DataSourceGoogleCloudIdentityGroupMemberships() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceCloudIdentityGroupMembership().Schema) - - return &schema.Resource{ - Read: dataSourceGoogleCloudIdentityGroupMembershipsRead, - - Schema: map[string]*schema.Schema{ - "memberships": { - 
Type: schema.TypeList, - Computed: true, - Description: `List of Cloud Identity group memberships.`, - Elem: &schema.Resource{ - Schema: dsSchema, - }, - }, - "group": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the Group to get memberships from.`, - }, - }, - } -} - -func dataSourceGoogleCloudIdentityGroupMembershipsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - result := []map[string]interface{}{} - membershipsCall := config.NewCloudIdentityClient(userAgent).Groups.Memberships.List(d.Get("group").(string)).View("FULL") - if config.UserProjectOverride { - billingProject := "" - // err may be nil - project isn't required for this resource - if project, err := getProject(d, config); err == nil { - billingProject = project - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - if billingProject != "" { - membershipsCall.Header().Set("X-Goog-User-Project", billingProject) - } - } - - err = membershipsCall.Pages(config.context, func(resp *cloudidentity.ListMembershipsResponse) error { - for _, member := range resp.Memberships { - result = append(result, map[string]interface{}{ - "name": member.Name, - "roles": flattenCloudIdentityGroupMembershipsRoles(member.Roles), - "preferred_member_key": flattenCloudIdentityGroupsEntityKey(member.PreferredMemberKey), - }) - } - - return nil - }) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudIdentityGroupMemberships %q", d.Id())) - } - - if err := d.Set("memberships", result); err != nil { - return fmt.Errorf("Error setting memberships: %s", err) - } - d.SetId(time.Now().UTC().String()) - return nil -} - -func flattenCloudIdentityGroupMembershipsRoles(roles 
[]*cloudidentity.MembershipRole) []interface{} { - transformed := []interface{}{} - - for _, role := range roles { - transformed = append(transformed, map[string]interface{}{ - "name": role.Name, - }) - } - return transformed -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_cloud_run_locations.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_cloud_run_locations.go deleted file mode 100644 index 633f382032..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_cloud_run_locations.go +++ /dev/null @@ -1,81 +0,0 @@ -package google - -import ( - "fmt" - "log" - "sort" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleCloudRunLocations() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleCloudRunLocationsRead, - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "locations": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func dataSourceGoogleCloudRunLocationsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "https://run.googleapis.com/v1/projects/{{project}}/locations") - if err != nil { - return err - } - - res, err := SendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return fmt.Errorf("Error listing Cloud Run Locations : %s", err) - } - - locationsRaw := flattenCloudRunLocations(res) - - locations := make([]string, len(locationsRaw)) - for i, loc := range locationsRaw { - locations[i] = loc.(string) - } - 
sort.Strings(locations) - - log.Printf("[DEBUG] Received Google Cloud Run Locations: %q", locations) - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("locations", locations); err != nil { - return fmt.Errorf("Error setting location: %s", err) - } - - d.SetId(fmt.Sprintf("projects/%s", project)) - - return nil -} - -func flattenCloudRunLocations(resp map[string]interface{}) []interface{} { - regionList := resp["locations"].([]interface{}) - regions := make([]interface{}, len(regionList)) - for i, v := range regionList { - regionObj := v.(map[string]interface{}) - regions[i] = regionObj["locationId"] - } - return regions -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_cloud_run_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_cloud_run_service.go deleted file mode 100644 index a4592ff06a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_cloud_run_service.go +++ /dev/null @@ -1,30 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleCloudRunService() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceCloudRunService().Schema) - addRequiredFieldsToSchema(dsSchema, "name", "location") - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceGoogleCloudRunServiceRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleCloudRunServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "locations/{{location}}/namespaces/{{project}}/services/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - return resourceCloudRunServiceRead(d, meta) 
-} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_compute_health_check.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_compute_health_check.go deleted file mode 100644 index 64008ba530..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_compute_health_check.go +++ /dev/null @@ -1,29 +0,0 @@ -package google - -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - -func DataSourceGoogleComputeHealthCheck() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeHealthCheck().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceGoogleComputeHealthCheckRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) error { - id, err := replaceVars(d, meta.(*Config), "projects/{{project}}/global/healthChecks/{{name}}") - if err != nil { - return err - } - d.SetId(id) - - return resourceComputeHealthCheckRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_compute_network_endpoint_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_compute_network_endpoint_group.go deleted file mode 100644 index ffb6ed0291..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_compute_network_endpoint_group.go +++ /dev/null @@ -1,58 +0,0 @@ -package google - -import ( - "errors" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func 
DataSourceGoogleComputeNetworkEndpointGroup() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeNetworkEndpointGroup().Schema) - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "zone") - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "self_link") - - return &schema.Resource{ - Read: dataSourceComputeNetworkEndpointGroupRead, - Schema: dsSchema, - } -} - -func dataSourceComputeNetworkEndpointGroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - if name, ok := d.GetOk("name"); ok { - project, err := getProject(d, config) - if err != nil { - return err - } - zone, err := getZone(d, config) - if err != nil { - return err - } - d.SetId(fmt.Sprintf("projects/%s/zones/%s/networkEndpointGroups/%s", project, zone, name.(string))) - } else if selfLink, ok := d.GetOk("self_link"); ok { - parsed, err := ParseNetworkEndpointGroupFieldValue(selfLink.(string), d, config) - if err != nil { - return err - } - if err := d.Set("name", parsed.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("zone", parsed.Zone); err != nil { - return fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("project", parsed.Project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - d.SetId(fmt.Sprintf("projects/%s/zones/%s/networkEndpointGroups/%s", parsed.Project, parsed.Zone, parsed.Name)) - } else { - return errors.New("Must provide either `self_link` or `zone/name`") - } - - return resourceComputeNetworkEndpointGroupRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_compute_network_peering.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_compute_network_peering.go deleted 
file mode 100644 index 8b95e09432..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_compute_network_peering.go +++ /dev/null @@ -1,38 +0,0 @@ -package google - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -const regexGCEName = "^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$" - -func DataSourceComputeNetworkPeering() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeNetworkPeering().Schema) - addRequiredFieldsToSchema(dsSchema, "name", "network") - - dsSchema["name"].ValidateFunc = validateRegexp(regexGCEName) - dsSchema["network"].ValidateFunc = validateRegexp(peerNetworkLinkRegex) - return &schema.Resource{ - Read: dataSourceComputeNetworkPeeringRead, - Schema: dsSchema, - Timeouts: &schema.ResourceTimeout{ - Read: schema.DefaultTimeout(4 * time.Minute), - }, - } -} - -func dataSourceComputeNetworkPeeringRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - networkFieldValue, err := ParseNetworkFieldValue(d.Get("network").(string), d, config) - if err != nil { - return err - } - d.SetId(fmt.Sprintf("%s/%s", networkFieldValue.Name, d.Get("name").(string))) - - return resourceComputeNetworkPeeringRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_dataproc_metastore_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_dataproc_metastore_service.go deleted file mode 100644 index 96f6c8f3b8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_dataproc_metastore_service.go +++ /dev/null @@ -1,29 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceDataprocMetastoreService() *schema.Resource { - - dsSchema := 
datasourceSchemaFromResourceSchema(ResourceDataprocMetastoreService().Schema) - addRequiredFieldsToSchema(dsSchema, "service_id") - addRequiredFieldsToSchema(dsSchema, "location") - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceDataprocMetastoreServiceRead, - Schema: dsSchema, - } -} - -func dataSourceDataprocMetastoreServiceRead(d *schema.ResourceData, meta interface{}) error { - id, err := replaceVars(d, meta.(*Config), "projects/{{project}}/locations/{{location}}/services/{{service_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - return resourceDataprocMetastoreServiceRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_dns_keys.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_dns_keys.go deleted file mode 100644 index ea67f13fd2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_dns_keys.go +++ /dev/null @@ -1,229 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/dns/v1" -) - -// DNSSEC Algorithm Numbers: https://www.iana.org/assignments/dns-sec-alg-numbers/dns-sec-alg-numbers.xhtml -// The following are algorithms that are supported by Cloud DNS -var dnssecAlgoNums = map[string]int{ - "rsasha1": 5, - "rsasha256": 8, - "rsasha512": 10, - "ecdsap256sha256": 13, - "ecdsap384sha384": 14, -} - -// DS RR Digest Types: https://www.iana.org/assignments/ds-rr-types/ds-rr-types.xhtml -// The following are digests that are supported by Cloud DNS -var dnssecDigestType = map[string]int{ - "sha1": 1, - "sha256": 2, - "sha384": 4, -} - -func DataSourceDNSKeys() *schema.Resource { - return &schema.Resource{ - Read: dataSourceDNSKeysRead, - - Schema: map[string]*schema.Schema{ - "managed_zone": 
{ - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "key_signing_keys": { - Type: schema.TypeList, - Computed: true, - Elem: kskResource(), - }, - "zone_signing_keys": { - Type: schema.TypeList, - Computed: true, - Elem: dnsKeyResource(), - }, - }, - } -} - -func dnsKeyResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "algorithm": { - Type: schema.TypeString, - Computed: true, - }, - "creation_time": { - Type: schema.TypeString, - Computed: true, - }, - "description": { - Type: schema.TypeString, - Computed: true, - }, - "digests": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "digest": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "id": { - Type: schema.TypeString, - Computed: true, - }, - "is_active": { - Type: schema.TypeBool, - Computed: true, - }, - "key_length": { - Type: schema.TypeInt, - Computed: true, - }, - "key_tag": { - Type: schema.TypeInt, - Computed: true, - }, - "public_key": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func kskResource() *schema.Resource { - resource := dnsKeyResource() - - resource.Schema["ds_record"] = &schema.Schema{ - Type: schema.TypeString, - Computed: true, - } - - return resource -} - -func generateDSRecord(signingKey *dns.DnsKey) (string, error) { - algoNum, found := dnssecAlgoNums[signingKey.Algorithm] - if !found { - return "", fmt.Errorf("DNSSEC Algorithm number for %s not found", signingKey.Algorithm) - } - - digestType, found := dnssecDigestType[signingKey.Digests[0].Type] - if !found { - return "", fmt.Errorf("DNSSEC Digest type for %s not found", signingKey.Digests[0].Type) - } - - return fmt.Sprintf("%d %d %d %s", - signingKey.KeyTag, - algoNum, - 
digestType, - signingKey.Digests[0].Digest), nil -} - -func flattenSigningKeys(signingKeys []*dns.DnsKey, keyType string) []map[string]interface{} { - var keys []map[string]interface{} - - for _, signingKey := range signingKeys { - if signingKey != nil && signingKey.Type == keyType { - data := map[string]interface{}{ - "algorithm": signingKey.Algorithm, - "creation_time": signingKey.CreationTime, - "description": signingKey.Description, - "digests": flattenDigests(signingKey.Digests), - "id": signingKey.Id, - "is_active": signingKey.IsActive, - "key_length": signingKey.KeyLength, - "key_tag": signingKey.KeyTag, - "public_key": signingKey.PublicKey, - } - - if signingKey.Type == "keySigning" && len(signingKey.Digests) > 0 { - dsRecord, err := generateDSRecord(signingKey) - if err == nil { - data["ds_record"] = dsRecord - } - } - - keys = append(keys, data) - } - } - - return keys -} - -func flattenDigests(dnsKeyDigests []*dns.DnsKeyDigest) []map[string]interface{} { - var digests []map[string]interface{} - - for _, dnsKeyDigest := range dnsKeyDigests { - if dnsKeyDigest != nil { - data := map[string]interface{}{ - "digest": dnsKeyDigest.Digest, - "type": dnsKeyDigest.Type, - } - - digests = append(digests, data) - } - } - - return digests -} - -func dataSourceDNSKeysRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - fv, err := parseProjectFieldValue("managedZones", d.Get("managed_zone").(string), "project", d, config, false) - if err != nil { - return err - } - project := fv.Project - managedZone := fv.Name - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - d.SetId(fmt.Sprintf("projects/%s/managedZones/%s", project, managedZone)) - - log.Printf("[DEBUG] Fetching DNS keys from managed zone %s", managedZone) - - response, err := 
config.NewDnsClient(userAgent).DnsKeys.List(project, managedZone).Do() - if err != nil && !IsGoogleApiErrorWithCode(err, 404) { - return fmt.Errorf("error retrieving DNS keys: %s", err) - } else if IsGoogleApiErrorWithCode(err, 404) { - return nil - } - - log.Printf("[DEBUG] Fetched DNS keys from managed zone %s", managedZone) - - if err := d.Set("key_signing_keys", flattenSigningKeys(response.DnsKeys, "keySigning")); err != nil { - return fmt.Errorf("Error setting key_signing_keys: %s", err) - } - if err := d.Set("zone_signing_keys", flattenSigningKeys(response.DnsKeys, "zoneSigning")); err != nil { - return fmt.Errorf("Error setting zone_signing_keys: %s", err) - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_dns_managed_zone.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_dns_managed_zone.go deleted file mode 100644 index dd41b79b30..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_dns_managed_zone.go +++ /dev/null @@ -1,101 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceDnsManagedZone() *schema.Resource { - return &schema.Resource{ - Read: dataSourceDnsManagedZoneRead, - - Schema: map[string]*schema.Schema{ - "dns_name": { - Type: schema.TypeString, - Computed: true, - }, - - "name": { - Type: schema.TypeString, - Required: true, - }, - - "description": { - Type: schema.TypeString, - Computed: true, - }, - - "managed_zone_id": { - Type: schema.TypeInt, - Computed: true, - Description: `Unique identifier for the resource; defined by the server.`, - }, - - "name_servers": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "visibility": { - Type: schema.TypeString, - Computed: true, - }, - - // Google Cloud DNS 
ManagedZone resources do not have a SelfLink attribute. - "project": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func dataSourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - d.SetId(fmt.Sprintf("projects/%s/managedZones/%s", project, name)) - - zone, err := config.NewDnsClient(userAgent).ManagedZones.Get( - project, name).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataSourceDnsManagedZone %q", name)) - } - - if err := d.Set("dns_name", zone.DnsName); err != nil { - return fmt.Errorf("Error setting dns_name: %s", err) - } - if err := d.Set("name", zone.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("description", zone.Description); err != nil { - return fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("managed_zone_id", zone.Id); err != nil { - return fmt.Errorf("Error setting managed_zone_id: %s", err) - } - if err := d.Set("name_servers", zone.NameServers); err != nil { - return fmt.Errorf("Error setting name_servers: %s", err) - } - if err := d.Set("visibility", zone.Visibility); err != nil { - return fmt.Errorf("Error setting visibility: %s", err) - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_dns_record_set.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_dns_record_set.go deleted file mode 100644 index 53ee1f4079..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_dns_record_set.go +++ /dev/null @@ -1,86 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceDnsRecordSet() *schema.Resource { - return &schema.Resource{ - Read: dataSourceDnsRecordSetRead, - - Schema: map[string]*schema.Schema{ - "managed_zone": { - Type: schema.TypeString, - Required: true, - }, - - "name": { - Type: schema.TypeString, - Required: true, - }, - - "rrdatas": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "ttl": { - Type: schema.TypeInt, - Computed: true, - }, - - "type": { - Type: schema.TypeString, - Required: true, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func dataSourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("managed_zone").(string) - name := d.Get("name").(string) - dnsType := d.Get("type").(string) - d.SetId(fmt.Sprintf("projects/%s/managedZones/%s/rrsets/%s/%s", project, zone, name, dnsType)) - - resp, err := config.NewDnsClient(userAgent).ResourceRecordSets.List(project, zone).Name(name).Type(dnsType).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataSourceDnsRecordSet %q", name)) - } - if len(resp.Rrsets) != 1 { - return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets)) - } - - if err := d.Set("rrdatas", resp.Rrsets[0].Rrdatas); err != nil { - return fmt.Errorf("Error setting rrdatas: %s", err) - } - if err := d.Set("ttl", resp.Rrsets[0].Ttl); err != nil { - return fmt.Errorf("Error setting ttl: %s", err) - } - if err := d.Set("project", project); err != nil { - return 
fmt.Errorf("Error setting project: %s", err) - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_beyondcorp_app_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_beyondcorp_app_connection.go deleted file mode 100644 index 3dbbef3c4a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_beyondcorp_app_connection.go +++ /dev/null @@ -1,42 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleBeyondcorpAppConnection() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceBeyondcorpAppConnection().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "region") - - return &schema.Resource{ - Read: dataSourceGoogleBeyondcorpAppConnectionRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleBeyondcorpAppConnectionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("projects/%s/locations/%s/appConnections/%s", project, region, name)) - - return resourceBeyondcorpAppConnectionRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_beyondcorp_app_connector.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_beyondcorp_app_connector.go deleted file mode 100644 index 2e4dab2952..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_beyondcorp_app_connector.go +++ /dev/null @@ -1,42 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleBeyondcorpAppConnector() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceBeyondcorpAppConnector().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "region") - - return &schema.Resource{ - Read: dataSourceGoogleBeyondcorpAppConnectorRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleBeyondcorpAppConnectorRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("projects/%s/locations/%s/appConnectors/%s", project, region, name)) - - return resourceBeyondcorpAppConnectorRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_beyondcorp_app_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_beyondcorp_app_gateway.go deleted file mode 100644 index 16de868558..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_beyondcorp_app_gateway.go +++ /dev/null @@ -1,42 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleBeyondcorpAppGateway() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceBeyondcorpAppGateway().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - 
addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "region") - - return &schema.Resource{ - Read: dataSourceGoogleBeyondcorpAppGatewayRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleBeyondcorpAppGatewayRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("projects/%s/locations/%s/appGateways/%s", project, region, name)) - - return resourceBeyondcorpAppGatewayRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_bigquery_default_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_bigquery_default_service_account.go deleted file mode 100644 index eb384a4a0c..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_bigquery_default_service_account.go +++ /dev/null @@ -1,58 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleBigqueryDefaultServiceAccount() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleBigqueryDefaultServiceAccountRead, - Schema: map[string]*schema.Schema{ - "email": { - Type: schema.TypeString, - Computed: true, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "member": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleBigqueryDefaultServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - 
if err != nil { - return err - } - - projectResource, err := config.NewBigQueryClient(userAgent).Projects.GetServiceAccount(project).Do() - if err != nil { - return handleNotFoundError(err, d, "BigQuery service account not found") - } - - d.SetId(projectResource.Email) - if err := d.Set("email", projectResource.Email); err != nil { - return fmt.Errorf("Error setting email: %s", err) - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("member", "serviceAccount:"+projectResource.Email); err != nil { - return fmt.Errorf("Error setting member: %s", err) - } - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_billing_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_billing_account.go deleted file mode 100644 index 86b7c1fea7..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_billing_account.go +++ /dev/null @@ -1,137 +0,0 @@ -package google - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "google.golang.org/api/cloudbilling/v1" -) - -func DataSourceGoogleBillingAccount() *schema.Resource { - return &schema.Resource{ - Read: dataSourceBillingAccountRead, - Schema: map[string]*schema.Schema{ - "billing_account": { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"display_name"}, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"billing_account"}, - }, - "open": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "project_ids": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - } -} - -func 
dataSourceBillingAccountRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - open, openOk := d.GetOkExists("open") - - var billingAccount *cloudbilling.BillingAccount - if v, ok := d.GetOk("billing_account"); ok { - resp, err := config.NewBillingClient(userAgent).BillingAccounts.Get(canonicalBillingAccountName(v.(string))).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Billing Account Not Found : %s", v)) - } - - if openOk && resp.Open != open.(bool) { - return fmt.Errorf("Billing account not found: %s", v) - } - - billingAccount = resp - } else if v, ok := d.GetOk("display_name"); ok { - token := "" - for paginate := true; paginate; { - resp, err := config.NewBillingClient(userAgent).BillingAccounts.List().PageToken(token).Do() - if err != nil { - return fmt.Errorf("Error reading billing accounts: %s", err) - } - - for _, ba := range resp.BillingAccounts { - if ba.DisplayName == v.(string) { - if openOk && ba.Open != open.(bool) { - continue - } - if billingAccount != nil { - return fmt.Errorf("More than one matching billing account found") - } - billingAccount = ba - } - } - - token = resp.NextPageToken - paginate = token != "" - } - - if billingAccount == nil { - return fmt.Errorf("Billing account not found: %s", v) - } - } else { - return fmt.Errorf("one of billing_account or display_name must be set") - } - - resp, err := config.NewBillingClient(userAgent).BillingAccounts.Projects.List(billingAccount.Name).Do() - if err != nil { - return fmt.Errorf("Error reading billing account projects: %s", err) - } - projectIds := flattenBillingProjects(resp.ProjectBillingInfo) - - d.SetId(GetResourceNameFromSelfLink(billingAccount.Name)) - if err := d.Set("name", billingAccount.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("display_name", billingAccount.DisplayName); 
err != nil { - return fmt.Errorf("Error setting display_name: %s", err) - } - if err := d.Set("open", billingAccount.Open); err != nil { - return fmt.Errorf("Error setting open: %s", err) - } - if err := d.Set("project_ids", projectIds); err != nil { - return fmt.Errorf("Error setting project_ids: %s", err) - } - - return nil -} - -func canonicalBillingAccountName(ba string) string { - if strings.HasPrefix(ba, "billingAccounts/") { - return ba - } - - return "billingAccounts/" + ba -} - -func flattenBillingProjects(billingProjects []*cloudbilling.ProjectBillingInfo) []string { - projectIds := make([]string, len(billingProjects)) - for i, billingProject := range billingProjects { - projectIds[i] = billingProject.ProjectId - } - - return projectIds -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_client_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_client_config.go deleted file mode 100644 index 0874ff05f3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_client_config.go +++ /dev/null @@ -1,60 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleClientConfig() *schema.Resource { - return &schema.Resource{ - Read: dataSourceClientConfigRead, - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - }, - - "region": { - Type: schema.TypeString, - Computed: true, - }, - - "zone": { - Type: schema.TypeString, - Computed: true, - }, - - "access_token": { - Type: schema.TypeString, - Computed: true, - Sensitive: true, - }, - }, - } -} - -func dataSourceClientConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - d.SetId(fmt.Sprintf("projects/%s/regions/%s/zones/%s", config.Project, config.Region, 
config.Zone)) - if err := d.Set("project", config.Project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", config.Region); err != nil { - return fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("zone", config.Zone); err != nil { - return fmt.Errorf("Error setting zone: %s", err) - } - - token, err := config.tokenSource.Token() - if err != nil { - return err - } - if err := d.Set("access_token", token.AccessToken); err != nil { - return fmt.Errorf("Error setting access_token: %s", err) - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_client_openid_userinfo.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_client_openid_userinfo.go deleted file mode 100644 index 28fe79f533..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_client_openid_userinfo.go +++ /dev/null @@ -1,37 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleClientOpenIDUserinfo() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleClientOpenIDUserinfoRead, - Schema: map[string]*schema.Schema{ - "email": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleClientOpenIDUserinfoRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - email, err := GetCurrentUserEmail(config, userAgent) - if err != nil { - return err - } - d.SetId(email) - if err := d.Set("email", email); err != nil { - return fmt.Errorf("Error setting email: %s", err) - } - return nil -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_cloud_asset_resources_search_all.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_cloud_asset_resources_search_all.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_cloud_asset_resources_search_all.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_cloudbuild_trigger.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_cloudbuild_trigger.go deleted file mode 100644 index 9ad6c650d8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_cloudbuild_trigger.go +++ /dev/null @@ -1,36 +0,0 @@ -package google - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleCloudBuildTrigger() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceCloudBuildTrigger().Schema) - - addRequiredFieldsToSchema(dsSchema, "trigger_id", "location") - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceGoogleCloudBuildTriggerRead, - Schema: dsSchema, - } - -} - -func dataSourceGoogleCloudBuildTriggerRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - - id = strings.ReplaceAll(id, "/locations/global/", "/") - - d.SetId(id) - return resourceCloudBuildTriggerRead(d, meta) -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_cloudfunctions2_function.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_cloudfunctions2_function.go deleted file mode 100644 index bc68508367..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_cloudfunctions2_function.go +++ /dev/null @@ -1,41 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleCloudFunctions2Function() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceCloudfunctions2function().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name", "location") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceGoogleCloudFunctions2FunctionRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleCloudFunctions2FunctionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("projects/%s/locations/%s/functions/%s", project, d.Get("location").(string), d.Get("name").(string))) - - err = resourceCloudfunctions2functionRead(d, meta) - if err != nil { - return err - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_cloudfunctions_function.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_cloudfunctions_function.go deleted file mode 100644 index 196cda1184..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_cloudfunctions_function.go +++ /dev/null @@ -1,50 +0,0 @@ -package google - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleCloudFunctionsFunction() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceCloudFunctionsFunction().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project", "region") - - return &schema.Resource{ - Read: dataSourceGoogleCloudFunctionsFunctionRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleCloudFunctionsFunctionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - cloudFuncId := &cloudFunctionId{ - Project: project, - Region: region, - Name: d.Get("name").(string), - } - - d.SetId(cloudFuncId.cloudFunctionId()) - - err = resourceCloudFunctionsRead(d, meta) - if err != nil { - return err - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_composer_environment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_composer_environment.go deleted file mode 100644 index a7396ed878..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_composer_environment.go +++ /dev/null @@ -1,39 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComposerEnvironment() *schema.Resource { - dsSchema := 
datasourceSchemaFromResourceSchema(ResourceComposerEnvironment().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project", "region") - - return &schema.Resource{ - Read: dataSourceGoogleComposerEnvironmentRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleComposerEnvironmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - envName := d.Get("name").(string) - - d.SetId(fmt.Sprintf("projects/%s/locations/%s/environments/%s", project, region, envName)) - - return resourceComposerEnvironmentRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_address.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_address.go deleted file mode 100644 index 60fa0c52b6..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_address.go +++ /dev/null @@ -1,156 +0,0 @@ -package google - -import ( - "fmt" - "regexp" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -var ( - computeAddressIdTemplate = "projects/%s/regions/%s/addresses/%s" - computeAddressLinkRegex = regexp.MustCompile("projects/(.+)/regions/(.+)/addresses/(.+)$") -) - -func DataSourceGoogleComputeAddress() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleComputeAddressRead, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "address": { - Type: schema.TypeString, - Computed: true, - }, - - "status": { - Type: schema.TypeString, - Computed: true, - }, - - "self_link": { - Type: 
schema.TypeString, - Computed: true, - }, - - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - }, - } -} - -func dataSourceGoogleComputeAddressRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - name := d.Get("name").(string) - - address, err := config.NewComputeClient(userAgent).Addresses.Get(project, region, name).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Address Not Found : %s", name)) - } - - if err := d.Set("address", address.Address); err != nil { - return fmt.Errorf("Error setting address: %s", err) - } - if err := d.Set("status", address.Status); err != nil { - return fmt.Errorf("Error setting status: %s", err) - } - if err := d.Set("self_link", address.SelfLink); err != nil { - return fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", region); err != nil { - return fmt.Errorf("Error setting region: %s", err) - } - - d.SetId(fmt.Sprintf("projects/%s/regions/%s/addresses/%s", project, region, name)) - return nil -} - -type computeAddressId struct { - Project string - Region string - Name string -} - -func (s computeAddressId) canonicalId() string { - return fmt.Sprintf(computeAddressIdTemplate, s.Project, s.Region, s.Name) -} - -func parseComputeAddressId(id string, config *Config) (*computeAddressId, error) { - var parts []string - if computeAddressLinkRegex.MatchString(id) { - parts = computeAddressLinkRegex.FindStringSubmatch(id) - - return &computeAddressId{ - Project: parts[1], - Region: 
parts[2], - Name: parts[3], - }, nil - } else { - parts = strings.Split(id, "/") - } - - if len(parts) == 3 { - return &computeAddressId{ - Project: parts[0], - Region: parts[1], - Name: parts[2], - }, nil - } else if len(parts) == 2 { - // Project is optional. - if config.Project == "" { - return nil, fmt.Errorf("The default project for the provider must be set when using the `{region}/{name}` id format.") - } - - return &computeAddressId{ - Project: config.Project, - Region: parts[0], - Name: parts[1], - }, nil - } else if len(parts) == 1 { - // Project and region is optional - if config.Project == "" { - return nil, fmt.Errorf("The default project for the provider must be set when using the `{name}` id format.") - } - if config.Region == "" { - return nil, fmt.Errorf("The default region for the provider must be set when using the `{name}` id format.") - } - - return &computeAddressId{ - Project: config.Project, - Region: config.Region, - Name: parts[0], - }, nil - } - - return nil, fmt.Errorf("Invalid compute address id. 
Expecting resource link, `{project}/{region}/{name}`, `{region}/{name}` or `{name}` format.") -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_backend_bucket.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_backend_bucket.go deleted file mode 100644 index 0ae685d5c9..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_backend_bucket.go +++ /dev/null @@ -1,37 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeBackendBucket() *schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeBackendBucket().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceComputeBackendBucketRead, - Schema: dsSchema, - } -} - -func dataSourceComputeBackendBucketRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - backendBucketName := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("projects/%s/global/backendBuckets/%s", project, backendBucketName)) - - return resourceComputeBackendBucketRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_backend_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_backend_service.go deleted file mode 100644 index 69d290f3a7..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_backend_service.go +++ /dev/null @@ -1,37 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeBackendService() *schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeBackendService().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceComputeBackendServiceRead, - Schema: dsSchema, - } -} - -func dataSourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - serviceName := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("projects/%s/global/backendServices/%s", project, serviceName)) - - return resourceComputeBackendServiceRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_default_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_default_service_account.go deleted file mode 100644 index 44e0faef37..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_default_service_account.go +++ /dev/null @@ -1,83 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeDefaultServiceAccount() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleComputeDefaultServiceAccountRead, - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, 
- "email": { - Type: schema.TypeString, - Computed: true, - }, - "unique_id": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "display_name": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleComputeDefaultServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - projectCompResource, err := config.NewComputeClient(userAgent).Projects.Get(project).Do() - if err != nil { - return handleNotFoundError(err, d, "GCE default service account") - } - - serviceAccountName, err := serviceAccountFQN(projectCompResource.DefaultServiceAccount, d, config) - if err != nil { - return err - } - - sa, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(serviceAccountName).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Service Account %q", serviceAccountName)) - } - - d.SetId(sa.Name) - if err := d.Set("email", sa.Email); err != nil { - return fmt.Errorf("Error setting email: %s", err) - } - if err := d.Set("unique_id", sa.UniqueId); err != nil { - return fmt.Errorf("Error setting unique_id: %s", err) - } - if err := d.Set("project", sa.ProjectId); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("name", sa.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("display_name", sa.DisplayName); err != nil { - return fmt.Errorf("Error setting display_name: %s", err) - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_disk.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_disk.go deleted 
file mode 100644 index 895c8b3bf0..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_disk.go +++ /dev/null @@ -1,31 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeDisk() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeDisk().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "zone") - - return &schema.Resource{ - Read: dataSourceGoogleComputeDiskRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleComputeDiskRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/disks/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - return resourceComputeDiskRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_forwarding_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_forwarding_rule.go deleted file mode 100644 index 384c3ff910..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_forwarding_rule.go +++ /dev/null @@ -1,43 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeForwardingRule() *schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeForwardingRule().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project") - 
addOptionalFieldsToSchema(dsSchema, "region") - - return &schema.Resource{ - Read: dataSourceGoogleComputeForwardingRuleRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("projects/%s/regions/%s/forwardingRules/%s", project, region, name)) - - return resourceComputeForwardingRuleRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_global_address.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_global_address.go deleted file mode 100644 index 9da2970d89..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_global_address.go +++ /dev/null @@ -1,74 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeGlobalAddress() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleComputeGlobalAddressRead, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "address": { - Type: schema.TypeString, - Computed: true, - }, - - "status": { - Type: schema.TypeString, - Computed: true, - }, - - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - }, - } -} - -func dataSourceGoogleComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - 
project, err := getProject(d, config) - if err != nil { - return err - } - name := d.Get("name").(string) - address, err := config.NewComputeClient(userAgent).GlobalAddresses.Get(project, name).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Global Address Not Found : %s", name)) - } - - if err := d.Set("address", address.Address); err != nil { - return fmt.Errorf("Error setting address: %s", err) - } - if err := d.Set("status", address.Status); err != nil { - return fmt.Errorf("Error setting status: %s", err) - } - if err := d.Set("self_link", address.SelfLink); err != nil { - return fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - d.SetId(fmt.Sprintf("projects/%s/global/addresses/%s", project, name)) - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_ha_vpn_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_ha_vpn_gateway.go deleted file mode 100644 index def034a2bd..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_ha_vpn_gateway.go +++ /dev/null @@ -1,43 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeHaVpnGateway() *schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeHaVpnGateway().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "region") - - return &schema.Resource{ - Read: dataSourceGoogleComputeHaVpnGatewayRead, - Schema: dsSchema, - } -} - -func 
dataSourceGoogleComputeHaVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("projects/%s/regions/%s/vpnGateways/%s", project, region, name)) - - return resourceComputeHaVpnGatewayRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance_group_manager.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance_group_manager.go deleted file mode 100644 index 3729014560..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance_group_manager.go +++ /dev/null @@ -1,61 +0,0 @@ -package google - -import ( - "errors" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeInstanceGroupManager() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeInstanceGroupManager().Schema) - addOptionalFieldsToSchema(dsSchema, "name", "self_link", "project", "zone") - - return &schema.Resource{ - Read: dataSourceComputeInstanceGroupManagerRead, - Schema: dsSchema, - } -} - -func dataSourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - if selfLink, ok := d.GetOk("self_link"); ok { - parsed, err := ParseInstanceGroupFieldValue(selfLink.(string), d, config) - if err != nil { - return fmt.Errorf("InstanceGroup name, zone or project could not be parsed from %s", selfLink) - } - if err := d.Set("name", parsed.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("zone", parsed.Zone); err != nil { - return 
fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("project", parsed.Project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - d.SetId(fmt.Sprintf("projects/%s/zones/%s/instanceGroupManagers/%s", parsed.Project, parsed.Zone, parsed.Name)) - } else if name, ok := d.GetOk("name"); ok { - zone, err := getZone(d, config) - if err != nil { - return err - } - project, err := getProject(d, config) - if err != nil { - return err - } - d.SetId(fmt.Sprintf("projects/%s/zones/%s/instanceGroupManagers/%s", project, zone, name.(string))) - } else { - return errors.New("Must provide either `self_link` or `zone/name`") - } - - err := resourceComputeInstanceGroupManagerRead(d, meta) - - if err != nil { - return err - } - if d.Id() == "" { - return errors.New("Instance Manager Group not found") - } - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance_template.go deleted file mode 100644 index 09d5b4efba..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance_template.go +++ /dev/null @@ -1,89 +0,0 @@ -package google - -import ( - "fmt" - "sort" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "google.golang.org/api/compute/v1" -) - -func DataSourceGoogleComputeInstanceTemplate() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeInstanceTemplate().Schema) - - dsSchema["filter"] = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - } - dsSchema["most_recent"] = &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - } - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "name", "filter", 
"most_recent", "project") - - dsSchema["name"].ExactlyOneOf = []string{"name", "filter"} - dsSchema["filter"].ExactlyOneOf = []string{"name", "filter"} - - return &schema.Resource{ - Read: datasourceComputeInstanceTemplateRead, - Schema: dsSchema, - } -} - -func datasourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - if v, ok := d.GetOk("name"); ok { - return retrieveInstance(d, meta, project, v.(string)) - } - if v, ok := d.GetOk("filter"); ok { - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - templates, err := config.NewComputeClient(userAgent).InstanceTemplates.List(project).Filter(v.(string)).Do() - if err != nil { - return fmt.Errorf("error retrieving list of instance templates: %s", err) - } - - mostRecent := d.Get("most_recent").(bool) - if mostRecent { - sort.Sort(ByCreationTimestamp(templates.Items)) - } - - count := len(templates.Items) - if count == 1 || count > 1 && mostRecent { - return retrieveInstance(d, meta, project, templates.Items[0].Name) - } - - return fmt.Errorf("your filter has returned %d instance template(s). Please refine your filter or set most_recent to return exactly one instance template", len(templates.Items)) - } - - return fmt.Errorf("one of name or filters must be set") -} - -func retrieveInstance(d *schema.ResourceData, meta interface{}, project, name string) error { - d.SetId("projects/" + project + "/global/instanceTemplates/" + name) - - return resourceComputeInstanceTemplateRead(d, meta) -} - -// ByCreationTimestamp implements sort.Interface for []*InstanceTemplate based on -// the CreationTimestamp field. 
-type ByCreationTimestamp []*compute.InstanceTemplate - -func (a ByCreationTimestamp) Len() int { return len(a) } -func (a ByCreationTimestamp) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a ByCreationTimestamp) Less(i, j int) bool { - return a[i].CreationTimestamp > a[j].CreationTimestamp -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_region_network_endpoint_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_region_network_endpoint_group.go deleted file mode 100644 index 418ee4365a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_region_network_endpoint_group.go +++ /dev/null @@ -1,59 +0,0 @@ -package google - -import ( - "errors" - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeRegionNetworkEndpointGroup() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeRegionNetworkEndpointGroup().Schema) - - addOptionalFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "region") - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "self_link") - - return &schema.Resource{ - Read: dataSourceComputeRegionNetworkEndpointGroupRead, - Schema: dsSchema, - } -} - -func dataSourceComputeRegionNetworkEndpointGroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - if name, ok := d.GetOk("name"); ok { - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("projects/%s/regions/%s/networkEndpointGroups/%s", project, region, name.(string))) - } else if selfLink, ok := d.GetOk("self_link"); ok { - parsed, err := 
ParseNetworkEndpointGroupRegionalFieldValue(selfLink.(string), d, config) - if err != nil { - return err - } - if err := d.Set("name", parsed.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("project", parsed.Project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", parsed.Region); err != nil { - return fmt.Errorf("Error setting region: %s", err) - } - - d.SetId(fmt.Sprintf("projects/%s/regions/%s/networkEndpointGroups/%s", parsed.Project, parsed.Region, parsed.Name)) - } else { - return errors.New("Must provide either `self_link` or `region/name`") - } - - return resourceComputeRegionNetworkEndpointGroupRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_region_ssl_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_region_ssl_certificate.go deleted file mode 100644 index 3e384b8068..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_region_ssl_certificate.go +++ /dev/null @@ -1,37 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleRegionComputeSslCertificate() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeRegionSslCertificate().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "region") - - return &schema.Resource{ - Read: dataSourceComputeRegionSslCertificateRead, - Schema: dsSchema, - } -} - -func dataSourceComputeRegionSslCertificateRead(d *schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - - project, region, name, err := GetRegionalResourcePropertiesFromSelfLinkOrSchema(d, config) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("projects/%s/regions/%s/sslCertificates/%s", project, region, name)) - - return resourceComputeRegionSslCertificateRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_resource_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_resource_policy.go deleted file mode 100644 index 67fdb3e7b3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_resource_policy.go +++ /dev/null @@ -1,39 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeResourcePolicy() *schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeResourcePolicy().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "region") - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceGoogleComputeResourcePolicyRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleComputeResourcePolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, name)) - - return resourceComputeResourcePolicyRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_router.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_router.go deleted file mode 100644 index b6105a205f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_router.go +++ /dev/null @@ -1,25 +0,0 @@ -package google - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeRouter() *schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeRouter().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addRequiredFieldsToSchema(dsSchema, "network") - addOptionalFieldsToSchema(dsSchema, "region") - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceComputeRouterRead, - Schema: dsSchema, - } -} - -func dataSourceComputeRouterRead(d *schema.ResourceData, meta interface{}) error { - routerName := d.Get("name").(string) - - d.SetId(routerName) - return resourceComputeRouterRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_router_nat.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_router_nat.go deleted file mode 100644 index 5840fbea8c..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_router_nat.go +++ /dev/null @@ -1,33 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeRouterNat() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeRouterNat().Schema) - - addRequiredFieldsToSchema(dsSchema, "name", "router") - addOptionalFieldsToSchema(dsSchema, "project", "region") - - return &schema.Resource{ - Read: dataSourceGoogleComputeRouterNatRead, - 
Schema: dsSchema, - } - -} - -func dataSourceGoogleComputeRouterNatRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{router}}/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return resourceComputeRouterNatRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_ssl_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_ssl_certificate.go deleted file mode 100644 index a63564f476..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_ssl_certificate.go +++ /dev/null @@ -1,37 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeSslCertificate() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeSslCertificate().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceComputeSslCertificateRead, - Schema: dsSchema, - } -} - -func dataSourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - certificateName := d.Get("name").(string) - - d.SetId(fmt.Sprintf("projects/%s/global/sslCertificates/%s", project, certificateName)) - - return resourceComputeSslCertificateRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_ssl_policy.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_ssl_policy.go deleted file mode 100644 index 38674393cf..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_ssl_policy.go +++ /dev/null @@ -1,37 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeSslPolicy() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeSslPolicy().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: datasourceComputeSslPolicyRead, - Schema: dsSchema, - } -} - -func datasourceComputeSslPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - policyName := d.Get("name").(string) - - d.SetId(fmt.Sprintf("projects/%s/global/sslPolicies/%s", project, policyName)) - - return resourceComputeSslPolicyRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_vpn_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_vpn_gateway.go deleted file mode 100644 index feb7d7a848..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_vpn_gateway.go +++ /dev/null @@ -1,92 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/compute/v1" -) - -func DataSourceGoogleComputeVpnGateway() *schema.Resource { - return 
&schema.Resource{ - Read: dataSourceGoogleComputeVpnGatewayRead, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "region": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "description": { - Type: schema.TypeString, - Computed: true, - }, - - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - - "network": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - - vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.NewComputeClient(userAgent)) - - gateway, err := vpnGatewaysService.Get(project, region, name).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("VPN Gateway Not Found : %s", name)) - } - if err := d.Set("network", ConvertSelfLinkToV1(gateway.Network)); err != nil { - return fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("region", gateway.Region); err != nil { - return fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("self_link", gateway.SelfLink); err != nil { - return fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("description", gateway.Description); err != nil { - return fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - d.SetId(fmt.Sprintf("projects/%s/regions/%s/targetVpnGateways/%s", project, region, name)) - return nil -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_attached_install_manifest.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_attached_install_manifest.go deleted file mode 100644 index 01723a1dc7..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_attached_install_manifest.go +++ /dev/null @@ -1,84 +0,0 @@ -package google - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleContainerAttachedInstallManifest() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleContainerAttachedInstallManifestRead, - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Required: true, - }, - "location": { - Type: schema.TypeString, - Required: true, - }, - "cluster_id": { - Type: schema.TypeString, - Required: true, - }, - "platform_version": { - Type: schema.TypeString, - Required: true, - }, - "manifest": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleContainerAttachedInstallManifestRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - clusterId := d.Get("cluster_id").(string) - platformVersion := d.Get("platform_version").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - location, err := getLocation(d, config) - if err != nil { - return err - } - if len(location) == 0 { - return fmt.Errorf("Cannot determine location: set location in this data source or at provider-level") - } - - url, err := replaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}:generateAttachedClusterInstallManifest") - if err != 
nil { - return err - } - params := map[string]string{ - "attached_cluster_id": clusterId, - "platform_version": platformVersion, - } - url, err = addQueryParams(url, params) - if err != nil { - return err - } - res, err := SendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return err - } - - if err := d.Set("manifest", res["manifest"]); err != nil { - return fmt.Errorf("Error setting manifest: %s", err) - } - - d.SetId(time.Now().UTC().String()) - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_attached_versions.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_attached_versions.go deleted file mode 100644 index 7c349461f6..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_attached_versions.go +++ /dev/null @@ -1,70 +0,0 @@ -package google - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleContainerAttachedVersions() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleContainerAttachedVersionsRead, - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Required: true, - }, - "location": { - Type: schema.TypeString, - Required: true, - }, - "valid_versions": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func dataSourceGoogleContainerAttachedVersionsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - location, err := getLocation(d, config) - if err != nil { - return err - } - if len(location) == 0 { - return 
fmt.Errorf("Cannot determine location: set location in this data source or at provider-level") - } - - url, err := replaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedServerConfig") - if err != nil { - return err - } - res, err := SendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return err - } - var validVersions []string - for _, v := range res["validVersions"].([]interface{}) { - vm := v.(map[string]interface{}) - validVersions = append(validVersions, vm["version"].(string)) - } - if err := d.Set("valid_versions", validVersions); err != nil { - return err - } - - d.SetId(time.Now().UTC().String()) - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_aws_versions.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_aws_versions.go deleted file mode 100644 index e06c8c449f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_aws_versions.go +++ /dev/null @@ -1,78 +0,0 @@ -package google - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleContainerAwsVersions() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleContainerAwsVersionsRead, - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Optional: true, - }, - "location": { - Type: schema.TypeString, - Optional: true, - }, - "valid_versions": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "supported_regions": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func dataSourceGoogleContainerAwsVersionsRead(d *schema.ResourceData, meta interface{}) error { - config := 
meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - location, err := getLocation(d, config) - if err != nil { - return err - } - if len(location) == 0 { - return fmt.Errorf("Cannot determine location: set location in this data source or at provider-level") - } - - url, err := replaceVars(d, config, "{{ContainerAwsBasePath}}projects/{{project}}/locations/{{location}}/awsServerConfig") - if err != nil { - return err - } - res, err := SendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return err - } - if err := d.Set("supported_regions", res["supportedAwsRegions"]); err != nil { - return err - } - var validVersions []string - for _, v := range res["validVersions"].([]interface{}) { - vm := v.(map[string]interface{}) - validVersions = append(validVersions, vm["version"].(string)) - } - if err := d.Set("valid_versions", validVersions); err != nil { - return err - } - - d.SetId(time.Now().UTC().String()) - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_azure_versions.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_azure_versions.go deleted file mode 100644 index b26a5c8f56..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_azure_versions.go +++ /dev/null @@ -1,78 +0,0 @@ -package google - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleContainerAzureVersions() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleContainerAzureVersionsRead, - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Optional: true, - }, - "location": { - Type: 
schema.TypeString, - Optional: true, - }, - "valid_versions": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "supported_regions": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func dataSourceGoogleContainerAzureVersionsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - location, err := getLocation(d, config) - if err != nil { - return err - } - if len(location) == 0 { - return fmt.Errorf("Cannot determine location: set location in this data source or at provider-level") - } - - url, err := replaceVars(d, config, "{{ContainerAzureBasePath}}projects/{{project}}/locations/{{location}}/azureServerConfig") - if err != nil { - return err - } - res, err := SendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return err - } - if err := d.Set("supported_regions", res["supportedAzureRegions"]); err != nil { - return err - } - var validVersions []string - for _, v := range res["validVersions"].([]interface{}) { - vm := v.(map[string]interface{}) - validVersions = append(validVersions, vm["version"].(string)) - } - if err := d.Set("valid_versions", validVersions); err != nil { - return err - } - - d.SetId(time.Now().UTC().String()) - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_cluster.go deleted file mode 100644 index 6c89522cd9..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_cluster.go +++ /dev/null @@ -1,53 +0,0 
@@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleContainerCluster() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceContainerCluster().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project", "location") - - return &schema.Resource{ - Read: datasourceContainerClusterRead, - Schema: dsSchema, - } -} - -func datasourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - clusterName := d.Get("name").(string) - - location, err := getLocation(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - id := containerClusterFullName(project, location, clusterName) - - d.SetId(id) - - if err := resourceContainerClusterRead(d, meta); err != nil { - return err - } - - if d.Id() == "" { - return fmt.Errorf("%s not found", id) - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_android_app.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_android_app.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_android_app.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_apple_app.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_apple_app.go deleted file mode 100644 index 71664db3c8..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_apple_app.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_apple_app_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_apple_app_config.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_apple_app_config.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_hosting_channel.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_hosting_channel.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_hosting_channel.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_web_app.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_web_app.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_web_app.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_web_app_config.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_web_app_config.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_firebase_web_app_config.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_folder_organization_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_folder_organization_policy.go deleted file mode 100644 index 936b75c24f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_folder_organization_policy.go +++ /dev/null @@ -1,27 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleFolderOrganizationPolicy() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceGoogleFolderOrganizationPolicy().Schema) - - addRequiredFieldsToSchema(dsSchema, "folder") - addRequiredFieldsToSchema(dsSchema, "constraint") - - return &schema.Resource{ - Read: datasourceGoogleFolderOrganizationPolicyRead, - Schema: dsSchema, - } -} - -func datasourceGoogleFolderOrganizationPolicyRead(d *schema.ResourceData, meta interface{}) error { - - d.SetId(fmt.Sprintf("%s/%s", d.Get("folder"), d.Get("constraint"))) - - return resourceGoogleFolderOrganizationPolicyRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_global_compute_forwarding_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_global_compute_forwarding_rule.go deleted file mode 100644 index 
cf108cfc02..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_global_compute_forwarding_rule.go +++ /dev/null @@ -1,37 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleComputeGlobalForwardingRule() *schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeGlobalForwardingRule().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceGoogleComputeGlobalForwardingRuleRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("projects/%s/global/forwardingRules/%s", project, name)) - - return resourceComputeGlobalForwardingRuleRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_iam_role.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_iam_role.go deleted file mode 100644 index c3c2cbd661..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_iam_role.go +++ /dev/null @@ -1,59 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleIamRole() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleIamRoleRead, - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "title": { - Type: schema.TypeString, - Computed: true, - }, - 
"included_permissions": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "stage": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleIamRoleRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - roleName := d.Get("name").(string) - role, err := config.NewIamClient(userAgent).Roles.Get(roleName).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Error reading IAM Role %s: %s", roleName, err)) - } - - d.SetId(role.Name) - if err := d.Set("title", role.Title); err != nil { - return fmt.Errorf("Error setting title: %s", err) - } - if err := d.Set("stage", role.Stage); err != nil { - return fmt.Errorf("Error setting stage: %s", err) - } - if err := d.Set("included_permissions", role.IncludedPermissions); err != nil { - return fmt.Errorf("Error setting included_permissions: %s", err) - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_crypto_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_crypto_key.go deleted file mode 100644 index 7b5162c8df..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_crypto_key.go +++ /dev/null @@ -1,35 +0,0 @@ -package google - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleKmsCryptoKey() *schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(ResourceKMSCryptoKey().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addRequiredFieldsToSchema(dsSchema, "key_ring") - - return &schema.Resource{ - Read: dataSourceGoogleKmsCryptoKeyRead, - Schema: dsSchema, - } - -} - -func 
dataSourceGoogleKmsCryptoKeyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - keyRingId, err := parseKmsKeyRingId(d.Get("key_ring").(string), config) - if err != nil { - return err - } - - cryptoKeyId := KmsCryptoKeyId{ - KeyRingId: *keyRingId, - Name: d.Get("name").(string), - } - - d.SetId(cryptoKeyId.cryptoKeyId()) - - return resourceKMSCryptoKeyRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_crypto_key_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_crypto_key_version.go deleted file mode 100644 index 981d10bbb0..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_crypto_key_version.go +++ /dev/null @@ -1,181 +0,0 @@ -package google - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleKmsCryptoKeyVersion() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleKmsCryptoKeyVersionRead, - Schema: map[string]*schema.Schema{ - "crypto_key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "version": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - }, - "algorithm": { - Type: schema.TypeString, - Computed: true, - }, - "protection_level": { - Type: schema.TypeString, - Computed: true, - }, - "state": { - Type: schema.TypeString, - Computed: true, - }, - "public_key": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "algorithm": { - Type: schema.TypeString, - Computed: true, - }, - "pem": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func dataSourceGoogleKmsCryptoKeyVersionRead(d *schema.ResourceData, meta 
interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}/cryptoKeyVersions/{{version}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Getting attributes for CryptoKeyVersion: %#v", url) - - cryptoKeyId, err := parseKmsCryptoKeyId(d.Get("crypto_key").(string), config) - if err != nil { - return err - } - res, err := SendRequest(config, "GET", cryptoKeyId.KeyRingId.Project, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("KmsCryptoKeyVersion %q", d.Id())) - } - - if err := d.Set("version", flattenKmsCryptoKeyVersionVersion(res["name"], d)); err != nil { - return fmt.Errorf("Error setting CryptoKeyVersion: %s", err) - } - if err := d.Set("name", flattenKmsCryptoKeyVersionName(res["name"], d)); err != nil { - return fmt.Errorf("Error setting CryptoKeyVersion: %s", err) - } - if err := d.Set("state", flattenKmsCryptoKeyVersionState(res["state"], d)); err != nil { - return fmt.Errorf("Error setting CryptoKeyVersion: %s", err) - } - if err := d.Set("protection_level", flattenKmsCryptoKeyVersionProtectionLevel(res["protectionLevel"], d)); err != nil { - return fmt.Errorf("Error setting CryptoKeyVersion: %s", err) - } - if err := d.Set("algorithm", flattenKmsCryptoKeyVersionAlgorithm(res["algorithm"], d)); err != nil { - return fmt.Errorf("Error setting CryptoKeyVersion: %s", err) - } - - url, err = replaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Getting purpose of CryptoKey: %#v", url) - res, err = SendRequest(config, "GET", cryptoKeyId.KeyRingId.Project, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("KmsCryptoKey %q", d.Id())) - } - - if res["purpose"] == "ASYMMETRIC_SIGN" || res["purpose"] == "ASYMMETRIC_DECRYPT" { - url, err = replaceVars(d, config, 
"{{KMSBasePath}}{{crypto_key}}/cryptoKeyVersions/{{version}}/publicKey") - if err != nil { - return err - } - log.Printf("[DEBUG] Getting public key of CryptoKeyVersion: %#v", url) - - res, err = SendRequestWithTimeout(config, "GET", cryptoKeyId.KeyRingId.Project, url, userAgent, nil, d.Timeout(schema.TimeoutRead), isCryptoKeyVersionsPendingGeneration) - - if err != nil { - log.Printf("Error generating public key: %s", err) - return err - } - - if err := d.Set("public_key", flattenKmsCryptoKeyVersionPublicKey(res, d)); err != nil { - return fmt.Errorf("Error setting CryptoKeyVersion public key: %s", err) - } - } - d.SetId(fmt.Sprintf("//cloudkms.googleapis.com/v1/%s/cryptoKeyVersions/%d", d.Get("crypto_key"), d.Get("version"))) - - return nil -} - -func flattenKmsCryptoKeyVersionVersion(v interface{}, d *schema.ResourceData) interface{} { - parts := strings.Split(v.(string), "/") - version := parts[len(parts)-1] - // Handles the string fixed64 format - if intVal, err := StringToFixed64(version); err == nil { - return intVal - } // let terraform core handle it if we can't convert the string to an int. 
- return v -} - -func flattenKmsCryptoKeyVersionName(v interface{}, d *schema.ResourceData) interface{} { - return v -} - -func flattenKmsCryptoKeyVersionState(v interface{}, d *schema.ResourceData) interface{} { - return v -} - -func flattenKmsCryptoKeyVersionProtectionLevel(v interface{}, d *schema.ResourceData) interface{} { - return v -} - -func flattenKmsCryptoKeyVersionAlgorithm(v interface{}, d *schema.ResourceData) interface{} { - return v -} - -func flattenKmsCryptoKeyVersionPublicKey(v interface{}, d *schema.ResourceData) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pem"] = - flattenKmsCryptoKeyVersionPublicKeyPem(original["pem"], d) - transformed["algorithm"] = - flattenKmsCryptoKeyVersionPublicKeyAlgorithm(original["algorithm"], d) - return []interface{}{transformed} -} -func flattenKmsCryptoKeyVersionPublicKeyPem(v interface{}, d *schema.ResourceData) interface{} { - return v -} - -func flattenKmsCryptoKeyVersionPublicKeyAlgorithm(v interface{}, d *schema.ResourceData) interface{} { - return v -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_key_ring.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_key_ring.go deleted file mode 100644 index a028ae1a6c..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_key_ring.go +++ /dev/null @@ -1,35 +0,0 @@ -package google - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleKmsKeyRing() *schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(ResourceKMSKeyRing().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addRequiredFieldsToSchema(dsSchema, "location") - 
addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceGoogleKmsKeyRingRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleKmsKeyRingRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - keyRingId := kmsKeyRingId{ - Name: d.Get("name").(string), - Location: d.Get("location").(string), - Project: project, - } - d.SetId(keyRingId.keyRingId()) - - return resourceKMSKeyRingRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_secret_asymmetric.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_secret_asymmetric.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_secret_asymmetric.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_logging_sink.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_logging_sink.go deleted file mode 100644 index 0eadef2db7..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_logging_sink.go +++ /dev/null @@ -1,44 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleLoggingSink() *schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(resourceLoggingSinkSchema()) - dsSchema["id"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: `Required. 
An identifier for the resource in format: "projects/[PROJECT_ID]/sinks/[SINK_NAME]", "organizations/[ORGANIZATION_ID]/sinks/[SINK_NAME]", "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_NAME]", "folders/[FOLDER_ID]/sinks/[SINK_NAME]"`, - } - - return &schema.Resource{ - Read: dataSourceGoogleLoggingSinkRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleLoggingSinkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - sinkId := d.Get("id").(string) - - sink, err := config.NewLoggingClient(userAgent).Sinks.Get(sinkId).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Logging Sink %s", d.Id())) - } - - if err := flattenResourceLoggingSink(d, sink); err != nil { - return err - } - - d.SetId(sinkId) - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_organization.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_organization.go deleted file mode 100644 index c301f385db..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_organization.go +++ /dev/null @@ -1,137 +0,0 @@ -package google - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "google.golang.org/api/cloudresourcemanager/v1" -) - -func DataSourceGoogleOrganization() *schema.Resource { - return &schema.Resource{ - Read: dataSourceOrganizationRead, - Schema: map[string]*schema.Schema{ - "domain": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"organization"}, - }, - "organization": { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"domain"}, - }, - "org_id": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - 
Type: schema.TypeString, - Computed: true, - }, - "directory_customer_id": { - Type: schema.TypeString, - Computed: true, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - }, - "lifecycle_state": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceOrganizationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - var organization *cloudresourcemanager.Organization - if v, ok := d.GetOk("domain"); ok { - filter := fmt.Sprintf("domain=%s", v.(string)) - var resp *cloudresourcemanager.SearchOrganizationsResponse - err := RetryTimeDuration(func() (err error) { - resp, err = config.NewResourceManagerClient(userAgent).Organizations.Search(&cloudresourcemanager.SearchOrganizationsRequest{ - Filter: filter, - }).Do() - return err - }, d.Timeout(schema.TimeoutRead)) - if err != nil { - return fmt.Errorf("Error reading organization: %s", err) - } - - if len(resp.Organizations) == 0 { - return fmt.Errorf("Organization not found: %s", v) - } - - if len(resp.Organizations) > 1 { - // Attempt to find an exact domain match - for _, org := range resp.Organizations { - if org.DisplayName == v.(string) { - organization = org - break - } - } - if organization == nil { - return fmt.Errorf("Received multiple organizations in the response, but could not find an exact domain match.") - } - } else { - organization = resp.Organizations[0] - } - - } else if v, ok := d.GetOk("organization"); ok { - var resp *cloudresourcemanager.Organization - err := RetryTimeDuration(func() (err error) { - resp, err = config.NewResourceManagerClient(userAgent).Organizations.Get(canonicalOrganizationName(v.(string))).Do() - return err - }, d.Timeout(schema.TimeoutRead)) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Organization Not Found : %s", v)) - } - - organization = resp - } else { - return 
fmt.Errorf("one of domain or organization must be set") - } - - d.SetId(organization.Name) - if err := d.Set("name", organization.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("org_id", GetResourceNameFromSelfLink(organization.Name)); err != nil { - return fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("domain", organization.DisplayName); err != nil { - return fmt.Errorf("Error setting domain: %s", err) - } - if err := d.Set("create_time", organization.CreationTime); err != nil { - return fmt.Errorf("Error setting create_time: %s", err) - } - if err := d.Set("lifecycle_state", organization.LifecycleState); err != nil { - return fmt.Errorf("Error setting lifecycle_state: %s", err) - } - if organization.Owner != nil { - if err := d.Set("directory_customer_id", organization.Owner.DirectoryCustomerId); err != nil { - return fmt.Errorf("Error setting directory_customer_id: %s", err) - } - } - - return nil -} - -func canonicalOrganizationName(ba string) string { - if strings.HasPrefix(ba, "organizations/") { - return ba - } - - return "organizations/" + ba -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_project.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_project.go deleted file mode 100644 index e815e5eb80..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_project.go +++ /dev/null @@ -1,47 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleProject() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceGoogleProject().Schema) - - addOptionalFieldsToSchema(dsSchema, "project_id") - - dsSchema["project_id"].ValidateFunc = validateDSProjectID() - 
return &schema.Resource{ - Read: datasourceGoogleProjectRead, - Schema: dsSchema, - } -} - -func datasourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if v, ok := d.GetOk("project_id"); ok { - project := v.(string) - d.SetId(fmt.Sprintf("projects/%s", project)) - } else { - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("no project value set. `project_id` must be set at the resource level, or a default `project` value must be specified on the provider") - } - d.SetId(fmt.Sprintf("projects/%s", project)) - } - - id := d.Id() - - if err := resourceGoogleProjectRead(d, meta); err != nil { - return err - } - - if d.Id() == "" { - return fmt.Errorf("%s not found or not in ACTIVE state", id) - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_project_organization_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_project_organization_policy.go deleted file mode 100644 index a2cbc50ed6..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_project_organization_policy.go +++ /dev/null @@ -1,27 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleProjectOrganizationPolicy() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceGoogleProjectOrganizationPolicy().Schema) - - addRequiredFieldsToSchema(dsSchema, "project") - addRequiredFieldsToSchema(dsSchema, "constraint") - - return &schema.Resource{ - Read: datasourceGoogleProjectOrganizationPolicyRead, - Schema: dsSchema, - } -} - -func datasourceGoogleProjectOrganizationPolicyRead(d *schema.ResourceData, meta interface{}) error { - - d.SetId(fmt.Sprintf("%s:%s", 
d.Get("project"), d.Get("constraint"))) - - return resourceGoogleProjectOrganizationPolicyRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_project_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_project_service.go deleted file mode 100644 index 71e81e929c..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_project_service.go +++ /dev/null @@ -1,30 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleProjectService() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceGoogleProjectService().Schema) - addRequiredFieldsToSchema(dsSchema, "service") - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceGoogleProjectServiceRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleProjectServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "{{project}}/{{service}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - return resourceGoogleProjectServiceRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account_access_token.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account_access_token.go deleted file mode 100644 index 3ea216fadf..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account_access_token.go +++ /dev/null @@ -1,85 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "strings" - - 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iamcredentials "google.golang.org/api/iamcredentials/v1" -) - -func DataSourceGoogleServiceAccountAccessToken() *schema.Resource { - - return &schema.Resource{ - Read: dataSourceGoogleServiceAccountAccessTokenRead, - Schema: map[string]*schema.Schema{ - "target_service_account": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRegexp("(" + strings.Join(PossibleServiceAccountNames, "|") + ")"), - }, - "access_token": { - Type: schema.TypeString, - Sensitive: true, - Computed: true, - }, - "scopes": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - StateFunc: func(v interface{}) string { - return canonicalizeServiceScope(v.(string)) - }, - }, - // ValidateFunc is not yet supported on lists or sets. - }, - "delegates": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateRegexp(ServiceAccountLinkRegex), - }, - }, - "lifetime": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateDuration(), // duration <=3600s; TODO: support validateDuration(min,max) - Default: "3600s", - }, - }, - } -} - -func dataSourceGoogleServiceAccountAccessTokenRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - log.Printf("[INFO] Acquire Service Account AccessToken for %s", d.Get("target_service_account").(string)) - - service := config.NewIamCredentialsClient(userAgent) - - name := fmt.Sprintf("projects/-/serviceAccounts/%s", d.Get("target_service_account").(string)) - tokenRequest := &iamcredentials.GenerateAccessTokenRequest{ - Lifetime: d.Get("lifetime").(string), - Delegates: convertStringSet(d.Get("delegates").(*schema.Set)), - Scope: canonicalizeServiceScopes(convertStringSet(d.Get("scopes").(*schema.Set))), - } - at, err := 
service.Projects.ServiceAccounts.GenerateAccessToken(name, tokenRequest).Do() - if err != nil { - return err - } - - d.SetId(name) - if err := d.Set("access_token", at.AccessToken); err != nil { - return fmt.Errorf("Error setting access_token: %s", err) - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account_id_token.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account_id_token.go deleted file mode 100644 index 53aea9e229..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account_id_token.go +++ /dev/null @@ -1,122 +0,0 @@ -package google - -import ( - "fmt" - "strings" - - iamcredentials "google.golang.org/api/iamcredentials/v1" - "google.golang.org/api/idtoken" - "google.golang.org/api/option" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "golang.org/x/net/context" -) - -const ( - userInfoScope = "https://www.googleapis.com/auth/userinfo.email" -) - -func DataSourceGoogleServiceAccountIdToken() *schema.Resource { - - return &schema.Resource{ - Read: dataSourceGoogleServiceAccountIdTokenRead, - Schema: map[string]*schema.Schema{ - "target_audience": { - Type: schema.TypeString, - Required: true, - }, - "target_service_account": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp("(" + strings.Join(PossibleServiceAccountNames, "|") + ")"), - }, - "delegates": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateRegexp(ServiceAccountLinkRegex), - }, - }, - "include_email": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - // Not used currently - // https://github.com/googleapis/google-api-go-client/issues/542 - // "format": { - // Type: schema.TypeString, - // Optional: true, - // 
ValidateFunc: validation.StringInSlice([]string{ - // "FULL", "STANDARD"}, true), - // Default: "STANDARD", - // }, - "id_token": { - Type: schema.TypeString, - Sensitive: true, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleServiceAccountIdTokenRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - targetAudience := d.Get("target_audience").(string) - creds, err := config.GetCredentials([]string{userInfoScope}, false) - if err != nil { - return fmt.Errorf("error calling getCredentials(): %v", err) - } - - // If the source credential is not a service account key, use the API to generate the idToken - if creds.JSON == nil { - // Use - // https://cloud.google.com/iam/docs/reference/credentials/rest/v1/projects.serviceAccounts/generateIdToken - service := config.NewIamCredentialsClient(userAgent) - name := fmt.Sprintf("projects/-/serviceAccounts/%s", d.Get("target_service_account").(string)) - tokenRequest := &iamcredentials.GenerateIdTokenRequest{ - Audience: targetAudience, - IncludeEmail: d.Get("include_email").(bool), - Delegates: convertStringSet(d.Get("delegates").(*schema.Set)), - } - at, err := service.Projects.ServiceAccounts.GenerateIdToken(name, tokenRequest).Do() - if err != nil { - return fmt.Errorf("error calling iamcredentials.GenerateIdToken: %v", err) - } - - d.SetId(d.Get("target_service_account").(string)) - if err := d.Set("id_token", at.Token); err != nil { - return fmt.Errorf("Error setting id_token: %s", err) - } - - return nil - } - - ctx := context.Background() - co := []option.ClientOption{} - if creds.JSON != nil { - co = append(co, idtoken.WithCredentialsJSON(creds.JSON)) - } - - idTokenSource, err := idtoken.NewTokenSource(ctx, targetAudience, co...) 
- if err != nil { - return fmt.Errorf("unable to retrieve TokenSource: %v", err) - } - idToken, err := idTokenSource.Token() - if err != nil { - return fmt.Errorf("unable to retrieve Token: %v", err) - } - - d.SetId(targetAudience) - if err := d.Set("id_token", idToken.AccessToken); err != nil { - return fmt.Errorf("Error setting id_token: %s", err) - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account_jwt.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account_jwt.go deleted file mode 100644 index 206050a221..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account_jwt.go +++ /dev/null @@ -1,105 +0,0 @@ -package google - -import ( - "encoding/json" - "fmt" - "strings" - "time" - - iamcredentials "google.golang.org/api/iamcredentials/v1" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleServiceAccountJwt() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleServiceAccountJwtRead, - Schema: map[string]*schema.Schema{ - "payload": { - Type: schema.TypeString, - Required: true, - Description: `A JSON-encoded JWT claims set that will be included in the signed JWT.`, - }, - "expires_in": { - Type: schema.TypeInt, - Optional: true, - Description: "Number of seconds until the JWT expires. 
If set and non-zero an `exp` claim will be added to the payload derived from the current timestamp plus expires_in seconds.", - }, - "target_service_account": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRegexp("(" + strings.Join(PossibleServiceAccountNames, "|") + ")"), - }, - "delegates": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateRegexp(ServiceAccountLinkRegex), - }, - }, - "jwt": { - Type: schema.TypeString, - Sensitive: true, - Computed: true, - }, - }, - } -} - -var ( - dataSourceGoogleServiceAccountJwtNow = time.Now -) - -func dataSourceGoogleServiceAccountJwtRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - userAgent, err := generateUserAgentString(d, config.UserAgent) - - if err != nil { - return err - } - - payload := d.Get("payload").(string) - - if expiresIn := d.Get("expires_in").(int); expiresIn != 0 { - var decoded map[string]interface{} - - if err := json.Unmarshal([]byte(payload), &decoded); err != nil { - return fmt.Errorf("error decoding `payload` while adding `exp` field: %w", err) - } - - decoded["exp"] = dataSourceGoogleServiceAccountJwtNow().Add(time.Duration(expiresIn) * time.Second).Unix() - - payloadBytesWithExp, err := json.Marshal(decoded) - - if err != nil { - return fmt.Errorf("error re-encoding `payload` while adding `exp` field: %w", err) - } - - payload = string(payloadBytesWithExp) - } - - name := fmt.Sprintf("projects/-/serviceAccounts/%s", d.Get("target_service_account").(string)) - - jwtRequest := &iamcredentials.SignJwtRequest{ - Payload: payload, - Delegates: convertStringSet(d.Get("delegates").(*schema.Set)), - } - - service := config.NewIamCredentialsClient(userAgent) - - jwtResponse, err := service.Projects.ServiceAccounts.SignJwt(name, jwtRequest).Do() - - if err != nil { - return fmt.Errorf("error calling iamcredentials.SignJwt: %w", err) - } - - d.SetId(name) - - if err := d.Set("jwt", 
jwtResponse.SignedJwt); err != nil { - return fmt.Errorf("error setting jwt attribute: %w", err) - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account_key.go deleted file mode 100644 index d4d74f362a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account_key.go +++ /dev/null @@ -1,81 +0,0 @@ -package google - -import ( - "fmt" - - "regexp" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func DataSourceGoogleServiceAccountKey() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleServiceAccountKeyRead, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRegexp(ServiceAccountKeyNameRegex), - }, - "public_key_type": { - Type: schema.TypeString, - Default: "TYPE_X509_PEM_FILE", - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"TYPE_NONE", "TYPE_X509_PEM_FILE", "TYPE_RAW_PUBLIC_KEY"}, false), - }, - "project": { - Type: schema.TypeString, - Optional: true, - }, - "key_algorithm": { - Type: schema.TypeString, - Computed: true, - }, - "public_key": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleServiceAccountKeyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - keyName := d.Get("name").(string) - - // Validate name since interpolated values (i.e from a key or service - // account resource) will not get validated at plan time. 
- r := regexp.MustCompile(ServiceAccountKeyNameRegex) - if !r.MatchString(keyName) { - return fmt.Errorf("invalid key name %q does not match regexp %q", keyName, ServiceAccountKeyNameRegex) - } - - publicKeyType := d.Get("public_key_type").(string) - - // Confirm the service account key exists - sak, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Keys.Get(keyName).PublicKeyType(publicKeyType).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Service Account Key %q", keyName)) - } - - d.SetId(sak.Name) - - if err := d.Set("name", sak.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("key_algorithm", sak.KeyAlgorithm); err != nil { - return fmt.Errorf("Error setting key_algorithm: %s", err) - } - if err := d.Set("public_key", sak.PublicKeyData); err != nil { - return fmt.Errorf("Error setting public_key: %s", err) - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_storage_bucket.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_storage_bucket.go deleted file mode 100644 index 4fbb4feff6..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_storage_bucket.go +++ /dev/null @@ -1,38 +0,0 @@ -package google - -import ( - "log" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleStorageBucket() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceStorageBucket().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - return &schema.Resource{ - Read: dataSourceGoogleStorageBucketRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleStorageBucketRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != 
nil { - return err - } - - // Get the bucket and acl - bucket := d.Get("name").(string) - - res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - if err != nil { - return err - } - log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) - - return setStorageBucket(d, config, res, bucket, userAgent) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_storage_transfer_project_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_storage_transfer_project_service_account.go deleted file mode 100644 index 4753d88fc5..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_storage_transfer_project_service_account.go +++ /dev/null @@ -1,65 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleStorageTransferProjectServiceAccount() *schema.Resource { - return &schema.Resource{ - Read: dataSourceGoogleStorageTransferProjectServiceAccountRead, - Schema: map[string]*schema.Schema{ - "email": { - Type: schema.TypeString, - Computed: true, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "subject_id": { - Type: schema.TypeString, - Computed: true, - }, - "member": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleStorageTransferProjectServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - serviceAccount, err := config.NewStorageTransferClient(userAgent).GoogleServiceAccounts.Get(project).Do() - if err != nil { - return handleNotFoundError(err, d, 
"Google Cloud Storage Transfer service account not found") - } - - d.SetId(serviceAccount.AccountEmail) - if err := d.Set("email", serviceAccount.AccountEmail); err != nil { - return fmt.Errorf("Error setting email: %s", err) - } - if err := d.Set("subject_id", serviceAccount.SubjectId); err != nil { - return fmt.Errorf("Error setting subject_id: %s", err) - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("member", "serviceAccount:"+serviceAccount.AccountEmail); err != nil { - return fmt.Errorf("Error setting member: %s", err) - } - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_iam_beta_workload_identity_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_iam_beta_workload_identity_pool.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_iam_beta_workload_identity_pool.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_iam_beta_workload_identity_pool_provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_iam_beta_workload_identity_pool_provider.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_iam_beta_workload_identity_pool_provider.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_iap_client.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_iap_client.go deleted file mode 100644 index 
3f58889ec0..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_iap_client.go +++ /dev/null @@ -1,29 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleIapClient() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceIapClient().Schema) - addRequiredFieldsToSchema(dsSchema, "brand", "client_id") - - return &schema.Resource{ - Read: dataSourceGoogleIapClientRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleIapClientRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "{{brand}}/identityAwareProxyClients/{{client_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - return resourceIapClientRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_notification_channel.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_notification_channel.go deleted file mode 100644 index f3cd3bd292..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_notification_channel.go +++ /dev/null @@ -1,111 +0,0 @@ -package google - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceMonitoringNotificationChannel() *schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(ResourceMonitoringNotificationChannel().Schema) - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "display_name") - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "type") - addOptionalFieldsToSchema(dsSchema, "labels") - addOptionalFieldsToSchema(dsSchema, "user_labels") - - return 
&schema.Resource{ - Read: dataSourceMonitoringNotificationChannelRead, - Schema: dsSchema, - } -} - -func dataSourceMonitoringNotificationChannelRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/notificationChannels") - if err != nil { - return err - } - - displayName := d.Get("display_name").(string) - channelType := d.Get("type").(string) - - if displayName == "" && channelType == "" { - return fmt.Errorf("At least one of display_name or type must be provided") - } - - labels, err := expandMonitoringNotificationChannelLabels(d.Get("labels"), d, config) - if err != nil { - return err - } - - userLabels, err := expandMonitoringNotificationChannelLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } - - filters := make([]string, 0, len(labels)+2) - - if displayName != "" { - filters = append(filters, fmt.Sprintf(`display_name="%s"`, displayName)) - } - - if channelType != "" { - filters = append(filters, fmt.Sprintf(`type="%s"`, channelType)) - } - - for k, v := range labels { - filters = append(filters, fmt.Sprintf(`labels.%s="%s"`, k, v)) - } - - for k, v := range userLabels { - filters = append(filters, fmt.Sprintf(`user_labels.%s="%s"`, k, v)) - } - - filter := strings.Join(filters, " AND ") - params := map[string]string{ - "filter": filter, - } - url, err = addQueryParams(url, params) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - response, err := SendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return fmt.Errorf("Error retrieving NotificationChannels: %s", err) - } - - var channels []interface{} - if v, ok := response["notificationChannels"]; ok { - channels = v.([]interface{}) - } - if len(channels) == 0 { - return fmt.Errorf("No 
NotificationChannel found using filter: %s", filter) - } - if len(channels) > 1 { - return fmt.Errorf("Found more than one 1 NotificationChannel matching specified filter: %s", filter) - } - res := channels[0].(map[string]interface{}) - - name := flattenMonitoringNotificationChannelName(res["name"], d, config).(string) - if err := d.Set("name", name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - - return resourceMonitoringNotificationChannelRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_service.go deleted file mode 100644 index 8dd1c1379e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_service.go +++ /dev/null @@ -1,106 +0,0 @@ -package google - -import ( - "fmt" - neturl "net/url" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -type monitoringServiceTypeStateSetter func(map[string]interface{}, *schema.ResourceData, interface{}) error - -// dataSourceMonitoringServiceType creates a Datasource resource for a type of service. 
It takes -// - schema for identifying the service, specific to the type (AppEngine moduleId) -// - list query filter to filter a specific service (type, ID) from the list of services for a parent -// - typeFlattenF for reading the service-specific schema (typeSchema) -func dataSourceMonitoringServiceType( - typeSchema map[string]*schema.Schema, - listFilter string, - typeStateSetter monitoringServiceTypeStateSetter) *schema.Resource { - - // Convert monitoring schema to ds schema - dsSchema := datasourceSchemaFromResourceSchema(ResourceMonitoringService().Schema) - addOptionalFieldsToSchema(dsSchema, "project") - - // Add schema specific to the service type - dsSchema = mergeSchemas(typeSchema, dsSchema) - - return &schema.Resource{ - Read: dataSourceMonitoringServiceTypeReadFromList(listFilter, typeStateSetter), - Schema: dsSchema, - } -} - -// dataSourceMonitoringServiceRead returns a ReadFunc that calls service.list with proper filters -// to identify both the type of service and underlying service resource. -// It takes the list query filter (i.e. ?filter=$listFilter) and a ReadFunc to handle reading any type-specific schema. 
-func dataSourceMonitoringServiceTypeReadFromList(listFilter string, typeStateSetter monitoringServiceTypeStateSetter) schema.ReadFunc { - return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - filters, err := replaceVars(d, config, listFilter) - if err != nil { - return err - } - - listUrlTmpl := "{{MonitoringBasePath}}v3/projects/{{project}}/services?filter=" + neturl.QueryEscape(filters) - url, err := replaceVars(d, config, listUrlTmpl) - if err != nil { - return err - } - - resp, err := SendRequest(config, "GET", project, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return fmt.Errorf("unable to list Monitoring Service for data source: %v", err) - } - - v, ok := resp["services"] - if !ok || v == nil { - return fmt.Errorf("no Monitoring Services found for data source") - } - ls, ok := v.([]interface{}) - if !ok { - return fmt.Errorf("no Monitoring Services found for data source") - } - if len(ls) == 0 { - return fmt.Errorf("no Monitoring Services found for data source") - } - if len(ls) > 1 { - return fmt.Errorf("more than one Monitoring Services with given identifier found") - } - res := ls[0].(map[string]interface{}) - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting Service: %s", err) - } - if err := d.Set("display_name", flattenMonitoringServiceDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error setting Service: %s", err) - } - if err := d.Set("telemetry", flattenMonitoringServiceTelemetry(res["telemetry"], d, config)); err != nil { - return fmt.Errorf("Error setting Service: %s", err) - } - if err := d.Set("service_id", flattenMonitoringServiceServiceId(res["name"], d, config)); err != nil { - return fmt.Errorf("Error setting Service: %s", err) - } 
- if err := typeStateSetter(res, d, config); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - - name := flattenMonitoringServiceName(res["name"], d, config).(string) - if err := d.Set("name", name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - - return nil - } -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_pubsub_subscription.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_pubsub_subscription.go deleted file mode 100644 index 4e6732fe26..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_pubsub_subscription.go +++ /dev/null @@ -1,30 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGooglePubsubSubscription() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourcePubsubSubscription().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceGooglePubsubSubscriptionRead, - Schema: dsSchema, - } -} - -func dataSourceGooglePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "projects/{{project}}/subscriptions/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - return resourcePubsubSubscriptionRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_pubsub_topic.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_pubsub_topic.go deleted file mode 100644 index 2d7983d813..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_pubsub_topic.go +++ /dev/null @@ -1,30 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGooglePubsubTopic() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourcePubsubTopic().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceGooglePubsubTopicRead, - Schema: dsSchema, - } -} - -func dataSourceGooglePubsubTopicRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "projects/{{project}}/topics/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - return resourcePubsubTopicRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_redis_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_redis_instance.go deleted file mode 100644 index d191c2f7b2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_redis_instance.go +++ /dev/null @@ -1,29 +0,0 @@ -package google - -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - -func DataSourceGoogleRedisInstance() *schema.Resource { - // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceRedisInstance().Schema) - - // Set 'Required' schema elements - addRequiredFieldsToSchema(dsSchema, "name") - - // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "project", "region") - - return &schema.Resource{ - Read: dataSourceGoogleRedisInstanceRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleRedisInstanceRead(d *schema.ResourceData, meta 
interface{}) error { - id, err := replaceVars(d, meta.(*Config), "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - d.SetId(id) - - return resourceRedisInstanceRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_runtimeconfig_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_runtimeconfig_config.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_runtimeconfig_config.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_runtimeconfig_variable.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_runtimeconfig_variable.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_runtimeconfig_variable.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_secret_manager_secret.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_secret_manager_secret.go deleted file mode 100644 index 0103792012..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_secret_manager_secret.go +++ /dev/null @@ -1,28 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceSecretManagerSecret() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceSecretManagerSecret().Schema) - addRequiredFieldsToSchema(dsSchema, "secret_id") 
- addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceSecretManagerSecretRead, - Schema: dsSchema, - } -} - -func dataSourceSecretManagerSecretRead(d *schema.ResourceData, meta interface{}) error { - id, err := replaceVars(d, meta.(*Config), "projects/{{project}}/secrets/{{secret_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - return resourceSecretManagerSecretRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_secret_manager_secret_version_access.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_secret_manager_secret_version_access.go deleted file mode 100644 index 376c650a9e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_secret_manager_secret_version_access.go +++ /dev/null @@ -1,116 +0,0 @@ -package google - -import ( - "encoding/base64" - "fmt" - "log" - "regexp" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceSecretManagerSecretVersionAccess() *schema.Resource { - return &schema.Resource{ - Read: dataSourceSecretManagerSecretVersionAccessRead, - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "secret": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - "version": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "secret_data": { - Type: schema.TypeString, - Computed: true, - Sensitive: true, - }, - }, - } -} - -func dataSourceSecretManagerSecretVersionAccessRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - 
return err - } - - fv, err := parseProjectFieldValue("secrets", d.Get("secret").(string), "project", d, config, false) - if err != nil { - return err - } - if d.Get("project").(string) != "" && d.Get("project").(string) != fv.Project { - return fmt.Errorf("The project set on this secret version (%s) is not equal to the project where this secret exists (%s).", d.Get("project").(string), fv.Project) - } - project := fv.Project - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("secret", fv.Name); err != nil { - return fmt.Errorf("Error setting secret: %s", err) - } - - var url string - versionNum := d.Get("version") - - if versionNum != "" { - url, err = replaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret}}/versions/{{version}}") - if err != nil { - return err - } - } else { - url, err = replaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret}}/versions/latest") - if err != nil { - return err - } - } - - url = fmt.Sprintf("%s:access", url) - resp, err := SendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return fmt.Errorf("Error retrieving available secret manager secret version access: %s", err.Error()) - } - - if err := d.Set("name", resp["name"].(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - - secretVersionRegex := regexp.MustCompile("projects/(.+)/secrets/(.+)/versions/(.+)$") - - parts := secretVersionRegex.FindStringSubmatch(resp["name"].(string)) - // should return [full string, project number, secret name, version number] - if len(parts) != 4 { - panic(fmt.Sprintf("secret name, %s, does not match format, projects/{{project}}/secrets/{{secret}}/versions/{{version}}", resp["name"].(string))) - } - - log.Printf("[DEBUG] Received Google SecretManager Version: %q", parts[3]) - - if err := d.Set("version", parts[3]); err != nil { - return fmt.Errorf("Error setting 
version: %s", err) - } - - data := resp["payload"].(map[string]interface{}) - secretData, err := base64.StdEncoding.DecodeString(data["data"].(string)) - if err != nil { - return fmt.Errorf("Error decoding secret manager secret version data: %s", err.Error()) - } - if err := d.Set("secret_data", string(secretData)); err != nil { - return fmt.Errorf("Error setting secret_data: %s", err) - } - - d.SetId(resp["name"].(string)) - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sourcerepo_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sourcerepo_repository.go deleted file mode 100644 index 82a40d3b0c..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sourcerepo_repository.go +++ /dev/null @@ -1,33 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceGoogleSourceRepoRepository() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceSourceRepoRepository().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceGoogleSourceRepoRepositoryRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleSourceRepoRepositoryRead(d *schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - - id, err := replaceVars(d, config, "projects/{{project}}/repos/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return resourceSourceRepoRepositoryRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_spanner_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_spanner_instance.go 
deleted file mode 100644 index 8e1cea01d1..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_spanner_instance.go +++ /dev/null @@ -1,34 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceSpannerInstance() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceSpannerInstance().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "config") // not sure why this is configurable - addOptionalFieldsToSchema(dsSchema, "display_name") // not sure why this is configurable - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceSpannerInstanceRead, - Schema: dsSchema, - } -} - -func dataSourceSpannerInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "{{project}}/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return resourceSpannerInstanceRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_database.go deleted file mode 100644 index 90769e8d6c..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_database.go +++ /dev/null @@ -1,37 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceSqlDatabase() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceSQLDatabase().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addRequiredFieldsToSchema(dsSchema, "instance") - addOptionalFieldsToSchema(dsSchema, "project") - - return 
&schema.Resource{ - Read: dataSourceSqlDatabaseRead, - Schema: dsSchema, - } -} - -func dataSourceSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Database: %s", err) - } - d.SetId(fmt.Sprintf("projects/%s/instances/%s/databases/%s", project, d.Get("instance").(string), d.Get("name").(string))) - err = resourceSQLDatabaseRead(d, meta) - if err != nil { - return err - } - if err := d.Set("deletion_policy", nil); err != nil { - return fmt.Errorf("Error setting deletion_policy: %s", err) - } - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_database_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_database_instance.go deleted file mode 100644 index 0c562f65bc..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_database_instance.go +++ /dev/null @@ -1,23 +0,0 @@ -package google - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceSqlDatabaseInstance() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceSqlDatabaseInstance().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "project") - - return &schema.Resource{ - Read: dataSourceSqlDatabaseInstanceRead, - Schema: dsSchema, - } -} - -func dataSourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { - - return resourceSqlDatabaseInstanceRead(d, meta) - -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_databases.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_databases.go deleted file mode 100644 
index 28f56aac3b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_databases.go +++ /dev/null @@ -1,89 +0,0 @@ -package google - -import ( - "fmt" - "sort" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - sqladmin "google.golang.org/api/sqladmin/v1beta4" -) - -func DataSourceSqlDatabases() *schema.Resource { - - return &schema.Resource{ - Read: dataSourceSqlDatabasesRead, - - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Optional: true, - Description: `Project ID of the project that contains the instance.`, - }, - "instance": { - Type: schema.TypeString, - Required: true, - Description: `The name of the Cloud SQL database instance in which the database belongs.`, - }, - "databases": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: datasourceSchemaFromResourceSchema(ResourceSQLDatabase().Schema), - }, - }, - }, - } -} - -func dataSourceSqlDatabasesRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - project, err := getProject(d, config) - if err != nil { - return err - } - var databases *sqladmin.DatabasesListResponse - err = RetryTimeDuration(func() (rerr error) { - databases, rerr = config.NewSqlAdminClient(userAgent).Databases.List(project, d.Get("instance").(string)).Do() - return rerr - }, d.Timeout(schema.TimeoutRead), IsSqlOperationInProgressError) - - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Databases in %q instance", d.Get("instance").(string))) - } - flattenedDatabases := flattenDatabases(databases.Items) - - //client-side sorting to provide consistent ordering of the databases - sort.SliceStable(flattenedDatabases, func(i, j int) bool { - return strings.Compare(flattenedDatabases[i]["name"].(string), 
flattenedDatabases[j]["name"].(string)) < 1 - }) - if err := d.Set("databases", flattenedDatabases); err != nil { - return fmt.Errorf("Error setting databases: %s", err) - } - d.SetId(fmt.Sprintf("project/%s/instance/%s/databases", project, d.Get("instance").(string))) - return nil -} - -func flattenDatabases(fetchedDatabases []*sqladmin.Database) []map[string]interface{} { - if fetchedDatabases == nil { - return make([]map[string]interface{}, 0) - } - - databases := make([]map[string]interface{}, 0, len(fetchedDatabases)) - for _, rawDatabase := range fetchedDatabases { - database := make(map[string]interface{}) - database["name"] = rawDatabase.Name - database["instance"] = rawDatabase.Instance - database["project"] = rawDatabase.Project - database["charset"] = rawDatabase.Charset - database["collation"] = rawDatabase.Collation - database["self_link"] = rawDatabase.SelfLink - - databases = append(databases, database) - } - return databases -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_storage_bucket_object_content.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_storage_bucket_object_content.go deleted file mode 100644 index c4ecbad356..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_storage_bucket_object_content.go +++ /dev/null @@ -1,61 +0,0 @@ -package google - -import ( - "fmt" - "io/ioutil" - "net/http" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/storage/v1" -) - -func DataSourceGoogleStorageBucketObjectContent() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceStorageBucketObject().Schema) - - addRequiredFieldsToSchema(dsSchema, "bucket") - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "content") - - return &schema.Resource{ - Read: 
dataSourceGoogleStorageBucketObjectContentRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleStorageBucketObjectContentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - name := d.Get("name").(string) - - objectsService := storage.NewObjectsService(config.NewStorageClient(userAgent)) - getCall := objectsService.Get(bucket, name) - - res, err := getCall.Download() - if err != nil { - return fmt.Errorf("Error downloading storage bucket object: %s", err) - } - - defer res.Body.Close() - var bodyString string - - if res.StatusCode == http.StatusOK { - bodyBytes, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("Error reading all from res.Body: %s", err) - } - bodyString = string(bodyBytes) - } - - if err := d.Set("content", bodyString); err != nil { - return fmt.Errorf("Error setting content: %s", err) - } - - d.SetId(bucket + "-" + name) - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_tpu_tensorflow_versions.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_tpu_tensorflow_versions.go deleted file mode 100644 index e58b907dc1..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_tpu_tensorflow_versions.go +++ /dev/null @@ -1,91 +0,0 @@ -package google - -import ( - "fmt" - "log" - "sort" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceTpuTensorflowVersions() *schema.Resource { - return &schema.Resource{ - Read: dataSourceTpuTensorFlowVersionsRead, - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - 
}, - "versions": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func dataSourceTpuTensorFlowVersionsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone, err := getZone(d, config) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/tensorflowVersions") - if err != nil { - return err - } - - versionsRaw, err := paginatedListRequest(project, url, userAgent, config, flattenTpuTensorflowVersions) - if err != nil { - return fmt.Errorf("Error listing TPU Tensorflow versions: %s", err) - } - - versions := make([]string, len(versionsRaw)) - for i, ver := range versionsRaw { - versions[i] = ver.(string) - } - sort.Strings(versions) - - log.Printf("[DEBUG] Received Google TPU Tensorflow Versions: %q", versions) - - if err := d.Set("versions", versions); err != nil { - return fmt.Errorf("Error setting versions: %s", err) - } - if err := d.Set("zone", zone); err != nil { - return fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - d.SetId(fmt.Sprintf("projects/%s/zones/%s", project, zone)) - - return nil -} - -func flattenTpuTensorflowVersions(resp map[string]interface{}) []interface{} { - verObjList := resp["tensorflowVersions"].([]interface{}) - versions := make([]interface{}, len(verObjList)) - for i, v := range verObjList { - verObj := v.(map[string]interface{}) - versions[i] = verObj["version"] - } - return versions -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_vpc_access_connector.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_vpc_access_connector.go deleted file mode 100644 index c58c66847d..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_vpc_access_connector.go +++ /dev/null @@ -1,32 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceVPCAccessConnector() *schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(ResourceVPCAccessConnector().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "project", "region") - - return &schema.Resource{ - Read: dataSourceVPCAccessConnectorRead, - Schema: dsSchema, - } -} - -func dataSourceVPCAccessConnectorRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/connectors/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - - d.SetId(id) - - return resourceVPCAccessConnectorRead(d, meta) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dataproc_cluster_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dataproc_cluster_operation.go deleted file mode 100644 index 0e4b886bf2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dataproc_cluster_operation.go +++ /dev/null @@ -1,30 +0,0 @@ -package google - -import ( - "fmt" - "time" - - "google.golang.org/api/dataproc/v1" -) - -type DataprocClusterOperationWaiter struct { - Service *dataproc.Service - CommonOperationWaiter -} - -func (w *DataprocClusterOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - return 
w.Service.Projects.Regions.Operations.Get(w.Op.Name).Do() -} - -func dataprocClusterOperationWait(config *Config, op *dataproc.Operation, activity, userAgent string, timeout time.Duration) error { - w := &DataprocClusterOperationWaiter{ - Service: config.NewDataprocClient(userAgent), - } - if err := w.SetOp(op); err != nil { - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dataproc_metastore_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dataproc_metastore_operation.go deleted file mode 100644 index 32b7c445e2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dataproc_metastore_operation.go +++ /dev/null @@ -1,62 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "time" -) - -type DataprocMetastoreOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *DataprocMetastoreOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.DataprocMetastoreBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createDataprocMetastoreWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*DataprocMetastoreOperationWaiter, error) { - w := &DataprocMetastoreOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func DataprocMetastoreOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createDataprocMetastoreWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/datasource_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/datasource_helpers.go deleted file mode 100644 index 1214718738..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/datasource_helpers.go +++ /dev/null @@ -1,73 +0,0 @@ -package google - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// datasourceSchemaFromResourceSchema is a recursive func that -// converts an existing Resource schema to a Datasource schema. -// All schema elements are copied, but certain attributes are ignored or changed: -// - all attributes have Computed = true -// - all attributes have ForceNew, Required = false -// - Validation funcs and attributes (e.g. 
MaxItems) are not copied -func datasourceSchemaFromResourceSchema(rs map[string]*schema.Schema) map[string]*schema.Schema { - ds := make(map[string]*schema.Schema, len(rs)) - for k, v := range rs { - dv := &schema.Schema{ - Computed: true, - ForceNew: false, - Required: false, - Description: v.Description, - Type: v.Type, - } - - switch v.Type { - case schema.TypeSet: - dv.Set = v.Set - fallthrough - case schema.TypeList: - // List & Set types are generally used for 2 cases: - // - a list/set of simple primitive values (e.g. list of strings) - // - a sub resource - if elem, ok := v.Elem.(*schema.Resource); ok { - // handle the case where the Element is a sub-resource - dv.Elem = &schema.Resource{ - Schema: datasourceSchemaFromResourceSchema(elem.Schema), - } - } else { - // handle simple primitive case - dv.Elem = v.Elem - } - - default: - // Elem of all other types are copied as-is - dv.Elem = v.Elem - - } - ds[k] = dv - - } - return ds -} - -// fixDatasourceSchemaFlags is a convenience func that toggles the Computed, -// Optional + Required flags on a schema element. This is useful when the schema -// has been generated (using `datasourceSchemaFromResourceSchema` above for -// example) and therefore the attribute flags were not set appropriately when -// first added to the schema definition. Currently only supports top-level -// schema elements. -func fixDatasourceSchemaFlags(schema map[string]*schema.Schema, required bool, keys ...string) { - for _, v := range keys { - schema[v].Computed = false - schema[v].Optional = !required - schema[v].Required = required - } -} - -func addRequiredFieldsToSchema(schema map[string]*schema.Schema, keys ...string) { - fixDatasourceSchemaFlags(schema, true, keys...) -} - -func addOptionalFieldsToSchema(schema map[string]*schema.Schema, keys ...string) { - fixDatasourceSchemaFlags(schema, false, keys...) 
-} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/datastore_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/datastore_operation.go deleted file mode 100644 index 666bc18c47..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/datastore_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type DatastoreOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *DatastoreOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.DatastoreBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil, datastoreIndex409Contention) -} - -func createDatastoreWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*DatastoreOperationWaiter, error) { - w := &DatastoreOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func DatastoreOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createDatastoreWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func DatastoreOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createDatastoreWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/datastream_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/datastream_operation.go deleted file mode 100644 index 1e825cd985..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/datastream_operation.go +++ /dev/null @@ -1,130 +0,0 @@ -package google - -import ( - "bytes" - "encoding/json" - "fmt" - "time" - - datastream "google.golang.org/api/datastream/v1" -) - -type DatastreamOperationWaiter struct { - Config *Config - UserAgent string - Project string - Op datastream.Operation - CommonOperationWaiter -} - -func (w *DatastreamOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. - url := fmt.Sprintf("%s%s", w.Config.DatastreamBasePath, w.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func (w *DatastreamOperationWaiter) Error() error { - if w != nil && w.Op.Error != nil { - return &DatastreamOperationError{Op: w.Op} - } - return nil -} - -func (w *DatastreamOperationWaiter) SetOp(op interface{}) error { - w.CommonOperationWaiter.SetOp(op) - if err := Convert(op, &w.Op); err != nil { - return err - } - return nil -} - -func createDatastreamWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*DatastreamOperationWaiter, error) { - w := &DatastreamOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func DatastreamOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent 
string, timeout time.Duration) error { - w, err := createDatastreamWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.Op.Response), response) -} - -func DatastreamOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createDatastreamWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -// DatastreamOperationError wraps datastream.Status and implements the -// error interface so it can be returned. -type DatastreamOperationError struct { - Op datastream.Operation -} - -func (e DatastreamOperationError) Error() string { - var buf bytes.Buffer - - for _, err := range e.Op.Error.Details { - buf.Write(err) - buf.WriteString("\n") - } - if validations := e.extractFailedValidationResult(); validations != nil { - buf.Write(validations) - buf.WriteString("\n") - } - - return buf.String() -} - -// extractFailedValidationResult extracts the internal failed validations -// if there are any. -func (e DatastreamOperationError) extractFailedValidationResult() []byte { - var metadata datastream.OperationMetadata - data, err := e.Op.Metadata.MarshalJSON() - if err != nil { - return nil - } - err = json.Unmarshal(data, &metadata) - if err != nil { - return nil - } - if metadata.ValidationResult == nil { - return nil - } - var res []byte - for _, v := range metadata.ValidationResult.Validations { - if v.State == "FAILED" { - data, err := v.MarshalJSON() - if err != nil { - return nil - } - res = append(res, data...) - res = append(res, []byte("\n")...) 
- } - } - return res -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dcl_logger.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dcl_logger.go deleted file mode 100644 index 6fb1ba7548..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dcl_logger.go +++ /dev/null @@ -1,38 +0,0 @@ -package google - -import ( - "fmt" - "log" -) - -type dclLogger struct{} - -// Fatal records Fatal errors. -func (l dclLogger) Fatal(args ...interface{}) { - log.Fatal(args...) -} - -// Fatalf records Fatal errors with added arguments. -func (l dclLogger) Fatalf(format string, args ...interface{}) { - log.Fatalf(fmt.Sprintf("[DEBUG][DCL FATAL] %s", format), args...) -} - -// Info records Info errors. -func (l dclLogger) Info(args ...interface{}) { - log.Print(args...) -} - -// Infof records Info errors with added arguments. -func (l dclLogger) Infof(format string, args ...interface{}) { - log.Printf(fmt.Sprintf("[DEBUG][DCL INFO] %s", format), args...) -} - -// Warningf records Warning errors with added arguments. -func (l dclLogger) Warningf(format string, args ...interface{}) { - log.Printf(fmt.Sprintf("[DEBUG][DCL WARNING] %s", format), args...) -} - -// Warning records Warning errors. -func (l dclLogger) Warning(args ...interface{}) { - log.Print(args...) 
-} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/deployment_manager_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/deployment_manager_operation.go deleted file mode 100644 index 8fe7ad24b7..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/deployment_manager_operation.go +++ /dev/null @@ -1,89 +0,0 @@ -package google - -import ( - "bytes" - "fmt" - "time" - - "google.golang.org/api/compute/v1" -) - -type DeploymentManagerOperationWaiter struct { - Config *Config - UserAgent string - Project string - OperationUrl string - ComputeOperationWaiter -} - -func (w *DeploymentManagerOperationWaiter) IsRetryable(error) bool { - return false -} - -func (w *DeploymentManagerOperationWaiter) QueryOp() (interface{}, error) { - if w == nil || w.Op == nil || w.Op.SelfLink == "" { - return nil, fmt.Errorf("cannot query unset/nil operation") - } - - resp, err := SendRequest(w.Config, "GET", w.Project, w.Op.SelfLink, w.UserAgent, nil) - if err != nil { - return nil, err - } - op := &compute.Operation{} - if err := Convert(resp, op); err != nil { - return nil, fmt.Errorf("could not convert response to operation: %v", err) - } - return op, nil -} - -func DeploymentManagerOperationWaitTime(config *Config, resp interface{}, project, activity, userAgent string, timeout time.Duration) error { - op := &compute.Operation{} - err := Convert(resp, op) - if err != nil { - return err - } - - w := &DeploymentManagerOperationWaiter{ - Config: config, - UserAgent: userAgent, - OperationUrl: op.SelfLink, - ComputeOperationWaiter: ComputeOperationWaiter{ - Project: project, - }, - } - if err := w.SetOp(op); err != nil { - return err - } - - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func (w *DeploymentManagerOperationWaiter) Error() error { - if w != nil && w.Op != nil && w.Op.Error != nil { - return 
DeploymentManagerOperationError{ - HTTPStatusCode: w.Op.HttpErrorStatusCode, - HTTPMessage: w.Op.HttpErrorMessage, - OperationError: *w.Op.Error, - } - } - return nil -} - -// DeploymentManagerOperationError wraps information from the compute.Operation -// in an implementation of Error. -type DeploymentManagerOperationError struct { - HTTPStatusCode int64 - HTTPMessage string - compute.OperationError -} - -func (e DeploymentManagerOperationError) Error() string { - var buf bytes.Buffer - buf.WriteString("Deployment Manager returned errors for this operation, likely due to invalid configuration.") - buf.WriteString(fmt.Sprintf("Operation failed with HTTP error %d: %s.", e.HTTPStatusCode, e.HTTPMessage)) - buf.WriteString("Errors returned: \n") - for _, err := range e.Errors { - buf.WriteString(err.Message + "\n") - } - return buf.String() -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dialogflow_cx_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dialogflow_cx_operation.go deleted file mode 100644 index 7ee39e04e2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dialogflow_cx_operation.go +++ /dev/null @@ -1,61 +0,0 @@ -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type DialogflowCXOperationWaiter struct { - Config *Config - UserAgent string - CommonOperationWaiter - Location string -} - -func (w *DialogflowCXOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("https://%s-dialogflow.googleapis.com/v3/%s", w.Location, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", "", url, w.UserAgent, nil) -} - -func createDialogflowCXWaiter(config *Config, op map[string]interface{}, activity, userAgent, location string) (*DialogflowCXOperationWaiter, error) { - w := &DialogflowCXOperationWaiter{ - Config: config, - UserAgent: userAgent, - Location: location, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func DialogflowCXOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent, location string, timeout time.Duration) error { - w, err := createDialogflowCXWaiter(config, op, activity, userAgent, location) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func DialogflowCXOperationWaitTime(config *Config, op map[string]interface{}, activity, userAgent, location string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createDialogflowCXWaiter(config, op, activity, userAgent, location) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/disk_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/disk_type.go deleted file mode 100644 index deb76ed2c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/disk_type.go +++ /dev/null @@ -1,11 +0,0 @@ -package google - -// readDiskType finds the disk type with the given name. -func readDiskType(c *Config, d TerraformResourceData, name string) (*ZonalFieldValue, error) { - return parseZonalFieldValue("diskTypes", name, "project", "zone", d, c, false) -} - -// readRegionDiskType finds the disk type with the given name. -func readRegionDiskType(c *Config, d TerraformResourceData, name string) (*RegionalFieldValue, error) { - return parseRegionalFieldValue("diskTypes", name, "project", "region", "zone", d, c, false) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/envvar/envvar_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/envvar/envvar_utils.go new file mode 100644 index 0000000000..5348cc7a89 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/envvar/envvar_utils.go @@ -0,0 +1,200 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package envvar + +import ( + "log" + "os" + "testing" + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +const TestEnvVar = "TF_ACC" + +var CredsEnvVars = []string{ + "GOOGLE_CREDENTIALS", + "GOOGLE_CLOUD_KEYFILE_JSON", + "GCLOUD_KEYFILE_JSON", + "GOOGLE_APPLICATION_CREDENTIALS", + "GOOGLE_USE_DEFAULT_CREDENTIALS", +} + +var ProjectNumberEnvVars = []string{ + "GOOGLE_PROJECT_NUMBER", +} + +var ProjectEnvVars = []string{ + "GOOGLE_PROJECT", + "GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT", +} + +var FirestoreProjectEnvVars = []string{ + "GOOGLE_FIRESTORE_PROJECT", +} + +var RegionEnvVars = []string{ + "GOOGLE_REGION", + "GCLOUD_REGION", + "CLOUDSDK_COMPUTE_REGION", +} + +var ZoneEnvVars = []string{ + "GOOGLE_ZONE", + "GCLOUD_ZONE", + "CLOUDSDK_COMPUTE_ZONE", +} + +var OrgEnvVars = []string{ + "GOOGLE_ORG", +} + +// This value is the Customer ID of the GOOGLE_ORG_DOMAIN workspace. +// See https://admin.google.com/ac/accountsettings when logged into an org admin for the value. +var CustIdEnvVars = []string{ + "GOOGLE_CUST_ID", +} + +// This value is the username of an identity account within the GOOGLE_ORG_DOMAIN workspace. +// For example in the org example.com with a user "foo@example.com", this would be set to "foo". +// See https://admin.google.com/ac/users when logged into an org admin for a list. +var IdentityUserEnvVars = []string{ + "GOOGLE_IDENTITY_USER", +} + +var OrgEnvDomainVars = []string{ + "GOOGLE_ORG_DOMAIN", +} + +var ServiceAccountEnvVars = []string{ + "GOOGLE_SERVICE_ACCOUNT", +} + +var OrgTargetEnvVars = []string{ + "GOOGLE_ORG_2", +} + +// This is the billing account that will be charged for the infrastructure used during testing. For +// that reason, it is also the billing account used for creating new projects. +var BillingAccountEnvVars = []string{ + "GOOGLE_BILLING_ACCOUNT", +} + +// This is the billing account that will be modified to test billing-related functionality. 
It is +// expected to have more permissions granted to the test user and support subaccounts. +var MasterBillingAccountEnvVars = []string{ + "GOOGLE_MASTER_BILLING_ACCOUNT", +} + +// This value is the description used for test PublicAdvertisedPrefix setup to avoid required DNS +// setup. This is only used during integration tests and would be invalid to surface to users +var PapDescriptionEnvVars = []string{ + "GOOGLE_PUBLIC_AVERTISED_PREFIX_DESCRIPTION", +} + +// AccTestPreCheck ensures at least one of the project env variables is set. +func GetTestProjectNumberFromEnv() string { + return transport_tpg.MultiEnvSearch(ProjectNumberEnvVars) +} + +// AccTestPreCheck ensures at least one of the project env variables is set. +func GetTestProjectFromEnv() string { + return transport_tpg.MultiEnvSearch(ProjectEnvVars) +} + +// AccTestPreCheck ensures at least one of the credentials env variables is set. +func GetTestCredsFromEnv() string { + // Return empty string if GOOGLE_USE_DEFAULT_CREDENTIALS is set to true. + if transport_tpg.MultiEnvSearch(CredsEnvVars) == "true" { + return "" + } + return transport_tpg.MultiEnvSearch(CredsEnvVars) +} + +// AccTestPreCheck ensures at least one of the region env variables is set. +func GetTestRegionFromEnv() string { + return transport_tpg.MultiEnvSearch(RegionEnvVars) +} + +func GetTestZoneFromEnv() string { + return transport_tpg.MultiEnvSearch(ZoneEnvVars) +} + +func GetTestCustIdFromEnv(t *testing.T) string { + SkipIfEnvNotSet(t, CustIdEnvVars...) + return transport_tpg.MultiEnvSearch(CustIdEnvVars) +} + +func GetTestIdentityUserFromEnv(t *testing.T) string { + SkipIfEnvNotSet(t, IdentityUserEnvVars...) + return transport_tpg.MultiEnvSearch(IdentityUserEnvVars) +} + +// Firestore can't be enabled at the same time as Datastore, so we need a new +// project to manage it until we can enable Firestore programmatically. +func GetTestFirestoreProjectFromEnv(t *testing.T) string { + SkipIfEnvNotSet(t, FirestoreProjectEnvVars...) 
+ return transport_tpg.MultiEnvSearch(FirestoreProjectEnvVars) +} + +// Returns the raw organization id like 1234567890, skipping the test if one is +// not found. +func GetTestOrgFromEnv(t *testing.T) string { + SkipIfEnvNotSet(t, OrgEnvVars...) + return transport_tpg.MultiEnvSearch(OrgEnvVars) +} + +// Alternative to GetTestOrgFromEnv that doesn't need *testing.T +// If using this, you need to process unset values at the call site +func UnsafeGetTestOrgFromEnv() string { + return transport_tpg.MultiEnvSearch(OrgEnvVars) +} + +func GetTestOrgDomainFromEnv(t *testing.T) string { + SkipIfEnvNotSet(t, OrgEnvDomainVars...) + return transport_tpg.MultiEnvSearch(OrgEnvDomainVars) +} + +func GetTestOrgTargetFromEnv(t *testing.T) string { + SkipIfEnvNotSet(t, OrgTargetEnvVars...) + return transport_tpg.MultiEnvSearch(OrgTargetEnvVars) +} + +// This is the billing account that will be charged for the infrastructure used during testing. For +// that reason, it is also the billing account used for creating new projects. +func GetTestBillingAccountFromEnv(t *testing.T) string { + SkipIfEnvNotSet(t, BillingAccountEnvVars...) + return transport_tpg.MultiEnvSearch(BillingAccountEnvVars) +} + +// This is the billing account that will be modified to test billing-related functionality. It is +// expected to have more permissions granted to the test user and support subaccounts. +func GetTestMasterBillingAccountFromEnv(t *testing.T) string { + SkipIfEnvNotSet(t, MasterBillingAccountEnvVars...) + return transport_tpg.MultiEnvSearch(MasterBillingAccountEnvVars) +} + +func GetTestServiceAccountFromEnv(t *testing.T) string { + SkipIfEnvNotSet(t, ServiceAccountEnvVars...) + return transport_tpg.MultiEnvSearch(ServiceAccountEnvVars) +} + +func GetTestPublicAdvertisedPrefixDescriptionFromEnv(t *testing.T) string { + SkipIfEnvNotSet(t, PapDescriptionEnvVars...) 
+ return transport_tpg.MultiEnvSearch(PapDescriptionEnvVars) +} + +func SkipIfEnvNotSet(t *testing.T, envs ...string) { + if t == nil { + log.Printf("[DEBUG] Not running inside of test - skip skipping") + return + } + + for _, k := range envs { + if os.Getenv(k) == "" { + log.Printf("[DEBUG] Warning - environment variable %s is not set - skipping test %s", k, t.Name()) + t.Skipf("Environment variable %s is not set", k) + } + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/expanders.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/expanders.go deleted file mode 100644 index 43b113a8f4..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/expanders.go +++ /dev/null @@ -1,65 +0,0 @@ -package google - -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - -func expandStringArray(v interface{}) []string { - arr, ok := v.([]string) - - if ok { - return arr - } - - if arr, ok := v.(*schema.Set); ok { - return convertStringSet(arr) - } - - arr = convertStringArr(v.([]interface{})) - if arr == nil { - // Send empty array specifically instead of nil - return make([]string, 0) - } - return arr -} - -func expandIntegerArray(v interface{}) []int64 { - arr, ok := v.([]int64) - - if ok { - return arr - } - - if arr, ok := v.(*schema.Set); ok { - return convertIntegerSet(arr) - } - - return convertIntegerArr(v.([]interface{})) -} - -func convertIntegerSet(v *schema.Set) []int64 { - return convertIntegerArr(v.List()) -} - -func convertIntegerArr(v []interface{}) []int64 { - var vi []int64 - for _, vs := range v { - vi = append(vi, int64(vs.(int))) - } - return vi -} - -// Returns the DCL representation of a three-state boolean value represented by a string in terraform. 
-func expandEnumBool(v interface{}) *bool { - s, ok := v.(string) - if !ok { - return nil - } - switch s { - case "TRUE": - b := true - return &b - case "FALSE": - b := false - return &b - } - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/field_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/field_helpers.go deleted file mode 100644 index 8eaeac0440..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/field_helpers.go +++ /dev/null @@ -1,442 +0,0 @@ -package google - -import ( - "fmt" - "regexp" -) - -const ( - globalLinkTemplate = "projects/%s/global/%s/%s" - globalLinkBasePattern = "projects/(.+)/global/%s/(.+)" - zonalLinkTemplate = "projects/%s/zones/%s/%s/%s" - zonalLinkBasePattern = "projects/(.+)/zones/(.+)/%s/(.+)" - zonalPartialLinkBasePattern = "zones/(.+)/%s/(.+)" - regionalLinkTemplate = "projects/%s/regions/%s/%s/%s" - regionalLinkBasePattern = "projects/(.+)/regions/(.+)/%s/(.+)" - regionalPartialLinkBasePattern = "regions/(.+)/%s/(.+)" - projectLinkTemplate = "projects/%s/%s/%s" - projectBasePattern = "projects/(.+)/%s/(.+)" - organizationLinkTemplate = "organizations/%s/%s/%s" - organizationBasePattern = "organizations/(.+)/%s/(.+)" -) - -// ------------------------------------------------------------ -// Field helpers -// ------------------------------------------------------------ - -func ParseNetworkFieldValue(network string, d TerraformResourceData, config *Config) (*GlobalFieldValue, error) { - return parseGlobalFieldValue("networks", network, "project", d, config, true) -} - -func ParseSubnetworkFieldValue(subnetwork string, d TerraformResourceData, config *Config) (*RegionalFieldValue, error) { - return parseRegionalFieldValue("subnetworks", subnetwork, "project", "region", "zone", d, config, true) -} - -func ParseSubnetworkFieldValueWithProjectField(subnetwork, 
projectField string, d TerraformResourceData, config *Config) (*RegionalFieldValue, error) { - return parseRegionalFieldValue("subnetworks", subnetwork, projectField, "region", "zone", d, config, true) -} - -func ParseSslCertificateFieldValue(sslCertificate string, d TerraformResourceData, config *Config) (*GlobalFieldValue, error) { - return parseGlobalFieldValue("sslCertificates", sslCertificate, "project", d, config, false) -} - -func ParseHttpHealthCheckFieldValue(healthCheck string, d TerraformResourceData, config *Config) (*GlobalFieldValue, error) { - return parseGlobalFieldValue("httpHealthChecks", healthCheck, "project", d, config, false) -} - -func ParseDiskFieldValue(disk string, d TerraformResourceData, config *Config) (*ZonalFieldValue, error) { - return parseZonalFieldValue("disks", disk, "project", "zone", d, config, false) -} - -func ParseRegionDiskFieldValue(disk string, d TerraformResourceData, config *Config) (*RegionalFieldValue, error) { - return parseRegionalFieldValue("disks", disk, "project", "region", "zone", d, config, false) -} - -func ParseOrganizationCustomRoleName(role string) (*OrganizationFieldValue, error) { - return parseOrganizationFieldValue("roles", role, false) -} - -func ParseAcceleratorFieldValue(accelerator string, d TerraformResourceData, config *Config) (*ZonalFieldValue, error) { - return parseZonalFieldValue("acceleratorTypes", accelerator, "project", "zone", d, config, false) -} - -func ParseMachineTypesFieldValue(machineType string, d TerraformResourceData, config *Config) (*ZonalFieldValue, error) { - return parseZonalFieldValue("machineTypes", machineType, "project", "zone", d, config, false) -} - -func ParseInstanceFieldValue(instance string, d TerraformResourceData, config *Config) (*ZonalFieldValue, error) { - return parseZonalFieldValue("instances", instance, "project", "zone", d, config, false) -} - -func ParseInstanceGroupFieldValue(instanceGroup string, d TerraformResourceData, config *Config) 
(*ZonalFieldValue, error) { - return parseZonalFieldValue("instanceGroups", instanceGroup, "project", "zone", d, config, false) -} - -func ParseInstanceTemplateFieldValue(instanceTemplate string, d TerraformResourceData, config *Config) (*GlobalFieldValue, error) { - return parseGlobalFieldValue("instanceTemplates", instanceTemplate, "project", d, config, false) -} - -func ParseMachineImageFieldValue(machineImage string, d TerraformResourceData, config *Config) (*GlobalFieldValue, error) { - return parseGlobalFieldValue("machineImages", machineImage, "project", d, config, false) -} - -func ParseSecurityPolicyFieldValue(securityPolicy string, d TerraformResourceData, config *Config) (*GlobalFieldValue, error) { - return parseGlobalFieldValue("securityPolicies", securityPolicy, "project", d, config, true) -} - -func ParseNetworkEndpointGroupFieldValue(networkEndpointGroup string, d TerraformResourceData, config *Config) (*ZonalFieldValue, error) { - return parseZonalFieldValue("networkEndpointGroups", networkEndpointGroup, "project", "zone", d, config, false) -} - -func ParseNetworkEndpointGroupRegionalFieldValue(networkEndpointGroup string, d TerraformResourceData, config *Config) (*RegionalFieldValue, error) { - return parseRegionalFieldValue("networkEndpointGroups", networkEndpointGroup, "project", "region", "zone", d, config, false) -} - -// ------------------------------------------------------------ -// Base helpers used to create helpers for specific fields. 
-// ------------------------------------------------------------ - -type GlobalFieldValue struct { - Project string - Name string - - resourceType string -} - -func (f GlobalFieldValue) RelativeLink() string { - if len(f.Name) == 0 { - return "" - } - - return fmt.Sprintf(globalLinkTemplate, f.Project, f.resourceType, f.Name) -} - -// Parses a global field supporting 5 different formats: -// - https://www.googleapis.com/compute/ANY_VERSION/projects/{my_project}/global/{resource_type}/{resource_name} -// - projects/{my_project}/global/{resource_type}/{resource_name} -// - global/{resource_type}/{resource_name} -// - resource_name -// - "" (empty string). RelativeLink() returns empty if isEmptyValid is true. -// -// If the project is not specified, it first tries to get the project from the `projectSchemaField` and then fallback on the default project. -func parseGlobalFieldValue(resourceType, fieldValue, projectSchemaField string, d TerraformResourceData, config *Config, isEmptyValid bool) (*GlobalFieldValue, error) { - if len(fieldValue) == 0 { - if isEmptyValid { - return &GlobalFieldValue{resourceType: resourceType}, nil - } - return nil, fmt.Errorf("The global field for resource %s cannot be empty", resourceType) - } - - r := regexp.MustCompile(fmt.Sprintf(globalLinkBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &GlobalFieldValue{ - Project: parts[1], - Name: parts[2], - - resourceType: resourceType, - }, nil - } - - project, err := getProjectFromSchema(projectSchemaField, d, config) - if err != nil { - return nil, err - } - - return &GlobalFieldValue{ - Project: project, - Name: GetResourceNameFromSelfLink(fieldValue), - - resourceType: resourceType, - }, nil -} - -type ZonalFieldValue struct { - Project string - Zone string - Name string - - resourceType string -} - -func (f ZonalFieldValue) RelativeLink() string { - if len(f.Name) == 0 { - return "" - } - - return fmt.Sprintf(zonalLinkTemplate, f.Project, 
f.Zone, f.resourceType, f.Name) -} - -// Parses a zonal field supporting 5 different formats: -// - https://www.googleapis.com/compute/ANY_VERSION/projects/{my_project}/zones/{zone}/{resource_type}/{resource_name} -// - projects/{my_project}/zones/{zone}/{resource_type}/{resource_name} -// - zones/{zone}/{resource_type}/{resource_name} -// - resource_name -// - "" (empty string). RelativeLink() returns empty if isEmptyValid is true. -// -// If the project is not specified, it first tries to get the project from the `projectSchemaField` and then fallback on the default project. -// If the zone is not specified, it takes the value of `zoneSchemaField`. -func parseZonalFieldValue(resourceType, fieldValue, projectSchemaField, zoneSchemaField string, d TerraformResourceData, config *Config, isEmptyValid bool) (*ZonalFieldValue, error) { - if len(fieldValue) == 0 { - if isEmptyValid { - return &ZonalFieldValue{resourceType: resourceType}, nil - } - return nil, fmt.Errorf("The zonal field for resource %s cannot be empty.", resourceType) - } - - r := regexp.MustCompile(fmt.Sprintf(zonalLinkBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &ZonalFieldValue{ - Project: parts[1], - Zone: parts[2], - Name: parts[3], - resourceType: resourceType, - }, nil - } - - project, err := getProjectFromSchema(projectSchemaField, d, config) - if err != nil { - return nil, err - } - - r = regexp.MustCompile(fmt.Sprintf(zonalPartialLinkBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &ZonalFieldValue{ - Project: project, - Zone: parts[1], - Name: parts[2], - resourceType: resourceType, - }, nil - } - - if len(zoneSchemaField) == 0 { - return nil, fmt.Errorf("Invalid field format. 
Got '%s', expected format '%s'", fieldValue, fmt.Sprintf(globalLinkTemplate, "{project}", resourceType, "{name}")) - } - - zone, ok := d.GetOk(zoneSchemaField) - if !ok { - zone = config.Zone - if zone == "" { - return nil, fmt.Errorf("A zone must be specified") - } - } - - return &ZonalFieldValue{ - Project: project, - Zone: zone.(string), - Name: GetResourceNameFromSelfLink(fieldValue), - resourceType: resourceType, - }, nil -} - -func getProjectFromSchema(projectSchemaField string, d TerraformResourceData, config *Config) (string, error) { - res, ok := d.GetOk(projectSchemaField) - if ok && projectSchemaField != "" { - return res.(string), nil - } - if config.Project != "" { - return config.Project, nil - } - return "", fmt.Errorf("%s: required field is not set", projectSchemaField) -} - -func getBillingProjectFromSchema(billingProjectSchemaField string, d TerraformResourceData, config *Config) (string, error) { - res, ok := d.GetOk(billingProjectSchemaField) - if ok && billingProjectSchemaField != "" { - return res.(string), nil - } - if config.BillingProject != "" { - return config.BillingProject, nil - } - return "", fmt.Errorf("%s: required field is not set", billingProjectSchemaField) -} - -type OrganizationFieldValue struct { - OrgId string - Name string - - resourceType string -} - -func (f OrganizationFieldValue) RelativeLink() string { - if len(f.Name) == 0 { - return "" - } - - return fmt.Sprintf(organizationLinkTemplate, f.OrgId, f.resourceType, f.Name) -} - -// Parses an organization field with the following formats: -// - organizations/{my_organizations}/{resource_type}/{resource_name} -func parseOrganizationFieldValue(resourceType, fieldValue string, isEmptyValid bool) (*OrganizationFieldValue, error) { - if len(fieldValue) == 0 { - if isEmptyValid { - return &OrganizationFieldValue{resourceType: resourceType}, nil - } - return nil, fmt.Errorf("The organization field for resource %s cannot be empty", resourceType) - } - - r := 
regexp.MustCompile(fmt.Sprintf(organizationBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &OrganizationFieldValue{ - OrgId: parts[1], - Name: parts[2], - - resourceType: resourceType, - }, nil - } - - return nil, fmt.Errorf("Invalid field format. Got '%s', expected format '%s'", fieldValue, fmt.Sprintf(organizationLinkTemplate, "{org_id}", resourceType, "{name}")) -} - -type RegionalFieldValue struct { - Project string - Region string - Name string - - resourceType string -} - -func (f RegionalFieldValue) RelativeLink() string { - if len(f.Name) == 0 { - return "" - } - - return fmt.Sprintf(regionalLinkTemplate, f.Project, f.Region, f.resourceType, f.Name) -} - -// Parses a regional field supporting 5 different formats: -// - https://www.googleapis.com/compute/ANY_VERSION/projects/{my_project}/regions/{region}/{resource_type}/{resource_name} -// - projects/{my_project}/regions/{region}/{resource_type}/{resource_name} -// - regions/{region}/{resource_type}/{resource_name} -// - resource_name -// - "" (empty string). RelativeLink() returns empty if isEmptyValid is true. -// -// If the project is not specified, it first tries to get the project from the `projectSchemaField` and then fallback on the default project. -// If the region is not specified, see function documentation for `getRegionFromSchema`. 
-func parseRegionalFieldValue(resourceType, fieldValue, projectSchemaField, regionSchemaField, zoneSchemaField string, d TerraformResourceData, config *Config, isEmptyValid bool) (*RegionalFieldValue, error) { - if len(fieldValue) == 0 { - if isEmptyValid { - return &RegionalFieldValue{resourceType: resourceType}, nil - } - return nil, fmt.Errorf("The regional field for resource %s cannot be empty.", resourceType) - } - - r := regexp.MustCompile(fmt.Sprintf(regionalLinkBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &RegionalFieldValue{ - Project: parts[1], - Region: parts[2], - Name: parts[3], - resourceType: resourceType, - }, nil - } - - project, err := getProjectFromSchema(projectSchemaField, d, config) - if err != nil { - return nil, err - } - - r = regexp.MustCompile(fmt.Sprintf(regionalPartialLinkBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &RegionalFieldValue{ - Project: project, - Region: parts[1], - Name: parts[2], - resourceType: resourceType, - }, nil - } - - region, err := getRegionFromSchema(regionSchemaField, zoneSchemaField, d, config) - if err != nil { - return nil, err - } - - return &RegionalFieldValue{ - Project: project, - Region: region, - Name: GetResourceNameFromSelfLink(fieldValue), - resourceType: resourceType, - }, nil -} - -// Infers the region based on the following (in order of priority): -// - `regionSchemaField` in resource schema -// - region extracted from the `zoneSchemaField` in resource schema -// - provider-level region -// - region extracted from the provider-level zone -func getRegionFromSchema(regionSchemaField, zoneSchemaField string, d TerraformResourceData, config *Config) (string, error) { - // if identical such as GKE location, check if it's a zone first and find - // the region if so. Otherwise, return as it's a region. 
- if regionSchemaField == zoneSchemaField { - if v, ok := d.GetOk(regionSchemaField); ok { - if isZone(v.(string)) { - return getRegionFromZone(v.(string)), nil - } - - return v.(string), nil - } - } - - if v, ok := d.GetOk(regionSchemaField); ok && regionSchemaField != "" { - return GetResourceNameFromSelfLink(v.(string)), nil - } - if v, ok := d.GetOk(zoneSchemaField); ok && zoneSchemaField != "" { - return getRegionFromZone(v.(string)), nil - } - if config.Region != "" { - return config.Region, nil - } - if config.Zone != "" { - return getRegionFromZone(config.Zone), nil - } - - return "", fmt.Errorf("Cannot determine region: set in this resource, or set provider-level 'region' or 'zone'.") -} - -type ProjectFieldValue struct { - Project string - Name string - - resourceType string -} - -func (f ProjectFieldValue) RelativeLink() string { - if len(f.Name) == 0 { - return "" - } - - return fmt.Sprintf(projectLinkTemplate, f.Project, f.resourceType, f.Name) -} - -// Parses a project field with the following formats: -// - projects/{my_projects}/{resource_type}/{resource_name} -func parseProjectFieldValue(resourceType, fieldValue, projectSchemaField string, d TerraformResourceData, config *Config, isEmptyValid bool) (*ProjectFieldValue, error) { - if len(fieldValue) == 0 { - if isEmptyValid { - return &ProjectFieldValue{resourceType: resourceType}, nil - } - return nil, fmt.Errorf("The project field for resource %s cannot be empty", resourceType) - } - - r := regexp.MustCompile(fmt.Sprintf(projectBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &ProjectFieldValue{ - Project: parts[1], - Name: parts[2], - - resourceType: resourceType, - }, nil - } - - project, err := getProjectFromSchema(projectSchemaField, d, config) - if err != nil { - return nil, err - } - - return &ProjectFieldValue{ - Project: project, - Name: GetResourceNameFromSelfLink(fieldValue), - - resourceType: resourceType, - }, nil -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/filestore_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/filestore_operation.go deleted file mode 100644 index 1d2ba8333a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/filestore_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type FilestoreOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *FilestoreOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.FilestoreBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil, isNotFilestoreQuotaError) -} - -func createFilestoreWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*FilestoreOperationWaiter, error) { - w := &FilestoreOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func FilestoreOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createFilestoreWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func FilestoreOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createFilestoreWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/firestore_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/firestore_operation.go deleted file mode 100644 index f4e9811901..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/firestore_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type FirestoreOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *FirestoreOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.FirestoreBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createFirestoreWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*FirestoreOperationWaiter, error) { - w := &FirestoreOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func FirestoreOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createFirestoreWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func FirestoreOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createFirestoreWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/flatteners.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/flatteners.go deleted file mode 100644 index 707d823b11..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/flatteners.go +++ /dev/null @@ -1,13 +0,0 @@ -package google - -// Returns the terraform representation of a three-state boolean value represented by a pointer to bool in DCL. -func flattenEnumBool(v interface{}) string { - b, ok := v.(*bool) - if !ok || b == nil { - return "" - } - if *b { - return "TRUE" - } - return "FALSE" -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwmodels/provider_model.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwmodels/provider_model.go new file mode 100644 index 0000000000..21356cb170 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwmodels/provider_model.go @@ -0,0 +1,161 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package fwmodels + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// ProviderModel describes the provider config data model. 
+type ProviderModel struct { + Credentials types.String `tfsdk:"credentials"` + AccessToken types.String `tfsdk:"access_token"` + ImpersonateServiceAccount types.String `tfsdk:"impersonate_service_account"` + ImpersonateServiceAccountDelegates types.List `tfsdk:"impersonate_service_account_delegates"` + Project types.String `tfsdk:"project"` + BillingProject types.String `tfsdk:"billing_project"` + Region types.String `tfsdk:"region"` + Zone types.String `tfsdk:"zone"` + Scopes types.List `tfsdk:"scopes"` + Batching types.List `tfsdk:"batching"` + UserProjectOverride types.Bool `tfsdk:"user_project_override"` + RequestTimeout types.String `tfsdk:"request_timeout"` + RequestReason types.String `tfsdk:"request_reason"` + + // Generated Products + AccessApprovalCustomEndpoint types.String `tfsdk:"access_approval_custom_endpoint"` + AccessContextManagerCustomEndpoint types.String `tfsdk:"access_context_manager_custom_endpoint"` + ActiveDirectoryCustomEndpoint types.String `tfsdk:"active_directory_custom_endpoint"` + AlloydbCustomEndpoint types.String `tfsdk:"alloydb_custom_endpoint"` + ApigeeCustomEndpoint types.String `tfsdk:"apigee_custom_endpoint"` + AppEngineCustomEndpoint types.String `tfsdk:"app_engine_custom_endpoint"` + ArtifactRegistryCustomEndpoint types.String `tfsdk:"artifact_registry_custom_endpoint"` + BeyondcorpCustomEndpoint types.String `tfsdk:"beyondcorp_custom_endpoint"` + BigQueryCustomEndpoint types.String `tfsdk:"big_query_custom_endpoint"` + BigqueryAnalyticsHubCustomEndpoint types.String `tfsdk:"bigquery_analytics_hub_custom_endpoint"` + BigqueryConnectionCustomEndpoint types.String `tfsdk:"bigquery_connection_custom_endpoint"` + BigqueryDatapolicyCustomEndpoint types.String `tfsdk:"bigquery_datapolicy_custom_endpoint"` + BigqueryDataTransferCustomEndpoint types.String `tfsdk:"bigquery_data_transfer_custom_endpoint"` + BigqueryReservationCustomEndpoint types.String `tfsdk:"bigquery_reservation_custom_endpoint"` + BigtableCustomEndpoint 
types.String `tfsdk:"bigtable_custom_endpoint"` + BillingCustomEndpoint types.String `tfsdk:"billing_custom_endpoint"` + BinaryAuthorizationCustomEndpoint types.String `tfsdk:"binary_authorization_custom_endpoint"` + CertificateManagerCustomEndpoint types.String `tfsdk:"certificate_manager_custom_endpoint"` + CloudAssetCustomEndpoint types.String `tfsdk:"cloud_asset_custom_endpoint"` + CloudBuildCustomEndpoint types.String `tfsdk:"cloud_build_custom_endpoint"` + Cloudbuildv2CustomEndpoint types.String `tfsdk:"cloudbuildv2_custom_endpoint"` + CloudFunctionsCustomEndpoint types.String `tfsdk:"cloud_functions_custom_endpoint"` + Cloudfunctions2CustomEndpoint types.String `tfsdk:"cloudfunctions2_custom_endpoint"` + CloudIdentityCustomEndpoint types.String `tfsdk:"cloud_identity_custom_endpoint"` + CloudIdsCustomEndpoint types.String `tfsdk:"cloud_ids_custom_endpoint"` + CloudIotCustomEndpoint types.String `tfsdk:"cloud_iot_custom_endpoint"` + CloudRunCustomEndpoint types.String `tfsdk:"cloud_run_custom_endpoint"` + CloudRunV2CustomEndpoint types.String `tfsdk:"cloud_run_v2_custom_endpoint"` + CloudSchedulerCustomEndpoint types.String `tfsdk:"cloud_scheduler_custom_endpoint"` + CloudTasksCustomEndpoint types.String `tfsdk:"cloud_tasks_custom_endpoint"` + ComputeCustomEndpoint types.String `tfsdk:"compute_custom_endpoint"` + ContainerAnalysisCustomEndpoint types.String `tfsdk:"container_analysis_custom_endpoint"` + ContainerAttachedCustomEndpoint types.String `tfsdk:"container_attached_custom_endpoint"` + DatabaseMigrationServiceCustomEndpoint types.String `tfsdk:"database_migration_service_custom_endpoint"` + DataCatalogCustomEndpoint types.String `tfsdk:"data_catalog_custom_endpoint"` + DataFusionCustomEndpoint types.String `tfsdk:"data_fusion_custom_endpoint"` + DataLossPreventionCustomEndpoint types.String `tfsdk:"data_loss_prevention_custom_endpoint"` + DataplexCustomEndpoint types.String `tfsdk:"dataplex_custom_endpoint"` + DataprocCustomEndpoint types.String 
`tfsdk:"dataproc_custom_endpoint"` + DataprocMetastoreCustomEndpoint types.String `tfsdk:"dataproc_metastore_custom_endpoint"` + DatastoreCustomEndpoint types.String `tfsdk:"datastore_custom_endpoint"` + DatastreamCustomEndpoint types.String `tfsdk:"datastream_custom_endpoint"` + DeploymentManagerCustomEndpoint types.String `tfsdk:"deployment_manager_custom_endpoint"` + DialogflowCustomEndpoint types.String `tfsdk:"dialogflow_custom_endpoint"` + DialogflowCXCustomEndpoint types.String `tfsdk:"dialogflow_cx_custom_endpoint"` + DNSCustomEndpoint types.String `tfsdk:"dns_custom_endpoint"` + DocumentAICustomEndpoint types.String `tfsdk:"document_ai_custom_endpoint"` + EssentialContactsCustomEndpoint types.String `tfsdk:"essential_contacts_custom_endpoint"` + FilestoreCustomEndpoint types.String `tfsdk:"filestore_custom_endpoint"` + FirestoreCustomEndpoint types.String `tfsdk:"firestore_custom_endpoint"` + GameServicesCustomEndpoint types.String `tfsdk:"game_services_custom_endpoint"` + GKEBackupCustomEndpoint types.String `tfsdk:"gke_backup_custom_endpoint"` + GKEHubCustomEndpoint types.String `tfsdk:"gke_hub_custom_endpoint"` + GKEHub2CustomEndpoint types.String `tfsdk:"gke_hub2_custom_endpoint"` + HealthcareCustomEndpoint types.String `tfsdk:"healthcare_custom_endpoint"` + IAM2CustomEndpoint types.String `tfsdk:"iam2_custom_endpoint"` + IAMBetaCustomEndpoint types.String `tfsdk:"iam_beta_custom_endpoint"` + IAMWorkforcePoolCustomEndpoint types.String `tfsdk:"iam_workforce_pool_custom_endpoint"` + IapCustomEndpoint types.String `tfsdk:"iap_custom_endpoint"` + IdentityPlatformCustomEndpoint types.String `tfsdk:"identity_platform_custom_endpoint"` + KMSCustomEndpoint types.String `tfsdk:"kms_custom_endpoint"` + LoggingCustomEndpoint types.String `tfsdk:"logging_custom_endpoint"` + LookerCustomEndpoint types.String `tfsdk:"looker_custom_endpoint"` + MemcacheCustomEndpoint types.String `tfsdk:"memcache_custom_endpoint"` + MLEngineCustomEndpoint types.String 
`tfsdk:"ml_engine_custom_endpoint"` + MonitoringCustomEndpoint types.String `tfsdk:"monitoring_custom_endpoint"` + NetworkManagementCustomEndpoint types.String `tfsdk:"network_management_custom_endpoint"` + NetworkSecurityCustomEndpoint types.String `tfsdk:"network_security_custom_endpoint"` + NetworkServicesCustomEndpoint types.String `tfsdk:"network_services_custom_endpoint"` + NotebooksCustomEndpoint types.String `tfsdk:"notebooks_custom_endpoint"` + OSConfigCustomEndpoint types.String `tfsdk:"os_config_custom_endpoint"` + OSLoginCustomEndpoint types.String `tfsdk:"os_login_custom_endpoint"` + PrivatecaCustomEndpoint types.String `tfsdk:"privateca_custom_endpoint"` + PublicCACustomEndpoint types.String `tfsdk:"public_ca_custom_endpoint"` + PubsubCustomEndpoint types.String `tfsdk:"pubsub_custom_endpoint"` + PubsubLiteCustomEndpoint types.String `tfsdk:"pubsub_lite_custom_endpoint"` + RedisCustomEndpoint types.String `tfsdk:"redis_custom_endpoint"` + ResourceManagerCustomEndpoint types.String `tfsdk:"resource_manager_custom_endpoint"` + SecretManagerCustomEndpoint types.String `tfsdk:"secret_manager_custom_endpoint"` + SecurityCenterCustomEndpoint types.String `tfsdk:"security_center_custom_endpoint"` + ServiceManagementCustomEndpoint types.String `tfsdk:"service_management_custom_endpoint"` + ServiceUsageCustomEndpoint types.String `tfsdk:"service_usage_custom_endpoint"` + SourceRepoCustomEndpoint types.String `tfsdk:"source_repo_custom_endpoint"` + SpannerCustomEndpoint types.String `tfsdk:"spanner_custom_endpoint"` + SQLCustomEndpoint types.String `tfsdk:"sql_custom_endpoint"` + StorageCustomEndpoint types.String `tfsdk:"storage_custom_endpoint"` + StorageTransferCustomEndpoint types.String `tfsdk:"storage_transfer_custom_endpoint"` + TagsCustomEndpoint types.String `tfsdk:"tags_custom_endpoint"` + TPUCustomEndpoint types.String `tfsdk:"tpu_custom_endpoint"` + VertexAICustomEndpoint types.String `tfsdk:"vertex_ai_custom_endpoint"` + VPCAccessCustomEndpoint 
types.String `tfsdk:"vpc_access_custom_endpoint"` + WorkflowsCustomEndpoint types.String `tfsdk:"workflows_custom_endpoint"` + + // Handwritten Products / Versioned / Atypical Entries + CloudBillingCustomEndpoint types.String `tfsdk:"cloud_billing_custom_endpoint"` + ComposerCustomEndpoint types.String `tfsdk:"composer_custom_endpoint"` + ContainerCustomEndpoint types.String `tfsdk:"container_custom_endpoint"` + DataflowCustomEndpoint types.String `tfsdk:"dataflow_custom_endpoint"` + IamCredentialsCustomEndpoint types.String `tfsdk:"iam_credentials_custom_endpoint"` + ResourceManagerV3CustomEndpoint types.String `tfsdk:"resource_manager_v3_custom_endpoint"` + IAMCustomEndpoint types.String `tfsdk:"iam_custom_endpoint"` + ServiceNetworkingCustomEndpoint types.String `tfsdk:"service_networking_custom_endpoint"` + TagsLocationCustomEndpoint types.String `tfsdk:"tags_location_custom_endpoint"` + + // dcl + ContainerAwsCustomEndpoint types.String `tfsdk:"container_aws_custom_endpoint"` + ContainerAzureCustomEndpoint types.String `tfsdk:"container_azure_custom_endpoint"` + + // dcl generated + ApikeysCustomEndpoint types.String `tfsdk:"apikeys_custom_endpoint"` + AssuredWorkloadsCustomEndpoint types.String `tfsdk:"assured_workloads_custom_endpoint"` + CloudBuildWorkerPoolCustomEndpoint types.String `tfsdk:"cloud_build_worker_pool_custom_endpoint"` + CloudDeployCustomEndpoint types.String `tfsdk:"clouddeploy_custom_endpoint"` + CloudResourceManagerCustomEndpoint types.String `tfsdk:"cloud_resource_manager_custom_endpoint"` + EventarcCustomEndpoint types.String `tfsdk:"eventarc_custom_endpoint"` + FirebaserulesCustomEndpoint types.String `tfsdk:"firebaserules_custom_endpoint"` + NetworkConnectivityCustomEndpoint types.String `tfsdk:"network_connectivity_custom_endpoint"` + OrgPolicyCustomEndpoint types.String `tfsdk:"org_policy_custom_endpoint"` + RecaptchaEnterpriseCustomEndpoint types.String `tfsdk:"recaptcha_enterprise_custom_endpoint"` +} + +type ProviderBatching 
struct { + SendAfter types.String `tfsdk:"send_after"` + EnableBatching types.Bool `tfsdk:"enable_batching"` +} + +var ProviderBatchingAttributes = map[string]attr.Type{ + "send_after": types.StringType, + "enable_batching": types.BoolType, +} + +// ProviderMetaModel describes the provider meta model +type ProviderMetaModel struct { + ModuleName types.String `tfsdk:"module_name"` +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/framework_provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/framework_provider.go new file mode 100644 index 0000000000..48215d1b32 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/framework_provider.go @@ -0,0 +1,796 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package fwprovider + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/provider/metaschema" + "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" + "github.com/hashicorp/terraform-provider-google/google/services/dns" + "github.com/hashicorp/terraform-provider-google/google/services/resourcemanager" + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Ensure the implementation satisfies the expected 
interfaces +var ( + _ provider.ProviderWithMetaSchema = &FrameworkProvider{} +) + +// New is a helper function to simplify provider server and testing implementation. +func New(version string) provider.ProviderWithMetaSchema { + return &FrameworkProvider{ + Version: version, + } +} + +// FrameworkProvider is the provider implementation. +type FrameworkProvider struct { + fwtransport.FrameworkProviderConfig + Version string +} + +// Metadata returns the provider type name. +func (p *FrameworkProvider) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) { + resp.TypeName = "google" + resp.Version = p.Version +} + +// MetaSchema returns the provider meta schema. +func (p *FrameworkProvider) MetaSchema(_ context.Context, _ provider.MetaSchemaRequest, resp *provider.MetaSchemaResponse) { + resp.Schema = metaschema.Schema{ + Attributes: map[string]metaschema.Attribute{ + "module_name": metaschema.StringAttribute{ + Optional: true, + }, + }, + } +} + +// Schema defines the provider-level schema for configuration data. 
+func (p *FrameworkProvider) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "credentials": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.ConflictsWith(path.Expressions{ + path.MatchRoot("access_token"), + }...), + CredentialsValidator(), + }, + }, + "access_token": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.ConflictsWith(path.Expressions{ + path.MatchRoot("credentials"), + }...), + }, + }, + "impersonate_service_account": schema.StringAttribute{ + Optional: true, + }, + "impersonate_service_account_delegates": schema.ListAttribute{ + Optional: true, + ElementType: types.StringType, + }, + "project": schema.StringAttribute{ + Optional: true, + }, + "billing_project": schema.StringAttribute{ + Optional: true, + }, + "region": schema.StringAttribute{ + Optional: true, + }, + "zone": schema.StringAttribute{ + Optional: true, + }, + "scopes": schema.ListAttribute{ + Optional: true, + ElementType: types.StringType, + }, + "user_project_override": schema.BoolAttribute{ + Optional: true, + }, + "request_timeout": schema.StringAttribute{ + Optional: true, + }, + "request_reason": schema.StringAttribute{ + Optional: true, + }, + + // Generated Products + "access_approval_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "access_context_manager_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "active_directory_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "alloydb_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + 
transport_tpg.CustomEndpointValidator(), + }, + }, + "apigee_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "app_engine_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "artifact_registry_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "beyondcorp_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "big_query_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "bigquery_analytics_hub_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "bigquery_connection_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "bigquery_datapolicy_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "bigquery_data_transfer_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "bigquery_reservation_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "bigtable_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "billing_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + 
transport_tpg.CustomEndpointValidator(), + }, + }, + "binary_authorization_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "certificate_manager_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "cloud_asset_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "cloud_build_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "cloudbuildv2_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "cloud_functions_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "cloudfunctions2_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "cloud_identity_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "cloud_ids_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "cloud_iot_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "cloud_run_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "cloud_run_v2_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + 
transport_tpg.CustomEndpointValidator(), + }, + }, + "cloud_scheduler_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "cloud_tasks_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "compute_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "container_analysis_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "container_attached_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "database_migration_service_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "data_catalog_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "data_fusion_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "data_loss_prevention_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "dataplex_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "dataproc_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "dataproc_metastore_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + 
transport_tpg.CustomEndpointValidator(), + }, + }, + "datastore_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "datastream_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "deployment_manager_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "dialogflow_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "dialogflow_cx_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "dns_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "document_ai_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "essential_contacts_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "filestore_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "firestore_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "game_services_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "gke_backup_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + 
}, + }, + "gke_hub_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "gke_hub2_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "healthcare_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "iam2_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "iam_beta_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "iam_workforce_pool_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "iap_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "identity_platform_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "kms_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "logging_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "looker_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "memcache_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "ml_engine_custom_endpoint": &schema.StringAttribute{ + Optional: 
true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "monitoring_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "network_management_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "network_security_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "network_services_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "notebooks_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "os_config_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "os_login_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "privateca_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "public_ca_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "pubsub_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "pubsub_lite_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "redis_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + 
transport_tpg.CustomEndpointValidator(), + }, + }, + "resource_manager_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "secret_manager_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "security_center_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "service_management_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "service_usage_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "source_repo_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "spanner_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "sql_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "storage_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "storage_transfer_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "tags_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "tpu_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + 
}, + "vertex_ai_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "vpc_access_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "workflows_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + + // Handwritten Products / Versioned / Atypical Entries + "cloud_billing_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "composer_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "container_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "dataflow_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "iam_credentials_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "resource_manager_v3_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "iam_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "service_networking_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "tags_location_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + 
transport_tpg.CustomEndpointValidator(), + }, + }, + + // dcl + "container_aws_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "container_azure_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + }, + Blocks: map[string]schema.Block{ + "batching": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "send_after": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + NonNegativeDurationValidator(), + }, + }, + "enable_batching": schema.BoolAttribute{ + Optional: true, + }, + }, + }, + }, + }, + } + + transport_tpg.ConfigureDCLCustomEndpointAttributesFramework(&resp.Schema) +} + +// Configure prepares an API client for data sources and resources. +func (p *FrameworkProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { + var data fwmodels.ProviderModel + + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + + if resp.Diagnostics.HasError() { + return + } + + // Configuration values are now available. + p.LoadAndValidateFramework(ctx, data, req.TerraformVersion, &resp.Diagnostics, p.Version) + if resp.Diagnostics.HasError() { + return + } + + // Example client configuration for data sources and resources + resp.DataSourceData = &p.FrameworkProviderConfig + resp.ResourceData = &p.FrameworkProviderConfig +} + +// DataSources defines the data sources implemented in the provider. 
+func (p *FrameworkProvider) DataSources(_ context.Context) []func() datasource.DataSource { + return []func() datasource.DataSource{ + resourcemanager.NewGoogleClientConfigDataSource, + resourcemanager.NewGoogleClientOpenIDUserinfoDataSource, + dns.NewGoogleDnsManagedZoneDataSource, + dns.NewGoogleDnsRecordSetDataSource, + dns.NewGoogleDnsKeysDataSource, + } +} + +// Resources defines the resources implemented in the provider. +func (p *FrameworkProvider) Resources(_ context.Context) []func() resource.Resource { + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/framework_validators.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/framework_validators.go new file mode 100644 index 0000000000..496668326a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwprovider/framework_validators.go @@ -0,0 +1,89 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package fwprovider + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + + googleoauth "golang.org/x/oauth2/google" +) + +// Credentials Validator +var _ validator.String = credentialsValidator{} + +// credentialsValidator validates that a string Attribute's is valid JSON credentials. +type credentialsValidator struct { +} + +// Description describes the validation in plain text formatting. +func (v credentialsValidator) Description(_ context.Context) string { + return "value must be a path to valid JSON credentials or valid, raw, JSON credentials" +} + +// MarkdownDescription describes the validation in Markdown formatting. +func (v credentialsValidator) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// ValidateString performs the validation. 
+func (v credentialsValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() || request.ConfigValue.Equal(types.StringValue("")) { + return + } + + value := request.ConfigValue.ValueString() + + // if this is a path and we can stat it, assume it's ok + if _, err := os.Stat(value); err == nil { + return + } + if _, err := googleoauth.CredentialsFromJSON(context.Background(), []byte(value)); err != nil { + response.Diagnostics.AddError("JSON credentials are not valid", err.Error()) + } +} + +func CredentialsValidator() validator.String { + return credentialsValidator{} +} + +// Non Negative Duration Validator +type nonnegativedurationValidator struct { +} + +// Description describes the validation in plain text formatting. +func (v nonnegativedurationValidator) Description(_ context.Context) string { + return "value expected to be a string representing a non-negative duration" +} + +// MarkdownDescription describes the validation in Markdown formatting. +func (v nonnegativedurationValidator) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// ValidateString performs the validation. 
+func (v nonnegativedurationValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue.ValueString() + dur, err := time.ParseDuration(value) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("expected %s to be a duration", value), err.Error()) + return + } + + if dur < 0 { + response.Diagnostics.AddError("duration must be non-negative", fmt.Sprintf("duration provided: %d", dur)) + } +} + +func NonNegativeDurationValidator() validator.String { + return nonnegativedurationValidator{} +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwresource/field_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwresource/field_helpers.go new file mode 100644 index 0000000000..eeb7b58ad8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwresource/field_helpers.go @@ -0,0 +1,66 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package fwresource + +import ( + "fmt" + "regexp" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +// GetProject reads the "project" field from the given resource and falls +// back to the provider's value if not given. If the provider's value is not +// given, an error is returned. 
+func GetProjectFramework(rVal, pVal types.String, diags *diag.Diagnostics) types.String { + return getProjectFromFrameworkSchema("project", rVal, pVal, diags) +} + +func getProjectFromFrameworkSchema(projectSchemaField string, rVal, pVal types.String, diags *diag.Diagnostics) types.String { + if !rVal.IsNull() && rVal.ValueString() != "" { + return rVal + } + + if !pVal.IsNull() && pVal.ValueString() != "" { + return pVal + } + + diags.AddError("required field is not set", fmt.Sprintf("%s is not set", projectSchemaField)) + return types.String{} +} + +// Parses a project field with the following formats: +// - projects/{my_projects}/{resource_type}/{resource_name} +func ParseProjectFieldValueFramework(resourceType, fieldValue, projectSchemaField string, rVal, pVal types.String, isEmptyValid bool, diags *diag.Diagnostics) *tpgresource.ProjectFieldValue { + if len(fieldValue) == 0 { + if isEmptyValid { + return &tpgresource.ProjectFieldValue{ResourceType: resourceType} + } + diags.AddError("field can not be empty", fmt.Sprintf("The project field for resource %s cannot be empty", resourceType)) + return nil + } + + r := regexp.MustCompile(fmt.Sprintf(tpgresource.ProjectBasePattern, resourceType)) + if parts := r.FindStringSubmatch(fieldValue); parts != nil { + return &tpgresource.ProjectFieldValue{ + Project: parts[1], + Name: parts[2], + + ResourceType: resourceType, + } + } + + project := getProjectFromFrameworkSchema(projectSchemaField, rVal, pVal, diags) + if diags.HasError() { + return nil + } + + return &tpgresource.ProjectFieldValue{ + Project: project.ValueString(), + Name: tpgresource.GetResourceNameFromSelfLink(fieldValue), + + ResourceType: resourceType, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwresource/framework_location.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwresource/framework_location.go new file mode 100644 index 
0000000000..44ed5803de --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwresource/framework_location.go @@ -0,0 +1,118 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package fwresource + +import ( + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +type LocationDescriber interface { + GetLocationDescription(providerConfig *fwtransport.FrameworkProviderConfig) LocationDescription +} + +type LocationDescription struct { + // Location - not configurable on provider + LocationSchemaField types.String + ResourceLocation types.String + + // Region + RegionSchemaField types.String + ResourceRegion types.String + ProviderRegion types.String + + // Zone + ZoneSchemaField types.String + ResourceZone types.String + ProviderZone types.String +} + +func (ld *LocationDescription) GetLocation() (types.String, error) { + // Location from resource config + if !ld.ResourceLocation.IsNull() && !ld.ResourceLocation.IsUnknown() && !ld.ResourceLocation.Equal(types.StringValue("")) { + return ld.ResourceLocation, nil + } + + // Location from region in resource config + if !ld.ResourceRegion.IsNull() && !ld.ResourceRegion.IsUnknown() && !ld.ResourceRegion.Equal(types.StringValue("")) { + return ld.ResourceRegion, nil + } + + // Location from zone in resource config + if !ld.ResourceZone.IsNull() && !ld.ResourceZone.IsUnknown() && !ld.ResourceZone.Equal(types.StringValue("")) { + location := tpgresource.GetResourceNameFromSelfLink(ld.ResourceZone.ValueString()) // Zone could be a self link + return types.StringValue(location), nil + } + + // Location from zone in provider config + if !ld.ProviderZone.IsNull() && !ld.ProviderZone.IsUnknown() && !ld.ProviderZone.Equal(types.StringValue("")) { + return ld.ProviderZone, nil + } + + var 
err error + if !ld.LocationSchemaField.IsNull() { + err = fmt.Errorf("location could not be identified, please add `%s` in your resource or set `region` in your provider configuration block", ld.LocationSchemaField.ValueString()) + } else { + err = errors.New("location could not be identified, please add `location` in your resource or `region` in your provider configuration block") + } + return types.StringNull(), err +} + +func (ld *LocationDescription) GetRegion() (types.String, error) { + // TODO(SarahFrench): Make empty strings not ignored, see https://github.com/hashicorp/terraform-provider-google/issues/14447 + // For all checks in this function body + + // Region from resource config + if !ld.ResourceRegion.IsNull() && !ld.ResourceRegion.IsUnknown() && !ld.ResourceRegion.Equal(types.StringValue("")) { + region := tpgresource.GetResourceNameFromSelfLink(ld.ResourceRegion.ValueString()) // Region could be a self link + return types.StringValue(region), nil + } + // Region from zone in resource config + if !ld.ResourceZone.IsNull() && !ld.ResourceZone.IsUnknown() && !ld.ResourceZone.Equal(types.StringValue("")) { + region := tpgresource.GetRegionFromZone(ld.ResourceZone.ValueString()) + return types.StringValue(region), nil + } + // Region from provider config + if !ld.ProviderRegion.IsNull() && !ld.ProviderRegion.IsUnknown() && !ld.ProviderRegion.Equal(types.StringValue("")) { + return ld.ProviderRegion, nil + } + // Region from zone in provider config + if !ld.ProviderZone.IsNull() && !ld.ProviderZone.IsUnknown() && !ld.ProviderZone.Equal(types.StringValue("")) { + region := tpgresource.GetRegionFromZone(ld.ProviderZone.ValueString()) + return types.StringValue(region), nil + } + + var err error + if !ld.RegionSchemaField.IsNull() { + err = fmt.Errorf("region could not be identified, please add `%s` in your resource or set `region` in your provider configuration block", ld.RegionSchemaField.ValueString()) + } else { + err = errors.New("region could not be 
identified, please add `region` in your resource or provider configuration block") + } + return types.StringNull(), err +} + +func (ld *LocationDescription) GetZone() (types.String, error) { + // TODO(SarahFrench): Make empty strings not ignored, see https://github.com/hashicorp/terraform-provider-google/issues/14447 + // For all checks in this function body + + if !ld.ResourceZone.IsNull() && !ld.ResourceZone.IsUnknown() && !ld.ResourceZone.Equal(types.StringValue("")) { + // Zone could be a self link + zone := tpgresource.GetResourceNameFromSelfLink(ld.ResourceZone.ValueString()) + return types.StringValue(zone), nil + } + if !ld.ProviderZone.IsNull() && !ld.ProviderZone.IsUnknown() && !ld.ProviderZone.Equal(types.StringValue("")) { + return ld.ProviderZone, nil + } + + var err error + if !ld.ZoneSchemaField.IsNull() { + err = fmt.Errorf("zone could not be identified, please add `%s` in your resource or `zone` in your provider configuration block", ld.ZoneSchemaField.ValueString()) + } else { + err = errors.New("zone could not be identified, please add `zone` in your resource or provider configuration block") + } + return types.StringNull(), err +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_config.go new file mode 100644 index 0000000000..31bdf06c2d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_config.go @@ -0,0 +1,1592 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package fwtransport + +import ( + "context" + "fmt" + "net/http" + "os" + "strconv" + "time" + + "golang.org/x/oauth2" + googleoauth "golang.org/x/oauth2/google" + + "google.golang.org/api/option" + "google.golang.org/api/transport" + "google.golang.org/grpc" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus" + "github.com/sirupsen/logrus" +) + +type FrameworkProviderConfig struct { + BillingProject types.String + Client *http.Client + Context context.Context + gRPCLoggingOptions []option.ClientOption + PollInterval time.Duration + Project types.String + Region types.String + Zone types.String + RequestBatcherIam *transport_tpg.RequestBatcher + RequestBatcherServiceUsage *transport_tpg.RequestBatcher + Scopes []string + TokenSource oauth2.TokenSource + UserAgent string + UserProjectOverride bool + + // paths for client setup + AccessApprovalBasePath string + AccessContextManagerBasePath string + ActiveDirectoryBasePath string + AlloydbBasePath string + ApigeeBasePath string + AppEngineBasePath string + ArtifactRegistryBasePath string + BeyondcorpBasePath string + BigQueryBasePath string + BigqueryAnalyticsHubBasePath string + BigqueryConnectionBasePath string + BigqueryDatapolicyBasePath string + BigqueryDataTransferBasePath string + BigqueryReservationBasePath string + BigtableBasePath string + BillingBasePath string + BinaryAuthorizationBasePath string + CertificateManagerBasePath string + CloudAssetBasePath string + 
CloudBuildBasePath string + Cloudbuildv2BasePath string + CloudFunctionsBasePath string + Cloudfunctions2BasePath string + CloudIdentityBasePath string + CloudIdsBasePath string + CloudIotBasePath string + CloudRunBasePath string + CloudRunV2BasePath string + CloudSchedulerBasePath string + CloudTasksBasePath string + ComputeBasePath string + ContainerAnalysisBasePath string + ContainerAttachedBasePath string + DatabaseMigrationServiceBasePath string + DataCatalogBasePath string + DataFusionBasePath string + DataLossPreventionBasePath string + DataplexBasePath string + DataprocBasePath string + DataprocMetastoreBasePath string + DatastoreBasePath string + DatastreamBasePath string + DeploymentManagerBasePath string + DialogflowBasePath string + DialogflowCXBasePath string + DNSBasePath string + DocumentAIBasePath string + EssentialContactsBasePath string + FilestoreBasePath string + FirestoreBasePath string + GameServicesBasePath string + GKEBackupBasePath string + GKEHubBasePath string + GKEHub2BasePath string + HealthcareBasePath string + IAM2BasePath string + IAMBetaBasePath string + IAMWorkforcePoolBasePath string + IapBasePath string + IdentityPlatformBasePath string + KMSBasePath string + LoggingBasePath string + LookerBasePath string + MemcacheBasePath string + MLEngineBasePath string + MonitoringBasePath string + NetworkManagementBasePath string + NetworkSecurityBasePath string + NetworkServicesBasePath string + NotebooksBasePath string + OSConfigBasePath string + OSLoginBasePath string + PrivatecaBasePath string + PublicCABasePath string + PubsubBasePath string + PubsubLiteBasePath string + RedisBasePath string + ResourceManagerBasePath string + SecretManagerBasePath string + SecurityCenterBasePath string + ServiceManagementBasePath string + ServiceUsageBasePath string + SourceRepoBasePath string + SpannerBasePath string + SQLBasePath string + StorageBasePath string + StorageTransferBasePath string + TagsBasePath string + TPUBasePath string + 
VertexAIBasePath string + VPCAccessBasePath string + WorkflowsBasePath string +} + +var defaultClientScopes = []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email", +} + +// LoadAndValidateFramework handles the bulk of configuring the provider +// it is pulled out so that we can manually call this from our testing provider as well +func (p *FrameworkProviderConfig) LoadAndValidateFramework(ctx context.Context, data fwmodels.ProviderModel, tfVersion string, diags *diag.Diagnostics, providerversion string) { + // Set defaults if needed + p.HandleDefaults(ctx, &data, diags) + if diags.HasError() { + return + } + + p.Context = ctx + + // Handle User Agent string + p.UserAgent = CompileUserAgentString(ctx, "terraform-provider-google", tfVersion, providerversion) + // opt in extension for adding to the User-Agent header + if ext := os.Getenv("GOOGLE_TERRAFORM_USERAGENT_EXTENSION"); ext != "" { + ua := p.UserAgent + p.UserAgent = fmt.Sprintf("%s %s", ua, ext) + } + + // Set up client configuration + p.SetupClient(ctx, data, diags) + if diags.HasError() { + return + } + + // gRPC Logging setup + p.SetupGrpcLogging() + + // Handle Batching Config + batchingConfig := GetBatchingConfig(ctx, data.Batching, diags) + if diags.HasError() { + return + } + + // Setup Base Paths for clients + // Generated products + p.AccessApprovalBasePath = data.AccessApprovalCustomEndpoint.ValueString() + p.AccessContextManagerBasePath = data.AccessContextManagerCustomEndpoint.ValueString() + p.ActiveDirectoryBasePath = data.ActiveDirectoryCustomEndpoint.ValueString() + p.AlloydbBasePath = data.AlloydbCustomEndpoint.ValueString() + p.ApigeeBasePath = data.ApigeeCustomEndpoint.ValueString() + p.AppEngineBasePath = data.AppEngineCustomEndpoint.ValueString() + p.ArtifactRegistryBasePath = data.ArtifactRegistryCustomEndpoint.ValueString() + p.BeyondcorpBasePath = data.BeyondcorpCustomEndpoint.ValueString() + p.BigQueryBasePath = 
data.BigQueryCustomEndpoint.ValueString() + p.BigqueryAnalyticsHubBasePath = data.BigqueryAnalyticsHubCustomEndpoint.ValueString() + p.BigqueryConnectionBasePath = data.BigqueryConnectionCustomEndpoint.ValueString() + p.BigqueryDatapolicyBasePath = data.BigqueryDatapolicyCustomEndpoint.ValueString() + p.BigqueryDataTransferBasePath = data.BigqueryDataTransferCustomEndpoint.ValueString() + p.BigqueryReservationBasePath = data.BigqueryReservationCustomEndpoint.ValueString() + p.BigtableBasePath = data.BigtableCustomEndpoint.ValueString() + p.BillingBasePath = data.BillingCustomEndpoint.ValueString() + p.BinaryAuthorizationBasePath = data.BinaryAuthorizationCustomEndpoint.ValueString() + p.CertificateManagerBasePath = data.CertificateManagerCustomEndpoint.ValueString() + p.CloudAssetBasePath = data.CloudAssetCustomEndpoint.ValueString() + p.CloudBuildBasePath = data.CloudBuildCustomEndpoint.ValueString() + p.Cloudbuildv2BasePath = data.Cloudbuildv2CustomEndpoint.ValueString() + p.CloudFunctionsBasePath = data.CloudFunctionsCustomEndpoint.ValueString() + p.Cloudfunctions2BasePath = data.Cloudfunctions2CustomEndpoint.ValueString() + p.CloudIdentityBasePath = data.CloudIdentityCustomEndpoint.ValueString() + p.CloudIdsBasePath = data.CloudIdsCustomEndpoint.ValueString() + p.CloudIotBasePath = data.CloudIotCustomEndpoint.ValueString() + p.CloudRunBasePath = data.CloudRunCustomEndpoint.ValueString() + p.CloudRunV2BasePath = data.CloudRunV2CustomEndpoint.ValueString() + p.CloudSchedulerBasePath = data.CloudSchedulerCustomEndpoint.ValueString() + p.CloudTasksBasePath = data.CloudTasksCustomEndpoint.ValueString() + p.ComputeBasePath = data.ComputeCustomEndpoint.ValueString() + p.ContainerAnalysisBasePath = data.ContainerAnalysisCustomEndpoint.ValueString() + p.ContainerAttachedBasePath = data.ContainerAttachedCustomEndpoint.ValueString() + p.DatabaseMigrationServiceBasePath = data.DatabaseMigrationServiceCustomEndpoint.ValueString() + p.DataCatalogBasePath = 
data.DataCatalogCustomEndpoint.ValueString() + p.DataFusionBasePath = data.DataFusionCustomEndpoint.ValueString() + p.DataLossPreventionBasePath = data.DataLossPreventionCustomEndpoint.ValueString() + p.DataplexBasePath = data.DataplexCustomEndpoint.ValueString() + p.DataprocBasePath = data.DataprocCustomEndpoint.ValueString() + p.DataprocMetastoreBasePath = data.DataprocMetastoreCustomEndpoint.ValueString() + p.DatastoreBasePath = data.DatastoreCustomEndpoint.ValueString() + p.DatastreamBasePath = data.DatastreamCustomEndpoint.ValueString() + p.DeploymentManagerBasePath = data.DeploymentManagerCustomEndpoint.ValueString() + p.DialogflowBasePath = data.DialogflowCustomEndpoint.ValueString() + p.DialogflowCXBasePath = data.DialogflowCXCustomEndpoint.ValueString() + p.DNSBasePath = data.DNSCustomEndpoint.ValueString() + p.DocumentAIBasePath = data.DocumentAICustomEndpoint.ValueString() + p.EssentialContactsBasePath = data.EssentialContactsCustomEndpoint.ValueString() + p.FilestoreBasePath = data.FilestoreCustomEndpoint.ValueString() + p.FirestoreBasePath = data.FirestoreCustomEndpoint.ValueString() + p.GameServicesBasePath = data.GameServicesCustomEndpoint.ValueString() + p.GKEBackupBasePath = data.GKEBackupCustomEndpoint.ValueString() + p.GKEHubBasePath = data.GKEHubCustomEndpoint.ValueString() + p.GKEHub2BasePath = data.GKEHub2CustomEndpoint.ValueString() + p.HealthcareBasePath = data.HealthcareCustomEndpoint.ValueString() + p.IAM2BasePath = data.IAM2CustomEndpoint.ValueString() + p.IAMBetaBasePath = data.IAMBetaCustomEndpoint.ValueString() + p.IAMWorkforcePoolBasePath = data.IAMWorkforcePoolCustomEndpoint.ValueString() + p.IapBasePath = data.IapCustomEndpoint.ValueString() + p.IdentityPlatformBasePath = data.IdentityPlatformCustomEndpoint.ValueString() + p.KMSBasePath = data.KMSCustomEndpoint.ValueString() + p.LoggingBasePath = data.LoggingCustomEndpoint.ValueString() + p.LookerBasePath = data.LookerCustomEndpoint.ValueString() + p.MemcacheBasePath = 
data.MemcacheCustomEndpoint.ValueString() + p.MLEngineBasePath = data.MLEngineCustomEndpoint.ValueString() + p.MonitoringBasePath = data.MonitoringCustomEndpoint.ValueString() + p.NetworkManagementBasePath = data.NetworkManagementCustomEndpoint.ValueString() + p.NetworkSecurityBasePath = data.NetworkSecurityCustomEndpoint.ValueString() + p.NetworkServicesBasePath = data.NetworkServicesCustomEndpoint.ValueString() + p.NotebooksBasePath = data.NotebooksCustomEndpoint.ValueString() + p.OSConfigBasePath = data.OSConfigCustomEndpoint.ValueString() + p.OSLoginBasePath = data.OSLoginCustomEndpoint.ValueString() + p.PrivatecaBasePath = data.PrivatecaCustomEndpoint.ValueString() + p.PublicCABasePath = data.PublicCACustomEndpoint.ValueString() + p.PubsubBasePath = data.PubsubCustomEndpoint.ValueString() + p.PubsubLiteBasePath = data.PubsubLiteCustomEndpoint.ValueString() + p.RedisBasePath = data.RedisCustomEndpoint.ValueString() + p.ResourceManagerBasePath = data.ResourceManagerCustomEndpoint.ValueString() + p.SecretManagerBasePath = data.SecretManagerCustomEndpoint.ValueString() + p.SecurityCenterBasePath = data.SecurityCenterCustomEndpoint.ValueString() + p.ServiceManagementBasePath = data.ServiceManagementCustomEndpoint.ValueString() + p.ServiceUsageBasePath = data.ServiceUsageCustomEndpoint.ValueString() + p.SourceRepoBasePath = data.SourceRepoCustomEndpoint.ValueString() + p.SpannerBasePath = data.SpannerCustomEndpoint.ValueString() + p.SQLBasePath = data.SQLCustomEndpoint.ValueString() + p.StorageBasePath = data.StorageCustomEndpoint.ValueString() + p.StorageTransferBasePath = data.StorageTransferCustomEndpoint.ValueString() + p.TagsBasePath = data.TagsCustomEndpoint.ValueString() + p.TPUBasePath = data.TPUCustomEndpoint.ValueString() + p.VertexAIBasePath = data.VertexAICustomEndpoint.ValueString() + p.VPCAccessBasePath = data.VPCAccessCustomEndpoint.ValueString() + p.WorkflowsBasePath = data.WorkflowsCustomEndpoint.ValueString() + + p.Context = ctx + p.Region = 
data.Region + p.Zone = data.Zone + p.PollInterval = 10 * time.Second + p.Project = data.Project + p.RequestBatcherServiceUsage = transport_tpg.NewRequestBatcher("Service Usage", ctx, batchingConfig) + p.RequestBatcherIam = transport_tpg.NewRequestBatcher("IAM", ctx, batchingConfig) +} + +// HandleDefaults will handle all the defaults necessary in the provider +func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmodels.ProviderModel, diags *diag.Diagnostics) { + if data.AccessToken.IsNull() && data.Credentials.IsNull() { + credentials := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CREDENTIALS", + "GOOGLE_CLOUD_KEYFILE_JSON", + "GCLOUD_KEYFILE_JSON", + }, nil) + + if credentials != nil { + data.Credentials = types.StringValue(credentials.(string)) + } + + accessToken := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_OAUTH_ACCESS_TOKEN", + }, nil) + + if accessToken != nil { + data.AccessToken = types.StringValue(accessToken.(string)) + } + } + + if data.ImpersonateServiceAccount.IsNull() && os.Getenv("GOOGLE_IMPERSONATE_SERVICE_ACCOUNT") != "" { + data.ImpersonateServiceAccount = types.StringValue(os.Getenv("GOOGLE_IMPERSONATE_SERVICE_ACCOUNT")) + } + + if data.Project.IsNull() { + project := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_PROJECT", + "GOOGLE_CLOUD_PROJECT", + "GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT", + }, nil) + if project != nil { + data.Project = types.StringValue(project.(string)) + } + } + + if data.BillingProject.IsNull() && os.Getenv("GOOGLE_BILLING_PROJECT") != "" { + data.BillingProject = types.StringValue(os.Getenv("GOOGLE_BILLING_PROJECT")) + } + + if data.Region.IsNull() { + region := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_REGION", + "GCLOUD_REGION", + "CLOUDSDK_COMPUTE_REGION", + }, nil) + + if region != nil { + data.Region = types.StringValue(region.(string)) + } + } + + if data.Zone.IsNull() { + zone := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_ZONE", + "GCLOUD_ZONE", + 
"CLOUDSDK_COMPUTE_ZONE", + }, nil) + + if zone != nil { + data.Zone = types.StringValue(zone.(string)) + } + } + + if len(data.Scopes.Elements()) == 0 { + var d diag.Diagnostics + data.Scopes, d = types.ListValueFrom(ctx, types.StringType, defaultClientScopes) + diags.Append(d...) + if diags.HasError() { + return + } + } + + if !data.Batching.IsNull() { + var pbConfigs []fwmodels.ProviderBatching + d := data.Batching.ElementsAs(ctx, &pbConfigs, true) + diags.Append(d...) + if diags.HasError() { + return + } + + if pbConfigs[0].SendAfter.IsNull() { + pbConfigs[0].SendAfter = types.StringValue("10s") + } + + if pbConfigs[0].EnableBatching.IsNull() { + pbConfigs[0].EnableBatching = types.BoolValue(true) + } + + data.Batching, d = types.ListValueFrom(ctx, types.ObjectType{}.WithAttributeTypes(fwmodels.ProviderBatchingAttributes), pbConfigs) + } + + if data.UserProjectOverride.IsNull() && os.Getenv("USER_PROJECT_OVERRIDE") != "" { + override, err := strconv.ParseBool(os.Getenv("USER_PROJECT_OVERRIDE")) + if err != nil { + diags.AddError( + "error parsing environment variable `USER_PROJECT_OVERRIDE` into bool", err.Error()) + } + data.UserProjectOverride = types.BoolValue(override) + } + + if data.RequestReason.IsNull() && os.Getenv("CLOUDSDK_CORE_REQUEST_REASON") != "" { + data.RequestReason = types.StringValue(os.Getenv("CLOUDSDK_CORE_REQUEST_REASON")) + } + + if data.RequestTimeout.IsNull() { + data.RequestTimeout = types.StringValue("120s") + } + + // Generated Products + if data.AccessApprovalCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_ACCESS_APPROVAL_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.AccessApprovalBasePathKey]) + if customEndpoint != nil { + data.AccessApprovalCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.AccessContextManagerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + 
"GOOGLE_ACCESS_CONTEXT_MANAGER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.AccessContextManagerBasePathKey]) + if customEndpoint != nil { + data.AccessContextManagerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.ActiveDirectoryCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_ACTIVE_DIRECTORY_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ActiveDirectoryBasePathKey]) + if customEndpoint != nil { + data.ActiveDirectoryCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.AlloydbCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_ALLOYDB_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.AlloydbBasePathKey]) + if customEndpoint != nil { + data.AlloydbCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.ApigeeCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_APIGEE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ApigeeBasePathKey]) + if customEndpoint != nil { + data.ApigeeCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.AppEngineCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_APP_ENGINE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.AppEngineBasePathKey]) + if customEndpoint != nil { + data.AppEngineCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.ArtifactRegistryCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_ARTIFACT_REGISTRY_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ArtifactRegistryBasePathKey]) + if customEndpoint != nil { + data.ArtifactRegistryCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.BeyondcorpCustomEndpoint.IsNull() { + customEndpoint 
:= transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_BEYONDCORP_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.BeyondcorpBasePathKey]) + if customEndpoint != nil { + data.BeyondcorpCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.BigQueryCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_BIG_QUERY_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.BigQueryBasePathKey]) + if customEndpoint != nil { + data.BigQueryCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.BigqueryAnalyticsHubCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_BIGQUERY_ANALYTICS_HUB_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.BigqueryAnalyticsHubBasePathKey]) + if customEndpoint != nil { + data.BigqueryAnalyticsHubCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.BigqueryConnectionCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_BIGQUERY_CONNECTION_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.BigqueryConnectionBasePathKey]) + if customEndpoint != nil { + data.BigqueryConnectionCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.BigqueryDatapolicyCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_BIGQUERY_DATAPOLICY_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.BigqueryDatapolicyBasePathKey]) + if customEndpoint != nil { + data.BigqueryDatapolicyCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.BigqueryDataTransferCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_BIGQUERY_DATA_TRANSFER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.BigqueryDataTransferBasePathKey]) + if customEndpoint != nil { + 
data.BigqueryDataTransferCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.BigqueryReservationCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_BIGQUERY_RESERVATION_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.BigqueryReservationBasePathKey]) + if customEndpoint != nil { + data.BigqueryReservationCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.BigtableCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_BIGTABLE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.BigtableBasePathKey]) + if customEndpoint != nil { + data.BigtableCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.BillingCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_BILLING_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.BillingBasePathKey]) + if customEndpoint != nil { + data.BillingCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.BinaryAuthorizationCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_BINARY_AUTHORIZATION_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.BinaryAuthorizationBasePathKey]) + if customEndpoint != nil { + data.BinaryAuthorizationCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.CertificateManagerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CERTIFICATE_MANAGER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.CertificateManagerBasePathKey]) + if customEndpoint != nil { + data.CertificateManagerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.CloudAssetCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_ASSET_CUSTOM_ENDPOINT", + }, 
transport_tpg.DefaultBasePaths[transport_tpg.CloudAssetBasePathKey]) + if customEndpoint != nil { + data.CloudAssetCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.CloudBuildCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_BUILD_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.CloudBuildBasePathKey]) + if customEndpoint != nil { + data.CloudBuildCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.Cloudbuildv2CustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUDBUILDV2_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.Cloudbuildv2BasePathKey]) + if customEndpoint != nil { + data.Cloudbuildv2CustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.CloudFunctionsCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_FUNCTIONS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.CloudFunctionsBasePathKey]) + if customEndpoint != nil { + data.CloudFunctionsCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.Cloudfunctions2CustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUDFUNCTIONS2_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.Cloudfunctions2BasePathKey]) + if customEndpoint != nil { + data.Cloudfunctions2CustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.CloudIdentityCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_IDENTITY_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.CloudIdentityBasePathKey]) + if customEndpoint != nil { + data.CloudIdentityCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.CloudIdsCustomEndpoint.IsNull() { + customEndpoint := 
transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_IDS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.CloudIdsBasePathKey]) + if customEndpoint != nil { + data.CloudIdsCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.CloudIotCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_IOT_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.CloudIotBasePathKey]) + if customEndpoint != nil { + data.CloudIotCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.CloudRunCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_RUN_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.CloudRunBasePathKey]) + if customEndpoint != nil { + data.CloudRunCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.CloudRunV2CustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_RUN_V2_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.CloudRunV2BasePathKey]) + if customEndpoint != nil { + data.CloudRunV2CustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.CloudSchedulerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_SCHEDULER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.CloudSchedulerBasePathKey]) + if customEndpoint != nil { + data.CloudSchedulerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.CloudTasksCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_TASKS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.CloudTasksBasePathKey]) + if customEndpoint != nil { + data.CloudTasksCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.ComputeCustomEndpoint.IsNull() { + customEndpoint := 
transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_COMPUTE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ComputeBasePathKey]) + if customEndpoint != nil { + data.ComputeCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.ContainerAnalysisCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CONTAINER_ANALYSIS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ContainerAnalysisBasePathKey]) + if customEndpoint != nil { + data.ContainerAnalysisCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.ContainerAttachedCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CONTAINER_ATTACHED_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ContainerAttachedBasePathKey]) + if customEndpoint != nil { + data.ContainerAttachedCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.DatabaseMigrationServiceCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DATABASE_MIGRATION_SERVICE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DatabaseMigrationServiceBasePathKey]) + if customEndpoint != nil { + data.DatabaseMigrationServiceCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.DataCatalogCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DATA_CATALOG_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DataCatalogBasePathKey]) + if customEndpoint != nil { + data.DataCatalogCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.DataFusionCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DATA_FUSION_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DataFusionBasePathKey]) + if customEndpoint != nil { + data.DataFusionCustomEndpoint = 
types.StringValue(customEndpoint.(string)) + } + } + if data.DataLossPreventionCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DATA_LOSS_PREVENTION_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DataLossPreventionBasePathKey]) + if customEndpoint != nil { + data.DataLossPreventionCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.DataplexCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DATAPLEX_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DataplexBasePathKey]) + if customEndpoint != nil { + data.DataplexCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.DataprocCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DATAPROC_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DataprocBasePathKey]) + if customEndpoint != nil { + data.DataprocCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.DataprocMetastoreCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DATAPROC_METASTORE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DataprocMetastoreBasePathKey]) + if customEndpoint != nil { + data.DataprocMetastoreCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.DatastoreCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DATASTORE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DatastoreBasePathKey]) + if customEndpoint != nil { + data.DatastoreCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.DatastreamCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DATASTREAM_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DatastreamBasePathKey]) + if customEndpoint 
!= nil { + data.DatastreamCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.DeploymentManagerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DEPLOYMENT_MANAGER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DeploymentManagerBasePathKey]) + if customEndpoint != nil { + data.DeploymentManagerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.DialogflowCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DIALOGFLOW_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DialogflowBasePathKey]) + if customEndpoint != nil { + data.DialogflowCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.DialogflowCXCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DIALOGFLOW_CX_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DialogflowCXBasePathKey]) + if customEndpoint != nil { + data.DialogflowCXCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.DNSCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DNS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DNSBasePathKey]) + if customEndpoint != nil { + data.DNSCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.DocumentAICustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DOCUMENT_AI_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DocumentAIBasePathKey]) + if customEndpoint != nil { + data.DocumentAICustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.EssentialContactsCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_ESSENTIAL_CONTACTS_CUSTOM_ENDPOINT", + }, 
transport_tpg.DefaultBasePaths[transport_tpg.EssentialContactsBasePathKey]) + if customEndpoint != nil { + data.EssentialContactsCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.FilestoreCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_FILESTORE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.FilestoreBasePathKey]) + if customEndpoint != nil { + data.FilestoreCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.FirestoreCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_FIRESTORE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.FirestoreBasePathKey]) + if customEndpoint != nil { + data.FirestoreCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.GameServicesCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_GAME_SERVICES_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.GameServicesBasePathKey]) + if customEndpoint != nil { + data.GameServicesCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.GKEBackupCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_GKE_BACKUP_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.GKEBackupBasePathKey]) + if customEndpoint != nil { + data.GKEBackupCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.GKEHubCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_GKE_HUB_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.GKEHubBasePathKey]) + if customEndpoint != nil { + data.GKEHubCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.GKEHub2CustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_GKE_HUB2_CUSTOM_ENDPOINT", + }, 
transport_tpg.DefaultBasePaths[transport_tpg.GKEHub2BasePathKey]) + if customEndpoint != nil { + data.GKEHub2CustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.HealthcareCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_HEALTHCARE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.HealthcareBasePathKey]) + if customEndpoint != nil { + data.HealthcareCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.IAM2CustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_IAM2_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.IAM2BasePathKey]) + if customEndpoint != nil { + data.IAM2CustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.IAMBetaCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_IAM_BETA_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.IAMBetaBasePathKey]) + if customEndpoint != nil { + data.IAMBetaCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.IAMWorkforcePoolCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_IAM_WORKFORCE_POOL_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.IAMWorkforcePoolBasePathKey]) + if customEndpoint != nil { + data.IAMWorkforcePoolCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.IapCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_IAP_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.IapBasePathKey]) + if customEndpoint != nil { + data.IapCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.IdentityPlatformCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_IDENTITY_PLATFORM_CUSTOM_ENDPOINT", + }, 
transport_tpg.DefaultBasePaths[transport_tpg.IdentityPlatformBasePathKey]) + if customEndpoint != nil { + data.IdentityPlatformCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.KMSCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_KMS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.KMSBasePathKey]) + if customEndpoint != nil { + data.KMSCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.LoggingCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_LOGGING_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.LoggingBasePathKey]) + if customEndpoint != nil { + data.LoggingCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.LookerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_LOOKER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.LookerBasePathKey]) + if customEndpoint != nil { + data.LookerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.MemcacheCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_MEMCACHE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.MemcacheBasePathKey]) + if customEndpoint != nil { + data.MemcacheCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.MLEngineCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_ML_ENGINE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.MLEngineBasePathKey]) + if customEndpoint != nil { + data.MLEngineCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.MonitoringCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_MONITORING_CUSTOM_ENDPOINT", + }, 
transport_tpg.DefaultBasePaths[transport_tpg.MonitoringBasePathKey]) + if customEndpoint != nil { + data.MonitoringCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.NetworkManagementCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_NETWORK_MANAGEMENT_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.NetworkManagementBasePathKey]) + if customEndpoint != nil { + data.NetworkManagementCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.NetworkSecurityCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_NETWORK_SECURITY_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.NetworkSecurityBasePathKey]) + if customEndpoint != nil { + data.NetworkSecurityCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.NetworkServicesCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_NETWORK_SERVICES_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.NetworkServicesBasePathKey]) + if customEndpoint != nil { + data.NetworkServicesCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.NotebooksCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_NOTEBOOKS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.NotebooksBasePathKey]) + if customEndpoint != nil { + data.NotebooksCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.OSConfigCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_OS_CONFIG_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.OSConfigBasePathKey]) + if customEndpoint != nil { + data.OSConfigCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.OSLoginCustomEndpoint.IsNull() { + customEndpoint := 
transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_OS_LOGIN_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.OSLoginBasePathKey]) + if customEndpoint != nil { + data.OSLoginCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.PrivatecaCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_PRIVATECA_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.PrivatecaBasePathKey]) + if customEndpoint != nil { + data.PrivatecaCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.PublicCACustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_PUBLIC_CA_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.PublicCABasePathKey]) + if customEndpoint != nil { + data.PublicCACustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.PubsubCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_PUBSUB_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.PubsubBasePathKey]) + if customEndpoint != nil { + data.PubsubCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.PubsubLiteCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_PUBSUB_LITE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.PubsubLiteBasePathKey]) + if customEndpoint != nil { + data.PubsubLiteCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.RedisCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_REDIS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.RedisBasePathKey]) + if customEndpoint != nil { + data.RedisCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.ResourceManagerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + 
"GOOGLE_RESOURCE_MANAGER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ResourceManagerBasePathKey]) + if customEndpoint != nil { + data.ResourceManagerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.SecretManagerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_SECRET_MANAGER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.SecretManagerBasePathKey]) + if customEndpoint != nil { + data.SecretManagerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.SecurityCenterCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_SECURITY_CENTER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.SecurityCenterBasePathKey]) + if customEndpoint != nil { + data.SecurityCenterCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.ServiceManagementCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_SERVICE_MANAGEMENT_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ServiceManagementBasePathKey]) + if customEndpoint != nil { + data.ServiceManagementCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.ServiceUsageCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_SERVICE_USAGE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ServiceUsageBasePathKey]) + if customEndpoint != nil { + data.ServiceUsageCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.SourceRepoCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_SOURCE_REPO_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.SourceRepoBasePathKey]) + if customEndpoint != nil { + data.SourceRepoCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if 
data.SpannerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_SPANNER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.SpannerBasePathKey]) + if customEndpoint != nil { + data.SpannerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.SQLCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_SQL_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.SQLBasePathKey]) + if customEndpoint != nil { + data.SQLCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.StorageCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_STORAGE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.StorageBasePathKey]) + if customEndpoint != nil { + data.StorageCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.StorageTransferCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_STORAGE_TRANSFER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.StorageTransferBasePathKey]) + if customEndpoint != nil { + data.StorageTransferCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.TagsCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_TAGS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.TagsBasePathKey]) + if customEndpoint != nil { + data.TagsCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.TPUCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_TPU_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.TPUBasePathKey]) + if customEndpoint != nil { + data.TPUCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.VertexAICustomEndpoint.IsNull() { + customEndpoint := 
transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_VERTEX_AI_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.VertexAIBasePathKey]) + if customEndpoint != nil { + data.VertexAICustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.VPCAccessCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_VPC_ACCESS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.VPCAccessBasePathKey]) + if customEndpoint != nil { + data.VPCAccessCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + if data.WorkflowsCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_WORKFLOWS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.WorkflowsBasePathKey]) + if customEndpoint != nil { + data.WorkflowsCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + // Handwritten Products / Versioned / Atypical Entries + if data.CloudBillingCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_BILLING_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths["cloud_billing_custom_endpoint"]) + if customEndpoint != nil { + data.CloudBillingCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.ComposerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_COMPOSER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ComposerBasePathKey]) + if customEndpoint != nil { + data.ComposerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.ContainerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CONTAINER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ContainerBasePathKey]) + if customEndpoint != nil { + data.ContainerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if 
data.DataflowCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DATAFLOW_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DataflowBasePathKey]) + if customEndpoint != nil { + data.DataflowCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.IamCredentialsCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_IAM_CREDENTIALS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.IamCredentialsBasePathKey]) + if customEndpoint != nil { + data.IamCredentialsCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.ResourceManagerV3CustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_RESOURCE_MANAGER_V3_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ResourceManagerV3BasePathKey]) + if customEndpoint != nil { + data.ResourceManagerV3CustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.IAMCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_IAM_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.IAMBasePathKey]) + if customEndpoint != nil { + data.IAMCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.ServiceNetworkingCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_SERVICE_NETWORKING_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ServiceNetworkingBasePathKey]) + if customEndpoint != nil { + data.ServiceNetworkingCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.TagsLocationCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_TAGS_LOCATION_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.TagsLocationBasePathKey]) + if customEndpoint != nil { + data.TagsLocationCustomEndpoint 
= types.StringValue(customEndpoint.(string)) + } + } + + // dcl + if data.ContainerAwsCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CONTAINERAWS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ContainerAwsBasePathKey]) + if customEndpoint != nil { + data.ContainerAwsCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.ContainerAzureCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CONTAINERAZURE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ContainerAzureBasePathKey]) + if customEndpoint != nil { + data.ContainerAzureCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + // DCL generated defaults + if data.ApikeysCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_APIKEYS_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.ApikeysCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.AssuredWorkloadsCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_ASSURED_WORKLOADS_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.AssuredWorkloadsCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.CloudBuildWorkerPoolCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_BUILD_WORKER_POOL_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.CloudBuildWorkerPoolCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.CloudDeployCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUDDEPLOY_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.CloudDeployCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.CloudResourceManagerCustomEndpoint.IsNull() { + customEndpoint := 
transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_RESOURCE_MANAGER_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.CloudResourceManagerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.DataplexCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DATAPLEX_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.DataplexCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.EventarcCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_EVENTARC_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.EventarcCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.FirebaserulesCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_FIREBASERULES_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.FirebaserulesCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.NetworkConnectivityCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_NETWORK_CONNECTIVITY_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.NetworkConnectivityCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.RecaptchaEnterpriseCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_RECAPTCHA_ENTERPRISE_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.RecaptchaEnterpriseCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } +} + +func (p *FrameworkProviderConfig) SetupClient(ctx context.Context, data fwmodels.ProviderModel, diags *diag.Diagnostics) { + tokenSource := GetTokenSource(ctx, data, false, diags) + if diags.HasError() { + return + } + + cleanCtx := context.WithValue(ctx, oauth2.HTTPClient, cleanhttp.DefaultClient()) + + // 1. 
MTLS TRANSPORT/CLIENT - sets up proper auth headers + client, _, err := transport.NewHTTPClient(cleanCtx, option.WithTokenSource(tokenSource)) + if err != nil { + diags.AddError("error creating new http client", err.Error()) + return + } + + // Userinfo is fetched before request logging is enabled to reduce additional noise. + p.logGoogleIdentities(ctx, data, diags) + if diags.HasError() { + return + } + + // 2. Logging Transport - ensure we log HTTP requests to GCP APIs. + loggingTransport := logging.NewTransport("Google", client.Transport) + + // 3. Retry Transport - retries common temporary errors + // Keep order for wrapping logging so we log each retried request as well. + // This value should be used if needed to create shallow copies with additional retry predicates. + // See ClientWithAdditionalRetries + retryTransport := transport_tpg.NewTransportWithDefaultRetries(loggingTransport) + + // 4. Header Transport - outer wrapper to inject additional headers we want to apply + // before making requests + headerTransport := transport_tpg.NewTransportWithHeaders(retryTransport) + if !data.RequestReason.IsNull() { + headerTransport.Set("X-Goog-Request-Reason", data.RequestReason.ValueString()) + } + + // Ensure $userProject is set for all HTTP requests using the client if specified by the provider config + // See https://cloud.google.com/apis/docs/system-parameters + if data.UserProjectOverride.ValueBool() && !data.BillingProject.IsNull() { + headerTransport.Set("X-Goog-User-Project", data.BillingProject.ValueString()) + } + + // Set final transport value. + client.Transport = headerTransport + + // This timeout is a timeout per HTTP request, not per logical operation. 
+ timeout, err := time.ParseDuration(data.RequestTimeout.ValueString()) + if err != nil { + diags.AddError("error parsing request timeout", err.Error()) + } + client.Timeout = timeout + + p.TokenSource = tokenSource + p.Client = client +} + +func (p *FrameworkProviderConfig) SetupGrpcLogging() { + logger := logrus.StandardLogger() + + logrus.SetLevel(logrus.DebugLevel) + logrus.SetFormatter(&transport_tpg.Formatter{ + TimestampFormat: "2006/01/02 15:04:05", + LogFormat: "%time% [%lvl%] %msg% \n", + }) + + alwaysLoggingDeciderClient := func(ctx context.Context, fullMethodName string) bool { return true } + grpc_logrus.ReplaceGrpcLogger(logrus.NewEntry(logger)) + + p.gRPCLoggingOptions = append( + p.gRPCLoggingOptions, option.WithGRPCDialOption(grpc.WithUnaryInterceptor( + grpc_logrus.PayloadUnaryClientInterceptor(logrus.NewEntry(logger), alwaysLoggingDeciderClient))), + option.WithGRPCDialOption(grpc.WithStreamInterceptor( + grpc_logrus.PayloadStreamClientInterceptor(logrus.NewEntry(logger), alwaysLoggingDeciderClient))), + ) +} + +func (p *FrameworkProviderConfig) logGoogleIdentities(ctx context.Context, data fwmodels.ProviderModel, diags *diag.Diagnostics) { + // GetCurrentUserEmailFramework doesn't pass an error back from logGoogleIdentities, so we want + // a separate diagnostics here + var d diag.Diagnostics + + if data.ImpersonateServiceAccount.IsNull() { + + tokenSource := GetTokenSource(ctx, data, true, diags) + if diags.HasError() { + return + } + + p.Client = oauth2.NewClient(ctx, tokenSource) // p.Client isn't initialised fully when this code is called. + + email := GetCurrentUserEmailFramework(p, p.UserAgent, &d) + if d.HasError() { + tflog.Info(ctx, "error retrieving userinfo for your provider credentials. 
have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope?") + } + + tflog.Info(ctx, fmt.Sprintf("Terraform is using this identity: %s", email)) + return + } + + // Drop Impersonated ClientOption from OAuth2 TokenSource to infer original identity + tokenSource := GetTokenSource(ctx, data, true, diags) + if diags.HasError() { + return + } + + p.Client = oauth2.NewClient(ctx, tokenSource) // p.Client isn't initialised fully when this code is called. + email := GetCurrentUserEmailFramework(p, p.UserAgent, &d) + if d.HasError() { + tflog.Info(ctx, "error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope?") + } + + tflog.Info(ctx, fmt.Sprintf("Terraform is configured with service account impersonation, original identity: %s, impersonated identity: %s", email, data.ImpersonateServiceAccount.ValueString())) + + // Add the Impersonated ClientOption back in to the OAuth2 TokenSource + tokenSource = GetTokenSource(ctx, data, false, diags) + if diags.HasError() { + return + } + + p.Client = oauth2.NewClient(ctx, tokenSource) // p.Client isn't initialised fully when this code is called. + + return +} + +// Configuration helpers + +// GetTokenSource gets token source based on the Google Credentials configured. +// If initialCredentialsOnly is true, don't follow the impersonation settings and return the initial set of creds. +func GetTokenSource(ctx context.Context, data fwmodels.ProviderModel, initialCredentialsOnly bool, diags *diag.Diagnostics) oauth2.TokenSource { + creds := GetCredentials(ctx, data, initialCredentialsOnly, diags) + + return creds.TokenSource +} + +// GetCredentials gets credentials with a given scope (clientScopes). +// If initialCredentialsOnly is true, don't follow the impersonation +// settings and return the initial set of creds instead. 
+func GetCredentials(ctx context.Context, data fwmodels.ProviderModel, initialCredentialsOnly bool, diags *diag.Diagnostics) googleoauth.Credentials { + var clientScopes []string + var delegates []string + + d := data.Scopes.ElementsAs(ctx, &clientScopes, false) + diags.Append(d...) + if diags.HasError() { + return googleoauth.Credentials{} + } + + d = data.ImpersonateServiceAccountDelegates.ElementsAs(ctx, &delegates, false) + diags.Append(d...) + if diags.HasError() { + return googleoauth.Credentials{} + } + + if !data.AccessToken.IsNull() { + contents, _, err := verify.PathOrContents(data.AccessToken.ValueString()) + if err != nil { + diags.AddError("error loading access token", err.Error()) + return googleoauth.Credentials{} + } + + token := &oauth2.Token{AccessToken: contents} + if !data.ImpersonateServiceAccount.IsNull() && !initialCredentialsOnly { + opts := []option.ClientOption{option.WithTokenSource(oauth2.StaticTokenSource(token)), option.ImpersonateCredentials(data.ImpersonateServiceAccount.ValueString(), delegates...), option.WithScopes(clientScopes...)} + creds, err := transport.Creds(context.TODO(), opts...) 
+ if err != nil { + diags.AddError("error impersonating credentials", err.Error()) + return googleoauth.Credentials{} + } + return *creds + } + + tflog.Info(ctx, "Authenticating using configured Google JSON 'access_token'...") + tflog.Info(ctx, fmt.Sprintf(" -- Scopes: %s", clientScopes)) + return googleoauth.Credentials{ + TokenSource: transport_tpg.StaticTokenSource{oauth2.StaticTokenSource(token)}, + } + } + + if !data.Credentials.IsNull() { + contents, _, err := verify.PathOrContents(data.Credentials.ValueString()) + if err != nil { + diags.AddError(fmt.Sprintf("error loading credentials: %s", err), err.Error()) + return googleoauth.Credentials{} + } + + if !data.ImpersonateServiceAccount.IsNull() && !initialCredentialsOnly { + opts := []option.ClientOption{option.WithCredentialsJSON([]byte(contents)), option.ImpersonateCredentials(data.ImpersonateServiceAccount.ValueString(), delegates...), option.WithScopes(clientScopes...)} + creds, err := transport.Creds(context.TODO(), opts...) + if err != nil { + diags.AddError("error impersonating credentials", err.Error()) + return googleoauth.Credentials{} + } + return *creds + } + + creds, err := transport.Creds(ctx, option.WithCredentialsJSON([]byte(contents)), option.WithScopes(clientScopes...)) + if err != nil { + diags.AddError("unable to parse credentials", err.Error()) + return googleoauth.Credentials{} + } + + tflog.Info(ctx, "Authenticating using configured Google JSON 'credentials'...") + tflog.Info(ctx, fmt.Sprintf(" -- Scopes: %s", clientScopes)) + return *creds + } + + if !data.ImpersonateServiceAccount.IsNull() && !initialCredentialsOnly { + opts := option.ImpersonateCredentials(data.ImpersonateServiceAccount.ValueString(), delegates...) 
+ creds, err := transport.Creds(context.TODO(), opts, option.WithScopes(clientScopes...)) + if err != nil { + diags.AddError("error impersonating credentials", err.Error()) + return googleoauth.Credentials{} + } + + return *creds + } + + tflog.Info(ctx, "Authenticating using DefaultClient...") + tflog.Info(ctx, fmt.Sprintf(" -- Scopes: %s", clientScopes)) + creds, err := transport.Creds(context.Background(), option.WithScopes(clientScopes...)) + if err != nil { + diags.AddError(fmt.Sprintf("Attempted to load application default credentials since neither `credentials` nor `access_token` was set in the provider block. "+ + "No credentials loaded. To use your gcloud credentials, run 'gcloud auth application-default login'"), err.Error()) + return googleoauth.Credentials{} + } + + return *creds +} + +// GetBatchingConfig returns the batching config object given the +// provider configuration set for batching +func GetBatchingConfig(ctx context.Context, data types.List, diags *diag.Diagnostics) *transport_tpg.BatchingConfig { + bc := &transport_tpg.BatchingConfig{ + SendAfter: time.Second * transport_tpg.DefaultBatchSendIntervalSec, + EnableBatching: true, + } + + if data.IsNull() { + return bc + } + + var pbConfigs []fwmodels.ProviderBatching + d := data.ElementsAs(ctx, &pbConfigs, true) + diags.Append(d...) 
+ if diags.HasError() { + return bc + } + + sendAfter, err := time.ParseDuration(pbConfigs[0].SendAfter.ValueString()) + if err != nil { + diags.AddError("error parsing send after time duration", err.Error()) + return bc + } + + bc.SendAfter = sendAfter + + if !pbConfigs[0].EnableBatching.IsNull() { + bc.EnableBatching = pbConfigs[0].EnableBatching.ValueBool() + } + + return bc +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_provider_clients.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_provider_clients.go new file mode 100644 index 0000000000..0ac998b772 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_provider_clients.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package fwtransport + +import ( + "fmt" + "strings" + + "google.golang.org/api/dns/v1" + "google.golang.org/api/option" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Methods to create new services from config +// Some base paths below need the version and possibly more of the path +// set on them. The client libraries are inconsistent about which values they need; +// while most only want the host URL, some older ones also want the version and some +// of those "projects" as well. You can find out if this is required by looking at +// the basePath value in the client library file. 
+ +func (p *FrameworkProviderConfig) NewDnsClient(userAgent string, diags *diag.Diagnostics) *dns.Service { + dnsClientBasePath := transport_tpg.RemoveBasePathVersion(p.DNSBasePath) + dnsClientBasePath = strings.ReplaceAll(dnsClientBasePath, "/dns/", "") + tflog.Info(p.Context, fmt.Sprintf("Instantiating Google Cloud DNS client for path %s", dnsClientBasePath)) + clientDns, err := dns.NewService(p.Context, option.WithHTTPClient(p.Client)) + if err != nil { + diags.AddWarning("error creating client dns", err.Error()) + return nil + } + clientDns.UserAgent = userAgent + clientDns.BasePath = dnsClientBasePath + + return clientDns +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_transport.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_transport.go new file mode 100644 index 0000000000..9a637c7534 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_transport.go @@ -0,0 +1,105 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package fwtransport + +import ( + "bytes" + "encoding/json" + "net/http" + "time" + + "github.com/hashicorp/terraform-plugin-framework/diag" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/googleapi" +) + +func SendFrameworkRequest(p *FrameworkProviderConfig, method, project, rawurl, userAgent string, body map[string]interface{}, errorRetryPredicates ...transport_tpg.RetryErrorPredicateFunc) (map[string]interface{}, diag.Diagnostics) { + return SendFrameworkRequestWithTimeout(p, method, project, rawurl, userAgent, body, transport_tpg.DefaultRequestTimeout, errorRetryPredicates...) 
+} + +func SendFrameworkRequestWithTimeout(p *FrameworkProviderConfig, method, project, rawurl, userAgent string, body map[string]interface{}, timeout time.Duration, errorRetryPredicates ...transport_tpg.RetryErrorPredicateFunc) (map[string]interface{}, diag.Diagnostics) { + var diags diag.Diagnostics + + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", userAgent) + reqHeaders.Set("Content-Type", "application/json") + + if p.UserProjectOverride && project != "" { + // When project is "NO_BILLING_PROJECT_OVERRIDE" in the function GetCurrentUserEmail, + // set the header X-Goog-User-Project to be empty string. + if project == "NO_BILLING_PROJECT_OVERRIDE" { + reqHeaders.Set("X-Goog-User-Project", "") + } else { + // Pass the project into this fn instead of parsing it from the URL because + // both project names and URLs can have colons in them. + reqHeaders.Set("X-Goog-User-Project", project) + } + } + + if timeout == 0 { + timeout = time.Hour + } + + var res *http.Response + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + var buf bytes.Buffer + if body != nil { + err := json.NewEncoder(&buf).Encode(body) + if err != nil { + return err + } + } + + u, err := transport_tpg.AddQueryParams(rawurl, map[string]string{"alt": "json"}) + if err != nil { + return err + } + req, err := http.NewRequest(method, u, &buf) + if err != nil { + return err + } + + req.Header = reqHeaders + res, err = p.Client.Do(req) + if err != nil { + return err + } + + if err := googleapi.CheckResponse(res); err != nil { + googleapi.CloseBody(res) + return err + } + + return nil + }, + Timeout: timeout, + ErrorRetryPredicates: errorRetryPredicates, + }) + if err != nil { + diags.AddError("error sending request", err.Error()) + return nil, diags + } + + if res == nil { + diags.AddError("Unable to parse server response.", "This is most likely a terraform problem, please file a bug at https://github.com/hashicorp/terraform-provider-google/issues.") + 
return nil, diags + } + + // The defer call must be made outside of the retryFunc otherwise it's closed too soon. + defer googleapi.CloseBody(res) + + // 204 responses will have no body, so we're going to error with "EOF" if we + // try to parse it. Instead, we can just return nil. + if res.StatusCode == 204 { + return nil, diags + } + result := make(map[string]interface{}) + if err := json.NewDecoder(res.Body).Decode(&result); err != nil { + diags.AddError("error decoding response body", err.Error()) + return nil, diags + } + + return result, diags +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_utils.go new file mode 100644 index 0000000000..7c0a4a9461 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/fwtransport/framework_utils.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package fwtransport + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-log/tflog" + + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +const uaEnvVar = "TF_APPEND_USER_AGENT" + +func CompileUserAgentString(ctx context.Context, name, tfVersion, provVersion string) string { + ua := fmt.Sprintf("Terraform/%s (+https://www.terraform.io) Terraform-Plugin-SDK/%s %s/%s", tfVersion, "terraform-plugin-framework", name, provVersion) + + if add := os.Getenv(uaEnvVar); add != "" { + add = strings.TrimSpace(add) + if len(add) > 0 { + ua += " " + add + tflog.Debug(ctx, fmt.Sprintf("Using modified User-Agent: %s", ua)) + } + } + + return ua +} + +func GetCurrentUserEmailFramework(p *FrameworkProviderConfig, userAgent string, diags *diag.Diagnostics) string { + // When environment variables UserProjectOverride and BillingProject are set for the provider, + // the header X-Goog-User-Project is set for the API requests. + // But it causes an error when calling GetCurrUserEmail. Set the project to be "NO_BILLING_PROJECT_OVERRIDE". + // And then it triggers the header X-Goog-User-Project to be set to empty string. + + // See https://github.com/golang/oauth2/issues/306 for a recommendation to do this from a Go maintainer + // URL retrieved from https://accounts.google.com/.well-known/openid-configuration + res, d := SendFrameworkRequest(p, "GET", "NO_BILLING_PROJECT_OVERRIDE", "https://openidconnect.googleapis.com/v1/userinfo", userAgent, nil) + diags.Append(d...) + + if diags.HasError() { + tflog.Info(p.Context, "error retrieving userinfo for your provider credentials. 
have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope?") + return "" + } + if res["email"] == nil { + diags.AddError("error retrieving email from userinfo.", "email was nil in the response.") + return "" + } + return res["email"].(string) +} + +func GenerateFrameworkUserAgentString(metaData *fwmodels.ProviderMetaModel, currUserAgent string) string { + if metaData != nil && !metaData.ModuleName.IsNull() && metaData.ModuleName.ValueString() != "" { + return strings.Join([]string{currUserAgent, metaData.ModuleName.ValueString()}, " ") + } + + return currUserAgent +} + +func HandleDatasourceNotFoundError(ctx context.Context, err error, state *tfsdk.State, resource string, diags *diag.Diagnostics) { + if transport_tpg.IsGoogleApiErrorWithCode(err, 404) { + tflog.Warn(ctx, fmt.Sprintf("Removing %s because it's gone", resource)) + // The resource doesn't exist anymore + state.RemoveResource(ctx) + } + + diags.AddError(fmt.Sprintf("Error when reading or editing %s", resource), err.Error()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/game_services_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/game_services_operation.go deleted file mode 100644 index 5d9efc6abb..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/game_services_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type GameServicesOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *GameServicesOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. - url := fmt.Sprintf("%s%s", w.Config.GameServicesBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createGameServicesWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*GameServicesOperationWaiter, error) { - w := &GameServicesOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func GameServicesOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createGameServicesWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func GameServicesOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createGameServicesWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/gke_backup_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/gke_backup_operation.go deleted file mode 100644 index 0cbb9b1d33..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/gke_backup_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type GKEBackupOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *GKEBackupOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.GKEBackupBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createGKEBackupWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*GKEBackupOperationWaiter, error) { - w := &GKEBackupOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func GKEBackupOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createGKEBackupWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func GKEBackupOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createGKEBackupWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/gke_hub_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/gke_hub_operation.go deleted file mode 100644 index b0f31ddff2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/gke_hub_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type GKEHubOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *GKEHubOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.GKEHubBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createGKEHubWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*GKEHubOperationWaiter, error) { - w := &GKEHubOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func GKEHubOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createGKEHubWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func GKEHubOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createGKEHubWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/gkeonprem_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/gkeonprem_operation.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/gkeonprem_operation.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/hashcode.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/hashcode.go deleted file mode 100644 index 8df1e7a7de..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/hashcode.go +++ /dev/null @@ -1,22 +0,0 @@ -package google - -import ( - "hash/crc32" -) - -// hashcode hashes a string to a unique hashcode. -// -// crc32 returns a uint32, but for our use we need -// and non negative integer. Here we cast to an integer -// and invert it if the result is negative. 
-func hashcode(s string) int { - v := int(crc32.ChecksumIEEE([]byte(s))) - if v >= 0 { - return v - } - if -v >= 0 { - return -v - } - // v == MinInt - return 0 -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/healthcare_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/healthcare_utils.go deleted file mode 100644 index ada0f626fe..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/healthcare_utils.go +++ /dev/null @@ -1,235 +0,0 @@ -package google - -import ( - "fmt" - "regexp" - "strings" -) - -type healthcareDatasetId struct { - Project string - Location string - Name string -} - -func (s *healthcareDatasetId) datasetId() string { - return fmt.Sprintf("projects/%s/locations/%s/datasets/%s", s.Project, s.Location, s.Name) -} - -func (s *healthcareDatasetId) terraformId() string { - return fmt.Sprintf("%s/%s/%s", s.Project, s.Location, s.Name) -} - -func parseHealthcareDatasetId(id string, config *Config) (*healthcareDatasetId, error) { - parts := strings.Split(id, "/") - - datasetIdRegex := regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})$") - datasetIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})$") - datasetRelativeLinkRegex := regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/datasets/([a-zA-Z0-9_-]{1,256})$") - - if datasetIdRegex.MatchString(id) { - return &healthcareDatasetId{ - Project: parts[0], - Location: parts[1], - Name: parts[2], - }, nil - } - - if datasetIdWithoutProjectRegex.MatchString(id) { - if config.Project == "" { - return nil, fmt.Errorf("The default project for the provider must be set when using the `{location}/{datasetName}` id format.") - } - - return &healthcareDatasetId{ - Project: config.Project, - Location: parts[0], - Name: parts[1], - }, nil - } - - if parts := 
datasetRelativeLinkRegex.FindStringSubmatch(id); parts != nil { - return &healthcareDatasetId{ - Project: parts[1], - Location: parts[2], - Name: parts[3], - }, nil - } - return nil, fmt.Errorf("Invalid Dataset id format, expecting `{projectId}/{locationId}/{datasetName}` or `{locationId}/{datasetName}.`") -} - -type healthcareFhirStoreId struct { - DatasetId healthcareDatasetId - Name string -} - -func (s *healthcareFhirStoreId) fhirStoreId() string { - return fmt.Sprintf("%s/fhirStores/%s", s.DatasetId.datasetId(), s.Name) -} - -func (s *healthcareFhirStoreId) terraformId() string { - return fmt.Sprintf("%s/%s", s.DatasetId.terraformId(), s.Name) -} - -func parseHealthcareFhirStoreId(id string, config *Config) (*healthcareFhirStoreId, error) { - parts := strings.Split(id, "/") - - fhirStoreIdRegex := regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") - fhirStoreIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") - fhirStoreRelativeLinkRegex := regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/datasets/([a-zA-Z0-9_-]{1,256})/fhirStores/([a-zA-Z0-9_-]{1,256})$") - - if fhirStoreIdRegex.MatchString(id) { - return &healthcareFhirStoreId{ - DatasetId: healthcareDatasetId{ - Project: parts[0], - Location: parts[1], - Name: parts[2], - }, - Name: parts[3], - }, nil - } - - if fhirStoreIdWithoutProjectRegex.MatchString(id) { - if config.Project == "" { - return nil, fmt.Errorf("The default project for the provider must be set when using the `{location}/{datasetName}/{fhirStoreName}` id format.") - } - - return &healthcareFhirStoreId{ - DatasetId: healthcareDatasetId{ - Project: config.Project, - Location: parts[0], - Name: parts[1], - }, - Name: parts[2], - }, nil - } - - if parts := fhirStoreRelativeLinkRegex.FindStringSubmatch(id); parts != nil { - return &healthcareFhirStoreId{ - DatasetId: healthcareDatasetId{ - Project: parts[1], - 
Location: parts[2], - Name: parts[3], - }, - Name: parts[4], - }, nil - } - return nil, fmt.Errorf("Invalid FhirStore id format, expecting `{projectId}/{locationId}/{datasetName}/{fhirStoreName}` or `{locationId}/{datasetName}/{fhirStoreName}.`") -} - -type healthcareHl7V2StoreId struct { - DatasetId healthcareDatasetId - Name string -} - -func (s *healthcareHl7V2StoreId) hl7V2StoreId() string { - return fmt.Sprintf("%s/hl7V2Stores/%s", s.DatasetId.datasetId(), s.Name) -} - -func (s *healthcareHl7V2StoreId) terraformId() string { - return fmt.Sprintf("%s/%s", s.DatasetId.terraformId(), s.Name) -} - -func parseHealthcareHl7V2StoreId(id string, config *Config) (*healthcareHl7V2StoreId, error) { - parts := strings.Split(id, "/") - - hl7V2StoreIdRegex := regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") - hl7V2StoreIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") - hl7V2StoreRelativeLinkRegex := regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/datasets/([a-zA-Z0-9_-]{1,256})/hl7V2Stores/([a-zA-Z0-9_-]{1,256})$") - - if hl7V2StoreIdRegex.MatchString(id) { - return &healthcareHl7V2StoreId{ - DatasetId: healthcareDatasetId{ - Project: parts[0], - Location: parts[1], - Name: parts[2], - }, - Name: parts[3], - }, nil - } - - if hl7V2StoreIdWithoutProjectRegex.MatchString(id) { - if config.Project == "" { - return nil, fmt.Errorf("The default project for the provider must be set when using the `{location}/{datasetName}/{hl7V2StoreName}` id format.") - } - - return &healthcareHl7V2StoreId{ - DatasetId: healthcareDatasetId{ - Project: config.Project, - Location: parts[0], - Name: parts[1], - }, - Name: parts[2], - }, nil - } - - if parts := hl7V2StoreRelativeLinkRegex.FindStringSubmatch(id); parts != nil { - return &healthcareHl7V2StoreId{ - DatasetId: healthcareDatasetId{ - Project: parts[1], - Location: parts[2], - Name: parts[3], - }, 
- Name: parts[4], - }, nil - } - return nil, fmt.Errorf("Invalid Hl7V2Store id format, expecting `{projectId}/{locationId}/{datasetName}/{hl7V2StoreName}` or `{locationId}/{datasetName}/{hl7V2StoreName}.`") -} - -type healthcareDicomStoreId struct { - DatasetId healthcareDatasetId - Name string -} - -func (s *healthcareDicomStoreId) dicomStoreId() string { - return fmt.Sprintf("%s/dicomStores/%s", s.DatasetId.datasetId(), s.Name) -} - -func (s *healthcareDicomStoreId) terraformId() string { - return fmt.Sprintf("%s/%s", s.DatasetId.terraformId(), s.Name) -} - -func parseHealthcareDicomStoreId(id string, config *Config) (*healthcareDicomStoreId, error) { - parts := strings.Split(id, "/") - - dicomStoreIdRegex := regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") - dicomStoreIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") - dicomStoreRelativeLinkRegex := regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/datasets/([a-zA-Z0-9_-]{1,256})/dicomStores/([a-zA-Z0-9_-]{1,256})$") - - if dicomStoreIdRegex.MatchString(id) { - return &healthcareDicomStoreId{ - DatasetId: healthcareDatasetId{ - Project: parts[0], - Location: parts[1], - Name: parts[2], - }, - Name: parts[3], - }, nil - } - - if dicomStoreIdWithoutProjectRegex.MatchString(id) { - if config.Project == "" { - return nil, fmt.Errorf("The default project for the provider must be set when using the `{location}/{datasetName}/{dicomStoreName}` id format.") - } - - return &healthcareDicomStoreId{ - DatasetId: healthcareDatasetId{ - Project: config.Project, - Location: parts[0], - Name: parts[1], - }, - Name: parts[2], - }, nil - } - - if parts := dicomStoreRelativeLinkRegex.FindStringSubmatch(id); parts != nil { - return &healthcareDicomStoreId{ - DatasetId: healthcareDatasetId{ - Project: parts[1], - Location: parts[2], - Name: parts[3], - }, - Name: parts[4], - }, nil - } - return 
nil, fmt.Errorf("Invalid DicomStore id format, expecting `{projectId}/{locationId}/{datasetName}/{dicomStoreName}` or `{locationId}/{datasetName}/{dicomStoreName}.`") -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam2_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam2_operation.go deleted file mode 100644 index e776f8da51..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam2_operation.go +++ /dev/null @@ -1,60 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "time" -) - -type IAM2OperationWaiter struct { - Config *Config - UserAgent string - CommonOperationWaiter -} - -func (w *IAM2OperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.IAM2BasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", "", url, w.UserAgent, nil) -} - -func createIAM2Waiter(config *Config, op map[string]interface{}, activity, userAgent string) (*IAM2OperationWaiter, error) { - w := &IAM2OperationWaiter{ - Config: config, - UserAgent: userAgent, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func IAM2OperationWaitTime(config *Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createIAM2Waiter(config, op, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_access_context_manager_access_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_access_context_manager_access_policy.go deleted file mode 100644 index ad0ffd413a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_access_context_manager_access_policy.go +++ /dev/null @@ -1,167 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var AccessContextManagerAccessPolicyIamSchema = map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type AccessContextManagerAccessPolicyIamUpdater struct { - name string - d TerraformResourceData - Config *Config -} - -func AccessContextManagerAccessPolicyIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("name"); ok { - values["name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &AccessContextManagerAccessPolicyIamUpdater{ - name: values["name"], - d: d, - Config: config, - } - - if err := d.Set("name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return u, nil -} - -func AccessContextManagerAccessPolicyIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &AccessContextManagerAccessPolicyIamUpdater{ - name: values["name"], - d: d, - Config: config, - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u 
*AccessContextManagerAccessPolicyIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyAccessPolicyUrl("getIamPolicy") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", "", url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *AccessContextManagerAccessPolicyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyAccessPolicyUrl("setIamPolicy") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", "", url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *AccessContextManagerAccessPolicyIamUpdater) qualifyAccessPolicyUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{AccessContextManagerBasePath}}%s:%s", fmt.Sprintf("accessPolicies/%s", u.name), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *AccessContextManagerAccessPolicyIamUpdater) GetResourceId() string { - return fmt.Sprintf("accessPolicies/%s", u.name) -} - -func (u 
*AccessContextManagerAccessPolicyIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-accesscontextmanager-accesspolicy-%s", u.GetResourceId()) -} - -func (u *AccessContextManagerAccessPolicyIamUpdater) DescribeResource() string { - return fmt.Sprintf("accesscontextmanager accesspolicy %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_apigee_environment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_apigee_environment.go deleted file mode 100644 index 3b126eb27d..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_apigee_environment.go +++ /dev/null @@ -1,182 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var ApigeeEnvironmentIamSchema = map[string]*schema.Schema{ - "org_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "env_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ApigeeEnvironmentIamUpdater struct { - orgId string - envId string - d TerraformResourceData - Config *Config -} - -func ApigeeEnvironmentIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("org_id"); ok { - values["org_id"] = v.(string) - } - - if v, ok := d.GetOk("env_id"); ok { - values["env_id"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"(?P.+)/environments/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("env_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ApigeeEnvironmentIamUpdater{ - orgId: values["org_id"], - envId: values["env_id"], - d: d, - Config: config, - } - - if err := d.Set("org_id", u.orgId); err != nil { - return nil, fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("env_id", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting env_id: %s", err) - } - - return u, nil -} - -func ApigeeEnvironmentIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"(?P.+)/environments/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := 
&ApigeeEnvironmentIamUpdater{ - orgId: values["org_id"], - envId: values["env_id"], - d: d, - Config: config, - } - if err := d.Set("env_id", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting env_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ApigeeEnvironmentIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyEnvironmentUrl("getIamPolicy") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", "", url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *ApigeeEnvironmentIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyEnvironmentUrl("setIamPolicy") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", "", url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ApigeeEnvironmentIamUpdater) qualifyEnvironmentUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{ApigeeBasePath}}%s:%s", fmt.Sprintf("%s/environments/%s", u.orgId, u.envId), methodIdentifier) - url, err := 
replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ApigeeEnvironmentIamUpdater) GetResourceId() string { - return fmt.Sprintf("%s/environments/%s", u.orgId, u.envId) -} - -func (u *ApigeeEnvironmentIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-apigee-environment-%s", u.GetResourceId()) -} - -func (u *ApigeeEnvironmentIamUpdater) DescribeResource() string { - return fmt.Sprintf("apigee environment %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_artifact_registry_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_artifact_registry_repository.go deleted file mode 100644 index 359e58fb2e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_artifact_registry_repository.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var ArtifactRegistryRepositoryIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "repository": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ArtifactRegistryRepositoryIamUpdater struct { - project string - location string - repository string - d TerraformResourceData - Config *Config -} - -func ArtifactRegistryRepositoryIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("repository"); ok { - values["repository"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/repositories/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("repository").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ArtifactRegistryRepositoryIamUpdater{ - project: values["project"], - 
location: values["location"], - repository: values["repository"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("repository", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting repository: %s", err) - } - - return u, nil -} - -func ArtifactRegistryRepositoryIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/repositories/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ArtifactRegistryRepositoryIamUpdater{ - project: values["project"], - location: values["location"], - repository: values["repository"], - d: d, - Config: config, - } - if err := d.Set("repository", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting repository: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ArtifactRegistryRepositoryIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyRepositoryUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, 
errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *ArtifactRegistryRepositoryIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyRepositoryUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ArtifactRegistryRepositoryIamUpdater) qualifyRepositoryUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{ArtifactRegistryBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/repositories/%s", u.project, u.location, u.repository), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ArtifactRegistryRepositoryIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/repositories/%s", u.project, u.location, u.repository) -} - -func (u *ArtifactRegistryRepositoryIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-artifactregistry-repository-%s", u.GetResourceId()) -} - -func (u *ArtifactRegistryRepositoryIamUpdater) DescribeResource() string { - return fmt.Sprintf("artifactregistry repository %q", 
u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_beta_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_beta_operation.go deleted file mode 100644 index 6a147cd4a9..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_beta_operation.go +++ /dev/null @@ -1,62 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "time" -) - -type IAMBetaOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *IAMBetaOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.IAMBetaBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createIAMBetaWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*IAMBetaOperationWaiter, error) { - w := &IAMBetaOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func IAMBetaOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createIAMBetaWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_analytics_hub_data_exchange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_analytics_hub_data_exchange.go deleted file mode 100644 index bf441ce017..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_analytics_hub_data_exchange.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var BigqueryAnalyticsHubDataExchangeIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "data_exchange_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type BigqueryAnalyticsHubDataExchangeIamUpdater struct { - project string - location string - dataExchangeId string - d TerraformResourceData - Config *Config -} - -func BigqueryAnalyticsHubDataExchangeIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("data_exchange_id"); ok { - values["data_exchange_id"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataExchanges/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("data_exchange_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := 
&BigqueryAnalyticsHubDataExchangeIamUpdater{ - project: values["project"], - location: values["location"], - dataExchangeId: values["data_exchange_id"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("data_exchange_id", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting data_exchange_id: %s", err) - } - - return u, nil -} - -func BigqueryAnalyticsHubDataExchangeIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataExchanges/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &BigqueryAnalyticsHubDataExchangeIamUpdater{ - project: values["project"], - location: values["location"], - dataExchangeId: values["data_exchange_id"], - d: d, - Config: config, - } - if err := d.Set("data_exchange_id", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting data_exchange_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *BigqueryAnalyticsHubDataExchangeIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyDataExchangeUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - 
return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *BigqueryAnalyticsHubDataExchangeIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyDataExchangeUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BigqueryAnalyticsHubDataExchangeIamUpdater) qualifyDataExchangeUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{BigqueryAnalyticsHubBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/dataExchanges/%s", u.project, u.location, u.dataExchangeId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *BigqueryAnalyticsHubDataExchangeIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/dataExchanges/%s", u.project, u.location, u.dataExchangeId) -} - -func (u *BigqueryAnalyticsHubDataExchangeIamUpdater) GetMutexKey() string { - return 
fmt.Sprintf("iam-bigqueryanalyticshub-dataexchange-%s", u.GetResourceId()) -} - -func (u *BigqueryAnalyticsHubDataExchangeIamUpdater) DescribeResource() string { - return fmt.Sprintf("bigqueryanalyticshub dataexchange %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_analytics_hub_listing.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_analytics_hub_listing.go deleted file mode 100644 index 8b046e5fea..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_analytics_hub_listing.go +++ /dev/null @@ -1,238 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var BigqueryAnalyticsHubListingIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "data_exchange_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "listing_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type BigqueryAnalyticsHubListingIamUpdater struct { - project string - location string - dataExchangeId string - listingId string - d TerraformResourceData - Config *Config -} - -func BigqueryAnalyticsHubListingIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("data_exchange_id"); ok { - values["data_exchange_id"] = v.(string) - } - - if v, ok := d.GetOk("listing_id"); ok { - values["listing_id"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataExchanges/(?P[^/]+)/listings/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", 
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("listing_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &BigqueryAnalyticsHubListingIamUpdater{ - project: values["project"], - location: values["location"], - dataExchangeId: values["data_exchange_id"], - listingId: values["listing_id"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("data_exchange_id", u.dataExchangeId); err != nil { - return nil, fmt.Errorf("Error setting data_exchange_id: %s", err) - } - if err := d.Set("listing_id", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting listing_id: %s", err) - } - - return u, nil -} - -func BigqueryAnalyticsHubListingIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataExchanges/(?P[^/]+)/listings/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &BigqueryAnalyticsHubListingIamUpdater{ - project: values["project"], - location: values["location"], - dataExchangeId: values["data_exchange_id"], - listingId: values["listing_id"], - d: d, - Config: config, - } - if err := d.Set("listing_id", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting listing_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u 
*BigqueryAnalyticsHubListingIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyListingUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *BigqueryAnalyticsHubListingIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyListingUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BigqueryAnalyticsHubListingIamUpdater) qualifyListingUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{BigqueryAnalyticsHubBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/dataExchanges/%s/listings/%s", u.project, u.location, u.dataExchangeId, u.listingId), methodIdentifier) - url, err := replaceVars(u.d, 
u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *BigqueryAnalyticsHubListingIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/dataExchanges/%s/listings/%s", u.project, u.location, u.dataExchangeId, u.listingId) -} - -func (u *BigqueryAnalyticsHubListingIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-bigqueryanalyticshub-listing-%s", u.GetResourceId()) -} - -func (u *BigqueryAnalyticsHubListingIamUpdater) DescribeResource() string { - return fmt.Sprintf("bigqueryanalyticshub listing %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_connection.go deleted file mode 100644 index a89a32a8d2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_connection.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var BigqueryConnectionConnectionIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "connection_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type BigqueryConnectionConnectionIamUpdater struct { - project string - location string - connectionId string - d TerraformResourceData - Config *Config -} - -func BigqueryConnectionConnectionIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("connection_id"); ok { - values["connection_id"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("connection_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &BigqueryConnectionConnectionIamUpdater{ - project: 
values["project"], - location: values["location"], - connectionId: values["connection_id"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("connection_id", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting connection_id: %s", err) - } - - return u, nil -} - -func BigqueryConnectionConnectionIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &BigqueryConnectionConnectionIamUpdater{ - project: values["project"], - location: values["location"], - connectionId: values["connection_id"], - d: d, - Config: config, - } - if err := d.Set("connection_id", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting connection_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *BigqueryConnectionConnectionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyConnectionUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, 
obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *BigqueryConnectionConnectionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyConnectionUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BigqueryConnectionConnectionIamUpdater) qualifyConnectionUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{BigqueryConnectionBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/connections/%s", u.project, u.location, u.connectionId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *BigqueryConnectionConnectionIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/connections/%s", u.project, u.location, u.connectionId) -} - -func (u *BigqueryConnectionConnectionIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-bigqueryconnection-connection-%s", u.GetResourceId()) -} - -func (u *BigqueryConnectionConnectionIamUpdater) DescribeResource() string { - return 
fmt.Sprintf("bigqueryconnection connection %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_datapolicy_data_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_datapolicy_data_policy.go deleted file mode 100644 index 82e1b76505..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_datapolicy_data_policy.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var BigqueryDatapolicyDataPolicyIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "data_policy_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type BigqueryDatapolicyDataPolicyIamUpdater struct { - project string - location string - dataPolicyId string - d TerraformResourceData - Config *Config -} - -func BigqueryDatapolicyDataPolicyIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := 
make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("data_policy_id"); ok { - values["data_policy_id"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("data_policy_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &BigqueryDatapolicyDataPolicyIamUpdater{ - project: values["project"], - location: values["location"], - dataPolicyId: values["data_policy_id"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("data_policy_id", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting data_policy_id: %s", err) - } - - return u, nil -} - -func BigqueryDatapolicyDataPolicyIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", 
"(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &BigqueryDatapolicyDataPolicyIamUpdater{ - project: values["project"], - location: values["location"], - dataPolicyId: values["data_policy_id"], - d: d, - Config: config, - } - if err := d.Set("data_policy_id", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting data_policy_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *BigqueryDatapolicyDataPolicyIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyDataPolicyUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *BigqueryDatapolicyDataPolicyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyDataPolicyUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, 
u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BigqueryDatapolicyDataPolicyIamUpdater) qualifyDataPolicyUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{BigqueryDatapolicyBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/dataPolicies/%s", u.project, u.location, u.dataPolicyId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *BigqueryDatapolicyDataPolicyIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/dataPolicies/%s", u.project, u.location, u.dataPolicyId) -} - -func (u *BigqueryDatapolicyDataPolicyIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-bigquerydatapolicy-datapolicy-%s", u.GetResourceId()) -} - -func (u *BigqueryDatapolicyDataPolicyIamUpdater) DescribeResource() string { - return fmt.Sprintf("bigquerydatapolicy datapolicy %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_table.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_table.go deleted file mode 100644 index fce941f67f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_table.go +++ /dev/null @@ -1,221 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var BigQueryTableIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "table_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type BigQueryTableIamUpdater struct { - project string - datasetId string - tableId string - d TerraformResourceData - Config *Config -} - -func BigQueryTableIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("dataset_id"); ok { - values["dataset_id"] = v.(string) - } - - if v, ok := d.GetOk("table_id"); ok { - values["table_id"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/datasets/(?P[^/]+)/tables/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("table_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &BigQueryTableIamUpdater{ - project: values["project"], - datasetId: values["dataset_id"], - tableId: values["table_id"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err 
:= d.Set("dataset_id", u.datasetId); err != nil { - return nil, fmt.Errorf("Error setting dataset_id: %s", err) - } - if err := d.Set("table_id", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting table_id: %s", err) - } - - return u, nil -} - -func BigQueryTableIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/datasets/(?P[^/]+)/tables/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &BigQueryTableIamUpdater{ - project: values["project"], - datasetId: values["dataset_id"], - tableId: values["table_id"], - d: d, - Config: config, - } - if err := d.Set("table_id", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting table_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *BigQueryTableIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyTableUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": 1, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: 
{{err}}", err) - } - - return out, nil -} - -func (u *BigQueryTableIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - // This is an override of the existing version that might have been set in the resource_iam_member|policy|binding code - json["version"] = 1 - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTableUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BigQueryTableIamUpdater) qualifyTableUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{BigQueryBasePath}}%s:%s", fmt.Sprintf("projects/%s/datasets/%s/tables/%s", u.project, u.datasetId, u.tableId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *BigQueryTableIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/datasets/%s/tables/%s", u.project, u.datasetId, u.tableId) -} - -func (u *BigQueryTableIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-bigquery-table-%s", u.GetResourceId()) -} - -func (u *BigQueryTableIamUpdater) DescribeResource() string { - return fmt.Sprintf("bigquery table %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_binary_authorization_attestor.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_binary_authorization_attestor.go deleted file mode 100644 index 8a26f75aaf..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_binary_authorization_attestor.go +++ /dev/null @@ -1,199 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var BinaryAuthorizationAttestorIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "attestor": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type BinaryAuthorizationAttestorIamUpdater struct { - project string - attestor string - d TerraformResourceData - Config *Config -} - -func BinaryAuthorizationAttestorIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("attestor"); ok { - values["attestor"] = v.(string) - } - - // We may have gotten either a long or 
short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/attestors/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("attestor").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &BinaryAuthorizationAttestorIamUpdater{ - project: values["project"], - attestor: values["attestor"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("attestor", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting attestor: %s", err) - } - - return u, nil -} - -func BinaryAuthorizationAttestorIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/attestors/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &BinaryAuthorizationAttestorIamUpdater{ - project: values["project"], - attestor: values["attestor"], - d: d, - Config: config, - } - if err := d.Set("attestor", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting attestor: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *BinaryAuthorizationAttestorIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyAttestorUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - 
return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *BinaryAuthorizationAttestorIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyAttestorUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BinaryAuthorizationAttestorIamUpdater) qualifyAttestorUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{BinaryAuthorizationBasePath}}%s:%s", fmt.Sprintf("projects/%s/attestors/%s", u.project, u.attestor), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *BinaryAuthorizationAttestorIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/attestors/%s", u.project, u.attestor) -} - -func (u *BinaryAuthorizationAttestorIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-binaryauthorization-attestor-%s", u.GetResourceId()) -} - -func (u *BinaryAuthorizationAttestorIamUpdater) DescribeResource() string { - return fmt.Sprintf("binaryauthorization attestor %q", u.GetResourceId()) -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloud_run_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloud_run_service.go deleted file mode 100644 index 70dcec606a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloud_run_service.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var CloudRunServiceIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "service": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type CloudRunServiceIamUpdater struct { - project string - location string - service string - d TerraformResourceData - Config *Config -} - -func CloudRunServiceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: 
%s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("service"); ok { - values["service"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("service").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudRunServiceIamUpdater{ - project: values["project"], - location: values["location"], - service: values["service"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("service", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting service: %s", err) - } - - return u, nil -} - -func CloudRunServiceIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudRunServiceIamUpdater{ - project: values["project"], - location: values["location"], - service: values["service"], - 
d: d, - Config: config, - } - if err := d.Set("service", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting service: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *CloudRunServiceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyServiceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj, isCloudRunCreationConflict) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *CloudRunServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyServiceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate), isCloudRunCreationConflict) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *CloudRunServiceIamUpdater) qualifyServiceUrl(methodIdentifier string) (string, error) { - urlTemplate := 
fmt.Sprintf("{{CloudRunBasePath}}%s:%s", fmt.Sprintf("v1/projects/%s/locations/%s/services/%s", u.project, u.location, u.service), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *CloudRunServiceIamUpdater) GetResourceId() string { - return fmt.Sprintf("v1/projects/%s/locations/%s/services/%s", u.project, u.location, u.service) -} - -func (u *CloudRunServiceIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-cloudrun-service-%s", u.GetResourceId()) -} - -func (u *CloudRunServiceIamUpdater) DescribeResource() string { - return fmt.Sprintf("cloudrun service %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloud_run_v2_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloud_run_v2_job.go deleted file mode 100644 index 1794eab1df..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloud_run_v2_job.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var CloudRunV2JobIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type CloudRunV2JobIamUpdater struct { - project string - location string - name string - d TerraformResourceData - Config *Config -} - -func CloudRunV2JobIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("name"); ok { - values["name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/jobs/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudRunV2JobIamUpdater{ - project: values["project"], - location: values["location"], - name: values["name"], - d: d, - Config: config, - } - - if err 
:= d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return u, nil -} - -func CloudRunV2JobIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/jobs/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudRunV2JobIamUpdater{ - project: values["project"], - location: values["location"], - name: values["name"], - d: d, - Config: config, - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *CloudRunV2JobIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyJobUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, 
errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *CloudRunV2JobIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyJobUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *CloudRunV2JobIamUpdater) qualifyJobUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{CloudRunV2BasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/jobs/%s", u.project, u.location, u.name), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *CloudRunV2JobIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/jobs/%s", u.project, u.location, u.name) -} - -func (u *CloudRunV2JobIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-cloudrunv2-job-%s", u.GetResourceId()) -} - -func (u *CloudRunV2JobIamUpdater) DescribeResource() string { - return fmt.Sprintf("cloudrunv2 job %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloud_run_v2_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloud_run_v2_service.go deleted file mode 100644 index b0dd3eace2..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloud_run_v2_service.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var CloudRunV2ServiceIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type CloudRunV2ServiceIamUpdater struct { - project string - location string - name string - d TerraformResourceData - Config *Config -} - -func CloudRunV2ServiceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := 
d.GetOk("name"); ok { - values["name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudRunV2ServiceIamUpdater{ - project: values["project"], - location: values["location"], - name: values["name"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return u, nil -} - -func CloudRunV2ServiceIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudRunV2ServiceIamUpdater{ - project: values["project"], - location: values["location"], - name: values["name"], - d: d, - Config: config, - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *CloudRunV2ServiceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, 
err := u.qualifyServiceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *CloudRunV2ServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyServiceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *CloudRunV2ServiceIamUpdater) qualifyServiceUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{CloudRunV2BasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/services/%s", u.project, u.location, u.name), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *CloudRunV2ServiceIamUpdater) GetResourceId() string { - return 
fmt.Sprintf("projects/%s/locations/%s/services/%s", u.project, u.location, u.name) -} - -func (u *CloudRunV2ServiceIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-cloudrunv2-service-%s", u.GetResourceId()) -} - -func (u *CloudRunV2ServiceIamUpdater) DescribeResource() string { - return fmt.Sprintf("cloudrunv2 service %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloud_tasks_queue.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloud_tasks_queue.go deleted file mode 100644 index 6aa79bd0fa..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloud_tasks_queue.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var CloudTasksQueueIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type CloudTasksQueueIamUpdater struct { - project string - location string - name string - d TerraformResourceData - Config *Config -} - -func CloudTasksQueueIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("name"); ok { - values["name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/queues/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudTasksQueueIamUpdater{ - project: values["project"], - location: values["location"], - name: values["name"], - d: d, - Config: config, - } - 
- if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return u, nil -} - -func CloudTasksQueueIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/queues/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudTasksQueueIamUpdater{ - project: values["project"], - location: values["location"], - name: values["name"], - d: d, - Config: config, - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *CloudTasksQueueIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyQueueUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil 
{ - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *CloudTasksQueueIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyQueueUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *CloudTasksQueueIamUpdater) qualifyQueueUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{CloudTasksBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/queues/%s", u.project, u.location, u.name), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *CloudTasksQueueIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/queues/%s", u.project, u.location, u.name) -} - -func (u *CloudTasksQueueIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-cloudtasks-queue-%s", u.GetResourceId()) -} - -func (u *CloudTasksQueueIamUpdater) DescribeResource() string { - return fmt.Sprintf("cloudtasks queue %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloudfunctions2_function.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloudfunctions2_function.go deleted file mode 100644 index 
38e7118808..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloudfunctions2_function.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var Cloudfunctions2functionIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "cloud_function": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type Cloudfunctions2functionIamUpdater struct { - project string - location string - cloudFunction string - d TerraformResourceData - Config *Config -} - -func Cloudfunctions2functionIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: 
%s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("cloud_function"); ok { - values["cloud_function"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("cloud_function").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &Cloudfunctions2functionIamUpdater{ - project: values["project"], - location: values["location"], - cloudFunction: values["cloud_function"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("cloud_function", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting cloud_function: %s", err) - } - - return u, nil -} - -func Cloudfunctions2functionIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &Cloudfunctions2functionIamUpdater{ - project: values["project"], - location: values["location"], - cloudFunction: values["cloud_function"], - d: d, - Config: config, - } - if err := d.Set("cloud_function", u.GetResourceId()); err != nil { - return fmt.Errorf("Error 
setting cloud_function: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *Cloudfunctions2functionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyfunctionUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *Cloudfunctions2functionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyfunctionUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *Cloudfunctions2functionIamUpdater) qualifyfunctionUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{Cloudfunctions2BasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/functions/%s", u.project, u.location, u.cloudFunction), 
methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *Cloudfunctions2functionIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/functions/%s", u.project, u.location, u.cloudFunction) -} - -func (u *Cloudfunctions2functionIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-cloudfunctions2-function-%s", u.GetResourceId()) -} - -func (u *Cloudfunctions2functionIamUpdater) DescribeResource() string { - return fmt.Sprintf("cloudfunctions2 function %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloudfunctions_function.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloudfunctions_function.go deleted file mode 100644 index 1356c10502..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloudfunctions_function.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var CloudFunctionsCloudFunctionIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "cloud_function": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type CloudFunctionsCloudFunctionIamUpdater struct { - project string - region string - cloudFunction string - d TerraformResourceData - Config *Config -} - -func CloudFunctionsCloudFunctionIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - region, _ := getRegion(d, config) - if region != "" { - if err := d.Set("region", region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - } - values["region"] = region - if v, ok := d.GetOk("cloud_function"); ok { - values["cloud_function"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("cloud_function").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudFunctionsCloudFunctionIamUpdater{ - project: values["project"], - 
region: values["region"], - cloudFunction: values["cloud_function"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", u.region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("cloud_function", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting cloud_function: %s", err) - } - - return u, nil -} - -func CloudFunctionsCloudFunctionIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - region, _ := getRegion(d, config) - if region != "" { - values["region"] = region - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudFunctionsCloudFunctionIamUpdater{ - project: values["project"], - region: values["region"], - cloudFunction: values["cloud_function"], - d: d, - Config: config, - } - if err := d.Set("cloud_function", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting cloud_function: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *CloudFunctionsCloudFunctionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyCloudFunctionUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, 
errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *CloudFunctionsCloudFunctionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyCloudFunctionUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *CloudFunctionsCloudFunctionIamUpdater) qualifyCloudFunctionUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{CloudFunctionsBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/functions/%s", u.project, u.region, u.cloudFunction), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *CloudFunctionsCloudFunctionIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/functions/%s", u.project, u.region, u.cloudFunction) -} - -func (u *CloudFunctionsCloudFunctionIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-cloudfunctions-cloudfunction-%s", u.GetResourceId()) -} - -func (u *CloudFunctionsCloudFunctionIamUpdater) DescribeResource() string { - return fmt.Sprintf("cloudfunctions cloudfunction %q", 
u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloudiot_registry.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloudiot_registry.go deleted file mode 100644 index e0616b56be..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_cloudiot_registry.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var CloudIotDeviceRegistryIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type CloudIotDeviceRegistryIamUpdater struct { - project string - region string - name string - d TerraformResourceData - Config *Config -} - -func CloudIotDeviceRegistryIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - 
return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - region, _ := getRegion(d, config) - if region != "" { - if err := d.Set("region", region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - } - values["region"] = region - if v, ok := d.GetOk("name"); ok { - values["name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/registries/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudIotDeviceRegistryIamUpdater{ - project: values["project"], - region: values["region"], - name: values["name"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", u.region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return u, nil -} - -func CloudIotDeviceRegistryIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - region, _ := getRegion(d, config) - if region != "" { - values["region"] = region - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/registries/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudIotDeviceRegistryIamUpdater{ - project: values["project"], - region: values["region"], - name: 
values["name"], - d: d, - Config: config, - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *CloudIotDeviceRegistryIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyDeviceRegistryUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *CloudIotDeviceRegistryIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyDeviceRegistryUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *CloudIotDeviceRegistryIamUpdater) qualifyDeviceRegistryUrl(methodIdentifier string) (string, error) { - urlTemplate := 
fmt.Sprintf("{{CloudIotBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/registries/%s", u.project, u.region, u.name), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *CloudIotDeviceRegistryIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/registries/%s", u.project, u.region, u.name) -} - -func (u *CloudIotDeviceRegistryIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-cloudiot-deviceregistry-%s", u.GetResourceId()) -} - -func (u *CloudIotDeviceRegistryIamUpdater) DescribeResource() string { - return fmt.Sprintf("cloudiot deviceregistry %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_disk.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_disk.go deleted file mode 100644 index c0dee7cfd5..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_disk.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var ComputeDiskIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ComputeDiskIamUpdater struct { - project string - zone string - name string - d TerraformResourceData - Config *Config -} - -func ComputeDiskIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - zone, _ := getZone(d, config) - if zone != "" { - if err := d.Set("zone", zone); err != nil { - return nil, fmt.Errorf("Error setting zone: %s", err) - } - } - values["zone"] = zone - if v, ok := d.GetOk("name"); ok { - values["name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/disks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeDiskIamUpdater{ - project: values["project"], - zone: values["zone"], - name: values["name"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, 
fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("zone", u.zone); err != nil { - return nil, fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return u, nil -} - -func ComputeDiskIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - zone, _ := getZone(d, config) - if zone != "" { - values["zone"] = zone - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/disks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeDiskIamUpdater{ - project: values["project"], - zone: values["zone"], - name: values["name"], - d: d, - Config: config, - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ComputeDiskIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyDiskUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} 
- -func (u *ComputeDiskIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyDiskUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ComputeDiskIamUpdater) qualifyDiskUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/zones/%s/disks/%s", u.project, u.zone, u.name), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ComputeDiskIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/zones/%s/disks/%s", u.project, u.zone, u.name) -} - -func (u *ComputeDiskIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-compute-disk-%s", u.GetResourceId()) -} - -func (u *ComputeDiskIamUpdater) DescribeResource() string { - return fmt.Sprintf("compute disk %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_image.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_image.go deleted file mode 100644 index d118d445d3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_image.go +++ /dev/null @@ -1,203 +0,0 @@ -// 
---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var ComputeImageIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "image": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ComputeImageIamUpdater struct { - project string - image string - d TerraformResourceData - Config *Config -} - -func ComputeImageIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("image"); ok { - values["image"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/images/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("image").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeImageIamUpdater{ - project: values["project"], - image: values["image"], - d: d, - Config: 
config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("image", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting image: %s", err) - } - - return u, nil -} - -func ComputeImageIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/images/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeImageIamUpdater{ - project: values["project"], - image: values["image"], - d: d, - Config: config, - } - if err := d.Set("image", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting image: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ComputeImageIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyImageUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - url, err = addQueryParams(url, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", IamPolicyVersion)}) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil 
-} - -func (u *ComputeImageIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyImageUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ComputeImageIamUpdater) qualifyImageUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/global/images/%s", u.project, u.image), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ComputeImageIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/global/images/%s", u.project, u.image) -} - -func (u *ComputeImageIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-compute-image-%s", u.GetResourceId()) -} - -func (u *ComputeImageIamUpdater) DescribeResource() string { - return fmt.Sprintf("compute image %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_instance.go deleted file mode 100644 index 2c3abc7485..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_instance.go +++ /dev/null @@ -1,227 +0,0 @@ -// 
---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var ComputeInstanceIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "instance_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ComputeInstanceIamUpdater struct { - project string - zone string - instanceName string - d TerraformResourceData - Config *Config -} - -func ComputeInstanceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - zone, _ := getZone(d, config) - if zone != "" { - if err := d.Set("zone", zone); err != nil { - return nil, fmt.Errorf("Error setting zone: %s", err) - } - } - values["zone"] = zone - if v, ok := d.GetOk("instance_name"); ok { - values["instance_name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := 
getImportIdQualifiers([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("instance_name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeInstanceIamUpdater{ - project: values["project"], - zone: values["zone"], - instanceName: values["instance_name"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("zone", u.zone); err != nil { - return nil, fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("instance_name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting instance_name: %s", err) - } - - return u, nil -} - -func ComputeInstanceIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - zone, _ := getZone(d, config) - if zone != "" { - values["zone"] = zone - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeInstanceIamUpdater{ - project: values["project"], - zone: values["zone"], - instanceName: values["instance_name"], - d: d, - Config: config, - } - if err := d.Set("instance_name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting instance_name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ComputeInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyInstanceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { 
- return nil, err - } - var obj map[string]interface{} - url, err = addQueryParams(url, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", IamPolicyVersion)}) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *ComputeInstanceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyInstanceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ComputeInstanceIamUpdater) qualifyInstanceUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/zones/%s/instances/%s", u.project, u.zone, u.instanceName), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ComputeInstanceIamUpdater) GetResourceId() string { - return 
fmt.Sprintf("projects/%s/zones/%s/instances/%s", u.project, u.zone, u.instanceName) -} - -func (u *ComputeInstanceIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-compute-instance-%s", u.GetResourceId()) -} - -func (u *ComputeInstanceIamUpdater) DescribeResource() string { - return fmt.Sprintf("compute instance %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_region_disk.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_region_disk.go deleted file mode 100644 index 8996180365..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_region_disk.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var ComputeRegionDiskIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ComputeRegionDiskIamUpdater struct { - project string - region string - name string - d TerraformResourceData - Config *Config -} - -func ComputeRegionDiskIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - region, _ := getRegion(d, config) - if region != "" { - if err := d.Set("region", region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - } - values["region"] = region - if v, ok := d.GetOk("name"); ok { - values["name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/disks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeRegionDiskIamUpdater{ - project: values["project"], - region: values["region"], - name: values["name"], - d: d, - Config: config, - } - - if err := 
d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", u.region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return u, nil -} - -func ComputeRegionDiskIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - region, _ := getRegion(d, config) - if region != "" { - values["region"] = region - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/disks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeRegionDiskIamUpdater{ - project: values["project"], - region: values["region"], - name: values["name"], - d: d, - Config: config, - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ComputeRegionDiskIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyRegionDiskUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, 
errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *ComputeRegionDiskIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyRegionDiskUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ComputeRegionDiskIamUpdater) qualifyRegionDiskUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/regions/%s/disks/%s", u.project, u.region, u.name), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ComputeRegionDiskIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/regions/%s/disks/%s", u.project, u.region, u.name) -} - -func (u *ComputeRegionDiskIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-compute-regiondisk-%s", u.GetResourceId()) -} - -func (u *ComputeRegionDiskIamUpdater) DescribeResource() string { - return fmt.Sprintf("compute regiondisk %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_snapshot.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_snapshot.go deleted file mode 100644 index 73e6544168..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_snapshot.go +++ /dev/null @@ -1,199 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var ComputeSnapshotIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ComputeSnapshotIamUpdater struct { - project string - name string - d TerraformResourceData - Config *Config -} - -func ComputeSnapshotIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("name"); ok { - values["name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/snapshots/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) - if err != nil { - return nil, err 
- } - - for k, v := range m { - values[k] = v - } - - u := &ComputeSnapshotIamUpdater{ - project: values["project"], - name: values["name"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return u, nil -} - -func ComputeSnapshotIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/snapshots/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeSnapshotIamUpdater{ - project: values["project"], - name: values["name"], - d: d, - Config: config, - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ComputeSnapshotIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifySnapshotUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, 
nil -} - -func (u *ComputeSnapshotIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifySnapshotUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ComputeSnapshotIamUpdater) qualifySnapshotUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/global/snapshots/%s", u.project, u.name), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ComputeSnapshotIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/global/snapshots/%s", u.project, u.name) -} - -func (u *ComputeSnapshotIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-compute-snapshot-%s", u.GetResourceId()) -} - -func (u *ComputeSnapshotIamUpdater) DescribeResource() string { - return fmt.Sprintf("compute snapshot %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_subnetwork.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_subnetwork.go deleted file mode 100644 index 9e7b1649ee..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_compute_subnetwork.go +++ /dev/null @@ -1,227 +0,0 @@ 
-// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var ComputeSubnetworkIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "subnetwork": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ComputeSubnetworkIamUpdater struct { - project string - region string - subnetwork string - d TerraformResourceData - Config *Config -} - -func ComputeSubnetworkIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - region, _ := getRegion(d, config) - if region != "" { - if err := d.Set("region", region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - } - values["region"] = region - if v, ok := d.GetOk("subnetwork"); ok { - values["subnetwork"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible 
- m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/subnetworks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("subnetwork").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeSubnetworkIamUpdater{ - project: values["project"], - region: values["region"], - subnetwork: values["subnetwork"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", u.region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("subnetwork", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting subnetwork: %s", err) - } - - return u, nil -} - -func ComputeSubnetworkIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - region, _ := getRegion(d, config) - if region != "" { - values["region"] = region - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/subnetworks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeSubnetworkIamUpdater{ - project: values["project"], - region: values["region"], - subnetwork: values["subnetwork"], - d: d, - Config: config, - } - if err := d.Set("subnetwork", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting subnetwork: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ComputeSubnetworkIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifySubnetworkUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, 
u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - url, err = addQueryParams(url, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", IamPolicyVersion)}) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *ComputeSubnetworkIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifySubnetworkUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ComputeSubnetworkIamUpdater) qualifySubnetworkUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/regions/%s/subnetworks/%s", u.project, u.region, u.subnetwork), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ComputeSubnetworkIamUpdater) 
GetResourceId() string { - return fmt.Sprintf("projects/%s/regions/%s/subnetworks/%s", u.project, u.region, u.subnetwork) -} - -func (u *ComputeSubnetworkIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-compute-subnetwork-%s", u.GetResourceId()) -} - -func (u *ComputeSubnetworkIamUpdater) DescribeResource() string { - return fmt.Sprintf("compute subnetwork %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_catalog_entry_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_catalog_entry_group.go deleted file mode 100644 index bc382602b2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_catalog_entry_group.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var DataCatalogEntryGroupIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "entry_group": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type DataCatalogEntryGroupIamUpdater struct { - project string - region string - entryGroup string - d TerraformResourceData - Config *Config -} - -func DataCatalogEntryGroupIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - region, _ := getRegion(d, config) - if region != "" { - if err := d.Set("region", region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - } - values["region"] = region - if v, ok := d.GetOk("entry_group"); ok { - values["entry_group"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/entryGroups/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("entry_group").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &DataCatalogEntryGroupIamUpdater{ - project: values["project"], - region: values["region"], - entryGroup: 
values["entry_group"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", u.region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("entry_group", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting entry_group: %s", err) - } - - return u, nil -} - -func DataCatalogEntryGroupIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - region, _ := getRegion(d, config) - if region != "" { - values["region"] = region - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/entryGroups/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &DataCatalogEntryGroupIamUpdater{ - project: values["project"], - region: values["region"], - entryGroup: values["entry_group"], - d: d, - Config: config, - } - if err := d.Set("entry_group", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting entry_group: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *DataCatalogEntryGroupIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyEntryGroupUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", 
u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *DataCatalogEntryGroupIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyEntryGroupUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataCatalogEntryGroupIamUpdater) qualifyEntryGroupUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{DataCatalogBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/entryGroups/%s", u.project, u.region, u.entryGroup), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *DataCatalogEntryGroupIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/entryGroups/%s", u.project, u.region, u.entryGroup) -} - -func (u *DataCatalogEntryGroupIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-datacatalog-entrygroup-%s", u.GetResourceId()) -} - -func (u *DataCatalogEntryGroupIamUpdater) DescribeResource() string { - return fmt.Sprintf("datacatalog entrygroup %q", u.GetResourceId()) -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_catalog_policy_tag.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_catalog_policy_tag.go deleted file mode 100644 index 4b002bae13..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_catalog_policy_tag.go +++ /dev/null @@ -1,167 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var DataCatalogPolicyTagIamSchema = map[string]*schema.Schema{ - "policy_tag": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type DataCatalogPolicyTagIamUpdater struct { - policyTag string - d TerraformResourceData - Config *Config -} - -func DataCatalogPolicyTagIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("policy_tag"); ok { - values["policy_tag"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"(?P.+)"}, d, config, d.Get("policy_tag").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } 
- - u := &DataCatalogPolicyTagIamUpdater{ - policyTag: values["policy_tag"], - d: d, - Config: config, - } - - if err := d.Set("policy_tag", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting policy_tag: %s", err) - } - - return u, nil -} - -func DataCatalogPolicyTagIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"(?P.+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &DataCatalogPolicyTagIamUpdater{ - policyTag: values["policy_tag"], - d: d, - Config: config, - } - if err := d.Set("policy_tag", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting policy_tag: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *DataCatalogPolicyTagIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyPolicyTagUrl("getIamPolicy") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", "", url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *DataCatalogPolicyTagIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyPolicyTagUrl("setIamPolicy") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if 
err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", "", url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataCatalogPolicyTagIamUpdater) qualifyPolicyTagUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{DataCatalogBasePath}}%s:%s", fmt.Sprintf("%s", u.policyTag), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *DataCatalogPolicyTagIamUpdater) GetResourceId() string { - return fmt.Sprintf("%s", u.policyTag) -} - -func (u *DataCatalogPolicyTagIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-datacatalog-policytag-%s", u.GetResourceId()) -} - -func (u *DataCatalogPolicyTagIamUpdater) DescribeResource() string { - return fmt.Sprintf("datacatalog policytag %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_catalog_tag_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_catalog_tag_template.go deleted file mode 100644 index fbc8f2f7fc..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_catalog_tag_template.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var DataCatalogTagTemplateIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "tag_template": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type DataCatalogTagTemplateIamUpdater struct { - project string - region string - tagTemplate string - d TerraformResourceData - Config *Config -} - -func DataCatalogTagTemplateIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - region, _ := getRegion(d, config) - if region != "" { - if err := d.Set("region", region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - } - values["region"] = region - if v, ok := d.GetOk("tag_template"); ok { - values["tag_template"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/tagTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("tag_template").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &DataCatalogTagTemplateIamUpdater{ - project: values["project"], - region: values["region"], - 
tagTemplate: values["tag_template"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", u.region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("tag_template", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting tag_template: %s", err) - } - - return u, nil -} - -func DataCatalogTagTemplateIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - region, _ := getRegion(d, config) - if region != "" { - values["region"] = region - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/tagTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &DataCatalogTagTemplateIamUpdater{ - project: values["project"], - region: values["region"], - tagTemplate: values["tag_template"], - d: d, - Config: config, - } - if err := d.Set("tag_template", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting tag_template: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *DataCatalogTagTemplateIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyTagTemplateUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: 
{{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *DataCatalogTagTemplateIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTagTemplateUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataCatalogTagTemplateIamUpdater) qualifyTagTemplateUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{DataCatalogBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/tagTemplates/%s", u.project, u.region, u.tagTemplate), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *DataCatalogTagTemplateIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/tagTemplates/%s", u.project, u.region, u.tagTemplate) -} - -func (u *DataCatalogTagTemplateIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-datacatalog-tagtemplate-%s", u.GetResourceId()) -} - -func (u *DataCatalogTagTemplateIamUpdater) DescribeResource() string { - return fmt.Sprintf("datacatalog tagtemplate %q", u.GetResourceId()) -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_catalog_taxonomy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_catalog_taxonomy.go deleted file mode 100644 index 4e51427538..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_catalog_taxonomy.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var DataCatalogTaxonomyIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "taxonomy": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type DataCatalogTaxonomyIamUpdater struct { - project string - region string - taxonomy string - d TerraformResourceData - Config *Config -} - -func DataCatalogTaxonomyIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, 
fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - region, _ := getRegion(d, config) - if region != "" { - if err := d.Set("region", region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - } - values["region"] = region - if v, ok := d.GetOk("taxonomy"); ok { - values["taxonomy"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/taxonomies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("taxonomy").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &DataCatalogTaxonomyIamUpdater{ - project: values["project"], - region: values["region"], - taxonomy: values["taxonomy"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", u.region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("taxonomy", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting taxonomy: %s", err) - } - - return u, nil -} - -func DataCatalogTaxonomyIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - region, _ := getRegion(d, config) - if region != "" { - values["region"] = region - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/taxonomies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &DataCatalogTaxonomyIamUpdater{ - project: values["project"], - region: values["region"], - taxonomy: 
values["taxonomy"], - d: d, - Config: config, - } - if err := d.Set("taxonomy", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting taxonomy: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *DataCatalogTaxonomyIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyTaxonomyUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *DataCatalogTaxonomyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTaxonomyUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataCatalogTaxonomyIamUpdater) qualifyTaxonomyUrl(methodIdentifier string) (string, error) { - urlTemplate := 
fmt.Sprintf("{{DataCatalogBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/taxonomies/%s", u.project, u.region, u.taxonomy), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *DataCatalogTaxonomyIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/taxonomies/%s", u.project, u.region, u.taxonomy) -} - -func (u *DataCatalogTaxonomyIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-datacatalog-taxonomy-%s", u.GetResourceId()) -} - -func (u *DataCatalogTaxonomyIamUpdater) DescribeResource() string { - return fmt.Sprintf("datacatalog taxonomy %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_fusion_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_fusion_instance.go deleted file mode 100644 index 1888942d0f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_data_fusion_instance.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var DataFusionInstanceIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type DataFusionInstanceIamUpdater struct { - project string - region string - name string - d TerraformResourceData - Config *Config -} - -func DataFusionInstanceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - region, _ := getRegion(d, config) - if region != "" { - if err := d.Set("region", region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - } - values["region"] = region - if v, ok := d.GetOk("name"); ok { - values["name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &DataFusionInstanceIamUpdater{ - project: values["project"], - region: values["region"], - name: values["name"], - d: d, - Config: config, - } - - if err 
:= d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", u.region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return u, nil -} - -func DataFusionInstanceIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - region, _ := getRegion(d, config) - if region != "" { - values["region"] = region - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &DataFusionInstanceIamUpdater{ - project: values["project"], - region: values["region"], - name: values["name"], - d: d, - Config: config, - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *DataFusionInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyInstanceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return 
nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *DataFusionInstanceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyInstanceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataFusionInstanceIamUpdater) qualifyInstanceUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{DataFusionBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/instances/%s", u.project, u.region, u.name), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *DataFusionInstanceIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/instances/%s", u.project, u.region, u.name) -} - -func (u *DataFusionInstanceIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-datafusion-instance-%s", u.GetResourceId()) -} - -func (u *DataFusionInstanceIamUpdater) DescribeResource() string { - return fmt.Sprintf("datafusion instance %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataplex_asset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataplex_asset.go deleted file mode 100644 index 
205496fe60..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataplex_asset.go +++ /dev/null @@ -1,253 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var DataplexAssetIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "lake": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "dataplex_zone": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "asset": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type DataplexAssetIamUpdater struct { - project string - location string - lake string - dataplexZone string - asset string - d TerraformResourceData - Config *Config -} - -func DataplexAssetIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := 
getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("lake"); ok { - values["lake"] = v.(string) - } - - if v, ok := d.GetOk("dataplex_zone"); ok { - values["dataplex_zone"] = v.(string) - } - - if v, ok := d.GetOk("asset"); ok { - values["asset"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)/assets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("asset").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &DataplexAssetIamUpdater{ - project: values["project"], - location: values["location"], - lake: values["lake"], - dataplexZone: values["dataplex_zone"], - asset: values["asset"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("lake", u.lake); err != nil { - return nil, fmt.Errorf("Error setting lake: %s", err) - } - if err := d.Set("dataplex_zone", u.dataplexZone); err != nil { - return nil, fmt.Errorf("Error setting dataplex_zone: %s", err) - } - if err := d.Set("asset", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting asset: %s", err) - } - - return u, nil -} - -func DataplexAssetIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if 
location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)/assets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &DataplexAssetIamUpdater{ - project: values["project"], - location: values["location"], - lake: values["lake"], - dataplexZone: values["dataplex_zone"], - asset: values["asset"], - d: d, - Config: config, - } - if err := d.Set("asset", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting asset: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *DataplexAssetIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyAssetUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *DataplexAssetIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyAssetUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err 
!= nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataplexAssetIamUpdater) qualifyAssetUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{DataplexBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", u.project, u.location, u.lake, u.dataplexZone, u.asset), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *DataplexAssetIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", u.project, u.location, u.lake, u.dataplexZone, u.asset) -} - -func (u *DataplexAssetIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-dataplex-asset-%s", u.GetResourceId()) -} - -func (u *DataplexAssetIamUpdater) DescribeResource() string { - return fmt.Sprintf("dataplex asset %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataplex_lake.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataplex_lake.go deleted file mode 100644 index c3505a2cdf..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataplex_lake.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes 
will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var DataplexLakeIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "lake": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type DataplexLakeIamUpdater struct { - project string - location string - lake string - d TerraformResourceData - Config *Config -} - -func DataplexLakeIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("lake"); ok { - values["lake"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("lake").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := 
&DataplexLakeIamUpdater{ - project: values["project"], - location: values["location"], - lake: values["lake"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("lake", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting lake: %s", err) - } - - return u, nil -} - -func DataplexLakeIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &DataplexLakeIamUpdater{ - project: values["project"], - location: values["location"], - lake: values["lake"], - d: d, - Config: config, - } - if err := d.Set("lake", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting lake: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *DataplexLakeIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyLakeUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for 
%s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *DataplexLakeIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyLakeUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataplexLakeIamUpdater) qualifyLakeUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{DataplexBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/lakes/%s", u.project, u.location, u.lake), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *DataplexLakeIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/lakes/%s", u.project, u.location, u.lake) -} - -func (u *DataplexLakeIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-dataplex-lake-%s", u.GetResourceId()) -} - -func (u *DataplexLakeIamUpdater) DescribeResource() string { - return fmt.Sprintf("dataplex lake %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataplex_zone.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataplex_zone.go deleted file mode 100644 index cc34ea7b80..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataplex_zone.go +++ /dev/null @@ -1,238 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var DataplexZoneIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "lake": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "dataplex_zone": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type DataplexZoneIamUpdater struct { - project string - location string - lake string - dataplexZone string - d TerraformResourceData - Config *Config -} - -func DataplexZoneIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - 
values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("lake"); ok { - values["lake"] = v.(string) - } - - if v, ok := d.GetOk("dataplex_zone"); ok { - values["dataplex_zone"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("dataplex_zone").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &DataplexZoneIamUpdater{ - project: values["project"], - location: values["location"], - lake: values["lake"], - dataplexZone: values["dataplex_zone"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("lake", u.lake); err != nil { - return nil, fmt.Errorf("Error setting lake: %s", err) - } - if err := d.Set("dataplex_zone", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting dataplex_zone: %s", err) - } - - return u, nil -} - -func DataplexZoneIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)", 
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &DataplexZoneIamUpdater{ - project: values["project"], - location: values["location"], - lake: values["lake"], - dataplexZone: values["dataplex_zone"], - d: d, - Config: config, - } - if err := d.Set("dataplex_zone", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting dataplex_zone: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *DataplexZoneIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyZoneUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *DataplexZoneIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyZoneUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, 
u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataplexZoneIamUpdater) qualifyZoneUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{DataplexBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s", u.project, u.location, u.lake, u.dataplexZone), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *DataplexZoneIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s", u.project, u.location, u.lake, u.dataplexZone) -} - -func (u *DataplexZoneIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-dataplex-zone-%s", u.GetResourceId()) -} - -func (u *DataplexZoneIamUpdater) DescribeResource() string { - return fmt.Sprintf("dataplex zone %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataproc_autoscaling_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataproc_autoscaling_policy.go deleted file mode 100644 index e650ba29cf..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataproc_autoscaling_policy.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var DataprocAutoscalingPolicyIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "policy_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type DataprocAutoscalingPolicyIamUpdater struct { - project string - location string - policyId string - d TerraformResourceData - Config *Config -} - -func DataprocAutoscalingPolicyIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("policy_id"); ok { - values["policy_id"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/autoscalingPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("policy_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &DataprocAutoscalingPolicyIamUpdater{ - project: values["project"], - 
location: values["location"], - policyId: values["policy_id"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("policy_id", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting policy_id: %s", err) - } - - return u, nil -} - -func DataprocAutoscalingPolicyIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/autoscalingPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &DataprocAutoscalingPolicyIamUpdater{ - project: values["project"], - location: values["location"], - policyId: values["policy_id"], - d: d, - Config: config, - } - if err := d.Set("policy_id", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting policy_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *DataprocAutoscalingPolicyIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyAutoscalingPolicyUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, 
errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *DataprocAutoscalingPolicyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyAutoscalingPolicyUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataprocAutoscalingPolicyIamUpdater) qualifyAutoscalingPolicyUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{DataprocBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/autoscalingPolicies/%s", u.project, u.location, u.policyId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *DataprocAutoscalingPolicyIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/autoscalingPolicies/%s", u.project, u.location, u.policyId) -} - -func (u *DataprocAutoscalingPolicyIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-dataproc-autoscalingpolicy-%s", u.GetResourceId()) -} - -func (u *DataprocAutoscalingPolicyIamUpdater) DescribeResource() string { - return fmt.Sprintf("dataproc autoscalingpolicy %q", 
u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataproc_metastore_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataproc_metastore_service.go deleted file mode 100644 index bca3b95888..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataproc_metastore_service.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var DataprocMetastoreServiceIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "service_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type DataprocMetastoreServiceIamUpdater struct { - project string - location string - serviceId string - d TerraformResourceData - Config *Config -} - -func DataprocMetastoreServiceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err 
:= d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("service_id"); ok { - values["service_id"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("service_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &DataprocMetastoreServiceIamUpdater{ - project: values["project"], - location: values["location"], - serviceId: values["service_id"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("service_id", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting service_id: %s", err) - } - - return u, nil -} - -func DataprocMetastoreServiceIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } 
- - u := &DataprocMetastoreServiceIamUpdater{ - project: values["project"], - location: values["location"], - serviceId: values["service_id"], - d: d, - Config: config, - } - if err := d.Set("service_id", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting service_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *DataprocMetastoreServiceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyServiceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *DataprocMetastoreServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyServiceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func 
(u *DataprocMetastoreServiceIamUpdater) qualifyServiceUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{DataprocMetastoreBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/services/%s", u.project, u.location, u.serviceId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *DataprocMetastoreServiceIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/services/%s", u.project, u.location, u.serviceId) -} - -func (u *DataprocMetastoreServiceIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-dataprocmetastore-service-%s", u.GetResourceId()) -} - -func (u *DataprocMetastoreServiceIamUpdater) DescribeResource() string { - return fmt.Sprintf("dataprocmetastore service %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dns_managed_zone.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dns_managed_zone.go deleted file mode 100644 index d7ccc3976d..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dns_managed_zone.go +++ /dev/null @@ -1,199 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var DNSManagedZoneIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "managed_zone": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type DNSManagedZoneIamUpdater struct { - project string - managedZone string - d TerraformResourceData - Config *Config -} - -func DNSManagedZoneIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("managed_zone"); ok { - values["managed_zone"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/managedZones/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("managed_zone").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &DNSManagedZoneIamUpdater{ - project: values["project"], - managedZone: values["managed_zone"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("managed_zone", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting managed_zone: %s", err) - } - - return u, nil -} - -func DNSManagedZoneIdParseFunc(d *schema.ResourceData, config *Config) error { - values 
:= make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/managedZones/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &DNSManagedZoneIamUpdater{ - project: values["project"], - managedZone: values["managed_zone"], - d: d, - Config: config, - } - if err := d.Set("managed_zone", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting managed_zone: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *DNSManagedZoneIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyManagedZoneUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *DNSManagedZoneIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyManagedZoneUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if 
err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DNSManagedZoneIamUpdater) qualifyManagedZoneUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{DNSBasePath}}%s:%s", fmt.Sprintf("projects/%s/managedZones/%s", u.project, u.managedZone), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *DNSManagedZoneIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/managedZones/%s", u.project, u.managedZone) -} - -func (u *DNSManagedZoneIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-dns-managedzone-%s", u.GetResourceId()) -} - -func (u *DNSManagedZoneIamUpdater) DescribeResource() string { - return fmt.Sprintf("dns managedzone %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_endpoints_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_endpoints_service.go deleted file mode 100644 index ea4b5782e6..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_endpoints_service.go +++ /dev/null @@ -1,167 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var ServiceManagementServiceIamSchema = map[string]*schema.Schema{ - "service_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ServiceManagementServiceIamUpdater struct { - serviceName string - d TerraformResourceData - Config *Config -} - -func ServiceManagementServiceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("service_name"); ok { - values["service_name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"services/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("service_name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ServiceManagementServiceIamUpdater{ - serviceName: values["service_name"], - d: d, - Config: config, - } - - if err := d.Set("service_name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting service_name: %s", err) - } - - return u, nil -} - -func ServiceManagementServiceIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"services/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ServiceManagementServiceIamUpdater{ - serviceName: values["service_name"], - d: d, - Config: config, - } - if err := d.Set("service_name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting service_name: %s", err) - } - 
d.SetId(u.GetResourceId()) - return nil -} - -func (u *ServiceManagementServiceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyServiceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", "", url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *ServiceManagementServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyServiceUrl("setIamPolicy") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", "", url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ServiceManagementServiceIamUpdater) qualifyServiceUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{ServiceManagementBasePath}}%s:%s", fmt.Sprintf("services/%s", u.serviceName), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ServiceManagementServiceIamUpdater) GetResourceId() string { - return fmt.Sprintf("services/%s", u.serviceName) -} - 
-func (u *ServiceManagementServiceIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-servicemanagement-service-%s", u.GetResourceId()) -} - -func (u *ServiceManagementServiceIamUpdater) DescribeResource() string { - return fmt.Sprintf("servicemanagement service %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_endpoints_service_consumers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_endpoints_service_consumers.go deleted file mode 100644 index 43f389e101..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_endpoints_service_consumers.go +++ /dev/null @@ -1,182 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var ServiceManagementServiceConsumersIamSchema = map[string]*schema.Schema{ - "service_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "consumer_project": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ServiceManagementServiceConsumersIamUpdater struct { - serviceName string - consumerProject string - d TerraformResourceData - Config *Config -} - -func ServiceManagementServiceConsumersIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("service_name"); ok { - values["service_name"] = v.(string) - } - - if v, ok := d.GetOk("consumer_project"); ok { - values["consumer_project"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"services/(?P[^/]+)/consumers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("consumer_project").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ServiceManagementServiceConsumersIamUpdater{ - serviceName: values["service_name"], - consumerProject: values["consumer_project"], - d: d, - Config: config, - } - - if err := d.Set("service_name", u.serviceName); err != nil { - return nil, fmt.Errorf("Error setting service_name: %s", err) - } - if err := d.Set("consumer_project", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting consumer_project: %s", err) - } - - return u, nil -} - -func ServiceManagementServiceConsumersIdParseFunc(d *schema.ResourceData, config 
*Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"services/(?P[^/]+)/consumers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ServiceManagementServiceConsumersIamUpdater{ - serviceName: values["service_name"], - consumerProject: values["consumer_project"], - d: d, - Config: config, - } - if err := d.Set("consumer_project", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting consumer_project: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ServiceManagementServiceConsumersIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyServiceConsumersUrl("getIamPolicy") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", "", url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *ServiceManagementServiceConsumersIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyServiceConsumersUrl("setIamPolicy") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", "", url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if 
err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ServiceManagementServiceConsumersIamUpdater) qualifyServiceConsumersUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{ServiceManagementBasePath}}%s:%s", fmt.Sprintf("services/%s/consumers/%s", u.serviceName, u.consumerProject), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ServiceManagementServiceConsumersIamUpdater) GetResourceId() string { - return fmt.Sprintf("services/%s/consumers/%s", u.serviceName, u.consumerProject) -} - -func (u *ServiceManagementServiceConsumersIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-servicemanagement-serviceconsumers-%s", u.GetResourceId()) -} - -func (u *ServiceManagementServiceConsumersIamUpdater) DescribeResource() string { - return fmt.Sprintf("servicemanagement serviceconsumers %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_folder.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_folder.go deleted file mode 100644 index 650ecd72fd..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_folder.go +++ /dev/null @@ -1,134 +0,0 @@ -package google - -import ( - "fmt" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" - resourceManagerV3 "google.golang.org/api/cloudresourcemanager/v3" -) - -var IamFolderSchema = map[string]*schema.Schema{ - "folder": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type FolderIamUpdater struct { - folderId string - d TerraformResourceData - Config *Config -} - -func NewFolderIamUpdater(d 
TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - return &FolderIamUpdater{ - folderId: canonicalFolderId(d.Get("folder").(string)), - d: d, - Config: config, - }, nil -} - -func FolderIdParseFunc(d *schema.ResourceData, _ *Config) error { - if !strings.HasPrefix(d.Id(), "folders/") { - d.SetId(fmt.Sprintf("folders/%s", d.Id())) - } - if err := d.Set("folder", d.Id()); err != nil { - return fmt.Errorf("Error setting folder: %s", err) - } - return nil -} - -func (u *FolderIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - return getFolderIamPolicyByFolderName(u.folderId, userAgent, u.Config) -} - -func (u *FolderIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - v2Policy, err := v1PolicyToV2(policy) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = u.Config.NewResourceManagerV3Client(userAgent).Folders.SetIamPolicy(u.folderId, &resourceManagerV3.SetIamPolicyRequest{ - Policy: v2Policy, - UpdateMask: "bindings,etag,auditConfigs", - }).Do() - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *FolderIamUpdater) GetResourceId() string { - return u.folderId -} - -func (u *FolderIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-folder-%s", u.folderId) -} - -func (u *FolderIamUpdater) DescribeResource() string { - return fmt.Sprintf("folder %q", u.folderId) -} - -func canonicalFolderId(folder string) string { - if strings.HasPrefix(folder, "folders/") { - return folder - } - - return "folders/" + folder -} - -// v1 and v2 policy are identical -func v1PolicyToV2(in *cloudresourcemanager.Policy) (*resourceManagerV3.Policy, error) { - out := &resourceManagerV3.Policy{} - 
err := Convert(in, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a v1 policy to a v2 policy: {{err}}", err) - } - return out, nil -} - -func v2PolicyToV1(in *resourceManagerV3.Policy) (*cloudresourcemanager.Policy, error) { - out := &cloudresourcemanager.Policy{} - err := Convert(in, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a v2 policy to a v1 policy: {{err}}", err) - } - return out, nil -} - -// Retrieve the existing IAM Policy for a folder -func getFolderIamPolicyByFolderName(folderName, userAgent string, config *Config) (*cloudresourcemanager.Policy, error) { - p, err := config.NewResourceManagerV3Client(userAgent).Folders.GetIamPolicy(folderName, - &resourceManagerV3.GetIamPolicyRequest{ - Options: &resourceManagerV3.GetPolicyOptions{ - RequestedPolicyVersion: IamPolicyVersion, - }, - }).Do() - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for folder %q: {{err}}", folderName), err) - } - - v1Policy, err := v2PolicyToV1(p) - if err != nil { - return nil, err - } - - return v1Policy, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_gke_backup_backup_plan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_gke_backup_backup_plan.go deleted file mode 100644 index 6685a959e5..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_gke_backup_backup_plan.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var GKEBackupBackupPlanIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type GKEBackupBackupPlanIamUpdater struct { - project string - location string - name string - d TerraformResourceData - Config *Config -} - -func GKEBackupBackupPlanIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("name"); ok { - values["name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/backupPlans/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &GKEBackupBackupPlanIamUpdater{ - project: values["project"], - location: values["location"], - name: values["name"], - d: d, - 
Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return u, nil -} - -func GKEBackupBackupPlanIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/backupPlans/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &GKEBackupBackupPlanIamUpdater{ - project: values["project"], - location: values["location"], - name: values["name"], - d: d, - Config: config, - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *GKEBackupBackupPlanIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyBackupPlanUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - 
err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *GKEBackupBackupPlanIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyBackupPlanUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *GKEBackupBackupPlanIamUpdater) qualifyBackupPlanUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{GKEBackupBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/backupPlans/%s", u.project, u.location, u.name), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *GKEBackupBackupPlanIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/backupPlans/%s", u.project, u.location, u.name) -} - -func (u *GKEBackupBackupPlanIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-gkebackup-backupplan-%s", u.GetResourceId()) -} - -func (u *GKEBackupBackupPlanIamUpdater) DescribeResource() string { - return fmt.Sprintf("gkebackup backupplan %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_gke_hub_membership.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_gke_hub_membership.go deleted file mode 100644 index f8cde65805..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_gke_hub_membership.go +++ /dev/null @@ -1,199 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var GKEHubMembershipIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "membership_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type GKEHubMembershipIamUpdater struct { - project string - membershipId string - d TerraformResourceData - Config *Config -} - -func GKEHubMembershipIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("membership_id"); ok { - values["membership_id"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long 
name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/memberships/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("membership_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &GKEHubMembershipIamUpdater{ - project: values["project"], - membershipId: values["membership_id"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("membership_id", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting membership_id: %s", err) - } - - return u, nil -} - -func GKEHubMembershipIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/memberships/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &GKEHubMembershipIamUpdater{ - project: values["project"], - membershipId: values["membership_id"], - d: d, - Config: config, - } - if err := d.Set("membership_id", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting membership_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *GKEHubMembershipIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyMembershipUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := 
SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *GKEHubMembershipIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyMembershipUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *GKEHubMembershipIamUpdater) qualifyMembershipUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{GKEHubBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/global/memberships/%s", u.project, u.membershipId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *GKEHubMembershipIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/global/memberships/%s", u.project, u.membershipId) -} - -func (u *GKEHubMembershipIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-gkehub-membership-%s", u.GetResourceId()) -} - -func (u *GKEHubMembershipIamUpdater) DescribeResource() string { - return fmt.Sprintf("gkehub membership %q", u.GetResourceId()) 
-} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_consent_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_consent_store.go deleted file mode 100644 index 6d120ac938..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_consent_store.go +++ /dev/null @@ -1,182 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var HealthcareConsentStoreIamSchema = map[string]*schema.Schema{ - "dataset": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "consent_store_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type HealthcareConsentStoreIamUpdater struct { - dataset string - consentStoreId string - d TerraformResourceData - Config *Config -} - -func HealthcareConsentStoreIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("dataset"); ok { - values["dataset"] = v.(string) - } - - if v, ok := d.GetOk("consent_store_id"); ok { - values["consent_store_id"] = v.(string) - } - - // We may have gotten either a long or short name, 
so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"(?P.+)/consentStores/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("consent_store_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &HealthcareConsentStoreIamUpdater{ - dataset: values["dataset"], - consentStoreId: values["consent_store_id"], - d: d, - Config: config, - } - - if err := d.Set("dataset", u.dataset); err != nil { - return nil, fmt.Errorf("Error setting dataset: %s", err) - } - if err := d.Set("consent_store_id", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting consent_store_id: %s", err) - } - - return u, nil -} - -func HealthcareConsentStoreIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"(?P.+)/consentStores/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &HealthcareConsentStoreIamUpdater{ - dataset: values["dataset"], - consentStoreId: values["consent_store_id"], - d: d, - Config: config, - } - if err := d.Set("consent_store_id", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting consent_store_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *HealthcareConsentStoreIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyConsentStoreUrl("getIamPolicy") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", "", url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { 
- return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *HealthcareConsentStoreIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyConsentStoreUrl("setIamPolicy") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", "", url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *HealthcareConsentStoreIamUpdater) qualifyConsentStoreUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{HealthcareBasePath}}%s:%s", fmt.Sprintf("%s/consentStores/%s", u.dataset, u.consentStoreId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *HealthcareConsentStoreIamUpdater) GetResourceId() string { - return fmt.Sprintf("%s/consentStores/%s", u.dataset, u.consentStoreId) -} - -func (u *HealthcareConsentStoreIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-healthcare-consentstore-%s", u.GetResourceId()) -} - -func (u *HealthcareConsentStoreIamUpdater) DescribeResource() string { - return fmt.Sprintf("healthcare consentstore %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_dataset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_dataset.go deleted file mode 100644 index 2104223d41..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_dataset.go +++ /dev/null @@ -1,127 +0,0 @@ -package google - -import ( - "fmt" - - healthcare "google.golang.org/api/healthcare/v1" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IamHealthcareDatasetSchema = map[string]*schema.Schema{ - "dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type HealthcareDatasetIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewHealthcareDatasetIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - dataset := d.Get("dataset_id").(string) - datasetId, err := parseHealthcareDatasetId(dataset, config) - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{err}}", dataset), err) - } - - return &HealthcareDatasetIamUpdater{ - resourceId: datasetId.datasetId(), - d: d, - Config: config, - }, nil -} - -func DatasetIdParseFunc(d *schema.ResourceData, config *Config) error { - datasetId, err := parseHealthcareDatasetId(d.Id(), config) - if err != nil { - return err - } - - if err := d.Set("dataset_id", datasetId.datasetId()); err != nil { - return fmt.Errorf("Error setting dataset_id: %s", err) - } - d.SetId(datasetId.datasetId()) - return nil -} - -func (u *HealthcareDatasetIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.GetIamPolicy(u.resourceId).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := healthcareToResourceManagerPolicy(p) - - if 
err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *HealthcareDatasetIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - healthcarePolicy, err := resourceManagerToHealthcarePolicy(policy) - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.SetIamPolicy(u.resourceId, &healthcare.SetIamPolicyRequest{ - Policy: healthcarePolicy, - }).Do() - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *HealthcareDatasetIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *HealthcareDatasetIamUpdater) GetMutexKey() string { - return u.resourceId -} - -func (u *HealthcareDatasetIamUpdater) DescribeResource() string { - return fmt.Sprintf("Healthcare Dataset %q", u.resourceId) -} - -func resourceManagerToHealthcarePolicy(p *cloudresourcemanager.Policy) (*healthcare.Policy, error) { - out := &healthcare.Policy{} - err := Convert(p, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a v1 policy to a healthcare policy: {{err}}", err) - } - return out, nil -} - -func healthcareToResourceManagerPolicy(p *healthcare.Policy) (*cloudresourcemanager.Policy, error) { - out := &cloudresourcemanager.Policy{} - err := Convert(p, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a healthcare policy to a v1 policy: {{err}}", err) - } - return out, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_dicom_store.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_dicom_store.go deleted file mode 100644 index f16b2a9417..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_dicom_store.go +++ /dev/null @@ -1,108 +0,0 @@ -package google - -import ( - "fmt" - - healthcare "google.golang.org/api/healthcare/v1" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IamHealthcareDicomStoreSchema = map[string]*schema.Schema{ - "dicom_store_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type HealthcareDicomStoreIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewHealthcareDicomStoreIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - dicomStore := d.Get("dicom_store_id").(string) - dicomStoreId, err := parseHealthcareDicomStoreId(dicomStore, config) - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{err}}", dicomStore), err) - } - - return &HealthcareDicomStoreIamUpdater{ - resourceId: dicomStoreId.dicomStoreId(), - d: d, - Config: config, - }, nil -} - -func DicomStoreIdParseFunc(d *schema.ResourceData, config *Config) error { - dicomStoreId, err := parseHealthcareDicomStoreId(d.Id(), config) - if err != nil { - return err - } - if err := d.Set("dicom_store_id", dicomStoreId.dicomStoreId()); err != nil { - return fmt.Errorf("Error setting dicom_store_id: %s", err) - } - d.SetId(dicomStoreId.dicomStoreId()) - return nil -} - -func (u *HealthcareDicomStoreIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := 
u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.DicomStores.GetIamPolicy(u.resourceId).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := healthcareToResourceManagerPolicy(p) - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *HealthcareDicomStoreIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - healthcarePolicy, err := resourceManagerToHealthcarePolicy(policy) - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.DicomStores.SetIamPolicy(u.resourceId, &healthcare.SetIamPolicyRequest{ - Policy: healthcarePolicy, - }).Do() - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *HealthcareDicomStoreIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *HealthcareDicomStoreIamUpdater) GetMutexKey() string { - return u.resourceId -} - -func (u *HealthcareDicomStoreIamUpdater) DescribeResource() string { - return fmt.Sprintf("Healthcare DicomStore %q", u.resourceId) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_fhir_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_fhir_store.go deleted file mode 100644 index 2c798b391f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_fhir_store.go +++ 
/dev/null @@ -1,108 +0,0 @@ -package google - -import ( - "fmt" - - healthcare "google.golang.org/api/healthcare/v1" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IamHealthcareFhirStoreSchema = map[string]*schema.Schema{ - "fhir_store_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type HealthcareFhirStoreIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewHealthcareFhirStoreIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - fhirStore := d.Get("fhir_store_id").(string) - fhirStoreId, err := parseHealthcareFhirStoreId(fhirStore, config) - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{err}}", fhirStore), err) - } - - return &HealthcareFhirStoreIamUpdater{ - resourceId: fhirStoreId.fhirStoreId(), - d: d, - Config: config, - }, nil -} - -func FhirStoreIdParseFunc(d *schema.ResourceData, config *Config) error { - fhirStoreId, err := parseHealthcareFhirStoreId(d.Id(), config) - if err != nil { - return err - } - if err := d.Set("fhir_store_id", fhirStoreId.fhirStoreId()); err != nil { - return fmt.Errorf("Error setting fhir_store_id: %s", err) - } - d.SetId(fhirStoreId.fhirStoreId()) - return nil -} - -func (u *HealthcareFhirStoreIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.FhirStores.GetIamPolicy(u.resourceId).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := healthcareToResourceManagerPolicy(p) - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Invalid 
IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *HealthcareFhirStoreIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - healthcarePolicy, err := resourceManagerToHealthcarePolicy(policy) - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.FhirStores.SetIamPolicy(u.resourceId, &healthcare.SetIamPolicyRequest{ - Policy: healthcarePolicy, - }).Do() - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *HealthcareFhirStoreIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *HealthcareFhirStoreIamUpdater) GetMutexKey() string { - return u.resourceId -} - -func (u *HealthcareFhirStoreIamUpdater) DescribeResource() string { - return fmt.Sprintf("Healthcare FhirStore %q", u.resourceId) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_hl7_v2_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_hl7_v2_store.go deleted file mode 100644 index ddc91b05d1..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_healthcare_hl7_v2_store.go +++ /dev/null @@ -1,108 +0,0 @@ -package google - -import ( - "fmt" - - healthcare "google.golang.org/api/healthcare/v1" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IamHealthcareHl7V2StoreSchema = map[string]*schema.Schema{ - "hl7_v2_store_id": { - Type: schema.TypeString, - Required: 
true, - ForceNew: true, - }, -} - -type HealthcareHl7V2StoreIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewHealthcareHl7V2StoreIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - hl7V2Store := d.Get("hl7_v2_store_id").(string) - hl7V2StoreId, err := parseHealthcareHl7V2StoreId(hl7V2Store, config) - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{err}}", hl7V2Store), err) - } - - return &HealthcareHl7V2StoreIamUpdater{ - resourceId: hl7V2StoreId.hl7V2StoreId(), - d: d, - Config: config, - }, nil -} - -func Hl7V2StoreIdParseFunc(d *schema.ResourceData, config *Config) error { - hl7V2StoreId, err := parseHealthcareHl7V2StoreId(d.Id(), config) - if err != nil { - return err - } - if err := d.Set("hl7_v2_store_id", hl7V2StoreId.hl7V2StoreId()); err != nil { - return fmt.Errorf("Error setting hl7_v2_store_id: %s", err) - } - d.SetId(hl7V2StoreId.hl7V2StoreId()) - return nil -} - -func (u *HealthcareHl7V2StoreIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.Hl7V2Stores.GetIamPolicy(u.resourceId).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := healthcareToResourceManagerPolicy(p) - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *HealthcareHl7V2StoreIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - healthcarePolicy, err := resourceManagerToHealthcarePolicy(policy) - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: 
{{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.Hl7V2Stores.SetIamPolicy(u.resourceId, &healthcare.SetIamPolicyRequest{ - Policy: healthcarePolicy, - }).Do() - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *HealthcareHl7V2StoreIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *HealthcareHl7V2StoreIamUpdater) GetMutexKey() string { - return u.resourceId -} - -func (u *HealthcareHl7V2StoreIamUpdater) DescribeResource() string { - return fmt.Sprintf("Healthcare Hl7V2Store %q", u.resourceId) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_app_engine_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_app_engine_service.go deleted file mode 100644 index 9da4198b10..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_app_engine_service.go +++ /dev/null @@ -1,219 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IapAppEngineServiceIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "app_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "service": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type IapAppEngineServiceIamUpdater struct { - project string - appId string - service string - d TerraformResourceData - Config *Config -} - -func IapAppEngineServiceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("app_id"); ok { - values["appId"] = v.(string) - } - - if v, ok := d.GetOk("service"); ok { - values["service"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("service").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapAppEngineServiceIamUpdater{ - project: values["project"], - appId: values["appId"], - service: values["service"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if 
err := d.Set("app_id", u.appId); err != nil { - return nil, fmt.Errorf("Error setting app_id: %s", err) - } - if err := d.Set("service", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting service: %s", err) - } - - return u, nil -} - -func IapAppEngineServiceIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapAppEngineServiceIamUpdater{ - project: values["project"], - appId: values["appId"], - service: values["service"], - d: d, - Config: config, - } - if err := d.Set("service", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting service: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapAppEngineServiceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyAppEngineServiceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": IamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a 
resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapAppEngineServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyAppEngineServiceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapAppEngineServiceIamUpdater) qualifyAppEngineServiceUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_web/appengine-%s/services/%s", u.project, u.appId, u.service), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapAppEngineServiceIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/iap_web/appengine-%s/services/%s", u.project, u.appId, u.service) -} - -func (u *IapAppEngineServiceIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-iap-appengineservice-%s", u.GetResourceId()) -} - -func (u *IapAppEngineServiceIamUpdater) DescribeResource() string { - return fmt.Sprintf("iap appengineservice %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_app_engine_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_app_engine_version.go deleted file mode 100644 index 
76c6ec7a39..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_app_engine_version.go +++ /dev/null @@ -1,234 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IapAppEngineVersionIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "app_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "service": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "version_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type IapAppEngineVersionIamUpdater struct { - project string - appId string - service string - versionId string - d TerraformResourceData - Config *Config -} - -func IapAppEngineVersionIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("app_id"); ok { - values["appId"] = v.(string) - } - - if v, ok := d.GetOk("service"); ok { 
- values["service"] = v.(string) - } - - if v, ok := d.GetOk("version_id"); ok { - values["versionId"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)/services/(?P[^/]+)/versions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("version_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapAppEngineVersionIamUpdater{ - project: values["project"], - appId: values["appId"], - service: values["service"], - versionId: values["versionId"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("app_id", u.appId); err != nil { - return nil, fmt.Errorf("Error setting app_id: %s", err) - } - if err := d.Set("service", u.service); err != nil { - return nil, fmt.Errorf("Error setting service: %s", err) - } - if err := d.Set("version_id", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting version_id: %s", err) - } - - return u, nil -} - -func IapAppEngineVersionIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)/services/(?P[^/]+)/versions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapAppEngineVersionIamUpdater{ - project: values["project"], - appId: values["appId"], - service: values["service"], - versionId: values["versionId"], - d: d, - Config: config, - } - if err 
:= d.Set("version_id", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting version_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapAppEngineVersionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyAppEngineVersionUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": IamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapAppEngineVersionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyAppEngineVersionUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapAppEngineVersionIamUpdater) 
qualifyAppEngineVersionUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_web/appengine-%s/services/%s/versions/%s", u.project, u.appId, u.service, u.versionId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapAppEngineVersionIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/iap_web/appengine-%s/services/%s/versions/%s", u.project, u.appId, u.service, u.versionId) -} - -func (u *IapAppEngineVersionIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-iap-appengineversion-%s", u.GetResourceId()) -} - -func (u *IapAppEngineVersionIamUpdater) DescribeResource() string { - return fmt.Sprintf("iap appengineversion %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_tunnel.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_tunnel.go deleted file mode 100644 index e8432475b5..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_tunnel.go +++ /dev/null @@ -1,190 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IapTunnelIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type IapTunnelIamUpdater struct { - project string - d TerraformResourceData - Config *Config -} - -func IapTunnelIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_tunnel", "(?P[^/]+)"}, d, config, d.Get("project").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapTunnelIamUpdater{ - project: values["project"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - - return u, nil -} - -func IapTunnelIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_tunnel", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapTunnelIamUpdater{ - project: values["project"], - d: d, - Config: config, - } - if err := d.Set("project", 
u.project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapTunnelIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyTunnelUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": IamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapTunnelIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTunnelUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapTunnelIamUpdater) qualifyTunnelUrl(methodIdentifier string) (string, error) { - urlTemplate := 
fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_tunnel", u.project), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapTunnelIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/iap_tunnel", u.project) -} - -func (u *IapTunnelIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-iap-tunnel-%s", u.GetResourceId()) -} - -func (u *IapTunnelIamUpdater) DescribeResource() string { - return fmt.Sprintf("iap tunnel %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_tunnel_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_tunnel_instance.go deleted file mode 100644 index fbde980b91..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_tunnel_instance.go +++ /dev/null @@ -1,228 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IapTunnelInstanceIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "instance": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type IapTunnelInstanceIamUpdater struct { - project string - zone string - instance string - d TerraformResourceData - Config *Config -} - -func IapTunnelInstanceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - zone, _ := getZone(d, config) - if zone != "" { - if err := d.Set("zone", zone); err != nil { - return nil, fmt.Errorf("Error setting zone: %s", err) - } - } - values["zone"] = zone - if v, ok := d.GetOk("instance"); ok { - values["instance"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_tunnel/zones/(?P[^/]+)/instances/(?P[^/]+)", "projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("instance").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapTunnelInstanceIamUpdater{ - project: values["project"], - zone: values["zone"], - 
instance: values["instance"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("zone", u.zone); err != nil { - return nil, fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("instance", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting instance: %s", err) - } - - return u, nil -} - -func IapTunnelInstanceIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - zone, _ := getZone(d, config) - if zone != "" { - values["zone"] = zone - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_tunnel/zones/(?P[^/]+)/instances/(?P[^/]+)", "projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapTunnelInstanceIamUpdater{ - project: values["project"], - zone: values["zone"], - instance: values["instance"], - d: d, - Config: config, - } - if err := d.Set("instance", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting instance: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapTunnelInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyTunnelInstanceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": IamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", 
project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapTunnelInstanceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTunnelInstanceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapTunnelInstanceIamUpdater) qualifyTunnelInstanceUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_tunnel/zones/%s/instances/%s", u.project, u.zone, u.instance), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapTunnelInstanceIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/iap_tunnel/zones/%s/instances/%s", u.project, u.zone, u.instance) -} - -func (u *IapTunnelInstanceIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-iap-tunnelinstance-%s", u.GetResourceId()) -} - -func (u *IapTunnelInstanceIamUpdater) DescribeResource() string { - return fmt.Sprintf("iap tunnelinstance %q", u.GetResourceId()) -} diff 
--git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_web.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_web.go deleted file mode 100644 index 075f164d84..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_web.go +++ /dev/null @@ -1,190 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IapWebIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type IapWebIamUpdater struct { - project string - d TerraformResourceData - Config *Config -} - -func IapWebIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web", "(?P[^/]+)"}, d, config, d.Get("project").(string)) - 
if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebIamUpdater{ - project: values["project"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - - return u, nil -} - -func IapWebIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebIamUpdater{ - project: values["project"], - d: d, - Config: config, - } - if err := d.Set("project", u.project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapWebIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyWebUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": IamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapWebIamUpdater) SetResourceIamPolicy(policy 
*cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyWebUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapWebIamUpdater) qualifyWebUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_web", u.project), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapWebIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/iap_web", u.project) -} - -func (u *IapWebIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-iap-web-%s", u.GetResourceId()) -} - -func (u *IapWebIamUpdater) DescribeResource() string { - return fmt.Sprintf("iap web %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_web_backend_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_web_backend_service.go deleted file mode 100644 index 5b22d37d42..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_web_backend_service.go +++ /dev/null @@ -1,204 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// 
---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IapWebBackendServiceIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "web_backend_service": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type IapWebBackendServiceIamUpdater struct { - project string - webBackendService string - d TerraformResourceData - Config *Config -} - -func IapWebBackendServiceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("web_backend_service"); ok { - values["web_backend_service"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/compute/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("web_backend_service").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebBackendServiceIamUpdater{ - project: values["project"], - webBackendService: values["web_backend_service"], - d: d, - 
Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("web_backend_service", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting web_backend_service: %s", err) - } - - return u, nil -} - -func IapWebBackendServiceIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/compute/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebBackendServiceIamUpdater{ - project: values["project"], - webBackendService: values["web_backend_service"], - d: d, - Config: config, - } - if err := d.Set("web_backend_service", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting web_backend_service: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapWebBackendServiceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyWebBackendServiceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": IamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, 
errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapWebBackendServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyWebBackendServiceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapWebBackendServiceIamUpdater) qualifyWebBackendServiceUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_web/compute/services/%s", u.project, u.webBackendService), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapWebBackendServiceIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/iap_web/compute/services/%s", u.project, u.webBackendService) -} - -func (u *IapWebBackendServiceIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-iap-webbackendservice-%s", u.GetResourceId()) -} - -func (u *IapWebBackendServiceIamUpdater) DescribeResource() string { - return fmt.Sprintf("iap webbackendservice %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_web_type_app_engine.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_web_type_app_engine.go 
deleted file mode 100644 index a86fec2d9e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_web_type_app_engine.go +++ /dev/null @@ -1,218 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IapWebTypeAppEngineIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "app_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: IapWebTypeAppEngineDiffSuppress, - }, -} - -func IapWebTypeAppEngineDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - newParts := strings.Split(new, "appengine-") - - if len(newParts) == 1 { - // `new` is only the app engine id - // `old` is always a long name - if strings.HasSuffix(old, fmt.Sprintf("appengine-%s", new)) { - return true - } - } - return old == new -} - -type IapWebTypeAppEngineIamUpdater struct { - project string - appId string - d TerraformResourceData - Config *Config -} - -func IapWebTypeAppEngineIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, 
fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("app_id"); ok { - values["appId"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("app_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebTypeAppEngineIamUpdater{ - project: values["project"], - appId: values["appId"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("app_id", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting app_id: %s", err) - } - - return u, nil -} - -func IapWebTypeAppEngineIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebTypeAppEngineIamUpdater{ - project: values["project"], - appId: values["appId"], - d: d, - Config: config, - } - if err := d.Set("app_id", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting app_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapWebTypeAppEngineIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyWebTypeAppEngineUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - 
"options": map[string]interface{}{ - "requestedPolicyVersion": IamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapWebTypeAppEngineIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyWebTypeAppEngineUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapWebTypeAppEngineIamUpdater) qualifyWebTypeAppEngineUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_web/appengine-%s", u.project, u.appId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapWebTypeAppEngineIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/iap_web/appengine-%s", u.project, u.appId) -} - -func (u *IapWebTypeAppEngineIamUpdater) GetMutexKey() string 
{ - return fmt.Sprintf("iam-iap-webtypeappengine-%s", u.GetResourceId()) -} - -func (u *IapWebTypeAppEngineIamUpdater) DescribeResource() string { - return fmt.Sprintf("iap webtypeappengine %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_web_type_compute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_web_type_compute.go deleted file mode 100644 index e3beca51d5..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_iap_web_type_compute.go +++ /dev/null @@ -1,190 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IapWebTypeComputeIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type IapWebTypeComputeIamUpdater struct { - project string - d TerraformResourceData - Config *Config -} - -func IapWebTypeComputeIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/compute", "(?P[^/]+)"}, d, config, d.Get("project").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebTypeComputeIamUpdater{ - project: values["project"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - - return u, nil -} - -func IapWebTypeComputeIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/compute", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebTypeComputeIamUpdater{ - project: values["project"], - 
d: d, - Config: config, - } - if err := d.Set("project", u.project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapWebTypeComputeIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyWebTypeComputeUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": IamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapWebTypeComputeIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyWebTypeComputeUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapWebTypeComputeIamUpdater) 
qualifyWebTypeComputeUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_web/compute", u.project), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapWebTypeComputeIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/iap_web/compute", u.project) -} - -func (u *IapWebTypeComputeIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-iap-webtypecompute-%s", u.GetResourceId()) -} - -func (u *IapWebTypeComputeIamUpdater) DescribeResource() string { - return fmt.Sprintf("iap webtypecompute %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_kms_crypto_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_kms_crypto_key.go deleted file mode 100644 index e6789f9b11..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_kms_crypto_key.go +++ /dev/null @@ -1,107 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudkms/v1" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IamKmsCryptoKeySchema = map[string]*schema.Schema{ - "crypto_key_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type KmsCryptoKeyIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewKmsCryptoKeyIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - cryptoKey := d.Get("crypto_key_id").(string) - cryptoKeyId, err := parseKmsCryptoKeyId(cryptoKey, config) - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{err}}", cryptoKey), err) - } - - return 
&KmsCryptoKeyIamUpdater{ - resourceId: cryptoKeyId.cryptoKeyId(), - d: d, - Config: config, - }, nil -} - -func CryptoIdParseFunc(d *schema.ResourceData, config *Config) error { - cryptoKeyId, err := parseKmsCryptoKeyId(d.Id(), config) - if err != nil { - return err - } - if err := d.Set("crypto_key_id", cryptoKeyId.cryptoKeyId()); err != nil { - return fmt.Errorf("Error setting crypto_key_id: %s", err) - } - d.SetId(cryptoKeyId.cryptoKeyId()) - return nil -} - -func (u *KmsCryptoKeyIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.GetIamPolicy(u.resourceId).OptionsRequestedPolicyVersion(IamPolicyVersion).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := kmsToResourceManagerPolicy(p) - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *KmsCryptoKeyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - kmsPolicy, err := resourceManagerToKmsPolicy(policy) - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - _, err = u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.SetIamPolicy(u.resourceId, &cloudkms.SetIamPolicyRequest{ - Policy: kmsPolicy, - }).Do() - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *KmsCryptoKeyIamUpdater) GetResourceId() string { - return 
u.resourceId -} - -func (u *KmsCryptoKeyIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-kms-crypto-key-%s", u.resourceId) -} - -func (u *KmsCryptoKeyIamUpdater) DescribeResource() string { - return fmt.Sprintf("KMS CryptoKey %q", u.resourceId) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_kms_key_ring.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_kms_key_ring.go deleted file mode 100644 index 7a24165b6f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_kms_key_ring.go +++ /dev/null @@ -1,126 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudkms/v1" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IamKmsKeyRingSchema = map[string]*schema.Schema{ - "key_ring_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type KmsKeyRingIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewKmsKeyRingIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - keyRing := d.Get("key_ring_id").(string) - keyRingId, err := parseKmsKeyRingId(keyRing, config) - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{err}}", keyRing), err) - } - - return &KmsKeyRingIamUpdater{ - resourceId: keyRingId.keyRingId(), - d: d, - Config: config, - }, nil -} - -func KeyRingIdParseFunc(d *schema.ResourceData, config *Config) error { - keyRingId, err := parseKmsKeyRingId(d.Id(), config) - if err != nil { - return err - } - - if err := d.Set("key_ring_id", keyRingId.keyRingId()); err != nil { - return fmt.Errorf("Error setting key_ring_id: %s", err) - } - d.SetId(keyRingId.keyRingId()) - return nil -} - -func (u *KmsKeyRingIamUpdater) 
GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.GetIamPolicy(u.resourceId).OptionsRequestedPolicyVersion(IamPolicyVersion).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := kmsToResourceManagerPolicy(p) - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *KmsKeyRingIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - kmsPolicy, err := resourceManagerToKmsPolicy(policy) - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.SetIamPolicy(u.resourceId, &cloudkms.SetIamPolicyRequest{ - Policy: kmsPolicy, - }).Do() - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *KmsKeyRingIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *KmsKeyRingIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-kms-key-ring-%s", u.resourceId) -} - -func (u *KmsKeyRingIamUpdater) DescribeResource() string { - return fmt.Sprintf("KMS KeyRing %q", u.resourceId) -} - -func resourceManagerToKmsPolicy(p *cloudresourcemanager.Policy) (*cloudkms.Policy, error) { - out := &cloudkms.Policy{} - err := Convert(p, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a v1 policy to a kms policy: {{err}}", err) - } - return out, nil -} - 
-func kmsToResourceManagerPolicy(p *cloudkms.Policy) (*cloudresourcemanager.Policy, error) { - out := &cloudresourcemanager.Policy{} - err := Convert(p, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a kms policy to a v1 policy: {{err}}", err) - } - return out, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_notebooks_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_notebooks_instance.go deleted file mode 100644 index d3437e5f63..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_notebooks_instance.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var NotebooksInstanceIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "instance_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type NotebooksInstanceIamUpdater struct { - project string - location string - instanceName string - d TerraformResourceData - Config *Config -} - -func NotebooksInstanceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("instance_name"); ok { - values["instance_name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("instance_name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &NotebooksInstanceIamUpdater{ - project: values["project"], - location: values["location"], 
- instanceName: values["instance_name"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("instance_name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting instance_name: %s", err) - } - - return u, nil -} - -func NotebooksInstanceIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &NotebooksInstanceIamUpdater{ - project: values["project"], - location: values["location"], - instanceName: values["instance_name"], - d: d, - Config: config, - } - if err := d.Set("instance_name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting instance_name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *NotebooksInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyInstanceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy 
for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *NotebooksInstanceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyInstanceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *NotebooksInstanceIamUpdater) qualifyInstanceUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{NotebooksBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/instances/%s", u.project, u.location, u.instanceName), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *NotebooksInstanceIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/instances/%s", u.project, u.location, u.instanceName) -} - -func (u *NotebooksInstanceIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-notebooks-instance-%s", u.GetResourceId()) -} - -func (u *NotebooksInstanceIamUpdater) DescribeResource() string { - return fmt.Sprintf("notebooks instance %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_notebooks_runtime.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_notebooks_runtime.go deleted file mode 100644 index 034ac2548d..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_notebooks_runtime.go +++ /dev/null @@ -1,223 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var NotebooksRuntimeIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "runtime_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type NotebooksRuntimeIamUpdater struct { - project string - location string - runtimeName string - d TerraformResourceData - Config *Config -} - -func NotebooksRuntimeIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location 
!= "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("runtime_name"); ok { - values["runtime_name"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/runtimes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("runtime_name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &NotebooksRuntimeIamUpdater{ - project: values["project"], - location: values["location"], - runtimeName: values["runtime_name"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("runtime_name", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting runtime_name: %s", err) - } - - return u, nil -} - -func NotebooksRuntimeIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/runtimes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &NotebooksRuntimeIamUpdater{ - project: values["project"], - location: values["location"], - runtimeName: values["runtime_name"], - d: d, - Config: config, - } - if err := 
d.Set("runtime_name", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting runtime_name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *NotebooksRuntimeIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyRuntimeUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *NotebooksRuntimeIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyRuntimeUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *NotebooksRuntimeIamUpdater) qualifyRuntimeUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{NotebooksBasePath}}%s:%s", 
fmt.Sprintf("projects/%s/locations/%s/runtimes/%s", u.project, u.location, u.runtimeName), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *NotebooksRuntimeIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/runtimes/%s", u.project, u.location, u.runtimeName) -} - -func (u *NotebooksRuntimeIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-notebooks-runtime-%s", u.GetResourceId()) -} - -func (u *NotebooksRuntimeIamUpdater) DescribeResource() string { - return fmt.Sprintf("notebooks runtime %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_organization.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_organization.go deleted file mode 100644 index bcbefce648..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_organization.go +++ /dev/null @@ -1,90 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IamOrganizationSchema = map[string]*schema.Schema{ - "org_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The numeric ID of the organization in which you want to manage the audit logging config.`, - }, -} - -type OrganizationIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewOrganizationIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - return &OrganizationIamUpdater{ - resourceId: d.Get("org_id").(string), - d: d, - Config: config, - }, nil -} - -func OrgIdParseFunc(d *schema.ResourceData, _ *Config) error { - if err := d.Set("org_id", d.Id()); err != nil { - return fmt.Errorf("Error setting 
org_id: %s", err) - } - return nil -} - -func (u *OrganizationIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewResourceManagerClient(userAgent).Organizations.GetIamPolicy( - "organizations/"+u.resourceId, - &cloudresourcemanager.GetIamPolicyRequest{ - Options: &cloudresourcemanager.GetPolicyOptions{ - RequestedPolicyVersion: IamPolicyVersion, - }, - }, - ).Do() - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return p, nil -} - -func (u *OrganizationIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = u.Config.NewResourceManagerClient(userAgent).Organizations.SetIamPolicy("organizations/"+u.resourceId, &cloudresourcemanager.SetIamPolicyRequest{ - Policy: policy, - UpdateMask: "bindings,etag,auditConfigs", - }).Do() - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *OrganizationIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *OrganizationIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-organization-%s", u.resourceId) -} - -func (u *OrganizationIamUpdater) DescribeResource() string { - return fmt.Sprintf("organization %q", u.resourceId) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_privateca_ca_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_privateca_ca_pool.go deleted file mode 100644 index 327cbf9743..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_privateca_ca_pool.go +++ /dev/null @@ -1,227 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var PrivatecaCaPoolIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "ca_pool": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type PrivatecaCaPoolIamUpdater struct { - project string - location string - caPool string - d TerraformResourceData - Config *Config -} - -func PrivatecaCaPoolIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := 
d.GetOk("ca_pool"); ok { - values["ca_pool"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/caPools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config, d.Get("ca_pool").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &PrivatecaCaPoolIamUpdater{ - project: values["project"], - location: values["location"], - caPool: values["ca_pool"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("ca_pool", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting ca_pool: %s", err) - } - - return u, nil -} - -func PrivatecaCaPoolIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/caPools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &PrivatecaCaPoolIamUpdater{ - project: values["project"], - location: values["location"], - caPool: values["ca_pool"], - d: d, - Config: config, - } - if err := d.Set("ca_pool", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting ca_pool: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *PrivatecaCaPoolIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err 
:= u.qualifyCaPoolUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - url, err = addQueryParams(url, map[string]string{"options.requestedPolicyVersion": fmt.Sprintf("%d", IamPolicyVersion)}) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *PrivatecaCaPoolIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyCaPoolUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *PrivatecaCaPoolIamUpdater) qualifyCaPoolUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{PrivatecaBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/caPools/%s", u.project, u.location, u.caPool), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err 
!= nil { - return "", err - } - return url, nil -} - -func (u *PrivatecaCaPoolIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/locations/%s/caPools/%s", u.project, u.location, u.caPool) -} - -func (u *PrivatecaCaPoolIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-privateca-capool-%s", u.GetResourceId()) -} - -func (u *PrivatecaCaPoolIamUpdater) DescribeResource() string { - return fmt.Sprintf("privateca capool %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_privateca_certificate_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_privateca_certificate_template.go deleted file mode 100644 index aa790e9a94..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_privateca_certificate_template.go +++ /dev/null @@ -1,227 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var PrivatecaCertificateTemplateIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "certificate_template": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type PrivatecaCertificateTemplateIamUpdater struct { - project string - location string - certificateTemplate string - d TerraformResourceData - Config *Config -} - -func PrivatecaCertificateTemplateIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("certificate_template"); ok { - values["certificate_template"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/certificateTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config, d.Get("certificate_template").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := 
&PrivatecaCertificateTemplateIamUpdater{ - project: values["project"], - location: values["location"], - certificateTemplate: values["certificate_template"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("certificate_template", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting certificate_template: %s", err) - } - - return u, nil -} - -func PrivatecaCertificateTemplateIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/certificateTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &PrivatecaCertificateTemplateIamUpdater{ - project: values["project"], - location: values["location"], - certificateTemplate: values["certificate_template"], - d: d, - Config: config, - } - if err := d.Set("certificate_template", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting certificate_template: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *PrivatecaCertificateTemplateIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyCertificateTemplateUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - url, err = addQueryParams(url, 
map[string]string{"options.requestedPolicyVersion": fmt.Sprintf("%d", IamPolicyVersion)}) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *PrivatecaCertificateTemplateIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyCertificateTemplateUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *PrivatecaCertificateTemplateIamUpdater) qualifyCertificateTemplateUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{PrivatecaBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/certificateTemplates/%s", u.project, u.location, u.certificateTemplate), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *PrivatecaCertificateTemplateIamUpdater) GetResourceId() string { - return 
fmt.Sprintf("projects/%s/locations/%s/certificateTemplates/%s", u.project, u.location, u.certificateTemplate) -} - -func (u *PrivatecaCertificateTemplateIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-privateca-certificatetemplate-%s", u.GetResourceId()) -} - -func (u *PrivatecaCertificateTemplateIamUpdater) DescribeResource() string { - return fmt.Sprintf("privateca certificatetemplate %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_project.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_project.go deleted file mode 100644 index a89da9840e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_project.go +++ /dev/null @@ -1,103 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var IamProjectSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareProjectName, - }, -} - -type ProjectIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewProjectIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - return &ProjectIamUpdater{ - resourceId: d.Get("project").(string), - d: d, - Config: config, - }, nil -} - -func ProjectIdParseFunc(d *schema.ResourceData, _ *Config) error { - if err := d.Set("project", d.Id()); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - return nil -} - -func (u *ProjectIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - projectId := GetResourceNameFromSelfLink(u.resourceId) - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := 
u.Config.NewResourceManagerClient(userAgent).Projects.GetIamPolicy(projectId, - &cloudresourcemanager.GetIamPolicyRequest{ - Options: &cloudresourcemanager.GetPolicyOptions{ - RequestedPolicyVersion: IamPolicyVersion, - }, - }).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return p, nil -} - -func (u *ProjectIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - projectId := GetResourceNameFromSelfLink(u.resourceId) - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = u.Config.NewResourceManagerClient(userAgent).Projects.SetIamPolicy(projectId, - &cloudresourcemanager.SetIamPolicyRequest{ - Policy: policy, - UpdateMask: "bindings,etag,auditConfigs", - }).Do() - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ProjectIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *ProjectIamUpdater) GetMutexKey() string { - return getProjectIamPolicyMutexKey(u.resourceId) -} - -func (u *ProjectIamUpdater) DescribeResource() string { - return fmt.Sprintf("project %q", u.resourceId) -} - -func compareProjectName(_, old, new string, _ *schema.ResourceData) bool { - // We can either get "projects/project-id" or "project-id", so strip any prefixes - return GetResourceNameFromSelfLink(old) == GetResourceNameFromSelfLink(new) -} - -func getProjectIamPolicyMutexKey(pid string) string { - return fmt.Sprintf("iam-project-%s", pid) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_pubsub_subscription.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_pubsub_subscription.go deleted file mode 100644 index e8493e0e7d..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_pubsub_subscription.go +++ /dev/null @@ -1,126 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" - "google.golang.org/api/pubsub/v1" -) - -var IamPubsubSubscriptionSchema = map[string]*schema.Schema{ - "subscription": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -type PubsubSubscriptionIamUpdater struct { - subscription string - d TerraformResourceData - Config *Config -} - -func NewPubsubSubscriptionIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - subscription := getComputedSubscriptionName(project, d.Get("subscription").(string)) - - return &PubsubSubscriptionIamUpdater{ - subscription: subscription, - d: d, - Config: config, - }, nil -} - -func PubsubSubscriptionIdParseFunc(d *schema.ResourceData, _ *Config) error { - if err := d.Set("subscription", d.Id()); err != nil { - return fmt.Errorf("Error setting subscription: %s", err) - } - return nil -} - -func (u *PubsubSubscriptionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewPubsubClient(userAgent).Projects.Subscriptions.GetIamPolicy(u.subscription).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - v1Policy, err := pubsubToResourceManagerPolicy(p) - if err != nil { - return nil, err - } - - return v1Policy, nil -} - -func (u 
*PubsubSubscriptionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - pubsubPolicy, err := resourceManagerToPubsubPolicy(policy) - if err != nil { - return err - } - - _, err = u.Config.NewPubsubClient(userAgent).Projects.Subscriptions.SetIamPolicy(u.subscription, &pubsub.SetIamPolicyRequest{ - Policy: pubsubPolicy, - }).Do() - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *PubsubSubscriptionIamUpdater) GetResourceId() string { - return u.subscription -} - -func (u *PubsubSubscriptionIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-pubsub-subscription-%s", u.subscription) -} - -func (u *PubsubSubscriptionIamUpdater) DescribeResource() string { - return fmt.Sprintf("pubsub subscription %q", u.subscription) -} - -// v1 and v2 policy are identical -func resourceManagerToPubsubPolicy(in *cloudresourcemanager.Policy) (*pubsub.Policy, error) { - out := &pubsub.Policy{} - err := Convert(in, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a v1 policy to a pubsub policy: {{err}}", err) - } - return out, nil -} - -func pubsubToResourceManagerPolicy(in *pubsub.Policy) (*cloudresourcemanager.Policy, error) { - out := &cloudresourcemanager.Policy{} - err := Convert(in, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a pubsub policy to a v1 policy: {{err}}", err) - } - return out, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_pubsub_topic.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_pubsub_topic.go deleted file mode 100644 index 21e7e94035..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_pubsub_topic.go +++ 
/dev/null @@ -1,199 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var PubsubTopicIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "topic": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type PubsubTopicIamUpdater struct { - project string - topic string - d TerraformResourceData - Config *Config -} - -func PubsubTopicIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("topic"); ok { - values["topic"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/topics/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("topic").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &PubsubTopicIamUpdater{ - project: values["project"], - topic: 
values["topic"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("topic", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting topic: %s", err) - } - - return u, nil -} - -func PubsubTopicIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/topics/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &PubsubTopicIamUpdater{ - project: values["project"], - topic: values["topic"], - d: d, - Config: config, - } - if err := d.Set("topic", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting topic: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *PubsubTopicIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyTopicUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj, pubsubTopicProjectNotReady) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *PubsubTopicIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - 
json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTopicUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate), pubsubTopicProjectNotReady) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *PubsubTopicIamUpdater) qualifyTopicUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{PubsubBasePath}}%s:%s", fmt.Sprintf("projects/%s/topics/%s", u.project, u.topic), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *PubsubTopicIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/topics/%s", u.project, u.topic) -} - -func (u *PubsubTopicIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-pubsub-topic-%s", u.GetResourceId()) -} - -func (u *PubsubTopicIamUpdater) DescribeResource() string { - return fmt.Sprintf("pubsub topic %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_scc_source.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_scc_source.go deleted file mode 100644 index dc9f1870b5..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_scc_source.go +++ /dev/null @@ -1,182 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// 
---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var SecurityCenterSourceIamSchema = map[string]*schema.Schema{ - "organization": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "source": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type SecurityCenterSourceIamUpdater struct { - organization string - source string - d TerraformResourceData - Config *Config -} - -func SecurityCenterSourceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("organization"); ok { - values["organization"] = v.(string) - } - - if v, ok := d.GetOk("source"); ok { - values["source"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"organizations/(?P[^/]+)/sources/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("source").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &SecurityCenterSourceIamUpdater{ - organization: values["organization"], - source: values["source"], - d: d, - Config: config, - } - - if err := d.Set("organization", u.organization); err != nil { - return nil, fmt.Errorf("Error setting organization: %s", err) - } - if err := d.Set("source", u.GetResourceId()); err != nil { - return 
nil, fmt.Errorf("Error setting source: %s", err) - } - - return u, nil -} - -func SecurityCenterSourceIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"organizations/(?P[^/]+)/sources/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &SecurityCenterSourceIamUpdater{ - organization: values["organization"], - source: values["source"], - d: d, - Config: config, - } - if err := d.Set("source", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting source: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *SecurityCenterSourceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifySourceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", "", url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *SecurityCenterSourceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifySourceUrl("setIamPolicy") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", "", url, 
userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *SecurityCenterSourceIamUpdater) qualifySourceUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{SecurityCenterBasePath}}%s:%s", fmt.Sprintf("organizations/%s/sources/%s", u.organization, u.source), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *SecurityCenterSourceIamUpdater) GetResourceId() string { - return fmt.Sprintf("organizations/%s/sources/%s", u.organization, u.source) -} - -func (u *SecurityCenterSourceIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-securitycenter-source-%s", u.GetResourceId()) -} - -func (u *SecurityCenterSourceIamUpdater) DescribeResource() string { - return fmt.Sprintf("securitycenter source %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_secret_manager_secret.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_secret_manager_secret.go deleted file mode 100644 index f69987e829..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_secret_manager_secret.go +++ /dev/null @@ -1,199 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var SecretManagerSecretIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "secret_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type SecretManagerSecretIamUpdater struct { - project string - secretId string - d TerraformResourceData - Config *Config -} - -func SecretManagerSecretIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("secret_id"); ok { - values["secret_id"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/secrets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("secret_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &SecretManagerSecretIamUpdater{ - project: values["project"], - secretId: values["secret_id"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("secret_id", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting secret_id: %s", err) - } - - return u, nil -} - -func SecretManagerSecretIdParseFunc(d *schema.ResourceData, config *Config) error { - values := 
make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/secrets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &SecretManagerSecretIamUpdater{ - project: values["project"], - secretId: values["secret_id"], - d: d, - Config: config, - } - if err := d.Set("secret_id", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting secret_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *SecretManagerSecretIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifySecretUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *SecretManagerSecretIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifySecretUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - 
return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *SecretManagerSecretIamUpdater) qualifySecretUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{SecretManagerBasePath}}%s:%s", fmt.Sprintf("projects/%s/secrets/%s", u.project, u.secretId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *SecretManagerSecretIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/secrets/%s", u.project, u.secretId) -} - -func (u *SecretManagerSecretIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-secretmanager-secret-%s", u.GetResourceId()) -} - -func (u *SecretManagerSecretIamUpdater) DescribeResource() string { - return fmt.Sprintf("secretmanager secret %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_service_account.go deleted file mode 100644 index 7793fd7ad6..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_service_account.go +++ /dev/null @@ -1,112 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" - "google.golang.org/api/iam/v1" -) - -var IamServiceAccountSchema = map[string]*schema.Schema{ - "service_account_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(ServiceAccountLinkRegex), - }, -} - -type ServiceAccountIamUpdater struct { - serviceAccountId 
string - d TerraformResourceData - Config *Config -} - -func NewServiceAccountIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - return &ServiceAccountIamUpdater{ - serviceAccountId: d.Get("service_account_id").(string), - d: d, - Config: config, - }, nil -} - -func ServiceAccountIdParseFunc(d *schema.ResourceData, _ *Config) error { - if err := d.Set("service_account_id", d.Id()); err != nil { - return fmt.Errorf("Error setting service_account_id: %s", err) - } - return nil -} - -func (u *ServiceAccountIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewIamClient(userAgent).Projects.ServiceAccounts.GetIamPolicy(u.serviceAccountId).OptionsRequestedPolicyVersion(IamPolicyVersion).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := iamToResourceManagerPolicy(p) - if err != nil { - return nil, err - } - - return cloudResourcePolicy, nil -} - -func (u *ServiceAccountIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - iamPolicy, err := resourceManagerToIamPolicy(policy) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = u.Config.NewIamClient(userAgent).Projects.ServiceAccounts.SetIamPolicy(u.GetResourceId(), &iam.SetIamPolicyRequest{ - Policy: iamPolicy, - }).Do() - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ServiceAccountIamUpdater) GetResourceId() string { - return u.serviceAccountId -} - -func (u *ServiceAccountIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-service-account-%s", u.serviceAccountId) -} - 
-func (u *ServiceAccountIamUpdater) DescribeResource() string { - return fmt.Sprintf("service account '%s'", u.serviceAccountId) -} - -func resourceManagerToIamPolicy(p *cloudresourcemanager.Policy) (*iam.Policy, error) { - out := &iam.Policy{} - err := Convert(p, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a v1 policy to a iam policy: {{err}}", err) - } - return out, nil -} - -func iamToResourceManagerPolicy(p *iam.Policy) (*cloudresourcemanager.Policy, error) { - out := &cloudresourcemanager.Policy{} - err := Convert(p, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a iam policy to a v1 policy: {{err}}", err) - } - return out, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_sourcerepo_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_sourcerepo_repository.go deleted file mode 100644 index 8374eaa10a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_sourcerepo_repository.go +++ /dev/null @@ -1,208 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "regexp" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var SourceRepoRepositoryIamSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "repository": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: SourceRepoRepositoryDiffSuppress, - }, -} - -func SourceRepoRepositoryDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - oldParts := regexp.MustCompile("projects/[^/]+/repos/").Split(old, -1) - if len(oldParts) == 2 { - return oldParts[1] == new - } - return new == old -} - -type SourceRepoRepositoryIamUpdater struct { - project string - repository string - d TerraformResourceData - Config *Config -} - -func SourceRepoRepositoryIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("repository"); ok { - values["repository"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/repos/(?P.+)", "(?P.+)"}, d, config, d.Get("repository").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &SourceRepoRepositoryIamUpdater{ - project: values["project"], - repository: values["repository"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := 
d.Set("repository", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting repository: %s", err) - } - - return u, nil -} - -func SourceRepoRepositoryIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/repos/(?P.+)", "(?P.+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &SourceRepoRepositoryIamUpdater{ - project: values["project"], - repository: values["repository"], - d: d, - Config: config, - } - if err := d.Set("repository", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting repository: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *SourceRepoRepositoryIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyRepositoryUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *SourceRepoRepositoryIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := 
u.qualifyRepositoryUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *SourceRepoRepositoryIamUpdater) qualifyRepositoryUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{SourceRepoBasePath}}%s:%s", fmt.Sprintf("projects/%s/repos/%s", u.project, u.repository), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *SourceRepoRepositoryIamUpdater) GetResourceId() string { - return fmt.Sprintf("projects/%s/repos/%s", u.project, u.repository) -} - -func (u *SourceRepoRepositoryIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-sourcerepo-repository-%s", u.GetResourceId()) -} - -func (u *SourceRepoRepositoryIamUpdater) DescribeResource() string { - return fmt.Sprintf("sourcerepo repository %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_spanner_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_spanner_database.go deleted file mode 100644 index 303d1adf27..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_spanner_database.go +++ /dev/null @@ -1,171 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" - "google.golang.org/api/spanner/v1" -) - -var 
IamSpannerDatabaseSchema = map[string]*schema.Schema{ - "instance": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "database": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -type SpannerDatabaseIamUpdater struct { - project string - instance string - database string - d TerraformResourceData - Config *Config -} - -func NewSpannerDatabaseIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - return &SpannerDatabaseIamUpdater{ - project: project, - instance: d.Get("instance").(string), - database: d.Get("database").(string), - d: d, - Config: config, - }, nil -} - -func SpannerDatabaseIdParseFunc(d *schema.ResourceData, config *Config) error { - return parseImportId([]string{"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config) -} - -func (u *SpannerDatabaseIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewSpannerClient(userAgent).Projects.Instances.Databases.GetIamPolicy(spannerDatabaseId{ - Project: u.project, - Database: u.database, - Instance: u.instance, - }.databaseUri(), &spanner.GetIamPolicyRequest{ - Options: &spanner.GetPolicyOptions{RequestedPolicyVersion: IamPolicyVersion}, - }).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := spannerToResourceManagerPolicy(p) - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy.Version = IamPolicyVersion - - return cloudResourcePolicy, nil -} - -func (u *SpannerDatabaseIamUpdater) 
SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - spannerPolicy, err := resourceManagerToSpannerPolicy(policy) - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - spannerPolicy.Version = IamPolicyVersion - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = u.Config.NewSpannerClient(userAgent).Projects.Instances.Databases.SetIamPolicy(spannerDatabaseId{ - Project: u.project, - Database: u.database, - Instance: u.instance, - }.databaseUri(), &spanner.SetIamPolicyRequest{ - Policy: spannerPolicy, - }).Do() - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *SpannerDatabaseIamUpdater) GetResourceId() string { - return spannerDatabaseId{ - Project: u.project, - Instance: u.instance, - Database: u.database, - }.terraformId() -} - -func (u *SpannerDatabaseIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-spanner-database-%s-%s-%s", u.project, u.instance, u.database) -} - -func (u *SpannerDatabaseIamUpdater) DescribeResource() string { - return fmt.Sprintf("Spanner Database: %s/%s/%s", u.project, u.instance, u.database) -} - -func resourceManagerToSpannerPolicy(p *cloudresourcemanager.Policy) (*spanner.Policy, error) { - out := &spanner.Policy{} - err := Convert(p, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a resourcemanager policy to a spanner policy: {{err}}", err) - } - return out, nil -} - -func spannerToResourceManagerPolicy(p *spanner.Policy) (*cloudresourcemanager.Policy, error) { - out := &cloudresourcemanager.Policy{} - err := Convert(p, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a spanner policy to a resourcemanager policy: {{err}}", err) - } - return out, nil -} - -type spannerDatabaseId struct { - Project string - Instance string - 
Database string -} - -func (s spannerDatabaseId) terraformId() string { - return fmt.Sprintf("%s/%s/%s", s.Project, s.Instance, s.Database) -} - -func (s spannerDatabaseId) parentProjectUri() string { - return fmt.Sprintf("projects/%s", s.Project) -} - -func (s spannerDatabaseId) parentInstanceUri() string { - return fmt.Sprintf("%s/instances/%s", s.parentProjectUri(), s.Instance) -} - -func (s spannerDatabaseId) databaseUri() string { - return fmt.Sprintf("%s/databases/%s", s.parentInstanceUri(), s.Database) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_spanner_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_spanner_instance.go deleted file mode 100644 index 8a02fe63f5..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_spanner_instance.go +++ /dev/null @@ -1,168 +0,0 @@ -package google - -import ( - "fmt" - "regexp" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" - spanner "google.golang.org/api/spanner/v1" -) - -var IamSpannerInstanceSchema = map[string]*schema.Schema{ - "instance": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -type SpannerInstanceIamUpdater struct { - project string - instance string - d TerraformResourceData - Config *Config -} - -func NewSpannerInstanceIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - return &SpannerInstanceIamUpdater{ - project: project, - instance: d.Get("instance").(string), - d: d, - Config: config, - }, nil -} - -func SpannerInstanceIdParseFunc(d *schema.ResourceData, config *Config) error { 
- id, err := extractSpannerInstanceId(d.Id()) - if err != nil { - return err - } - if err := d.Set("instance", id.Instance); err != nil { - return fmt.Errorf("Error setting instance: %s", err) - } - if err := d.Set("project", id.Project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - - // Explicitly set the id so imported resources have the same ID format as non-imported ones. - d.SetId(id.terraformId()) - return nil -} - -func (u *SpannerInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewSpannerClient(userAgent).Projects.Instances.GetIamPolicy(spannerInstanceId{ - Project: u.project, - Instance: u.instance, - }.instanceUri(), &spanner.GetIamPolicyRequest{ - Options: &spanner.GetPolicyOptions{RequestedPolicyVersion: IamPolicyVersion}, - }).Do() - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := spannerToResourceManagerPolicy(p) - - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy.Version = IamPolicyVersion - - return cloudResourcePolicy, nil -} - -func (u *SpannerInstanceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - spannerPolicy, err := resourceManagerToSpannerPolicy(policy) - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - spannerPolicy.Version = IamPolicyVersion - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = u.Config.NewSpannerClient(userAgent).Projects.Instances.SetIamPolicy(spannerInstanceId{ - Project: u.project, - Instance: u.instance, - }.instanceUri(), 
&spanner.SetIamPolicyRequest{ - Policy: spannerPolicy, - }).Do() - - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *SpannerInstanceIamUpdater) GetResourceId() string { - return spannerInstanceId{ - Project: u.project, - Instance: u.instance, - }.terraformId() -} - -func (u *SpannerInstanceIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-spanner-instance-%s-%s", u.project, u.instance) -} - -func (u *SpannerInstanceIamUpdater) DescribeResource() string { - return fmt.Sprintf("Spanner Instance: %s/%s", u.project, u.instance) -} - -type spannerInstanceId struct { - Project string - Instance string -} - -func (s spannerInstanceId) terraformId() string { - return fmt.Sprintf("%s/%s", s.Project, s.Instance) -} - -func (s spannerInstanceId) parentProjectUri() string { - return fmt.Sprintf("projects/%s", s.Project) -} - -func (s spannerInstanceId) instanceUri() string { - return fmt.Sprintf("%s/instances/%s", s.parentProjectUri(), s.Instance) -} - -func (s spannerInstanceId) instanceConfigUri(c string) string { - return fmt.Sprintf("%s/instanceConfigs/%s", s.parentProjectUri(), c) -} - -func extractSpannerInstanceId(id string) (*spannerInstanceId, error) { - if !regexp.MustCompile("^" + ProjectRegex + "/[a-z0-9-]+$").Match([]byte(id)) { - return nil, fmt.Errorf("Invalid spanner id format, expecting {projectId}/{instanceId}") - } - parts := strings.Split(id, "/") - return &spannerInstanceId{ - Project: parts[0], - Instance: parts[1], - }, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_storage_bucket.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_storage_bucket.go deleted file mode 100644 index f13b66cb04..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_storage_bucket.go +++ 
/dev/null @@ -1,174 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var StorageBucketIamSchema = map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: StorageBucketDiffSuppress, - }, -} - -func StorageBucketDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - return compareResourceNames("", old, new, nil) -} - -type StorageBucketIamUpdater struct { - bucket string - d TerraformResourceData - Config *Config -} - -func StorageBucketIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("bucket"); ok { - values["bucket"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"b/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("bucket").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &StorageBucketIamUpdater{ - bucket: values["bucket"], - d: d, - Config: config, - } - - if err := d.Set("bucket", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting bucket: %s", err) - } - - return u, nil -} - -func StorageBucketIdParseFunc(d *schema.ResourceData, config *Config) 
error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"b/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &StorageBucketIamUpdater{ - bucket: values["bucket"], - d: d, - Config: config, - } - if err := d.Set("bucket", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting bucket: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *StorageBucketIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyBucketUrl("iam") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - url, err = addQueryParams(url, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", IamPolicyVersion)}) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "GET", "", url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *StorageBucketIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := json - - url, err := u.qualifyBucketUrl("iam") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "PUT", "", url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - 
} - - return nil -} - -func (u *StorageBucketIamUpdater) qualifyBucketUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{StorageBasePath}}%s/%s", fmt.Sprintf("b/%s", u.bucket), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *StorageBucketIamUpdater) GetResourceId() string { - return fmt.Sprintf("b/%s", u.bucket) -} - -func (u *StorageBucketIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-storage-bucket-%s", u.GetResourceId()) -} - -func (u *StorageBucketIamUpdater) DescribeResource() string { - return fmt.Sprintf("storage bucket %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_tags_tag_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_tags_tag_key.go deleted file mode 100644 index aeb197f2cd..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_tags_tag_key.go +++ /dev/null @@ -1,167 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var TagsTagKeyIamSchema = map[string]*schema.Schema{ - "tag_key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type TagsTagKeyIamUpdater struct { - tagKey string - d TerraformResourceData - Config *Config -} - -func TagsTagKeyIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("tag_key"); ok { - values["tag_key"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"tagKeys/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("tag_key").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &TagsTagKeyIamUpdater{ - tagKey: values["tag_key"], - d: d, - Config: config, - } - - if err := d.Set("tag_key", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting tag_key: %s", err) - } - - return u, nil -} - -func TagsTagKeyIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"tagKeys/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &TagsTagKeyIamUpdater{ - tagKey: values["tag_key"], - d: d, - Config: config, - } - if err := d.Set("tag_key", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting tag_key: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *TagsTagKeyIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := 
u.qualifyTagKeyUrl("getIamPolicy") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", "", url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *TagsTagKeyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTagKeyUrl("setIamPolicy") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(u.Config, "POST", "", url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *TagsTagKeyIamUpdater) qualifyTagKeyUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{TagsBasePath}}%s:%s", fmt.Sprintf("tagKeys/%s", u.tagKey), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *TagsTagKeyIamUpdater) GetResourceId() string { - return fmt.Sprintf("tagKeys/%s", u.tagKey) -} - -func (u *TagsTagKeyIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-tags-tagkey-%s", u.GetResourceId()) -} - -func (u *TagsTagKeyIamUpdater) DescribeResource() string { - return fmt.Sprintf("tags tagkey %q", u.GetResourceId()) 
-} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_tags_tag_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_tags_tag_value.go deleted file mode 100644 index 3e09bc3bd3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_tags_tag_value.go +++ /dev/null @@ -1,167 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -var TagsTagValueIamSchema = map[string]*schema.Schema{ - "tag_value": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type TagsTagValueIamUpdater struct { - tagValue string - d TerraformResourceData - Config *Config -} - -func TagsTagValueIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("tag_value"); ok { - values["tag_value"] = v.(string) - } - - // We may have gotten either a long or short name, so attempt to parse long name if possible - m, err := getImportIdQualifiers([]string{"tagValues/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("tag_value").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := 
&TagsTagValueIamUpdater{ - tagValue: values["tag_value"], - d: d, - Config: config, - } - - if err := d.Set("tag_value", u.GetResourceId()); err != nil { - return nil, fmt.Errorf("Error setting tag_value: %s", err) - } - - return u, nil -} - -func TagsTagValueIdParseFunc(d *schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"tagValues/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &TagsTagValueIamUpdater{ - tagValue: values["tag_value"], - d: d, - Config: config, - } - if err := d.Set("tag_value", u.GetResourceId()); err != nil { - return fmt.Errorf("Error setting tag_value: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *TagsTagValueIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - url, err := u.qualifyTagValueUrl("getIamPolicy") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return nil, err - } - - policy, err := SendRequest(u.Config, "POST", "", url, userAgent, obj) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *TagsTagValueIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTagValueUrl("setIamPolicy") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) - if err != nil { - return err - } - - 
_, err = SendRequestWithTimeout(u.Config, "POST", "", url, userAgent, obj, u.d.Timeout(schema.TimeoutCreate)) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *TagsTagValueIamUpdater) qualifyTagValueUrl(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{TagsBasePath}}%s:%s", fmt.Sprintf("tagValues/%s", u.tagValue), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *TagsTagValueIamUpdater) GetResourceId() string { - return fmt.Sprintf("tagValues/%s", u.tagValue) -} - -func (u *TagsTagValueIamUpdater) GetMutexKey() string { - return fmt.Sprintf("iam-tags-tagvalue-%s", u.GetResourceId()) -} - -func (u *TagsTagValueIamUpdater) DescribeResource() string { - return fmt.Sprintf("tags tagvalue %q", u.GetResourceId()) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_workforce_pool_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_workforce_pool_operation.go deleted file mode 100644 index ec24e5fb52..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_workforce_pool_operation.go +++ /dev/null @@ -1,60 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "time" -) - -type IAMWorkforcePoolOperationWaiter struct { - Config *Config - UserAgent string - CommonOperationWaiter -} - -func (w *IAMWorkforcePoolOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. - url := fmt.Sprintf("%s%s", w.Config.IAMWorkforcePoolBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", "", url, w.UserAgent, nil) -} - -func createIAMWorkforcePoolWaiter(config *Config, op map[string]interface{}, activity, userAgent string) (*IAMWorkforcePoolOperationWaiter, error) { - w := &IAMWorkforcePoolOperationWaiter{ - Config: config, - UserAgent: userAgent, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func IAMWorkforcePoolOperationWaitTime(config *Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createIAMWorkforcePoolWaiter(config, op, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/kms_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/kms_utils.go deleted file mode 100644 index 9f962de472..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/kms_utils.go +++ /dev/null @@ -1,268 +0,0 @@ -package google - -import ( - "fmt" - "regexp" - "strconv" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudkms/v1" -) - -type kmsKeyRingId struct { - Project string - Location string - Name string -} - -func (s *kmsKeyRingId) keyRingId() string { - return fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", s.Project, s.Location, s.Name) -} - -func (s *kmsKeyRingId) terraformId() string { - return fmt.Sprintf("%s/%s/%s", s.Project, s.Location, s.Name) -} - -func parseKmsKeyRingId(id string, config *Config) (*kmsKeyRingId, error) { - parts := strings.Split(id, "/") - - keyRingIdRegex := regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})$") - keyRingIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})$") - keyRingRelativeLinkRegex := regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/keyRings/([a-zA-Z0-9_-]{1,63})$") - - if keyRingIdRegex.MatchString(id) { - return &kmsKeyRingId{ - Project: parts[0], - Location: parts[1], - Name: parts[2], - }, nil - } - - if keyRingIdWithoutProjectRegex.MatchString(id) { - if config.Project == "" { - return nil, fmt.Errorf("The default project for the provider must be set when using the `{location}/{keyRingName}` id format.") - } - - return &kmsKeyRingId{ - Project: config.Project, - Location: parts[0], - Name: parts[1], - }, nil - } - - if parts := keyRingRelativeLinkRegex.FindStringSubmatch(id); 
parts != nil { - return &kmsKeyRingId{ - Project: parts[1], - Location: parts[2], - Name: parts[3], - }, nil - } - return nil, fmt.Errorf("Invalid KeyRing id format, expecting `{projectId}/{locationId}/{keyRingName}` or `{locationId}/{keyRingName}.`") -} - -func kmsCryptoKeyRingsEquivalent(k, old, new string, d *schema.ResourceData) bool { - keyRingIdWithSpecifiersRegex := regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-])+/keyRings/([a-zA-Z0-9_-]{1,63})$") - normalizedKeyRingIdRegex := regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})$") - if matches := keyRingIdWithSpecifiersRegex.FindStringSubmatch(new); matches != nil { - normMatches := normalizedKeyRingIdRegex.FindStringSubmatch(old) - return normMatches != nil && normMatches[1] == matches[1] && normMatches[2] == matches[2] && normMatches[3] == matches[3] - } - return false -} - -type KmsCryptoKeyId struct { - KeyRingId kmsKeyRingId - Name string -} - -func (s *KmsCryptoKeyId) cryptoKeyId() string { - return fmt.Sprintf("%s/cryptoKeys/%s", s.KeyRingId.keyRingId(), s.Name) -} - -func (s *KmsCryptoKeyId) terraformId() string { - return fmt.Sprintf("%s/%s", s.KeyRingId.terraformId(), s.Name) -} - -type kmsCryptoKeyVersionId struct { - CryptoKeyId KmsCryptoKeyId - Name string -} - -func (s *kmsCryptoKeyVersionId) cryptoKeyVersionId() string { - return fmt.Sprintf(s.Name) -} - -func (s *kmsCryptoKeyVersionId) terraformId() string { - return fmt.Sprintf("%s/%s", s.CryptoKeyId.terraformId(), s.Name) -} - -func validateKmsCryptoKeyRotationPeriod(value interface{}, _ string) (ws []string, errors []error) { - period := value.(string) - pattern := regexp.MustCompile(`^([0-9.]*\d)s$`) - match := pattern.FindStringSubmatch(period) - - if len(match) == 0 { - errors = append(errors, fmt.Errorf("Invalid rotation period format: %s", period)) - // Cannot continue to validate because we cannot extract a number. 
- return - } - - number := match[1] - seconds, err := strconv.ParseFloat(number, 64) - - if err != nil { - errors = append(errors, err) - } else { - if seconds < 86400.0 { - errors = append(errors, fmt.Errorf("Rotation period must be greater than one day")) - } - - parts := strings.Split(number, ".") - - if len(parts) > 1 && len(parts[1]) > 9 { - errors = append(errors, fmt.Errorf("Rotation period cannot have more than 9 fractional digits")) - } - } - - return -} - -func kmsCryptoKeyNextRotation(now time.Time, period string) (result string, err error) { - var duration time.Duration - - duration, err = time.ParseDuration(period) - - if err == nil { - result = now.UTC().Add(duration).Format(time.RFC3339Nano) - } - - return -} - -func parseKmsCryptoKeyId(id string, config *Config) (*KmsCryptoKeyId, error) { - parts := strings.Split(id, "/") - - cryptoKeyIdRegex := regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})/([a-zA-Z0-9_-]{1,63})$") - cryptoKeyIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})/([a-zA-Z0-9_-]{1,63})$") - cryptoKeyRelativeLinkRegex := regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/keyRings/([a-zA-Z0-9_-]{1,63})/cryptoKeys/([a-zA-Z0-9_-]{1,63})$") - - if cryptoKeyIdRegex.MatchString(id) { - return &KmsCryptoKeyId{ - KeyRingId: kmsKeyRingId{ - Project: parts[0], - Location: parts[1], - Name: parts[2], - }, - Name: parts[3], - }, nil - } - - if cryptoKeyIdWithoutProjectRegex.MatchString(id) { - if config.Project == "" { - return nil, fmt.Errorf("The default project for the provider must be set when using the `{location}/{keyRingName}/{cryptoKeyName}` id format.") - } - - return &KmsCryptoKeyId{ - KeyRingId: kmsKeyRingId{ - Project: config.Project, - Location: parts[0], - Name: parts[1], - }, - Name: parts[2], - }, nil - } - - if parts := cryptoKeyRelativeLinkRegex.FindStringSubmatch(id); parts != nil { - return &KmsCryptoKeyId{ - KeyRingId: kmsKeyRingId{ - Project: 
parts[1], - Location: parts[2], - Name: parts[3], - }, - Name: parts[4], - }, nil - } - - return nil, fmt.Errorf("Invalid CryptoKey id format, expecting `{projectId}/{locationId}/{KeyringName}/{cryptoKeyName}` or `{locationId}/{keyRingName}/{cryptoKeyName}, got id: %s`", id) -} -func parseKmsCryptoKeyVersionId(id string, config *Config) (*kmsCryptoKeyVersionId, error) { - cryptoKeyVersionRelativeLinkRegex := regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/keyRings/([a-zA-Z0-9_-]{1,63})/cryptoKeys/([a-zA-Z0-9_-]{1,63})/cryptoKeyVersions/([a-zA-Z0-9_-]{1,63})$") - - if parts := cryptoKeyVersionRelativeLinkRegex.FindStringSubmatch(id); parts != nil { - return &kmsCryptoKeyVersionId{ - CryptoKeyId: KmsCryptoKeyId{ - KeyRingId: kmsKeyRingId{ - Project: parts[1], - Location: parts[2], - Name: parts[3], - }, - Name: parts[4], - }, - Name: "projects/" + parts[1] + "/locations/" + parts[2] + "/keyRings/" + parts[3] + "/cryptoKeys/" + parts[4] + "/cryptoKeyVersions/" + parts[5], - }, nil - } - return nil, fmt.Errorf("Invalid CryptoKeyVersion id format, expecting `{projectId}/{locationId}/{KeyringName}/{cryptoKeyName}/{cryptoKeyVersion}` or `{locationId}/{keyRingName}/{cryptoKeyName}/{cryptoKeyVersion}, got id: %s`", id) -} - -func clearCryptoKeyVersions(cryptoKeyId *KmsCryptoKeyId, userAgent string, config *Config) error { - versionsClient := config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.CryptoKeyVersions - - listCall := versionsClient.List(cryptoKeyId.cryptoKeyId()) - if config.UserProjectOverride { - listCall.Header().Set("X-Goog-User-Project", cryptoKeyId.KeyRingId.Project) - } - versionsResponse, err := listCall.Do() - - if err != nil { - return err - } - - for _, version := range versionsResponse.CryptoKeyVersions { - // skip the versions that have been destroyed earlier - if version.State != "DESTROYED" && version.State != "DESTROY_SCHEDULED" { - request := &cloudkms.DestroyCryptoKeyVersionRequest{} - destroyCall := 
versionsClient.Destroy(version.Name, request) - if config.UserProjectOverride { - destroyCall.Header().Set("X-Goog-User-Project", cryptoKeyId.KeyRingId.Project) - } - _, err = destroyCall.Do() - - if err != nil { - return err - } - } - } - - return nil -} - -func deleteCryptoKeyVersions(cryptoKeyVersionId *kmsCryptoKeyVersionId, d *schema.ResourceData, userAgent string, config *Config) error { - versionsClient := config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.CryptoKeyVersions - request := &cloudkms.DestroyCryptoKeyVersionRequest{} - destroyCall := versionsClient.Destroy(cryptoKeyVersionId.Name, request) - if config.UserProjectOverride { - destroyCall.Header().Set("X-Goog-User-Project", cryptoKeyVersionId.CryptoKeyId.KeyRingId.Project) - } - _, err := destroyCall.Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ID %s", cryptoKeyVersionId.Name)) - } - - return nil -} - -func disableCryptoKeyRotation(cryptoKeyId *KmsCryptoKeyId, userAgent string, config *Config) error { - keyClient := config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys - patchCall := keyClient.Patch(cryptoKeyId.cryptoKeyId(), &cloudkms.CryptoKey{ - NullFields: []string{"rotationPeriod", "nextRotationTime"}, - }). 
- UpdateMask("rotationPeriod,nextRotationTime") - if config.UserProjectOverride { - patchCall.Header().Set("X-Goog-User-Project", cryptoKeyId.KeyRingId.Project) - } - _, err := patchCall.Do() - - return err -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_utils.go deleted file mode 100644 index 577dc57505..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_utils.go +++ /dev/null @@ -1,60 +0,0 @@ -package google - -import ( - "fmt" - "regexp" -) - -// loggingSinkResourceTypes contains all the possible Stackdriver Logging resource types. Used to parse ids safely. -var loggingSinkResourceTypes = []string{ - "billingAccounts", - "folders", - "organizations", - "projects", -} - -// LoggingSinkId represents the parts that make up the canonical id used within terraform for a logging resource. -type LoggingSinkId struct { - resourceType string - resourceId string - name string -} - -// loggingSinkIdRegex matches valid logging sink canonical ids -var loggingSinkIdRegex = regexp.MustCompile("(.+)/(.+)/sinks/(.+)") - -// canonicalId returns the LoggingSinkId as the canonical id used within terraform. -func (l LoggingSinkId) canonicalId() string { - return fmt.Sprintf("%s/%s/sinks/%s", l.resourceType, l.resourceId, l.name) -} - -// parent returns the "parent-level" resource that the sink is in (e.g. `folders/foo` for id `folders/foo/sinks/bar`) -func (l LoggingSinkId) parent() string { - return fmt.Sprintf("%s/%s", l.resourceType, l.resourceId) -} - -// parseLoggingSinkId parses a canonical id into a LoggingSinkId, or returns an error on failure. 
-func parseLoggingSinkId(id string) (*LoggingSinkId, error) { - parts := loggingSinkIdRegex.FindStringSubmatch(id) - if parts == nil { - return nil, fmt.Errorf("unable to parse logging sink id %#v", id) - } - // If our resourceType is not a valid logging sink resource type, complain loudly - validLoggingSinkResourceType := false - for _, v := range loggingSinkResourceTypes { - if v == parts[1] { - validLoggingSinkResourceType = true - break - } - } - - if !validLoggingSinkResourceType { - return nil, fmt.Errorf("Logging resource type %s is not valid. Valid resource types: %#v", parts[1], - loggingSinkResourceTypes) - } - return &LoggingSinkId{ - resourceType: parts[1], - resourceId: parts[2], - name: parts[3], - }, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/memcache_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/memcache_operation.go deleted file mode 100644 index ec8586e258..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/memcache_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type MemcacheOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *MemcacheOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. - url := fmt.Sprintf("%s%s", w.Config.MemcacheBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createMemcacheWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*MemcacheOperationWaiter, error) { - w := &MemcacheOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func MemcacheOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createMemcacheWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func MemcacheOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createMemcacheWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/metadata.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/metadata.go deleted file mode 100644 index 24c0f66152..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/metadata.go +++ /dev/null @@ -1,174 +0,0 @@ -package google - -import ( - "errors" - "fmt" - "log" - "sort" - - "google.golang.org/api/compute/v1" -) - -const METADATA_FINGERPRINT_RETRIES = 10 - -// Since the google compute API uses optimistic locking, there is a chance -// we need to resubmit our updated metadata. To do this, you need to provide -// an update function that attempts to submit your metadata -func MetadataRetryWrapper(update func() error) error { - attempt := 0 - for attempt < METADATA_FINGERPRINT_RETRIES { - err := update() - if err == nil { - return nil - } - - if ok, _ := isFingerprintError(err); !ok { - // Something else went wrong, don't retry - return err - } - - log.Printf("[DEBUG] Dismissed an error as retryable as a fingerprint mismatch: %s", err) - attempt++ - } - return fmt.Errorf("Failed to update metadata after %d retries", attempt) -} - -// Update the metadata (serverMD) according to the provided diff (oldMDMap v -// newMDMap). 
-func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interface{}, serverMD *compute.Metadata) { - curMDMap := make(map[string]string) - // Load metadata on server into map - for _, kv := range serverMD.Items { - // If the server state has a key that we had in our old - // state, but not in our new state, we should delete it - _, okOld := oldMDMap[kv.Key] - _, okNew := newMDMap[kv.Key] - if okOld && !okNew { - continue - } else { - curMDMap[kv.Key] = *kv.Value - } - } - - // Insert new metadata into existing metadata (overwriting when needed) - for key, val := range newMDMap { - curMDMap[key] = val.(string) - } - - // Reformat old metadata into a list - serverMD.Items = nil - for key, val := range curMDMap { - v := val - serverMD.Items = append(serverMD.Items, &compute.MetadataItems{ - Key: key, - Value: &v, - }) - } -} - -// Update the beta metadata (serverMD) according to the provided diff (oldMDMap v -// newMDMap). -func BetaMetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interface{}, serverMD *compute.Metadata) { - curMDMap := make(map[string]string) - // Load metadata on server into map - for _, kv := range serverMD.Items { - // If the server state has a key that we had in our old - // state, but not in our new state, we should delete it - _, okOld := oldMDMap[kv.Key] - _, okNew := newMDMap[kv.Key] - if okOld && !okNew { - continue - } else { - curMDMap[kv.Key] = *kv.Value - } - } - - // Insert new metadata into existing metadata (overwriting when needed) - for key, val := range newMDMap { - curMDMap[key] = val.(string) - } - - // Reformat old metadata into a list - serverMD.Items = nil - for key, val := range curMDMap { - v := val - serverMD.Items = append(serverMD.Items, &compute.MetadataItems{ - Key: key, - Value: &v, - }) - } -} - -func expandComputeMetadata(m map[string]interface{}) []*compute.MetadataItems { - metadata := make([]*compute.MetadataItems, len(m)) - var keys []string - for key := range m { - keys = 
append(keys, key) - } - sort.Strings(keys) - // Append new metadata to existing metadata - for _, key := range keys { - v := m[key].(string) - metadata = append(metadata, &compute.MetadataItems{ - Key: key, - Value: &v, - }) - } - - return metadata -} - -func flattenMetadataBeta(metadata *compute.Metadata) map[string]string { - metadataMap := make(map[string]string) - for _, item := range metadata.Items { - metadataMap[item.Key] = *item.Value - } - return metadataMap -} - -// This function differs from flattenMetadataBeta only in that it takes -// compute.metadata rather than compute.metadata as an argument. It should -// be removed in favour of flattenMetadataBeta if/when all resources using it get -// beta support. -func flattenMetadata(metadata *compute.Metadata) map[string]interface{} { - metadataMap := make(map[string]interface{}) - for _, item := range metadata.Items { - metadataMap[item.Key] = *item.Value - } - return metadataMap -} - -func resourceInstanceMetadata(d TerraformResourceData) (*compute.Metadata, error) { - m := &compute.Metadata{} - mdMap := d.Get("metadata").(map[string]interface{}) - if v, ok := d.GetOk("metadata_startup_script"); ok && v.(string) != "" { - if w, ok := mdMap["startup-script"]; ok { - // metadata.startup-script could be from metadata_startup_script in the first place - if v != w { - return nil, errors.New("Cannot provide both metadata_startup_script and metadata.startup-script.") - } - } - mdMap["startup-script"] = v - } - if len(mdMap) > 0 { - m.Items = make([]*compute.MetadataItems, 0, len(mdMap)) - var keys []string - for k := range mdMap { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := mdMap[k].(string) - m.Items = append(m.Items, &compute.MetadataItems{ - Key: k, - Value: &v, - }) - } - - // Set the fingerprint. If the metadata has never been set before - // then this will just be blank. 
- m.Fingerprint = d.Get("metadata_fingerprint").(string) - } - - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/ml_engine_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/ml_engine_operation.go deleted file mode 100644 index 26244f05e0..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/ml_engine_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type MLEngineOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *MLEngineOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.MLEngineBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createMLEngineWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*MLEngineOperationWaiter, error) { - w := &MLEngineOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func MLEngineOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createMLEngineWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func MLEngineOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createMLEngineWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/network_management_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/network_management_operation.go deleted file mode 100644 index 410bbb185b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/network_management_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type NetworkManagementOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *NetworkManagementOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.NetworkManagementBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createNetworkManagementWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*NetworkManagementOperationWaiter, error) { - w := &NetworkManagementOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func NetworkManagementOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createNetworkManagementWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func NetworkManagementOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createNetworkManagementWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/network_services_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/network_services_operation.go deleted file mode 100644 index de387d175f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/network_services_operation.go +++ /dev/null @@ -1,62 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "time" -) - -type NetworkServicesOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *NetworkServicesOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.NetworkServicesBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createNetworkServicesWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*NetworkServicesOperationWaiter, error) { - w := &NetworkServicesOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func NetworkServicesOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createNetworkServicesWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/node_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/node_config.go deleted file mode 100644 index c08a03b95e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/node_config.go +++ /dev/null @@ -1,934 +0,0 @@ -package google - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "google.golang.org/api/container/v1" -) - -// Matches gke-default scope from https://cloud.google.com/sdk/gcloud/reference/container/clusters/create -var defaultOauthScopes = []string{ - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring", - 
"https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/servicecontrol", - "https://www.googleapis.com/auth/trace.append", -} - -func schemaLoggingVariant() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: `Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.`, - Default: "DEFAULT", - ValidateFunc: validation.StringInSlice([]string{"DEFAULT", "MAX_THROUGHPUT"}, false), - } -} - -func schemaGcfsConfig(forceNew bool) *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `GCFS configuration for this node.`, - ForceNew: forceNew, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - ForceNew: forceNew, - Description: `Whether or not GCFS is enabled`, - }, - }, - }, - } -} - -func schemaNodeConfig() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The configuration of the nodepool`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disk_size_gb": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.IntAtLeast(10), - Description: `Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.`, - }, - - "disk_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `Type of the disk attached to each node. 
Such as pd-standard, pd-balanced or pd-ssd`, - }, - - "guest_accelerator": { - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - // Legacy config mode allows removing GPU's from an existing resource - // See https://www.terraform.io/docs/configuration/attr-as-blocks.html - ConfigMode: schema.SchemaConfigModeAttr, - Description: `List of the type and count of accelerator cards attached to the instance.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "count": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: `The number of the accelerator cards exposed to an instance.`, - }, - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The accelerator type resource name.`, - }, - "gpu_partition_size": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)`, - }, - "gpu_sharing_config": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, - ConfigMode: schema.SchemaConfigModeAttr, - Description: `Configuration for GPU sharing.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "gpu_sharing_strategy": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The type of GPU sharing strategy to enable on the GPU node. 
Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)`, - }, - "max_shared_clients_per_gpu": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: `The maximum number of containers that can share a GPU.`, - }, - }, - }, - }, - }, - }, - }, - - "image_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - DiffSuppressFunc: caseDiffSuppress, - Description: `The image type to use for this node. Note that for a given image type, the latest version of it will be used.`, - }, - - "labels": { - Type: schema.TypeMap, - Optional: true, - // Computed=true because GKE Sandbox will automatically add labels to nodes that can/cannot run sandboxed pods. - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node.`, - }, - - "resource_labels": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `The GCE resource labels (a map of key/value pairs) to be applied to the node pool.`, - }, - - "local_ssd_count": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.IntAtLeast(0), - Description: `The number of local SSD disks to be attached to the node.`, - }, - - "logging_variant": schemaLoggingVariant(), - - "local_nvme_ssd_block_config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Parameters for raw-block local NVMe SSDs.`, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "local_ssd_count": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - ValidateFunc: validation.IntAtLeast(0), - Description: `Number of raw-block local NVMe SSD disks to be attached to the node. 
Each local SSD is 375 GB in size.`, - }, - }, - }, - }, - - "gcfs_config": schemaGcfsConfig(true), - - "gvnic": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Enable or disable gvnic in the node pool.`, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - ForceNew: true, - Description: `Whether or not gvnic is enabled`, - }, - }, - }, - }, - - "machine_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The name of a Google Compute Engine machine type.`, - }, - - "metadata": { - Type: schema.TypeMap, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `The metadata key/value pairs assigned to instances in the cluster.`, - }, - - "min_cpu_platform": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `Minimum CPU platform to be used by this instance. 
The instance may be scheduled on the specified or newer CPU platform.`, - }, - - "oauth_scopes": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The set of Google API scopes to be made available on all of the node VMs.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - StateFunc: func(v interface{}) string { - return canonicalizeServiceScope(v.(string)) - }, - }, - DiffSuppressFunc: containerClusterAddedScopesSuppress, - Set: stringScopeHashcode, - }, - - "preemptible": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - Description: `Whether the nodes are created as preemptible VM instances.`, - }, - "reservation_affinity": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `The reservation affinity configuration for the node pool.`, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "consume_reservation_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Corresponds to the type of reservation consumption.`, - ValidateFunc: validation.StringInSlice([]string{"UNSPECIFIED", "NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"}, false), - }, - "key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The label key of a reservation resource.`, - }, - "values": { - Type: schema.TypeSet, - Description: "The label values of the reservation resource.", - ForceNew: true, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "spot": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - Description: `Whether the nodes are created as spot VM instances.`, - }, - - "service_account": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The Google Cloud Platform Service Account to be used by the node VMs.`, - }, - - "tags": { - Type: schema.TypeList, - 
Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `The list of instance tags applied to all nodes.`, - }, - - "shielded_instance_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Description: `Shielded Instance options.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enable_secure_boot": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - Description: `Defines whether the instance has Secure Boot enabled.`, - }, - "enable_integrity_monitoring": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: true, - Description: `Defines whether the instance has integrity monitoring enabled.`, - }, - }, - }, - }, - - "taint": { - Type: schema.TypeList, - Optional: true, - // Computed=true because GKE Sandbox will automatically add taints to nodes that can/cannot run sandboxed pods. - Computed: true, - ForceNew: true, - // Legacy config mode allows explicitly defining an empty taint. 
- // See https://www.terraform.io/docs/configuration/attr-as-blocks.html - ConfigMode: schema.SchemaConfigModeAttr, - Description: `List of Kubernetes taints to be applied to each node.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Key for taint.`, - }, - "value": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Value for taint.`, - }, - "effect": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"NO_SCHEDULE", "PREFER_NO_SCHEDULE", "NO_EXECUTE"}, false), - Description: `Effect for taint.`, - }, - }, - }, - }, - - "workload_metadata_config": { - Computed: true, - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `The workload metadata configuration for this node.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "mode": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"MODE_UNSPECIFIED", "GCE_METADATA", "GKE_METADATA"}, false), - Description: `Mode is the configuration for how to expose metadata to workloads running on the node.`, - }, - }, - }, - }, - - "boot_disk_kms_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.`, - }, - // Note that AtLeastOneOf can't be set because this schema is reused by - // two different resources. 
- "kubelet_config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Node kubelet configs.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu_manager_policy": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"static", "none", ""}, false), - Description: `Control the CPU management policy on the node.`, - }, - "cpu_cfs_quota": { - Type: schema.TypeBool, - Optional: true, - Description: `Enable CPU CFS quota enforcement for containers that specify CPU limits.`, - }, - "cpu_cfs_quota_period": { - Type: schema.TypeString, - Optional: true, - Description: `Set the CPU CFS quota period value 'cpu.cfs_period_us'.`, - }, - "pod_pids_limit": { - Type: schema.TypeInt, - Optional: true, - Description: `Controls the maximum number of processes allowed to run in a pod.`, - }, - }, - }, - }, - - "linux_node_config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Parameters that can be configured on Linux nodes.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sysctls": { - Type: schema.TypeMap, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.`, - }, - }, - }, - }, - "node_group": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Setting this field will assign instances of this pool to run on the specified node group. 
This is useful for running workloads on sole tenant nodes.`, - }, - }, - }, - } -} - -func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefaults { - configs := configured.([]interface{}) - if len(configs) == 0 || configs[0] == nil { - return nil - } - config := configs[0].(map[string]interface{}) - - nodeConfigDefaults := &container.NodeConfigDefaults{} - if variant, ok := config["logging_variant"]; ok { - nodeConfigDefaults.LoggingConfig = &container.NodePoolLoggingConfig{ - VariantConfig: &container.LoggingVariantConfig{ - Variant: variant.(string), - }, - } - } - return nodeConfigDefaults -} - -func expandNodeConfig(v interface{}) *container.NodeConfig { - nodeConfigs := v.([]interface{}) - nc := &container.NodeConfig{ - // Defaults can't be set on a list/set in the schema, so set the default on create here. - OauthScopes: defaultOauthScopes, - } - if len(nodeConfigs) == 0 { - return nc - } - - nodeConfig := nodeConfigs[0].(map[string]interface{}) - - if v, ok := nodeConfig["machine_type"]; ok { - nc.MachineType = v.(string) - } - - if v, ok := nodeConfig["guest_accelerator"]; ok { - accels := v.([]interface{}) - guestAccelerators := make([]*container.AcceleratorConfig, 0, len(accels)) - for _, raw := range accels { - data := raw.(map[string]interface{}) - if data["count"].(int) == 0 { - continue - } - guestAcceleratorConfig := &container.AcceleratorConfig{ - AcceleratorCount: int64(data["count"].(int)), - AcceleratorType: data["type"].(string), - GpuPartitionSize: data["gpu_partition_size"].(string), - } - - if v, ok := data["gpu_sharing_config"]; ok && len(v.([]interface{})) > 0 { - gpuSharingConfig := data["gpu_sharing_config"].([]interface{})[0].(map[string]interface{}) - guestAcceleratorConfig.GpuSharingConfig = &container.GPUSharingConfig{ - GpuSharingStrategy: gpuSharingConfig["gpu_sharing_strategy"].(string), - MaxSharedClientsPerGpu: int64(gpuSharingConfig["max_shared_clients_per_gpu"].(int)), - } - } - - guestAccelerators = 
append(guestAccelerators, guestAcceleratorConfig) - } - nc.Accelerators = guestAccelerators - } - - if v, ok := nodeConfig["disk_size_gb"]; ok { - nc.DiskSizeGb = int64(v.(int)) - } - - if v, ok := nodeConfig["disk_type"]; ok { - nc.DiskType = v.(string) - } - - if v, ok := nodeConfig["local_ssd_count"]; ok { - nc.LocalSsdCount = int64(v.(int)) - } - - if v, ok := nodeConfig["logging_variant"]; ok { - nc.LoggingConfig = &container.NodePoolLoggingConfig{ - VariantConfig: &container.LoggingVariantConfig{ - Variant: v.(string), - }, - } - } - - if v, ok := nodeConfig["local_nvme_ssd_block_config"]; ok && len(v.([]interface{})) > 0 { - conf := v.([]interface{})[0].(map[string]interface{}) - nc.LocalNvmeSsdBlockConfig = &container.LocalNvmeSsdBlockConfig{ - LocalSsdCount: int64(conf["local_ssd_count"].(int)), - } - } - - if v, ok := nodeConfig["gcfs_config"]; ok && len(v.([]interface{})) > 0 { - conf := v.([]interface{})[0].(map[string]interface{}) - nc.GcfsConfig = &container.GcfsConfig{ - Enabled: conf["enabled"].(bool), - } - } - - if v, ok := nodeConfig["gvnic"]; ok && len(v.([]interface{})) > 0 { - conf := v.([]interface{})[0].(map[string]interface{}) - nc.Gvnic = &container.VirtualNIC{ - Enabled: conf["enabled"].(bool), - } - } - - if v, ok := nodeConfig["reservation_affinity"]; ok && len(v.([]interface{})) > 0 { - conf := v.([]interface{})[0].(map[string]interface{}) - valuesSet := conf["values"].(*schema.Set) - values := make([]string, valuesSet.Len()) - for i, value := range valuesSet.List() { - values[i] = value.(string) - } - - nc.ReservationAffinity = &container.ReservationAffinity{ - ConsumeReservationType: conf["consume_reservation_type"].(string), - Key: conf["key"].(string), - Values: values, - } - } - - if scopes, ok := nodeConfig["oauth_scopes"]; ok { - scopesSet := scopes.(*schema.Set) - scopes := make([]string, scopesSet.Len()) - for i, scope := range scopesSet.List() { - scopes[i] = canonicalizeServiceScope(scope.(string)) - } - - nc.OauthScopes = 
scopes - } - - if v, ok := nodeConfig["service_account"]; ok { - nc.ServiceAccount = v.(string) - } - - if v, ok := nodeConfig["metadata"]; ok { - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - nc.Metadata = m - } - - if v, ok := nodeConfig["image_type"]; ok { - nc.ImageType = v.(string) - } - - if v, ok := nodeConfig["labels"]; ok { - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - nc.Labels = m - } - - if v, ok := nodeConfig["resource_labels"]; ok { - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - nc.ResourceLabels = m - } - - if v, ok := nodeConfig["tags"]; ok { - tagsList := v.([]interface{}) - tags := []string{} - for _, v := range tagsList { - if v != nil { - tags = append(tags, v.(string)) - } - } - nc.Tags = tags - } - - if v, ok := nodeConfig["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 { - conf := v.([]interface{})[0].(map[string]interface{}) - nc.ShieldedInstanceConfig = &container.ShieldedInstanceConfig{ - EnableSecureBoot: conf["enable_secure_boot"].(bool), - EnableIntegrityMonitoring: conf["enable_integrity_monitoring"].(bool), - } - } - - // Preemptible Is Optional+Default, so it always has a value - nc.Preemptible = nodeConfig["preemptible"].(bool) - - // Spot Is Optional+Default, so it always has a value - nc.Spot = nodeConfig["spot"].(bool) - - if v, ok := nodeConfig["min_cpu_platform"]; ok { - nc.MinCpuPlatform = v.(string) - } - - if v, ok := nodeConfig["taint"]; ok && len(v.([]interface{})) > 0 { - taints := v.([]interface{}) - nodeTaints := make([]*container.NodeTaint, 0, len(taints)) - for _, raw := range taints { - data := raw.(map[string]interface{}) - taint := &container.NodeTaint{ - Key: data["key"].(string), - Value: data["value"].(string), - Effect: data["effect"].(string), - } - nodeTaints = append(nodeTaints, taint) - } - nc.Taints = 
nodeTaints - } - - if v, ok := nodeConfig["workload_metadata_config"]; ok { - nc.WorkloadMetadataConfig = expandWorkloadMetadataConfig(v) - } - - if v, ok := nodeConfig["boot_disk_kms_key"]; ok { - nc.BootDiskKmsKey = v.(string) - } - - if v, ok := nodeConfig["kubelet_config"]; ok { - nc.KubeletConfig = expandKubeletConfig(v) - } - - if v, ok := nodeConfig["linux_node_config"]; ok { - nc.LinuxNodeConfig = expandLinuxNodeConfig(v) - } - - if v, ok := nodeConfig["node_group"]; ok { - nc.NodeGroup = v.(string) - } - - return nc -} - -func expandWorkloadMetadataConfig(v interface{}) *container.WorkloadMetadataConfig { - if v == nil { - return nil - } - ls := v.([]interface{}) - if len(ls) == 0 { - return nil - } - wmc := &container.WorkloadMetadataConfig{} - - cfg := ls[0].(map[string]interface{}) - - if v, ok := cfg["mode"]; ok { - wmc.Mode = v.(string) - } - - return wmc -} - -func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig { - if v == nil { - return nil - } - ls := v.([]interface{}) - if len(ls) == 0 { - return nil - } - cfg := ls[0].(map[string]interface{}) - kConfig := &container.NodeKubeletConfig{} - if cpuManagerPolicy, ok := cfg["cpu_manager_policy"]; ok { - kConfig.CpuManagerPolicy = cpuManagerPolicy.(string) - } - if cpuCfsQuota, ok := cfg["cpu_cfs_quota"]; ok { - kConfig.CpuCfsQuota = cpuCfsQuota.(bool) - kConfig.ForceSendFields = append(kConfig.ForceSendFields, "CpuCfsQuota") - } - if cpuCfsQuotaPeriod, ok := cfg["cpu_cfs_quota_period"]; ok { - kConfig.CpuCfsQuotaPeriod = cpuCfsQuotaPeriod.(string) - } - if podPidsLimit, ok := cfg["pod_pids_limit"]; ok { - kConfig.PodPidsLimit = int64(podPidsLimit.(int)) - } - return kConfig -} - -func expandLinuxNodeConfig(v interface{}) *container.LinuxNodeConfig { - if v == nil { - return nil - } - ls := v.([]interface{}) - if len(ls) == 0 { - return nil - } - cfg := ls[0].(map[string]interface{}) - sysCfgRaw, ok := cfg["sysctls"] - if !ok { - return nil - } - m := make(map[string]string) - for k, v 
:= range sysCfgRaw.(map[string]interface{}) { - m[k] = v.(string) - } - return &container.LinuxNodeConfig{ - Sysctls: m, - } -} - -func flattenNodeConfigDefaults(c *container.NodeConfigDefaults) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - - if c == nil { - return result - } - - result = append(result, map[string]interface{}{}) - - result[0]["logging_variant"] = flattenLoggingVariant(c.LoggingConfig) - - return result -} - -func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} { - config := make([]map[string]interface{}, 0, 1) - - if c == nil { - return config - } - - config = append(config, map[string]interface{}{ - "machine_type": c.MachineType, - "disk_size_gb": c.DiskSizeGb, - "disk_type": c.DiskType, - "guest_accelerator": flattenContainerGuestAccelerators(c.Accelerators), - "local_ssd_count": c.LocalSsdCount, - "logging_variant": flattenLoggingVariant(c.LoggingConfig), - "local_nvme_ssd_block_config": flattenLocalNvmeSsdBlockConfig(c.LocalNvmeSsdBlockConfig), - "gcfs_config": flattenGcfsConfig(c.GcfsConfig), - "gvnic": flattenGvnic(c.Gvnic), - "reservation_affinity": flattenGKEReservationAffinity(c.ReservationAffinity), - "service_account": c.ServiceAccount, - "metadata": c.Metadata, - "image_type": c.ImageType, - "labels": c.Labels, - "resource_labels": c.ResourceLabels, - "tags": c.Tags, - "preemptible": c.Preemptible, - "spot": c.Spot, - "min_cpu_platform": c.MinCpuPlatform, - "shielded_instance_config": flattenShieldedInstanceConfig(c.ShieldedInstanceConfig), - "taint": flattenTaints(c.Taints), - "workload_metadata_config": flattenWorkloadMetadataConfig(c.WorkloadMetadataConfig), - "boot_disk_kms_key": c.BootDiskKmsKey, - "kubelet_config": flattenKubeletConfig(c.KubeletConfig), - "linux_node_config": flattenLinuxNodeConfig(c.LinuxNodeConfig), - "node_group": c.NodeGroup, - }) - - if len(c.OauthScopes) > 0 { - config[0]["oauth_scopes"] = schema.NewSet(stringScopeHashcode, 
convertStringArrToInterface(c.OauthScopes)) - } - - return config -} - -func flattenContainerGuestAccelerators(c []*container.AcceleratorConfig) []map[string]interface{} { - result := []map[string]interface{}{} - for _, accel := range c { - accelerator := map[string]interface{}{ - "count": accel.AcceleratorCount, - "type": accel.AcceleratorType, - "gpu_partition_size": accel.GpuPartitionSize, - } - if accel.GpuSharingConfig != nil { - accelerator["gpu_sharing_config"] = []map[string]interface{}{ - { - "gpu_sharing_strategy": accel.GpuSharingConfig.GpuSharingStrategy, - "max_shared_clients_per_gpu": accel.GpuSharingConfig.MaxSharedClientsPerGpu, - }, - } - } - result = append(result, accelerator) - } - return result -} - -func flattenShieldedInstanceConfig(c *container.ShieldedInstanceConfig) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "enable_secure_boot": c.EnableSecureBoot, - "enable_integrity_monitoring": c.EnableIntegrityMonitoring, - }) - } - return result -} - -func flattenLocalNvmeSsdBlockConfig(c *container.LocalNvmeSsdBlockConfig) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "local_ssd_count": c.LocalSsdCount, - }) - } - return result -} - -func flattenLoggingVariant(c *container.NodePoolLoggingConfig) string { - variant := "DEFAULT" - if c != nil && c.VariantConfig != nil && c.VariantConfig.Variant != "" { - variant = c.VariantConfig.Variant - } - return variant -} - -func flattenGcfsConfig(c *container.GcfsConfig) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "enabled": c.Enabled, - }) - } - return result -} - -func flattenGvnic(c *container.VirtualNIC) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - 
"enabled": c.Enabled, - }) - } - return result -} - -func flattenGKEReservationAffinity(c *container.ReservationAffinity) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "consume_reservation_type": c.ConsumeReservationType, - "key": c.Key, - "values": c.Values, - }) - } - return result -} - -func flattenTaints(c []*container.NodeTaint) []map[string]interface{} { - result := []map[string]interface{}{} - for _, taint := range c { - result = append(result, map[string]interface{}{ - "key": taint.Key, - "value": taint.Value, - "effect": taint.Effect, - }) - } - return result -} - -func flattenWorkloadMetadataConfig(c *container.WorkloadMetadataConfig) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "mode": c.Mode, - }) - } - return result -} - -func flattenKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "cpu_cfs_quota": c.CpuCfsQuota, - "cpu_cfs_quota_period": c.CpuCfsQuotaPeriod, - "cpu_manager_policy": c.CpuManagerPolicy, - "pod_pids_limit": c.PodPidsLimit, - }) - } - return result -} - -func flattenLinuxNodeConfig(c *container.LinuxNodeConfig) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "sysctls": c.Sysctls, - }) - } - return result -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/notebooks_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/notebooks_operation.go deleted file mode 100644 index 38d0f04b21..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/notebooks_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// 
---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type NotebooksOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *NotebooksOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. - url := fmt.Sprintf("%s%s", w.Config.NotebooksBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createNotebooksWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*NotebooksOperationWaiter, error) { - w := &NotebooksOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func NotebooksOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createNotebooksWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func NotebooksOperationWaitTime(config *Config, op 
map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createNotebooksWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/orgpolicy_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/orgpolicy_utils.go deleted file mode 100644 index 56c680456f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/orgpolicy_utils.go +++ /dev/null @@ -1,28 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// OrgPolicyPolicy has a custom import method because the parent field needs to allow an additional forward slash -// to represent the type of parent (e.g. projects/{project_id}). 
-func resourceOrgPolicyPolicyCustomImport(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - if err := parseImportId([]string{ - "^(?P[^/]+/?[^/]*)/policies/(?P[^/]+)", - "^(?P[^/]+/?[^/]*)/(?P[^/]+)", - }, d, config); err != nil { - return err - } - - // Replace import id for the resource id - id, err := replaceVarsRecursive(d, config, "{{parent}}/policies/{{name}}", false, 0) - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/privateca_ca_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/privateca_ca_utils.go deleted file mode 100644 index 3d55da7954..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/privateca_ca_utils.go +++ /dev/null @@ -1,221 +0,0 @@ -package google - -import ( - "fmt" - "log" - "math/rand" - "regexp" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// CA related utilities. 
- -func enableCA(config *Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { - enableUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:enable") - if err != nil { - return err - } - - log.Printf("[DEBUG] Enabling CertificateAuthority") - - res, err := SendRequest(config, "POST", billingProject, enableUrl, userAgent, nil) - if err != nil { - return fmt.Errorf("Error enabling CertificateAuthority: %s", err) - } - - var opRes map[string]interface{} - err = PrivatecaOperationWaitTimeWithResponse( - config, res, &opRes, project, "Enabling CertificateAuthority", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error waiting to enable CertificateAuthority: %s", err) - } - return nil -} - -func disableCA(config *Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { - disableUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:disable") - if err != nil { - return err - } - - log.Printf("[DEBUG] Disabling CA") - - dRes, err := SendRequest(config, "POST", billingProject, disableUrl, userAgent, nil) - if err != nil { - return fmt.Errorf("Error disabling CA: %s", err) - } - - var opRes map[string]interface{} - err = PrivatecaOperationWaitTimeWithResponse( - config, dRes, &opRes, project, "Disabling CA", userAgent, - d.Timeout(schema.TimeoutDelete)) - if err != nil { - return fmt.Errorf("Error waiting to disable CA: %s", err) - } - return nil -} - -func activateSubCAWithThirdPartyIssuer(config *Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { - // 1. 
prepare parameters - signedCACert := d.Get("pem_ca_certificate").(string) - - sc, ok := d.GetOk("subordinate_config") - if !ok { - return fmt.Errorf("subordinate_config is required to activate subordinate CA") - } - c := sc.([]interface{}) - if len(c) == 0 || c[0] == nil { - return fmt.Errorf("subordinate_config is required to activate subordinate CA") - } - chain, ok := c[0].(map[string]interface{})["pem_issuer_chain"] - if !ok { - return fmt.Errorf("subordinate_config.pem_issuer_chain is required to activate subordinate CA with third party issuer") - } - issuerChain := chain.([]interface{}) - if len(issuerChain) == 0 || issuerChain[0] == nil { - return fmt.Errorf("subordinate_config.pem_issuer_chain is required to activate subordinate CA with third party issuer") - } - pc := issuerChain[0].(map[string]interface{})["pem_certificates"].([]interface{}) - pemIssuerChain := make([]string, 0, len(pc)) - for _, pem := range pc { - pemIssuerChain = append(pemIssuerChain, pem.(string)) - } - - // 2. 
activate CA - activateObj := make(map[string]interface{}) - activateObj["pemCaCertificate"] = signedCACert - activateObj["subordinateConfig"] = make(map[string]interface{}) - activateObj["subordinateConfig"].(map[string]interface{})["pemIssuerChain"] = make(map[string]interface{}) - activateObj["subordinateConfig"].(map[string]interface{})["pemIssuerChain"].(map[string]interface{})["pemCertificates"] = pemIssuerChain - - activateUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:activate") - if err != nil { - return err - } - - log.Printf("[DEBUG] Activating CertificateAuthority: %#v", activateObj) - res, err := SendRequest(config, "POST", billingProject, activateUrl, userAgent, activateObj) - if err != nil { - return fmt.Errorf("Error enabling CertificateAuthority: %s", err) - } - - var opRes map[string]interface{} - err = PrivatecaOperationWaitTimeWithResponse( - config, res, &opRes, project, "Activating CertificateAuthority", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error waiting to actiavte CertificateAuthority: %s", err) - } - return nil -} - -func activateSubCAWithFirstPartyIssuer(config *Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { - // 1. get issuer - sc, ok := d.GetOk("subordinate_config") - if !ok { - return fmt.Errorf("subordinate_config is required to activate subordinate CA") - } - c := sc.([]interface{}) - if len(c) == 0 || c[0] == nil { - return fmt.Errorf("subordinate_config is required to activate subordinate CA") - } - ca, ok := c[0].(map[string]interface{})["certificate_authority"] - if !ok { - return fmt.Errorf("subordinate_config.certificate_authority is required to activate subordinate CA with first party issuer") - } - issuer := ca.(string) - - // 2. 
fetch CSR - fetchCSRUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:fetch") - if err != nil { - return err - } - res, err := SendRequest(config, "GET", billingProject, fetchCSRUrl, userAgent, nil) - if err != nil { - return fmt.Errorf("failed to fetch CSR: %v", err) - } - csr := res["pemCsr"] - - // 3. sign the CSR with first party issuer - genCertId := func() string { - currentTime := time.Now() - dateStr := currentTime.Format("20060102") - - rand.Seed(time.Now().UnixNano()) - const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" - rand1 := make([]byte, 3) - for i := range rand1 { - rand1[i] = letters[rand.Intn(len(letters))] - } - rand2 := make([]byte, 3) - for i := range rand2 { - rand2[i] = letters[rand.Intn(len(letters))] - } - return fmt.Sprintf("subordinate-%v-%v-%v", dateStr, string(rand1), string(rand2)) - } - - // parseCAName parses a CA name and return the CaPool name and CaId. 
- parseCAName := func(n string) (string, string, error) { - parts := regexp.MustCompile(`(projects/[a-z0-9-]+/locations/[a-z0-9-]+/caPools/[a-zA-Z0-9-]+)/certificateAuthorities/([a-zA-Z0-9-]+)`).FindStringSubmatch(n) - if len(parts) != 3 { - return "", "", fmt.Errorf("failed to parse CA name: %v, parts: %v", n, parts) - } - return parts[1], parts[2], err - } - - obj := make(map[string]interface{}) - obj["pemCsr"] = csr - obj["lifetime"] = d.Get("lifetime") - - certId := genCertId() - poolName, issuerId, err := parseCAName(issuer) - if err != nil { - return err - } - - PrivatecaBasePath, err := replaceVars(d, config, "{{PrivatecaBasePath}}") - if err != nil { - return err - } - signUrl := fmt.Sprintf("%v%v/certificates?certificateId=%v", PrivatecaBasePath, poolName, certId) - signUrl, err = addQueryParams(signUrl, map[string]string{"issuingCertificateAuthorityId": issuerId}) - if err != nil { - return err - } - - log.Printf("[DEBUG] Signing CA Certificate: %#v", obj) - res, err = SendRequestWithTimeout(config, "POST", billingProject, signUrl, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Certificate: %s", err) - } - signedCACert := res["pemCertificate"] - - // 4. activate sub CA with the signed CA cert. 
- activateObj := make(map[string]interface{}) - activateObj["pemCaCertificate"] = signedCACert - activateObj["subordinateConfig"] = make(map[string]interface{}) - activateObj["subordinateConfig"].(map[string]interface{})["certificateAuthority"] = issuer - - activateUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:activate") - if err != nil { - return err - } - - log.Printf("[DEBUG] Activating CertificateAuthority: %#v", activateObj) - res, err = SendRequest(config, "POST", billingProject, activateUrl, userAgent, activateObj) - if err != nil { - return fmt.Errorf("Error enabling CertificateAuthority: %s", err) - } - - var opRes map[string]interface{} - err = PrivatecaOperationWaitTimeWithResponse( - config, res, &opRes, project, "Enabling CertificateAuthority", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error waiting to actiavte CertificateAuthority: %s", err) - } - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/privateca_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/privateca_operation.go deleted file mode 100644 index 50e61173ff..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/privateca_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type PrivatecaOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *PrivatecaOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. - url := fmt.Sprintf("%s%s", w.Config.PrivatecaBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createPrivatecaWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*PrivatecaOperationWaiter, error) { - w := &PrivatecaOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func PrivatecaOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createPrivatecaWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func PrivatecaOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createPrivatecaWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider.go deleted file mode 100644 index af467bb6dd..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider.go +++ /dev/null @@ -1,1825 +0,0 @@ -package google - -import ( - "context" - "fmt" - "os" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-google/version" - - googleoauth "golang.org/x/oauth2/google" -) - -const TestEnvVar = "TF_ACC" - -// Global MutexKV -var mutexKV = NewMutexKV() - -// Provider returns a *schema.Provider. -func Provider() *schema.Provider { - - // The mtls service client gives the type of endpoint (mtls/regular) - // at client creation. Since we use a shared client for requests we must - // rewrite the endpoints to be mtls endpoints for the scenario where - // mtls is enabled. 
- if isMtls() { - // if mtls is enabled switch all default endpoints to use the mtls endpoint - for key, bp := range DefaultBasePaths { - DefaultBasePaths[key] = getMtlsEndpoint(bp) - } - } - - provider := &schema.Provider{ - Schema: map[string]*schema.Schema{ - "credentials": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCredentials, - ConflictsWith: []string{"access_token"}, - }, - - "access_token": { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"credentials"}, - }, - - "impersonate_service_account": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT", - }, nil), - }, - - "impersonate_service_account_delegates": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PROJECT", - "GOOGLE_CLOUD_PROJECT", - "GCLOUD_PROJECT", - "CLOUDSDK_CORE_PROJECT", - }, nil), - }, - - "billing_project": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BILLING_PROJECT", - }, nil), - }, - - "region": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_REGION", - "GCLOUD_REGION", - "CLOUDSDK_COMPUTE_REGION", - }, nil), - }, - - "zone": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ZONE", - "GCLOUD_ZONE", - "CLOUDSDK_COMPUTE_ZONE", - }, nil), - }, - - "scopes": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "batching": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "send_after": { - Type: schema.TypeString, - Optional: true, - Default: "10s", - ValidateFunc: 
validateNonNegativeDuration(), - }, - "enable_batching": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - }, - }, - - "user_project_override": { - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "USER_PROJECT_OVERRIDE", - }, nil), - }, - - "request_timeout": { - Type: schema.TypeString, - Optional: true, - }, - - "request_reason": { - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "CLOUDSDK_CORE_REQUEST_REASON", - }, nil), - }, - - // Generated Products - "access_approval_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ACCESS_APPROVAL_CUSTOM_ENDPOINT", - }, DefaultBasePaths[AccessApprovalBasePathKey]), - }, - "access_context_manager_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ACCESS_CONTEXT_MANAGER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[AccessContextManagerBasePathKey]), - }, - "active_directory_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ACTIVE_DIRECTORY_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ActiveDirectoryBasePathKey]), - }, - "alloydb_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ALLOYDB_CUSTOM_ENDPOINT", - }, DefaultBasePaths[AlloydbBasePathKey]), - }, - "apigee_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_APIGEE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ApigeeBasePathKey]), - }, - "app_engine_custom_endpoint": { - Type: schema.TypeString, - 
Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_APP_ENGINE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[AppEngineBasePathKey]), - }, - "artifact_registry_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ARTIFACT_REGISTRY_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ArtifactRegistryBasePathKey]), - }, - "beyondcorp_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BEYONDCORP_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BeyondcorpBasePathKey]), - }, - "big_query_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BIG_QUERY_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BigQueryBasePathKey]), - }, - "bigquery_analytics_hub_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BIGQUERY_ANALYTICS_HUB_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BigqueryAnalyticsHubBasePathKey]), - }, - "bigquery_connection_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BIGQUERY_CONNECTION_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BigqueryConnectionBasePathKey]), - }, - "bigquery_datapolicy_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BIGQUERY_DATAPOLICY_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BigqueryDatapolicyBasePathKey]), - }, - "bigquery_data_transfer_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: 
validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BIGQUERY_DATA_TRANSFER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BigqueryDataTransferBasePathKey]), - }, - "bigquery_reservation_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BIGQUERY_RESERVATION_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BigqueryReservationBasePathKey]), - }, - "bigtable_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BIGTABLE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BigtableBasePathKey]), - }, - "billing_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BILLING_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BillingBasePathKey]), - }, - "binary_authorization_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BINARY_AUTHORIZATION_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BinaryAuthorizationBasePathKey]), - }, - "certificate_manager_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CERTIFICATE_MANAGER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CertificateManagerBasePathKey]), - }, - "cloud_asset_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_ASSET_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudAssetBasePathKey]), - }, - "cloud_build_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: 
schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_BUILD_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudBuildBasePathKey]), - }, - "cloud_functions_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_FUNCTIONS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudFunctionsBasePathKey]), - }, - "cloudfunctions2_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUDFUNCTIONS2_CUSTOM_ENDPOINT", - }, DefaultBasePaths[Cloudfunctions2BasePathKey]), - }, - "cloud_identity_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_IDENTITY_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudIdentityBasePathKey]), - }, - "cloud_ids_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_IDS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudIdsBasePathKey]), - }, - "cloud_iot_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_IOT_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudIotBasePathKey]), - }, - "cloud_run_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_RUN_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudRunBasePathKey]), - }, - "cloud_run_v2_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_RUN_V2_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudRunV2BasePathKey]), - }, - 
"cloud_scheduler_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_SCHEDULER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudSchedulerBasePathKey]), - }, - "cloud_tasks_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_TASKS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudTasksBasePathKey]), - }, - "compute_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_COMPUTE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ComputeBasePathKey]), - }, - "container_analysis_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CONTAINER_ANALYSIS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ContainerAnalysisBasePathKey]), - }, - "container_attached_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CONTAINER_ATTACHED_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ContainerAttachedBasePathKey]), - }, - "data_catalog_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATA_CATALOG_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DataCatalogBasePathKey]), - }, - "data_fusion_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATA_FUSION_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DataFusionBasePathKey]), - }, - "data_loss_prevention_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: 
validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATA_LOSS_PREVENTION_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DataLossPreventionBasePathKey]), - }, - "dataplex_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATAPLEX_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DataplexBasePathKey]), - }, - "dataproc_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATAPROC_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DataprocBasePathKey]), - }, - "dataproc_metastore_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATAPROC_METASTORE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DataprocMetastoreBasePathKey]), - }, - "datastore_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATASTORE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DatastoreBasePathKey]), - }, - "datastream_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATASTREAM_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DatastreamBasePathKey]), - }, - "deployment_manager_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DEPLOYMENT_MANAGER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DeploymentManagerBasePathKey]), - }, - "dialogflow_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DIALOGFLOW_CUSTOM_ENDPOINT", - 
}, DefaultBasePaths[DialogflowBasePathKey]), - }, - "dialogflow_cx_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DIALOGFLOW_CX_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DialogflowCXBasePathKey]), - }, - "dns_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DNS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DNSBasePathKey]), - }, - "document_ai_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DOCUMENT_AI_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DocumentAIBasePathKey]), - }, - "essential_contacts_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ESSENTIAL_CONTACTS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[EssentialContactsBasePathKey]), - }, - "filestore_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_FILESTORE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[FilestoreBasePathKey]), - }, - "firestore_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_FIRESTORE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[FirestoreBasePathKey]), - }, - "game_services_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_GAME_SERVICES_CUSTOM_ENDPOINT", - }, DefaultBasePaths[GameServicesBasePathKey]), - }, - "gke_backup_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: 
validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_GKE_BACKUP_CUSTOM_ENDPOINT", - }, DefaultBasePaths[GKEBackupBasePathKey]), - }, - "gke_hub_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_GKE_HUB_CUSTOM_ENDPOINT", - }, DefaultBasePaths[GKEHubBasePathKey]), - }, - "healthcare_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_HEALTHCARE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[HealthcareBasePathKey]), - }, - "iam2_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_IAM2_CUSTOM_ENDPOINT", - }, DefaultBasePaths[IAM2BasePathKey]), - }, - "iam_beta_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_IAM_BETA_CUSTOM_ENDPOINT", - }, DefaultBasePaths[IAMBetaBasePathKey]), - }, - "iam_workforce_pool_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_IAM_WORKFORCE_POOL_CUSTOM_ENDPOINT", - }, DefaultBasePaths[IAMWorkforcePoolBasePathKey]), - }, - "iap_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_IAP_CUSTOM_ENDPOINT", - }, DefaultBasePaths[IapBasePathKey]), - }, - "identity_platform_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_IDENTITY_PLATFORM_CUSTOM_ENDPOINT", - }, DefaultBasePaths[IdentityPlatformBasePathKey]), - }, - 
"kms_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_KMS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[KMSBasePathKey]), - }, - "logging_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_LOGGING_CUSTOM_ENDPOINT", - }, DefaultBasePaths[LoggingBasePathKey]), - }, - "memcache_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_MEMCACHE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[MemcacheBasePathKey]), - }, - "ml_engine_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ML_ENGINE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[MLEngineBasePathKey]), - }, - "monitoring_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_MONITORING_CUSTOM_ENDPOINT", - }, DefaultBasePaths[MonitoringBasePathKey]), - }, - "network_management_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_NETWORK_MANAGEMENT_CUSTOM_ENDPOINT", - }, DefaultBasePaths[NetworkManagementBasePathKey]), - }, - "network_services_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_NETWORK_SERVICES_CUSTOM_ENDPOINT", - }, DefaultBasePaths[NetworkServicesBasePathKey]), - }, - "notebooks_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: 
schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_NOTEBOOKS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[NotebooksBasePathKey]), - }, - "os_config_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_OS_CONFIG_CUSTOM_ENDPOINT", - }, DefaultBasePaths[OSConfigBasePathKey]), - }, - "os_login_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_OS_LOGIN_CUSTOM_ENDPOINT", - }, DefaultBasePaths[OSLoginBasePathKey]), - }, - "privateca_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PRIVATECA_CUSTOM_ENDPOINT", - }, DefaultBasePaths[PrivatecaBasePathKey]), - }, - "pubsub_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PUBSUB_CUSTOM_ENDPOINT", - }, DefaultBasePaths[PubsubBasePathKey]), - }, - "pubsub_lite_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PUBSUB_LITE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[PubsubLiteBasePathKey]), - }, - "redis_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_REDIS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[RedisBasePathKey]), - }, - "resource_manager_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_RESOURCE_MANAGER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ResourceManagerBasePathKey]), - }, - "secret_manager_custom_endpoint": { - Type: 
schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SECRET_MANAGER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[SecretManagerBasePathKey]), - }, - "security_center_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SECURITY_CENTER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[SecurityCenterBasePathKey]), - }, - "service_management_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SERVICE_MANAGEMENT_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ServiceManagementBasePathKey]), - }, - "service_usage_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SERVICE_USAGE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ServiceUsageBasePathKey]), - }, - "source_repo_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SOURCE_REPO_CUSTOM_ENDPOINT", - }, DefaultBasePaths[SourceRepoBasePathKey]), - }, - "spanner_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SPANNER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[SpannerBasePathKey]), - }, - "sql_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SQL_CUSTOM_ENDPOINT", - }, DefaultBasePaths[SQLBasePathKey]), - }, - "storage_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - 
"GOOGLE_STORAGE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[StorageBasePathKey]), - }, - "storage_transfer_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_STORAGE_TRANSFER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[StorageTransferBasePathKey]), - }, - "tags_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_TAGS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[TagsBasePathKey]), - }, - "tpu_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_TPU_CUSTOM_ENDPOINT", - }, DefaultBasePaths[TPUBasePathKey]), - }, - "vertex_ai_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_VERTEX_AI_CUSTOM_ENDPOINT", - }, DefaultBasePaths[VertexAIBasePathKey]), - }, - "vpc_access_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_VPC_ACCESS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[VPCAccessBasePathKey]), - }, - "workflows_custom_endpoint": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_WORKFLOWS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[WorkflowsBasePathKey]), - }, - - // Handwritten Products / Versioned / Atypical Entries - CloudBillingCustomEndpointEntryKey: CloudBillingCustomEndpointEntry, - ComposerCustomEndpointEntryKey: ComposerCustomEndpointEntry, - ContainerCustomEndpointEntryKey: ContainerCustomEndpointEntry, - DataflowCustomEndpointEntryKey: DataflowCustomEndpointEntry, - IamCredentialsCustomEndpointEntryKey: 
IamCredentialsCustomEndpointEntry, - ResourceManagerV3CustomEndpointEntryKey: ResourceManagerV3CustomEndpointEntry, - IAMCustomEndpointEntryKey: IAMCustomEndpointEntry, - ServiceNetworkingCustomEndpointEntryKey: ServiceNetworkingCustomEndpointEntry, - ServiceUsageCustomEndpointEntryKey: ServiceUsageCustomEndpointEntry, - BigtableAdminCustomEndpointEntryKey: BigtableAdminCustomEndpointEntry, - TagsLocationCustomEndpointEntryKey: TagsLocationCustomEndpointEntry, - - // dcl - ContainerAwsCustomEndpointEntryKey: ContainerAwsCustomEndpointEntry, - ContainerAzureCustomEndpointEntryKey: ContainerAzureCustomEndpointEntry, - }, - - ProviderMetaSchema: map[string]*schema.Schema{ - "module_name": { - Type: schema.TypeString, - Optional: true, - }, - }, - - DataSourcesMap: map[string]*schema.Resource{ - // ####### START datasources ########### - "google_access_approval_folder_service_account": DataSourceAccessApprovalFolderServiceAccount(), - "google_access_approval_organization_service_account": DataSourceAccessApprovalOrganizationServiceAccount(), - "google_access_approval_project_service_account": DataSourceAccessApprovalProjectServiceAccount(), - "google_active_folder": DataSourceGoogleActiveFolder(), - "google_artifact_registry_repository": DataSourceArtifactRegistryRepository(), - "google_app_engine_default_service_account": DataSourceGoogleAppEngineDefaultServiceAccount(), - "google_beyondcorp_app_connection": DataSourceGoogleBeyondcorpAppConnection(), - "google_beyondcorp_app_connector": DataSourceGoogleBeyondcorpAppConnector(), - "google_beyondcorp_app_gateway": DataSourceGoogleBeyondcorpAppGateway(), - "google_billing_account": DataSourceGoogleBillingAccount(), - "google_bigquery_default_service_account": DataSourceGoogleBigqueryDefaultServiceAccount(), - "google_client_config": DataSourceGoogleClientConfig(), - "google_client_openid_userinfo": DataSourceGoogleClientOpenIDUserinfo(), - "google_cloudbuild_trigger": DataSourceGoogleCloudBuildTrigger(), - 
"google_cloudfunctions_function": DataSourceGoogleCloudFunctionsFunction(), - "google_cloudfunctions2_function": DataSourceGoogleCloudFunctions2Function(), - "google_cloud_identity_groups": DataSourceGoogleCloudIdentityGroups(), - "google_cloud_identity_group_memberships": DataSourceGoogleCloudIdentityGroupMemberships(), - "google_cloud_run_locations": DataSourceGoogleCloudRunLocations(), - "google_cloud_run_service": DataSourceGoogleCloudRunService(), - "google_composer_environment": DataSourceGoogleComposerEnvironment(), - "google_composer_image_versions": DataSourceGoogleComposerImageVersions(), - "google_compute_address": DataSourceGoogleComputeAddress(), - "google_compute_addresses": DataSourceGoogleComputeAddresses(), - "google_compute_backend_service": DataSourceGoogleComputeBackendService(), - "google_compute_backend_bucket": DataSourceGoogleComputeBackendBucket(), - "google_compute_default_service_account": DataSourceGoogleComputeDefaultServiceAccount(), - "google_compute_disk": DataSourceGoogleComputeDisk(), - "google_compute_forwarding_rule": DataSourceGoogleComputeForwardingRule(), - "google_compute_global_address": DataSourceGoogleComputeGlobalAddress(), - "google_compute_global_forwarding_rule": DataSourceGoogleComputeGlobalForwardingRule(), - "google_compute_ha_vpn_gateway": DataSourceGoogleComputeHaVpnGateway(), - "google_compute_health_check": DataSourceGoogleComputeHealthCheck(), - "google_compute_image": DataSourceGoogleComputeImage(), - "google_compute_instance": DataSourceGoogleComputeInstance(), - "google_compute_instance_group": DataSourceGoogleComputeInstanceGroup(), - "google_compute_instance_group_manager": DataSourceGoogleComputeInstanceGroupManager(), - "google_compute_instance_serial_port": DataSourceGoogleComputeInstanceSerialPort(), - "google_compute_instance_template": DataSourceGoogleComputeInstanceTemplate(), - "google_compute_lb_ip_ranges": DataSourceGoogleComputeLbIpRanges(), - "google_compute_network": 
DataSourceGoogleComputeNetwork(), - "google_compute_network_endpoint_group": DataSourceGoogleComputeNetworkEndpointGroup(), - "google_compute_network_peering": DataSourceComputeNetworkPeering(), - "google_compute_node_types": DataSourceGoogleComputeNodeTypes(), - "google_compute_regions": DataSourceGoogleComputeRegions(), - "google_compute_region_network_endpoint_group": DataSourceGoogleComputeRegionNetworkEndpointGroup(), - "google_compute_region_instance_group": DataSourceGoogleComputeRegionInstanceGroup(), - "google_compute_region_ssl_certificate": DataSourceGoogleRegionComputeSslCertificate(), - "google_compute_resource_policy": DataSourceGoogleComputeResourcePolicy(), - "google_compute_router": DataSourceGoogleComputeRouter(), - "google_compute_router_nat": DataSourceGoogleComputeRouterNat(), - "google_compute_router_status": DataSourceGoogleComputeRouterStatus(), - "google_compute_snapshot": DataSourceGoogleComputeSnapshot(), - "google_compute_ssl_certificate": DataSourceGoogleComputeSslCertificate(), - "google_compute_ssl_policy": DataSourceGoogleComputeSslPolicy(), - "google_compute_subnetwork": DataSourceGoogleComputeSubnetwork(), - "google_compute_vpn_gateway": DataSourceGoogleComputeVpnGateway(), - "google_compute_zones": DataSourceGoogleComputeZones(), - "google_container_azure_versions": DataSourceGoogleContainerAzureVersions(), - "google_container_aws_versions": DataSourceGoogleContainerAwsVersions(), - "google_container_attached_versions": DataSourceGoogleContainerAttachedVersions(), - "google_container_attached_install_manifest": DataSourceGoogleContainerAttachedInstallManifest(), - "google_container_cluster": DataSourceGoogleContainerCluster(), - "google_container_engine_versions": DataSourceGoogleContainerEngineVersions(), - "google_container_registry_image": DataSourceGoogleContainerImage(), - "google_container_registry_repository": DataSourceGoogleContainerRepo(), - "google_dataproc_metastore_service": DataSourceDataprocMetastoreService(), - 
"google_dns_keys": DataSourceDNSKeys(), - "google_dns_managed_zone": DataSourceDnsManagedZone(), - "google_dns_record_set": DataSourceDnsRecordSet(), - "google_game_services_game_server_deployment_rollout": DataSourceGameServicesGameServerDeploymentRollout(), - "google_iam_policy": DataSourceGoogleIamPolicy(), - "google_iam_role": DataSourceGoogleIamRole(), - "google_iam_testable_permissions": DataSourceGoogleIamTestablePermissions(), - "google_iap_client": DataSourceGoogleIapClient(), - "google_kms_crypto_key": DataSourceGoogleKmsCryptoKey(), - "google_kms_crypto_key_version": DataSourceGoogleKmsCryptoKeyVersion(), - "google_kms_key_ring": DataSourceGoogleKmsKeyRing(), - "google_kms_secret": DataSourceGoogleKmsSecret(), - "google_kms_secret_ciphertext": DataSourceGoogleKmsSecretCiphertext(), - "google_folder": DataSourceGoogleFolder(), - "google_folders": DataSourceGoogleFolders(), - "google_folder_organization_policy": DataSourceGoogleFolderOrganizationPolicy(), - "google_logging_project_cmek_settings": DataSourceGoogleLoggingProjectCmekSettings(), - "google_logging_sink": DataSourceGoogleLoggingSink(), - "google_monitoring_notification_channel": DataSourceMonitoringNotificationChannel(), - "google_monitoring_cluster_istio_service": DataSourceMonitoringServiceClusterIstio(), - "google_monitoring_istio_canonical_service": DataSourceMonitoringIstioCanonicalService(), - "google_monitoring_mesh_istio_service": DataSourceMonitoringServiceMeshIstio(), - "google_monitoring_app_engine_service": DataSourceMonitoringServiceAppEngine(), - "google_monitoring_uptime_check_ips": DataSourceGoogleMonitoringUptimeCheckIps(), - "google_netblock_ip_ranges": DataSourceGoogleNetblockIpRanges(), - "google_organization": DataSourceGoogleOrganization(), - "google_privateca_certificate_authority": DataSourcePrivatecaCertificateAuthority(), - "google_project": DataSourceGoogleProject(), - "google_projects": DataSourceGoogleProjects(), - "google_project_organization_policy": 
DataSourceGoogleProjectOrganizationPolicy(), - "google_project_service": DataSourceGoogleProjectService(), - "google_pubsub_subscription": DataSourceGooglePubsubSubscription(), - "google_pubsub_topic": DataSourceGooglePubsubTopic(), - "google_secret_manager_secret": DataSourceSecretManagerSecret(), - "google_secret_manager_secret_version": DataSourceSecretManagerSecretVersion(), - "google_secret_manager_secret_version_access": DataSourceSecretManagerSecretVersionAccess(), - "google_service_account": DataSourceGoogleServiceAccount(), - "google_service_account_access_token": DataSourceGoogleServiceAccountAccessToken(), - "google_service_account_id_token": DataSourceGoogleServiceAccountIdToken(), - "google_service_account_jwt": DataSourceGoogleServiceAccountJwt(), - "google_service_account_key": DataSourceGoogleServiceAccountKey(), - "google_sourcerepo_repository": DataSourceGoogleSourceRepoRepository(), - "google_spanner_instance": DataSourceSpannerInstance(), - "google_sql_ca_certs": DataSourceGoogleSQLCaCerts(), - "google_sql_backup_run": DataSourceSqlBackupRun(), - "google_sql_databases": DataSourceSqlDatabases(), - "google_sql_database": DataSourceSqlDatabase(), - "google_sql_database_instance": DataSourceSqlDatabaseInstance(), - "google_sql_database_instances": DataSourceSqlDatabaseInstances(), - "google_service_networking_peered_dns_domain": DataSourceGoogleServiceNetworkingPeeredDNSDomain(), - "google_storage_bucket": DataSourceGoogleStorageBucket(), - "google_storage_bucket_object": DataSourceGoogleStorageBucketObject(), - "google_storage_bucket_object_content": DataSourceGoogleStorageBucketObjectContent(), - "google_storage_object_signed_url": DataSourceGoogleSignedUrl(), - "google_storage_project_service_account": DataSourceGoogleStorageProjectServiceAccount(), - "google_storage_transfer_project_service_account": DataSourceGoogleStorageTransferProjectServiceAccount(), - "google_tags_tag_key": DataSourceGoogleTagsTagKey(), - "google_tags_tag_value": 
DataSourceGoogleTagsTagValue(), - "google_tpu_tensorflow_versions": DataSourceTpuTensorflowVersions(), - "google_vpc_access_connector": DataSourceVPCAccessConnector(), - "google_redis_instance": DataSourceGoogleRedisInstance(), - // ####### END datasources ########### - }, - ResourcesMap: ResourceMap(), - } - - provider.ConfigureContextFunc = func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { - return providerConfigure(ctx, d, provider) - } - - ConfigureDCLProvider(provider) - - return provider -} - -// Generated resources: 274 -// Generated IAM resources: 186 -// Total generated resources: 460 -func ResourceMap() map[string]*schema.Resource { - resourceMap, _ := ResourceMapWithErrors() - return resourceMap -} - -func ResourceMapWithErrors() (map[string]*schema.Resource, error) { - return mergeResourceMaps( - map[string]*schema.Resource{ - "google_folder_access_approval_settings": ResourceAccessApprovalFolderSettings(), - "google_organization_access_approval_settings": ResourceAccessApprovalOrganizationSettings(), - "google_project_access_approval_settings": ResourceAccessApprovalProjectSettings(), - "google_access_context_manager_access_level": ResourceAccessContextManagerAccessLevel(), - "google_access_context_manager_access_level_condition": ResourceAccessContextManagerAccessLevelCondition(), - "google_access_context_manager_access_levels": ResourceAccessContextManagerAccessLevels(), - "google_access_context_manager_access_policy": ResourceAccessContextManagerAccessPolicy(), - "google_access_context_manager_access_policy_iam_binding": ResourceIamBinding(AccessContextManagerAccessPolicyIamSchema, AccessContextManagerAccessPolicyIamUpdaterProducer, AccessContextManagerAccessPolicyIdParseFunc), - "google_access_context_manager_access_policy_iam_member": ResourceIamMember(AccessContextManagerAccessPolicyIamSchema, AccessContextManagerAccessPolicyIamUpdaterProducer, AccessContextManagerAccessPolicyIdParseFunc), - 
"google_access_context_manager_access_policy_iam_policy": ResourceIamPolicy(AccessContextManagerAccessPolicyIamSchema, AccessContextManagerAccessPolicyIamUpdaterProducer, AccessContextManagerAccessPolicyIdParseFunc), - "google_access_context_manager_authorized_orgs_desc": ResourceAccessContextManagerAuthorizedOrgsDesc(), - "google_access_context_manager_gcp_user_access_binding": ResourceAccessContextManagerGcpUserAccessBinding(), - "google_access_context_manager_service_perimeter": ResourceAccessContextManagerServicePerimeter(), - "google_access_context_manager_service_perimeter_resource": ResourceAccessContextManagerServicePerimeterResource(), - "google_access_context_manager_service_perimeters": ResourceAccessContextManagerServicePerimeters(), - "google_active_directory_domain": ResourceActiveDirectoryDomain(), - "google_active_directory_domain_trust": ResourceActiveDirectoryDomainTrust(), - "google_alloydb_backup": ResourceAlloydbBackup(), - "google_alloydb_cluster": ResourceAlloydbCluster(), - "google_alloydb_instance": ResourceAlloydbInstance(), - "google_apigee_addons_config": ResourceApigeeAddonsConfig(), - "google_apigee_endpoint_attachment": ResourceApigeeEndpointAttachment(), - "google_apigee_env_keystore": ResourceApigeeEnvKeystore(), - "google_apigee_env_references": ResourceApigeeEnvReferences(), - "google_apigee_envgroup": ResourceApigeeEnvgroup(), - "google_apigee_envgroup_attachment": ResourceApigeeEnvgroupAttachment(), - "google_apigee_environment": ResourceApigeeEnvironment(), - "google_apigee_environment_iam_binding": ResourceIamBinding(ApigeeEnvironmentIamSchema, ApigeeEnvironmentIamUpdaterProducer, ApigeeEnvironmentIdParseFunc), - "google_apigee_environment_iam_member": ResourceIamMember(ApigeeEnvironmentIamSchema, ApigeeEnvironmentIamUpdaterProducer, ApigeeEnvironmentIdParseFunc), - "google_apigee_environment_iam_policy": ResourceIamPolicy(ApigeeEnvironmentIamSchema, ApigeeEnvironmentIamUpdaterProducer, ApigeeEnvironmentIdParseFunc), - 
"google_apigee_instance": ResourceApigeeInstance(), - "google_apigee_instance_attachment": ResourceApigeeInstanceAttachment(), - "google_apigee_nat_address": ResourceApigeeNatAddress(), - "google_apigee_organization": ResourceApigeeOrganization(), - "google_apigee_sync_authorization": ResourceApigeeSyncAuthorization(), - "google_app_engine_application_url_dispatch_rules": ResourceAppEngineApplicationUrlDispatchRules(), - "google_app_engine_domain_mapping": ResourceAppEngineDomainMapping(), - "google_app_engine_firewall_rule": ResourceAppEngineFirewallRule(), - "google_app_engine_flexible_app_version": ResourceAppEngineFlexibleAppVersion(), - "google_app_engine_service_network_settings": ResourceAppEngineServiceNetworkSettings(), - "google_app_engine_service_split_traffic": ResourceAppEngineServiceSplitTraffic(), - "google_app_engine_standard_app_version": ResourceAppEngineStandardAppVersion(), - "google_artifact_registry_repository": ResourceArtifactRegistryRepository(), - "google_artifact_registry_repository_iam_binding": ResourceIamBinding(ArtifactRegistryRepositoryIamSchema, ArtifactRegistryRepositoryIamUpdaterProducer, ArtifactRegistryRepositoryIdParseFunc), - "google_artifact_registry_repository_iam_member": ResourceIamMember(ArtifactRegistryRepositoryIamSchema, ArtifactRegistryRepositoryIamUpdaterProducer, ArtifactRegistryRepositoryIdParseFunc), - "google_artifact_registry_repository_iam_policy": ResourceIamPolicy(ArtifactRegistryRepositoryIamSchema, ArtifactRegistryRepositoryIamUpdaterProducer, ArtifactRegistryRepositoryIdParseFunc), - "google_beyondcorp_app_connection": ResourceBeyondcorpAppConnection(), - "google_beyondcorp_app_connector": ResourceBeyondcorpAppConnector(), - "google_beyondcorp_app_gateway": ResourceBeyondcorpAppGateway(), - "google_bigquery_dataset": ResourceBigQueryDataset(), - "google_bigquery_dataset_access": ResourceBigQueryDatasetAccess(), - "google_bigquery_job": ResourceBigQueryJob(), - "google_bigquery_routine": 
ResourceBigQueryRoutine(), - "google_bigquery_table_iam_binding": ResourceIamBinding(BigQueryTableIamSchema, BigQueryTableIamUpdaterProducer, BigQueryTableIdParseFunc), - "google_bigquery_table_iam_member": ResourceIamMember(BigQueryTableIamSchema, BigQueryTableIamUpdaterProducer, BigQueryTableIdParseFunc), - "google_bigquery_table_iam_policy": ResourceIamPolicy(BigQueryTableIamSchema, BigQueryTableIamUpdaterProducer, BigQueryTableIdParseFunc), - "google_bigquery_analytics_hub_data_exchange": ResourceBigqueryAnalyticsHubDataExchange(), - "google_bigquery_analytics_hub_data_exchange_iam_binding": ResourceIamBinding(BigqueryAnalyticsHubDataExchangeIamSchema, BigqueryAnalyticsHubDataExchangeIamUpdaterProducer, BigqueryAnalyticsHubDataExchangeIdParseFunc), - "google_bigquery_analytics_hub_data_exchange_iam_member": ResourceIamMember(BigqueryAnalyticsHubDataExchangeIamSchema, BigqueryAnalyticsHubDataExchangeIamUpdaterProducer, BigqueryAnalyticsHubDataExchangeIdParseFunc), - "google_bigquery_analytics_hub_data_exchange_iam_policy": ResourceIamPolicy(BigqueryAnalyticsHubDataExchangeIamSchema, BigqueryAnalyticsHubDataExchangeIamUpdaterProducer, BigqueryAnalyticsHubDataExchangeIdParseFunc), - "google_bigquery_analytics_hub_listing": ResourceBigqueryAnalyticsHubListing(), - "google_bigquery_analytics_hub_listing_iam_binding": ResourceIamBinding(BigqueryAnalyticsHubListingIamSchema, BigqueryAnalyticsHubListingIamUpdaterProducer, BigqueryAnalyticsHubListingIdParseFunc), - "google_bigquery_analytics_hub_listing_iam_member": ResourceIamMember(BigqueryAnalyticsHubListingIamSchema, BigqueryAnalyticsHubListingIamUpdaterProducer, BigqueryAnalyticsHubListingIdParseFunc), - "google_bigquery_analytics_hub_listing_iam_policy": ResourceIamPolicy(BigqueryAnalyticsHubListingIamSchema, BigqueryAnalyticsHubListingIamUpdaterProducer, BigqueryAnalyticsHubListingIdParseFunc), - "google_bigquery_connection": ResourceBigqueryConnectionConnection(), - "google_bigquery_connection_iam_binding": 
ResourceIamBinding(BigqueryConnectionConnectionIamSchema, BigqueryConnectionConnectionIamUpdaterProducer, BigqueryConnectionConnectionIdParseFunc), - "google_bigquery_connection_iam_member": ResourceIamMember(BigqueryConnectionConnectionIamSchema, BigqueryConnectionConnectionIamUpdaterProducer, BigqueryConnectionConnectionIdParseFunc), - "google_bigquery_connection_iam_policy": ResourceIamPolicy(BigqueryConnectionConnectionIamSchema, BigqueryConnectionConnectionIamUpdaterProducer, BigqueryConnectionConnectionIdParseFunc), - "google_bigquery_datapolicy_data_policy": ResourceBigqueryDatapolicyDataPolicy(), - "google_bigquery_datapolicy_data_policy_iam_binding": ResourceIamBinding(BigqueryDatapolicyDataPolicyIamSchema, BigqueryDatapolicyDataPolicyIamUpdaterProducer, BigqueryDatapolicyDataPolicyIdParseFunc), - "google_bigquery_datapolicy_data_policy_iam_member": ResourceIamMember(BigqueryDatapolicyDataPolicyIamSchema, BigqueryDatapolicyDataPolicyIamUpdaterProducer, BigqueryDatapolicyDataPolicyIdParseFunc), - "google_bigquery_datapolicy_data_policy_iam_policy": ResourceIamPolicy(BigqueryDatapolicyDataPolicyIamSchema, BigqueryDatapolicyDataPolicyIamUpdaterProducer, BigqueryDatapolicyDataPolicyIdParseFunc), - "google_bigquery_data_transfer_config": ResourceBigqueryDataTransferConfig(), - "google_bigquery_capacity_commitment": ResourceBigqueryReservationCapacityCommitment(), - "google_bigquery_reservation": ResourceBigqueryReservationReservation(), - "google_bigtable_app_profile": ResourceBigtableAppProfile(), - "google_billing_budget": ResourceBillingBudget(), - "google_binary_authorization_attestor": ResourceBinaryAuthorizationAttestor(), - "google_binary_authorization_attestor_iam_binding": ResourceIamBinding(BinaryAuthorizationAttestorIamSchema, BinaryAuthorizationAttestorIamUpdaterProducer, BinaryAuthorizationAttestorIdParseFunc), - "google_binary_authorization_attestor_iam_member": ResourceIamMember(BinaryAuthorizationAttestorIamSchema, 
BinaryAuthorizationAttestorIamUpdaterProducer, BinaryAuthorizationAttestorIdParseFunc), - "google_binary_authorization_attestor_iam_policy": ResourceIamPolicy(BinaryAuthorizationAttestorIamSchema, BinaryAuthorizationAttestorIamUpdaterProducer, BinaryAuthorizationAttestorIdParseFunc), - "google_binary_authorization_policy": ResourceBinaryAuthorizationPolicy(), - "google_certificate_manager_certificate": ResourceCertificateManagerCertificate(), - "google_certificate_manager_certificate_map": ResourceCertificateManagerCertificateMap(), - "google_certificate_manager_certificate_map_entry": ResourceCertificateManagerCertificateMapEntry(), - "google_certificate_manager_dns_authorization": ResourceCertificateManagerDnsAuthorization(), - "google_cloud_asset_folder_feed": ResourceCloudAssetFolderFeed(), - "google_cloud_asset_organization_feed": ResourceCloudAssetOrganizationFeed(), - "google_cloud_asset_project_feed": ResourceCloudAssetProjectFeed(), - "google_cloudbuild_bitbucket_server_config": ResourceCloudBuildBitbucketServerConfig(), - "google_cloudbuild_trigger": ResourceCloudBuildTrigger(), - "google_cloudfunctions_function_iam_binding": ResourceIamBinding(CloudFunctionsCloudFunctionIamSchema, CloudFunctionsCloudFunctionIamUpdaterProducer, CloudFunctionsCloudFunctionIdParseFunc), - "google_cloudfunctions_function_iam_member": ResourceIamMember(CloudFunctionsCloudFunctionIamSchema, CloudFunctionsCloudFunctionIamUpdaterProducer, CloudFunctionsCloudFunctionIdParseFunc), - "google_cloudfunctions_function_iam_policy": ResourceIamPolicy(CloudFunctionsCloudFunctionIamSchema, CloudFunctionsCloudFunctionIamUpdaterProducer, CloudFunctionsCloudFunctionIdParseFunc), - "google_cloudfunctions2_function": ResourceCloudfunctions2function(), - "google_cloudfunctions2_function_iam_binding": ResourceIamBinding(Cloudfunctions2functionIamSchema, Cloudfunctions2functionIamUpdaterProducer, Cloudfunctions2functionIdParseFunc), - "google_cloudfunctions2_function_iam_member": 
ResourceIamMember(Cloudfunctions2functionIamSchema, Cloudfunctions2functionIamUpdaterProducer, Cloudfunctions2functionIdParseFunc), - "google_cloudfunctions2_function_iam_policy": ResourceIamPolicy(Cloudfunctions2functionIamSchema, Cloudfunctions2functionIamUpdaterProducer, Cloudfunctions2functionIdParseFunc), - "google_cloud_identity_group": ResourceCloudIdentityGroup(), - "google_cloud_identity_group_membership": ResourceCloudIdentityGroupMembership(), - "google_cloud_ids_endpoint": ResourceCloudIdsEndpoint(), - "google_cloudiot_device": ResourceCloudIotDevice(), - "google_cloudiot_registry": ResourceCloudIotDeviceRegistry(), - "google_cloudiot_registry_iam_binding": ResourceIamBinding(CloudIotDeviceRegistryIamSchema, CloudIotDeviceRegistryIamUpdaterProducer, CloudIotDeviceRegistryIdParseFunc), - "google_cloudiot_registry_iam_member": ResourceIamMember(CloudIotDeviceRegistryIamSchema, CloudIotDeviceRegistryIamUpdaterProducer, CloudIotDeviceRegistryIdParseFunc), - "google_cloudiot_registry_iam_policy": ResourceIamPolicy(CloudIotDeviceRegistryIamSchema, CloudIotDeviceRegistryIamUpdaterProducer, CloudIotDeviceRegistryIdParseFunc), - "google_cloud_run_domain_mapping": ResourceCloudRunDomainMapping(), - "google_cloud_run_service": ResourceCloudRunService(), - "google_cloud_run_service_iam_binding": ResourceIamBinding(CloudRunServiceIamSchema, CloudRunServiceIamUpdaterProducer, CloudRunServiceIdParseFunc), - "google_cloud_run_service_iam_member": ResourceIamMember(CloudRunServiceIamSchema, CloudRunServiceIamUpdaterProducer, CloudRunServiceIdParseFunc), - "google_cloud_run_service_iam_policy": ResourceIamPolicy(CloudRunServiceIamSchema, CloudRunServiceIamUpdaterProducer, CloudRunServiceIdParseFunc), - "google_cloud_run_v2_job": ResourceCloudRunV2Job(), - "google_cloud_run_v2_job_iam_binding": ResourceIamBinding(CloudRunV2JobIamSchema, CloudRunV2JobIamUpdaterProducer, CloudRunV2JobIdParseFunc), - "google_cloud_run_v2_job_iam_member": 
ResourceIamMember(CloudRunV2JobIamSchema, CloudRunV2JobIamUpdaterProducer, CloudRunV2JobIdParseFunc), - "google_cloud_run_v2_job_iam_policy": ResourceIamPolicy(CloudRunV2JobIamSchema, CloudRunV2JobIamUpdaterProducer, CloudRunV2JobIdParseFunc), - "google_cloud_run_v2_service": ResourceCloudRunV2Service(), - "google_cloud_run_v2_service_iam_binding": ResourceIamBinding(CloudRunV2ServiceIamSchema, CloudRunV2ServiceIamUpdaterProducer, CloudRunV2ServiceIdParseFunc), - "google_cloud_run_v2_service_iam_member": ResourceIamMember(CloudRunV2ServiceIamSchema, CloudRunV2ServiceIamUpdaterProducer, CloudRunV2ServiceIdParseFunc), - "google_cloud_run_v2_service_iam_policy": ResourceIamPolicy(CloudRunV2ServiceIamSchema, CloudRunV2ServiceIamUpdaterProducer, CloudRunV2ServiceIdParseFunc), - "google_cloud_scheduler_job": ResourceCloudSchedulerJob(), - "google_cloud_tasks_queue": ResourceCloudTasksQueue(), - "google_cloud_tasks_queue_iam_binding": ResourceIamBinding(CloudTasksQueueIamSchema, CloudTasksQueueIamUpdaterProducer, CloudTasksQueueIdParseFunc), - "google_cloud_tasks_queue_iam_member": ResourceIamMember(CloudTasksQueueIamSchema, CloudTasksQueueIamUpdaterProducer, CloudTasksQueueIdParseFunc), - "google_cloud_tasks_queue_iam_policy": ResourceIamPolicy(CloudTasksQueueIamSchema, CloudTasksQueueIamUpdaterProducer, CloudTasksQueueIdParseFunc), - "google_compute_address": ResourceComputeAddress(), - "google_compute_autoscaler": ResourceComputeAutoscaler(), - "google_compute_backend_bucket": ResourceComputeBackendBucket(), - "google_compute_backend_bucket_signed_url_key": ResourceComputeBackendBucketSignedUrlKey(), - "google_compute_backend_service": ResourceComputeBackendService(), - "google_compute_backend_service_signed_url_key": ResourceComputeBackendServiceSignedUrlKey(), - "google_compute_disk": ResourceComputeDisk(), - "google_compute_disk_iam_binding": ResourceIamBinding(ComputeDiskIamSchema, ComputeDiskIamUpdaterProducer, ComputeDiskIdParseFunc), - 
"google_compute_disk_iam_member": ResourceIamMember(ComputeDiskIamSchema, ComputeDiskIamUpdaterProducer, ComputeDiskIdParseFunc), - "google_compute_disk_iam_policy": ResourceIamPolicy(ComputeDiskIamSchema, ComputeDiskIamUpdaterProducer, ComputeDiskIdParseFunc), - "google_compute_disk_resource_policy_attachment": ResourceComputeDiskResourcePolicyAttachment(), - "google_compute_external_vpn_gateway": ResourceComputeExternalVpnGateway(), - "google_compute_firewall": ResourceComputeFirewall(), - "google_compute_forwarding_rule": ResourceComputeForwardingRule(), - "google_compute_global_address": ResourceComputeGlobalAddress(), - "google_compute_global_forwarding_rule": ResourceComputeGlobalForwardingRule(), - "google_compute_global_network_endpoint": ResourceComputeGlobalNetworkEndpoint(), - "google_compute_global_network_endpoint_group": ResourceComputeGlobalNetworkEndpointGroup(), - "google_compute_ha_vpn_gateway": ResourceComputeHaVpnGateway(), - "google_compute_health_check": ResourceComputeHealthCheck(), - "google_compute_http_health_check": ResourceComputeHttpHealthCheck(), - "google_compute_https_health_check": ResourceComputeHttpsHealthCheck(), - "google_compute_image": ResourceComputeImage(), - "google_compute_image_iam_binding": ResourceIamBinding(ComputeImageIamSchema, ComputeImageIamUpdaterProducer, ComputeImageIdParseFunc), - "google_compute_image_iam_member": ResourceIamMember(ComputeImageIamSchema, ComputeImageIamUpdaterProducer, ComputeImageIdParseFunc), - "google_compute_image_iam_policy": ResourceIamPolicy(ComputeImageIamSchema, ComputeImageIamUpdaterProducer, ComputeImageIdParseFunc), - "google_compute_instance_iam_binding": ResourceIamBinding(ComputeInstanceIamSchema, ComputeInstanceIamUpdaterProducer, ComputeInstanceIdParseFunc), - "google_compute_instance_iam_member": ResourceIamMember(ComputeInstanceIamSchema, ComputeInstanceIamUpdaterProducer, ComputeInstanceIdParseFunc), - "google_compute_instance_iam_policy": 
ResourceIamPolicy(ComputeInstanceIamSchema, ComputeInstanceIamUpdaterProducer, ComputeInstanceIdParseFunc), - "google_compute_instance_group_named_port": ResourceComputeInstanceGroupNamedPort(), - "google_compute_interconnect_attachment": ResourceComputeInterconnectAttachment(), - "google_compute_managed_ssl_certificate": ResourceComputeManagedSslCertificate(), - "google_compute_network": ResourceComputeNetwork(), - "google_compute_network_endpoint": ResourceComputeNetworkEndpoint(), - "google_compute_network_endpoint_group": ResourceComputeNetworkEndpointGroup(), - "google_compute_network_peering_routes_config": ResourceComputeNetworkPeeringRoutesConfig(), - "google_compute_node_group": ResourceComputeNodeGroup(), - "google_compute_node_template": ResourceComputeNodeTemplate(), - "google_compute_packet_mirroring": ResourceComputePacketMirroring(), - "google_compute_per_instance_config": ResourceComputePerInstanceConfig(), - "google_compute_region_autoscaler": ResourceComputeRegionAutoscaler(), - "google_compute_region_backend_service": ResourceComputeRegionBackendService(), - "google_compute_region_disk": ResourceComputeRegionDisk(), - "google_compute_region_disk_iam_binding": ResourceIamBinding(ComputeRegionDiskIamSchema, ComputeRegionDiskIamUpdaterProducer, ComputeRegionDiskIdParseFunc), - "google_compute_region_disk_iam_member": ResourceIamMember(ComputeRegionDiskIamSchema, ComputeRegionDiskIamUpdaterProducer, ComputeRegionDiskIdParseFunc), - "google_compute_region_disk_iam_policy": ResourceIamPolicy(ComputeRegionDiskIamSchema, ComputeRegionDiskIamUpdaterProducer, ComputeRegionDiskIdParseFunc), - "google_compute_region_disk_resource_policy_attachment": ResourceComputeRegionDiskResourcePolicyAttachment(), - "google_compute_region_health_check": ResourceComputeRegionHealthCheck(), - "google_compute_region_network_endpoint_group": ResourceComputeRegionNetworkEndpointGroup(), - "google_compute_region_per_instance_config": ResourceComputeRegionPerInstanceConfig(), - 
"google_compute_region_ssl_certificate": ResourceComputeRegionSslCertificate(), - "google_compute_region_target_http_proxy": ResourceComputeRegionTargetHttpProxy(), - "google_compute_region_target_https_proxy": ResourceComputeRegionTargetHttpsProxy(), - "google_compute_region_target_tcp_proxy": ResourceComputeRegionTargetTcpProxy(), - "google_compute_region_url_map": ResourceComputeRegionUrlMap(), - "google_compute_reservation": ResourceComputeReservation(), - "google_compute_resource_policy": ResourceComputeResourcePolicy(), - "google_compute_route": ResourceComputeRoute(), - "google_compute_router": ResourceComputeRouter(), - "google_compute_router_peer": ResourceComputeRouterBgpPeer(), - "google_compute_router_nat": ResourceComputeRouterNat(), - "google_compute_service_attachment": ResourceComputeServiceAttachment(), - "google_compute_snapshot": ResourceComputeSnapshot(), - "google_compute_snapshot_iam_binding": ResourceIamBinding(ComputeSnapshotIamSchema, ComputeSnapshotIamUpdaterProducer, ComputeSnapshotIdParseFunc), - "google_compute_snapshot_iam_member": ResourceIamMember(ComputeSnapshotIamSchema, ComputeSnapshotIamUpdaterProducer, ComputeSnapshotIdParseFunc), - "google_compute_snapshot_iam_policy": ResourceIamPolicy(ComputeSnapshotIamSchema, ComputeSnapshotIamUpdaterProducer, ComputeSnapshotIdParseFunc), - "google_compute_ssl_certificate": ResourceComputeSslCertificate(), - "google_compute_ssl_policy": ResourceComputeSslPolicy(), - "google_compute_subnetwork": ResourceComputeSubnetwork(), - "google_compute_subnetwork_iam_binding": ResourceIamBinding(ComputeSubnetworkIamSchema, ComputeSubnetworkIamUpdaterProducer, ComputeSubnetworkIdParseFunc), - "google_compute_subnetwork_iam_member": ResourceIamMember(ComputeSubnetworkIamSchema, ComputeSubnetworkIamUpdaterProducer, ComputeSubnetworkIdParseFunc), - "google_compute_subnetwork_iam_policy": ResourceIamPolicy(ComputeSubnetworkIamSchema, ComputeSubnetworkIamUpdaterProducer, ComputeSubnetworkIdParseFunc), - 
"google_compute_target_grpc_proxy": ResourceComputeTargetGrpcProxy(), - "google_compute_target_http_proxy": ResourceComputeTargetHttpProxy(), - "google_compute_target_https_proxy": ResourceComputeTargetHttpsProxy(), - "google_compute_target_instance": ResourceComputeTargetInstance(), - "google_compute_target_ssl_proxy": ResourceComputeTargetSslProxy(), - "google_compute_target_tcp_proxy": ResourceComputeTargetTcpProxy(), - "google_compute_url_map": ResourceComputeUrlMap(), - "google_compute_vpn_gateway": ResourceComputeVpnGateway(), - "google_compute_vpn_tunnel": ResourceComputeVpnTunnel(), - "google_container_analysis_note": ResourceContainerAnalysisNote(), - "google_container_analysis_occurrence": ResourceContainerAnalysisOccurrence(), - "google_container_attached_cluster": ResourceContainerAttachedCluster(), - "google_data_catalog_entry": ResourceDataCatalogEntry(), - "google_data_catalog_entry_group": ResourceDataCatalogEntryGroup(), - "google_data_catalog_entry_group_iam_binding": ResourceIamBinding(DataCatalogEntryGroupIamSchema, DataCatalogEntryGroupIamUpdaterProducer, DataCatalogEntryGroupIdParseFunc), - "google_data_catalog_entry_group_iam_member": ResourceIamMember(DataCatalogEntryGroupIamSchema, DataCatalogEntryGroupIamUpdaterProducer, DataCatalogEntryGroupIdParseFunc), - "google_data_catalog_entry_group_iam_policy": ResourceIamPolicy(DataCatalogEntryGroupIamSchema, DataCatalogEntryGroupIamUpdaterProducer, DataCatalogEntryGroupIdParseFunc), - "google_data_catalog_policy_tag": ResourceDataCatalogPolicyTag(), - "google_data_catalog_policy_tag_iam_binding": ResourceIamBinding(DataCatalogPolicyTagIamSchema, DataCatalogPolicyTagIamUpdaterProducer, DataCatalogPolicyTagIdParseFunc), - "google_data_catalog_policy_tag_iam_member": ResourceIamMember(DataCatalogPolicyTagIamSchema, DataCatalogPolicyTagIamUpdaterProducer, DataCatalogPolicyTagIdParseFunc), - "google_data_catalog_policy_tag_iam_policy": ResourceIamPolicy(DataCatalogPolicyTagIamSchema, 
DataCatalogPolicyTagIamUpdaterProducer, DataCatalogPolicyTagIdParseFunc), - "google_data_catalog_tag": ResourceDataCatalogTag(), - "google_data_catalog_tag_template": ResourceDataCatalogTagTemplate(), - "google_data_catalog_tag_template_iam_binding": ResourceIamBinding(DataCatalogTagTemplateIamSchema, DataCatalogTagTemplateIamUpdaterProducer, DataCatalogTagTemplateIdParseFunc), - "google_data_catalog_tag_template_iam_member": ResourceIamMember(DataCatalogTagTemplateIamSchema, DataCatalogTagTemplateIamUpdaterProducer, DataCatalogTagTemplateIdParseFunc), - "google_data_catalog_tag_template_iam_policy": ResourceIamPolicy(DataCatalogTagTemplateIamSchema, DataCatalogTagTemplateIamUpdaterProducer, DataCatalogTagTemplateIdParseFunc), - "google_data_catalog_taxonomy": ResourceDataCatalogTaxonomy(), - "google_data_catalog_taxonomy_iam_binding": ResourceIamBinding(DataCatalogTaxonomyIamSchema, DataCatalogTaxonomyIamUpdaterProducer, DataCatalogTaxonomyIdParseFunc), - "google_data_catalog_taxonomy_iam_member": ResourceIamMember(DataCatalogTaxonomyIamSchema, DataCatalogTaxonomyIamUpdaterProducer, DataCatalogTaxonomyIdParseFunc), - "google_data_catalog_taxonomy_iam_policy": ResourceIamPolicy(DataCatalogTaxonomyIamSchema, DataCatalogTaxonomyIamUpdaterProducer, DataCatalogTaxonomyIdParseFunc), - "google_data_fusion_instance": ResourceDataFusionInstance(), - "google_data_fusion_instance_iam_binding": ResourceIamBinding(DataFusionInstanceIamSchema, DataFusionInstanceIamUpdaterProducer, DataFusionInstanceIdParseFunc), - "google_data_fusion_instance_iam_member": ResourceIamMember(DataFusionInstanceIamSchema, DataFusionInstanceIamUpdaterProducer, DataFusionInstanceIdParseFunc), - "google_data_fusion_instance_iam_policy": ResourceIamPolicy(DataFusionInstanceIamSchema, DataFusionInstanceIamUpdaterProducer, DataFusionInstanceIdParseFunc), - "google_data_loss_prevention_deidentify_template": ResourceDataLossPreventionDeidentifyTemplate(), - "google_data_loss_prevention_inspect_template": 
ResourceDataLossPreventionInspectTemplate(), - "google_data_loss_prevention_job_trigger": ResourceDataLossPreventionJobTrigger(), - "google_data_loss_prevention_stored_info_type": ResourceDataLossPreventionStoredInfoType(), - "google_dataplex_asset_iam_binding": ResourceIamBinding(DataplexAssetIamSchema, DataplexAssetIamUpdaterProducer, DataplexAssetIdParseFunc), - "google_dataplex_asset_iam_member": ResourceIamMember(DataplexAssetIamSchema, DataplexAssetIamUpdaterProducer, DataplexAssetIdParseFunc), - "google_dataplex_asset_iam_policy": ResourceIamPolicy(DataplexAssetIamSchema, DataplexAssetIamUpdaterProducer, DataplexAssetIdParseFunc), - "google_dataplex_lake_iam_binding": ResourceIamBinding(DataplexLakeIamSchema, DataplexLakeIamUpdaterProducer, DataplexLakeIdParseFunc), - "google_dataplex_lake_iam_member": ResourceIamMember(DataplexLakeIamSchema, DataplexLakeIamUpdaterProducer, DataplexLakeIdParseFunc), - "google_dataplex_lake_iam_policy": ResourceIamPolicy(DataplexLakeIamSchema, DataplexLakeIamUpdaterProducer, DataplexLakeIdParseFunc), - "google_dataplex_zone_iam_binding": ResourceIamBinding(DataplexZoneIamSchema, DataplexZoneIamUpdaterProducer, DataplexZoneIdParseFunc), - "google_dataplex_zone_iam_member": ResourceIamMember(DataplexZoneIamSchema, DataplexZoneIamUpdaterProducer, DataplexZoneIdParseFunc), - "google_dataplex_zone_iam_policy": ResourceIamPolicy(DataplexZoneIamSchema, DataplexZoneIamUpdaterProducer, DataplexZoneIdParseFunc), - "google_dataproc_autoscaling_policy": ResourceDataprocAutoscalingPolicy(), - "google_dataproc_autoscaling_policy_iam_binding": ResourceIamBinding(DataprocAutoscalingPolicyIamSchema, DataprocAutoscalingPolicyIamUpdaterProducer, DataprocAutoscalingPolicyIdParseFunc), - "google_dataproc_autoscaling_policy_iam_member": ResourceIamMember(DataprocAutoscalingPolicyIamSchema, DataprocAutoscalingPolicyIamUpdaterProducer, DataprocAutoscalingPolicyIdParseFunc), - "google_dataproc_autoscaling_policy_iam_policy": 
ResourceIamPolicy(DataprocAutoscalingPolicyIamSchema, DataprocAutoscalingPolicyIamUpdaterProducer, DataprocAutoscalingPolicyIdParseFunc), - "google_dataproc_metastore_service": ResourceDataprocMetastoreService(), - "google_dataproc_metastore_service_iam_binding": ResourceIamBinding(DataprocMetastoreServiceIamSchema, DataprocMetastoreServiceIamUpdaterProducer, DataprocMetastoreServiceIdParseFunc), - "google_dataproc_metastore_service_iam_member": ResourceIamMember(DataprocMetastoreServiceIamSchema, DataprocMetastoreServiceIamUpdaterProducer, DataprocMetastoreServiceIdParseFunc), - "google_dataproc_metastore_service_iam_policy": ResourceIamPolicy(DataprocMetastoreServiceIamSchema, DataprocMetastoreServiceIamUpdaterProducer, DataprocMetastoreServiceIdParseFunc), - "google_datastore_index": ResourceDatastoreIndex(), - "google_datastream_connection_profile": ResourceDatastreamConnectionProfile(), - "google_datastream_private_connection": ResourceDatastreamPrivateConnection(), - "google_datastream_stream": ResourceDatastreamStream(), - "google_deployment_manager_deployment": ResourceDeploymentManagerDeployment(), - "google_dialogflow_agent": ResourceDialogflowAgent(), - "google_dialogflow_entity_type": ResourceDialogflowEntityType(), - "google_dialogflow_fulfillment": ResourceDialogflowFulfillment(), - "google_dialogflow_intent": ResourceDialogflowIntent(), - "google_dialogflow_cx_agent": ResourceDialogflowCXAgent(), - "google_dialogflow_cx_entity_type": ResourceDialogflowCXEntityType(), - "google_dialogflow_cx_flow": ResourceDialogflowCXFlow(), - "google_dialogflow_cx_intent": ResourceDialogflowCXIntent(), - "google_dialogflow_cx_page": ResourceDialogflowCXPage(), - "google_dialogflow_cx_webhook": ResourceDialogflowCXWebhook(), - "google_dns_managed_zone": ResourceDNSManagedZone(), - "google_dns_managed_zone_iam_binding": ResourceIamBinding(DNSManagedZoneIamSchema, DNSManagedZoneIamUpdaterProducer, DNSManagedZoneIdParseFunc), - "google_dns_managed_zone_iam_member": 
ResourceIamMember(DNSManagedZoneIamSchema, DNSManagedZoneIamUpdaterProducer, DNSManagedZoneIdParseFunc), - "google_dns_managed_zone_iam_policy": ResourceIamPolicy(DNSManagedZoneIamSchema, DNSManagedZoneIamUpdaterProducer, DNSManagedZoneIdParseFunc), - "google_dns_policy": ResourceDNSPolicy(), - "google_document_ai_processor": ResourceDocumentAIProcessor(), - "google_document_ai_processor_default_version": ResourceDocumentAIProcessorDefaultVersion(), - "google_essential_contacts_contact": ResourceEssentialContactsContact(), - "google_filestore_backup": ResourceFilestoreBackup(), - "google_filestore_instance": ResourceFilestoreInstance(), - "google_filestore_snapshot": ResourceFilestoreSnapshot(), - "google_firestore_database": ResourceFirestoreDatabase(), - "google_firestore_document": ResourceFirestoreDocument(), - "google_firestore_index": ResourceFirestoreIndex(), - "google_game_services_game_server_cluster": ResourceGameServicesGameServerCluster(), - "google_game_services_game_server_config": ResourceGameServicesGameServerConfig(), - "google_game_services_game_server_deployment": ResourceGameServicesGameServerDeployment(), - "google_game_services_game_server_deployment_rollout": ResourceGameServicesGameServerDeploymentRollout(), - "google_game_services_realm": ResourceGameServicesRealm(), - "google_gke_backup_backup_plan": ResourceGKEBackupBackupPlan(), - "google_gke_backup_backup_plan_iam_binding": ResourceIamBinding(GKEBackupBackupPlanIamSchema, GKEBackupBackupPlanIamUpdaterProducer, GKEBackupBackupPlanIdParseFunc), - "google_gke_backup_backup_plan_iam_member": ResourceIamMember(GKEBackupBackupPlanIamSchema, GKEBackupBackupPlanIamUpdaterProducer, GKEBackupBackupPlanIdParseFunc), - "google_gke_backup_backup_plan_iam_policy": ResourceIamPolicy(GKEBackupBackupPlanIamSchema, GKEBackupBackupPlanIamUpdaterProducer, GKEBackupBackupPlanIdParseFunc), - "google_gke_hub_membership": ResourceGKEHubMembership(), - "google_gke_hub_membership_iam_binding": 
ResourceIamBinding(GKEHubMembershipIamSchema, GKEHubMembershipIamUpdaterProducer, GKEHubMembershipIdParseFunc), - "google_gke_hub_membership_iam_member": ResourceIamMember(GKEHubMembershipIamSchema, GKEHubMembershipIamUpdaterProducer, GKEHubMembershipIdParseFunc), - "google_gke_hub_membership_iam_policy": ResourceIamPolicy(GKEHubMembershipIamSchema, GKEHubMembershipIamUpdaterProducer, GKEHubMembershipIdParseFunc), - "google_healthcare_consent_store": ResourceHealthcareConsentStore(), - "google_healthcare_consent_store_iam_binding": ResourceIamBinding(HealthcareConsentStoreIamSchema, HealthcareConsentStoreIamUpdaterProducer, HealthcareConsentStoreIdParseFunc), - "google_healthcare_consent_store_iam_member": ResourceIamMember(HealthcareConsentStoreIamSchema, HealthcareConsentStoreIamUpdaterProducer, HealthcareConsentStoreIdParseFunc), - "google_healthcare_consent_store_iam_policy": ResourceIamPolicy(HealthcareConsentStoreIamSchema, HealthcareConsentStoreIamUpdaterProducer, HealthcareConsentStoreIdParseFunc), - "google_healthcare_dataset": ResourceHealthcareDataset(), - "google_healthcare_dicom_store": ResourceHealthcareDicomStore(), - "google_healthcare_fhir_store": ResourceHealthcareFhirStore(), - "google_healthcare_hl7_v2_store": ResourceHealthcareHl7V2Store(), - "google_iam_access_boundary_policy": ResourceIAM2AccessBoundaryPolicy(), - "google_iam_workload_identity_pool": ResourceIAMBetaWorkloadIdentityPool(), - "google_iam_workload_identity_pool_provider": ResourceIAMBetaWorkloadIdentityPoolProvider(), - "google_iam_workforce_pool": ResourceIAMWorkforcePoolWorkforcePool(), - "google_iam_workforce_pool_provider": ResourceIAMWorkforcePoolWorkforcePoolProvider(), - "google_iap_app_engine_service_iam_binding": ResourceIamBinding(IapAppEngineServiceIamSchema, IapAppEngineServiceIamUpdaterProducer, IapAppEngineServiceIdParseFunc), - "google_iap_app_engine_service_iam_member": ResourceIamMember(IapAppEngineServiceIamSchema, IapAppEngineServiceIamUpdaterProducer, 
IapAppEngineServiceIdParseFunc), - "google_iap_app_engine_service_iam_policy": ResourceIamPolicy(IapAppEngineServiceIamSchema, IapAppEngineServiceIamUpdaterProducer, IapAppEngineServiceIdParseFunc), - "google_iap_app_engine_version_iam_binding": ResourceIamBinding(IapAppEngineVersionIamSchema, IapAppEngineVersionIamUpdaterProducer, IapAppEngineVersionIdParseFunc), - "google_iap_app_engine_version_iam_member": ResourceIamMember(IapAppEngineVersionIamSchema, IapAppEngineVersionIamUpdaterProducer, IapAppEngineVersionIdParseFunc), - "google_iap_app_engine_version_iam_policy": ResourceIamPolicy(IapAppEngineVersionIamSchema, IapAppEngineVersionIamUpdaterProducer, IapAppEngineVersionIdParseFunc), - "google_iap_brand": ResourceIapBrand(), - "google_iap_client": ResourceIapClient(), - "google_iap_tunnel_iam_binding": ResourceIamBinding(IapTunnelIamSchema, IapTunnelIamUpdaterProducer, IapTunnelIdParseFunc), - "google_iap_tunnel_iam_member": ResourceIamMember(IapTunnelIamSchema, IapTunnelIamUpdaterProducer, IapTunnelIdParseFunc), - "google_iap_tunnel_iam_policy": ResourceIamPolicy(IapTunnelIamSchema, IapTunnelIamUpdaterProducer, IapTunnelIdParseFunc), - "google_iap_tunnel_instance_iam_binding": ResourceIamBinding(IapTunnelInstanceIamSchema, IapTunnelInstanceIamUpdaterProducer, IapTunnelInstanceIdParseFunc), - "google_iap_tunnel_instance_iam_member": ResourceIamMember(IapTunnelInstanceIamSchema, IapTunnelInstanceIamUpdaterProducer, IapTunnelInstanceIdParseFunc), - "google_iap_tunnel_instance_iam_policy": ResourceIamPolicy(IapTunnelInstanceIamSchema, IapTunnelInstanceIamUpdaterProducer, IapTunnelInstanceIdParseFunc), - "google_iap_web_iam_binding": ResourceIamBinding(IapWebIamSchema, IapWebIamUpdaterProducer, IapWebIdParseFunc), - "google_iap_web_iam_member": ResourceIamMember(IapWebIamSchema, IapWebIamUpdaterProducer, IapWebIdParseFunc), - "google_iap_web_iam_policy": ResourceIamPolicy(IapWebIamSchema, IapWebIamUpdaterProducer, IapWebIdParseFunc), - 
"google_iap_web_backend_service_iam_binding": ResourceIamBinding(IapWebBackendServiceIamSchema, IapWebBackendServiceIamUpdaterProducer, IapWebBackendServiceIdParseFunc), - "google_iap_web_backend_service_iam_member": ResourceIamMember(IapWebBackendServiceIamSchema, IapWebBackendServiceIamUpdaterProducer, IapWebBackendServiceIdParseFunc), - "google_iap_web_backend_service_iam_policy": ResourceIamPolicy(IapWebBackendServiceIamSchema, IapWebBackendServiceIamUpdaterProducer, IapWebBackendServiceIdParseFunc), - "google_iap_web_type_app_engine_iam_binding": ResourceIamBinding(IapWebTypeAppEngineIamSchema, IapWebTypeAppEngineIamUpdaterProducer, IapWebTypeAppEngineIdParseFunc), - "google_iap_web_type_app_engine_iam_member": ResourceIamMember(IapWebTypeAppEngineIamSchema, IapWebTypeAppEngineIamUpdaterProducer, IapWebTypeAppEngineIdParseFunc), - "google_iap_web_type_app_engine_iam_policy": ResourceIamPolicy(IapWebTypeAppEngineIamSchema, IapWebTypeAppEngineIamUpdaterProducer, IapWebTypeAppEngineIdParseFunc), - "google_iap_web_type_compute_iam_binding": ResourceIamBinding(IapWebTypeComputeIamSchema, IapWebTypeComputeIamUpdaterProducer, IapWebTypeComputeIdParseFunc), - "google_iap_web_type_compute_iam_member": ResourceIamMember(IapWebTypeComputeIamSchema, IapWebTypeComputeIamUpdaterProducer, IapWebTypeComputeIdParseFunc), - "google_iap_web_type_compute_iam_policy": ResourceIamPolicy(IapWebTypeComputeIamSchema, IapWebTypeComputeIamUpdaterProducer, IapWebTypeComputeIdParseFunc), - "google_identity_platform_config": ResourceIdentityPlatformConfig(), - "google_identity_platform_default_supported_idp_config": ResourceIdentityPlatformDefaultSupportedIdpConfig(), - "google_identity_platform_inbound_saml_config": ResourceIdentityPlatformInboundSamlConfig(), - "google_identity_platform_oauth_idp_config": ResourceIdentityPlatformOauthIdpConfig(), - "google_identity_platform_project_default_config": ResourceIdentityPlatformProjectDefaultConfig(), - "google_identity_platform_tenant": 
ResourceIdentityPlatformTenant(), - "google_identity_platform_tenant_default_supported_idp_config": ResourceIdentityPlatformTenantDefaultSupportedIdpConfig(), - "google_identity_platform_tenant_inbound_saml_config": ResourceIdentityPlatformTenantInboundSamlConfig(), - "google_identity_platform_tenant_oauth_idp_config": ResourceIdentityPlatformTenantOauthIdpConfig(), - "google_kms_crypto_key": ResourceKMSCryptoKey(), - "google_kms_crypto_key_version": ResourceKMSCryptoKeyVersion(), - "google_kms_key_ring": ResourceKMSKeyRing(), - "google_kms_key_ring_import_job": ResourceKMSKeyRingImportJob(), - "google_kms_secret_ciphertext": ResourceKMSSecretCiphertext(), - "google_logging_metric": ResourceLoggingMetric(), - "google_memcache_instance": ResourceMemcacheInstance(), - "google_ml_engine_model": ResourceMLEngineModel(), - "google_monitoring_alert_policy": ResourceMonitoringAlertPolicy(), - "google_monitoring_service": ResourceMonitoringGenericService(), - "google_monitoring_group": ResourceMonitoringGroup(), - "google_monitoring_metric_descriptor": ResourceMonitoringMetricDescriptor(), - "google_monitoring_notification_channel": ResourceMonitoringNotificationChannel(), - "google_monitoring_custom_service": ResourceMonitoringService(), - "google_monitoring_slo": ResourceMonitoringSlo(), - "google_monitoring_uptime_check_config": ResourceMonitoringUptimeCheckConfig(), - "google_network_management_connectivity_test": ResourceNetworkManagementConnectivityTest(), - "google_network_services_edge_cache_keyset": ResourceNetworkServicesEdgeCacheKeyset(), - "google_network_services_edge_cache_origin": ResourceNetworkServicesEdgeCacheOrigin(), - "google_network_services_edge_cache_service": ResourceNetworkServicesEdgeCacheService(), - "google_notebooks_environment": ResourceNotebooksEnvironment(), - "google_notebooks_instance": ResourceNotebooksInstance(), - "google_notebooks_instance_iam_binding": ResourceIamBinding(NotebooksInstanceIamSchema, 
NotebooksInstanceIamUpdaterProducer, NotebooksInstanceIdParseFunc), - "google_notebooks_instance_iam_member": ResourceIamMember(NotebooksInstanceIamSchema, NotebooksInstanceIamUpdaterProducer, NotebooksInstanceIdParseFunc), - "google_notebooks_instance_iam_policy": ResourceIamPolicy(NotebooksInstanceIamSchema, NotebooksInstanceIamUpdaterProducer, NotebooksInstanceIdParseFunc), - "google_notebooks_location": ResourceNotebooksLocation(), - "google_notebooks_runtime": ResourceNotebooksRuntime(), - "google_notebooks_runtime_iam_binding": ResourceIamBinding(NotebooksRuntimeIamSchema, NotebooksRuntimeIamUpdaterProducer, NotebooksRuntimeIdParseFunc), - "google_notebooks_runtime_iam_member": ResourceIamMember(NotebooksRuntimeIamSchema, NotebooksRuntimeIamUpdaterProducer, NotebooksRuntimeIdParseFunc), - "google_notebooks_runtime_iam_policy": ResourceIamPolicy(NotebooksRuntimeIamSchema, NotebooksRuntimeIamUpdaterProducer, NotebooksRuntimeIdParseFunc), - "google_os_config_patch_deployment": ResourceOSConfigPatchDeployment(), - "google_os_login_ssh_public_key": ResourceOSLoginSSHPublicKey(), - "google_privateca_ca_pool": ResourcePrivatecaCaPool(), - "google_privateca_ca_pool_iam_binding": ResourceIamBinding(PrivatecaCaPoolIamSchema, PrivatecaCaPoolIamUpdaterProducer, PrivatecaCaPoolIdParseFunc), - "google_privateca_ca_pool_iam_member": ResourceIamMember(PrivatecaCaPoolIamSchema, PrivatecaCaPoolIamUpdaterProducer, PrivatecaCaPoolIdParseFunc), - "google_privateca_ca_pool_iam_policy": ResourceIamPolicy(PrivatecaCaPoolIamSchema, PrivatecaCaPoolIamUpdaterProducer, PrivatecaCaPoolIdParseFunc), - "google_privateca_certificate": ResourcePrivatecaCertificate(), - "google_privateca_certificate_authority": ResourcePrivatecaCertificateAuthority(), - "google_privateca_certificate_template_iam_binding": ResourceIamBinding(PrivatecaCertificateTemplateIamSchema, PrivatecaCertificateTemplateIamUpdaterProducer, PrivatecaCertificateTemplateIdParseFunc), - 
"google_privateca_certificate_template_iam_member": ResourceIamMember(PrivatecaCertificateTemplateIamSchema, PrivatecaCertificateTemplateIamUpdaterProducer, PrivatecaCertificateTemplateIdParseFunc), - "google_privateca_certificate_template_iam_policy": ResourceIamPolicy(PrivatecaCertificateTemplateIamSchema, PrivatecaCertificateTemplateIamUpdaterProducer, PrivatecaCertificateTemplateIdParseFunc), - "google_pubsub_schema": ResourcePubsubSchema(), - "google_pubsub_subscription": ResourcePubsubSubscription(), - "google_pubsub_topic": ResourcePubsubTopic(), - "google_pubsub_topic_iam_binding": ResourceIamBinding(PubsubTopicIamSchema, PubsubTopicIamUpdaterProducer, PubsubTopicIdParseFunc), - "google_pubsub_topic_iam_member": ResourceIamMember(PubsubTopicIamSchema, PubsubTopicIamUpdaterProducer, PubsubTopicIdParseFunc), - "google_pubsub_topic_iam_policy": ResourceIamPolicy(PubsubTopicIamSchema, PubsubTopicIamUpdaterProducer, PubsubTopicIdParseFunc), - "google_pubsub_lite_reservation": ResourcePubsubLiteReservation(), - "google_pubsub_lite_subscription": ResourcePubsubLiteSubscription(), - "google_pubsub_lite_topic": ResourcePubsubLiteTopic(), - "google_redis_instance": ResourceRedisInstance(), - "google_resource_manager_lien": ResourceResourceManagerLien(), - "google_secret_manager_secret": ResourceSecretManagerSecret(), - "google_secret_manager_secret_iam_binding": ResourceIamBinding(SecretManagerSecretIamSchema, SecretManagerSecretIamUpdaterProducer, SecretManagerSecretIdParseFunc), - "google_secret_manager_secret_iam_member": ResourceIamMember(SecretManagerSecretIamSchema, SecretManagerSecretIamUpdaterProducer, SecretManagerSecretIdParseFunc), - "google_secret_manager_secret_iam_policy": ResourceIamPolicy(SecretManagerSecretIamSchema, SecretManagerSecretIamUpdaterProducer, SecretManagerSecretIdParseFunc), - "google_secret_manager_secret_version": ResourceSecretManagerSecretVersion(), - "google_scc_mute_config": ResourceSecurityCenterMuteConfig(), - 
"google_scc_notification_config": ResourceSecurityCenterNotificationConfig(), - "google_scc_source": ResourceSecurityCenterSource(), - "google_scc_source_iam_binding": ResourceIamBinding(SecurityCenterSourceIamSchema, SecurityCenterSourceIamUpdaterProducer, SecurityCenterSourceIdParseFunc), - "google_scc_source_iam_member": ResourceIamMember(SecurityCenterSourceIamSchema, SecurityCenterSourceIamUpdaterProducer, SecurityCenterSourceIdParseFunc), - "google_scc_source_iam_policy": ResourceIamPolicy(SecurityCenterSourceIamSchema, SecurityCenterSourceIamUpdaterProducer, SecurityCenterSourceIdParseFunc), - "google_endpoints_service_iam_binding": ResourceIamBinding(ServiceManagementServiceIamSchema, ServiceManagementServiceIamUpdaterProducer, ServiceManagementServiceIdParseFunc), - "google_endpoints_service_iam_member": ResourceIamMember(ServiceManagementServiceIamSchema, ServiceManagementServiceIamUpdaterProducer, ServiceManagementServiceIdParseFunc), - "google_endpoints_service_iam_policy": ResourceIamPolicy(ServiceManagementServiceIamSchema, ServiceManagementServiceIamUpdaterProducer, ServiceManagementServiceIdParseFunc), - "google_endpoints_service_consumers_iam_binding": ResourceIamBinding(ServiceManagementServiceConsumersIamSchema, ServiceManagementServiceConsumersIamUpdaterProducer, ServiceManagementServiceConsumersIdParseFunc), - "google_endpoints_service_consumers_iam_member": ResourceIamMember(ServiceManagementServiceConsumersIamSchema, ServiceManagementServiceConsumersIamUpdaterProducer, ServiceManagementServiceConsumersIdParseFunc), - "google_endpoints_service_consumers_iam_policy": ResourceIamPolicy(ServiceManagementServiceConsumersIamSchema, ServiceManagementServiceConsumersIamUpdaterProducer, ServiceManagementServiceConsumersIdParseFunc), - "google_sourcerepo_repository": ResourceSourceRepoRepository(), - "google_sourcerepo_repository_iam_binding": ResourceIamBinding(SourceRepoRepositoryIamSchema, SourceRepoRepositoryIamUpdaterProducer, 
SourceRepoRepositoryIdParseFunc), - "google_sourcerepo_repository_iam_member": ResourceIamMember(SourceRepoRepositoryIamSchema, SourceRepoRepositoryIamUpdaterProducer, SourceRepoRepositoryIdParseFunc), - "google_sourcerepo_repository_iam_policy": ResourceIamPolicy(SourceRepoRepositoryIamSchema, SourceRepoRepositoryIamUpdaterProducer, SourceRepoRepositoryIdParseFunc), - "google_spanner_database": ResourceSpannerDatabase(), - "google_spanner_instance": ResourceSpannerInstance(), - "google_sql_database": ResourceSQLDatabase(), - "google_sql_source_representation_instance": ResourceSQLSourceRepresentationInstance(), - "google_storage_bucket_iam_binding": ResourceIamBinding(StorageBucketIamSchema, StorageBucketIamUpdaterProducer, StorageBucketIdParseFunc), - "google_storage_bucket_iam_member": ResourceIamMember(StorageBucketIamSchema, StorageBucketIamUpdaterProducer, StorageBucketIdParseFunc), - "google_storage_bucket_iam_policy": ResourceIamPolicy(StorageBucketIamSchema, StorageBucketIamUpdaterProducer, StorageBucketIdParseFunc), - "google_storage_bucket_access_control": ResourceStorageBucketAccessControl(), - "google_storage_default_object_access_control": ResourceStorageDefaultObjectAccessControl(), - "google_storage_hmac_key": ResourceStorageHmacKey(), - "google_storage_object_access_control": ResourceStorageObjectAccessControl(), - "google_storage_transfer_agent_pool": ResourceStorageTransferAgentPool(), - "google_tags_tag_binding": ResourceTagsTagBinding(), - "google_tags_tag_key": ResourceTagsTagKey(), - "google_tags_tag_key_iam_binding": ResourceIamBinding(TagsTagKeyIamSchema, TagsTagKeyIamUpdaterProducer, TagsTagKeyIdParseFunc), - "google_tags_tag_key_iam_member": ResourceIamMember(TagsTagKeyIamSchema, TagsTagKeyIamUpdaterProducer, TagsTagKeyIdParseFunc), - "google_tags_tag_key_iam_policy": ResourceIamPolicy(TagsTagKeyIamSchema, TagsTagKeyIamUpdaterProducer, TagsTagKeyIdParseFunc), - "google_tags_tag_value": ResourceTagsTagValue(), - 
"google_tags_tag_value_iam_binding": ResourceIamBinding(TagsTagValueIamSchema, TagsTagValueIamUpdaterProducer, TagsTagValueIdParseFunc), - "google_tags_tag_value_iam_member": ResourceIamMember(TagsTagValueIamSchema, TagsTagValueIamUpdaterProducer, TagsTagValueIdParseFunc), - "google_tags_tag_value_iam_policy": ResourceIamPolicy(TagsTagValueIamSchema, TagsTagValueIamUpdaterProducer, TagsTagValueIdParseFunc), - "google_tpu_node": ResourceTPUNode(), - "google_vertex_ai_dataset": ResourceVertexAIDataset(), - "google_vertex_ai_endpoint": ResourceVertexAIEndpoint(), - "google_vertex_ai_featurestore": ResourceVertexAIFeaturestore(), - "google_vertex_ai_featurestore_entitytype": ResourceVertexAIFeaturestoreEntitytype(), - "google_vertex_ai_featurestore_entitytype_feature": ResourceVertexAIFeaturestoreEntitytypeFeature(), - "google_vertex_ai_index": ResourceVertexAIIndex(), - "google_vertex_ai_tensorboard": ResourceVertexAITensorboard(), - "google_vpc_access_connector": ResourceVPCAccessConnector(), - "google_workflows_workflow": ResourceWorkflowsWorkflow(), - }, - map[string]*schema.Resource{ - // ####### START handwritten resources ########### - "google_app_engine_application": ResourceAppEngineApplication(), - "google_apigee_sharedflow": ResourceApigeeSharedFlow(), - "google_apigee_sharedflow_deployment": ResourceApigeeSharedFlowDeployment(), - "google_apigee_flowhook": ResourceApigeeFlowhook(), - "google_bigquery_table": ResourceBigQueryTable(), - "google_bigtable_gc_policy": ResourceBigtableGCPolicy(), - "google_bigtable_instance": ResourceBigtableInstance(), - "google_bigtable_table": ResourceBigtableTable(), - "google_billing_subaccount": ResourceBillingSubaccount(), - "google_cloudfunctions_function": ResourceCloudFunctionsFunction(), - "google_composer_environment": ResourceComposerEnvironment(), - "google_compute_attached_disk": ResourceComputeAttachedDisk(), - "google_compute_instance": ResourceComputeInstance(), - "google_compute_instance_from_template": 
ResourceComputeInstanceFromTemplate(), - "google_compute_instance_group": ResourceComputeInstanceGroup(), - "google_compute_instance_group_manager": ResourceComputeInstanceGroupManager(), - "google_compute_instance_template": ResourceComputeInstanceTemplate(), - "google_compute_network_peering": ResourceComputeNetworkPeering(), - "google_compute_project_default_network_tier": ResourceComputeProjectDefaultNetworkTier(), - "google_compute_project_metadata": ResourceComputeProjectMetadata(), - "google_compute_project_metadata_item": ResourceComputeProjectMetadataItem(), - "google_compute_region_instance_group_manager": ResourceComputeRegionInstanceGroupManager(), - "google_compute_router_interface": ResourceComputeRouterInterface(), - "google_compute_security_policy": ResourceComputeSecurityPolicy(), - "google_compute_shared_vpc_host_project": ResourceComputeSharedVpcHostProject(), - "google_compute_shared_vpc_service_project": ResourceComputeSharedVpcServiceProject(), - "google_compute_target_pool": ResourceComputeTargetPool(), - "google_container_cluster": ResourceContainerCluster(), - "google_container_node_pool": ResourceContainerNodePool(), - "google_container_registry": ResourceContainerRegistry(), - "google_dataflow_job": ResourceDataflowJob(), - "google_dataproc_cluster": ResourceDataprocCluster(), - "google_dataproc_job": ResourceDataprocJob(), - "google_dialogflow_cx_version": ResourceDialogflowCXVersion(), - "google_dialogflow_cx_environment": ResourceDialogflowCXEnvironment(), - "google_dns_record_set": ResourceDnsRecordSet(), - "google_endpoints_service": ResourceEndpointsService(), - "google_folder": ResourceGoogleFolder(), - "google_folder_organization_policy": ResourceGoogleFolderOrganizationPolicy(), - "google_logging_billing_account_sink": ResourceLoggingBillingAccountSink(), - "google_logging_billing_account_exclusion": ResourceLoggingExclusion(BillingAccountLoggingExclusionSchema, NewBillingAccountLoggingExclusionUpdater, 
BillingAccountLoggingExclusionIdParseFunc), - "google_logging_billing_account_bucket_config": ResourceLoggingBillingAccountBucketConfig(), - "google_logging_organization_sink": ResourceLoggingOrganizationSink(), - "google_logging_organization_exclusion": ResourceLoggingExclusion(OrganizationLoggingExclusionSchema, NewOrganizationLoggingExclusionUpdater, OrganizationLoggingExclusionIdParseFunc), - "google_logging_organization_bucket_config": ResourceLoggingOrganizationBucketConfig(), - "google_logging_folder_sink": ResourceLoggingFolderSink(), - "google_logging_folder_exclusion": ResourceLoggingExclusion(FolderLoggingExclusionSchema, NewFolderLoggingExclusionUpdater, FolderLoggingExclusionIdParseFunc), - "google_logging_folder_bucket_config": ResourceLoggingFolderBucketConfig(), - "google_logging_project_sink": ResourceLoggingProjectSink(), - "google_logging_project_exclusion": ResourceLoggingExclusion(ProjectLoggingExclusionSchema, NewProjectLoggingExclusionUpdater, ProjectLoggingExclusionIdParseFunc), - "google_logging_project_bucket_config": ResourceLoggingProjectBucketConfig(), - "google_monitoring_dashboard": ResourceMonitoringDashboard(), - "google_service_networking_connection": ResourceServiceNetworkingConnection(), - "google_sql_database_instance": ResourceSqlDatabaseInstance(), - "google_sql_ssl_cert": ResourceSqlSslCert(), - "google_sql_user": ResourceSqlUser(), - "google_organization_iam_custom_role": ResourceGoogleOrganizationIamCustomRole(), - "google_organization_policy": ResourceGoogleOrganizationPolicy(), - "google_project": ResourceGoogleProject(), - "google_project_default_service_accounts": ResourceGoogleProjectDefaultServiceAccounts(), - "google_project_service": ResourceGoogleProjectService(), - "google_project_iam_custom_role": ResourceGoogleProjectIamCustomRole(), - "google_project_organization_policy": ResourceGoogleProjectOrganizationPolicy(), - "google_project_usage_export_bucket": ResourceProjectUsageBucket(), - "google_service_account": 
ResourceGoogleServiceAccount(), - "google_service_account_key": ResourceGoogleServiceAccountKey(), - "google_service_networking_peered_dns_domain": ResourceGoogleServiceNetworkingPeeredDNSDomain(), - "google_storage_bucket": ResourceStorageBucket(), - "google_storage_bucket_acl": ResourceStorageBucketAcl(), - "google_storage_bucket_object": ResourceStorageBucketObject(), - "google_storage_object_acl": ResourceStorageObjectAcl(), - "google_storage_default_object_acl": ResourceStorageDefaultObjectAcl(), - "google_storage_notification": ResourceStorageNotification(), - "google_storage_transfer_job": ResourceStorageTransferJob(), - "google_tags_location_tag_binding": ResourceTagsLocationTagBinding(), - // ####### END handwritten resources ########### - }, - map[string]*schema.Resource{ - // ####### START non-generated IAM resources ########### - "google_bigtable_instance_iam_binding": ResourceIamBinding(IamBigtableInstanceSchema, NewBigtableInstanceUpdater, BigtableInstanceIdParseFunc), - "google_bigtable_instance_iam_member": ResourceIamMember(IamBigtableInstanceSchema, NewBigtableInstanceUpdater, BigtableInstanceIdParseFunc), - "google_bigtable_instance_iam_policy": ResourceIamPolicy(IamBigtableInstanceSchema, NewBigtableInstanceUpdater, BigtableInstanceIdParseFunc), - "google_bigtable_table_iam_binding": ResourceIamBinding(IamBigtableTableSchema, NewBigtableTableUpdater, BigtableTableIdParseFunc), - "google_bigtable_table_iam_member": ResourceIamMember(IamBigtableTableSchema, NewBigtableTableUpdater, BigtableTableIdParseFunc), - "google_bigtable_table_iam_policy": ResourceIamPolicy(IamBigtableTableSchema, NewBigtableTableUpdater, BigtableTableIdParseFunc), - "google_bigquery_dataset_iam_binding": ResourceIamBinding(IamBigqueryDatasetSchema, NewBigqueryDatasetIamUpdater, BigqueryDatasetIdParseFunc), - "google_bigquery_dataset_iam_member": ResourceIamMember(IamBigqueryDatasetSchema, NewBigqueryDatasetIamUpdater, BigqueryDatasetIdParseFunc), - 
"google_bigquery_dataset_iam_policy": ResourceIamPolicy(IamBigqueryDatasetSchema, NewBigqueryDatasetIamUpdater, BigqueryDatasetIdParseFunc), - "google_billing_account_iam_binding": ResourceIamBinding(IamBillingAccountSchema, NewBillingAccountIamUpdater, BillingAccountIdParseFunc), - "google_billing_account_iam_member": ResourceIamMember(IamBillingAccountSchema, NewBillingAccountIamUpdater, BillingAccountIdParseFunc), - "google_billing_account_iam_policy": ResourceIamPolicy(IamBillingAccountSchema, NewBillingAccountIamUpdater, BillingAccountIdParseFunc), - "google_dataproc_cluster_iam_binding": ResourceIamBinding(IamDataprocClusterSchema, NewDataprocClusterUpdater, DataprocClusterIdParseFunc), - "google_dataproc_cluster_iam_member": ResourceIamMember(IamDataprocClusterSchema, NewDataprocClusterUpdater, DataprocClusterIdParseFunc), - "google_dataproc_cluster_iam_policy": ResourceIamPolicy(IamDataprocClusterSchema, NewDataprocClusterUpdater, DataprocClusterIdParseFunc), - "google_dataproc_job_iam_binding": ResourceIamBinding(IamDataprocJobSchema, NewDataprocJobUpdater, DataprocJobIdParseFunc), - "google_dataproc_job_iam_member": ResourceIamMember(IamDataprocJobSchema, NewDataprocJobUpdater, DataprocJobIdParseFunc), - "google_dataproc_job_iam_policy": ResourceIamPolicy(IamDataprocJobSchema, NewDataprocJobUpdater, DataprocJobIdParseFunc), - "google_folder_iam_binding": ResourceIamBinding(IamFolderSchema, NewFolderIamUpdater, FolderIdParseFunc), - "google_folder_iam_member": ResourceIamMember(IamFolderSchema, NewFolderIamUpdater, FolderIdParseFunc), - "google_folder_iam_policy": ResourceIamPolicy(IamFolderSchema, NewFolderIamUpdater, FolderIdParseFunc), - "google_folder_iam_audit_config": ResourceIamAuditConfig(IamFolderSchema, NewFolderIamUpdater, FolderIdParseFunc), - "google_healthcare_dataset_iam_binding": ResourceIamBindingWithBatching(IamHealthcareDatasetSchema, NewHealthcareDatasetIamUpdater, DatasetIdParseFunc, IamBatchingEnabled), - 
"google_healthcare_dataset_iam_member": ResourceIamMemberWithBatching(IamHealthcareDatasetSchema, NewHealthcareDatasetIamUpdater, DatasetIdParseFunc, IamBatchingEnabled), - "google_healthcare_dataset_iam_policy": ResourceIamPolicy(IamHealthcareDatasetSchema, NewHealthcareDatasetIamUpdater, DatasetIdParseFunc), - "google_healthcare_dicom_store_iam_binding": ResourceIamBindingWithBatching(IamHealthcareDicomStoreSchema, NewHealthcareDicomStoreIamUpdater, DicomStoreIdParseFunc, IamBatchingEnabled), - "google_healthcare_dicom_store_iam_member": ResourceIamMemberWithBatching(IamHealthcareDicomStoreSchema, NewHealthcareDicomStoreIamUpdater, DicomStoreIdParseFunc, IamBatchingEnabled), - "google_healthcare_dicom_store_iam_policy": ResourceIamPolicy(IamHealthcareDicomStoreSchema, NewHealthcareDicomStoreIamUpdater, DicomStoreIdParseFunc), - "google_healthcare_fhir_store_iam_binding": ResourceIamBindingWithBatching(IamHealthcareFhirStoreSchema, NewHealthcareFhirStoreIamUpdater, FhirStoreIdParseFunc, IamBatchingEnabled), - "google_healthcare_fhir_store_iam_member": ResourceIamMemberWithBatching(IamHealthcareFhirStoreSchema, NewHealthcareFhirStoreIamUpdater, FhirStoreIdParseFunc, IamBatchingEnabled), - "google_healthcare_fhir_store_iam_policy": ResourceIamPolicy(IamHealthcareFhirStoreSchema, NewHealthcareFhirStoreIamUpdater, FhirStoreIdParseFunc), - "google_healthcare_hl7_v2_store_iam_binding": ResourceIamBindingWithBatching(IamHealthcareHl7V2StoreSchema, NewHealthcareHl7V2StoreIamUpdater, Hl7V2StoreIdParseFunc, IamBatchingEnabled), - "google_healthcare_hl7_v2_store_iam_member": ResourceIamMemberWithBatching(IamHealthcareHl7V2StoreSchema, NewHealthcareHl7V2StoreIamUpdater, Hl7V2StoreIdParseFunc, IamBatchingEnabled), - "google_healthcare_hl7_v2_store_iam_policy": ResourceIamPolicy(IamHealthcareHl7V2StoreSchema, NewHealthcareHl7V2StoreIamUpdater, Hl7V2StoreIdParseFunc), - "google_kms_key_ring_iam_binding": ResourceIamBinding(IamKmsKeyRingSchema, NewKmsKeyRingIamUpdater, 
KeyRingIdParseFunc), - "google_kms_key_ring_iam_member": ResourceIamMember(IamKmsKeyRingSchema, NewKmsKeyRingIamUpdater, KeyRingIdParseFunc), - "google_kms_key_ring_iam_policy": ResourceIamPolicy(IamKmsKeyRingSchema, NewKmsKeyRingIamUpdater, KeyRingIdParseFunc), - "google_kms_crypto_key_iam_binding": ResourceIamBinding(IamKmsCryptoKeySchema, NewKmsCryptoKeyIamUpdater, CryptoIdParseFunc), - "google_kms_crypto_key_iam_member": ResourceIamMember(IamKmsCryptoKeySchema, NewKmsCryptoKeyIamUpdater, CryptoIdParseFunc), - "google_kms_crypto_key_iam_policy": ResourceIamPolicy(IamKmsCryptoKeySchema, NewKmsCryptoKeyIamUpdater, CryptoIdParseFunc), - "google_spanner_instance_iam_binding": ResourceIamBinding(IamSpannerInstanceSchema, NewSpannerInstanceIamUpdater, SpannerInstanceIdParseFunc), - "google_spanner_instance_iam_member": ResourceIamMember(IamSpannerInstanceSchema, NewSpannerInstanceIamUpdater, SpannerInstanceIdParseFunc), - "google_spanner_instance_iam_policy": ResourceIamPolicy(IamSpannerInstanceSchema, NewSpannerInstanceIamUpdater, SpannerInstanceIdParseFunc), - "google_spanner_database_iam_binding": ResourceIamBinding(IamSpannerDatabaseSchema, NewSpannerDatabaseIamUpdater, SpannerDatabaseIdParseFunc), - "google_spanner_database_iam_member": ResourceIamMember(IamSpannerDatabaseSchema, NewSpannerDatabaseIamUpdater, SpannerDatabaseIdParseFunc), - "google_spanner_database_iam_policy": ResourceIamPolicy(IamSpannerDatabaseSchema, NewSpannerDatabaseIamUpdater, SpannerDatabaseIdParseFunc), - "google_organization_iam_binding": ResourceIamBinding(IamOrganizationSchema, NewOrganizationIamUpdater, OrgIdParseFunc), - "google_organization_iam_member": ResourceIamMember(IamOrganizationSchema, NewOrganizationIamUpdater, OrgIdParseFunc), - "google_organization_iam_policy": ResourceIamPolicy(IamOrganizationSchema, NewOrganizationIamUpdater, OrgIdParseFunc), - "google_organization_iam_audit_config": ResourceIamAuditConfig(IamOrganizationSchema, NewOrganizationIamUpdater, 
OrgIdParseFunc), - "google_project_iam_policy": ResourceIamPolicy(IamProjectSchema, NewProjectIamUpdater, ProjectIdParseFunc), - "google_project_iam_binding": ResourceIamBindingWithBatching(IamProjectSchema, NewProjectIamUpdater, ProjectIdParseFunc, IamBatchingEnabled), - "google_project_iam_member": ResourceIamMemberWithBatching(IamProjectSchema, NewProjectIamUpdater, ProjectIdParseFunc, IamBatchingEnabled), - "google_project_iam_audit_config": ResourceIamAuditConfigWithBatching(IamProjectSchema, NewProjectIamUpdater, ProjectIdParseFunc, IamBatchingEnabled), - "google_pubsub_subscription_iam_binding": ResourceIamBinding(IamPubsubSubscriptionSchema, NewPubsubSubscriptionIamUpdater, PubsubSubscriptionIdParseFunc), - "google_pubsub_subscription_iam_member": ResourceIamMember(IamPubsubSubscriptionSchema, NewPubsubSubscriptionIamUpdater, PubsubSubscriptionIdParseFunc), - "google_pubsub_subscription_iam_policy": ResourceIamPolicy(IamPubsubSubscriptionSchema, NewPubsubSubscriptionIamUpdater, PubsubSubscriptionIdParseFunc), - "google_service_account_iam_binding": ResourceIamBinding(IamServiceAccountSchema, NewServiceAccountIamUpdater, ServiceAccountIdParseFunc), - "google_service_account_iam_member": ResourceIamMember(IamServiceAccountSchema, NewServiceAccountIamUpdater, ServiceAccountIdParseFunc), - "google_service_account_iam_policy": ResourceIamPolicy(IamServiceAccountSchema, NewServiceAccountIamUpdater, ServiceAccountIdParseFunc), - // ####### END non-generated IAM resources ########### - }, - dclResources, - ) -} - -func providerConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Provider) (interface{}, diag.Diagnostics) { - config := Config{ - Project: d.Get("project").(string), - Region: d.Get("region").(string), - Zone: d.Get("zone").(string), - UserProjectOverride: d.Get("user_project_override").(bool), - BillingProject: d.Get("billing_project").(string), - UserAgent: p.UserAgent("terraform-provider-google", version.ProviderVersion), - } - - // opt 
in extension for adding to the User-Agent header - if ext := os.Getenv("GOOGLE_TERRAFORM_USERAGENT_EXTENSION"); ext != "" { - ua := config.UserAgent - config.UserAgent = fmt.Sprintf("%s %s", ua, ext) - } - - if v, ok := d.GetOk("request_timeout"); ok { - var err error - config.RequestTimeout, err = time.ParseDuration(v.(string)) - if err != nil { - return nil, diag.FromErr(err) - } - } - - if v, ok := d.GetOk("request_reason"); ok { - config.RequestReason = v.(string) - } - - // Check for primary credentials in config. Note that if neither is set, ADCs - // will be used if available. - if v, ok := d.GetOk("access_token"); ok { - config.AccessToken = v.(string) - } - - if v, ok := d.GetOk("credentials"); ok { - config.Credentials = v.(string) - } - - // only check environment variables if neither value was set in config- this - // means config beats env var in all cases. - if config.AccessToken == "" && config.Credentials == "" { - config.Credentials = MultiEnvSearch([]string{ - "GOOGLE_CREDENTIALS", - "GOOGLE_CLOUD_KEYFILE_JSON", - "GCLOUD_KEYFILE_JSON", - }) - - config.AccessToken = MultiEnvSearch([]string{ - "GOOGLE_OAUTH_ACCESS_TOKEN", - }) - } - - // Given that impersonate_service_account is a secondary auth method, it has - // no conflicts to worry about. We pull the env var in a DefaultFunc. 
- if v, ok := d.GetOk("impersonate_service_account"); ok { - config.ImpersonateServiceAccount = v.(string) - } - - delegates := d.Get("impersonate_service_account_delegates").([]interface{}) - if len(delegates) > 0 { - config.ImpersonateServiceAccountDelegates = make([]string, len(delegates)) - } - for i, delegate := range delegates { - config.ImpersonateServiceAccountDelegates[i] = delegate.(string) - } - - scopes := d.Get("scopes").([]interface{}) - if len(scopes) > 0 { - config.Scopes = make([]string, len(scopes)) - } - for i, scope := range scopes { - config.Scopes[i] = scope.(string) - } - - batchCfg, err := ExpandProviderBatchingConfig(d.Get("batching")) - if err != nil { - return nil, diag.FromErr(err) - } - config.BatchingConfig = batchCfg - - // Generated products - config.AccessApprovalBasePath = d.Get("access_approval_custom_endpoint").(string) - config.AccessContextManagerBasePath = d.Get("access_context_manager_custom_endpoint").(string) - config.ActiveDirectoryBasePath = d.Get("active_directory_custom_endpoint").(string) - config.AlloydbBasePath = d.Get("alloydb_custom_endpoint").(string) - config.ApigeeBasePath = d.Get("apigee_custom_endpoint").(string) - config.AppEngineBasePath = d.Get("app_engine_custom_endpoint").(string) - config.ArtifactRegistryBasePath = d.Get("artifact_registry_custom_endpoint").(string) - config.BeyondcorpBasePath = d.Get("beyondcorp_custom_endpoint").(string) - config.BigQueryBasePath = d.Get("big_query_custom_endpoint").(string) - config.BigqueryAnalyticsHubBasePath = d.Get("bigquery_analytics_hub_custom_endpoint").(string) - config.BigqueryConnectionBasePath = d.Get("bigquery_connection_custom_endpoint").(string) - config.BigqueryDatapolicyBasePath = d.Get("bigquery_datapolicy_custom_endpoint").(string) - config.BigqueryDataTransferBasePath = d.Get("bigquery_data_transfer_custom_endpoint").(string) - config.BigqueryReservationBasePath = d.Get("bigquery_reservation_custom_endpoint").(string) - config.BigtableBasePath = 
d.Get("bigtable_custom_endpoint").(string) - config.BillingBasePath = d.Get("billing_custom_endpoint").(string) - config.BinaryAuthorizationBasePath = d.Get("binary_authorization_custom_endpoint").(string) - config.CertificateManagerBasePath = d.Get("certificate_manager_custom_endpoint").(string) - config.CloudAssetBasePath = d.Get("cloud_asset_custom_endpoint").(string) - config.CloudBuildBasePath = d.Get("cloud_build_custom_endpoint").(string) - config.CloudFunctionsBasePath = d.Get("cloud_functions_custom_endpoint").(string) - config.Cloudfunctions2BasePath = d.Get("cloudfunctions2_custom_endpoint").(string) - config.CloudIdentityBasePath = d.Get("cloud_identity_custom_endpoint").(string) - config.CloudIdsBasePath = d.Get("cloud_ids_custom_endpoint").(string) - config.CloudIotBasePath = d.Get("cloud_iot_custom_endpoint").(string) - config.CloudRunBasePath = d.Get("cloud_run_custom_endpoint").(string) - config.CloudRunV2BasePath = d.Get("cloud_run_v2_custom_endpoint").(string) - config.CloudSchedulerBasePath = d.Get("cloud_scheduler_custom_endpoint").(string) - config.CloudTasksBasePath = d.Get("cloud_tasks_custom_endpoint").(string) - config.ComputeBasePath = d.Get("compute_custom_endpoint").(string) - config.ContainerAnalysisBasePath = d.Get("container_analysis_custom_endpoint").(string) - config.ContainerAttachedBasePath = d.Get("container_attached_custom_endpoint").(string) - config.DataCatalogBasePath = d.Get("data_catalog_custom_endpoint").(string) - config.DataFusionBasePath = d.Get("data_fusion_custom_endpoint").(string) - config.DataLossPreventionBasePath = d.Get("data_loss_prevention_custom_endpoint").(string) - config.DataplexBasePath = d.Get("dataplex_custom_endpoint").(string) - config.DataprocBasePath = d.Get("dataproc_custom_endpoint").(string) - config.DataprocMetastoreBasePath = d.Get("dataproc_metastore_custom_endpoint").(string) - config.DatastoreBasePath = d.Get("datastore_custom_endpoint").(string) - config.DatastreamBasePath = 
d.Get("datastream_custom_endpoint").(string) - config.DeploymentManagerBasePath = d.Get("deployment_manager_custom_endpoint").(string) - config.DialogflowBasePath = d.Get("dialogflow_custom_endpoint").(string) - config.DialogflowCXBasePath = d.Get("dialogflow_cx_custom_endpoint").(string) - config.DNSBasePath = d.Get("dns_custom_endpoint").(string) - config.DocumentAIBasePath = d.Get("document_ai_custom_endpoint").(string) - config.EssentialContactsBasePath = d.Get("essential_contacts_custom_endpoint").(string) - config.FilestoreBasePath = d.Get("filestore_custom_endpoint").(string) - config.FirestoreBasePath = d.Get("firestore_custom_endpoint").(string) - config.GameServicesBasePath = d.Get("game_services_custom_endpoint").(string) - config.GKEBackupBasePath = d.Get("gke_backup_custom_endpoint").(string) - config.GKEHubBasePath = d.Get("gke_hub_custom_endpoint").(string) - config.HealthcareBasePath = d.Get("healthcare_custom_endpoint").(string) - config.IAM2BasePath = d.Get("iam2_custom_endpoint").(string) - config.IAMBetaBasePath = d.Get("iam_beta_custom_endpoint").(string) - config.IAMWorkforcePoolBasePath = d.Get("iam_workforce_pool_custom_endpoint").(string) - config.IapBasePath = d.Get("iap_custom_endpoint").(string) - config.IdentityPlatformBasePath = d.Get("identity_platform_custom_endpoint").(string) - config.KMSBasePath = d.Get("kms_custom_endpoint").(string) - config.LoggingBasePath = d.Get("logging_custom_endpoint").(string) - config.MemcacheBasePath = d.Get("memcache_custom_endpoint").(string) - config.MLEngineBasePath = d.Get("ml_engine_custom_endpoint").(string) - config.MonitoringBasePath = d.Get("monitoring_custom_endpoint").(string) - config.NetworkManagementBasePath = d.Get("network_management_custom_endpoint").(string) - config.NetworkServicesBasePath = d.Get("network_services_custom_endpoint").(string) - config.NotebooksBasePath = d.Get("notebooks_custom_endpoint").(string) - config.OSConfigBasePath = d.Get("os_config_custom_endpoint").(string) 
- config.OSLoginBasePath = d.Get("os_login_custom_endpoint").(string) - config.PrivatecaBasePath = d.Get("privateca_custom_endpoint").(string) - config.PubsubBasePath = d.Get("pubsub_custom_endpoint").(string) - config.PubsubLiteBasePath = d.Get("pubsub_lite_custom_endpoint").(string) - config.RedisBasePath = d.Get("redis_custom_endpoint").(string) - config.ResourceManagerBasePath = d.Get("resource_manager_custom_endpoint").(string) - config.SecretManagerBasePath = d.Get("secret_manager_custom_endpoint").(string) - config.SecurityCenterBasePath = d.Get("security_center_custom_endpoint").(string) - config.ServiceManagementBasePath = d.Get("service_management_custom_endpoint").(string) - config.ServiceUsageBasePath = d.Get("service_usage_custom_endpoint").(string) - config.SourceRepoBasePath = d.Get("source_repo_custom_endpoint").(string) - config.SpannerBasePath = d.Get("spanner_custom_endpoint").(string) - config.SQLBasePath = d.Get("sql_custom_endpoint").(string) - config.StorageBasePath = d.Get("storage_custom_endpoint").(string) - config.StorageTransferBasePath = d.Get("storage_transfer_custom_endpoint").(string) - config.TagsBasePath = d.Get("tags_custom_endpoint").(string) - config.TPUBasePath = d.Get("tpu_custom_endpoint").(string) - config.VertexAIBasePath = d.Get("vertex_ai_custom_endpoint").(string) - config.VPCAccessBasePath = d.Get("vpc_access_custom_endpoint").(string) - config.WorkflowsBasePath = d.Get("workflows_custom_endpoint").(string) - - // Handwritten Products / Versioned / Atypical Entries - config.CloudBillingBasePath = d.Get(CloudBillingCustomEndpointEntryKey).(string) - config.ComposerBasePath = d.Get(ComposerCustomEndpointEntryKey).(string) - config.ContainerBasePath = d.Get(ContainerCustomEndpointEntryKey).(string) - config.DataflowBasePath = d.Get(DataflowCustomEndpointEntryKey).(string) - config.IamCredentialsBasePath = d.Get(IamCredentialsCustomEndpointEntryKey).(string) - config.ResourceManagerV3BasePath = 
d.Get(ResourceManagerV3CustomEndpointEntryKey).(string) - config.IAMBasePath = d.Get(IAMCustomEndpointEntryKey).(string) - config.ServiceNetworkingBasePath = d.Get(ServiceNetworkingCustomEndpointEntryKey).(string) - config.ServiceUsageBasePath = d.Get(ServiceUsageCustomEndpointEntryKey).(string) - config.BigtableAdminBasePath = d.Get(BigtableAdminCustomEndpointEntryKey).(string) - config.TagsLocationBasePath = d.Get(TagsLocationCustomEndpointEntryKey).(string) - - // dcl - config.ContainerAwsBasePath = d.Get(ContainerAwsCustomEndpointEntryKey).(string) - config.ContainerAzureBasePath = d.Get(ContainerAzureCustomEndpointEntryKey).(string) - - stopCtx, ok := schema.StopContext(ctx) - if !ok { - stopCtx = ctx - } - if err := config.LoadAndValidate(stopCtx); err != nil { - return nil, diag.FromErr(err) - } - - return ProviderDCLConfigure(d, &config), nil -} - -func validateCredentials(v interface{}, k string) (warnings []string, errors []error) { - if v == nil || v.(string) == "" { - return - } - creds := v.(string) - // if this is a path and we can stat it, assume it's ok - if _, err := os.Stat(creds); err == nil { - return - } - if _, err := googleoauth.CredentialsFromJSON(context.Background(), []byte(creds)); err != nil { - errors = append(errors, - fmt.Errorf("JSON credentials are not valid: %s", err)) - } - - return -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/mtls_util.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/mtls_util.go similarity index 94% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/mtls_util.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/mtls_util.go index a5ef1d9332..256a3a2cc8 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/mtls_util.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/mtls_util.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package provider import ( "context" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider.go new file mode 100644 index 0000000000..da2d0bcb75 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider.go @@ -0,0 +1,1840 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package provider + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/version" + + "github.com/hashicorp/terraform-provider-google/google/services/accessapproval" + "github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager" + "github.com/hashicorp/terraform-provider-google/google/services/activedirectory" + "github.com/hashicorp/terraform-provider-google/google/services/alloydb" + "github.com/hashicorp/terraform-provider-google/google/services/apigee" + "github.com/hashicorp/terraform-provider-google/google/services/appengine" + "github.com/hashicorp/terraform-provider-google/google/services/artifactregistry" + "github.com/hashicorp/terraform-provider-google/google/services/beyondcorp" + "github.com/hashicorp/terraform-provider-google/google/services/bigquery" + "github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub" + "github.com/hashicorp/terraform-provider-google/google/services/bigqueryconnection" + "github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy" + 
"github.com/hashicorp/terraform-provider-google/google/services/bigquerydatatransfer" + "github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation" + "github.com/hashicorp/terraform-provider-google/google/services/bigtable" + "github.com/hashicorp/terraform-provider-google/google/services/billing" + "github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization" + "github.com/hashicorp/terraform-provider-google/google/services/certificatemanager" + "github.com/hashicorp/terraform-provider-google/google/services/cloudasset" + "github.com/hashicorp/terraform-provider-google/google/services/cloudbuild" + "github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2" + "github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions" + "github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2" + "github.com/hashicorp/terraform-provider-google/google/services/cloudidentity" + "github.com/hashicorp/terraform-provider-google/google/services/cloudids" + "github.com/hashicorp/terraform-provider-google/google/services/cloudiot" + "github.com/hashicorp/terraform-provider-google/google/services/cloudrun" + "github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2" + "github.com/hashicorp/terraform-provider-google/google/services/cloudscheduler" + "github.com/hashicorp/terraform-provider-google/google/services/cloudtasks" + "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/services/containeranalysis" + "github.com/hashicorp/terraform-provider-google/google/services/containerattached" + "github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice" + "github.com/hashicorp/terraform-provider-google/google/services/datacatalog" + "github.com/hashicorp/terraform-provider-google/google/services/datafusion" + 
"github.com/hashicorp/terraform-provider-google/google/services/datalossprevention" + "github.com/hashicorp/terraform-provider-google/google/services/dataplex" + "github.com/hashicorp/terraform-provider-google/google/services/dataproc" + "github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore" + "github.com/hashicorp/terraform-provider-google/google/services/datastore" + "github.com/hashicorp/terraform-provider-google/google/services/datastream" + "github.com/hashicorp/terraform-provider-google/google/services/deploymentmanager" + "github.com/hashicorp/terraform-provider-google/google/services/dialogflow" + "github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx" + "github.com/hashicorp/terraform-provider-google/google/services/dns" + "github.com/hashicorp/terraform-provider-google/google/services/documentai" + "github.com/hashicorp/terraform-provider-google/google/services/essentialcontacts" + "github.com/hashicorp/terraform-provider-google/google/services/filestore" + "github.com/hashicorp/terraform-provider-google/google/services/firestore" + "github.com/hashicorp/terraform-provider-google/google/services/gameservices" + "github.com/hashicorp/terraform-provider-google/google/services/gkebackup" + "github.com/hashicorp/terraform-provider-google/google/services/gkehub" + "github.com/hashicorp/terraform-provider-google/google/services/gkehub2" + "github.com/hashicorp/terraform-provider-google/google/services/healthcare" + "github.com/hashicorp/terraform-provider-google/google/services/iam2" + "github.com/hashicorp/terraform-provider-google/google/services/iambeta" + "github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool" + "github.com/hashicorp/terraform-provider-google/google/services/iap" + "github.com/hashicorp/terraform-provider-google/google/services/identityplatform" + "github.com/hashicorp/terraform-provider-google/google/services/kms" + 
"github.com/hashicorp/terraform-provider-google/google/services/logging" + "github.com/hashicorp/terraform-provider-google/google/services/looker" + "github.com/hashicorp/terraform-provider-google/google/services/memcache" + "github.com/hashicorp/terraform-provider-google/google/services/mlengine" + "github.com/hashicorp/terraform-provider-google/google/services/monitoring" + "github.com/hashicorp/terraform-provider-google/google/services/networkmanagement" + "github.com/hashicorp/terraform-provider-google/google/services/networksecurity" + "github.com/hashicorp/terraform-provider-google/google/services/networkservices" + "github.com/hashicorp/terraform-provider-google/google/services/notebooks" + "github.com/hashicorp/terraform-provider-google/google/services/osconfig" + "github.com/hashicorp/terraform-provider-google/google/services/oslogin" + "github.com/hashicorp/terraform-provider-google/google/services/privateca" + "github.com/hashicorp/terraform-provider-google/google/services/publicca" + "github.com/hashicorp/terraform-provider-google/google/services/pubsub" + "github.com/hashicorp/terraform-provider-google/google/services/pubsublite" + "github.com/hashicorp/terraform-provider-google/google/services/redis" + "github.com/hashicorp/terraform-provider-google/google/services/resourcemanager" + "github.com/hashicorp/terraform-provider-google/google/services/secretmanager" + "github.com/hashicorp/terraform-provider-google/google/services/securitycenter" + "github.com/hashicorp/terraform-provider-google/google/services/servicemanagement" + "github.com/hashicorp/terraform-provider-google/google/services/sourcerepo" + "github.com/hashicorp/terraform-provider-google/google/services/spanner" + "github.com/hashicorp/terraform-provider-google/google/services/sql" + "github.com/hashicorp/terraform-provider-google/google/services/storage" + "github.com/hashicorp/terraform-provider-google/google/services/storagetransfer" + 
"github.com/hashicorp/terraform-provider-google/google/services/tags" + "github.com/hashicorp/terraform-provider-google/google/services/tpu" + "github.com/hashicorp/terraform-provider-google/google/services/vertexai" + "github.com/hashicorp/terraform-provider-google/google/services/vpcaccess" + "github.com/hashicorp/terraform-provider-google/google/services/workflows" + + "github.com/hashicorp/terraform-provider-google/google/services/composer" + "github.com/hashicorp/terraform-provider-google/google/services/container" + "github.com/hashicorp/terraform-provider-google/google/services/containeraws" + "github.com/hashicorp/terraform-provider-google/google/services/containerazure" + "github.com/hashicorp/terraform-provider-google/google/services/dataflow" + "github.com/hashicorp/terraform-provider-google/google/services/servicenetworking" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + googleoauth "golang.org/x/oauth2/google" +) + +// Provider returns a *schema.Provider. +func Provider() *schema.Provider { + + // The mtls service client gives the type of endpoint (mtls/regular) + // at client creation. Since we use a shared client for requests we must + // rewrite the endpoints to be mtls endpoints for the scenario where + // mtls is enabled. 
+ if isMtls() { + // if mtls is enabled switch all default endpoints to use the mtls endpoint + for key, bp := range transport_tpg.DefaultBasePaths { + transport_tpg.DefaultBasePaths[key] = getMtlsEndpoint(bp) + } + } + + provider := &schema.Provider{ + Schema: map[string]*schema.Schema{ + "credentials": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateCredentials, + ConflictsWith: []string{"access_token"}, + }, + + "access_token": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"credentials"}, + }, + + "impersonate_service_account": { + Type: schema.TypeString, + Optional: true, + }, + + "impersonate_service_account_delegates": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + }, + + "billing_project": { + Type: schema.TypeString, + Optional: true, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + }, + + "zone": { + Type: schema.TypeString, + Optional: true, + }, + + "scopes": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "batching": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "send_after": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateNonNegativeDuration(), + }, + "enable_batching": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + + "user_project_override": { + Type: schema.TypeBool, + Optional: true, + }, + + "request_timeout": { + Type: schema.TypeString, + Optional: true, + }, + + "request_reason": { + Type: schema.TypeString, + Optional: true, + }, + + // Generated Products + "access_approval_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "access_context_manager_custom_endpoint": { + Type: schema.TypeString, + Optional: 
true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "active_directory_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "alloydb_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "apigee_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "app_engine_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "artifact_registry_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "beyondcorp_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "big_query_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "bigquery_analytics_hub_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "bigquery_connection_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "bigquery_datapolicy_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "bigquery_data_transfer_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "bigquery_reservation_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "bigtable_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "billing_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + 
ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "binary_authorization_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "certificate_manager_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "cloud_asset_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "cloud_build_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "cloudbuildv2_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "cloud_functions_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "cloudfunctions2_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "cloud_identity_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "cloud_ids_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "cloud_iot_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "cloud_run_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "cloud_run_v2_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "cloud_scheduler_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "cloud_tasks_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: 
transport_tpg.ValidateCustomEndpoint, + }, + "compute_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "container_analysis_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "container_attached_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "database_migration_service_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "data_catalog_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "data_fusion_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "data_loss_prevention_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "dataplex_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "dataproc_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "dataproc_metastore_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "datastore_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "datastream_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "deployment_manager_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "dialogflow_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: 
transport_tpg.ValidateCustomEndpoint, + }, + "dialogflow_cx_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "dns_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "document_ai_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "essential_contacts_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "filestore_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "firestore_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "game_services_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "gke_backup_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "gke_hub_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "gke_hub2_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "healthcare_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "iam2_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "iam_beta_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "iam_workforce_pool_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + 
"iap_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "identity_platform_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "kms_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "logging_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "looker_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "memcache_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "ml_engine_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "monitoring_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "network_management_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "network_security_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "network_services_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "notebooks_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "os_config_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "os_login_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "privateca_custom_endpoint": { + Type: schema.TypeString, + 
Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "public_ca_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "pubsub_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "pubsub_lite_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "redis_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "resource_manager_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "secret_manager_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "security_center_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "service_management_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "service_usage_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "source_repo_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "spanner_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "sql_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "storage_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "storage_transfer_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: 
transport_tpg.ValidateCustomEndpoint, + }, + "tags_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "tpu_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "vertex_ai_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "vpc_access_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + "workflows_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + + // Handwritten Products / Versioned / Atypical Entries + transport_tpg.CloudBillingCustomEndpointEntryKey: transport_tpg.CloudBillingCustomEndpointEntry, + transport_tpg.ComposerCustomEndpointEntryKey: transport_tpg.ComposerCustomEndpointEntry, + transport_tpg.ContainerCustomEndpointEntryKey: transport_tpg.ContainerCustomEndpointEntry, + transport_tpg.DataflowCustomEndpointEntryKey: transport_tpg.DataflowCustomEndpointEntry, + transport_tpg.IamCredentialsCustomEndpointEntryKey: transport_tpg.IamCredentialsCustomEndpointEntry, + transport_tpg.ResourceManagerV3CustomEndpointEntryKey: transport_tpg.ResourceManagerV3CustomEndpointEntry, + transport_tpg.IAMCustomEndpointEntryKey: transport_tpg.IAMCustomEndpointEntry, + transport_tpg.ServiceNetworkingCustomEndpointEntryKey: transport_tpg.ServiceNetworkingCustomEndpointEntry, + transport_tpg.TagsLocationCustomEndpointEntryKey: transport_tpg.TagsLocationCustomEndpointEntry, + + // dcl + transport_tpg.ContainerAwsCustomEndpointEntryKey: transport_tpg.ContainerAwsCustomEndpointEntry, + transport_tpg.ContainerAzureCustomEndpointEntryKey: transport_tpg.ContainerAzureCustomEndpointEntry, + }, + + ProviderMetaSchema: map[string]*schema.Schema{ + "module_name": { + Type: schema.TypeString, + Optional: true, + }, + }, + + 
DataSourcesMap: DatasourceMap(), + ResourcesMap: ResourceMap(), + } + + provider.ConfigureContextFunc = func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { + return providerConfigure(ctx, d, provider) + } + + transport_tpg.ConfigureDCLProvider(provider) + + return provider +} + +func DatasourceMap() map[string]*schema.Resource { + datasourceMap, _ := DatasourceMapWithErrors() + return datasourceMap +} + +func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { + return mergeResourceMaps(map[string]*schema.Resource{ + // ####### START handwritten datasources ########### + // ####### START datasources ########### + "google_access_approval_folder_service_account": accessapproval.DataSourceAccessApprovalFolderServiceAccount(), + "google_access_approval_organization_service_account": accessapproval.DataSourceAccessApprovalOrganizationServiceAccount(), + "google_access_approval_project_service_account": accessapproval.DataSourceAccessApprovalProjectServiceAccount(), + "google_active_folder": resourcemanager.DataSourceGoogleActiveFolder(), + "google_alloydb_locations": alloydb.DataSourceAlloydbLocations(), + "google_alloydb_supported_database_flags": alloydb.DataSourceAlloydbSupportedDatabaseFlags(), + "google_artifact_registry_repository": artifactregistry.DataSourceArtifactRegistryRepository(), + "google_app_engine_default_service_account": appengine.DataSourceGoogleAppEngineDefaultServiceAccount(), + "google_beyondcorp_app_connection": beyondcorp.DataSourceGoogleBeyondcorpAppConnection(), + "google_beyondcorp_app_connector": beyondcorp.DataSourceGoogleBeyondcorpAppConnector(), + "google_beyondcorp_app_gateway": beyondcorp.DataSourceGoogleBeyondcorpAppGateway(), + "google_billing_account": billing.DataSourceGoogleBillingAccount(), + "google_bigquery_default_service_account": bigquery.DataSourceGoogleBigqueryDefaultServiceAccount(), + "google_cloudbuild_trigger": cloudbuild.DataSourceGoogleCloudBuildTrigger(), + 
"google_cloudfunctions_function": cloudfunctions.DataSourceGoogleCloudFunctionsFunction(), + "google_cloudfunctions2_function": cloudfunctions2.DataSourceGoogleCloudFunctions2Function(), + "google_cloud_identity_groups": cloudidentity.DataSourceGoogleCloudIdentityGroups(), + "google_cloud_identity_group_memberships": cloudidentity.DataSourceGoogleCloudIdentityGroupMemberships(), + "google_cloud_run_locations": cloudrun.DataSourceGoogleCloudRunLocations(), + "google_cloud_run_service": cloudrun.DataSourceGoogleCloudRunService(), + "google_composer_environment": composer.DataSourceGoogleComposerEnvironment(), + "google_composer_image_versions": composer.DataSourceGoogleComposerImageVersions(), + "google_compute_address": compute.DataSourceGoogleComputeAddress(), + "google_compute_addresses": compute.DataSourceGoogleComputeAddresses(), + "google_compute_backend_service": compute.DataSourceGoogleComputeBackendService(), + "google_compute_backend_bucket": compute.DataSourceGoogleComputeBackendBucket(), + "google_compute_default_service_account": compute.DataSourceGoogleComputeDefaultServiceAccount(), + "google_compute_disk": compute.DataSourceGoogleComputeDisk(), + "google_compute_forwarding_rule": compute.DataSourceGoogleComputeForwardingRule(), + "google_compute_global_address": compute.DataSourceGoogleComputeGlobalAddress(), + "google_compute_global_forwarding_rule": compute.DataSourceGoogleComputeGlobalForwardingRule(), + "google_compute_ha_vpn_gateway": compute.DataSourceGoogleComputeHaVpnGateway(), + "google_compute_health_check": compute.DataSourceGoogleComputeHealthCheck(), + "google_compute_image": compute.DataSourceGoogleComputeImage(), + "google_compute_instance": compute.DataSourceGoogleComputeInstance(), + "google_compute_instance_group": compute.DataSourceGoogleComputeInstanceGroup(), + "google_compute_instance_group_manager": compute.DataSourceGoogleComputeInstanceGroupManager(), + "google_compute_instance_serial_port": 
compute.DataSourceGoogleComputeInstanceSerialPort(), + "google_compute_instance_template": compute.DataSourceGoogleComputeInstanceTemplate(), + "google_compute_lb_ip_ranges": compute.DataSourceGoogleComputeLbIpRanges(), + "google_compute_network": compute.DataSourceGoogleComputeNetwork(), + "google_compute_network_endpoint_group": compute.DataSourceGoogleComputeNetworkEndpointGroup(), + "google_compute_network_peering": compute.DataSourceComputeNetworkPeering(), + "google_compute_node_types": compute.DataSourceGoogleComputeNodeTypes(), + "google_compute_regions": compute.DataSourceGoogleComputeRegions(), + "google_compute_region_network_endpoint_group": compute.DataSourceGoogleComputeRegionNetworkEndpointGroup(), + "google_compute_region_instance_group": compute.DataSourceGoogleComputeRegionInstanceGroup(), + "google_compute_region_ssl_certificate": compute.DataSourceGoogleRegionComputeSslCertificate(), + "google_compute_resource_policy": compute.DataSourceGoogleComputeResourcePolicy(), + "google_compute_router": compute.DataSourceGoogleComputeRouter(), + "google_compute_router_nat": compute.DataSourceGoogleComputeRouterNat(), + "google_compute_router_status": compute.DataSourceGoogleComputeRouterStatus(), + "google_compute_snapshot": compute.DataSourceGoogleComputeSnapshot(), + "google_compute_ssl_certificate": compute.DataSourceGoogleComputeSslCertificate(), + "google_compute_ssl_policy": compute.DataSourceGoogleComputeSslPolicy(), + "google_compute_subnetwork": compute.DataSourceGoogleComputeSubnetwork(), + "google_compute_vpn_gateway": compute.DataSourceGoogleComputeVpnGateway(), + "google_compute_zones": compute.DataSourceGoogleComputeZones(), + "google_container_azure_versions": containerazure.DataSourceGoogleContainerAzureVersions(), + "google_container_aws_versions": containeraws.DataSourceGoogleContainerAwsVersions(), + "google_container_attached_versions": containerattached.DataSourceGoogleContainerAttachedVersions(), + 
"google_container_attached_install_manifest": containerattached.DataSourceGoogleContainerAttachedInstallManifest(), + "google_container_cluster": container.DataSourceGoogleContainerCluster(), + "google_container_engine_versions": container.DataSourceGoogleContainerEngineVersions(), + "google_container_registry_image": containeranalysis.DataSourceGoogleContainerImage(), + "google_container_registry_repository": containeranalysis.DataSourceGoogleContainerRepo(), + "google_dataproc_metastore_service": dataprocmetastore.DataSourceDataprocMetastoreService(), + "google_datastream_static_ips": datastream.DataSourceGoogleDatastreamStaticIps(), + "google_game_services_game_server_deployment_rollout": gameservices.DataSourceGameServicesGameServerDeploymentRollout(), + "google_iam_policy": resourcemanager.DataSourceGoogleIamPolicy(), + "google_iam_role": resourcemanager.DataSourceGoogleIamRole(), + "google_iam_testable_permissions": resourcemanager.DataSourceGoogleIamTestablePermissions(), + "google_iap_client": iap.DataSourceGoogleIapClient(), + "google_kms_crypto_key": kms.DataSourceGoogleKmsCryptoKey(), + "google_kms_crypto_key_version": kms.DataSourceGoogleKmsCryptoKeyVersion(), + "google_kms_key_ring": kms.DataSourceGoogleKmsKeyRing(), + "google_kms_secret": kms.DataSourceGoogleKmsSecret(), + "google_kms_secret_ciphertext": kms.DataSourceGoogleKmsSecretCiphertext(), + "google_folder": resourcemanager.DataSourceGoogleFolder(), + "google_folders": resourcemanager.DataSourceGoogleFolders(), + "google_folder_organization_policy": resourcemanager.DataSourceGoogleFolderOrganizationPolicy(), + "google_logging_project_cmek_settings": logging.DataSourceGoogleLoggingProjectCmekSettings(), + "google_logging_sink": logging.DataSourceGoogleLoggingSink(), + "google_monitoring_notification_channel": monitoring.DataSourceMonitoringNotificationChannel(), + "google_monitoring_cluster_istio_service": monitoring.DataSourceMonitoringServiceClusterIstio(), + 
"google_monitoring_istio_canonical_service": monitoring.DataSourceMonitoringIstioCanonicalService(), + "google_monitoring_mesh_istio_service": monitoring.DataSourceMonitoringServiceMeshIstio(), + "google_monitoring_app_engine_service": monitoring.DataSourceMonitoringServiceAppEngine(), + "google_monitoring_uptime_check_ips": monitoring.DataSourceGoogleMonitoringUptimeCheckIps(), + "google_netblock_ip_ranges": resourcemanager.DataSourceGoogleNetblockIpRanges(), + "google_organization": resourcemanager.DataSourceGoogleOrganization(), + "google_privateca_certificate_authority": privateca.DataSourcePrivatecaCertificateAuthority(), + "google_project": resourcemanager.DataSourceGoogleProject(), + "google_projects": resourcemanager.DataSourceGoogleProjects(), + "google_project_organization_policy": resourcemanager.DataSourceGoogleProjectOrganizationPolicy(), + "google_project_service": resourcemanager.DataSourceGoogleProjectService(), + "google_pubsub_subscription": pubsub.DataSourceGooglePubsubSubscription(), + "google_pubsub_topic": pubsub.DataSourceGooglePubsubTopic(), + "google_secret_manager_secret": secretmanager.DataSourceSecretManagerSecret(), + "google_secret_manager_secret_version": secretmanager.DataSourceSecretManagerSecretVersion(), + "google_secret_manager_secret_version_access": secretmanager.DataSourceSecretManagerSecretVersionAccess(), + "google_service_account": resourcemanager.DataSourceGoogleServiceAccount(), + "google_service_account_access_token": resourcemanager.DataSourceGoogleServiceAccountAccessToken(), + "google_service_account_id_token": resourcemanager.DataSourceGoogleServiceAccountIdToken(), + "google_service_account_jwt": resourcemanager.DataSourceGoogleServiceAccountJwt(), + "google_service_account_key": resourcemanager.DataSourceGoogleServiceAccountKey(), + "google_sourcerepo_repository": sourcerepo.DataSourceGoogleSourceRepoRepository(), + "google_spanner_instance": spanner.DataSourceSpannerInstance(), + "google_sql_ca_certs": 
sql.DataSourceGoogleSQLCaCerts(), + "google_sql_tiers": sql.DataSourceGoogleSQLTiers(), + "google_sql_backup_run": sql.DataSourceSqlBackupRun(), + "google_sql_databases": sql.DataSourceSqlDatabases(), + "google_sql_database": sql.DataSourceSqlDatabase(), + "google_sql_database_instance": sql.DataSourceSqlDatabaseInstance(), + "google_sql_database_instances": sql.DataSourceSqlDatabaseInstances(), + "google_service_networking_peered_dns_domain": servicenetworking.DataSourceGoogleServiceNetworkingPeeredDNSDomain(), + "google_storage_bucket": storage.DataSourceGoogleStorageBucket(), + "google_storage_bucket_object": storage.DataSourceGoogleStorageBucketObject(), + "google_storage_bucket_object_content": storage.DataSourceGoogleStorageBucketObjectContent(), + "google_storage_object_signed_url": storage.DataSourceGoogleSignedUrl(), + "google_storage_project_service_account": storage.DataSourceGoogleStorageProjectServiceAccount(), + "google_storage_transfer_project_service_account": storagetransfer.DataSourceGoogleStorageTransferProjectServiceAccount(), + "google_tags_tag_key": tags.DataSourceGoogleTagsTagKey(), + "google_tags_tag_value": tags.DataSourceGoogleTagsTagValue(), + "google_tpu_tensorflow_versions": tpu.DataSourceTpuTensorflowVersions(), + "google_vpc_access_connector": vpcaccess.DataSourceVPCAccessConnector(), + "google_redis_instance": redis.DataSourceGoogleRedisInstance(), + "google_vertex_ai_index": vertexai.DataSourceVertexAIIndex(), + // ####### END datasources ########### + // ####### END handwritten datasources ########### + }, + map[string]*schema.Resource{ + // ####### START generated IAM datasources ########### + "google_access_context_manager_access_policy_iam_policy": tpgiamresource.DataSourceIamPolicy(accesscontextmanager.AccessContextManagerAccessPolicyIamSchema, accesscontextmanager.AccessContextManagerAccessPolicyIamUpdaterProducer), + "google_apigee_environment_iam_policy": tpgiamresource.DataSourceIamPolicy(apigee.ApigeeEnvironmentIamSchema, 
apigee.ApigeeEnvironmentIamUpdaterProducer), + "google_artifact_registry_repository_iam_policy": tpgiamresource.DataSourceIamPolicy(artifactregistry.ArtifactRegistryRepositoryIamSchema, artifactregistry.ArtifactRegistryRepositoryIamUpdaterProducer), + "google_bigquery_table_iam_policy": tpgiamresource.DataSourceIamPolicy(bigquery.BigQueryTableIamSchema, bigquery.BigQueryTableIamUpdaterProducer), + "google_bigquery_analytics_hub_data_exchange_iam_policy": tpgiamresource.DataSourceIamPolicy(bigqueryanalyticshub.BigqueryAnalyticsHubDataExchangeIamSchema, bigqueryanalyticshub.BigqueryAnalyticsHubDataExchangeIamUpdaterProducer), + "google_bigquery_analytics_hub_listing_iam_policy": tpgiamresource.DataSourceIamPolicy(bigqueryanalyticshub.BigqueryAnalyticsHubListingIamSchema, bigqueryanalyticshub.BigqueryAnalyticsHubListingIamUpdaterProducer), + "google_bigquery_connection_iam_policy": tpgiamresource.DataSourceIamPolicy(bigqueryconnection.BigqueryConnectionConnectionIamSchema, bigqueryconnection.BigqueryConnectionConnectionIamUpdaterProducer), + "google_bigquery_datapolicy_data_policy_iam_policy": tpgiamresource.DataSourceIamPolicy(bigquerydatapolicy.BigqueryDatapolicyDataPolicyIamSchema, bigquerydatapolicy.BigqueryDatapolicyDataPolicyIamUpdaterProducer), + "google_binary_authorization_attestor_iam_policy": tpgiamresource.DataSourceIamPolicy(binaryauthorization.BinaryAuthorizationAttestorIamSchema, binaryauthorization.BinaryAuthorizationAttestorIamUpdaterProducer), + "google_cloudbuildv2_connection_iam_policy": tpgiamresource.DataSourceIamPolicy(cloudbuildv2.Cloudbuildv2ConnectionIamSchema, cloudbuildv2.Cloudbuildv2ConnectionIamUpdaterProducer), + "google_cloudfunctions_function_iam_policy": tpgiamresource.DataSourceIamPolicy(cloudfunctions.CloudFunctionsCloudFunctionIamSchema, cloudfunctions.CloudFunctionsCloudFunctionIamUpdaterProducer), + "google_cloudfunctions2_function_iam_policy": tpgiamresource.DataSourceIamPolicy(cloudfunctions2.Cloudfunctions2functionIamSchema, 
cloudfunctions2.Cloudfunctions2functionIamUpdaterProducer), + "google_cloudiot_registry_iam_policy": tpgiamresource.DataSourceIamPolicy(cloudiot.CloudIotDeviceRegistryIamSchema, cloudiot.CloudIotDeviceRegistryIamUpdaterProducer), + "google_cloud_run_service_iam_policy": tpgiamresource.DataSourceIamPolicy(cloudrun.CloudRunServiceIamSchema, cloudrun.CloudRunServiceIamUpdaterProducer), + "google_cloud_run_v2_job_iam_policy": tpgiamresource.DataSourceIamPolicy(cloudrunv2.CloudRunV2JobIamSchema, cloudrunv2.CloudRunV2JobIamUpdaterProducer), + "google_cloud_run_v2_service_iam_policy": tpgiamresource.DataSourceIamPolicy(cloudrunv2.CloudRunV2ServiceIamSchema, cloudrunv2.CloudRunV2ServiceIamUpdaterProducer), + "google_cloud_tasks_queue_iam_policy": tpgiamresource.DataSourceIamPolicy(cloudtasks.CloudTasksQueueIamSchema, cloudtasks.CloudTasksQueueIamUpdaterProducer), + "google_compute_disk_iam_policy": tpgiamresource.DataSourceIamPolicy(compute.ComputeDiskIamSchema, compute.ComputeDiskIamUpdaterProducer), + "google_compute_image_iam_policy": tpgiamresource.DataSourceIamPolicy(compute.ComputeImageIamSchema, compute.ComputeImageIamUpdaterProducer), + "google_compute_instance_iam_policy": tpgiamresource.DataSourceIamPolicy(compute.ComputeInstanceIamSchema, compute.ComputeInstanceIamUpdaterProducer), + "google_compute_region_disk_iam_policy": tpgiamresource.DataSourceIamPolicy(compute.ComputeRegionDiskIamSchema, compute.ComputeRegionDiskIamUpdaterProducer), + "google_compute_snapshot_iam_policy": tpgiamresource.DataSourceIamPolicy(compute.ComputeSnapshotIamSchema, compute.ComputeSnapshotIamUpdaterProducer), + "google_compute_subnetwork_iam_policy": tpgiamresource.DataSourceIamPolicy(compute.ComputeSubnetworkIamSchema, compute.ComputeSubnetworkIamUpdaterProducer), + "google_container_analysis_note_iam_policy": tpgiamresource.DataSourceIamPolicy(containeranalysis.ContainerAnalysisNoteIamSchema, containeranalysis.ContainerAnalysisNoteIamUpdaterProducer), + 
"google_data_catalog_entry_group_iam_policy": tpgiamresource.DataSourceIamPolicy(datacatalog.DataCatalogEntryGroupIamSchema, datacatalog.DataCatalogEntryGroupIamUpdaterProducer), + "google_data_catalog_policy_tag_iam_policy": tpgiamresource.DataSourceIamPolicy(datacatalog.DataCatalogPolicyTagIamSchema, datacatalog.DataCatalogPolicyTagIamUpdaterProducer), + "google_data_catalog_tag_template_iam_policy": tpgiamresource.DataSourceIamPolicy(datacatalog.DataCatalogTagTemplateIamSchema, datacatalog.DataCatalogTagTemplateIamUpdaterProducer), + "google_data_catalog_taxonomy_iam_policy": tpgiamresource.DataSourceIamPolicy(datacatalog.DataCatalogTaxonomyIamSchema, datacatalog.DataCatalogTaxonomyIamUpdaterProducer), + "google_data_fusion_instance_iam_policy": tpgiamresource.DataSourceIamPolicy(datafusion.DataFusionInstanceIamSchema, datafusion.DataFusionInstanceIamUpdaterProducer), + "google_dataplex_asset_iam_policy": tpgiamresource.DataSourceIamPolicy(dataplex.DataplexAssetIamSchema, dataplex.DataplexAssetIamUpdaterProducer), + "google_dataplex_datascan_iam_policy": tpgiamresource.DataSourceIamPolicy(dataplex.DataplexDatascanIamSchema, dataplex.DataplexDatascanIamUpdaterProducer), + "google_dataplex_lake_iam_policy": tpgiamresource.DataSourceIamPolicy(dataplex.DataplexLakeIamSchema, dataplex.DataplexLakeIamUpdaterProducer), + "google_dataplex_zone_iam_policy": tpgiamresource.DataSourceIamPolicy(dataplex.DataplexZoneIamSchema, dataplex.DataplexZoneIamUpdaterProducer), + "google_dataproc_autoscaling_policy_iam_policy": tpgiamresource.DataSourceIamPolicy(dataproc.DataprocAutoscalingPolicyIamSchema, dataproc.DataprocAutoscalingPolicyIamUpdaterProducer), + "google_dataproc_metastore_service_iam_policy": tpgiamresource.DataSourceIamPolicy(dataprocmetastore.DataprocMetastoreServiceIamSchema, dataprocmetastore.DataprocMetastoreServiceIamUpdaterProducer), + "google_dns_managed_zone_iam_policy": tpgiamresource.DataSourceIamPolicy(dns.DNSManagedZoneIamSchema, 
dns.DNSManagedZoneIamUpdaterProducer), + "google_gke_backup_backup_plan_iam_policy": tpgiamresource.DataSourceIamPolicy(gkebackup.GKEBackupBackupPlanIamSchema, gkebackup.GKEBackupBackupPlanIamUpdaterProducer), + "google_gke_hub_membership_iam_policy": tpgiamresource.DataSourceIamPolicy(gkehub.GKEHubMembershipIamSchema, gkehub.GKEHubMembershipIamUpdaterProducer), + "google_gke_hub_feature_iam_policy": tpgiamresource.DataSourceIamPolicy(gkehub2.GKEHub2FeatureIamSchema, gkehub2.GKEHub2FeatureIamUpdaterProducer), + "google_healthcare_consent_store_iam_policy": tpgiamresource.DataSourceIamPolicy(healthcare.HealthcareConsentStoreIamSchema, healthcare.HealthcareConsentStoreIamUpdaterProducer), + "google_iap_app_engine_service_iam_policy": tpgiamresource.DataSourceIamPolicy(iap.IapAppEngineServiceIamSchema, iap.IapAppEngineServiceIamUpdaterProducer), + "google_iap_app_engine_version_iam_policy": tpgiamresource.DataSourceIamPolicy(iap.IapAppEngineVersionIamSchema, iap.IapAppEngineVersionIamUpdaterProducer), + "google_iap_tunnel_iam_policy": tpgiamresource.DataSourceIamPolicy(iap.IapTunnelIamSchema, iap.IapTunnelIamUpdaterProducer), + "google_iap_tunnel_instance_iam_policy": tpgiamresource.DataSourceIamPolicy(iap.IapTunnelInstanceIamSchema, iap.IapTunnelInstanceIamUpdaterProducer), + "google_iap_web_iam_policy": tpgiamresource.DataSourceIamPolicy(iap.IapWebIamSchema, iap.IapWebIamUpdaterProducer), + "google_iap_web_backend_service_iam_policy": tpgiamresource.DataSourceIamPolicy(iap.IapWebBackendServiceIamSchema, iap.IapWebBackendServiceIamUpdaterProducer), + "google_iap_web_type_app_engine_iam_policy": tpgiamresource.DataSourceIamPolicy(iap.IapWebTypeAppEngineIamSchema, iap.IapWebTypeAppEngineIamUpdaterProducer), + "google_iap_web_type_compute_iam_policy": tpgiamresource.DataSourceIamPolicy(iap.IapWebTypeComputeIamSchema, iap.IapWebTypeComputeIamUpdaterProducer), + "google_notebooks_instance_iam_policy": 
tpgiamresource.DataSourceIamPolicy(notebooks.NotebooksInstanceIamSchema, notebooks.NotebooksInstanceIamUpdaterProducer), + "google_notebooks_runtime_iam_policy": tpgiamresource.DataSourceIamPolicy(notebooks.NotebooksRuntimeIamSchema, notebooks.NotebooksRuntimeIamUpdaterProducer), + "google_privateca_ca_pool_iam_policy": tpgiamresource.DataSourceIamPolicy(privateca.PrivatecaCaPoolIamSchema, privateca.PrivatecaCaPoolIamUpdaterProducer), + "google_privateca_certificate_template_iam_policy": tpgiamresource.DataSourceIamPolicy(privateca.PrivatecaCertificateTemplateIamSchema, privateca.PrivatecaCertificateTemplateIamUpdaterProducer), + "google_pubsub_topic_iam_policy": tpgiamresource.DataSourceIamPolicy(pubsub.PubsubTopicIamSchema, pubsub.PubsubTopicIamUpdaterProducer), + "google_secret_manager_secret_iam_policy": tpgiamresource.DataSourceIamPolicy(secretmanager.SecretManagerSecretIamSchema, secretmanager.SecretManagerSecretIamUpdaterProducer), + "google_scc_source_iam_policy": tpgiamresource.DataSourceIamPolicy(securitycenter.SecurityCenterSourceIamSchema, securitycenter.SecurityCenterSourceIamUpdaterProducer), + "google_endpoints_service_iam_policy": tpgiamresource.DataSourceIamPolicy(servicemanagement.ServiceManagementServiceIamSchema, servicemanagement.ServiceManagementServiceIamUpdaterProducer), + "google_endpoints_service_consumers_iam_policy": tpgiamresource.DataSourceIamPolicy(servicemanagement.ServiceManagementServiceConsumersIamSchema, servicemanagement.ServiceManagementServiceConsumersIamUpdaterProducer), + "google_sourcerepo_repository_iam_policy": tpgiamresource.DataSourceIamPolicy(sourcerepo.SourceRepoRepositoryIamSchema, sourcerepo.SourceRepoRepositoryIamUpdaterProducer), + "google_storage_bucket_iam_policy": tpgiamresource.DataSourceIamPolicy(storage.StorageBucketIamSchema, storage.StorageBucketIamUpdaterProducer), + "google_tags_tag_key_iam_policy": tpgiamresource.DataSourceIamPolicy(tags.TagsTagKeyIamSchema, tags.TagsTagKeyIamUpdaterProducer), + 
"google_tags_tag_value_iam_policy": tpgiamresource.DataSourceIamPolicy(tags.TagsTagValueIamSchema, tags.TagsTagValueIamUpdaterProducer), + // ####### END generated IAM datasources ########### + }, + map[string]*schema.Resource{ + // ####### START non-generated IAM datasources ########### + "google_bigtable_instance_iam_policy": tpgiamresource.DataSourceIamPolicy(bigtable.IamBigtableInstanceSchema, bigtable.NewBigtableInstanceUpdater), + "google_bigtable_table_iam_policy": tpgiamresource.DataSourceIamPolicy(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater), + "google_bigquery_dataset_iam_policy": tpgiamresource.DataSourceIamPolicy(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater), + "google_billing_account_iam_policy": tpgiamresource.DataSourceIamPolicy(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater), + "google_dataproc_cluster_iam_policy": tpgiamresource.DataSourceIamPolicy(dataproc.IamDataprocClusterSchema, dataproc.NewDataprocClusterUpdater), + "google_dataproc_job_iam_policy": tpgiamresource.DataSourceIamPolicy(dataproc.IamDataprocJobSchema, dataproc.NewDataprocJobUpdater), + "google_folder_iam_policy": tpgiamresource.DataSourceIamPolicy(resourcemanager.IamFolderSchema, resourcemanager.NewFolderIamUpdater), + "google_healthcare_dataset_iam_policy": tpgiamresource.DataSourceIamPolicy(healthcare.IamHealthcareDatasetSchema, healthcare.NewHealthcareDatasetIamUpdater), + "google_healthcare_dicom_store_iam_policy": tpgiamresource.DataSourceIamPolicy(healthcare.IamHealthcareDicomStoreSchema, healthcare.NewHealthcareDicomStoreIamUpdater), + "google_healthcare_fhir_store_iam_policy": tpgiamresource.DataSourceIamPolicy(healthcare.IamHealthcareFhirStoreSchema, healthcare.NewHealthcareFhirStoreIamUpdater), + "google_healthcare_hl7_v2_store_iam_policy": tpgiamresource.DataSourceIamPolicy(healthcare.IamHealthcareHl7V2StoreSchema, healthcare.NewHealthcareHl7V2StoreIamUpdater), + "google_kms_key_ring_iam_policy": 
tpgiamresource.DataSourceIamPolicy(kms.IamKmsKeyRingSchema, kms.NewKmsKeyRingIamUpdater), + "google_kms_crypto_key_iam_policy": tpgiamresource.DataSourceIamPolicy(kms.IamKmsCryptoKeySchema, kms.NewKmsCryptoKeyIamUpdater), + "google_spanner_instance_iam_policy": tpgiamresource.DataSourceIamPolicy(spanner.IamSpannerInstanceSchema, spanner.NewSpannerInstanceIamUpdater), + "google_spanner_database_iam_policy": tpgiamresource.DataSourceIamPolicy(spanner.IamSpannerDatabaseSchema, spanner.NewSpannerDatabaseIamUpdater), + "google_organization_iam_policy": tpgiamresource.DataSourceIamPolicy(resourcemanager.IamOrganizationSchema, resourcemanager.NewOrganizationIamUpdater), + "google_project_iam_policy": tpgiamresource.DataSourceIamPolicy(resourcemanager.IamProjectSchema, resourcemanager.NewProjectIamUpdater), + "google_pubsub_subscription_iam_policy": tpgiamresource.DataSourceIamPolicy(pubsub.IamPubsubSubscriptionSchema, pubsub.NewPubsubSubscriptionIamUpdater), + "google_service_account_iam_policy": tpgiamresource.DataSourceIamPolicy(resourcemanager.IamServiceAccountSchema, resourcemanager.NewServiceAccountIamUpdater), + // ####### END non-generated IAM datasources ########### + }) +} + +// Generated resources: 301 +// Generated IAM resources: 198 +// Total generated resources: 499 +func ResourceMap() map[string]*schema.Resource { + resourceMap, _ := ResourceMapWithErrors() + return resourceMap +} + +func ResourceMapWithErrors() (map[string]*schema.Resource, error) { + return mergeResourceMaps( + map[string]*schema.Resource{ + "google_folder_access_approval_settings": accessapproval.ResourceAccessApprovalFolderSettings(), + "google_organization_access_approval_settings": accessapproval.ResourceAccessApprovalOrganizationSettings(), + "google_project_access_approval_settings": accessapproval.ResourceAccessApprovalProjectSettings(), + "google_access_context_manager_access_level": accesscontextmanager.ResourceAccessContextManagerAccessLevel(), + 
"google_access_context_manager_access_level_condition": accesscontextmanager.ResourceAccessContextManagerAccessLevelCondition(), + "google_access_context_manager_access_levels": accesscontextmanager.ResourceAccessContextManagerAccessLevels(), + "google_access_context_manager_access_policy": accesscontextmanager.ResourceAccessContextManagerAccessPolicy(), + "google_access_context_manager_access_policy_iam_binding": tpgiamresource.ResourceIamBinding(accesscontextmanager.AccessContextManagerAccessPolicyIamSchema, accesscontextmanager.AccessContextManagerAccessPolicyIamUpdaterProducer, accesscontextmanager.AccessContextManagerAccessPolicyIdParseFunc), + "google_access_context_manager_access_policy_iam_member": tpgiamresource.ResourceIamMember(accesscontextmanager.AccessContextManagerAccessPolicyIamSchema, accesscontextmanager.AccessContextManagerAccessPolicyIamUpdaterProducer, accesscontextmanager.AccessContextManagerAccessPolicyIdParseFunc), + "google_access_context_manager_access_policy_iam_policy": tpgiamresource.ResourceIamPolicy(accesscontextmanager.AccessContextManagerAccessPolicyIamSchema, accesscontextmanager.AccessContextManagerAccessPolicyIamUpdaterProducer, accesscontextmanager.AccessContextManagerAccessPolicyIdParseFunc), + "google_access_context_manager_authorized_orgs_desc": accesscontextmanager.ResourceAccessContextManagerAuthorizedOrgsDesc(), + "google_access_context_manager_egress_policy": accesscontextmanager.ResourceAccessContextManagerEgressPolicy(), + "google_access_context_manager_gcp_user_access_binding": accesscontextmanager.ResourceAccessContextManagerGcpUserAccessBinding(), + "google_access_context_manager_ingress_policy": accesscontextmanager.ResourceAccessContextManagerIngressPolicy(), + "google_access_context_manager_service_perimeter": accesscontextmanager.ResourceAccessContextManagerServicePerimeter(), + "google_access_context_manager_service_perimeter_egress_policy": 
accesscontextmanager.ResourceAccessContextManagerServicePerimeterEgressPolicy(), + "google_access_context_manager_service_perimeter_ingress_policy": accesscontextmanager.ResourceAccessContextManagerServicePerimeterIngressPolicy(), + "google_access_context_manager_service_perimeter_resource": accesscontextmanager.ResourceAccessContextManagerServicePerimeterResource(), + "google_access_context_manager_service_perimeters": accesscontextmanager.ResourceAccessContextManagerServicePerimeters(), + "google_active_directory_domain": activedirectory.ResourceActiveDirectoryDomain(), + "google_active_directory_domain_trust": activedirectory.ResourceActiveDirectoryDomainTrust(), + "google_alloydb_backup": alloydb.ResourceAlloydbBackup(), + "google_alloydb_cluster": alloydb.ResourceAlloydbCluster(), + "google_alloydb_instance": alloydb.ResourceAlloydbInstance(), + "google_apigee_addons_config": apigee.ResourceApigeeAddonsConfig(), + "google_apigee_endpoint_attachment": apigee.ResourceApigeeEndpointAttachment(), + "google_apigee_env_keystore": apigee.ResourceApigeeEnvKeystore(), + "google_apigee_env_references": apigee.ResourceApigeeEnvReferences(), + "google_apigee_envgroup": apigee.ResourceApigeeEnvgroup(), + "google_apigee_envgroup_attachment": apigee.ResourceApigeeEnvgroupAttachment(), + "google_apigee_environment": apigee.ResourceApigeeEnvironment(), + "google_apigee_environment_iam_binding": tpgiamresource.ResourceIamBinding(apigee.ApigeeEnvironmentIamSchema, apigee.ApigeeEnvironmentIamUpdaterProducer, apigee.ApigeeEnvironmentIdParseFunc), + "google_apigee_environment_iam_member": tpgiamresource.ResourceIamMember(apigee.ApigeeEnvironmentIamSchema, apigee.ApigeeEnvironmentIamUpdaterProducer, apigee.ApigeeEnvironmentIdParseFunc), + "google_apigee_environment_iam_policy": tpgiamresource.ResourceIamPolicy(apigee.ApigeeEnvironmentIamSchema, apigee.ApigeeEnvironmentIamUpdaterProducer, apigee.ApigeeEnvironmentIdParseFunc), + "google_apigee_instance": 
apigee.ResourceApigeeInstance(), + "google_apigee_instance_attachment": apigee.ResourceApigeeInstanceAttachment(), + "google_apigee_keystores_aliases_self_signed_cert": apigee.ResourceApigeeKeystoresAliasesSelfSignedCert(), + "google_apigee_nat_address": apigee.ResourceApigeeNatAddress(), + "google_apigee_organization": apigee.ResourceApigeeOrganization(), + "google_apigee_sync_authorization": apigee.ResourceApigeeSyncAuthorization(), + "google_app_engine_application_url_dispatch_rules": appengine.ResourceAppEngineApplicationUrlDispatchRules(), + "google_app_engine_domain_mapping": appengine.ResourceAppEngineDomainMapping(), + "google_app_engine_firewall_rule": appengine.ResourceAppEngineFirewallRule(), + "google_app_engine_flexible_app_version": appengine.ResourceAppEngineFlexibleAppVersion(), + "google_app_engine_service_network_settings": appengine.ResourceAppEngineServiceNetworkSettings(), + "google_app_engine_service_split_traffic": appengine.ResourceAppEngineServiceSplitTraffic(), + "google_app_engine_standard_app_version": appengine.ResourceAppEngineStandardAppVersion(), + "google_artifact_registry_repository": artifactregistry.ResourceArtifactRegistryRepository(), + "google_artifact_registry_repository_iam_binding": tpgiamresource.ResourceIamBinding(artifactregistry.ArtifactRegistryRepositoryIamSchema, artifactregistry.ArtifactRegistryRepositoryIamUpdaterProducer, artifactregistry.ArtifactRegistryRepositoryIdParseFunc), + "google_artifact_registry_repository_iam_member": tpgiamresource.ResourceIamMember(artifactregistry.ArtifactRegistryRepositoryIamSchema, artifactregistry.ArtifactRegistryRepositoryIamUpdaterProducer, artifactregistry.ArtifactRegistryRepositoryIdParseFunc), + "google_artifact_registry_repository_iam_policy": tpgiamresource.ResourceIamPolicy(artifactregistry.ArtifactRegistryRepositoryIamSchema, artifactregistry.ArtifactRegistryRepositoryIamUpdaterProducer, artifactregistry.ArtifactRegistryRepositoryIdParseFunc), + 
"google_beyondcorp_app_connection": beyondcorp.ResourceBeyondcorpAppConnection(), + "google_beyondcorp_app_connector": beyondcorp.ResourceBeyondcorpAppConnector(), + "google_beyondcorp_app_gateway": beyondcorp.ResourceBeyondcorpAppGateway(), + "google_bigquery_dataset": bigquery.ResourceBigQueryDataset(), + "google_bigquery_dataset_access": bigquery.ResourceBigQueryDatasetAccess(), + "google_bigquery_job": bigquery.ResourceBigQueryJob(), + "google_bigquery_routine": bigquery.ResourceBigQueryRoutine(), + "google_bigquery_table_iam_binding": tpgiamresource.ResourceIamBinding(bigquery.BigQueryTableIamSchema, bigquery.BigQueryTableIamUpdaterProducer, bigquery.BigQueryTableIdParseFunc), + "google_bigquery_table_iam_member": tpgiamresource.ResourceIamMember(bigquery.BigQueryTableIamSchema, bigquery.BigQueryTableIamUpdaterProducer, bigquery.BigQueryTableIdParseFunc), + "google_bigquery_table_iam_policy": tpgiamresource.ResourceIamPolicy(bigquery.BigQueryTableIamSchema, bigquery.BigQueryTableIamUpdaterProducer, bigquery.BigQueryTableIdParseFunc), + "google_bigquery_analytics_hub_data_exchange": bigqueryanalyticshub.ResourceBigqueryAnalyticsHubDataExchange(), + "google_bigquery_analytics_hub_data_exchange_iam_binding": tpgiamresource.ResourceIamBinding(bigqueryanalyticshub.BigqueryAnalyticsHubDataExchangeIamSchema, bigqueryanalyticshub.BigqueryAnalyticsHubDataExchangeIamUpdaterProducer, bigqueryanalyticshub.BigqueryAnalyticsHubDataExchangeIdParseFunc), + "google_bigquery_analytics_hub_data_exchange_iam_member": tpgiamresource.ResourceIamMember(bigqueryanalyticshub.BigqueryAnalyticsHubDataExchangeIamSchema, bigqueryanalyticshub.BigqueryAnalyticsHubDataExchangeIamUpdaterProducer, bigqueryanalyticshub.BigqueryAnalyticsHubDataExchangeIdParseFunc), + "google_bigquery_analytics_hub_data_exchange_iam_policy": tpgiamresource.ResourceIamPolicy(bigqueryanalyticshub.BigqueryAnalyticsHubDataExchangeIamSchema, bigqueryanalyticshub.BigqueryAnalyticsHubDataExchangeIamUpdaterProducer, 
bigqueryanalyticshub.BigqueryAnalyticsHubDataExchangeIdParseFunc), + "google_bigquery_analytics_hub_listing": bigqueryanalyticshub.ResourceBigqueryAnalyticsHubListing(), + "google_bigquery_analytics_hub_listing_iam_binding": tpgiamresource.ResourceIamBinding(bigqueryanalyticshub.BigqueryAnalyticsHubListingIamSchema, bigqueryanalyticshub.BigqueryAnalyticsHubListingIamUpdaterProducer, bigqueryanalyticshub.BigqueryAnalyticsHubListingIdParseFunc), + "google_bigquery_analytics_hub_listing_iam_member": tpgiamresource.ResourceIamMember(bigqueryanalyticshub.BigqueryAnalyticsHubListingIamSchema, bigqueryanalyticshub.BigqueryAnalyticsHubListingIamUpdaterProducer, bigqueryanalyticshub.BigqueryAnalyticsHubListingIdParseFunc), + "google_bigquery_analytics_hub_listing_iam_policy": tpgiamresource.ResourceIamPolicy(bigqueryanalyticshub.BigqueryAnalyticsHubListingIamSchema, bigqueryanalyticshub.BigqueryAnalyticsHubListingIamUpdaterProducer, bigqueryanalyticshub.BigqueryAnalyticsHubListingIdParseFunc), + "google_bigquery_connection": bigqueryconnection.ResourceBigqueryConnectionConnection(), + "google_bigquery_connection_iam_binding": tpgiamresource.ResourceIamBinding(bigqueryconnection.BigqueryConnectionConnectionIamSchema, bigqueryconnection.BigqueryConnectionConnectionIamUpdaterProducer, bigqueryconnection.BigqueryConnectionConnectionIdParseFunc), + "google_bigquery_connection_iam_member": tpgiamresource.ResourceIamMember(bigqueryconnection.BigqueryConnectionConnectionIamSchema, bigqueryconnection.BigqueryConnectionConnectionIamUpdaterProducer, bigqueryconnection.BigqueryConnectionConnectionIdParseFunc), + "google_bigquery_connection_iam_policy": tpgiamresource.ResourceIamPolicy(bigqueryconnection.BigqueryConnectionConnectionIamSchema, bigqueryconnection.BigqueryConnectionConnectionIamUpdaterProducer, bigqueryconnection.BigqueryConnectionConnectionIdParseFunc), + "google_bigquery_datapolicy_data_policy": bigquerydatapolicy.ResourceBigqueryDatapolicyDataPolicy(), + 
"google_bigquery_datapolicy_data_policy_iam_binding": tpgiamresource.ResourceIamBinding(bigquerydatapolicy.BigqueryDatapolicyDataPolicyIamSchema, bigquerydatapolicy.BigqueryDatapolicyDataPolicyIamUpdaterProducer, bigquerydatapolicy.BigqueryDatapolicyDataPolicyIdParseFunc), + "google_bigquery_datapolicy_data_policy_iam_member": tpgiamresource.ResourceIamMember(bigquerydatapolicy.BigqueryDatapolicyDataPolicyIamSchema, bigquerydatapolicy.BigqueryDatapolicyDataPolicyIamUpdaterProducer, bigquerydatapolicy.BigqueryDatapolicyDataPolicyIdParseFunc), + "google_bigquery_datapolicy_data_policy_iam_policy": tpgiamresource.ResourceIamPolicy(bigquerydatapolicy.BigqueryDatapolicyDataPolicyIamSchema, bigquerydatapolicy.BigqueryDatapolicyDataPolicyIamUpdaterProducer, bigquerydatapolicy.BigqueryDatapolicyDataPolicyIdParseFunc), + "google_bigquery_data_transfer_config": bigquerydatatransfer.ResourceBigqueryDataTransferConfig(), + "google_bigquery_capacity_commitment": bigqueryreservation.ResourceBigqueryReservationCapacityCommitment(), + "google_bigquery_reservation": bigqueryreservation.ResourceBigqueryReservationReservation(), + "google_bigtable_app_profile": bigtable.ResourceBigtableAppProfile(), + "google_billing_budget": billing.ResourceBillingBudget(), + "google_binary_authorization_attestor": binaryauthorization.ResourceBinaryAuthorizationAttestor(), + "google_binary_authorization_attestor_iam_binding": tpgiamresource.ResourceIamBinding(binaryauthorization.BinaryAuthorizationAttestorIamSchema, binaryauthorization.BinaryAuthorizationAttestorIamUpdaterProducer, binaryauthorization.BinaryAuthorizationAttestorIdParseFunc), + "google_binary_authorization_attestor_iam_member": tpgiamresource.ResourceIamMember(binaryauthorization.BinaryAuthorizationAttestorIamSchema, binaryauthorization.BinaryAuthorizationAttestorIamUpdaterProducer, binaryauthorization.BinaryAuthorizationAttestorIdParseFunc), + "google_binary_authorization_attestor_iam_policy": 
tpgiamresource.ResourceIamPolicy(binaryauthorization.BinaryAuthorizationAttestorIamSchema, binaryauthorization.BinaryAuthorizationAttestorIamUpdaterProducer, binaryauthorization.BinaryAuthorizationAttestorIdParseFunc), + "google_binary_authorization_policy": binaryauthorization.ResourceBinaryAuthorizationPolicy(), + "google_certificate_manager_certificate": certificatemanager.ResourceCertificateManagerCertificate(), + "google_certificate_manager_certificate_issuance_config": certificatemanager.ResourceCertificateManagerCertificateIssuanceConfig(), + "google_certificate_manager_certificate_map": certificatemanager.ResourceCertificateManagerCertificateMap(), + "google_certificate_manager_certificate_map_entry": certificatemanager.ResourceCertificateManagerCertificateMapEntry(), + "google_certificate_manager_dns_authorization": certificatemanager.ResourceCertificateManagerDnsAuthorization(), + "google_cloud_asset_folder_feed": cloudasset.ResourceCloudAssetFolderFeed(), + "google_cloud_asset_organization_feed": cloudasset.ResourceCloudAssetOrganizationFeed(), + "google_cloud_asset_project_feed": cloudasset.ResourceCloudAssetProjectFeed(), + "google_cloudbuild_bitbucket_server_config": cloudbuild.ResourceCloudBuildBitbucketServerConfig(), + "google_cloudbuild_trigger": cloudbuild.ResourceCloudBuildTrigger(), + "google_cloudbuildv2_connection_iam_binding": tpgiamresource.ResourceIamBinding(cloudbuildv2.Cloudbuildv2ConnectionIamSchema, cloudbuildv2.Cloudbuildv2ConnectionIamUpdaterProducer, cloudbuildv2.Cloudbuildv2ConnectionIdParseFunc), + "google_cloudbuildv2_connection_iam_member": tpgiamresource.ResourceIamMember(cloudbuildv2.Cloudbuildv2ConnectionIamSchema, cloudbuildv2.Cloudbuildv2ConnectionIamUpdaterProducer, cloudbuildv2.Cloudbuildv2ConnectionIdParseFunc), + "google_cloudbuildv2_connection_iam_policy": tpgiamresource.ResourceIamPolicy(cloudbuildv2.Cloudbuildv2ConnectionIamSchema, cloudbuildv2.Cloudbuildv2ConnectionIamUpdaterProducer, 
cloudbuildv2.Cloudbuildv2ConnectionIdParseFunc), + "google_cloudfunctions_function_iam_binding": tpgiamresource.ResourceIamBinding(cloudfunctions.CloudFunctionsCloudFunctionIamSchema, cloudfunctions.CloudFunctionsCloudFunctionIamUpdaterProducer, cloudfunctions.CloudFunctionsCloudFunctionIdParseFunc), + "google_cloudfunctions_function_iam_member": tpgiamresource.ResourceIamMember(cloudfunctions.CloudFunctionsCloudFunctionIamSchema, cloudfunctions.CloudFunctionsCloudFunctionIamUpdaterProducer, cloudfunctions.CloudFunctionsCloudFunctionIdParseFunc), + "google_cloudfunctions_function_iam_policy": tpgiamresource.ResourceIamPolicy(cloudfunctions.CloudFunctionsCloudFunctionIamSchema, cloudfunctions.CloudFunctionsCloudFunctionIamUpdaterProducer, cloudfunctions.CloudFunctionsCloudFunctionIdParseFunc), + "google_cloudfunctions2_function": cloudfunctions2.ResourceCloudfunctions2function(), + "google_cloudfunctions2_function_iam_binding": tpgiamresource.ResourceIamBinding(cloudfunctions2.Cloudfunctions2functionIamSchema, cloudfunctions2.Cloudfunctions2functionIamUpdaterProducer, cloudfunctions2.Cloudfunctions2functionIdParseFunc), + "google_cloudfunctions2_function_iam_member": tpgiamresource.ResourceIamMember(cloudfunctions2.Cloudfunctions2functionIamSchema, cloudfunctions2.Cloudfunctions2functionIamUpdaterProducer, cloudfunctions2.Cloudfunctions2functionIdParseFunc), + "google_cloudfunctions2_function_iam_policy": tpgiamresource.ResourceIamPolicy(cloudfunctions2.Cloudfunctions2functionIamSchema, cloudfunctions2.Cloudfunctions2functionIamUpdaterProducer, cloudfunctions2.Cloudfunctions2functionIdParseFunc), + "google_cloud_identity_group": cloudidentity.ResourceCloudIdentityGroup(), + "google_cloud_identity_group_membership": cloudidentity.ResourceCloudIdentityGroupMembership(), + "google_cloud_ids_endpoint": cloudids.ResourceCloudIdsEndpoint(), + "google_cloudiot_device": cloudiot.ResourceCloudIotDevice(), + "google_cloudiot_registry": 
cloudiot.ResourceCloudIotDeviceRegistry(), + "google_cloudiot_registry_iam_binding": tpgiamresource.ResourceIamBinding(cloudiot.CloudIotDeviceRegistryIamSchema, cloudiot.CloudIotDeviceRegistryIamUpdaterProducer, cloudiot.CloudIotDeviceRegistryIdParseFunc), + "google_cloudiot_registry_iam_member": tpgiamresource.ResourceIamMember(cloudiot.CloudIotDeviceRegistryIamSchema, cloudiot.CloudIotDeviceRegistryIamUpdaterProducer, cloudiot.CloudIotDeviceRegistryIdParseFunc), + "google_cloudiot_registry_iam_policy": tpgiamresource.ResourceIamPolicy(cloudiot.CloudIotDeviceRegistryIamSchema, cloudiot.CloudIotDeviceRegistryIamUpdaterProducer, cloudiot.CloudIotDeviceRegistryIdParseFunc), + "google_cloud_run_domain_mapping": cloudrun.ResourceCloudRunDomainMapping(), + "google_cloud_run_service": cloudrun.ResourceCloudRunService(), + "google_cloud_run_service_iam_binding": tpgiamresource.ResourceIamBinding(cloudrun.CloudRunServiceIamSchema, cloudrun.CloudRunServiceIamUpdaterProducer, cloudrun.CloudRunServiceIdParseFunc), + "google_cloud_run_service_iam_member": tpgiamresource.ResourceIamMember(cloudrun.CloudRunServiceIamSchema, cloudrun.CloudRunServiceIamUpdaterProducer, cloudrun.CloudRunServiceIdParseFunc), + "google_cloud_run_service_iam_policy": tpgiamresource.ResourceIamPolicy(cloudrun.CloudRunServiceIamSchema, cloudrun.CloudRunServiceIamUpdaterProducer, cloudrun.CloudRunServiceIdParseFunc), + "google_cloud_run_v2_job": cloudrunv2.ResourceCloudRunV2Job(), + "google_cloud_run_v2_job_iam_binding": tpgiamresource.ResourceIamBinding(cloudrunv2.CloudRunV2JobIamSchema, cloudrunv2.CloudRunV2JobIamUpdaterProducer, cloudrunv2.CloudRunV2JobIdParseFunc), + "google_cloud_run_v2_job_iam_member": tpgiamresource.ResourceIamMember(cloudrunv2.CloudRunV2JobIamSchema, cloudrunv2.CloudRunV2JobIamUpdaterProducer, cloudrunv2.CloudRunV2JobIdParseFunc), + "google_cloud_run_v2_job_iam_policy": tpgiamresource.ResourceIamPolicy(cloudrunv2.CloudRunV2JobIamSchema, cloudrunv2.CloudRunV2JobIamUpdaterProducer, 
cloudrunv2.CloudRunV2JobIdParseFunc), + "google_cloud_run_v2_service": cloudrunv2.ResourceCloudRunV2Service(), + "google_cloud_run_v2_service_iam_binding": tpgiamresource.ResourceIamBinding(cloudrunv2.CloudRunV2ServiceIamSchema, cloudrunv2.CloudRunV2ServiceIamUpdaterProducer, cloudrunv2.CloudRunV2ServiceIdParseFunc), + "google_cloud_run_v2_service_iam_member": tpgiamresource.ResourceIamMember(cloudrunv2.CloudRunV2ServiceIamSchema, cloudrunv2.CloudRunV2ServiceIamUpdaterProducer, cloudrunv2.CloudRunV2ServiceIdParseFunc), + "google_cloud_run_v2_service_iam_policy": tpgiamresource.ResourceIamPolicy(cloudrunv2.CloudRunV2ServiceIamSchema, cloudrunv2.CloudRunV2ServiceIamUpdaterProducer, cloudrunv2.CloudRunV2ServiceIdParseFunc), + "google_cloud_scheduler_job": cloudscheduler.ResourceCloudSchedulerJob(), + "google_cloud_tasks_queue": cloudtasks.ResourceCloudTasksQueue(), + "google_cloud_tasks_queue_iam_binding": tpgiamresource.ResourceIamBinding(cloudtasks.CloudTasksQueueIamSchema, cloudtasks.CloudTasksQueueIamUpdaterProducer, cloudtasks.CloudTasksQueueIdParseFunc), + "google_cloud_tasks_queue_iam_member": tpgiamresource.ResourceIamMember(cloudtasks.CloudTasksQueueIamSchema, cloudtasks.CloudTasksQueueIamUpdaterProducer, cloudtasks.CloudTasksQueueIdParseFunc), + "google_cloud_tasks_queue_iam_policy": tpgiamresource.ResourceIamPolicy(cloudtasks.CloudTasksQueueIamSchema, cloudtasks.CloudTasksQueueIamUpdaterProducer, cloudtasks.CloudTasksQueueIdParseFunc), + "google_compute_address": compute.ResourceComputeAddress(), + "google_compute_autoscaler": compute.ResourceComputeAutoscaler(), + "google_compute_backend_bucket": compute.ResourceComputeBackendBucket(), + "google_compute_backend_bucket_signed_url_key": compute.ResourceComputeBackendBucketSignedUrlKey(), + "google_compute_backend_service": compute.ResourceComputeBackendService(), + "google_compute_backend_service_signed_url_key": compute.ResourceComputeBackendServiceSignedUrlKey(), + "google_compute_disk": 
compute.ResourceComputeDisk(), + "google_compute_disk_iam_binding": tpgiamresource.ResourceIamBinding(compute.ComputeDiskIamSchema, compute.ComputeDiskIamUpdaterProducer, compute.ComputeDiskIdParseFunc), + "google_compute_disk_iam_member": tpgiamresource.ResourceIamMember(compute.ComputeDiskIamSchema, compute.ComputeDiskIamUpdaterProducer, compute.ComputeDiskIdParseFunc), + "google_compute_disk_iam_policy": tpgiamresource.ResourceIamPolicy(compute.ComputeDiskIamSchema, compute.ComputeDiskIamUpdaterProducer, compute.ComputeDiskIdParseFunc), + "google_compute_disk_resource_policy_attachment": compute.ResourceComputeDiskResourcePolicyAttachment(), + "google_compute_external_vpn_gateway": compute.ResourceComputeExternalVpnGateway(), + "google_compute_firewall": compute.ResourceComputeFirewall(), + "google_compute_forwarding_rule": compute.ResourceComputeForwardingRule(), + "google_compute_global_address": compute.ResourceComputeGlobalAddress(), + "google_compute_global_forwarding_rule": compute.ResourceComputeGlobalForwardingRule(), + "google_compute_global_network_endpoint": compute.ResourceComputeGlobalNetworkEndpoint(), + "google_compute_global_network_endpoint_group": compute.ResourceComputeGlobalNetworkEndpointGroup(), + "google_compute_ha_vpn_gateway": compute.ResourceComputeHaVpnGateway(), + "google_compute_health_check": compute.ResourceComputeHealthCheck(), + "google_compute_http_health_check": compute.ResourceComputeHttpHealthCheck(), + "google_compute_https_health_check": compute.ResourceComputeHttpsHealthCheck(), + "google_compute_image": compute.ResourceComputeImage(), + "google_compute_image_iam_binding": tpgiamresource.ResourceIamBinding(compute.ComputeImageIamSchema, compute.ComputeImageIamUpdaterProducer, compute.ComputeImageIdParseFunc), + "google_compute_image_iam_member": tpgiamresource.ResourceIamMember(compute.ComputeImageIamSchema, compute.ComputeImageIamUpdaterProducer, compute.ComputeImageIdParseFunc), + "google_compute_image_iam_policy": 
tpgiamresource.ResourceIamPolicy(compute.ComputeImageIamSchema, compute.ComputeImageIamUpdaterProducer, compute.ComputeImageIdParseFunc), + "google_compute_instance_iam_binding": tpgiamresource.ResourceIamBinding(compute.ComputeInstanceIamSchema, compute.ComputeInstanceIamUpdaterProducer, compute.ComputeInstanceIdParseFunc), + "google_compute_instance_iam_member": tpgiamresource.ResourceIamMember(compute.ComputeInstanceIamSchema, compute.ComputeInstanceIamUpdaterProducer, compute.ComputeInstanceIdParseFunc), + "google_compute_instance_iam_policy": tpgiamresource.ResourceIamPolicy(compute.ComputeInstanceIamSchema, compute.ComputeInstanceIamUpdaterProducer, compute.ComputeInstanceIdParseFunc), + "google_compute_instance_group_named_port": compute.ResourceComputeInstanceGroupNamedPort(), + "google_compute_interconnect_attachment": compute.ResourceComputeInterconnectAttachment(), + "google_compute_managed_ssl_certificate": compute.ResourceComputeManagedSslCertificate(), + "google_compute_network": compute.ResourceComputeNetwork(), + "google_compute_network_endpoint": compute.ResourceComputeNetworkEndpoint(), + "google_compute_network_endpoint_group": compute.ResourceComputeNetworkEndpointGroup(), + "google_compute_network_endpoints": compute.ResourceComputeNetworkEndpoints(), + "google_compute_network_peering_routes_config": compute.ResourceComputeNetworkPeeringRoutesConfig(), + "google_compute_node_group": compute.ResourceComputeNodeGroup(), + "google_compute_node_template": compute.ResourceComputeNodeTemplate(), + "google_compute_packet_mirroring": compute.ResourceComputePacketMirroring(), + "google_compute_per_instance_config": compute.ResourceComputePerInstanceConfig(), + "google_compute_public_advertised_prefix": compute.ResourceComputePublicAdvertisedPrefix(), + "google_compute_public_delegated_prefix": compute.ResourceComputePublicDelegatedPrefix(), + "google_compute_region_autoscaler": compute.ResourceComputeRegionAutoscaler(), + 
"google_compute_region_backend_service": compute.ResourceComputeRegionBackendService(), + "google_compute_region_commitment": compute.ResourceComputeRegionCommitment(), + "google_compute_region_disk": compute.ResourceComputeRegionDisk(), + "google_compute_region_disk_iam_binding": tpgiamresource.ResourceIamBinding(compute.ComputeRegionDiskIamSchema, compute.ComputeRegionDiskIamUpdaterProducer, compute.ComputeRegionDiskIdParseFunc), + "google_compute_region_disk_iam_member": tpgiamresource.ResourceIamMember(compute.ComputeRegionDiskIamSchema, compute.ComputeRegionDiskIamUpdaterProducer, compute.ComputeRegionDiskIdParseFunc), + "google_compute_region_disk_iam_policy": tpgiamresource.ResourceIamPolicy(compute.ComputeRegionDiskIamSchema, compute.ComputeRegionDiskIamUpdaterProducer, compute.ComputeRegionDiskIdParseFunc), + "google_compute_region_disk_resource_policy_attachment": compute.ResourceComputeRegionDiskResourcePolicyAttachment(), + "google_compute_region_health_check": compute.ResourceComputeRegionHealthCheck(), + "google_compute_region_network_endpoint_group": compute.ResourceComputeRegionNetworkEndpointGroup(), + "google_compute_region_per_instance_config": compute.ResourceComputeRegionPerInstanceConfig(), + "google_compute_region_ssl_certificate": compute.ResourceComputeRegionSslCertificate(), + "google_compute_region_target_http_proxy": compute.ResourceComputeRegionTargetHttpProxy(), + "google_compute_region_target_https_proxy": compute.ResourceComputeRegionTargetHttpsProxy(), + "google_compute_region_target_tcp_proxy": compute.ResourceComputeRegionTargetTcpProxy(), + "google_compute_region_url_map": compute.ResourceComputeRegionUrlMap(), + "google_compute_reservation": compute.ResourceComputeReservation(), + "google_compute_resource_policy": compute.ResourceComputeResourcePolicy(), + "google_compute_route": compute.ResourceComputeRoute(), + "google_compute_router": compute.ResourceComputeRouter(), + "google_compute_router_peer": 
compute.ResourceComputeRouterBgpPeer(), + "google_compute_router_nat": compute.ResourceComputeRouterNat(), + "google_compute_service_attachment": compute.ResourceComputeServiceAttachment(), + "google_compute_snapshot": compute.ResourceComputeSnapshot(), + "google_compute_snapshot_iam_binding": tpgiamresource.ResourceIamBinding(compute.ComputeSnapshotIamSchema, compute.ComputeSnapshotIamUpdaterProducer, compute.ComputeSnapshotIdParseFunc), + "google_compute_snapshot_iam_member": tpgiamresource.ResourceIamMember(compute.ComputeSnapshotIamSchema, compute.ComputeSnapshotIamUpdaterProducer, compute.ComputeSnapshotIdParseFunc), + "google_compute_snapshot_iam_policy": tpgiamresource.ResourceIamPolicy(compute.ComputeSnapshotIamSchema, compute.ComputeSnapshotIamUpdaterProducer, compute.ComputeSnapshotIdParseFunc), + "google_compute_ssl_certificate": compute.ResourceComputeSslCertificate(), + "google_compute_ssl_policy": compute.ResourceComputeSslPolicy(), + "google_compute_subnetwork": compute.ResourceComputeSubnetwork(), + "google_compute_subnetwork_iam_binding": tpgiamresource.ResourceIamBinding(compute.ComputeSubnetworkIamSchema, compute.ComputeSubnetworkIamUpdaterProducer, compute.ComputeSubnetworkIdParseFunc), + "google_compute_subnetwork_iam_member": tpgiamresource.ResourceIamMember(compute.ComputeSubnetworkIamSchema, compute.ComputeSubnetworkIamUpdaterProducer, compute.ComputeSubnetworkIdParseFunc), + "google_compute_subnetwork_iam_policy": tpgiamresource.ResourceIamPolicy(compute.ComputeSubnetworkIamSchema, compute.ComputeSubnetworkIamUpdaterProducer, compute.ComputeSubnetworkIdParseFunc), + "google_compute_target_grpc_proxy": compute.ResourceComputeTargetGrpcProxy(), + "google_compute_target_http_proxy": compute.ResourceComputeTargetHttpProxy(), + "google_compute_target_https_proxy": compute.ResourceComputeTargetHttpsProxy(), + "google_compute_target_instance": compute.ResourceComputeTargetInstance(), + "google_compute_target_ssl_proxy": 
compute.ResourceComputeTargetSslProxy(), + "google_compute_target_tcp_proxy": compute.ResourceComputeTargetTcpProxy(), + "google_compute_url_map": compute.ResourceComputeUrlMap(), + "google_compute_vpn_gateway": compute.ResourceComputeVpnGateway(), + "google_compute_vpn_tunnel": compute.ResourceComputeVpnTunnel(), + "google_container_analysis_note": containeranalysis.ResourceContainerAnalysisNote(), + "google_container_analysis_note_iam_binding": tpgiamresource.ResourceIamBinding(containeranalysis.ContainerAnalysisNoteIamSchema, containeranalysis.ContainerAnalysisNoteIamUpdaterProducer, containeranalysis.ContainerAnalysisNoteIdParseFunc), + "google_container_analysis_note_iam_member": tpgiamresource.ResourceIamMember(containeranalysis.ContainerAnalysisNoteIamSchema, containeranalysis.ContainerAnalysisNoteIamUpdaterProducer, containeranalysis.ContainerAnalysisNoteIdParseFunc), + "google_container_analysis_note_iam_policy": tpgiamresource.ResourceIamPolicy(containeranalysis.ContainerAnalysisNoteIamSchema, containeranalysis.ContainerAnalysisNoteIamUpdaterProducer, containeranalysis.ContainerAnalysisNoteIdParseFunc), + "google_container_analysis_occurrence": containeranalysis.ResourceContainerAnalysisOccurrence(), + "google_container_attached_cluster": containerattached.ResourceContainerAttachedCluster(), + "google_database_migration_service_connection_profile": databasemigrationservice.ResourceDatabaseMigrationServiceConnectionProfile(), + "google_data_catalog_entry": datacatalog.ResourceDataCatalogEntry(), + "google_data_catalog_entry_group": datacatalog.ResourceDataCatalogEntryGroup(), + "google_data_catalog_entry_group_iam_binding": tpgiamresource.ResourceIamBinding(datacatalog.DataCatalogEntryGroupIamSchema, datacatalog.DataCatalogEntryGroupIamUpdaterProducer, datacatalog.DataCatalogEntryGroupIdParseFunc), + "google_data_catalog_entry_group_iam_member": tpgiamresource.ResourceIamMember(datacatalog.DataCatalogEntryGroupIamSchema, 
datacatalog.DataCatalogEntryGroupIamUpdaterProducer, datacatalog.DataCatalogEntryGroupIdParseFunc), + "google_data_catalog_entry_group_iam_policy": tpgiamresource.ResourceIamPolicy(datacatalog.DataCatalogEntryGroupIamSchema, datacatalog.DataCatalogEntryGroupIamUpdaterProducer, datacatalog.DataCatalogEntryGroupIdParseFunc), + "google_data_catalog_policy_tag": datacatalog.ResourceDataCatalogPolicyTag(), + "google_data_catalog_policy_tag_iam_binding": tpgiamresource.ResourceIamBinding(datacatalog.DataCatalogPolicyTagIamSchema, datacatalog.DataCatalogPolicyTagIamUpdaterProducer, datacatalog.DataCatalogPolicyTagIdParseFunc), + "google_data_catalog_policy_tag_iam_member": tpgiamresource.ResourceIamMember(datacatalog.DataCatalogPolicyTagIamSchema, datacatalog.DataCatalogPolicyTagIamUpdaterProducer, datacatalog.DataCatalogPolicyTagIdParseFunc), + "google_data_catalog_policy_tag_iam_policy": tpgiamresource.ResourceIamPolicy(datacatalog.DataCatalogPolicyTagIamSchema, datacatalog.DataCatalogPolicyTagIamUpdaterProducer, datacatalog.DataCatalogPolicyTagIdParseFunc), + "google_data_catalog_tag": datacatalog.ResourceDataCatalogTag(), + "google_data_catalog_tag_template": datacatalog.ResourceDataCatalogTagTemplate(), + "google_data_catalog_tag_template_iam_binding": tpgiamresource.ResourceIamBinding(datacatalog.DataCatalogTagTemplateIamSchema, datacatalog.DataCatalogTagTemplateIamUpdaterProducer, datacatalog.DataCatalogTagTemplateIdParseFunc), + "google_data_catalog_tag_template_iam_member": tpgiamresource.ResourceIamMember(datacatalog.DataCatalogTagTemplateIamSchema, datacatalog.DataCatalogTagTemplateIamUpdaterProducer, datacatalog.DataCatalogTagTemplateIdParseFunc), + "google_data_catalog_tag_template_iam_policy": tpgiamresource.ResourceIamPolicy(datacatalog.DataCatalogTagTemplateIamSchema, datacatalog.DataCatalogTagTemplateIamUpdaterProducer, datacatalog.DataCatalogTagTemplateIdParseFunc), + "google_data_catalog_taxonomy": datacatalog.ResourceDataCatalogTaxonomy(), + 
"google_data_catalog_taxonomy_iam_binding": tpgiamresource.ResourceIamBinding(datacatalog.DataCatalogTaxonomyIamSchema, datacatalog.DataCatalogTaxonomyIamUpdaterProducer, datacatalog.DataCatalogTaxonomyIdParseFunc), + "google_data_catalog_taxonomy_iam_member": tpgiamresource.ResourceIamMember(datacatalog.DataCatalogTaxonomyIamSchema, datacatalog.DataCatalogTaxonomyIamUpdaterProducer, datacatalog.DataCatalogTaxonomyIdParseFunc), + "google_data_catalog_taxonomy_iam_policy": tpgiamresource.ResourceIamPolicy(datacatalog.DataCatalogTaxonomyIamSchema, datacatalog.DataCatalogTaxonomyIamUpdaterProducer, datacatalog.DataCatalogTaxonomyIdParseFunc), + "google_data_fusion_instance": datafusion.ResourceDataFusionInstance(), + "google_data_fusion_instance_iam_binding": tpgiamresource.ResourceIamBinding(datafusion.DataFusionInstanceIamSchema, datafusion.DataFusionInstanceIamUpdaterProducer, datafusion.DataFusionInstanceIdParseFunc), + "google_data_fusion_instance_iam_member": tpgiamresource.ResourceIamMember(datafusion.DataFusionInstanceIamSchema, datafusion.DataFusionInstanceIamUpdaterProducer, datafusion.DataFusionInstanceIdParseFunc), + "google_data_fusion_instance_iam_policy": tpgiamresource.ResourceIamPolicy(datafusion.DataFusionInstanceIamSchema, datafusion.DataFusionInstanceIamUpdaterProducer, datafusion.DataFusionInstanceIdParseFunc), + "google_data_loss_prevention_deidentify_template": datalossprevention.ResourceDataLossPreventionDeidentifyTemplate(), + "google_data_loss_prevention_inspect_template": datalossprevention.ResourceDataLossPreventionInspectTemplate(), + "google_data_loss_prevention_job_trigger": datalossprevention.ResourceDataLossPreventionJobTrigger(), + "google_data_loss_prevention_stored_info_type": datalossprevention.ResourceDataLossPreventionStoredInfoType(), + "google_dataplex_asset_iam_binding": tpgiamresource.ResourceIamBinding(dataplex.DataplexAssetIamSchema, dataplex.DataplexAssetIamUpdaterProducer, dataplex.DataplexAssetIdParseFunc), + 
"google_dataplex_asset_iam_member": tpgiamresource.ResourceIamMember(dataplex.DataplexAssetIamSchema, dataplex.DataplexAssetIamUpdaterProducer, dataplex.DataplexAssetIdParseFunc), + "google_dataplex_asset_iam_policy": tpgiamresource.ResourceIamPolicy(dataplex.DataplexAssetIamSchema, dataplex.DataplexAssetIamUpdaterProducer, dataplex.DataplexAssetIdParseFunc), + "google_dataplex_datascan": dataplex.ResourceDataplexDatascan(), + "google_dataplex_datascan_iam_binding": tpgiamresource.ResourceIamBinding(dataplex.DataplexDatascanIamSchema, dataplex.DataplexDatascanIamUpdaterProducer, dataplex.DataplexDatascanIdParseFunc), + "google_dataplex_datascan_iam_member": tpgiamresource.ResourceIamMember(dataplex.DataplexDatascanIamSchema, dataplex.DataplexDatascanIamUpdaterProducer, dataplex.DataplexDatascanIdParseFunc), + "google_dataplex_datascan_iam_policy": tpgiamresource.ResourceIamPolicy(dataplex.DataplexDatascanIamSchema, dataplex.DataplexDatascanIamUpdaterProducer, dataplex.DataplexDatascanIdParseFunc), + "google_dataplex_lake_iam_binding": tpgiamresource.ResourceIamBinding(dataplex.DataplexLakeIamSchema, dataplex.DataplexLakeIamUpdaterProducer, dataplex.DataplexLakeIdParseFunc), + "google_dataplex_lake_iam_member": tpgiamresource.ResourceIamMember(dataplex.DataplexLakeIamSchema, dataplex.DataplexLakeIamUpdaterProducer, dataplex.DataplexLakeIdParseFunc), + "google_dataplex_lake_iam_policy": tpgiamresource.ResourceIamPolicy(dataplex.DataplexLakeIamSchema, dataplex.DataplexLakeIamUpdaterProducer, dataplex.DataplexLakeIdParseFunc), + "google_dataplex_zone_iam_binding": tpgiamresource.ResourceIamBinding(dataplex.DataplexZoneIamSchema, dataplex.DataplexZoneIamUpdaterProducer, dataplex.DataplexZoneIdParseFunc), + "google_dataplex_zone_iam_member": tpgiamresource.ResourceIamMember(dataplex.DataplexZoneIamSchema, dataplex.DataplexZoneIamUpdaterProducer, dataplex.DataplexZoneIdParseFunc), + "google_dataplex_zone_iam_policy": 
tpgiamresource.ResourceIamPolicy(dataplex.DataplexZoneIamSchema, dataplex.DataplexZoneIamUpdaterProducer, dataplex.DataplexZoneIdParseFunc), + "google_dataproc_autoscaling_policy": dataproc.ResourceDataprocAutoscalingPolicy(), + "google_dataproc_autoscaling_policy_iam_binding": tpgiamresource.ResourceIamBinding(dataproc.DataprocAutoscalingPolicyIamSchema, dataproc.DataprocAutoscalingPolicyIamUpdaterProducer, dataproc.DataprocAutoscalingPolicyIdParseFunc), + "google_dataproc_autoscaling_policy_iam_member": tpgiamresource.ResourceIamMember(dataproc.DataprocAutoscalingPolicyIamSchema, dataproc.DataprocAutoscalingPolicyIamUpdaterProducer, dataproc.DataprocAutoscalingPolicyIdParseFunc), + "google_dataproc_autoscaling_policy_iam_policy": tpgiamresource.ResourceIamPolicy(dataproc.DataprocAutoscalingPolicyIamSchema, dataproc.DataprocAutoscalingPolicyIamUpdaterProducer, dataproc.DataprocAutoscalingPolicyIdParseFunc), + "google_dataproc_metastore_service": dataprocmetastore.ResourceDataprocMetastoreService(), + "google_dataproc_metastore_service_iam_binding": tpgiamresource.ResourceIamBinding(dataprocmetastore.DataprocMetastoreServiceIamSchema, dataprocmetastore.DataprocMetastoreServiceIamUpdaterProducer, dataprocmetastore.DataprocMetastoreServiceIdParseFunc), + "google_dataproc_metastore_service_iam_member": tpgiamresource.ResourceIamMember(dataprocmetastore.DataprocMetastoreServiceIamSchema, dataprocmetastore.DataprocMetastoreServiceIamUpdaterProducer, dataprocmetastore.DataprocMetastoreServiceIdParseFunc), + "google_dataproc_metastore_service_iam_policy": tpgiamresource.ResourceIamPolicy(dataprocmetastore.DataprocMetastoreServiceIamSchema, dataprocmetastore.DataprocMetastoreServiceIamUpdaterProducer, dataprocmetastore.DataprocMetastoreServiceIdParseFunc), + "google_datastore_index": datastore.ResourceDatastoreIndex(), + "google_datastream_connection_profile": datastream.ResourceDatastreamConnectionProfile(), + "google_datastream_private_connection": 
datastream.ResourceDatastreamPrivateConnection(), + "google_datastream_stream": datastream.ResourceDatastreamStream(), + "google_deployment_manager_deployment": deploymentmanager.ResourceDeploymentManagerDeployment(), + "google_dialogflow_agent": dialogflow.ResourceDialogflowAgent(), + "google_dialogflow_entity_type": dialogflow.ResourceDialogflowEntityType(), + "google_dialogflow_fulfillment": dialogflow.ResourceDialogflowFulfillment(), + "google_dialogflow_intent": dialogflow.ResourceDialogflowIntent(), + "google_dialogflow_cx_agent": dialogflowcx.ResourceDialogflowCXAgent(), + "google_dialogflow_cx_entity_type": dialogflowcx.ResourceDialogflowCXEntityType(), + "google_dialogflow_cx_flow": dialogflowcx.ResourceDialogflowCXFlow(), + "google_dialogflow_cx_intent": dialogflowcx.ResourceDialogflowCXIntent(), + "google_dialogflow_cx_page": dialogflowcx.ResourceDialogflowCXPage(), + "google_dialogflow_cx_webhook": dialogflowcx.ResourceDialogflowCXWebhook(), + "google_dns_managed_zone": dns.ResourceDNSManagedZone(), + "google_dns_managed_zone_iam_binding": tpgiamresource.ResourceIamBinding(dns.DNSManagedZoneIamSchema, dns.DNSManagedZoneIamUpdaterProducer, dns.DNSManagedZoneIdParseFunc), + "google_dns_managed_zone_iam_member": tpgiamresource.ResourceIamMember(dns.DNSManagedZoneIamSchema, dns.DNSManagedZoneIamUpdaterProducer, dns.DNSManagedZoneIdParseFunc), + "google_dns_managed_zone_iam_policy": tpgiamresource.ResourceIamPolicy(dns.DNSManagedZoneIamSchema, dns.DNSManagedZoneIamUpdaterProducer, dns.DNSManagedZoneIdParseFunc), + "google_dns_policy": dns.ResourceDNSPolicy(), + "google_dns_response_policy": dns.ResourceDNSResponsePolicy(), + "google_dns_response_policy_rule": dns.ResourceDNSResponsePolicyRule(), + "google_document_ai_processor": documentai.ResourceDocumentAIProcessor(), + "google_document_ai_processor_default_version": documentai.ResourceDocumentAIProcessorDefaultVersion(), + "google_essential_contacts_contact": 
essentialcontacts.ResourceEssentialContactsContact(), + "google_filestore_backup": filestore.ResourceFilestoreBackup(), + "google_filestore_instance": filestore.ResourceFilestoreInstance(), + "google_filestore_snapshot": filestore.ResourceFilestoreSnapshot(), + "google_firestore_database": firestore.ResourceFirestoreDatabase(), + "google_firestore_document": firestore.ResourceFirestoreDocument(), + "google_firestore_field": firestore.ResourceFirestoreField(), + "google_firestore_index": firestore.ResourceFirestoreIndex(), + "google_game_services_game_server_cluster": gameservices.ResourceGameServicesGameServerCluster(), + "google_game_services_game_server_config": gameservices.ResourceGameServicesGameServerConfig(), + "google_game_services_game_server_deployment": gameservices.ResourceGameServicesGameServerDeployment(), + "google_game_services_game_server_deployment_rollout": gameservices.ResourceGameServicesGameServerDeploymentRollout(), + "google_game_services_realm": gameservices.ResourceGameServicesRealm(), + "google_gke_backup_backup_plan": gkebackup.ResourceGKEBackupBackupPlan(), + "google_gke_backup_backup_plan_iam_binding": tpgiamresource.ResourceIamBinding(gkebackup.GKEBackupBackupPlanIamSchema, gkebackup.GKEBackupBackupPlanIamUpdaterProducer, gkebackup.GKEBackupBackupPlanIdParseFunc), + "google_gke_backup_backup_plan_iam_member": tpgiamresource.ResourceIamMember(gkebackup.GKEBackupBackupPlanIamSchema, gkebackup.GKEBackupBackupPlanIamUpdaterProducer, gkebackup.GKEBackupBackupPlanIdParseFunc), + "google_gke_backup_backup_plan_iam_policy": tpgiamresource.ResourceIamPolicy(gkebackup.GKEBackupBackupPlanIamSchema, gkebackup.GKEBackupBackupPlanIamUpdaterProducer, gkebackup.GKEBackupBackupPlanIdParseFunc), + "google_gke_hub_membership": gkehub.ResourceGKEHubMembership(), + "google_gke_hub_membership_iam_binding": tpgiamresource.ResourceIamBinding(gkehub.GKEHubMembershipIamSchema, gkehub.GKEHubMembershipIamUpdaterProducer, gkehub.GKEHubMembershipIdParseFunc), + 
"google_gke_hub_membership_iam_member": tpgiamresource.ResourceIamMember(gkehub.GKEHubMembershipIamSchema, gkehub.GKEHubMembershipIamUpdaterProducer, gkehub.GKEHubMembershipIdParseFunc), + "google_gke_hub_membership_iam_policy": tpgiamresource.ResourceIamPolicy(gkehub.GKEHubMembershipIamSchema, gkehub.GKEHubMembershipIamUpdaterProducer, gkehub.GKEHubMembershipIdParseFunc), + "google_gke_hub_feature": gkehub2.ResourceGKEHub2Feature(), + "google_gke_hub_feature_iam_binding": tpgiamresource.ResourceIamBinding(gkehub2.GKEHub2FeatureIamSchema, gkehub2.GKEHub2FeatureIamUpdaterProducer, gkehub2.GKEHub2FeatureIdParseFunc), + "google_gke_hub_feature_iam_member": tpgiamresource.ResourceIamMember(gkehub2.GKEHub2FeatureIamSchema, gkehub2.GKEHub2FeatureIamUpdaterProducer, gkehub2.GKEHub2FeatureIdParseFunc), + "google_gke_hub_feature_iam_policy": tpgiamresource.ResourceIamPolicy(gkehub2.GKEHub2FeatureIamSchema, gkehub2.GKEHub2FeatureIamUpdaterProducer, gkehub2.GKEHub2FeatureIdParseFunc), + "google_healthcare_consent_store": healthcare.ResourceHealthcareConsentStore(), + "google_healthcare_consent_store_iam_binding": tpgiamresource.ResourceIamBinding(healthcare.HealthcareConsentStoreIamSchema, healthcare.HealthcareConsentStoreIamUpdaterProducer, healthcare.HealthcareConsentStoreIdParseFunc), + "google_healthcare_consent_store_iam_member": tpgiamresource.ResourceIamMember(healthcare.HealthcareConsentStoreIamSchema, healthcare.HealthcareConsentStoreIamUpdaterProducer, healthcare.HealthcareConsentStoreIdParseFunc), + "google_healthcare_consent_store_iam_policy": tpgiamresource.ResourceIamPolicy(healthcare.HealthcareConsentStoreIamSchema, healthcare.HealthcareConsentStoreIamUpdaterProducer, healthcare.HealthcareConsentStoreIdParseFunc), + "google_healthcare_dataset": healthcare.ResourceHealthcareDataset(), + "google_healthcare_dicom_store": healthcare.ResourceHealthcareDicomStore(), + "google_healthcare_fhir_store": healthcare.ResourceHealthcareFhirStore(), + 
"google_healthcare_hl7_v2_store": healthcare.ResourceHealthcareHl7V2Store(), + "google_iam_access_boundary_policy": iam2.ResourceIAM2AccessBoundaryPolicy(), + "google_iam_workload_identity_pool": iambeta.ResourceIAMBetaWorkloadIdentityPool(), + "google_iam_workload_identity_pool_provider": iambeta.ResourceIAMBetaWorkloadIdentityPoolProvider(), + "google_iam_workforce_pool": iamworkforcepool.ResourceIAMWorkforcePoolWorkforcePool(), + "google_iam_workforce_pool_provider": iamworkforcepool.ResourceIAMWorkforcePoolWorkforcePoolProvider(), + "google_iap_app_engine_service_iam_binding": tpgiamresource.ResourceIamBinding(iap.IapAppEngineServiceIamSchema, iap.IapAppEngineServiceIamUpdaterProducer, iap.IapAppEngineServiceIdParseFunc), + "google_iap_app_engine_service_iam_member": tpgiamresource.ResourceIamMember(iap.IapAppEngineServiceIamSchema, iap.IapAppEngineServiceIamUpdaterProducer, iap.IapAppEngineServiceIdParseFunc), + "google_iap_app_engine_service_iam_policy": tpgiamresource.ResourceIamPolicy(iap.IapAppEngineServiceIamSchema, iap.IapAppEngineServiceIamUpdaterProducer, iap.IapAppEngineServiceIdParseFunc), + "google_iap_app_engine_version_iam_binding": tpgiamresource.ResourceIamBinding(iap.IapAppEngineVersionIamSchema, iap.IapAppEngineVersionIamUpdaterProducer, iap.IapAppEngineVersionIdParseFunc), + "google_iap_app_engine_version_iam_member": tpgiamresource.ResourceIamMember(iap.IapAppEngineVersionIamSchema, iap.IapAppEngineVersionIamUpdaterProducer, iap.IapAppEngineVersionIdParseFunc), + "google_iap_app_engine_version_iam_policy": tpgiamresource.ResourceIamPolicy(iap.IapAppEngineVersionIamSchema, iap.IapAppEngineVersionIamUpdaterProducer, iap.IapAppEngineVersionIdParseFunc), + "google_iap_brand": iap.ResourceIapBrand(), + "google_iap_client": iap.ResourceIapClient(), + "google_iap_tunnel_iam_binding": tpgiamresource.ResourceIamBinding(iap.IapTunnelIamSchema, iap.IapTunnelIamUpdaterProducer, iap.IapTunnelIdParseFunc), + "google_iap_tunnel_iam_member": 
tpgiamresource.ResourceIamMember(iap.IapTunnelIamSchema, iap.IapTunnelIamUpdaterProducer, iap.IapTunnelIdParseFunc), + "google_iap_tunnel_iam_policy": tpgiamresource.ResourceIamPolicy(iap.IapTunnelIamSchema, iap.IapTunnelIamUpdaterProducer, iap.IapTunnelIdParseFunc), + "google_iap_tunnel_instance_iam_binding": tpgiamresource.ResourceIamBinding(iap.IapTunnelInstanceIamSchema, iap.IapTunnelInstanceIamUpdaterProducer, iap.IapTunnelInstanceIdParseFunc), + "google_iap_tunnel_instance_iam_member": tpgiamresource.ResourceIamMember(iap.IapTunnelInstanceIamSchema, iap.IapTunnelInstanceIamUpdaterProducer, iap.IapTunnelInstanceIdParseFunc), + "google_iap_tunnel_instance_iam_policy": tpgiamresource.ResourceIamPolicy(iap.IapTunnelInstanceIamSchema, iap.IapTunnelInstanceIamUpdaterProducer, iap.IapTunnelInstanceIdParseFunc), + "google_iap_web_iam_binding": tpgiamresource.ResourceIamBinding(iap.IapWebIamSchema, iap.IapWebIamUpdaterProducer, iap.IapWebIdParseFunc), + "google_iap_web_iam_member": tpgiamresource.ResourceIamMember(iap.IapWebIamSchema, iap.IapWebIamUpdaterProducer, iap.IapWebIdParseFunc), + "google_iap_web_iam_policy": tpgiamresource.ResourceIamPolicy(iap.IapWebIamSchema, iap.IapWebIamUpdaterProducer, iap.IapWebIdParseFunc), + "google_iap_web_backend_service_iam_binding": tpgiamresource.ResourceIamBinding(iap.IapWebBackendServiceIamSchema, iap.IapWebBackendServiceIamUpdaterProducer, iap.IapWebBackendServiceIdParseFunc), + "google_iap_web_backend_service_iam_member": tpgiamresource.ResourceIamMember(iap.IapWebBackendServiceIamSchema, iap.IapWebBackendServiceIamUpdaterProducer, iap.IapWebBackendServiceIdParseFunc), + "google_iap_web_backend_service_iam_policy": tpgiamresource.ResourceIamPolicy(iap.IapWebBackendServiceIamSchema, iap.IapWebBackendServiceIamUpdaterProducer, iap.IapWebBackendServiceIdParseFunc), + "google_iap_web_type_app_engine_iam_binding": tpgiamresource.ResourceIamBinding(iap.IapWebTypeAppEngineIamSchema, iap.IapWebTypeAppEngineIamUpdaterProducer, 
iap.IapWebTypeAppEngineIdParseFunc), + "google_iap_web_type_app_engine_iam_member": tpgiamresource.ResourceIamMember(iap.IapWebTypeAppEngineIamSchema, iap.IapWebTypeAppEngineIamUpdaterProducer, iap.IapWebTypeAppEngineIdParseFunc), + "google_iap_web_type_app_engine_iam_policy": tpgiamresource.ResourceIamPolicy(iap.IapWebTypeAppEngineIamSchema, iap.IapWebTypeAppEngineIamUpdaterProducer, iap.IapWebTypeAppEngineIdParseFunc), + "google_iap_web_type_compute_iam_binding": tpgiamresource.ResourceIamBinding(iap.IapWebTypeComputeIamSchema, iap.IapWebTypeComputeIamUpdaterProducer, iap.IapWebTypeComputeIdParseFunc), + "google_iap_web_type_compute_iam_member": tpgiamresource.ResourceIamMember(iap.IapWebTypeComputeIamSchema, iap.IapWebTypeComputeIamUpdaterProducer, iap.IapWebTypeComputeIdParseFunc), + "google_iap_web_type_compute_iam_policy": tpgiamresource.ResourceIamPolicy(iap.IapWebTypeComputeIamSchema, iap.IapWebTypeComputeIamUpdaterProducer, iap.IapWebTypeComputeIdParseFunc), + "google_identity_platform_config": identityplatform.ResourceIdentityPlatformConfig(), + "google_identity_platform_default_supported_idp_config": identityplatform.ResourceIdentityPlatformDefaultSupportedIdpConfig(), + "google_identity_platform_inbound_saml_config": identityplatform.ResourceIdentityPlatformInboundSamlConfig(), + "google_identity_platform_oauth_idp_config": identityplatform.ResourceIdentityPlatformOauthIdpConfig(), + "google_identity_platform_project_default_config": identityplatform.ResourceIdentityPlatformProjectDefaultConfig(), + "google_identity_platform_tenant": identityplatform.ResourceIdentityPlatformTenant(), + "google_identity_platform_tenant_default_supported_idp_config": identityplatform.ResourceIdentityPlatformTenantDefaultSupportedIdpConfig(), + "google_identity_platform_tenant_inbound_saml_config": identityplatform.ResourceIdentityPlatformTenantInboundSamlConfig(), + "google_identity_platform_tenant_oauth_idp_config": 
identityplatform.ResourceIdentityPlatformTenantOauthIdpConfig(), + "google_kms_crypto_key": kms.ResourceKMSCryptoKey(), + "google_kms_crypto_key_version": kms.ResourceKMSCryptoKeyVersion(), + "google_kms_key_ring": kms.ResourceKMSKeyRing(), + "google_kms_key_ring_import_job": kms.ResourceKMSKeyRingImportJob(), + "google_kms_secret_ciphertext": kms.ResourceKMSSecretCiphertext(), + "google_logging_linked_dataset": logging.ResourceLoggingLinkedDataset(), + "google_logging_log_view": logging.ResourceLoggingLogView(), + "google_logging_metric": logging.ResourceLoggingMetric(), + "google_looker_instance": looker.ResourceLookerInstance(), + "google_memcache_instance": memcache.ResourceMemcacheInstance(), + "google_ml_engine_model": mlengine.ResourceMLEngineModel(), + "google_monitoring_alert_policy": monitoring.ResourceMonitoringAlertPolicy(), + "google_monitoring_service": monitoring.ResourceMonitoringGenericService(), + "google_monitoring_group": monitoring.ResourceMonitoringGroup(), + "google_monitoring_metric_descriptor": monitoring.ResourceMonitoringMetricDescriptor(), + "google_monitoring_monitored_project": monitoring.ResourceMonitoringMonitoredProject(), + "google_monitoring_notification_channel": monitoring.ResourceMonitoringNotificationChannel(), + "google_monitoring_custom_service": monitoring.ResourceMonitoringService(), + "google_monitoring_slo": monitoring.ResourceMonitoringSlo(), + "google_monitoring_uptime_check_config": monitoring.ResourceMonitoringUptimeCheckConfig(), + "google_network_management_connectivity_test": networkmanagement.ResourceNetworkManagementConnectivityTest(), + "google_network_security_address_group": networksecurity.ResourceNetworkSecurityAddressGroup(), + "google_network_security_gateway_security_policy": networksecurity.ResourceNetworkSecurityGatewaySecurityPolicy(), + "google_network_security_gateway_security_policy_rule": networksecurity.ResourceNetworkSecurityGatewaySecurityPolicyRule(), + "google_network_security_url_lists": 
networksecurity.ResourceNetworkSecurityUrlLists(), + "google_network_services_edge_cache_keyset": networkservices.ResourceNetworkServicesEdgeCacheKeyset(), + "google_network_services_edge_cache_origin": networkservices.ResourceNetworkServicesEdgeCacheOrigin(), + "google_network_services_edge_cache_service": networkservices.ResourceNetworkServicesEdgeCacheService(), + "google_network_services_gateway": networkservices.ResourceNetworkServicesGateway(), + "google_notebooks_environment": notebooks.ResourceNotebooksEnvironment(), + "google_notebooks_instance": notebooks.ResourceNotebooksInstance(), + "google_notebooks_instance_iam_binding": tpgiamresource.ResourceIamBinding(notebooks.NotebooksInstanceIamSchema, notebooks.NotebooksInstanceIamUpdaterProducer, notebooks.NotebooksInstanceIdParseFunc), + "google_notebooks_instance_iam_member": tpgiamresource.ResourceIamMember(notebooks.NotebooksInstanceIamSchema, notebooks.NotebooksInstanceIamUpdaterProducer, notebooks.NotebooksInstanceIdParseFunc), + "google_notebooks_instance_iam_policy": tpgiamresource.ResourceIamPolicy(notebooks.NotebooksInstanceIamSchema, notebooks.NotebooksInstanceIamUpdaterProducer, notebooks.NotebooksInstanceIdParseFunc), + "google_notebooks_location": notebooks.ResourceNotebooksLocation(), + "google_notebooks_runtime": notebooks.ResourceNotebooksRuntime(), + "google_notebooks_runtime_iam_binding": tpgiamresource.ResourceIamBinding(notebooks.NotebooksRuntimeIamSchema, notebooks.NotebooksRuntimeIamUpdaterProducer, notebooks.NotebooksRuntimeIdParseFunc), + "google_notebooks_runtime_iam_member": tpgiamresource.ResourceIamMember(notebooks.NotebooksRuntimeIamSchema, notebooks.NotebooksRuntimeIamUpdaterProducer, notebooks.NotebooksRuntimeIdParseFunc), + "google_notebooks_runtime_iam_policy": tpgiamresource.ResourceIamPolicy(notebooks.NotebooksRuntimeIamSchema, notebooks.NotebooksRuntimeIamUpdaterProducer, notebooks.NotebooksRuntimeIdParseFunc), + "google_os_config_patch_deployment": 
osconfig.ResourceOSConfigPatchDeployment(), + "google_os_login_ssh_public_key": oslogin.ResourceOSLoginSSHPublicKey(), + "google_privateca_ca_pool": privateca.ResourcePrivatecaCaPool(), + "google_privateca_ca_pool_iam_binding": tpgiamresource.ResourceIamBinding(privateca.PrivatecaCaPoolIamSchema, privateca.PrivatecaCaPoolIamUpdaterProducer, privateca.PrivatecaCaPoolIdParseFunc), + "google_privateca_ca_pool_iam_member": tpgiamresource.ResourceIamMember(privateca.PrivatecaCaPoolIamSchema, privateca.PrivatecaCaPoolIamUpdaterProducer, privateca.PrivatecaCaPoolIdParseFunc), + "google_privateca_ca_pool_iam_policy": tpgiamresource.ResourceIamPolicy(privateca.PrivatecaCaPoolIamSchema, privateca.PrivatecaCaPoolIamUpdaterProducer, privateca.PrivatecaCaPoolIdParseFunc), + "google_privateca_certificate": privateca.ResourcePrivatecaCertificate(), + "google_privateca_certificate_authority": privateca.ResourcePrivatecaCertificateAuthority(), + "google_privateca_certificate_template_iam_binding": tpgiamresource.ResourceIamBinding(privateca.PrivatecaCertificateTemplateIamSchema, privateca.PrivatecaCertificateTemplateIamUpdaterProducer, privateca.PrivatecaCertificateTemplateIdParseFunc), + "google_privateca_certificate_template_iam_member": tpgiamresource.ResourceIamMember(privateca.PrivatecaCertificateTemplateIamSchema, privateca.PrivatecaCertificateTemplateIamUpdaterProducer, privateca.PrivatecaCertificateTemplateIdParseFunc), + "google_privateca_certificate_template_iam_policy": tpgiamresource.ResourceIamPolicy(privateca.PrivatecaCertificateTemplateIamSchema, privateca.PrivatecaCertificateTemplateIamUpdaterProducer, privateca.PrivatecaCertificateTemplateIdParseFunc), + "google_public_ca_external_account_key": publicca.ResourcePublicCAExternalAccountKey(), + "google_pubsub_schema": pubsub.ResourcePubsubSchema(), + "google_pubsub_subscription": pubsub.ResourcePubsubSubscription(), + "google_pubsub_topic": pubsub.ResourcePubsubTopic(), + "google_pubsub_topic_iam_binding": 
tpgiamresource.ResourceIamBinding(pubsub.PubsubTopicIamSchema, pubsub.PubsubTopicIamUpdaterProducer, pubsub.PubsubTopicIdParseFunc), + "google_pubsub_topic_iam_member": tpgiamresource.ResourceIamMember(pubsub.PubsubTopicIamSchema, pubsub.PubsubTopicIamUpdaterProducer, pubsub.PubsubTopicIdParseFunc), + "google_pubsub_topic_iam_policy": tpgiamresource.ResourceIamPolicy(pubsub.PubsubTopicIamSchema, pubsub.PubsubTopicIamUpdaterProducer, pubsub.PubsubTopicIdParseFunc), + "google_pubsub_lite_reservation": pubsublite.ResourcePubsubLiteReservation(), + "google_pubsub_lite_subscription": pubsublite.ResourcePubsubLiteSubscription(), + "google_pubsub_lite_topic": pubsublite.ResourcePubsubLiteTopic(), + "google_redis_instance": redis.ResourceRedisInstance(), + "google_resource_manager_lien": resourcemanager.ResourceResourceManagerLien(), + "google_secret_manager_secret": secretmanager.ResourceSecretManagerSecret(), + "google_secret_manager_secret_iam_binding": tpgiamresource.ResourceIamBinding(secretmanager.SecretManagerSecretIamSchema, secretmanager.SecretManagerSecretIamUpdaterProducer, secretmanager.SecretManagerSecretIdParseFunc), + "google_secret_manager_secret_iam_member": tpgiamresource.ResourceIamMember(secretmanager.SecretManagerSecretIamSchema, secretmanager.SecretManagerSecretIamUpdaterProducer, secretmanager.SecretManagerSecretIdParseFunc), + "google_secret_manager_secret_iam_policy": tpgiamresource.ResourceIamPolicy(secretmanager.SecretManagerSecretIamSchema, secretmanager.SecretManagerSecretIamUpdaterProducer, secretmanager.SecretManagerSecretIdParseFunc), + "google_secret_manager_secret_version": secretmanager.ResourceSecretManagerSecretVersion(), + "google_scc_mute_config": securitycenter.ResourceSecurityCenterMuteConfig(), + "google_scc_notification_config": securitycenter.ResourceSecurityCenterNotificationConfig(), + "google_scc_source": securitycenter.ResourceSecurityCenterSource(), + "google_scc_source_iam_binding": 
tpgiamresource.ResourceIamBinding(securitycenter.SecurityCenterSourceIamSchema, securitycenter.SecurityCenterSourceIamUpdaterProducer, securitycenter.SecurityCenterSourceIdParseFunc), + "google_scc_source_iam_member": tpgiamresource.ResourceIamMember(securitycenter.SecurityCenterSourceIamSchema, securitycenter.SecurityCenterSourceIamUpdaterProducer, securitycenter.SecurityCenterSourceIdParseFunc), + "google_scc_source_iam_policy": tpgiamresource.ResourceIamPolicy(securitycenter.SecurityCenterSourceIamSchema, securitycenter.SecurityCenterSourceIamUpdaterProducer, securitycenter.SecurityCenterSourceIdParseFunc), + "google_endpoints_service_iam_binding": tpgiamresource.ResourceIamBinding(servicemanagement.ServiceManagementServiceIamSchema, servicemanagement.ServiceManagementServiceIamUpdaterProducer, servicemanagement.ServiceManagementServiceIdParseFunc), + "google_endpoints_service_iam_member": tpgiamresource.ResourceIamMember(servicemanagement.ServiceManagementServiceIamSchema, servicemanagement.ServiceManagementServiceIamUpdaterProducer, servicemanagement.ServiceManagementServiceIdParseFunc), + "google_endpoints_service_iam_policy": tpgiamresource.ResourceIamPolicy(servicemanagement.ServiceManagementServiceIamSchema, servicemanagement.ServiceManagementServiceIamUpdaterProducer, servicemanagement.ServiceManagementServiceIdParseFunc), + "google_endpoints_service_consumers_iam_binding": tpgiamresource.ResourceIamBinding(servicemanagement.ServiceManagementServiceConsumersIamSchema, servicemanagement.ServiceManagementServiceConsumersIamUpdaterProducer, servicemanagement.ServiceManagementServiceConsumersIdParseFunc), + "google_endpoints_service_consumers_iam_member": tpgiamresource.ResourceIamMember(servicemanagement.ServiceManagementServiceConsumersIamSchema, servicemanagement.ServiceManagementServiceConsumersIamUpdaterProducer, servicemanagement.ServiceManagementServiceConsumersIdParseFunc), + "google_endpoints_service_consumers_iam_policy": 
tpgiamresource.ResourceIamPolicy(servicemanagement.ServiceManagementServiceConsumersIamSchema, servicemanagement.ServiceManagementServiceConsumersIamUpdaterProducer, servicemanagement.ServiceManagementServiceConsumersIdParseFunc), + "google_sourcerepo_repository": sourcerepo.ResourceSourceRepoRepository(), + "google_sourcerepo_repository_iam_binding": tpgiamresource.ResourceIamBinding(sourcerepo.SourceRepoRepositoryIamSchema, sourcerepo.SourceRepoRepositoryIamUpdaterProducer, sourcerepo.SourceRepoRepositoryIdParseFunc), + "google_sourcerepo_repository_iam_member": tpgiamresource.ResourceIamMember(sourcerepo.SourceRepoRepositoryIamSchema, sourcerepo.SourceRepoRepositoryIamUpdaterProducer, sourcerepo.SourceRepoRepositoryIdParseFunc), + "google_sourcerepo_repository_iam_policy": tpgiamresource.ResourceIamPolicy(sourcerepo.SourceRepoRepositoryIamSchema, sourcerepo.SourceRepoRepositoryIamUpdaterProducer, sourcerepo.SourceRepoRepositoryIdParseFunc), + "google_spanner_database": spanner.ResourceSpannerDatabase(), + "google_spanner_instance": spanner.ResourceSpannerInstance(), + "google_sql_database": sql.ResourceSQLDatabase(), + "google_sql_source_representation_instance": sql.ResourceSQLSourceRepresentationInstance(), + "google_storage_bucket_iam_binding": tpgiamresource.ResourceIamBinding(storage.StorageBucketIamSchema, storage.StorageBucketIamUpdaterProducer, storage.StorageBucketIdParseFunc), + "google_storage_bucket_iam_member": tpgiamresource.ResourceIamMember(storage.StorageBucketIamSchema, storage.StorageBucketIamUpdaterProducer, storage.StorageBucketIdParseFunc), + "google_storage_bucket_iam_policy": tpgiamresource.ResourceIamPolicy(storage.StorageBucketIamSchema, storage.StorageBucketIamUpdaterProducer, storage.StorageBucketIdParseFunc), + "google_storage_bucket_access_control": storage.ResourceStorageBucketAccessControl(), + "google_storage_default_object_access_control": storage.ResourceStorageDefaultObjectAccessControl(), + "google_storage_hmac_key": 
storage.ResourceStorageHmacKey(), + "google_storage_object_access_control": storage.ResourceStorageObjectAccessControl(), + "google_storage_transfer_agent_pool": storagetransfer.ResourceStorageTransferAgentPool(), + "google_tags_tag_binding": tags.ResourceTagsTagBinding(), + "google_tags_tag_key": tags.ResourceTagsTagKey(), + "google_tags_tag_key_iam_binding": tpgiamresource.ResourceIamBinding(tags.TagsTagKeyIamSchema, tags.TagsTagKeyIamUpdaterProducer, tags.TagsTagKeyIdParseFunc), + "google_tags_tag_key_iam_member": tpgiamresource.ResourceIamMember(tags.TagsTagKeyIamSchema, tags.TagsTagKeyIamUpdaterProducer, tags.TagsTagKeyIdParseFunc), + "google_tags_tag_key_iam_policy": tpgiamresource.ResourceIamPolicy(tags.TagsTagKeyIamSchema, tags.TagsTagKeyIamUpdaterProducer, tags.TagsTagKeyIdParseFunc), + "google_tags_tag_value": tags.ResourceTagsTagValue(), + "google_tags_tag_value_iam_binding": tpgiamresource.ResourceIamBinding(tags.TagsTagValueIamSchema, tags.TagsTagValueIamUpdaterProducer, tags.TagsTagValueIdParseFunc), + "google_tags_tag_value_iam_member": tpgiamresource.ResourceIamMember(tags.TagsTagValueIamSchema, tags.TagsTagValueIamUpdaterProducer, tags.TagsTagValueIdParseFunc), + "google_tags_tag_value_iam_policy": tpgiamresource.ResourceIamPolicy(tags.TagsTagValueIamSchema, tags.TagsTagValueIamUpdaterProducer, tags.TagsTagValueIdParseFunc), + "google_tpu_node": tpu.ResourceTPUNode(), + "google_vertex_ai_dataset": vertexai.ResourceVertexAIDataset(), + "google_vertex_ai_endpoint": vertexai.ResourceVertexAIEndpoint(), + "google_vertex_ai_featurestore": vertexai.ResourceVertexAIFeaturestore(), + "google_vertex_ai_featurestore_entitytype": vertexai.ResourceVertexAIFeaturestoreEntitytype(), + "google_vertex_ai_featurestore_entitytype_feature": vertexai.ResourceVertexAIFeaturestoreEntitytypeFeature(), + "google_vertex_ai_index": vertexai.ResourceVertexAIIndex(), + "google_vertex_ai_index_endpoint": vertexai.ResourceVertexAIIndexEndpoint(), + 
"google_vertex_ai_tensorboard": vertexai.ResourceVertexAITensorboard(), + "google_vpc_access_connector": vpcaccess.ResourceVPCAccessConnector(), + "google_workflows_workflow": workflows.ResourceWorkflowsWorkflow(), + }, + map[string]*schema.Resource{ + // ####### START handwritten resources ########### + "google_app_engine_application": appengine.ResourceAppEngineApplication(), + "google_apigee_sharedflow": apigee.ResourceApigeeSharedFlow(), + "google_apigee_sharedflow_deployment": apigee.ResourceApigeeSharedFlowDeployment(), + "google_apigee_flowhook": apigee.ResourceApigeeFlowhook(), + "google_apigee_keystores_aliases_pkcs12": apigee.ResourceApigeeKeystoresAliasesPkcs12(), + "google_apigee_keystores_aliases_key_cert_file": apigee.ResourceApigeeKeystoresAliasesKeyCertFile(), + "google_bigquery_table": bigquery.ResourceBigQueryTable(), + "google_bigtable_gc_policy": bigtable.ResourceBigtableGCPolicy(), + "google_bigtable_instance": bigtable.ResourceBigtableInstance(), + "google_bigtable_table": bigtable.ResourceBigtableTable(), + "google_billing_subaccount": resourcemanager.ResourceBillingSubaccount(), + "google_cloudfunctions_function": cloudfunctions.ResourceCloudFunctionsFunction(), + "google_composer_environment": composer.ResourceComposerEnvironment(), + "google_compute_attached_disk": compute.ResourceComputeAttachedDisk(), + "google_compute_instance": compute.ResourceComputeInstance(), + "google_compute_disk_async_replication": compute.ResourceComputeDiskAsyncReplication(), + "google_compute_instance_from_template": compute.ResourceComputeInstanceFromTemplate(), + "google_compute_instance_group": compute.ResourceComputeInstanceGroup(), + "google_compute_instance_group_manager": compute.ResourceComputeInstanceGroupManager(), + "google_compute_instance_template": compute.ResourceComputeInstanceTemplate(), + "google_compute_network_peering": compute.ResourceComputeNetworkPeering(), + "google_compute_project_default_network_tier": 
compute.ResourceComputeProjectDefaultNetworkTier(), + "google_compute_project_metadata": compute.ResourceComputeProjectMetadata(), + "google_compute_project_metadata_item": compute.ResourceComputeProjectMetadataItem(), + "google_compute_region_instance_group_manager": compute.ResourceComputeRegionInstanceGroupManager(), + "google_compute_router_interface": compute.ResourceComputeRouterInterface(), + "google_compute_security_policy": compute.ResourceComputeSecurityPolicy(), + "google_compute_shared_vpc_host_project": compute.ResourceComputeSharedVpcHostProject(), + "google_compute_shared_vpc_service_project": compute.ResourceComputeSharedVpcServiceProject(), + "google_compute_target_pool": compute.ResourceComputeTargetPool(), + "google_container_cluster": container.ResourceContainerCluster(), + "google_container_node_pool": container.ResourceContainerNodePool(), + "google_container_registry": containeranalysis.ResourceContainerRegistry(), + "google_dataflow_job": dataflow.ResourceDataflowJob(), + "google_dataproc_cluster": dataproc.ResourceDataprocCluster(), + "google_dataproc_job": dataproc.ResourceDataprocJob(), + "google_dialogflow_cx_version": dialogflowcx.ResourceDialogflowCXVersion(), + "google_dialogflow_cx_environment": dialogflowcx.ResourceDialogflowCXEnvironment(), + "google_dns_record_set": dns.ResourceDnsRecordSet(), + "google_endpoints_service": servicemanagement.ResourceEndpointsService(), + "google_folder": resourcemanager.ResourceGoogleFolder(), + "google_folder_organization_policy": resourcemanager.ResourceGoogleFolderOrganizationPolicy(), + "google_logging_billing_account_sink": logging.ResourceLoggingBillingAccountSink(), + "google_logging_billing_account_exclusion": logging.ResourceLoggingExclusion(logging.BillingAccountLoggingExclusionSchema, logging.NewBillingAccountLoggingExclusionUpdater, logging.BillingAccountLoggingExclusionIdParseFunc), + "google_logging_billing_account_bucket_config": logging.ResourceLoggingBillingAccountBucketConfig(), + 
"google_logging_organization_sink": logging.ResourceLoggingOrganizationSink(), + "google_logging_organization_exclusion": logging.ResourceLoggingExclusion(logging.OrganizationLoggingExclusionSchema, logging.NewOrganizationLoggingExclusionUpdater, logging.OrganizationLoggingExclusionIdParseFunc), + "google_logging_organization_bucket_config": logging.ResourceLoggingOrganizationBucketConfig(), + "google_logging_folder_sink": logging.ResourceLoggingFolderSink(), + "google_logging_folder_exclusion": logging.ResourceLoggingExclusion(logging.FolderLoggingExclusionSchema, logging.NewFolderLoggingExclusionUpdater, logging.FolderLoggingExclusionIdParseFunc), + "google_logging_folder_bucket_config": logging.ResourceLoggingFolderBucketConfig(), + "google_logging_project_sink": logging.ResourceLoggingProjectSink(), + "google_logging_project_exclusion": logging.ResourceLoggingExclusion(logging.ProjectLoggingExclusionSchema, logging.NewProjectLoggingExclusionUpdater, logging.ProjectLoggingExclusionIdParseFunc), + "google_logging_project_bucket_config": logging.ResourceLoggingProjectBucketConfig(), + "google_monitoring_dashboard": monitoring.ResourceMonitoringDashboard(), + "google_os_config_os_policy_assignment": osconfig.ResourceOSConfigOSPolicyAssignment(), + "google_service_networking_connection": servicenetworking.ResourceServiceNetworkingConnection(), + "google_sql_database_instance": sql.ResourceSqlDatabaseInstance(), + "google_sql_ssl_cert": sql.ResourceSqlSslCert(), + "google_sql_user": sql.ResourceSqlUser(), + "google_organization_iam_custom_role": resourcemanager.ResourceGoogleOrganizationIamCustomRole(), + "google_organization_policy": resourcemanager.ResourceGoogleOrganizationPolicy(), + "google_project": resourcemanager.ResourceGoogleProject(), + "google_project_default_service_accounts": resourcemanager.ResourceGoogleProjectDefaultServiceAccounts(), + "google_project_service": resourcemanager.ResourceGoogleProjectService(), + "google_project_iam_custom_role": 
resourcemanager.ResourceGoogleProjectIamCustomRole(), + "google_project_organization_policy": resourcemanager.ResourceGoogleProjectOrganizationPolicy(), + "google_project_usage_export_bucket": compute.ResourceProjectUsageBucket(), + "google_service_account": resourcemanager.ResourceGoogleServiceAccount(), + "google_service_account_key": resourcemanager.ResourceGoogleServiceAccountKey(), + "google_service_networking_peered_dns_domain": servicenetworking.ResourceGoogleServiceNetworkingPeeredDNSDomain(), + "google_storage_bucket": storage.ResourceStorageBucket(), + "google_storage_bucket_acl": storage.ResourceStorageBucketAcl(), + "google_storage_bucket_object": storage.ResourceStorageBucketObject(), + "google_storage_object_acl": storage.ResourceStorageObjectAcl(), + "google_storage_default_object_acl": storage.ResourceStorageDefaultObjectAcl(), + "google_storage_notification": storage.ResourceStorageNotification(), + "google_storage_transfer_job": storagetransfer.ResourceStorageTransferJob(), + "google_tags_location_tag_binding": tags.ResourceTagsLocationTagBinding(), + // ####### END handwritten resources ########### + }, + map[string]*schema.Resource{ + // ####### START non-generated IAM resources ########### + "google_bigtable_instance_iam_binding": tpgiamresource.ResourceIamBinding(bigtable.IamBigtableInstanceSchema, bigtable.NewBigtableInstanceUpdater, bigtable.BigtableInstanceIdParseFunc), + "google_bigtable_instance_iam_member": tpgiamresource.ResourceIamMember(bigtable.IamBigtableInstanceSchema, bigtable.NewBigtableInstanceUpdater, bigtable.BigtableInstanceIdParseFunc), + "google_bigtable_instance_iam_policy": tpgiamresource.ResourceIamPolicy(bigtable.IamBigtableInstanceSchema, bigtable.NewBigtableInstanceUpdater, bigtable.BigtableInstanceIdParseFunc), + "google_bigtable_table_iam_binding": tpgiamresource.ResourceIamBinding(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater, bigtable.BigtableTableIdParseFunc), + 
"google_bigtable_table_iam_member": tpgiamresource.ResourceIamMember(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater, bigtable.BigtableTableIdParseFunc), + "google_bigtable_table_iam_policy": tpgiamresource.ResourceIamPolicy(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater, bigtable.BigtableTableIdParseFunc), + "google_bigquery_dataset_iam_binding": tpgiamresource.ResourceIamBinding(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater, bigquery.BigqueryDatasetIdParseFunc), + "google_bigquery_dataset_iam_member": tpgiamresource.ResourceIamMember(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater, bigquery.BigqueryDatasetIdParseFunc), + "google_bigquery_dataset_iam_policy": tpgiamresource.ResourceIamPolicy(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater, bigquery.BigqueryDatasetIdParseFunc), + "google_billing_account_iam_binding": tpgiamresource.ResourceIamBinding(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater, billing.BillingAccountIdParseFunc), + "google_billing_account_iam_member": tpgiamresource.ResourceIamMember(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater, billing.BillingAccountIdParseFunc), + "google_billing_account_iam_policy": tpgiamresource.ResourceIamPolicy(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater, billing.BillingAccountIdParseFunc), + "google_dataproc_cluster_iam_binding": tpgiamresource.ResourceIamBinding(dataproc.IamDataprocClusterSchema, dataproc.NewDataprocClusterUpdater, dataproc.DataprocClusterIdParseFunc), + "google_dataproc_cluster_iam_member": tpgiamresource.ResourceIamMember(dataproc.IamDataprocClusterSchema, dataproc.NewDataprocClusterUpdater, dataproc.DataprocClusterIdParseFunc), + "google_dataproc_cluster_iam_policy": tpgiamresource.ResourceIamPolicy(dataproc.IamDataprocClusterSchema, dataproc.NewDataprocClusterUpdater, dataproc.DataprocClusterIdParseFunc), + 
"google_dataproc_job_iam_binding": tpgiamresource.ResourceIamBinding(dataproc.IamDataprocJobSchema, dataproc.NewDataprocJobUpdater, dataproc.DataprocJobIdParseFunc), + "google_dataproc_job_iam_member": tpgiamresource.ResourceIamMember(dataproc.IamDataprocJobSchema, dataproc.NewDataprocJobUpdater, dataproc.DataprocJobIdParseFunc), + "google_dataproc_job_iam_policy": tpgiamresource.ResourceIamPolicy(dataproc.IamDataprocJobSchema, dataproc.NewDataprocJobUpdater, dataproc.DataprocJobIdParseFunc), + "google_folder_iam_binding": tpgiamresource.ResourceIamBinding(resourcemanager.IamFolderSchema, resourcemanager.NewFolderIamUpdater, resourcemanager.FolderIdParseFunc), + "google_folder_iam_member": tpgiamresource.ResourceIamMember(resourcemanager.IamFolderSchema, resourcemanager.NewFolderIamUpdater, resourcemanager.FolderIdParseFunc), + "google_folder_iam_policy": tpgiamresource.ResourceIamPolicy(resourcemanager.IamFolderSchema, resourcemanager.NewFolderIamUpdater, resourcemanager.FolderIdParseFunc), + "google_folder_iam_audit_config": tpgiamresource.ResourceIamAuditConfig(resourcemanager.IamFolderSchema, resourcemanager.NewFolderIamUpdater, resourcemanager.FolderIdParseFunc), + "google_healthcare_dataset_iam_binding": tpgiamresource.ResourceIamBinding(healthcare.IamHealthcareDatasetSchema, healthcare.NewHealthcareDatasetIamUpdater, healthcare.DatasetIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_dataset_iam_member": tpgiamresource.ResourceIamMember(healthcare.IamHealthcareDatasetSchema, healthcare.NewHealthcareDatasetIamUpdater, healthcare.DatasetIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_dataset_iam_policy": tpgiamresource.ResourceIamPolicy(healthcare.IamHealthcareDatasetSchema, healthcare.NewHealthcareDatasetIamUpdater, healthcare.DatasetIdParseFunc), + "google_healthcare_dicom_store_iam_binding": tpgiamresource.ResourceIamBinding(healthcare.IamHealthcareDicomStoreSchema, healthcare.NewHealthcareDicomStoreIamUpdater, 
healthcare.DicomStoreIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_dicom_store_iam_member": tpgiamresource.ResourceIamMember(healthcare.IamHealthcareDicomStoreSchema, healthcare.NewHealthcareDicomStoreIamUpdater, healthcare.DicomStoreIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_dicom_store_iam_policy": tpgiamresource.ResourceIamPolicy(healthcare.IamHealthcareDicomStoreSchema, healthcare.NewHealthcareDicomStoreIamUpdater, healthcare.DicomStoreIdParseFunc), + "google_healthcare_fhir_store_iam_binding": tpgiamresource.ResourceIamBinding(healthcare.IamHealthcareFhirStoreSchema, healthcare.NewHealthcareFhirStoreIamUpdater, healthcare.FhirStoreIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_fhir_store_iam_member": tpgiamresource.ResourceIamMember(healthcare.IamHealthcareFhirStoreSchema, healthcare.NewHealthcareFhirStoreIamUpdater, healthcare.FhirStoreIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_fhir_store_iam_policy": tpgiamresource.ResourceIamPolicy(healthcare.IamHealthcareFhirStoreSchema, healthcare.NewHealthcareFhirStoreIamUpdater, healthcare.FhirStoreIdParseFunc), + "google_healthcare_hl7_v2_store_iam_binding": tpgiamresource.ResourceIamBinding(healthcare.IamHealthcareHl7V2StoreSchema, healthcare.NewHealthcareHl7V2StoreIamUpdater, healthcare.Hl7V2StoreIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_hl7_v2_store_iam_member": tpgiamresource.ResourceIamMember(healthcare.IamHealthcareHl7V2StoreSchema, healthcare.NewHealthcareHl7V2StoreIamUpdater, healthcare.Hl7V2StoreIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_hl7_v2_store_iam_policy": tpgiamresource.ResourceIamPolicy(healthcare.IamHealthcareHl7V2StoreSchema, healthcare.NewHealthcareHl7V2StoreIamUpdater, healthcare.Hl7V2StoreIdParseFunc), + "google_kms_key_ring_iam_binding": tpgiamresource.ResourceIamBinding(kms.IamKmsKeyRingSchema, kms.NewKmsKeyRingIamUpdater, kms.KeyRingIdParseFunc), + 
"google_kms_key_ring_iam_member": tpgiamresource.ResourceIamMember(kms.IamKmsKeyRingSchema, kms.NewKmsKeyRingIamUpdater, kms.KeyRingIdParseFunc), + "google_kms_key_ring_iam_policy": tpgiamresource.ResourceIamPolicy(kms.IamKmsKeyRingSchema, kms.NewKmsKeyRingIamUpdater, kms.KeyRingIdParseFunc), + "google_kms_crypto_key_iam_binding": tpgiamresource.ResourceIamBinding(kms.IamKmsCryptoKeySchema, kms.NewKmsCryptoKeyIamUpdater, kms.CryptoIdParseFunc), + "google_kms_crypto_key_iam_member": tpgiamresource.ResourceIamMember(kms.IamKmsCryptoKeySchema, kms.NewKmsCryptoKeyIamUpdater, kms.CryptoIdParseFunc), + "google_kms_crypto_key_iam_policy": tpgiamresource.ResourceIamPolicy(kms.IamKmsCryptoKeySchema, kms.NewKmsCryptoKeyIamUpdater, kms.CryptoIdParseFunc), + "google_spanner_instance_iam_binding": tpgiamresource.ResourceIamBinding(spanner.IamSpannerInstanceSchema, spanner.NewSpannerInstanceIamUpdater, spanner.SpannerInstanceIdParseFunc), + "google_spanner_instance_iam_member": tpgiamresource.ResourceIamMember(spanner.IamSpannerInstanceSchema, spanner.NewSpannerInstanceIamUpdater, spanner.SpannerInstanceIdParseFunc), + "google_spanner_instance_iam_policy": tpgiamresource.ResourceIamPolicy(spanner.IamSpannerInstanceSchema, spanner.NewSpannerInstanceIamUpdater, spanner.SpannerInstanceIdParseFunc), + "google_spanner_database_iam_binding": tpgiamresource.ResourceIamBinding(spanner.IamSpannerDatabaseSchema, spanner.NewSpannerDatabaseIamUpdater, spanner.SpannerDatabaseIdParseFunc), + "google_spanner_database_iam_member": tpgiamresource.ResourceIamMember(spanner.IamSpannerDatabaseSchema, spanner.NewSpannerDatabaseIamUpdater, spanner.SpannerDatabaseIdParseFunc), + "google_spanner_database_iam_policy": tpgiamresource.ResourceIamPolicy(spanner.IamSpannerDatabaseSchema, spanner.NewSpannerDatabaseIamUpdater, spanner.SpannerDatabaseIdParseFunc), + "google_organization_iam_binding": tpgiamresource.ResourceIamBinding(resourcemanager.IamOrganizationSchema, 
resourcemanager.NewOrganizationIamUpdater, resourcemanager.OrgIdParseFunc), + "google_organization_iam_member": tpgiamresource.ResourceIamMember(resourcemanager.IamOrganizationSchema, resourcemanager.NewOrganizationIamUpdater, resourcemanager.OrgIdParseFunc), + "google_organization_iam_policy": tpgiamresource.ResourceIamPolicy(resourcemanager.IamOrganizationSchema, resourcemanager.NewOrganizationIamUpdater, resourcemanager.OrgIdParseFunc), + "google_organization_iam_audit_config": tpgiamresource.ResourceIamAuditConfig(resourcemanager.IamOrganizationSchema, resourcemanager.NewOrganizationIamUpdater, resourcemanager.OrgIdParseFunc), + "google_project_iam_policy": tpgiamresource.ResourceIamPolicy(resourcemanager.IamProjectSchema, resourcemanager.NewProjectIamUpdater, resourcemanager.ProjectIdParseFunc), + "google_project_iam_binding": tpgiamresource.ResourceIamBinding(resourcemanager.IamProjectSchema, resourcemanager.NewProjectIamUpdater, resourcemanager.ProjectIdParseFunc, tpgiamresource.IamWithBatching), + "google_project_iam_member": tpgiamresource.ResourceIamMember(resourcemanager.IamProjectSchema, resourcemanager.NewProjectIamUpdater, resourcemanager.ProjectIdParseFunc, tpgiamresource.IamWithBatching), + "google_project_iam_audit_config": tpgiamresource.ResourceIamAuditConfig(resourcemanager.IamProjectSchema, resourcemanager.NewProjectIamUpdater, resourcemanager.ProjectIdParseFunc, tpgiamresource.IamWithBatching), + "google_pubsub_subscription_iam_binding": tpgiamresource.ResourceIamBinding(pubsub.IamPubsubSubscriptionSchema, pubsub.NewPubsubSubscriptionIamUpdater, pubsub.PubsubSubscriptionIdParseFunc), + "google_pubsub_subscription_iam_member": tpgiamresource.ResourceIamMember(pubsub.IamPubsubSubscriptionSchema, pubsub.NewPubsubSubscriptionIamUpdater, pubsub.PubsubSubscriptionIdParseFunc), + "google_pubsub_subscription_iam_policy": tpgiamresource.ResourceIamPolicy(pubsub.IamPubsubSubscriptionSchema, pubsub.NewPubsubSubscriptionIamUpdater, 
pubsub.PubsubSubscriptionIdParseFunc), + "google_service_account_iam_binding": tpgiamresource.ResourceIamBinding(resourcemanager.IamServiceAccountSchema, resourcemanager.NewServiceAccountIamUpdater, resourcemanager.ServiceAccountIdParseFunc), + "google_service_account_iam_member": tpgiamresource.ResourceIamMember(resourcemanager.IamServiceAccountSchema, resourcemanager.NewServiceAccountIamUpdater, resourcemanager.ServiceAccountIdParseFunc), + "google_service_account_iam_policy": tpgiamresource.ResourceIamPolicy(resourcemanager.IamServiceAccountSchema, resourcemanager.NewServiceAccountIamUpdater, resourcemanager.ServiceAccountIdParseFunc), + // ####### END non-generated IAM resources ########### + }, + dclResources, + ) +} + +func providerConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Provider) (interface{}, diag.Diagnostics) { + err := transport_tpg.HandleSDKDefaults(d) + if err != nil { + return nil, diag.FromErr(err) + } + transport_tpg.HandleDCLCustomEndpointDefaults(d) + + config := transport_tpg.Config{ + Project: d.Get("project").(string), + Region: d.Get("region").(string), + Zone: d.Get("zone").(string), + UserProjectOverride: d.Get("user_project_override").(bool), + BillingProject: d.Get("billing_project").(string), + UserAgent: p.UserAgent("terraform-provider-google", version.ProviderVersion), + } + + // opt in extension for adding to the User-Agent header + if ext := os.Getenv("GOOGLE_TERRAFORM_USERAGENT_EXTENSION"); ext != "" { + ua := config.UserAgent + config.UserAgent = fmt.Sprintf("%s %s", ua, ext) + } + + if v, ok := d.GetOk("request_timeout"); ok { + var err error + config.RequestTimeout, err = time.ParseDuration(v.(string)) + if err != nil { + return nil, diag.FromErr(err) + } + } + + if v, ok := d.GetOk("request_reason"); ok { + config.RequestReason = v.(string) + } + + // Check for primary credentials in config. Note that if neither is set, ADCs + // will be used if available. 
+ if v, ok := d.GetOk("access_token"); ok { + config.AccessToken = v.(string) + } + + if v, ok := d.GetOk("credentials"); ok { + config.Credentials = v.(string) + } + + // only check environment variables if neither value was set in config- this + // means config beats env var in all cases. + if config.AccessToken == "" && config.Credentials == "" { + config.Credentials = transport_tpg.MultiEnvSearch([]string{ + "GOOGLE_CREDENTIALS", + "GOOGLE_CLOUD_KEYFILE_JSON", + "GCLOUD_KEYFILE_JSON", + }) + + config.AccessToken = transport_tpg.MultiEnvSearch([]string{ + "GOOGLE_OAUTH_ACCESS_TOKEN", + }) + } + + // Given that impersonate_service_account is a secondary auth method, it has + // no conflicts to worry about. We pull the env var in a DefaultFunc. + if v, ok := d.GetOk("impersonate_service_account"); ok { + config.ImpersonateServiceAccount = v.(string) + } + + delegates := d.Get("impersonate_service_account_delegates").([]interface{}) + if len(delegates) > 0 { + config.ImpersonateServiceAccountDelegates = make([]string, len(delegates)) + } + for i, delegate := range delegates { + config.ImpersonateServiceAccountDelegates[i] = delegate.(string) + } + + scopes := d.Get("scopes").([]interface{}) + if len(scopes) > 0 { + config.Scopes = make([]string, len(scopes)) + } + for i, scope := range scopes { + config.Scopes[i] = scope.(string) + } + + batchCfg, err := transport_tpg.ExpandProviderBatchingConfig(d.Get("batching")) + if err != nil { + return nil, diag.FromErr(err) + } + config.BatchingConfig = batchCfg + + // Generated products + config.AccessApprovalBasePath = d.Get("access_approval_custom_endpoint").(string) + config.AccessContextManagerBasePath = d.Get("access_context_manager_custom_endpoint").(string) + config.ActiveDirectoryBasePath = d.Get("active_directory_custom_endpoint").(string) + config.AlloydbBasePath = d.Get("alloydb_custom_endpoint").(string) + config.ApigeeBasePath = d.Get("apigee_custom_endpoint").(string) + config.AppEngineBasePath = 
d.Get("app_engine_custom_endpoint").(string) + config.ArtifactRegistryBasePath = d.Get("artifact_registry_custom_endpoint").(string) + config.BeyondcorpBasePath = d.Get("beyondcorp_custom_endpoint").(string) + config.BigQueryBasePath = d.Get("big_query_custom_endpoint").(string) + config.BigqueryAnalyticsHubBasePath = d.Get("bigquery_analytics_hub_custom_endpoint").(string) + config.BigqueryConnectionBasePath = d.Get("bigquery_connection_custom_endpoint").(string) + config.BigqueryDatapolicyBasePath = d.Get("bigquery_datapolicy_custom_endpoint").(string) + config.BigqueryDataTransferBasePath = d.Get("bigquery_data_transfer_custom_endpoint").(string) + config.BigqueryReservationBasePath = d.Get("bigquery_reservation_custom_endpoint").(string) + config.BigtableBasePath = d.Get("bigtable_custom_endpoint").(string) + config.BillingBasePath = d.Get("billing_custom_endpoint").(string) + config.BinaryAuthorizationBasePath = d.Get("binary_authorization_custom_endpoint").(string) + config.CertificateManagerBasePath = d.Get("certificate_manager_custom_endpoint").(string) + config.CloudAssetBasePath = d.Get("cloud_asset_custom_endpoint").(string) + config.CloudBuildBasePath = d.Get("cloud_build_custom_endpoint").(string) + config.Cloudbuildv2BasePath = d.Get("cloudbuildv2_custom_endpoint").(string) + config.CloudFunctionsBasePath = d.Get("cloud_functions_custom_endpoint").(string) + config.Cloudfunctions2BasePath = d.Get("cloudfunctions2_custom_endpoint").(string) + config.CloudIdentityBasePath = d.Get("cloud_identity_custom_endpoint").(string) + config.CloudIdsBasePath = d.Get("cloud_ids_custom_endpoint").(string) + config.CloudIotBasePath = d.Get("cloud_iot_custom_endpoint").(string) + config.CloudRunBasePath = d.Get("cloud_run_custom_endpoint").(string) + config.CloudRunV2BasePath = d.Get("cloud_run_v2_custom_endpoint").(string) + config.CloudSchedulerBasePath = d.Get("cloud_scheduler_custom_endpoint").(string) + config.CloudTasksBasePath = 
d.Get("cloud_tasks_custom_endpoint").(string) + config.ComputeBasePath = d.Get("compute_custom_endpoint").(string) + config.ContainerAnalysisBasePath = d.Get("container_analysis_custom_endpoint").(string) + config.ContainerAttachedBasePath = d.Get("container_attached_custom_endpoint").(string) + config.DatabaseMigrationServiceBasePath = d.Get("database_migration_service_custom_endpoint").(string) + config.DataCatalogBasePath = d.Get("data_catalog_custom_endpoint").(string) + config.DataFusionBasePath = d.Get("data_fusion_custom_endpoint").(string) + config.DataLossPreventionBasePath = d.Get("data_loss_prevention_custom_endpoint").(string) + config.DataplexBasePath = d.Get("dataplex_custom_endpoint").(string) + config.DataprocBasePath = d.Get("dataproc_custom_endpoint").(string) + config.DataprocMetastoreBasePath = d.Get("dataproc_metastore_custom_endpoint").(string) + config.DatastoreBasePath = d.Get("datastore_custom_endpoint").(string) + config.DatastreamBasePath = d.Get("datastream_custom_endpoint").(string) + config.DeploymentManagerBasePath = d.Get("deployment_manager_custom_endpoint").(string) + config.DialogflowBasePath = d.Get("dialogflow_custom_endpoint").(string) + config.DialogflowCXBasePath = d.Get("dialogflow_cx_custom_endpoint").(string) + config.DNSBasePath = d.Get("dns_custom_endpoint").(string) + config.DocumentAIBasePath = d.Get("document_ai_custom_endpoint").(string) + config.EssentialContactsBasePath = d.Get("essential_contacts_custom_endpoint").(string) + config.FilestoreBasePath = d.Get("filestore_custom_endpoint").(string) + config.FirestoreBasePath = d.Get("firestore_custom_endpoint").(string) + config.GameServicesBasePath = d.Get("game_services_custom_endpoint").(string) + config.GKEBackupBasePath = d.Get("gke_backup_custom_endpoint").(string) + config.GKEHubBasePath = d.Get("gke_hub_custom_endpoint").(string) + config.GKEHub2BasePath = d.Get("gke_hub2_custom_endpoint").(string) + config.HealthcareBasePath = 
d.Get("healthcare_custom_endpoint").(string) + config.IAM2BasePath = d.Get("iam2_custom_endpoint").(string) + config.IAMBetaBasePath = d.Get("iam_beta_custom_endpoint").(string) + config.IAMWorkforcePoolBasePath = d.Get("iam_workforce_pool_custom_endpoint").(string) + config.IapBasePath = d.Get("iap_custom_endpoint").(string) + config.IdentityPlatformBasePath = d.Get("identity_platform_custom_endpoint").(string) + config.KMSBasePath = d.Get("kms_custom_endpoint").(string) + config.LoggingBasePath = d.Get("logging_custom_endpoint").(string) + config.LookerBasePath = d.Get("looker_custom_endpoint").(string) + config.MemcacheBasePath = d.Get("memcache_custom_endpoint").(string) + config.MLEngineBasePath = d.Get("ml_engine_custom_endpoint").(string) + config.MonitoringBasePath = d.Get("monitoring_custom_endpoint").(string) + config.NetworkManagementBasePath = d.Get("network_management_custom_endpoint").(string) + config.NetworkSecurityBasePath = d.Get("network_security_custom_endpoint").(string) + config.NetworkServicesBasePath = d.Get("network_services_custom_endpoint").(string) + config.NotebooksBasePath = d.Get("notebooks_custom_endpoint").(string) + config.OSConfigBasePath = d.Get("os_config_custom_endpoint").(string) + config.OSLoginBasePath = d.Get("os_login_custom_endpoint").(string) + config.PrivatecaBasePath = d.Get("privateca_custom_endpoint").(string) + config.PublicCABasePath = d.Get("public_ca_custom_endpoint").(string) + config.PubsubBasePath = d.Get("pubsub_custom_endpoint").(string) + config.PubsubLiteBasePath = d.Get("pubsub_lite_custom_endpoint").(string) + config.RedisBasePath = d.Get("redis_custom_endpoint").(string) + config.ResourceManagerBasePath = d.Get("resource_manager_custom_endpoint").(string) + config.SecretManagerBasePath = d.Get("secret_manager_custom_endpoint").(string) + config.SecurityCenterBasePath = d.Get("security_center_custom_endpoint").(string) + config.ServiceManagementBasePath = 
d.Get("service_management_custom_endpoint").(string) + config.ServiceUsageBasePath = d.Get("service_usage_custom_endpoint").(string) + config.SourceRepoBasePath = d.Get("source_repo_custom_endpoint").(string) + config.SpannerBasePath = d.Get("spanner_custom_endpoint").(string) + config.SQLBasePath = d.Get("sql_custom_endpoint").(string) + config.StorageBasePath = d.Get("storage_custom_endpoint").(string) + config.StorageTransferBasePath = d.Get("storage_transfer_custom_endpoint").(string) + config.TagsBasePath = d.Get("tags_custom_endpoint").(string) + config.TPUBasePath = d.Get("tpu_custom_endpoint").(string) + config.VertexAIBasePath = d.Get("vertex_ai_custom_endpoint").(string) + config.VPCAccessBasePath = d.Get("vpc_access_custom_endpoint").(string) + config.WorkflowsBasePath = d.Get("workflows_custom_endpoint").(string) + + // Handwritten Products / Versioned / Atypical Entries + config.CloudBillingBasePath = d.Get(transport_tpg.CloudBillingCustomEndpointEntryKey).(string) + config.ComposerBasePath = d.Get(transport_tpg.ComposerCustomEndpointEntryKey).(string) + config.ContainerBasePath = d.Get(transport_tpg.ContainerCustomEndpointEntryKey).(string) + config.DataflowBasePath = d.Get(transport_tpg.DataflowCustomEndpointEntryKey).(string) + config.IamCredentialsBasePath = d.Get(transport_tpg.IamCredentialsCustomEndpointEntryKey).(string) + config.ResourceManagerV3BasePath = d.Get(transport_tpg.ResourceManagerV3CustomEndpointEntryKey).(string) + config.IAMBasePath = d.Get(transport_tpg.IAMCustomEndpointEntryKey).(string) + config.ServiceNetworkingBasePath = d.Get(transport_tpg.ServiceNetworkingCustomEndpointEntryKey).(string) + config.ServiceUsageBasePath = d.Get(transport_tpg.ServiceUsageCustomEndpointEntryKey).(string) + config.BigtableAdminBasePath = d.Get(transport_tpg.BigtableAdminCustomEndpointEntryKey).(string) + config.TagsLocationBasePath = d.Get(transport_tpg.TagsLocationCustomEndpointEntryKey).(string) + + // dcl + config.ContainerAwsBasePath = 
d.Get(transport_tpg.ContainerAwsCustomEndpointEntryKey).(string) + config.ContainerAzureBasePath = d.Get(transport_tpg.ContainerAzureCustomEndpointEntryKey).(string) + + stopCtx, ok := schema.StopContext(ctx) + if !ok { + stopCtx = ctx + } + if err := config.LoadAndValidate(stopCtx); err != nil { + return nil, diag.FromErr(err) + } + + return transport_tpg.ProviderDCLConfigure(d, &config), nil +} + +func validateCredentials(v interface{}, k string) (warnings []string, errors []error) { + if v == nil || v.(string) == "" { + return + } + creds := v.(string) + // if this is a path and we can stat it, assume it's ok + if _, err := os.Stat(creds); err == nil { + return + } + if _, err := googleoauth.CredentialsFromJSON(context.Background(), []byte(creds)); err != nil { + errors = append(errors, + fmt.Errorf("JSON credentials are not valid: %s", err)) + } + + return +} + +func mergeResourceMaps(ms ...map[string]*schema.Resource) (map[string]*schema.Resource, error) { + merged := make(map[string]*schema.Resource) + duplicates := []string{} + + for _, m := range ms { + for k, v := range m { + if _, ok := merged[k]; ok { + duplicates = append(duplicates, k) + } + + merged[k] = v + } + } + + var err error + if len(duplicates) > 0 { + err = fmt.Errorf("saw duplicates in mergeResourceMaps: %v", duplicates) + } + + return merged, err +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider_dcl_resources.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider_dcl_resources.go new file mode 100644 index 0000000000..a4c6008b20 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider/provider_dcl_resources.go @@ -0,0 +1,79 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package provider + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/services/apikeys" + "github.com/hashicorp/terraform-provider-google/google/services/assuredworkloads" + "github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation" + "github.com/hashicorp/terraform-provider-google/google/services/cloudbuild" + "github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2" + "github.com/hashicorp/terraform-provider-google/google/services/clouddeploy" + "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/services/containeraws" + "github.com/hashicorp/terraform-provider-google/google/services/containerazure" + "github.com/hashicorp/terraform-provider-google/google/services/dataplex" + "github.com/hashicorp/terraform-provider-google/google/services/dataproc" + "github.com/hashicorp/terraform-provider-google/google/services/eventarc" + "github.com/hashicorp/terraform-provider-google/google/services/firebaserules" + 
"github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity" + "github.com/hashicorp/terraform-provider-google/google/services/orgpolicy" + "github.com/hashicorp/terraform-provider-google/google/services/privateca" + "github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise" +) + +var dclResources = map[string]*schema.Resource{ + "google_apikeys_key": apikeys.ResourceApikeysKey(), + "google_assured_workloads_workload": assuredworkloads.ResourceAssuredWorkloadsWorkload(), + "google_bigquery_reservation_assignment": bigqueryreservation.ResourceBigqueryReservationAssignment(), + "google_cloudbuild_worker_pool": cloudbuild.ResourceCloudbuildWorkerPool(), + "google_cloudbuildv2_connection": cloudbuildv2.ResourceCloudbuildv2Connection(), + "google_cloudbuildv2_repository": cloudbuildv2.ResourceCloudbuildv2Repository(), + "google_clouddeploy_delivery_pipeline": clouddeploy.ResourceClouddeployDeliveryPipeline(), + "google_clouddeploy_target": clouddeploy.ResourceClouddeployTarget(), + "google_compute_firewall_policy": compute.ResourceComputeFirewallPolicy(), + "google_compute_firewall_policy_association": compute.ResourceComputeFirewallPolicyAssociation(), + "google_compute_firewall_policy_rule": compute.ResourceComputeFirewallPolicyRule(), + "google_compute_region_network_firewall_policy": compute.ResourceComputeRegionNetworkFirewallPolicy(), + "google_compute_network_firewall_policy": compute.ResourceComputeNetworkFirewallPolicy(), + "google_compute_network_firewall_policy_association": compute.ResourceComputeNetworkFirewallPolicyAssociation(), + "google_compute_region_network_firewall_policy_association": compute.ResourceComputeRegionNetworkFirewallPolicyAssociation(), + "google_compute_network_firewall_policy_rule": compute.ResourceComputeNetworkFirewallPolicyRule(), + "google_compute_region_network_firewall_policy_rule": compute.ResourceComputeRegionNetworkFirewallPolicyRule(), + "google_container_aws_cluster": 
containeraws.ResourceContainerAwsCluster(), + "google_container_aws_node_pool": containeraws.ResourceContainerAwsNodePool(), + "google_container_azure_client": containerazure.ResourceContainerAzureClient(), + "google_container_azure_cluster": containerazure.ResourceContainerAzureCluster(), + "google_container_azure_node_pool": containerazure.ResourceContainerAzureNodePool(), + "google_dataplex_asset": dataplex.ResourceDataplexAsset(), + "google_dataplex_lake": dataplex.ResourceDataplexLake(), + "google_dataplex_zone": dataplex.ResourceDataplexZone(), + "google_dataproc_workflow_template": dataproc.ResourceDataprocWorkflowTemplate(), + "google_eventarc_channel": eventarc.ResourceEventarcChannel(), + "google_eventarc_google_channel_config": eventarc.ResourceEventarcGoogleChannelConfig(), + "google_eventarc_trigger": eventarc.ResourceEventarcTrigger(), + "google_firebaserules_release": firebaserules.ResourceFirebaserulesRelease(), + "google_firebaserules_ruleset": firebaserules.ResourceFirebaserulesRuleset(), + "google_network_connectivity_hub": networkconnectivity.ResourceNetworkConnectivityHub(), + "google_network_connectivity_spoke": networkconnectivity.ResourceNetworkConnectivitySpoke(), + "google_org_policy_policy": orgpolicy.ResourceOrgPolicyPolicy(), + "google_privateca_certificate_template": privateca.ResourcePrivatecaCertificateTemplate(), + "google_recaptcha_enterprise_key": recaptchaenterprise.ResourceRecaptchaEnterpriseKey(), +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider_dcl_endpoints.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider_dcl_endpoints.go deleted file mode 100644 index 9ee8e47875..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider_dcl_endpoints.go +++ /dev/null @@ -1,154 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// 
*** AUTO GENERATED CODE *** Type: DCL *** -// -// ---------------------------------------------------------------------------- -// -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. -// -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// empty string is passed for dcl default since dcl -// [hardcodes the values](https://github.com/GoogleCloudPlatform/declarative-resource-client-library/blob/main/services/google/eventarc/beta/trigger_internal.go#L96-L103) - -var ApikeysEndpointEntryKey = "apikeys_custom_endpoint" -var ApikeysEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_APIKEYS_CUSTOM_ENDPOINT", - }, ""), -} - -var AssuredWorkloadsEndpointEntryKey = "assured_workloads_custom_endpoint" -var AssuredWorkloadsEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ASSURED_WORKLOADS_CUSTOM_ENDPOINT", - }, ""), -} - -var CloudBuildWorkerPoolEndpointEntryKey = "cloud_build_worker_pool_custom_endpoint" -var CloudBuildWorkerPoolEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_BUILD_WORKER_POOL_CUSTOM_ENDPOINT", - }, ""), -} - -var ClouddeployEndpointEntryKey = "clouddeploy_custom_endpoint" -var ClouddeployEndpointEntry = &schema.Schema{ - Type: 
schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUDDEPLOY_CUSTOM_ENDPOINT", - }, ""), -} - -var CloudResourceManagerEndpointEntryKey = "cloud_resource_manager_custom_endpoint" -var CloudResourceManagerEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_RESOURCE_MANAGER_CUSTOM_ENDPOINT", - }, ""), -} - -var EventarcEndpointEntryKey = "eventarc_custom_endpoint" -var EventarcEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_EVENTARC_CUSTOM_ENDPOINT", - }, ""), -} - -var FirebaserulesEndpointEntryKey = "firebaserules_custom_endpoint" -var FirebaserulesEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_FIREBASERULES_CUSTOM_ENDPOINT", - }, ""), -} - -var NetworkConnectivityEndpointEntryKey = "network_connectivity_custom_endpoint" -var NetworkConnectivityEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_NETWORK_CONNECTIVITY_CUSTOM_ENDPOINT", - }, ""), -} - -var OrgPolicyEndpointEntryKey = "org_policy_custom_endpoint" -var OrgPolicyEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ORG_POLICY_CUSTOM_ENDPOINT", - }, ""), -} - -var RecaptchaEnterpriseEndpointEntryKey = "recaptcha_enterprise_custom_endpoint" -var RecaptchaEnterpriseEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_RECAPTCHA_ENTERPRISE_CUSTOM_ENDPOINT", - }, ""), -} - -type DCLConfig struct { - ApikeysBasePath string - AssuredWorkloadsBasePath string - CloudBuildWorkerPoolBasePath string - ClouddeployBasePath string - 
CloudResourceManagerBasePath string - EventarcBasePath string - FirebaserulesBasePath string - NetworkConnectivityBasePath string - OrgPolicyBasePath string - RecaptchaEnterpriseBasePath string -} - -func ConfigureDCLProvider(provider *schema.Provider) { - provider.Schema[ApikeysEndpointEntryKey] = ApikeysEndpointEntry - provider.Schema[AssuredWorkloadsEndpointEntryKey] = AssuredWorkloadsEndpointEntry - provider.Schema[CloudBuildWorkerPoolEndpointEntryKey] = CloudBuildWorkerPoolEndpointEntry - provider.Schema[ClouddeployEndpointEntryKey] = ClouddeployEndpointEntry - provider.Schema[CloudResourceManagerEndpointEntryKey] = CloudResourceManagerEndpointEntry - provider.Schema[EventarcEndpointEntryKey] = EventarcEndpointEntry - provider.Schema[FirebaserulesEndpointEntryKey] = FirebaserulesEndpointEntry - provider.Schema[NetworkConnectivityEndpointEntryKey] = NetworkConnectivityEndpointEntry - provider.Schema[OrgPolicyEndpointEntryKey] = OrgPolicyEndpointEntry - provider.Schema[RecaptchaEnterpriseEndpointEntryKey] = RecaptchaEnterpriseEndpointEntry -} - -func ProviderDCLConfigure(d *schema.ResourceData, config *Config) interface{} { - config.ApikeysBasePath = d.Get(ApikeysEndpointEntryKey).(string) - config.AssuredWorkloadsBasePath = d.Get(AssuredWorkloadsEndpointEntryKey).(string) - config.CloudBuildWorkerPoolBasePath = d.Get(CloudBuildWorkerPoolEndpointEntryKey).(string) - config.ClouddeployBasePath = d.Get(ClouddeployEndpointEntryKey).(string) - config.CloudResourceManagerBasePath = d.Get(CloudResourceManagerEndpointEntryKey).(string) - config.EventarcBasePath = d.Get(EventarcEndpointEntryKey).(string) - config.FirebaserulesBasePath = d.Get(FirebaserulesEndpointEntryKey).(string) - config.NetworkConnectivityBasePath = d.Get(NetworkConnectivityEndpointEntryKey).(string) - config.OrgPolicyBasePath = d.Get(OrgPolicyEndpointEntryKey).(string) - config.RecaptchaEnterpriseBasePath = d.Get(RecaptchaEnterpriseEndpointEntryKey).(string) - config.CloudBuildWorkerPoolBasePath = 
d.Get(CloudBuildWorkerPoolEndpointEntryKey).(string) - return config -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider_dcl_resources.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider_dcl_resources.go deleted file mode 100644 index c31509de39..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider_dcl_resources.go +++ /dev/null @@ -1,60 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: DCL *** -// -// ---------------------------------------------------------------------------- -// -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. -// -// We are not currently able to accept contributions to this file. 
If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -var dclResources = map[string]*schema.Resource{ - "google_apikeys_key": ResourceApikeysKey(), - "google_assured_workloads_workload": ResourceAssuredWorkloadsWorkload(), - "google_bigquery_reservation_assignment": ResourceBigqueryReservationAssignment(), - "google_cloudbuild_worker_pool": ResourceCloudbuildWorkerPool(), - "google_clouddeploy_delivery_pipeline": ResourceClouddeployDeliveryPipeline(), - "google_clouddeploy_target": ResourceClouddeployTarget(), - "google_compute_firewall_policy": ResourceComputeFirewallPolicy(), - "google_compute_firewall_policy_association": ResourceComputeFirewallPolicyAssociation(), - "google_compute_firewall_policy_rule": ResourceComputeFirewallPolicyRule(), - "google_compute_region_network_firewall_policy": ResourceComputeRegionNetworkFirewallPolicy(), - "google_compute_network_firewall_policy": ResourceComputeNetworkFirewallPolicy(), - "google_compute_network_firewall_policy_association": ResourceComputeNetworkFirewallPolicyAssociation(), - "google_compute_region_network_firewall_policy_association": ResourceComputeRegionNetworkFirewallPolicyAssociation(), - "google_compute_network_firewall_policy_rule": ResourceComputeNetworkFirewallPolicyRule(), - "google_compute_region_network_firewall_policy_rule": ResourceComputeRegionNetworkFirewallPolicyRule(), - "google_container_aws_cluster": ResourceContainerAwsCluster(), - "google_container_aws_node_pool": ResourceContainerAwsNodePool(), - "google_container_azure_client": ResourceContainerAzureClient(), - "google_container_azure_cluster": ResourceContainerAzureCluster(), - "google_container_azure_node_pool": ResourceContainerAzureNodePool(), - "google_dataplex_asset": 
ResourceDataplexAsset(), - "google_dataplex_lake": ResourceDataplexLake(), - "google_dataplex_zone": ResourceDataplexZone(), - "google_dataproc_workflow_template": ResourceDataprocWorkflowTemplate(), - "google_eventarc_channel": ResourceEventarcChannel(), - "google_eventarc_google_channel_config": ResourceEventarcGoogleChannelConfig(), - "google_eventarc_trigger": ResourceEventarcTrigger(), - "google_firebaserules_release": ResourceFirebaserulesRelease(), - "google_firebaserules_ruleset": ResourceFirebaserulesRuleset(), - "google_logging_log_view": ResourceLoggingLogView(), - "google_monitoring_monitored_project": ResourceMonitoringMonitoredProject(), - "google_network_connectivity_hub": ResourceNetworkConnectivityHub(), - "google_network_connectivity_spoke": ResourceNetworkConnectivitySpoke(), - "google_org_policy_policy": ResourceOrgPolicyPolicy(), - "google_os_config_os_policy_assignment": ResourceOsConfigOsPolicyAssignment(), - "google_privateca_certificate_template": ResourcePrivatecaCertificateTemplate(), - "google_recaptcha_enterprise_key": ResourceRecaptchaEnterpriseKey(), -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider_handwritten_endpoint.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider_handwritten_endpoint.go deleted file mode 100644 index 0f1faba2f2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider_handwritten_endpoint.go +++ /dev/null @@ -1,154 +0,0 @@ -package google - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// For generated resources, endpoint entries live in product-specific provider -// files. Collect handwritten ones here. If any of these are modified, be sure -// to update the provider_reference docs page. 
- -var CloudBillingCustomEndpointEntryKey = "cloud_billing_custom_endpoint" -var CloudBillingCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_BILLING_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudBillingBasePathKey]), -} - -var ComposerCustomEndpointEntryKey = "composer_custom_endpoint" -var ComposerCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_COMPOSER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ComposerBasePathKey]), -} - -var ContainerCustomEndpointEntryKey = "container_custom_endpoint" -var ContainerCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CONTAINER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ContainerBasePathKey]), -} - -var DataflowCustomEndpointEntryKey = "dataflow_custom_endpoint" -var DataflowCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATAFLOW_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DataflowBasePathKey]), -} - -var IAMCustomEndpointEntryKey = "iam_custom_endpoint" -var IAMCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_IAM_CUSTOM_ENDPOINT", - }, DefaultBasePaths[IAMBasePathKey]), -} - -var IamCredentialsCustomEndpointEntryKey = "iam_credentials_custom_endpoint" -var IamCredentialsCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - 
"GOOGLE_IAM_CREDENTIALS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[IamCredentialsBasePathKey]), -} - -var ResourceManagerV3CustomEndpointEntryKey = "resource_manager_v3_custom_endpoint" -var ResourceManagerV3CustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_RESOURCE_MANAGER_V3_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ResourceManagerV3BasePathKey]), -} - -var ServiceNetworkingCustomEndpointEntryKey = "service_networking_custom_endpoint" -var ServiceNetworkingCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SERVICE_NETWORKING_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ServiceNetworkingBasePathKey]), -} - -var ServiceUsageCustomEndpointEntryKey = "service_usage_custom_endpoint" -var ServiceUsageCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SERVICE_USAGE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ServiceUsageBasePathKey]), -} - -var BigtableAdminCustomEndpointEntryKey = "bigtable_custom_endpoint" -var BigtableAdminCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BIGTABLE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BigtableAdminBasePathKey]), -} - -var PrivatecaCertificateTemplateEndpointEntryKey = "privateca_custom_endpoint" -var PrivatecaCertificateTemplateCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PRIVATECA_CUSTOM_ENDPOINT", - }, DefaultBasePaths[PrivatecaBasePathKey]), -} - -var 
ContainerAwsCustomEndpointEntryKey = "container_aws_custom_endpoint" -var ContainerAwsCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CONTAINERAWS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ContainerAwsBasePathKey]), -} - -var ContainerAzureCustomEndpointEntryKey = "container_azure_custom_endpoint" -var ContainerAzureCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CONTAINERAZURE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ContainerAzureBasePathKey]), -} - -var TagsLocationCustomEndpointEntryKey = "tags_location_custom_endpoint" -var TagsLocationCustomEndpointEntry = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_TAGS_LOCATION_CUSTOM_ENDPOINT", - }, DefaultBasePaths[TagsLocationBasePathKey]), -} - -func validateCustomEndpoint(v interface{}, k string) (ws []string, errors []error) { - re := `.*/[^/]+/$` - return validateRegexp(re)(v, k) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/pubsub_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/pubsub_utils.go deleted file mode 100644 index 64d1137e4f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/pubsub_utils.go +++ /dev/null @@ -1,24 +0,0 @@ -package google - -import ( - "fmt" - "regexp" -) - -const PubsubTopicRegex = "projects\\/.*\\/topics\\/.*" - -func getComputedSubscriptionName(project, subscription string) string { - match, _ := regexp.MatchString("projects\\/.*\\/subscriptions\\/.*", subscription) - if match { - return subscription - } - return 
fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription) -} - -func getComputedTopicName(project, topic string) string { - match, _ := regexp.MatchString(PubsubTopicRegex, topic) - if match { - return topic - } - return fmt.Sprintf("projects/%s/topics/%s", project, topic) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/redis_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/redis_operation.go deleted file mode 100644 index 95dc63e121..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/redis_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type RedisOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *RedisOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.RedisBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createRedisWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*RedisOperationWaiter, error) { - w := &RedisOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func RedisOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createRedisWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func RedisOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createRedisWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/regional_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/regional_utils.go deleted file mode 100644 index 52c10fd308..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/regional_utils.go +++ /dev/null @@ -1,38 +0,0 @@ -package google - -import ( - "fmt" - "strings" -) - -//These functions are used by both the `resource_container_node_pool` and `resource_container_cluster` for handling regional clusters - -func isZone(location string) bool { - return len(strings.Split(location, "-")) == 3 -} - -func getLocation(d TerraformResourceData, config *Config) (string, error) { - if v, ok := d.GetOk("location"); ok { - return v.(string), nil - } else if v, isRegionalCluster := d.GetOk("region"); isRegionalCluster { - return v.(string), nil - } else { - // If region is not explicitly set, use "zone" (or fall back to the provider-level zone). - // For now, to avoid confusion, we require region to be set in the config to create a regional - // cluster rather than falling back to the provider-level region. - return getZone(d, config) - } -} - -// getZone reads the "zone" value from the given resource data and falls back -// to provider's value if not given. If neither is provided, returns an error. 
-func getZone(d TerraformResourceData, config *Config) (string, error) { - res, ok := d.GetOk("zone") - if !ok { - if config.Zone != "" { - return config.Zone, nil - } - return "", fmt.Errorf("Cannot determine zone: set in this resource, or set provider-level zone.") - } - return GetResourceNameFromSelfLink(res.(string)), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_access_level_condition.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_access_level_condition.go deleted file mode 100644 index 613e7f0246..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_access_level_condition.go +++ /dev/null @@ -1,864 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAccessContextManagerAccessLevelCondition() *schema.Resource { - return &schema.Resource{ - Create: resourceAccessContextManagerAccessLevelConditionCreate, - Read: resourceAccessContextManagerAccessLevelConditionRead, - Delete: resourceAccessContextManagerAccessLevelConditionDelete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "access_level": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the Access Level to add this condition to.`, - }, - "device_policy": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Device specific restrictions, all restrictions must hold for -the Condition to be true. If not specified, all devices are -allowed.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allowed_device_management_levels": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A list of allowed device management levels. -An empty list allows all management levels. Possible values: ["MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"]`, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"}), - }, - }, - "allowed_encryption_statuses": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A list of allowed encryptions statuses. -An empty list allows all statuses. 
Possible values: ["ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"]`, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"}), - }, - }, - "os_constraints": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A list of allowed OS versions. -An empty list allows all types and all versions.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "os_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"}), - Description: `The operating system type of the device. Possible values: ["OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"]`, - }, - "minimum_version": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The minimum allowed OS version. If not set, any version -of this OS satisfies the constraint. -Format: "major.minor.patch" such as "10.5.301", "9.2.1".`, - }, - }, - }, - }, - "require_admin_approval": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether the device needs to be approved by the customer admin.`, - }, - "require_corp_owned": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether the device needs to be corp owned.`, - }, - "require_screen_lock": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether or not screenlock is required for the DevicePolicy -to be true. Defaults to false.`, - }, - }, - }, - }, - "ip_subnetworks": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A list of CIDR block IP subnetwork specification. May be IPv4 -or IPv6. 
-Note that for a CIDR IP address block, the specified IP address -portion must be properly truncated (i.e. all the host bits must -be zero) or the input is considered malformed. For example, -"192.0.2.0/24" is accepted but "192.0.2.1/24" is not. Similarly, -for IPv6, "2001:db8::/32" is accepted whereas "2001:db8::1/32" -is not. The originating IP of a request must be in one of the -listed subnets in order for this Condition to be true. -If empty, all IP addresses are allowed.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "members": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `An allowed list of members (users, service accounts). -Using groups is not supported yet. - -The signed-in user originating the request must be a part of one -of the provided members. If not specified, a request may come -from any user (logged in/not logged in, not present in any -groups, etc.). -Formats: 'user:{emailid}', 'serviceAccount:{emailid}'`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "negate": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to negate the Condition. If true, the Condition becomes -a NAND over its non-empty fields, each field must be false for -the Condition overall to be satisfied. Defaults to false.`, - }, - "regions": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The request must originate from one of the provided -countries/regions. -Format: A valid ISO 3166-1 alpha-2 code.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "required_access_levels": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A list of other access levels defined in the same Policy, -referenced by resource name. Referencing an AccessLevel which -does not exist is an error. All access levels listed must be -granted for the Condition to be true. 
-Format: accessPolicies/{policy_id}/accessLevels/{short_name}`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAccessContextManagerAccessLevelConditionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - ipSubnetworksProp, err := expandNestedAccessContextManagerAccessLevelConditionIpSubnetworks(d.Get("ip_subnetworks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_subnetworks"); !isEmptyValue(reflect.ValueOf(ipSubnetworksProp)) && (ok || !reflect.DeepEqual(v, ipSubnetworksProp)) { - obj["ipSubnetworks"] = ipSubnetworksProp - } - requiredAccessLevelsProp, err := expandNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(d.Get("required_access_levels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("required_access_levels"); !isEmptyValue(reflect.ValueOf(requiredAccessLevelsProp)) && (ok || !reflect.DeepEqual(v, requiredAccessLevelsProp)) { - obj["requiredAccessLevels"] = requiredAccessLevelsProp - } - membersProp, err := expandNestedAccessContextManagerAccessLevelConditionMembers(d.Get("members"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("members"); !isEmptyValue(reflect.ValueOf(membersProp)) && (ok || !reflect.DeepEqual(v, membersProp)) { - obj["members"] = membersProp - } - negateProp, err := expandNestedAccessContextManagerAccessLevelConditionNegate(d.Get("negate"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("negate"); !isEmptyValue(reflect.ValueOf(negateProp)) && (ok || !reflect.DeepEqual(v, negateProp)) { - obj["negate"] = negateProp - } - devicePolicyProp, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicy(d.Get("device_policy"), d, config) - if err != nil { 
- return err - } else if v, ok := d.GetOkExists("device_policy"); !isEmptyValue(reflect.ValueOf(devicePolicyProp)) && (ok || !reflect.DeepEqual(v, devicePolicyProp)) { - obj["devicePolicy"] = devicePolicyProp - } - regionsProp, err := expandNestedAccessContextManagerAccessLevelConditionRegions(d.Get("regions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("regions"); !isEmptyValue(reflect.ValueOf(regionsProp)) && (ok || !reflect.DeepEqual(v, regionsProp)) { - obj["regions"] = regionsProp - } - - lockName, err := replaceVars(d, config, "{{access_level}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{access_level}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new AccessLevelCondition: %#v", obj) - - obj, err = resourceAccessContextManagerAccessLevelConditionPatchCreateEncoder(d, meta, obj) - if err != nil { - return err - } - url, err = addQueryParams(url, map[string]string{"updateMask": "basic.conditions"}) - if err != nil { - return err - } - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating AccessLevelCondition: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{access_level}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourceAccessContextManagerAccessLevelConditionPollRead(d, meta), PollCheckForExistence, "Creating AccessLevelCondition", d.Timeout(schema.TimeoutCreate), 1) - if err != nil { - return fmt.Errorf("Error waiting to create AccessLevelCondition: %s", err) - } - - 
log.Printf("[DEBUG] Finished creating AccessLevelCondition %q: %#v", d.Id(), res) - - return resourceAccessContextManagerAccessLevelConditionRead(d, meta) -} - -func resourceAccessContextManagerAccessLevelConditionPollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{access_level}}") - if err != nil { - return nil, err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - res, err = flattenNestedAccessContextManagerAccessLevelCondition(d, meta, res) - if err != nil { - return nil, err - } - - if res == nil { - return nil, fake404("nested", "AccessContextManagerAccessLevelCondition") - } - - return res, nil - } -} - -func resourceAccessContextManagerAccessLevelConditionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{access_level}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerAccessLevelCondition %q", d.Id())) - } - - res, err = flattenNestedAccessContextManagerAccessLevelCondition(d, meta, res) - if err != nil { - 
return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing AccessContextManagerAccessLevelCondition because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("ip_subnetworks", flattenNestedAccessContextManagerAccessLevelConditionIpSubnetworks(res["ipSubnetworks"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessLevelCondition: %s", err) - } - if err := d.Set("required_access_levels", flattenNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(res["requiredAccessLevels"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessLevelCondition: %s", err) - } - if err := d.Set("members", flattenNestedAccessContextManagerAccessLevelConditionMembers(res["members"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessLevelCondition: %s", err) - } - if err := d.Set("negate", flattenNestedAccessContextManagerAccessLevelConditionNegate(res["negate"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessLevelCondition: %s", err) - } - if err := d.Set("device_policy", flattenNestedAccessContextManagerAccessLevelConditionDevicePolicy(res["devicePolicy"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessLevelCondition: %s", err) - } - if err := d.Set("regions", flattenNestedAccessContextManagerAccessLevelConditionRegions(res["regions"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessLevelCondition: %s", err) - } - - return nil -} - -func resourceAccessContextManagerAccessLevelConditionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "{{access_level}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, 
config, "{{AccessContextManagerBasePath}}{{access_level}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - obj, err = resourceAccessContextManagerAccessLevelConditionPatchDeleteEncoder(d, meta, obj) - if err != nil { - return handleNotFoundError(err, d, "AccessLevelCondition") - } - url, err = addQueryParams(url, map[string]string{"updateMask": "basic.conditions"}) - if err != nil { - return err - } - log.Printf("[DEBUG] Deleting AccessLevelCondition %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "AccessLevelCondition") - } - - log.Printf("[DEBUG] Finished deleting AccessLevelCondition %q: %#v", d.Id(), res) - return nil -} - -func flattenNestedAccessContextManagerAccessLevelConditionIpSubnetworks(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionMembers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionNegate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["require_screen_lock"] = - 
flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireScreenLock(original["requireScreenlock"], d, config) - transformed["allowed_encryption_statuses"] = - flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedEncryptionStatuses(original["allowedEncryptionStatuses"], d, config) - transformed["allowed_device_management_levels"] = - flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedDeviceManagementLevels(original["allowedDeviceManagementLevels"], d, config) - transformed["os_constraints"] = - flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraints(original["osConstraints"], d, config) - transformed["require_admin_approval"] = - flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireAdminApproval(original["requireAdminApproval"], d, config) - transformed["require_corp_owned"] = - flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireCorpOwned(original["requireCorpOwned"], d, config) - return []interface{}{transformed} -} -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireScreenLock(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedEncryptionStatuses(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedDeviceManagementLevels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraints(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from 
the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "minimum_version": flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsMinimumVersion(original["minimumVersion"], d, config), - "os_type": flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsOsType(original["osType"], d, config), - }) - } - return transformed -} -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsMinimumVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsOsType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireAdminApproval(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireCorpOwned(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionRegions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedAccessContextManagerAccessLevelConditionIpSubnetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionMembers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionNegate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandNestedAccessContextManagerAccessLevelConditionDevicePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequireScreenLock, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireScreenLock(original["require_screen_lock"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRequireScreenLock); val.IsValid() && !isEmptyValue(val) { - transformed["requireScreenlock"] = transformedRequireScreenLock - } - - transformedAllowedEncryptionStatuses, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedEncryptionStatuses(original["allowed_encryption_statuses"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAllowedEncryptionStatuses); val.IsValid() && !isEmptyValue(val) { - transformed["allowedEncryptionStatuses"] = transformedAllowedEncryptionStatuses - } - - transformedAllowedDeviceManagementLevels, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedDeviceManagementLevels(original["allowed_device_management_levels"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAllowedDeviceManagementLevels); val.IsValid() && !isEmptyValue(val) { - transformed["allowedDeviceManagementLevels"] = transformedAllowedDeviceManagementLevels - } - - transformedOsConstraints, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraints(original["os_constraints"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOsConstraints); val.IsValid() && !isEmptyValue(val) { - transformed["osConstraints"] = transformedOsConstraints - } - - transformedRequireAdminApproval, err := 
expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireAdminApproval(original["require_admin_approval"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRequireAdminApproval); val.IsValid() && !isEmptyValue(val) { - transformed["requireAdminApproval"] = transformedRequireAdminApproval - } - - transformedRequireCorpOwned, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireCorpOwned(original["require_corp_owned"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRequireCorpOwned); val.IsValid() && !isEmptyValue(val) { - transformed["requireCorpOwned"] = transformedRequireCorpOwned - } - - return transformed, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireScreenLock(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedEncryptionStatuses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedDeviceManagementLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraints(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinimumVersion, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsMinimumVersion(original["minimum_version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinimumVersion); val.IsValid() 
&& !isEmptyValue(val) { - transformed["minimumVersion"] = transformedMinimumVersion - } - - transformedOsType, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsOsType(original["os_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOsType); val.IsValid() && !isEmptyValue(val) { - transformed["osType"] = transformedOsType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsMinimumVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsOsType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireAdminApproval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireCorpOwned(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionRegions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenNestedAccessContextManagerAccessLevelCondition(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["basic"] - if !ok || v == nil { - return nil, nil - } - res = v.(map[string]interface{}) - - v, ok = res["conditions"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map 
for value basic.conditions. Actual value: %v", v) - } - - _, item, err := resourceAccessContextManagerAccessLevelConditionFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceAccessContextManagerAccessLevelConditionFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedIpSubnetworks, err := expandNestedAccessContextManagerAccessLevelConditionIpSubnetworks(d.Get("ip_subnetworks"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedIpSubnetworks := flattenNestedAccessContextManagerAccessLevelConditionIpSubnetworks(expectedIpSubnetworks, d, meta.(*Config)) - expectedRequiredAccessLevels, err := expandNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(d.Get("required_access_levels"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedRequiredAccessLevels := flattenNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(expectedRequiredAccessLevels, d, meta.(*Config)) - expectedMembers, err := expandNestedAccessContextManagerAccessLevelConditionMembers(d.Get("members"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedMembers := flattenNestedAccessContextManagerAccessLevelConditionMembers(expectedMembers, d, meta.(*Config)) - expectedNegate, err := expandNestedAccessContextManagerAccessLevelConditionNegate(d.Get("negate"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedNegate := flattenNestedAccessContextManagerAccessLevelConditionNegate(expectedNegate, d, meta.(*Config)) - expectedDevicePolicy, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicy(d.Get("device_policy"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedDevicePolicy := 
flattenNestedAccessContextManagerAccessLevelConditionDevicePolicy(expectedDevicePolicy, d, meta.(*Config)) - expectedRegions, err := expandNestedAccessContextManagerAccessLevelConditionRegions(d.Get("regions"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedRegions := flattenNestedAccessContextManagerAccessLevelConditionRegions(expectedRegions, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemIpSubnetworks := flattenNestedAccessContextManagerAccessLevelConditionIpSubnetworks(item["ipSubnetworks"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemIpSubnetworks)) && isEmptyValue(reflect.ValueOf(expectedFlattenedIpSubnetworks))) && !reflect.DeepEqual(itemIpSubnetworks, expectedFlattenedIpSubnetworks) { - log.Printf("[DEBUG] Skipping item with ipSubnetworks= %#v, looking for %#v)", itemIpSubnetworks, expectedFlattenedIpSubnetworks) - continue - } - itemRequiredAccessLevels := flattenNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(item["requiredAccessLevels"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemRequiredAccessLevels)) && isEmptyValue(reflect.ValueOf(expectedFlattenedRequiredAccessLevels))) && !reflect.DeepEqual(itemRequiredAccessLevels, expectedFlattenedRequiredAccessLevels) { - log.Printf("[DEBUG] Skipping item with requiredAccessLevels= %#v, looking for %#v)", itemRequiredAccessLevels, expectedFlattenedRequiredAccessLevels) - continue - } - itemMembers := flattenNestedAccessContextManagerAccessLevelConditionMembers(item["members"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if 
!(isEmptyValue(reflect.ValueOf(itemMembers)) && isEmptyValue(reflect.ValueOf(expectedFlattenedMembers))) && !reflect.DeepEqual(itemMembers, expectedFlattenedMembers) { - log.Printf("[DEBUG] Skipping item with members= %#v, looking for %#v)", itemMembers, expectedFlattenedMembers) - continue - } - itemNegate := flattenNestedAccessContextManagerAccessLevelConditionNegate(item["negate"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemNegate)) && isEmptyValue(reflect.ValueOf(expectedFlattenedNegate))) && !reflect.DeepEqual(itemNegate, expectedFlattenedNegate) { - log.Printf("[DEBUG] Skipping item with negate= %#v, looking for %#v)", itemNegate, expectedFlattenedNegate) - continue - } - itemDevicePolicy := flattenNestedAccessContextManagerAccessLevelConditionDevicePolicy(item["devicePolicy"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemDevicePolicy)) && isEmptyValue(reflect.ValueOf(expectedFlattenedDevicePolicy))) && !reflect.DeepEqual(itemDevicePolicy, expectedFlattenedDevicePolicy) { - log.Printf("[DEBUG] Skipping item with devicePolicy= %#v, looking for %#v)", itemDevicePolicy, expectedFlattenedDevicePolicy) - continue - } - itemRegions := flattenNestedAccessContextManagerAccessLevelConditionRegions(item["regions"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemRegions)) && isEmptyValue(reflect.ValueOf(expectedFlattenedRegions))) && !reflect.DeepEqual(itemRegions, expectedFlattenedRegions) { - log.Printf("[DEBUG] Skipping item with regions= %#v, looking for %#v)", itemRegions, expectedFlattenedRegions) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -// 
PatchCreateEncoder handles creating request data to PATCH parent resource -// with list including new object. -func resourceAccessContextManagerAccessLevelConditionPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceAccessContextManagerAccessLevelConditionListForPatch(d, meta) - if err != nil { - return nil, err - } - - _, found, err := resourceAccessContextManagerAccessLevelConditionFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - - // Return error if item already created. - if found != nil { - return nil, fmt.Errorf("Unable to create AccessLevelCondition, existing object already found: %+v", found) - } - - // Return list with the resource to create appended - res := map[string]interface{}{ - "conditions": append(currItems, obj), - } - wrapped := map[string]interface{}{ - "basic": res, - } - res = wrapped - - return res, nil -} - -// PatchDeleteEncoder handles creating request data to PATCH parent resource -// with list excluding object to delete. -func resourceAccessContextManagerAccessLevelConditionPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceAccessContextManagerAccessLevelConditionListForPatch(d, meta) - if err != nil { - return nil, err - } - - idx, item, err := resourceAccessContextManagerAccessLevelConditionFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - if item == nil { - // Spoof 404 error for proper handling by Delete (i.e. no-op) - return nil, fake404("nested", "AccessContextManagerAccessLevelCondition") - } - - updatedItems := append(currItems[:idx], currItems[idx+1:]...) 
- res := map[string]interface{}{ - "conditions": updatedItems, - } - wrapped := map[string]interface{}{ - "basic": res, - } - res = wrapped - - return res, nil -} - -// ListForPatch handles making API request to get parent resource and -// extracting list of objects. -func resourceAccessContextManagerAccessLevelConditionListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { - config := meta.(*Config) - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{access_level}}") - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", "", url, userAgent, nil) - if err != nil { - return nil, err - } - - var v interface{} - var ok bool - if v, ok = res["basic"]; ok && v != nil { - res = v.(map[string]interface{}) - } else { - return nil, nil - } - - v, ok = res["conditions"] - if ok && v != nil { - ls, lsOk := v.([]interface{}) - if !lsOk { - return nil, fmt.Errorf(`expected list for nested field "conditions"`) - } - return ls, nil - } - return nil, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_access_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_access_policy.go deleted file mode 100644 index 016b73b06c..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_access_policy.go +++ /dev/null @@ -1,389 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is 
regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAccessContextManagerAccessPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceAccessContextManagerAccessPolicyCreate, - Read: resourceAccessContextManagerAccessPolicyRead, - Update: resourceAccessContextManagerAccessPolicyUpdate, - Delete: resourceAccessContextManagerAccessPolicyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAccessContextManagerAccessPolicyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent of this AccessPolicy in the Cloud Resource Hierarchy. -Format: organizations/{organization_id}`, - }, - "title": { - Type: schema.TypeString, - Required: true, - Description: `Human readable title. Does not affect behavior.`, - }, - "scopes": { - Type: schema.TypeList, - Optional: true, - Description: `Folder or project on which this policy is applicable. -Format: folders/{{folder_id}} or projects/{{project_id}}`, - MaxItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Time the AccessPolicy was created in UTC.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Resource name of the AccessPolicy. 
Format: {policy_id}`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Time the AccessPolicy was updated in UTC.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAccessContextManagerAccessPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - parentProp, err := expandAccessContextManagerAccessPolicyParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - titleProp, err := expandAccessContextManagerAccessPolicyTitle(d.Get("title"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("title"); !isEmptyValue(reflect.ValueOf(titleProp)) && (ok || !reflect.DeepEqual(v, titleProp)) { - obj["title"] = titleProp - } - scopesProp, err := expandAccessContextManagerAccessPolicyScopes(d.Get("scopes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("scopes"); !isEmptyValue(reflect.ValueOf(scopesProp)) && (ok || !reflect.DeepEqual(v, scopesProp)) { - obj["scopes"] = scopesProp - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}accessPolicies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new AccessPolicy: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating AccessPolicy: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - 
return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = AccessContextManagerOperationWaitTimeWithResponse( - config, res, &opRes, "Creating AccessPolicy", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create AccessPolicy: %s", err) - } - - if err := d.Set("name", flattenAccessContextManagerAccessPolicyName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // The operation for this resource contains the generated name that we need - // in order to perform a READ. We need to access the object inside of it as - // a map[string]interface, so let's do that. 
- - resp := res["response"].(map[string]interface{}) - name := GetResourceNameFromSelfLink(resp["name"].(string)) - log.Printf("[DEBUG] Setting AccessPolicy name, id to %s", name) - if err := d.Set("name", name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - - log.Printf("[DEBUG] Finished creating AccessPolicy %q: %#v", d.Id(), res) - - return resourceAccessContextManagerAccessPolicyRead(d, meta) -} - -func resourceAccessContextManagerAccessPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}accessPolicies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerAccessPolicy %q", d.Id())) - } - - if err := d.Set("name", flattenAccessContextManagerAccessPolicyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessPolicy: %s", err) - } - if err := d.Set("create_time", flattenAccessContextManagerAccessPolicyCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessPolicy: %s", err) - } - if err := d.Set("update_time", flattenAccessContextManagerAccessPolicyUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessPolicy: %s", err) - } - if err := d.Set("parent", flattenAccessContextManagerAccessPolicyParent(res["parent"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessPolicy: %s", err) - } - if err := d.Set("title", 
flattenAccessContextManagerAccessPolicyTitle(res["title"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessPolicy: %s", err) - } - if err := d.Set("scopes", flattenAccessContextManagerAccessPolicyScopes(res["scopes"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessPolicy: %s", err) - } - - return nil -} - -func resourceAccessContextManagerAccessPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - titleProp, err := expandAccessContextManagerAccessPolicyTitle(d.Get("title"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("title"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, titleProp)) { - obj["title"] = titleProp - } - scopesProp, err := expandAccessContextManagerAccessPolicyScopes(d.Get("scopes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("scopes"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, scopesProp)) { - obj["scopes"] = scopesProp - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}accessPolicies/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating AccessPolicy %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("title") { - updateMask = append(updateMask, "title") - } - - if d.HasChange("scopes") { - updateMask = append(updateMask, "scopes") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating AccessPolicy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating AccessPolicy %q: %#v", d.Id(), res) - } - - err = AccessContextManagerOperationWaitTime( - config, res, "Updating AccessPolicy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAccessContextManagerAccessPolicyRead(d, meta) -} - -func resourceAccessContextManagerAccessPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}accessPolicies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting AccessPolicy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "AccessPolicy") - } - - err = AccessContextManagerOperationWaitTime( - config, res, "Deleting AccessPolicy", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting AccessPolicy %q: %#v", d.Id(), res) - return nil -} - -func resourceAccessContextManagerAccessPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") - if err 
!= nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenAccessContextManagerAccessPolicyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenAccessContextManagerAccessPolicyCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessPolicyUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessPolicyParent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessPolicyTitle(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessPolicyScopes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessContextManagerAccessPolicyParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessPolicyTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessPolicyScopes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_authorized_orgs_desc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_authorized_orgs_desc.go deleted file mode 100644 index c68f501d2a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_authorized_orgs_desc.go +++ /dev/null @@ -1,458 
+0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAccessContextManagerAuthorizedOrgsDesc() *schema.Resource { - return &schema.Resource{ - Create: resourceAccessContextManagerAuthorizedOrgsDescCreate, - Read: resourceAccessContextManagerAuthorizedOrgsDescRead, - Update: resourceAccessContextManagerAuthorizedOrgsDescUpdate, - Delete: resourceAccessContextManagerAuthorizedOrgsDescDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAccessContextManagerAuthorizedOrgsDescImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Resource name for the 'AuthorizedOrgsDesc'. Format: -'accessPolicies/{access_policy}/authorizedOrgsDescs/{authorized_orgs_desc}'. -The 'authorized_orgs_desc' component must begin with a letter, followed by -alphanumeric characters or '_'. -After you create an 'AuthorizedOrgsDesc', you cannot change its 'name'.`, - }, - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Required. 
Resource name for the access policy which owns this 'AuthorizedOrgsDesc'.`, - }, - "asset_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"ASSET_TYPE_DEVICE", "ASSET_TYPE_CREDENTIAL_STRENGTH", ""}), - Description: `The type of entities that need to use the authorization relationship during -evaluation, such as a device. Valid values are "ASSET_TYPE_DEVICE" and -"ASSET_TYPE_CREDENTIAL_STRENGTH". Possible values: ["ASSET_TYPE_DEVICE", "ASSET_TYPE_CREDENTIAL_STRENGTH"]`, - }, - "authorization_direction": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"AUTHORIZATION_DIRECTION_TO", "AUTHORIZATION_DIRECTION_FROM", ""}), - Description: `The direction of the authorization relationship between this organization -and the organizations listed in the "orgs" field. The valid values for this -field include the following: - -AUTHORIZATION_DIRECTION_FROM: Allows this organization to evaluate traffic -in the organizations listed in the 'orgs' field. - -AUTHORIZATION_DIRECTION_TO: Allows the organizations listed in the 'orgs' -field to evaluate the traffic in this organization. - -For the authorization relationship to take effect, all of the organizations -must authorize and specify the appropriate relationship direction. For -example, if organization A authorized organization B and C to evaluate its -traffic, by specifying "AUTHORIZATION_DIRECTION_TO" as the authorization -direction, organizations B and C must specify -"AUTHORIZATION_DIRECTION_FROM" as the authorization direction in their -"AuthorizedOrgsDesc" resource. Possible values: ["AUTHORIZATION_DIRECTION_TO", "AUTHORIZATION_DIRECTION_FROM"]`, - }, - "authorization_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"AUTHORIZATION_TYPE_TRUST", ""}), - Description: `A granular control type for authorization levels. 
Valid value is "AUTHORIZATION_TYPE_TRUST". Possible values: ["AUTHORIZATION_TYPE_TRUST"]`, - }, - "orgs": { - Type: schema.TypeList, - Optional: true, - Description: `The list of organization ids in this AuthorizedOrgsDesc. -Format: 'organizations/' -Example: 'organizations/123456'`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Time the AuthorizedOrgsDesc was created in UTC.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Time the AuthorizedOrgsDesc was updated in UTC.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAccessContextManagerAuthorizedOrgsDescCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - parentProp, err := expandAccessContextManagerAuthorizedOrgsDescParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - nameProp, err := expandAccessContextManagerAuthorizedOrgsDescName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - orgsProp, err := expandAccessContextManagerAuthorizedOrgsDescOrgs(d.Get("orgs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("orgs"); !isEmptyValue(reflect.ValueOf(orgsProp)) && (ok || !reflect.DeepEqual(v, orgsProp)) { - obj["orgs"] = orgsProp - } - assetTypeProp, err := expandAccessContextManagerAuthorizedOrgsDescAssetType(d.Get("asset_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_type"); 
!isEmptyValue(reflect.ValueOf(assetTypeProp)) && (ok || !reflect.DeepEqual(v, assetTypeProp)) { - obj["assetType"] = assetTypeProp - } - authorizationDirectionProp, err := expandAccessContextManagerAuthorizedOrgsDescAuthorizationDirection(d.Get("authorization_direction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authorization_direction"); !isEmptyValue(reflect.ValueOf(authorizationDirectionProp)) && (ok || !reflect.DeepEqual(v, authorizationDirectionProp)) { - obj["authorizationDirection"] = authorizationDirectionProp - } - authorizationTypeProp, err := expandAccessContextManagerAuthorizedOrgsDescAuthorizationType(d.Get("authorization_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authorization_type"); !isEmptyValue(reflect.ValueOf(authorizationTypeProp)) && (ok || !reflect.DeepEqual(v, authorizationTypeProp)) { - obj["authorizationType"] = authorizationTypeProp - } - - obj, err = resourceAccessContextManagerAuthorizedOrgsDescEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/authorizedOrgsDescs") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new AuthorizedOrgsDesc: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating AuthorizedOrgsDesc: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = 
AccessContextManagerOperationWaitTimeWithResponse( - config, res, &opRes, "Creating AuthorizedOrgsDesc", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create AuthorizedOrgsDesc: %s", err) - } - - if err := d.Set("name", flattenAccessContextManagerAuthorizedOrgsDescName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // This is useful if the resource in question doesn't have a perfectly consistent API - // That is, the Operation for Create might return before the Get operation shows the - // completed state of the resource. - time.Sleep(2 * time.Minute) - - log.Printf("[DEBUG] Finished creating AuthorizedOrgsDesc %q: %#v", d.Id(), res) - - return resourceAccessContextManagerAuthorizedOrgsDescRead(d, meta) -} - -func resourceAccessContextManagerAuthorizedOrgsDescRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerAuthorizedOrgsDesc %q", d.Id())) - } - - if err := d.Set("create_time", flattenAccessContextManagerAuthorizedOrgsDescCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) - } - if err := 
d.Set("update_time", flattenAccessContextManagerAuthorizedOrgsDescUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) - } - if err := d.Set("name", flattenAccessContextManagerAuthorizedOrgsDescName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) - } - if err := d.Set("orgs", flattenAccessContextManagerAuthorizedOrgsDescOrgs(res["orgs"], d, config)); err != nil { - return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) - } - if err := d.Set("asset_type", flattenAccessContextManagerAuthorizedOrgsDescAssetType(res["assetType"], d, config)); err != nil { - return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) - } - if err := d.Set("authorization_direction", flattenAccessContextManagerAuthorizedOrgsDescAuthorizationDirection(res["authorizationDirection"], d, config)); err != nil { - return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) - } - if err := d.Set("authorization_type", flattenAccessContextManagerAuthorizedOrgsDescAuthorizationType(res["authorizationType"], d, config)); err != nil { - return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) - } - - return nil -} - -func resourceAccessContextManagerAuthorizedOrgsDescUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - orgsProp, err := expandAccessContextManagerAuthorizedOrgsDescOrgs(d.Get("orgs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("orgs"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, orgsProp)) { - obj["orgs"] = orgsProp - } - - obj, err = resourceAccessContextManagerAuthorizedOrgsDescEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating AuthorizedOrgsDesc %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("orgs") { - updateMask = append(updateMask, "orgs") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating AuthorizedOrgsDesc %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating AuthorizedOrgsDesc %q: %#v", d.Id(), res) - } - - err = AccessContextManagerOperationWaitTime( - config, res, "Updating AuthorizedOrgsDesc", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAccessContextManagerAuthorizedOrgsDescRead(d, meta) -} - -func resourceAccessContextManagerAuthorizedOrgsDescDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting AuthorizedOrgsDesc %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return 
handleNotFoundError(err, d, "AuthorizedOrgsDesc") - } - - err = AccessContextManagerOperationWaitTime( - config, res, "Deleting AuthorizedOrgsDesc", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting AuthorizedOrgsDesc %q: %#v", d.Id(), res) - return nil -} - -func resourceAccessContextManagerAuthorizedOrgsDescImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - stringParts := strings.Split(d.Get("name").(string), "/") - if len(stringParts) < 2 { - return nil, fmt.Errorf("Error parsing parent name. Should be in form accessPolicies/{{policy_id}}/authorizedOrgsDescs/{{short_name}}") - } - if err := d.Set("parent", fmt.Sprintf("%s/%s", stringParts[0], stringParts[1])); err != nil { - return nil, fmt.Errorf("Error setting parent, %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenAccessContextManagerAuthorizedOrgsDescCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAuthorizedOrgsDescUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAuthorizedOrgsDescName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAuthorizedOrgsDescOrgs(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAuthorizedOrgsDescAssetType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAuthorizedOrgsDescAuthorizationDirection(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenAccessContextManagerAuthorizedOrgsDescAuthorizationType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessContextManagerAuthorizedOrgsDescParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAuthorizedOrgsDescName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAuthorizedOrgsDescOrgs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAuthorizedOrgsDescAssetType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAuthorizedOrgsDescAuthorizationDirection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAuthorizedOrgsDescAuthorizationType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceAccessContextManagerAuthorizedOrgsDescEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - delete(obj, "parent") - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_gcp_user_access_binding.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_gcp_user_access_binding.go deleted file mode 100644 index 6385126a47..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_gcp_user_access_binding.go +++ /dev/null @@ -1,322 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: 
MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAccessContextManagerGcpUserAccessBinding() *schema.Resource { - return &schema.Resource{ - Create: resourceAccessContextManagerGcpUserAccessBindingCreate, - Read: resourceAccessContextManagerGcpUserAccessBindingRead, - Update: resourceAccessContextManagerGcpUserAccessBindingUpdate, - Delete: resourceAccessContextManagerGcpUserAccessBindingDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAccessContextManagerGcpUserAccessBindingImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "access_levels": { - Type: schema.TypeList, - Required: true, - Description: `Required. Access level that a user must have to be granted access. Only one access level is supported, not multiple. This repeated field must have exactly one element. Example: "accessPolicies/9522/accessLevels/device_trusted"`, - MinItems: 1, - MaxItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "group_key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Required. Immutable. Google Group id whose members are subject to this binding's restrictions. See "id" in the G Suite Directory API's Groups resource. 
If a group's email address/alias is changed, this resource will continue to point at the changed group. This field does not accept group email addresses or aliases. Example: "01d520gv4vjcrht"`, - }, - "organization_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Required. ID of the parent organization.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Immutable. Assigned by the server during creation. The last segment has an arbitrary length and has only URI unreserved characters (as defined by RFC 3986 Section 2.3). Should not be specified by the client during creation. Example: "organizations/256/gcpUserAccessBindings/b3-BhcX_Ud5N"`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAccessContextManagerGcpUserAccessBindingCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - groupKeyProp, err := expandAccessContextManagerGcpUserAccessBindingGroupKey(d.Get("group_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("group_key"); !isEmptyValue(reflect.ValueOf(groupKeyProp)) && (ok || !reflect.DeepEqual(v, groupKeyProp)) { - obj["groupKey"] = groupKeyProp - } - accessLevelsProp, err := expandAccessContextManagerGcpUserAccessBindingAccessLevels(d.Get("access_levels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("access_levels"); !isEmptyValue(reflect.ValueOf(accessLevelsProp)) && (ok || !reflect.DeepEqual(v, accessLevelsProp)) { - obj["accessLevels"] = accessLevelsProp - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}organizations/{{organization_id}}/gcpUserAccessBindings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new GcpUserAccessBinding: %#v", obj) - billingProject := "" - - // err == nil indicates that the 
billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating GcpUserAccessBinding: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = AccessContextManagerOperationWaitTimeWithResponse( - config, res, &opRes, "Creating GcpUserAccessBinding", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create GcpUserAccessBinding: %s", err) - } - - if err := d.Set("name", flattenAccessContextManagerGcpUserAccessBindingName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating GcpUserAccessBinding %q: %#v", d.Id(), res) - - return resourceAccessContextManagerGcpUserAccessBindingRead(d, meta) -} - -func resourceAccessContextManagerGcpUserAccessBindingRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerGcpUserAccessBinding %q", d.Id())) - } - - if err := d.Set("name", flattenAccessContextManagerGcpUserAccessBindingName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading GcpUserAccessBinding: %s", err) - } - if err := d.Set("group_key", flattenAccessContextManagerGcpUserAccessBindingGroupKey(res["groupKey"], d, config)); err != nil { - return fmt.Errorf("Error reading GcpUserAccessBinding: %s", err) - } - if err := d.Set("access_levels", flattenAccessContextManagerGcpUserAccessBindingAccessLevels(res["accessLevels"], d, config)); err != nil { - return fmt.Errorf("Error reading GcpUserAccessBinding: %s", err) - } - - return nil -} - -func resourceAccessContextManagerGcpUserAccessBindingUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - accessLevelsProp, err := 
expandAccessContextManagerGcpUserAccessBindingAccessLevels(d.Get("access_levels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("access_levels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, accessLevelsProp)) { - obj["accessLevels"] = accessLevelsProp - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating GcpUserAccessBinding %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("access_levels") { - updateMask = append(updateMask, "accessLevels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating GcpUserAccessBinding %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating GcpUserAccessBinding %q: %#v", d.Id(), res) - } - - err = AccessContextManagerOperationWaitTime( - config, res, "Updating GcpUserAccessBinding", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAccessContextManagerGcpUserAccessBindingRead(d, meta) -} - -func resourceAccessContextManagerGcpUserAccessBindingDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - var obj 
map[string]interface{} - log.Printf("[DEBUG] Deleting GcpUserAccessBinding %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GcpUserAccessBinding") - } - - err = AccessContextManagerOperationWaitTime( - config, res, "Deleting GcpUserAccessBinding", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting GcpUserAccessBinding %q: %#v", d.Id(), res) - return nil -} - -func resourceAccessContextManagerGcpUserAccessBindingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) - - if err := d.Set("name", name); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - return []*schema.ResourceData{d}, nil -} - -func flattenAccessContextManagerGcpUserAccessBindingName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerGcpUserAccessBindingGroupKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerGcpUserAccessBindingAccessLevels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessContextManagerGcpUserAccessBindingGroupKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerGcpUserAccessBindingAccessLevels(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_service_perimeter_resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_service_perimeter_resource.go deleted file mode 100644 index f136d69f99..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_service_perimeter_resource.go +++ /dev/null @@ -1,437 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAccessContextManagerServicePerimeterResource() *schema.Resource { - return &schema.Resource{ - Create: resourceAccessContextManagerServicePerimeterResourceCreate, - Read: resourceAccessContextManagerServicePerimeterResourceRead, - Delete: resourceAccessContextManagerServicePerimeterResourceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAccessContextManagerServicePerimeterResourceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "perimeter_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the Service Perimeter to add this resource to.`, - }, - "resource": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A GCP resource that is inside of the service perimeter. -Currently only projects are allowed. 
-Format: projects/{project_number}`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAccessContextManagerServicePerimeterResourceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - resourceProp, err := expandNestedAccessContextManagerServicePerimeterResourceResource(d.Get("resource"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("resource"); !isEmptyValue(reflect.ValueOf(resourceProp)) && (ok || !reflect.DeepEqual(v, resourceProp)) { - obj["resource"] = resourceProp - } - - lockName, err := replaceVars(d, config, "{{perimeter_name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter_name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ServicePerimeterResource: %#v", obj) - - obj, err = resourceAccessContextManagerServicePerimeterResourcePatchCreateEncoder(d, meta, obj) - if err != nil { - return err - } - url, err = addQueryParams(url, map[string]string{"updateMask": "status.resources"}) - if err != nil { - return err - } - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating ServicePerimeterResource: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{perimeter_name}}/{{resource}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var 
opRes map[string]interface{} - err = AccessContextManagerOperationWaitTimeWithResponse( - config, res, &opRes, "Creating ServicePerimeterResource", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create ServicePerimeterResource: %s", err) - } - - if _, ok := opRes["status"]; ok { - opRes, err = flattenNestedAccessContextManagerServicePerimeterResource(d, meta, opRes) - if err != nil { - return fmt.Errorf("Error getting nested object from operation response: %s", err) - } - if opRes == nil { - // Object isn't there any more - remove it from the state. - return fmt.Errorf("Error decoding response from operation, could not find nested object") - } - } - if err := d.Set("resource", flattenNestedAccessContextManagerServicePerimeterResourceResource(opRes["resource"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "{{perimeter_name}}/{{resource}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating ServicePerimeterResource %q: %#v", d.Id(), res) - - return resourceAccessContextManagerServicePerimeterResourceRead(d, meta) -} - -func resourceAccessContextManagerServicePerimeterResourceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter_name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return 
handleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerServicePerimeterResource %q", d.Id())) - } - - res, err = flattenNestedAccessContextManagerServicePerimeterResource(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing AccessContextManagerServicePerimeterResource because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("resource", flattenNestedAccessContextManagerServicePerimeterResourceResource(res["resource"], d, config)); err != nil { - return fmt.Errorf("Error reading ServicePerimeterResource: %s", err) - } - - return nil -} - -func resourceAccessContextManagerServicePerimeterResourceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "{{perimeter_name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter_name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - obj, err = resourceAccessContextManagerServicePerimeterResourcePatchDeleteEncoder(d, meta, obj) - if err != nil { - return handleNotFoundError(err, d, "ServicePerimeterResource") - } - url, err = addQueryParams(url, map[string]string{"updateMask": "status.resources"}) - if err != nil { - return err - } - log.Printf("[DEBUG] Deleting ServicePerimeterResource %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, 
"ServicePerimeterResource") - } - - err = AccessContextManagerOperationWaitTime( - config, res, "Deleting ServicePerimeterResource", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting ServicePerimeterResource %q: %#v", d.Id(), res) - return nil -} - -func resourceAccessContextManagerServicePerimeterResourceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - parts, err := getImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/servicePerimeters/(?P[^/]+)/(?P.+)"}, d, config, d.Id()) - if err != nil { - return nil, err - } - - if err := d.Set("perimeter_name", fmt.Sprintf("accessPolicies/%s/servicePerimeters/%s", parts["accessPolicy"], parts["perimeter"])); err != nil { - return nil, fmt.Errorf("Error setting perimeter_name: %s", err) - } - if err := d.Set("resource", parts["resource"]); err != nil { - return nil, fmt.Errorf("Error setting resource: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenNestedAccessContextManagerServicePerimeterResourceResource(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedAccessContextManagerServicePerimeterResourceResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenNestedAccessContextManagerServicePerimeterResource(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["status"] - if !ok || v == nil { - return nil, nil - } - res = v.(map[string]interface{}) - - v, ok = res["resources"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = 
[]interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value status.resources. Actual value: %v", v) - } - - _, item, err := resourceAccessContextManagerServicePerimeterResourceFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceAccessContextManagerServicePerimeterResourceFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedResource, err := expandNestedAccessContextManagerServicePerimeterResourceResource(d.Get("resource"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedResource := flattenNestedAccessContextManagerServicePerimeterResourceResource(expectedResource, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - // List response only contains the ID - construct a response object. - item := map[string]interface{}{ - "resource": itemRaw, - } - - itemResource := flattenNestedAccessContextManagerServicePerimeterResourceResource(item["resource"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemResource)) && isEmptyValue(reflect.ValueOf(expectedFlattenedResource))) && !reflect.DeepEqual(itemResource, expectedFlattenedResource) { - log.Printf("[DEBUG] Skipping item with resource= %#v, looking for %#v)", itemResource, expectedFlattenedResource) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -// PatchCreateEncoder handles creating request data to PATCH parent resource -// with list including new object. 
-func resourceAccessContextManagerServicePerimeterResourcePatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceAccessContextManagerServicePerimeterResourceListForPatch(d, meta) - if err != nil { - return nil, err - } - - _, found, err := resourceAccessContextManagerServicePerimeterResourceFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - - // Return error if item already created. - if found != nil { - return nil, fmt.Errorf("Unable to create ServicePerimeterResource, existing object already found: %+v", found) - } - - // Return list with the resource to create appended - res := map[string]interface{}{ - "resources": append(currItems, obj["resource"]), - } - wrapped := map[string]interface{}{ - "status": res, - } - res = wrapped - - return res, nil -} - -// PatchDeleteEncoder handles creating request data to PATCH parent resource -// with list excluding object to delete. -func resourceAccessContextManagerServicePerimeterResourcePatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceAccessContextManagerServicePerimeterResourceListForPatch(d, meta) - if err != nil { - return nil, err - } - - idx, item, err := resourceAccessContextManagerServicePerimeterResourceFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - if item == nil { - // Spoof 404 error for proper handling by Delete (i.e. no-op) - return nil, fake404("nested", "AccessContextManagerServicePerimeterResource") - } - - updatedItems := append(currItems[:idx], currItems[idx+1:]...) - res := map[string]interface{}{ - "resources": updatedItems, - } - wrapped := map[string]interface{}{ - "status": res, - } - res = wrapped - - return res, nil -} - -// ListForPatch handles making API request to get parent resource and -// extracting list of objects. 
-func resourceAccessContextManagerServicePerimeterResourceListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { - config := meta.(*Config) - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter_name}}") - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", "", url, userAgent, nil) - if err != nil { - return nil, err - } - - var v interface{} - var ok bool - if v, ok = res["status"]; ok && v != nil { - res = v.(map[string]interface{}) - } else { - return nil, nil - } - - v, ok = res["resources"] - if ok && v != nil { - ls, lsOk := v.([]interface{}) - if !lsOk { - return nil, fmt.Errorf(`expected list for nested field "resources"`) - } - return ls, nil - } - return nil, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_active_directory_domain.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_active_directory_domain.go deleted file mode 100644 index da6bfef0d1..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_active_directory_domain.go +++ /dev/null @@ -1,472 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceActiveDirectoryDomain() *schema.Resource { - return &schema.Resource{ - Create: resourceActiveDirectoryDomainCreate, - Read: resourceActiveDirectoryDomainRead, - Update: resourceActiveDirectoryDomainUpdate, - Delete: resourceActiveDirectoryDomainDelete, - - Importer: &schema.ResourceImporter{ - State: resourceActiveDirectoryDomainImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(60 * time.Minute), - Update: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(60 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "domain_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateADDomainName(), - Description: `The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions, -https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains.`, - }, - "locations": { - Type: schema.TypeList, - Required: true, - Description: `Locations where domain needs to be provisioned. [regions][compute/docs/regions-zones/] -e.g. us-west1 or us-east4 Service supports up to 4 locations at once. Each location will use a /26 block.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "reserved_ip_range": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The CIDR range of internal addresses that are reserved for this domain. Reserved networks must be /24 or larger. -Ranges must be unique and non-overlapping with existing subnets in authorizedNetworks`, - }, - "admin": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name of delegated administrator account used to perform Active Directory operations. 
-If not specified, setupadmin will be used.`, - Default: "setupadmin", - }, - "authorized_networks": { - Type: schema.TypeSet, - Optional: true, - Description: `The full names of the Google Compute Engine networks the domain instance is connected to. The domain is only available on networks listed in authorizedNetworks. -If CIDR subnets overlap between networks, domain creation will fail.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Resource labels that can contain user-provided metadata`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "fqdn": { - Type: schema.TypeString, - Computed: true, - Description: `The fully-qualified domain name of the exposed domain used by clients to connect to the service. -Similar to what would be chosen for an Active Directory set up on an internal network.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The unique name of the domain using the format: 'projects/{project}/locations/global/domains/{domainName}'.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceActiveDirectoryDomainCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandActiveDirectoryDomainLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - authorizedNetworksProp, err := expandActiveDirectoryDomainAuthorizedNetworks(d.Get("authorized_networks"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("authorized_networks"); !isEmptyValue(reflect.ValueOf(authorizedNetworksProp)) && (ok || !reflect.DeepEqual(v, authorizedNetworksProp)) { - obj["authorizedNetworks"] = authorizedNetworksProp - } - reservedIpRangeProp, err := expandActiveDirectoryDomainReservedIpRange(d.Get("reserved_ip_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reserved_ip_range"); !isEmptyValue(reflect.ValueOf(reservedIpRangeProp)) && (ok || !reflect.DeepEqual(v, reservedIpRangeProp)) { - obj["reservedIpRange"] = reservedIpRangeProp - } - locationsProp, err := expandActiveDirectoryDomainLocations(d.Get("locations"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("locations"); !isEmptyValue(reflect.ValueOf(locationsProp)) && (ok || !reflect.DeepEqual(v, locationsProp)) { - obj["locations"] = locationsProp - } - adminProp, err := expandActiveDirectoryDomainAdmin(d.Get("admin"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("admin"); !isEmptyValue(reflect.ValueOf(adminProp)) && (ok || !reflect.DeepEqual(v, adminProp)) { - obj["admin"] = adminProp - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains?domainName={{domain_name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Domain: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Domain: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Domain: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err 
!= nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = ActiveDirectoryOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Domain", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Domain: %s", err) - } - - if err := d.Set("name", flattenActiveDirectoryDomainName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Domain %q: %#v", d.Id(), res) - - return resourceActiveDirectoryDomainRead(d, meta) -} - -func resourceActiveDirectoryDomainRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Domain: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ActiveDirectoryDomain %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Domain: %s", err) - } - - if err := d.Set("name", 
flattenActiveDirectoryDomainName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Domain: %s", err) - } - if err := d.Set("labels", flattenActiveDirectoryDomainLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Domain: %s", err) - } - if err := d.Set("authorized_networks", flattenActiveDirectoryDomainAuthorizedNetworks(res["authorizedNetworks"], d, config)); err != nil { - return fmt.Errorf("Error reading Domain: %s", err) - } - if err := d.Set("reserved_ip_range", flattenActiveDirectoryDomainReservedIpRange(res["reservedIpRange"], d, config)); err != nil { - return fmt.Errorf("Error reading Domain: %s", err) - } - if err := d.Set("locations", flattenActiveDirectoryDomainLocations(res["locations"], d, config)); err != nil { - return fmt.Errorf("Error reading Domain: %s", err) - } - if err := d.Set("admin", flattenActiveDirectoryDomainAdmin(res["admin"], d, config)); err != nil { - return fmt.Errorf("Error reading Domain: %s", err) - } - if err := d.Set("fqdn", flattenActiveDirectoryDomainFqdn(res["fqdn"], d, config)); err != nil { - return fmt.Errorf("Error reading Domain: %s", err) - } - - return nil -} - -func resourceActiveDirectoryDomainUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Domain: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandActiveDirectoryDomainLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - authorizedNetworksProp, err := 
expandActiveDirectoryDomainAuthorizedNetworks(d.Get("authorized_networks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authorized_networks"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, authorizedNetworksProp)) { - obj["authorizedNetworks"] = authorizedNetworksProp - } - locationsProp, err := expandActiveDirectoryDomainLocations(d.Get("locations"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("locations"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, locationsProp)) { - obj["locations"] = locationsProp - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Domain %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("authorized_networks") { - updateMask = append(updateMask, "authorizedNetworks") - } - - if d.HasChange("locations") { - updateMask = append(updateMask, "locations") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Domain %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Domain %q: %#v", d.Id(), res) - } - - err = ActiveDirectoryOperationWaitTime( - config, res, project, "Updating Domain", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceActiveDirectoryDomainRead(d, meta) 
-} - -func resourceActiveDirectoryDomainDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Domain: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain_name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Domain %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Domain") - } - - err = ActiveDirectoryOperationWaitTime( - config, res, project, "Deleting Domain", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Domain %q: %#v", d.Id(), res) - return nil -} - -func resourceActiveDirectoryDomainImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*schema.ResourceData{d}, nil -} - -func flattenActiveDirectoryDomainName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenActiveDirectoryDomainLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenActiveDirectoryDomainAuthorizedNetworks(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenActiveDirectoryDomainReservedIpRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenActiveDirectoryDomainLocations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenActiveDirectoryDomainAdmin(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenActiveDirectoryDomainFqdn(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandActiveDirectoryDomainLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandActiveDirectoryDomainAuthorizedNetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandActiveDirectoryDomainReservedIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandActiveDirectoryDomainLocations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandActiveDirectoryDomainAdmin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_active_directory_domain_trust.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_active_directory_domain_trust.go deleted file mode 100644 index 04e8f86805..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_active_directory_domain_trust.go +++ /dev/null @@ -1,624 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceActiveDirectoryDomainTrust() *schema.Resource { - return &schema.Resource{ - Create: resourceActiveDirectoryDomainTrustCreate, - Read: resourceActiveDirectoryDomainTrustRead, - Update: resourceActiveDirectoryDomainTrustUpdate, - Delete: resourceActiveDirectoryDomainTrustDelete, - - Importer: &schema.ResourceImporter{ - State: resourceActiveDirectoryDomainTrustImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "domain": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The fully qualified domain name. e.g. 
mydomain.myorganization.com, with the restrictions, -https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains.`, - }, - "target_dns_ip_addresses": { - Type: schema.TypeSet, - Required: true, - Description: `The target DNS server IP addresses which can resolve the remote domain involved in the trust.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "target_domain_name": { - Type: schema.TypeString, - Required: true, - Description: `The fully qualified target domain name which will be in trust with the current domain.`, - }, - "trust_direction": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"INBOUND", "OUTBOUND", "BIDIRECTIONAL"}), - Description: `The trust direction, which decides if the current domain is trusted, trusting, or both. Possible values: ["INBOUND", "OUTBOUND", "BIDIRECTIONAL"]`, - }, - "trust_handshake_secret": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The trust secret used for the handshake with the target domain. This will not be stored.`, - Sensitive: true, - }, - "trust_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"FOREST", "EXTERNAL"}), - Description: `The type of trust represented by the trust resource. 
Possible values: ["FOREST", "EXTERNAL"]`, - }, - "selective_authentication": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether the trusted side has forest/domain wide access or selective access to an approved set of resources.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceActiveDirectoryDomainTrustCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - targetDomainNameProp, err := expandNestedActiveDirectoryDomainTrustTargetDomainName(d.Get("target_domain_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_domain_name"); !isEmptyValue(reflect.ValueOf(targetDomainNameProp)) && (ok || !reflect.DeepEqual(v, targetDomainNameProp)) { - obj["targetDomainName"] = targetDomainNameProp - } - trustTypeProp, err := expandNestedActiveDirectoryDomainTrustTrustType(d.Get("trust_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_type"); !isEmptyValue(reflect.ValueOf(trustTypeProp)) && (ok || !reflect.DeepEqual(v, trustTypeProp)) { - obj["trustType"] = trustTypeProp - } - trustDirectionProp, err := expandNestedActiveDirectoryDomainTrustTrustDirection(d.Get("trust_direction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_direction"); !isEmptyValue(reflect.ValueOf(trustDirectionProp)) && (ok || !reflect.DeepEqual(v, trustDirectionProp)) { - obj["trustDirection"] = trustDirectionProp - } - selectiveAuthenticationProp, err := expandNestedActiveDirectoryDomainTrustSelectiveAuthentication(d.Get("selective_authentication"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("selective_authentication"); 
!isEmptyValue(reflect.ValueOf(selectiveAuthenticationProp)) && (ok || !reflect.DeepEqual(v, selectiveAuthenticationProp)) { - obj["selectiveAuthentication"] = selectiveAuthenticationProp - } - targetDnsIpAddressesProp, err := expandNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(d.Get("target_dns_ip_addresses"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_dns_ip_addresses"); !isEmptyValue(reflect.ValueOf(targetDnsIpAddressesProp)) && (ok || !reflect.DeepEqual(v, targetDnsIpAddressesProp)) { - obj["targetDnsIpAddresses"] = targetDnsIpAddressesProp - } - trustHandshakeSecretProp, err := expandNestedActiveDirectoryDomainTrustTrustHandshakeSecret(d.Get("trust_handshake_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_handshake_secret"); !isEmptyValue(reflect.ValueOf(trustHandshakeSecretProp)) && (ok || !reflect.DeepEqual(v, trustHandshakeSecretProp)) { - obj["trustHandshakeSecret"] = trustHandshakeSecretProp - } - - obj, err = resourceActiveDirectoryDomainTrustEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain}}:attachTrust") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new DomainTrust: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DomainTrust: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating DomainTrust: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, 
"projects/{{project}}/locations/global/domains/{{domain}}/{{target_domain_name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = ActiveDirectoryOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating DomainTrust", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create DomainTrust: %s", err) - } - - opRes, err = resourceActiveDirectoryDomainTrustDecoder(d, meta, opRes) - if err != nil { - return fmt.Errorf("Error decoding response from operation: %s", err) - } - if opRes == nil { - return fmt.Errorf("Error decoding response from operation, could not find object") - } - - if _, ok := opRes["trusts"]; ok { - opRes, err = flattenNestedActiveDirectoryDomainTrust(d, meta, opRes) - if err != nil { - return fmt.Errorf("Error getting nested object from operation response: %s", err) - } - if opRes == nil { - // Object isn't there any more - remove it from the state. - return fmt.Errorf("Error decoding response from operation, could not find nested object") - } - } - if err := d.Set("target_domain_name", flattenNestedActiveDirectoryDomainTrustTargetDomainName(opRes["targetDomainName"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/global/domains/{{domain}}/{{target_domain_name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating DomainTrust %q: %#v", d.Id(), res) - - return resourceActiveDirectoryDomainTrustRead(d, meta) -} - -func resourceActiveDirectoryDomainTrustRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DomainTrust: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ActiveDirectoryDomainTrust %q", d.Id())) - } - - res, err = flattenNestedActiveDirectoryDomainTrust(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing ActiveDirectoryDomainTrust because it couldn't be matched.") - d.SetId("") - return nil - } - - res, err = resourceActiveDirectoryDomainTrustDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing ActiveDirectoryDomainTrust because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading DomainTrust: %s", err) - } - - if err := d.Set("target_domain_name", flattenNestedActiveDirectoryDomainTrustTargetDomainName(res["targetDomainName"], d, config)); err != nil { - return fmt.Errorf("Error reading DomainTrust: %s", err) - } - if err := d.Set("trust_type", flattenNestedActiveDirectoryDomainTrustTrustType(res["trustType"], d, config)); err != nil { - return fmt.Errorf("Error reading DomainTrust: %s", err) - } - if err := d.Set("trust_direction", flattenNestedActiveDirectoryDomainTrustTrustDirection(res["trustDirection"], d, config)); err != nil { - return fmt.Errorf("Error reading DomainTrust: %s", err) - } - if err := d.Set("selective_authentication", flattenNestedActiveDirectoryDomainTrustSelectiveAuthentication(res["selectiveAuthentication"], d, config)); err != nil { - return fmt.Errorf("Error reading DomainTrust: %s", err) - } - if err := d.Set("target_dns_ip_addresses", flattenNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(res["targetDnsIpAddresses"], d, config)); err != nil { - return fmt.Errorf("Error reading DomainTrust: %s", err) - } - - return nil -} - -func resourceActiveDirectoryDomainTrustUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DomainTrust: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - targetDomainNameProp, err := expandNestedActiveDirectoryDomainTrustTargetDomainName(d.Get("target_domain_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_domain_name"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetDomainNameProp)) { - obj["targetDomainName"] = targetDomainNameProp - } - trustTypeProp, err := expandNestedActiveDirectoryDomainTrustTrustType(d.Get("trust_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trustTypeProp)) { - obj["trustType"] = trustTypeProp - } - trustDirectionProp, err := expandNestedActiveDirectoryDomainTrustTrustDirection(d.Get("trust_direction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_direction"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trustDirectionProp)) { - obj["trustDirection"] = trustDirectionProp - } - selectiveAuthenticationProp, err := expandNestedActiveDirectoryDomainTrustSelectiveAuthentication(d.Get("selective_authentication"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("selective_authentication"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, selectiveAuthenticationProp)) { - obj["selectiveAuthentication"] = selectiveAuthenticationProp - } - targetDnsIpAddressesProp, err := expandNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(d.Get("target_dns_ip_addresses"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_dns_ip_addresses"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetDnsIpAddressesProp)) { - obj["targetDnsIpAddresses"] = targetDnsIpAddressesProp - } - trustHandshakeSecretProp, err := expandNestedActiveDirectoryDomainTrustTrustHandshakeSecret(d.Get("trust_handshake_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_handshake_secret"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trustHandshakeSecretProp)) { - obj["trustHandshakeSecret"] = trustHandshakeSecretProp - } - - obj, err = 
resourceActiveDirectoryDomainTrustUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain}}:reconfigureTrust") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating DomainTrust %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating DomainTrust %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating DomainTrust %q: %#v", d.Id(), res) - } - - err = ActiveDirectoryOperationWaitTime( - config, res, project, "Updating DomainTrust", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceActiveDirectoryDomainTrustRead(d, meta) -} - -func resourceActiveDirectoryDomainTrustDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain}}:detachTrust") - if err != nil { - return err - } - - obj := make(map[string]interface{}) - targetDomainNameProp, err := expandNestedActiveDirectoryDomainTrustTargetDomainName(d.Get("target_domain_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_domain_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetDomainNameProp)) { - obj["targetDomainName"] = targetDomainNameProp - } - trustTypeProp, err := 
expandNestedActiveDirectoryDomainTrustTrustType(d.Get("trust_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trustTypeProp)) { - obj["trustType"] = trustTypeProp - } - trustDirectionProp, err := expandNestedActiveDirectoryDomainTrustTrustDirection(d.Get("trust_direction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_direction"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trustDirectionProp)) { - obj["trustDirection"] = trustDirectionProp - } - selectiveAuthenticationProp, err := expandNestedActiveDirectoryDomainTrustSelectiveAuthentication(d.Get("selective_authentication"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("selective_authentication"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, selectiveAuthenticationProp)) { - obj["selectiveAuthentication"] = selectiveAuthenticationProp - } - targetDnsIpAddressesProp, err := expandNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(d.Get("target_dns_ip_addresses"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_dns_ip_addresses"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetDnsIpAddressesProp)) { - obj["targetDnsIpAddresses"] = targetDnsIpAddressesProp - } - trustHandshakeSecretProp, err := expandNestedActiveDirectoryDomainTrustTrustHandshakeSecret(d.Get("trust_handshake_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_handshake_secret"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trustHandshakeSecretProp)) { - obj["trustHandshakeSecret"] = trustHandshakeSecretProp - } - - obj, err = resourceActiveDirectoryDomainTrustEncoder(d, meta, obj) - if err != nil { - return err - } - - log.Printf("[DEBUG] Deleting DomainTrust %q", d.Id()) - - res, err := 
SendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DomainTrust") - } - - err = ActiveDirectoryOperationWaitTime( - config, res, project, "Deleting DomainTrust", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting DomainTrust %q: %#v", d.Id(), res) - return nil -} - -func resourceActiveDirectoryDomainTrustImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/domains/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/domains/{{domain}}/{{target_domain_name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNestedActiveDirectoryDomainTrustTargetDomainName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedActiveDirectoryDomainTrustTrustType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedActiveDirectoryDomainTrustTrustDirection(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedActiveDirectoryDomainTrustSelectiveAuthentication(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func expandNestedActiveDirectoryDomainTrustTargetDomainName(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedActiveDirectoryDomainTrustTrustType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedActiveDirectoryDomainTrustTrustDirection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedActiveDirectoryDomainTrustSelectiveAuthentication(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandNestedActiveDirectoryDomainTrustTrustHandshakeSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceActiveDirectoryDomainTrustEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - wrappedReq := map[string]interface{}{ - "trust": obj, - } - return wrappedReq, nil -} - -func resourceActiveDirectoryDomainTrustUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - wrappedReq := map[string]interface{}{ - "targetDomainName": obj["targetDomainName"], - "targetDnsIpAddresses": obj["targetDnsIpAddresses"], - } - return wrappedReq, nil -} - -func flattenNestedActiveDirectoryDomainTrust(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["trusts"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value 
trusts. Actual value: %v", v) - } - - _, item, err := resourceActiveDirectoryDomainTrustFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceActiveDirectoryDomainTrustFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedTargetDomainName, err := expandNestedActiveDirectoryDomainTrustTargetDomainName(d.Get("target_domain_name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedTargetDomainName := flattenNestedActiveDirectoryDomainTrustTargetDomainName(expectedTargetDomainName, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - // Decode list item before comparing. - item, err := resourceActiveDirectoryDomainTrustDecoder(d, meta, item) - if err != nil { - return -1, nil, err - } - - itemTargetDomainName := flattenNestedActiveDirectoryDomainTrustTargetDomainName(item["targetDomainName"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemTargetDomainName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedTargetDomainName))) && !reflect.DeepEqual(itemTargetDomainName, expectedFlattenedTargetDomainName) { - log.Printf("[DEBUG] Skipping item with targetDomainName= %#v, looking for %#v)", itemTargetDomainName, expectedFlattenedTargetDomainName) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} -func resourceActiveDirectoryDomainTrustDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - v, ok := res["domainTrust"] - if !ok || v == nil { - return res, nil - } - - return v.(map[string]interface{}), nil -} 
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_alloydb_backup.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_alloydb_backup.go deleted file mode 100644 index 7cae932774..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_alloydb_backup.go +++ /dev/null @@ -1,466 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAlloydbBackup() *schema.Resource { - return &schema.Resource{ - Create: resourceAlloydbBackupCreate, - Read: resourceAlloydbBackupRead, - Update: resourceAlloydbBackupUpdate, - Delete: resourceAlloydbBackupDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAlloydbBackupImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "backup_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the alloydb backup.`, - }, - "cluster_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: projectNumberDiffSuppress, - Description: `The full resource name of the backup source 
cluster (e.g., projects/{project}/locations/{location}/clusters/{clusterId}).`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `User-provided description of the backup.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `User-defined labels for the alloydb backup.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "location": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The location where the alloydb backup should reside.`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Time the Backup was created in UTC.`, - }, - "etag": { - Type: schema.TypeString, - Computed: true, - Description: `A hash of the resource.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backupId}`, - }, - "reconciling": { - Type: schema.TypeBool, - Computed: true, - Description: `If true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `The current state of the backup.`, - }, - "uid": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. The system-generated UID of the resource. 
The UID is assigned when the resource is created, and it is retained until it is deleted.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Time the Backup was updated in UTC.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAlloydbBackupCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - clusterNameProp, err := expandAlloydbBackupClusterName(d.Get("cluster_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cluster_name"); !isEmptyValue(reflect.ValueOf(clusterNameProp)) && (ok || !reflect.DeepEqual(v, clusterNameProp)) { - obj["clusterName"] = clusterNameProp - } - labelsProp, err := expandAlloydbBackupLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - descriptionProp, err := expandAlloydbBackupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - obj, err = resourceAlloydbBackupEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/backups?backupId={{backup_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Backup: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Backup: %s", err) - } - 
billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Backup: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = AlloydbOperationWaitTime( - config, res, project, "Creating Backup", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Backup: %s", err) - } - - log.Printf("[DEBUG] Finished creating Backup %q: %#v", d.Id(), res) - - return resourceAlloydbBackupRead(d, meta) -} - -func resourceAlloydbBackupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Backup: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AlloydbBackup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - - if err 
:= d.Set("name", flattenAlloydbBackupName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("uid", flattenAlloydbBackupUid(res["uid"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("cluster_name", flattenAlloydbBackupClusterName(res["clusterName"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("labels", flattenAlloydbBackupLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("create_time", flattenAlloydbBackupCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("update_time", flattenAlloydbBackupUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("state", flattenAlloydbBackupState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("description", flattenAlloydbBackupDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("reconciling", flattenAlloydbBackupReconciling(res["reconciling"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("etag", flattenAlloydbBackupEtag(res["etag"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - - return nil -} - -func resourceAlloydbBackupUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Backup: %s", err) - } - billingProject = project - - obj := 
make(map[string]interface{}) - labelsProp, err := expandAlloydbBackupLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - obj, err = resourceAlloydbBackupEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Backup %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Backup %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Backup %q: %#v", d.Id(), res) - } - - err = AlloydbOperationWaitTime( - config, res, project, "Updating Backup", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAlloydbBackupRead(d, meta) -} - -func resourceAlloydbBackupDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Backup: %s", err) - } - billingProject = 
project - - url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Backup %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Backup") - } - - err = AlloydbOperationWaitTime( - config, res, project, "Deleting Backup", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Backup %q: %#v", d.Id(), res) - return nil -} - -func resourceAlloydbBackupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/backups/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenAlloydbBackupName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbBackupUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbBackupClusterName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbBackupLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenAlloydbBackupCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbBackupUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbBackupState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbBackupDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbBackupReconciling(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbBackupEtag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAlloydbBackupClusterName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbBackupLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandAlloydbBackupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceAlloydbBackupEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // The only other available type is AUTOMATED which cannot be set manually - obj["type"] = "ON_DEMAND" - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_alloydb_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_alloydb_cluster.go deleted file mode 100644 index 4b92d26fdd..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_alloydb_cluster.go +++ /dev/null @@ 
-1,1131 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAlloydbCluster() *schema.Resource { - return &schema.Resource{ - Create: resourceAlloydbClusterCreate, - Read: resourceAlloydbClusterRead, - Update: resourceAlloydbClusterUpdate, - Delete: resourceAlloydbClusterDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAlloydbClusterImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "cluster_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the alloydb cluster.`, - }, - "network": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: projectNumberDiffSuppress, - Description: `The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: - -"projects/{projectNumber}/global/networks/{network_id}".`, - }, - "automated_backup_policy": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `The automated backup policy for this cluster. - -If no policy is provided then the default policy will be used. 
The default policy takes one backup a day, has a backup window of 1 hour, and retains backups for 14 days.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "weekly_schedule": { - Type: schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Weekly schedule for the Backup.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "start_times": { - Type: schema.TypeList, - Required: true, - Description: `The times during the day to start a backup. At least one start time must be provided. The start times are assumed to be in UTC and to be an exact hour (e.g., 04:00:00).`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hours": { - Type: schema.TypeInt, - Optional: true, - Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, - }, - "minutes": { - Type: schema.TypeInt, - Optional: true, - Description: `Minutes of hour of day. Must be from 0 to 59.`, - }, - "nanos": { - Type: schema.TypeInt, - Optional: true, - Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, - }, - "seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, - }, - }, - }, - }, - "days_of_week": { - Type: schema.TypeList, - Optional: true, - Description: `The days of the week to perform a backup. At least one day of the week must be provided. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - MinItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), - }, - }, - }, - }, - }, - "backup_window": { - Type: schema.TypeString, - Optional: true, - Description: `The length of the time window during which a backup can be taken. If a backup does not succeed within this time window, it will be canceled and considered failed. - -The backup window must be at least 5 minutes long. There is no upper bound on the window. If not set, it will default to 1 hour. - -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether automated backups are enabled.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Labels to apply to backups created using this configuration.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "location": { - Type: schema.TypeString, - Optional: true, - Description: `The location where the backup will be stored. 
Currently, the only supported option is to store the backup in the same region as the cluster.`, - }, - "quantity_based_retention": { - Type: schema.TypeList, - Optional: true, - Description: `Quantity-based Backup retention policy to retain recent backups.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "count": { - Type: schema.TypeInt, - Optional: true, - Description: `The number of backups to retain.`, - }, - }, - }, - ConflictsWith: []string{"automated_backup_policy.0.time_based_retention"}, - }, - "time_based_retention": { - Type: schema.TypeList, - Optional: true, - Description: `Time-based Backup retention policy.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "retention_period": { - Type: schema.TypeString, - Optional: true, - Description: `The retention period. -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - }, - }, - ConflictsWith: []string{"automated_backup_policy.0.quantity_based_retention"}, - }, - }, - }, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `User-settable and human-readable display name for the Cluster.`, - }, - "initial_user": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Initial user to setup during cluster creation.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "password": { - Type: schema.TypeString, - Required: true, - Description: `The initial password for the user.`, - Sensitive: true, - }, - "user": { - Type: schema.TypeString, - Optional: true, - Description: `The database username.`, - }, - }, - }, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `User-defined labels for the alloydb cluster.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "location": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The location where the alloydb 
cluster should reside.`, - }, - "backup_source": { - Type: schema.TypeList, - Computed: true, - Description: `Cluster created from backup.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "backup_name": { - Type: schema.TypeString, - Optional: true, - Description: `The name of the backup resource.`, - }, - }, - }, - }, - "database_version": { - Type: schema.TypeString, - Computed: true, - Description: `The database engine major version. This is an output-only field and it's populated at the Cluster creation time. This field cannot be changed after cluster creation.`, - }, - "migration_source": { - Type: schema.TypeList, - Computed: true, - Description: `Cluster created via DMS migration.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "host_port": { - Type: schema.TypeString, - Optional: true, - Description: `The host and port of the on-premises instance in host:port format`, - }, - "reference_id": { - Type: schema.TypeString, - Optional: true, - Description: `Place holder for the external source identifier(e.g DMS job name) that created the cluster.`, - }, - "source_type": { - Type: schema.TypeString, - Optional: true, - Description: `Type of migration source.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the cluster resource.`, - }, - "uid": { - Type: schema.TypeString, - Computed: true, - Description: `The system-generated UID of the resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAlloydbClusterCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandAlloydbClusterLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - networkProp, err := expandAlloydbClusterNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - displayNameProp, err := expandAlloydbClusterDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - initialUserProp, err := expandAlloydbClusterInitialUser(d.Get("initial_user"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("initial_user"); !isEmptyValue(reflect.ValueOf(initialUserProp)) && (ok || !reflect.DeepEqual(v, initialUserProp)) { - obj["initialUser"] = initialUserProp - } - automatedBackupPolicyProp, err := expandAlloydbClusterAutomatedBackupPolicy(d.Get("automated_backup_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("automated_backup_policy"); !isEmptyValue(reflect.ValueOf(automatedBackupPolicyProp)) && (ok || !reflect.DeepEqual(v, automatedBackupPolicyProp)) { - obj["automatedBackupPolicy"] = automatedBackupPolicyProp - } - - url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters?clusterId={{cluster_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Cluster: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Cluster: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err 
== nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Cluster: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = AlloydbOperationWaitTime( - config, res, project, "Creating Cluster", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Cluster: %s", err) - } - - log.Printf("[DEBUG] Finished creating Cluster %q: %#v", d.Id(), res) - - return resourceAlloydbClusterRead(d, meta) -} - -func resourceAlloydbClusterRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Cluster: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AlloydbCluster %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Cluster: %s", err) - } - - if err := d.Set("name", flattenAlloydbClusterName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Cluster: %s", 
err) - } - if err := d.Set("uid", flattenAlloydbClusterUid(res["uid"], d, config)); err != nil { - return fmt.Errorf("Error reading Cluster: %s", err) - } - if err := d.Set("labels", flattenAlloydbClusterLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Cluster: %s", err) - } - if err := d.Set("network", flattenAlloydbClusterNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading Cluster: %s", err) - } - if err := d.Set("display_name", flattenAlloydbClusterDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Cluster: %s", err) - } - if err := d.Set("database_version", flattenAlloydbClusterDatabaseVersion(res["databaseVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading Cluster: %s", err) - } - if err := d.Set("automated_backup_policy", flattenAlloydbClusterAutomatedBackupPolicy(res["automatedBackupPolicy"], d, config)); err != nil { - return fmt.Errorf("Error reading Cluster: %s", err) - } - if err := d.Set("backup_source", flattenAlloydbClusterBackupSource(res["backupSource"], d, config)); err != nil { - return fmt.Errorf("Error reading Cluster: %s", err) - } - if err := d.Set("migration_source", flattenAlloydbClusterMigrationSource(res["migrationSource"], d, config)); err != nil { - return fmt.Errorf("Error reading Cluster: %s", err) - } - - return nil -} - -func resourceAlloydbClusterUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Cluster: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandAlloydbClusterLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - networkProp, err := expandAlloydbClusterNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - displayNameProp, err := expandAlloydbClusterDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - automatedBackupPolicyProp, err := expandAlloydbClusterAutomatedBackupPolicy(d.Get("automated_backup_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("automated_backup_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, automatedBackupPolicyProp)) { - obj["automatedBackupPolicy"] = automatedBackupPolicyProp - } - - url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Cluster %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("network") { - updateMask = append(updateMask, "network") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("automated_backup_policy") { - updateMask = append(updateMask, "automatedBackupPolicy") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Cluster %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Cluster %q: %#v", d.Id(), res) - } - - err = AlloydbOperationWaitTime( - config, res, project, "Updating Cluster", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAlloydbClusterRead(d, meta) -} - -func resourceAlloydbClusterDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Cluster: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Cluster %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Cluster") - } - - err = AlloydbOperationWaitTime( - config, res, project, "Deleting Cluster", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Cluster %q: %#v", d.Id(), res) - return nil -} - -func resourceAlloydbClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := 
parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenAlloydbClusterName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterDatabaseVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterAutomatedBackupPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["backup_window"] = - flattenAlloydbClusterAutomatedBackupPolicyBackupWindow(original["backupWindow"], d, config) - transformed["location"] = - flattenAlloydbClusterAutomatedBackupPolicyLocation(original["location"], d, config) - transformed["labels"] = - flattenAlloydbClusterAutomatedBackupPolicyLabels(original["labels"], d, config) - transformed["weekly_schedule"] = - flattenAlloydbClusterAutomatedBackupPolicyWeeklySchedule(original["weeklySchedule"], d, config) - 
transformed["time_based_retention"] = - flattenAlloydbClusterAutomatedBackupPolicyTimeBasedRetention(original["timeBasedRetention"], d, config) - transformed["quantity_based_retention"] = - flattenAlloydbClusterAutomatedBackupPolicyQuantityBasedRetention(original["quantityBasedRetention"], d, config) - transformed["enabled"] = - flattenAlloydbClusterAutomatedBackupPolicyEnabled(original["enabled"], d, config) - return []interface{}{transformed} -} -func flattenAlloydbClusterAutomatedBackupPolicyBackupWindow(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterAutomatedBackupPolicyLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterAutomatedBackupPolicyLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterAutomatedBackupPolicyWeeklySchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["days_of_week"] = - flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleDaysOfWeek(original["daysOfWeek"], d, config) - transformed["start_times"] = - flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimes(original["startTimes"], d, config) - return []interface{}{transformed} -} -func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleDaysOfWeek(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not 
include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "hours": flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesHours(original["hours"], d, config), - "minutes": flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesMinutes(original["minutes"], d, config), - "seconds": flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesSeconds(original["seconds"], d, config), - "nanos": flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesNanos(original["nanos"], d, config), - }) - } - return transformed -} -func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := 
v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenAlloydbClusterAutomatedBackupPolicyTimeBasedRetention(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["retention_period"] = - flattenAlloydbClusterAutomatedBackupPolicyTimeBasedRetentionRetentionPeriod(original["retentionPeriod"], d, config) - return []interface{}{transformed} -} -func flattenAlloydbClusterAutomatedBackupPolicyTimeBasedRetentionRetentionPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterAutomatedBackupPolicyQuantityBasedRetention(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["count"] = - flattenAlloydbClusterAutomatedBackupPolicyQuantityBasedRetentionCount(original["count"], d, config) - return []interface{}{transformed} -} -func flattenAlloydbClusterAutomatedBackupPolicyQuantityBasedRetentionCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := 
StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenAlloydbClusterAutomatedBackupPolicyEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterBackupSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["backup_name"] = - flattenAlloydbClusterBackupSourceBackupName(original["backupName"], d, config) - return []interface{}{transformed} -} -func flattenAlloydbClusterBackupSourceBackupName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterMigrationSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_port"] = - flattenAlloydbClusterMigrationSourceHostPort(original["hostPort"], d, config) - transformed["reference_id"] = - flattenAlloydbClusterMigrationSourceReferenceId(original["referenceId"], d, config) - transformed["source_type"] = - flattenAlloydbClusterMigrationSourceSourceType(original["sourceType"], d, config) - return []interface{}{transformed} -} -func flattenAlloydbClusterMigrationSourceHostPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterMigrationSourceReferenceId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbClusterMigrationSourceSourceType(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - return v -} - -func expandAlloydbClusterLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandAlloydbClusterNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbClusterDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbClusterInitialUser(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUser, err := expandAlloydbClusterInitialUserUser(original["user"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUser); val.IsValid() && !isEmptyValue(val) { - transformed["user"] = transformedUser - } - - transformedPassword, err := expandAlloydbClusterInitialUserPassword(original["password"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !isEmptyValue(val) { - transformed["password"] = transformedPassword - } - - return transformed, nil -} - -func expandAlloydbClusterInitialUserUser(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbClusterInitialUserPassword(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbClusterAutomatedBackupPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] 
- original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackupWindow, err := expandAlloydbClusterAutomatedBackupPolicyBackupWindow(original["backup_window"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBackupWindow); val.IsValid() && !isEmptyValue(val) { - transformed["backupWindow"] = transformedBackupWindow - } - - transformedLocation, err := expandAlloydbClusterAutomatedBackupPolicyLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - transformedLabels, err := expandAlloydbClusterAutomatedBackupPolicyLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - transformedWeeklySchedule, err := expandAlloydbClusterAutomatedBackupPolicyWeeklySchedule(original["weekly_schedule"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWeeklySchedule); val.IsValid() && !isEmptyValue(val) { - transformed["weeklySchedule"] = transformedWeeklySchedule - } - - transformedTimeBasedRetention, err := expandAlloydbClusterAutomatedBackupPolicyTimeBasedRetention(original["time_based_retention"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeBasedRetention); val.IsValid() && !isEmptyValue(val) { - transformed["timeBasedRetention"] = transformedTimeBasedRetention - } - - transformedQuantityBasedRetention, err := expandAlloydbClusterAutomatedBackupPolicyQuantityBasedRetention(original["quantity_based_retention"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedQuantityBasedRetention); val.IsValid() && !isEmptyValue(val) { - 
transformed["quantityBasedRetention"] = transformedQuantityBasedRetention - } - - transformedEnabled, err := expandAlloydbClusterAutomatedBackupPolicyEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - return transformed, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyBackupWindow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyWeeklySchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDaysOfWeek, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleDaysOfWeek(original["days_of_week"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDaysOfWeek); val.IsValid() && !isEmptyValue(val) { - transformed["daysOfWeek"] = transformedDaysOfWeek - } - - transformedStartTimes, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimes(original["start_times"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStartTimes); val.IsValid() && !isEmptyValue(val) { - transformed["startTimes"] = 
transformedStartTimes - } - - return transformed, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleDaysOfWeek(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHours, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesHours(original["hours"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { - transformed["hours"] = transformedHours - } - - transformedMinutes, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesMinutes(original["minutes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { - transformed["minutes"] = transformedMinutes - } - - transformedSeconds, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesHours(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyTimeBasedRetention(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRetentionPeriod, err := expandAlloydbClusterAutomatedBackupPolicyTimeBasedRetentionRetentionPeriod(original["retention_period"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRetentionPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["retentionPeriod"] = transformedRetentionPeriod - } - - return transformed, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyTimeBasedRetentionRetentionPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyQuantityBasedRetention(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCount, err := expandAlloydbClusterAutomatedBackupPolicyQuantityBasedRetentionCount(original["count"], d, config) - if err != nil { 
- return nil, err - } else if val := reflect.ValueOf(transformedCount); val.IsValid() && !isEmptyValue(val) { - transformed["count"] = transformedCount - } - - return transformed, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyQuantityBasedRetentionCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbClusterAutomatedBackupPolicyEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_alloydb_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_alloydb_instance.go deleted file mode 100644 index 3721ef07f4..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_alloydb_instance.go +++ /dev/null @@ -1,745 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAlloydbInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceAlloydbInstanceCreate, - Read: resourceAlloydbInstanceRead, - Update: resourceAlloydbInstanceUpdate, - Delete: resourceAlloydbInstanceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAlloydbInstanceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "cluster": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Identifies the alloydb cluster. Must be in the format -'projects/{project}/locations/{location}/clusters/{cluster_id}'`, - }, - "instance_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the alloydb instance.`, - }, - "instance_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"PRIMARY", "READ_POOL"}), - Description: `The type of the instance. Possible values: ["PRIMARY", "READ_POOL"]`, - }, - "annotations": { - Type: schema.TypeMap, - Optional: true, - Description: `Annotations to allow client tools to store small amount of arbitrary data. This is distinct from labels.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "availability_type": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"AVAILABILITY_TYPE_UNSPECIFIED", "ZONAL", "REGIONAL", ""}), - Description: `Availability type of an Instance. Defaults to REGIONAL for both primary and read instances. 
Note that primary and read instances can have different availability types. Possible values: ["AVAILABILITY_TYPE_UNSPECIFIED", "ZONAL", "REGIONAL"]`, - }, - "database_flags": { - Type: schema.TypeMap, - Optional: true, - Description: `Database flags. Set at instance level. * They are copied from primary instance on read instance creation. * Read instances can set new or override existing flags that are relevant for reads, e.g. for enabling columnar cache on a read instance. Flags set on read instance may or may not be present on primary.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `User-settable and human-readable display name for the Instance.`, - }, - "gce_zone": { - Type: schema.TypeString, - Optional: true, - Description: `The Compute Engine zone that the instance should serve from, per https://cloud.google.com/compute/docs/regions-zones This can ONLY be specified for ZONAL instances. If present for a REGIONAL instance, an error will be thrown. 
If this is absent for a ZONAL instance, instance is created in a random zone with available capacity.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `User-defined labels for the alloydb instance.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "machine_config": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Configurations for the machines that host the underlying database engine.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The number of CPU's in the VM instance.`, - }, - }, - }, - }, - "read_pool_config": { - Type: schema.TypeList, - Optional: true, - Description: `Read pool specific config.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "node_count": { - Type: schema.TypeInt, - Optional: true, - Description: `Read capacity, i.e. number of nodes in a read pool instance.`, - }, - }, - }, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Time the Instance was created in UTC.`, - }, - "ip_address": { - Type: schema.TypeString, - Computed: true, - Description: `The IP address for the Instance. This is the connection endpoint for an end-user application.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the instance resource.`, - }, - "reconciling": { - Type: schema.TypeBool, - Computed: true, - Description: `Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. 
This can happen due to user-triggered updates or system actions like failover or maintenance.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `The current state of the alloydb instance.`, - }, - "uid": { - Type: schema.TypeString, - Computed: true, - Description: `The system-generated UID of the resource.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Time the Instance was updated in UTC.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAlloydbInstanceCreate(d *schema.ResourceData, meta interface{}) error { - var project string - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandAlloydbInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - annotationsProp, err := expandAlloydbInstanceAnnotations(d.Get("annotations"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("annotations"); !isEmptyValue(reflect.ValueOf(annotationsProp)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { - obj["annotations"] = annotationsProp - } - displayNameProp, err := expandAlloydbInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - gceZoneProp, err := expandAlloydbInstanceGceZone(d.Get("gce_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("gce_zone"); !isEmptyValue(reflect.ValueOf(gceZoneProp)) && (ok || !reflect.DeepEqual(v, gceZoneProp)) { - obj["gceZone"] = gceZoneProp - } - 
databaseFlagsProp, err := expandAlloydbInstanceDatabaseFlags(d.Get("database_flags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("database_flags"); !isEmptyValue(reflect.ValueOf(databaseFlagsProp)) && (ok || !reflect.DeepEqual(v, databaseFlagsProp)) { - obj["databaseFlags"] = databaseFlagsProp - } - availabilityTypeProp, err := expandAlloydbInstanceAvailabilityType(d.Get("availability_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("availability_type"); !isEmptyValue(reflect.ValueOf(availabilityTypeProp)) && (ok || !reflect.DeepEqual(v, availabilityTypeProp)) { - obj["availabilityType"] = availabilityTypeProp - } - instanceTypeProp, err := expandAlloydbInstanceInstanceType(d.Get("instance_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance_type"); !isEmptyValue(reflect.ValueOf(instanceTypeProp)) && (ok || !reflect.DeepEqual(v, instanceTypeProp)) { - obj["instanceType"] = instanceTypeProp - } - readPoolConfigProp, err := expandAlloydbInstanceReadPoolConfig(d.Get("read_pool_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("read_pool_config"); !isEmptyValue(reflect.ValueOf(readPoolConfigProp)) && (ok || !reflect.DeepEqual(v, readPoolConfigProp)) { - obj["readPoolConfig"] = readPoolConfigProp - } - machineConfigProp, err := expandAlloydbInstanceMachineConfig(d.Get("machine_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("machine_config"); !isEmptyValue(reflect.ValueOf(machineConfigProp)) && (ok || !reflect.DeepEqual(v, machineConfigProp)) { - obj["machineConfig"] = machineConfigProp - } - - url, err := replaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances?instanceId={{instance_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Instance: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if 
bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Instance: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{cluster}}/instances/{{instance_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = AlloydbOperationWaitTime( - config, res, project, "Creating Instance", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Instance: %s", err) - } - - log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) - - return resourceAlloydbInstanceRead(d, meta) -} - -func resourceAlloydbInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AlloydbInstance %q", d.Id())) - } - - if err := d.Set("name", flattenAlloydbInstanceName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("create_time", flattenAlloydbInstanceCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("update_time", flattenAlloydbInstanceUpdateTime(res["updateTime"], d, config)); 
err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("uid", flattenAlloydbInstanceUid(res["uid"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("labels", flattenAlloydbInstanceLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("annotations", flattenAlloydbInstanceAnnotations(res["annotations"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("state", flattenAlloydbInstanceState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("gce_zone", flattenAlloydbInstanceGceZone(res["gceZone"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("reconciling", flattenAlloydbInstanceReconciling(res["reconciling"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("database_flags", flattenAlloydbInstanceDatabaseFlags(res["databaseFlags"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("availability_type", flattenAlloydbInstanceAvailabilityType(res["availabilityType"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("instance_type", flattenAlloydbInstanceInstanceType(res["instanceType"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("ip_address", flattenAlloydbInstanceIpAddress(res["ipAddress"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("read_pool_config", flattenAlloydbInstanceReadPoolConfig(res["readPoolConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("machine_config", 
flattenAlloydbInstanceMachineConfig(res["machineConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - - return nil -} - -func resourceAlloydbInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - var project string - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - labelsProp, err := expandAlloydbInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - annotationsProp, err := expandAlloydbInstanceAnnotations(d.Get("annotations"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("annotations"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { - obj["annotations"] = annotationsProp - } - displayNameProp, err := expandAlloydbInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - gceZoneProp, err := expandAlloydbInstanceGceZone(d.Get("gce_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("gce_zone"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gceZoneProp)) { - obj["gceZone"] = gceZoneProp - } - databaseFlagsProp, err := expandAlloydbInstanceDatabaseFlags(d.Get("database_flags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("database_flags"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, databaseFlagsProp)) { - obj["databaseFlags"] = databaseFlagsProp - } - availabilityTypeProp, err := 
expandAlloydbInstanceAvailabilityType(d.Get("availability_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("availability_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, availabilityTypeProp)) { - obj["availabilityType"] = availabilityTypeProp - } - readPoolConfigProp, err := expandAlloydbInstanceReadPoolConfig(d.Get("read_pool_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("read_pool_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, readPoolConfigProp)) { - obj["readPoolConfig"] = readPoolConfigProp - } - machineConfigProp, err := expandAlloydbInstanceMachineConfig(d.Get("machine_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("machine_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, machineConfigProp)) { - obj["machineConfig"] = machineConfigProp - } - - url, err := replaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("annotations") { - updateMask = append(updateMask, "annotations") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("gce_zone") { - updateMask = append(updateMask, "gceZone") - } - - if d.HasChange("database_flags") { - updateMask = append(updateMask, "databaseFlags") - } - - if d.HasChange("availability_type") { - updateMask = append(updateMask, "availabilityType") - } - - if d.HasChange("read_pool_config") { - updateMask = append(updateMask, "readPoolConfig") - } - - if d.HasChange("machine_config") { - updateMask = append(updateMask, "machineConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't 
set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) - } - - err = AlloydbOperationWaitTime( - config, res, project, "Updating Instance", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAlloydbInstanceRead(d, meta) -} - -func resourceAlloydbInstanceDelete(d *schema.ResourceData, meta interface{}) error { - var project string - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Instance %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Instance") - } - - err = AlloydbOperationWaitTime( - config, res, project, "Deleting Instance", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) - return nil -} - -func resourceAlloydbInstanceImport(d *schema.ResourceData, meta interface{}) 
([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{ - "(?P.+)/instances/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{cluster}}/instances/{{instance_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenAlloydbInstanceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbInstanceCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbInstanceUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbInstanceUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbInstanceLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbInstanceAnnotations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbInstanceState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbInstanceGceZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbInstanceReconciling(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbInstanceDatabaseFlags(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbInstanceAvailabilityType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbInstanceInstanceType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return 
v -} - -func flattenAlloydbInstanceIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAlloydbInstanceReadPoolConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["node_count"] = - flattenAlloydbInstanceReadPoolConfigNodeCount(original["nodeCount"], d, config) - return []interface{}{transformed} -} -func flattenAlloydbInstanceReadPoolConfigNodeCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenAlloydbInstanceMachineConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cpu_count"] = - flattenAlloydbInstanceMachineConfigCpuCount(original["cpuCount"], d, config) - return []interface{}{transformed} -} -func flattenAlloydbInstanceMachineConfigCpuCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandAlloydbInstanceLabels(v interface{}, d TerraformResourceData, config 
*Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandAlloydbInstanceAnnotations(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandAlloydbInstanceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbInstanceGceZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbInstanceDatabaseFlags(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandAlloydbInstanceAvailabilityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbInstanceInstanceType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbInstanceReadPoolConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNodeCount, err := expandAlloydbInstanceReadPoolConfigNodeCount(original["node_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNodeCount); val.IsValid() && !isEmptyValue(val) { - transformed["nodeCount"] = transformedNodeCount - } 
- - return transformed, nil -} - -func expandAlloydbInstanceReadPoolConfigNodeCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAlloydbInstanceMachineConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCpuCount, err := expandAlloydbInstanceMachineConfigCpuCount(original["cpu_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCpuCount); val.IsValid() && !isEmptyValue(val) { - transformed["cpuCount"] = transformedCpuCount - } - - return transformed, nil -} - -func expandAlloydbInstanceMachineConfigCpuCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_addons_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_addons_config.go deleted file mode 100644 index c758c1e383..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_addons_config.go +++ /dev/null @@ -1,663 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceApigeeAddonsConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeAddonsConfigCreate, - Read: resourceApigeeAddonsConfigRead, - Update: resourceApigeeAddonsConfigUpdate, - Delete: resourceApigeeAddonsConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeAddonsConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "org": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the Apigee organization.`, - }, - "addons_config": { - Type: schema.TypeList, - Optional: true, - Description: `Addon configurations of the Apigee organization.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "advanced_api_ops_config": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration for the Monetization add-on.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Flag that specifies whether the Advanced API Ops add-on is enabled.`, - }, - }, - }, - }, - "api_security_config": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration for the Monetization add-on.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Flag that specifies whether the Advanced API Ops add-on is enabled.`, - }, - "expires_at": { - Type: schema.TypeString, - Computed: true, - Description: `Flag that specifies whether the 
Advanced API Ops add-on is enabled.`, - }, - }, - }, - }, - "connectors_platform_config": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration for the Monetization add-on.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Flag that specifies whether the Advanced API Ops add-on is enabled.`, - }, - "expires_at": { - Type: schema.TypeString, - Computed: true, - Description: `Flag that specifies whether the Advanced API Ops add-on is enabled.`, - }, - }, - }, - }, - "integration_config": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration for the Monetization add-on.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Flag that specifies whether the Advanced API Ops add-on is enabled.`, - }, - }, - }, - }, - "monetization_config": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration for the Monetization add-on.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Flag that specifies whether the Advanced API Ops add-on is enabled.`, - }, - }, - }, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeAddonsConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - addonsConfigProp, err := expandApigeeAddonsConfigAddonsConfig(d.Get("addons_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("addons_config"); !isEmptyValue(reflect.ValueOf(addonsConfigProp)) && (ok || !reflect.DeepEqual(v, addonsConfigProp)) { - obj["addonsConfig"] = addonsConfigProp - } - - url, err := 
replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org}}:setAddons") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new AddonsConfig: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating AddonsConfig: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "organizations/{{org}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ApigeeOperationWaitTime( - config, res, "Creating AddonsConfig", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create AddonsConfig: %s", err) - } - - log.Printf("[DEBUG] Finished creating AddonsConfig %q: %#v", d.Id(), res) - - return resourceApigeeAddonsConfigRead(d, meta) -} - -func resourceApigeeAddonsConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeAddonsConfig %q", d.Id())) - } - - if err := d.Set("addons_config", flattenApigeeAddonsConfigAddonsConfig(res["addonsConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading 
AddonsConfig: %s", err) - } - - return nil -} - -func resourceApigeeAddonsConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - addonsConfigProp, err := expandApigeeAddonsConfigAddonsConfig(d.Get("addons_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("addons_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, addonsConfigProp)) { - obj["addonsConfig"] = addonsConfigProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org}}:setAddons") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating AddonsConfig %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating AddonsConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating AddonsConfig %q: %#v", d.Id(), res) - } - - err = ApigeeOperationWaitTime( - config, res, "Updating AddonsConfig", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceApigeeAddonsConfigRead(d, meta) -} - -func resourceApigeeAddonsConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org}}:setAddons") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting AddonsConfig %q", d.Id()) - - // err == nil 
indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "AddonsConfig") - } - - err = ApigeeOperationWaitTime( - config, res, "Deleting AddonsConfig", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting AddonsConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeAddonsConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - parts := strings.Split(d.Get("org").(string), "/") - - var projectId string - switch len(parts) { - case 1: - projectId = parts[0] - case 2: - projectId = parts[1] - default: - return nil, fmt.Errorf( - "Saw %s when the org is expected to have shape %s or %s", - d.Get("org"), - "{{org}}", - "organizations/{{org}}", - ) - } - - if err := d.Set("org", projectId); err != nil { - return nil, fmt.Errorf("Error setting organization: %s", err) - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "organizations/{{org}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeAddonsConfigAddonsConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["advanced_api_ops_config"] = - 
flattenApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfig(original["advancedApiOpsConfig"], d, config) - transformed["integration_config"] = - flattenApigeeAddonsConfigAddonsConfigIntegrationConfig(original["integrationConfig"], d, config) - transformed["monetization_config"] = - flattenApigeeAddonsConfigAddonsConfigMonetizationConfig(original["monetizationConfig"], d, config) - transformed["api_security_config"] = - flattenApigeeAddonsConfigAddonsConfigApiSecurityConfig(original["apiSecurityConfig"], d, config) - transformed["connectors_platform_config"] = - flattenApigeeAddonsConfigAddonsConfigConnectorsPlatformConfig(original["connectorsPlatformConfig"], d, config) - return []interface{}{transformed} -} -func flattenApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = - flattenApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfigEnabled(original["enabled"], d, config) - return []interface{}{transformed} -} -func flattenApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeAddonsConfigAddonsConfigIntegrationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = - flattenApigeeAddonsConfigAddonsConfigIntegrationConfigEnabled(original["enabled"], d, config) - return []interface{}{transformed} -} -func flattenApigeeAddonsConfigAddonsConfigIntegrationConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenApigeeAddonsConfigAddonsConfigMonetizationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = - flattenApigeeAddonsConfigAddonsConfigMonetizationConfigEnabled(original["enabled"], d, config) - return []interface{}{transformed} -} -func flattenApigeeAddonsConfigAddonsConfigMonetizationConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeAddonsConfigAddonsConfigApiSecurityConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = - flattenApigeeAddonsConfigAddonsConfigApiSecurityConfigEnabled(original["enabled"], d, config) - transformed["expires_at"] = - flattenApigeeAddonsConfigAddonsConfigApiSecurityConfigExpiresAt(original["expiresAt"], d, config) - return []interface{}{transformed} -} -func flattenApigeeAddonsConfigAddonsConfigApiSecurityConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeAddonsConfigAddonsConfigApiSecurityConfigExpiresAt(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeAddonsConfigAddonsConfigConnectorsPlatformConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = - flattenApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigEnabled(original["enabled"], d, config) - transformed["expires_at"] = - 
flattenApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigExpiresAt(original["expiresAt"], d, config) - return []interface{}{transformed} -} -func flattenApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigExpiresAt(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeAddonsConfigAddonsConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAdvancedApiOpsConfig, err := expandApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfig(original["advanced_api_ops_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAdvancedApiOpsConfig); val.IsValid() && !isEmptyValue(val) { - transformed["advancedApiOpsConfig"] = transformedAdvancedApiOpsConfig - } - - transformedIntegrationConfig, err := expandApigeeAddonsConfigAddonsConfigIntegrationConfig(original["integration_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIntegrationConfig); val.IsValid() && !isEmptyValue(val) { - transformed["integrationConfig"] = transformedIntegrationConfig - } - - transformedMonetizationConfig, err := expandApigeeAddonsConfigAddonsConfigMonetizationConfig(original["monetization_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMonetizationConfig); val.IsValid() && !isEmptyValue(val) { - transformed["monetizationConfig"] = transformedMonetizationConfig - } - - transformedApiSecurityConfig, err := expandApigeeAddonsConfigAddonsConfigApiSecurityConfig(original["api_security_config"], d, config) - if err != nil { 
- return nil, err - } else if val := reflect.ValueOf(transformedApiSecurityConfig); val.IsValid() && !isEmptyValue(val) { - transformed["apiSecurityConfig"] = transformedApiSecurityConfig - } - - transformedConnectorsPlatformConfig, err := expandApigeeAddonsConfigAddonsConfigConnectorsPlatformConfig(original["connectors_platform_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConnectorsPlatformConfig); val.IsValid() && !isEmptyValue(val) { - transformed["connectorsPlatformConfig"] = transformedConnectorsPlatformConfig - } - - return transformed, nil -} - -func expandApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfigEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - return transformed, nil -} - -func expandApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeAddonsConfigAddonsConfigIntegrationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandApigeeAddonsConfigAddonsConfigIntegrationConfigEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnabled); 
val.IsValid() && !isEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - return transformed, nil -} - -func expandApigeeAddonsConfigAddonsConfigIntegrationConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeAddonsConfigAddonsConfigMonetizationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandApigeeAddonsConfigAddonsConfigMonetizationConfigEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - return transformed, nil -} - -func expandApigeeAddonsConfigAddonsConfigMonetizationConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeAddonsConfigAddonsConfigApiSecurityConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandApigeeAddonsConfigAddonsConfigApiSecurityConfigEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - transformedExpiresAt, err := expandApigeeAddonsConfigAddonsConfigApiSecurityConfigExpiresAt(original["expires_at"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExpiresAt); val.IsValid() && 
!isEmptyValue(val) { - transformed["expiresAt"] = transformedExpiresAt - } - - return transformed, nil -} - -func expandApigeeAddonsConfigAddonsConfigApiSecurityConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeAddonsConfigAddonsConfigApiSecurityConfigExpiresAt(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeAddonsConfigAddonsConfigConnectorsPlatformConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - transformedExpiresAt, err := expandApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigExpiresAt(original["expires_at"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExpiresAt); val.IsValid() && !isEmptyValue(val) { - transformed["expiresAt"] = transformedExpiresAt - } - - return transformed, nil -} - -func expandApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigExpiresAt(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_endpoint_attachment.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_endpoint_attachment.go deleted file mode 100644 index 1cc63f6d10..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_endpoint_attachment.go +++ /dev/null @@ -1,307 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceApigeeEndpointAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeEndpointAttachmentCreate, - Read: resourceApigeeEndpointAttachmentRead, - Delete: resourceApigeeEndpointAttachmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeEndpointAttachmentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "endpoint_attachment_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the endpoint attachment.`, - }, - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Location of the endpoint attachment.`, - }, - "org_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee Organization associated with the Apigee instance, -in the format 
'organizations/{{org_name}}'.`, - }, - "service_attachment": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Format: projects/*/regions/*/serviceAttachments/*`, - }, - "connection_state": { - Type: schema.TypeString, - Computed: true, - Description: `State of the endpoint attachment connection to the service attachment.`, - }, - "host": { - Type: schema.TypeString, - Computed: true, - Description: `Host that can be used in either HTTP Target Endpoint directly, or as the host in Target Server.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Name of the Endpoint Attachment in the following format: -organizations/{organization}/endpointAttachments/{endpointAttachment}.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeEndpointAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - locationProp, err := expandApigeeEndpointAttachmentLocation(d.Get("location"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("location"); !isEmptyValue(reflect.ValueOf(locationProp)) && (ok || !reflect.DeepEqual(v, locationProp)) { - obj["location"] = locationProp - } - serviceAttachmentProp, err := expandApigeeEndpointAttachmentServiceAttachment(d.Get("service_attachment"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_attachment"); !isEmptyValue(reflect.ValueOf(serviceAttachmentProp)) && (ok || !reflect.DeepEqual(v, serviceAttachmentProp)) { - obj["serviceAttachment"] = serviceAttachmentProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/endpointAttachments?endpointAttachmentId={{endpoint_attachment_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new EndpointAttachment: %#v", obj) - billingProject := "" - - // err 
== nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating EndpointAttachment: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{org_id}}/endpointAttachments/{{endpoint_attachment_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = ApigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating EndpointAttachment", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create EndpointAttachment: %s", err) - } - - if err := d.Set("name", flattenApigeeEndpointAttachmentName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "{{org_id}}/endpointAttachments/{{endpoint_attachment_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating EndpointAttachment %q: %#v", d.Id(), res) - - return resourceApigeeEndpointAttachmentRead(d, meta) -} - -func resourceApigeeEndpointAttachmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/endpointAttachments/{{endpoint_attachment_id}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeEndpointAttachment %q", d.Id())) - } - - if err := d.Set("name", flattenApigeeEndpointAttachmentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading EndpointAttachment: %s", err) - } - if err := d.Set("location", flattenApigeeEndpointAttachmentLocation(res["location"], d, config)); err != nil { - return fmt.Errorf("Error reading EndpointAttachment: %s", err) - } - if err := d.Set("host", flattenApigeeEndpointAttachmentHost(res["host"], d, config)); err != nil { - return fmt.Errorf("Error reading EndpointAttachment: %s", err) - } - if err := d.Set("service_attachment", flattenApigeeEndpointAttachmentServiceAttachment(res["serviceAttachment"], d, config)); err != nil { - return fmt.Errorf("Error reading EndpointAttachment: %s", err) - } - if err := d.Set("connection_state", flattenApigeeEndpointAttachmentConnectionState(res["connectionState"], d, config)); err != nil { - return fmt.Errorf("Error reading EndpointAttachment: 
%s", err) - } - - return nil -} - -func resourceApigeeEndpointAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/endpointAttachments/{{endpoint_attachment_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting EndpointAttachment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EndpointAttachment") - } - - err = ApigeeOperationWaitTime( - config, res, "Deleting EndpointAttachment", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting EndpointAttachment %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeEndpointAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats cannot import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - nameParts := strings.Split(d.Get("name").(string), "/") - if len(nameParts) == 4 { - // `organizations/{{org_name}}/endpointAttachment/{{endpoint_attachment_id}}` - orgId := fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("endpoint_attachment_id", nameParts[3]); err != nil { - return nil, fmt.Errorf("Error setting endpoint_attachment_id: %s", err) - } - } else { - return nil, 
fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("name"), - "organizations/{{org_name}}/environments/{{name}}") - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeEndpointAttachmentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEndpointAttachmentLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEndpointAttachmentHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEndpointAttachmentServiceAttachment(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEndpointAttachmentConnectionState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeEndpointAttachmentLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEndpointAttachmentServiceAttachment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_env_keystore.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_env_keystore.go deleted file mode 100644 index c5a97e826b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_env_keystore.go +++ /dev/null @@ -1,209 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// 
---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceApigeeEnvKeystore() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeEnvKeystoreCreate, - Read: resourceApigeeEnvKeystoreRead, - Delete: resourceApigeeEnvKeystoreDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeEnvKeystoreImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(1 * time.Minute), - Delete: schema.DefaultTimeout(1 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "env_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee environment group associated with the Apigee environment, -in the format 'organizations/{{org_name}}/environments/{{env_name}}'.`, - }, - "name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name of the newly created keystore.`, - }, - "aliases": { - Type: schema.TypeList, - Computed: true, - Description: `Aliases in this keystore.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeEnvKeystoreCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandApigeeEnvKeystoreName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); 
!isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/keystores") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new EnvKeystore: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating EnvKeystore: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{env_id}}/keystores/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating EnvKeystore %q: %#v", d.Id(), res) - - return resourceApigeeEnvKeystoreRead(d, meta) -} - -func resourceApigeeEnvKeystoreRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/keystores/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeEnvKeystore %q", d.Id())) - } - - if err := d.Set("aliases", flattenApigeeEnvKeystoreAliases(res["aliases"], d, config)); err != nil { - return fmt.Errorf("Error reading EnvKeystore: %s", err) - } - if err := d.Set("name", flattenApigeeEnvKeystoreName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading EnvKeystore: 
%s", err) - } - - return nil -} - -func resourceApigeeEnvKeystoreDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/keystores/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting EnvKeystore %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EnvKeystore") - } - - log.Printf("[DEBUG] Finished deleting EnvKeystore %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeEnvKeystoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats cannot import fields with forward slashes in their value - if err := parseImportId([]string{ - "(?P.+)/keystores/(?P.+)", - "(?P.+)/(?P.+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{env_id}}/keystores/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeEnvKeystoreAliases(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvKeystoreName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeEnvKeystoreName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_env_references.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_env_references.go deleted file mode 100644 index 70ebd36b36..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_env_references.go +++ /dev/null @@ -1,263 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceApigeeEnvReferences() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeEnvReferencesCreate, - Read: resourceApigeeEnvReferencesRead, - Delete: resourceApigeeEnvReferencesDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeEnvReferencesImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(1 * time.Minute), - Delete: schema.DefaultTimeout(1 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "env_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee environment group associated with the Apigee environment, -in the format 'organizations/{{org_name}}/environments/{{env_name}}'.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Required. The resource id of this reference. 
Values must match the regular expression [\w\s-.]+.`, - }, - "refers": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Required. The id of the resource to which this reference refers. Must be the id of a resource that exists in the parent environment and is of the given resourceType.`, - }, - "resource_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The type of resource referred to by this reference. Valid values are 'KeyStore' or 'TrustStore'.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Optional. A human-readable description of this reference.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeEnvReferencesCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandApigeeEnvReferencesName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandApigeeEnvReferencesDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - resourceTypeProp, err := expandApigeeEnvReferencesResourceType(d.Get("resource_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("resource_type"); !isEmptyValue(reflect.ValueOf(resourceTypeProp)) && (ok || !reflect.DeepEqual(v, resourceTypeProp)) { - obj["resourceType"] = resourceTypeProp - } - refersProp, err := expandApigeeEnvReferencesRefers(d.Get("refers"), d, config) - if err != nil 
{ - return err - } else if v, ok := d.GetOkExists("refers"); !isEmptyValue(reflect.ValueOf(refersProp)) && (ok || !reflect.DeepEqual(v, refersProp)) { - obj["refers"] = refersProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/references/") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new EnvReferences: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating EnvReferences: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{env_id}}/references/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating EnvReferences %q: %#v", d.Id(), res) - - return resourceApigeeEnvReferencesRead(d, meta) -} - -func resourceApigeeEnvReferencesRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/references/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeEnvReferences %q", d.Id())) - } - - if err := d.Set("name", flattenApigeeEnvReferencesName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading EnvReferences: %s", err) - } - if err := d.Set("description", 
flattenApigeeEnvReferencesDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading EnvReferences: %s", err) - } - if err := d.Set("resource_type", flattenApigeeEnvReferencesResourceType(res["resourceType"], d, config)); err != nil { - return fmt.Errorf("Error reading EnvReferences: %s", err) - } - if err := d.Set("refers", flattenApigeeEnvReferencesRefers(res["refers"], d, config)); err != nil { - return fmt.Errorf("Error reading EnvReferences: %s", err) - } - - return nil -} - -func resourceApigeeEnvReferencesDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/references/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting EnvReferences %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EnvReferences") - } - - log.Printf("[DEBUG] Finished deleting EnvReferences %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeEnvReferencesImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats cannot import fields with forward slashes in their value - if err := parseImportId([]string{ - "(?P.+)/references/(?P.+)", - "(?P.+)/(?P.+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{env_id}}/references/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - 
d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeEnvReferencesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvReferencesDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvReferencesResourceType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvReferencesRefers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeEnvReferencesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvReferencesDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvReferencesResourceType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvReferencesRefers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_envgroup.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_envgroup.go deleted file mode 100644 index 9b6eacd144..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_envgroup.go +++ /dev/null @@ -1,337 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. 
-// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceApigeeEnvgroup() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeEnvgroupCreate, - Read: resourceApigeeEnvgroupRead, - Update: resourceApigeeEnvgroupUpdate, - Delete: resourceApigeeEnvgroupDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeEnvgroupImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource ID of the environment group.`, - }, - "org_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee Organization associated with the Apigee environment group, -in the format 'organizations/{{org_name}}'.`, - }, - "hostnames": { - Type: schema.TypeList, - Optional: true, - Description: `Hostnames of the environment group.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeEnvgroupCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandApigeeEnvgroupName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - hostnamesProp, err := 
expandApigeeEnvgroupHostnames(d.Get("hostnames"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("hostnames"); !isEmptyValue(reflect.ValueOf(hostnamesProp)) && (ok || !reflect.DeepEqual(v, hostnamesProp)) { - obj["hostnames"] = hostnamesProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/envgroups") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Envgroup: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Envgroup: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{org_id}}/envgroups/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = ApigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating Envgroup", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Envgroup: %s", err) - } - - if err := d.Set("name", flattenApigeeEnvgroupName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "{{org_id}}/envgroups/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Envgroup %q: %#v", d.Id(), res) - - return resourceApigeeEnvgroupRead(d, meta) -} - -func resourceApigeeEnvgroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/envgroups/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeEnvgroup %q", d.Id())) - } - - if err := d.Set("name", flattenApigeeEnvgroupName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Envgroup: %s", err) - } - if err := d.Set("hostnames", flattenApigeeEnvgroupHostnames(res["hostnames"], d, config)); err != nil { - return fmt.Errorf("Error reading Envgroup: %s", err) - } - - return nil -} - -func resourceApigeeEnvgroupUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - hostnamesProp, err := expandApigeeEnvgroupHostnames(d.Get("hostnames"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("hostnames"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hostnamesProp)) { - obj["hostnames"] = hostnamesProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/envgroups/{{name}}") - if err != nil { - return 
err - } - - log.Printf("[DEBUG] Updating Envgroup %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("hostnames") { - updateMask = append(updateMask, "hostnames") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Envgroup %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Envgroup %q: %#v", d.Id(), res) - } - - err = ApigeeOperationWaitTime( - config, res, "Updating Envgroup", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceApigeeEnvgroupRead(d, meta) -} - -func resourceApigeeEnvgroupDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/envgroups/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Envgroup %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Envgroup") - } - - err = ApigeeOperationWaitTime( - config, res, "Deleting Envgroup", userAgent, - d.Timeout(schema.TimeoutDelete)) - - 
if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Envgroup %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeEnvgroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats cannot import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - nameParts := strings.Split(d.Get("name").(string), "/") - if len(nameParts) == 4 { - // `organizations/{{org_name}}/envgroups/{{name}}` - orgId := fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("name", nameParts[3]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - } else if len(nameParts) == 3 { - // `organizations/{{org_name}}/{{name}}` - orgId := fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("name", nameParts[2]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, fmt.Errorf( - "Saw %s when the name is expected to have shape %s or %s", - d.Get("name"), - "organizations/{{org_name}}/envgroups/{{name}}", - "organizations/{{org_name}}/{{name}}") - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{org_id}}/envgroups/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeEnvgroupName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvgroupHostnames(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeEnvgroupName(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvgroupHostnames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_envgroup_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_envgroup_attachment.go deleted file mode 100644 index acc9b682d7..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_envgroup_attachment.go +++ /dev/null @@ -1,238 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceApigeeEnvgroupAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeEnvgroupAttachmentCreate, - Read: resourceApigeeEnvgroupAttachmentRead, - Delete: resourceApigeeEnvgroupAttachmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeEnvgroupAttachmentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "envgroup_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee environment group associated with the Apigee environment, -in the format 'organizations/{{org_name}}/envgroups/{{envgroup_name}}'.`, - }, - "environment": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource ID of the environment.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the newly created attachment (output parameter).`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeEnvgroupAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - environmentProp, err := expandApigeeEnvgroupAttachmentEnvironment(d.Get("environment"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("environment"); !isEmptyValue(reflect.ValueOf(environmentProp)) && (ok || !reflect.DeepEqual(v, environmentProp)) { - obj["environment"] = environmentProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{envgroup_id}}/attachments") - if err != nil { - 
return err - } - - log.Printf("[DEBUG] Creating new EnvgroupAttachment: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating EnvgroupAttachment: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{envgroup_id}}/attachments/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = ApigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating EnvgroupAttachment", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create EnvgroupAttachment: %s", err) - } - - if err := d.Set("name", flattenApigeeEnvgroupAttachmentName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "{{envgroup_id}}/attachments/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating EnvgroupAttachment %q: %#v", d.Id(), res) - - return resourceApigeeEnvgroupAttachmentRead(d, meta) -} - -func resourceApigeeEnvgroupAttachmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{envgroup_id}}/attachments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeEnvgroupAttachment %q", d.Id())) - } - - if err := d.Set("environment", flattenApigeeEnvgroupAttachmentEnvironment(res["environment"], d, config)); err != nil { - return fmt.Errorf("Error reading EnvgroupAttachment: %s", err) - } - if err := d.Set("name", flattenApigeeEnvgroupAttachmentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading EnvgroupAttachment: %s", err) - } - - return nil -} - -func resourceApigeeEnvgroupAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{envgroup_id}}/attachments/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting EnvgroupAttachment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, 
config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EnvgroupAttachment") - } - - err = ApigeeOperationWaitTime( - config, res, "Deleting EnvgroupAttachment", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting EnvgroupAttachment %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeEnvgroupAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats cannot import fields with forward slashes in their value - if err := parseImportId([]string{ - "(?P.+)/attachments/(?P.+)", - "(?P.+)/(?P.+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{envgroup_id}}/attachments/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeEnvgroupAttachmentEnvironment(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvgroupAttachmentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeEnvgroupAttachmentEnvironment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_environment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_environment.go deleted file mode 100644 index 9f04a661f7..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_environment.go 
+++ /dev/null @@ -1,529 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceApigeeEnvironment() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeEnvironmentCreate, - Read: resourceApigeeEnvironmentRead, - Update: resourceApigeeEnvironmentUpdate, - Delete: resourceApigeeEnvironmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeEnvironmentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource ID of the environment.`, - }, - "org_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee Organization associated with the Apigee environment, -in the format 'organizations/{{org_name}}'.`, - }, - "api_proxy_type": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"API_PROXY_TYPE_UNSPECIFIED", "PROGRAMMABLE", "CONFIGURABLE", ""}), - Description: `Optional. API Proxy type supported by the environment. The type can be set when creating -the Environment and cannot be changed. 
Possible values: ["API_PROXY_TYPE_UNSPECIFIED", "PROGRAMMABLE", "CONFIGURABLE"]`, - }, - "deployment_type": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"DEPLOYMENT_TYPE_UNSPECIFIED", "PROXY", "ARCHIVE", ""}), - Description: `Optional. Deployment type supported by the environment. The deployment type can be -set when creating the environment and cannot be changed. When you enable archive -deployment, you will be prevented from performing a subset of actions within the -environment, including: -Managing the deployment of API proxy or shared flow revisions; -Creating, updating, or deleting resource files; -Creating, updating, or deleting target servers. Possible values: ["DEPLOYMENT_TYPE_UNSPECIFIED", "PROXY", "ARCHIVE"]`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Description of the environment.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Display name of the environment.`, - }, - "node_config": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `NodeConfig for setting the min/max number of nodes associated with the environment.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_node_count": { - Type: schema.TypeString, - Optional: true, - Description: `The maximum total number of gateway nodes that the is reserved for all instances that -has the specified environment. If not specified, the default is determined by the -recommended maximum number of nodes for that gateway.`, - }, - "min_node_count": { - Type: schema.TypeString, - Optional: true, - Description: `The minimum total number of gateway nodes that the is reserved for all instances that -has the specified environment. 
If not specified, the default is determined by the -recommended minimum number of nodes for that gateway.`, - }, - "current_aggregate_node_count": { - Type: schema.TypeString, - Computed: true, - Description: `The current total number of gateway nodes that each environment currently has across -all instances.`, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeEnvironmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandApigeeEnvironmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - displayNameProp, err := expandApigeeEnvironmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandApigeeEnvironmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - deploymentTypeProp, err := expandApigeeEnvironmentDeploymentType(d.Get("deployment_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deployment_type"); !isEmptyValue(reflect.ValueOf(deploymentTypeProp)) && (ok || !reflect.DeepEqual(v, deploymentTypeProp)) { - obj["deploymentType"] = deploymentTypeProp - } - apiProxyTypeProp, err := expandApigeeEnvironmentApiProxyType(d.Get("api_proxy_type"), d, config) - if err != nil { - return 
err - } else if v, ok := d.GetOkExists("api_proxy_type"); !isEmptyValue(reflect.ValueOf(apiProxyTypeProp)) && (ok || !reflect.DeepEqual(v, apiProxyTypeProp)) { - obj["apiProxyType"] = apiProxyTypeProp - } - nodeConfigProp, err := expandApigeeEnvironmentNodeConfig(d.Get("node_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_config"); !isEmptyValue(reflect.ValueOf(nodeConfigProp)) && (ok || !reflect.DeepEqual(v, nodeConfigProp)) { - obj["nodeConfig"] = nodeConfigProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/environments") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Environment: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Environment: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{org_id}}/environments/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = ApigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating Environment", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Environment: %s", err) - } - - if err := d.Set("name", flattenApigeeEnvironmentName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "{{org_id}}/environments/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Environment %q: %#v", d.Id(), res) - - return resourceApigeeEnvironmentRead(d, meta) -} - -func resourceApigeeEnvironmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/environments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeEnvironment %q", d.Id())) - } - - if err := d.Set("name", flattenApigeeEnvironmentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("display_name", flattenApigeeEnvironmentDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("description", flattenApigeeEnvironmentDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("deployment_type", flattenApigeeEnvironmentDeploymentType(res["deploymentType"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("api_proxy_type", flattenApigeeEnvironmentApiProxyType(res["apiProxyType"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("node_config", flattenApigeeEnvironmentNodeConfig(res["nodeConfig"], d, config)); err != nil { - 
return fmt.Errorf("Error reading Environment: %s", err) - } - - return nil -} - -func resourceApigeeEnvironmentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - nodeConfigProp, err := expandApigeeEnvironmentNodeConfig(d.Get("node_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nodeConfigProp)) { - obj["nodeConfig"] = nodeConfigProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/environments/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Environment %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("node_config") { - updateMask = append(updateMask, "nodeConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Environment %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Environment %q: %#v", d.Id(), res) - } - - err = ApigeeOperationWaitTime( - config, res, "Updating Environment", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceApigeeEnvironmentRead(d, meta) -} - -func resourceApigeeEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - 
userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/environments/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Environment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Environment") - } - - err = ApigeeOperationWaitTime( - config, res, "Deleting Environment", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Environment %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeEnvironmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats cannot import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - nameParts := strings.Split(d.Get("name").(string), "/") - if len(nameParts) == 4 { - // `organizations/{{org_name}}/environments/{{name}}` - orgId := fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("name", nameParts[3]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - } else if len(nameParts) == 3 { - // `organizations/{{org_name}}/{{name}}` - orgId := fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("name", nameParts[2]); err != nil { - 
return nil, fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, fmt.Errorf( - "Saw %s when the name is expected to have shape %s or %s", - d.Get("name"), - "organizations/{{org_name}}/environments/{{name}}", - "organizations/{{org_name}}/{{name}}") - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{org_id}}/environments/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeEnvironmentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvironmentDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvironmentDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvironmentDeploymentType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvironmentApiProxyType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvironmentNodeConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["min_node_count"] = - flattenApigeeEnvironmentNodeConfigMinNodeCount(original["minNodeCount"], d, config) - transformed["max_node_count"] = - flattenApigeeEnvironmentNodeConfigMaxNodeCount(original["maxNodeCount"], d, config) - transformed["current_aggregate_node_count"] = - flattenApigeeEnvironmentNodeConfigCurrentAggregateNodeCount(original["currentAggregateNodeCount"], d, config) - return []interface{}{transformed} -} -func flattenApigeeEnvironmentNodeConfigMinNodeCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenApigeeEnvironmentNodeConfigMaxNodeCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvironmentNodeConfigCurrentAggregateNodeCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeEnvironmentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvironmentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvironmentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvironmentDeploymentType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvironmentApiProxyType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvironmentNodeConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinNodeCount, err := expandApigeeEnvironmentNodeConfigMinNodeCount(original["min_node_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinNodeCount); val.IsValid() && !isEmptyValue(val) { - transformed["minNodeCount"] = transformedMinNodeCount - } - - transformedMaxNodeCount, err := expandApigeeEnvironmentNodeConfigMaxNodeCount(original["max_node_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxNodeCount); val.IsValid() && !isEmptyValue(val) { - transformed["maxNodeCount"] = transformedMaxNodeCount - } - - transformedCurrentAggregateNodeCount, err := 
expandApigeeEnvironmentNodeConfigCurrentAggregateNodeCount(original["current_aggregate_node_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCurrentAggregateNodeCount); val.IsValid() && !isEmptyValue(val) { - transformed["currentAggregateNodeCount"] = transformedCurrentAggregateNodeCount - } - - return transformed, nil -} - -func expandApigeeEnvironmentNodeConfigMinNodeCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvironmentNodeConfigMaxNodeCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvironmentNodeConfigCurrentAggregateNodeCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_flowhook.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_flowhook.go deleted file mode 100644 index 5a406b27f8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_flowhook.go +++ /dev/null @@ -1,240 +0,0 @@ -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceApigeeFlowhook() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeFlowhookCreate, - Read: resourceApigeeFlowhookRead, - Delete: resourceApigeeFlowhookDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeFlowhookImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: 
`Description of the flow hook.`, - }, - "environment": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource ID of the environment.`, - }, - "flow_hook_point": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Where in the API call flow the flow hook is invoked. Must be one of PreProxyFlowHook, PostProxyFlowHook, PreTargetFlowHook, or PostTargetFlowHook.`, - }, - "org_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee Organization associated with the environment`, - }, - "sharedflow": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Id of the Sharedflow attaching to a flowhook point.`, - }, - "continue_on_error": { - Type: schema.TypeBool, - ForceNew: true, - Optional: true, - Default: true, - Description: `Flag that specifies whether execution should continue if the flow hook throws an exception. Set to true to continue execution. Set to false to stop execution if the flow hook throws an exception. 
Defaults to true.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeFlowhookCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandApigeeFlowhookDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - sharedflowProp, err := expandApigeeFlowhookSharedflow(d.Get("sharedflow"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("sharedflow"); !isEmptyValue(reflect.ValueOf(sharedflowProp)) && (ok || !reflect.DeepEqual(v, sharedflowProp)) { - obj["sharedFlow"] = sharedflowProp - } - continue_on_errorProp, err := expandApigeeFlowhookContinueOnError(d.Get("continue_on_error"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("continue_on_error"); !isEmptyValue(reflect.ValueOf(continue_on_errorProp)) && (ok || !reflect.DeepEqual(v, continue_on_errorProp)) { - obj["continueOnError"] = continue_on_errorProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/flowhooks/{{flow_hook_point}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Flowhook: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Flowhook: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, 
"organizations/{{org_id}}/environments/{{environment}}/flowhooks/{{flow_hook_point}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Flowhook %q: %#v", d.Id(), res) - - return resourceApigeeFlowhookRead(d, meta) -} - -func resourceApigeeFlowhookRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/flowhooks/{{flow_hook_point}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeFlowhook %q", d.Id())) - } - if res["sharedFlow"] == nil || res["sharedFlow"].(string) == "" { - //if response does not contain shared_flow field, then nothing is attached to this flowhook, we treat this "binding" resource non-existent - d.SetId("") - return nil - } - if err := d.Set("description", flattenApigeeFlowhookDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Flowhook: %s", err) - } - if err := d.Set("sharedflow", flattenApigeeFlowhookSharedflow(res["sharedFlow"], d, config)); err != nil { - return fmt.Errorf("Error reading Flowhook: %s", err) - } - if err := d.Set("continue_on_error", flattenApigeeFlowhookContinueOnError(res["continueOnError"], d, config)); err != nil { - return fmt.Errorf("Error reading Flowhook: %s", err) - } - - return nil -} - -func resourceApigeeFlowhookDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/flowhooks/{{flow_hook_point}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Flowhook %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Flowhook") - } - - log.Printf("[DEBUG] Finished deleting Flowhook %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeFlowhookImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "organizations/(?P[^/]+)/environments/(?P[^/]+)/flowhooks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/flowhooks/{{flow_hook_point}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeFlowhookDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeFlowhookSharedflow(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeFlowhookContinueOnError(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeFlowhookDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeFlowhookSharedflow(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeFlowhookContinueOnError(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_instance.go deleted file mode 100644 index f6f866e52a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_instance.go +++ /dev/null @@ -1,499 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// Supress diffs when the lists of project have the same number of entries to handle the case that -// API does not return what the user originally provided. Instead, API does some transformation. -// For example, user provides a list of project number, but API returns a list of project Id. 
-func projectListDiffSuppress(_, _, _ string, d *schema.ResourceData) bool { - return projectListDiffSuppressFunc(d) -} - -func projectListDiffSuppressFunc(d TerraformResourceDataChange) bool { - kLength := "consumer_accept_list.#" - oldLength, newLength := d.GetChange(kLength) - - oldInt, ok := oldLength.(int) - if !ok { - return false - } - - newInt, ok := newLength.(int) - if !ok { - return false - } - log.Printf("[DEBUG] - suppressing diff with oldInt %d, newInt %d", oldInt, newInt) - - return oldInt == newInt -} - -func ResourceApigeeInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeInstanceCreate, - Read: resourceApigeeInstanceRead, - Delete: resourceApigeeInstanceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeInstanceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(60 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Required. Compute Engine location where the instance resides.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Resource ID of the instance.`, - }, - "org_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee Organization associated with the Apigee instance, -in the format 'organizations/{{org_name}}'.`, - }, - "consumer_accept_list": { - Type: schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: projectListDiffSuppress, - Description: `Optional. Customer accept list represents the list of projects (id/number) on customer -side that can privately connect to the service attachment. It is an optional field -which the customers can provide during the instance creation. 
By default, the customer -project associated with the Apigee organization will be included to the list.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Description of the instance.`, - }, - "disk_encryption_key_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Customer Managed Encryption Key (CMEK) used for disk and volume encryption. Required for Apigee paid subscriptions only. -Use the following format: 'projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)'`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Display name of the instance.`, - }, - "ip_range": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `IP range represents the customer-provided CIDR block of length 22 that will be used for -the Apigee instance creation. This optional range, if provided, should be freely -available as part of larger named range the customer has allocated to the Service -Networking peering. If this is not provided, Apigee will automatically request for any -available /22 CIDR block from Service Networking. The customer should use this CIDR block -for configuring their firewall needs to allow traffic from Apigee. -Input format: "a.b.c.d/22"`, - }, - "peering_cidr_range": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The size of the CIDR block range that will be reserved by the instance. For valid values, -see [CidrRange](https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.instances#CidrRange) on the documentation.`, - }, - "host": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. 
Hostname or IP address of the exposed Apigee endpoint used by clients to connect to the service.`, - }, - "port": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Port number of the exposed Apigee endpoint.`, - }, - "service_attachment": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Resource name of the service attachment created for the instance in -the format: projects/*/regions/*/serviceAttachments/* Apigee customers can privately -forward traffic to this service attachment using the PSC endpoints.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeInstanceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandApigeeInstanceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - locationProp, err := expandApigeeInstanceLocation(d.Get("location"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("location"); !isEmptyValue(reflect.ValueOf(locationProp)) && (ok || !reflect.DeepEqual(v, locationProp)) { - obj["location"] = locationProp - } - peeringCidrRangeProp, err := expandApigeeInstancePeeringCidrRange(d.Get("peering_cidr_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peering_cidr_range"); !isEmptyValue(reflect.ValueOf(peeringCidrRangeProp)) && (ok || !reflect.DeepEqual(v, peeringCidrRangeProp)) { - obj["peeringCidrRange"] = peeringCidrRangeProp - } - ipRangeProp, err := expandApigeeInstanceIpRange(d.Get("ip_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_range"); !isEmptyValue(reflect.ValueOf(ipRangeProp)) && (ok || 
!reflect.DeepEqual(v, ipRangeProp)) { - obj["ipRange"] = ipRangeProp - } - descriptionProp, err := expandApigeeInstanceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandApigeeInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - diskEncryptionKeyNameProp, err := expandApigeeInstanceDiskEncryptionKeyName(d.Get("disk_encryption_key_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disk_encryption_key_name"); !isEmptyValue(reflect.ValueOf(diskEncryptionKeyNameProp)) && (ok || !reflect.DeepEqual(v, diskEncryptionKeyNameProp)) { - obj["diskEncryptionKeyName"] = diskEncryptionKeyNameProp - } - consumerAcceptListProp, err := expandApigeeInstanceConsumerAcceptList(d.Get("consumer_accept_list"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("consumer_accept_list"); !isEmptyValue(reflect.ValueOf(consumerAcceptListProp)) && (ok || !reflect.DeepEqual(v, consumerAcceptListProp)) { - obj["consumerAcceptList"] = consumerAcceptListProp - } - - lockName, err := replaceVars(d, config, "{{org_id}}/apigeeInstances") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/instances") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Instance: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp 
- } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isApigeeRetryableError) - if err != nil { - return fmt.Errorf("Error creating Instance: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{org_id}}/instances/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = ApigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating Instance", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Instance: %s", err) - } - - if err := d.Set("name", flattenApigeeInstanceName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "{{org_id}}/instances/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) - - return resourceApigeeInstanceRead(d, meta) -} - -func resourceApigeeInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/instances/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isApigeeRetryableError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeInstance %q", d.Id())) - } - - if err := d.Set("name", flattenApigeeInstanceName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("location", flattenApigeeInstanceLocation(res["location"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("peering_cidr_range", flattenApigeeInstancePeeringCidrRange(res["peeringCidrRange"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("description", flattenApigeeInstanceDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("display_name", flattenApigeeInstanceDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("disk_encryption_key_name", flattenApigeeInstanceDiskEncryptionKeyName(res["diskEncryptionKeyName"], d, config)); err != nil { - 
return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("host", flattenApigeeInstanceHost(res["host"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("port", flattenApigeeInstancePort(res["port"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("consumer_accept_list", flattenApigeeInstanceConsumerAcceptList(res["consumerAcceptList"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("service_attachment", flattenApigeeInstanceServiceAttachment(res["serviceAttachment"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - - return nil -} - -func resourceApigeeInstanceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "{{org_id}}/apigeeInstances") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/instances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Instance %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isApigeeRetryableError) - if err != nil { - return handleNotFoundError(err, d, "Instance") - } - - err = ApigeeOperationWaitTime( - config, res, "Deleting Instance", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) - return nil -} - 
-func resourceApigeeInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats cannot import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - nameParts := strings.Split(d.Get("name").(string), "/") - if len(nameParts) == 4 { - // `organizations/{{org_name}}/instances/{{name}}` - orgId := fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("name", nameParts[3]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - } else if len(nameParts) == 3 { - // `organizations/{{org_name}}/{{name}}` - orgId := fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("name", nameParts[2]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, fmt.Errorf( - "Saw %s when the name is expected to have shape %s or %s", - d.Get("name"), - "organizations/{{org_name}}/instances/{{name}}", - "organizations/{{org_name}}/{{name}}") - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{org_id}}/instances/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeInstanceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstancePeeringCidrRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceDescription(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceDiskEncryptionKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstancePort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceConsumerAcceptList(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceServiceAttachment(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeInstanceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeInstanceLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeInstancePeeringCidrRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeInstanceIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeInstanceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeInstanceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeInstanceDiskEncryptionKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeInstanceConsumerAcceptList(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_instance_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_instance_attachment.go deleted file mode 100644 index a23ae4e488..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_instance_attachment.go +++ /dev/null @@ -1,252 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceApigeeInstanceAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeInstanceAttachmentCreate, - Read: resourceApigeeInstanceAttachmentRead, - Delete: resourceApigeeInstanceAttachmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeInstanceAttachmentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "environment": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource ID of the environment.`, - }, - "instance_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee instance associated with the Apigee environment, -in the format 
'organisations/{{org_name}}/instances/{{instance_name}}'.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the newly created attachment (output parameter).`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeInstanceAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - environmentProp, err := expandApigeeInstanceAttachmentEnvironment(d.Get("environment"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("environment"); !isEmptyValue(reflect.ValueOf(environmentProp)) && (ok || !reflect.DeepEqual(v, environmentProp)) { - obj["environment"] = environmentProp - } - - lockName, err := replaceVars(d, config, "apigeeInstanceAttachments") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/attachments") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new InstanceAttachment: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating InstanceAttachment: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{instance_id}}/attachments/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = ApigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating 
InstanceAttachment", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create InstanceAttachment: %s", err) - } - - if err := d.Set("name", flattenApigeeInstanceAttachmentName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "{{instance_id}}/attachments/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating InstanceAttachment %q: %#v", d.Id(), res) - - return resourceApigeeInstanceAttachmentRead(d, meta) -} - -func resourceApigeeInstanceAttachmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/attachments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeInstanceAttachment %q", d.Id())) - } - - if err := d.Set("environment", flattenApigeeInstanceAttachmentEnvironment(res["environment"], d, config)); err != nil { - return fmt.Errorf("Error reading InstanceAttachment: %s", err) - } - if err := d.Set("name", flattenApigeeInstanceAttachmentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading InstanceAttachment: %s", err) - } - - return nil -} - -func resourceApigeeInstanceAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "apigeeInstanceAttachments") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/attachments/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting InstanceAttachment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "InstanceAttachment") - } - - err = ApigeeOperationWaitTime( - config, res, "Deleting InstanceAttachment", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting InstanceAttachment %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeInstanceAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats cannot import fields with forward slashes in their value - if err := parseImportId([]string{ - "(?P.+)/attachments/(?P.+)", - "(?P.+)/(?P.+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{instance_id}}/attachments/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeInstanceAttachmentEnvironment(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceAttachmentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - 
-func expandApigeeInstanceAttachmentEnvironment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_nat_address.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_nat_address.go deleted file mode 100644 index 34138736db..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_nat_address.go +++ /dev/null @@ -1,250 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceApigeeNatAddress() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeNatAddressCreate, - Read: resourceApigeeNatAddressRead, - Delete: resourceApigeeNatAddressDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeNatAddressImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "instance_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee instance associated with the Apigee environment, -in the format 'organizations/{{org_name}}/instances/{{instance_name}}'.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Resource ID of the NAT address.`, - }, - "ip_address": { - Type: schema.TypeString, - Computed: true, - Description: `The allocated NAT IP address.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `State of the NAT IP address.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeNatAddressCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandApigeeNatAddressName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/natAddresses") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating 
new NatAddress: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating NatAddress: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{instance_id}}/natAddresses/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = ApigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating NatAddress", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create NatAddress: %s", err) - } - - if err := d.Set("name", flattenApigeeNatAddressName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "{{instance_id}}/natAddresses/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating NatAddress %q: %#v", d.Id(), res) - - return resourceApigeeNatAddressRead(d, meta) -} - -func resourceApigeeNatAddressRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/natAddresses/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeNatAddress %q", d.Id())) - } - - if err := d.Set("name", flattenApigeeNatAddressName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading NatAddress: %s", err) - } - if err := d.Set("ip_address", flattenApigeeNatAddressIpAddress(res["ipAddress"], d, config)); err != nil { - return fmt.Errorf("Error reading NatAddress: %s", err) - } - if err := d.Set("state", flattenApigeeNatAddressState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading NatAddress: %s", err) - } - - return nil -} - -func resourceApigeeNatAddressDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/natAddresses/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting NatAddress %q", d.Id()) - - // err == nil indicates 
that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "NatAddress") - } - - err = ApigeeOperationWaitTime( - config, res, "Deleting NatAddress", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting NatAddress %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeNatAddressImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats cannot import fields with forward slashes in their value - if err := parseImportId([]string{ - "(?P.+)/natAddresses/(?P.+)", - "(?P.+)/(?P.+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{instance_id}}/natAddresses/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeNatAddressName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeNatAddressIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeNatAddressState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeNatAddressName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_organization.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_organization.go deleted file mode 100644 index 
100eb606ce..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_organization.go +++ /dev/null @@ -1,673 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceApigeeOrganization() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeOrganizationCreate, - Read: resourceApigeeOrganizationRead, - Update: resourceApigeeOrganizationUpdate, - Delete: resourceApigeeOrganizationDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeOrganizationImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "project_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The project ID associated with the Apigee organization.`, - }, - "analytics_region": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Primary GCP region for analytics data storage. 
For valid values, see [Create an Apigee organization](https://cloud.google.com/apigee/docs/api-platform/get-started/create-org).`, - }, - "authorized_network": { - Type: schema.TypeString, - Optional: true, - Description: `Compute Engine network used for Service Networking to be peered with Apigee runtime instances. -See [Getting started with the Service Networking API](https://cloud.google.com/service-infrastructure/docs/service-networking/getting-started). -Valid only when 'RuntimeType' is set to CLOUD. The value can be updated only when there are no runtime instances. For example: "default".`, - }, - "billing_type": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Billing type of the Apigee organization. See [Apigee pricing](https://cloud.google.com/apigee/pricing).`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Description of the Apigee organization.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `The display name of the Apigee organization.`, - }, - "properties": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Properties defined in the Apigee organization profile.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "property": { - Type: schema.TypeList, - Optional: true, - Description: `List of all properties in the object.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the property.`, - }, - "value": { - Type: schema.TypeString, - Optional: true, - Description: `Value of the property.`, - }, - }, - }, - }, - }, - }, - }, - "retention": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"DELETION_RETENTION_UNSPECIFIED", "MINIMUM", ""}), - Description: `Optional. 
This setting is applicable only for organizations that are soft-deleted (i.e., BillingType -is not EVALUATION). It controls how long Organization data will be retained after the initial delete -operation completes. During this period, the Organization may be restored to its last known state. -After this period, the Organization will no longer be able to be restored. Default value: "DELETION_RETENTION_UNSPECIFIED" Possible values: ["DELETION_RETENTION_UNSPECIFIED", "MINIMUM"]`, - Default: "DELETION_RETENTION_UNSPECIFIED", - }, - "runtime_database_encryption_key_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Cloud KMS key name used for encrypting the data that is stored and replicated across runtime instances. -Update is not allowed after the organization is created. -If not specified, a Google-Managed encryption key will be used. -Valid only when 'RuntimeType' is CLOUD. For example: 'projects/foo/locations/us/keyRings/bar/cryptoKeys/baz'.`, - }, - "runtime_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"CLOUD", "HYBRID", ""}), - Description: `Runtime type of the Apigee organization based on the Apigee subscription purchased. Default value: "CLOUD" Possible values: ["CLOUD", "HYBRID"]`, - Default: "CLOUD", - }, - "ca_certificate": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Base64-encoded public certificate for the root CA of the Apigee organization. -Valid only when 'RuntimeType' is CLOUD. A base64-encoded string.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Name of the Apigee organization.`, - }, - "subscription_type": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Subscription type of the Apigee organization. 
-Valid values include trial (free, limited, and for evaluation purposes only) or paid (full subscription has been purchased).`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeOrganizationCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandApigeeOrganizationDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandApigeeOrganizationDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - analyticsRegionProp, err := expandApigeeOrganizationAnalyticsRegion(d.Get("analytics_region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("analytics_region"); !isEmptyValue(reflect.ValueOf(analyticsRegionProp)) && (ok || !reflect.DeepEqual(v, analyticsRegionProp)) { - obj["analyticsRegion"] = analyticsRegionProp - } - authorizedNetworkProp, err := expandApigeeOrganizationAuthorizedNetwork(d.Get("authorized_network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authorized_network"); !isEmptyValue(reflect.ValueOf(authorizedNetworkProp)) && (ok || !reflect.DeepEqual(v, authorizedNetworkProp)) { - obj["authorizedNetwork"] = authorizedNetworkProp - } - runtimeTypeProp, err := expandApigeeOrganizationRuntimeType(d.Get("runtime_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_type"); 
!isEmptyValue(reflect.ValueOf(runtimeTypeProp)) && (ok || !reflect.DeepEqual(v, runtimeTypeProp)) { - obj["runtimeType"] = runtimeTypeProp - } - billingTypeProp, err := expandApigeeOrganizationBillingType(d.Get("billing_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("billing_type"); !isEmptyValue(reflect.ValueOf(billingTypeProp)) && (ok || !reflect.DeepEqual(v, billingTypeProp)) { - obj["billingType"] = billingTypeProp - } - runtimeDatabaseEncryptionKeyNameProp, err := expandApigeeOrganizationRuntimeDatabaseEncryptionKeyName(d.Get("runtime_database_encryption_key_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_database_encryption_key_name"); !isEmptyValue(reflect.ValueOf(runtimeDatabaseEncryptionKeyNameProp)) && (ok || !reflect.DeepEqual(v, runtimeDatabaseEncryptionKeyNameProp)) { - obj["runtimeDatabaseEncryptionKeyName"] = runtimeDatabaseEncryptionKeyNameProp - } - propertiesProp, err := expandApigeeOrganizationProperties(d.Get("properties"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("properties"); !isEmptyValue(reflect.ValueOf(propertiesProp)) && (ok || !reflect.DeepEqual(v, propertiesProp)) { - obj["properties"] = propertiesProp - } - - obj, err = resourceApigeeOrganizationEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations?parent=projects/{{project_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Organization: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Organization: %s", err) - } - - // Store the ID now - id, err := 
replaceVars(d, config, "organizations/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = ApigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating Organization", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Organization: %s", err) - } - - if err := d.Set("name", flattenApigeeOrganizationName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "organizations/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Organization %q: %#v", d.Id(), res) - - return resourceApigeeOrganizationRead(d, meta) -} - -func resourceApigeeOrganizationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeOrganization %q", d.Id())) - } - - if err := d.Set("name", flattenApigeeOrganizationName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("display_name", flattenApigeeOrganizationDisplayName(res["displayName"], d, config)); err != 
nil { - return fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("description", flattenApigeeOrganizationDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("analytics_region", flattenApigeeOrganizationAnalyticsRegion(res["analyticsRegion"], d, config)); err != nil { - return fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("authorized_network", flattenApigeeOrganizationAuthorizedNetwork(res["authorizedNetwork"], d, config)); err != nil { - return fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("runtime_type", flattenApigeeOrganizationRuntimeType(res["runtimeType"], d, config)); err != nil { - return fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("subscription_type", flattenApigeeOrganizationSubscriptionType(res["subscriptionType"], d, config)); err != nil { - return fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("billing_type", flattenApigeeOrganizationBillingType(res["billingType"], d, config)); err != nil { - return fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("ca_certificate", flattenApigeeOrganizationCaCertificate(res["caCertificate"], d, config)); err != nil { - return fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("runtime_database_encryption_key_name", flattenApigeeOrganizationRuntimeDatabaseEncryptionKeyName(res["runtimeDatabaseEncryptionKeyName"], d, config)); err != nil { - return fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("properties", flattenApigeeOrganizationProperties(res["properties"], d, config)); err != nil { - return fmt.Errorf("Error reading Organization: %s", err) - } - - return nil -} - -func resourceApigeeOrganizationUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandApigeeOrganizationDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandApigeeOrganizationDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - analyticsRegionProp, err := expandApigeeOrganizationAnalyticsRegion(d.Get("analytics_region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("analytics_region"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, analyticsRegionProp)) { - obj["analyticsRegion"] = analyticsRegionProp - } - authorizedNetworkProp, err := expandApigeeOrganizationAuthorizedNetwork(d.Get("authorized_network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authorized_network"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, authorizedNetworkProp)) { - obj["authorizedNetwork"] = authorizedNetworkProp - } - runtimeTypeProp, err := expandApigeeOrganizationRuntimeType(d.Get("runtime_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeTypeProp)) { - obj["runtimeType"] = runtimeTypeProp - } - billingTypeProp, err := expandApigeeOrganizationBillingType(d.Get("billing_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("billing_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, billingTypeProp)) { - 
obj["billingType"] = billingTypeProp - } - runtimeDatabaseEncryptionKeyNameProp, err := expandApigeeOrganizationRuntimeDatabaseEncryptionKeyName(d.Get("runtime_database_encryption_key_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_database_encryption_key_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeDatabaseEncryptionKeyNameProp)) { - obj["runtimeDatabaseEncryptionKeyName"] = runtimeDatabaseEncryptionKeyNameProp - } - propertiesProp, err := expandApigeeOrganizationProperties(d.Get("properties"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("properties"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, propertiesProp)) { - obj["properties"] = propertiesProp - } - - obj, err = resourceApigeeOrganizationEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Organization %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Organization %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Organization %q: %#v", d.Id(), res) - } - - err = ApigeeOperationWaitTime( - config, res, "Updating Organization", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceApigeeOrganizationRead(d, meta) -} - -func resourceApigeeOrganizationDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := 
"" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}?retention={{retention}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Organization %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Organization") - } - - log.Printf("[DEBUG] Finished deleting Organization %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeOrganizationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - parts := strings.Split(d.Get("name").(string), "/") - - var projectId string - switch len(parts) { - case 1: - projectId = parts[0] - case 2: - projectId = parts[1] - default: - return nil, fmt.Errorf( - "Saw %s when the name is expected to have shape %s or %s", - d.Get("name"), - "{{name}}", - "organizations/{{name}}", - ) - } - - if err := d.Set("name", projectId); err != nil { - return nil, fmt.Errorf("Error setting organization: %s", err) - } - - if err := d.Set("project_id", projectId); err != nil { - return nil, fmt.Errorf("Error setting organization: %s", err) - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "organizations/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeOrganizationName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenApigeeOrganizationDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationAnalyticsRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationAuthorizedNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationRuntimeType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationSubscriptionType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationBillingType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationCaCertificate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationRuntimeDatabaseEncryptionKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationProperties(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["property"] = - flattenApigeeOrganizationPropertiesProperty(original["property"], d, config) - return []interface{}{transformed} -} -func flattenApigeeOrganizationPropertiesProperty(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } 
- transformed = append(transformed, map[string]interface{}{ - "name": flattenApigeeOrganizationPropertiesPropertyName(original["name"], d, config), - "value": flattenApigeeOrganizationPropertiesPropertyValue(original["value"], d, config), - }) - } - return transformed -} -func flattenApigeeOrganizationPropertiesPropertyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationPropertiesPropertyValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeOrganizationDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeOrganizationDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeOrganizationAnalyticsRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeOrganizationAuthorizedNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeOrganizationRuntimeType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeOrganizationBillingType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeOrganizationRuntimeDatabaseEncryptionKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeOrganizationProperties(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProperty, err := expandApigeeOrganizationPropertiesProperty(original["property"], d, config) - if err != 
nil { - return nil, err - } else if val := reflect.ValueOf(transformedProperty); val.IsValid() && !isEmptyValue(val) { - transformed["property"] = transformedProperty - } - - return transformed, nil -} - -func expandApigeeOrganizationPropertiesProperty(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandApigeeOrganizationPropertiesPropertyName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedValue, err := expandApigeeOrganizationPropertiesPropertyValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - req = append(req, transformed) - } - return req, nil -} - -func expandApigeeOrganizationPropertiesPropertyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeOrganizationPropertiesPropertyValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceApigeeOrganizationEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - obj["name"] = d.Get("project_id").(string) - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_sharedflow_deployment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_sharedflow_deployment.go deleted file mode 100644 index 
6c11246372..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_sharedflow_deployment.go +++ /dev/null @@ -1,198 +0,0 @@ -package google - -import ( - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceApigeeSharedFlowDeployment() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeSharedflowDeploymentCreate, - Read: resourceApigeeSharedflowDeploymentRead, - Delete: resourceApigeeSharedflowDeploymentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeSharedflowDeploymentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "environment": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource ID of the environment.`, - }, - "org_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee Organization associated with the Apigee instance`, - }, - "revision": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Revision of the Sharedflow to be deployed.`, - }, - "service_account": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: `The service account represents the identity of the deployed proxy, and determines what permissions it has. 
The format must be {ACCOUNT_ID}@{PROJECT}.iam.gserviceaccount.com.`, - }, - "sharedflow_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Id of the Sharedflow to be deployed.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeSharedflowDeploymentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/sharedflows/{{sharedflow_id}}/revisions/{{revision}}/deployments?override=true&serviceAccount={{service_account}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new SharedflowDeployment at %s", url) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, nil, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating SharedflowDeployment: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/sharedflows/{{sharedflow_id}}/revisions/{{revision}}/deployments") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating SharedflowDeployment %q: %#v", d.Id(), res) - - return resourceApigeeSharedflowDeploymentRead(d, meta) -} - -func resourceApigeeSharedflowDeploymentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/sharedflows/{{sharedflow_id}}/revisions/{{revision}}/deployments") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - log.Printf("[DEBUG] Reading SharedflowDeployment at %s", url) - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeSharedflowDeployment %q", d.Id())) - } - log.Printf("[DEBUG] ApigeeSharedflowDeployment deployStartTime %s", res["deployStartTime"]) - - return nil -} - -func resourceApigeeSharedflowDeploymentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/sharedflows/{{sharedflow_id}}/revisions/{{revision}}/deployments") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting SharedflowDeployment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "SharedflowDeployment") - } - - log.Printf("[DEBUG] Finished deleting SharedflowDeployment %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeSharedflowDeploymentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - 
"organizations/(?P[^/]+)/environments/(?P[^/]+)/sharedflows/(?P[^/]+)/revisions/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/sharedflows/{{sharedflow_id}}/revisions/{{revision}}/deployments") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeSharedflowDeploymentOrgId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeSharedflowDeploymentEnvironment(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeSharedflowDeploymentSharedflowId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeSharedflowDeploymentRevision(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeSharedflowDeploymentServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_sync_authorization.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_sync_authorization.go deleted file mode 100644 index 7251a132f9..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_sync_authorization.go +++ /dev/null @@ -1,248 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will 
be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceApigeeSyncAuthorization() *schema.Resource { - return &schema.Resource{ - Create: resourceApigeeSyncAuthorizationCreate, - Read: resourceApigeeSyncAuthorizationRead, - Update: resourceApigeeSyncAuthorizationUpdate, - Delete: resourceApigeeSyncAuthorizationDelete, - - Importer: &schema.ResourceImporter{ - State: resourceApigeeSyncAuthorizationImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "identities": { - Type: schema.TypeList, - Required: true, - Description: `Array of service accounts to grant access to control plane resources, each specified using the following format: 'serviceAccount:service-account-name'. - -The 'service-account-name' is formatted like an email address. For example: my-synchronizer-manager-serviceAccount@my_project_id.iam.gserviceaccount.com - -You might specify multiple service accounts, for example, if you have multiple environments and wish to assign a unique service account to each one. - -The service accounts must have **Apigee Synchronizer Manager** role. 
See also [Create service accounts](https://cloud.google.com/apigee/docs/hybrid/v1.8/sa-about#create-the-service-accounts).`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the Apigee organization.`, - }, - "etag": { - Type: schema.TypeString, - Computed: true, - Description: `Entity tag (ETag) used for optimistic concurrency control as a way to help prevent simultaneous updates from overwriting each other. -Used internally during updates.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeSyncAuthorizationCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - identitiesProp, err := expandApigeeSyncAuthorizationIdentities(d.Get("identities"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("identities"); ok || !reflect.DeepEqual(v, identitiesProp) { - obj["identities"] = identitiesProp - } - etagProp, err := expandApigeeSyncAuthorizationEtag(d.Get("etag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("etag"); !isEmptyValue(reflect.ValueOf(etagProp)) && (ok || !reflect.DeepEqual(v, etagProp)) { - obj["etag"] = etagProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}:setSyncAuthorization") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new SyncAuthorization: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating SyncAuthorization: %s", err) - } - - // 
Store the ID now - id, err := replaceVars(d, config, "organizations/{{name}}/syncAuthorization") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating SyncAuthorization %q: %#v", d.Id(), res) - - return resourceApigeeSyncAuthorizationRead(d, meta) -} - -func resourceApigeeSyncAuthorizationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}:getSyncAuthorization") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "POST", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeSyncAuthorization %q", d.Id())) - } - - if err := d.Set("identities", flattenApigeeSyncAuthorizationIdentities(res["identities"], d, config)); err != nil { - return fmt.Errorf("Error reading SyncAuthorization: %s", err) - } - if err := d.Set("etag", flattenApigeeSyncAuthorizationEtag(res["etag"], d, config)); err != nil { - return fmt.Errorf("Error reading SyncAuthorization: %s", err) - } - - return nil -} - -func resourceApigeeSyncAuthorizationUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - identitiesProp, err := expandApigeeSyncAuthorizationIdentities(d.Get("identities"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("identities"); ok || !reflect.DeepEqual(v, identitiesProp) { - obj["identities"] = identitiesProp - } - etagProp, 
err := expandApigeeSyncAuthorizationEtag(d.Get("etag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("etag"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, etagProp)) { - obj["etag"] = etagProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}:setSyncAuthorization") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating SyncAuthorization %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating SyncAuthorization %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating SyncAuthorization %q: %#v", d.Id(), res) - } - - return resourceApigeeSyncAuthorizationRead(d, meta) -} - -func resourceApigeeSyncAuthorizationDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARNING] Apigee SyncAuthorization resources"+ - " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceApigeeSyncAuthorizationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "organizations/(?P[^/]+)/syncAuthorization", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "organizations/{{name}}/syncAuthorization") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenApigeeSyncAuthorizationIdentities(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeSyncAuthorizationEtag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeSyncAuthorizationIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeSyncAuthorizationEtag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_application_url_dispatch_rules.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_application_url_dispatch_rules.go deleted file mode 100644 index c44a3626f3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_application_url_dispatch_rules.go +++ /dev/null @@ -1,401 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// 
---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAppEngineApplicationUrlDispatchRules() *schema.Resource { - return &schema.Resource{ - Create: resourceAppEngineApplicationUrlDispatchRulesCreate, - Read: resourceAppEngineApplicationUrlDispatchRulesRead, - Update: resourceAppEngineApplicationUrlDispatchRulesUpdate, - Delete: resourceAppEngineApplicationUrlDispatchRulesDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAppEngineApplicationUrlDispatchRulesImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "dispatch_rules": { - Type: schema.TypeList, - Required: true, - Description: `Rules to match an HTTP request and dispatch that request to a service.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "path": { - Type: schema.TypeString, - Required: true, - Description: `Pathname within the host. Must start with a "/". A single "*" can be included at the end of the path. -The sum of the lengths of the domain and path may not exceed 100 characters.`, - }, - "service": { - Type: schema.TypeString, - Required: true, - Description: `Pathname within the host. Must start with a "/". A single "*" can be included at the end of the path. 
-The sum of the lengths of the domain and path may not exceed 100 characters.`, - }, - "domain": { - Type: schema.TypeString, - Optional: true, - Description: `Domain name to match against. The wildcard "*" is supported if specified before a period: "*.". -Defaults to matching all domains: "*".`, - Default: "*", - }, - }, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAppEngineApplicationUrlDispatchRulesCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - dispatchRulesProp, err := expandAppEngineApplicationUrlDispatchRulesDispatchRules(d.Get("dispatch_rules"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dispatch_rules"); !isEmptyValue(reflect.ValueOf(dispatchRulesProp)) && (ok || !reflect.DeepEqual(v, dispatchRulesProp)) { - obj["dispatchRules"] = dispatchRulesProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}?updateMask=dispatch_rules") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ApplicationUrlDispatchRules: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ApplicationUrlDispatchRules: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isAppEngineRetryableError) - if err != 
nil { - return fmt.Errorf("Error creating ApplicationUrlDispatchRules: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{project}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = AppEngineOperationWaitTime( - config, res, project, "Creating ApplicationUrlDispatchRules", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create ApplicationUrlDispatchRules: %s", err) - } - - log.Printf("[DEBUG] Finished creating ApplicationUrlDispatchRules %q: %#v", d.Id(), res) - - return resourceAppEngineApplicationUrlDispatchRulesRead(d, meta) -} - -func resourceAppEngineApplicationUrlDispatchRulesRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ApplicationUrlDispatchRules: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isAppEngineRetryableError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AppEngineApplicationUrlDispatchRules %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading ApplicationUrlDispatchRules: %s", err) - } - - if err := d.Set("dispatch_rules", flattenAppEngineApplicationUrlDispatchRulesDispatchRules(res["dispatchRules"], d, config)); err != nil { - return fmt.Errorf("Error reading 
ApplicationUrlDispatchRules: %s", err) - } - - return nil -} - -func resourceAppEngineApplicationUrlDispatchRulesUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ApplicationUrlDispatchRules: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - dispatchRulesProp, err := expandAppEngineApplicationUrlDispatchRulesDispatchRules(d.Get("dispatch_rules"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dispatch_rules"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dispatchRulesProp)) { - obj["dispatchRules"] = dispatchRulesProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}?updateMask=dispatch_rules") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating ApplicationUrlDispatchRules %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isAppEngineRetryableError) - - if err != nil { - return fmt.Errorf("Error updating ApplicationUrlDispatchRules %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating ApplicationUrlDispatchRules %q: %#v", d.Id(), res) - } - - err = AppEngineOperationWaitTime( - config, res, project, "Updating ApplicationUrlDispatchRules", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return 
resourceAppEngineApplicationUrlDispatchRulesRead(d, meta) -} - -func resourceAppEngineApplicationUrlDispatchRulesDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ApplicationUrlDispatchRules: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}?updateMask=dispatch_rules") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting ApplicationUrlDispatchRules %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isAppEngineRetryableError) - if err != nil { - return handleNotFoundError(err, d, "ApplicationUrlDispatchRules") - } - - err = AppEngineOperationWaitTime( - config, res, project, "Deleting ApplicationUrlDispatchRules", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting ApplicationUrlDispatchRules %q: %#v", d.Id(), res) - return nil -} - -func resourceAppEngineApplicationUrlDispatchRulesImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}") - if err != nil { - return nil, 
fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenAppEngineApplicationUrlDispatchRulesDispatchRules(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "domain": flattenAppEngineApplicationUrlDispatchRulesDispatchRulesDomain(original["domain"], d, config), - "path": flattenAppEngineApplicationUrlDispatchRulesDispatchRulesPath(original["path"], d, config), - "service": flattenAppEngineApplicationUrlDispatchRulesDispatchRulesService(original["service"], d, config), - }) - } - return transformed -} -func flattenAppEngineApplicationUrlDispatchRulesDispatchRulesDomain(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineApplicationUrlDispatchRulesDispatchRulesPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineApplicationUrlDispatchRulesDispatchRulesService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAppEngineApplicationUrlDispatchRulesDispatchRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDomain, err := expandAppEngineApplicationUrlDispatchRulesDispatchRulesDomain(original["domain"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDomain); val.IsValid() && !isEmptyValue(val) { - 
transformed["domain"] = transformedDomain - } - - transformedPath, err := expandAppEngineApplicationUrlDispatchRulesDispatchRulesPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedService, err := expandAppEngineApplicationUrlDispatchRulesDispatchRulesService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAppEngineApplicationUrlDispatchRulesDispatchRulesDomain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineApplicationUrlDispatchRulesDispatchRulesPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineApplicationUrlDispatchRulesDispatchRulesService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_domain_mapping.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_domain_mapping.go deleted file mode 100644 index 2653dadeb1..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_domain_mapping.go +++ /dev/null @@ -1,552 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be 
clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func sslSettingsDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // If certificate id is empty, and ssl management type is `MANUAL`, then - // ssl settings will not be configured, and ssl_settings block is not returned - - if k == "ssl_settings.#" && - old == "0" && new == "1" && - d.Get("ssl_settings.0.certificate_id") == "" && - d.Get("ssl_settings.0.ssl_management_type") == "MANUAL" { - return true - } - - return false -} - -func ResourceAppEngineDomainMapping() *schema.Resource { - return &schema.Resource{ - Create: resourceAppEngineDomainMappingCreate, - Read: resourceAppEngineDomainMappingRead, - Update: resourceAppEngineDomainMappingUpdate, - Delete: resourceAppEngineDomainMappingDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAppEngineDomainMappingImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "domain_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Relative name of the domain serving the application. Example: example.com.`, - }, - "override_strategy": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"STRICT", "OVERRIDE", ""}), - Description: `Whether the domain creation should override any existing mappings for this domain. -By default, overrides are rejected. 
Default value: "STRICT" Possible values: ["STRICT", "OVERRIDE"]`, - Default: "STRICT", - }, - "ssl_settings": { - Type: schema.TypeList, - Optional: true, - DiffSuppressFunc: sslSettingsDiffSuppress, - Description: `SSL configuration for this domain. If unconfigured, this domain will not serve with SSL.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ssl_management_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"AUTOMATIC", "MANUAL"}), - Description: `SSL management type for this domain. If 'AUTOMATIC', a managed certificate is automatically provisioned. -If 'MANUAL', 'certificateId' must be manually specified in order to configure SSL for this domain. Possible values: ["AUTOMATIC", "MANUAL"]`, - }, - "certificate_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `ID of the AuthorizedCertificate resource configuring SSL for the application. Clearing this field will -remove SSL support. -By default, a managed certificate is automatically created for every domain mapping. To omit SSL support -or to configure SSL manually, specify 'SslManagementType.MANUAL' on a 'CREATE' or 'UPDATE' request. You must be -authorized to administer the 'AuthorizedCertificate' resource to manually map it to a DomainMapping resource. -Example: 12345.`, - }, - "pending_managed_certificate_id": { - Type: schema.TypeString, - Computed: true, - Description: `ID of the managed 'AuthorizedCertificate' resource currently being provisioned, if applicable. Until the new -managed certificate has been successfully provisioned, the previous SSL state will be preserved. Once the -provisioning process completes, the 'certificateId' field will reflect the new managed certificate and this -field will be left empty. 
To remove SSL support while there is still a pending managed certificate, clear the -'certificateId' field with an update request.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Full path to the DomainMapping resource in the API. Example: apps/myapp/domainMapping/example.com.`, - }, - "resource_records": { - Type: schema.TypeList, - Computed: true, - Description: `The resource records required to configure this domain mapping. These records must be added to the domain's DNS -configuration in order to serve the application via this domain mapping.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Relative name of the object affected by this record. Only applicable for CNAME records. Example: 'www'.`, - }, - "rrdata": { - Type: schema.TypeString, - Optional: true, - Description: `Data for this record. Values vary by record type, as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1).`, - }, - "type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"A", "AAAA", "CNAME", ""}), - Description: `Resource record type. Example: 'AAAA'. 
Possible values: ["A", "AAAA", "CNAME"]`, - }, - }, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAppEngineDomainMappingCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - sslSettingsProp, err := expandAppEngineDomainMappingSslSettings(d.Get("ssl_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_settings"); !isEmptyValue(reflect.ValueOf(sslSettingsProp)) && (ok || !reflect.DeepEqual(v, sslSettingsProp)) { - obj["sslSettings"] = sslSettingsProp - } - idProp, err := expandAppEngineDomainMappingDomainName(d.Get("domain_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("domain_name"); !isEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/domainMappings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new DomainMapping: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating DomainMapping: %s", err) - } - - // Store the ID now - id, err := 
replaceVars(d, config, "apps/{{project}}/domainMappings/{{domain_name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = AppEngineOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating DomainMapping", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create DomainMapping: %s", err) - } - - if err := d.Set("name", flattenAppEngineDomainMappingName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "apps/{{project}}/domainMappings/{{domain_name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating DomainMapping %q: %#v", d.Id(), res) - - return resourceAppEngineDomainMappingRead(d, meta) -} - -func resourceAppEngineDomainMappingRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/domainMappings/{{domain_name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AppEngineDomainMapping %q", d.Id())) - 
} - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading DomainMapping: %s", err) - } - - if err := d.Set("name", flattenAppEngineDomainMappingName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading DomainMapping: %s", err) - } - if err := d.Set("ssl_settings", flattenAppEngineDomainMappingSslSettings(res["sslSettings"], d, config)); err != nil { - return fmt.Errorf("Error reading DomainMapping: %s", err) - } - if err := d.Set("resource_records", flattenAppEngineDomainMappingResourceRecords(res["resourceRecords"], d, config)); err != nil { - return fmt.Errorf("Error reading DomainMapping: %s", err) - } - if err := d.Set("domain_name", flattenAppEngineDomainMappingDomainName(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading DomainMapping: %s", err) - } - - return nil -} - -func resourceAppEngineDomainMappingUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - sslSettingsProp, err := expandAppEngineDomainMappingSslSettings(d.Get("ssl_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_settings"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslSettingsProp)) { - obj["sslSettings"] = sslSettingsProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/domainMappings/{{domain_name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating DomainMapping %q: %#v", d.Id(), obj) - 
updateMask := []string{} - - if d.HasChange("ssl_settings") { - updateMask = append(updateMask, "ssl_settings.certificate_id", - "ssl_settings.ssl_management_type") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating DomainMapping %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating DomainMapping %q: %#v", d.Id(), res) - } - - err = AppEngineOperationWaitTime( - config, res, project, "Updating DomainMapping", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAppEngineDomainMappingRead(d, meta) -} - -func resourceAppEngineDomainMappingDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/domainMappings/{{domain_name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting DomainMapping %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DomainMapping") - } - - err = AppEngineOperationWaitTime( - config, res, project, "Deleting DomainMapping", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting DomainMapping %q: %#v", d.Id(), res) - return nil -} - -func resourceAppEngineDomainMappingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "apps/(?P[^/]+)/domainMappings/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "apps/{{project}}/domainMappings/{{domain_name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenAppEngineDomainMappingName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingSslSettings(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["certificate_id"] = - flattenAppEngineDomainMappingSslSettingsCertificateId(original["certificateId"], d, config) - transformed["ssl_management_type"] = - flattenAppEngineDomainMappingSslSettingsSslManagementType(original["sslManagementType"], d, config) - transformed["pending_managed_certificate_id"] = - flattenAppEngineDomainMappingSslSettingsPendingManagedCertificateId(original["pendingManagedCertificateId"], d, config) - return 
[]interface{}{transformed} -} -func flattenAppEngineDomainMappingSslSettingsCertificateId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingSslSettingsSslManagementType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingSslSettingsPendingManagedCertificateId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingResourceRecords(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenAppEngineDomainMappingResourceRecordsName(original["name"], d, config), - "rrdata": flattenAppEngineDomainMappingResourceRecordsRrdata(original["rrdata"], d, config), - "type": flattenAppEngineDomainMappingResourceRecordsType(original["type"], d, config), - }) - } - return transformed -} -func flattenAppEngineDomainMappingResourceRecordsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingResourceRecordsRrdata(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingResourceRecordsType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingDomainName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAppEngineDomainMappingSslSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, 
nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCertificateId, err := expandAppEngineDomainMappingSslSettingsCertificateId(original["certificate_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCertificateId); val.IsValid() && !isEmptyValue(val) { - transformed["certificateId"] = transformedCertificateId - } - - transformedSslManagementType, err := expandAppEngineDomainMappingSslSettingsSslManagementType(original["ssl_management_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSslManagementType); val.IsValid() && !isEmptyValue(val) { - transformed["sslManagementType"] = transformedSslManagementType - } - - transformedPendingManagedCertificateId, err := expandAppEngineDomainMappingSslSettingsPendingManagedCertificateId(original["pending_managed_certificate_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPendingManagedCertificateId); val.IsValid() && !isEmptyValue(val) { - transformed["pendingManagedCertificateId"] = transformedPendingManagedCertificateId - } - - return transformed, nil -} - -func expandAppEngineDomainMappingSslSettingsCertificateId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineDomainMappingSslSettingsSslManagementType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineDomainMappingSslSettingsPendingManagedCertificateId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineDomainMappingDomainName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_firewall_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_firewall_rule.go deleted file mode 100644 index 7bcd55e798..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_firewall_rule.go +++ /dev/null @@ -1,449 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAppEngineFirewallRule() *schema.Resource { - return &schema.Resource{ - Create: resourceAppEngineFirewallRuleCreate, - Read: resourceAppEngineFirewallRuleRead, - Update: resourceAppEngineFirewallRuleUpdate, - Delete: resourceAppEngineFirewallRuleDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAppEngineFirewallRuleImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "action": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"UNSPECIFIED_ACTION", "ALLOW", "DENY"}), - Description: `The action to take if this rule matches. 
Possible values: ["UNSPECIFIED_ACTION", "ALLOW", "DENY"]`, - }, - "source_range": { - Type: schema.TypeString, - Required: true, - Description: `IP address or range, defined using CIDR notation, of requests that this rule applies to.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `An optional string description of this rule.`, - }, - "priority": { - Type: schema.TypeInt, - Optional: true, - Description: `A positive integer that defines the order of rule evaluation. -Rules with the lowest priority are evaluated first. - -A default rule at priority Int32.MaxValue matches all IPv4 and -IPv6 traffic when no previous rule matches. Only the action of -this rule can be modified by the user.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAppEngineFirewallRuleCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandAppEngineFirewallRuleDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - sourceRangeProp, err := expandAppEngineFirewallRuleSourceRange(d.Get("source_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_range"); !isEmptyValue(reflect.ValueOf(sourceRangeProp)) && (ok || !reflect.DeepEqual(v, sourceRangeProp)) { - obj["sourceRange"] = sourceRangeProp - } - actionProp, err := expandAppEngineFirewallRuleAction(d.Get("action"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("action"); !isEmptyValue(reflect.ValueOf(actionProp)) && (ok || 
!reflect.DeepEqual(v, actionProp)) { - obj["action"] = actionProp - } - priorityProp, err := expandAppEngineFirewallRulePriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(reflect.ValueOf(priorityProp)) && (ok || !reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new FirewallRule: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for FirewallRule: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating FirewallRule: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "apps/{{project}}/firewall/ingressRules/{{priority}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourceAppEngineFirewallRulePollRead(d, meta), PollCheckForExistence, "Creating FirewallRule", d.Timeout(schema.TimeoutCreate), 1) - if err != nil { - return fmt.Errorf("Error waiting to create FirewallRule: %s", err) - } - - log.Printf("[DEBUG] Finished creating FirewallRule %q: %#v", d.Id(), res) - - return resourceAppEngineFirewallRuleRead(d, meta) -} - -func resourceAppEngineFirewallRulePollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { - return func() 
(map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules/{{priority}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, fmt.Errorf("Error fetching project for FirewallRule: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - return res, nil - } -} - -func resourceAppEngineFirewallRuleRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules/{{priority}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for FirewallRule: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AppEngineFirewallRule %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading FirewallRule: %s", err) - } - - if err := d.Set("description", flattenAppEngineFirewallRuleDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading 
FirewallRule: %s", err) - } - if err := d.Set("source_range", flattenAppEngineFirewallRuleSourceRange(res["sourceRange"], d, config)); err != nil { - return fmt.Errorf("Error reading FirewallRule: %s", err) - } - if err := d.Set("action", flattenAppEngineFirewallRuleAction(res["action"], d, config)); err != nil { - return fmt.Errorf("Error reading FirewallRule: %s", err) - } - if err := d.Set("priority", flattenAppEngineFirewallRulePriority(res["priority"], d, config)); err != nil { - return fmt.Errorf("Error reading FirewallRule: %s", err) - } - - return nil -} - -func resourceAppEngineFirewallRuleUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for FirewallRule: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandAppEngineFirewallRuleDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - sourceRangeProp, err := expandAppEngineFirewallRuleSourceRange(d.Get("source_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_range"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceRangeProp)) { - obj["sourceRange"] = sourceRangeProp - } - actionProp, err := expandAppEngineFirewallRuleAction(d.Get("action"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("action"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, actionProp)) { - obj["action"] = actionProp - } - priorityProp, err := expandAppEngineFirewallRulePriority(d.Get("priority"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules/{{priority}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating FirewallRule %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("source_range") { - updateMask = append(updateMask, "sourceRange") - } - - if d.HasChange("action") { - updateMask = append(updateMask, "action") - } - - if d.HasChange("priority") { - updateMask = append(updateMask, "priority") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating FirewallRule %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating FirewallRule %q: %#v", d.Id(), res) - } - - return resourceAppEngineFirewallRuleRead(d, meta) -} - -func resourceAppEngineFirewallRuleDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err 
!= nil { - return fmt.Errorf("Error fetching project for FirewallRule: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules/{{priority}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting FirewallRule %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "FirewallRule") - } - - log.Printf("[DEBUG] Finished deleting FirewallRule %q: %#v", d.Id(), res) - return nil -} - -func resourceAppEngineFirewallRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "apps/(?P[^/]+)/firewall/ingressRules/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "apps/{{project}}/firewall/ingressRules/{{priority}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenAppEngineFirewallRuleDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFirewallRuleSourceRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFirewallRuleAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFirewallRulePriority(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandAppEngineFirewallRuleDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFirewallRuleSourceRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFirewallRuleAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFirewallRulePriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_service_network_settings.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_service_network_settings.go deleted file mode 100644 index 8799d950aa..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_service_network_settings.go +++ /dev/null @@ -1,353 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAppEngineServiceNetworkSettings() *schema.Resource { - return &schema.Resource{ - Create: resourceAppEngineServiceNetworkSettingsCreate, - Read: resourceAppEngineServiceNetworkSettingsRead, - Update: resourceAppEngineServiceNetworkSettingsUpdate, - Delete: resourceAppEngineServiceNetworkSettingsDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAppEngineServiceNetworkSettingsImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "network_settings": { - Type: schema.TypeList, - Required: true, - Description: `Ingress settings for this service. Will apply to all versions.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ingress_traffic_allowed": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED", "INGRESS_TRAFFIC_ALLOWED_ALL", "INGRESS_TRAFFIC_ALLOWED_INTERNAL_ONLY", "INGRESS_TRAFFIC_ALLOWED_INTERNAL_AND_LB", ""}), - Description: `The ingress settings for version or service. 
Default value: "INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED" Possible values: ["INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED", "INGRESS_TRAFFIC_ALLOWED_ALL", "INGRESS_TRAFFIC_ALLOWED_INTERNAL_ONLY", "INGRESS_TRAFFIC_ALLOWED_INTERNAL_AND_LB"]`, - Default: "INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED", - }, - }, - }, - }, - "service": { - Type: schema.TypeString, - Required: true, - Description: `The name of the service these settings apply to.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAppEngineServiceNetworkSettingsCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - idProp, err := expandAppEngineServiceNetworkSettingsService(d.Get("service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service"); !isEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - networkSettingsProp, err := expandAppEngineServiceNetworkSettingsNetworkSettings(d.Get("network_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_settings"); !isEmptyValue(reflect.ValueOf(networkSettingsProp)) && (ok || !reflect.DeepEqual(v, networkSettingsProp)) { - obj["networkSettings"] = networkSettingsProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}?updateMask=networkSettings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ServiceNetworkSettings: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project 
for ServiceNetworkSettings: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating ServiceNetworkSettings: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = AppEngineOperationWaitTime( - config, res, project, "Creating ServiceNetworkSettings", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create ServiceNetworkSettings: %s", err) - } - - log.Printf("[DEBUG] Finished creating ServiceNetworkSettings %q: %#v", d.Id(), res) - - return resourceAppEngineServiceNetworkSettingsRead(d, meta) -} - -func resourceAppEngineServiceNetworkSettingsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ServiceNetworkSettings: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AppEngineServiceNetworkSettings %q", d.Id())) 
- } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading ServiceNetworkSettings: %s", err) - } - - if err := d.Set("service", flattenAppEngineServiceNetworkSettingsService(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceNetworkSettings: %s", err) - } - if err := d.Set("network_settings", flattenAppEngineServiceNetworkSettingsNetworkSettings(res["networkSettings"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceNetworkSettings: %s", err) - } - - return nil -} - -func resourceAppEngineServiceNetworkSettingsUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ServiceNetworkSettings: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - idProp, err := expandAppEngineServiceNetworkSettingsService(d.Get("service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - networkSettingsProp, err := expandAppEngineServiceNetworkSettingsNetworkSettings(d.Get("network_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_settings"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkSettingsProp)) { - obj["networkSettings"] = networkSettingsProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating ServiceNetworkSettings %q: %#v", 
d.Id(), obj) - updateMask := []string{} - - if d.HasChange("service") { - updateMask = append(updateMask, "id") - } - - if d.HasChange("network_settings") { - updateMask = append(updateMask, "networkSettings") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating ServiceNetworkSettings %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating ServiceNetworkSettings %q: %#v", d.Id(), res) - } - - err = AppEngineOperationWaitTime( - config, res, project, "Updating ServiceNetworkSettings", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAppEngineServiceNetworkSettingsRead(d, meta) -} - -func resourceAppEngineServiceNetworkSettingsDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARNING] AppEngine ServiceNetworkSettings resources"+ - " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceAppEngineServiceNetworkSettingsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "apps/(?P[^/]+)/services/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenAppEngineServiceNetworkSettingsService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineServiceNetworkSettingsNetworkSettings(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["ingress_traffic_allowed"] = - flattenAppEngineServiceNetworkSettingsNetworkSettingsIngressTrafficAllowed(original["ingressTrafficAllowed"], d, config) - return []interface{}{transformed} -} -func flattenAppEngineServiceNetworkSettingsNetworkSettingsIngressTrafficAllowed(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAppEngineServiceNetworkSettingsService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineServiceNetworkSettingsNetworkSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedIngressTrafficAllowed, err := expandAppEngineServiceNetworkSettingsNetworkSettingsIngressTrafficAllowed(original["ingress_traffic_allowed"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIngressTrafficAllowed); val.IsValid() && !isEmptyValue(val) { - transformed["ingressTrafficAllowed"] = transformedIngressTrafficAllowed - } - - return transformed, nil -} - -func expandAppEngineServiceNetworkSettingsNetworkSettingsIngressTrafficAllowed(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_service_split_traffic.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_service_split_traffic.go deleted file mode 100644 index b34dbae57d..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_service_split_traffic.go +++ /dev/null @@ -1,361 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAppEngineServiceSplitTraffic() *schema.Resource { - return &schema.Resource{ - Create: resourceAppEngineServiceSplitTrafficCreate, - Read: resourceAppEngineServiceSplitTrafficRead, - Update: resourceAppEngineServiceSplitTrafficUpdate, - Delete: resourceAppEngineServiceSplitTrafficDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAppEngineServiceSplitTrafficImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "service": { - Type: schema.TypeString, - Required: true, - Description: `The name of the service these settings apply to.`, - }, - "split": { - Type: schema.TypeList, - Required: true, - Description: `Mapping that defines fractional HTTP traffic diversion to different versions within the service.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allocations": { - Type: schema.TypeMap, - Required: true, - Description: `Mapping from version IDs within the service to fractional (0.000, 1] allocations of traffic for that version. Each version can be specified only once, but some versions in the service may not have any traffic allocation. Services that have traffic allocated cannot be deleted until either the service is deleted or their traffic allocation is removed. Allocations must sum to 1. 
Up to two decimal place precision is supported for IP-based splits and up to three decimal places is supported for cookie-based splits.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "shard_by": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"UNSPECIFIED", "COOKIE", "IP", "RANDOM", ""}), - Description: `Mechanism used to determine which version a request is sent to. The traffic selection algorithm will be stable for either type until allocations are changed. Possible values: ["UNSPECIFIED", "COOKIE", "IP", "RANDOM"]`, - }, - }, - }, - }, - "migrate_traffic": { - Type: schema.TypeBool, - Optional: true, - Description: `If set to true traffic will be migrated to this version.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAppEngineServiceSplitTrafficCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - idProp, err := expandAppEngineServiceSplitTrafficService(d.Get("service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service"); !isEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - splitProp, err := expandAppEngineServiceSplitTrafficSplit(d.Get("split"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("split"); !isEmptyValue(reflect.ValueOf(splitProp)) && (ok || !reflect.DeepEqual(v, splitProp)) { - obj["split"] = splitProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, 
"{{AppEngineBasePath}}apps/{{project}}/services/{{service}}?migrateTraffic={{migrate_traffic}}&updateMask=split") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ServiceSplitTraffic: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ServiceSplitTraffic: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating ServiceSplitTraffic: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = AppEngineOperationWaitTime( - config, res, project, "Creating ServiceSplitTraffic", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create ServiceSplitTraffic: %s", err) - } - - log.Printf("[DEBUG] Finished creating ServiceSplitTraffic %q: %#v", d.Id(), res) - - return resourceAppEngineServiceSplitTrafficRead(d, meta) -} - -func resourceAppEngineServiceSplitTrafficRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ServiceSplitTraffic: %s", err) - } - billingProject = project - - // err == nil indicates 
that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AppEngineServiceSplitTraffic %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading ServiceSplitTraffic: %s", err) - } - - if err := d.Set("service", flattenAppEngineServiceSplitTrafficService(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceSplitTraffic: %s", err) - } - - return nil -} - -func resourceAppEngineServiceSplitTrafficUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ServiceSplitTraffic: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - idProp, err := expandAppEngineServiceSplitTrafficService(d.Get("service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - splitProp, err := expandAppEngineServiceSplitTrafficSplit(d.Get("split"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("split"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, splitProp)) { - obj["split"] = splitProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}?migrateTraffic={{migrate_traffic}}") - if err != nil { - return err - } - - 
log.Printf("[DEBUG] Updating ServiceSplitTraffic %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("service") { - updateMask = append(updateMask, "id") - } - - if d.HasChange("split") { - updateMask = append(updateMask, "split") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating ServiceSplitTraffic %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating ServiceSplitTraffic %q: %#v", d.Id(), res) - } - - err = AppEngineOperationWaitTime( - config, res, project, "Updating ServiceSplitTraffic", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAppEngineServiceSplitTrafficRead(d, meta) -} - -func resourceAppEngineServiceSplitTrafficDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARNING] AppEngine ServiceSplitTraffic resources"+ - " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceAppEngineServiceSplitTrafficImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "apps/(?P[^/]+)/services/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenAppEngineServiceSplitTrafficService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAppEngineServiceSplitTrafficService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineServiceSplitTrafficSplit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedShardBy, err := expandAppEngineServiceSplitTrafficSplitShardBy(original["shard_by"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedShardBy); val.IsValid() && !isEmptyValue(val) { - transformed["shardBy"] = transformedShardBy - } - - transformedAllocations, err := expandAppEngineServiceSplitTrafficSplitAllocations(original["allocations"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAllocations); val.IsValid() && !isEmptyValue(val) { - transformed["allocations"] = transformedAllocations - } - - return transformed, nil -} - -func 
expandAppEngineServiceSplitTrafficSplitShardBy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineServiceSplitTrafficSplitAllocations(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_standard_app_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_standard_app_version.go deleted file mode 100644 index f388cb7092..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_standard_app_version.go +++ /dev/null @@ -1,2009 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAppEngineStandardAppVersion() *schema.Resource { - return &schema.Resource{ - Create: resourceAppEngineStandardAppVersionCreate, - Read: resourceAppEngineStandardAppVersionRead, - Update: resourceAppEngineStandardAppVersionUpdate, - Delete: resourceAppEngineStandardAppVersionDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAppEngineStandardAppVersionImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "deployment": { - Type: schema.TypeList, - Required: true, - Description: `Code and application artifacts that make up this version.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "files": { - Type: schema.TypeSet, - Optional: true, - Description: `Manifest of the files stored in Google Cloud Storage that are included as part of this version. 
-All files must be readable using the credentials supplied with this call.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "source_url": { - Type: schema.TypeString, - Required: true, - Description: `Source URL`, - }, - "sha1_sum": { - Type: schema.TypeString, - Optional: true, - Description: `SHA1 checksum of the file`, - }, - }, - }, - AtLeastOneOf: []string{"deployment.0.zip", "deployment.0.files"}, - }, - "zip": { - Type: schema.TypeList, - Optional: true, - Description: `Zip File`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source_url": { - Type: schema.TypeString, - Required: true, - Description: `Source URL`, - }, - "files_count": { - Type: schema.TypeInt, - Optional: true, - Description: `files count`, - }, - }, - }, - AtLeastOneOf: []string{"deployment.0.zip", "deployment.0.files"}, - }, - }, - }, - }, - "entrypoint": { - Type: schema.TypeList, - Required: true, - Description: `The entrypoint for the application.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "shell": { - Type: schema.TypeString, - Required: true, - Description: `The format should be a shell command that can be fed to bash -c.`, - }, - }, - }, - }, - "runtime": { - Type: schema.TypeString, - Required: true, - Description: `Desired runtime. 
Example python27.`, - }, - "service": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `AppEngine service resource`, - }, - "app_engine_apis": { - Type: schema.TypeBool, - Optional: true, - Description: `Allows App Engine second generation runtimes to access the legacy bundled services.`, - }, - "automatic_scaling": { - Type: schema.TypeList, - Optional: true, - Description: `Automatic scaling is based on request rate, response latencies, and other application metrics.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_concurrent_requests": { - Type: schema.TypeInt, - Optional: true, - Description: `Number of concurrent requests an automatic scaling instance can accept before the scheduler spawns a new instance. - -Defaults to a runtime-specific value.`, - }, - "max_idle_instances": { - Type: schema.TypeInt, - Optional: true, - Description: `Maximum number of idle instances that should be maintained for this version.`, - }, - "max_pending_latency": { - Type: schema.TypeString, - Optional: true, - Description: `Maximum amount of time that a request should wait in the pending queue before starting a new instance to handle it. -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - "min_idle_instances": { - Type: schema.TypeInt, - Optional: true, - Description: `Minimum number of idle instances that should be maintained for this version. Only applicable for the default version of a service.`, - }, - "min_pending_latency": { - Type: schema.TypeString, - Optional: true, - Description: `Minimum amount of time a request should wait in the pending queue before starting a new instance to handle it. -A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, - }, - "standard_scheduler_settings": { - Type: schema.TypeList, - Optional: true, - Description: `Scheduler settings for standard environment.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_instances": { - Type: schema.TypeInt, - Optional: true, - Description: `Maximum number of instances to run for this version. Set to zero to disable maxInstances configuration.`, - }, - "min_instances": { - Type: schema.TypeInt, - Optional: true, - Description: `Minimum number of instances to run for this version. Set to zero to disable minInstances configuration.`, - }, - "target_cpu_utilization": { - Type: schema.TypeFloat, - Optional: true, - Description: `Target CPU utilization ratio to maintain when scaling. Should be a value in the range [0.50, 0.95], zero, or a negative value.`, - }, - "target_throughput_utilization": { - Type: schema.TypeFloat, - Optional: true, - Description: `Target throughput utilization ratio to maintain when scaling. Should be a value in the range [0.50, 0.95], zero, or a negative value.`, - }, - }, - }, - }, - }, - }, - ConflictsWith: []string{"basic_scaling", "manual_scaling"}, - }, - "basic_scaling": { - Type: schema.TypeList, - Optional: true, - Description: `Basic scaling creates instances when your application receives requests. Each instance will be shut down when the application becomes idle. Basic scaling is ideal for work that is intermittent or driven by user activity.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_instances": { - Type: schema.TypeInt, - Required: true, - Description: `Maximum number of instances to create for this version. Must be in the range [1.0, 200.0].`, - }, - "idle_timeout": { - Type: schema.TypeString, - Optional: true, - Description: `Duration of time after the last request that an instance must wait before the instance is shut down. -A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s". Defaults to 900s.`, - Default: "900s", - }, - }, - }, - ConflictsWith: []string{"automatic_scaling", "manual_scaling"}, - }, - "env_variables": { - Type: schema.TypeMap, - Optional: true, - Description: `Environment variables available to the application.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "handlers": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `An ordered list of URL-matching patterns that should be applied to incoming requests. -The first matching URL handles the request and other request handlers are not attempted.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "auth_fail_action": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED", ""}), - Description: `Actions to take when the user is not logged in. Possible values: ["AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED"]`, - }, - "login": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED", ""}), - Description: `Methods to restrict access to a URL based on login status. Possible values: ["LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED"]`, - }, - "redirect_http_response_code": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"REDIRECT_HTTP_RESPONSE_CODE_301", "REDIRECT_HTTP_RESPONSE_CODE_302", "REDIRECT_HTTP_RESPONSE_CODE_303", "REDIRECT_HTTP_RESPONSE_CODE_307", ""}), - Description: `30x code to use when performing redirects for the secure field. Possible values: ["REDIRECT_HTTP_RESPONSE_CODE_301", "REDIRECT_HTTP_RESPONSE_CODE_302", "REDIRECT_HTTP_RESPONSE_CODE_303", "REDIRECT_HTTP_RESPONSE_CODE_307"]`, - }, - "script": { - Type: schema.TypeList, - Optional: true, - Description: `Executes a script to handle the requests that match this URL pattern. 
-Only the auto value is supported for Node.js in the App Engine standard environment, for example "script:" "auto".`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "script_path": { - Type: schema.TypeString, - Required: true, - Description: `Path to the script from the application root directory.`, - }, - }, - }, - }, - "security_level": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS", ""}), - Description: `Security (HTTPS) enforcement for this URL. Possible values: ["SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS"]`, - }, - "static_files": { - Type: schema.TypeList, - Optional: true, - Description: `Files served directly to the user for a given URL, such as images, CSS stylesheets, or JavaScript source files. Static file handlers describe which files in the application directory are static files, and which URLs serve them.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "application_readable": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether files should also be uploaded as code data. By default, files declared in static file handlers are uploaded as -static data and are only served to end users; they cannot be read by the application. If enabled, uploads are charged -against both your code and static data storage resource quotas.`, - }, - "expiration": { - Type: schema.TypeString, - Optional: true, - Description: `Time a static file served by this handler should be cached by web proxies and browsers. -A duration in seconds with up to nine fractional digits, terminated by 's'. Example "3.5s".`, - }, - "http_headers": { - Type: schema.TypeMap, - Optional: true, - Description: `HTTP headers to use for all responses from these URLs. 
-An object containing a list of "key:value" value pairs.".`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "mime_type": { - Type: schema.TypeString, - Optional: true, - Description: `MIME type used to serve all files served by this handler. -Defaults to file-specific MIME types, which are derived from each file's filename extension.`, - }, - "path": { - Type: schema.TypeString, - Optional: true, - Description: `Path to the static files matched by the URL pattern, from the application root directory. The path can refer to text matched in groupings in the URL pattern.`, - }, - "require_matching_file": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether this handler should match the request if the file referenced by the handler does not exist.`, - }, - "upload_path_regex": { - Type: schema.TypeString, - Optional: true, - Description: `Regular expression that matches the file paths for all files that should be referenced by this handler.`, - }, - }, - }, - }, - "url_regex": { - Type: schema.TypeString, - Optional: true, - Description: `URL prefix. Uses regular expression syntax, which means regexp special characters must be escaped, but should not contain groupings. -All URLs that begin with this prefix are handled by this handler, using the portion of the URL after the prefix as part of the file path.`, - }, - }, - }, - }, - "inbound_services": { - Type: schema.TypeSet, - Optional: true, - Description: `A list of the types of messages that this application is able to receive. 
Possible values: ["INBOUND_SERVICE_MAIL", "INBOUND_SERVICE_MAIL_BOUNCE", "INBOUND_SERVICE_XMPP_ERROR", "INBOUND_SERVICE_XMPP_MESSAGE", "INBOUND_SERVICE_XMPP_SUBSCRIBE", "INBOUND_SERVICE_XMPP_PRESENCE", "INBOUND_SERVICE_CHANNEL_PRESENCE", "INBOUND_SERVICE_WARMUP"]`, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"INBOUND_SERVICE_MAIL", "INBOUND_SERVICE_MAIL_BOUNCE", "INBOUND_SERVICE_XMPP_ERROR", "INBOUND_SERVICE_XMPP_MESSAGE", "INBOUND_SERVICE_XMPP_SUBSCRIBE", "INBOUND_SERVICE_XMPP_PRESENCE", "INBOUND_SERVICE_CHANNEL_PRESENCE", "INBOUND_SERVICE_WARMUP"}), - }, - Set: schema.HashString, - }, - "instance_class": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Instance class that is used to run this version. Valid values are -AutomaticScaling: F1, F2, F4, F4_1G -BasicScaling or ManualScaling: B1, B2, B4, B4_1G, B8 -Defaults to F1 for AutomaticScaling and B2 for ManualScaling and BasicScaling. If no scaling is specified, AutomaticScaling is chosen.`, - }, - "libraries": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration for third-party Python runtime libraries that are required by the application.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the library. Example "django".`, - }, - "version": { - Type: schema.TypeString, - Optional: true, - Description: `Version of the library to select, or "latest".`, - }, - }, - }, - }, - "manual_scaling": { - Type: schema.TypeList, - Optional: true, - Description: `A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instances": { - Type: schema.TypeInt, - Required: true, - Description: `Number of instances to assign to the service at the start. 
- -**Note:** When managing the number of instances at runtime through the App Engine Admin API or the (now deprecated) Python 2 -Modules API set_num_instances() you must use 'lifecycle.ignore_changes = ["manual_scaling"[0].instances]' to prevent drift detection.`, - }, - }, - }, - ConflictsWith: []string{"automatic_scaling", "basic_scaling"}, - }, - "runtime_api_version": { - Type: schema.TypeString, - Optional: true, - Description: `The version of the API in the given runtime environment. -Please see the app.yaml reference for valid values at 'https://cloud.google.com/appengine/docs/standard//config/appref'\ -Substitute '' with 'python', 'java', 'php', 'ruby', 'go' or 'nodejs'.`, - }, - "service_account": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `The identity that the deployed version will run as. Admin API will use the App Engine Appspot service account as default if this field is neither provided in app.yaml file nor through CLI flag.`, - }, - "threadsafe": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether multiple requests can be dispatched to this version at once.`, - }, - "version_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Relative name of the version within the service. For example, 'v1'. Version names can contain only lowercase letters, numbers, or hyphens. Reserved names,"default", "latest", and any name with the prefix "ah-".`, - }, - "vpc_access_connector": { - Type: schema.TypeList, - Optional: true, - Description: `Enables VPC connectivity for standard apps.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Full Serverless VPC Access Connector name e.g. 
/projects/my-project/locations/us-central1/connectors/c1.`, - }, - "egress_setting": { - Type: schema.TypeString, - Optional: true, - Description: `The egress setting for the connector, controlling what traffic is diverted through it.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Full path to the Version resource in the API. Example, "v1".`, - }, - "noop_on_destroy": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `If set to 'true', the application version will not be deleted.`, - }, - "delete_service_on_destroy": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `If set to 'true', the service will be deleted if it is the last version.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAppEngineStandardAppVersionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - idProp, err := expandAppEngineStandardAppVersionVersionId(d.Get("version_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_id"); !isEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - runtimeProp, err := expandAppEngineStandardAppVersionRuntime(d.Get("runtime"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime"); !isEmptyValue(reflect.ValueOf(runtimeProp)) && (ok || !reflect.DeepEqual(v, runtimeProp)) { - obj["runtime"] = runtimeProp - } - serviceAccountProp, err := expandAppEngineStandardAppVersionServiceAccount(d.Get("service_account"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account"); !isEmptyValue(reflect.ValueOf(serviceAccountProp)) && (ok || 
!reflect.DeepEqual(v, serviceAccountProp)) { - obj["serviceAccount"] = serviceAccountProp - } - threadsafeProp, err := expandAppEngineStandardAppVersionThreadsafe(d.Get("threadsafe"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("threadsafe"); !isEmptyValue(reflect.ValueOf(threadsafeProp)) && (ok || !reflect.DeepEqual(v, threadsafeProp)) { - obj["threadsafe"] = threadsafeProp - } - appEngineApisProp, err := expandAppEngineStandardAppVersionAppEngineApis(d.Get("app_engine_apis"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("app_engine_apis"); !isEmptyValue(reflect.ValueOf(appEngineApisProp)) && (ok || !reflect.DeepEqual(v, appEngineApisProp)) { - obj["appEngineApis"] = appEngineApisProp - } - runtimeApiVersionProp, err := expandAppEngineStandardAppVersionRuntimeApiVersion(d.Get("runtime_api_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_api_version"); !isEmptyValue(reflect.ValueOf(runtimeApiVersionProp)) && (ok || !reflect.DeepEqual(v, runtimeApiVersionProp)) { - obj["runtimeApiVersion"] = runtimeApiVersionProp - } - handlersProp, err := expandAppEngineStandardAppVersionHandlers(d.Get("handlers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("handlers"); !isEmptyValue(reflect.ValueOf(handlersProp)) && (ok || !reflect.DeepEqual(v, handlersProp)) { - obj["handlers"] = handlersProp - } - librariesProp, err := expandAppEngineStandardAppVersionLibraries(d.Get("libraries"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("libraries"); !isEmptyValue(reflect.ValueOf(librariesProp)) && (ok || !reflect.DeepEqual(v, librariesProp)) { - obj["libraries"] = librariesProp - } - envVariablesProp, err := expandAppEngineStandardAppVersionEnvVariables(d.Get("env_variables"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("env_variables"); 
!isEmptyValue(reflect.ValueOf(envVariablesProp)) && (ok || !reflect.DeepEqual(v, envVariablesProp)) { - obj["envVariables"] = envVariablesProp - } - deploymentProp, err := expandAppEngineStandardAppVersionDeployment(d.Get("deployment"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deployment"); !isEmptyValue(reflect.ValueOf(deploymentProp)) && (ok || !reflect.DeepEqual(v, deploymentProp)) { - obj["deployment"] = deploymentProp - } - entrypointProp, err := expandAppEngineStandardAppVersionEntrypoint(d.Get("entrypoint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entrypoint"); !isEmptyValue(reflect.ValueOf(entrypointProp)) && (ok || !reflect.DeepEqual(v, entrypointProp)) { - obj["entrypoint"] = entrypointProp - } - vpcAccessConnectorProp, err := expandAppEngineStandardAppVersionVPCAccessConnector(d.Get("vpc_access_connector"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vpc_access_connector"); !isEmptyValue(reflect.ValueOf(vpcAccessConnectorProp)) && (ok || !reflect.DeepEqual(v, vpcAccessConnectorProp)) { - obj["vpcAccessConnector"] = vpcAccessConnectorProp - } - inboundServicesProp, err := expandAppEngineStandardAppVersionInboundServices(d.Get("inbound_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inbound_services"); !isEmptyValue(reflect.ValueOf(inboundServicesProp)) && (ok || !reflect.DeepEqual(v, inboundServicesProp)) { - obj["inboundServices"] = inboundServicesProp - } - instanceClassProp, err := expandAppEngineStandardAppVersionInstanceClass(d.Get("instance_class"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance_class"); !isEmptyValue(reflect.ValueOf(instanceClassProp)) && (ok || !reflect.DeepEqual(v, instanceClassProp)) { - obj["instanceClass"] = instanceClassProp - } - automaticScalingProp, err := expandAppEngineStandardAppVersionAutomaticScaling(d.Get("automatic_scaling"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("automatic_scaling"); !isEmptyValue(reflect.ValueOf(automaticScalingProp)) && (ok || !reflect.DeepEqual(v, automaticScalingProp)) { - obj["automaticScaling"] = automaticScalingProp - } - basicScalingProp, err := expandAppEngineStandardAppVersionBasicScaling(d.Get("basic_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("basic_scaling"); !isEmptyValue(reflect.ValueOf(basicScalingProp)) && (ok || !reflect.DeepEqual(v, basicScalingProp)) { - obj["basicScaling"] = basicScalingProp - } - manualScalingProp, err := expandAppEngineStandardAppVersionManualScaling(d.Get("manual_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("manual_scaling"); !isEmptyValue(reflect.ValueOf(manualScalingProp)) && (ok || !reflect.DeepEqual(v, manualScalingProp)) { - obj["manualScaling"] = manualScalingProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new StandardAppVersion: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for StandardAppVersion: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isAppEngineRetryableError) - if err != nil { - return fmt.Errorf("Error creating StandardAppVersion: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, 
"apps/{{project}}/services/{{service}}/versions/{{version_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = AppEngineOperationWaitTime( - config, res, project, "Creating StandardAppVersion", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create StandardAppVersion: %s", err) - } - - log.Printf("[DEBUG] Finished creating StandardAppVersion %q: %#v", d.Id(), res) - - return resourceAppEngineStandardAppVersionRead(d, meta) -} - -func resourceAppEngineStandardAppVersionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions/{{version_id}}?view=FULL") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for StandardAppVersion: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isAppEngineRetryableError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AppEngineStandardAppVersion %q", d.Id())) - } - - // Explicitly set virtual fields to default values if unset - if _, ok := d.GetOkExists("noop_on_destroy"); !ok { - if err := d.Set("noop_on_destroy", false); err != nil { - return fmt.Errorf("Error setting noop_on_destroy: %s", err) - } - } - if _, ok := d.GetOkExists("delete_service_on_destroy"); !ok { - if err := d.Set("delete_service_on_destroy", false); err != nil { - return fmt.Errorf("Error setting delete_service_on_destroy: 
%s", err) - } - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - - if err := d.Set("name", flattenAppEngineStandardAppVersionName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("version_id", flattenAppEngineStandardAppVersionVersionId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("runtime", flattenAppEngineStandardAppVersionRuntime(res["runtime"], d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("service_account", flattenAppEngineStandardAppVersionServiceAccount(res["serviceAccount"], d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("app_engine_apis", flattenAppEngineStandardAppVersionAppEngineApis(res["appEngineApis"], d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("runtime_api_version", flattenAppEngineStandardAppVersionRuntimeApiVersion(res["runtimeApiVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("handlers", flattenAppEngineStandardAppVersionHandlers(res["handlers"], d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("libraries", flattenAppEngineStandardAppVersionLibraries(res["libraries"], d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("vpc_access_connector", flattenAppEngineStandardAppVersionVPCAccessConnector(res["vpcAccessConnector"], d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("inbound_services", flattenAppEngineStandardAppVersionInboundServices(res["inboundServices"], 
d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("instance_class", flattenAppEngineStandardAppVersionInstanceClass(res["instanceClass"], d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("automatic_scaling", flattenAppEngineStandardAppVersionAutomaticScaling(res["automaticScaling"], d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("basic_scaling", flattenAppEngineStandardAppVersionBasicScaling(res["basicScaling"], d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("manual_scaling", flattenAppEngineStandardAppVersionManualScaling(res["manualScaling"], d, config)); err != nil { - return fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - - return nil -} - -func resourceAppEngineStandardAppVersionUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for StandardAppVersion: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - idProp, err := expandAppEngineStandardAppVersionVersionId(d.Get("version_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_id"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - runtimeProp, err := expandAppEngineStandardAppVersionRuntime(d.Get("runtime"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeProp)) { - obj["runtime"] = runtimeProp - } - serviceAccountProp, err := 
expandAppEngineStandardAppVersionServiceAccount(d.Get("service_account"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { - obj["serviceAccount"] = serviceAccountProp - } - threadsafeProp, err := expandAppEngineStandardAppVersionThreadsafe(d.Get("threadsafe"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("threadsafe"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, threadsafeProp)) { - obj["threadsafe"] = threadsafeProp - } - appEngineApisProp, err := expandAppEngineStandardAppVersionAppEngineApis(d.Get("app_engine_apis"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("app_engine_apis"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, appEngineApisProp)) { - obj["appEngineApis"] = appEngineApisProp - } - runtimeApiVersionProp, err := expandAppEngineStandardAppVersionRuntimeApiVersion(d.Get("runtime_api_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_api_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeApiVersionProp)) { - obj["runtimeApiVersion"] = runtimeApiVersionProp - } - handlersProp, err := expandAppEngineStandardAppVersionHandlers(d.Get("handlers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("handlers"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, handlersProp)) { - obj["handlers"] = handlersProp - } - librariesProp, err := expandAppEngineStandardAppVersionLibraries(d.Get("libraries"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("libraries"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, librariesProp)) { - obj["libraries"] = librariesProp - } - envVariablesProp, err := 
expandAppEngineStandardAppVersionEnvVariables(d.Get("env_variables"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("env_variables"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, envVariablesProp)) { - obj["envVariables"] = envVariablesProp - } - deploymentProp, err := expandAppEngineStandardAppVersionDeployment(d.Get("deployment"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deployment"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deploymentProp)) { - obj["deployment"] = deploymentProp - } - entrypointProp, err := expandAppEngineStandardAppVersionEntrypoint(d.Get("entrypoint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entrypoint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entrypointProp)) { - obj["entrypoint"] = entrypointProp - } - vpcAccessConnectorProp, err := expandAppEngineStandardAppVersionVPCAccessConnector(d.Get("vpc_access_connector"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vpc_access_connector"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, vpcAccessConnectorProp)) { - obj["vpcAccessConnector"] = vpcAccessConnectorProp - } - inboundServicesProp, err := expandAppEngineStandardAppVersionInboundServices(d.Get("inbound_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inbound_services"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, inboundServicesProp)) { - obj["inboundServices"] = inboundServicesProp - } - instanceClassProp, err := expandAppEngineStandardAppVersionInstanceClass(d.Get("instance_class"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance_class"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, instanceClassProp)) { - obj["instanceClass"] = instanceClassProp - } - automaticScalingProp, err := 
expandAppEngineStandardAppVersionAutomaticScaling(d.Get("automatic_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("automatic_scaling"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, automaticScalingProp)) { - obj["automaticScaling"] = automaticScalingProp - } - basicScalingProp, err := expandAppEngineStandardAppVersionBasicScaling(d.Get("basic_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("basic_scaling"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, basicScalingProp)) { - obj["basicScaling"] = basicScalingProp - } - manualScalingProp, err := expandAppEngineStandardAppVersionManualScaling(d.Get("manual_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("manual_scaling"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, manualScalingProp)) { - obj["manualScaling"] = manualScalingProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating StandardAppVersion %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isAppEngineRetryableError) - - if err != nil { - return fmt.Errorf("Error updating StandardAppVersion %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating StandardAppVersion %q: %#v", d.Id(), res) - } - - err = AppEngineOperationWaitTime( - config, res, project, "Updating StandardAppVersion", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if 
err != nil { - return err - } - - return resourceAppEngineStandardAppVersionRead(d, meta) -} - -func resourceAppEngineStandardAppVersionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - if d.Get("noop_on_destroy") == true { - log.Printf("[DEBUG] Keeping the AppVersion %q", d.Id()) - return nil - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - if d.Get("delete_service_on_destroy") == true { - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") - if err != nil { - return err - } - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Service %q", d.Id()) - res, err := SendRequestWithTimeout(config, "DELETE", project, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isAppEngineRetryableError) - if err != nil { - return handleNotFoundError(err, d, "Service") - } - err = AppEngineOperationWaitTime( - config, res, project, "Deleting Service", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) - return nil - } else { - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions/{{version_id}}") - if err != nil { - return err - } - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting AppVersion %q", d.Id()) - res, err := SendRequestWithTimeout(config, "DELETE", project, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isAppEngineRetryableError) - if err != nil { - return handleNotFoundError(err, d, "AppVersion") - } - err = AppEngineOperationWaitTime( - config, res, project, "Deleting AppVersion", userAgent, - 
d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - log.Printf("[DEBUG] Finished deleting AppVersion %q: %#v", d.Id(), res) - return nil - - } -} - -func resourceAppEngineStandardAppVersionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "apps/(?P[^/]+)/services/(?P[^/]+)/versions/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}/versions/{{version_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Explicitly set virtual fields to default values on import - if err := d.Set("noop_on_destroy", false); err != nil { - return nil, fmt.Errorf("Error setting noop_on_destroy: %s", err) - } - if err := d.Set("delete_service_on_destroy", false); err != nil { - return nil, fmt.Errorf("Error setting delete_service_on_destroy: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenAppEngineStandardAppVersionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionVersionId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionRuntime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionAppEngineApis(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionRuntimeApiVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenAppEngineStandardAppVersionHandlers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "url_regex": flattenAppEngineStandardAppVersionHandlersUrlRegex(original["urlRegex"], d, config), - "security_level": flattenAppEngineStandardAppVersionHandlersSecurityLevel(original["securityLevel"], d, config), - "login": flattenAppEngineStandardAppVersionHandlersLogin(original["login"], d, config), - "auth_fail_action": flattenAppEngineStandardAppVersionHandlersAuthFailAction(original["authFailAction"], d, config), - "redirect_http_response_code": flattenAppEngineStandardAppVersionHandlersRedirectHttpResponseCode(original["redirectHttpResponseCode"], d, config), - "script": flattenAppEngineStandardAppVersionHandlersScript(original["script"], d, config), - "static_files": flattenAppEngineStandardAppVersionHandlersStaticFiles(original["staticFiles"], d, config), - }) - } - return transformed -} -func flattenAppEngineStandardAppVersionHandlersUrlRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersSecurityLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersLogin(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersAuthFailAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersRedirectHttpResponseCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenAppEngineStandardAppVersionHandlersScript(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["script_path"] = - flattenAppEngineStandardAppVersionHandlersScriptScriptPath(original["scriptPath"], d, config) - return []interface{}{transformed} -} -func flattenAppEngineStandardAppVersionHandlersScriptScriptPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFiles(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesPath(original["path"], d, config) - transformed["upload_path_regex"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesUploadPathRegex(original["uploadPathRegex"], d, config) - transformed["http_headers"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesHttpHeaders(original["httpHeaders"], d, config) - transformed["mime_type"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesMimeType(original["mimeType"], d, config) - transformed["expiration"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesExpiration(original["expiration"], d, config) - transformed["require_matching_file"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesRequireMatchingFile(original["requireMatchingFile"], d, config) - transformed["application_readable"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesApplicationReadable(original["applicationReadable"], d, config) - return []interface{}{transformed} -} -func flattenAppEngineStandardAppVersionHandlersStaticFilesPath(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFilesUploadPathRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFilesHttpHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFilesMimeType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFilesExpiration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFilesRequireMatchingFile(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFilesApplicationReadable(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionLibraries(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenAppEngineStandardAppVersionLibrariesName(original["name"], d, config), - "version": flattenAppEngineStandardAppVersionLibrariesVersion(original["version"], d, config), - }) - } - return transformed -} -func flattenAppEngineStandardAppVersionLibrariesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionLibrariesVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenAppEngineStandardAppVersionVPCAccessConnector(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenAppEngineStandardAppVersionVPCAccessConnectorName(original["name"], d, config) - transformed["egress_setting"] = - flattenAppEngineStandardAppVersionVPCAccessConnectorEgressSetting(original["egressSetting"], d, config) - return []interface{}{transformed} -} -func flattenAppEngineStandardAppVersionVPCAccessConnectorName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionVPCAccessConnectorEgressSetting(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionInboundServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenAppEngineStandardAppVersionInstanceClass(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScaling(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["max_concurrent_requests"] = - flattenAppEngineStandardAppVersionAutomaticScalingMaxConcurrentRequests(original["maxConcurrentRequests"], d, config) - transformed["max_idle_instances"] = - flattenAppEngineStandardAppVersionAutomaticScalingMaxIdleInstances(original["maxIdleInstances"], d, config) - transformed["max_pending_latency"] = - flattenAppEngineStandardAppVersionAutomaticScalingMaxPendingLatency(original["maxPendingLatency"], d, config) - 
transformed["min_idle_instances"] = - flattenAppEngineStandardAppVersionAutomaticScalingMinIdleInstances(original["minIdleInstances"], d, config) - transformed["min_pending_latency"] = - flattenAppEngineStandardAppVersionAutomaticScalingMinPendingLatency(original["minPendingLatency"], d, config) - transformed["standard_scheduler_settings"] = - flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettings(original["standardSchedulerSettings"], d, config) - return []interface{}{transformed} -} -func flattenAppEngineStandardAppVersionAutomaticScalingMaxConcurrentRequests(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenAppEngineStandardAppVersionAutomaticScalingMaxIdleInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenAppEngineStandardAppVersionAutomaticScalingMaxPendingLatency(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScalingMinIdleInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as 
float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenAppEngineStandardAppVersionAutomaticScalingMinPendingLatency(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettings(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["target_cpu_utilization"] = - flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetCpuUtilization(original["targetCpuUtilization"], d, config) - transformed["target_throughput_utilization"] = - flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetThroughputUtilization(original["targetThroughputUtilization"], d, config) - transformed["min_instances"] = - flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMinInstances(original["minInstances"], d, config) - transformed["max_instances"] = - flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMaxInstances(original["maxInstances"], d, config) - return []interface{}{transformed} -} -func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetCpuUtilization(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetThroughputUtilization(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMinInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if 
intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMaxInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenAppEngineStandardAppVersionBasicScaling(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["idle_timeout"] = - flattenAppEngineStandardAppVersionBasicScalingIdleTimeout(original["idleTimeout"], d, config) - transformed["max_instances"] = - flattenAppEngineStandardAppVersionBasicScalingMaxInstances(original["maxInstances"], d, config) - return []interface{}{transformed} -} -func flattenAppEngineStandardAppVersionBasicScalingIdleTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionBasicScalingMaxInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - 
-func flattenAppEngineStandardAppVersionManualScaling(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["instances"] = - flattenAppEngineStandardAppVersionManualScalingInstances(original["instances"], d, config) - return []interface{}{transformed} -} -func flattenAppEngineStandardAppVersionManualScalingInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandAppEngineStandardAppVersionVersionId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionRuntime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionThreadsafe(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAppEngineApis(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionRuntimeApiVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := 
make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrlRegex, err := expandAppEngineStandardAppVersionHandlersUrlRegex(original["url_regex"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUrlRegex); val.IsValid() && !isEmptyValue(val) { - transformed["urlRegex"] = transformedUrlRegex - } - - transformedSecurityLevel, err := expandAppEngineStandardAppVersionHandlersSecurityLevel(original["security_level"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecurityLevel); val.IsValid() && !isEmptyValue(val) { - transformed["securityLevel"] = transformedSecurityLevel - } - - transformedLogin, err := expandAppEngineStandardAppVersionHandlersLogin(original["login"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLogin); val.IsValid() && !isEmptyValue(val) { - transformed["login"] = transformedLogin - } - - transformedAuthFailAction, err := expandAppEngineStandardAppVersionHandlersAuthFailAction(original["auth_fail_action"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAuthFailAction); val.IsValid() && !isEmptyValue(val) { - transformed["authFailAction"] = transformedAuthFailAction - } - - transformedRedirectHttpResponseCode, err := expandAppEngineStandardAppVersionHandlersRedirectHttpResponseCode(original["redirect_http_response_code"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRedirectHttpResponseCode); val.IsValid() && !isEmptyValue(val) { - transformed["redirectHttpResponseCode"] = transformedRedirectHttpResponseCode - } - - transformedScript, err := expandAppEngineStandardAppVersionHandlersScript(original["script"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedScript); val.IsValid() && !isEmptyValue(val) { - transformed["script"] = transformedScript - } - - transformedStaticFiles, err := expandAppEngineStandardAppVersionHandlersStaticFiles(original["static_files"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStaticFiles); val.IsValid() && !isEmptyValue(val) { - transformed["staticFiles"] = transformedStaticFiles - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAppEngineStandardAppVersionHandlersUrlRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersSecurityLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersLogin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersAuthFailAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersRedirectHttpResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersScript(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedScriptPath, err := expandAppEngineStandardAppVersionHandlersScriptScriptPath(original["script_path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedScriptPath); val.IsValid() && !isEmptyValue(val) { - transformed["scriptPath"] = transformedScriptPath - } - - return transformed, nil -} - -func 
expandAppEngineStandardAppVersionHandlersScriptScriptPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFiles(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandAppEngineStandardAppVersionHandlersStaticFilesPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedUploadPathRegex, err := expandAppEngineStandardAppVersionHandlersStaticFilesUploadPathRegex(original["upload_path_regex"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUploadPathRegex); val.IsValid() && !isEmptyValue(val) { - transformed["uploadPathRegex"] = transformedUploadPathRegex - } - - transformedHttpHeaders, err := expandAppEngineStandardAppVersionHandlersStaticFilesHttpHeaders(original["http_headers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["httpHeaders"] = transformedHttpHeaders - } - - transformedMimeType, err := expandAppEngineStandardAppVersionHandlersStaticFilesMimeType(original["mime_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMimeType); val.IsValid() && !isEmptyValue(val) { - transformed["mimeType"] = transformedMimeType - } - - transformedExpiration, err := expandAppEngineStandardAppVersionHandlersStaticFilesExpiration(original["expiration"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExpiration); val.IsValid() && 
!isEmptyValue(val) { - transformed["expiration"] = transformedExpiration - } - - transformedRequireMatchingFile, err := expandAppEngineStandardAppVersionHandlersStaticFilesRequireMatchingFile(original["require_matching_file"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRequireMatchingFile); val.IsValid() && !isEmptyValue(val) { - transformed["requireMatchingFile"] = transformedRequireMatchingFile - } - - transformedApplicationReadable, err := expandAppEngineStandardAppVersionHandlersStaticFilesApplicationReadable(original["application_readable"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedApplicationReadable); val.IsValid() && !isEmptyValue(val) { - transformed["applicationReadable"] = transformedApplicationReadable - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFilesPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFilesUploadPathRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFilesHttpHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFilesMimeType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFilesExpiration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFilesRequireMatchingFile(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFilesApplicationReadable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionLibraries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandAppEngineStandardAppVersionLibrariesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedVersion, err := expandAppEngineStandardAppVersionLibrariesVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAppEngineStandardAppVersionLibrariesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionLibrariesVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionEnvVariables(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandAppEngineStandardAppVersionDeployment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 
0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedZip, err := expandAppEngineStandardAppVersionDeploymentZip(original["zip"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedZip); val.IsValid() && !isEmptyValue(val) { - transformed["zip"] = transformedZip - } - - transformedFiles, err := expandAppEngineStandardAppVersionDeploymentFiles(original["files"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFiles); val.IsValid() && !isEmptyValue(val) { - transformed["files"] = transformedFiles - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionDeploymentZip(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSourceUrl, err := expandAppEngineStandardAppVersionDeploymentZipSourceUrl(original["source_url"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSourceUrl); val.IsValid() && !isEmptyValue(val) { - transformed["sourceUrl"] = transformedSourceUrl - } - - transformedFilesCount, err := expandAppEngineStandardAppVersionDeploymentZipFilesCount(original["files_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFilesCount); val.IsValid() && !isEmptyValue(val) { - transformed["filesCount"] = transformedFilesCount - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionDeploymentZipSourceUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionDeploymentZipFilesCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandAppEngineStandardAppVersionDeploymentFiles(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - for _, raw := range v.(*schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSha1Sum, err := expandAppEngineStandardAppVersionDeploymentFilesSha1Sum(original["sha1_sum"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSha1Sum); val.IsValid() && !isEmptyValue(val) { - transformed["sha1Sum"] = transformedSha1Sum - } - - transformedSourceUrl, err := expandAppEngineStandardAppVersionDeploymentFilesSourceUrl(original["source_url"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSourceUrl); val.IsValid() && !isEmptyValue(val) { - transformed["sourceUrl"] = transformedSourceUrl - } - - transformedName, err := expandString(original["name"], d, config) - if err != nil { - return nil, err - } - m[transformedName] = transformed - } - return m, nil -} - -func expandAppEngineStandardAppVersionDeploymentFilesSha1Sum(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionDeploymentFilesSourceUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionEntrypoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedShell, err := expandAppEngineStandardAppVersionEntrypointShell(original["shell"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedShell); val.IsValid() && !isEmptyValue(val) { - transformed["shell"] = transformedShell - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionEntrypointShell(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionVPCAccessConnector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandAppEngineStandardAppVersionVPCAccessConnectorName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedEgressSetting, err := expandAppEngineStandardAppVersionVPCAccessConnectorEgressSetting(original["egress_setting"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEgressSetting); val.IsValid() && !isEmptyValue(val) { - transformed["egressSetting"] = transformedEgressSetting - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionVPCAccessConnectorName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionVPCAccessConnectorEgressSetting(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionInboundServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandAppEngineStandardAppVersionInstanceClass(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandAppEngineStandardAppVersionAutomaticScaling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxConcurrentRequests, err := expandAppEngineStandardAppVersionAutomaticScalingMaxConcurrentRequests(original["max_concurrent_requests"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxConcurrentRequests); val.IsValid() && !isEmptyValue(val) { - transformed["maxConcurrentRequests"] = transformedMaxConcurrentRequests - } - - transformedMaxIdleInstances, err := expandAppEngineStandardAppVersionAutomaticScalingMaxIdleInstances(original["max_idle_instances"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxIdleInstances); val.IsValid() && !isEmptyValue(val) { - transformed["maxIdleInstances"] = transformedMaxIdleInstances - } - - transformedMaxPendingLatency, err := expandAppEngineStandardAppVersionAutomaticScalingMaxPendingLatency(original["max_pending_latency"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxPendingLatency); val.IsValid() && !isEmptyValue(val) { - transformed["maxPendingLatency"] = transformedMaxPendingLatency - } - - transformedMinIdleInstances, err := expandAppEngineStandardAppVersionAutomaticScalingMinIdleInstances(original["min_idle_instances"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinIdleInstances); val.IsValid() && !isEmptyValue(val) { - transformed["minIdleInstances"] = transformedMinIdleInstances - } - - transformedMinPendingLatency, err := expandAppEngineStandardAppVersionAutomaticScalingMinPendingLatency(original["min_pending_latency"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedMinPendingLatency); val.IsValid() && !isEmptyValue(val) { - transformed["minPendingLatency"] = transformedMinPendingLatency - } - - transformedStandardSchedulerSettings, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettings(original["standard_scheduler_settings"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStandardSchedulerSettings); val.IsValid() && !isEmptyValue(val) { - transformed["standardSchedulerSettings"] = transformedStandardSchedulerSettings - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingMaxConcurrentRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingMaxIdleInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingMaxPendingLatency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingMinIdleInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingMinPendingLatency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTargetCpuUtilization, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetCpuUtilization(original["target_cpu_utilization"], d, config) - if 
err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTargetCpuUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["targetCpuUtilization"] = transformedTargetCpuUtilization - } - - transformedTargetThroughputUtilization, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetThroughputUtilization(original["target_throughput_utilization"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTargetThroughputUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["targetThroughputUtilization"] = transformedTargetThroughputUtilization - } - - transformedMinInstances, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMinInstances(original["min_instances"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinInstances); val.IsValid() && !isEmptyValue(val) { - transformed["minInstances"] = transformedMinInstances - } - - transformedMaxInstances, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMaxInstances(original["max_instances"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxInstances); val.IsValid() && !isEmptyValue(val) { - transformed["maxInstances"] = transformedMaxInstances - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetCpuUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetThroughputUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMinInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, 
nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMaxInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionBasicScaling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdleTimeout, err := expandAppEngineStandardAppVersionBasicScalingIdleTimeout(original["idle_timeout"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIdleTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["idleTimeout"] = transformedIdleTimeout - } - - transformedMaxInstances, err := expandAppEngineStandardAppVersionBasicScalingMaxInstances(original["max_instances"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxInstances); val.IsValid() && !isEmptyValue(val) { - transformed["maxInstances"] = transformedMaxInstances - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionBasicScalingIdleTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionBasicScalingMaxInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionManualScaling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInstances, err := expandAppEngineStandardAppVersionManualScalingInstances(original["instances"], d, config) - if err != nil { - return nil, err 
- } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !isEmptyValue(val) { - transformed["instances"] = transformedInstances - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionManualScalingInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_artifact_registry_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_artifact_registry_repository.go deleted file mode 100644 index a708430e81..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_artifact_registry_repository.go +++ /dev/null @@ -1,560 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceArtifactRegistryRepository() *schema.Resource { - return &schema.Resource{ - Create: resourceArtifactRegistryRepositoryCreate, - Read: resourceArtifactRegistryRepositoryRead, - Update: resourceArtifactRegistryRepositoryUpdate, - Delete: resourceArtifactRegistryRepositoryDelete, - - Importer: &schema.ResourceImporter{ - State: resourceArtifactRegistryRepositoryImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "format": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareCaseInsensitive, - Description: `The format of packages that are stored in the repository. Supported formats -can be found [here](https://cloud.google.com/artifact-registry/docs/supported-formats). -You can only create alpha formats if you are a member of the -[alpha user group](https://cloud.google.com/artifact-registry/docs/supported-formats#alpha-access).`, - }, - "repository_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The last part of the repository name, for example: -"repo1"`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `The user-provided description of the repository.`, - }, - "kms_key_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The Cloud KMS resource name of the customer managed encryption key that’s -used to encrypt the contents of the Repository. Has the form: -'projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key'. 
-This value may not be changed after the Repository has been created.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Labels with user-defined metadata. -This field may contain up to 64 entries. Label keys and values may be no -longer than 63 characters. Label keys must begin with a lowercase letter -and may only contain lowercase letters, numeric characters, underscores, -and dashes.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The name of the location this repository is located in.`, - }, - "maven_config": { - Type: schema.TypeList, - Optional: true, - Description: `MavenRepositoryConfig is maven related repository details. -Provides additional configuration details for repositories of the maven -format type.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allow_snapshot_overwrites": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The repository with this flag will allow publishing the same -snapshot versions.`, - }, - "version_policy": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"VERSION_POLICY_UNSPECIFIED", "RELEASE", "SNAPSHOT", ""}), - Description: `Version policy defines the versions that the registry will accept. 
Default value: "VERSION_POLICY_UNSPECIFIED" Possible values: ["VERSION_POLICY_UNSPECIFIED", "RELEASE", "SNAPSHOT"]`, - Default: "VERSION_POLICY_UNSPECIFIED", - }, - }, - }, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time when the repository was created.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the repository, for example: -"repo1"`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time when the repository was last updated.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceArtifactRegistryRepositoryCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - formatProp, err := expandArtifactRegistryRepositoryFormat(d.Get("format"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("format"); !isEmptyValue(reflect.ValueOf(formatProp)) && (ok || !reflect.DeepEqual(v, formatProp)) { - obj["format"] = formatProp - } - descriptionProp, err := expandArtifactRegistryRepositoryDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandArtifactRegistryRepositoryLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - kmsKeyNameProp, err := expandArtifactRegistryRepositoryKmsKeyName(d.Get("kms_key_name"), d, config) - if err != nil { - 
return err - } else if v, ok := d.GetOkExists("kms_key_name"); !isEmptyValue(reflect.ValueOf(kmsKeyNameProp)) && (ok || !reflect.DeepEqual(v, kmsKeyNameProp)) { - obj["kmsKeyName"] = kmsKeyNameProp - } - mavenConfigProp, err := expandArtifactRegistryRepositoryMavenConfig(d.Get("maven_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("maven_config"); !isEmptyValue(reflect.ValueOf(mavenConfigProp)) && (ok || !reflect.DeepEqual(v, mavenConfigProp)) { - obj["mavenConfig"] = mavenConfigProp - } - - url, err := replaceVars(d, config, "{{ArtifactRegistryBasePath}}projects/{{project}}/locations/{{location}}/repositories?repository_id={{repository_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Repository: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Repository: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Repository: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = ArtifactRegistryOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Repository", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Repository: %s", err) - } 
- - if err := d.Set("name", flattenArtifactRegistryRepositoryName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Repository %q: %#v", d.Id(), res) - - return resourceArtifactRegistryRepositoryRead(d, meta) -} - -func resourceArtifactRegistryRepositoryRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ArtifactRegistryBasePath}}projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Repository: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ArtifactRegistryRepository %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - - if err := d.Set("name", flattenArtifactRegistryRepositoryName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - if err := d.Set("format", flattenArtifactRegistryRepositoryFormat(res["format"], d, config)); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - if err := d.Set("description", 
flattenArtifactRegistryRepositoryDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - if err := d.Set("labels", flattenArtifactRegistryRepositoryLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - if err := d.Set("kms_key_name", flattenArtifactRegistryRepositoryKmsKeyName(res["kmsKeyName"], d, config)); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - if err := d.Set("create_time", flattenArtifactRegistryRepositoryCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - if err := d.Set("update_time", flattenArtifactRegistryRepositoryUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - if err := d.Set("maven_config", flattenArtifactRegistryRepositoryMavenConfig(res["mavenConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - - return nil -} - -func resourceArtifactRegistryRepositoryUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Repository: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandArtifactRegistryRepositoryDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandArtifactRegistryRepositoryLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - mavenConfigProp, err := expandArtifactRegistryRepositoryMavenConfig(d.Get("maven_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("maven_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mavenConfigProp)) { - obj["mavenConfig"] = mavenConfigProp - } - - url, err := replaceVars(d, config, "{{ArtifactRegistryBasePath}}projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Repository %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("maven_config") { - updateMask = append(updateMask, "mavenConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Repository %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Repository %q: %#v", d.Id(), res) - } - - return resourceArtifactRegistryRepositoryRead(d, meta) -} - -func resourceArtifactRegistryRepositoryDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - 
project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Repository: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ArtifactRegistryBasePath}}projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Repository %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Repository") - } - - err = ArtifactRegistryOperationWaitTime( - config, res, project, "Deleting Repository", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Repository %q: %#v", d.Id(), res) - return nil -} - -func resourceArtifactRegistryRepositoryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/repositories/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenArtifactRegistryRepositoryName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenArtifactRegistryRepositoryFormat(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenArtifactRegistryRepositoryDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenArtifactRegistryRepositoryLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenArtifactRegistryRepositoryKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenArtifactRegistryRepositoryCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenArtifactRegistryRepositoryUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenArtifactRegistryRepositoryMavenConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allow_snapshot_overwrites"] = - flattenArtifactRegistryRepositoryMavenConfigAllowSnapshotOverwrites(original["allowSnapshotOverwrites"], d, config) - transformed["version_policy"] = - flattenArtifactRegistryRepositoryMavenConfigVersionPolicy(original["versionPolicy"], d, config) - return []interface{}{transformed} -} -func flattenArtifactRegistryRepositoryMavenConfigAllowSnapshotOverwrites(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenArtifactRegistryRepositoryMavenConfigVersionPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandArtifactRegistryRepositoryFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandArtifactRegistryRepositoryDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandArtifactRegistryRepositoryLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandArtifactRegistryRepositoryKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandArtifactRegistryRepositoryMavenConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowSnapshotOverwrites, err := expandArtifactRegistryRepositoryMavenConfigAllowSnapshotOverwrites(original["allow_snapshot_overwrites"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAllowSnapshotOverwrites); val.IsValid() && !isEmptyValue(val) { - transformed["allowSnapshotOverwrites"] = transformedAllowSnapshotOverwrites - } - - transformedVersionPolicy, err := expandArtifactRegistryRepositoryMavenConfigVersionPolicy(original["version_policy"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersionPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["versionPolicy"] = transformedVersionPolicy - } - - return transformed, nil -} - -func expandArtifactRegistryRepositoryMavenConfigAllowSnapshotOverwrites(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandArtifactRegistryRepositoryMavenConfigVersionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_beyondcorp_app_connection.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_beyondcorp_app_connection.go deleted file mode 100644 index cd17c99df9..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_beyondcorp_app_connection.go +++ /dev/null @@ -1,687 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceBeyondcorpAppConnection() *schema.Resource { - return &schema.Resource{ - Create: resourceBeyondcorpAppConnectionCreate, - Read: resourceBeyondcorpAppConnectionRead, - Update: resourceBeyondcorpAppConnectionUpdate, - Delete: resourceBeyondcorpAppConnectionDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBeyondcorpAppConnectionImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "application_endpoint": { - Type: schema.TypeList, - Required: true, - Description: `Address of the remote application endpoint for the BeyondCorp AppConnection.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "host": { - Type: schema.TypeString, - Required: true, - Description: `Hostname or IP address of the remote application 
endpoint.`, - }, - "port": { - Type: schema.TypeInt, - Required: true, - Description: `Port of the remote application endpoint.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the AppConnection.`, - }, - "connectors": { - Type: schema.TypeList, - Optional: true, - Description: `List of AppConnectors that are authorised to be associated with this AppConnection`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `An arbitrary user-provided name for the AppConnection.`, - }, - "gateway": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Gateway used by the AppConnection.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "app_gateway": { - Type: schema.TypeString, - Required: true, - Description: `AppGateway name in following format: projects/{project_id}/locations/{locationId}/appgateways/{gateway_id}.`, - }, - "type": { - Type: schema.TypeString, - Optional: true, - Description: `The type of hosting used by the gateway. 
Refer to -https://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#Type_1 -for a list of possible values.`, - }, - "ingress_port": { - Type: schema.TypeInt, - Computed: true, - Description: `Ingress port reserved on the gateways for this AppConnection, if not specified or zero, the default port is 19443.`, - }, - "uri": { - Type: schema.TypeString, - Computed: true, - Description: `Server-defined URI for this resource.`, - }, - }, - }, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Resource labels to represent user provided metadata.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "region": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The region of the AppConnection.`, - }, - "type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The type of network connectivity used by the AppConnection. Refer to -https://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#type -for a list of possible values.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBeyondcorpAppConnectionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandBeyondcorpAppConnectionDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandBeyondcorpAppConnectionLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); 
!isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - typeProp, err := expandBeyondcorpAppConnectionType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - applicationEndpointProp, err := expandBeyondcorpAppConnectionApplicationEndpoint(d.Get("application_endpoint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("application_endpoint"); !isEmptyValue(reflect.ValueOf(applicationEndpointProp)) && (ok || !reflect.DeepEqual(v, applicationEndpointProp)) { - obj["applicationEndpoint"] = applicationEndpointProp - } - connectorsProp, err := expandBeyondcorpAppConnectionConnectors(d.Get("connectors"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("connectors"); !isEmptyValue(reflect.ValueOf(connectorsProp)) && (ok || !reflect.DeepEqual(v, connectorsProp)) { - obj["connectors"] = connectorsProp - } - gatewayProp, err := expandBeyondcorpAppConnectionGateway(d.Get("gateway"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("gateway"); !isEmptyValue(reflect.ValueOf(gatewayProp)) && (ok || !reflect.DeepEqual(v, gatewayProp)) { - obj["gateway"] = gatewayProp - } - - url, err := replaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnections?app_connection_id={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new AppConnection: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppConnection: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating AppConnection: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/appConnections/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = BeyondcorpOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating AppConnection", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create AppConnection: %s", err) - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/appConnections/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating AppConnection %q: %#v", d.Id(), res) - - return resourceBeyondcorpAppConnectionRead(d, meta) -} - -func resourceBeyondcorpAppConnectionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnections/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppConnection: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BeyondcorpAppConnection %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading AppConnection: %s", err) - } - - if err := d.Set("display_name", flattenBeyondcorpAppConnectionDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading AppConnection: %s", err) - } - if err := d.Set("labels", flattenBeyondcorpAppConnectionLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading AppConnection: %s", err) - } - if err := d.Set("type", flattenBeyondcorpAppConnectionType(res["type"], d, config)); err != nil { - return fmt.Errorf("Error reading AppConnection: %s", err) - } - if err := d.Set("application_endpoint", flattenBeyondcorpAppConnectionApplicationEndpoint(res["applicationEndpoint"], d, config)); err != nil { - return fmt.Errorf("Error reading AppConnection: %s", err) - } - if err := d.Set("connectors", flattenBeyondcorpAppConnectionConnectors(res["connectors"], d, config)); err != nil { - return fmt.Errorf("Error reading AppConnection: %s", err) - } - if err := d.Set("gateway", flattenBeyondcorpAppConnectionGateway(res["gateway"], d, config)); err != nil { - return fmt.Errorf("Error reading AppConnection: %s", err) - } - - return nil -} - -func resourceBeyondcorpAppConnectionUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppConnection: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandBeyondcorpAppConnectionDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok 
:= d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandBeyondcorpAppConnectionLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - applicationEndpointProp, err := expandBeyondcorpAppConnectionApplicationEndpoint(d.Get("application_endpoint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("application_endpoint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, applicationEndpointProp)) { - obj["applicationEndpoint"] = applicationEndpointProp - } - connectorsProp, err := expandBeyondcorpAppConnectionConnectors(d.Get("connectors"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("connectors"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, connectorsProp)) { - obj["connectors"] = connectorsProp - } - gatewayProp, err := expandBeyondcorpAppConnectionGateway(d.Get("gateway"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("gateway"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gatewayProp)) { - obj["gateway"] = gatewayProp - } - - url, err := replaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnections/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating AppConnection %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("application_endpoint") { - updateMask = append(updateMask, "applicationEndpoint") - } - - if d.HasChange("connectors") { - updateMask = 
append(updateMask, "connectors") - } - - if d.HasChange("gateway") { - updateMask = append(updateMask, "gateway") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating AppConnection %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating AppConnection %q: %#v", d.Id(), res) - } - - err = BeyondcorpOperationWaitTime( - config, res, project, "Updating AppConnection", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceBeyondcorpAppConnectionRead(d, meta) -} - -func resourceBeyondcorpAppConnectionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppConnection: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnections/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting AppConnection %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if 
err != nil { - return handleNotFoundError(err, d, "AppConnection") - } - - err = BeyondcorpOperationWaitTime( - config, res, project, "Deleting AppConnection", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting AppConnection %q: %#v", d.Id(), res) - return nil -} - -func resourceBeyondcorpAppConnectionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/appConnections/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/appConnections/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenBeyondcorpAppConnectionDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppConnectionLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppConnectionType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppConnectionApplicationEndpoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host"] = - flattenBeyondcorpAppConnectionApplicationEndpointHost(original["host"], d, config) - transformed["port"] = - flattenBeyondcorpAppConnectionApplicationEndpointPort(original["port"], d, config) - return []interface{}{transformed} -} -func 
flattenBeyondcorpAppConnectionApplicationEndpointHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppConnectionApplicationEndpointPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenBeyondcorpAppConnectionConnectors(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppConnectionGateway(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["app_gateway"] = - flattenBeyondcorpAppConnectionGatewayAppGateway(original["appGateway"], d, config) - transformed["type"] = - flattenBeyondcorpAppConnectionGatewayType(original["type"], d, config) - transformed["uri"] = - flattenBeyondcorpAppConnectionGatewayUri(original["uri"], d, config) - transformed["ingress_port"] = - flattenBeyondcorpAppConnectionGatewayIngressPort(original["ingressPort"], d, config) - return []interface{}{transformed} -} -func flattenBeyondcorpAppConnectionGatewayAppGateway(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppConnectionGatewayType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppConnectionGatewayUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppConnectionGatewayIngressPort(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandBeyondcorpAppConnectionDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBeyondcorpAppConnectionLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandBeyondcorpAppConnectionType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBeyondcorpAppConnectionApplicationEndpoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHost, err := expandBeyondcorpAppConnectionApplicationEndpointHost(original["host"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - transformedPort, err := expandBeyondcorpAppConnectionApplicationEndpointPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - return transformed, nil -} - -func expandBeyondcorpAppConnectionApplicationEndpointHost(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandBeyondcorpAppConnectionApplicationEndpointPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBeyondcorpAppConnectionConnectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBeyondcorpAppConnectionGateway(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAppGateway, err := expandBeyondcorpAppConnectionGatewayAppGateway(original["app_gateway"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAppGateway); val.IsValid() && !isEmptyValue(val) { - transformed["appGateway"] = transformedAppGateway - } - - transformedType, err := expandBeyondcorpAppConnectionGatewayType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - transformedUri, err := expandBeyondcorpAppConnectionGatewayUri(original["uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !isEmptyValue(val) { - transformed["uri"] = transformedUri - } - - transformedIngressPort, err := expandBeyondcorpAppConnectionGatewayIngressPort(original["ingress_port"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIngressPort); val.IsValid() && !isEmptyValue(val) { - transformed["ingressPort"] = transformedIngressPort - } - - return transformed, nil -} - -func expandBeyondcorpAppConnectionGatewayAppGateway(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandBeyondcorpAppConnectionGatewayType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBeyondcorpAppConnectionGatewayUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBeyondcorpAppConnectionGatewayIngressPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_beyondcorp_app_connector.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_beyondcorp_app_connector.go deleted file mode 100644 index 2eb4a915cf..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_beyondcorp_app_connector.go +++ /dev/null @@ -1,490 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceBeyondcorpAppConnector() *schema.Resource { - return &schema.Resource{ - Create: resourceBeyondcorpAppConnectorCreate, - Read: resourceBeyondcorpAppConnectorRead, - Update: resourceBeyondcorpAppConnectorUpdate, - Delete: resourceBeyondcorpAppConnectorDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBeyondcorpAppConnectorImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the AppConnector.`, - }, - "principal_info": { - Type: schema.TypeList, - Required: true, - Description: `Principal information about the Identity of the AppConnector.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "service_account": { - Type: schema.TypeList, - Required: true, - Description: `ServiceAccount represents a GCP service account.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "email": { - Type: schema.TypeString, - Required: true, - Description: `Email address of the service account.`, - }, - }, - }, - }, - }, - }, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `An arbitrary user-provided name for the AppConnector.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Resource labels to represent user provided metadata.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "region": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The region of the AppConnector.`, - }, - 
"state": { - Type: schema.TypeString, - Computed: true, - Description: `Represents the different states of a AppConnector.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBeyondcorpAppConnectorCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandBeyondcorpAppConnectorDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandBeyondcorpAppConnectorLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - principalInfoProp, err := expandBeyondcorpAppConnectorPrincipalInfo(d.Get("principal_info"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("principal_info"); !isEmptyValue(reflect.ValueOf(principalInfoProp)) && (ok || !reflect.DeepEqual(v, principalInfoProp)) { - obj["principalInfo"] = principalInfoProp - } - - url, err := replaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnectors?app_connector_id={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new AppConnector: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppConnector: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating AppConnector: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/appConnectors/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = BeyondcorpOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating AppConnector", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create AppConnector: %s", err) - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/appConnectors/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating AppConnector %q: %#v", d.Id(), res) - - return resourceBeyondcorpAppConnectorRead(d, meta) -} - -func resourceBeyondcorpAppConnectorRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnectors/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppConnector: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BeyondcorpAppConnector %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading AppConnector: %s", err) - } - - if err := d.Set("display_name", flattenBeyondcorpAppConnectorDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading AppConnector: %s", err) - } - if err := d.Set("labels", flattenBeyondcorpAppConnectorLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading AppConnector: %s", err) - } - if err := d.Set("principal_info", flattenBeyondcorpAppConnectorPrincipalInfo(res["principalInfo"], d, config)); err != nil { - return fmt.Errorf("Error reading AppConnector: %s", err) - } - if err := d.Set("state", flattenBeyondcorpAppConnectorState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading AppConnector: %s", err) - } - - return nil -} - -func resourceBeyondcorpAppConnectorUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppConnector: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandBeyondcorpAppConnectorDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandBeyondcorpAppConnectorLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - principalInfoProp, err := expandBeyondcorpAppConnectorPrincipalInfo(d.Get("principal_info"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("principal_info"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, principalInfoProp)) { - obj["principalInfo"] = principalInfoProp - } - - url, err := replaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnectors/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating AppConnector %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("principal_info") { - updateMask = append(updateMask, "principalInfo") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating AppConnector %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating AppConnector %q: %#v", d.Id(), res) - } - - err = BeyondcorpOperationWaitTime( - config, res, project, "Updating AppConnector", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceBeyondcorpAppConnectorRead(d, meta) -} - -func resourceBeyondcorpAppConnectorDelete(d *schema.ResourceData, meta interface{}) 
error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppConnector: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnectors/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting AppConnector %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "AppConnector") - } - - err = BeyondcorpOperationWaitTime( - config, res, project, "Deleting AppConnector", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting AppConnector %q: %#v", d.Id(), res) - return nil -} - -func resourceBeyondcorpAppConnectorImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/appConnectors/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/appConnectors/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenBeyondcorpAppConnectorDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} 
- -func flattenBeyondcorpAppConnectorLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppConnectorPrincipalInfo(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["service_account"] = - flattenBeyondcorpAppConnectorPrincipalInfoServiceAccount(original["serviceAccount"], d, config) - return []interface{}{transformed} -} -func flattenBeyondcorpAppConnectorPrincipalInfoServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["email"] = - flattenBeyondcorpAppConnectorPrincipalInfoServiceAccountEmail(original["email"], d, config) - return []interface{}{transformed} -} -func flattenBeyondcorpAppConnectorPrincipalInfoServiceAccountEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppConnectorState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBeyondcorpAppConnectorDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBeyondcorpAppConnectorLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandBeyondcorpAppConnectorPrincipalInfo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - 
original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceAccount, err := expandBeyondcorpAppConnectorPrincipalInfoServiceAccount(original["service_account"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedServiceAccount); val.IsValid() && !isEmptyValue(val) { - transformed["serviceAccount"] = transformedServiceAccount - } - - return transformed, nil -} - -func expandBeyondcorpAppConnectorPrincipalInfoServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEmail, err := expandBeyondcorpAppConnectorPrincipalInfoServiceAccountEmail(original["email"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEmail); val.IsValid() && !isEmptyValue(val) { - transformed["email"] = transformedEmail - } - - return transformed, nil -} - -func expandBeyondcorpAppConnectorPrincipalInfoServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_beyondcorp_app_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_beyondcorp_app_gateway.go deleted file mode 100644 index 6af3d4651b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_beyondcorp_app_gateway.go +++ /dev/null @@ -1,418 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically 
generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceBeyondcorpAppGateway() *schema.Resource { - return &schema.Resource{ - Create: resourceBeyondcorpAppGatewayCreate, - Read: resourceBeyondcorpAppGatewayRead, - Delete: resourceBeyondcorpAppGatewayDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBeyondcorpAppGatewayImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the AppGateway.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An arbitrary user-provided name for the AppGateway.`, - }, - "host_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"HOST_TYPE_UNSPECIFIED", "GCP_REGIONAL_MIG", ""}), - Description: `The type of hosting used by the AppGateway. 
Default value: "HOST_TYPE_UNSPECIFIED" Possible values: ["HOST_TYPE_UNSPECIFIED", "GCP_REGIONAL_MIG"]`, - Default: "HOST_TYPE_UNSPECIFIED", - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Resource labels to represent user provided metadata.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "region": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The region of the AppGateway.`, - }, - "type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"TYPE_UNSPECIFIED", "TCP_PROXY", ""}), - Description: `The type of network connectivity used by the AppGateway. Default value: "TYPE_UNSPECIFIED" Possible values: ["TYPE_UNSPECIFIED", "TCP_PROXY"]`, - Default: "TYPE_UNSPECIFIED", - }, - "allocated_connections": { - Type: schema.TypeList, - Computed: true, - Description: `A list of connections allocated for the Gateway.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ingress_port": { - Type: schema.TypeInt, - Optional: true, - Description: `The ingress port of an allocated connection.`, - }, - "psc_uri": { - Type: schema.TypeString, - Optional: true, - Description: `The PSC uri of an allocated connection.`, - }, - }, - }, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `Represents the different states of a AppGateway.`, - }, - "uri": { - Type: schema.TypeString, - Computed: true, - Description: `Server-defined URI for this resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBeyondcorpAppGatewayCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - typeProp, err := expandBeyondcorpAppGatewayType(d.Get("type"), 
d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - hostTypeProp, err := expandBeyondcorpAppGatewayHostType(d.Get("host_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host_type"); !isEmptyValue(reflect.ValueOf(hostTypeProp)) && (ok || !reflect.DeepEqual(v, hostTypeProp)) { - obj["hostType"] = hostTypeProp - } - displayNameProp, err := expandBeyondcorpAppGatewayDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandBeyondcorpAppGatewayLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appGateways?app_gateway_id={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new AppGateway: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppGateway: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating AppGateway: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, 
"projects/{{project}}/locations/{{region}}/appGateways/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = BeyondcorpOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating AppGateway", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create AppGateway: %s", err) - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/appGateways/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating AppGateway %q: %#v", d.Id(), res) - - return resourceBeyondcorpAppGatewayRead(d, meta) -} - -func resourceBeyondcorpAppGatewayRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appGateways/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppGateway: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BeyondcorpAppGateway %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading AppGateway: %s", err) - 
} - - if err := d.Set("type", flattenBeyondcorpAppGatewayType(res["type"], d, config)); err != nil { - return fmt.Errorf("Error reading AppGateway: %s", err) - } - if err := d.Set("host_type", flattenBeyondcorpAppGatewayHostType(res["hostType"], d, config)); err != nil { - return fmt.Errorf("Error reading AppGateway: %s", err) - } - if err := d.Set("display_name", flattenBeyondcorpAppGatewayDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading AppGateway: %s", err) - } - if err := d.Set("labels", flattenBeyondcorpAppGatewayLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading AppGateway: %s", err) - } - if err := d.Set("state", flattenBeyondcorpAppGatewayState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading AppGateway: %s", err) - } - if err := d.Set("uri", flattenBeyondcorpAppGatewayUri(res["uri"], d, config)); err != nil { - return fmt.Errorf("Error reading AppGateway: %s", err) - } - if err := d.Set("allocated_connections", flattenBeyondcorpAppGatewayAllocatedConnections(res["allocatedConnections"], d, config)); err != nil { - return fmt.Errorf("Error reading AppGateway: %s", err) - } - - return nil -} - -func resourceBeyondcorpAppGatewayDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppGateway: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appGateways/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting AppGateway %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == 
nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "AppGateway") - } - - err = BeyondcorpOperationWaitTime( - config, res, project, "Deleting AppGateway", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting AppGateway %q: %#v", d.Id(), res) - return nil -} - -func resourceBeyondcorpAppGatewayImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/appGateways/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/appGateways/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenBeyondcorpAppGatewayType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppGatewayHostType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppGatewayDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppGatewayLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppGatewayState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppGatewayUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppGatewayAllocatedConnections(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["psc_uri"] = - flattenBeyondcorpAppGatewayAllocatedConnectionsPscUri(original["pscUri"], d, config) - transformed["ingress_port"] = - flattenBeyondcorpAppGatewayAllocatedConnectionsIngressPort(original["ingressPort"], d, config) - return []interface{}{transformed} -} -func flattenBeyondcorpAppGatewayAllocatedConnectionsPscUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBeyondcorpAppGatewayAllocatedConnectionsIngressPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandBeyondcorpAppGatewayType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBeyondcorpAppGatewayHostType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBeyondcorpAppGatewayDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBeyondcorpAppGatewayLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_analytics_hub_data_exchange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_analytics_hub_data_exchange.go deleted file mode 100644 index e370a69834..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_analytics_hub_data_exchange.go +++ /dev/null @@ -1,455 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceBigqueryAnalyticsHubDataExchange() *schema.Resource { - return &schema.Resource{ - Create: resourceBigqueryAnalyticsHubDataExchangeCreate, - Read: resourceBigqueryAnalyticsHubDataExchangeRead, - Update: resourceBigqueryAnalyticsHubDataExchangeUpdate, - Delete: resourceBigqueryAnalyticsHubDataExchangeDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBigqueryAnalyticsHubDataExchangeImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "data_exchange_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the data exchange. 
Must contain only Unicode letters, numbers (0-9), underscores (_). Should not use characters that require URL-escaping, or characters outside of ASCII, spaces.`, - }, - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `Human-readable display name of the data exchange. The display name must contain only Unicode letters, numbers (0-9), underscores (_), dashes (-), spaces ( ), and must not start or end with spaces.`, - }, - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the location this data exchange.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Description of the data exchange.`, - }, - "documentation": { - Type: schema.TypeString, - Optional: true, - Description: `Documentation describing the data exchange.`, - }, - "icon": { - Type: schema.TypeString, - Optional: true, - Description: `Base64 encoded image representing the data exchange.`, - }, - "primary_contact": { - Type: schema.TypeString, - Optional: true, - Description: `Email or URL of the primary point of contact of the data exchange.`, - }, - "listing_count": { - Type: schema.TypeInt, - Computed: true, - Description: `Number of listings contained in the data exchange.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the data exchange, for example: -"projects/myproject/locations/US/dataExchanges/123"`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigqueryAnalyticsHubDataExchangeCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandBigqueryAnalyticsHubDataExchangeDisplayName(d.Get("display_name"), d, config) - if err != 
nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandBigqueryAnalyticsHubDataExchangeDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - primaryContactProp, err := expandBigqueryAnalyticsHubDataExchangePrimaryContact(d.Get("primary_contact"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("primary_contact"); !isEmptyValue(reflect.ValueOf(primaryContactProp)) && (ok || !reflect.DeepEqual(v, primaryContactProp)) { - obj["primaryContact"] = primaryContactProp - } - documentationProp, err := expandBigqueryAnalyticsHubDataExchangeDocumentation(d.Get("documentation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("documentation"); !isEmptyValue(reflect.ValueOf(documentationProp)) && (ok || !reflect.DeepEqual(v, documentationProp)) { - obj["documentation"] = documentationProp - } - iconProp, err := expandBigqueryAnalyticsHubDataExchangeIcon(d.Get("icon"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("icon"); !isEmptyValue(reflect.ValueOf(iconProp)) && (ok || !reflect.DeepEqual(v, iconProp)) { - obj["icon"] = iconProp - } - - url, err := replaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges?data_exchange_id={{data_exchange_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new DataExchange: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DataExchange: %s", err) - } - billingProject = project - - // err 
== nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating DataExchange: %s", err) - } - if err := d.Set("name", flattenBigqueryAnalyticsHubDataExchangeName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating DataExchange %q: %#v", d.Id(), res) - - return resourceBigqueryAnalyticsHubDataExchangeRead(d, meta) -} - -func resourceBigqueryAnalyticsHubDataExchangeRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DataExchange: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigqueryAnalyticsHubDataExchange %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading DataExchange: %s", err) - } - - 
if err := d.Set("name", flattenBigqueryAnalyticsHubDataExchangeName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading DataExchange: %s", err) - } - if err := d.Set("display_name", flattenBigqueryAnalyticsHubDataExchangeDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading DataExchange: %s", err) - } - if err := d.Set("description", flattenBigqueryAnalyticsHubDataExchangeDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading DataExchange: %s", err) - } - if err := d.Set("primary_contact", flattenBigqueryAnalyticsHubDataExchangePrimaryContact(res["primaryContact"], d, config)); err != nil { - return fmt.Errorf("Error reading DataExchange: %s", err) - } - if err := d.Set("documentation", flattenBigqueryAnalyticsHubDataExchangeDocumentation(res["documentation"], d, config)); err != nil { - return fmt.Errorf("Error reading DataExchange: %s", err) - } - if err := d.Set("listing_count", flattenBigqueryAnalyticsHubDataExchangeListingCount(res["listingCount"], d, config)); err != nil { - return fmt.Errorf("Error reading DataExchange: %s", err) - } - if err := d.Set("icon", flattenBigqueryAnalyticsHubDataExchangeIcon(res["icon"], d, config)); err != nil { - return fmt.Errorf("Error reading DataExchange: %s", err) - } - - return nil -} - -func resourceBigqueryAnalyticsHubDataExchangeUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DataExchange: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandBigqueryAnalyticsHubDataExchangeDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandBigqueryAnalyticsHubDataExchangeDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - primaryContactProp, err := expandBigqueryAnalyticsHubDataExchangePrimaryContact(d.Get("primary_contact"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("primary_contact"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, primaryContactProp)) { - obj["primaryContact"] = primaryContactProp - } - documentationProp, err := expandBigqueryAnalyticsHubDataExchangeDocumentation(d.Get("documentation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("documentation"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, documentationProp)) { - obj["documentation"] = documentationProp - } - iconProp, err := expandBigqueryAnalyticsHubDataExchangeIcon(d.Get("icon"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("icon"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, iconProp)) { - obj["icon"] = iconProp - } - - url, err := replaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating DataExchange %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("primary_contact") { - updateMask = append(updateMask, "primaryContact") - } - - if d.HasChange("documentation") { - 
updateMask = append(updateMask, "documentation") - } - - if d.HasChange("icon") { - updateMask = append(updateMask, "icon") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating DataExchange %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating DataExchange %q: %#v", d.Id(), res) - } - - return resourceBigqueryAnalyticsHubDataExchangeRead(d, meta) -} - -func resourceBigqueryAnalyticsHubDataExchangeDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DataExchange: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting DataExchange %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DataExchange") - } - - log.Printf("[DEBUG] Finished deleting DataExchange 
%q: %#v", d.Id(), res) - return nil -} - -func resourceBigqueryAnalyticsHubDataExchangeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/dataExchanges/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenBigqueryAnalyticsHubDataExchangeName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubDataExchangeDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubDataExchangeDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubDataExchangePrimaryContact(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubDataExchangeDocumentation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubDataExchangeListingCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenBigqueryAnalyticsHubDataExchangeIcon(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBigqueryAnalyticsHubDataExchangeDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubDataExchangeDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubDataExchangePrimaryContact(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubDataExchangeDocumentation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubDataExchangeIcon(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_analytics_hub_listing.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_analytics_hub_listing.go deleted file mode 100644 index 9918a554d0..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_analytics_hub_listing.go +++ /dev/null @@ -1,768 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceBigqueryAnalyticsHubListing() *schema.Resource { - return &schema.Resource{ - Create: resourceBigqueryAnalyticsHubListingCreate, - Read: resourceBigqueryAnalyticsHubListingRead, - Update: resourceBigqueryAnalyticsHubListingUpdate, - Delete: resourceBigqueryAnalyticsHubListingDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBigqueryAnalyticsHubListingImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "bigquery_dataset": { - Type: schema.TypeList, - Required: true, - Description: `Shared dataset i.e. BigQuery dataset source.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: projectNumberDiffSuppress, - Description: `Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123`, - }, - }, - }, - }, - "data_exchange_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the data exchange. Must contain only Unicode letters, numbers (0-9), underscores (_). Should not use characters that require URL-escaping, or characters outside of ASCII, spaces.`, - }, - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `Human-readable display name of the listing. 
The display name must contain only Unicode letters, numbers (0-9), underscores (_), dashes (-), spaces ( ), ampersands (&) and can't start or end with spaces.`, - }, - "listing_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the listing. Must contain only Unicode letters, numbers (0-9), underscores (_). Should not use characters that require URL-escaping, or characters outside of ASCII, spaces.`, - }, - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the location this data exchange listing.`, - }, - "categories": { - Type: schema.TypeList, - Optional: true, - Description: `Categories of the listing. Up to two categories are allowed.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "data_provider": { - Type: schema.TypeList, - Optional: true, - Description: `Details of the data provider who owns the source data.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the data provider.`, - }, - "primary_contact": { - Type: schema.TypeString, - Optional: true, - Description: `Email or URL of the data provider.`, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Short description of the listing. 
The description must not contain Unicode non-characters and C0 and C1 control codes except tabs (HT), new lines (LF), carriage returns (CR), and page breaks (FF).`, - }, - "documentation": { - Type: schema.TypeString, - Optional: true, - Description: `Documentation describing the listing.`, - }, - "icon": { - Type: schema.TypeString, - Optional: true, - Description: `Base64 encoded image representing the listing.`, - }, - "primary_contact": { - Type: schema.TypeString, - Optional: true, - Description: `Email or URL of the primary point of contact of the listing.`, - }, - "publisher": { - Type: schema.TypeList, - Optional: true, - Description: `Details of the publisher who owns the listing and who can share the source data.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the listing publisher.`, - }, - "primary_contact": { - Type: schema.TypeString, - Optional: true, - Description: `Email or URL of the listing publisher.`, - }, - }, - }, - }, - "request_access": { - Type: schema.TypeString, - Optional: true, - Description: `Email or URL of the request access of the listing. Subscribers can use this reference to request access.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the listing. e.g. 
"projects/myproject/locations/US/dataExchanges/123/listings/456"`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigqueryAnalyticsHubListingCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandBigqueryAnalyticsHubListingDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandBigqueryAnalyticsHubListingDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - primaryContactProp, err := expandBigqueryAnalyticsHubListingPrimaryContact(d.Get("primary_contact"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("primary_contact"); !isEmptyValue(reflect.ValueOf(primaryContactProp)) && (ok || !reflect.DeepEqual(v, primaryContactProp)) { - obj["primaryContact"] = primaryContactProp - } - documentationProp, err := expandBigqueryAnalyticsHubListingDocumentation(d.Get("documentation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("documentation"); !isEmptyValue(reflect.ValueOf(documentationProp)) && (ok || !reflect.DeepEqual(v, documentationProp)) { - obj["documentation"] = documentationProp - } - iconProp, err := expandBigqueryAnalyticsHubListingIcon(d.Get("icon"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("icon"); 
!isEmptyValue(reflect.ValueOf(iconProp)) && (ok || !reflect.DeepEqual(v, iconProp)) { - obj["icon"] = iconProp - } - requestAccessProp, err := expandBigqueryAnalyticsHubListingRequestAccess(d.Get("request_access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("request_access"); !isEmptyValue(reflect.ValueOf(requestAccessProp)) && (ok || !reflect.DeepEqual(v, requestAccessProp)) { - obj["requestAccess"] = requestAccessProp - } - dataProviderProp, err := expandBigqueryAnalyticsHubListingDataProvider(d.Get("data_provider"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_provider"); !isEmptyValue(reflect.ValueOf(dataProviderProp)) && (ok || !reflect.DeepEqual(v, dataProviderProp)) { - obj["dataProvider"] = dataProviderProp - } - publisherProp, err := expandBigqueryAnalyticsHubListingPublisher(d.Get("publisher"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("publisher"); !isEmptyValue(reflect.ValueOf(publisherProp)) && (ok || !reflect.DeepEqual(v, publisherProp)) { - obj["publisher"] = publisherProp - } - categoriesProp, err := expandBigqueryAnalyticsHubListingCategories(d.Get("categories"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("categories"); !isEmptyValue(reflect.ValueOf(categoriesProp)) && (ok || !reflect.DeepEqual(v, categoriesProp)) { - obj["categories"] = categoriesProp - } - bigqueryDatasetProp, err := expandBigqueryAnalyticsHubListingBigqueryDataset(d.Get("bigquery_dataset"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bigquery_dataset"); !isEmptyValue(reflect.ValueOf(bigqueryDatasetProp)) && (ok || !reflect.DeepEqual(v, bigqueryDatasetProp)) { - obj["bigqueryDataset"] = bigqueryDatasetProp - } - - url, err := replaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings?listing_id={{listing_id}}") - if err != 
nil { - return err - } - - log.Printf("[DEBUG] Creating new Listing: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Listing: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Listing: %s", err) - } - if err := d.Set("name", flattenBigqueryAnalyticsHubListingName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Listing %q: %#v", d.Id(), res) - - return resourceBigqueryAnalyticsHubListingRead(d, meta) -} - -func resourceBigqueryAnalyticsHubListingRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Listing: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigqueryAnalyticsHubListing %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Listing: %s", err) - } - - if err := d.Set("name", flattenBigqueryAnalyticsHubListingName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Listing: %s", err) - } - if err := d.Set("display_name", flattenBigqueryAnalyticsHubListingDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Listing: %s", err) - } - if err := d.Set("description", flattenBigqueryAnalyticsHubListingDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Listing: %s", err) - } - if err := d.Set("primary_contact", flattenBigqueryAnalyticsHubListingPrimaryContact(res["primaryContact"], d, config)); err != nil { - return fmt.Errorf("Error reading Listing: %s", err) - } - if err := d.Set("documentation", flattenBigqueryAnalyticsHubListingDocumentation(res["documentation"], d, config)); err != nil { - return fmt.Errorf("Error reading Listing: %s", err) - } - if err := d.Set("icon", flattenBigqueryAnalyticsHubListingIcon(res["icon"], d, config)); err != nil { - return fmt.Errorf("Error reading Listing: %s", err) - } - if err := d.Set("request_access", flattenBigqueryAnalyticsHubListingRequestAccess(res["requestAccess"], d, config)); err != nil { - return fmt.Errorf("Error reading Listing: %s", err) - } - if err := d.Set("data_provider", flattenBigqueryAnalyticsHubListingDataProvider(res["dataProvider"], d, config)); err != nil { - return fmt.Errorf("Error reading Listing: %s", err) - } - if err := d.Set("publisher", flattenBigqueryAnalyticsHubListingPublisher(res["publisher"], d, config)); err != nil { - return fmt.Errorf("Error reading Listing: %s", err) - } - if err := d.Set("categories", 
flattenBigqueryAnalyticsHubListingCategories(res["categories"], d, config)); err != nil { - return fmt.Errorf("Error reading Listing: %s", err) - } - if err := d.Set("bigquery_dataset", flattenBigqueryAnalyticsHubListingBigqueryDataset(res["bigqueryDataset"], d, config)); err != nil { - return fmt.Errorf("Error reading Listing: %s", err) - } - - return nil -} - -func resourceBigqueryAnalyticsHubListingUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Listing: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandBigqueryAnalyticsHubListingDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandBigqueryAnalyticsHubListingDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - primaryContactProp, err := expandBigqueryAnalyticsHubListingPrimaryContact(d.Get("primary_contact"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("primary_contact"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, primaryContactProp)) { - obj["primaryContact"] = primaryContactProp - } - documentationProp, err := expandBigqueryAnalyticsHubListingDocumentation(d.Get("documentation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("documentation"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, documentationProp)) { - obj["documentation"] = documentationProp - } - iconProp, err := expandBigqueryAnalyticsHubListingIcon(d.Get("icon"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("icon"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, iconProp)) { - obj["icon"] = iconProp - } - requestAccessProp, err := expandBigqueryAnalyticsHubListingRequestAccess(d.Get("request_access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("request_access"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, requestAccessProp)) { - obj["requestAccess"] = requestAccessProp - } - dataProviderProp, err := expandBigqueryAnalyticsHubListingDataProvider(d.Get("data_provider"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_provider"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dataProviderProp)) { - obj["dataProvider"] = dataProviderProp - } - publisherProp, err := expandBigqueryAnalyticsHubListingPublisher(d.Get("publisher"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("publisher"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, publisherProp)) { - obj["publisher"] = publisherProp - } - categoriesProp, err := expandBigqueryAnalyticsHubListingCategories(d.Get("categories"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("categories"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, categoriesProp)) { - obj["categories"] = categoriesProp - } - bigqueryDatasetProp, err := expandBigqueryAnalyticsHubListingBigqueryDataset(d.Get("bigquery_dataset"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bigquery_dataset"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bigqueryDatasetProp)) { - obj["bigqueryDataset"] = 
bigqueryDatasetProp - } - - url, err := replaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Listing %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("primary_contact") { - updateMask = append(updateMask, "primaryContact") - } - - if d.HasChange("documentation") { - updateMask = append(updateMask, "documentation") - } - - if d.HasChange("icon") { - updateMask = append(updateMask, "icon") - } - - if d.HasChange("request_access") { - updateMask = append(updateMask, "requestAccess") - } - - if d.HasChange("data_provider") { - updateMask = append(updateMask, "dataProvider") - } - - if d.HasChange("publisher") { - updateMask = append(updateMask, "publisher") - } - - if d.HasChange("categories") { - updateMask = append(updateMask, "categories") - } - - if d.HasChange("bigquery_dataset") { - updateMask = append(updateMask, "bigqueryDataset") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Listing %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Listing %q: %#v", d.Id(), res) - } - - return resourceBigqueryAnalyticsHubListingRead(d, meta) -} - -func 
resourceBigqueryAnalyticsHubListingDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Listing: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Listing %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Listing") - } - - log.Printf("[DEBUG] Finished deleting Listing %q: %#v", d.Id(), res) - return nil -} - -func resourceBigqueryAnalyticsHubListingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/dataExchanges/(?P[^/]+)/listings/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenBigqueryAnalyticsHubListingName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenBigqueryAnalyticsHubListingDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubListingDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubListingPrimaryContact(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubListingDocumentation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubListingIcon(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubListingRequestAccess(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubListingDataProvider(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenBigqueryAnalyticsHubListingDataProviderName(original["name"], d, config) - transformed["primary_contact"] = - flattenBigqueryAnalyticsHubListingDataProviderPrimaryContact(original["primaryContact"], d, config) - return []interface{}{transformed} -} -func flattenBigqueryAnalyticsHubListingDataProviderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubListingDataProviderPrimaryContact(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubListingPublisher(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["name"] = - flattenBigqueryAnalyticsHubListingPublisherName(original["name"], d, config) - transformed["primary_contact"] = - flattenBigqueryAnalyticsHubListingPublisherPrimaryContact(original["primaryContact"], d, config) - return []interface{}{transformed} -} -func flattenBigqueryAnalyticsHubListingPublisherName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubListingPublisherPrimaryContact(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubListingCategories(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryAnalyticsHubListingBigqueryDataset(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset"] = - flattenBigqueryAnalyticsHubListingBigqueryDatasetDataset(original["dataset"], d, config) - return []interface{}{transformed} -} -func flattenBigqueryAnalyticsHubListingBigqueryDatasetDataset(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBigqueryAnalyticsHubListingDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubListingDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubListingPrimaryContact(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubListingDocumentation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubListingIcon(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubListingRequestAccess(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubListingDataProvider(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandBigqueryAnalyticsHubListingDataProviderName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPrimaryContact, err := expandBigqueryAnalyticsHubListingDataProviderPrimaryContact(original["primary_contact"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPrimaryContact); val.IsValid() && !isEmptyValue(val) { - transformed["primaryContact"] = transformedPrimaryContact - } - - return transformed, nil -} - -func expandBigqueryAnalyticsHubListingDataProviderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubListingDataProviderPrimaryContact(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubListingPublisher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandBigqueryAnalyticsHubListingPublisherName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); 
val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPrimaryContact, err := expandBigqueryAnalyticsHubListingPublisherPrimaryContact(original["primary_contact"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPrimaryContact); val.IsValid() && !isEmptyValue(val) { - transformed["primaryContact"] = transformedPrimaryContact - } - - return transformed, nil -} - -func expandBigqueryAnalyticsHubListingPublisherName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubListingPublisherPrimaryContact(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubListingCategories(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryAnalyticsHubListingBigqueryDataset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDataset, err := expandBigqueryAnalyticsHubListingBigqueryDatasetDataset(original["dataset"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDataset); val.IsValid() && !isEmptyValue(val) { - transformed["dataset"] = transformedDataset - } - - return transformed, nil -} - -func expandBigqueryAnalyticsHubListingBigqueryDatasetDataset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_capacity_commitment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_capacity_commitment.go 
deleted file mode 100644 index 524463f147..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_capacity_commitment.go +++ /dev/null @@ -1,407 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceBigqueryReservationCapacityCommitment() *schema.Resource { - return &schema.Resource{ - Create: resourceBigqueryReservationCapacityCommitmentCreate, - Read: resourceBigqueryReservationCapacityCommitmentRead, - Update: resourceBigqueryReservationCapacityCommitmentUpdate, - Delete: resourceBigqueryReservationCapacityCommitmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBigqueryReservationCapacityCommitmentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "plan": { - Type: schema.TypeString, - Required: true, - Description: `Capacity commitment plan. 
Valid values are FLEX, TRIAL, MONTHLY, ANNUAL`, - }, - "slot_count": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: `Number of slots in this commitment.`, - }, - "capacity_commitment_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The optional capacity commitment ID. Capacity commitment name will be generated automatically if this field is -empty. This field must only contain lower case alphanumeric characters or dashes. The first and last character -cannot be a dash. Max length is 64 characters. NOTE: this ID won't be kept if the capacity commitment is split -or merged.`, - }, - "enforce_single_admin_project_per_org": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `If true, fail the request if another project in the organization has a capacity commitment.`, - }, - "location": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The geographic location where the transfer config should reside. -Examples: US, EU, asia-northeast1. The default value is US.`, - Default: "US", - }, - "renewal_plan": { - Type: schema.TypeString, - Optional: true, - Description: `The plan this capacity commitment is converted to after commitmentEndTime passes. Once the plan is changed, committed period is extended according to commitment plan. Only applicable for ANNUAL and TRIAL commitments.`, - }, - "commitment_end_time": { - Type: schema.TypeString, - Computed: true, - Description: `The start of the current commitment period. It is applicable only for ACTIVE capacity commitments.`, - }, - "commitment_start_time": { - Type: schema.TypeString, - Computed: true, - Description: `The start of the current commitment period. 
It is applicable only for ACTIVE capacity commitments.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the capacity commitment, e.g., projects/myproject/locations/US/capacityCommitments/123`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `State of the commitment`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigqueryReservationCapacityCommitmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - slotCountProp, err := expandBigqueryReservationCapacityCommitmentSlotCount(d.Get("slot_count"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("slot_count"); !isEmptyValue(reflect.ValueOf(slotCountProp)) && (ok || !reflect.DeepEqual(v, slotCountProp)) { - obj["slotCount"] = slotCountProp - } - planProp, err := expandBigqueryReservationCapacityCommitmentPlan(d.Get("plan"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("plan"); !isEmptyValue(reflect.ValueOf(planProp)) && (ok || !reflect.DeepEqual(v, planProp)) { - obj["plan"] = planProp - } - renewalPlanProp, err := expandBigqueryReservationCapacityCommitmentRenewalPlan(d.Get("renewal_plan"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("renewal_plan"); !isEmptyValue(reflect.ValueOf(renewalPlanProp)) && (ok || !reflect.DeepEqual(v, renewalPlanProp)) { - obj["renewalPlan"] = renewalPlanProp - } - - url, err := replaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/capacityCommitments?capacityCommitmentId={{capacity_commitment_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new CapacityCommitment: %#v", 
obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for CapacityCommitment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating CapacityCommitment: %s", err) - } - if err := d.Set("name", flattenBigqueryReservationCapacityCommitmentName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating CapacityCommitment %q: %#v", d.Id(), res) - - return resourceBigqueryReservationCapacityCommitmentRead(d, meta) -} - -func resourceBigqueryReservationCapacityCommitmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigqueryReservationBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for CapacityCommitment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigqueryReservationCapacityCommitment %q", d.Id())) - } - - if err := 
d.Set("project", project); err != nil { - return fmt.Errorf("Error reading CapacityCommitment: %s", err) - } - - if err := d.Set("name", flattenBigqueryReservationCapacityCommitmentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading CapacityCommitment: %s", err) - } - if err := d.Set("slot_count", flattenBigqueryReservationCapacityCommitmentSlotCount(res["slotCount"], d, config)); err != nil { - return fmt.Errorf("Error reading CapacityCommitment: %s", err) - } - if err := d.Set("plan", flattenBigqueryReservationCapacityCommitmentPlan(res["plan"], d, config)); err != nil { - return fmt.Errorf("Error reading CapacityCommitment: %s", err) - } - if err := d.Set("state", flattenBigqueryReservationCapacityCommitmentState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading CapacityCommitment: %s", err) - } - if err := d.Set("commitment_start_time", flattenBigqueryReservationCapacityCommitmentCommitmentStartTime(res["commitmentStartTime"], d, config)); err != nil { - return fmt.Errorf("Error reading CapacityCommitment: %s", err) - } - if err := d.Set("commitment_end_time", flattenBigqueryReservationCapacityCommitmentCommitmentEndTime(res["commitmentEndTime"], d, config)); err != nil { - return fmt.Errorf("Error reading CapacityCommitment: %s", err) - } - if err := d.Set("renewal_plan", flattenBigqueryReservationCapacityCommitmentRenewalPlan(res["renewalPlan"], d, config)); err != nil { - return fmt.Errorf("Error reading CapacityCommitment: %s", err) - } - - return nil -} - -func resourceBigqueryReservationCapacityCommitmentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for CapacityCommitment: %s", err) - } - billingProject = project - - obj := 
make(map[string]interface{}) - planProp, err := expandBigqueryReservationCapacityCommitmentPlan(d.Get("plan"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("plan"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, planProp)) { - obj["plan"] = planProp - } - renewalPlanProp, err := expandBigqueryReservationCapacityCommitmentRenewalPlan(d.Get("renewal_plan"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("renewal_plan"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, renewalPlanProp)) { - obj["renewalPlan"] = renewalPlanProp - } - - url, err := replaceVars(d, config, "{{BigqueryReservationBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating CapacityCommitment %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("plan") { - updateMask = append(updateMask, "plan") - } - - if d.HasChange("renewal_plan") { - updateMask = append(updateMask, "renewalPlan") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating CapacityCommitment %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating CapacityCommitment %q: %#v", d.Id(), res) - } - - return resourceBigqueryReservationCapacityCommitmentRead(d, meta) -} - -func resourceBigqueryReservationCapacityCommitmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - 
if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for CapacityCommitment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigqueryReservationBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting CapacityCommitment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "CapacityCommitment") - } - - log.Printf("[DEBUG] Finished deleting CapacityCommitment %q: %#v", d.Id(), res) - return nil -} - -func resourceBigqueryReservationCapacityCommitmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*schema.ResourceData{d}, nil -} - -func flattenBigqueryReservationCapacityCommitmentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryReservationCapacityCommitmentSlotCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenBigqueryReservationCapacityCommitmentPlan(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryReservationCapacityCommitmentState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryReservationCapacityCommitmentCommitmentStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryReservationCapacityCommitmentCommitmentEndTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryReservationCapacityCommitmentRenewalPlan(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBigqueryReservationCapacityCommitmentSlotCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryReservationCapacityCommitmentPlan(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryReservationCapacityCommitmentRenewalPlan(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_connection.go deleted file mode 100644 index bbf98c2822..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_connection.go +++ /dev/null @@ -1,1157 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. 
-// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceBigqueryConnectionConnection() *schema.Resource { - return &schema.Resource{ - Create: resourceBigqueryConnectionConnectionCreate, - Read: resourceBigqueryConnectionConnectionRead, - Update: resourceBigqueryConnectionConnectionUpdate, - Delete: resourceBigqueryConnectionConnectionDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBigqueryConnectionConnectionImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "aws": { - Type: schema.TypeList, - Optional: true, - Description: `Connection properties specific to Amazon Web Services.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "access_role": { - Type: schema.TypeList, - Required: true, - Description: `Authentication using Google owned service account to assume into customer's AWS IAM Role.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "iam_role_id": { - Type: schema.TypeString, - Required: true, - Description: `The user’s AWS IAM Role that trusts the Google-owned AWS IAM user Connection.`, - }, - "identity": { - Type: schema.TypeString, - Computed: true, - Description: `A unique Google-owned and Google-generated identity for the Connection. 
This identity will be used to access the user's AWS IAM Role.`, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"}, - }, - "azure": { - Type: schema.TypeList, - Optional: true, - Description: `Container for connection properties specific to Azure.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "customer_tenant_id": { - Type: schema.TypeString, - Required: true, - Description: `The id of customer's directory that host the data.`, - }, - "federated_application_client_id": { - Type: schema.TypeString, - Optional: true, - Description: `The Azure Application (client) ID where the federated credentials will be hosted.`, - }, - "application": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the Azure Active Directory Application.`, - }, - "client_id": { - Type: schema.TypeString, - Computed: true, - Description: `The client id of the Azure Active Directory Application.`, - }, - "identity": { - Type: schema.TypeString, - Computed: true, - Description: `A unique Google-owned and Google-generated identity for the Connection. 
This identity will be used to access the user's Azure Active Directory Application.`, - }, - "object_id": { - Type: schema.TypeString, - Computed: true, - Description: `The object id of the Azure Active Directory Application.`, - }, - "redirect_uri": { - Type: schema.TypeString, - Computed: true, - Description: `The URL user will be redirected to after granting consent during connection setup.`, - }, - }, - }, - ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"}, - }, - "cloud_resource": { - Type: schema.TypeList, - Optional: true, - Description: `Container for connection properties for delegation of access to GCP resources.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "service_account_id": { - Type: schema.TypeString, - Computed: true, - Description: `The account ID of the service created for the purpose of this connection.`, - }, - }, - }, - ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"}, - }, - "cloud_spanner": { - Type: schema.TypeList, - Optional: true, - Description: `Connection properties specific to Cloud Spanner`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "database": { - Type: schema.TypeString, - Required: true, - Description: `Cloud Spanner database in the form 'project/instance/database'`, - }, - "use_parallelism": { - Type: schema.TypeBool, - Optional: true, - Description: `If parallelism should be used when reading from Cloud Spanner`, - }, - "use_serverless_analytics": { - Type: schema.TypeBool, - Optional: true, - Description: `If the serverless analytics service should be used to read data from Cloud Spanner. 
useParallelism must be set when using serverless analytics`, - }, - }, - }, - ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"}, - }, - "cloud_sql": { - Type: schema.TypeList, - Optional: true, - Description: `Connection properties specific to the Cloud SQL.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "credential": { - Type: schema.TypeList, - Required: true, - Description: `Cloud SQL properties.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "password": { - Type: schema.TypeString, - Required: true, - Description: `Password for database.`, - Sensitive: true, - }, - "username": { - Type: schema.TypeString, - Required: true, - Description: `Username for database.`, - }, - }, - }, - }, - "database": { - Type: schema.TypeString, - Required: true, - Description: `Database name.`, - }, - "instance_id": { - Type: schema.TypeString, - Required: true, - Description: `Cloud SQL instance ID in the form project:location:instance.`, - }, - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"DATABASE_TYPE_UNSPECIFIED", "POSTGRES", "MYSQL"}), - Description: `Type of the Cloud SQL database. 
Possible values: ["DATABASE_TYPE_UNSPECIFIED", "POSTGRES", "MYSQL"]`, - }, - "service_account_id": { - Type: schema.TypeString, - Computed: true, - Description: `When the connection is used in the context of an operation in BigQuery, this service account will serve as the identity being used for connecting to the CloudSQL instance specified in this connection.`, - }, - }, - }, - ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"}, - }, - "connection_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Optional connection id that should be assigned to the created connection.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A descriptive description for the connection`, - }, - "friendly_name": { - Type: schema.TypeString, - Optional: true, - Description: `A descriptive name for the connection`, - }, - "location": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The geographic location where the connection should reside. -Cloud SQL instance must be in the same location as the connection -with following exceptions: Cloud SQL us-central1 maps to BigQuery US, Cloud SQL europe-west1 maps to BigQuery EU. -Examples: US, EU, asia-northeast1, us-central1, europe-west1. 
-Spanner Connections same as spanner region -AWS allowed regions are aws-us-east-1 -Azure allowed regions are azure-eastus2`, - }, - "has_credential": { - Type: schema.TypeBool, - Computed: true, - Description: `True if the connection has credential assigned.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the connection in the form of: -"projects/{project_id}/locations/{location_id}/connections/{connectionId}"`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigqueryConnectionConnectionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - connection_idProp, err := expandBigqueryConnectionConnectionConnectionId(d.Get("connection_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("connection_id"); !isEmptyValue(reflect.ValueOf(connection_idProp)) && (ok || !reflect.DeepEqual(v, connection_idProp)) { - obj["connection_id"] = connection_idProp - } - friendlyNameProp, err := expandBigqueryConnectionConnectionFriendlyName(d.Get("friendly_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("friendly_name"); !isEmptyValue(reflect.ValueOf(friendlyNameProp)) && (ok || !reflect.DeepEqual(v, friendlyNameProp)) { - obj["friendlyName"] = friendlyNameProp - } - descriptionProp, err := expandBigqueryConnectionConnectionDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - cloudSqlProp, err := expandBigqueryConnectionConnectionCloudSql(d.Get("cloud_sql"), d, config) - if err 
!= nil { - return err - } else if v, ok := d.GetOkExists("cloud_sql"); !isEmptyValue(reflect.ValueOf(cloudSqlProp)) && (ok || !reflect.DeepEqual(v, cloudSqlProp)) { - obj["cloudSql"] = cloudSqlProp - } - awsProp, err := expandBigqueryConnectionConnectionAws(d.Get("aws"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("aws"); !isEmptyValue(reflect.ValueOf(awsProp)) && (ok || !reflect.DeepEqual(v, awsProp)) { - obj["aws"] = awsProp - } - azureProp, err := expandBigqueryConnectionConnectionAzure(d.Get("azure"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("azure"); !isEmptyValue(reflect.ValueOf(azureProp)) && (ok || !reflect.DeepEqual(v, azureProp)) { - obj["azure"] = azureProp - } - cloudSpannerProp, err := expandBigqueryConnectionConnectionCloudSpanner(d.Get("cloud_spanner"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cloud_spanner"); !isEmptyValue(reflect.ValueOf(cloudSpannerProp)) && (ok || !reflect.DeepEqual(v, cloudSpannerProp)) { - obj["cloudSpanner"] = cloudSpannerProp - } - cloudResourceProp, err := expandBigqueryConnectionConnectionCloudResource(d.Get("cloud_resource"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cloud_resource"); ok || !reflect.DeepEqual(v, cloudResourceProp) { - obj["cloudResource"] = cloudResourceProp - } - - obj, err = resourceBigqueryConnectionConnectionEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigqueryConnectionBasePath}}projects/{{project}}/locations/{{location}}/connections?connectionId={{connection_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Connection: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Connection: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - 
if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Connection: %s", err) - } - if err := d.Set("name", flattenBigqueryConnectionConnectionName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/connections/{{connection_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if isEmptyValue(reflect.ValueOf(d.Get("connection_id"))) { - // connection id is set by API when unset and required to GET the connection - // it is set by reading the "name" field rather than a field in the response - if err := d.Set("connection_id", flattenBigqueryConnectionConnectionConnectionId("", d, config)); err != nil { - return fmt.Errorf("Error reading Connection: %s", err) - } - } - - // Reset id to make sure connection_id is not empty - id2, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/connections/{{connection_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id2) - - log.Printf("[DEBUG] Finished creating Connection %q: %#v", d.Id(), res) - - return resourceBigqueryConnectionConnectionRead(d, meta) -} - -func resourceBigqueryConnectionConnectionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigqueryConnectionBasePath}}projects/{{project}}/locations/{{location}}/connections/{{connection_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - 
return fmt.Errorf("Error fetching project for Connection: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigqueryConnectionConnection %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Connection: %s", err) - } - - if err := d.Set("name", flattenBigqueryConnectionConnectionName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Connection: %s", err) - } - if err := d.Set("connection_id", flattenBigqueryConnectionConnectionConnectionId(res["connection_id"], d, config)); err != nil { - return fmt.Errorf("Error reading Connection: %s", err) - } - if err := d.Set("friendly_name", flattenBigqueryConnectionConnectionFriendlyName(res["friendlyName"], d, config)); err != nil { - return fmt.Errorf("Error reading Connection: %s", err) - } - if err := d.Set("description", flattenBigqueryConnectionConnectionDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Connection: %s", err) - } - if err := d.Set("has_credential", flattenBigqueryConnectionConnectionHasCredential(res["hasCredential"], d, config)); err != nil { - return fmt.Errorf("Error reading Connection: %s", err) - } - if err := d.Set("cloud_sql", flattenBigqueryConnectionConnectionCloudSql(res["cloudSql"], d, config)); err != nil { - return fmt.Errorf("Error reading Connection: %s", err) - } - if err := d.Set("aws", flattenBigqueryConnectionConnectionAws(res["aws"], d, config)); err != nil { - return fmt.Errorf("Error reading Connection: %s", err) - } - if err := d.Set("azure", flattenBigqueryConnectionConnectionAzure(res["azure"], d, config)); err != nil { - return fmt.Errorf("Error reading Connection: %s", 
err) - } - if err := d.Set("cloud_spanner", flattenBigqueryConnectionConnectionCloudSpanner(res["cloudSpanner"], d, config)); err != nil { - return fmt.Errorf("Error reading Connection: %s", err) - } - if err := d.Set("cloud_resource", flattenBigqueryConnectionConnectionCloudResource(res["cloudResource"], d, config)); err != nil { - return fmt.Errorf("Error reading Connection: %s", err) - } - - return nil -} - -func resourceBigqueryConnectionConnectionUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Connection: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - friendlyNameProp, err := expandBigqueryConnectionConnectionFriendlyName(d.Get("friendly_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("friendly_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, friendlyNameProp)) { - obj["friendlyName"] = friendlyNameProp - } - descriptionProp, err := expandBigqueryConnectionConnectionDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - cloudSqlProp, err := expandBigqueryConnectionConnectionCloudSql(d.Get("cloud_sql"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cloud_sql"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cloudSqlProp)) { - obj["cloudSql"] = cloudSqlProp - } - awsProp, err := expandBigqueryConnectionConnectionAws(d.Get("aws"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("aws"); !isEmptyValue(reflect.ValueOf(v)) && (ok 
|| !reflect.DeepEqual(v, awsProp)) { - obj["aws"] = awsProp - } - azureProp, err := expandBigqueryConnectionConnectionAzure(d.Get("azure"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("azure"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, azureProp)) { - obj["azure"] = azureProp - } - cloudSpannerProp, err := expandBigqueryConnectionConnectionCloudSpanner(d.Get("cloud_spanner"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cloud_spanner"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cloudSpannerProp)) { - obj["cloudSpanner"] = cloudSpannerProp - } - cloudResourceProp, err := expandBigqueryConnectionConnectionCloudResource(d.Get("cloud_resource"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cloud_resource"); ok || !reflect.DeepEqual(v, cloudResourceProp) { - obj["cloudResource"] = cloudResourceProp - } - - obj, err = resourceBigqueryConnectionConnectionEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigqueryConnectionBasePath}}projects/{{project}}/locations/{{location}}/connections/{{connection_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Connection %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("friendly_name") { - updateMask = append(updateMask, "friendlyName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("cloud_sql") { - updateMask = append(updateMask, "cloudSql") - } - - if d.HasChange("aws") { - updateMask = append(updateMask, "aws.access_role.iam_role_id") - } - - if d.HasChange("azure") { - updateMask = append(updateMask, "azure.customer_tenant_id", - "azure.federated_application_client_id") - } - - if d.HasChange("cloud_spanner") { - updateMask = append(updateMask, "cloudSpanner") - } - - if d.HasChange("cloud_resource") { - updateMask = 
append(updateMask, "cloudResource") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Connection %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Connection %q: %#v", d.Id(), res) - } - - return resourceBigqueryConnectionConnectionRead(d, meta) -} - -func resourceBigqueryConnectionConnectionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Connection: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigqueryConnectionBasePath}}projects/{{project}}/locations/{{location}}/connections/{{connection_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Connection %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Connection") - } - - log.Printf("[DEBUG] Finished deleting Connection %q: %#v", d.Id(), res) - return nil -} - -func resourceBigqueryConnectionConnectionImport(d *schema.ResourceData, 
meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/connections/{{connection_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenBigqueryConnectionConnectionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionConnectionId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - parts := strings.Split(d.Get("name").(string), "/") - return parts[len(parts)-1] -} - -func flattenBigqueryConnectionConnectionFriendlyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionHasCredential(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionCloudSql(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["instance_id"] = - flattenBigqueryConnectionConnectionCloudSqlInstanceId(original["instanceId"], d, config) - transformed["database"] = - flattenBigqueryConnectionConnectionCloudSqlDatabase(original["database"], d, config) - transformed["credential"] = - flattenBigqueryConnectionConnectionCloudSqlCredential(original["credential"], d, config) - 
transformed["type"] = - flattenBigqueryConnectionConnectionCloudSqlType(original["type"], d, config) - transformed["service_account_id"] = - flattenBigqueryConnectionConnectionCloudSqlServiceAccountId(original["serviceAccountId"], d, config) - return []interface{}{transformed} -} -func flattenBigqueryConnectionConnectionCloudSqlInstanceId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionCloudSqlDatabase(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionCloudSqlCredential(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return []interface{}{ - map[string]interface{}{ - "username": d.Get("cloud_sql.0.credential.0.username"), - "password": d.Get("cloud_sql.0.credential.0.password"), - }, - } -} - -func flattenBigqueryConnectionConnectionCloudSqlType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionCloudSqlServiceAccountId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionAws(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["access_role"] = - flattenBigqueryConnectionConnectionAwsAccessRole(original["accessRole"], d, config) - return []interface{}{transformed} -} -func flattenBigqueryConnectionConnectionAwsAccessRole(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["iam_role_id"] = - 
flattenBigqueryConnectionConnectionAwsAccessRoleIamRoleId(original["iamRoleId"], d, config) - transformed["identity"] = - flattenBigqueryConnectionConnectionAwsAccessRoleIdentity(original["identity"], d, config) - return []interface{}{transformed} -} -func flattenBigqueryConnectionConnectionAwsAccessRoleIamRoleId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionAwsAccessRoleIdentity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionAzure(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["application"] = - flattenBigqueryConnectionConnectionAzureApplication(original["application"], d, config) - transformed["client_id"] = - flattenBigqueryConnectionConnectionAzureClientId(original["clientId"], d, config) - transformed["object_id"] = - flattenBigqueryConnectionConnectionAzureObjectId(original["objectId"], d, config) - transformed["customer_tenant_id"] = - flattenBigqueryConnectionConnectionAzureCustomerTenantId(original["customerTenantId"], d, config) - transformed["federated_application_client_id"] = - flattenBigqueryConnectionConnectionAzureFederatedApplicationClientId(original["federatedApplicationClientId"], d, config) - transformed["redirect_uri"] = - flattenBigqueryConnectionConnectionAzureRedirectUri(original["redirectUri"], d, config) - transformed["identity"] = - flattenBigqueryConnectionConnectionAzureIdentity(original["identity"], d, config) - return []interface{}{transformed} -} -func flattenBigqueryConnectionConnectionAzureApplication(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionAzureClientId(v interface{}, d *schema.ResourceData, 
config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionAzureObjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionAzureCustomerTenantId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionAzureFederatedApplicationClientId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionAzureRedirectUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionAzureIdentity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionCloudSpanner(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["database"] = - flattenBigqueryConnectionConnectionCloudSpannerDatabase(original["database"], d, config) - transformed["use_parallelism"] = - flattenBigqueryConnectionConnectionCloudSpannerUseParallelism(original["useParallelism"], d, config) - transformed["use_serverless_analytics"] = - flattenBigqueryConnectionConnectionCloudSpannerUseServerlessAnalytics(original["useServerlessAnalytics"], d, config) - return []interface{}{transformed} -} -func flattenBigqueryConnectionConnectionCloudSpannerDatabase(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionCloudSpannerUseParallelism(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryConnectionConnectionCloudSpannerUseServerlessAnalytics(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenBigqueryConnectionConnectionCloudResource(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["service_account_id"] = - flattenBigqueryConnectionConnectionCloudResourceServiceAccountId(original["serviceAccountId"], d, config) - return []interface{}{transformed} -} -func flattenBigqueryConnectionConnectionCloudResourceServiceAccountId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBigqueryConnectionConnectionConnectionId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionFriendlyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionCloudSql(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInstanceId, err := expandBigqueryConnectionConnectionCloudSqlInstanceId(original["instance_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInstanceId); val.IsValid() && !isEmptyValue(val) { - transformed["instanceId"] = transformedInstanceId - } - - transformedDatabase, err := expandBigqueryConnectionConnectionCloudSqlDatabase(original["database"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !isEmptyValue(val) { - transformed["database"] = transformedDatabase 
- } - - transformedCredential, err := expandBigqueryConnectionConnectionCloudSqlCredential(original["credential"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCredential); val.IsValid() && !isEmptyValue(val) { - transformed["credential"] = transformedCredential - } - - transformedType, err := expandBigqueryConnectionConnectionCloudSqlType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - transformedServiceAccountId, err := expandBigqueryConnectionConnectionCloudSqlServiceAccountId(original["service_account_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedServiceAccountId); val.IsValid() && !isEmptyValue(val) { - transformed["serviceAccountId"] = transformedServiceAccountId - } - - return transformed, nil -} - -func expandBigqueryConnectionConnectionCloudSqlInstanceId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionCloudSqlDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionCloudSqlCredential(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUsername, err := expandBigqueryConnectionConnectionCloudSqlCredentialUsername(original["username"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !isEmptyValue(val) { - transformed["username"] = transformedUsername - } - - transformedPassword, err := 
expandBigqueryConnectionConnectionCloudSqlCredentialPassword(original["password"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !isEmptyValue(val) { - transformed["password"] = transformedPassword - } - - return transformed, nil -} - -func expandBigqueryConnectionConnectionCloudSqlCredentialUsername(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionCloudSqlCredentialPassword(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionCloudSqlType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionCloudSqlServiceAccountId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionAws(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAccessRole, err := expandBigqueryConnectionConnectionAwsAccessRole(original["access_role"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAccessRole); val.IsValid() && !isEmptyValue(val) { - transformed["accessRole"] = transformedAccessRole - } - - return transformed, nil -} - -func expandBigqueryConnectionConnectionAwsAccessRole(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIamRoleId, err := 
expandBigqueryConnectionConnectionAwsAccessRoleIamRoleId(original["iam_role_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIamRoleId); val.IsValid() && !isEmptyValue(val) { - transformed["iamRoleId"] = transformedIamRoleId - } - - transformedIdentity, err := expandBigqueryConnectionConnectionAwsAccessRoleIdentity(original["identity"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIdentity); val.IsValid() && !isEmptyValue(val) { - transformed["identity"] = transformedIdentity - } - - return transformed, nil -} - -func expandBigqueryConnectionConnectionAwsAccessRoleIamRoleId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionAwsAccessRoleIdentity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionAzure(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedApplication, err := expandBigqueryConnectionConnectionAzureApplication(original["application"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedApplication); val.IsValid() && !isEmptyValue(val) { - transformed["application"] = transformedApplication - } - - transformedClientId, err := expandBigqueryConnectionConnectionAzureClientId(original["client_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedClientId); val.IsValid() && !isEmptyValue(val) { - transformed["clientId"] = transformedClientId - } - - transformedObjectId, err := expandBigqueryConnectionConnectionAzureObjectId(original["object_id"], d, config) - if err != nil { - 
return nil, err - } else if val := reflect.ValueOf(transformedObjectId); val.IsValid() && !isEmptyValue(val) { - transformed["objectId"] = transformedObjectId - } - - transformedCustomerTenantId, err := expandBigqueryConnectionConnectionAzureCustomerTenantId(original["customer_tenant_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCustomerTenantId); val.IsValid() && !isEmptyValue(val) { - transformed["customerTenantId"] = transformedCustomerTenantId - } - - transformedFederatedApplicationClientId, err := expandBigqueryConnectionConnectionAzureFederatedApplicationClientId(original["federated_application_client_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFederatedApplicationClientId); val.IsValid() && !isEmptyValue(val) { - transformed["federatedApplicationClientId"] = transformedFederatedApplicationClientId - } - - transformedRedirectUri, err := expandBigqueryConnectionConnectionAzureRedirectUri(original["redirect_uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRedirectUri); val.IsValid() && !isEmptyValue(val) { - transformed["redirectUri"] = transformedRedirectUri - } - - transformedIdentity, err := expandBigqueryConnectionConnectionAzureIdentity(original["identity"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIdentity); val.IsValid() && !isEmptyValue(val) { - transformed["identity"] = transformedIdentity - } - - return transformed, nil -} - -func expandBigqueryConnectionConnectionAzureApplication(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionAzureClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionAzureObjectId(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionAzureCustomerTenantId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionAzureFederatedApplicationClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionAzureRedirectUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionAzureIdentity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionCloudSpanner(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDatabase, err := expandBigqueryConnectionConnectionCloudSpannerDatabase(original["database"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !isEmptyValue(val) { - transformed["database"] = transformedDatabase - } - - transformedUseParallelism, err := expandBigqueryConnectionConnectionCloudSpannerUseParallelism(original["use_parallelism"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUseParallelism); val.IsValid() && !isEmptyValue(val) { - transformed["useParallelism"] = transformedUseParallelism - } - - transformedUseServerlessAnalytics, err := expandBigqueryConnectionConnectionCloudSpannerUseServerlessAnalytics(original["use_serverless_analytics"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUseServerlessAnalytics); val.IsValid() && !isEmptyValue(val) { - transformed["useServerlessAnalytics"] 
= transformedUseServerlessAnalytics - } - - return transformed, nil -} - -func expandBigqueryConnectionConnectionCloudSpannerDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionCloudSpannerUseParallelism(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionCloudSpannerUseServerlessAnalytics(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryConnectionConnectionCloudResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceAccountId, err := expandBigqueryConnectionConnectionCloudResourceServiceAccountId(original["service_account_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedServiceAccountId); val.IsValid() && !isEmptyValue(val) { - transformed["serviceAccountId"] = transformedServiceAccountId - } - - return transformed, nil -} - -func expandBigqueryConnectionConnectionCloudResourceServiceAccountId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceBigqueryConnectionConnectionEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // connection_id is needed to qualify the URL but cannot be sent in the body - delete(obj, "connection_id") - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_data_transfer_config.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_data_transfer_config.go deleted file mode 100644 index aaddb262f9..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_data_transfer_config.go +++ /dev/null @@ -1,919 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -var sensitiveParams = []string{"secret_access_key"} - -func sensitiveParamCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { - for _, sp := range sensitiveParams { - mapLabel := diff.Get("params." + sp).(string) - authLabel := diff.Get("sensitive_params.0." + sp).(string) - if mapLabel != "" && authLabel != "" { - return fmt.Errorf("Sensitive param [%s] cannot be set in both `params` and the `sensitive_params` block.", sp) - } - } - return nil -} - -// This customizeDiff is to use ForceNew for params fields data_path_template and -// destination_table_name_template only if the value of "data_source_id" is "google_cloud_storage". 
-func paramsCustomizeDiffFunc(diff TerraformResourceDiff) error { - old, new := diff.GetChange("params") - dsId := diff.Get("data_source_id").(string) - oldParams := old.(map[string]interface{}) - newParams := new.(map[string]interface{}) - var err error - - if dsId == "google_cloud_storage" { - if oldParams["data_path_template"] != nil && newParams["data_path_template"] != nil && oldParams["data_path_template"].(string) != newParams["data_path_template"].(string) { - err = diff.ForceNew("params") - if err != nil { - return fmt.Errorf("ForceNew failed for params, old - %v and new - %v", oldParams, newParams) - } - return nil - } - - if oldParams["destination_table_name_template"] != nil && newParams["destination_table_name_template"] != nil && oldParams["destination_table_name_template"].(string) != newParams["destination_table_name_template"].(string) { - err = diff.ForceNew("params") - if err != nil { - return fmt.Errorf("ForceNew failed for params, old - %v and new - %v", oldParams, newParams) - } - return nil - } - } - - return nil -} - -func paramsCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { - return paramsCustomizeDiffFunc(diff) -} - -func ResourceBigqueryDataTransferConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceBigqueryDataTransferConfigCreate, - Read: resourceBigqueryDataTransferConfigRead, - Update: resourceBigqueryDataTransferConfigUpdate, - Delete: resourceBigqueryDataTransferConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBigqueryDataTransferConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - CustomizeDiff: customdiff.All(sensitiveParamCustomizeDiff, paramsCustomizeDiff), - - Schema: map[string]*schema.Schema{ - "data_source_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - 
Description: `The data source id. Cannot be changed once the transfer config is created.`, - }, - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `The user specified display name for the transfer config.`, - }, - "params": { - Type: schema.TypeMap, - Required: true, - Description: `Parameters specific to each data source. For more information see the bq tab in the 'Setting up a data transfer' -section for each data source. For example the parameters for Cloud Storage transfers are listed here: -https://cloud.google.com/bigquery-transfer/docs/cloud-storage-transfer#bq - -**NOTE** : If you are attempting to update a parameter that cannot be updated (due to api limitations) [please force recreation of the resource](https://www.terraform.io/cli/state/taint#forcing-re-creation-of-resources).`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "data_refresh_window_days": { - Type: schema.TypeInt, - Optional: true, - Description: `The number of days to look back to automatically refresh the data. -For example, if dataRefreshWindowDays = 10, then every day BigQuery -reingests data for [today-10, today-1], rather than ingesting data for -just [today-1]. Only valid if the data source supports the feature. 
-Set the value to 0 to use the default value.`, - }, - "destination_dataset_id": { - Type: schema.TypeString, - Optional: true, - Description: `The BigQuery target dataset id.`, - }, - "disabled": { - Type: schema.TypeBool, - Optional: true, - Description: `When set to true, no runs are scheduled for a given transfer.`, - }, - "email_preferences": { - Type: schema.TypeList, - Optional: true, - Description: `Email notifications will be sent according to these preferences to the -email address of the user who owns this transfer config.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enable_failure_email": { - Type: schema.TypeBool, - Required: true, - Description: `If true, email notifications will be sent on transfer run failures.`, - }, - }, - }, - }, - "location": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The geographic location where the transfer config should reside. -Examples: US, EU, asia-northeast1. The default value is US.`, - Default: "US", - }, - "notification_pubsub_topic": { - Type: schema.TypeString, - Optional: true, - Description: `Pub/Sub topic where notifications will be sent after transfer runs -associated with this transfer config finish.`, - }, - "schedule": { - Type: schema.TypeString, - Optional: true, - Description: `Data transfer schedule. If the data source does not support a custom -schedule, this should be empty. If it is empty, the default value for -the data source will be used. The specified times are in UTC. Examples -of valid format: 1st,3rd monday of month 15:30, every wed,fri of jan, -jun 13:15, and first sunday of quarter 00:00. 
See more explanation -about the format here: -https://cloud.google.com/appengine/docs/flexible/python/scheduling-jobs-with-cron-yaml#the_schedule_format -NOTE: the granularity should be at least 8 hours, or less frequent.`, - }, - "schedule_options": { - Type: schema.TypeList, - Optional: true, - Description: `Options customizing the data transfer schedule.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disable_auto_scheduling": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, automatic scheduling of data transfer runs for this -configuration will be disabled. The runs can be started on ad-hoc -basis using transferConfigs.startManualRuns API. When automatic -scheduling is disabled, the TransferConfig.schedule field will -be ignored.`, - AtLeastOneOf: []string{"schedule_options.0.disable_auto_scheduling", "schedule_options.0.start_time", "schedule_options.0.end_time"}, - }, - "end_time": { - Type: schema.TypeString, - Optional: true, - Description: `Defines time to stop scheduling transfer runs. A transfer run cannot be -scheduled at or after the end time. The end time can be changed at any -moment. The time when a data transfer can be triggered manually is not -limited by this option.`, - AtLeastOneOf: []string{"schedule_options.0.disable_auto_scheduling", "schedule_options.0.start_time", "schedule_options.0.end_time"}, - }, - "start_time": { - Type: schema.TypeString, - Optional: true, - Description: `Specifies time to start scheduling transfer runs. The first run will be -scheduled at or after the start time according to a recurrence pattern -defined in the schedule string. The start time can be changed at any -moment. 
The time when a data transfer can be triggered manually is not -limited by this option.`, - AtLeastOneOf: []string{"schedule_options.0.disable_auto_scheduling", "schedule_options.0.start_time", "schedule_options.0.end_time"}, - }, - }, - }, - }, - "sensitive_params": { - Type: schema.TypeList, - Optional: true, - Description: `Different parameters are configured primarily using the the 'params' field on this -resource. This block contains the parameters which contain secrets or passwords so that they can be marked -sensitive and hidden from plan output. The name of the field, eg: secret_access_key, will be the key -in the 'params' map in the api request. - -Credentials may not be specified in both locations and will cause an error. Changing from one location -to a different credential configuration in the config will require an apply to update state.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret_access_key": { - Type: schema.TypeString, - Required: true, - Description: `The Secret Access Key of the AWS account transferring data from.`, - Sensitive: true, - }, - }, - }, - }, - "service_account_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Service account email. If this field is set, transfer config will -be created with this service account credentials. It requires that -requesting user calling this API has permissions to act as this service account.`, - Default: "", - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the transfer config. Transfer config names have the -form projects/{projectId}/locations/{location}/transferConfigs/{configId}. -Where configId is usually a uuid, but this is not required. 
-The name is ignored when creating a transfer config.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigqueryDataTransferConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandBigqueryDataTransferConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - destinationDatasetIdProp, err := expandBigqueryDataTransferConfigDestinationDatasetId(d.Get("destination_dataset_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destination_dataset_id"); !isEmptyValue(reflect.ValueOf(destinationDatasetIdProp)) && (ok || !reflect.DeepEqual(v, destinationDatasetIdProp)) { - obj["destinationDatasetId"] = destinationDatasetIdProp - } - dataSourceIdProp, err := expandBigqueryDataTransferConfigDataSourceId(d.Get("data_source_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_source_id"); !isEmptyValue(reflect.ValueOf(dataSourceIdProp)) && (ok || !reflect.DeepEqual(v, dataSourceIdProp)) { - obj["dataSourceId"] = dataSourceIdProp - } - scheduleProp, err := expandBigqueryDataTransferConfigSchedule(d.Get("schedule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schedule"); !isEmptyValue(reflect.ValueOf(scheduleProp)) && (ok || !reflect.DeepEqual(v, scheduleProp)) { - obj["schedule"] = scheduleProp - } - scheduleOptionsProp, err := expandBigqueryDataTransferConfigScheduleOptions(d.Get("schedule_options"), d, config) - if err != nil { - return err - } else if 
v, ok := d.GetOkExists("schedule_options"); !isEmptyValue(reflect.ValueOf(scheduleOptionsProp)) && (ok || !reflect.DeepEqual(v, scheduleOptionsProp)) { - obj["scheduleOptions"] = scheduleOptionsProp - } - emailPreferencesProp, err := expandBigqueryDataTransferConfigEmailPreferences(d.Get("email_preferences"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("email_preferences"); !isEmptyValue(reflect.ValueOf(emailPreferencesProp)) && (ok || !reflect.DeepEqual(v, emailPreferencesProp)) { - obj["emailPreferences"] = emailPreferencesProp - } - notificationPubsubTopicProp, err := expandBigqueryDataTransferConfigNotificationPubsubTopic(d.Get("notification_pubsub_topic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_pubsub_topic"); !isEmptyValue(reflect.ValueOf(notificationPubsubTopicProp)) && (ok || !reflect.DeepEqual(v, notificationPubsubTopicProp)) { - obj["notificationPubsubTopic"] = notificationPubsubTopicProp - } - dataRefreshWindowDaysProp, err := expandBigqueryDataTransferConfigDataRefreshWindowDays(d.Get("data_refresh_window_days"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_refresh_window_days"); !isEmptyValue(reflect.ValueOf(dataRefreshWindowDaysProp)) && (ok || !reflect.DeepEqual(v, dataRefreshWindowDaysProp)) { - obj["dataRefreshWindowDays"] = dataRefreshWindowDaysProp - } - disabledProp, err := expandBigqueryDataTransferConfigDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - paramsProp, err := expandBigqueryDataTransferConfigParams(d.Get("params"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("params"); !isEmptyValue(reflect.ValueOf(paramsProp)) && (ok || !reflect.DeepEqual(v, paramsProp)) { - obj["params"] = 
paramsProp - } - - obj, err = resourceBigqueryDataTransferConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigqueryDataTransferBasePath}}projects/{{project}}/locations/{{location}}/transferConfigs?serviceAccountName={{service_account_name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Config: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Config: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), iamMemberMissing) - if err != nil { - return fmt.Errorf("Error creating Config: %s", err) - } - if err := d.Set("name", flattenBigqueryDataTransferConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - log.Printf("[DEBUG] Finished creating Config %q: %#v", d.Id(), res) - - return resourceBigqueryDataTransferConfigRead(d, meta) -} - -func resourceBigqueryDataTransferConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigqueryDataTransferBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Config: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, iamMemberMissing) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigqueryDataTransferConfig %q", d.Id())) - } - - res, err = resourceBigqueryDataTransferConfigDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing BigqueryDataTransferConfig because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - - if err := d.Set("display_name", flattenBigqueryDataTransferConfigDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("name", flattenBigqueryDataTransferConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("destination_dataset_id", flattenBigqueryDataTransferConfigDestinationDatasetId(res["destinationDatasetId"], d, config)); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("data_source_id", flattenBigqueryDataTransferConfigDataSourceId(res["dataSourceId"], d, config)); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("schedule", flattenBigqueryDataTransferConfigSchedule(res["schedule"], d, config)); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("schedule_options", flattenBigqueryDataTransferConfigScheduleOptions(res["scheduleOptions"], d, config)); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("email_preferences", flattenBigqueryDataTransferConfigEmailPreferences(res["emailPreferences"], d, config)); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("notification_pubsub_topic", flattenBigqueryDataTransferConfigNotificationPubsubTopic(res["notificationPubsubTopic"], d, config)); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("data_refresh_window_days", flattenBigqueryDataTransferConfigDataRefreshWindowDays(res["dataRefreshWindowDays"], d, config)); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - if 
err := d.Set("disabled", flattenBigqueryDataTransferConfigDisabled(res["disabled"], d, config)); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("params", flattenBigqueryDataTransferConfigParams(res["params"], d, config)); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - - return nil -} - -func resourceBigqueryDataTransferConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Config: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandBigqueryDataTransferConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - destinationDatasetIdProp, err := expandBigqueryDataTransferConfigDestinationDatasetId(d.Get("destination_dataset_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destination_dataset_id"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, destinationDatasetIdProp)) { - obj["destinationDatasetId"] = destinationDatasetIdProp - } - scheduleProp, err := expandBigqueryDataTransferConfigSchedule(d.Get("schedule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schedule"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, scheduleProp)) { - obj["schedule"] = scheduleProp - } - scheduleOptionsProp, err := expandBigqueryDataTransferConfigScheduleOptions(d.Get("schedule_options"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schedule_options"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, scheduleOptionsProp)) { - obj["scheduleOptions"] = scheduleOptionsProp - } - emailPreferencesProp, err := expandBigqueryDataTransferConfigEmailPreferences(d.Get("email_preferences"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("email_preferences"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, emailPreferencesProp)) { - obj["emailPreferences"] = emailPreferencesProp - } - notificationPubsubTopicProp, err := expandBigqueryDataTransferConfigNotificationPubsubTopic(d.Get("notification_pubsub_topic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_pubsub_topic"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationPubsubTopicProp)) { - obj["notificationPubsubTopic"] = notificationPubsubTopicProp - } - dataRefreshWindowDaysProp, err := expandBigqueryDataTransferConfigDataRefreshWindowDays(d.Get("data_refresh_window_days"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_refresh_window_days"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dataRefreshWindowDaysProp)) { - obj["dataRefreshWindowDays"] = dataRefreshWindowDaysProp - } - disabledProp, err := expandBigqueryDataTransferConfigDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - paramsProp, err := expandBigqueryDataTransferConfigParams(d.Get("params"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("params"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, paramsProp)) { - obj["params"] = paramsProp - } - - obj, err = resourceBigqueryDataTransferConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, 
config, "{{BigqueryDataTransferBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Config %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("destination_dataset_id") { - updateMask = append(updateMask, "destinationDatasetId") - } - - if d.HasChange("schedule") { - updateMask = append(updateMask, "schedule") - } - - if d.HasChange("schedule_options") { - updateMask = append(updateMask, "scheduleOptions") - } - - if d.HasChange("email_preferences") { - updateMask = append(updateMask, "emailPreferences") - } - - if d.HasChange("notification_pubsub_topic") { - updateMask = append(updateMask, "notificationPubsubTopic") - } - - if d.HasChange("data_refresh_window_days") { - updateMask = append(updateMask, "dataRefreshWindowDays") - } - - if d.HasChange("disabled") { - updateMask = append(updateMask, "disabled") - } - - if d.HasChange("params") { - updateMask = append(updateMask, "params") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), iamMemberMissing) - - if err != nil { - return fmt.Errorf("Error updating Config %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Config %q: %#v", d.Id(), res) - } - - return resourceBigqueryDataTransferConfigRead(d, meta) -} - -func resourceBigqueryDataTransferConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - 
if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Config: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigqueryDataTransferBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Config %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), iamMemberMissing) - if err != nil { - return handleNotFoundError(err, d, "Config") - } - - log.Printf("[DEBUG] Finished deleting Config %q: %#v", d.Id(), res) - return nil -} - -func resourceBigqueryDataTransferConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*schema.ResourceData{d}, nil -} - -func flattenBigqueryDataTransferConfigDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigDestinationDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigDataSourceId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenBigqueryDataTransferConfigScheduleOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["disable_auto_scheduling"] = - flattenBigqueryDataTransferConfigScheduleOptionsDisableAutoScheduling(original["disableAutoScheduling"], d, config) - transformed["start_time"] = - flattenBigqueryDataTransferConfigScheduleOptionsStartTime(original["startTime"], d, config) - transformed["end_time"] = - flattenBigqueryDataTransferConfigScheduleOptionsEndTime(original["endTime"], d, config) - return []interface{}{transformed} -} -func flattenBigqueryDataTransferConfigScheduleOptionsDisableAutoScheduling(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigScheduleOptionsStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigScheduleOptionsEndTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigEmailPreferences(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enable_failure_email"] = - flattenBigqueryDataTransferConfigEmailPreferencesEnableFailureEmail(original["enableFailureEmail"], d, config) - return []interface{}{transformed} -} -func flattenBigqueryDataTransferConfigEmailPreferencesEnableFailureEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigNotificationPubsubTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenBigqueryDataTransferConfigDataRefreshWindowDays(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenBigqueryDataTransferConfigDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigParams(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - - kv := v.(map[string]interface{}) - - res := make(map[string]string) - for key, value := range kv { - res[key] = fmt.Sprintf("%v", value) - } - return res -} - -func expandBigqueryDataTransferConfigDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigDestinationDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigDataSourceId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigScheduleOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDisableAutoScheduling, err := expandBigqueryDataTransferConfigScheduleOptionsDisableAutoScheduling(original["disable_auto_scheduling"], d, 
config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDisableAutoScheduling); val.IsValid() && !isEmptyValue(val) { - transformed["disableAutoScheduling"] = transformedDisableAutoScheduling - } - - transformedStartTime, err := expandBigqueryDataTransferConfigScheduleOptionsStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - transformedEndTime, err := expandBigqueryDataTransferConfigScheduleOptionsEndTime(original["end_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEndTime); val.IsValid() && !isEmptyValue(val) { - transformed["endTime"] = transformedEndTime - } - - return transformed, nil -} - -func expandBigqueryDataTransferConfigScheduleOptionsDisableAutoScheduling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigScheduleOptionsStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigScheduleOptionsEndTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigEmailPreferences(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnableFailureEmail, err := expandBigqueryDataTransferConfigEmailPreferencesEnableFailureEmail(original["enable_failure_email"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnableFailureEmail); val.IsValid() && !isEmptyValue(val) { - 
transformed["enableFailureEmail"] = transformedEnableFailureEmail - } - - return transformed, nil -} - -func expandBigqueryDataTransferConfigEmailPreferencesEnableFailureEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigNotificationPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigDataRefreshWindowDays(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigParams(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceBigqueryDataTransferConfigEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - paramMap, ok := obj["params"] - if !ok { - paramMap = make(map[string]string) - } - - var params map[string]string - params = paramMap.(map[string]string) - - for _, sp := range sensitiveParams { - if auth, _ := d.GetOkExists("sensitive_params.0." + sp); auth != "" { - params[sp] = auth.(string) - } - } - - obj["params"] = params - - return obj, nil -} - -func resourceBigqueryDataTransferConfigDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if paramMap, ok := res["params"]; ok { - params := paramMap.(map[string]interface{}) - for _, sp := range sensitiveParams { - if _, apiOk := params[sp]; apiOk { - if _, exists := d.GetOkExists("sensitive_params.0." 
+ sp); exists { - delete(params, sp) - } else { - params[sp] = d.Get("params." + sp) - } - } - } - } - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_datapolicy_data_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_datapolicy_data_policy.go deleted file mode 100644 index 799eb80465..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_datapolicy_data_policy.go +++ /dev/null @@ -1,426 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceBigqueryDatapolicyDataPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceBigqueryDatapolicyDataPolicyCreate, - Read: resourceBigqueryDatapolicyDataPolicyRead, - Update: resourceBigqueryDatapolicyDataPolicyUpdate, - Delete: resourceBigqueryDatapolicyDataPolicyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBigqueryDatapolicyDataPolicyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "data_policy_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `User-assigned (human readable) ID of the data policy that needs to be unique within a project. Used as {dataPolicyId} in part of the resource name.`, - }, - "data_policy_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"COLUMN_LEVEL_SECURITY_POLICY", "DATA_MASKING_POLICY"}), - Description: `The enrollment level of the service. 
Possible values: ["COLUMN_LEVEL_SECURITY_POLICY", "DATA_MASKING_POLICY"]`, - }, - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the location of the data policy.`, - }, - "policy_tag": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: projectNumberDiffSuppress, - Description: `Policy tag resource name, in the format of projects/{project_number}/locations/{locationId}/taxonomies/{taxonomyId}/policyTags/{policyTag_id}.`, - }, - "data_masking_policy": { - Type: schema.TypeList, - Optional: true, - Description: `The data masking policy that specifies the data masking rule to use.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "predefined_expression": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"SHA256", "ALWAYS_NULL", "DEFAULT_MASKING_VALUE", "LAST_FOUR_CHARACTERS", "FIRST_FOUR_CHARACTERS", "EMAIL_MASK", "DATE_YEAR_MASK"}), - Description: `The available masking rules. Learn more here: https://cloud.google.com/bigquery/docs/column-data-masking-intro#masking_options. 
Possible values: ["SHA256", "ALWAYS_NULL", "DEFAULT_MASKING_VALUE", "LAST_FOUR_CHARACTERS", "FIRST_FOUR_CHARACTERS", "EMAIL_MASK", "DATE_YEAR_MASK"]`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Resource name of this data policy, in the format of projects/{project_number}/locations/{locationId}/dataPolicies/{dataPolicyId}.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigqueryDatapolicyDataPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - dataPolicyIdProp, err := expandBigqueryDatapolicyDataPolicyDataPolicyId(d.Get("data_policy_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_policy_id"); !isEmptyValue(reflect.ValueOf(dataPolicyIdProp)) && (ok || !reflect.DeepEqual(v, dataPolicyIdProp)) { - obj["dataPolicyId"] = dataPolicyIdProp - } - policyTagProp, err := expandBigqueryDatapolicyDataPolicyPolicyTag(d.Get("policy_tag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("policy_tag"); !isEmptyValue(reflect.ValueOf(policyTagProp)) && (ok || !reflect.DeepEqual(v, policyTagProp)) { - obj["policyTag"] = policyTagProp - } - dataPolicyTypeProp, err := expandBigqueryDatapolicyDataPolicyDataPolicyType(d.Get("data_policy_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_policy_type"); !isEmptyValue(reflect.ValueOf(dataPolicyTypeProp)) && (ok || !reflect.DeepEqual(v, dataPolicyTypeProp)) { - obj["dataPolicyType"] = dataPolicyTypeProp - } - dataMaskingPolicyProp, err := expandBigqueryDatapolicyDataPolicyDataMaskingPolicy(d.Get("data_masking_policy"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("data_masking_policy"); !isEmptyValue(reflect.ValueOf(dataMaskingPolicyProp)) && (ok || !reflect.DeepEqual(v, dataMaskingPolicyProp)) { - obj["dataMaskingPolicy"] = dataMaskingPolicyProp - } - - url, err := replaceVars(d, config, "{{BigqueryDatapolicyBasePath}}projects/{{project}}/locations/{{location}}/dataPolicies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new DataPolicy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DataPolicy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating DataPolicy: %s", err) - } - if err := d.Set("name", flattenBigqueryDatapolicyDataPolicyName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/dataPolicies/{{data_policy_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating DataPolicy %q: %#v", d.Id(), res) - - return resourceBigqueryDatapolicyDataPolicyRead(d, meta) -} - -func resourceBigqueryDatapolicyDataPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigqueryDatapolicyBasePath}}projects/{{project}}/locations/{{location}}/dataPolicies/{{data_policy_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, 
config) - if err != nil { - return fmt.Errorf("Error fetching project for DataPolicy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigqueryDatapolicyDataPolicy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading DataPolicy: %s", err) - } - - if err := d.Set("name", flattenBigqueryDatapolicyDataPolicyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading DataPolicy: %s", err) - } - if err := d.Set("data_policy_id", flattenBigqueryDatapolicyDataPolicyDataPolicyId(res["dataPolicyId"], d, config)); err != nil { - return fmt.Errorf("Error reading DataPolicy: %s", err) - } - if err := d.Set("policy_tag", flattenBigqueryDatapolicyDataPolicyPolicyTag(res["policyTag"], d, config)); err != nil { - return fmt.Errorf("Error reading DataPolicy: %s", err) - } - if err := d.Set("data_policy_type", flattenBigqueryDatapolicyDataPolicyDataPolicyType(res["dataPolicyType"], d, config)); err != nil { - return fmt.Errorf("Error reading DataPolicy: %s", err) - } - if err := d.Set("data_masking_policy", flattenBigqueryDatapolicyDataPolicyDataMaskingPolicy(res["dataMaskingPolicy"], d, config)); err != nil { - return fmt.Errorf("Error reading DataPolicy: %s", err) - } - - return nil -} - -func resourceBigqueryDatapolicyDataPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DataPolicy: %s", err) - } - billingProject = project - - obj := 
make(map[string]interface{}) - policyTagProp, err := expandBigqueryDatapolicyDataPolicyPolicyTag(d.Get("policy_tag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("policy_tag"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, policyTagProp)) { - obj["policyTag"] = policyTagProp - } - dataPolicyTypeProp, err := expandBigqueryDatapolicyDataPolicyDataPolicyType(d.Get("data_policy_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_policy_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dataPolicyTypeProp)) { - obj["dataPolicyType"] = dataPolicyTypeProp - } - dataMaskingPolicyProp, err := expandBigqueryDatapolicyDataPolicyDataMaskingPolicy(d.Get("data_masking_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_masking_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dataMaskingPolicyProp)) { - obj["dataMaskingPolicy"] = dataMaskingPolicyProp - } - - url, err := replaceVars(d, config, "{{BigqueryDatapolicyBasePath}}projects/{{project}}/locations/{{location}}/dataPolicies/{{data_policy_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating DataPolicy %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("policy_tag") { - updateMask = append(updateMask, "policyTag") - } - - if d.HasChange("data_policy_type") { - updateMask = append(updateMask, "dataPolicyType") - } - - if d.HasChange("data_masking_policy") { - updateMask = append(updateMask, "dataMaskingPolicy") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating DataPolicy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating DataPolicy %q: %#v", d.Id(), res) - } - - return resourceBigqueryDatapolicyDataPolicyRead(d, meta) -} - -func resourceBigqueryDatapolicyDataPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DataPolicy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigqueryDatapolicyBasePath}}projects/{{project}}/locations/{{location}}/dataPolicies/{{data_policy_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting DataPolicy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DataPolicy") - } - - log.Printf("[DEBUG] Finished deleting DataPolicy %q: %#v", d.Id(), res) - return nil -} - -func resourceBigqueryDatapolicyDataPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/dataPolicies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, 
"projects/{{project}}/locations/{{location}}/dataPolicies/{{data_policy_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenBigqueryDatapolicyDataPolicyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDatapolicyDataPolicyDataPolicyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDatapolicyDataPolicyPolicyTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDatapolicyDataPolicyDataPolicyType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDatapolicyDataPolicyDataMaskingPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["predefined_expression"] = - flattenBigqueryDatapolicyDataPolicyDataMaskingPolicyPredefinedExpression(original["predefinedExpression"], d, config) - return []interface{}{transformed} -} -func flattenBigqueryDatapolicyDataPolicyDataMaskingPolicyPredefinedExpression(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBigqueryDatapolicyDataPolicyDataPolicyId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDatapolicyDataPolicyPolicyTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDatapolicyDataPolicyDataPolicyType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDatapolicyDataPolicyDataMaskingPolicy(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPredefinedExpression, err := expandBigqueryDatapolicyDataPolicyDataMaskingPolicyPredefinedExpression(original["predefined_expression"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPredefinedExpression); val.IsValid() && !isEmptyValue(val) { - transformed["predefinedExpression"] = transformedPredefinedExpression - } - - return transformed, nil -} - -func expandBigqueryDatapolicyDataPolicyDataMaskingPolicyPredefinedExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_dataset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_dataset.go deleted file mode 100644 index be5a847e33..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_dataset.go +++ /dev/null @@ -1,1409 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/googleapi" -) - -const datasetIdRegexp = `[0-9A-Za-z_]+` - -func validateDatasetId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(datasetIdRegexp).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_)", k)) - } - - if len(value) > 1024 { - errors = append(errors, fmt.Errorf( - "%q cannot be greater than 1,024 characters", k)) - } - - return -} - -func validateDefaultTableExpirationMs(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 3600000 { - errors = append(errors, fmt.Errorf("%q cannot be shorter than 3600000 milliseconds (one hour)", k)) - } - - return -} - -func ResourceBigQueryDataset() *schema.Resource { - return &schema.Resource{ - Create: resourceBigQueryDatasetCreate, - Read: resourceBigQueryDatasetRead, - Update: resourceBigQueryDatasetUpdate, - Delete: resourceBigQueryDatasetDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBigQueryDatasetImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateDatasetId, - Description: `A unique ID for this dataset, without the project name. The ID -must contain only letters (a-z, A-Z), numbers (0-9), or -underscores (_). 
The maximum length is 1,024 characters.`, - }, - - "access": { - Type: schema.TypeSet, - Computed: true, - Optional: true, - Description: `An array of objects that define dataset access for one or more entities.`, - Elem: bigqueryDatasetAccessSchema(), - // Default schema.HashSchema is used. - }, - "default_collation": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Defines the default collation specification of future tables created -in the dataset. If a table is created in this dataset without table-level -default collation, then the table inherits the dataset default collation, -which is applied to the string fields that do not have explicit collation -specified. A change to this field affects only tables created afterwards, -and does not alter the existing tables. - -The following values are supported: -- 'und:ci': undetermined locale, case insensitive. -- '': empty string. Default to case-sensitive behavior.`, - }, - "default_encryption_configuration": { - Type: schema.TypeList, - Optional: true, - Description: `The default encryption key for all tables in the dataset. Once this property is set, -all newly-created partitioned tables in the dataset will have encryption key set to -this value, unless table creation request (or query) overrides the key.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_name": { - Type: schema.TypeString, - Required: true, - Description: `Describes the Cloud KMS encryption key that will be used to protect destination -BigQuery table. The BigQuery Service Account associated with your project requires -access to this encryption key.`, - }, - }, - }, - }, - "default_partition_expiration_ms": { - Type: schema.TypeInt, - Optional: true, - Description: `The default partition expiration for all partitioned tables in -the dataset, in milliseconds. 
- - -Once this property is set, all newly-created partitioned tables in -the dataset will have an 'expirationMs' property in the 'timePartitioning' -settings set to this value, and changing the value will only -affect new tables, not existing ones. The storage in a partition will -have an expiration time of its partition time plus this value. -Setting this property overrides the use of 'defaultTableExpirationMs' -for partitioned tables: only one of 'defaultTableExpirationMs' and -'defaultPartitionExpirationMs' will be used for any new partitioned -table. If you provide an explicit 'timePartitioning.expirationMs' when -creating or updating a partitioned table, that value takes precedence -over the default partition expiration time indicated by this property.`, - }, - "default_table_expiration_ms": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validateDefaultTableExpirationMs, - Description: `The default lifetime of all tables in the dataset, in milliseconds. -The minimum value is 3600000 milliseconds (one hour). - - -Once this property is set, all newly-created tables in the dataset -will have an 'expirationTime' property set to the creation time plus -the value in this property, and changing the value will only affect -new tables, not existing ones. When the 'expirationTime' for a given -table is reached, that table will be deleted automatically. 
-If a table's 'expirationTime' is modified or removed before the -table expires, or if you provide an explicit 'expirationTime' when -creating a table, that value takes precedence over the default -expiration time indicated by this property.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A user-friendly description of the dataset`, - }, - "friendly_name": { - Type: schema.TypeString, - Optional: true, - Description: `A descriptive name for the dataset`, - }, - "is_case_insensitive": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: `TRUE if the dataset and its table names are case-insensitive, otherwise FALSE. -By default, this is FALSE, which means the dataset and its table names are -case-sensitive. This field does not affect routine references.`, - }, - "labels": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - Description: `The labels associated with this dataset. You can use these to -organize and group your datasets`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "location": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: caseDiffSuppress, - Description: `The geographic location where the dataset should reside. -See [official docs](https://cloud.google.com/bigquery/docs/dataset-locations). - - -There are two types of locations, regional or multi-regional. A regional -location is a specific geographic place, such as Tokyo, and a multi-regional -location is a large geographic area, such as the United States, that -contains at least two geographic places. - - -The default value is multi-regional location 'US'. -Changing this forces a new resource to be created.`, - Default: "US", - }, - "max_time_travel_hours": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Defines the time travel window in hours. 
The value can be from 48 to 168 hours (2 to 7 days).`, - }, - "creation_time": { - Type: schema.TypeInt, - Computed: true, - Description: `The time when this dataset was created, in milliseconds since the -epoch.`, - }, - "etag": { - Type: schema.TypeString, - Computed: true, - Description: `A hash of the resource.`, - }, - "last_modified_time": { - Type: schema.TypeInt, - Computed: true, - Description: `The date when this dataset or any of its tables was last modified, in -milliseconds since the epoch.`, - }, - "delete_contents_on_destroy": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `If set to 'true', delete all the tables in the -dataset when destroying the resource; otherwise, -destroying the resource will fail if tables are present.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func bigqueryDatasetAccessSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset": { - Type: schema.TypeList, - Optional: true, - Description: `Grants all resources of particular types in a particular dataset read access to the current dataset.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset": { - Type: schema.TypeList, - Required: true, - Description: `The dataset this entry applies to`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the dataset containing this table.`, - }, - "project_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the project containing this table.`, - }, - }, - }, - }, - "target_types": { - Type: schema.TypeList, - Required: true, - Description: `Which resources in the dataset this entry applies to. 
Currently, only views are supported, -but additional target types may be added in the future. Possible values: VIEWS`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "domain": { - Type: schema.TypeString, - Optional: true, - Description: `A domain to grant access to. Any users signed in with the -domain specified will be granted the specified access`, - }, - "group_by_email": { - Type: schema.TypeString, - Optional: true, - Description: `An email address of a Google Group to grant access to.`, - }, - "role": { - Type: schema.TypeString, - Optional: true, - Description: `Describes the rights granted to the user specified by the other -member of the access object. Basic, predefined, and custom roles -are supported. Predefined roles that have equivalent basic roles -are swapped by the API to their basic counterparts. See -[official docs](https://cloud.google.com/bigquery/docs/access-control).`, - }, - "routine": { - Type: schema.TypeList, - Optional: true, - Description: `A routine from a different dataset to grant access to. Queries -executed against that routine will have read access to tables in -this dataset. The role field is not required when this field is -set. If that routine is updated by any user, access to the routine -needs to be granted again via an update operation.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the dataset containing this table.`, - }, - "project_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the project containing this table.`, - }, - "routine_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the routine. The ID must contain only letters (a-z, -A-Z), numbers (0-9), or underscores (_). 
The maximum length -is 256 characters.`, - }, - }, - }, - }, - "special_group": { - Type: schema.TypeString, - Optional: true, - Description: `A special group to grant access to. Possible values include: - - -* 'projectOwners': Owners of the enclosing project. - - -* 'projectReaders': Readers of the enclosing project. - - -* 'projectWriters': Writers of the enclosing project. - - -* 'allAuthenticatedUsers': All authenticated BigQuery users.`, - }, - "user_by_email": { - Type: schema.TypeString, - Optional: true, - Description: `An email address of a user to grant access to. For example: -fred@example.com`, - }, - "view": { - Type: schema.TypeList, - Optional: true, - Description: `A view from a different dataset to grant access to. Queries -executed against that view will have read access to tables in -this dataset. The role field is not required when this field is -set. If that view is updated by any user, access to the view -needs to be granted again via an update operation.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the dataset containing this table.`, - }, - "project_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the project containing this table.`, - }, - "table_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the table. The ID must contain only letters (a-z, -A-Z), numbers (0-9), or underscores (_). 
The maximum length -is 1,024 characters.`, - }, - }, - }, - }, - }, - } -} - -func resourceBigQueryDatasetCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - maxTimeTravelHoursProp, err := expandBigQueryDatasetMaxTimeTravelHours(d.Get("max_time_travel_hours"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("max_time_travel_hours"); !isEmptyValue(reflect.ValueOf(maxTimeTravelHoursProp)) && (ok || !reflect.DeepEqual(v, maxTimeTravelHoursProp)) { - obj["maxTimeTravelHours"] = maxTimeTravelHoursProp - } - accessProp, err := expandBigQueryDatasetAccess(d.Get("access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("access"); !isEmptyValue(reflect.ValueOf(accessProp)) && (ok || !reflect.DeepEqual(v, accessProp)) { - obj["access"] = accessProp - } - datasetReferenceProp, err := expandBigQueryDatasetDatasetReference(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(reflect.ValueOf(datasetReferenceProp)) { - obj["datasetReference"] = datasetReferenceProp - } - defaultTableExpirationMsProp, err := expandBigQueryDatasetDefaultTableExpirationMs(d.Get("default_table_expiration_ms"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_table_expiration_ms"); !isEmptyValue(reflect.ValueOf(defaultTableExpirationMsProp)) && (ok || !reflect.DeepEqual(v, defaultTableExpirationMsProp)) { - obj["defaultTableExpirationMs"] = defaultTableExpirationMsProp - } - defaultPartitionExpirationMsProp, err := expandBigQueryDatasetDefaultPartitionExpirationMs(d.Get("default_partition_expiration_ms"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_partition_expiration_ms"); !isEmptyValue(reflect.ValueOf(defaultPartitionExpirationMsProp)) && (ok || !reflect.DeepEqual(v, 
defaultPartitionExpirationMsProp)) { - obj["defaultPartitionExpirationMs"] = defaultPartitionExpirationMsProp - } - descriptionProp, err := expandBigQueryDatasetDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - friendlyNameProp, err := expandBigQueryDatasetFriendlyName(d.Get("friendly_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("friendly_name"); ok || !reflect.DeepEqual(v, friendlyNameProp) { - obj["friendlyName"] = friendlyNameProp - } - labelsProp, err := expandBigQueryDatasetLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - locationProp, err := expandBigQueryDatasetLocation(d.Get("location"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("location"); !isEmptyValue(reflect.ValueOf(locationProp)) && (ok || !reflect.DeepEqual(v, locationProp)) { - obj["location"] = locationProp - } - defaultEncryptionConfigurationProp, err := expandBigQueryDatasetDefaultEncryptionConfiguration(d.Get("default_encryption_configuration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_encryption_configuration"); !isEmptyValue(reflect.ValueOf(defaultEncryptionConfigurationProp)) && (ok || !reflect.DeepEqual(v, defaultEncryptionConfigurationProp)) { - obj["defaultEncryptionConfiguration"] = defaultEncryptionConfigurationProp - } - isCaseInsensitiveProp, err := expandBigQueryDatasetIsCaseInsensitive(d.Get("is_case_insensitive"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_case_insensitive"); 
!isEmptyValue(reflect.ValueOf(isCaseInsensitiveProp)) && (ok || !reflect.DeepEqual(v, isCaseInsensitiveProp)) { - obj["isCaseInsensitive"] = isCaseInsensitiveProp - } - defaultCollationProp, err := expandBigQueryDatasetDefaultCollation(d.Get("default_collation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_collation"); !isEmptyValue(reflect.ValueOf(defaultCollationProp)) && (ok || !reflect.DeepEqual(v, defaultCollationProp)) { - obj["defaultCollation"] = defaultCollationProp - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Dataset: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Dataset: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Dataset %q: %#v", d.Id(), res) - - return resourceBigQueryDatasetRead(d, meta) -} - -func resourceBigQueryDatasetRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err 
!= nil { - return fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigQueryDataset %q", d.Id())) - } - - // Explicitly set virtual fields to default values if unset - if _, ok := d.GetOkExists("delete_contents_on_destroy"); !ok { - if err := d.Set("delete_contents_on_destroy", false); err != nil { - return fmt.Errorf("Error setting delete_contents_on_destroy: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - - if err := d.Set("max_time_travel_hours", flattenBigQueryDatasetMaxTimeTravelHours(res["maxTimeTravelHours"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("access", flattenBigQueryDatasetAccess(res["access"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("creation_time", flattenBigQueryDatasetCreationTime(res["creationTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - // Terraform must set the top level schema field, but since this object contains collapsed properties - // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. 
- if flattenedProp := flattenBigQueryDatasetDatasetReference(res["datasetReference"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*googleapi.Error); ok { - return fmt.Errorf("Error reading Dataset: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - if err := d.Set("default_table_expiration_ms", flattenBigQueryDatasetDefaultTableExpirationMs(res["defaultTableExpirationMs"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("default_partition_expiration_ms", flattenBigQueryDatasetDefaultPartitionExpirationMs(res["defaultPartitionExpirationMs"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("description", flattenBigQueryDatasetDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("etag", flattenBigQueryDatasetEtag(res["etag"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("friendly_name", flattenBigQueryDatasetFriendlyName(res["friendlyName"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("labels", flattenBigQueryDatasetLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("last_modified_time", flattenBigQueryDatasetLastModifiedTime(res["lastModifiedTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("location", flattenBigQueryDatasetLocation(res["location"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("default_encryption_configuration", 
flattenBigQueryDatasetDefaultEncryptionConfiguration(res["defaultEncryptionConfiguration"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("is_case_insensitive", flattenBigQueryDatasetIsCaseInsensitive(res["isCaseInsensitive"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("default_collation", flattenBigQueryDatasetDefaultCollation(res["defaultCollation"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - - return nil -} - -func resourceBigQueryDatasetUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - maxTimeTravelHoursProp, err := expandBigQueryDatasetMaxTimeTravelHours(d.Get("max_time_travel_hours"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("max_time_travel_hours"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maxTimeTravelHoursProp)) { - obj["maxTimeTravelHours"] = maxTimeTravelHoursProp - } - accessProp, err := expandBigQueryDatasetAccess(d.Get("access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("access"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, accessProp)) { - obj["access"] = accessProp - } - datasetReferenceProp, err := expandBigQueryDatasetDatasetReference(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(reflect.ValueOf(datasetReferenceProp)) { - obj["datasetReference"] = 
datasetReferenceProp - } - defaultTableExpirationMsProp, err := expandBigQueryDatasetDefaultTableExpirationMs(d.Get("default_table_expiration_ms"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_table_expiration_ms"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultTableExpirationMsProp)) { - obj["defaultTableExpirationMs"] = defaultTableExpirationMsProp - } - defaultPartitionExpirationMsProp, err := expandBigQueryDatasetDefaultPartitionExpirationMs(d.Get("default_partition_expiration_ms"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_partition_expiration_ms"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultPartitionExpirationMsProp)) { - obj["defaultPartitionExpirationMs"] = defaultPartitionExpirationMsProp - } - descriptionProp, err := expandBigQueryDatasetDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - friendlyNameProp, err := expandBigQueryDatasetFriendlyName(d.Get("friendly_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("friendly_name"); ok || !reflect.DeepEqual(v, friendlyNameProp) { - obj["friendlyName"] = friendlyNameProp - } - labelsProp, err := expandBigQueryDatasetLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - locationProp, err := expandBigQueryDatasetLocation(d.Get("location"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("location"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, locationProp)) { - obj["location"] = locationProp - } - 
defaultEncryptionConfigurationProp, err := expandBigQueryDatasetDefaultEncryptionConfiguration(d.Get("default_encryption_configuration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_encryption_configuration"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultEncryptionConfigurationProp)) { - obj["defaultEncryptionConfiguration"] = defaultEncryptionConfigurationProp - } - isCaseInsensitiveProp, err := expandBigQueryDatasetIsCaseInsensitive(d.Get("is_case_insensitive"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_case_insensitive"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, isCaseInsensitiveProp)) { - obj["isCaseInsensitive"] = isCaseInsensitiveProp - } - defaultCollationProp, err := expandBigQueryDatasetDefaultCollation(d.Get("default_collation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_collation"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultCollationProp)) { - obj["defaultCollation"] = defaultCollationProp - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Dataset %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Dataset %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Dataset %q: %#v", d.Id(), res) - } - - return resourceBigQueryDatasetRead(d, meta) -} - -func resourceBigQueryDatasetDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}?deleteContents={{delete_contents_on_destroy}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Dataset %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Dataset") - } - - log.Printf("[DEBUG] Finished deleting Dataset %q: %#v", d.Id(), res) - return nil -} - -func resourceBigQueryDatasetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/datasets/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Explicitly set virtual fields to default values on import - if err := d.Set("delete_contents_on_destroy", false); err != nil { - return nil, fmt.Errorf("Error setting delete_contents_on_destroy: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenBigQueryDatasetMaxTimeTravelHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccess(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil 
{ - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(schema.HashResource(bigqueryDatasetAccessSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "domain": flattenBigQueryDatasetAccessDomain(original["domain"], d, config), - "group_by_email": flattenBigQueryDatasetAccessGroupByEmail(original["groupByEmail"], d, config), - "role": flattenBigQueryDatasetAccessRole(original["role"], d, config), - "special_group": flattenBigQueryDatasetAccessSpecialGroup(original["specialGroup"], d, config), - "user_by_email": flattenBigQueryDatasetAccessUserByEmail(original["userByEmail"], d, config), - "view": flattenBigQueryDatasetAccessView(original["view"], d, config), - "dataset": flattenBigQueryDatasetAccessDataset(original["dataset"], d, config), - "routine": flattenBigQueryDatasetAccessRoutine(original["routine"], d, config), - }) - } - return transformed -} -func flattenBigQueryDatasetAccessDomain(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessGroupByEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessRole(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessSpecialGroup(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessUserByEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessView(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["dataset_id"] = - flattenBigQueryDatasetAccessViewDatasetId(original["datasetId"], d, config) - transformed["project_id"] = - flattenBigQueryDatasetAccessViewProjectId(original["projectId"], d, config) - transformed["table_id"] = - flattenBigQueryDatasetAccessViewTableId(original["tableId"], d, config) - return []interface{}{transformed} -} -func flattenBigQueryDatasetAccessViewDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessViewProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessViewTableId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessDataset(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset"] = - flattenBigQueryDatasetAccessDatasetDataset(original["dataset"], d, config) - transformed["target_types"] = - flattenBigQueryDatasetAccessDatasetTargetTypes(original["targetTypes"], d, config) - return []interface{}{transformed} -} -func flattenBigQueryDatasetAccessDatasetDataset(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_id"] = - flattenBigQueryDatasetAccessDatasetDatasetDatasetId(original["datasetId"], d, config) - transformed["project_id"] = - flattenBigQueryDatasetAccessDatasetDatasetProjectId(original["projectId"], d, config) - return []interface{}{transformed} -} -func flattenBigQueryDatasetAccessDatasetDatasetDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenBigQueryDatasetAccessDatasetDatasetProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessDatasetTargetTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessRoutine(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_id"] = - flattenBigQueryDatasetAccessRoutineDatasetId(original["datasetId"], d, config) - transformed["project_id"] = - flattenBigQueryDatasetAccessRoutineProjectId(original["projectId"], d, config) - transformed["routine_id"] = - flattenBigQueryDatasetAccessRoutineRoutineId(original["routineId"], d, config) - return []interface{}{transformed} -} -func flattenBigQueryDatasetAccessRoutineDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessRoutineProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessRoutineRoutineId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetCreationTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenBigQueryDatasetDatasetReference(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 
0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_id"] = - flattenBigQueryDatasetDatasetReferenceDatasetId(original["datasetId"], d, config) - return []interface{}{transformed} -} -func flattenBigQueryDatasetDatasetReferenceDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetDefaultTableExpirationMs(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenBigQueryDatasetDefaultPartitionExpirationMs(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenBigQueryDatasetDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetEtag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetFriendlyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetLastModifiedTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, 
err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -// Older Datasets in BigQuery have no Location set in the API response. This may be an issue when importing -// datasets created before BigQuery was available in multiple zones. We can safely assume that these datasets -// are in the US, as this was the default at the time. -func flattenBigQueryDatasetLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return "US" - } - return v -} - -func flattenBigQueryDatasetDefaultEncryptionConfiguration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["kms_key_name"] = - flattenBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(original["kmsKeyName"], d, config) - return []interface{}{transformed} -} -func flattenBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetIsCaseInsensitive(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetDefaultCollation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBigQueryDatasetMaxTimeTravelHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccess(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDomain, err := expandBigQueryDatasetAccessDomain(original["domain"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDomain); val.IsValid() && !isEmptyValue(val) { - transformed["domain"] = transformedDomain - } - - transformedGroupByEmail, err := expandBigQueryDatasetAccessGroupByEmail(original["group_by_email"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGroupByEmail); val.IsValid() && !isEmptyValue(val) { - transformed["groupByEmail"] = transformedGroupByEmail - } - - transformedRole, err := expandBigQueryDatasetAccessRole(original["role"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRole); val.IsValid() && !isEmptyValue(val) { - transformed["role"] = transformedRole - } - - transformedSpecialGroup, err := expandBigQueryDatasetAccessSpecialGroup(original["special_group"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSpecialGroup); val.IsValid() && !isEmptyValue(val) { - transformed["specialGroup"] = transformedSpecialGroup - } - - transformedUserByEmail, err := expandBigQueryDatasetAccessUserByEmail(original["user_by_email"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUserByEmail); val.IsValid() && !isEmptyValue(val) { - transformed["userByEmail"] = transformedUserByEmail - } - - transformedView, err := expandBigQueryDatasetAccessView(original["view"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedView); val.IsValid() && !isEmptyValue(val) { - transformed["view"] = transformedView - } - - transformedDataset, err := expandBigQueryDatasetAccessDataset(original["dataset"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDataset); val.IsValid() && 
!isEmptyValue(val) { - transformed["dataset"] = transformedDataset - } - - transformedRoutine, err := expandBigQueryDatasetAccessRoutine(original["routine"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRoutine); val.IsValid() && !isEmptyValue(val) { - transformed["routine"] = transformedRoutine - } - - req = append(req, transformed) - } - return req, nil -} - -func expandBigQueryDatasetAccessDomain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessGroupByEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessRole(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessSpecialGroup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessUserByEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessView(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDatasetId, err := expandBigQueryDatasetAccessViewDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedProjectId, err := expandBigQueryDatasetAccessViewProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = 
transformedProjectId - } - - transformedTableId, err := expandBigQueryDatasetAccessViewTableId(original["table_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - return transformed, nil -} - -func expandBigQueryDatasetAccessViewDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessViewProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessViewTableId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessDataset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDataset, err := expandBigQueryDatasetAccessDatasetDataset(original["dataset"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDataset); val.IsValid() && !isEmptyValue(val) { - transformed["dataset"] = transformedDataset - } - - transformedTargetTypes, err := expandBigQueryDatasetAccessDatasetTargetTypes(original["target_types"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTargetTypes); val.IsValid() && !isEmptyValue(val) { - transformed["targetTypes"] = transformedTargetTypes - } - - return transformed, nil -} - -func expandBigQueryDatasetAccessDatasetDataset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedDatasetId, err := expandBigQueryDatasetAccessDatasetDatasetDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedProjectId, err := expandBigQueryDatasetAccessDatasetDatasetProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - return transformed, nil -} - -func expandBigQueryDatasetAccessDatasetDatasetDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessDatasetDatasetProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessDatasetTargetTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessRoutine(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDatasetId, err := expandBigQueryDatasetAccessRoutineDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedProjectId, err := expandBigQueryDatasetAccessRoutineProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - 
transformed["projectId"] = transformedProjectId - } - - transformedRoutineId, err := expandBigQueryDatasetAccessRoutineRoutineId(original["routine_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRoutineId); val.IsValid() && !isEmptyValue(val) { - transformed["routineId"] = transformedRoutineId - } - - return transformed, nil -} - -func expandBigQueryDatasetAccessRoutineDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessRoutineProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessRoutineRoutineId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetDatasetReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedDatasetId, err := expandBigQueryDatasetDatasetReferenceDatasetId(d.Get("dataset_id"), d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - return transformed, nil -} - -func expandBigQueryDatasetDatasetReferenceDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetDefaultTableExpirationMs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetDefaultPartitionExpirationMs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetFriendlyName(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandBigQueryDatasetLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetDefaultEncryptionConfiguration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - return transformed, nil -} - -func expandBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetIsCaseInsensitive(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetDefaultCollation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_dataset_access.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_dataset_access.go deleted file mode 100644 index 4cccd90844..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_dataset_access.go +++ /dev/null @@ -1,1172 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -var bigqueryAccessRoleToPrimitiveMap = map[string]string{ - "roles/bigquery.dataOwner": "OWNER", - "roles/bigquery.dataEditor": "WRITER", - "roles/bigquery.dataViewer": "READER", -} - -func resourceBigQueryDatasetAccessRoleDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - if primitiveRole, ok := bigqueryAccessRoleToPrimitiveMap[new]; ok { - return primitiveRole == old - } - return false -} - -// we want to diff suppress any iam_members that are configured as `iam_member`, but stored in state as a different member type -func resourceBigQueryDatasetAccessIamMemberDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - if primitiveRole, ok := bigqueryAccessRoleToPrimitiveMap[new]; ok { - return primitiveRole == old - } - - if d.Get("api_updated_member") == true { - expectedIamMember := d.Get("iam_member").(string) - parts := strings.SplitAfter(expectedIamMember, ":") - - strippedIamMember := parts[0] - if len(parts) > 1 { - strippedIamMember = parts[1] - } - - if memberInState := d.Get("user_by_email").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) - } - - if 
memberInState := d.Get("group_by_email").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) - } - - if memberInState := d.Get("domain").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) - } - - if memberInState := d.Get("special_group").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) - } - } - - return false -} - -// this function will go through a response's access list and see if the iam_member has been reassigned to a different member_type -// if it has, it will return the member type, and the member -func resourceBigQueryDatasetAccessReassignIamMemberInNestedObjectList(d *schema.ResourceData, meta interface{}, items []interface{}) (member_type string, member interface{}, err error) { - expectedRole, err := expandNestedBigQueryDatasetAccessRole(d.Get("role"), d, meta.(*Config)) - if err != nil { - return "", nil, err - } - expectedFlattenedRole := flattenNestedBigQueryDatasetAccessRole(expectedRole, d, meta.(*Config)) - - expectedIamMember, err := expandNestedBigQueryDatasetAccessIamMember(d.Get("iam_member"), d, meta.(*Config)) - if err != nil { - return "", nil, err - } - expectedFlattenedIamMember := flattenNestedBigQueryDatasetAccessIamMember(expectedIamMember, d, meta.(*Config)) - - parts := strings.SplitAfter(expectedFlattenedIamMember.(string), ":") - - expectedStrippedIamMember := parts[0] - if len(parts) > 1 { - expectedStrippedIamMember = parts[1] - } - - // Search list for this resource. 
- for _, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemRole := flattenNestedBigQueryDatasetAccessRole(item["role"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemRole)) && isEmptyValue(reflect.ValueOf(expectedFlattenedRole))) && !reflect.DeepEqual(itemRole, expectedFlattenedRole) { - log.Printf("[DEBUG] Skipping item with role= %#v, looking for %#v)", itemRole, expectedFlattenedRole) - continue - } - - itemUserByEmail := flattenNestedBigQueryDatasetAccessUserByEmail(item["userByEmail"], d, meta.(*Config)) - if reflect.DeepEqual(itemUserByEmail, expectedStrippedIamMember) { - log.Printf("[DEBUG] Iam Member changed to userByEmail= %#v)", itemUserByEmail) - return "user_by_email", itemUserByEmail, nil - } - itemGroupByEmail := flattenNestedBigQueryDatasetAccessGroupByEmail(item["groupByEmail"], d, meta.(*Config)) - if reflect.DeepEqual(itemGroupByEmail, expectedStrippedIamMember) { - log.Printf("[DEBUG] Iam Member changed to groupByEmail= %#v)", itemGroupByEmail) - return "group_by_email", itemGroupByEmail, nil - } - itemDomain := flattenNestedBigQueryDatasetAccessDomain(item["domain"], d, meta.(*Config)) - if reflect.DeepEqual(itemDomain, expectedStrippedIamMember) { - log.Printf("[DEBUG] Iam Member changed to domain= %#v)", itemDomain) - return "domain", itemDomain, nil - } - itemSpecialGroup := flattenNestedBigQueryDatasetAccessSpecialGroup(item["specialGroup"], d, meta.(*Config)) - if reflect.DeepEqual(itemSpecialGroup, expectedStrippedIamMember) { - log.Printf("[DEBUG] Iam Member changed to specialGroup= %#v)", itemSpecialGroup) - return "special_group", itemSpecialGroup, nil - } - itemIamMember := flattenNestedBigQueryDatasetAccessIamMember(item["iamMember"], d, meta.(*Config)) - if reflect.DeepEqual(itemIamMember, expectedFlattenedIamMember) { - log.Printf("[DEBUG] Iam Member stayed as 
iamMember= %#v)", itemIamMember) - return "", nil, nil - } - continue - } - log.Printf("[DEBUG] Did not find item for resource %q)", d.Id()) - return "", nil, nil -} - -func ResourceBigQueryDatasetAccess() *schema.Resource { - return &schema.Resource{ - Create: resourceBigQueryDatasetAccessCreate, - Read: resourceBigQueryDatasetAccessRead, - Delete: resourceBigQueryDatasetAccessDelete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A unique ID for this dataset, without the project name. The ID -must contain only letters (a-z, A-Z), numbers (0-9), or -underscores (_). The maximum length is 1,024 characters.`, - }, - "dataset": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Grants all resources of particular types in a particular dataset read access to the current dataset.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `The dataset this entry applies to`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the dataset containing this table.`, - }, - "project_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the project containing this table.`, - }, - }, - }, - }, - "target_types": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `Which resources in the dataset this entry applies to. Currently, only views are supported, -but additional target types may be added in the future. 
Possible values: VIEWS`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, - }, - "domain": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, - Description: `A domain to grant access to. Any users signed in with the -domain specified will be granted the specified access`, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, - }, - "group_by_email": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, - Description: `An email address of a Google Group to grant access to.`, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, - }, - "iam_member": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, - Description: `Some other type of member that appears in the IAM Policy but isn't a user, -group, domain, or special group. For example: 'allUsers'`, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, - }, - "role": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: resourceBigQueryDatasetAccessRoleDiffSuppress, - Description: `Describes the rights granted to the user specified by the other -member of the access object. Basic, predefined, and custom roles are -supported. Predefined roles that have equivalent basic roles are -swapped by the API to their basic counterparts, and will show a diff -post-create. 
See -[official docs](https://cloud.google.com/bigquery/docs/access-control).`, - }, - "routine": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A routine from a different dataset to grant access to. Queries -executed against that routine will have read access to tables in -this dataset. The role field is not required when this field is -set. If that routine is updated by any user, access to the routine -needs to be granted again via an update operation.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the dataset containing this table.`, - }, - "project_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the project containing this table.`, - }, - "routine_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the routine. The ID must contain only letters (a-z, -A-Z), numbers (0-9), or underscores (_). The maximum length -is 256 characters.`, - }, - }, - }, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, - }, - "special_group": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, - Description: `A special group to grant access to. Possible values include: - - -* 'projectOwners': Owners of the enclosing project. - - -* 'projectReaders': Readers of the enclosing project. - - -* 'projectWriters': Writers of the enclosing project. 
- - -* 'allAuthenticatedUsers': All authenticated BigQuery users.`, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, - }, - "user_by_email": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, - Description: `An email address of a user to grant access to. For example: -fred@example.com`, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, - }, - "view": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A view from a different dataset to grant access to. Queries -executed against that view will have read access to tables in -this dataset. The role field is not required when this field is -set. If that view is updated by any user, access to the view -needs to be granted again via an update operation.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the dataset containing this table.`, - }, - "project_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the project containing this table.`, - }, - "table_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the table. The ID must contain only letters (a-z, -A-Z), numbers (0-9), or underscores (_). 
The maximum length -is 1,024 characters.`, - }, - }, - }, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, - }, - "api_updated_member": { - Type: schema.TypeBool, - Computed: true, - Description: "If true, represents that that the iam_member in the config was translated to a different member type by the API, and is stored in state as a different member type", - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigQueryDatasetAccessCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - datasetIdProp, err := expandNestedBigQueryDatasetAccessDatasetId(d.Get("dataset_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dataset_id"); !isEmptyValue(reflect.ValueOf(datasetIdProp)) && (ok || !reflect.DeepEqual(v, datasetIdProp)) { - obj["datasetId"] = datasetIdProp - } - roleProp, err := expandNestedBigQueryDatasetAccessRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(reflect.ValueOf(roleProp)) && (ok || !reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - userByEmailProp, err := expandNestedBigQueryDatasetAccessUserByEmail(d.Get("user_by_email"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_by_email"); !isEmptyValue(reflect.ValueOf(userByEmailProp)) && (ok || !reflect.DeepEqual(v, userByEmailProp)) { - obj["userByEmail"] = userByEmailProp - } - groupByEmailProp, err := expandNestedBigQueryDatasetAccessGroupByEmail(d.Get("group_by_email"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("group_by_email"); 
!isEmptyValue(reflect.ValueOf(groupByEmailProp)) && (ok || !reflect.DeepEqual(v, groupByEmailProp)) { - obj["groupByEmail"] = groupByEmailProp - } - domainProp, err := expandNestedBigQueryDatasetAccessDomain(d.Get("domain"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("domain"); !isEmptyValue(reflect.ValueOf(domainProp)) && (ok || !reflect.DeepEqual(v, domainProp)) { - obj["domain"] = domainProp - } - specialGroupProp, err := expandNestedBigQueryDatasetAccessSpecialGroup(d.Get("special_group"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("special_group"); !isEmptyValue(reflect.ValueOf(specialGroupProp)) && (ok || !reflect.DeepEqual(v, specialGroupProp)) { - obj["specialGroup"] = specialGroupProp - } - iamMemberProp, err := expandNestedBigQueryDatasetAccessIamMember(d.Get("iam_member"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("iam_member"); !isEmptyValue(reflect.ValueOf(iamMemberProp)) && (ok || !reflect.DeepEqual(v, iamMemberProp)) { - obj["iamMember"] = iamMemberProp - } - viewProp, err := expandNestedBigQueryDatasetAccessView(d.Get("view"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("view"); !isEmptyValue(reflect.ValueOf(viewProp)) && (ok || !reflect.DeepEqual(v, viewProp)) { - obj["view"] = viewProp - } - datasetProp, err := expandNestedBigQueryDatasetAccessDataset(d.Get("dataset"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dataset"); !isEmptyValue(reflect.ValueOf(datasetProp)) && (ok || !reflect.DeepEqual(v, datasetProp)) { - obj["dataset"] = datasetProp - } - routineProp, err := expandNestedBigQueryDatasetAccessRoutine(d.Get("routine"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("routine"); !isEmptyValue(reflect.ValueOf(routineProp)) && (ok || !reflect.DeepEqual(v, routineProp)) { - obj["routine"] = routineProp - } - - lockName, err := replaceVars(d, 
config, "{{dataset_id}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new DatasetAccess: %#v", obj) - - obj, err = resourceBigQueryDatasetAccessPatchCreateEncoder(d, meta, obj) - if err != nil { - return err - } - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DatasetAccess: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isBigqueryIAMQuotaError) - if err != nil { - return fmt.Errorf("Error creating DatasetAccess: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // by default, we are not updating the member - if err := d.Set("api_updated_member", false); err != nil { - return fmt.Errorf("Error setting api_updated_member: %s", err) - } - - // iam_member is a generalized attribute, if the API can map it to a different member type on the backend, it will return - // the correct member_type in the response. If it cannot be mapped to a different member type, it will stay in iam_member. 
- if iamMemberProp != "" { - member_type, member, err := resourceBigQueryDatasetAccessReassignIamMemberInNestedObjectList(d, meta, res["access"].([]interface{})) - if err != nil { - fmt.Println(err) - } - - // if the member type changed, we set that member_type in state (it's already in the response) and we clear iam_member - // and we set "api_updated_member" to true to acknowledge that we are making this change - if member_type != "" { - if err := d.Set(member_type, member.(string)); err != nil { - return fmt.Errorf("Error setting member_type: %s", err) - } - if err := d.Set("iam_member", ""); err != nil { - return fmt.Errorf("Error setting iam_member: %s", err) - } - if err := d.Set("api_updated_member", true); err != nil { - return fmt.Errorf("Error setting api_updated_member: %s", err) - } - } - } - - log.Printf("[DEBUG] Finished creating DatasetAccess %q: %#v", d.Id(), res) - - return resourceBigQueryDatasetAccessRead(d, meta) -} - -func resourceBigQueryDatasetAccessRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DatasetAccess: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isBigqueryIAMQuotaError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigQueryDatasetAccess %q", d.Id())) - } - - res, err = flattenNestedBigQueryDatasetAccess(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object 
isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing BigQueryDatasetAccess because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading DatasetAccess: %s", err) - } - - if err := d.Set("role", flattenNestedBigQueryDatasetAccessRole(res["role"], d, config)); err != nil { - return fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("user_by_email", flattenNestedBigQueryDatasetAccessUserByEmail(res["userByEmail"], d, config)); err != nil { - return fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("group_by_email", flattenNestedBigQueryDatasetAccessGroupByEmail(res["groupByEmail"], d, config)); err != nil { - return fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("domain", flattenNestedBigQueryDatasetAccessDomain(res["domain"], d, config)); err != nil { - return fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("special_group", flattenNestedBigQueryDatasetAccessSpecialGroup(res["specialGroup"], d, config)); err != nil { - return fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("iam_member", flattenNestedBigQueryDatasetAccessIamMember(res["iamMember"], d, config)); err != nil { - return fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("view", flattenNestedBigQueryDatasetAccessView(res["view"], d, config)); err != nil { - return fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("dataset", flattenNestedBigQueryDatasetAccessDataset(res["dataset"], d, config)); err != nil { - return fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("routine", flattenNestedBigQueryDatasetAccessRoutine(res["routine"], d, config)); err != nil { - return fmt.Errorf("Error reading DatasetAccess: %s", err) - } - - return nil -} - -func resourceBigQueryDatasetAccessDelete(d *schema.ResourceData, 
meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DatasetAccess: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "{{dataset_id}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - obj, err = resourceBigQueryDatasetAccessPatchDeleteEncoder(d, meta, obj) - if err != nil { - return handleNotFoundError(err, d, "DatasetAccess") - } - log.Printf("[DEBUG] Deleting DatasetAccess %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isBigqueryIAMQuotaError) - if err != nil { - return handleNotFoundError(err, d, "DatasetAccess") - } - - log.Printf("[DEBUG] Finished deleting DatasetAccess %q: %#v", d.Id(), res) - return nil -} - -func flattenNestedBigQueryDatasetAccessRole(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessUserByEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessGroupByEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessDomain(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessSpecialGroup(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessIamMember(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessView(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_id"] = - flattenNestedBigQueryDatasetAccessViewDatasetId(original["datasetId"], d, config) - transformed["project_id"] = - flattenNestedBigQueryDatasetAccessViewProjectId(original["projectId"], d, config) - transformed["table_id"] = - flattenNestedBigQueryDatasetAccessViewTableId(original["tableId"], d, config) - return []interface{}{transformed} -} -func flattenNestedBigQueryDatasetAccessViewDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessViewProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessViewTableId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessDataset(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset"] = - flattenNestedBigQueryDatasetAccessDatasetDataset(original["dataset"], d, config) - transformed["target_types"] = - flattenNestedBigQueryDatasetAccessDatasetTargetTypes(original["targetTypes"], d, config) - return []interface{}{transformed} -} -func flattenNestedBigQueryDatasetAccessDatasetDataset(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if 
len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_id"] = - flattenNestedBigQueryDatasetAccessDatasetDatasetDatasetId(original["datasetId"], d, config) - transformed["project_id"] = - flattenNestedBigQueryDatasetAccessDatasetDatasetProjectId(original["projectId"], d, config) - return []interface{}{transformed} -} -func flattenNestedBigQueryDatasetAccessDatasetDatasetDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessDatasetDatasetProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessDatasetTargetTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessRoutine(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_id"] = - flattenNestedBigQueryDatasetAccessRoutineDatasetId(original["datasetId"], d, config) - transformed["project_id"] = - flattenNestedBigQueryDatasetAccessRoutineProjectId(original["projectId"], d, config) - transformed["routine_id"] = - flattenNestedBigQueryDatasetAccessRoutineRoutineId(original["routineId"], d, config) - return []interface{}{transformed} -} -func flattenNestedBigQueryDatasetAccessRoutineDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessRoutineProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessRoutineRoutineId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedBigQueryDatasetAccessDatasetId(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessRole(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return nil, nil - } - - if primitiveRole, ok := bigqueryAccessRoleToPrimitiveMap[v.(string)]; ok { - return primitiveRole, nil - } - return v, nil -} - -func expandNestedBigQueryDatasetAccessUserByEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessGroupByEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessDomain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessSpecialGroup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessIamMember(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessView(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDatasetId, err := expandNestedBigQueryDatasetAccessViewDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedProjectId, err := expandNestedBigQueryDatasetAccessViewProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = 
transformedProjectId - } - - transformedTableId, err := expandNestedBigQueryDatasetAccessViewTableId(original["table_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - return transformed, nil -} - -func expandNestedBigQueryDatasetAccessViewDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessViewProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessViewTableId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessDataset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDataset, err := expandNestedBigQueryDatasetAccessDatasetDataset(original["dataset"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDataset); val.IsValid() && !isEmptyValue(val) { - transformed["dataset"] = transformedDataset - } - - transformedTargetTypes, err := expandNestedBigQueryDatasetAccessDatasetTargetTypes(original["target_types"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTargetTypes); val.IsValid() && !isEmptyValue(val) { - transformed["targetTypes"] = transformedTargetTypes - } - - return transformed, nil -} - -func expandNestedBigQueryDatasetAccessDatasetDataset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDatasetId, err := expandNestedBigQueryDatasetAccessDatasetDatasetDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedProjectId, err := expandNestedBigQueryDatasetAccessDatasetDatasetProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - return transformed, nil -} - -func expandNestedBigQueryDatasetAccessDatasetDatasetDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessDatasetDatasetProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessDatasetTargetTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessRoutine(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDatasetId, err := expandNestedBigQueryDatasetAccessRoutineDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedProjectId, err := expandNestedBigQueryDatasetAccessRoutineProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val 
:= reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedRoutineId, err := expandNestedBigQueryDatasetAccessRoutineRoutineId(original["routine_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRoutineId); val.IsValid() && !isEmptyValue(val) { - transformed["routineId"] = transformedRoutineId - } - - return transformed, nil -} - -func expandNestedBigQueryDatasetAccessRoutineDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessRoutineProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessRoutineRoutineId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenNestedBigQueryDatasetAccess(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["access"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value access. 
Actual value: %v", v) - } - - _, item, err := resourceBigQueryDatasetAccessFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceBigQueryDatasetAccessFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedRole, err := expandNestedBigQueryDatasetAccessRole(d.Get("role"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedRole := flattenNestedBigQueryDatasetAccessRole(expectedRole, d, meta.(*Config)) - expectedUserByEmail, err := expandNestedBigQueryDatasetAccessUserByEmail(d.Get("user_by_email"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedUserByEmail := flattenNestedBigQueryDatasetAccessUserByEmail(expectedUserByEmail, d, meta.(*Config)) - expectedGroupByEmail, err := expandNestedBigQueryDatasetAccessGroupByEmail(d.Get("group_by_email"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedGroupByEmail := flattenNestedBigQueryDatasetAccessGroupByEmail(expectedGroupByEmail, d, meta.(*Config)) - expectedDomain, err := expandNestedBigQueryDatasetAccessDomain(d.Get("domain"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedDomain := flattenNestedBigQueryDatasetAccessDomain(expectedDomain, d, meta.(*Config)) - expectedSpecialGroup, err := expandNestedBigQueryDatasetAccessSpecialGroup(d.Get("special_group"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedSpecialGroup := flattenNestedBigQueryDatasetAccessSpecialGroup(expectedSpecialGroup, d, meta.(*Config)) - expectedIamMember, err := expandNestedBigQueryDatasetAccessIamMember(d.Get("iam_member"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedIamMember := flattenNestedBigQueryDatasetAccessIamMember(expectedIamMember, d, meta.(*Config)) - expectedView, err 
:= expandNestedBigQueryDatasetAccessView(d.Get("view"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedView := flattenNestedBigQueryDatasetAccessView(expectedView, d, meta.(*Config)) - expectedDataset, err := expandNestedBigQueryDatasetAccessDataset(d.Get("dataset"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedDataset := flattenNestedBigQueryDatasetAccessDataset(expectedDataset, d, meta.(*Config)) - expectedRoutine, err := expandNestedBigQueryDatasetAccessRoutine(d.Get("routine"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedRoutine := flattenNestedBigQueryDatasetAccessRoutine(expectedRoutine, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemRole := flattenNestedBigQueryDatasetAccessRole(item["role"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemRole)) && isEmptyValue(reflect.ValueOf(expectedFlattenedRole))) && !reflect.DeepEqual(itemRole, expectedFlattenedRole) { - log.Printf("[DEBUG] Skipping item with role= %#v, looking for %#v)", itemRole, expectedFlattenedRole) - continue - } - itemUserByEmail := flattenNestedBigQueryDatasetAccessUserByEmail(item["userByEmail"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemUserByEmail)) && isEmptyValue(reflect.ValueOf(expectedFlattenedUserByEmail))) && !reflect.DeepEqual(itemUserByEmail, expectedFlattenedUserByEmail) { - log.Printf("[DEBUG] Skipping item with userByEmail= %#v, looking for %#v)", itemUserByEmail, expectedFlattenedUserByEmail) - continue - } - itemGroupByEmail := flattenNestedBigQueryDatasetAccessGroupByEmail(item["groupByEmail"], d, meta.(*Config)) - // 
isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemGroupByEmail)) && isEmptyValue(reflect.ValueOf(expectedFlattenedGroupByEmail))) && !reflect.DeepEqual(itemGroupByEmail, expectedFlattenedGroupByEmail) { - log.Printf("[DEBUG] Skipping item with groupByEmail= %#v, looking for %#v)", itemGroupByEmail, expectedFlattenedGroupByEmail) - continue - } - itemDomain := flattenNestedBigQueryDatasetAccessDomain(item["domain"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemDomain)) && isEmptyValue(reflect.ValueOf(expectedFlattenedDomain))) && !reflect.DeepEqual(itemDomain, expectedFlattenedDomain) { - log.Printf("[DEBUG] Skipping item with domain= %#v, looking for %#v)", itemDomain, expectedFlattenedDomain) - continue - } - itemSpecialGroup := flattenNestedBigQueryDatasetAccessSpecialGroup(item["specialGroup"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemSpecialGroup)) && isEmptyValue(reflect.ValueOf(expectedFlattenedSpecialGroup))) && !reflect.DeepEqual(itemSpecialGroup, expectedFlattenedSpecialGroup) { - log.Printf("[DEBUG] Skipping item with specialGroup= %#v, looking for %#v)", itemSpecialGroup, expectedFlattenedSpecialGroup) - continue - } - itemIamMember := flattenNestedBigQueryDatasetAccessIamMember(item["iamMember"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemIamMember)) && isEmptyValue(reflect.ValueOf(expectedFlattenedIamMember))) && !reflect.DeepEqual(itemIamMember, expectedFlattenedIamMember) { - log.Printf("[DEBUG] Skipping item with iamMember= %#v, looking for %#v)", itemIamMember, expectedFlattenedIamMember) - continue - } - itemView := 
flattenNestedBigQueryDatasetAccessView(item["view"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemView)) && isEmptyValue(reflect.ValueOf(expectedFlattenedView))) && !reflect.DeepEqual(itemView, expectedFlattenedView) { - log.Printf("[DEBUG] Skipping item with view= %#v, looking for %#v)", itemView, expectedFlattenedView) - continue - } - itemDataset := flattenNestedBigQueryDatasetAccessDataset(item["dataset"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemDataset)) && isEmptyValue(reflect.ValueOf(expectedFlattenedDataset))) && !reflect.DeepEqual(itemDataset, expectedFlattenedDataset) { - log.Printf("[DEBUG] Skipping item with dataset= %#v, looking for %#v)", itemDataset, expectedFlattenedDataset) - continue - } - itemRoutine := flattenNestedBigQueryDatasetAccessRoutine(item["routine"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemRoutine)) && isEmptyValue(reflect.ValueOf(expectedFlattenedRoutine))) && !reflect.DeepEqual(itemRoutine, expectedFlattenedRoutine) { - log.Printf("[DEBUG] Skipping item with routine= %#v, looking for %#v)", itemRoutine, expectedFlattenedRoutine) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -// PatchCreateEncoder handles creating request data to PATCH parent resource -// with list including new object. 
-func resourceBigQueryDatasetAccessPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceBigQueryDatasetAccessListForPatch(d, meta) - if err != nil { - return nil, err - } - - _, found, err := resourceBigQueryDatasetAccessFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - - // Return error if item already created. - if found != nil { - return nil, fmt.Errorf("Unable to create DatasetAccess, existing object already found: %+v", found) - } - - // Return list with the resource to create appended - res := map[string]interface{}{ - "access": append(currItems, obj), - } - - return res, nil -} - -// PatchDeleteEncoder handles creating request data to PATCH parent resource -// with list excluding object to delete. -func resourceBigQueryDatasetAccessPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceBigQueryDatasetAccessListForPatch(d, meta) - if err != nil { - return nil, err - } - - idx, item, err := resourceBigQueryDatasetAccessFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - if item == nil { - // Spoof 404 error for proper handling by Delete (i.e. no-op) - return nil, fake404("nested", "BigQueryDatasetAccess") - } - - updatedItems := append(currItems[:idx], currItems[idx+1:]...) - res := map[string]interface{}{ - "access": updatedItems, - } - - return res, nil -} - -// ListForPatch handles making API request to get parent resource and -// extracting list of objects. 
-func resourceBigQueryDatasetAccessListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { - config := meta.(*Config) - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return nil, err - } - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", project, url, userAgent, nil, isBigqueryIAMQuotaError) - if err != nil { - return nil, err - } - - var v interface{} - var ok bool - - v, ok = res["access"] - if ok && v != nil { - ls, lsOk := v.([]interface{}) - if !lsOk { - return nil, fmt.Errorf(`expected list for nested field "access"`) - } - return ls, nil - } - return nil, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_reservation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_reservation.go deleted file mode 100644 index 1be41901af..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_reservation.go +++ /dev/null @@ -1,415 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceBigqueryReservationReservation() *schema.Resource { - return &schema.Resource{ - Create: resourceBigqueryReservationReservationCreate, - Read: resourceBigqueryReservationReservationRead, - Update: resourceBigqueryReservationReservationUpdate, - Delete: resourceBigqueryReservationReservationDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBigqueryReservationReservationImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the reservation. This field must only contain alphanumeric characters or dash.`, - }, - "slot_capacity": { - Type: schema.TypeInt, - Required: true, - Description: `Minimum slots available to this reservation. A slot is a unit of computational power in BigQuery, and serves as the -unit of parallelism. Queries using this reservation might use more slots during runtime if ignoreIdleSlots is set to false.`, - }, - "concurrency": { - Type: schema.TypeInt, - Optional: true, - Description: `Maximum number of queries that are allowed to run concurrently in this reservation. This is a soft limit due to asynchronous nature of the system and various optimizations for small queries. Default value is 0 which means that concurrency will be automatically set based on the reservation size.`, - Default: 0, - }, - "ignore_idle_slots": { - Type: schema.TypeBool, - Optional: true, - Description: `If false, any query using this reservation will use idle slots from other reservations within -the same admin project. 
If true, a query using this reservation will execute with the slot -capacity specified above at most.`, - Default: false, - }, - "location": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The geographic location where the transfer config should reside. -Examples: US, EU, asia-northeast1. The default value is US.`, - Default: "US", - }, - "multi_region_auxiliary": { - Type: schema.TypeBool, - Optional: true, - Description: `Applicable only for reservations located within one of the BigQuery multi-regions (US or EU). -If set to true, this reservation is placed in the organization's secondary region which is designated for disaster recovery purposes. If false, this reservation is placed in the organization's default region.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigqueryReservationReservationCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - slotCapacityProp, err := expandBigqueryReservationReservationSlotCapacity(d.Get("slot_capacity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("slot_capacity"); !isEmptyValue(reflect.ValueOf(slotCapacityProp)) && (ok || !reflect.DeepEqual(v, slotCapacityProp)) { - obj["slotCapacity"] = slotCapacityProp - } - ignoreIdleSlotsProp, err := expandBigqueryReservationReservationIgnoreIdleSlots(d.Get("ignore_idle_slots"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ignore_idle_slots"); !isEmptyValue(reflect.ValueOf(ignoreIdleSlotsProp)) && (ok || !reflect.DeepEqual(v, ignoreIdleSlotsProp)) { - obj["ignoreIdleSlots"] = ignoreIdleSlotsProp - } - concurrencyProp, err := expandBigqueryReservationReservationConcurrency(d.Get("concurrency"), d, config) - if 
err != nil { - return err - } else if v, ok := d.GetOkExists("concurrency"); !isEmptyValue(reflect.ValueOf(concurrencyProp)) && (ok || !reflect.DeepEqual(v, concurrencyProp)) { - obj["concurrency"] = concurrencyProp - } - multiRegionAuxiliaryProp, err := expandBigqueryReservationReservationMultiRegionAuxiliary(d.Get("multi_region_auxiliary"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("multi_region_auxiliary"); !isEmptyValue(reflect.ValueOf(multiRegionAuxiliaryProp)) && (ok || !reflect.DeepEqual(v, multiRegionAuxiliaryProp)) { - obj["multiRegionAuxiliary"] = multiRegionAuxiliaryProp - } - - url, err := replaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/reservations?reservationId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Reservation: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Reservation: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/reservations/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Reservation %q: %#v", d.Id(), res) - - return resourceBigqueryReservationReservationRead(d, meta) -} - -func resourceBigqueryReservationReservationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - 
return err - } - - url, err := replaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/reservations/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigqueryReservationReservation %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Reservation: %s", err) - } - - if err := d.Set("slot_capacity", flattenBigqueryReservationReservationSlotCapacity(res["slotCapacity"], d, config)); err != nil { - return fmt.Errorf("Error reading Reservation: %s", err) - } - if err := d.Set("ignore_idle_slots", flattenBigqueryReservationReservationIgnoreIdleSlots(res["ignoreIdleSlots"], d, config)); err != nil { - return fmt.Errorf("Error reading Reservation: %s", err) - } - if err := d.Set("concurrency", flattenBigqueryReservationReservationConcurrency(res["concurrency"], d, config)); err != nil { - return fmt.Errorf("Error reading Reservation: %s", err) - } - if err := d.Set("multi_region_auxiliary", flattenBigqueryReservationReservationMultiRegionAuxiliary(res["multiRegionAuxiliary"], d, config)); err != nil { - return fmt.Errorf("Error reading Reservation: %s", err) - } - - return nil -} - -func resourceBigqueryReservationReservationUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - slotCapacityProp, err := expandBigqueryReservationReservationSlotCapacity(d.Get("slot_capacity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("slot_capacity"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, slotCapacityProp)) { - obj["slotCapacity"] = slotCapacityProp - } - ignoreIdleSlotsProp, err := expandBigqueryReservationReservationIgnoreIdleSlots(d.Get("ignore_idle_slots"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ignore_idle_slots"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ignoreIdleSlotsProp)) { - obj["ignoreIdleSlots"] = ignoreIdleSlotsProp - } - concurrencyProp, err := expandBigqueryReservationReservationConcurrency(d.Get("concurrency"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("concurrency"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, concurrencyProp)) { - obj["concurrency"] = concurrencyProp - } - multiRegionAuxiliaryProp, err := expandBigqueryReservationReservationMultiRegionAuxiliary(d.Get("multi_region_auxiliary"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("multi_region_auxiliary"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, multiRegionAuxiliaryProp)) { - obj["multiRegionAuxiliary"] = multiRegionAuxiliaryProp - } - - url, err := replaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/reservations/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Reservation %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("slot_capacity") { - updateMask = append(updateMask, "slotCapacity") - } - - if d.HasChange("ignore_idle_slots") { - updateMask = append(updateMask, "ignoreIdleSlots") - } - - if 
d.HasChange("concurrency") { - updateMask = append(updateMask, "concurrency") - } - - if d.HasChange("multi_region_auxiliary") { - updateMask = append(updateMask, "multiRegionAuxiliary") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Reservation %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Reservation %q: %#v", d.Id(), res) - } - - return resourceBigqueryReservationReservationRead(d, meta) -} - -func resourceBigqueryReservationReservationDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/reservations/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Reservation %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Reservation") - } - - 
log.Printf("[DEBUG] Finished deleting Reservation %q: %#v", d.Id(), res) - return nil -} - -func resourceBigqueryReservationReservationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/reservations/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/reservations/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenBigqueryReservationReservationSlotCapacity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenBigqueryReservationReservationIgnoreIdleSlots(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryReservationReservationConcurrency(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenBigqueryReservationReservationMultiRegionAuxiliary(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
expandBigqueryReservationReservationSlotCapacity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryReservationReservationIgnoreIdleSlots(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryReservationReservationConcurrency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryReservationReservationMultiRegionAuxiliary(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_routine.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_routine.go deleted file mode 100644 index c38287b406..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_routine.go +++ /dev/null @@ -1,811 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "google.golang.org/api/googleapi" -) - -func ResourceBigQueryRoutine() *schema.Resource { - return &schema.Resource{ - Create: resourceBigQueryRoutineCreate, - Read: resourceBigQueryRoutineRead, - Update: resourceBigQueryRoutineUpdate, - Delete: resourceBigQueryRoutineDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBigQueryRoutineImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "definition_body": { - Type: schema.TypeString, - Required: true, - Description: `The body of the routine. For functions, this is the expression in the AS clause. -If language=SQL, it is the substring inside (but excluding) the parentheses.`, - }, - "dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the dataset containing this routine`, - }, - "routine_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.`, - }, - - "arguments": { - Type: schema.TypeList, - Optional: true, - Description: `Input/output argument of a function or a stored procedure.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "argument_kind": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"FIXED_TYPE", "ANY_TYPE", ""}), - Description: `Defaults to FIXED_TYPE. 
Default value: "FIXED_TYPE" Possible values: ["FIXED_TYPE", "ANY_TYPE"]`, - Default: "FIXED_TYPE", - }, - "data_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringIsJSON, - StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, - Description: `A JSON schema for the data type. Required unless argumentKind = ANY_TYPE. -~>**NOTE**: Because this field expects a JSON string, any changes to the string -will create a diff, even if the JSON itself hasn't changed. If the API returns -a different value for the same schema, e.g. it switched the order of values -or replaced STRUCT field type with RECORD field type, we currently cannot -suppress the recurring diff this causes. As a workaround, we recommend using -the schema as returned by the API.`, - }, - "mode": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"IN", "OUT", "INOUT", ""}), - Description: `Specifies whether the argument is input or output. Can be set for procedures only. Possible values: ["IN", "OUT", "INOUT"]`, - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: `The name of this argument. Can be absent for function return argument.`, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `The description of the routine if defined.`, - }, - "determinism_level": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"DETERMINISM_LEVEL_UNSPECIFIED", "DETERMINISTIC", "NOT_DETERMINISTIC", ""}), - Description: `The determinism level of the JavaScript UDF if defined. Possible values: ["DETERMINISM_LEVEL_UNSPECIFIED", "DETERMINISTIC", "NOT_DETERMINISTIC"]`, - }, - "imported_libraries": { - Type: schema.TypeList, - Optional: true, - Description: `Optional. 
If language = "JAVASCRIPT", this field stores the path of the -imported JAVASCRIPT libraries.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "language": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"SQL", "JAVASCRIPT", ""}), - Description: `The language of the routine. Possible values: ["SQL", "JAVASCRIPT"]`, - }, - "return_table_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringIsJSON, - StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, - Description: `Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". - -If absent, the return table type is inferred from definitionBody at query time in each query -that references this routine. If present, then the columns in the evaluated table result will -be cast to match the column types specificed in return table type, at query time.`, - }, - "return_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringIsJSON, - StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, - Description: `A JSON schema for the return type. Optional if language = "SQL"; required otherwise. -If absent, the return type is inferred from definitionBody at query time in each query -that references this routine. If present, then the evaluated result will be cast to -the specified returned type at query time. ~>**NOTE**: Because this field expects a JSON -string, any changes to the string will create a diff, even if the JSON itself hasn't -changed. If the API returns a different value for the same schema, e.g. it switche -d the order of values or replaced STRUCT field type with RECORD field type, we currently -cannot suppress the recurring diff this causes. 
As a workaround, we recommend using -the schema as returned by the API.`, - }, - "routine_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"SCALAR_FUNCTION", "PROCEDURE", "TABLE_VALUED_FUNCTION", ""}), - Description: `The type of routine. Possible values: ["SCALAR_FUNCTION", "PROCEDURE", "TABLE_VALUED_FUNCTION"]`, - }, - "creation_time": { - Type: schema.TypeInt, - Computed: true, - Description: `The time when this routine was created, in milliseconds since the -epoch.`, - }, - "last_modified_time": { - Type: schema.TypeInt, - Computed: true, - Description: `The time when this routine was modified, in milliseconds since the -epoch.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigQueryRoutineCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - routineReferenceProp, err := expandBigQueryRoutineRoutineReference(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(reflect.ValueOf(routineReferenceProp)) { - obj["routineReference"] = routineReferenceProp - } - routineTypeProp, err := expandBigQueryRoutineRoutineType(d.Get("routine_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("routine_type"); !isEmptyValue(reflect.ValueOf(routineTypeProp)) && (ok || !reflect.DeepEqual(v, routineTypeProp)) { - obj["routineType"] = routineTypeProp - } - languageProp, err := expandBigQueryRoutineLanguage(d.Get("language"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("language"); !isEmptyValue(reflect.ValueOf(languageProp)) && (ok || !reflect.DeepEqual(v, languageProp)) { - obj["language"] = languageProp - } - argumentsProp, err := 
expandBigQueryRoutineArguments(d.Get("arguments"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("arguments"); !isEmptyValue(reflect.ValueOf(argumentsProp)) && (ok || !reflect.DeepEqual(v, argumentsProp)) { - obj["arguments"] = argumentsProp - } - returnTypeProp, err := expandBigQueryRoutineReturnType(d.Get("return_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("return_type"); !isEmptyValue(reflect.ValueOf(returnTypeProp)) && (ok || !reflect.DeepEqual(v, returnTypeProp)) { - obj["returnType"] = returnTypeProp - } - returnTableTypeProp, err := expandBigQueryRoutineReturnTableType(d.Get("return_table_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("return_table_type"); !isEmptyValue(reflect.ValueOf(returnTableTypeProp)) && (ok || !reflect.DeepEqual(v, returnTableTypeProp)) { - obj["returnTableType"] = returnTableTypeProp - } - importedLibrariesProp, err := expandBigQueryRoutineImportedLibraries(d.Get("imported_libraries"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("imported_libraries"); !isEmptyValue(reflect.ValueOf(importedLibrariesProp)) && (ok || !reflect.DeepEqual(v, importedLibrariesProp)) { - obj["importedLibraries"] = importedLibrariesProp - } - definitionBodyProp, err := expandBigQueryRoutineDefinitionBody(d.Get("definition_body"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("definition_body"); !isEmptyValue(reflect.ValueOf(definitionBodyProp)) && (ok || !reflect.DeepEqual(v, definitionBodyProp)) { - obj["definitionBody"] = definitionBodyProp - } - descriptionProp, err := expandBigQueryRoutineDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - determinismLevelProp, err := 
expandBigQueryRoutineDeterminismLevel(d.Get("determinism_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("determinism_level"); !isEmptyValue(reflect.ValueOf(determinismLevelProp)) && (ok || !reflect.DeepEqual(v, determinismLevelProp)) { - obj["determinismLevel"] = determinismLevelProp - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Routine: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Routine: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Routine: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Routine %q: %#v", d.Id(), res) - - return resourceBigQueryRoutineRead(d, meta) -} - -func resourceBigQueryRoutineRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Routine: %s", err) - } - billingProject = project - - // err == nil 
indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigQueryRoutine %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Routine: %s", err) - } - - // Terraform must set the top level schema field, but since this object contains collapsed properties - // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. - if flattenedProp := flattenBigQueryRoutineRoutineReference(res["routineReference"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*googleapi.Error); ok { - return fmt.Errorf("Error reading Routine: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - if err := d.Set("routine_type", flattenBigQueryRoutineRoutineType(res["routineType"], d, config)); err != nil { - return fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("creation_time", flattenBigQueryRoutineCreationTime(res["creationTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("last_modified_time", flattenBigQueryRoutineLastModifiedTime(res["lastModifiedTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("language", flattenBigQueryRoutineLanguage(res["language"], d, config)); err != nil { - return fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("arguments", flattenBigQueryRoutineArguments(res["arguments"], d, config)); err != nil { - return fmt.Errorf("Error reading Routine: %s", err) - } - if err := 
d.Set("return_type", flattenBigQueryRoutineReturnType(res["returnType"], d, config)); err != nil { - return fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("return_table_type", flattenBigQueryRoutineReturnTableType(res["returnTableType"], d, config)); err != nil { - return fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("imported_libraries", flattenBigQueryRoutineImportedLibraries(res["importedLibraries"], d, config)); err != nil { - return fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("definition_body", flattenBigQueryRoutineDefinitionBody(res["definitionBody"], d, config)); err != nil { - return fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("description", flattenBigQueryRoutineDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("determinism_level", flattenBigQueryRoutineDeterminismLevel(res["determinismLevel"], d, config)); err != nil { - return fmt.Errorf("Error reading Routine: %s", err) - } - - return nil -} - -func resourceBigQueryRoutineUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Routine: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - routineReferenceProp, err := expandBigQueryRoutineRoutineReference(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(reflect.ValueOf(routineReferenceProp)) { - obj["routineReference"] = routineReferenceProp - } - routineTypeProp, err := expandBigQueryRoutineRoutineType(d.Get("routine_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("routine_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, routineTypeProp)) { - obj["routineType"] = routineTypeProp - } - languageProp, err := expandBigQueryRoutineLanguage(d.Get("language"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("language"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, languageProp)) { - obj["language"] = languageProp - } - argumentsProp, err := expandBigQueryRoutineArguments(d.Get("arguments"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("arguments"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, argumentsProp)) { - obj["arguments"] = argumentsProp - } - returnTypeProp, err := expandBigQueryRoutineReturnType(d.Get("return_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("return_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, returnTypeProp)) { - obj["returnType"] = returnTypeProp - } - returnTableTypeProp, err := expandBigQueryRoutineReturnTableType(d.Get("return_table_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("return_table_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, returnTableTypeProp)) { - obj["returnTableType"] = returnTableTypeProp - } - importedLibrariesProp, err := expandBigQueryRoutineImportedLibraries(d.Get("imported_libraries"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("imported_libraries"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, importedLibrariesProp)) { - obj["importedLibraries"] = importedLibrariesProp - } - definitionBodyProp, err := expandBigQueryRoutineDefinitionBody(d.Get("definition_body"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("definition_body"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, definitionBodyProp)) { - obj["definitionBody"] = definitionBodyProp - } - descriptionProp, err := 
expandBigQueryRoutineDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - determinismLevelProp, err := expandBigQueryRoutineDeterminismLevel(d.Get("determinism_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("determinism_level"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, determinismLevelProp)) { - obj["determinismLevel"] = determinismLevelProp - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Routine %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Routine %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Routine %q: %#v", d.Id(), res) - } - - return resourceBigQueryRoutineRead(d, meta) -} - -func resourceBigQueryRoutineDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Routine: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Routine %q", d.Id()) 
- - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Routine") - } - - log.Printf("[DEBUG] Finished deleting Routine %q: %#v", d.Id(), res) - return nil -} - -func resourceBigQueryRoutineImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/datasets/(?P[^/]+)/routines/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenBigQueryRoutineRoutineReference(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_id"] = - flattenBigQueryRoutineRoutineReferenceDatasetId(original["datasetId"], d, config) - transformed["routine_id"] = - flattenBigQueryRoutineRoutineReferenceRoutineId(original["routineId"], d, config) - return []interface{}{transformed} -} -func flattenBigQueryRoutineRoutineReferenceDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineRoutineReferenceRoutineId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineRoutineType(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenBigQueryRoutineCreationTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenBigQueryRoutineLastModifiedTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenBigQueryRoutineLanguage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineArguments(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenBigQueryRoutineArgumentsName(original["name"], d, config), - "argument_kind": flattenBigQueryRoutineArgumentsArgumentKind(original["argumentKind"], d, config), - "mode": flattenBigQueryRoutineArgumentsMode(original["mode"], d, config), - "data_type": flattenBigQueryRoutineArgumentsDataType(original["dataType"], d, config), - }) - } - return transformed -} -func flattenBigQueryRoutineArgumentsName(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenBigQueryRoutineArgumentsArgumentKind(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineArgumentsMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineArgumentsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - b, err := json.Marshal(v) - if err != nil { - // TODO: return error once https://github.com/GoogleCloudPlatform/magic-modules/issues/3257 is fixed. - log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) - } - return string(b) -} - -func flattenBigQueryRoutineReturnType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - b, err := json.Marshal(v) - if err != nil { - // TODO: return error once https://github.com/GoogleCloudPlatform/magic-modules/issues/3257 is fixed. - log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) - } - return string(b) -} - -func flattenBigQueryRoutineReturnTableType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - b, err := json.Marshal(v) - if err != nil { - // TODO: return error once https://github.com/GoogleCloudPlatform/magic-modules/issues/3257 is fixed. 
- log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) - } - return string(b) -} - -func flattenBigQueryRoutineImportedLibraries(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineDefinitionBody(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineDeterminismLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBigQueryRoutineRoutineReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - transformed := make(map[string]interface{}) - transformed["datasetId"] = d.Get("dataset_id") - project, _ := getProject(d, config) - transformed["projectId"] = project - transformed["routineId"] = d.Get("routine_id") - - return transformed, nil -} - -func expandBigQueryRoutineRoutineType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineLanguage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineArguments(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandBigQueryRoutineArgumentsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedArgumentKind, err := expandBigQueryRoutineArgumentsArgumentKind(original["argument_kind"], d, config) - if err != nil { - return 
nil, err - } else if val := reflect.ValueOf(transformedArgumentKind); val.IsValid() && !isEmptyValue(val) { - transformed["argumentKind"] = transformedArgumentKind - } - - transformedMode, err := expandBigQueryRoutineArgumentsMode(original["mode"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { - transformed["mode"] = transformedMode - } - - transformedDataType, err := expandBigQueryRoutineArgumentsDataType(original["data_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { - transformed["dataType"] = transformedDataType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandBigQueryRoutineArgumentsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineArgumentsArgumentKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineArgumentsMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineArgumentsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - b := []byte(v.(string)) - if len(b) == 0 { - return nil, nil - } - m := make(map[string]interface{}) - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return m, nil -} - -func expandBigQueryRoutineReturnType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - b := []byte(v.(string)) - if len(b) == 0 { - return nil, nil - } - m := make(map[string]interface{}) - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return m, nil -} - -func expandBigQueryRoutineReturnTableType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - b := []byte(v.(string)) - if 
len(b) == 0 { - return nil, nil - } - m := make(map[string]interface{}) - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return m, nil -} - -func expandBigQueryRoutineImportedLibraries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineDefinitionBody(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineDeterminismLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_app_profile.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_app_profile.go deleted file mode 100644 index 0ff339ff74..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_app_profile.go +++ /dev/null @@ -1,520 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/bigtableadmin/v2" -) - -func ResourceBigtableAppProfile() *schema.Resource { - return &schema.Resource{ - Create: resourceBigtableAppProfileCreate, - Read: resourceBigtableAppProfileRead, - Update: resourceBigtableAppProfileUpdate, - Delete: resourceBigtableAppProfileDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBigtableAppProfileImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "app_profile_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The unique name of the app profile in the form '[_a-zA-Z0-9][-_.a-zA-Z0-9]*'.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Long form description of the use case for this app profile.`, - }, - "ignore_warnings": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, ignore safety checks when deleting/updating the app profile.`, - Default: false, - }, - "instance": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - Description: `The name of the instance to create the app profile within.`, - }, - "multi_cluster_routing_use_any": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, read/write requests are routed to the nearest cluster in the instance, and will fail over to the nearest cluster that is available -in the event of transient errors or delays. Clusters in a region are considered equidistant. 
Choosing this option sacrifices read-your-writes -consistency to improve availability.`, - ExactlyOneOf: []string{"single_cluster_routing", "multi_cluster_routing_use_any"}, - }, - "single_cluster_routing": { - Type: schema.TypeList, - Optional: true, - Description: `Use a single-cluster routing policy.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cluster_id": { - Type: schema.TypeString, - Required: true, - Description: `The cluster to which read/write requests should be routed.`, - }, - "allow_transactional_writes": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, CheckAndMutateRow and ReadModifyWriteRow requests are allowed by this app profile. -It is unsafe to send these requests to the same table/row/column in multiple clusters.`, - }, - }, - }, - ExactlyOneOf: []string{"single_cluster_routing", "multi_cluster_routing_use_any"}, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The unique name of the requested app profile. Values are of the form 'projects//instances//appProfiles/'.`, - }, - "multi_cluster_routing_cluster_ids": { - Type: schema.TypeList, - Optional: true, - Description: `The set of clusters to route to. The order is ignored; clusters will be tried in order of distance. 
If left empty, all clusters are eligible.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - ConflictsWith: []string{"single_cluster_routing"}, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigtableAppProfileCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandBigtableAppProfileDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - multiClusterRoutingUseAnyProp, err := expandBigtableAppProfileMultiClusterRoutingUseAny(d.Get("multi_cluster_routing_use_any"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("multi_cluster_routing_use_any"); !isEmptyValue(reflect.ValueOf(multiClusterRoutingUseAnyProp)) && (ok || !reflect.DeepEqual(v, multiClusterRoutingUseAnyProp)) { - obj["multiClusterRoutingUseAny"] = multiClusterRoutingUseAnyProp - } - singleClusterRoutingProp, err := expandBigtableAppProfileSingleClusterRouting(d.Get("single_cluster_routing"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("single_cluster_routing"); !isEmptyValue(reflect.ValueOf(singleClusterRoutingProp)) && (ok || !reflect.DeepEqual(v, singleClusterRoutingProp)) { - obj["singleClusterRouting"] = singleClusterRoutingProp - } - - obj, err = resourceBigtableAppProfileEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigtableBasePath}}projects/{{project}}/instances/{{instance}}/appProfiles?appProfileId={{app_profile_id}}&ignoreWarnings={{ignore_warnings}}") - if err 
!= nil { - return err - } - - log.Printf("[DEBUG] Creating new AppProfile: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppProfile: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating AppProfile: %s", err) - } - if err := d.Set("name", flattenBigtableAppProfileName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating AppProfile %q: %#v", d.Id(), res) - - return resourceBigtableAppProfileRead(d, meta) -} - -func resourceBigtableAppProfileRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigtableBasePath}}projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppProfile: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return 
handleNotFoundError(err, d, fmt.Sprintf("BigtableAppProfile %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading AppProfile: %s", err) - } - - if err := d.Set("name", flattenBigtableAppProfileName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading AppProfile: %s", err) - } - if err := d.Set("description", flattenBigtableAppProfileDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading AppProfile: %s", err) - } - if err := d.Set("multi_cluster_routing_use_any", flattenBigtableAppProfileMultiClusterRoutingUseAny(res["multiClusterRoutingUseAny"], d, config)); err != nil { - return fmt.Errorf("Error reading AppProfile: %s", err) - } - if err := d.Set("single_cluster_routing", flattenBigtableAppProfileSingleClusterRouting(res["singleClusterRouting"], d, config)); err != nil { - return fmt.Errorf("Error reading AppProfile: %s", err) - } - - return nil -} - -func resourceBigtableAppProfileUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppProfile: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandBigtableAppProfileDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - multiClusterRoutingUseAnyProp, err := expandBigtableAppProfileMultiClusterRoutingUseAny(d.Get("multi_cluster_routing_use_any"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("multi_cluster_routing_use_any"); !isEmptyValue(reflect.ValueOf(v)) 
&& (ok || !reflect.DeepEqual(v, multiClusterRoutingUseAnyProp)) { - obj["multiClusterRoutingUseAny"] = multiClusterRoutingUseAnyProp - } - singleClusterRoutingProp, err := expandBigtableAppProfileSingleClusterRouting(d.Get("single_cluster_routing"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("single_cluster_routing"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, singleClusterRoutingProp)) { - obj["singleClusterRouting"] = singleClusterRoutingProp - } - - obj, err = resourceBigtableAppProfileEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigtableBasePath}}projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}?ignoreWarnings={{ignore_warnings}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating AppProfile %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("multi_cluster_routing_use_any") { - updateMask = append(updateMask, "multiClusterRoutingUseAny") - } - - if d.HasChange("single_cluster_routing") { - updateMask = append(updateMask, "singleClusterRouting") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if d.HasChange("multi_cluster_routing_cluster_ids") && !stringInSlice(updateMask, "multiClusterRoutingUseAny") { - updateMask = append(updateMask, "multiClusterRoutingUseAny") - } - - // this api requires the body to define something for all values passed into - // the update mask, however, multi-cluster routing and single-cluster routing - // are conflicting, so we can't have them both in the update mask, despite - // both of them registering as changing. thus, we need to remove whichever - // one is not defined. 
- newRouting, oldRouting := d.GetChange("multi_cluster_routing_use_any") - if newRouting != oldRouting { - for i, val := range updateMask { - if val == "multiClusterRoutingUseAny" && newRouting.(bool) || - val == "singleClusterRouting" && oldRouting.(bool) { - updateMask = append(updateMask[0:i], updateMask[i+1:]...) - break - } - } - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating AppProfile %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating AppProfile %q: %#v", d.Id(), res) - } - - return resourceBigtableAppProfileRead(d, meta) -} - -func resourceBigtableAppProfileDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AppProfile: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigtableBasePath}}projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}?ignoreWarnings={{ignore_warnings}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting AppProfile %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", 
billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "AppProfile") - } - - log.Printf("[DEBUG] Finished deleting AppProfile %q: %#v", d.Id(), res) - return nil -} - -func resourceBigtableAppProfileImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)/appProfiles/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenBigtableAppProfileName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigtableAppProfileDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigtableAppProfileMultiClusterRoutingUseAny(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return false - } - - if v.(map[string]interface{})["clusterIds"] == nil { - return true - } - - if len(v.(map[string]interface{})["clusterIds"].([]interface{})) > 0 { - if err := d.Set("multi_cluster_routing_cluster_ids", v.(map[string]interface{})["clusterIds"]); err != nil { - return true - } - } - - return true -} - -func flattenBigtableAppProfileSingleClusterRouting(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cluster_id"] = - 
flattenBigtableAppProfileSingleClusterRoutingClusterId(original["clusterId"], d, config) - transformed["allow_transactional_writes"] = - flattenBigtableAppProfileSingleClusterRoutingAllowTransactionalWrites(original["allowTransactionalWrites"], d, config) - return []interface{}{transformed} -} -func flattenBigtableAppProfileSingleClusterRoutingClusterId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigtableAppProfileSingleClusterRoutingAllowTransactionalWrites(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBigtableAppProfileDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigtableAppProfileMultiClusterRoutingUseAny(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || !v.(bool) { - return nil, nil - } - - obj := bigtableadmin.MultiClusterRoutingUseAny{} - - clusterIds := d.Get("multi_cluster_routing_cluster_ids").([]interface{}) - - for _, id := range clusterIds { - obj.ClusterIds = append(obj.ClusterIds, id.(string)) - } - - return obj, nil -} - -func expandBigtableAppProfileSingleClusterRouting(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedClusterId, err := expandBigtableAppProfileSingleClusterRoutingClusterId(original["cluster_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedClusterId); val.IsValid() && !isEmptyValue(val) { - transformed["clusterId"] = transformedClusterId - } - - transformedAllowTransactionalWrites, err := expandBigtableAppProfileSingleClusterRoutingAllowTransactionalWrites(original["allow_transactional_writes"], d, config) - if err != nil { - return 
nil, err - } else if val := reflect.ValueOf(transformedAllowTransactionalWrites); val.IsValid() && !isEmptyValue(val) { - transformed["allowTransactionalWrites"] = transformedAllowTransactionalWrites - } - - return transformed, nil -} - -func expandBigtableAppProfileSingleClusterRoutingClusterId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigtableAppProfileSingleClusterRoutingAllowTransactionalWrites(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceBigtableAppProfileEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // Instance is a URL parameter only, so replace self-link/path with resource name only. - if err := d.Set("instance", GetResourceNameFromSelfLink(d.Get("instance").(string))); err != nil { - return nil, fmt.Errorf("Error setting instance: %s", err) - } - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_table.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_table.go deleted file mode 100644 index a3af79bc3b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_table.go +++ /dev/null @@ -1,354 +0,0 @@ -package google - -import ( - "context" - "fmt" - "log" - "time" - - "cloud.google.com/go/bigtable" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceBigtableTable() *schema.Resource { - return &schema.Resource{ - Create: resourceBigtableTableCreate, - Read: resourceBigtableTableRead, - Update: resourceBigtableTableUpdate, - Delete: resourceBigtableTableDestroy, - - Importer: &schema.ResourceImporter{ - State: resourceBigtableTableImport, - }, - - // 
Set a longer timeout for table creation as adding column families can be slow. - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(45 * time.Minute), - }, - - // ---------------------------------------------------------------------- - // IMPORTANT: Do not add any additional ForceNew fields to this resource. - // Destroying/recreating tables can lead to data loss for users. - // ---------------------------------------------------------------------- - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the table. Must be 1-50 characters and must only contain hyphens, underscores, periods, letters and numbers.`, - }, - - "column_family": { - Type: schema.TypeSet, - Optional: true, - Description: `A group of columns within a table which share a common configuration. This can be specified multiple times.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "family": { - Type: schema.TypeString, - Required: true, - Description: `The name of the column family.`, - }, - }, - }, - }, - - "instance_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - Description: `The name of the Bigtable instance.`, - }, - - "split_keys": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `A list of predefined keys to split the table on. !> Warning: Modifying the split_keys of an existing table will cause Terraform to delete/recreate the entire google_bigtable_table resource.`, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - - "deletion_protection": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice([]string{"PROTECTED", "UNPROTECTED"}, false), - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `A field to make the table protected against data loss i.e. when set to PROTECTED, deleting the table, the column families in the table, and the instance containing the table would be prohibited. If not provided, currently deletion protection will be set to UNPROTECTED as it is the API default value.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigtableTableCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - ctx := context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return fmt.Errorf("Error starting admin client. %s", err) - } - if err := d.Set("instance_name", instanceName); err != nil { - return fmt.Errorf("Error setting instance_name: %s", err) - } - - defer c.Close() - - tableId := d.Get("name").(string) - tblConf := bigtable.TableConf{TableID: tableId} - - // Check if deletion protection is given - // If not given, currently tblConf.DeletionProtection will be set to false in the API - deletionProtection := d.Get("deletion_protection") - if deletionProtection == "PROTECTED" { - tblConf.DeletionProtection = bigtable.Protected - } else if deletionProtection == "UNPROTECTED" { - tblConf.DeletionProtection = bigtable.Unprotected - } - - // Set the split keys if given. 
- if v, ok := d.GetOk("split_keys"); ok { - tblConf.SplitKeys = convertStringArr(v.([]interface{})) - } - - // Set the column families if given. - columnFamilies := make(map[string]bigtable.GCPolicy) - if d.Get("column_family.#").(int) > 0 { - columns := d.Get("column_family").(*schema.Set).List() - - for _, co := range columns { - column := co.(map[string]interface{}) - - if v, ok := column["family"]; ok { - // By default, there is no GC rules. - columnFamilies[v.(string)] = bigtable.NoGcPolicy() - } - } - } - tblConf.Families = columnFamilies - - // This method may return before the table's creation is complete - we may need to wait until - // it exists in the future. - // Set a longer timeout as creating table and adding column families can be pretty slow. - ctxWithTimeout, cancel := context.WithTimeout(ctx, 20*time.Minute) - defer cancel() // Always call cancel. - err = c.CreateTableFromConf(ctxWithTimeout, &tblConf) - if err != nil { - return fmt.Errorf("Error creating table. %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return resourceBigtableTableRead(d, meta) -} - -func resourceBigtableTableRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - ctx := context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return fmt.Errorf("Error starting admin client. 
%s", err) - } - - defer c.Close() - - name := d.Get("name").(string) - table, err := c.TableInfo(ctx, name) - if err != nil { - if isNotFoundGrpcError(err) { - log.Printf("[WARN] Removing %s because it's gone", name) - d.SetId("") - return nil - } - return err - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("column_family", flattenColumnFamily(table.Families)); err != nil { - return fmt.Errorf("Error setting column_family: %s", err) - } - - deletionProtection := table.DeletionProtection - if deletionProtection == bigtable.Protected { - if err := d.Set("deletion_protection", "PROTECTED"); err != nil { - return fmt.Errorf("Error setting deletion_protection: %s", err) - } - } else if deletionProtection == bigtable.Unprotected { - if err := d.Set("deletion_protection", "UNPROTECTED"); err != nil { - return fmt.Errorf("Error setting deletion_protection: %s", err) - } - } else { - return fmt.Errorf("Error setting deletion_protection, it should be either PROTECTED or UNPROTECTED") - } - return nil -} - -func resourceBigtableTableUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - ctx := context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return fmt.Errorf("Error starting admin client. 
%s", err) - } - defer c.Close() - - o, n := d.GetChange("column_family") - oSet := o.(*schema.Set) - nSet := n.(*schema.Set) - name := d.Get("name").(string) - - // Add column families that are in new but not in old - for _, new := range nSet.Difference(oSet).List() { - column := new.(map[string]interface{}) - - if v, ok := column["family"]; ok { - log.Printf("[DEBUG] adding column family %q", v) - if err := c.CreateColumnFamily(ctx, name, v.(string)); err != nil { - return fmt.Errorf("Error creating column family %q: %s", v, err) - } - } - } - - // Remove column families that are in old but not in new - for _, old := range oSet.Difference(nSet).List() { - column := old.(map[string]interface{}) - - if v, ok := column["family"]; ok { - log.Printf("[DEBUG] removing column family %q", v) - if err := c.DeleteColumnFamily(ctx, name, v.(string)); err != nil { - return fmt.Errorf("Error deleting column family %q: %s", v, err) - } - } - } - - if d.HasChange("deletion_protection") { - deletionProtection := d.Get("deletion_protection") - if deletionProtection == "PROTECTED" { - if err := c.UpdateTableWithDeletionProtection(ctx, name, bigtable.Protected); err != nil { - return fmt.Errorf("Error updating deletion protection in table %v: %s", name, err) - } - } else if deletionProtection == "UNPROTECTED" { - if err := c.UpdateTableWithDeletionProtection(ctx, name, bigtable.Unprotected); err != nil { - return fmt.Errorf("Error updating deletion protection in table %v: %s", name, err) - } - } - } - - return resourceBigtableTableRead(d, meta) -} - -func resourceBigtableTableDestroy(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - ctx := context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := 
config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return fmt.Errorf("Error starting admin client. %s", err) - } - - defer c.Close() - - name := d.Get("name").(string) - err = c.DeleteTable(ctx, name) - if err != nil { - return fmt.Errorf("Error deleting table. %s", err) - } - - d.SetId("") - - return nil -} - -func flattenColumnFamily(families []string) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(families)) - - for _, f := range families { - data := make(map[string]interface{}) - data["family"] = f - result = append(result, data) - } - - return result -} - -// TODO(rileykarson): Fix the stored import format after rebasing 3.0.0 -func resourceBigtableTableImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)/tables/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_binary_authorization_attestor.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_binary_authorization_attestor.go deleted file mode 100644 index a4ef86c8c1..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_binary_authorization_attestor.go +++ /dev/null @@ -1,670 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// 
---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func compareSignatureAlgorithm(_, old, new string, _ *schema.ResourceData) bool { - // See https://cloud.google.com/binary-authorization/docs/reference/rest/v1/projects.attestors#signaturealgorithm - normalizedAlgorithms := map[string]string{ - "ECDSA_P256_SHA256": "ECDSA_P256_SHA256", - "EC_SIGN_P256_SHA256": "ECDSA_P256_SHA256", - "ECDSA_P384_SHA384": "ECDSA_P384_SHA384", - "EC_SIGN_P384_SHA384": "ECDSA_P384_SHA384", - "ECDSA_P521_SHA512": "ECDSA_P521_SHA512", - "EC_SIGN_P521_SHA512": "ECDSA_P521_SHA512", - } - - normalizedOld := old - normalizedNew := new - - if normalized, ok := normalizedAlgorithms[old]; ok { - normalizedOld = normalized - } - if normalized, ok := normalizedAlgorithms[new]; ok { - normalizedNew = normalized - } - - if normalizedNew == normalizedOld { - return true - } - - return false -} - -func ResourceBinaryAuthorizationAttestor() *schema.Resource { - return &schema.Resource{ - Create: resourceBinaryAuthorizationAttestorCreate, - Read: resourceBinaryAuthorizationAttestorRead, - Update: resourceBinaryAuthorizationAttestorUpdate, - Delete: resourceBinaryAuthorizationAttestorDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBinaryAuthorizationAttestorImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - 
"attestation_authority_note": { - Type: schema.TypeList, - Required: true, - Description: `A Container Analysis ATTESTATION_AUTHORITY Note, created by the user.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "note_reference": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The resource name of a ATTESTATION_AUTHORITY Note, created by the -user. If the Note is in a different project from the Attestor, it -should be specified in the format 'projects/*/notes/*' (or the legacy -'providers/*/notes/*'). This field may not be updated. -An attestation by this attestor is stored as a Container Analysis -ATTESTATION_AUTHORITY Occurrence that names a container image -and that links to this Note.`, - }, - "public_keys": { - Type: schema.TypeList, - Optional: true, - Description: `Public keys that verify attestations signed by this attestor. This -field may be updated. -If this field is non-empty, one of the specified public keys must -verify that an attestation was signed by this attestor for the -image specified in the admission request. -If this field is empty, this attestor always returns that no valid -attestations exist.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ascii_armored_pgp_public_key": { - Type: schema.TypeString, - Optional: true, - Description: `ASCII-armored representation of a PGP public key, as the -entire output by the command -'gpg --export --armor foo@example.com' (either LF or CRLF -line endings). When using this field, id should be left -blank. The BinAuthz API handlers will calculate the ID -and fill it in automatically. BinAuthz computes this ID -as the OpenPGP RFC4880 V4 fingerprint, represented as -upper-case hex. If id is provided by the caller, it will -be overwritten by the API-calculated ID.`, - }, - "comment": { - Type: schema.TypeString, - Optional: true, - Description: `A descriptive comment. 
This field may be updated.`, - }, - "id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `The ID of this public key. Signatures verified by BinAuthz -must include the ID of the public key that can be used to -verify them, and that ID must match the contents of this -field exactly. Additional restrictions on this field can -be imposed based on which public key type is encapsulated. -See the documentation on publicKey cases below for details.`, - }, - "pkix_public_key": { - Type: schema.TypeList, - Optional: true, - Description: `A raw PKIX SubjectPublicKeyInfo format public key. - -NOTE: id may be explicitly provided by the caller when using this -type of public key, but it MUST be a valid RFC3986 URI. If id is left -blank, a default one will be computed based on the digest of the DER -encoding of the public key.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "public_key_pem": { - Type: schema.TypeString, - Optional: true, - Description: `A PEM-encoded public key, as described in -'https://tools.ietf.org/html/rfc7468#section-13'`, - }, - "signature_algorithm": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSignatureAlgorithm, - Description: `The signature algorithm used to verify a message against -a signature using this key. These signature algorithm must -match the structure and any object identifiers encoded in -publicKeyPem (i.e. this algorithm must match that of the -public key).`, - }, - }, - }, - }, - }, - }, - }, - "delegation_service_account_email": { - Type: schema.TypeString, - Computed: true, - Description: `This field will contain the service account email address that -this Attestor will use as the principal when querying Container -Analysis. Attestor administrators must grant this service account -the IAM role needed to read attestations from the noteReference in -Container Analysis (containeranalysis.notes.occurrences.viewer). 
-This email address is fixed for the lifetime of the Attestor, but -callers should not make any other assumptions about the service -account email; future versions may use an email based on a -different naming pattern.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A descriptive comment. This field may be updated. The field may be -displayed in chooser dialogs.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBinaryAuthorizationAttestorCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandBinaryAuthorizationAttestorName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandBinaryAuthorizationAttestorDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - userOwnedGrafeasNoteProp, err := expandBinaryAuthorizationAttestorAttestationAuthorityNote(d.Get("attestation_authority_note"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attestation_authority_note"); !isEmptyValue(reflect.ValueOf(userOwnedGrafeasNoteProp)) && (ok || !reflect.DeepEqual(v, userOwnedGrafeasNoteProp)) { - obj["userOwnedGrafeasNote"] = userOwnedGrafeasNoteProp - } - - url, err := replaceVars(d, 
config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/attestors?attestorId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Attestor: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Attestor: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Attestor: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/attestors/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Attestor %q: %#v", d.Id(), res) - - return resourceBinaryAuthorizationAttestorRead(d, meta) -} - -func resourceBinaryAuthorizationAttestorRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/attestors/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Attestor: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BinaryAuthorizationAttestor %q", d.Id())) - } - - if err := d.Set("project", 
project); err != nil { - return fmt.Errorf("Error reading Attestor: %s", err) - } - - if err := d.Set("name", flattenBinaryAuthorizationAttestorName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Attestor: %s", err) - } - if err := d.Set("description", flattenBinaryAuthorizationAttestorDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Attestor: %s", err) - } - if err := d.Set("attestation_authority_note", flattenBinaryAuthorizationAttestorAttestationAuthorityNote(res["userOwnedGrafeasNote"], d, config)); err != nil { - return fmt.Errorf("Error reading Attestor: %s", err) - } - - return nil -} - -func resourceBinaryAuthorizationAttestorUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Attestor: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandBinaryAuthorizationAttestorName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandBinaryAuthorizationAttestorDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - userOwnedGrafeasNoteProp, err := expandBinaryAuthorizationAttestorAttestationAuthorityNote(d.Get("attestation_authority_note"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attestation_authority_note"); !isEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, userOwnedGrafeasNoteProp)) { - obj["userOwnedGrafeasNote"] = userOwnedGrafeasNoteProp - } - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/attestors/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Attestor %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Attestor %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Attestor %q: %#v", d.Id(), res) - } - - return resourceBinaryAuthorizationAttestorRead(d, meta) -} - -func resourceBinaryAuthorizationAttestorDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Attestor: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/attestors/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Attestor %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Attestor") - } - - log.Printf("[DEBUG] Finished deleting Attestor %q: %#v", d.Id(), res) - return nil -} - -func resourceBinaryAuthorizationAttestorImport(d 
*schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/attestors/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/attestors/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenBinaryAuthorizationAttestorName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenBinaryAuthorizationAttestorDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNote(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["note_reference"] = - flattenBinaryAuthorizationAttestorAttestationAuthorityNoteNoteReference(original["noteReference"], d, config) - transformed["public_keys"] = - flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(original["publicKeys"], d, config) - transformed["delegation_service_account_email"] = - flattenBinaryAuthorizationAttestorAttestationAuthorityNoteDelegationServiceAccountEmail(original["delegationServiceAccountEmail"], d, config) - return []interface{}{transformed} -} -func flattenBinaryAuthorizationAttestorAttestationAuthorityNoteNoteReference(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil 
{ - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "comment": flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysComment(original["comment"], d, config), - "id": flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysId(original["id"], d, config), - "ascii_armored_pgp_public_key": flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysAsciiArmoredPgpPublicKey(original["asciiArmoredPgpPublicKey"], d, config), - "pkix_public_key": flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKey(original["pkixPublicKey"], d, config), - }) - } - return transformed -} -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysComment(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysAsciiArmoredPgpPublicKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["public_key_pem"] = - flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeyPublicKeyPem(original["publicKeyPem"], d, config) - transformed["signature_algorithm"] = - 
flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeySignatureAlgorithm(original["signatureAlgorithm"], d, config) - return []interface{}{transformed} -} -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeyPublicKeyPem(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeySignatureAlgorithm(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNoteDelegationServiceAccountEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBinaryAuthorizationAttestorName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/attestors/{{name}}") -} - -func expandBinaryAuthorizationAttestorDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNote(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNoteReference, err := expandBinaryAuthorizationAttestorAttestationAuthorityNoteNoteReference(original["note_reference"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNoteReference); val.IsValid() && !isEmptyValue(val) { - transformed["noteReference"] = transformedNoteReference - } - - transformedPublicKeys, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(original["public_keys"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPublicKeys); 
val.IsValid() && !isEmptyValue(val) { - transformed["publicKeys"] = transformedPublicKeys - } - - transformedDelegationServiceAccountEmail, err := expandBinaryAuthorizationAttestorAttestationAuthorityNoteDelegationServiceAccountEmail(original["delegation_service_account_email"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDelegationServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { - transformed["delegationServiceAccountEmail"] = transformedDelegationServiceAccountEmail - } - - return transformed, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNoteNoteReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - r := regexp.MustCompile("projects/(.+)/notes/(.+)") - if r.MatchString(v.(string)) { - return v.(string), nil - } - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - return fmt.Sprintf("projects/%s/notes/%s", project, v.(string)), nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedComment, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysComment(original["comment"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedComment); val.IsValid() && !isEmptyValue(val) { - transformed["comment"] = transformedComment - } - - transformedId, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - 
transformedAsciiArmoredPgpPublicKey, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysAsciiArmoredPgpPublicKey(original["ascii_armored_pgp_public_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAsciiArmoredPgpPublicKey); val.IsValid() && !isEmptyValue(val) { - transformed["asciiArmoredPgpPublicKey"] = transformedAsciiArmoredPgpPublicKey - } - - transformedPkixPublicKey, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKey(original["pkix_public_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPkixPublicKey); val.IsValid() && !isEmptyValue(val) { - transformed["pkixPublicKey"] = transformedPkixPublicKey - } - - req = append(req, transformed) - } - return req, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysComment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysAsciiArmoredPgpPublicKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPublicKeyPem, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeyPublicKeyPem(original["public_key_pem"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedPublicKeyPem); val.IsValid() && !isEmptyValue(val) { - transformed["publicKeyPem"] = transformedPublicKeyPem - } - - transformedSignatureAlgorithm, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeySignatureAlgorithm(original["signature_algorithm"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSignatureAlgorithm); val.IsValid() && !isEmptyValue(val) { - transformed["signatureAlgorithm"] = transformedSignatureAlgorithm - } - - return transformed, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeyPublicKeyPem(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeySignatureAlgorithm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNoteDelegationServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_binary_authorization_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_binary_authorization_policy.go deleted file mode 100644 index d70e727d99..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_binary_authorization_policy.go +++ /dev/null @@ -1,744 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is 
regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "bytes" - "fmt" - "log" - "reflect" - "regexp" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func defaultBinaryAuthorizationPolicy(project string) map[string]interface{} { - return map[string]interface{}{ - "name": fmt.Sprintf("projects/%s/policy", project), - "admissionWhitelistPatterns": []interface{}{ - map[string]interface{}{ - "namePattern": "gcr.io/google_containers/*", - }, - }, - "defaultAdmissionRule": map[string]interface{}{ - "evaluationMode": "ALWAYS_ALLOW", - "enforcementMode": "ENFORCED_BLOCK_AND_AUDIT_LOG", - }, - } -} - -func ResourceBinaryAuthorizationPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceBinaryAuthorizationPolicyCreate, - Read: resourceBinaryAuthorizationPolicyRead, - Update: resourceBinaryAuthorizationPolicyUpdate, - Delete: resourceBinaryAuthorizationPolicyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceBinaryAuthorizationPolicyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "default_admission_rule": { - Type: schema.TypeList, - Required: true, - Description: `Default admission rule for a cluster without a per-cluster admission -rule.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enforcement_mode": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"ENFORCED_BLOCK_AND_AUDIT_LOG", "DRYRUN_AUDIT_LOG_ONLY"}), - Description: `The action when a pod creation is denied by the admission rule. 
Possible values: ["ENFORCED_BLOCK_AND_AUDIT_LOG", "DRYRUN_AUDIT_LOG_ONLY"]`, - }, - "evaluation_mode": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"ALWAYS_ALLOW", "REQUIRE_ATTESTATION", "ALWAYS_DENY"}), - Description: `How this admission rule will be evaluated. Possible values: ["ALWAYS_ALLOW", "REQUIRE_ATTESTATION", "ALWAYS_DENY"]`, - }, - "require_attestations_by": { - Type: schema.TypeSet, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The resource names of the attestors that must attest to a -container image. If the attestor is in a different project from the -policy, it should be specified in the format 'projects/*/attestors/*'. -Each attestor must exist before a policy can reference it. To add an -attestor to a policy the principal issuing the policy change -request must be able to read the attestor resource. - -Note: this field must be non-empty when the evaluation_mode field -specifies REQUIRE_ATTESTATION, otherwise it must be empty.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: selfLinkNameHash, - }, - }, - }, - }, - "admission_whitelist_patterns": { - Type: schema.TypeList, - Optional: true, - Description: `A whitelist of image patterns to exclude from admission rules. If an -image's name matches a whitelist pattern, the image's admission -requests will always be permitted regardless of your admission rules.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name_pattern": { - Type: schema.TypeString, - Required: true, - Description: `An image name pattern to whitelist, in the form -'registry/path/to/image'. This supports a trailing * as a -wildcard, but this is allowed only in text after the registry/ -part.`, - }, - }, - }, - }, - "cluster_admission_rules": { - Type: schema.TypeSet, - Optional: true, - Description: `Per-cluster admission rules. 
An admission rule specifies either that -all container images used in a pod creation request must be attested -to by one or more attestors, that all pod creations will be allowed, -or that all pod creations will be denied. There can be at most one -admission rule per cluster spec. - - -Identifier format: '{{location}}.{{clusterId}}'. -A location is either a compute zone (e.g. 'us-central1-a') or a region -(e.g. 'us-central1').`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cluster": { - Type: schema.TypeString, - Required: true, - }, - "enforcement_mode": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"ENFORCED_BLOCK_AND_AUDIT_LOG", "DRYRUN_AUDIT_LOG_ONLY"}), - Description: `The action when a pod creation is denied by the admission rule. Possible values: ["ENFORCED_BLOCK_AND_AUDIT_LOG", "DRYRUN_AUDIT_LOG_ONLY"]`, - }, - "evaluation_mode": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"ALWAYS_ALLOW", "REQUIRE_ATTESTATION", "ALWAYS_DENY"}), - Description: `How this admission rule will be evaluated. Possible values: ["ALWAYS_ALLOW", "REQUIRE_ATTESTATION", "ALWAYS_DENY"]`, - }, - "require_attestations_by": { - Type: schema.TypeSet, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The resource names of the attestors that must attest to a -container image. If the attestor is in a different project from the -policy, it should be specified in the format 'projects/*/attestors/*'. -Each attestor must exist before a policy can reference it. To add an -attestor to a policy the principal issuing the policy change -request must be able to read the attestor resource. 
- -Note: this field must be non-empty when the evaluation_mode field -specifies REQUIRE_ATTESTATION, otherwise it must be empty.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: selfLinkNameHash, - }, - }, - }, - Set: func(v interface{}) int { - // require_attestations_by is a set of strings that can have the format - // projects/{project}/attestors/{attestor} or {attestor}. We diffsuppress - // and hash that set on the name, but now we need to make sure that the - // overall hash here respects that so changing the attestor format doesn't - // change the hash code of cluster_admission_rules. - raw := v.(map[string]interface{}) - - // modifying raw actually modifies the values passed to the provider. - // Use a copy to avoid that. - copy := make((map[string]interface{})) - for key, value := range raw { - copy[key] = value - } - at := copy["require_attestations_by"].(*schema.Set) - if at != nil { - t := convertAndMapStringArr(at.List(), GetResourceNameFromSelfLink) - copy["require_attestations_by"] = schema.NewSet(selfLinkNameHash, convertStringArrToInterface(t)) - } - var buf bytes.Buffer - schema.SerializeResourceForHash(&buf, copy, ResourceBinaryAuthorizationPolicy().Schema["cluster_admission_rules"].Elem.(*schema.Resource)) - return hashcode(buf.String()) - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A descriptive comment.`, - }, - "global_policy_evaluation_mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"ENABLE", "DISABLE", ""}), - Description: `Controls the evaluation of a Google-maintained global admission policy -for common system-level images. Images not covered by the global -policy will be subject to the project admission policy. 
Possible values: ["ENABLE", "DISABLE"]`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBinaryAuthorizationPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandBinaryAuthorizationPolicyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - globalPolicyEvaluationModeProp, err := expandBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(d.Get("global_policy_evaluation_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("global_policy_evaluation_mode"); !isEmptyValue(reflect.ValueOf(globalPolicyEvaluationModeProp)) && (ok || !reflect.DeepEqual(v, globalPolicyEvaluationModeProp)) { - obj["globalPolicyEvaluationMode"] = globalPolicyEvaluationModeProp - } - admissionWhitelistPatternsProp, err := expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(d.Get("admission_whitelist_patterns"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("admission_whitelist_patterns"); !isEmptyValue(reflect.ValueOf(admissionWhitelistPatternsProp)) && (ok || !reflect.DeepEqual(v, admissionWhitelistPatternsProp)) { - obj["admissionWhitelistPatterns"] = admissionWhitelistPatternsProp - } - clusterAdmissionRulesProp, err := expandBinaryAuthorizationPolicyClusterAdmissionRules(d.Get("cluster_admission_rules"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cluster_admission_rules"); !isEmptyValue(reflect.ValueOf(clusterAdmissionRulesProp)) && (ok || !reflect.DeepEqual(v, 
clusterAdmissionRulesProp)) { - obj["clusterAdmissionRules"] = clusterAdmissionRulesProp - } - defaultAdmissionRuleProp, err := expandBinaryAuthorizationPolicyDefaultAdmissionRule(d.Get("default_admission_rule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_admission_rule"); !isEmptyValue(reflect.ValueOf(defaultAdmissionRuleProp)) && (ok || !reflect.DeepEqual(v, defaultAdmissionRuleProp)) { - obj["defaultAdmissionRule"] = defaultAdmissionRuleProp - } - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/policy") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Policy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Policy: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Policy %q: %#v", d.Id(), res) - - return resourceBinaryAuthorizationPolicyRead(d, meta) -} - -func resourceBinaryAuthorizationPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/policy") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error 
fetching project for Policy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BinaryAuthorizationPolicy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Policy: %s", err) - } - - if err := d.Set("description", flattenBinaryAuthorizationPolicyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("global_policy_evaluation_mode", flattenBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(res["globalPolicyEvaluationMode"], d, config)); err != nil { - return fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("admission_whitelist_patterns", flattenBinaryAuthorizationPolicyAdmissionWhitelistPatterns(res["admissionWhitelistPatterns"], d, config)); err != nil { - return fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("cluster_admission_rules", flattenBinaryAuthorizationPolicyClusterAdmissionRules(res["clusterAdmissionRules"], d, config)); err != nil { - return fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("default_admission_rule", flattenBinaryAuthorizationPolicyDefaultAdmissionRule(res["defaultAdmissionRule"], d, config)); err != nil { - return fmt.Errorf("Error reading Policy: %s", err) - } - - return nil -} - -func resourceBinaryAuthorizationPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject 
= project - - obj := make(map[string]interface{}) - descriptionProp, err := expandBinaryAuthorizationPolicyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - globalPolicyEvaluationModeProp, err := expandBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(d.Get("global_policy_evaluation_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("global_policy_evaluation_mode"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, globalPolicyEvaluationModeProp)) { - obj["globalPolicyEvaluationMode"] = globalPolicyEvaluationModeProp - } - admissionWhitelistPatternsProp, err := expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(d.Get("admission_whitelist_patterns"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("admission_whitelist_patterns"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, admissionWhitelistPatternsProp)) { - obj["admissionWhitelistPatterns"] = admissionWhitelistPatternsProp - } - clusterAdmissionRulesProp, err := expandBinaryAuthorizationPolicyClusterAdmissionRules(d.Get("cluster_admission_rules"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cluster_admission_rules"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clusterAdmissionRulesProp)) { - obj["clusterAdmissionRules"] = clusterAdmissionRulesProp - } - defaultAdmissionRuleProp, err := expandBinaryAuthorizationPolicyDefaultAdmissionRule(d.Get("default_admission_rule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_admission_rule"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultAdmissionRuleProp)) { - obj["defaultAdmissionRule"] = defaultAdmissionRuleProp - } - - url, err := 
replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/policy") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Policy %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Policy %q: %#v", d.Id(), res) - } - - return resourceBinaryAuthorizationPolicyRead(d, meta) -} - -func resourceBinaryAuthorizationPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/policy") - if err != nil { - return err - } - - var obj map[string]interface{} - obj = defaultBinaryAuthorizationPolicy(d.Get("project").(string)) - log.Printf("[DEBUG] Deleting Policy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Policy") - } - - log.Printf("[DEBUG] Finished deleting Policy %q: %#v", d.Id(), res) - return nil -} - -func resourceBinaryAuthorizationPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := 
parseImportId([]string{ - "projects/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenBinaryAuthorizationPolicyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationPolicyAdmissionWhitelistPatterns(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name_pattern": flattenBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(original["namePattern"], d, config), - }) - } - return transformed -} -func flattenBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationPolicyClusterAdmissionRules(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(l)) - for k, raw := range l { - original := raw.(map[string]interface{}) - transformed = append(transformed, map[string]interface{}{ - "cluster": k, - "evaluation_mode": flattenBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(original["evaluationMode"], d, config), - "require_attestations_by": 
flattenBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(original["requireAttestationsBy"], d, config), - "enforcement_mode": flattenBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(original["enforcementMode"], d, config), - }) - } - return transformed -} -func flattenBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(selfLinkNameHash, v.([]interface{})) -} - -func flattenBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationPolicyDefaultAdmissionRule(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["evaluation_mode"] = - flattenBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(original["evaluationMode"], d, config) - transformed["require_attestations_by"] = - flattenBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(original["requireAttestationsBy"], d, config) - transformed["enforcement_mode"] = - flattenBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(original["enforcementMode"], d, config) - return []interface{}{transformed} -} -func flattenBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return 
schema.NewSet(selfLinkNameHash, v.([]interface{})) -} - -func flattenBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBinaryAuthorizationPolicyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNamePattern, err := expandBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(original["name_pattern"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNamePattern); val.IsValid() && !isEmptyValue(val) { - transformed["namePattern"] = transformedNamePattern - } - - req = append(req, transformed) - } - return req, nil -} - -func expandBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationPolicyClusterAdmissionRules(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - for _, raw := range v.(*schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEvaluationMode, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(original["evaluation_mode"], d, config) - if err != nil { - 
return nil, err - } else if val := reflect.ValueOf(transformedEvaluationMode); val.IsValid() && !isEmptyValue(val) { - transformed["evaluationMode"] = transformedEvaluationMode - } - - transformedRequireAttestationsBy, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(original["require_attestations_by"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRequireAttestationsBy); val.IsValid() && !isEmptyValue(val) { - transformed["requireAttestationsBy"] = transformedRequireAttestationsBy - } - - transformedEnforcementMode, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(original["enforcement_mode"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnforcementMode); val.IsValid() && !isEmptyValue(val) { - transformed["enforcementMode"] = transformedEnforcementMode - } - - transformedCluster, err := expandString(original["cluster"], d, config) - if err != nil { - return nil, err - } - m[transformedCluster] = transformed - } - return m, nil -} - -func expandBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - r := regexp.MustCompile("projects/(.+)/attestors/(.+)") - - // It's possible that all entries in the list will specify a project, in - // which case the user wouldn't necessarily have to specify a provider - // project. 
- var project string - var err error - for _, s := range v.(*schema.Set).List() { - if !r.MatchString(s.(string)) { - project, err = getProject(d, config) - if err != nil { - return []interface{}{}, err - } - break - } - } - - return convertAndMapStringArr(v.(*schema.Set).List(), func(s string) string { - if r.MatchString(s) { - return s - } - - return fmt.Sprintf("projects/%s/attestors/%s", project, s) - }), nil -} - -func expandBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationPolicyDefaultAdmissionRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEvaluationMode, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(original["evaluation_mode"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEvaluationMode); val.IsValid() && !isEmptyValue(val) { - transformed["evaluationMode"] = transformedEvaluationMode - } - - transformedRequireAttestationsBy, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(original["require_attestations_by"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRequireAttestationsBy); val.IsValid() && !isEmptyValue(val) { - transformed["requireAttestationsBy"] = transformedRequireAttestationsBy - } - - transformedEnforcementMode, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(original["enforcement_mode"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnforcementMode); val.IsValid() && !isEmptyValue(val) { - transformed["enforcementMode"] = transformedEnforcementMode - 
} - - return transformed, nil -} - -func expandBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - r := regexp.MustCompile("projects/(.+)/attestors/(.+)") - - // It's possible that all entries in the list will specify a project, in - // which case the user wouldn't necessarily have to specify a provider - // project. - var project string - var err error - for _, s := range v.(*schema.Set).List() { - if !r.MatchString(s.(string)) { - project, err = getProject(d, config) - if err != nil { - return []interface{}{}, err - } - break - } - } - - return convertAndMapStringArr(v.(*schema.Set).List(), func(s string) string { - if r.MatchString(s) { - return s - } - - return fmt.Sprintf("projects/%s/attestors/%s", project, s) - }), nil -} - -func expandBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_certificate_manager_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_certificate_manager_certificate.go deleted file mode 100644 index 0525b1761c..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_certificate_manager_certificate.go +++ /dev/null @@ -1,841 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be 
clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func certManagerDefaultScopeDiffSuppress(_, old, new string, diff *schema.ResourceData) bool { - if old == "" && new == "DEFAULT" || old == "DEFAULT" && new == "" { - return true - } - return false -} - -func ResourceCertificateManagerCertificate() *schema.Resource { - return &schema.Resource{ - Create: resourceCertificateManagerCertificateCreate, - Read: resourceCertificateManagerCertificateRead, - Update: resourceCertificateManagerCertificateUpdate, - Delete: resourceCertificateManagerCertificateDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCertificateManagerCertificateImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A user-defined name of the certificate. 
Certificate names must be unique -The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, -and all following characters must be a dash, underscore, letter or digit.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A human-readable description of the resource.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Set of label tags associated with the Certificate resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "managed": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Configuration and state of a Managed Certificate. -Certificate Manager provisions and renews Managed Certificates -automatically, for as long as it's authorized to do so.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dns_authorizations": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - DiffSuppressFunc: projectNumberDiffSuppress, - Description: `Authorizations that will be used for performing domain authorization`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "domains": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The domains for which a managed SSL certificate will be generated. -Wildcard domains are only supported with DNS challenge resolution`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "authorization_attempt_info": { - Type: schema.TypeList, - Computed: true, - Description: `Detailed state of the latest authorization attempt for each domain -specified for this Managed Certificate.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "details": { - Type: schema.TypeString, - Computed: true, - Description: `Human readable explanation for reaching the state. Provided to help -address the configuration issues. -Not guaranteed to be stable. 
For programmatic access use 'failure_reason' field.`, - }, - "domain": { - Type: schema.TypeString, - Computed: true, - Description: `Domain name of the authorization attempt.`, - }, - "failure_reason": { - Type: schema.TypeString, - Computed: true, - Description: `Reason for failure of the authorization attempt for the domain.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `State of the domain for managed certificate issuance.`, - }, - }, - }, - }, - "provisioning_issue": { - Type: schema.TypeList, - Computed: true, - Description: `Information about issues with provisioning this Managed Certificate.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "details": { - Type: schema.TypeString, - Computed: true, - Description: `Human readable explanation about the issue. Provided to help address -the configuration issues. -Not guaranteed to be stable. For programmatic access use 'reason' field.`, - }, - "reason": { - Type: schema.TypeString, - Computed: true, - Description: `Reason for provisioning failures.`, - }, - }, - }, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `A state of this Managed Certificate.`, - }, - }, - }, - ExactlyOneOf: []string{"self_managed", "managed"}, - }, - "scope": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: certManagerDefaultScopeDiffSuppress, - Description: `The scope of the certificate. - -DEFAULT: Certificates with default scope are served from core Google data centers. -If unsure, choose this option. - -EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, -served from non-core Google data centers. -Currently allowed only for managed certificates.`, - Default: "DEFAULT", - }, - "self_managed": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Certificate data for a SelfManaged Certificate. -SelfManaged Certificates are uploaded by the user. 
Updating such -certificates before they expire remains the user's responsibility.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "certificate_pem": { - Type: schema.TypeString, - Optional: true, - Deprecated: "Deprecated in favor of `pem_certificate`", - Description: `**Deprecated** The certificate chain in PEM-encoded form. - -Leaf certificate comes first, followed by intermediate ones if any.`, - Sensitive: true, - ExactlyOneOf: []string{"self_managed.0.certificate_pem", "self_managed.0.pem_certificate"}, - }, - "pem_certificate": { - Type: schema.TypeString, - Optional: true, - Description: `The certificate chain in PEM-encoded form. - -Leaf certificate comes first, followed by intermediate ones if any.`, - ExactlyOneOf: []string{"self_managed.0.certificate_pem", "self_managed.0.pem_certificate"}, - }, - "pem_private_key": { - Type: schema.TypeString, - Optional: true, - Description: `The private key of the leaf certificate in PEM-encoded form.`, - Sensitive: true, - ExactlyOneOf: []string{"self_managed.0.private_key_pem", "self_managed.0.pem_private_key"}, - }, - "private_key_pem": { - Type: schema.TypeString, - Optional: true, - Deprecated: "Deprecated in favor of `pem_private_key`", - Description: `**Deprecated** The private key of the leaf certificate in PEM-encoded form.`, - Sensitive: true, - ExactlyOneOf: []string{"self_managed.0.private_key_pem", "self_managed.0.pem_private_key"}, - }, - }, - }, - ExactlyOneOf: []string{"self_managed", "managed"}, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCertificateManagerCertificateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := 
expandCertificateManagerCertificateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandCertificateManagerCertificateLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - scopeProp, err := expandCertificateManagerCertificateScope(d.Get("scope"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("scope"); !isEmptyValue(reflect.ValueOf(scopeProp)) && (ok || !reflect.DeepEqual(v, scopeProp)) { - obj["scope"] = scopeProp - } - selfManagedProp, err := expandCertificateManagerCertificateSelfManaged(d.Get("self_managed"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("self_managed"); !isEmptyValue(reflect.ValueOf(selfManagedProp)) && (ok || !reflect.DeepEqual(v, selfManagedProp)) { - obj["selfManaged"] = selfManagedProp - } - managedProp, err := expandCertificateManagerCertificateManaged(d.Get("managed"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("managed"); !isEmptyValue(reflect.ValueOf(managedProp)) && (ok || !reflect.DeepEqual(v, managedProp)) { - obj["managed"] = managedProp - } - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificates?certificateId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Certificate: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Certificate: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value 
was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Certificate: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/certificates/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = CertificateManagerOperationWaitTime( - config, res, project, "Creating Certificate", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Certificate: %s", err) - } - - log.Printf("[DEBUG] Finished creating Certificate %q: %#v", d.Id(), res) - - return resourceCertificateManagerCertificateRead(d, meta) -} - -func resourceCertificateManagerCertificateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificates/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Certificate: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CertificateManagerCertificate %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Certificate: %s", err) - } - - if 
err := d.Set("description", flattenCertificateManagerCertificateDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Certificate: %s", err) - } - if err := d.Set("labels", flattenCertificateManagerCertificateLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Certificate: %s", err) - } - if err := d.Set("scope", flattenCertificateManagerCertificateScope(res["scope"], d, config)); err != nil { - return fmt.Errorf("Error reading Certificate: %s", err) - } - if err := d.Set("managed", flattenCertificateManagerCertificateManaged(res["managed"], d, config)); err != nil { - return fmt.Errorf("Error reading Certificate: %s", err) - } - - return nil -} - -func resourceCertificateManagerCertificateUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Certificate: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandCertificateManagerCertificateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandCertificateManagerCertificateLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificates/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Certificate %q: 
%#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Certificate %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Certificate %q: %#v", d.Id(), res) - } - - err = CertificateManagerOperationWaitTime( - config, res, project, "Updating Certificate", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceCertificateManagerCertificateRead(d, meta) -} - -func resourceCertificateManagerCertificateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Certificate: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificates/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Certificate %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Certificate") - } - - err = CertificateManagerOperationWaitTime( - config, res, project, "Deleting Certificate", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Certificate %q: %#v", d.Id(), res) - return nil -} - -func resourceCertificateManagerCertificateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/certificates/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/certificates/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenCertificateManagerCertificateDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateScope(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateManaged(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["domains"] = - flattenCertificateManagerCertificateManagedDomains(original["domains"], d, config) - transformed["dns_authorizations"] = - 
flattenCertificateManagerCertificateManagedDnsAuthorizations(original["dnsAuthorizations"], d, config) - transformed["state"] = - flattenCertificateManagerCertificateManagedState(original["state"], d, config) - transformed["provisioning_issue"] = - flattenCertificateManagerCertificateManagedProvisioningIssue(original["provisioningIssue"], d, config) - transformed["authorization_attempt_info"] = - flattenCertificateManagerCertificateManagedAuthorizationAttemptInfo(original["authorizationAttemptInfo"], d, config) - return []interface{}{transformed} -} -func flattenCertificateManagerCertificateManagedDomains(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateManagedDnsAuthorizations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateManagedState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateManagedProvisioningIssue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["reason"] = - flattenCertificateManagerCertificateManagedProvisioningIssueReason(original["reason"], d, config) - transformed["details"] = - flattenCertificateManagerCertificateManagedProvisioningIssueDetails(original["details"], d, config) - return []interface{}{transformed} -} -func flattenCertificateManagerCertificateManagedProvisioningIssueReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateManagedProvisioningIssueDetails(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfo(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "domain": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(original["domain"], d, config), - "state": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoState(original["state"], d, config), - "failure_reason": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(original["failureReason"], d, config), - "details": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(original["details"], d, config), - }) - } - return transformed -} -func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCertificateManagerCertificateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range 
v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCertificateManagerCertificateScope(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateSelfManaged(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCertificatePem, err := expandCertificateManagerCertificateSelfManagedCertificatePem(original["certificate_pem"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCertificatePem); val.IsValid() && !isEmptyValue(val) { - transformed["certificatePem"] = transformedCertificatePem - } - - transformedPrivateKeyPem, err := expandCertificateManagerCertificateSelfManagedPrivateKeyPem(original["private_key_pem"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPrivateKeyPem); val.IsValid() && !isEmptyValue(val) { - transformed["privateKeyPem"] = transformedPrivateKeyPem - } - - transformedPemCertificate, err := expandCertificateManagerCertificateSelfManagedPemCertificate(original["pem_certificate"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPemCertificate); val.IsValid() && !isEmptyValue(val) { - transformed["pemCertificate"] = transformedPemCertificate - } - - transformedPemPrivateKey, err := expandCertificateManagerCertificateSelfManagedPemPrivateKey(original["pem_private_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPemPrivateKey); val.IsValid() && !isEmptyValue(val) { - transformed["pemPrivateKey"] = transformedPemPrivateKey - } - - return transformed, nil -} - -func expandCertificateManagerCertificateSelfManagedCertificatePem(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateSelfManagedPrivateKeyPem(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateSelfManagedPemCertificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateSelfManagedPemPrivateKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateManaged(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDomains, err := expandCertificateManagerCertificateManagedDomains(original["domains"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDomains); val.IsValid() && !isEmptyValue(val) { - transformed["domains"] = transformedDomains - } - - transformedDnsAuthorizations, err := expandCertificateManagerCertificateManagedDnsAuthorizations(original["dns_authorizations"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDnsAuthorizations); val.IsValid() && !isEmptyValue(val) { - transformed["dnsAuthorizations"] = transformedDnsAuthorizations - } - - transformedState, err := expandCertificateManagerCertificateManagedState(original["state"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedState); val.IsValid() && !isEmptyValue(val) { - transformed["state"] = transformedState - } - - transformedProvisioningIssue, err := expandCertificateManagerCertificateManagedProvisioningIssue(original["provisioning_issue"], d, config) - if err != nil { 
- return nil, err - } else if val := reflect.ValueOf(transformedProvisioningIssue); val.IsValid() && !isEmptyValue(val) { - transformed["provisioningIssue"] = transformedProvisioningIssue - } - - transformedAuthorizationAttemptInfo, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfo(original["authorization_attempt_info"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAuthorizationAttemptInfo); val.IsValid() && !isEmptyValue(val) { - transformed["authorizationAttemptInfo"] = transformedAuthorizationAttemptInfo - } - - return transformed, nil -} - -func expandCertificateManagerCertificateManagedDomains(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateManagedDnsAuthorizations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateManagedState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateManagedProvisioningIssue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedReason, err := expandCertificateManagerCertificateManagedProvisioningIssueReason(original["reason"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedReason); val.IsValid() && !isEmptyValue(val) { - transformed["reason"] = transformedReason - } - - transformedDetails, err := expandCertificateManagerCertificateManagedProvisioningIssueDetails(original["details"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDetails); val.IsValid() && !isEmptyValue(val) { - 
transformed["details"] = transformedDetails - } - - return transformed, nil -} - -func expandCertificateManagerCertificateManagedProvisioningIssueReason(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateManagedProvisioningIssueDetails(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateManagedAuthorizationAttemptInfo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDomain, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(original["domain"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDomain); val.IsValid() && !isEmptyValue(val) { - transformed["domain"] = transformedDomain - } - - transformedState, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoState(original["state"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedState); val.IsValid() && !isEmptyValue(val) { - transformed["state"] = transformedState - } - - transformedFailureReason, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(original["failure_reason"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFailureReason); val.IsValid() && !isEmptyValue(val) { - transformed["failureReason"] = transformedFailureReason - } - - transformedDetails, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(original["details"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDetails); 
val.IsValid() && !isEmptyValue(val) { - transformed["details"] = transformedDetails - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_certificate_manager_certificate_map.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_certificate_manager_certificate_map.go deleted file mode 100644 index 869abb2cfd..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_certificate_manager_certificate_map.go +++ /dev/null @@ -1,478 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceCertificateManagerCertificateMap() *schema.Resource { - return &schema.Resource{ - Create: resourceCertificateManagerCertificateMapCreate, - Read: resourceCertificateManagerCertificateMapRead, - Update: resourceCertificateManagerCertificateMapUpdate, - Delete: resourceCertificateManagerCertificateMapDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCertificateManagerCertificateMapImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A user-defined name of the Certificate Map. Certificate Map names must be unique -globally and match the pattern 'projects/*/locations/*/certificateMaps/*'.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A human-readable description of the resource.`, - }, - "labels": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - Description: `Set of labels associated with a Certificate Map resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp of a Certificate Map. Timestamp is in RFC3339 UTC "Zulu" format, -accurate to nanoseconds with up to nine fractional digits. 
-Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "gclb_targets": { - Type: schema.TypeList, - Computed: true, - Description: `A list of target proxies that use this Certificate Map`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_configs": { - Type: schema.TypeList, - Optional: true, - Description: `An IP configuration where this Certificate Map is serving`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_address": { - Type: schema.TypeString, - Optional: true, - Description: `An external IP address`, - }, - "ports": { - Type: schema.TypeList, - Optional: true, - Description: `A list of ports`, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - }, - }, - }, - "target_https_proxy": { - Type: schema.TypeString, - Optional: true, - Description: `Proxy name must be in the format projects/*/locations/*/targetHttpsProxies/*. -This field is part of a union field 'target_proxy': Only one of 'targetHttpsProxy' or -'targetSslProxy' may be set.`, - }, - "target_ssl_proxy": { - Type: schema.TypeString, - Optional: true, - Description: `Proxy name must be in the format projects/*/locations/*/targetSslProxies/*. -This field is part of a union field 'target_proxy': Only one of 'targetHttpsProxy' or -'targetSslProxy' may be set.`, - }, - }, - }, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Update timestamp of a Certificate Map. Timestamp is in RFC3339 UTC "Zulu" format, -accurate to nanoseconds with up to nine fractional digits. 
-Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCertificateManagerCertificateMapCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandCertificateManagerCertificateMapDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandCertificateManagerCertificateMapLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps?certificateMapId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new CertificateMap: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for CertificateMap: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating CertificateMap: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, 
"projects/{{project}}/locations/global/certificateMaps/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = CertificateManagerOperationWaitTime( - config, res, project, "Creating CertificateMap", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create CertificateMap: %s", err) - } - - log.Printf("[DEBUG] Finished creating CertificateMap %q: %#v", d.Id(), res) - - return resourceCertificateManagerCertificateMapRead(d, meta) -} - -func resourceCertificateManagerCertificateMapRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for CertificateMap: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CertificateManagerCertificateMap %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading CertificateMap: %s", err) - } - - if err := d.Set("description", flattenCertificateManagerCertificateMapDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading CertificateMap: %s", err) - } - if err := d.Set("create_time", flattenCertificateManagerCertificateMapCreateTime(res["createTime"], d, config)); err != nil { - return 
fmt.Errorf("Error reading CertificateMap: %s", err) - } - if err := d.Set("update_time", flattenCertificateManagerCertificateMapUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading CertificateMap: %s", err) - } - if err := d.Set("labels", flattenCertificateManagerCertificateMapLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading CertificateMap: %s", err) - } - if err := d.Set("gclb_targets", flattenCertificateManagerCertificateMapGclbTargets(res["gclbTargets"], d, config)); err != nil { - return fmt.Errorf("Error reading CertificateMap: %s", err) - } - - return nil -} - -func resourceCertificateManagerCertificateMapUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for CertificateMap: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandCertificateManagerCertificateMapDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandCertificateManagerCertificateMapLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating CertificateMap %q: %#v", d.Id(), obj) - updateMask := []string{} - - if 
d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating CertificateMap %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating CertificateMap %q: %#v", d.Id(), res) - } - - err = CertificateManagerOperationWaitTime( - config, res, project, "Updating CertificateMap", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceCertificateManagerCertificateMapRead(d, meta) -} - -func resourceCertificateManagerCertificateMapDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for CertificateMap: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting CertificateMap %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, 
"DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "CertificateMap") - } - - err = CertificateManagerOperationWaitTime( - config, res, project, "Deleting CertificateMap", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting CertificateMap %q: %#v", d.Id(), res) - return nil -} - -func resourceCertificateManagerCertificateMapImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/certificateMaps/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenCertificateManagerCertificateMapDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapGclbTargets(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects 
coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "ip_configs": flattenCertificateManagerCertificateMapGclbTargetsIpConfigs(original["ipConfigs"], d, config), - "target_https_proxy": flattenCertificateManagerCertificateMapGclbTargetsTargetHttpsProxy(original["targetHttpsProxy"], d, config), - "target_ssl_proxy": flattenCertificateManagerCertificateMapGclbTargetsTargetSslProxy(original["targetSslProxy"], d, config), - }) - } - return transformed -} -func flattenCertificateManagerCertificateMapGclbTargetsIpConfigs(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "ip_address": flattenCertificateManagerCertificateMapGclbTargetsIpConfigsIpAddress(original["ipAddress"], d, config), - "ports": flattenCertificateManagerCertificateMapGclbTargetsIpConfigsPorts(original["ports"], d, config), - }) - } - return transformed -} -func flattenCertificateManagerCertificateMapGclbTargetsIpConfigsIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapGclbTargetsIpConfigsPorts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapGclbTargetsTargetHttpsProxy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapGclbTargetsTargetSslProxy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCertificateManagerCertificateMapDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) 
{ - return v, nil -} - -func expandCertificateManagerCertificateMapLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_certificate_manager_certificate_map_entry.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_certificate_manager_certificate_map_entry.go deleted file mode 100644 index 4f2a50c3e1..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_certificate_manager_certificate_map_entry.go +++ /dev/null @@ -1,505 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceCertificateManagerCertificateMapEntry() *schema.Resource { - return &schema.Resource{ - Create: resourceCertificateManagerCertificateMapEntryCreate, - Read: resourceCertificateManagerCertificateMapEntryRead, - Update: resourceCertificateManagerCertificateMapEntryUpdate, - Delete: resourceCertificateManagerCertificateMapEntryDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCertificateManagerCertificateMapEntryImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "certificates": { - Type: schema.TypeList, - Required: true, - DiffSuppressFunc: projectNumberDiffSuppress, - Description: `A set of Certificates defines for the given hostname. -There can be defined up to fifteen certificates in each Certificate Map Entry. -Each certificate must match pattern projects/*/locations/*/certificates/*.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "map": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A map entry that is inputted into the cetrificate map`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A user-defined name of the Certificate Map Entry. 
Certificate Map Entry -names must be unique globally and match pattern -'projects/*/locations/*/certificateMaps/*/certificateMapEntries/*'`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A human-readable description of the resource.`, - }, - "hostname": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A Hostname (FQDN, e.g. example.com) or a wildcard hostname expression (*.example.com) -for a set of hostnames with common suffix. Used as Server Name Indication (SNI) for -selecting a proper certificate.`, - ExactlyOneOf: []string{"hostname", "matcher"}, - }, - "labels": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - Description: `Set of labels associated with a Certificate Map Entry. -An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "matcher": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A predefined matcher for particular cases, other than SNI selection`, - ExactlyOneOf: []string{"hostname", "matcher"}, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp of a Certificate Map Entry. Timestamp in RFC3339 UTC "Zulu" format, -with nanosecond resolution and up to nine fractional digits. -Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `A serving state of this Certificate Map Entry.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Update timestamp of a Certificate Map Entry. Timestamp in RFC3339 UTC "Zulu" format, -with nanosecond resolution and up to nine fractional digits. 
-Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCertificateManagerCertificateMapEntryCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandCertificateManagerCertificateMapEntryDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandCertificateManagerCertificateMapEntryLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - certificatesProp, err := expandCertificateManagerCertificateMapEntryCertificates(d.Get("certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("certificates"); !isEmptyValue(reflect.ValueOf(certificatesProp)) && (ok || !reflect.DeepEqual(v, certificatesProp)) { - obj["certificates"] = certificatesProp - } - hostnameProp, err := expandCertificateManagerCertificateMapEntryHostname(d.Get("hostname"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("hostname"); !isEmptyValue(reflect.ValueOf(hostnameProp)) && (ok || !reflect.DeepEqual(v, hostnameProp)) { - obj["hostname"] = hostnameProp - } - matcherProp, err := expandCertificateManagerCertificateMapEntryMatcher(d.Get("matcher"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("matcher"); 
!isEmptyValue(reflect.ValueOf(matcherProp)) && (ok || !reflect.DeepEqual(v, matcherProp)) { - obj["matcher"] = matcherProp - } - nameProp, err := expandCertificateManagerCertificateMapEntryName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries?certificateMapEntryId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new CertificateMapEntry: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating CertificateMapEntry: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = CertificateManagerOperationWaitTime( - config, res, project, "Creating CertificateMapEntry", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create CertificateMapEntry: %s", err) - } - - log.Printf("[DEBUG] Finished creating CertificateMapEntry %q: %#v", d.Id(), res) - - return resourceCertificateManagerCertificateMapEntryRead(d, meta) -} - -func 
resourceCertificateManagerCertificateMapEntryRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CertificateManagerCertificateMapEntry %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading CertificateMapEntry: %s", err) - } - - if err := d.Set("description", flattenCertificateManagerCertificateMapEntryDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading CertificateMapEntry: %s", err) - } - if err := d.Set("create_time", flattenCertificateManagerCertificateMapEntryCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading CertificateMapEntry: %s", err) - } - if err := d.Set("update_time", flattenCertificateManagerCertificateMapEntryUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading CertificateMapEntry: %s", err) - } - if err := d.Set("labels", flattenCertificateManagerCertificateMapEntryLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading CertificateMapEntry: %s", err) - } - if err := d.Set("certificates", 
flattenCertificateManagerCertificateMapEntryCertificates(res["certificates"], d, config)); err != nil { - return fmt.Errorf("Error reading CertificateMapEntry: %s", err) - } - if err := d.Set("state", flattenCertificateManagerCertificateMapEntryState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading CertificateMapEntry: %s", err) - } - if err := d.Set("hostname", flattenCertificateManagerCertificateMapEntryHostname(res["hostname"], d, config)); err != nil { - return fmt.Errorf("Error reading CertificateMapEntry: %s", err) - } - if err := d.Set("matcher", flattenCertificateManagerCertificateMapEntryMatcher(res["matcher"], d, config)); err != nil { - return fmt.Errorf("Error reading CertificateMapEntry: %s", err) - } - if err := d.Set("name", flattenCertificateManagerCertificateMapEntryName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading CertificateMapEntry: %s", err) - } - - return nil -} - -func resourceCertificateManagerCertificateMapEntryUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandCertificateManagerCertificateMapEntryDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandCertificateManagerCertificateMapEntryLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
labelsProp)) { - obj["labels"] = labelsProp - } - certificatesProp, err := expandCertificateManagerCertificateMapEntryCertificates(d.Get("certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("certificates"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, certificatesProp)) { - obj["certificates"] = certificatesProp - } - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating CertificateMapEntry %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("certificates") { - updateMask = append(updateMask, "certificates") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating CertificateMapEntry %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating CertificateMapEntry %q: %#v", d.Id(), res) - } - - err = CertificateManagerOperationWaitTime( - config, res, project, "Updating CertificateMapEntry", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceCertificateManagerCertificateMapEntryRead(d, meta) -} - -func resourceCertificateManagerCertificateMapEntryDelete(d *schema.ResourceData, meta 
interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting CertificateMapEntry %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "CertificateMapEntry") - } - - err = CertificateManagerOperationWaitTime( - config, res, project, "Deleting CertificateMapEntry", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting CertificateMapEntry %q: %#v", d.Id(), res) - return nil -} - -func resourceCertificateManagerCertificateMapEntryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/certificateMaps/(?P[^/]+)/certificateMapEntries/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return 
[]*schema.ResourceData{d}, nil -} - -func flattenCertificateManagerCertificateMapEntryDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapEntryCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapEntryUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapEntryLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapEntryCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapEntryState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapEntryHostname(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapEntryMatcher(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerCertificateMapEntryName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandCertificateManagerCertificateMapEntryDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateMapEntryLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCertificateManagerCertificateMapEntryCertificates(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateMapEntryHostname(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateMapEntryMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerCertificateMapEntryName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return GetResourceNameFromSelfLink(v.(string)), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_certificate_manager_dns_authorization.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_certificate_manager_dns_authorization.go deleted file mode 100644 index 05ec6c6505..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_certificate_manager_dns_authorization.go +++ /dev/null @@ -1,431 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceCertificateManagerDnsAuthorization() *schema.Resource { - return &schema.Resource{ - Create: resourceCertificateManagerDnsAuthorizationCreate, - Read: resourceCertificateManagerDnsAuthorizationRead, - Update: resourceCertificateManagerDnsAuthorizationUpdate, - Delete: resourceCertificateManagerDnsAuthorizationDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCertificateManagerDnsAuthorizationImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "domain": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A domain which is being authorized. A DnsAuthorization resource covers a -single domain and its wildcard, e.g. authorization for "example.com" can -be used to issue certificates for "example.com" and "*.example.com".`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource; provided by the client when the resource is created. 
-The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, -and all following characters must be a dash, underscore, letter or digit.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A human-readable description of the resource.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Set of label tags associated with the DNS Authorization resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "dns_resource_record": { - Type: schema.TypeList, - Computed: true, - Description: `The structure describing the DNS Resource Record that needs to be added -to DNS configuration for the authorization to be usable by -certificate.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "data": { - Type: schema.TypeString, - Computed: true, - Description: `Data of the DNS Resource Record.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Fully qualified name of the DNS Resource Record. -E.g. 
'_acme-challenge.example.com'.`, - }, - "type": { - Type: schema.TypeString, - Computed: true, - Description: `Type of the DNS Resource Record.`, - }, - }, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCertificateManagerDnsAuthorizationCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandCertificateManagerDnsAuthorizationDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandCertificateManagerDnsAuthorizationLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - domainProp, err := expandCertificateManagerDnsAuthorizationDomain(d.Get("domain"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("domain"); !isEmptyValue(reflect.ValueOf(domainProp)) && (ok || !reflect.DeepEqual(v, domainProp)) { - obj["domain"] = domainProp - } - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/dnsAuthorizations?dnsAuthorizationId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new DnsAuthorization: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DnsAuthorization: %s", err) - } - billingProject = project - - // err == nil indicates that the 
billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating DnsAuthorization: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = CertificateManagerOperationWaitTime( - config, res, project, "Creating DnsAuthorization", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create DnsAuthorization: %s", err) - } - - log.Printf("[DEBUG] Finished creating DnsAuthorization %q: %#v", d.Id(), res) - - return resourceCertificateManagerDnsAuthorizationRead(d, meta) -} - -func resourceCertificateManagerDnsAuthorizationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DnsAuthorization: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CertificateManagerDnsAuthorization %q", d.Id())) - } - - if err := d.Set("project", project); err != nil 
{ - return fmt.Errorf("Error reading DnsAuthorization: %s", err) - } - - if err := d.Set("description", flattenCertificateManagerDnsAuthorizationDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading DnsAuthorization: %s", err) - } - if err := d.Set("labels", flattenCertificateManagerDnsAuthorizationLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading DnsAuthorization: %s", err) - } - if err := d.Set("domain", flattenCertificateManagerDnsAuthorizationDomain(res["domain"], d, config)); err != nil { - return fmt.Errorf("Error reading DnsAuthorization: %s", err) - } - if err := d.Set("dns_resource_record", flattenCertificateManagerDnsAuthorizationDnsResourceRecord(res["dnsResourceRecord"], d, config)); err != nil { - return fmt.Errorf("Error reading DnsAuthorization: %s", err) - } - - return nil -} - -func resourceCertificateManagerDnsAuthorizationUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DnsAuthorization: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandCertificateManagerDnsAuthorizationDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandCertificateManagerDnsAuthorizationLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, 
"{{CertificateManagerBasePath}}projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating DnsAuthorization %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating DnsAuthorization %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating DnsAuthorization %q: %#v", d.Id(), res) - } - - err = CertificateManagerOperationWaitTime( - config, res, project, "Updating DnsAuthorization", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceCertificateManagerDnsAuthorizationRead(d, meta) -} - -func resourceCertificateManagerDnsAuthorizationDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DnsAuthorization: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - 
log.Printf("[DEBUG] Deleting DnsAuthorization %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DnsAuthorization") - } - - err = CertificateManagerOperationWaitTime( - config, res, project, "Deleting DnsAuthorization", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting DnsAuthorization %q: %#v", d.Id(), res) - return nil -} - -func resourceCertificateManagerDnsAuthorizationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/dnsAuthorizations/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenCertificateManagerDnsAuthorizationDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerDnsAuthorizationLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerDnsAuthorizationDomain(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerDnsAuthorizationDnsResourceRecord(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 
0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenCertificateManagerDnsAuthorizationDnsResourceRecordName(original["name"], d, config) - transformed["type"] = - flattenCertificateManagerDnsAuthorizationDnsResourceRecordType(original["type"], d, config) - transformed["data"] = - flattenCertificateManagerDnsAuthorizationDnsResourceRecordData(original["data"], d, config) - return []interface{}{transformed} -} -func flattenCertificateManagerDnsAuthorizationDnsResourceRecordName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerDnsAuthorizationDnsResourceRecordType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCertificateManagerDnsAuthorizationDnsResourceRecordData(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCertificateManagerDnsAuthorizationDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCertificateManagerDnsAuthorizationLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCertificateManagerDnsAuthorizationDomain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_asset_folder_feed.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_asset_folder_feed.go deleted file mode 100644 index 6436ecf492..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_asset_folder_feed.go 
+++ /dev/null @@ -1,661 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceCloudAssetFolderFeed() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudAssetFolderFeedCreate, - Read: resourceCloudAssetFolderFeedRead, - Update: resourceCloudAssetFolderFeedUpdate, - Delete: resourceCloudAssetFolderFeedDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudAssetFolderFeedImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "billing_project": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The project whose identity will be used when sending messages to the -destination pubsub topic. 
It also specifies the project for API -enablement check, quota, and billing.`, - }, - "feed_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `This is the client-assigned asset feed identifier and it needs to be unique under a specific parent.`, - }, - "feed_output_config": { - Type: schema.TypeList, - Required: true, - Description: `Output configuration for asset feed destination.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pubsub_destination": { - Type: schema.TypeList, - Required: true, - Description: `Destination on Cloud Pubsub.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "topic": { - Type: schema.TypeString, - Required: true, - Description: `Destination on Cloud Pubsub topic.`, - }, - }, - }, - }, - }, - }, - }, - "folder": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The folder this feed should be created in.`, - }, - "asset_names": { - Type: schema.TypeList, - Optional: true, - Description: `A list of the full names of the assets to receive updates. You must specify either or both of -assetNames and assetTypes. Only asset updates matching specified assetNames and assetTypes are -exported to the feed. For example: //compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1. -See https://cloud.google.com/apis/design/resourceNames#fullResourceName for more info.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "asset_types": { - Type: schema.TypeList, - Optional: true, - Description: `A list of types of the assets to receive updates. You must specify either or both of assetNames -and assetTypes. Only asset updates matching specified assetNames and assetTypes are exported to -the feed. 
For example: "compute.googleapis.com/Disk" -See https://cloud.google.com/asset-inventory/docs/supported-asset-types for a list of all -supported asset types.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "condition": { - Type: schema.TypeList, - Optional: true, - Description: `A condition which determines whether an asset update should be published. If specified, an asset -will be returned only when the expression evaluates to true. When set, expression field -must be a valid CEL expression on a TemporalAsset with name temporal_asset. Example: a Feed with -expression "temporal_asset.deleted == true" will only publish Asset deletions. Other fields of -condition are optional.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "expression": { - Type: schema.TypeString, - Required: true, - Description: `Textual representation of an expression in Common Expression Language syntax.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Description of the expression. This is a longer text which describes the expression, -e.g. when hovered over it in a UI.`, - }, - "location": { - Type: schema.TypeString, - Optional: true, - Description: `String indicating the location of the expression for error reporting, e.g. a file -name and a position in the file.`, - }, - "title": { - Type: schema.TypeString, - Optional: true, - Description: `Title for the expression, i.e. a short string describing its purpose. -This can be used e.g. in UIs which allow to enter the expression.`, - }, - }, - }, - }, - "content_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "ACCESS_POLICY", ""}), - Description: `Asset content type. If not specified, no content but the asset name and type will be returned. 
Possible values: ["CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "ACCESS_POLICY"]`, - }, - "folder_id": { - Type: schema.TypeString, - Computed: true, - Description: `The ID of the folder where this feed has been created. Both [FOLDER_NUMBER] -and folders/[FOLDER_NUMBER] are accepted.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The format will be folders/{folder_number}/feeds/{client-assigned_feed_identifier}.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudAssetFolderFeedCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - assetNamesProp, err := expandCloudAssetFolderFeedAssetNames(d.Get("asset_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_names"); !isEmptyValue(reflect.ValueOf(assetNamesProp)) && (ok || !reflect.DeepEqual(v, assetNamesProp)) { - obj["assetNames"] = assetNamesProp - } - assetTypesProp, err := expandCloudAssetFolderFeedAssetTypes(d.Get("asset_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_types"); !isEmptyValue(reflect.ValueOf(assetTypesProp)) && (ok || !reflect.DeepEqual(v, assetTypesProp)) { - obj["assetTypes"] = assetTypesProp - } - contentTypeProp, err := expandCloudAssetFolderFeedContentType(d.Get("content_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_type"); !isEmptyValue(reflect.ValueOf(contentTypeProp)) && (ok || !reflect.DeepEqual(v, contentTypeProp)) { - obj["contentType"] = contentTypeProp - } - feedOutputConfigProp, err := expandCloudAssetFolderFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("feed_output_config"); !isEmptyValue(reflect.ValueOf(feedOutputConfigProp)) && (ok || 
!reflect.DeepEqual(v, feedOutputConfigProp)) { - obj["feedOutputConfig"] = feedOutputConfigProp - } - conditionProp, err := expandCloudAssetFolderFeedCondition(d.Get("condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("condition"); !isEmptyValue(reflect.ValueOf(conditionProp)) && (ok || !reflect.DeepEqual(v, conditionProp)) { - obj["condition"] = conditionProp - } - - obj, err = resourceCloudAssetFolderFeedEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}folders/{{folder_id}}/feeds?feedId={{feed_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new FolderFeed: %#v", obj) - billingProject := "" - - if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // Send the project ID in the X-Goog-User-Project header. - origUserProjectOverride := config.UserProjectOverride - config.UserProjectOverride = true - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating FolderFeed: %s", err) - } - if err := d.Set("name", flattenCloudAssetFolderFeedName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Restore the original value of user_project_override. 
- config.UserProjectOverride = origUserProjectOverride - - log.Printf("[DEBUG] Finished creating FolderFeed %q: %#v", d.Id(), res) - - return resourceCloudAssetFolderFeedRead(d, meta) -} - -func resourceCloudAssetFolderFeedRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudAssetFolderFeed %q", d.Id())) - } - - if err := d.Set("folder_id", flattenCloudAssetFolderFeedFolderId(res["folder_id"], d, config)); err != nil { - return fmt.Errorf("Error reading FolderFeed: %s", err) - } - if err := d.Set("name", flattenCloudAssetFolderFeedName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading FolderFeed: %s", err) - } - if err := d.Set("asset_names", flattenCloudAssetFolderFeedAssetNames(res["assetNames"], d, config)); err != nil { - return fmt.Errorf("Error reading FolderFeed: %s", err) - } - if err := d.Set("asset_types", flattenCloudAssetFolderFeedAssetTypes(res["assetTypes"], d, config)); err != nil { - return fmt.Errorf("Error reading FolderFeed: %s", err) - } - if err := d.Set("content_type", flattenCloudAssetFolderFeedContentType(res["contentType"], d, config)); err != nil { - return fmt.Errorf("Error reading FolderFeed: %s", err) - } - if err := d.Set("feed_output_config", flattenCloudAssetFolderFeedFeedOutputConfig(res["feedOutputConfig"], d, config)); 
err != nil { - return fmt.Errorf("Error reading FolderFeed: %s", err) - } - if err := d.Set("condition", flattenCloudAssetFolderFeedCondition(res["condition"], d, config)); err != nil { - return fmt.Errorf("Error reading FolderFeed: %s", err) - } - - return nil -} - -func resourceCloudAssetFolderFeedUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - assetNamesProp, err := expandCloudAssetFolderFeedAssetNames(d.Get("asset_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_names"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, assetNamesProp)) { - obj["assetNames"] = assetNamesProp - } - assetTypesProp, err := expandCloudAssetFolderFeedAssetTypes(d.Get("asset_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_types"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, assetTypesProp)) { - obj["assetTypes"] = assetTypesProp - } - contentTypeProp, err := expandCloudAssetFolderFeedContentType(d.Get("content_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, contentTypeProp)) { - obj["contentType"] = contentTypeProp - } - feedOutputConfigProp, err := expandCloudAssetFolderFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("feed_output_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, feedOutputConfigProp)) { - obj["feedOutputConfig"] = feedOutputConfigProp - } - conditionProp, err := expandCloudAssetFolderFeedCondition(d.Get("condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("condition"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, conditionProp)) { - obj["condition"] = conditionProp - } - - obj, err = resourceCloudAssetFolderFeedEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating FolderFeed %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("asset_names") { - updateMask = append(updateMask, "assetNames") - } - - if d.HasChange("asset_types") { - updateMask = append(updateMask, "assetTypes") - } - - if d.HasChange("content_type") { - updateMask = append(updateMask, "contentType") - } - - if d.HasChange("feed_output_config") { - updateMask = append(updateMask, "feedOutputConfig") - } - - if d.HasChange("condition") { - updateMask = append(updateMask, "condition") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating FolderFeed %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating FolderFeed %q: %#v", d.Id(), res) - } - - return resourceCloudAssetFolderFeedRead(d, meta) -} - -func resourceCloudAssetFolderFeedDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - 
url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - log.Printf("[DEBUG] Deleting FolderFeed %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "FolderFeed") - } - - log.Printf("[DEBUG] Finished deleting FolderFeed %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudAssetFolderFeedImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - if err := d.Set("name", d.Id()); err != nil { - return nil, err - } - return []*schema.ResourceData{d}, nil -} - -func flattenCloudAssetFolderFeedFolderId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedAssetNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedAssetTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedContentType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedFeedOutputConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pubsub_destination"] = - 
flattenCloudAssetFolderFeedFeedOutputConfigPubsubDestination(original["pubsubDestination"], d, config) - return []interface{}{transformed} -} -func flattenCloudAssetFolderFeedFeedOutputConfigPubsubDestination(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["topic"] = - flattenCloudAssetFolderFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) - return []interface{}{transformed} -} -func flattenCloudAssetFolderFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedCondition(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["expression"] = - flattenCloudAssetFolderFeedConditionExpression(original["expression"], d, config) - transformed["title"] = - flattenCloudAssetFolderFeedConditionTitle(original["title"], d, config) - transformed["description"] = - flattenCloudAssetFolderFeedConditionDescription(original["description"], d, config) - transformed["location"] = - flattenCloudAssetFolderFeedConditionLocation(original["location"], d, config) - return []interface{}{transformed} -} -func flattenCloudAssetFolderFeedConditionExpression(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedConditionTitle(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedConditionDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedConditionLocation(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudAssetFolderFeedAssetNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetFolderFeedAssetTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetFolderFeedContentType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetFolderFeedFeedOutputConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubDestination, err := expandCloudAssetFolderFeedFeedOutputConfigPubsubDestination(original["pubsub_destination"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPubsubDestination); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubDestination"] = transformedPubsubDestination - } - - return transformed, nil -} - -func expandCloudAssetFolderFeedFeedOutputConfigPubsubDestination(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTopic, err := expandCloudAssetFolderFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !isEmptyValue(val) { - transformed["topic"] = transformedTopic - } - - return transformed, nil -} - -func expandCloudAssetFolderFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandCloudAssetFolderFeedCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpression, err := expandCloudAssetFolderFeedConditionExpression(original["expression"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !isEmptyValue(val) { - transformed["expression"] = transformedExpression - } - - transformedTitle, err := expandCloudAssetFolderFeedConditionTitle(original["title"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { - transformed["title"] = transformedTitle - } - - transformedDescription, err := expandCloudAssetFolderFeedConditionDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedLocation, err := expandCloudAssetFolderFeedConditionLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - return transformed, nil -} - -func expandCloudAssetFolderFeedConditionExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetFolderFeedConditionTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetFolderFeedConditionDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func expandCloudAssetFolderFeedConditionLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudAssetFolderFeedEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // Remove the "folders/" prefix from the folder ID - if folder, ok := d.GetOkExists("folder"); ok { - if err := d.Set("folder_id", strings.TrimPrefix(folder.(string), "folders/")); err != nil { - return nil, fmt.Errorf("Error setting folder_id: %s", err) - } - } - // The feed object must be under the "feed" attribute on the request. - newObj := make(map[string]interface{}) - newObj["feed"] = obj - return newObj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_asset_organization_feed.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_asset_organization_feed.go deleted file mode 100644 index f888ee8204..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_asset_organization_feed.go +++ /dev/null @@ -1,648 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceCloudAssetOrganizationFeed() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudAssetOrganizationFeedCreate, - Read: resourceCloudAssetOrganizationFeedRead, - Update: resourceCloudAssetOrganizationFeedUpdate, - Delete: resourceCloudAssetOrganizationFeedDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudAssetOrganizationFeedImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "billing_project": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The project whose identity will be used when sending messages to the -destination pubsub topic. 
It also specifies the project for API -enablement check, quota, and billing.`, - }, - "feed_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `This is the client-assigned asset feed identifier and it needs to be unique under a specific parent.`, - }, - "feed_output_config": { - Type: schema.TypeList, - Required: true, - Description: `Output configuration for asset feed destination.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pubsub_destination": { - Type: schema.TypeList, - Required: true, - Description: `Destination on Cloud Pubsub.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "topic": { - Type: schema.TypeString, - Required: true, - Description: `Destination on Cloud Pubsub topic.`, - }, - }, - }, - }, - }, - }, - }, - "org_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The organization this feed should be created in.`, - }, - "asset_names": { - Type: schema.TypeList, - Optional: true, - Description: `A list of the full names of the assets to receive updates. You must specify either or both of -assetNames and assetTypes. Only asset updates matching specified assetNames and assetTypes are -exported to the feed. For example: //compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1. -See https://cloud.google.com/apis/design/resourceNames#fullResourceName for more info.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "asset_types": { - Type: schema.TypeList, - Optional: true, - Description: `A list of types of the assets to receive updates. You must specify either or both of assetNames -and assetTypes. Only asset updates matching specified assetNames and assetTypes are exported to -the feed. 
For example: "compute.googleapis.com/Disk" -See https://cloud.google.com/asset-inventory/docs/supported-asset-types for a list of all -supported asset types.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "condition": { - Type: schema.TypeList, - Optional: true, - Description: `A condition which determines whether an asset update should be published. If specified, an asset -will be returned only when the expression evaluates to true. When set, expression field -must be a valid CEL expression on a TemporalAsset with name temporal_asset. Example: a Feed with -expression "temporal_asset.deleted == true" will only publish Asset deletions. Other fields of -condition are optional.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "expression": { - Type: schema.TypeString, - Required: true, - Description: `Textual representation of an expression in Common Expression Language syntax.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Description of the expression. This is a longer text which describes the expression, -e.g. when hovered over it in a UI.`, - }, - "location": { - Type: schema.TypeString, - Optional: true, - Description: `String indicating the location of the expression for error reporting, e.g. a file -name and a position in the file.`, - }, - "title": { - Type: schema.TypeString, - Optional: true, - Description: `Title for the expression, i.e. a short string describing its purpose. -This can be used e.g. in UIs which allow to enter the expression.`, - }, - }, - }, - }, - "content_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "ACCESS_POLICY", ""}), - Description: `Asset content type. If not specified, no content but the asset name and type will be returned. 
Possible values: ["CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "ACCESS_POLICY"]`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The format will be organizations/{organization_number}/feeds/{client-assigned_feed_identifier}.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudAssetOrganizationFeedCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - assetNamesProp, err := expandCloudAssetOrganizationFeedAssetNames(d.Get("asset_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_names"); !isEmptyValue(reflect.ValueOf(assetNamesProp)) && (ok || !reflect.DeepEqual(v, assetNamesProp)) { - obj["assetNames"] = assetNamesProp - } - assetTypesProp, err := expandCloudAssetOrganizationFeedAssetTypes(d.Get("asset_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_types"); !isEmptyValue(reflect.ValueOf(assetTypesProp)) && (ok || !reflect.DeepEqual(v, assetTypesProp)) { - obj["assetTypes"] = assetTypesProp - } - contentTypeProp, err := expandCloudAssetOrganizationFeedContentType(d.Get("content_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_type"); !isEmptyValue(reflect.ValueOf(contentTypeProp)) && (ok || !reflect.DeepEqual(v, contentTypeProp)) { - obj["contentType"] = contentTypeProp - } - feedOutputConfigProp, err := expandCloudAssetOrganizationFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("feed_output_config"); !isEmptyValue(reflect.ValueOf(feedOutputConfigProp)) && (ok || !reflect.DeepEqual(v, feedOutputConfigProp)) { - obj["feedOutputConfig"] = feedOutputConfigProp - } - conditionProp, err := 
expandCloudAssetOrganizationFeedCondition(d.Get("condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("condition"); !isEmptyValue(reflect.ValueOf(conditionProp)) && (ok || !reflect.DeepEqual(v, conditionProp)) { - obj["condition"] = conditionProp - } - - obj, err = resourceCloudAssetOrganizationFeedEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}organizations/{{org_id}}/feeds?feedId={{feed_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new OrganizationFeed: %#v", obj) - billingProject := "" - - if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // Send the project ID in the X-Goog-User-Project header. - origUserProjectOverride := config.UserProjectOverride - config.UserProjectOverride = true - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating OrganizationFeed: %s", err) - } - if err := d.Set("name", flattenCloudAssetOrganizationFeedName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Restore the original value of user_project_override. 
- config.UserProjectOverride = origUserProjectOverride - - log.Printf("[DEBUG] Finished creating OrganizationFeed %q: %#v", d.Id(), res) - - return resourceCloudAssetOrganizationFeedRead(d, meta) -} - -func resourceCloudAssetOrganizationFeedRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudAssetOrganizationFeed %q", d.Id())) - } - - if err := d.Set("name", flattenCloudAssetOrganizationFeedName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading OrganizationFeed: %s", err) - } - if err := d.Set("asset_names", flattenCloudAssetOrganizationFeedAssetNames(res["assetNames"], d, config)); err != nil { - return fmt.Errorf("Error reading OrganizationFeed: %s", err) - } - if err := d.Set("asset_types", flattenCloudAssetOrganizationFeedAssetTypes(res["assetTypes"], d, config)); err != nil { - return fmt.Errorf("Error reading OrganizationFeed: %s", err) - } - if err := d.Set("content_type", flattenCloudAssetOrganizationFeedContentType(res["contentType"], d, config)); err != nil { - return fmt.Errorf("Error reading OrganizationFeed: %s", err) - } - if err := d.Set("feed_output_config", flattenCloudAssetOrganizationFeedFeedOutputConfig(res["feedOutputConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading OrganizationFeed: %s", err) - } - if err := 
d.Set("condition", flattenCloudAssetOrganizationFeedCondition(res["condition"], d, config)); err != nil { - return fmt.Errorf("Error reading OrganizationFeed: %s", err) - } - - return nil -} - -func resourceCloudAssetOrganizationFeedUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - assetNamesProp, err := expandCloudAssetOrganizationFeedAssetNames(d.Get("asset_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_names"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, assetNamesProp)) { - obj["assetNames"] = assetNamesProp - } - assetTypesProp, err := expandCloudAssetOrganizationFeedAssetTypes(d.Get("asset_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_types"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, assetTypesProp)) { - obj["assetTypes"] = assetTypesProp - } - contentTypeProp, err := expandCloudAssetOrganizationFeedContentType(d.Get("content_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, contentTypeProp)) { - obj["contentType"] = contentTypeProp - } - feedOutputConfigProp, err := expandCloudAssetOrganizationFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("feed_output_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, feedOutputConfigProp)) { - obj["feedOutputConfig"] = feedOutputConfigProp - } - conditionProp, err := expandCloudAssetOrganizationFeedCondition(d.Get("condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("condition"); !isEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, conditionProp)) { - obj["condition"] = conditionProp - } - - obj, err = resourceCloudAssetOrganizationFeedEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating OrganizationFeed %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("asset_names") { - updateMask = append(updateMask, "assetNames") - } - - if d.HasChange("asset_types") { - updateMask = append(updateMask, "assetTypes") - } - - if d.HasChange("content_type") { - updateMask = append(updateMask, "contentType") - } - - if d.HasChange("feed_output_config") { - updateMask = append(updateMask, "feedOutputConfig") - } - - if d.HasChange("condition") { - updateMask = append(updateMask, "condition") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating OrganizationFeed %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating OrganizationFeed %q: %#v", d.Id(), res) - } - - return resourceCloudAssetOrganizationFeedRead(d, meta) -} - -func resourceCloudAssetOrganizationFeedDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, 
err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - log.Printf("[DEBUG] Deleting OrganizationFeed %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "OrganizationFeed") - } - - log.Printf("[DEBUG] Finished deleting OrganizationFeed %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudAssetOrganizationFeedImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - if err := d.Set("name", d.Id()); err != nil { - return nil, err - } - return []*schema.ResourceData{d}, nil -} - -func flattenCloudAssetOrganizationFeedName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedAssetNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedAssetTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedContentType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedFeedOutputConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pubsub_destination"] = - flattenCloudAssetOrganizationFeedFeedOutputConfigPubsubDestination(original["pubsubDestination"], d, config) - 
return []interface{}{transformed} -} -func flattenCloudAssetOrganizationFeedFeedOutputConfigPubsubDestination(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["topic"] = - flattenCloudAssetOrganizationFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) - return []interface{}{transformed} -} -func flattenCloudAssetOrganizationFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedCondition(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["expression"] = - flattenCloudAssetOrganizationFeedConditionExpression(original["expression"], d, config) - transformed["title"] = - flattenCloudAssetOrganizationFeedConditionTitle(original["title"], d, config) - transformed["description"] = - flattenCloudAssetOrganizationFeedConditionDescription(original["description"], d, config) - transformed["location"] = - flattenCloudAssetOrganizationFeedConditionLocation(original["location"], d, config) - return []interface{}{transformed} -} -func flattenCloudAssetOrganizationFeedConditionExpression(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedConditionTitle(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedConditionDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedConditionLocation(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - return v -} - -func expandCloudAssetOrganizationFeedAssetNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedAssetTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedContentType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedFeedOutputConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubDestination, err := expandCloudAssetOrganizationFeedFeedOutputConfigPubsubDestination(original["pubsub_destination"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPubsubDestination); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubDestination"] = transformedPubsubDestination - } - - return transformed, nil -} - -func expandCloudAssetOrganizationFeedFeedOutputConfigPubsubDestination(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTopic, err := expandCloudAssetOrganizationFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !isEmptyValue(val) { - transformed["topic"] = transformedTopic - } - - return transformed, nil -} - -func expandCloudAssetOrganizationFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpression, err := expandCloudAssetOrganizationFeedConditionExpression(original["expression"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !isEmptyValue(val) { - transformed["expression"] = transformedExpression - } - - transformedTitle, err := expandCloudAssetOrganizationFeedConditionTitle(original["title"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { - transformed["title"] = transformedTitle - } - - transformedDescription, err := expandCloudAssetOrganizationFeedConditionDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedLocation, err := expandCloudAssetOrganizationFeedConditionLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - return transformed, nil -} - -func expandCloudAssetOrganizationFeedConditionExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedConditionTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedConditionDescription(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedConditionLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudAssetOrganizationFeedEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // Remove the "folders/" prefix from the folder ID - if folder, ok := d.GetOkExists("folder"); ok { - if err := d.Set("folder_id", strings.TrimPrefix(folder.(string), "folders/")); err != nil { - return nil, fmt.Errorf("Error setting folder_id: %s", err) - } - } - // The feed object must be under the "feed" attribute on the request. - newObj := make(map[string]interface{}) - newObj["feed"] = obj - return newObj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_asset_project_feed.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_asset_project_feed.go deleted file mode 100644 index d59b72a377..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_asset_project_feed.go +++ /dev/null @@ -1,660 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceCloudAssetProjectFeed() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudAssetProjectFeedCreate, - Read: resourceCloudAssetProjectFeedRead, - Update: resourceCloudAssetProjectFeedUpdate, - Delete: resourceCloudAssetProjectFeedDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudAssetProjectFeedImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "feed_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `This is the client-assigned asset feed identifier and it needs to be unique under a specific parent.`, - }, - "feed_output_config": { - Type: schema.TypeList, - Required: true, - Description: `Output configuration for asset feed destination.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pubsub_destination": { - Type: schema.TypeList, - Required: true, - Description: `Destination on Cloud Pubsub.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "topic": { - Type: schema.TypeString, - Required: true, - Description: `Destination on Cloud Pubsub topic.`, - }, - }, - }, - }, - }, - }, - }, - "asset_names": { - Type: schema.TypeList, - Optional: true, - Description: `A list of the full names of the assets to receive updates. You must specify either or both of -assetNames and assetTypes. Only asset updates matching specified assetNames and assetTypes are -exported to the feed. For example: //compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1. 
-See https://cloud.google.com/apis/design/resourceNames#fullResourceName for more info.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "asset_types": { - Type: schema.TypeList, - Optional: true, - Description: `A list of types of the assets to receive updates. You must specify either or both of assetNames -and assetTypes. Only asset updates matching specified assetNames and assetTypes are exported to -the feed. For example: "compute.googleapis.com/Disk" -See https://cloud.google.com/asset-inventory/docs/supported-asset-types for a list of all -supported asset types.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "billing_project": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The project whose identity will be used when sending messages to the -destination pubsub topic. It also specifies the project for API -enablement check, quota, and billing. If not specified, the resource's -project will be used.`, - }, - "condition": { - Type: schema.TypeList, - Optional: true, - Description: `A condition which determines whether an asset update should be published. If specified, an asset -will be returned only when the expression evaluates to true. When set, expression field -must be a valid CEL expression on a TemporalAsset with name temporal_asset. Example: a Feed with -expression "temporal_asset.deleted == true" will only publish Asset deletions. Other fields of -condition are optional.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "expression": { - Type: schema.TypeString, - Required: true, - Description: `Textual representation of an expression in Common Expression Language syntax.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Description of the expression. This is a longer text which describes the expression, -e.g. 
when hovered over it in a UI.`, - }, - "location": { - Type: schema.TypeString, - Optional: true, - Description: `String indicating the location of the expression for error reporting, e.g. a file -name and a position in the file.`, - }, - "title": { - Type: schema.TypeString, - Optional: true, - Description: `Title for the expression, i.e. a short string describing its purpose. -This can be used e.g. in UIs which allow to enter the expression.`, - }, - }, - }, - }, - "content_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "ACCESS_POLICY", ""}), - Description: `Asset content type. If not specified, no content but the asset name and type will be returned. Possible values: ["CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "ACCESS_POLICY"]`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The format will be projects/{projectNumber}/feeds/{client-assigned_feed_identifier}.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudAssetProjectFeedCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - assetNamesProp, err := expandCloudAssetProjectFeedAssetNames(d.Get("asset_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_names"); !isEmptyValue(reflect.ValueOf(assetNamesProp)) && (ok || !reflect.DeepEqual(v, assetNamesProp)) { - obj["assetNames"] = assetNamesProp - } - assetTypesProp, err := expandCloudAssetProjectFeedAssetTypes(d.Get("asset_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_types"); !isEmptyValue(reflect.ValueOf(assetTypesProp)) && (ok || 
!reflect.DeepEqual(v, assetTypesProp)) { - obj["assetTypes"] = assetTypesProp - } - contentTypeProp, err := expandCloudAssetProjectFeedContentType(d.Get("content_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_type"); !isEmptyValue(reflect.ValueOf(contentTypeProp)) && (ok || !reflect.DeepEqual(v, contentTypeProp)) { - obj["contentType"] = contentTypeProp - } - feedOutputConfigProp, err := expandCloudAssetProjectFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("feed_output_config"); !isEmptyValue(reflect.ValueOf(feedOutputConfigProp)) && (ok || !reflect.DeepEqual(v, feedOutputConfigProp)) { - obj["feedOutputConfig"] = feedOutputConfigProp - } - conditionProp, err := expandCloudAssetProjectFeedCondition(d.Get("condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("condition"); !isEmptyValue(reflect.ValueOf(conditionProp)) && (ok || !reflect.DeepEqual(v, conditionProp)) { - obj["condition"] = conditionProp - } - - obj, err = resourceCloudAssetProjectFeedEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}projects/{{project}}/feeds?feedId={{feed_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ProjectFeed: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ProjectFeed: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // Send the project ID in the X-Goog-User-Project header. 
- origUserProjectOverride := config.UserProjectOverride - config.UserProjectOverride = true - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating ProjectFeed: %s", err) - } - if err := d.Set("name", flattenCloudAssetProjectFeedName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Restore the original value of user_project_override. - config.UserProjectOverride = origUserProjectOverride - - log.Printf("[DEBUG] Finished creating ProjectFeed %q: %#v", d.Id(), res) - - return resourceCloudAssetProjectFeedRead(d, meta) -} - -func resourceCloudAssetProjectFeedRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ProjectFeed: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudAssetProjectFeed %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading ProjectFeed: %s", err) - } - - if err := d.Set("name", flattenCloudAssetProjectFeedName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading 
ProjectFeed: %s", err) - } - if err := d.Set("asset_names", flattenCloudAssetProjectFeedAssetNames(res["assetNames"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectFeed: %s", err) - } - if err := d.Set("asset_types", flattenCloudAssetProjectFeedAssetTypes(res["assetTypes"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectFeed: %s", err) - } - if err := d.Set("content_type", flattenCloudAssetProjectFeedContentType(res["contentType"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectFeed: %s", err) - } - if err := d.Set("feed_output_config", flattenCloudAssetProjectFeedFeedOutputConfig(res["feedOutputConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectFeed: %s", err) - } - if err := d.Set("condition", flattenCloudAssetProjectFeedCondition(res["condition"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectFeed: %s", err) - } - - return nil -} - -func resourceCloudAssetProjectFeedUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ProjectFeed: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - assetNamesProp, err := expandCloudAssetProjectFeedAssetNames(d.Get("asset_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_names"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, assetNamesProp)) { - obj["assetNames"] = assetNamesProp - } - assetTypesProp, err := expandCloudAssetProjectFeedAssetTypes(d.Get("asset_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_types"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, assetTypesProp)) { - obj["assetTypes"] = 
assetTypesProp - } - contentTypeProp, err := expandCloudAssetProjectFeedContentType(d.Get("content_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, contentTypeProp)) { - obj["contentType"] = contentTypeProp - } - feedOutputConfigProp, err := expandCloudAssetProjectFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("feed_output_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, feedOutputConfigProp)) { - obj["feedOutputConfig"] = feedOutputConfigProp - } - conditionProp, err := expandCloudAssetProjectFeedCondition(d.Get("condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("condition"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, conditionProp)) { - obj["condition"] = conditionProp - } - - obj, err = resourceCloudAssetProjectFeedEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating ProjectFeed %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("asset_names") { - updateMask = append(updateMask, "assetNames") - } - - if d.HasChange("asset_types") { - updateMask = append(updateMask, "assetTypes") - } - - if d.HasChange("content_type") { - updateMask = append(updateMask, "contentType") - } - - if d.HasChange("feed_output_config") { - updateMask = append(updateMask, "feedOutputConfig") - } - - if d.HasChange("condition") { - updateMask = append(updateMask, "condition") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the 
billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating ProjectFeed %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating ProjectFeed %q: %#v", d.Id(), res) - } - - return resourceCloudAssetProjectFeedRead(d, meta) -} - -func resourceCloudAssetProjectFeedDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ProjectFeed: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting ProjectFeed %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ProjectFeed") - } - - log.Printf("[DEBUG] Finished deleting ProjectFeed %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudAssetProjectFeedImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - if err := d.Set("name", d.Id()); err != nil { - return nil, err - } - return []*schema.ResourceData{d}, nil -} - -func flattenCloudAssetProjectFeedName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedAssetNames(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenCloudAssetProjectFeedAssetTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedContentType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedFeedOutputConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pubsub_destination"] = - flattenCloudAssetProjectFeedFeedOutputConfigPubsubDestination(original["pubsubDestination"], d, config) - return []interface{}{transformed} -} -func flattenCloudAssetProjectFeedFeedOutputConfigPubsubDestination(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["topic"] = - flattenCloudAssetProjectFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) - return []interface{}{transformed} -} -func flattenCloudAssetProjectFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedCondition(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["expression"] = - flattenCloudAssetProjectFeedConditionExpression(original["expression"], d, config) - transformed["title"] = - flattenCloudAssetProjectFeedConditionTitle(original["title"], d, config) - transformed["description"] = - flattenCloudAssetProjectFeedConditionDescription(original["description"], d, config) - 
transformed["location"] = - flattenCloudAssetProjectFeedConditionLocation(original["location"], d, config) - return []interface{}{transformed} -} -func flattenCloudAssetProjectFeedConditionExpression(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedConditionTitle(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedConditionDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedConditionLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudAssetProjectFeedAssetNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetProjectFeedAssetTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetProjectFeedContentType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetProjectFeedFeedOutputConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubDestination, err := expandCloudAssetProjectFeedFeedOutputConfigPubsubDestination(original["pubsub_destination"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPubsubDestination); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubDestination"] = transformedPubsubDestination - } - - return transformed, nil -} - -func expandCloudAssetProjectFeedFeedOutputConfigPubsubDestination(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := 
v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTopic, err := expandCloudAssetProjectFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !isEmptyValue(val) { - transformed["topic"] = transformedTopic - } - - return transformed, nil -} - -func expandCloudAssetProjectFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetProjectFeedCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpression, err := expandCloudAssetProjectFeedConditionExpression(original["expression"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !isEmptyValue(val) { - transformed["expression"] = transformedExpression - } - - transformedTitle, err := expandCloudAssetProjectFeedConditionTitle(original["title"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { - transformed["title"] = transformedTitle - } - - transformedDescription, err := expandCloudAssetProjectFeedConditionDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedLocation, err := expandCloudAssetProjectFeedConditionLocation(original["location"], d, config) - if err != 
nil { - return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - return transformed, nil -} - -func expandCloudAssetProjectFeedConditionExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetProjectFeedConditionTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetProjectFeedConditionDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetProjectFeedConditionLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudAssetProjectFeedEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // Remove the "folders/" prefix from the folder ID - if folder, ok := d.GetOkExists("folder"); ok { - if err := d.Set("folder_id", strings.TrimPrefix(folder.(string), "folders/")); err != nil { - return nil, fmt.Errorf("Error setting folder_id: %s", err) - } - } - // The feed object must be under the "feed" attribute on the request. 
- newObj := make(map[string]interface{}) - newObj["feed"] = obj - return newObj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_identity_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_identity_group.go deleted file mode 100644 index 13b5d0a1cf..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_identity_group.go +++ /dev/null @@ -1,565 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceCloudIdentityGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudIdentityGroupCreate, - Read: resourceCloudIdentityGroupRead, - Update: resourceCloudIdentityGroupUpdate, - Delete: resourceCloudIdentityGroupDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudIdentityGroupImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "group_key": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `EntityKey of the Group.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the entity. - -For Google-managed entities, the id must be the email address of an existing -group or user. - -For external-identity-mapped entities, the id must be a string conforming -to the Identity Source's requirements. - -Must be unique within a namespace.`, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The namespace in which the entity exists. - -If not specified, the EntityKey represents a Google-managed entity -such as a Google user or a Google Group. - -If specified, the EntityKey represents an external-identity-mapped group. -The namespace must correspond to an identity source created in Admin Console -and must be in the form of 'identitysources/{identity_source_id}'.`, - }, - }, - }, - }, - "labels": { - Type: schema.TypeMap, - Required: true, - Description: `One or more label entries that apply to the Group. Currently supported labels contain a key with an empty value. - -Google Groups are the default type of group and have a label with a key of cloudidentity.googleapis.com/groups.discussion_forum and an empty value. - -Existing Google Groups can have an additional label with a key of cloudidentity.googleapis.com/groups.security and an empty value added to them. This is an immutable change and the security label cannot be removed once added. - -Dynamic groups have a label with a key of cloudidentity.googleapis.com/groups.dynamic. - -Identity-mapped groups for Cloud Search have a label with a key of system/groups/external and an empty value.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name of the entity under which this Group resides in the -Cloud Identity resource hierarchy. 
- -Must be of the form identitysources/{identity_source_id} for external-identity-mapped -groups or customers/{customer_id} for Google Groups.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `An extended description to help users determine the purpose of a Group. -Must not be longer than 4,096 characters.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `The display name of the Group.`, - }, - "initial_group_config": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"INITIAL_GROUP_CONFIG_UNSPECIFIED", "WITH_INITIAL_OWNER", "EMPTY", ""}), - Description: `The initial configuration options for creating a Group. - -See the -[API reference](https://cloud.google.com/identity/docs/reference/rest/v1beta1/groups/create#initialgroupconfig) -for possible values. Default value: "EMPTY" Possible values: ["INITIAL_GROUP_CONFIG_UNSPECIFIED", "WITH_INITIAL_OWNER", "EMPTY"]`, - Default: "EMPTY", - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time when the Group was created.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Resource name of the Group in the format: groups/{group_id}, where group_id -is the unique ID assigned to the Group.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time when the Group was last updated.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudIdentityGroupCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - groupKeyProp, err := expandCloudIdentityGroupGroupKey(d.Get("group_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("group_key"); !isEmptyValue(reflect.ValueOf(groupKeyProp)) && (ok || !reflect.DeepEqual(v, 
groupKeyProp)) { - obj["groupKey"] = groupKeyProp - } - parentProp, err := expandCloudIdentityGroupParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - displayNameProp, err := expandCloudIdentityGroupDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandCloudIdentityGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandCloudIdentityGroupLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}groups?initialGroupConfig={{initial_group_config}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Group: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Group: %s", err) - } - if err := d.Set("name", flattenCloudIdentityGroupName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field 
"name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - err = PollingWaitTime(resourceCloudIdentityGroupPollRead(d, meta), PollCheckForExistenceWith403, "Creating Group", d.Timeout(schema.TimeoutCreate), 10) - if err != nil { - return fmt.Errorf("Error waiting to create Group: %s", err) - } - - log.Printf("[DEBUG] Finished creating Group %q: %#v", d.Id(), res) - - return resourceCloudIdentityGroupRead(d, meta) -} - -func resourceCloudIdentityGroupPollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - return res, nil - } -} - -func resourceCloudIdentityGroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudIdentityGroup %q", d.Id())) - } - - if err := d.Set("name", flattenCloudIdentityGroupName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("group_key", flattenCloudIdentityGroupGroupKey(res["groupKey"], d, config)); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("parent", flattenCloudIdentityGroupParent(res["parent"], d, config)); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("display_name", flattenCloudIdentityGroupDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("description", flattenCloudIdentityGroupDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("create_time", flattenCloudIdentityGroupCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("update_time", flattenCloudIdentityGroupUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("labels", flattenCloudIdentityGroupLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - - return nil -} - -func resourceCloudIdentityGroupUpdate(d *schema.ResourceData, meta interface{}) error { - config := 
meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandCloudIdentityGroupDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandCloudIdentityGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandCloudIdentityGroupLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Group %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, 
obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Group %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Group %q: %#v", d.Id(), res) - } - - err = PollingWaitTime(resourceCloudIdentityGroupPollRead(d, meta), PollCheckForExistenceWith403, "Updating Group", d.Timeout(schema.TimeoutUpdate), 10) - if err != nil { - return err - } - - return resourceCloudIdentityGroupRead(d, meta) -} - -func resourceCloudIdentityGroupDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Group %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Group") - } - - err = PollingWaitTime(resourceCloudIdentityGroupPollRead(d, meta), PollCheckForAbsenceWith403, "Deleting Group", d.Timeout(schema.TimeoutCreate), 10) - if err != nil { - return fmt.Errorf("Error waiting to delete Group: %s", err) - } - - log.Printf("[DEBUG] Finished deleting Group %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudIdentityGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) - - if d.Get("initial_group_config") == nil { - d.Set("initial_group_config", 
"EMPTY") - } - - if err := d.Set("name", name); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - return []*schema.ResourceData{d}, nil -} - -func flattenCloudIdentityGroupName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupGroupKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["id"] = - flattenCloudIdentityGroupGroupKeyId(original["id"], d, config) - transformed["namespace"] = - flattenCloudIdentityGroupGroupKeyNamespace(original["namespace"], d, config) - return []interface{}{transformed} -} -func flattenCloudIdentityGroupGroupKeyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupGroupKeyNamespace(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupParent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudIdentityGroupGroupKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == 
nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandCloudIdentityGroupGroupKeyId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedNamespace, err := expandCloudIdentityGroupGroupKeyNamespace(original["namespace"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !isEmptyValue(val) { - transformed["namespace"] = transformedNamespace - } - - return transformed, nil -} - -func expandCloudIdentityGroupGroupKeyId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupGroupKeyNamespace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_identity_group_membership.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_identity_group_membership.go deleted file mode 100644 index cf31c721ac..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_identity_group_membership.go +++ /dev/null @@ -1,496 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceCloudIdentityGroupMembership() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudIdentityGroupMembershipCreate, - Read: resourceCloudIdentityGroupMembershipRead, - Update: resourceCloudIdentityGroupMembershipUpdate, - Delete: resourceCloudIdentityGroupMembershipDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudIdentityGroupMembershipImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "group": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the Group to create this membership in.`, - }, - "roles": { - Type: schema.TypeSet, - Required: true, - Description: `The MembershipRoles that apply to the Membership. 
-Must not contain duplicate MembershipRoles with the same name.`, - Elem: cloudidentityGroupMembershipRolesSchema(), - // Default schema.HashSchema is used. - }, - "preferred_member_key": { - Type: schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `EntityKey of the member.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the entity. - -For Google-managed entities, the id must be the email address of an existing -group or user. - -For external-identity-mapped entities, the id must be a string conforming -to the Identity Source's requirements. - -Must be unique within a namespace.`, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The namespace in which the entity exists. - -If not specified, the EntityKey represents a Google-managed entity -such as a Google user or a Google Group. - -If specified, the EntityKey represents an external-identity-mapped group. 
-The namespace must correspond to an identity source created in Admin Console -and must be in the form of 'identitysources/{identity_source_id}'.`, - }, - }, - }, - ExactlyOneOf: []string{"preferred_member_key"}, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time when the Membership was created.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the Membership, of the form groups/{group_id}/memberships/{membership_id}.`, - }, - "type": { - Type: schema.TypeString, - Computed: true, - Description: `The type of the membership.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time when the Membership was last updated.`, - }, - }, - UseJSONNumber: true, - } -} - -func cloudidentityGroupMembershipRolesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"OWNER", "MANAGER", "MEMBER"}), - Description: `The name of the MembershipRole. Must be one of OWNER, MANAGER, MEMBER. 
Possible values: ["OWNER", "MANAGER", "MEMBER"]`, - }, - }, - } -} - -func resourceCloudIdentityGroupMembershipCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - preferredMemberKeyProp, err := expandCloudIdentityGroupMembershipPreferredMemberKey(d.Get("preferred_member_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("preferred_member_key"); !isEmptyValue(reflect.ValueOf(preferredMemberKeyProp)) && (ok || !reflect.DeepEqual(v, preferredMemberKeyProp)) { - obj["preferredMemberKey"] = preferredMemberKeyProp - } - rolesProp, err := expandCloudIdentityGroupMembershipRoles(d.Get("roles"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("roles"); !isEmptyValue(reflect.ValueOf(rolesProp)) && (ok || !reflect.DeepEqual(v, rolesProp)) { - obj["roles"] = rolesProp - } - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{group}}/memberships") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new GroupMembership: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating GroupMembership: %s", err) - } - if err := d.Set("name", flattenCloudIdentityGroupMembershipName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set 
post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - log.Printf("[DEBUG] Finished creating GroupMembership %q: %#v", d.Id(), res) - - return resourceCloudIdentityGroupMembershipRead(d, meta) -} - -func resourceCloudIdentityGroupMembershipRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(transformCloudIdentityGroupMembershipReadError(err), d, fmt.Sprintf("CloudIdentityGroupMembership %q", d.Id())) - } - - if err := d.Set("name", flattenCloudIdentityGroupMembershipName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading GroupMembership: %s", err) - } - if err := d.Set("preferred_member_key", flattenCloudIdentityGroupMembershipPreferredMemberKey(res["preferredMemberKey"], d, config)); err != nil { - return fmt.Errorf("Error reading GroupMembership: %s", err) - } - if err := d.Set("create_time", flattenCloudIdentityGroupMembershipCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading GroupMembership: %s", err) - } - if 
err := d.Set("update_time", flattenCloudIdentityGroupMembershipUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading GroupMembership: %s", err) - } - if err := d.Set("roles", flattenCloudIdentityGroupMembershipRoles(res["roles"], d, config)); err != nil { - return fmt.Errorf("Error reading GroupMembership: %s", err) - } - if err := d.Set("type", flattenCloudIdentityGroupMembershipType(res["type"], d, config)); err != nil { - return fmt.Errorf("Error reading GroupMembership: %s", err) - } - - return nil -} - -func resourceCloudIdentityGroupMembershipUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - d.Partial(true) - - if d.HasChange("roles") { - obj := make(map[string]interface{}) - - rolesProp, err := expandCloudIdentityGroupMembershipRoles(d.Get("roles"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("roles"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rolesProp)) { - obj["roles"] = rolesProp - } - - obj, err = resourceCloudIdentityGroupMembershipUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}:modifyMembershipRoles") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating GroupMembership %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating GroupMembership %q: %#v", d.Id(), res) - } - - } - - d.Partial(false) - - return resourceCloudIdentityGroupMembershipRead(d, meta) -} - -func 
resourceCloudIdentityGroupMembershipDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting GroupMembership %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GroupMembership") - } - - log.Printf("[DEBUG] Finished deleting GroupMembership %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudIdentityGroupMembershipImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P.+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Configure "group" property, which does not appear in the response body. 
- group := regexp.MustCompile(`groups/[^/]+`).FindString(id) - if err := d.Set("group", group); err != nil { - return nil, fmt.Errorf("Error setting group property: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenCloudIdentityGroupMembershipName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupMembershipPreferredMemberKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["id"] = - flattenCloudIdentityGroupMembershipPreferredMemberKeyId(original["id"], d, config) - transformed["namespace"] = - flattenCloudIdentityGroupMembershipPreferredMemberKeyNamespace(original["namespace"], d, config) - return []interface{}{transformed} -} -func flattenCloudIdentityGroupMembershipPreferredMemberKeyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupMembershipPreferredMemberKeyNamespace(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupMembershipCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupMembershipUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupMembershipRoles(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(schema.HashResource(cloudidentityGroupMembershipRolesSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - 
"name": flattenCloudIdentityGroupMembershipRolesName(original["name"], d, config), - }) - } - return transformed -} -func flattenCloudIdentityGroupMembershipRolesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupMembershipType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudIdentityGroupMembershipPreferredMemberKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandCloudIdentityGroupMembershipPreferredMemberKeyId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedNamespace, err := expandCloudIdentityGroupMembershipPreferredMemberKeyNamespace(original["namespace"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !isEmptyValue(val) { - transformed["namespace"] = transformedNamespace - } - - return transformed, nil -} - -func expandCloudIdentityGroupMembershipPreferredMemberKeyId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupMembershipPreferredMemberKeyNamespace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupMembershipRoles(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedName, err := expandCloudIdentityGroupMembershipRolesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudIdentityGroupMembershipRolesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudIdentityGroupMembershipUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // Return object for modifyMembershipRoles (we build request object from scratch, without using `obj`) - b, a := d.GetChange("roles") - before := b.(*schema.Set) - after := a.(*schema.Set) - // ref: https://cloud.google.com/identity/docs/reference/rest/v1/groups.memberships/modifyMembershipRoles#request-body - addRoles := after.Difference(before).List() - var removeRoles []string - for _, r := range before.Difference(after).List() { - removeRoles = append(removeRoles, r.(map[string]interface{})["name"].(string)) - } - req := map[string]interface{}{"addRoles": addRoles, "removeRoles": removeRoles} - return req, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_ids_endpoint.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_ids_endpoint.go deleted file mode 100644 index 4be8c490d1..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_ids_endpoint.go +++ /dev/null @@ -1,464 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file 
is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceCloudIdsEndpoint() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudIdsEndpointCreate, - Read: resourceCloudIdsEndpointRead, - Update: resourceCloudIdsEndpointUpdate, - Delete: resourceCloudIdsEndpointDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudIdsEndpointImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location for the endpoint.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the endpoint in the format projects/{project_id}/locations/{locationId}/endpoints/{endpointId}.`, - }, - "network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the VPC network that is connected to the IDS endpoint. This can either contain the VPC network name itself (like "src-net") or the full URL to the network (like "projects/{project_id}/global/networks/src-net").`, - }, - "severity": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"INFORMATIONAL", "LOW", "MEDIUM", "HIGH", "CRITICAL"}), - Description: `The minimum alert severity level that is reported by the endpoint. 
Possible values: ["INFORMATIONAL", "LOW", "MEDIUM", "HIGH", "CRITICAL"]`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of the endpoint.`, - }, - "threat_exceptions": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration for threat IDs excluded from generating alerts. Limit: 99 IDs.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC 3339 text format.`, - }, - "endpoint_forwarding_rule": { - Type: schema.TypeString, - Computed: true, - Description: `URL of the endpoint's network address to which traffic is to be sent by Packet Mirroring.`, - }, - "endpoint_ip": { - Type: schema.TypeString, - Computed: true, - Description: `Internal IP address of the endpoint's network entry point.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Last update timestamp in RFC 3339 text format.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudIdsEndpointCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandCloudIdsEndpointName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandCloudIdsEndpointNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - 
descriptionProp, err := expandCloudIdsEndpointDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - severityProp, err := expandCloudIdsEndpointSeverity(d.Get("severity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("severity"); !isEmptyValue(reflect.ValueOf(severityProp)) && (ok || !reflect.DeepEqual(v, severityProp)) { - obj["severity"] = severityProp - } - threatExceptionsProp, err := expandCloudIdsEndpointThreatExceptions(d.Get("threat_exceptions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("threat_exceptions"); !isEmptyValue(reflect.ValueOf(threatExceptionsProp)) && (ok || !reflect.DeepEqual(v, threatExceptionsProp)) { - obj["threatExceptions"] = threatExceptionsProp - } - - url, err := replaceVars(d, config, "{{CloudIdsBasePath}}projects/{{project}}/locations/{{location}}/endpoints?endpointId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Endpoint: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Endpoint: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Endpoint: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation 
response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = CloudIdsOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Endpoint", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Endpoint: %s", err) - } - - if err := d.Set("name", flattenCloudIdsEndpointName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Endpoint %q: %#v", d.Id(), res) - - return resourceCloudIdsEndpointRead(d, meta) -} - -func resourceCloudIdsEndpointRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIdsBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Endpoint: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudIdsEndpoint %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Endpoint: %s", err) - } - - if err := d.Set("name", flattenCloudIdsEndpointName(res["name"], d, config)); err != nil { - return 
fmt.Errorf("Error reading Endpoint: %s", err) - } - if err := d.Set("create_time", flattenCloudIdsEndpointCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Endpoint: %s", err) - } - if err := d.Set("update_time", flattenCloudIdsEndpointUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Endpoint: %s", err) - } - if err := d.Set("network", flattenCloudIdsEndpointNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading Endpoint: %s", err) - } - if err := d.Set("description", flattenCloudIdsEndpointDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Endpoint: %s", err) - } - if err := d.Set("endpoint_forwarding_rule", flattenCloudIdsEndpointEndpointForwardingRule(res["endpointForwardingRule"], d, config)); err != nil { - return fmt.Errorf("Error reading Endpoint: %s", err) - } - if err := d.Set("endpoint_ip", flattenCloudIdsEndpointEndpointIp(res["endpointIp"], d, config)); err != nil { - return fmt.Errorf("Error reading Endpoint: %s", err) - } - if err := d.Set("severity", flattenCloudIdsEndpointSeverity(res["severity"], d, config)); err != nil { - return fmt.Errorf("Error reading Endpoint: %s", err) - } - if err := d.Set("threat_exceptions", flattenCloudIdsEndpointThreatExceptions(res["threatExceptions"], d, config)); err != nil { - return fmt.Errorf("Error reading Endpoint: %s", err) - } - - return nil -} - -func resourceCloudIdsEndpointUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Endpoint: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - threatExceptionsProp, err := 
expandCloudIdsEndpointThreatExceptions(d.Get("threat_exceptions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("threat_exceptions"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, threatExceptionsProp)) { - obj["threatExceptions"] = threatExceptionsProp - } - - url, err := replaceVars(d, config, "{{CloudIdsBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Endpoint %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("threat_exceptions") { - updateMask = append(updateMask, "threatExceptions") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Endpoint %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Endpoint %q: %#v", d.Id(), res) - } - - err = CloudIdsOperationWaitTime( - config, res, project, "Updating Endpoint", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceCloudIdsEndpointRead(d, meta) -} - -func resourceCloudIdsEndpointDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Endpoint: %s", err) - } - billingProject = project - - url, err := replaceVars(d, 
config, "{{CloudIdsBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Endpoint %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Endpoint") - } - - err = CloudIdsOperationWaitTime( - config, res, project, "Deleting Endpoint", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Endpoint %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudIdsEndpointImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/endpoints/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenCloudIdsEndpointName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - parts := strings.Split(d.Get("name").(string), "/") - return parts[len(parts)-1] -} - -func flattenCloudIdsEndpointCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdsEndpointUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdsEndpointNetwork(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenCloudIdsEndpointDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdsEndpointEndpointForwardingRule(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdsEndpointEndpointIp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdsEndpointSeverity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdsEndpointThreatExceptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudIdsEndpointName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") -} - -func expandCloudIdsEndpointNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdsEndpointDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdsEndpointSeverity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdsEndpointThreatExceptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_run_domain_mapping.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_run_domain_mapping.go deleted file mode 100644 index d214928aad..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_run_domain_mapping.go +++ /dev/null @@ -1,878 +0,0 @@ -// 
---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -var domainMappingGoogleProvidedLabels = []string{ - "cloud.googleapis.com/location", - "run.googleapis.com/overrideAt", -} - -func domainMappingLabelDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // Suppress diffs for the labels provided by Google - for _, label := range domainMappingGoogleProvidedLabels { - if strings.Contains(k, label) && new == "" { - return true - } - } - - // Let diff be determined by labels (above) - if strings.Contains(k, "labels.%") { - return true - } - - // For other keys, don't suppress diff. - return false -} - -func ResourceCloudRunDomainMapping() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudRunDomainMappingCreate, - Read: resourceCloudRunDomainMappingRead, - Delete: resourceCloudRunDomainMappingDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudRunDomainMappingImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location of the cloud run instance. 
eg us-central1`, - }, - "metadata": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `Metadata associated with this DomainMapping.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "namespace": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `In Cloud Run the namespace must be equal to either the -project ID or project number.`, - }, - "annotations": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: cloudrunAnnotationDiffSuppress, - Description: `Annotations is a key value map stored with a resource that -may be set by external tools to store and retrieve arbitrary metadata. More -info: http://kubernetes.io/docs/user-guide/annotations - -**Note**: The Cloud Run API may add additional annotations that were not provided in your config. -If terraform plan shows a diff where a server-side annotation is added, you can add it to your config -or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "labels": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: domainMappingLabelDiffSuppress, - Description: `Map of string keys and values that can be used to organize and categorize -(scope and select) objects. May match selectors of replication controllers -and routes. -More info: http://kubernetes.io/docs/user-guide/labels`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "generation": { - Type: schema.TypeInt, - Computed: true, - Description: `A sequence number representing a specific generation of the desired state.`, - }, - "resource_version": { - Type: schema.TypeString, - Computed: true, - Description: `An opaque value that represents the internal version of this object that -can be used by clients to determine when objects have changed. 
May be used -for optimistic concurrency, change detection, and the watch operation on a -resource or set of resources. They may only be valid for a -particular resource or set of resources. - -More info: -https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency`, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: `SelfLink is a URL representing this object.`, - }, - "uid": { - Type: schema.TypeString, - Computed: true, - Description: `UID is a unique id generated by the server on successful creation of a resource and is not -allowed to change on PUT operations. - -More info: http://kubernetes.io/docs/user-guide/identifiers#uids`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name should be a [verified](https://support.google.com/webmasters/answer/9008080) domain`, - }, - "spec": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `The spec for this DomainMapping.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "route_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the Cloud Run Service that this DomainMapping applies to. -The route must exist.`, - }, - "certificate_mode": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"NONE", "AUTOMATIC", ""}), - Description: `The mode of the certificate. Default value: "AUTOMATIC" Possible values: ["NONE", "AUTOMATIC"]`, - Default: "AUTOMATIC", - }, - "force_override": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If set, the mapping will override any mapping set before this spec was set. 
-It is recommended that the user leaves this empty to receive an error -warning about a potential conflict and only set it once the respective UI -has given such a warning.`, - }, - }, - }, - }, - "status": { - Type: schema.TypeList, - Computed: true, - Description: `The current status of the DomainMapping.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "resource_records": { - Type: schema.TypeList, - Optional: true, - Description: `The resource records required to configure this domain mapping. These -records must be added to the domain's DNS configuration in order to -serve the application via this domain mapping.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"A", "AAAA", "CNAME", ""}), - Description: `Resource record type. Example: 'AAAA'. Possible values: ["A", "AAAA", "CNAME"]`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Relative name of the object affected by this record. Only applicable for -'CNAME' records. Example: 'www'.`, - }, - "rrdata": { - Type: schema.TypeString, - Computed: true, - Description: `Data for this record. 
Values vary by record type, as defined in RFC 1035 -(section 5) and RFC 1034 (section 3.6.1).`, - }, - }, - }, - }, - "conditions": { - Type: schema.TypeList, - Computed: true, - Description: `Array of observed DomainMappingConditions, indicating the current state -of the DomainMapping.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "message": { - Type: schema.TypeString, - Computed: true, - Description: `Human readable message indicating details about the current status.`, - }, - "reason": { - Type: schema.TypeString, - Computed: true, - Description: `One-word CamelCase reason for the condition's current status.`, - }, - "status": { - Type: schema.TypeString, - Computed: true, - Description: `Status of the condition, one of True, False, Unknown.`, - }, - "type": { - Type: schema.TypeString, - Computed: true, - Description: `Type of domain mapping condition.`, - }, - }, - }, - }, - "mapped_route_name": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the route that the mapping currently points to.`, - }, - "observed_generation": { - Type: schema.TypeInt, - Computed: true, - Description: `ObservedGeneration is the 'Generation' of the DomainMapping that -was last processed by the controller.`, - }, - }, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudRunDomainMappingCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - specProp, err := expandCloudRunDomainMappingSpec(d.Get("spec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("spec"); !isEmptyValue(reflect.ValueOf(specProp)) && (ok || !reflect.DeepEqual(v, specProp)) { - obj["spec"] = specProp - } - metadataProp, err := 
expandCloudRunDomainMappingMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(reflect.ValueOf(metadataProp)) && (ok || !reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - - obj, err = resourceCloudRunDomainMappingEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new DomainMapping: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isCloudRunCreationConflict) - if err != nil { - return fmt.Errorf("Error creating DomainMapping: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "locations/{{location}}/namespaces/{{project}}/domainmappings/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourceCloudRunDomainMappingPollRead(d, meta), PollCheckKnativeStatusFunc(res), "Creating DomainMapping", d.Timeout(schema.TimeoutCreate), 1) - if err != nil { - return fmt.Errorf("Error waiting to create DomainMapping: %s", err) - } - - log.Printf("[DEBUG] Finished creating DomainMapping %q: %#v", d.Id(), res) - - return resourceCloudRunDomainMappingRead(d, meta) -} - -func resourceCloudRunDomainMappingPollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := 
replaceVars(d, config, "{{CloudRunBasePath}}apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings/{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isCloudRunCreationConflict) - if err != nil { - return res, err - } - res, err = resourceCloudRunDomainMappingDecoder(d, meta, res) - if err != nil { - return nil, err - } - if res == nil { - return nil, fake404("decoded", "CloudRunDomainMapping") - } - - return res, nil - } -} - -func resourceCloudRunDomainMappingRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isCloudRunCreationConflict) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudRunDomainMapping %q", d.Id())) - } - - res, err = resourceCloudRunDomainMappingDecoder(d, meta, res) - if 
err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing CloudRunDomainMapping because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading DomainMapping: %s", err) - } - - if err := d.Set("status", flattenCloudRunDomainMappingStatus(res["status"], d, config)); err != nil { - return fmt.Errorf("Error reading DomainMapping: %s", err) - } - if err := d.Set("spec", flattenCloudRunDomainMappingSpec(res["spec"], d, config)); err != nil { - return fmt.Errorf("Error reading DomainMapping: %s", err) - } - if err := d.Set("metadata", flattenCloudRunDomainMappingMetadata(res["metadata"], d, config)); err != nil { - return fmt.Errorf("Error reading DomainMapping: %s", err) - } - - return nil -} - -func resourceCloudRunDomainMappingDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting DomainMapping %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isCloudRunCreationConflict) - if err != nil { - return handleNotFoundError(err, d, "DomainMapping") - } - - log.Printf("[DEBUG] Finished deleting 
DomainMapping %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudRunDomainMappingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "locations/(?P[^/]+)/namespaces/(?P[^/]+)/domainmappings/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "locations/{{location}}/namespaces/{{project}}/domainmappings/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenCloudRunDomainMappingStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["conditions"] = - flattenCloudRunDomainMappingStatusConditions(original["conditions"], d, config) - transformed["observed_generation"] = - flattenCloudRunDomainMappingStatusObservedGeneration(original["observedGeneration"], d, config) - transformed["resource_records"] = - flattenCloudRunDomainMappingStatusResourceRecords(original["resourceRecords"], d, config) - transformed["mapped_route_name"] = - flattenCloudRunDomainMappingStatusMappedRouteName(original["mappedRouteName"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunDomainMappingStatusConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - 
"message": flattenCloudRunDomainMappingStatusConditionsMessage(original["message"], d, config), - "status": flattenCloudRunDomainMappingStatusConditionsStatus(original["status"], d, config), - "reason": flattenCloudRunDomainMappingStatusConditionsReason(original["reason"], d, config), - "type": flattenCloudRunDomainMappingStatusConditionsType(original["type"], d, config), - }) - } - return transformed -} -func flattenCloudRunDomainMappingStatusConditionsMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusConditionsStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusConditionsReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusConditionsType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusObservedGeneration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunDomainMappingStatusResourceRecords(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "type": flattenCloudRunDomainMappingStatusResourceRecordsType(original["type"], d, 
config), - "rrdata": flattenCloudRunDomainMappingStatusResourceRecordsRrdata(original["rrdata"], d, config), - "name": flattenCloudRunDomainMappingStatusResourceRecordsName(original["name"], d, config), - }) - } - return transformed -} -func flattenCloudRunDomainMappingStatusResourceRecordsType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusResourceRecordsRrdata(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusResourceRecordsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusMappedRouteName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["force_override"] = - flattenCloudRunDomainMappingSpecForceOverride(original["forceOverride"], d, config) - transformed["route_name"] = - flattenCloudRunDomainMappingSpecRouteName(original["routeName"], d, config) - transformed["certificate_mode"] = - flattenCloudRunDomainMappingSpecCertificateMode(original["certificateMode"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunDomainMappingSpecForceOverride(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // We want to ignore read on this field, but cannot because it is nested - return d.Get("spec.0.force_override") -} - -func flattenCloudRunDomainMappingSpecRouteName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingSpecCertificateMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudRunDomainMappingMetadata(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["labels"] = - flattenCloudRunDomainMappingMetadataLabels(original["labels"], d, config) - transformed["generation"] = - flattenCloudRunDomainMappingMetadataGeneration(original["generation"], d, config) - transformed["resource_version"] = - flattenCloudRunDomainMappingMetadataResourceVersion(original["resourceVersion"], d, config) - transformed["self_link"] = - flattenCloudRunDomainMappingMetadataSelfLink(original["selfLink"], d, config) - transformed["uid"] = - flattenCloudRunDomainMappingMetadataUid(original["uid"], d, config) - transformed["namespace"] = - flattenCloudRunDomainMappingMetadataNamespace(original["namespace"], d, config) - transformed["annotations"] = - flattenCloudRunDomainMappingMetadataAnnotations(original["annotations"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunDomainMappingMetadataLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingMetadataGeneration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunDomainMappingMetadataResourceVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingMetadataSelfLink(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudRunDomainMappingMetadataUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingMetadataNamespace(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return d.Get("project") -} - -func flattenCloudRunDomainMappingMetadataAnnotations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudRunDomainMappingSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedForceOverride, err := expandCloudRunDomainMappingSpecForceOverride(original["force_override"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedForceOverride); val.IsValid() && !isEmptyValue(val) { - transformed["forceOverride"] = transformedForceOverride - } - - transformedRouteName, err := expandCloudRunDomainMappingSpecRouteName(original["route_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRouteName); val.IsValid() && !isEmptyValue(val) { - transformed["routeName"] = transformedRouteName - } - - transformedCertificateMode, err := expandCloudRunDomainMappingSpecCertificateMode(original["certificate_mode"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCertificateMode); val.IsValid() && !isEmptyValue(val) { - transformed["certificateMode"] = transformedCertificateMode - } - - return transformed, nil -} - -func expandCloudRunDomainMappingSpecForceOverride(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingSpecRouteName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return 
GetResourceNameFromSelfLink(v.(string)), nil -} - -func expandCloudRunDomainMappingSpecCertificateMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingMetadata(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLabels, err := expandCloudRunDomainMappingMetadataLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - transformedGeneration, err := expandCloudRunDomainMappingMetadataGeneration(original["generation"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !isEmptyValue(val) { - transformed["generation"] = transformedGeneration - } - - transformedResourceVersion, err := expandCloudRunDomainMappingMetadataResourceVersion(original["resource_version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedResourceVersion); val.IsValid() && !isEmptyValue(val) { - transformed["resourceVersion"] = transformedResourceVersion - } - - transformedSelfLink, err := expandCloudRunDomainMappingMetadataSelfLink(original["self_link"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["selfLink"] = transformedSelfLink - } - - transformedUid, err := expandCloudRunDomainMappingMetadataUid(original["uid"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUid); val.IsValid() && !isEmptyValue(val) { - transformed["uid"] = transformedUid - } - - 
transformedNamespace, err := expandCloudRunDomainMappingMetadataNamespace(original["namespace"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !isEmptyValue(val) { - transformed["namespace"] = transformedNamespace - } - - transformedAnnotations, err := expandCloudRunDomainMappingMetadataAnnotations(original["annotations"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAnnotations); val.IsValid() && !isEmptyValue(val) { - transformed["annotations"] = transformedAnnotations - } - - return transformed, nil -} - -func expandCloudRunDomainMappingMetadataLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunDomainMappingMetadataGeneration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingMetadataResourceVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingMetadataSelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingMetadataUid(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingMetadataNamespace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingMetadataAnnotations(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = 
val.(string) - } - return m, nil -} - -func resourceCloudRunDomainMappingEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - name := d.Get("name").(string) - metadata := obj["metadata"].(map[string]interface{}) - metadata["name"] = name - - // The only acceptable version/kind right now - obj["apiVersion"] = "domains.cloudrun.com/v1" - obj["kind"] = "DomainMapping" - return obj, nil -} - -func resourceCloudRunDomainMappingDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // metadata is not present if the API returns an error - if obj, ok := res["metadata"]; ok { - if meta, ok := obj.(map[string]interface{}); ok { - res["name"] = meta["name"] - } else { - return nil, fmt.Errorf("Unable to decode 'metadata' block from API response.") - } - } - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_run_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_run_service.go deleted file mode 100644 index 5c572f6369..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_run_service.go +++ /dev/null @@ -1,2988 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/googleapi" -) - -func revisionNameCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { - autogen := diff.Get("autogenerate_revision_name").(bool) - if autogen && diff.HasChange("template.0.metadata.0.name") { - return fmt.Errorf("google_cloud_run_service: `template.metadata.name` cannot be set while `autogenerate_revision_name` is true. Please remove the field or set `autogenerate_revision_name` to false.") - } - - return nil -} - -var cloudRunGoogleProvidedAnnotations = regexp.MustCompile(`serving\.knative\.dev/(?:(?:creator)|(?:lastModifier))$|run\.googleapis\.com/(?:(?:ingress-status))$|cloud\.googleapis\.com/(?:(?:location))`) - -func cloudrunAnnotationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // Suppress diffs for the annotations provided by Google - if cloudRunGoogleProvidedAnnotations.MatchString(k) && new == "" { - return true - } - - // Let diff be determined by annotations (above) - if strings.Contains(k, "annotations.%") { - return true - } - - // For other keys, don't suppress diff. 
- return false -} - -var cloudRunGoogleProvidedTemplateAnnotations = regexp.MustCompile(`template\.0\.metadata\.0\.annotations\.run\.googleapis\.com/sandbox`) -var cloudRunGoogleProvidedTemplateAnnotations_autoscaling_maxscale = regexp.MustCompile(`template\.0\.metadata\.0\.annotations\.autoscaling\.knative\.dev/maxScale`) - -func cloudrunTemplateAnnotationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // Suppress diffs for the annotations provided by API - if cloudRunGoogleProvidedTemplateAnnotations.MatchString(k) && - old == "gvisor" && new == "" { - return true - } - - if cloudRunGoogleProvidedTemplateAnnotations_autoscaling_maxscale.MatchString(k) && new == "" { - return true - } - - // For other keys, don't suppress diff. - return false -} - -var cloudRunGoogleProvidedLabels = regexp.MustCompile(`cloud\.googleapis\.com/(?:(?:location))`) - -func cloudrunLabelDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // Suppress diffs for the labels provided by Google - if cloudRunGoogleProvidedLabels.MatchString(k) && new == "" { - return true - } - - // Let diff be determined by labels (above) - if strings.Contains(k, "labels.%") { - return true - } - - // For other keys, don't suppress diff. 
- return false -} - -func ResourceCloudRunService() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudRunServiceCreate, - Read: resourceCloudRunServiceRead, - Update: resourceCloudRunServiceUpdate, - Delete: resourceCloudRunServiceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudRunServiceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - SchemaVersion: 1, - CustomizeDiff: revisionNameCustomizeDiff, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location of the cloud run instance. eg us-central1`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name must be unique within a namespace, within a Cloud Run region. -Is required when creating resources. Name is primarily intended -for creation idempotence and configuration definition. Cannot be updated. -More info: http://kubernetes.io/docs/user-guide/identifiers#names`, - }, - "template": { - Type: schema.TypeList, - Optional: true, - Description: `template holds the latest specification for the Revision to -be stamped out. The template references the container image, and may also -include labels and annotations that should be attached to the Revision. -To correlate a Revision, and/or to force a Revision to be created when the -spec doesn't otherwise change, a nonce label may be provided in the -template metadata. 
For more details, see: -https://github.com/knative/serving/blob/main/docs/client-conventions.md#associate-modifications-with-revisions - -Cloud Run does not currently support referencing a build that is -responsible for materializing the container image from source.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "spec": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `RevisionSpec holds the desired state of the Revision (from the client).`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "containers": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Container defines the unit of execution for this Revision. -In the context of a Revision, we disallow a number of the fields of -this Container, including: name, ports, and volumeMounts. -The runtime contract is documented here: -https://github.com/knative/serving/blob/main/docs/runtime-contract.md`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "image": { - Type: schema.TypeString, - Required: true, - Description: `Docker image name. This is most often a reference to a container located -in the container registry, such as gcr.io/cloudrun/hello -More info: https://kubernetes.io/docs/concepts/containers/images`, - }, - "args": { - Type: schema.TypeList, - Optional: true, - Description: `Arguments to the entrypoint. -The docker image's CMD is used if this is not provided. -Variable references $(VAR_NAME) are expanded using the container's -environment. If a variable cannot be resolved, the reference in the input -string will be unchanged. The $(VAR_NAME) syntax can be escaped with a -double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, -regardless of whether the variable exists or not. 
-More info: -https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "command": { - Type: schema.TypeList, - Optional: true, - Description: `Entrypoint array. Not executed within a shell. -The docker image's ENTRYPOINT is used if this is not provided. -Variable references $(VAR_NAME) are expanded using the container's -environment. If a variable cannot be resolved, the reference in the input -string will be unchanged. The $(VAR_NAME) syntax can be escaped with a -double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, -regardless of whether the variable exists or not. -More info: -https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "env": { - Type: schema.TypeSet, - Optional: true, - Description: `List of environment variables to set in the container.`, - Elem: cloudrunServiceSpecTemplateSpecContainersContainersEnvSchema(), - // Default schema.HashSchema is used. - }, - "env_from": { - Type: schema.TypeList, - Optional: true, - Deprecated: "Not supported by Cloud Run fully managed", - ForceNew: true, - Description: `List of sources to populate environment variables in the container. -All invalid keys will be reported as an event when the container is starting. -When a key exists in multiple sources, the value associated with the last source will -take precedence. 
Values defined by an Env with a duplicate key will take -precedence.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "config_map_ref": { - Type: schema.TypeList, - Optional: true, - Description: `The ConfigMap to select from.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "local_object_reference": { - Type: schema.TypeList, - Optional: true, - Description: `The ConfigMap to select from.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the referent. -More info: -https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names`, - }, - }, - }, - }, - "optional": { - Type: schema.TypeBool, - Optional: true, - Description: `Specify whether the ConfigMap must be defined`, - }, - }, - }, - }, - "prefix": { - Type: schema.TypeString, - Optional: true, - Description: `An optional identifier to prepend to each key in the ConfigMap.`, - }, - "secret_ref": { - Type: schema.TypeList, - Optional: true, - Description: `The Secret to select from.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "local_object_reference": { - Type: schema.TypeList, - Optional: true, - Description: `The Secret to select from.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the referent. -More info: -https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names`, - }, - }, - }, - }, - "optional": { - Type: schema.TypeBool, - Optional: true, - Description: `Specify whether the Secret must be defined`, - }, - }, - }, - }, - }, - }, - }, - "ports": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `List of open ports in the container. 
-More Info: -https://cloud.google.com/run/docs/reference/rest/v1/RevisionSpec#ContainerPort`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "container_port": { - Type: schema.TypeInt, - Optional: true, - Description: `Port number the container listens on. This must be a valid port number (between 1 and 65535). Defaults to "8080".`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `If specified, used to specify which protocol to use. Allowed values are "http1" (HTTP/1) and "h2c" (HTTP/2 end-to-end). Defaults to "http1".`, - }, - "protocol": { - Type: schema.TypeString, - Optional: true, - Description: `Protocol for port. Must be "TCP". Defaults to "TCP".`, - }, - }, - }, - }, - "resources": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Compute Resources required by this container. Used to set values such as max memory -More info: -https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "limits": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - Description: `Limits describes the maximum amount of compute resources allowed. -The values of the map is string form of the 'quantity' k8s type: -https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "requests": { - Type: schema.TypeMap, - Optional: true, - Description: `Requests describes the minimum amount of compute resources required. -If Requests is omitted for a container, it defaults to Limits if that is -explicitly specified, otherwise to an implementation-defined value. 
-The values of the map is string form of the 'quantity' k8s type: -https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "volume_mounts": { - Type: schema.TypeList, - Optional: true, - Description: `Volume to mount into the container's filesystem. -Only supports SecretVolumeSources.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "mount_path": { - Type: schema.TypeString, - Required: true, - Description: `Path within the container at which the volume should be mounted. Must -not contain ':'.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - Description: `This must match the Name of a Volume.`, - }, - }, - }, - }, - "working_dir": { - Type: schema.TypeString, - Optional: true, - Deprecated: "Not supported by Cloud Run fully managed", - ForceNew: true, - Description: `Container's working directory. -If not specified, the container runtime's default will be used, which -might be configured in the container image.`, - }, - }, - }, - }, - "container_concurrency": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `ContainerConcurrency specifies the maximum allowed in-flight (concurrent) -requests per container of the Revision. Values are: -- '0' thread-safe, the system should manage the max concurrency. This is - the default value. -- '1' not-thread-safe. Single concurrency -- '2-N' thread-safe, max concurrency of N`, - }, - "service_account_name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Email address of the IAM service account associated with the revision of the -service. The service account represents the identity of the running revision, -and determines what permissions the revision has. 
If not provided, the revision -will use the project's default service account.`, - }, - "timeout_seconds": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `TimeoutSeconds holds the max duration the instance is allowed for responding to a request.`, - }, - "volumes": { - Type: schema.TypeList, - Optional: true, - Description: `Volume represents a named volume in a container.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Volume's name.`, - }, - "secret": { - Type: schema.TypeList, - Required: true, - Description: `The secret's value will be presented as the content of a file whose -name is defined in the item path. If no items are defined, the name of -the file is the secret_name.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret_name": { - Type: schema.TypeString, - Required: true, - Description: `The name of the secret in Cloud Secret Manager. By default, the secret -is assumed to be in the same project. -If the secret is in another project, you must define an alias. -An alias definition has the form: -{alias}:projects/{project-id|project-number}/secrets/{secret-name}. -If multiple alias definitions are needed, they must be separated by -commas. -The alias definitions must be set on the run.googleapis.com/secrets -annotation.`, - }, - "default_mode": { - Type: schema.TypeInt, - Optional: true, - Description: `Mode bits to use on created files by default. Must be a value between 0000 -and 0777. Defaults to 0644. Directories within the path are not affected by -this setting. This might be in conflict with other options that affect the -file mode, like fsGroup, and the result can be other mode bits set.`, - }, - "items": { - Type: schema.TypeList, - Optional: true, - Description: `If unspecified, the volume will expose a file whose name is the -secret_name. 
-If specified, the key will be used as the version to fetch from Cloud -Secret Manager and the path will be the name of the file exposed in the -volume. When items are defined, they must specify a key and a path.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - Description: `The Cloud Secret Manager secret version. -Can be 'latest' for the latest value or an integer for a specific version.`, - }, - "path": { - Type: schema.TypeString, - Required: true, - Description: `The relative path of the file to map the key to. -May not be an absolute path. -May not contain the path element '..'. -May not start with the string '..'.`, - }, - "mode": { - Type: schema.TypeInt, - Optional: true, - Description: `Mode bits to use on this file, must be a value between 0000 and 0777. If -not specified, the volume defaultMode will be used. This might be in -conflict with other options that affect the file mode, like fsGroup, and -the result can be other mode bits set.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "serving_state": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Not supported by Cloud Run fully managed", - Description: `ServingState holds a value describing the state the resources -are in for this Revision. -It is expected -that the system will manipulate this based on routability and load.`, - }, - }, - }, - }, - "metadata": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Optional metadata for this Revision, including labels and annotations. -Name will be generated by the Configuration. To set minimum instances -for this revision, use the "autoscaling.knative.dev/minScale" annotation -key. To set maximum instances for this revision, use the -"autoscaling.knative.dev/maxScale" annotation key. 
To set Cloud SQL -connections for the revision, use the "run.googleapis.com/cloudsql-instances" -annotation key.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "annotations": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - DiffSuppressFunc: cloudrunTemplateAnnotationDiffSuppress, - Description: `Annotations is a key value map stored with a resource that -may be set by external tools to store and retrieve arbitrary metadata. More -info: http://kubernetes.io/docs/user-guide/annotations - -**Note**: The Cloud Run API may add additional annotations that were not provided in your config. -If terraform plan shows a diff where a server-side annotation is added, you can add it to your config -or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Map of string keys and values that can be used to organize and categorize -(scope and select) objects. May match selectors of replication controllers -and routes. -More info: http://kubernetes.io/docs/user-guide/labels`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Name must be unique within a namespace, within a Cloud Run region. -Is required when creating resources. Name is primarily intended -for creation idempotence and configuration definition. Cannot be updated. -More info: http://kubernetes.io/docs/user-guide/identifiers#names`, - }, - "namespace": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `In Cloud Run the namespace must be equal to either the -project ID or project number. 
It will default to the resource's project.`, - }, - "generation": { - Type: schema.TypeInt, - Computed: true, - Description: `A sequence number representing a specific generation of the desired state.`, - }, - "resource_version": { - Type: schema.TypeString, - Computed: true, - Description: `An opaque value that represents the internal version of this object that -can be used by clients to determine when objects have changed. May be used -for optimistic concurrency, change detection, and the watch operation on a -resource or set of resources. They may only be valid for a -particular resource or set of resources. - -More info: -https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency`, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: `SelfLink is a URL representing this object.`, - }, - "uid": { - Type: schema.TypeString, - Computed: true, - Description: `UID is a unique id generated by the server on successful creation of a resource and is not -allowed to change on PUT operations. - -More info: http://kubernetes.io/docs/user-guide/identifiers#uids`, - }, - }, - }, - }, - }, - }, - }, - "traffic": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Traffic specifies how to distribute traffic over a collection of Knative Revisions -and Configurations`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "percent": { - Type: schema.TypeInt, - Required: true, - Description: `Percent specifies percent of the traffic to this Revision or Configuration.`, - }, - "latest_revision": { - Type: schema.TypeBool, - Optional: true, - Description: `LatestRevision may be optionally provided to indicate that the latest ready -Revision of the Configuration should be used for this traffic target. 
When -provided LatestRevision must be true if RevisionName is empty; it must be -false when RevisionName is non-empty.`, - }, - "revision_name": { - Type: schema.TypeString, - Optional: true, - Description: `RevisionName of a specific revision to which to send this portion of traffic.`, - }, - "tag": { - Type: schema.TypeString, - Optional: true, - Description: `Tag is optionally used to expose a dedicated url for referencing this target exclusively.`, - }, - "url": { - Type: schema.TypeString, - Computed: true, - Description: `URL displays the URL for accessing tagged traffic targets. URL is displayed in status, -and is disallowed on spec. URL must contain a scheme (e.g. http://) and a hostname, -but may not contain anything else (e.g. basic auth, url path, etc.)`, - }, - }, - }, - }, - - "metadata": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Metadata associated with this Service, including name, namespace, labels, -and annotations.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "annotations": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - DiffSuppressFunc: cloudrunAnnotationDiffSuppress, - Description: `Annotations is a key value map stored with a resource that -may be set by external tools to store and retrieve arbitrary metadata. More -info: http://kubernetes.io/docs/user-guide/annotations - -**Note**: The Cloud Run API may add additional annotations that were not provided in your config. -If terraform plan shows a diff where a server-side annotation is added, you can add it to your config -or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field. - -Cloud Run (fully managed) uses the following annotation keys to configure features on a Service: - -- 'run.googleapis.com/ingress' sets the [ingress settings](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--ingress) - for the Service. 
For example, '"run.googleapis.com/ingress" = "all"'.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "labels": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - DiffSuppressFunc: cloudrunLabelDiffSuppress, - Description: `Map of string keys and values that can be used to organize and categorize -(scope and select) objects. May match selectors of replication controllers -and routes. -More info: http://kubernetes.io/docs/user-guide/labels`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "namespace": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `In Cloud Run the namespace must be equal to either the -project ID or project number.`, - }, - "generation": { - Type: schema.TypeInt, - Computed: true, - Description: `A sequence number representing a specific generation of the desired state.`, - }, - "resource_version": { - Type: schema.TypeString, - Computed: true, - Description: `An opaque value that represents the internal version of this object that -can be used by clients to determine when objects have changed. May be used -for optimistic concurrency, change detection, and the watch operation on a -resource or set of resources. They may only be valid for a -particular resource or set of resources. - -More info: -https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency`, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: `SelfLink is a URL representing this object.`, - }, - "uid": { - Type: schema.TypeString, - Computed: true, - Description: `UID is a unique id generated by the server on successful creation of a resource and is not -allowed to change on PUT operations. 
- -More info: http://kubernetes.io/docs/user-guide/identifiers#uids`, - }, - }, - }, - }, - "status": { - Type: schema.TypeList, - Computed: true, - Description: `The current status of the Service.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "conditions": { - Type: schema.TypeList, - Computed: true, - Description: `Array of observed Service Conditions, indicating the current ready state of the service.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "message": { - Type: schema.TypeString, - Computed: true, - Description: `Human readable message indicating details about the current status.`, - }, - "reason": { - Type: schema.TypeString, - Computed: true, - Description: `One-word CamelCase reason for the condition's current status.`, - }, - "status": { - Type: schema.TypeString, - Computed: true, - Description: `Status of the condition, one of True, False, Unknown.`, - }, - "type": { - Type: schema.TypeString, - Computed: true, - Description: `Type of domain mapping condition.`, - }, - }, - }, - }, - "latest_created_revision_name": { - Type: schema.TypeString, - Computed: true, - Description: `From ConfigurationStatus. LatestCreatedRevisionName is the last revision that was created -from this Service's Configuration. It might not be ready yet, for that use -LatestReadyRevisionName.`, - }, - "latest_ready_revision_name": { - Type: schema.TypeString, - Computed: true, - Description: `From ConfigurationStatus. LatestReadyRevisionName holds the name of the latest Revision -stamped out from this Service's Configuration that has had its "Ready" condition become -"True".`, - }, - "observed_generation": { - Type: schema.TypeInt, - Computed: true, - Description: `ObservedGeneration is the 'Generation' of the Route that was last processed by the -controller. 
- -Clients polling for completed reconciliation should poll until observedGeneration = -metadata.generation and the Ready condition's status is True or False.`, - }, - "url": { - Type: schema.TypeString, - Computed: true, - Description: `From RouteStatus. URL holds the url that will distribute traffic over the provided traffic -targets. It generally has the form -https://{route-hash}-{project-hash}-{cluster-level-suffix}.a.run.app`, - }, - }, - }, - }, - "autogenerate_revision_name": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `If set to 'true', the revision name (template.metadata.name) will be omitted and -autogenerated by Cloud Run. This cannot be set to 'true' while 'template.metadata.name' -is also set. -(For legacy support, if 'template.metadata.name' is unset in state while -this field is set to false, the revision name will still autogenerate.)`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func cloudrunServiceSpecTemplateSpecContainersContainersEnvSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the environment variable.`, - }, - "value": { - Type: schema.TypeString, - Optional: true, - Description: `Variable references $(VAR_NAME) are expanded -using the previous defined environment variables in the container and -any route environment variables. If a variable cannot be resolved, -the reference in the input string will be unchanged. The $(VAR_NAME) -syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped -references will never be expanded, regardless of whether the variable -exists or not. -Defaults to "".`, - }, - "value_from": { - Type: schema.TypeList, - Optional: true, - Description: `Source for the environment variable's value. 
Only supports secret_key_ref.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret_key_ref": { - Type: schema.TypeList, - Required: true, - Description: `Selects a key (version) of a secret in Secret Manager.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - Description: `A Cloud Secret Manager secret version. Must be 'latest' for the latest -version or an integer for a specific version.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - Description: `The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. -If the secret is in another project, you must define an alias. -An alias definition has the form: :projects/{project-id|project-number}/secrets/. -If multiple alias definitions are needed, they must be separated by commas. -The alias definitions must be set on the run.googleapis.com/secrets annotation.`, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func resourceCloudRunServiceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - specProp, err := expandCloudRunServiceSpec(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(reflect.ValueOf(specProp)) { - obj["spec"] = specProp - } - metadataProp, err := expandCloudRunServiceMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(reflect.ValueOf(metadataProp)) && (ok || !reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - - obj, err = resourceCloudRunServiceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services") 
- if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Service: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isCloudRunCreationConflict) - if err != nil { - return fmt.Errorf("Error creating Service: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "locations/{{location}}/namespaces/{{project}}/services/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourceCloudRunServicePollRead(d, meta), PollCheckKnativeStatusFunc(res), "Creating Service", d.Timeout(schema.TimeoutCreate), 1) - if err != nil { - return fmt.Errorf("Error waiting to create Service: %s", err) - } - - log.Printf("[DEBUG] Finished creating Service %q: %#v", d.Id(), res) - - return resourceCloudRunServiceRead(d, meta) -} - -func resourceCloudRunServicePollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services/{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if 
err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isCloudRunCreationConflict) - if err != nil { - return res, err - } - res, err = resourceCloudRunServiceDecoder(d, meta, res) - if err != nil { - return nil, err - } - if res == nil { - return nil, fake404("decoded", "CloudRunService") - } - - return res, nil - } -} - -func resourceCloudRunServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isCloudRunCreationConflict) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudRunService %q", d.Id())) - } - - res, err = resourceCloudRunServiceDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing CloudRunService because it no longer exists.") - d.SetId("") - return nil - } - - // Explicitly set virtual fields to default values if unset - if _, ok := d.GetOkExists("autogenerate_revision_name"); !ok { - if err := d.Set("autogenerate_revision_name", false); err != nil { - return fmt.Errorf("Error setting autogenerate_revision_name: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - - // Terraform must set the top level schema field, but since this object contains collapsed properties - // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. - if flattenedProp := flattenCloudRunServiceSpec(res["spec"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*googleapi.Error); ok { - return fmt.Errorf("Error reading Service: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - if err := d.Set("status", flattenCloudRunServiceStatus(res["status"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("metadata", flattenCloudRunServiceMetadata(res["metadata"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - - return nil -} - -func resourceCloudRunServiceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - specProp, err := 
expandCloudRunServiceSpec(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(reflect.ValueOf(specProp)) { - obj["spec"] = specProp - } - metadataProp, err := expandCloudRunServiceMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - - obj, err = resourceCloudRunServiceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Service %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isCloudRunCreationConflict) - - if err != nil { - return fmt.Errorf("Error updating Service %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Service %q: %#v", d.Id(), res) - } - - err = PollingWaitTime(resourceCloudRunServicePollRead(d, meta), PollCheckKnativeStatusFunc(res), "Updating Service", d.Timeout(schema.TimeoutUpdate), 1) - if err != nil { - return err - } - - return resourceCloudRunServiceRead(d, meta) -} - -func resourceCloudRunServiceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, 
"{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Service %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isCloudRunCreationConflict) - if err != nil { - return handleNotFoundError(err, d, "Service") - } - - log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudRunServiceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "locations/(?P[^/]+)/namespaces/(?P[^/]+)/services/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "locations/{{location}}/namespaces/{{project}}/services/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Explicitly set virtual fields to default values on import - if err := d.Set("autogenerate_revision_name", false); err != nil { - return nil, fmt.Errorf("Error setting autogenerate_revision_name: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenCloudRunServiceSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["traffic"] = - flattenCloudRunServiceSpecTraffic(original["traffic"], d, config) - transformed["template"] = - flattenCloudRunServiceSpecTemplate(original["template"], d, config) 
- return []interface{}{transformed} -} -func flattenCloudRunServiceSpecTraffic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "revision_name": flattenCloudRunServiceSpecTrafficRevisionName(original["revisionName"], d, config), - "percent": flattenCloudRunServiceSpecTrafficPercent(original["percent"], d, config), - "tag": flattenCloudRunServiceSpecTrafficTag(original["tag"], d, config), - "latest_revision": flattenCloudRunServiceSpecTrafficLatestRevision(original["latestRevision"], d, config), - "url": flattenCloudRunServiceSpecTrafficUrl(original["url"], d, config), - }) - } - return transformed -} -func flattenCloudRunServiceSpecTrafficRevisionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTrafficPercent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunServiceSpecTrafficTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTrafficLatestRevision(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTrafficUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudRunServiceSpecTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["metadata"] = - flattenCloudRunServiceSpecTemplateMetadata(original["metadata"], d, config) - transformed["spec"] = - flattenCloudRunServiceSpecTemplateSpec(original["spec"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunServiceSpecTemplateMetadata(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["labels"] = - flattenCloudRunServiceSpecTemplateMetadataLabels(original["labels"], d, config) - transformed["generation"] = - flattenCloudRunServiceSpecTemplateMetadataGeneration(original["generation"], d, config) - transformed["resource_version"] = - flattenCloudRunServiceSpecTemplateMetadataResourceVersion(original["resourceVersion"], d, config) - transformed["self_link"] = - flattenCloudRunServiceSpecTemplateMetadataSelfLink(original["selfLink"], d, config) - transformed["uid"] = - flattenCloudRunServiceSpecTemplateMetadataUid(original["uid"], d, config) - transformed["namespace"] = - flattenCloudRunServiceSpecTemplateMetadataNamespace(original["namespace"], d, config) - transformed["annotations"] = - flattenCloudRunServiceSpecTemplateMetadataAnnotations(original["annotations"], d, config) - transformed["name"] = - flattenCloudRunServiceSpecTemplateMetadataName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunServiceSpecTemplateMetadataLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateMetadataGeneration(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunServiceSpecTemplateMetadataResourceVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateMetadataSelfLink(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateMetadataUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateMetadataNamespace(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateMetadataAnnotations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateMetadataName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["containers"] = - flattenCloudRunServiceSpecTemplateSpecContainers(original["containers"], d, config) - transformed["container_concurrency"] = - flattenCloudRunServiceSpecTemplateSpecContainerConcurrency(original["containerConcurrency"], d, config) - transformed["timeout_seconds"] = - flattenCloudRunServiceSpecTemplateSpecTimeoutSeconds(original["timeoutSeconds"], d, config) - transformed["service_account_name"] = - 
flattenCloudRunServiceSpecTemplateSpecServiceAccountName(original["serviceAccountName"], d, config) - transformed["volumes"] = - flattenCloudRunServiceSpecTemplateSpecVolumes(original["volumes"], d, config) - transformed["serving_state"] = - flattenCloudRunServiceSpecTemplateSpecServingState(original["servingState"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunServiceSpecTemplateSpecContainers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "working_dir": flattenCloudRunServiceSpecTemplateSpecContainersWorkingDir(original["workingDir"], d, config), - "args": flattenCloudRunServiceSpecTemplateSpecContainersArgs(original["args"], d, config), - "env_from": flattenCloudRunServiceSpecTemplateSpecContainersEnvFrom(original["envFrom"], d, config), - "image": flattenCloudRunServiceSpecTemplateSpecContainersImage(original["image"], d, config), - "command": flattenCloudRunServiceSpecTemplateSpecContainersCommand(original["command"], d, config), - "env": flattenCloudRunServiceSpecTemplateSpecContainersEnv(original["env"], d, config), - "ports": flattenCloudRunServiceSpecTemplateSpecContainersPorts(original["ports"], d, config), - "resources": flattenCloudRunServiceSpecTemplateSpecContainersResources(original["resources"], d, config), - "volume_mounts": flattenCloudRunServiceSpecTemplateSpecContainersVolumeMounts(original["volumeMounts"], d, config), - }) - } - return transformed -} -func flattenCloudRunServiceSpecTemplateSpecContainersWorkingDir(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersArgs(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFrom(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "prefix": flattenCloudRunServiceSpecTemplateSpecContainersEnvFromPrefix(original["prefix"], d, config), - "config_map_ref": flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRef(original["configMapRef"], d, config), - "secret_ref": flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRef(original["secretRef"], d, config), - }) - } - return transformed -} -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromPrefix(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRef(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["optional"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefOptional(original["optional"], d, config) - transformed["local_object_reference"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReference(original["localObjectReference"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefOptional(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReference(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReferenceName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReferenceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRef(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["local_object_reference"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReference(original["localObjectReference"], d, config) - transformed["optional"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefOptional(original["optional"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReference(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReferenceName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReferenceName(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefOptional(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersImage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersCommand(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnv(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(schema.HashResource(cloudrunServiceSpecTemplateSpecContainersContainersEnvSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "name": flattenCloudRunServiceSpecTemplateSpecContainersEnvName(original["name"], d, config), - "value": flattenCloudRunServiceSpecTemplateSpecContainersEnvValue(original["value"], d, config), - "value_from": flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFrom(original["valueFrom"], d, config), - }) - } - return transformed -} -func flattenCloudRunServiceSpecTemplateSpecContainersEnvName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFrom(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["secret_key_ref"] = - 
flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRef(original["secretKeyRef"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRef(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefKey(original["key"], d, config) - transformed["name"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersPorts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudRunServiceSpecTemplateSpecContainersPortsName(original["name"], d, config), - "protocol": flattenCloudRunServiceSpecTemplateSpecContainersPortsProtocol(original["protocol"], d, config), - "container_port": flattenCloudRunServiceSpecTemplateSpecContainersPortsContainerPort(original["containerPort"], d, config), - }) - } - return transformed -} -func flattenCloudRunServiceSpecTemplateSpecContainersPortsName(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersPortsProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersPortsContainerPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunServiceSpecTemplateSpecContainersResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["limits"] = - flattenCloudRunServiceSpecTemplateSpecContainersResourcesLimits(original["limits"], d, config) - transformed["requests"] = - flattenCloudRunServiceSpecTemplateSpecContainersResourcesRequests(original["requests"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunServiceSpecTemplateSpecContainersResourcesLimits(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersResourcesRequests(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersVolumeMounts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects 
coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "mount_path": flattenCloudRunServiceSpecTemplateSpecContainersVolumeMountsMountPath(original["mountPath"], d, config), - "name": flattenCloudRunServiceSpecTemplateSpecContainersVolumeMountsName(original["name"], d, config), - }) - } - return transformed -} -func flattenCloudRunServiceSpecTemplateSpecContainersVolumeMountsMountPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersVolumeMountsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainerConcurrency(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunServiceSpecTemplateSpecTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunServiceSpecTemplateSpecServiceAccountName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecVolumes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 
0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudRunServiceSpecTemplateSpecVolumesName(original["name"], d, config), - "secret": flattenCloudRunServiceSpecTemplateSpecVolumesSecret(original["secret"], d, config), - }) - } - return transformed -} -func flattenCloudRunServiceSpecTemplateSpecVolumesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecVolumesSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["secret_name"] = - flattenCloudRunServiceSpecTemplateSpecVolumesSecretSecretName(original["secretName"], d, config) - transformed["default_mode"] = - flattenCloudRunServiceSpecTemplateSpecVolumesSecretDefaultMode(original["defaultMode"], d, config) - transformed["items"] = - flattenCloudRunServiceSpecTemplateSpecVolumesSecretItems(original["items"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunServiceSpecTemplateSpecVolumesSecretSecretName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecVolumesSecretDefaultMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func 
flattenCloudRunServiceSpecTemplateSpecVolumesSecretItems(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "key": flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsKey(original["key"], d, config), - "path": flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsPath(original["path"], d, config), - "mode": flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsMode(original["mode"], d, config), - }) - } - return transformed -} -func flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunServiceSpecTemplateSpecServingState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["conditions"] = - flattenCloudRunServiceStatusConditions(original["conditions"], d, config) - transformed["url"] = - flattenCloudRunServiceStatusUrl(original["url"], d, config) - transformed["observed_generation"] = - flattenCloudRunServiceStatusObservedGeneration(original["observedGeneration"], d, config) - transformed["latest_created_revision_name"] = - flattenCloudRunServiceStatusLatestCreatedRevisionName(original["latestCreatedRevisionName"], d, config) - transformed["latest_ready_revision_name"] = - flattenCloudRunServiceStatusLatestReadyRevisionName(original["latestReadyRevisionName"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunServiceStatusConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "message": flattenCloudRunServiceStatusConditionsMessage(original["message"], d, config), - "status": flattenCloudRunServiceStatusConditionsStatus(original["status"], d, config), - "reason": flattenCloudRunServiceStatusConditionsReason(original["reason"], d, config), - "type": flattenCloudRunServiceStatusConditionsType(original["type"], d, config), - }) - } - return transformed -} -func flattenCloudRunServiceStatusConditionsMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatusConditionsStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatusConditionsReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatusConditionsType(v interface{}, d *schema.ResourceData, 
config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatusUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatusObservedGeneration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunServiceStatusLatestCreatedRevisionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatusLatestReadyRevisionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceMetadata(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["labels"] = - flattenCloudRunServiceMetadataLabels(original["labels"], d, config) - transformed["generation"] = - flattenCloudRunServiceMetadataGeneration(original["generation"], d, config) - transformed["resource_version"] = - flattenCloudRunServiceMetadataResourceVersion(original["resourceVersion"], d, config) - transformed["self_link"] = - flattenCloudRunServiceMetadataSelfLink(original["selfLink"], d, config) - transformed["uid"] = - flattenCloudRunServiceMetadataUid(original["uid"], d, config) - transformed["namespace"] = - flattenCloudRunServiceMetadataNamespace(original["namespace"], d, config) - transformed["annotations"] = - flattenCloudRunServiceMetadataAnnotations(original["annotations"], d, config) - return []interface{}{transformed} -} -func 
flattenCloudRunServiceMetadataLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceMetadataGeneration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunServiceMetadataResourceVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceMetadataSelfLink(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceMetadataUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceMetadataNamespace(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return d.Get("project") -} - -func flattenCloudRunServiceMetadataAnnotations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudRunServiceSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedTraffic, err := expandCloudRunServiceSpecTraffic(d.Get("traffic"), d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTraffic); val.IsValid() && !isEmptyValue(val) { - transformed["traffic"] = transformedTraffic - } - - transformedTemplate, err := expandCloudRunServiceSpecTemplate(d.Get("template"), d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTemplate); val.IsValid() && !isEmptyValue(val) { - transformed["template"] = transformedTemplate - } - - return transformed, nil -} - 
-func expandCloudRunServiceSpecTraffic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRevisionName, err := expandCloudRunServiceSpecTrafficRevisionName(original["revision_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRevisionName); val.IsValid() && !isEmptyValue(val) { - transformed["revisionName"] = transformedRevisionName - } - - transformedPercent, err := expandCloudRunServiceSpecTrafficPercent(original["percent"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { - transformed["percent"] = transformedPercent - } - - transformedTag, err := expandCloudRunServiceSpecTrafficTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - transformedLatestRevision, err := expandCloudRunServiceSpecTrafficLatestRevision(original["latest_revision"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLatestRevision); val.IsValid() && !isEmptyValue(val) { - transformed["latestRevision"] = transformedLatestRevision - } - - transformedUrl, err := expandCloudRunServiceSpecTrafficUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunServiceSpecTrafficRevisionName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandCloudRunServiceSpecTrafficPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTrafficTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTrafficLatestRevision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTrafficUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMetadata, err := expandCloudRunServiceSpecTemplateMetadata(original["metadata"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMetadata); val.IsValid() && !isEmptyValue(val) { - transformed["metadata"] = transformedMetadata - } - - transformedSpec, err := expandCloudRunServiceSpecTemplateSpec(original["spec"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSpec); val.IsValid() && !isEmptyValue(val) { - transformed["spec"] = transformedSpec - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateMetadata(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLabels, err := expandCloudRunServiceSpecTemplateMetadataLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && 
!isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - transformedGeneration, err := expandCloudRunServiceSpecTemplateMetadataGeneration(original["generation"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !isEmptyValue(val) { - transformed["generation"] = transformedGeneration - } - - transformedResourceVersion, err := expandCloudRunServiceSpecTemplateMetadataResourceVersion(original["resource_version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedResourceVersion); val.IsValid() && !isEmptyValue(val) { - transformed["resourceVersion"] = transformedResourceVersion - } - - transformedSelfLink, err := expandCloudRunServiceSpecTemplateMetadataSelfLink(original["self_link"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["selfLink"] = transformedSelfLink - } - - transformedUid, err := expandCloudRunServiceSpecTemplateMetadataUid(original["uid"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUid); val.IsValid() && !isEmptyValue(val) { - transformed["uid"] = transformedUid - } - - transformedNamespace, err := expandCloudRunServiceSpecTemplateMetadataNamespace(original["namespace"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !isEmptyValue(val) { - transformed["namespace"] = transformedNamespace - } - - transformedAnnotations, err := expandCloudRunServiceSpecTemplateMetadataAnnotations(original["annotations"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAnnotations); val.IsValid() && !isEmptyValue(val) { - transformed["annotations"] = transformedAnnotations - } - - transformedName, err := expandCloudRunServiceSpecTemplateMetadataName(original["name"], 
d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateMetadataLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunServiceSpecTemplateMetadataGeneration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateMetadataResourceVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateMetadataSelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateMetadataUid(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -// If the property hasn't been explicitly set in config use the project defined by the provider or env. 
-func expandCloudRunServiceSpecTemplateMetadataNamespace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - project, err := getProject(d, config) - if err != nil { - return project, nil - } - } - return v, nil -} - -func expandCloudRunServiceSpecTemplateMetadataAnnotations(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunServiceSpecTemplateMetadataName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if d.Get("autogenerate_revision_name") == true { - return nil, nil - } - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedContainers, err := expandCloudRunServiceSpecTemplateSpecContainers(original["containers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContainers); val.IsValid() && !isEmptyValue(val) { - transformed["containers"] = transformedContainers - } - - transformedContainerConcurrency, err := expandCloudRunServiceSpecTemplateSpecContainerConcurrency(original["container_concurrency"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContainerConcurrency); val.IsValid() && !isEmptyValue(val) { - transformed["containerConcurrency"] = transformedContainerConcurrency - } - - transformedTimeoutSeconds, err := expandCloudRunServiceSpecTemplateSpecTimeoutSeconds(original["timeout_seconds"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["timeoutSeconds"] = transformedTimeoutSeconds - } - - transformedServiceAccountName, err := expandCloudRunServiceSpecTemplateSpecServiceAccountName(original["service_account_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedServiceAccountName); val.IsValid() && !isEmptyValue(val) { - transformed["serviceAccountName"] = transformedServiceAccountName - } - - transformedVolumes, err := expandCloudRunServiceSpecTemplateSpecVolumes(original["volumes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVolumes); val.IsValid() && !isEmptyValue(val) { - transformed["volumes"] = transformedVolumes - } - - transformedServingState, err := expandCloudRunServiceSpecTemplateSpecServingState(original["serving_state"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedServingState); val.IsValid() && !isEmptyValue(val) { - transformed["servingState"] = transformedServingState - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWorkingDir, err := expandCloudRunServiceSpecTemplateSpecContainersWorkingDir(original["working_dir"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWorkingDir); val.IsValid() && !isEmptyValue(val) { - transformed["workingDir"] = transformedWorkingDir - } - - transformedArgs, err := expandCloudRunServiceSpecTemplateSpecContainersArgs(original["args"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedArgs); val.IsValid() && !isEmptyValue(val) { - transformed["args"] = transformedArgs - } - - transformedEnvFrom, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFrom(original["env_from"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnvFrom); val.IsValid() && !isEmptyValue(val) { - transformed["envFrom"] = transformedEnvFrom - } - - transformedImage, err := expandCloudRunServiceSpecTemplateSpecContainersImage(original["image"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedImage); val.IsValid() && !isEmptyValue(val) { - transformed["image"] = transformedImage - } - - transformedCommand, err := expandCloudRunServiceSpecTemplateSpecContainersCommand(original["command"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCommand); val.IsValid() && !isEmptyValue(val) { - transformed["command"] = transformedCommand - } - - transformedEnv, err := expandCloudRunServiceSpecTemplateSpecContainersEnv(original["env"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnv); val.IsValid() && !isEmptyValue(val) { - transformed["env"] = transformedEnv - } - - transformedPorts, err := expandCloudRunServiceSpecTemplateSpecContainersPorts(original["ports"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPorts); val.IsValid() && !isEmptyValue(val) { - transformed["ports"] = transformedPorts - } - - transformedResources, err := expandCloudRunServiceSpecTemplateSpecContainersResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedVolumeMounts, err := expandCloudRunServiceSpecTemplateSpecContainersVolumeMounts(original["volume_mounts"], d, 
config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVolumeMounts); val.IsValid() && !isEmptyValue(val) { - transformed["volumeMounts"] = transformedVolumeMounts - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersWorkingDir(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersArgs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPrefix, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromPrefix(original["prefix"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPrefix); val.IsValid() && !isEmptyValue(val) { - transformed["prefix"] = transformedPrefix - } - - transformedConfigMapRef, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRef(original["config_map_ref"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConfigMapRef); val.IsValid() && !isEmptyValue(val) { - transformed["configMapRef"] = transformedConfigMapRef - } - - transformedSecretRef, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRef(original["secret_ref"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecretRef); val.IsValid() && !isEmptyValue(val) { - transformed["secretRef"] = transformedSecretRef - } - - req = append(req, transformed) - } - return req, nil -} - -func 
expandCloudRunServiceSpecTemplateSpecContainersEnvFromPrefix(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRef(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedOptional, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefOptional(original["optional"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOptional); val.IsValid() && !isEmptyValue(val) { - transformed["optional"] = transformedOptional - } - - transformedLocalObjectReference, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReference(original["local_object_reference"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLocalObjectReference); val.IsValid() && !isEmptyValue(val) { - transformed["localObjectReference"] = transformedLocalObjectReference - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefOptional(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReferenceName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReferenceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRef(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLocalObjectReference, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReference(original["local_object_reference"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLocalObjectReference); val.IsValid() && !isEmptyValue(val) { - transformed["localObjectReference"] = transformedLocalObjectReference - } - - transformedOptional, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefOptional(original["optional"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOptional); val.IsValid() && !isEmptyValue(val) { - transformed["optional"] = transformedOptional - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReferenceName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReferenceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefOptional(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersCommand(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersEnvName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedValue, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - transformedValueFrom, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValueFrom(original["value_from"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedValueFrom); val.IsValid() && !isEmptyValue(val) { - transformed["valueFrom"] = transformedValueFrom - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvValueFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSecretKeyRef, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRef(original["secret_key_ref"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecretKeyRef); val.IsValid() && !isEmptyValue(val) { - transformed["secretKeyRef"] = transformedSecretKeyRef - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRef(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefName(original["name"], d, config) - if 
err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersPorts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersPortsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedProtocol, err := expandCloudRunServiceSpecTemplateSpecContainersPortsProtocol(original["protocol"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProtocol); val.IsValid() && !isEmptyValue(val) { - transformed["protocol"] = transformedProtocol - } - - transformedContainerPort, err := expandCloudRunServiceSpecTemplateSpecContainersPortsContainerPort(original["container_port"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContainerPort); val.IsValid() && !isEmptyValue(val) { - transformed["containerPort"] = transformedContainerPort - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersPortsName(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersPortsProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersPortsContainerPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLimits, err := expandCloudRunServiceSpecTemplateSpecContainersResourcesLimits(original["limits"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLimits); val.IsValid() && !isEmptyValue(val) { - transformed["limits"] = transformedLimits - } - - transformedRequests, err := expandCloudRunServiceSpecTemplateSpecContainersResourcesRequests(original["requests"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRequests); val.IsValid() && !isEmptyValue(val) { - transformed["requests"] = transformedRequests - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersResourcesLimits(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersResourcesRequests(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range 
v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersVolumeMounts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMountPath, err := expandCloudRunServiceSpecTemplateSpecContainersVolumeMountsMountPath(original["mount_path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMountPath); val.IsValid() && !isEmptyValue(val) { - transformed["mountPath"] = transformedMountPath - } - - transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersVolumeMountsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersVolumeMountsMountPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersVolumeMountsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainerConcurrency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecServiceAccountName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumes(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunServiceSpecTemplateSpecVolumesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedSecret, err := expandCloudRunServiceSpecTemplateSpecVolumesSecret(original["secret"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { - transformed["secret"] = transformedSecret - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumesSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSecretName, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretSecretName(original["secret_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecretName); val.IsValid() && !isEmptyValue(val) { - transformed["secretName"] = transformedSecretName - } - - transformedDefaultMode, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretDefaultMode(original["default_mode"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDefaultMode); val.IsValid() && !isEmptyValue(val) { - 
transformed["defaultMode"] = transformedDefaultMode - } - - transformedItems, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretItems(original["items"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedItems); val.IsValid() && !isEmptyValue(val) { - transformed["items"] = transformedItems - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumesSecretSecretName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumesSecretDefaultMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumesSecretItems(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - transformedPath, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedMode, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsMode(original["mode"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { - transformed["mode"] = transformedMode - } - - req = append(req, transformed) - } - return req, nil -} - -func 
expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecServingState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceMetadata(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLabels, err := expandCloudRunServiceMetadataLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - transformedGeneration, err := expandCloudRunServiceMetadataGeneration(original["generation"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !isEmptyValue(val) { - transformed["generation"] = transformedGeneration - } - - transformedResourceVersion, err := expandCloudRunServiceMetadataResourceVersion(original["resource_version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedResourceVersion); val.IsValid() && !isEmptyValue(val) { - transformed["resourceVersion"] = transformedResourceVersion - } - - transformedSelfLink, err := expandCloudRunServiceMetadataSelfLink(original["self_link"], d, config) - if err != nil { - return nil, err - } else if 
val := reflect.ValueOf(transformedSelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["selfLink"] = transformedSelfLink - } - - transformedUid, err := expandCloudRunServiceMetadataUid(original["uid"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUid); val.IsValid() && !isEmptyValue(val) { - transformed["uid"] = transformedUid - } - - transformedNamespace, err := expandCloudRunServiceMetadataNamespace(original["namespace"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !isEmptyValue(val) { - transformed["namespace"] = transformedNamespace - } - - transformedAnnotations, err := expandCloudRunServiceMetadataAnnotations(original["annotations"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAnnotations); val.IsValid() && !isEmptyValue(val) { - transformed["annotations"] = transformedAnnotations - } - - return transformed, nil -} - -func expandCloudRunServiceMetadataLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunServiceMetadataGeneration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceMetadataResourceVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceMetadataSelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceMetadataUid(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -// If the property hasn't been explicitly set in config use the project defined by the 
provider or env. -func expandCloudRunServiceMetadataNamespace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - project, err := getProject(d, config) - if err != nil { - return project, nil - } - } - return v, nil -} - -func expandCloudRunServiceMetadataAnnotations(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceCloudRunServiceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - name := d.Get("name").(string) - if obj["metadata"] == nil { - obj["metadata"] = make(map[string]interface{}) - } - metadata := obj["metadata"].(map[string]interface{}) - metadata["name"] = name - - // The only acceptable version/kind right now - obj["apiVersion"] = "serving.knative.dev/v1" - obj["kind"] = "Service" - return obj, nil -} - -func resourceCloudRunServiceDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // metadata is not present if the API returns an error - if obj, ok := res["metadata"]; ok { - if meta, ok := obj.(map[string]interface{}); ok { - res["name"] = meta["name"] - } else { - return nil, fmt.Errorf("Unable to decode 'metadata' block from API response.") - } - } - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_run_v2_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_run_v2_service.go deleted file mode 100644 index 75eaa5c389..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_run_v2_service.go +++ /dev/null @@ -1,3715 +0,0 @@ -// 
---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceCloudRunV2Service() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudRunV2ServiceCreate, - Read: resourceCloudRunV2ServiceRead, - Update: resourceCloudRunV2ServiceUpdate, - Delete: resourceCloudRunV2ServiceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudRunV2ServiceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of the Service.`, - }, - "template": { - Type: schema.TypeList, - Required: true, - Description: `The template used to create revisions for this Service.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "annotations": { - Type: schema.TypeMap, - Optional: true, - Description: `KRM-style annotations for the resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "containers": { - Type: schema.TypeList, - Optional: true, - Description: `Holds the single container that defines the unit of execution for this task.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ 
- "image": { - Type: schema.TypeString, - Required: true, - Description: `URL of the Container image in Google Container Registry or Google Artifact Registry. More info: https://kubernetes.io/docs/concepts/containers/images`, - }, - "args": { - Type: schema.TypeList, - Optional: true, - Description: `Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "command": { - Type: schema.TypeList, - Optional: true, - Description: `Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "env": { - Type: schema.TypeList, - Optional: true, - Description: `List of environment variables to set in the container.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the environment variable. 
Must be a C_IDENTIFIER, and mnay not exceed 32768 characters.`, - }, - "value": { - Type: schema.TypeString, - Optional: true, - Description: `Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "", and the maximum length is 32768 bytes`, - }, - "value_source": { - Type: schema.TypeList, - Optional: true, - Description: `Source for the environment variable's value.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret_key_ref": { - Type: schema.TypeList, - Optional: true, - Description: `Selects a secret and a specific version from Cloud Secret Manager.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret": { - Type: schema.TypeString, - Required: true, - Description: `The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project.`, - }, - "version": { - Type: schema.TypeString, - Optional: true, - Description: `The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "liveness_probe": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Periodic probe of container liveness. Container will be restarted if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "failure_threshold": { - Type: schema.TypeInt, - Optional: true, - Description: `Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.`, - Default: 3, - }, - "grpc": { - Type: schema.TypeList, - Optional: true, - Description: `GRPC specifies an action involving a GRPC port.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "port": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Port number to access on the container. Number must be in the range 1 to 65535. If not specified, defaults to the same value as container.ports[0].containerPort.`, - }, - "service": { - Type: schema.TypeString, - Optional: true, - Description: `The name of the service to place in the gRPC HealthCheckRequest -(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). -If this is not specified, the default behavior is defined by gRPC.`, - }, - }, - }, - }, - "http_get": { - Type: schema.TypeList, - Optional: true, - Description: `HTTPGet specifies the http request to perform.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "http_headers": { - Type: schema.TypeList, - Optional: true, - Description: `Custom headers to set in the request. HTTP allows repeated headers.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `The header field name`, - }, - "value": { - Type: schema.TypeString, - Optional: true, - Description: `The header field value`, - Default: "", - }, - }, - }, - }, - "path": { - Type: schema.TypeString, - Optional: true, - Description: `Path to access on the HTTP server. 
Defaults to '/'.`, - Default: "/", - }, - }, - }, - }, - "initial_delay_seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, - Default: 0, - }, - "period_seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds`, - Default: 10, - }, - "tcp_socket": { - Type: schema.TypeList, - Optional: true, - Deprecated: "Cloud Run does not support tcp socket in liveness probe and `liveness_probe.tcp_socket` field will be removed in a future major release.", - Description: `TCPSocket specifies an action involving a TCP port. This field is not supported in liveness probe currently.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "port": { - Type: schema.TypeInt, - Optional: true, - Description: `Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to 8080.`, - }, - }, - }, - }, - "timeout_seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, - Default: 1, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the container specified as a DNS_LABEL.`, - }, - "ports": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. - -If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "container_port": { - Type: schema.TypeInt, - Optional: true, - Description: `Port number the container listens on. This must be a valid TCP port number, 0 < containerPort < 65536.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c".`, - }, - }, - }, - }, - "resources": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Compute Resource requirements by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu_idle": { - Type: schema.TypeBool, - Optional: true, - Description: `Determines whether CPU should be throttled or not outside of requests.`, - }, - "limits": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - Description: `Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. 
The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "startup_probe": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "failure_threshold": { - Type: schema.TypeInt, - Optional: true, - Description: `Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.`, - Default: 3, - }, - "grpc": { - Type: schema.TypeList, - Optional: true, - Description: `GRPC specifies an action involving a GRPC port.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "port": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Port number to access on the container. Number must be in the range 1 to 65535. If not specified, defaults to the same value as container.ports[0].containerPort.`, - }, - "service": { - Type: schema.TypeString, - Optional: true, - Description: `The name of the service to place in the gRPC HealthCheckRequest -(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). -If this is not specified, the default behavior is defined by gRPC.`, - }, - }, - }, - }, - "http_get": { - Type: schema.TypeList, - Optional: true, - Description: `HTTPGet specifies the http request to perform. 
Exactly one of HTTPGet or TCPSocket must be specified.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "http_headers": { - Type: schema.TypeList, - Optional: true, - Description: `Custom headers to set in the request. HTTP allows repeated headers.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `The header field name`, - }, - "value": { - Type: schema.TypeString, - Optional: true, - Description: `The header field value`, - Default: "", - }, - }, - }, - }, - "path": { - Type: schema.TypeString, - Optional: true, - Description: `Path to access on the HTTP server. Defaults to '/'.`, - Default: "/", - }, - }, - }, - }, - "initial_delay_seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, - Default: 0, - }, - "period_seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds`, - Default: 10, - }, - "tcp_socket": { - Type: schema.TypeList, - Optional: true, - Description: `TCPSocket specifies an action involving a TCP port. Exactly one of HTTPGet or TCPSocket must be specified.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "port": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Port number to access on the container. Must be in the range 1 to 65535. 
If not specified, defaults to 8080.`, - }, - }, - }, - }, - "timeout_seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, - Default: 1, - }, - }, - }, - }, - "volume_mounts": { - Type: schema.TypeList, - Optional: true, - Description: `Volume to mount into the container's filesystem.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "mount_path": { - Type: schema.TypeString, - Required: true, - Description: `Path within the container at which the volume should be mounted. Must not contain ':'. For Cloud SQL volumes, it can be left empty, or must otherwise be /cloudsql. All instances defined in the Volume will be available as /cloudsql/[instance]. For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run`, - }, - "name": { - Type: schema.TypeString, - Required: true, - Description: `This must match the Name of a Volume.`, - }, - }, - }, - }, - "working_dir": { - Type: schema.TypeString, - Optional: true, - Description: `Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image.`, - }, - }, - }, - }, - "encryption_key": { - Type: schema.TypeString, - Optional: true, - Description: `A reference to a customer managed encryption key (CMEK) to use to encrypt this container image. For more information, go to https://cloud.google.com/run/docs/securing/using-cmek`, - }, - "execution_environment": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"EXECUTION_ENVIRONMENT_GEN1", "EXECUTION_ENVIRONMENT_GEN2", ""}), - Description: `The sandbox environment to host this Revision. 
Possible values: ["EXECUTION_ENVIRONMENT_GEN1", "EXECUTION_ENVIRONMENT_GEN2"]`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `KRM-style labels for the resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "max_instance_request_concurrency": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Sets the maximum number of requests that each serving instance can receive.`, - }, - "revision": { - Type: schema.TypeString, - Optional: true, - Description: `The unique name for the revision. If this field is omitted, it will be automatically generated based on the Service name.`, - }, - "scaling": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Scaling settings for this Revision.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_instance_count": { - Type: schema.TypeInt, - Optional: true, - Description: `Maximum number of serving instances that this resource should have.`, - }, - "min_instance_count": { - Type: schema.TypeInt, - Optional: true, - Description: `Minimum number of serving instances that this resource should have.`, - }, - }, - }, - }, - "service_account": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account.`, - }, - "timeout": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Max allowed time for an instance to respond to a request. - -A duration in seconds with up to nine fractional digits, ending with 's'. 
Example: "3.5s".`, - }, - "volumes": { - Type: schema.TypeList, - Optional: true, - Description: `A list of Volumes to make available to containers.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Volume's name.`, - }, - "cloud_sql_instance": { - Type: schema.TypeList, - Optional: true, - Description: `For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instances": { - Type: schema.TypeList, - Optional: true, - Description: `The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance}`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "secret": { - Type: schema.TypeList, - Optional: true, - Description: `Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret": { - Type: schema.TypeString, - Required: true, - Description: `The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project.`, - }, - "default_mode": { - Type: schema.TypeInt, - Optional: true, - Description: `Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. 
Directories within the path are not affected by this setting.`, - }, - "items": { - Type: schema.TypeList, - Optional: true, - Description: `If unspecified, the volume will expose a file whose name is the secret, relative to VolumeMount.mount_path. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a path and a version.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "mode": { - Type: schema.TypeInt, - Required: true, - Description: `Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used.`, - }, - "path": { - Type: schema.TypeString, - Required: true, - Description: `The relative path of the secret in the container.`, - }, - "version": { - Type: schema.TypeString, - Optional: true, - Description: `The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "vpc_access": { - Type: schema.TypeList, - Optional: true, - Description: `VPC Access configuration to use for this Task. For more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "connector": { - Type: schema.TypeString, - Optional: true, - Description: `VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number.`, - }, - "egress": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"ALL_TRAFFIC", "PRIVATE_RANGES_ONLY", ""}), - Description: `Traffic VPC egress settings. 
Possible values: ["ALL_TRAFFIC", "PRIVATE_RANGES_ONLY"]`, - }, - }, - }, - }, - }, - }, - }, - "annotations": { - Type: schema.TypeMap, - Optional: true, - Description: `Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run will populate some annotations using 'run.googleapis.com' or 'serving.knative.dev' namespaces. This field follows Kubernetes annotations' namespacing, limits, and rules. More info: https://kubernetes.io/docs/user-guide/annotations`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "binary_authorization": { - Type: schema.TypeList, - Optional: true, - Description: `Settings for the Binary Authorization feature.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "breakglass_justification": { - Type: schema.TypeString, - Optional: true, - Description: `If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass`, - }, - "use_default": { - Type: schema.TypeBool, - Optional: true, - Description: `If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled.`, - }, - }, - }, - }, - "client": { - Type: schema.TypeString, - Optional: true, - Description: `Arbitrary identifier for the API client.`, - }, - "client_version": { - Type: schema.TypeString, - Optional: true, - Description: `Arbitrary version identifier for the API client.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `User-provided description of the Service. 
This field currently has a 512-character limit.`, - }, - "ingress": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"INGRESS_TRAFFIC_ALL", "INGRESS_TRAFFIC_INTERNAL_ONLY", "INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER", ""}), - Description: `Provides the ingress settings for this Service. On output, returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED if no revision is active. Possible values: ["INGRESS_TRAFFIC_ALL", "INGRESS_TRAFFIC_INTERNAL_ONLY", "INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER"]`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Map of string keys and values that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels Cloud Run will populate some labels with 'run.googleapis.com' or 'serving.knative.dev' namespaces. Those labels are read-only, and user changes will not be preserved.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "launch_stage": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED", ""}), - Description: `The launch stage as defined by Google Cloud Platform Launch Stages. Cloud Run supports ALPHA, BETA, and GA. If no value is specified, GA is assumed. 
Possible values: ["UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED"]`, - }, - "location": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The location of the cloud run service`, - }, - "traffic": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Specifies how to distribute traffic over a collection of Revisions belonging to the Service. If traffic is empty or not provided, defaults to 100% traffic to the latest Ready Revision.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "percent": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Specifies percent of the traffic to this Revision. This defaults to zero if unspecified.`, - }, - "revision": { - Type: schema.TypeString, - Optional: true, - Description: `Revision to which to send this portion of traffic, if traffic allocation is by revision.`, - }, - "tag": { - Type: schema.TypeString, - Optional: true, - Description: `Indicates a string to be part of the URI to exclusively reference this target.`, - }, - "type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST", "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION", ""}), - Description: `The allocation type for this traffic target. Possible values: ["TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST", "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"]`, - }, - }, - }, - }, - "conditions": { - Type: schema.TypeList, - Computed: true, - Description: `The Conditions of all other associated sub-resources. They contain additional diagnostics information in case the Service does not reach its Serving state. 
See comments in reconciling for additional information on reconciliation process in Cloud Run.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "execution_reason": { - Type: schema.TypeString, - Computed: true, - Description: `A reason for the execution condition.`, - }, - "last_transition_time": { - Type: schema.TypeString, - Computed: true, - Description: `Last time the condition transitioned from one status to another. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "message": { - Type: schema.TypeString, - Computed: true, - Description: `Human readable message indicating details about the current status.`, - }, - "reason": { - Type: schema.TypeString, - Computed: true, - Description: `A common (service-level) reason for this condition.`, - }, - "revision_reason": { - Type: schema.TypeString, - Computed: true, - Description: `A reason for the revision condition.`, - }, - "severity": { - Type: schema.TypeString, - Computed: true, - Description: `How to interpret failures of this condition, one of Error, Warning, Info`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `State of the condition.`, - }, - "type": { - Type: schema.TypeString, - Computed: true, - Description: `type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready.`, - }, - }, - }, - }, - "etag": { - Type: schema.TypeString, - Computed: true, - Description: `A system-generated fingerprint for this version of the resource. 
May be used to detect modification conflict during updates.`, - }, - "generation": { - Type: schema.TypeString, - Computed: true, - Description: `A number that monotonically increases every time the user modifies the desired state. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a string instead of an integer.`, - }, - "latest_created_revision": { - Type: schema.TypeString, - Computed: true, - Description: `Name of the last created revision. See comments in reconciling for additional information on reconciliation process in Cloud Run.`, - }, - "latest_ready_revision": { - Type: schema.TypeString, - Computed: true, - Description: `Name of the latest revision that is serving traffic. See comments in reconciling for additional information on reconciliation process in Cloud Run.`, - }, - "observed_generation": { - Type: schema.TypeString, - Computed: true, - Description: `The generation of this Service currently serving traffic. See comments in reconciling for additional information on reconciliation process in Cloud Run. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a string instead of an integer.`, - }, - "reconciling": { - Type: schema.TypeBool, - Computed: true, - Description: `Returns true if the Service is currently being acted upon by the system to bring it into the desired state. - -When a new Service is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Service to the desired serving state. This process is called reconciliation. 
While reconciliation is in process, observedGeneration, latest_ready_revison, trafficStatuses, and uri will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the serving state matches the Service, or there was an error, and reconciliation failed. This state can be found in terminalCondition.state. - -If reconciliation succeeded, the following fields will match: traffic and trafficStatuses, observedGeneration and generation, latestReadyRevision and latestCreatedRevision. - -If reconciliation failed, trafficStatuses, observedGeneration, and latestReadyRevision will have the state of the last serving revision, or empty for newly created Services. Additional information on the failure can be found in terminalCondition and conditions.`, - }, - "terminal_condition": { - Type: schema.TypeList, - Computed: true, - Description: `The Condition of this Service, containing its readiness status, and detailed error information in case it did not reach a serving state. 
See comments in reconciling for additional information on reconciliation process in Cloud Run.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "execution_reason": { - Type: schema.TypeString, - Computed: true, - Description: `A reason for the execution condition.`, - }, - "last_transition_time": { - Type: schema.TypeString, - Computed: true, - Description: `Last time the condition transitioned from one status to another.`, - }, - "message": { - Type: schema.TypeString, - Computed: true, - Description: `Human readable message indicating details about the current status.`, - }, - "reason": { - Type: schema.TypeString, - Computed: true, - Description: `A common (service-level) reason for this condition.`, - }, - "revision_reason": { - Type: schema.TypeString, - Computed: true, - Description: `A reason for the revision condition.`, - }, - "severity": { - Type: schema.TypeString, - Computed: true, - Description: `How to interpret failures of this condition, one of Error, Warning, Info`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `State of the condition.`, - }, - "type": { - Type: schema.TypeString, - Computed: true, - Description: `type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready.`, - }, - }, - }, - }, - "traffic_statuses": { - Type: schema.TypeList, - Computed: true, - Description: `Detailed status information for corresponding traffic targets. 
See comments in reconciling for additional information on reconciliation process in Cloud Run.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "percent": { - Type: schema.TypeInt, - Computed: true, - Description: `Specifies percent of the traffic to this Revision.`, - }, - "revision": { - Type: schema.TypeString, - Computed: true, - Description: `Revision to which this traffic is sent.`, - }, - "tag": { - Type: schema.TypeString, - Computed: true, - Description: `Indicates the string used in the URI to exclusively reference this target.`, - }, - "type": { - Type: schema.TypeString, - Computed: true, - Description: `The allocation type for this traffic target.`, - }, - "uri": { - Type: schema.TypeString, - Computed: true, - Description: `Displays the target URI.`, - }, - }, - }, - }, - "uid": { - Type: schema.TypeString, - Computed: true, - Description: `Server assigned unique identifier for the trigger. The value is a UUID4 string and guaranteed to remain unchanged until the resource is deleted.`, - }, - "uri": { - Type: schema.TypeString, - Computed: true, - Description: `The main URI in which this Service is serving traffic.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudRunV2ServiceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandCloudRunV2ServiceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandCloudRunV2ServiceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } 
else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - annotationsProp, err := expandCloudRunV2ServiceAnnotations(d.Get("annotations"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("annotations"); !isEmptyValue(reflect.ValueOf(annotationsProp)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { - obj["annotations"] = annotationsProp - } - clientProp, err := expandCloudRunV2ServiceClient(d.Get("client"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client"); !isEmptyValue(reflect.ValueOf(clientProp)) && (ok || !reflect.DeepEqual(v, clientProp)) { - obj["client"] = clientProp - } - clientVersionProp, err := expandCloudRunV2ServiceClientVersion(d.Get("client_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_version"); !isEmptyValue(reflect.ValueOf(clientVersionProp)) && (ok || !reflect.DeepEqual(v, clientVersionProp)) { - obj["clientVersion"] = clientVersionProp - } - ingressProp, err := expandCloudRunV2ServiceIngress(d.Get("ingress"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ingress"); !isEmptyValue(reflect.ValueOf(ingressProp)) && (ok || !reflect.DeepEqual(v, ingressProp)) { - obj["ingress"] = ingressProp - } - launchStageProp, err := expandCloudRunV2ServiceLaunchStage(d.Get("launch_stage"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("launch_stage"); !isEmptyValue(reflect.ValueOf(launchStageProp)) && (ok || !reflect.DeepEqual(v, launchStageProp)) { - obj["launchStage"] = launchStageProp - } - binaryAuthorizationProp, err := expandCloudRunV2ServiceBinaryAuthorization(d.Get("binary_authorization"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("binary_authorization"); !isEmptyValue(reflect.ValueOf(binaryAuthorizationProp)) && (ok || 
!reflect.DeepEqual(v, binaryAuthorizationProp)) { - obj["binaryAuthorization"] = binaryAuthorizationProp - } - templateProp, err := expandCloudRunV2ServiceTemplate(d.Get("template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("template"); !isEmptyValue(reflect.ValueOf(templateProp)) && (ok || !reflect.DeepEqual(v, templateProp)) { - obj["template"] = templateProp - } - trafficProp, err := expandCloudRunV2ServiceTraffic(d.Get("traffic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("traffic"); !isEmptyValue(reflect.ValueOf(trafficProp)) && (ok || !reflect.DeepEqual(v, trafficProp)) { - obj["traffic"] = trafficProp - } - - url, err := replaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/services?serviceId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Service: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Service: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/services/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = CloudRunV2OperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Service", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource 
didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Service: %s", err) - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/services/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Service %q: %#v", d.Id(), res) - - return resourceCloudRunV2ServiceRead(d, meta) -} - -func resourceCloudRunV2ServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/services/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudRunV2Service %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - - if err := d.Set("description", flattenCloudRunV2ServiceDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("uid", flattenCloudRunV2ServiceUid(res["uid"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("generation", flattenCloudRunV2ServiceGeneration(res["generation"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err 
:= d.Set("labels", flattenCloudRunV2ServiceLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("annotations", flattenCloudRunV2ServiceAnnotations(res["annotations"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("client", flattenCloudRunV2ServiceClient(res["client"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("client_version", flattenCloudRunV2ServiceClientVersion(res["clientVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("ingress", flattenCloudRunV2ServiceIngress(res["ingress"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("launch_stage", flattenCloudRunV2ServiceLaunchStage(res["launchStage"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("binary_authorization", flattenCloudRunV2ServiceBinaryAuthorization(res["binaryAuthorization"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("template", flattenCloudRunV2ServiceTemplate(res["template"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("traffic", flattenCloudRunV2ServiceTraffic(res["traffic"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("observed_generation", flattenCloudRunV2ServiceObservedGeneration(res["observedGeneration"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("terminal_condition", flattenCloudRunV2ServiceTerminalCondition(res["terminalCondition"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("conditions", flattenCloudRunV2ServiceConditions(res["conditions"], d, 
config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("latest_ready_revision", flattenCloudRunV2ServiceLatestReadyRevision(res["latestReadyRevision"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("latest_created_revision", flattenCloudRunV2ServiceLatestCreatedRevision(res["latestCreatedRevision"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("traffic_statuses", flattenCloudRunV2ServiceTrafficStatuses(res["trafficStatuses"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("uri", flattenCloudRunV2ServiceUri(res["uri"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("reconciling", flattenCloudRunV2ServiceReconciling(res["reconciling"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("etag", flattenCloudRunV2ServiceEtag(res["etag"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - - return nil -} - -func resourceCloudRunV2ServiceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandCloudRunV2ServiceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandCloudRunV2ServiceLabels(d.Get("labels"), d, config) - if err != 
nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - annotationsProp, err := expandCloudRunV2ServiceAnnotations(d.Get("annotations"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("annotations"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { - obj["annotations"] = annotationsProp - } - clientProp, err := expandCloudRunV2ServiceClient(d.Get("client"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientProp)) { - obj["client"] = clientProp - } - clientVersionProp, err := expandCloudRunV2ServiceClientVersion(d.Get("client_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientVersionProp)) { - obj["clientVersion"] = clientVersionProp - } - ingressProp, err := expandCloudRunV2ServiceIngress(d.Get("ingress"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ingress"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ingressProp)) { - obj["ingress"] = ingressProp - } - launchStageProp, err := expandCloudRunV2ServiceLaunchStage(d.Get("launch_stage"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("launch_stage"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, launchStageProp)) { - obj["launchStage"] = launchStageProp - } - binaryAuthorizationProp, err := expandCloudRunV2ServiceBinaryAuthorization(d.Get("binary_authorization"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("binary_authorization"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, binaryAuthorizationProp)) { - obj["binaryAuthorization"] = 
binaryAuthorizationProp - } - templateProp, err := expandCloudRunV2ServiceTemplate(d.Get("template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("template"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, templateProp)) { - obj["template"] = templateProp - } - trafficProp, err := expandCloudRunV2ServiceTraffic(d.Get("traffic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("traffic"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trafficProp)) { - obj["traffic"] = trafficProp - } - - url, err := replaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/services/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Service %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Service %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Service %q: %#v", d.Id(), res) - } - - err = CloudRunV2OperationWaitTime( - config, res, project, "Updating Service", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceCloudRunV2ServiceRead(d, meta) -} - -func resourceCloudRunV2ServiceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, 
"{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/services/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Service %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Service") - } - - err = CloudRunV2OperationWaitTime( - config, res, project, "Deleting Service", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudRunV2ServiceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/services/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenCloudRunV2ServiceDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceGeneration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceAnnotations(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceClient(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceClientVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceIngress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceLaunchStage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceBinaryAuthorization(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["breakglass_justification"] = - flattenCloudRunV2ServiceBinaryAuthorizationBreakglassJustification(original["breakglassJustification"], d, config) - transformed["use_default"] = - flattenCloudRunV2ServiceBinaryAuthorizationUseDefault(original["useDefault"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceBinaryAuthorizationBreakglassJustification(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceBinaryAuthorizationUseDefault(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["revision"] = - flattenCloudRunV2ServiceTemplateRevision(original["revision"], d, config) - transformed["labels"] = - flattenCloudRunV2ServiceTemplateLabels(original["labels"], d, config) - transformed["annotations"] = - 
flattenCloudRunV2ServiceTemplateAnnotations(original["annotations"], d, config) - transformed["scaling"] = - flattenCloudRunV2ServiceTemplateScaling(original["scaling"], d, config) - transformed["vpc_access"] = - flattenCloudRunV2ServiceTemplateVPCAccess(original["vpcAccess"], d, config) - transformed["timeout"] = - flattenCloudRunV2ServiceTemplateTimeout(original["timeout"], d, config) - transformed["service_account"] = - flattenCloudRunV2ServiceTemplateServiceAccount(original["serviceAccount"], d, config) - transformed["containers"] = - flattenCloudRunV2ServiceTemplateContainers(original["containers"], d, config) - transformed["volumes"] = - flattenCloudRunV2ServiceTemplateVolumes(original["volumes"], d, config) - transformed["execution_environment"] = - flattenCloudRunV2ServiceTemplateExecutionEnvironment(original["executionEnvironment"], d, config) - transformed["encryption_key"] = - flattenCloudRunV2ServiceTemplateEncryptionKey(original["encryptionKey"], d, config) - transformed["max_instance_request_concurrency"] = - flattenCloudRunV2ServiceTemplateMaxInstanceRequestConcurrency(original["maxInstanceRequestConcurrency"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateRevision(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateAnnotations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateScaling(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["min_instance_count"] = - flattenCloudRunV2ServiceTemplateScalingMinInstanceCount(original["minInstanceCount"], d, 
config) - transformed["max_instance_count"] = - flattenCloudRunV2ServiceTemplateScalingMaxInstanceCount(original["maxInstanceCount"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateScalingMinInstanceCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateScalingMaxInstanceCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateVPCAccess(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["connector"] = - flattenCloudRunV2ServiceTemplateVPCAccessConnector(original["connector"], d, config) - transformed["egress"] = - flattenCloudRunV2ServiceTemplateVPCAccessEgress(original["egress"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateVPCAccessConnector(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateVPCAccessEgress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudRunV2ServiceTemplateTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudRunV2ServiceTemplateContainersName(original["name"], d, config), - "image": flattenCloudRunV2ServiceTemplateContainersImage(original["image"], d, config), - "command": flattenCloudRunV2ServiceTemplateContainersCommand(original["command"], d, config), - "args": flattenCloudRunV2ServiceTemplateContainersArgs(original["args"], d, config), - "env": flattenCloudRunV2ServiceTemplateContainersEnv(original["env"], d, config), - "resources": flattenCloudRunV2ServiceTemplateContainersResources(original["resources"], d, config), - "ports": flattenCloudRunV2ServiceTemplateContainersPorts(original["ports"], d, config), - "volume_mounts": flattenCloudRunV2ServiceTemplateContainersVolumeMounts(original["volumeMounts"], d, config), - "working_dir": flattenCloudRunV2ServiceTemplateContainersWorkingDir(original["workingDir"], d, config), - "liveness_probe": flattenCloudRunV2ServiceTemplateContainersLivenessProbe(original["livenessProbe"], d, config), - "startup_probe": flattenCloudRunV2ServiceTemplateContainersStartupProbe(original["startupProbe"], d, config), - }) - } - return transformed -} -func flattenCloudRunV2ServiceTemplateContainersName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudRunV2ServiceTemplateContainersImage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersCommand(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersArgs(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersEnv(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudRunV2ServiceTemplateContainersEnvName(original["name"], d, config), - "value": flattenCloudRunV2ServiceTemplateContainersEnvValue(original["value"], d, config), - "value_source": flattenCloudRunV2ServiceTemplateContainersEnvValueSource(original["valueSource"], d, config), - }) - } - return transformed -} -func flattenCloudRunV2ServiceTemplateContainersEnvName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersEnvValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersEnvValueSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["secret_key_ref"] = - flattenCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRef(original["secretKeyRef"], d, config) - return []interface{}{transformed} -} -func 
flattenCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRef(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["secret"] = - flattenCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefSecret(original["secret"], d, config) - transformed["version"] = - flattenCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefVersion(original["version"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["limits"] = - flattenCloudRunV2ServiceTemplateContainersResourcesLimits(original["limits"], d, config) - transformed["cpu_idle"] = - flattenCloudRunV2ServiceTemplateContainersResourcesCpuIdle(original["cpuIdle"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateContainersResourcesLimits(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersResourcesCpuIdle(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersPorts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - 
transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudRunV2ServiceTemplateContainersPortsName(original["name"], d, config), - "container_port": flattenCloudRunV2ServiceTemplateContainersPortsContainerPort(original["containerPort"], d, config), - }) - } - return transformed -} -func flattenCloudRunV2ServiceTemplateContainersPortsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersPortsContainerPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateContainersVolumeMounts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudRunV2ServiceTemplateContainersVolumeMountsName(original["name"], d, config), - "mount_path": flattenCloudRunV2ServiceTemplateContainersVolumeMountsMountPath(original["mountPath"], d, config), - }) - } - return transformed -} -func flattenCloudRunV2ServiceTemplateContainersVolumeMountsName(v interface{}, d *schema.ResourceData, config *Config) interface{} 
{ - return v -} - -func flattenCloudRunV2ServiceTemplateContainersVolumeMountsMountPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersWorkingDir(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersLivenessProbe(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["initial_delay_seconds"] = - flattenCloudRunV2ServiceTemplateContainersLivenessProbeInitialDelaySeconds(original["initialDelaySeconds"], d, config) - transformed["timeout_seconds"] = - flattenCloudRunV2ServiceTemplateContainersLivenessProbeTimeoutSeconds(original["timeoutSeconds"], d, config) - transformed["period_seconds"] = - flattenCloudRunV2ServiceTemplateContainersLivenessProbePeriodSeconds(original["periodSeconds"], d, config) - transformed["failure_threshold"] = - flattenCloudRunV2ServiceTemplateContainersLivenessProbeFailureThreshold(original["failureThreshold"], d, config) - transformed["http_get"] = - flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGet(original["httpGet"], d, config) - transformed["tcp_socket"] = - flattenCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocket(original["tcpSocket"], d, config) - transformed["grpc"] = - flattenCloudRunV2ServiceTemplateContainersLivenessProbeGrpc(original["grpc"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateContainersLivenessProbeInitialDelaySeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := 
v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateContainersLivenessProbeTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateContainersLivenessProbePeriodSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateContainersLivenessProbeFailureThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGet(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["path"] = - flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetPath(original["path"], d, config) - 
transformed["http_headers"] = - flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeaders(original["httpHeaders"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersName(original["name"], d, config), - "value": flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersValue(original["value"], d, config), - }) - } - return transformed -} -func flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocket(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["port"] = - flattenCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocketPort(original["port"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocketPort(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateContainersLivenessProbeGrpc(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["port"] = - flattenCloudRunV2ServiceTemplateContainersLivenessProbeGrpcPort(original["port"], d, config) - transformed["service"] = - flattenCloudRunV2ServiceTemplateContainersLivenessProbeGrpcService(original["service"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateContainersLivenessProbeGrpcPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateContainersLivenessProbeGrpcService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersStartupProbe(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["initial_delay_seconds"] = - flattenCloudRunV2ServiceTemplateContainersStartupProbeInitialDelaySeconds(original["initialDelaySeconds"], d, 
config) - transformed["timeout_seconds"] = - flattenCloudRunV2ServiceTemplateContainersStartupProbeTimeoutSeconds(original["timeoutSeconds"], d, config) - transformed["period_seconds"] = - flattenCloudRunV2ServiceTemplateContainersStartupProbePeriodSeconds(original["periodSeconds"], d, config) - transformed["failure_threshold"] = - flattenCloudRunV2ServiceTemplateContainersStartupProbeFailureThreshold(original["failureThreshold"], d, config) - transformed["http_get"] = - flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGet(original["httpGet"], d, config) - transformed["tcp_socket"] = - flattenCloudRunV2ServiceTemplateContainersStartupProbeTcpSocket(original["tcpSocket"], d, config) - transformed["grpc"] = - flattenCloudRunV2ServiceTemplateContainersStartupProbeGrpc(original["grpc"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateContainersStartupProbeInitialDelaySeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateContainersStartupProbeTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateContainersStartupProbePeriodSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // 
Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateContainersStartupProbeFailureThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGet(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["path"] = - flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetPath(original["path"], d, config) - transformed["http_headers"] = - flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeaders(original["httpHeaders"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = 
append(transformed, map[string]interface{}{ - "name": flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersName(original["name"], d, config), - "value": flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersValue(original["value"], d, config), - }) - } - return transformed -} -func flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateContainersStartupProbeTcpSocket(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["port"] = - flattenCloudRunV2ServiceTemplateContainersStartupProbeTcpSocketPort(original["port"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateContainersStartupProbeTcpSocketPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateContainersStartupProbeGrpc(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["port"] = - flattenCloudRunV2ServiceTemplateContainersStartupProbeGrpcPort(original["port"], d, config) - transformed["service"] = - 
flattenCloudRunV2ServiceTemplateContainersStartupProbeGrpcService(original["service"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateContainersStartupProbeGrpcPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateContainersStartupProbeGrpcService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateVolumes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudRunV2ServiceTemplateVolumesName(original["name"], d, config), - "secret": flattenCloudRunV2ServiceTemplateVolumesSecret(original["secret"], d, config), - "cloud_sql_instance": flattenCloudRunV2ServiceTemplateVolumesCloudSqlInstance(original["cloudSqlInstance"], d, config), - }) - } - return transformed -} -func flattenCloudRunV2ServiceTemplateVolumesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateVolumesSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["secret"] = - flattenCloudRunV2ServiceTemplateVolumesSecretSecret(original["secret"], d, config) - transformed["default_mode"] = - flattenCloudRunV2ServiceTemplateVolumesSecretDefaultMode(original["defaultMode"], d, config) - transformed["items"] = - flattenCloudRunV2ServiceTemplateVolumesSecretItems(original["items"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateVolumesSecretSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateVolumesSecretDefaultMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateVolumesSecretItems(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "path": flattenCloudRunV2ServiceTemplateVolumesSecretItemsPath(original["path"], d, config), - "version": flattenCloudRunV2ServiceTemplateVolumesSecretItemsVersion(original["version"], d, config), - "mode": flattenCloudRunV2ServiceTemplateVolumesSecretItemsMode(original["mode"], d, config), - }) - } - return transformed -} -func flattenCloudRunV2ServiceTemplateVolumesSecretItemsPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudRunV2ServiceTemplateVolumesSecretItemsVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateVolumesSecretItemsMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTemplateVolumesCloudSqlInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["instances"] = - flattenCloudRunV2ServiceTemplateVolumesCloudSqlInstanceInstances(original["instances"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTemplateVolumesCloudSqlInstanceInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateExecutionEnvironment(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTemplateMaxInstanceRequestConcurrency(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it 
otherwise -} - -func flattenCloudRunV2ServiceTraffic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "type": flattenCloudRunV2ServiceTrafficType(original["type"], d, config), - "revision": flattenCloudRunV2ServiceTrafficRevision(original["revision"], d, config), - "percent": flattenCloudRunV2ServiceTrafficPercent(original["percent"], d, config), - "tag": flattenCloudRunV2ServiceTrafficTag(original["tag"], d, config), - }) - } - return transformed -} -func flattenCloudRunV2ServiceTrafficType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTrafficRevision(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTrafficPercent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTrafficTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceObservedGeneration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTerminalCondition(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if 
len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["type"] = - flattenCloudRunV2ServiceTerminalConditionType(original["type"], d, config) - transformed["state"] = - flattenCloudRunV2ServiceTerminalConditionState(original["state"], d, config) - transformed["message"] = - flattenCloudRunV2ServiceTerminalConditionMessage(original["message"], d, config) - transformed["last_transition_time"] = - flattenCloudRunV2ServiceTerminalConditionLastTransitionTime(original["lastTransitionTime"], d, config) - transformed["severity"] = - flattenCloudRunV2ServiceTerminalConditionSeverity(original["severity"], d, config) - transformed["reason"] = - flattenCloudRunV2ServiceTerminalConditionReason(original["reason"], d, config) - transformed["revision_reason"] = - flattenCloudRunV2ServiceTerminalConditionRevisionReason(original["revisionReason"], d, config) - transformed["execution_reason"] = - flattenCloudRunV2ServiceTerminalConditionExecutionReason(original["executionReason"], d, config) - return []interface{}{transformed} -} -func flattenCloudRunV2ServiceTerminalConditionType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTerminalConditionState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTerminalConditionMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTerminalConditionLastTransitionTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTerminalConditionSeverity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTerminalConditionReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTerminalConditionRevisionReason(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTerminalConditionExecutionReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "type": flattenCloudRunV2ServiceConditionsType(original["type"], d, config), - "state": flattenCloudRunV2ServiceConditionsState(original["state"], d, config), - "message": flattenCloudRunV2ServiceConditionsMessage(original["message"], d, config), - "last_transition_time": flattenCloudRunV2ServiceConditionsLastTransitionTime(original["lastTransitionTime"], d, config), - "severity": flattenCloudRunV2ServiceConditionsSeverity(original["severity"], d, config), - "reason": flattenCloudRunV2ServiceConditionsReason(original["reason"], d, config), - "revision_reason": flattenCloudRunV2ServiceConditionsRevisionReason(original["revisionReason"], d, config), - "execution_reason": flattenCloudRunV2ServiceConditionsExecutionReason(original["executionReason"], d, config), - }) - } - return transformed -} -func flattenCloudRunV2ServiceConditionsType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceConditionsState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceConditionsMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceConditionsLastTransitionTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v 
-} - -func flattenCloudRunV2ServiceConditionsSeverity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceConditionsReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceConditionsRevisionReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceConditionsExecutionReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceLatestReadyRevision(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceLatestCreatedRevision(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTrafficStatuses(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "type": flattenCloudRunV2ServiceTrafficStatusesType(original["type"], d, config), - "revision": flattenCloudRunV2ServiceTrafficStatusesRevision(original["revision"], d, config), - "percent": flattenCloudRunV2ServiceTrafficStatusesPercent(original["percent"], d, config), - "tag": flattenCloudRunV2ServiceTrafficStatusesTag(original["tag"], d, config), - "uri": flattenCloudRunV2ServiceTrafficStatusesUri(original["uri"], d, config), - }) - } - return transformed -} -func flattenCloudRunV2ServiceTrafficStatusesType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTrafficStatusesRevision(v interface{}, d *schema.ResourceData, config *Config) interface{} 
{ - return v -} - -func flattenCloudRunV2ServiceTrafficStatusesPercent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudRunV2ServiceTrafficStatusesTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceTrafficStatusesUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceReconciling(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunV2ServiceEtag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudRunV2ServiceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunV2ServiceAnnotations(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunV2ServiceClient(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandCloudRunV2ServiceClientVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceIngress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceLaunchStage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceBinaryAuthorization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBreakglassJustification, err := expandCloudRunV2ServiceBinaryAuthorizationBreakglassJustification(original["breakglass_justification"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBreakglassJustification); val.IsValid() && !isEmptyValue(val) { - transformed["breakglassJustification"] = transformedBreakglassJustification - } - - transformedUseDefault, err := expandCloudRunV2ServiceBinaryAuthorizationUseDefault(original["use_default"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUseDefault); val.IsValid() && !isEmptyValue(val) { - transformed["useDefault"] = transformedUseDefault - } - - return transformed, nil -} - -func expandCloudRunV2ServiceBinaryAuthorizationBreakglassJustification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceBinaryAuthorizationUseDefault(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - 
return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRevision, err := expandCloudRunV2ServiceTemplateRevision(original["revision"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRevision); val.IsValid() && !isEmptyValue(val) { - transformed["revision"] = transformedRevision - } - - transformedLabels, err := expandCloudRunV2ServiceTemplateLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - transformedAnnotations, err := expandCloudRunV2ServiceTemplateAnnotations(original["annotations"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAnnotations); val.IsValid() && !isEmptyValue(val) { - transformed["annotations"] = transformedAnnotations - } - - transformedScaling, err := expandCloudRunV2ServiceTemplateScaling(original["scaling"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedScaling); val.IsValid() && !isEmptyValue(val) { - transformed["scaling"] = transformedScaling - } - - transformedVPCAccess, err := expandCloudRunV2ServiceTemplateVPCAccess(original["vpc_access"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVPCAccess); val.IsValid() && !isEmptyValue(val) { - transformed["vpcAccess"] = transformedVPCAccess - } - - transformedTimeout, err := expandCloudRunV2ServiceTemplateTimeout(original["timeout"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["timeout"] = transformedTimeout - } - - transformedServiceAccount, err := expandCloudRunV2ServiceTemplateServiceAccount(original["service_account"], d, config) - if err != nil { - 
return nil, err - } else if val := reflect.ValueOf(transformedServiceAccount); val.IsValid() && !isEmptyValue(val) { - transformed["serviceAccount"] = transformedServiceAccount - } - - transformedContainers, err := expandCloudRunV2ServiceTemplateContainers(original["containers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContainers); val.IsValid() && !isEmptyValue(val) { - transformed["containers"] = transformedContainers - } - - transformedVolumes, err := expandCloudRunV2ServiceTemplateVolumes(original["volumes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVolumes); val.IsValid() && !isEmptyValue(val) { - transformed["volumes"] = transformedVolumes - } - - transformedExecutionEnvironment, err := expandCloudRunV2ServiceTemplateExecutionEnvironment(original["execution_environment"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExecutionEnvironment); val.IsValid() && !isEmptyValue(val) { - transformed["executionEnvironment"] = transformedExecutionEnvironment - } - - transformedEncryptionKey, err := expandCloudRunV2ServiceTemplateEncryptionKey(original["encryption_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEncryptionKey); val.IsValid() && !isEmptyValue(val) { - transformed["encryptionKey"] = transformedEncryptionKey - } - - transformedMaxInstanceRequestConcurrency, err := expandCloudRunV2ServiceTemplateMaxInstanceRequestConcurrency(original["max_instance_request_concurrency"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxInstanceRequestConcurrency); val.IsValid() && !isEmptyValue(val) { - transformed["maxInstanceRequestConcurrency"] = transformedMaxInstanceRequestConcurrency - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateRevision(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunV2ServiceTemplateAnnotations(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunV2ServiceTemplateScaling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinInstanceCount, err := expandCloudRunV2ServiceTemplateScalingMinInstanceCount(original["min_instance_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinInstanceCount); val.IsValid() && !isEmptyValue(val) { - transformed["minInstanceCount"] = transformedMinInstanceCount - } - - transformedMaxInstanceCount, err := expandCloudRunV2ServiceTemplateScalingMaxInstanceCount(original["max_instance_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxInstanceCount); val.IsValid() && !isEmptyValue(val) { - transformed["maxInstanceCount"] = transformedMaxInstanceCount - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateScalingMinInstanceCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateScalingMaxInstanceCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateVPCAccess(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedConnector, err := expandCloudRunV2ServiceTemplateVPCAccessConnector(original["connector"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConnector); val.IsValid() && !isEmptyValue(val) { - transformed["connector"] = transformedConnector - } - - transformedEgress, err := expandCloudRunV2ServiceTemplateVPCAccessEgress(original["egress"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEgress); val.IsValid() && !isEmptyValue(val) { - transformed["egress"] = transformedEgress - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateVPCAccessConnector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateVPCAccessEgress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunV2ServiceTemplateContainersName(original["name"], 
d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedImage, err := expandCloudRunV2ServiceTemplateContainersImage(original["image"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedImage); val.IsValid() && !isEmptyValue(val) { - transformed["image"] = transformedImage - } - - transformedCommand, err := expandCloudRunV2ServiceTemplateContainersCommand(original["command"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCommand); val.IsValid() && !isEmptyValue(val) { - transformed["command"] = transformedCommand - } - - transformedArgs, err := expandCloudRunV2ServiceTemplateContainersArgs(original["args"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !isEmptyValue(val) { - transformed["args"] = transformedArgs - } - - transformedEnv, err := expandCloudRunV2ServiceTemplateContainersEnv(original["env"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnv); val.IsValid() && !isEmptyValue(val) { - transformed["env"] = transformedEnv - } - - transformedResources, err := expandCloudRunV2ServiceTemplateContainersResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedPorts, err := expandCloudRunV2ServiceTemplateContainersPorts(original["ports"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPorts); val.IsValid() && !isEmptyValue(val) { - transformed["ports"] = transformedPorts - } - - transformedVolumeMounts, err := expandCloudRunV2ServiceTemplateContainersVolumeMounts(original["volume_mounts"], d, 
config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVolumeMounts); val.IsValid() && !isEmptyValue(val) { - transformed["volumeMounts"] = transformedVolumeMounts - } - - transformedWorkingDir, err := expandCloudRunV2ServiceTemplateContainersWorkingDir(original["working_dir"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWorkingDir); val.IsValid() && !isEmptyValue(val) { - transformed["workingDir"] = transformedWorkingDir - } - - transformedLivenessProbe, err := expandCloudRunV2ServiceTemplateContainersLivenessProbe(original["liveness_probe"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLivenessProbe); val.IsValid() && !isEmptyValue(val) { - transformed["livenessProbe"] = transformedLivenessProbe - } - - transformedStartupProbe, err := expandCloudRunV2ServiceTemplateContainersStartupProbe(original["startup_probe"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStartupProbe); val.IsValid() && !isEmptyValue(val) { - transformed["startupProbe"] = transformedStartupProbe - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunV2ServiceTemplateContainersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersCommand(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersArgs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - 
req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunV2ServiceTemplateContainersEnvName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedValue, err := expandCloudRunV2ServiceTemplateContainersEnvValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - transformedValueSource, err := expandCloudRunV2ServiceTemplateContainersEnvValueSource(original["value_source"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValueSource); val.IsValid() && !isEmptyValue(val) { - transformed["valueSource"] = transformedValueSource - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunV2ServiceTemplateContainersEnvName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersEnvValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersEnvValueSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSecretKeyRef, err := expandCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRef(original["secret_key_ref"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedSecretKeyRef); val.IsValid() && !isEmptyValue(val) { - transformed["secretKeyRef"] = transformedSecretKeyRef - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRef(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSecret, err := expandCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefSecret(original["secret"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { - transformed["secret"] = transformedSecret - } - - transformedVersion, err := expandCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLimits, err := expandCloudRunV2ServiceTemplateContainersResourcesLimits(original["limits"], d, config) - if err != nil { - return 
nil, err - } else if val := reflect.ValueOf(transformedLimits); val.IsValid() && !isEmptyValue(val) { - transformed["limits"] = transformedLimits - } - - transformedCpuIdle, err := expandCloudRunV2ServiceTemplateContainersResourcesCpuIdle(original["cpu_idle"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCpuIdle); val.IsValid() && !isEmptyValue(val) { - transformed["cpuIdle"] = transformedCpuIdle - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateContainersResourcesLimits(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunV2ServiceTemplateContainersResourcesCpuIdle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersPorts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunV2ServiceTemplateContainersPortsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedContainerPort, err := expandCloudRunV2ServiceTemplateContainersPortsContainerPort(original["container_port"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContainerPort); val.IsValid() && !isEmptyValue(val) { - transformed["containerPort"] = transformedContainerPort - } - - req = append(req, transformed) - } - return req, nil -} - 
-func expandCloudRunV2ServiceTemplateContainersPortsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersPortsContainerPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersVolumeMounts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunV2ServiceTemplateContainersVolumeMountsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedMountPath, err := expandCloudRunV2ServiceTemplateContainersVolumeMountsMountPath(original["mount_path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMountPath); val.IsValid() && !isEmptyValue(val) { - transformed["mountPath"] = transformedMountPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunV2ServiceTemplateContainersVolumeMountsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersVolumeMountsMountPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersWorkingDir(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbe(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - 
if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInitialDelaySeconds, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeInitialDelaySeconds(original["initial_delay_seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInitialDelaySeconds); val.IsValid() && !isEmptyValue(val) { - transformed["initialDelaySeconds"] = transformedInitialDelaySeconds - } - - transformedTimeoutSeconds, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeTimeoutSeconds(original["timeout_seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["timeoutSeconds"] = transformedTimeoutSeconds - } - - transformedPeriodSeconds, err := expandCloudRunV2ServiceTemplateContainersLivenessProbePeriodSeconds(original["period_seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPeriodSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["periodSeconds"] = transformedPeriodSeconds - } - - transformedFailureThreshold, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeFailureThreshold(original["failure_threshold"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !isEmptyValue(val) { - transformed["failureThreshold"] = transformedFailureThreshold - } - - transformedHttpGet, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGet(original["http_get"], d, config) - if err != nil { - return nil, err - } else { - transformed["httpGet"] = transformedHttpGet - } - - transformedTcpSocket, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocket(original["tcp_socket"], d, config) - if err != nil { - return nil, err - } else { - 
transformed["tcpSocket"] = transformedTcpSocket - } - - transformedGrpc, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeGrpc(original["grpc"], d, config) - if err != nil { - return nil, err - } else { - transformed["grpc"] = transformedGrpc - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbeInitialDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbeTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbePeriodSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbeFailureThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedHttpHeaders, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeaders(original["http_headers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !isEmptyValue(val) { - 
transformed["httpHeaders"] = transformedHttpHeaders - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedValue, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersValue(original["value"], d, config) - if err != nil { - return nil, err - } else { - transformed["value"] = transformedValue - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedPort, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocketPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocketPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbeGrpc(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPort, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeGrpcPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedService, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeGrpcService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbeGrpcPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersLivenessProbeGrpcService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbe(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInitialDelaySeconds, err := expandCloudRunV2ServiceTemplateContainersStartupProbeInitialDelaySeconds(original["initial_delay_seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInitialDelaySeconds); val.IsValid() && !isEmptyValue(val) { - transformed["initialDelaySeconds"] = transformedInitialDelaySeconds - } - - transformedTimeoutSeconds, err := expandCloudRunV2ServiceTemplateContainersStartupProbeTimeoutSeconds(original["timeout_seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["timeoutSeconds"] = transformedTimeoutSeconds - } - - transformedPeriodSeconds, err := expandCloudRunV2ServiceTemplateContainersStartupProbePeriodSeconds(original["period_seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPeriodSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["periodSeconds"] = transformedPeriodSeconds - } - - transformedFailureThreshold, err := expandCloudRunV2ServiceTemplateContainersStartupProbeFailureThreshold(original["failure_threshold"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !isEmptyValue(val) { - transformed["failureThreshold"] = transformedFailureThreshold - } - - transformedHttpGet, err := expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGet(original["http_get"], d, config) - if err != nil { - return nil, err - } else { - transformed["httpGet"] = transformedHttpGet - } - - transformedTcpSocket, err := 
expandCloudRunV2ServiceTemplateContainersStartupProbeTcpSocket(original["tcp_socket"], d, config) - if err != nil { - return nil, err - } else { - transformed["tcpSocket"] = transformedTcpSocket - } - - transformedGrpc, err := expandCloudRunV2ServiceTemplateContainersStartupProbeGrpc(original["grpc"], d, config) - if err != nil { - return nil, err - } else { - transformed["grpc"] = transformedGrpc - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbeInitialDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbeTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbePeriodSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbeFailureThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedHttpHeaders, err := expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeaders(original["http_headers"], d, config) - if err 
!= nil { - return nil, err - } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["httpHeaders"] = transformedHttpHeaders - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedValue, err := expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersValue(original["value"], d, config) - if err != nil { - return nil, err - } else { - transformed["value"] = transformedValue - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbeTcpSocket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - 
return transformed, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPort, err := expandCloudRunV2ServiceTemplateContainersStartupProbeTcpSocketPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbeTcpSocketPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbeGrpc(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPort, err := expandCloudRunV2ServiceTemplateContainersStartupProbeGrpcPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedService, err := expandCloudRunV2ServiceTemplateContainersStartupProbeGrpcService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbeGrpcPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateContainersStartupProbeGrpcService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return 
v, nil -} - -func expandCloudRunV2ServiceTemplateVolumes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunV2ServiceTemplateVolumesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedSecret, err := expandCloudRunV2ServiceTemplateVolumesSecret(original["secret"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { - transformed["secret"] = transformedSecret - } - - transformedCloudSqlInstance, err := expandCloudRunV2ServiceTemplateVolumesCloudSqlInstance(original["cloud_sql_instance"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCloudSqlInstance); val.IsValid() && !isEmptyValue(val) { - transformed["cloudSqlInstance"] = transformedCloudSqlInstance - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunV2ServiceTemplateVolumesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateVolumesSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSecret, err := expandCloudRunV2ServiceTemplateVolumesSecretSecret(original["secret"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && 
!isEmptyValue(val) { - transformed["secret"] = transformedSecret - } - - transformedDefaultMode, err := expandCloudRunV2ServiceTemplateVolumesSecretDefaultMode(original["default_mode"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDefaultMode); val.IsValid() && !isEmptyValue(val) { - transformed["defaultMode"] = transformedDefaultMode - } - - transformedItems, err := expandCloudRunV2ServiceTemplateVolumesSecretItems(original["items"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedItems); val.IsValid() && !isEmptyValue(val) { - transformed["items"] = transformedItems - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateVolumesSecretSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateVolumesSecretDefaultMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateVolumesSecretItems(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandCloudRunV2ServiceTemplateVolumesSecretItemsPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedVersion, err := expandCloudRunV2ServiceTemplateVolumesSecretItemsVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - transformedMode, err := 
expandCloudRunV2ServiceTemplateVolumesSecretItemsMode(original["mode"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { - transformed["mode"] = transformedMode - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunV2ServiceTemplateVolumesSecretItemsPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateVolumesSecretItemsVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateVolumesSecretItemsMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateVolumesCloudSqlInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInstances, err := expandCloudRunV2ServiceTemplateVolumesCloudSqlInstanceInstances(original["instances"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !isEmptyValue(val) { - transformed["instances"] = transformedInstances - } - - return transformed, nil -} - -func expandCloudRunV2ServiceTemplateVolumesCloudSqlInstanceInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateExecutionEnvironment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTemplateEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandCloudRunV2ServiceTemplateMaxInstanceRequestConcurrency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTraffic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedType, err := expandCloudRunV2ServiceTrafficType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - transformedRevision, err := expandCloudRunV2ServiceTrafficRevision(original["revision"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRevision); val.IsValid() && !isEmptyValue(val) { - transformed["revision"] = transformedRevision - } - - transformedPercent, err := expandCloudRunV2ServiceTrafficPercent(original["percent"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { - transformed["percent"] = transformedPercent - } - - transformedTag, err := expandCloudRunV2ServiceTrafficTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunV2ServiceTrafficType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTrafficRevision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTrafficPercent(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunV2ServiceTrafficTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_scheduler_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_scheduler_job.go deleted file mode 100644 index 7f802382c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_scheduler_job.go +++ /dev/null @@ -1,1565 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// Both oidc and oauth headers cannot be set -func validateAuthHeaders(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { - httpBlock := diff.Get("http_target.0").(map[string]interface{}) - - if httpBlock != nil { - oauth := httpBlock["oauth_token"] - oidc := httpBlock["oidc_token"] - - if oauth != nil && oidc != nil { - if len(oidc.([]interface{})) > 0 && len(oauth.([]interface{})) > 0 { - return fmt.Errorf("Error in http_target: only one of oauth_token or oidc_token can be specified, but not both.") - } - } - } - - return nil -} - -func authHeaderDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // If generating an `oauth_token` and `scope` is not provided in the configuration, - // the default "https://www.googleapis.com/auth/cloud-platform" scope will be used. - // Similarly, if generating an `oidc_token` and `audience` is not provided in the - // configuration, the URI specified in target will be used. Although not in the - // configuration, in both cases the default is returned in the object, but is not in. - // state. We suppress the diff if the values are these defaults but are not stored in state. 
- - b := strings.Split(k, ".") - if b[0] == "http_target" && len(b) > 4 { - block := b[2] - attr := b[4] - - if block == "oauth_token" && attr == "scope" { - if old == canonicalizeServiceScope("cloud-platform") && new == "" { - return true - } - } - - if block == "oidc_token" && attr == "audience" { - uri := d.Get(strings.Join(b[0:2], ".") + ".uri") - if old == uri && new == "" { - return true - } - } - - } - - return false -} - -func validateHttpHeaders() schema.SchemaValidateFunc { - return func(i interface{}, k string) (s []string, es []error) { - headers := i.(map[string]interface{}) - if _, ok := headers["Content-Length"]; ok { - es = append(es, fmt.Errorf("Cannot set the Content-Length header on %s", k)) - return - } - r := regexp.MustCompile(`(X-Google-|X-AppEngine-).*`) - for key := range headers { - if r.MatchString(key) { - es = append(es, fmt.Errorf("Cannot set the %s header on %s", key, k)) - return - } - } - - return - } -} - -func ResourceCloudSchedulerJob() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudSchedulerJobCreate, - Read: resourceCloudSchedulerJobRead, - Update: resourceCloudSchedulerJobUpdate, - Delete: resourceCloudSchedulerJobDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudSchedulerJobImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - CustomizeDiff: validateAuthHeaders, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the job.`, - }, - "app_engine_http_target": { - Type: schema.TypeList, - Optional: true, - Description: `App Engine HTTP target. 
-If the job providers a App Engine HTTP target the cron will -send a request to the service instance`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "relative_uri": { - Type: schema.TypeString, - Required: true, - Description: `The relative URI. -The relative URL must begin with "/" and must be a valid HTTP relative URL. -It can contain a path, query string arguments, and \# fragments. -If the relative URL is empty, then the root path "/" will be used. -No spaces are allowed, and the maximum length allowed is 2083 characters`, - }, - "app_engine_routing": { - Type: schema.TypeList, - Optional: true, - Description: `App Engine Routing setting for the job.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance": { - Type: schema.TypeString, - Optional: true, - Description: `App instance. -By default, the job is sent to an instance which is available when the job is attempted.`, - AtLeastOneOf: []string{"app_engine_http_target.0.app_engine_routing.0.service", "app_engine_http_target.0.app_engine_routing.0.version", "app_engine_http_target.0.app_engine_routing.0.instance"}, - }, - "service": { - Type: schema.TypeString, - Optional: true, - Description: `App service. -By default, the job is sent to the service which is the default service when the job is attempted.`, - AtLeastOneOf: []string{"app_engine_http_target.0.app_engine_routing.0.service", "app_engine_http_target.0.app_engine_routing.0.version", "app_engine_http_target.0.app_engine_routing.0.instance"}, - }, - "version": { - Type: schema.TypeString, - Optional: true, - Description: `App version. 
-By default, the job is sent to the version which is the default version when the job is attempted.`, - AtLeastOneOf: []string{"app_engine_http_target.0.app_engine_routing.0.service", "app_engine_http_target.0.app_engine_routing.0.version", "app_engine_http_target.0.app_engine_routing.0.instance"}, - }, - }, - }, - }, - "body": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateBase64String, - Description: `HTTP request body. -A request body is allowed only if the HTTP method is POST or PUT. -It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - -A base64-encoded string.`, - }, - "headers": { - Type: schema.TypeMap, - Optional: true, - ValidateFunc: validateHttpHeaders(), - Description: `HTTP request headers. -This map contains the header field names and values. -Headers can be set when the job is created.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "http_method": { - Type: schema.TypeString, - Optional: true, - Description: `Which HTTP method to use for the request.`, - }, - }, - }, - ExactlyOneOf: []string{"pubsub_target", "http_target", "app_engine_http_target"}, - }, - "attempt_deadline": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: emptyOrDefaultStringSuppress("180s"), - Description: `The deadline for job attempts. If the request handler does not respond by this deadline then the request is -cancelled and the attempt is marked as a DEADLINE_EXCEEDED failure. The failed attempt can be viewed in -execution logs. Cloud Scheduler will retry the job according to the RetryConfig. -The allowed duration for this deadline is: -* For HTTP targets, between 15 seconds and 30 minutes. -* For App Engine HTTP targets, between 15 seconds and 24 hours. -* **Note**: For PubSub targets, this field is ignored - setting it will introduce an unresolvable diff. -A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s"`, - Default: "180s", - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A human-readable description for the job. -This string must not contain more than 500 characters.`, - }, - "http_target": { - Type: schema.TypeList, - Optional: true, - Description: `HTTP target. -If the job providers a http_target the cron will -send a request to the targeted url`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uri": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: lastSlashDiffSuppress, - Description: `The full URI path that the request will be sent to.`, - }, - "body": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateBase64String, - Description: `HTTP request body. -A request body is allowed only if the HTTP method is POST, PUT, or PATCH. -It is an error to set body on a job with an incompatible HttpMethod. - -A base64-encoded string.`, - }, - "headers": { - Type: schema.TypeMap, - Optional: true, - ValidateFunc: validateHttpHeaders(), - Description: `This map contains the header field names and values. -Repeated headers are not supported, but a header value can contain commas.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "http_method": { - Type: schema.TypeString, - Optional: true, - Description: `Which HTTP method to use for the request.`, - }, - "oauth_token": { - Type: schema.TypeList, - Optional: true, - DiffSuppressFunc: authHeaderDiffSuppress, - Description: `Contains information needed for generating an OAuth token. -This type of authorization should be used when sending requests to a GCP endpoint.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "service_account_email": { - Type: schema.TypeString, - Required: true, - Description: `Service account email to be used for generating OAuth token. 
-The service account must be within the same project as the job.`, - }, - "scope": { - Type: schema.TypeString, - Optional: true, - Description: `OAuth scope to be used for generating OAuth access token. If not specified, -"https://www.googleapis.com/auth/cloud-platform" will be used.`, - }, - }, - }, - }, - "oidc_token": { - Type: schema.TypeList, - Optional: true, - DiffSuppressFunc: authHeaderDiffSuppress, - Description: `Contains information needed for generating an OpenID Connect token. -This type of authorization should be used when sending requests to third party endpoints or Cloud Run.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "service_account_email": { - Type: schema.TypeString, - Required: true, - Description: `Service account email to be used for generating OAuth token. -The service account must be within the same project as the job.`, - }, - "audience": { - Type: schema.TypeString, - Optional: true, - Description: `Audience to be used when generating OIDC token. If not specified, -the URI specified in target will be used.`, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{"pubsub_target", "http_target", "app_engine_http_target"}, - }, - "paused": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: `Sets the job to a paused state. Jobs default to being enabled when this property is not set.`, - }, - "pubsub_target": { - Type: schema.TypeList, - Optional: true, - Description: `Pub/Sub target -If the job providers a Pub/Sub target the cron will publish -a message to the provided topic`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "topic_name": { - Type: schema.TypeString, - Required: true, - Description: `The full resource name for the Cloud Pub/Sub topic to which -messages will be published when a job is delivered. ~>**NOTE:** -The topic name must be in the same format as required by PubSub's -PublishRequest.name, e.g. 
'projects/my-project/topics/my-topic'.`, - }, - "attributes": { - Type: schema.TypeMap, - Optional: true, - Description: `Attributes for PubsubMessage. -Pubsub message must contain either non-empty data, or at least one attribute.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "data": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateBase64String, - Description: `The message payload for PubsubMessage. -Pubsub message must contain either non-empty data, or at least one attribute. - - A base64-encoded string.`, - }, - }, - }, - ExactlyOneOf: []string{"pubsub_target", "http_target", "app_engine_http_target"}, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Region where the scheduler job resides. If it is not provided, Terraform will use the provider default.`, - }, - "retry_config": { - Type: schema.TypeList, - Optional: true, - Description: `By default, if a job does not complete successfully, -meaning that an acknowledgement is not received from the handler, -then it will be retried with exponential backoff according to the settings`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_backoff_duration": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `The maximum amount of time to wait before retrying a job after it fails. -A duration in seconds with up to nine fractional digits, terminated by 's'.`, - AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, - }, - "max_doublings": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The time between retries will double maxDoublings times. 
-A job's retry interval starts at minBackoffDuration, -then doubles maxDoublings times, then increases linearly, -and finally retries retries at intervals of maxBackoffDuration up to retryCount times.`, - AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, - }, - "max_retry_duration": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `The time limit for retrying a failed job, measured from time when an execution was first attempted. -If specified with retryCount, the job will be retried until both limits are reached. -A duration in seconds with up to nine fractional digits, terminated by 's'.`, - AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, - }, - "min_backoff_duration": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `The minimum amount of time to wait before retrying a job after it fails. -A duration in seconds with up to nine fractional digits, terminated by 's'.`, - AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, - }, - "retry_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The number of attempts that the system will make to run a -job using the exponential backoff procedure described by maxDoublings. 
-Values greater than 5 and negative values are not allowed.`, - AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, - }, - }, - }, - }, - "schedule": { - Type: schema.TypeString, - Optional: true, - Description: `Describes the schedule on which the job will be executed.`, - }, - "time_zone": { - Type: schema.TypeString, - Optional: true, - Description: `Specifies the time zone to be used in interpreting schedule. -The value of this field must be a time zone name from the tz database.`, - Default: "Etc/UTC", - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `State of the job.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudSchedulerJobCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandCloudSchedulerJobName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandCloudSchedulerJobDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - scheduleProp, err := expandCloudSchedulerJobSchedule(d.Get("schedule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schedule"); !isEmptyValue(reflect.ValueOf(scheduleProp)) && (ok || !reflect.DeepEqual(v, scheduleProp)) { 
- obj["schedule"] = scheduleProp - } - timeZoneProp, err := expandCloudSchedulerJobTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(reflect.ValueOf(timeZoneProp)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - pausedProp, err := expandCloudSchedulerJobPaused(d.Get("paused"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("paused"); !isEmptyValue(reflect.ValueOf(pausedProp)) && (ok || !reflect.DeepEqual(v, pausedProp)) { - obj["paused"] = pausedProp - } - attemptDeadlineProp, err := expandCloudSchedulerJobAttemptDeadline(d.Get("attempt_deadline"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attempt_deadline"); !isEmptyValue(reflect.ValueOf(attemptDeadlineProp)) && (ok || !reflect.DeepEqual(v, attemptDeadlineProp)) { - obj["attemptDeadline"] = attemptDeadlineProp - } - retryConfigProp, err := expandCloudSchedulerJobRetryConfig(d.Get("retry_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retry_config"); !isEmptyValue(reflect.ValueOf(retryConfigProp)) && (ok || !reflect.DeepEqual(v, retryConfigProp)) { - obj["retryConfig"] = retryConfigProp - } - pubsubTargetProp, err := expandCloudSchedulerJobPubsubTarget(d.Get("pubsub_target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_target"); !isEmptyValue(reflect.ValueOf(pubsubTargetProp)) && (ok || !reflect.DeepEqual(v, pubsubTargetProp)) { - obj["pubsubTarget"] = pubsubTargetProp - } - appEngineHttpTargetProp, err := expandCloudSchedulerJobAppEngineHttpTarget(d.Get("app_engine_http_target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("app_engine_http_target"); !isEmptyValue(reflect.ValueOf(appEngineHttpTargetProp)) && (ok || !reflect.DeepEqual(v, appEngineHttpTargetProp)) { - obj["appEngineHttpTarget"] = 
appEngineHttpTargetProp - } - httpTargetProp, err := expandCloudSchedulerJobHttpTarget(d.Get("http_target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http_target"); !isEmptyValue(reflect.ValueOf(httpTargetProp)) && (ok || !reflect.DeepEqual(v, httpTargetProp)) { - obj["httpTarget"] = httpTargetProp - } - - obj, err = resourceCloudSchedulerJobEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Job: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Job: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Job: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/jobs/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - endpoint := "resume" // Default to enabled - logSuccessMsg := "Job state has been set to ENABLED" - if paused, pausedOk := d.GetOk("paused"); pausedOk && paused.(bool) { - endpoint = "pause" - logSuccessMsg = "Job state has been set to PAUSED" - } - - linkTmpl := fmt.Sprintf("{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}:%s", endpoint) - url, err = replaceVars(d, config, linkTmpl) - if err != nil { - return err - } - - emptyReqBody := make(map[string]interface{}) - - _, err = SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, emptyReqBody, 
d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error setting Cloud Scheduler Job status: %s", err) - } - - log.Printf("[DEBUG] Finished updating Job %q status: %s", d.Id(), logSuccessMsg) - - log.Printf("[DEBUG] Finished creating Job %q: %#v", d.Id(), res) - - return resourceCloudSchedulerJobRead(d, meta) -} - -func resourceCloudSchedulerJobRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Job: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudSchedulerJob %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Job: %s", err) - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return fmt.Errorf("Error reading Job: %s", err) - } - - if err := d.Set("name", flattenCloudSchedulerJobName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("description", flattenCloudSchedulerJobDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("schedule", flattenCloudSchedulerJobSchedule(res["schedule"], d, config)); err != nil { - return fmt.Errorf("Error reading Job: %s", err) - } - if 
err := d.Set("time_zone", flattenCloudSchedulerJobTimeZone(res["timeZone"], d, config)); err != nil { - return fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("state", flattenCloudSchedulerJobState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("paused", flattenCloudSchedulerJobPaused(res["paused"], d, config)); err != nil { - return fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("attempt_deadline", flattenCloudSchedulerJobAttemptDeadline(res["attemptDeadline"], d, config)); err != nil { - return fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("retry_config", flattenCloudSchedulerJobRetryConfig(res["retryConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("pubsub_target", flattenCloudSchedulerJobPubsubTarget(res["pubsubTarget"], d, config)); err != nil { - return fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("app_engine_http_target", flattenCloudSchedulerJobAppEngineHttpTarget(res["appEngineHttpTarget"], d, config)); err != nil { - return fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("http_target", flattenCloudSchedulerJobHttpTarget(res["httpTarget"], d, config)); err != nil { - return fmt.Errorf("Error reading Job: %s", err) - } - - return nil -} - -func resourceCloudSchedulerJobUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Job: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandCloudSchedulerJobDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - scheduleProp, err := expandCloudSchedulerJobSchedule(d.Get("schedule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schedule"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, scheduleProp)) { - obj["schedule"] = scheduleProp - } - timeZoneProp, err := expandCloudSchedulerJobTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - pausedProp, err := expandCloudSchedulerJobPaused(d.Get("paused"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("paused"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pausedProp)) { - obj["paused"] = pausedProp - } - attemptDeadlineProp, err := expandCloudSchedulerJobAttemptDeadline(d.Get("attempt_deadline"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attempt_deadline"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attemptDeadlineProp)) { - obj["attemptDeadline"] = attemptDeadlineProp - } - retryConfigProp, err := expandCloudSchedulerJobRetryConfig(d.Get("retry_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retry_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retryConfigProp)) { - obj["retryConfig"] = retryConfigProp - } - pubsubTargetProp, err := expandCloudSchedulerJobPubsubTarget(d.Get("pubsub_target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_target"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pubsubTargetProp)) { - obj["pubsubTarget"] = pubsubTargetProp - } - appEngineHttpTargetProp, err := 
expandCloudSchedulerJobAppEngineHttpTarget(d.Get("app_engine_http_target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("app_engine_http_target"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, appEngineHttpTargetProp)) { - obj["appEngineHttpTarget"] = appEngineHttpTargetProp - } - httpTargetProp, err := expandCloudSchedulerJobHttpTarget(d.Get("http_target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http_target"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, httpTargetProp)) { - obj["httpTarget"] = httpTargetProp - } - - obj, err = resourceCloudSchedulerJobUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Job %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Job %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Job %q: %#v", d.Id(), res) - } - - if d.HasChange("paused") { - endpoint := "resume" // Default to enabled - logSuccessMsg := "Job state has been set to ENABLED" - if paused, pausedOk := d.GetOk("paused"); pausedOk { - if paused.(bool) { - endpoint = "pause" - logSuccessMsg = "Job state has been set to PAUSED" - } - } - - linkTmpl := fmt.Sprintf("{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}:%s", endpoint) - url, err = replaceVars(d, config, linkTmpl) - if err != nil { - return err - } - - emptyReqBody := make(map[string]interface{}) - - _, err = SendRequestWithTimeout(config, "POST", 
billingProject, url, userAgent, emptyReqBody, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error setting Cloud Scheduler Job status: %s", err) - } - - log.Printf("[DEBUG] Finished updating Job %q status: %s", d.Id(), logSuccessMsg) - } - return resourceCloudSchedulerJobRead(d, meta) -} - -func resourceCloudSchedulerJobDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Job: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Job %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Job") - } - - log.Printf("[DEBUG] Finished deleting Job %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudSchedulerJobImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/jobs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/jobs/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return 
[]*schema.ResourceData{d}, nil -} - -func flattenCloudSchedulerJobName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenCloudSchedulerJobDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobTimeZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobPaused(v interface{}, d *schema.ResourceData, config *Config) interface{} { - state := d.Get("state") - if state == "PAUSED" { - return true - } - if state == "ENABLED" { - return false - } - return false // Job has an error state that's not paused or enabled -} - -func flattenCloudSchedulerJobAttemptDeadline(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobRetryConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["retry_count"] = - flattenCloudSchedulerJobRetryConfigRetryCount(original["retryCount"], d, config) - transformed["max_retry_duration"] = - flattenCloudSchedulerJobRetryConfigMaxRetryDuration(original["maxRetryDuration"], d, config) - transformed["min_backoff_duration"] = - flattenCloudSchedulerJobRetryConfigMinBackoffDuration(original["minBackoffDuration"], d, config) - transformed["max_backoff_duration"] = - flattenCloudSchedulerJobRetryConfigMaxBackoffDuration(original["maxBackoffDuration"], d, config) - transformed["max_doublings"] = - 
flattenCloudSchedulerJobRetryConfigMaxDoublings(original["maxDoublings"], d, config) - return []interface{}{transformed} -} -func flattenCloudSchedulerJobRetryConfigRetryCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudSchedulerJobRetryConfigMaxRetryDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobRetryConfigMinBackoffDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobRetryConfigMaxBackoffDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobRetryConfigMaxDoublings(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudSchedulerJobPubsubTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["topic_name"] = - flattenCloudSchedulerJobPubsubTargetTopicName(original["topicName"], d, config) - transformed["data"] = - flattenCloudSchedulerJobPubsubTargetData(original["data"], d, config) - 
transformed["attributes"] = - flattenCloudSchedulerJobPubsubTargetAttributes(original["attributes"], d, config) - return []interface{}{transformed} -} -func flattenCloudSchedulerJobPubsubTargetTopicName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobPubsubTargetData(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobPubsubTargetAttributes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobAppEngineHttpTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["http_method"] = - flattenCloudSchedulerJobAppEngineHttpTargetHttpMethod(original["httpMethod"], d, config) - transformed["app_engine_routing"] = - flattenCloudSchedulerJobAppEngineHttpTargetAppEngineRouting(original["appEngineRouting"], d, config) - transformed["relative_uri"] = - flattenCloudSchedulerJobAppEngineHttpTargetRelativeUri(original["relativeUri"], d, config) - transformed["body"] = - flattenCloudSchedulerJobAppEngineHttpTargetBody(original["body"], d, config) - transformed["headers"] = - flattenCloudSchedulerJobAppEngineHttpTargetHeaders(original["headers"], d, config) - return []interface{}{transformed} -} -func flattenCloudSchedulerJobAppEngineHttpTargetHttpMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -// An `appEngineRouting` in API response is useless, so we set config values rather than api response to state. 
-func flattenCloudSchedulerJobAppEngineHttpTargetAppEngineRouting(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - if stateV, ok := d.GetOk("app_engine_http_target"); ok && len(stateV.([]interface{})) > 0 { - return d.Get("app_engine_http_target.0.app_engine_routing") - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["service"] = original["service"] - transformed["version"] = original["version"] - transformed["instance"] = original["instance"] - return []interface{}{transformed} -} - -func flattenCloudSchedulerJobAppEngineHttpTargetRelativeUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobAppEngineHttpTargetBody(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobAppEngineHttpTargetHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { - var headers = v.(map[string]interface{}) - if v, ok := headers["User-Agent"]; ok { - if v.(string) == "AppEngine-Google; (+http://code.google.com/appengine)" { - delete(headers, "User-Agent") - } else if v.(string) == "Google-Cloud-Scheduler" { - delete(headers, "User-Agent") - } else { - headers["User-Agent"] = strings.TrimSpace(strings.Replace(v.(string), "AppEngine-Google; (+http://code.google.com/appengine)", "", -1)) - } - } - if v, ok := headers["Content-Type"]; ok { - if v.(string) == "application/octet-stream" { - delete(headers, "Content-Type") - } - } - r := regexp.MustCompile(`(X-Google-|X-AppEngine-|Content-Length).*`) - for key := range headers { - if r.MatchString(key) { - delete(headers, key) - } - } - return headers -} - -func flattenCloudSchedulerJobHttpTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if 
len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["uri"] = - flattenCloudSchedulerJobHttpTargetUri(original["uri"], d, config) - transformed["http_method"] = - flattenCloudSchedulerJobHttpTargetHttpMethod(original["httpMethod"], d, config) - transformed["body"] = - flattenCloudSchedulerJobHttpTargetBody(original["body"], d, config) - transformed["headers"] = - flattenCloudSchedulerJobHttpTargetHeaders(original["headers"], d, config) - transformed["oauth_token"] = - flattenCloudSchedulerJobHttpTargetOauthToken(original["oauthToken"], d, config) - transformed["oidc_token"] = - flattenCloudSchedulerJobHttpTargetOidcToken(original["oidcToken"], d, config) - return []interface{}{transformed} -} -func flattenCloudSchedulerJobHttpTargetUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobHttpTargetHttpMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobHttpTargetBody(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobHttpTargetHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { - var headers = v.(map[string]interface{}) - if v, ok := headers["User-Agent"]; ok { - if v.(string) == "AppEngine-Google; (+http://code.google.com/appengine)" { - delete(headers, "User-Agent") - } else if v.(string) == "Google-Cloud-Scheduler" { - delete(headers, "User-Agent") - } else { - headers["User-Agent"] = strings.TrimSpace(strings.Replace(v.(string), "AppEngine-Google; (+http://code.google.com/appengine)", "", -1)) - } - } - if v, ok := headers["Content-Type"]; ok { - if v.(string) == "application/octet-stream" { - delete(headers, "Content-Type") - } - } - r := regexp.MustCompile(`(X-Google-|X-AppEngine-|Content-Length).*`) - for key := range headers { - if r.MatchString(key) { - delete(headers, key) - } - } - return 
headers -} - -func flattenCloudSchedulerJobHttpTargetOauthToken(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["service_account_email"] = - flattenCloudSchedulerJobHttpTargetOauthTokenServiceAccountEmail(original["serviceAccountEmail"], d, config) - transformed["scope"] = - flattenCloudSchedulerJobHttpTargetOauthTokenScope(original["scope"], d, config) - return []interface{}{transformed} -} -func flattenCloudSchedulerJobHttpTargetOauthTokenServiceAccountEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobHttpTargetOauthTokenScope(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobHttpTargetOidcToken(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["service_account_email"] = - flattenCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(original["serviceAccountEmail"], d, config) - transformed["audience"] = - flattenCloudSchedulerJobHttpTargetOidcTokenAudience(original["audience"], d, config) - return []interface{}{transformed} -} -func flattenCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobHttpTargetOidcTokenAudience(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudSchedulerJobName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/locations/{{region}}/jobs/{{name}}") -} - -func 
expandCloudSchedulerJobDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobPaused(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAttemptDeadline(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobRetryConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRetryCount, err := expandCloudSchedulerJobRetryConfigRetryCount(original["retry_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRetryCount); val.IsValid() && !isEmptyValue(val) { - transformed["retryCount"] = transformedRetryCount - } - - transformedMaxRetryDuration, err := expandCloudSchedulerJobRetryConfigMaxRetryDuration(original["max_retry_duration"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxRetryDuration); val.IsValid() && !isEmptyValue(val) { - transformed["maxRetryDuration"] = transformedMaxRetryDuration - } - - transformedMinBackoffDuration, err := expandCloudSchedulerJobRetryConfigMinBackoffDuration(original["min_backoff_duration"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinBackoffDuration); val.IsValid() && !isEmptyValue(val) { - transformed["minBackoffDuration"] = transformedMinBackoffDuration - } - - 
transformedMaxBackoffDuration, err := expandCloudSchedulerJobRetryConfigMaxBackoffDuration(original["max_backoff_duration"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxBackoffDuration); val.IsValid() && !isEmptyValue(val) { - transformed["maxBackoffDuration"] = transformedMaxBackoffDuration - } - - transformedMaxDoublings, err := expandCloudSchedulerJobRetryConfigMaxDoublings(original["max_doublings"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxDoublings); val.IsValid() && !isEmptyValue(val) { - transformed["maxDoublings"] = transformedMaxDoublings - } - - return transformed, nil -} - -func expandCloudSchedulerJobRetryConfigRetryCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobRetryConfigMaxRetryDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobRetryConfigMinBackoffDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobRetryConfigMaxBackoffDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobRetryConfigMaxDoublings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobPubsubTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTopicName, err := expandCloudSchedulerJobPubsubTargetTopicName(original["topic_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTopicName); 
val.IsValid() && !isEmptyValue(val) { - transformed["topicName"] = transformedTopicName - } - - transformedData, err := expandCloudSchedulerJobPubsubTargetData(original["data"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedData); val.IsValid() && !isEmptyValue(val) { - transformed["data"] = transformedData - } - - transformedAttributes, err := expandCloudSchedulerJobPubsubTargetAttributes(original["attributes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAttributes); val.IsValid() && !isEmptyValue(val) { - transformed["attributes"] = transformedAttributes - } - - return transformed, nil -} - -func expandCloudSchedulerJobPubsubTargetTopicName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobPubsubTargetData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobPubsubTargetAttributes(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudSchedulerJobAppEngineHttpTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHttpMethod, err := expandCloudSchedulerJobAppEngineHttpTargetHttpMethod(original["http_method"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHttpMethod); val.IsValid() && !isEmptyValue(val) { - transformed["httpMethod"] = transformedHttpMethod - } - - transformedAppEngineRouting, err := 
expandCloudSchedulerJobAppEngineHttpTargetAppEngineRouting(original["app_engine_routing"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAppEngineRouting); val.IsValid() && !isEmptyValue(val) { - transformed["appEngineRouting"] = transformedAppEngineRouting - } - - transformedRelativeUri, err := expandCloudSchedulerJobAppEngineHttpTargetRelativeUri(original["relative_uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRelativeUri); val.IsValid() && !isEmptyValue(val) { - transformed["relativeUri"] = transformedRelativeUri - } - - transformedBody, err := expandCloudSchedulerJobAppEngineHttpTargetBody(original["body"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBody); val.IsValid() && !isEmptyValue(val) { - transformed["body"] = transformedBody - } - - transformedHeaders, err := expandCloudSchedulerJobAppEngineHttpTargetHeaders(original["headers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["headers"] = transformedHeaders - } - - return transformed, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetHttpMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetAppEngineRouting(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedService, err := expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] 
= transformedService - } - - transformedVersion, err := expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - transformedInstance, err := expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingInstance(original["instance"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInstance); val.IsValid() && !isEmptyValue(val) { - transformed["instance"] = transformedInstance - } - - return transformed, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetRelativeUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetBody(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudSchedulerJobHttpTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || 
l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUri, err := expandCloudSchedulerJobHttpTargetUri(original["uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !isEmptyValue(val) { - transformed["uri"] = transformedUri - } - - transformedHttpMethod, err := expandCloudSchedulerJobHttpTargetHttpMethod(original["http_method"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHttpMethod); val.IsValid() && !isEmptyValue(val) { - transformed["httpMethod"] = transformedHttpMethod - } - - transformedBody, err := expandCloudSchedulerJobHttpTargetBody(original["body"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBody); val.IsValid() && !isEmptyValue(val) { - transformed["body"] = transformedBody - } - - transformedHeaders, err := expandCloudSchedulerJobHttpTargetHeaders(original["headers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["headers"] = transformedHeaders - } - - transformedOauthToken, err := expandCloudSchedulerJobHttpTargetOauthToken(original["oauth_token"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOauthToken); val.IsValid() && !isEmptyValue(val) { - transformed["oauthToken"] = transformedOauthToken - } - - transformedOidcToken, err := expandCloudSchedulerJobHttpTargetOidcToken(original["oidc_token"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOidcToken); val.IsValid() && !isEmptyValue(val) { - transformed["oidcToken"] = transformedOidcToken - } - - return transformed, nil -} - -func expandCloudSchedulerJobHttpTargetUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandCloudSchedulerJobHttpTargetHttpMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobHttpTargetBody(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobHttpTargetHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudSchedulerJobHttpTargetOauthToken(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceAccountEmail, err := expandCloudSchedulerJobHttpTargetOauthTokenServiceAccountEmail(original["service_account_email"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { - transformed["serviceAccountEmail"] = transformedServiceAccountEmail - } - - transformedScope, err := expandCloudSchedulerJobHttpTargetOauthTokenScope(original["scope"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedScope); val.IsValid() && !isEmptyValue(val) { - transformed["scope"] = transformedScope - } - - return transformed, nil -} - -func expandCloudSchedulerJobHttpTargetOauthTokenServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobHttpTargetOauthTokenScope(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandCloudSchedulerJobHttpTargetOidcToken(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceAccountEmail, err := expandCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(original["service_account_email"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { - transformed["serviceAccountEmail"] = transformedServiceAccountEmail - } - - transformedAudience, err := expandCloudSchedulerJobHttpTargetOidcTokenAudience(original["audience"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAudience); val.IsValid() && !isEmptyValue(val) { - transformed["audience"] = transformedAudience - } - - return transformed, nil -} - -func expandCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobHttpTargetOidcTokenAudience(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudSchedulerJobEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - delete(obj, "paused") // Field doesn't exist in API - return obj, nil -} - -func resourceCloudSchedulerJobUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - delete(obj, "paused") // Field doesn't exist in API - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_tasks_queue.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_tasks_queue.go deleted file mode 100644 index 893dbd6c3d..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_tasks_queue.go +++ /dev/null @@ -1,871 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func suppressOmittedMaxDuration(_, old, new string, _ *schema.ResourceData) bool { - if old == "" && new == "0s" { - log.Printf("[INFO] max retry is 0s and api omitted field, suppressing diff") - return true - } - return false -} - -func ResourceCloudTasksQueue() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudTasksQueueCreate, - Read: resourceCloudTasksQueueRead, - Update: resourceCloudTasksQueueUpdate, - Delete: resourceCloudTasksQueueDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudTasksQueueImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location of the queue`, - }, - "app_engine_routing_override": { - Type: schema.TypeList, - Optional: true, 
- Description: `Overrides for task-level appEngineRouting. These settings apply only -to App Engine tasks in this queue`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance": { - Type: schema.TypeString, - Optional: true, - Description: `App instance. - -By default, the task is sent to an instance which is available when the task is attempted.`, - }, - "service": { - Type: schema.TypeString, - Optional: true, - Description: `App service. - -By default, the task is sent to the service which is the default service when the task is attempted.`, - }, - "version": { - Type: schema.TypeString, - Optional: true, - Description: `App version. - -By default, the task is sent to the version which is the default version when the task is attempted.`, - }, - "host": { - Type: schema.TypeString, - Computed: true, - Description: `The host that the task is sent to.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The queue name.`, - }, - "rate_limits": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Rate limits for task dispatches. - -The queue's actual dispatch rate is the result of: - -* Number of tasks in the queue -* User-specified throttling: rateLimits, retryConfig, and the queue's state. -* System throttling due to 429 (Too Many Requests) or 503 (Service - Unavailable) responses from the worker, high error rates, or to - smooth sudden large traffic spikes.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_concurrent_dispatches": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The maximum number of concurrent tasks that Cloud Tasks allows to -be dispatched for this queue. 
After this threshold has been -reached, Cloud Tasks stops dispatching tasks until the number of -concurrent requests decreases.`, - }, - "max_dispatches_per_second": { - Type: schema.TypeFloat, - Computed: true, - Optional: true, - Description: `The maximum rate at which tasks are dispatched from this queue. - -If unspecified when the queue is created, Cloud Tasks will pick the default.`, - }, - "max_burst_size": { - Type: schema.TypeInt, - Computed: true, - Description: `The max burst size. - -Max burst size limits how fast tasks in queue are processed when many tasks are -in the queue and the rate is high. This field allows the queue to have a high -rate so processing starts shortly after a task is enqueued, but still limits -resource usage when many tasks are enqueued in a short period of time.`, - }, - }, - }, - }, - "retry_config": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Settings that determine the retry behavior.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_attempts": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Number of attempts per task. - -Cloud Tasks will attempt the task maxAttempts times (that is, if -the first attempt fails, then there will be maxAttempts - 1 -retries). Must be >= -1. - -If unspecified when the queue is created, Cloud Tasks will pick -the default. - --1 indicates unlimited attempts.`, - }, - "max_backoff": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `A task will be scheduled for retry between minBackoff and -maxBackoff duration after it fails, if the queue's RetryConfig -specifies that the task should be retried.`, - }, - "max_doublings": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The time between retries will double maxDoublings times. 
- -A task's retry interval starts at minBackoff, then doubles maxDoublings times, -then increases linearly, and finally retries retries at intervals of maxBackoff -up to maxAttempts times.`, - }, - "max_retry_duration": { - Type: schema.TypeString, - Computed: true, - Optional: true, - DiffSuppressFunc: suppressOmittedMaxDuration, - Description: `If positive, maxRetryDuration specifies the time limit for -retrying a failed task, measured from when the task was first -attempted. Once maxRetryDuration time has passed and the task has -been attempted maxAttempts times, no further attempts will be -made and the task will be deleted. - -If zero, then the task age is unlimited.`, - }, - "min_backoff": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `A task will be scheduled for retry between minBackoff and -maxBackoff duration after it fails, if the queue's RetryConfig -specifies that the task should be retried.`, - }, - }, - }, - }, - "stackdriver_logging_config": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration options for writing logs to Stackdriver Logging.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sampling_ratio": { - Type: schema.TypeFloat, - Required: true, - Description: `Specifies the fraction of operations to write to Stackdriver Logging. -This field may contain any value between 0.0 and 1.0, inclusive. 
0.0 is the -default and means that no operations are logged.`, - }, - }, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudTasksQueueCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandCloudTasksQueueName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - appEngineRoutingOverrideProp, err := expandCloudTasksQueueAppEngineRoutingOverride(d.Get("app_engine_routing_override"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("app_engine_routing_override"); !isEmptyValue(reflect.ValueOf(appEngineRoutingOverrideProp)) && (ok || !reflect.DeepEqual(v, appEngineRoutingOverrideProp)) { - obj["appEngineRoutingOverride"] = appEngineRoutingOverrideProp - } - rateLimitsProp, err := expandCloudTasksQueueRateLimits(d.Get("rate_limits"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rate_limits"); !isEmptyValue(reflect.ValueOf(rateLimitsProp)) && (ok || !reflect.DeepEqual(v, rateLimitsProp)) { - obj["rateLimits"] = rateLimitsProp - } - retryConfigProp, err := expandCloudTasksQueueRetryConfig(d.Get("retry_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retry_config"); !isEmptyValue(reflect.ValueOf(retryConfigProp)) && (ok || !reflect.DeepEqual(v, retryConfigProp)) { - obj["retryConfig"] = retryConfigProp - } - stackdriverLoggingConfigProp, err := expandCloudTasksQueueStackdriverLoggingConfig(d.Get("stackdriver_logging_config"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("stackdriver_logging_config"); !isEmptyValue(reflect.ValueOf(stackdriverLoggingConfigProp)) && (ok || !reflect.DeepEqual(v, stackdriverLoggingConfigProp)) { - obj["stackdriverLoggingConfig"] = stackdriverLoggingConfigProp - } - - url, err := replaceVars(d, config, "{{CloudTasksBasePath}}projects/{{project}}/locations/{{location}}/queues") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Queue: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Queue: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Queue: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/queues/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Queue %q: %#v", d.Id(), res) - - return resourceCloudTasksQueueRead(d, meta) -} - -func resourceCloudTasksQueueRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudTasksBasePath}}projects/{{project}}/locations/{{location}}/queues/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Queue: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - 
billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudTasksQueue %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Queue: %s", err) - } - - if err := d.Set("name", flattenCloudTasksQueueName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Queue: %s", err) - } - if err := d.Set("app_engine_routing_override", flattenCloudTasksQueueAppEngineRoutingOverride(res["appEngineRoutingOverride"], d, config)); err != nil { - return fmt.Errorf("Error reading Queue: %s", err) - } - if err := d.Set("rate_limits", flattenCloudTasksQueueRateLimits(res["rateLimits"], d, config)); err != nil { - return fmt.Errorf("Error reading Queue: %s", err) - } - if err := d.Set("retry_config", flattenCloudTasksQueueRetryConfig(res["retryConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Queue: %s", err) - } - if err := d.Set("stackdriver_logging_config", flattenCloudTasksQueueStackdriverLoggingConfig(res["stackdriverLoggingConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Queue: %s", err) - } - - return nil -} - -func resourceCloudTasksQueueUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Queue: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - appEngineRoutingOverrideProp, err := expandCloudTasksQueueAppEngineRoutingOverride(d.Get("app_engine_routing_override"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("app_engine_routing_override"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
appEngineRoutingOverrideProp)) { - obj["appEngineRoutingOverride"] = appEngineRoutingOverrideProp - } - rateLimitsProp, err := expandCloudTasksQueueRateLimits(d.Get("rate_limits"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rate_limits"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rateLimitsProp)) { - obj["rateLimits"] = rateLimitsProp - } - retryConfigProp, err := expandCloudTasksQueueRetryConfig(d.Get("retry_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retry_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retryConfigProp)) { - obj["retryConfig"] = retryConfigProp - } - stackdriverLoggingConfigProp, err := expandCloudTasksQueueStackdriverLoggingConfig(d.Get("stackdriver_logging_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("stackdriver_logging_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, stackdriverLoggingConfigProp)) { - obj["stackdriverLoggingConfig"] = stackdriverLoggingConfigProp - } - - url, err := replaceVars(d, config, "{{CloudTasksBasePath}}projects/{{project}}/locations/{{location}}/queues/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Queue %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("app_engine_routing_override") { - updateMask = append(updateMask, "appEngineRoutingOverride") - } - - if d.HasChange("rate_limits") { - updateMask = append(updateMask, "rateLimits") - } - - if d.HasChange("retry_config") { - updateMask = append(updateMask, "retryConfig") - } - - if d.HasChange("stackdriver_logging_config") { - updateMask = append(updateMask, "stackdriverLoggingConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - 
// err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Queue %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Queue %q: %#v", d.Id(), res) - } - - return resourceCloudTasksQueueRead(d, meta) -} - -func resourceCloudTasksQueueDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Queue: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CloudTasksBasePath}}projects/{{project}}/locations/{{location}}/queues/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Queue %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Queue") - } - - log.Printf("[DEBUG] Finished deleting Queue %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudTasksQueueImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/queues/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, 
config, "projects/{{project}}/locations/{{location}}/queues/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenCloudTasksQueueName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -// service, version, and instance are input-only. host is output-only. -func flattenCloudTasksQueueAppEngineRoutingOverride(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host"] = original["host"] - if override, ok := d.GetOk("app_engine_routing_override"); ok && len(override.([]interface{})) > 0 { - transformed["service"] = d.Get("app_engine_routing_override.0.service") - transformed["version"] = d.Get("app_engine_routing_override.0.version") - transformed["instance"] = d.Get("app_engine_routing_override.0.instance") - } - return []interface{}{transformed} -} - -func flattenCloudTasksQueueRateLimits(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["max_dispatches_per_second"] = - flattenCloudTasksQueueRateLimitsMaxDispatchesPerSecond(original["maxDispatchesPerSecond"], d, config) - transformed["max_concurrent_dispatches"] = - flattenCloudTasksQueueRateLimitsMaxConcurrentDispatches(original["maxConcurrentDispatches"], d, config) - transformed["max_burst_size"] = - flattenCloudTasksQueueRateLimitsMaxBurstSize(original["maxBurstSize"], d, config) - return []interface{}{transformed} -} -func flattenCloudTasksQueueRateLimitsMaxDispatchesPerSecond(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenCloudTasksQueueRateLimitsMaxConcurrentDispatches(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudTasksQueueRateLimitsMaxBurstSize(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudTasksQueueRetryConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["max_attempts"] = - flattenCloudTasksQueueRetryConfigMaxAttempts(original["maxAttempts"], d, config) - transformed["max_retry_duration"] = - flattenCloudTasksQueueRetryConfigMaxRetryDuration(original["maxRetryDuration"], d, config) - transformed["min_backoff"] = - flattenCloudTasksQueueRetryConfigMinBackoff(original["minBackoff"], d, config) - transformed["max_backoff"] = - flattenCloudTasksQueueRetryConfigMaxBackoff(original["maxBackoff"], d, config) - transformed["max_doublings"] = - flattenCloudTasksQueueRetryConfigMaxDoublings(original["maxDoublings"], d, config) - return []interface{}{transformed} -} -func flattenCloudTasksQueueRetryConfigMaxAttempts(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudTasksQueueRetryConfigMaxRetryDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudTasksQueueRetryConfigMinBackoff(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudTasksQueueRetryConfigMaxBackoff(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudTasksQueueRetryConfigMaxDoublings(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudTasksQueueStackdriverLoggingConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["sampling_ratio"] = - flattenCloudTasksQueueStackdriverLoggingConfigSamplingRatio(original["samplingRatio"], d, config) - return []interface{}{transformed} -} -func flattenCloudTasksQueueStackdriverLoggingConfigSamplingRatio(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudTasksQueueName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, 
"projects/{{project}}/locations/{{location}}/queues/{{name}}") -} - -func expandCloudTasksQueueAppEngineRoutingOverride(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedService, err := expandCloudTasksQueueAppEngineRoutingOverrideService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - transformedVersion, err := expandCloudTasksQueueAppEngineRoutingOverrideVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - transformedInstance, err := expandCloudTasksQueueAppEngineRoutingOverrideInstance(original["instance"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInstance); val.IsValid() && !isEmptyValue(val) { - transformed["instance"] = transformedInstance - } - - transformedHost, err := expandCloudTasksQueueAppEngineRoutingOverrideHost(original["host"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - return transformed, nil -} - -func expandCloudTasksQueueAppEngineRoutingOverrideService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueAppEngineRoutingOverrideVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueAppEngineRoutingOverrideInstance(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueAppEngineRoutingOverrideHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRateLimits(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxDispatchesPerSecond, err := expandCloudTasksQueueRateLimitsMaxDispatchesPerSecond(original["max_dispatches_per_second"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxDispatchesPerSecond); val.IsValid() && !isEmptyValue(val) { - transformed["maxDispatchesPerSecond"] = transformedMaxDispatchesPerSecond - } - - transformedMaxConcurrentDispatches, err := expandCloudTasksQueueRateLimitsMaxConcurrentDispatches(original["max_concurrent_dispatches"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxConcurrentDispatches); val.IsValid() && !isEmptyValue(val) { - transformed["maxConcurrentDispatches"] = transformedMaxConcurrentDispatches - } - - transformedMaxBurstSize, err := expandCloudTasksQueueRateLimitsMaxBurstSize(original["max_burst_size"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxBurstSize); val.IsValid() && !isEmptyValue(val) { - transformed["maxBurstSize"] = transformedMaxBurstSize - } - - return transformed, nil -} - -func expandCloudTasksQueueRateLimitsMaxDispatchesPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRateLimitsMaxConcurrentDispatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandCloudTasksQueueRateLimitsMaxBurstSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRetryConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxAttempts, err := expandCloudTasksQueueRetryConfigMaxAttempts(original["max_attempts"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxAttempts); val.IsValid() && !isEmptyValue(val) { - transformed["maxAttempts"] = transformedMaxAttempts - } - - transformedMaxRetryDuration, err := expandCloudTasksQueueRetryConfigMaxRetryDuration(original["max_retry_duration"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxRetryDuration); val.IsValid() && !isEmptyValue(val) { - transformed["maxRetryDuration"] = transformedMaxRetryDuration - } - - transformedMinBackoff, err := expandCloudTasksQueueRetryConfigMinBackoff(original["min_backoff"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinBackoff); val.IsValid() && !isEmptyValue(val) { - transformed["minBackoff"] = transformedMinBackoff - } - - transformedMaxBackoff, err := expandCloudTasksQueueRetryConfigMaxBackoff(original["max_backoff"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxBackoff); val.IsValid() && !isEmptyValue(val) { - transformed["maxBackoff"] = transformedMaxBackoff - } - - transformedMaxDoublings, err := expandCloudTasksQueueRetryConfigMaxDoublings(original["max_doublings"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxDoublings); val.IsValid() && !isEmptyValue(val) { - transformed["maxDoublings"] = 
transformedMaxDoublings - } - - return transformed, nil -} - -func expandCloudTasksQueueRetryConfigMaxAttempts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRetryConfigMaxRetryDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRetryConfigMinBackoff(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRetryConfigMaxBackoff(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRetryConfigMaxDoublings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueStackdriverLoggingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSamplingRatio, err := expandCloudTasksQueueStackdriverLoggingConfigSamplingRatio(original["sampling_ratio"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSamplingRatio); val.IsValid() && !isEmptyValue(val) { - transformed["samplingRatio"] = transformedSamplingRatio - } - - return transformed, nil -} - -func expandCloudTasksQueueStackdriverLoggingConfigSamplingRatio(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudbuild_bitbucket_server_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudbuild_bitbucket_server_config.go deleted file mode 100644 index 94ea7a09fb..0000000000 
--- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudbuild_bitbucket_server_config.go +++ /dev/null @@ -1,846 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceCloudBuildBitbucketServerConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudBuildBitbucketServerConfigCreate, - Read: resourceCloudBuildBitbucketServerConfigRead, - Update: resourceCloudBuildBitbucketServerConfigUpdate, - Delete: resourceCloudBuildBitbucketServerConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudBuildBitbucketServerConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "api_key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Immutable. API Key that will be attached to webhook. Once this field has been set, it cannot be changed. 
-Changing this field will result in deleting/ recreating the resource.`, - }, - "config_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID to use for the BitbucketServerConfig, which will become the final component of the BitbucketServerConfig's resource name.`, - }, - "host_uri": { - Type: schema.TypeString, - Required: true, - Description: `Immutable. The URI of the Bitbucket Server host. Once this field has been set, it cannot be changed. -If you need to change it, please create another BitbucketServerConfig.`, - }, - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location of this bitbucket server config.`, - }, - "secrets": { - Type: schema.TypeList, - Required: true, - Description: `Secret Manager secrets needed by the config.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "admin_access_token_version_name": { - Type: schema.TypeString, - Required: true, - Description: `The resource name for the admin access token's secret version.`, - }, - "read_access_token_version_name": { - Type: schema.TypeString, - Required: true, - Description: `The resource name for the read access token's secret version.`, - }, - "webhook_secret_version_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Immutable. The resource name for the webhook secret's secret version. Once this field has been set, it cannot be changed. -Changing this field will result in deleting/ recreating the resource.`, - }, - }, - }, - }, - "username": { - Type: schema.TypeString, - Required: true, - Description: `Username of the account Cloud Build will use on Bitbucket Server.`, - }, - "connected_repositories": { - Type: schema.TypeSet, - Optional: true, - Description: `Connected Bitbucket Server repositories for this config.`, - Elem: cloudbuildBitbucketServerConfigConnectedRepositoriesSchema(), - // Default schema.HashSchema is used. 
- }, - "peered_network": { - Type: schema.TypeString, - Optional: true, - Description: `The network to be used when reaching out to the Bitbucket Server instance. The VPC network must be enabled for private service connection. -This should be set if the Bitbucket Server instance is hosted on-premises and not reachable by public internet. If this field is left empty, -no network peering will occur and calls to the Bitbucket Server instance will be made over the public internet. Must be in the format -projects/{project}/global/networks/{network}, where {project} is a project number or id and {network} is the name of a VPC network in the project.`, - }, - "ssl_ca": { - Type: schema.TypeString, - Optional: true, - Description: `SSL certificate to use for requests to Bitbucket Server. The format should be PEM format but the extension can be one of .pem, .cer, or .crt.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name for the config.`, - }, - "webhook_key": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. UUID included in webhook requests. 
The UUID is used to look up the corresponding config.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func cloudbuildBitbucketServerConfigConnectedRepositoriesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "project_key": { - Type: schema.TypeString, - Required: true, - Description: `Identifier for the project storing the repository.`, - }, - "repo_slug": { - Type: schema.TypeString, - Required: true, - Description: `Identifier for the repository.`, - }, - }, - } -} - -func resourceCloudBuildBitbucketServerConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - hostUriProp, err := expandCloudBuildBitbucketServerConfigHostUri(d.Get("host_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host_uri"); !isEmptyValue(reflect.ValueOf(hostUriProp)) && (ok || !reflect.DeepEqual(v, hostUriProp)) { - obj["hostUri"] = hostUriProp - } - secretsProp, err := expandCloudBuildBitbucketServerConfigSecrets(d.Get("secrets"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("secrets"); !isEmptyValue(reflect.ValueOf(secretsProp)) && (ok || !reflect.DeepEqual(v, secretsProp)) { - obj["secrets"] = secretsProp - } - usernameProp, err := expandCloudBuildBitbucketServerConfigUsername(d.Get("username"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("username"); !isEmptyValue(reflect.ValueOf(usernameProp)) && (ok || !reflect.DeepEqual(v, usernameProp)) { - obj["username"] = usernameProp - } - apiKeyProp, err := expandCloudBuildBitbucketServerConfigApiKey(d.Get("api_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("api_key"); 
!isEmptyValue(reflect.ValueOf(apiKeyProp)) && (ok || !reflect.DeepEqual(v, apiKeyProp)) { - obj["apiKey"] = apiKeyProp - } - connectedRepositoriesProp, err := expandCloudBuildBitbucketServerConfigConnectedRepositories(d.Get("connected_repositories"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("connected_repositories"); !isEmptyValue(reflect.ValueOf(connectedRepositoriesProp)) && (ok || !reflect.DeepEqual(v, connectedRepositoriesProp)) { - obj["connectedRepositories"] = connectedRepositoriesProp - } - peeredNetworkProp, err := expandCloudBuildBitbucketServerConfigPeeredNetwork(d.Get("peered_network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peered_network"); !isEmptyValue(reflect.ValueOf(peeredNetworkProp)) && (ok || !reflect.DeepEqual(v, peeredNetworkProp)) { - obj["peeredNetwork"] = peeredNetworkProp - } - sslCaProp, err := expandCloudBuildBitbucketServerConfigSslCa(d.Get("ssl_ca"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_ca"); !isEmptyValue(reflect.ValueOf(sslCaProp)) && (ok || !reflect.DeepEqual(v, sslCaProp)) { - obj["sslCa"] = sslCaProp - } - - obj, err = resourceCloudBuildBitbucketServerConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs?bitbucketServerConfigId={{config_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new BitbucketServerConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BitbucketServerConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, 
obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating BitbucketServerConfig: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = CloudBuildOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating BitbucketServerConfig", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create BitbucketServerConfig: %s", err) - } - - if err := d.Set("name", flattenCloudBuildBitbucketServerConfigName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating BitbucketServerConfig without connected repos: %q: %#v", d.Id(), res) - - if v, ok := d.GetOkExists("connected_repositories"); !isEmptyValue(reflect.ValueOf(connectedRepositoriesProp)) && (ok || !reflect.DeepEqual(v, connectedRepositoriesProp)) { - connectedReposPropArray, ok := connectedRepositoriesProp.([]interface{}) - if !ok { - return fmt.Errorf("Error reading connected_repositories") - } - - requests := make([]interface{}, len(connectedReposPropArray)) - for i := 0; i < len(connectedReposPropArray); i++ { - connectedRepo := make(map[string]interface{}) - connectedRepo["parent"] = id - connectedRepo["repo"] = connectedReposPropArray[i] - - connectedRepoRequest := make(map[string]interface{}) - connectedRepoRequest["parent"] = id - connectedRepoRequest["bitbucketServerConnectedRepository"] = connectedRepo - - requests[i] = connectedRepoRequest - } - obj = make(map[string]interface{}) - obj["requests"] = requests - - url, err = replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}/connectedRepositories:batchCreate") - if err != nil { - return err - } - - res, err = SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating connected_repositories: %s", err) - } - - err = CloudBuildOperationWaitTime( - config, res, project, "Creating connected_repositories on BitbucketServerConfig", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error waiting to create connected_repositories: %s", err) - } - } else { - log.Printf("[DEBUG] No connected repositories found to create: %#v", connectedRepositoriesProp) - } - - 
log.Printf("[DEBUG] Finished creating BitbucketServerConfig %q: %#v", d.Id(), res) - - return resourceCloudBuildBitbucketServerConfigRead(d, meta) -} - -func resourceCloudBuildBitbucketServerConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BitbucketServerConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudBuildBitbucketServerConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) - } - - if err := d.Set("name", flattenCloudBuildBitbucketServerConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) - } - if err := d.Set("host_uri", flattenCloudBuildBitbucketServerConfigHostUri(res["hostUri"], d, config)); err != nil { - return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) - } - if err := d.Set("secrets", flattenCloudBuildBitbucketServerConfigSecrets(res["secrets"], d, config)); err != nil { - return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) - } - if err := d.Set("username", flattenCloudBuildBitbucketServerConfigUsername(res["username"], d, config)); err != nil { - return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) - } - if err := 
d.Set("webhook_key", flattenCloudBuildBitbucketServerConfigWebhookKey(res["webhookKey"], d, config)); err != nil { - return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) - } - if err := d.Set("api_key", flattenCloudBuildBitbucketServerConfigApiKey(res["apiKey"], d, config)); err != nil { - return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) - } - if err := d.Set("connected_repositories", flattenCloudBuildBitbucketServerConfigConnectedRepositories(res["connectedRepositories"], d, config)); err != nil { - return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) - } - if err := d.Set("peered_network", flattenCloudBuildBitbucketServerConfigPeeredNetwork(res["peeredNetwork"], d, config)); err != nil { - return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) - } - if err := d.Set("ssl_ca", flattenCloudBuildBitbucketServerConfigSslCa(res["sslCa"], d, config)); err != nil { - return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) - } - - return nil -} - -func resourceCloudBuildBitbucketServerConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BitbucketServerConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - hostUriProp, err := expandCloudBuildBitbucketServerConfigHostUri(d.Get("host_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host_uri"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hostUriProp)) { - obj["hostUri"] = hostUriProp - } - secretsProp, err := expandCloudBuildBitbucketServerConfigSecrets(d.Get("secrets"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("secrets"); !isEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, secretsProp)) { - obj["secrets"] = secretsProp - } - usernameProp, err := expandCloudBuildBitbucketServerConfigUsername(d.Get("username"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("username"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, usernameProp)) { - obj["username"] = usernameProp - } - connectedRepositoriesProp, err := expandCloudBuildBitbucketServerConfigConnectedRepositories(d.Get("connected_repositories"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("connected_repositories"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, connectedRepositoriesProp)) { - obj["connectedRepositories"] = connectedRepositoriesProp - } - peeredNetworkProp, err := expandCloudBuildBitbucketServerConfigPeeredNetwork(d.Get("peered_network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peered_network"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peeredNetworkProp)) { - obj["peeredNetwork"] = peeredNetworkProp - } - sslCaProp, err := expandCloudBuildBitbucketServerConfigSslCa(d.Get("ssl_ca"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_ca"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslCaProp)) { - obj["sslCa"] = sslCaProp - } - - obj, err = resourceCloudBuildBitbucketServerConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating BitbucketServerConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("host_uri") { - updateMask = append(updateMask, "hostUri") - } - - if d.HasChange("secrets") { - updateMask = append(updateMask, "secrets") - } - - if d.HasChange("username") { - updateMask = 
append(updateMask, "username") - } - - if d.HasChange("connected_repositories") { - updateMask = append(updateMask, "connectedRepositories") - } - - if d.HasChange("peered_network") { - updateMask = append(updateMask, "peeredNetwork") - } - - if d.HasChange("ssl_ca") { - updateMask = append(updateMask, "sslCa") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - // remove connectedRepositories from updateMask - for i, field := range updateMask { - if field == "connectedRepositories" { - updateMask = append(updateMask[:i], updateMask[i+1:]...) - break - } - } - // reconstruct url - url, err = replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") - if err != nil { - return err - } - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating BitbucketServerConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating BitbucketServerConfig %q: %#v", d.Id(), res) - } - - err = CloudBuildOperationWaitTime( - config, res, project, "Updating BitbucketServerConfig", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - if d.HasChange("connected_repositories") { - o, n := d.GetChange("connected_repositories") - oReposSet, ok := o.(*schema.Set) - if !ok { - return fmt.Errorf("Error reading old connected repositories") - } - nReposSet, ok := n.(*schema.Set) - if !ok { - 
return fmt.Errorf("Error reading new connected repositories") - } - - removeRepos := oReposSet.Difference(nReposSet).List() - createRepos := nReposSet.Difference(oReposSet).List() - - url, err = replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}:removeBitbucketServerConnectedRepository") - if err != nil { - return err - } - - // send remove repo requests. - for _, repo := range removeRepos { - obj := make(map[string]interface{}) - obj["connectedRepository"] = repo - res, err = SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error removing connected_repositories: %s", err) - } - } - - // if repos to create, prepare and send batchCreate request - if len(createRepos) > 0 { - parent, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - var requests []interface{} - for _, repo := range createRepos { - connectedRepo := make(map[string]interface{}) - connectedRepo["parent"] = parent - connectedRepo["repo"] = repo - - connectedRepoRequest := make(map[string]interface{}) - connectedRepoRequest["parent"] = parent - connectedRepoRequest["bitbucketServerConnectedRepository"] = connectedRepo - - requests = append(requests, connectedRepoRequest) - } - obj = make(map[string]interface{}) - obj["requests"] = requests - - url, err = replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}/connectedRepositories:batchCreate") - if err != nil { - return err - } - - res, err = SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating connected_repositories: %s", err) - } - - err = 
CloudBuildOperationWaitTime( - config, res, project, "Updating connected_repositories on BitbucketServerConfig", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error waiting to create connected_repositories: %s", err) - } - } - } else { - log.Printf("[DEBUG] connected_repositories have no changes") - } - return resourceCloudBuildBitbucketServerConfigRead(d, meta) -} - -func resourceCloudBuildBitbucketServerConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BitbucketServerConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting BitbucketServerConfig %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "BitbucketServerConfig") - } - - err = CloudBuildOperationWaitTime( - config, res, project, "Deleting BitbucketServerConfig", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting BitbucketServerConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudBuildBitbucketServerConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - 
"projects/(?P[^/]+)/locations/(?P[^/]+)/bitbucketServerConfigs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenCloudBuildBitbucketServerConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildBitbucketServerConfigHostUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildBitbucketServerConfigSecrets(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["admin_access_token_version_name"] = - flattenCloudBuildBitbucketServerConfigSecretsAdminAccessTokenVersionName(original["adminAccessTokenVersionName"], d, config) - transformed["read_access_token_version_name"] = - flattenCloudBuildBitbucketServerConfigSecretsReadAccessTokenVersionName(original["readAccessTokenVersionName"], d, config) - transformed["webhook_secret_version_name"] = - flattenCloudBuildBitbucketServerConfigSecretsWebhookSecretVersionName(original["webhookSecretVersionName"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildBitbucketServerConfigSecretsAdminAccessTokenVersionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildBitbucketServerConfigSecretsReadAccessTokenVersionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudBuildBitbucketServerConfigSecretsWebhookSecretVersionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildBitbucketServerConfigUsername(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildBitbucketServerConfigWebhookKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildBitbucketServerConfigApiKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildBitbucketServerConfigConnectedRepositories(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(schema.HashResource(cloudbuildBitbucketServerConfigConnectedRepositoriesSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "project_key": flattenCloudBuildBitbucketServerConfigConnectedRepositoriesProjectKey(original["projectKey"], d, config), - "repo_slug": flattenCloudBuildBitbucketServerConfigConnectedRepositoriesRepoSlug(original["repoSlug"], d, config), - }) - } - return transformed -} -func flattenCloudBuildBitbucketServerConfigConnectedRepositoriesProjectKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildBitbucketServerConfigConnectedRepositoriesRepoSlug(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildBitbucketServerConfigPeeredNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildBitbucketServerConfigSslCa(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
expandCloudBuildBitbucketServerConfigHostUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildBitbucketServerConfigSecrets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAdminAccessTokenVersionName, err := expandCloudBuildBitbucketServerConfigSecretsAdminAccessTokenVersionName(original["admin_access_token_version_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAdminAccessTokenVersionName); val.IsValid() && !isEmptyValue(val) { - transformed["adminAccessTokenVersionName"] = transformedAdminAccessTokenVersionName - } - - transformedReadAccessTokenVersionName, err := expandCloudBuildBitbucketServerConfigSecretsReadAccessTokenVersionName(original["read_access_token_version_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedReadAccessTokenVersionName); val.IsValid() && !isEmptyValue(val) { - transformed["readAccessTokenVersionName"] = transformedReadAccessTokenVersionName - } - - transformedWebhookSecretVersionName, err := expandCloudBuildBitbucketServerConfigSecretsWebhookSecretVersionName(original["webhook_secret_version_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWebhookSecretVersionName); val.IsValid() && !isEmptyValue(val) { - transformed["webhookSecretVersionName"] = transformedWebhookSecretVersionName - } - - return transformed, nil -} - -func expandCloudBuildBitbucketServerConfigSecretsAdminAccessTokenVersionName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildBitbucketServerConfigSecretsReadAccessTokenVersionName(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildBitbucketServerConfigSecretsWebhookSecretVersionName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildBitbucketServerConfigUsername(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildBitbucketServerConfigApiKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildBitbucketServerConfigConnectedRepositories(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectKey, err := expandCloudBuildBitbucketServerConfigConnectedRepositoriesProjectKey(original["project_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectKey); val.IsValid() && !isEmptyValue(val) { - transformed["projectKey"] = transformedProjectKey - } - - transformedRepoSlug, err := expandCloudBuildBitbucketServerConfigConnectedRepositoriesRepoSlug(original["repo_slug"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRepoSlug); val.IsValid() && !isEmptyValue(val) { - transformed["repoSlug"] = transformedRepoSlug - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudBuildBitbucketServerConfigConnectedRepositoriesProjectKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildBitbucketServerConfigConnectedRepositoriesRepoSlug(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func expandCloudBuildBitbucketServerConfigPeeredNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildBitbucketServerConfigSslCa(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudBuildBitbucketServerConfigEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // connectedRepositories is needed for batchCreate on the config after creation. - delete(obj, "connectedRepositories") - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudbuild_trigger.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudbuild_trigger.go deleted file mode 100644 index eb16d1ee10..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudbuild_trigger.go +++ /dev/null @@ -1,5190 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func stepTimeoutCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { - buildList := diff.Get("build").([]interface{}) - if len(buildList) == 0 || buildList[0] == nil { - return nil - } - build := buildList[0].(map[string]interface{}) - buildTimeoutString := build["timeout"].(string) - - buildTimeout, err := time.ParseDuration(buildTimeoutString) - if err != nil { - return fmt.Errorf("Error parsing build timeout : %s", err) - } - - var stepTimeoutSum time.Duration = 0 - steps := build["step"].([]interface{}) - for _, rawstep := range steps { - if rawstep == nil { - continue - } - step := rawstep.(map[string]interface{}) - timeoutString := step["timeout"].(string) - if len(timeoutString) == 0 { - continue - } - - timeout, err := time.ParseDuration(timeoutString) - if err != nil { - return fmt.Errorf("Error parsing build step timeout: %s", err) - } - stepTimeoutSum += timeout - } - if stepTimeoutSum > buildTimeout { - return fmt.Errorf("Step timeout sum (%v) cannot be greater than build timeout (%v)", stepTimeoutSum, buildTimeout) - } - return nil -} - -func ResourceCloudBuildTrigger() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudBuildTriggerCreate, - Read: resourceCloudBuildTriggerRead, - Update: resourceCloudBuildTriggerUpdate, - Delete: resourceCloudBuildTriggerDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudBuildTriggerImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - SchemaVersion: 2, - StateUpgraders: []schema.StateUpgrader{ - { - Type: 
resourceCloudBuildTriggerResourceV0().CoreConfigSchema().ImpliedType(), - Upgrade: resourceCloudBuildTriggerUpgradeV0, - Version: 0, - }, - { - Type: resourceCloudBuildTriggerResourceV1().CoreConfigSchema().ImpliedType(), - Upgrade: resourceCloudBuildTriggerUpgradeV1, - Version: 1, - }, - }, - - CustomizeDiff: stepTimeoutCustomizeDiff, - - Schema: map[string]*schema.Schema{ - "approval_config": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Configuration for manual approval to start a build invocation of this BuildTrigger. -Builds created by this trigger will require approval before they execute. -Any user with a Cloud Build Approver role for the project can approve a build.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "approval_required": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether or not approval is needed. If this is set on a build, it will become pending when run, -and will need to be explicitly approved to start.`, - Default: false, - }, - }, - }, - }, - "bitbucket_server_trigger_config": { - Type: schema.TypeList, - Optional: true, - Description: `BitbucketServerTriggerConfig describes the configuration of a trigger that creates a build whenever a Bitbucket Server event is received.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bitbucket_server_config_resource": { - Type: schema.TypeString, - Required: true, - Description: `The Bitbucket server config resource that this trigger config maps to.`, - }, - "project_key": { - Type: schema.TypeString, - Required: true, - Description: `Key of the project that the repo is in. For example: The key for https://mybitbucket.server/projects/TEST/repos/test-repo is "TEST".`, - }, - "repo_slug": { - Type: schema.TypeString, - Required: true, - Description: `Slug of the repository. 
A repository slug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL. -For example, if the repository name is 'test repo', in the URL it would become 'test-repo' as in https://mybitbucket.server/projects/TEST/repos/test-repo.`, - }, - "pull_request": { - Type: schema.TypeList, - Optional: true, - Description: `Filter to match changes in pull requests.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "branch": { - Type: schema.TypeString, - Required: true, - Description: `Regex of branches to match. -The syntax of the regular expressions accepted is the syntax accepted by RE2 and described at https://github.com/google/re2/wiki/Syntax`, - }, - "comment_control": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY", ""}), - Description: `Configure builds to run whether a repository owner or collaborator need to comment /gcbrun. Possible values: ["COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"]`, - }, - "invert_regex": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, branches that do NOT match the git_ref will trigger a build.`, - }, - }, - }, - ExactlyOneOf: []string{"bitbucket_server_trigger_config.0.pull_request", "bitbucket_server_trigger_config.0.push"}, - }, - "push": { - Type: schema.TypeList, - Optional: true, - Description: `Filter to match changes in refs like branches, tags.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "branch": { - Type: schema.TypeString, - Optional: true, - Description: `Regex of branches to match. 
Specify only one of branch or tag.`, - ExactlyOneOf: []string{"bitbucket_server_trigger_config.0.push.0.branch", "bitbucket_server_trigger_config.0.push.0.tag"}, - }, - "invert_regex": { - Type: schema.TypeBool, - Optional: true, - Description: `When true, only trigger a build if the revision regex does NOT match the gitRef regex.`, - }, - "tag": { - Type: schema.TypeString, - Optional: true, - Description: `Regex of tags to match. Specify only one of branch or tag.`, - ExactlyOneOf: []string{"bitbucket_server_trigger_config.0.push.0.branch", "bitbucket_server_trigger_config.0.push.0.tag"}, - }, - }, - }, - ExactlyOneOf: []string{"bitbucket_server_trigger_config.0.pull_request", "bitbucket_server_trigger_config.0.push"}, - }, - }, - }, - AtLeastOneOf: []string{"trigger_template", "github", "bitbucket_server_trigger_config", "pubsub_config", "webhook_config", "source_to_build"}, - }, - "build": { - Type: schema.TypeList, - Optional: true, - Description: `Contents of the build template. Either a filename or build template must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "step": { - Type: schema.TypeList, - Required: true, - Description: `The operations to be performed on the workspace.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `The name of the container image that will run this particular build step. - -If the image is available in the host's Docker daemon's cache, it will be -run directly. If not, the host will attempt to pull the image first, using -the builder service account's credentials if necessary. - -The Docker daemon's cache will already have the latest versions of all of -the officially supported build steps (see https://github.com/GoogleCloudPlatform/cloud-builders -for images and examples). 
-The Docker daemon will also have cached many of the layers for some popular -images, like "ubuntu", "debian", but they will be refreshed at the time -you attempt to use them. - -If you built an image in a previous build step, it will be stored in the -host's Docker daemon's cache and is available to use as the name for a -later build step.`, - }, - "args": { - Type: schema.TypeList, - Optional: true, - Description: `A list of arguments that will be presented to the step when it is started. - -If the image used to run the step's container has an entrypoint, the args -are used as arguments to that entrypoint. If the image does not define an -entrypoint, the first element in args is used as the entrypoint, and the -remainder will be used as arguments.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "dir": { - Type: schema.TypeString, - Optional: true, - Description: `Working directory to use when running this step's container. - -If this value is a relative path, it is relative to the build's working -directory. If this value is absolute, it may be outside the build's working -directory, in which case the contents of the path may not be persisted -across build step executions, unless a 'volume' for that path is specified. - -If the build specifies a 'RepoSource' with 'dir' and a step with a -'dir', -which specifies an absolute path, the 'RepoSource' 'dir' is ignored -for the step's execution.`, - }, - "entrypoint": { - Type: schema.TypeString, - Optional: true, - Description: `Entrypoint to be used instead of the build step image's -default entrypoint. -If unset, the image's default entrypoint is used`, - }, - "env": { - Type: schema.TypeList, - Optional: true, - Description: `A list of environment variable definitions to be used when -running a step. 
- -The elements are of the form "KEY=VALUE" for the environment variable -"KEY" being given the value "VALUE".`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "id": { - Type: schema.TypeString, - Optional: true, - Description: `Unique identifier for this build step, used in 'wait_for' to -reference this build step as a dependency.`, - }, - "script": { - Type: schema.TypeString, - Optional: true, - Description: `A shell script to be executed in the step. -When script is provided, the user cannot specify the entrypoint or args.`, - }, - "secret_env": { - Type: schema.TypeList, - Optional: true, - Description: `A list of environment variables which are encrypted using -a Cloud Key -Management Service crypto key. These values must be specified in -the build's 'Secret'.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "timeout": { - Type: schema.TypeString, - Optional: true, - Description: `Time limit for executing this build step. If not defined, -the step has no -time limit and will be allowed to continue to run until either it -completes or the build itself times out.`, - }, - "timing": { - Type: schema.TypeString, - Optional: true, - Description: `Output only. Stores timing information for executing this -build step.`, - }, - "volumes": { - Type: schema.TypeList, - Optional: true, - Description: `List of volumes to mount into the build step. - -Each volume is created as an empty volume prior to execution of the -build step. Upon completion of the build, volumes and their contents -are discarded. - -Using a named volume in only one step is not valid as it is -indicative of a build request with an incorrect configuration.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the volume to mount. - -Volume names must be unique per build step and must be valid names for -Docker volumes. 
Each named volume must be used by at least two build steps.`, - }, - "path": { - Type: schema.TypeString, - Required: true, - Description: `Path at which to mount the volume. - -Paths must be absolute and cannot conflict with other volume paths on -the same build step or with certain reserved volume paths.`, - }, - }, - }, - }, - "wait_for": { - Type: schema.TypeList, - Optional: true, - Description: `The ID(s) of the step(s) that this build step depends on. - -This build step will not start until all the build steps in 'wait_for' -have completed successfully. If 'wait_for' is empty, this build step -will start when all previous build steps in the 'Build.Steps' list -have completed successfully.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "artifacts": { - Type: schema.TypeList, - Optional: true, - Description: `Artifacts produced by the build that should be uploaded upon successful completion of all build steps.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "images": { - Type: schema.TypeList, - Optional: true, - Description: `A list of images to be pushed upon the successful completion of all build steps. - -The images will be pushed using the builder service account's credentials. - -The digests of the pushed images will be stored in the Build resource's results field. - -If any of the images fail to be pushed, the build is marked FAILURE.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "objects": { - Type: schema.TypeList, - Optional: true, - Description: `A list of objects to be uploaded to Cloud Storage upon successful completion of all build steps. - -Files in the workspace matching specified paths globs will be uploaded to the -Cloud Storage location using the builder service account's credentials. - -The location and generation of the uploaded objects will be stored in the Build resource's results field. 
- -If any objects fail to be pushed, the build is marked FAILURE.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Optional: true, - Description: `Cloud Storage bucket and optional object path, in the form "gs://bucket/path/to/somewhere/". - -Files in the workspace matching any path pattern will be uploaded to Cloud Storage with -this location as a prefix.`, - }, - "paths": { - Type: schema.TypeList, - Optional: true, - Description: `Path globs used to match files in the build's workspace.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "timing": { - Type: schema.TypeList, - Computed: true, - Description: `Output only. Stores timing information for pushing all artifact objects.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "end_time": { - Type: schema.TypeString, - Optional: true, - Description: `End of time span. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to -nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "start_time": { - Type: schema.TypeString, - Optional: true, - Description: `Start of time span. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to -nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "available_secrets": { - Type: schema.TypeList, - Optional: true, - Description: `Secrets and secret environment variables.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret_manager": { - Type: schema.TypeList, - Required: true, - Description: `Pairs a secret environment variable with a SecretVersion in Secret Manager.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "env": { - Type: schema.TypeString, - Required: true, - Description: `Environment variable name to associate with the secret. Secret environment -variables must be unique across all of a build's secrets, and must be used -by at least one build step.`, - }, - "version_name": { - Type: schema.TypeString, - Required: true, - Description: `Resource name of the SecretVersion. In format: projects/*/secrets/*/versions/*`, - }, - }, - }, - }, - }, - }, - }, - "images": { - Type: schema.TypeList, - Optional: true, - Description: `A list of images to be pushed upon the successful completion of all build steps. -The images are pushed using the builder service account's credentials. -The digests of the pushed images will be stored in the Build resource's results field. -If any of the images fail to be pushed, the build status is marked FAILURE.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "logs_bucket": { - Type: schema.TypeString, - Optional: true, - Description: `Google Cloud Storage bucket where logs should be written. -Logs file names will be of the format ${logsBucket}/log-${build_id}.txt.`, - }, - "options": { - Type: schema.TypeList, - Optional: true, - Description: `Special options for this build.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disk_size_gb": { - Type: schema.TypeInt, - Optional: true, - Description: `Requested disk size for the VM that runs the build. 
Note that this is NOT "disk free"; -some of the space will be used by the operating system and build utilities. -Also note that this is the minimum disk size that will be allocated for the build -- -the build may run with a larger disk than requested. At present, the maximum disk size -is 1000GB; builds that request more than the maximum are rejected with an error.`, - }, - "dynamic_substitutions": { - Type: schema.TypeBool, - Optional: true, - Description: `Option to specify whether or not to apply bash style string operations to the substitutions. - -NOTE this is always enabled for triggered builds and cannot be overridden in the build configuration file.`, - }, - "env": { - Type: schema.TypeList, - Optional: true, - Description: `A list of global environment variable definitions that will exist for all build steps -in this build. If a variable is defined in both globally and in a build step, -the variable will use the build step value. - -The elements are of the form "KEY=VALUE" for the environment variable "KEY" being given the value "VALUE".`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "log_streaming_option": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF", ""}), - Description: `Option to define build log streaming behavior to Google Cloud Storage. Possible values: ["STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF"]`, - }, - "logging": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", "STACKDRIVER_ONLY", "CLOUD_LOGGING_ONLY", "NONE", ""}), - Description: `Option to specify the logging mode, which determines if and where build logs are stored. 
Possible values: ["LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", "STACKDRIVER_ONLY", "CLOUD_LOGGING_ONLY", "NONE"]`, - }, - "machine_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"UNSPECIFIED", "N1_HIGHCPU_8", "N1_HIGHCPU_32", "E2_HIGHCPU_8", "E2_HIGHCPU_32", ""}), - Description: `Compute Engine machine type on which to run the build. Possible values: ["UNSPECIFIED", "N1_HIGHCPU_8", "N1_HIGHCPU_32", "E2_HIGHCPU_8", "E2_HIGHCPU_32"]`, - }, - "requested_verify_option": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"NOT_VERIFIED", "VERIFIED", ""}), - Description: `Requested verifiability options. Possible values: ["NOT_VERIFIED", "VERIFIED"]`, - }, - "secret_env": { - Type: schema.TypeList, - Optional: true, - Description: `A list of global environment variables, which are encrypted using a Cloud Key Management -Service crypto key. These values must be specified in the build's Secret. These variables -will be available to all build steps in this build.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "source_provenance_hash": { - Type: schema.TypeList, - Optional: true, - Description: `Requested hash for SourceProvenance. Possible values: ["NONE", "SHA256", "MD5"]`, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"NONE", "SHA256", "MD5"}), - }, - }, - "substitution_option": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"MUST_MATCH", "ALLOW_LOOSE", ""}), - Description: `Option to specify behavior when there is an error in the substitution checks. - -NOTE this is always set to ALLOW_LOOSE for triggered builds and cannot be overridden -in the build configuration file. 
Possible values: ["MUST_MATCH", "ALLOW_LOOSE"]`, - }, - "volumes": { - Type: schema.TypeList, - Optional: true, - Description: `Global list of volumes to mount for ALL build steps - -Each volume is created as an empty volume prior to starting the build process. -Upon completion of the build, volumes and their contents are discarded. Global -volume names and paths cannot conflict with the volumes defined a build step. - -Using a global volume in a build with only one step is not valid as it is indicative -of a build request with an incorrect configuration.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the volume to mount. - -Volume names must be unique per build step and must be valid names for Docker volumes. -Each named volume must be used by at least two build steps.`, - }, - "path": { - Type: schema.TypeString, - Optional: true, - Description: `Path at which to mount the volume. - -Paths must be absolute and cannot conflict with other volume paths on the same -build step or with certain reserved volume paths.`, - }, - }, - }, - }, - "worker_pool": { - Type: schema.TypeString, - Optional: true, - Description: `Option to specify a WorkerPool for the build. Format projects/{project}/workerPools/{workerPool} - -This field is experimental.`, - }, - }, - }, - }, - "queue_ttl": { - Type: schema.TypeString, - Optional: true, - Description: `TTL in queue for this build. If provided and the build is enqueued longer than this value, -the build will expire and the build status will be EXPIRED. -The TTL starts ticking from createTime. -A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, - }, - "secret": { - Type: schema.TypeList, - Optional: true, - Description: `Secrets to decrypt using Cloud Key Management Service.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_name": { - Type: schema.TypeString, - Required: true, - Description: `Cloud KMS key name to use to decrypt these envs.`, - }, - "secret_env": { - Type: schema.TypeMap, - Optional: true, - Description: `Map of environment variable name to its encrypted value. -Secret environment variables must be unique across all of a build's secrets, -and must be used by at least one build step. Values can be at most 64 KB in size. -There can be at most 100 secret values across all of a build's secrets.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "source": { - Type: schema.TypeList, - Optional: true, - Description: `The location of the source files to build. - -One of 'storageSource' or 'repoSource' must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "repo_source": { - Type: schema.TypeList, - Optional: true, - Description: `Location of the source in a Google Cloud Source Repository.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "repo_name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the Cloud Source Repository.`, - }, - "branch_name": { - Type: schema.TypeString, - Optional: true, - Description: `Regex matching branches to build. Exactly one a of branch name, tag, or commit SHA must be provided. -The syntax of the regular expressions accepted is the syntax accepted by RE2 and -described at https://github.com/google/re2/wiki/Syntax`, - ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, - }, - "commit_sha": { - Type: schema.TypeString, - Optional: true, - Description: `Explicit commit SHA to build. 
Exactly one a of branch name, tag, or commit SHA must be provided.`, - ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, - }, - "dir": { - Type: schema.TypeString, - Optional: true, - Description: `Directory, relative to the source root, in which to run the build. -This must be a relative path. If a step's dir is specified and is an absolute path, -this value is ignored for that step's execution.`, - }, - "invert_regex": { - Type: schema.TypeBool, - Optional: true, - Description: `Only trigger a build if the revision regex does NOT match the revision regex.`, - }, - "project_id": { - Type: schema.TypeString, - Optional: true, - Description: `ID of the project that owns the Cloud Source Repository. -If omitted, the project ID requesting the build is assumed.`, - }, - "substitutions": { - Type: schema.TypeMap, - Optional: true, - Description: `Substitutions to use in a triggered build. Should only be used with triggers.run`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "tag_name": { - Type: schema.TypeString, - Optional: true, - Description: `Regex matching tags to build. Exactly one a of branch name, tag, or commit SHA must be provided. 
-The syntax of the regular expressions accepted is the syntax accepted by RE2 and -described at https://github.com/google/re2/wiki/Syntax`, - ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, - }, - }, - }, - }, - "storage_source": { - Type: schema.TypeList, - Optional: true, - Description: `Location of the source in an archive file in Google Cloud Storage.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - Description: `Google Cloud Storage bucket containing the source.`, - }, - "object": { - Type: schema.TypeString, - Required: true, - Description: `Google Cloud Storage object containing the source. -This object must be a gzipped archive file (.tar.gz) containing source to build.`, - }, - "generation": { - Type: schema.TypeString, - Optional: true, - Description: `Google Cloud Storage generation for the object. -If the generation is omitted, the latest generation will be used`, - }, - }, - }, - }, - }, - }, - }, - "substitutions": { - Type: schema.TypeMap, - Optional: true, - Description: `Substitutions data for Build resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "tags": { - Type: schema.TypeList, - Optional: true, - Description: `Tags for annotation of a Build. These are not docker tags.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "timeout": { - Type: schema.TypeString, - Optional: true, - Description: `Amount of time that this build should be allowed to run, to second granularity. -If this amount of time elapses, work on the build will cease and the build status will be TIMEOUT. -This timeout must be equal to or greater than the sum of the timeouts for build steps within the build. -The expected format is the number of seconds followed by s. 
-Default time is ten minutes (600s).`, - Default: "600s", - }, - }, - }, - ExactlyOneOf: []string{"filename", "build", "git_file_source"}, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Human-readable description of the trigger.`, - }, - "disabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether the trigger is disabled or not. If true, the trigger will never result in a build.`, - }, - "filename": { - Type: schema.TypeString, - Optional: true, - Description: `Path, from the source root, to a file whose contents is used for the template. -Either a filename or build template must be provided. Set this only when using trigger_template or github. -When using Pub/Sub, Webhook or Manual set the file name using git_file_source instead.`, - ExactlyOneOf: []string{"filename", "build", "git_file_source"}, - }, - "filter": { - Type: schema.TypeString, - Optional: true, - Description: `A Common Expression Language string. Used only with Pub/Sub and Webhook.`, - }, - "git_file_source": { - Type: schema.TypeList, - Optional: true, - Description: `The file source describing the local or remote Build template.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "path": { - Type: schema.TypeString, - Required: true, - Description: `The path of the file, with the repo root as the root of the path.`, - }, - "repo_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET_SERVER"}), - Description: `The type of the repo, since it may not be explicit from the repo field (e.g from a URL). -Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB, BITBUCKET_SERVER Possible values: ["UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET_SERVER"]`, - }, - "github_enterprise_config": { - Type: schema.TypeString, - Optional: true, - Description: `The full resource name of the github enterprise config. 
-Format: projects/{project}/locations/{location}/githubEnterpriseConfigs/{id}. projects/{project}/githubEnterpriseConfigs/{id}.`, - }, - "revision": { - Type: schema.TypeString, - Optional: true, - Description: `The branch, tag, arbitrary ref, or SHA version of the repo to use when resolving the -filename (optional). This field respects the same syntax/resolution as described here: https://git-scm.com/docs/gitrevisions -If unspecified, the revision from which the trigger invocation originated is assumed to be the revision from which to read the specified path.`, - }, - "uri": { - Type: schema.TypeString, - Optional: true, - Description: `The URI of the repo (optional). If unspecified, the repo from which the trigger -invocation originated is assumed to be the repo from which to read the specified path.`, - }, - }, - }, - ExactlyOneOf: []string{"filename", "git_file_source", "build"}, - }, - "github": { - Type: schema.TypeList, - Optional: true, - Description: `Describes the configuration of a trigger that creates a build whenever a GitHub event is received. - -One of 'trigger_template', 'github', 'pubsub_config' or 'webhook_config' must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enterprise_config_resource_name": { - Type: schema.TypeString, - Optional: true, - Description: `The resource name of the github enterprise config that should be applied to this installation. -For example: "projects/{$projectId}/locations/{$locationId}/githubEnterpriseConfigs/{$configId}"`, - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the repository. For example: The name for -https://github.com/googlecloudplatform/cloud-builders is "cloud-builders".`, - }, - "owner": { - Type: schema.TypeString, - Optional: true, - Description: `Owner of the repository. 
For example: The owner for -https://github.com/googlecloudplatform/cloud-builders is "googlecloudplatform".`, - }, - "pull_request": { - Type: schema.TypeList, - Optional: true, - Description: `filter to match changes in pull requests. Specify only one of 'pull_request' or 'push'.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "branch": { - Type: schema.TypeString, - Required: true, - Description: `Regex of branches to match.`, - }, - "comment_control": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY", ""}), - Description: `Whether to block builds on a "/gcbrun" comment from a repository owner or collaborator. Possible values: ["COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"]`, - }, - "invert_regex": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, branches that do NOT match the git_ref will trigger a build.`, - }, - }, - }, - ExactlyOneOf: []string{"github.0.pull_request", "github.0.push"}, - }, - "push": { - Type: schema.TypeList, - Optional: true, - Description: `filter to match changes in refs, like branches or tags. Specify only one of 'pull_request' or 'push'.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "branch": { - Type: schema.TypeString, - Optional: true, - Description: `Regex of branches to match. Specify only one of branch or tag.`, - ExactlyOneOf: []string{"github.0.push.0.branch", "github.0.push.0.tag"}, - }, - "invert_regex": { - Type: schema.TypeBool, - Optional: true, - Description: `When true, only trigger a build if the revision regex does NOT match the git_ref regex.`, - }, - "tag": { - Type: schema.TypeString, - Optional: true, - Description: `Regex of tags to match. 
Specify only one of branch or tag.`, - ExactlyOneOf: []string{"github.0.push.0.branch", "github.0.push.0.tag"}, - }, - }, - }, - ExactlyOneOf: []string{"github.0.pull_request", "github.0.push"}, - }, - }, - }, - AtLeastOneOf: []string{"trigger_template", "github", "bitbucket_server_trigger_config", "pubsub_config", "webhook_config", "source_to_build"}, - }, - "ignored_files": { - Type: schema.TypeList, - Optional: true, - Description: `ignoredFiles and includedFiles are file glob matches using https://golang.org/pkg/path/filepath/#Match -extended with support for '**'. - -If ignoredFiles and changed files are both empty, then they are not -used to determine whether or not to trigger a build. - -If ignoredFiles is not empty, then we ignore any files that match any -of the ignored_file globs. If the change has no files that are outside -of the ignoredFiles globs, then we do not trigger a build.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "include_build_logs": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"INCLUDE_BUILD_LOGS_UNSPECIFIED", "INCLUDE_BUILD_LOGS_WITH_STATUS", ""}), - Description: `Build logs will be sent back to GitHub as part of the checkrun -result. Values can be INCLUDE_BUILD_LOGS_UNSPECIFIED or -INCLUDE_BUILD_LOGS_WITH_STATUS Possible values: ["INCLUDE_BUILD_LOGS_UNSPECIFIED", "INCLUDE_BUILD_LOGS_WITH_STATUS"]`, - }, - "included_files": { - Type: schema.TypeList, - Optional: true, - Description: `ignoredFiles and includedFiles are file glob matches using https://golang.org/pkg/path/filepath/#Match -extended with support for '**'. - -If any of the files altered in the commit pass the ignoredFiles filter -and includedFiles is empty, then as far as this filter is concerned, we -should trigger the build. - -If any of the files altered in the commit pass the ignoredFiles filter -and includedFiles is not empty, then we make sure that at least one of -those files matches a includedFiles glob. 
If not, then we do not trigger -a build.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "location": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The [Cloud Build location](https://cloud.google.com/build/docs/locations) for the trigger. -If not specified, "global" is used.`, - Default: "global", - }, - "name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Name of the trigger. Must be unique within the project.`, - }, - "pubsub_config": { - Type: schema.TypeList, - Optional: true, - Description: `PubsubConfig describes the configuration of a trigger that creates -a build whenever a Pub/Sub message is published. - -One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "topic": { - Type: schema.TypeString, - Required: true, - Description: `The name of the topic from which this subscription is receiving messages.`, - }, - "service_account_email": { - Type: schema.TypeString, - Optional: true, - Description: `Service account that will make the push request.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `Potential issues with the underlying Pub/Sub subscription configuration. -Only populated on get requests.`, - }, - "subscription": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Name of the subscription.`, - }, - }, - }, - AtLeastOneOf: []string{"trigger_template", "github", "bitbucket_server_trigger_config", "pubsub_config", "webhook_config", "source_to_build"}, - }, - "service_account": { - Type: schema.TypeString, - Optional: true, - Description: `The service account used for all user-controlled operations including -triggers.patch, triggers.run, builds.create, and builds.cancel. 
- -If no service account is set, then the standard Cloud Build service account -([PROJECT_NUM]@system.gserviceaccount.com) will be used instead. - -Format: projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_ID_OR_EMAIL}`, - }, - "source_to_build": { - Type: schema.TypeList, - Optional: true, - Description: `The repo and ref of the repository from which to build. -This field is used only for those triggers that do not respond to SCM events. -Triggers that respond to such events build source at whatever commit caused the event. -This field is currently only used by Webhook, Pub/Sub, Manual, and Cron triggers. - -One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ref": { - Type: schema.TypeString, - Required: true, - Description: `The branch or tag to use. Must start with "refs/" (required).`, - }, - "repo_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET_SERVER"}), - Description: `The type of the repo, since it may not be explicit from the repo field (e.g from a URL). -Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB, BITBUCKET_SERVER Possible values: ["UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET_SERVER"]`, - }, - "uri": { - Type: schema.TypeString, - Required: true, - Description: `The URI of the repo (required).`, - }, - "github_enterprise_config": { - Type: schema.TypeString, - Optional: true, - Description: `The full resource name of the github enterprise config. -Format: projects/{project}/locations/{location}/githubEnterpriseConfigs/{id}. 
projects/{project}/githubEnterpriseConfigs/{id}.`, - }, - }, - }, - AtLeastOneOf: []string{"trigger_template", "github", "bitbucket_server_trigger_config", "pubsub_config", "webhook_config", "source_to_build"}, - }, - "substitutions": { - Type: schema.TypeMap, - Optional: true, - Description: `Substitutions data for Build resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "tags": { - Type: schema.TypeList, - Optional: true, - Description: `Tags for annotation of a BuildTrigger`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "trigger_template": { - Type: schema.TypeList, - Optional: true, - Description: `Template describing the types of source changes to trigger a build. - -Branch and tag names in trigger templates are interpreted as regular -expressions. Any branch or tag change that matches that regular -expression will trigger a build. - -One of 'trigger_template', 'github', 'pubsub_config', 'webhook_config' or 'source_to_build' must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "branch_name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the branch to build. Exactly one a of branch name, tag, or commit SHA must be provided. -This field is a regular expression.`, - ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, - }, - "commit_sha": { - Type: schema.TypeString, - Optional: true, - Description: `Explicit commit SHA to build. Exactly one of a branch name, tag, or commit SHA must be provided.`, - ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, - }, - "dir": { - Type: schema.TypeString, - Optional: true, - Description: `Directory, relative to the source root, in which to run the build. - -This must be a relative path. 
If a step's dir is specified and -is an absolute path, this value is ignored for that step's -execution.`, - }, - "invert_regex": { - Type: schema.TypeBool, - Optional: true, - Description: `Only trigger a build if the revision regex does NOT match the revision regex.`, - }, - "project_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `ID of the project that owns the Cloud Source Repository. If -omitted, the project ID requesting the build is assumed.`, - }, - "repo_name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the Cloud Source Repository. If omitted, the name "default" is assumed.`, - Default: "default", - }, - "tag_name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the tag to build. Exactly one of a branch name, tag, or commit SHA must be provided. -This field is a regular expression.`, - ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, - }, - }, - }, - AtLeastOneOf: []string{"trigger_template", "github", "bitbucket_server_trigger_config", "pubsub_config", "webhook_config", "source_to_build"}, - }, - "webhook_config": { - Type: schema.TypeList, - Optional: true, - Description: `WebhookConfig describes the configuration of a trigger that creates -a build whenever a webhook is sent to a trigger's webhook URL. - -One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret": { - Type: schema.TypeString, - Required: true, - Description: `Resource name for the secret required as a URL parameter.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `Potential issues with the underlying Pub/Sub subscription configuration. 
-Only populated on get requests.`, - }, - }, - }, - AtLeastOneOf: []string{"trigger_template", "github", "bitbucket_server_trigger_config", "pubsub_config", "webhook_config", "source_to_build"}, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Time when the trigger was created.`, - }, - "trigger_id": { - Type: schema.TypeString, - Computed: true, - Description: `The unique identifier for the trigger.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudBuildTriggerCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandCloudBuildTriggerName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandCloudBuildTriggerDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - tagsProp, err := expandCloudBuildTriggerTags(d.Get("tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tags"); !isEmptyValue(reflect.ValueOf(tagsProp)) && (ok || !reflect.DeepEqual(v, tagsProp)) { - obj["tags"] = tagsProp - } - disabledProp, err := expandCloudBuildTriggerDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - 
substitutionsProp, err := expandCloudBuildTriggerSubstitutions(d.Get("substitutions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("substitutions"); !isEmptyValue(reflect.ValueOf(substitutionsProp)) && (ok || !reflect.DeepEqual(v, substitutionsProp)) { - obj["substitutions"] = substitutionsProp - } - serviceAccountProp, err := expandCloudBuildTriggerServiceAccount(d.Get("service_account"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account"); !isEmptyValue(reflect.ValueOf(serviceAccountProp)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { - obj["serviceAccount"] = serviceAccountProp - } - includeBuildLogsProp, err := expandCloudBuildTriggerIncludeBuildLogs(d.Get("include_build_logs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("include_build_logs"); !isEmptyValue(reflect.ValueOf(includeBuildLogsProp)) && (ok || !reflect.DeepEqual(v, includeBuildLogsProp)) { - obj["includeBuildLogs"] = includeBuildLogsProp - } - filenameProp, err := expandCloudBuildTriggerFilename(d.Get("filename"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filename"); !isEmptyValue(reflect.ValueOf(filenameProp)) && (ok || !reflect.DeepEqual(v, filenameProp)) { - obj["filename"] = filenameProp - } - filterProp, err := expandCloudBuildTriggerFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - gitFileSourceProp, err := expandCloudBuildTriggerGitFileSource(d.Get("git_file_source"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("git_file_source"); !isEmptyValue(reflect.ValueOf(gitFileSourceProp)) && (ok || !reflect.DeepEqual(v, gitFileSourceProp)) { - obj["gitFileSource"] = gitFileSourceProp - } - sourceToBuildProp, err := 
expandCloudBuildTriggerSourceToBuild(d.Get("source_to_build"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_to_build"); !isEmptyValue(reflect.ValueOf(sourceToBuildProp)) && (ok || !reflect.DeepEqual(v, sourceToBuildProp)) { - obj["sourceToBuild"] = sourceToBuildProp - } - ignoredFilesProp, err := expandCloudBuildTriggerIgnoredFiles(d.Get("ignored_files"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ignored_files"); !isEmptyValue(reflect.ValueOf(ignoredFilesProp)) && (ok || !reflect.DeepEqual(v, ignoredFilesProp)) { - obj["ignoredFiles"] = ignoredFilesProp - } - includedFilesProp, err := expandCloudBuildTriggerIncludedFiles(d.Get("included_files"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("included_files"); !isEmptyValue(reflect.ValueOf(includedFilesProp)) && (ok || !reflect.DeepEqual(v, includedFilesProp)) { - obj["includedFiles"] = includedFilesProp - } - triggerTemplateProp, err := expandCloudBuildTriggerTriggerTemplate(d.Get("trigger_template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trigger_template"); !isEmptyValue(reflect.ValueOf(triggerTemplateProp)) && (ok || !reflect.DeepEqual(v, triggerTemplateProp)) { - obj["triggerTemplate"] = triggerTemplateProp - } - githubProp, err := expandCloudBuildTriggerGithub(d.Get("github"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("github"); !isEmptyValue(reflect.ValueOf(githubProp)) && (ok || !reflect.DeepEqual(v, githubProp)) { - obj["github"] = githubProp - } - bitbucketServerTriggerConfigProp, err := expandCloudBuildTriggerBitbucketServerTriggerConfig(d.Get("bitbucket_server_trigger_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bitbucket_server_trigger_config"); !isEmptyValue(reflect.ValueOf(bitbucketServerTriggerConfigProp)) && (ok || !reflect.DeepEqual(v, bitbucketServerTriggerConfigProp)) { - 
obj["bitbucketServerTriggerConfig"] = bitbucketServerTriggerConfigProp - } - pubsubConfigProp, err := expandCloudBuildTriggerPubsubConfig(d.Get("pubsub_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_config"); !isEmptyValue(reflect.ValueOf(pubsubConfigProp)) && (ok || !reflect.DeepEqual(v, pubsubConfigProp)) { - obj["pubsubConfig"] = pubsubConfigProp - } - webhookConfigProp, err := expandCloudBuildTriggerWebhookConfig(d.Get("webhook_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("webhook_config"); !isEmptyValue(reflect.ValueOf(webhookConfigProp)) && (ok || !reflect.DeepEqual(v, webhookConfigProp)) { - obj["webhookConfig"] = webhookConfigProp - } - approvalConfigProp, err := expandCloudBuildTriggerApprovalConfig(d.Get("approval_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("approval_config"); !isEmptyValue(reflect.ValueOf(approvalConfigProp)) && (ok || !reflect.DeepEqual(v, approvalConfigProp)) { - obj["approvalConfig"] = approvalConfigProp - } - buildProp, err := expandCloudBuildTriggerBuild(d.Get("build"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("build"); !isEmptyValue(reflect.ValueOf(buildProp)) && (ok || !reflect.DeepEqual(v, buildProp)) { - obj["build"] = buildProp - } - - url, err := replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/triggers") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Trigger: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Trigger: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, 
d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Trigger: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - triggerId, ok := res["id"] - if !ok { - return fmt.Errorf("Create response didn't contain id. Create may not have succeeded.") - } - if err := d.Set("trigger_id", triggerId.(string)); err != nil { - return fmt.Errorf("Error setting trigger_id: %s", err) - } - - // Store the ID now. We tried to set it before and it failed because - // trigger_id didn't exist yet. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - // Force legacy id format for global triggers. - id = strings.ReplaceAll(id, "/locations/global/", "/") - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Trigger %q: %#v", d.Id(), res) - - return resourceCloudBuildTriggerRead(d, meta) -} - -func resourceCloudBuildTriggerRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Trigger: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // To support import with the legacy id format. 
- url = strings.ReplaceAll(url, "/locations//", "/locations/global/") - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudBuildTrigger %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - - if err := d.Set("trigger_id", flattenCloudBuildTriggerTriggerId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("name", flattenCloudBuildTriggerName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("description", flattenCloudBuildTriggerDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("tags", flattenCloudBuildTriggerTags(res["tags"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("disabled", flattenCloudBuildTriggerDisabled(res["disabled"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("create_time", flattenCloudBuildTriggerCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("substitutions", flattenCloudBuildTriggerSubstitutions(res["substitutions"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("service_account", flattenCloudBuildTriggerServiceAccount(res["serviceAccount"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("include_build_logs", flattenCloudBuildTriggerIncludeBuildLogs(res["includeBuildLogs"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("filename", flattenCloudBuildTriggerFilename(res["filename"], d, config)); err != nil { - 
return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("filter", flattenCloudBuildTriggerFilter(res["filter"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("git_file_source", flattenCloudBuildTriggerGitFileSource(res["gitFileSource"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("source_to_build", flattenCloudBuildTriggerSourceToBuild(res["sourceToBuild"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("ignored_files", flattenCloudBuildTriggerIgnoredFiles(res["ignoredFiles"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("included_files", flattenCloudBuildTriggerIncludedFiles(res["includedFiles"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("trigger_template", flattenCloudBuildTriggerTriggerTemplate(res["triggerTemplate"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("github", flattenCloudBuildTriggerGithub(res["github"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("bitbucket_server_trigger_config", flattenCloudBuildTriggerBitbucketServerTriggerConfig(res["bitbucketServerTriggerConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("pubsub_config", flattenCloudBuildTriggerPubsubConfig(res["pubsubConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("webhook_config", flattenCloudBuildTriggerWebhookConfig(res["webhookConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("approval_config", flattenCloudBuildTriggerApprovalConfig(res["approvalConfig"], d, config)); err != nil { - return 
fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("build", flattenCloudBuildTriggerBuild(res["build"], d, config)); err != nil { - return fmt.Errorf("Error reading Trigger: %s", err) - } - - return nil -} - -func resourceCloudBuildTriggerUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Trigger: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandCloudBuildTriggerName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandCloudBuildTriggerDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - tagsProp, err := expandCloudBuildTriggerTags(d.Get("tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tags"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tagsProp)) { - obj["tags"] = tagsProp - } - disabledProp, err := expandCloudBuildTriggerDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - substitutionsProp, err := expandCloudBuildTriggerSubstitutions(d.Get("substitutions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("substitutions"); !isEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, substitutionsProp)) { - obj["substitutions"] = substitutionsProp - } - serviceAccountProp, err := expandCloudBuildTriggerServiceAccount(d.Get("service_account"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { - obj["serviceAccount"] = serviceAccountProp - } - includeBuildLogsProp, err := expandCloudBuildTriggerIncludeBuildLogs(d.Get("include_build_logs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("include_build_logs"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, includeBuildLogsProp)) { - obj["includeBuildLogs"] = includeBuildLogsProp - } - filenameProp, err := expandCloudBuildTriggerFilename(d.Get("filename"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filename"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filenameProp)) { - obj["filename"] = filenameProp - } - filterProp, err := expandCloudBuildTriggerFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - gitFileSourceProp, err := expandCloudBuildTriggerGitFileSource(d.Get("git_file_source"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("git_file_source"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gitFileSourceProp)) { - obj["gitFileSource"] = gitFileSourceProp - } - sourceToBuildProp, err := expandCloudBuildTriggerSourceToBuild(d.Get("source_to_build"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_to_build"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceToBuildProp)) { - obj["sourceToBuild"] = sourceToBuildProp - } - ignoredFilesProp, err := 
expandCloudBuildTriggerIgnoredFiles(d.Get("ignored_files"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ignored_files"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ignoredFilesProp)) { - obj["ignoredFiles"] = ignoredFilesProp - } - includedFilesProp, err := expandCloudBuildTriggerIncludedFiles(d.Get("included_files"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("included_files"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, includedFilesProp)) { - obj["includedFiles"] = includedFilesProp - } - triggerTemplateProp, err := expandCloudBuildTriggerTriggerTemplate(d.Get("trigger_template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trigger_template"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, triggerTemplateProp)) { - obj["triggerTemplate"] = triggerTemplateProp - } - githubProp, err := expandCloudBuildTriggerGithub(d.Get("github"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("github"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, githubProp)) { - obj["github"] = githubProp - } - bitbucketServerTriggerConfigProp, err := expandCloudBuildTriggerBitbucketServerTriggerConfig(d.Get("bitbucket_server_trigger_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bitbucket_server_trigger_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bitbucketServerTriggerConfigProp)) { - obj["bitbucketServerTriggerConfig"] = bitbucketServerTriggerConfigProp - } - pubsubConfigProp, err := expandCloudBuildTriggerPubsubConfig(d.Get("pubsub_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pubsubConfigProp)) { - obj["pubsubConfig"] = pubsubConfigProp - } - webhookConfigProp, err := 
expandCloudBuildTriggerWebhookConfig(d.Get("webhook_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("webhook_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, webhookConfigProp)) { - obj["webhookConfig"] = webhookConfigProp - } - approvalConfigProp, err := expandCloudBuildTriggerApprovalConfig(d.Get("approval_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("approval_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, approvalConfigProp)) { - obj["approvalConfig"] = approvalConfigProp - } - buildProp, err := expandCloudBuildTriggerBuild(d.Get("build"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("build"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, buildProp)) { - obj["build"] = buildProp - } - - url, err := replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Trigger %q: %#v", d.Id(), obj) - obj["id"] = d.Get("trigger_id") - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Trigger %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Trigger %q: %#v", d.Id(), res) - } - - return resourceCloudBuildTriggerRead(d, meta) -} - -func resourceCloudBuildTriggerDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project 
for Trigger: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Trigger %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Trigger") - } - - log.Printf("[DEBUG] Finished deleting Trigger %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudBuildTriggerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/triggers/(?P[^/]+)", - "projects/(?P[^/]+)/triggers/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Force legacy id format for global triggers. - id = strings.ReplaceAll(id, "/locations//", "/") - id = strings.ReplaceAll(id, "/locations/global/", "/") - d.SetId(id) - if d.Get("location") == "" { - // Happens when imported with legacy import format. 
- d.Set("location", "global") - } - - return []*schema.ResourceData{d}, nil -} - -func flattenCloudBuildTriggerTriggerId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTags(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerSubstitutions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerIncludeBuildLogs(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerFilename(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGitFileSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path"] = - flattenCloudBuildTriggerGitFileSourcePath(original["path"], d, config) - transformed["uri"] = - flattenCloudBuildTriggerGitFileSourceUri(original["uri"], d, config) - transformed["repo_type"] = - 
flattenCloudBuildTriggerGitFileSourceRepoType(original["repoType"], d, config) - transformed["revision"] = - flattenCloudBuildTriggerGitFileSourceRevision(original["revision"], d, config) - transformed["github_enterprise_config"] = - flattenCloudBuildTriggerGitFileSourceGithubEnterpriseConfig(original["githubEnterpriseConfig"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerGitFileSourcePath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGitFileSourceUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGitFileSourceRepoType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGitFileSourceRevision(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGitFileSourceGithubEnterpriseConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerSourceToBuild(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["uri"] = - flattenCloudBuildTriggerSourceToBuildUri(original["uri"], d, config) - transformed["ref"] = - flattenCloudBuildTriggerSourceToBuildRef(original["ref"], d, config) - transformed["repo_type"] = - flattenCloudBuildTriggerSourceToBuildRepoType(original["repoType"], d, config) - transformed["github_enterprise_config"] = - flattenCloudBuildTriggerSourceToBuildGithubEnterpriseConfig(original["githubEnterpriseConfig"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerSourceToBuildUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudBuildTriggerSourceToBuildRef(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerSourceToBuildRepoType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerSourceToBuildGithubEnterpriseConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerIgnoredFiles(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerIncludedFiles(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = - flattenCloudBuildTriggerTriggerTemplateProjectId(original["projectId"], d, config) - transformed["repo_name"] = - flattenCloudBuildTriggerTriggerTemplateRepoName(original["repoName"], d, config) - transformed["dir"] = - flattenCloudBuildTriggerTriggerTemplateDir(original["dir"], d, config) - transformed["invert_regex"] = - flattenCloudBuildTriggerTriggerTemplateInvertRegex(original["invertRegex"], d, config) - transformed["branch_name"] = - flattenCloudBuildTriggerTriggerTemplateBranchName(original["branchName"], d, config) - transformed["tag_name"] = - flattenCloudBuildTriggerTriggerTemplateTagName(original["tagName"], d, config) - transformed["commit_sha"] = - flattenCloudBuildTriggerTriggerTemplateCommitSha(original["commitSha"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerTriggerTemplateProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplateRepoName(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplateDir(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplateInvertRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplateBranchName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplateTagName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplateCommitSha(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithub(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["owner"] = - flattenCloudBuildTriggerGithubOwner(original["owner"], d, config) - transformed["name"] = - flattenCloudBuildTriggerGithubName(original["name"], d, config) - transformed["pull_request"] = - flattenCloudBuildTriggerGithubPullRequest(original["pullRequest"], d, config) - transformed["push"] = - flattenCloudBuildTriggerGithubPush(original["push"], d, config) - transformed["enterprise_config_resource_name"] = - flattenCloudBuildTriggerGithubEnterpriseConfigResourceName(original["enterpriseConfigResourceName"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerGithubOwner(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithubName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithubPullRequest(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v 
== nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["branch"] = - flattenCloudBuildTriggerGithubPullRequestBranch(original["branch"], d, config) - transformed["comment_control"] = - flattenCloudBuildTriggerGithubPullRequestCommentControl(original["commentControl"], d, config) - transformed["invert_regex"] = - flattenCloudBuildTriggerGithubPullRequestInvertRegex(original["invertRegex"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerGithubPullRequestBranch(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithubPullRequestCommentControl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithubPullRequestInvertRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithubPush(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["invert_regex"] = - flattenCloudBuildTriggerGithubPushInvertRegex(original["invertRegex"], d, config) - transformed["branch"] = - flattenCloudBuildTriggerGithubPushBranch(original["branch"], d, config) - transformed["tag"] = - flattenCloudBuildTriggerGithubPushTag(original["tag"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerGithubPushInvertRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithubPushBranch(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithubPushTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v 
-} - -func flattenCloudBuildTriggerGithubEnterpriseConfigResourceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBitbucketServerTriggerConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["repo_slug"] = - flattenCloudBuildTriggerBitbucketServerTriggerConfigRepoSlug(original["repoSlug"], d, config) - transformed["project_key"] = - flattenCloudBuildTriggerBitbucketServerTriggerConfigProjectKey(original["projectKey"], d, config) - transformed["bitbucket_server_config_resource"] = - flattenCloudBuildTriggerBitbucketServerTriggerConfigBitbucketServerConfigResource(original["bitbucketServerConfigResource"], d, config) - transformed["pull_request"] = - flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequest(original["pullRequest"], d, config) - transformed["push"] = - flattenCloudBuildTriggerBitbucketServerTriggerConfigPush(original["push"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerBitbucketServerTriggerConfigRepoSlug(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBitbucketServerTriggerConfigProjectKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBitbucketServerTriggerConfigBitbucketServerConfigResource(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequest(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["branch"] = - 
flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequestBranch(original["branch"], d, config) - transformed["comment_control"] = - flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequestCommentControl(original["commentControl"], d, config) - transformed["invert_regex"] = - flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequestInvertRegex(original["invertRegex"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequestBranch(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequestCommentControl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequestInvertRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBitbucketServerTriggerConfigPush(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["invert_regex"] = - flattenCloudBuildTriggerBitbucketServerTriggerConfigPushInvertRegex(original["invertRegex"], d, config) - transformed["branch"] = - flattenCloudBuildTriggerBitbucketServerTriggerConfigPushBranch(original["branch"], d, config) - transformed["tag"] = - flattenCloudBuildTriggerBitbucketServerTriggerConfigPushTag(original["tag"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerBitbucketServerTriggerConfigPushInvertRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBitbucketServerTriggerConfigPushBranch(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudBuildTriggerBitbucketServerTriggerConfigPushTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerPubsubConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["subscription"] = - flattenCloudBuildTriggerPubsubConfigSubscription(original["subscription"], d, config) - transformed["topic"] = - flattenCloudBuildTriggerPubsubConfigTopic(original["topic"], d, config) - transformed["service_account_email"] = - flattenCloudBuildTriggerPubsubConfigServiceAccountEmail(original["service_account_email"], d, config) - transformed["state"] = - flattenCloudBuildTriggerPubsubConfigState(original["state"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerPubsubConfigSubscription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerPubsubConfigTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerPubsubConfigServiceAccountEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerPubsubConfigState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerWebhookConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["secret"] = - flattenCloudBuildTriggerWebhookConfigSecret(original["secret"], d, config) - transformed["state"] = - flattenCloudBuildTriggerWebhookConfigState(original["state"], d, config) - return []interface{}{transformed} -} -func 
flattenCloudBuildTriggerWebhookConfigSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerWebhookConfigState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerApprovalConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - transformed := make(map[string]interface{}) - if v == nil { - // Disabled by default, but API will not return object if value is false - transformed["approval_required"] = false - return []interface{}{transformed} - } - - original := v.(map[string]interface{}) - transformed["approval_required"] = original["approvalRequired"] - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerBuild(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["source"] = - flattenCloudBuildTriggerBuildSource(original["source"], d, config) - transformed["tags"] = - flattenCloudBuildTriggerBuildTags(original["tags"], d, config) - transformed["images"] = - flattenCloudBuildTriggerBuildImages(original["images"], d, config) - transformed["substitutions"] = - flattenCloudBuildTriggerBuildSubstitutions(original["substitutions"], d, config) - transformed["queue_ttl"] = - flattenCloudBuildTriggerBuildQueueTtl(original["queueTtl"], d, config) - transformed["logs_bucket"] = - flattenCloudBuildTriggerBuildLogsBucket(original["logsBucket"], d, config) - transformed["timeout"] = - flattenCloudBuildTriggerBuildTimeout(original["timeout"], d, config) - transformed["secret"] = - flattenCloudBuildTriggerBuildSecret(original["secrets"], d, config) - transformed["available_secrets"] = - flattenCloudBuildTriggerBuildAvailableSecrets(original["availableSecrets"], d, config) - transformed["step"] = - 
flattenCloudBuildTriggerBuildStep(original["steps"], d, config) - transformed["artifacts"] = - flattenCloudBuildTriggerBuildArtifacts(original["artifacts"], d, config) - transformed["options"] = - flattenCloudBuildTriggerBuildOptions(original["options"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerBuildSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["storage_source"] = - flattenCloudBuildTriggerBuildSourceStorageSource(original["storageSource"], d, config) - transformed["repo_source"] = - flattenCloudBuildTriggerBuildSourceRepoSource(original["repoSource"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerBuildSourceStorageSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["bucket"] = - flattenCloudBuildTriggerBuildSourceStorageSourceBucket(original["bucket"], d, config) - transformed["object"] = - flattenCloudBuildTriggerBuildSourceStorageSourceObject(original["object"], d, config) - transformed["generation"] = - flattenCloudBuildTriggerBuildSourceStorageSourceGeneration(original["generation"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerBuildSourceStorageSourceBucket(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceStorageSourceObject(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceStorageSourceGeneration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudBuildTriggerBuildSourceRepoSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = - flattenCloudBuildTriggerBuildSourceRepoSourceProjectId(original["projectId"], d, config) - transformed["repo_name"] = - flattenCloudBuildTriggerBuildSourceRepoSourceRepoName(original["repoName"], d, config) - transformed["dir"] = - flattenCloudBuildTriggerBuildSourceRepoSourceDir(original["dir"], d, config) - transformed["invert_regex"] = - flattenCloudBuildTriggerBuildSourceRepoSourceInvertRegex(original["invertRegex"], d, config) - transformed["substitutions"] = - flattenCloudBuildTriggerBuildSourceRepoSourceSubstitutions(original["substitutions"], d, config) - transformed["branch_name"] = - flattenCloudBuildTriggerBuildSourceRepoSourceBranchName(original["branchName"], d, config) - transformed["tag_name"] = - flattenCloudBuildTriggerBuildSourceRepoSourceTagName(original["tagName"], d, config) - transformed["commit_sha"] = - flattenCloudBuildTriggerBuildSourceRepoSourceCommitSha(original["commitSha"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerBuildSourceRepoSourceProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceRepoName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceDir(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceInvertRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceSubstitutions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return 
v -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceBranchName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceTagName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceCommitSha(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildTags(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildImages(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSubstitutions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildQueueTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildLogsBucket(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "kms_key_name": flattenCloudBuildTriggerBuildSecretKmsKeyName(original["kmsKeyName"], d, config), - "secret_env": flattenCloudBuildTriggerBuildSecretSecretEnv(original["secretEnv"], d, config), - }) - } - return transformed -} -func flattenCloudBuildTriggerBuildSecretKmsKeyName(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSecretSecretEnv(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildAvailableSecrets(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["secret_manager"] = - flattenCloudBuildTriggerBuildAvailableSecretsSecretManager(original["secretManager"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerBuildAvailableSecretsSecretManager(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "version_name": flattenCloudBuildTriggerBuildAvailableSecretsSecretManagerVersionName(original["versionName"], d, config), - "env": flattenCloudBuildTriggerBuildAvailableSecretsSecretManagerEnv(original["env"], d, config), - }) - } - return transformed -} -func flattenCloudBuildTriggerBuildAvailableSecretsSecretManagerVersionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildAvailableSecretsSecretManagerEnv(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStep(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not 
include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudBuildTriggerBuildStepName(original["name"], d, config), - "args": flattenCloudBuildTriggerBuildStepArgs(original["args"], d, config), - "env": flattenCloudBuildTriggerBuildStepEnv(original["env"], d, config), - "id": flattenCloudBuildTriggerBuildStepId(original["id"], d, config), - "entrypoint": flattenCloudBuildTriggerBuildStepEntrypoint(original["entrypoint"], d, config), - "dir": flattenCloudBuildTriggerBuildStepDir(original["dir"], d, config), - "secret_env": flattenCloudBuildTriggerBuildStepSecretEnv(original["secretEnv"], d, config), - "timeout": flattenCloudBuildTriggerBuildStepTimeout(original["timeout"], d, config), - "timing": flattenCloudBuildTriggerBuildStepTiming(original["timing"], d, config), - "volumes": flattenCloudBuildTriggerBuildStepVolumes(original["volumes"], d, config), - "wait_for": flattenCloudBuildTriggerBuildStepWaitFor(original["waitFor"], d, config), - "script": flattenCloudBuildTriggerBuildStepScript(original["script"], d, config), - }) - } - return transformed -} -func flattenCloudBuildTriggerBuildStepName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepArgs(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepEnv(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepEntrypoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepDir(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepSecretEnv(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepTiming(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepVolumes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudBuildTriggerBuildStepVolumesName(original["name"], d, config), - "path": flattenCloudBuildTriggerBuildStepVolumesPath(original["path"], d, config), - }) - } - return transformed -} -func flattenCloudBuildTriggerBuildStepVolumesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepVolumesPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepWaitFor(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepScript(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildArtifacts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["images"] = - flattenCloudBuildTriggerBuildArtifactsImages(original["images"], d, config) - transformed["objects"] = - flattenCloudBuildTriggerBuildArtifactsObjects(original["objects"], 
d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerBuildArtifactsImages(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildArtifactsObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["location"] = - flattenCloudBuildTriggerBuildArtifactsObjectsLocation(original["location"], d, config) - transformed["paths"] = - flattenCloudBuildTriggerBuildArtifactsObjectsPaths(original["paths"], d, config) - transformed["timing"] = - flattenCloudBuildTriggerBuildArtifactsObjectsTiming(original["timing"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerBuildArtifactsObjectsLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildArtifactsObjectsPaths(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildArtifactsObjectsTiming(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["start_time"] = - flattenCloudBuildTriggerBuildArtifactsObjectsTimingStartTime(original["startTime"], d, config) - transformed["end_time"] = - flattenCloudBuildTriggerBuildArtifactsObjectsTimingEndTime(original["endTime"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerBuildArtifactsObjectsTimingStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildArtifactsObjectsTimingEndTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenCloudBuildTriggerBuildOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["source_provenance_hash"] = - flattenCloudBuildTriggerBuildOptionsSourceProvenanceHash(original["sourceProvenanceHash"], d, config) - transformed["requested_verify_option"] = - flattenCloudBuildTriggerBuildOptionsRequestedVerifyOption(original["requestedVerifyOption"], d, config) - transformed["machine_type"] = - flattenCloudBuildTriggerBuildOptionsMachineType(original["machineType"], d, config) - transformed["disk_size_gb"] = - flattenCloudBuildTriggerBuildOptionsDiskSizeGb(original["diskSizeGb"], d, config) - transformed["substitution_option"] = - flattenCloudBuildTriggerBuildOptionsSubstitutionOption(original["substitutionOption"], d, config) - transformed["dynamic_substitutions"] = - flattenCloudBuildTriggerBuildOptionsDynamicSubstitutions(original["dynamicSubstitutions"], d, config) - transformed["log_streaming_option"] = - flattenCloudBuildTriggerBuildOptionsLogStreamingOption(original["logStreamingOption"], d, config) - transformed["worker_pool"] = - flattenCloudBuildTriggerBuildOptionsWorkerPool(original["workerPool"], d, config) - transformed["logging"] = - flattenCloudBuildTriggerBuildOptionsLogging(original["logging"], d, config) - transformed["env"] = - flattenCloudBuildTriggerBuildOptionsEnv(original["env"], d, config) - transformed["secret_env"] = - flattenCloudBuildTriggerBuildOptionsSecretEnv(original["secretEnv"], d, config) - transformed["volumes"] = - flattenCloudBuildTriggerBuildOptionsVolumes(original["volumes"], d, config) - return []interface{}{transformed} -} -func flattenCloudBuildTriggerBuildOptionsSourceProvenanceHash(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudBuildTriggerBuildOptionsRequestedVerifyOption(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsMachineType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsDiskSizeGb(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudBuildTriggerBuildOptionsSubstitutionOption(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsDynamicSubstitutions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsLogStreamingOption(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsWorkerPool(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsLogging(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsEnv(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsSecretEnv(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsVolumes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := 
raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudBuildTriggerBuildOptionsVolumesName(original["name"], d, config), - "path": flattenCloudBuildTriggerBuildOptionsVolumesPath(original["path"], d, config), - }) - } - return transformed -} -func flattenCloudBuildTriggerBuildOptionsVolumesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsVolumesPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudBuildTriggerName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerSubstitutions(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudBuildTriggerServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerIncludeBuildLogs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerFilename(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandCloudBuildTriggerFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGitFileSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandCloudBuildTriggerGitFileSourcePath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedUri, err := expandCloudBuildTriggerGitFileSourceUri(original["uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !isEmptyValue(val) { - transformed["uri"] = transformedUri - } - - transformedRepoType, err := expandCloudBuildTriggerGitFileSourceRepoType(original["repo_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRepoType); val.IsValid() && !isEmptyValue(val) { - transformed["repoType"] = transformedRepoType - } - - transformedRevision, err := expandCloudBuildTriggerGitFileSourceRevision(original["revision"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRevision); val.IsValid() && !isEmptyValue(val) { - transformed["revision"] = transformedRevision - } - - transformedGithubEnterpriseConfig, err := expandCloudBuildTriggerGitFileSourceGithubEnterpriseConfig(original["github_enterprise_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGithubEnterpriseConfig); val.IsValid() && !isEmptyValue(val) { - transformed["githubEnterpriseConfig"] = transformedGithubEnterpriseConfig - } - - return transformed, nil -} - -func 
expandCloudBuildTriggerGitFileSourcePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGitFileSourceUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGitFileSourceRepoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGitFileSourceRevision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGitFileSourceGithubEnterpriseConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerSourceToBuild(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUri, err := expandCloudBuildTriggerSourceToBuildUri(original["uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !isEmptyValue(val) { - transformed["uri"] = transformedUri - } - - transformedRef, err := expandCloudBuildTriggerSourceToBuildRef(original["ref"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRef); val.IsValid() && !isEmptyValue(val) { - transformed["ref"] = transformedRef - } - - transformedRepoType, err := expandCloudBuildTriggerSourceToBuildRepoType(original["repo_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRepoType); val.IsValid() && !isEmptyValue(val) { - transformed["repoType"] = transformedRepoType - } - - transformedGithubEnterpriseConfig, err := 
expandCloudBuildTriggerSourceToBuildGithubEnterpriseConfig(original["github_enterprise_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGithubEnterpriseConfig); val.IsValid() && !isEmptyValue(val) { - transformed["githubEnterpriseConfig"] = transformedGithubEnterpriseConfig - } - - return transformed, nil -} - -func expandCloudBuildTriggerSourceToBuildUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerSourceToBuildRef(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerSourceToBuildRepoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerSourceToBuildGithubEnterpriseConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerIgnoredFiles(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerIncludedFiles(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTriggerTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId, err := expandCloudBuildTriggerTriggerTemplateProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedRepoName, err := expandCloudBuildTriggerTriggerTemplateRepoName(original["repo_name"], d, config) - if err != nil { 
- return nil, err - } else if val := reflect.ValueOf(transformedRepoName); val.IsValid() && !isEmptyValue(val) { - transformed["repoName"] = transformedRepoName - } - - transformedDir, err := expandCloudBuildTriggerTriggerTemplateDir(original["dir"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDir); val.IsValid() && !isEmptyValue(val) { - transformed["dir"] = transformedDir - } - - transformedInvertRegex, err := expandCloudBuildTriggerTriggerTemplateInvertRegex(original["invert_regex"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !isEmptyValue(val) { - transformed["invertRegex"] = transformedInvertRegex - } - - transformedBranchName, err := expandCloudBuildTriggerTriggerTemplateBranchName(original["branch_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBranchName); val.IsValid() && !isEmptyValue(val) { - transformed["branchName"] = transformedBranchName - } - - transformedTagName, err := expandCloudBuildTriggerTriggerTemplateTagName(original["tag_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTagName); val.IsValid() && !isEmptyValue(val) { - transformed["tagName"] = transformedTagName - } - - transformedCommitSha, err := expandCloudBuildTriggerTriggerTemplateCommitSha(original["commit_sha"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCommitSha); val.IsValid() && !isEmptyValue(val) { - transformed["commitSha"] = transformedCommitSha - } - - return transformed, nil -} - -func expandCloudBuildTriggerTriggerTemplateProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTriggerTemplateRepoName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandCloudBuildTriggerTriggerTemplateDir(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTriggerTemplateInvertRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTriggerTemplateBranchName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTriggerTemplateTagName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTriggerTemplateCommitSha(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithub(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedOwner, err := expandCloudBuildTriggerGithubOwner(original["owner"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOwner); val.IsValid() && !isEmptyValue(val) { - transformed["owner"] = transformedOwner - } - - transformedName, err := expandCloudBuildTriggerGithubName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPullRequest, err := expandCloudBuildTriggerGithubPullRequest(original["pull_request"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPullRequest); val.IsValid() && !isEmptyValue(val) { - transformed["pullRequest"] = transformedPullRequest - } - - transformedPush, err := expandCloudBuildTriggerGithubPush(original["push"], d, config) - if err != nil 
{ - return nil, err - } else if val := reflect.ValueOf(transformedPush); val.IsValid() && !isEmptyValue(val) { - transformed["push"] = transformedPush - } - - transformedEnterpriseConfigResourceName, err := expandCloudBuildTriggerGithubEnterpriseConfigResourceName(original["enterprise_config_resource_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnterpriseConfigResourceName); val.IsValid() && !isEmptyValue(val) { - transformed["enterpriseConfigResourceName"] = transformedEnterpriseConfigResourceName - } - - return transformed, nil -} - -func expandCloudBuildTriggerGithubOwner(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubPullRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBranch, err := expandCloudBuildTriggerGithubPullRequestBranch(original["branch"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBranch); val.IsValid() && !isEmptyValue(val) { - transformed["branch"] = transformedBranch - } - - transformedCommentControl, err := expandCloudBuildTriggerGithubPullRequestCommentControl(original["comment_control"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCommentControl); val.IsValid() && !isEmptyValue(val) { - transformed["commentControl"] = transformedCommentControl - } - - transformedInvertRegex, err := expandCloudBuildTriggerGithubPullRequestInvertRegex(original["invert_regex"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedInvertRegex); val.IsValid() && !isEmptyValue(val) { - transformed["invertRegex"] = transformedInvertRegex - } - - return transformed, nil -} - -func expandCloudBuildTriggerGithubPullRequestBranch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubPullRequestCommentControl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubPullRequestInvertRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubPush(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInvertRegex, err := expandCloudBuildTriggerGithubPushInvertRegex(original["invert_regex"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !isEmptyValue(val) { - transformed["invertRegex"] = transformedInvertRegex - } - - transformedBranch, err := expandCloudBuildTriggerGithubPushBranch(original["branch"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBranch); val.IsValid() && !isEmptyValue(val) { - transformed["branch"] = transformedBranch - } - - transformedTag, err := expandCloudBuildTriggerGithubPushTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - return transformed, nil -} - -func expandCloudBuildTriggerGithubPushInvertRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandCloudBuildTriggerGithubPushBranch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubPushTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubEnterpriseConfigResourceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBitbucketServerTriggerConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRepoSlug, err := expandCloudBuildTriggerBitbucketServerTriggerConfigRepoSlug(original["repo_slug"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRepoSlug); val.IsValid() && !isEmptyValue(val) { - transformed["repoSlug"] = transformedRepoSlug - } - - transformedProjectKey, err := expandCloudBuildTriggerBitbucketServerTriggerConfigProjectKey(original["project_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectKey); val.IsValid() && !isEmptyValue(val) { - transformed["projectKey"] = transformedProjectKey - } - - transformedBitbucketServerConfigResource, err := expandCloudBuildTriggerBitbucketServerTriggerConfigBitbucketServerConfigResource(original["bitbucket_server_config_resource"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBitbucketServerConfigResource); val.IsValid() && !isEmptyValue(val) { - transformed["bitbucketServerConfigResource"] = transformedBitbucketServerConfigResource - } - - transformedPullRequest, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequest(original["pull_request"], d, config) - if err != nil { - return 
nil, err - } else if val := reflect.ValueOf(transformedPullRequest); val.IsValid() && !isEmptyValue(val) { - transformed["pullRequest"] = transformedPullRequest - } - - transformedPush, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPush(original["push"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPush); val.IsValid() && !isEmptyValue(val) { - transformed["push"] = transformedPush - } - - return transformed, nil -} - -func expandCloudBuildTriggerBitbucketServerTriggerConfigRepoSlug(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBitbucketServerTriggerConfigProjectKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBitbucketServerTriggerConfigBitbucketServerConfigResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBranch, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequestBranch(original["branch"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBranch); val.IsValid() && !isEmptyValue(val) { - transformed["branch"] = transformedBranch - } - - transformedCommentControl, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequestCommentControl(original["comment_control"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCommentControl); val.IsValid() && !isEmptyValue(val) { - transformed["commentControl"] = transformedCommentControl - } - 
- transformedInvertRegex, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequestInvertRegex(original["invert_regex"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !isEmptyValue(val) { - transformed["invertRegex"] = transformedInvertRegex - } - - return transformed, nil -} - -func expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequestBranch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequestCommentControl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequestInvertRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBitbucketServerTriggerConfigPush(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInvertRegex, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPushInvertRegex(original["invert_regex"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !isEmptyValue(val) { - transformed["invertRegex"] = transformedInvertRegex - } - - transformedBranch, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPushBranch(original["branch"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBranch); val.IsValid() && !isEmptyValue(val) { - transformed["branch"] = transformedBranch - } - - transformedTag, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPushTag(original["tag"], d, config) - if err != nil { - 
return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - return transformed, nil -} - -func expandCloudBuildTriggerBitbucketServerTriggerConfigPushInvertRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBitbucketServerTriggerConfigPushBranch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBitbucketServerTriggerConfigPushTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerPubsubConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSubscription, err := expandCloudBuildTriggerPubsubConfigSubscription(original["subscription"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSubscription); val.IsValid() && !isEmptyValue(val) { - transformed["subscription"] = transformedSubscription - } - - transformedTopic, err := expandCloudBuildTriggerPubsubConfigTopic(original["topic"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !isEmptyValue(val) { - transformed["topic"] = transformedTopic - } - - transformedServiceAccountEmail, err := expandCloudBuildTriggerPubsubConfigServiceAccountEmail(original["service_account_email"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { - transformed["service_account_email"] = transformedServiceAccountEmail - } - - transformedState, err := 
expandCloudBuildTriggerPubsubConfigState(original["state"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedState); val.IsValid() && !isEmptyValue(val) { - transformed["state"] = transformedState - } - - return transformed, nil -} - -func expandCloudBuildTriggerPubsubConfigSubscription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerPubsubConfigTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerPubsubConfigServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerPubsubConfigState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerWebhookConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSecret, err := expandCloudBuildTriggerWebhookConfigSecret(original["secret"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { - transformed["secret"] = transformedSecret - } - - transformedState, err := expandCloudBuildTriggerWebhookConfigState(original["state"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedState); val.IsValid() && !isEmptyValue(val) { - transformed["state"] = transformedState - } - - return transformed, nil -} - -func expandCloudBuildTriggerWebhookConfigSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerWebhookConfigState(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerApprovalConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedApprovalRequired, err := expandCloudBuildTriggerApprovalConfigApprovalRequired(original["approval_required"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedApprovalRequired); val.IsValid() && !isEmptyValue(val) { - transformed["approvalRequired"] = transformedApprovalRequired - } - - return transformed, nil -} - -func expandCloudBuildTriggerApprovalConfigApprovalRequired(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuild(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSource, err := expandCloudBuildTriggerBuildSource(original["source"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSource); val.IsValid() && !isEmptyValue(val) { - transformed["source"] = transformedSource - } - - transformedTags, err := expandCloudBuildTriggerBuildTags(original["tags"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTags); val.IsValid() && !isEmptyValue(val) { - transformed["tags"] = transformedTags - } - - transformedImages, err := expandCloudBuildTriggerBuildImages(original["images"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedImages); val.IsValid() && !isEmptyValue(val) { - 
transformed["images"] = transformedImages - } - - transformedSubstitutions, err := expandCloudBuildTriggerBuildSubstitutions(original["substitutions"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSubstitutions); val.IsValid() && !isEmptyValue(val) { - transformed["substitutions"] = transformedSubstitutions - } - - transformedQueueTtl, err := expandCloudBuildTriggerBuildQueueTtl(original["queue_ttl"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedQueueTtl); val.IsValid() && !isEmptyValue(val) { - transformed["queueTtl"] = transformedQueueTtl - } - - transformedLogsBucket, err := expandCloudBuildTriggerBuildLogsBucket(original["logs_bucket"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLogsBucket); val.IsValid() && !isEmptyValue(val) { - transformed["logsBucket"] = transformedLogsBucket - } - - transformedTimeout, err := expandCloudBuildTriggerBuildTimeout(original["timeout"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["timeout"] = transformedTimeout - } - - transformedSecret, err := expandCloudBuildTriggerBuildSecret(original["secret"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { - transformed["secrets"] = transformedSecret - } - - transformedAvailableSecrets, err := expandCloudBuildTriggerBuildAvailableSecrets(original["available_secrets"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAvailableSecrets); val.IsValid() && !isEmptyValue(val) { - transformed["availableSecrets"] = transformedAvailableSecrets - } - - transformedStep, err := expandCloudBuildTriggerBuildStep(original["step"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedStep); val.IsValid() && !isEmptyValue(val) { - transformed["steps"] = transformedStep - } - - transformedArtifacts, err := expandCloudBuildTriggerBuildArtifacts(original["artifacts"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedArtifacts); val.IsValid() && !isEmptyValue(val) { - transformed["artifacts"] = transformedArtifacts - } - - transformedOptions, err := expandCloudBuildTriggerBuildOptions(original["options"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOptions); val.IsValid() && !isEmptyValue(val) { - transformed["options"] = transformedOptions - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedStorageSource, err := expandCloudBuildTriggerBuildSourceStorageSource(original["storage_source"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStorageSource); val.IsValid() && !isEmptyValue(val) { - transformed["storageSource"] = transformedStorageSource - } - - transformedRepoSource, err := expandCloudBuildTriggerBuildSourceRepoSource(original["repo_source"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRepoSource); val.IsValid() && !isEmptyValue(val) { - transformed["repoSource"] = transformedRepoSource - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildSourceStorageSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) 
- - transformedBucket, err := expandCloudBuildTriggerBuildSourceStorageSourceBucket(original["bucket"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !isEmptyValue(val) { - transformed["bucket"] = transformedBucket - } - - transformedObject, err := expandCloudBuildTriggerBuildSourceStorageSourceObject(original["object"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !isEmptyValue(val) { - transformed["object"] = transformedObject - } - - transformedGeneration, err := expandCloudBuildTriggerBuildSourceStorageSourceGeneration(original["generation"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !isEmptyValue(val) { - transformed["generation"] = transformedGeneration - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildSourceStorageSourceBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceStorageSourceObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceStorageSourceGeneration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId, err := expandCloudBuildTriggerBuildSourceRepoSourceProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - 
transformed["projectId"] = transformedProjectId - } - - transformedRepoName, err := expandCloudBuildTriggerBuildSourceRepoSourceRepoName(original["repo_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRepoName); val.IsValid() && !isEmptyValue(val) { - transformed["repoName"] = transformedRepoName - } - - transformedDir, err := expandCloudBuildTriggerBuildSourceRepoSourceDir(original["dir"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDir); val.IsValid() && !isEmptyValue(val) { - transformed["dir"] = transformedDir - } - - transformedInvertRegex, err := expandCloudBuildTriggerBuildSourceRepoSourceInvertRegex(original["invert_regex"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !isEmptyValue(val) { - transformed["invertRegex"] = transformedInvertRegex - } - - transformedSubstitutions, err := expandCloudBuildTriggerBuildSourceRepoSourceSubstitutions(original["substitutions"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSubstitutions); val.IsValid() && !isEmptyValue(val) { - transformed["substitutions"] = transformedSubstitutions - } - - transformedBranchName, err := expandCloudBuildTriggerBuildSourceRepoSourceBranchName(original["branch_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBranchName); val.IsValid() && !isEmptyValue(val) { - transformed["branchName"] = transformedBranchName - } - - transformedTagName, err := expandCloudBuildTriggerBuildSourceRepoSourceTagName(original["tag_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTagName); val.IsValid() && !isEmptyValue(val) { - transformed["tagName"] = transformedTagName - } - - transformedCommitSha, err := expandCloudBuildTriggerBuildSourceRepoSourceCommitSha(original["commit_sha"], d, 
config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCommitSha); val.IsValid() && !isEmptyValue(val) { - transformed["commitSha"] = transformedCommitSha - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceRepoName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceDir(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceInvertRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceSubstitutions(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceBranchName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceTagName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceCommitSha(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildImages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandCloudBuildTriggerBuildSubstitutions(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudBuildTriggerBuildQueueTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildLogsBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandCloudBuildTriggerBuildSecretKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - transformedSecretEnv, err := expandCloudBuildTriggerBuildSecretSecretEnv(original["secret_env"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecretEnv); val.IsValid() && !isEmptyValue(val) { - transformed["secretEnv"] = transformedSecretEnv - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudBuildTriggerBuildSecretKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSecretSecretEnv(v interface{}, d TerraformResourceData, config *Config) 
(map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudBuildTriggerBuildAvailableSecrets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSecretManager, err := expandCloudBuildTriggerBuildAvailableSecretsSecretManager(original["secret_manager"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecretManager); val.IsValid() && !isEmptyValue(val) { - transformed["secretManager"] = transformedSecretManager - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildAvailableSecretsSecretManager(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedVersionName, err := expandCloudBuildTriggerBuildAvailableSecretsSecretManagerVersionName(original["version_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersionName); val.IsValid() && !isEmptyValue(val) { - transformed["versionName"] = transformedVersionName - } - - transformedEnv, err := expandCloudBuildTriggerBuildAvailableSecretsSecretManagerEnv(original["env"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnv); val.IsValid() && !isEmptyValue(val) { - transformed["env"] = transformedEnv - } - - req = append(req, transformed) - } - return req, nil -} - -func 
expandCloudBuildTriggerBuildAvailableSecretsSecretManagerVersionName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildAvailableSecretsSecretManagerEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStep(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudBuildTriggerBuildStepName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedArgs, err := expandCloudBuildTriggerBuildStepArgs(original["args"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !isEmptyValue(val) { - transformed["args"] = transformedArgs - } - - transformedEnv, err := expandCloudBuildTriggerBuildStepEnv(original["env"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnv); val.IsValid() && !isEmptyValue(val) { - transformed["env"] = transformedEnv - } - - transformedId, err := expandCloudBuildTriggerBuildStepId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedEntrypoint, err := expandCloudBuildTriggerBuildStepEntrypoint(original["entrypoint"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEntrypoint); val.IsValid() && !isEmptyValue(val) { - transformed["entrypoint"] = 
transformedEntrypoint - } - - transformedDir, err := expandCloudBuildTriggerBuildStepDir(original["dir"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDir); val.IsValid() && !isEmptyValue(val) { - transformed["dir"] = transformedDir - } - - transformedSecretEnv, err := expandCloudBuildTriggerBuildStepSecretEnv(original["secret_env"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecretEnv); val.IsValid() && !isEmptyValue(val) { - transformed["secretEnv"] = transformedSecretEnv - } - - transformedTimeout, err := expandCloudBuildTriggerBuildStepTimeout(original["timeout"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["timeout"] = transformedTimeout - } - - transformedTiming, err := expandCloudBuildTriggerBuildStepTiming(original["timing"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTiming); val.IsValid() && !isEmptyValue(val) { - transformed["timing"] = transformedTiming - } - - transformedVolumes, err := expandCloudBuildTriggerBuildStepVolumes(original["volumes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVolumes); val.IsValid() && !isEmptyValue(val) { - transformed["volumes"] = transformedVolumes - } - - transformedWaitFor, err := expandCloudBuildTriggerBuildStepWaitFor(original["wait_for"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWaitFor); val.IsValid() && !isEmptyValue(val) { - transformed["waitFor"] = transformedWaitFor - } - - transformedScript, err := expandCloudBuildTriggerBuildStepScript(original["script"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedScript); val.IsValid() && !isEmptyValue(val) { - transformed["script"] = transformedScript - } - - req = 
append(req, transformed) - } - return req, nil -} - -func expandCloudBuildTriggerBuildStepName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepArgs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepEntrypoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepDir(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepSecretEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepTiming(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepVolumes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudBuildTriggerBuildStepVolumesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPath, err := 
expandCloudBuildTriggerBuildStepVolumesPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudBuildTriggerBuildStepVolumesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepVolumesPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepWaitFor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepScript(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildArtifacts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedImages, err := expandCloudBuildTriggerBuildArtifactsImages(original["images"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedImages); val.IsValid() && !isEmptyValue(val) { - transformed["images"] = transformedImages - } - - transformedObjects, err := expandCloudBuildTriggerBuildArtifactsObjects(original["objects"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedObjects); val.IsValid() && !isEmptyValue(val) { - transformed["objects"] = transformedObjects - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildArtifactsImages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandCloudBuildTriggerBuildArtifactsObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLocation, err := expandCloudBuildTriggerBuildArtifactsObjectsLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - transformedPaths, err := expandCloudBuildTriggerBuildArtifactsObjectsPaths(original["paths"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPaths); val.IsValid() && !isEmptyValue(val) { - transformed["paths"] = transformedPaths - } - - transformedTiming, err := expandCloudBuildTriggerBuildArtifactsObjectsTiming(original["timing"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTiming); val.IsValid() && !isEmptyValue(val) { - transformed["timing"] = transformedTiming - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildArtifactsObjectsLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildArtifactsObjectsPaths(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildArtifactsObjectsTiming(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedStartTime, err := expandCloudBuildTriggerBuildArtifactsObjectsTimingStartTime(original["start_time"], d, config) - if err != nil { - 
return nil, err - } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - transformedEndTime, err := expandCloudBuildTriggerBuildArtifactsObjectsTimingEndTime(original["end_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEndTime); val.IsValid() && !isEmptyValue(val) { - transformed["endTime"] = transformedEndTime - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildArtifactsObjectsTimingStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildArtifactsObjectsTimingEndTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSourceProvenanceHash, err := expandCloudBuildTriggerBuildOptionsSourceProvenanceHash(original["source_provenance_hash"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSourceProvenanceHash); val.IsValid() && !isEmptyValue(val) { - transformed["sourceProvenanceHash"] = transformedSourceProvenanceHash - } - - transformedRequestedVerifyOption, err := expandCloudBuildTriggerBuildOptionsRequestedVerifyOption(original["requested_verify_option"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRequestedVerifyOption); val.IsValid() && !isEmptyValue(val) { - transformed["requestedVerifyOption"] = transformedRequestedVerifyOption - } - - transformedMachineType, err := expandCloudBuildTriggerBuildOptionsMachineType(original["machine_type"], d, config) - if 
err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMachineType); val.IsValid() && !isEmptyValue(val) { - transformed["machineType"] = transformedMachineType - } - - transformedDiskSizeGb, err := expandCloudBuildTriggerBuildOptionsDiskSizeGb(original["disk_size_gb"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDiskSizeGb); val.IsValid() && !isEmptyValue(val) { - transformed["diskSizeGb"] = transformedDiskSizeGb - } - - transformedSubstitutionOption, err := expandCloudBuildTriggerBuildOptionsSubstitutionOption(original["substitution_option"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSubstitutionOption); val.IsValid() && !isEmptyValue(val) { - transformed["substitutionOption"] = transformedSubstitutionOption - } - - transformedDynamicSubstitutions, err := expandCloudBuildTriggerBuildOptionsDynamicSubstitutions(original["dynamic_substitutions"], d, config) - if err != nil { - return nil, err - } else { - transformed["dynamicSubstitutions"] = transformedDynamicSubstitutions - } - - transformedLogStreamingOption, err := expandCloudBuildTriggerBuildOptionsLogStreamingOption(original["log_streaming_option"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLogStreamingOption); val.IsValid() && !isEmptyValue(val) { - transformed["logStreamingOption"] = transformedLogStreamingOption - } - - transformedWorkerPool, err := expandCloudBuildTriggerBuildOptionsWorkerPool(original["worker_pool"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWorkerPool); val.IsValid() && !isEmptyValue(val) { - transformed["workerPool"] = transformedWorkerPool - } - - transformedLogging, err := expandCloudBuildTriggerBuildOptionsLogging(original["logging"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLogging); val.IsValid() && 
!isEmptyValue(val) { - transformed["logging"] = transformedLogging - } - - transformedEnv, err := expandCloudBuildTriggerBuildOptionsEnv(original["env"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnv); val.IsValid() && !isEmptyValue(val) { - transformed["env"] = transformedEnv - } - - transformedSecretEnv, err := expandCloudBuildTriggerBuildOptionsSecretEnv(original["secret_env"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecretEnv); val.IsValid() && !isEmptyValue(val) { - transformed["secretEnv"] = transformedSecretEnv - } - - transformedVolumes, err := expandCloudBuildTriggerBuildOptionsVolumes(original["volumes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVolumes); val.IsValid() && !isEmptyValue(val) { - transformed["volumes"] = transformedVolumes - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildOptionsSourceProvenanceHash(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsRequestedVerifyOption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsMachineType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsDiskSizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsSubstitutionOption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsDynamicSubstitutions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsLogStreamingOption(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsWorkerPool(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsSecretEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsVolumes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudBuildTriggerBuildOptionsVolumesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPath, err := expandCloudBuildTriggerBuildOptionsVolumesPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudBuildTriggerBuildOptionsVolumesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsVolumesPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudBuildTriggerUpgradeV1(_ context.Context, 
rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - log.Printf("[DEBUG] Attributes before migration: %#v", rawState) - // Versions 0 and 1 didn't support location. Default them to global. - rawState["location"] = "global" - log.Printf("[DEBUG] Attributes after migration: %#v", rawState) - return rawState, nil -} - -func resourceCloudBuildTriggerResourceV1() *schema.Resource { - // Cloud Build Triggers started with V1 since its beginnings. - return resourceCloudBuildTriggerResourceV0() -} - -func resourceCloudBuildTriggerUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - // Do nothing as V0 and V1 are exactly the same. - return rawState, nil -} - -func resourceCloudBuildTriggerResourceV0() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudBuildTriggerCreate, - Read: resourceCloudBuildTriggerRead, - Update: resourceCloudBuildTriggerUpdate, - Delete: resourceCloudBuildTriggerDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudBuildTriggerImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - SchemaVersion: 1, - CustomizeDiff: stepTimeoutCustomizeDiff, - - Schema: map[string]*schema.Schema{ - "approval_config": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Configuration for manual approval to start a build invocation of this BuildTrigger. -Builds created by this trigger will require approval before they execute. -Any user with a Cloud Build Approver role for the project can approve a build.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "approval_required": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether or not approval is needed. 
If this is set on a build, it will become pending when run, -and will need to be explicitly approved to start.`, - Default: false, - }, - }, - }, - }, - "build": { - Type: schema.TypeList, - Optional: true, - Description: `Contents of the build template. Either a filename or build template must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "step": { - Type: schema.TypeList, - Required: true, - Description: `The operations to be performed on the workspace.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `The name of the container image that will run this particular build step. -If the image is available in the host's Docker daemon's cache, it will be -run directly. If not, the host will attempt to pull the image first, using -the builder service account's credentials if necessary. -The Docker daemon's cache will already have the latest versions of all of -the officially supported build steps (see https://github.com/GoogleCloudPlatform/cloud-builders -for images and examples). -The Docker daemon will also have cached many of the layers for some popular -images, like "ubuntu", "debian", but they will be refreshed at the time -you attempt to use them. -If you built an image in a previous build step, it will be stored in the -host's Docker daemon's cache and is available to use as the name for a -later build step.`, - }, - "args": { - Type: schema.TypeList, - Optional: true, - Description: `A list of arguments that will be presented to the step when it is started. -If the image used to run the step's container has an entrypoint, the args -are used as arguments to that entrypoint. 
If the image does not define an -entrypoint, the first element in args is used as the entrypoint, and the -remainder will be used as arguments.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "dir": { - Type: schema.TypeString, - Optional: true, - Description: `Working directory to use when running this step's container. -If this value is a relative path, it is relative to the build's working -directory. If this value is absolute, it may be outside the build's working -directory, in which case the contents of the path may not be persisted -across build step executions, unless a 'volume' for that path is specified. -If the build specifies a 'RepoSource' with 'dir' and a step with a -'dir', -which specifies an absolute path, the 'RepoSource' 'dir' is ignored -for the step's execution.`, - }, - "entrypoint": { - Type: schema.TypeString, - Optional: true, - Description: `Entrypoint to be used instead of the build step image's -default entrypoint. -If unset, the image's default entrypoint is used`, - }, - "env": { - Type: schema.TypeList, - Optional: true, - Description: `A list of environment variable definitions to be used when -running a step. -The elements are of the form "KEY=VALUE" for the environment variable -"KEY" being given the value "VALUE".`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "id": { - Type: schema.TypeString, - Optional: true, - Description: `Unique identifier for this build step, used in 'wait_for' to -reference this build step as a dependency.`, - }, - "secret_env": { - Type: schema.TypeList, - Optional: true, - Description: `A list of environment variables which are encrypted using -a Cloud Key -Management Service crypto key. These values must be specified in -the build's 'Secret'.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "timeout": { - Type: schema.TypeString, - Optional: true, - Description: `Time limit for executing this build step. 
If not defined, -the step has no -time limit and will be allowed to continue to run until either it -completes or the build itself times out.`, - }, - "timing": { - Type: schema.TypeString, - Optional: true, - Description: `Output only. Stores timing information for executing this -build step.`, - }, - "volumes": { - Type: schema.TypeList, - Optional: true, - Description: `List of volumes to mount into the build step. -Each volume is created as an empty volume prior to execution of the -build step. Upon completion of the build, volumes and their contents -are discarded. -Using a named volume in only one step is not valid as it is -indicative of a build request with an incorrect configuration.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the volume to mount. -Volume names must be unique per build step and must be valid names for -Docker volumes. Each named volume must be used by at least two build steps.`, - }, - "path": { - Type: schema.TypeString, - Required: true, - Description: `Path at which to mount the volume. -Paths must be absolute and cannot conflict with other volume paths on -the same build step or with certain reserved volume paths.`, - }, - }, - }, - }, - "wait_for": { - Type: schema.TypeList, - Optional: true, - Description: `The ID(s) of the step(s) that this build step depends on. -This build step will not start until all the build steps in 'wait_for' -have completed successfully. 
If 'wait_for' is empty, this build step -will start when all previous build steps in the 'Build.Steps' list -have completed successfully.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "artifacts": { - Type: schema.TypeList, - Optional: true, - Description: `Artifacts produced by the build that should be uploaded upon successful completion of all build steps.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "images": { - Type: schema.TypeList, - Optional: true, - Description: `A list of images to be pushed upon the successful completion of all build steps. -The images will be pushed using the builder service account's credentials. -The digests of the pushed images will be stored in the Build resource's results field. -If any of the images fail to be pushed, the build is marked FAILURE.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "objects": { - Type: schema.TypeList, - Optional: true, - Description: `A list of objects to be uploaded to Cloud Storage upon successful completion of all build steps. -Files in the workspace matching specified paths globs will be uploaded to the -Cloud Storage location using the builder service account's credentials. -The location and generation of the uploaded objects will be stored in the Build resource's results field. -If any objects fail to be pushed, the build is marked FAILURE.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Optional: true, - Description: `Cloud Storage bucket and optional object path, in the form "gs://bucket/path/to/somewhere/". 
-Files in the workspace matching any path pattern will be uploaded to Cloud Storage with -this location as a prefix.`, - }, - "paths": { - Type: schema.TypeList, - Optional: true, - Description: `Path globs used to match files in the build's workspace.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "timing": { - Type: schema.TypeList, - Computed: true, - Description: `Output only. Stores timing information for pushing all artifact objects.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "end_time": { - Type: schema.TypeString, - Optional: true, - Description: `End of time span. -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to -nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "start_time": { - Type: schema.TypeString, - Optional: true, - Description: `Start of time span. -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to -nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "available_secrets": { - Type: schema.TypeList, - Optional: true, - Description: `Secrets and secret environment variables.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret_manager": { - Type: schema.TypeList, - Required: true, - Description: `Pairs a secret environment variable with a SecretVersion in Secret Manager.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "env": { - Type: schema.TypeString, - Required: true, - Description: `Environment variable name to associate with the secret. Secret environment -variables must be unique across all of a build's secrets, and must be used -by at least one build step.`, - }, - "version_name": { - Type: schema.TypeString, - Required: true, - Description: `Resource name of the SecretVersion. 
In format: projects/*/secrets/*/versions/*`, - }, - }, - }, - }, - }, - }, - }, - "images": { - Type: schema.TypeList, - Optional: true, - Description: `A list of images to be pushed upon the successful completion of all build steps. -The images are pushed using the builder service account's credentials. -The digests of the pushed images will be stored in the Build resource's results field. -If any of the images fail to be pushed, the build status is marked FAILURE.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "logs_bucket": { - Type: schema.TypeString, - Optional: true, - Description: `Google Cloud Storage bucket where logs should be written. -Logs file names will be of the format ${logsBucket}/log-${build_id}.txt.`, - }, - "options": { - Type: schema.TypeList, - Optional: true, - Description: `Special options for this build.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disk_size_gb": { - Type: schema.TypeInt, - Optional: true, - Description: `Requested disk size for the VM that runs the build. Note that this is NOT "disk free"; -some of the space will be used by the operating system and build utilities. -Also note that this is the minimum disk size that will be allocated for the build -- -the build may run with a larger disk than requested. At present, the maximum disk size -is 1000GB; builds that request more than the maximum are rejected with an error.`, - }, - "dynamic_substitutions": { - Type: schema.TypeBool, - Optional: true, - Description: `Option to specify whether or not to apply bash style string operations to the substitutions. -NOTE this is always enabled for triggered builds and cannot be overridden in the build configuration file.`, - }, - "env": { - Type: schema.TypeList, - Optional: true, - Description: `A list of global environment variable definitions that will exist for all build steps -in this build. 
If a variable is defined in both globally and in a build step, -the variable will use the build step value. -The elements are of the form "KEY=VALUE" for the environment variable "KEY" being given the value "VALUE".`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "log_streaming_option": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF", ""}), - Description: `Option to define build log streaming behavior to Google Cloud Storage. Possible values: ["STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF"]`, - }, - "logging": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", "STACKDRIVER_ONLY", "CLOUD_LOGGING_ONLY", "NONE", ""}), - Description: `Option to specify the logging mode, which determines if and where build logs are stored. Possible values: ["LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", "STACKDRIVER_ONLY", "CLOUD_LOGGING_ONLY", "NONE"]`, - }, - "machine_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"UNSPECIFIED", "N1_HIGHCPU_8", "N1_HIGHCPU_32", "E2_HIGHCPU_8", "E2_HIGHCPU_32", ""}), - Description: `Compute Engine machine type on which to run the build. Possible values: ["UNSPECIFIED", "N1_HIGHCPU_8", "N1_HIGHCPU_32", "E2_HIGHCPU_8", "E2_HIGHCPU_32"]`, - }, - "requested_verify_option": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"NOT_VERIFIED", "VERIFIED", ""}), - Description: `Requested verifiability options. Possible values: ["NOT_VERIFIED", "VERIFIED"]`, - }, - "secret_env": { - Type: schema.TypeList, - Optional: true, - Description: `A list of global environment variables, which are encrypted using a Cloud Key Management -Service crypto key. These values must be specified in the build's Secret. 
These variables -will be available to all build steps in this build.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "source_provenance_hash": { - Type: schema.TypeList, - Optional: true, - Description: `Requested hash for SourceProvenance. Possible values: ["NONE", "SHA256", "MD5"]`, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"NONE", "SHA256", "MD5"}), - }, - }, - "substitution_option": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"MUST_MATCH", "ALLOW_LOOSE", ""}), - Description: `Option to specify behavior when there is an error in the substitution checks. -NOTE this is always set to ALLOW_LOOSE for triggered builds and cannot be overridden -in the build configuration file. Possible values: ["MUST_MATCH", "ALLOW_LOOSE"]`, - }, - "volumes": { - Type: schema.TypeList, - Optional: true, - Description: `Global list of volumes to mount for ALL build steps -Each volume is created as an empty volume prior to starting the build process. -Upon completion of the build, volumes and their contents are discarded. Global -volume names and paths cannot conflict with the volumes defined a build step. -Using a global volume in a build with only one step is not valid as it is indicative -of a build request with an incorrect configuration.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the volume to mount. -Volume names must be unique per build step and must be valid names for Docker volumes. -Each named volume must be used by at least two build steps.`, - }, - "path": { - Type: schema.TypeString, - Optional: true, - Description: `Path at which to mount the volume. 
-Paths must be absolute and cannot conflict with other volume paths on the same -build step or with certain reserved volume paths.`, - }, - }, - }, - }, - "worker_pool": { - Type: schema.TypeString, - Optional: true, - Description: `Option to specify a WorkerPool for the build. Format projects/{project}/workerPools/{workerPool} -This field is experimental.`, - }, - }, - }, - }, - "queue_ttl": { - Type: schema.TypeString, - Optional: true, - Description: `TTL in queue for this build. If provided and the build is enqueued longer than this value, -the build will expire and the build status will be EXPIRED. -The TTL starts ticking from createTime. -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - "secret": { - Type: schema.TypeList, - Optional: true, - Description: `Secrets to decrypt using Cloud Key Management Service.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_name": { - Type: schema.TypeString, - Required: true, - Description: `Cloud KMS key name to use to decrypt these envs.`, - }, - "secret_env": { - Type: schema.TypeMap, - Optional: true, - Description: `Map of environment variable name to its encrypted value. -Secret environment variables must be unique across all of a build's secrets, -and must be used by at least one build step. Values can be at most 64 KB in size. -There can be at most 100 secret values across all of a build's secrets.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "source": { - Type: schema.TypeList, - Optional: true, - Description: `The location of the source files to build. 
-One of 'storageSource' or 'repoSource' must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "repo_source": { - Type: schema.TypeList, - Optional: true, - Description: `Location of the source in a Google Cloud Source Repository.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "repo_name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the Cloud Source Repository.`, - }, - "branch_name": { - Type: schema.TypeString, - Optional: true, - Description: `Regex matching branches to build. Exactly one a of branch name, tag, or commit SHA must be provided. -The syntax of the regular expressions accepted is the syntax accepted by RE2 and -described at https://github.com/google/re2/wiki/Syntax`, - ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, - }, - "commit_sha": { - Type: schema.TypeString, - Optional: true, - Description: `Explicit commit SHA to build. Exactly one a of branch name, tag, or commit SHA must be provided.`, - ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, - }, - "dir": { - Type: schema.TypeString, - Optional: true, - Description: `Directory, relative to the source root, in which to run the build. -This must be a relative path. If a step's dir is specified and is an absolute path, -this value is ignored for that step's execution.`, - }, - "invert_regex": { - Type: schema.TypeBool, - Optional: true, - Description: `Only trigger a build if the revision regex does NOT match the revision regex.`, - }, - "project_id": { - Type: schema.TypeString, - Optional: true, - Description: `ID of the project that owns the Cloud Source Repository. 
-If omitted, the project ID requesting the build is assumed.`, - }, - "substitutions": { - Type: schema.TypeMap, - Optional: true, - Description: `Substitutions to use in a triggered build. Should only be used with triggers.run`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "tag_name": { - Type: schema.TypeString, - Optional: true, - Description: `Regex matching tags to build. Exactly one a of branch name, tag, or commit SHA must be provided. -The syntax of the regular expressions accepted is the syntax accepted by RE2 and -described at https://github.com/google/re2/wiki/Syntax`, - ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, - }, - }, - }, - }, - "storage_source": { - Type: schema.TypeList, - Optional: true, - Description: `Location of the source in an archive file in Google Cloud Storage.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - Description: `Google Cloud Storage bucket containing the source.`, - }, - "object": { - Type: schema.TypeString, - Required: true, - Description: `Google Cloud Storage object containing the source. -This object must be a gzipped archive file (.tar.gz) containing source to build.`, - }, - "generation": { - Type: schema.TypeString, - Optional: true, - Description: `Google Cloud Storage generation for the object. -If the generation is omitted, the latest generation will be used`, - }, - }, - }, - }, - }, - }, - }, - "substitutions": { - Type: schema.TypeMap, - Optional: true, - Description: `Substitutions data for Build resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "tags": { - Type: schema.TypeList, - Optional: true, - Description: `Tags for annotation of a Build. 
These are not docker tags.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "timeout": { - Type: schema.TypeString, - Optional: true, - Description: `Amount of time that this build should be allowed to run, to second granularity. -If this amount of time elapses, work on the build will cease and the build status will be TIMEOUT. -This timeout must be equal to or greater than the sum of the timeouts for build steps within the build. -The expected format is the number of seconds followed by s. -Default time is ten minutes (600s).`, - Default: "600s", - }, - }, - }, - ExactlyOneOf: []string{"filename", "build", "git_file_source"}, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Human-readable description of the trigger.`, - }, - "disabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether the trigger is disabled or not. If true, the trigger will never result in a build.`, - }, - "filename": { - Type: schema.TypeString, - Optional: true, - Description: `Path, from the source root, to a file whose contents is used for the template. -Either a filename or build template must be provided. Set this only when using trigger_template or github. -When using Pub/Sub, Webhook or Manual set the file name using git_file_source instead.`, - ExactlyOneOf: []string{"filename", "build", "git_file_source"}, - }, - "filter": { - Type: schema.TypeString, - Optional: true, - Description: `A Common Expression Language string. 
Used only with Pub/Sub and Webhook.`, - }, - "git_file_source": { - Type: schema.TypeList, - Optional: true, - Description: `The file source describing the local or remote Build template.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "path": { - Type: schema.TypeString, - Required: true, - Description: `The path of the file, with the repo root as the root of the path.`, - }, - "repo_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET"}), - Description: `The type of the repo, since it may not be explicit from the repo field (e.g from a URL). -Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB Possible values: ["UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET"]`, - }, - "revision": { - Type: schema.TypeString, - Optional: true, - Description: `The branch, tag, arbitrary ref, or SHA version of the repo to use when resolving the -filename (optional). This field respects the same syntax/resolution as described here: https://git-scm.com/docs/gitrevisions -If unspecified, the revision from which the trigger invocation originated is assumed to be the revision from which to read the specified path.`, - }, - "uri": { - Type: schema.TypeString, - Optional: true, - Description: `The URI of the repo (optional). If unspecified, the repo from which the trigger -invocation originated is assumed to be the repo from which to read the specified path.`, - }, - }, - }, - ExactlyOneOf: []string{"filename", "git_file_source", "build"}, - }, - "github": { - Type: schema.TypeList, - Optional: true, - Description: `Describes the configuration of a trigger that creates a build whenever a GitHub event is received. 
-One of 'trigger_template', 'github', 'pubsub_config' or 'webhook_config' must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the repository. For example: The name for -https://github.com/googlecloudplatform/cloud-builders is "cloud-builders".`, - }, - "owner": { - Type: schema.TypeString, - Optional: true, - Description: `Owner of the repository. For example: The owner for -https://github.com/googlecloudplatform/cloud-builders is "googlecloudplatform".`, - }, - "pull_request": { - Type: schema.TypeList, - Optional: true, - Description: `filter to match changes in pull requests. Specify only one of 'pull_request' or 'push'.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "branch": { - Type: schema.TypeString, - Required: true, - Description: `Regex of branches to match.`, - }, - "comment_control": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY", ""}), - Description: `Whether to block builds on a "/gcbrun" comment from a repository owner or collaborator. Possible values: ["COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"]`, - }, - "invert_regex": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, branches that do NOT match the git_ref will trigger a build.`, - }, - }, - }, - ExactlyOneOf: []string{"github.0.pull_request", "github.0.push"}, - }, - "push": { - Type: schema.TypeList, - Optional: true, - Description: `filter to match changes in refs, like branches or tags. Specify only one of 'pull_request' or 'push'.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "branch": { - Type: schema.TypeString, - Optional: true, - Description: `Regex of branches to match. 
Specify only one of branch or tag.`, - ExactlyOneOf: []string{"github.0.push.0.branch", "github.0.push.0.tag"}, - }, - "invert_regex": { - Type: schema.TypeBool, - Optional: true, - Description: `When true, only trigger a build if the revision regex does NOT match the git_ref regex.`, - }, - "tag": { - Type: schema.TypeString, - Optional: true, - Description: `Regex of tags to match. Specify only one of branch or tag.`, - ExactlyOneOf: []string{"github.0.push.0.branch", "github.0.push.0.tag"}, - }, - }, - }, - ExactlyOneOf: []string{"github.0.pull_request", "github.0.push"}, - }, - }, - }, - AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, - }, - "ignored_files": { - Type: schema.TypeList, - Optional: true, - Description: `ignoredFiles and includedFiles are file glob matches using https://golang.org/pkg/path/filepath/#Match -extended with support for '**'. -If ignoredFiles and changed files are both empty, then they are not -used to determine whether or not to trigger a build. -If ignoredFiles is not empty, then we ignore any files that match any -of the ignored_file globs. If the change has no files that are outside -of the ignoredFiles globs, then we do not trigger a build.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "include_build_logs": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"INCLUDE_BUILD_LOGS_UNSPECIFIED", "INCLUDE_BUILD_LOGS_WITH_STATUS", ""}), - Description: `Build logs will be sent back to GitHub as part of the checkrun -result. Values can be INCLUDE_BUILD_LOGS_UNSPECIFIED or -INCLUDE_BUILD_LOGS_WITH_STATUS Possible values: ["INCLUDE_BUILD_LOGS_UNSPECIFIED", "INCLUDE_BUILD_LOGS_WITH_STATUS"]`, - }, - "included_files": { - Type: schema.TypeList, - Optional: true, - Description: `ignoredFiles and includedFiles are file glob matches using https://golang.org/pkg/path/filepath/#Match -extended with support for '**'. 
-If any of the files altered in the commit pass the ignoredFiles filter -and includedFiles is empty, then as far as this filter is concerned, we -should trigger the build. -If any of the files altered in the commit pass the ignoredFiles filter -and includedFiles is not empty, then we make sure that at least one of -those files matches a includedFiles glob. If not, then we do not trigger -a build.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Name of the trigger. Must be unique within the project.`, - }, - "pubsub_config": { - Type: schema.TypeList, - Optional: true, - Description: `PubsubConfig describes the configuration of a trigger that creates -a build whenever a Pub/Sub message is published. -One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "topic": { - Type: schema.TypeString, - Required: true, - Description: `The name of the topic from which this subscription is receiving messages.`, - }, - "service_account_email": { - Type: schema.TypeString, - Optional: true, - Description: `Service account that will make the push request.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `Potential issues with the underlying Pub/Sub subscription configuration. -Only populated on get requests.`, - }, - "subscription": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Name of the subscription.`, - }, - }, - }, - AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, - }, - "service_account": { - Type: schema.TypeString, - Optional: true, - Description: `The service account used for all user-controlled operations including -triggers.patch, triggers.run, builds.create, and builds.cancel. 
-If no service account is set, then the standard Cloud Build service account -([PROJECT_NUM]@system.gserviceaccount.com) will be used instead. -Format: projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_ID_OR_EMAIL}`, - }, - "source_to_build": { - Type: schema.TypeList, - Optional: true, - Description: `The repo and ref of the repository from which to build. -This field is used only for those triggers that do not respond to SCM events. -Triggers that respond to such events build source at whatever commit caused the event. -This field is currently only used by Webhook, Pub/Sub, Manual, and Cron triggers. -One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ref": { - Type: schema.TypeString, - Required: true, - Description: `The branch or tag to use. Must start with "refs/" (required).`, - }, - "repo_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET"}), - Description: `The type of the repo, since it may not be explicit from the repo field (e.g from a URL). 
-Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB, BITBUCKET Possible values: ["UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET"]`, - }, - "uri": { - Type: schema.TypeString, - Required: true, - Description: `The URI of the repo (required).`, - }, - }, - }, - AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, - }, - "substitutions": { - Type: schema.TypeMap, - Optional: true, - Description: `Substitutions data for Build resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "tags": { - Type: schema.TypeList, - Optional: true, - Description: `Tags for annotation of a BuildTrigger`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "trigger_template": { - Type: schema.TypeList, - Optional: true, - Description: `Template describing the types of source changes to trigger a build. -Branch and tag names in trigger templates are interpreted as regular -expressions. Any branch or tag change that matches that regular -expression will trigger a build. -One of 'trigger_template', 'github', 'pubsub_config', 'webhook_config' or 'source_to_build' must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "branch_name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the branch to build. Exactly one a of branch name, tag, or commit SHA must be provided. -This field is a regular expression.`, - ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, - }, - "commit_sha": { - Type: schema.TypeString, - Optional: true, - Description: `Explicit commit SHA to build. 
Exactly one of a branch name, tag, or commit SHA must be provided.`, - ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, - }, - "dir": { - Type: schema.TypeString, - Optional: true, - Description: `Directory, relative to the source root, in which to run the build. -This must be a relative path. If a step's dir is specified and -is an absolute path, this value is ignored for that step's -execution.`, - }, - "invert_regex": { - Type: schema.TypeBool, - Optional: true, - Description: `Only trigger a build if the revision regex does NOT match the revision regex.`, - }, - "project_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `ID of the project that owns the Cloud Source Repository. If -omitted, the project ID requesting the build is assumed.`, - }, - "repo_name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the Cloud Source Repository. If omitted, the name "default" is assumed.`, - Default: "default", - }, - "tag_name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the tag to build. Exactly one of a branch name, tag, or commit SHA must be provided. -This field is a regular expression.`, - ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, - }, - }, - }, - AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, - }, - "webhook_config": { - Type: schema.TypeList, - Optional: true, - Description: `WebhookConfig describes the configuration of a trigger that creates -a build whenever a webhook is sent to a trigger's webhook URL. 
-One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret": { - Type: schema.TypeString, - Required: true, - Description: `Resource name for the secret required as a URL parameter.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `Potential issues with the underlying Pub/Sub subscription configuration. -Only populated on get requests.`, - }, - }, - }, - AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Time when the trigger was created.`, - }, - "trigger_id": { - Type: schema.TypeString, - Computed: true, - Description: `The unique identifier for the trigger.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_clouddeploy_delivery_pipeline.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_clouddeploy_delivery_pipeline.go deleted file mode 100644 index 384a05e9c6..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_clouddeploy_delivery_pipeline.go +++ /dev/null @@ -1,606 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: DCL *** -// -// ---------------------------------------------------------------------------- -// -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). 
-// Changes will need to be made to the DCL or Magic Modules instead of here. -// -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy" -) - -func ResourceClouddeployDeliveryPipeline() *schema.Resource { - return &schema.Resource{ - Create: resourceClouddeployDeliveryPipelineCreate, - Read: resourceClouddeployDeliveryPipelineRead, - Update: resourceClouddeployDeliveryPipelineUpdate, - Delete: resourceClouddeployDeliveryPipelineDelete, - - Importer: &schema.ResourceImporter{ - State: resourceClouddeployDeliveryPipelineImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The location for the resource", - }, - - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the `DeliveryPipeline`. Format is [a-z][a-z0-9\\-]{0,62}.", - }, - - "annotations": { - Type: schema.TypeMap, - Optional: true, - Description: "User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. 
See https://google.aip.dev/128#annotations for more details such as format and size limitations.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "description": { - Type: schema.TypeString, - Optional: true, - Description: "Description of the `DeliveryPipeline`. Max length is 255 characters.", - }, - - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: "Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The project for the resource", - }, - - "serial_pipeline": { - Type: schema.TypeList, - Optional: true, - Description: "SerialPipeline defines a sequential set of stages for a `DeliveryPipeline`.", - MaxItems: 1, - Elem: ClouddeployDeliveryPipelineSerialPipelineSchema(), - }, - - "suspended": { - Type: schema.TypeBool, - Optional: true, - Description: "When suspended, no new releases or rollouts can be created, but in-progress ones will complete.", - }, - - "condition": { - Type: schema.TypeList, - Computed: true, - Description: "Output only. Information around the state of the Delivery Pipeline.", - Elem: ClouddeployDeliveryPipelineConditionSchema(), - }, - - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: "Output only. 
Time at which the pipeline was created.", - }, - - "etag": { - Type: schema.TypeString, - Computed: true, - Description: "This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", - }, - - "uid": { - Type: schema.TypeString, - Computed: true, - Description: "Output only. Unique identifier of the `DeliveryPipeline`.", - }, - - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: "Output only. Most recent time at which the pipeline was updated.", - }, - }, - } -} - -func ClouddeployDeliveryPipelineSerialPipelineSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "stages": { - Type: schema.TypeList, - Optional: true, - Description: "Each stage specifies configuration for a `Target`. The ordering of this list defines the promotion flow.", - Elem: ClouddeployDeliveryPipelineSerialPipelineStagesSchema(), - }, - }, - } -} - -func ClouddeployDeliveryPipelineSerialPipelineStagesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "profiles": { - Type: schema.TypeList, - Optional: true, - Description: "Skaffold profiles to use when rendering the manifest for this stage's `Target`.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "target_id": { - Type: schema.TypeString, - Optional: true, - Description: "The target_id to which this stage points. This field refers exclusively to the last segment of a target name. For example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`). 
The location of the `Target` is inferred to be the same as the location of the `DeliveryPipeline` that contains this `Stage`.", - }, - }, - } -} - -func ClouddeployDeliveryPipelineConditionSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pipeline_ready_condition": { - Type: schema.TypeList, - Computed: true, - Description: "Details around the Pipeline's overall status.", - Elem: ClouddeployDeliveryPipelineConditionPipelineReadyConditionSchema(), - }, - - "targets_present_condition": { - Type: schema.TypeList, - Computed: true, - Description: "Details around targets enumerated in the pipeline.", - Elem: ClouddeployDeliveryPipelineConditionTargetsPresentConditionSchema(), - }, - }, - } -} - -func ClouddeployDeliveryPipelineConditionPipelineReadyConditionSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "status": { - Type: schema.TypeBool, - Computed: true, - Description: "True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline.", - }, - - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: "Last time the condition was updated.", - }, - }, - } -} - -func ClouddeployDeliveryPipelineConditionTargetsPresentConditionSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "missing_targets": { - Type: schema.TypeList, - Computed: true, - Description: "The list of Target names that are missing. 
For example, projects/{project_id}/locations/{location_name}/targets/{target_name}.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "status": { - Type: schema.TypeBool, - Computed: true, - Description: "True if there aren't any missing Targets.", - }, - - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: "Last time the condition was updated.", - }, - }, - } -} - -func resourceClouddeployDeliveryPipelineCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &clouddeploy.DeliveryPipeline{ - Location: dcl.String(d.Get("location").(string)), - Name: dcl.String(d.Get("name").(string)), - Annotations: checkStringMap(d.Get("annotations")), - Description: dcl.String(d.Get("description").(string)), - Labels: checkStringMap(d.Get("labels")), - Project: dcl.String(project), - SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), - Suspended: dcl.Bool(d.Get("suspended").(bool)), - } - - id, err := obj.ID() - if err != nil { - return fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := project - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyDeliveryPipeline(context.Background(), obj, directive...) 
- - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error creating DeliveryPipeline: %s", err) - } - - log.Printf("[DEBUG] Finished creating DeliveryPipeline %q: %#v", d.Id(), res) - - return resourceClouddeployDeliveryPipelineRead(d, meta) -} - -func resourceClouddeployDeliveryPipelineRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &clouddeploy.DeliveryPipeline{ - Location: dcl.String(d.Get("location").(string)), - Name: dcl.String(d.Get("name").(string)), - Annotations: checkStringMap(d.Get("annotations")), - Description: dcl.String(d.Get("description").(string)), - Labels: checkStringMap(d.Get("labels")), - Project: dcl.String(project), - SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), - Suspended: dcl.Bool(d.Get("suspended").(bool)), - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := project - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.GetDeliveryPipeline(context.Background(), obj) - if err != nil { - resourceName := fmt.Sprintf("ClouddeployDeliveryPipeline %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("location", res.Location); err != nil { - return fmt.Errorf("error setting 
location in state: %s", err) - } - if err = d.Set("name", res.Name); err != nil { - return fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("annotations", res.Annotations); err != nil { - return fmt.Errorf("error setting annotations in state: %s", err) - } - if err = d.Set("description", res.Description); err != nil { - return fmt.Errorf("error setting description in state: %s", err) - } - if err = d.Set("labels", res.Labels); err != nil { - return fmt.Errorf("error setting labels in state: %s", err) - } - if err = d.Set("project", res.Project); err != nil { - return fmt.Errorf("error setting project in state: %s", err) - } - if err = d.Set("serial_pipeline", flattenClouddeployDeliveryPipelineSerialPipeline(res.SerialPipeline)); err != nil { - return fmt.Errorf("error setting serial_pipeline in state: %s", err) - } - if err = d.Set("suspended", res.Suspended); err != nil { - return fmt.Errorf("error setting suspended in state: %s", err) - } - if err = d.Set("condition", flattenClouddeployDeliveryPipelineCondition(res.Condition)); err != nil { - return fmt.Errorf("error setting condition in state: %s", err) - } - if err = d.Set("create_time", res.CreateTime); err != nil { - return fmt.Errorf("error setting create_time in state: %s", err) - } - if err = d.Set("etag", res.Etag); err != nil { - return fmt.Errorf("error setting etag in state: %s", err) - } - if err = d.Set("uid", res.Uid); err != nil { - return fmt.Errorf("error setting uid in state: %s", err) - } - if err = d.Set("update_time", res.UpdateTime); err != nil { - return fmt.Errorf("error setting update_time in state: %s", err) - } - - return nil -} -func resourceClouddeployDeliveryPipelineUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &clouddeploy.DeliveryPipeline{ - Location: dcl.String(d.Get("location").(string)), - Name: dcl.String(d.Get("name").(string)), - 
Annotations: checkStringMap(d.Get("annotations")), - Description: dcl.String(d.Get("description").(string)), - Labels: checkStringMap(d.Get("labels")), - Project: dcl.String(project), - SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), - Suspended: dcl.Bool(d.Get("suspended").(bool)), - } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyDeliveryPipeline(context.Background(), obj, directive...) 
- - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error updating DeliveryPipeline: %s", err) - } - - log.Printf("[DEBUG] Finished creating DeliveryPipeline %q: %#v", d.Id(), res) - - return resourceClouddeployDeliveryPipelineRead(d, meta) -} - -func resourceClouddeployDeliveryPipelineDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &clouddeploy.DeliveryPipeline{ - Location: dcl.String(d.Get("location").(string)), - Name: dcl.String(d.Get("name").(string)), - Annotations: checkStringMap(d.Get("annotations")), - Description: dcl.String(d.Get("description").(string)), - Labels: checkStringMap(d.Get("labels")), - Project: dcl.String(project), - SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), - Suspended: dcl.Bool(d.Get("suspended").(bool)), - } - - log.Printf("[DEBUG] Deleting DeliveryPipeline %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := project - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - if err := client.DeleteDeliveryPipeline(context.Background(), obj); err != nil { - return fmt.Errorf("Error deleting DeliveryPipeline: %s", err) - } - - log.Printf("[DEBUG] Finished deleting DeliveryPipeline %q", d.Id()) - return nil -} - -func 
resourceClouddeployDeliveryPipelineImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/deliveryPipelines/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func expandClouddeployDeliveryPipelineSerialPipeline(o interface{}) *clouddeploy.DeliveryPipelineSerialPipeline { - if o == nil { - return clouddeploy.EmptyDeliveryPipelineSerialPipeline - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return clouddeploy.EmptyDeliveryPipelineSerialPipeline - } - obj := objArr[0].(map[string]interface{}) - return &clouddeploy.DeliveryPipelineSerialPipeline{ - Stages: expandClouddeployDeliveryPipelineSerialPipelineStagesArray(obj["stages"]), - } -} - -func flattenClouddeployDeliveryPipelineSerialPipeline(obj *clouddeploy.DeliveryPipelineSerialPipeline) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "stages": flattenClouddeployDeliveryPipelineSerialPipelineStagesArray(obj.Stages), - } - - return []interface{}{transformed} - -} -func expandClouddeployDeliveryPipelineSerialPipelineStagesArray(o interface{}) []clouddeploy.DeliveryPipelineSerialPipelineStages { - if o == nil { - return make([]clouddeploy.DeliveryPipelineSerialPipelineStages, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]clouddeploy.DeliveryPipelineSerialPipelineStages, 0) - } - - items := make([]clouddeploy.DeliveryPipelineSerialPipelineStages, 0, len(objs)) - for _, item := 
range objs { - i := expandClouddeployDeliveryPipelineSerialPipelineStages(item) - items = append(items, *i) - } - - return items -} - -func expandClouddeployDeliveryPipelineSerialPipelineStages(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStages { - if o == nil { - return clouddeploy.EmptyDeliveryPipelineSerialPipelineStages - } - - obj := o.(map[string]interface{}) - return &clouddeploy.DeliveryPipelineSerialPipelineStages{ - Profiles: expandStringArray(obj["profiles"]), - TargetId: dcl.String(obj["target_id"].(string)), - } -} - -func flattenClouddeployDeliveryPipelineSerialPipelineStagesArray(objs []clouddeploy.DeliveryPipelineSerialPipelineStages) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenClouddeployDeliveryPipelineSerialPipelineStages(&item) - items = append(items, i) - } - - return items -} - -func flattenClouddeployDeliveryPipelineSerialPipelineStages(obj *clouddeploy.DeliveryPipelineSerialPipelineStages) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "profiles": obj.Profiles, - "target_id": obj.TargetId, - } - - return transformed - -} - -func flattenClouddeployDeliveryPipelineCondition(obj *clouddeploy.DeliveryPipelineCondition) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "pipeline_ready_condition": flattenClouddeployDeliveryPipelineConditionPipelineReadyCondition(obj.PipelineReadyCondition), - "targets_present_condition": flattenClouddeployDeliveryPipelineConditionTargetsPresentCondition(obj.TargetsPresentCondition), - } - - return []interface{}{transformed} - -} - -func flattenClouddeployDeliveryPipelineConditionPipelineReadyCondition(obj *clouddeploy.DeliveryPipelineConditionPipelineReadyCondition) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "status": obj.Status, - 
"update_time": obj.UpdateTime, - } - - return []interface{}{transformed} - -} - -func flattenClouddeployDeliveryPipelineConditionTargetsPresentCondition(obj *clouddeploy.DeliveryPipelineConditionTargetsPresentCondition) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "missing_targets": obj.MissingTargets, - "status": obj.Status, - "update_time": obj.UpdateTime, - } - - return []interface{}{transformed} - -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudiot_device.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudiot_device.go deleted file mode 100644 index ae5ee348cd..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudiot_device.go +++ /dev/null @@ -1,922 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceCloudIotDevice() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudIotDeviceCreate, - Read: resourceCloudIotDeviceRead, - Update: resourceCloudIotDeviceUpdate, - Delete: resourceCloudIotDeviceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudIotDeviceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A unique name for the resource.`, - }, - "registry": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the device registry where this device should be created.`, - }, - "blocked": { - Type: schema.TypeBool, - Optional: true, - Description: `If a device is blocked, connections or requests from this device will fail.`, - }, - "credentials": { - Type: schema.TypeList, - Optional: true, - Description: `The credentials used to authenticate this device.`, - MaxItems: 3, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "public_key": { - Type: schema.TypeList, - Required: true, - Description: `A public key used to verify the signature of JSON Web Tokens (JWTs).`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "format": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"RSA_PEM", "RSA_X509_PEM", "ES256_PEM", "ES256_X509_PEM"}), - Description: `The format of the key. 
Possible values: ["RSA_PEM", "RSA_X509_PEM", "ES256_PEM", "ES256_X509_PEM"]`, - }, - "key": { - Type: schema.TypeString, - Required: true, - Description: `The key data.`, - }, - }, - }, - }, - "expiration_time": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `The time at which this credential becomes invalid.`, - }, - }, - }, - }, - "gateway_config": { - Type: schema.TypeList, - Optional: true, - Description: `Gateway-related configuration and state.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "gateway_auth_method": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"ASSOCIATION_ONLY", "DEVICE_AUTH_TOKEN_ONLY", "ASSOCIATION_AND_DEVICE_AUTH_TOKEN", ""}), - Description: `Indicates whether the device is a gateway. Possible values: ["ASSOCIATION_ONLY", "DEVICE_AUTH_TOKEN_ONLY", "ASSOCIATION_AND_DEVICE_AUTH_TOKEN"]`, - }, - "gateway_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"GATEWAY", "NON_GATEWAY", ""}), - Description: `Indicates whether the device is a gateway. Default value: "NON_GATEWAY" Possible values: ["GATEWAY", "NON_GATEWAY"]`, - Default: "NON_GATEWAY", - }, - "last_accessed_gateway_id": { - Type: schema.TypeString, - Computed: true, - Description: `The ID of the gateway the device accessed most recently.`, - }, - "last_accessed_gateway_time": { - Type: schema.TypeString, - Computed: true, - Description: `The most recent time at which the device accessed the gateway specified in last_accessed_gateway.`, - }, - }, - }, - }, - "log_level": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "ERROR", "INFO", "DEBUG", ""}), - Description: `The logging verbosity for device activity. 
Possible values: ["NONE", "ERROR", "INFO", "DEBUG"]`, - }, - "metadata": { - Type: schema.TypeMap, - Optional: true, - Description: `The metadata key-value pairs assigned to the device.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "config": { - Type: schema.TypeList, - Computed: true, - Description: `The most recent device configuration, which is eventually sent from Cloud IoT Core to the device.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "binary_data": { - Type: schema.TypeString, - Optional: true, - Description: `The device configuration data.`, - }, - "cloud_update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time at which this configuration version was updated in Cloud IoT Core.`, - }, - "device_ack_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time at which Cloud IoT Core received the acknowledgment from the device, -indicating that the device has received this configuration version.`, - }, - "version": { - Type: schema.TypeString, - Computed: true, - Description: `The version of this update.`, - }, - }, - }, - }, - "last_config_ack_time": { - Type: schema.TypeString, - Computed: true, - Description: `The last time a cloud-to-device config version acknowledgment was received from the device.`, - }, - "last_config_send_time": { - Type: schema.TypeString, - Computed: true, - Description: `The last time a cloud-to-device config version was sent to the device.`, - }, - "last_error_status": { - Type: schema.TypeList, - Computed: true, - Description: `The error message of the most recent error, such as a failure to publish to Cloud Pub/Sub.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "details": { - Type: schema.TypeList, - Optional: true, - Description: `A list of messages that carry the error details.`, - Elem: &schema.Schema{ - Type: schema.TypeMap, - }, - }, - "message": { - Type: schema.TypeString, - Optional: true, - Description: `A 
developer-facing error message, which should be in English.`, - }, - "number": { - Type: schema.TypeInt, - Optional: true, - Description: `The status code, which should be an enum value of google.rpc.Code.`, - }, - }, - }, - }, - "last_error_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time the most recent error occurred, such as a failure to publish to Cloud Pub/Sub.`, - }, - "last_event_time": { - Type: schema.TypeString, - Computed: true, - Description: `The last time a telemetry event was received.`, - }, - "last_heartbeat_time": { - Type: schema.TypeString, - Computed: true, - Description: `The last time an MQTT PINGREQ was received.`, - }, - "last_state_time": { - Type: schema.TypeString, - Computed: true, - Description: `The last time a state event was received.`, - }, - "num_id": { - Type: schema.TypeString, - Computed: true, - Description: `A server-defined unique numeric ID for the device. -This is a more compact way to identify devices, and it is globally unique.`, - }, - "state": { - Type: schema.TypeList, - Computed: true, - Description: `The state most recently received from the device.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "binary_data": { - Type: schema.TypeString, - Optional: true, - Description: `The device state data.`, - }, - "update_time": { - Type: schema.TypeString, - Optional: true, - Description: `The time at which this state version was updated in Cloud IoT Core.`, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudIotDeviceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - idProp, err := expandCloudIotDeviceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, 
idProp)) { - obj["id"] = idProp - } - credentialsProp, err := expandCloudIotDeviceCredentials(d.Get("credentials"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("credentials"); !isEmptyValue(reflect.ValueOf(credentialsProp)) && (ok || !reflect.DeepEqual(v, credentialsProp)) { - obj["credentials"] = credentialsProp - } - blockedProp, err := expandCloudIotDeviceBlocked(d.Get("blocked"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("blocked"); !isEmptyValue(reflect.ValueOf(blockedProp)) && (ok || !reflect.DeepEqual(v, blockedProp)) { - obj["blocked"] = blockedProp - } - logLevelProp, err := expandCloudIotDeviceLogLevel(d.Get("log_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_level"); !isEmptyValue(reflect.ValueOf(logLevelProp)) && (ok || !reflect.DeepEqual(v, logLevelProp)) { - obj["logLevel"] = logLevelProp - } - metadataProp, err := expandCloudIotDeviceMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(reflect.ValueOf(metadataProp)) && (ok || !reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - gatewayConfigProp, err := expandCloudIotDeviceGatewayConfig(d.Get("gateway_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("gateway_config"); !isEmptyValue(reflect.ValueOf(gatewayConfigProp)) && (ok || !reflect.DeepEqual(v, gatewayConfigProp)) { - obj["gatewayConfig"] = gatewayConfigProp - } - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}{{registry}}/devices") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Device: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, 
d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Device: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{registry}}/devices/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Device %q: %#v", d.Id(), res) - - return resourceCloudIotDeviceRead(d, meta) -} - -func resourceCloudIotDeviceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}{{registry}}/devices/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudIotDevice %q", d.Id())) - } - - if err := d.Set("name", flattenCloudIotDeviceName(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("num_id", flattenCloudIotDeviceNumId(res["numId"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("credentials", flattenCloudIotDeviceCredentials(res["credentials"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_heartbeat_time", flattenCloudIotDeviceLastHeartbeatTime(res["lastHeartbeatTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_event_time", flattenCloudIotDeviceLastEventTime(res["lastEventTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_state_time", 
flattenCloudIotDeviceLastStateTime(res["lastStateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_config_ack_time", flattenCloudIotDeviceLastConfigAckTime(res["lastConfigAckTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_config_send_time", flattenCloudIotDeviceLastConfigSendTime(res["lastConfigSendTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("blocked", flattenCloudIotDeviceBlocked(res["blocked"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_error_time", flattenCloudIotDeviceLastErrorTime(res["lastErrorTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_error_status", flattenCloudIotDeviceLastErrorStatus(res["lastErrorStatus"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("config", flattenCloudIotDeviceConfig(res["config"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("state", flattenCloudIotDeviceState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("log_level", flattenCloudIotDeviceLogLevel(res["logLevel"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("metadata", flattenCloudIotDeviceMetadata(res["metadata"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("gateway_config", flattenCloudIotDeviceGatewayConfig(res["gatewayConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Device: %s", err) - } - - return nil -} - -func resourceCloudIotDeviceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - credentialsProp, err := expandCloudIotDeviceCredentials(d.Get("credentials"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("credentials"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, credentialsProp)) { - obj["credentials"] = credentialsProp - } - blockedProp, err := expandCloudIotDeviceBlocked(d.Get("blocked"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("blocked"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, blockedProp)) { - obj["blocked"] = blockedProp - } - logLevelProp, err := expandCloudIotDeviceLogLevel(d.Get("log_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_level"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, logLevelProp)) { - obj["logLevel"] = logLevelProp - } - metadataProp, err := expandCloudIotDeviceMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - gatewayConfigProp, err := expandCloudIotDeviceGatewayConfig(d.Get("gateway_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("gateway_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gatewayConfigProp)) { - obj["gatewayConfig"] = gatewayConfigProp - } - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}{{registry}}/devices/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Device %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("credentials") { - updateMask = append(updateMask, "credentials") - } - - if d.HasChange("blocked") { - updateMask = append(updateMask, "blocked") - } - - if 
d.HasChange("log_level") { - updateMask = append(updateMask, "logLevel") - } - - if d.HasChange("metadata") { - updateMask = append(updateMask, "metadata") - } - - if d.HasChange("gateway_config") { - updateMask = append(updateMask, "gateway_config.gateway_auth_method") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Device %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Device %q: %#v", d.Id(), res) - } - - return resourceCloudIotDeviceRead(d, meta) -} - -func resourceCloudIotDeviceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}{{registry}}/devices/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Device %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Device") - } - - log.Printf("[DEBUG] Finished deleting Device %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudIotDeviceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, 
error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P.+)/devices/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{registry}}/devices/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenCloudIotDeviceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceNumId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceCredentials(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "expiration_time": flattenCloudIotDeviceCredentialsExpirationTime(original["expirationTime"], d, config), - "public_key": flattenCloudIotDeviceCredentialsPublicKey(original["publicKey"], d, config), - }) - } - return transformed -} -func flattenCloudIotDeviceCredentialsExpirationTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceCredentialsPublicKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["format"] = - flattenCloudIotDeviceCredentialsPublicKeyFormat(original["format"], d, config) - transformed["key"] = - flattenCloudIotDeviceCredentialsPublicKeyKey(original["key"], d, config) - return []interface{}{transformed} -} -func 
flattenCloudIotDeviceCredentialsPublicKeyFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceCredentialsPublicKeyKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastHeartbeatTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastEventTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastStateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastConfigAckTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastConfigSendTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceBlocked(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastErrorTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastErrorStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["number"] = - flattenCloudIotDeviceLastErrorStatusNumber(original["number"], d, config) - transformed["message"] = - flattenCloudIotDeviceLastErrorStatusMessage(original["message"], d, config) - transformed["details"] = - flattenCloudIotDeviceLastErrorStatusDetails(original["details"], d, config) - return []interface{}{transformed} -} -func flattenCloudIotDeviceLastErrorStatusNumber(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := 
StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenCloudIotDeviceLastErrorStatusMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastErrorStatusDetails(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["version"] = - flattenCloudIotDeviceConfigVersion(original["version"], d, config) - transformed["cloud_update_time"] = - flattenCloudIotDeviceConfigCloudUpdateTime(original["cloudUpdateTime"], d, config) - transformed["device_ack_time"] = - flattenCloudIotDeviceConfigDeviceAckTime(original["deviceAckTime"], d, config) - transformed["binary_data"] = - flattenCloudIotDeviceConfigBinaryData(original["binaryData"], d, config) - return []interface{}{transformed} -} -func flattenCloudIotDeviceConfigVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceConfigCloudUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceConfigDeviceAckTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceConfigBinaryData(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - 
return nil - } - transformed := make(map[string]interface{}) - transformed["update_time"] = - flattenCloudIotDeviceStateUpdateTime(original["updateTime"], d, config) - transformed["binary_data"] = - flattenCloudIotDeviceStateBinaryData(original["binaryData"], d, config) - return []interface{}{transformed} -} -func flattenCloudIotDeviceStateUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceStateBinaryData(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLogLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceMetadata(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceGatewayConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["gateway_type"] = - flattenCloudIotDeviceGatewayConfigGatewayType(original["gatewayType"], d, config) - transformed["gateway_auth_method"] = - flattenCloudIotDeviceGatewayConfigGatewayAuthMethod(original["gatewayAuthMethod"], d, config) - transformed["last_accessed_gateway_id"] = - flattenCloudIotDeviceGatewayConfigLastAccessedGatewayId(original["lastAccessedGatewayId"], d, config) - transformed["last_accessed_gateway_time"] = - flattenCloudIotDeviceGatewayConfigLastAccessedGatewayTime(original["lastAccessedGatewayTime"], d, config) - return []interface{}{transformed} -} -func flattenCloudIotDeviceGatewayConfigGatewayType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceGatewayConfigGatewayAuthMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudIotDeviceGatewayConfigLastAccessedGatewayId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceGatewayConfigLastAccessedGatewayTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudIotDeviceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpirationTime, err := expandCloudIotDeviceCredentialsExpirationTime(original["expiration_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExpirationTime); val.IsValid() && !isEmptyValue(val) { - transformed["expirationTime"] = transformedExpirationTime - } - - transformedPublicKey, err := expandCloudIotDeviceCredentialsPublicKey(original["public_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPublicKey); val.IsValid() && !isEmptyValue(val) { - transformed["publicKey"] = transformedPublicKey - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudIotDeviceCredentialsExpirationTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceCredentialsPublicKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFormat, err := expandCloudIotDeviceCredentialsPublicKeyFormat(original["format"], d, 
config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFormat); val.IsValid() && !isEmptyValue(val) { - transformed["format"] = transformedFormat - } - - transformedKey, err := expandCloudIotDeviceCredentialsPublicKeyKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - return transformed, nil -} - -func expandCloudIotDeviceCredentialsPublicKeyFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceCredentialsPublicKeyKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceBlocked(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceLogLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceMetadata(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudIotDeviceGatewayConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedGatewayType, err := expandCloudIotDeviceGatewayConfigGatewayType(original["gateway_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGatewayType); val.IsValid() && !isEmptyValue(val) { - transformed["gatewayType"] = transformedGatewayType - } - - 
transformedGatewayAuthMethod, err := expandCloudIotDeviceGatewayConfigGatewayAuthMethod(original["gateway_auth_method"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGatewayAuthMethod); val.IsValid() && !isEmptyValue(val) { - transformed["gatewayAuthMethod"] = transformedGatewayAuthMethod - } - - transformedLastAccessedGatewayId, err := expandCloudIotDeviceGatewayConfigLastAccessedGatewayId(original["last_accessed_gateway_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLastAccessedGatewayId); val.IsValid() && !isEmptyValue(val) { - transformed["lastAccessedGatewayId"] = transformedLastAccessedGatewayId - } - - transformedLastAccessedGatewayTime, err := expandCloudIotDeviceGatewayConfigLastAccessedGatewayTime(original["last_accessed_gateway_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLastAccessedGatewayTime); val.IsValid() && !isEmptyValue(val) { - transformed["lastAccessedGatewayTime"] = transformedLastAccessedGatewayTime - } - - return transformed, nil -} - -func expandCloudIotDeviceGatewayConfigGatewayType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceGatewayConfigGatewayAuthMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceGatewayConfigLastAccessedGatewayId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceGatewayConfigLastAccessedGatewayTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudiot_registry.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudiot_registry.go deleted file mode 100644 index 6b64a84abe..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudiot_registry.go +++ /dev/null @@ -1,843 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func expandCloudIotDeviceRegistryHTTPConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHTTPEnabledState, err := expandCloudIotDeviceRegistryHTTPEnabledState(original["http_enabled_state"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHTTPEnabledState); val.IsValid() && !isEmptyValue(val) { - transformed["httpEnabledState"] = transformedHTTPEnabledState - } - - return transformed, nil -} - -func expandCloudIotDeviceRegistryHTTPEnabledState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryMqttConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMqttEnabledState, err := 
expandCloudIotDeviceRegistryMqttEnabledState(original["mqtt_enabled_state"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMqttEnabledState); val.IsValid() && !isEmptyValue(val) { - transformed["mqttEnabledState"] = transformedMqttEnabledState - } - - return transformed, nil -} - -func expandCloudIotDeviceRegistryMqttEnabledState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryStateNotificationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubTopicName, err := expandCloudIotDeviceRegistryStateNotificationConfigPubsubTopicName(original["pubsub_topic_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPubsubTopicName); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubTopicName"] = transformedPubsubTopicName - } - - return transformed, nil -} - -func expandCloudIotDeviceRegistryStateNotificationConfigPubsubTopicName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPublicKeyCertificate, err := expandCloudIotDeviceRegistryCredentialsPublicKeyCertificate(original["public_key_certificate"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPublicKeyCertificate); val.IsValid() && !isEmptyValue(val) { - transformed["publicKeyCertificate"] = transformedPublicKeyCertificate - } - - req = append(req, 
transformed) - } - - return req, nil -} - -func expandCloudIotDeviceRegistryCredentialsPublicKeyCertificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFormat, err := expandCloudIotDeviceRegistryPublicKeyCertificateFormat(original["format"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFormat); val.IsValid() && !isEmptyValue(val) { - transformed["format"] = transformedFormat - } - - transformedCertificate, err := expandCloudIotDeviceRegistryPublicKeyCertificateCertificate(original["certificate"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCertificate); val.IsValid() && !isEmptyValue(val) { - transformed["certificate"] = transformedCertificate - } - - return transformed, nil -} - -func expandCloudIotDeviceRegistryPublicKeyCertificateFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryPublicKeyCertificateCertificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenCloudIotDeviceRegistryCredentials(v interface{}, d *schema.ResourceData, config *Config) interface{} { - log.Printf("[DEBUG] Flattening device resitry credentials: %q", d.Id()) - if v == nil { - log.Printf("[DEBUG] The credentials array is nil: %q", d.Id()) - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - log.Printf("[DEBUG] Original credential: %+v", original) - if len(original) < 1 { - log.Printf("[DEBUG] Excluding empty credential that the API returned. 
%q", d.Id()) - continue - } - log.Printf("[DEBUG] Credentials array before appending a new credential: %+v", transformed) - transformed = append(transformed, map[string]interface{}{ - "public_key_certificate": flattenCloudIotDeviceRegistryCredentialsPublicKeyCertificate(original["publicKeyCertificate"], d, config), - }) - log.Printf("[DEBUG] Credentials array after appending a new credential: %+v", transformed) - } - return transformed -} - -func flattenCloudIotDeviceRegistryCredentialsPublicKeyCertificate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - log.Printf("[DEBUG] Flattening device resitry credentials public key certificate: %q", d.Id()) - if v == nil { - log.Printf("[DEBUG] The public key certificate is nil: %q", d.Id()) - return v - } - - original := v.(map[string]interface{}) - log.Printf("[DEBUG] Original public key certificate: %+v", original) - transformed := make(map[string]interface{}) - - transformedPublicKeyCertificateFormat := flattenCloudIotDeviceRegistryPublicKeyCertificateFormat(original["format"], d, config) - transformed["format"] = transformedPublicKeyCertificateFormat - - transformedPublicKeyCertificateCertificate := flattenCloudIotDeviceRegistryPublicKeyCertificateCertificate(original["certificate"], d, config) - transformed["certificate"] = transformedPublicKeyCertificateCertificate - - log.Printf("[DEBUG] Transformed public key certificate: %+v", transformed) - - return transformed -} - -func flattenCloudIotDeviceRegistryPublicKeyCertificateFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceRegistryPublicKeyCertificateCertificate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceRegistryHTTPConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - 
- transformedHTTPEnabledState := flattenCloudIotDeviceRegistryHTTPConfigHTTPEnabledState(original["httpEnabledState"], d, config) - transformed["http_enabled_state"] = transformedHTTPEnabledState - - return transformed -} - -func flattenCloudIotDeviceRegistryHTTPConfigHTTPEnabledState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceRegistryMqttConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMqttEnabledState := flattenCloudIotDeviceRegistryMqttConfigMqttEnabledState(original["mqttEnabledState"], d, config) - transformed["mqtt_enabled_state"] = transformedMqttEnabledState - - return transformed -} - -func flattenCloudIotDeviceRegistryMqttConfigMqttEnabledState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceRegistryStateNotificationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - log.Printf("[DEBUG] Flattening state notification config: %+v", v) - if v == nil { - return v - } - - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubTopicName := flattenCloudIotDeviceRegistryStateNotificationConfigPubsubTopicName(original["pubsubTopicName"], d, config) - if val := reflect.ValueOf(transformedPubsubTopicName); val.IsValid() && !isEmptyValue(val) { - log.Printf("[DEBUG] pubsub topic name is not null: %v", d.Get("pubsub_topic_name")) - transformed["pubsub_topic_name"] = transformedPubsubTopicName - } - - return transformed -} - -func flattenCloudIotDeviceRegistryStateNotificationConfigPubsubTopicName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func validateCloudIotDeviceRegistryID(v interface{}, k string) (warnings []string, errors []error) { - value := v.(string) - 
if strings.HasPrefix(value, "goog") { - errors = append(errors, fmt.Errorf( - "%q (%q) can not start with \"goog\"", k, value)) - } - if !regexp.MustCompile(CloudIoTIdRegex).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q (%q) doesn't match regexp %q", k, value, CloudIoTIdRegex)) - } - return -} - -func validateCloudIotDeviceRegistrySubfolderMatch(v interface{}, k string) (warnings []string, errors []error) { - value := v.(string) - if strings.HasPrefix(value, "/") { - errors = append(errors, fmt.Errorf( - "%q (%q) can not start with '/'", k, value)) - } - return -} - -func ResourceCloudIotDeviceRegistry() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudIotDeviceRegistryCreate, - Read: resourceCloudIotDeviceRegistryRead, - Update: resourceCloudIotDeviceRegistryUpdate, - Delete: resourceCloudIotDeviceRegistryDelete, - - Importer: &schema.ResourceImporter{ - State: resourceCloudIotDeviceRegistryImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateCloudIotDeviceRegistryID, - Description: `A unique name for the resource, required by device registry.`, - }, - "event_notification_configs": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `List of configurations for event notifications, such as PubSub topics -to publish device events to.`, - MaxItems: 10, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pubsub_topic_name": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `PubSub topic name to publish device events.`, - }, - "subfolder_matches": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: 
validateCloudIotDeviceRegistrySubfolderMatch, - Description: `If the subfolder name matches this string exactly, this -configuration will be used. The string must not include the -leading '/' character. If empty, all strings are matched. Empty -value can only be used for the last 'event_notification_configs' -item.`, - }, - }, - }, - }, - "log_level": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "ERROR", "INFO", "DEBUG", ""}), - DiffSuppressFunc: emptyOrDefaultStringSuppress("NONE"), - Description: `The default logging verbosity for activity from devices in this -registry. Specifies which events should be written to logs. For -example, if the LogLevel is ERROR, only events that terminate in -errors will be logged. LogLevel is inclusive; enabling INFO logging -will also enable ERROR logging. Default value: "NONE" Possible values: ["NONE", "ERROR", "INFO", "DEBUG"]`, - Default: "NONE", - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The region in which the created registry should reside. 
-If it is not provided, the provider region is used.`, - }, - "state_notification_config": { - Type: schema.TypeMap, - Description: `A PubSub topic to publish device state updates.`, - Optional: true, - }, - "mqtt_config": { - Type: schema.TypeMap, - Description: `Activate or deactivate MQTT.`, - Computed: true, - Optional: true, - }, - "http_config": { - Type: schema.TypeMap, - Description: `Activate or deactivate HTTP.`, - Computed: true, - Optional: true, - }, - "credentials": { - Type: schema.TypeList, - Description: `List of public key certificates to authenticate devices.`, - Optional: true, - MaxItems: 10, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "public_key_certificate": { - Type: schema.TypeMap, - Description: `A public key certificate format and data.`, - Required: true, - }, - }, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudIotDeviceRegistryCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - idProp, err := expandCloudIotDeviceRegistryName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - eventNotificationConfigsProp, err := expandCloudIotDeviceRegistryEventNotificationConfigs(d.Get("event_notification_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("event_notification_configs"); !isEmptyValue(reflect.ValueOf(eventNotificationConfigsProp)) && (ok || !reflect.DeepEqual(v, eventNotificationConfigsProp)) { - obj["eventNotificationConfigs"] = eventNotificationConfigsProp - } - logLevelProp, err := 
expandCloudIotDeviceRegistryLogLevel(d.Get("log_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_level"); !isEmptyValue(reflect.ValueOf(logLevelProp)) && (ok || !reflect.DeepEqual(v, logLevelProp)) { - obj["logLevel"] = logLevelProp - } - - obj, err = resourceCloudIotDeviceRegistryEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}projects/{{project}}/locations/{{region}}/registries") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new DeviceRegistry: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DeviceRegistry: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating DeviceRegistry: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/registries/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating DeviceRegistry %q: %#v", d.Id(), res) - - return resourceCloudIotDeviceRegistryRead(d, meta) -} - -func resourceCloudIotDeviceRegistryRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}projects/{{project}}/locations/{{region}}/registries/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching 
project for DeviceRegistry: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudIotDeviceRegistry %q", d.Id())) - } - - res, err = resourceCloudIotDeviceRegistryDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing CloudIotDeviceRegistry because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - - if err := d.Set("name", flattenCloudIotDeviceRegistryName(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - if err := d.Set("event_notification_configs", flattenCloudIotDeviceRegistryEventNotificationConfigs(res["eventNotificationConfigs"], d, config)); err != nil { - return fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - if err := d.Set("log_level", flattenCloudIotDeviceRegistryLogLevel(res["logLevel"], d, config)); err != nil { - return fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - - return nil -} - -func resourceCloudIotDeviceRegistryUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DeviceRegistry: 
%s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - eventNotificationConfigsProp, err := expandCloudIotDeviceRegistryEventNotificationConfigs(d.Get("event_notification_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("event_notification_configs"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, eventNotificationConfigsProp)) { - obj["eventNotificationConfigs"] = eventNotificationConfigsProp - } - logLevelProp, err := expandCloudIotDeviceRegistryLogLevel(d.Get("log_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_level"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, logLevelProp)) { - obj["logLevel"] = logLevelProp - } - - obj, err = resourceCloudIotDeviceRegistryEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}projects/{{project}}/locations/{{region}}/registries/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating DeviceRegistry %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("event_notification_configs") { - updateMask = append(updateMask, "eventNotificationConfigs") - } - - if d.HasChange("log_level") { - updateMask = append(updateMask, "logLevel") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - log.Printf("[DEBUG] updateMask before adding extra schema entries %q: %v", d.Id(), updateMask) - - log.Printf("[DEBUG] Pre-update on state notification config: %q", d.Id()) - if d.HasChange("state_notification_config") { - log.Printf("[DEBUG] %q stateNotificationConfig.pubsubTopicName has a change. 
Adding it to the update mask", d.Id()) - updateMask = append(updateMask, "stateNotificationConfig.pubsubTopicName") - } - - log.Printf("[DEBUG] Pre-update on MQTT config: %q", d.Id()) - if d.HasChange("mqtt_config") { - log.Printf("[DEBUG] %q mqttConfig.mqttEnabledState has a change. Adding it to the update mask", d.Id()) - updateMask = append(updateMask, "mqttConfig.mqttEnabledState") - } - - log.Printf("[DEBUG] Pre-update on HTTP config: %q", d.Id()) - if d.HasChange("http_config") { - log.Printf("[DEBUG] %q httpConfig.httpEnabledState has a change. Adding it to the update mask", d.Id()) - updateMask = append(updateMask, "httpConfig.httpEnabledState") - } - - log.Printf("[DEBUG] Pre-update on credentials: %q", d.Id()) - if d.HasChange("credentials") { - log.Printf("[DEBUG] %q credentials has a change. Adding it to the update mask", d.Id()) - updateMask = append(updateMask, "credentials") - } - - log.Printf("[DEBUG] updateMask after adding extra schema entries %q: %v", d.Id(), updateMask) - - // Refreshing updateMask after adding extra schema entries - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - log.Printf("[DEBUG] Update URL %q: %v", d.Id(), url) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating DeviceRegistry %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating DeviceRegistry %q: %#v", d.Id(), res) - } - - return resourceCloudIotDeviceRegistryRead(d, meta) -} - -func resourceCloudIotDeviceRegistryDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err 
- } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DeviceRegistry: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}projects/{{project}}/locations/{{region}}/registries/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting DeviceRegistry %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DeviceRegistry") - } - - log.Printf("[DEBUG] Finished deleting DeviceRegistry %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudIotDeviceRegistryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)/locations/(?P[^/]+)/registries/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/registries/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenCloudIotDeviceRegistryName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceRegistryEventNotificationConfigs(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if 
len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "subfolder_matches": flattenCloudIotDeviceRegistryEventNotificationConfigsSubfolderMatches(original["subfolderMatches"], d, config), - "pubsub_topic_name": flattenCloudIotDeviceRegistryEventNotificationConfigsPubsubTopicName(original["pubsubTopicName"], d, config), - }) - } - return transformed -} -func flattenCloudIotDeviceRegistryEventNotificationConfigsSubfolderMatches(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceRegistryEventNotificationConfigsPubsubTopicName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceRegistryLogLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudIotDeviceRegistryName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryEventNotificationConfigs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSubfolderMatches, err := expandCloudIotDeviceRegistryEventNotificationConfigsSubfolderMatches(original["subfolder_matches"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSubfolderMatches); val.IsValid() && !isEmptyValue(val) { - transformed["subfolderMatches"] = transformedSubfolderMatches - } - - transformedPubsubTopicName, err := expandCloudIotDeviceRegistryEventNotificationConfigsPubsubTopicName(original["pubsub_topic_name"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedPubsubTopicName); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubTopicName"] = transformedPubsubTopicName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudIotDeviceRegistryEventNotificationConfigsSubfolderMatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryEventNotificationConfigsPubsubTopicName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryLogLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudIotDeviceRegistryEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - - log.Printf("[DEBUG] Resource data before encoding extra schema entries %q: %#v", d.Id(), obj) - - log.Printf("[DEBUG] Encoding state notification config: %q", d.Id()) - stateNotificationConfigProp, err := expandCloudIotDeviceRegistryStateNotificationConfig(d.Get("state_notification_config"), d, config) - if err != nil { - return nil, err - } else if v, ok := d.GetOkExists("state_notification_config"); !isEmptyValue(reflect.ValueOf(stateNotificationConfigProp)) && (ok || !reflect.DeepEqual(v, stateNotificationConfigProp)) { - log.Printf("[DEBUG] Encoding %q. Setting stateNotificationConfig: %#v", d.Id(), stateNotificationConfigProp) - obj["stateNotificationConfig"] = stateNotificationConfigProp - } - - log.Printf("[DEBUG] Encoding HTTP config: %q", d.Id()) - httpConfigProp, err := expandCloudIotDeviceRegistryHTTPConfig(d.Get("http_config"), d, config) - if err != nil { - return nil, err - } else if v, ok := d.GetOkExists("http_config"); !isEmptyValue(reflect.ValueOf(httpConfigProp)) && (ok || !reflect.DeepEqual(v, httpConfigProp)) { - log.Printf("[DEBUG] Encoding %q. 
Setting httpConfig: %#v", d.Id(), httpConfigProp) - obj["httpConfig"] = httpConfigProp - } - - log.Printf("[DEBUG] Encoding MQTT config: %q", d.Id()) - mqttConfigProp, err := expandCloudIotDeviceRegistryMqttConfig(d.Get("mqtt_config"), d, config) - if err != nil { - return nil, err - } else if v, ok := d.GetOkExists("mqtt_config"); !isEmptyValue(reflect.ValueOf(mqttConfigProp)) && (ok || !reflect.DeepEqual(v, mqttConfigProp)) { - log.Printf("[DEBUG] Encoding %q. Setting mqttConfig: %#v", d.Id(), mqttConfigProp) - obj["mqttConfig"] = mqttConfigProp - } - - log.Printf("[DEBUG] Encoding credentials: %q", d.Id()) - credentialsProp, err := expandCloudIotDeviceRegistryCredentials(d.Get("credentials"), d, config) - if err != nil { - return nil, err - } else if v, ok := d.GetOkExists("credentials"); !isEmptyValue(reflect.ValueOf(credentialsProp)) && (ok || !reflect.DeepEqual(v, credentialsProp)) { - log.Printf("[DEBUG] Encoding %q. Setting credentials: %#v", d.Id(), credentialsProp) - obj["credentials"] = credentialsProp - } - - log.Printf("[DEBUG] Resource data after encoding extra schema entries %q: %#v", d.Id(), obj) - - return obj, nil -} - -func resourceCloudIotDeviceRegistryDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - - log.Printf("[DEBUG] Decoding state notification config: %q", d.Id()) - log.Printf("[DEBUG] State notification config before decoding: %v", d.Get("state_notification_config")) - if err := d.Set("state_notification_config", flattenCloudIotDeviceRegistryStateNotificationConfig(res["stateNotificationConfig"], d, config)); err != nil { - return nil, fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - log.Printf("[DEBUG] State notification config after decoding: %v", d.Get("state_notification_config")) - - log.Printf("[DEBUG] Decoding HTTP config: %q", d.Id()) - log.Printf("[DEBUG] HTTP config before decoding: %v", d.Get("http_config")) - if err := 
d.Set("http_config", flattenCloudIotDeviceRegistryHTTPConfig(res["httpConfig"], d, config)); err != nil { - return nil, fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - log.Printf("[DEBUG] HTTP config after decoding: %v", d.Get("http_config")) - - log.Printf("[DEBUG] Decoding MQTT config: %q", d.Id()) - log.Printf("[DEBUG] MQTT config before decoding: %v", d.Get("mqtt_config")) - if err := d.Set("mqtt_config", flattenCloudIotDeviceRegistryMqttConfig(res["mqttConfig"], d, config)); err != nil { - return nil, fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - log.Printf("[DEBUG] MQTT config after decoding: %v", d.Get("mqtt_config")) - - log.Printf("[DEBUG] Decoding credentials: %q", d.Id()) - log.Printf("[DEBUG] credentials before decoding: %v", d.Get("credentials")) - if err := d.Set("credentials", flattenCloudIotDeviceRegistryCredentials(res["credentials"], d, config)); err != nil { - return nil, fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - log.Printf("[DEBUG] credentials after decoding: %v", d.Get("credentials")) - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_address.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_address.go deleted file mode 100644 index c4334f2103..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_address.go +++ /dev/null @@ -1,558 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeAddress() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeAddressCreate, - Read: resourceComputeAddressRead, - Delete: resourceComputeAddressDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeAddressImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`), - Description: `Name of the resource. The name must be 1-63 characters long, and -comply with RFC1035. Specifically, the name must be 1-63 characters -long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' -which means the first character must be a lowercase letter, and all -following characters must be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "address": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The static external IP address represented by this resource. Only -IPv4 is supported. An address may only be specified for INTERNAL -address types. The IP address must be inside the specified subnetwork, -if any. Set by the API if undefined.`, - }, - "address_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"INTERNAL", "EXTERNAL", ""}), - Description: `The type of address to reserve. 
Default value: "EXTERNAL" Possible values: ["INTERNAL", "EXTERNAL"]`, - Default: "EXTERNAL", - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "network": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the network in which to reserve the address. This field -can only be used with INTERNAL type with the VPC_PEERING and -IPSEC_INTERCONNECT purposes.`, - }, - "network_tier": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"PREMIUM", "STANDARD", ""}), - Description: `The networking tier used for configuring this address. If this field is not -specified, it is assumed to be PREMIUM. Possible values: ["PREMIUM", "STANDARD"]`, - }, - "prefix_length": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The prefix length if the resource represents an IP range.`, - }, - "purpose": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The purpose of this resource, which can be one of the following values. - -* GCE_ENDPOINT for addresses that are used by VM instances, alias IP -ranges, load balancers, and similar resources. - -* SHARED_LOADBALANCER_VIP for an address that can be used by multiple -internal load balancers. - -* VPC_PEERING for addresses that are reserved for VPC peer networks. - -* IPSEC_INTERCONNECT for addresses created from a private IP range that -are reserved for a VLAN attachment in an HA VPN over Cloud Interconnect -configuration. These addresses are regional resources. - -* PRIVATE_SERVICE_CONNECT for a private network address that is used to -configure Private Service Connect. Only global internal addresses can use -this purpose. 
- - -This should only be set when using an Internal address.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Region in which the created address should reside. -If it is not provided, the provider region is used.`, - }, - "subnetwork": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the subnetwork in which to reserve the address. If an IP -address is specified, it must be within the subnetwork's IP range. -This field can only be used with INTERNAL type with -GCE_ENDPOINT/DNS_RESOLVER purposes.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "users": { - Type: schema.TypeList, - Computed: true, - Description: `The URLs of the resources that are using this address.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - addressProp, err := expandComputeAddressAddress(d.Get("address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("address"); !isEmptyValue(reflect.ValueOf(addressProp)) && (ok || !reflect.DeepEqual(v, addressProp)) { - obj["address"] = addressProp - } - addressTypeProp, err := expandComputeAddressAddressType(d.Get("address_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("address_type"); 
!isEmptyValue(reflect.ValueOf(addressTypeProp)) && (ok || !reflect.DeepEqual(v, addressTypeProp)) { - obj["addressType"] = addressTypeProp - } - descriptionProp, err := expandComputeAddressDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeAddressName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - purposeProp, err := expandComputeAddressPurpose(d.Get("purpose"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("purpose"); !isEmptyValue(reflect.ValueOf(purposeProp)) && (ok || !reflect.DeepEqual(v, purposeProp)) { - obj["purpose"] = purposeProp - } - networkTierProp, err := expandComputeAddressNetworkTier(d.Get("network_tier"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_tier"); !isEmptyValue(reflect.ValueOf(networkTierProp)) && (ok || !reflect.DeepEqual(v, networkTierProp)) { - obj["networkTier"] = networkTierProp - } - subnetworkProp, err := expandComputeAddressSubnetwork(d.Get("subnetwork"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("subnetwork"); !isEmptyValue(reflect.ValueOf(subnetworkProp)) && (ok || !reflect.DeepEqual(v, subnetworkProp)) { - obj["subnetwork"] = subnetworkProp - } - networkProp, err := expandComputeAddressNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - prefixLengthProp, err := expandComputeAddressPrefixLength(d.Get("prefix_length"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("prefix_length"); !isEmptyValue(reflect.ValueOf(prefixLengthProp)) && (ok || !reflect.DeepEqual(v, prefixLengthProp)) { - obj["prefixLength"] = prefixLengthProp - } - regionProp, err := expandComputeAddressRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/addresses") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Address: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Address: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Address: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/addresses/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating Address", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Address: %s", err) - } - - log.Printf("[DEBUG] Finished creating Address %q: %#v", d.Id(), res) - - return resourceComputeAddressRead(d, meta) -} - -func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/addresses/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Address: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeAddress %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Address: %s", err) - } - - if err := d.Set("address", flattenComputeAddressAddress(res["address"], d, config)); err != nil { - return fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("address_type", flattenComputeAddressAddressType(res["addressType"], d, config)); err != nil { - return fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeAddressCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("description", flattenComputeAddressDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("name", flattenComputeAddressName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("purpose", flattenComputeAddressPurpose(res["purpose"], d, config)); err != nil { - return fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("network_tier", flattenComputeAddressNetworkTier(res["networkTier"], d, config)); err != nil { - return 
fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("subnetwork", flattenComputeAddressSubnetwork(res["subnetwork"], d, config)); err != nil { - return fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("users", flattenComputeAddressUsers(res["users"], d, config)); err != nil { - return fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("network", flattenComputeAddressNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("prefix_length", flattenComputeAddressPrefixLength(res["prefixLength"], d, config)); err != nil { - return fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("region", flattenComputeAddressRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading Address: %s", err) - } - - return nil -} - -func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Address: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/addresses/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Address %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, 
"Address") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting Address", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Address %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeAddressImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/addresses/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/addresses/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeAddressAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressAddressType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { - return "EXTERNAL" - } - - return v -} - -func flattenComputeAddressCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressPurpose(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressNetworkTier(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressSubnetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - 
return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeAddressUsers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeAddressPrefixLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeAddressRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeAddressAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressAddressType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressPurpose(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressNetworkTier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressSubnetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", 
d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for subnetwork: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeAddressNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeAddressPrefixLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_backend_bucket.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_backend_bucket.go deleted file mode 100644 index 266ee25522..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_backend_bucket.go +++ /dev/null @@ -1,1124 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeBackendBucket() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeBackendBucketCreate, - Read: resourceComputeBackendBucketRead, - Update: resourceComputeBackendBucketUpdate, - Delete: resourceComputeBackendBucketDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeBackendBucketImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "bucket_name": { - Type: schema.TypeString, - Required: true, - Description: `Cloud Storage bucket name.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`), - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the -last character, which cannot be a dash.`, - }, - "cdn_policy": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Cloud CDN configuration for this Backend Bucket.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bypass_cache_on_request_headers": { - Type: schema.TypeList, - Optional: true, - Description: `Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. 
Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.`, - MaxItems: 5, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "header_name": { - Type: schema.TypeString, - Optional: true, - Description: `The header field name to match on when bypassing cache. Values are case-insensitive.`, - }, - }, - }, - }, - "cache_key_policy": { - Type: schema.TypeList, - Optional: true, - Description: `The CacheKeyPolicy for this CdnPolicy.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "include_http_headers": { - Type: schema.TypeList, - Optional: true, - Description: `Allows HTTP request headers (by name) to be used in the -cache key.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.query_string_whitelist", "cdn_policy.0.cache_key_policy.0.include_http_headers"}, - }, - "query_string_whitelist": { - Type: schema.TypeList, - Optional: true, - Description: `Names of query string parameters to include in cache keys. -Default parameters are always included. '&' and '=' will -be percent encoded and not treated as delimiters.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.query_string_whitelist", "cdn_policy.0.cache_key_policy.0.include_http_headers"}, - }, - }, - }, - }, - "cache_mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC", ""}), - Description: `Specifies the cache setting for all responses from this backend. 
-The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC Possible values: ["USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC"]`, - }, - "client_ttl": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Specifies the maximum allowed TTL for cached content served by this origin.`, - }, - "default_ttl": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Specifies the default TTL for cached content served by this origin for responses -that do not have an existing valid TTL (max-age or s-max-age).`, - }, - "max_ttl": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Specifies the maximum allowed TTL for cached content served by this origin.`, - }, - "negative_caching": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: `Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects.`, - }, - "negative_caching_policy": { - Type: schema.TypeList, - Optional: true, - Description: `Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy. -Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's default cache TTLs.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "code": { - Type: schema.TypeInt, - Optional: true, - Description: `The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 -can be specified as values, and you cannot specify a status code more than once.`, - }, - "ttl": { - Type: schema.TypeInt, - Optional: true, - Description: `The TTL (in seconds) for which to cache responses with the corresponding status code. 
The maximum allowed value is 1800s -(30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.`, - }, - }, - }, - }, - "request_coalescing": { - Type: schema.TypeBool, - Optional: true, - Description: `If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.`, - }, - "serve_while_stale": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache.`, - }, - "signed_url_cache_max_age_sec": { - Type: schema.TypeInt, - Optional: true, - Description: `Maximum number of seconds the response to a signed URL request will -be considered fresh. After this time period, -the response will be revalidated before being served. -When serving responses to signed URL requests, -Cloud CDN will internally behave as though -all responses from this backend had a "Cache-Control: public, -max-age=[TTL]" header, regardless of any existing Cache-Control -header. The actual headers served in responses will not be altered.`, - }, - }, - }, - }, - "compression_mode": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"AUTOMATIC", "DISABLED", ""}), - Description: `Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header. 
Possible values: ["AUTOMATIC", "DISABLED"]`, - }, - "custom_response_headers": { - Type: schema.TypeList, - Optional: true, - Description: `Headers that the HTTP/S load balancer should add to proxied responses.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `An optional textual description of the resource; provided by the -client when the resource is created.`, - }, - "edge_security_policy": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The security policy associated with this backend bucket.`, - }, - "enable_cdn": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, enable Cloud CDN for this BackendBucket.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeBackendBucketCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - bucketNameProp, err := expandComputeBackendBucketBucketName(d.Get("bucket_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket_name"); !isEmptyValue(reflect.ValueOf(bucketNameProp)) && (ok || !reflect.DeepEqual(v, bucketNameProp)) { - obj["bucketName"] = bucketNameProp - } - cdnPolicyProp, err := expandComputeBackendBucketCdnPolicy(d.Get("cdn_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cdn_policy"); !isEmptyValue(reflect.ValueOf(cdnPolicyProp)) && (ok || !reflect.DeepEqual(v, cdnPolicyProp)) { - 
obj["cdnPolicy"] = cdnPolicyProp - } - compressionModeProp, err := expandComputeBackendBucketCompressionMode(d.Get("compression_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("compression_mode"); !isEmptyValue(reflect.ValueOf(compressionModeProp)) && (ok || !reflect.DeepEqual(v, compressionModeProp)) { - obj["compressionMode"] = compressionModeProp - } - edgeSecurityPolicyProp, err := expandComputeBackendBucketEdgeSecurityPolicy(d.Get("edge_security_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("edge_security_policy"); !isEmptyValue(reflect.ValueOf(edgeSecurityPolicyProp)) && (ok || !reflect.DeepEqual(v, edgeSecurityPolicyProp)) { - obj["edgeSecurityPolicy"] = edgeSecurityPolicyProp - } - customResponseHeadersProp, err := expandComputeBackendBucketCustomResponseHeaders(d.Get("custom_response_headers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_response_headers"); !isEmptyValue(reflect.ValueOf(customResponseHeadersProp)) && (ok || !reflect.DeepEqual(v, customResponseHeadersProp)) { - obj["customResponseHeaders"] = customResponseHeadersProp - } - descriptionProp, err := expandComputeBackendBucketDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - enableCdnProp, err := expandComputeBackendBucketEnableCdn(d.Get("enable_cdn"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_cdn"); !isEmptyValue(reflect.ValueOf(enableCdnProp)) && (ok || !reflect.DeepEqual(v, enableCdnProp)) { - obj["enableCdn"] = enableCdnProp - } - nameProp, err := expandComputeBackendBucketName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); 
!isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new BackendBucket: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BackendBucket: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating BackendBucket: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/backendBuckets/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating BackendBucket", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create BackendBucket: %s", err) - } - - // security_policy isn't set by Create / Update - if o, n := d.GetChange("edge_security_policy"); o.(string) != n.(string) { - pol, err := ParseSecurityPolicyFieldValue(n.(string), d, config) - if err != nil { - return errwrap.Wrapf("Error parsing Backend Service security policy: {{err}}", err) - } - - spr := emptySecurityPolicyReference() - spr.SecurityPolicy = pol.RelativeLink() - op, err := config.NewComputeClient(userAgent).BackendBuckets.SetEdgeSecurityPolicy(project, obj["name"].(string), spr).Do() - if err != nil { - return errwrap.Wrapf("Error setting Backend Service security policy: {{err}}", err) - } - // This uses the create 
timeout for simplicity, though technically this code appears in both create and update - waitErr := ComputeOperationWaitTime(config, op, project, "Setting Backend Service Security Policy", userAgent, d.Timeout(schema.TimeoutCreate)) - if waitErr != nil { - return waitErr - } - } - - log.Printf("[DEBUG] Finished creating BackendBucket %q: %#v", d.Id(), res) - - return resourceComputeBackendBucketRead(d, meta) -} - -func resourceComputeBackendBucketRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BackendBucket: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeBackendBucket %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading BackendBucket: %s", err) - } - - if err := d.Set("bucket_name", flattenComputeBackendBucketBucketName(res["bucketName"], d, config)); err != nil { - return fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("cdn_policy", flattenComputeBackendBucketCdnPolicy(res["cdnPolicy"], d, config)); err != nil { - return fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("compression_mode", flattenComputeBackendBucketCompressionMode(res["compressionMode"], d, config)); err != nil { - return fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := 
d.Set("edge_security_policy", flattenComputeBackendBucketEdgeSecurityPolicy(res["edgeSecurityPolicy"], d, config)); err != nil { - return fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("custom_response_headers", flattenComputeBackendBucketCustomResponseHeaders(res["customResponseHeaders"], d, config)); err != nil { - return fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeBackendBucketCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("description", flattenComputeBackendBucketDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("enable_cdn", flattenComputeBackendBucketEnableCdn(res["enableCdn"], d, config)); err != nil { - return fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("name", flattenComputeBackendBucketName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading BackendBucket: %s", err) - } - - return nil -} - -func resourceComputeBackendBucketUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BackendBucket: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - bucketNameProp, err := expandComputeBackendBucketBucketName(d.Get("bucket_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
bucketNameProp)) { - obj["bucketName"] = bucketNameProp - } - cdnPolicyProp, err := expandComputeBackendBucketCdnPolicy(d.Get("cdn_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cdn_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cdnPolicyProp)) { - obj["cdnPolicy"] = cdnPolicyProp - } - compressionModeProp, err := expandComputeBackendBucketCompressionMode(d.Get("compression_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("compression_mode"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, compressionModeProp)) { - obj["compressionMode"] = compressionModeProp - } - edgeSecurityPolicyProp, err := expandComputeBackendBucketEdgeSecurityPolicy(d.Get("edge_security_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("edge_security_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, edgeSecurityPolicyProp)) { - obj["edgeSecurityPolicy"] = edgeSecurityPolicyProp - } - customResponseHeadersProp, err := expandComputeBackendBucketCustomResponseHeaders(d.Get("custom_response_headers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_response_headers"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, customResponseHeadersProp)) { - obj["customResponseHeaders"] = customResponseHeadersProp - } - descriptionProp, err := expandComputeBackendBucketDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - enableCdnProp, err := expandComputeBackendBucketEnableCdn(d.Get("enable_cdn"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_cdn"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableCdnProp)) 
{ - obj["enableCdn"] = enableCdnProp - } - nameProp, err := expandComputeBackendBucketName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating BackendBucket %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating BackendBucket %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating BackendBucket %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating BackendBucket", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - // security_policy isn't set by Create / Update - if o, n := d.GetChange("edge_security_policy"); o.(string) != n.(string) { - pol, err := ParseSecurityPolicyFieldValue(n.(string), d, config) - if err != nil { - return errwrap.Wrapf("Error parsing Backend Service security policy: {{err}}", err) - } - - spr := emptySecurityPolicyReference() - spr.SecurityPolicy = pol.RelativeLink() - op, err := config.NewComputeClient(userAgent).BackendBuckets.SetEdgeSecurityPolicy(project, obj["name"].(string), spr).Do() - if err != nil { - return errwrap.Wrapf("Error setting Backend Service security policy: {{err}}", err) - } - // This uses the create timeout for simplicity, though technically this code appears in both create and update - waitErr := ComputeOperationWaitTime(config, op, project, "Setting Backend Service Security Policy", userAgent, 
d.Timeout(schema.TimeoutCreate)) - if waitErr != nil { - return waitErr - } - } - return resourceComputeBackendBucketRead(d, meta) -} - -func resourceComputeBackendBucketDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BackendBucket: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting BackendBucket %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "BackendBucket") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting BackendBucket", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting BackendBucket %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeBackendBucketImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/backendBuckets/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/backendBuckets/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return 
[]*schema.ResourceData{d}, nil -} - -func flattenComputeBackendBucketBucketName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketCdnPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cache_key_policy"] = - flattenComputeBackendBucketCdnPolicyCacheKeyPolicy(original["cacheKeyPolicy"], d, config) - transformed["signed_url_cache_max_age_sec"] = - flattenComputeBackendBucketCdnPolicySignedUrlCacheMaxAgeSec(original["signedUrlCacheMaxAgeSec"], d, config) - transformed["default_ttl"] = - flattenComputeBackendBucketCdnPolicyDefaultTtl(original["defaultTtl"], d, config) - transformed["max_ttl"] = - flattenComputeBackendBucketCdnPolicyMaxTtl(original["maxTtl"], d, config) - transformed["client_ttl"] = - flattenComputeBackendBucketCdnPolicyClientTtl(original["clientTtl"], d, config) - transformed["negative_caching"] = - flattenComputeBackendBucketCdnPolicyNegativeCaching(original["negativeCaching"], d, config) - transformed["negative_caching_policy"] = - flattenComputeBackendBucketCdnPolicyNegativeCachingPolicy(original["negativeCachingPolicy"], d, config) - transformed["cache_mode"] = - flattenComputeBackendBucketCdnPolicyCacheMode(original["cacheMode"], d, config) - transformed["serve_while_stale"] = - flattenComputeBackendBucketCdnPolicyServeWhileStale(original["serveWhileStale"], d, config) - transformed["request_coalescing"] = - flattenComputeBackendBucketCdnPolicyRequestCoalescing(original["requestCoalescing"], d, config) - transformed["bypass_cache_on_request_headers"] = - flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(original["bypassCacheOnRequestHeaders"], d, config) - return []interface{}{transformed} -} -func flattenComputeBackendBucketCdnPolicyCacheKeyPolicy(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["query_string_whitelist"] = - flattenComputeBackendBucketCdnPolicyCacheKeyPolicyQueryStringWhitelist(original["queryStringWhitelist"], d, config) - transformed["include_http_headers"] = - flattenComputeBackendBucketCdnPolicyCacheKeyPolicyIncludeHttpHeaders(original["includeHttpHeaders"], d, config) - return []interface{}{transformed} -} -func flattenComputeBackendBucketCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketCdnPolicyCacheKeyPolicyIncludeHttpHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeBackendBucketCdnPolicyDefaultTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeBackendBucketCdnPolicyMaxTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 
format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeBackendBucketCdnPolicyClientTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeBackendBucketCdnPolicyNegativeCaching(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketCdnPolicyNegativeCachingPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "code": flattenComputeBackendBucketCdnPolicyNegativeCachingPolicyCode(original["code"], d, config), - "ttl": flattenComputeBackendBucketCdnPolicyNegativeCachingPolicyTtl(original["ttl"], d, config), - }) - } - return transformed -} -func flattenComputeBackendBucketCdnPolicyNegativeCachingPolicyCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := 
v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeBackendBucketCdnPolicyNegativeCachingPolicyTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeBackendBucketCdnPolicyCacheMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketCdnPolicyServeWhileStale(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeBackendBucketCdnPolicyRequestCoalescing(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(original["headerName"], d, config), - }) - } - return 
transformed -} -func flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketCompressionMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketEdgeSecurityPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketCustomResponseHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketEnableCdn(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeBackendBucketBucketName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCacheKeyPolicy, err := expandComputeBackendBucketCdnPolicyCacheKeyPolicy(original["cache_key_policy"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCacheKeyPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["cacheKeyPolicy"] = transformedCacheKeyPolicy - } - - transformedSignedUrlCacheMaxAgeSec, err := 
expandComputeBackendBucketCdnPolicySignedUrlCacheMaxAgeSec(original["signed_url_cache_max_age_sec"], d, config) - if err != nil { - return nil, err - } else { - transformed["signedUrlCacheMaxAgeSec"] = transformedSignedUrlCacheMaxAgeSec - } - - transformedDefaultTtl, err := expandComputeBackendBucketCdnPolicyDefaultTtl(original["default_ttl"], d, config) - if err != nil { - return nil, err - } else { - transformed["defaultTtl"] = transformedDefaultTtl - } - - transformedMaxTtl, err := expandComputeBackendBucketCdnPolicyMaxTtl(original["max_ttl"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxTtl); val.IsValid() && !isEmptyValue(val) { - transformed["maxTtl"] = transformedMaxTtl - } - - transformedClientTtl, err := expandComputeBackendBucketCdnPolicyClientTtl(original["client_ttl"], d, config) - if err != nil { - return nil, err - } else { - transformed["clientTtl"] = transformedClientTtl - } - - transformedNegativeCaching, err := expandComputeBackendBucketCdnPolicyNegativeCaching(original["negative_caching"], d, config) - if err != nil { - return nil, err - } else { - transformed["negativeCaching"] = transformedNegativeCaching - } - - transformedNegativeCachingPolicy, err := expandComputeBackendBucketCdnPolicyNegativeCachingPolicy(original["negative_caching_policy"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNegativeCachingPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["negativeCachingPolicy"] = transformedNegativeCachingPolicy - } - - transformedCacheMode, err := expandComputeBackendBucketCdnPolicyCacheMode(original["cache_mode"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCacheMode); val.IsValid() && !isEmptyValue(val) { - transformed["cacheMode"] = transformedCacheMode - } - - transformedServeWhileStale, err := expandComputeBackendBucketCdnPolicyServeWhileStale(original["serve_while_stale"], d, 
config) - if err != nil { - return nil, err - } else { - transformed["serveWhileStale"] = transformedServeWhileStale - } - - transformedRequestCoalescing, err := expandComputeBackendBucketCdnPolicyRequestCoalescing(original["request_coalescing"], d, config) - if err != nil { - return nil, err - } else { - transformed["requestCoalescing"] = transformedRequestCoalescing - } - - transformedBypassCacheOnRequestHeaders, err := expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(original["bypass_cache_on_request_headers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBypassCacheOnRequestHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["bypassCacheOnRequestHeaders"] = transformedBypassCacheOnRequestHeaders - } - - return transformed, nil -} - -func expandComputeBackendBucketCdnPolicyCacheKeyPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedQueryStringWhitelist, err := expandComputeBackendBucketCdnPolicyCacheKeyPolicyQueryStringWhitelist(original["query_string_whitelist"], d, config) - if err != nil { - return nil, err - } else { - transformed["queryStringWhitelist"] = transformedQueryStringWhitelist - } - - transformedIncludeHttpHeaders, err := expandComputeBackendBucketCdnPolicyCacheKeyPolicyIncludeHttpHeaders(original["include_http_headers"], d, config) - if err != nil { - return nil, err - } else { - transformed["includeHttpHeaders"] = transformedIncludeHttpHeaders - } - - return transformed, nil -} - -func expandComputeBackendBucketCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyCacheKeyPolicyIncludeHttpHeaders(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyDefaultTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyMaxTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyClientTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyNegativeCaching(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyNegativeCachingPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCode, err := expandComputeBackendBucketCdnPolicyNegativeCachingPolicyCode(original["code"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCode); val.IsValid() && !isEmptyValue(val) { - transformed["code"] = transformedCode - } - - transformedTtl, err := expandComputeBackendBucketCdnPolicyNegativeCachingPolicyTtl(original["ttl"], d, config) - if err != nil { - return nil, err - } else { - transformed["ttl"] = transformedTtl - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeBackendBucketCdnPolicyNegativeCachingPolicyCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeBackendBucketCdnPolicyNegativeCachingPolicyTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyCacheMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyServeWhileStale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyRequestCoalescing(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCompressionMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketEdgeSecurityPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCustomResponseHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandComputeBackendBucketDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketEnableCdn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_backend_bucket_signed_url_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_backend_bucket_signed_url_key.go deleted file mode 100644 index 01cf5a51da..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_backend_bucket_signed_url_key.go +++ /dev/null @@ -1,335 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeBackendBucketSignedUrlKey() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeBackendBucketSignedUrlKeyCreate, - Read: resourceComputeBackendBucketSignedUrlKeyRead, - Delete: resourceComputeBackendBucketSignedUrlKeyDelete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "backend_bucket": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The backend bucket this signed URL key belongs.`, - }, - "key_value": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `128-bit key value used for signing the URL. 
The key value must be a -valid RFC 4648 Section 5 base64url encoded string.`, - Sensitive: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`), - Description: `Name of the signed URL key.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeBackendBucketSignedUrlKeyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - keyNameProp, err := expandNestedComputeBackendBucketSignedUrlKeyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(keyNameProp)) && (ok || !reflect.DeepEqual(v, keyNameProp)) { - obj["keyName"] = keyNameProp - } - keyValueProp, err := expandNestedComputeBackendBucketSignedUrlKeyKeyValue(d.Get("key_value"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("key_value"); !isEmptyValue(reflect.ValueOf(keyValueProp)) && (ok || !reflect.DeepEqual(v, keyValueProp)) { - obj["keyValue"] = keyValueProp - } - backendBucketProp, err := expandNestedComputeBackendBucketSignedUrlKeyBackendBucket(d.Get("backend_bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend_bucket"); !isEmptyValue(reflect.ValueOf(backendBucketProp)) && (ok || !reflect.DeepEqual(v, backendBucketProp)) { - obj["backendBucket"] = backendBucketProp - } - - lockName, err := replaceVars(d, config, "signedUrlKey/{{project}}/backendBuckets/{{backend_bucket}}/") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{backend_bucket}}/addSignedUrlKey") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new BackendBucketSignedUrlKey: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BackendBucketSignedUrlKey: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating BackendBucketSignedUrlKey: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/backendBuckets/{{backend_bucket}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating BackendBucketSignedUrlKey", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create BackendBucketSignedUrlKey: %s", err) - } - - log.Printf("[DEBUG] Finished creating BackendBucketSignedUrlKey %q: %#v", d.Id(), res) - - return resourceComputeBackendBucketSignedUrlKeyRead(d, meta) -} - -func resourceComputeBackendBucketSignedUrlKeyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{backend_bucket}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for 
BackendBucketSignedUrlKey: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeBackendBucketSignedUrlKey %q", d.Id())) - } - - res, err = flattenNestedComputeBackendBucketSignedUrlKey(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing ComputeBackendBucketSignedUrlKey because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading BackendBucketSignedUrlKey: %s", err) - } - - if err := d.Set("name", flattenNestedComputeBackendBucketSignedUrlKeyName(res["keyName"], d, config)); err != nil { - return fmt.Errorf("Error reading BackendBucketSignedUrlKey: %s", err) - } - - return nil -} - -func resourceComputeBackendBucketSignedUrlKeyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BackendBucketSignedUrlKey: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "signedUrlKey/{{project}}/backendBuckets/{{backend_bucket}}/") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{backend_bucket}}/deleteSignedUrlKey?keyName={{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting 
BackendBucketSignedUrlKey %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "BackendBucketSignedUrlKey") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting BackendBucketSignedUrlKey", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting BackendBucketSignedUrlKey %q: %#v", d.Id(), res) - return nil -} - -func flattenNestedComputeBackendBucketSignedUrlKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeBackendBucketSignedUrlKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeBackendBucketSignedUrlKeyKeyValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeBackendBucketSignedUrlKeyBackendBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("backendBuckets", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for backend_bucket: %s", err) - } - return f.RelativeLink(), nil -} - -func flattenNestedComputeBackendBucketSignedUrlKey(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["cdnPolicy"] - if !ok || v == nil { - return nil, nil - } - res = v.(map[string]interface{}) - - v, ok = res["signedUrlKeyNames"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list 
out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value cdnPolicy.signedUrlKeyNames. Actual value: %v", v) - } - - _, item, err := resourceComputeBackendBucketSignedUrlKeyFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeBackendBucketSignedUrlKeyFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputeBackendBucketSignedUrlKeyName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeBackendBucketSignedUrlKeyName(expectedName, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - // List response only contains the ID - construct a response object. - item := map[string]interface{}{ - "keyName": itemRaw, - } - - itemName := flattenNestedComputeBackendBucketSignedUrlKeyName(item["keyName"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { - log.Printf("[DEBUG] Skipping item with keyName= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_backend_service_signed_url_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_backend_service_signed_url_key.go deleted file mode 100644 index 488f283b60..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_backend_service_signed_url_key.go +++ /dev/null @@ -1,335 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeBackendServiceSignedUrlKey() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeBackendServiceSignedUrlKeyCreate, - Read: resourceComputeBackendServiceSignedUrlKeyRead, - Delete: resourceComputeBackendServiceSignedUrlKeyDelete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "backend_service": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The backend service this signed URL key belongs.`, - }, - "key_value": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `128-bit key value used for signing the URL. 
The key value must be a -valid RFC 4648 Section 5 base64url encoded string.`, - Sensitive: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`), - Description: `Name of the signed URL key.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeBackendServiceSignedUrlKeyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - keyNameProp, err := expandNestedComputeBackendServiceSignedUrlKeyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(keyNameProp)) && (ok || !reflect.DeepEqual(v, keyNameProp)) { - obj["keyName"] = keyNameProp - } - keyValueProp, err := expandNestedComputeBackendServiceSignedUrlKeyKeyValue(d.Get("key_value"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("key_value"); !isEmptyValue(reflect.ValueOf(keyValueProp)) && (ok || !reflect.DeepEqual(v, keyValueProp)) { - obj["keyValue"] = keyValueProp - } - backendServiceProp, err := expandNestedComputeBackendServiceSignedUrlKeyBackendService(d.Get("backend_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend_service"); !isEmptyValue(reflect.ValueOf(backendServiceProp)) && (ok || !reflect.DeepEqual(v, backendServiceProp)) { - obj["backendService"] = backendServiceProp - } - - lockName, err := replaceVars(d, config, "signedUrlKey/{{project}}/backendServices/{{backend_service}}/") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{backend_service}}/addSignedUrlKey") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new BackendServiceSignedUrlKey: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BackendServiceSignedUrlKey: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating BackendServiceSignedUrlKey: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/backendServices/{{backend_service}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating BackendServiceSignedUrlKey", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create BackendServiceSignedUrlKey: %s", err) - } - - log.Printf("[DEBUG] Finished creating BackendServiceSignedUrlKey %q: %#v", d.Id(), res) - - return resourceComputeBackendServiceSignedUrlKeyRead(d, meta) -} - -func resourceComputeBackendServiceSignedUrlKeyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{backend_service}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for 
BackendServiceSignedUrlKey: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeBackendServiceSignedUrlKey %q", d.Id())) - } - - res, err = flattenNestedComputeBackendServiceSignedUrlKey(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing ComputeBackendServiceSignedUrlKey because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading BackendServiceSignedUrlKey: %s", err) - } - - if err := d.Set("name", flattenNestedComputeBackendServiceSignedUrlKeyName(res["keyName"], d, config)); err != nil { - return fmt.Errorf("Error reading BackendServiceSignedUrlKey: %s", err) - } - - return nil -} - -func resourceComputeBackendServiceSignedUrlKeyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BackendServiceSignedUrlKey: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "signedUrlKey/{{project}}/backendServices/{{backend_service}}/") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{backend_service}}/deleteSignedUrlKey?keyName={{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting 
BackendServiceSignedUrlKey %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "BackendServiceSignedUrlKey") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting BackendServiceSignedUrlKey", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting BackendServiceSignedUrlKey %q: %#v", d.Id(), res) - return nil -} - -func flattenNestedComputeBackendServiceSignedUrlKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeBackendServiceSignedUrlKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeBackendServiceSignedUrlKeyKeyValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeBackendServiceSignedUrlKeyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for backend_service: %s", err) - } - return f.RelativeLink(), nil -} - -func flattenNestedComputeBackendServiceSignedUrlKey(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["cdnPolicy"] - if !ok || v == nil { - return nil, nil - } - res = v.(map[string]interface{}) - - v, ok = res["signedUrlKeyNames"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // 
Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value cdnPolicy.signedUrlKeyNames. Actual value: %v", v) - } - - _, item, err := resourceComputeBackendServiceSignedUrlKeyFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeBackendServiceSignedUrlKeyFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputeBackendServiceSignedUrlKeyName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeBackendServiceSignedUrlKeyName(expectedName, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - // List response only contains the ID - construct a response object. - item := map[string]interface{}{ - "keyName": itemRaw, - } - - itemName := flattenNestedComputeBackendServiceSignedUrlKeyName(item["keyName"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { - log.Printf("[DEBUG] Skipping item with keyName= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_disk.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_disk.go deleted file mode 100644 index 86acf66682..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_disk.go +++ /dev/null @@ -1,1712 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/googleapi" -) - -// diffsupress for beta and to check change in source_disk attribute -func sourceDiskDiffSupress(_, old, new string, _ *schema.ResourceData) bool { - s1 := strings.TrimPrefix(old, "https://www.googleapis.com/compute/beta") - s2 := strings.TrimPrefix(new, "https://www.googleapis.com/compute/v1") - if strings.HasSuffix(s1, s2) { - return true - } - return false -} - -// Is the new disk size smaller than the old one? -func isDiskShrinkage(_ context.Context, old, new, _ interface{}) bool { - // It's okay to remove size entirely. - if old == nil || new == nil { - return false - } - return new.(int) < old.(int) -} - -// We cannot suppress the diff for the case when family name is not part of the image name since we can't -// make a network call in a DiffSuppressFunc. 
-func diskImageDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - // Understand that this function solves a messy problem ("how do we tell if the diff between two images - // is 'ForceNew-worthy', without making a network call?") in the best way we can: through a series of special - // cases and regexes. If you find yourself here because you are trying to add a new special case, - // you are probably looking for the diskImageFamilyEquals function and its subfunctions. - // In order to keep this maintainable, we need to ensure that the positive and negative examples - // in resource_compute_disk_test.go are as complete as possible. - - // 'old' is read from the API. - // It always has the format 'https://www.googleapis.com/compute/v1/projects/(%s)/global/images/(%s)' - matches := resolveImageLink.FindStringSubmatch(old) - if matches == nil { - // Image read from the API doesn't have the expected format. In practice, it should never happen - return false - } - oldProject := matches[1] - oldName := matches[2] - - // Partial or full self link family - if resolveImageProjectFamily.MatchString(new) { - // Value matches pattern "projects/{project}/global/images/family/{family-name}$" - matches := resolveImageProjectFamily.FindStringSubmatch(new) - newProject := matches[1] - newFamilyName := matches[2] - - return diskImageProjectNameEquals(oldProject, newProject) && diskImageFamilyEquals(oldName, newFamilyName) - } - - // Partial or full self link image - if resolveImageProjectImage.MatchString(new) { - // Value matches pattern "projects/{project}/global/images/{image-name}$" - matches := resolveImageProjectImage.FindStringSubmatch(new) - newProject := matches[1] - newImageName := matches[2] - - return diskImageProjectNameEquals(oldProject, newProject) && diskImageEquals(oldName, newImageName) - } - - // Partial link without project family - if resolveImageGlobalFamily.MatchString(new) { - // Value is "global/images/family/{family-name}" - matches := 
resolveImageGlobalFamily.FindStringSubmatch(new) - familyName := matches[1] - - return diskImageFamilyEquals(oldName, familyName) - } - - // Partial link without project image - if resolveImageGlobalImage.MatchString(new) { - // Value is "global/images/{image-name}" - matches := resolveImageGlobalImage.FindStringSubmatch(new) - imageName := matches[1] - - return diskImageEquals(oldName, imageName) - } - - // Family shorthand - if resolveImageFamilyFamily.MatchString(new) { - // Value is "family/{family-name}" - matches := resolveImageFamilyFamily.FindStringSubmatch(new) - familyName := matches[1] - - return diskImageFamilyEquals(oldName, familyName) - } - - // Shorthand for image or family - if resolveImageProjectImageShorthand.MatchString(new) { - // Value is "{project}/{image-name}" or "{project}/{family-name}" - matches := resolveImageProjectImageShorthand.FindStringSubmatch(new) - newProject := matches[1] - newName := matches[2] - - return diskImageProjectNameEquals(oldProject, newProject) && - (diskImageEquals(oldName, newName) || diskImageFamilyEquals(oldName, newName)) - } - - // Image or family only - if diskImageEquals(oldName, new) || diskImageFamilyEquals(oldName, new) { - // Value is "{image-name}" or "{family-name}" - return true - } - - return false -} - -func diskImageProjectNameEquals(project1, project2 string) bool { - // Convert short project name to full name - // For instance, centos => centos-cloud - fullProjectName, ok := imageMap[project2] - if ok { - project2 = fullProjectName - } - - return project1 == project2 -} - -func diskImageEquals(oldImageName, newImageName string) bool { - return oldImageName == newImageName -} - -func diskImageFamilyEquals(imageName, familyName string) bool { - // Handles the case when the image name includes the family name - // e.g. 
image name: debian-11-bullseye-v20220719, family name: debian-11 - - // First condition is to check if image contains arm64 because of case like: - // image name: opensuse-leap-15-4-v20220713-arm64, family name: opensuse-leap (should not be evaluated during handling of amd64 cases) - // In second condition, we have to check for amd64 because of cases like: - // image name: ubuntu-2210-kinetic-amd64-v20221022, family name: ubuntu-2210 (should not suppress) - if !strings.Contains(imageName, "-arm64") && strings.Contains(imageName, strings.TrimSuffix(familyName, "-amd64")) { - if strings.Contains(imageName, "-amd64") { - return strings.HasSuffix(familyName, "-amd64") - } else { - return !strings.HasSuffix(familyName, "-amd64") - } - } - - // We have to check for arm64 because of cases like: - // image name: opensuse-leap-15-4-v20220713-arm64, family name: opensuse-leap (should not suppress) - if strings.Contains(imageName, strings.TrimSuffix(familyName, "-arm64")) { - if strings.Contains(imageName, "-arm64") { - return strings.HasSuffix(familyName, "-arm64") - } else { - return !strings.HasSuffix(familyName, "-arm64") - } - } - - if suppressCanonicalFamilyDiff(imageName, familyName) { - return true - } - - if suppressCosFamilyDiff(imageName, familyName) { - return true - } - - if suppressWindowsSqlFamilyDiff(imageName, familyName) { - return true - } - - if suppressWindowsFamilyDiff(imageName, familyName) { - return true - } - - return false -} - -// e.g. image: ubuntu-1404-trusty-v20180122, family: ubuntu-1404-lts -func suppressCanonicalFamilyDiff(imageName, familyName string) bool { - parts := canonicalUbuntuLtsImage.FindStringSubmatch(imageName) - if len(parts) == 4 { - var f string - if parts[3] == "" { - f = fmt.Sprintf("ubuntu-%s%s-lts", parts[1], parts[2]) - } else { - f = fmt.Sprintf("ubuntu-%s%s-lts-%s", parts[1], parts[2], parts[3]) - } - if f == familyName { - return true - } - } - - return false -} - -// e.g. 
image: cos-NN-*, family: cos-NN-lts -func suppressCosFamilyDiff(imageName, familyName string) bool { - parts := cosLtsImage.FindStringSubmatch(imageName) - if len(parts) == 2 { - f := fmt.Sprintf("cos-%s-lts", parts[1]) - if f == familyName { - return true - } - } - - return false -} - -// e.g. image: sql-2017-standard-windows-2016-dc-v20180109, family: sql-std-2017-win-2016 -// e.g. image: sql-2017-express-windows-2012-r2-dc-v20180109, family: sql-exp-2017-win-2012-r2 -func suppressWindowsSqlFamilyDiff(imageName, familyName string) bool { - parts := windowsSqlImage.FindStringSubmatch(imageName) - if len(parts) == 5 { - edition := parts[2] // enterprise, standard or web. - sqlVersion := parts[1] - windowsVersion := parts[3] - - // Translate edition - switch edition { - case "enterprise": - edition = "ent" - case "standard": - edition = "std" - case "express": - edition = "exp" - } - - var f string - if revision := parts[4]; revision != "" { - // With revision - f = fmt.Sprintf("sql-%s-%s-win-%s-r%s", edition, sqlVersion, windowsVersion, revision) - } else { - // No revision - f = fmt.Sprintf("sql-%s-%s-win-%s", edition, sqlVersion, windowsVersion) - } - - if f == familyName { - return true - } - } - - return false -} - -// e.g. image: windows-server-1709-dc-core-v20180109, family: windows-1709-core -// e.g. 
image: windows-server-1709-dc-core-for-containers-v20180109, family: "windows-1709-core-for-containers -func suppressWindowsFamilyDiff(imageName, familyName string) bool { - updatedFamilyString := strings.Replace(familyName, "windows-", "windows-server-", 1) - updatedImageName := strings.Replace(imageName, "-dc-", "-", 1) - - return strings.Contains(updatedImageName, updatedFamilyString) -} - -func ResourceComputeDisk() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeDiskCreate, - Read: resourceComputeDiskRead, - Update: resourceComputeDiskUpdate, - Delete: resourceComputeDiskDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeDiskImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - CustomizeDiff: customdiff.All( - customdiff.ForceNewIfChange("size", isDiskShrinkage)), - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "disk_encryption_key": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Encrypts the disk using a customer-supplied encryption key. 
- -After you encrypt a disk with a customer-supplied key, you must -provide the same key if you use the disk later (e.g. to create a disk -snapshot or an image, or to attach the disk to a virtual machine). - -Customer-supplied encryption keys do not protect access to metadata of -the disk. - -If you do not provide an encryption key when creating the disk, then -the disk will be encrypted using an automatically generated key and -you do not need to provide a key to use the disk later.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_self_link": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The self link of the encryption key used to encrypt the disk. Also called KmsKeyName -in the cloud console. Your project's Compute Engine System service account -('service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com') must have -'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. -See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys`, - }, - "kms_key_service_account": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The service account used for the encryption request for the given KMS key. -If absent, the Compute Engine Service Agent service account is used.`, - }, - "raw_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - Sensitive: true, - }, - "rsa_encrypted_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit -customer-supplied encryption key to either encrypt or decrypt -this resource. 
You can provide either the rawKey or the rsaEncryptedKey.`, - Sensitive: true, - }, - "sha256": { - Type: schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied -encryption key that protects this resource.`, - }, - }, - }, - }, - "image": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: diskImageDiffSuppress, - Description: `The image from which to initialize this disk. This can be -one of: the image's 'self_link', 'projects/{project}/global/images/{image}', -'projects/{project}/global/images/family/{family}', 'global/images/{image}', -'global/images/family/{family}', 'family/{family}', '{project}/{family}', -'{project}/{image}', '{family}', or '{image}'. If referred by family, the -images names must include the family name. If they don't, use the -[google_compute_image data source](/docs/providers/google/d/compute_image.html). -For instance, the image 'centos-6-v20180104' includes its family name 'centos-6'. -These images can be referred by family name here.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Labels to apply to this disk. A list of key->value pairs.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "physical_block_size_bytes": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Physical block size of the persistent disk, in bytes. If not present -in a request, a default value is used. Currently supported sizes -are 4096 and 16384, other sizes may be added in the future. 
-If an unsupported value is requested, the error message will list -the supported values for the caller's project.`, - }, - "provisioned_iops": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Indicates how many IOPS must be provisioned for the disk.`, - }, - "size": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Size of the persistent disk, specified in GB. You can specify this -field when creating a persistent disk using the 'image' or -'snapshot' parameter, or specify it alone to create an empty -persistent disk. - -If you specify this field along with 'image' or 'snapshot', -the value must not be less than the size of the image -or the size of the snapshot. - -~>**NOTE** If you change the size, Terraform updates the disk size -if upsizing is detected but recreates the disk if downsizing is requested. -You can add 'lifecycle.prevent_destroy' in the config to prevent destroying -and recreating.`, - }, - "snapshot": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The source snapshot used to create this disk. You can provide this as -a partial or full URL to the resource. If the snapshot is in another -project than this disk, you must supply a full URL. For example, the -following are valid values: - -* 'https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot' -* 'projects/project/global/snapshots/snapshot' -* 'global/snapshots/snapshot' -* 'snapshot'`, - }, - "source_disk": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: sourceDiskDiffSupress, - Description: `The source disk used to create this disk. You can provide this as a partial or full URL to the resource. 
-For example, the following are valid values: - -* https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks/{disk} -* https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks/{disk} -* projects/{project}/zones/{zone}/disks/{disk} -* projects/{project}/regions/{region}/disks/{disk} -* zones/{zone}/disks/{disk} -* regions/{region}/disks/{disk}`, - }, - "source_image_encryption_key": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The customer-supplied encryption key of the source image. Required if -the source image is protected by a customer-supplied encryption key.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_self_link": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The self link of the encryption key used to encrypt the disk. Also called KmsKeyName -in the cloud console. Your project's Compute Engine System service account -('service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com') must have -'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. -See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys`, - }, - "kms_key_service_account": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The service account used for the encryption request for the given KMS key. 
-If absent, the Compute Engine Service Agent service account is used.`, - }, - "raw_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - }, - "sha256": { - Type: schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied -encryption key that protects this resource.`, - }, - }, - }, - }, - "source_snapshot_encryption_key": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The customer-supplied encryption key of the source snapshot. Required -if the source snapshot is protected by a customer-supplied encryption -key.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_self_link": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The self link of the encryption key used to encrypt the disk. Also called KmsKeyName -in the cloud console. Your project's Compute Engine System service account -('service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com') must have -'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. -See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys`, - }, - "kms_key_service_account": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The service account used for the encryption request for the given KMS key. 
-If absent, the Compute Engine Service Agent service account is used.`, - }, - "raw_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - }, - "sha256": { - Type: schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied -encryption key that protects this resource.`, - }, - }, - }, - }, - "type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the disk type resource describing which disk type to use to -create the disk. Provide this when creating the disk.`, - Default: "pd-standard", - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the zone where the disk resides.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "label_fingerprint": { - Type: schema.TypeString, - Computed: true, - Description: `The fingerprint used for optimistic locking of this resource. Used -internally during updates.`, - }, - "last_attach_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Last attach timestamp in RFC3339 text format.`, - }, - "last_detach_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Last detach timestamp in RFC3339 text format.`, - }, - "source_disk_id": { - Type: schema.TypeString, - Computed: true, - Description: `The ID value of the disk used to create this image. 
This value may -be used to determine whether the image was taken from the current -or a previous instance of a given disk name.`, - }, - "source_image_id": { - Type: schema.TypeString, - Computed: true, - Description: `The ID value of the image used to create this disk. This value -identifies the exact image that was used to create this persistent -disk. For example, if you created the persistent disk from an image -that was later deleted and recreated under the same name, the source -image ID would identify the exact version of the image that was used.`, - }, - "source_snapshot_id": { - Type: schema.TypeString, - Computed: true, - Description: `The unique ID of the snapshot used to create this disk. This value -identifies the exact snapshot that was used to create this persistent -disk. For example, if you created the persistent disk from a snapshot -that was later deleted and recreated under the same name, the source -snapshot ID would identify the exact version of the snapshot that was -used.`, - }, - "users": { - Type: schema.TypeList, - Computed: true, - Description: `Links to the users of the disk (attached instances) in form: -project/zones/zone/instances/instance`, - Elem: &schema.Schema{ - Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelFingerprintProp, err := expandComputeDiskLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); 
!isEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - descriptionProp, err := expandComputeDiskDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandComputeDiskLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - nameProp, err := expandComputeDiskName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - sizeGbProp, err := expandComputeDiskSize(d.Get("size"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("size"); !isEmptyValue(reflect.ValueOf(sizeGbProp)) && (ok || !reflect.DeepEqual(v, sizeGbProp)) { - obj["sizeGb"] = sizeGbProp - } - physicalBlockSizeBytesProp, err := expandComputeDiskPhysicalBlockSizeBytes(d.Get("physical_block_size_bytes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("physical_block_size_bytes"); !isEmptyValue(reflect.ValueOf(physicalBlockSizeBytesProp)) && (ok || !reflect.DeepEqual(v, physicalBlockSizeBytesProp)) { - obj["physicalBlockSizeBytes"] = physicalBlockSizeBytesProp - } - sourceDiskProp, err := expandComputeDiskSourceDisk(d.Get("source_disk"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_disk"); !isEmptyValue(reflect.ValueOf(sourceDiskProp)) && (ok || !reflect.DeepEqual(v, sourceDiskProp)) { - obj["sourceDisk"] = sourceDiskProp - } - typeProp, 
err := expandComputeDiskType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - sourceImageProp, err := expandComputeDiskImage(d.Get("image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("image"); !isEmptyValue(reflect.ValueOf(sourceImageProp)) && (ok || !reflect.DeepEqual(v, sourceImageProp)) { - obj["sourceImage"] = sourceImageProp - } - provisionedIopsProp, err := expandComputeDiskProvisionedIops(d.Get("provisioned_iops"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("provisioned_iops"); !isEmptyValue(reflect.ValueOf(provisionedIopsProp)) && (ok || !reflect.DeepEqual(v, provisionedIopsProp)) { - obj["provisionedIops"] = provisionedIopsProp - } - zoneProp, err := expandComputeDiskZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - sourceImageEncryptionKeyProp, err := expandComputeDiskSourceImageEncryptionKey(d.Get("source_image_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_image_encryption_key"); !isEmptyValue(reflect.ValueOf(sourceImageEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, sourceImageEncryptionKeyProp)) { - obj["sourceImageEncryptionKey"] = sourceImageEncryptionKeyProp - } - diskEncryptionKeyProp, err := expandComputeDiskDiskEncryptionKey(d.Get("disk_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disk_encryption_key"); !isEmptyValue(reflect.ValueOf(diskEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, diskEncryptionKeyProp)) { - obj["diskEncryptionKey"] = diskEncryptionKeyProp - } - sourceSnapshotProp, err := 
expandComputeDiskSnapshot(d.Get("snapshot"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("snapshot"); !isEmptyValue(reflect.ValueOf(sourceSnapshotProp)) && (ok || !reflect.DeepEqual(v, sourceSnapshotProp)) { - obj["sourceSnapshot"] = sourceSnapshotProp - } - sourceSnapshotEncryptionKeyProp, err := expandComputeDiskSourceSnapshotEncryptionKey(d.Get("source_snapshot_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_snapshot_encryption_key"); !isEmptyValue(reflect.ValueOf(sourceSnapshotEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, sourceSnapshotEncryptionKeyProp)) { - obj["sourceSnapshotEncryptionKey"] = sourceSnapshotEncryptionKeyProp - } - - obj, err = resourceComputeDiskEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Disk: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Disk: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Disk: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/disks/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating Disk", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create 
Disk: %s", err) - } - - log.Printf("[DEBUG] Finished creating Disk %q: %#v", d.Id(), res) - - return resourceComputeDiskRead(d, meta) -} - -func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Disk: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeDisk %q", d.Id())) - } - - res, err = resourceComputeDiskDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing ComputeDisk because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - - if err := d.Set("label_fingerprint", flattenComputeDiskLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeDiskCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("description", flattenComputeDiskDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("last_attach_timestamp", flattenComputeDiskLastAttachTimestamp(res["lastAttachTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("last_detach_timestamp", flattenComputeDiskLastDetachTimestamp(res["lastDetachTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("labels", flattenComputeDiskLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("name", flattenComputeDiskName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("size", flattenComputeDiskSize(res["sizeGb"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("users", flattenComputeDiskUsers(res["users"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("physical_block_size_bytes", flattenComputeDiskPhysicalBlockSizeBytes(res["physicalBlockSizeBytes"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("source_disk", 
flattenComputeDiskSourceDisk(res["sourceDisk"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("source_disk_id", flattenComputeDiskSourceDiskId(res["sourceDiskId"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("type", flattenComputeDiskType(res["type"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("image", flattenComputeDiskImage(res["sourceImage"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("provisioned_iops", flattenComputeDiskProvisionedIops(res["provisionedIops"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("zone", flattenComputeDiskZone(res["zone"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("source_image_encryption_key", flattenComputeDiskSourceImageEncryptionKey(res["sourceImageEncryptionKey"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("source_image_id", flattenComputeDiskSourceImageId(res["sourceImageId"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("disk_encryption_key", flattenComputeDiskDiskEncryptionKey(res["diskEncryptionKey"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("snapshot", flattenComputeDiskSnapshot(res["sourceSnapshot"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("source_snapshot_encryption_key", flattenComputeDiskSourceSnapshotEncryptionKey(res["sourceSnapshotEncryptionKey"], d, config)); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("source_snapshot_id", flattenComputeDiskSourceSnapshotId(res["sourceSnapshotId"], d, config)); err != nil { - return 
fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading Disk: %s", err) - } - - return nil -} - -func resourceComputeDiskUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Disk: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("label_fingerprint") || d.HasChange("labels") { - obj := make(map[string]interface{}) - - labelFingerprintProp, err := expandComputeDiskLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - labelsProp, err := expandComputeDiskLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}/setLabels") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Disk %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Disk %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating 
Disk", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("size") { - obj := make(map[string]interface{}) - - sizeGbProp, err := expandComputeDiskSize(d.Get("size"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("size"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sizeGbProp)) { - obj["sizeGb"] = sizeGbProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}/resize") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Disk %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Disk %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating Disk", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeDiskRead(d, meta) -} - -func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Disk: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - readRes, err := SendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeDisk %q", d.Id())) - } - - // if 
disks are attached to instances, they must be detached before the disk can be deleted - if v, ok := readRes["users"].([]interface{}); ok { - type detachArgs struct{ project, zone, instance, deviceName string } - var detachCalls []detachArgs - - for _, instance := range convertStringArr(v) { - self := d.Get("self_link").(string) - instanceProject, instanceZone, instanceName, err := GetLocationalResourcePropertiesFromSelfLinkString(instance) - if err != nil { - return err - } - - i, err := config.NewComputeClient(userAgent).Instances.Get(instanceProject, instanceZone, instanceName).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] instance %q not found, not bothering to detach disks", instance) - continue - } - return fmt.Errorf("Error retrieving instance %s: %s", instance, err.Error()) - } - for _, disk := range i.Disks { - if compareSelfLinkOrResourceName("", disk.Source, self, nil) { - detachCalls = append(detachCalls, detachArgs{ - project: instanceProject, - zone: GetResourceNameFromSelfLink(i.Zone), - instance: i.Name, - deviceName: disk.DeviceName, - }) - } - } - } - - for _, call := range detachCalls { - op, err := config.NewComputeClient(userAgent).Instances.DetachDisk(call.project, call.zone, call.instance, call.deviceName).Do() - if err != nil { - return fmt.Errorf("Error detaching disk %s from instance %s/%s/%s: %s", call.deviceName, call.project, - call.zone, call.instance, err.Error()) - } - err = ComputeOperationWaitTime(config, op, call.project, - fmt.Sprintf("Detaching disk from %s/%s/%s", call.project, call.zone, call.instance), userAgent, d.Timeout(schema.TimeoutDelete)) - if err != nil { - if opErr, ok := err.(ComputeOperationError); ok && len(opErr.Errors) == 1 && opErr.Errors[0].Code == "RESOURCE_NOT_FOUND" { - log.Printf("[WARN] instance %q was deleted while awaiting detach", call.instance) - continue - } - return err - } - } - } - log.Printf("[DEBUG] Deleting Disk %q", d.Id()) - - // 
err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Disk") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting Disk", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Disk %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/disks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/disks/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeDiskLabelFingerprint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskLastAttachTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskLastDetachTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskName(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSize(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeDiskUsers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeDiskPhysicalBlockSizeBytes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeDiskSourceDisk(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceDiskId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenComputeDiskImage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskProvisionedIops(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } 
- - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeDiskZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenComputeDiskSourceImageEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["raw_key"] = - flattenComputeDiskSourceImageEncryptionKeyRawKey(original["rawKey"], d, config) - transformed["sha256"] = - flattenComputeDiskSourceImageEncryptionKeySha256(original["sha256"], d, config) - transformed["kms_key_self_link"] = - flattenComputeDiskSourceImageEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) - transformed["kms_key_service_account"] = - flattenComputeDiskSourceImageEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) - return []interface{}{transformed} -} -func flattenComputeDiskSourceImageEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceImageEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceImageEncryptionKeyKmsKeySelfLink(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceImageEncryptionKeyKmsKeyServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceImageId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskDiskEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) interface{} 
{ - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["raw_key"] = - flattenComputeDiskDiskEncryptionKeyRawKey(original["rawKey"], d, config) - transformed["rsa_encrypted_key"] = - flattenComputeDiskDiskEncryptionKeyRsaEncryptedKey(original["rsaEncryptedKey"], d, config) - transformed["sha256"] = - flattenComputeDiskDiskEncryptionKeySha256(original["sha256"], d, config) - transformed["kms_key_self_link"] = - flattenComputeDiskDiskEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) - transformed["kms_key_service_account"] = - flattenComputeDiskDiskEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) - return []interface{}{transformed} -} -func flattenComputeDiskDiskEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskDiskEncryptionKeyRsaEncryptedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskDiskEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskDiskEncryptionKeyKmsKeySelfLink(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskDiskEncryptionKeyKmsKeyServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSnapshot(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeDiskSourceSnapshotEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["raw_key"] = - 
flattenComputeDiskSourceSnapshotEncryptionKeyRawKey(original["rawKey"], d, config) - transformed["kms_key_self_link"] = - flattenComputeDiskSourceSnapshotEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) - transformed["sha256"] = - flattenComputeDiskSourceSnapshotEncryptionKeySha256(original["sha256"], d, config) - transformed["kms_key_service_account"] = - flattenComputeDiskSourceSnapshotEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) - return []interface{}{transformed} -} -func flattenComputeDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceSnapshotEncryptionKeyKmsKeySelfLink(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceSnapshotEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceSnapshotEncryptionKeyKmsKeyServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceSnapshotId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeDiskLabelFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandComputeDiskName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSize(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskPhysicalBlockSizeBytes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSourceDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseZonalFieldValue("diskTypes", v.(string), "project", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for type: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeDiskImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskProvisionedIops(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeDiskSourceImageEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeDiskSourceImageEncryptionKeyRawKey(original["raw_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - transformed["rawKey"] = transformedRawKey - } - - transformedSha256, err := expandComputeDiskSourceImageEncryptionKeySha256(original["sha256"], d, config) - if err != nil { - return nil, 
err - } else if val := reflect.ValueOf(transformedSha256); val.IsValid() && !isEmptyValue(val) { - transformed["sha256"] = transformedSha256 - } - - transformedKmsKeySelfLink, err := expandComputeDiskSourceImageEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeySelfLink - } - - transformedKmsKeyServiceAccount, err := expandComputeDiskSourceImageEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount - } - - return transformed, nil -} - -func expandComputeDiskSourceImageEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSourceImageEncryptionKeySha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSourceImageEncryptionKeyKmsKeySelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSourceImageEncryptionKeyKmsKeyServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskDiskEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeDiskDiskEncryptionKeyRawKey(original["raw_key"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - transformed["rawKey"] = transformedRawKey - } - - transformedRsaEncryptedKey, err := expandComputeDiskDiskEncryptionKeyRsaEncryptedKey(original["rsa_encrypted_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRsaEncryptedKey); val.IsValid() && !isEmptyValue(val) { - transformed["rsaEncryptedKey"] = transformedRsaEncryptedKey - } - - transformedSha256, err := expandComputeDiskDiskEncryptionKeySha256(original["sha256"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSha256); val.IsValid() && !isEmptyValue(val) { - transformed["sha256"] = transformedSha256 - } - - transformedKmsKeySelfLink, err := expandComputeDiskDiskEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeySelfLink - } - - transformedKmsKeyServiceAccount, err := expandComputeDiskDiskEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount - } - - return transformed, nil -} - -func expandComputeDiskDiskEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskDiskEncryptionKeyRsaEncryptedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskDiskEncryptionKeySha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskDiskEncryptionKeyKmsKeySelfLink(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskDiskEncryptionKeyKmsKeyServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSnapshot(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("snapshots", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for snapshot: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeDiskSourceSnapshotEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeDiskSourceSnapshotEncryptionKeyRawKey(original["raw_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - transformed["rawKey"] = transformedRawKey - } - - transformedKmsKeySelfLink, err := expandComputeDiskSourceSnapshotEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeySelfLink - } - - transformedSha256, err := expandComputeDiskSourceSnapshotEncryptionKeySha256(original["sha256"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSha256); val.IsValid() && !isEmptyValue(val) { - transformed["sha256"] = transformedSha256 - } - - transformedKmsKeyServiceAccount, err := expandComputeDiskSourceSnapshotEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) - if err != nil { - return nil, err - } else if 
val := reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount - } - - return transformed, nil -} - -func expandComputeDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSourceSnapshotEncryptionKeyKmsKeySelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSourceSnapshotEncryptionKeySha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSourceSnapshotEncryptionKeyKmsKeyServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeDiskEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - if v, ok := d.GetOk("type"); ok { - log.Printf("[DEBUG] Loading disk type: %s", v.(string)) - diskType, err := readDiskType(config, d, v.(string)) - if err != nil { - return nil, fmt.Errorf( - "Error loading disk type '%s': %s", - v.(string), err) - } - - obj["type"] = diskType.RelativeLink() - } - - if v, ok := d.GetOk("image"); ok { - log.Printf("[DEBUG] Resolving image name: %s", v.(string)) - imageUrl, err := resolveImage(config, project, v.(string), userAgent) - if err != nil { - return nil, fmt.Errorf( - "Error resolving image name '%s': %s", - v.(string), err) - } - - obj["sourceImage"] = imageUrl - log.Printf("[DEBUG] Image name resolved to: %s", imageUrl) - } - - return obj, nil -} - -func resourceComputeDiskDecoder(d *schema.ResourceData, meta interface{}, res 
map[string]interface{}) (map[string]interface{}, error) { - if v, ok := res["diskEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - // The raw key won't be returned, so we need to use the original. - transformed["rawKey"] = d.Get("disk_encryption_key.0.raw_key") - transformed["rsaEncryptedKey"] = d.Get("disk_encryption_key.0.rsa_encrypted_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - // The response for crypto keys often includes the version of the key which needs to be removed - // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 - transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["diskEncryptionKey"] = transformed - } - - if v, ok := res["sourceImageEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - // The raw key won't be returned, so we need to use the original. 
- transformed["rawKey"] = d.Get("source_image_encryption_key.0.raw_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - // The response for crypto keys often includes the version of the key which needs to be removed - // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 - transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["sourceImageEncryptionKey"] = transformed - } - - if v, ok := res["sourceSnapshotEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - // The raw key won't be returned, so we need to use the original. - transformed["rawKey"] = d.Get("source_snapshot_encryption_key.0.raw_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - // The response for crypto keys often includes the version of the key which needs to be removed - // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 - transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["sourceSnapshotEncryptionKey"] = transformed - } - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_disk_resource_policy_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_disk_resource_policy_attachment.go deleted file mode 100644 index 568fb3eaa0..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_disk_resource_policy_attachment.go +++ /dev/null @@ -1,395 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeDiskResourcePolicyAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeDiskResourcePolicyAttachmentCreate, - Read: resourceComputeDiskResourcePolicyAttachmentRead, - Delete: resourceComputeDiskResourcePolicyAttachmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeDiskResourcePolicyAttachmentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "disk": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the disk in which the resource policies are attached to.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource policy to be attached to the disk for scheduling snapshot -creation. 
Do not specify the self link.`, - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the zone where the disk resides.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeDiskResourcePolicyAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - obj, err = resourceComputeDiskResourcePolicyAttachmentEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{disk}}/addResourcePolicies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new DiskResourcePolicyAttachment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DiskResourcePolicyAttachment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating DiskResourcePolicyAttachment: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{disk}}/{{name}}") - if err 
!= nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating DiskResourcePolicyAttachment", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create DiskResourcePolicyAttachment: %s", err) - } - - log.Printf("[DEBUG] Finished creating DiskResourcePolicyAttachment %q: %#v", d.Id(), res) - - return resourceComputeDiskResourcePolicyAttachmentRead(d, meta) -} - -func resourceComputeDiskResourcePolicyAttachmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{disk}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DiskResourcePolicyAttachment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeDiskResourcePolicyAttachment %q", d.Id())) - } - - res, err = flattenNestedComputeDiskResourcePolicyAttachment(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. 
- log.Printf("[DEBUG] Removing ComputeDiskResourcePolicyAttachment because it couldn't be matched.") - d.SetId("") - return nil - } - - res, err = resourceComputeDiskResourcePolicyAttachmentDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing ComputeDiskResourcePolicyAttachment because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading DiskResourcePolicyAttachment: %s", err) - } - - if err := d.Set("name", flattenNestedComputeDiskResourcePolicyAttachmentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading DiskResourcePolicyAttachment: %s", err) - } - - return nil -} - -func resourceComputeDiskResourcePolicyAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DiskResourcePolicyAttachment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{disk}}/removeResourcePolicies") - if err != nil { - return err - } - - var obj map[string]interface{} - obj = make(map[string]interface{}) - - zone, err := getZone(d, config) - if err != nil { - return err - } - if zone == "" { - return fmt.Errorf("zone must be non-empty - set in resource or at provider-level") - } - - // resourcePolicies are referred to by region but affixed to zonal disks. 
- // We construct the regional name from the zone: - // projects/{project}/regions/{region}/resourcePolicies/{resourceId} - region := getRegionFromZone(zone) - if region == "" { - return fmt.Errorf("invalid zone %q, unable to infer region from zone", zone) - } - - name, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(name)) && (ok || !reflect.DeepEqual(v, name)) { - obj["resourcePolicies"] = []interface{}{fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, name)} - } - log.Printf("[DEBUG] Deleting DiskResourcePolicyAttachment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DiskResourcePolicyAttachment") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting DiskResourcePolicyAttachment", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting DiskResourcePolicyAttachment %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeDiskResourcePolicyAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/disks/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{disk}}/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - 
d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNestedComputeDiskResourcePolicyAttachmentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeDiskResourcePolicyAttachmentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeDiskResourcePolicyAttachmentEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - zone, err := getZone(d, config) - if err != nil { - return nil, err - } - if zone == "" { - return nil, fmt.Errorf("zone must be non-empty - set in resource or at provider-level") - } - - // resourcePolicies are referred to by region but affixed to zonal disks. - // We construct the regional name from the zone: - // projects/{project}/regions/{region}/resourcePolicies/{resourceId} - region := getRegionFromZone(zone) - if region == "" { - return nil, fmt.Errorf("invalid zone %q, unable to infer region from zone", zone) - } - - obj["resourcePolicies"] = []interface{}{fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, obj["name"])} - delete(obj, "name") - return obj, nil -} - -func flattenNestedComputeDiskResourcePolicyAttachment(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["resourcePolicies"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value resourcePolicies. 
Actual value: %v", v) - } - - _, item, err := resourceComputeDiskResourcePolicyAttachmentFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeDiskResourcePolicyAttachmentFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeDiskResourcePolicyAttachmentName(expectedName, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - // List response only contains the ID - construct a response object. - item := map[string]interface{}{ - "name": itemRaw, - } - - // Decode list item before comparing. - item, err := resourceComputeDiskResourcePolicyAttachmentDecoder(d, meta, item) - if err != nil { - return -1, nil, err - } - - itemName := flattenNestedComputeDiskResourcePolicyAttachmentName(item["name"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { - log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} -func resourceComputeDiskResourcePolicyAttachmentDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - res["name"] = GetResourceNameFromSelfLink(res["name"].(string)) - return res, nil -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_external_vpn_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_external_vpn_gateway.go deleted file mode 100644 index 5b4d0cc50c..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_external_vpn_gateway.go +++ /dev/null @@ -1,438 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeExternalVpnGateway() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeExternalVpnGatewayCreate, - Read: resourceComputeExternalVpnGatewayRead, - Delete: resourceComputeExternalVpnGatewayDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeExternalVpnGatewayImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. 
Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "interface": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A list of interfaces on this external VPN gateway.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The numeric ID for this interface. Allowed values are based on the redundancy type -of this external VPN gateway -* '0 - SINGLE_IP_INTERNALLY_REDUNDANT' -* '0, 1 - TWO_IPS_REDUNDANCY' -* '0, 1, 2, 3 - FOUR_IPS_REDUNDANCY'`, - }, - "ip_address": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `IP address of the interface in the external VPN gateway. -Only IPv4 is supported. 
This IP address can be either from -your on-premise gateway or another Cloud provider's VPN gateway, -it cannot be an IP address from Google Compute Engine.`, - }, - }, - }, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Labels for the external VPN gateway resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "redundancy_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"FOUR_IPS_REDUNDANCY", "SINGLE_IP_INTERNALLY_REDUNDANT", "TWO_IPS_REDUNDANCY", ""}), - Description: `Indicates the redundancy type of this external VPN gateway Possible values: ["FOUR_IPS_REDUNDANCY", "SINGLE_IP_INTERNALLY_REDUNDANT", "TWO_IPS_REDUNDANCY"]`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeExternalVpnGatewayCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeExternalVpnGatewayDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandComputeExternalVpnGatewayLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - nameProp, err := expandComputeExternalVpnGatewayName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); 
!isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - redundancyTypeProp, err := expandComputeExternalVpnGatewayRedundancyType(d.Get("redundancy_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("redundancy_type"); !isEmptyValue(reflect.ValueOf(redundancyTypeProp)) && (ok || !reflect.DeepEqual(v, redundancyTypeProp)) { - obj["redundancyType"] = redundancyTypeProp - } - interfacesProp, err := expandComputeExternalVpnGatewayInterface(d.Get("interface"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("interface"); !isEmptyValue(reflect.ValueOf(interfacesProp)) && (ok || !reflect.DeepEqual(v, interfacesProp)) { - obj["interfaces"] = interfacesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/externalVpnGateways") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ExternalVpnGateway: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ExternalVpnGateway: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating ExternalVpnGateway: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/externalVpnGateways/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating ExternalVpnGateway", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return 
fmt.Errorf("Error waiting to create ExternalVpnGateway: %s", err) - } - - log.Printf("[DEBUG] Finished creating ExternalVpnGateway %q: %#v", d.Id(), res) - - return resourceComputeExternalVpnGatewayRead(d, meta) -} - -func resourceComputeExternalVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/externalVpnGateways/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ExternalVpnGateway: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeExternalVpnGateway %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) - } - - if err := d.Set("description", flattenComputeExternalVpnGatewayDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) - } - if err := d.Set("labels", flattenComputeExternalVpnGatewayLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) - } - if err := d.Set("name", flattenComputeExternalVpnGatewayName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) - } - if err := d.Set("redundancy_type", flattenComputeExternalVpnGatewayRedundancyType(res["redundancyType"], d, config)); err != nil { - return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) - } - 
if err := d.Set("interface", flattenComputeExternalVpnGatewayInterface(res["interfaces"], d, config)); err != nil { - return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) - } - - return nil -} - -func resourceComputeExternalVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ExternalVpnGateway: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/externalVpnGateways/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting ExternalVpnGateway %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ExternalVpnGateway") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting ExternalVpnGateway", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting ExternalVpnGateway %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeExternalVpnGatewayImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/externalVpnGateways/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return 
nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/externalVpnGateways/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeExternalVpnGatewayDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeExternalVpnGatewayLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeExternalVpnGatewayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeExternalVpnGatewayRedundancyType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeExternalVpnGatewayInterface(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "id": flattenComputeExternalVpnGatewayInterfaceId(original["id"], d, config), - "ip_address": flattenComputeExternalVpnGatewayInterfaceIpAddress(original["ipAddress"], d, config), - }) - } - return transformed -} -func flattenComputeExternalVpnGatewayInterfaceId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func 
flattenComputeExternalVpnGatewayInterfaceIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeExternalVpnGatewayDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeExternalVpnGatewayLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandComputeExternalVpnGatewayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeExternalVpnGatewayRedundancyType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeExternalVpnGatewayInterface(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandComputeExternalVpnGatewayInterfaceId(original["id"], d, config) - if err != nil { - return nil, err - } else { - transformed["id"] = transformedId - } - - transformedIpAddress, err := expandComputeExternalVpnGatewayInterfaceIpAddress(original["ip_address"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIpAddress); val.IsValid() && !isEmptyValue(val) { - transformed["ipAddress"] = transformedIpAddress - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeExternalVpnGatewayInterfaceId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeExternalVpnGatewayInterfaceIpAddress(v interface{}, 
d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall.go deleted file mode 100644 index e6992dd632..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall.go +++ /dev/null @@ -1,1179 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "bytes" - "context" - "fmt" - "log" - "reflect" - "sort" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func resourceComputeFirewallRuleHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["protocol"].(string)))) - - // We need to make sure to sort the strings below so that we always - // generate the same hash code no matter what is in the set. 
- if v, ok := m["ports"]; ok && v != nil { - s := convertStringArr(v.([]interface{})) - sort.Strings(s) - - for _, v := range s { - buf.WriteString(fmt.Sprintf("%s-", v)) - } - } - - return hashcode(buf.String()) -} - -func compareCaseInsensitive(k, old, new string, d *schema.ResourceData) bool { - return strings.ToLower(old) == strings.ToLower(new) -} - -func diffSuppressEnableLogging(k, old, new string, d *schema.ResourceData) bool { - if k == "log_config.#" { - if new == "0" && d.Get("enable_logging").(bool) { - return true - } - } - - return false -} - -func resourceComputeFirewallEnableLoggingCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { - enableLogging, enableExists := diff.GetOkExists("enable_logging") - if !enableExists { - return nil - } - - logConfigExists := diff.Get("log_config.#").(int) != 0 - if logConfigExists && enableLogging == false { - return fmt.Errorf("log_config cannot be defined when enable_logging is false") - } - - return nil -} - -// Per https://github.com/hashicorp/terraform-provider-google/issues/2924 -// Make one of the source_ parameters Required in ingress google_compute_firewall -func resourceComputeFirewallSourceFieldsCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { - direction := diff.Get("direction").(string) - - if direction != "EGRESS" { - _, tagsOk := diff.GetOk("source_tags") - _, rangesOk := diff.GetOk("source_ranges") - _, sasOk := diff.GetOk("source_service_accounts") - - _, tagsExist := diff.GetOkExists("source_tags") - _, rangesExist := diff.GetOkExists("source_ranges") - _, sasExist := diff.GetOkExists("source_service_accounts") - - if !tagsOk && !rangesOk && !sasOk && !tagsExist && !rangesExist && !sasExist { - return fmt.Errorf("one of source_tags, source_ranges, or source_service_accounts must be defined") - } - } - - return nil -} - -func diffSuppressSourceRanges(k, old, new string, d *schema.ResourceData) bool { - if k == "source_ranges.#" { - if 
old == "1" && new == "0" { - // Allow diffing on the individual element if we are going from 1 -> 0 - // this allows for diff suppress on ["0.0.0.0/0"] -> [] - return true - } - // For any other source_ranges.# diff, don't suppress - return false - } - kLength := "source_ranges.#" - oldLength, newLength := d.GetChange(kLength) - oldInt, ok := oldLength.(int) - - if !ok { - return false - } - - newInt, ok := newLength.(int) - if !ok { - return false - } - - // Diff suppress only should suppress removing the default range - // This should probably be newInt == 0, but due to Terraform core internals - // (bug?) values found via GetChange may not have the correct new value - // in some circumstances - if oldInt == 1 && newInt == 1 { - if old == "0.0.0.0/0" && new == "" { - return true - } - } - // For any other source_ranges value diff, don't suppress - return false -} - -func ResourceComputeFirewall() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeFirewallCreate, - Read: resourceComputeFirewallRead, - Update: resourceComputeFirewallUpdate, - Delete: resourceComputeFirewallDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeFirewallImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - SchemaVersion: 1, - MigrateState: resourceComputeFirewallMigrateState, - CustomizeDiff: customdiff.All( - resourceComputeFirewallEnableLoggingCustomizeDiff, - resourceComputeFirewallSourceFieldsCustomizeDiff, - ), - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCEName, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. 
Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "network": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name or self_link of the network to attach this firewall to.`, - }, - "allow": { - Type: schema.TypeSet, - Optional: true, - Description: `The list of ALLOW rules specified by this firewall. Each rule -specifies a protocol and port-range tuple that describes a permitted -connection.`, - Elem: computeFirewallAllowSchema(), - Set: resourceComputeFirewallRuleHash, - ExactlyOneOf: []string{"allow", "deny"}, - }, - "deny": { - Type: schema.TypeSet, - Optional: true, - Description: `The list of DENY rules specified by this firewall. Each rule specifies -a protocol and port-range tuple that describes a denied connection.`, - Elem: computeFirewallDenySchema(), - Set: resourceComputeFirewallRuleHash, - ExactlyOneOf: []string{"allow", "deny"}, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "destination_ranges": { - Type: schema.TypeSet, - Computed: true, - Optional: true, - Description: `If destination ranges are specified, the firewall will apply only to -traffic that has destination IP address in these ranges. These ranges -must be expressed in CIDR format. 
IPv4 or IPv6 ranges are supported.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "direction": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"INGRESS", "EGRESS", ""}), - Description: `Direction of traffic to which this firewall applies; default is -INGRESS. Note: For INGRESS traffic, it is NOT supported to specify -destinationRanges; For EGRESS traffic, it is NOT supported to specify -'source_ranges' OR 'source_tags'. For INGRESS traffic, one of 'source_ranges', -'source_tags' or 'source_service_accounts' is required. Possible values: ["INGRESS", "EGRESS"]`, - }, - "disabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Denotes whether the firewall rule is disabled, i.e not applied to the -network it is associated with. When set to true, the firewall rule is -not enforced and the network behaves as if it did not exist. If this -is unspecified, the firewall rule will be enabled.`, - }, - "log_config": { - Type: schema.TypeList, - Optional: true, - DiffSuppressFunc: diffSuppressEnableLogging, - Description: `This field denotes the logging options for a particular firewall rule. -If defined, logging is enabled, and logs will be exported to Cloud Logging.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "metadata": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA"}), - Description: `This field denotes whether to include or exclude metadata for firewall logs. Possible values: ["EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA"]`, - }, - }, - }, - }, - "priority": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 65535), - Description: `Priority for this rule. This is an integer between 0 and 65535, both -inclusive. When not specified, the value assumed is 1000. 
Relative -priorities determine precedence of conflicting rules. Lower value of -priority implies higher precedence (eg, a rule with priority 0 has -higher precedence than a rule with priority 1). DENY rules take -precedence over ALLOW rules having equal priority.`, - Default: 1000, - }, - "source_ranges": { - Type: schema.TypeSet, - Optional: true, - DiffSuppressFunc: diffSuppressSourceRanges, - Description: `If source ranges are specified, the firewall will apply only to -traffic that has source IP address in these ranges. These ranges must -be expressed in CIDR format. One or both of sourceRanges and -sourceTags may be set. If both properties are set, the firewall will -apply to traffic that has source IP address within sourceRanges OR the -source IP that belongs to a tag listed in the sourceTags property. The -connection does not need to match both properties for the firewall to -apply. IPv4 or IPv6 ranges are supported. For INGRESS traffic, one of -'source_ranges', 'source_tags' or 'source_service_accounts' is required.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "source_service_accounts": { - Type: schema.TypeSet, - Optional: true, - Description: `If source service accounts are specified, the firewall will apply only -to traffic originating from an instance with a service account in this -list. Source service accounts cannot be used to control traffic to an -instance's external IP address because service accounts are associated -with an instance, not an IP address. sourceRanges can be set at the -same time as sourceServiceAccounts. If both are set, the firewall will -apply to traffic that has source IP address within sourceRanges OR the -source IP belongs to an instance with service account listed in -sourceServiceAccount. The connection does not need to match both -properties for the firewall to apply. sourceServiceAccounts cannot be -used at the same time as sourceTags or targetTags. 
For INGRESS traffic, -one of 'source_ranges', 'source_tags' or 'source_service_accounts' is required.`, - MaxItems: 10, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - ConflictsWith: []string{"source_tags", "target_tags"}, - }, - "source_tags": { - Type: schema.TypeSet, - Optional: true, - Description: `If source tags are specified, the firewall will apply only to traffic -with source IP that belongs to a tag listed in source tags. Source -tags cannot be used to control traffic to an instance's external IP -address. Because tags are associated with an instance, not an IP -address. One or both of sourceRanges and sourceTags may be set. If -both properties are set, the firewall will apply to traffic that has -source IP address within sourceRanges OR the source IP that belongs to -a tag listed in the sourceTags property. The connection does not need -to match both properties for the firewall to apply. For INGRESS traffic, -one of 'source_ranges', 'source_tags' or 'source_service_accounts' is required.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - ConflictsWith: []string{"source_service_accounts", "target_service_accounts"}, - }, - "target_service_accounts": { - Type: schema.TypeSet, - Optional: true, - Description: `A list of service accounts indicating sets of instances located in the -network that may make network connections as specified in allowed[]. -targetServiceAccounts cannot be used at the same time as targetTags or -sourceTags. 
If neither targetServiceAccounts nor targetTags are -specified, the firewall rule applies to all instances on the specified -network.`, - MaxItems: 10, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - ConflictsWith: []string{"source_tags", "target_tags"}, - }, - "target_tags": { - Type: schema.TypeSet, - Optional: true, - Description: `A list of instance tags indicating sets of instances located in the -network that may make network connections as specified in allowed[]. -If no targetTags are specified, the firewall rule applies to all -instances on the specified network.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - ConflictsWith: []string{"source_service_accounts", "target_service_accounts"}, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "enable_logging": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Deprecated: "Deprecated in favor of log_config", - Description: "This field denotes whether to enable logging for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver.", - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func computeFirewallAllowSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "protocol": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareCaseInsensitive, - Description: `The IP protocol to which this rule applies. The protocol type is -required when creating a firewall rule. 
This value can either be -one of the following well known protocol strings (tcp, udp, -icmp, esp, ah, sctp, ipip, all), or the IP protocol number.`, - }, - "ports": { - Type: schema.TypeList, - Optional: true, - Description: `An optional list of ports to which this rule applies. This field -is only applicable for UDP or TCP protocol. Each entry must be -either an integer or a range. If not specified, this rule -applies to connections through any port. - -Example inputs include: ["22"], ["80","443"], and -["12345-12349"].`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - } -} - -func computeFirewallDenySchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "protocol": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareCaseInsensitive, - Description: `The IP protocol to which this rule applies. The protocol type is -required when creating a firewall rule. This value can either be -one of the following well known protocol strings (tcp, udp, -icmp, esp, ah, sctp, ipip, all), or the IP protocol number.`, - }, - "ports": { - Type: schema.TypeList, - Optional: true, - Description: `An optional list of ports to which this rule applies. This field -is only applicable for UDP or TCP protocol. Each entry must be -either an integer or a range. If not specified, this rule -applies to connections through any port. 
- -Example inputs include: ["22"], ["80","443"], and -["12345-12349"].`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - } -} - -func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - allowedProp, err := expandComputeFirewallAllow(d.Get("allow"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("allow"); !isEmptyValue(reflect.ValueOf(allowedProp)) && (ok || !reflect.DeepEqual(v, allowedProp)) { - obj["allowed"] = allowedProp - } - deniedProp, err := expandComputeFirewallDeny(d.Get("deny"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deny"); !isEmptyValue(reflect.ValueOf(deniedProp)) && (ok || !reflect.DeepEqual(v, deniedProp)) { - obj["denied"] = deniedProp - } - descriptionProp, err := expandComputeFirewallDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - destinationRangesProp, err := expandComputeFirewallDestinationRanges(d.Get("destination_ranges"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destination_ranges"); !isEmptyValue(reflect.ValueOf(destinationRangesProp)) && (ok || !reflect.DeepEqual(v, destinationRangesProp)) { - obj["destinationRanges"] = destinationRangesProp - } - directionProp, err := expandComputeFirewallDirection(d.Get("direction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("direction"); !isEmptyValue(reflect.ValueOf(directionProp)) && (ok || !reflect.DeepEqual(v, directionProp)) { - obj["direction"] = directionProp - } - disabledProp, err := 
expandComputeFirewallDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); ok || !reflect.DeepEqual(v, disabledProp) { - obj["disabled"] = disabledProp - } - logConfigProp, err := expandComputeFirewallLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); ok || !reflect.DeepEqual(v, logConfigProp) { - obj["logConfig"] = logConfigProp - } - nameProp, err := expandComputeFirewallName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandComputeFirewallNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - priorityProp, err := expandComputeFirewallPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); ok || !reflect.DeepEqual(v, priorityProp) { - obj["priority"] = priorityProp - } - sourceRangesProp, err := expandComputeFirewallSourceRanges(d.Get("source_ranges"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_ranges"); !isEmptyValue(reflect.ValueOf(sourceRangesProp)) && (ok || !reflect.DeepEqual(v, sourceRangesProp)) { - obj["sourceRanges"] = sourceRangesProp - } - sourceServiceAccountsProp, err := expandComputeFirewallSourceServiceAccounts(d.Get("source_service_accounts"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_service_accounts"); !isEmptyValue(reflect.ValueOf(sourceServiceAccountsProp)) && (ok || !reflect.DeepEqual(v, sourceServiceAccountsProp)) { - obj["sourceServiceAccounts"] = sourceServiceAccountsProp - } - 
sourceTagsProp, err := expandComputeFirewallSourceTags(d.Get("source_tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_tags"); !isEmptyValue(reflect.ValueOf(sourceTagsProp)) && (ok || !reflect.DeepEqual(v, sourceTagsProp)) { - obj["sourceTags"] = sourceTagsProp - } - targetServiceAccountsProp, err := expandComputeFirewallTargetServiceAccounts(d.Get("target_service_accounts"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_service_accounts"); !isEmptyValue(reflect.ValueOf(targetServiceAccountsProp)) && (ok || !reflect.DeepEqual(v, targetServiceAccountsProp)) { - obj["targetServiceAccounts"] = targetServiceAccountsProp - } - targetTagsProp, err := expandComputeFirewallTargetTags(d.Get("target_tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_tags"); !isEmptyValue(reflect.ValueOf(targetTagsProp)) && (ok || !reflect.DeepEqual(v, targetTagsProp)) { - obj["targetTags"] = targetTagsProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewalls") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Firewall: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Firewall: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Firewall: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/firewalls/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, 
res, project, "Creating Firewall", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Firewall: %s", err) - } - - log.Printf("[DEBUG] Finished creating Firewall %q: %#v", d.Id(), res) - - return resourceComputeFirewallRead(d, meta) -} - -func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewalls/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Firewall: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeFirewall %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - - if err := d.Set("allow", flattenComputeFirewallAllow(res["allowed"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeFirewallCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("deny", flattenComputeFirewallDeny(res["denied"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("description", flattenComputeFirewallDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: 
%s", err) - } - if err := d.Set("destination_ranges", flattenComputeFirewallDestinationRanges(res["destinationRanges"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("direction", flattenComputeFirewallDirection(res["direction"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("disabled", flattenComputeFirewallDisabled(res["disabled"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("log_config", flattenComputeFirewallLogConfig(res["logConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("name", flattenComputeFirewallName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("network", flattenComputeFirewallNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("priority", flattenComputeFirewallPriority(res["priority"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("source_ranges", flattenComputeFirewallSourceRanges(res["sourceRanges"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("source_service_accounts", flattenComputeFirewallSourceServiceAccounts(res["sourceServiceAccounts"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("source_tags", flattenComputeFirewallSourceTags(res["sourceTags"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("target_service_accounts", flattenComputeFirewallTargetServiceAccounts(res["targetServiceAccounts"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("target_tags", 
flattenComputeFirewallTargetTags(res["targetTags"], d, config)); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading Firewall: %s", err) - } - - return nil -} - -func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Firewall: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - allowedProp, err := expandComputeFirewallAllow(d.Get("allow"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("allow"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, allowedProp)) { - obj["allowed"] = allowedProp - } - deniedProp, err := expandComputeFirewallDeny(d.Get("deny"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deny"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deniedProp)) { - obj["denied"] = deniedProp - } - descriptionProp, err := expandComputeFirewallDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - destinationRangesProp, err := expandComputeFirewallDestinationRanges(d.Get("destination_ranges"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destination_ranges"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, destinationRangesProp)) { - obj["destinationRanges"] = destinationRangesProp - } - disabledProp, err := expandComputeFirewallDisabled(d.Get("disabled"), 
d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); ok || !reflect.DeepEqual(v, disabledProp) { - obj["disabled"] = disabledProp - } - logConfigProp, err := expandComputeFirewallLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); ok || !reflect.DeepEqual(v, logConfigProp) { - obj["logConfig"] = logConfigProp - } - networkProp, err := expandComputeFirewallNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - priorityProp, err := expandComputeFirewallPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); ok || !reflect.DeepEqual(v, priorityProp) { - obj["priority"] = priorityProp - } - sourceRangesProp, err := expandComputeFirewallSourceRanges(d.Get("source_ranges"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_ranges"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceRangesProp)) { - obj["sourceRanges"] = sourceRangesProp - } - sourceServiceAccountsProp, err := expandComputeFirewallSourceServiceAccounts(d.Get("source_service_accounts"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_service_accounts"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceServiceAccountsProp)) { - obj["sourceServiceAccounts"] = sourceServiceAccountsProp - } - sourceTagsProp, err := expandComputeFirewallSourceTags(d.Get("source_tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_tags"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceTagsProp)) { - obj["sourceTags"] = sourceTagsProp - } - targetServiceAccountsProp, err := 
expandComputeFirewallTargetServiceAccounts(d.Get("target_service_accounts"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_service_accounts"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetServiceAccountsProp)) { - obj["targetServiceAccounts"] = targetServiceAccountsProp - } - targetTagsProp, err := expandComputeFirewallTargetTags(d.Get("target_tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_tags"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetTagsProp)) { - obj["targetTags"] = targetTagsProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewalls/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Firewall %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Firewall %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Firewall %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating Firewall", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeFirewallRead(d, meta) -} - -func resourceComputeFirewallDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Firewall: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/global/firewalls/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Firewall %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Firewall") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting Firewall", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Firewall %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeFirewallImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/firewalls/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/firewalls/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeFirewallAllow(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(resourceComputeFirewallRuleHash, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "protocol": flattenComputeFirewallAllowProtocol(original["IPProtocol"], d, config), - "ports": flattenComputeFirewallAllowPorts(original["ports"], d, 
config), - }) - } - return transformed -} -func flattenComputeFirewallAllowProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallAllowPorts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallDeny(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(resourceComputeFirewallRuleHash, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "protocol": flattenComputeFirewallDenyProtocol(original["IPProtocol"], d, config), - "ports": flattenComputeFirewallDenyPorts(original["ports"], d, config), - }) - } - return transformed -} -func flattenComputeFirewallDenyProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallDenyPorts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallDestinationRanges(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenComputeFirewallDirection(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallLogConfig(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - - v, ok := original["enable"] - if ok && !v.(bool) { - return nil - } - - transformed := make(map[string]interface{}) - transformed["metadata"] = original["metadata"] - return []interface{}{transformed} -} - -func flattenComputeFirewallName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeFirewallPriority(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeFirewallSourceRanges(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenComputeFirewallSourceServiceAccounts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenComputeFirewallSourceTags(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenComputeFirewallTargetServiceAccounts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenComputeFirewallTargetTags(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func expandComputeFirewallAllow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProtocol, err := expandComputeFirewallAllowProtocol(original["protocol"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProtocol); val.IsValid() && !isEmptyValue(val) { - transformed["IPProtocol"] = transformedProtocol - } - - transformedPorts, err := expandComputeFirewallAllowPorts(original["ports"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPorts); val.IsValid() && !isEmptyValue(val) { - transformed["ports"] = transformedPorts - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeFirewallAllowProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallAllowPorts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallDeny(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProtocol, err := expandComputeFirewallDenyProtocol(original["protocol"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProtocol); val.IsValid() && !isEmptyValue(val) { - 
transformed["IPProtocol"] = transformedProtocol - } - - transformedPorts, err := expandComputeFirewallDenyPorts(original["ports"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPorts); val.IsValid() && !isEmptyValue(val) { - transformed["ports"] = transformedPorts - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeFirewallDenyProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallDenyPorts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallDestinationRanges(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandComputeFirewallDirection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - transformed := make(map[string]interface{}) - - if len(l) == 0 || l[0] == nil { - // send enable = enable_logging value to ensure correct logging status if there is no config - transformed["enable"] = d.Get("enable_logging").(bool) - return transformed, nil - } - - raw := l[0] - original := raw.(map[string]interface{}) - - // The log_config block is specified, so logging should be enabled - transformed["enable"] = true - transformed["metadata"] = original["metadata"] - - return transformed, nil -} - -func expandComputeFirewallName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { 
- return v, nil -} - -func expandComputeFirewallNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeFirewallPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallSourceRanges(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandComputeFirewallSourceServiceAccounts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandComputeFirewallSourceTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandComputeFirewallTargetServiceAccounts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandComputeFirewallTargetTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall_policy_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall_policy_rule.go deleted file mode 100644 index b746158d2e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall_policy_rule.go +++ /dev/null @@ -1,501 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: DCL *** -// -// 
---------------------------------------------------------------------------- -// -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. -// -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" -) - -func ResourceComputeFirewallPolicyRule() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeFirewallPolicyRuleCreate, - Read: resourceComputeFirewallPolicyRuleRead, - Update: resourceComputeFirewallPolicyRuleUpdate, - Delete: resourceComputeFirewallPolicyRuleDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeFirewallPolicyRuleImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "action": { - Type: schema.TypeString, - Required: true, - Description: "The Action to perform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", - }, - - "direction": { - Type: schema.TypeString, - Required: true, - Description: "The direction in which this rule applies. 
Possible values: INGRESS, EGRESS", - }, - - "firewall_policy": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The firewall policy of the resource.", - }, - - "match": { - Type: schema.TypeList, - Required: true, - Description: "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced.", - MaxItems: 1, - Elem: ComputeFirewallPolicyRuleMatchSchema(), - }, - - "priority": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", - }, - - "description": { - Type: schema.TypeString, - Optional: true, - Description: "An optional description for this resource.", - }, - - "disabled": { - Type: schema.TypeBool, - Optional: true, - Description: "Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled.", - }, - - "enable_logging": { - Type: schema.TypeBool, - Optional: true, - Description: "Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on \"goto_next\" rules.", - }, - - "target_resources": { - Type: schema.TypeList, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. 
If this field is left blank, all VMs within the organization will receive the rule.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "target_service_accounts": { - Type: schema.TypeList, - Optional: true, - Description: "A list of service accounts indicating the sets of instances that are applied with this rule.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "kind": { - Type: schema.TypeString, - Computed: true, - Description: "Type of the resource. Always `compute#firewallPolicyRule` for firewall policy rules", - }, - - "rule_tuple_count": { - Type: schema.TypeInt, - Computed: true, - Description: "Calculation of the complexity of a single firewall policy rule.", - }, - }, - } -} - -func ComputeFirewallPolicyRuleMatchSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "layer4_configs": { - Type: schema.TypeList, - Required: true, - Description: "Pairs of IP protocols and ports that the rule should match.", - Elem: ComputeFirewallPolicyRuleMatchLayer4ConfigsSchema(), - }, - - "dest_ip_ranges": { - Type: schema.TypeList, - Optional: true, - Description: "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 256.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "src_ip_ranges": { - Type: schema.TypeList, - Optional: true, - Description: "CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 256.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func ComputeFirewallPolicyRuleMatchLayer4ConfigsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_protocol": { - Type: schema.TypeString, - Required: true, - Description: "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. 
This value can either be one of the following well known protocol strings (`tcp`, `udp`, `icmp`, `esp`, `ah`, `ipip`, `sctp`), or the IP protocol number.", - }, - - "ports": { - Type: schema.TypeList, - Optional: true, - Description: "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ``.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func resourceComputeFirewallPolicyRuleCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &compute.FirewallPolicyRule{ - Action: dcl.String(d.Get("action").(string)), - Direction: compute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), - Priority: dcl.Int64(int64(d.Get("priority").(int))), - Description: dcl.String(d.Get("description").(string)), - Disabled: dcl.Bool(d.Get("disabled").(bool)), - EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), - TargetResources: expandStringArray(d.Get("target_resources")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), - } - - id, err := obj.ID() - if err != nil { - return fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not 
format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyFirewallPolicyRule(context.Background(), obj, directive...) - - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error creating FirewallPolicyRule: %s", err) - } - - log.Printf("[DEBUG] Finished creating FirewallPolicyRule %q: %#v", d.Id(), res) - - return resourceComputeFirewallPolicyRuleRead(d, meta) -} - -func resourceComputeFirewallPolicyRuleRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &compute.FirewallPolicyRule{ - Action: dcl.String(d.Get("action").(string)), - Direction: compute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), - Priority: dcl.Int64(int64(d.Get("priority").(int))), - Description: dcl.String(d.Get("description").(string)), - Disabled: dcl.Bool(d.Get("disabled").(bool)), - EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), - TargetResources: expandStringArray(d.Get("target_resources")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := 
client.GetFirewallPolicyRule(context.Background(), obj) - if err != nil { - resourceName := fmt.Sprintf("ComputeFirewallPolicyRule %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("action", res.Action); err != nil { - return fmt.Errorf("error setting action in state: %s", err) - } - if err = d.Set("direction", res.Direction); err != nil { - return fmt.Errorf("error setting direction in state: %s", err) - } - if err = d.Set("firewall_policy", res.FirewallPolicy); err != nil { - return fmt.Errorf("error setting firewall_policy in state: %s", err) - } - if err = d.Set("match", flattenComputeFirewallPolicyRuleMatch(res.Match)); err != nil { - return fmt.Errorf("error setting match in state: %s", err) - } - if err = d.Set("priority", res.Priority); err != nil { - return fmt.Errorf("error setting priority in state: %s", err) - } - if err = d.Set("description", res.Description); err != nil { - return fmt.Errorf("error setting description in state: %s", err) - } - if err = d.Set("disabled", res.Disabled); err != nil { - return fmt.Errorf("error setting disabled in state: %s", err) - } - if err = d.Set("enable_logging", res.EnableLogging); err != nil { - return fmt.Errorf("error setting enable_logging in state: %s", err) - } - if err = d.Set("target_resources", res.TargetResources); err != nil { - return fmt.Errorf("error setting target_resources in state: %s", err) - } - if err = d.Set("target_service_accounts", res.TargetServiceAccounts); err != nil { - return fmt.Errorf("error setting target_service_accounts in state: %s", err) - } - if err = d.Set("kind", res.Kind); err != nil { - return fmt.Errorf("error setting kind in state: %s", err) - } - if err = d.Set("rule_tuple_count", res.RuleTupleCount); err != nil { - return fmt.Errorf("error setting rule_tuple_count in state: %s", err) - } - - return nil -} -func resourceComputeFirewallPolicyRuleUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - 
obj := &compute.FirewallPolicyRule{ - Action: dcl.String(d.Get("action").(string)), - Direction: compute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), - Priority: dcl.Int64(int64(d.Get("priority").(int))), - Description: dcl.String(d.Get("description").(string)), - Disabled: dcl.Bool(d.Get("disabled").(bool)), - EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), - TargetResources: expandStringArray(d.Get("target_resources")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), - } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyFirewallPolicyRule(context.Background(), obj, directive...) 
- - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error updating FirewallPolicyRule: %s", err) - } - - log.Printf("[DEBUG] Finished creating FirewallPolicyRule %q: %#v", d.Id(), res) - - return resourceComputeFirewallPolicyRuleRead(d, meta) -} - -func resourceComputeFirewallPolicyRuleDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &compute.FirewallPolicyRule{ - Action: dcl.String(d.Get("action").(string)), - Direction: compute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), - Priority: dcl.Int64(int64(d.Get("priority").(int))), - Description: dcl.String(d.Get("description").(string)), - Disabled: dcl.Bool(d.Get("disabled").(bool)), - EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), - TargetResources: expandStringArray(d.Get("target_resources")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), - } - - log.Printf("[DEBUG] Deleting FirewallPolicyRule %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - if err := client.DeleteFirewallPolicyRule(context.Background(), obj); err != nil { - return fmt.Errorf("Error deleting 
FirewallPolicyRule: %s", err) - } - - log.Printf("[DEBUG] Finished deleting FirewallPolicyRule %q", d.Id()) - return nil -} - -func resourceComputeFirewallPolicyRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "locations/global/firewallPolicies/(?P[^/]+)/rules/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func expandComputeFirewallPolicyRuleMatch(o interface{}) *compute.FirewallPolicyRuleMatch { - if o == nil { - return compute.EmptyFirewallPolicyRuleMatch - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return compute.EmptyFirewallPolicyRuleMatch - } - obj := objArr[0].(map[string]interface{}) - return &compute.FirewallPolicyRuleMatch{ - Layer4Configs: expandComputeFirewallPolicyRuleMatchLayer4ConfigsArray(obj["layer4_configs"]), - DestIPRanges: expandStringArray(obj["dest_ip_ranges"]), - SrcIPRanges: expandStringArray(obj["src_ip_ranges"]), - } -} - -func flattenComputeFirewallPolicyRuleMatch(obj *compute.FirewallPolicyRuleMatch) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "layer4_configs": flattenComputeFirewallPolicyRuleMatchLayer4ConfigsArray(obj.Layer4Configs), - "dest_ip_ranges": obj.DestIPRanges, - "src_ip_ranges": obj.SrcIPRanges, - } - - return []interface{}{transformed} - -} -func expandComputeFirewallPolicyRuleMatchLayer4ConfigsArray(o interface{}) []compute.FirewallPolicyRuleMatchLayer4Configs { - if o == nil { - return make([]compute.FirewallPolicyRuleMatchLayer4Configs, 0) - } - - objs := o.([]interface{}) - if 
len(objs) == 0 || objs[0] == nil { - return make([]compute.FirewallPolicyRuleMatchLayer4Configs, 0) - } - - items := make([]compute.FirewallPolicyRuleMatchLayer4Configs, 0, len(objs)) - for _, item := range objs { - i := expandComputeFirewallPolicyRuleMatchLayer4Configs(item) - items = append(items, *i) - } - - return items -} - -func expandComputeFirewallPolicyRuleMatchLayer4Configs(o interface{}) *compute.FirewallPolicyRuleMatchLayer4Configs { - if o == nil { - return compute.EmptyFirewallPolicyRuleMatchLayer4Configs - } - - obj := o.(map[string]interface{}) - return &compute.FirewallPolicyRuleMatchLayer4Configs{ - IPProtocol: dcl.String(obj["ip_protocol"].(string)), - Ports: expandStringArray(obj["ports"]), - } -} - -func flattenComputeFirewallPolicyRuleMatchLayer4ConfigsArray(objs []compute.FirewallPolicyRuleMatchLayer4Configs) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenComputeFirewallPolicyRuleMatchLayer4Configs(&item) - items = append(items, i) - } - - return items -} - -func flattenComputeFirewallPolicyRuleMatchLayer4Configs(obj *compute.FirewallPolicyRuleMatchLayer4Configs) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "ip_protocol": obj.IPProtocol, - "ports": obj.Ports, - } - - return transformed - -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_forwarding_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_forwarding_rule.go deleted file mode 100644 index 65111a5bfd..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_forwarding_rule.go +++ /dev/null @@ -1,680 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: DCL *** -// -// 
---------------------------------------------------------------------------- -// -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. -// -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" -) - -func ResourceComputeForwardingRule() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeForwardingRuleCreate, - Read: resourceComputeForwardingRuleRead, - Update: resourceComputeForwardingRuleUpdate, - Delete: resourceComputeForwardingRuleDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeForwardingRuleImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - }, - - "all_ports": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: "This field is used along with the `backend_service` field for internal load balancing or with the `target` field for internal TargetInstance. This field cannot be used with `port` or `portRange` fields. When the load balancing scheme is `INTERNAL` and protocol is TCP/UDP, specify this field to allow packets addressed to any ports will be forwarded to the backends configured with this forwarding rule.", - }, - - "allow_global_access": { - Type: schema.TypeBool, - Optional: true, - Description: "This field is used along with the `backend_service` field for internal load balancing or with the `target` field for internal TargetInstance. If the field is set to `TRUE`, clients can access ILB from all regions. Otherwise only allows access from clients in the same region as the internal load balancer.", - }, - - "backend_service": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "This field is only used for `INTERNAL` load balancing. For internal load balancing, this field identifies the BackendService resource to receive the matched traffic.", - }, - - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "An optional description of this resource. Provide this property when you create the resource.", - }, - - "ip_address": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: internalIpDiffSuppress, - Description: "IP address that this forwarding rule serves. 
When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule. If you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address: * IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in `https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name` * Partial URL or by name, as in: * `projects/project_id/regions/region/addresses/address-name` * `regions/region/addresses/address-name` * `global/addresses/address-name` * `address-name` The loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).", - }, - - "ip_protocol": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: caseDiffSuppress, - Description: "The IP protocol to which this rule applies. For protocol forwarding, valid options are `TCP`, `UDP`, `ESP`, `AH`, `SCTP` or `ICMP`. For Internal TCP/UDP Load Balancing, the load balancing scheme is `INTERNAL`, and one of `TCP` or `UDP` are valid. For Traffic Director, the load balancing scheme is `INTERNAL_SELF_MANAGED`, and only `TCP`is valid. For Internal HTTP(S) Load Balancing, the load balancing scheme is `INTERNAL_MANAGED`, and only `TCP` is valid. For HTTP(S), SSL Proxy, and TCP Proxy Load Balancing, the load balancing scheme is `EXTERNAL` and only `TCP` is valid. For Network TCP/UDP Load Balancing, the load balancing scheme is `EXTERNAL`, and one of `TCP` or `UDP` is valid.", - }, - - "is_mirroring_collector": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: "Indicates whether or not this load balancer can be used as a collector for packet mirroring. 
To prevent mirroring loops, instances behind this load balancer will not have their traffic mirrored even if a `PacketMirroring` rule applies to them. This can only be set to true for load balancers that have their `loadBalancingScheme` set to `INTERNAL`.", - }, - - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: "Labels to apply to this rule.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "load_balancing_scheme": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Specifies the forwarding rule type.\n\n* `EXTERNAL` is used for:\n * Classic Cloud VPN gateways\n * Protocol forwarding to VMs from an external IP address\n * The following load balancers: HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP\n* `INTERNAL` is used for:\n * Protocol forwarding to VMs from an internal IP address\n * Internal TCP/UDP load balancers\n* `INTERNAL_MANAGED` is used for:\n * Internal HTTP(S) load balancers\n* `INTERNAL_SELF_MANAGED` is used for:\n * Traffic Director\n* `EXTERNAL_MANAGED` is used for:\n * Global external HTTP(S) load balancers \n\nFor more information about forwarding rules, refer to [Forwarding rule concepts](/load-balancing/docs/forwarding-rule-concepts). Possible values: INVALID, INTERNAL, INTERNAL_MANAGED, INTERNAL_SELF_MANAGED, EXTERNAL, EXTERNAL_MANAGED", - Default: "EXTERNAL", - }, - - "network": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "This field is not used for external load balancing. For `INTERNAL` and `INTERNAL_SELF_MANAGED` load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. 
If this field is not specified, the default network will be used.", - }, - - "network_tier": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: "This signifies the networking tier used for configuring this load balancer and can only take the following values: `PREMIUM`, `STANDARD`. For regional ForwardingRule, the valid values are `PREMIUM` and `STANDARD`. For GlobalForwardingRule, the valid value is `PREMIUM`. If this field is not specified, it is assumed to be `PREMIUM`. If `IPAddress` is specified, this value must be equal to the networkTier of the Address.", - }, - - "port_range": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: portRangeDiffSuppress, - Description: "When the load balancing scheme is `EXTERNAL`, `INTERNAL_SELF_MANAGED` and `INTERNAL_MANAGED`, you can specify a `port_range`. Use with a forwarding rule that points to a target proxy or a target pool. Do not use with a forwarding rule that points to a backend service. This field is used along with the `target` field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. Applicable only when `IPProtocol` is `TCP`, `UDP`, or `SCTP`, only packets addressed to ports in the specified range will be forwarded to `target`. Forwarding rules with the same `[IPAddress, IPProtocol]` pair must have disjoint port ranges. Some types of forwarding target have constraints on the acceptable ports:\n\n* TargetHttpProxy: 80, 8080\n* TargetHttpsProxy: 443\n* TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n* TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n* TargetVpnGateway: 500, 4500\n\n@pattern: d+(?:-d+)?", - }, - - "ports": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Description: "This field is used along with the `backend_service` field for internal load balancing. 
When the load balancing scheme is `INTERNAL`, a list of ports can be configured, for example, ['80'], ['8000','9000']. Only packets addressed to these ports are forwarded to the backends configured with the forwarding rule. If the forwarding rule's loadBalancingScheme is INTERNAL, you can specify ports in one of the following ways: * A list of up to five ports, which can be non-contiguous * Keyword `ALL`, which causes the forwarding rule to forward traffic on any port of the forwarding rule's protocol. @pattern: d+(?:-d+)? For more information, refer to [Port specifications](/load-balancing/docs/forwarding-rule-concepts#port_specifications).", - MaxItems: 5, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The project this resource belongs in.", - }, - - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The location of this resource.", - }, - - "service_directory_registrations": { - Type: schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Service Directory resources to register this forwarding rule with. Currently, only supports a single Service Directory resource.", - Elem: ComputeForwardingRuleServiceDirectoryRegistrationsSchema(), - }, - - "service_label": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). 
Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", - ValidateFunc: validateGCEName, - }, - - "subnetwork": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "This field is only used for `INTERNAL` load balancing. For internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule. If the network specified is in auto subnet mode, this field is optional. However, if the network is in custom subnet mode, a subnetwork must be specified.", - }, - - "target": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. 
For `INTERNAL_SELF_MANAGED` load balancing, only `targetHttpProxy` is valid, not `targetHttpsProxy`.", - }, - - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: "[Output Only] Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", - }, - - "label_fingerprint": { - Type: schema.TypeString, - Computed: true, - Description: "Used internally during label updates.", - }, - - "psc_connection_id": { - Type: schema.TypeString, - Computed: true, - Description: "The PSC connection id of the PSC Forwarding Rule.", - }, - - "psc_connection_status": { - Type: schema.TypeString, - Computed: true, - Description: "The PSC connection status of the PSC Forwarding Rule. Possible values: STATUS_UNSPECIFIED, PENDING, ACCEPTED, REJECTED, CLOSED", - }, - - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: "[Output Only] Server-defined URL for the resource.", - }, - - "service_name": { - Type: schema.TypeString, - Computed: true, - Description: "[Output Only] The internal fully qualified service name for this Forwarding Rule. 
This field is only used for internal load balancing.", - }, - }, - } -} - -func ComputeForwardingRuleServiceDirectoryRegistrationsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "namespace": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Service Directory namespace to register the forwarding rule under.", - }, - - "service": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Service Directory service to register the forwarding rule under.", - }, - }, - } -} - -func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - - obj := &compute.ForwardingRule{ - Name: dcl.String(d.Get("name").(string)), - AllPorts: dcl.Bool(d.Get("all_ports").(bool)), - AllowGlobalAccess: dcl.Bool(d.Get("allow_global_access").(bool)), - BackendService: dcl.String(d.Get("backend_service").(string)), - Description: dcl.String(d.Get("description").(string)), - IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IsMirroringCollector: dcl.Bool(d.Get("is_mirroring_collector").(bool)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - Network: dcl.StringOrNil(d.Get("network").(string)), - NetworkTier: compute.ForwardingRuleNetworkTierEnumRef(d.Get("network_tier").(string)), - PortRange: dcl.String(d.Get("port_range").(string)), - Ports: expandStringArray(d.Get("ports")), - Project: dcl.String(project), - Location: dcl.String(region), - ServiceDirectoryRegistrations: 
expandComputeForwardingRuleServiceDirectoryRegistrationsArray(d.Get("service_directory_registrations")), - ServiceLabel: dcl.String(d.Get("service_label").(string)), - Subnetwork: dcl.StringOrNil(d.Get("subnetwork").(string)), - Target: dcl.String(d.Get("target").(string)), - } - - id, err := replaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") - if err != nil { - return fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := project - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyForwardingRule(context.Background(), obj, directive...) 
- - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error creating ForwardingRule: %s", err) - } - - log.Printf("[DEBUG] Finished creating ForwardingRule %q: %#v", d.Id(), res) - - return resourceComputeForwardingRuleRead(d, meta) -} - -func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - - obj := &compute.ForwardingRule{ - Name: dcl.String(d.Get("name").(string)), - AllPorts: dcl.Bool(d.Get("all_ports").(bool)), - AllowGlobalAccess: dcl.Bool(d.Get("allow_global_access").(bool)), - BackendService: dcl.String(d.Get("backend_service").(string)), - Description: dcl.String(d.Get("description").(string)), - IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IsMirroringCollector: dcl.Bool(d.Get("is_mirroring_collector").(bool)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - Network: dcl.StringOrNil(d.Get("network").(string)), - NetworkTier: compute.ForwardingRuleNetworkTierEnumRef(d.Get("network_tier").(string)), - PortRange: dcl.String(d.Get("port_range").(string)), - Ports: expandStringArray(d.Get("ports")), - Project: dcl.String(project), - Location: dcl.String(region), - ServiceDirectoryRegistrations: expandComputeForwardingRuleServiceDirectoryRegistrationsArray(d.Get("service_directory_registrations")), - ServiceLabel: dcl.String(d.Get("service_label").(string)), - Subnetwork: dcl.StringOrNil(d.Get("subnetwork").(string)), - Target: dcl.String(d.Get("target").(string)), - } - - 
userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := project - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.GetForwardingRule(context.Background(), obj) - if err != nil { - resourceName := fmt.Sprintf("ComputeForwardingRule %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("name", res.Name); err != nil { - return fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("all_ports", res.AllPorts); err != nil { - return fmt.Errorf("error setting all_ports in state: %s", err) - } - if err = d.Set("allow_global_access", res.AllowGlobalAccess); err != nil { - return fmt.Errorf("error setting allow_global_access in state: %s", err) - } - if err = d.Set("backend_service", res.BackendService); err != nil { - return fmt.Errorf("error setting backend_service in state: %s", err) - } - if err = d.Set("description", res.Description); err != nil { - return fmt.Errorf("error setting description in state: %s", err) - } - if err = d.Set("ip_address", res.IPAddress); err != nil { - return fmt.Errorf("error setting ip_address in state: %s", err) - } - if err = d.Set("ip_protocol", res.IPProtocol); err != nil { - return fmt.Errorf("error setting ip_protocol in state: %s", err) - } - if err = d.Set("is_mirroring_collector", res.IsMirroringCollector); err != nil { - return fmt.Errorf("error setting is_mirroring_collector in state: %s", err) - } - if err = d.Set("labels", res.Labels); err != nil { - return fmt.Errorf("error setting labels in 
state: %s", err) - } - if err = d.Set("load_balancing_scheme", res.LoadBalancingScheme); err != nil { - return fmt.Errorf("error setting load_balancing_scheme in state: %s", err) - } - if err = d.Set("network", res.Network); err != nil { - return fmt.Errorf("error setting network in state: %s", err) - } - if err = d.Set("network_tier", res.NetworkTier); err != nil { - return fmt.Errorf("error setting network_tier in state: %s", err) - } - if err = d.Set("port_range", res.PortRange); err != nil { - return fmt.Errorf("error setting port_range in state: %s", err) - } - if err = d.Set("ports", res.Ports); err != nil { - return fmt.Errorf("error setting ports in state: %s", err) - } - if err = d.Set("project", res.Project); err != nil { - return fmt.Errorf("error setting project in state: %s", err) - } - if err = d.Set("region", res.Location); err != nil { - return fmt.Errorf("error setting region in state: %s", err) - } - if err = d.Set("service_directory_registrations", flattenComputeForwardingRuleServiceDirectoryRegistrationsArray(res.ServiceDirectoryRegistrations)); err != nil { - return fmt.Errorf("error setting service_directory_registrations in state: %s", err) - } - if err = d.Set("service_label", res.ServiceLabel); err != nil { - return fmt.Errorf("error setting service_label in state: %s", err) - } - if err = d.Set("subnetwork", res.Subnetwork); err != nil { - return fmt.Errorf("error setting subnetwork in state: %s", err) - } - if err = d.Set("target", res.Target); err != nil { - return fmt.Errorf("error setting target in state: %s", err) - } - if err = d.Set("creation_timestamp", res.CreationTimestamp); err != nil { - return fmt.Errorf("error setting creation_timestamp in state: %s", err) - } - if err = d.Set("label_fingerprint", res.LabelFingerprint); err != nil { - return fmt.Errorf("error setting label_fingerprint in state: %s", err) - } - if err = d.Set("psc_connection_id", res.PscConnectionId); err != nil { - return fmt.Errorf("error setting 
psc_connection_id in state: %s", err) - } - if err = d.Set("psc_connection_status", res.PscConnectionStatus); err != nil { - return fmt.Errorf("error setting psc_connection_status in state: %s", err) - } - if err = d.Set("self_link", res.SelfLink); err != nil { - return fmt.Errorf("error setting self_link in state: %s", err) - } - if err = d.Set("service_name", res.ServiceName); err != nil { - return fmt.Errorf("error setting service_name in state: %s", err) - } - - return nil -} -func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - - obj := &compute.ForwardingRule{ - Name: dcl.String(d.Get("name").(string)), - AllPorts: dcl.Bool(d.Get("all_ports").(bool)), - AllowGlobalAccess: dcl.Bool(d.Get("allow_global_access").(bool)), - BackendService: dcl.String(d.Get("backend_service").(string)), - Description: dcl.String(d.Get("description").(string)), - IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IsMirroringCollector: dcl.Bool(d.Get("is_mirroring_collector").(bool)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - Network: dcl.StringOrNil(d.Get("network").(string)), - NetworkTier: compute.ForwardingRuleNetworkTierEnumRef(d.Get("network_tier").(string)), - PortRange: dcl.String(d.Get("port_range").(string)), - Ports: expandStringArray(d.Get("ports")), - Project: dcl.String(project), - Location: dcl.String(region), - ServiceDirectoryRegistrations: expandComputeForwardingRuleServiceDirectoryRegistrationsArray(d.Get("service_directory_registrations")), - ServiceLabel: dcl.String(d.Get("service_label").(string)), - Subnetwork: 
dcl.StringOrNil(d.Get("subnetwork").(string)), - Target: dcl.String(d.Get("target").(string)), - } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyForwardingRule(context.Background(), obj, directive...) - - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error updating ForwardingRule: %s", err) - } - - log.Printf("[DEBUG] Finished creating ForwardingRule %q: %#v", d.Id(), res) - - return resourceComputeForwardingRuleRead(d, meta) -} - -func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - - obj := &compute.ForwardingRule{ - Name: dcl.String(d.Get("name").(string)), - AllPorts: dcl.Bool(d.Get("all_ports").(bool)), - AllowGlobalAccess: dcl.Bool(d.Get("allow_global_access").(bool)), - BackendService: dcl.String(d.Get("backend_service").(string)), - Description: dcl.String(d.Get("description").(string)), - IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IsMirroringCollector: 
dcl.Bool(d.Get("is_mirroring_collector").(bool)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - Network: dcl.StringOrNil(d.Get("network").(string)), - NetworkTier: compute.ForwardingRuleNetworkTierEnumRef(d.Get("network_tier").(string)), - PortRange: dcl.String(d.Get("port_range").(string)), - Ports: expandStringArray(d.Get("ports")), - Project: dcl.String(project), - Location: dcl.String(region), - ServiceDirectoryRegistrations: expandComputeForwardingRuleServiceDirectoryRegistrationsArray(d.Get("service_directory_registrations")), - ServiceLabel: dcl.String(d.Get("service_label").(string)), - Subnetwork: dcl.StringOrNil(d.Get("subnetwork").(string)), - Target: dcl.String(d.Get("target").(string)), - } - - log.Printf("[DEBUG] Deleting ForwardingRule %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := project - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - if err := client.DeleteForwardingRule(context.Background(), obj); err != nil { - return fmt.Errorf("Error deleting ForwardingRule: %s", err) - } - - log.Printf("[DEBUG] Finished deleting ForwardingRule %q", d.Id()) - return nil -} - -func resourceComputeForwardingRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/forwardingRules/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", 
- "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func expandComputeForwardingRuleServiceDirectoryRegistrationsArray(o interface{}) []compute.ForwardingRuleServiceDirectoryRegistrations { - if o == nil { - return nil - } - - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return nil - } - - items := make([]compute.ForwardingRuleServiceDirectoryRegistrations, 0, len(objs)) - for _, item := range objs { - i := expandComputeForwardingRuleServiceDirectoryRegistrations(item) - items = append(items, *i) - } - - return items -} - -func expandComputeForwardingRuleServiceDirectoryRegistrations(o interface{}) *compute.ForwardingRuleServiceDirectoryRegistrations { - if o == nil { - return nil - } - - obj := o.(map[string]interface{}) - return &compute.ForwardingRuleServiceDirectoryRegistrations{ - Namespace: dcl.StringOrNil(obj["namespace"].(string)), - Service: dcl.String(obj["service"].(string)), - } -} - -func flattenComputeForwardingRuleServiceDirectoryRegistrationsArray(objs []compute.ForwardingRuleServiceDirectoryRegistrations) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenComputeForwardingRuleServiceDirectoryRegistrations(&item) - items = append(items, i) - } - - return items -} - -func flattenComputeForwardingRuleServiceDirectoryRegistrations(obj *compute.ForwardingRuleServiceDirectoryRegistrations) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "namespace": obj.Namespace, - "service": obj.Service, - } - - return transformed - -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_global_address.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_global_address.go deleted file mode 100644 index ffd1438b31..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_global_address.go +++ /dev/null @@ -1,462 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeGlobalAddress() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeGlobalAddressCreate, - Read: resourceComputeGlobalAddressRead, - Delete: resourceComputeGlobalAddressDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeGlobalAddressImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "address": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The IP address or beginning of the address range represented by this -resource. This can be supplied as an input to reserve a specific -address or omitted to allow GCP to choose a valid one for you.`, - }, - "address_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"EXTERNAL", "INTERNAL", ""}), - DiffSuppressFunc: emptyOrDefaultStringSuppress("EXTERNAL"), - Description: `The type of the address to reserve. - -* EXTERNAL indicates public/external single IP address. -* INTERNAL indicates internal IP ranges belonging to some network. Default value: "EXTERNAL" Possible values: ["EXTERNAL", "INTERNAL"]`, - Default: "EXTERNAL", - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "ip_version": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"IPV4", "IPV6", ""}), - DiffSuppressFunc: emptyOrDefaultStringSuppress("IPV4"), - Description: `The IP Version that will be used by this address. The default value is 'IPV4'. Possible values: ["IPV4", "IPV6"]`, - }, - "network": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the network in which to reserve the IP range. The IP range -must be in RFC1918 space. The network cannot be deleted if there are -any reserved IP ranges referring to it. 
- -This should only be set when using an Internal address.`, - }, - "prefix_length": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The prefix length of the IP range. If not present, it means the -address field is a single IP address. - -This field is not applicable to addresses with addressType=EXTERNAL, -or addressType=INTERNAL when purpose=PRIVATE_SERVICE_CONNECT`, - }, - "purpose": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The purpose of the resource. Possible values include: - -* VPC_PEERING - for peer networks - -* PRIVATE_SERVICE_CONNECT - for ([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) only) Private Service Connect networks`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - addressProp, err := expandComputeGlobalAddressAddress(d.Get("address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("address"); !isEmptyValue(reflect.ValueOf(addressProp)) && (ok || !reflect.DeepEqual(v, addressProp)) { - obj["address"] = addressProp - } - descriptionProp, err := expandComputeGlobalAddressDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := 
expandComputeGlobalAddressName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - ipVersionProp, err := expandComputeGlobalAddressIpVersion(d.Get("ip_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_version"); !isEmptyValue(reflect.ValueOf(ipVersionProp)) && (ok || !reflect.DeepEqual(v, ipVersionProp)) { - obj["ipVersion"] = ipVersionProp - } - prefixLengthProp, err := expandComputeGlobalAddressPrefixLength(d.Get("prefix_length"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("prefix_length"); !isEmptyValue(reflect.ValueOf(prefixLengthProp)) && (ok || !reflect.DeepEqual(v, prefixLengthProp)) { - obj["prefixLength"] = prefixLengthProp - } - addressTypeProp, err := expandComputeGlobalAddressAddressType(d.Get("address_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("address_type"); !isEmptyValue(reflect.ValueOf(addressTypeProp)) && (ok || !reflect.DeepEqual(v, addressTypeProp)) { - obj["addressType"] = addressTypeProp - } - purposeProp, err := expandComputeGlobalAddressPurpose(d.Get("purpose"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("purpose"); !isEmptyValue(reflect.ValueOf(purposeProp)) && (ok || !reflect.DeepEqual(v, purposeProp)) { - obj["purpose"] = purposeProp - } - networkProp, err := expandComputeGlobalAddressNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/addresses") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new GlobalAddress: %#v", obj) - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GlobalAddress: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating GlobalAddress: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/addresses/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating GlobalAddress", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create GlobalAddress: %s", err) - } - - log.Printf("[DEBUG] Finished creating GlobalAddress %q: %#v", d.Id(), res) - - return resourceComputeGlobalAddressRead(d, meta) -} - -func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/addresses/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GlobalAddress: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, 
d, fmt.Sprintf("ComputeGlobalAddress %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading GlobalAddress: %s", err) - } - - if err := d.Set("address", flattenComputeGlobalAddressAddress(res["address"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeGlobalAddressCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("description", flattenComputeGlobalAddressDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("name", flattenComputeGlobalAddressName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("ip_version", flattenComputeGlobalAddressIpVersion(res["ipVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("prefix_length", flattenComputeGlobalAddressPrefixLength(res["prefixLength"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("address_type", flattenComputeGlobalAddressAddressType(res["addressType"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("purpose", flattenComputeGlobalAddressPurpose(res["purpose"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("network", flattenComputeGlobalAddressNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading GlobalAddress: %s", err) - } - - return nil -} - -func resourceComputeGlobalAddressDelete(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GlobalAddress: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/addresses/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting GlobalAddress %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GlobalAddress") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting GlobalAddress", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting GlobalAddress %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeGlobalAddressImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/addresses/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/addresses/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeGlobalAddressAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressCreationTimestamp(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressIpVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressPrefixLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeGlobalAddressAddressType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressPurpose(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeGlobalAddressAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressIpVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressPrefixLength(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressAddressType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressPurpose(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_global_forwarding_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_global_forwarding_rule.go deleted file mode 100644 index af835e6db3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_global_forwarding_rule.go +++ /dev/null @@ -1,602 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: DCL *** -// -// ---------------------------------------------------------------------------- -// -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. -// -// We are not currently able to accept contributions to this file. 
If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" -) - -func ResourceComputeGlobalForwardingRule() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeGlobalForwardingRuleCreate, - Read: resourceComputeGlobalForwardingRuleRead, - Update: resourceComputeGlobalForwardingRuleUpdate, - Delete: resourceComputeGlobalForwardingRuleDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeGlobalForwardingRuleImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - }, - - "target": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: "The URL of the target resource to receive the matched traffic. 
For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For `INTERNAL_SELF_MANAGED` load balancing, only `targetHttpProxy` is valid, not `targetHttpsProxy`.", - }, - - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "An optional description of this resource. Provide this property when you create the resource.", - }, - - "ip_address": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: internalIpDiffSuppress, - Description: "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule. If you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address: * IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in `https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name` * Partial URL or by name, as in: * `projects/project_id/regions/region/addresses/address-name` * `regions/region/addresses/address-name` * `global/addresses/address-name` * `address-name` The loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).", - }, - - "ip_protocol": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: caseDiffSuppress, - Description: "The IP protocol to which this rule applies. For protocol forwarding, valid options are `TCP`, `UDP`, `ESP`, `AH`, `SCTP` or `ICMP`. 
For Internal TCP/UDP Load Balancing, the load balancing scheme is `INTERNAL`, and one of `TCP` or `UDP` are valid. For Traffic Director, the load balancing scheme is `INTERNAL_SELF_MANAGED`, and only `TCP`is valid. For Internal HTTP(S) Load Balancing, the load balancing scheme is `INTERNAL_MANAGED`, and only `TCP` is valid. For HTTP(S), SSL Proxy, and TCP Proxy Load Balancing, the load balancing scheme is `EXTERNAL` and only `TCP` is valid. For Network TCP/UDP Load Balancing, the load balancing scheme is `EXTERNAL`, and one of `TCP` or `UDP` is valid.", - }, - - "ip_version": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The IP Version that will be used by this forwarding rule. Valid options are `IPV4` or `IPV6`. This can only be specified for an external global forwarding rule. Possible values: UNSPECIFIED_VERSION, IPV4, IPV6", - }, - - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: "Labels to apply to this rule.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "load_balancing_scheme": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Specifies the forwarding rule type.\n\n* `EXTERNAL` is used for:\n * Classic Cloud VPN gateways\n * Protocol forwarding to VMs from an external IP address\n * The following load balancers: HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP\n* `INTERNAL` is used for:\n * Protocol forwarding to VMs from an internal IP address\n * Internal TCP/UDP load balancers\n* `INTERNAL_MANAGED` is used for:\n * Internal HTTP(S) load balancers\n* `INTERNAL_SELF_MANAGED` is used for:\n * Traffic Director\n* `EXTERNAL_MANAGED` is used for:\n * Global external HTTP(S) load balancers \n\nFor more information about forwarding rules, refer to [Forwarding rule concepts](/load-balancing/docs/forwarding-rule-concepts). 
Possible values: INVALID, INTERNAL, INTERNAL_MANAGED, INTERNAL_SELF_MANAGED, EXTERNAL, EXTERNAL_MANAGED", - Default: "EXTERNAL", - }, - - "metadata_filters": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of [xDS](https://github.com/envoyproxy/data-plane-api/blob/master/XDS_PROTOCOL.md) compliant clients. In their xDS requests to Loadbalancer, xDS clients present [node metadata](https://github.com/envoyproxy/data-plane-api/search?q=%22message+Node%22+in%3A%2Fenvoy%2Fapi%2Fv2%2Fcore%2Fbase.proto&). If a match takes place, the relevant configuration is made available to those proxies. Otherwise, all the resources (e.g. `TargetHttpProxy`, `UrlMap`) referenced by the `ForwardingRule` will not be visible to those proxies.\n\nFor each `metadataFilter` in this list, if its `filterMatchCriteria` is set to MATCH_ANY, at least one of the `filterLabel`s must match the corresponding label provided in the metadata. If its `filterMatchCriteria` is set to MATCH_ALL, then all of its `filterLabel`s must match with corresponding labels provided in the metadata.\n\n`metadataFilters` specified here will be applifed before those specified in the `UrlMap` that this `ForwardingRule` references.\n\n`metadataFilters` only applies to Loadbalancers that have their loadBalancingScheme set to `INTERNAL_SELF_MANAGED`.", - Elem: ComputeGlobalForwardingRuleMetadataFilterSchema(), - }, - - "network": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "This field is not used for external load balancing. For `INTERNAL` and `INTERNAL_SELF_MANAGED` load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. 
If this field is not specified, the default network will be used.", - }, - - "port_range": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: portRangeDiffSuppress, - Description: "When the load balancing scheme is `EXTERNAL`, `INTERNAL_SELF_MANAGED` and `INTERNAL_MANAGED`, you can specify a `port_range`. Use with a forwarding rule that points to a target proxy or a target pool. Do not use with a forwarding rule that points to a backend service. This field is used along with the `target` field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. Applicable only when `IPProtocol` is `TCP`, `UDP`, or `SCTP`, only packets addressed to ports in the specified range will be forwarded to `target`. Forwarding rules with the same `[IPAddress, IPProtocol]` pair must have disjoint port ranges. Some types of forwarding target have constraints on the acceptable ports:\n\n* TargetHttpProxy: 80, 8080\n* TargetHttpsProxy: 443\n* TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n* TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n* TargetVpnGateway: 500, 4500\n\n@pattern: d+(?:-d+)?", - }, - - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The project this resource belongs in.", - }, - - "label_fingerprint": { - Type: schema.TypeString, - Computed: true, - Description: "Used internally during label updates.", - }, - - "psc_connection_id": { - Type: schema.TypeString, - Computed: true, - Description: "The PSC connection id of the PSC Forwarding Rule.", - }, - - "psc_connection_status": { - Type: schema.TypeString, - Computed: true, - Description: "The PSC connection status of the PSC Forwarding Rule. 
Possible values: STATUS_UNSPECIFIED, PENDING, ACCEPTED, REJECTED, CLOSED", - }, - - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: "[Output Only] Server-defined URL for the resource.", - }, - }, - } -} - -func ComputeGlobalForwardingRuleMetadataFilterSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "filter_labels": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: "The list of label value pairs that must match labels in the provided metadata based on `filterMatchCriteria`\n\nThis list must not be empty and can have at the most 64 entries.", - MaxItems: 64, - MinItems: 1, - Elem: ComputeGlobalForwardingRuleMetadataFilterFilterLabelSchema(), - }, - - "filter_match_criteria": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Specifies how individual `filterLabel` matches within the list of `filterLabel`s contribute towards the overall `metadataFilter` match.\n\nSupported values are:\n\n* MATCH_ANY: At least one of the `filterLabels` must have a matching label in the provided metadata.\n* MATCH_ALL: All `filterLabels` must have matching labels in the provided metadata. 
Possible values: NOT_SET, MATCH_ALL, MATCH_ANY", - }, - }, - } -} - -func ComputeGlobalForwardingRuleMetadataFilterFilterLabelSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of metadata label.\n\nThe name can have a maximum length of 1024 characters and must be at least 1 character long.", - }, - - "value": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The value of the label must match the specified value.\n\nvalue can have a maximum length of 1024 characters.", - }, - }, - } -} - -func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &compute.ForwardingRule{ - Name: dcl.String(d.Get("name").(string)), - Target: dcl.String(d.Get("target").(string)), - Description: dcl.String(d.Get("description").(string)), - IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IPVersion: compute.ForwardingRuleIPVersionEnumRef(d.Get("ip_version").(string)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - MetadataFilter: expandComputeGlobalForwardingRuleMetadataFilterArray(d.Get("metadata_filters")), - Network: dcl.StringOrNil(d.Get("network").(string)), - PortRange: dcl.String(d.Get("port_range").(string)), - Project: dcl.String(project), - } - - id, err := obj.ID() - if err != nil { - return fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := project - // err == nil indicates that the 
billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyForwardingRule(context.Background(), obj, directive...) - - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error creating ForwardingRule: %s", err) - } - - log.Printf("[DEBUG] Finished creating ForwardingRule %q: %#v", d.Id(), res) - - return resourceComputeGlobalForwardingRuleRead(d, meta) -} - -func resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &compute.ForwardingRule{ - Name: dcl.String(d.Get("name").(string)), - Target: dcl.String(d.Get("target").(string)), - Description: dcl.String(d.Get("description").(string)), - IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IPVersion: compute.ForwardingRuleIPVersionEnumRef(d.Get("ip_version").(string)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - MetadataFilter: expandComputeGlobalForwardingRuleMetadataFilterArray(d.Get("metadata_filters")), - Network: dcl.StringOrNil(d.Get("network").(string)), - PortRange: dcl.String(d.Get("port_range").(string)), - Project: dcl.String(project), - } - - userAgent, err := generateUserAgentString(d, 
config.UserAgent) - if err != nil { - return err - } - billingProject := project - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.GetForwardingRule(context.Background(), obj) - if err != nil { - resourceName := fmt.Sprintf("ComputeGlobalForwardingRule %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("name", res.Name); err != nil { - return fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("target", res.Target); err != nil { - return fmt.Errorf("error setting target in state: %s", err) - } - if err = d.Set("description", res.Description); err != nil { - return fmt.Errorf("error setting description in state: %s", err) - } - if err = d.Set("ip_address", res.IPAddress); err != nil { - return fmt.Errorf("error setting ip_address in state: %s", err) - } - if err = d.Set("ip_protocol", res.IPProtocol); err != nil { - return fmt.Errorf("error setting ip_protocol in state: %s", err) - } - if err = d.Set("ip_version", res.IPVersion); err != nil { - return fmt.Errorf("error setting ip_version in state: %s", err) - } - if err = d.Set("labels", res.Labels); err != nil { - return fmt.Errorf("error setting labels in state: %s", err) - } - if err = d.Set("load_balancing_scheme", res.LoadBalancingScheme); err != nil { - return fmt.Errorf("error setting load_balancing_scheme in state: %s", err) - } - if err = d.Set("metadata_filters", flattenComputeGlobalForwardingRuleMetadataFilterArray(res.MetadataFilter)); err != nil { - return fmt.Errorf("error setting metadata_filters in state: %s", err) - 
} - if err = d.Set("network", res.Network); err != nil { - return fmt.Errorf("error setting network in state: %s", err) - } - if err = d.Set("port_range", res.PortRange); err != nil { - return fmt.Errorf("error setting port_range in state: %s", err) - } - if err = d.Set("project", res.Project); err != nil { - return fmt.Errorf("error setting project in state: %s", err) - } - if err = d.Set("label_fingerprint", res.LabelFingerprint); err != nil { - return fmt.Errorf("error setting label_fingerprint in state: %s", err) - } - if err = d.Set("psc_connection_id", res.PscConnectionId); err != nil { - return fmt.Errorf("error setting psc_connection_id in state: %s", err) - } - if err = d.Set("psc_connection_status", res.PscConnectionStatus); err != nil { - return fmt.Errorf("error setting psc_connection_status in state: %s", err) - } - if err = d.Set("self_link", res.SelfLink); err != nil { - return fmt.Errorf("error setting self_link in state: %s", err) - } - - return nil -} -func resourceComputeGlobalForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &compute.ForwardingRule{ - Name: dcl.String(d.Get("name").(string)), - Target: dcl.String(d.Get("target").(string)), - Description: dcl.String(d.Get("description").(string)), - IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IPVersion: compute.ForwardingRuleIPVersionEnumRef(d.Get("ip_version").(string)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - MetadataFilter: expandComputeGlobalForwardingRuleMetadataFilterArray(d.Get("metadata_filters")), - Network: dcl.StringOrNil(d.Get("network").(string)), - PortRange: dcl.String(d.Get("port_range").(string)), - Project: dcl.String(project), - 
} - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyForwardingRule(context.Background(), obj, directive...) - - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error updating ForwardingRule: %s", err) - } - - log.Printf("[DEBUG] Finished creating ForwardingRule %q: %#v", d.Id(), res) - - return resourceComputeGlobalForwardingRuleRead(d, meta) -} - -func resourceComputeGlobalForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &compute.ForwardingRule{ - Name: dcl.String(d.Get("name").(string)), - Target: dcl.String(d.Get("target").(string)), - Description: dcl.String(d.Get("description").(string)), - IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IPVersion: compute.ForwardingRuleIPVersionEnumRef(d.Get("ip_version").(string)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - MetadataFilter: expandComputeGlobalForwardingRuleMetadataFilterArray(d.Get("metadata_filters")), - 
Network: dcl.StringOrNil(d.Get("network").(string)), - PortRange: dcl.String(d.Get("port_range").(string)), - Project: dcl.String(project), - } - - log.Printf("[DEBUG] Deleting ForwardingRule %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := project - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - if err := client.DeleteForwardingRule(context.Background(), obj); err != nil { - return fmt.Errorf("Error deleting ForwardingRule: %s", err) - } - - log.Printf("[DEBUG] Finished deleting ForwardingRule %q", d.Id()) - return nil -} - -func resourceComputeGlobalForwardingRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/forwardingRules/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/global/forwardingRules/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func expandComputeGlobalForwardingRuleMetadataFilterArray(o interface{}) []compute.ForwardingRuleMetadataFilter { - if o == nil { - return make([]compute.ForwardingRuleMetadataFilter, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]compute.ForwardingRuleMetadataFilter, 0) - } - - items := 
make([]compute.ForwardingRuleMetadataFilter, 0, len(objs)) - for _, item := range objs { - i := expandComputeGlobalForwardingRuleMetadataFilter(item) - items = append(items, *i) - } - - return items -} - -func expandComputeGlobalForwardingRuleMetadataFilter(o interface{}) *compute.ForwardingRuleMetadataFilter { - if o == nil { - return compute.EmptyForwardingRuleMetadataFilter - } - - obj := o.(map[string]interface{}) - return &compute.ForwardingRuleMetadataFilter{ - FilterLabel: expandComputeGlobalForwardingRuleMetadataFilterFilterLabelArray(obj["filter_labels"]), - FilterMatchCriteria: compute.ForwardingRuleMetadataFilterFilterMatchCriteriaEnumRef(obj["filter_match_criteria"].(string)), - } -} - -func flattenComputeGlobalForwardingRuleMetadataFilterArray(objs []compute.ForwardingRuleMetadataFilter) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenComputeGlobalForwardingRuleMetadataFilter(&item) - items = append(items, i) - } - - return items -} - -func flattenComputeGlobalForwardingRuleMetadataFilter(obj *compute.ForwardingRuleMetadataFilter) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "filter_labels": flattenComputeGlobalForwardingRuleMetadataFilterFilterLabelArray(obj.FilterLabel), - "filter_match_criteria": obj.FilterMatchCriteria, - } - - return transformed - -} -func expandComputeGlobalForwardingRuleMetadataFilterFilterLabelArray(o interface{}) []compute.ForwardingRuleMetadataFilterFilterLabel { - if o == nil { - return make([]compute.ForwardingRuleMetadataFilterFilterLabel, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]compute.ForwardingRuleMetadataFilterFilterLabel, 0) - } - - items := make([]compute.ForwardingRuleMetadataFilterFilterLabel, 0, len(objs)) - for _, item := range objs { - i := expandComputeGlobalForwardingRuleMetadataFilterFilterLabel(item) - items = 
append(items, *i) - } - - return items -} - -func expandComputeGlobalForwardingRuleMetadataFilterFilterLabel(o interface{}) *compute.ForwardingRuleMetadataFilterFilterLabel { - if o == nil { - return compute.EmptyForwardingRuleMetadataFilterFilterLabel - } - - obj := o.(map[string]interface{}) - return &compute.ForwardingRuleMetadataFilterFilterLabel{ - Name: dcl.String(obj["name"].(string)), - Value: dcl.String(obj["value"].(string)), - } -} - -func flattenComputeGlobalForwardingRuleMetadataFilterFilterLabelArray(objs []compute.ForwardingRuleMetadataFilterFilterLabel) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenComputeGlobalForwardingRuleMetadataFilterFilterLabel(&item) - items = append(items, i) - } - - return items -} - -func flattenComputeGlobalForwardingRuleMetadataFilterFilterLabel(obj *compute.ForwardingRuleMetadataFilterFilterLabel) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "name": obj.Name, - "value": obj.Value, - } - - return transformed - -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_global_network_endpoint.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_global_network_endpoint.go deleted file mode 100644 index 35ae572627..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_global_network_endpoint.go +++ /dev/null @@ -1,463 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. 
-// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeGlobalNetworkEndpoint() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeGlobalNetworkEndpointCreate, - Read: resourceComputeGlobalNetworkEndpointRead, - Delete: resourceComputeGlobalNetworkEndpointDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeGlobalNetworkEndpointImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "global_network_endpoint_group": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - Description: `The global network endpoint group this endpoint is part of.`, - }, - "port": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: `Port number of the external endpoint.`, - }, - "fqdn": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Fully qualified domain name of network endpoint. 
-This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.`, - AtLeastOneOf: []string{"fqdn", "ip_address"}, - }, - "ip_address": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `IPv4 address external endpoint.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeGlobalNetworkEndpointCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - portProp, err := expandNestedComputeGlobalNetworkEndpointPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(reflect.ValueOf(portProp)) && (ok || !reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - ipAddressProp, err := expandNestedComputeGlobalNetworkEndpointIpAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_address"); !isEmptyValue(reflect.ValueOf(ipAddressProp)) && (ok || !reflect.DeepEqual(v, ipAddressProp)) { - obj["ipAddress"] = ipAddressProp - } - fqdnProp, err := expandNestedComputeGlobalNetworkEndpointFqdn(d.Get("fqdn"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fqdn"); !isEmptyValue(reflect.ValueOf(fqdnProp)) && (ok || !reflect.DeepEqual(v, fqdnProp)) { - obj["fqdn"] = fqdnProp - } - - obj, err = resourceComputeGlobalNetworkEndpointEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "networkEndpoint/{{project}}/{{global_network_endpoint_group}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/attachNetworkEndpoints") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new GlobalNetworkEndpoint: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GlobalNetworkEndpoint: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating GlobalNetworkEndpoint: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating GlobalNetworkEndpoint", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create GlobalNetworkEndpoint: %s", err) - } - - log.Printf("[DEBUG] Finished creating GlobalNetworkEndpoint %q: %#v", d.Id(), res) - - return resourceComputeGlobalNetworkEndpointRead(d, meta) -} - -func resourceComputeGlobalNetworkEndpointRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/listNetworkEndpoints") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - 
return fmt.Errorf("Error fetching project for GlobalNetworkEndpoint: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "POST", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeGlobalNetworkEndpoint %q", d.Id())) - } - - res, err = flattenNestedComputeGlobalNetworkEndpoint(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing ComputeGlobalNetworkEndpoint because it couldn't be matched.") - d.SetId("") - return nil - } - - res, err = resourceComputeGlobalNetworkEndpointDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing ComputeGlobalNetworkEndpoint because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading GlobalNetworkEndpoint: %s", err) - } - - if err := d.Set("port", flattenNestedComputeGlobalNetworkEndpointPort(res["port"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalNetworkEndpoint: %s", err) - } - if err := d.Set("ip_address", flattenNestedComputeGlobalNetworkEndpointIpAddress(res["ipAddress"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalNetworkEndpoint: %s", err) - } - if err := d.Set("fqdn", flattenNestedComputeGlobalNetworkEndpointFqdn(res["fqdn"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalNetworkEndpoint: %s", err) - } - - return nil -} - -func resourceComputeGlobalNetworkEndpointDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GlobalNetworkEndpoint: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "networkEndpoint/{{project}}/{{global_network_endpoint_group}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/detachNetworkEndpoints") - if err != nil { - return err - } - - var obj map[string]interface{} - toDelete := make(map[string]interface{}) - portProp, err := expandNestedComputeGlobalNetworkEndpointPort(d.Get("port"), d, config) - if err != nil { - return err - } - if portProp != "" { - toDelete["port"] = portProp - } - - ipAddressProp, err := expandNestedComputeGlobalNetworkEndpointIpAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } - if ipAddressProp != "" { - toDelete["ipAddress"] = ipAddressProp - } - - fqdnProp, err := expandNestedComputeGlobalNetworkEndpointFqdn(d.Get("fqdn"), d, config) - if err != nil { - return err - } - if fqdnProp != "" { - toDelete["fqdn"] = fqdnProp - } - - obj = map[string]interface{}{ - "networkEndpoints": []map[string]interface{}{toDelete}, - } - log.Printf("[DEBUG] Deleting GlobalNetworkEndpoint %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GlobalNetworkEndpoint") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting GlobalNetworkEndpoint", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - 
return err - } - - log.Printf("[DEBUG] Finished deleting GlobalNetworkEndpoint %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeGlobalNetworkEndpointImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - // FQDN, port and ip_address are optional, so use * instead of + when reading the import id - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/networkEndpointGroups/(?P[^/]+)/(?P[^/]*)/(?P[^/]*)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)", - "(?P[^/]+)/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNestedComputeGlobalNetworkEndpointPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles int given in float64 format - if floatVal, ok := v.(float64); ok { - return int(floatVal) - } - return v -} - -func flattenNestedComputeGlobalNetworkEndpointIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeGlobalNetworkEndpointFqdn(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeGlobalNetworkEndpointPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeGlobalNetworkEndpointIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeGlobalNetworkEndpointFqdn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeGlobalNetworkEndpointEncoder(d 
*schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // Network Endpoint Group is a URL parameter only, so replace self-link/path with resource name only. - if err := d.Set("global_network_endpoint_group", GetResourceNameFromSelfLink(d.Get("global_network_endpoint_group").(string))); err != nil { - return nil, fmt.Errorf("Error setting global_network_endpoint_group: %s", err) - } - - wrappedReq := map[string]interface{}{ - "networkEndpoints": []interface{}{obj}, - } - return wrappedReq, nil -} - -func flattenNestedComputeGlobalNetworkEndpoint(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["items"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value items. 
Actual value: %v", v) - } - - _, item, err := resourceComputeGlobalNetworkEndpointFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeGlobalNetworkEndpointFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedIpAddress, err := expandNestedComputeGlobalNetworkEndpointIpAddress(d.Get("ip_address"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedIpAddress := flattenNestedComputeGlobalNetworkEndpointIpAddress(expectedIpAddress, d, meta.(*Config)) - expectedFqdn, err := expandNestedComputeGlobalNetworkEndpointFqdn(d.Get("fqdn"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedFqdn := flattenNestedComputeGlobalNetworkEndpointFqdn(expectedFqdn, d, meta.(*Config)) - expectedPort, err := expandNestedComputeGlobalNetworkEndpointPort(d.Get("port"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedPort := flattenNestedComputeGlobalNetworkEndpointPort(expectedPort, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - // Decode list item before comparing. 
- item, err := resourceComputeGlobalNetworkEndpointDecoder(d, meta, item) - if err != nil { - return -1, nil, err - } - - itemIpAddress := flattenNestedComputeGlobalNetworkEndpointIpAddress(item["ipAddress"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemIpAddress)) && isEmptyValue(reflect.ValueOf(expectedFlattenedIpAddress))) && !reflect.DeepEqual(itemIpAddress, expectedFlattenedIpAddress) { - log.Printf("[DEBUG] Skipping item with ipAddress= %#v, looking for %#v)", itemIpAddress, expectedFlattenedIpAddress) - continue - } - itemFqdn := flattenNestedComputeGlobalNetworkEndpointFqdn(item["fqdn"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemFqdn)) && isEmptyValue(reflect.ValueOf(expectedFlattenedFqdn))) && !reflect.DeepEqual(itemFqdn, expectedFlattenedFqdn) { - log.Printf("[DEBUG] Skipping item with fqdn= %#v, looking for %#v)", itemFqdn, expectedFlattenedFqdn) - continue - } - itemPort := flattenNestedComputeGlobalNetworkEndpointPort(item["port"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemPort)) && isEmptyValue(reflect.ValueOf(expectedFlattenedPort))) && !reflect.DeepEqual(itemPort, expectedFlattenedPort) { - log.Printf("[DEBUG] Skipping item with port= %#v, looking for %#v)", itemPort, expectedFlattenedPort) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} -func resourceComputeGlobalNetworkEndpointDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - v, ok := res["networkEndpoint"] - if !ok || v == nil { - return res, nil - } - - return v.(map[string]interface{}), nil -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_global_network_endpoint_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_global_network_endpoint_group.go deleted file mode 100644 index 129aaddfaf..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_global_network_endpoint_group.go +++ /dev/null @@ -1,331 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeGlobalNetworkEndpointGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeGlobalNetworkEndpointGroupCreate, - Read: resourceComputeGlobalNetworkEndpointGroupRead, - Delete: resourceComputeGlobalNetworkEndpointGroupDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeGlobalNetworkEndpointGroupImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCEName, - Description: `Name of the resource; provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. 
Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "network_endpoint_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"INTERNET_IP_PORT", "INTERNET_FQDN_PORT"}), - Description: `Type of network endpoints in this network endpoint group. Possible values: ["INTERNET_IP_PORT", "INTERNET_FQDN_PORT"]`, - }, - "default_port": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The default port used if the port number is not specified in the -network endpoint.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeGlobalNetworkEndpointGroupCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeGlobalNetworkEndpointGroupName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeGlobalNetworkEndpointGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); 
!isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - networkEndpointTypeProp, err := expandComputeGlobalNetworkEndpointGroupNetworkEndpointType(d.Get("network_endpoint_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_endpoint_type"); !isEmptyValue(reflect.ValueOf(networkEndpointTypeProp)) && (ok || !reflect.DeepEqual(v, networkEndpointTypeProp)) { - obj["networkEndpointType"] = networkEndpointTypeProp - } - defaultPortProp, err := expandComputeGlobalNetworkEndpointGroupDefaultPort(d.Get("default_port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_port"); !isEmptyValue(reflect.ValueOf(defaultPortProp)) && (ok || !reflect.DeepEqual(v, defaultPortProp)) { - obj["defaultPort"] = defaultPortProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new GlobalNetworkEndpointGroup: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GlobalNetworkEndpointGroup: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating GlobalNetworkEndpointGroup: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/networkEndpointGroups/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating GlobalNetworkEndpointGroup", 
userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create GlobalNetworkEndpointGroup: %s", err) - } - - log.Printf("[DEBUG] Finished creating GlobalNetworkEndpointGroup %q: %#v", d.Id(), res) - - return resourceComputeGlobalNetworkEndpointGroupRead(d, meta) -} - -func resourceComputeGlobalNetworkEndpointGroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GlobalNetworkEndpointGroup: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeGlobalNetworkEndpointGroup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) - } - - if err := d.Set("name", flattenComputeGlobalNetworkEndpointGroupName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) - } - if err := d.Set("description", flattenComputeGlobalNetworkEndpointGroupDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) - } - if err := d.Set("network_endpoint_type", flattenComputeGlobalNetworkEndpointGroupNetworkEndpointType(res["networkEndpointType"], d, config)); err != nil { - 
return fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) - } - if err := d.Set("default_port", flattenComputeGlobalNetworkEndpointGroupDefaultPort(res["defaultPort"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) - } - - return nil -} - -func resourceComputeGlobalNetworkEndpointGroupDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GlobalNetworkEndpointGroup: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting GlobalNetworkEndpointGroup %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GlobalNetworkEndpointGroup") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting GlobalNetworkEndpointGroup", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting GlobalNetworkEndpointGroup %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeGlobalNetworkEndpointGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := 
parseImportId([]string{ - "projects/(?P[^/]+)/global/networkEndpointGroups/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/networkEndpointGroups/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeGlobalNetworkEndpointGroupName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalNetworkEndpointGroupDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalNetworkEndpointGroupNetworkEndpointType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalNetworkEndpointGroupDefaultPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandComputeGlobalNetworkEndpointGroupName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalNetworkEndpointGroupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalNetworkEndpointGroupNetworkEndpointType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalNetworkEndpointGroupDefaultPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} 
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_ha_vpn_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_ha_vpn_gateway.go deleted file mode 100644 index 568dca92f1..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_ha_vpn_gateway.go +++ /dev/null @@ -1,477 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeHaVpnGateway() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeHaVpnGatewayCreate, - Read: resourceComputeHaVpnGatewayRead, - Delete: resourceComputeHaVpnGatewayDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeHaVpnGatewayImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCEName, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. 
Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The network this VPN gateway is accepting traffic for.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The region this gateway should sit in.`, - }, - "vpn_interfaces": { - Type: schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `A list of interfaces on this VPN gateway.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The numeric ID of this VPN gateway interface.`, - }, - "interconnect_attachment": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the interconnect attachment resource. When the value -of this field is present, the VPN Gateway will be used for -IPsec-encrypted Cloud Interconnect; all Egress or Ingress -traffic for this VPN Gateway interface will go through the -specified interconnect attachment resource. 
- -Not currently available publicly.`, - }, - "ip_address": { - Type: schema.TypeString, - Computed: true, - Description: `The external IP address for this VPN gateway interface.`, - }, - }, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeHaVpnGatewayCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeHaVpnGatewayDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeHaVpnGatewayName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandComputeHaVpnGatewayNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - vpnInterfacesProp, err := expandComputeHaVpnGatewayVpnInterfaces(d.Get("vpn_interfaces"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vpn_interfaces"); !isEmptyValue(reflect.ValueOf(vpnInterfacesProp)) && (ok || !reflect.DeepEqual(v, vpnInterfacesProp)) { - obj["vpnInterfaces"] = vpnInterfacesProp - } - regionProp, err := expandComputeHaVpnGatewayRegion(d.Get("region"), d, config) - if err != nil { - return err - } else 
if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnGateways") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new HaVpnGateway: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for HaVpnGateway: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating HaVpnGateway: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnGateways/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating HaVpnGateway", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create HaVpnGateway: %s", err) - } - - log.Printf("[DEBUG] Finished creating HaVpnGateway %q: %#v", d.Id(), res) - - return resourceComputeHaVpnGatewayRead(d, meta) -} - -func resourceComputeHaVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnGateways/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil 
{ - return fmt.Errorf("Error fetching project for HaVpnGateway: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeHaVpnGateway %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - - if err := d.Set("description", flattenComputeHaVpnGatewayDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - if err := d.Set("name", flattenComputeHaVpnGatewayName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - if err := d.Set("network", flattenComputeHaVpnGatewayNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - if err := d.Set("vpn_interfaces", flattenComputeHaVpnGatewayVpnInterfaces(res["vpnInterfaces"], d, config)); err != nil { - return fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - if err := d.Set("region", flattenComputeHaVpnGatewayRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - - return nil -} - -func resourceComputeHaVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for HaVpnGateway: %s", err) - } - billingProject = 
project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnGateways/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting HaVpnGateway %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "HaVpnGateway") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting HaVpnGateway", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting HaVpnGateway %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeHaVpnGatewayImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/vpnGateways/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnGateways/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeHaVpnGatewayDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHaVpnGatewayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHaVpnGatewayNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func 
flattenComputeHaVpnGatewayVpnInterfaces(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "id": flattenComputeHaVpnGatewayVpnInterfacesId(original["id"], d, config), - "ip_address": flattenComputeHaVpnGatewayVpnInterfacesIpAddress(original["ipAddress"], d, config), - "interconnect_attachment": flattenComputeHaVpnGatewayVpnInterfacesInterconnectAttachment(original["interconnectAttachment"], d, config), - }) - } - return transformed -} -func flattenComputeHaVpnGatewayVpnInterfacesId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeHaVpnGatewayVpnInterfacesIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHaVpnGatewayVpnInterfacesInterconnectAttachment(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeHaVpnGatewayRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeHaVpnGatewayDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHaVpnGatewayName(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHaVpnGatewayNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeHaVpnGatewayVpnInterfaces(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandComputeHaVpnGatewayVpnInterfacesId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedIpAddress, err := expandComputeHaVpnGatewayVpnInterfacesIpAddress(original["ip_address"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIpAddress); val.IsValid() && !isEmptyValue(val) { - transformed["ipAddress"] = transformedIpAddress - } - - transformedInterconnectAttachment, err := expandComputeHaVpnGatewayVpnInterfacesInterconnectAttachment(original["interconnect_attachment"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInterconnectAttachment); val.IsValid() && !isEmptyValue(val) { - transformed["interconnectAttachment"] = transformedInterconnectAttachment - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeHaVpnGatewayVpnInterfacesId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHaVpnGatewayVpnInterfacesIpAddress(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandComputeHaVpnGatewayVpnInterfacesInterconnectAttachment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("interconnectAttachments", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for interconnect_attachment: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeHaVpnGatewayRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_http_health_check.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_http_health_check.go deleted file mode 100644 index 8eb111c4e4..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_http_health_check.go +++ /dev/null @@ -1,618 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeHttpHealthCheck() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeHttpHealthCheckCreate, - Read: resourceComputeHttpHealthCheckRead, - Update: resourceComputeHttpHealthCheckUpdate, - Delete: resourceComputeHttpHealthCheckDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeHttpHealthCheckImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the -last character, which cannot be a dash.`, - }, - "check_interval_sec": { - Type: schema.TypeInt, - Optional: true, - Description: `How often (in seconds) to send a health check. The default value is 5 -seconds.`, - Default: 5, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "healthy_threshold": { - Type: schema.TypeInt, - Optional: true, - Description: `A so-far unhealthy instance will be marked healthy after this many -consecutive successes. 
The default value is 2.`, - Default: 2, - }, - "host": { - Type: schema.TypeString, - Optional: true, - Description: `The value of the host header in the HTTP health check request. If -left empty (default value), the public IP on behalf of which this -health check is performed will be used.`, - }, - "port": { - Type: schema.TypeInt, - Optional: true, - Description: `The TCP port number for the HTTP health check request. -The default value is 80.`, - Default: 80, - }, - "request_path": { - Type: schema.TypeString, - Optional: true, - Description: `The request path of the HTTP health check request. -The default value is /.`, - Default: "/", - }, - "timeout_sec": { - Type: schema.TypeInt, - Optional: true, - Description: `How long (in seconds) to wait before claiming failure. -The default value is 5 seconds. It is invalid for timeoutSec to have -greater value than checkIntervalSec.`, - Default: 5, - }, - "unhealthy_threshold": { - Type: schema.TypeInt, - Optional: true, - Description: `A so-far healthy instance will be marked unhealthy after this many -consecutive failures. 
The default value is 2.`, - Default: 2, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - checkIntervalSecProp, err := expandComputeHttpHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(reflect.ValueOf(checkIntervalSecProp)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { - obj["checkIntervalSec"] = checkIntervalSecProp - } - descriptionProp, err := expandComputeHttpHealthCheckDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - healthyThresholdProp, err := expandComputeHttpHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(reflect.ValueOf(healthyThresholdProp)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { - obj["healthyThreshold"] = healthyThresholdProp - } - hostProp, err := expandComputeHttpHealthCheckHost(d.Get("host"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host"); !isEmptyValue(reflect.ValueOf(hostProp)) && (ok || !reflect.DeepEqual(v, hostProp)) { - obj["host"] = hostProp - } - nameProp, err := 
expandComputeHttpHealthCheckName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - portProp, err := expandComputeHttpHealthCheckPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(reflect.ValueOf(portProp)) && (ok || !reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - requestPathProp, err := expandComputeHttpHealthCheckRequestPath(d.Get("request_path"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("request_path"); !isEmptyValue(reflect.ValueOf(requestPathProp)) && (ok || !reflect.DeepEqual(v, requestPathProp)) { - obj["requestPath"] = requestPathProp - } - timeoutSecProp, err := expandComputeHttpHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(reflect.ValueOf(timeoutSecProp)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - unhealthyThresholdProp, err := expandComputeHttpHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(reflect.ValueOf(unhealthyThresholdProp)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { - obj["unhealthyThreshold"] = unhealthyThresholdProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpHealthChecks") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new HttpHealthCheck: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for HttpHealthCheck: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value 
was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating HttpHealthCheck: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/httpHealthChecks/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating HttpHealthCheck", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create HttpHealthCheck: %s", err) - } - - log.Printf("[DEBUG] Finished creating HttpHealthCheck %q: %#v", d.Id(), res) - - return resourceComputeHttpHealthCheckRead(d, meta) -} - -func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpHealthChecks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for HttpHealthCheck: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeHttpHealthCheck %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - - if err := 
d.Set("check_interval_sec", flattenComputeHttpHealthCheckCheckIntervalSec(res["checkIntervalSec"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeHttpHealthCheckCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("description", flattenComputeHttpHealthCheckDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("healthy_threshold", flattenComputeHttpHealthCheckHealthyThreshold(res["healthyThreshold"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("host", flattenComputeHttpHealthCheckHost(res["host"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("name", flattenComputeHttpHealthCheckName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("port", flattenComputeHttpHealthCheckPort(res["port"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("request_path", flattenComputeHttpHealthCheckRequestPath(res["requestPath"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("timeout_sec", flattenComputeHttpHealthCheckTimeoutSec(res["timeoutSec"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("unhealthy_threshold", flattenComputeHttpHealthCheckUnhealthyThreshold(res["unhealthyThreshold"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading 
HttpHealthCheck: %s", err) - } - - return nil -} - -func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for HttpHealthCheck: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - checkIntervalSecProp, err := expandComputeHttpHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { - obj["checkIntervalSec"] = checkIntervalSecProp - } - descriptionProp, err := expandComputeHttpHealthCheckDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - healthyThresholdProp, err := expandComputeHttpHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { - obj["healthyThreshold"] = healthyThresholdProp - } - hostProp, err := expandComputeHttpHealthCheckHost(d.Get("host"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hostProp)) { - obj["host"] = hostProp - } - nameProp, err := expandComputeHttpHealthCheckName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) 
{ - obj["name"] = nameProp - } - portProp, err := expandComputeHttpHealthCheckPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - requestPathProp, err := expandComputeHttpHealthCheckRequestPath(d.Get("request_path"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("request_path"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, requestPathProp)) { - obj["requestPath"] = requestPathProp - } - timeoutSecProp, err := expandComputeHttpHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - unhealthyThresholdProp, err := expandComputeHttpHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { - obj["unhealthyThreshold"] = unhealthyThresholdProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpHealthChecks/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating HttpHealthCheck %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating HttpHealthCheck %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating HttpHealthCheck %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - 
config, res, project, "Updating HttpHealthCheck", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeHttpHealthCheckRead(d, meta) -} - -func resourceComputeHttpHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for HttpHealthCheck: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpHealthChecks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting HttpHealthCheck %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "HttpHealthCheck") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting HttpHealthCheck", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting HttpHealthCheck %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeHttpHealthCheckImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/httpHealthChecks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/httpHealthChecks/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error 
constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeHttpHealthCheckCheckIntervalSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeHttpHealthCheckCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpHealthCheckDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpHealthCheckHealthyThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeHttpHealthCheckHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpHealthCheckName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it 
otherwise -} - -func flattenComputeHttpHealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpHealthCheckTimeoutSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeHttpHealthCheckUnhealthyThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandComputeHttpHealthCheckCheckIntervalSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckHealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckRequestPath(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckUnhealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_https_health_check.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_https_health_check.go deleted file mode 100644 index 4189fa0f5b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_https_health_check.go +++ /dev/null @@ -1,618 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeHttpsHealthCheck() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeHttpsHealthCheckCreate, - Read: resourceComputeHttpsHealthCheckRead, - Update: resourceComputeHttpsHealthCheckUpdate, - Delete: resourceComputeHttpsHealthCheckDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeHttpsHealthCheckImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the -last character, which cannot be a dash.`, - }, - "check_interval_sec": { - Type: schema.TypeInt, - Optional: true, - Description: `How often (in seconds) to send a health check. The default value is 5 -seconds.`, - Default: 5, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "healthy_threshold": { - Type: schema.TypeInt, - Optional: true, - Description: `A so-far unhealthy instance will be marked healthy after this many -consecutive successes. 
The default value is 2.`, - Default: 2, - }, - "host": { - Type: schema.TypeString, - Optional: true, - Description: `The value of the host header in the HTTPS health check request. If -left empty (default value), the public IP on behalf of which this -health check is performed will be used.`, - }, - "port": { - Type: schema.TypeInt, - Optional: true, - Description: `The TCP port number for the HTTPS health check request. -The default value is 443.`, - Default: 443, - }, - "request_path": { - Type: schema.TypeString, - Optional: true, - Description: `The request path of the HTTPS health check request. -The default value is /.`, - Default: "/", - }, - "timeout_sec": { - Type: schema.TypeInt, - Optional: true, - Description: `How long (in seconds) to wait before claiming failure. -The default value is 5 seconds. It is invalid for timeoutSec to have -greater value than checkIntervalSec.`, - Default: 5, - }, - "unhealthy_threshold": { - Type: schema.TypeInt, - Optional: true, - Description: `A so-far healthy instance will be marked unhealthy after this many -consecutive failures. 
The default value is 2.`, - Default: 2, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeHttpsHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - checkIntervalSecProp, err := expandComputeHttpsHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(reflect.ValueOf(checkIntervalSecProp)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { - obj["checkIntervalSec"] = checkIntervalSecProp - } - descriptionProp, err := expandComputeHttpsHealthCheckDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - healthyThresholdProp, err := expandComputeHttpsHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(reflect.ValueOf(healthyThresholdProp)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { - obj["healthyThreshold"] = healthyThresholdProp - } - hostProp, err := expandComputeHttpsHealthCheckHost(d.Get("host"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host"); !isEmptyValue(reflect.ValueOf(hostProp)) && (ok || !reflect.DeepEqual(v, hostProp)) { - obj["host"] = hostProp - } - nameProp, err := 
expandComputeHttpsHealthCheckName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - portProp, err := expandComputeHttpsHealthCheckPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(reflect.ValueOf(portProp)) && (ok || !reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - requestPathProp, err := expandComputeHttpsHealthCheckRequestPath(d.Get("request_path"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("request_path"); !isEmptyValue(reflect.ValueOf(requestPathProp)) && (ok || !reflect.DeepEqual(v, requestPathProp)) { - obj["requestPath"] = requestPathProp - } - timeoutSecProp, err := expandComputeHttpsHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(reflect.ValueOf(timeoutSecProp)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - unhealthyThresholdProp, err := expandComputeHttpsHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(reflect.ValueOf(unhealthyThresholdProp)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { - obj["unhealthyThreshold"] = unhealthyThresholdProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpsHealthChecks") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new HttpsHealthCheck: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for HttpsHealthCheck: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project 
value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating HttpsHealthCheck: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/httpsHealthChecks/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating HttpsHealthCheck", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create HttpsHealthCheck: %s", err) - } - - log.Printf("[DEBUG] Finished creating HttpsHealthCheck %q: %#v", d.Id(), res) - - return resourceComputeHttpsHealthCheckRead(d, meta) -} - -func resourceComputeHttpsHealthCheckRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpsHealthChecks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for HttpsHealthCheck: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeHttpsHealthCheck %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - - if err := 
d.Set("check_interval_sec", flattenComputeHttpsHealthCheckCheckIntervalSec(res["checkIntervalSec"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeHttpsHealthCheckCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("description", flattenComputeHttpsHealthCheckDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("healthy_threshold", flattenComputeHttpsHealthCheckHealthyThreshold(res["healthyThreshold"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("host", flattenComputeHttpsHealthCheckHost(res["host"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("name", flattenComputeHttpsHealthCheckName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("port", flattenComputeHttpsHealthCheckPort(res["port"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("request_path", flattenComputeHttpsHealthCheckRequestPath(res["requestPath"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("timeout_sec", flattenComputeHttpsHealthCheckTimeoutSec(res["timeoutSec"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("unhealthy_threshold", flattenComputeHttpsHealthCheckUnhealthyThreshold(res["unhealthyThreshold"], d, config)); err != nil { - return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error 
reading HttpsHealthCheck: %s", err) - } - - return nil -} - -func resourceComputeHttpsHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for HttpsHealthCheck: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - checkIntervalSecProp, err := expandComputeHttpsHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { - obj["checkIntervalSec"] = checkIntervalSecProp - } - descriptionProp, err := expandComputeHttpsHealthCheckDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - healthyThresholdProp, err := expandComputeHttpsHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { - obj["healthyThreshold"] = healthyThresholdProp - } - hostProp, err := expandComputeHttpsHealthCheckHost(d.Get("host"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hostProp)) { - obj["host"] = hostProp - } - nameProp, err := expandComputeHttpsHealthCheckName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - portProp, err := expandComputeHttpsHealthCheckPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - requestPathProp, err := expandComputeHttpsHealthCheckRequestPath(d.Get("request_path"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("request_path"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, requestPathProp)) { - obj["requestPath"] = requestPathProp - } - timeoutSecProp, err := expandComputeHttpsHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - unhealthyThresholdProp, err := expandComputeHttpsHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { - obj["unhealthyThreshold"] = unhealthyThresholdProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpsHealthChecks/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating HttpsHealthCheck %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating HttpsHealthCheck %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating HttpsHealthCheck %q: %#v", d.Id(), res) - } 
- - err = ComputeOperationWaitTime( - config, res, project, "Updating HttpsHealthCheck", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeHttpsHealthCheckRead(d, meta) -} - -func resourceComputeHttpsHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for HttpsHealthCheck: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpsHealthChecks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting HttpsHealthCheck %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "HttpsHealthCheck") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting HttpsHealthCheck", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting HttpsHealthCheck %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeHttpsHealthCheckImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/httpsHealthChecks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/httpsHealthChecks/{{name}}") - 
if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeHttpsHealthCheckCheckIntervalSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeHttpsHealthCheckCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpsHealthCheckDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpsHealthCheckHealthyThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeHttpsHealthCheckHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpsHealthCheckName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpsHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return 
intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeHttpsHealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpsHealthCheckTimeoutSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeHttpsHealthCheckUnhealthyThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandComputeHttpsHealthCheckCheckIntervalSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckHealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandComputeHttpsHealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckUnhealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_image.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_image.go deleted file mode 100644 index fdabcb4e86..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_image.go +++ /dev/null @@ -1,913 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeImage() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeImageCreate, - Read: resourceComputeImageRead, - Update: resourceComputeImageUpdate, - Delete: resourceComputeImageDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeImageImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource; provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the -last character, which cannot be a dash.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "disk_size_gb": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Size of the image when restored onto a persistent disk (in GB).`, - }, - "family": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name of the image family to which this image belongs. You can -create disks by specifying an image family instead of a specific -image name. 
The image family always returns its latest image that is -not deprecated. The name of the image family must comply with -RFC1035.`, - }, - "guest_os_features": { - Type: schema.TypeSet, - Computed: true, - Optional: true, - ForceNew: true, - Description: `A list of features to enable on the guest operating system. -Applicable only for bootable images.`, - Elem: computeImageGuestOsFeaturesSchema(), - // Default schema.HashSchema is used. - }, - "image_encryption_key": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Encrypts the image using a customer-supplied encryption key. - -After you encrypt an image with a customer-supplied key, you must -provide the same key if you use the image later (e.g. to create a -disk from the image)`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_self_link": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The self link of the encryption key that is stored in Google Cloud -KMS.`, - }, - "kms_key_service_account": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The service account being used for the encryption request for the -given KMS key. 
If absent, the Compute Engine default service -account is used.`, - }, - }, - }, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Labels to apply to this Image.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "licenses": { - Type: schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Any applicable license URI.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "raw_disk": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The parameters of the raw disk image.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The full Google Cloud Storage URL where disk storage is stored -You must provide either this property or the sourceDisk property -but not both.`, - }, - "container_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"TAR", ""}), - Description: `The format used to encode and transmit the block device, which -should be TAR. This is just a container and transmission format -and not a runtime format. Provided by the client when the disk -image is created. Default value: "TAR" Possible values: ["TAR"]`, - Default: "TAR", - }, - "sha1": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional SHA1 checksum of the disk image before unpackaging. -This is provided by the client when the disk image is created.`, - }, - }, - }, - }, - "source_disk": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The source disk to create this image based on. 
-You must provide either this property or the -rawDisk.source property but not both to create an image.`, - }, - "source_image": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the source image used to create this image. In order to create an image, you must provide the full or partial -URL of one of the following: - -* The selfLink URL -* This property -* The rawDisk.source URL -* The sourceDisk URL`, - }, - "source_snapshot": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the source snapshot used to create this image. - -In order to create an image, you must provide the full or partial URL of one of the following: - -* The selfLink URL -* This property -* The sourceImage URL -* The rawDisk.source URL -* The sourceDisk URL`, - }, - "archive_size_bytes": { - Type: schema.TypeInt, - Computed: true, - Description: `Size of the image tar.gz archive stored in Google Cloud Storage (in -bytes).`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "label_fingerprint": { - Type: schema.TypeString, - Computed: true, - Description: `The fingerprint used for optimistic locking of this resource. 
Used -internally during updates.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func computeImageGuestOsFeaturesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS", "GVNIC"}), - Description: `The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. Possible values: ["MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS", "GVNIC"]`, - }, - }, - } -} - -func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeImageDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - diskSizeGbProp, err := expandComputeImageDiskSizeGb(d.Get("disk_size_gb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disk_size_gb"); !isEmptyValue(reflect.ValueOf(diskSizeGbProp)) && (ok || !reflect.DeepEqual(v, diskSizeGbProp)) { - obj["diskSizeGb"] = diskSizeGbProp - } - familyProp, err := expandComputeImageFamily(d.Get("family"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("family"); !isEmptyValue(reflect.ValueOf(familyProp)) && (ok || !reflect.DeepEqual(v, familyProp)) { - obj["family"] = familyProp - } - guestOsFeaturesProp, err := expandComputeImageGuestOsFeatures(d.Get("guest_os_features"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("guest_os_features"); !isEmptyValue(reflect.ValueOf(guestOsFeaturesProp)) && (ok || !reflect.DeepEqual(v, guestOsFeaturesProp)) { - obj["guestOsFeatures"] = guestOsFeaturesProp - } - imageEncryptionKeyProp, err := expandComputeImageImageEncryptionKey(d.Get("image_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("image_encryption_key"); !isEmptyValue(reflect.ValueOf(imageEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, imageEncryptionKeyProp)) { - obj["imageEncryptionKey"] = imageEncryptionKeyProp - } - labelsProp, err := expandComputeImageLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - labelFingerprintProp, err := expandComputeImageLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - licensesProp, err := expandComputeImageLicenses(d.Get("licenses"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("licenses"); !isEmptyValue(reflect.ValueOf(licensesProp)) && (ok || !reflect.DeepEqual(v, licensesProp)) { - obj["licenses"] = licensesProp - } - nameProp, err := expandComputeImageName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || 
!reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - rawDiskProp, err := expandComputeImageRawDisk(d.Get("raw_disk"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("raw_disk"); !isEmptyValue(reflect.ValueOf(rawDiskProp)) && (ok || !reflect.DeepEqual(v, rawDiskProp)) { - obj["rawDisk"] = rawDiskProp - } - sourceDiskProp, err := expandComputeImageSourceDisk(d.Get("source_disk"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_disk"); !isEmptyValue(reflect.ValueOf(sourceDiskProp)) && (ok || !reflect.DeepEqual(v, sourceDiskProp)) { - obj["sourceDisk"] = sourceDiskProp - } - sourceImageProp, err := expandComputeImageSourceImage(d.Get("source_image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_image"); !isEmptyValue(reflect.ValueOf(sourceImageProp)) && (ok || !reflect.DeepEqual(v, sourceImageProp)) { - obj["sourceImage"] = sourceImageProp - } - sourceSnapshotProp, err := expandComputeImageSourceSnapshot(d.Get("source_snapshot"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_snapshot"); !isEmptyValue(reflect.ValueOf(sourceSnapshotProp)) && (ok || !reflect.DeepEqual(v, sourceSnapshotProp)) { - obj["sourceSnapshot"] = sourceSnapshotProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/images") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Image: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Image: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return 
fmt.Errorf("Error creating Image: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/images/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating Image", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Image: %s", err) - } - - log.Printf("[DEBUG] Finished creating Image %q: %#v", d.Id(), res) - - return resourceComputeImageRead(d, meta) -} - -func resourceComputeImageRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/images/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Image: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeImage %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - - if err := d.Set("archive_size_bytes", flattenComputeImageArchiveSizeBytes(res["archiveSizeBytes"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeImageCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := 
d.Set("description", flattenComputeImageDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("disk_size_gb", flattenComputeImageDiskSizeGb(res["diskSizeGb"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("family", flattenComputeImageFamily(res["family"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("guest_os_features", flattenComputeImageGuestOsFeatures(res["guestOsFeatures"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("image_encryption_key", flattenComputeImageImageEncryptionKey(res["imageEncryptionKey"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("labels", flattenComputeImageLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("label_fingerprint", flattenComputeImageLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("licenses", flattenComputeImageLicenses(res["licenses"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("name", flattenComputeImageName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("source_disk", flattenComputeImageSourceDisk(res["sourceDisk"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("source_image", flattenComputeImageSourceImage(res["sourceImage"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("source_snapshot", flattenComputeImageSourceSnapshot(res["sourceSnapshot"], d, config)); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - if err := 
d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading Image: %s", err) - } - - return nil -} - -func resourceComputeImageUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Image: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("labels") || d.HasChange("label_fingerprint") { - obj := make(map[string]interface{}) - - labelsProp, err := expandComputeImageLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - labelFingerprintProp, err := expandComputeImageLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/images/{{name}}/setLabels") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Image %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Image %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating Image", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if 
err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeImageRead(d, meta) -} - -func resourceComputeImageDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Image: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/images/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Image %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Image") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting Image", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Image %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeImageImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/images/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/images/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeImageArchiveSizeBytes(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeImageCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageDiskSizeGb(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeImageFamily(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageGuestOsFeatures(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(schema.HashResource(computeImageGuestOsFeaturesSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "type": flattenComputeImageGuestOsFeaturesType(original["type"], d, config), - }) - } - return transformed -} -func flattenComputeImageGuestOsFeaturesType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageImageEncryptionKey(v interface{}, d *schema.ResourceData, 
config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["kms_key_self_link"] = - flattenComputeImageImageEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) - transformed["kms_key_service_account"] = - flattenComputeImageImageEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) - return []interface{}{transformed} -} -func flattenComputeImageImageEncryptionKeyKmsKeySelfLink(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - vStr := v.(string) - return strings.Split(vStr, "/cryptoKeyVersions/")[0] -} - -func flattenComputeImageImageEncryptionKeyKmsKeyServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageLabelFingerprint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageLicenses(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeImageName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageSourceDisk(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeImageSourceImage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeImageSourceSnapshot(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return 
ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeImageDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageDiskSizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageFamily(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageGuestOsFeatures(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedType, err := expandComputeImageGuestOsFeaturesType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeImageGuestOsFeaturesType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageImageEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeySelfLink, err := expandComputeImageImageEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeySelfLink - } - - transformedKmsKeyServiceAccount, err := 
expandComputeImageImageEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount - } - - return transformed, nil -} - -func expandComputeImageImageEncryptionKeyKmsKeySelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageImageEncryptionKeyKmsKeyServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandComputeImageLabelFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageLicenses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, fmt.Errorf("Invalid value for licenses: nil") - } - f, err := parseGlobalFieldValue("licenses", raw.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for licenses: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandComputeImageName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageRawDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedContainerType, err := expandComputeImageRawDiskContainerType(original["container_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContainerType); val.IsValid() && !isEmptyValue(val) { - transformed["containerType"] = transformedContainerType - } - - transformedSha1, err := expandComputeImageRawDiskSha1(original["sha1"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSha1); val.IsValid() && !isEmptyValue(val) { - transformed["sha1Checksum"] = transformedSha1 - } - - transformedSource, err := expandComputeImageRawDiskSource(original["source"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSource); val.IsValid() && !isEmptyValue(val) { - transformed["source"] = transformedSource - } - - return transformed, nil -} - -func expandComputeImageRawDiskContainerType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageRawDiskSha1(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageRawDiskSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageSourceDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseZonalFieldValue("disks", v.(string), "project", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for source_disk: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeImageSourceImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("images", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for source_image: %s", err) - } - 
return f.RelativeLink(), nil -} - -func expandComputeImageSourceSnapshot(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("snapshots", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for source_snapshot: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_from_machine_image.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_from_machine_image.go deleted file mode 100644 index 93cfad7a2a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_from_machine_image.go +++ /dev/null @@ -1,3 +0,0 @@ -package google - -// Magic Modules doesn't let us remove files - blank out beta-only common-compile files for now. diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_group_named_port.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_group_named_port.go deleted file mode 100644 index 5efa36b3aa..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_group_named_port.go +++ /dev/null @@ -1,497 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeInstanceGroupNamedPort() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeInstanceGroupNamedPortCreate, - Read: resourceComputeInstanceGroupNamedPortRead, - Delete: resourceComputeInstanceGroupNamedPortDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeInstanceGroupNamedPortImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "group": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - Description: `The name of the instance group.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name for this named port. 
The name must be 1-63 characters -long, and comply with RFC1035.`, - }, - "port": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: `The port number, which can be a value between 1 and 65535.`, - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The zone of the instance group.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeInstanceGroupNamedPortCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeInstanceGroupNamedPortName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - portProp, err := expandNestedComputeInstanceGroupNamedPortPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(reflect.ValueOf(portProp)) && (ok || !reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - - obj, err = resourceComputeInstanceGroupNamedPortEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/setNamedPorts") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new InstanceGroupNamedPort: %#v", obj) - - obj, err = 
resourceComputeInstanceGroupNamedPortPatchCreateEncoder(d, meta, obj) - if err != nil { - return err - } - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for InstanceGroupNamedPort: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating InstanceGroupNamedPort: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/{{port}}/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating InstanceGroupNamedPort", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create InstanceGroupNamedPort: %s", err) - } - - log.Printf("[DEBUG] Finished creating InstanceGroupNamedPort %q: %#v", d.Id(), res) - - return resourceComputeInstanceGroupNamedPortRead(d, meta) -} - -func resourceComputeInstanceGroupNamedPortRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for InstanceGroupNamedPort: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value 
was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeInstanceGroupNamedPort %q", d.Id())) - } - - res, err = flattenNestedComputeInstanceGroupNamedPort(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing ComputeInstanceGroupNamedPort because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading InstanceGroupNamedPort: %s", err) - } - - if err := d.Set("name", flattenNestedComputeInstanceGroupNamedPortName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading InstanceGroupNamedPort: %s", err) - } - if err := d.Set("port", flattenNestedComputeInstanceGroupNamedPortPort(res["port"], d, config)); err != nil { - return fmt.Errorf("Error reading InstanceGroupNamedPort: %s", err) - } - - return nil -} - -func resourceComputeInstanceGroupNamedPortDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for InstanceGroupNamedPort: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/setNamedPorts") - if err != nil { - return err - } - - var obj map[string]interface{} - - obj, err = 
resourceComputeInstanceGroupNamedPortPatchDeleteEncoder(d, meta, obj) - if err != nil { - return handleNotFoundError(err, d, "InstanceGroupNamedPort") - } - log.Printf("[DEBUG] Deleting InstanceGroupNamedPort %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "InstanceGroupNamedPort") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting InstanceGroupNamedPort", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting InstanceGroupNamedPort %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeInstanceGroupNamedPortImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroups/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/{{port}}/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNestedComputeInstanceGroupNamedPortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeInstanceGroupNamedPortPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := 
StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandNestedComputeInstanceGroupNamedPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeInstanceGroupNamedPortPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeInstanceGroupNamedPortEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - ig, err := ParseInstanceGroupFieldValue(d.Get("group").(string), d, config) - if err != nil { - return nil, err - } - - if err := d.Set("group", ig.Name); err != nil { - return nil, fmt.Errorf("Error setting group: %s", err) - } - if err := d.Set("zone", ig.Zone); err != nil { - return nil, fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("project", ig.Project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - - return obj, nil -} - -func flattenNestedComputeInstanceGroupNamedPort(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["namedPorts"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value namedPorts. 
Actual value: %v", v) - } - - _, item, err := resourceComputeInstanceGroupNamedPortFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeInstanceGroupNamedPortFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedPort, err := expandNestedComputeInstanceGroupNamedPortPort(d.Get("port"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedPort := flattenNestedComputeInstanceGroupNamedPortPort(expectedPort, d, meta.(*Config)) - expectedName, err := expandNestedComputeInstanceGroupNamedPortName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeInstanceGroupNamedPortName(expectedName, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemPort := flattenNestedComputeInstanceGroupNamedPortPort(item["port"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemPort)) && isEmptyValue(reflect.ValueOf(expectedFlattenedPort))) && !reflect.DeepEqual(itemPort, expectedFlattenedPort) { - log.Printf("[DEBUG] Skipping item with port= %#v, looking for %#v)", itemPort, expectedFlattenedPort) - continue - } - itemName := flattenNestedComputeInstanceGroupNamedPortName(item["name"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { - log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - 
log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -// PatchCreateEncoder handles creating request data to PATCH parent resource -// with list including new object. -func resourceComputeInstanceGroupNamedPortPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceComputeInstanceGroupNamedPortListForPatch(d, meta) - if err != nil { - return nil, err - } - - _, found, err := resourceComputeInstanceGroupNamedPortFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - - // Return error if item already created. - if found != nil { - return nil, fmt.Errorf("Unable to create InstanceGroupNamedPort, existing object already found: %+v", found) - } - - // Return list with the resource to create appended - res := map[string]interface{}{ - "namedPorts": append(currItems, obj), - } - - return res, nil -} - -// PatchDeleteEncoder handles creating request data to PATCH parent resource -// with list excluding object to delete. -func resourceComputeInstanceGroupNamedPortPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceComputeInstanceGroupNamedPortListForPatch(d, meta) - if err != nil { - return nil, err - } - - idx, item, err := resourceComputeInstanceGroupNamedPortFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - if item == nil { - // Spoof 404 error for proper handling by Delete (i.e. no-op) - return nil, fake404("nested", "ComputeInstanceGroupNamedPort") - } - - updatedItems := append(currItems[:idx], currItems[idx+1:]...) - res := map[string]interface{}{ - "namedPorts": updatedItems, - } - - return res, nil -} - -// ListForPatch handles making API request to get parent resource and -// extracting list of objects. 
-func resourceComputeInstanceGroupNamedPortListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { - config := meta.(*Config) - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}") - if err != nil { - return nil, err - } - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return nil, err - } - - var v interface{} - var ok bool - - v, ok = res["namedPorts"] - if ok && v != nil { - ls, lsOk := v.([]interface{}) - if !lsOk { - return nil, fmt.Errorf(`expected list for nested field "namedPorts"`) - } - return ls, nil - } - return nil, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_interconnect_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_interconnect_attachment.go deleted file mode 100644 index 4a0d76a12b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_interconnect_attachment.go +++ /dev/null @@ -1,904 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// waitForAttachmentToBeProvisioned waits for an attachment to leave the -// "UNPROVISIONED" state, to indicate that it's either ready or awaiting partner -// activity. -func waitForAttachmentToBeProvisioned(d *schema.ResourceData, config *Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { - if err := resourceComputeInterconnectAttachmentRead(d, config); err != nil { - return resource.NonRetryableError(err) - } - - name := d.Get("name").(string) - state := d.Get("state").(string) - if state == "UNPROVISIONED" { - return resource.RetryableError(fmt.Errorf("InterconnectAttachment %q has state %q.", name, state)) - } - log.Printf("InterconnectAttachment %q has state %q.", name, state) - return nil - }) -} - -func ResourceComputeInterconnectAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeInterconnectAttachmentCreate, - Read: resourceComputeInterconnectAttachmentRead, - Update: resourceComputeInterconnectAttachmentUpdate, - Delete: resourceComputeInterconnectAttachmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeInterconnectAttachmentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z]([-a-z0-9]*[a-z0-9])?$`), - Description: `Name of the resource. Provided by the client when the resource is created. The -name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the -name must be 1-63 characters long and match the regular expression -'[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a -lowercase letter, and all following characters must be a dash, lowercase -letter, or digit, except the last character, which cannot be a dash.`, - }, - "router": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the cloud router to be used for dynamic routing. This router must be in -the same region as this InterconnectAttachment. The InterconnectAttachment will -automatically connect the Interconnect to the network & region within which the -Cloud Router is configured.`, - }, - "admin_enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether the VLAN attachment is enabled or disabled. When using -PARTNER type this will Pre-Activate the interconnect attachment`, - Default: true, - }, - "bandwidth": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"BPS_50M", "BPS_100M", "BPS_200M", "BPS_300M", "BPS_400M", "BPS_500M", "BPS_1G", "BPS_2G", "BPS_5G", "BPS_10G", "BPS_20G", "BPS_50G", ""}), - Description: `Provisioned bandwidth capacity for the interconnect attachment. -For attachments of type DEDICATED, the user can set the bandwidth. -For attachments of type PARTNER, the Google Partner that is operating the interconnect must set the bandwidth. -Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, -Defaults to BPS_10G Possible values: ["BPS_50M", "BPS_100M", "BPS_200M", "BPS_300M", "BPS_400M", "BPS_500M", "BPS_1G", "BPS_2G", "BPS_5G", "BPS_10G", "BPS_20G", "BPS_50G"]`, - }, - "candidate_subnets": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Up to 16 candidate prefixes that can be used to restrict the allocation -of cloudRouterIpAddress and customerRouterIpAddress for this attachment. 
-All prefixes must be within link-local address space (169.254.0.0/16) -and must be /29 or shorter (/28, /27, etc). Google will attempt to select -an unused /29 from the supplied candidate prefix(es). The request will -fail if all possible /29s are in use on Google's edge. If not supplied, -Google will randomly select an unused /29 from all of link-local space.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `An optional description of this resource.`, - }, - "edge_availability_domain": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Desired availability domain for the attachment. Only available for type -PARTNER, at creation time. For improved reliability, customers should -configure a pair of attachments with one per availability domain. The -selected availability domain will be provided to the Partner via the -pairing key so that the provisioned circuit will lie in the specified -domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.`, - }, - "encryption": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"NONE", "IPSEC", ""}), - Description: `Indicates the user-supplied encryption option of this interconnect -attachment. Can only be specified at attachment creation for PARTNER or -DEDICATED attachments. - -* NONE - This is the default value, which means that the VLAN attachment -carries unencrypted traffic. VMs are able to send traffic to, or receive -traffic from, such a VLAN attachment. - -* IPSEC - The VLAN attachment carries only encrypted traffic that is -encrypted by an IPsec device, such as an HA VPN gateway or third-party -IPsec VPN. VMs cannot directly send traffic to, or receive traffic from, -such a VLAN attachment. To use HA VPN over Cloud Interconnect, the VLAN -attachment must be created with this option. 
Default value: "NONE" Possible values: ["NONE", "IPSEC"]`, - Default: "NONE", - }, - "interconnect": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the underlying Interconnect object that this attachment's -traffic will traverse through. Required if type is DEDICATED, must not -be set if type is PARTNER.`, - }, - "ipsec_internal_addresses": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `URL of addresses that have been reserved for the interconnect attachment, -Used only for interconnect attachment that has the encryption option as -IPSEC. - -The addresses must be RFC 1918 IP address ranges. When creating HA VPN -gateway over the interconnect attachment, if the attachment is configured -to use an RFC 1918 IP address, then the VPN gateway's IP address will be -allocated from the IP address range specified here. - -For example, if the HA VPN gateway's interface 0 is paired to this -interconnect attachment, then an RFC 1918 IP address for the VPN gateway -interface 0 will be allocated from the IP address specified for this -interconnect attachment. - -If this field is not specified for interconnect attachment that has -encryption option as IPSEC, later on when creating HA VPN gateway on this -interconnect attachment, the HA VPN gateway's IP address will be -allocated from regional external IP address pool.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "mtu": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Maximum Transmission Unit (MTU), in bytes, of packets passing through -this interconnect attachment. Currently, only 1440 and 1500 are allowed. 
If not specified, the value will default to 1440.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Region where the regional interconnect attachment resides.`, - }, - "type": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"DEDICATED", "PARTNER", "PARTNER_PROVIDER", ""}), - Description: `The type of InterconnectAttachment you wish to create. Defaults to -DEDICATED. Possible values: ["DEDICATED", "PARTNER", "PARTNER_PROVIDER"]`, - }, - "vlan_tag8021q": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. When -using PARTNER type this will be managed upstream.`, - }, - "cloud_router_ip_address": { - Type: schema.TypeString, - Computed: true, - Description: `IPv4 address + prefix length to be configured on Cloud Router -Interface for this interconnect attachment.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "customer_router_ip_address": { - Type: schema.TypeString, - Computed: true, - Description: `IPv4 address + prefix length to be configured on the customer -router subinterface for this interconnect attachment.`, - }, - "google_reference_id": { - Type: schema.TypeString, - Computed: true, - Description: `Google reference ID, to be used when raising support tickets with -Google or otherwise to debug backend connectivity issues.`, - }, - "pairing_key": { - Type: schema.TypeString, - Computed: true, - Description: `[Output only for type PARTNER. Not present for DEDICATED]. The opaque -identifier of an PARTNER attachment used to initiate provisioning with -a selected partner. 
Of the form "XXXXX/region/domain"`, - }, - "partner_asn": { - Type: schema.TypeString, - Computed: true, - Description: `[Output only for type PARTNER. Not present for DEDICATED]. Optional -BGP ASN for the router that should be supplied by a layer 3 Partner if -they configured BGP on behalf of the customer.`, - }, - "private_interconnect_info": { - Type: schema.TypeList, - Computed: true, - Description: `Information specific to an InterconnectAttachment. This property -is populated if the interconnect that this is attached to is of type DEDICATED.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "tag8021q": { - Type: schema.TypeInt, - Computed: true, - Description: `802.1q encapsulation tag to be used for traffic between -Google and the customer, going to and from this network and region.`, - }, - }, - }, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `[Output Only] The current state of this attachment's functionality.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeInterconnectAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - adminEnabledProp, err := expandComputeInterconnectAttachmentAdminEnabled(d.Get("admin_enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("admin_enabled"); ok || !reflect.DeepEqual(v, adminEnabledProp) { - obj["adminEnabled"] = adminEnabledProp - } - interconnectProp, err := expandComputeInterconnectAttachmentInterconnect(d.Get("interconnect"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("interconnect"); !isEmptyValue(reflect.ValueOf(interconnectProp)) && (ok || 
!reflect.DeepEqual(v, interconnectProp)) { - obj["interconnect"] = interconnectProp - } - descriptionProp, err := expandComputeInterconnectAttachmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - mtuProp, err := expandComputeInterconnectAttachmentMtu(d.Get("mtu"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("mtu"); !isEmptyValue(reflect.ValueOf(mtuProp)) && (ok || !reflect.DeepEqual(v, mtuProp)) { - obj["mtu"] = mtuProp - } - bandwidthProp, err := expandComputeInterconnectAttachmentBandwidth(d.Get("bandwidth"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bandwidth"); !isEmptyValue(reflect.ValueOf(bandwidthProp)) && (ok || !reflect.DeepEqual(v, bandwidthProp)) { - obj["bandwidth"] = bandwidthProp - } - edgeAvailabilityDomainProp, err := expandComputeInterconnectAttachmentEdgeAvailabilityDomain(d.Get("edge_availability_domain"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("edge_availability_domain"); !isEmptyValue(reflect.ValueOf(edgeAvailabilityDomainProp)) && (ok || !reflect.DeepEqual(v, edgeAvailabilityDomainProp)) { - obj["edgeAvailabilityDomain"] = edgeAvailabilityDomainProp - } - typeProp, err := expandComputeInterconnectAttachmentType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - routerProp, err := expandComputeInterconnectAttachmentRouter(d.Get("router"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("router"); !isEmptyValue(reflect.ValueOf(routerProp)) && (ok || !reflect.DeepEqual(v, routerProp)) { - obj["router"] = routerProp - } - 
nameProp, err := expandComputeInterconnectAttachmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - candidateSubnetsProp, err := expandComputeInterconnectAttachmentCandidateSubnets(d.Get("candidate_subnets"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("candidate_subnets"); !isEmptyValue(reflect.ValueOf(candidateSubnetsProp)) && (ok || !reflect.DeepEqual(v, candidateSubnetsProp)) { - obj["candidateSubnets"] = candidateSubnetsProp - } - vlanTag8021qProp, err := expandComputeInterconnectAttachmentVlanTag8021q(d.Get("vlan_tag8021q"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vlan_tag8021q"); !isEmptyValue(reflect.ValueOf(vlanTag8021qProp)) && (ok || !reflect.DeepEqual(v, vlanTag8021qProp)) { - obj["vlanTag8021q"] = vlanTag8021qProp - } - ipsecInternalAddressesProp, err := expandComputeInterconnectAttachmentIpsecInternalAddresses(d.Get("ipsec_internal_addresses"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ipsec_internal_addresses"); !isEmptyValue(reflect.ValueOf(ipsecInternalAddressesProp)) && (ok || !reflect.DeepEqual(v, ipsecInternalAddressesProp)) { - obj["ipsecInternalAddresses"] = ipsecInternalAddressesProp - } - encryptionProp, err := expandComputeInterconnectAttachmentEncryption(d.Get("encryption"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("encryption"); !isEmptyValue(reflect.ValueOf(encryptionProp)) && (ok || !reflect.DeepEqual(v, encryptionProp)) { - obj["encryption"] = encryptionProp - } - regionProp, err := expandComputeInterconnectAttachmentRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - 
obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/interconnectAttachments") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new InterconnectAttachment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for InterconnectAttachment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating InterconnectAttachment: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating InterconnectAttachment", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create InterconnectAttachment: %s", err) - } - - if err := waitForAttachmentToBeProvisioned(d, config, d.Timeout(schema.TimeoutCreate)); err != nil { - return fmt.Errorf("Error waiting for InterconnectAttachment %q to be provisioned: %q", d.Get("name").(string), err) - } - - log.Printf("[DEBUG] Finished creating InterconnectAttachment %q: %#v", d.Id(), res) - - return resourceComputeInterconnectAttachmentRead(d, meta) -} - -func resourceComputeInterconnectAttachmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, 
config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for InterconnectAttachment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeInterconnectAttachment %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - - if err := d.Set("admin_enabled", flattenComputeInterconnectAttachmentAdminEnabled(res["adminEnabled"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("cloud_router_ip_address", flattenComputeInterconnectAttachmentCloudRouterIpAddress(res["cloudRouterIpAddress"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("customer_router_ip_address", flattenComputeInterconnectAttachmentCustomerRouterIpAddress(res["customerRouterIpAddress"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("interconnect", flattenComputeInterconnectAttachmentInterconnect(res["interconnect"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("description", flattenComputeInterconnectAttachmentDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("mtu", flattenComputeInterconnectAttachmentMtu(res["mtu"], d, config)); err != nil { - 
return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("bandwidth", flattenComputeInterconnectAttachmentBandwidth(res["bandwidth"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("edge_availability_domain", flattenComputeInterconnectAttachmentEdgeAvailabilityDomain(res["edgeAvailabilityDomain"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("pairing_key", flattenComputeInterconnectAttachmentPairingKey(res["pairingKey"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("partner_asn", flattenComputeInterconnectAttachmentPartnerAsn(res["partnerAsn"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("private_interconnect_info", flattenComputeInterconnectAttachmentPrivateInterconnectInfo(res["privateInterconnectInfo"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("type", flattenComputeInterconnectAttachmentType(res["type"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("state", flattenComputeInterconnectAttachmentState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("google_reference_id", flattenComputeInterconnectAttachmentGoogleReferenceId(res["googleReferenceId"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("router", flattenComputeInterconnectAttachmentRouter(res["router"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("creation_timestamp", 
flattenComputeInterconnectAttachmentCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("name", flattenComputeInterconnectAttachmentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("vlan_tag8021q", flattenComputeInterconnectAttachmentVlanTag8021q(res["vlanTag8021q"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("ipsec_internal_addresses", flattenComputeInterconnectAttachmentIpsecInternalAddresses(res["ipsecInternalAddresses"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("encryption", flattenComputeInterconnectAttachmentEncryption(res["encryption"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("region", flattenComputeInterconnectAttachmentRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - - return nil -} - -func resourceComputeInterconnectAttachmentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for InterconnectAttachment: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - adminEnabledProp, err := expandComputeInterconnectAttachmentAdminEnabled(d.Get("admin_enabled"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("admin_enabled"); ok || !reflect.DeepEqual(v, adminEnabledProp) { - obj["adminEnabled"] = adminEnabledProp - } - descriptionProp, err := expandComputeInterconnectAttachmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - mtuProp, err := expandComputeInterconnectAttachmentMtu(d.Get("mtu"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("mtu"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mtuProp)) { - obj["mtu"] = mtuProp - } - bandwidthProp, err := expandComputeInterconnectAttachmentBandwidth(d.Get("bandwidth"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bandwidth"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bandwidthProp)) { - obj["bandwidth"] = bandwidthProp - } - regionProp, err := expandComputeInterconnectAttachmentRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating InterconnectAttachment %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating InterconnectAttachment %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating InterconnectAttachment %q: 
%#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating InterconnectAttachment", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeInterconnectAttachmentRead(d, meta) -} - -func resourceComputeInterconnectAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for InterconnectAttachment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - if err := waitForAttachmentToBeProvisioned(d, config, d.Timeout(schema.TimeoutCreate)); err != nil { - return fmt.Errorf("Error waiting for InterconnectAttachment %q to be provisioned: %q", d.Get("name").(string), err) - } - log.Printf("[DEBUG] Deleting InterconnectAttachment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "InterconnectAttachment") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting InterconnectAttachment", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting InterconnectAttachment %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeInterconnectAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if 
err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/interconnectAttachments/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeInterconnectAttachmentAdminEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentCloudRouterIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentCustomerRouterIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentInterconnect(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentMtu(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles int given in float64 format - if floatVal, ok := v.(float64); ok { - return fmt.Sprintf("%d", int(floatVal)) - } - return v -} - -func flattenComputeInterconnectAttachmentBandwidth(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentEdgeAvailabilityDomain(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentPairingKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentPartnerAsn(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentPrivateInterconnectInfo(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["tag8021q"] = - flattenComputeInterconnectAttachmentPrivateInterconnectInfoTag8021q(original["tag8021q"], d, config) - return []interface{}{transformed} -} -func flattenComputeInterconnectAttachmentPrivateInterconnectInfoTag8021q(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeInterconnectAttachmentType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentGoogleReferenceId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentRouter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeInterconnectAttachmentCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentVlanTag8021q(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeInterconnectAttachmentIpsecInternalAddresses(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeInterconnectAttachmentEncryption(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { - return "NONE" - } - - return v -} - -func flattenComputeInterconnectAttachmentRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeInterconnectAttachmentAdminEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentInterconnect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentMtu(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentBandwidth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentEdgeAvailabilityDomain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeInterconnectAttachmentType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentRouter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("routers", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for router: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeInterconnectAttachmentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentCandidateSubnets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentVlanTag8021q(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentIpsecInternalAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, fmt.Errorf("Invalid value for ipsec_internal_addresses: nil") - } - f, err := parseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for ipsec_internal_addresses: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandComputeInterconnectAttachmentEncryption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", 
err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_managed_ssl_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_managed_ssl_certificate.go deleted file mode 100644 index 2da2d7fef1..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_managed_ssl_certificate.go +++ /dev/null @@ -1,435 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeManagedSslCertificate() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeManagedSslCertificateCreate, - Read: resourceComputeManagedSslCertificateRead, - Delete: resourceComputeManagedSslCertificateDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeManagedSslCertificateImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "managed": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Properties relevant to 
a managed certificate. These will be used if the -certificate is managed (as indicated by a value of 'MANAGED' in 'type').`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "domains": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - DiffSuppressFunc: absoluteDomainSuppress, - Description: `Domains for which a managed SSL certificate will be valid. Currently, -there can be up to 100 domains in this list.`, - MaxItems: 100, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash. - - -These are in the same namespace as the managed SSL certificates.`, - }, - "type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"MANAGED", ""}), - Description: `Enum field whose value is always 'MANAGED' - used to signal to the API -which type this is. 
Default value: "MANAGED" Possible values: ["MANAGED"]`, - Default: "MANAGED", - }, - "certificate_id": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The unique identifier for the resource.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "expire_time": { - Type: schema.TypeString, - Computed: true, - Description: `Expire time of the certificate in RFC3339 text format.`, - }, - "subject_alternative_names": { - Type: schema.TypeList, - Computed: true, - Description: `Domains associated with the certificate via Subject Alternative Name.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeManagedSslCertificateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeManagedSslCertificateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeManagedSslCertificateName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - managedProp, err := expandComputeManagedSslCertificateManaged(d.Get("managed"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("managed"); 
!isEmptyValue(reflect.ValueOf(managedProp)) && (ok || !reflect.DeepEqual(v, managedProp)) { - obj["managed"] = managedProp - } - typeProp, err := expandComputeManagedSslCertificateType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ManagedSslCertificate: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ManagedSslCertificate: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating ManagedSslCertificate: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating ManagedSslCertificate", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create ManagedSslCertificate: %s", err) - } - - log.Printf("[DEBUG] Finished creating ManagedSslCertificate %q: %#v", d.Id(), res) - - return resourceComputeManagedSslCertificateRead(d, meta) -} - -func resourceComputeManagedSslCertificateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ManagedSslCertificate: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeManagedSslCertificate %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeManagedSslCertificateCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("description", flattenComputeManagedSslCertificateDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("certificate_id", flattenComputeManagedSslCertificateCertificateId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("name", flattenComputeManagedSslCertificateName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("managed", flattenComputeManagedSslCertificateManaged(res["managed"], d, config)); err != nil { - return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("type", flattenComputeManagedSslCertificateType(res["type"], d, config)); err != nil { - return 
fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("subject_alternative_names", flattenComputeManagedSslCertificateSubjectAlternativeNames(res["subjectAlternativeNames"], d, config)); err != nil { - return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("expire_time", flattenComputeManagedSslCertificateExpireTime(res["expireTime"], d, config)); err != nil { - return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - - return nil -} - -func resourceComputeManagedSslCertificateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ManagedSslCertificate: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting ManagedSslCertificate %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ManagedSslCertificate") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting ManagedSslCertificate", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting ManagedSslCertificate %q: %#v", d.Id(), res) - return nil -} - 
-func resourceComputeManagedSslCertificateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/sslCertificates/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeManagedSslCertificateCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeManagedSslCertificateDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeManagedSslCertificateCertificateId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeManagedSslCertificateName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeManagedSslCertificateManaged(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["domains"] = - flattenComputeManagedSslCertificateManagedDomains(original["domains"], d, config) - return []interface{}{transformed} -} -func flattenComputeManagedSslCertificateManagedDomains(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeManagedSslCertificateType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeManagedSslCertificateSubjectAlternativeNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeManagedSslCertificateExpireTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeManagedSslCertificateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeManagedSslCertificateName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeManagedSslCertificateManaged(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDomains, err := expandComputeManagedSslCertificateManagedDomains(original["domains"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDomains); val.IsValid() && !isEmptyValue(val) { - transformed["domains"] = transformedDomains - } - - return transformed, nil -} - -func expandComputeManagedSslCertificateManagedDomains(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeManagedSslCertificateType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network.go deleted file mode 
100644 index 535fdcbdf0..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network.go +++ /dev/null @@ -1,581 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/googleapi" -) - -func ResourceComputeNetwork() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeNetworkCreate, - Read: resourceComputeNetworkRead, - Update: resourceComputeNetworkUpdate, - Delete: resourceComputeNetworkDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeNetworkImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCEName, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "auto_create_subnetworks": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `When set to 'true', the network is created in "auto subnet mode" and -it will create a subnet for each region automatically across the -'10.128.0.0/9' address range. - -When set to 'false', the network is created in "custom subnet mode" so -the user can explicitly connect subnetwork resources.`, - Default: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. The resource must be -recreated to modify this field.`, - }, - "enable_ula_internal_ipv6": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Enable ULA internal ipv6 on this network. Enabling this feature will assign -a /48 from google defined ULA prefix fd20::/20.`, - }, - "internal_ipv6_range": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `When enabling ula internal ipv6, caller optionally can specify the /48 range -they want from the google defined ULA prefix fd20::/20. The input must be a -valid /48 ULA IPv6 address and must be within the fd20::/20. Operation will -fail if the speficied /48 is already in used by another resource. -If the field is not speficied, then a /48 range will be randomly allocated from fd20::/20 and returned via this field.`, - }, - "mtu": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Maximum Transmission Unit in bytes. The default value is 1460 bytes. -The minimum value for this field is 1300 and the maximum value is 8896 bytes (jumbo frames). 
-Note that packets larger than 1500 bytes (standard Ethernet) can be subject to TCP-MSS clamping or dropped -with an ICMP 'Fragmentation-Needed' message if the packets are routed to the Internet or other VPCs -with varying MTUs.`, - }, - "routing_mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"REGIONAL", "GLOBAL", ""}), - Description: `The network-wide routing mode to use. If set to 'REGIONAL', this -network's cloud routers will only advertise routes with subnetworks -of this network in the same region as the router. If set to 'GLOBAL', -this network's cloud routers will advertise routes with all -subnetworks of this network, across regions. Possible values: ["REGIONAL", "GLOBAL"]`, - }, - - "gateway_ipv4": { - Type: schema.TypeString, - Computed: true, - Description: `The gateway address for default routing out of the network. This value -is selected by GCP.`, - }, - "delete_default_routes_on_create": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `If set to 'true', default routes ('0.0.0.0/0') will be deleted -immediately after network creation. 
Defaults to 'false'.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeNetworkDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeNetworkName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - autoCreateSubnetworksProp, err := expandComputeNetworkAutoCreateSubnetworks(d.Get("auto_create_subnetworks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("auto_create_subnetworks"); ok || !reflect.DeepEqual(v, autoCreateSubnetworksProp) { - obj["autoCreateSubnetworks"] = autoCreateSubnetworksProp - } - routingConfigProp, err := expandComputeNetworkRoutingConfig(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(reflect.ValueOf(routingConfigProp)) { - obj["routingConfig"] = routingConfigProp - } - mtuProp, err := expandComputeNetworkMtu(d.Get("mtu"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("mtu"); !isEmptyValue(reflect.ValueOf(mtuProp)) && (ok || !reflect.DeepEqual(v, mtuProp)) { - obj["mtu"] = mtuProp - } - enableUlaInternalIpv6Prop, err := expandComputeNetworkEnableUlaInternalIpv6(d.Get("enable_ula_internal_ipv6"), d, config) - if 
err != nil { - return err - } else if v, ok := d.GetOkExists("enable_ula_internal_ipv6"); !isEmptyValue(reflect.ValueOf(enableUlaInternalIpv6Prop)) && (ok || !reflect.DeepEqual(v, enableUlaInternalIpv6Prop)) { - obj["enableUlaInternalIpv6"] = enableUlaInternalIpv6Prop - } - internalIpv6RangeProp, err := expandComputeNetworkInternalIpv6Range(d.Get("internal_ipv6_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("internal_ipv6_range"); !isEmptyValue(reflect.ValueOf(internalIpv6RangeProp)) && (ok || !reflect.DeepEqual(v, internalIpv6RangeProp)) { - obj["internalIpv6Range"] = internalIpv6RangeProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Network: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Network: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Network: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating Network", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Network: %s", err) - } - - if d.Get("delete_default_routes_on_create").(bool) { - token := "" - for paginate := true; paginate; { - network, err := 
config.NewComputeClient(userAgent).Networks.Get(project, d.Get("name").(string)).Do() - if err != nil { - return fmt.Errorf("Error finding network in proj: %s", err) - } - filter := fmt.Sprintf("(network=\"%s\") AND (destRange=\"0.0.0.0/0\")", network.SelfLink) - log.Printf("[DEBUG] Getting routes for network %q with filter '%q'", d.Get("name").(string), filter) - resp, err := config.NewComputeClient(userAgent).Routes.List(project).Filter(filter).Do() - if err != nil { - return fmt.Errorf("Error listing routes in proj: %s", err) - } - - log.Printf("[DEBUG] Found %d routes rules in %q network", len(resp.Items), d.Get("name").(string)) - - for _, route := range resp.Items { - op, err := config.NewComputeClient(userAgent).Routes.Delete(project, route.Name).Do() - if err != nil { - return fmt.Errorf("Error deleting route: %s", err) - } - err = ComputeOperationWaitTime(config, op, project, "Deleting Route", userAgent, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return err - } - } - - token = resp.NextPageToken - paginate = token != "" - } - } - - log.Printf("[DEBUG] Finished creating Network %q: %#v", d.Id(), res) - - return resourceComputeNetworkRead(d, meta) -} - -func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Network: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return 
handleNotFoundError(err, d, fmt.Sprintf("ComputeNetwork %q", d.Id())) - } - - // Explicitly set virtual fields to default values if unset - if _, ok := d.GetOkExists("delete_default_routes_on_create"); !ok { - if err := d.Set("delete_default_routes_on_create", false); err != nil { - return fmt.Errorf("Error setting delete_default_routes_on_create: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Network: %s", err) - } - - if err := d.Set("description", flattenComputeNetworkDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Network: %s", err) - } - if err := d.Set("gateway_ipv4", flattenComputeNetworkGatewayIpv4(res["gatewayIPv4"], d, config)); err != nil { - return fmt.Errorf("Error reading Network: %s", err) - } - if err := d.Set("name", flattenComputeNetworkName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Network: %s", err) - } - if err := d.Set("auto_create_subnetworks", flattenComputeNetworkAutoCreateSubnetworks(res["autoCreateSubnetworks"], d, config)); err != nil { - return fmt.Errorf("Error reading Network: %s", err) - } - // Terraform must set the top level schema field, but since this object contains collapsed properties - // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. 
- if flattenedProp := flattenComputeNetworkRoutingConfig(res["routingConfig"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*googleapi.Error); ok { - return fmt.Errorf("Error reading Network: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - if err := d.Set("mtu", flattenComputeNetworkMtu(res["mtu"], d, config)); err != nil { - return fmt.Errorf("Error reading Network: %s", err) - } - if err := d.Set("enable_ula_internal_ipv6", flattenComputeNetworkEnableUlaInternalIpv6(res["enableUlaInternalIpv6"], d, config)); err != nil { - return fmt.Errorf("Error reading Network: %s", err) - } - if err := d.Set("internal_ipv6_range", flattenComputeNetworkInternalIpv6Range(res["internalIpv6Range"], d, config)); err != nil { - return fmt.Errorf("Error reading Network: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading Network: %s", err) - } - - return nil -} - -func resourceComputeNetworkUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Network: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("routing_mode") { - obj := make(map[string]interface{}) - - routingConfigProp, err := expandComputeNetworkRoutingConfig(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(reflect.ValueOf(routingConfigProp)) { - obj["routingConfig"] = routingConfigProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{name}}") - if err != nil 
{ - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Network %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Network %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating Network", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeNetworkRead(d, meta) -} - -func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Network: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Network %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Network") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting Network", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Network %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeNetworkImport(d *schema.ResourceData, meta 
interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/networks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Explicitly set virtual fields to default values on import - if err := d.Set("delete_default_routes_on_create", false); err != nil { - return nil, fmt.Errorf("Error setting delete_default_routes_on_create: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeNetworkDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkGatewayIpv4(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkAutoCreateSubnetworks(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkRoutingConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["routing_mode"] = - flattenComputeNetworkRoutingConfigRoutingMode(original["routingMode"], d, config) - return []interface{}{transformed} -} -func flattenComputeNetworkRoutingConfigRoutingMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkMtu(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if 
intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeNetworkEnableUlaInternalIpv6(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkInternalIpv6Range(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeNetworkDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkAutoCreateSubnetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkRoutingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedRoutingMode, err := expandComputeNetworkRoutingConfigRoutingMode(d.Get("routing_mode"), d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRoutingMode); val.IsValid() && !isEmptyValue(val) { - transformed["routingMode"] = transformedRoutingMode - } - - return transformed, nil -} - -func expandComputeNetworkRoutingConfigRoutingMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkMtu(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkEnableUlaInternalIpv6(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkInternalIpv6Range(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_endpoint.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_endpoint.go deleted file mode 100644 index bb51145ad6..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_endpoint.go +++ /dev/null @@ -1,478 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeNetworkEndpoint() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeNetworkEndpointCreate, - Read: resourceComputeNetworkEndpointRead, - Delete: resourceComputeNetworkEndpointDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeNetworkEndpointImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "ip_address": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `IPv4 address of network endpoint. 
The IP address must belong -to a VM in GCE (either the primary IP or as part of an aliased IP -range).`, - }, - "network_endpoint_group": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - Description: `The network endpoint group this endpoint is part of.`, - }, - "instance": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name for a specific VM instance that the IP address belongs to. -This is required for network endpoints of type GCE_VM_IP_PORT. -The instance must be in the same zone of network endpoint group.`, - }, - "port": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `Port number of network endpoint. -**Note** 'port' is required unless the Network Endpoint Group is created -with the type of 'GCE_VM_IP'`, - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Zone where the containing network endpoint group is located.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNetworkEndpointCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - instanceProp, err := expandNestedComputeNetworkEndpointInstance(d.Get("instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance"); !isEmptyValue(reflect.ValueOf(instanceProp)) && (ok || !reflect.DeepEqual(v, instanceProp)) { - obj["instance"] = instanceProp - } - portProp, err := expandNestedComputeNetworkEndpointPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); 
!isEmptyValue(reflect.ValueOf(portProp)) && (ok || !reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - ipAddressProp, err := expandNestedComputeNetworkEndpointIpAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_address"); !isEmptyValue(reflect.ValueOf(ipAddressProp)) && (ok || !reflect.DeepEqual(v, ipAddressProp)) { - obj["ipAddress"] = ipAddressProp - } - - obj, err = resourceComputeNetworkEndpointEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/attachNetworkEndpoints") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new NetworkEndpoint: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NetworkEndpoint: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating NetworkEndpoint: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{network_endpoint_group}}/{{instance}}/{{ip_address}}/{{port}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating NetworkEndpoint", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - 
d.SetId("") - return fmt.Errorf("Error waiting to create NetworkEndpoint: %s", err) - } - - log.Printf("[DEBUG] Finished creating NetworkEndpoint %q: %#v", d.Id(), res) - - return resourceComputeNetworkEndpointRead(d, meta) -} - -func resourceComputeNetworkEndpointRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/listNetworkEndpoints") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NetworkEndpoint: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "POST", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeNetworkEndpoint %q", d.Id())) - } - - res, err = flattenNestedComputeNetworkEndpoint(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing ComputeNetworkEndpoint because it couldn't be matched.") - d.SetId("") - return nil - } - - res, err = resourceComputeNetworkEndpointDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing ComputeNetworkEndpoint because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading NetworkEndpoint: %s", err) - } - - if err := d.Set("instance", flattenNestedComputeNetworkEndpointInstance(res["instance"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkEndpoint: %s", err) - } - if err := d.Set("port", flattenNestedComputeNetworkEndpointPort(res["port"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkEndpoint: %s", err) - } - if err := d.Set("ip_address", flattenNestedComputeNetworkEndpointIpAddress(res["ipAddress"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkEndpoint: %s", err) - } - - return nil -} - -func resourceComputeNetworkEndpointDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NetworkEndpoint: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/detachNetworkEndpoints") - if err != nil { - return err - } - - var obj map[string]interface{} - toDelete := make(map[string]interface{}) - instanceProp, err := expandNestedComputeNetworkEndpointInstance(d.Get("instance"), d, config) - if err != nil { - return err - } - if instanceProp != "" { - toDelete["instance"] = instanceProp - } - - portProp, err := expandNestedComputeNetworkEndpointPort(d.Get("port"), d, 
config) - if err != nil { - return err - } - if portProp != 0 { - toDelete["port"] = portProp - } - - ipAddressProp, err := expandNestedComputeNetworkEndpointIpAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } - toDelete["ipAddress"] = ipAddressProp - - obj = map[string]interface{}{ - "networkEndpoints": []map[string]interface{}{toDelete}, - } - log.Printf("[DEBUG] Deleting NetworkEndpoint %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "NetworkEndpoint") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting NetworkEndpoint", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting NetworkEndpoint %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeNetworkEndpointImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - // instance is optional, so use * instead of + when reading the import id - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/networkEndpointGroups/(?P[^/]+)/(?P[^/]*)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]*)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]*)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]*)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{network_endpoint_group}}/{{instance}}/{{ip_address}}/{{port}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNestedComputeNetworkEndpointInstance(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenNestedComputeNetworkEndpointPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles int given in float64 format - if floatVal, ok := v.(float64); ok { - return int(floatVal) - } - return v -} - -func flattenNestedComputeNetworkEndpointIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeNetworkEndpointInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return GetResourceNameFromSelfLink(v.(string)), nil -} - -func expandNestedComputeNetworkEndpointPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeNetworkEndpointIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeNetworkEndpointEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // Network Endpoint Group is a URL parameter only, so replace self-link/path with resource name only. 
- if err := d.Set("network_endpoint_group", GetResourceNameFromSelfLink(d.Get("network_endpoint_group").(string))); err != nil { - return nil, fmt.Errorf("Error setting network_endpoint_group: %s", err) - } - - wrappedReq := map[string]interface{}{ - "networkEndpoints": []interface{}{obj}, - } - return wrappedReq, nil -} - -func flattenNestedComputeNetworkEndpoint(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["items"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value items. Actual value: %v", v) - } - - _, item, err := resourceComputeNetworkEndpointFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeNetworkEndpointFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedInstance, err := expandNestedComputeNetworkEndpointInstance(d.Get("instance"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedInstance := flattenNestedComputeNetworkEndpointInstance(expectedInstance, d, meta.(*Config)) - expectedIpAddress, err := expandNestedComputeNetworkEndpointIpAddress(d.Get("ip_address"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedIpAddress := flattenNestedComputeNetworkEndpointIpAddress(expectedIpAddress, d, meta.(*Config)) - expectedPort, err := expandNestedComputeNetworkEndpointPort(d.Get("port"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedPort := flattenNestedComputeNetworkEndpointPort(expectedPort, d, meta.(*Config)) - - // Search list for this resource. 
- for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - // Decode list item before comparing. - item, err := resourceComputeNetworkEndpointDecoder(d, meta, item) - if err != nil { - return -1, nil, err - } - - itemInstance := flattenNestedComputeNetworkEndpointInstance(item["instance"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemInstance)) && isEmptyValue(reflect.ValueOf(expectedFlattenedInstance))) && !reflect.DeepEqual(itemInstance, expectedFlattenedInstance) { - log.Printf("[DEBUG] Skipping item with instance= %#v, looking for %#v)", itemInstance, expectedFlattenedInstance) - continue - } - itemIpAddress := flattenNestedComputeNetworkEndpointIpAddress(item["ipAddress"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemIpAddress)) && isEmptyValue(reflect.ValueOf(expectedFlattenedIpAddress))) && !reflect.DeepEqual(itemIpAddress, expectedFlattenedIpAddress) { - log.Printf("[DEBUG] Skipping item with ipAddress= %#v, looking for %#v)", itemIpAddress, expectedFlattenedIpAddress) - continue - } - itemPort := flattenNestedComputeNetworkEndpointPort(item["port"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemPort)) && isEmptyValue(reflect.ValueOf(expectedFlattenedPort))) && !reflect.DeepEqual(itemPort, expectedFlattenedPort) { - log.Printf("[DEBUG] Skipping item with port= %#v, looking for %#v)", itemPort, expectedFlattenedPort) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} -func resourceComputeNetworkEndpointDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) 
(map[string]interface{}, error) { - v, ok := res["networkEndpoint"] - if !ok || v == nil { - return res, nil - } - - return v.(map[string]interface{}), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_endpoint_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_endpoint_group.go deleted file mode 100644 index d69ae3280a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_endpoint_group.go +++ /dev/null @@ -1,461 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeNetworkEndpointGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeNetworkEndpointGroupCreate, - Read: resourceComputeNetworkEndpointGroupRead, - Delete: resourceComputeNetworkEndpointGroupDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeNetworkEndpointGroupImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCEName, - Description: `Name of the resource; provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The network to which all network endpoints in the NEG belong. -Uses "default" project network if unspecified.`, - }, - "default_port": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The default port used if the port number is not specified in the -network endpoint.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. 
Provide this property when -you create the resource.`, - }, - "network_endpoint_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"GCE_VM_IP", "GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT", ""}), - Description: `Type of network endpoints in this network endpoint group. -NON_GCP_PRIVATE_IP_PORT is used for hybrid connectivity network -endpoint groups (see https://cloud.google.com/load-balancing/docs/hybrid). -Note that NON_GCP_PRIVATE_IP_PORT can only be used with Backend Services -that 1) have the following load balancing schemes: EXTERNAL, EXTERNAL_MANAGED, -INTERNAL_MANAGED, and INTERNAL_SELF_MANAGED and 2) support the RATE or -CONNECTION balancing modes. - -Possible values include: GCE_VM_IP, GCE_VM_IP_PORT, and NON_GCP_PRIVATE_IP_PORT. Default value: "GCE_VM_IP_PORT" Possible values: ["GCE_VM_IP", "GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT"]`, - Default: "GCE_VM_IP_PORT", - }, - "subnetwork": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareOptionalSubnet, - Description: `Optional subnetwork to which all network endpoints in the NEG belong.`, - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Zone where the network endpoint group is located.`, - }, - "size": { - Type: schema.TypeInt, - Computed: true, - Description: `Number of network endpoints in the network endpoint group.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNetworkEndpointGroupCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - 
nameProp, err := expandComputeNetworkEndpointGroupName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeNetworkEndpointGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - networkEndpointTypeProp, err := expandComputeNetworkEndpointGroupNetworkEndpointType(d.Get("network_endpoint_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_endpoint_type"); !isEmptyValue(reflect.ValueOf(networkEndpointTypeProp)) && (ok || !reflect.DeepEqual(v, networkEndpointTypeProp)) { - obj["networkEndpointType"] = networkEndpointTypeProp - } - networkProp, err := expandComputeNetworkEndpointGroupNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - subnetworkProp, err := expandComputeNetworkEndpointGroupSubnetwork(d.Get("subnetwork"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("subnetwork"); !isEmptyValue(reflect.ValueOf(subnetworkProp)) && (ok || !reflect.DeepEqual(v, subnetworkProp)) { - obj["subnetwork"] = subnetworkProp - } - defaultPortProp, err := expandComputeNetworkEndpointGroupDefaultPort(d.Get("default_port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_port"); !isEmptyValue(reflect.ValueOf(defaultPortProp)) && (ok || !reflect.DeepEqual(v, defaultPortProp)) { - obj["defaultPort"] = defaultPortProp - } - zoneProp, err := 
expandComputeNetworkEndpointGroupZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new NetworkEndpointGroup: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NetworkEndpointGroup: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating NetworkEndpointGroup: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating NetworkEndpointGroup", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create NetworkEndpointGroup: %s", err) - } - - log.Printf("[DEBUG] Finished creating NetworkEndpointGroup %q: %#v", d.Id(), res) - - return resourceComputeNetworkEndpointGroupRead(d, meta) -} - -func resourceComputeNetworkEndpointGroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NetworkEndpointGroup: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeNetworkEndpointGroup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - - if err := d.Set("name", flattenComputeNetworkEndpointGroupName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("description", flattenComputeNetworkEndpointGroupDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("network_endpoint_type", flattenComputeNetworkEndpointGroupNetworkEndpointType(res["networkEndpointType"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("size", flattenComputeNetworkEndpointGroupSize(res["size"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("network", flattenComputeNetworkEndpointGroupNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("subnetwork", flattenComputeNetworkEndpointGroupSubnetwork(res["subnetwork"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("default_port", 
flattenComputeNetworkEndpointGroupDefaultPort(res["defaultPort"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("zone", flattenComputeNetworkEndpointGroupZone(res["zone"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - - return nil -} - -func resourceComputeNetworkEndpointGroupDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NetworkEndpointGroup: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting NetworkEndpointGroup %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "NetworkEndpointGroup") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting NetworkEndpointGroup", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting NetworkEndpointGroup %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeNetworkEndpointGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) 
- if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/networkEndpointGroups/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeNetworkEndpointGroupName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkEndpointGroupDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkEndpointGroupNetworkEndpointType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkEndpointGroupSize(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeNetworkEndpointGroupNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeNetworkEndpointGroupSubnetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeNetworkEndpointGroupDefaultPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if 
intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeNetworkEndpointGroupZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeNetworkEndpointGroupName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkEndpointGroupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkEndpointGroupNetworkEndpointType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkEndpointGroupNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeNetworkEndpointGroupSubnetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for subnetwork: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeNetworkEndpointGroupDefaultPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkEndpointGroupZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) - if err != nil { - return nil, 
fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_peering.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_peering.go deleted file mode 100644 index b1e0e57123..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_peering.go +++ /dev/null @@ -1,352 +0,0 @@ -package google - -import ( - "fmt" - "log" - "sort" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/googleapi" - - "google.golang.org/api/compute/v1" -) - -const peerNetworkLinkRegex = "projects/(" + ProjectRegex + ")/global/networks/((?:[a-z](?:[-a-z0-9]*[a-z0-9])?))$" - -func ResourceComputeNetworkPeering() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeNetworkPeeringCreate, - Read: resourceComputeNetworkPeeringRead, - Update: resourceComputeNetworkPeeringUpdate, - Delete: resourceComputeNetworkPeeringDelete, - Importer: &schema.ResourceImporter{ - State: resourceComputeNetworkPeeringImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(4 * time.Minute), - Update: schema.DefaultTimeout(4 * time.Minute), - Delete: schema.DefaultTimeout(4 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCEName, - Description: `Name of the peering.`, - }, - - "network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(peerNetworkLinkRegex), - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The primary network of the peering.`, - }, - - "peer_network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: 
validateRegexp(peerNetworkLinkRegex), - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The peer network in the peering. The peer network may belong to a different project.`, - }, - - "export_custom_routes": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether to export the custom routes to the peer network. Defaults to false.`, - }, - - "import_custom_routes": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether to export the custom routes from the peer network. Defaults to false.`, - }, - - "export_subnet_routes_with_public_ip": { - Type: schema.TypeBool, - ForceNew: true, - Optional: true, - Default: true, - }, - - "import_subnet_routes_with_public_ip": { - Type: schema.TypeBool, - ForceNew: true, - Optional: true, - }, - - "state": { - Type: schema.TypeString, - Computed: true, - Description: `State for the peering, either ACTIVE or INACTIVE. The peering is ACTIVE when there's a matching configuration in the peer network.`, - }, - - "state_details": { - Type: schema.TypeString, - Computed: true, - Description: `Details about the current state of the peering.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNetworkPeeringCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - networkFieldValue, err := ParseNetworkFieldValue(d.Get("network").(string), d, config) - if err != nil { - return err - } - peerNetworkFieldValue, err := ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) - if err != nil { - return err - } - - request := &compute.NetworksAddPeeringRequest{} - request.NetworkPeering = expandNetworkPeering(d) - - // Only one peering operation at a time can be performed for a given network. - // Lock on both networks, sorted so we don't deadlock for A <--> B peering pairs. 
- peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) - for _, kn := range peeringLockNames { - mutexKV.Lock(kn) - defer mutexKV.Unlock(kn) - } - - addOp, err := config.NewComputeClient(userAgent).Networks.AddPeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() - if err != nil { - return fmt.Errorf("Error adding network peering: %s", err) - } - - err = ComputeOperationWaitTime(config, addOp, networkFieldValue.Project, "Adding Network Peering", userAgent, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return err - } - - d.SetId(fmt.Sprintf("%s/%s", networkFieldValue.Name, d.Get("name").(string))) - - return resourceComputeNetworkPeeringRead(d, meta) -} - -func resourceComputeNetworkPeeringRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - peeringName := d.Get("name").(string) - networkFieldValue, err := ParseNetworkFieldValue(d.Get("network").(string), d, config) - if err != nil { - return err - } - - network, err := config.NewComputeClient(userAgent).Networks.Get(networkFieldValue.Project, networkFieldValue.Name).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Network %q", networkFieldValue.Name)) - } - - peering := findPeeringFromNetwork(network, peeringName) - if peering == nil { - log.Printf("[WARN] Removing network peering %s from network %s because it's gone", peeringName, network.Name) - d.SetId("") - return nil - } - - if err := d.Set("peer_network", peering.Network); err != nil { - return fmt.Errorf("Error setting peer_network: %s", err) - } - if err := d.Set("name", peering.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("import_custom_routes", peering.ImportCustomRoutes); err != nil { - return fmt.Errorf("Error setting import_custom_routes: %s", err) - } - if err := 
d.Set("export_custom_routes", peering.ExportCustomRoutes); err != nil { - return fmt.Errorf("Error setting export_custom_routes: %s", err) - } - if err := d.Set("import_subnet_routes_with_public_ip", peering.ImportSubnetRoutesWithPublicIp); err != nil { - return fmt.Errorf("Error setting import_subnet_routes_with_public_ip: %s", err) - } - if err := d.Set("export_subnet_routes_with_public_ip", peering.ExportSubnetRoutesWithPublicIp); err != nil { - return fmt.Errorf("Error setting export_subnet_routes_with_public_ip: %s", err) - } - if err := d.Set("state", peering.State); err != nil { - return fmt.Errorf("Error setting state: %s", err) - } - if err := d.Set("state_details", peering.StateDetails); err != nil { - return fmt.Errorf("Error setting state_details: %s", err) - } - - return nil -} - -func resourceComputeNetworkPeeringUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - networkFieldValue, err := ParseNetworkFieldValue(d.Get("network").(string), d, config) - if err != nil { - return err - } - peerNetworkFieldValue, err := ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) - if err != nil { - return err - } - - request := &compute.NetworksUpdatePeeringRequest{} - request.NetworkPeering = expandNetworkPeering(d) - - // Only one peering operation at a time can be performed for a given network. - // Lock on both networks, sorted so we don't deadlock for A <--> B peering pairs. 
- peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) - for _, kn := range peeringLockNames { - mutexKV.Lock(kn) - defer mutexKV.Unlock(kn) - } - - updateOp, err := config.NewComputeClient(userAgent).Networks.UpdatePeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() - if err != nil { - return fmt.Errorf("Error updating network peering: %s", err) - } - - err = ComputeOperationWaitTime(config, updateOp, networkFieldValue.Project, "Updating Network Peering", userAgent, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - - return resourceComputeNetworkPeeringRead(d, meta) -} - -func resourceComputeNetworkPeeringDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - // Remove the `network` to `peer_network` peering - name := d.Get("name").(string) - networkFieldValue, err := ParseNetworkFieldValue(d.Get("network").(string), d, config) - if err != nil { - return err - } - peerNetworkFieldValue, err := ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) - if err != nil { - return err - } - - request := &compute.NetworksRemovePeeringRequest{ - Name: name, - } - - // Only one peering operation at a time can be performed for a given network. - // Lock on both networks, sorted so we don't deadlock for A <--> B peering pairs. 
- peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) - for _, kn := range peeringLockNames { - mutexKV.Lock(kn) - defer mutexKV.Unlock(kn) - } - - removeOp, err := config.NewComputeClient(userAgent).Networks.RemovePeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Peering `%s` already removed from network `%s`", name, networkFieldValue.Name) - } else { - return fmt.Errorf("Error removing peering `%s` from network `%s`: %s", name, networkFieldValue.Name, err) - } - } else { - err = ComputeOperationWaitTime(config, removeOp, networkFieldValue.Project, "Removing Network Peering", userAgent, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return err - } - } - - return nil -} - -func findPeeringFromNetwork(network *compute.Network, peeringName string) *compute.NetworkPeering { - for _, p := range network.Peerings { - if p.Name == peeringName { - return p - } - } - return nil -} -func expandNetworkPeering(d *schema.ResourceData) *compute.NetworkPeering { - return &compute.NetworkPeering{ - ExchangeSubnetRoutes: true, - Name: d.Get("name").(string), - Network: d.Get("peer_network").(string), - ExportCustomRoutes: d.Get("export_custom_routes").(bool), - ImportCustomRoutes: d.Get("import_custom_routes").(bool), - ExportSubnetRoutesWithPublicIp: d.Get("export_subnet_routes_with_public_ip").(bool), - ImportSubnetRoutesWithPublicIp: d.Get("import_subnet_routes_with_public_ip").(bool), - ForceSendFields: []string{"ExportSubnetRoutesWithPublicIp", "ImportCustomRoutes", "ExportCustomRoutes"}, - } -} - -func sortedNetworkPeeringMutexKeys(networkName, peerNetworkName *GlobalFieldValue) []string { - // Whether you delete the peering from network A to B or the one from B to A, they - // cannot happen at the same time. 
- networks := []string{ - fmt.Sprintf("%s/peerings", networkName.RelativeLink()), - fmt.Sprintf("%s/peerings", peerNetworkName.RelativeLink()), - } - sort.Strings(networks) - return networks -} - -func resourceComputeNetworkPeeringImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - splits := strings.Split(d.Id(), "/") - if len(splits) != 3 { - return nil, fmt.Errorf("Error parsing network peering import format, expected: {project}/{network}/{name}") - } - project := splits[0] - network := splits[1] - name := splits[2] - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - // Since the format of the network URL in the peering might be different depending on the ComputeBasePath, - // just read the network self link from the API. - net, err := config.NewComputeClient(userAgent).Networks.Get(project, network).Do() - if err != nil { - return nil, handleNotFoundError(err, d, fmt.Sprintf("Network %q", splits[1])) - } - - if err := d.Set("network", ConvertSelfLinkToV1(net.SelfLink)); err != nil { - return nil, fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("name", name); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - // Replace import id for the resource id - id := fmt.Sprintf("%s/%s", network, name) - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_peering_routes_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_peering_routes_config.go deleted file mode 100644 index f54d067d69..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_peering_routes_config.go +++ /dev/null @@ -1,409 +0,0 @@ -// 
---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeNetworkPeeringRoutesConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeNetworkPeeringRoutesConfigCreate, - Read: resourceComputeNetworkPeeringRoutesConfigRead, - Update: resourceComputeNetworkPeeringRoutesConfigUpdate, - Delete: resourceComputeNetworkPeeringRoutesConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeNetworkPeeringRoutesConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "export_custom_routes": { - Type: schema.TypeBool, - Required: true, - Description: `Whether to export the custom routes to the peer network.`, - }, - "import_custom_routes": { - Type: schema.TypeBool, - Required: true, - Description: `Whether to import the custom routes to the peer network.`, - }, - "network": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the primary network for the peering.`, - }, - "peering": { - Type: schema.TypeString, - Required: true, - Description: `Name of the peering.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - 
ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNetworkPeeringRoutesConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeNetworkPeeringRoutesConfigPeering(d.Get("peering"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peering"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - exportCustomRoutesProp, err := expandNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(d.Get("export_custom_routes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("export_custom_routes"); ok || !reflect.DeepEqual(v, exportCustomRoutesProp) { - obj["exportCustomRoutes"] = exportCustomRoutesProp - } - importCustomRoutesProp, err := expandNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(d.Get("import_custom_routes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("import_custom_routes"); ok || !reflect.DeepEqual(v, importCustomRoutesProp) { - obj["importCustomRoutes"] = importCustomRoutesProp - } - - obj, err = resourceComputeNetworkPeeringRoutesConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/peerings") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{network}}/updatePeering") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new NetworkPeeringRoutesConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for 
NetworkPeeringRoutesConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating NetworkPeeringRoutesConfig: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/networkPeerings/{{peering}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating NetworkPeeringRoutesConfig", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create NetworkPeeringRoutesConfig: %s", err) - } - - log.Printf("[DEBUG] Finished creating NetworkPeeringRoutesConfig %q: %#v", d.Id(), res) - - return resourceComputeNetworkPeeringRoutesConfigRead(d, meta) -} - -func resourceComputeNetworkPeeringRoutesConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{network}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NetworkPeeringRoutesConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return 
handleNotFoundError(err, d, fmt.Sprintf("ComputeNetworkPeeringRoutesConfig %q", d.Id())) - } - - res, err = flattenNestedComputeNetworkPeeringRoutesConfig(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing ComputeNetworkPeeringRoutesConfig because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading NetworkPeeringRoutesConfig: %s", err) - } - - if err := d.Set("peering", flattenNestedComputeNetworkPeeringRoutesConfigPeering(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkPeeringRoutesConfig: %s", err) - } - if err := d.Set("export_custom_routes", flattenNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(res["exportCustomRoutes"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkPeeringRoutesConfig: %s", err) - } - if err := d.Set("import_custom_routes", flattenNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(res["importCustomRoutes"], d, config)); err != nil { - return fmt.Errorf("Error reading NetworkPeeringRoutesConfig: %s", err) - } - - return nil -} - -func resourceComputeNetworkPeeringRoutesConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NetworkPeeringRoutesConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeNetworkPeeringRoutesConfigPeering(d.Get("peering"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peering"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - 
} - exportCustomRoutesProp, err := expandNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(d.Get("export_custom_routes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("export_custom_routes"); ok || !reflect.DeepEqual(v, exportCustomRoutesProp) { - obj["exportCustomRoutes"] = exportCustomRoutesProp - } - importCustomRoutesProp, err := expandNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(d.Get("import_custom_routes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("import_custom_routes"); ok || !reflect.DeepEqual(v, importCustomRoutesProp) { - obj["importCustomRoutes"] = importCustomRoutesProp - } - - obj, err = resourceComputeNetworkPeeringRoutesConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/peerings") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{network}}/updatePeering") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating NetworkPeeringRoutesConfig %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating NetworkPeeringRoutesConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating NetworkPeeringRoutesConfig %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating NetworkPeeringRoutesConfig", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeNetworkPeeringRoutesConfigRead(d, meta) -} - -func 
resourceComputeNetworkPeeringRoutesConfigDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARNING] Compute NetworkPeeringRoutesConfig resources"+ - " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceComputeNetworkPeeringRoutesConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/networks/(?P[^/]+)/networkPeerings/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/networkPeerings/{{peering}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNestedComputeNetworkPeeringRoutesConfigPeering(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeNetworkPeeringRoutesConfigPeering(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func resourceComputeNetworkPeeringRoutesConfigEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // Stick request in a networkPeering block as in - // https://cloud.google.com/compute/docs/reference/rest/v1/networks/updatePeering - newObj := make(map[string]interface{}) - newObj["networkPeering"] = obj - return newObj, nil -} - -func flattenNestedComputeNetworkPeeringRoutesConfig(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["peerings"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value peerings. Actual value: %v", v) - } - - _, item, err := resourceComputeNetworkPeeringRoutesConfigFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeNetworkPeeringRoutesConfigFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedPeering, err := expandNestedComputeNetworkPeeringRoutesConfigPeering(d.Get("peering"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedPeering := flattenNestedComputeNetworkPeeringRoutesConfigPeering(expectedPeering, d, meta.(*Config)) - - // Search list for this resource. 
- for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemPeering := flattenNestedComputeNetworkPeeringRoutesConfigPeering(item["name"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemPeering)) && isEmptyValue(reflect.ValueOf(expectedFlattenedPeering))) && !reflect.DeepEqual(itemPeering, expectedFlattenedPeering) { - log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemPeering, expectedFlattenedPeering) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_node_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_node_group.go deleted file mode 100644 index a5b6e4b997..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_node_group.go +++ /dev/null @@ -1,837 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeNodeGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeNodeGroupCreate, - Read: resourceComputeNodeGroupRead, - Update: resourceComputeNodeGroupUpdate, - Delete: resourceComputeNodeGroupDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeNodeGroupImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "node_template": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the node template to which this node group belongs.`, - }, - "autoscaling_policy": { - Type: schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `If you use sole-tenant nodes for your workloads, you can use the node -group autoscaler to automatically manage the sizes of your node groups.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_nodes": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Maximum size of the node group. Set to a value less than or equal -to 100 and greater than or equal to min-nodes.`, - }, - "mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"OFF", "ON", "ONLY_SCALE_OUT"}), - Description: `The autoscaling mode. Set to one of the following: - - OFF: Disables the autoscaler. - - ON: Enables scaling in and scaling out. - - ONLY_SCALE_OUT: Enables only scaling out. 
- You must use this mode if your node groups are configured to - restart their hosted VMs on minimal servers. Possible values: ["OFF", "ON", "ONLY_SCALE_OUT"]`, - }, - "min_nodes": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Minimum size of the node group. Must be less -than or equal to max-nodes. The default value is 0.`, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional textual description of the resource.`, - }, - "initial_size": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The initial number of nodes in the node group. One of 'initial_size' or 'size' must be specified.`, - ExactlyOneOf: []string{"size", "initial_size"}, - }, - "maintenance_policy": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.`, - Default: "DEFAULT", - }, - "maintenance_window": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `contains properties for the timeframe of maintenance`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "start_time": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `instances.start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. 
For example, both 13:00-5 and 08:00 are valid.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Name of the resource.`, - }, - "share_settings": { - Type: schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Share settings for the node group.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "share_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"ORGANIZATION", "SPECIFIC_PROJECTS", "LOCAL"}), - Description: `Node group sharing type. Possible values: ["ORGANIZATION", "SPECIFIC_PROJECTS", "LOCAL"]`, - }, - "project_map": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Description: `A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "project_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The project id/number should be the same as the key of this project config in the project map.`, - }, - }, - }, - }, - }, - }, - }, - "size": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The total number of nodes in the node group. 
One of 'initial_size' or 'size' must be specified.`, - ExactlyOneOf: []string{"size", "initial_size"}, - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Zone where this node group is located`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNodeGroupCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeNodeGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeNodeGroupName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - nodeTemplateProp, err := expandComputeNodeGroupNodeTemplate(d.Get("node_template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_template"); !isEmptyValue(reflect.ValueOf(nodeTemplateProp)) && (ok || !reflect.DeepEqual(v, nodeTemplateProp)) { - obj["nodeTemplate"] = nodeTemplateProp - } - sizeProp, err := expandComputeNodeGroupSize(d.Get("size"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("size"); ok || !reflect.DeepEqual(v, 
sizeProp) { - obj["size"] = sizeProp - } - maintenancePolicyProp, err := expandComputeNodeGroupMaintenancePolicy(d.Get("maintenance_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("maintenance_policy"); !isEmptyValue(reflect.ValueOf(maintenancePolicyProp)) && (ok || !reflect.DeepEqual(v, maintenancePolicyProp)) { - obj["maintenancePolicy"] = maintenancePolicyProp - } - maintenanceWindowProp, err := expandComputeNodeGroupMaintenanceWindow(d.Get("maintenance_window"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("maintenance_window"); !isEmptyValue(reflect.ValueOf(maintenanceWindowProp)) && (ok || !reflect.DeepEqual(v, maintenanceWindowProp)) { - obj["maintenanceWindow"] = maintenanceWindowProp - } - autoscalingPolicyProp, err := expandComputeNodeGroupAutoscalingPolicy(d.Get("autoscaling_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(reflect.ValueOf(autoscalingPolicyProp)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) { - obj["autoscalingPolicy"] = autoscalingPolicyProp - } - shareSettingsProp, err := expandComputeNodeGroupShareSettings(d.Get("share_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("share_settings"); !isEmptyValue(reflect.ValueOf(shareSettingsProp)) && (ok || !reflect.DeepEqual(v, shareSettingsProp)) { - obj["shareSettings"] = shareSettingsProp - } - zoneProp, err := expandComputeNodeGroupZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/nodeGroups?initialNodeCount=PRE_CREATE_REPLACE_ME") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new NodeGroup: %#v", obj) - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NodeGroup: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - var sizeParam string - if v, ok := d.GetOkExists("size"); ok { - sizeParam = fmt.Sprintf("%v", v) - } else if v, ok := d.GetOkExists("initial_size"); ok { - sizeParam = fmt.Sprintf("%v", v) - } - - url = regexp.MustCompile("PRE_CREATE_REPLACE_ME").ReplaceAllLiteralString(url, sizeParam) - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating NodeGroup: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating NodeGroup", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create NodeGroup: %s", err) - } - - log.Printf("[DEBUG] Finished creating NodeGroup %q: %#v", d.Id(), res) - - return resourceComputeNodeGroupRead(d, meta) -} - -func resourceComputeNodeGroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NodeGroup: %s", err) - } - billingProject = project - - // err 
== nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeNodeGroup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading NodeGroup: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeNodeGroupCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("description", flattenComputeNodeGroupDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("name", flattenComputeNodeGroupName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("node_template", flattenComputeNodeGroupNodeTemplate(res["nodeTemplate"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("size", flattenComputeNodeGroupSize(res["size"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("maintenance_policy", flattenComputeNodeGroupMaintenancePolicy(res["maintenancePolicy"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("maintenance_window", flattenComputeNodeGroupMaintenanceWindow(res["maintenanceWindow"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("autoscaling_policy", flattenComputeNodeGroupAutoscalingPolicy(res["autoscalingPolicy"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("share_settings", flattenComputeNodeGroupShareSettings(res["shareSettings"], d, config)); err != nil { - 
return fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("zone", flattenComputeNodeGroupZone(res["zone"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading NodeGroup: %s", err) - } - - return nil -} - -func resourceComputeNodeGroupUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NodeGroup: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("node_template") { - obj := make(map[string]interface{}) - - nodeTemplateProp, err := expandComputeNodeGroupNodeTemplate(d.Get("node_template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_template"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nodeTemplateProp)) { - obj["nodeTemplate"] = nodeTemplateProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}/setNodeTemplate") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating NodeGroup %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating NodeGroup %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating NodeGroup", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - 
d.Partial(false) - - return resourceComputeNodeGroupRead(d, meta) -} - -func resourceComputeNodeGroupDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NodeGroup: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting NodeGroup %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "NodeGroup") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting NodeGroup", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting NodeGroup %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeNodeGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/nodeGroups/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func 
flattenComputeNodeGroupCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeGroupDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeGroupName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeGroupNodeTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeNodeGroupSize(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeNodeGroupMaintenancePolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeGroupMaintenanceWindow(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["start_time"] = - flattenComputeNodeGroupMaintenanceWindowStartTime(original["startTime"], d, config) - return []interface{}{transformed} -} -func flattenComputeNodeGroupMaintenanceWindowStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeGroupAutoscalingPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["mode"] = - flattenComputeNodeGroupAutoscalingPolicyMode(original["mode"], d, config) - transformed["min_nodes"] = - flattenComputeNodeGroupAutoscalingPolicyMinNodes(original["minNodes"], d, config) - transformed["max_nodes"] = - flattenComputeNodeGroupAutoscalingPolicyMaxNodes(original["maxNodes"], d, config) - return []interface{}{transformed} -} -func flattenComputeNodeGroupAutoscalingPolicyMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeGroupAutoscalingPolicyMinNodes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeNodeGroupAutoscalingPolicyMaxNodes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeNodeGroupShareSettings(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["share_type"] = - flattenComputeNodeGroupShareSettingsShareType(original["shareType"], d, config) - transformed["project_map"] = - flattenComputeNodeGroupShareSettingsProjectMap(original["projectMap"], d, config) - return []interface{}{transformed} -} -func 
flattenComputeNodeGroupShareSettingsShareType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeGroupShareSettingsProjectMap(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(l)) - for k, raw := range l { - original := raw.(map[string]interface{}) - transformed = append(transformed, map[string]interface{}{ - "id": k, - "project_id": flattenComputeNodeGroupShareSettingsProjectMapProjectId(original["projectId"], d, config), - }) - } - return transformed -} -func flattenComputeNodeGroupShareSettingsProjectMapProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeGroupZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeNodeGroupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupNodeTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("nodeTemplates", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for node_template: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeNodeGroupSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupMaintenancePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupMaintenanceWindow(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedStartTime, err := expandComputeNodeGroupMaintenanceWindowStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - return transformed, nil -} - -func expandComputeNodeGroupMaintenanceWindowStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupAutoscalingPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMode, err := expandComputeNodeGroupAutoscalingPolicyMode(original["mode"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { - transformed["mode"] = transformedMode - } - - transformedMinNodes, err := expandComputeNodeGroupAutoscalingPolicyMinNodes(original["min_nodes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinNodes); val.IsValid() && !isEmptyValue(val) { - transformed["minNodes"] = transformedMinNodes - } - - transformedMaxNodes, err := expandComputeNodeGroupAutoscalingPolicyMaxNodes(original["max_nodes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxNodes); val.IsValid() && !isEmptyValue(val) { - transformed["maxNodes"] = transformedMaxNodes - } - - return transformed, nil -} - -func expandComputeNodeGroupAutoscalingPolicyMode(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupAutoscalingPolicyMinNodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupAutoscalingPolicyMaxNodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupShareSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedShareType, err := expandComputeNodeGroupShareSettingsShareType(original["share_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedShareType); val.IsValid() && !isEmptyValue(val) { - transformed["shareType"] = transformedShareType - } - - transformedProjectMap, err := expandComputeNodeGroupShareSettingsProjectMap(original["project_map"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectMap); val.IsValid() && !isEmptyValue(val) { - transformed["projectMap"] = transformedProjectMap - } - - return transformed, nil -} - -func expandComputeNodeGroupShareSettingsShareType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupShareSettingsProjectMap(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - for _, raw := range v.(*schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId, err := expandComputeNodeGroupShareSettingsProjectMapProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedId, err := expandString(original["id"], d, config) - if err != nil { - return nil, err - } - m[transformedId] = transformed - } - return m, nil -} - -func expandComputeNodeGroupShareSettingsProjectMapProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_node_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_node_template.go deleted file mode 100644 index 2fd8f15b86..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_node_template.go +++ /dev/null @@ -1,587 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeNodeTemplate() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeNodeTemplateCreate, - Read: resourceComputeNodeTemplateRead, - Delete: resourceComputeNodeTemplateDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeNodeTemplateImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "cpu_overcommit_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"ENABLED", "NONE", ""}), - Description: `CPU overcommit. Default value: "NONE" Possible values: ["ENABLED", "NONE"]`, - Default: "NONE", - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional textual description of the resource.`, - }, - "name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Name of the resource.`, - }, - "node_affinity_labels": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Labels to use for node affinity, which will be used in -instance scheduling.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "node_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Node type to use for nodes group that are created from this template. -Only one of nodeTypeFlexibility and nodeType can be specified.`, - ConflictsWith: []string{"node_type_flexibility"}, - }, - "node_type_flexibility": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Flexible properties for the desired node type. 
Node groups that -use this node template will create nodes of a type that matches -these properties. Only one of nodeTypeFlexibility and nodeType can -be specified.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpus": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Number of virtual CPUs to use.`, - AtLeastOneOf: []string{"node_type_flexibility.0.cpus", "node_type_flexibility.0.memory"}, - }, - "memory": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Physical memory available to the node, defined in MB.`, - AtLeastOneOf: []string{"node_type_flexibility.0.cpus", "node_type_flexibility.0.memory"}, - }, - "local_ssd": { - Type: schema.TypeString, - Computed: true, - Description: `Use local SSD`, - }, - }, - }, - ConflictsWith: []string{"node_type"}, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Region where nodes using the node template will be created. -If it is not provided, the provider region is used.`, - }, - "server_binding": { - Type: schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The server binding policy for nodes using this template. Determines -where the nodes should restart following a maintenance event.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"RESTART_NODE_ON_ANY_SERVER", "RESTART_NODE_ON_MINIMAL_SERVERS"}), - Description: `Type of server binding policy. If 'RESTART_NODE_ON_ANY_SERVER', -nodes using this template will restart on any physical server -following a maintenance event. 
- -If 'RESTART_NODE_ON_MINIMAL_SERVER', nodes using this template -will restart on the same physical server following a maintenance -event, instead of being live migrated to or restarted on a new -physical server. This option may be useful if you are using -software licenses tied to the underlying server characteristics -such as physical sockets or cores, to avoid the need for -additional licenses when maintenance occurs. However, VMs on such -nodes will experience outages while maintenance is applied. Possible values: ["RESTART_NODE_ON_ANY_SERVER", "RESTART_NODE_ON_MINIMAL_SERVERS"]`, - }, - }, - }, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNodeTemplateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeNodeTemplateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeNodeTemplateName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - nodeAffinityLabelsProp, err := expandComputeNodeTemplateNodeAffinityLabels(d.Get("node_affinity_labels"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("node_affinity_labels"); !isEmptyValue(reflect.ValueOf(nodeAffinityLabelsProp)) && (ok || !reflect.DeepEqual(v, nodeAffinityLabelsProp)) { - obj["nodeAffinityLabels"] = nodeAffinityLabelsProp - } - nodeTypeProp, err := expandComputeNodeTemplateNodeType(d.Get("node_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_type"); !isEmptyValue(reflect.ValueOf(nodeTypeProp)) && (ok || !reflect.DeepEqual(v, nodeTypeProp)) { - obj["nodeType"] = nodeTypeProp - } - nodeTypeFlexibilityProp, err := expandComputeNodeTemplateNodeTypeFlexibility(d.Get("node_type_flexibility"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_type_flexibility"); !isEmptyValue(reflect.ValueOf(nodeTypeFlexibilityProp)) && (ok || !reflect.DeepEqual(v, nodeTypeFlexibilityProp)) { - obj["nodeTypeFlexibility"] = nodeTypeFlexibilityProp - } - serverBindingProp, err := expandComputeNodeTemplateServerBinding(d.Get("server_binding"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("server_binding"); !isEmptyValue(reflect.ValueOf(serverBindingProp)) && (ok || !reflect.DeepEqual(v, serverBindingProp)) { - obj["serverBinding"] = serverBindingProp - } - cpuOvercommitTypeProp, err := expandComputeNodeTemplateCpuOvercommitType(d.Get("cpu_overcommit_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cpu_overcommit_type"); !isEmptyValue(reflect.ValueOf(cpuOvercommitTypeProp)) && (ok || !reflect.DeepEqual(v, cpuOvercommitTypeProp)) { - obj["cpuOvercommitType"] = cpuOvercommitTypeProp - } - regionProp, err := expandComputeNodeTemplateRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/nodeTemplates") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new NodeTemplate: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NodeTemplate: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating NodeTemplate: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating NodeTemplate", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create NodeTemplate: %s", err) - } - - log.Printf("[DEBUG] Finished creating NodeTemplate %q: %#v", d.Id(), res) - - return resourceComputeNodeTemplateRead(d, meta) -} - -func resourceComputeNodeTemplateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NodeTemplate: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeNodeTemplate %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading NodeTemplate: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeNodeTemplateCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("description", flattenComputeNodeTemplateDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("name", flattenComputeNodeTemplateName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("node_affinity_labels", flattenComputeNodeTemplateNodeAffinityLabels(res["nodeAffinityLabels"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("node_type", flattenComputeNodeTemplateNodeType(res["nodeType"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("node_type_flexibility", flattenComputeNodeTemplateNodeTypeFlexibility(res["nodeTypeFlexibility"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("server_binding", flattenComputeNodeTemplateServerBinding(res["serverBinding"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("cpu_overcommit_type", flattenComputeNodeTemplateCpuOvercommitType(res["cpuOvercommitType"], d, config)); err != nil { - return fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("region", flattenComputeNodeTemplateRegion(res["region"], d, config)); err != nil { - return 
fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading NodeTemplate: %s", err) - } - - return nil -} - -func resourceComputeNodeTemplateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NodeTemplate: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting NodeTemplate %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "NodeTemplate") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting NodeTemplate", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting NodeTemplate %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeNodeTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/nodeTemplates/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, 
"projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeNodeTemplateCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateNodeAffinityLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateNodeType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateNodeTypeFlexibility(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cpus"] = - flattenComputeNodeTemplateNodeTypeFlexibilityCpus(original["cpus"], d, config) - transformed["memory"] = - flattenComputeNodeTemplateNodeTypeFlexibilityMemory(original["memory"], d, config) - transformed["local_ssd"] = - flattenComputeNodeTemplateNodeTypeFlexibilityLocalSsd(original["localSsd"], d, config) - return []interface{}{transformed} -} -func flattenComputeNodeTemplateNodeTypeFlexibilityCpus(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateNodeTypeFlexibilityMemory(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateNodeTypeFlexibilityLocalSsd(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateServerBinding(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["type"] = - flattenComputeNodeTemplateServerBindingType(original["type"], d, config) - return []interface{}{transformed} -} -func flattenComputeNodeTemplateServerBindingType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateCpuOvercommitType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeNodeTemplateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateNodeAffinityLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandComputeNodeTemplateNodeType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateNodeTypeFlexibility(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCpus, err := expandComputeNodeTemplateNodeTypeFlexibilityCpus(original["cpus"], d, config) - if err != nil { - 
return nil, err - } else if val := reflect.ValueOf(transformedCpus); val.IsValid() && !isEmptyValue(val) { - transformed["cpus"] = transformedCpus - } - - transformedMemory, err := expandComputeNodeTemplateNodeTypeFlexibilityMemory(original["memory"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMemory); val.IsValid() && !isEmptyValue(val) { - transformed["memory"] = transformedMemory - } - - transformedLocalSsd, err := expandComputeNodeTemplateNodeTypeFlexibilityLocalSsd(original["local_ssd"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLocalSsd); val.IsValid() && !isEmptyValue(val) { - transformed["localSsd"] = transformedLocalSsd - } - - return transformed, nil -} - -func expandComputeNodeTemplateNodeTypeFlexibilityCpus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateNodeTypeFlexibilityMemory(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateNodeTypeFlexibilityLocalSsd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateServerBinding(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedType, err := expandComputeNodeTemplateServerBindingType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - return transformed, nil -} - -func expandComputeNodeTemplateServerBindingType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, 
nil -} - -func expandComputeNodeTemplateCpuOvercommitType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_packet_mirroring.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_packet_mirroring.go deleted file mode 100644 index d5e787e086..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_packet_mirroring.go +++ /dev/null @@ -1,903 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputePacketMirroring() *schema.Resource { - return &schema.Resource{ - Create: resourceComputePacketMirroringCreate, - Read: resourceComputePacketMirroringRead, - Update: resourceComputePacketMirroringUpdate, - Delete: resourceComputePacketMirroringDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputePacketMirroringImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "collector_ilb": { - Type: schema.TypeList, - Required: true, - Description: `The Forwarding Rule resource (of type load_balancing_scheme=INTERNAL) -that will be used as collector for mirrored traffic. The -specified forwarding rule must have is_mirroring_collector -set to true.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "url": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the forwarding rule.`, - }, - }, - }, - }, - "mirrored_resources": { - Type: schema.TypeList, - Required: true, - Description: `A means of specifying which resources to mirror.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instances": { - Type: schema.TypeList, - Optional: true, - Description: `All the listed instances will be mirrored. 
Specify at most 50.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "url": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the instances where this rule should be active.`, - }, - }, - }, - AtLeastOneOf: []string{"mirrored_resources.0.subnetworks", "mirrored_resources.0.instances", "mirrored_resources.0.tags"}, - }, - "subnetworks": { - Type: schema.TypeList, - Optional: true, - Description: `All instances in one of these subnetworks will be mirrored.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "url": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the subnetwork where this rule should be active.`, - }, - }, - }, - AtLeastOneOf: []string{"mirrored_resources.0.subnetworks", "mirrored_resources.0.instances", "mirrored_resources.0.tags"}, - }, - "tags": { - Type: schema.TypeList, - Optional: true, - Description: `All instances with these tags will be mirrored.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - AtLeastOneOf: []string{"mirrored_resources.0.subnetworks", "mirrored_resources.0.instances", "mirrored_resources.0.tags"}, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateGCEName, - Description: `The name of the packet mirroring rule`, - }, - "network": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `Specifies the mirrored VPC network. Only packets in this network -will be mirrored. All mirrored VMs should have a NIC in the given -network. 
All mirrored subnetworks should belong to the given network.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "url": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The full self_link URL of the network where this rule is active.`, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A human-readable description of the rule.`, - }, - "filter": { - Type: schema.TypeList, - Optional: true, - Description: `A filter for mirrored traffic. If unset, all traffic is mirrored.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr_ranges": { - Type: schema.TypeList, - Optional: true, - Description: `IP CIDR ranges that apply as a filter on the source (ingress) or -destination (egress) IP in the IP header. Only IPv4 is supported.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "direction": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"INGRESS", "EGRESS", "BOTH", ""}), - Description: `Direction of traffic to mirror. Default value: "BOTH" Possible values: ["INGRESS", "EGRESS", "BOTH"]`, - Default: "BOTH", - }, - "ip_protocols": { - Type: schema.TypeList, - Optional: true, - Description: `Possible IP protocols including tcp, udp, icmp and esp`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "priority": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Since only one rule can be active at a time, priority is -used to break ties in the case of two rules that apply to -the same instances.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `The Region in which the created address should reside. 
-If it is not provided, the provider region is used.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputePacketMirroringCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputePacketMirroringName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputePacketMirroringDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - regionProp, err := expandComputePacketMirroringRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - networkProp, err := expandComputePacketMirroringNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - priorityProp, err := expandComputePacketMirroringPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(reflect.ValueOf(priorityProp)) && (ok || !reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - collectorIlbProp, err := 
expandComputePacketMirroringCollectorIlb(d.Get("collector_ilb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("collector_ilb"); !isEmptyValue(reflect.ValueOf(collectorIlbProp)) && (ok || !reflect.DeepEqual(v, collectorIlbProp)) { - obj["collectorIlb"] = collectorIlbProp - } - filterProp, err := expandComputePacketMirroringFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - mirroredResourcesProp, err := expandComputePacketMirroringMirroredResources(d.Get("mirrored_resources"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("mirrored_resources"); !isEmptyValue(reflect.ValueOf(mirroredResourcesProp)) && (ok || !reflect.DeepEqual(v, mirroredResourcesProp)) { - obj["mirroredResources"] = mirroredResourcesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/packetMirrorings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new PacketMirroring: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for PacketMirroring: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating PacketMirroring: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - 
config, res, project, "Creating PacketMirroring", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create PacketMirroring: %s", err) - } - - log.Printf("[DEBUG] Finished creating PacketMirroring %q: %#v", d.Id(), res) - - return resourceComputePacketMirroringRead(d, meta) -} - -func resourceComputePacketMirroringRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for PacketMirroring: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputePacketMirroring %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading PacketMirroring: %s", err) - } - - if err := d.Set("name", flattenComputePacketMirroringName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("description", flattenComputePacketMirroringDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("region", flattenComputePacketMirroringRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("network", 
flattenComputePacketMirroringNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("priority", flattenComputePacketMirroringPriority(res["priority"], d, config)); err != nil { - return fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("collector_ilb", flattenComputePacketMirroringCollectorIlb(res["collectorIlb"], d, config)); err != nil { - return fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("filter", flattenComputePacketMirroringFilter(res["filter"], d, config)); err != nil { - return fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("mirrored_resources", flattenComputePacketMirroringMirroredResources(res["mirroredResources"], d, config)); err != nil { - return fmt.Errorf("Error reading PacketMirroring: %s", err) - } - - return nil -} - -func resourceComputePacketMirroringUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for PacketMirroring: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandComputePacketMirroringName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - regionProp, err := expandComputePacketMirroringRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - priorityProp, err := expandComputePacketMirroringPriority(d.Get("priority"), d, config) - if err != 
nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - collectorIlbProp, err := expandComputePacketMirroringCollectorIlb(d.Get("collector_ilb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("collector_ilb"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, collectorIlbProp)) { - obj["collectorIlb"] = collectorIlbProp - } - filterProp, err := expandComputePacketMirroringFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - mirroredResourcesProp, err := expandComputePacketMirroringMirroredResources(d.Get("mirrored_resources"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("mirrored_resources"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mirroredResourcesProp)) { - obj["mirroredResources"] = mirroredResourcesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating PacketMirroring %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating PacketMirroring %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating PacketMirroring %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating PacketMirroring", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return 
err - } - - return resourceComputePacketMirroringRead(d, meta) -} - -func resourceComputePacketMirroringDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for PacketMirroring: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting PacketMirroring %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "PacketMirroring") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting PacketMirroring", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting PacketMirroring %q: %#v", d.Id(), res) - return nil -} - -func resourceComputePacketMirroringImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/packetMirrorings/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return 
[]*schema.ResourceData{d}, nil -} - -func flattenComputePacketMirroringName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputePacketMirroringDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputePacketMirroringRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenComputePacketMirroringNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["url"] = - flattenComputePacketMirroringNetworkUrl(original["url"], d, config) - return []interface{}{transformed} -} -func flattenComputePacketMirroringNetworkUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputePacketMirroringPriority(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputePacketMirroringCollectorIlb(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["url"] = - flattenComputePacketMirroringCollectorIlbUrl(original["url"], d, config) - return []interface{}{transformed} -} -func 
flattenComputePacketMirroringCollectorIlbUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputePacketMirroringFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["ip_protocols"] = - flattenComputePacketMirroringFilterIpProtocols(original["IPProtocols"], d, config) - transformed["cidr_ranges"] = - flattenComputePacketMirroringFilterCidrRanges(original["cidrRanges"], d, config) - transformed["direction"] = - flattenComputePacketMirroringFilterDirection(original["direction"], d, config) - return []interface{}{transformed} -} -func flattenComputePacketMirroringFilterIpProtocols(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputePacketMirroringFilterCidrRanges(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputePacketMirroringFilterDirection(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputePacketMirroringMirroredResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["subnetworks"] = - flattenComputePacketMirroringMirroredResourcesSubnetworks(original["subnetworks"], d, config) - transformed["instances"] = - flattenComputePacketMirroringMirroredResourcesInstances(original["instances"], d, config) - transformed["tags"] = - flattenComputePacketMirroringMirroredResourcesTags(original["tags"], d, config) - return []interface{}{transformed} -} -func flattenComputePacketMirroringMirroredResourcesSubnetworks(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "url": flattenComputePacketMirroringMirroredResourcesSubnetworksUrl(original["url"], d, config), - }) - } - return transformed -} -func flattenComputePacketMirroringMirroredResourcesSubnetworksUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputePacketMirroringMirroredResourcesInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "url": flattenComputePacketMirroringMirroredResourcesInstancesUrl(original["url"], d, config), - }) - } - return transformed -} -func flattenComputePacketMirroringMirroredResourcesInstancesUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputePacketMirroringMirroredResourcesTags(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputePacketMirroringName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputePacketMirroringDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputePacketMirroringRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputePacketMirroringNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err := expandComputePacketMirroringNetworkUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - return transformed, nil -} - -func expandComputePacketMirroringNetworkUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for url: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputePacketMirroringPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputePacketMirroringCollectorIlb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err := expandComputePacketMirroringCollectorIlbUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - return transformed, nil -} - -func expandComputePacketMirroringCollectorIlbUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("forwardingRules", 
v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for url: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputePacketMirroringFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIpProtocols, err := expandComputePacketMirroringFilterIpProtocols(original["ip_protocols"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIpProtocols); val.IsValid() && !isEmptyValue(val) { - transformed["IPProtocols"] = transformedIpProtocols - } - - transformedCidrRanges, err := expandComputePacketMirroringFilterCidrRanges(original["cidr_ranges"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCidrRanges); val.IsValid() && !isEmptyValue(val) { - transformed["cidrRanges"] = transformedCidrRanges - } - - transformedDirection, err := expandComputePacketMirroringFilterDirection(original["direction"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDirection); val.IsValid() && !isEmptyValue(val) { - transformed["direction"] = transformedDirection - } - - return transformed, nil -} - -func expandComputePacketMirroringFilterIpProtocols(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputePacketMirroringFilterCidrRanges(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputePacketMirroringFilterDirection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputePacketMirroringMirroredResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSubnetworks, err := expandComputePacketMirroringMirroredResourcesSubnetworks(original["subnetworks"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSubnetworks); val.IsValid() && !isEmptyValue(val) { - transformed["subnetworks"] = transformedSubnetworks - } - - transformedInstances, err := expandComputePacketMirroringMirroredResourcesInstances(original["instances"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !isEmptyValue(val) { - transformed["instances"] = transformedInstances - } - - transformedTags, err := expandComputePacketMirroringMirroredResourcesTags(original["tags"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTags); val.IsValid() && !isEmptyValue(val) { - transformed["tags"] = transformedTags - } - - return transformed, nil -} - -func expandComputePacketMirroringMirroredResourcesSubnetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err := expandComputePacketMirroringMirroredResourcesSubnetworksUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputePacketMirroringMirroredResourcesSubnetworksUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := 
parseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for url: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputePacketMirroringMirroredResourcesInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err := expandComputePacketMirroringMirroredResourcesInstancesUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputePacketMirroringMirroredResourcesInstancesUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseZonalFieldValue("instances", v.(string), "project", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for url: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputePacketMirroringMirroredResourcesTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_per_instance_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_per_instance_config.go deleted file mode 100644 index cfdc9be504..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_per_instance_config.go +++ /dev/null @@ -1,725 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// 
*** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputePerInstanceConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceComputePerInstanceConfigCreate, - Read: resourceComputePerInstanceConfigRead, - Update: resourceComputePerInstanceConfigUpdate, - Delete: resourceComputePerInstanceConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputePerInstanceConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "instance_group_manager": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The instance group manager this instance config is part of.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name for this per-instance config and its corresponding instance.`, - }, - "preserved_state": { - Type: schema.TypeList, - Optional: true, - Description: `The preserved state for this instance.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disk": { - Type: schema.TypeSet, - Optional: true, - Description: `Stateful disks for the instance.`, - Elem: computePerInstanceConfigPreservedStateDiskSchema(), - // Default schema.HashSchema is used. 
- }, - "metadata": { - Type: schema.TypeMap, - Optional: true, - Description: `Preserved metadata defined for this instance. This is a list of key->value pairs.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "zone": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Zone where the containing instance group manager is located`, - }, - "minimal_action": { - Type: schema.TypeString, - Optional: true, - Default: "NONE", - Description: `The minimal action to perform on the instance during an update. -Default is 'NONE'. Possible values are: -* REPLACE -* RESTART -* REFRESH -* NONE`, - }, - "most_disruptive_allowed_action": { - Type: schema.TypeString, - Optional: true, - Default: "REPLACE", - Description: `The most disruptive action to perform on the instance during an update. -Default is 'REPLACE'. Possible values are: -* REPLACE -* RESTART -* REFRESH -* NONE`, - }, - "remove_instance_state_on_destroy": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `When true, deleting this config will immediately remove any specified state from the underlying instance. -When false, deleting this config will *not* immediately remove any state from the underlying instance. 
-State will be removed on the next instance recreation or update.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func computePerInstanceConfigPreservedStateDiskSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "device_name": { - Type: schema.TypeString, - Required: true, - Description: `A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance.`, - }, - "source": { - Type: schema.TypeString, - Required: true, - Description: `The URI of an existing persistent disk to attach under the specified device-name in the format -'projects/project-id/zones/zone/disks/disk-name'.`, - }, - "delete_rule": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION", ""}), - Description: `A value that prescribes what should happen to the stateful disk when the VM instance is deleted. -The available options are 'NEVER' and 'ON_PERMANENT_INSTANCE_DELETION'. -'NEVER' - detach the disk when the VM is deleted, but do not delete the disk. -'ON_PERMANENT_INSTANCE_DELETION' will delete the stateful disk when the VM is permanently -deleted from the instance group. Default value: "NEVER" Possible values: ["NEVER", "ON_PERMANENT_INSTANCE_DELETION"]`, - Default: "NEVER", - }, - "mode": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"READ_ONLY", "READ_WRITE", ""}), - Description: `The mode of the disk. 
Default value: "READ_WRITE" Possible values: ["READ_ONLY", "READ_WRITE"]`, - Default: "READ_WRITE", - }, - }, - } -} - -func resourceComputePerInstanceConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputePerInstanceConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - preservedStateProp, err := expandNestedComputePerInstanceConfigPreservedState(d.Get("preserved_state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("preserved_state"); !isEmptyValue(reflect.ValueOf(preservedStateProp)) && (ok || !reflect.DeepEqual(v, preservedStateProp)) { - obj["preservedState"] = preservedStateProp - } - - obj, err = resourceComputePerInstanceConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "instanceGroupManager/{{project}}/{{zone}}/{{instance_group_manager}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/createInstances") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new PerInstanceConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for PerInstanceConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", 
billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating PerInstanceConfig: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{instance_group_manager}}/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating PerInstanceConfig", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create PerInstanceConfig: %s", err) - } - - log.Printf("[DEBUG] Finished creating PerInstanceConfig %q: %#v", d.Id(), res) - - return resourceComputePerInstanceConfigRead(d, meta) -} - -func resourceComputePerInstanceConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/listPerInstanceConfigs") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for PerInstanceConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "POST", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputePerInstanceConfig %q", d.Id())) - } - - res, err = flattenNestedComputePerInstanceConfig(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. 
- log.Printf("[DEBUG] Removing ComputePerInstanceConfig because it couldn't be matched.") - d.SetId("") - return nil - } - - // Explicitly set virtual fields to default values if unset - if _, ok := d.GetOkExists("minimal_action"); !ok { - if err := d.Set("minimal_action", "NONE"); err != nil { - return fmt.Errorf("Error setting minimal_action: %s", err) - } - } - if _, ok := d.GetOkExists("most_disruptive_allowed_action"); !ok { - if err := d.Set("most_disruptive_allowed_action", "REPLACE"); err != nil { - return fmt.Errorf("Error setting most_disruptive_allowed_action: %s", err) - } - } - if _, ok := d.GetOkExists("remove_instance_state_on_destroy"); !ok { - if err := d.Set("remove_instance_state_on_destroy", false); err != nil { - return fmt.Errorf("Error setting remove_instance_state_on_destroy: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading PerInstanceConfig: %s", err) - } - - if err := d.Set("name", flattenNestedComputePerInstanceConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading PerInstanceConfig: %s", err) - } - if err := d.Set("preserved_state", flattenNestedComputePerInstanceConfigPreservedState(res["preservedState"], d, config)); err != nil { - return fmt.Errorf("Error reading PerInstanceConfig: %s", err) - } - - return nil -} - -func resourceComputePerInstanceConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for PerInstanceConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputePerInstanceConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - preservedStateProp, err := expandNestedComputePerInstanceConfigPreservedState(d.Get("preserved_state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("preserved_state"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, preservedStateProp)) { - obj["preservedState"] = preservedStateProp - } - - obj, err = resourceComputePerInstanceConfigUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "instanceGroupManager/{{project}}/{{zone}}/{{instance_group_manager}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/updatePerInstanceConfigs") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating PerInstanceConfig %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating PerInstanceConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating PerInstanceConfig %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating PerInstanceConfig", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - // Instance name in applyUpdatesToInstances request must include zone - instanceName, err := replaceVars(d, config, "zones/{{zone}}/instances/{{name}}") - if err != nil { - return err - } - - obj = make(map[string]interface{}) - obj["instances"] = []string{instanceName} - - minAction := 
d.Get("minimal_action") - if minAction == "" { - minAction = "NONE" - } - obj["minimalAction"] = minAction - - mostDisruptiveAction := d.Get("most_disruptive_allowed_action") - if isEmptyValue(reflect.ValueOf(mostDisruptiveAction)) { - mostDisruptiveAction = "REPLACE" - } - obj["mostDisruptiveAllowedAction"] = mostDisruptiveAction - - url, err = replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/applyUpdatesToInstances") - if err != nil { - return err - } - - log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) - res, err = SendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating PerInstanceConfig %q: %s", d.Id(), err) - } - - err = ComputeOperationWaitTime( - config, res, project, "Applying update to PerInstanceConfig", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - return resourceComputePerInstanceConfigRead(d, meta) -} - -func resourceComputePerInstanceConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "instanceGroupManager/{{project}}/{{zone}}/{{instance_group_manager}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/deletePerInstanceConfigs") - if err != nil { - return err - } - - var obj map[string]interface{} - obj = map[string]interface{}{ - "names": [1]string{d.Get("name").(string)}, - } - log.Printf("[DEBUG] Deleting PerInstanceConfig %q", d.Id()) - - res, err := 
SendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "PerInstanceConfig") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting PerInstanceConfig", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - // Potentially delete the state managed by this config - if d.Get("remove_instance_state_on_destroy").(bool) { - // Instance name in applyUpdatesToInstances request must include zone - instanceName, err := replaceVars(d, config, "zones/{{zone}}/instances/{{name}}") - if err != nil { - return err - } - - obj = make(map[string]interface{}) - obj["instances"] = []string{instanceName} - - // The deletion must be applied to the instance after the PerInstanceConfig is deleted - url, err = replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/applyUpdatesToInstances") - if err != nil { - return err - } - - log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) - res, err = SendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error deleting PerInstanceConfig %q: %s", d.Id(), err) - } - - err = ComputeOperationWaitTime( - config, res, project, "Applying update to PerInstanceConfig", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error deleting PerInstanceConfig %q: %s", d.Id(), err) - } - - // PerInstanceConfig goes into "DELETING" state while the instance is actually deleted - err = PollingWaitTime(resourceComputePerInstanceConfigPollRead(d, meta), PollCheckInstanceConfigDeleted, "Deleting PerInstanceConfig", d.Timeout(schema.TimeoutDelete), 1) - if err != nil { - return fmt.Errorf("Error waiting for delete on PerInstanceConfig %q: %s", d.Id(), err) - } - } - - log.Printf("[DEBUG] Finished 
deleting PerInstanceConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceComputePerInstanceConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{instance_group_manager}}/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Explicitly set virtual fields to default values on import - if err := d.Set("minimal_action", "NONE"); err != nil { - return nil, fmt.Errorf("Error setting minimal_action: %s", err) - } - if err := d.Set("most_disruptive_allowed_action", "REPLACE"); err != nil { - return nil, fmt.Errorf("Error setting most_disruptive_allowed_action: %s", err) - } - if err := d.Set("remove_instance_state_on_destroy", false); err != nil { - return nil, fmt.Errorf("Error setting remove_instance_state_on_destroy: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenNestedComputePerInstanceConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputePerInstanceConfigPreservedState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["metadata"] = - flattenNestedComputePerInstanceConfigPreservedStateMetadata(original["metadata"], d, config) - transformed["disk"] = - flattenNestedComputePerInstanceConfigPreservedStateDisk(original["disks"], d, config) - return []interface{}{transformed} -} -func 
flattenNestedComputePerInstanceConfigPreservedStateMetadata(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputePerInstanceConfigPreservedStateDisk(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - disks := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(disks)) - for devName, deleteRuleRaw := range disks { - diskObj := deleteRuleRaw.(map[string]interface{}) - source, err := getRelativePath(diskObj["source"].(string)) - if err != nil { - source = diskObj["source"].(string) - } - transformed = append(transformed, map[string]interface{}{ - "device_name": devName, - "delete_rule": diskObj["autoDelete"], - "source": source, - "mode": diskObj["mode"], - }) - } - return transformed -} - -func expandNestedComputePerInstanceConfigName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputePerInstanceConfigPreservedState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMetadata, err := expandNestedComputePerInstanceConfigPreservedStateMetadata(original["metadata"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMetadata); val.IsValid() && !isEmptyValue(val) { - transformed["metadata"] = transformedMetadata - } - - transformedDisk, err := expandNestedComputePerInstanceConfigPreservedStateDisk(original["disk"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDisk); val.IsValid() && !isEmptyValue(val) { - transformed["disks"] = transformedDisk - } - - return transformed, nil -} - -func expandNestedComputePerInstanceConfigPreservedStateMetadata(v interface{}, d 
TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandNestedComputePerInstanceConfigPreservedStateDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - l := v.(*schema.Set).List() - req := make(map[string]interface{}) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - deviceName := original["device_name"].(string) - diskObj := make(map[string]interface{}) - deleteRule := original["delete_rule"].(string) - if deleteRule != "" { - diskObj["autoDelete"] = deleteRule - } - source := original["source"] - if source != "" { - diskObj["source"] = source - } - mode := original["mode"] - if source != "" { - diskObj["mode"] = mode - } - req[deviceName] = diskObj - } - return req, nil -} - -func resourceComputePerInstanceConfigEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - wrappedReq := map[string]interface{}{ - "instances": []interface{}{obj}, - } - return wrappedReq, nil -} - -func resourceComputePerInstanceConfigUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // updates and creates use different wrapping object names - wrappedReq := map[string]interface{}{ - "perInstanceConfigs": []interface{}{obj}, - } - return wrappedReq, nil -} - -func flattenNestedComputePerInstanceConfig(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["items"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of 
single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value items. Actual value: %v", v) - } - - _, item, err := resourceComputePerInstanceConfigFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputePerInstanceConfigFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputePerInstanceConfigName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputePerInstanceConfigName(expectedName, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemName := flattenNestedComputePerInstanceConfigName(item["name"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { - log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_disk.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_disk.go deleted file mode 100644 index 5b149910ff..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_disk.go +++ /dev/null @@ -1,1172 +0,0 @@ -// 
---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/googleapi" -) - -func ResourceComputeRegionDisk() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRegionDiskCreate, - Read: resourceComputeRegionDiskRead, - Update: resourceComputeRegionDiskUpdate, - Delete: resourceComputeRegionDiskDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeRegionDiskImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - CustomizeDiff: customdiff.All( - customdiff.ForceNewIfChange("size", isDiskShrinkage)), - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "replica_zones": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `URLs of the zones where the disk should be replicated to.`, - MinItems: 2, - MaxItems: 2, - Elem: &schema.Schema{ - Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "disk_encryption_key": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Encrypts the disk using a customer-supplied encryption key. - -After you encrypt a disk with a customer-supplied key, you must -provide the same key if you use the disk later (e.g. to create a disk -snapshot or an image, or to attach the disk to a virtual machine). - -Customer-supplied encryption keys do not protect access to metadata of -the disk. 
- -If you do not provide an encryption key when creating the disk, then -the disk will be encrypted using an automatically generated key and -you do not need to provide a key to use the disk later.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name of the encryption key that is stored in Google Cloud KMS.`, - }, - "raw_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - Sensitive: true, - }, - "sha256": { - Type: schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied -encryption key that protects this resource.`, - }, - }, - }, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Labels to apply to this disk. A list of key->value pairs.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "physical_block_size_bytes": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Physical block size of the persistent disk, in bytes. If not present -in a request, a default value is used. Currently supported sizes -are 4096 and 16384, other sizes may be added in the future. -If an unsupported value is requested, the error message will list -the supported values for the caller's project.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the region where the disk resides.`, - }, - "size": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Size of the persistent disk, specified in GB. 
You can specify this -field when creating a persistent disk using the sourceImage or -sourceSnapshot parameter, or specify it alone to create an empty -persistent disk. - -If you specify this field along with sourceImage or sourceSnapshot, -the value of sizeGb must not be less than the size of the sourceImage -or the size of the snapshot.`, - }, - "snapshot": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The source snapshot used to create this disk. You can provide this as -a partial or full URL to the resource. For example, the following are -valid values: - -* 'https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot' -* 'projects/project/global/snapshots/snapshot' -* 'global/snapshots/snapshot' -* 'snapshot'`, - }, - "source_disk": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: sourceDiskDiffSupress, - Description: `The source disk used to create this disk. You can provide this as a partial or full URL to the resource. -For example, the following are valid values: - -* https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks/{disk} -* https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks/{disk} -* projects/{project}/zones/{zone}/disks/{disk} -* projects/{project}/regions/{region}/disks/{disk} -* zones/{zone}/disks/{disk} -* regions/{region}/disks/{disk}`, - }, - "source_snapshot_encryption_key": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The customer-supplied encryption key of the source snapshot. 
Required -if the source snapshot is protected by a customer-supplied encryption -key.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "raw_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - }, - "sha256": { - Type: schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied -encryption key that protects this resource.`, - }, - }, - }, - }, - "type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the disk type resource describing which disk type to use to -create the disk. Provide this when creating the disk.`, - Default: "pd-standard", - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "label_fingerprint": { - Type: schema.TypeString, - Computed: true, - Description: `The fingerprint used for optimistic locking of this resource. Used -internally during updates.`, - }, - "last_attach_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Last attach timestamp in RFC3339 text format.`, - }, - "last_detach_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Last detach timestamp in RFC3339 text format.`, - }, - "source_disk_id": { - Type: schema.TypeString, - Computed: true, - Description: `The ID value of the disk used to create this image. This value may -be used to determine whether the image was taken from the current -or a previous instance of a given disk name.`, - }, - "source_snapshot_id": { - Type: schema.TypeString, - Computed: true, - Description: `The unique ID of the snapshot used to create this disk. 
This value -identifies the exact snapshot that was used to create this persistent -disk. For example, if you created the persistent disk from a snapshot -that was later deleted and recreated under the same name, the source -snapshot ID would identify the exact version of the snapshot that was -used.`, - }, - "users": { - Type: schema.TypeList, - Computed: true, - Description: `Links to the users of the disk (attached instances) in form: -project/zones/zone/instances/instance`, - Elem: &schema.Schema{ - Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionDiskCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelFingerprintProp, err := expandComputeRegionDiskLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - descriptionProp, err := expandComputeRegionDiskDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandComputeRegionDiskLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = 
labelsProp - } - nameProp, err := expandComputeRegionDiskName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - sizeGbProp, err := expandComputeRegionDiskSize(d.Get("size"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("size"); !isEmptyValue(reflect.ValueOf(sizeGbProp)) && (ok || !reflect.DeepEqual(v, sizeGbProp)) { - obj["sizeGb"] = sizeGbProp - } - physicalBlockSizeBytesProp, err := expandComputeRegionDiskPhysicalBlockSizeBytes(d.Get("physical_block_size_bytes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("physical_block_size_bytes"); !isEmptyValue(reflect.ValueOf(physicalBlockSizeBytesProp)) && (ok || !reflect.DeepEqual(v, physicalBlockSizeBytesProp)) { - obj["physicalBlockSizeBytes"] = physicalBlockSizeBytesProp - } - replicaZonesProp, err := expandComputeRegionDiskReplicaZones(d.Get("replica_zones"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("replica_zones"); !isEmptyValue(reflect.ValueOf(replicaZonesProp)) && (ok || !reflect.DeepEqual(v, replicaZonesProp)) { - obj["replicaZones"] = replicaZonesProp - } - typeProp, err := expandComputeRegionDiskType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - sourceDiskProp, err := expandComputeRegionDiskSourceDisk(d.Get("source_disk"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_disk"); !isEmptyValue(reflect.ValueOf(sourceDiskProp)) && (ok || !reflect.DeepEqual(v, sourceDiskProp)) { - obj["sourceDisk"] = sourceDiskProp - } - regionProp, err := expandComputeRegionDiskRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - diskEncryptionKeyProp, err := expandComputeRegionDiskDiskEncryptionKey(d.Get("disk_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disk_encryption_key"); !isEmptyValue(reflect.ValueOf(diskEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, diskEncryptionKeyProp)) { - obj["diskEncryptionKey"] = diskEncryptionKeyProp - } - sourceSnapshotProp, err := expandComputeRegionDiskSnapshot(d.Get("snapshot"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("snapshot"); !isEmptyValue(reflect.ValueOf(sourceSnapshotProp)) && (ok || !reflect.DeepEqual(v, sourceSnapshotProp)) { - obj["sourceSnapshot"] = sourceSnapshotProp - } - sourceSnapshotEncryptionKeyProp, err := expandComputeRegionDiskSourceSnapshotEncryptionKey(d.Get("source_snapshot_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_snapshot_encryption_key"); !isEmptyValue(reflect.ValueOf(sourceSnapshotEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, sourceSnapshotEncryptionKeyProp)) { - obj["sourceSnapshotEncryptionKey"] = sourceSnapshotEncryptionKeyProp - } - - obj, err = resourceComputeRegionDiskEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new RegionDisk: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionDisk: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", 
billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating RegionDisk: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/disks/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating RegionDisk", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create RegionDisk: %s", err) - } - - log.Printf("[DEBUG] Finished creating RegionDisk %q: %#v", d.Id(), res) - - return resourceComputeRegionDiskRead(d, meta) -} - -func resourceComputeRegionDiskRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionDisk: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionDisk %q", d.Id())) - } - - res, err = resourceComputeRegionDiskDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing ComputeRegionDisk because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - - if err := d.Set("label_fingerprint", flattenComputeRegionDiskLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeRegionDiskCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("description", flattenComputeRegionDiskDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("last_attach_timestamp", flattenComputeRegionDiskLastAttachTimestamp(res["lastAttachTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("last_detach_timestamp", flattenComputeRegionDiskLastDetachTimestamp(res["lastDetachTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("labels", flattenComputeRegionDiskLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("name", flattenComputeRegionDiskName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("size", flattenComputeRegionDiskSize(res["sizeGb"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("users", flattenComputeRegionDiskUsers(res["users"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("physical_block_size_bytes", flattenComputeRegionDiskPhysicalBlockSizeBytes(res["physicalBlockSizeBytes"], d, config)); err != nil 
{ - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("replica_zones", flattenComputeRegionDiskReplicaZones(res["replicaZones"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("type", flattenComputeRegionDiskType(res["type"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("source_disk", flattenComputeRegionDiskSourceDisk(res["sourceDisk"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("source_disk_id", flattenComputeRegionDiskSourceDiskId(res["sourceDiskId"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("region", flattenComputeRegionDiskRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("disk_encryption_key", flattenComputeRegionDiskDiskEncryptionKey(res["diskEncryptionKey"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("snapshot", flattenComputeRegionDiskSnapshot(res["sourceSnapshot"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("source_snapshot_encryption_key", flattenComputeRegionDiskSourceSnapshotEncryptionKey(res["sourceSnapshotEncryptionKey"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("source_snapshot_id", flattenComputeRegionDiskSourceSnapshotId(res["sourceSnapshotId"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading RegionDisk: %s", err) - } - - return nil -} - -func resourceComputeRegionDiskUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - 
userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionDisk: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("label_fingerprint") || d.HasChange("labels") { - obj := make(map[string]interface{}) - - labelFingerprintProp, err := expandComputeRegionDiskLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - labelsProp, err := expandComputeRegionDiskLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{name}}/setLabels") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating RegionDisk %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating RegionDisk %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating RegionDisk", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("size") { - obj := make(map[string]interface{}) - - sizeGbProp, err := expandComputeRegionDiskSize(d.Get("size"), d, config) - if err != nil { - return err - } else if v, ok 
:= d.GetOkExists("size"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sizeGbProp)) { - obj["sizeGb"] = sizeGbProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{name}}/resize") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating RegionDisk %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating RegionDisk %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating RegionDisk", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeRegionDiskRead(d, meta) -} - -func resourceComputeRegionDiskDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionDisk: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - readRes, err := SendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeDisk %q", d.Id())) - } - - // if disks are attached to instances, they must be detached before the disk can be deleted - if v, ok := readRes["users"].([]interface{}); ok { - type detachArgs struct{ project, zone, instance, deviceName string } - var detachCalls 
[]detachArgs - - for _, instance := range convertStringArr(v) { - self := d.Get("self_link").(string) - instanceProject, instanceZone, instanceName, err := GetLocationalResourcePropertiesFromSelfLinkString(instance) - if err != nil { - return err - } - - i, err := config.NewComputeClient(userAgent).Instances.Get(instanceProject, instanceZone, instanceName).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] instance %q not found, not bothering to detach disks", instance) - continue - } - return fmt.Errorf("Error retrieving instance %s: %s", instance, err.Error()) - } - for _, disk := range i.Disks { - if compareSelfLinkOrResourceName("", disk.Source, self, nil) { - detachCalls = append(detachCalls, detachArgs{ - project: instanceProject, - zone: GetResourceNameFromSelfLink(i.Zone), - instance: i.Name, - deviceName: disk.DeviceName, - }) - } - } - } - - for _, call := range detachCalls { - op, err := config.NewComputeClient(userAgent).Instances.DetachDisk(call.project, call.zone, call.instance, call.deviceName).Do() - if err != nil { - return fmt.Errorf("Error detaching disk %s from instance %s/%s/%s: %s", call.deviceName, call.project, - call.zone, call.instance, err.Error()) - } - err = ComputeOperationWaitTime(config, op, call.project, - fmt.Sprintf("Detaching disk from %s/%s/%s", call.project, call.zone, call.instance), userAgent, d.Timeout(schema.TimeoutDelete)) - if err != nil { - if opErr, ok := err.(ComputeOperationError); ok && len(opErr.Errors) == 1 && opErr.Errors[0].Code == "RESOURCE_NOT_FOUND" { - log.Printf("[WARN] instance %q was deleted while awaiting detach", call.instance) - continue - } - return err - } - } - } - log.Printf("[DEBUG] Deleting RegionDisk %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, 
url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionDisk") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting RegionDisk", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting RegionDisk %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/disks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/disks/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeRegionDiskLabelFingerprint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskLastAttachTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskLastDetachTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskSize(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeRegionDiskUsers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeRegionDiskPhysicalBlockSizeBytes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeRegionDiskReplicaZones(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeRegionDiskType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenComputeRegionDiskSourceDisk(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskSourceDiskId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenComputeRegionDiskDiskEncryptionKey(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["raw_key"] = - flattenComputeRegionDiskDiskEncryptionKeyRawKey(original["rawKey"], d, config) - transformed["sha256"] = - flattenComputeRegionDiskDiskEncryptionKeySha256(original["sha256"], d, config) - transformed["kms_key_name"] = - flattenComputeRegionDiskDiskEncryptionKeyKmsKeyName(original["kmsKeyName"], d, config) - return []interface{}{transformed} -} -func flattenComputeRegionDiskDiskEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskDiskEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskDiskEncryptionKeyKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskSnapshot(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionDiskSourceSnapshotEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["raw_key"] = - flattenComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(original["rawKey"], d, config) - transformed["sha256"] = - flattenComputeRegionDiskSourceSnapshotEncryptionKeySha256(original["sha256"], d, config) - return []interface{}{transformed} -} -func flattenComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskSourceSnapshotEncryptionKeySha256(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskSourceSnapshotId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeRegionDiskLabelFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandComputeRegionDiskName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskPhysicalBlockSizeBytes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskReplicaZones(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, fmt.Errorf("Invalid value for replica_zones: nil") - } - f, err := parseGlobalFieldValue("zones", raw.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for replica_zones: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandComputeRegionDiskType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("diskTypes", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return 
nil, fmt.Errorf("Invalid value for type: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionDiskSourceDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionDiskDiskEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeRegionDiskDiskEncryptionKeyRawKey(original["raw_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - transformed["rawKey"] = transformedRawKey - } - - transformedSha256, err := expandComputeRegionDiskDiskEncryptionKeySha256(original["sha256"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSha256); val.IsValid() && !isEmptyValue(val) { - transformed["sha256"] = transformedSha256 - } - - transformedKmsKeyName, err := expandComputeRegionDiskDiskEncryptionKeyKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - return transformed, nil -} - -func expandComputeRegionDiskDiskEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskDiskEncryptionKeySha256(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskDiskEncryptionKeyKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskSnapshot(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("snapshots", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for snapshot: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionDiskSourceSnapshotEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(original["raw_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - transformed["rawKey"] = transformedRawKey - } - - transformedSha256, err := expandComputeRegionDiskSourceSnapshotEncryptionKeySha256(original["sha256"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSha256); val.IsValid() && !isEmptyValue(val) { - transformed["sha256"] = transformedSha256 - } - - return transformed, nil -} - -func expandComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskSourceSnapshotEncryptionKeySha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeRegionDiskEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := 
meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - if v, ok := d.GetOk("type"); ok { - log.Printf("[DEBUG] Loading disk type: %s", v.(string)) - diskType, err := readRegionDiskType(config, d, v.(string)) - if err != nil { - return nil, fmt.Errorf( - "Error loading disk type '%s': %s", - v.(string), err) - } - - obj["type"] = diskType.RelativeLink() - } - - if v, ok := d.GetOk("image"); ok { - log.Printf("[DEBUG] Resolving image name: %s", v.(string)) - imageUrl, err := resolveImage(config, project, v.(string), userAgent) - if err != nil { - return nil, fmt.Errorf( - "Error resolving image name '%s': %s", - v.(string), err) - } - - obj["sourceImage"] = imageUrl - log.Printf("[DEBUG] Image name resolved to: %s", imageUrl) - } - - return obj, nil -} - -func resourceComputeRegionDiskDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v, ok := res["diskEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - // The raw key won't be returned, so we need to use the original. 
- transformed["rawKey"] = d.Get("disk_encryption_key.0.raw_key") - transformed["rsaEncryptedKey"] = d.Get("disk_encryption_key.0.rsa_encrypted_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - // The response for crypto keys often includes the version of the key which needs to be removed - // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 - transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["diskEncryptionKey"] = transformed - } - - if v, ok := res["sourceImageEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - // The raw key won't be returned, so we need to use the original. - transformed["rawKey"] = d.Get("source_image_encryption_key.0.raw_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - // The response for crypto keys often includes the version of the key which needs to be removed - // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 - transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["sourceImageEncryptionKey"] = transformed - } - - if v, ok := res["sourceSnapshotEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - // The raw key won't be returned, so we need to use the original. 
- transformed["rawKey"] = d.Get("source_snapshot_encryption_key.0.raw_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - // The response for crypto keys often includes the version of the key which needs to be removed - // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 - transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["sourceSnapshotEncryptionKey"] = transformed - } - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_disk_resource_policy_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_disk_resource_policy_attachment.go deleted file mode 100644 index c21ce884f6..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_disk_resource_policy_attachment.go +++ /dev/null @@ -1,379 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeRegionDiskResourcePolicyAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRegionDiskResourcePolicyAttachmentCreate, - Read: resourceComputeRegionDiskResourcePolicyAttachmentRead, - Delete: resourceComputeRegionDiskResourcePolicyAttachmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeRegionDiskResourcePolicyAttachmentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "disk": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the regional disk in which the resource policies are attached to.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource policy to be attached to the disk for scheduling snapshot -creation. 
Do not specify the self link.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the region where the disk resides.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionDiskResourcePolicyAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeRegionDiskResourcePolicyAttachmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - obj, err = resourceComputeRegionDiskResourcePolicyAttachmentEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{disk}}/addResourcePolicies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new RegionDiskResourcePolicyAttachment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionDiskResourcePolicyAttachment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating RegionDiskResourcePolicyAttachment: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, 
"{{project}}/{{region}}/{{disk}}/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating RegionDiskResourcePolicyAttachment", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create RegionDiskResourcePolicyAttachment: %s", err) - } - - log.Printf("[DEBUG] Finished creating RegionDiskResourcePolicyAttachment %q: %#v", d.Id(), res) - - return resourceComputeRegionDiskResourcePolicyAttachmentRead(d, meta) -} - -func resourceComputeRegionDiskResourcePolicyAttachmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{disk}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionDiskResourcePolicyAttachment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionDiskResourcePolicyAttachment %q", d.Id())) - } - - res, err = flattenNestedComputeRegionDiskResourcePolicyAttachment(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. 
- log.Printf("[DEBUG] Removing ComputeRegionDiskResourcePolicyAttachment because it couldn't be matched.") - d.SetId("") - return nil - } - - res, err = resourceComputeRegionDiskResourcePolicyAttachmentDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing ComputeRegionDiskResourcePolicyAttachment because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading RegionDiskResourcePolicyAttachment: %s", err) - } - - if err := d.Set("name", flattenNestedComputeRegionDiskResourcePolicyAttachmentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionDiskResourcePolicyAttachment: %s", err) - } - - return nil -} - -func resourceComputeRegionDiskResourcePolicyAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionDiskResourcePolicyAttachment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{disk}}/removeResourcePolicies") - if err != nil { - return err - } - - var obj map[string]interface{} - obj = make(map[string]interface{}) - - region, err := getRegion(d, config) - if err != nil { - return err - } - if region == "" { - return fmt.Errorf("region must be non-empty - set in resource or at provider-level") - } - - name, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(name)) && (ok || !reflect.DeepEqual(v, name)) { - 
obj["resourcePolicies"] = []interface{}{fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, name)} - } - log.Printf("[DEBUG] Deleting RegionDiskResourcePolicyAttachment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionDiskResourcePolicyAttachment") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting RegionDiskResourcePolicyAttachment", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting RegionDiskResourcePolicyAttachment %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionDiskResourcePolicyAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/disks/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{disk}}/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNestedComputeRegionDiskResourcePolicyAttachmentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeRegionDiskResourcePolicyAttachmentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeRegionDiskResourcePolicyAttachmentEncoder(d *schema.ResourceData, meta 
interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - region, err := getRegion(d, config) - if err != nil { - return nil, err - } - if region == "" { - return nil, fmt.Errorf("region must be non-empty - set in resource or at provider-level") - } - - obj["resourcePolicies"] = []interface{}{fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, obj["name"])} - delete(obj, "name") - return obj, nil -} - -func flattenNestedComputeRegionDiskResourcePolicyAttachment(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["resourcePolicies"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value resourcePolicies. Actual value: %v", v) - } - - _, item, err := resourceComputeRegionDiskResourcePolicyAttachmentFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeRegionDiskResourcePolicyAttachmentFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputeRegionDiskResourcePolicyAttachmentName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeRegionDiskResourcePolicyAttachmentName(expectedName, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - // List response only contains the ID - construct a response object. 
- item := map[string]interface{}{ - "name": itemRaw, - } - - // Decode list item before comparing. - item, err := resourceComputeRegionDiskResourcePolicyAttachmentDecoder(d, meta, item) - if err != nil { - return -1, nil, err - } - - itemName := flattenNestedComputeRegionDiskResourcePolicyAttachmentName(item["name"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { - log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} -func resourceComputeRegionDiskResourcePolicyAttachmentDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - res["name"] = GetResourceNameFromSelfLink(res["name"].(string)) - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_network_endpoint_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_network_endpoint_group.go deleted file mode 100644 index cc396131a3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_network_endpoint_group.go +++ /dev/null @@ -1,772 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. 
-// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeRegionNetworkEndpointGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRegionNetworkEndpointGroupCreate, - Read: resourceComputeRegionNetworkEndpointGroupRead, - Delete: resourceComputeRegionNetworkEndpointGroupDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeRegionNetworkEndpointGroupImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCEName, - Description: `Name of the resource; provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "region": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the region where the Serverless NEGs Reside.`, - }, - "app_engine": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Only valid when networkEndpointType is "SERVERLESS". 
-Only one of cloud_run, app_engine, cloud_function or serverless_deployment may be set.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "service": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Optional serving service. -The service name must be 1-63 characters long, and comply with RFC1035. -Example value: "default", "my-service".`, - }, - "url_mask": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A template to parse service and version fields from a request URL. -URL mask allows for routing to multiple App Engine services without -having to create multiple Network Endpoint Groups and backend services. - -For example, the request URLs "foo1-dot-appname.appspot.com/v1" and -"foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with -URL mask "-dot-appname.appspot.com/". The URL mask will parse -them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively.`, - }, - "version": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Optional serving version. -The version must be 1-63 characters long, and comply with RFC1035. -Example value: "v1", "v2".`, - }, - }, - }, - ConflictsWith: []string{"cloud_run", "cloud_function"}, - }, - "cloud_function": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Only valid when networkEndpointType is "SERVERLESS". -Only one of cloud_run, app_engine, cloud_function or serverless_deployment may be set.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "function": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A user-defined name of the Cloud Function. -The function name is case-sensitive and must be 1-63 characters long. 
-Example value: "func1".`, - AtLeastOneOf: []string{"cloud_function.0.function", "cloud_function.0.url_mask"}, - }, - "url_mask": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A template to parse function field from a request URL. URL mask allows -for routing to multiple Cloud Functions without having to create -multiple Network Endpoint Groups and backend services. - -For example, request URLs "mydomain.com/function1" and "mydomain.com/function2" -can be backed by the same Serverless NEG with URL mask "/". The URL mask -will parse them to { function = "function1" } and { function = "function2" } respectively.`, - AtLeastOneOf: []string{"cloud_function.0.function", "cloud_function.0.url_mask"}, - }, - }, - }, - ConflictsWith: []string{"cloud_run", "app_engine"}, - }, - "cloud_run": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Only valid when networkEndpointType is "SERVERLESS". -Only one of cloud_run, app_engine, cloud_function or serverless_deployment may be set.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "service": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Cloud Run service is the main resource of Cloud Run. -The service must be 1-63 characters long, and comply with RFC1035. -Example value: "run-service".`, - AtLeastOneOf: []string{"cloud_run.0.service", "cloud_run.0.url_mask"}, - }, - "tag": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Cloud Run tag represents the "named-revision" to provide -additional fine-grained traffic routing information. -The tag must be 1-63 characters long, and comply with RFC1035. -Example value: "revision-0010".`, - }, - "url_mask": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A template to parse service and tag fields from a request URL. 
-URL mask allows for routing to multiple Run services without having -to create multiple network endpoint groups and backend services. - -For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" -an be backed by the same Serverless Network Endpoint Group (NEG) with -URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } -and { service="bar2", tag="foo2" } respectively.`, - AtLeastOneOf: []string{"cloud_run.0.service", "cloud_run.0.url_mask"}, - }, - }, - }, - ConflictsWith: []string{"cloud_function", "app_engine"}, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "network": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `This field is only used for PSC. -The URL of the network to which all network endpoints in the NEG belong. Uses -"default" project network if unspecified.`, - }, - "network_endpoint_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"SERVERLESS", "PRIVATE_SERVICE_CONNECT", ""}), - Description: `Type of network endpoints in this network endpoint group. Defaults to SERVERLESS Default value: "SERVERLESS" Possible values: ["SERVERLESS", "PRIVATE_SERVICE_CONNECT"]`, - Default: "SERVERLESS", - }, - "psc_target_service": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The target service url used to set up private service connection to -a Google API or a PSC Producer Service Attachment.`, - }, - "subnetwork": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `This field is only used for PSC. 
-Optional URL of the subnetwork to which all network endpoints in the NEG belong.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionNetworkEndpointGroupCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeRegionNetworkEndpointGroupName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeRegionNetworkEndpointGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - networkEndpointTypeProp, err := expandComputeRegionNetworkEndpointGroupNetworkEndpointType(d.Get("network_endpoint_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_endpoint_type"); !isEmptyValue(reflect.ValueOf(networkEndpointTypeProp)) && (ok || !reflect.DeepEqual(v, networkEndpointTypeProp)) { - obj["networkEndpointType"] = networkEndpointTypeProp - } - pscTargetServiceProp, err := expandComputeRegionNetworkEndpointGroupPscTargetService(d.Get("psc_target_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("psc_target_service"); !isEmptyValue(reflect.ValueOf(pscTargetServiceProp)) && (ok || !reflect.DeepEqual(v, pscTargetServiceProp)) { - obj["pscTargetService"] = pscTargetServiceProp - } - networkProp, err := 
expandComputeRegionNetworkEndpointGroupNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - subnetworkProp, err := expandComputeRegionNetworkEndpointGroupSubnetwork(d.Get("subnetwork"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("subnetwork"); !isEmptyValue(reflect.ValueOf(subnetworkProp)) && (ok || !reflect.DeepEqual(v, subnetworkProp)) { - obj["subnetwork"] = subnetworkProp - } - cloudRunProp, err := expandComputeRegionNetworkEndpointGroupCloudRun(d.Get("cloud_run"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cloud_run"); !isEmptyValue(reflect.ValueOf(cloudRunProp)) && (ok || !reflect.DeepEqual(v, cloudRunProp)) { - obj["cloudRun"] = cloudRunProp - } - appEngineProp, err := expandComputeRegionNetworkEndpointGroupAppEngine(d.Get("app_engine"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("app_engine"); !isEmptyValue(reflect.ValueOf(appEngineProp)) && (ok || !reflect.DeepEqual(v, appEngineProp)) { - obj["appEngine"] = appEngineProp - } - cloudFunctionProp, err := expandComputeRegionNetworkEndpointGroupCloudFunction(d.Get("cloud_function"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cloud_function"); !isEmptyValue(reflect.ValueOf(cloudFunctionProp)) && (ok || !reflect.DeepEqual(v, cloudFunctionProp)) { - obj["cloudFunction"] = cloudFunctionProp - } - regionProp, err := expandComputeRegionNetworkEndpointGroupRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkEndpointGroups") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new RegionNetworkEndpointGroup: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionNetworkEndpointGroup: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating RegionNetworkEndpointGroup: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating RegionNetworkEndpointGroup", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create RegionNetworkEndpointGroup: %s", err) - } - - log.Printf("[DEBUG] Finished creating RegionNetworkEndpointGroup %q: %#v", d.Id(), res) - - return resourceComputeRegionNetworkEndpointGroupRead(d, meta) -} - -func resourceComputeRegionNetworkEndpointGroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for 
RegionNetworkEndpointGroup: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionNetworkEndpointGroup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - - if err := d.Set("name", flattenComputeRegionNetworkEndpointGroupName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("description", flattenComputeRegionNetworkEndpointGroupDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("network_endpoint_type", flattenComputeRegionNetworkEndpointGroupNetworkEndpointType(res["networkEndpointType"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("psc_target_service", flattenComputeRegionNetworkEndpointGroupPscTargetService(res["pscTargetService"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("network", flattenComputeRegionNetworkEndpointGroupNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("subnetwork", flattenComputeRegionNetworkEndpointGroupSubnetwork(res["subnetwork"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("cloud_run", flattenComputeRegionNetworkEndpointGroupCloudRun(res["cloudRun"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionNetworkEndpointGroup: 
%s", err) - } - if err := d.Set("app_engine", flattenComputeRegionNetworkEndpointGroupAppEngine(res["appEngine"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("cloud_function", flattenComputeRegionNetworkEndpointGroupCloudFunction(res["cloudFunction"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("region", flattenComputeRegionNetworkEndpointGroupRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - - return nil -} - -func resourceComputeRegionNetworkEndpointGroupDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionNetworkEndpointGroup: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting RegionNetworkEndpointGroup %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionNetworkEndpointGroup") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting RegionNetworkEndpointGroup", userAgent, 
- d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting RegionNetworkEndpointGroup %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionNetworkEndpointGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/networkEndpointGroups/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeRegionNetworkEndpointGroupName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupNetworkEndpointType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupPscTargetService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionNetworkEndpointGroupSubnetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionNetworkEndpointGroupCloudRun(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - 
return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["service"] = - flattenComputeRegionNetworkEndpointGroupCloudRunService(original["service"], d, config) - transformed["tag"] = - flattenComputeRegionNetworkEndpointGroupCloudRunTag(original["tag"], d, config) - transformed["url_mask"] = - flattenComputeRegionNetworkEndpointGroupCloudRunUrlMask(original["urlMask"], d, config) - return []interface{}{transformed} -} -func flattenComputeRegionNetworkEndpointGroupCloudRunService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupCloudRunTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupCloudRunUrlMask(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupAppEngine(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["service"] = - flattenComputeRegionNetworkEndpointGroupAppEngineService(original["service"], d, config) - transformed["version"] = - flattenComputeRegionNetworkEndpointGroupAppEngineVersion(original["version"], d, config) - transformed["url_mask"] = - flattenComputeRegionNetworkEndpointGroupAppEngineUrlMask(original["urlMask"], d, config) - return []interface{}{transformed} -} -func flattenComputeRegionNetworkEndpointGroupAppEngineService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupAppEngineVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupAppEngineUrlMask(v interface{}, d *schema.ResourceData, 
config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupCloudFunction(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["function"] = - flattenComputeRegionNetworkEndpointGroupCloudFunctionFunction(original["function"], d, config) - transformed["url_mask"] = - flattenComputeRegionNetworkEndpointGroupCloudFunctionUrlMask(original["urlMask"], d, config) - return []interface{}{transformed} -} -func flattenComputeRegionNetworkEndpointGroupCloudFunctionFunction(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupCloudFunctionUrlMask(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeRegionNetworkEndpointGroupName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupNetworkEndpointType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupPscTargetService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { 
- return nil, fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionNetworkEndpointGroupSubnetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for subnetwork: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionNetworkEndpointGroupCloudRun(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedService, err := expandComputeRegionNetworkEndpointGroupCloudRunService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - transformedTag, err := expandComputeRegionNetworkEndpointGroupCloudRunTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - transformedUrlMask, err := expandComputeRegionNetworkEndpointGroupCloudRunUrlMask(original["url_mask"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUrlMask); val.IsValid() && !isEmptyValue(val) { - transformed["urlMask"] = transformedUrlMask - } - - return transformed, nil -} - -func expandComputeRegionNetworkEndpointGroupCloudRunService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupCloudRunTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return 
v, nil -} - -func expandComputeRegionNetworkEndpointGroupCloudRunUrlMask(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupAppEngine(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedService, err := expandComputeRegionNetworkEndpointGroupAppEngineService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - transformedVersion, err := expandComputeRegionNetworkEndpointGroupAppEngineVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - transformedUrlMask, err := expandComputeRegionNetworkEndpointGroupAppEngineUrlMask(original["url_mask"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUrlMask); val.IsValid() && !isEmptyValue(val) { - transformed["urlMask"] = transformedUrlMask - } - - return transformed, nil -} - -func expandComputeRegionNetworkEndpointGroupAppEngineService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupAppEngineVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupAppEngineUrlMask(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeRegionNetworkEndpointGroupCloudFunction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFunction, err := expandComputeRegionNetworkEndpointGroupCloudFunctionFunction(original["function"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFunction); val.IsValid() && !isEmptyValue(val) { - transformed["function"] = transformedFunction - } - - transformedUrlMask, err := expandComputeRegionNetworkEndpointGroupCloudFunctionUrlMask(original["url_mask"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUrlMask); val.IsValid() && !isEmptyValue(val) { - transformed["urlMask"] = transformedUrlMask - } - - return transformed, nil -} - -func expandComputeRegionNetworkEndpointGroupCloudFunctionFunction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupCloudFunctionUrlMask(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_per_instance_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_per_instance_config.go deleted file mode 100644 index 4ac8b40724..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_per_instance_config.go +++ /dev/null @@ -1,735 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeRegionPerInstanceConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRegionPerInstanceConfigCreate, - Read: resourceComputeRegionPerInstanceConfigRead, - Update: resourceComputeRegionPerInstanceConfigUpdate, - Delete: resourceComputeRegionPerInstanceConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeRegionPerInstanceConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name for this per-instance config and its corresponding instance.`, - }, - "region_instance_group_manager": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The region instance group manager this instance config is part of.`, - }, - "preserved_state": { - Type: schema.TypeList, - Optional: true, - Description: `The preserved state for this 
instance.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disk": { - Type: schema.TypeSet, - Optional: true, - Description: `Stateful disks for the instance.`, - Elem: computeRegionPerInstanceConfigPreservedStateDiskSchema(), - // Default schema.HashSchema is used. - }, - "metadata": { - Type: schema.TypeMap, - Optional: true, - Description: `Preserved metadata defined for this instance. This is a list of key->value pairs.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Region where the containing instance group manager is located`, - }, - "minimal_action": { - Type: schema.TypeString, - Optional: true, - Default: "NONE", - Description: `The minimal action to perform on the instance during an update. -Default is 'NONE'. Possible values are: -* REPLACE -* RESTART -* REFRESH -* NONE`, - }, - "most_disruptive_allowed_action": { - Type: schema.TypeString, - Optional: true, - Default: "REPLACE", - Description: `The most disruptive action to perform on the instance during an update. -Default is 'REPLACE'. Possible values are: -* REPLACE -* RESTART -* REFRESH -* NONE`, - }, - "remove_instance_state_on_destroy": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `When true, deleting this config will immediately remove any specified state from the underlying instance. -When false, deleting this config will *not* immediately remove any state from the underlying instance. 
-State will be removed on the next instance recreation or update.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func computeRegionPerInstanceConfigPreservedStateDiskSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "device_name": { - Type: schema.TypeString, - Required: true, - Description: `A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance.`, - }, - "source": { - Type: schema.TypeString, - Required: true, - Description: `The URI of an existing persistent disk to attach under the specified device-name in the format -'projects/project-id/zones/zone/disks/disk-name'.`, - }, - "delete_rule": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION", ""}), - Description: `A value that prescribes what should happen to the stateful disk when the VM instance is deleted. -The available options are 'NEVER' and 'ON_PERMANENT_INSTANCE_DELETION'. -'NEVER' - detach the disk when the VM is deleted, but do not delete the disk. -'ON_PERMANENT_INSTANCE_DELETION' will delete the stateful disk when the VM is permanently -deleted from the instance group. Default value: "NEVER" Possible values: ["NEVER", "ON_PERMANENT_INSTANCE_DELETION"]`, - Default: "NEVER", - }, - "mode": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"READ_ONLY", "READ_WRITE", ""}), - Description: `The mode of the disk. 
Default value: "READ_WRITE" Possible values: ["READ_ONLY", "READ_WRITE"]`, - Default: "READ_WRITE", - }, - }, - } -} - -func resourceComputeRegionPerInstanceConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeRegionPerInstanceConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - preservedStateProp, err := expandNestedComputeRegionPerInstanceConfigPreservedState(d.Get("preserved_state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("preserved_state"); !isEmptyValue(reflect.ValueOf(preservedStateProp)) && (ok || !reflect.DeepEqual(v, preservedStateProp)) { - obj["preservedState"] = preservedStateProp - } - - obj, err = resourceComputeRegionPerInstanceConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "instanceGroupManager/{{project}}/{{region}}/{{region_instance_group_manager}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/createInstances") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new RegionPerInstanceConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionPerInstanceConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - 
res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating RegionPerInstanceConfig: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{region_instance_group_manager}}/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating RegionPerInstanceConfig", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create RegionPerInstanceConfig: %s", err) - } - - log.Printf("[DEBUG] Finished creating RegionPerInstanceConfig %q: %#v", d.Id(), res) - - return resourceComputeRegionPerInstanceConfigRead(d, meta) -} - -func resourceComputeRegionPerInstanceConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/listPerInstanceConfigs") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionPerInstanceConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "POST", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionPerInstanceConfig %q", d.Id())) - } - - res, err = flattenNestedComputeRegionPerInstanceConfig(d, meta, res) - if err != nil { - return err - } - - if 
res == nil { - // Object isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing ComputeRegionPerInstanceConfig because it couldn't be matched.") - d.SetId("") - return nil - } - - // Explicitly set virtual fields to default values if unset - if _, ok := d.GetOkExists("minimal_action"); !ok { - if err := d.Set("minimal_action", "NONE"); err != nil { - return fmt.Errorf("Error setting minimal_action: %s", err) - } - } - if _, ok := d.GetOkExists("most_disruptive_allowed_action"); !ok { - if err := d.Set("most_disruptive_allowed_action", "REPLACE"); err != nil { - return fmt.Errorf("Error setting most_disruptive_allowed_action: %s", err) - } - } - if _, ok := d.GetOkExists("remove_instance_state_on_destroy"); !ok { - if err := d.Set("remove_instance_state_on_destroy", false); err != nil { - return fmt.Errorf("Error setting remove_instance_state_on_destroy: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading RegionPerInstanceConfig: %s", err) - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return fmt.Errorf("Error reading RegionPerInstanceConfig: %s", err) - } - - if err := d.Set("name", flattenNestedComputeRegionPerInstanceConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionPerInstanceConfig: %s", err) - } - if err := d.Set("preserved_state", flattenNestedComputeRegionPerInstanceConfigPreservedState(res["preservedState"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionPerInstanceConfig: %s", err) - } - - return nil -} - -func resourceComputeRegionPerInstanceConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
fmt.Errorf("Error fetching project for RegionPerInstanceConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeRegionPerInstanceConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - preservedStateProp, err := expandNestedComputeRegionPerInstanceConfigPreservedState(d.Get("preserved_state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("preserved_state"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, preservedStateProp)) { - obj["preservedState"] = preservedStateProp - } - - obj, err = resourceComputeRegionPerInstanceConfigUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "instanceGroupManager/{{project}}/{{region}}/{{region_instance_group_manager}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/updatePerInstanceConfigs") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating RegionPerInstanceConfig %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating RegionPerInstanceConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating RegionPerInstanceConfig %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating RegionPerInstanceConfig", userAgent, - 
d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - // Instance name in applyUpdatesToInstances request must include zone - instanceName, err := findInstanceName(d, config) - if err != nil { - return err - } - - obj = make(map[string]interface{}) - obj["instances"] = []string{instanceName} - - minAction := d.Get("minimal_action") - if minAction == "" { - minAction = "NONE" - } - obj["minimalAction"] = minAction - - mostDisruptiveAction := d.Get("most_disruptive_allowed_action") - if isEmptyValue(reflect.ValueOf(mostDisruptiveAction)) { - mostDisruptiveAction = "REPLACE" - } - obj["mostDisruptiveAllowedAction"] = mostDisruptiveAction - - url, err = replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/applyUpdatesToInstances") - if err != nil { - return err - } - - log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) - res, err = SendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating PerInstanceConfig %q: %s", d.Id(), err) - } - - err = ComputeOperationWaitTime( - config, res, project, "Applying update to PerInstanceConfig", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - return resourceComputeRegionPerInstanceConfigRead(d, meta) -} - -func resourceComputeRegionPerInstanceConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "instanceGroupManager/{{project}}/{{region}}/{{region_instance_group_manager}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/deletePerInstanceConfigs") - if err != nil { - return err - } - - var obj map[string]interface{} - obj = map[string]interface{}{ - "names": [1]string{d.Get("name").(string)}, - } - log.Printf("[DEBUG] Deleting RegionPerInstanceConfig %q", d.Id()) - - res, err := SendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionPerInstanceConfig") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting RegionPerInstanceConfig", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - // Potentially delete the state managed by this config - if d.Get("remove_instance_state_on_destroy").(bool) { - // Instance name in applyUpdatesToInstances request must include zone - instanceName, err := findInstanceName(d, config) - if err != nil { - return err - } - - obj = make(map[string]interface{}) - obj["instances"] = []string{instanceName} - - // Updates must be applied to the instance after deleting the PerInstanceConfig - url, err = replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/applyUpdatesToInstances") - if err != nil { - return err - } - - log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) - res, err = SendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating PerInstanceConfig %q: %s", d.Id(), err) - } - - err = ComputeOperationWaitTime( - config, res, project, "Applying update to PerInstanceConfig", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error deleting PerInstanceConfig %q: %s", d.Id(), err) - } - - // RegionPerInstanceConfig goes into "DELETING" 
state while the instance is actually deleted - err = PollingWaitTime(resourceComputeRegionPerInstanceConfigPollRead(d, meta), PollCheckInstanceConfigDeleted, "Deleting RegionPerInstanceConfig", d.Timeout(schema.TimeoutDelete), 1) - if err != nil { - return fmt.Errorf("Error waiting for delete on RegionPerInstanceConfig %q: %s", d.Id(), err) - } - } - - log.Printf("[DEBUG] Finished deleting RegionPerInstanceConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionPerInstanceConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{region_instance_group_manager}}/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Explicitly set virtual fields to default values on import - if err := d.Set("minimal_action", "NONE"); err != nil { - return nil, fmt.Errorf("Error setting minimal_action: %s", err) - } - if err := d.Set("most_disruptive_allowed_action", "REPLACE"); err != nil { - return nil, fmt.Errorf("Error setting most_disruptive_allowed_action: %s", err) - } - if err := d.Set("remove_instance_state_on_destroy", false); err != nil { - return nil, fmt.Errorf("Error setting remove_instance_state_on_destroy: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenNestedComputeRegionPerInstanceConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRegionPerInstanceConfigPreservedState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return 
nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["metadata"] = - flattenNestedComputeRegionPerInstanceConfigPreservedStateMetadata(original["metadata"], d, config) - transformed["disk"] = - flattenNestedComputeRegionPerInstanceConfigPreservedStateDisk(original["disks"], d, config) - return []interface{}{transformed} -} -func flattenNestedComputeRegionPerInstanceConfigPreservedStateMetadata(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRegionPerInstanceConfigPreservedStateDisk(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - disks := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(disks)) - for devName, deleteRuleRaw := range disks { - diskObj := deleteRuleRaw.(map[string]interface{}) - source, err := getRelativePath(diskObj["source"].(string)) - if err != nil { - source = diskObj["source"].(string) - } - transformed = append(transformed, map[string]interface{}{ - "device_name": devName, - "delete_rule": diskObj["autoDelete"], - "source": source, - "mode": diskObj["mode"], - }) - } - return transformed -} - -func expandNestedComputeRegionPerInstanceConfigName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRegionPerInstanceConfigPreservedState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMetadata, err := expandNestedComputeRegionPerInstanceConfigPreservedStateMetadata(original["metadata"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMetadata); val.IsValid() && 
!isEmptyValue(val) { - transformed["metadata"] = transformedMetadata - } - - transformedDisk, err := expandNestedComputeRegionPerInstanceConfigPreservedStateDisk(original["disk"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDisk); val.IsValid() && !isEmptyValue(val) { - transformed["disks"] = transformedDisk - } - - return transformed, nil -} - -func expandNestedComputeRegionPerInstanceConfigPreservedStateMetadata(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandNestedComputeRegionPerInstanceConfigPreservedStateDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - l := v.(*schema.Set).List() - req := make(map[string]interface{}) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - deviceName := original["device_name"].(string) - diskObj := make(map[string]interface{}) - deleteRule := original["delete_rule"].(string) - if deleteRule != "" { - diskObj["autoDelete"] = deleteRule - } - source := original["source"] - if source != "" { - diskObj["source"] = source - } - mode := original["mode"] - if source != "" { - diskObj["mode"] = mode - } - req[deviceName] = diskObj - } - return req, nil -} - -func resourceComputeRegionPerInstanceConfigEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - wrappedReq := map[string]interface{}{ - "instances": []interface{}{obj}, - } - return wrappedReq, nil -} - -func resourceComputeRegionPerInstanceConfigUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // updates and creates use different 
wrapping object names - wrappedReq := map[string]interface{}{ - "perInstanceConfigs": []interface{}{obj}, - } - return wrappedReq, nil -} - -func flattenNestedComputeRegionPerInstanceConfig(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["items"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value items. Actual value: %v", v) - } - - _, item, err := resourceComputeRegionPerInstanceConfigFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeRegionPerInstanceConfigFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputeRegionPerInstanceConfigName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeRegionPerInstanceConfigName(expectedName, d, meta.(*Config)) - - // Search list for this resource. 
- for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemName := flattenNestedComputeRegionPerInstanceConfigName(item["name"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { - log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_ssl_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_ssl_certificate.go deleted file mode 100644 index dc74fa6d7b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_ssl_certificate.go +++ /dev/null @@ -1,433 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeRegionSslCertificate() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRegionSslCertificateCreate, - Read: resourceComputeRegionSslCertificateRead, - Delete: resourceComputeRegionSslCertificateDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeRegionSslCertificateImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "certificate": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The certificate in PEM format. -The certificate chain must be no greater than 5 certs long. -The chain must include at least one intermediate cert.`, - Sensitive: true, - }, - "private_key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: sha256DiffSuppress, - Description: `The write-only private key in PEM format.`, - Sensitive: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateGCEName, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash. - - -These are in the same namespace as the managed SSL certificates.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Region in which the created regional ssl certificate should reside. -If it is not provided, the provider region is used.`, - }, - "certificate_id": { - Type: schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "expire_time": { - Type: schema.TypeString, - Computed: true, - Description: `Expire time of the certificate in RFC3339 text format.`, - }, - "name_prefix": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name"}, - Description: "Creates a unique name beginning with the specified prefix. Conflicts with name.", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - // https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource - // uuid is 26 characters, limit the prefix to 37. 
- value := v.(string) - if len(value) > 37 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) - } - return - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionSslCertificateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - certificateProp, err := expandComputeRegionSslCertificateCertificate(d.Get("certificate"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("certificate"); !isEmptyValue(reflect.ValueOf(certificateProp)) && (ok || !reflect.DeepEqual(v, certificateProp)) { - obj["certificate"] = certificateProp - } - descriptionProp, err := expandComputeRegionSslCertificateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeRegionSslCertificateName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - privateKeyProp, err := expandComputeRegionSslCertificatePrivateKey(d.Get("private_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_key"); !isEmptyValue(reflect.ValueOf(privateKeyProp)) && (ok || !reflect.DeepEqual(v, privateKeyProp)) { - obj["privateKey"] = privateKeyProp - } - regionProp, err := expandComputeRegionSslCertificateRegion(d.Get("region"), d, config) - if err 
!= nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/sslCertificates") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new RegionSslCertificate: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionSslCertificate: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating RegionSslCertificate: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/sslCertificates/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating RegionSslCertificate", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create RegionSslCertificate: %s", err) - } - - log.Printf("[DEBUG] Finished creating RegionSslCertificate %q: %#v", d.Id(), res) - - return resourceComputeRegionSslCertificateRead(d, meta) -} - -func resourceComputeRegionSslCertificateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/sslCertificates/{{name}}") - if err 
!= nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionSslCertificate: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionSslCertificate %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - - if err := d.Set("certificate", flattenComputeRegionSslCertificateCertificate(res["certificate"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeRegionSslCertificateCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - if err := d.Set("description", flattenComputeRegionSslCertificateDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - if err := d.Set("expire_time", flattenComputeRegionSslCertificateExpireTime(res["expireTime"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - if err := d.Set("certificate_id", flattenComputeRegionSslCertificateCertificateId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - if err := d.Set("name", flattenComputeRegionSslCertificateName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - if err := d.Set("region", flattenComputeRegionSslCertificateRegion(res["region"], d, config)); err != nil { - return 
fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - - return nil -} - -func resourceComputeRegionSslCertificateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionSslCertificate: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/sslCertificates/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting RegionSslCertificate %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionSslCertificate") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting RegionSslCertificate", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting RegionSslCertificate %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionSslCertificateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/sslCertificates/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - 
id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/sslCertificates/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeRegionSslCertificateCertificate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionSslCertificateCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionSslCertificateDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionSslCertificateExpireTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionSslCertificateCertificateId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeRegionSslCertificateName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionSslCertificateRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeRegionSslCertificateCertificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionSslCertificateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionSslCertificateName(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - var certName string - if v, ok := d.GetOk("name"); ok { - certName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - certName = resource.PrefixedUniqueId(v.(string)) - } else { - certName = resource.UniqueId() - } - - // We need to get the {{name}} into schema to set the ID using ReplaceVars - if err := d.Set("name", certName); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return certName, nil -} - -func expandComputeRegionSslCertificatePrivateKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionSslCertificateRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_target_http_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_target_http_proxy.go deleted file mode 100644 index a8e967dd75..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_target_http_proxy.go +++ /dev/null @@ -1,429 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeRegionTargetHttpProxy() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRegionTargetHttpProxyCreate, - Read: resourceComputeRegionTargetHttpProxyRead, - Update: resourceComputeRegionTargetHttpProxyUpdate, - Delete: resourceComputeRegionTargetHttpProxyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeRegionTargetHttpProxyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "url_map": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the RegionUrlMap resource that defines the mapping from URL -to the BackendService.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Region in which the created target https proxy should reside. -If it is not provided, the provider region is used.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "proxy_id": { - Type: schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionTargetHttpProxyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeRegionTargetHttpProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeRegionTargetHttpProxyName(d.Get("name"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - urlMapProp, err := expandComputeRegionTargetHttpProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(reflect.ValueOf(urlMapProp)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - regionProp, err := expandComputeRegionTargetHttpProxyRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpProxies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new RegionTargetHttpProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionTargetHttpProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating RegionTargetHttpProxy: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating RegionTargetHttpProxy", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - 
d.SetId("") - return fmt.Errorf("Error waiting to create RegionTargetHttpProxy: %s", err) - } - - log.Printf("[DEBUG] Finished creating RegionTargetHttpProxy %q: %#v", d.Id(), res) - - return resourceComputeRegionTargetHttpProxyRead(d, meta) -} - -func resourceComputeRegionTargetHttpProxyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionTargetHttpProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionTargetHttpProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeRegionTargetHttpProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - if err := d.Set("description", flattenComputeRegionTargetHttpProxyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - if err := d.Set("proxy_id", flattenComputeRegionTargetHttpProxyProxyId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - if err := d.Set("name", flattenComputeRegionTargetHttpProxyName(res["name"], d, config)); 
err != nil { - return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - if err := d.Set("url_map", flattenComputeRegionTargetHttpProxyUrlMap(res["urlMap"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - if err := d.Set("region", flattenComputeRegionTargetHttpProxyRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - - return nil -} - -func resourceComputeRegionTargetHttpProxyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionTargetHttpProxy: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("url_map") { - obj := make(map[string]interface{}) - - urlMapProp, err := expandComputeRegionTargetHttpProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}/setUrlMap") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating RegionTargetHttpProxy %q: %s", d.Id(), err) - } else { - 
log.Printf("[DEBUG] Finished updating RegionTargetHttpProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating RegionTargetHttpProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeRegionTargetHttpProxyRead(d, meta) -} - -func resourceComputeRegionTargetHttpProxyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionTargetHttpProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting RegionTargetHttpProxy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionTargetHttpProxy") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting RegionTargetHttpProxy", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting RegionTargetHttpProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionTargetHttpProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/targetHttpProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - 
"(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeRegionTargetHttpProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetHttpProxyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetHttpProxyProxyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeRegionTargetHttpProxyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetHttpProxyUrlMap(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionTargetHttpProxyRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeRegionTargetHttpProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionTargetHttpProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionTargetHttpProxyUrlMap(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("urlMaps", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for url_map: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionTargetHttpProxyRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_target_https_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_target_https_proxy.go deleted file mode 100644 index a93df95673..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_target_https_proxy.go +++ /dev/null @@ -1,506 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeRegionTargetHttpsProxy() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRegionTargetHttpsProxyCreate, - Read: resourceComputeRegionTargetHttpsProxyRead, - Update: resourceComputeRegionTargetHttpsProxyUpdate, - Delete: resourceComputeRegionTargetHttpsProxyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeRegionTargetHttpsProxyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "ssl_certificates": { - Type: schema.TypeList, - Required: true, - Description: `A list of RegionSslCertificate resources that are used to authenticate -connections between users and the load balancer. 
Currently, exactly -one SSL certificate must be specified.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "url_map": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the RegionUrlMap resource that defines the mapping from URL -to the RegionBackendService.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Region in which the created target https proxy should reside. -If it is not provided, the provider region is used.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "proxy_id": { - Type: schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionTargetHttpsProxyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeRegionTargetHttpsProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeRegionTargetHttpsProxyName(d.Get("name"), d, config) - if 
err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - sslCertificatesProp, err := expandComputeRegionTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_certificates"); !isEmptyValue(reflect.ValueOf(sslCertificatesProp)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { - obj["sslCertificates"] = sslCertificatesProp - } - urlMapProp, err := expandComputeRegionTargetHttpsProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(reflect.ValueOf(urlMapProp)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - regionProp, err := expandComputeRegionTargetHttpsProxyRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new RegionTargetHttpsProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionTargetHttpsProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating RegionTargetHttpsProxy: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, 
"projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating RegionTargetHttpsProxy", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create RegionTargetHttpsProxy: %s", err) - } - - log.Printf("[DEBUG] Finished creating RegionTargetHttpsProxy %q: %#v", d.Id(), res) - - return resourceComputeRegionTargetHttpsProxyRead(d, meta) -} - -func resourceComputeRegionTargetHttpsProxyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionTargetHttpsProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionTargetHttpsProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeRegionTargetHttpsProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("description", 
flattenComputeRegionTargetHttpsProxyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("proxy_id", flattenComputeRegionTargetHttpsProxyProxyId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("name", flattenComputeRegionTargetHttpsProxyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("ssl_certificates", flattenComputeRegionTargetHttpsProxySslCertificates(res["sslCertificates"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("url_map", flattenComputeRegionTargetHttpsProxyUrlMap(res["urlMap"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("region", flattenComputeRegionTargetHttpsProxyRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - - return nil -} - -func resourceComputeRegionTargetHttpsProxyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionTargetHttpsProxy: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("ssl_certificates") { - obj := make(map[string]interface{}) - - sslCertificatesProp, err := expandComputeRegionTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("ssl_certificates"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { - obj["sslCertificates"] = sslCertificatesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setSslCertificates") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating RegionTargetHttpsProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating RegionTargetHttpsProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating RegionTargetHttpsProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("url_map") { - obj := make(map[string]interface{}) - - urlMapProp, err := expandComputeRegionTargetHttpsProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setUrlMap") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating RegionTargetHttpsProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating RegionTargetHttpsProxy 
%q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating RegionTargetHttpsProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeRegionTargetHttpsProxyRead(d, meta) -} - -func resourceComputeRegionTargetHttpsProxyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionTargetHttpsProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting RegionTargetHttpsProxy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionTargetHttpsProxy") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting RegionTargetHttpsProxy", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting RegionTargetHttpsProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionTargetHttpsProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/targetHttpsProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil 
{ - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeRegionTargetHttpsProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetHttpsProxyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetHttpsProxyProxyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeRegionTargetHttpsProxyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetHttpsProxySslCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeRegionTargetHttpsProxyUrlMap(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionTargetHttpsProxyRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeRegionTargetHttpsProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeRegionTargetHttpsProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionTargetHttpsProxySslCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, fmt.Errorf("Invalid value for ssl_certificates: nil") - } - f, err := parseRegionalFieldValue("sslCertificates", raw.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for ssl_certificates: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandComputeRegionTargetHttpsProxyUrlMap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("urlMaps", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for url_map: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionTargetHttpsProxyRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_target_tcp_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_target_tcp_proxy.go deleted file mode 100644 index 508e0f272b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_target_tcp_proxy.go +++ /dev/null @@ -1,421 +0,0 @@ -// ---------------------------------------------------------------------------- 
-// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeRegionTargetTcpProxy() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRegionTargetTcpProxyCreate, - Read: resourceComputeRegionTargetTcpProxyRead, - Delete: resourceComputeRegionTargetTcpProxyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeRegionTargetTcpProxyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "backend_service": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the BackendService resource.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "proxy_bind": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - ForceNew: true, - Description: `This field only applies when the forwarding rule that references -this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, - }, - "proxy_header": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"NONE", "PROXY_V1", ""}), - Description: `Specifies the type of proxy header to append before sending data to -the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Region in which the created target TCP proxy should reside. 
-If it is not provided, the provider region is used.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "proxy_id": { - Type: schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionTargetTcpProxyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeRegionTargetTcpProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeRegionTargetTcpProxyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - proxyHeaderProp, err := expandComputeRegionTargetTcpProxyProxyHeader(d.Get("proxy_header"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_header"); !isEmptyValue(reflect.ValueOf(proxyHeaderProp)) && (ok || !reflect.DeepEqual(v, proxyHeaderProp)) { - obj["proxyHeader"] = proxyHeaderProp - } - serviceProp, err := expandComputeRegionTargetTcpProxyBackendService(d.Get("backend_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend_service"); !isEmptyValue(reflect.ValueOf(serviceProp)) && (ok || !reflect.DeepEqual(v, 
serviceProp)) { - obj["service"] = serviceProp - } - proxyBindProp, err := expandComputeRegionTargetTcpProxyProxyBind(d.Get("proxy_bind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_bind"); !isEmptyValue(reflect.ValueOf(proxyBindProp)) && (ok || !reflect.DeepEqual(v, proxyBindProp)) { - obj["proxyBind"] = proxyBindProp - } - regionProp, err := expandComputeRegionTargetTcpProxyRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetTcpProxies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new RegionTargetTcpProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionTargetTcpProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating RegionTargetTcpProxy: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetTcpProxies/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating RegionTargetTcpProxy", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create RegionTargetTcpProxy: %s", err) - } - - log.Printf("[DEBUG] Finished creating 
RegionTargetTcpProxy %q: %#v", d.Id(), res) - - return resourceComputeRegionTargetTcpProxyRead(d, meta) -} - -func resourceComputeRegionTargetTcpProxyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetTcpProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionTargetTcpProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionTargetTcpProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeRegionTargetTcpProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) - } - if err := d.Set("description", flattenComputeRegionTargetTcpProxyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) - } - if err := d.Set("proxy_id", flattenComputeRegionTargetTcpProxyProxyId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) - } - if err := d.Set("name", flattenComputeRegionTargetTcpProxyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) - } - if err := d.Set("proxy_header", 
flattenComputeRegionTargetTcpProxyProxyHeader(res["proxyHeader"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) - } - if err := d.Set("backend_service", flattenComputeRegionTargetTcpProxyBackendService(res["service"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) - } - if err := d.Set("proxy_bind", flattenComputeRegionTargetTcpProxyProxyBind(res["proxyBind"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) - } - if err := d.Set("region", flattenComputeRegionTargetTcpProxyRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) - } - - return nil -} - -func resourceComputeRegionTargetTcpProxyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RegionTargetTcpProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetTcpProxies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting RegionTargetTcpProxy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionTargetTcpProxy") - } - - err = ComputeOperationWaitTime( 
- config, res, project, "Deleting RegionTargetTcpProxy", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting RegionTargetTcpProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionTargetTcpProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/targetTcpProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetTcpProxies/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeRegionTargetTcpProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetTcpProxyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetTcpProxyProxyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeRegionTargetTcpProxyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetTcpProxyProxyHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetTcpProxyBackendService(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionTargetTcpProxyProxyBind(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetTcpProxyRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeRegionTargetTcpProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionTargetTcpProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionTargetTcpProxyProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionTargetTcpProxyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for backend_service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionTargetTcpProxyProxyBind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionTargetTcpProxyRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_route.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_route.go deleted file mode 100644 index 73cd1c13d3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_route.go +++ /dev/null @@ -1,640 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeRoute() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRouteCreate, - Read: resourceComputeRouteRead, - Delete: resourceComputeRouteDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeRouteImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "dest_range": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The destination range of outgoing packets that this route applies to. -Only IPv4 is supported.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z]([-a-z0-9]*[a-z0-9])?$`), - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. 
Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the -last character, which cannot be a dash.`, - }, - "network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The network that this route applies to.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property -when you create the resource.`, - }, - "next_hop_gateway": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL to a gateway that should handle matching packets. -Currently, you can only specify the internet gateway, using a full or -partial valid URL: -* 'https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway' -* 'projects/project/global/gateways/default-internet-gateway' -* 'global/gateways/default-internet-gateway' -* The string 'default-internet-gateway'.`, - ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, - }, - "next_hop_ilb": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareIpAddressOrSelfLinkOrResourceName, - Description: `The IP address or URL to a forwarding rule of type -loadBalancingScheme=INTERNAL that should handle matching -packets. - -With the GA provider you can only specify the forwarding -rule as a partial or full URL. 
For example, the following -are all valid values: -* 10.128.0.56 -* https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule -* regions/region/forwardingRules/forwardingRule - -When the beta provider, you can also specify the IP address -of a forwarding rule from the same VPC or any peered VPC. - -Note that this can only be used when the destinationRange is -a public (non-RFC 1918) IP CIDR range.`, - ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, - }, - "next_hop_instance": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL to an instance that should handle matching packets. -You can specify this as a full or partial URL. For example: -* 'https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance' -* 'projects/project/zones/zone/instances/instance' -* 'zones/zone/instances/instance' -* Just the instance name, with the zone in 'next_hop_instance_zone'.`, - ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, - }, - "next_hop_ip": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Network IP address of an instance that should handle matching packets.`, - ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, - }, - "next_hop_vpn_tunnel": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL to a VpnTunnel that should handle matching packets.`, - ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, - }, - "priority": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The priority of this route. 
Priority is used to break ties in cases -where there is more than one matching route of equal prefix length. - -In the case of two routes with equal prefix length, the one with the -lowest-numbered priority value wins. - -Default value is 1000. Valid range is 0 through 65535.`, - Default: 1000, - }, - "tags": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Description: `A list of instance tags to which this route applies.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "next_hop_network": { - Type: schema.TypeString, - Computed: true, - Description: `URL to a Network that should handle matching packets.`, - }, - "next_hop_instance_zone": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: "The zone of the instance specified in next_hop_instance. Omit if next_hop_instance is specified as a URL.", - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - destRangeProp, err := expandComputeRouteDestRange(d.Get("dest_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dest_range"); !isEmptyValue(reflect.ValueOf(destRangeProp)) && (ok || !reflect.DeepEqual(v, destRangeProp)) { - obj["destRange"] = destRangeProp - } - descriptionProp, err := expandComputeRouteDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, 
err := expandComputeRouteName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandComputeRouteNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - priorityProp, err := expandComputeRoutePriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); ok || !reflect.DeepEqual(v, priorityProp) { - obj["priority"] = priorityProp - } - tagsProp, err := expandComputeRouteTags(d.Get("tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tags"); !isEmptyValue(reflect.ValueOf(tagsProp)) && (ok || !reflect.DeepEqual(v, tagsProp)) { - obj["tags"] = tagsProp - } - nextHopGatewayProp, err := expandComputeRouteNextHopGateway(d.Get("next_hop_gateway"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("next_hop_gateway"); !isEmptyValue(reflect.ValueOf(nextHopGatewayProp)) && (ok || !reflect.DeepEqual(v, nextHopGatewayProp)) { - obj["nextHopGateway"] = nextHopGatewayProp - } - nextHopInstanceProp, err := expandComputeRouteNextHopInstance(d.Get("next_hop_instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("next_hop_instance"); !isEmptyValue(reflect.ValueOf(nextHopInstanceProp)) && (ok || !reflect.DeepEqual(v, nextHopInstanceProp)) { - obj["nextHopInstance"] = nextHopInstanceProp - } - nextHopIpProp, err := expandComputeRouteNextHopIp(d.Get("next_hop_ip"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("next_hop_ip"); !isEmptyValue(reflect.ValueOf(nextHopIpProp)) && (ok || !reflect.DeepEqual(v, nextHopIpProp)) { - obj["nextHopIp"] 
= nextHopIpProp - } - nextHopVpnTunnelProp, err := expandComputeRouteNextHopVpnTunnel(d.Get("next_hop_vpn_tunnel"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("next_hop_vpn_tunnel"); !isEmptyValue(reflect.ValueOf(nextHopVpnTunnelProp)) && (ok || !reflect.DeepEqual(v, nextHopVpnTunnelProp)) { - obj["nextHopVpnTunnel"] = nextHopVpnTunnelProp - } - nextHopIlbProp, err := expandComputeRouteNextHopIlb(d.Get("next_hop_ilb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("next_hop_ilb"); !isEmptyValue(reflect.ValueOf(nextHopIlbProp)) && (ok || !reflect.DeepEqual(v, nextHopIlbProp)) { - obj["nextHopIlb"] = nextHopIlbProp - } - - lockName, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/peerings") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/routes") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Route: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Route: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isPeeringOperationInProgress) - if err != nil { - return fmt.Errorf("Error creating Route: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/routes/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating Route", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource 
didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Route: %s", err) - } - - log.Printf("[DEBUG] Finished creating Route %q: %#v", d.Id(), res) - - return resourceComputeRouteRead(d, meta) -} - -func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/routes/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Route: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isPeeringOperationInProgress) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRoute %q", d.Id())) - } - - res, err = resourceComputeRouteDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing ComputeRoute because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Route: %s", err) - } - - if err := d.Set("dest_range", flattenComputeRouteDestRange(res["destRange"], d, config)); err != nil { - return fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("description", flattenComputeRouteDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("name", flattenComputeRouteName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("network", flattenComputeRouteNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("priority", flattenComputeRoutePriority(res["priority"], d, config)); err != nil { - return fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("tags", flattenComputeRouteTags(res["tags"], d, config)); err != nil { - return fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("next_hop_gateway", flattenComputeRouteNextHopGateway(res["nextHopGateway"], d, config)); err != nil { - return fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("next_hop_instance", flattenComputeRouteNextHopInstance(res["nextHopInstance"], d, config)); err != nil { - return fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("next_hop_ip", flattenComputeRouteNextHopIp(res["nextHopIp"], d, config)); err != nil { - return fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("next_hop_vpn_tunnel", flattenComputeRouteNextHopVpnTunnel(res["nextHopVpnTunnel"], d, config)); err != nil { - return fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("next_hop_network", flattenComputeRouteNextHopNetwork(res["nextHopNetwork"], d, config)); err != nil { - 
return fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("next_hop_ilb", flattenComputeRouteNextHopIlb(res["nextHopIlb"], d, config)); err != nil { - return fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading Route: %s", err) - } - - return nil -} - -func resourceComputeRouteDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Route: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/peerings") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/routes/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Route %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isPeeringOperationInProgress) - if err != nil { - return handleNotFoundError(err, d, "Route") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting Route", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Route %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRouteImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := 
parseImportId([]string{ - "projects/(?P[^/]+)/global/routes/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/routes/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeRouteDestRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouteDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouteName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouteNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRoutePriority(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeRouteTags(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenComputeRouteNextHopGateway(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouteNextHopInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRouteNextHopIp(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouteNextHopVpnTunnel(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRouteNextHopNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouteNextHopIlb(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeRouteDestRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouteDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouteName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouteNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRoutePriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouteTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v.(*schema.Set).List(), nil -} - -func expandComputeRouteNextHopGateway(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == "default-internet-gateway" { - return replaceVars(d, config, "projects/{{project}}/global/gateways/default-internet-gateway") - } else { - return v, nil - } -} - -func expandComputeRouteNextHopInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == "" { - return v, nil - } - val, err := parseZonalFieldValue("instances", 
v.(string), "project", "next_hop_instance_zone", d, config, true) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - nextInstance, err := config.NewComputeClient(userAgent).Instances.Get(val.Project, val.Zone, val.Name).Do() - if err != nil { - return nil, err - } - return nextInstance.SelfLink, nil -} - -func expandComputeRouteNextHopIp(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouteNextHopVpnTunnel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("vpnTunnels", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for next_hop_vpn_tunnel: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRouteNextHopIlb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeRouteDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v, ok := res["nextHopInstance"]; ok { - val, err := parseZonalFieldValue("instances", v.(string), "project", "next_hop_instance_zone", d, meta.(*Config), true) - if err != nil { - return nil, err - } - if err := d.Set("next_hop_instance_zone", val.Zone); err != nil { - return nil, fmt.Errorf("Error setting next_hop_instance_zone: %s", err) - } - res["nextHopInstance"] = val.RelativeLink() - } - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_router.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_router.go deleted file mode 100644 index 1013d2ed65..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_router.go +++ /dev/null @@ -1,749 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// customizeDiff func for additional checks on google_compute_router properties: -func resourceComputeRouterCustomDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { - - block := diff.Get("bgp.0").(map[string]interface{}) - advertiseMode := block["advertise_mode"] - advertisedGroups := block["advertised_groups"].([]interface{}) - advertisedIPRanges := block["advertised_ip_ranges"].([]interface{}) - - if advertiseMode == "DEFAULT" && len(advertisedGroups) != 0 { - return fmt.Errorf("Error in bgp: advertised_groups cannot be specified when using advertise_mode DEFAULT") - } - if advertiseMode == "DEFAULT" && len(advertisedIPRanges) != 0 { - return fmt.Errorf("Error in bgp: advertised_ip_ranges cannot be specified when using advertise_mode DEFAULT") - } - - return nil -} - -func ResourceComputeRouter() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRouterCreate, - Read: resourceComputeRouterRead, - Update: resourceComputeRouterUpdate, - Delete: resourceComputeRouterDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeRouterImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: 
schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - CustomizeDiff: resourceComputeRouterCustomDiff, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCEName, - Description: `Name of the resource. The name must be 1-63 characters long, and -comply with RFC1035. Specifically, the name must be 1-63 characters -long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' -which means the first character must be a lowercase letter, and all -following characters must be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the network to which this router belongs.`, - }, - "bgp": { - Type: schema.TypeList, - Optional: true, - Description: `BGP information specific to this router.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "asn": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validateRFC6996Asn, - Description: `Local BGP Autonomous System Number (ASN). Must be an RFC6996 -private ASN, either 16-bit or 32-bit. The value will be fixed for -this router resource. All VPN tunnels that link to this router -will have the same local ASN.`, - }, - "advertise_mode": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"DEFAULT", "CUSTOM", ""}), - Description: `User-specified flag to indicate which mode to use for advertisement. Default value: "DEFAULT" Possible values: ["DEFAULT", "CUSTOM"]`, - Default: "DEFAULT", - }, - "advertised_groups": { - Type: schema.TypeList, - Optional: true, - Description: `User-specified list of prefix groups to advertise in custom mode. 
-This field can only be populated if advertiseMode is CUSTOM and -is advertised to all peers of the router. These groups will be -advertised in addition to any specified prefixes. Leave this field -blank to advertise no custom groups. - -This enum field has the one valid value: ALL_SUBNETS`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "advertised_ip_ranges": { - Type: schema.TypeList, - Optional: true, - Description: `User-specified list of individual IP ranges to advertise in -custom mode. This field can only be populated if advertiseMode -is CUSTOM and is advertised to all peers of the router. These IP -ranges will be advertised in addition to any specified groups. -Leave this field blank to advertise no custom IP ranges.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "range": { - Type: schema.TypeString, - Required: true, - Description: `The IP range to advertise. The value must be a -CIDR-formatted string.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `User-specified description for the IP range.`, - }, - }, - }, - }, - "keepalive_interval": { - Type: schema.TypeInt, - Optional: true, - Description: `The interval in seconds between BGP keepalive messages that are sent -to the peer. Hold time is three times the interval at which keepalive -messages are sent, and the hold time is the maximum number of seconds -allowed to elapse between successive keepalive messages that BGP -receives from a peer. - -BGP will use the smaller of either the local hold time value or the -peer's hold time value as the hold time for the BGP connection -between the two peers. If set, this value must be between 20 and 60. 
-The default is 20.`, - Default: 20, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `An optional description of this resource.`, - }, - "encrypted_interconnect_router": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Indicates if a router is dedicated for use with encrypted VLAN -attachments (interconnectAttachments).`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Region where the router resides.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRouterCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeRouterName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeRouterDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); ok || !reflect.DeepEqual(v, descriptionProp) { - obj["description"] = descriptionProp - } - networkProp, err := expandComputeRouterNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = 
networkProp - } - bgpProp, err := expandComputeRouterBgp(d.Get("bgp"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bgp"); ok || !reflect.DeepEqual(v, bgpProp) { - obj["bgp"] = bgpProp - } - encryptedInterconnectRouterProp, err := expandComputeRouterEncryptedInterconnectRouter(d.Get("encrypted_interconnect_router"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("encrypted_interconnect_router"); !isEmptyValue(reflect.ValueOf(encryptedInterconnectRouterProp)) && (ok || !reflect.DeepEqual(v, encryptedInterconnectRouterProp)) { - obj["encryptedInterconnectRouter"] = encryptedInterconnectRouterProp - } - regionProp, err := expandComputeRouterRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - lockName, err := replaceVars(d, config, "router/{{region}}/{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Router: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Router: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Router: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{name}}") - if err != nil { - return 
fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating Router", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Router: %s", err) - } - - log.Printf("[DEBUG] Finished creating Router %q: %#v", d.Id(), res) - - return resourceComputeRouterRead(d, meta) -} - -func resourceComputeRouterRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Router: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRouter %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Router: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeRouterCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("name", flattenComputeRouterName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("description", flattenComputeRouterDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("network", 
flattenComputeRouterNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("bgp", flattenComputeRouterBgp(res["bgp"], d, config)); err != nil { - return fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("encrypted_interconnect_router", flattenComputeRouterEncryptedInterconnectRouter(res["encryptedInterconnectRouter"], d, config)); err != nil { - return fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("region", flattenComputeRouterRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading Router: %s", err) - } - - return nil -} - -func resourceComputeRouterUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Router: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeRouterDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); ok || !reflect.DeepEqual(v, descriptionProp) { - obj["description"] = descriptionProp - } - bgpProp, err := expandComputeRouterBgp(d.Get("bgp"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bgp"); ok || !reflect.DeepEqual(v, bgpProp) { - obj["bgp"] = bgpProp - } - - lockName, err := replaceVars(d, config, "router/{{region}}/{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{name}}") - 
if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Router %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Router %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Router %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating Router", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeRouterRead(d, meta) -} - -func resourceComputeRouterDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Router: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "router/{{region}}/{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Router %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Router") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting Router", userAgent, - 
d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Router %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRouterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/routers/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeRouterCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRouterBgp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["asn"] = - flattenComputeRouterBgpAsn(original["asn"], d, config) - transformed["advertise_mode"] = - flattenComputeRouterBgpAdvertiseMode(original["advertiseMode"], d, config) - transformed["advertised_groups"] = - flattenComputeRouterBgpAdvertisedGroups(original["advertisedGroups"], d, config) - transformed["advertised_ip_ranges"] = - 
flattenComputeRouterBgpAdvertisedIpRanges(original["advertisedIpRanges"], d, config) - transformed["keepalive_interval"] = - flattenComputeRouterBgpKeepaliveInterval(original["keepaliveInterval"], d, config) - return []interface{}{transformed} -} -func flattenComputeRouterBgpAsn(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeRouterBgpAdvertiseMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterBgpAdvertisedGroups(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterBgpAdvertisedIpRanges(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "range": flattenComputeRouterBgpAdvertisedIpRangesRange(original["range"], d, config), - "description": flattenComputeRouterBgpAdvertisedIpRangesDescription(original["description"], d, config), - }) - } - return transformed -} -func flattenComputeRouterBgpAdvertisedIpRangesRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterBgpAdvertisedIpRangesDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterBgpKeepaliveInterval(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeRouterEncryptedInterconnectRouter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeRouterName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRouterBgp(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAsn, err := expandComputeRouterBgpAsn(original["asn"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAsn); val.IsValid() && !isEmptyValue(val) { - transformed["asn"] = transformedAsn - } - - transformedAdvertiseMode, err := expandComputeRouterBgpAdvertiseMode(original["advertise_mode"], d, config) - if err != nil { - return nil, err - } 
else if val := reflect.ValueOf(transformedAdvertiseMode); val.IsValid() && !isEmptyValue(val) { - transformed["advertiseMode"] = transformedAdvertiseMode - } - - transformedAdvertisedGroups, err := expandComputeRouterBgpAdvertisedGroups(original["advertised_groups"], d, config) - if err != nil { - return nil, err - } else { - transformed["advertisedGroups"] = transformedAdvertisedGroups - } - - transformedAdvertisedIpRanges, err := expandComputeRouterBgpAdvertisedIpRanges(original["advertised_ip_ranges"], d, config) - if err != nil { - return nil, err - } else { - transformed["advertisedIpRanges"] = transformedAdvertisedIpRanges - } - - transformedKeepaliveInterval, err := expandComputeRouterBgpKeepaliveInterval(original["keepalive_interval"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKeepaliveInterval); val.IsValid() && !isEmptyValue(val) { - transformed["keepaliveInterval"] = transformedKeepaliveInterval - } - - return transformed, nil -} - -func expandComputeRouterBgpAsn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterBgpAdvertiseMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterBgpAdvertisedGroups(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterBgpAdvertisedIpRanges(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRange, err := expandComputeRouterBgpAdvertisedIpRangesRange(original["range"], d, config) - if err != nil { - return nil, err - } else { - transformed["range"] = transformedRange - } - - transformedDescription, err := 
expandComputeRouterBgpAdvertisedIpRangesDescription(original["description"], d, config) - if err != nil { - return nil, err - } else { - transformed["description"] = transformedDescription - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRouterBgpAdvertisedIpRangesRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterBgpAdvertisedIpRangesDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterBgpKeepaliveInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterEncryptedInterconnectRouter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_router_peer.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_router_peer.go deleted file mode 100644 index 4c090ada1b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_router_peer.go +++ /dev/null @@ -1,1172 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. 
-// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strconv" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeRouterBgpPeer() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeRouterBgpPeerCreate, - Read: resourceComputeRouterBgpPeerRead, - Update: resourceComputeRouterBgpPeerUpdate, - Delete: resourceComputeRouterBgpPeerDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeRouterBgpPeerImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "interface": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the interface the BGP peer is associated with.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRFC1035Name(2, 63), - Description: `Name of this BGP peer. The name must be 1-63 characters long, -and comply with RFC1035. Specifically, the name must be 1-63 characters -long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which -means the first character must be a lowercase letter, and all -following characters must be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "peer_asn": { - Type: schema.TypeInt, - Required: true, - Description: `Peer BGP Autonomous System Number (ASN). -Each BGP interface may use a different value.`, - }, - "peer_ip_address": { - Type: schema.TypeString, - Required: true, - Description: `IP address of the BGP interface outside Google Cloud Platform. 
-Only IPv4 is supported.`, - }, - "router": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the Cloud Router in which this BgpPeer will be configured.`, - }, - "advertise_mode": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"DEFAULT", "CUSTOM", ""}), - Description: `User-specified flag to indicate which mode to use for advertisement. -Valid values of this enum field are: 'DEFAULT', 'CUSTOM' Default value: "DEFAULT" Possible values: ["DEFAULT", "CUSTOM"]`, - Default: "DEFAULT", - }, - "advertised_groups": { - Type: schema.TypeList, - Optional: true, - Description: `User-specified list of prefix groups to advertise in custom -mode, which can take one of the following options: - -* 'ALL_SUBNETS': Advertises all available subnets, including peer VPC subnets. -* 'ALL_VPC_SUBNETS': Advertises the router's own VPC subnets. -* 'ALL_PEER_VPC_SUBNETS': Advertises peer subnets of the router's VPC network. - - -Note that this field can only be populated if advertiseMode is 'CUSTOM' -and overrides the list defined for the router (in the "bgp" message). -These groups are advertised in addition to any specified prefixes. -Leave this field blank to advertise no custom groups.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "advertised_ip_ranges": { - Type: schema.TypeList, - Optional: true, - Description: `User-specified list of individual IP ranges to advertise in -custom mode. This field can only be populated if advertiseMode -is 'CUSTOM' and is advertised to all peers of the router. These IP -ranges will be advertised in addition to any specified groups. -Leave this field blank to advertise no custom IP ranges.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "range": { - Type: schema.TypeString, - Required: true, - Description: `The IP range to advertise. 
The value must be a -CIDR-formatted string.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `User-specified description for the IP range.`, - }, - }, - }, - }, - "advertised_route_priority": { - Type: schema.TypeInt, - Optional: true, - Description: `The priority of routes advertised to this BGP peer. -Where there is more than one matching route of maximum -length, the routes with the lowest priority value win.`, - }, - "bfd": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `BFD configuration for the BGP peering.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "session_initialization_mode": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"ACTIVE", "DISABLED", "PASSIVE"}), - Description: `The BFD session initialization mode for this BGP peer. -If set to 'ACTIVE', the Cloud Router will initiate the BFD session -for this BGP peer. If set to 'PASSIVE', the Cloud Router will wait -for the peer router to initiate the BFD session for this BGP peer. -If set to 'DISABLED', BFD is disabled for this BGP peer. Possible values: ["ACTIVE", "DISABLED", "PASSIVE"]`, - }, - "min_receive_interval": { - Type: schema.TypeInt, - Optional: true, - Description: `The minimum interval, in milliseconds, between BFD control packets -received from the peer router. The actual value is negotiated -between the two routers and is equal to the greater of this value -and the transmit interval of the other router. If set, this value -must be between 1000 and 30000.`, - Default: 1000, - }, - "min_transmit_interval": { - Type: schema.TypeInt, - Optional: true, - Description: `The minimum interval, in milliseconds, between BFD control packets -transmitted to the peer router. The actual value is negotiated -between the two routers and is equal to the greater of this value -and the corresponding receive interval of the other router. 
If set, -this value must be between 1000 and 30000.`, - Default: 1000, - }, - "multiplier": { - Type: schema.TypeInt, - Optional: true, - Description: `The number of consecutive BFD packets that must be missed before -BFD declares that a peer is unavailable. If set, the value must -be a value between 5 and 16.`, - Default: 5, - }, - }, - }, - }, - "enable": { - Type: schema.TypeBool, - Optional: true, - Description: `The status of the BGP peer connection. If set to false, any active session -with the peer is terminated and all associated routing information is removed. -If set to true, the peer connection can be established with routing information. -The default is true.`, - Default: true, - }, - "ip_address": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `IP address of the interface inside Google Cloud Platform. -Only IPv4 is supported.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Region where the router and BgpPeer reside. -If it is not provided, the provider region is used.`, - }, - "router_appliance_instance": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URI of the VM instance that is used as third-party router appliances -such as Next Gen Firewalls, Virtual Routers, or Router Appliances. -The VM instance must be located in zones contained in the same region as -this Cloud Router. The VM instance is the peer side of the BGP session.`, - }, - "management_type": { - Type: schema.TypeString, - Computed: true, - Description: `The resource that configures and manages this BGP peer. - -* 'MANAGED_BY_USER' is the default value and can be managed by -you or other users -* 'MANAGED_BY_ATTACHMENT' is a BGP peer that is configured and -managed by Cloud Interconnect, specifically by an -InterconnectAttachment of type PARTNER. 
Google automatically -creates, updates, and deletes this type of BGP peer when the -PARTNER InterconnectAttachment is created, updated, -or deleted.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRouterBgpPeerCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeRouterBgpPeerName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - interfaceNameProp, err := expandNestedComputeRouterBgpPeerInterface(d.Get("interface"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("interface"); !isEmptyValue(reflect.ValueOf(interfaceNameProp)) && (ok || !reflect.DeepEqual(v, interfaceNameProp)) { - obj["interfaceName"] = interfaceNameProp - } - ipAddressProp, err := expandNestedComputeRouterBgpPeerIpAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_address"); !isEmptyValue(reflect.ValueOf(ipAddressProp)) && (ok || !reflect.DeepEqual(v, ipAddressProp)) { - obj["ipAddress"] = ipAddressProp - } - peerIpAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpAddress(d.Get("peer_ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peer_ip_address"); !isEmptyValue(reflect.ValueOf(peerIpAddressProp)) && (ok || !reflect.DeepEqual(v, peerIpAddressProp)) { - obj["peerIpAddress"] = peerIpAddressProp - } - peerAsnProp, err := expandNestedComputeRouterBgpPeerPeerAsn(d.Get("peer_asn"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peer_asn"); 
!isEmptyValue(reflect.ValueOf(peerAsnProp)) && (ok || !reflect.DeepEqual(v, peerAsnProp)) { - obj["peerAsn"] = peerAsnProp - } - advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { - obj["advertisedRoutePriority"] = advertisedRoutePriorityProp - } - advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertise_mode"); !isEmptyValue(reflect.ValueOf(advertiseModeProp)) && (ok || !reflect.DeepEqual(v, advertiseModeProp)) { - obj["advertiseMode"] = advertiseModeProp - } - advertisedGroupsProp, err := expandNestedComputeRouterBgpPeerAdvertisedGroups(d.Get("advertised_groups"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertised_groups"); ok || !reflect.DeepEqual(v, advertisedGroupsProp) { - obj["advertisedGroups"] = advertisedGroupsProp - } - advertisedIpRangesProp, err := expandNestedComputeRouterBgpPeerAdvertisedIpRanges(d.Get("advertised_ip_ranges"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertised_ip_ranges"); ok || !reflect.DeepEqual(v, advertisedIpRangesProp) { - obj["advertisedIpRanges"] = advertisedIpRangesProp - } - bfdProp, err := expandNestedComputeRouterBgpPeerBfd(d.Get("bfd"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bfd"); !isEmptyValue(reflect.ValueOf(bfdProp)) && (ok || !reflect.DeepEqual(v, bfdProp)) { - obj["bfd"] = bfdProp - } - enableProp, err := expandNestedComputeRouterBgpPeerEnable(d.Get("enable"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable"); ok || !reflect.DeepEqual(v, enableProp) { - obj["enable"] = enableProp - } - 
routerApplianceInstanceProp, err := expandNestedComputeRouterBgpPeerRouterApplianceInstance(d.Get("router_appliance_instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("router_appliance_instance"); !isEmptyValue(reflect.ValueOf(routerApplianceInstanceProp)) && (ok || !reflect.DeepEqual(v, routerApplianceInstanceProp)) { - obj["routerApplianceInstance"] = routerApplianceInstanceProp - } - - lockName, err := replaceVars(d, config, "router/{{region}}/{{router}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new RouterBgpPeer: %#v", obj) - - obj, err = resourceComputeRouterBgpPeerPatchCreateEncoder(d, meta, obj) - if err != nil { - return err - } - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating RouterBgpPeer: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{router}}/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating RouterBgpPeer", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create RouterBgpPeer: %s", err) - } - - log.Printf("[DEBUG] 
Finished creating RouterBgpPeer %q: %#v", d.Id(), res) - - return resourceComputeRouterBgpPeerRead(d, meta) -} - -func resourceComputeRouterBgpPeerRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRouterBgpPeer %q", d.Id())) - } - - res, err = flattenNestedComputeRouterBgpPeer(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. 
- log.Printf("[DEBUG] Removing ComputeRouterBgpPeer because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - - if err := d.Set("name", flattenNestedComputeRouterBgpPeerName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("interface", flattenNestedComputeRouterBgpPeerInterface(res["interfaceName"], d, config)); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("ip_address", flattenNestedComputeRouterBgpPeerIpAddress(res["ipAddress"], d, config)); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("peer_ip_address", flattenNestedComputeRouterBgpPeerPeerIpAddress(res["peerIpAddress"], d, config)); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("peer_asn", flattenNestedComputeRouterBgpPeerPeerAsn(res["peerAsn"], d, config)); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("advertised_route_priority", flattenNestedComputeRouterBgpPeerAdvertisedRoutePriority(res["advertisedRoutePriority"], d, config)); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("advertise_mode", flattenNestedComputeRouterBgpPeerAdvertiseMode(res["advertiseMode"], d, config)); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("advertised_groups", flattenNestedComputeRouterBgpPeerAdvertisedGroups(res["advertisedGroups"], d, config)); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("advertised_ip_ranges", flattenNestedComputeRouterBgpPeerAdvertisedIpRanges(res["advertisedIpRanges"], d, config)); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := 
d.Set("management_type", flattenNestedComputeRouterBgpPeerManagementType(res["managementType"], d, config)); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("bfd", flattenNestedComputeRouterBgpPeerBfd(res["bfd"], d, config)); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("enable", flattenNestedComputeRouterBgpPeerEnable(res["enable"], d, config)); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("router_appliance_instance", flattenNestedComputeRouterBgpPeerRouterApplianceInstance(res["routerApplianceInstance"], d, config)); err != nil { - return fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - - return nil -} - -func resourceComputeRouterBgpPeerUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - ipAddressProp, err := expandNestedComputeRouterBgpPeerIpAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_address"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ipAddressProp)) { - obj["ipAddress"] = ipAddressProp - } - peerIpAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpAddress(d.Get("peer_ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peer_ip_address"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peerIpAddressProp)) { - obj["peerIpAddress"] = peerIpAddressProp - } - peerAsnProp, err := expandNestedComputeRouterBgpPeerPeerAsn(d.Get("peer_asn"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("peer_asn"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peerAsnProp)) { - obj["peerAsn"] = peerAsnProp - } - advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { - obj["advertisedRoutePriority"] = advertisedRoutePriorityProp - } - advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertise_mode"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, advertiseModeProp)) { - obj["advertiseMode"] = advertiseModeProp - } - advertisedGroupsProp, err := expandNestedComputeRouterBgpPeerAdvertisedGroups(d.Get("advertised_groups"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertised_groups"); ok || !reflect.DeepEqual(v, advertisedGroupsProp) { - obj["advertisedGroups"] = advertisedGroupsProp - } - advertisedIpRangesProp, err := expandNestedComputeRouterBgpPeerAdvertisedIpRanges(d.Get("advertised_ip_ranges"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertised_ip_ranges"); ok || !reflect.DeepEqual(v, advertisedIpRangesProp) { - obj["advertisedIpRanges"] = advertisedIpRangesProp - } - bfdProp, err := expandNestedComputeRouterBgpPeerBfd(d.Get("bfd"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bfd"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bfdProp)) { - obj["bfd"] = bfdProp - } - enableProp, err := expandNestedComputeRouterBgpPeerEnable(d.Get("enable"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable"); ok || !reflect.DeepEqual(v, enableProp) { - obj["enable"] = enableProp - } - 
routerApplianceInstanceProp, err := expandNestedComputeRouterBgpPeerRouterApplianceInstance(d.Get("router_appliance_instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("router_appliance_instance"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, routerApplianceInstanceProp)) { - obj["routerApplianceInstance"] = routerApplianceInstanceProp - } - - lockName, err := replaceVars(d, config, "router/{{region}}/{{router}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating RouterBgpPeer %q: %#v", d.Id(), obj) - - obj, err = resourceComputeRouterBgpPeerPatchUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating RouterBgpPeer %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating RouterBgpPeer %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating RouterBgpPeer", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeRouterBgpPeerRead(d, meta) -} - -func resourceComputeRouterBgpPeerDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) - } - billingProject = 
project - - lockName, err := replaceVars(d, config, "router/{{region}}/{{router}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - obj, err = resourceComputeRouterBgpPeerPatchDeleteEncoder(d, meta, obj) - if err != nil { - return handleNotFoundError(err, d, "RouterBgpPeer") - } - log.Printf("[DEBUG] Deleting RouterBgpPeer %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RouterBgpPeer") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting RouterBgpPeer", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting RouterBgpPeer %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRouterBgpPeerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/routers/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{router}}/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNestedComputeRouterBgpPeerName(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerInterface(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerPeerIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerPeerAsn(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenNestedComputeRouterBgpPeerAdvertisedRoutePriority(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenNestedComputeRouterBgpPeerAdvertiseMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { - return "DEFAULT" - } - - return v -} - -func flattenNestedComputeRouterBgpPeerAdvertisedGroups(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerAdvertisedIpRanges(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - 
original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "range": flattenNestedComputeRouterBgpPeerAdvertisedIpRangesRange(original["range"], d, config), - "description": flattenNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(original["description"], d, config), - }) - } - return transformed -} -func flattenNestedComputeRouterBgpPeerAdvertisedIpRangesRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerManagementType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerBfd(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["session_initialization_mode"] = - flattenNestedComputeRouterBgpPeerBfdSessionInitializationMode(original["sessionInitializationMode"], d, config) - transformed["min_transmit_interval"] = - flattenNestedComputeRouterBgpPeerBfdMinTransmitInterval(original["minTransmitInterval"], d, config) - transformed["min_receive_interval"] = - flattenNestedComputeRouterBgpPeerBfdMinReceiveInterval(original["minReceiveInterval"], d, config) - transformed["multiplier"] = - flattenNestedComputeRouterBgpPeerBfdMultiplier(original["multiplier"], d, config) - return []interface{}{transformed} -} -func flattenNestedComputeRouterBgpPeerBfdSessionInitializationMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerBfdMinTransmitInterval(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenNestedComputeRouterBgpPeerBfdMinReceiveInterval(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenNestedComputeRouterBgpPeerBfdMultiplier(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenNestedComputeRouterBgpPeerEnable(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return true - } - b, err := strconv.ParseBool(v.(string)) - if err != nil { - // If we can't convert it into a bool return value as is and let caller handle it - return v - } - return b -} - -func flattenNestedComputeRouterBgpPeerRouterApplianceInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandNestedComputeRouterBgpPeerName(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerInterface(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerPeerIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerPeerAsn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerAdvertiseMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerAdvertisedGroups(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerAdvertisedIpRanges(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRange, err := expandNestedComputeRouterBgpPeerAdvertisedIpRangesRange(original["range"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRange); val.IsValid() && !isEmptyValue(val) { - transformed["range"] = transformedRange - } - - transformedDescription, err := expandNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(original["description"], d, config) - if err != nil { - return nil, err - } else { - transformed["description"] = transformedDescription - } - - req = append(req, 
transformed) - } - return req, nil -} - -func expandNestedComputeRouterBgpPeerAdvertisedIpRangesRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerBfd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSessionInitializationMode, err := expandNestedComputeRouterBgpPeerBfdSessionInitializationMode(original["session_initialization_mode"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSessionInitializationMode); val.IsValid() && !isEmptyValue(val) { - transformed["sessionInitializationMode"] = transformedSessionInitializationMode - } - - transformedMinTransmitInterval, err := expandNestedComputeRouterBgpPeerBfdMinTransmitInterval(original["min_transmit_interval"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinTransmitInterval); val.IsValid() && !isEmptyValue(val) { - transformed["minTransmitInterval"] = transformedMinTransmitInterval - } - - transformedMinReceiveInterval, err := expandNestedComputeRouterBgpPeerBfdMinReceiveInterval(original["min_receive_interval"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinReceiveInterval); val.IsValid() && !isEmptyValue(val) { - transformed["minReceiveInterval"] = transformedMinReceiveInterval - } - - transformedMultiplier, err := expandNestedComputeRouterBgpPeerBfdMultiplier(original["multiplier"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMultiplier); 
val.IsValid() && !isEmptyValue(val) { - transformed["multiplier"] = transformedMultiplier - } - - return transformed, nil -} - -func expandNestedComputeRouterBgpPeerBfdSessionInitializationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerBfdMinTransmitInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerBfdMinReceiveInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerBfdMultiplier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return nil, nil - } - - return strings.ToUpper(strconv.FormatBool(v.(bool))), nil -} - -func expandNestedComputeRouterBgpPeerRouterApplianceInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseZonalFieldValue("instances", v.(string), "project", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for router_appliance_instance: %s", err) - } - return f.RelativeLink(), nil -} - -func flattenNestedComputeRouterBgpPeer(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["bgpPeers"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value bgpPeers. 
Actual value: %v", v) - } - - _, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeRouterBgpPeerFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputeRouterBgpPeerName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeRouterBgpPeerName(expectedName, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemName := flattenNestedComputeRouterBgpPeerName(item["name"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { - log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -// PatchCreateEncoder handles creating request data to PATCH parent resource -// with list including new object. -func resourceComputeRouterBgpPeerPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceComputeRouterBgpPeerListForPatch(d, meta) - if err != nil { - return nil, err - } - - _, found, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - - // Return error if item already created. 
- if found != nil { - return nil, fmt.Errorf("Unable to create RouterBgpPeer, existing object already found: %+v", found) - } - - // Return list with the resource to create appended - res := map[string]interface{}{ - "bgpPeers": append(currItems, obj), - } - - return res, nil -} - -// PatchUpdateEncoder handles creating request data to PATCH parent resource -// with list including updated object. -func resourceComputeRouterBgpPeerPatchUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - items, err := resourceComputeRouterBgpPeerListForPatch(d, meta) - if err != nil { - return nil, err - } - - idx, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, items) - if err != nil { - return nil, err - } - - // Return error if item to update does not exist. - if item == nil { - return nil, fmt.Errorf("Unable to update RouterBgpPeer %q - not found in list", d.Id()) - } - - // Merge new object into old. - for k, v := range obj { - item[k] = v - } - items[idx] = item - - // Return list with new item added - res := map[string]interface{}{ - "bgpPeers": items, - } - - return res, nil -} - -// PatchDeleteEncoder handles creating request data to PATCH parent resource -// with list excluding object to delete. -func resourceComputeRouterBgpPeerPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceComputeRouterBgpPeerListForPatch(d, meta) - if err != nil { - return nil, err - } - - idx, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - if item == nil { - // Spoof 404 error for proper handling by Delete (i.e. no-op) - return nil, fake404("nested", "ComputeRouterBgpPeer") - } - - updatedItems := append(currItems[:idx], currItems[idx+1:]...) 
- res := map[string]interface{}{ - "bgpPeers": updatedItems, - } - - return res, nil -} - -// ListForPatch handles making API request to get parent resource and -// extracting list of objects. -func resourceComputeRouterBgpPeerListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { - config := meta.(*Config) - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return nil, err - } - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return nil, err - } - - var v interface{} - var ok bool - - v, ok = res["bgpPeers"] - if ok && v != nil { - ls, lsOk := v.([]interface{}) - if !lsOk { - return nil, fmt.Errorf(`expected list for nested field "bgpPeers"`) - } - return ls, nil - } - return nil, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_service_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_service_attachment.go deleted file mode 100644 index 36e058e883..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_service_attachment.go +++ /dev/null @@ -1,764 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeServiceAttachment() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeServiceAttachmentCreate, - Read: resourceComputeServiceAttachmentRead, - Update: resourceComputeServiceAttachmentUpdate, - Delete: resourceComputeServiceAttachmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeServiceAttachmentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "connection_preference": { - Type: schema.TypeString, - Required: true, - Description: `The connection preference to use for this service attachment. Valid -values include "ACCEPT_AUTOMATIC", "ACCEPT_MANUAL".`, - }, - "enable_proxy_protocol": { - Type: schema.TypeBool, - Required: true, - ForceNew: true, - Description: `If true, enable the proxy protocol which is for supplying client TCP/IP -address data in TCP connections that traverse proxies on their way to -destination servers.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. The name must be 1-63 characters long, and -comply with RFC1035. Specifically, the name must be 1-63 characters -long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
-which means the first character must be a lowercase letter, and all -following characters must be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "nat_subnets": { - Type: schema.TypeList, - Required: true, - Description: `An array of subnets that is provided for NAT in this service attachment.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "target_service": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of a forwarding rule that represents the service identified by -this service attachment.`, - }, - "consumer_accept_lists": { - Type: schema.TypeList, - Optional: true, - Description: `An array of projects that are allowed to connect to this service -attachment.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "connection_limit": { - Type: schema.TypeInt, - Required: true, - Description: `The number of consumer forwarding rules the consumer project can -create.`, - }, - "project_id_or_num": { - Type: schema.TypeString, - Required: true, - Description: `A project that is allowed to connect to this service attachment.`, - }, - }, - }, - }, - "consumer_reject_lists": { - Type: schema.TypeList, - Optional: true, - Description: `An array of projects that are not allowed to connect to this service -attachment.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `An optional description of this resource.`, - }, - "domain_names": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `If specified, the domain name will be used during the integration between -the PSC connected endpoints and the Cloud DNS. For example, this is a -valid domain name: "p.mycompany.com.". 
Current max number of domain names -supported is 1.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the region where the resource resides.`, - }, - "connected_endpoints": { - Type: schema.TypeList, - Computed: true, - Description: `An array of the consumer forwarding rules connected to this service -attachment.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "endpoint": { - Type: schema.TypeString, - Computed: true, - Description: `The URL of the consumer forwarding rule.`, - }, - "status": { - Type: schema.TypeString, - Computed: true, - Description: `The status of the connection from the consumer forwarding rule to -this service attachment.`, - }, - }, - }, - }, - "fingerprint": { - Type: schema.TypeString, - Computed: true, - Description: `Fingerprint of this resource. This field is used internally during -updates of this resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeServiceAttachmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeServiceAttachmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeServiceAttachmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); 
!isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - fingerprintProp, err := expandComputeServiceAttachmentFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - connectionPreferenceProp, err := expandComputeServiceAttachmentConnectionPreference(d.Get("connection_preference"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("connection_preference"); !isEmptyValue(reflect.ValueOf(connectionPreferenceProp)) && (ok || !reflect.DeepEqual(v, connectionPreferenceProp)) { - obj["connectionPreference"] = connectionPreferenceProp - } - targetServiceProp, err := expandComputeServiceAttachmentTargetService(d.Get("target_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_service"); !isEmptyValue(reflect.ValueOf(targetServiceProp)) && (ok || !reflect.DeepEqual(v, targetServiceProp)) { - obj["targetService"] = targetServiceProp - } - natSubnetsProp, err := expandComputeServiceAttachmentNatSubnets(d.Get("nat_subnets"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nat_subnets"); ok || !reflect.DeepEqual(v, natSubnetsProp) { - obj["natSubnets"] = natSubnetsProp - } - enableProxyProtocolProp, err := expandComputeServiceAttachmentEnableProxyProtocol(d.Get("enable_proxy_protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_proxy_protocol"); !isEmptyValue(reflect.ValueOf(enableProxyProtocolProp)) && (ok || !reflect.DeepEqual(v, enableProxyProtocolProp)) { - obj["enableProxyProtocol"] = enableProxyProtocolProp - } - domainNamesProp, err := expandComputeServiceAttachmentDomainNames(d.Get("domain_names"), d, config) - if err != nil { - 
return err - } else if v, ok := d.GetOkExists("domain_names"); !isEmptyValue(reflect.ValueOf(domainNamesProp)) && (ok || !reflect.DeepEqual(v, domainNamesProp)) { - obj["domainNames"] = domainNamesProp - } - consumerRejectListsProp, err := expandComputeServiceAttachmentConsumerRejectLists(d.Get("consumer_reject_lists"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("consumer_reject_lists"); ok || !reflect.DeepEqual(v, consumerRejectListsProp) { - obj["consumerRejectLists"] = consumerRejectListsProp - } - consumerAcceptListsProp, err := expandComputeServiceAttachmentConsumerAcceptLists(d.Get("consumer_accept_lists"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("consumer_accept_lists"); ok || !reflect.DeepEqual(v, consumerAcceptListsProp) { - obj["consumerAcceptLists"] = consumerAcceptListsProp - } - regionProp, err := expandComputeServiceAttachmentRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/serviceAttachments") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ServiceAttachment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ServiceAttachment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating ServiceAttachment: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, 
"projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating ServiceAttachment", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create ServiceAttachment: %s", err) - } - - log.Printf("[DEBUG] Finished creating ServiceAttachment %q: %#v", d.Id(), res) - - return resourceComputeServiceAttachmentRead(d, meta) -} - -func resourceComputeServiceAttachmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ServiceAttachment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeServiceAttachment %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - - if err := d.Set("name", flattenComputeServiceAttachmentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("description", flattenComputeServiceAttachmentDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceAttachment: %s", err) - } 
- if err := d.Set("fingerprint", flattenComputeServiceAttachmentFingerprint(res["fingerprint"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("connection_preference", flattenComputeServiceAttachmentConnectionPreference(res["connectionPreference"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("connected_endpoints", flattenComputeServiceAttachmentConnectedEndpoints(res["connectedEndpoints"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("target_service", flattenComputeServiceAttachmentTargetService(res["targetService"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("nat_subnets", flattenComputeServiceAttachmentNatSubnets(res["natSubnets"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("enable_proxy_protocol", flattenComputeServiceAttachmentEnableProxyProtocol(res["enableProxyProtocol"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("domain_names", flattenComputeServiceAttachmentDomainNames(res["domainNames"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("consumer_reject_lists", flattenComputeServiceAttachmentConsumerRejectLists(res["consumerRejectLists"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("consumer_accept_lists", flattenComputeServiceAttachmentConsumerAcceptLists(res["consumerAcceptLists"], d, config)); err != nil { - return fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("region", flattenComputeServiceAttachmentRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading 
ServiceAttachment: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - - return nil -} - -func resourceComputeServiceAttachmentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ServiceAttachment: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeServiceAttachmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - fingerprintProp, err := expandComputeServiceAttachmentFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - connectionPreferenceProp, err := expandComputeServiceAttachmentConnectionPreference(d.Get("connection_preference"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("connection_preference"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, connectionPreferenceProp)) { - obj["connectionPreference"] = connectionPreferenceProp - } - natSubnetsProp, err := expandComputeServiceAttachmentNatSubnets(d.Get("nat_subnets"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nat_subnets"); ok || !reflect.DeepEqual(v, natSubnetsProp) { - obj["natSubnets"] = natSubnetsProp - } - consumerRejectListsProp, err := 
expandComputeServiceAttachmentConsumerRejectLists(d.Get("consumer_reject_lists"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("consumer_reject_lists"); ok || !reflect.DeepEqual(v, consumerRejectListsProp) { - obj["consumerRejectLists"] = consumerRejectListsProp - } - consumerAcceptListsProp, err := expandComputeServiceAttachmentConsumerAcceptLists(d.Get("consumer_accept_lists"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("consumer_accept_lists"); ok || !reflect.DeepEqual(v, consumerAcceptListsProp) { - obj["consumerAcceptLists"] = consumerAcceptListsProp - } - - obj, err = resourceComputeServiceAttachmentUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating ServiceAttachment %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating ServiceAttachment %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating ServiceAttachment %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating ServiceAttachment", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeServiceAttachmentRead(d, meta) -} - -func resourceComputeServiceAttachmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - 
return fmt.Errorf("Error fetching project for ServiceAttachment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting ServiceAttachment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ServiceAttachment") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting ServiceAttachment", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting ServiceAttachment %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeServiceAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/serviceAttachments/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeServiceAttachmentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentFingerprint(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentConnectionPreference(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentConnectedEndpoints(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "endpoint": flattenComputeServiceAttachmentConnectedEndpointsEndpoint(original["endpoint"], d, config), - "status": flattenComputeServiceAttachmentConnectedEndpointsStatus(original["status"], d, config), - }) - } - return transformed -} -func flattenComputeServiceAttachmentConnectedEndpointsEndpoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentConnectedEndpointsStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentTargetService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeServiceAttachmentNatSubnets(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeServiceAttachmentEnableProxyProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentDomainNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentConsumerRejectLists(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentConsumerAcceptLists(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "project_id_or_num": flattenComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(original["projectIdOrNum"], d, config), - "connection_limit": flattenComputeServiceAttachmentConsumerAcceptListsConnectionLimit(original["connectionLimit"], d, config), - }) - } - return transformed -} -func flattenComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentConsumerAcceptListsConnectionLimit(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeServiceAttachmentRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeServiceAttachmentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentFingerprint(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentConnectionPreference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentTargetService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("forwardingRules", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for target_service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeServiceAttachmentNatSubnets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, fmt.Errorf("Invalid value for nat_subnets: nil") - } - f, err := parseRegionalFieldValue("subnetworks", raw.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for nat_subnets: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandComputeServiceAttachmentEnableProxyProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentDomainNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentConsumerRejectLists(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentConsumerAcceptLists(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) 
- - transformedProjectIdOrNum, err := expandComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(original["project_id_or_num"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectIdOrNum); val.IsValid() && !isEmptyValue(val) { - transformed["projectIdOrNum"] = transformedProjectIdOrNum - } - - transformedConnectionLimit, err := expandComputeServiceAttachmentConsumerAcceptListsConnectionLimit(original["connection_limit"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConnectionLimit); val.IsValid() && !isEmptyValue(val) { - transformed["connectionLimit"] = transformedConnectionLimit - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentConsumerAcceptListsConnectionLimit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeServiceAttachmentUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - // need to send value in PATCH due to validation bug on api b/198329756 - nameProp := d.Get("name") - if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - // need to send value in PATCH due to validation bug on api b/198308475 - enableProxyProtocolProp := d.Get("enable_proxy_protocol") - if v, ok := 
d.GetOkExists("enable_proxy_protocol"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableProxyProtocolProp)) { - obj["enableProxyProtocol"] = enableProxyProtocolProp - } - - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_snapshot.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_snapshot.go deleted file mode 100644 index 17d7dbd1d4..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_snapshot.go +++ /dev/null @@ -1,882 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeSnapshot() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeSnapshotCreate, - Read: resourceComputeSnapshotRead, - Update: resourceComputeSnapshotUpdate, - Delete: resourceComputeSnapshotDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeSnapshotImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource; provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "source_disk": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the disk used to create this snapshot.`, - }, - "chain_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Creates the new snapshot in the snapshot chain labeled with the -specified name. The chain name must be 1-63 characters long and -comply with RFC1035. This is an uncommon option only for advanced -service owners who needs to create separate snapshot chains, for -example, for chargeback tracking. 
When you describe your snapshot -resource, this field is visible only if it has a non-empty value.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Labels to apply to this Snapshot.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "snapshot_encryption_key": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Encrypts the snapshot using a customer-supplied encryption key. - -After you encrypt a snapshot using a customer-supplied key, you must -provide the same key if you use the snapshot later. For example, you -must provide the encryption key when you create a disk from the -encrypted snapshot in a future request. - -Customer-supplied encryption keys do not protect access to metadata of -the snapshot. - -If you do not provide an encryption key when creating the snapshot, -then the snapshot will be encrypted using an automatically generated -key and you do not need to provide a key to use the snapshot later.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_self_link": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name of the encryption key that is stored in Google Cloud KMS.`, - }, - "kms_key_service_account": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The service account used for the encryption request for the given KMS key. 
-If absent, the Compute Engine Service Agent service account is used.`, - }, - "raw_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - Sensitive: true, - }, - "sha256": { - Type: schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied -encryption key that protects this resource.`, - }, - }, - }, - }, - "source_disk_encryption_key": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The customer-supplied encryption key of the source snapshot. Required -if the source snapshot is protected by a customer-supplied encryption -key.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_service_account": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The service account used for the encryption request for the given KMS key. 
-If absent, the Compute Engine Service Agent service account is used.`, - }, - "raw_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - Sensitive: true, - }, - }, - }, - }, - "storage_locations": { - Type: schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Cloud Storage bucket storage location of the snapshot (regional or multi-regional).`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the zone where the disk is hosted.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "disk_size_gb": { - Type: schema.TypeInt, - Computed: true, - Description: `Size of the snapshot, specified in GB.`, - }, - "label_fingerprint": { - Type: schema.TypeString, - Computed: true, - Description: `The fingerprint used for optimistic locking of this resource. Used -internally during updates.`, - }, - "licenses": { - Type: schema.TypeList, - Computed: true, - Description: `A list of public visible licenses that apply to this snapshot. This -can be because the original image had licenses attached (such as a -Windows image). snapshotEncryptionKey nested object Encrypts the -snapshot using a customer-supplied encryption key.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "snapshot_id": { - Type: schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "storage_bytes": { - Type: schema.TypeInt, - Computed: true, - Description: `A size of the storage used by the snapshot. 
As snapshots share -storage, this number is expected to change with snapshot -creation/deletion.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - chainNameProp, err := expandComputeSnapshotChainName(d.Get("chain_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("chain_name"); !isEmptyValue(reflect.ValueOf(chainNameProp)) && (ok || !reflect.DeepEqual(v, chainNameProp)) { - obj["chainName"] = chainNameProp - } - nameProp, err := expandComputeSnapshotName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeSnapshotDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - storageLocationsProp, err := expandComputeSnapshotStorageLocations(d.Get("storage_locations"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("storage_locations"); !isEmptyValue(reflect.ValueOf(storageLocationsProp)) && (ok || !reflect.DeepEqual(v, storageLocationsProp)) { - obj["storageLocations"] = storageLocationsProp - } - labelsProp, err := expandComputeSnapshotLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) 
&& (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - labelFingerprintProp, err := expandComputeSnapshotLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - sourceDiskProp, err := expandComputeSnapshotSourceDisk(d.Get("source_disk"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_disk"); !isEmptyValue(reflect.ValueOf(sourceDiskProp)) && (ok || !reflect.DeepEqual(v, sourceDiskProp)) { - obj["sourceDisk"] = sourceDiskProp - } - zoneProp, err := expandComputeSnapshotZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - snapshotEncryptionKeyProp, err := expandComputeSnapshotSnapshotEncryptionKey(d.Get("snapshot_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("snapshot_encryption_key"); !isEmptyValue(reflect.ValueOf(snapshotEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, snapshotEncryptionKeyProp)) { - obj["snapshotEncryptionKey"] = snapshotEncryptionKeyProp - } - sourceDiskEncryptionKeyProp, err := expandComputeSnapshotSourceDiskEncryptionKey(d.Get("source_disk_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_disk_encryption_key"); !isEmptyValue(reflect.ValueOf(sourceDiskEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, sourceDiskEncryptionKeyProp)) { - obj["sourceDiskEncryptionKey"] = sourceDiskEncryptionKeyProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}PRE_CREATE_REPLACE_ME/createSnapshot") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating 
new Snapshot: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Snapshot: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - url = regexp.MustCompile("PRE_CREATE_REPLACE_ME").ReplaceAllLiteralString(url, sourceDiskProp.(string)) - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Snapshot: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/snapshots/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating Snapshot", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Snapshot: %s", err) - } - - log.Printf("[DEBUG] Finished creating Snapshot %q: %#v", d.Id(), res) - - return resourceComputeSnapshotRead(d, meta) -} - -func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/snapshots/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Snapshot: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, 
"GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeSnapshot %q", d.Id())) - } - - res, err = resourceComputeSnapshotDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing ComputeSnapshot because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeSnapshotCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("snapshot_id", flattenComputeSnapshotSnapshotId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("disk_size_gb", flattenComputeSnapshotDiskSizeGb(res["diskSizeGb"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("chain_name", flattenComputeSnapshotChainName(res["chainName"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("name", flattenComputeSnapshotName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("description", flattenComputeSnapshotDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("storage_bytes", flattenComputeSnapshotStorageBytes(res["storageBytes"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("storage_locations", flattenComputeSnapshotStorageLocations(res["storageLocations"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("licenses", 
flattenComputeSnapshotLicenses(res["licenses"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("labels", flattenComputeSnapshotLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("label_fingerprint", flattenComputeSnapshotLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("source_disk", flattenComputeSnapshotSourceDisk(res["sourceDisk"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("snapshot_encryption_key", flattenComputeSnapshotSnapshotEncryptionKey(res["snapshotEncryptionKey"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - - return nil -} - -func resourceComputeSnapshotUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Snapshot: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("labels") || d.HasChange("label_fingerprint") { - obj := make(map[string]interface{}) - - labelsProp, err := expandComputeSnapshotLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - labelFingerprintProp, err := expandComputeSnapshotLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("label_fingerprint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/snapshots/{{name}}/setLabels") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Snapshot %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Snapshot %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating Snapshot", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeSnapshotRead(d, meta) -} - -func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Snapshot: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/snapshots/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Snapshot %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Snapshot") 
- } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting Snapshot", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Snapshot %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeSnapshotImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/snapshots/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/snapshots/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeSnapshotCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotSnapshotId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeSnapshotDiskSizeGb(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeSnapshotChainName(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenComputeSnapshotName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotStorageBytes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeSnapshotStorageLocations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotLicenses(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeSnapshotLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotLabelFingerprint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotSourceDisk(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeSnapshotSnapshotEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["raw_key"] = - flattenComputeSnapshotSnapshotEncryptionKeyRawKey(original["rawKey"], d, config) - transformed["sha256"] = - 
flattenComputeSnapshotSnapshotEncryptionKeySha256(original["sha256"], d, config) - transformed["kms_key_self_link"] = - flattenComputeSnapshotSnapshotEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) - transformed["kms_key_service_account"] = - flattenComputeSnapshotSnapshotEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) - return []interface{}{transformed} -} -func flattenComputeSnapshotSnapshotEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return d.Get("snapshot_encryption_key.0.raw_key") -} - -func flattenComputeSnapshotSnapshotEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotSnapshotEncryptionKeyKmsKeySelfLink(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotSnapshotEncryptionKeyKmsKeyServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeSnapshotChainName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotStorageLocations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandComputeSnapshotLabelFingerprint(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandComputeSnapshotSourceDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseZonalFieldValue("disks", v.(string), "project", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for source_disk: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeSnapshotZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeSnapshotSnapshotEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeSnapshotSnapshotEncryptionKeyRawKey(original["raw_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - transformed["rawKey"] = transformedRawKey - } - - transformedSha256, err := expandComputeSnapshotSnapshotEncryptionKeySha256(original["sha256"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSha256); val.IsValid() && !isEmptyValue(val) { - transformed["sha256"] = transformedSha256 - } - - transformedKmsKeySelfLink, err := expandComputeSnapshotSnapshotEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeySelfLink - } - - transformedKmsKeyServiceAccount, err := 
expandComputeSnapshotSnapshotEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount - } - - return transformed, nil -} - -func expandComputeSnapshotSnapshotEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotSnapshotEncryptionKeySha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotSnapshotEncryptionKeyKmsKeySelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotSnapshotEncryptionKeyKmsKeyServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotSourceDiskEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeSnapshotSourceDiskEncryptionKeyRawKey(original["raw_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - transformed["rawKey"] = transformedRawKey - } - - transformedKmsKeyServiceAccount, err := expandComputeSnapshotSourceDiskEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount - } - - return 
transformed, nil -} - -func expandComputeSnapshotSourceDiskEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotSourceDiskEncryptionKeyKmsKeyServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeSnapshotDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v, ok := res["snapshotEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - // The raw key won't be returned, so we need to use the original. - transformed["rawKey"] = d.Get("snapshot_encryption_key.0.raw_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - // The response for crypto keys often includes the version of the key which needs to be removed - // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 - transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["snapshotEncryptionKey"] = transformed - } - - if v, ok := res["sourceDiskEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - // The raw key won't be returned, so we need to use the original. 
- transformed["rawKey"] = d.Get("source_disk_encryption_key.0.raw_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - // The response for crypto keys often includes the version of the key which needs to be removed - // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 - transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["sourceDiskEncryptionKey"] = transformed - } - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_ssl_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_ssl_certificate.go deleted file mode 100644 index c68a25e845..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_ssl_certificate.go +++ /dev/null @@ -1,399 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeSslCertificate() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeSslCertificateCreate, - Read: resourceComputeSslCertificateRead, - Delete: resourceComputeSslCertificateDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeSslCertificateImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "certificate": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The certificate in PEM format. -The certificate chain must be no greater than 5 certs long. -The chain must include at least one intermediate cert.`, - Sensitive: true, - }, - "private_key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: sha256DiffSuppress, - Description: `The write-only private key in PEM format.`, - Sensitive: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateGCEName, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash. 
- - -These are in the same namespace as the managed SSL certificates.`, - }, - "certificate_id": { - Type: schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "expire_time": { - Type: schema.TypeString, - Computed: true, - Description: `Expire time of the certificate in RFC3339 text format.`, - }, - "name_prefix": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name"}, - Description: "Creates a unique name beginning with the specified prefix. Conflicts with name.", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - // https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource - // uuid is 26 characters, limit the prefix to 37. - value := v.(string) - if len(value) > 37 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) - } - return - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - certificateProp, err := expandComputeSslCertificateCertificate(d.Get("certificate"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("certificate"); !isEmptyValue(reflect.ValueOf(certificateProp)) && (ok || !reflect.DeepEqual(v, certificateProp)) { - obj["certificate"] = certificateProp - } - descriptionProp, err := expandComputeSslCertificateDescription(d.Get("description"), d, config) - if 
err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeSslCertificateName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - privateKeyProp, err := expandComputeSslCertificatePrivateKey(d.Get("private_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_key"); !isEmptyValue(reflect.ValueOf(privateKeyProp)) && (ok || !reflect.DeepEqual(v, privateKeyProp)) { - obj["privateKey"] = privateKeyProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new SslCertificate: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for SslCertificate: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating SslCertificate: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating SslCertificate", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error 
waiting to create SslCertificate: %s", err) - } - - log.Printf("[DEBUG] Finished creating SslCertificate %q: %#v", d.Id(), res) - - return resourceComputeSslCertificateRead(d, meta) -} - -func resourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for SslCertificate: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeSslCertificate %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading SslCertificate: %s", err) - } - - if err := d.Set("certificate", flattenComputeSslCertificateCertificate(res["certificate"], d, config)); err != nil { - return fmt.Errorf("Error reading SslCertificate: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeSslCertificateCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading SslCertificate: %s", err) - } - if err := d.Set("description", flattenComputeSslCertificateDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading SslCertificate: %s", err) - } - if err := d.Set("expire_time", flattenComputeSslCertificateExpireTime(res["expireTime"], d, config)); err != nil { - return fmt.Errorf("Error reading SslCertificate: %s", err) - } - if err := d.Set("certificate_id", 
flattenComputeSslCertificateCertificateId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading SslCertificate: %s", err) - } - if err := d.Set("name", flattenComputeSslCertificateName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading SslCertificate: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading SslCertificate: %s", err) - } - - return nil -} - -func resourceComputeSslCertificateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for SslCertificate: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting SslCertificate %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "SslCertificate") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting SslCertificate", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting SslCertificate %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeSslCertificateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/sslCertificates/(?P[^/]+)", 
- "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeSslCertificateCertificate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslCertificateCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslCertificateDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslCertificateExpireTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslCertificateCertificateId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeSslCertificateName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeSslCertificateCertificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSslCertificateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSslCertificateName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - var certName string - if v, ok := d.GetOk("name"); ok { - certName = v.(string) - } else if v, ok 
:= d.GetOk("name_prefix"); ok { - certName = resource.PrefixedUniqueId(v.(string)) - } else { - certName = resource.UniqueId() - } - - // We need to get the {{name}} into schema to set the ID using ReplaceVars - if err := d.Set("name", certName); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return certName, nil -} - -func expandComputeSslCertificatePrivateKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_ssl_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_ssl_policy.go deleted file mode 100644 index e12d06404b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_ssl_policy.go +++ /dev/null @@ -1,517 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func sslPolicyCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { - profile := diff.Get("profile") - customFeaturesCount := diff.Get("custom_features.#") - - // Validate that policy configs aren't incompatible during all phases - // CUSTOM profile demands non-zero custom_features, and other profiles (i.e., not CUSTOM) demand zero custom_features - if diff.HasChange("profile") || diff.HasChange("custom_features") { - if profile.(string) == "CUSTOM" { - if customFeaturesCount.(int) == 0 { - return fmt.Errorf("Error in SSL Policy %s: the profile is set to %s but no custom_features are set.", diff.Get("name"), profile.(string)) - } - } else { - if customFeaturesCount != 0 { - return fmt.Errorf("Error in SSL Policy %s: the profile is set to %s but using custom_features requires the profile to be CUSTOM.", diff.Get("name"), profile.(string)) - } - } - return nil - } - return nil -} - -func ResourceComputeSslPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeSslPolicyCreate, - Read: resourceComputeSslPolicyRead, - Update: resourceComputeSslPolicyUpdate, - Delete: resourceComputeSslPolicyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeSslPolicyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - CustomizeDiff: sslPolicyCustomizeDiff, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. 
Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "custom_features": { - Type: schema.TypeSet, - Optional: true, - Description: `Profile specifies the set of SSL features that can be used by the -load balancer when negotiating SSL with clients. This can be one of -'COMPATIBLE', 'MODERN', 'RESTRICTED', or 'CUSTOM'. If using 'CUSTOM', -the set of SSL features to enable must be specified in the -'customFeatures' field. - -See the [official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies#profilefeaturesupport) -for which ciphers are available to use. **Note**: this argument -*must* be present when using the 'CUSTOM' profile. This argument -*must not* be present when using any other profile.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "min_tls_version": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"TLS_1_0", "TLS_1_1", "TLS_1_2", ""}), - Description: `The minimum version of SSL protocol that can be used by the clients -to establish a connection with the load balancer. Default value: "TLS_1_0" Possible values: ["TLS_1_0", "TLS_1_1", "TLS_1_2"]`, - Default: "TLS_1_0", - }, - "profile": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"COMPATIBLE", "MODERN", "RESTRICTED", "CUSTOM", ""}), - Description: `Profile specifies the set of SSL features that can be used by the -load balancer when negotiating SSL with clients. If using 'CUSTOM', -the set of SSL features to enable must be specified in the -'customFeatures' field. 
- -See the [official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies#profilefeaturesupport) -for information on what cipher suites each profile provides. If -'CUSTOM' is used, the 'custom_features' attribute **must be set**. Default value: "COMPATIBLE" Possible values: ["COMPATIBLE", "MODERN", "RESTRICTED", "CUSTOM"]`, - Default: "COMPATIBLE", - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "enabled_features": { - Type: schema.TypeSet, - Computed: true, - Description: `The list of features enabled in the SSL policy.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "fingerprint": { - Type: schema.TypeString, - Computed: true, - Description: `Fingerprint of this resource. A hash of the contents stored in this -object. This field is used in optimistic locking.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeSslPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeSslPolicyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeSslPolicyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - profileProp, 
err := expandComputeSslPolicyProfile(d.Get("profile"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("profile"); !isEmptyValue(reflect.ValueOf(profileProp)) && (ok || !reflect.DeepEqual(v, profileProp)) { - obj["profile"] = profileProp - } - minTlsVersionProp, err := expandComputeSslPolicyMinTlsVersion(d.Get("min_tls_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("min_tls_version"); !isEmptyValue(reflect.ValueOf(minTlsVersionProp)) && (ok || !reflect.DeepEqual(v, minTlsVersionProp)) { - obj["minTlsVersion"] = minTlsVersionProp - } - customFeaturesProp, err := expandComputeSslPolicyCustomFeatures(d.Get("custom_features"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_features"); !isEmptyValue(reflect.ValueOf(customFeaturesProp)) && (ok || !reflect.DeepEqual(v, customFeaturesProp)) { - obj["customFeatures"] = customFeaturesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslPolicies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new SslPolicy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for SslPolicy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating SslPolicy: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/sslPolicies/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating SslPolicy", userAgent, - 
d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create SslPolicy: %s", err) - } - - log.Printf("[DEBUG] Finished creating SslPolicy %q: %#v", d.Id(), res) - - return resourceComputeSslPolicyRead(d, meta) -} - -func resourceComputeSslPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslPolicies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for SslPolicy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeSslPolicy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading SslPolicy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeSslPolicyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("description", flattenComputeSslPolicyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("name", flattenComputeSslPolicyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("profile", flattenComputeSslPolicyProfile(res["profile"], d, config)); err != nil { - return fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := 
d.Set("min_tls_version", flattenComputeSslPolicyMinTlsVersion(res["minTlsVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("enabled_features", flattenComputeSslPolicyEnabledFeatures(res["enabledFeatures"], d, config)); err != nil { - return fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("custom_features", flattenComputeSslPolicyCustomFeatures(res["customFeatures"], d, config)); err != nil { - return fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("fingerprint", flattenComputeSslPolicyFingerprint(res["fingerprint"], d, config)); err != nil { - return fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading SslPolicy: %s", err) - } - - return nil -} - -func resourceComputeSslPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for SslPolicy: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - profileProp, err := expandComputeSslPolicyProfile(d.Get("profile"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("profile"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, profileProp)) { - obj["profile"] = profileProp - } - minTlsVersionProp, err := expandComputeSslPolicyMinTlsVersion(d.Get("min_tls_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("min_tls_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, minTlsVersionProp)) { - obj["minTlsVersion"] = minTlsVersionProp - } - customFeaturesProp, err := 
expandComputeSslPolicyCustomFeatures(d.Get("custom_features"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_features"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, customFeaturesProp)) { - obj["customFeatures"] = customFeaturesProp - } - - obj, err = resourceComputeSslPolicyUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslPolicies/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating SslPolicy %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating SslPolicy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating SslPolicy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating SslPolicy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeSslPolicyRead(d, meta) -} - -func resourceComputeSslPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for SslPolicy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslPolicies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting SslPolicy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, 
err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "SslPolicy") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting SslPolicy", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting SslPolicy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeSslPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/sslPolicies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/sslPolicies/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeSslPolicyCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslPolicyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslPolicyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslPolicyProfile(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslPolicyMinTlsVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslPolicyEnabledFeatures(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func 
flattenComputeSslPolicyCustomFeatures(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenComputeSslPolicyFingerprint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeSslPolicyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSslPolicyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSslPolicyProfile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSslPolicyMinTlsVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSslPolicyCustomFeatures(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func resourceComputeSslPolicyUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // TODO(https://github.com/GoogleCloudPlatform/magic-modules/issues/184): Handle fingerprint consistently - obj["fingerprint"] = d.Get("fingerprint") - - // TODO(https://github.com/GoogleCloudPlatform/magic-modules/issues/183): Can we generalize this - // Send a null fields if customFeatures is empty. 
- if v, ok := obj["customFeatures"]; ok && len(v.([]interface{})) == 0 { - obj["customFeatures"] = nil - } - - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_subnetwork.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_subnetwork.go deleted file mode 100644 index 3a01bfb8dc..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_subnetwork.go +++ /dev/null @@ -1,1197 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "net" - "reflect" - "time" - - "github.com/apparentlymart/go-cidr/cidr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// Whether the IP CIDR change shrinks the block. -func isShrinkageIpCidr(_ context.Context, old, new, _ interface{}) bool { - _, oldCidr, oldErr := net.ParseCIDR(old.(string)) - _, newCidr, newErr := net.ParseCIDR(new.(string)) - - if oldErr != nil || newErr != nil { - // This should never happen. The ValidateFunc on the field ensures it. - return false - } - - oldStart, oldEnd := cidr.AddressRange(oldCidr) - - if newCidr.Contains(oldStart) && newCidr.Contains(oldEnd) { - // This is a CIDR range expansion, no need to ForceNew, we have an update method for it. 
- return false - } - - return true -} - -func ResourceComputeSubnetwork() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeSubnetworkCreate, - Read: resourceComputeSubnetworkRead, - Update: resourceComputeSubnetworkUpdate, - Delete: resourceComputeSubnetworkDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeSubnetworkImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - CustomizeDiff: customdiff.All( - customdiff.ForceNewIfChange("ip_cidr_range", isShrinkageIpCidr), - resourceComputeSubnetworkSecondaryIpRangeSetStyleDiff, - ), - - Schema: map[string]*schema.Schema{ - "ip_cidr_range": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateIpCidrRange, - Description: `The range of internal addresses that are owned by this subnetwork. -Provide this property when you create the subnetwork. For example, -10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and -non-overlapping within a network. Only IPv4 is supported.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCEName, - Description: `The name of the resource, provided by the client when initially -creating the resource. The name must be 1-63 characters long, and -comply with RFC1035. Specifically, the name must be 1-63 characters -long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which -means the first character must be a lowercase letter, and all -following characters must be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The network this subnet belongs to. 
-Only networks that are in the distributed mode can have subnetworks.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when -you create the resource. This field can be set only at resource -creation time.`, - }, - "ipv6_access_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"EXTERNAL", "INTERNAL", ""}), - Description: `The access type of IPv6 address this subnet holds. It's immutable and can only be specified during creation -or the first time the subnet is updated into IPV4_IPV6 dual stack. If the ipv6_type is EXTERNAL then this subnet -cannot enable direct path. Possible values: ["EXTERNAL", "INTERNAL"]`, - }, - "log_config": { - Type: schema.TypeList, - Optional: true, - Description: `Denotes the logging options for the subnetwork flow logs. If logging is enabled -logs will be exported to Stackdriver. This field cannot be set if the 'purpose' of this -subnetwork is 'INTERNAL_HTTPS_LOAD_BALANCER'`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "aggregation_interval": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"INTERVAL_5_SEC", "INTERVAL_30_SEC", "INTERVAL_1_MIN", "INTERVAL_5_MIN", "INTERVAL_10_MIN", "INTERVAL_15_MIN", ""}), - Description: `Can only be specified if VPC flow logging for this subnetwork is enabled. -Toggles the aggregation interval for collecting flow logs. Increasing the -interval time will reduce the amount of generated flow logs for long -lasting connections. Default is an interval of 5 seconds per connection. 
Default value: "INTERVAL_5_SEC" Possible values: ["INTERVAL_5_SEC", "INTERVAL_30_SEC", "INTERVAL_1_MIN", "INTERVAL_5_MIN", "INTERVAL_10_MIN", "INTERVAL_15_MIN"]`, - Default: "INTERVAL_5_SEC", - AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata", "log_config.0.filter_expr"}, - }, - "filter_expr": { - Type: schema.TypeString, - Optional: true, - Description: `Export filter used to define which VPC flow logs should be logged, as as CEL expression. See -https://cloud.google.com/vpc/docs/flow-logs#filtering for details on how to format this field. -The default value is 'true', which evaluates to include everything.`, - Default: "true", - AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata", "log_config.0.filter_expr"}, - }, - "flow_sampling": { - Type: schema.TypeFloat, - Optional: true, - Description: `Can only be specified if VPC flow logging for this subnetwork is enabled. -The value of the field must be in [0, 1]. Set the sampling rate of VPC -flow logs within the subnetwork where 1.0 means all collected logs are -reported and 0.0 means no logs are reported. Default is 0.5 which means -half of all collected logs are reported.`, - Default: 0.5, - AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata", "log_config.0.filter_expr"}, - }, - "metadata": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA", "CUSTOM_METADATA", ""}), - Description: `Can only be specified if VPC flow logging for this subnetwork is enabled. -Configures whether metadata fields should be added to the reported VPC -flow logs. 
Default value: "INCLUDE_ALL_METADATA" Possible values: ["EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA", "CUSTOM_METADATA"]`, - Default: "INCLUDE_ALL_METADATA", - AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata", "log_config.0.filter_expr"}, - }, - "metadata_fields": { - Type: schema.TypeSet, - Optional: true, - Description: `List of metadata fields that should be added to reported logs. -Can only be specified if VPC flow logs for this subnetwork is enabled and "metadata" is set to CUSTOM_METADATA.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - }, - }, - }, - "private_ip_google_access": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: `When enabled, VMs in this subnetwork without external IP addresses can -access Google APIs and services by using Private Google Access.`, - }, - "private_ipv6_google_access": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `The private IPv6 google access type for the VMs in this subnet.`, - }, - "purpose": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The purpose of the resource. This field can be either 'PRIVATE_RFC_1918', 'INTERNAL_HTTPS_LOAD_BALANCER' or 'REGIONAL_MANAGED_PROXY'. -A subnetwork with purpose set to 'INTERNAL_HTTPS_LOAD_BALANCER' is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. -A subnetwork in a given region with purpose set to 'REGIONAL_MANAGED_PROXY' is a proxy-only subnet and is shared between all the regional Envoy-based load balancers. -If unspecified, the purpose defaults to 'PRIVATE_RFC_1918'. 
-The enableFlowLogs field isn't supported with the purpose field set to 'INTERNAL_HTTPS_LOAD_BALANCER'.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The GCP region for this subnetwork.`, - }, - "role": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"ACTIVE", "BACKUP", ""}), - Description: `The role of subnetwork. -The value can be set to 'ACTIVE' or 'BACKUP'. -An 'ACTIVE' subnetwork is one that is currently being used. -A 'BACKUP' subnetwork is one that is ready to be promoted to 'ACTIVE' or is currently draining. - -Subnetwork role must be specified when purpose is set to 'INTERNAL_HTTPS_LOAD_BALANCER' or 'REGIONAL_MANAGED_PROXY'. Possible values: ["ACTIVE", "BACKUP"]`, - }, - "secondary_ip_range": { - Type: schema.TypeList, - Computed: true, - Optional: true, - ConfigMode: schema.SchemaConfigModeAttr, - Description: `An array of configurations for secondary IP ranges for VM instances -contained in this subnetwork. The primary IP of such VM must belong -to the primary ipCidrRange of the subnetwork. The alias IPs may belong -to either primary or secondary ranges. - -**Note**: This field uses [attr-as-block mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html) to avoid -breaking users during the 0.12 upgrade. To explicitly send a list -of zero objects you must use the following syntax: -'example=[]' -For more details about this behavior, see [this section](https://www.terraform.io/docs/configuration/attr-as-blocks.html#defining-a-fixed-object-collection-value).`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_cidr_range": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateIpCidrRange, - Description: `The range of IP addresses belonging to this subnetwork secondary -range. Provide this property when you create the subnetwork. 
-Ranges must be unique and non-overlapping with all primary and -secondary IP ranges within a network. Only IPv4 is supported.`, - }, - "range_name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateGCEName, - Description: `The name associated with this subnetwork secondary range, used -when adding an alias IP range to a VM instance. The name must -be 1-63 characters long, and comply with RFC1035. The name -must be unique within the subnetwork.`, - }, - }, - }, - }, - "stack_type": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"IPV4_ONLY", "IPV4_IPV6", ""}), - Description: `The stack type for this subnet to identify whether the IPv6 feature is enabled or not. -If not specified IPV4_ONLY will be used. Possible values: ["IPV4_ONLY", "IPV4_IPV6"]`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "external_ipv6_prefix": { - Type: schema.TypeString, - Computed: true, - Description: `The range of external IPv6 addresses that are owned by this subnetwork.`, - }, - "gateway_address": { - Type: schema.TypeString, - Computed: true, - Description: `The gateway address for default routes to reach destination addresses -outside this subnetwork.`, - }, - "ipv6_cidr_range": { - Type: schema.TypeString, - Computed: true, - Description: `The range of internal IPv6 addresses that are owned by this subnetwork.`, - }, - "fingerprint": { - Type: schema.TypeString, - Computed: true, - Description: "Fingerprint of this resource. 
This field is used internally during updates of this resource.", - Deprecated: "This field is not useful for users, and has been removed as an output.", - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeSubnetworkSecondaryIpRangeSetStyleDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { - keys := diff.GetChangedKeysPrefix("secondary_ip_range") - if len(keys) == 0 { - return nil - } - oldCount, newCount := diff.GetChange("secondary_ip_range.#") - var count int - // There could be duplicates - worth continuing even if the counts are unequal. - if oldCount.(int) < newCount.(int) { - count = newCount.(int) - } else { - count = oldCount.(int) - } - - if count < 1 { - return nil - } - old := make([]interface{}, count) - new := make([]interface{}, count) - for i := 0; i < count; i++ { - o, n := diff.GetChange(fmt.Sprintf("secondary_ip_range.%d", i)) - - if o != nil { - old = append(old, o) - } - if n != nil { - new = append(new, n) - } - } - - oldSet := schema.NewSet(schema.HashResource(ResourceComputeSubnetwork().Schema["secondary_ip_range"].Elem.(*schema.Resource)), old) - newSet := schema.NewSet(schema.HashResource(ResourceComputeSubnetwork().Schema["secondary_ip_range"].Elem.(*schema.Resource)), new) - - if oldSet.Equal(newSet) { - if err := diff.Clear("secondary_ip_range"); err != nil { - return err - } - } - - return nil -} - -func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeSubnetworkDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); 
!isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - ipCidrRangeProp, err := expandComputeSubnetworkIpCidrRange(d.Get("ip_cidr_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_cidr_range"); !isEmptyValue(reflect.ValueOf(ipCidrRangeProp)) && (ok || !reflect.DeepEqual(v, ipCidrRangeProp)) { - obj["ipCidrRange"] = ipCidrRangeProp - } - nameProp, err := expandComputeSubnetworkName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandComputeSubnetworkNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - purposeProp, err := expandComputeSubnetworkPurpose(d.Get("purpose"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("purpose"); !isEmptyValue(reflect.ValueOf(purposeProp)) && (ok || !reflect.DeepEqual(v, purposeProp)) { - obj["purpose"] = purposeProp - } - roleProp, err := expandComputeSubnetworkRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(reflect.ValueOf(roleProp)) && (ok || !reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - secondaryIpRangesProp, err := expandComputeSubnetworkSecondaryIpRange(d.Get("secondary_ip_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("secondary_ip_range"); ok || !reflect.DeepEqual(v, secondaryIpRangesProp) { - obj["secondaryIpRanges"] = secondaryIpRangesProp - } - privateIpGoogleAccessProp, err := expandComputeSubnetworkPrivateIpGoogleAccess(d.Get("private_ip_google_access"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_ip_google_access"); !isEmptyValue(reflect.ValueOf(privateIpGoogleAccessProp)) && (ok || !reflect.DeepEqual(v, privateIpGoogleAccessProp)) { - obj["privateIpGoogleAccess"] = privateIpGoogleAccessProp - } - privateIpv6GoogleAccessProp, err := expandComputeSubnetworkPrivateIpv6GoogleAccess(d.Get("private_ipv6_google_access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_ipv6_google_access"); !isEmptyValue(reflect.ValueOf(privateIpv6GoogleAccessProp)) && (ok || !reflect.DeepEqual(v, privateIpv6GoogleAccessProp)) { - obj["privateIpv6GoogleAccess"] = privateIpv6GoogleAccessProp - } - regionProp, err := expandComputeSubnetworkRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - logConfigProp, err := expandComputeSubnetworkLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); ok || !reflect.DeepEqual(v, logConfigProp) { - obj["logConfig"] = logConfigProp - } - stackTypeProp, err := expandComputeSubnetworkStackType(d.Get("stack_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("stack_type"); !isEmptyValue(reflect.ValueOf(stackTypeProp)) && (ok || !reflect.DeepEqual(v, stackTypeProp)) { - obj["stackType"] = stackTypeProp - } - ipv6AccessTypeProp, err := expandComputeSubnetworkIpv6AccessType(d.Get("ipv6_access_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ipv6_access_type"); !isEmptyValue(reflect.ValueOf(ipv6AccessTypeProp)) && (ok || !reflect.DeepEqual(v, ipv6AccessTypeProp)) { - obj["ipv6AccessType"] = ipv6AccessTypeProp - } - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Subnetwork: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Subnetwork: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Subnetwork: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating Subnetwork", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Subnetwork: %s", err) - } - - log.Printf("[DEBUG] Finished creating Subnetwork %q: %#v", d.Id(), res) - - return resourceComputeSubnetworkRead(d, meta) -} - -func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Subnetwork: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, 
config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeSubnetworkCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("description", flattenComputeSubnetworkDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("gateway_address", flattenComputeSubnetworkGatewayAddress(res["gatewayAddress"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("ip_cidr_range", flattenComputeSubnetworkIpCidrRange(res["ipCidrRange"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("name", flattenComputeSubnetworkName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("network", flattenComputeSubnetworkNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("purpose", flattenComputeSubnetworkPurpose(res["purpose"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("role", flattenComputeSubnetworkRole(res["role"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("secondary_ip_range", flattenComputeSubnetworkSecondaryIpRange(res["secondaryIpRanges"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("private_ip_google_access", 
flattenComputeSubnetworkPrivateIpGoogleAccess(res["privateIpGoogleAccess"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("private_ipv6_google_access", flattenComputeSubnetworkPrivateIpv6GoogleAccess(res["privateIpv6GoogleAccess"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("region", flattenComputeSubnetworkRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("log_config", flattenComputeSubnetworkLogConfig(res["logConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("stack_type", flattenComputeSubnetworkStackType(res["stackType"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("ipv6_access_type", flattenComputeSubnetworkIpv6AccessType(res["ipv6AccessType"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("ipv6_cidr_range", flattenComputeSubnetworkIpv6CidrRange(res["ipv6CidrRange"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("external_ipv6_prefix", flattenComputeSubnetworkExternalIpv6Prefix(res["externalIpv6Prefix"], d, config)); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading Subnetwork: %s", err) - } - - return nil -} - -func resourceComputeSubnetworkUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Subnetwork: %s", err) - } - 
billingProject = project - - d.Partial(true) - - if d.HasChange("ip_cidr_range") { - obj := make(map[string]interface{}) - - ipCidrRangeProp, err := expandComputeSubnetworkIpCidrRange(d.Get("ip_cidr_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_cidr_range"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ipCidrRangeProp)) { - obj["ipCidrRange"] = ipCidrRangeProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}/expandIpCidrRange") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating Subnetwork", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("private_ip_google_access") { - obj := make(map[string]interface{}) - - privateIpGoogleAccessProp, err := expandComputeSubnetworkPrivateIpGoogleAccess(d.Get("private_ip_google_access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_ip_google_access"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, privateIpGoogleAccessProp)) { - obj["privateIpGoogleAccess"] = privateIpGoogleAccessProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}/setPrivateIpGoogleAccess") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err 
== nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating Subnetwork", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("private_ipv6_google_access") || d.HasChange("stack_type") || d.HasChange("ipv6_access_type") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := SendRequest(config, "GET", billingProject, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) - } - - obj["fingerprint"] = getRes["fingerprint"] - - privateIpv6GoogleAccessProp, err := expandComputeSubnetworkPrivateIpv6GoogleAccess(d.Get("private_ipv6_google_access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_ipv6_google_access"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, privateIpv6GoogleAccessProp)) { - obj["privateIpv6GoogleAccess"] = privateIpv6GoogleAccessProp - } - stackTypeProp, err := expandComputeSubnetworkStackType(d.Get("stack_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("stack_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, stackTypeProp)) { - obj["stackType"] = stackTypeProp - } - ipv6AccessTypeProp, err := expandComputeSubnetworkIpv6AccessType(d.Get("ipv6_access_type"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ipv6_access_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ipv6AccessTypeProp)) { - obj["ipv6AccessType"] = ipv6AccessTypeProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating Subnetwork", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("log_config") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := SendRequest(config, "GET", billingProject, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) - } - - obj["fingerprint"] = getRes["fingerprint"] - - logConfigProp, err := expandComputeSubnetworkLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); ok || !reflect.DeepEqual(v, logConfigProp) { - obj["logConfig"] = logConfigProp - } - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating Subnetwork", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("role") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := SendRequest(config, "GET", billingProject, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) - } - - obj["fingerprint"] = getRes["fingerprint"] - - roleProp, err := expandComputeSubnetworkRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, 
url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating Subnetwork", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("secondary_ip_range") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := SendRequest(config, "GET", billingProject, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) - } - - obj["fingerprint"] = getRes["fingerprint"] - - secondaryIpRangesProp, err := expandComputeSubnetworkSecondaryIpRange(d.Get("secondary_ip_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("secondary_ip_range"); ok || !reflect.DeepEqual(v, secondaryIpRangesProp) { - obj["secondaryIpRanges"] = secondaryIpRangesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, 
res, project, "Updating Subnetwork", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeSubnetworkRead(d, meta) -} - -func resourceComputeSubnetworkDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Subnetwork: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Subnetwork %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Subnetwork") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting Subnetwork", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Subnetwork %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeSubnetworkImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/subnetworks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return 
nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeSubnetworkCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkGatewayAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkIpCidrRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeSubnetworkPurpose(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkRole(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkSecondaryIpRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "range_name": flattenComputeSubnetworkSecondaryIpRangeRangeName(original["rangeName"], d, config), - "ip_cidr_range": flattenComputeSubnetworkSecondaryIpRangeIpCidrRange(original["ipCidrRange"], d, config), - }) - } - return transformed -} -func flattenComputeSubnetworkSecondaryIpRangeRangeName(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
- return v -} - -func flattenComputeSubnetworkSecondaryIpRangeIpCidrRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkPrivateIpGoogleAccess(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkPrivateIpv6GoogleAccess(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenComputeSubnetworkLogConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - - v, ok := original["enable"] - if ok && !v.(bool) { - return nil - } - - transformed := make(map[string]interface{}) - transformed["flow_sampling"] = original["flowSampling"] - transformed["aggregation_interval"] = original["aggregationInterval"] - transformed["metadata"] = original["metadata"] - if original["metadata"].(string) == "CUSTOM_METADATA" { - transformed["metadata_fields"] = original["metadataFields"] - } else { - // MetadataFields can only be set when metadata is CUSTOM_METADATA. However, when updating - // from custom to include/exclude, the API will return the previous values of the metadata fields, - // despite not actually having any custom fields at the moment. The API team has confirmed - // this as WAI (b/162771344), so we work around it by clearing the response if metadata is - // not custom. 
- transformed["metadata_fields"] = nil - } - transformed["filter_expr"] = original["filterExpr"] - - return []interface{}{transformed} -} - -func flattenComputeSubnetworkStackType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkIpv6AccessType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkIpv6CidrRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkExternalIpv6Prefix(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeSubnetworkDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkIpCidrRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeSubnetworkPurpose(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkRole(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkSecondaryIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedRangeName, err := expandComputeSubnetworkSecondaryIpRangeRangeName(original["range_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRangeName); val.IsValid() && !isEmptyValue(val) { - transformed["rangeName"] = transformedRangeName - } - - transformedIpCidrRange, err := expandComputeSubnetworkSecondaryIpRangeIpCidrRange(original["ip_cidr_range"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIpCidrRange); val.IsValid() && !isEmptyValue(val) { - transformed["ipCidrRange"] = transformedIpCidrRange - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeSubnetworkSecondaryIpRangeRangeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkSecondaryIpRangeIpCidrRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkPrivateIpGoogleAccess(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkPrivateIpv6GoogleAccess(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeSubnetworkLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - transformed := make(map[string]interface{}) - if len(l) == 0 || l[0] == nil { - purpose, ok := d.GetOkExists("purpose") - - if ok && (purpose.(string) == "REGIONAL_MANAGED_PROXY" || purpose.(string) == 
"INTERNAL_HTTPS_LOAD_BALANCER") { - // Subnetworks for regional L7 ILB/XLB do not accept any values for logConfig - return nil, nil - } - // send enable = false to ensure logging is disabled if there is no config - transformed["enable"] = false - return transformed, nil - } - - raw := l[0] - original := raw.(map[string]interface{}) - - // The log_config block is specified, so logging should be enabled - transformed["enable"] = true - transformed["aggregationInterval"] = original["aggregation_interval"] - transformed["flowSampling"] = original["flow_sampling"] - transformed["metadata"] = original["metadata"] - transformed["filterExpr"] = original["filter_expr"] - - // make it JSON marshallable - transformed["metadataFields"] = original["metadata_fields"].(*schema.Set).List() - - return transformed, nil -} - -func expandComputeSubnetworkStackType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkIpv6AccessType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_grpc_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_grpc_proxy.go deleted file mode 100644 index 4f3f2ad014..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_grpc_proxy.go +++ /dev/null @@ -1,439 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. 
-// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeTargetGrpcProxy() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeTargetGrpcProxyCreate, - Read: resourceComputeTargetGrpcProxyRead, - Update: resourceComputeTargetGrpcProxyUpdate, - Delete: resourceComputeTargetGrpcProxyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeTargetGrpcProxyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource -is created. The name must be 1-63 characters long, and comply -with RFC1035. Specifically, the name must be 1-63 characters long -and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which -means the first character must be a lowercase letter, and all -following characters must be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `An optional description of this resource.`, - }, - "url_map": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL to the UrlMap resource that defines the mapping from URL to -the BackendService. 
The protocol field in the BackendService -must be set to GRPC.`, - }, - "validate_for_proxyless": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If true, indicates that the BackendServices referenced by -the urlMap may be accessed by gRPC applications without using -a sidecar proxy. This will enable configuration checks on urlMap -and its referenced BackendServices to not allow unsupported features. -A gRPC application must use "xds:///" scheme in the target URI -of the service it is connecting to. If false, indicates that the -BackendServices referenced by the urlMap will be accessed by gRPC -applications via a sidecar proxy. In this case, a gRPC application -must not use "xds:///" scheme in the target URI of the service -it is connecting to`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "fingerprint": { - Type: schema.TypeString, - Computed: true, - Description: `Fingerprint of this resource. A hash of the contents stored in -this object. This field is used in optimistic locking. This field -will be ignored when inserting a TargetGrpcProxy. An up-to-date -fingerprint must be provided in order to patch/update the -TargetGrpcProxy; otherwise, the request will fail with error -412 conditionNotMet. To see the latest fingerprint, make a get() -request to retrieve the TargetGrpcProxy. 
A base64-encoded string.`, - }, - "self_link_with_id": { - Type: schema.TypeString, - Computed: true, - Description: `Server-defined URL with id for the resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeTargetGrpcProxyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeTargetGrpcProxyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeTargetGrpcProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - urlMapProp, err := expandComputeTargetGrpcProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(reflect.ValueOf(urlMapProp)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - validateForProxylessProp, err := expandComputeTargetGrpcProxyValidateForProxyless(d.Get("validate_for_proxyless"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("validate_for_proxyless"); !isEmptyValue(reflect.ValueOf(validateForProxylessProp)) && (ok || !reflect.DeepEqual(v, validateForProxylessProp)) { - obj["validateForProxyless"] = validateForProxylessProp - } - fingerprintProp, err := 
expandComputeTargetGrpcProxyFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetGrpcProxies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new TargetGrpcProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetGrpcProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TargetGrpcProxy: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/targetGrpcProxies/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating TargetGrpcProxy", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create TargetGrpcProxy: %s", err) - } - - log.Printf("[DEBUG] Finished creating TargetGrpcProxy %q: %#v", d.Id(), res) - - return resourceComputeTargetGrpcProxyRead(d, meta) -} - -func resourceComputeTargetGrpcProxyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/global/targetGrpcProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetGrpcProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeTargetGrpcProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeTargetGrpcProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - if err := d.Set("name", flattenComputeTargetGrpcProxyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - if err := d.Set("description", flattenComputeTargetGrpcProxyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - if err := d.Set("self_link_with_id", flattenComputeTargetGrpcProxySelfLinkWithId(res["selfLinkWithId"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - if err := d.Set("url_map", flattenComputeTargetGrpcProxyUrlMap(res["urlMap"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - if err := d.Set("validate_for_proxyless", flattenComputeTargetGrpcProxyValidateForProxyless(res["validateForProxyless"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - if err := d.Set("fingerprint", 
flattenComputeTargetGrpcProxyFingerprint(res["fingerprint"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - - return nil -} - -func resourceComputeTargetGrpcProxyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetGrpcProxy: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeTargetGrpcProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - fingerprintProp, err := expandComputeTargetGrpcProxyFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetGrpcProxies/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating TargetGrpcProxy %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating TargetGrpcProxy %q: %s", d.Id(), err) 
- } else { - log.Printf("[DEBUG] Finished updating TargetGrpcProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating TargetGrpcProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeTargetGrpcProxyRead(d, meta) -} - -func resourceComputeTargetGrpcProxyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetGrpcProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetGrpcProxies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting TargetGrpcProxy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TargetGrpcProxy") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting TargetGrpcProxy", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting TargetGrpcProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeTargetGrpcProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/targetGrpcProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err 
:= replaceVars(d, config, "projects/{{project}}/global/targetGrpcProxies/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeTargetGrpcProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetGrpcProxyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetGrpcProxyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetGrpcProxySelfLinkWithId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetGrpcProxyUrlMap(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetGrpcProxyValidateForProxyless(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetGrpcProxyFingerprint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeTargetGrpcProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetGrpcProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetGrpcProxyUrlMap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetGrpcProxyValidateForProxyless(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetGrpcProxyFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_http_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_http_proxy.go deleted file mode 100644 index 403695408e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_http_proxy.go +++ /dev/null @@ -1,420 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeTargetHttpProxy() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeTargetHttpProxyCreate, - Read: resourceComputeTargetHttpProxyRead, - Update: resourceComputeTargetHttpProxyUpdate, - Delete: resourceComputeTargetHttpProxyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeTargetHttpProxyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. 
Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "url_map": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the UrlMap resource that defines the mapping from URL -to the BackendService.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "proxy_bind": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - ForceNew: true, - Description: `This field only applies when the forwarding rule that references -this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "proxy_id": { - Type: schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeTargetHttpProxyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeTargetHttpProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - 
nameProp, err := expandComputeTargetHttpProxyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - urlMapProp, err := expandComputeTargetHttpProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(reflect.ValueOf(urlMapProp)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - proxyBindProp, err := expandComputeTargetHttpProxyProxyBind(d.Get("proxy_bind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_bind"); !isEmptyValue(reflect.ValueOf(proxyBindProp)) && (ok || !reflect.DeepEqual(v, proxyBindProp)) { - obj["proxyBind"] = proxyBindProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpProxies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new TargetHttpProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetHttpProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TargetHttpProxy: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/targetHttpProxies/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating TargetHttpProxy", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The 
resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create TargetHttpProxy: %s", err) - } - - log.Printf("[DEBUG] Finished creating TargetHttpProxy %q: %#v", d.Id(), res) - - return resourceComputeTargetHttpProxyRead(d, meta) -} - -func resourceComputeTargetHttpProxyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetHttpProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeTargetHttpProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeTargetHttpProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - if err := d.Set("description", flattenComputeTargetHttpProxyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - if err := d.Set("proxy_id", flattenComputeTargetHttpProxyProxyId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - if err := d.Set("name", flattenComputeTargetHttpProxyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpProxy: 
%s", err) - } - if err := d.Set("url_map", flattenComputeTargetHttpProxyUrlMap(res["urlMap"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - if err := d.Set("proxy_bind", flattenComputeTargetHttpProxyProxyBind(res["proxyBind"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - - return nil -} - -func resourceComputeTargetHttpProxyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetHttpProxy: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("url_map") { - obj := make(map[string]interface{}) - - urlMapProp, err := expandComputeTargetHttpProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/targetHttpProxies/{{name}}/setUrlMap") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating TargetHttpProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TargetHttpProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, 
project, "Updating TargetHttpProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeTargetHttpProxyRead(d, meta) -} - -func resourceComputeTargetHttpProxyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetHttpProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpProxies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting TargetHttpProxy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TargetHttpProxy") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting TargetHttpProxy", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting TargetHttpProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeTargetHttpProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/targetHttpProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/targetHttpProxies/{{name}}") - if err != nil { - return nil, 
fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeTargetHttpProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetHttpProxyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetHttpProxyProxyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeTargetHttpProxyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetHttpProxyUrlMap(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeTargetHttpProxyProxyBind(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeTargetHttpProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetHttpProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetHttpProxyUrlMap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("urlMaps", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for url_map: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeTargetHttpProxyProxyBind(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_https_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_https_proxy.go deleted file mode 100644 index 162c67b115..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_https_proxy.go +++ /dev/null @@ -1,687 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeTargetHttpsProxy() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeTargetHttpsProxyCreate, - Read: resourceComputeTargetHttpsProxyRead, - Update: resourceComputeTargetHttpsProxyUpdate, - Delete: resourceComputeTargetHttpsProxyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeTargetHttpsProxyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. 
The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "url_map": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the UrlMap resource that defines the mapping from URL -to the BackendService.`, - }, - "certificate_map": { - Type: schema.TypeString, - Optional: true, - Description: `A reference to the CertificateMap resource uri that identifies a certificate map -associated with the given target proxy. This field can only be set for global target proxies. -Accepted format is '//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificateMaps/{resourceName}'.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "proxy_bind": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - ForceNew: true, - Description: `This field only applies when the forwarding rule that references -this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, - }, - "quic_override": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "ENABLE", "DISABLE", ""}), - Description: `Specifies the QUIC override policy for this resource. This determines -whether the load balancer will attempt to negotiate QUIC with clients -or not. Can specify one of NONE, ENABLE, or DISABLE. If NONE is -specified, uses the QUIC policy with no user overrides, which is -equivalent to DISABLE. 
Default value: "NONE" Possible values: ["NONE", "ENABLE", "DISABLE"]`, - Default: "NONE", - }, - "ssl_certificates": { - Type: schema.TypeList, - Optional: true, - Description: `A list of SslCertificate resources that are used to authenticate -connections between users and the load balancer. At least one SSL -certificate must be specified.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "ssl_policy": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the SslPolicy resource that will be associated with -the TargetHttpsProxy resource. If not set, the TargetHttpsProxy -resource will not have any SSL policy configured.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "proxy_id": { - Type: schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeTargetHttpsProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeTargetHttpsProxyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); 
!isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - quicOverrideProp, err := expandComputeTargetHttpsProxyQuicOverride(d.Get("quic_override"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("quic_override"); !isEmptyValue(reflect.ValueOf(quicOverrideProp)) && (ok || !reflect.DeepEqual(v, quicOverrideProp)) { - obj["quicOverride"] = quicOverrideProp - } - sslCertificatesProp, err := expandComputeTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_certificates"); !isEmptyValue(reflect.ValueOf(sslCertificatesProp)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { - obj["sslCertificates"] = sslCertificatesProp - } - certificateMapProp, err := expandComputeTargetHttpsProxyCertificateMap(d.Get("certificate_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("certificate_map"); !isEmptyValue(reflect.ValueOf(certificateMapProp)) && (ok || !reflect.DeepEqual(v, certificateMapProp)) { - obj["certificateMap"] = certificateMapProp - } - sslPolicyProp, err := expandComputeTargetHttpsProxySslPolicy(d.Get("ssl_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_policy"); !isEmptyValue(reflect.ValueOf(sslPolicyProp)) && (ok || !reflect.DeepEqual(v, sslPolicyProp)) { - obj["sslPolicy"] = sslPolicyProp - } - urlMapProp, err := expandComputeTargetHttpsProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(reflect.ValueOf(urlMapProp)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - proxyBindProp, err := expandComputeTargetHttpsProxyProxyBind(d.Get("proxy_bind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_bind"); !isEmptyValue(reflect.ValueOf(proxyBindProp)) && (ok || 
!reflect.DeepEqual(v, proxyBindProp)) { - obj["proxyBind"] = proxyBindProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new TargetHttpsProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetHttpsProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TargetHttpsProxy: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/targetHttpsProxies/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating TargetHttpsProxy", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create TargetHttpsProxy: %s", err) - } - - log.Printf("[DEBUG] Finished creating TargetHttpsProxy %q: %#v", d.Id(), res) - - return resourceComputeTargetHttpsProxyRead(d, meta) -} - -func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for 
TargetHttpsProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeTargetHttpsProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeTargetHttpsProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("description", flattenComputeTargetHttpsProxyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("proxy_id", flattenComputeTargetHttpsProxyProxyId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("name", flattenComputeTargetHttpsProxyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("quic_override", flattenComputeTargetHttpsProxyQuicOverride(res["quicOverride"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("ssl_certificates", flattenComputeTargetHttpsProxySslCertificates(res["sslCertificates"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("certificate_map", flattenComputeTargetHttpsProxyCertificateMap(res["certificateMap"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("ssl_policy", flattenComputeTargetHttpsProxySslPolicy(res["sslPolicy"], d, config)); err != nil { - return fmt.Errorf("Error 
reading TargetHttpsProxy: %s", err) - } - if err := d.Set("url_map", flattenComputeTargetHttpsProxyUrlMap(res["urlMap"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("proxy_bind", flattenComputeTargetHttpsProxyProxyBind(res["proxyBind"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - - return nil -} - -func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetHttpsProxy: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("quic_override") { - obj := make(map[string]interface{}) - - quicOverrideProp, err := expandComputeTargetHttpsProxyQuicOverride(d.Get("quic_override"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("quic_override"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, quicOverrideProp)) { - obj["quicOverride"] = quicOverrideProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}/setQuicOverride") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating 
TargetHttpsProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating TargetHttpsProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("ssl_certificates") { - obj := make(map[string]interface{}) - - sslCertificatesProp, err := expandComputeTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_certificates"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { - obj["sslCertificates"] = sslCertificatesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/targetHttpsProxies/{{name}}/setSslCertificates") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating TargetHttpsProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("certificate_map") { - obj := make(map[string]interface{}) - - certificateMapProp, err := expandComputeTargetHttpsProxyCertificateMap(d.Get("certificate_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("certificate_map"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, certificateMapProp)) { - obj["certificateMap"] = certificateMapProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}/setCertificateMap") - if err 
!= nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating TargetHttpsProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("ssl_policy") { - obj := make(map[string]interface{}) - - sslPolicyProp, err := expandComputeTargetHttpsProxySslPolicy(d.Get("ssl_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslPolicyProp)) { - obj["sslPolicy"] = sslPolicyProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}/setSslPolicy") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating TargetHttpsProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("url_map") { - obj := make(map[string]interface{}) - - urlMapProp, err := expandComputeTargetHttpsProxyUrlMap(d.Get("url_map"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/targetHttpsProxies/{{name}}/setUrlMap") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating TargetHttpsProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeTargetHttpsProxyRead(d, meta) -} - -func resourceComputeTargetHttpsProxyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetHttpsProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting TargetHttpsProxy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, 
d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TargetHttpsProxy") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting TargetHttpsProxy", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting TargetHttpsProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeTargetHttpsProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/targetHttpsProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/targetHttpsProxies/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeTargetHttpsProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetHttpsProxyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetHttpsProxyProxyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeTargetHttpsProxyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetHttpsProxyQuicOverride(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || 
isEmptyValue(reflect.ValueOf(v)) { - return "NONE" - } - - return v -} - -func flattenComputeTargetHttpsProxySslCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeTargetHttpsProxyCertificateMap(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetHttpsProxySslPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeTargetHttpsProxyUrlMap(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeTargetHttpsProxyProxyBind(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeTargetHttpsProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetHttpsProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetHttpsProxyQuicOverride(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetHttpsProxySslCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, fmt.Errorf("Invalid value for ssl_certificates: nil") - } - f, err := parseGlobalFieldValue("sslCertificates", raw.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for ssl_certificates: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func 
expandComputeTargetHttpsProxyCertificateMap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetHttpsProxySslPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("sslPolicies", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for ssl_policy: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeTargetHttpsProxyUrlMap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("urlMaps", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for url_map: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeTargetHttpsProxyProxyBind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_instance.go deleted file mode 100644 index f195875f22..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_instance.go +++ /dev/null @@ -1,401 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeTargetInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeTargetInstanceCreate, - Read: resourceComputeTargetInstanceRead, - Delete: resourceComputeTargetInstanceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeTargetInstanceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "instance": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Compute instance VM handling traffic for this target instance. -Accepts the instance self-link, relative path -(e.g. 'projects/project/zones/zone/instances/instance') or name. If -name is given, the zone will default to the given zone or -the provider-default zone and the project will default to the -provider-level project.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "nat_policy": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"NO_NAT", ""}), - Description: `NAT option controlling how IPs are NAT'ed to the instance. -Currently only NO_NAT (default value) is supported. Default value: "NO_NAT" Possible values: ["NO_NAT"]`, - Default: "NO_NAT", - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the zone where the target instance resides.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeTargetInstanceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeTargetInstanceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeTargetInstanceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) 
&& (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - instanceProp, err := expandComputeTargetInstanceInstance(d.Get("instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance"); !isEmptyValue(reflect.ValueOf(instanceProp)) && (ok || !reflect.DeepEqual(v, instanceProp)) { - obj["instance"] = instanceProp - } - natPolicyProp, err := expandComputeTargetInstanceNatPolicy(d.Get("nat_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nat_policy"); !isEmptyValue(reflect.ValueOf(natPolicyProp)) && (ok || !reflect.DeepEqual(v, natPolicyProp)) { - obj["natPolicy"] = natPolicyProp - } - zoneProp, err := expandComputeTargetInstanceZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/targetInstances") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new TargetInstance: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetInstance: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TargetInstance: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, 
project, "Creating TargetInstance", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create TargetInstance: %s", err) - } - - log.Printf("[DEBUG] Finished creating TargetInstance %q: %#v", d.Id(), res) - - return resourceComputeTargetInstanceRead(d, meta) -} - -func resourceComputeTargetInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetInstance: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeTargetInstance %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading TargetInstance: %s", err) - } - - if err := d.Set("name", flattenComputeTargetInstanceName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetInstance: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeTargetInstanceCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetInstance: %s", err) - } - if err := d.Set("description", flattenComputeTargetInstanceDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetInstance: %s", err) - } - if err := d.Set("instance", 
flattenComputeTargetInstanceInstance(res["instance"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetInstance: %s", err) - } - if err := d.Set("nat_policy", flattenComputeTargetInstanceNatPolicy(res["natPolicy"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetInstance: %s", err) - } - if err := d.Set("zone", flattenComputeTargetInstanceZone(res["zone"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetInstance: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading TargetInstance: %s", err) - } - - return nil -} - -func resourceComputeTargetInstanceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetInstance: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting TargetInstance %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TargetInstance") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting TargetInstance", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting TargetInstance %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeTargetInstanceImport(d 
*schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/targetInstances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeTargetInstanceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetInstanceCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetInstanceDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetInstanceInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeTargetInstanceNatPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetInstanceZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeTargetInstanceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetInstanceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetInstanceInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - // This method returns a full self link from a partial self link. 
- if v == nil || v.(string) == "" { - // It does not try to construct anything from empty. - return "", nil - } else if strings.HasPrefix(v.(string), "https://") { - // Anything that starts with a URL scheme is assumed to be a self link worth using. - return v, nil - } else if strings.HasPrefix(v.(string), "projects/") { - // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { - // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - // Anything else is assumed to be a regional resource, with a partial link that begins with the resource name. - // This isn't very likely - it's a last-ditch effort to extract something useful here. We can do a better job - // as soon as MultiResourceRefs are working since we'll know the types that this field is supposed to point to. 
- url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil -} - -func expandComputeTargetInstanceNatPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetInstanceZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_ssl_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_ssl_proxy.go deleted file mode 100644 index 71517fe040..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_ssl_proxy.go +++ /dev/null @@ -1,656 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeTargetSslProxy() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeTargetSslProxyCreate, - Read: resourceComputeTargetSslProxyRead, - Update: resourceComputeTargetSslProxyUpdate, - Delete: resourceComputeTargetSslProxyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeTargetSslProxyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "backend_service": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the BackendService resource.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "certificate_map": { - Type: schema.TypeString, - Optional: true, - Description: `A reference to the CertificateMap resource uri that identifies a certificate map -associated with the given target proxy. This field can only be set for global target proxies. 
-Accepted format is '//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificateMaps/{resourceName}'.`, - ExactlyOneOf: []string{"ssl_certificates", "certificate_map"}, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "proxy_header": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "PROXY_V1", ""}), - Description: `Specifies the type of proxy header to append before sending data to -the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - }, - "ssl_certificates": { - Type: schema.TypeList, - Optional: true, - Description: `A list of SslCertificate resources that are used to authenticate -connections between users and the load balancer. At least one -SSL certificate must be specified.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - ExactlyOneOf: []string{"ssl_certificates", "certificate_map"}, - }, - "ssl_policy": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the SslPolicy resource that will be associated with -the TargetSslProxy resource. 
If not set, the TargetSslProxy -resource will not have any SSL policy configured.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "proxy_id": { - Type: schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeTargetSslProxyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeTargetSslProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeTargetSslProxyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - proxyHeaderProp, err := expandComputeTargetSslProxyProxyHeader(d.Get("proxy_header"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_header"); !isEmptyValue(reflect.ValueOf(proxyHeaderProp)) && (ok || !reflect.DeepEqual(v, proxyHeaderProp)) { - obj["proxyHeader"] = proxyHeaderProp - } - serviceProp, err := expandComputeTargetSslProxyBackendService(d.Get("backend_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend_service"); !isEmptyValue(reflect.ValueOf(serviceProp)) && (ok || !reflect.DeepEqual(v, 
serviceProp)) { - obj["service"] = serviceProp - } - sslCertificatesProp, err := expandComputeTargetSslProxySslCertificates(d.Get("ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_certificates"); !isEmptyValue(reflect.ValueOf(sslCertificatesProp)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { - obj["sslCertificates"] = sslCertificatesProp - } - certificateMapProp, err := expandComputeTargetSslProxyCertificateMap(d.Get("certificate_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("certificate_map"); !isEmptyValue(reflect.ValueOf(certificateMapProp)) && (ok || !reflect.DeepEqual(v, certificateMapProp)) { - obj["certificateMap"] = certificateMapProp - } - sslPolicyProp, err := expandComputeTargetSslProxySslPolicy(d.Get("ssl_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_policy"); !isEmptyValue(reflect.ValueOf(sslPolicyProp)) && (ok || !reflect.DeepEqual(v, sslPolicyProp)) { - obj["sslPolicy"] = sslPolicyProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new TargetSslProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetSslProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TargetSslProxy: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/targetSslProxies/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing 
id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating TargetSslProxy", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create TargetSslProxy: %s", err) - } - - log.Printf("[DEBUG] Finished creating TargetSslProxy %q: %#v", d.Id(), res) - - return resourceComputeTargetSslProxyRead(d, meta) -} - -func resourceComputeTargetSslProxyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetSslProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeTargetSslProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeTargetSslProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("description", flattenComputeTargetSslProxyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("proxy_id", flattenComputeTargetSslProxyProxyId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading 
TargetSslProxy: %s", err) - } - if err := d.Set("name", flattenComputeTargetSslProxyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("proxy_header", flattenComputeTargetSslProxyProxyHeader(res["proxyHeader"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("backend_service", flattenComputeTargetSslProxyBackendService(res["service"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("ssl_certificates", flattenComputeTargetSslProxySslCertificates(res["sslCertificates"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("certificate_map", flattenComputeTargetSslProxyCertificateMap(res["certificateMap"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("ssl_policy", flattenComputeTargetSslProxySslPolicy(res["sslPolicy"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - - return nil -} - -func resourceComputeTargetSslProxyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetSslProxy: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("proxy_header") { - obj := make(map[string]interface{}) - - proxyHeaderProp, err := expandComputeTargetSslProxyProxyHeader(d.Get("proxy_header"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_header"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, proxyHeaderProp)) { - obj["proxyHeader"] = proxyHeaderProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setProxyHeader") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating TargetSslProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("backend_service") { - obj := make(map[string]interface{}) - - serviceProp, err := expandComputeTargetSslProxyBackendService(d.Get("backend_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend_service"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceProp)) { - obj["service"] = serviceProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setBackendService") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, 
project, "Updating TargetSslProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("ssl_certificates") { - obj := make(map[string]interface{}) - - sslCertificatesProp, err := expandComputeTargetSslProxySslCertificates(d.Get("ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_certificates"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { - obj["sslCertificates"] = sslCertificatesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setSslCertificates") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating TargetSslProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("certificate_map") { - obj := make(map[string]interface{}) - - certificateMapProp, err := expandComputeTargetSslProxyCertificateMap(d.Get("certificate_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("certificate_map"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, certificateMapProp)) { - obj["certificateMap"] = certificateMapProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setCertificateMap") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, 
err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating TargetSslProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("ssl_policy") { - obj := make(map[string]interface{}) - - sslPolicyProp, err := expandComputeTargetSslProxySslPolicy(d.Get("ssl_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslPolicyProp)) { - obj["sslPolicy"] = sslPolicyProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setSslPolicy") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating TargetSslProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeTargetSslProxyRead(d, meta) -} - -func resourceComputeTargetSslProxyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err 
!= nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetSslProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting TargetSslProxy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TargetSslProxy") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting TargetSslProxy", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting TargetSslProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeTargetSslProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/targetSslProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/targetSslProxies/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeTargetSslProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetSslProxyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeTargetSslProxyProxyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeTargetSslProxyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetSslProxyProxyHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetSslProxyBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeTargetSslProxySslCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeTargetSslProxyCertificateMap(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetSslProxySslPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeTargetSslProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetSslProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetSslProxyProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetSslProxyBackendService(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for backend_service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeTargetSslProxySslCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, fmt.Errorf("Invalid value for ssl_certificates: nil") - } - f, err := parseGlobalFieldValue("sslCertificates", raw.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for ssl_certificates: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandComputeTargetSslProxyCertificateMap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetSslProxySslPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("sslPolicies", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for ssl_policy: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_tcp_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_tcp_proxy.go deleted file mode 100644 index e32c1af104..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_tcp_proxy.go +++ /dev/null @@ -1,478 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// 
This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeTargetTcpProxy() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeTargetTcpProxyCreate, - Read: resourceComputeTargetTcpProxyRead, - Update: resourceComputeTargetTcpProxyUpdate, - Delete: resourceComputeTargetTcpProxyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeTargetTcpProxyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "backend_service": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the BackendService resource.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "proxy_bind": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - ForceNew: true, - Description: `This field only applies when the forwarding rule that references -this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, - }, - "proxy_header": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "PROXY_V1", ""}), - Description: `Specifies the type of proxy header to append before sending data to -the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "proxy_id": { - Type: schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeTargetTcpProxyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeTargetTcpProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := 
expandComputeTargetTcpProxyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - proxyHeaderProp, err := expandComputeTargetTcpProxyProxyHeader(d.Get("proxy_header"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_header"); !isEmptyValue(reflect.ValueOf(proxyHeaderProp)) && (ok || !reflect.DeepEqual(v, proxyHeaderProp)) { - obj["proxyHeader"] = proxyHeaderProp - } - serviceProp, err := expandComputeTargetTcpProxyBackendService(d.Get("backend_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend_service"); !isEmptyValue(reflect.ValueOf(serviceProp)) && (ok || !reflect.DeepEqual(v, serviceProp)) { - obj["service"] = serviceProp - } - proxyBindProp, err := expandComputeTargetTcpProxyProxyBind(d.Get("proxy_bind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_bind"); !isEmptyValue(reflect.ValueOf(proxyBindProp)) && (ok || !reflect.DeepEqual(v, proxyBindProp)) { - obj["proxyBind"] = proxyBindProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new TargetTcpProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetTcpProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TargetTcpProxy: %s", err) - } - - // Store the ID now - id, err := 
replaceVars(d, config, "projects/{{project}}/global/targetTcpProxies/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating TargetTcpProxy", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create TargetTcpProxy: %s", err) - } - - log.Printf("[DEBUG] Finished creating TargetTcpProxy %q: %#v", d.Id(), res) - - return resourceComputeTargetTcpProxyRead(d, meta) -} - -func resourceComputeTargetTcpProxyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetTcpProxy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeTargetTcpProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeTargetTcpProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - if err := d.Set("description", flattenComputeTargetTcpProxyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } 
- if err := d.Set("proxy_id", flattenComputeTargetTcpProxyProxyId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - if err := d.Set("name", flattenComputeTargetTcpProxyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - if err := d.Set("proxy_header", flattenComputeTargetTcpProxyProxyHeader(res["proxyHeader"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - if err := d.Set("backend_service", flattenComputeTargetTcpProxyBackendService(res["service"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - if err := d.Set("proxy_bind", flattenComputeTargetTcpProxyProxyBind(res["proxyBind"], d, config)); err != nil { - return fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - - return nil -} - -func resourceComputeTargetTcpProxyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetTcpProxy: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("proxy_header") { - obj := make(map[string]interface{}) - - proxyHeaderProp, err := expandComputeTargetTcpProxyProxyHeader(d.Get("proxy_header"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_header"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, proxyHeaderProp)) { - obj["proxyHeader"] = proxyHeaderProp - } - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies/{{name}}/setProxyHeader") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating TargetTcpProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TargetTcpProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating TargetTcpProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("backend_service") { - obj := make(map[string]interface{}) - - serviceProp, err := expandComputeTargetTcpProxyBackendService(d.Get("backend_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend_service"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceProp)) { - obj["service"] = serviceProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies/{{name}}/setBackendService") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating TargetTcpProxy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TargetTcpProxy %q: %#v", d.Id(), res) - } - - err = ComputeOperationWaitTime( - config, res, project, "Updating TargetTcpProxy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return 
resourceComputeTargetTcpProxyRead(d, meta) -} - -func resourceComputeTargetTcpProxyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TargetTcpProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting TargetTcpProxy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TargetTcpProxy") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting TargetTcpProxy", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting TargetTcpProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeTargetTcpProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/targetTcpProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/targetTcpProxies/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeTargetTcpProxyCreationTimestamp(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetTcpProxyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetTcpProxyProxyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeTargetTcpProxyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetTcpProxyProxyHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetTcpProxyBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeTargetTcpProxyProxyBind(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeTargetTcpProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetTcpProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetTcpProxyProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetTcpProxyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for backend_service: %s", err) - } - 
return f.RelativeLink(), nil -} - -func expandComputeTargetTcpProxyProxyBind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_vpn_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_vpn_gateway.go deleted file mode 100644 index 6f26242131..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_vpn_gateway.go +++ /dev/null @@ -1,369 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceComputeVpnGateway() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeVpnGatewayCreate, - Read: resourceComputeVpnGatewayRead, - Delete: resourceComputeVpnGatewayDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeVpnGatewayImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. 
The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The network this VPN gateway is accepting traffic for.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The region this gateway should sit in.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "gateway_id": { - Type: schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeVpnGatewayDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := 
expandComputeVpnGatewayName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandComputeVpnGatewayNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - regionProp, err := expandComputeVpnGatewayRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetVpnGateways") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new VpnGateway: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for VpnGateway: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating VpnGateway: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating VpnGateway", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return 
fmt.Errorf("Error waiting to create VpnGateway: %s", err) - } - - log.Printf("[DEBUG] Finished creating VpnGateway %q: %#v", d.Id(), res) - - return resourceComputeVpnGatewayRead(d, meta) -} - -func resourceComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for VpnGateway: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeVpnGateway %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading VpnGateway: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeVpnGatewayCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnGateway: %s", err) - } - if err := d.Set("description", flattenComputeVpnGatewayDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnGateway: %s", err) - } - if err := d.Set("name", flattenComputeVpnGatewayName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnGateway: %s", err) - } - if err := d.Set("gateway_id", flattenComputeVpnGatewayGatewayId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnGateway: %s", err) - } - if err := d.Set("network", flattenComputeVpnGatewayNetwork(res["network"], d, config)); err != 
nil { - return fmt.Errorf("Error reading VpnGateway: %s", err) - } - if err := d.Set("region", flattenComputeVpnGatewayRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnGateway: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading VpnGateway: %s", err) - } - - return nil -} - -func resourceComputeVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for VpnGateway: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting VpnGateway %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "VpnGateway") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting VpnGateway", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting VpnGateway %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeVpnGatewayImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/targetVpnGateways/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, 
config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeVpnGatewayCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnGatewayDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnGatewayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnGatewayGatewayId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeVpnGatewayNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeVpnGatewayRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeVpnGatewayDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnGatewayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnGatewayNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", 
d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeVpnGatewayRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_vpn_tunnel.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_vpn_tunnel.go deleted file mode 100644 index dcb959b0f3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_vpn_tunnel.go +++ /dev/null @@ -1,873 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "bytes" - "fmt" - "log" - "net" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// validatePeerAddr returns false if a tunnel's peer_ip property -// is invalid. Currently, only addresses that collide with RFC -// 5735 (https://tools.ietf.org/html/rfc5735) fail validation. 
-func validatePeerAddr(i interface{}, val string) ([]string, []error) { - ip := net.ParseIP(i.(string)) - if ip == nil { - return nil, []error{fmt.Errorf("could not parse %q to IP address", val)} - } - for _, test := range invalidPeerAddrs { - if bytes.Compare(ip, test.from) >= 0 && bytes.Compare(ip, test.to) <= 0 { - return nil, []error{fmt.Errorf("address is invalid (is between %q and %q, conflicting with RFC5735)", test.from, test.to)} - } - } - return nil, nil -} - -// invalidPeerAddrs is a collection of IP address ranges that represent -// a conflict with RFC 5735 (https://tools.ietf.org/html/rfc5735#page-3). -// CIDR range notations in the RFC were converted to a (from, to) pair -// for easy checking with bytes.Compare. -var invalidPeerAddrs = []struct { - from net.IP - to net.IP -}{ - { - from: net.ParseIP("0.0.0.0"), - to: net.ParseIP("0.255.255.255"), - }, - { - from: net.ParseIP("10.0.0.0"), - to: net.ParseIP("10.255.255.255"), - }, - { - from: net.ParseIP("127.0.0.0"), - to: net.ParseIP("127.255.255.255"), - }, - { - from: net.ParseIP("169.254.0.0"), - to: net.ParseIP("169.254.255.255"), - }, - { - from: net.ParseIP("172.16.0.0"), - to: net.ParseIP("172.31.255.255"), - }, - { - from: net.ParseIP("192.0.0.0"), - to: net.ParseIP("192.0.0.255"), - }, - { - from: net.ParseIP("192.0.2.0"), - to: net.ParseIP("192.0.2.255"), - }, - { - from: net.ParseIP("192.88.99.0"), - to: net.ParseIP("192.88.99.255"), - }, - { - from: net.ParseIP("192.168.0.0"), - to: net.ParseIP("192.168.255.255"), - }, - { - from: net.ParseIP("198.18.0.0"), - to: net.ParseIP("198.19.255.255"), - }, - { - from: net.ParseIP("198.51.100.0"), - to: net.ParseIP("198.51.100.255"), - }, - { - from: net.ParseIP("203.0.113.0"), - to: net.ParseIP("203.0.113.255"), - }, - { - from: net.ParseIP("224.0.0.0"), - to: net.ParseIP("239.255.255.255"), - }, - { - from: net.ParseIP("240.0.0.0"), - to: net.ParseIP("255.255.255.255"), - }, - { - from: net.ParseIP("255.255.255.255"), - to: 
net.ParseIP("255.255.255.255"), - }, -} - -func getVpnTunnelLink(config *Config, project, region, tunnel, userAgent string) (string, error) { - if !strings.Contains(tunnel, "/") { - // Tunnel value provided is just the name, lookup the tunnel SelfLink - tunnelData, err := config.NewComputeClient(userAgent).VpnTunnels.Get( - project, region, tunnel).Do() - if err != nil { - return "", fmt.Errorf("Error reading tunnel: %s", err) - } - tunnel = tunnelData.SelfLink - } - - return tunnel, nil - -} - -func ResourceComputeVpnTunnel() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeVpnTunnelCreate, - Read: resourceComputeVpnTunnelRead, - Delete: resourceComputeVpnTunnelDelete, - - Importer: &schema.ResourceImporter{ - State: resourceComputeVpnTunnelImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. The name must be 1-63 characters long, and -comply with RFC1035. Specifically, the name must be 1-63 -characters long and match the regular expression -'[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the first character -must be a lowercase letter, and all following characters must -be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "shared_secret": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Shared secret used to set the secure session between the Cloud VPN -gateway and the peer VPN gateway.`, - Sensitive: true, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "ike_version": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `IKE protocol version to use when establishing the VPN tunnel with -peer VPN gateway. -Acceptable IKE versions are 1 or 2. Default version is 2.`, - Default: 2, - }, - "local_traffic_selector": { - Type: schema.TypeSet, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Local traffic selector to use when establishing the VPN tunnel with -peer VPN gateway. The value should be a CIDR formatted string, -for example '192.168.0.0/16'. The ranges should be disjoint. -Only IPv4 is supported.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "peer_external_gateway": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the peer side external VPN gateway to which this VPN tunnel is connected.`, - ConflictsWith: []string{"peer_gcp_gateway"}, - }, - "peer_external_gateway_interface": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The interface ID of the external VPN gateway to which this VPN tunnel is connected.`, - }, - "peer_gcp_gateway": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. 
-If provided, the VPN tunnel will automatically use the same vpn_gateway_interface -ID in the peer GCP VPN gateway. -This field must reference a 'google_compute_ha_vpn_gateway' resource.`, - ConflictsWith: []string{"peer_external_gateway"}, - }, - "peer_ip": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validatePeerAddr, - Description: `IP address of the peer VPN gateway. Only IPv4 is supported.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The region where the tunnel is located. If unset, is set to the region of 'target_vpn_gateway'.`, - }, - "remote_traffic_selector": { - Type: schema.TypeSet, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Remote traffic selector to use when establishing the VPN tunnel with -peer VPN gateway. The value should be a CIDR formatted string, -for example '192.168.0.0/16'. The ranges should be disjoint. -Only IPv4 is supported.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "router": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of router resource to be used for dynamic routing.`, - }, - "target_vpn_gateway": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the Target VPN gateway with which this VPN tunnel is -associated.`, - }, - "vpn_gateway": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the VPN gateway with which this VPN tunnel is associated. -This must be used if a High Availability VPN gateway resource is created. 
-This field must reference a 'google_compute_ha_vpn_gateway' resource.`, - }, - "vpn_gateway_interface": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The interface ID of the VPN gateway with which this VPN tunnel is associated.`, - }, - "creation_timestamp": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "detailed_status": { - Type: schema.TypeString, - Computed: true, - Description: `Detailed status message for the VPN tunnel.`, - }, - "shared_secret_hash": { - Type: schema.TypeString, - Computed: true, - Description: `Hash of the shared secret.`, - }, - "tunnel_id": { - Type: schema.TypeString, - Computed: true, - Description: `The unique identifier for the resource. This identifier is defined by the server.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeVpnTunnelName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeVpnTunnelDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - targetVpnGatewayProp, err := expandComputeVpnTunnelTargetVpnGateway(d.Get("target_vpn_gateway"), d, config) - if err != nil { - 
return err - } else if v, ok := d.GetOkExists("target_vpn_gateway"); !isEmptyValue(reflect.ValueOf(targetVpnGatewayProp)) && (ok || !reflect.DeepEqual(v, targetVpnGatewayProp)) { - obj["targetVpnGateway"] = targetVpnGatewayProp - } - vpnGatewayProp, err := expandComputeVpnTunnelVpnGateway(d.Get("vpn_gateway"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vpn_gateway"); !isEmptyValue(reflect.ValueOf(vpnGatewayProp)) && (ok || !reflect.DeepEqual(v, vpnGatewayProp)) { - obj["vpnGateway"] = vpnGatewayProp - } - vpnGatewayInterfaceProp, err := expandComputeVpnTunnelVpnGatewayInterface(d.Get("vpn_gateway_interface"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vpn_gateway_interface"); ok || !reflect.DeepEqual(v, vpnGatewayInterfaceProp) { - obj["vpnGatewayInterface"] = vpnGatewayInterfaceProp - } - peerExternalGatewayProp, err := expandComputeVpnTunnelPeerExternalGateway(d.Get("peer_external_gateway"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peer_external_gateway"); !isEmptyValue(reflect.ValueOf(peerExternalGatewayProp)) && (ok || !reflect.DeepEqual(v, peerExternalGatewayProp)) { - obj["peerExternalGateway"] = peerExternalGatewayProp - } - peerExternalGatewayInterfaceProp, err := expandComputeVpnTunnelPeerExternalGatewayInterface(d.Get("peer_external_gateway_interface"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peer_external_gateway_interface"); ok || !reflect.DeepEqual(v, peerExternalGatewayInterfaceProp) { - obj["peerExternalGatewayInterface"] = peerExternalGatewayInterfaceProp - } - peerGcpGatewayProp, err := expandComputeVpnTunnelPeerGcpGateway(d.Get("peer_gcp_gateway"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peer_gcp_gateway"); !isEmptyValue(reflect.ValueOf(peerGcpGatewayProp)) && (ok || !reflect.DeepEqual(v, peerGcpGatewayProp)) { - obj["peerGcpGateway"] = peerGcpGatewayProp - } - 
routerProp, err := expandComputeVpnTunnelRouter(d.Get("router"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("router"); !isEmptyValue(reflect.ValueOf(routerProp)) && (ok || !reflect.DeepEqual(v, routerProp)) { - obj["router"] = routerProp - } - peerIpProp, err := expandComputeVpnTunnelPeerIp(d.Get("peer_ip"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peer_ip"); !isEmptyValue(reflect.ValueOf(peerIpProp)) && (ok || !reflect.DeepEqual(v, peerIpProp)) { - obj["peerIp"] = peerIpProp - } - sharedSecretProp, err := expandComputeVpnTunnelSharedSecret(d.Get("shared_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("shared_secret"); !isEmptyValue(reflect.ValueOf(sharedSecretProp)) && (ok || !reflect.DeepEqual(v, sharedSecretProp)) { - obj["sharedSecret"] = sharedSecretProp - } - ikeVersionProp, err := expandComputeVpnTunnelIkeVersion(d.Get("ike_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ike_version"); !isEmptyValue(reflect.ValueOf(ikeVersionProp)) && (ok || !reflect.DeepEqual(v, ikeVersionProp)) { - obj["ikeVersion"] = ikeVersionProp - } - localTrafficSelectorProp, err := expandComputeVpnTunnelLocalTrafficSelector(d.Get("local_traffic_selector"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("local_traffic_selector"); !isEmptyValue(reflect.ValueOf(localTrafficSelectorProp)) && (ok || !reflect.DeepEqual(v, localTrafficSelectorProp)) { - obj["localTrafficSelector"] = localTrafficSelectorProp - } - remoteTrafficSelectorProp, err := expandComputeVpnTunnelRemoteTrafficSelector(d.Get("remote_traffic_selector"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("remote_traffic_selector"); !isEmptyValue(reflect.ValueOf(remoteTrafficSelectorProp)) && (ok || !reflect.DeepEqual(v, remoteTrafficSelectorProp)) { - obj["remoteTrafficSelector"] = remoteTrafficSelectorProp - } - 
regionProp, err := expandComputeVpnTunnelRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - obj, err = resourceComputeVpnTunnelEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnTunnels") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new VpnTunnel: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for VpnTunnel: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating VpnTunnel: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = ComputeOperationWaitTime( - config, res, project, "Creating VpnTunnel", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create VpnTunnel: %s", err) - } - - log.Printf("[DEBUG] Finished creating VpnTunnel %q: %#v", d.Id(), res) - - return resourceComputeVpnTunnelRead(d, meta) -} - -func resourceComputeVpnTunnelRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for VpnTunnel: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeVpnTunnel %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - - if err := d.Set("tunnel_id", flattenComputeVpnTunnelTunnelId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeVpnTunnelCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("name", flattenComputeVpnTunnelName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("description", flattenComputeVpnTunnelDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("target_vpn_gateway", flattenComputeVpnTunnelTargetVpnGateway(res["targetVpnGateway"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("vpn_gateway", flattenComputeVpnTunnelVpnGateway(res["vpnGateway"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("vpn_gateway_interface", flattenComputeVpnTunnelVpnGatewayInterface(res["vpnGatewayInterface"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - 
if err := d.Set("peer_external_gateway", flattenComputeVpnTunnelPeerExternalGateway(res["peerExternalGateway"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("peer_external_gateway_interface", flattenComputeVpnTunnelPeerExternalGatewayInterface(res["peerExternalGatewayInterface"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("peer_gcp_gateway", flattenComputeVpnTunnelPeerGcpGateway(res["peerGcpGateway"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("router", flattenComputeVpnTunnelRouter(res["router"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("peer_ip", flattenComputeVpnTunnelPeerIp(res["peerIp"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("shared_secret_hash", flattenComputeVpnTunnelSharedSecretHash(res["sharedSecretHash"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("ike_version", flattenComputeVpnTunnelIkeVersion(res["ikeVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("local_traffic_selector", flattenComputeVpnTunnelLocalTrafficSelector(res["localTrafficSelector"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("remote_traffic_selector", flattenComputeVpnTunnelRemoteTrafficSelector(res["remoteTrafficSelector"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("detailed_status", flattenComputeVpnTunnelDetailedStatus(res["detailedStatus"], d, config)); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("region", flattenComputeVpnTunnelRegion(res["region"], d, config)); err != nil { - return 
fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading VpnTunnel: %s", err) - } - - return nil -} - -func resourceComputeVpnTunnelDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for VpnTunnel: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting VpnTunnel %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "VpnTunnel") - } - - err = ComputeOperationWaitTime( - config, res, project, "Deleting VpnTunnel", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting VpnTunnel %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeVpnTunnelImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/vpnTunnels/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") - if err != 
nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenComputeVpnTunnelTunnelId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelTargetVpnGateway(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeVpnTunnelVpnGateway(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeVpnTunnelVpnGatewayInterface(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeVpnTunnelPeerExternalGateway(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeVpnTunnelPeerExternalGatewayInterface(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as 
float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeVpnTunnelPeerGcpGateway(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeVpnTunnelRouter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeVpnTunnelPeerIp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelSharedSecretHash(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelIkeVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenComputeVpnTunnelLocalTrafficSelector(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenComputeVpnTunnelRemoteTrafficSelector(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenComputeVpnTunnelDetailedStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func 
expandComputeVpnTunnelName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelTargetVpnGateway(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("targetVpnGateways", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for target_vpn_gateway: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeVpnTunnelVpnGateway(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("vpnGateways", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for vpn_gateway: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeVpnTunnelVpnGatewayInterface(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelPeerExternalGateway(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("externalVpnGateways", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for peer_external_gateway: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeVpnTunnelPeerExternalGatewayInterface(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelPeerGcpGateway(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("vpnGateways", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for peer_gcp_gateway: %s", err) - } - return 
f.RelativeLink(), nil -} - -func expandComputeVpnTunnelRouter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || v.(string) == "" { - return "", nil - } - f, err := parseRegionalFieldValue("routers", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for router: %s", err) - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+f.RelativeLink()) - if err != nil { - return nil, err - } - - return url, nil -} - -func expandComputeVpnTunnelPeerIp(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelSharedSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelIkeVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelLocalTrafficSelector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandComputeVpnTunnelRemoteTrafficSelector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandComputeVpnTunnelRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeVpnTunnelEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - f, err := parseRegionalFieldValue("targetVpnGateways", d.Get("target_vpn_gateway").(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, err - } - if _, ok := 
d.GetOk("project"); !ok { - if err := d.Set("project", f.Project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } - if _, ok := d.GetOk("region"); !ok { - if err := d.Set("region", f.Region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - } - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_analysis_note.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_analysis_note.go deleted file mode 100644 index a0498e1d97..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_analysis_note.go +++ /dev/null @@ -1,738 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceContainerAnalysisNote() *schema.Resource { - return &schema.Resource{ - Create: resourceContainerAnalysisNoteCreate, - Read: resourceContainerAnalysisNoteRead, - Update: resourceContainerAnalysisNoteUpdate, - Delete: resourceContainerAnalysisNoteDelete, - - Importer: &schema.ResourceImporter{ - State: resourceContainerAnalysisNoteImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "attestation_authority": { - Type: schema.TypeList, - Required: true, - Description: `Note kind that represents a logical attestation "role" or "authority". -For example, an organization might have one AttestationAuthority for -"QA" and one for "build". This Note is intended to act strictly as a -grouping mechanism for the attached Occurrences (Attestations). This -grouping mechanism also provides a security boundary, since IAM ACLs -gate the ability for a principle to attach an Occurrence to a given -Note. It also provides a single point of lookup to find all attached -Attestation Occurrences, even if they don't all live in the same -project.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hint": { - Type: schema.TypeList, - Required: true, - Description: `This submessage provides human-readable hints about the purpose of -the AttestationAuthority. Because the name of a Note acts as its -resource reference, it is important to disambiguate the canonical -name of the Note (which might be a UUID for security purposes) -from "readable" names more suitable for debug output. 
Note that -these hints should NOT be used to look up AttestationAuthorities -in security sensitive contexts, such as when looking up -Attestations to verify.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "human_readable_name": { - Type: schema.TypeString, - Required: true, - Description: `The human readable name of this Attestation Authority, for -example "qa".`, - }, - }, - }, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the note.`, - }, - "expiration_time": { - Type: schema.TypeString, - Optional: true, - Description: `Time of expiration for this note. Leave empty if note does not expire.`, - }, - "long_description": { - Type: schema.TypeString, - Optional: true, - Description: `A detailed description of the note`, - }, - "related_note_names": { - Type: schema.TypeSet, - Optional: true, - Description: `Names of other notes related to this note.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "related_url": { - Type: schema.TypeSet, - Optional: true, - Description: `URLs associated with this note and related metadata.`, - Elem: containeranalysisNoteRelatedUrlSchema(), - // Default schema.HashSchema is used. 
- }, - "short_description": { - Type: schema.TypeString, - Optional: true, - Description: `A one sentence description of the note.`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time this note was created.`, - }, - "kind": { - Type: schema.TypeString, - Computed: true, - Description: `The type of analysis this note describes`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time this note was last updated.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func containeranalysisNoteRelatedUrlSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "url": { - Type: schema.TypeString, - Required: true, - Description: `Specific URL associated with the resource.`, - }, - "label": { - Type: schema.TypeString, - Optional: true, - Description: `Label to describe usage of the URL`, - }, - }, - } -} - -func resourceContainerAnalysisNoteCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandContainerAnalysisNoteName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - shortDescriptionProp, err := expandContainerAnalysisNoteShortDescription(d.Get("short_description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("short_description"); !isEmptyValue(reflect.ValueOf(shortDescriptionProp)) && (ok || !reflect.DeepEqual(v, shortDescriptionProp)) { - obj["shortDescription"] = shortDescriptionProp - } - longDescriptionProp, err := 
expandContainerAnalysisNoteLongDescription(d.Get("long_description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("long_description"); !isEmptyValue(reflect.ValueOf(longDescriptionProp)) && (ok || !reflect.DeepEqual(v, longDescriptionProp)) { - obj["longDescription"] = longDescriptionProp - } - relatedUrlProp, err := expandContainerAnalysisNoteRelatedUrl(d.Get("related_url"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("related_url"); !isEmptyValue(reflect.ValueOf(relatedUrlProp)) && (ok || !reflect.DeepEqual(v, relatedUrlProp)) { - obj["relatedUrl"] = relatedUrlProp - } - expirationTimeProp, err := expandContainerAnalysisNoteExpirationTime(d.Get("expiration_time"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("expiration_time"); !isEmptyValue(reflect.ValueOf(expirationTimeProp)) && (ok || !reflect.DeepEqual(v, expirationTimeProp)) { - obj["expirationTime"] = expirationTimeProp - } - relatedNoteNamesProp, err := expandContainerAnalysisNoteRelatedNoteNames(d.Get("related_note_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("related_note_names"); !isEmptyValue(reflect.ValueOf(relatedNoteNamesProp)) && (ok || !reflect.DeepEqual(v, relatedNoteNamesProp)) { - obj["relatedNoteNames"] = relatedNoteNamesProp - } - attestationAuthorityProp, err := expandContainerAnalysisNoteAttestationAuthority(d.Get("attestation_authority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attestation_authority"); !isEmptyValue(reflect.ValueOf(attestationAuthorityProp)) && (ok || !reflect.DeepEqual(v, attestationAuthorityProp)) { - obj["attestationAuthority"] = attestationAuthorityProp - } - - obj, err = resourceContainerAnalysisNoteEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "projects/{{project}}/notes/{{name}}") - if err != nil { - return err - } - 
mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/notes?noteId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Note: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Note: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Note: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/notes/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Note %q: %#v", d.Id(), res) - - return resourceContainerAnalysisNoteRead(d, meta) -} - -func resourceContainerAnalysisNoteRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/notes/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Note: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ContainerAnalysisNote %q", d.Id())) - } - - res, 
err = resourceContainerAnalysisNoteDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing ContainerAnalysisNote because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Note: %s", err) - } - - if err := d.Set("name", flattenContainerAnalysisNoteName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("short_description", flattenContainerAnalysisNoteShortDescription(res["shortDescription"], d, config)); err != nil { - return fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("long_description", flattenContainerAnalysisNoteLongDescription(res["longDescription"], d, config)); err != nil { - return fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("kind", flattenContainerAnalysisNoteKind(res["kind"], d, config)); err != nil { - return fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("related_url", flattenContainerAnalysisNoteRelatedUrl(res["relatedUrl"], d, config)); err != nil { - return fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("expiration_time", flattenContainerAnalysisNoteExpirationTime(res["expirationTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("create_time", flattenContainerAnalysisNoteCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("update_time", flattenContainerAnalysisNoteUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("related_note_names", flattenContainerAnalysisNoteRelatedNoteNames(res["relatedNoteNames"], d, config)); err != nil { - return fmt.Errorf("Error reading Note: %s", err) - } - if 
err := d.Set("attestation_authority", flattenContainerAnalysisNoteAttestationAuthority(res["attestationAuthority"], d, config)); err != nil { - return fmt.Errorf("Error reading Note: %s", err) - } - - return nil -} - -func resourceContainerAnalysisNoteUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Note: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - shortDescriptionProp, err := expandContainerAnalysisNoteShortDescription(d.Get("short_description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("short_description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, shortDescriptionProp)) { - obj["shortDescription"] = shortDescriptionProp - } - longDescriptionProp, err := expandContainerAnalysisNoteLongDescription(d.Get("long_description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("long_description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, longDescriptionProp)) { - obj["longDescription"] = longDescriptionProp - } - relatedUrlProp, err := expandContainerAnalysisNoteRelatedUrl(d.Get("related_url"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("related_url"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, relatedUrlProp)) { - obj["relatedUrl"] = relatedUrlProp - } - expirationTimeProp, err := expandContainerAnalysisNoteExpirationTime(d.Get("expiration_time"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("expiration_time"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, expirationTimeProp)) { - obj["expirationTime"] = expirationTimeProp - } - 
relatedNoteNamesProp, err := expandContainerAnalysisNoteRelatedNoteNames(d.Get("related_note_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("related_note_names"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, relatedNoteNamesProp)) { - obj["relatedNoteNames"] = relatedNoteNamesProp - } - attestationAuthorityProp, err := expandContainerAnalysisNoteAttestationAuthority(d.Get("attestation_authority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attestation_authority"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attestationAuthorityProp)) { - obj["attestationAuthority"] = attestationAuthorityProp - } - - obj, err = resourceContainerAnalysisNoteEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "projects/{{project}}/notes/{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/notes/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Note %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("short_description") { - updateMask = append(updateMask, "shortDescription") - } - - if d.HasChange("long_description") { - updateMask = append(updateMask, "longDescription") - } - - if d.HasChange("related_url") { - updateMask = append(updateMask, "relatedUrl") - } - - if d.HasChange("expiration_time") { - updateMask = append(updateMask, "expirationTime") - } - - if d.HasChange("related_note_names") { - updateMask = append(updateMask, "relatedNoteNames") - } - - if d.HasChange("attestation_authority") { - updateMask = append(updateMask, "attestationAuthority") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": 
strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Note %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Note %q: %#v", d.Id(), res) - } - - return resourceContainerAnalysisNoteRead(d, meta) -} - -func resourceContainerAnalysisNoteDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Note: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "projects/{{project}}/notes/{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/notes/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Note %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Note") - } - - log.Printf("[DEBUG] Finished deleting Note %q: %#v", d.Id(), res) - return nil -} - -func resourceContainerAnalysisNoteImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - 
"projects/(?P[^/]+)/notes/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/notes/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenContainerAnalysisNoteName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenContainerAnalysisNoteShortDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteLongDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteKind(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteRelatedUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(schema.HashResource(containeranalysisNoteRelatedUrlSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "url": flattenContainerAnalysisNoteRelatedUrlUrl(original["url"], d, config), - "label": flattenContainerAnalysisNoteRelatedUrlLabel(original["label"], d, config), - }) - } - return transformed -} -func flattenContainerAnalysisNoteRelatedUrlUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteRelatedUrlLabel(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteExpirationTime(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteRelatedNoteNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenContainerAnalysisNoteAttestationAuthority(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hint"] = - flattenContainerAnalysisNoteAttestationAuthorityHint(original["hint"], d, config) - return []interface{}{transformed} -} -func flattenContainerAnalysisNoteAttestationAuthorityHint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["human_readable_name"] = - flattenContainerAnalysisNoteAttestationAuthorityHintHumanReadableName(original["humanReadableName"], d, config) - return []interface{}{transformed} -} -func flattenContainerAnalysisNoteAttestationAuthorityHintHumanReadableName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandContainerAnalysisNoteName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisNoteShortDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisNoteLongDescription(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisNoteRelatedUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err := expandContainerAnalysisNoteRelatedUrlUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - transformedLabel, err := expandContainerAnalysisNoteRelatedUrlLabel(original["label"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLabel); val.IsValid() && !isEmptyValue(val) { - transformed["label"] = transformedLabel - } - - req = append(req, transformed) - } - return req, nil -} - -func expandContainerAnalysisNoteRelatedUrlUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisNoteRelatedUrlLabel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisNoteExpirationTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisNoteRelatedNoteNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandContainerAnalysisNoteAttestationAuthority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHint, err := 
expandContainerAnalysisNoteAttestationAuthorityHint(original["hint"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHint); val.IsValid() && !isEmptyValue(val) { - transformed["hint"] = transformedHint - } - - return transformed, nil -} - -func expandContainerAnalysisNoteAttestationAuthorityHint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHumanReadableName, err := expandContainerAnalysisNoteAttestationAuthorityHintHumanReadableName(original["human_readable_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHumanReadableName); val.IsValid() && !isEmptyValue(val) { - transformed["humanReadableName"] = transformedHumanReadableName - } - - return transformed, nil -} - -func expandContainerAnalysisNoteAttestationAuthorityHintHumanReadableName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceContainerAnalysisNoteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // Field was renamed in GA API - obj["attestation"] = obj["attestationAuthority"] - delete(obj, "attestationAuthority") - - return obj, nil -} - -func resourceContainerAnalysisNoteDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // Field was renamed in GA API - res["attestationAuthority"] = res["attestation"] - delete(res, "attestation") - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_analysis_occurrence.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_analysis_occurrence.go deleted file mode 100644 index 926bf3ebbd..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_analysis_occurrence.go +++ /dev/null @@ -1,645 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceContainerAnalysisOccurrence() *schema.Resource { - return &schema.Resource{ - Create: resourceContainerAnalysisOccurrenceCreate, - Read: resourceContainerAnalysisOccurrenceRead, - Update: resourceContainerAnalysisOccurrenceUpdate, - Delete: resourceContainerAnalysisOccurrenceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceContainerAnalysisOccurrenceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "attestation": { - Type: schema.TypeList, - Required: true, - Description: `Occurrence that represents a single "attestation". The authenticity -of an attestation can be verified using the attached signature. -If the verifier trusts the public key of the signer, then verifying -the signature is sufficient to establish trust. 
In this circumstance, -the authority to which this attestation is attached is primarily -useful for lookup (how to find this attestation if you already -know the authority and artifact to be verified) and intent (for -which authority this attestation was intended to sign.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "serialized_payload": { - Type: schema.TypeString, - Required: true, - Description: `The serialized payload that is verified by one or -more signatures. A base64-encoded string.`, - }, - "signatures": { - Type: schema.TypeSet, - Required: true, - Description: `One or more signatures over serializedPayload. -Verifier implementations should consider this attestation -message verified if at least one signature verifies -serializedPayload. See Signature in common.proto for more -details on signature structure and verification.`, - Elem: containeranalysisOccurrenceAttestationSignaturesSchema(), - // Default schema.HashSchema is used. - }, - }, - }, - }, - "note_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The analysis note associated with this occurrence, in the form of -projects/[PROJECT]/notes/[NOTE_ID]. This field can be used as a -filter in list requests.`, - }, - "resource_uri": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Required. Immutable. A URI that represents the resource for which -the occurrence applies. For example, -https://gcr.io/project/image@sha256:123abc for a Docker image.`, - }, - "remediation": { - Type: schema.TypeString, - Optional: true, - Description: `A description of actions that can be taken to remedy the note.`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time when the repository was created.`, - }, - "kind": { - Type: schema.TypeString, - Computed: true, - Description: `The note kind which explicitly denotes which of the occurrence -details are specified. 
This field can be used as a filter in list -requests.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the occurrence.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time when the repository was last updated.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func containeranalysisOccurrenceAttestationSignaturesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "public_key_id": { - Type: schema.TypeString, - Required: true, - Description: `The identifier for the public key that verifies this -signature. MUST be an RFC3986 conformant -URI. * When possible, the key id should be an -immutable reference, such as a cryptographic digest. -Examples of valid values: - -* OpenPGP V4 public key fingerprint. See https://www.iana.org/assignments/uri-schemes/prov/openpgp4fpr - for more details on this scheme. - * 'openpgp4fpr:74FAF3B861BDA0870C7B6DEF607E48D2A663AEEA' -* RFC6920 digest-named SubjectPublicKeyInfo (digest of the DER serialization): - * "ni:///sha-256;cD9o9Cq6LG3jD0iKXqEi_vdjJGecm_iXkbqVoScViaU"`, - }, - "signature": { - Type: schema.TypeString, - Optional: true, - Description: `The content of the signature, an opaque bytestring. -The payload that this signature verifies MUST be -unambiguously provided with the Signature during -verification. A wrapper message might provide the -payload explicitly. 
Alternatively, a message might -have a canonical serialization that can always be -unambiguously computed to derive the payload.`, - }, - }, - } -} - -func resourceContainerAnalysisOccurrenceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - resourceUriProp, err := expandContainerAnalysisOccurrenceResourceUri(d.Get("resource_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("resource_uri"); !isEmptyValue(reflect.ValueOf(resourceUriProp)) && (ok || !reflect.DeepEqual(v, resourceUriProp)) { - obj["resourceUri"] = resourceUriProp - } - noteNameProp, err := expandContainerAnalysisOccurrenceNoteName(d.Get("note_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("note_name"); !isEmptyValue(reflect.ValueOf(noteNameProp)) && (ok || !reflect.DeepEqual(v, noteNameProp)) { - obj["noteName"] = noteNameProp - } - remediationProp, err := expandContainerAnalysisOccurrenceRemediation(d.Get("remediation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("remediation"); !isEmptyValue(reflect.ValueOf(remediationProp)) && (ok || !reflect.DeepEqual(v, remediationProp)) { - obj["remediation"] = remediationProp - } - attestationProp, err := expandContainerAnalysisOccurrenceAttestation(d.Get("attestation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attestation"); !isEmptyValue(reflect.ValueOf(attestationProp)) && (ok || !reflect.DeepEqual(v, attestationProp)) { - obj["attestation"] = attestationProp - } - - obj, err = resourceContainerAnalysisOccurrenceEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "{{note_name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := 
replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/occurrences") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Occurrence: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Occurrence: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Occurrence: %s", err) - } - if err := d.Set("name", flattenContainerAnalysisOccurrenceName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/occurrences/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Occurrence %q: %#v", d.Id(), res) - - return resourceContainerAnalysisOccurrenceRead(d, meta) -} - -func resourceContainerAnalysisOccurrenceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/occurrences/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Occurrence: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, 
"GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ContainerAnalysisOccurrence %q", d.Id())) - } - - res, err = resourceContainerAnalysisOccurrenceDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing ContainerAnalysisOccurrence because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Occurrence: %s", err) - } - - if err := d.Set("name", flattenContainerAnalysisOccurrenceName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("resource_uri", flattenContainerAnalysisOccurrenceResourceUri(res["resourceUri"], d, config)); err != nil { - return fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("note_name", flattenContainerAnalysisOccurrenceNoteName(res["noteName"], d, config)); err != nil { - return fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("kind", flattenContainerAnalysisOccurrenceKind(res["kind"], d, config)); err != nil { - return fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("remediation", flattenContainerAnalysisOccurrenceRemediation(res["remediation"], d, config)); err != nil { - return fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("create_time", flattenContainerAnalysisOccurrenceCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("update_time", flattenContainerAnalysisOccurrenceUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("attestation", flattenContainerAnalysisOccurrenceAttestation(res["attestation"], d, config)); err != nil { - return 
fmt.Errorf("Error reading Occurrence: %s", err) - } - - return nil -} - -func resourceContainerAnalysisOccurrenceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Occurrence: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - remediationProp, err := expandContainerAnalysisOccurrenceRemediation(d.Get("remediation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("remediation"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, remediationProp)) { - obj["remediation"] = remediationProp - } - attestationProp, err := expandContainerAnalysisOccurrenceAttestation(d.Get("attestation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attestation"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attestationProp)) { - obj["attestation"] = attestationProp - } - - obj, err = resourceContainerAnalysisOccurrenceUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "{{note_name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/occurrences/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Occurrence %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("remediation") { - updateMask = append(updateMask, "remediation") - } - - if d.HasChange("attestation") { - updateMask = append(updateMask, "attestation") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, 
map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Occurrence %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Occurrence %q: %#v", d.Id(), res) - } - - return resourceContainerAnalysisOccurrenceRead(d, meta) -} - -func resourceContainerAnalysisOccurrenceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Occurrence: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "{{note_name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/occurrences/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Occurrence %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Occurrence") - } - - log.Printf("[DEBUG] Finished deleting Occurrence %q: %#v", d.Id(), res) - return nil -} - -func resourceContainerAnalysisOccurrenceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - 
config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/occurrences/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/occurrences/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenContainerAnalysisOccurrenceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenContainerAnalysisOccurrenceResourceUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceNoteName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceKind(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceRemediation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceAttestation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["serialized_payload"] = - flattenContainerAnalysisOccurrenceAttestationSerializedPayload(original["serializedPayload"], d, config) - transformed["signatures"] = - 
flattenContainerAnalysisOccurrenceAttestationSignatures(original["signatures"], d, config) - return []interface{}{transformed} -} -func flattenContainerAnalysisOccurrenceAttestationSerializedPayload(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceAttestationSignatures(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(schema.HashResource(containeranalysisOccurrenceAttestationSignaturesSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "signature": flattenContainerAnalysisOccurrenceAttestationSignaturesSignature(original["signature"], d, config), - "public_key_id": flattenContainerAnalysisOccurrenceAttestationSignaturesPublicKeyId(original["publicKeyId"], d, config), - }) - } - return transformed -} -func flattenContainerAnalysisOccurrenceAttestationSignaturesSignature(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceAttestationSignaturesPublicKeyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandContainerAnalysisOccurrenceResourceUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisOccurrenceNoteName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisOccurrenceRemediation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisOccurrenceAttestation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := 
v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSerializedPayload, err := expandContainerAnalysisOccurrenceAttestationSerializedPayload(original["serialized_payload"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSerializedPayload); val.IsValid() && !isEmptyValue(val) { - transformed["serializedPayload"] = transformedSerializedPayload - } - - transformedSignatures, err := expandContainerAnalysisOccurrenceAttestationSignatures(original["signatures"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSignatures); val.IsValid() && !isEmptyValue(val) { - transformed["signatures"] = transformedSignatures - } - - return transformed, nil -} - -func expandContainerAnalysisOccurrenceAttestationSerializedPayload(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisOccurrenceAttestationSignatures(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSignature, err := expandContainerAnalysisOccurrenceAttestationSignaturesSignature(original["signature"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSignature); val.IsValid() && !isEmptyValue(val) { - transformed["signature"] = transformedSignature - } - - transformedPublicKeyId, err := expandContainerAnalysisOccurrenceAttestationSignaturesPublicKeyId(original["public_key_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPublicKeyId); val.IsValid() && 
!isEmptyValue(val) { - transformed["publicKeyId"] = transformedPublicKeyId - } - - req = append(req, transformed) - } - return req, nil -} - -func expandContainerAnalysisOccurrenceAttestationSignaturesSignature(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisOccurrenceAttestationSignaturesPublicKeyId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceContainerAnalysisOccurrenceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // encoder logic only in non-GA versions - - return obj, nil -} - -func resourceContainerAnalysisOccurrenceUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // Note is required, even for PATCH - noteNameProp, err := expandContainerAnalysisOccurrenceNoteName(d.Get("note_name"), d, meta.(*Config)) - if err != nil { - return nil, err - } else if v, ok := d.GetOkExists("note_name"); !isEmptyValue(reflect.ValueOf(noteNameProp)) && (ok || !reflect.DeepEqual(v, noteNameProp)) { - obj["noteName"] = noteNameProp - } - - return resourceContainerAnalysisOccurrenceEncoder(d, meta, obj) -} - -func resourceContainerAnalysisOccurrenceDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // encoder logic only in non-GA version - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_registry.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_registry.go deleted file mode 100644 index 3b58258c0d..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_registry.go +++ /dev/null @@ -1,117 +0,0 @@ -package google - -import 
( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceContainerRegistry() *schema.Resource { - return &schema.Resource{ - Create: resourceContainerRegistryCreate, - Read: resourceContainerRegistryRead, - Delete: resourceContainerRegistryDelete, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - StateFunc: func(s interface{}) string { - return strings.ToUpper(s.(string)) - }, - Description: `The location of the registry. One of ASIA, EU, US or not specified. See the official documentation for more information on registry locations.`, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, - }, - - "bucket_self_link": { - Type: schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceContainerRegistryCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - log.Printf("[DEBUG] Project: %s", project) - - location := d.Get("location").(string) - log.Printf("[DEBUG] location: %s", location) - urlBase := "https://gcr.io/v2/token" - if location != "" { - urlBase = fmt.Sprintf("https://%s.gcr.io/v2/token", strings.ToLower(location)) - } - - // Performing a token handshake with the GCR API causes the backing bucket to create if it hasn't already. 
- url, err := replaceVars(d, config, fmt.Sprintf("%s?service=gcr.io&scope=repository:{{project}}/my-repo:push,pull", urlBase)) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(config, "GET", project, url, userAgent, nil, d.Timeout(schema.TimeoutCreate)) - - if err != nil { - return err - } - return resourceContainerRegistryRead(d, meta) -} - -func resourceContainerRegistryRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - location := d.Get("location").(string) - project, err := getProject(d, config) - if err != nil { - return err - } - name := "" - if location != "" { - name = fmt.Sprintf("%s.artifacts.%s.appspot.com", strings.ToLower(location), project) - } else { - name = fmt.Sprintf("artifacts.%s.appspot.com", project) - } - - res, err := config.NewStorageClient(userAgent).Buckets.Get(name).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Container Registry Storage Bucket %q", name)) - } - log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) - - // Update the ID according to the bucket ID - if err := d.Set("bucket_self_link", res.SelfLink); err != nil { - return fmt.Errorf("Error setting bucket_self_link: %s", err) - } - - d.SetId(res.Id) - return nil -} - -func resourceContainerRegistryDelete(d *schema.ResourceData, meta interface{}) error { - // Don't delete the backing bucket as this is not a supported GCR action - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_entry_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_entry_group.go deleted file mode 100644 index 1be369d154..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_entry_group.go +++ /dev/null @@ -1,351 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDataCatalogEntryGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceDataCatalogEntryGroupCreate, - Read: resourceDataCatalogEntryGroupRead, - Update: resourceDataCatalogEntryGroupUpdate, - Delete: resourceDataCatalogEntryGroupDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDataCatalogEntryGroupImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "entry_group_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[A-z_][A-z0-9_]{0,63}$`), - Description: `The id of the entry group to create. 
The id must begin with a letter or underscore, -contain only English letters, numbers and underscores, and be at most 64 characters.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Entry group description, which can consist of several sentences or paragraphs that describe entry group contents.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `A short name to identify the entry group, for example, "analytics data - jan 2011".`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `EntryGroup location region.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the entry group in URL format. Example: projects/{project}/locations/{location}/entryGroups/{entryGroupId}`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataCatalogEntryGroupCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDataCatalogEntryGroupDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDataCatalogEntryGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, 
"{{DataCatalogBasePath}}projects/{{project}}/locations/{{region}}/entryGroups?entryGroupId={{entry_group_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new EntryGroup: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for EntryGroup: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating EntryGroup: %s", err) - } - if err := d.Set("name", flattenDataCatalogEntryGroupName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating EntryGroup %q: %#v", d.Id(), res) - - return resourceDataCatalogEntryGroupRead(d, meta) -} - -func resourceDataCatalogEntryGroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for EntryGroup: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return 
handleNotFoundError(err, d, fmt.Sprintf("DataCatalogEntryGroup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading EntryGroup: %s", err) - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return fmt.Errorf("Error reading EntryGroup: %s", err) - } - - if err := d.Set("name", flattenDataCatalogEntryGroupName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading EntryGroup: %s", err) - } - if err := d.Set("display_name", flattenDataCatalogEntryGroupDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading EntryGroup: %s", err) - } - if err := d.Set("description", flattenDataCatalogEntryGroupDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading EntryGroup: %s", err) - } - - return nil -} - -func resourceDataCatalogEntryGroupUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for EntryGroup: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDataCatalogEntryGroupDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDataCatalogEntryGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, 
err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating EntryGroup %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating EntryGroup %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating EntryGroup %q: %#v", d.Id(), res) - } - - return resourceDataCatalogEntryGroupRead(d, meta) -} - -func resourceDataCatalogEntryGroupDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for EntryGroup: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting EntryGroup %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, 
d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EntryGroup") - } - - log.Printf("[DEBUG] Finished deleting EntryGroup %q: %#v", d.Id(), res) - return nil -} - -func resourceDataCatalogEntryGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) - egRegex := regexp.MustCompile("projects/(.+)/locations/(.+)/entryGroups/(.+)") - - parts := egRegex.FindStringSubmatch(name) - if len(parts) != 4 { - return nil, fmt.Errorf("entry group name does not fit the format %s", egRegex) - } - if err := d.Set("project", parts[1]); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", parts[2]); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("entry_group_id", parts[3]); err != nil { - return nil, fmt.Errorf("Error setting entry_group_id: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenDataCatalogEntryGroupName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryGroupDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryGroupDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDataCatalogEntryGroupDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogEntryGroupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_policy_tag.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_policy_tag.go deleted file mode 100644 index a35a9d366c..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_policy_tag.go +++ /dev/null @@ -1,346 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDataCatalogPolicyTag() *schema.Resource { - return &schema.Resource{ - Create: resourceDataCatalogPolicyTagCreate, - Read: resourceDataCatalogPolicyTagRead, - Update: resourceDataCatalogPolicyTagUpdate, - Delete: resourceDataCatalogPolicyTagDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDataCatalogPolicyTagImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `User defined name of this policy tag. 
It must: be unique within the parent -taxonomy; contain only unicode letters, numbers, underscores, dashes and spaces; -not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8.`, - }, - "taxonomy": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Taxonomy the policy tag is associated with`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Description of this policy tag. It must: contain only unicode characters, tabs, -newlines, carriage returns and page breaks; and be at most 2000 bytes long when -encoded in UTF-8. If not set, defaults to an empty description. -If not set, defaults to an empty description.`, - }, - "parent_policy_tag": { - Type: schema.TypeString, - Optional: true, - Description: `Resource name of this policy tag's parent policy tag. -If empty, it means this policy tag is a top level policy tag. -If not set, defaults to an empty string.`, - }, - "child_policy_tags": { - Type: schema.TypeList, - Computed: true, - Description: `Resource names of child policy tags of this policy tag.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Resource name of this policy tag, whose format is: -"projects/{project}/locations/{region}/taxonomies/{taxonomy}/policyTags/{policytag}"`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataCatalogPolicyTagCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDataCatalogPolicyTagDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = 
displayNameProp - } - descriptionProp, err := expandDataCatalogPolicyTagDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - parentPolicyTagProp, err := expandDataCatalogPolicyTagParentPolicyTag(d.Get("parent_policy_tag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent_policy_tag"); !isEmptyValue(reflect.ValueOf(parentPolicyTagProp)) && (ok || !reflect.DeepEqual(v, parentPolicyTagProp)) { - obj["parentPolicyTag"] = parentPolicyTagProp - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{taxonomy}}/policyTags") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new PolicyTag: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating PolicyTag: %s", err) - } - if err := d.Set("name", flattenDataCatalogPolicyTagName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating PolicyTag %q: %#v", d.Id(), res) - - return resourceDataCatalogPolicyTagRead(d, meta) -} - -func resourceDataCatalogPolicyTagRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataCatalogPolicyTag %q", d.Id())) - } - - if err := d.Set("name", flattenDataCatalogPolicyTagName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading PolicyTag: %s", err) - } - if err := d.Set("display_name", flattenDataCatalogPolicyTagDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading PolicyTag: %s", err) - } - if err := d.Set("description", flattenDataCatalogPolicyTagDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading PolicyTag: %s", err) - } - if err := d.Set("parent_policy_tag", flattenDataCatalogPolicyTagParentPolicyTag(res["parentPolicyTag"], d, config)); err != nil { - return fmt.Errorf("Error reading PolicyTag: %s", err) - } - if err := d.Set("child_policy_tags", flattenDataCatalogPolicyTagChildPolicyTags(res["childPolicyTags"], d, config)); err != nil { - return fmt.Errorf("Error reading PolicyTag: %s", err) - } - - return nil -} - -func resourceDataCatalogPolicyTagUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandDataCatalogPolicyTagDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := 
expandDataCatalogPolicyTagDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - parentPolicyTagProp, err := expandDataCatalogPolicyTagParentPolicyTag(d.Get("parent_policy_tag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent_policy_tag"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, parentPolicyTagProp)) { - obj["parentPolicyTag"] = parentPolicyTagProp - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating PolicyTag %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("parent_policy_tag") { - updateMask = append(updateMask, "parentPolicyTag") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating PolicyTag %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating PolicyTag %q: %#v", d.Id(), res) - } - - return resourceDataCatalogPolicyTagRead(d, meta) -} - -func resourceDataCatalogPolicyTagDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting PolicyTag %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "PolicyTag") - } - - log.Printf("[DEBUG] Finished deleting PolicyTag %q: %#v", d.Id(), res) - return nil -} - -func resourceDataCatalogPolicyTagImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "(?Pprojects/[^/]+/locations/[^/]+/taxonomies/[^/]+)/policyTags/(?P.+)"}, d, config); err != nil { - return nil, err - } - - originalName := d.Get("name").(string) - originalTaxonomy := d.Get("taxonomy").(string) - name := fmt.Sprintf("%s/policyTags/%s", originalTaxonomy, originalName) - - if err := d.Set("name", name); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - return []*schema.ResourceData{d}, nil -} - -func flattenDataCatalogPolicyTagName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogPolicyTagDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogPolicyTagDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogPolicyTagParentPolicyTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogPolicyTagChildPolicyTags(v interface{}, d *schema.ResourceData, 
config *Config) interface{} { - return v -} - -func expandDataCatalogPolicyTagDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogPolicyTagDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogPolicyTagParentPolicyTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_tag.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_tag.go deleted file mode 100644 index d01c89759d..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_tag.go +++ /dev/null @@ -1,638 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDataCatalogTag() *schema.Resource { - return &schema.Resource{ - Create: resourceDataCatalogTagCreate, - Read: resourceDataCatalogTagRead, - Update: resourceDataCatalogTagUpdate, - Delete: resourceDataCatalogTagDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDataCatalogTagImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "fields": { - Type: schema.TypeSet, - Required: true, - Description: `This maps the ID of a tag field to the value of and additional information about that field. -Valid field IDs are defined by the tag's template. 
A tag must have at least 1 field and at most 500 fields.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field_name": { - Type: schema.TypeString, - Required: true, - }, - "bool_value": { - Type: schema.TypeBool, - Optional: true, - Description: `Holds the value for a tag field with boolean type.`, - }, - "double_value": { - Type: schema.TypeFloat, - Optional: true, - Description: `Holds the value for a tag field with double type.`, - }, - "enum_value": { - Type: schema.TypeString, - Optional: true, - Description: `The display name of the enum value.`, - }, - - "string_value": { - Type: schema.TypeString, - Optional: true, - Description: `Holds the value for a tag field with string type.`, - }, - "timestamp_value": { - Type: schema.TypeString, - Optional: true, - Description: `Holds the value for a tag field with timestamp type.`, - }, - "display_name": { - Type: schema.TypeString, - Computed: true, - Description: `The display name of this field`, - }, - "order": { - Type: schema.TypeInt, - Computed: true, - Description: `The order of this field with respect to other fields in this tag. For example, a higher value can indicate -a more important field. The value can be negative. Multiple fields can have the same order, and field orders -within a tag do not have to be sequential.`, - }, - }, - }, - }, - "template": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name of the tag template that this tag uses. Example: -projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId} -This field cannot be modified after creation.`, - }, - "column": { - Type: schema.TypeString, - Optional: true, - Description: `Resources like Entry can have schemas associated with them. This scope allows users to attach tags to an -individual column based on that schema. - -For attaching a tag to a nested column, use '.' to separate the column names. 
Example: -'outer_column.inner_column'`, - }, - "parent": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name of the parent this tag is attached to. This can be the name of an entry or an entry group. If an entry group, the tag will be attached to -all entries in that group.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the tag in URL format. Example: -projects/{project_id}/locations/{location}/entrygroups/{entryGroupId}/entries/{entryId}/tags/{tag_id} or -projects/{project_id}/locations/{location}/entrygroups/{entryGroupId}/tags/{tag_id} -where tag_id is a system-generated identifier. Note that this Tag may not actually be stored in the location in this name.`, - }, - "template_displayname": { - Type: schema.TypeString, - Computed: true, - Description: `The display name of the tag template.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataCatalogTagCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - templateProp, err := expandNestedDataCatalogTagTemplate(d.Get("template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("template"); !isEmptyValue(reflect.ValueOf(templateProp)) && (ok || !reflect.DeepEqual(v, templateProp)) { - obj["template"] = templateProp - } - fieldsProp, err := expandNestedDataCatalogTagFields(d.Get("fields"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fields"); !isEmptyValue(reflect.ValueOf(fieldsProp)) && (ok || !reflect.DeepEqual(v, fieldsProp)) { - obj["fields"] = fieldsProp - } - columnProp, err := expandNestedDataCatalogTagColumn(d.Get("column"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("column"); !isEmptyValue(reflect.ValueOf(columnProp)) && (ok || 
!reflect.DeepEqual(v, columnProp)) { - obj["column"] = columnProp - } - - obj, err = resourceDataCatalogTagEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{parent}}/tags") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Tag: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Tag: %s", err) - } - if err := d.Set("name", flattenNestedDataCatalogTagName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Tag %q: %#v", d.Id(), res) - - return resourceDataCatalogTagRead(d, meta) -} - -func resourceDataCatalogTagRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{parent}}/tags") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataCatalogTag %q", d.Id())) - } - - res, err = flattenNestedDataCatalogTag(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more 
- remove it from the state. - log.Printf("[DEBUG] Removing DataCatalogTag because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenNestedDataCatalogTagName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Tag: %s", err) - } - if err := d.Set("template", flattenNestedDataCatalogTagTemplate(res["template"], d, config)); err != nil { - return fmt.Errorf("Error reading Tag: %s", err) - } - if err := d.Set("template_displayname", flattenNestedDataCatalogTagTemplateDisplayname(res["templateDisplayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Tag: %s", err) - } - if err := d.Set("fields", flattenNestedDataCatalogTagFields(res["fields"], d, config)); err != nil { - return fmt.Errorf("Error reading Tag: %s", err) - } - if err := d.Set("column", flattenNestedDataCatalogTagColumn(res["column"], d, config)); err != nil { - return fmt.Errorf("Error reading Tag: %s", err) - } - - return nil -} - -func resourceDataCatalogTagUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - fieldsProp, err := expandNestedDataCatalogTagFields(d.Get("fields"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fields"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fieldsProp)) { - obj["fields"] = fieldsProp - } - columnProp, err := expandNestedDataCatalogTagColumn(d.Get("column"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("column"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, columnProp)) { - obj["column"] = columnProp - } - - obj, err = resourceDataCatalogTagEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - 
return err - } - - log.Printf("[DEBUG] Updating Tag %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("fields") { - updateMask = append(updateMask, "fields") - } - - if d.HasChange("column") { - updateMask = append(updateMask, "column") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Tag %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Tag %q: %#v", d.Id(), res) - } - - return resourceDataCatalogTagRead(d, meta) -} - -func resourceDataCatalogTagDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Tag %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Tag") - } - - log.Printf("[DEBUG] Finished deleting Tag %q: %#v", d.Id(), res) - return nil -} - -func resourceDataCatalogTagImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // 
current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) - egRegex := regexp.MustCompile("(.+)/tags") - - parts := egRegex.FindStringSubmatch(name) - if len(parts) != 2 { - return nil, fmt.Errorf("entry name does not fit the format %s", egRegex) - } - - if err := d.Set("parent", parts[1]); err != nil { - return nil, fmt.Errorf("Error setting parent: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenNestedDataCatalogTagName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagTemplateDisplayname(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagFields(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(l)) - for k, raw := range l { - original := raw.(map[string]interface{}) - transformed = append(transformed, map[string]interface{}{ - "field_name": k, - "display_name": flattenNestedDataCatalogTagFieldsDisplayName(original["display_name"], d, config), - "order": flattenNestedDataCatalogTagFieldsOrder(original["order"], d, config), - "double_value": flattenNestedDataCatalogTagFieldsDoubleValue(original["doubleValue"], d, config), - "string_value": flattenNestedDataCatalogTagFieldsStringValue(original["stringValue"], d, config), - "bool_value": flattenNestedDataCatalogTagFieldsBoolValue(original["boolValue"], d, config), - "timestamp_value": flattenNestedDataCatalogTagFieldsTimestampValue(original["timestampValue"], d, config), - "enum_value": flattenNestedDataCatalogTagFieldsEnumValue(original["enumValue"], d, config), - 
}) - } - return transformed -} -func flattenNestedDataCatalogTagFieldsDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagFieldsOrder(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenNestedDataCatalogTagFieldsDoubleValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagFieldsStringValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagFieldsBoolValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagFieldsTimestampValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagFieldsEnumValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - - return v.(map[string]interface{})["displayName"] -} - -func flattenNestedDataCatalogTagColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedDataCatalogTagTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFields(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - for _, raw := range v.(*schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedDisplayName, err := expandNestedDataCatalogTagFieldsDisplayName(original["display_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !isEmptyValue(val) { - transformed["display_name"] = transformedDisplayName - } - - transformedOrder, err := expandNestedDataCatalogTagFieldsOrder(original["order"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOrder); val.IsValid() && !isEmptyValue(val) { - transformed["order"] = transformedOrder - } - - transformedDoubleValue, err := expandNestedDataCatalogTagFieldsDoubleValue(original["double_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDoubleValue); val.IsValid() && !isEmptyValue(val) { - transformed["doubleValue"] = transformedDoubleValue - } - - transformedStringValue, err := expandNestedDataCatalogTagFieldsStringValue(original["string_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !isEmptyValue(val) { - transformed["stringValue"] = transformedStringValue - } - - transformedBoolValue, err := expandNestedDataCatalogTagFieldsBoolValue(original["bool_value"], d, config) - if err != nil { - return nil, err - } else { - transformed["boolValue"] = transformedBoolValue - } - - transformedTimestampValue, err := expandNestedDataCatalogTagFieldsTimestampValue(original["timestamp_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !isEmptyValue(val) { - transformed["timestampValue"] = transformedTimestampValue - } - - transformedEnumValue, err := expandNestedDataCatalogTagFieldsEnumValue(original["enum_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnumValue); val.IsValid() && 
!isEmptyValue(val) { - transformed["enumValue"] = transformedEnumValue - } - - transformedFieldName, err := expandString(original["field_name"], d, config) - if err != nil { - return nil, err - } - m[transformedFieldName] = transformed - } - return m, nil -} - -func expandNestedDataCatalogTagFieldsDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFieldsOrder(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFieldsDoubleValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFieldsStringValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFieldsBoolValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFieldsTimestampValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFieldsEnumValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - // we flattened the original["enum_value"]["display_name"] object to be just original["enum_value"] so here, - // v is the value we want from the config - transformed := make(map[string]interface{}) - if val := reflect.ValueOf(v); val.IsValid() && !isEmptyValue(val) { - transformed["displayName"] = v - } - - return transformed, nil -} - -func expandNestedDataCatalogTagColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDataCatalogTagEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - if obj["fields"] != nil { - // isEmptyValue() does not work for a boolean as it shows - // 
false when it is 'empty'. Filter boolValue here based on - // the rule api does not take more than 1 'value' - fields := obj["fields"].(map[string]interface{}) - for _, elements := range fields { - values := elements.(map[string]interface{}) - if len(values) > 1 { - for val := range values { - if val == "boolValue" { - delete(values, "boolValue") - } - } - } - } - } - return obj, nil -} - -func flattenNestedDataCatalogTag(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["tags"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value tags. Actual value: %v", v) - } - - _, item, err := resourceDataCatalogTagFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceDataCatalogTagFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName := d.Get("name") - expectedFlattenedName := flattenNestedDataCatalogTagName(expectedName, d, meta.(*Config)) - - // Search list for this resource. 
- for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemName := flattenNestedDataCatalogTagName(item["name"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { - log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_tag_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_tag_template.go deleted file mode 100644 index 22d1cc6e81..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_tag_template.go +++ /dev/null @@ -1,869 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -//Use it to delete TagTemplate Field -func deleteTagTemplateField(d *schema.ResourceData, config *Config, name, billingProject, userAgent string) error { - - url_delete, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}/fields/"+name+"?force={{force_delete}}") - if err != nil { - return err - } - var obj map[string]interface{} - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url_delete, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return fmt.Errorf("Error deleting TagTemplate Field %v: %s", name, err) - } - - log.Printf("[DEBUG] Finished deleting TagTemplate Field %q: %#v", name, res) - return nil -} - -//Use it to create TagTemplate Field -func createTagTemplateField(d *schema.ResourceData, config *Config, body map[string]interface{}, name, billingProject, userAgent string) error { - - url_create, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}/fields") - if err != nil { - return err - } - - url_create, err = addQueryParams(url_create, map[string]string{"tagTemplateFieldId": name}) - if err != nil { - return err - } - - res_create, err := SendRequestWithTimeout(config, "POST", billingProject, url_create, userAgent, body, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TagTemplate Field: %s", err) - } - - if err != nil { - return fmt.Errorf("Error creating TagTemplate Field %v: %s", name, err) - } else { - log.Printf("[DEBUG] Finished creating TagTemplate Field %v: %#v", name, res_create) - } - - return nil -} - -func ResourceDataCatalogTagTemplate() *schema.Resource { - return &schema.Resource{ - Create: resourceDataCatalogTagTemplateCreate, - Read: resourceDataCatalogTagTemplateRead, - Update: 
resourceDataCatalogTagTemplateUpdate, - Delete: resourceDataCatalogTagTemplateDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDataCatalogTagTemplateImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "fields": { - Type: schema.TypeSet, - Required: true, - Description: `Set of tag template field IDs and the settings for the field. This set is an exhaustive list of the allowed fields. This set must contain at least one field and at most 500 fields. The change of field_id will be resulting in re-creating of field. The change of primitive_type will be resulting in re-creating of field, however if the field is a required, you cannot update it.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field_id": { - Type: schema.TypeString, - Required: true, - }, - "type": { - Type: schema.TypeList, - Required: true, - Description: `The type of value this tag field can contain.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enum_type": { - Type: schema.TypeList, - Optional: true, - Description: `Represents an enum type. - Exactly one of 'primitive_type' or 'enum_type' must be set`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allowed_values": { - Type: schema.TypeSet, - Required: true, - Description: `The set of allowed values for this enum. The display names of the -values must be case-insensitively unique within this set. Currently, -enum values can only be added to the list of allowed values. Deletion -and renaming of enum values are not supported. -Can have up to 500 allowed values.`, - Elem: datacatalogTagTemplateFieldsFieldsTypeEnumTypeAllowedValuesSchema(), - // Default schema.HashSchema is used. 
- }, - }, - }, - }, - "primitive_type": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"DOUBLE", "STRING", "BOOL", "TIMESTAMP", ""}), - Description: `Represents primitive types - string, bool etc. - Exactly one of 'primitive_type' or 'enum_type' must be set Possible values: ["DOUBLE", "STRING", "BOOL", "TIMESTAMP"]`, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `A description for this field.`, - }, - "display_name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `The display name for this field.`, - }, - "is_required": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: `Whether this is a required field. Defaults to false.`, - }, - "order": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The order of this field with respect to other fields in this tag template. -A higher value indicates a more important field. The value can be negative. -Multiple fields can have the same order, and field orders within a tag do not have to be sequential.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the tag template field in URL format. Example: projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId}/fields/{field}`, - }, - }, - }, - }, - "tag_template_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z_][a-z0-9_]{0,63}$`), - Description: `The id of the tag template to create.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `The display name for this template.`, - }, - "force_delete": { - Type: schema.TypeBool, - Optional: true, - Description: `This confirms the deletion of any possible tags using this template. 
Must be set to true in order to delete the tag template.`, - Default: false, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Template location region.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the tag template in URL format. Example: projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId}`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func datacatalogTagTemplateFieldsFieldsTypeEnumTypeAllowedValuesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `The display name of the enum value.`, - }, - }, - } -} - -func resourceDataCatalogTagTemplateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDataCatalogTagTemplateDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - fieldsProp, err := expandDataCatalogTagTemplateFields(d.Get("fields"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fields"); !isEmptyValue(reflect.ValueOf(fieldsProp)) && (ok || !reflect.DeepEqual(v, fieldsProp)) { - obj["fields"] = fieldsProp - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}projects/{{project}}/locations/{{region}}/tagTemplates?tagTemplateId={{tag_template_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new TagTemplate: %#v", obj) - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TagTemplate: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TagTemplate: %s", err) - } - if err := d.Set("name", flattenDataCatalogTagTemplateName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating TagTemplate %q: %#v", d.Id(), res) - - return resourceDataCatalogTagTemplateRead(d, meta) -} - -func resourceDataCatalogTagTemplateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TagTemplate: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataCatalogTagTemplate %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading TagTemplate: %s", err) - } - - 
region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return fmt.Errorf("Error reading TagTemplate: %s", err) - } - - if err := d.Set("name", flattenDataCatalogTagTemplateName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading TagTemplate: %s", err) - } - if err := d.Set("display_name", flattenDataCatalogTagTemplateDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading TagTemplate: %s", err) - } - if err := d.Set("fields", flattenDataCatalogTagTemplateFields(res["fields"], d, config)); err != nil { - return fmt.Errorf("Error reading TagTemplate: %s", err) - } - - return nil -} - -func resourceDataCatalogTagTemplateUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TagTemplate: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDataCatalogTagTemplateDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - fieldsProp, err := expandDataCatalogTagTemplateFields(d.Get("fields"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fields"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fieldsProp)) { - obj["fields"] = fieldsProp - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating TagTemplate %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - 
updateMask = append(updateMask, "displayName") - } - - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if len(updateMask) > 0 { - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating TagTemplate %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TagTemplate %q: %#v", d.Id(), res) - } - - } - - // since fields have a separate endpoint, - // we need to handle it manually - - type FieldChange struct { - Old, New map[string]interface{} - } - - o, n := d.GetChange("fields") - vals := make(map[string]*FieldChange) - - // this will create a dictionary with the value - // of field_id as the key that will contain the - // maps of old and new values - for _, raw := range o.(*schema.Set).List() { - obj := raw.(map[string]interface{}) - k := obj["field_id"].(string) - vals[k] = &FieldChange{Old: obj} - } - - for _, raw := range n.(*schema.Set).List() { - obj := raw.(map[string]interface{}) - k := obj["field_id"].(string) - if _, ok := vals[k]; !ok { - // if key is not present in the vals, - // then create an empty object to hold the new value - vals[k] = &FieldChange{} - } - vals[k].New = obj - } - - // fields schema to create schema.set below - dataCatalogTagTemplateFieldsSchema := &schema.Resource{ - Schema: ResourceDataCatalogTagTemplate().Schema["fields"].Elem.(*schema.Resource).Schema, - } - - for name, change := range vals { - // A few different situations to deal with in here: - // - change.Old is nil: create a new role - // - change.New is nil: remove an existing role - // - both are set: 
test if New is different than Old and update if so - - changeOldSet := schema.NewSet(schema.HashResource(dataCatalogTagTemplateFieldsSchema), []interface{}{}) - changeOldSet.Add(change.Old) - var changeOldProp map[string]interface{} - if len(change.Old) != 0 { - changeOldProp, _ = expandDataCatalogTagTemplateFields(changeOldSet, nil, nil) - changeOldProp = changeOldProp[name].(map[string]interface{}) - } - - changeNewSet := schema.NewSet(schema.HashResource(dataCatalogTagTemplateFieldsSchema), []interface{}{}) - changeNewSet.Add(change.New) - var changeNewProp map[string]interface{} - if len(change.New) != 0 { - changeNewProp, _ = expandDataCatalogTagTemplateFields(changeNewSet, nil, nil) - changeNewProp = changeNewProp[name].(map[string]interface{}) - } - - // if old state is empty, then we have a new field to create - if len(change.Old) == 0 { - err := createTagTemplateField(d, config, changeNewProp, name, billingProject, userAgent) - if err != nil { - return err - } - - continue - } - - // if new state is empty, then we need to delete the current field - if len(change.New) == 0 { - err := deleteTagTemplateField(d, config, name, billingProject, userAgent) - if err != nil { - return err - } - - continue - } - - // if we have old and new values, but are not equal, update with the new state - if !reflect.DeepEqual(changeOldProp, changeNewProp) { - url1, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}/fields/"+name) - if err != nil { - return err - } - - oldType := changeOldProp["type"].(map[string]interface{}) - newType := changeNewProp["type"].(map[string]interface{}) - - if oldType["primitiveType"] != newType["primitiveType"] { - // As primitiveType can't be changed, it is considered as ForceNew which triggers the deletion of old field and recreation of a new field - // Before that, we need to check that is_required is True for the newType or not, as we don't have support to add new required field in the existing TagTemplate, - // So in such cases, 
we can simply return the error - - // Reason for checking the isRequired in changeNewProp - - // Because this changeNewProp check should be ignored when the user wants to update the primitive type and make it optional rather than keeping it required. - if changeNewProp["isRequired"] != nil && changeNewProp["isRequired"].(bool) { - return fmt.Errorf("Updating the primitive type for a required field on an existing tag template is not supported as TagTemplateField %q is required", name) - } - - // delete changeOldProp - err_delete := deleteTagTemplateField(d, config, name, billingProject, userAgent) - if err_delete != nil { - return err_delete - } - - // recreate changeNewProp - err_create := createTagTemplateField(d, config, changeNewProp, name, billingProject, userAgent) - if err_create != nil { - return err_create - } - - log.Printf("[DEBUG] Finished updating TagTemplate Field %q", name) - return resourceDataCatalogTagTemplateRead(d, meta) - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url1, userAgent, changeNewProp, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return fmt.Errorf("Error updating TagTemplate Field %v: %s", name, err) - } - - log.Printf("[DEBUG] Finished updating TagTemplate Field %q: %#v", name, res) - } - } - return resourceDataCatalogTagTemplateRead(d, meta) -} - -func resourceDataCatalogTagTemplateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TagTemplate: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}?force={{force_delete}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting TagTemplate %q", d.Id()) - - // err == nil indicates 
that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TagTemplate") - } - - log.Printf("[DEBUG] Finished deleting TagTemplate %q: %#v", d.Id(), res) - return nil -} - -func resourceDataCatalogTagTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) - egRegex := regexp.MustCompile("projects/(.+)/locations/(.+)/tagTemplates/(.+)") - - parts := egRegex.FindStringSubmatch(name) - if len(parts) != 4 { - return nil, fmt.Errorf("tag template name does not fit the format %s", egRegex) - } - if err := d.Set("project", parts[1]); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", parts[2]); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("tag_template_id", parts[3]); err != nil { - return nil, fmt.Errorf("Error setting tag_template_id: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenDataCatalogTagTemplateName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateFields(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(l)) - for k, raw := range l { - original := raw.(map[string]interface{}) - transformed = 
append(transformed, map[string]interface{}{ - "field_id": k, - "name": flattenDataCatalogTagTemplateFieldsName(original["name"], d, config), - "display_name": flattenDataCatalogTagTemplateFieldsDisplayName(original["displayName"], d, config), - "description": flattenDataCatalogTagTemplateFieldsDescription(original["description"], d, config), - "type": flattenDataCatalogTagTemplateFieldsType(original["type"], d, config), - "is_required": flattenDataCatalogTagTemplateFieldsIsRequired(original["isRequired"], d, config), - "order": flattenDataCatalogTagTemplateFieldsOrder(original["order"], d, config), - }) - } - return transformed -} -func flattenDataCatalogTagTemplateFieldsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateFieldsDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateFieldsDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateFieldsType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["primitive_type"] = - flattenDataCatalogTagTemplateFieldsTypePrimitiveType(original["primitiveType"], d, config) - transformed["enum_type"] = - flattenDataCatalogTagTemplateFieldsTypeEnumType(original["enumType"], d, config) - return []interface{}{transformed} -} -func flattenDataCatalogTagTemplateFieldsTypePrimitiveType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateFieldsTypeEnumType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed 
:= make(map[string]interface{}) - transformed["allowed_values"] = - flattenDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValues(original["allowedValues"], d, config) - return []interface{}{transformed} -} -func flattenDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValues(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(schema.HashResource(datacatalogTagTemplateFieldsFieldsTypeEnumTypeAllowedValuesSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "display_name": flattenDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValuesDisplayName(original["displayName"], d, config), - }) - } - return transformed -} -func flattenDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValuesDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateFieldsIsRequired(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateFieldsOrder(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandDataCatalogTagTemplateDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFields(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return 
map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - for _, raw := range v.(*schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataCatalogTagTemplateFieldsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedDisplayName, err := expandDataCatalogTagTemplateFieldsDisplayName(original["display_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !isEmptyValue(val) { - transformed["displayName"] = transformedDisplayName - } - - transformedDescription, err := expandDataCatalogTagTemplateFieldsDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedType, err := expandDataCatalogTagTemplateFieldsType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - transformedIsRequired, err := expandDataCatalogTagTemplateFieldsIsRequired(original["is_required"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIsRequired); val.IsValid() && !isEmptyValue(val) { - transformed["isRequired"] = transformedIsRequired - } - - transformedOrder, err := expandDataCatalogTagTemplateFieldsOrder(original["order"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOrder); val.IsValid() && !isEmptyValue(val) { - transformed["order"] = transformedOrder - } - - transformedFieldId, err := 
expandString(original["field_id"], d, config) - if err != nil { - return nil, err - } - m[transformedFieldId] = transformed - } - return m, nil -} - -func expandDataCatalogTagTemplateFieldsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFieldsDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFieldsDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFieldsType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPrimitiveType, err := expandDataCatalogTagTemplateFieldsTypePrimitiveType(original["primitive_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPrimitiveType); val.IsValid() && !isEmptyValue(val) { - transformed["primitiveType"] = transformedPrimitiveType - } - - transformedEnumType, err := expandDataCatalogTagTemplateFieldsTypeEnumType(original["enum_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnumType); val.IsValid() && !isEmptyValue(val) { - transformed["enumType"] = transformedEnumType - } - - return transformed, nil -} - -func expandDataCatalogTagTemplateFieldsTypePrimitiveType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFieldsTypeEnumType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - 
transformed := make(map[string]interface{}) - - transformedAllowedValues, err := expandDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValues(original["allowed_values"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAllowedValues); val.IsValid() && !isEmptyValue(val) { - transformed["allowedValues"] = transformedAllowedValues - } - - return transformed, nil -} - -func expandDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValues(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDisplayName, err := expandDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValuesDisplayName(original["display_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !isEmptyValue(val) { - transformed["displayName"] = transformedDisplayName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValuesDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFieldsIsRequired(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFieldsOrder(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_taxonomy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_taxonomy.go deleted file mode 100644 index dcdfa41ea6..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_taxonomy.go +++ /dev/null @@ -1,371 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDataCatalogTaxonomy() *schema.Resource { - return &schema.Resource{ - Create: resourceDataCatalogTaxonomyCreate, - Read: resourceDataCatalogTaxonomyRead, - Update: resourceDataCatalogTaxonomyUpdate, - Delete: resourceDataCatalogTaxonomyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDataCatalogTaxonomyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `User defined name of this taxonomy. -It must: contain only unicode letters, numbers, underscores, dashes -and spaces; not start or end with spaces; and be at most 200 bytes -long when encoded in UTF-8.`, - }, - "activated_policy_types": { - Type: schema.TypeList, - Optional: true, - Description: `A list of policy types that are activated for this taxonomy. If not set, -defaults to an empty list. 
Possible values: ["POLICY_TYPE_UNSPECIFIED", "FINE_GRAINED_ACCESS_CONTROL"]`, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"POLICY_TYPE_UNSPECIFIED", "FINE_GRAINED_ACCESS_CONTROL"}), - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Description of this taxonomy. It must: contain only unicode characters, -tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes -long when encoded in UTF-8. If not set, defaults to an empty description.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Taxonomy location region.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Resource name of this taxonomy, whose format is: -"projects/{project}/locations/{region}/taxonomies/{taxonomy}".`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataCatalogTaxonomyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDataCatalogTaxonomyDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDataCatalogTaxonomyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - activatedPolicyTypesProp, err := 
expandDataCatalogTaxonomyActivatedPolicyTypes(d.Get("activated_policy_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("activated_policy_types"); !isEmptyValue(reflect.ValueOf(activatedPolicyTypesProp)) && (ok || !reflect.DeepEqual(v, activatedPolicyTypesProp)) { - obj["activatedPolicyTypes"] = activatedPolicyTypesProp - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}projects/{{project}}/locations/{{region}}/taxonomies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Taxonomy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Taxonomy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Taxonomy: %s", err) - } - if err := d.Set("name", flattenDataCatalogTaxonomyName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Taxonomy %q: %#v", d.Id(), res) - - return resourceDataCatalogTaxonomyRead(d, meta) -} - -func resourceDataCatalogTaxonomyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
fmt.Errorf("Error fetching project for Taxonomy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataCatalogTaxonomy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Taxonomy: %s", err) - } - - if err := d.Set("name", flattenDataCatalogTaxonomyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Taxonomy: %s", err) - } - if err := d.Set("display_name", flattenDataCatalogTaxonomyDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Taxonomy: %s", err) - } - if err := d.Set("description", flattenDataCatalogTaxonomyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Taxonomy: %s", err) - } - if err := d.Set("activated_policy_types", flattenDataCatalogTaxonomyActivatedPolicyTypes(res["activatedPolicyTypes"], d, config)); err != nil { - return fmt.Errorf("Error reading Taxonomy: %s", err) - } - - return nil -} - -func resourceDataCatalogTaxonomyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Taxonomy: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDataCatalogTaxonomyDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - 
obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDataCatalogTaxonomyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - activatedPolicyTypesProp, err := expandDataCatalogTaxonomyActivatedPolicyTypes(d.Get("activated_policy_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("activated_policy_types"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, activatedPolicyTypesProp)) { - obj["activatedPolicyTypes"] = activatedPolicyTypesProp - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Taxonomy %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("activated_policy_types") { - updateMask = append(updateMask, "activatedPolicyTypes") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Taxonomy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Taxonomy %q: %#v", d.Id(), res) - } - - return resourceDataCatalogTaxonomyRead(d, meta) -} - -func resourceDataCatalogTaxonomyDelete(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Taxonomy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Taxonomy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Taxonomy") - } - - log.Printf("[DEBUG] Finished deleting Taxonomy %q: %#v", d.Id(), res) - return nil -} - -func resourceDataCatalogTaxonomyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) - d.SetId(name) - - re := regexp.MustCompile("projects/(.+)/(?:locations|regions)/(.+)/taxonomies/(.+)") - if matches := re.FindStringSubmatch(name); matches != nil { - d.Set("project", matches[1]) - d.Set("region", matches[2]) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenDataCatalogTaxonomyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTaxonomyDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTaxonomyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
- return v -} - -func flattenDataCatalogTaxonomyActivatedPolicyTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDataCatalogTaxonomyDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTaxonomyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTaxonomyActivatedPolicyTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_fusion_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_fusion_instance.go deleted file mode 100644 index fa1aaf12a2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_fusion_instance.go +++ /dev/null @@ -1,1161 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -var instanceAcceleratorOptions = []string{ - "delta.default.checkpoint.directory", - "ui.feature.cdc", -} - -func instanceOptionsDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // Suppress diffs for the options generated by adding an accelerator to a data fusion instance - for _, option := range instanceAcceleratorOptions { - if strings.Contains(k, option) && new == "" { - return true - } - } - - // Let diff be determined by options (above) - if strings.Contains(k, "options.%") { - return true - } - - // For other keys, don't suppress diff. - return false -} - -func ResourceDataFusionInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceDataFusionInstanceCreate, - Read: resourceDataFusionInstanceRead, - Update: resourceDataFusionInstanceUpdate, - Delete: resourceDataFusionInstanceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDataFusionInstanceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(90 * time.Minute), - Update: schema.DefaultTimeout(25 * time.Minute), - Delete: schema.DefaultTimeout(50 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the instance or a fully qualified identifier for the instance.`, - }, - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"BASIC", "ENTERPRISE", "DEVELOPER"}), - Description: `Represents the type of Data Fusion instance. Each type is configured with -the default settings for processing and memory. -- BASIC: Basic Data Fusion instance. In Basic type, the user will be able to create data pipelines -using point and click UI. 
However, there are certain limitations, such as fewer number -of concurrent pipelines, no support for streaming pipelines, etc. -- ENTERPRISE: Enterprise Data Fusion instance. In Enterprise type, the user will have more features -available, such as support for streaming pipelines, higher number of concurrent pipelines, etc. -- DEVELOPER: Developer Data Fusion instance. In Developer type, the user will have all features available but -with restrictive capabilities. This is to help enterprises design and develop their data ingestion and integration -pipelines at low cost. Possible values: ["BASIC", "ENTERPRISE", "DEVELOPER"]`, - }, - "accelerators": { - Type: schema.TypeList, - Optional: true, - Description: `List of accelerators enabled for this CDF instance. - -If accelerators are enabled it is possible a permadiff will be created with the Options field. -Users will need to either manually update their state file to include these diffed options, or include the field in a [lifecycle ignore changes block](https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle#ignore_changes).`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "accelerator_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"CDC", "HEALTHCARE", "CCAI_INSIGHTS"}), - Description: `The type of an accelator for a CDF instance. Possible values: ["CDC", "HEALTHCARE", "CCAI_INSIGHTS"]`, - }, - "state": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"ENABLED", "DISABLED"}), - Description: `The type of an accelator for a CDF instance. Possible values: ["ENABLED", "DISABLED"]`, - }, - }, - }, - }, - "crypto_key_config": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The crypto key configuration. 
This field is used by the Customer-Managed Encryption Keys (CMEK) feature.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key_reference": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of projects/*/locations/*/keyRings/*/cryptoKeys/*.`, - }, - }, - }, - }, - "dataproc_service_account": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of the instance.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Display name for an instance.`, - }, - "enable_rbac": { - Type: schema.TypeBool, - Optional: true, - Description: `Option to enable granular role-based access control.`, - }, - "enable_stackdriver_logging": { - Type: schema.TypeBool, - Optional: true, - Description: `Option to enable Stackdriver Logging.`, - }, - "enable_stackdriver_monitoring": { - Type: schema.TypeBool, - Optional: true, - Description: `Option to enable Stackdriver Monitoring.`, - }, - "event_publish_config": { - Type: schema.TypeList, - Optional: true, - Description: `Option to enable and pass metadata for event publishing.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - Description: `Option to enable Event Publishing.`, - }, - "topic": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name of the Pub/Sub topic. 
Format: projects/{projectId}/topics/{topic_id}`, - }, - }, - }, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `The resource labels for instance to use to annotate any related underlying resources, -such as Compute Engine VMs.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "network_config": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Network configuration options. These are required when a private Data Fusion instance is to be created.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_allocation": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The IP range in CIDR notation to use for the managed Data Fusion instance -nodes. This range must not overlap with any other ranges used in the Data Fusion instance network.`, - }, - "network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the network in the project with which the tenant project -will be peered for executing pipelines. In case of shared VPC where the network resides in another host -project the network should specified in the form of projects/{host-project-id}/global/networks/{network}`, - }, - }, - }, - }, - "options": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: instanceOptionsDiffSuppress, - Description: `Map of additional options used to configure the behavior of Data Fusion instance.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "private_instance": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Specifies whether the Data Fusion instance should be private. 
If set to -true, all Data Fusion nodes will have private IP addresses and will not be -able to access the public internet.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The region of the Data Fusion instance.`, - }, - "version": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Current version of the Data Fusion.`, - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Name of the zone in which the Data Fusion instance will be created. Only DEVELOPER instances use this field.`, - }, - "api_endpoint": { - Type: schema.TypeString, - Computed: true, - Description: `Endpoint on which the REST APIs is accessible.`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time the instance was created in RFC3339 UTC "Zulu" format, accurate to nanoseconds.`, - }, - "gcs_bucket": { - Type: schema.TypeString, - Computed: true, - Description: `Cloud Storage bucket generated by Data Fusion in the customer project.`, - }, - "p4_service_account": { - Type: schema.TypeString, - Computed: true, - Description: `P4 service account for the customer project.`, - }, - "service_endpoint": { - Type: schema.TypeString, - Computed: true, - Description: `Endpoint on which the Data Fusion UI and REST APIs are accessible.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `The current state of this Data Fusion instance. 
-- CREATING: Instance is being created -- RUNNING: Instance is running and ready for requests -- FAILED: Instance creation failed -- DELETING: Instance is being deleted -- UPGRADING: Instance is being upgraded -- RESTARTING: Instance is being restarted`, - }, - "state_message": { - Type: schema.TypeString, - Computed: true, - Description: `Additional information about the current state of this Data Fusion instance if available.`, - }, - "tenant_project_id": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the tenant project.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time the instance was last updated in RFC3339 UTC "Zulu" format, accurate to nanoseconds.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataFusionInstanceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandDataFusionInstanceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandDataFusionInstanceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - typeProp, err := expandDataFusionInstanceType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - 
enableStackdriverLoggingProp, err := expandDataFusionInstanceEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_stackdriver_logging"); !isEmptyValue(reflect.ValueOf(enableStackdriverLoggingProp)) && (ok || !reflect.DeepEqual(v, enableStackdriverLoggingProp)) { - obj["enableStackdriverLogging"] = enableStackdriverLoggingProp - } - enableStackdriverMonitoringProp, err := expandDataFusionInstanceEnableStackdriverMonitoring(d.Get("enable_stackdriver_monitoring"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_stackdriver_monitoring"); !isEmptyValue(reflect.ValueOf(enableStackdriverMonitoringProp)) && (ok || !reflect.DeepEqual(v, enableStackdriverMonitoringProp)) { - obj["enableStackdriverMonitoring"] = enableStackdriverMonitoringProp - } - enableRbacProp, err := expandDataFusionInstanceEnableRbac(d.Get("enable_rbac"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_rbac"); !isEmptyValue(reflect.ValueOf(enableRbacProp)) && (ok || !reflect.DeepEqual(v, enableRbacProp)) { - obj["enableRbac"] = enableRbacProp - } - labelsProp, err := expandDataFusionInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - optionsProp, err := expandDataFusionInstanceOptions(d.Get("options"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("options"); !isEmptyValue(reflect.ValueOf(optionsProp)) && (ok || !reflect.DeepEqual(v, optionsProp)) { - obj["options"] = optionsProp - } - versionProp, err := expandDataFusionInstanceVersion(d.Get("version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version"); !isEmptyValue(reflect.ValueOf(versionProp)) && (ok || !reflect.DeepEqual(v, 
versionProp)) { - obj["version"] = versionProp - } - privateInstanceProp, err := expandDataFusionInstancePrivateInstance(d.Get("private_instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_instance"); !isEmptyValue(reflect.ValueOf(privateInstanceProp)) && (ok || !reflect.DeepEqual(v, privateInstanceProp)) { - obj["privateInstance"] = privateInstanceProp - } - dataprocServiceAccountProp, err := expandDataFusionInstanceDataprocServiceAccount(d.Get("dataproc_service_account"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dataproc_service_account"); !isEmptyValue(reflect.ValueOf(dataprocServiceAccountProp)) && (ok || !reflect.DeepEqual(v, dataprocServiceAccountProp)) { - obj["dataprocServiceAccount"] = dataprocServiceAccountProp - } - networkConfigProp, err := expandDataFusionInstanceNetworkConfig(d.Get("network_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_config"); !isEmptyValue(reflect.ValueOf(networkConfigProp)) && (ok || !reflect.DeepEqual(v, networkConfigProp)) { - obj["networkConfig"] = networkConfigProp - } - zoneProp, err := expandDataFusionInstanceZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - displayNameProp, err := expandDataFusionInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - cryptoKeyConfigProp, err := expandDataFusionInstanceCryptoKeyConfig(d.Get("crypto_key_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("crypto_key_config"); !isEmptyValue(reflect.ValueOf(cryptoKeyConfigProp)) && (ok || 
!reflect.DeepEqual(v, cryptoKeyConfigProp)) { - obj["cryptoKeyConfig"] = cryptoKeyConfigProp - } - eventPublishConfigProp, err := expandDataFusionInstanceEventPublishConfig(d.Get("event_publish_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("event_publish_config"); !isEmptyValue(reflect.ValueOf(eventPublishConfigProp)) && (ok || !reflect.DeepEqual(v, eventPublishConfigProp)) { - obj["eventPublishConfig"] = eventPublishConfigProp - } - acceleratorsProp, err := expandDataFusionInstanceAccelerators(d.Get("accelerators"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("accelerators"); !isEmptyValue(reflect.ValueOf(acceleratorsProp)) && (ok || !reflect.DeepEqual(v, acceleratorsProp)) { - obj["accelerators"] = acceleratorsProp - } - - url, err := replaceVars(d, config, "{{DataFusionBasePath}}projects/{{project}}/locations/{{region}}/instances?instanceId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Instance: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Instance: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = DataFusionOperationWaitTimeWithResponse( - config, res, &opRes, 
project, "Creating Instance", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Instance: %s", err) - } - - if err := d.Set("name", flattenDataFusionInstanceName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) - - return resourceDataFusionInstanceRead(d, meta) -} - -func resourceDataFusionInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataFusionBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataFusionInstance %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - - if err := d.Set("name", flattenDataFusionInstanceName(res["name"], d, config)); 
err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("description", flattenDataFusionInstanceDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("type", flattenDataFusionInstanceType(res["type"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("enable_stackdriver_logging", flattenDataFusionInstanceEnableStackdriverLogging(res["enableStackdriverLogging"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("enable_stackdriver_monitoring", flattenDataFusionInstanceEnableStackdriverMonitoring(res["enableStackdriverMonitoring"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("enable_rbac", flattenDataFusionInstanceEnableRbac(res["enableRbac"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("labels", flattenDataFusionInstanceLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("options", flattenDataFusionInstanceOptions(res["options"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("create_time", flattenDataFusionInstanceCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("update_time", flattenDataFusionInstanceUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("state", flattenDataFusionInstanceState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("state_message", flattenDataFusionInstanceStateMessage(res["stateMessage"], d, config)); err != nil { - return fmt.Errorf("Error reading 
Instance: %s", err) - } - if err := d.Set("service_endpoint", flattenDataFusionInstanceServiceEndpoint(res["serviceEndpoint"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("version", flattenDataFusionInstanceVersion(res["version"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("private_instance", flattenDataFusionInstancePrivateInstance(res["privateInstance"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("dataproc_service_account", flattenDataFusionInstanceDataprocServiceAccount(res["dataprocServiceAccount"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("tenant_project_id", flattenDataFusionInstanceTenantProjectId(res["tenantProjectId"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("gcs_bucket", flattenDataFusionInstanceGcsBucket(res["gcsBucket"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("network_config", flattenDataFusionInstanceNetworkConfig(res["networkConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("zone", flattenDataFusionInstanceZone(res["zone"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("display_name", flattenDataFusionInstanceDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("api_endpoint", flattenDataFusionInstanceApiEndpoint(res["apiEndpoint"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("p4_service_account", flattenDataFusionInstanceP4ServiceAccount(res["p4ServiceAccount"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: 
%s", err) - } - if err := d.Set("crypto_key_config", flattenDataFusionInstanceCryptoKeyConfig(res["cryptoKeyConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("event_publish_config", flattenDataFusionInstanceEventPublishConfig(res["eventPublishConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("accelerators", flattenDataFusionInstanceAccelerators(res["accelerators"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - - return nil -} - -func resourceDataFusionInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - enableStackdriverLoggingProp, err := expandDataFusionInstanceEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_stackdriver_logging"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableStackdriverLoggingProp)) { - obj["enableStackdriverLogging"] = enableStackdriverLoggingProp - } - enableStackdriverMonitoringProp, err := expandDataFusionInstanceEnableStackdriverMonitoring(d.Get("enable_stackdriver_monitoring"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_stackdriver_monitoring"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableStackdriverMonitoringProp)) { - obj["enableStackdriverMonitoring"] = enableStackdriverMonitoringProp - } - enableRbacProp, err := expandDataFusionInstanceEnableRbac(d.Get("enable_rbac"), d, config) - if err != nil { - return err - } else if 
v, ok := d.GetOkExists("enable_rbac"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableRbacProp)) { - obj["enableRbac"] = enableRbacProp - } - labelsProp, err := expandDataFusionInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - versionProp, err := expandDataFusionInstanceVersion(d.Get("version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, versionProp)) { - obj["version"] = versionProp - } - eventPublishConfigProp, err := expandDataFusionInstanceEventPublishConfig(d.Get("event_publish_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("event_publish_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, eventPublishConfigProp)) { - obj["eventPublishConfig"] = eventPublishConfigProp - } - acceleratorsProp, err := expandDataFusionInstanceAccelerators(d.Get("accelerators"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("accelerators"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, acceleratorsProp)) { - obj["accelerators"] = acceleratorsProp - } - - url, err := replaceVars(d, config, "{{DataFusionBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("enable_stackdriver_logging") { - updateMask = append(updateMask, "enableStackdriverLogging") - } - - if d.HasChange("enable_stackdriver_monitoring") { - updateMask = append(updateMask, "enableStackdriverMonitoring") - } - - if d.HasChange("enable_rbac") { - updateMask = append(updateMask, "enableRbac") - } - - // updateMask is a URL 
parameter but not present in the schema, so replaceVars - // won't set it - - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) - } - - err = DataFusionOperationWaitTime( - config, res, project, "Updating Instance", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceDataFusionInstanceRead(d, meta) -} - -func resourceDataFusionInstanceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DataFusionBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Instance %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Instance") - } - - err = DataFusionOperationWaitTime( - config, res, project, "Deleting Instance", userAgent, - 
d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) - return nil -} - -func resourceDataFusionInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDataFusionInstanceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDataFusionInstanceDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceEnableStackdriverLogging(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceEnableStackdriverMonitoring(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceEnableRbac(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceCreateTime(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenDataFusionInstanceUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceStateMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceServiceEndpoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstancePrivateInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceDataprocServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceTenantProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceGcsBucket(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceNetworkConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["ip_allocation"] = - flattenDataFusionInstanceNetworkConfigIpAllocation(original["ipAllocation"], d, config) - transformed["network"] = - flattenDataFusionInstanceNetworkConfigNetwork(original["network"], d, config) - return []interface{}{transformed} -} -func flattenDataFusionInstanceNetworkConfigIpAllocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceNetworkConfigNetwork(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenDataFusionInstanceZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceApiEndpoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceP4ServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceCryptoKeyConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key_reference"] = - flattenDataFusionInstanceCryptoKeyConfigKeyReference(original["keyReference"], d, config) - return []interface{}{transformed} -} -func flattenDataFusionInstanceCryptoKeyConfigKeyReference(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceEventPublishConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = - flattenDataFusionInstanceEventPublishConfigEnabled(original["enabled"], d, config) - transformed["topic"] = - flattenDataFusionInstanceEventPublishConfigTopic(original["topic"], d, config) - return []interface{}{transformed} -} -func flattenDataFusionInstanceEventPublishConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceEventPublishConfigTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceAccelerators(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "accelerator_type": flattenDataFusionInstanceAcceleratorsAcceleratorType(original["acceleratorType"], d, config), - "state": flattenDataFusionInstanceAcceleratorsState(original["state"], d, config), - }) - } - return transformed -} -func flattenDataFusionInstanceAcceleratorsAcceleratorType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataFusionInstanceAcceleratorsState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDataFusionInstanceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") -} - -func expandDataFusionInstanceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceEnableStackdriverLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceEnableStackdriverMonitoring(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceEnableRbac(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { 
- return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandDataFusionInstanceOptions(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandDataFusionInstanceVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstancePrivateInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceDataprocServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceNetworkConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIpAllocation, err := expandDataFusionInstanceNetworkConfigIpAllocation(original["ip_allocation"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIpAllocation); val.IsValid() && !isEmptyValue(val) { - transformed["ipAllocation"] = transformedIpAllocation - } - - transformedNetwork, err := expandDataFusionInstanceNetworkConfigNetwork(original["network"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNetwork); val.IsValid() && !isEmptyValue(val) { - transformed["network"] = transformedNetwork - } - - return transformed, nil -} - -func expandDataFusionInstanceNetworkConfigIpAllocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandDataFusionInstanceNetworkConfigNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceCryptoKeyConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKeyReference, err := expandDataFusionInstanceCryptoKeyConfigKeyReference(original["key_reference"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKeyReference); val.IsValid() && !isEmptyValue(val) { - transformed["keyReference"] = transformedKeyReference - } - - return transformed, nil -} - -func expandDataFusionInstanceCryptoKeyConfigKeyReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceEventPublishConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandDataFusionInstanceEventPublishConfigEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - transformedTopic, err := expandDataFusionInstanceEventPublishConfigTopic(original["topic"], d, config) - if err != nil { - 
return nil, err - } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !isEmptyValue(val) { - transformed["topic"] = transformedTopic - } - - return transformed, nil -} - -func expandDataFusionInstanceEventPublishConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceEventPublishConfigTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceAccelerators(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAcceleratorType, err := expandDataFusionInstanceAcceleratorsAcceleratorType(original["accelerator_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAcceleratorType); val.IsValid() && !isEmptyValue(val) { - transformed["acceleratorType"] = transformedAcceleratorType - } - - transformedState, err := expandDataFusionInstanceAcceleratorsState(original["state"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedState); val.IsValid() && !isEmptyValue(val) { - transformed["state"] = transformedState - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataFusionInstanceAcceleratorsAcceleratorType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataFusionInstanceAcceleratorsState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_loss_prevention_deidentify_template.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_loss_prevention_deidentify_template.go deleted file mode 100644 index e1f653bc10..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_loss_prevention_deidentify_template.go +++ /dev/null @@ -1,9752 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceDataLossPreventionDeidentifyTemplate() *schema.Resource { - return &schema.Resource{ - Create: resourceDataLossPreventionDeidentifyTemplateCreate, - Read: resourceDataLossPreventionDeidentifyTemplateRead, - Update: resourceDataLossPreventionDeidentifyTemplateUpdate, - Delete: resourceDataLossPreventionDeidentifyTemplateDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDataLossPreventionDeidentifyTemplateImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "deidentify_config": { - Type: schema.TypeList, - Required: true, - Description: `Configuration of the deidentify template`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "info_type_transformations": { - Type: schema.TypeList, - Optional: true, - Description: `Treat the dataset as free-form text and apply the same free text transformation everywhere`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "transformations": { - Type: schema.TypeList, - Required: true, - Description: `Transformation for each infoType. Cannot specify more than one for a given infoType.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "primitive_transformation": { - Type: schema.TypeList, - Required: true, - Description: `Primitive transformation to apply to the infoType. -The 'primitive_transformation' block must only contain one argument, corresponding to the type of transformation.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "character_mask_config": { - Type: schema.TypeList, - Optional: true, - Description: `Partially mask a string by replacing a given number of characters with a fixed character. -Masking can start from the beginning or end of the string.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "characters_to_ignore": { - Type: schema.TypeList, - Optional: true, - Description: `Characters to skip when doing de-identification of a value. These will be left alone and skipped.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "characters_to_skip": { - Type: schema.TypeString, - Optional: true, - Description: `Characters to not transform when masking.`, - }, - "common_characters_to_ignore": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"NUMERIC", "ALPHA_UPPER_CASE", "ALPHA_LOWER_CASE", "PUNCTUATION", "WHITESPACE", ""}), - Description: `Common characters to not transform when masking. Useful to avoid removing punctuation. 
Possible values: ["NUMERIC", "ALPHA_UPPER_CASE", "ALPHA_LOWER_CASE", "PUNCTUATION", "WHITESPACE"]`, - }, - }, - }, - }, - "masking_character": { - Type: schema.TypeString, - Optional: true, - Description: `Character to use to mask the sensitive values—for example, * for an alphabetic string such as a name, or 0 for a numeric string -such as ZIP code or credit card number. This string must have a length of 1. If not supplied, this value defaults to * for -strings, and 0 for digits.`, - }, - "number_to_mask": { - Type: schema.TypeInt, - Optional: true, - Description: `Number of characters to mask. If not set, all matching chars will be masked. Skipped characters do not count towards this tally.`, - }, - "reverse_order": { - Type: schema.TypeBool, - Optional: true, - Description: `Mask characters in reverse order. For example, if masking_character is 0, number_to_mask is 14, and reverse_order is 'false', then the -input string '1234-5678-9012-3456' is masked as '00000000000000-3456'.`, - }, - }, - }, - }, - "crypto_deterministic_config": { - Type: schema.TypeList, - Optional: true, - Description: `Pseudonymization method that generates deterministic encryption for the given input. Outputs a base64 encoded representation of the encrypted output. Uses AES-SIV based on the RFC [https://tools.ietf.org/html/rfc5297](https://tools.ietf.org/html/rfc5297).`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "context": { - Type: schema.TypeList, - Optional: true, - Description: `A context may be used for higher security and maintaining referential integrity such that the same identifier in two different contexts will be given a distinct surrogate. The context is appended to plaintext value being encrypted. On decryption the provided context is validated against the value used during encryption. If a context was provided during encryption, same context must be provided during decryption as well. 
- -If the context is not set, plaintext would be used as is for encryption. If the context is set but: - -1. there is no record present when transforming a given value or -2. the field is not present when transforming a given value, - -plaintext would be used as is for encryption. - -Note that case (1) is expected when an 'InfoTypeTransformation' is applied to both structured and non-structured 'ContentItem's.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name describing the field.`, - }, - }, - }, - }, - "crypto_key": { - Type: schema.TypeList, - Optional: true, - Description: `The key used by the encryption function.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_wrapped": { - Type: schema.TypeList, - Optional: true, - Description: `KMS wrapped key. -Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt -For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). -Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "crypto_key_name": { - Type: schema.TypeString, - Required: true, - Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, - }, - "wrapped_key": { - Type: schema.TypeString, - Required: true, - Description: `The wrapped data crypto key. - -A base64-encoded string.`, - }, - }, - }, - }, - "transient": { - Type: schema.TypeList, - Optional: true, - Description: `Transient crypto key. Use this to have a random data crypto key generated. 
It will be discarded after the request finishes.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, - }, - }, - }, - }, - "unwrapped": { - Type: schema.TypeList, - Optional: true, - Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - Description: `A 128/192/256 bit key. - -A base64-encoded string.`, - }, - }, - }, - }, - }, - }, - }, - "surrogate_info_type": { - Type: schema.TypeList, - Optional: true, - Description: `The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} - -For example, if the name of custom info type is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' - -This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. - -Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. 
Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. - -In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either - -* reverse a surrogate that does not correspond to an actual identifier -* be unable to parse the surrogate and result in an error - -Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern '[A-Za-z0-9$-_]{1,64}'.`, - }, - "version": { - Type: schema.TypeString, - Optional: true, - Description: `Optional version name for this InfoType.`, - }, - }, - }, - }, - }, - }, - }, - "crypto_replace_ffx_fpe_config": { - Type: schema.TypeList, - Optional: true, - Description: `Replaces an identifier with a surrogate using Format Preserving Encryption (FPE) with the FFX mode of operation; however when used in the 'content.reidentify' API method, it serves the opposite function by reversing the surrogate back into the original identifier. The identifier must be encoded as ASCII. 
For a given crypto key and context, the same identifier will be replaced with the same surrogate. Identifiers must be at least two characters long. In the case that the identifier is the empty string, it will be skipped. See [https://cloud.google.com/dlp/docs/pseudonymization](https://cloud.google.com/dlp/docs/pseudonymization) to learn more. - -Note: We recommend using CryptoDeterministicConfig for all use cases which do not require preserving the input alphabet space and size, plus warrant referential integrity.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "common_alphabet": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED", "NUMERIC", "HEXADECIMAL", "UPPER_CASE_ALPHA_NUMERIC", "ALPHA_NUMERIC", ""}), - Description: `Common alphabets. Possible values: ["FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED", "NUMERIC", "HEXADECIMAL", "UPPER_CASE_ALPHA_NUMERIC", "ALPHA_NUMERIC"]`, - }, - "context": { - Type: schema.TypeList, - Optional: true, - Description: `The 'tweak', a context may be used for higher security since the same identifier in two different contexts won't be given the same surrogate. If the context is not set, a default tweak will be used. - -If the context is set but: - -1. there is no record present when transforming a given value or -2. the field is not present when transforming a given value, - -a default tweak will be used. - -Note that case (1) is expected when an 'InfoTypeTransformation' is applied to both structured and non-structured 'ContentItem's. Currently, the referenced field may be of value type integer or string. 
- -The tweak is constructed as a sequence of bytes in big endian byte order such that: - -* a 64 bit integer is encoded followed by a single byte of value 1 -* a string is encoded in UTF-8 format followed by a single byte of value 2`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name describing the field.`, - }, - }, - }, - }, - "crypto_key": { - Type: schema.TypeList, - Optional: true, - Description: `The key used by the encryption algorithm.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_wrapped": { - Type: schema.TypeList, - Optional: true, - Description: `KMS wrapped key. -Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt -For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). -Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "crypto_key_name": { - Type: schema.TypeString, - Required: true, - Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, - }, - "wrapped_key": { - Type: schema.TypeString, - Required: true, - Description: `The wrapped data crypto key. - -A base64-encoded string.`, - }, - }, - }, - }, - "transient": { - Type: schema.TypeList, - Optional: true, - Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the key. 
This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, - }, - }, - }, - }, - "unwrapped": { - Type: schema.TypeList, - Optional: true, - Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - Description: `A 128/192/256 bit key. - -A base64-encoded string.`, - }, - }, - }, - }, - }, - }, - }, - "custom_alphabet": { - Type: schema.TypeString, - Optional: true, - Description: `This is supported by mapping these to the alphanumeric characters that the FFX mode natively supports. This happens before/after encryption/decryption. Each character listed must appear only once. Number of characters must be in the range \[2, 95\]. This must be encoded as ASCII. The order of characters does not matter. The full list of allowed characters is: - -''0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ~'!@#$%^&*()_-+={[}]|:;"'<,>.?/''`, - }, - "radix": { - Type: schema.TypeInt, - Optional: true, - Description: `The native way to select the alphabet. Must be in the range \[2, 95\].`, - }, - "surrogate_info_type": { - Type: schema.TypeList, - Optional: true, - Description: `The custom infoType to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom infoType followed by the number of characters comprising the surrogate. 
The following scheme defines the format: info\_type\_name(surrogate\_character\_count):surrogate - -For example, if the name of custom infoType is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' - -This annotation identifies the surrogate when inspecting content using the custom infoType ['SurrogateType'](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype). This facilitates reversal of the surrogate when it occurs in free text. - -In order for inspection to work properly, the name of this infoType must not occur naturally anywhere in your data; otherwise, inspection may find a surrogate that does not correspond to an actual identifier. Therefore, choose your custom infoType name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. 
When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern '[A-Za-z0-9$-_]{1,64}'.`, - }, - "version": { - Type: schema.TypeString, - Optional: true, - Description: `Optional version name for this InfoType.`, - }, - }, - }, - }, - }, - }, - }, - "replace_config": { - Type: schema.TypeList, - Optional: true, - Description: `Replace each input value with a given value.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "new_value": { - Type: schema.TypeList, - Required: true, - Description: `Replace each input value with a given value. -The 'new_value' block must only contain one argument. For example when replacing the contents of a string-type field, only 'string_value' should be set.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "boolean_value": { - Type: schema.TypeBool, - Optional: true, - Description: `A boolean value.`, - }, - "date_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a whole or partial calendar date.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "day": { - Type: schema.TypeInt, - Optional: true, - Description: `Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a -year by itself or a year and month where the day is not significant.`, - }, - "month": { - Type: schema.TypeInt, - Optional: true, - Description: `Month of year. Must be from 1 to 12, or 0 if specifying a year without a month and day.`, - }, - "year": { - Type: schema.TypeInt, - Optional: true, - Description: `Year of date. Must be from 1 to 9999, or 0 if specifying a date without a year.`, - }, - }, - }, - }, - "day_of_week_value": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), - Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "float_value": { - Type: schema.TypeFloat, - Optional: true, - Description: `A float value.`, - }, - "integer_value": { - Type: schema.TypeInt, - Optional: true, - Description: `An integer value.`, - }, - "string_value": { - Type: schema.TypeString, - Optional: true, - Description: `A string value.`, - }, - "time_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a time of day.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hours": { - Type: schema.TypeInt, - Optional: true, - Description: `Hours of day in 24 hour format. Should be from 0 to 23.`, - }, - "minutes": { - Type: schema.TypeInt, - Optional: true, - Description: `Minutes of hour of day. Must be from 0 to 59.`, - }, - "nanos": { - Type: schema.TypeInt, - Optional: true, - Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, - }, - "seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `Seconds of minutes of the time. Must normally be from 0 to 59.`, - }, - }, - }, - }, - "timestamp_value": { - Type: schema.TypeString, - Optional: true, - Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. -Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - }, - }, - }, - "replace_with_info_type_config": { - Type: schema.TypeBool, - Optional: true, - Description: `Replace each matching finding with the name of the info type.`, - }, - }, - }, - }, - "info_types": { - Type: schema.TypeList, - Optional: true, - Description: `InfoTypes to apply the transformation to. 
Leaving this empty will apply the transformation to apply to -all findings that correspond to infoTypes that were requested in InspectConfig.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the information type.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{"deidentify_config.0.info_type_transformations", "deidentify_config.0.record_transformations"}, - }, - "record_transformations": { - Type: schema.TypeList, - Optional: true, - Description: `Treat the dataset as structured. Transformations can be applied to specific locations within structured datasets, such as transforming a column within a table.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field_transformations": { - Type: schema.TypeList, - Optional: true, - Description: `Transform the record by applying various field transformations.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "fields": { - Type: schema.TypeList, - Required: true, - Description: `Input field(s) to apply the transformation to. When you have columns that reference their position within a list, omit the index from the FieldId. -FieldId name matching ignores the index. For example, instead of "contact.nums[0].type", use "contact.nums.type".`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name describing the field.`, - }, - }, - }, - }, - "primitive_transformation": { - Type: schema.TypeList, - Required: true, - Description: `Apply the transformation to the entire field. 
-The 'primitive_transformation' block must only contain one argument, corresponding to the type of transformation.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucketing_config": { - Type: schema.TypeList, - Optional: true, - Description: `Generalization function that buckets values based on ranges. The ranges and replacement values are dynamically provided by the user for custom behavior, such as 1-30 -> LOW 31-65 -> MEDIUM 66-100 -> HIGH -This can be used on data of type: number, long, string, timestamp. -If the provided value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. -See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "buckets": { - Type: schema.TypeList, - Optional: true, - Description: `Set of buckets. Ranges must be non-overlapping. -Bucket is represented as a range, along with replacement values.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "replacement_value": { - Type: schema.TypeList, - Required: true, - Description: `Replacement value for this bucket. -The 'replacement_value' block must only contain one argument.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "boolean_value": { - Type: schema.TypeBool, - Optional: true, - Description: `A boolean value.`, - }, - "date_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a whole or partial calendar date.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "day": { - Type: schema.TypeInt, - Optional: true, - Description: `Day of a month. 
Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, - }, - "month": { - Type: schema.TypeInt, - Optional: true, - Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, - }, - "year": { - Type: schema.TypeInt, - Optional: true, - Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, - }, - }, - }, - }, - "day_of_week_value": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), - Description: `Represents a day of the week. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "float_value": { - Type: schema.TypeFloat, - Optional: true, - Description: `A float value.`, - }, - "integer_value": { - Type: schema.TypeString, - Optional: true, - Description: `An integer value (int64 format)`, - }, - "string_value": { - Type: schema.TypeString, - Optional: true, - Description: `A string value.`, - }, - "time_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a time of day.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hours": { - Type: schema.TypeInt, - Optional: true, - Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, - }, - "minutes": { - Type: schema.TypeInt, - Optional: true, - Description: `Minutes of hour of day. Must be from 0 to 59.`, - }, - "nanos": { - Type: schema.TypeInt, - Optional: true, - Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, - }, - "seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `Seconds of minutes of the time. Must normally be from 0 to 59. 
An API may allow the value 60 if it allows leap-seconds.`, - }, - }, - }, - }, - "timestamp_value": { - Type: schema.TypeString, - Optional: true, - Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - "max": { - Type: schema.TypeList, - Optional: true, - Description: `Upper bound of the range, exclusive; type must match min. -The 'max' block must only contain one argument. See the 'bucketing_config' block description for more information about choosing a data type.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "boolean_value": { - Type: schema.TypeBool, - Optional: true, - Description: `A boolean value.`, - }, - "date_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a whole or partial calendar date.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "day": { - Type: schema.TypeInt, - Optional: true, - Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, - }, - "month": { - Type: schema.TypeInt, - Optional: true, - Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, - }, - "year": { - Type: schema.TypeInt, - Optional: true, - Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, - }, - }, - }, - }, - "day_of_week_value": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), - Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "float_value": { - Type: schema.TypeFloat, - Optional: true, - Description: `A float value.`, - }, - "integer_value": { - Type: schema.TypeString, - Optional: true, - Description: `An integer value (int64 format)`, - }, - "string_value": { - Type: schema.TypeString, - Optional: true, - Description: `A string value.`, - }, - "time_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a time of day.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hours": { - Type: schema.TypeInt, - Optional: true, - Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, - }, - "minutes": { - Type: schema.TypeInt, - Optional: true, - Description: `Minutes of hour of day. Must be from 0 to 59.`, - }, - "nanos": { - Type: schema.TypeInt, - Optional: true, - Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, - }, - "seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, - }, - }, - }, - }, - "timestamp_value": { - Type: schema.TypeString, - Optional: true, - Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - "min": { - Type: schema.TypeList, - Optional: true, - Description: `Lower bound of the range, inclusive. Type should be the same as max if used. -The 'min' block must only contain one argument. 
See the 'bucketing_config' block description for more information about choosing a data type.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "boolean_value": { - Type: schema.TypeBool, - Optional: true, - Description: `A boolean value.`, - }, - "date_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a whole or partial calendar date.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "day": { - Type: schema.TypeInt, - Optional: true, - Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, - }, - "month": { - Type: schema.TypeInt, - Optional: true, - Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, - }, - "year": { - Type: schema.TypeInt, - Optional: true, - Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, - }, - }, - }, - }, - "day_of_week_value": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), - Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "float_value": { - Type: schema.TypeFloat, - Optional: true, - Description: `A float value.`, - }, - "integer_value": { - Type: schema.TypeString, - Optional: true, - Description: `An integer value (int64 format)`, - }, - "string_value": { - Type: schema.TypeString, - Optional: true, - Description: `A string value.`, - }, - "time_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a time of day.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hours": { - Type: schema.TypeInt, - Optional: true, - Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, - }, - "minutes": { - Type: schema.TypeInt, - Optional: true, - Description: `Minutes of hour of day. Must be from 0 to 59.`, - }, - "nanos": { - Type: schema.TypeInt, - Optional: true, - Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, - }, - "seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, - }, - }, - }, - }, - "timestamp_value": { - Type: schema.TypeString, - Optional: true, - Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "character_mask_config": { - Type: schema.TypeList, - Optional: true, - Description: `Partially mask a string by replacing a given number of characters with a fixed character. Masking can start from the beginning or end of the string. 
This can be used on data of any type (numbers, longs, and so on) and when de-identifying structured data we'll attempt to preserve the original data's type. (This allows you to take a long like 123 and modify it to a string like **3).`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "characters_to_ignore": { - Type: schema.TypeList, - Optional: true, - Description: `Characters to skip when doing de-identification of a value. These will be left alone and skipped.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "characters_to_skip": { - Type: schema.TypeString, - Optional: true, - Description: `Characters to not transform when masking.`, - }, - "common_characters_to_ignore": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"NUMERIC", "ALPHA_UPPER_CASE", "ALPHA_LOWER_CASE", "PUNCTUATION", "WHITESPACE", ""}), - Description: `Common characters to not transform when masking. Useful to avoid removing punctuation. Possible values: ["NUMERIC", "ALPHA_UPPER_CASE", "ALPHA_LOWER_CASE", "PUNCTUATION", "WHITESPACE"]`, - }, - }, - }, - }, - "masking_character": { - Type: schema.TypeString, - Optional: true, - Description: `Character to use to mask the sensitive values—for example, * for an alphabetic string such as a name, or 0 for a numeric string -such as ZIP code or credit card number. This string must have a length of 1. If not supplied, this value defaults to * for -strings, and 0 for digits.`, - }, - "number_to_mask": { - Type: schema.TypeInt, - Optional: true, - Description: `Number of characters to mask. If not set, all matching chars will be masked. Skipped characters do not count towards this tally. -If number_to_mask is negative, this denotes inverse masking. Cloud DLP masks all but a number of characters. 
For example, suppose you have the following values: -- 'masking_character' is * -- 'number_to_mask' is -4 -- 'reverse_order' is false -- 'characters_to_ignore' includes - -- Input string is 1234-5678-9012-3456 - -The resulting de-identified string is ****-****-****-3456. Cloud DLP masks all but the last four characters. If reverseOrder is true, all but the first four characters are masked as 1234-****-****-****.`, - }, - "reverse_order": { - Type: schema.TypeBool, - Optional: true, - Description: `Mask characters in reverse order. For example, if masking_character is 0, number_to_mask is 14, and reverse_order is 'false', then the -input string '1234-5678-9012-3456' is masked as '00000000000000-3456'.`, - }, - }, - }, - }, - "crypto_deterministic_config": { - Type: schema.TypeList, - Optional: true, - Description: `Pseudonymization method that generates deterministic encryption for the given input. Outputs a base64 encoded representation of the encrypted output. Uses AES-SIV based on the RFC [https://tools.ietf.org/html/rfc5297](https://tools.ietf.org/html/rfc5297).`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "context": { - Type: schema.TypeList, - Optional: true, - Description: `A context may be used for higher security and maintaining referential integrity such that the same identifier in two different contexts will be given a distinct surrogate. The context is appended to plaintext value being encrypted. On decryption the provided context is validated against the value used during encryption. If a context was provided during encryption, same context must be provided during decryption as well. - -If the context is not set, plaintext would be used as is for encryption. If the context is set but: - -1. there is no record present when transforming a given value or -2. the field is not present when transforming a given value, - -plaintext would be used as is for encryption. 
- -Note that case (1) is expected when an InfoTypeTransformation is applied to both structured and unstructured ContentItems.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name describing the field.`, - }, - }, - }, - }, - "crypto_key": { - Type: schema.TypeList, - Optional: true, - Description: `The key used by the encryption function. For deterministic encryption using AES-SIV, the provided key is internally expanded to 64 bytes prior to use.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_wrapped": { - Type: schema.TypeList, - Optional: true, - Description: `KMS wrapped key. -Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt -For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). -Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "crypto_key_name": { - Type: schema.TypeString, - Required: true, - Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, - }, - "wrapped_key": { - Type: schema.TypeString, - Required: true, - Description: `The wrapped data crypto key. - -A base64-encoded string.`, - }, - }, - }, - }, - "transient": { - Type: schema.TypeList, - Optional: true, - Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the key. 
This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, - }, - }, - }, - }, - "unwrapped": { - Type: schema.TypeList, - Optional: true, - Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - Description: `A 128/192/256 bit key. - -A base64-encoded string.`, - }, - }, - }, - }, - }, - }, - }, - "surrogate_info_type": { - Type: schema.TypeList, - Optional: true, - Description: `The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} - -For example, if the name of custom info type is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' - -This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. - -Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. 
- -In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either - -* reverse a surrogate that does not correspond to an actual identifier -* be unable to parse the surrogate and result in an error - -Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern '[A-Za-z0-9$-_]{1,64}'.`, - }, - "version": { - Type: schema.TypeString, - Optional: true, - Description: `Optional version name for this InfoType.`, - }, - }, - }, - }, - }, - }, - }, - "crypto_hash_config": { - Type: schema.TypeList, - Optional: true, - Description: `Pseudonymization method that generates surrogates via cryptographic hashing. Uses SHA-256. The key size must be either 32 or 64 bytes. -Outputs a base64 encoded representation of the hashed output (for example, L7k0BHmF1ha5U3NfGykjro4xWi1MPVQPjhMAZbSV9mM=). -Currently, only string and integer values can be hashed. 
-See https://cloud.google.com/dlp/docs/pseudonymization to learn more.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "crypto_key": { - Type: schema.TypeList, - Optional: true, - Description: `The key used by the encryption function.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_wrapped": { - Type: schema.TypeList, - Optional: true, - Description: `KMS wrapped key. -Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt -For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). -Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "crypto_key_name": { - Type: schema.TypeString, - Required: true, - Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, - }, - "wrapped_key": { - Type: schema.TypeString, - Required: true, - Description: `The wrapped data crypto key. - -A base64-encoded string.`, - }, - }, - }, - }, - "transient": { - Type: schema.TypeList, - Optional: true, - Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. 
When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, - }, - }, - }, - }, - "unwrapped": { - Type: schema.TypeList, - Optional: true, - Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - Description: `A 128/192/256 bit key. - -A base64-encoded string.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "crypto_replace_ffx_fpe_config": { - Type: schema.TypeList, - Optional: true, - Description: `Replaces an identifier with a surrogate using Format Preserving Encryption (FPE) with the FFX mode of operation; however when used in the 'content.reidentify' API method, it serves the opposite function by reversing the surrogate back into the original identifier. The identifier must be encoded as ASCII. For a given crypto key and context, the same identifier will be replaced with the same surrogate. Identifiers must be at least two characters long. In the case that the identifier is the empty string, it will be skipped. See [https://cloud.google.com/dlp/docs/pseudonymization](https://cloud.google.com/dlp/docs/pseudonymization) to learn more. - -Note: We recommend using CryptoDeterministicConfig for all use cases which do not require preserving the input alphabet space and size, plus warrant referential integrity.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "common_alphabet": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED", "NUMERIC", "HEXADECIMAL", "UPPER_CASE_ALPHA_NUMERIC", "ALPHA_NUMERIC", ""}), - Description: `Common alphabets. 
Possible values: ["FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED", "NUMERIC", "HEXADECIMAL", "UPPER_CASE_ALPHA_NUMERIC", "ALPHA_NUMERIC"]`, - }, - "context": { - Type: schema.TypeList, - Optional: true, - Description: `The 'tweak', a context may be used for higher security since the same identifier in two different contexts won't be given the same surrogate. If the context is not set, a default tweak will be used. - -If the context is set but: - -1. there is no record present when transforming a given value or -2. the field is not present when transforming a given value, - -a default tweak will be used. - -Note that case (1) is expected when an 'InfoTypeTransformation' is applied to both structured and non-structured 'ContentItem's. Currently, the referenced field may be of value type integer or string. - -The tweak is constructed as a sequence of bytes in big endian byte order such that: - -* a 64 bit integer is encoded followed by a single byte of value 1 -* a string is encoded in UTF-8 format followed by a single byte of value 2`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name describing the field.`, - }, - }, - }, - }, - "crypto_key": { - Type: schema.TypeList, - Optional: true, - Description: `The key used by the encryption algorithm.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_wrapped": { - Type: schema.TypeList, - Optional: true, - Description: `KMS wrapped key. -Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt -For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). 
-Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "crypto_key_name": { - Type: schema.TypeString, - Required: true, - Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, - }, - "wrapped_key": { - Type: schema.TypeString, - Required: true, - Description: `The wrapped data crypto key. - -A base64-encoded string.`, - }, - }, - }, - }, - "transient": { - Type: schema.TypeList, - Optional: true, - Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, - }, - }, - }, - }, - "unwrapped": { - Type: schema.TypeList, - Optional: true, - Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - Description: `A 128/192/256 bit key. - -A base64-encoded string.`, - }, - }, - }, - }, - }, - }, - }, - "custom_alphabet": { - Type: schema.TypeString, - Optional: true, - Description: `This is supported by mapping these to the alphanumeric characters that the FFX mode natively supports. This happens before/after encryption/decryption. Each character listed must appear only once. 
Number of characters must be in the range \[2, 95\]. This must be encoded as ASCII. The order of characters does not matter. The full list of allowed characters is: - -''0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ~'!@#$%^&*()_-+={[}]|:;"'<,>.?/''`, - }, - "radix": { - Type: schema.TypeInt, - Optional: true, - Description: `The native way to select the alphabet. Must be in the range \[2, 95\].`, - }, - "surrogate_info_type": { - Type: schema.TypeList, - Optional: true, - Description: `The custom infoType to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom infoType followed by the number of characters comprising the surrogate. The following scheme defines the format: info\_type\_name(surrogate\_character\_count):surrogate - -For example, if the name of custom infoType is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' - -This annotation identifies the surrogate when inspecting content using the custom infoType ['SurrogateType'](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype). This facilitates reversal of the surrogate when it occurs in free text. - -In order for inspection to work properly, the name of this infoType must not occur naturally anywhere in your data; otherwise, inspection may find a surrogate that does not correspond to an actual identifier. Therefore, choose your custom infoType name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. 
For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern '[A-Za-z0-9$-_]{1,64}'.`, - }, - "version": { - Type: schema.TypeString, - Optional: true, - Description: `Optional version name for this InfoType.`, - }, - }, - }, - }, - }, - }, - }, - "date_shift_config": { - Type: schema.TypeList, - Optional: true, - Description: `Shifts dates by random number of days, with option to be consistent for the same context. See https://cloud.google.com/dlp/docs/concepts-date-shifting to learn more.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "lower_bound_days": { - Type: schema.TypeInt, - Required: true, - Description: `For example, -5 means shift date to at most 5 days back in the past.`, - }, - "upper_bound_days": { - Type: schema.TypeInt, - Required: true, - Description: `Range of shift in days. Actual shift will be selected at random within this range (inclusive ends). Negative means shift to earlier in time. Must not be more than 365250 days (1000 years) each direction. - -For example, 3 means shift date to at most 3 days into the future.`, - }, - "context": { - Type: schema.TypeList, - Optional: true, - Description: `Points to the field that contains the context, for example, an entity id. -If set, must also set cryptoKey. 
If set, shift will be consistent for the given context.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name describing the field.`, - }, - }, - }, - }, - "crypto_key": { - Type: schema.TypeList, - Optional: true, - Description: `Causes the shift to be computed based on this key and the context. This results in the same shift for the same context and cryptoKey. If set, must also set context. Can only be applied to table items.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_wrapped": { - Type: schema.TypeList, - Optional: true, - Description: `KMS wrapped key. -Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt -For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). -Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "crypto_key_name": { - Type: schema.TypeString, - Required: true, - Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, - }, - "wrapped_key": { - Type: schema.TypeString, - Required: true, - Description: `The wrapped data crypto key. - -A base64-encoded string.`, - }, - }, - }, - }, - "transient": { - Type: schema.TypeList, - Optional: true, - Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the key. 
This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, - }, - }, - }, - }, - "unwrapped": { - Type: schema.TypeList, - Optional: true, - Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - Description: `A 128/192/256 bit key. - -A base64-encoded string.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "fixed_size_bucketing_config": { - Type: schema.TypeList, - Optional: true, - Description: `Buckets values based on fixed size ranges. The Bucketing transformation can provide all of this functionality, but requires more configuration. This message is provided as a convenience to the user for simple bucketing strategies. - -The transformed value will be a hyphenated string of {lower_bound}-{upper_bound}. For example, if lower_bound = 10 and upper_bound = 20, all values that are within this bucket will be replaced with "10-20". - -This can be used on data of type: double, long. - -If the bound Value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. - -See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucket_size": { - Type: schema.TypeFloat, - Required: true, - Description: `Size of each bucket (except for minimum and maximum buckets). 
-So if lower_bound = 10, upper_bound = 89, and bucketSize = 10, then the following buckets would be used: -10, 10-20, 20-30, 30-40, 40-50, 50-60, 60-70, 70-80, 80-89, 89+. -Precision up to 2 decimals works.`, - }, - "lower_bound": { - Type: schema.TypeList, - Required: true, - Description: `Lower bound value of buckets. -All values less than lower_bound are grouped together into a single bucket; for example if lower_bound = 10, then all values less than 10 are replaced with the value "-10". -The 'lower_bound' block must only contain one argument. See the 'fixed_size_bucketing_config' block description for more information about choosing a data type.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "boolean_value": { - Type: schema.TypeBool, - Optional: true, - Description: `A boolean value.`, - }, - "date_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a whole or partial calendar date.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "day": { - Type: schema.TypeInt, - Optional: true, - Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, - }, - "month": { - Type: schema.TypeInt, - Optional: true, - Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, - }, - "year": { - Type: schema.TypeInt, - Optional: true, - Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, - }, - }, - }, - }, - "day_of_week_value": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), - Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "float_value": { - Type: schema.TypeFloat, - Optional: true, - Description: `A float value.`, - }, - "integer_value": { - Type: schema.TypeString, - Optional: true, - Description: `An integer value (int64 format)`, - }, - "string_value": { - Type: schema.TypeString, - Optional: true, - Description: `A string value.`, - }, - "time_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a time of day.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hours": { - Type: schema.TypeInt, - Optional: true, - Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, - }, - "minutes": { - Type: schema.TypeInt, - Optional: true, - Description: `Minutes of hour of day. Must be from 0 to 59.`, - }, - "nanos": { - Type: schema.TypeInt, - Optional: true, - Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, - }, - "seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, - }, - }, - }, - }, - "timestamp_value": { - Type: schema.TypeString, - Optional: true, - Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - "upper_bound": { - Type: schema.TypeList, - Required: true, - Description: `Upper bound value of buckets. -All values greater than upper_bound are grouped together into a single bucket; for example if upper_bound = 89, then all values greater than 89 are replaced with the value "89+". -The 'upper_bound' block must only contain one argument. 
See the 'fixed_size_bucketing_config' block description for more information about choosing a data type.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "boolean_value": { - Type: schema.TypeBool, - Optional: true, - Description: `A boolean value.`, - }, - "date_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a whole or partial calendar date.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "day": { - Type: schema.TypeInt, - Optional: true, - Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, - }, - "month": { - Type: schema.TypeInt, - Optional: true, - Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, - }, - "year": { - Type: schema.TypeInt, - Optional: true, - Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, - }, - }, - }, - }, - "day_of_week_value": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), - Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "float_value": { - Type: schema.TypeFloat, - Optional: true, - Description: `A float value.`, - }, - "integer_value": { - Type: schema.TypeString, - Optional: true, - Description: `An integer value (int64 format)`, - }, - "string_value": { - Type: schema.TypeString, - Optional: true, - Description: `A string value.`, - }, - "time_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a time of day.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hours": { - Type: schema.TypeInt, - Optional: true, - Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, - }, - "minutes": { - Type: schema.TypeInt, - Optional: true, - Description: `Minutes of hour of day. Must be from 0 to 59.`, - }, - "nanos": { - Type: schema.TypeInt, - Optional: true, - Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, - }, - "seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, - }, - }, - }, - }, - "timestamp_value": { - Type: schema.TypeString, - Optional: true, - Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - }, - }, - }, - "redact_config": { - Type: schema.TypeList, - Optional: true, - Description: `Redact a given value. 
For example, if used with an InfoTypeTransformation transforming PHONE_NUMBER, and input 'My phone number is 206-555-0123', the output would be 'My phone number is '.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{}, - }, - }, - "replace_config": { - Type: schema.TypeList, - Optional: true, - Description: `Replace with a specified value.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "new_value": { - Type: schema.TypeList, - Required: true, - Description: `Replace each input value with a given value. -The 'new_value' block must only contain one argument. For example when replacing the contents of a string-type field, only 'string_value' should be set.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "boolean_value": { - Type: schema.TypeBool, - Optional: true, - Description: `A boolean value.`, - }, - "date_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a whole or partial calendar date.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "day": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 31), - Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, - }, - "month": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 12), - Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, - }, - "year": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 9999), - Description: `Year of the date. 
Must be from 1 to 9999, or 0 to specify a date without a year.`, - }, - }, - }, - }, - "day_of_week_value": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), - Description: `Represents a day of the week. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "float_value": { - Type: schema.TypeFloat, - Optional: true, - Description: `A float value.`, - }, - "integer_value": { - Type: schema.TypeString, - Optional: true, - Description: `An integer value (int64 format)`, - }, - "string_value": { - Type: schema.TypeString, - Optional: true, - Description: `A string value.`, - }, - "time_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a time of day.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hours": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 24), - Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, - }, - "minutes": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 59), - Description: `Minutes of hour of day. Must be from 0 to 59.`, - }, - "nanos": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 999999999), - Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, - }, - "seconds": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 60), - Description: `Seconds of minutes of the time. Must normally be from 0 to 59. 
An API may allow the value 60 if it allows leap-seconds.`, - }, - }, - }, - }, - "timestamp_value": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRFC3339Date, - Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - }, - }, - }, - "replace_dictionary_config": { - Type: schema.TypeList, - Optional: true, - Description: `Replace with a value randomly drawn (with replacement) from a dictionary.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "word_list": { - Type: schema.TypeList, - Optional: true, - Description: `A list of words to select from for random replacement. The [limits](https://cloud.google.com/dlp/limits) page contains details about the size limits of dictionaries.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "words": { - Type: schema.TypeList, - Required: true, - Description: `Words or phrases defining the dictionary. The dictionary must contain at least one phrase and every phrase must contain at least 2 characters that are letters or digits.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "time_part_config": { - Type: schema.TypeList, - Optional: true, - Description: `For use with Date, Timestamp, and TimeOfDay, extract or preserve a portion of the value.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "part_to_extract": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"YEAR", "MONTH", "DAY_OF_MONTH", "DAY_OF_WEEK", "WEEK_OF_YEAR", "HOUR_OF_DAY", ""}), - Description: `The part of the time to keep. 
Possible values: ["YEAR", "MONTH", "DAY_OF_MONTH", "DAY_OF_WEEK", "WEEK_OF_YEAR", "HOUR_OF_DAY"]`, - }, - }, - }, - }, - }, - }, - }, - "condition": { - Type: schema.TypeList, - Optional: true, - Description: `Only apply the transformation if the condition evaluates to true for the given RecordCondition. The conditions are allowed to reference fields that are not used in the actual transformation. -Example Use Cases: -- Apply a different bucket transformation to an age column if the zip code column for the same record is within a specific range. -- Redact a field if the date of birth field is greater than 85.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "expressions": { - Type: schema.TypeList, - Optional: true, - Description: `An expression.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "conditions": { - Type: schema.TypeList, - Optional: true, - Description: `Conditions to apply to the expression.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "conditions": { - Type: schema.TypeList, - Optional: true, - Description: `A collection of conditions.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field": { - Type: schema.TypeList, - Required: true, - Description: `Field within the record this condition is evaluated against.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name describing the field.`, - }, - }, - }, - }, - "operator": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"EQUAL_TO", "NOT_EQUAL_TO", "GREATER_THAN", "LESS_THAN", "GREATER_THAN_OR_EQUALS", "LESS_THAN_OR_EQUALS", "EXISTS"}), - Description: `Operator used to compare the field or infoType to the value. 
Possible values: ["EQUAL_TO", "NOT_EQUAL_TO", "GREATER_THAN", "LESS_THAN", "GREATER_THAN_OR_EQUALS", "LESS_THAN_OR_EQUALS", "EXISTS"]`, - }, - "value": { - Type: schema.TypeList, - Optional: true, - Description: `Value to compare against. -The 'value' block must only contain one argument. For example when a condition is evaluated against a string-type field, only 'string_value' should be set. -This argument is mandatory, except for conditions using the 'EXISTS' operator.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "boolean_value": { - Type: schema.TypeBool, - Optional: true, - Description: `A boolean value.`, - }, - "date_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a whole or partial calendar date.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "day": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 31), - Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, - }, - "month": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 12), - Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, - }, - "year": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 9999), - Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, - }, - }, - }, - }, - "day_of_week_value": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), - Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "float_value": { - Type: schema.TypeFloat, - Optional: true, - Description: `A float value.`, - }, - "integer_value": { - Type: schema.TypeString, - Optional: true, - Description: `An integer value (int64 format)`, - }, - "string_value": { - Type: schema.TypeString, - Optional: true, - Description: `A string value.`, - }, - "time_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a time of day.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hours": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 24), - Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, - }, - "minutes": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 59), - Description: `Minutes of hour of day. Must be from 0 to 59.`, - }, - "nanos": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 999999999), - Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, - }, - "seconds": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 60), - Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, - }, - }, - }, - }, - "timestamp_value": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateRFC3339Date, - Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "logical_operator": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"AND", ""}), - Description: `The operator to apply to the result of conditions. Default and currently only supported value is AND Default value: "AND" Possible values: ["AND"]`, - Default: "AND", - }, - }, - }, - }, - }, - }, - }, - }, - }, - AtLeastOneOf: []string{"deidentify_config.0.record_transformations.0.field_transformations", "deidentify_config.0.record_transformations.0.record_suppressions"}, - }, - "record_suppressions": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration defining which records get suppressed entirely. Records that match any suppression rule are omitted from the output.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "condition": { - Type: schema.TypeList, - Optional: true, - Description: `A condition that when it evaluates to true will result in the record being evaluated to be suppressed from the transformed content.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "expressions": { - Type: schema.TypeList, - Optional: true, - Description: `An expression, consisting of an operator and conditions.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "conditions": { - Type: schema.TypeList, - Optional: true, - Description: `Conditions to apply to the expression.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "conditions": { - Type: schema.TypeList, - Optional: true, - Description: `A collection of conditions.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field": { - Type: schema.TypeList, - Required: true, - Description: `Field within the record this condition is evaluated against.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name describing the field.`, - }, - }, - }, - }, - "operator": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"EQUAL_TO", "NOT_EQUAL_TO", "GREATER_THAN", "LESS_THAN", "GREATER_THAN_OR_EQUALS", "LESS_THAN_OR_EQUALS", "EXISTS"}), - Description: `Operator used to compare the field or infoType to the value. Possible values: ["EQUAL_TO", "NOT_EQUAL_TO", "GREATER_THAN", "LESS_THAN", "GREATER_THAN_OR_EQUALS", "LESS_THAN_OR_EQUALS", "EXISTS"]`, - }, - "value": { - Type: schema.TypeList, - Optional: true, - Description: `Value to compare against. [Mandatory, except for EXISTS tests.]`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "boolean_value": { - Type: schema.TypeBool, - Optional: true, - Description: `A boolean value.`, - }, - "date_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a whole or partial calendar date.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "day": { - Type: schema.TypeInt, - Optional: true, - Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, - }, - "month": { - Type: schema.TypeInt, - Optional: true, - Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, - }, - "year": { - Type: schema.TypeInt, - Optional: true, - Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, - }, - }, - }, - }, - "day_of_week_value": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), - Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "float_value": { - Type: schema.TypeFloat, - Optional: true, - Description: `A float value.`, - }, - "integer_value": { - Type: schema.TypeString, - Optional: true, - Description: `An integer value (int64 format)`, - }, - "string_value": { - Type: schema.TypeString, - Optional: true, - Description: `A string value.`, - }, - "time_value": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a time of day.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hours": { - Type: schema.TypeInt, - Optional: true, - Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, - }, - "minutes": { - Type: schema.TypeInt, - Optional: true, - Description: `Minutes of hour of day. Must be from 0 to 59.`, - }, - "nanos": { - Type: schema.TypeInt, - Optional: true, - Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, - }, - "seconds": { - Type: schema.TypeInt, - Optional: true, - Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, - }, - }, - }, - }, - "timestamp_value": { - Type: schema.TypeString, - Optional: true, - Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "logical_operator": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"AND", ""}), - Description: `The operator to apply to the result of conditions. Default and currently only supported value is AND. 
Default value: "AND" Possible values: ["AND"]`, - Default: "AND", - }, - }, - }, - }, - }, - }, - }, - }, - }, - AtLeastOneOf: []string{"deidentify_config.0.record_transformations.0.field_transformations", "deidentify_config.0.record_transformations.0.record_suppressions"}, - }, - }, - }, - ExactlyOneOf: []string{"deidentify_config.0.info_type_transformations", "deidentify_config.0.record_transformations"}, - }, - }, - }, - }, - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent of the template in any of the following formats: - -* 'projects/{{project}}' -* 'projects/{{project}}/locations/{{location}}' -* 'organizations/{{organization_id}}' -* 'organizations/{{organization_id}}/locations/{{location}}'`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A description of the template.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `User set display name of the template.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the template. 
Set by the server.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataLossPreventionDeidentifyTemplateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionDeidentifyTemplateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionDeidentifyTemplateDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - deidentifyConfigProp, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfig(d.Get("deidentify_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deidentify_config"); !isEmptyValue(reflect.ValueOf(deidentifyConfigProp)) && (ok || !reflect.DeepEqual(v, deidentifyConfigProp)) { - obj["deidentifyConfig"] = deidentifyConfigProp - } - - obj, err = resourceDataLossPreventionDeidentifyTemplateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/deidentifyTemplates") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new DeidentifyTemplate: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, 
d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating DeidentifyTemplate: %s", err) - } - if err := d.Set("name", flattenDataLossPreventionDeidentifyTemplateName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{parent}}/deidentifyTemplates/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating DeidentifyTemplate %q: %#v", d.Id(), res) - - return resourceDataLossPreventionDeidentifyTemplateRead(d, meta) -} - -func resourceDataLossPreventionDeidentifyTemplateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/deidentifyTemplates/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataLossPreventionDeidentifyTemplate %q", d.Id())) - } - - if err := d.Set("name", flattenDataLossPreventionDeidentifyTemplateName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading DeidentifyTemplate: %s", err) - } - if err := d.Set("description", flattenDataLossPreventionDeidentifyTemplateDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading DeidentifyTemplate: %s", err) - } - if err := d.Set("display_name", flattenDataLossPreventionDeidentifyTemplateDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading 
DeidentifyTemplate: %s", err) - } - if err := d.Set("deidentify_config", flattenDataLossPreventionDeidentifyTemplateDeidentifyConfig(res["deidentifyConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading DeidentifyTemplate: %s", err) - } - - return nil -} - -func resourceDataLossPreventionDeidentifyTemplateUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionDeidentifyTemplateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionDeidentifyTemplateDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - deidentifyConfigProp, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfig(d.Get("deidentify_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deidentify_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deidentifyConfigProp)) { - obj["deidentifyConfig"] = deidentifyConfigProp - } - - obj, err = resourceDataLossPreventionDeidentifyTemplateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/deidentifyTemplates/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating DeidentifyTemplate %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = 
append(updateMask, "description") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("deidentify_config") { - updateMask = append(updateMask, "deidentifyConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating DeidentifyTemplate %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating DeidentifyTemplate %q: %#v", d.Id(), res) - } - - return resourceDataLossPreventionDeidentifyTemplateRead(d, meta) -} - -func resourceDataLossPreventionDeidentifyTemplateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/deidentifyTemplates/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting DeidentifyTemplate %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DeidentifyTemplate") - } - - log.Printf("[DEBUG] Finished deleting DeidentifyTemplate %q: %#v", d.Id(), res) - return nil -} - -func 
resourceDataLossPreventionDeidentifyTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // Custom import to handle parent possibilities - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - parts := strings.Split(d.Get("name").(string), "/") - if len(parts) == 6 { - if err := d.Set("name", parts[5]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - } else if len(parts) == 4 { - if err := d.Set("name", parts[3]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, fmt.Errorf("Unexpected import id: %s, expected form {{parent}}/deidentifyTemplate/{{name}}", d.Get("name").(string)) - } - // Remove "/deidentifyTemplate/{{name}}" from the id - parts = parts[:len(parts)-2] - if err := d.Set("parent", strings.Join(parts, "/")); err != nil { - return nil, fmt.Errorf("Error setting parent: %s", err) - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{parent}}/deidentifyTemplates/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDataLossPreventionDeidentifyTemplateName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDataLossPreventionDeidentifyTemplateDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - 
transformed := make(map[string]interface{}) - transformed["info_type_transformations"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations(original["infoTypeTransformations"], d, config) - transformed["record_transformations"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformations(original["recordTransformations"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["transformations"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations(original["transformations"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "info_types": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes(original["infoTypes"], d, config), - "primitive_transformation": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation(original["primitiveTransformation"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesName(original["name"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["replace_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(original["replaceConfig"], d, config) - transformed["replace_with_info_type_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(original["replaceWithInfoTypeConfig"], d, config) - transformed["character_mask_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(original["characterMaskConfig"], d, config) - transformed["crypto_deterministic_config"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(original["cryptoDeterministicConfig"], d, config) - transformed["crypto_replace_ffx_fpe_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(original["cryptoReplaceFfxFpeConfig"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["new_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(original["newValue"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["integer_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(original["integerValue"], d, config) - transformed["float_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(original["floatValue"], d, config) - transformed["string_value"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(original["stringValue"], d, config) - transformed["boolean_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(original["booleanValue"], d, config) - transformed["timestamp_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(original["timestampValue"], d, config) - transformed["time_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(original["timeValue"], d, config) - transformed["date_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(original["dateValue"], d, config) - transformed["day_of_week_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(original["dayOfWeekValue"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hours"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(original["hours"], d, config) - transformed["minutes"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(original["minutes"], d, config) - transformed["seconds"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(original["nanos"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - 
// number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["year"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(original["year"], d, config) - transformed["month"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(original["month"], d, config) - transformed["day"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(original["day"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v != nil -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["masking_character"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(original["maskingCharacter"], d, config) - transformed["number_to_mask"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(original["numberToMask"], d, config) - transformed["reverse_order"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(original["reverseOrder"], d, config) - transformed["characters_to_ignore"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(original["charactersToIgnore"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := 
StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "characters_to_skip": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(original["charactersToSkip"], d, config), - "common_characters_to_ignore": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(original["commonCharactersToIgnore"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["crypto_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(original["cryptoKey"], d, config) - transformed["surrogate_info_type"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(original["surrogateInfoType"], d, config) - transformed["context"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(original["context"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["transient"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(original["transient"], d, config) - 
transformed["unwrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - transformed["kms_wrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(original["key"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["wrapped_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) - transformed["crypto_key_name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(original["name"], d, config) - transformed["version"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(original["version"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(original["name"], 
d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["crypto_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(original["cryptoKey"], d, config) - transformed["context"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(original["context"], d, config) - transformed["surrogate_info_type"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(original["surrogateInfoType"], d, config) - transformed["common_alphabet"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(original["commonAlphabet"], d, config) - transformed["custom_alphabet"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(original["customAlphabet"], d, config) - transformed["radix"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(original["radix"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["transient"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(original["transient"], d, config) - transformed["unwrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - transformed["kms_wrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(original["name"], d, config) - return 
[]interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(original["key"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["wrapped_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) - transformed["crypto_key_name"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if 
len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(original["name"], d, config) - transformed["version"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(original["version"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number 
values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["field_transformations"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformations(original["fieldTransformations"], d, config) - transformed["record_suppressions"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressions(original["recordSuppressions"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "fields": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFields(original["fields"], d, config), - "condition": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsCondition(original["condition"], d, config), - "primitive_transformation": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformation(original["primitiveTransformation"], d, config), - }) - } - return transformed -} -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFields(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFieldsName(original["name"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFieldsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsCondition(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["expressions"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressions(original["expressions"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["logical_operator"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsLogicalOperator(original["logicalOperator"], d, config) - transformed["conditions"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditions(original["conditions"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsLogicalOperator(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["conditions"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditions(original["conditions"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "field": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsField(original["field"], d, config), - 
"operator": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsOperator(original["operator"], d, config), - "value": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValue(original["value"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsField(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsFieldName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsFieldName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsOperator(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["integer_value"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueIntegerValue(original["integerValue"], d, config) - transformed["float_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueFloatValue(original["floatValue"], d, config) - transformed["string_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueStringValue(original["stringValue"], d, config) - transformed["boolean_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueBooleanValue(original["booleanValue"], d, config) - transformed["timestamp_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimestampValue(original["timestampValue"], d, config) - transformed["time_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValue(original["timeValue"], d, config) - transformed["date_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValue(original["dateValue"], d, config) - transformed["day_of_week_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDayOfWeekValue(original["dayOfWeekValue"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueIntegerValue(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueFloatValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueStringValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueBooleanValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimestampValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hours"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueHours(original["hours"], d, config) - transformed["minutes"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueMinutes(original["minutes"], d, config) - transformed["seconds"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueNanos(original["nanos"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - 
return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["year"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueYear(original["year"], d, config) - transformed["month"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueMonth(original["month"], d, config) - transformed["day"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueDay(original["day"], d, config) - return []interface{}{transformed} -} -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDayOfWeekValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - 
-func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["replace_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfig(original["replaceConfig"], d, config) - transformed["redact_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationRedactConfig(original["redactConfig"], d, config) - transformed["character_mask_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfig(original["characterMaskConfig"], d, config) - transformed["crypto_replace_ffx_fpe_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(original["cryptoReplaceFfxFpeConfig"], d, config) - transformed["fixed_size_bucketing_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfig(original["fixedSizeBucketingConfig"], d, config) - transformed["bucketing_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfig(original["bucketingConfig"], d, config) - transformed["time_part_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfig(original["timePartConfig"], d, config) - transformed["crypto_hash_config"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfig(original["cryptoHashConfig"], d, config) - transformed["date_shift_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfig(original["dateShiftConfig"], d, config) - transformed["crypto_deterministic_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfig(original["cryptoDeterministicConfig"], d, config) - transformed["replace_dictionary_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfig(original["replaceDictionaryConfig"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["new_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValue(original["newValue"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["integer_value"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(original["integerValue"], d, config) - transformed["float_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(original["floatValue"], d, config) - transformed["string_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(original["stringValue"], d, config) - transformed["boolean_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(original["booleanValue"], d, config) - transformed["timestamp_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(original["timestampValue"], d, config) - transformed["time_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(original["timeValue"], d, config) - transformed["date_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(original["dateValue"], d, config) - transformed["day_of_week_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(original["dayOfWeekValue"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hours"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(original["hours"], d, config) - transformed["minutes"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(original["minutes"], d, config) - transformed["seconds"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(original["nanos"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return 
intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["year"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(original["year"], d, config) - transformed["month"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(original["month"], d, config) - transformed["day"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(original["day"], d, config) - return []interface{}{transformed} -} -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationRedactConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - transformed := make(map[string]interface{}) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["masking_character"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(original["maskingCharacter"], d, config) - transformed["number_to_mask"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(original["numberToMask"], d, config) - transformed["reverse_order"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(original["reverseOrder"], d, config) - transformed["characters_to_ignore"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(original["charactersToIgnore"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "characters_to_skip": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(original["charactersToSkip"], d, config), - "common_characters_to_ignore": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(original["commonCharactersToIgnore"], d, config), - }) - } - return transformed -} -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["crypto_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(original["cryptoKey"], d, config) - transformed["context"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(original["context"], d, config) - transformed["surrogate_info_type"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(original["surrogateInfoType"], d, config) - transformed["common_alphabet"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(original["commonAlphabet"], d, config) - transformed["custom_alphabet"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(original["customAlphabet"], d, config) - transformed["radix"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(original["radix"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["transient"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(original["transient"], d, config) - transformed["unwrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - transformed["kms_wrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(original["key"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["wrapped_key"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) - transformed["crypto_key_name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(original["name"], d, config) - transformed["version"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(original["version"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["lower_bound"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(original["lowerBound"], d, config) - transformed["upper_bound"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(original["upperBound"], d, config) - transformed["bucket_size"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(original["bucketSize"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if 
len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["integer_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(original["integerValue"], d, config) - transformed["float_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(original["floatValue"], d, config) - transformed["string_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundStringValue(original["stringValue"], d, config) - transformed["boolean_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundBooleanValue(original["booleanValue"], d, config) - transformed["timestamp_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimestampValue(original["timestampValue"], d, config) - transformed["time_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue(original["timeValue"], d, config) - transformed["date_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue(original["dateValue"], d, config) - transformed["day_of_week_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValue(original["dayOfWeekValue"], d, 
config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundStringValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundBooleanValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimestampValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hours"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueHours(original["hours"], d, config) - 
transformed["minutes"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueMinutes(original["minutes"], d, config) - transformed["seconds"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueNanos(original["nanos"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["year"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueYear(original["year"], d, config) - transformed["month"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueMonth(original["month"], d, config) - transformed["day"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueDay(original["day"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := 
StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["integer_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(original["integerValue"], d, config) - transformed["float_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(original["floatValue"], d, config) - transformed["string_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundStringValue(original["stringValue"], d, config) - transformed["boolean_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundBooleanValue(original["booleanValue"], d, config) - transformed["timestamp_value"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimestampValue(original["timestampValue"], d, config) - transformed["time_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue(original["timeValue"], d, config) - transformed["date_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue(original["dateValue"], d, config) - transformed["day_of_week_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValue(original["dayOfWeekValue"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundStringValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundBooleanValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return 
v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimestampValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hours"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueHours(original["hours"], d, config) - transformed["minutes"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueMinutes(original["minutes"], d, config) - transformed["seconds"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueNanos(original["nanos"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := 
StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - 
return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["year"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueYear(original["year"], d, config) - transformed["month"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueMonth(original["month"], d, config) - transformed["day"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueDay(original["day"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueMonth(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["buckets"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBuckets(original["buckets"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBuckets(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "min": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMin(original["min"], d, config), - "max": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMax(original["max"], d, config), - "replacement_value": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(original["replacementValue"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMin(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["integer_value"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(original["integerValue"], d, config) - transformed["float_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(original["floatValue"], d, config) - transformed["string_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(original["stringValue"], d, config) - transformed["boolean_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinBooleanValue(original["booleanValue"], d, config) - transformed["timestamp_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(original["timestampValue"], d, config) - transformed["time_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(original["timeValue"], d, config) - transformed["date_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(original["dateValue"], d, config) - transformed["day_of_week_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(original["dayOfWeekValue"], d, config) - return []interface{}{transformed} -} -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinBooleanValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hours"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(original["hours"], d, config) - transformed["minutes"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(original["minutes"], d, config) - transformed["seconds"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(original["nanos"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["year"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(original["year"], d, config) - transformed["month"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(original["month"], d, 
config) - transformed["day"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(original["day"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core 
handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMax(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["integer_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(original["integerValue"], d, config) - transformed["float_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(original["floatValue"], d, config) - transformed["string_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(original["stringValue"], d, config) - transformed["boolean_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxBooleanValue(original["booleanValue"], d, config) - transformed["timestamp_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(original["timestampValue"], d, config) - transformed["time_value"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(original["timeValue"], d, config) - transformed["date_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(original["dateValue"], d, config) - transformed["day_of_week_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(original["dayOfWeekValue"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxBooleanValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hours"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(original["hours"], d, config) - transformed["minutes"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(original["minutes"], d, config) - transformed["seconds"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(original["nanos"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if 
v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["year"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(original["year"], d, config) - transformed["month"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(original["month"], d, config) - transformed["day"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(original["day"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core 
handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["integer_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(original["integerValue"], d, config) - transformed["float_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(original["floatValue"], d, config) - transformed["string_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(original["stringValue"], d, config) - 
transformed["boolean_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueBooleanValue(original["booleanValue"], d, config) - transformed["timestamp_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(original["timestampValue"], d, config) - transformed["time_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(original["timeValue"], d, config) - transformed["date_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(original["dateValue"], d, config) - transformed["day_of_week_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(original["dayOfWeekValue"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueBooleanValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hours"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(original["hours"], d, config) - transformed["minutes"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(original["minutes"], d, config) - transformed["seconds"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(original["nanos"], d, config) - return []interface{}{transformed} -} -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["year"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(original["year"], d, config) - transformed["month"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(original["month"], d, config) - transformed["day"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(original["day"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if 
floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["part_to_extract"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfigPartToExtract(original["partToExtract"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfigPartToExtract(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["crypto_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(original["cryptoKey"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["transient"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(original["transient"], d, config) - transformed["unwrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - transformed["kms_wrapped"] 
= - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(original["key"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, 
config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["wrapped_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) - transformed["crypto_key_name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["upper_bound_days"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(original["upperBoundDays"], d, config) - transformed["lower_bound_days"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(original["lowerBoundDays"], d, config) - transformed["context"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContext(original["context"], d, config) - transformed["crypto_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(original["cryptoKey"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let 
terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContext(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContextName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContextName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["transient"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(original["transient"], d, config) - transformed["unwrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - transformed["kms_wrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) - return 
[]interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(original["key"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["wrapped_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) - transformed["crypto_key_name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["crypto_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(original["cryptoKey"], d, config) - transformed["surrogate_info_type"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(original["surrogateInfoType"], d, config) - transformed["context"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(original["context"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["transient"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(original["transient"], d, config) - transformed["unwrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - transformed["kms_wrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(original["key"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["wrapped_key"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) - transformed["crypto_key_name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(original["name"], d, config) - transformed["version"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(original["version"], d, config) - return []interface{}{transformed} -} -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["word_list"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(original["wordList"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["words"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(original["words"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "condition": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsCondition(original["condition"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsCondition(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["expressions"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressions(original["expressions"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["logical_operator"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsLogicalOperator(original["logicalOperator"], d, config) - transformed["conditions"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditions(original["conditions"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsLogicalOperator(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["conditions"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditions(original["conditions"], d, 
config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "field": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsField(original["field"], d, config), - "operator": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsOperator(original["operator"], d, config), - "value": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValue(original["value"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsField(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsFieldName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsFieldName(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsOperator(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["integer_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueIntegerValue(original["integerValue"], d, config) - transformed["float_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueFloatValue(original["floatValue"], d, config) - transformed["string_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueStringValue(original["stringValue"], d, config) - transformed["boolean_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueBooleanValue(original["booleanValue"], d, config) - transformed["timestamp_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimestampValue(original["timestampValue"], d, config) - transformed["time_value"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValue(original["timeValue"], d, config) - transformed["date_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValue(original["dateValue"], d, config) - transformed["day_of_week_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDayOfWeekValue(original["dayOfWeekValue"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueIntegerValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueFloatValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueStringValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueBooleanValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimestampValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hours"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueHours(original["hours"], d, config) - transformed["minutes"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueMinutes(original["minutes"], d, config) - transformed["seconds"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueNanos(original["nanos"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return 
nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["year"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueYear(original["year"], d, config) - transformed["month"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueMonth(original["month"], d, config) - transformed["day"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueDay(original["day"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDayOfWeekValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDataLossPreventionDeidentifyTemplateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInfoTypeTransformations, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations(original["info_type_transformations"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInfoTypeTransformations); val.IsValid() && !isEmptyValue(val) { - transformed["infoTypeTransformations"] = transformedInfoTypeTransformations - } - - transformedRecordTransformations, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformations(original["record_transformations"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRecordTransformations); val.IsValid() && !isEmptyValue(val) { - transformed["recordTransformations"] = transformedRecordTransformations - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTransformations, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations(original["transformations"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTransformations); val.IsValid() && !isEmptyValue(val) { - transformed["transformations"] = transformedTransformations - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInfoTypes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes(original["info_types"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["infoTypes"] = transformedInfoTypes - } - - transformedPrimitiveTransformation, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation(original["primitive_transformation"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPrimitiveTransformation); val.IsValid() && !isEmptyValue(val) { - transformed["primitiveTransformation"] = transformedPrimitiveTransformation - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedReplaceConfig, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(original["replace_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedReplaceConfig); val.IsValid() && !isEmptyValue(val) { - transformed["replaceConfig"] = transformedReplaceConfig - } - - transformedReplaceWithInfoTypeConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(original["replace_with_info_type_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedReplaceWithInfoTypeConfig); val.IsValid() && !isEmptyValue(val) { - transformed["replaceWithInfoTypeConfig"] = transformedReplaceWithInfoTypeConfig - } - - transformedCharacterMaskConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(original["character_mask_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCharacterMaskConfig); val.IsValid() && !isEmptyValue(val) { - transformed["characterMaskConfig"] = transformedCharacterMaskConfig - } - - transformedCryptoDeterministicConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(original["crypto_deterministic_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoDeterministicConfig); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoDeterministicConfig"] = transformedCryptoDeterministicConfig - } - - transformedCryptoReplaceFfxFpeConfig, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(original["crypto_replace_ffx_fpe_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoReplaceFfxFpeConfig); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoReplaceFfxFpeConfig"] = transformedCryptoReplaceFfxFpeConfig - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNewValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(original["new_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNewValue); val.IsValid() && !isEmptyValue(val) { - transformed["newValue"] = transformedNewValue - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(original["integer_value"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedIntegerValue); val.IsValid() && !isEmptyValue(val) { - transformed["integerValue"] = transformedIntegerValue - } - - transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(original["float_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !isEmptyValue(val) { - transformed["floatValue"] = transformedFloatValue - } - - transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(original["string_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !isEmptyValue(val) { - transformed["stringValue"] = transformedStringValue - } - - transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(original["boolean_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !isEmptyValue(val) { - transformed["booleanValue"] = transformedBooleanValue - } - - transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(original["timestamp_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !isEmptyValue(val) { - transformed["timestampValue"] = transformedTimestampValue - } - - transformedTimeValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(original["time_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !isEmptyValue(val) { - transformed["timeValue"] = transformedTimeValue - } - - transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(original["date_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !isEmptyValue(val) { - transformed["dateValue"] = transformedDateValue - } - - transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(original["day_of_week_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeekValue"] = transformedDayOfWeekValue - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(original["hours"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { - transformed["hours"] = transformedHours - } - - transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(original["minutes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { - transformed["minutes"] = transformedMinutes - } - - transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(original["seconds"], d, config) - if err != nil { - return nil, 
err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - 
transformed := make(map[string]interface{}) - - transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(original["year"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { - transformed["year"] = transformedYear - } - - transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(original["month"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { - transformed["month"] = transformedMonth - } - - transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(original["day"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { - transformed["day"] = transformedDay - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil 
-} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || !v.(bool) { - return nil, nil - } - - return struct{}{}, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaskingCharacter, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(original["masking_character"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaskingCharacter); val.IsValid() && !isEmptyValue(val) { - transformed["maskingCharacter"] = transformedMaskingCharacter - } - - transformedNumberToMask, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(original["number_to_mask"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNumberToMask); val.IsValid() && !isEmptyValue(val) { - transformed["numberToMask"] = transformedNumberToMask - } - - transformedReverseOrder, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(original["reverse_order"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedReverseOrder); val.IsValid() && !isEmptyValue(val) { - transformed["reverseOrder"] = transformedReverseOrder - } - - transformedCharactersToIgnore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(original["characters_to_ignore"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCharactersToIgnore); val.IsValid() && !isEmptyValue(val) { - transformed["charactersToIgnore"] = transformedCharactersToIgnore - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { 
- continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCharactersToSkip, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(original["characters_to_skip"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCharactersToSkip); val.IsValid() && !isEmptyValue(val) { - transformed["charactersToSkip"] = transformedCharactersToSkip - } - - transformedCommonCharactersToIgnore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(original["common_characters_to_ignore"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCommonCharactersToIgnore); val.IsValid() && !isEmptyValue(val) { - transformed["commonCharactersToIgnore"] = transformedCommonCharactersToIgnore - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - 
return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(original["crypto_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKey"] = transformedCryptoKey - } - - transformedSurrogateInfoType, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(original["surrogate_info_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSurrogateInfoType); val.IsValid() && !isEmptyValue(val) { - transformed["surrogateInfoType"] = transformedSurrogateInfoType - } - - transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(original["context"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContext); val.IsValid() && !isEmptyValue(val) { - transformed["context"] = transformedContext - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTransient, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(original["transient"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !isEmptyValue(val) { - transformed["transient"] = transformedTransient - } - - transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !isEmptyValue(val) { - transformed["unwrapped"] = transformedUnwrapped - } - - transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !isEmptyValue(val) { - transformed["kmsWrapped"] = transformedKmsWrapped - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); 
val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWrappedKey, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !isEmptyValue(val) { - transformed["wrappedKey"] = transformedWrappedKey - } - - transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKeyName"] = transformedCryptoKeyName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedVersion, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(original["name"], d, config) - if 
err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(original["crypto_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKey"] = transformedCryptoKey - } - - transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(original["context"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContext); val.IsValid() && !isEmptyValue(val) { - transformed["context"] = transformedContext - } - - transformedSurrogateInfoType, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(original["surrogate_info_type"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedSurrogateInfoType); val.IsValid() && !isEmptyValue(val) { - transformed["surrogateInfoType"] = transformedSurrogateInfoType - } - - transformedCommonAlphabet, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(original["common_alphabet"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCommonAlphabet); val.IsValid() && !isEmptyValue(val) { - transformed["commonAlphabet"] = transformedCommonAlphabet - } - - transformedCustomAlphabet, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(original["custom_alphabet"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCustomAlphabet); val.IsValid() && !isEmptyValue(val) { - transformed["customAlphabet"] = transformedCustomAlphabet - } - - transformedRadix, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(original["radix"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRadix); val.IsValid() && !isEmptyValue(val) { - transformed["radix"] = transformedRadix - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTransient, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(original["transient"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !isEmptyValue(val) { - transformed["transient"] = transformedTransient - } - - transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !isEmptyValue(val) { - transformed["unwrapped"] = transformedUnwrapped - } - - transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !isEmptyValue(val) { - transformed["kmsWrapped"] = transformedKmsWrapped - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); 
val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWrappedKey, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !isEmptyValue(val) { - transformed["wrappedKey"] = transformedWrappedKey - } - - transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKeyName"] = transformedCryptoKeyName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedVersion, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - return transformed, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFieldTransformations, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformations(original["field_transformations"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFieldTransformations); val.IsValid() && 
!isEmptyValue(val) { - transformed["fieldTransformations"] = transformedFieldTransformations - } - - transformedRecordSuppressions, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressions(original["record_suppressions"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRecordSuppressions); val.IsValid() && !isEmptyValue(val) { - transformed["recordSuppressions"] = transformedRecordSuppressions - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFields, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFields(original["fields"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFields); val.IsValid() && !isEmptyValue(val) { - transformed["fields"] = transformedFields - } - - transformedCondition, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsCondition(original["condition"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCondition); val.IsValid() && !isEmptyValue(val) { - transformed["condition"] = transformedCondition - } - - transformedPrimitiveTransformation, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformation(original["primitive_transformation"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPrimitiveTransformation); val.IsValid() && !isEmptyValue(val) { - 
transformed["primitiveTransformation"] = transformedPrimitiveTransformation - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFieldsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFieldsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpressions, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressions(original["expressions"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExpressions); val.IsValid() && !isEmptyValue(val) { - transformed["expressions"] = transformedExpressions - } - - return transformed, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLogicalOperator, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsLogicalOperator(original["logical_operator"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLogicalOperator); val.IsValid() && !isEmptyValue(val) { - transformed["logicalOperator"] = transformedLogicalOperator - } - - transformedConditions, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditions(original["conditions"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConditions); val.IsValid() && !isEmptyValue(val) { - transformed["conditions"] = transformedConditions - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsLogicalOperator(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedConditions, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditions(original["conditions"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConditions); val.IsValid() && !isEmptyValue(val) { - transformed["conditions"] = transformedConditions - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedField, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsField(original["field"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedField); val.IsValid() && !isEmptyValue(val) { - transformed["field"] = transformedField - } - - transformedOperator, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsOperator(original["operator"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOperator); val.IsValid() && !isEmptyValue(val) { - transformed["operator"] = transformedOperator - } - - transformedValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - req = 
append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsField(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsFieldName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsFieldName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsOperator(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIntegerValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueIntegerValue(original["integer_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !isEmptyValue(val) { - transformed["integerValue"] = transformedIntegerValue - } - - transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueFloatValue(original["float_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !isEmptyValue(val) { - transformed["floatValue"] = transformedFloatValue - } - - transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueStringValue(original["string_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !isEmptyValue(val) { - transformed["stringValue"] = transformedStringValue - } - - transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueBooleanValue(original["boolean_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !isEmptyValue(val) { - transformed["booleanValue"] = transformedBooleanValue - } - - transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimestampValue(original["timestamp_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() 
&& !isEmptyValue(val) { - transformed["timestampValue"] = transformedTimestampValue - } - - transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValue(original["time_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !isEmptyValue(val) { - transformed["timeValue"] = transformedTimeValue - } - - transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValue(original["date_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !isEmptyValue(val) { - transformed["dateValue"] = transformedDateValue - } - - transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDayOfWeekValue(original["day_of_week_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeekValue"] = transformedDayOfWeekValue - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueIntegerValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueFloatValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueStringValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueBooleanValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimestampValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueHours(original["hours"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { - transformed["hours"] = transformedHours - } - - transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueMinutes(original["minutes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { - transformed["minutes"] = transformedMinutes 
- } - - transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueYear(original["year"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { - transformed["year"] = transformedYear - } - - transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueMonth(original["month"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { - transformed["month"] = transformedMonth - } - - transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueDay(original["day"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { - transformed["day"] = transformedDay - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDayOfWeekValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedReplaceConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfig(original["replace_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedReplaceConfig); val.IsValid() && !isEmptyValue(val) { - transformed["replaceConfig"] = transformedReplaceConfig - } - - transformedRedactConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationRedactConfig(original["redact_config"], d, config) - if err != nil { - return nil, err - } else { - transformed["redactConfig"] = transformedRedactConfig - } - - transformedCharacterMaskConfig, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfig(original["character_mask_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCharacterMaskConfig); val.IsValid() && !isEmptyValue(val) { - transformed["characterMaskConfig"] = transformedCharacterMaskConfig - } - - transformedCryptoReplaceFfxFpeConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(original["crypto_replace_ffx_fpe_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoReplaceFfxFpeConfig); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoReplaceFfxFpeConfig"] = transformedCryptoReplaceFfxFpeConfig - } - - transformedFixedSizeBucketingConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfig(original["fixed_size_bucketing_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFixedSizeBucketingConfig); val.IsValid() && !isEmptyValue(val) { - transformed["fixedSizeBucketingConfig"] = transformedFixedSizeBucketingConfig - } - - transformedBucketingConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfig(original["bucketing_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBucketingConfig); val.IsValid() && !isEmptyValue(val) { - transformed["bucketingConfig"] = transformedBucketingConfig - } - - transformedTimePartConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfig(original["time_part_config"], d, 
config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimePartConfig); val.IsValid() && !isEmptyValue(val) { - transformed["timePartConfig"] = transformedTimePartConfig - } - - transformedCryptoHashConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfig(original["crypto_hash_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoHashConfig); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoHashConfig"] = transformedCryptoHashConfig - } - - transformedDateShiftConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfig(original["date_shift_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDateShiftConfig); val.IsValid() && !isEmptyValue(val) { - transformed["dateShiftConfig"] = transformedDateShiftConfig - } - - transformedCryptoDeterministicConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfig(original["crypto_deterministic_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoDeterministicConfig); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoDeterministicConfig"] = transformedCryptoDeterministicConfig - } - - transformedReplaceDictionaryConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfig(original["replace_dictionary_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedReplaceDictionaryConfig); val.IsValid() && !isEmptyValue(val) { - transformed["replaceDictionaryConfig"] = 
transformedReplaceDictionaryConfig - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNewValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValue(original["new_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNewValue); val.IsValid() && !isEmptyValue(val) { - transformed["newValue"] = transformedNewValue - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(original["integer_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !isEmptyValue(val) { - transformed["integerValue"] = transformedIntegerValue - } - - transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(original["float_value"], d, config) - if err != nil { - return nil, 
err - } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !isEmptyValue(val) { - transformed["floatValue"] = transformedFloatValue - } - - transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(original["string_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !isEmptyValue(val) { - transformed["stringValue"] = transformedStringValue - } - - transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(original["boolean_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !isEmptyValue(val) { - transformed["booleanValue"] = transformedBooleanValue - } - - transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(original["timestamp_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !isEmptyValue(val) { - transformed["timestampValue"] = transformedTimestampValue - } - - transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(original["time_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !isEmptyValue(val) { - transformed["timeValue"] = transformedTimeValue - } - - transformedDateValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(original["date_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !isEmptyValue(val) { - transformed["dateValue"] = transformedDateValue - } - - transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(original["day_of_week_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeekValue"] = transformedDayOfWeekValue - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(original["hours"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { - transformed["hours"] = transformedHours - } - - transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(original["minutes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { - transformed["minutes"] = transformedMinutes - } - - transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedYear, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(original["year"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { - transformed["year"] = transformedYear - } - - transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(original["month"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { - transformed["month"] = transformedMonth - } - - transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(original["day"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { - transformed["day"] = transformedDay - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationRedactConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - transformed := make(map[string]interface{}) - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaskingCharacter, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(original["masking_character"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaskingCharacter); val.IsValid() && !isEmptyValue(val) { - transformed["maskingCharacter"] = transformedMaskingCharacter - } - - transformedNumberToMask, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(original["number_to_mask"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNumberToMask); val.IsValid() && !isEmptyValue(val) { - transformed["numberToMask"] = transformedNumberToMask - 
} - - transformedReverseOrder, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(original["reverse_order"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedReverseOrder); val.IsValid() && !isEmptyValue(val) { - transformed["reverseOrder"] = transformedReverseOrder - } - - transformedCharactersToIgnore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(original["characters_to_ignore"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCharactersToIgnore); val.IsValid() && !isEmptyValue(val) { - transformed["charactersToIgnore"] = transformedCharactersToIgnore - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := 
make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCharactersToSkip, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(original["characters_to_skip"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCharactersToSkip); val.IsValid() && !isEmptyValue(val) { - transformed["charactersToSkip"] = transformedCharactersToSkip - } - - transformedCommonCharactersToIgnore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(original["common_characters_to_ignore"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCommonCharactersToIgnore); val.IsValid() && !isEmptyValue(val) { - transformed["commonCharactersToIgnore"] = transformedCommonCharactersToIgnore - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(original["crypto_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKey"] = transformedCryptoKey - } - - transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(original["context"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContext); val.IsValid() && !isEmptyValue(val) { - transformed["context"] = transformedContext - } - - transformedSurrogateInfoType, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(original["surrogate_info_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSurrogateInfoType); val.IsValid() && !isEmptyValue(val) { - transformed["surrogateInfoType"] = transformedSurrogateInfoType - } - - transformedCommonAlphabet, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(original["common_alphabet"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCommonAlphabet); val.IsValid() && !isEmptyValue(val) { - transformed["commonAlphabet"] = transformedCommonAlphabet - } - - transformedCustomAlphabet, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(original["custom_alphabet"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCustomAlphabet); val.IsValid() && !isEmptyValue(val) { - transformed["customAlphabet"] = transformedCustomAlphabet - } - - transformedRadix, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(original["radix"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRadix); val.IsValid() && !isEmptyValue(val) { - transformed["radix"] = transformedRadix - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(original["transient"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !isEmptyValue(val) { - transformed["transient"] = transformedTransient - } - - transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUnwrapped); 
val.IsValid() && !isEmptyValue(val) { - transformed["unwrapped"] = transformedUnwrapped - } - - transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !isEmptyValue(val) { - transformed["kmsWrapped"] = transformedKmsWrapped - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == 
nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !isEmptyValue(val) { - transformed["wrappedKey"] = transformedWrappedKey - } - - transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) - if err != nil { - return nil, err - } 
else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKeyName"] = transformedCryptoKeyName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedVersion, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLowerBound, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(original["lower_bound"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLowerBound); val.IsValid() && !isEmptyValue(val) { - transformed["lowerBound"] = transformedLowerBound - } - - transformedUpperBound, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(original["upper_bound"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUpperBound); val.IsValid() && !isEmptyValue(val) { - transformed["upperBound"] = transformedUpperBound - } - - transformedBucketSize, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(original["bucket_size"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBucketSize); val.IsValid() && !isEmptyValue(val) { - transformed["bucketSize"] = transformedBucketSize - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(original["integer_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !isEmptyValue(val) { - transformed["integerValue"] = transformedIntegerValue - } - - transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(original["float_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !isEmptyValue(val) { - transformed["floatValue"] = transformedFloatValue - } - - transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundStringValue(original["string_value"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedStringValue); val.IsValid() && !isEmptyValue(val) { - transformed["stringValue"] = transformedStringValue - } - - transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundBooleanValue(original["boolean_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !isEmptyValue(val) { - transformed["booleanValue"] = transformedBooleanValue - } - - transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimestampValue(original["timestamp_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !isEmptyValue(val) { - transformed["timestampValue"] = transformedTimestampValue - } - - transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue(original["time_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !isEmptyValue(val) { - transformed["timeValue"] = transformedTimeValue - } - - transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue(original["date_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !isEmptyValue(val) { - transformed["dateValue"] = transformedDateValue - } - - transformedDayOfWeekValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValue(original["day_of_week_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeekValue"] = transformedDayOfWeekValue - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundStringValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundBooleanValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimestampValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueHours(original["hours"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { - transformed["hours"] = transformedHours - } - - transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueMinutes(original["minutes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { - transformed["minutes"] = transformedMinutes - } - - transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueYear(original["year"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { - transformed["year"] = transformedYear - } - - transformedMonth, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueMonth(original["month"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { - transformed["month"] = transformedMonth - } - - transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueDay(original["day"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { - transformed["day"] = transformedDay - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(original["integer_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !isEmptyValue(val) { - transformed["integerValue"] = transformedIntegerValue - } - - transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(original["float_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !isEmptyValue(val) { - transformed["floatValue"] = transformedFloatValue - } - - transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundStringValue(original["string_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !isEmptyValue(val) { - transformed["stringValue"] = transformedStringValue - } - - transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundBooleanValue(original["boolean_value"], d, config) - if err != nil { - return nil, err - } 
else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !isEmptyValue(val) { - transformed["booleanValue"] = transformedBooleanValue - } - - transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimestampValue(original["timestamp_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !isEmptyValue(val) { - transformed["timestampValue"] = transformedTimestampValue - } - - transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue(original["time_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !isEmptyValue(val) { - transformed["timeValue"] = transformedTimeValue - } - - transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue(original["date_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !isEmptyValue(val) { - transformed["dateValue"] = transformedDateValue - } - - transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValue(original["day_of_week_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeekValue"] = transformedDayOfWeekValue - } - - return transformed, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundStringValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundBooleanValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimestampValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHours, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueHours(original["hours"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { - transformed["hours"] = transformedHours - } - - transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueMinutes(original["minutes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { - transformed["minutes"] = transformedMinutes - } - - transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueYear(original["year"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { - transformed["year"] = transformedYear - } - - transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueMonth(original["month"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && 
!isEmptyValue(val) { - transformed["month"] = transformedMonth - } - - transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueDay(original["day"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { - transformed["day"] = transformedDay - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBuckets, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBuckets(original["buckets"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBuckets); val.IsValid() && !isEmptyValue(val) { - transformed["buckets"] = transformedBuckets - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBuckets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMin, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMin(original["min"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMin); val.IsValid() && !isEmptyValue(val) { - transformed["min"] = transformedMin - } - - transformedMax, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMax(original["max"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMax); val.IsValid() && !isEmptyValue(val) { - transformed["max"] = 
transformedMax - } - - transformedReplacementValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(original["replacement_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedReplacementValue); val.IsValid() && !isEmptyValue(val) { - transformed["replacementValue"] = transformedReplacementValue - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(original["integer_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !isEmptyValue(val) { - transformed["integerValue"] = transformedIntegerValue - } - - transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(original["float_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !isEmptyValue(val) { - transformed["floatValue"] = transformedFloatValue - } - - transformedStringValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(original["string_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !isEmptyValue(val) { - transformed["stringValue"] = transformedStringValue - } - - transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinBooleanValue(original["boolean_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !isEmptyValue(val) { - transformed["booleanValue"] = transformedBooleanValue - } - - transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(original["timestamp_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !isEmptyValue(val) { - transformed["timestampValue"] = transformedTimestampValue - } - - transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(original["time_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !isEmptyValue(val) { - transformed["timeValue"] = transformedTimeValue - } - - transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(original["date_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDateValue); 
val.IsValid() && !isEmptyValue(val) { - transformed["dateValue"] = transformedDateValue - } - - transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(original["day_of_week_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeekValue"] = transformedDayOfWeekValue - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinBooleanValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(original["hours"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { - transformed["hours"] = transformedHours - } - - transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(original["minutes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { - transformed["minutes"] = transformedMinutes - } - - transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - 
transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(original["year"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { - transformed["year"] = transformedYear - } - - transformedMonth, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(original["month"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { - transformed["month"] = transformedMonth - } - - transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(original["day"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { - transformed["day"] = transformedDay - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMax(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(original["integer_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !isEmptyValue(val) { - transformed["integerValue"] = transformedIntegerValue - } - - transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(original["float_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !isEmptyValue(val) { - transformed["floatValue"] = transformedFloatValue - } - - transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(original["string_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !isEmptyValue(val) { - transformed["stringValue"] = transformedStringValue - } - - transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxBooleanValue(original["boolean_value"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedBooleanValue); val.IsValid() && !isEmptyValue(val) { - transformed["booleanValue"] = transformedBooleanValue - } - - transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(original["timestamp_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !isEmptyValue(val) { - transformed["timestampValue"] = transformedTimestampValue - } - - transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(original["time_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !isEmptyValue(val) { - transformed["timeValue"] = transformedTimeValue - } - - transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(original["date_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !isEmptyValue(val) { - transformed["dateValue"] = transformedDateValue - } - - transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(original["day_of_week_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeekValue"] = transformedDayOfWeekValue - } - - return transformed, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxBooleanValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(original["hours"], d, config) - if err != nil { - return 
nil, err - } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { - transformed["hours"] = transformedHours - } - - transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(original["minutes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { - transformed["minutes"] = transformedMinutes - } - - transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(original["year"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { - transformed["year"] = transformedYear - } - - transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(original["month"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { - transformed["month"] = transformedMonth - } - - transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(original["day"], d, config) - if err != nil { - return nil, err - } 
else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { - transformed["day"] = transformedDay - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(original["integer_value"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedIntegerValue); val.IsValid() && !isEmptyValue(val) { - transformed["integerValue"] = transformedIntegerValue - } - - transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(original["float_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !isEmptyValue(val) { - transformed["floatValue"] = transformedFloatValue - } - - transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(original["string_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !isEmptyValue(val) { - transformed["stringValue"] = transformedStringValue - } - - transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueBooleanValue(original["boolean_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !isEmptyValue(val) { - transformed["booleanValue"] = transformedBooleanValue - } - - transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(original["timestamp_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !isEmptyValue(val) { - transformed["timestampValue"] = transformedTimestampValue - } - - transformedTimeValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(original["time_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !isEmptyValue(val) { - transformed["timeValue"] = transformedTimeValue - } - - transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(original["date_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !isEmptyValue(val) { - transformed["dateValue"] = transformedDateValue - } - - transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(original["day_of_week_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeekValue"] = transformedDayOfWeekValue - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueBooleanValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(original["hours"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { - transformed["hours"] = transformedHours - } - - transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(original["minutes"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { - transformed["minutes"] = transformedMinutes - } - - transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(original["year"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { - transformed["year"] = transformedYear - } - - transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(original["month"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { - transformed["month"] = transformedMonth - } - - transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(original["day"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { - transformed["day"] = transformedDay - } - - return transformed, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPartToExtract, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfigPartToExtract(original["part_to_extract"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPartToExtract); val.IsValid() && !isEmptyValue(val) { - transformed["partToExtract"] = transformedPartToExtract - } - - return transformed, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfigPartToExtract(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(original["crypto_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKey"] = transformedCryptoKey - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(original["transient"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !isEmptyValue(val) { - transformed["transient"] = transformedTransient - } - - transformedUnwrapped, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !isEmptyValue(val) { - transformed["unwrapped"] = transformedUnwrapped - } - - transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !isEmptyValue(val) { - transformed["kmsWrapped"] = transformedKmsWrapped - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !isEmptyValue(val) { - transformed["wrappedKey"] = transformedWrappedKey - } - - transformedCryptoKeyName, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKeyName"] = transformedCryptoKeyName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUpperBoundDays, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(original["upper_bound_days"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUpperBoundDays); val.IsValid() && !isEmptyValue(val) { - transformed["upperBoundDays"] = transformedUpperBoundDays - } - - transformedLowerBoundDays, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(original["lower_bound_days"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLowerBoundDays); val.IsValid() && !isEmptyValue(val) { - transformed["lowerBoundDays"] = transformedLowerBoundDays - } - - transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContext(original["context"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContext); val.IsValid() && !isEmptyValue(val) { - transformed["context"] = transformedContext - } - - transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(original["crypto_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKey"] = transformedCryptoKey - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContext(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) 
== 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContextName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContextName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(original["transient"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !isEmptyValue(val) { - transformed["transient"] = transformedTransient - } - - transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !isEmptyValue(val) { - 
transformed["unwrapped"] = transformedUnwrapped - } - - transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !isEmptyValue(val) { - transformed["kmsWrapped"] = transformedKmsWrapped - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - 
transformed := make(map[string]interface{}) - - transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !isEmptyValue(val) { - transformed["wrappedKey"] = transformedWrappedKey - } - - transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKeyName"] = 
transformedCryptoKeyName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(original["crypto_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKey"] = transformedCryptoKey - } - - transformedSurrogateInfoType, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(original["surrogate_info_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSurrogateInfoType); val.IsValid() && !isEmptyValue(val) { - transformed["surrogateInfoType"] = transformedSurrogateInfoType - } - - transformedContext, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(original["context"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContext); val.IsValid() && !isEmptyValue(val) { - transformed["context"] = transformedContext - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(original["transient"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !isEmptyValue(val) { - transformed["transient"] = transformedTransient - } - - transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !isEmptyValue(val) { - transformed["unwrapped"] = transformedUnwrapped - } - - transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsWrapped); 
val.IsValid() && !isEmptyValue(val) { - transformed["kmsWrapped"] = transformedKmsWrapped - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !isEmptyValue(val) { - transformed["wrappedKey"] = transformedWrappedKey - } - - transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKeyName"] = transformedCryptoKeyName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedVersion, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWordList, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(original["word_list"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWordList); val.IsValid() && !isEmptyValue(val) { - transformed["wordList"] = transformedWordList - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWords, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(original["words"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWords); val.IsValid() && !isEmptyValue(val) { - transformed["words"] = transformedWords - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCondition, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsCondition(original["condition"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCondition); val.IsValid() && !isEmptyValue(val) { - transformed["condition"] = transformedCondition - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpressions, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressions(original["expressions"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExpressions); val.IsValid() && !isEmptyValue(val) { - transformed["expressions"] = transformedExpressions - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLogicalOperator, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsLogicalOperator(original["logical_operator"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLogicalOperator); val.IsValid() && !isEmptyValue(val) { - transformed["logicalOperator"] = transformedLogicalOperator - } - - 
transformedConditions, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditions(original["conditions"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConditions); val.IsValid() && !isEmptyValue(val) { - transformed["conditions"] = transformedConditions - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsLogicalOperator(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedConditions, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditions(original["conditions"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConditions); val.IsValid() && !isEmptyValue(val) { - transformed["conditions"] = transformedConditions - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedField, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsField(original["field"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedField); val.IsValid() && !isEmptyValue(val) { - transformed["field"] = transformedField - } - - transformedOperator, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsOperator(original["operator"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOperator); val.IsValid() && !isEmptyValue(val) { - transformed["operator"] = transformedOperator - } - - transformedValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsField(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsFieldName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - 
-func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsFieldName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsOperator(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueIntegerValue(original["integer_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !isEmptyValue(val) { - transformed["integerValue"] = transformedIntegerValue - } - - transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueFloatValue(original["float_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !isEmptyValue(val) { - transformed["floatValue"] = transformedFloatValue - } - - transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueStringValue(original["string_value"], d, config) - if err != nil { - 
return nil, err - } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !isEmptyValue(val) { - transformed["stringValue"] = transformedStringValue - } - - transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueBooleanValue(original["boolean_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !isEmptyValue(val) { - transformed["booleanValue"] = transformedBooleanValue - } - - transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimestampValue(original["timestamp_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !isEmptyValue(val) { - transformed["timestampValue"] = transformedTimestampValue - } - - transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValue(original["time_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !isEmptyValue(val) { - transformed["timeValue"] = transformedTimeValue - } - - transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValue(original["date_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !isEmptyValue(val) { - transformed["dateValue"] = transformedDateValue - } - - transformedDayOfWeekValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDayOfWeekValue(original["day_of_week_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeekValue"] = transformedDayOfWeekValue - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueIntegerValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueFloatValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueStringValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueBooleanValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimestampValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) 
- if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueHours(original["hours"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { - transformed["hours"] = transformedHours - } - - transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueMinutes(original["minutes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { - transformed["minutes"] = transformedMinutes - } - - transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueHours(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueYear(original["year"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { - transformed["year"] = transformedYear - } - - transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueMonth(original["month"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { - 
transformed["month"] = transformedMonth - } - - transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueDay(original["day"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { - transformed["day"] = transformedDay - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDayOfWeekValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDataLossPreventionDeidentifyTemplateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - newObj := make(map[string]interface{}) - newObj["deidentifyTemplate"] = obj - return newObj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_loss_prevention_inspect_template.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_loss_prevention_inspect_template.go deleted file mode 100644 index acbebf9f42..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_loss_prevention_inspect_template.go +++ /dev/null @@ -1,2294 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDataLossPreventionInspectTemplate() *schema.Resource { - return &schema.Resource{ - Create: resourceDataLossPreventionInspectTemplateCreate, - Read: resourceDataLossPreventionInspectTemplateRead, - Update: resourceDataLossPreventionInspectTemplateUpdate, - Delete: resourceDataLossPreventionInspectTemplateDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDataLossPreventionInspectTemplateImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent of the inspect template in any of the following formats: - -* 'projects/{{project}}' -* 'projects/{{project}}/locations/{{location}}' -* 'organizations/{{organization_id}}' -* 
'organizations/{{organization_id}}/locations/{{location}}'`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A description of the inspect template.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `User set display name of the inspect template.`, - }, - "inspect_config": { - Type: schema.TypeList, - Optional: true, - Description: `The core content of the template.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "content_options": { - Type: schema.TypeList, - Optional: true, - Description: `List of options defining data content to scan. If empty, text, images, and other content will be included. Possible values: ["CONTENT_TEXT", "CONTENT_IMAGE"]`, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"CONTENT_TEXT", "CONTENT_IMAGE"}), - }, - }, - "custom_info_types": { - Type: schema.TypeList, - Optional: true, - Description: `Custom info types to be used. See https://cloud.google.com/dlp/docs/creating-custom-infotypes to learn more.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "info_type": { - Type: schema.TypeList, - Required: true, - Description: `CustomInfoType can either be a new infoType, or an extension of built-in infoType, when the name matches one of existing -infoTypes and that infoType is specified in 'info_types' field. Specifying the latter adds findings to the -one detected by the system. If built-in info type is not specified in 'info_types' list then the name is -treated as a custom info type.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the information type. 
Either a name of your choosing when creating a CustomInfoType, or one of the names -listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, - }, - }, - }, - }, - "dictionary": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Dictionary which defines the rule.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cloud_storage_path": { - Type: schema.TypeList, - Optional: true, - Description: `Newline-delimited file of words in Cloud Storage. Only a single file is accepted.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "path": { - Type: schema.TypeString, - Required: true, - Description: `A url representing a file or path (no wildcards) in Cloud Storage. Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, - }, - }, - }, - }, - "word_list": { - Type: schema.TypeList, - Optional: true, - Description: `List of words or phrases to search for.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "words": { - Type: schema.TypeList, - Required: true, - Description: `Words or phrases defining the dictionary. The dictionary must contain at least one -phrase and every phrase must contain at least 2 characters that are letters or digits.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "exclusion_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"EXCLUSION_TYPE_EXCLUDE", ""}), - Description: `If set to EXCLUSION_TYPE_EXCLUDE this infoType will not cause a finding to be returned. It still can be used for rules matching. Possible values: ["EXCLUSION_TYPE_EXCLUDE"]`, - }, - "likelihood": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY", ""}), - Description: `Likelihood to return for this CustomInfoType. 
This base value can be altered by a detection rule if the finding meets the criteria -specified by the rule. Default value: "VERY_LIKELY" Possible values: ["VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY"]`, - Default: "VERY_LIKELY", - }, - "regex": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Regular expression which defines the rule.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pattern": { - Type: schema.TypeString, - Required: true, - Description: `Pattern defining the regular expression. -Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, - }, - "group_indexes": { - Type: schema.TypeList, - Optional: true, - Description: `The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included.`, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - }, - }, - }, - "stored_type": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A reference to a StoredInfoType to use with scanning.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Resource name of the requested StoredInfoType, for example 'organizations/433245324/storedInfoTypes/432452342' -or 'projects/project-id/storedInfoTypes/432452342'.`, - }, - }, - }, - }, - }, - }, - }, - "exclude_info_types": { - Type: schema.TypeBool, - Optional: true, - Description: `When true, excludes type information of the findings.`, - }, - "include_quote": { - Type: schema.TypeBool, - Optional: true, - Description: `When true, a contextual quote from the data that triggered a finding is included in the response.`, - }, - "info_types": { - Type: schema.TypeList, - Optional: true, - Description: `Restricts what infoTypes to look for. 
The values must correspond to InfoType values returned by infoTypes.list -or listed at https://cloud.google.com/dlp/docs/infotypes-reference. - -When no InfoTypes or CustomInfoTypes are specified in a request, the system may automatically choose what detectors to run. -By default this may be all types, but may change over time as detectors are updated.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed -at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, - }, - "version": { - Type: schema.TypeString, - Optional: true, - Description: `Version of the information type to use. By default, the version is set to stable`, - }, - }, - }, - }, - "limits": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration to control the number of findings returned.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_findings_per_item": { - Type: schema.TypeInt, - Required: true, - Description: `Max number of findings that will be returned for each item scanned. The maximum returned is 2000.`, - }, - "max_findings_per_request": { - Type: schema.TypeInt, - Required: true, - Description: `Max number of findings that will be returned per request/job. The maximum returned is 2000.`, - }, - "max_findings_per_info_type": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration of findings limit given for specified infoTypes.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "info_type": { - Type: schema.TypeList, - Required: true, - Description: `Type of information the findings limit applies to. Only one limit per infoType should be provided. 
If InfoTypeLimit does -not have an infoType, the DLP API applies the limit against all infoTypes that are found but not -specified in another InfoTypeLimit.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed -at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, - }, - }, - }, - }, - "max_findings": { - Type: schema.TypeInt, - Required: true, - Description: `Max findings limit for the given infoType.`, - }, - }, - }, - }, - }, - }, - }, - "min_likelihood": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY", ""}), - Description: `Only returns findings equal or above this threshold. See https://cloud.google.com/dlp/docs/likelihood for more info Default value: "POSSIBLE" Possible values: ["VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY"]`, - Default: "POSSIBLE", - }, - "rule_set": { - Type: schema.TypeList, - Optional: true, - Description: `Set of rules to apply to the findings for this InspectConfig. Exclusion rules, contained in the set are executed in the end, -other rules are executed in the order they are specified for each info type.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "info_types": { - Type: schema.TypeList, - Required: true, - Description: `List of infoTypes this rule set is applied to.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the information type. 
Either a name of your choosing when creating a CustomInfoType, or one of the names listed -at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, - }, - }, - }, - }, - "rules": { - Type: schema.TypeList, - Required: true, - Description: `Set of rules to be applied to infoTypes. The rules are applied in order.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "exclusion_rule": { - Type: schema.TypeList, - Optional: true, - Description: `The rule that specifies conditions when findings of infoTypes specified in InspectionRuleSet are removed from results.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "matching_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"MATCHING_TYPE_FULL_MATCH", "MATCHING_TYPE_PARTIAL_MATCH", "MATCHING_TYPE_INVERSE_MATCH"}), - Description: `How the rule is applied. See the documentation for more information: https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#MatchingType Possible values: ["MATCHING_TYPE_FULL_MATCH", "MATCHING_TYPE_PARTIAL_MATCH", "MATCHING_TYPE_INVERSE_MATCH"]`, - }, - "dictionary": { - Type: schema.TypeList, - Optional: true, - Description: `Dictionary which defines the rule.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cloud_storage_path": { - Type: schema.TypeList, - Optional: true, - Description: `Newline-delimited file of words in Cloud Storage. Only a single file is accepted.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "path": { - Type: schema.TypeString, - Required: true, - Description: `A url representing a file or path (no wildcards) in Cloud Storage. 
Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, - }, - }, - }, - }, - "word_list": { - Type: schema.TypeList, - Optional: true, - Description: `List of words or phrases to search for.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "words": { - Type: schema.TypeList, - Required: true, - Description: `Words or phrases defining the dictionary. The dictionary must contain at least one -phrase and every phrase must contain at least 2 characters that are letters or digits.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "exclude_info_types": { - Type: schema.TypeList, - Optional: true, - Description: `Set of infoTypes for which findings would affect this rule.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "info_types": { - Type: schema.TypeList, - Required: true, - Description: `If a finding is matched by any of the infoType detectors listed here, the finding will be excluded from the scan results.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed -at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, - }, - }, - }, - }, - }, - }, - }, - "regex": { - Type: schema.TypeList, - Optional: true, - Description: `Regular expression which defines the rule.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pattern": { - Type: schema.TypeString, - Required: true, - Description: `Pattern defining the regular expression. -Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, - }, - "group_indexes": { - Type: schema.TypeList, - Optional: true, - Description: `The index of the submatch to extract as findings. 
When not specified, the entire match is returned. No more than 3 may be included.`, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - }, - }, - }, - }, - }, - }, - "hotword_rule": { - Type: schema.TypeList, - Optional: true, - Description: `Hotword-based detection rule.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hotword_regex": { - Type: schema.TypeList, - Required: true, - Description: `Regular expression pattern defining what qualifies as a hotword.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pattern": { - Type: schema.TypeString, - Required: true, - Description: `Pattern defining the regular expression. Its syntax -(https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, - }, - "group_indexes": { - Type: schema.TypeList, - Optional: true, - Description: `The index of the submatch to extract as findings. When not specified, -the entire match is returned. No more than 3 may be included.`, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - }, - }, - }, - "likelihood_adjustment": { - Type: schema.TypeList, - Required: true, - Description: `Likelihood adjustment to apply to all matching findings.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "fixed_likelihood": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY", ""}), - Description: `Set the likelihood of a finding to a fixed value. Either this or relative_likelihood can be set. Possible values: ["VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY"]`, - }, - "relative_likelihood": { - Type: schema.TypeInt, - Optional: true, - Description: `Increase or decrease the likelihood by the specified number of levels. 
For example, -if a finding would be POSSIBLE without the detection rule and relativeLikelihood is 1, -then it is upgraded to LIKELY, while a value of -1 would downgrade it to UNLIKELY. -Likelihood may never drop below VERY_UNLIKELY or exceed VERY_LIKELY, so applying an -adjustment of 1 followed by an adjustment of -1 when base likelihood is VERY_LIKELY -will result in a final likelihood of LIKELY. Either this or fixed_likelihood can be set.`, - }, - }, - }, - }, - "proximity": { - Type: schema.TypeList, - Required: true, - Description: `Proximity of the finding within which the entire hotword must reside. The total length of the window cannot -exceed 1000 characters. Note that the finding itself will be included in the window, so that hotwords may be -used to match substrings of the finding itself. For example, the certainty of a phone number regex -'(\d{3}) \d{3}-\d{4}' could be adjusted upwards if the area code is known to be the local area code of a company -office using the hotword regex '(xxx)', where 'xxx' is the area code in question.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "window_after": { - Type: schema.TypeInt, - Optional: true, - Description: `Number of characters after the finding to consider. Either this or window_before must be specified`, - }, - "window_before": { - Type: schema.TypeInt, - Optional: true, - Description: `Number of characters before the finding to consider. Either this or window_after must be specified`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the inspect template. 
Set by the server.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataLossPreventionInspectTemplateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionInspectTemplateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionInspectTemplateDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - inspectConfigProp, err := expandDataLossPreventionInspectTemplateInspectConfig(d.Get("inspect_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inspect_config"); !isEmptyValue(reflect.ValueOf(inspectConfigProp)) && (ok || !reflect.DeepEqual(v, inspectConfigProp)) { - obj["inspectConfig"] = inspectConfigProp - } - - obj, err = resourceDataLossPreventionInspectTemplateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/inspectTemplates") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new InspectTemplate: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - 
return fmt.Errorf("Error creating InspectTemplate: %s", err) - } - if err := d.Set("name", flattenDataLossPreventionInspectTemplateName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{parent}}/inspectTemplates/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating InspectTemplate %q: %#v", d.Id(), res) - - return resourceDataLossPreventionInspectTemplateRead(d, meta) -} - -func resourceDataLossPreventionInspectTemplateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/inspectTemplates/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataLossPreventionInspectTemplate %q", d.Id())) - } - - if err := d.Set("name", flattenDataLossPreventionInspectTemplateName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading InspectTemplate: %s", err) - } - if err := d.Set("description", flattenDataLossPreventionInspectTemplateDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading InspectTemplate: %s", err) - } - if err := d.Set("display_name", flattenDataLossPreventionInspectTemplateDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading InspectTemplate: %s", err) - } - if err := d.Set("inspect_config", 
flattenDataLossPreventionInspectTemplateInspectConfig(res["inspectConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading InspectTemplate: %s", err) - } - - return nil -} - -func resourceDataLossPreventionInspectTemplateUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionInspectTemplateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionInspectTemplateDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - inspectConfigProp, err := expandDataLossPreventionInspectTemplateInspectConfig(d.Get("inspect_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inspect_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, inspectConfigProp)) { - obj["inspectConfig"] = inspectConfigProp - } - - obj, err = resourceDataLossPreventionInspectTemplateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/inspectTemplates/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating InspectTemplate %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } 
- - if d.HasChange("inspect_config") { - updateMask = append(updateMask, "inspectConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating InspectTemplate %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating InspectTemplate %q: %#v", d.Id(), res) - } - - return resourceDataLossPreventionInspectTemplateRead(d, meta) -} - -func resourceDataLossPreventionInspectTemplateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/inspectTemplates/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting InspectTemplate %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "InspectTemplate") - } - - log.Printf("[DEBUG] Finished deleting InspectTemplate %q: %#v", d.Id(), res) - return nil -} - -func resourceDataLossPreventionInspectTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // Custom import to 
handle parent possibilities - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - parts := strings.Split(d.Get("name").(string), "/") - if len(parts) == 6 { - if err := d.Set("name", parts[5]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - } else if len(parts) == 4 { - if err := d.Set("name", parts[3]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, fmt.Errorf("Unexpected import id: %s, expected form {{parent}}/inspectTemplate/{{name}}", d.Get("name").(string)) - } - // Remove "/inspectTemplate/{{name}}" from the id - parts = parts[:len(parts)-2] - if err := d.Set("parent", strings.Join(parts, "/")); err != nil { - return nil, fmt.Errorf("Error setting parent: %s", err) - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{parent}}/inspectTemplates/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDataLossPreventionInspectTemplateName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDataLossPreventionInspectTemplateDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["exclude_info_types"] = - flattenDataLossPreventionInspectTemplateInspectConfigExcludeInfoTypes(original["excludeInfoTypes"], d, config) - 
transformed["include_quote"] = - flattenDataLossPreventionInspectTemplateInspectConfigIncludeQuote(original["includeQuote"], d, config) - transformed["min_likelihood"] = - flattenDataLossPreventionInspectTemplateInspectConfigMinLikelihood(original["minLikelihood"], d, config) - transformed["limits"] = - flattenDataLossPreventionInspectTemplateInspectConfigLimits(original["limits"], d, config) - transformed["info_types"] = - flattenDataLossPreventionInspectTemplateInspectConfigInfoTypes(original["infoTypes"], d, config) - transformed["content_options"] = - flattenDataLossPreventionInspectTemplateInspectConfigContentOptions(original["contentOptions"], d, config) - transformed["rule_set"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSet(original["ruleSet"], d, config) - transformed["custom_info_types"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypes(original["customInfoTypes"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigExcludeInfoTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigIncludeQuote(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigMinLikelihood(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigLimits(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["max_findings_per_item"] = - flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerItem(original["maxFindingsPerItem"], d, config) - transformed["max_findings_per_request"] = - 
flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerRequest(original["maxFindingsPerRequest"], d, config) - transformed["max_findings_per_info_type"] = - flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType(original["maxFindingsPerInfoType"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerItem(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerRequest(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "info_type": flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(original["infoType"], d, config), - 
"max_findings": flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(original["maxFindings"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionInspectTemplateInspectConfigInfoTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesName(original["name"], d, config), - "version": 
flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesVersion(original["version"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigContentOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSet(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "info_types": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypes(original["infoTypes"], d, config), - "rules": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRules(original["rules"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesName(original["name"], d, config), - }) - } - return transformed -} -func 
flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRules(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "hotword_rule": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRule(original["hotwordRule"], d, config), - "exclusion_rule": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRule(original["exclusionRule"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRule(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hotword_regex"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegex(original["hotwordRegex"], d, config) - transformed["proximity"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximity(original["proximity"], d, config) - transformed["likelihood_adjustment"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(original["likelihoodAdjustment"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original 
:= v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pattern"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(original["pattern"], d, config) - transformed["group_indexes"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(original["groupIndexes"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["window_before"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(original["windowBefore"], d, config) - transformed["window_after"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(original["windowAfter"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := 
int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["fixed_likelihood"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(original["fixedLikelihood"], d, config) - transformed["relative_likelihood"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(original["relativeLikelihood"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := 
v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRule(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["matching_type"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleMatchingType(original["matchingType"], d, config) - transformed["dictionary"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionary(original["dictionary"], d, config) - transformed["regex"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegex(original["regex"], d, config) - transformed["exclude_info_types"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(original["excludeInfoTypes"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleMatchingType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionary(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["word_list"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(original["wordList"], d, config) - transformed["cloud_storage_path"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(original["cloudStoragePath"], d, config) - return 
[]interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["words"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(original["words"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(original["path"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pattern"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexPattern(original["pattern"], d, config) - 
transformed["group_indexes"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(original["groupIndexes"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexPattern(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["info_types"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(original["infoTypes"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(original["name"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "info_type": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoType(original["infoType"], d, config), - "likelihood": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesLikelihood(original["likelihood"], d, config), - "exclusion_type": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesExclusionType(original["exclusionType"], d, config), - "regex": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegex(original["regex"], d, config), - "dictionary": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionary(original["dictionary"], d, config), - "stored_type": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredType(original["storedType"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesLikelihood(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesExclusionType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pattern"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexPattern(original["pattern"], d, config) - transformed["group_indexes"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexGroupIndexes(original["groupIndexes"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexPattern(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexGroupIndexes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionary(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["word_list"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordList(original["wordList"], d, config) - transformed["cloud_storage_path"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath(original["cloudStoragePath"], d, config) - return 
[]interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordList(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["words"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordListWords(original["words"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordListWords(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(original["path"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredTypeName(original["name"], d, config) - return []interface{}{transformed} -} -func 
flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredTypeName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDataLossPreventionInspectTemplateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExcludeInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigExcludeInfoTypes(original["exclude_info_types"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExcludeInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["excludeInfoTypes"] = transformedExcludeInfoTypes - } - - transformedIncludeQuote, err := expandDataLossPreventionInspectTemplateInspectConfigIncludeQuote(original["include_quote"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIncludeQuote); val.IsValid() && !isEmptyValue(val) { - transformed["includeQuote"] = transformedIncludeQuote - } - - transformedMinLikelihood, err := expandDataLossPreventionInspectTemplateInspectConfigMinLikelihood(original["min_likelihood"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinLikelihood); val.IsValid() && !isEmptyValue(val) { - transformed["minLikelihood"] = transformedMinLikelihood - } - - transformedLimits, err := expandDataLossPreventionInspectTemplateInspectConfigLimits(original["limits"], d, config) - if err != nil { - return nil, err - } 
else if val := reflect.ValueOf(transformedLimits); val.IsValid() && !isEmptyValue(val) { - transformed["limits"] = transformedLimits - } - - transformedInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigInfoTypes(original["info_types"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["infoTypes"] = transformedInfoTypes - } - - transformedContentOptions, err := expandDataLossPreventionInspectTemplateInspectConfigContentOptions(original["content_options"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContentOptions); val.IsValid() && !isEmptyValue(val) { - transformed["contentOptions"] = transformedContentOptions - } - - transformedRuleSet, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSet(original["rule_set"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRuleSet); val.IsValid() && !isEmptyValue(val) { - transformed["ruleSet"] = transformedRuleSet - } - - transformedCustomInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypes(original["custom_info_types"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCustomInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["customInfoTypes"] = transformedCustomInfoTypes - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigExcludeInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigIncludeQuote(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigMinLikelihood(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, 
nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigLimits(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxFindingsPerItem, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerItem(original["max_findings_per_item"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxFindingsPerItem); val.IsValid() && !isEmptyValue(val) { - transformed["maxFindingsPerItem"] = transformedMaxFindingsPerItem - } - - transformedMaxFindingsPerRequest, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerRequest(original["max_findings_per_request"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxFindingsPerRequest); val.IsValid() && !isEmptyValue(val) { - transformed["maxFindingsPerRequest"] = transformedMaxFindingsPerRequest - } - - transformedMaxFindingsPerInfoType, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType(original["max_findings_per_info_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxFindingsPerInfoType); val.IsValid() && !isEmptyValue(val) { - transformed["maxFindingsPerInfoType"] = transformedMaxFindingsPerInfoType - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerItem(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInfoType, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(original["info_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInfoType); val.IsValid() && !isEmptyValue(val) { - transformed["infoType"] = transformedInfoType - } - - transformedMaxFindings, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(original["max_findings"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxFindings); val.IsValid() && !isEmptyValue(val) { - transformed["maxFindings"] = transformedMaxFindings - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigInfoTypesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedVersion, err := expandDataLossPreventionInspectTemplateInspectConfigInfoTypesVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigInfoTypesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigInfoTypesVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigContentOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) 
- for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypes(original["info_types"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["infoTypes"] = transformedInfoTypes - } - - transformedRules, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRules(original["rules"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRules); val.IsValid() && !isEmptyValue(val) { - transformed["rules"] = transformedRules - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { 
- continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHotwordRule, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRule(original["hotword_rule"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHotwordRule); val.IsValid() && !isEmptyValue(val) { - transformed["hotwordRule"] = transformedHotwordRule - } - - transformedExclusionRule, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRule(original["exclusion_rule"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExclusionRule); val.IsValid() && !isEmptyValue(val) { - transformed["exclusionRule"] = transformedExclusionRule - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHotwordRegex, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegex(original["hotword_regex"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHotwordRegex); val.IsValid() && !isEmptyValue(val) { - transformed["hotwordRegex"] = transformedHotwordRegex - } - - transformedProximity, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximity(original["proximity"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProximity); val.IsValid() && !isEmptyValue(val) { - transformed["proximity"] = transformedProximity - } - - transformedLikelihoodAdjustment, err := 
expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(original["likelihood_adjustment"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLikelihoodAdjustment); val.IsValid() && !isEmptyValue(val) { - transformed["likelihoodAdjustment"] = transformedLikelihoodAdjustment - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPattern, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(original["pattern"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPattern); val.IsValid() && !isEmptyValue(val) { - transformed["pattern"] = transformedPattern - } - - transformedGroupIndexes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(original["group_indexes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !isEmptyValue(val) { - transformed["groupIndexes"] = transformedGroupIndexes - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximity(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWindowBefore, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(original["window_before"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWindowBefore); val.IsValid() && !isEmptyValue(val) { - transformed["windowBefore"] = transformedWindowBefore - } - - transformedWindowAfter, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(original["window_after"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWindowAfter); val.IsValid() && !isEmptyValue(val) { - transformed["windowAfter"] = transformedWindowAfter - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFixedLikelihood, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(original["fixed_likelihood"], d, config) - if err != nil { 
- return nil, err - } else if val := reflect.ValueOf(transformedFixedLikelihood); val.IsValid() && !isEmptyValue(val) { - transformed["fixedLikelihood"] = transformedFixedLikelihood - } - - transformedRelativeLikelihood, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(original["relative_likelihood"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRelativeLikelihood); val.IsValid() && !isEmptyValue(val) { - transformed["relativeLikelihood"] = transformedRelativeLikelihood - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMatchingType, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleMatchingType(original["matching_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMatchingType); val.IsValid() && !isEmptyValue(val) { - transformed["matchingType"] = transformedMatchingType - } - - transformedDictionary, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionary(original["dictionary"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedDictionary); val.IsValid() && !isEmptyValue(val) { - transformed["dictionary"] = transformedDictionary - } - - transformedRegex, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegex(original["regex"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRegex); val.IsValid() && !isEmptyValue(val) { - transformed["regex"] = transformedRegex - } - - transformedExcludeInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(original["exclude_info_types"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExcludeInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["excludeInfoTypes"] = transformedExcludeInfoTypes - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleMatchingType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionary(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWordList, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(original["word_list"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWordList); val.IsValid() && !isEmptyValue(val) { - transformed["wordList"] = transformedWordList - } - - transformedCloudStoragePath, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(original["cloud_storage_path"], d, config) - if err != nil { - return nil, err - } else if 
val := reflect.ValueOf(transformedCloudStoragePath); val.IsValid() && !isEmptyValue(val) { - transformed["cloudStoragePath"] = transformedCloudStoragePath - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWords, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(original["words"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWords); val.IsValid() && !isEmptyValue(val) { - transformed["words"] = transformedWords - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPattern, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexPattern(original["pattern"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPattern); val.IsValid() && !isEmptyValue(val) { - transformed["pattern"] = transformedPattern - } - - transformedGroupIndexes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(original["group_indexes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !isEmptyValue(val) { - transformed["groupIndexes"] = transformedGroupIndexes - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexPattern(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInfoTypes, err := 
expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(original["info_types"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["infoTypes"] = transformedInfoTypes - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInfoType, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoType(original["info_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInfoType); val.IsValid() && 
!isEmptyValue(val) { - transformed["infoType"] = transformedInfoType - } - - transformedLikelihood, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesLikelihood(original["likelihood"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLikelihood); val.IsValid() && !isEmptyValue(val) { - transformed["likelihood"] = transformedLikelihood - } - - transformedExclusionType, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesExclusionType(original["exclusion_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExclusionType); val.IsValid() && !isEmptyValue(val) { - transformed["exclusionType"] = transformedExclusionType - } - - transformedRegex, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegex(original["regex"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRegex); val.IsValid() && !isEmptyValue(val) { - transformed["regex"] = transformedRegex - } - - transformedDictionary, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionary(original["dictionary"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDictionary); val.IsValid() && !isEmptyValue(val) { - transformed["dictionary"] = transformedDictionary - } - - transformedStoredType, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredType(original["stored_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStoredType); val.IsValid() && !isEmptyValue(val) { - transformed["storedType"] = transformedStoredType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if 
len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesLikelihood(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesExclusionType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPattern, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexPattern(original["pattern"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPattern); val.IsValid() && !isEmptyValue(val) { - transformed["pattern"] = transformedPattern - } - - transformedGroupIndexes, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexGroupIndexes(original["group_indexes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGroupIndexes); val.IsValid() && 
!isEmptyValue(val) { - transformed["groupIndexes"] = transformedGroupIndexes - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexPattern(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexGroupIndexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionary(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWordList, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordList(original["word_list"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWordList); val.IsValid() && !isEmptyValue(val) { - transformed["wordList"] = transformedWordList - } - - transformedCloudStoragePath, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath(original["cloud_storage_path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCloudStoragePath); val.IsValid() && !isEmptyValue(val) { - transformed["cloudStoragePath"] = transformedCloudStoragePath - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordList(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWords, err := 
expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordListWords(original["words"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWords); val.IsValid() && !isEmptyValue(val) { - transformed["words"] = transformedWords - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordListWords(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredTypeName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredTypeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDataLossPreventionInspectTemplateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - newObj := make(map[string]interface{}) - newObj["inspectTemplate"] = obj - return newObj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_loss_prevention_job_trigger.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_loss_prevention_job_trigger.go deleted file mode 100644 index 13023b70a4..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_loss_prevention_job_trigger.go +++ /dev/null @@ -1,2125 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDataLossPreventionJobTrigger() *schema.Resource { - return &schema.Resource{ - Create: resourceDataLossPreventionJobTriggerCreate, - Read: resourceDataLossPreventionJobTriggerRead, - Update: resourceDataLossPreventionJobTriggerUpdate, - Delete: resourceDataLossPreventionJobTriggerDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDataLossPreventionJobTriggerImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent of the trigger, either in the format 'projects/{{project}}' -or 'projects/{{project}}/locations/{{location}}'`, - }, - "triggers": { - Type: schema.TypeList, - Required: true, - Description: `What event needs to occur for a new job to be started.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "schedule": { - Type: schema.TypeList, - Optional: true, - Description: `Schedule for triggered jobs`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "recurrence_period_duration": { - Type: schema.TypeString, - Optional: true, - Description: `With this option a job is started a regular periodic basis. For example: every day (86400 seconds). - -A scheduled start time will be skipped if the previous execution has not ended when its scheduled time occurs. - -This value must be set to a time duration greater than or equal to 1 day and can be no longer than 60 days. - -A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, - }, - }, - }, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A description of the job trigger.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `User set display name of the job trigger.`, - }, - "inspect_job": { - Type: schema.TypeList, - Optional: true, - Description: `Controls what and how to inspect for findings.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "actions": { - Type: schema.TypeList, - Required: true, - Description: `A task to execute on the completion of a job.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pub_sub": { - Type: schema.TypeList, - Optional: true, - Description: `Publish a message into a given Pub/Sub topic when the job completes.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "topic": { - Type: schema.TypeString, - Required: true, - Description: `Cloud Pub/Sub topic to send notifications to.`, - }, - }, - }, - ExactlyOneOf: []string{}, - }, - "publish_findings_to_cloud_data_catalog": { - Type: schema.TypeList, - Optional: true, - Description: `Publish findings of a DlpJob to Data Catalog.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{}, - }, - ExactlyOneOf: []string{}, - }, - "publish_summary_to_cscc": { - Type: schema.TypeList, - Optional: true, - Description: `Publish the result summary of a DlpJob to the Cloud Security Command Center.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{}, - }, - ExactlyOneOf: []string{}, - }, - "save_findings": { - Type: schema.TypeList, - Optional: true, - Description: `If set, the detailed findings will be persisted to the specified OutputStorageConfig. Only a single instance of this action can be specified. 
Compatible with: Inspect, Risk`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "output_config": { - Type: schema.TypeList, - Required: true, - Description: `Information on where to store output`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "table": { - Type: schema.TypeList, - Required: true, - Description: `Information on the location of the target BigQuery Table.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset_id": { - Type: schema.TypeString, - Required: true, - Description: `Dataset ID of the table.`, - }, - "project_id": { - Type: schema.TypeString, - Required: true, - Description: `The Google Cloud Platform project ID of the project containing the table.`, - }, - "table_id": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the table. If is not set a new one will be generated for you with the following format: -'dlp_googleapis_yyyy_mm_dd_[dlp_job_id]'. Pacific timezone will be used for generating the date details.`, - }, - }, - }, - }, - "output_schema": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"BASIC_COLUMNS", "GCS_COLUMNS", "DATASTORE_COLUMNS", "BIG_QUERY_COLUMNS", "ALL_COLUMNS", ""}), - Description: `Schema used for writing the findings for Inspect jobs. This field is only used for -Inspect and must be unspecified for Risk jobs. Columns are derived from the Finding -object. If appending to an existing table, any columns from the predefined schema -that are missing will be added. No columns in the existing table will be deleted. - -If unspecified, then all available columns will be used for a new table or an (existing) -table with no schema, and no changes will be made to an existing table that has a schema. -Only for use with external storage. 
Possible values: ["BASIC_COLUMNS", "GCS_COLUMNS", "DATASTORE_COLUMNS", "BIG_QUERY_COLUMNS", "ALL_COLUMNS"]`, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{}, - }, - }, - }, - }, - "inspect_template_name": { - Type: schema.TypeString, - Required: true, - Description: `The name of the template to run when this job is triggered.`, - }, - "storage_config": { - Type: schema.TypeList, - Required: true, - Description: `Information on where to inspect`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "big_query_options": { - Type: schema.TypeList, - Optional: true, - Description: `Options defining BigQuery table and row identifiers.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "table_reference": { - Type: schema.TypeList, - Required: true, - Description: `Set of files to scan.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset_id": { - Type: schema.TypeString, - Required: true, - Description: `The dataset ID of the table.`, - }, - "project_id": { - Type: schema.TypeString, - Required: true, - Description: `The Google Cloud Platform project ID of the project containing the table.`, - }, - "table_id": { - Type: schema.TypeString, - Required: true, - Description: `The name of the table.`, - }, - }, - }, - }, - "identifying_fields": { - Type: schema.TypeList, - Optional: true, - Description: `Specifies the BigQuery fields that will be returned with findings. -If not specified, no identifying fields will be returned for findings.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name of a BigQuery field to be returned with the findings.`, - }, - }, - }, - }, - "rows_limit": { - Type: schema.TypeInt, - Optional: true, - Description: `Max number of rows to scan. If the table has more rows than this value, the rest of the rows are omitted. 
-If not set, or if set to 0, all rows will be scanned. Only one of rowsLimit and rowsLimitPercent can be -specified. Cannot be used in conjunction with TimespanConfig.`, - }, - "rows_limit_percent": { - Type: schema.TypeInt, - Optional: true, - Description: `Max percentage of rows to scan. The rest are omitted. The number of rows scanned is rounded down. -Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of -rowsLimit and rowsLimitPercent can be specified. Cannot be used in conjunction with TimespanConfig.`, - }, - "sample_method": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"TOP", "RANDOM_START", ""}), - Description: `How to sample rows if not all rows are scanned. Meaningful only when used in conjunction with either -rowsLimit or rowsLimitPercent. If not specified, rows are scanned in the order BigQuery reads them. Default value: "TOP" Possible values: ["TOP", "RANDOM_START"]`, - Default: "TOP", - }, - }, - }, - }, - "cloud_storage_options": { - Type: schema.TypeList, - Optional: true, - Description: `Options defining a file or a set of files within a Google Cloud Storage bucket.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "file_set": { - Type: schema.TypeList, - Required: true, - Description: `Set of files to scan.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "regex_file_set": { - Type: schema.TypeList, - Optional: true, - Description: `The regex-filtered set of files to scan.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucket_name": { - Type: schema.TypeString, - Required: true, - Description: `The name of a Cloud Storage bucket.`, - }, - "exclude_regex": { - Type: schema.TypeList, - Optional: true, - Description: `A list of regular expressions matching file paths to exclude. 
All files in the bucket that match at -least one of these regular expressions will be excluded from the scan.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "include_regex": { - Type: schema.TypeList, - Optional: true, - Description: `A list of regular expressions matching file paths to include. All files in the bucket -that match at least one of these regular expressions will be included in the set of files, -except for those that also match an item in excludeRegex. Leaving this field empty will -match all files by default (this is equivalent to including .* in the list)`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - ExactlyOneOf: []string{"inspect_job.0.storage_config.0.cloud_storage_options.0.file_set.0.url", "inspect_job.0.storage_config.0.cloud_storage_options.0.file_set.0.regex_file_set"}, - }, - "url": { - Type: schema.TypeString, - Optional: true, - Description: `The Cloud Storage url of the file(s) to scan, in the format 'gs:///'. Trailing wildcard -in the path is allowed. - -If the url ends in a trailing slash, the bucket or directory represented by the url will be scanned -non-recursively (content in sub-directories will not be scanned). This means that 'gs://mybucket/' is -equivalent to 'gs://mybucket/*', and 'gs://mybucket/directory/' is equivalent to 'gs://mybucket/directory/*'.`, - ExactlyOneOf: []string{"inspect_job.0.storage_config.0.cloud_storage_options.0.file_set.0.url", "inspect_job.0.storage_config.0.cloud_storage_options.0.file_set.0.regex_file_set"}, - }, - }, - }, - }, - "bytes_limit_per_file": { - Type: schema.TypeInt, - Optional: true, - Description: `Max number of bytes to scan from a file. If a scanned file's size is bigger than this value -then the rest of the bytes are omitted.`, - }, - "bytes_limit_per_file_percent": { - Type: schema.TypeInt, - Optional: true, - Description: `Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. 
-Must be between 0 and 100, inclusively. Both 0 and 100 means no limit.`, - }, - "file_types": { - Type: schema.TypeList, - Optional: true, - Description: `List of file type groups to include in the scan. If empty, all files are scanned and available data -format processors are applied. In addition, the binary content of the selected files is always scanned as well. -Images are scanned only as binary if the specified region does not support image inspection and no fileTypes were specified. Possible values: ["BINARY_FILE", "TEXT_FILE", "IMAGE", "WORD", "PDF", "AVRO", "CSV", "TSV"]`, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"BINARY_FILE", "TEXT_FILE", "IMAGE", "WORD", "PDF", "AVRO", "CSV", "TSV"}), - }, - }, - "files_limit_percent": { - Type: schema.TypeInt, - Optional: true, - Description: `Limits the number of files to scan to this percentage of the input FileSet. Number of files scanned is rounded down. -Must be between 0 and 100, inclusively. Both 0 and 100 means no limit.`, - }, - "sample_method": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"TOP", "RANDOM_START", ""}), - Description: `How to sample bytes if not all bytes are scanned. Meaningful only when used in conjunction with bytesLimitPerFile. -If not specified, scanning would start from the top. 
Possible values: ["TOP", "RANDOM_START"]`, - }, - }, - }, - }, - "datastore_options": { - Type: schema.TypeList, - Optional: true, - Description: `Options defining a data set within Google Cloud Datastore.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kind": { - Type: schema.TypeList, - Required: true, - Description: `A representation of a Datastore kind.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `The name of the Datastore kind.`, - }, - }, - }, - }, - "partition_id": { - Type: schema.TypeList, - Required: true, - Description: `Datastore partition ID. A partition ID identifies a grouping of entities. The grouping -is always by project and namespace, however the namespace ID may be empty.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "project_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the project to which the entities belong.`, - }, - "namespace_id": { - Type: schema.TypeString, - Optional: true, - Description: `If not empty, the ID of the namespace to which the entities belong.`, - }, - }, - }, - }, - }, - }, - }, - "timespan_config": { - Type: schema.TypeList, - Optional: true, - Description: `Information on where to inspect`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "timestamp_field": { - Type: schema.TypeList, - Required: true, - Description: `Information on where to inspect`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Specification of the field containing the timestamp of scanned items. Used for data sources like Datastore and BigQuery. - -For BigQuery: Required to filter out rows based on the given start and end times. 
If not specified and the table was -modified between the given start and end times, the entire table will be scanned. The valid data types of the timestamp -field are: INTEGER, DATE, TIMESTAMP, or DATETIME BigQuery column. - -For Datastore. Valid data types of the timestamp field are: TIMESTAMP. Datastore entity will be scanned if the -timestamp property does not exist or its value is empty or invalid.`, - }, - }, - }, - }, - "enable_auto_population_of_timespan_config": { - Type: schema.TypeBool, - Optional: true, - Description: `When the job is started by a JobTrigger we will automatically figure out a valid startTime to avoid -scanning files that have not been modified since the last time the JobTrigger executed. This will -be based on the time of the execution of the last run of the JobTrigger.`, - }, - "end_time": { - Type: schema.TypeString, - Optional: true, - Description: `Exclude files or rows newer than this value. If set to zero, no upper time limit is applied.`, - AtLeastOneOf: []string{"inspect_job.0.storage_config.0.timespan_config.0.start_time", "inspect_job.0.storage_config.0.timespan_config.0.end_time"}, - }, - "start_time": { - Type: schema.TypeString, - Optional: true, - Description: `Exclude files or rows older than this value.`, - AtLeastOneOf: []string{"inspect_job.0.storage_config.0.timespan_config.0.start_time", "inspect_job.0.storage_config.0.timespan_config.0.end_time"}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "status": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"PAUSED", "HEALTHY", "CANCELLED", ""}), - Description: `Whether the trigger is currently active. 
Default value: "HEALTHY" Possible values: ["PAUSED", "HEALTHY", "CANCELLED"]`, - Default: "HEALTHY", - }, - "last_run_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of the last time this trigger executed.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the job trigger. Set by the server.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataLossPreventionJobTriggerCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionJobTriggerDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionJobTriggerDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - statusProp, err := expandDataLossPreventionJobTriggerStatus(d.Get("status"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("status"); !isEmptyValue(reflect.ValueOf(statusProp)) && (ok || !reflect.DeepEqual(v, statusProp)) { - obj["status"] = statusProp - } - triggersProp, err := expandDataLossPreventionJobTriggerTriggers(d.Get("triggers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("triggers"); !isEmptyValue(reflect.ValueOf(triggersProp)) && (ok || !reflect.DeepEqual(v, triggersProp)) { - obj["triggers"] = triggersProp - } - inspectJobProp, err := 
expandDataLossPreventionJobTriggerInspectJob(d.Get("inspect_job"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inspect_job"); !isEmptyValue(reflect.ValueOf(inspectJobProp)) && (ok || !reflect.DeepEqual(v, inspectJobProp)) { - obj["inspectJob"] = inspectJobProp - } - - obj, err = resourceDataLossPreventionJobTriggerEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/jobTriggers") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new JobTrigger: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating JobTrigger: %s", err) - } - if err := d.Set("name", flattenDataLossPreventionJobTriggerName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{parent}}/jobTriggers/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating JobTrigger %q: %#v", d.Id(), res) - - return resourceDataLossPreventionJobTriggerRead(d, meta) -} - -func resourceDataLossPreventionJobTriggerRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/jobTriggers/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err 
== nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataLossPreventionJobTrigger %q", d.Id())) - } - - if err := d.Set("name", flattenDataLossPreventionJobTriggerName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading JobTrigger: %s", err) - } - if err := d.Set("description", flattenDataLossPreventionJobTriggerDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading JobTrigger: %s", err) - } - if err := d.Set("display_name", flattenDataLossPreventionJobTriggerDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading JobTrigger: %s", err) - } - if err := d.Set("last_run_time", flattenDataLossPreventionJobTriggerLastRunTime(res["lastRunTime"], d, config)); err != nil { - return fmt.Errorf("Error reading JobTrigger: %s", err) - } - if err := d.Set("status", flattenDataLossPreventionJobTriggerStatus(res["status"], d, config)); err != nil { - return fmt.Errorf("Error reading JobTrigger: %s", err) - } - if err := d.Set("triggers", flattenDataLossPreventionJobTriggerTriggers(res["triggers"], d, config)); err != nil { - return fmt.Errorf("Error reading JobTrigger: %s", err) - } - if err := d.Set("inspect_job", flattenDataLossPreventionJobTriggerInspectJob(res["inspectJob"], d, config)); err != nil { - return fmt.Errorf("Error reading JobTrigger: %s", err) - } - - return nil -} - -func resourceDataLossPreventionJobTriggerUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionJobTriggerDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionJobTriggerDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - statusProp, err := expandDataLossPreventionJobTriggerStatus(d.Get("status"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("status"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, statusProp)) { - obj["status"] = statusProp - } - triggersProp, err := expandDataLossPreventionJobTriggerTriggers(d.Get("triggers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("triggers"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, triggersProp)) { - obj["triggers"] = triggersProp - } - inspectJobProp, err := expandDataLossPreventionJobTriggerInspectJob(d.Get("inspect_job"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inspect_job"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, inspectJobProp)) { - obj["inspectJob"] = inspectJobProp - } - - obj, err = resourceDataLossPreventionJobTriggerEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/jobTriggers/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating JobTrigger %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("status") { - updateMask = append(updateMask, "status") - } - - if d.HasChange("triggers") { - updateMask = append(updateMask, 
"triggers") - } - - if d.HasChange("inspect_job") { - updateMask = append(updateMask, "inspectJob") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating JobTrigger %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating JobTrigger %q: %#v", d.Id(), res) - } - - return resourceDataLossPreventionJobTriggerRead(d, meta) -} - -func resourceDataLossPreventionJobTriggerDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/jobTriggers/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting JobTrigger %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "JobTrigger") - } - - log.Printf("[DEBUG] Finished deleting JobTrigger %q: %#v", d.Id(), res) - return nil -} - -func resourceDataLossPreventionJobTriggerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // Custom import to handle parent possibilities - if err 
:= parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - parts := strings.Split(d.Get("name").(string), "/") - if len(parts) == 6 { - if err := d.Set("name", parts[5]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - } else if len(parts) == 4 { - if err := d.Set("name", parts[3]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, fmt.Errorf("Unexpected import id: %s, expected form {{parent}}/jobTrigger/{{name}}", d.Get("name").(string)) - } - // Remove "/jobTrigger/{{name}}" from the id - parts = parts[:len(parts)-2] - if err := d.Set("parent", strings.Join(parts, "/")); err != nil { - return nil, fmt.Errorf("Error setting parent: %s", err) - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{parent}}/jobTriggers/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDataLossPreventionJobTriggerName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDataLossPreventionJobTriggerDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerLastRunTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerTriggers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { 
- original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "schedule": flattenDataLossPreventionJobTriggerTriggersSchedule(original["schedule"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionJobTriggerTriggersSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["recurrence_period_duration"] = - flattenDataLossPreventionJobTriggerTriggersScheduleRecurrencePeriodDuration(original["recurrencePeriodDuration"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerTriggersScheduleRecurrencePeriodDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJob(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["inspect_template_name"] = - flattenDataLossPreventionJobTriggerInspectJobInspectTemplateName(original["inspectTemplateName"], d, config) - transformed["storage_config"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfig(original["storageConfig"], d, config) - transformed["actions"] = - flattenDataLossPreventionJobTriggerInspectJobActions(original["actions"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobInspectTemplateName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfig(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["timespan_config"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfig(original["timespanConfig"], d, config) - transformed["datastore_options"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptions(original["datastoreOptions"], d, config) - transformed["cloud_storage_options"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptions(original["cloudStorageOptions"], d, config) - transformed["big_query_options"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(original["bigQueryOptions"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["start_time"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigStartTime(original["startTime"], d, config) - transformed["end_time"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEndTime(original["endTime"], d, config) - transformed["enable_auto_population_of_timespan_config"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEnableAutoPopulationOfTimespanConfig(original["enableAutoPopulationOfTimespanConfig"], d, config) - transformed["timestamp_field"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(original["timestampField"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigStartTime(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEndTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEnableAutoPopulationOfTimespanConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["partition_id"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(original["partitionId"], d, config) - transformed["kind"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind(original["kind"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := 
v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdProjectId(original["projectId"], d, config) - transformed["namespace_id"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdNamespaceId(original["namespaceId"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdNamespaceId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKindName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKindName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["file_set"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(original["fileSet"], d, config) - 
transformed["bytes_limit_per_file"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFile(original["bytesLimitPerFile"], d, config) - transformed["bytes_limit_per_file_percent"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFilePercent(original["bytesLimitPerFilePercent"], d, config) - transformed["files_limit_percent"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFilesLimitPercent(original["filesLimitPercent"], d, config) - transformed["file_types"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypes(original["fileTypes"], d, config) - transformed["sample_method"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethod(original["sampleMethod"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["url"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetUrl(original["url"], d, config) - transformed["regex_file_set"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(original["regexFileSet"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := 
v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["bucket_name"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetBucketName(original["bucketName"], d, config) - transformed["include_regex"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetIncludeRegex(original["includeRegex"], d, config) - transformed["exclude_regex"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetExcludeRegex(original["excludeRegex"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetBucketName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetIncludeRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetExcludeRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFile(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFilePercent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles 
the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFilesLimitPercent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["table_reference"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(original["tableReference"], d, config) - transformed["rows_limit"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimit(original["rowsLimit"], d, config) - transformed["rows_limit_percent"] = - 
flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimitPercent(original["rowsLimitPercent"], d, config) - transformed["sample_method"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethod(original["sampleMethod"], d, config) - transformed["identifying_fields"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields(original["identifyingFields"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceProjectId(original["projectId"], d, config) - transformed["dataset_id"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceDatasetId(original["datasetId"], d, config) - transformed["table_id"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceTableId(original["tableId"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceTableId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimit(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimitPercent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFieldsName(original["name"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFieldsName(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobActions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "save_findings": flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindings(original["saveFindings"], d, config), - "pub_sub": flattenDataLossPreventionJobTriggerInspectJobActionsPubSub(original["pubSub"], d, config), - "publish_summary_to_cscc": flattenDataLossPreventionJobTriggerInspectJobActionsPublishSummaryToCscc(original["publishSummaryToCscc"], d, config), - "publish_findings_to_cloud_data_catalog": flattenDataLossPreventionJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog(original["publishFindingsToCloudDataCatalog"], d, config), - }) - } - return transformed -} -func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindings(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["output_config"] = - flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig(original["outputConfig"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["table"] = - 
flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(original["table"], d, config) - transformed["output_schema"] = - flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(original["outputSchema"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = - flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableProjectId(original["projectId"], d, config) - transformed["dataset_id"] = - flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableDatasetId(original["datasetId"], d, config) - transformed["table_id"] = - flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableTableId(original["tableId"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableTableId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobActionsPubSub(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if 
v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["topic"] = - flattenDataLossPreventionJobTriggerInspectJobActionsPubSubTopic(original["topic"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionJobTriggerInspectJobActionsPubSubTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobActionsPublishSummaryToCscc(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - transformed := make(map[string]interface{}) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - transformed := make(map[string]interface{}) - return []interface{}{transformed} -} - -func expandDataLossPreventionJobTriggerDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerTriggers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSchedule, err := expandDataLossPreventionJobTriggerTriggersSchedule(original["schedule"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedSchedule); val.IsValid() && !isEmptyValue(val) { - transformed["schedule"] = transformedSchedule - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionJobTriggerTriggersSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRecurrencePeriodDuration, err := expandDataLossPreventionJobTriggerTriggersScheduleRecurrencePeriodDuration(original["recurrence_period_duration"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRecurrencePeriodDuration); val.IsValid() && !isEmptyValue(val) { - transformed["recurrencePeriodDuration"] = transformedRecurrencePeriodDuration - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerTriggersScheduleRecurrencePeriodDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJob(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInspectTemplateName, err := expandDataLossPreventionJobTriggerInspectJobInspectTemplateName(original["inspect_template_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInspectTemplateName); val.IsValid() && !isEmptyValue(val) { - transformed["inspectTemplateName"] = transformedInspectTemplateName - } - - transformedStorageConfig, err := expandDataLossPreventionJobTriggerInspectJobStorageConfig(original["storage_config"], d, config) - if err != nil { - return nil, err - } else if val 
:= reflect.ValueOf(transformedStorageConfig); val.IsValid() && !isEmptyValue(val) { - transformed["storageConfig"] = transformedStorageConfig - } - - transformedActions, err := expandDataLossPreventionJobTriggerInspectJobActions(original["actions"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedActions); val.IsValid() && !isEmptyValue(val) { - transformed["actions"] = transformedActions - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobInspectTemplateName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTimespanConfig, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfig(original["timespan_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimespanConfig); val.IsValid() && !isEmptyValue(val) { - transformed["timespanConfig"] = transformedTimespanConfig - } - - transformedDatastoreOptions, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptions(original["datastore_options"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatastoreOptions); val.IsValid() && !isEmptyValue(val) { - transformed["datastoreOptions"] = transformedDatastoreOptions - } - - transformedCloudStorageOptions, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptions(original["cloud_storage_options"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCloudStorageOptions); val.IsValid() && !isEmptyValue(val) { - 
transformed["cloudStorageOptions"] = transformedCloudStorageOptions - } - - transformedBigQueryOptions, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(original["big_query_options"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBigQueryOptions); val.IsValid() && !isEmptyValue(val) { - transformed["bigQueryOptions"] = transformedBigQueryOptions - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedStartTime, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - transformedEndTime, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEndTime(original["end_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEndTime); val.IsValid() && !isEmptyValue(val) { - transformed["endTime"] = transformedEndTime - } - - transformedEnableAutoPopulationOfTimespanConfig, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEnableAutoPopulationOfTimespanConfig(original["enable_auto_population_of_timespan_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnableAutoPopulationOfTimespanConfig); val.IsValid() && !isEmptyValue(val) { - transformed["enableAutoPopulationOfTimespanConfig"] = transformedEnableAutoPopulationOfTimespanConfig - } - - transformedTimestampField, err 
:= expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(original["timestamp_field"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTimestampField); val.IsValid() && !isEmptyValue(val) { - transformed["timestampField"] = transformedTimestampField - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEndTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEnableAutoPopulationOfTimespanConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptions(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPartitionId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(original["partition_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPartitionId); val.IsValid() && !isEmptyValue(val) { - transformed["partitionId"] = transformedPartitionId - } - - transformedKind, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind(original["kind"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKind); val.IsValid() && !isEmptyValue(val) { - transformed["kind"] = transformedKind - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedNamespaceId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdNamespaceId(original["namespace_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNamespaceId); val.IsValid() && !isEmptyValue(val) { - transformed["namespaceId"] = transformedNamespaceId - } - - return transformed, nil -} - -func 
expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdNamespaceId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKindName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKindName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFileSet, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(original["file_set"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFileSet); val.IsValid() && !isEmptyValue(val) { - transformed["fileSet"] = transformedFileSet - } - - transformedBytesLimitPerFile, err := 
expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFile(original["bytes_limit_per_file"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBytesLimitPerFile); val.IsValid() && !isEmptyValue(val) { - transformed["bytesLimitPerFile"] = transformedBytesLimitPerFile - } - - transformedBytesLimitPerFilePercent, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFilePercent(original["bytes_limit_per_file_percent"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBytesLimitPerFilePercent); val.IsValid() && !isEmptyValue(val) { - transformed["bytesLimitPerFilePercent"] = transformedBytesLimitPerFilePercent - } - - transformedFilesLimitPercent, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFilesLimitPercent(original["files_limit_percent"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFilesLimitPercent); val.IsValid() && !isEmptyValue(val) { - transformed["filesLimitPercent"] = transformedFilesLimitPercent - } - - transformedFileTypes, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypes(original["file_types"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFileTypes); val.IsValid() && !isEmptyValue(val) { - transformed["fileTypes"] = transformedFileTypes - } - - transformedSampleMethod, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethod(original["sample_method"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSampleMethod); val.IsValid() && !isEmptyValue(val) { - transformed["sampleMethod"] = transformedSampleMethod - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - transformedRegexFileSet, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(original["regex_file_set"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRegexFileSet); val.IsValid() && !isEmptyValue(val) { - transformed["regexFileSet"] = transformedRegexFileSet - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBucketName, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetBucketName(original["bucket_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBucketName); val.IsValid() && !isEmptyValue(val) { - transformed["bucketName"] = transformedBucketName - } - - transformedIncludeRegex, err := 
expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetIncludeRegex(original["include_regex"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIncludeRegex); val.IsValid() && !isEmptyValue(val) { - transformed["includeRegex"] = transformedIncludeRegex - } - - transformedExcludeRegex, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetExcludeRegex(original["exclude_regex"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExcludeRegex); val.IsValid() && !isEmptyValue(val) { - transformed["excludeRegex"] = transformedExcludeRegex - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetBucketName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetIncludeRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetExcludeRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFilePercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFilesLimitPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTableReference, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(original["table_reference"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTableReference); val.IsValid() && !isEmptyValue(val) { - transformed["tableReference"] = transformedTableReference - } - - transformedRowsLimit, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimit(original["rows_limit"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRowsLimit); val.IsValid() && !isEmptyValue(val) { - transformed["rowsLimit"] = transformedRowsLimit - } - - transformedRowsLimitPercent, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimitPercent(original["rows_limit_percent"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRowsLimitPercent); val.IsValid() && !isEmptyValue(val) { - transformed["rowsLimitPercent"] = transformedRowsLimitPercent - } - - transformedSampleMethod, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethod(original["sample_method"], d, config) - if err != nil { - return 
nil, err - } else if val := reflect.ValueOf(transformedSampleMethod); val.IsValid() && !isEmptyValue(val) { - transformed["sampleMethod"] = transformedSampleMethod - } - - transformedIdentifyingFields, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields(original["identifying_fields"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIdentifyingFields); val.IsValid() && !isEmptyValue(val) { - transformed["identifyingFields"] = transformedIdentifyingFields - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedDatasetId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedTableId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceTableId(original["table_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - return 
transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceTableId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimitPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFieldsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func 
expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFieldsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSaveFindings, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindings(original["save_findings"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSaveFindings); val.IsValid() && !isEmptyValue(val) { - transformed["saveFindings"] = transformedSaveFindings - } - - transformedPubSub, err := expandDataLossPreventionJobTriggerInspectJobActionsPubSub(original["pub_sub"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPubSub); val.IsValid() && !isEmptyValue(val) { - transformed["pubSub"] = transformedPubSub - } - - transformedPublishSummaryToCscc, err := expandDataLossPreventionJobTriggerInspectJobActionsPublishSummaryToCscc(original["publish_summary_to_cscc"], d, config) - if err != nil { - return nil, err - } else { - transformed["publishSummaryToCscc"] = transformedPublishSummaryToCscc - } - - transformedPublishFindingsToCloudDataCatalog, err := expandDataLossPreventionJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog(original["publish_findings_to_cloud_data_catalog"], d, config) - if err != nil { - return nil, err - } else { - transformed["publishFindingsToCloudDataCatalog"] = transformedPublishFindingsToCloudDataCatalog - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindings(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedOutputConfig, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig(original["output_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOutputConfig); val.IsValid() && !isEmptyValue(val) { - transformed["outputConfig"] = transformedOutputConfig - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTable, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(original["table"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { - transformed["table"] = transformedTable - } - - transformedOutputSchema, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(original["output_schema"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOutputSchema); val.IsValid() && !isEmptyValue(val) { - transformed["outputSchema"] = transformedOutputSchema - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - 
transformedProjectId, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedDatasetId, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedTableId, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableTableId(original["table_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableTableId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsPubSub(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - 
if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTopic, err := expandDataLossPreventionJobTriggerInspectJobActionsPubSubTopic(original["topic"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !isEmptyValue(val) { - transformed["topic"] = transformedTopic - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsPubSubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsPublishSummaryToCscc(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - transformed := make(map[string]interface{}) - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - transformed := make(map[string]interface{}) - - return transformed, nil -} - -func resourceDataLossPreventionJobTriggerEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - newObj := make(map[string]interface{}) - newObj["jobTrigger"] = obj - return newObj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_loss_prevention_stored_info_type.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_loss_prevention_stored_info_type.go deleted file mode 100644 index d4767f4b30..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_loss_prevention_stored_info_type.go +++ /dev/null @@ -1,1067 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDataLossPreventionStoredInfoType() *schema.Resource { - return &schema.Resource{ - Create: resourceDataLossPreventionStoredInfoTypeCreate, - Read: resourceDataLossPreventionStoredInfoTypeRead, - Update: resourceDataLossPreventionStoredInfoTypeUpdate, - Delete: resourceDataLossPreventionStoredInfoTypeDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDataLossPreventionStoredInfoTypeImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent of the info type in any of the following formats: - -* 'projects/{{project}}' -* 'projects/{{project}}/locations/{{location}}' -* 'organizations/{{organization_id}}' -* 
'organizations/{{organization_id}}/locations/{{location}}'`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A description of the info type.`, - }, - "dictionary": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Dictionary which defines the rule.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cloud_storage_path": { - Type: schema.TypeList, - Optional: true, - Description: `Newline-delimited file of words in Cloud Storage. Only a single file is accepted.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "path": { - Type: schema.TypeString, - Required: true, - Description: `A url representing a file or path (no wildcards) in Cloud Storage. Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, - }, - }, - }, - ExactlyOneOf: []string{"dictionary.0.word_list", "dictionary.0.cloud_storage_path"}, - }, - "word_list": { - Type: schema.TypeList, - Optional: true, - Description: `List of words or phrases to search for.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "words": { - Type: schema.TypeList, - Required: true, - Description: `Words or phrases defining the dictionary. 
The dictionary must contain at least one -phrase and every phrase must contain at least 2 characters that are letters or digits.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - ExactlyOneOf: []string{"dictionary.0.word_list", "dictionary.0.cloud_storage_path"}, - }, - }, - }, - ExactlyOneOf: []string{"dictionary", "regex", "large_custom_dictionary"}, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `User set display name of the info type.`, - }, - "large_custom_dictionary": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Dictionary which defines the rule.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "output_path": { - Type: schema.TypeList, - Required: true, - Description: `Location to store dictionary artifacts in Google Cloud Storage. These files will only be accessible by project owners and the DLP API. -If any of these artifacts are modified, the dictionary is considered invalid and can no longer be used.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "path": { - Type: schema.TypeString, - Required: true, - Description: `A url representing a file or path (no wildcards) in Cloud Storage. 
Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, - }, - }, - }, - }, - "big_query_field": { - Type: schema.TypeList, - Optional: true, - Description: `Field in a BigQuery table where each cell represents a dictionary phrase.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field": { - Type: schema.TypeList, - Required: true, - Description: `Designated field in the BigQuery table.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `Name describing the field.`, - }, - }, - }, - }, - "table": { - Type: schema.TypeList, - Required: true, - Description: `Field in a BigQuery table where each cell represents a dictionary phrase.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset_id": { - Type: schema.TypeString, - Required: true, - Description: `The dataset ID of the table.`, - }, - "project_id": { - Type: schema.TypeString, - Required: true, - Description: `The Google Cloud Platform project ID of the project containing the table.`, - }, - "table_id": { - Type: schema.TypeString, - Required: true, - Description: `The name of the table.`, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{"large_custom_dictionary.0.cloud_storage_file_set", "large_custom_dictionary.0.big_query_field"}, - }, - "cloud_storage_file_set": { - Type: schema.TypeList, - Optional: true, - Description: `Set of files containing newline-delimited lists of dictionary phrases.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "url": { - Type: schema.TypeString, - Required: true, - Description: `The url, in the format 'gs:///'. 
Trailing wildcard in the path is allowed.`, - }, - }, - }, - ExactlyOneOf: []string{"large_custom_dictionary.0.cloud_storage_file_set", "large_custom_dictionary.0.big_query_field"}, - }, - }, - }, - ExactlyOneOf: []string{"dictionary", "regex", "large_custom_dictionary"}, - }, - "regex": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Regular expression which defines the rule.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pattern": { - Type: schema.TypeString, - Required: true, - Description: `Pattern defining the regular expression. -Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, - }, - "group_indexes": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included.`, - Elem: &schema.Schema{ - Type: schema.TypeInt, - }, - }, - }, - }, - ExactlyOneOf: []string{"dictionary", "regex", "large_custom_dictionary"}, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the info type. 
Set by the server.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataLossPreventionStoredInfoTypeCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionStoredInfoTypeDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionStoredInfoTypeDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - regexProp, err := expandDataLossPreventionStoredInfoTypeRegex(d.Get("regex"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("regex"); !isEmptyValue(reflect.ValueOf(regexProp)) && (ok || !reflect.DeepEqual(v, regexProp)) { - obj["regex"] = regexProp - } - dictionaryProp, err := expandDataLossPreventionStoredInfoTypeDictionary(d.Get("dictionary"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dictionary"); !isEmptyValue(reflect.ValueOf(dictionaryProp)) && (ok || !reflect.DeepEqual(v, dictionaryProp)) { - obj["dictionary"] = dictionaryProp - } - largeCustomDictionaryProp, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionary(d.Get("large_custom_dictionary"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("large_custom_dictionary"); !isEmptyValue(reflect.ValueOf(largeCustomDictionaryProp)) && (ok || !reflect.DeepEqual(v, largeCustomDictionaryProp)) { - 
obj["largeCustomDictionary"] = largeCustomDictionaryProp - } - - obj, err = resourceDataLossPreventionStoredInfoTypeEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new StoredInfoType: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating StoredInfoType: %s", err) - } - if err := d.Set("name", flattenDataLossPreventionStoredInfoTypeName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{parent}}/storedInfoTypes/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourceDataLossPreventionStoredInfoTypePollRead(d, meta), PollCheckForExistence, "Creating StoredInfoType", d.Timeout(schema.TimeoutCreate), 1) - if err != nil { - return fmt.Errorf("Error waiting to create StoredInfoType: %s", err) - } - - log.Printf("[DEBUG] Finished creating StoredInfoType %q: %#v", d.Id(), res) - - return resourceDataLossPreventionStoredInfoTypeRead(d, meta) -} - -func resourceDataLossPreventionStoredInfoTypePollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes/{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - res, err = resourceDataLossPreventionStoredInfoTypeDecoder(d, meta, res) - if err != nil { - return nil, err - } - if res == nil { - return nil, fake404("decoded", "DataLossPreventionStoredInfoType") - } - - return res, nil - } -} - -func resourceDataLossPreventionStoredInfoTypeRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataLossPreventionStoredInfoType %q", d.Id())) - } - - res, err = resourceDataLossPreventionStoredInfoTypeDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing DataLossPreventionStoredInfoType because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenDataLossPreventionStoredInfoTypeName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading StoredInfoType: %s", err) - } - if err := d.Set("description", flattenDataLossPreventionStoredInfoTypeDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading StoredInfoType: %s", err) - } - if err := d.Set("display_name", flattenDataLossPreventionStoredInfoTypeDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading StoredInfoType: %s", err) - } - if err := d.Set("regex", flattenDataLossPreventionStoredInfoTypeRegex(res["regex"], d, config)); err != nil { - return fmt.Errorf("Error reading StoredInfoType: %s", err) - } - if err := d.Set("dictionary", flattenDataLossPreventionStoredInfoTypeDictionary(res["dictionary"], d, config)); err != nil { - return fmt.Errorf("Error reading StoredInfoType: %s", err) - } - if err := d.Set("large_custom_dictionary", flattenDataLossPreventionStoredInfoTypeLargeCustomDictionary(res["largeCustomDictionary"], d, config)); err != nil { - return fmt.Errorf("Error reading StoredInfoType: %s", err) - } - - return nil -} - -func resourceDataLossPreventionStoredInfoTypeUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionStoredInfoTypeDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := 
expandDataLossPreventionStoredInfoTypeDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - - obj, err = resourceDataLossPreventionStoredInfoTypeEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating StoredInfoType %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating StoredInfoType %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating StoredInfoType %q: %#v", d.Id(), res) - } - - return resourceDataLossPreventionStoredInfoTypeRead(d, meta) -} - -func resourceDataLossPreventionStoredInfoTypeDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes/{{name}}") - if err != nil { - return err - } - - 
var obj map[string]interface{} - log.Printf("[DEBUG] Deleting StoredInfoType %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "StoredInfoType") - } - - log.Printf("[DEBUG] Finished deleting StoredInfoType %q: %#v", d.Id(), res) - return nil -} - -func resourceDataLossPreventionStoredInfoTypeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // Custom import to handle parent possibilities - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - parts := strings.Split(d.Get("name").(string), "/") - if len(parts) == 6 { - if err := d.Set("name", parts[5]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - } else if len(parts) == 4 { - if err := d.Set("name", parts[3]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, fmt.Errorf("Unexpected import id: %s, expected form {{parent}}/storedInfoType/{{name}}", d.Get("name").(string)) - } - // Remove "/storedInfoType/{{name}}" from the id - parts = parts[:len(parts)-2] - if err := d.Set("parent", strings.Join(parts, "/")); err != nil { - return nil, fmt.Errorf("Error setting parent: %s", err) - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{parent}}/storedInfoTypes/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDataLossPreventionStoredInfoTypeName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func 
flattenDataLossPreventionStoredInfoTypeDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pattern"] = - flattenDataLossPreventionStoredInfoTypeRegexPattern(original["pattern"], d, config) - transformed["group_indexes"] = - flattenDataLossPreventionStoredInfoTypeRegexGroupIndexes(original["groupIndexes"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionStoredInfoTypeRegexPattern(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeRegexGroupIndexes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeDictionary(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["word_list"] = - flattenDataLossPreventionStoredInfoTypeDictionaryWordList(original["wordList"], d, config) - transformed["cloud_storage_path"] = - flattenDataLossPreventionStoredInfoTypeDictionaryCloudStoragePath(original["cloudStoragePath"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionStoredInfoTypeDictionaryWordList(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["words"] = - flattenDataLossPreventionStoredInfoTypeDictionaryWordListWords(original["words"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionStoredInfoTypeDictionaryWordListWords(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeDictionaryCloudStoragePath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path"] = - flattenDataLossPreventionStoredInfoTypeDictionaryCloudStoragePathPath(original["path"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionStoredInfoTypeDictionaryCloudStoragePathPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionary(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["output_path"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPath(original["outputPath"], d, config) - transformed["cloud_storage_file_set"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(original["cloudStorageFileSet"], d, config) - transformed["big_query_field"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryField(original["bigQueryField"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if 
len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPathPath(original["path"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPathPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["url"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetUrl(original["url"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryField(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["table"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(original["table"], d, config) - transformed["field"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(original["field"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["project_id"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableProjectId(original["projectId"], d, config) - transformed["dataset_id"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableDatasetId(original["datasetId"], d, config) - transformed["table_id"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableTableId(original["tableId"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableTableId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDataLossPreventionStoredInfoTypeDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeDisplayName(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPattern, err := expandDataLossPreventionStoredInfoTypeRegexPattern(original["pattern"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPattern); val.IsValid() && !isEmptyValue(val) { - transformed["pattern"] = transformedPattern - } - - transformedGroupIndexes, err := expandDataLossPreventionStoredInfoTypeRegexGroupIndexes(original["group_indexes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !isEmptyValue(val) { - transformed["groupIndexes"] = transformedGroupIndexes - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeRegexPattern(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeRegexGroupIndexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeDictionary(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWordList, err := expandDataLossPreventionStoredInfoTypeDictionaryWordList(original["word_list"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWordList); val.IsValid() && !isEmptyValue(val) { - transformed["wordList"] = transformedWordList - } - - transformedCloudStoragePath, err := 
expandDataLossPreventionStoredInfoTypeDictionaryCloudStoragePath(original["cloud_storage_path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCloudStoragePath); val.IsValid() && !isEmptyValue(val) { - transformed["cloudStoragePath"] = transformedCloudStoragePath - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeDictionaryWordList(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWords, err := expandDataLossPreventionStoredInfoTypeDictionaryWordListWords(original["words"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWords); val.IsValid() && !isEmptyValue(val) { - transformed["words"] = transformedWords - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeDictionaryWordListWords(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeDictionaryCloudStoragePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandDataLossPreventionStoredInfoTypeDictionaryCloudStoragePathPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeDictionaryCloudStoragePathPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, 
nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionary(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedOutputPath, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPath(original["output_path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOutputPath); val.IsValid() && !isEmptyValue(val) { - transformed["outputPath"] = transformedOutputPath - } - - transformedCloudStorageFileSet, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(original["cloud_storage_file_set"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCloudStorageFileSet); val.IsValid() && !isEmptyValue(val) { - transformed["cloudStorageFileSet"] = transformedCloudStorageFileSet - } - - transformedBigQueryField, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryField(original["big_query_field"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBigQueryField); val.IsValid() && !isEmptyValue(val) { - transformed["bigQueryField"] = transformedBigQueryField - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPathPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); 
val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPathPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryField(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTable, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(original["table"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { - transformed["table"] = transformedTable - } - - transformedField, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(original["field"], d, config) - if err != nil { - return nil, err - } 
else if val := reflect.ValueOf(transformedField); val.IsValid() && !isEmptyValue(val) { - transformed["field"] = transformedField - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedDatasetId, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedTableId, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableTableId(original["table_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableTableId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDataLossPreventionStoredInfoTypeEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - newObj := make(map[string]interface{}) - newObj["config"] = obj - return newObj, nil -} - -func resourceDataLossPreventionStoredInfoTypeDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // Stored info types come back nested with previous versions. 
We only want the current - // version in the unwrapped form - name := res["name"].(string) - v, ok := res["currentVersion"] - if !ok || v == nil { - return nil, nil - } - - current := v.(map[string]interface{}) - configRaw, ok := current["config"] - if !ok || configRaw == nil { - return nil, nil - } - - config := configRaw.(map[string]interface{}) - // Name comes back on the top level, so set here - config["name"] = name - - return config, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataflow_flex_template_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataflow_flex_template_job.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataflow_flex_template_job.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_metastore_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_metastore_service.go deleted file mode 100644 index 23fbe05091..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_metastore_service.go +++ /dev/null @@ -1,1216 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDataprocMetastoreService() *schema.Resource { - return &schema.Resource{ - Create: resourceDataprocMetastoreServiceCreate, - Read: resourceDataprocMetastoreServiceRead, - Update: resourceDataprocMetastoreServiceUpdate, - Delete: resourceDataprocMetastoreServiceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDataprocMetastoreServiceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(60 * time.Minute), - Update: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(60 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "service_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), -and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between -3 and 63 characters.`, - }, - "database_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"MYSQL", "SPANNER", ""}), - Description: `The database type that the Metastore service stores its data. Default value: "MYSQL" Possible values: ["MYSQL", "SPANNER"]`, - Default: "MYSQL", - }, - "encryption_config": { - Type: schema.TypeList, - Optional: true, - Description: `Information used to configure the Dataproc Metastore service to encrypt -customer data at rest.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The fully qualified customer provided Cloud KMS key name to use for customer data encryption. 
-Use the following format: 'projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)'`, - }, - }, - }, - }, - "hive_metastore_config": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration information specific to running Hive metastore software as the metastore service.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "version": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Hive metastore schema version.`, - }, - "config_overrides": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - DiffSuppressFunc: dataprocMetastoreServiceOverrideSuppress, - Description: `A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). -The mappings override system defaults (some keys cannot be overridden)`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "kerberos_config": { - Type: schema.TypeList, - Optional: true, - Description: `Information used to configure the Hive metastore service as a service principal in a Kerberos realm.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "keytab": { - Type: schema.TypeList, - Required: true, - Description: `A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC).`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cloud_secret": { - Type: schema.TypeString, - Required: true, - Description: `The relative resource name of a Secret Manager secret version, in the following form: - -"projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".`, - }, - }, - }, - }, - "krb5_config_gcs_uri": { - Type: schema.TypeString, - Required: true, - Description: `A Cloud Storage URI that specifies the path to a krb5.conf file. 
It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.`, - }, - "principal": { - Type: schema.TypeString, - Required: true, - Description: `A Kerberos principal that exists in the both the keytab the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.`, - }, - }, - }, - }, - }, - }, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `User-defined labels for the metastore service.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "location": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The location where the metastore service should reside. -The default value is 'global'.`, - Default: "global", - }, - "maintenance_window": { - Type: schema.TypeList, - Optional: true, - Description: `The one hour maintenance window of the metastore service. -This specifies when the service can be restarted for maintenance purposes in UTC time. -Maintenance window is not needed for services with the 'SPANNER' database type.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "day_of_week": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), - Description: `The day of week, when the window starts. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "hour_of_day": { - Type: schema.TypeInt, - Required: true, - Description: `The hour of day (0-23) when the window starts.`, - }, - }, - }, - }, - "network": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The relative resource name of the VPC network on which the instance can be accessed. 
It is specified in the following form: - -"projects/{projectNumber}/global/networks/{network_id}".`, - }, - "network_config": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The configuration specifying the network settings for the Dataproc Metastore service.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "consumers": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `The consumer-side network configuration for the Dataproc Metastore instance.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "subnetwork": { - Type: schema.TypeString, - Required: true, - Description: `The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. -It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. -There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form: -'projects/{projectNumber}/regions/{region_id}/subnetworks/{subnetwork_id}`, - }, - "endpoint_uri": { - Type: schema.TypeString, - Computed: true, - Description: `The URI of the endpoint used to access the metastore service.`, - }, - }, - }, - }, - }, - }, - }, - "port": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The TCP port at which the metastore service is reached. Default: 9083.`, - }, - "release_channel": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"CANARY", "STABLE", ""}), - Description: `The release channel of the service. If unspecified, defaults to 'STABLE'. Default value: "STABLE" Possible values: ["CANARY", "STABLE"]`, - Default: "STABLE", - }, - "telemetry_config": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `The configuration specifying telemetry settings for the Dataproc Metastore service. 
If unspecified defaults to JSON.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "log_format": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"LEGACY", "JSON", ""}), - Description: `The output format of the Dataproc Metastore service's logs. Default value: "JSON" Possible values: ["LEGACY", "JSON"]`, - Default: "JSON", - }, - }, - }, - }, - "tier": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"DEVELOPER", "ENTERPRISE", ""}), - Description: `The tier of the service. Possible values: ["DEVELOPER", "ENTERPRISE"]`, - }, - "artifact_gcs_uri": { - Type: schema.TypeString, - Computed: true, - Description: `A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.`, - }, - "endpoint_uri": { - Type: schema.TypeString, - Computed: true, - Description: `The URI of the endpoint used to access the metastore service.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The relative resource name of the metastore service.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `The current state of the metastore service.`, - }, - "state_message": { - Type: schema.TypeString, - Computed: true, - Description: `Additional information about the current state of the metastore service, if available.`, - }, - "uid": { - Type: schema.TypeString, - Computed: true, - Description: `The globally unique resource identifier of the metastore service.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataprocMetastoreServiceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - 
labelsProp, err := expandDataprocMetastoreServiceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - networkProp, err := expandDataprocMetastoreServiceNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - portProp, err := expandDataprocMetastoreServicePort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(reflect.ValueOf(portProp)) && (ok || !reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - tierProp, err := expandDataprocMetastoreServiceTier(d.Get("tier"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tier"); !isEmptyValue(reflect.ValueOf(tierProp)) && (ok || !reflect.DeepEqual(v, tierProp)) { - obj["tier"] = tierProp - } - maintenanceWindowProp, err := expandDataprocMetastoreServiceMaintenanceWindow(d.Get("maintenance_window"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("maintenance_window"); !isEmptyValue(reflect.ValueOf(maintenanceWindowProp)) && (ok || !reflect.DeepEqual(v, maintenanceWindowProp)) { - obj["maintenanceWindow"] = maintenanceWindowProp - } - encryptionConfigProp, err := expandDataprocMetastoreServiceEncryptionConfig(d.Get("encryption_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("encryption_config"); !isEmptyValue(reflect.ValueOf(encryptionConfigProp)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { - obj["encryptionConfig"] = encryptionConfigProp - } - hiveMetastoreConfigProp, err := expandDataprocMetastoreServiceHiveMetastoreConfig(d.Get("hive_metastore_config"), d, config) - if err 
!= nil { - return err - } else if v, ok := d.GetOkExists("hive_metastore_config"); !isEmptyValue(reflect.ValueOf(hiveMetastoreConfigProp)) && (ok || !reflect.DeepEqual(v, hiveMetastoreConfigProp)) { - obj["hiveMetastoreConfig"] = hiveMetastoreConfigProp - } - networkConfigProp, err := expandDataprocMetastoreServiceNetworkConfig(d.Get("network_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_config"); !isEmptyValue(reflect.ValueOf(networkConfigProp)) && (ok || !reflect.DeepEqual(v, networkConfigProp)) { - obj["networkConfig"] = networkConfigProp - } - databaseTypeProp, err := expandDataprocMetastoreServiceDatabaseType(d.Get("database_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("database_type"); !isEmptyValue(reflect.ValueOf(databaseTypeProp)) && (ok || !reflect.DeepEqual(v, databaseTypeProp)) { - obj["databaseType"] = databaseTypeProp - } - releaseChannelProp, err := expandDataprocMetastoreServiceReleaseChannel(d.Get("release_channel"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("release_channel"); !isEmptyValue(reflect.ValueOf(releaseChannelProp)) && (ok || !reflect.DeepEqual(v, releaseChannelProp)) { - obj["releaseChannel"] = releaseChannelProp - } - telemetryConfigProp, err := expandDataprocMetastoreServiceTelemetryConfig(d.Get("telemetry_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("telemetry_config"); !isEmptyValue(reflect.ValueOf(telemetryConfigProp)) && (ok || !reflect.DeepEqual(v, telemetryConfigProp)) { - obj["telemetryConfig"] = telemetryConfigProp - } - - url, err := replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/services?serviceId={{service_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Service: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Service: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/services/{{service_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = DataprocMetastoreOperationWaitTime( - config, res, project, "Creating Service", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Service: %s", err) - } - - log.Printf("[DEBUG] Finished creating Service %q: %#v", d.Id(), res) - - return resourceDataprocMetastoreServiceRead(d, meta) -} - -func resourceDataprocMetastoreServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/services/{{service_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataprocMetastoreService %q", d.Id())) - 
} - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - - if err := d.Set("name", flattenDataprocMetastoreServiceName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("labels", flattenDataprocMetastoreServiceLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("network", flattenDataprocMetastoreServiceNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("endpoint_uri", flattenDataprocMetastoreServiceEndpointUri(res["endpointUri"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("port", flattenDataprocMetastoreServicePort(res["port"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("state", flattenDataprocMetastoreServiceState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("state_message", flattenDataprocMetastoreServiceStateMessage(res["stateMessage"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("artifact_gcs_uri", flattenDataprocMetastoreServiceArtifactGcsUri(res["artifactGcsUri"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("tier", flattenDataprocMetastoreServiceTier(res["tier"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("maintenance_window", flattenDataprocMetastoreServiceMaintenanceWindow(res["maintenanceWindow"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("encryption_config", flattenDataprocMetastoreServiceEncryptionConfig(res["encryptionConfig"], d, config)); err != nil { - return 
fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("hive_metastore_config", flattenDataprocMetastoreServiceHiveMetastoreConfig(res["hiveMetastoreConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("network_config", flattenDataprocMetastoreServiceNetworkConfig(res["networkConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("database_type", flattenDataprocMetastoreServiceDatabaseType(res["databaseType"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("release_channel", flattenDataprocMetastoreServiceReleaseChannel(res["releaseChannel"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("uid", flattenDataprocMetastoreServiceUid(res["uid"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("telemetry_config", flattenDataprocMetastoreServiceTelemetryConfig(res["telemetryConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - - return nil -} - -func resourceDataprocMetastoreServiceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandDataprocMetastoreServiceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - portProp, err := expandDataprocMetastoreServicePort(d.Get("port"), d, config) - if err != 
nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - tierProp, err := expandDataprocMetastoreServiceTier(d.Get("tier"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tier"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tierProp)) { - obj["tier"] = tierProp - } - maintenanceWindowProp, err := expandDataprocMetastoreServiceMaintenanceWindow(d.Get("maintenance_window"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("maintenance_window"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maintenanceWindowProp)) { - obj["maintenanceWindow"] = maintenanceWindowProp - } - encryptionConfigProp, err := expandDataprocMetastoreServiceEncryptionConfig(d.Get("encryption_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("encryption_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { - obj["encryptionConfig"] = encryptionConfigProp - } - hiveMetastoreConfigProp, err := expandDataprocMetastoreServiceHiveMetastoreConfig(d.Get("hive_metastore_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("hive_metastore_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hiveMetastoreConfigProp)) { - obj["hiveMetastoreConfig"] = hiveMetastoreConfigProp - } - telemetryConfigProp, err := expandDataprocMetastoreServiceTelemetryConfig(d.Get("telemetry_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("telemetry_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, telemetryConfigProp)) { - obj["telemetryConfig"] = telemetryConfigProp - } - - url, err := replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/services/{{service_id}}") - if 
err != nil { - return err - } - - log.Printf("[DEBUG] Updating Service %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("port") { - updateMask = append(updateMask, "port") - } - - if d.HasChange("tier") { - updateMask = append(updateMask, "tier") - } - - if d.HasChange("maintenance_window") { - updateMask = append(updateMask, "maintenanceWindow") - } - - if d.HasChange("encryption_config") { - updateMask = append(updateMask, "encryptionConfig") - } - - if d.HasChange("hive_metastore_config") { - updateMask = append(updateMask, "hiveMetastoreConfig") - } - - if d.HasChange("telemetry_config") { - updateMask = append(updateMask, "telemetryConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Service %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Service %q: %#v", d.Id(), res) - } - - err = DataprocMetastoreOperationWaitTime( - config, res, project, "Updating Service", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceDataprocMetastoreServiceRead(d, meta) -} - -func resourceDataprocMetastoreServiceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/services/{{service_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Service %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Service") - } - - err = DataprocMetastoreOperationWaitTime( - config, res, project, "Deleting Service", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) - return nil -} - -func resourceDataprocMetastoreServiceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/services/{{service_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDataprocMetastoreServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v 
-} - -func flattenDataprocMetastoreServiceEndpointUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServicePort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataprocMetastoreServiceState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceStateMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceArtifactGcsUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceTier(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceMaintenanceWindow(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hour_of_day"] = - flattenDataprocMetastoreServiceMaintenanceWindowHourOfDay(original["hourOfDay"], d, config) - transformed["day_of_week"] = - flattenDataprocMetastoreServiceMaintenanceWindowDayOfWeek(original["dayOfWeek"], d, config) - return []interface{}{transformed} -} -func flattenDataprocMetastoreServiceMaintenanceWindowHourOfDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // 
number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDataprocMetastoreServiceMaintenanceWindowDayOfWeek(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceEncryptionConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["kms_key"] = - flattenDataprocMetastoreServiceEncryptionConfigKmsKey(original["kmsKey"], d, config) - return []interface{}{transformed} -} -func flattenDataprocMetastoreServiceEncryptionConfigKmsKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceHiveMetastoreConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["version"] = - flattenDataprocMetastoreServiceHiveMetastoreConfigVersion(original["version"], d, config) - transformed["config_overrides"] = - flattenDataprocMetastoreServiceHiveMetastoreConfigConfigOverrides(original["configOverrides"], d, config) - transformed["kerberos_config"] = - flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfig(original["kerberosConfig"], d, config) - return []interface{}{transformed} -} -func flattenDataprocMetastoreServiceHiveMetastoreConfigVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceHiveMetastoreConfigConfigOverrides(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["keytab"] = - flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytab(original["keytab"], d, config) - transformed["principal"] = - flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigPrincipal(original["principal"], d, config) - transformed["krb5_config_gcs_uri"] = - flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKrb5ConfigGcsUri(original["krb5ConfigGcsUri"], d, config) - return []interface{}{transformed} -} -func flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytab(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cloud_secret"] = - flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabCloudSecret(original["cloudSecret"], d, config) - return []interface{}{transformed} -} -func flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabCloudSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigPrincipal(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKrb5ConfigGcsUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceNetworkConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - 
return nil - } - transformed := make(map[string]interface{}) - transformed["consumers"] = - flattenDataprocMetastoreServiceNetworkConfigConsumers(original["consumers"], d, config) - return []interface{}{transformed} -} -func flattenDataprocMetastoreServiceNetworkConfigConsumers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "endpoint_uri": flattenDataprocMetastoreServiceNetworkConfigConsumersEndpointUri(original["endpointUri"], d, config), - "subnetwork": flattenDataprocMetastoreServiceNetworkConfigConsumersSubnetwork(original["subnetwork"], d, config), - }) - } - return transformed -} -func flattenDataprocMetastoreServiceNetworkConfigConsumersEndpointUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceNetworkConfigConsumersSubnetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceDatabaseType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceReleaseChannel(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocMetastoreServiceTelemetryConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["log_format"] = - 
flattenDataprocMetastoreServiceTelemetryConfigLogFormat(original["logFormat"], d, config) - return []interface{}{transformed} -} -func flattenDataprocMetastoreServiceTelemetryConfigLogFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDataprocMetastoreServiceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandDataprocMetastoreServiceNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocMetastoreServicePort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocMetastoreServiceTier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocMetastoreServiceMaintenanceWindow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHourOfDay, err := expandDataprocMetastoreServiceMaintenanceWindowHourOfDay(original["hour_of_day"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHourOfDay); val.IsValid() && !isEmptyValue(val) { - transformed["hourOfDay"] = transformedHourOfDay - } - - transformedDayOfWeek, err := expandDataprocMetastoreServiceMaintenanceWindowDayOfWeek(original["day_of_week"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDayOfWeek); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeek"] = transformedDayOfWeek - } - - return transformed, nil -} - -func 
expandDataprocMetastoreServiceMaintenanceWindowHourOfDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocMetastoreServiceMaintenanceWindowDayOfWeek(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocMetastoreServiceEncryptionConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKey, err := expandDataprocMetastoreServiceEncryptionConfigKmsKey(original["kms_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKey); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKey"] = transformedKmsKey - } - - return transformed, nil -} - -func expandDataprocMetastoreServiceEncryptionConfigKmsKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocMetastoreServiceHiveMetastoreConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedVersion, err := expandDataprocMetastoreServiceHiveMetastoreConfigVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - transformedConfigOverrides, err := expandDataprocMetastoreServiceHiveMetastoreConfigConfigOverrides(original["config_overrides"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConfigOverrides); val.IsValid() 
&& !isEmptyValue(val) { - transformed["configOverrides"] = transformedConfigOverrides - } - - transformedKerberosConfig, err := expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfig(original["kerberos_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKerberosConfig); val.IsValid() && !isEmptyValue(val) { - transformed["kerberosConfig"] = transformedKerberosConfig - } - - return transformed, nil -} - -func expandDataprocMetastoreServiceHiveMetastoreConfigVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocMetastoreServiceHiveMetastoreConfigConfigOverrides(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKeytab, err := expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytab(original["keytab"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKeytab); val.IsValid() && !isEmptyValue(val) { - transformed["keytab"] = transformedKeytab - } - - transformedPrincipal, err := expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigPrincipal(original["principal"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPrincipal); val.IsValid() && !isEmptyValue(val) { - transformed["principal"] = transformedPrincipal - } - - transformedKrb5ConfigGcsUri, err := 
expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKrb5ConfigGcsUri(original["krb5_config_gcs_uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKrb5ConfigGcsUri); val.IsValid() && !isEmptyValue(val) { - transformed["krb5ConfigGcsUri"] = transformedKrb5ConfigGcsUri - } - - return transformed, nil -} - -func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytab(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCloudSecret, err := expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabCloudSecret(original["cloud_secret"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCloudSecret); val.IsValid() && !isEmptyValue(val) { - transformed["cloudSecret"] = transformedCloudSecret - } - - return transformed, nil -} - -func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabCloudSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigPrincipal(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKrb5ConfigGcsUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocMetastoreServiceNetworkConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedConsumers, err := 
expandDataprocMetastoreServiceNetworkConfigConsumers(original["consumers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConsumers); val.IsValid() && !isEmptyValue(val) { - transformed["consumers"] = transformedConsumers - } - - return transformed, nil -} - -func expandDataprocMetastoreServiceNetworkConfigConsumers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEndpointUri, err := expandDataprocMetastoreServiceNetworkConfigConsumersEndpointUri(original["endpoint_uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEndpointUri); val.IsValid() && !isEmptyValue(val) { - transformed["endpointUri"] = transformedEndpointUri - } - - transformedSubnetwork, err := expandDataprocMetastoreServiceNetworkConfigConsumersSubnetwork(original["subnetwork"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSubnetwork); val.IsValid() && !isEmptyValue(val) { - transformed["subnetwork"] = transformedSubnetwork - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataprocMetastoreServiceNetworkConfigConsumersEndpointUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocMetastoreServiceNetworkConfigConsumersSubnetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocMetastoreServiceDatabaseType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocMetastoreServiceReleaseChannel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, 
nil -} - -func expandDataprocMetastoreServiceTelemetryConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLogFormat, err := expandDataprocMetastoreServiceTelemetryConfigLogFormat(original["log_format"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLogFormat); val.IsValid() && !isEmptyValue(val) { - transformed["logFormat"] = transformedLogFormat - } - - return transformed, nil -} - -func expandDataprocMetastoreServiceTelemetryConfigLogFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_datastore_index.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_datastore_index.go deleted file mode 100644 index f68e9755df..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_datastore_index.go +++ /dev/null @@ -1,380 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDatastoreIndex() *schema.Resource { - return &schema.Resource{ - Create: resourceDatastoreIndexCreate, - Read: resourceDatastoreIndexRead, - Delete: resourceDatastoreIndexDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDatastoreIndexImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "kind": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The entity kind which the index applies to.`, - }, - "ancestor": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"NONE", "ALL_ANCESTORS", ""}), - Description: `Policy for including ancestors in the index. Default value: "NONE" Possible values: ["NONE", "ALL_ANCESTORS"]`, - Default: "NONE", - }, - "properties": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `An ordered list of properties to index on.`, - MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "direction": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"ASCENDING", "DESCENDING"}), - Description: `The direction the index should optimize for sorting. 
Possible values: ["ASCENDING", "DESCENDING"]`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The property name to index.`, - }, - }, - }, - }, - "index_id": { - Type: schema.TypeString, - Computed: true, - Description: `The index id.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDatastoreIndexCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - kindProp, err := expandDatastoreIndexKind(d.Get("kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kind"); !isEmptyValue(reflect.ValueOf(kindProp)) && (ok || !reflect.DeepEqual(v, kindProp)) { - obj["kind"] = kindProp - } - ancestorProp, err := expandDatastoreIndexAncestor(d.Get("ancestor"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ancestor"); !isEmptyValue(reflect.ValueOf(ancestorProp)) && (ok || !reflect.DeepEqual(v, ancestorProp)) { - obj["ancestor"] = ancestorProp - } - propertiesProp, err := expandDatastoreIndexProperties(d.Get("properties"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("properties"); !isEmptyValue(reflect.ValueOf(propertiesProp)) && (ok || !reflect.DeepEqual(v, propertiesProp)) { - obj["properties"] = propertiesProp - } - - url, err := replaceVars(d, config, "{{DatastoreBasePath}}projects/{{project}}/indexes") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Index: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), datastoreIndex409Contention) - if err != nil { - return fmt.Errorf("Error creating Index: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = DatastoreOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Index", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Index: %s", err) - } - - if err := d.Set("index_id", flattenDatastoreIndexIndexId(opRes["indexId"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Index %q: %#v", d.Id(), res) - - return resourceDatastoreIndexRead(d, meta) -} - -func resourceDatastoreIndexRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DatastoreBasePath}}projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, datastoreIndex409Contention) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DatastoreIndex %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - - if err := d.Set("index_id", flattenDatastoreIndexIndexId(res["indexId"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("kind", flattenDatastoreIndexKind(res["kind"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("ancestor", flattenDatastoreIndexAncestor(res["ancestor"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("properties", flattenDatastoreIndexProperties(res["properties"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - - return nil -} - -func resourceDatastoreIndexDelete(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DatastoreBasePath}}projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Index %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), datastoreIndex409Contention) - if err != nil { - return handleNotFoundError(err, d, "Index") - } - - err = DatastoreOperationWaitTime( - config, res, project, "Deleting Index", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Index %q: %#v", d.Id(), res) - return nil -} - -func resourceDatastoreIndexImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/indexes/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDatastoreIndexIndexId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDatastoreIndexKind(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenDatastoreIndexAncestor(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDatastoreIndexProperties(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDatastoreIndexPropertiesName(original["name"], d, config), - "direction": flattenDatastoreIndexPropertiesDirection(original["direction"], d, config), - }) - } - return transformed -} -func flattenDatastoreIndexPropertiesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDatastoreIndexPropertiesDirection(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDatastoreIndexKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastoreIndexAncestor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastoreIndexProperties(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDatastoreIndexPropertiesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedDirection, err := 
expandDatastoreIndexPropertiesDirection(original["direction"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDirection); val.IsValid() && !isEmptyValue(val) { - transformed["direction"] = transformedDirection - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDatastoreIndexPropertiesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastoreIndexPropertiesDirection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_datastream_private_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_datastream_private_connection.go deleted file mode 100644 index 2f542201f7..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_datastream_private_connection.go +++ /dev/null @@ -1,492 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func extractError(d *schema.ResourceData) error { - // Casts are not safe since the logic that populate it is type deterministic. 
- error := d.Get("error").([]interface{})[0].(map[string]interface{}) - message := error["message"].(string) - details := error["details"].(map[string]interface{}) - detailsJSON, _ := json.Marshal(details) - return fmt.Errorf("Failed to create PrivateConnection. %s details = %s", message, string(detailsJSON)) -} - -// waitForPrivateConnectionReady waits for a private connection state to become -// CREATED, if the state is FAILED propegate the error to the user. -func waitForPrivateConnectionReady(d *schema.ResourceData, config *Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { - if err := resourceDatastreamPrivateConnectionRead(d, config); err != nil { - return resource.NonRetryableError(err) - } - - name := d.Get("name").(string) - state := d.Get("state").(string) - if state == "CREATING" { - return resource.RetryableError(fmt.Errorf("PrivateConnection %q has state %q.", name, state)) - } else if state == "CREATED" { - log.Printf("[DEBUG] PrivateConnection %q has state %q.", name, state) - return nil - } else if state == "FAILED" { - return resource.NonRetryableError(extractError(d)) - } else { - return resource.NonRetryableError(fmt.Errorf("PrivateConnection %q has state %q.", name, state)) - } - }) -} - -func ResourceDatastreamPrivateConnection() *schema.Resource { - return &schema.Resource{ - Create: resourceDatastreamPrivateConnectionCreate, - Read: resourceDatastreamPrivateConnectionRead, - Delete: resourceDatastreamPrivateConnectionDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDatastreamPrivateConnectionImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Display name.`, - }, - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: 
true, - Description: `The name of the location this private connection is located in.`, - }, - "private_connection_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The private connectivity identifier.`, - }, - "vpc_peering_config": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `The VPC Peering configuration is used to create VPC peering -between Datastream and the consumer's VPC.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "subnet": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A free subnet for peering. (CIDR of /29)`, - }, - "vpc": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Fully qualified name of the VPC that Datastream will peer to. -Format: projects/{project}/global/{networks}/{name}`, - }, - }, - }, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Labels.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "error": { - Type: schema.TypeList, - Computed: true, - Description: `The PrivateConnection error in case of failure.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "details": { - Type: schema.TypeMap, - Optional: true, - Description: `A list of messages that carry the error details.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "message": { - Type: schema.TypeString, - Optional: true, - Description: `A message containing more information about the error that occurred.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource's name.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `State of the PrivateConnection.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDatastreamPrivateConnectionCreate(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandDatastreamPrivateConnectionLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - displayNameProp, err := expandDatastreamPrivateConnectionDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - vpcPeeringConfigProp, err := expandDatastreamPrivateConnectionVPCPeeringConfig(d.Get("vpc_peering_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vpc_peering_config"); !isEmptyValue(reflect.ValueOf(vpcPeeringConfigProp)) && (ok || !reflect.DeepEqual(v, vpcPeeringConfigProp)) { - obj["vpcPeeringConfig"] = vpcPeeringConfigProp - } - - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/privateConnections?privateConnectionId={{private_connection_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new PrivateConnection: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for PrivateConnection: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error 
creating PrivateConnection: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = DatastreamOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating PrivateConnection", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create PrivateConnection: %s", err) - } - - if err := d.Set("name", flattenDatastreamPrivateConnectionName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := waitForPrivateConnectionReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { - return fmt.Errorf("Error waiting for PrivateConnection %q to be CREATED. 
%q", d.Get("name").(string), err) - } - - log.Printf("[DEBUG] Finished creating PrivateConnection %q: %#v", d.Id(), res) - - return resourceDatastreamPrivateConnectionRead(d, meta) -} - -func resourceDatastreamPrivateConnectionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for PrivateConnection: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DatastreamPrivateConnection %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading PrivateConnection: %s", err) - } - - if err := d.Set("name", flattenDatastreamPrivateConnectionName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading PrivateConnection: %s", err) - } - if err := d.Set("labels", flattenDatastreamPrivateConnectionLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading PrivateConnection: %s", err) - } - if err := d.Set("display_name", flattenDatastreamPrivateConnectionDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading PrivateConnection: %s", err) - } - if err := d.Set("state", flattenDatastreamPrivateConnectionState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading PrivateConnection: %s", err) - } - if err := 
d.Set("error", flattenDatastreamPrivateConnectionError(res["error"], d, config)); err != nil { - return fmt.Errorf("Error reading PrivateConnection: %s", err) - } - if err := d.Set("vpc_peering_config", flattenDatastreamPrivateConnectionVPCPeeringConfig(res["vpcPeeringConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading PrivateConnection: %s", err) - } - - return nil -} - -func resourceDatastreamPrivateConnectionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for PrivateConnection: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting PrivateConnection %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "PrivateConnection") - } - - err = DatastreamOperationWaitTime( - config, res, project, "Deleting PrivateConnection", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting PrivateConnection %q: %#v", d.Id(), res) - return nil -} - -func resourceDatastreamPrivateConnectionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/privateConnections/(?P[^/]+)", - 
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := waitForPrivateConnectionReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { - return nil, fmt.Errorf("Error waiting for PrivateConnection %q to be CREATED during importing: %q", d.Get("name").(string), err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenDatastreamPrivateConnectionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDatastreamPrivateConnectionLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDatastreamPrivateConnectionDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDatastreamPrivateConnectionState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDatastreamPrivateConnectionError(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["message"] = - flattenDatastreamPrivateConnectionErrorMessage(original["message"], d, config) - transformed["details"] = - flattenDatastreamPrivateConnectionErrorDetails(original["details"], d, config) - return []interface{}{transformed} -} -func flattenDatastreamPrivateConnectionErrorMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDatastreamPrivateConnectionErrorDetails(v interface{}, d *schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenDatastreamPrivateConnectionVPCPeeringConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["vpc"] = - flattenDatastreamPrivateConnectionVPCPeeringConfigVPC(original["vpc"], d, config) - transformed["subnet"] = - flattenDatastreamPrivateConnectionVPCPeeringConfigSubnet(original["subnet"], d, config) - return []interface{}{transformed} -} -func flattenDatastreamPrivateConnectionVPCPeeringConfigVPC(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDatastreamPrivateConnectionVPCPeeringConfigSubnet(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDatastreamPrivateConnectionLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandDatastreamPrivateConnectionDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastreamPrivateConnectionVPCPeeringConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedVPC, err := expandDatastreamPrivateConnectionVPCPeeringConfigVPC(original["vpc"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVPC); val.IsValid() && !isEmptyValue(val) { - transformed["vpc"] = transformedVPC - } - - transformedSubnet, err := 
expandDatastreamPrivateConnectionVPCPeeringConfigSubnet(original["subnet"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSubnet); val.IsValid() && !isEmptyValue(val) { - transformed["subnet"] = transformedSubnet - } - - return transformed, nil -} - -func expandDatastreamPrivateConnectionVPCPeeringConfigVPC(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastreamPrivateConnectionVPCPeeringConfigSubnet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_deployment_manager_deployment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_deployment_manager_deployment.go deleted file mode 100644 index 776bff2029..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_deployment_manager_deployment.go +++ /dev/null @@ -1,745 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func customDiffDeploymentManagerDeployment(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { - if preview := d.Get("preview").(bool); preview { - log.Printf("[WARN] Deployment preview set to true - Terraform will treat Deployment as recreate-only") - - if d.HasChange("preview") { - if err := d.ForceNew("preview"); err != nil { - return err - } - } - - if d.HasChange("target") { - if err := d.ForceNew("target"); err != nil { - return err - } - } - - if d.HasChange("labels") { - if err := d.ForceNew("labels"); err != nil { - return err - } - } - } - return nil -} - -func ResourceDeploymentManagerDeployment() *schema.Resource { - return &schema.Resource{ - Create: resourceDeploymentManagerDeploymentCreate, - Read: resourceDeploymentManagerDeploymentRead, - Update: resourceDeploymentManagerDeploymentUpdate, - Delete: resourceDeploymentManagerDeploymentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDeploymentManagerDeploymentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(60 * time.Minute), - Update: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(60 * time.Minute), - }, - - CustomizeDiff: customDiffDeploymentManagerDeployment, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Unique name for the deployment`, - }, - "target": { - Type: schema.TypeList, - Required: true, - Description: `Parameters that define your deployment, including the deployment -configuration and relevant templates.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "config": { - Type: schema.TypeList, - Required: true, - Description: `The root configuration file 
to use for this deployment.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "content": { - Type: schema.TypeString, - Required: true, - Description: `The full YAML contents of your configuration file.`, - }, - }, - }, - }, - "imports": { - Type: schema.TypeList, - Optional: true, - Description: `Specifies import files for this configuration. This can be -used to import templates or other files. For example, you might -import a text file in order to use the file in a template.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "content": { - Type: schema.TypeString, - Optional: true, - Description: `The full contents of the template that you want to import.`, - }, - "name": { - Type: schema.TypeString, - Optional: true, - Description: `The name of the template to import, as declared in the YAML -configuration.`, - }, - }, - }, - }, - }, - }, - }, - "create_policy": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"ACQUIRE", "CREATE_OR_ACQUIRE", ""}), - Description: `Set the policy to use for creating new resources. Only used on -create and update. Valid values are 'CREATE_OR_ACQUIRE' (default) or -'ACQUIRE'. If set to 'ACQUIRE' and resources do not already exist, -the deployment will fail. Note that updating this field does not -actually affect the deployment, just how it is updated. Default value: "CREATE_OR_ACQUIRE" Possible values: ["ACQUIRE", "CREATE_OR_ACQUIRE"]`, - Default: "CREATE_OR_ACQUIRE", - }, - "delete_policy": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"ABANDON", "DELETE", ""}), - Description: `Set the policy to use for deleting new resources on update/delete. -Valid values are 'DELETE' (default) or 'ABANDON'. If 'DELETE', -resource is deleted after removal from Deployment Manager. If -'ABANDON', the resource is only removed from Deployment Manager -and is not actually deleted. 
Note that updating this field does not -actually change the deployment, just how it is updated. Default value: "DELETE" Possible values: ["ABANDON", "DELETE"]`, - Default: "DELETE", - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Optional user-provided description of deployment.`, - }, - "labels": { - Type: schema.TypeSet, - Optional: true, - Description: `Key-value pairs to apply to this labels.`, - Elem: deploymentmanagerDeploymentLabelsSchema(), - // Default schema.HashSchema is used. - }, - "preview": { - Type: schema.TypeBool, - Optional: true, - Description: `If set to true, a deployment is created with "shell" resources -that are not actually instantiated. This allows you to preview a -deployment. It can be updated to false to actually deploy -with real resources. - ~>**NOTE:** Deployment Manager does not allow update -of a deployment in preview (unless updating to preview=false). Thus, -Terraform will force-recreate deployments if either preview is updated -to true or if other fields are updated while preview is true.`, - Default: false, - }, - "deployment_id": { - Type: schema.TypeString, - Computed: true, - Description: `Unique identifier for deployment. Output only.`, - }, - "manifest": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. URL of the manifest representing the last manifest that -was successfully deployed.`, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. 
Server defined URL for the resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func deploymentmanagerDeploymentLabelsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Optional: true, - Description: `Key for label.`, - }, - "value": { - Type: schema.TypeString, - Optional: true, - Description: `Value of label.`, - }, - }, - } -} - -func resourceDeploymentManagerDeploymentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandDeploymentManagerDeploymentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandDeploymentManagerDeploymentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandDeploymentManagerDeploymentLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); ok || !reflect.DeepEqual(v, labelsProp) { - obj["labels"] = labelsProp - } - targetProp, err := expandDeploymentManagerDeploymentTarget(d.Get("target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(targetProp)) && (ok || !reflect.DeepEqual(v, targetProp)) { - obj["target"] = targetProp - } - - url, err := replaceVars(d, config, 
"{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments?preview={{preview}}&createPolicy={{create_policy}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Deployment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Deployment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Deployment: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/deployments/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = DeploymentManagerOperationWaitTime( - config, res, project, "Creating Deployment", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - resourceDeploymentManagerDeploymentPostCreateFailure(d, meta) - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Deployment: %s", err) - } - - log.Printf("[DEBUG] Finished creating Deployment %q: %#v", d.Id(), res) - - return resourceDeploymentManagerDeploymentRead(d, meta) -} - -func resourceDeploymentManagerDeploymentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Deployment: %s", err) - } - billingProject 
= project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DeploymentManagerDeployment %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Deployment: %s", err) - } - - if err := d.Set("name", flattenDeploymentManagerDeploymentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Deployment: %s", err) - } - if err := d.Set("description", flattenDeploymentManagerDeploymentDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Deployment: %s", err) - } - if err := d.Set("labels", flattenDeploymentManagerDeploymentLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Deployment: %s", err) - } - if err := d.Set("deployment_id", flattenDeploymentManagerDeploymentDeploymentId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading Deployment: %s", err) - } - if err := d.Set("manifest", flattenDeploymentManagerDeploymentManifest(res["manifest"], d, config)); err != nil { - return fmt.Errorf("Error reading Deployment: %s", err) - } - if err := d.Set("self_link", flattenDeploymentManagerDeploymentSelfLink(res["selfLink"], d, config)); err != nil { - return fmt.Errorf("Error reading Deployment: %s", err) - } - - return nil -} - -func resourceDeploymentManagerDeploymentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Deployment: %s", err) - } - billingProject = project - - d.Partial(true) - - if 
d.HasChange("preview") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := SendRequest(config, "GET", billingProject, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DeploymentManagerDeployment %q", d.Id())) - } - - obj["fingerprint"] = getRes["fingerprint"] - - url, err := replaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}?preview={{preview}}&createPolicy={{create_policy}}&deletePolicy={{delete_policy}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Deployment %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Deployment %q: %#v", d.Id(), res) - } - - err = DeploymentManagerOperationWaitTime( - config, res, project, "Updating Deployment", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("description") || d.HasChange("labels") || d.HasChange("target") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := SendRequest(config, "GET", billingProject, getUrl, 
userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DeploymentManagerDeployment %q", d.Id())) - } - - obj["fingerprint"] = getRes["fingerprint"] - - descriptionProp, err := expandDeploymentManagerDeploymentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandDeploymentManagerDeploymentLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); ok || !reflect.DeepEqual(v, labelsProp) { - obj["labels"] = labelsProp - } - targetProp, err := expandDeploymentManagerDeploymentTarget(d.Get("target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetProp)) { - obj["target"] = targetProp - } - - url, err := replaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}?preview={{preview}}&createPolicy={{create_policy}}&deletePolicy={{delete_policy}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Deployment %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Deployment %q: %#v", d.Id(), res) - } - - err = DeploymentManagerOperationWaitTime( - config, res, project, "Updating Deployment", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceDeploymentManagerDeploymentRead(d, meta) -} - -func 
resourceDeploymentManagerDeploymentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Deployment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}?deletePolicy={{delete_policy}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Deployment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Deployment") - } - - err = DeploymentManagerOperationWaitTime( - config, res, project, "Deleting Deployment", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Deployment %q: %#v", d.Id(), res) - return nil -} - -func resourceDeploymentManagerDeploymentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/deployments/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/deployments/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDeploymentManagerDeploymentName(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenDeploymentManagerDeploymentDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDeploymentManagerDeploymentLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(schema.HashResource(deploymentmanagerDeploymentLabelsSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "key": flattenDeploymentManagerDeploymentLabelsKey(original["key"], d, config), - "value": flattenDeploymentManagerDeploymentLabelsValue(original["value"], d, config), - }) - } - return transformed -} -func flattenDeploymentManagerDeploymentLabelsKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDeploymentManagerDeploymentLabelsValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDeploymentManagerDeploymentDeploymentId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDeploymentManagerDeploymentManifest(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDeploymentManagerDeploymentSelfLink(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDeploymentManagerDeploymentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDeploymentManagerDeploymentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDeploymentManagerDeploymentLabels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l 
:= v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandDeploymentManagerDeploymentLabelsKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - transformedValue, err := expandDeploymentManagerDeploymentLabelsValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDeploymentManagerDeploymentLabelsKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDeploymentManagerDeploymentLabelsValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDeploymentManagerDeploymentTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedConfig, err := expandDeploymentManagerDeploymentTargetConfig(original["config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConfig); val.IsValid() && !isEmptyValue(val) { - transformed["config"] = transformedConfig - } - - transformedImports, err := expandDeploymentManagerDeploymentTargetImports(original["imports"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedImports); val.IsValid() && !isEmptyValue(val) { - transformed["imports"] = transformedImports - } - 
- return transformed, nil -} - -func expandDeploymentManagerDeploymentTargetConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedContent, err := expandDeploymentManagerDeploymentTargetConfigContent(original["content"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContent); val.IsValid() && !isEmptyValue(val) { - transformed["content"] = transformedContent - } - - return transformed, nil -} - -func expandDeploymentManagerDeploymentTargetConfigContent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDeploymentManagerDeploymentTargetImports(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedContent, err := expandDeploymentManagerDeploymentTargetImportsContent(original["content"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContent); val.IsValid() && !isEmptyValue(val) { - transformed["content"] = transformedContent - } - - transformedName, err := expandDeploymentManagerDeploymentTargetImportsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDeploymentManagerDeploymentTargetImportsContent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDeploymentManagerDeploymentTargetImportsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDeploymentManagerDeploymentPostCreateFailure(d *schema.ResourceData, meta interface{}) { - log.Printf("[WARN] Attempt to clean up Deployment if it still exists") - var cleanErr error - if cleanErr = resourceDeploymentManagerDeploymentRead(d, meta); cleanErr == nil { - if d.Id() != "" { - log.Printf("[WARN] Deployment %q still exists, attempting to delete...", d.Id()) - if cleanErr = resourceDeploymentManagerDeploymentDelete(d, meta); cleanErr == nil { - log.Printf("[WARN] Invalid Deployment was successfully deleted") - d.SetId("") - } - } - } - if cleanErr != nil { - log.Printf("[WARN] Could not confirm cleanup of Deployment if created in error state: %v", cleanErr) - } -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_agent.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_agent.go deleted file mode 100644 index aac1459ece..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_agent.go +++ /dev/null @@ -1,571 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceDialogflowAgent() *schema.Resource { - return &schema.Resource{ - Create: resourceDialogflowAgentCreate, - Read: resourceDialogflowAgentRead, - Update: resourceDialogflowAgentUpdate, - Delete: resourceDialogflowAgentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDialogflowAgentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(40 * time.Minute), - Update: schema.DefaultTimeout(40 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "default_language_code": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The default language of the agent as a language tag. [See Language Support](https://cloud.google.com/dialogflow/docs/reference/language) -for a list of the currently supported language codes. This field cannot be updated after creation.`, - }, - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `The name of this agent.`, - }, - "time_zone": { - Type: schema.TypeString, - Required: true, - Description: `The time zone of this agent from the [time zone database](https://www.iana.org/time-zones), e.g., America/New_York, -Europe/Paris.`, - }, - "api_version": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"API_VERSION_V1", "API_VERSION_V2", "API_VERSION_V2_BETA_1", ""}), - Description: `API version displayed in Dialogflow console. If not specified, V2 API is assumed. Clients are free to query -different service endpoints for different API versions. However, bots connectors and webhook calls will follow -the specified API version. 
-* API_VERSION_V1: Legacy V1 API. -* API_VERSION_V2: V2 API. -* API_VERSION_V2_BETA_1: V2beta1 API. Possible values: ["API_VERSION_V1", "API_VERSION_V2", "API_VERSION_V2_BETA_1"]`, - }, - "avatar_uri": { - Type: schema.TypeString, - Optional: true, - Description: `The URI of the agent's avatar, which are used throughout the Dialogflow console. When an image URL is entered -into this field, the Dialogflow will save the image in the backend. The address of the backend image returned -from the API will be shown in the [avatarUriBackend] field.`, - }, - "classification_threshold": { - Type: schema.TypeFloat, - Optional: true, - Description: `To filter out false positive results and still get variety in matched natural language inputs for your agent, -you can tune the machine learning classification threshold. If the returned score value is less than the threshold -value, then a fallback intent will be triggered or, if there are no fallback intents defined, no intent will be -triggered. The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). If set to 0.0, the -default of 0.3 is used.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 500), - Description: `The description of this agent. The maximum length is 500 characters. If exceeded, the request is rejected.`, - }, - "enable_logging": { - Type: schema.TypeBool, - Optional: true, - Description: `Determines whether this agent should log conversation queries.`, - }, - "match_mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"MATCH_MODE_HYBRID", "MATCH_MODE_ML_ONLY", ""}), - Description: `Determines how intents are detected from user queries. -* MATCH_MODE_HYBRID: Best for agents with a small number of examples in intents and/or wide use of templates -syntax and composite entities. 
-* MATCH_MODE_ML_ONLY: Can be used for agents with a large number of examples in intents, especially the ones -using @sys.any or very large developer entities. Possible values: ["MATCH_MODE_HYBRID", "MATCH_MODE_ML_ONLY"]`, - }, - "supported_language_codes": { - Type: schema.TypeList, - Optional: true, - Description: `The list of all languages supported by this agent (except for the defaultLanguageCode).`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "tier": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"TIER_STANDARD", "TIER_ENTERPRISE", "TIER_ENTERPRISE_PLUS", ""}), - Description: `The agent tier. If not specified, TIER_STANDARD is assumed. -* TIER_STANDARD: Standard tier. -* TIER_ENTERPRISE: Enterprise tier (Essentials). -* TIER_ENTERPRISE_PLUS: Enterprise tier (Plus). -NOTE: Due to consistency issues, the provider will not read this field from the API. Drift is possible between -the Terraform state and Dialogflow if the agent tier is changed outside of Terraform. Possible values: ["TIER_STANDARD", "TIER_ENTERPRISE", "TIER_ENTERPRISE_PLUS"]`, - }, - "avatar_uri_backend": { - Type: schema.TypeString, - Computed: true, - Description: `The URI of the agent's avatar as returned from the API. Output only. 
To provide an image URL for the agent avatar, -the [avatarUri] field can be used.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowAgentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowAgentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - defaultLanguageCodeProp, err := expandDialogflowAgentDefaultLanguageCode(d.Get("default_language_code"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_language_code"); !isEmptyValue(reflect.ValueOf(defaultLanguageCodeProp)) && (ok || !reflect.DeepEqual(v, defaultLanguageCodeProp)) { - obj["defaultLanguageCode"] = defaultLanguageCodeProp - } - supportedLanguageCodesProp, err := expandDialogflowAgentSupportedLanguageCodes(d.Get("supported_language_codes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("supported_language_codes"); !isEmptyValue(reflect.ValueOf(supportedLanguageCodesProp)) && (ok || !reflect.DeepEqual(v, supportedLanguageCodesProp)) { - obj["supportedLanguageCodes"] = supportedLanguageCodesProp - } - timeZoneProp, err := expandDialogflowAgentTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(reflect.ValueOf(timeZoneProp)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - descriptionProp, err := expandDialogflowAgentDescription(d.Get("description"), d, config) - if err != nil { - 
return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - avatarUriProp, err := expandDialogflowAgentAvatarUri(d.Get("avatar_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("avatar_uri"); !isEmptyValue(reflect.ValueOf(avatarUriProp)) && (ok || !reflect.DeepEqual(v, avatarUriProp)) { - obj["avatarUri"] = avatarUriProp - } - enableLoggingProp, err := expandDialogflowAgentEnableLogging(d.Get("enable_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_logging"); !isEmptyValue(reflect.ValueOf(enableLoggingProp)) && (ok || !reflect.DeepEqual(v, enableLoggingProp)) { - obj["enableLogging"] = enableLoggingProp - } - matchModeProp, err := expandDialogflowAgentMatchMode(d.Get("match_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("match_mode"); !isEmptyValue(reflect.ValueOf(matchModeProp)) && (ok || !reflect.DeepEqual(v, matchModeProp)) { - obj["matchMode"] = matchModeProp - } - classificationThresholdProp, err := expandDialogflowAgentClassificationThreshold(d.Get("classification_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("classification_threshold"); !isEmptyValue(reflect.ValueOf(classificationThresholdProp)) && (ok || !reflect.DeepEqual(v, classificationThresholdProp)) { - obj["classificationThreshold"] = classificationThresholdProp - } - apiVersionProp, err := expandDialogflowAgentApiVersion(d.Get("api_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("api_version"); !isEmptyValue(reflect.ValueOf(apiVersionProp)) && (ok || !reflect.DeepEqual(v, apiVersionProp)) { - obj["apiVersion"] = apiVersionProp - } - tierProp, err := expandDialogflowAgentTier(d.Get("tier"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("tier"); !isEmptyValue(reflect.ValueOf(tierProp)) && (ok || !reflect.DeepEqual(v, tierProp)) { - obj["tier"] = tierProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Agent: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Agent: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{project}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Agent %q: %#v", d.Id(), res) - - return resourceDialogflowAgentRead(d, meta) -} - -func resourceDialogflowAgentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DialogflowAgent %q", d.Id())) - } - - 
if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - - if err := d.Set("display_name", flattenDialogflowAgentDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("default_language_code", flattenDialogflowAgentDefaultLanguageCode(res["defaultLanguageCode"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("supported_language_codes", flattenDialogflowAgentSupportedLanguageCodes(res["supportedLanguageCodes"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("time_zone", flattenDialogflowAgentTimeZone(res["timeZone"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("description", flattenDialogflowAgentDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("avatar_uri_backend", flattenDialogflowAgentAvatarUriBackend(res["avatarUri"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("enable_logging", flattenDialogflowAgentEnableLogging(res["enableLogging"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("match_mode", flattenDialogflowAgentMatchMode(res["matchMode"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("classification_threshold", flattenDialogflowAgentClassificationThreshold(res["classificationThreshold"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("api_version", flattenDialogflowAgentApiVersion(res["apiVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - - return nil -} - -func resourceDialogflowAgentUpdate(d *schema.ResourceData, meta interface{}) 
error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowAgentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - defaultLanguageCodeProp, err := expandDialogflowAgentDefaultLanguageCode(d.Get("default_language_code"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_language_code"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultLanguageCodeProp)) { - obj["defaultLanguageCode"] = defaultLanguageCodeProp - } - supportedLanguageCodesProp, err := expandDialogflowAgentSupportedLanguageCodes(d.Get("supported_language_codes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("supported_language_codes"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, supportedLanguageCodesProp)) { - obj["supportedLanguageCodes"] = supportedLanguageCodesProp - } - timeZoneProp, err := expandDialogflowAgentTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - descriptionProp, err := expandDialogflowAgentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - 
avatarUriProp, err := expandDialogflowAgentAvatarUri(d.Get("avatar_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("avatar_uri"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, avatarUriProp)) { - obj["avatarUri"] = avatarUriProp - } - enableLoggingProp, err := expandDialogflowAgentEnableLogging(d.Get("enable_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_logging"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableLoggingProp)) { - obj["enableLogging"] = enableLoggingProp - } - matchModeProp, err := expandDialogflowAgentMatchMode(d.Get("match_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("match_mode"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, matchModeProp)) { - obj["matchMode"] = matchModeProp - } - classificationThresholdProp, err := expandDialogflowAgentClassificationThreshold(d.Get("classification_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("classification_threshold"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, classificationThresholdProp)) { - obj["classificationThreshold"] = classificationThresholdProp - } - apiVersionProp, err := expandDialogflowAgentApiVersion(d.Get("api_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("api_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, apiVersionProp)) { - obj["apiVersion"] = apiVersionProp - } - tierProp, err := expandDialogflowAgentTier(d.Get("tier"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tier"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tierProp)) { - obj["tier"] = tierProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating 
Agent %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Agent %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Agent %q: %#v", d.Id(), res) - } - - return resourceDialogflowAgentRead(d, meta) -} - -func resourceDialogflowAgentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Agent %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Agent") - } - - log.Printf("[DEBUG] Finished deleting Agent %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowAgentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - 
d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDialogflowAgentDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentDefaultLanguageCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentSupportedLanguageCodes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentTimeZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentAvatarUriBackend(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentEnableLogging(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentMatchMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentClassificationThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentApiVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowAgentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentDefaultLanguageCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentSupportedLanguageCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentDescription(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentAvatarUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentEnableLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentMatchMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentClassificationThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentApiVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentTier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_agent.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_agent.go deleted file mode 100644 index 430d68b4d2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_agent.go +++ /dev/null @@ -1,641 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceDialogflowCXAgent() *schema.Resource { - return &schema.Resource{ - Create: resourceDialogflowCXAgentCreate, - Read: resourceDialogflowCXAgentRead, - Update: resourceDialogflowCXAgentUpdate, - Delete: resourceDialogflowCXAgentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDialogflowCXAgentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(40 * time.Minute), - Update: schema.DefaultTimeout(40 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "default_language_code": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The default language of the agent as a language tag. [See Language Support](https://cloud.google.com/dialogflow/cx/docs/reference/language) -for a list of the currently supported language codes. This field cannot be updated after creation.`, - }, - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `The human-readable name of the agent, unique within the location.`, - }, - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the location this agent is located in. - -~> **Note:** The first time you are deploying an Agent in your project you must configure location settings. - This is a one time step but at the moment you can only [configure location settings](https://cloud.google.com/dialogflow/cx/docs/concept/region#location-settings) via the Dialogflow CX console. 
- Another options is to use global location so you don't need to manually configure location settings.`, - }, - "time_zone": { - Type: schema.TypeString, - Required: true, - Description: `The time zone of this agent from the [time zone database](https://www.iana.org/time-zones), e.g., America/New_York, -Europe/Paris.`, - }, - "avatar_uri": { - Type: schema.TypeString, - Optional: true, - Description: `The URI of the agent's avatar. Avatars are used throughout the Dialogflow console and in the self-hosted Web Demo integration.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 500), - Description: `The description of this agent. The maximum length is 500 characters. If exceeded, the request is rejected.`, - }, - "enable_spell_correction": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates if automatic spell correction is enabled in detect intent requests.`, - }, - "enable_stackdriver_logging": { - Type: schema.TypeBool, - Optional: true, - Description: `Determines whether this agent should log conversation queries.`, - }, - "security_settings": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the SecuritySettings reference for the agent. 
Format: projects//locations//securitySettings/.`, - }, - "speech_to_text_settings": { - Type: schema.TypeList, - Optional: true, - Description: `Settings related to speech recognition.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enable_speech_adaptation": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether to use speech adaptation for speech recognition.`, - }, - }, - }, - }, - "supported_language_codes": { - Type: schema.TypeList, - Optional: true, - Description: `The list of all languages supported by this agent (except for the default_language_code).`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The unique identifier of the agent.`, - }, - "start_flow": { - Type: schema.TypeString, - Computed: true, - Description: `Name of the start flow in this agent. A start flow will be automatically created when the agent is created, and can only be deleted by deleting the agent. 
Format: projects//locations//agents//flows/.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowCXAgentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXAgentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - defaultLanguageCodeProp, err := expandDialogflowCXAgentDefaultLanguageCode(d.Get("default_language_code"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_language_code"); !isEmptyValue(reflect.ValueOf(defaultLanguageCodeProp)) && (ok || !reflect.DeepEqual(v, defaultLanguageCodeProp)) { - obj["defaultLanguageCode"] = defaultLanguageCodeProp - } - supportedLanguageCodesProp, err := expandDialogflowCXAgentSupportedLanguageCodes(d.Get("supported_language_codes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("supported_language_codes"); !isEmptyValue(reflect.ValueOf(supportedLanguageCodesProp)) && (ok || !reflect.DeepEqual(v, supportedLanguageCodesProp)) { - obj["supportedLanguageCodes"] = supportedLanguageCodesProp - } - timeZoneProp, err := expandDialogflowCXAgentTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(reflect.ValueOf(timeZoneProp)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - descriptionProp, err := expandDialogflowCXAgentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if 
v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - avatarUriProp, err := expandDialogflowCXAgentAvatarUri(d.Get("avatar_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("avatar_uri"); !isEmptyValue(reflect.ValueOf(avatarUriProp)) && (ok || !reflect.DeepEqual(v, avatarUriProp)) { - obj["avatarUri"] = avatarUriProp - } - speechToTextSettingsProp, err := expandDialogflowCXAgentSpeechToTextSettings(d.Get("speech_to_text_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("speech_to_text_settings"); !isEmptyValue(reflect.ValueOf(speechToTextSettingsProp)) && (ok || !reflect.DeepEqual(v, speechToTextSettingsProp)) { - obj["speechToTextSettings"] = speechToTextSettingsProp - } - securitySettingsProp, err := expandDialogflowCXAgentSecuritySettings(d.Get("security_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("security_settings"); !isEmptyValue(reflect.ValueOf(securitySettingsProp)) && (ok || !reflect.DeepEqual(v, securitySettingsProp)) { - obj["securitySettings"] = securitySettingsProp - } - enableStackdriverLoggingProp, err := expandDialogflowCXAgentEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_stackdriver_logging"); !isEmptyValue(reflect.ValueOf(enableStackdriverLoggingProp)) && (ok || !reflect.DeepEqual(v, enableStackdriverLoggingProp)) { - obj["enableStackdriverLogging"] = enableStackdriverLoggingProp - } - enableSpellCorrectionProp, err := expandDialogflowCXAgentEnableSpellCorrection(d.Get("enable_spell_correction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_spell_correction"); !isEmptyValue(reflect.ValueOf(enableSpellCorrectionProp)) && (ok || !reflect.DeepEqual(v, 
enableSpellCorrectionProp)) { - obj["enableSpellCorrection"] = enableSpellCorrectionProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}projects/{{project}}/locations/{{location}}/agents") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Agent: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Agent: %s", err) - } - if err := d.Set("name", flattenDialogflowCXAgentName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/agents/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Agent %q: %#v", d.Id(), res) - - return resourceDialogflowCXAgentRead(d, meta) -} - -func resourceDialogflowCXAgentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}projects/{{project}}/locations/{{location}}/agents/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DialogflowCXAgent %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - - if err := d.Set("name", flattenDialogflowCXAgentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("display_name", flattenDialogflowCXAgentDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("default_language_code", flattenDialogflowCXAgentDefaultLanguageCode(res["defaultLanguageCode"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("supported_language_codes", flattenDialogflowCXAgentSupportedLanguageCodes(res["supportedLanguageCodes"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("time_zone", flattenDialogflowCXAgentTimeZone(res["timeZone"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("description", flattenDialogflowCXAgentDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("avatar_uri", flattenDialogflowCXAgentAvatarUri(res["avatarUri"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("speech_to_text_settings", flattenDialogflowCXAgentSpeechToTextSettings(res["speechToTextSettings"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("start_flow", flattenDialogflowCXAgentStartFlow(res["startFlow"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("security_settings", 
flattenDialogflowCXAgentSecuritySettings(res["securitySettings"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("enable_stackdriver_logging", flattenDialogflowCXAgentEnableStackdriverLogging(res["enableStackdriverLogging"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("enable_spell_correction", flattenDialogflowCXAgentEnableSpellCorrection(res["enableSpellCorrection"], d, config)); err != nil { - return fmt.Errorf("Error reading Agent: %s", err) - } - - return nil -} - -func resourceDialogflowCXAgentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXAgentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - supportedLanguageCodesProp, err := expandDialogflowCXAgentSupportedLanguageCodes(d.Get("supported_language_codes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("supported_language_codes"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, supportedLanguageCodesProp)) { - obj["supportedLanguageCodes"] = supportedLanguageCodesProp - } - timeZoneProp, err := expandDialogflowCXAgentTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - 
descriptionProp, err := expandDialogflowCXAgentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - avatarUriProp, err := expandDialogflowCXAgentAvatarUri(d.Get("avatar_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("avatar_uri"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, avatarUriProp)) { - obj["avatarUri"] = avatarUriProp - } - speechToTextSettingsProp, err := expandDialogflowCXAgentSpeechToTextSettings(d.Get("speech_to_text_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("speech_to_text_settings"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, speechToTextSettingsProp)) { - obj["speechToTextSettings"] = speechToTextSettingsProp - } - securitySettingsProp, err := expandDialogflowCXAgentSecuritySettings(d.Get("security_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("security_settings"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, securitySettingsProp)) { - obj["securitySettings"] = securitySettingsProp - } - enableStackdriverLoggingProp, err := expandDialogflowCXAgentEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_stackdriver_logging"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableStackdriverLoggingProp)) { - obj["enableStackdriverLogging"] = enableStackdriverLoggingProp - } - enableSpellCorrectionProp, err := expandDialogflowCXAgentEnableSpellCorrection(d.Get("enable_spell_correction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_spell_correction"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
enableSpellCorrectionProp)) { - obj["enableSpellCorrection"] = enableSpellCorrectionProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}projects/{{project}}/locations/{{location}}/agents/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Agent %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("supported_language_codes") { - updateMask = append(updateMask, "supportedLanguageCodes") - } - - if d.HasChange("time_zone") { - updateMask = append(updateMask, "timeZone") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("avatar_uri") { - updateMask = append(updateMask, "avatarUri") - } - - if d.HasChange("speech_to_text_settings") { - updateMask = append(updateMask, "speechToTextSettings") - } - - if d.HasChange("security_settings") { - updateMask = append(updateMask, "securitySettings") - } - - if d.HasChange("enable_stackdriver_logging") { - updateMask = append(updateMask, "enableStackdriverLogging") - } - - if d.HasChange("enable_spell_correction") { - updateMask = append(updateMask, "enableSpellCorrection") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Agent %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Agent %q: %#v", d.Id(), res) - } - - return resourceDialogflowCXAgentRead(d, meta) -} - -func 
resourceDialogflowCXAgentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}projects/{{project}}/locations/{{location}}/agents/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Agent %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Agent") - } - - log.Printf("[DEBUG] Finished deleting Agent %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowCXAgentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/agents/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/agents/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDialogflowCXAgentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDialogflowCXAgentDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
- return v -} - -func flattenDialogflowCXAgentDefaultLanguageCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentSupportedLanguageCodes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentTimeZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentAvatarUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentSpeechToTextSettings(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enable_speech_adaptation"] = - flattenDialogflowCXAgentSpeechToTextSettingsEnableSpeechAdaptation(original["enableSpeechAdaptation"], d, config) - return []interface{}{transformed} -} -func flattenDialogflowCXAgentSpeechToTextSettingsEnableSpeechAdaptation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentStartFlow(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentSecuritySettings(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentEnableStackdriverLogging(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentEnableSpellCorrection(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowCXAgentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDialogflowCXAgentDefaultLanguageCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentSupportedLanguageCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentAvatarUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentSpeechToTextSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnableSpeechAdaptation, err := expandDialogflowCXAgentSpeechToTextSettingsEnableSpeechAdaptation(original["enable_speech_adaptation"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnableSpeechAdaptation); val.IsValid() && !isEmptyValue(val) { - transformed["enableSpeechAdaptation"] = transformedEnableSpeechAdaptation - } - - return transformed, nil -} - -func expandDialogflowCXAgentSpeechToTextSettingsEnableSpeechAdaptation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentSecuritySettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentEnableStackdriverLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentEnableSpellCorrection(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_entity_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_entity_type.go deleted file mode 100644 index 2484f6262f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_entity_type.go +++ /dev/null @@ -1,662 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceDialogflowCXEntityType() *schema.Resource { - return &schema.Resource{ - Create: resourceDialogflowCXEntityTypeCreate, - Read: resourceDialogflowCXEntityTypeRead, - Update: resourceDialogflowCXEntityTypeUpdate, - Delete: resourceDialogflowCXEntityTypeDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDialogflowCXEntityTypeImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(40 * time.Minute), - Update: schema.DefaultTimeout(40 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 64), - Description: `The human-readable name of the entity type, unique within the agent.`, - }, - "entities": { - Type: schema.TypeList, - Required: true, - Description: `The collection of entity entries associated with the entity type.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "synonyms": { - Type: schema.TypeList, - Optional: true, - Description: `A collection of value synonyms. For example, if the entity type is vegetable, and value is scallions, a synonym could be green onions. -For KIND_LIST entity types: This collection must contain exactly one synonym equal to value.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "value": { - Type: schema.TypeString, - Optional: true, - Description: `The primary value associated with this entity entry. For example, if the entity type is vegetable, the value could be scallions. -For KIND_MAP entity types: A canonical value to be used in place of synonyms. 
-For KIND_LIST entity types: A string that can contain references to other entity types (with or without aliases).`, - }, - }, - }, - }, - "kind": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"KIND_MAP", "KIND_LIST", "KIND_REGEXP"}), - Description: `Indicates whether the entity type can be automatically expanded. -* KIND_MAP: Map entity types allow mapping of a group of synonyms to a canonical value. -* KIND_LIST: List entity types contain a set of entries that do not map to canonical values. However, list entity types can contain references to other entity types (with or without aliases). -* KIND_REGEXP: Regexp entity types allow to specify regular expressions in entries values. Possible values: ["KIND_MAP", "KIND_LIST", "KIND_REGEXP"]`, - }, - "auto_expansion_mode": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"AUTO_EXPANSION_MODE_DEFAULT", "AUTO_EXPANSION_MODE_UNSPECIFIED", ""}), - Description: `Represents kinds of entities. -* AUTO_EXPANSION_MODE_UNSPECIFIED: Auto expansion disabled for the entity. -* AUTO_EXPANSION_MODE_DEFAULT: Allows an agent to recognize values that have not been explicitly listed in the entity. Possible values: ["AUTO_EXPANSION_MODE_DEFAULT", "AUTO_EXPANSION_MODE_UNSPECIFIED"]`, - }, - "enable_fuzzy_extraction": { - Type: schema.TypeBool, - Optional: true, - Description: `Enables fuzzy entity extraction during classification.`, - }, - "excluded_phrases": { - Type: schema.TypeList, - Optional: true, - Description: `Collection of exceptional words and phrases that shouldn't be matched. For example, if you have a size entity type with entry giant(an adjective), you might consider adding giants(a noun) as an exclusion. 
-If the kind of entity type is KIND_MAP, then the phrases specified by entities and excluded phrases should be mutually exclusive.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeString, - Optional: true, - Description: `The word or phrase to be excluded.`, - }, - }, - }, - }, - "language_code": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The language of the following fields in entityType: -EntityType.entities.value -EntityType.entities.synonyms -EntityType.excluded_phrases.value -If not specified, the agent's default language is used. Many languages are supported. Note: languages must be enabled in the agent before they can be used.`, - }, - "parent": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The agent to create a entity type for. -Format: projects//locations//agents/.`, - }, - "redact": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates whether parameters of the entity type should be redacted in log. If redaction is enabled, page parameters and intent parameters referring to the entity type will be replaced by parameter name when logging.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The unique identifier of the entity type. 
-Format: projects//locations//agents//entityTypes/.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowCXEntityTypeCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXEntityTypeDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - kindProp, err := expandDialogflowCXEntityTypeKind(d.Get("kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kind"); !isEmptyValue(reflect.ValueOf(kindProp)) && (ok || !reflect.DeepEqual(v, kindProp)) { - obj["kind"] = kindProp - } - autoExpansionModeProp, err := expandDialogflowCXEntityTypeAutoExpansionMode(d.Get("auto_expansion_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("auto_expansion_mode"); !isEmptyValue(reflect.ValueOf(autoExpansionModeProp)) && (ok || !reflect.DeepEqual(v, autoExpansionModeProp)) { - obj["autoExpansionMode"] = autoExpansionModeProp - } - entitiesProp, err := expandDialogflowCXEntityTypeEntities(d.Get("entities"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entities"); !isEmptyValue(reflect.ValueOf(entitiesProp)) && (ok || !reflect.DeepEqual(v, entitiesProp)) { - obj["entities"] = entitiesProp - } - excludedPhrasesProp, err := expandDialogflowCXEntityTypeExcludedPhrases(d.Get("excluded_phrases"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("excluded_phrases"); !isEmptyValue(reflect.ValueOf(excludedPhrasesProp)) && (ok || !reflect.DeepEqual(v, excludedPhrasesProp)) { - obj["excludedPhrases"] = excludedPhrasesProp - } - 
enableFuzzyExtractionProp, err := expandDialogflowCXEntityTypeEnableFuzzyExtraction(d.Get("enable_fuzzy_extraction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_fuzzy_extraction"); !isEmptyValue(reflect.ValueOf(enableFuzzyExtractionProp)) && (ok || !reflect.DeepEqual(v, enableFuzzyExtractionProp)) { - obj["enableFuzzyExtraction"] = enableFuzzyExtractionProp - } - redactProp, err := expandDialogflowCXEntityTypeRedact(d.Get("redact"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("redact"); !isEmptyValue(reflect.ValueOf(redactProp)) && (ok || !reflect.DeepEqual(v, redactProp)) { - obj["redact"] = redactProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/entityTypes") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new EntityType: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating EntityType: %s", err) - } - if err := d.Set("name", flattenDialogflowCXEntityTypeName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{parent}}/entityTypes/{{name}}") - if 
err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating EntityType %q: %#v", d.Id(), res) - - return resourceDialogflowCXEntityTypeRead(d, meta) -} - -func resourceDialogflowCXEntityTypeRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/entityTypes/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DialogflowCXEntityType %q", d.Id())) - } - - if err := d.Set("name", flattenDialogflowCXEntityTypeName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("display_name", flattenDialogflowCXEntityTypeDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("kind", flattenDialogflowCXEntityTypeKind(res["kind"], d, config)); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("auto_expansion_mode", 
flattenDialogflowCXEntityTypeAutoExpansionMode(res["autoExpansionMode"], d, config)); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("entities", flattenDialogflowCXEntityTypeEntities(res["entities"], d, config)); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("excluded_phrases", flattenDialogflowCXEntityTypeExcludedPhrases(res["excludedPhrases"], d, config)); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("enable_fuzzy_extraction", flattenDialogflowCXEntityTypeEnableFuzzyExtraction(res["enableFuzzyExtraction"], d, config)); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("redact", flattenDialogflowCXEntityTypeRedact(res["redact"], d, config)); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - - return nil -} - -func resourceDialogflowCXEntityTypeUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXEntityTypeDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - kindProp, err := expandDialogflowCXEntityTypeKind(d.Get("kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kind"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, kindProp)) { - obj["kind"] = kindProp - } - autoExpansionModeProp, err := expandDialogflowCXEntityTypeAutoExpansionMode(d.Get("auto_expansion_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("auto_expansion_mode"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autoExpansionModeProp)) { - obj["autoExpansionMode"] = autoExpansionModeProp - } - entitiesProp, err := expandDialogflowCXEntityTypeEntities(d.Get("entities"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entities"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entitiesProp)) { - obj["entities"] = entitiesProp - } - excludedPhrasesProp, err := expandDialogflowCXEntityTypeExcludedPhrases(d.Get("excluded_phrases"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("excluded_phrases"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, excludedPhrasesProp)) { - obj["excludedPhrases"] = excludedPhrasesProp - } - enableFuzzyExtractionProp, err := expandDialogflowCXEntityTypeEnableFuzzyExtraction(d.Get("enable_fuzzy_extraction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_fuzzy_extraction"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableFuzzyExtractionProp)) { - obj["enableFuzzyExtraction"] = enableFuzzyExtractionProp - } - redactProp, err := expandDialogflowCXEntityTypeRedact(d.Get("redact"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("redact"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, redactProp)) { - obj["redact"] = redactProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/entityTypes/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating EntityType %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("kind") { - updateMask = append(updateMask, "kind") - } - - if d.HasChange("auto_expansion_mode") { - updateMask = append(updateMask, "autoExpansionMode") - } - - if d.HasChange("entities") { - updateMask = append(updateMask, 
"entities") - } - - if d.HasChange("excluded_phrases") { - updateMask = append(updateMask, "excludedPhrases") - } - - if d.HasChange("enable_fuzzy_extraction") { - updateMask = append(updateMask, "enableFuzzyExtraction") - } - - if d.HasChange("redact") { - updateMask = append(updateMask, "redact") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating EntityType %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating EntityType %q: %#v", d.Id(), res) - } - - return resourceDialogflowCXEntityTypeRead(d, meta) -} - -func resourceDialogflowCXEntityTypeDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/entityTypes/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - // extract location from the parent - location := "" - - if 
parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - log.Printf("[DEBUG] Deleting EntityType %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EntityType") - } - - log.Printf("[DEBUG] Finished deleting EntityType %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowCXEntityTypeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value and parent contains slashes - if err := parseImportId([]string{ - "(?P.+)/entityTypes/(?P[^/]+)", - "(?P.+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{parent}}/entityTypes/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDialogflowCXEntityTypeName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDialogflowCXEntityTypeDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEntityTypeKind(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return 
v -} - -func flattenDialogflowCXEntityTypeAutoExpansionMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEntityTypeEntities(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "value": flattenDialogflowCXEntityTypeEntitiesValue(original["value"], d, config), - "synonyms": flattenDialogflowCXEntityTypeEntitiesSynonyms(original["synonyms"], d, config), - }) - } - return transformed -} -func flattenDialogflowCXEntityTypeEntitiesValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEntityTypeEntitiesSynonyms(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEntityTypeExcludedPhrases(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "value": flattenDialogflowCXEntityTypeExcludedPhrasesValue(original["value"], d, config), - }) - } - return transformed -} -func flattenDialogflowCXEntityTypeExcludedPhrasesValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEntityTypeEnableFuzzyExtraction(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEntityTypeRedact(v interface{}, 
d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowCXEntityTypeDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeAutoExpansionMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeEntities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedValue, err := expandDialogflowCXEntityTypeEntitiesValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - transformedSynonyms, err := expandDialogflowCXEntityTypeEntitiesSynonyms(original["synonyms"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSynonyms); val.IsValid() && !isEmptyValue(val) { - transformed["synonyms"] = transformedSynonyms - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXEntityTypeEntitiesValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeEntitiesSynonyms(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeExcludedPhrases(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw 
== nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedValue, err := expandDialogflowCXEntityTypeExcludedPhrasesValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXEntityTypeExcludedPhrasesValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeEnableFuzzyExtraction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeRedact(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_environment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_environment.go deleted file mode 100644 index 22390450f0..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_environment.go +++ /dev/null @@ -1,475 +0,0 @@ -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceDialogflowCXEnvironment() *schema.Resource { - return &schema.Resource{ - Create: resourceDialogflowCXEnvironmentCreate, - Read: resourceDialogflowCXEnvironmentRead, - Update: resourceDialogflowCXEnvironmentUpdate, - Delete: resourceDialogflowCXEnvironmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDialogflowCXEnvironmentImport, - }, - - 
Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(40 * time.Minute), - Update: schema.DefaultTimeout(40 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 64), - Description: `The human-readable name of the environment (unique in an agent). Limit of 64 characters.`, - }, - "version_configs": { - Type: schema.TypeList, - Required: true, - Description: `A list of configurations for flow versions. You should include version configs for all flows that are reachable from [Start Flow][Agent.start_flow] in the agent. Otherwise, an error will be returned.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "version": { - Type: schema.TypeString, - Required: true, - Description: `Format: projects/{{project}}/locations/{{location}}/agents/{{agent}}/flows/{{flow}}/versions/{{version}}.`, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 500), - Description: `The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.`, - }, - "parent": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The Agent to create an Environment for. -Format: projects//locations//agents/.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the environment.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Update time of this environment. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowCXEnvironmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXEnvironmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDialogflowCXEnvironmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - versionConfigsProp, err := expandDialogflowCXEnvironmentVersionConfigs(d.Get("version_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_configs"); !isEmptyValue(reflect.ValueOf(versionConfigsProp)) && (ok || !reflect.DeepEqual(v, versionConfigsProp)) { - obj["versionConfigs"] = versionConfigsProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/environments") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Environment: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent 
is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Environment: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{parent}}/environments/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = DialogflowCXOperationWaitTimeWithResponse( - config, res, &opRes, "Creating Environment", userAgent, location, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Environment: %s", err) - } - - if err := d.Set("name", flattenDialogflowCXEnvironmentName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "{{parent}}/environments/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Environment %q: %#v", d.Id(), res) - - return resourceDialogflowCXEnvironmentRead(d, meta) -} - -func resourceDialogflowCXEnvironmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/environments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DialogflowCXEnvironment %q", d.Id())) - } - - if err := d.Set("name", flattenDialogflowCXEnvironmentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("display_name", flattenDialogflowCXEnvironmentDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("description", flattenDialogflowCXEnvironmentDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading 
Environment: %s", err) - } - if err := d.Set("version_configs", flattenDialogflowCXEnvironmentVersionConfigs(res["versionConfigs"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("update_time", flattenDialogflowCXEnvironmentUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - - return nil -} - -func resourceDialogflowCXEnvironmentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXEnvironmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDialogflowCXEnvironmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - versionConfigsProp, err := expandDialogflowCXEnvironmentVersionConfigs(d.Get("version_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_configs"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, versionConfigsProp)) { - obj["versionConfigs"] = versionConfigsProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/environments/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Environment %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if 
d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("version_configs") { - updateMask = append(updateMask, "versionConfigs") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Environment %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Environment %q: %#v", d.Id(), res) - } - - err = DialogflowCXOperationWaitTime( - config, res, "Updating Environment", userAgent, location, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceDialogflowCXEnvironmentRead(d, meta) -} - -func resourceDialogflowCXEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/environments/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - // extract 
location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - log.Printf("[DEBUG] Deleting Environment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Environment") - } - - err = DialogflowCXOperationWaitTime( - config, res, "Deleting Environment", userAgent, location, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Environment %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowCXEnvironmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value and parent contains slashes - if err := parseImportId([]string{ - "(?P.+)/environments/(?P[^/]+)", - "(?P.+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{parent}}/environments/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDialogflowCXEnvironmentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func 
flattenDialogflowCXEnvironmentDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEnvironmentDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEnvironmentVersionConfigs(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "version": flattenDialogflowCXEnvironmentVersionConfigsVersion(original["version"], d, config), - }) - } - return transformed -} -func flattenDialogflowCXEnvironmentVersionConfigsVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEnvironmentUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowCXEnvironmentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEnvironmentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEnvironmentVersionConfigs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedVersion, err := expandDialogflowCXEnvironmentVersionConfigsVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && 
!isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXEnvironmentVersionConfigsVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_intent.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_intent.go deleted file mode 100644 index 94cced0fb5..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_intent.go +++ /dev/null @@ -1,865 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceDialogflowCXIntent() *schema.Resource { - return &schema.Resource{ - Create: resourceDialogflowCXIntentCreate, - Read: resourceDialogflowCXIntentRead, - Update: resourceDialogflowCXIntentUpdate, - Delete: resourceDialogflowCXIntentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDialogflowCXIntentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(40 * time.Minute), - Update: schema.DefaultTimeout(40 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 64), - Description: `The human-readable name of the intent, unique within the agent.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 140), - Description: `Human readable description for better understanding an intent like its scope, content, result etc. Maximum character limit: 140 characters.`, - }, - "is_fallback": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates whether this is a fallback intent. Currently only default fallback intent is allowed in the agent, which is added upon agent creation. -Adding training phrases to fallback intent is useful in the case of requests that are mistakenly matched, since training phrases assigned to fallback intents act as negative examples that triggers no-match event.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `The key/value metadata to label an intent. 
Labels can contain lowercase letters, digits and the symbols '-' and '_'. International characters are allowed, including letters from unicase alphabets. Keys must start with a letter. Keys and values can be no longer than 63 characters and no more than 128 bytes. -Prefix "sys-" is reserved for Dialogflow defined labels. Currently allowed Dialogflow defined labels include: * sys-head * sys-contextual The above labels do not require value. "sys-head" means the intent is a head intent. "sys.contextual" means the intent is a contextual intent. -An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "language_code": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The language of the following fields in intent: -Intent.training_phrases.parts.text -If not specified, the agent's default language is used. Many languages are supported. Note: languages must be enabled in the agent before they can be used.`, - }, - "parameters": { - Type: schema.TypeList, - Optional: true, - Description: `The collection of parameters associated with the intent.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "entity_type": { - Type: schema.TypeString, - Required: true, - Description: `The entity type of the parameter. -Format: projects/-/locations/-/agents/-/entityTypes/ for system entity types (for example, projects/-/locations/-/agents/-/entityTypes/sys.date), or projects//locations//agents//entityTypes/ for developer entity types.`, - }, - "id": { - Type: schema.TypeString, - Required: true, - Description: `The unique identifier of the parameter. 
This field is used by training phrases to annotate their parts.`, - }, - "is_list": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates whether the parameter represents a list of values.`, - }, - "redact": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates whether the parameter content should be redacted in log. If redaction is enabled, the parameter content will be replaced by parameter name during logging. -Note: the parameter content is subject to redaction if either parameter level redaction or entity type level redaction is enabled.`, - }, - }, - }, - }, - "parent": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The agent to create an intent for. -Format: projects//locations//agents/.`, - }, - "priority": { - Type: schema.TypeInt, - Optional: true, - Description: `The priority of this intent. Higher numbers represent higher priorities. -If the supplied value is unspecified or 0, the service translates the value to 500,000, which corresponds to the Normal priority in the console. -If the supplied value is negative, the intent is ignored in runtime detect intent requests.`, - }, - "training_phrases": { - Type: schema.TypeList, - Optional: true, - Description: `The collection of training phrases the agent is trained on to identify the intent.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "parts": { - Type: schema.TypeList, - Required: true, - Description: `The ordered list of training phrase parts. The parts are concatenated in order to form the training phrase. -Note: The API does not automatically annotate training phrases like the Dialogflow Console does. -Note: Do not forget to include whitespace at part boundaries, so the training phrase is well formatted when the parts are concatenated. -If the training phrase does not need to be annotated with parameters, you just need a single part with only the Part.text field set. 
-If you want to annotate the training phrase, you must create multiple parts, where the fields of each part are populated in one of two ways: -Part.text is set to a part of the phrase that has no parameters. -Part.text is set to a part of the phrase that you want to annotate, and the parameterId field is set.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "text": { - Type: schema.TypeString, - Required: true, - Description: `The text for this part.`, - }, - "parameter_id": { - Type: schema.TypeString, - Optional: true, - Description: `The parameter used to annotate this part of the training phrase. This field is required for annotated parts of the training phrase.`, - }, - }, - }, - }, - "repeat_count": { - Type: schema.TypeInt, - Optional: true, - Description: `Indicates how many times this example was added to the intent.`, - }, - "id": { - Type: schema.TypeString, - Computed: true, - Description: `The unique identifier of the training phrase.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The unique identifier of the intent. 
-Format: projects//locations//agents//intents/.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowCXIntentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXIntentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - trainingPhrasesProp, err := expandDialogflowCXIntentTrainingPhrases(d.Get("training_phrases"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("training_phrases"); !isEmptyValue(reflect.ValueOf(trainingPhrasesProp)) && (ok || !reflect.DeepEqual(v, trainingPhrasesProp)) { - obj["trainingPhrases"] = trainingPhrasesProp - } - parametersProp, err := expandDialogflowCXIntentParameters(d.Get("parameters"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parameters"); !isEmptyValue(reflect.ValueOf(parametersProp)) && (ok || !reflect.DeepEqual(v, parametersProp)) { - obj["parameters"] = parametersProp - } - priorityProp, err := expandDialogflowCXIntentPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(reflect.ValueOf(priorityProp)) && (ok || !reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - isFallbackProp, err := expandDialogflowCXIntentIsFallback(d.Get("is_fallback"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_fallback"); !isEmptyValue(reflect.ValueOf(isFallbackProp)) && (ok || !reflect.DeepEqual(v, isFallbackProp)) { - obj["isFallback"] = isFallbackProp - } - labelsProp, err := 
expandDialogflowCXIntentLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - descriptionProp, err := expandDialogflowCXIntentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - languageCodeProp, err := expandDialogflowCXIntentLanguageCode(d.Get("language_code"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("language_code"); !isEmptyValue(reflect.ValueOf(languageCodeProp)) && (ok || !reflect.DeepEqual(v, languageCodeProp)) { - obj["languageCode"] = languageCodeProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/intents") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Intent: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Intent: %s", err) - } - if err := d.Set("name", flattenDialogflowCXIntentName(res["name"], d, 
config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{parent}}/intents/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Intent %q: %#v", d.Id(), res) - - return resourceDialogflowCXIntentRead(d, meta) -} - -func resourceDialogflowCXIntentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/intents/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DialogflowCXIntent %q", d.Id())) - } - - if err := d.Set("name", flattenDialogflowCXIntentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("display_name", flattenDialogflowCXIntentDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("training_phrases", 
flattenDialogflowCXIntentTrainingPhrases(res["trainingPhrases"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("parameters", flattenDialogflowCXIntentParameters(res["parameters"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("priority", flattenDialogflowCXIntentPriority(res["priority"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("is_fallback", flattenDialogflowCXIntentIsFallback(res["isFallback"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("labels", flattenDialogflowCXIntentLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("description", flattenDialogflowCXIntentDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("language_code", flattenDialogflowCXIntentLanguageCode(res["languageCode"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - - return nil -} - -func resourceDialogflowCXIntentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXIntentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - trainingPhrasesProp, err := expandDialogflowCXIntentTrainingPhrases(d.Get("training_phrases"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("training_phrases"); !isEmptyValue(reflect.ValueOf(v)) 
&& (ok || !reflect.DeepEqual(v, trainingPhrasesProp)) { - obj["trainingPhrases"] = trainingPhrasesProp - } - parametersProp, err := expandDialogflowCXIntentParameters(d.Get("parameters"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parameters"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, parametersProp)) { - obj["parameters"] = parametersProp - } - priorityProp, err := expandDialogflowCXIntentPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - isFallbackProp, err := expandDialogflowCXIntentIsFallback(d.Get("is_fallback"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_fallback"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, isFallbackProp)) { - obj["isFallback"] = isFallbackProp - } - labelsProp, err := expandDialogflowCXIntentLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - descriptionProp, err := expandDialogflowCXIntentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/intents/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Intent %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("training_phrases") { - updateMask = append(updateMask, "trainingPhrases") - } - - if 
d.HasChange("parameters") { - updateMask = append(updateMask, "parameters") - } - - if d.HasChange("priority") { - updateMask = append(updateMask, "priority") - } - - if d.HasChange("is_fallback") { - updateMask = append(updateMask, "isFallback") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Intent %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Intent %q: %#v", d.Id(), res) - } - - return resourceDialogflowCXIntentRead(d, meta) -} - -func resourceDialogflowCXIntentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/intents/{{name}}") - if err != nil { - return err - } - - var 
obj map[string]interface{} - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - log.Printf("[DEBUG] Deleting Intent %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Intent") - } - - log.Printf("[DEBUG] Finished deleting Intent %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowCXIntentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value and parent contains slashes - if err := parseImportId([]string{ - "(?P.+)/intents/(?P[^/]+)", - "(?P.+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{parent}}/intents/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDialogflowCXIntentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDialogflowCXIntentDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentTrainingPhrases(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "id": flattenDialogflowCXIntentTrainingPhrasesId(original["id"], d, config), - "parts": flattenDialogflowCXIntentTrainingPhrasesParts(original["parts"], d, config), - "repeat_count": flattenDialogflowCXIntentTrainingPhrasesRepeatCount(original["repeatCount"], d, config), - }) - } - return transformed -} -func flattenDialogflowCXIntentTrainingPhrasesId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentTrainingPhrasesParts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "text": flattenDialogflowCXIntentTrainingPhrasesPartsText(original["text"], d, config), - "parameter_id": flattenDialogflowCXIntentTrainingPhrasesPartsParameterId(original["parameterId"], d, config), - }) - } - return transformed -} -func flattenDialogflowCXIntentTrainingPhrasesPartsText(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentTrainingPhrasesPartsParameterId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentTrainingPhrasesRepeatCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if 
strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDialogflowCXIntentParameters(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "id": flattenDialogflowCXIntentParametersId(original["id"], d, config), - "entity_type": flattenDialogflowCXIntentParametersEntityType(original["entityType"], d, config), - "is_list": flattenDialogflowCXIntentParametersIsList(original["isList"], d, config), - "redact": flattenDialogflowCXIntentParametersRedact(original["redact"], d, config), - }) - } - return transformed -} -func flattenDialogflowCXIntentParametersId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentParametersEntityType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentParametersIsList(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentParametersRedact(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentPriority(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := 
v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDialogflowCXIntentIsFallback(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentLanguageCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowCXIntentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentTrainingPhrases(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandDialogflowCXIntentTrainingPhrasesId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedParts, err := expandDialogflowCXIntentTrainingPhrasesParts(original["parts"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedParts); val.IsValid() && !isEmptyValue(val) { - transformed["parts"] = transformedParts - } - - transformedRepeatCount, err := expandDialogflowCXIntentTrainingPhrasesRepeatCount(original["repeat_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRepeatCount); val.IsValid() && !isEmptyValue(val) { - transformed["repeatCount"] = transformedRepeatCount - } - - req = append(req, transformed) 
- } - return req, nil -} - -func expandDialogflowCXIntentTrainingPhrasesId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentTrainingPhrasesParts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := expandDialogflowCXIntentTrainingPhrasesPartsText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - transformedParameterId, err := expandDialogflowCXIntentTrainingPhrasesPartsParameterId(original["parameter_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedParameterId); val.IsValid() && !isEmptyValue(val) { - transformed["parameterId"] = transformedParameterId - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXIntentTrainingPhrasesPartsText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentTrainingPhrasesPartsParameterId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentTrainingPhrasesRepeatCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentParameters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - 
transformedId, err := expandDialogflowCXIntentParametersId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedEntityType, err := expandDialogflowCXIntentParametersEntityType(original["entity_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEntityType); val.IsValid() && !isEmptyValue(val) { - transformed["entityType"] = transformedEntityType - } - - transformedIsList, err := expandDialogflowCXIntentParametersIsList(original["is_list"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIsList); val.IsValid() && !isEmptyValue(val) { - transformed["isList"] = transformedIsList - } - - transformedRedact, err := expandDialogflowCXIntentParametersRedact(original["redact"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRedact); val.IsValid() && !isEmptyValue(val) { - transformed["redact"] = transformedRedact - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXIntentParametersId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentParametersEntityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentParametersIsList(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentParametersRedact(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentIsFallback(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandDialogflowCXIntentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentLanguageCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_version.go deleted file mode 100644 index 300b38b537..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_version.go +++ /dev/null @@ -1,472 +0,0 @@ -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceDialogflowCXVersion() *schema.Resource { - return &schema.Resource{ - Create: resourceDialogflowCXVersionCreate, - Read: resourceDialogflowCXVersionRead, - Update: resourceDialogflowCXVersionUpdate, - Delete: resourceDialogflowCXVersionDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDialogflowCXVersionImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(40 * time.Minute), - Update: schema.DefaultTimeout(40 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: 
validation.StringLenBetween(0, 64), - Description: `The human-readable name of the version. Limit of 64 characters.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 500), - Description: `The description of the version. The maximum length is 500 characters. If exceeded, the request is rejected.`, - }, - "parent": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The Flow to create an Version for. -Format: projects//locations//agents//flows/.`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Format: projects//locations//agents//flows//versions/. Version ID is a self-increasing number generated by Dialogflow upon version creation.`, - }, - "nlu_settings": { - Type: schema.TypeList, - Computed: true, - Description: `The NLU settings of the flow at version creation.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "classification_threshold": { - Type: schema.TypeFloat, - Optional: true, - Description: `To filter out false positive results and still get variety in matched natural language inputs for your agent, you can tune the machine learning classification threshold. If the returned score value is less than the threshold value, then a no-match event will be triggered. -The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). If set to 0.0, the default of 0.3 is used.`, - }, - "model_training_mode": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"MODEL_TRAINING_MODE_AUTOMATIC", "MODEL_TRAINING_MODE_MANUAL", ""}), - Description: `Indicates NLU model training mode. 
-* MODEL_TRAINING_MODE_AUTOMATIC: NLU model training is automatically triggered when a flow gets modified. User can also manually trigger model training in this mode. -* MODEL_TRAINING_MODE_MANUAL: User needs to manually trigger NLU model training. Best for large flows whose models take long time to train. Possible values: ["MODEL_TRAINING_MODE_AUTOMATIC", "MODEL_TRAINING_MODE_MANUAL"]`, - }, - "model_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"MODEL_TYPE_STANDARD", "MODEL_TYPE_ADVANCED", ""}), - Description: `Indicates the type of NLU model. -* MODEL_TYPE_STANDARD: Use standard NLU model. -* MODEL_TYPE_ADVANCED: Use advanced NLU model. Possible values: ["MODEL_TYPE_STANDARD", "MODEL_TYPE_ADVANCED"]`, - }, - }, - }, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `The state of this version. -* RUNNING: Version is not ready to serve (e.g. training is running). -* SUCCEEDED: Training has succeeded and this version is ready to serve. 
-* FAILED: Version training failed.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowCXVersionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXVersionDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDialogflowCXVersionDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/versions") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Version: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Version: %s", err) - 
} - - // Store the ID now - id, err := replaceVars(d, config, "{{parent}}/versions/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = DialogflowCXOperationWaitTimeWithResponse( - config, res, &opRes, "Creating Version", userAgent, location, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Version: %s", err) - } - - if err := d.Set("name", flattenDialogflowCXVersionName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "{{parent}}/versions/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Version %q: %#v", d.Id(), res) - - return resourceDialogflowCXVersionRead(d, meta) -} - -func resourceDialogflowCXVersionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/versions/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = 
strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DialogflowCXVersion %q", d.Id())) - } - - if err := d.Set("name", flattenDialogflowCXVersionName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Version: %s", err) - } - if err := d.Set("display_name", flattenDialogflowCXVersionDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Version: %s", err) - } - if err := d.Set("description", flattenDialogflowCXVersionDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Version: %s", err) - } - if err := d.Set("nlu_settings", flattenDialogflowCXVersionNluSettings(res["nluSettings"], d, config)); err != nil { - return fmt.Errorf("Error reading Version: %s", err) - } - if err := d.Set("create_time", flattenDialogflowCXVersionCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Version: %s", err) - } - if err := d.Set("state", flattenDialogflowCXVersionState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Version: %s", err) - } - - return nil -} - -func resourceDialogflowCXVersionUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXVersionDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDialogflowCXVersionDescription(d.Get("description"), d, config) - if err != nil 
{ - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/versions/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Version %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Version %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Version %q: %#v", d.Id(), res) - } - - err = DialogflowCXOperationWaitTime( - config, res, "Updating Version", userAgent, location, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceDialogflowCXVersionRead(d, meta) -} - -func 
resourceDialogflowCXVersionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/versions/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - log.Printf("[DEBUG] Deleting Version %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Version") - } - - err = DialogflowCXOperationWaitTime( - config, res, "Deleting Version", userAgent, location, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Version %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowCXVersionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value and parent contains slashes - if err := parseImportId([]string{ - "(?P.+)/versions/(?P[^/]+)", - "(?P.+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, 
config, "{{parent}}/versions/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDialogflowCXVersionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDialogflowCXVersionDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXVersionDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXVersionNluSettings(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["model_type"] = - flattenDialogflowCXVersionNluSettingsModelType(original["modelType"], d, config) - transformed["classification_threshold"] = - flattenDialogflowCXVersionNluSettingsClassificationThreshold(original["classificationThreshold"], d, config) - transformed["model_training_mode"] = - flattenDialogflowCXVersionNluSettingsModelTrainingMode(original["modelTrainingMode"], d, config) - return []interface{}{transformed} -} -func flattenDialogflowCXVersionNluSettingsModelType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXVersionNluSettingsClassificationThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXVersionNluSettingsModelTrainingMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXVersionCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXVersionState(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - return v -} - -func expandDialogflowCXVersionDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXVersionDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_webhook.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_webhook.go deleted file mode 100644 index 19af5d4915..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_webhook.go +++ /dev/null @@ -1,821 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDialogflowCXWebhook() *schema.Resource { - return &schema.Resource{ - Create: resourceDialogflowCXWebhookCreate, - Read: resourceDialogflowCXWebhookRead, - Update: resourceDialogflowCXWebhookUpdate, - Delete: resourceDialogflowCXWebhookDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDialogflowCXWebhookImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(40 * time.Minute), - Update: schema.DefaultTimeout(40 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `The human-readable name of the webhook, unique within the agent.`, - }, - "disabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates whether the webhook is disabled.`, - }, - "enable_spell_correction": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates if automatic spell correction is enabled in detect intent requests.`, - }, - "enable_stackdriver_logging": { - Type: schema.TypeBool, - Optional: true, - Description: `Determines whether this agent should log conversation queries.`, - }, - "generic_web_service": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration for a generic web service.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uri": { - Type: schema.TypeString, - Required: true, - Description: `Whether to use speech adaptation for speech recognition.`, - }, - "allowed_ca_certs": { - Type: schema.TypeList, - Optional: true, - Description: `Specifies a list of allowed custom CA certificates (in DER format) for HTTPS verification.`, - Elem: &schema.Schema{ - Type: 
schema.TypeString, - }, - }, - "request_headers": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `The HTTP request headers to send together with webhook requests.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "parent": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The agent to create a webhook for. -Format: projects//locations//agents/.`, - }, - "security_settings": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the SecuritySettings reference for the agent. Format: projects//locations//securitySettings/.`, - }, - "service_directory": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration for a Service Directory service.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "generic_web_service": { - Type: schema.TypeList, - Required: true, - Description: `The name of Service Directory service.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uri": { - Type: schema.TypeString, - Required: true, - Description: `Whether to use speech adaptation for speech recognition.`, - }, - "allowed_ca_certs": { - Type: schema.TypeList, - Optional: true, - Description: `Specifies a list of allowed custom CA certificates (in DER format) for HTTPS verification.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "request_headers": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `The HTTP request headers to send together with webhook requests.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "service": { - Type: schema.TypeString, - Required: true, - Description: `The name of Service Directory service.`, - }, - }, - }, - }, - "timeout": { - Type: schema.TypeString, - Optional: true, - Description: `Webhook execution timeout.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The unique identifier 
of the webhook. -Format: projects//locations//agents//webhooks/.`, - }, - "start_flow": { - Type: schema.TypeString, - Computed: true, - Description: `Name of the start flow in this agent. A start flow will be automatically created when the agent is created, and can only be deleted by deleting the agent. Format: projects//locations//agents//flows/.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowCXWebhookCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXWebhookDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - timeoutProp, err := expandDialogflowCXWebhookTimeout(d.Get("timeout"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout"); !isEmptyValue(reflect.ValueOf(timeoutProp)) && (ok || !reflect.DeepEqual(v, timeoutProp)) { - obj["timeout"] = timeoutProp - } - disabledProp, err := expandDialogflowCXWebhookDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - genericWebServiceProp, err := expandDialogflowCXWebhookGenericWebService(d.Get("generic_web_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("generic_web_service"); !isEmptyValue(reflect.ValueOf(genericWebServiceProp)) && (ok || !reflect.DeepEqual(v, genericWebServiceProp)) { - obj["genericWebService"] = genericWebServiceProp - } - serviceDirectoryProp, err := 
expandDialogflowCXWebhookServiceDirectory(d.Get("service_directory"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_directory"); !isEmptyValue(reflect.ValueOf(serviceDirectoryProp)) && (ok || !reflect.DeepEqual(v, serviceDirectoryProp)) { - obj["serviceDirectory"] = serviceDirectoryProp - } - securitySettingsProp, err := expandDialogflowCXWebhookSecuritySettings(d.Get("security_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("security_settings"); !isEmptyValue(reflect.ValueOf(securitySettingsProp)) && (ok || !reflect.DeepEqual(v, securitySettingsProp)) { - obj["securitySettings"] = securitySettingsProp - } - enableStackdriverLoggingProp, err := expandDialogflowCXWebhookEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_stackdriver_logging"); !isEmptyValue(reflect.ValueOf(enableStackdriverLoggingProp)) && (ok || !reflect.DeepEqual(v, enableStackdriverLoggingProp)) { - obj["enableStackdriverLogging"] = enableStackdriverLoggingProp - } - enableSpellCorrectionProp, err := expandDialogflowCXWebhookEnableSpellCorrection(d.Get("enable_spell_correction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_spell_correction"); !isEmptyValue(reflect.ValueOf(enableSpellCorrectionProp)) && (ok || !reflect.DeepEqual(v, enableSpellCorrectionProp)) { - obj["enableSpellCorrection"] = enableSpellCorrectionProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/webhooks") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Webhook: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // extract location from the parent - location := "" - - if parts := 
regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Webhook: %s", err) - } - if err := d.Set("name", flattenDialogflowCXWebhookName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{parent}}/webhooks/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Webhook %q: %#v", d.Id(), res) - - return resourceDialogflowCXWebhookRead(d, meta) -} - -func resourceDialogflowCXWebhookRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/webhooks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = 
strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DialogflowCXWebhook %q", d.Id())) - } - - if err := d.Set("name", flattenDialogflowCXWebhookName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Webhook: %s", err) - } - if err := d.Set("display_name", flattenDialogflowCXWebhookDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Webhook: %s", err) - } - if err := d.Set("timeout", flattenDialogflowCXWebhookTimeout(res["timeout"], d, config)); err != nil { - return fmt.Errorf("Error reading Webhook: %s", err) - } - if err := d.Set("disabled", flattenDialogflowCXWebhookDisabled(res["disabled"], d, config)); err != nil { - return fmt.Errorf("Error reading Webhook: %s", err) - } - if err := d.Set("generic_web_service", flattenDialogflowCXWebhookGenericWebService(res["genericWebService"], d, config)); err != nil { - return fmt.Errorf("Error reading Webhook: %s", err) - } - if err := d.Set("service_directory", flattenDialogflowCXWebhookServiceDirectory(res["serviceDirectory"], d, config)); err != nil { - return fmt.Errorf("Error reading Webhook: %s", err) - } - if err := d.Set("start_flow", flattenDialogflowCXWebhookStartFlow(res["startFlow"], d, config)); err != nil { - return fmt.Errorf("Error reading Webhook: %s", err) - } - if err := d.Set("security_settings", flattenDialogflowCXWebhookSecuritySettings(res["securitySettings"], d, config)); err != nil { - return fmt.Errorf("Error reading Webhook: %s", err) - } - if err := d.Set("enable_stackdriver_logging", flattenDialogflowCXWebhookEnableStackdriverLogging(res["enableStackdriverLogging"], d, config)); err != nil { - return fmt.Errorf("Error reading Webhook: %s", err) - } - if err := d.Set("enable_spell_correction", 
flattenDialogflowCXWebhookEnableSpellCorrection(res["enableSpellCorrection"], d, config)); err != nil { - return fmt.Errorf("Error reading Webhook: %s", err) - } - - return nil -} - -func resourceDialogflowCXWebhookUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXWebhookDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - timeoutProp, err := expandDialogflowCXWebhookTimeout(d.Get("timeout"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutProp)) { - obj["timeout"] = timeoutProp - } - disabledProp, err := expandDialogflowCXWebhookDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - genericWebServiceProp, err := expandDialogflowCXWebhookGenericWebService(d.Get("generic_web_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("generic_web_service"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, genericWebServiceProp)) { - obj["genericWebService"] = genericWebServiceProp - } - serviceDirectoryProp, err := expandDialogflowCXWebhookServiceDirectory(d.Get("service_directory"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_directory"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceDirectoryProp)) { - 
obj["serviceDirectory"] = serviceDirectoryProp - } - securitySettingsProp, err := expandDialogflowCXWebhookSecuritySettings(d.Get("security_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("security_settings"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, securitySettingsProp)) { - obj["securitySettings"] = securitySettingsProp - } - enableStackdriverLoggingProp, err := expandDialogflowCXWebhookEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_stackdriver_logging"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableStackdriverLoggingProp)) { - obj["enableStackdriverLogging"] = enableStackdriverLoggingProp - } - enableSpellCorrectionProp, err := expandDialogflowCXWebhookEnableSpellCorrection(d.Get("enable_spell_correction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_spell_correction"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableSpellCorrectionProp)) { - obj["enableSpellCorrection"] = enableSpellCorrectionProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/webhooks/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Webhook %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("timeout") { - updateMask = append(updateMask, "timeout") - } - - if d.HasChange("disabled") { - updateMask = append(updateMask, "disabled") - } - - if d.HasChange("generic_web_service") { - updateMask = append(updateMask, "genericWebService") - } - - if d.HasChange("service_directory") { - updateMask = append(updateMask, "serviceDirectory") - } - - if d.HasChange("security_settings") { - updateMask = append(updateMask, "securitySettings") - } - - if 
d.HasChange("enable_stackdriver_logging") { - updateMask = append(updateMask, "enableStackdriverLogging") - } - - if d.HasChange("enable_spell_correction") { - updateMask = append(updateMask, "enableSpellCorrection") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Webhook %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Webhook %q: %#v", d.Id(), res) - } - - return resourceDialogflowCXWebhookRead(d, meta) -} - -func resourceDialogflowCXWebhookDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/webhooks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - // extract location from the parent - location := "" - - if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); 
parts != nil { - location = parts[1] - } else { - return fmt.Errorf( - "Saw %s when the parent is expected to contains location %s", - d.Get("parent"), - "projects/{{project}}/locations/{{location}}/...", - ) - } - - url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) - log.Printf("[DEBUG] Deleting Webhook %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Webhook") - } - - log.Printf("[DEBUG] Finished deleting Webhook %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowCXWebhookImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value and parent contains slashes - if err := parseImportId([]string{ - "(?P.+)/webhooks/(?P[^/]+)", - "(?P.+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{parent}}/webhooks/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDialogflowCXWebhookName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDialogflowCXWebhookDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXWebhookTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXWebhookDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenDialogflowCXWebhookGenericWebService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["uri"] = - flattenDialogflowCXWebhookGenericWebServiceUri(original["uri"], d, config) - transformed["request_headers"] = - flattenDialogflowCXWebhookGenericWebServiceRequestHeaders(original["requestHeaders"], d, config) - transformed["allowed_ca_certs"] = - flattenDialogflowCXWebhookGenericWebServiceAllowedCaCerts(original["allowedCaCerts"], d, config) - return []interface{}{transformed} -} -func flattenDialogflowCXWebhookGenericWebServiceUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXWebhookGenericWebServiceRequestHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXWebhookGenericWebServiceAllowedCaCerts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXWebhookServiceDirectory(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["service"] = - flattenDialogflowCXWebhookServiceDirectoryService(original["service"], d, config) - transformed["generic_web_service"] = - flattenDialogflowCXWebhookServiceDirectoryGenericWebService(original["genericWebService"], d, config) - return []interface{}{transformed} -} -func flattenDialogflowCXWebhookServiceDirectoryService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXWebhookServiceDirectoryGenericWebService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == 
nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["uri"] = - flattenDialogflowCXWebhookServiceDirectoryGenericWebServiceUri(original["uri"], d, config) - transformed["request_headers"] = - flattenDialogflowCXWebhookServiceDirectoryGenericWebServiceRequestHeaders(original["requestHeaders"], d, config) - transformed["allowed_ca_certs"] = - flattenDialogflowCXWebhookServiceDirectoryGenericWebServiceAllowedCaCerts(original["allowedCaCerts"], d, config) - return []interface{}{transformed} -} -func flattenDialogflowCXWebhookServiceDirectoryGenericWebServiceUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXWebhookServiceDirectoryGenericWebServiceRequestHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXWebhookServiceDirectoryGenericWebServiceAllowedCaCerts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXWebhookStartFlow(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXWebhookSecuritySettings(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXWebhookEnableStackdriverLogging(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXWebhookEnableSpellCorrection(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowCXWebhookDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXWebhookTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXWebhookDisabled(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXWebhookGenericWebService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUri, err := expandDialogflowCXWebhookGenericWebServiceUri(original["uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !isEmptyValue(val) { - transformed["uri"] = transformedUri - } - - transformedRequestHeaders, err := expandDialogflowCXWebhookGenericWebServiceRequestHeaders(original["request_headers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRequestHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeaders"] = transformedRequestHeaders - } - - transformedAllowedCaCerts, err := expandDialogflowCXWebhookGenericWebServiceAllowedCaCerts(original["allowed_ca_certs"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAllowedCaCerts); val.IsValid() && !isEmptyValue(val) { - transformed["allowedCaCerts"] = transformedAllowedCaCerts - } - - return transformed, nil -} - -func expandDialogflowCXWebhookGenericWebServiceUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXWebhookGenericWebServiceRequestHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandDialogflowCXWebhookGenericWebServiceAllowedCaCerts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDialogflowCXWebhookServiceDirectory(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedService, err := expandDialogflowCXWebhookServiceDirectoryService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - transformedGenericWebService, err := expandDialogflowCXWebhookServiceDirectoryGenericWebService(original["generic_web_service"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGenericWebService); val.IsValid() && !isEmptyValue(val) { - transformed["genericWebService"] = transformedGenericWebService - } - - return transformed, nil -} - -func expandDialogflowCXWebhookServiceDirectoryService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXWebhookServiceDirectoryGenericWebService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUri, err := expandDialogflowCXWebhookServiceDirectoryGenericWebServiceUri(original["uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !isEmptyValue(val) { - transformed["uri"] = transformedUri - } - - transformedRequestHeaders, err := expandDialogflowCXWebhookServiceDirectoryGenericWebServiceRequestHeaders(original["request_headers"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedRequestHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeaders"] = transformedRequestHeaders - } - - transformedAllowedCaCerts, err := expandDialogflowCXWebhookServiceDirectoryGenericWebServiceAllowedCaCerts(original["allowed_ca_certs"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAllowedCaCerts); val.IsValid() && !isEmptyValue(val) { - transformed["allowedCaCerts"] = transformedAllowedCaCerts - } - - return transformed, nil -} - -func expandDialogflowCXWebhookServiceDirectoryGenericWebServiceUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXWebhookServiceDirectoryGenericWebServiceRequestHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandDialogflowCXWebhookServiceDirectoryGenericWebServiceAllowedCaCerts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXWebhookSecuritySettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXWebhookEnableStackdriverLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXWebhookEnableSpellCorrection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_entity_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_entity_type.go deleted file mode 100644 index b0593a1aa9..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_entity_type.go +++ /dev/null @@ -1,469 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDialogflowEntityType() *schema.Resource { - return &schema.Resource{ - Create: resourceDialogflowEntityTypeCreate, - Read: resourceDialogflowEntityTypeRead, - Update: resourceDialogflowEntityTypeUpdate, - Delete: resourceDialogflowEntityTypeDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDialogflowEntityTypeImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `The name of this entity type to be displayed on the console.`, - }, - "kind": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"KIND_MAP", "KIND_LIST", "KIND_REGEXP"}), - Description: `Indicates the kind of entity type. -* KIND_MAP: Map entity types allow mapping of a group of synonyms to a reference value. -* KIND_LIST: List entity types contain a set of entries that do not map to reference values. 
However, list entity -types can contain references to other entity types (with or without aliases). -* KIND_REGEXP: Regexp entity types allow to specify regular expressions in entries values. Possible values: ["KIND_MAP", "KIND_LIST", "KIND_REGEXP"]`, - }, - "enable_fuzzy_extraction": { - Type: schema.TypeBool, - Optional: true, - Description: `Enables fuzzy entity extraction during classification.`, - }, - "entities": { - Type: schema.TypeList, - Optional: true, - Description: `The collection of entity entries associated with the entity type.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "synonyms": { - Type: schema.TypeList, - Required: true, - Description: `A collection of value synonyms. For example, if the entity type is vegetable, and value is scallions, a synonym -could be green onions. -For KIND_LIST entity types: -* This collection must contain exactly one synonym equal to value.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "value": { - Type: schema.TypeString, - Required: true, - Description: `The primary value associated with this entity entry. For example, if the entity type is vegetable, the value -could be scallions. -For KIND_MAP entity types: -* A reference value to be used in place of synonyms. -For KIND_LIST entity types: -* A string that can contain references to other entity types (with or without aliases).`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The unique identifier of the entity type. 
-Format: projects//agent/entityTypes/.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowEntityTypeCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowEntityTypeDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - kindProp, err := expandDialogflowEntityTypeKind(d.Get("kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kind"); !isEmptyValue(reflect.ValueOf(kindProp)) && (ok || !reflect.DeepEqual(v, kindProp)) { - obj["kind"] = kindProp - } - enableFuzzyExtractionProp, err := expandDialogflowEntityTypeEnableFuzzyExtraction(d.Get("enable_fuzzy_extraction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_fuzzy_extraction"); !isEmptyValue(reflect.ValueOf(enableFuzzyExtractionProp)) && (ok || !reflect.DeepEqual(v, enableFuzzyExtractionProp)) { - obj["enableFuzzyExtraction"] = enableFuzzyExtractionProp - } - entitiesProp, err := expandDialogflowEntityTypeEntities(d.Get("entities"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entities"); !isEmptyValue(reflect.ValueOf(entitiesProp)) && (ok || !reflect.DeepEqual(v, entitiesProp)) { - obj["entities"] = entitiesProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/entityTypes/") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new EntityType: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) 
- if err != nil { - return fmt.Errorf("Error fetching project for EntityType: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating EntityType: %s", err) - } - if err := d.Set("name", flattenDialogflowEntityTypeName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - log.Printf("[DEBUG] Finished creating EntityType %q: %#v", d.Id(), res) - - return resourceDialogflowEntityTypeRead(d, meta) -} - -func resourceDialogflowEntityTypeRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for EntityType: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DialogflowEntityType %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - - if err := d.Set("name", flattenDialogflowEntityTypeName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("display_name", flattenDialogflowEntityTypeDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("kind", flattenDialogflowEntityTypeKind(res["kind"], d, config)); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("enable_fuzzy_extraction", flattenDialogflowEntityTypeEnableFuzzyExtraction(res["enableFuzzyExtraction"], d, config)); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - if err := 
d.Set("entities", flattenDialogflowEntityTypeEntities(res["entities"], d, config)); err != nil { - return fmt.Errorf("Error reading EntityType: %s", err) - } - - return nil -} - -func resourceDialogflowEntityTypeUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for EntityType: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowEntityTypeDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - kindProp, err := expandDialogflowEntityTypeKind(d.Get("kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kind"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, kindProp)) { - obj["kind"] = kindProp - } - enableFuzzyExtractionProp, err := expandDialogflowEntityTypeEnableFuzzyExtraction(d.Get("enable_fuzzy_extraction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_fuzzy_extraction"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableFuzzyExtractionProp)) { - obj["enableFuzzyExtraction"] = enableFuzzyExtractionProp - } - entitiesProp, err := expandDialogflowEntityTypeEntities(d.Get("entities"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entities"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entitiesProp)) { - obj["entities"] = entitiesProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] 
Updating EntityType %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating EntityType %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating EntityType %q: %#v", d.Id(), res) - } - - return resourceDialogflowEntityTypeRead(d, meta) -} - -func resourceDialogflowEntityTypeDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for EntityType: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting EntityType %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EntityType") - } - - log.Printf("[DEBUG] Finished deleting EntityType %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowEntityTypeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := strings.Split(d.Get("name").(string), "/") - 
if len(stringParts) < 2 { - return nil, fmt.Errorf( - "Could not split project from name: %s", - d.Get("name"), - ) - } - - if err := d.Set("project", stringParts[1]); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenDialogflowEntityTypeName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowEntityTypeDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowEntityTypeKind(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowEntityTypeEnableFuzzyExtraction(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowEntityTypeEntities(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "value": flattenDialogflowEntityTypeEntitiesValue(original["value"], d, config), - "synonyms": flattenDialogflowEntityTypeEntitiesSynonyms(original["synonyms"], d, config), - }) - } - return transformed -} -func flattenDialogflowEntityTypeEntitiesValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowEntityTypeEntitiesSynonyms(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowEntityTypeDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowEntityTypeKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, 
nil -} - -func expandDialogflowEntityTypeEnableFuzzyExtraction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowEntityTypeEntities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedValue, err := expandDialogflowEntityTypeEntitiesValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - transformedSynonyms, err := expandDialogflowEntityTypeEntitiesSynonyms(original["synonyms"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSynonyms); val.IsValid() && !isEmptyValue(val) { - transformed["synonyms"] = transformedSynonyms - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowEntityTypeEntitiesValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowEntityTypeEntitiesSynonyms(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_fulfillment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_fulfillment.go deleted file mode 100644 index 691d8ab066..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_fulfillment.go +++ /dev/null @@ -1,573 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED 
CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDialogflowFulfillment() *schema.Resource { - return &schema.Resource{ - Create: resourceDialogflowFulfillmentCreate, - Read: resourceDialogflowFulfillmentRead, - Update: resourceDialogflowFulfillmentUpdate, - Delete: resourceDialogflowFulfillmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDialogflowFulfillmentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `The human-readable name of the fulfillment, unique within the agent.`, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether fulfillment is enabled.`, - }, - "features": { - Type: schema.TypeList, - Optional: true, - Description: `The field defines whether the fulfillment is enabled for certain features.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"SMALLTALK"}), - Description: `The type of the feature that enabled for fulfillment. -* SMALLTALK: Fulfillment is enabled for SmallTalk. 
Possible values: ["SMALLTALK"]`, - }, - }, - }, - }, - "generic_web_service": { - Type: schema.TypeList, - Optional: true, - Description: `Represents configuration for a generic web service. Dialogflow supports two mechanisms for authentications: - Basic authentication with username and password. - Authentication with additional authentication headers.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uri": { - Type: schema.TypeString, - Required: true, - Description: `The fulfillment URI for receiving POST requests. It must use https protocol.`, - }, - "password": { - Type: schema.TypeString, - Optional: true, - Description: `The password for HTTP Basic authentication.`, - }, - "request_headers": { - Type: schema.TypeMap, - Optional: true, - Description: `The HTTP request headers to send together with fulfillment requests.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "username": { - Type: schema.TypeString, - Optional: true, - Description: `The user name for HTTP Basic authentication.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The unique identifier of the fulfillment. 
-Format: projects//agent/fulfillment - projects//locations//agent/fulfillment`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowFulfillmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowFulfillmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandDialogflowFulfillmentEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - featuresProp, err := expandDialogflowFulfillmentFeatures(d.Get("features"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("features"); !isEmptyValue(reflect.ValueOf(featuresProp)) && (ok || !reflect.DeepEqual(v, featuresProp)) { - obj["features"] = featuresProp - } - genericWebServiceProp, err := expandDialogflowFulfillmentGenericWebService(d.Get("generic_web_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("generic_web_service"); !isEmptyValue(reflect.ValueOf(genericWebServiceProp)) && (ok || !reflect.DeepEqual(v, genericWebServiceProp)) { - obj["genericWebService"] = genericWebServiceProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/fulfillment/?updateMask=name,displayName,enabled,genericWebService,features") - if err != nil { - return err - } - - log.Printf("[DEBUG] 
Creating new Fulfillment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Fulfillment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Fulfillment: %s", err) - } - if err := d.Set("name", flattenDialogflowFulfillmentName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - log.Printf("[DEBUG] Finished creating Fulfillment %q: %#v", d.Id(), res) - - return resourceDialogflowFulfillmentRead(d, meta) -} - -func resourceDialogflowFulfillmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Fulfillment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DialogflowFulfillment %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Fulfillment: %s", err) - } - - if err := d.Set("name", flattenDialogflowFulfillmentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Fulfillment: %s", err) - } - if err := d.Set("display_name", flattenDialogflowFulfillmentDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Fulfillment: %s", err) - } - if err := d.Set("enabled", flattenDialogflowFulfillmentEnabled(res["enabled"], d, config)); err != nil { - return fmt.Errorf("Error reading Fulfillment: %s", err) - } - if err := d.Set("features", flattenDialogflowFulfillmentFeatures(res["features"], d, config)); err != nil { - return fmt.Errorf("Error reading Fulfillment: %s", err) - } - if err := 
d.Set("generic_web_service", flattenDialogflowFulfillmentGenericWebService(res["genericWebService"], d, config)); err != nil { - return fmt.Errorf("Error reading Fulfillment: %s", err) - } - - return nil -} - -func resourceDialogflowFulfillmentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Fulfillment: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowFulfillmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandDialogflowFulfillmentEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - featuresProp, err := expandDialogflowFulfillmentFeatures(d.Get("features"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("features"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, featuresProp)) { - obj["features"] = featuresProp - } - genericWebServiceProp, err := expandDialogflowFulfillmentGenericWebService(d.Get("generic_web_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("generic_web_service"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, genericWebServiceProp)) { - obj["genericWebService"] = genericWebServiceProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/fulfillment/") - if 
err != nil { - return err - } - - log.Printf("[DEBUG] Updating Fulfillment %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - - if d.HasChange("features") { - updateMask = append(updateMask, "features") - } - - if d.HasChange("generic_web_service") { - updateMask = append(updateMask, "genericWebService") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Fulfillment %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Fulfillment %q: %#v", d.Id(), res) - } - - return resourceDialogflowFulfillmentRead(d, meta) -} - -func resourceDialogflowFulfillmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Fulfillment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/fulfillment/?updateMask=name,displayName,enabled,genericWebService,features") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Fulfillment %q", d.Id()) - - // err == nil indicates that the billing_project value was found 
- if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Fulfillment") - } - - log.Printf("[DEBUG] Finished deleting Fulfillment %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowFulfillmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := strings.Split(d.Get("name").(string), "/") - if len(stringParts) < 2 { - return nil, fmt.Errorf( - "Could not split project from name: %s", - d.Get("name"), - ) - } - - if err := d.Set("project", stringParts[1]); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenDialogflowFulfillmentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentFeatures(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "type": flattenDialogflowFulfillmentFeaturesType(original["type"], d, config), - }) - } - return 
transformed -} -func flattenDialogflowFulfillmentFeaturesType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentGenericWebService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["uri"] = - flattenDialogflowFulfillmentGenericWebServiceUri(original["uri"], d, config) - transformed["username"] = - flattenDialogflowFulfillmentGenericWebServiceUsername(original["username"], d, config) - transformed["password"] = - flattenDialogflowFulfillmentGenericWebServicePassword(original["password"], d, config) - transformed["request_headers"] = - flattenDialogflowFulfillmentGenericWebServiceRequestHeaders(original["requestHeaders"], d, config) - return []interface{}{transformed} -} -func flattenDialogflowFulfillmentGenericWebServiceUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentGenericWebServiceUsername(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentGenericWebServicePassword(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentGenericWebServiceRequestHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowFulfillmentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowFulfillmentEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowFulfillmentFeatures(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - 
for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedType, err := expandDialogflowFulfillmentFeaturesType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowFulfillmentFeaturesType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowFulfillmentGenericWebService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUri, err := expandDialogflowFulfillmentGenericWebServiceUri(original["uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !isEmptyValue(val) { - transformed["uri"] = transformedUri - } - - transformedUsername, err := expandDialogflowFulfillmentGenericWebServiceUsername(original["username"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !isEmptyValue(val) { - transformed["username"] = transformedUsername - } - - transformedPassword, err := expandDialogflowFulfillmentGenericWebServicePassword(original["password"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !isEmptyValue(val) { - transformed["password"] = transformedPassword - } - - transformedRequestHeaders, err := expandDialogflowFulfillmentGenericWebServiceRequestHeaders(original["request_headers"], d, config) - if err != nil { - return nil, err - } else if val 
:= reflect.ValueOf(transformedRequestHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeaders"] = transformedRequestHeaders - } - - return transformed, nil -} - -func expandDialogflowFulfillmentGenericWebServiceUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowFulfillmentGenericWebServiceUsername(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowFulfillmentGenericWebServicePassword(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowFulfillmentGenericWebServiceRequestHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_intent.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_intent.go deleted file mode 100644 index 888682555e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_intent.go +++ /dev/null @@ -1,685 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDialogflowIntent() *schema.Resource { - return &schema.Resource{ - Create: resourceDialogflowIntentCreate, - Read: resourceDialogflowIntentRead, - Update: resourceDialogflowIntentUpdate, - Delete: resourceDialogflowIntentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDialogflowIntentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `The name of this intent to be displayed on the console.`, - }, - "action": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `The name of the action associated with the intent. -Note: The action name must not contain whitespaces.`, - }, - "default_response_platforms": { - Type: schema.TypeList, - Optional: true, - Description: `The list of platforms for which the first responses will be copied from the messages in PLATFORM_UNSPECIFIED -(i.e. default platform). Possible values: ["FACEBOOK", "SLACK", "TELEGRAM", "KIK", "SKYPE", "LINE", "VIBER", "ACTIONS_ON_GOOGLE", "GOOGLE_HANGOUTS"]`, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"FACEBOOK", "SLACK", "TELEGRAM", "KIK", "SKYPE", "LINE", "VIBER", "ACTIONS_ON_GOOGLE", "GOOGLE_HANGOUTS"}), - }, - }, - "events": { - Type: schema.TypeList, - Optional: true, - Description: `The collection of event names that trigger the intent. If the collection of input contexts is not empty, all of -the contexts must be present in the active user session for an event to trigger this intent. 
See the -[events reference](https://cloud.google.com/dialogflow/docs/events-overview) for more details.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "input_context_names": { - Type: schema.TypeList, - Optional: true, - Description: `The list of context names required for this intent to be triggered. -Format: projects//agent/sessions/-/contexts/.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "is_fallback": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: `Indicates whether this is a fallback intent.`, - }, - "ml_disabled": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: `Indicates whether Machine Learning is disabled for the intent. -Note: If mlDisabled setting is set to true, then this intent is not taken into account during inference in ML -ONLY match mode. Also, auto-markup in the UI is turned off.`, - }, - "parent_followup_intent_name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The unique identifier of the parent intent in the chain of followup intents. -Format: projects//agent/intents/.`, - }, - "priority": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The priority of this intent. Higher numbers represent higher priorities. - - If the supplied value is unspecified or 0, the service translates the value to 500,000, which corresponds - to the Normal priority in the console. 
- - If the supplied value is negative, the intent is ignored in runtime detect intent requests.`, - }, - "reset_contexts": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: `Indicates whether to delete all contexts in the current session when this intent is matched.`, - }, - "webhook_state": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"WEBHOOK_STATE_ENABLED", "WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING", ""}), - Description: `Indicates whether webhooks are enabled for the intent. -* WEBHOOK_STATE_ENABLED: Webhook is enabled in the agent and in the intent. -* WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING: Webhook is enabled in the agent and in the intent. Also, each slot -filling prompt is forwarded to the webhook. Possible values: ["WEBHOOK_STATE_ENABLED", "WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING"]`, - }, - "followup_intent_info": { - Type: schema.TypeList, - Computed: true, - Description: `Information about all followup intents that have this intent as a direct or indirect parent. We populate this field -only in the output.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "followup_intent_name": { - Type: schema.TypeString, - Optional: true, - Description: `The unique identifier of the followup intent. -Format: projects//agent/intents/.`, - }, - "parent_followup_intent_name": { - Type: schema.TypeString, - Optional: true, - Description: `The unique identifier of the followup intent's parent. -Format: projects//agent/intents/.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The unique identifier of this intent. -Format: projects//agent/intents/.`, - }, - "root_followup_intent_name": { - Type: schema.TypeString, - Computed: true, - Description: `The unique identifier of the root intent in the chain of followup intents. It identifies the correct followup -intents chain for this intent. 
-Format: projects//agent/intents/.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowIntentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowIntentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - webhookStateProp, err := expandDialogflowIntentWebhookState(d.Get("webhook_state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("webhook_state"); !isEmptyValue(reflect.ValueOf(webhookStateProp)) && (ok || !reflect.DeepEqual(v, webhookStateProp)) { - obj["webhookState"] = webhookStateProp - } - priorityProp, err := expandDialogflowIntentPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(reflect.ValueOf(priorityProp)) && (ok || !reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - isFallbackProp, err := expandDialogflowIntentIsFallback(d.Get("is_fallback"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_fallback"); !isEmptyValue(reflect.ValueOf(isFallbackProp)) && (ok || !reflect.DeepEqual(v, isFallbackProp)) { - obj["isFallback"] = isFallbackProp - } - mlDisabledProp, err := expandDialogflowIntentMlDisabled(d.Get("ml_disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ml_disabled"); !isEmptyValue(reflect.ValueOf(mlDisabledProp)) && (ok || !reflect.DeepEqual(v, mlDisabledProp)) { - obj["mlDisabled"] = mlDisabledProp - } 
- inputContextNamesProp, err := expandDialogflowIntentInputContextNames(d.Get("input_context_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("input_context_names"); !isEmptyValue(reflect.ValueOf(inputContextNamesProp)) && (ok || !reflect.DeepEqual(v, inputContextNamesProp)) { - obj["inputContextNames"] = inputContextNamesProp - } - eventsProp, err := expandDialogflowIntentEvents(d.Get("events"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("events"); !isEmptyValue(reflect.ValueOf(eventsProp)) && (ok || !reflect.DeepEqual(v, eventsProp)) { - obj["events"] = eventsProp - } - actionProp, err := expandDialogflowIntentAction(d.Get("action"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("action"); !isEmptyValue(reflect.ValueOf(actionProp)) && (ok || !reflect.DeepEqual(v, actionProp)) { - obj["action"] = actionProp - } - resetContextsProp, err := expandDialogflowIntentResetContexts(d.Get("reset_contexts"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reset_contexts"); !isEmptyValue(reflect.ValueOf(resetContextsProp)) && (ok || !reflect.DeepEqual(v, resetContextsProp)) { - obj["resetContexts"] = resetContextsProp - } - defaultResponsePlatformsProp, err := expandDialogflowIntentDefaultResponsePlatforms(d.Get("default_response_platforms"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_response_platforms"); !isEmptyValue(reflect.ValueOf(defaultResponsePlatformsProp)) && (ok || !reflect.DeepEqual(v, defaultResponsePlatformsProp)) { - obj["defaultResponsePlatforms"] = defaultResponsePlatformsProp - } - parentFollowupIntentNameProp, err := expandDialogflowIntentParentFollowupIntentName(d.Get("parent_followup_intent_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent_followup_intent_name"); !isEmptyValue(reflect.ValueOf(parentFollowupIntentNameProp)) && (ok || 
!reflect.DeepEqual(v, parentFollowupIntentNameProp)) { - obj["parentFollowupIntentName"] = parentFollowupIntentNameProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/intents/") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Intent: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Intent: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Intent: %s", err) - } - if err := d.Set("name", flattenDialogflowIntentName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - log.Printf("[DEBUG] Finished creating Intent %q: %#v", d.Id(), res) - - return resourceDialogflowIntentRead(d, meta) -} - -func resourceDialogflowIntentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Intent: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DialogflowIntent %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - - if err := d.Set("name", flattenDialogflowIntentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("display_name", flattenDialogflowIntentDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("webhook_state", flattenDialogflowIntentWebhookState(res["webhookState"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("priority", flattenDialogflowIntentPriority(res["priority"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("is_fallback", flattenDialogflowIntentIsFallback(res["isFallback"], 
d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("ml_disabled", flattenDialogflowIntentMlDisabled(res["mlDisabled"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("input_context_names", flattenDialogflowIntentInputContextNames(res["inputContextNames"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("events", flattenDialogflowIntentEvents(res["events"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("action", flattenDialogflowIntentAction(res["action"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("reset_contexts", flattenDialogflowIntentResetContexts(res["resetContexts"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("default_response_platforms", flattenDialogflowIntentDefaultResponsePlatforms(res["defaultResponsePlatforms"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("root_followup_intent_name", flattenDialogflowIntentRootFollowupIntentName(res["rootFollowupIntentName"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("parent_followup_intent_name", flattenDialogflowIntentParentFollowupIntentName(res["parentFollowupIntentName"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("followup_intent_info", flattenDialogflowIntentFollowupIntentInfo(res["followupIntentInfo"], d, config)); err != nil { - return fmt.Errorf("Error reading Intent: %s", err) - } - - return nil -} - -func resourceDialogflowIntentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Intent: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowIntentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - webhookStateProp, err := expandDialogflowIntentWebhookState(d.Get("webhook_state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("webhook_state"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, webhookStateProp)) { - obj["webhookState"] = webhookStateProp - } - priorityProp, err := expandDialogflowIntentPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - isFallbackProp, err := expandDialogflowIntentIsFallback(d.Get("is_fallback"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_fallback"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, isFallbackProp)) { - obj["isFallback"] = isFallbackProp - } - mlDisabledProp, err := expandDialogflowIntentMlDisabled(d.Get("ml_disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ml_disabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mlDisabledProp)) { - obj["mlDisabled"] = mlDisabledProp - } - inputContextNamesProp, err := expandDialogflowIntentInputContextNames(d.Get("input_context_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("input_context_names"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
inputContextNamesProp)) { - obj["inputContextNames"] = inputContextNamesProp - } - eventsProp, err := expandDialogflowIntentEvents(d.Get("events"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("events"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, eventsProp)) { - obj["events"] = eventsProp - } - actionProp, err := expandDialogflowIntentAction(d.Get("action"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("action"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, actionProp)) { - obj["action"] = actionProp - } - resetContextsProp, err := expandDialogflowIntentResetContexts(d.Get("reset_contexts"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reset_contexts"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, resetContextsProp)) { - obj["resetContexts"] = resetContextsProp - } - defaultResponsePlatformsProp, err := expandDialogflowIntentDefaultResponsePlatforms(d.Get("default_response_platforms"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_response_platforms"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultResponsePlatformsProp)) { - obj["defaultResponsePlatforms"] = defaultResponsePlatformsProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Intent %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Intent %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Intent %q: %#v", d.Id(), res) - } - - return 
resourceDialogflowIntentRead(d, meta) -} - -func resourceDialogflowIntentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Intent: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Intent %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Intent") - } - - log.Printf("[DEBUG] Finished deleting Intent %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowIntentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := strings.Split(d.Get("name").(string), "/") - if len(stringParts) < 2 { - return nil, fmt.Errorf( - "Could not split project from name: %s", - d.Get("name"), - ) - } - - if err := d.Set("project", stringParts[1]); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenDialogflowIntentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDialogflowIntentWebhookState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentPriority(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenDialogflowIntentIsFallback(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentMlDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentInputContextNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentEvents(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentResetContexts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentDefaultResponsePlatforms(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentRootFollowupIntentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentParentFollowupIntentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentFollowupIntentInfo(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - 
original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "followup_intent_name": flattenDialogflowIntentFollowupIntentInfoFollowupIntentName(original["followupIntentName"], d, config), - "parent_followup_intent_name": flattenDialogflowIntentFollowupIntentInfoParentFollowupIntentName(original["parentFollowupIntentName"], d, config), - }) - } - return transformed -} -func flattenDialogflowIntentFollowupIntentInfoFollowupIntentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentFollowupIntentInfoParentFollowupIntentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowIntentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentWebhookState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentIsFallback(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentMlDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentInputContextNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentEvents(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentResetContexts(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentDefaultResponsePlatforms(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentParentFollowupIntentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dns_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dns_policy.go deleted file mode 100644 index ad486b00d5..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dns_policy.go +++ /dev/null @@ -1,647 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "bytes" - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDNSPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceDNSPolicyCreate, - Read: resourceDNSPolicyRead, - Update: resourceDNSPolicyUpdate, - Delete: resourceDNSPolicyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDNSPolicyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `User assigned name for this policy.`, - }, - "alternative_name_server_config": { - Type: schema.TypeList, - Optional: true, - Description: `Sets an alternative name server for the associated networks. -When specified, all DNS queries are forwarded to a name server that you choose. -Names such as .internal are not available when an alternative name server is specified.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "target_name_servers": { - Type: schema.TypeSet, - Required: true, - Description: `Sets an alternative name server for the associated networks. When specified, -all DNS queries are forwarded to a name server that you choose. 
Names such as .internal -are not available when an alternative name server is specified.`, - Elem: dnsPolicyAlternativeNameServerConfigTargetNameServersSchema(), - Set: func(v interface{}) int { - raw := v.(map[string]interface{}) - if address, ok := raw["ipv4_address"]; ok { - hashcode(address.(string)) - } - var buf bytes.Buffer - schema.SerializeResourceForHash(&buf, raw, dnsPolicyAlternativeNameServerConfigTargetNameServersSchema()) - return hashcode(buf.String()) - }, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A textual description field. Defaults to 'Managed by Terraform'.`, - Default: "Managed by Terraform", - }, - "enable_inbound_forwarding": { - Type: schema.TypeBool, - Optional: true, - Description: `Allows networks bound to this policy to receive DNS queries sent -by VMs or applications over VPN connections. When enabled, a -virtual IP address will be allocated from each of the sub-networks -that are bound to this policy.`, - }, - "enable_logging": { - Type: schema.TypeBool, - Optional: true, - Description: `Controls whether logging is enabled for the networks bound to this policy. 
-Defaults to no logging if not set.`, - }, - "networks": { - Type: schema.TypeSet, - Optional: true, - Description: `List of network names specifying networks to which this policy is applied.`, - Elem: dnsPolicyNetworksSchema(), - Set: func(v interface{}) int { - raw := v.(map[string]interface{}) - if url, ok := raw["network_url"]; ok { - return selfLinkNameHash(url) - } - var buf bytes.Buffer - schema.SerializeResourceForHash(&buf, raw, dnsPolicyNetworksSchema()) - return hashcode(buf.String()) - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func dnsPolicyAlternativeNameServerConfigTargetNameServersSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ipv4_address": { - Type: schema.TypeString, - Required: true, - Description: `IPv4 address to forward to.`, - }, - "forwarding_path": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"default", "private", ""}), - Description: `Forwarding path for this TargetNameServer. If unset or 'default' Cloud DNS will make forwarding -decision based on address ranges, i.e. RFC1918 addresses go to the VPC, Non-RFC1918 addresses go -to the Internet. When set to 'private', Cloud DNS will always send queries through VPC for this target Possible values: ["default", "private"]`, - }, - }, - } -} - -func dnsPolicyNetworksSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "network_url": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The id or fully qualified URL of the VPC network to forward queries to. 
-This should be formatted like 'projects/{project}/global/networks/{network}' or -'https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}'`, - }, - }, - } -} - -func resourceDNSPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - alternativeNameServerConfigProp, err := expandDNSPolicyAlternativeNameServerConfig(d.Get("alternative_name_server_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("alternative_name_server_config"); !isEmptyValue(reflect.ValueOf(alternativeNameServerConfigProp)) && (ok || !reflect.DeepEqual(v, alternativeNameServerConfigProp)) { - obj["alternativeNameServerConfig"] = alternativeNameServerConfigProp - } - descriptionProp, err := expandDNSPolicyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - enableInboundForwardingProp, err := expandDNSPolicyEnableInboundForwarding(d.Get("enable_inbound_forwarding"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_inbound_forwarding"); ok || !reflect.DeepEqual(v, enableInboundForwardingProp) { - obj["enableInboundForwarding"] = enableInboundForwardingProp - } - enableLoggingProp, err := expandDNSPolicyEnableLogging(d.Get("enable_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_logging"); ok || !reflect.DeepEqual(v, enableLoggingProp) { - obj["enableLogging"] = enableLoggingProp - } - nameProp, err := expandDNSPolicyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok 
|| !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networksProp, err := expandDNSPolicyNetworks(d.Get("networks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("networks"); !isEmptyValue(reflect.ValueOf(networksProp)) && (ok || !reflect.DeepEqual(v, networksProp)) { - obj["networks"] = networksProp - } - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Policy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Policy: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/policies/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Policy %q: %#v", d.Id(), res) - - return resourceDNSPolicyRead(d, meta) -} - -func resourceDNSPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DNSPolicy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Policy: %s", err) - } - - if err := d.Set("alternative_name_server_config", flattenDNSPolicyAlternativeNameServerConfig(res["alternativeNameServerConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("description", flattenDNSPolicyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("enable_inbound_forwarding", flattenDNSPolicyEnableInboundForwarding(res["enableInboundForwarding"], d, config)); err != nil { - return fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("enable_logging", flattenDNSPolicyEnableLogging(res["enableLogging"], d, config)); err != nil { - return fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("name", flattenDNSPolicyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("networks", flattenDNSPolicyNetworks(res["networks"], d, config)); err != nil { - return fmt.Errorf("Error reading Policy: %s", err) - } - - return nil -} - -func resourceDNSPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("alternative_name_server_config") || d.HasChange("description") || d.HasChange("enable_inbound_forwarding") || d.HasChange("enable_logging") || 
d.HasChange("networks") { - obj := make(map[string]interface{}) - - alternativeNameServerConfigProp, err := expandDNSPolicyAlternativeNameServerConfig(d.Get("alternative_name_server_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("alternative_name_server_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, alternativeNameServerConfigProp)) { - obj["alternativeNameServerConfig"] = alternativeNameServerConfigProp - } - descriptionProp, err := expandDNSPolicyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - enableInboundForwardingProp, err := expandDNSPolicyEnableInboundForwarding(d.Get("enable_inbound_forwarding"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_inbound_forwarding"); ok || !reflect.DeepEqual(v, enableInboundForwardingProp) { - obj["enableInboundForwarding"] = enableInboundForwardingProp - } - enableLoggingProp, err := expandDNSPolicyEnableLogging(d.Get("enable_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_logging"); ok || !reflect.DeepEqual(v, enableLoggingProp) { - obj["enableLogging"] = enableLoggingProp - } - networksProp, err := expandDNSPolicyNetworks(d.Get("networks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("networks"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networksProp)) { - obj["networks"] = networksProp - } - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies/{{name}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Policy %q: %#v", d.Id(), res) - } - - } - - d.Partial(false) - - return resourceDNSPolicyRead(d, meta) -} - -func resourceDNSPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - // if networks are attached, they need to be detached before the policy can be deleted - if d.Get("networks.#").(int) > 0 { - patched := make(map[string]interface{}) - patched["networks"] = nil - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies/{{name}}") - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(config, "PATCH", project, url, userAgent, patched, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) - } - } - log.Printf("[DEBUG] Deleting Policy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Policy") - } - - log.Printf("[DEBUG] Finished deleting Policy %q: %#v", d.Id(), res) - return nil -} - -func resourceDNSPolicyImport(d *schema.ResourceData, meta 
interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/policies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/policies/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDNSPolicyAlternativeNameServerConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["target_name_servers"] = - flattenDNSPolicyAlternativeNameServerConfigTargetNameServers(original["targetNameServers"], d, config) - return []interface{}{transformed} -} -func flattenDNSPolicyAlternativeNameServerConfigTargetNameServers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(func(v interface{}) int { - raw := v.(map[string]interface{}) - if address, ok := raw["ipv4_address"]; ok { - hashcode(address.(string)) - } - var buf bytes.Buffer - schema.SerializeResourceForHash(&buf, raw, dnsPolicyAlternativeNameServerConfigTargetNameServersSchema()) - return hashcode(buf.String()) - }, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "ipv4_address": flattenDNSPolicyAlternativeNameServerConfigTargetNameServersIpv4Address(original["ipv4Address"], d, config), - "forwarding_path": flattenDNSPolicyAlternativeNameServerConfigTargetNameServersForwardingPath(original["forwardingPath"], d, 
config), - }) - } - return transformed -} -func flattenDNSPolicyAlternativeNameServerConfigTargetNameServersIpv4Address(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSPolicyAlternativeNameServerConfigTargetNameServersForwardingPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSPolicyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSPolicyEnableInboundForwarding(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSPolicyEnableLogging(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSPolicyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSPolicyNetworks(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(func(v interface{}) int { - raw := v.(map[string]interface{}) - if url, ok := raw["network_url"]; ok { - return selfLinkNameHash(url) - } - var buf bytes.Buffer - schema.SerializeResourceForHash(&buf, raw, dnsPolicyNetworksSchema()) - return hashcode(buf.String()) - }, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "network_url": flattenDNSPolicyNetworksNetworkUrl(original["networkUrl"], d, config), - }) - } - return transformed -} -func flattenDNSPolicyNetworksNetworkUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDNSPolicyAlternativeNameServerConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } 
- raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTargetNameServers, err := expandDNSPolicyAlternativeNameServerConfigTargetNameServers(original["target_name_servers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTargetNameServers); val.IsValid() && !isEmptyValue(val) { - transformed["targetNameServers"] = transformedTargetNameServers - } - - return transformed, nil -} - -func expandDNSPolicyAlternativeNameServerConfigTargetNameServers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIpv4Address, err := expandDNSPolicyAlternativeNameServerConfigTargetNameServersIpv4Address(original["ipv4_address"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIpv4Address); val.IsValid() && !isEmptyValue(val) { - transformed["ipv4Address"] = transformedIpv4Address - } - - transformedForwardingPath, err := expandDNSPolicyAlternativeNameServerConfigTargetNameServersForwardingPath(original["forwarding_path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedForwardingPath); val.IsValid() && !isEmptyValue(val) { - transformed["forwardingPath"] = transformedForwardingPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDNSPolicyAlternativeNameServerConfigTargetNameServersIpv4Address(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSPolicyAlternativeNameServerConfigTargetNameServersForwardingPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDNSPolicyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSPolicyEnableInboundForwarding(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSPolicyEnableLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSPolicyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSPolicyNetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNetworkUrl, err := expandDNSPolicyNetworksNetworkUrl(original["network_url"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNetworkUrl); val.IsValid() && !isEmptyValue(val) { - transformed["networkUrl"] = transformedNetworkUrl - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDNSPolicyNetworksNetworkUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || v.(string) == "" { - return "", nil - } else if strings.HasPrefix(v.(string), "https://") { - return v, nil - } - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return ConvertSelfLinkToV1(url), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_document_ai_processor.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_document_ai_processor.go deleted file mode 100644 index 9986f457c3..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_document_ai_processor.go +++ /dev/null @@ -1,284 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDocumentAIProcessor() *schema.Resource { - return &schema.Resource{ - Create: resourceDocumentAIProcessorCreate, - Read: resourceDocumentAIProcessorRead, - Delete: resourceDocumentAIProcessorDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDocumentAIProcessorImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The display name. Must be unique.`, - }, - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location of the resource.`, - }, - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The type of processor. 
For possible types see the [official list](https://cloud.google.com/document-ai/docs/reference/rest/v1/projects.locations/fetchProcessorTypes#google.cloud.documentai.v1.DocumentProcessorService.FetchProcessorTypes)`, - }, - "kms_key_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The KMS key used for encryption/decryption in CMEK scenarios. See https://cloud.google.com/security-key-management.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the processor.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDocumentAIProcessorCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - typeProp, err := expandDocumentAIProcessorType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - displayNameProp, err := expandDocumentAIProcessorDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - kmsKeyNameProp, err := expandDocumentAIProcessorKmsKeyName(d.Get("kms_key_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kms_key_name"); !isEmptyValue(reflect.ValueOf(kmsKeyNameProp)) && (ok || !reflect.DeepEqual(v, kmsKeyNameProp)) { - obj["kmsKeyName"] = kmsKeyNameProp - } - - url, err := replaceVars(d, config, "{{DocumentAIBasePath}}projects/{{project}}/locations/{{location}}/processors") - if err != nil { - 
return err - } - - log.Printf("[DEBUG] Creating new Processor: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Processor: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Processor: %s", err) - } - if err := d.Set("name", flattenDocumentAIProcessorName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/processors/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Processor %q: %#v", d.Id(), res) - - return resourceDocumentAIProcessorRead(d, meta) -} - -func resourceDocumentAIProcessorRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DocumentAIBasePath}}projects/{{project}}/locations/{{location}}/processors/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Processor: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, 
fmt.Sprintf("DocumentAIProcessor %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Processor: %s", err) - } - - if err := d.Set("name", flattenDocumentAIProcessorName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Processor: %s", err) - } - if err := d.Set("type", flattenDocumentAIProcessorType(res["type"], d, config)); err != nil { - return fmt.Errorf("Error reading Processor: %s", err) - } - if err := d.Set("display_name", flattenDocumentAIProcessorDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Processor: %s", err) - } - if err := d.Set("kms_key_name", flattenDocumentAIProcessorKmsKeyName(res["kmsKeyName"], d, config)); err != nil { - return fmt.Errorf("Error reading Processor: %s", err) - } - - return nil -} - -func resourceDocumentAIProcessorDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Processor: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DocumentAIBasePath}}projects/{{project}}/locations/{{location}}/processors/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Processor %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Processor") - } - - log.Printf("[DEBUG] Finished deleting Processor %q: %#v", d.Id(), res) - return nil -} - -func resourceDocumentAIProcessorImport(d 
*schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/processors/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/processors/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDocumentAIProcessorName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDocumentAIProcessorType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDocumentAIProcessorDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDocumentAIProcessorKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDocumentAIProcessorType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDocumentAIProcessorDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDocumentAIProcessorKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_document_ai_processor_default_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_document_ai_processor_default_version.go deleted file mode 100644 index 06f6fa6ba8..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_document_ai_processor_default_version.go +++ /dev/null @@ -1,191 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceDocumentAIProcessorDefaultVersion() *schema.Resource { - return &schema.Resource{ - Create: resourceDocumentAIProcessorDefaultVersionCreate, - Read: resourceDocumentAIProcessorDefaultVersionRead, - Delete: resourceDocumentAIProcessorDefaultVersionDelete, - - Importer: &schema.ResourceImporter{ - State: resourceDocumentAIProcessorDefaultVersionImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "processor": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The processor to set the version on.`, - }, - "version": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: projectNumberDiffSuppress, - Description: `The version to set. Using 'stable' or 'rc' will cause the API to return the latest version in that release channel. 
-Apply 'lifecycle.ignore_changes' to the 'version' field to suppress this diff.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDocumentAIProcessorDefaultVersionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - defaultProcessorVersionProp, err := expandDocumentAIProcessorDefaultVersionVersion(d.Get("version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version"); !isEmptyValue(reflect.ValueOf(defaultProcessorVersionProp)) && (ok || !reflect.DeepEqual(v, defaultProcessorVersionProp)) { - obj["defaultProcessorVersion"] = defaultProcessorVersionProp - } - processorProp, err := expandDocumentAIProcessorDefaultVersionProcessor(d.Get("processor"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("processor"); !isEmptyValue(reflect.ValueOf(processorProp)) && (ok || !reflect.DeepEqual(v, processorProp)) { - obj["processor"] = processorProp - } - - url, err := replaceVars(d, config, "{{DocumentAIBasePath}}{{processor}}:setDefaultProcessorVersion") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ProcessorDefaultVersion: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - if strings.Contains(url, "https://-") { - location := GetRegionFromRegionalSelfLink(url) - url = strings.TrimPrefix(url, "https://") - url = "https://" + location + url - } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating ProcessorDefaultVersion: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{processor}}") - if err != nil { - return 
fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating ProcessorDefaultVersion %q: %#v", d.Id(), res) - - return resourceDocumentAIProcessorDefaultVersionRead(d, meta) -} - -func resourceDocumentAIProcessorDefaultVersionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DocumentAIBasePath}}{{processor}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - if strings.Contains(url, "https://-") { - location := GetRegionFromRegionalSelfLink(url) - url = strings.TrimPrefix(url, "https://") - url = "https://" + location + url - } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DocumentAIProcessorDefaultVersion %q", d.Id())) - } - - if err := d.Set("version", flattenDocumentAIProcessorDefaultVersionVersion(res["defaultProcessorVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading ProcessorDefaultVersion: %s", err) - } - - return nil -} - -func resourceDocumentAIProcessorDefaultVersionDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARNING] DocumentAI ProcessorDefaultVersion resources"+ - " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceDocumentAIProcessorDefaultVersionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P.+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{processor}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenDocumentAIProcessorDefaultVersionVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDocumentAIProcessorDefaultVersionVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDocumentAIProcessorDefaultVersionProcessor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_essential_contacts_contact.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_essential_contacts_contact.go deleted file mode 100644 index 2d4478c577..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_essential_contacts_contact.go +++ /dev/null @@ -1,316 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. 
-// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceEssentialContactsContact() *schema.Resource { - return &schema.Resource{ - Create: resourceEssentialContactsContactCreate, - Read: resourceEssentialContactsContactRead, - Update: resourceEssentialContactsContactUpdate, - Delete: resourceEssentialContactsContactDelete, - - Importer: &schema.ResourceImporter{ - State: resourceEssentialContactsContactImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "email": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The email address to send notifications to. This does not need to be a Google account.`, - }, - "language_tag": { - Type: schema.TypeString, - Required: true, - Description: `The preferred language for notifications, as a ISO 639-1 language code. See Supported languages for a list of supported languages.`, - }, - "notification_category_subscriptions": { - Type: schema.TypeList, - Required: true, - Description: `The categories of notifications that the contact will receive communications for.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource to save this contact for. Format: organizations/{organization_id}, folders/{folder_id} or projects/{project_id}`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The identifier for the contact. 
Format: {resourceType}/{resource_id}/contacts/{contact_id}`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceEssentialContactsContactCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - emailProp, err := expandEssentialContactsContactEmail(d.Get("email"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("email"); !isEmptyValue(reflect.ValueOf(emailProp)) && (ok || !reflect.DeepEqual(v, emailProp)) { - obj["email"] = emailProp - } - notificationCategorySubscriptionsProp, err := expandEssentialContactsContactNotificationCategorySubscriptions(d.Get("notification_category_subscriptions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_category_subscriptions"); !isEmptyValue(reflect.ValueOf(notificationCategorySubscriptionsProp)) && (ok || !reflect.DeepEqual(v, notificationCategorySubscriptionsProp)) { - obj["notificationCategorySubscriptions"] = notificationCategorySubscriptionsProp - } - languageTagProp, err := expandEssentialContactsContactLanguageTag(d.Get("language_tag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("language_tag"); !isEmptyValue(reflect.ValueOf(languageTagProp)) && (ok || !reflect.DeepEqual(v, languageTagProp)) { - obj["languageTag"] = languageTagProp - } - - url, err := replaceVars(d, config, "{{EssentialContactsBasePath}}{{parent}}/contacts") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Contact: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return 
fmt.Errorf("Error creating Contact: %s", err) - } - if err := d.Set("name", flattenEssentialContactsContactName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Contact %q: %#v", d.Id(), res) - - return resourceEssentialContactsContactRead(d, meta) -} - -func resourceEssentialContactsContactRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{EssentialContactsBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("EssentialContactsContact %q", d.Id())) - } - - if err := d.Set("name", flattenEssentialContactsContactName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Contact: %s", err) - } - if err := d.Set("email", flattenEssentialContactsContactEmail(res["email"], d, config)); err != nil { - return fmt.Errorf("Error reading Contact: %s", err) - } - if err := d.Set("notification_category_subscriptions", flattenEssentialContactsContactNotificationCategorySubscriptions(res["notificationCategorySubscriptions"], d, config)); err != nil { - return fmt.Errorf("Error reading Contact: %s", err) - } - if err := d.Set("language_tag", flattenEssentialContactsContactLanguageTag(res["languageTag"], d, config)); err != nil { - return fmt.Errorf("Error reading Contact: %s", err) - } - - return 
nil -} - -func resourceEssentialContactsContactUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - notificationCategorySubscriptionsProp, err := expandEssentialContactsContactNotificationCategorySubscriptions(d.Get("notification_category_subscriptions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_category_subscriptions"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationCategorySubscriptionsProp)) { - obj["notificationCategorySubscriptions"] = notificationCategorySubscriptionsProp - } - languageTagProp, err := expandEssentialContactsContactLanguageTag(d.Get("language_tag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("language_tag"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, languageTagProp)) { - obj["languageTag"] = languageTagProp - } - - url, err := replaceVars(d, config, "{{EssentialContactsBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Contact %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("notification_category_subscriptions") { - updateMask = append(updateMask, "notificationCategorySubscriptions") - } - - if d.HasChange("language_tag") { - updateMask = append(updateMask, "languageTag") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, 
d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Contact %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Contact %q: %#v", d.Id(), res) - } - - return resourceEssentialContactsContactRead(d, meta) -} - -func resourceEssentialContactsContactDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{EssentialContactsBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Contact %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Contact") - } - - log.Printf("[DEBUG] Finished deleting Contact %q: %#v", d.Id(), res) - return nil -} - -func resourceEssentialContactsContactImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P.+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenEssentialContactsContactName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenEssentialContactsContactEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenEssentialContactsContactNotificationCategorySubscriptions(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenEssentialContactsContactLanguageTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandEssentialContactsContactEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandEssentialContactsContactNotificationCategorySubscriptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandEssentialContactsContactLanguageTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_filestore_backup.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_filestore_backup.go deleted file mode 100644 index 67013d2db0..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_filestore_backup.go +++ /dev/null @@ -1,528 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceFilestoreBackup() *schema.Resource { - return &schema.Resource{ - Create: resourceFilestoreBackupCreate, - Read: resourceFilestoreBackupRead, - Update: resourceFilestoreBackupUpdate, - Delete: resourceFilestoreBackupDelete, - - Importer: &schema.ResourceImporter{ - State: resourceFilestoreBackupImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the location of the instance. This can be a region for ENTERPRISE tier instances.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name of the backup. The name must be unique within the specified instance. - -The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "source_file_share": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the file share in the source Cloud Filestore instance that the backup is created from.`, - }, - "source_instance": { - Type: schema.TypeString, - Required: true, - Description: `The resource name of the source Cloud Filestore instance, in the format projects/{projectId}/locations/{locationId}/instances/{instanceId}, used to create this backup.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A description of the backup with 2048 characters or less. Requests with longer descriptions will be rejected.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Resource labels to represent user-provided metadata.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "capacity_gb": { - Type: schema.TypeString, - Computed: true, - Description: `The amount of bytes needed to allocate a full copy of the snapshot content.`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time when the snapshot was created in RFC3339 text format.`, - }, - "download_bytes": { - Type: schema.TypeString, - Computed: true, - Description: `Amount of bytes that will be downloaded if the backup is restored.`, - }, - "kms_key_name": { - Type: schema.TypeString, - Computed: true, - Description: `KMS key name used for data encryption.`, - }, - "source_instance_tier": { - Type: schema.TypeString, - Computed: true, - Description: `The service tier of the source Cloud Filestore instance that this backup is created from.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `The backup state.`, - }, - "storage_bytes": { - Type: schema.TypeString, - Computed: true, - Description: 
`The size of the storage used by the backup. As backups share storage, this number is expected to change with backup creation/deletion.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceFilestoreBackupCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandFilestoreBackupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandFilestoreBackupLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - sourceInstanceProp, err := expandFilestoreBackupSourceInstance(d.Get("source_instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_instance"); !isEmptyValue(reflect.ValueOf(sourceInstanceProp)) && (ok || !reflect.DeepEqual(v, sourceInstanceProp)) { - obj["sourceInstance"] = sourceInstanceProp - } - sourceFileShareProp, err := expandFilestoreBackupSourceFileShare(d.Get("source_file_share"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_file_share"); !isEmptyValue(reflect.ValueOf(sourceFileShareProp)) && (ok || !reflect.DeepEqual(v, sourceFileShareProp)) { - obj["sourceFileShare"] = sourceFileShareProp - } - - lockName, err := replaceVars(d, config, "filestore/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err 
:= replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/backups?backupId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Backup: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Backup: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isNotFilestoreQuotaError) - if err != nil { - return fmt.Errorf("Error creating Backup: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = FilestoreOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Backup", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Backup: %s", err) - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Backup %q: %#v", d.Id(), res) - - return resourceFilestoreBackupRead(d, meta) -} - -func resourceFilestoreBackupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/backups/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Backup: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isNotFilestoreQuotaError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("FilestoreBackup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - - if err := d.Set("description", flattenFilestoreBackupDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("state", flattenFilestoreBackupState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("create_time", flattenFilestoreBackupCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("labels", flattenFilestoreBackupLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := 
d.Set("capacity_gb", flattenFilestoreBackupCapacityGb(res["capacityGb"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("storage_bytes", flattenFilestoreBackupStorageBytes(res["storageBytes"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("source_instance", flattenFilestoreBackupSourceInstance(res["sourceInstance"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("source_file_share", flattenFilestoreBackupSourceFileShare(res["sourceFileShare"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("source_instance_tier", flattenFilestoreBackupSourceInstanceTier(res["sourceInstanceTier"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("download_bytes", flattenFilestoreBackupDownloadBytes(res["downloadBytes"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - if err := d.Set("kms_key_name", flattenFilestoreBackupKmsKeyName(res["kmsKeyName"], d, config)); err != nil { - return fmt.Errorf("Error reading Backup: %s", err) - } - - return nil -} - -func resourceFilestoreBackupUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Backup: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandFilestoreBackupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - 
labelsProp, err := expandFilestoreBackupLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - sourceInstanceProp, err := expandFilestoreBackupSourceInstance(d.Get("source_instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_instance"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceInstanceProp)) { - obj["sourceInstance"] = sourceInstanceProp - } - - lockName, err := replaceVars(d, config, "filestore/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/backups/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Backup %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("source_instance") { - updateMask = append(updateMask, "sourceInstance") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isNotFilestoreQuotaError) - - if err != nil { - return fmt.Errorf("Error updating Backup %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Backup %q: %#v", d.Id(), res) - } - - err = 
FilestoreOperationWaitTime( - config, res, project, "Updating Backup", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceFilestoreBackupRead(d, meta) -} - -func resourceFilestoreBackupDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Backup: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "filestore/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/backups/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Backup %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isNotFilestoreQuotaError) - if err != nil { - return handleNotFoundError(err, d, "Backup") - } - - err = FilestoreOperationWaitTime( - config, res, project, "Deleting Backup", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Backup %q: %#v", d.Id(), res) - return nil -} - -func resourceFilestoreBackupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/backups/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // 
Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenFilestoreBackupDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreBackupState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreBackupCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreBackupLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreBackupCapacityGb(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreBackupStorageBytes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreBackupSourceInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreBackupSourceFileShare(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreBackupSourceInstanceTier(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreBackupDownloadBytes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreBackupKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandFilestoreBackupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFilestoreBackupLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := 
make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandFilestoreBackupSourceInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFilestoreBackupSourceFileShare(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_filestore_snapshot.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_filestore_snapshot.go deleted file mode 100644 index 444353ec26..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_filestore_snapshot.go +++ /dev/null @@ -1,431 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceFilestoreSnapshot() *schema.Resource { - return &schema.Resource{ - Create: resourceFilestoreSnapshotCreate, - Read: resourceFilestoreSnapshotRead, - Update: resourceFilestoreSnapshotUpdate, - Delete: resourceFilestoreSnapshotDelete, - - Importer: &schema.ResourceImporter{ - State: resourceFilestoreSnapshotImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "instance": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name of the filestore instance.`, - }, - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the location of the instance. This can be a region for ENTERPRISE tier instances.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name of the snapshot. The name must be unique within the specified instance. - -The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A description of the snapshot with 2048 characters or less. 
Requests with longer descriptions will be rejected.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Resource labels to represent user-provided metadata.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time when the snapshot was created in RFC3339 text format.`, - }, - "filesystem_used_bytes": { - Type: schema.TypeString, - Computed: true, - Description: `The amount of bytes needed to allocate a full copy of the snapshot content.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `The snapshot state.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceFilestoreSnapshotCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandFilestoreSnapshotDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandFilestoreSnapshotLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - lockName, err := replaceVars(d, config, "filestore/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots?snapshotId={{name}}") - if err != nil { - 
return err - } - - log.Printf("[DEBUG] Creating new Snapshot: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Snapshot: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isNotFilestoreQuotaError) - if err != nil { - return fmt.Errorf("Error creating Snapshot: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = FilestoreOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Snapshot", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Snapshot: %s", err) - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Snapshot %q: %#v", d.Id(), res) - - return resourceFilestoreSnapshotRead(d, meta) -} - -func resourceFilestoreSnapshotRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Snapshot: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isNotFilestoreQuotaError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("FilestoreSnapshot %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - - if err := d.Set("description", flattenFilestoreSnapshotDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("state", flattenFilestoreSnapshotState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("create_time", flattenFilestoreSnapshotCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("labels", flattenFilestoreSnapshotLabels(res["labels"], d, config)); err != 
nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("filesystem_used_bytes", flattenFilestoreSnapshotFilesystemUsedBytes(res["filesystemUsedBytes"], d, config)); err != nil { - return fmt.Errorf("Error reading Snapshot: %s", err) - } - - return nil -} - -func resourceFilestoreSnapshotUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Snapshot: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandFilestoreSnapshotDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandFilestoreSnapshotLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - lockName, err := replaceVars(d, config, "filestore/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Snapshot %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't 
set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isNotFilestoreQuotaError) - - if err != nil { - return fmt.Errorf("Error updating Snapshot %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Snapshot %q: %#v", d.Id(), res) - } - - err = FilestoreOperationWaitTime( - config, res, project, "Updating Snapshot", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceFilestoreSnapshotRead(d, meta) -} - -func resourceFilestoreSnapshotDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Snapshot: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "filestore/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Snapshot %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isNotFilestoreQuotaError) - if err != nil { - 
return handleNotFoundError(err, d, "Snapshot") - } - - err = FilestoreOperationWaitTime( - config, res, project, "Deleting Snapshot", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Snapshot %q: %#v", d.Id(), res) - return nil -} - -func resourceFilestoreSnapshotImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)/snapshots/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenFilestoreSnapshotDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreSnapshotState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreSnapshotCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreSnapshotLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreSnapshotFilesystemUsedBytes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandFilestoreSnapshotDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFilestoreSnapshotLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for 
k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firestore_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firestore_database.go deleted file mode 100644 index e1ee099e33..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firestore_database.go +++ /dev/null @@ -1,464 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceFirestoreDatabase() *schema.Resource { - return &schema.Resource{ - Create: resourceFirestoreDatabaseCreate, - Read: resourceFirestoreDatabaseRead, - Update: resourceFirestoreDatabaseUpdate, - Delete: resourceFirestoreDatabaseDelete, - - Importer: &schema.ResourceImporter{ - State: resourceFirestoreDatabaseImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "location_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location of the database. 
Available databases are listed at -https://cloud.google.com/firestore/docs/locations.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Required. The ID to use for the database, which will become the final -component of the database's resource name. This value should be 4-63 -characters. Valid characters are /[a-z][0-9]-/ with first character -a letter and the last a letter or a number. Must not be -UUID-like /[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}/. -"(default)" database id is also valid.`, - }, - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"FIRESTORE_NATIVE", "DATASTORE_MODE"}), - Description: `The type of the database. -See https://cloud.google.com/datastore/docs/firestore-or-datastore -for information about how to choose. Possible values: ["FIRESTORE_NATIVE", "DATASTORE_MODE"]`, - }, - "app_engine_integration_mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"ENABLED", "DISABLED", ""}), - Description: `The App Engine integration mode to use for this database. Possible values: ["ENABLED", "DISABLED"]`, - }, - "concurrency_mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"OPTIMISTIC", "PESSIMISTIC", "OPTIMISTIC_WITH_ENTITY_GROUPS", ""}), - Description: `The concurrency control mode to use for this database. 
Possible values: ["OPTIMISTIC", "PESSIMISTIC", "OPTIMISTIC_WITH_ENTITY_GROUPS"]`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp at which this database was created.`, - }, - "etag": { - Type: schema.TypeString, - Computed: true, - Description: `This checksum is computed by the server based on the value of other fields, -and may be sent on update and delete requests to ensure the client has an -up-to-date value before proceeding.`, - }, - "key_prefix": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. The keyPrefix for this database. -This keyPrefix is used, in combination with the project id ("~") to construct the application id -that is returned from the Cloud Datastore APIs in Google App Engine first generation runtimes. -This value may be empty in which case the appid to use for URL-encoded keys is the project_id (eg: foo instead of v~foo).`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceFirestoreDatabaseCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandFirestoreDatabaseName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - locationIdProp, err := expandFirestoreDatabaseLocationId(d.Get("location_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("location_id"); !isEmptyValue(reflect.ValueOf(locationIdProp)) && (ok || !reflect.DeepEqual(v, locationIdProp)) { - obj["locationId"] = locationIdProp - } - typeProp, err := expandFirestoreDatabaseType(d.Get("type"), d, config) - if err != nil 
{ - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - concurrencyModeProp, err := expandFirestoreDatabaseConcurrencyMode(d.Get("concurrency_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("concurrency_mode"); !isEmptyValue(reflect.ValueOf(concurrencyModeProp)) && (ok || !reflect.DeepEqual(v, concurrencyModeProp)) { - obj["concurrencyMode"] = concurrencyModeProp - } - appEngineIntegrationModeProp, err := expandFirestoreDatabaseAppEngineIntegrationMode(d.Get("app_engine_integration_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("app_engine_integration_mode"); !isEmptyValue(reflect.ValueOf(appEngineIntegrationModeProp)) && (ok || !reflect.DeepEqual(v, appEngineIntegrationModeProp)) { - obj["appEngineIntegrationMode"] = appEngineIntegrationModeProp - } - etagProp, err := expandFirestoreDatabaseEtag(d.Get("etag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("etag"); !isEmptyValue(reflect.ValueOf(etagProp)) && (ok || !reflect.DeepEqual(v, etagProp)) { - obj["etag"] = etagProp - } - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases?databaseId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Database: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Database: %s", err) - } - - // Store the ID now - id, err 
:= replaceVars(d, config, "projects/{{project}}/databases/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = FirestoreOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Database", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Database: %s", err) - } - - if err := d.Set("name", flattenFirestoreDatabaseName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/databases/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Database %q: %#v", d.Id(), res) - - return resourceFirestoreDatabaseRead(d, meta) -} - -func resourceFirestoreDatabaseRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("FirestoreDatabase %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - 
return fmt.Errorf("Error reading Database: %s", err) - } - - if err := d.Set("name", flattenFirestoreDatabaseName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("location_id", flattenFirestoreDatabaseLocationId(res["locationId"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("type", flattenFirestoreDatabaseType(res["type"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("concurrency_mode", flattenFirestoreDatabaseConcurrencyMode(res["concurrencyMode"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("app_engine_integration_mode", flattenFirestoreDatabaseAppEngineIntegrationMode(res["appEngineIntegrationMode"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("key_prefix", flattenFirestoreDatabaseKeyPrefix(res["key_prefix"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("etag", flattenFirestoreDatabaseEtag(res["etag"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("create_time", flattenFirestoreDatabaseCreateTime(res["create_time"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - - return nil -} - -func resourceFirestoreDatabaseUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - typeProp, err := expandFirestoreDatabaseType(d.Get("type"), d, config) - if err != nil { - return err 
- } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - concurrencyModeProp, err := expandFirestoreDatabaseConcurrencyMode(d.Get("concurrency_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("concurrency_mode"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, concurrencyModeProp)) { - obj["concurrencyMode"] = concurrencyModeProp - } - appEngineIntegrationModeProp, err := expandFirestoreDatabaseAppEngineIntegrationMode(d.Get("app_engine_integration_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("app_engine_integration_mode"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, appEngineIntegrationModeProp)) { - obj["appEngineIntegrationMode"] = appEngineIntegrationModeProp - } - etagProp, err := expandFirestoreDatabaseEtag(d.Get("etag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("etag"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, etagProp)) { - obj["etag"] = etagProp - } - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Database %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("type") { - updateMask = append(updateMask, "type") - } - - if d.HasChange("concurrency_mode") { - updateMask = append(updateMask, "concurrencyMode") - } - - if d.HasChange("app_engine_integration_mode") { - updateMask = append(updateMask, "appEngineIntegrationMode") - } - - if d.HasChange("etag") { - updateMask = append(updateMask, "etag") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates 
that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Database %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Database %q: %#v", d.Id(), res) - } - - err = FirestoreOperationWaitTime( - config, res, project, "Updating Database", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceFirestoreDatabaseRead(d, meta) -} - -func resourceFirestoreDatabaseDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARNING] Firestore Database resources"+ - " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceFirestoreDatabaseImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/databases/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/databases/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenFirestoreDatabaseName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenFirestoreDatabaseLocationId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreDatabaseType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenFirestoreDatabaseConcurrencyMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreDatabaseAppEngineIntegrationMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreDatabaseKeyPrefix(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreDatabaseEtag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreDatabaseCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandFirestoreDatabaseName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/databases/{{name}}") -} - -func expandFirestoreDatabaseLocationId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreDatabaseType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreDatabaseConcurrencyMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreDatabaseAppEngineIntegrationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreDatabaseEtag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firestore_document.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firestore_document.go deleted file mode 100644 index 01c4ed78a2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firestore_document.go +++ /dev/null @@ -1,386 +0,0 @@ 
-// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "regexp" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceFirestoreDocument() *schema.Resource { - return &schema.Resource{ - Create: resourceFirestoreDocumentCreate, - Read: resourceFirestoreDocumentRead, - Update: resourceFirestoreDocumentUpdate, - Delete: resourceFirestoreDocumentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceFirestoreDocumentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "collection": { - Type: schema.TypeString, - Required: true, - Description: `The collection ID, relative to database. 
For example: chatrooms or chatrooms/my-document/private-messages.`, - }, - "document_id": { - Type: schema.TypeString, - Required: true, - Description: `The client-assigned document ID to use for this document during creation.`, - }, - "fields": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringIsJSON, - StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, - Description: `The document's [fields](https://cloud.google.com/firestore/docs/reference/rest/v1/projects.databases.documents) formated as a json string.`, - }, - "database": { - Type: schema.TypeString, - Optional: true, - Description: `The Firestore database id. Defaults to '"(default)"'.`, - Default: "(default)", - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 format.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `A server defined name for this index. Format: -'projects/{{project_id}}/databases/{{database_id}}/documents/{{path}}/{{document_id}}'`, - }, - "path": { - Type: schema.TypeString, - Computed: true, - Description: `A relative path to the collection this document exists within`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Last update timestamp in RFC3339 format.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceFirestoreDocumentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - fieldsProp, err := expandFirestoreDocumentFields(d.Get("fields"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fields"); !isEmptyValue(reflect.ValueOf(fieldsProp)) && (ok || !reflect.DeepEqual(v, fieldsProp)) 
{ - obj["fields"] = fieldsProp - } - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases/{{database}}/documents/{{collection}}?documentId={{document_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Document: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Document: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Document: %s", err) - } - if err := d.Set("name", flattenFirestoreDocumentName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Document %q: %#v", d.Id(), res) - - return resourceFirestoreDocumentRead(d, meta) -} - -func resourceFirestoreDocumentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Document: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", 
billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("FirestoreDocument %q", d.Id())) - } - - res, err = resourceFirestoreDocumentDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing FirestoreDocument because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Document: %s", err) - } - - if err := d.Set("name", flattenFirestoreDocumentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Document: %s", err) - } - if err := d.Set("path", flattenFirestoreDocumentPath(res["path"], d, config)); err != nil { - return fmt.Errorf("Error reading Document: %s", err) - } - if err := d.Set("fields", flattenFirestoreDocumentFields(res["fields"], d, config)); err != nil { - return fmt.Errorf("Error reading Document: %s", err) - } - if err := d.Set("create_time", flattenFirestoreDocumentCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Document: %s", err) - } - if err := d.Set("update_time", flattenFirestoreDocumentUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Document: %s", err) - } - - return nil -} - -func resourceFirestoreDocumentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Document: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - fieldsProp, err := expandFirestoreDocumentFields(d.Get("fields"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fields"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fieldsProp)) { - obj["fields"] = fieldsProp - } - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Document %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Document %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Document %q: %#v", d.Id(), res) - } - - return resourceFirestoreDocumentRead(d, meta) -} - -func resourceFirestoreDocumentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Document: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Document %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Document") - } - - log.Printf("[DEBUG] Finished deleting Document %q: %#v", d.Id(), res) - return nil -} - -func resourceFirestoreDocumentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current 
import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - re := regexp.MustCompile("^projects/([^/]+)/databases/([^/]+)/documents/(.+)/([^/]+)$") - match := re.FindStringSubmatch(d.Get("name").(string)) - if len(match) > 0 { - if err := d.Set("project", match[1]); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("database", match[2]); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("collection", match[3]); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("document_id", match[4]); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - } else { - return nil, fmt.Errorf("import did not match the regex ^projects/([^/]+)/databases/([^/]+)/documents/(.+)/([^/]+)$") - } - - return []*schema.ResourceData{d}, nil -} - -func flattenFirestoreDocumentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreDocumentPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreDocumentFields(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - b, err := json.Marshal(v) - if err != nil { - // TODO: return error once https://github.com/GoogleCloudPlatform/magic-modules/issues/3257 is fixed. 
- log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) - } - return string(b) -} - -func flattenFirestoreDocumentCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreDocumentUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandFirestoreDocumentFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - b := []byte(v.(string)) - if len(b) == 0 { - return nil, nil - } - m := make(map[string]interface{}) - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return m, nil -} - -func resourceFirestoreDocumentDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // We use this decoder to add the path field - if name, ok := res["name"]; ok { - re := regexp.MustCompile("^projects/[^/]+/databases/[^/]+/documents/(.+)$") - match := re.FindStringSubmatch(name.(string)) - if len(match) > 0 { - res["path"] = match[1] - } - } - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firestore_index.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firestore_index.go deleted file mode 100644 index 0bd06b010a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firestore_index.go +++ /dev/null @@ -1,495 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -/* - * FirestoreIndex api apends __name__ as an item to the - * end of the fields list if not present. We are suppressing - * this server generated field. - */ -func firestoreIFieldsDiffSuppressFunc(k, old, new string, d TerraformResourceDataChange) bool { - kLength := "fields.#" - oldLength, newLength := d.GetChange(kLength) - oldInt, ok := oldLength.(int) - if !ok { - return false - } - newInt, ok := newLength.(int) - if !ok { - return false - } - - if oldInt == newInt+1 { - kold := fmt.Sprintf("fields.%v.field_path", oldInt-1) - knew := fmt.Sprintf("fields.%v.field_path", newInt-1) - - oldLastIndexName, _ := d.GetChange(kold) - _, newLastIndexName := d.GetChange(knew) - if oldLastIndexName == "__name__" && newLastIndexName != "__name__" { - oldBase := fmt.Sprintf("fields.%v", oldInt-1) - if strings.HasPrefix(k, oldBase) || k == kLength { - return true - } - } - } - return false -} - -func firestoreIFieldsDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - return firestoreIFieldsDiffSuppressFunc(k, old, new, d) -} - -func ResourceFirestoreIndex() *schema.Resource { - return &schema.Resource{ - Create: resourceFirestoreIndexCreate, - Read: resourceFirestoreIndexRead, - Delete: resourceFirestoreIndexDelete, - - Importer: &schema.ResourceImporter{ - State: resourceFirestoreIndexImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "collection": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The collection being indexed.`, - }, - "fields": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - DiffSuppressFunc: 
firestoreIFieldsDiffSuppress, - Description: `The fields supported by this index. The last field entry is always for -the field path '__name__'. If, on creation, '__name__' was not -specified as the last field, it will be added automatically with the -same direction as that of the last field defined. If the final field -in a composite index is not directional, the '__name__' will be -ordered '"ASCENDING"' (unless explicitly specified otherwise).`, - MinItems: 2, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "array_config": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"CONTAINS", ""}), - Description: `Indicates that this field supports operations on arrayValues. Only one of 'order' and 'arrayConfig' can -be specified. Possible values: ["CONTAINS"]`, - }, - "field_path": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Name of the field.`, - }, - "order": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"ASCENDING", "DESCENDING", ""}), - Description: `Indicates that this field supports ordering by the specified order or comparing using =, <, <=, >, >=. -Only one of 'order' and 'arrayConfig' can be specified. Possible values: ["ASCENDING", "DESCENDING"]`, - }, - }, - }, - }, - "database": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The Firestore database id. Defaults to '"(default)"'.`, - Default: "(default)", - }, - "query_scope": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"COLLECTION", "COLLECTION_GROUP", ""}), - Description: `The scope at which a query is run. Default value: "COLLECTION" Possible values: ["COLLECTION", "COLLECTION_GROUP"]`, - Default: "COLLECTION", - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `A server defined name for this index. 
Format: -'projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes/{{server_generated_id}}'`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceFirestoreIndexCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - databaseProp, err := expandFirestoreIndexDatabase(d.Get("database"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("database"); !isEmptyValue(reflect.ValueOf(databaseProp)) && (ok || !reflect.DeepEqual(v, databaseProp)) { - obj["database"] = databaseProp - } - collectionProp, err := expandFirestoreIndexCollection(d.Get("collection"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("collection"); !isEmptyValue(reflect.ValueOf(collectionProp)) && (ok || !reflect.DeepEqual(v, collectionProp)) { - obj["collection"] = collectionProp - } - queryScopeProp, err := expandFirestoreIndexQueryScope(d.Get("query_scope"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("query_scope"); !isEmptyValue(reflect.ValueOf(queryScopeProp)) && (ok || !reflect.DeepEqual(v, queryScopeProp)) { - obj["queryScope"] = queryScopeProp - } - fieldsProp, err := expandFirestoreIndexFields(d.Get("fields"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fields"); !isEmptyValue(reflect.ValueOf(fieldsProp)) && (ok || !reflect.DeepEqual(v, fieldsProp)) { - obj["fields"] = fieldsProp - } - - obj, err = resourceFirestoreIndexEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes") - if err != nil { - return err - } - - 
log.Printf("[DEBUG] Creating new Index: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Index: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = FirestoreOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Index", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Index: %s", err) - } - - if err := d.Set("name", flattenFirestoreIndexName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // The operation for this resource contains the generated name that we need - // in order to perform a READ. 
- metadata := res["metadata"].(map[string]interface{}) - name := metadata["index"].(string) - log.Printf("[DEBUG] Setting Index name, id to %s", name) - if err := d.Set("name", name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - - log.Printf("[DEBUG] Finished creating Index %q: %#v", d.Id(), res) - - return resourceFirestoreIndexRead(d, meta) -} - -func resourceFirestoreIndexRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("FirestoreIndex %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - - if err := d.Set("name", flattenFirestoreIndexName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("query_scope", flattenFirestoreIndexQueryScope(res["queryScope"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("fields", flattenFirestoreIndexFields(res["fields"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - - return nil -} - -func resourceFirestoreIndexDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) 
- if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Index %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Index") - } - - err = FirestoreOperationWaitTime( - config, res, project, "Deleting Index", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Index %q: %#v", d.Id(), res) - return nil -} - -func resourceFirestoreIndexImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := strings.Split(d.Get("name").(string), "/") - if len(stringParts) != 8 { - return nil, fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("name"), - "projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes/{{server_generated_id}}", - ) - } - - if err := d.Set("project", stringParts[1]); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("database", stringParts[3]); err != nil { - return nil, fmt.Errorf("Error setting database: %s", err) - } - if err := d.Set("collection", stringParts[5]); err != nil { - return nil, fmt.Errorf("Error setting collection: 
%s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenFirestoreIndexName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreIndexQueryScope(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreIndexFields(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "field_path": flattenFirestoreIndexFieldsFieldPath(original["fieldPath"], d, config), - "order": flattenFirestoreIndexFieldsOrder(original["order"], d, config), - "array_config": flattenFirestoreIndexFieldsArrayConfig(original["arrayConfig"], d, config), - }) - } - return transformed -} -func flattenFirestoreIndexFieldsFieldPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreIndexFieldsOrder(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreIndexFieldsArrayConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandFirestoreIndexDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreIndexCollection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreIndexQueryScope(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreIndexFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - 
for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFieldPath, err := expandFirestoreIndexFieldsFieldPath(original["field_path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFieldPath); val.IsValid() && !isEmptyValue(val) { - transformed["fieldPath"] = transformedFieldPath - } - - transformedOrder, err := expandFirestoreIndexFieldsOrder(original["order"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOrder); val.IsValid() && !isEmptyValue(val) { - transformed["order"] = transformedOrder - } - - transformedArrayConfig, err := expandFirestoreIndexFieldsArrayConfig(original["array_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedArrayConfig); val.IsValid() && !isEmptyValue(val) { - transformed["arrayConfig"] = transformedArrayConfig - } - - req = append(req, transformed) - } - return req, nil -} - -func expandFirestoreIndexFieldsFieldPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreIndexFieldsOrder(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreIndexFieldsArrayConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceFirestoreIndexEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // We've added project / database / collection as split fields of the name, but - // the API doesn't expect them. Make sure we remove them from any requests. 
- - delete(obj, "project") - delete(obj, "database") - delete(obj, "collection") - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_folder_access_approval_settings.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_folder_access_approval_settings.go deleted file mode 100644 index 7cc9499a56..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_folder_access_approval_settings.go +++ /dev/null @@ -1,536 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "bytes" - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -var accessApprovalCloudProductMapping = map[string]string{ - "appengine.googleapis.com": "App Engine", - "bigquery.googleapis.com": "BigQuery", - "bigtable.googleapis.com": "Cloud Bigtable", - "cloudkms.googleapis.com": "Cloud Key Management Service", - "compute.googleapis.com": "Compute Engine", - "dataflow.googleapis.com": "Cloud Dataflow", - "iam.googleapis.com": "Cloud Identity and Access Management", - "pubsub.googleapis.com": "Cloud Pub/Sub", - "storage.googleapis.com": "Cloud Storage", -} - -func accessApprovalEnrolledServicesHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - cp := m["cloud_product"].(string) - if n, ok := accessApprovalCloudProductMapping[cp]; ok { - cp = n - } - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(cp))) // ToLower just in case - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["enrollment_level"].(string)))) - return hashcode(buf.String()) -} - -func ResourceAccessApprovalFolderSettings() *schema.Resource { - return &schema.Resource{ - Create: resourceAccessApprovalFolderSettingsCreate, - Read: resourceAccessApprovalFolderSettingsRead, - Update: resourceAccessApprovalFolderSettingsUpdate, - Delete: resourceAccessApprovalFolderSettingsDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAccessApprovalFolderSettingsImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "enrolled_services": { - Type: schema.TypeSet, - Required: true, - Description: `A list of Google Cloud Services for which the given resource has Access Approval enrolled. 
-Access requests for the resource given by name against any of these services contained here will be required -to have explicit approval. Enrollment can only be done on an all or nothing basis. - -A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded.`, - Elem: accessapprovalFolderSettingsEnrolledServicesSchema(), - Set: accessApprovalEnrolledServicesHash, - }, - "folder_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the folder of the access approval settings.`, - }, - "active_key_version": { - Type: schema.TypeString, - Optional: true, - Description: `The asymmetric crypto key version to use for signing approval requests. -Empty active_key_version indicates that a Google-managed key should be used for signing. -This property will be ignored if set by an ancestor of the resource, and new non-empty values may not be set.`, - }, - "notification_emails": { - Type: schema.TypeSet, - Computed: true, - Optional: true, - Description: `A list of email addresses to which notifications relating to approval requests should be sent. -Notifications relating to a resource will be sent to all emails in the settings of ancestor -resources of that resource. 
A maximum of 50 email addresses are allowed.`, - MaxItems: 50, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "ancestor_has_active_key_version": { - Type: schema.TypeBool, - Computed: true, - Description: `If the field is true, that indicates that an ancestor of this Folder has set active_key_version.`, - }, - "enrolled_ancestor": { - Type: schema.TypeBool, - Computed: true, - Description: `If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Folder.`, - }, - "invalid_key_version": { - Type: schema.TypeBool, - Computed: true, - Description: `If the field is true, that indicates that there is some configuration issue with the active_key_version -configured on this Folder (e.g. it doesn't exist or the Access Approval service account doesn't have the -correct permissions on it, etc.) This key version is not necessarily the effective key version at this level, -as key versions are inherited top-down.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the settings. Format is "folders/{folder_id}/accessApprovalSettings"`, - }, - }, - UseJSONNumber: true, - } -} - -func accessapprovalFolderSettingsEnrolledServicesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cloud_product": { - Type: schema.TypeString, - Required: true, - Description: `The product for which Access Approval will be enrolled. 
Allowed values are listed (case-sensitive): - * all - * App Engine - * BigQuery - * Cloud Bigtable - * Cloud Key Management Service - * Compute Engine - * Cloud Dataflow - * Cloud Identity and Access Management - * Cloud Pub/Sub - * Cloud Storage - * Persistent Disk - -Note: These values are supported as input, but considered a legacy format: - * all - * appengine.googleapis.com - * bigquery.googleapis.com - * bigtable.googleapis.com - * cloudkms.googleapis.com - * compute.googleapis.com - * dataflow.googleapis.com - * iam.googleapis.com - * pubsub.googleapis.com - * storage.googleapis.com`, - }, - "enrollment_level": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"BLOCK_ALL", ""}), - Description: `The enrollment level of the service. Default value: "BLOCK_ALL" Possible values: ["BLOCK_ALL"]`, - Default: "BLOCK_ALL", - }, - }, - } -} - -func resourceAccessApprovalFolderSettingsCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - notificationEmailsProp, err := expandAccessApprovalFolderSettingsNotificationEmails(d.Get("notification_emails"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_emails"); !isEmptyValue(reflect.ValueOf(notificationEmailsProp)) && (ok || !reflect.DeepEqual(v, notificationEmailsProp)) { - obj["notificationEmails"] = notificationEmailsProp - } - enrolledServicesProp, err := expandAccessApprovalFolderSettingsEnrolledServices(d.Get("enrolled_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enrolled_services"); !isEmptyValue(reflect.ValueOf(enrolledServicesProp)) && (ok || !reflect.DeepEqual(v, enrolledServicesProp)) { - obj["enrolledServices"] = enrolledServicesProp - } - activeKeyVersionProp, err := 
expandAccessApprovalFolderSettingsActiveKeyVersion(d.Get("active_key_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("active_key_version"); !isEmptyValue(reflect.ValueOf(activeKeyVersionProp)) && (ok || !reflect.DeepEqual(v, activeKeyVersionProp)) { - obj["activeKeyVersion"] = activeKeyVersionProp - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}folders/{{folder_id}}/accessApprovalSettings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new FolderSettings: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - updateMask := []string{} - - if d.HasChange("notification_emails") { - updateMask = append(updateMask, "notificationEmails") - } - - if d.HasChange("enrolled_services") { - updateMask = append(updateMask, "enrolledServices") - } - - if d.HasChange("active_key_version") { - updateMask = append(updateMask, "activeKeyVersion") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating FolderSettings: %s", err) - } - if err := d.Set("name", flattenAccessApprovalFolderSettingsName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "folders/{{folder_id}}/accessApprovalSettings") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating FolderSettings %q: %#v", d.Id(), res) - - return 
resourceAccessApprovalFolderSettingsRead(d, meta) -} - -func resourceAccessApprovalFolderSettingsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}folders/{{folder_id}}/accessApprovalSettings") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AccessApprovalFolderSettings %q", d.Id())) - } - - if err := d.Set("name", flattenAccessApprovalFolderSettingsName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading FolderSettings: %s", err) - } - if err := d.Set("notification_emails", flattenAccessApprovalFolderSettingsNotificationEmails(res["notificationEmails"], d, config)); err != nil { - return fmt.Errorf("Error reading FolderSettings: %s", err) - } - if err := d.Set("enrolled_services", flattenAccessApprovalFolderSettingsEnrolledServices(res["enrolledServices"], d, config)); err != nil { - return fmt.Errorf("Error reading FolderSettings: %s", err) - } - if err := d.Set("enrolled_ancestor", flattenAccessApprovalFolderSettingsEnrolledAncestor(res["enrolledAncestor"], d, config)); err != nil { - return fmt.Errorf("Error reading FolderSettings: %s", err) - } - if err := d.Set("active_key_version", flattenAccessApprovalFolderSettingsActiveKeyVersion(res["activeKeyVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading FolderSettings: %s", err) - } - if err := d.Set("ancestor_has_active_key_version", flattenAccessApprovalFolderSettingsAncestorHasActiveKeyVersion(res["ancestorHasActiveKeyVersion"], d, config)); err != nil { - return 
fmt.Errorf("Error reading FolderSettings: %s", err) - } - if err := d.Set("invalid_key_version", flattenAccessApprovalFolderSettingsInvalidKeyVersion(res["invalidKeyVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading FolderSettings: %s", err) - } - - return nil -} - -func resourceAccessApprovalFolderSettingsUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - notificationEmailsProp, err := expandAccessApprovalFolderSettingsNotificationEmails(d.Get("notification_emails"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_emails"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationEmailsProp)) { - obj["notificationEmails"] = notificationEmailsProp - } - enrolledServicesProp, err := expandAccessApprovalFolderSettingsEnrolledServices(d.Get("enrolled_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enrolled_services"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enrolledServicesProp)) { - obj["enrolledServices"] = enrolledServicesProp - } - activeKeyVersionProp, err := expandAccessApprovalFolderSettingsActiveKeyVersion(d.Get("active_key_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("active_key_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, activeKeyVersionProp)) { - obj["activeKeyVersion"] = activeKeyVersionProp - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}folders/{{folder_id}}/accessApprovalSettings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating FolderSettings %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("notification_emails") { - updateMask = append(updateMask, "notificationEmails") 
- } - - if d.HasChange("enrolled_services") { - updateMask = append(updateMask, "enrolledServices") - } - - if d.HasChange("active_key_version") { - updateMask = append(updateMask, "activeKeyVersion") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating FolderSettings %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating FolderSettings %q: %#v", d.Id(), res) - } - - return resourceAccessApprovalFolderSettingsRead(d, meta) -} - -func resourceAccessApprovalFolderSettingsDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["notificationEmails"] = []string{} - obj["enrolledServices"] = []string{} - obj["activeKeyVersion"] = "" - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}folders/{{folder_id}}/accessApprovalSettings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Emptying FolderSettings %q: %#v", d.Id(), obj) - updateMask := []string{} - - updateMask = append(updateMask, "notificationEmails") - updateMask = append(updateMask, "enrolledServices") - updateMask = append(updateMask, "activeKeyVersion") - - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - res, 
err := SendRequestWithTimeout(config, "PATCH", "", url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error emptying FolderSettings %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished emptying FolderSettings %q: %#v", d.Id(), res) - } - - return nil -} - -func resourceAccessApprovalFolderSettingsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "folders/(?P[^/]+)/accessApprovalSettings", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "folders/{{folder_id}}/accessApprovalSettings") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenAccessApprovalFolderSettingsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalFolderSettingsNotificationEmails(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenAccessApprovalFolderSettingsEnrolledServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(accessApprovalEnrolledServicesHash, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "cloud_product": flattenAccessApprovalFolderSettingsEnrolledServicesCloudProduct(original["cloudProduct"], d, config), - "enrollment_level": flattenAccessApprovalFolderSettingsEnrolledServicesEnrollmentLevel(original["enrollmentLevel"], d, config), - }) - } - return 
transformed -} -func flattenAccessApprovalFolderSettingsEnrolledServicesCloudProduct(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalFolderSettingsEnrolledServicesEnrollmentLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalFolderSettingsEnrolledAncestor(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalFolderSettingsActiveKeyVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalFolderSettingsAncestorHasActiveKeyVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalFolderSettingsInvalidKeyVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessApprovalFolderSettingsNotificationEmails(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandAccessApprovalFolderSettingsEnrolledServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCloudProduct, err := expandAccessApprovalFolderSettingsEnrolledServicesCloudProduct(original["cloud_product"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCloudProduct); val.IsValid() && !isEmptyValue(val) { - transformed["cloudProduct"] = transformedCloudProduct - } - - transformedEnrollmentLevel, err := expandAccessApprovalFolderSettingsEnrolledServicesEnrollmentLevel(original["enrollment_level"], d, config) - if err != nil { - return nil, err - } 
else if val := reflect.ValueOf(transformedEnrollmentLevel); val.IsValid() && !isEmptyValue(val) { - transformed["enrollmentLevel"] = transformedEnrollmentLevel - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessApprovalFolderSettingsEnrolledServicesCloudProduct(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessApprovalFolderSettingsEnrolledServicesEnrollmentLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessApprovalFolderSettingsActiveKeyVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_game_server_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_game_server_cluster.go deleted file mode 100644 index b90f0a0387..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_game_server_cluster.go +++ /dev/null @@ -1,542 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func suppressSuffixDiff(_, old, new string, _ *schema.ResourceData) bool { - if strings.HasSuffix(old, new) { - log.Printf("[INFO] suppressing diff as %s is the same as the full path of %s", new, old) - return true - } - - return false -} - -func ResourceGameServicesGameServerCluster() *schema.Resource { - return &schema.Resource{ - Create: resourceGameServicesGameServerClusterCreate, - Read: resourceGameServicesGameServerClusterRead, - Update: resourceGameServicesGameServerClusterUpdate, - Delete: resourceGameServicesGameServerClusterDelete, - - Importer: &schema.ResourceImporter{ - State: resourceGameServicesGameServerClusterImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "cluster_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Required. The resource name of the game server cluster`, - }, - "connection_info": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `Game server cluster connection information. 
This information is used to -manage game server clusters.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "gke_cluster_reference": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `Reference of the GKE cluster where the game servers are installed.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cluster": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: suppressSuffixDiff, - Description: `The full or partial name of a GKE cluster, using one of the following -forms: - -* 'projects/{project_id}/locations/{location}/clusters/{cluster_id}' -* 'locations/{location}/clusters/{cluster_id}' -* '{cluster_id}' - -If project and location are not specified, the project and location of the -GameServerCluster resource are used to generate the full name of the -GKE cluster.`, - }, - }, - }, - }, - "namespace": { - Type: schema.TypeString, - Required: true, - Description: `Namespace designated on the game server cluster where the game server -instances will be created. The namespace existence will be validated -during creation.`, - }, - }, - }, - }, - "realm_id": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The realm id of the game server realm.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Human readable description of the cluster.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `The labels associated with this game server cluster. 
Each label is a -key-value pair.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "location": { - Type: schema.TypeString, - Optional: true, - Description: `Location of the Cluster.`, - Default: "global", - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource id of the game server cluster, eg: - -'projects/{project_id}/locations/{location}/realms/{realm_id}/gameServerClusters/{cluster_id}'. -For example, - -'projects/my-project/locations/{location}/realms/zanzibar/gameServerClusters/my-onprem-cluster'.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGameServicesGameServerClusterCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandGameServicesGameServerClusterLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - connectionInfoProp, err := expandGameServicesGameServerClusterConnectionInfo(d.Get("connection_info"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("connection_info"); !isEmptyValue(reflect.ValueOf(connectionInfoProp)) && (ok || !reflect.DeepEqual(v, connectionInfoProp)) { - obj["connectionInfo"] = connectionInfoProp - } - descriptionProp, err := expandGameServicesGameServerClusterDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, 
"{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters?gameServerClusterId={{cluster_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new GameServerCluster: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GameServerCluster: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating GameServerCluster: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = GameServicesOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating GameServerCluster", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create GameServerCluster: %s", err) - } - - if err := d.Set("name", flattenGameServicesGameServerClusterName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating GameServerCluster %q: %#v", d.Id(), res) - - return resourceGameServicesGameServerClusterRead(d, meta) -} - -func resourceGameServicesGameServerClusterRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GameServerCluster: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("GameServicesGameServerCluster %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading GameServerCluster: %s", err) - } - - if err := d.Set("name", flattenGameServicesGameServerClusterName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading GameServerCluster: %s", err) - } - if err := d.Set("labels", flattenGameServicesGameServerClusterLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading GameServerCluster: %s", err) - } - if err := d.Set("connection_info", flattenGameServicesGameServerClusterConnectionInfo(res["connectionInfo"], d, config)); err != nil { - return fmt.Errorf("Error reading 
GameServerCluster: %s", err) - } - if err := d.Set("description", flattenGameServicesGameServerClusterDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading GameServerCluster: %s", err) - } - - return nil -} - -func resourceGameServicesGameServerClusterUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GameServerCluster: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandGameServicesGameServerClusterLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - descriptionProp, err := expandGameServicesGameServerClusterDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating GameServerCluster %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return 
err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating GameServerCluster %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating GameServerCluster %q: %#v", d.Id(), res) - } - - err = GameServicesOperationWaitTime( - config, res, project, "Updating GameServerCluster", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceGameServicesGameServerClusterRead(d, meta) -} - -func resourceGameServicesGameServerClusterDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GameServerCluster: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting GameServerCluster %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GameServerCluster") - } - - err = GameServicesOperationWaitTime( - config, res, project, "Deleting GameServerCluster", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - 
log.Printf("[DEBUG] Finished deleting GameServerCluster %q: %#v", d.Id(), res) - return nil -} - -func resourceGameServicesGameServerClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/realms/(?P[^/]+)/gameServerClusters/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenGameServicesGameServerClusterName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerClusterLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerClusterConnectionInfo(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["gke_cluster_reference"] = - flattenGameServicesGameServerClusterConnectionInfoGkeClusterReference(original["gkeClusterReference"], d, config) - transformed["namespace"] = - flattenGameServicesGameServerClusterConnectionInfoNamespace(original["namespace"], d, config) - return []interface{}{transformed} -} -func flattenGameServicesGameServerClusterConnectionInfoGkeClusterReference(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["cluster"] = - flattenGameServicesGameServerClusterConnectionInfoGkeClusterReferenceCluster(original["cluster"], d, config) - return []interface{}{transformed} -} -func flattenGameServicesGameServerClusterConnectionInfoGkeClusterReferenceCluster(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerClusterConnectionInfoNamespace(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerClusterDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandGameServicesGameServerClusterLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandGameServicesGameServerClusterConnectionInfo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedGkeClusterReference, err := expandGameServicesGameServerClusterConnectionInfoGkeClusterReference(original["gke_cluster_reference"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGkeClusterReference); val.IsValid() && !isEmptyValue(val) { - transformed["gkeClusterReference"] = transformedGkeClusterReference - } - - transformedNamespace, err := expandGameServicesGameServerClusterConnectionInfoNamespace(original["namespace"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !isEmptyValue(val) { - transformed["namespace"] = transformedNamespace - } - - return 
transformed, nil -} - -func expandGameServicesGameServerClusterConnectionInfoGkeClusterReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCluster, err := expandGameServicesGameServerClusterConnectionInfoGkeClusterReferenceCluster(original["cluster"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCluster); val.IsValid() && !isEmptyValue(val) { - transformed["cluster"] = transformedCluster - } - - return transformed, nil -} - -func expandGameServicesGameServerClusterConnectionInfoGkeClusterReferenceCluster(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerClusterConnectionInfoNamespace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerClusterDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_game_server_deployment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_game_server_deployment.go deleted file mode 100644 index 16826a5b3c..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_game_server_deployment.go +++ /dev/null @@ -1,383 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic 
Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceGameServicesGameServerDeployment() *schema.Resource { - return &schema.Resource{ - Create: resourceGameServicesGameServerDeploymentCreate, - Read: resourceGameServicesGameServerDeploymentRead, - Update: resourceGameServicesGameServerDeploymentUpdate, - Delete: resourceGameServicesGameServerDeploymentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceGameServicesGameServerDeploymentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "deployment_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A unique id for the deployment.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Human readable description of the game server deployment.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `The labels associated with this game server deployment. Each label is a -key-value pair.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "location": { - Type: schema.TypeString, - Optional: true, - Description: `Location of the Deployment.`, - Default: "global", - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource id of the game server deployment, eg: - -'projects/{project_id}/locations/{location}/gameServerDeployments/{deployment_id}'. 
-For example, - -'projects/my-project/locations/{location}/gameServerDeployments/my-deployment'.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGameServicesGameServerDeploymentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandGameServicesGameServerDeploymentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandGameServicesGameServerDeploymentLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments?deploymentId={{deployment_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new GameServerDeployment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GameServerDeployment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating GameServerDeployment: %s", err) - } - - // Store the ID now - 
id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = GameServicesOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating GameServerDeployment", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create GameServerDeployment: %s", err) - } - - if err := d.Set("name", flattenGameServicesGameServerDeploymentName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating GameServerDeployment %q: %#v", d.Id(), res) - - return resourceGameServicesGameServerDeploymentRead(d, meta) -} - -func resourceGameServicesGameServerDeploymentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GameServerDeployment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - 
res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("GameServicesGameServerDeployment %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading GameServerDeployment: %s", err) - } - - if err := d.Set("name", flattenGameServicesGameServerDeploymentName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading GameServerDeployment: %s", err) - } - if err := d.Set("description", flattenGameServicesGameServerDeploymentDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading GameServerDeployment: %s", err) - } - if err := d.Set("labels", flattenGameServicesGameServerDeploymentLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading GameServerDeployment: %s", err) - } - - return nil -} - -func resourceGameServicesGameServerDeploymentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GameServerDeployment: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandGameServicesGameServerDeploymentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandGameServicesGameServerDeploymentLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := 
replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating GameServerDeployment %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating GameServerDeployment %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating GameServerDeployment %q: %#v", d.Id(), res) - } - - err = GameServicesOperationWaitTime( - config, res, project, "Updating GameServerDeployment", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceGameServicesGameServerDeploymentRead(d, meta) -} - -func resourceGameServicesGameServerDeploymentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GameServerDeployment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") - if err != nil { - 
return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting GameServerDeployment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GameServerDeployment") - } - - err = GameServicesOperationWaitTime( - config, res, project, "Deleting GameServerDeployment", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting GameServerDeployment %q: %#v", d.Id(), res) - return nil -} - -func resourceGameServicesGameServerDeploymentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/gameServerDeployments/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenGameServicesGameServerDeploymentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerDeploymentDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerDeploymentLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandGameServicesGameServerDeploymentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandGameServicesGameServerDeploymentLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_game_server_deployment_rollout.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_game_server_deployment_rollout.go deleted file mode 100644 index b0c14292a2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_game_server_deployment_rollout.go +++ /dev/null @@ -1,425 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceGameServicesGameServerDeploymentRollout() *schema.Resource { - return &schema.Resource{ - Create: resourceGameServicesGameServerDeploymentRolloutCreate, - Read: resourceGameServicesGameServerDeploymentRolloutRead, - Update: resourceGameServicesGameServerDeploymentRolloutUpdate, - Delete: resourceGameServicesGameServerDeploymentRolloutDelete, - - Importer: &schema.ResourceImporter{ - State: resourceGameServicesGameServerDeploymentRolloutImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "default_game_server_config": { - Type: schema.TypeString, - Required: true, - Description: `This field points to the game server config that is -applied by default to all realms and clusters. For example, - -'projects/my-project/locations/global/gameServerDeployments/my-game/configs/my-config'.`, - }, - "deployment_id": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The deployment to rollout the new config to. Only 1 rollout must be associated with each deployment.`, - }, - "game_server_config_overrides": { - Type: schema.TypeList, - Optional: true, - Description: `The game_server_config_overrides contains the per game server config -overrides. The overrides are processed in the order they are listed. 
As -soon as a match is found for a cluster, the rest of the list is not -processed.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "config_version": { - Type: schema.TypeString, - Optional: true, - Description: `Version of the configuration.`, - }, - "realms_selector": { - Type: schema.TypeList, - Optional: true, - Description: `Selection by realms.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "realms": { - Type: schema.TypeList, - Optional: true, - Description: `List of realms to match against.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource id of the game server deployment - -eg: 'projects/my-project/locations/global/gameServerDeployments/my-deployment/rollout'.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGameServicesGameServerDeploymentRolloutCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Creating GameServerDeploymentRollout %q: ", d.Id()) - - err = resourceGameServicesGameServerDeploymentRolloutUpdate(d, meta) - if err != nil { - d.SetId("") - return fmt.Errorf("Error trying to create GameServerDeploymentRollout: %s", err) - } - - return nil -} - -func resourceGameServicesGameServerDeploymentRolloutRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{GameServicesBasePath}}projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GameServerDeploymentRollout: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("GameServicesGameServerDeploymentRollout %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading GameServerDeploymentRollout: %s", err) - } - - if err := d.Set("name", flattenGameServicesGameServerDeploymentRolloutName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading GameServerDeploymentRollout: %s", err) - } - if err := d.Set("default_game_server_config", flattenGameServicesGameServerDeploymentRolloutDefaultGameServerConfig(res["defaultGameServerConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading GameServerDeploymentRollout: %s", err) - } - if err := d.Set("game_server_config_overrides", flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverrides(res["gameServerConfigOverrides"], d, config)); err != nil { - return fmt.Errorf("Error reading GameServerDeploymentRollout: %s", err) - } - - return nil -} - -func resourceGameServicesGameServerDeploymentRolloutUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GameServerDeploymentRollout: %s", err) - } - 
billingProject = project - - obj := make(map[string]interface{}) - defaultGameServerConfigProp, err := expandGameServicesGameServerDeploymentRolloutDefaultGameServerConfig(d.Get("default_game_server_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_game_server_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultGameServerConfigProp)) { - obj["defaultGameServerConfig"] = defaultGameServerConfigProp - } - gameServerConfigOverridesProp, err := expandGameServicesGameServerDeploymentRolloutGameServerConfigOverrides(d.Get("game_server_config_overrides"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("game_server_config_overrides"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gameServerConfigOverridesProp)) { - obj["gameServerConfigOverrides"] = gameServerConfigOverridesProp - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating GameServerDeploymentRollout %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("default_game_server_config") { - updateMask = append(updateMask, "defaultGameServerConfig") - } - - if d.HasChange("game_server_config_overrides") { - updateMask = append(updateMask, "gameServerConfigOverrides") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return 
fmt.Errorf("Error updating GameServerDeploymentRollout %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating GameServerDeploymentRollout %q: %#v", d.Id(), res) - } - - err = GameServicesOperationWaitTime( - config, res, project, "Updating GameServerDeploymentRollout", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceGameServicesGameServerDeploymentRolloutRead(d, meta) -} - -func resourceGameServicesGameServerDeploymentRolloutDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GameServerDeploymentRollout: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout?updateMask=defaultGameServerConfig") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting GameServerDeploymentRollout %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GameServerDeploymentRollout") - } - - err = GameServicesOperationWaitTime( - config, res, project, "Deleting GameServerDeploymentRollout", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting GameServerDeploymentRollout %q: %#v", d.Id(), res) - return nil -} - -func resourceGameServicesGameServerDeploymentRolloutImport(d *schema.ResourceData, meta interface{}) 
([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/gameServerDeployments/(?P[^/]+)/rollout", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenGameServicesGameServerDeploymentRolloutName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerDeploymentRolloutDefaultGameServerConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverrides(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "realms_selector": flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelector(original["realmsSelector"], d, config), - "config_version": flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesConfigVersion(original["configVersion"], d, config), - }) - } - return transformed -} -func flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelector(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["realms"] = - flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelectorRealms(original["realms"], d, config) - return []interface{}{transformed} -} -func flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelectorRealms(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesConfigVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandGameServicesGameServerDeploymentRolloutDefaultGameServerConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerDeploymentRolloutGameServerConfigOverrides(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRealmsSelector, err := expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelector(original["realms_selector"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRealmsSelector); val.IsValid() && !isEmptyValue(val) { - transformed["realmsSelector"] = transformedRealmsSelector - } - - transformedConfigVersion, err := expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesConfigVersion(original["config_version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConfigVersion); val.IsValid() && !isEmptyValue(val) { - transformed["configVersion"] = transformedConfigVersion - } - - req = append(req, transformed) - } - return req, nil -} - -func expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelector(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRealms, err := expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelectorRealms(original["realms"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRealms); val.IsValid() && !isEmptyValue(val) { - transformed["realms"] = transformedRealms - } - - return transformed, nil -} - -func expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelectorRealms(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesConfigVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_realm.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_realm.go deleted file mode 100644 index c89935b3da..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_realm.go +++ /dev/null @@ -1,425 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceGameServicesRealm() *schema.Resource { - return &schema.Resource{ - Create: resourceGameServicesRealmCreate, - Read: resourceGameServicesRealmRead, - Update: resourceGameServicesRealmUpdate, - Delete: resourceGameServicesRealmDelete, - - Importer: &schema.ResourceImporter{ - State: resourceGameServicesRealmImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "realm_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `GCP region of the Realm.`, - }, - "time_zone": { - Type: schema.TypeString, - Required: true, - Description: `Required. Time zone where all realm-specific policies are evaluated. The value of -this field must be from the IANA time zone database: -https://www.iana.org/time-zones.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Human readable description of the realm.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `The labels associated with this realm. Each label is a key-value pair.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "location": { - Type: schema.TypeString, - Optional: true, - Description: `Location of the Realm.`, - Default: "global", - }, - "etag": { - Type: schema.TypeString, - Computed: true, - Description: `ETag of the resource.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource id of the realm, of the form: -'projects/{project_id}/locations/{location}/realms/{realm_id}'. 
For -example, 'projects/my-project/locations/{location}/realms/my-realm'.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGameServicesRealmCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandGameServicesRealmLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - timeZoneProp, err := expandGameServicesRealmTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(reflect.ValueOf(timeZoneProp)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - descriptionProp, err := expandGameServicesRealmDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms?realmId={{realm_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Realm: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Realm: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", 
billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Realm: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = GameServicesOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Realm", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Realm: %s", err) - } - - if err := d.Set("name", flattenGameServicesRealmName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Realm %q: %#v", d.Id(), res) - - return resourceGameServicesRealmRead(d, meta) -} - -func resourceGameServicesRealmRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Realm: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - 
} - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("GameServicesRealm %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Realm: %s", err) - } - - if err := d.Set("name", flattenGameServicesRealmName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Realm: %s", err) - } - if err := d.Set("labels", flattenGameServicesRealmLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Realm: %s", err) - } - if err := d.Set("time_zone", flattenGameServicesRealmTimeZone(res["timeZone"], d, config)); err != nil { - return fmt.Errorf("Error reading Realm: %s", err) - } - if err := d.Set("etag", flattenGameServicesRealmEtag(res["etag"], d, config)); err != nil { - return fmt.Errorf("Error reading Realm: %s", err) - } - if err := d.Set("description", flattenGameServicesRealmDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Realm: %s", err) - } - - return nil -} - -func resourceGameServicesRealmUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Realm: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandGameServicesRealmLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - timeZoneProp, err := expandGameServicesRealmTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - descriptionProp, err := expandGameServicesRealmDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Realm %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("time_zone") { - updateMask = append(updateMask, "timeZone") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Realm %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Realm %q: %#v", d.Id(), res) - } - - err = GameServicesOperationWaitTime( - config, res, project, "Updating Realm", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceGameServicesRealmRead(d, meta) -} - -func resourceGameServicesRealmDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Realm: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Realm %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Realm") - } - - err = GameServicesOperationWaitTime( - config, res, project, "Deleting Realm", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Realm %q: %#v", d.Id(), res) - return nil -} - -func resourceGameServicesRealmImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/realms/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenGameServicesRealmName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesRealmLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenGameServicesRealmTimeZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesRealmEtag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesRealmDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandGameServicesRealmLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandGameServicesRealmTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesRealmDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_gke_backup_backup_plan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_gke_backup_backup_plan.go deleted file mode 100644 index 51fe0ee2bf..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_gke_backup_backup_plan.go +++ /dev/null @@ -1,1153 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceGKEBackupBackupPlan() *schema.Resource { - return &schema.Resource{ - Create: resourceGKEBackupBackupPlanCreate, - Read: resourceGKEBackupBackupPlanRead, - Update: resourceGKEBackupBackupPlanUpdate, - Delete: resourceGKEBackupBackupPlanDelete, - - Importer: &schema.ResourceImporter{ - State: resourceGKEBackupBackupPlanImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "cluster": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The source cluster from which Backups will be created via this BackupPlan.`, - }, - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The region of the Backup Plan.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The full name of the BackupPlan Resource.`, - }, - "backup_config": { - Type: schema.TypeList, - Optional: true, - Description: `Defines the configuration of Backups created via this BackupPlan.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "all_namespaces": { - Type: schema.TypeBool, - Optional: true, - Description: `If True, include all namespaced resources.`, - ExactlyOneOf: []string{"backup_config.0.all_namespaces", "backup_config.0.selected_namespaces", "backup_config.0.selected_applications"}, - }, - "encryption_key": { - Type: schema.TypeList, - Optional: true, - Description: `This defines a customer managed encryption key that will be used to encrypt the "config" -portion (the Kubernetes resources) of Backups created via this 
plan.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "gcp_kms_encryption_key": { - Type: schema.TypeString, - Required: true, - Description: `Google Cloud KMS encryption key. Format: projects/*/locations/*/keyRings/*/cryptoKeys/*`, - }, - }, - }, - }, - "include_secrets": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: `This flag specifies whether Kubernetes Secret resources should be included -when they fall into the scope of Backups.`, - }, - "include_volume_data": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: `This flag specifies whether volume data should be backed up when PVCs are -included in the scope of a Backup.`, - }, - "selected_applications": { - Type: schema.TypeList, - Optional: true, - Description: `A list of namespaced Kubernetes Resources.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "namespaced_names": { - Type: schema.TypeList, - Required: true, - Description: `A list of namespaced Kubernetes resources.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `The name of a Kubernetes Resource.`, - }, - "namespace": { - Type: schema.TypeString, - Required: true, - Description: `The namespace of a Kubernetes Resource.`, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{"backup_config.0.all_namespaces", "backup_config.0.selected_namespaces", "backup_config.0.selected_applications"}, - }, - "selected_namespaces": { - Type: schema.TypeList, - Optional: true, - Description: `If set, include just the resources in the listed namespaces.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "namespaces": { - Type: schema.TypeList, - Required: true, - Description: `A list of Kubernetes Namespaces.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - ExactlyOneOf: 
[]string{"backup_config.0.all_namespaces", "backup_config.0.selected_namespaces", "backup_config.0.selected_applications"}, - }, - }, - }, - }, - "backup_schedule": { - Type: schema.TypeList, - Optional: true, - Description: `Defines a schedule for automatic Backup creation via this BackupPlan.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cron_schedule": { - Type: schema.TypeString, - Optional: true, - Description: `A standard cron string that defines a repeating schedule for -creating Backups via this BackupPlan. -If this is defined, then backupRetainDays must also be defined.`, - }, - "paused": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: `This flag denotes whether automatic Backup creation is paused for this BackupPlan.`, - }, - }, - }, - }, - "deactivated": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: `This flag indicates whether this BackupPlan has been deactivated. -Setting this field to True locks the BackupPlan such that no further updates will be allowed -(except deletes), including the deactivated field itself. It also prevents any new Backups -from being created via this BackupPlan (including scheduled Backups).`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `User specified descriptive string for this BackupPlan.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Description: A set of custom labels supplied by the user. -A list of key->value pairs. 
-Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "retention_policy": { - Type: schema.TypeList, - Optional: true, - Description: `RetentionPolicy governs lifecycle of Backups created under this plan.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "backup_delete_lock_days": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Minimum age for a Backup created via this BackupPlan (in days). -Must be an integer value between 0-90 (inclusive). -A Backup created under this BackupPlan will not be deletable -until it reaches Backup's (create time + backup_delete_lock_days). -Updating this field of a BackupPlan does not affect existing Backups. -Backups created after a successful update will inherit this new value.`, - }, - "backup_retain_days": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The default maximum age of a Backup created via this BackupPlan. -This field MUST be an integer value >= 0 and <= 365. If specified, -a Backup created under this BackupPlan will be automatically deleted -after its age reaches (createTime + backupRetainDays). -If not specified, Backups created under this BackupPlan will NOT be -subject to automatic deletion. Updating this field does NOT affect -existing Backups under it. Backups created AFTER a successful update -will automatically pick up the new value. -NOTE: backupRetainDays must be >= backupDeleteLockDays. -If cronSchedule is defined, then this must be <= 360 * the creation interval.]`, - }, - "locked": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: `This flag denotes whether the retention policy of this BackupPlan is locked. 
-If set to True, no further update is allowed on this policy, including -the locked field itself.`, - }, - }, - }, - }, - "etag": { - Type: schema.TypeString, - Computed: true, - Description: `etag is used for optimistic concurrency control as a way to help prevent simultaneous -updates of a backup plan from overwriting each other. It is strongly suggested that -systems make use of the 'etag' in the read-modify-write cycle to perform BackupPlan updates -in order to avoid race conditions: An etag is returned in the response to backupPlans.get, -and systems are expected to put that etag in the request to backupPlans.patch or -backupPlans.delete to ensure that their change will be applied to the same version of the resource.`, - }, - "protected_pod_count": { - Type: schema.TypeInt, - Computed: true, - Description: `The number of Kubernetes Pods backed up in the last successful Backup created via this BackupPlan.`, - }, - "uid": { - Type: schema.TypeString, - Computed: true, - Description: `Server generated, unique identifier of UUID format.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGKEBackupBackupPlanCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandGKEBackupBackupPlanName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandGKEBackupBackupPlanDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { 
- obj["description"] = descriptionProp - } - clusterProp, err := expandGKEBackupBackupPlanCluster(d.Get("cluster"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cluster"); !isEmptyValue(reflect.ValueOf(clusterProp)) && (ok || !reflect.DeepEqual(v, clusterProp)) { - obj["cluster"] = clusterProp - } - retentionPolicyProp, err := expandGKEBackupBackupPlanRetentionPolicy(d.Get("retention_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retention_policy"); !isEmptyValue(reflect.ValueOf(retentionPolicyProp)) && (ok || !reflect.DeepEqual(v, retentionPolicyProp)) { - obj["retentionPolicy"] = retentionPolicyProp - } - labelsProp, err := expandGKEBackupBackupPlanLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - backupScheduleProp, err := expandGKEBackupBackupPlanBackupSchedule(d.Get("backup_schedule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backup_schedule"); !isEmptyValue(reflect.ValueOf(backupScheduleProp)) && (ok || !reflect.DeepEqual(v, backupScheduleProp)) { - obj["backupSchedule"] = backupScheduleProp - } - deactivatedProp, err := expandGKEBackupBackupPlanDeactivated(d.Get("deactivated"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deactivated"); !isEmptyValue(reflect.ValueOf(deactivatedProp)) && (ok || !reflect.DeepEqual(v, deactivatedProp)) { - obj["deactivated"] = deactivatedProp - } - backupConfigProp, err := expandGKEBackupBackupPlanBackupConfig(d.Get("backup_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backup_config"); !isEmptyValue(reflect.ValueOf(backupConfigProp)) && (ok || !reflect.DeepEqual(v, backupConfigProp)) { - obj["backupConfig"] = backupConfigProp - } - - url, err := replaceVars(d, 
config, "{{GKEBackupBasePath}}projects/{{project}}/locations/{{location}}/backupPlans?backupPlanId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new BackupPlan: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BackupPlan: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating BackupPlan: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = GKEBackupOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating BackupPlan", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create BackupPlan: %s", err) - } - - if err := d.Set("name", flattenGKEBackupBackupPlanName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating BackupPlan %q: %#v", d.Id(), res) - - return resourceGKEBackupBackupPlanRead(d, meta) -} - -func resourceGKEBackupBackupPlanRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{GKEBackupBasePath}}projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BackupPlan: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("GKEBackupBackupPlan %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading BackupPlan: %s", err) - } - - if err := d.Set("name", flattenGKEBackupBackupPlanName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading BackupPlan: %s", err) - } - if err := d.Set("uid", flattenGKEBackupBackupPlanUid(res["uid"], d, config)); err != nil { - return fmt.Errorf("Error reading BackupPlan: %s", err) - } - if err := d.Set("description", flattenGKEBackupBackupPlanDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading BackupPlan: %s", err) - } - if err := d.Set("cluster", flattenGKEBackupBackupPlanCluster(res["cluster"], d, config)); err != nil { - return fmt.Errorf("Error reading BackupPlan: %s", err) - 
} - if err := d.Set("retention_policy", flattenGKEBackupBackupPlanRetentionPolicy(res["retentionPolicy"], d, config)); err != nil { - return fmt.Errorf("Error reading BackupPlan: %s", err) - } - if err := d.Set("labels", flattenGKEBackupBackupPlanLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading BackupPlan: %s", err) - } - if err := d.Set("backup_schedule", flattenGKEBackupBackupPlanBackupSchedule(res["backupSchedule"], d, config)); err != nil { - return fmt.Errorf("Error reading BackupPlan: %s", err) - } - if err := d.Set("etag", flattenGKEBackupBackupPlanEtag(res["etag"], d, config)); err != nil { - return fmt.Errorf("Error reading BackupPlan: %s", err) - } - if err := d.Set("deactivated", flattenGKEBackupBackupPlanDeactivated(res["deactivated"], d, config)); err != nil { - return fmt.Errorf("Error reading BackupPlan: %s", err) - } - if err := d.Set("backup_config", flattenGKEBackupBackupPlanBackupConfig(res["backupConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading BackupPlan: %s", err) - } - if err := d.Set("protected_pod_count", flattenGKEBackupBackupPlanProtectedPodCount(res["protectedPodCount"], d, config)); err != nil { - return fmt.Errorf("Error reading BackupPlan: %s", err) - } - - return nil -} - -func resourceGKEBackupBackupPlanUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BackupPlan: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandGKEBackupBackupPlanDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - 
obj["description"] = descriptionProp - } - retentionPolicyProp, err := expandGKEBackupBackupPlanRetentionPolicy(d.Get("retention_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retention_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retentionPolicyProp)) { - obj["retentionPolicy"] = retentionPolicyProp - } - labelsProp, err := expandGKEBackupBackupPlanLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - backupScheduleProp, err := expandGKEBackupBackupPlanBackupSchedule(d.Get("backup_schedule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backup_schedule"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, backupScheduleProp)) { - obj["backupSchedule"] = backupScheduleProp - } - deactivatedProp, err := expandGKEBackupBackupPlanDeactivated(d.Get("deactivated"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deactivated"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deactivatedProp)) { - obj["deactivated"] = deactivatedProp - } - backupConfigProp, err := expandGKEBackupBackupPlanBackupConfig(d.Get("backup_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backup_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, backupConfigProp)) { - obj["backupConfig"] = backupConfigProp - } - - url, err := replaceVars(d, config, "{{GKEBackupBasePath}}projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating BackupPlan %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("retention_policy") { - 
updateMask = append(updateMask, "retentionPolicy") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("backup_schedule") { - updateMask = append(updateMask, "backupSchedule") - } - - if d.HasChange("deactivated") { - updateMask = append(updateMask, "deactivated") - } - - if d.HasChange("backup_config") { - updateMask = append(updateMask, "backupConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating BackupPlan %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating BackupPlan %q: %#v", d.Id(), res) - } - - err = GKEBackupOperationWaitTime( - config, res, project, "Updating BackupPlan", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceGKEBackupBackupPlanRead(d, meta) -} - -func resourceGKEBackupBackupPlanDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for BackupPlan: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{GKEBackupBasePath}}projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting BackupPlan %q", d.Id()) - - // err == nil 
indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "BackupPlan") - } - - err = GKEBackupOperationWaitTime( - config, res, project, "Deleting BackupPlan", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting BackupPlan %q: %#v", d.Id(), res) - return nil -} - -func resourceGKEBackupBackupPlanImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/backupPlans/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenGKEBackupBackupPlanName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenGKEBackupBackupPlanUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanCluster(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanRetentionPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if 
len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["backup_delete_lock_days"] = - flattenGKEBackupBackupPlanRetentionPolicyBackupDeleteLockDays(original["backupDeleteLockDays"], d, config) - transformed["backup_retain_days"] = - flattenGKEBackupBackupPlanRetentionPolicyBackupRetainDays(original["backupRetainDays"], d, config) - transformed["locked"] = - flattenGKEBackupBackupPlanRetentionPolicyLocked(original["locked"], d, config) - return []interface{}{transformed} -} -func flattenGKEBackupBackupPlanRetentionPolicyBackupDeleteLockDays(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenGKEBackupBackupPlanRetentionPolicyBackupRetainDays(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenGKEBackupBackupPlanRetentionPolicyLocked(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanBackupSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - 
transformed := make(map[string]interface{}) - transformed["cron_schedule"] = - flattenGKEBackupBackupPlanBackupScheduleCronSchedule(original["cronSchedule"], d, config) - transformed["paused"] = - flattenGKEBackupBackupPlanBackupSchedulePaused(original["paused"], d, config) - return []interface{}{transformed} -} -func flattenGKEBackupBackupPlanBackupScheduleCronSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanBackupSchedulePaused(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanEtag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanDeactivated(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanBackupConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["include_volume_data"] = - flattenGKEBackupBackupPlanBackupConfigIncludeVolumeData(original["includeVolumeData"], d, config) - transformed["include_secrets"] = - flattenGKEBackupBackupPlanBackupConfigIncludeSecrets(original["includeSecrets"], d, config) - transformed["encryption_key"] = - flattenGKEBackupBackupPlanBackupConfigEncryptionKey(original["encryptionKey"], d, config) - transformed["all_namespaces"] = - flattenGKEBackupBackupPlanBackupConfigAllNamespaces(original["allNamespaces"], d, config) - transformed["selected_namespaces"] = - flattenGKEBackupBackupPlanBackupConfigSelectedNamespaces(original["selectedNamespaces"], d, config) - transformed["selected_applications"] = - flattenGKEBackupBackupPlanBackupConfigSelectedApplications(original["selectedApplications"], d, config) - return []interface{}{transformed} -} -func 
flattenGKEBackupBackupPlanBackupConfigIncludeVolumeData(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanBackupConfigIncludeSecrets(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanBackupConfigEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["gcp_kms_encryption_key"] = - flattenGKEBackupBackupPlanBackupConfigEncryptionKeyGcpKmsEncryptionKey(original["gcpKmsEncryptionKey"], d, config) - return []interface{}{transformed} -} -func flattenGKEBackupBackupPlanBackupConfigEncryptionKeyGcpKmsEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanBackupConfigAllNamespaces(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanBackupConfigSelectedNamespaces(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["namespaces"] = - flattenGKEBackupBackupPlanBackupConfigSelectedNamespacesNamespaces(original["namespaces"], d, config) - return []interface{}{transformed} -} -func flattenGKEBackupBackupPlanBackupConfigSelectedNamespacesNamespaces(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanBackupConfigSelectedApplications(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["namespaced_names"] = - flattenGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNames(original["namespacedNames"], d, config) - return []interface{}{transformed} -} -func flattenGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "namespace": flattenGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesNamespace(original["namespace"], d, config), - "name": flattenGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesName(original["name"], d, config), - }) - } - return transformed -} -func flattenGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesNamespace(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEBackupBackupPlanProtectedPodCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandGKEBackupBackupPlanName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, 
"projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") -} - -func expandGKEBackupBackupPlanDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanCluster(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanRetentionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackupDeleteLockDays, err := expandGKEBackupBackupPlanRetentionPolicyBackupDeleteLockDays(original["backup_delete_lock_days"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBackupDeleteLockDays); val.IsValid() && !isEmptyValue(val) { - transformed["backupDeleteLockDays"] = transformedBackupDeleteLockDays - } - - transformedBackupRetainDays, err := expandGKEBackupBackupPlanRetentionPolicyBackupRetainDays(original["backup_retain_days"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBackupRetainDays); val.IsValid() && !isEmptyValue(val) { - transformed["backupRetainDays"] = transformedBackupRetainDays - } - - transformedLocked, err := expandGKEBackupBackupPlanRetentionPolicyLocked(original["locked"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLocked); val.IsValid() && !isEmptyValue(val) { - transformed["locked"] = transformedLocked - } - - return transformed, nil -} - -func expandGKEBackupBackupPlanRetentionPolicyBackupDeleteLockDays(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanRetentionPolicyBackupRetainDays(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanRetentionPolicyLocked(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandGKEBackupBackupPlanBackupSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCronSchedule, err := expandGKEBackupBackupPlanBackupScheduleCronSchedule(original["cron_schedule"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCronSchedule); val.IsValid() && !isEmptyValue(val) { - transformed["cronSchedule"] = transformedCronSchedule - } - - transformedPaused, err := expandGKEBackupBackupPlanBackupSchedulePaused(original["paused"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPaused); val.IsValid() && !isEmptyValue(val) { - transformed["paused"] = transformedPaused - } - - return transformed, nil -} - -func expandGKEBackupBackupPlanBackupScheduleCronSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanBackupSchedulePaused(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanDeactivated(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanBackupConfig(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIncludeVolumeData, err := expandGKEBackupBackupPlanBackupConfigIncludeVolumeData(original["include_volume_data"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIncludeVolumeData); val.IsValid() && !isEmptyValue(val) { - transformed["includeVolumeData"] = transformedIncludeVolumeData - } - - transformedIncludeSecrets, err := expandGKEBackupBackupPlanBackupConfigIncludeSecrets(original["include_secrets"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIncludeSecrets); val.IsValid() && !isEmptyValue(val) { - transformed["includeSecrets"] = transformedIncludeSecrets - } - - transformedEncryptionKey, err := expandGKEBackupBackupPlanBackupConfigEncryptionKey(original["encryption_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEncryptionKey); val.IsValid() && !isEmptyValue(val) { - transformed["encryptionKey"] = transformedEncryptionKey - } - - transformedAllNamespaces, err := expandGKEBackupBackupPlanBackupConfigAllNamespaces(original["all_namespaces"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAllNamespaces); val.IsValid() && !isEmptyValue(val) { - transformed["allNamespaces"] = transformedAllNamespaces - } - - transformedSelectedNamespaces, err := expandGKEBackupBackupPlanBackupConfigSelectedNamespaces(original["selected_namespaces"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSelectedNamespaces); val.IsValid() && !isEmptyValue(val) { - transformed["selectedNamespaces"] = transformedSelectedNamespaces - } - - transformedSelectedApplications, err := 
expandGKEBackupBackupPlanBackupConfigSelectedApplications(original["selected_applications"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSelectedApplications); val.IsValid() && !isEmptyValue(val) { - transformed["selectedApplications"] = transformedSelectedApplications - } - - return transformed, nil -} - -func expandGKEBackupBackupPlanBackupConfigIncludeVolumeData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanBackupConfigIncludeSecrets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanBackupConfigEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedGcpKmsEncryptionKey, err := expandGKEBackupBackupPlanBackupConfigEncryptionKeyGcpKmsEncryptionKey(original["gcp_kms_encryption_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGcpKmsEncryptionKey); val.IsValid() && !isEmptyValue(val) { - transformed["gcpKmsEncryptionKey"] = transformedGcpKmsEncryptionKey - } - - return transformed, nil -} - -func expandGKEBackupBackupPlanBackupConfigEncryptionKeyGcpKmsEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanBackupConfigAllNamespaces(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanBackupConfigSelectedNamespaces(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNamespaces, err := expandGKEBackupBackupPlanBackupConfigSelectedNamespacesNamespaces(original["namespaces"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNamespaces); val.IsValid() && !isEmptyValue(val) { - transformed["namespaces"] = transformedNamespaces - } - - return transformed, nil -} - -func expandGKEBackupBackupPlanBackupConfigSelectedNamespacesNamespaces(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanBackupConfigSelectedApplications(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNamespacedNames, err := expandGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNames(original["namespaced_names"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNamespacedNames); val.IsValid() && !isEmptyValue(val) { - transformed["namespacedNames"] = transformedNamespacedNames - } - - return transformed, nil -} - -func expandGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNamespace, err := expandGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesNamespace(original["namespace"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !isEmptyValue(val) { - transformed["namespace"] 
= transformedNamespace - } - - transformedName, err := expandGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesNamespace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_gke_hub_membership.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_gke_hub_membership.go deleted file mode 100644 index 960c08d2f1..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_gke_hub_membership.go +++ /dev/null @@ -1,546 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func suppressGkeHubEndpointSelfLinkDiff(_, old, new string, _ *schema.ResourceData) bool { - // The custom expander injects //container.googleapis.com/ if a selflink is supplied. - selfLink := strings.TrimPrefix(old, "//container.googleapis.com/") - if selfLink == new { - return true - } - - return false -} - -func ResourceGKEHubMembership() *schema.Resource { - return &schema.Resource{ - Create: resourceGKEHubMembershipCreate, - Read: resourceGKEHubMembershipRead, - Update: resourceGKEHubMembershipUpdate, - Delete: resourceGKEHubMembershipDelete, - - Importer: &schema.ResourceImporter{ - State: resourceGKEHubMembershipImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "membership_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The client-provided identifier of the membership.`, - }, - "authority": { - Type: schema.TypeList, - Optional: true, - Description: `Authority encodes how Google will recognize identities from this Membership. -See the workload identity documentation for more details: -https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "issuer": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A JSON Web Token (JWT) issuer URI. 'issuer' must start with 'https://' and // be a valid -with length <2000 characters. For example: 'https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster' (must be 'locations' rather than 'zones'). 
If the cluster is provisioned with Terraform, this is '"https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}"'.`, - }, - }, - }, - }, - "endpoint": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `If this Membership is a Kubernetes API server hosted on GKE, this is a self link to its GCP resource.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "gke_cluster": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `If this Membership is a Kubernetes API server hosted on GKE, this is a self link to its GCP resource.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "resource_link": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: suppressGkeHubEndpointSelfLinkDiff, - Description: `Self-link of the GCP resource for the GKE cluster. -For example: '//container.googleapis.com/projects/my-project/zones/us-west1-a/clusters/my-cluster'. -It can be at the most 1000 characters in length. 
If the cluster is provisioned with Terraform, -this can be '"//container.googleapis.com/${google_container_cluster.my-cluster.id}"' or -'google_container_cluster.my-cluster.id'.`, - }, - }, - }, - }, - }, - }, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Labels to apply to this membership.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The unique identifier of the membership.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGKEHubMembershipCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandGKEHubMembershipLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - endpointProp, err := expandGKEHubMembershipEndpoint(d.Get("endpoint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("endpoint"); !isEmptyValue(reflect.ValueOf(endpointProp)) && (ok || !reflect.DeepEqual(v, endpointProp)) { - obj["endpoint"] = endpointProp - } - authorityProp, err := expandGKEHubMembershipAuthority(d.Get("authority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authority"); !isEmptyValue(reflect.ValueOf(authorityProp)) && (ok || !reflect.DeepEqual(v, authorityProp)) { - obj["authority"] = authorityProp - } - - url, err := replaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships?membershipId={{membership_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Membership: %#v", 
obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Membership: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Membership: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/memberships/{{membership_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = GKEHubOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Membership", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Membership: %s", err) - } - - if err := d.Set("name", flattenGKEHubMembershipName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/global/memberships/{{membership_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Membership %q: %#v", d.Id(), res) - - return resourceGKEHubMembershipRead(d, meta) -} - -func resourceGKEHubMembershipRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships/{{membership_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Membership: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("GKEHubMembership %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Membership: %s", err) - } - - if err := d.Set("name", flattenGKEHubMembershipName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Membership: %s", err) - } - if err := d.Set("labels", flattenGKEHubMembershipLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Membership: %s", err) - } - if err := d.Set("endpoint", flattenGKEHubMembershipEndpoint(res["endpoint"], d, config)); err != nil { - return fmt.Errorf("Error reading Membership: %s", err) - } - if err := d.Set("authority", flattenGKEHubMembershipAuthority(res["authority"], d, config)); err != nil { - return fmt.Errorf("Error reading Membership: %s", err) - } - - 
return nil -} - -func resourceGKEHubMembershipUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Membership: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandGKEHubMembershipLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - authorityProp, err := expandGKEHubMembershipAuthority(d.Get("authority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authority"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, authorityProp)) { - obj["authority"] = authorityProp - } - - url, err := replaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships/{{membership_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Membership %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("authority") { - updateMask = append(updateMask, "authority") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error 
updating Membership %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Membership %q: %#v", d.Id(), res) - } - - err = GKEHubOperationWaitTime( - config, res, project, "Updating Membership", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceGKEHubMembershipRead(d, meta) -} - -func resourceGKEHubMembershipDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Membership: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships/{{membership_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Membership %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Membership") - } - - err = GKEHubOperationWaitTime( - config, res, project, "Deleting Membership", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Membership %q: %#v", d.Id(), res) - return nil -} - -func resourceGKEHubMembershipImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/memberships/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, 
err := replaceVars(d, config, "projects/{{project}}/locations/global/memberships/{{membership_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenGKEHubMembershipName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEHubMembershipLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEHubMembershipEndpoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["gke_cluster"] = - flattenGKEHubMembershipEndpointGkeCluster(original["gkeCluster"], d, config) - return []interface{}{transformed} -} -func flattenGKEHubMembershipEndpointGkeCluster(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resource_link"] = - flattenGKEHubMembershipEndpointGkeClusterResourceLink(original["resourceLink"], d, config) - return []interface{}{transformed} -} -func flattenGKEHubMembershipEndpointGkeClusterResourceLink(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEHubMembershipAuthority(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["issuer"] = - flattenGKEHubMembershipAuthorityIssuer(original["issuer"], d, config) - return []interface{}{transformed} -} -func flattenGKEHubMembershipAuthorityIssuer(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - return v -} - -func expandGKEHubMembershipLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandGKEHubMembershipEndpoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedGkeCluster, err := expandGKEHubMembershipEndpointGkeCluster(original["gke_cluster"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGkeCluster); val.IsValid() && !isEmptyValue(val) { - transformed["gkeCluster"] = transformedGkeCluster - } - - return transformed, nil -} - -func expandGKEHubMembershipEndpointGkeCluster(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResourceLink, err := expandGKEHubMembershipEndpointGkeClusterResourceLink(original["resource_link"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedResourceLink); val.IsValid() && !isEmptyValue(val) { - transformed["resourceLink"] = transformedResourceLink - } - - return transformed, nil -} - -func expandGKEHubMembershipEndpointGkeClusterResourceLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if strings.HasPrefix(v.(string), "//") { - return v, nil - } else { - v = "//container.googleapis.com/" + v.(string) - return v, nil - } -} - -func expandGKEHubMembershipAuthority(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIssuer, err := expandGKEHubMembershipAuthorityIssuer(original["issuer"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIssuer); val.IsValid() && !isEmptyValue(val) { - transformed["issuer"] = transformedIssuer - } - - return transformed, nil -} - -func expandGKEHubMembershipAuthorityIssuer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_folder_organization_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_folder_organization_policy.go deleted file mode 100644 index 3844076fdf..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_folder_organization_policy.go +++ /dev/null @@ -1,184 +0,0 @@ -package google - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -func ResourceGoogleFolderOrganizationPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceGoogleFolderOrganizationPolicyCreate, - Read: resourceGoogleFolderOrganizationPolicyRead, - Update: resourceGoogleFolderOrganizationPolicyUpdate, - Delete: resourceGoogleFolderOrganizationPolicyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceFolderOrgPolicyImporter, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(4 * time.Minute), - Update: schema.DefaultTimeout(4 * time.Minute), - Read: schema.DefaultTimeout(4 * time.Minute), - Delete: schema.DefaultTimeout(4 * time.Minute), - }, - - Schema: 
mergeSchemas( - schemaOrganizationPolicy, - map[string]*schema.Schema{ - "folder": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name of the folder to set the policy for. Its format is folders/{folder_id}.`, - }, - }, - ), - UseJSONNumber: true, - } -} - -func resourceFolderOrgPolicyImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "folders/(?P[^/]+)/constraints/(?P[^/]+)", - "folders/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)"}, - d, config); err != nil { - return nil, err - } - - if d.Get("folder") == "" || d.Get("constraint") == "" { - return nil, fmt.Errorf("unable to parse folder or constraint. Check import formats") - } - - if err := d.Set("folder", "folders/"+d.Get("folder").(string)); err != nil { - return nil, fmt.Errorf("Error setting folder: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func resourceGoogleFolderOrganizationPolicyCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId(fmt.Sprintf("%s/%s", d.Get("folder"), d.Get("constraint"))) - - if isOrganizationPolicyUnset(d) { - return resourceGoogleFolderOrganizationPolicyDelete(d, meta) - } - - if err := setFolderOrganizationPolicy(d, meta); err != nil { - return err - } - - return resourceGoogleFolderOrganizationPolicyRead(d, meta) -} - -func resourceGoogleFolderOrganizationPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - folder := canonicalFolderId(d.Get("folder").(string)) - - var policy *cloudresourcemanager.OrgPolicy - err = RetryTimeDuration(func() (getErr error) { - policy, getErr = config.NewResourceManagerClient(userAgent).Folders.GetOrgPolicy(folder, &cloudresourcemanager.GetOrgPolicyRequest{ - Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), - }).Do() 
- return getErr - }, d.Timeout(schema.TimeoutRead)) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Organization policy for %s", folder)) - } - - if err := d.Set("constraint", policy.Constraint); err != nil { - return fmt.Errorf("Error setting constraint: %s", err) - } - if err := d.Set("boolean_policy", flattenBooleanOrganizationPolicy(policy.BooleanPolicy)); err != nil { - return fmt.Errorf("Error setting boolean_policy: %s", err) - } - if err := d.Set("list_policy", flattenListOrganizationPolicy(policy.ListPolicy)); err != nil { - return fmt.Errorf("Error setting list_policy: %s", err) - } - if err := d.Set("restore_policy", flattenRestoreOrganizationPolicy(policy.RestoreDefault)); err != nil { - return fmt.Errorf("Error setting restore_policy: %s", err) - } - if err := d.Set("version", policy.Version); err != nil { - return fmt.Errorf("Error setting version: %s", err) - } - if err := d.Set("etag", policy.Etag); err != nil { - return fmt.Errorf("Error setting etag: %s", err) - } - if err := d.Set("update_time", policy.UpdateTime); err != nil { - return fmt.Errorf("Error setting update_time: %s", err) - } - - return nil -} - -func resourceGoogleFolderOrganizationPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - if isOrganizationPolicyUnset(d) { - return resourceGoogleFolderOrganizationPolicyDelete(d, meta) - } - - if err := setFolderOrganizationPolicy(d, meta); err != nil { - return err - } - - return resourceGoogleFolderOrganizationPolicyRead(d, meta) -} - -func resourceGoogleFolderOrganizationPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - folder := canonicalFolderId(d.Get("folder").(string)) - - return RetryTimeDuration(func() (delErr error) { - _, delErr = config.NewResourceManagerClient(userAgent).Folders.ClearOrgPolicy(folder, &cloudresourcemanager.ClearOrgPolicyRequest{ - 
Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), - }).Do() - return delErr - }, d.Timeout(schema.TimeoutDelete)) -} - -func setFolderOrganizationPolicy(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - folder := canonicalFolderId(d.Get("folder").(string)) - - listPolicy, err := expandListOrganizationPolicy(d.Get("list_policy").([]interface{})) - if err != nil { - return err - } - - restoreDefault, err := expandRestoreOrganizationPolicy(d.Get("restore_policy").([]interface{})) - if err != nil { - return err - } - - return RetryTimeDuration(func() (setErr error) { - _, setErr = config.NewResourceManagerClient(userAgent).Folders.SetOrgPolicy(folder, &cloudresourcemanager.SetOrgPolicyRequest{ - Policy: &cloudresourcemanager.OrgPolicy{ - Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), - BooleanPolicy: expandBooleanOrganizationPolicy(d.Get("boolean_policy").([]interface{})), - ListPolicy: listPolicy, - RestoreDefault: restoreDefault, - Version: int64(d.Get("version").(int)), - Etag: d.Get("etag").(string), - }, - }).Do() - return setErr - }, d.Timeout(schema.TimeoutCreate)) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project.go deleted file mode 100644 index 87f5da2e25..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project.go +++ /dev/null @@ -1,742 +0,0 @@ -package google - -import ( - "context" - "fmt" - "log" - "net/http" - "regexp" - "strconv" - "strings" - "time" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudbilling/v1" - 
"google.golang.org/api/cloudresourcemanager/v1" - "google.golang.org/api/googleapi" - "google.golang.org/api/serviceusage/v1" -) - -type ServicesCall interface { - Header() http.Header - Do(opts ...googleapi.CallOption) (*serviceusage.Operation, error) -} - -// ResourceGoogleProject returns a *schema.Resource that allows a customer -// to declare a Google Cloud Project resource. -func ResourceGoogleProject() *schema.Resource { - return &schema.Resource{ - SchemaVersion: 1, - - Create: resourceGoogleProjectCreate, - Read: resourceGoogleProjectRead, - Update: resourceGoogleProjectUpdate, - Delete: resourceGoogleProjectDelete, - - Importer: &schema.ResourceImporter{ - State: resourceProjectImportState, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Read: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - MigrateState: resourceGoogleProjectMigrateState, - - Schema: map[string]*schema.Schema{ - "project_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateProjectID(), - Description: `The project ID. Changing this forces a new project to be created.`, - }, - "skip_delete": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - Description: `If true, the Terraform resource can be deleted without deleting the Project via the Google API.`, - }, - "auto_create_network": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: `Create the 'default' network automatically. Default true. If set to false, the default network will be deleted. 
Note that, for quota purposes, you will still need to have 1 network slot available to create the project successfully, even if you set auto_create_network to false, since the network will exist momentarily.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateProjectName(), - Description: `The display name of the project.`, - }, - "org_id": { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"folder_id"}, - Description: `The numeric ID of the organization this project belongs to. Changing this forces a new project to be created. Only one of org_id or folder_id may be specified. If the org_id is specified then the project is created at the top level. Changing this forces the project to be migrated to the newly specified organization.`, - }, - "folder_id": { - Type: schema.TypeString, - Optional: true, - StateFunc: parseFolderId, - ConflictsWith: []string{"org_id"}, - Description: `The numeric ID of the folder this project should be created under. Only one of org_id or folder_id may be specified. If the folder_id is specified, then the project is created under the specified folder. Changing this forces the project to be migrated to the newly specified folder.`, - }, - "number": { - Type: schema.TypeString, - Computed: true, - Description: `The numeric identifier of the project.`, - }, - "billing_account": { - Type: schema.TypeString, - Optional: true, - Description: `The alphanumeric ID of the billing account this project belongs to. The user or service account performing this operation with Terraform must have Billing Account Administrator privileges (roles/billing.admin) in the organization. 
See Google Cloud Billing API Access Control for more details.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `A set of key/value label pairs to assign to the project.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - if err = resourceGoogleProjectCheckPreRequisites(config, d, userAgent); err != nil { - return fmt.Errorf("failed pre-requisites: %v", err) - } - - var pid string - pid = d.Get("project_id").(string) - - log.Printf("[DEBUG]: Creating new project %q", pid) - project := &cloudresourcemanager.Project{ - ProjectId: pid, - Name: d.Get("name").(string), - } - - if err = getParentResourceId(d, project); err != nil { - return err - } - - if _, ok := d.GetOk("labels"); ok { - project.Labels = expandLabels(d) - } - - var op *cloudresourcemanager.Operation - err = RetryTimeDuration(func() (reqErr error) { - op, reqErr = config.NewResourceManagerClient(userAgent).Projects.Create(project).Do() - return reqErr - }, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("error creating project %s (%s): %s. 
"+ - "If you received a 403 error, make sure you have the"+ - " `roles/resourcemanager.projectCreator` permission", - project.ProjectId, project.Name, err) - } - - d.SetId(fmt.Sprintf("projects/%s", pid)) - - // Wait for the operation to complete - opAsMap, err := ConvertToMap(op) - if err != nil { - return err - } - - waitErr := ResourceManagerOperationWaitTime(config, opAsMap, "creating folder", userAgent, d.Timeout(schema.TimeoutCreate)) - if waitErr != nil { - // The resource wasn't actually created - d.SetId("") - return waitErr - } - - // Set the billing account - if _, ok := d.GetOk("billing_account"); ok { - err = updateProjectBillingAccount(d, config, userAgent) - if err != nil { - return err - } - } - - // Sleep for 10s, letting the billing account settle before other resources - // try to use this project. - time.Sleep(10 * time.Second) - - err = resourceGoogleProjectRead(d, meta) - if err != nil { - return err - } - - // There's no such thing as "don't auto-create network", only "delete the network - // post-creation" - but that's what it's called in the UI and let's not confuse - // people if we don't have to. The GCP Console is doing the same thing - creating - // a network and deleting it in the background. - if !d.Get("auto_create_network").(bool) { - // The compute API has to be enabled before we can delete a network. 
- - billingProject := project.ProjectId - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - if err = EnableServiceUsageProjectServices([]string{"compute.googleapis.com"}, project.ProjectId, billingProject, userAgent, config, d.Timeout(schema.TimeoutCreate)); err != nil { - return errwrap.Wrapf("Error enabling the Compute Engine API required to delete the default network: {{err}} ", err) - } - - if err = forceDeleteComputeNetwork(d, config, project.ProjectId, "default"); err != nil { - if IsGoogleApiErrorWithCode(err, 404) { - log.Printf("[DEBUG] Default network not found for project %q, no need to delete it", project.ProjectId) - } else { - return errwrap.Wrapf(fmt.Sprintf("Error deleting default network in project %s: {{err}}", project.ProjectId), err) - } - } - } - return nil -} - -func resourceGoogleProjectCheckPreRequisites(config *Config, d *schema.ResourceData, userAgent string) error { - ib, ok := d.GetOk("billing_account") - if !ok { - return nil - } - ba := "billingAccounts/" + ib.(string) - const perm = "billing.resourceAssociations.create" - req := &cloudbilling.TestIamPermissionsRequest{ - Permissions: []string{perm}, - } - resp, err := config.NewBillingClient(userAgent).BillingAccounts.TestIamPermissions(ba, req).Do() - if err != nil { - return fmt.Errorf("failed to check permissions on billing account %q: %v", ba, err) - } - if !stringInSlice(resp.Permissions, perm) { - return fmt.Errorf("missing permission on %q: %v", ba, perm) - } - if !d.Get("auto_create_network").(bool) { - call := config.NewServiceUsageClient(userAgent).Services.Get("projects/00000000000/services/serviceusage.googleapis.com") - if config.UserProjectOverride { - if billingProject, err := getBillingProject(d, config); err == nil { - call.Header().Add("X-Goog-User-Project", billingProject) - } - } - _, err := call.Do() - switch { - // We are querying a dummy project since the call 
is already coming from the quota project. - // If the API is enabled we get a not found message or accessNotConfigured if API is not enabled. - case err.Error() == "googleapi: Error 403: Project '00000000000' not found or permission denied., forbidden": - return nil - case strings.Contains(err.Error(), "accessNotConfigured"): - return fmt.Errorf("API serviceusage not enabled.\nFound error: %v", err) - } - } - return nil -} - -func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - parts := strings.Split(d.Id(), "/") - pid := parts[len(parts)-1] - - p, err := readGoogleProject(d, config, userAgent) - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 403 && strings.Contains(gerr.Message, "caller does not have permission") { - return fmt.Errorf("the user does not have permission to access Project %q or it may not exist", pid) - } - return handleNotFoundError(err, d, fmt.Sprintf("Project %q", pid)) - } - - // If the project has been deleted from outside Terraform, remove it from state file. 
- if p.LifecycleState != "ACTIVE" { - log.Printf("[WARN] Removing project '%s' because its state is '%s' (requires 'ACTIVE').", pid, p.LifecycleState) - d.SetId("") - return nil - } - - if err := d.Set("project_id", pid); err != nil { - return fmt.Errorf("Error setting project_id: %s", err) - } - if err := d.Set("number", strconv.FormatInt(p.ProjectNumber, 10)); err != nil { - return fmt.Errorf("Error setting number: %s", err) - } - if err := d.Set("name", p.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("labels", p.Labels); err != nil { - return fmt.Errorf("Error setting labels: %s", err) - } - - if p.Parent != nil { - switch p.Parent.Type { - case "organization": - if err := d.Set("org_id", p.Parent.Id); err != nil { - return fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("folder_id", ""); err != nil { - return fmt.Errorf("Error setting folder_id: %s", err) - } - case "folder": - if err := d.Set("folder_id", p.Parent.Id); err != nil { - return fmt.Errorf("Error setting folder_id: %s", err) - } - if err := d.Set("org_id", ""); err != nil { - return fmt.Errorf("Error setting org_id: %s", err) - } - } - } - - var ba *cloudbilling.ProjectBillingInfo - err = RetryTimeDuration(func() (reqErr error) { - ba, reqErr = config.NewBillingClient(userAgent).Projects.GetBillingInfo(PrefixedProject(pid)).Do() - return reqErr - }, d.Timeout(schema.TimeoutRead)) - // Read the billing account - if err != nil && !isApiNotEnabledError(err) { - return fmt.Errorf("Error reading billing account for project %q: %v", PrefixedProject(pid), err) - } else if isApiNotEnabledError(err) { - log.Printf("[WARN] Billing info API not enabled, please enable it to read billing info about project %q: %s", pid, err.Error()) - } else if ba.BillingAccountName != "" { - // BillingAccountName is contains the resource name of the billing account - // associated with the project, if any. For example, - // `billingAccounts/012345-567890-ABCDEF`. 
We care about the ID and not - // the `billingAccounts/` prefix, so we need to remove that. If the - // prefix ever changes, we'll validate to make sure it's something we - // recognize. - _ba := strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") - if ba.BillingAccountName == _ba { - return fmt.Errorf("Error parsing billing account for project %q. Expected value to begin with 'billingAccounts/' but got %s", PrefixedProject(pid), ba.BillingAccountName) - } - if err := d.Set("billing_account", _ba); err != nil { - return fmt.Errorf("Error setting billing_account: %s", err) - } - } - - return nil -} - -func PrefixedProject(pid string) string { - return "projects/" + pid -} - -func getParentResourceId(d *schema.ResourceData, p *cloudresourcemanager.Project) error { - orgId := d.Get("org_id").(string) - folderId := d.Get("folder_id").(string) - - if orgId != "" && folderId != "" { - return fmt.Errorf("'org_id' and 'folder_id' cannot be both set.") - } - - if orgId != "" { - p.Parent = &cloudresourcemanager.ResourceId{ - Id: orgId, - Type: "organization", - } - } - - if folderId != "" { - p.Parent = &cloudresourcemanager.ResourceId{ - Id: parseFolderId(folderId), - Type: "folder", - } - } - - return nil -} - -func parseFolderId(v interface{}) string { - folderId := v.(string) - if strings.HasPrefix(folderId, "folders/") { - return folderId[8:] - } - return folderId -} - -func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - parts := strings.Split(d.Id(), "/") - pid := parts[len(parts)-1] - project_name := d.Get("name").(string) - - // Read the project - // we need the project even though refresh has already been called - // because the API doesn't support patch, so we need the actual object - p, err := readGoogleProject(d, config, userAgent) - if err != nil { - if IsGoogleApiErrorWithCode(err, 404) { - 
return fmt.Errorf("Project %q does not exist.", pid) - } - return fmt.Errorf("Error checking project %q: %s", pid, err) - } - - d.Partial(true) - - // Project display name has changed - if ok := d.HasChange("name"); ok { - p.Name = project_name - // Do update on project - if p, err = updateProject(config, d, project_name, userAgent, p); err != nil { - return err - } - } - - // Project parent has changed - if d.HasChange("org_id") || d.HasChange("folder_id") { - if err := getParentResourceId(d, p); err != nil { - return err - } - - // Do update on project - if p, err = updateProject(config, d, project_name, userAgent, p); err != nil { - return err - } - } - - // Billing account has changed - if ok := d.HasChange("billing_account"); ok { - err = updateProjectBillingAccount(d, config, userAgent) - if err != nil { - return err - } - } - - // Project Labels have changed - if ok := d.HasChange("labels"); ok { - p.Labels = expandLabels(d) - - // Do Update on project - if p, err = updateProject(config, d, project_name, userAgent, p); err != nil { - return err - } - } - - d.Partial(false) - return resourceGoogleProjectRead(d, meta) -} - -func updateProject(config *Config, d *schema.ResourceData, projectName, userAgent string, desiredProject *cloudresourcemanager.Project) (*cloudresourcemanager.Project, error) { - var newProj *cloudresourcemanager.Project - if err := RetryTimeDuration(func() (updateErr error) { - newProj, updateErr = config.NewResourceManagerClient(userAgent).Projects.Update(desiredProject.ProjectId, desiredProject).Do() - return updateErr - }, d.Timeout(schema.TimeoutUpdate)); err != nil { - return nil, fmt.Errorf("Error updating project %q: %s", projectName, err) - } - return newProj, nil -} - -func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - // Only delete projects if skip_delete isn't set - if 
!d.Get("skip_delete").(bool) { - parts := strings.Split(d.Id(), "/") - pid := parts[len(parts)-1] - if err := RetryTimeDuration(func() error { - _, delErr := config.NewResourceManagerClient(userAgent).Projects.Delete(pid).Do() - return delErr - }, d.Timeout(schema.TimeoutDelete)); err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Project %s", pid)) - } - } - d.SetId("") - return nil -} - -func resourceProjectImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - parts := strings.Split(d.Id(), "/") - pid := parts[len(parts)-1] - // Prevent importing via project number, this will cause issues later - matched, err := regexp.MatchString("^\\d+$", pid) - if err != nil { - return nil, fmt.Errorf("Error matching project %q: %s", pid, err) - } - - if matched { - return nil, fmt.Errorf("Error importing project %q, please use project_id", pid) - } - - // Ensure the id format includes projects/ - d.SetId(fmt.Sprintf("projects/%s", pid)) - - // Explicitly set to default as a workaround for `ImportStateVerify` tests, and so that users - // don't see a diff immediately after import. - if err := d.Set("auto_create_network", true); err != nil { - return nil, fmt.Errorf("Error setting auto_create_network: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -// Delete a compute network along with the firewall rules inside it. -func forceDeleteComputeNetwork(d *schema.ResourceData, config *Config, projectId, networkName string) error { - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - // Read the network from the API so we can get the correct self link format. 
We can't construct it from the - // base path because it might not line up exactly (compute.googleapis.com vs www.googleapis.com) - net, err := config.NewComputeClient(userAgent).Networks.Get(projectId, networkName).Do() - if err != nil { - return err - } - - token := "" - for paginate := true; paginate; { - filter := fmt.Sprintf("network eq %s", net.SelfLink) - resp, err := config.NewComputeClient(userAgent).Firewalls.List(projectId).Filter(filter).Do() - if err != nil { - return errwrap.Wrapf("Error listing firewall rules in proj: {{err}}", err) - } - - log.Printf("[DEBUG] Found %d firewall rules in %q network", len(resp.Items), networkName) - - for _, firewall := range resp.Items { - op, err := config.NewComputeClient(userAgent).Firewalls.Delete(projectId, firewall.Name).Do() - if err != nil { - return errwrap.Wrapf("Error deleting firewall: {{err}}", err) - } - err = ComputeOperationWaitTime(config, op, projectId, "Deleting Firewall", userAgent, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return err - } - } - - token = resp.NextPageToken - paginate = token != "" - } - - return deleteComputeNetwork(projectId, networkName, userAgent, config) -} - -func updateProjectBillingAccount(d *schema.ResourceData, config *Config, userAgent string) error { - parts := strings.Split(d.Id(), "/") - pid := parts[len(parts)-1] - name := d.Get("billing_account").(string) - ba := &cloudbilling.ProjectBillingInfo{} - // If we're unlinking an existing billing account, an empty request does that, not an empty-string billing account. 
- if name != "" { - ba.BillingAccountName = "billingAccounts/" + name - } - updateBillingInfoFunc := func() error { - _, err := config.NewBillingClient(userAgent).Projects.UpdateBillingInfo(PrefixedProject(pid), ba).Do() - return err - } - err := RetryTimeDuration(updateBillingInfoFunc, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - if err := d.Set("billing_account", ""); err != nil { - return fmt.Errorf("Error setting billing_account: %s", err) - } - if _err, ok := err.(*googleapi.Error); ok { - return fmt.Errorf("Error setting billing account %q for project %q: %v", name, PrefixedProject(pid), _err) - } - return fmt.Errorf("Error setting billing account %q for project %q: %v", name, PrefixedProject(pid), err) - } - for retries := 0; retries < 3; retries++ { - var ba *cloudbilling.ProjectBillingInfo - err = RetryTimeDuration(func() (reqErr error) { - ba, reqErr = config.NewBillingClient(userAgent).Projects.GetBillingInfo(PrefixedProject(pid)).Do() - return reqErr - }, d.Timeout(schema.TimeoutRead)) - if err != nil { - return fmt.Errorf("Error getting billing info for project %q: %v", PrefixedProject(pid), err) - } - baName := strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") - if baName == name { - return nil - } - time.Sleep(3 * time.Second) - } - return fmt.Errorf("Timed out waiting for billing account to return correct value. 
Waiting for %s, got %s.", - name, strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/")) -} - -func deleteComputeNetwork(project, network, userAgent string, config *Config) error { - op, err := config.NewComputeClient(userAgent).Networks.Delete( - project, network).Do() - if err != nil { - return errwrap.Wrapf("Error deleting network: {{err}}", err) - } - - err = ComputeOperationWaitTime(config, op, project, "Deleting Network", userAgent, 10*time.Minute) - if err != nil { - return err - } - return nil -} - -func readGoogleProject(d *schema.ResourceData, config *Config, userAgent string) (*cloudresourcemanager.Project, error) { - var p *cloudresourcemanager.Project - // Read the project - parts := strings.Split(d.Id(), "/") - pid := parts[len(parts)-1] - err := RetryTimeDuration(func() (reqErr error) { - p, reqErr = config.NewResourceManagerClient(userAgent).Projects.Get(pid).Do() - return reqErr - }, d.Timeout(schema.TimeoutRead)) - return p, err -} - -// Enables services. WARNING: Use BatchRequestEnableServices for better batching if possible. -func EnableServiceUsageProjectServices(services []string, project, billingProject, userAgent string, config *Config, timeout time.Duration) error { - // ServiceUsage does not allow more than 20 services to be enabled per - // batchEnable API call. See - // https://cloud.google.com/service-usage/docs/reference/rest/v1/services/batchEnable - for i := 0; i < len(services); i += maxServiceUsageBatchSize { - j := i + maxServiceUsageBatchSize - if j > len(services) { - j = len(services) - } - nextBatch := services[i:j] - if len(nextBatch) == 0 { - // All batches finished, return. 
- return nil - } - - if err := doEnableServicesRequest(nextBatch, project, billingProject, userAgent, config, timeout); err != nil { - return err - } - log.Printf("[DEBUG] Finished enabling next batch of %d project services: %+v", len(nextBatch), nextBatch) - } - - log.Printf("[DEBUG] Verifying that all services are enabled") - return waitForServiceUsageEnabledServices(services, project, billingProject, userAgent, config, timeout) -} - -func doEnableServicesRequest(services []string, project, billingProject, userAgent string, config *Config, timeout time.Duration) error { - var op *serviceusage.Operation - var call ServicesCall - err := RetryTimeDuration(func() error { - var rerr error - if len(services) == 1 { - // BatchEnable returns an error for a single item, so just enable - // using service endpoint. - name := fmt.Sprintf("projects/%s/services/%s", project, services[0]) - req := &serviceusage.EnableServiceRequest{} - call = config.NewServiceUsageClient(userAgent).Services.Enable(name, req) - } else { - // Batch enable for multiple services. 
- name := fmt.Sprintf("projects/%s", project) - req := &serviceusage.BatchEnableServicesRequest{ServiceIds: services} - call = config.NewServiceUsageClient(userAgent).Services.BatchEnable(name, req) - } - if config.UserProjectOverride && billingProject != "" { - call.Header().Add("X-Goog-User-Project", billingProject) - } - op, rerr = call.Do() - return handleServiceUsageRetryableError(rerr) - }, - timeout, - serviceUsageServiceBeingActivated, - ) - if err != nil { - return errwrap.Wrapf("failed to send enable services request: {{err}}", err) - } - // Poll for the API to return - waitErr := serviceUsageOperationWait(config, op, billingProject, fmt.Sprintf("Enable Project %q Services: %+v", project, services), userAgent, timeout) - if waitErr != nil { - return waitErr - } - return nil -} - -// Retrieve a project's services from the API -// if a service has been renamed, this function will list both the old and new -// forms of the service. LIST responses are expected to return only the old or -// new form, but we'll always return both. -func ListCurrentlyEnabledServices(project, billingProject, userAgent string, config *Config, timeout time.Duration) (map[string]struct{}, error) { - log.Printf("[DEBUG] Listing enabled services for project %s", project) - apiServices := make(map[string]struct{}) - err := RetryTimeDuration(func() error { - ctx := context.Background() - call := config.NewServiceUsageClient(userAgent).Services.List(fmt.Sprintf("projects/%s", project)) - if config.UserProjectOverride && billingProject != "" { - call.Header().Add("X-Goog-User-Project", billingProject) - } - return call.Fields("services/name,nextPageToken").Filter("state:ENABLED"). 
- Pages(ctx, func(r *serviceusage.ListServicesResponse) error { - for _, v := range r.Services { - // services are returned as "projects/{{project}}/services/{{name}}" - name := GetResourceNameFromSelfLink(v.Name) - - // if name not in ignoredProjectServicesSet - if _, ok := ignoredProjectServicesSet[name]; !ok { - apiServices[name] = struct{}{} - - // if a service has been renamed, set both. We'll deal - // with setting the right values later. - if v, ok := renamedServicesByOldAndNewServiceNames[name]; ok { - log.Printf("[DEBUG] Adding service alias for %s to enabled services: %s", name, v) - apiServices[v] = struct{}{} - } - } - } - return nil - }) - }, timeout) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Failed to list enabled services for project %s: {{err}}", project), err) - } - return apiServices, nil -} - -// waitForServiceUsageEnabledServices doesn't resend enable requests - it just -// waits for service enablement status to propagate. Essentially, it waits until -// all services show up as enabled when listing services on the project. 
-func waitForServiceUsageEnabledServices(services []string, project, billingProject, userAgent string, config *Config, timeout time.Duration) error { - missing := make([]string, 0, len(services)) - delay := time.Duration(0) - interval := time.Second - err := RetryTimeDuration(func() error { - // Get the list of services that are enabled on the project - enabledServices, err := ListCurrentlyEnabledServices(project, billingProject, userAgent, config, timeout) - if err != nil { - return err - } - - missing := make([]string, 0, len(services)) - for _, s := range services { - if _, ok := enabledServices[s]; !ok { - missing = append(missing, s) - } - } - if len(missing) > 0 { - log.Printf("[DEBUG] waiting %v before reading project %s services...", delay, project) - time.Sleep(delay) - delay += interval - interval += delay - - // Spoof a googleapi Error so retryTime will try again - return &googleapi.Error{ - Code: 503, - Message: fmt.Sprintf("The service(s) %q are still being enabled for project %s. 
This isn't a real API error, this is just eventual consistency.", missing, project), - } - } - return nil - }, timeout) - if err != nil { - return errwrap.Wrap(err, fmt.Errorf("failed to enable some service(s) %q for project %s", missing, project)) - } - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_migrate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_migrate.go deleted file mode 100644 index 12a187d7f9..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_migrate.go +++ /dev/null @@ -1,63 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "google.golang.org/api/cloudresourcemanager/v1" -) - -func resourceGoogleProjectMigrateState(v int, s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - if s.Empty() { - log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return s, nil - } - - switch v { - case 0: - log.Println("[INFO] Found Google Project State v0; migrating to v1") - s, err := migrateGoogleProjectStateV0toV1(s, meta.(*Config)) - if err != nil { - return s, err - } - return s, nil - default: - return s, fmt.Errorf("Unexpected schema version: %d", v) - } -} - -// This migration adjusts google_project resources to include several additional attributes -// required to support project creation/deletion that was added in V1. 
-func migrateGoogleProjectStateV0toV1(s *terraform.InstanceState, config *Config) (*terraform.InstanceState, error) { - log.Printf("[DEBUG] Attributes before migration: %#v", s.Attributes) - - s.Attributes["skip_delete"] = "true" - s.Attributes["project_id"] = s.ID - - if s.Attributes["policy_data"] != "" { - p, err := getProjectIamPolicy(s.ID, config) - if err != nil { - return s, fmt.Errorf("Could not retrieve project's IAM policy while attempting to migrate state from V0 to V1: %v", err) - } - s.Attributes["policy_etag"] = p.Etag - } - - log.Printf("[DEBUG] Attributes after migration: %#v", s.Attributes) - return s, nil -} - -// Retrieve the existing IAM Policy for a Project -func getProjectIamPolicy(project string, config *Config) (*cloudresourcemanager.Policy, error) { - p, err := config.NewResourceManagerClient(config.UserAgent).Projects.GetIamPolicy(project, - &cloudresourcemanager.GetIamPolicyRequest{ - Options: &cloudresourcemanager.GetPolicyOptions{ - RequestedPolicyVersion: IamPolicyVersion, - }, - }).Do() - - if err != nil { - return nil, fmt.Errorf("Error retrieving IAM policy for project %q: %s", project, err) - } - return p, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_organization_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_organization_policy.go deleted file mode 100644 index 816b3e72cc..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_organization_policy.go +++ /dev/null @@ -1,181 +0,0 @@ -package google - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/cloudresourcemanager/v1" -) - -func ResourceGoogleProjectOrganizationPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceGoogleProjectOrganizationPolicyCreate, - 
Read: resourceGoogleProjectOrganizationPolicyRead, - Update: resourceGoogleProjectOrganizationPolicyUpdate, - Delete: resourceGoogleProjectOrganizationPolicyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceProjectOrgPolicyImporter, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(4 * time.Minute), - Update: schema.DefaultTimeout(4 * time.Minute), - Read: schema.DefaultTimeout(4 * time.Minute), - Delete: schema.DefaultTimeout(4 * time.Minute), - }, - - Schema: mergeSchemas( - schemaOrganizationPolicy, - map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The project ID.`, - }, - }, - ), - UseJSONNumber: true, - } -} - -func resourceProjectOrgPolicyImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "projects/(?P[^/]+):constraints/(?P[^/]+)", - "(?P[^/]+):constraints/(?P[^/]+)", - "(?P[^/]+):(?P[^/]+)"}, - d, config); err != nil { - return nil, err - } - - if d.Get("project") == "" || d.Get("constraint") == "" { - return nil, fmt.Errorf("unable to parse project or constraint. 
Check import formats") - } - - return []*schema.ResourceData{d}, nil -} - -func resourceGoogleProjectOrganizationPolicyCreate(d *schema.ResourceData, meta interface{}) error { - if isOrganizationPolicyUnset(d) { - d.SetId(fmt.Sprintf("%s:%s", d.Get("project"), d.Get("constraint"))) - return resourceGoogleProjectOrganizationPolicyDelete(d, meta) - } - - if err := setProjectOrganizationPolicy(d, meta); err != nil { - return err - } - - d.SetId(fmt.Sprintf("%s:%s", d.Get("project"), d.Get("constraint"))) - - return resourceGoogleProjectOrganizationPolicyRead(d, meta) -} - -func resourceGoogleProjectOrganizationPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - project := PrefixedProject(d.Get("project").(string)) - - var policy *cloudresourcemanager.OrgPolicy - err = RetryTimeDuration(func() (readErr error) { - policy, readErr = config.NewResourceManagerClient(userAgent).Projects.GetOrgPolicy(project, &cloudresourcemanager.GetOrgPolicyRequest{ - Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), - }).Do() - return readErr - }, d.Timeout(schema.TimeoutRead)) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Organization policy for %s", project)) - } - - if err := d.Set("constraint", policy.Constraint); err != nil { - return fmt.Errorf("Error setting constraint: %s", err) - } - if err := d.Set("boolean_policy", flattenBooleanOrganizationPolicy(policy.BooleanPolicy)); err != nil { - return fmt.Errorf("Error setting boolean_policy: %s", err) - } - if err := d.Set("list_policy", flattenListOrganizationPolicy(policy.ListPolicy)); err != nil { - return fmt.Errorf("Error setting list_policy: %s", err) - } - if err := d.Set("restore_policy", flattenRestoreOrganizationPolicy(policy.RestoreDefault)); err != nil { - return fmt.Errorf("Error setting restore_policy: %s", err) - } - if err := 
d.Set("version", policy.Version); err != nil { - return fmt.Errorf("Error setting version: %s", err) - } - if err := d.Set("etag", policy.Etag); err != nil { - return fmt.Errorf("Error setting etag: %s", err) - } - if err := d.Set("update_time", policy.UpdateTime); err != nil { - return fmt.Errorf("Error setting update_time: %s", err) - } - - return nil -} - -func resourceGoogleProjectOrganizationPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - if isOrganizationPolicyUnset(d) { - return resourceGoogleProjectOrganizationPolicyDelete(d, meta) - } - - if err := setProjectOrganizationPolicy(d, meta); err != nil { - return err - } - - return resourceGoogleProjectOrganizationPolicyRead(d, meta) -} - -func resourceGoogleProjectOrganizationPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - project := PrefixedProject(d.Get("project").(string)) - - return RetryTimeDuration(func() error { - _, err := config.NewResourceManagerClient(userAgent).Projects.ClearOrgPolicy(project, &cloudresourcemanager.ClearOrgPolicyRequest{ - Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), - }).Do() - return err - }, d.Timeout(schema.TimeoutDelete)) -} - -func setProjectOrganizationPolicy(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project := PrefixedProject(d.Get("project").(string)) - - listPolicy, err := expandListOrganizationPolicy(d.Get("list_policy").([]interface{})) - if err != nil { - return err - } - - restore_default, err := expandRestoreOrganizationPolicy(d.Get("restore_policy").([]interface{})) - if err != nil { - return err - } - - return RetryTimeDuration(func() error { - _, err := config.NewResourceManagerClient(userAgent).Projects.SetOrgPolicy(project, 
&cloudresourcemanager.SetOrgPolicyRequest{ - Policy: &cloudresourcemanager.OrgPolicy{ - Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), - BooleanPolicy: expandBooleanOrganizationPolicy(d.Get("boolean_policy").([]interface{})), - ListPolicy: listPolicy, - RestoreDefault: restore_default, - Version: int64(d.Get("version").(int)), - Etag: d.Get("etag").(string), - }, - }).Do() - return err - }, d.Timeout(schema.TimeoutCreate)) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_service.go deleted file mode 100644 index d2427a1268..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_service.go +++ /dev/null @@ -1,296 +0,0 @@ -package google - -import ( - "fmt" - "log" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/googleapi" - "google.golang.org/api/serviceusage/v1" -) - -// These services can only be enabled as a side-effect of enabling other services, -// so don't bother storing them in the config or using them for diffing. -var ignoredProjectServices = []string{"dataproc-control.googleapis.com", "source.googleapis.com", "stackdriverprovisioning.googleapis.com"} -var ignoredProjectServicesSet = golangSetFromStringSlice(ignoredProjectServices) - -// Services that can't be user-specified but are otherwise valid. Renamed -// services should be added to this set during major releases. 
-var bannedProjectServices = []string{"bigquery-json.googleapis.com"} - -// Service Renames -// we expect when a service is renamed: -// - both service names will continue to be able to be set -// - setting one will effectively enable the other as a dependent -// - GET will return whichever service name is requested -// - LIST responses will not contain the old service name -// renames may be reverted, though, so we should canonicalise both ways until -// the old service is fully removed from the provider -// -// We handle service renames in the provider by pretending that we've read both -// the old and new service names from the API if we see either, and only setting -// the one(s) that existed in prior state in config (if any). If neither exists, -// we'll set the old service name in state. -// Additionally, in case of service rename rollbacks or unexpected early -// removals of services, if we fail to create or delete a service that's been -// renamed we'll retry using an alternate name. -// We try creation by the user-specified value followed by the other value. -// We try deletion by the old value followed by the new value. - -// map from old -> new names of services that have been renamed -// these should be removed during major provider versions. 
comment here with -// "DEPRECATED FOR {{version}} next to entries slated for removal in {{version}} -// upon removal, we should disallow the old name from being used even if it's -// not gone from the underlying API yet -var renamedServices = map[string]string{} - -// renamedServices in reverse (new -> old) -var renamedServicesByNewServiceNames = reverseStringMap(renamedServices) - -// renamedServices expressed as both old -> new and new -> old -var renamedServicesByOldAndNewServiceNames = mergeStringMaps(renamedServices, renamedServicesByNewServiceNames) - -const maxServiceUsageBatchSize = 20 - -func validateProjectServiceService(val interface{}, key string) (warns []string, errs []error) { - bannedServicesFunc := StringNotInSlice(append(ignoredProjectServices, bannedProjectServices...), false) - warns, errs = bannedServicesFunc(val, key) - if len(errs) > 0 { - return - } - - // StringNotInSlice already validates that this is a string - v, _ := val.(string) - if !strings.Contains(v, ".") { - errs = append(errs, fmt.Errorf("expected %s to be a domain like serviceusage.googleapis.com", v)) - } - return -} - -func ResourceGoogleProjectService() *schema.Resource { - return &schema.Resource{ - Create: resourceGoogleProjectServiceCreate, - Read: resourceGoogleProjectServiceRead, - Delete: resourceGoogleProjectServiceDelete, - Update: resourceGoogleProjectServiceUpdate, - - Importer: &schema.ResourceImporter{ - State: resourceGoogleProjectServiceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Read: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "service": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateProjectServiceService, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - 
DiffSuppressFunc: compareResourceNames, - }, - - "disable_dependent_services": { - Type: schema.TypeBool, - Optional: true, - }, - - "disable_on_destroy": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGoogleProjectServiceImport(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { - parts := strings.Split(d.Id(), "/") - if len(parts) != 2 { - return nil, fmt.Errorf("Invalid google_project_service id format for import, expecting `{project}/{service}`, found %s", d.Id()) - } - if err := d.Set("project", parts[0]); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("service", parts[1]); err != nil { - return nil, fmt.Errorf("Error setting service: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func resourceGoogleProjectServiceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - project = GetResourceNameFromSelfLink(project) - - srv := d.Get("service").(string) - id := project + "/" + srv - - // Check if the service has already been enabled - servicesRaw, err := BatchRequestReadServices(project, d, config) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id())) - } - servicesList := servicesRaw.(map[string]struct{}) - if _, ok := servicesList[srv]; ok { - log.Printf("[DEBUG] service %s was already found to be enabled in project %s", srv, project) - d.SetId(id) - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("service", srv); err != nil { - return fmt.Errorf("Error setting service: %s", err) - } - return nil - } - - err = BatchRequestEnableService(srv, project, d, config) - if err != nil { - return err - } - d.SetId(id) - return resourceGoogleProjectServiceRead(d, meta) -} - -func 
resourceGoogleProjectServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - project = GetResourceNameFromSelfLink(project) - - // Verify project for services still exists - projectGetCall := config.NewResourceManagerClient(userAgent).Projects.Get(project) - if config.UserProjectOverride { - billingProject := project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - projectGetCall.Header().Add("X-Goog-User-Project", billingProject) - } - p, err := projectGetCall.Do() - - if err == nil && p.LifecycleState == "DELETE_REQUESTED" { - // Construct a 404 error for handleNotFoundError - err = &googleapi.Error{ - Code: 404, - Message: "Project deletion was requested", - } - } - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id())) - } - - servicesRaw, err := BatchRequestReadServices(project, d, config) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id())) - } - servicesList := servicesRaw.(map[string]struct{}) - - srv := d.Get("service").(string) - if _, ok := servicesList[srv]; ok { - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("service", srv); err != nil { - return fmt.Errorf("Error setting service: %s", err) - } - return nil - } - - log.Printf("[DEBUG] service %s not in enabled services for project %s, removing from state", srv, project) - d.SetId("") - return nil -} - -func resourceGoogleProjectServiceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if disable := d.Get("disable_on_destroy"); !(disable.(bool)) { - log.Printf("[WARN] Project service %q 
disable_on_destroy is false, skip disabling service", d.Id()) - d.SetId("") - return nil - } - - project, err := getProject(d, config) - if err != nil { - return err - } - project = GetResourceNameFromSelfLink(project) - - service := d.Get("service").(string) - disableDependencies := d.Get("disable_dependent_services").(bool) - if err = disableServiceUsageProjectService(service, project, d, config, disableDependencies); err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id())) - } - - d.SetId("") - return nil -} - -func resourceGoogleProjectServiceUpdate(d *schema.ResourceData, meta interface{}) error { - // This update method is no-op because the only updatable fields - // are state/config-only, i.e. they aren't sent in requests to the API. - return nil -} - -// Disables a project service. -func disableServiceUsageProjectService(service, project string, d *schema.ResourceData, config *Config, disableDependentServices bool) error { - err := RetryTimeDuration(func() error { - billingProject := project - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - name := fmt.Sprintf("projects/%s/services/%s", project, service) - servicesDisableCall := config.NewServiceUsageClient(userAgent).Services.Disable(name, &serviceusage.DisableServiceRequest{ - DisableDependentServices: disableDependentServices, - }) - if config.UserProjectOverride { - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - servicesDisableCall.Header().Add("X-Goog-User-Project", billingProject) - } - sop, err := servicesDisableCall.Do() - if err != nil { - return err - } - // Wait for the operation to complete - waitErr := serviceUsageOperationWait(config, sop, billingProject, "api to disable", userAgent, d.Timeout(schema.TimeoutDelete)) - if waitErr != nil { - return waitErr - } - return nil - }, 
d.Timeout(schema.TimeoutDelete), serviceUsageServiceBeingActivated) - if err != nil { - return fmt.Errorf("Error disabling service %q for project %q: %v", service, project, err) - } - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_consent_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_consent_store.go deleted file mode 100644 index 1155762ae8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_consent_store.go +++ /dev/null @@ -1,337 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceHealthcareConsentStore() *schema.Resource { - return &schema.Resource{ - Create: resourceHealthcareConsentStoreCreate, - Read: resourceHealthcareConsentStoreRead, - Update: resourceHealthcareConsentStoreUpdate, - Delete: resourceHealthcareConsentStoreDelete, - - Importer: &schema.ResourceImporter{ - State: resourceHealthcareConsentStoreImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "dataset": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Identifies the dataset addressed by this request. Must be in the format -'projects/{project}/locations/{location}/datasets/{dataset}'`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of this ConsentStore, for example: -"consent1"`, - }, - "default_consent_ttl": { - Type: schema.TypeString, - Optional: true, - Description: `Default time to live for consents in this store. Must be at least 24 hours. Updating this field will not affect the expiration time of existing consents. - -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - "enable_consent_create_on_update": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, [consents.patch] [google.cloud.healthcare.v1.consent.UpdateConsent] creates the consent if it does not already exist.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `User-supplied key-value pairs used to organize Consent stores. 
- -Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must -conform to the following PCRE regular expression: '[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}' - -Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 -bytes, and must conform to the following PCRE regular expression: '[\p{Ll}\p{Lo}\p{N}_-]{0,63}' - -No more than 64 labels can be associated with a given store. - -An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - UseJSONNumber: true, - } -} - -func resourceHealthcareConsentStoreCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - defaultConsentTtlProp, err := expandHealthcareConsentStoreDefaultConsentTtl(d.Get("default_consent_ttl"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_consent_ttl"); !isEmptyValue(reflect.ValueOf(defaultConsentTtlProp)) && (ok || !reflect.DeepEqual(v, defaultConsentTtlProp)) { - obj["defaultConsentTtl"] = defaultConsentTtlProp - } - enableConsentCreateOnUpdateProp, err := expandHealthcareConsentStoreEnableConsentCreateOnUpdate(d.Get("enable_consent_create_on_update"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_consent_create_on_update"); !isEmptyValue(reflect.ValueOf(enableConsentCreateOnUpdateProp)) && (ok || !reflect.DeepEqual(v, enableConsentCreateOnUpdateProp)) { - obj["enableConsentCreateOnUpdate"] = enableConsentCreateOnUpdateProp - } - labelsProp, err := expandHealthcareConsentStoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || 
!reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/consentStores?consentStoreId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ConsentStore: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating ConsentStore: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{dataset}}/consentStores/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating ConsentStore %q: %#v", d.Id(), res) - - return resourceHealthcareConsentStoreRead(d, meta) -} - -func resourceHealthcareConsentStoreRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/consentStores/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("HealthcareConsentStore %q", d.Id())) - } - - if err := d.Set("default_consent_ttl", flattenHealthcareConsentStoreDefaultConsentTtl(res["defaultConsentTtl"], d, config)); err != nil { - return fmt.Errorf("Error reading ConsentStore: %s", err) - } - if err := d.Set("enable_consent_create_on_update", 
flattenHealthcareConsentStoreEnableConsentCreateOnUpdate(res["enableConsentCreateOnUpdate"], d, config)); err != nil { - return fmt.Errorf("Error reading ConsentStore: %s", err) - } - if err := d.Set("labels", flattenHealthcareConsentStoreLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading ConsentStore: %s", err) - } - - return nil -} - -func resourceHealthcareConsentStoreUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - defaultConsentTtlProp, err := expandHealthcareConsentStoreDefaultConsentTtl(d.Get("default_consent_ttl"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_consent_ttl"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultConsentTtlProp)) { - obj["defaultConsentTtl"] = defaultConsentTtlProp - } - enableConsentCreateOnUpdateProp, err := expandHealthcareConsentStoreEnableConsentCreateOnUpdate(d.Get("enable_consent_create_on_update"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_consent_create_on_update"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableConsentCreateOnUpdateProp)) { - obj["enableConsentCreateOnUpdate"] = enableConsentCreateOnUpdateProp - } - labelsProp, err := expandHealthcareConsentStoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/consentStores/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating ConsentStore %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("default_consent_ttl") { - 
updateMask = append(updateMask, "defaultConsentTtl") - } - - if d.HasChange("enable_consent_create_on_update") { - updateMask = append(updateMask, "enableConsentCreateOnUpdate") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating ConsentStore %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating ConsentStore %q: %#v", d.Id(), res) - } - - return resourceHealthcareConsentStoreRead(d, meta) -} - -func resourceHealthcareConsentStoreDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/consentStores/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting ConsentStore %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ConsentStore") - } - - log.Printf("[DEBUG] Finished deleting ConsentStore %q: %#v", d.Id(), res) - return nil -} - -func resourceHealthcareConsentStoreImport(d 
*schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P.+)/consentStores/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{dataset}}/consentStores/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenHealthcareConsentStoreDefaultConsentTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareConsentStoreEnableConsentCreateOnUpdate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareConsentStoreLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandHealthcareConsentStoreDefaultConsentTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareConsentStoreEnableConsentCreateOnUpdate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareConsentStoreLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_dataset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_dataset.go deleted file mode 100644 index d2919db667..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_dataset.go +++ /dev/null @@ -1,335 +0,0 @@ -// 
---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceHealthcareDataset() *schema.Resource { - return &schema.Resource{ - Create: resourceHealthcareDatasetCreate, - Read: resourceHealthcareDatasetRead, - Update: resourceHealthcareDatasetUpdate, - Delete: resourceHealthcareDatasetDelete, - - Importer: &schema.ResourceImporter{ - State: resourceHealthcareDatasetImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location for the Dataset.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name for the Dataset.`, - }, - "time_zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `The default timezone used by this dataset. Must be a either a valid IANA time zone name such as -"America/New_York" or empty, which defaults to UTC. 
This is used for parsing times in resources -(e.g., HL7 messages) where no explicit timezone is specified.`, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: `The fully qualified name of this dataset`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceHealthcareDatasetCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandHealthcareDatasetName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - timeZoneProp, err := expandHealthcareDatasetTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(reflect.ValueOf(timeZoneProp)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}projects/{{project}}/locations/{{location}}/datasets?datasetId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Dataset: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), healthcareDatasetNotInitialized) - if err != nil { - return fmt.Errorf("Error creating Dataset: %s", err) - } - - // 
Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/datasets/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Dataset %q: %#v", d.Id(), res) - - return resourceHealthcareDatasetRead(d, meta) -} - -func resourceHealthcareDatasetRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}projects/{{project}}/locations/{{location}}/datasets/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, healthcareDatasetNotInitialized) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("HealthcareDataset %q", d.Id())) - } - - res, err = resourceHealthcareDatasetDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing HealthcareDataset because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - - if err := d.Set("name", flattenHealthcareDatasetName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("time_zone", flattenHealthcareDatasetTimeZone(res["timeZone"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - - return nil -} - -func resourceHealthcareDatasetUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - timeZoneProp, err := expandHealthcareDatasetTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}projects/{{project}}/locations/{{location}}/datasets/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Dataset %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("time_zone") { - updateMask = append(updateMask, "timeZone") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == 
nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), healthcareDatasetNotInitialized) - - if err != nil { - return fmt.Errorf("Error updating Dataset %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Dataset %q: %#v", d.Id(), res) - } - - return resourceHealthcareDatasetRead(d, meta) -} - -func resourceHealthcareDatasetDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}projects/{{project}}/locations/{{location}}/datasets/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Dataset %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), healthcareDatasetNotInitialized) - if err != nil { - return handleNotFoundError(err, d, "Dataset") - } - - log.Printf("[DEBUG] Finished deleting Dataset %q: %#v", d.Id(), res) - return nil -} - -func resourceHealthcareDatasetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/datasets/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, 
"projects/{{project}}/locations/{{location}}/datasets/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenHealthcareDatasetName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareDatasetTimeZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandHealthcareDatasetName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareDatasetTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceHealthcareDatasetDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // Take the returned long form of the name and use it as `self_link`. - // Then modify the name to be the user specified form. - // We can't just ignore_read on `name` as the linter will - // complain that the returned `res` is never used afterwards. - // Some field needs to be actually set, and we chose `name`. 
- if err := d.Set("self_link", res["name"].(string)); err != nil { - return nil, fmt.Errorf("Error setting self_link: %s", err) - } - res["name"] = d.Get("name").(string) - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_dicom_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_dicom_store.go deleted file mode 100644 index 25403ad570..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_dicom_store.go +++ /dev/null @@ -1,399 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceHealthcareDicomStore() *schema.Resource { - return &schema.Resource{ - Create: resourceHealthcareDicomStoreCreate, - Read: resourceHealthcareDicomStoreRead, - Update: resourceHealthcareDicomStoreUpdate, - Delete: resourceHealthcareDicomStoreDelete, - - Importer: &schema.ResourceImporter{ - State: resourceHealthcareDicomStoreImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "dataset": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Identifies the dataset addressed by this request. Must be in the format -'projects/{project}/locations/{location}/datasets/{dataset}'`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name for the DicomStore. - -** Changing this property may recreate the Dicom store (removing all data) **`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `User-supplied key-value pairs used to organize DICOM stores. - -Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must -conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - -Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 -bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - -No more than 64 labels can be associated with a given store. - -An object containing a list of "key": value pairs. 
-Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "notification_config": { - Type: schema.TypeList, - Optional: true, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pubsub_topic": { - Type: schema.TypeString, - Required: true, - Description: `The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. -PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. -It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message -was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a -project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given -Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail.`, - }, - }, - }, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: `The fully qualified name of this dataset`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceHealthcareDicomStoreCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandHealthcareDicomStoreName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - labelsProp, err := expandHealthcareDicomStoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - 
obj["labels"] = labelsProp - } - notificationConfigProp, err := expandHealthcareDicomStoreNotificationConfig(d.Get("notification_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_config"); !isEmptyValue(reflect.ValueOf(notificationConfigProp)) && (ok || !reflect.DeepEqual(v, notificationConfigProp)) { - obj["notificationConfig"] = notificationConfigProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dicomStores?dicomStoreId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new DicomStore: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating DicomStore: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{dataset}}/dicomStores/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating DicomStore %q: %#v", d.Id(), res) - - return resourceHealthcareDicomStoreRead(d, meta) -} - -func resourceHealthcareDicomStoreRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dicomStores/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, 
fmt.Sprintf("HealthcareDicomStore %q", d.Id())) - } - - res, err = resourceHealthcareDicomStoreDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing HealthcareDicomStore because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenHealthcareDicomStoreName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading DicomStore: %s", err) - } - if err := d.Set("labels", flattenHealthcareDicomStoreLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading DicomStore: %s", err) - } - if err := d.Set("notification_config", flattenHealthcareDicomStoreNotificationConfig(res["notificationConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading DicomStore: %s", err) - } - - return nil -} - -func resourceHealthcareDicomStoreUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - labelsProp, err := expandHealthcareDicomStoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - notificationConfigProp, err := expandHealthcareDicomStoreNotificationConfig(d.Get("notification_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationConfigProp)) { - obj["notificationConfig"] = notificationConfigProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dicomStores/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating 
DicomStore %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("notification_config") { - updateMask = append(updateMask, "notificationConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating DicomStore %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating DicomStore %q: %#v", d.Id(), res) - } - - return resourceHealthcareDicomStoreRead(d, meta) -} - -func resourceHealthcareDicomStoreDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dicomStores/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting DicomStore %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DicomStore") - } - - log.Printf("[DEBUG] Finished deleting DicomStore %q: %#v", d.Id(), res) - return nil -} - -func resourceHealthcareDicomStoreImport(d *schema.ResourceData, meta interface{}) 
([]*schema.ResourceData, error) { - - config := meta.(*Config) - - dicomStoreId, err := parseHealthcareDicomStoreId(d.Id(), config) - if err != nil { - return nil, err - } - - if err := d.Set("dataset", dicomStoreId.DatasetId.datasetId()); err != nil { - return nil, fmt.Errorf("Error setting dataset: %s", err) - } - if err := d.Set("name", dicomStoreId.Name); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenHealthcareDicomStoreName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareDicomStoreLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareDicomStoreNotificationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pubsub_topic"] = - flattenHealthcareDicomStoreNotificationConfigPubsubTopic(original["pubsubTopic"], d, config) - return []interface{}{transformed} -} -func flattenHealthcareDicomStoreNotificationConfigPubsubTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandHealthcareDicomStoreName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareDicomStoreLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandHealthcareDicomStoreNotificationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - 
original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubTopic, err := expandHealthcareDicomStoreNotificationConfigPubsubTopic(original["pubsub_topic"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubTopic"] = transformedPubsubTopic - } - - return transformed, nil -} - -func expandHealthcareDicomStoreNotificationConfigPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceHealthcareDicomStoreDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // Take the returned long form of the name and use it as `self_link`. - // Then modify the name to be the user specified form. - // We can't just ignore_read on `name` as the linter will - // complain that the returned `res` is never used afterwards. - // Some field needs to be actually set, and we chose `name`. 
- if err := d.Set("self_link", res["name"].(string)); err != nil { - return nil, fmt.Errorf("Error setting self_link: %s", err) - } - res["name"] = d.Get("name").(string) - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_fhir_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_fhir_store.go deleted file mode 100644 index 696d76bdae..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_fhir_store.go +++ /dev/null @@ -1,814 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceHealthcareFhirStore() *schema.Resource { - return &schema.Resource{ - Create: resourceHealthcareFhirStoreCreate, - Read: resourceHealthcareFhirStoreRead, - Update: resourceHealthcareFhirStoreUpdate, - Delete: resourceHealthcareFhirStoreDelete, - - Importer: &schema.ResourceImporter{ - State: resourceHealthcareFhirStoreImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "dataset": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Identifies the dataset addressed by this request. Must be in the format -'projects/{project}/locations/{location}/datasets/{dataset}'`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name for the FhirStore. - -** Changing this property may recreate the FHIR store (removing all data) **`, - }, - "version": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"DSTU2", "STU3", "R4"}), - Description: `The FHIR specification version. Possible values: ["DSTU2", "STU3", "R4"]`, - }, - "disable_referential_integrity": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to disable referential integrity in this FHIR store. This field is immutable after FHIR store -creation. The default value is false, meaning that the API will enforce referential integrity and fail the -requests that will result in inconsistent state in the FHIR store. 
When this field is set to true, the API -will skip referential integrity check. Consequently, operations that rely on references, such as -Patient.get$everything, will not return all the results if broken references exist. - -** Changing this property may recreate the FHIR store (removing all data) **`, - }, - "disable_resource_versioning": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to disable resource versioning for this FHIR store. This field can not be changed after the creation -of FHIR store. If set to false, which is the default behavior, all write operations will cause historical -versions to be recorded automatically. The historical versions can be fetched through the history APIs, but -cannot be updated. If set to true, no historical versions will be kept. The server will send back errors for -attempts to read the historical versions. - -** Changing this property may recreate the FHIR store (removing all data) **`, - }, - "enable_history_import": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to allow the bulk import API to accept history bundles and directly insert historical resource -versions into the FHIR store. Importing resource histories creates resource interactions that appear to have -occurred in the past, which clients may not want to allow. If set to false, history bundles within an import -will fail with an error. - -** Changing this property may recreate the FHIR store (removing all data) ** - -** This property can be changed manually in the Google Cloud Healthcare admin console without recreating the FHIR store **`, - }, - "enable_update_create": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether this FHIR store has the updateCreate capability. This determines if the client can use an Update -operation to create a new resource with a client-specified ID. 
If false, all IDs are server-assigned through -the Create operation and attempts to Update a non-existent resource will return errors. Please treat the audit -logs with appropriate levels of care if client-specified resource IDs contain sensitive data such as patient -identifiers, those IDs will be part of the FHIR resource path recorded in Cloud audit logs and Cloud Pub/Sub -notifications.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `User-supplied key-value pairs used to organize FHIR stores. - -Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must -conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - -Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 -bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - -No more than 64 labels can be associated with a given store. - -An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "notification_config": { - Type: schema.TypeList, - Optional: true, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pubsub_topic": { - Type: schema.TypeString, - Required: true, - Description: `The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. -PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. -It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message -was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a -project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given -Cloud Pub/Sub topic. 
Not having adequate permissions will cause the calls that send notifications to fail.`, - }, - }, - }, - }, - "stream_configs": { - Type: schema.TypeList, - Optional: true, - Description: `A list of streaming configs that configure the destinations of streaming export for every resource mutation in -this FHIR store. Each store is allowed to have up to 10 streaming configs. After a new config is added, the next -resource mutation is streamed to the new location in addition to the existing ones. When a location is removed -from the list, the server stops streaming to that location. Before adding a new config, you must add the required -bigquery.dataEditor role to your project's Cloud Healthcare Service Agent service account. Some lag (typically on -the order of dozens of seconds) is expected before the results show up in the streaming destination.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bigquery_destination": { - Type: schema.TypeList, - Required: true, - Description: `The destination BigQuery structure that contains both the dataset location and corresponding schema config. -The output is organized in one table per resource type. The server reuses the existing tables (if any) that -are named after the resource types, e.g. "Patient", "Observation". When there is no existing table for a given -resource type, the server attempts to create one. 
-See the [streaming config reference](https://cloud.google.com/healthcare/docs/reference/rest/v1beta1/projects.locations.datasets.fhirStores#streamconfig) for more details.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dataset_uri": { - Type: schema.TypeString, - Required: true, - Description: `BigQuery URI to a dataset, up to 2000 characters long, in the format bq://projectId.bqDatasetId`, - }, - "schema_config": { - Type: schema.TypeList, - Required: true, - Description: `The configuration for the exported BigQuery schema.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "recursive_structure_depth": { - Type: schema.TypeInt, - Required: true, - Description: `The depth for all recursive structures in the output analytics schema. For example, concept in the CodeSystem -resource is a recursive structure; when the depth is 2, the CodeSystem table will have a column called -concept.concept but not concept.concept.concept. If not specified or set to 0, the server will use the default -value 2. The maximum depth allowed is 5.`, - }, - "schema_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"ANALYTICS", "ANALYTICS_V2", "LOSSLESS", ""}), - Description: `Specifies the output schema type. - * ANALYTICS: Analytics schema defined by the FHIR community. - See https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md. - * ANALYTICS_V2: Analytics V2, similar to schema defined by the FHIR community, with added support for extensions with one or more occurrences and contained resources in stringified JSON. - * LOSSLESS: A data-driven schema generated from the fields present in the FHIR data being exported, with no additional simplification. 
Default value: "ANALYTICS" Possible values: ["ANALYTICS", "ANALYTICS_V2", "LOSSLESS"]`, - Default: "ANALYTICS", - }, - }, - }, - }, - }, - }, - }, - "resource_types": { - Type: schema.TypeList, - Optional: true, - Description: `Supply a FHIR resource type (such as "Patient" or "Observation"). See -https://www.hl7.org/fhir/valueset-resource-types.html for a list of all FHIR resource types. The server treats -an empty list as an intent to stream all the supported resource types in this FHIR store.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: `The fully qualified name of this dataset`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceHealthcareFhirStoreCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandHealthcareFhirStoreName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - versionProp, err := expandHealthcareFhirStoreVersion(d.Get("version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version"); !isEmptyValue(reflect.ValueOf(versionProp)) && (ok || !reflect.DeepEqual(v, versionProp)) { - obj["version"] = versionProp - } - enableUpdateCreateProp, err := expandHealthcareFhirStoreEnableUpdateCreate(d.Get("enable_update_create"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_update_create"); !isEmptyValue(reflect.ValueOf(enableUpdateCreateProp)) && (ok || !reflect.DeepEqual(v, enableUpdateCreateProp)) { - obj["enableUpdateCreate"] = enableUpdateCreateProp - } - disableReferentialIntegrityProp, err := 
expandHealthcareFhirStoreDisableReferentialIntegrity(d.Get("disable_referential_integrity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disable_referential_integrity"); !isEmptyValue(reflect.ValueOf(disableReferentialIntegrityProp)) && (ok || !reflect.DeepEqual(v, disableReferentialIntegrityProp)) { - obj["disableReferentialIntegrity"] = disableReferentialIntegrityProp - } - disableResourceVersioningProp, err := expandHealthcareFhirStoreDisableResourceVersioning(d.Get("disable_resource_versioning"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disable_resource_versioning"); !isEmptyValue(reflect.ValueOf(disableResourceVersioningProp)) && (ok || !reflect.DeepEqual(v, disableResourceVersioningProp)) { - obj["disableResourceVersioning"] = disableResourceVersioningProp - } - enableHistoryImportProp, err := expandHealthcareFhirStoreEnableHistoryImport(d.Get("enable_history_import"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_history_import"); !isEmptyValue(reflect.ValueOf(enableHistoryImportProp)) && (ok || !reflect.DeepEqual(v, enableHistoryImportProp)) { - obj["enableHistoryImport"] = enableHistoryImportProp - } - labelsProp, err := expandHealthcareFhirStoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - notificationConfigProp, err := expandHealthcareFhirStoreNotificationConfig(d.Get("notification_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_config"); !isEmptyValue(reflect.ValueOf(notificationConfigProp)) && (ok || !reflect.DeepEqual(v, notificationConfigProp)) { - obj["notificationConfig"] = notificationConfigProp - } - streamConfigsProp, err := expandHealthcareFhirStoreStreamConfigs(d.Get("stream_configs"), d, config) 
- if err != nil { - return err - } else if v, ok := d.GetOkExists("stream_configs"); !isEmptyValue(reflect.ValueOf(streamConfigsProp)) && (ok || !reflect.DeepEqual(v, streamConfigsProp)) { - obj["streamConfigs"] = streamConfigsProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/fhirStores?fhirStoreId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new FhirStore: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating FhirStore: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{dataset}}/fhirStores/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating FhirStore %q: %#v", d.Id(), res) - - return resourceHealthcareFhirStoreRead(d, meta) -} - -func resourceHealthcareFhirStoreRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/fhirStores/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("HealthcareFhirStore %q", d.Id())) - } - - res, err = resourceHealthcareFhirStoreDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has 
resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing HealthcareFhirStore because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenHealthcareFhirStoreName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("version", flattenHealthcareFhirStoreVersion(res["version"], d, config)); err != nil { - return fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("enable_update_create", flattenHealthcareFhirStoreEnableUpdateCreate(res["enableUpdateCreate"], d, config)); err != nil { - return fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("disable_referential_integrity", flattenHealthcareFhirStoreDisableReferentialIntegrity(res["disableReferentialIntegrity"], d, config)); err != nil { - return fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("disable_resource_versioning", flattenHealthcareFhirStoreDisableResourceVersioning(res["disableResourceVersioning"], d, config)); err != nil { - return fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("enable_history_import", flattenHealthcareFhirStoreEnableHistoryImport(res["enableHistoryImport"], d, config)); err != nil { - return fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("labels", flattenHealthcareFhirStoreLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("notification_config", flattenHealthcareFhirStoreNotificationConfig(res["notificationConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("stream_configs", flattenHealthcareFhirStoreStreamConfigs(res["streamConfigs"], d, config)); err != nil { - return fmt.Errorf("Error reading FhirStore: %s", err) - } - - return nil -} - -func resourceHealthcareFhirStoreUpdate(d *schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - enableUpdateCreateProp, err := expandHealthcareFhirStoreEnableUpdateCreate(d.Get("enable_update_create"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_update_create"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableUpdateCreateProp)) { - obj["enableUpdateCreate"] = enableUpdateCreateProp - } - labelsProp, err := expandHealthcareFhirStoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - notificationConfigProp, err := expandHealthcareFhirStoreNotificationConfig(d.Get("notification_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationConfigProp)) { - obj["notificationConfig"] = notificationConfigProp - } - streamConfigsProp, err := expandHealthcareFhirStoreStreamConfigs(d.Get("stream_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("stream_configs"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, streamConfigsProp)) { - obj["streamConfigs"] = streamConfigsProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/fhirStores/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating FhirStore %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("enable_update_create") { - updateMask = append(updateMask, "enableUpdateCreate") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("notification_config") { - updateMask = append(updateMask, 
"notificationConfig") - } - - if d.HasChange("stream_configs") { - updateMask = append(updateMask, "streamConfigs") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating FhirStore %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating FhirStore %q: %#v", d.Id(), res) - } - - return resourceHealthcareFhirStoreRead(d, meta) -} - -func resourceHealthcareFhirStoreDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/fhirStores/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting FhirStore %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "FhirStore") - } - - log.Printf("[DEBUG] Finished deleting FhirStore %q: %#v", d.Id(), res) - return nil -} - -func resourceHealthcareFhirStoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - fhirStoreId, err := parseHealthcareFhirStoreId(d.Id(), config) - if err != nil 
{ - return nil, err - } - - if err := d.Set("dataset", fhirStoreId.DatasetId.datasetId()); err != nil { - return nil, fmt.Errorf("Error setting dataset: %s", err) - } - if err := d.Set("name", fhirStoreId.Name); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenHealthcareFhirStoreName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreEnableUpdateCreate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreDisableReferentialIntegrity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreDisableResourceVersioning(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreEnableHistoryImport(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreNotificationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pubsub_topic"] = - flattenHealthcareFhirStoreNotificationConfigPubsubTopic(original["pubsubTopic"], d, config) - return []interface{}{transformed} -} -func flattenHealthcareFhirStoreNotificationConfigPubsubTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreStreamConfigs(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return 
v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "resource_types": flattenHealthcareFhirStoreStreamConfigsResourceTypes(original["resourceTypes"], d, config), - "bigquery_destination": flattenHealthcareFhirStoreStreamConfigsBigqueryDestination(original["bigqueryDestination"], d, config), - }) - } - return transformed -} -func flattenHealthcareFhirStoreStreamConfigsResourceTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreStreamConfigsBigqueryDestination(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_uri"] = - flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationDatasetUri(original["datasetUri"], d, config) - transformed["schema_config"] = - flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(original["schemaConfig"], d, config) - return []interface{}{transformed} -} -func flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationDatasetUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["schema_type"] = - flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaType(original["schemaType"], d, config) - 
transformed["recursive_structure_depth"] = - flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigRecursiveStructureDepth(original["recursiveStructureDepth"], d, config) - return []interface{}{transformed} -} -func flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigRecursiveStructureDepth(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandHealthcareFhirStoreName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreEnableUpdateCreate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreDisableReferentialIntegrity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreDisableResourceVersioning(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreEnableHistoryImport(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := 
make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandHealthcareFhirStoreNotificationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubTopic, err := expandHealthcareFhirStoreNotificationConfigPubsubTopic(original["pubsub_topic"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubTopic"] = transformedPubsubTopic - } - - return transformed, nil -} - -func expandHealthcareFhirStoreNotificationConfigPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreStreamConfigs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResourceTypes, err := expandHealthcareFhirStoreStreamConfigsResourceTypes(original["resource_types"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedResourceTypes); val.IsValid() && !isEmptyValue(val) { - transformed["resourceTypes"] = transformedResourceTypes - } - - transformedBigqueryDestination, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestination(original["bigquery_destination"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBigqueryDestination); val.IsValid() && !isEmptyValue(val) { - transformed["bigqueryDestination"] = transformedBigqueryDestination - } - - req = append(req, 
transformed) - } - return req, nil -} - -func expandHealthcareFhirStoreStreamConfigsResourceTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreStreamConfigsBigqueryDestination(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDatasetUri, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestinationDatasetUri(original["dataset_uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatasetUri); val.IsValid() && !isEmptyValue(val) { - transformed["datasetUri"] = transformedDatasetUri - } - - transformedSchemaConfig, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(original["schema_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSchemaConfig); val.IsValid() && !isEmptyValue(val) { - transformed["schemaConfig"] = transformedSchemaConfig - } - - return transformed, nil -} - -func expandHealthcareFhirStoreStreamConfigsBigqueryDestinationDatasetUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSchemaType, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaType(original["schema_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSchemaType); val.IsValid() && 
!isEmptyValue(val) { - transformed["schemaType"] = transformedSchemaType - } - - transformedRecursiveStructureDepth, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigRecursiveStructureDepth(original["recursive_structure_depth"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRecursiveStructureDepth); val.IsValid() && !isEmptyValue(val) { - transformed["recursiveStructureDepth"] = transformedRecursiveStructureDepth - } - - return transformed, nil -} - -func expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigRecursiveStructureDepth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceHealthcareFhirStoreDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // Take the returned long form of the name and use it as `self_link`. - // Then modify the name to be the user specified form. - // We can't just ignore_read on `name` as the linter will - // complain that the returned `res` is never used afterwards. - // Some field needs to be actually set, and we chose `name`. 
- if err := d.Set("self_link", res["name"].(string)); err != nil { - return nil, fmt.Errorf("Error setting self_link: %s", err) - } - res["name"] = d.Get("name").(string) - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_hl7_v2_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_hl7_v2_store.go deleted file mode 100644 index 4524f5deaa..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_healthcare_hl7_v2_store.go +++ /dev/null @@ -1,694 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceHealthcareHl7V2Store() *schema.Resource { - return &schema.Resource{ - Create: resourceHealthcareHl7V2StoreCreate, - Read: resourceHealthcareHl7V2StoreRead, - Update: resourceHealthcareHl7V2StoreUpdate, - Delete: resourceHealthcareHl7V2StoreDelete, - - Importer: &schema.ResourceImporter{ - State: resourceHealthcareHl7V2StoreImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "dataset": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Identifies the dataset addressed by this request. Must be in the format -'projects/{project}/locations/{location}/datasets/{dataset}'`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name for the Hl7V2Store. - -** Changing this property may recreate the Hl7v2 store (removing all data) **`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `User-supplied key-value pairs used to organize HL7v2 stores. 
- -Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must -conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - -Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 -bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - -No more than 64 labels can be associated with a given store. - -An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "notification_config": { - Type: schema.TypeList, - Optional: true, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pubsub_topic": { - Type: schema.TypeString, - Required: true, - Description: `The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. -PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. -It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message -was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a -project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given -Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail.`, - }, - }, - }, - }, - "notification_configs": { - Type: schema.TypeList, - Optional: true, - Description: `A list of notification configs. Each configuration uses a filter to determine whether to publish a -message (both Ingest & Create) on the corresponding notification destination. Only the message name -is sent as part of the notification. 
Supplied by the client.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pubsub_topic": { - Type: schema.TypeString, - Required: true, - Description: `The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. -PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. -It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message -was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a -project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given -Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail. - -If a notification cannot be published to Cloud Pub/Sub, errors will be logged to Stackdriver`, - }, - "filter": { - Type: schema.TypeString, - Optional: true, - Description: `Restricts notifications sent for messages matching a filter. If this is empty, all messages -are matched. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings - -Fields/functions available for filtering are: - -* messageType, from the MSH-9.1 field. For example, NOT messageType = "ADT". -* send_date or sendDate, the YYYY-MM-DD date the message was sent in the dataset's timeZone, from the MSH-7 segment. For example, send_date < "2017-01-02". -* sendTime, the timestamp when the message was sent, using the RFC3339 time format for comparisons, from the MSH-7 segment. For example, sendTime < "2017-01-02T00:00:00-05:00". -* sendFacility, the care center that the message came from, from the MSH-4 segment. For example, sendFacility = "ABC". -* PatientId(value, type), which matches if the message lists a patient having an ID of the given value and type in the PID-2, PID-3, or PID-4 segments. For example, PatientId("123456", "MRN"). 
-* labels.x, a string value of the label with key x as set using the Message.labels map. For example, labels."priority"="high". The operator :* can be used to assert the existence of a label. For example, labels."priority":*.`, - }, - }, - }, - }, - "parser_config": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allow_null_header": { - Type: schema.TypeBool, - Optional: true, - Description: `Determines whether messages with no header are allowed.`, - AtLeastOneOf: []string{"parser_config.0.allow_null_header", "parser_config.0.segment_terminator", "parser_config.0.schema"}, - }, - "schema": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringIsJSON, - StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, - Description: `JSON encoded string for schemas used to parse messages in this -store if schematized parsing is desired.`, - AtLeastOneOf: []string{"parser_config.0.allow_null_header", "parser_config.0.segment_terminator", "parser_config.0.schema", "parser_config.0.version"}, - }, - "segment_terminator": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateBase64String, - Description: `Byte(s) to be used as the segment terminator. If this is unset, '\r' will be used as segment terminator. - -A base64-encoded string.`, - AtLeastOneOf: []string{"parser_config.0.allow_null_header", "parser_config.0.segment_terminator", "parser_config.0.schema"}, - }, - "version": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"V1", "V2", "V3", ""}), - Description: `The version of the unschematized parser to be used when a custom 'schema' is not set. 
Default value: "V1" Possible values: ["V1", "V2", "V3"]`, - Default: "V1", - }, - }, - }, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: `The fully qualified name of this dataset`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceHealthcareHl7V2StoreCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandHealthcareHl7V2StoreName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - parserConfigProp, err := expandHealthcareHl7V2StoreParserConfig(d.Get("parser_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parser_config"); !isEmptyValue(reflect.ValueOf(parserConfigProp)) && (ok || !reflect.DeepEqual(v, parserConfigProp)) { - obj["parserConfig"] = parserConfigProp - } - labelsProp, err := expandHealthcareHl7V2StoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - notificationConfigsProp, err := expandHealthcareHl7V2StoreNotificationConfigs(d.Get("notification_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_configs"); !isEmptyValue(reflect.ValueOf(notificationConfigsProp)) && (ok || !reflect.DeepEqual(v, notificationConfigsProp)) { - obj["notificationConfigs"] = notificationConfigsProp - } - notificationConfigProp, err := expandHealthcareHl7V2StoreNotificationConfig(d.Get("notification_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_config"); 
!isEmptyValue(reflect.ValueOf(notificationConfigProp)) && (ok || !reflect.DeepEqual(v, notificationConfigProp)) { - obj["notificationConfig"] = notificationConfigProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/hl7V2Stores?hl7V2StoreId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Hl7V2Store: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Hl7V2Store: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{dataset}}/hl7V2Stores/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Hl7V2Store %q: %#v", d.Id(), res) - - return resourceHealthcareHl7V2StoreRead(d, meta) -} - -func resourceHealthcareHl7V2StoreRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/hl7V2Stores/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("HealthcareHl7V2Store %q", d.Id())) - } - - res, err = resourceHealthcareHl7V2StoreDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing HealthcareHl7V2Store because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenHealthcareHl7V2StoreName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Hl7V2Store: %s", err) - } - if err := d.Set("parser_config", flattenHealthcareHl7V2StoreParserConfig(res["parserConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Hl7V2Store: %s", err) - } - if err := d.Set("labels", flattenHealthcareHl7V2StoreLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Hl7V2Store: %s", err) - } - if err := d.Set("notification_configs", flattenHealthcareHl7V2StoreNotificationConfigs(res["notificationConfigs"], d, config)); err != nil { - return fmt.Errorf("Error reading Hl7V2Store: %s", err) - } - if err := d.Set("notification_config", flattenHealthcareHl7V2StoreNotificationConfig(res["notificationConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Hl7V2Store: %s", err) - } - - return nil -} - -func resourceHealthcareHl7V2StoreUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - parserConfigProp, err := expandHealthcareHl7V2StoreParserConfig(d.Get("parser_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parser_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, parserConfigProp)) { - obj["parserConfig"] = parserConfigProp - } - labelsProp, err := expandHealthcareHl7V2StoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - notificationConfigsProp, err := 
expandHealthcareHl7V2StoreNotificationConfigs(d.Get("notification_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_configs"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationConfigsProp)) { - obj["notificationConfigs"] = notificationConfigsProp - } - notificationConfigProp, err := expandHealthcareHl7V2StoreNotificationConfig(d.Get("notification_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationConfigProp)) { - obj["notificationConfig"] = notificationConfigProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/hl7V2Stores/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Hl7V2Store %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("parser_config") { - updateMask = append(updateMask, "parser_config.allow_null_header", - "parser_config.segment_terminator", - "parser_config.schema") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("notification_configs") { - updateMask = append(updateMask, "notificationConfigs") - } - - if d.HasChange("notification_config") { - updateMask = append(updateMask, "notificationConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Hl7V2Store %q: %s", d.Id(), err) - } else { - 
log.Printf("[DEBUG] Finished updating Hl7V2Store %q: %#v", d.Id(), res) - } - - return resourceHealthcareHl7V2StoreRead(d, meta) -} - -func resourceHealthcareHl7V2StoreDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/hl7V2Stores/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Hl7V2Store %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Hl7V2Store") - } - - log.Printf("[DEBUG] Finished deleting Hl7V2Store %q: %#v", d.Id(), res) - return nil -} - -func resourceHealthcareHl7V2StoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - hl7v2StoreId, err := parseHealthcareHl7V2StoreId(d.Id(), config) - if err != nil { - return nil, err - } - - if err := d.Set("dataset", hl7v2StoreId.DatasetId.datasetId()); err != nil { - return nil, fmt.Errorf("Error setting dataset: %s", err) - } - if err := d.Set("name", hl7v2StoreId.Name); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenHealthcareHl7V2StoreName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreParserConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["allow_null_header"] = - flattenHealthcareHl7V2StoreParserConfigAllowNullHeader(original["allowNullHeader"], d, config) - transformed["segment_terminator"] = - flattenHealthcareHl7V2StoreParserConfigSegmentTerminator(original["segmentTerminator"], d, config) - transformed["schema"] = - flattenHealthcareHl7V2StoreParserConfigSchema(original["schema"], d, config) - transformed["version"] = - flattenHealthcareHl7V2StoreParserConfigVersion(original["version"], d, config) - return []interface{}{transformed} -} -func flattenHealthcareHl7V2StoreParserConfigAllowNullHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreParserConfigSegmentTerminator(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreParserConfigSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - b, err := json.Marshal(v) - if err != nil { - // TODO: return error once https://github.com/GoogleCloudPlatform/magic-modules/issues/3257 is fixed. 
- log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) - } - return string(b) -} - -func flattenHealthcareHl7V2StoreParserConfigVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreNotificationConfigs(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "pubsub_topic": flattenHealthcareHl7V2StoreNotificationConfigsPubsubTopic(original["pubsubTopic"], d, config), - "filter": flattenHealthcareHl7V2StoreNotificationConfigsFilter(original["filter"], d, config), - }) - } - return transformed -} -func flattenHealthcareHl7V2StoreNotificationConfigsPubsubTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreNotificationConfigsFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreNotificationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pubsub_topic"] = - flattenHealthcareHl7V2StoreNotificationConfigPubsubTopic(original["pubsubTopic"], d, config) - return []interface{}{transformed} -} -func flattenHealthcareHl7V2StoreNotificationConfigPubsubTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandHealthcareHl7V2StoreName(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareHl7V2StoreParserConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowNullHeader, err := expandHealthcareHl7V2StoreParserConfigAllowNullHeader(original["allow_null_header"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAllowNullHeader); val.IsValid() && !isEmptyValue(val) { - transformed["allowNullHeader"] = transformedAllowNullHeader - } - - transformedSegmentTerminator, err := expandHealthcareHl7V2StoreParserConfigSegmentTerminator(original["segment_terminator"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSegmentTerminator); val.IsValid() && !isEmptyValue(val) { - transformed["segmentTerminator"] = transformedSegmentTerminator - } - - transformedSchema, err := expandHealthcareHl7V2StoreParserConfigSchema(original["schema"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { - transformed["schema"] = transformedSchema - } - - transformedVersion, err := expandHealthcareHl7V2StoreParserConfigVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - return transformed, nil -} - -func expandHealthcareHl7V2StoreParserConfigAllowNullHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareHl7V2StoreParserConfigSegmentTerminator(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) 
{ - return v, nil -} - -func expandHealthcareHl7V2StoreParserConfigSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - b := []byte(v.(string)) - if len(b) == 0 { - return nil, nil - } - m := make(map[string]interface{}) - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - return m, nil -} - -func expandHealthcareHl7V2StoreParserConfigVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareHl7V2StoreLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandHealthcareHl7V2StoreNotificationConfigs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubTopic, err := expandHealthcareHl7V2StoreNotificationConfigsPubsubTopic(original["pubsub_topic"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubTopic"] = transformedPubsubTopic - } - - transformedFilter, err := expandHealthcareHl7V2StoreNotificationConfigsFilter(original["filter"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !isEmptyValue(val) { - transformed["filter"] = transformedFilter - } - - req = append(req, transformed) - } - return req, nil -} - -func expandHealthcareHl7V2StoreNotificationConfigsPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandHealthcareHl7V2StoreNotificationConfigsFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareHl7V2StoreNotificationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubTopic, err := expandHealthcareHl7V2StoreNotificationConfigPubsubTopic(original["pubsub_topic"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubTopic"] = transformedPubsubTopic - } - - return transformed, nil -} - -func expandHealthcareHl7V2StoreNotificationConfigPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceHealthcareHl7V2StoreDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // Take the returned long form of the name and use it as `self_link`. - // Then modify the name to be the user specified form. - // We can't just ignore_read on `name` as the linter will - // complain that the returned `res` is never used afterwards. - // Some field needs to be actually set, and we chose `name`. 
- if err := d.Set("self_link", res["name"].(string)); err != nil { - return nil, fmt.Errorf("Error setting self_link: %s", err) - } - res["name"] = d.Get("name").(string) - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_access_boundary_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_access_boundary_policy.go deleted file mode 100644 index c185c2c282..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_access_boundary_policy.go +++ /dev/null @@ -1,589 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceIAM2AccessBoundaryPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceIAM2AccessBoundaryPolicyCreate, - Read: resourceIAM2AccessBoundaryPolicyRead, - Update: resourceIAM2AccessBoundaryPolicyUpdate, - Delete: resourceIAM2AccessBoundaryPolicyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIAM2AccessBoundaryPolicyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the policy.`, - }, - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The attachment point is identified by its URL-encoded full resource name.`, - }, - "rules": { - Type: schema.TypeList, - Required: true, - Description: `Rules to be applied.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "access_boundary_rule": { - Type: schema.TypeList, - Optional: true, - Description: `An access boundary rule in an IAM policy.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "availability_condition": { - Type: schema.TypeList, - Optional: true, - Description: `The availability condition further constrains the access allowed by the access boundary rule.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "expression": { - Type: schema.TypeString, - Required: true, - Description: `Textual representation of an expression in Common Expression Language syntax.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - 
Description: `Description of the expression. This is a longer text which describes the expression, -e.g. when hovered over it in a UI.`, - }, - "location": { - Type: schema.TypeString, - Optional: true, - Description: `String indicating the location of the expression for error reporting, -e.g. a file name and a position in the file.`, - }, - "title": { - Type: schema.TypeString, - Optional: true, - Description: `Title for the expression, i.e. a short string describing its purpose. -This can be used e.g. in UIs which allow to enter the expression.`, - }, - }, - }, - }, - "available_permissions": { - Type: schema.TypeList, - Optional: true, - Description: `A list of permissions that may be allowed for use on the specified resource.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "available_resource": { - Type: schema.TypeString, - Optional: true, - Description: `The full resource name of a Google Cloud resource entity.`, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `The description of the rule.`, - }, - }, - }, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `The display name of the rule.`, - }, - "etag": { - Type: schema.TypeString, - Computed: true, - Description: `The hash of the resource. 
Used internally during updates.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIAM2AccessBoundaryPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandIAM2AccessBoundaryPolicyDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - etagProp, err := expandIAM2AccessBoundaryPolicyEtag(d.Get("etag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("etag"); !isEmptyValue(reflect.ValueOf(etagProp)) && (ok || !reflect.DeepEqual(v, etagProp)) { - obj["etag"] = etagProp - } - rulesProp, err := expandIAM2AccessBoundaryPolicyRules(d.Get("rules"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rules"); !isEmptyValue(reflect.ValueOf(rulesProp)) && (ok || !reflect.DeepEqual(v, rulesProp)) { - obj["rules"] = rulesProp - } - - url, err := replaceVars(d, config, "{{IAM2BasePath}}policies/{{parent}}/accessboundarypolicies?policyId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new AccessBoundaryPolicy: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating AccessBoundaryPolicy: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{parent}}/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - 
d.SetId(id) - - err = IAM2OperationWaitTime( - config, res, "Creating AccessBoundaryPolicy", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create AccessBoundaryPolicy: %s", err) - } - - log.Printf("[DEBUG] Finished creating AccessBoundaryPolicy %q: %#v", d.Id(), res) - - return resourceIAM2AccessBoundaryPolicyRead(d, meta) -} - -func resourceIAM2AccessBoundaryPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IAM2BasePath}}policies/{{parent}}/accessboundarypolicies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IAM2AccessBoundaryPolicy %q", d.Id())) - } - - if err := d.Set("display_name", flattenIAM2AccessBoundaryPolicyDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessBoundaryPolicy: %s", err) - } - if err := d.Set("etag", flattenIAM2AccessBoundaryPolicyEtag(res["etag"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessBoundaryPolicy: %s", err) - } - if err := d.Set("rules", flattenIAM2AccessBoundaryPolicyRules(res["rules"], d, config)); err != nil { - return fmt.Errorf("Error reading AccessBoundaryPolicy: %s", err) - } - - return nil -} - -func resourceIAM2AccessBoundaryPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := 
make(map[string]interface{}) - displayNameProp, err := expandIAM2AccessBoundaryPolicyDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - etagProp, err := expandIAM2AccessBoundaryPolicyEtag(d.Get("etag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("etag"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, etagProp)) { - obj["etag"] = etagProp - } - rulesProp, err := expandIAM2AccessBoundaryPolicyRules(d.Get("rules"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rules"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rulesProp)) { - obj["rules"] = rulesProp - } - - url, err := replaceVars(d, config, "{{IAM2BasePath}}policies/{{parent}}/accessboundarypolicies/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating AccessBoundaryPolicy %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating AccessBoundaryPolicy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating AccessBoundaryPolicy %q: %#v", d.Id(), res) - } - - err = IAM2OperationWaitTime( - config, res, "Updating AccessBoundaryPolicy", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceIAM2AccessBoundaryPolicyRead(d, meta) -} - -func resourceIAM2AccessBoundaryPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != 
nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{IAM2BasePath}}policies/{{parent}}/accessboundarypolicies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting AccessBoundaryPolicy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "AccessBoundaryPolicy") - } - - err = IAM2OperationWaitTime( - config, res, "Deleting AccessBoundaryPolicy", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting AccessBoundaryPolicy %q: %#v", d.Id(), res) - return nil -} - -func resourceIAM2AccessBoundaryPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{parent}}/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIAM2AccessBoundaryPolicyDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAM2AccessBoundaryPolicyEtag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAM2AccessBoundaryPolicyRules(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 
1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "description": flattenIAM2AccessBoundaryPolicyRulesDescription(original["description"], d, config), - "access_boundary_rule": flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRule(original["accessBoundaryRule"], d, config), - }) - } - return transformed -} -func flattenIAM2AccessBoundaryPolicyRulesDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRule(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["available_resource"] = - flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailableResource(original["availableResource"], d, config) - transformed["available_permissions"] = - flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailablePermissions(original["availablePermissions"], d, config) - transformed["availability_condition"] = - flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityCondition(original["availabilityCondition"], d, config) - return []interface{}{transformed} -} -func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailableResource(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailablePermissions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityCondition(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) 
- transformed["expression"] = - flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionExpression(original["expression"], d, config) - transformed["title"] = - flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionTitle(original["title"], d, config) - transformed["description"] = - flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionDescription(original["description"], d, config) - transformed["location"] = - flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionLocation(original["location"], d, config) - return []interface{}{transformed} -} -func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionExpression(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionTitle(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIAM2AccessBoundaryPolicyDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAM2AccessBoundaryPolicyEtag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAM2AccessBoundaryPolicyRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDescription, err := 
expandIAM2AccessBoundaryPolicyRulesDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedAccessBoundaryRule, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRule(original["access_boundary_rule"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAccessBoundaryRule); val.IsValid() && !isEmptyValue(val) { - transformed["accessBoundaryRule"] = transformedAccessBoundaryRule - } - - req = append(req, transformed) - } - return req, nil -} - -func expandIAM2AccessBoundaryPolicyRulesDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAvailableResource, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailableResource(original["available_resource"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAvailableResource); val.IsValid() && !isEmptyValue(val) { - transformed["availableResource"] = transformedAvailableResource - } - - transformedAvailablePermissions, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailablePermissions(original["available_permissions"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAvailablePermissions); val.IsValid() && !isEmptyValue(val) { - transformed["availablePermissions"] = transformedAvailablePermissions - } - - transformedAvailabilityCondition, err := 
expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityCondition(original["availability_condition"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAvailabilityCondition); val.IsValid() && !isEmptyValue(val) { - transformed["availabilityCondition"] = transformedAvailabilityCondition - } - - return transformed, nil -} - -func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailableResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailablePermissions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpression, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionExpression(original["expression"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !isEmptyValue(val) { - transformed["expression"] = transformedExpression - } - - transformedTitle, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionTitle(original["title"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { - transformed["title"] = transformedTitle - } - - transformedDescription, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDescription); 
val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedLocation, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - return transformed, nil -} - -func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_workforce_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_workforce_pool.go deleted file mode 100644 index 75059fbf26..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_workforce_pool.go +++ /dev/null @@ -1,480 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered 
when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -const workforcePoolIdRegexp = `^[a-z][a-z0-9-]{4,61}[a-z0-9]$` - -func validateWorkforcePoolId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if strings.HasPrefix(value, "gcp-") { - errors = append(errors, fmt.Errorf( - "%q (%q) can not start with \"gcp-\". "+ - "The prefix `gcp-` is reserved for use by Google, and may not be specified.", k, value)) - } - - if !regexp.MustCompile(workforcePoolIdRegexp).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q (%q) must contain only lowercase letters [a-z], digits [0-9], and hyphens "+ - "[-]. The WorkforcePool ID must be between 6 and 63 characters, begin "+ - "with a letter, and cannot have a trailing hyphen.", k, value)) - } - - return -} - -func ResourceIAMWorkforcePoolWorkforcePool() *schema.Resource { - return &schema.Resource{ - Create: resourceIAMWorkforcePoolWorkforcePoolCreate, - Read: resourceIAMWorkforcePoolWorkforcePoolRead, - Update: resourceIAMWorkforcePoolWorkforcePoolUpdate, - Delete: resourceIAMWorkforcePoolWorkforcePoolDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIAMWorkforcePoolWorkforcePoolImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location for the resource.`, - }, - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Immutable. 
The resource name of the parent. Format: 'organizations/{org-id}'.`, - }, - "workforce_pool_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateWorkforcePoolId, - Description: `The name of the pool. The ID must be a globally unique string of 6 to 63 lowercase letters, -digits, or hyphens. It must start with a letter, and cannot have a trailing hyphen. -The prefix 'gcp-' is reserved for use by Google, and may not be specified.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A user-specified description of the pool. Cannot exceed 256 characters.`, - }, - "disabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether the pool is disabled. You cannot use a disabled pool to exchange tokens, -or use existing tokens to access resources. If the pool is re-enabled, existing tokens grant access again.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `A user-specified display name of the pool in Google Cloud Console. Cannot exceed 32 characters.`, - }, - "session_duration": { - Type: schema.TypeString, - Optional: true, - Description: `Duration that the Google Cloud access tokens, console sign-in sessions, -and 'gcloud' sign-in sessions from this pool are valid. -Must be greater than 15 minutes (900s) and less than 12 hours (43200s). -If 'sessionDuration' is not configured, minted credentials have a default duration of one hour (3600s). -A duration in seconds with up to nine fractional digits, ending with ''s''. Example: "'3.5s'".`, - Default: "3600s", - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. The resource name of the pool. -Format: 'locations/{location}/workforcePools/{workforcePoolId}'`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. The state of the pool. - * STATE_UNSPECIFIED: State unspecified. 
- * ACTIVE: The pool is active, and may be used in Google Cloud policies. - * DELETED: The pool is soft-deleted. Soft-deleted pools are permanently deleted - after approximately 30 days. You can restore a soft-deleted pool using - [workforcePools.undelete](https://cloud.google.com/iam/docs/reference/rest/v1/locations.workforcePools/undelete#google.iam.admin.v1.WorkforcePools.UndeleteWorkforcePool). - You cannot reuse the ID of a soft-deleted pool until it is permanently deleted. - While a pool is deleted, you cannot use it to exchange tokens, or use - existing tokens to access resources. If the pool is undeleted, existing - tokens grant access again.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIAMWorkforcePoolWorkforcePoolCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - parentProp, err := expandIAMWorkforcePoolWorkforcePoolParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - displayNameProp, err := expandIAMWorkforcePoolWorkforcePoolDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandIAMWorkforcePoolWorkforcePoolDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - disabledProp, err := 
expandIAMWorkforcePoolWorkforcePoolDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - sessionDurationProp, err := expandIAMWorkforcePoolWorkforcePoolSessionDuration(d.Get("session_duration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("session_duration"); !isEmptyValue(reflect.ValueOf(sessionDurationProp)) && (ok || !reflect.DeepEqual(v, sessionDurationProp)) { - obj["sessionDuration"] = sessionDurationProp - } - - url, err := replaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools?workforcePoolId={{workforce_pool_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new WorkforcePool: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating WorkforcePool: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "locations/{{location}}/workforcePools/{{workforce_pool_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = IAMWorkforcePoolOperationWaitTime( - config, res, "Creating WorkforcePool", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create WorkforcePool: %s", err) - } - - log.Printf("[DEBUG] Finished creating WorkforcePool %q: %#v", d.Id(), res) - - return resourceIAMWorkforcePoolWorkforcePoolRead(d, meta) -} - -func resourceIAMWorkforcePoolWorkforcePoolRead(d *schema.ResourceData, meta 
interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IAMWorkforcePoolWorkforcePool %q", d.Id())) - } - - res, err = resourceIAMWorkforcePoolWorkforcePoolDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing IAMWorkforcePoolWorkforcePool because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenIAMWorkforcePoolWorkforcePoolName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePool: %s", err) - } - if err := d.Set("parent", flattenIAMWorkforcePoolWorkforcePoolParent(res["parent"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePool: %s", err) - } - if err := d.Set("display_name", flattenIAMWorkforcePoolWorkforcePoolDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePool: %s", err) - } - if err := d.Set("description", flattenIAMWorkforcePoolWorkforcePoolDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePool: %s", err) - } - if err := d.Set("state", flattenIAMWorkforcePoolWorkforcePoolState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePool: %s", err) - } - if err := d.Set("disabled", 
flattenIAMWorkforcePoolWorkforcePoolDisabled(res["disabled"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePool: %s", err) - } - if err := d.Set("session_duration", flattenIAMWorkforcePoolWorkforcePoolSessionDuration(res["sessionDuration"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePool: %s", err) - } - - return nil -} - -func resourceIAMWorkforcePoolWorkforcePoolUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandIAMWorkforcePoolWorkforcePoolDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandIAMWorkforcePoolWorkforcePoolDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - disabledProp, err := expandIAMWorkforcePoolWorkforcePoolDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - sessionDurationProp, err := expandIAMWorkforcePoolWorkforcePoolSessionDuration(d.Get("session_duration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("session_duration"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sessionDurationProp)) { - obj["sessionDuration"] = sessionDurationProp - } - - url, err := replaceVars(d, config, 
"{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating WorkforcePool %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("disabled") { - updateMask = append(updateMask, "disabled") - } - - if d.HasChange("session_duration") { - updateMask = append(updateMask, "sessionDuration") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating WorkforcePool %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating WorkforcePool %q: %#v", d.Id(), res) - } - - err = IAMWorkforcePoolOperationWaitTime( - config, res, "Updating WorkforcePool", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceIAMWorkforcePoolWorkforcePoolRead(d, meta) -} - -func resourceIAMWorkforcePoolWorkforcePoolDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting 
WorkforcePool %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "WorkforcePool") - } - - err = IAMWorkforcePoolOperationWaitTime( - config, res, "Deleting WorkforcePool", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting WorkforcePool %q: %#v", d.Id(), res) - return nil -} - -func resourceIAMWorkforcePoolWorkforcePoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "locations/(?P[^/]+)/workforcePools/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "locations/{{location}}/workforcePools/{{workforce_pool_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIAMWorkforcePoolWorkforcePoolName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolParent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolDisabled(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolSessionDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIAMWorkforcePoolWorkforcePoolParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMWorkforcePoolWorkforcePoolDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMWorkforcePoolWorkforcePoolDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMWorkforcePoolWorkforcePoolDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMWorkforcePoolWorkforcePoolSessionDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceIAMWorkforcePoolWorkforcePoolDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v := res["state"]; v == "DELETED" { - return nil, nil - } - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_workforce_pool_provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_workforce_pool_provider.go deleted file mode 100644 index 5e77a101d4..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_workforce_pool_provider.go +++ /dev/null @@ -1,749 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// 
changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -const workforcePoolProviderIdRegexp = `^[a-z0-9-]{4,32}$` - -func validateWorkforcePoolProviderId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if strings.HasPrefix(value, "gcp-") { - errors = append(errors, fmt.Errorf( - "%q (%q) can not start with \"gcp-\". "+ - "The prefix `gcp-` is reserved for use by Google, and may not be specified.", k, value)) - } - - if !regexp.MustCompile(workforcePoolProviderIdRegexp).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q (%q) must be 4-32 characters, and may contain the characters [a-z0-9-].", k, value)) - } - - return -} - -func ResourceIAMWorkforcePoolWorkforcePoolProvider() *schema.Resource { - return &schema.Resource{ - Create: resourceIAMWorkforcePoolWorkforcePoolProviderCreate, - Read: resourceIAMWorkforcePoolWorkforcePoolProviderRead, - Update: resourceIAMWorkforcePoolWorkforcePoolProviderUpdate, - Delete: resourceIAMWorkforcePoolWorkforcePoolProviderDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIAMWorkforcePoolWorkforcePoolProviderImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location for the resource.`, - }, - "provider_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateWorkforcePoolProviderId, - Description: `The ID for the 
provider, which becomes the final component of the resource name. -This value must be 4-32 characters, and may contain the characters [a-z0-9-]. -The prefix 'gcp-' is reserved for use by Google, and may not be specified.`, - }, - "workforce_pool_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID to use for the pool, which becomes the final component of the resource name. -The IDs must be a globally unique string of 6 to 63 lowercase letters, digits, or hyphens. -It must start with a letter, and cannot have a trailing hyphen. -The prefix 'gcp-' is reserved for use by Google, and may not be specified.`, - }, - "attribute_condition": { - Type: schema.TypeString, - Optional: true, - Description: `A [Common Expression Language](https://opensource.google/projects/cel) expression, in -plain text, to restrict what otherwise valid authentication credentials issued by the -provider should not be accepted. - -The expression must output a boolean representing whether to allow the federation. - -The following keywords may be referenced in the expressions: - * 'assertion': JSON representing the authentication credential issued by the provider. - * 'google': The Google attributes mapped from the assertion in the 'attribute_mappings'. - 'google.profile_photo' and 'google.display_name' are not supported. - * 'attribute': The custom attributes mapped from the assertion in the 'attribute_mappings'. - -The maximum length of the attribute condition expression is 4096 characters. -If unspecified, all valid authentication credentials will be accepted. - -The following example shows how to only allow credentials with a mapped 'google.groups' value of 'admins': -''' -"'admins' in google.groups" -'''`, - }, - "attribute_mapping": { - Type: schema.TypeMap, - Optional: true, - Description: `Maps attributes from the authentication credentials issued by an external identity provider -to Google Cloud attributes, such as 'subject' and 'segment'. 
- -Each key must be a string specifying the Google Cloud IAM attribute to map to. - -The following keys are supported: - * 'google.subject': The principal IAM is authenticating. You can reference this value in IAM bindings. - This is also the subject that appears in Cloud Logging logs. This is a required field and - the mapped subject cannot exceed 127 bytes. - * 'google.groups': Groups the authenticating user belongs to. You can grant groups access to - resources using an IAM 'principalSet' binding; access applies to all members of the group. - * 'google.display_name': The name of the authenticated user. This is an optional field and - the mapped display name cannot exceed 100 bytes. If not set, 'google.subject' will be displayed instead. - This attribute cannot be referenced in IAM bindings. - * 'google.profile_photo': The URL that specifies the authenticated user's thumbnail photo. - This is an optional field. When set, the image will be visible as the user's profile picture. - If not set, a generic user icon will be displayed instead. - This attribute cannot be referenced in IAM bindings. - -You can also provide custom attributes by specifying 'attribute.{custom_attribute}', where {custom_attribute} -is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. -The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. - -You can reference these attributes in IAM policies to define fine-grained access for a workforce pool -to Google Cloud resources. 
For example: - * 'google.subject': - 'principal://iam.googleapis.com/locations/{location}/workforcePools/{pool}/subject/{value}' - * 'google.groups': - 'principalSet://iam.googleapis.com/locations/{location}/workforcePools/{pool}/group/{value}' - * 'attribute.{custom_attribute}': - 'principalSet://iam.googleapis.com/locations/{location}/workforcePools/{pool}/attribute.{custom_attribute}/{value}' - -Each value must be a [Common Expression Language](https://opensource.google/projects/cel) -function that maps an identity provider credential to the normalized attribute specified -by the corresponding map key. - -You can use the 'assertion' keyword in the expression to access a JSON representation of -the authentication credential issued by the provider. - -The maximum length of an attribute mapping expression is 2048 characters. When evaluated, -the total size of all mapped attributes must not exceed 8KB. - -For OIDC providers, you must supply a custom mapping that includes the 'google.subject' attribute. -For example, the following maps the sub claim of the incoming credential to the 'subject' attribute -on a Google token: -''' -{"google.subject": "assertion.sub"} -''' - -An object containing a list of '"key": value' pairs. -Example: '{ "name": "wrench", "mass": "1.3kg", "count": "3" }'.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A user-specified description of the provider. Cannot exceed 256 characters.`, - }, - "disabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. -However, existing tokens still grant access.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `A user-specified display name for the provider. 
Cannot exceed 32 characters.`, - }, - "oidc": { - Type: schema.TypeList, - Optional: true, - Description: `Represents an OpenId Connect 1.0 identity provider.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "client_id": { - Type: schema.TypeString, - Required: true, - Description: `The client ID. Must match the audience claim of the JWT issued by the identity provider.`, - }, - "issuer_uri": { - Type: schema.TypeString, - Required: true, - Description: `The OIDC issuer URI. Must be a valid URI using the 'https' scheme.`, - }, - }, - }, - ExactlyOneOf: []string{"saml", "oidc"}, - }, - "saml": { - Type: schema.TypeList, - Optional: true, - Description: `Represents a SAML identity provider.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "idp_metadata_xml": { - Type: schema.TypeString, - Required: true, - Description: `SAML Identity provider configuration metadata xml doc. -The xml document should comply with [SAML 2.0 specification](https://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf). -The max size of the acceptable xml document will be bounded to 128k characters. - -The metadata xml document should satisfy the following constraints: -1) Must contain an Identity Provider Entity ID. -2) Must contain at least one non-expired signing key certificate. -3) For each signing key: - a) Valid from should be no more than 7 days from now. - b) Valid to should be no more than 10 years in the future. -4) Up to 3 IdP signing keys are allowed in the metadata xml. - -When updating the provider's metadata xml, at least one non-expired signing key -must overlap with the existing metadata. This requirement is skipped if there are -no non-expired signing keys present in the existing metadata.`, - }, - }, - }, - ExactlyOneOf: []string{"saml", "oidc"}, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. The resource name of the provider. 
-Format: 'locations/{location}/workforcePools/{workforcePoolId}/providers/{providerId}'`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `The current state of the provider. -* STATE_UNSPECIFIED: State unspecified. -* ACTIVE: The provider is active and may be used to validate authentication credentials. -* DELETED: The provider is soft-deleted. Soft-deleted providers are permanently - deleted after approximately 30 days. You can restore a soft-deleted provider using - [providers.undelete](https://cloud.google.com/iam/docs/reference/rest/v1/locations.workforcePools.providers/undelete#google.iam.admin.v1.WorkforcePools.UndeleteWorkforcePoolProvider).`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIAMWorkforcePoolWorkforcePoolProviderCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandIAMWorkforcePoolWorkforcePoolProviderDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandIAMWorkforcePoolWorkforcePoolProviderDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - disabledProp, err := expandIAMWorkforcePoolWorkforcePoolProviderDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - 
} - attributeMappingProp, err := expandIAMWorkforcePoolWorkforcePoolProviderAttributeMapping(d.Get("attribute_mapping"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attribute_mapping"); !isEmptyValue(reflect.ValueOf(attributeMappingProp)) && (ok || !reflect.DeepEqual(v, attributeMappingProp)) { - obj["attributeMapping"] = attributeMappingProp - } - attributeConditionProp, err := expandIAMWorkforcePoolWorkforcePoolProviderAttributeCondition(d.Get("attribute_condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attribute_condition"); !isEmptyValue(reflect.ValueOf(attributeConditionProp)) && (ok || !reflect.DeepEqual(v, attributeConditionProp)) { - obj["attributeCondition"] = attributeConditionProp - } - samlProp, err := expandIAMWorkforcePoolWorkforcePoolProviderSaml(d.Get("saml"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("saml"); !isEmptyValue(reflect.ValueOf(samlProp)) && (ok || !reflect.DeepEqual(v, samlProp)) { - obj["saml"] = samlProp - } - oidcProp, err := expandIAMWorkforcePoolWorkforcePoolProviderOidc(d.Get("oidc"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("oidc"); !isEmptyValue(reflect.ValueOf(oidcProp)) && (ok || !reflect.DeepEqual(v, oidcProp)) { - obj["oidc"] = oidcProp - } - - url, err := replaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers?workforcePoolProviderId={{provider_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new WorkforcePoolProvider: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating 
WorkforcePoolProvider: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = IAMWorkforcePoolOperationWaitTime( - config, res, "Creating WorkforcePoolProvider", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create WorkforcePoolProvider: %s", err) - } - - log.Printf("[DEBUG] Finished creating WorkforcePoolProvider %q: %#v", d.Id(), res) - - return resourceIAMWorkforcePoolWorkforcePoolProviderRead(d, meta) -} - -func resourceIAMWorkforcePoolWorkforcePoolProviderRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IAMWorkforcePoolWorkforcePoolProvider %q", d.Id())) - } - - res, err = resourceIAMWorkforcePoolWorkforcePoolProviderDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing IAMWorkforcePoolWorkforcePoolProvider because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenIAMWorkforcePoolWorkforcePoolProviderName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) - } - if err := d.Set("display_name", flattenIAMWorkforcePoolWorkforcePoolProviderDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) - } - if err := d.Set("description", flattenIAMWorkforcePoolWorkforcePoolProviderDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) - } - if err := d.Set("state", flattenIAMWorkforcePoolWorkforcePoolProviderState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) - } - if err := d.Set("disabled", flattenIAMWorkforcePoolWorkforcePoolProviderDisabled(res["disabled"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) - } - if err := d.Set("attribute_mapping", flattenIAMWorkforcePoolWorkforcePoolProviderAttributeMapping(res["attributeMapping"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) - } - if err := d.Set("attribute_condition", flattenIAMWorkforcePoolWorkforcePoolProviderAttributeCondition(res["attributeCondition"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) - } - if err := d.Set("saml", flattenIAMWorkforcePoolWorkforcePoolProviderSaml(res["saml"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) - } - if err := d.Set("oidc", flattenIAMWorkforcePoolWorkforcePoolProviderOidc(res["oidc"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) - } - - return nil -} - -func 
resourceIAMWorkforcePoolWorkforcePoolProviderUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandIAMWorkforcePoolWorkforcePoolProviderDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandIAMWorkforcePoolWorkforcePoolProviderDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - disabledProp, err := expandIAMWorkforcePoolWorkforcePoolProviderDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - attributeMappingProp, err := expandIAMWorkforcePoolWorkforcePoolProviderAttributeMapping(d.Get("attribute_mapping"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attribute_mapping"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attributeMappingProp)) { - obj["attributeMapping"] = attributeMappingProp - } - attributeConditionProp, err := expandIAMWorkforcePoolWorkforcePoolProviderAttributeCondition(d.Get("attribute_condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attribute_condition"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attributeConditionProp)) { - obj["attributeCondition"] = attributeConditionProp - } - samlProp, 
err := expandIAMWorkforcePoolWorkforcePoolProviderSaml(d.Get("saml"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("saml"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, samlProp)) { - obj["saml"] = samlProp - } - oidcProp, err := expandIAMWorkforcePoolWorkforcePoolProviderOidc(d.Get("oidc"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("oidc"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, oidcProp)) { - obj["oidc"] = oidcProp - } - - url, err := replaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating WorkforcePoolProvider %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("disabled") { - updateMask = append(updateMask, "disabled") - } - - if d.HasChange("attribute_mapping") { - updateMask = append(updateMask, "attributeMapping") - } - - if d.HasChange("attribute_condition") { - updateMask = append(updateMask, "attributeCondition") - } - - if d.HasChange("saml") { - updateMask = append(updateMask, "saml") - } - - if d.HasChange("oidc") { - updateMask = append(updateMask, "oidc") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return 
fmt.Errorf("Error updating WorkforcePoolProvider %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating WorkforcePoolProvider %q: %#v", d.Id(), res) - } - - err = IAMWorkforcePoolOperationWaitTime( - config, res, "Updating WorkforcePoolProvider", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceIAMWorkforcePoolWorkforcePoolProviderRead(d, meta) -} - -func resourceIAMWorkforcePoolWorkforcePoolProviderDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting WorkforcePoolProvider %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "WorkforcePoolProvider") - } - - err = IAMWorkforcePoolOperationWaitTime( - config, res, "Deleting WorkforcePoolProvider", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting WorkforcePoolProvider %q: %#v", d.Id(), res) - return nil -} - -func resourceIAMWorkforcePoolWorkforcePoolProviderImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "locations/(?P[^/]+)/workforcePools/(?P[^/]+)/providers/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import 
id for the resource id - id, err := replaceVars(d, config, "locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIAMWorkforcePoolWorkforcePoolProviderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolProviderDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolProviderDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolProviderState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolProviderDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolProviderAttributeMapping(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolProviderAttributeCondition(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolProviderSaml(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["idp_metadata_xml"] = - flattenIAMWorkforcePoolWorkforcePoolProviderSamlIdpMetadataXml(original["idpMetadataXml"], d, config) - return []interface{}{transformed} -} -func flattenIAMWorkforcePoolWorkforcePoolProviderSamlIdpMetadataXml(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolProviderOidc(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["issuer_uri"] = - flattenIAMWorkforcePoolWorkforcePoolProviderOidcIssuerUri(original["issuerUri"], d, config) - transformed["client_id"] = - flattenIAMWorkforcePoolWorkforcePoolProviderOidcClientId(original["clientId"], d, config) - return []interface{}{transformed} -} -func flattenIAMWorkforcePoolWorkforcePoolProviderOidcIssuerUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMWorkforcePoolWorkforcePoolProviderOidcClientId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIAMWorkforcePoolWorkforcePoolProviderDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMWorkforcePoolWorkforcePoolProviderDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMWorkforcePoolWorkforcePoolProviderDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMWorkforcePoolWorkforcePoolProviderAttributeMapping(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandIAMWorkforcePoolWorkforcePoolProviderAttributeCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMWorkforcePoolWorkforcePoolProviderSaml(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, 
nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdpMetadataXml, err := expandIAMWorkforcePoolWorkforcePoolProviderSamlIdpMetadataXml(original["idp_metadata_xml"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIdpMetadataXml); val.IsValid() && !isEmptyValue(val) { - transformed["idpMetadataXml"] = transformedIdpMetadataXml - } - - return transformed, nil -} - -func expandIAMWorkforcePoolWorkforcePoolProviderSamlIdpMetadataXml(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMWorkforcePoolWorkforcePoolProviderOidc(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIssuerUri, err := expandIAMWorkforcePoolWorkforcePoolProviderOidcIssuerUri(original["issuer_uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIssuerUri); val.IsValid() && !isEmptyValue(val) { - transformed["issuerUri"] = transformedIssuerUri - } - - transformedClientId, err := expandIAMWorkforcePoolWorkforcePoolProviderOidcClientId(original["client_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedClientId); val.IsValid() && !isEmptyValue(val) { - transformed["clientId"] = transformedClientId - } - - return transformed, nil -} - -func expandIAMWorkforcePoolWorkforcePoolProviderOidcIssuerUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMWorkforcePoolWorkforcePoolProviderOidcClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
resourceIAMWorkforcePoolWorkforcePoolProviderDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v := res["state"]; v == "DELETED" { - return nil, nil - } - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_workload_identity_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_workload_identity_pool.go deleted file mode 100644 index 94541c7ed8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_workload_identity_pool.go +++ /dev/null @@ -1,456 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -const workloadIdentityPoolIdRegexp = `^[0-9a-z-]+$` - -func validateWorkloadIdentityPoolId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if strings.HasPrefix(value, "gcp-") { - errors = append(errors, fmt.Errorf( - "%q (%q) can not start with \"gcp-\"", k, value)) - } - - if !regexp.MustCompile(workloadIdentityPoolIdRegexp).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must contain only lowercase letters (a-z), numbers (0-9), or dashes (-)", k)) - } - - if len(value) < 4 { - errors = append(errors, fmt.Errorf( - "%q cannot be smaller than 4 characters", k)) - } - - if len(value) > 32 { - errors = append(errors, fmt.Errorf( - "%q cannot be greater than 32 characters", k)) - } - - return -} - -func ResourceIAMBetaWorkloadIdentityPool() *schema.Resource { - return &schema.Resource{ - Create: resourceIAMBetaWorkloadIdentityPoolCreate, - Read: resourceIAMBetaWorkloadIdentityPoolRead, - Update: resourceIAMBetaWorkloadIdentityPoolUpdate, - Delete: resourceIAMBetaWorkloadIdentityPoolDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIAMBetaWorkloadIdentityPoolImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "workload_identity_pool_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateWorkloadIdentityPoolId, - Description: `The ID to use for the pool, which becomes the final component of the resource name. This -value should be 4-32 characters, and may contain the characters [a-z0-9-]. 
The prefix -'gcp-' is reserved for use by Google, and may not be specified.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A description of the pool. Cannot exceed 256 characters.`, - }, - "disabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether the pool is disabled. You cannot use a disabled pool to exchange tokens, or use -existing tokens to access resources. If the pool is re-enabled, existing tokens grant -access again.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `A display name for the pool. Cannot exceed 32 characters.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the pool as -'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}'.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `The state of the pool. -* STATE_UNSPECIFIED: State unspecified. -* ACTIVE: The pool is active, and may be used in Google Cloud policies. -* DELETED: The pool is soft-deleted. Soft-deleted pools are permanently deleted after - approximately 30 days. You can restore a soft-deleted pool using - UndeleteWorkloadIdentityPool. You cannot reuse the ID of a soft-deleted pool until it is - permanently deleted. While a pool is deleted, you cannot use it to exchange tokens, or - use existing tokens to access resources. 
If the pool is undeleted, existing tokens grant - access again.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIAMBetaWorkloadIdentityPoolCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandIAMBetaWorkloadIdentityPoolDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandIAMBetaWorkloadIdentityPoolDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - disabledProp, err := expandIAMBetaWorkloadIdentityPoolDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - - url, err := replaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools?workloadIdentityPoolId={{workload_identity_pool_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new WorkloadIdentityPool: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for WorkloadIdentityPool: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating WorkloadIdentityPool: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = IAMBetaOperationWaitTime( - config, res, project, "Creating WorkloadIdentityPool", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create WorkloadIdentityPool: %s", err) - } - - log.Printf("[DEBUG] Finished creating WorkloadIdentityPool %q: %#v", d.Id(), res) - - return resourceIAMBetaWorkloadIdentityPoolRead(d, meta) -} - -func resourceIAMBetaWorkloadIdentityPoolRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for WorkloadIdentityPool: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IAMBetaWorkloadIdentityPool %q", d.Id())) - } - - res, err = resourceIAMBetaWorkloadIdentityPoolDecoder(d, meta, res) - 
if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing IAMBetaWorkloadIdentityPool because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPool: %s", err) - } - - if err := d.Set("state", flattenIAMBetaWorkloadIdentityPoolState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPool: %s", err) - } - if err := d.Set("display_name", flattenIAMBetaWorkloadIdentityPoolDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPool: %s", err) - } - if err := d.Set("description", flattenIAMBetaWorkloadIdentityPoolDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPool: %s", err) - } - if err := d.Set("name", flattenIAMBetaWorkloadIdentityPoolName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPool: %s", err) - } - if err := d.Set("disabled", flattenIAMBetaWorkloadIdentityPoolDisabled(res["disabled"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPool: %s", err) - } - - return nil -} - -func resourceIAMBetaWorkloadIdentityPoolUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for WorkloadIdentityPool: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandIAMBetaWorkloadIdentityPoolDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandIAMBetaWorkloadIdentityPoolDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - disabledProp, err := expandIAMBetaWorkloadIdentityPoolDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - - url, err := replaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating WorkloadIdentityPool %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("disabled") { - updateMask = append(updateMask, "disabled") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating WorkloadIdentityPool %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating WorkloadIdentityPool %q: %#v", d.Id(), res) - } 
- - err = IAMBetaOperationWaitTime( - config, res, project, "Updating WorkloadIdentityPool", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceIAMBetaWorkloadIdentityPoolRead(d, meta) -} - -func resourceIAMBetaWorkloadIdentityPoolDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for WorkloadIdentityPool: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting WorkloadIdentityPool %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "WorkloadIdentityPool") - } - - err = IAMBetaOperationWaitTime( - config, res, project, "Deleting WorkloadIdentityPool", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting WorkloadIdentityPool %q: %#v", d.Id(), res) - return nil -} - -func resourceIAMBetaWorkloadIdentityPoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/workloadIdentityPools/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err 
:= replaceVars(d, config, "projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIAMBetaWorkloadIdentityPoolState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMBetaWorkloadIdentityPoolDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMBetaWorkloadIdentityPoolDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMBetaWorkloadIdentityPoolName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMBetaWorkloadIdentityPoolDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIAMBetaWorkloadIdentityPoolDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMBetaWorkloadIdentityPoolDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMBetaWorkloadIdentityPoolDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceIAMBetaWorkloadIdentityPoolDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v := res["state"]; v == "DELETED" { - return nil, nil - } - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_workload_identity_pool_provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_workload_identity_pool_provider.go deleted file mode 100644 index efe49b6b59..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_workload_identity_pool_provider.go +++ /dev/null @@ -1,797 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -const workloadIdentityPoolProviderIdRegexp = `^[0-9a-z-]+$` - -func validateWorkloadIdentityPoolProviderId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if strings.HasPrefix(value, "gcp-") { - errors = append(errors, fmt.Errorf( - "%q (%q) can not start with \"gcp-\"", k, value)) - } - - if !regexp.MustCompile(workloadIdentityPoolProviderIdRegexp).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q must contain only lowercase letters (a-z), numbers (0-9), or dashes (-)", k)) - } - - if len(value) < 4 { - errors = append(errors, fmt.Errorf( - "%q cannot be smaller than 4 characters", k)) - } - - if len(value) > 32 { - errors = append(errors, fmt.Errorf( - "%q cannot be greater than 32 characters", k)) - } - - return -} - -func ResourceIAMBetaWorkloadIdentityPoolProvider() *schema.Resource { - return &schema.Resource{ - Create: resourceIAMBetaWorkloadIdentityPoolProviderCreate, - Read: resourceIAMBetaWorkloadIdentityPoolProviderRead, - Update: resourceIAMBetaWorkloadIdentityPoolProviderUpdate, - Delete: resourceIAMBetaWorkloadIdentityPoolProviderDelete, - - Importer: 
&schema.ResourceImporter{ - State: resourceIAMBetaWorkloadIdentityPoolProviderImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "workload_identity_pool_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID used for the pool, which is the final component of the pool resource name. This -value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix -'gcp-' is reserved for use by Google, and may not be specified.`, - }, - "workload_identity_pool_provider_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateWorkloadIdentityPoolProviderId, - Description: `The ID for the provider, which becomes the final component of the resource name. This -value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix -'gcp-' is reserved for use by Google, and may not be specified.`, - }, - "attribute_condition": { - Type: schema.TypeString, - Optional: true, - Description: `[A Common Expression Language](https://opensource.google/projects/cel) expression, in -plain text, to restrict what otherwise valid authentication credentials issued by the -provider should not be accepted. - -The expression must output a boolean representing whether to allow the federation. - -The following keywords may be referenced in the expressions: - * 'assertion': JSON representing the authentication credential issued by the provider. - * 'google': The Google attributes mapped from the assertion in the 'attribute_mappings'. - * 'attribute': The custom attributes mapped from the assertion in the 'attribute_mappings'. - -The maximum length of the attribute condition expression is 4096 characters. If -unspecified, all valid authentication credential are accepted. 
- -The following example shows how to only allow credentials with a mapped 'google.groups' -value of 'admins': -''' -"'admins' in google.groups" -'''`, - }, - "attribute_mapping": { - Type: schema.TypeMap, - Optional: true, - Description: `Maps attributes from authentication credentials issued by an external identity provider -to Google Cloud attributes, such as 'subject' and 'segment'. - -Each key must be a string specifying the Google Cloud IAM attribute to map to. - -The following keys are supported: - * 'google.subject': The principal IAM is authenticating. You can reference this value - in IAM bindings. This is also the subject that appears in Cloud Logging logs. - Cannot exceed 127 characters. - * 'google.groups': Groups the external identity belongs to. You can grant groups - access to resources using an IAM 'principalSet' binding; access applies to all - members of the group. - -You can also provide custom attributes by specifying 'attribute.{custom_attribute}', -where '{custom_attribute}' is the name of the custom attribute to be mapped. You can -define a maximum of 50 custom attributes. The maximum length of a mapped attribute key -is 100 characters, and the key may only contain the characters [a-z0-9_]. - -You can reference these attributes in IAM policies to define fine-grained access for a -workload to Google Cloud resources. 
For example: - * 'google.subject': - 'principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}' - * 'google.groups': - 'principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}' - * 'attribute.{custom_attribute}': - 'principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}' - -Each value must be a [Common Expression Language](https://opensource.google/projects/cel) -function that maps an identity provider credential to the normalized attribute specified -by the corresponding map key. - -You can use the 'assertion' keyword in the expression to access a JSON representation of -the authentication credential issued by the provider. - -The maximum length of an attribute mapping expression is 2048 characters. When evaluated, -the total size of all mapped attributes must not exceed 8KB. - -For AWS providers, the following rules apply: - - If no attribute mapping is defined, the following default mapping applies: - ''' - { - "google.subject":"assertion.arn", - "attribute.aws_role": - "assertion.arn.contains('assumed-role')" - " ? assertion.arn.extract('{account_arn}assumed-role/')" - " + 'assumed-role/'" - " + assertion.arn.extract('assumed-role/{role_name}/')" - " : assertion.arn", - } - ''' - - If any custom attribute mappings are defined, they must include a mapping to the - 'google.subject' attribute. - -For OIDC providers, the following rules apply: - - Custom attribute mappings must be defined, and must include a mapping to the - 'google.subject' attribute. For example, the following maps the 'sub' claim of the - incoming credential to the 'subject' attribute on a Google token. 
- ''' - {"google.subject": "assertion.sub"} - '''`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "aws": { - Type: schema.TypeList, - Optional: true, - Description: `An Amazon Web Services identity provider. Not compatible with the property oidc.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeString, - Required: true, - Description: `The AWS account ID.`, - }, - }, - }, - ExactlyOneOf: []string{"aws", "oidc"}, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A description for the provider. Cannot exceed 256 characters.`, - }, - "disabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. -However, existing tokens still grant access.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `A display name for the provider. Cannot exceed 32 characters.`, - }, - "oidc": { - Type: schema.TypeList, - Optional: true, - Description: `An OpenId Connect 1.0 identity provider. Not compatible with the property aws.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "issuer_uri": { - Type: schema.TypeString, - Required: true, - Description: `The OIDC issuer URL.`, - }, - "allowed_audiences": { - Type: schema.TypeList, - Optional: true, - Description: `Acceptable values for the 'aud' field (audience) in the OIDC token. Token exchange -requests are rejected if the token audience does not match one of the configured -values. Each audience may be at most 256 characters. A maximum of 10 audiences may -be configured. - -If this list is empty, the OIDC token audience must be equal to the full canonical -resource name of the WorkloadIdentityPoolProvider, with or without the HTTPS prefix. 
-For example: -''' -//iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ -https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ -'''`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - ExactlyOneOf: []string{"aws", "oidc"}, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the provider as -'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `The state of the provider. -* STATE_UNSPECIFIED: State unspecified. -* ACTIVE: The provider is active, and may be used to validate authentication credentials. -* DELETED: The provider is soft-deleted. Soft-deleted providers are permanently deleted - after approximately 30 days. You can restore a soft-deleted provider using - UndeleteWorkloadIdentityPoolProvider. 
You cannot reuse the ID of a soft-deleted provider - until it is permanently deleted.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIAMBetaWorkloadIdentityPoolProviderCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandIAMBetaWorkloadIdentityPoolProviderDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandIAMBetaWorkloadIdentityPoolProviderDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - disabledProp, err := expandIAMBetaWorkloadIdentityPoolProviderDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - attributeMappingProp, err := expandIAMBetaWorkloadIdentityPoolProviderAttributeMapping(d.Get("attribute_mapping"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attribute_mapping"); !isEmptyValue(reflect.ValueOf(attributeMappingProp)) && (ok || !reflect.DeepEqual(v, attributeMappingProp)) { - obj["attributeMapping"] = attributeMappingProp - } - attributeConditionProp, err := expandIAMBetaWorkloadIdentityPoolProviderAttributeCondition(d.Get("attribute_condition"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attribute_condition"); !isEmptyValue(reflect.ValueOf(attributeConditionProp)) && (ok || !reflect.DeepEqual(v, attributeConditionProp)) { - obj["attributeCondition"] = attributeConditionProp - } - awsProp, err := expandIAMBetaWorkloadIdentityPoolProviderAws(d.Get("aws"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("aws"); !isEmptyValue(reflect.ValueOf(awsProp)) && (ok || !reflect.DeepEqual(v, awsProp)) { - obj["aws"] = awsProp - } - oidcProp, err := expandIAMBetaWorkloadIdentityPoolProviderOidc(d.Get("oidc"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("oidc"); !isEmptyValue(reflect.ValueOf(oidcProp)) && (ok || !reflect.DeepEqual(v, oidcProp)) { - obj["oidc"] = oidcProp - } - - url, err := replaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers?workloadIdentityPoolProviderId={{workload_identity_pool_provider_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new WorkloadIdentityPoolProvider: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for WorkloadIdentityPoolProvider: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating WorkloadIdentityPoolProvider: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}") - if err != nil { - return fmt.Errorf("Error 
constructing id: %s", err) - } - d.SetId(id) - - err = IAMBetaOperationWaitTime( - config, res, project, "Creating WorkloadIdentityPoolProvider", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create WorkloadIdentityPoolProvider: %s", err) - } - - log.Printf("[DEBUG] Finished creating WorkloadIdentityPoolProvider %q: %#v", d.Id(), res) - - return resourceIAMBetaWorkloadIdentityPoolProviderRead(d, meta) -} - -func resourceIAMBetaWorkloadIdentityPoolProviderRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for WorkloadIdentityPoolProvider: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IAMBetaWorkloadIdentityPoolProvider %q", d.Id())) - } - - res, err = resourceIAMBetaWorkloadIdentityPoolProviderDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing IAMBetaWorkloadIdentityPoolProvider because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) - } - - if err := d.Set("state", flattenIAMBetaWorkloadIdentityPoolProviderState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) - } - if err := d.Set("display_name", flattenIAMBetaWorkloadIdentityPoolProviderDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) - } - if err := d.Set("description", flattenIAMBetaWorkloadIdentityPoolProviderDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) - } - if err := d.Set("name", flattenIAMBetaWorkloadIdentityPoolProviderName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) - } - if err := d.Set("disabled", flattenIAMBetaWorkloadIdentityPoolProviderDisabled(res["disabled"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) - } - if err := d.Set("attribute_mapping", flattenIAMBetaWorkloadIdentityPoolProviderAttributeMapping(res["attributeMapping"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) - } - if err := d.Set("attribute_condition", flattenIAMBetaWorkloadIdentityPoolProviderAttributeCondition(res["attributeCondition"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) - } - if err := d.Set("aws", flattenIAMBetaWorkloadIdentityPoolProviderAws(res["aws"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) - } - if err := d.Set("oidc", 
flattenIAMBetaWorkloadIdentityPoolProviderOidc(res["oidc"], d, config)); err != nil { - return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) - } - - return nil -} - -func resourceIAMBetaWorkloadIdentityPoolProviderUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for WorkloadIdentityPoolProvider: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandIAMBetaWorkloadIdentityPoolProviderDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandIAMBetaWorkloadIdentityPoolProviderDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - disabledProp, err := expandIAMBetaWorkloadIdentityPoolProviderDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - attributeMappingProp, err := expandIAMBetaWorkloadIdentityPoolProviderAttributeMapping(d.Get("attribute_mapping"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attribute_mapping"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attributeMappingProp)) { - obj["attributeMapping"] = attributeMappingProp - } - attributeConditionProp, 
err := expandIAMBetaWorkloadIdentityPoolProviderAttributeCondition(d.Get("attribute_condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attribute_condition"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attributeConditionProp)) { - obj["attributeCondition"] = attributeConditionProp - } - awsProp, err := expandIAMBetaWorkloadIdentityPoolProviderAws(d.Get("aws"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("aws"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, awsProp)) { - obj["aws"] = awsProp - } - oidcProp, err := expandIAMBetaWorkloadIdentityPoolProviderOidc(d.Get("oidc"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("oidc"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, oidcProp)) { - obj["oidc"] = oidcProp - } - - url, err := replaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating WorkloadIdentityPoolProvider %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("disabled") { - updateMask = append(updateMask, "disabled") - } - - if d.HasChange("attribute_mapping") { - updateMask = append(updateMask, "attributeMapping") - } - - if d.HasChange("attribute_condition") { - updateMask = append(updateMask, "attributeCondition") - } - - if d.HasChange("aws") { - updateMask = append(updateMask, "aws") - } - - if d.HasChange("oidc") { - updateMask = append(updateMask, "oidc.allowed_audiences", - "oidc.issuer_uri") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err 
= addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating WorkloadIdentityPoolProvider %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating WorkloadIdentityPoolProvider %q: %#v", d.Id(), res) - } - - err = IAMBetaOperationWaitTime( - config, res, project, "Updating WorkloadIdentityPoolProvider", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceIAMBetaWorkloadIdentityPoolProviderRead(d, meta) -} - -func resourceIAMBetaWorkloadIdentityPoolProviderDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for WorkloadIdentityPoolProvider: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting WorkloadIdentityPoolProvider %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, 
"WorkloadIdentityPoolProvider") - } - - err = IAMBetaOperationWaitTime( - config, res, project, "Deleting WorkloadIdentityPoolProvider", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting WorkloadIdentityPoolProvider %q: %#v", d.Id(), res) - return nil -} - -func resourceIAMBetaWorkloadIdentityPoolProviderImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/workloadIdentityPools/(?P[^/]+)/providers/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIAMBetaWorkloadIdentityPoolProviderState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMBetaWorkloadIdentityPoolProviderDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMBetaWorkloadIdentityPoolProviderDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMBetaWorkloadIdentityPoolProviderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMBetaWorkloadIdentityPoolProviderDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMBetaWorkloadIdentityPoolProviderAttributeMapping(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenIAMBetaWorkloadIdentityPoolProviderAttributeCondition(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMBetaWorkloadIdentityPoolProviderAws(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["account_id"] = - flattenIAMBetaWorkloadIdentityPoolProviderAwsAccountId(original["accountId"], d, config) - return []interface{}{transformed} -} -func flattenIAMBetaWorkloadIdentityPoolProviderAwsAccountId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMBetaWorkloadIdentityPoolProviderOidc(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allowed_audiences"] = - flattenIAMBetaWorkloadIdentityPoolProviderOidcAllowedAudiences(original["allowedAudiences"], d, config) - transformed["issuer_uri"] = - flattenIAMBetaWorkloadIdentityPoolProviderOidcIssuerUri(original["issuerUri"], d, config) - return []interface{}{transformed} -} -func flattenIAMBetaWorkloadIdentityPoolProviderOidcAllowedAudiences(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIAMBetaWorkloadIdentityPoolProviderOidcIssuerUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIAMBetaWorkloadIdentityPoolProviderDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMBetaWorkloadIdentityPoolProviderDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandIAMBetaWorkloadIdentityPoolProviderDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMBetaWorkloadIdentityPoolProviderAttributeMapping(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandIAMBetaWorkloadIdentityPoolProviderAttributeCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMBetaWorkloadIdentityPoolProviderAws(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAccountId, err := expandIAMBetaWorkloadIdentityPoolProviderAwsAccountId(original["account_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAccountId); val.IsValid() && !isEmptyValue(val) { - transformed["accountId"] = transformedAccountId - } - - return transformed, nil -} - -func expandIAMBetaWorkloadIdentityPoolProviderAwsAccountId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMBetaWorkloadIdentityPoolProviderOidc(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowedAudiences, err := expandIAMBetaWorkloadIdentityPoolProviderOidcAllowedAudiences(original["allowed_audiences"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedAllowedAudiences); val.IsValid() && !isEmptyValue(val) { - transformed["allowedAudiences"] = transformedAllowedAudiences - } - - transformedIssuerUri, err := expandIAMBetaWorkloadIdentityPoolProviderOidcIssuerUri(original["issuer_uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIssuerUri); val.IsValid() && !isEmptyValue(val) { - transformed["issuerUri"] = transformedIssuerUri - } - - return transformed, nil -} - -func expandIAMBetaWorkloadIdentityPoolProviderOidcAllowedAudiences(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIAMBetaWorkloadIdentityPoolProviderOidcIssuerUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceIAMBetaWorkloadIdentityPoolProviderDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v := res["state"]; v == "DELETED" { - return nil, nil - } - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iap_brand.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iap_brand.go deleted file mode 100644 index f5abd36356..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iap_brand.go +++ /dev/null @@ -1,322 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceIapBrand() *schema.Resource { - return &schema.Resource{ - Create: resourceIapBrandCreate, - Read: resourceIapBrandRead, - Delete: resourceIapBrandDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIapBrandImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "application_title": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Application name displayed on OAuth consent screen.`, - }, - "support_email": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Support email displayed on the OAuth consent screen. Can be either a -user or group email. When a user email is specified, the caller must -be the user with the associated email address. When a group email is -specified, the caller can be either a user or a service account which -is an owner of the specified group in Cloud Identity.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Identifier of the brand, in the format 'projects/{project_number}/brands/{brand_id}' -NOTE: The name can also be expressed as 'projects/{project_id}/brands/{brand_id}', e.g. when importing. 
-NOTE: The brand identification corresponds to the project number as only one -brand can be created per project.`, - }, - "org_internal_only": { - Type: schema.TypeBool, - Computed: true, - Description: `Whether the brand is only intended for usage inside the GSuite organization only.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIapBrandCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - supportEmailProp, err := expandIapBrandSupportEmail(d.Get("support_email"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("support_email"); !isEmptyValue(reflect.ValueOf(supportEmailProp)) && (ok || !reflect.DeepEqual(v, supportEmailProp)) { - obj["supportEmail"] = supportEmailProp - } - applicationTitleProp, err := expandIapBrandApplicationTitle(d.Get("application_title"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("application_title"); !isEmptyValue(reflect.ValueOf(applicationTitleProp)) && (ok || !reflect.DeepEqual(v, applicationTitleProp)) { - obj["applicationTitle"] = applicationTitleProp - } - - url, err := replaceVars(d, config, "{{IapBasePath}}projects/{{project}}/brands") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Brand: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Brand: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { 
- return fmt.Errorf("Error creating Brand: %s", err) - } - if err := d.Set("name", flattenIapBrandName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - err = PollingWaitTime(resourceIapBrandPollRead(d, meta), PollCheckForExistence, "Creating Brand", d.Timeout(schema.TimeoutCreate), 5) - if err != nil { - return fmt.Errorf("Error waiting to create Brand: %s", err) - } - - log.Printf("[DEBUG] Finished creating Brand %q: %#v", d.Id(), res) - - return resourceIapBrandRead(d, meta) -} - -func resourceIapBrandPollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{IapBasePath}}{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, fmt.Errorf("Error fetching project for Brand: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - 
return nil, err - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - return res, nil - } -} - -func resourceIapBrandRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IapBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Brand: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IapBrand %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Brand: %s", err) - } - - if err := d.Set("support_email", flattenIapBrandSupportEmail(res["supportEmail"], d, config)); err != nil { - return fmt.Errorf("Error reading Brand: %s", err) - } - if err := d.Set("application_title", flattenIapBrandApplicationTitle(res["applicationTitle"], d, config)); err != nil { - return fmt.Errorf("Error reading Brand: %s", err) - } - if err := d.Set("org_internal_only", flattenIapBrandOrgInternalOnly(res["orgInternalOnly"], d, config)); err != nil { - return fmt.Errorf("Error reading Brand: %s", err) - } - if err := d.Set("name", flattenIapBrandName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Brand: %s", err) - } - - return nil -} - -func resourceIapBrandDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARNING] Iap Brand resources"+ - " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceIapBrandImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - nameParts := strings.Split(d.Get("name").(string), "/") - if len(nameParts) != 4 && len(nameParts) != 2 { - return nil, fmt.Errorf( - "Saw %s when the name is expected to have either shape %s or %s", - d.Get("name"), - "projects/{{project}}/brands/{{name}}", - "{{project}}/{{name}}", - ) - } - - var project string - if len(nameParts) == 4 { - project = nameParts[1] - } - if len(nameParts) == 2 { - project = nameParts[0] // Different index - - // Set `name` (and `id`) as a 4-part format so Read func produces valid URL - brand := nameParts[1] - name := fmt.Sprintf("projects/%s/brands/%s", project, brand) - if err := d.Set("name", name); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - } - - if err := d.Set("project", project); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenIapBrandSupportEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIapBrandApplicationTitle(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIapBrandOrgInternalOnly(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIapBrandName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIapBrandSupportEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandIapBrandApplicationTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iap_client.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iap_client.go deleted file mode 100644 index 301144b2bf..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iap_client.go +++ /dev/null @@ -1,237 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceIapClient() *schema.Resource { - return &schema.Resource{ - Create: resourceIapClientCreate, - Read: resourceIapClientRead, - Delete: resourceIapClientDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIapClientImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "brand": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Identifier of the brand to which this client -is attached to. 
The format is -'projects/{project_number}/brands/{brand_id}/identityAwareProxyClients/{client_id}'.`, - }, - "display_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Human-friendly name given to the OAuth client.`, - }, - "client_id": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Unique identifier of the OAuth client.`, - }, - "secret": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Client secret of the OAuth client.`, - Sensitive: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIapClientCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandIapClientDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - - url, err := replaceVars(d, config, "{{IapBasePath}}{{brand}}/identityAwareProxyClients") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Client: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), iapClient409Operation) - if err != nil { - return fmt.Errorf("Error creating Client: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{brand}}/identityAwareProxyClients/{{client_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - brand := d.Get("brand") - clientId := 
flattenIapClientClientId(res["name"], d, config) - - if err := d.Set("client_id", clientId); err != nil { - return fmt.Errorf("Error setting client_id: %s", err) - } - d.SetId(fmt.Sprintf("%s/identityAwareProxyClients/%s", brand, clientId)) - - log.Printf("[DEBUG] Finished creating Client %q: %#v", d.Id(), res) - - return resourceIapClientRead(d, meta) -} - -func resourceIapClientRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IapBasePath}}{{brand}}/identityAwareProxyClients/{{client_id}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, iapClient409Operation) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IapClient %q", d.Id())) - } - - if err := d.Set("secret", flattenIapClientSecret(res["secret"], d, config)); err != nil { - return fmt.Errorf("Error reading Client: %s", err) - } - if err := d.Set("display_name", flattenIapClientDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Client: %s", err) - } - if err := d.Set("client_id", flattenIapClientClientId(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Client: %s", err) - } - - return nil -} - -func resourceIapClientDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{IapBasePath}}{{brand}}/identityAwareProxyClients/{{client_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - 
log.Printf("[DEBUG] Deleting Client %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), iapClient409Operation) - if err != nil { - return handleNotFoundError(err, d, "Client") - } - - log.Printf("[DEBUG] Finished deleting Client %q: %#v", d.Id(), res) - return nil -} - -func resourceIapClientImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - nameParts := strings.Split(d.Get("brand").(string), "/") - if len(nameParts) != 6 { - return nil, fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("brand").(string), - "projects/{{project_number}}/brands/{{brand_id}}/identityAwareProxyClients/{{client_id}}", - ) - } - - if err := d.Set("brand", fmt.Sprintf("projects/%s/brands/%s", nameParts[1], nameParts[3])); err != nil { - return nil, fmt.Errorf("Error setting brand: %s", err) - } - if err := d.Set("client_id", nameParts[5]); err != nil { - return nil, fmt.Errorf("Error setting client_id: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenIapClientSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIapClientDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIapClientClientId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandIapClientDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} 
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_config.go deleted file mode 100644 index c29085c83a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_config.go +++ /dev/null @@ -1,256 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceIdentityPlatformConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceIdentityPlatformConfigCreate, - Read: resourceIdentityPlatformConfigRead, - Update: resourceIdentityPlatformConfigUpdate, - Delete: resourceIdentityPlatformConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIdentityPlatformConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "autodelete_anonymous_users": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether anonymous users will be auto-deleted after a period of 30 days`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: 
`The name of the Config resource`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/identityPlatform:initializeAuth") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Config: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, nil, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Config: %s", err) - } - if err := d.Set("name", flattenIdentityPlatformConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/config") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Update the resource after initializing auth to set fields. 
- if err := resourceIdentityPlatformConfigUpdate(d, meta); err != nil { - return err - } - - log.Printf("[DEBUG] Finished creating Config %q: %#v", d.Id(), res) - - return resourceIdentityPlatformConfigRead(d, meta) -} - -func resourceIdentityPlatformConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Config: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("autodelete_anonymous_users", flattenIdentityPlatformConfigAutodeleteAnonymousUsers(res["autodeleteAnonymousUsers"], d, config)); err != nil { - return fmt.Errorf("Error reading Config: %s", err) - } - - return nil -} - -func resourceIdentityPlatformConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Config: %s", err) - } - billingProject = 
project - - obj := make(map[string]interface{}) - autodeleteAnonymousUsersProp, err := expandIdentityPlatformConfigAutodeleteAnonymousUsers(d.Get("autodelete_anonymous_users"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("autodelete_anonymous_users"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autodeleteAnonymousUsersProp)) { - obj["autodeleteAnonymousUsers"] = autodeleteAnonymousUsersProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Config %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("autodelete_anonymous_users") { - updateMask = append(updateMask, "autodeleteAnonymousUsers") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Config %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Config %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformConfigRead(d, meta) -} - -func resourceIdentityPlatformConfigDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARNING] IdentityPlatform Config resources"+ - " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceIdentityPlatformConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/config", - "projects/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/config") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformConfigAutodeleteAnonymousUsers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIdentityPlatformConfigAutodeleteAnonymousUsers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_default_supported_idp_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_default_supported_idp_config.go deleted file mode 100644 index 0b7b8a380e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_default_supported_idp_config.go +++ /dev/null @@ -1,378 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered 
when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceIdentityPlatformDefaultSupportedIdpConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceIdentityPlatformDefaultSupportedIdpConfigCreate, - Read: resourceIdentityPlatformDefaultSupportedIdpConfigRead, - Update: resourceIdentityPlatformDefaultSupportedIdpConfigUpdate, - Delete: resourceIdentityPlatformDefaultSupportedIdpConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIdentityPlatformDefaultSupportedIdpConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "client_id": { - Type: schema.TypeString, - Required: true, - Description: `OAuth client ID`, - }, - "client_secret": { - Type: schema.TypeString, - Required: true, - Description: `OAuth client secret`, - }, - "idp_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the IDP. 
Possible values include: - -* 'apple.com' - -* 'facebook.com' - -* 'gc.apple.com' - -* 'github.com' - -* 'google.com' - -* 'linkedin.com' - -* 'microsoft.com' - -* 'playgames.google.com' - -* 'twitter.com' - -* 'yahoo.com'`, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `If this IDP allows the user to sign in`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the DefaultSupportedIdpConfig resource`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformDefaultSupportedIdpConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - clientIdProp, err := expandIdentityPlatformDefaultSupportedIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); !isEmptyValue(reflect.ValueOf(clientIdProp)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformDefaultSupportedIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(reflect.ValueOf(clientSecretProp)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - enabledProp, err := expandIdentityPlatformDefaultSupportedIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - - url, err := replaceVars(d, config, 
"{{IdentityPlatformBasePath}}projects/{{project}}/defaultSupportedIdpConfigs?idpId={{idp_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new DefaultSupportedIdpConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating DefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("name", flattenIdentityPlatformDefaultSupportedIdpConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating DefaultSupportedIdpConfig %q: %#v", d.Id(), res) - - return resourceIdentityPlatformDefaultSupportedIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformDefaultSupportedIdpConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the 
billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformDefaultSupportedIdpConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformDefaultSupportedIdpConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("client_id", flattenIdentityPlatformDefaultSupportedIdpConfigClientId(res["clientId"], d, config)); err != nil { - return fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("client_secret", flattenIdentityPlatformDefaultSupportedIdpConfigClientSecret(res["clientSecret"], d, config)); err != nil { - return fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("enabled", flattenIdentityPlatformDefaultSupportedIdpConfigEnabled(res["enabled"], d, config)); err != nil { - return fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) - } - - return nil -} - -func resourceIdentityPlatformDefaultSupportedIdpConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - clientIdProp, err := expandIdentityPlatformDefaultSupportedIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformDefaultSupportedIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - enabledProp, err := expandIdentityPlatformDefaultSupportedIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating DefaultSupportedIdpConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("client_id") { - updateMask = append(updateMask, "clientId") - } - - if d.HasChange("client_secret") { - updateMask = append(updateMask, "clientSecret") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating DefaultSupportedIdpConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating DefaultSupportedIdpConfig %q: %#v", d.Id(), 
res) - } - - return resourceIdentityPlatformDefaultSupportedIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformDefaultSupportedIdpConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for DefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting DefaultSupportedIdpConfig %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DefaultSupportedIdpConfig") - } - - log.Printf("[DEBUG] Finished deleting DefaultSupportedIdpConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformDefaultSupportedIdpConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/defaultSupportedIdpConfigs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformDefaultSupportedIdpConfigName(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformDefaultSupportedIdpConfigClientId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformDefaultSupportedIdpConfigClientSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformDefaultSupportedIdpConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIdentityPlatformDefaultSupportedIdpConfigClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformDefaultSupportedIdpConfigClientSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformDefaultSupportedIdpConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_inbound_saml_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_inbound_saml_config.go deleted file mode 100644 index 3d89142ceb..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_inbound_saml_config.go +++ /dev/null @@ -1,690 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceIdentityPlatformInboundSamlConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceIdentityPlatformInboundSamlConfigCreate, - Read: resourceIdentityPlatformInboundSamlConfigRead, - Update: resourceIdentityPlatformInboundSamlConfigUpdate, - Delete: resourceIdentityPlatformInboundSamlConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIdentityPlatformInboundSamlConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `Human friendly display name.`, - }, - "idp_config": { - Type: schema.TypeList, - Required: true, - Description: `SAML IdP configuration when the project acts as the relying party`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "idp_certificates": { - Type: schema.TypeList, - Required: true, - Description: `The IdP's certificate data to verify the signature in the SAMLResponse issued by the IDP.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "x509_certificate": { - Type: schema.TypeString, - Optional: true, - Description: `The IdP's x509 certificate.`, - }, - }, - }, - }, - "idp_entity_id": { - Type: schema.TypeString, - Required: true, - Description: `Unique identifier for all SAML entities`, - }, - "sso_url": { - Type: schema.TypeString, - Required: true, - Description: `URL to send Authentication request to.`, - }, - "sign_request": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates if outbounding SAMLRequest should be 
signed.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the InboundSamlConfig resource. Must start with 'saml.' and can only have alphanumeric characters, -hyphens, underscores or periods. The part after 'saml.' must also start with a lowercase letter, end with an -alphanumeric character, and have at least 2 characters.`, - }, - "sp_config": { - Type: schema.TypeList, - Required: true, - Description: `SAML SP (Service Provider) configuration when the project acts as the relying party to receive -and accept an authentication assertion issued by a SAML identity provider.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "callback_uri": { - Type: schema.TypeString, - Optional: true, - Description: `Callback URI where responses from IDP are handled. Must start with 'https://'.`, - }, - "sp_entity_id": { - Type: schema.TypeString, - Optional: true, - Description: `Unique identifier for all SAML entities.`, - }, - "sp_certificates": { - Type: schema.TypeList, - Computed: true, - Description: `The IDP's certificate data to verify the signature in the SAMLResponse issued by the IDP.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "x509_certificate": { - Type: schema.TypeString, - Computed: true, - Description: `The x509 certificate`, - }, - }, - }, - }, - }, - }, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `If this config allows users to sign in with the provider.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformInboundSamlConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := 
expandIdentityPlatformInboundSamlConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - displayNameProp, err := expandIdentityPlatformInboundSamlConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformInboundSamlConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - idpConfigProp, err := expandIdentityPlatformInboundSamlConfigIdpConfig(d.Get("idp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("idp_config"); !isEmptyValue(reflect.ValueOf(idpConfigProp)) && (ok || !reflect.DeepEqual(v, idpConfigProp)) { - obj["idpConfig"] = idpConfigProp - } - spConfigProp, err := expandIdentityPlatformInboundSamlConfigSpConfig(d.Get("sp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("sp_config"); !isEmptyValue(reflect.ValueOf(spConfigProp)) && (ok || !reflect.DeepEqual(v, spConfigProp)) { - obj["spConfig"] = spConfigProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/inboundSamlConfigs?inboundSamlConfigId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new InboundSamlConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for InboundSamlConfig: %s", err) - } - billingProject = project - - // err == nil indicates that 
the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating InboundSamlConfig: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/inboundSamlConfigs/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating InboundSamlConfig %q: %#v", d.Id(), res) - - return resourceIdentityPlatformInboundSamlConfigRead(d, meta) -} - -func resourceIdentityPlatformInboundSamlConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/inboundSamlConfigs/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for InboundSamlConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformInboundSamlConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading InboundSamlConfig: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformInboundSamlConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading InboundSamlConfig: %s", err) - } - if err := d.Set("display_name", 
flattenIdentityPlatformInboundSamlConfigDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading InboundSamlConfig: %s", err) - } - if err := d.Set("enabled", flattenIdentityPlatformInboundSamlConfigEnabled(res["enabled"], d, config)); err != nil { - return fmt.Errorf("Error reading InboundSamlConfig: %s", err) - } - if err := d.Set("idp_config", flattenIdentityPlatformInboundSamlConfigIdpConfig(res["idpConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading InboundSamlConfig: %s", err) - } - if err := d.Set("sp_config", flattenIdentityPlatformInboundSamlConfigSpConfig(res["spConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading InboundSamlConfig: %s", err) - } - - return nil -} - -func resourceIdentityPlatformInboundSamlConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for InboundSamlConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandIdentityPlatformInboundSamlConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformInboundSamlConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - idpConfigProp, err := expandIdentityPlatformInboundSamlConfigIdpConfig(d.Get("idp_config"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("idp_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idpConfigProp)) { - obj["idpConfig"] = idpConfigProp - } - spConfigProp, err := expandIdentityPlatformInboundSamlConfigSpConfig(d.Get("sp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("sp_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, spConfigProp)) { - obj["spConfig"] = spConfigProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/inboundSamlConfigs/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating InboundSamlConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - - if d.HasChange("idp_config") { - updateMask = append(updateMask, "idpConfig") - } - - if d.HasChange("sp_config") { - updateMask = append(updateMask, "spConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating InboundSamlConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating InboundSamlConfig %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformInboundSamlConfigRead(d, meta) -} - -func resourceIdentityPlatformInboundSamlConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for InboundSamlConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/inboundSamlConfigs/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting InboundSamlConfig %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "InboundSamlConfig") - } - - log.Printf("[DEBUG] Finished deleting InboundSamlConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformInboundSamlConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/inboundSamlConfigs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/inboundSamlConfigs/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformInboundSamlConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenIdentityPlatformInboundSamlConfigDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigEnabled(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigIdpConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["idp_entity_id"] = - flattenIdentityPlatformInboundSamlConfigIdpConfigIdpEntityId(original["idpEntityId"], d, config) - transformed["sso_url"] = - flattenIdentityPlatformInboundSamlConfigIdpConfigSsoUrl(original["ssoUrl"], d, config) - transformed["sign_request"] = - flattenIdentityPlatformInboundSamlConfigIdpConfigSignRequest(original["signRequest"], d, config) - transformed["idp_certificates"] = - flattenIdentityPlatformInboundSamlConfigIdpConfigIdpCertificates(original["idpCertificates"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformInboundSamlConfigIdpConfigIdpEntityId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigIdpConfigSsoUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigIdpConfigSignRequest(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigIdpConfigIdpCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "x509_certificate": flattenIdentityPlatformInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(original["x509Certificate"], d, 
config), - }) - } - return transformed -} -func flattenIdentityPlatformInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigSpConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["sp_entity_id"] = - flattenIdentityPlatformInboundSamlConfigSpConfigSpEntityId(original["spEntityId"], d, config) - transformed["callback_uri"] = - flattenIdentityPlatformInboundSamlConfigSpConfigCallbackUri(original["callbackUri"], d, config) - transformed["sp_certificates"] = - flattenIdentityPlatformInboundSamlConfigSpConfigSpCertificates(original["spCertificates"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformInboundSamlConfigSpConfigSpEntityId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigSpConfigCallbackUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigSpConfigSpCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "x509_certificate": flattenIdentityPlatformInboundSamlConfigSpConfigSpCertificatesX509Certificate(original["x509Certificate"], d, config), - }) - } - return transformed -} -func flattenIdentityPlatformInboundSamlConfigSpConfigSpCertificatesX509Certificate(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIdentityPlatformInboundSamlConfigName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigIdpConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdpEntityId, err := expandIdentityPlatformInboundSamlConfigIdpConfigIdpEntityId(original["idp_entity_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIdpEntityId); val.IsValid() && !isEmptyValue(val) { - transformed["idpEntityId"] = transformedIdpEntityId - } - - transformedSsoUrl, err := expandIdentityPlatformInboundSamlConfigIdpConfigSsoUrl(original["sso_url"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSsoUrl); val.IsValid() && !isEmptyValue(val) { - transformed["ssoUrl"] = transformedSsoUrl - } - - transformedSignRequest, err := expandIdentityPlatformInboundSamlConfigIdpConfigSignRequest(original["sign_request"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSignRequest); val.IsValid() && !isEmptyValue(val) { - transformed["signRequest"] = transformedSignRequest - } - - transformedIdpCertificates, err := expandIdentityPlatformInboundSamlConfigIdpConfigIdpCertificates(original["idp_certificates"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedIdpCertificates); val.IsValid() && !isEmptyValue(val) { - transformed["idpCertificates"] = transformedIdpCertificates - } - - return transformed, nil -} - -func expandIdentityPlatformInboundSamlConfigIdpConfigIdpEntityId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigIdpConfigSsoUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigIdpConfigSignRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigIdpConfigIdpCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedX509Certificate, err := expandIdentityPlatformInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(original["x509_certificate"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedX509Certificate); val.IsValid() && !isEmptyValue(val) { - transformed["x509Certificate"] = transformedX509Certificate - } - - req = append(req, transformed) - } - return req, nil -} - -func expandIdentityPlatformInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigSpConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSpEntityId, err := 
expandIdentityPlatformInboundSamlConfigSpConfigSpEntityId(original["sp_entity_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSpEntityId); val.IsValid() && !isEmptyValue(val) { - transformed["spEntityId"] = transformedSpEntityId - } - - transformedCallbackUri, err := expandIdentityPlatformInboundSamlConfigSpConfigCallbackUri(original["callback_uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCallbackUri); val.IsValid() && !isEmptyValue(val) { - transformed["callbackUri"] = transformedCallbackUri - } - - transformedSpCertificates, err := expandIdentityPlatformInboundSamlConfigSpConfigSpCertificates(original["sp_certificates"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSpCertificates); val.IsValid() && !isEmptyValue(val) { - transformed["spCertificates"] = transformedSpCertificates - } - - return transformed, nil -} - -func expandIdentityPlatformInboundSamlConfigSpConfigSpEntityId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigSpConfigCallbackUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigSpConfigSpCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedX509Certificate, err := expandIdentityPlatformInboundSamlConfigSpConfigSpCertificatesX509Certificate(original["x509_certificate"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedX509Certificate); val.IsValid() && !isEmptyValue(val) { - transformed["x509Certificate"] = 
transformedX509Certificate - } - - req = append(req, transformed) - } - return req, nil -} - -func expandIdentityPlatformInboundSamlConfigSpConfigSpCertificatesX509Certificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_oauth_idp_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_oauth_idp_config.go deleted file mode 100644 index 1f391fc9af..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_oauth_idp_config.go +++ /dev/null @@ -1,427 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceIdentityPlatformOauthIdpConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceIdentityPlatformOauthIdpConfigCreate, - Read: resourceIdentityPlatformOauthIdpConfigRead, - Update: resourceIdentityPlatformOauthIdpConfigUpdate, - Delete: resourceIdentityPlatformOauthIdpConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIdentityPlatformOauthIdpConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "client_id": { - Type: schema.TypeString, - Required: true, - Description: `The client id of an OAuth client.`, - }, - "issuer": { - Type: schema.TypeString, - Required: true, - Description: `For OIDC Idps, the issuer identifier.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the OauthIdpConfig. 
Must start with 'oidc.'.`, - }, - "client_secret": { - Type: schema.TypeString, - Optional: true, - Description: `The client secret of the OAuth client, to enable OIDC code flow.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `Human friendly display name.`, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `If this config allows users to sign in with the provider.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformOauthIdpConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandIdentityPlatformOauthIdpConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - displayNameProp, err := expandIdentityPlatformOauthIdpConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformOauthIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - issuerProp, err := expandIdentityPlatformOauthIdpConfigIssuer(d.Get("issuer"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("issuer"); !isEmptyValue(reflect.ValueOf(issuerProp)) && (ok || 
!reflect.DeepEqual(v, issuerProp)) { - obj["issuer"] = issuerProp - } - clientIdProp, err := expandIdentityPlatformOauthIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); !isEmptyValue(reflect.ValueOf(clientIdProp)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformOauthIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(reflect.ValueOf(clientSecretProp)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/oauthIdpConfigs?oauthIdpConfigId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new OauthIdpConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for OauthIdpConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating OauthIdpConfig: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/oauthIdpConfigs/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating OauthIdpConfig %q: %#v", d.Id(), res) - - return resourceIdentityPlatformOauthIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformOauthIdpConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/oauthIdpConfigs/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for OauthIdpConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformOauthIdpConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformOauthIdpConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } - if err := d.Set("display_name", flattenIdentityPlatformOauthIdpConfigDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } - if err := d.Set("enabled", flattenIdentityPlatformOauthIdpConfigEnabled(res["enabled"], d, config)); err != nil { - return fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } - if err := d.Set("issuer", flattenIdentityPlatformOauthIdpConfigIssuer(res["issuer"], d, config)); err != nil { - return fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } - if err := d.Set("client_id", flattenIdentityPlatformOauthIdpConfigClientId(res["clientId"], d, config)); err != nil { - return fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } - if err := d.Set("client_secret", flattenIdentityPlatformOauthIdpConfigClientSecret(res["clientSecret"], d, config)); err != nil { - return fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } 
- - return nil -} - -func resourceIdentityPlatformOauthIdpConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for OauthIdpConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandIdentityPlatformOauthIdpConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformOauthIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - issuerProp, err := expandIdentityPlatformOauthIdpConfigIssuer(d.Get("issuer"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("issuer"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, issuerProp)) { - obj["issuer"] = issuerProp - } - clientIdProp, err := expandIdentityPlatformOauthIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformOauthIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - 
- url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/oauthIdpConfigs/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating OauthIdpConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - - if d.HasChange("issuer") { - updateMask = append(updateMask, "issuer") - } - - if d.HasChange("client_id") { - updateMask = append(updateMask, "clientId") - } - - if d.HasChange("client_secret") { - updateMask = append(updateMask, "clientSecret") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating OauthIdpConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating OauthIdpConfig %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformOauthIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformOauthIdpConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for OauthIdpConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/oauthIdpConfigs/{{name}}") - if err != nil { - return 
err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting OauthIdpConfig %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "OauthIdpConfig") - } - - log.Printf("[DEBUG] Finished deleting OauthIdpConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformOauthIdpConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/oauthIdpConfigs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/oauthIdpConfigs/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformOauthIdpConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenIdentityPlatformOauthIdpConfigDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformOauthIdpConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformOauthIdpConfigIssuer(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformOauthIdpConfigClientId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformOauthIdpConfigClientSecret(v interface{}, d *schema.ResourceData, 
config *Config) interface{} { - return v -} - -func expandIdentityPlatformOauthIdpConfigName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformOauthIdpConfigDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformOauthIdpConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformOauthIdpConfigIssuer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformOauthIdpConfigClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformOauthIdpConfigClientSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_project_default_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_project_default_config.go deleted file mode 100644 index 4e6e398d8d..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_project_default_config.go +++ /dev/null @@ -1,753 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceIdentityPlatformProjectDefaultConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceIdentityPlatformProjectDefaultConfigCreate, - Read: resourceIdentityPlatformProjectDefaultConfigRead, - Update: resourceIdentityPlatformProjectDefaultConfigUpdate, - Delete: resourceIdentityPlatformProjectDefaultConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIdentityPlatformProjectDefaultConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "sign_in": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration related to local sign in methods.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allow_duplicate_emails": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether to allow more than one account to have the same email.`, - }, - "anonymous": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration options related to authenticating an anonymous user.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - Description: `Whether anonymous user auth is enabled for the project or not.`, - }, - }, - }, - }, - "email": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration options related to authenticating a user by their email address.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether email auth is enabled 
for the project or not.`, - }, - "password_required": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether a password is required for email auth or not. If true, both an email and -password must be provided to sign in. If false, a user may sign in via either -email/password or email link.`, - }, - }, - }, - }, - "phone_number": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration options related to authenticated a user by their phone number.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether phone number auth is enabled for the project or not.`, - }, - "test_phone_numbers": { - Type: schema.TypeMap, - Optional: true, - Description: `A map of that can be used for phone auth testing.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "hash_config": { - Type: schema.TypeList, - Computed: true, - Description: `Output only. Hash config information.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "algorithm": { - Type: schema.TypeString, - Computed: true, - Description: `Different password hash algorithms used in Identity Toolkit.`, - }, - "memory_cost": { - Type: schema.TypeInt, - Computed: true, - Description: `Memory cost for hash calculation. Used by scrypt and other similar password derivation algorithms. See https://tools.ietf.org/html/rfc7914 for explanation of field.`, - }, - "rounds": { - Type: schema.TypeInt, - Computed: true, - Description: `How many rounds for hash calculation. 
Used by scrypt and other similar password derivation algorithms.`, - }, - "salt_separator": { - Type: schema.TypeString, - Computed: true, - Description: `Non-printable character to be inserted between the salt and plain text password in base64.`, - }, - "signer_key": { - Type: schema.TypeString, - Computed: true, - Description: `Signer key in base64.`, - }, - }, - }, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the Config resource. Example: "projects/my-awesome-project/config"`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformProjectDefaultConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - signInProp, err := expandIdentityPlatformProjectDefaultConfigSignIn(d.Get("sign_in"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("sign_in"); !isEmptyValue(reflect.ValueOf(signInProp)) && (ok || !reflect.DeepEqual(v, signInProp)) { - obj["signIn"] = signInProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ProjectDefaultConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ProjectDefaultConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating 
ProjectDefaultConfig: %s", err) - } - if err := d.Set("name", flattenIdentityPlatformProjectDefaultConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{project}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating ProjectDefaultConfig %q: %#v", d.Id(), res) - - return resourceIdentityPlatformProjectDefaultConfigRead(d, meta) -} - -func resourceIdentityPlatformProjectDefaultConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ProjectDefaultConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformProjectDefaultConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading ProjectDefaultConfig: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformProjectDefaultConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectDefaultConfig: %s", err) - } - if err := d.Set("sign_in", flattenIdentityPlatformProjectDefaultConfigSignIn(res["signIn"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectDefaultConfig: %s", err) - } - - return nil -} - 
-func resourceIdentityPlatformProjectDefaultConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ProjectDefaultConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - signInProp, err := expandIdentityPlatformProjectDefaultConfigSignIn(d.Get("sign_in"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("sign_in"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, signInProp)) { - obj["signIn"] = signInProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating ProjectDefaultConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("sign_in") { - updateMask = append(updateMask, "signIn") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating ProjectDefaultConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating ProjectDefaultConfig %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformProjectDefaultConfigRead(d, meta) -} - -func resourceIdentityPlatformProjectDefaultConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - 
userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ProjectDefaultConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting ProjectDefaultConfig %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ProjectDefaultConfig") - } - - log.Printf("[DEBUG] Finished deleting ProjectDefaultConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformProjectDefaultConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/config/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformProjectDefaultConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignIn(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["email"] = - flattenIdentityPlatformProjectDefaultConfigSignInEmail(original["email"], d, config) - transformed["phone_number"] = - flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumber(original["phoneNumber"], d, config) - transformed["anonymous"] = - flattenIdentityPlatformProjectDefaultConfigSignInAnonymous(original["anonymous"], d, config) - transformed["allow_duplicate_emails"] = - flattenIdentityPlatformProjectDefaultConfigSignInAllowDuplicateEmails(original["allowDuplicateEmails"], d, config) - transformed["hash_config"] = - flattenIdentityPlatformProjectDefaultConfigSignInHashConfig(original["hashConfig"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformProjectDefaultConfigSignInEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = - flattenIdentityPlatformProjectDefaultConfigSignInEmailEnabled(original["enabled"], d, config) - transformed["password_required"] = - flattenIdentityPlatformProjectDefaultConfigSignInEmailPasswordRequired(original["passwordRequired"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformProjectDefaultConfigSignInEmailEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInEmailPasswordRequired(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumber(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = - 
flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumberEnabled(original["enabled"], d, config) - transformed["test_phone_numbers"] = - flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumberTestPhoneNumbers(original["testPhoneNumbers"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumberEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumberTestPhoneNumbers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInAnonymous(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = - flattenIdentityPlatformProjectDefaultConfigSignInAnonymousEnabled(original["enabled"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformProjectDefaultConfigSignInAnonymousEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInAllowDuplicateEmails(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInHashConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["algorithm"] = - flattenIdentityPlatformProjectDefaultConfigSignInHashConfigAlgorithm(original["algorithm"], d, config) - transformed["signer_key"] = - flattenIdentityPlatformProjectDefaultConfigSignInHashConfigSignerKey(original["signerKey"], d, config) - 
transformed["salt_separator"] = - flattenIdentityPlatformProjectDefaultConfigSignInHashConfigSaltSeparator(original["saltSeparator"], d, config) - transformed["rounds"] = - flattenIdentityPlatformProjectDefaultConfigSignInHashConfigRounds(original["rounds"], d, config) - transformed["memory_cost"] = - flattenIdentityPlatformProjectDefaultConfigSignInHashConfigMemoryCost(original["memoryCost"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigAlgorithm(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigSignerKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigSaltSeparator(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigRounds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigMemoryCost(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandIdentityPlatformProjectDefaultConfigSignIn(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEmail, err := expandIdentityPlatformProjectDefaultConfigSignInEmail(original["email"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEmail); val.IsValid() && !isEmptyValue(val) { - transformed["email"] = transformedEmail - } - - transformedPhoneNumber, err := expandIdentityPlatformProjectDefaultConfigSignInPhoneNumber(original["phone_number"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPhoneNumber); val.IsValid() && !isEmptyValue(val) { - transformed["phoneNumber"] = transformedPhoneNumber - } - - transformedAnonymous, err := expandIdentityPlatformProjectDefaultConfigSignInAnonymous(original["anonymous"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAnonymous); val.IsValid() && !isEmptyValue(val) { - transformed["anonymous"] = transformedAnonymous - } - - transformedAllowDuplicateEmails, err := expandIdentityPlatformProjectDefaultConfigSignInAllowDuplicateEmails(original["allow_duplicate_emails"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAllowDuplicateEmails); val.IsValid() && !isEmptyValue(val) { - transformed["allowDuplicateEmails"] = transformedAllowDuplicateEmails - } - - transformedHashConfig, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfig(original["hash_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHashConfig); val.IsValid() && !isEmptyValue(val) { - transformed["hashConfig"] = transformedHashConfig - } - - return transformed, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInEmail(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandIdentityPlatformProjectDefaultConfigSignInEmailEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - transformedPasswordRequired, err := expandIdentityPlatformProjectDefaultConfigSignInEmailPasswordRequired(original["password_required"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPasswordRequired); val.IsValid() && !isEmptyValue(val) { - transformed["passwordRequired"] = transformedPasswordRequired - } - - return transformed, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInEmailEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInEmailPasswordRequired(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInPhoneNumber(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandIdentityPlatformProjectDefaultConfigSignInPhoneNumberEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - transformedTestPhoneNumbers, err := 
expandIdentityPlatformProjectDefaultConfigSignInPhoneNumberTestPhoneNumbers(original["test_phone_numbers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTestPhoneNumbers); val.IsValid() && !isEmptyValue(val) { - transformed["testPhoneNumbers"] = transformedTestPhoneNumbers - } - - return transformed, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInPhoneNumberEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInPhoneNumberTestPhoneNumbers(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInAnonymous(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandIdentityPlatformProjectDefaultConfigSignInAnonymousEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - return transformed, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInAnonymousEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInAllowDuplicateEmails(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInHashConfig(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAlgorithm, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigAlgorithm(original["algorithm"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAlgorithm); val.IsValid() && !isEmptyValue(val) { - transformed["algorithm"] = transformedAlgorithm - } - - transformedSignerKey, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigSignerKey(original["signer_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSignerKey); val.IsValid() && !isEmptyValue(val) { - transformed["signerKey"] = transformedSignerKey - } - - transformedSaltSeparator, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigSaltSeparator(original["salt_separator"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSaltSeparator); val.IsValid() && !isEmptyValue(val) { - transformed["saltSeparator"] = transformedSaltSeparator - } - - transformedRounds, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigRounds(original["rounds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRounds); val.IsValid() && !isEmptyValue(val) { - transformed["rounds"] = transformedRounds - } - - transformedMemoryCost, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigMemoryCost(original["memory_cost"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMemoryCost); val.IsValid() && !isEmptyValue(val) { - transformed["memoryCost"] = transformedMemoryCost - } - - return transformed, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInHashConfigAlgorithm(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInHashConfigSignerKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInHashConfigSaltSeparator(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInHashConfigRounds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformProjectDefaultConfigSignInHashConfigMemoryCost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_tenant.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_tenant.go deleted file mode 100644 index cc86e78f95..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_tenant.go +++ /dev/null @@ -1,404 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceIdentityPlatformTenant() *schema.Resource { - return &schema.Resource{ - Create: resourceIdentityPlatformTenantCreate, - Read: resourceIdentityPlatformTenantRead, - Update: resourceIdentityPlatformTenantUpdate, - Delete: resourceIdentityPlatformTenantDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIdentityPlatformTenantImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `Human friendly display name of the tenant.`, - }, - "allow_password_signup": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether to allow email/password user authentication.`, - }, - "disable_auth": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether authentication is disabled for the tenant. If true, the users under -the disabled tenant are not allowed to sign-in. 
Admins of the disabled tenant -are not able to manage its users.`, - }, - "enable_email_link_signin": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether to enable email link user authentication.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the tenant that is generated by the server`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformTenantCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandIdentityPlatformTenantDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - allowPasswordSignupProp, err := expandIdentityPlatformTenantAllowPasswordSignup(d.Get("allow_password_signup"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("allow_password_signup"); !isEmptyValue(reflect.ValueOf(allowPasswordSignupProp)) && (ok || !reflect.DeepEqual(v, allowPasswordSignupProp)) { - obj["allowPasswordSignup"] = allowPasswordSignupProp - } - enableEmailLinkSigninProp, err := expandIdentityPlatformTenantEnableEmailLinkSignin(d.Get("enable_email_link_signin"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_email_link_signin"); !isEmptyValue(reflect.ValueOf(enableEmailLinkSigninProp)) && (ok || !reflect.DeepEqual(v, enableEmailLinkSigninProp)) { - obj["enableEmailLinkSignin"] = enableEmailLinkSigninProp - } - disableAuthProp, err := expandIdentityPlatformTenantDisableAuth(d.Get("disable_auth"), d, config) - if err != nil { 
- return err - } else if v, ok := d.GetOkExists("disable_auth"); !isEmptyValue(reflect.ValueOf(disableAuthProp)) && (ok || !reflect.DeepEqual(v, disableAuthProp)) { - obj["disableAuth"] = disableAuthProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Tenant: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Tenant: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Tenant: %s", err) - } - if err := d.Set("name", flattenIdentityPlatformTenantName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - if err := d.Set("name", GetResourceNameFromSelfLink(name.(string))); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - // Store the ID now that we have set the computed name - id, err = replaceVars(d, config, "projects/{{project}}/tenants/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Tenant %q: %#v", d.Id(), res) - - return resourceIdentityPlatformTenantRead(d, meta) -} - -func resourceIdentityPlatformTenantRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Tenant: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformTenant %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Tenant: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformTenantName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Tenant: %s", err) - } - if err := d.Set("display_name", flattenIdentityPlatformTenantDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Tenant: %s", err) - } - if err := d.Set("allow_password_signup", flattenIdentityPlatformTenantAllowPasswordSignup(res["allowPasswordSignup"], d, config)); 
err != nil { - return fmt.Errorf("Error reading Tenant: %s", err) - } - if err := d.Set("enable_email_link_signin", flattenIdentityPlatformTenantEnableEmailLinkSignin(res["enableEmailLinkSignin"], d, config)); err != nil { - return fmt.Errorf("Error reading Tenant: %s", err) - } - if err := d.Set("disable_auth", flattenIdentityPlatformTenantDisableAuth(res["disableAuth"], d, config)); err != nil { - return fmt.Errorf("Error reading Tenant: %s", err) - } - - return nil -} - -func resourceIdentityPlatformTenantUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Tenant: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandIdentityPlatformTenantDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - allowPasswordSignupProp, err := expandIdentityPlatformTenantAllowPasswordSignup(d.Get("allow_password_signup"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("allow_password_signup"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, allowPasswordSignupProp)) { - obj["allowPasswordSignup"] = allowPasswordSignupProp - } - enableEmailLinkSigninProp, err := expandIdentityPlatformTenantEnableEmailLinkSignin(d.Get("enable_email_link_signin"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_email_link_signin"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableEmailLinkSigninProp)) { - obj["enableEmailLinkSignin"] = enableEmailLinkSigninProp - } - 
disableAuthProp, err := expandIdentityPlatformTenantDisableAuth(d.Get("disable_auth"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disable_auth"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disableAuthProp)) { - obj["disableAuth"] = disableAuthProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Tenant %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("allow_password_signup") { - updateMask = append(updateMask, "allowPasswordSignup") - } - - if d.HasChange("enable_email_link_signin") { - updateMask = append(updateMask, "enableEmailLinkSignin") - } - - if d.HasChange("disable_auth") { - updateMask = append(updateMask, "disableAuth") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Tenant %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Tenant %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformTenantRead(d, meta) -} - -func resourceIdentityPlatformTenantDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
fmt.Errorf("Error fetching project for Tenant: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Tenant %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Tenant") - } - - log.Printf("[DEBUG] Finished deleting Tenant %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformTenantImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/tenants/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformTenantName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenIdentityPlatformTenantDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantAllowPasswordSignup(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantEnableEmailLinkSignin(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantDisableAuth(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIdentityPlatformTenantDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantAllowPasswordSignup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantEnableEmailLinkSignin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantDisableAuth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_tenant_default_supported_idp_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_tenant_default_supported_idp_config.go deleted file mode 100644 index 1e00059198..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_tenant_default_supported_idp_config.go +++ /dev/null @@ -1,384 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceIdentityPlatformTenantDefaultSupportedIdpConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceIdentityPlatformTenantDefaultSupportedIdpConfigCreate, - Read: resourceIdentityPlatformTenantDefaultSupportedIdpConfigRead, - Update: resourceIdentityPlatformTenantDefaultSupportedIdpConfigUpdate, - Delete: resourceIdentityPlatformTenantDefaultSupportedIdpConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIdentityPlatformTenantDefaultSupportedIdpConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "client_id": { - Type: schema.TypeString, - Required: true, - Description: `OAuth client ID`, - }, - "client_secret": { - Type: schema.TypeString, - Required: true, - Description: `OAuth client secret`, - }, - "idp_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the IDP. 
Possible values include: - -* 'apple.com' - -* 'facebook.com' - -* 'gc.apple.com' - -* 'github.com' - -* 'google.com' - -* 'linkedin.com' - -* 'microsoft.com' - -* 'playgames.google.com' - -* 'twitter.com' - -* 'yahoo.com'`, - }, - "tenant": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the tenant where this DefaultSupportedIdpConfig resource exists`, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `If this IDP allows the user to sign in`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The name of the default supported IDP config resource`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformTenantDefaultSupportedIdpConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - clientIdProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); !isEmptyValue(reflect.ValueOf(clientIdProp)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(reflect.ValueOf(clientSecretProp)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - enabledProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); 
!isEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs?idpId={{idp_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new TenantDefaultSupportedIdpConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TenantDefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TenantDefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("name", flattenIdentityPlatformTenantDefaultSupportedIdpConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating TenantDefaultSupportedIdpConfig %q: %#v", d.Id(), res) - - return resourceIdentityPlatformTenantDefaultSupportedIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformTenantDefaultSupportedIdpConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err 
!= nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TenantDefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformTenantDefaultSupportedIdpConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformTenantDefaultSupportedIdpConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("client_id", flattenIdentityPlatformTenantDefaultSupportedIdpConfigClientId(res["clientId"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("client_secret", flattenIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(res["clientSecret"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("enabled", flattenIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(res["enabled"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) - } - - return nil -} - -func resourceIdentityPlatformTenantDefaultSupportedIdpConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching 
project for TenantDefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - clientIdProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - enabledProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating TenantDefaultSupportedIdpConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("client_id") { - updateMask = append(updateMask, "clientId") - } - - if d.HasChange("client_secret") { - updateMask = append(updateMask, "clientSecret") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err 
== nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating TenantDefaultSupportedIdpConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TenantDefaultSupportedIdpConfig %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformTenantDefaultSupportedIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformTenantDefaultSupportedIdpConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TenantDefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting TenantDefaultSupportedIdpConfig %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TenantDefaultSupportedIdpConfig") - } - - log.Printf("[DEBUG] Finished deleting TenantDefaultSupportedIdpConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformTenantDefaultSupportedIdpConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/tenants/(?P[^/]+)/defaultSupportedIdpConfigs/(?P[^/]+)", - 
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformTenantDefaultSupportedIdpConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantDefaultSupportedIdpConfigClientId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIdentityPlatformTenantDefaultSupportedIdpConfigClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_tenant_inbound_saml_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_tenant_inbound_saml_config.go deleted file mode 100644 index 89df1cc320..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_tenant_inbound_saml_config.go +++ /dev/null @@ -1,696 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceIdentityPlatformTenantInboundSamlConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceIdentityPlatformTenantInboundSamlConfigCreate, - Read: resourceIdentityPlatformTenantInboundSamlConfigRead, - Update: resourceIdentityPlatformTenantInboundSamlConfigUpdate, - Delete: resourceIdentityPlatformTenantInboundSamlConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIdentityPlatformTenantInboundSamlConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `Human friendly display name.`, - }, - "idp_config": { - Type: schema.TypeList, - Required: true, - Description: `SAML IdP configuration when the project acts as the relying party`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "idp_certificates": { - Type: schema.TypeList, - Required: true, - Description: `The IDP's certificate 
data to verify the signature in the SAMLResponse issued by the IDP.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "x509_certificate": { - Type: schema.TypeString, - Optional: true, - Description: `The x509 certificate`, - }, - }, - }, - }, - "idp_entity_id": { - Type: schema.TypeString, - Required: true, - Description: `Unique identifier for all SAML entities`, - }, - "sso_url": { - Type: schema.TypeString, - Required: true, - Description: `URL to send Authentication request to.`, - }, - "sign_request": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates if outbounding SAMLRequest should be signed.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the InboundSamlConfig resource. Must start with 'saml.' and can only have alphanumeric characters, -hyphens, underscores or periods. The part after 'saml.' must also start with a lowercase letter, end with an -alphanumeric character, and have at least 2 characters.`, - }, - "sp_config": { - Type: schema.TypeList, - Required: true, - Description: `SAML SP (Service Provider) configuration when the project acts as the relying party to receive -and accept an authentication assertion issued by a SAML identity provider.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "callback_uri": { - Type: schema.TypeString, - Required: true, - Description: `Callback URI where responses from IDP are handled. 
Must start with 'https://'.`, - }, - "sp_entity_id": { - Type: schema.TypeString, - Required: true, - Description: `Unique identifier for all SAML entities.`, - }, - "sp_certificates": { - Type: schema.TypeList, - Computed: true, - Description: `The IDP's certificate data to verify the signature in the SAMLResponse issued by the IDP.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "x509_certificate": { - Type: schema.TypeString, - Computed: true, - Description: `The x509 certificate`, - }, - }, - }, - }, - }, - }, - }, - "tenant": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the tenant where this inbound SAML config resource exists`, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `If this config allows users to sign in with the provider.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformTenantInboundSamlConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandIdentityPlatformTenantInboundSamlConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - displayNameProp, err := expandIdentityPlatformTenantInboundSamlConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformTenantInboundSamlConfigEnabled(d.Get("enabled"), d, config) 
- if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - idpConfigProp, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfig(d.Get("idp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("idp_config"); !isEmptyValue(reflect.ValueOf(idpConfigProp)) && (ok || !reflect.DeepEqual(v, idpConfigProp)) { - obj["idpConfig"] = idpConfigProp - } - spConfigProp, err := expandIdentityPlatformTenantInboundSamlConfigSpConfig(d.Get("sp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("sp_config"); !isEmptyValue(reflect.ValueOf(spConfigProp)) && (ok || !reflect.DeepEqual(v, spConfigProp)) { - obj["spConfig"] = spConfigProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs?inboundSamlConfigId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new TenantInboundSamlConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TenantInboundSamlConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TenantInboundSamlConfig: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating TenantInboundSamlConfig %q: %#v", d.Id(), res) - - 
return resourceIdentityPlatformTenantInboundSamlConfigRead(d, meta) -} - -func resourceIdentityPlatformTenantInboundSamlConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TenantInboundSamlConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformTenantInboundSamlConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformTenantInboundSamlConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) - } - if err := d.Set("display_name", flattenIdentityPlatformTenantInboundSamlConfigDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) - } - if err := d.Set("enabled", flattenIdentityPlatformTenantInboundSamlConfigEnabled(res["enabled"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) - } - if err := d.Set("idp_config", flattenIdentityPlatformTenantInboundSamlConfigIdpConfig(res["idpConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) - } - if err := 
d.Set("sp_config", flattenIdentityPlatformTenantInboundSamlConfigSpConfig(res["spConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) - } - - return nil -} - -func resourceIdentityPlatformTenantInboundSamlConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TenantInboundSamlConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandIdentityPlatformTenantInboundSamlConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformTenantInboundSamlConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - idpConfigProp, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfig(d.Get("idp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("idp_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idpConfigProp)) { - obj["idpConfig"] = idpConfigProp - } - spConfigProp, err := expandIdentityPlatformTenantInboundSamlConfigSpConfig(d.Get("sp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("sp_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, spConfigProp)) { - obj["spConfig"] = spConfigProp - } - - url, err := replaceVars(d, config, 
"{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating TenantInboundSamlConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - - if d.HasChange("idp_config") { - updateMask = append(updateMask, "idpConfig") - } - - if d.HasChange("sp_config") { - updateMask = append(updateMask, "spConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating TenantInboundSamlConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TenantInboundSamlConfig %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformTenantInboundSamlConfigRead(d, meta) -} - -func resourceIdentityPlatformTenantInboundSamlConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TenantInboundSamlConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") - if err != nil { - return err - } - - var obj 
map[string]interface{} - log.Printf("[DEBUG] Deleting TenantInboundSamlConfig %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TenantInboundSamlConfig") - } - - log.Printf("[DEBUG] Finished deleting TenantInboundSamlConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformTenantInboundSamlConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/tenants/(?P[^/]+)/inboundSamlConfigs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformTenantInboundSamlConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenIdentityPlatformTenantInboundSamlConfigDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigIdpConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["idp_entity_id"] = - flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpEntityId(original["idpEntityId"], d, config) - transformed["sso_url"] = - flattenIdentityPlatformTenantInboundSamlConfigIdpConfigSsoUrl(original["ssoUrl"], d, config) - transformed["sign_request"] = - flattenIdentityPlatformTenantInboundSamlConfigIdpConfigSignRequest(original["signRequest"], d, config) - transformed["idp_certificates"] = - flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificates(original["idpCertificates"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpEntityId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigSsoUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigSignRequest(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "x509_certificate": flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(original["x509Certificate"], d, config), - }) - } - return transformed -} -func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigSpConfig(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["sp_entity_id"] = - flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpEntityId(original["spEntityId"], d, config) - transformed["callback_uri"] = - flattenIdentityPlatformTenantInboundSamlConfigSpConfigCallbackUri(original["callbackUri"], d, config) - transformed["sp_certificates"] = - flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificates(original["spCertificates"], d, config) - return []interface{}{transformed} -} -func flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpEntityId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigSpConfigCallbackUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "x509_certificate": flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificatesX509Certificate(original["x509Certificate"], d, config), - }) - } - return transformed -} -func flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificatesX509Certificate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIdentityPlatformTenantInboundSamlConfigName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandIdentityPlatformTenantInboundSamlConfigDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigIdpConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdpEntityId, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpEntityId(original["idp_entity_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIdpEntityId); val.IsValid() && !isEmptyValue(val) { - transformed["idpEntityId"] = transformedIdpEntityId - } - - transformedSsoUrl, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigSsoUrl(original["sso_url"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSsoUrl); val.IsValid() && !isEmptyValue(val) { - transformed["ssoUrl"] = transformedSsoUrl - } - - transformedSignRequest, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigSignRequest(original["sign_request"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSignRequest); val.IsValid() && !isEmptyValue(val) { - transformed["signRequest"] = transformedSignRequest - } - - transformedIdpCertificates, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificates(original["idp_certificates"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIdpCertificates); val.IsValid() && !isEmptyValue(val) { - transformed["idpCertificates"] = transformedIdpCertificates - } - - return transformed, nil -} - -func 
expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpEntityId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigIdpConfigSsoUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigIdpConfigSignRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedX509Certificate, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(original["x509_certificate"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedX509Certificate); val.IsValid() && !isEmptyValue(val) { - transformed["x509Certificate"] = transformedX509Certificate - } - - req = append(req, transformed) - } - return req, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigSpConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSpEntityId, err := expandIdentityPlatformTenantInboundSamlConfigSpConfigSpEntityId(original["sp_entity_id"], d, config) - if err != nil { - return nil, err - } else 
if val := reflect.ValueOf(transformedSpEntityId); val.IsValid() && !isEmptyValue(val) { - transformed["spEntityId"] = transformedSpEntityId - } - - transformedCallbackUri, err := expandIdentityPlatformTenantInboundSamlConfigSpConfigCallbackUri(original["callback_uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCallbackUri); val.IsValid() && !isEmptyValue(val) { - transformed["callbackUri"] = transformedCallbackUri - } - - transformedSpCertificates, err := expandIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificates(original["sp_certificates"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSpCertificates); val.IsValid() && !isEmptyValue(val) { - transformed["spCertificates"] = transformedSpCertificates - } - - return transformed, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigSpConfigSpEntityId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigSpConfigCallbackUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedX509Certificate, err := expandIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificatesX509Certificate(original["x509_certificate"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedX509Certificate); val.IsValid() && !isEmptyValue(val) { - transformed["x509Certificate"] = transformedX509Certificate - } - - req = append(req, transformed) - } - return req, nil -} - -func 
expandIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificatesX509Certificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_tenant_oauth_idp_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_tenant_oauth_idp_config.go deleted file mode 100644 index 9a23958be8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_identity_platform_tenant_oauth_idp_config.go +++ /dev/null @@ -1,433 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceIdentityPlatformTenantOauthIdpConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceIdentityPlatformTenantOauthIdpConfigCreate, - Read: resourceIdentityPlatformTenantOauthIdpConfigRead, - Update: resourceIdentityPlatformTenantOauthIdpConfigUpdate, - Delete: resourceIdentityPlatformTenantOauthIdpConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceIdentityPlatformTenantOauthIdpConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "client_id": { - Type: schema.TypeString, - Required: true, - Description: `The client id of an OAuth client.`, - }, - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `Human friendly display name.`, - }, - "issuer": { - Type: schema.TypeString, - Required: true, - Description: `For OIDC Idps, the issuer identifier.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the OauthIdpConfig. 
Must start with 'oidc.'.`, - }, - "tenant": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the tenant where this OIDC IDP configuration resource exists`, - }, - "client_secret": { - Type: schema.TypeString, - Optional: true, - Description: `The client secret of the OAuth client, to enable OIDC code flow.`, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `If this config allows users to sign in with the provider.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformTenantOauthIdpConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandIdentityPlatformTenantOauthIdpConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - displayNameProp, err := expandIdentityPlatformTenantOauthIdpConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformTenantOauthIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - issuerProp, err := expandIdentityPlatformTenantOauthIdpConfigIssuer(d.Get("issuer"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("issuer"); !isEmptyValue(reflect.ValueOf(issuerProp)) && (ok || !reflect.DeepEqual(v, issuerProp)) { - obj["issuer"] = issuerProp - } - clientIdProp, err := expandIdentityPlatformTenantOauthIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); !isEmptyValue(reflect.ValueOf(clientIdProp)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformTenantOauthIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(reflect.ValueOf(clientSecretProp)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs?oauthIdpConfigId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new TenantOauthIdpConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TenantOauthIdpConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TenantOauthIdpConfig: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating TenantOauthIdpConfig %q: %#v", d.Id(), res) - - return resourceIdentityPlatformTenantOauthIdpConfigRead(d, 
meta) -} - -func resourceIdentityPlatformTenantOauthIdpConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TenantOauthIdpConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformTenantOauthIdpConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformTenantOauthIdpConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - if err := d.Set("display_name", flattenIdentityPlatformTenantOauthIdpConfigDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - if err := d.Set("enabled", flattenIdentityPlatformTenantOauthIdpConfigEnabled(res["enabled"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - if err := d.Set("issuer", flattenIdentityPlatformTenantOauthIdpConfigIssuer(res["issuer"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - if err := d.Set("client_id", flattenIdentityPlatformTenantOauthIdpConfigClientId(res["clientId"], d, config)); err != nil { - 
return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - if err := d.Set("client_secret", flattenIdentityPlatformTenantOauthIdpConfigClientSecret(res["clientSecret"], d, config)); err != nil { - return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - - return nil -} - -func resourceIdentityPlatformTenantOauthIdpConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TenantOauthIdpConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandIdentityPlatformTenantOauthIdpConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformTenantOauthIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - issuerProp, err := expandIdentityPlatformTenantOauthIdpConfigIssuer(d.Get("issuer"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("issuer"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, issuerProp)) { - obj["issuer"] = issuerProp - } - clientIdProp, err := expandIdentityPlatformTenantOauthIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - 
clientSecretProp, err := expandIdentityPlatformTenantOauthIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating TenantOauthIdpConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - - if d.HasChange("issuer") { - updateMask = append(updateMask, "issuer") - } - - if d.HasChange("client_id") { - updateMask = append(updateMask, "clientId") - } - - if d.HasChange("client_secret") { - updateMask = append(updateMask, "clientSecret") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating TenantOauthIdpConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TenantOauthIdpConfig %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformTenantOauthIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformTenantOauthIdpConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for TenantOauthIdpConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting TenantOauthIdpConfig %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TenantOauthIdpConfig") - } - - log.Printf("[DEBUG] Finished deleting TenantOauthIdpConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformTenantOauthIdpConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/tenants/(?P[^/]+)/oauthIdpConfigs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformTenantOauthIdpConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenIdentityPlatformTenantOauthIdpConfigDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - 
-func flattenIdentityPlatformTenantOauthIdpConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantOauthIdpConfigIssuer(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantOauthIdpConfigClientId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantOauthIdpConfigClientSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIdentityPlatformTenantOauthIdpConfigName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantOauthIdpConfigDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantOauthIdpConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantOauthIdpConfigIssuer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantOauthIdpConfigClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantOauthIdpConfigClientSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_crypto_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_crypto_key.go deleted file mode 100644 index 2d5a9ae40e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_crypto_key.go +++ /dev/null @@ -1,626 +0,0 @@ -// 
---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceKMSCryptoKey() *schema.Resource { - return &schema.Resource{ - Create: resourceKMSCryptoKeyCreate, - Read: resourceKMSCryptoKeyRead, - Update: resourceKMSCryptoKeyUpdate, - Delete: resourceKMSCryptoKeyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceKMSCryptoKeyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - SchemaVersion: 1, - StateUpgraders: []schema.StateUpgrader{ - { - Type: resourceKMSCryptoKeyResourceV0().CoreConfigSchema().ImpliedType(), - Upgrade: resourceKMSCryptoKeyUpgradeV0, - Version: 0, - }, - }, - - Schema: map[string]*schema.Schema{ - "key_ring": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: kmsCryptoKeyRingsEquivalent, - Description: `The KeyRing that this key belongs to. 
-Format: ''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}''.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name for the CryptoKey.`, - }, - "destroy_scheduled_duration": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The period of time that versions of this key spend in the DESTROY_SCHEDULED state before transitioning to DESTROYED. -If not specified at creation time, the default duration is 24 hours.`, - }, - "import_only": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Whether this key may contain imported versions only.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Labels with user-defined metadata to apply to this resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "purpose": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT", "MAC", ""}), - Description: `The immutable purpose of this CryptoKey. See the -[purpose reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose) -for possible inputs. Default value: "ENCRYPT_DECRYPT" Possible values: ["ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT", "MAC"]`, - Default: "ENCRYPT_DECRYPT", - }, - "rotation_period": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: orEmpty(validateKmsCryptoKeyRotationPeriod), - Description: `Every time this period passes, generate a new CryptoKeyVersion and set it as the primary. -The first rotation will take place after the specified period. The rotation period has -the format of a decimal number with up to 9 fractional digits, followed by the -letter 's' (seconds). 
It must be greater than a day (ie, 86400).`, - }, - "skip_initial_version_creation": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If set to true, the request will create a CryptoKey without any CryptoKeyVersions. -You must use the 'google_kms_key_ring_import_job' resource to import the CryptoKeyVersion.`, - }, - "version_template": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `A template describing settings for new crypto key versions.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "algorithm": { - Type: schema.TypeString, - Required: true, - Description: `The algorithm to use when creating a version based on this template. -See the [algorithm reference](https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm) for possible inputs.`, - }, - "protection_level": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL", "EXTERNAL_VPC". 
Defaults to "SOFTWARE".`, - Default: "SOFTWARE", - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceKMSCryptoKeyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandKMSCryptoKeyLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - purposeProp, err := expandKMSCryptoKeyPurpose(d.Get("purpose"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("purpose"); !isEmptyValue(reflect.ValueOf(purposeProp)) && (ok || !reflect.DeepEqual(v, purposeProp)) { - obj["purpose"] = purposeProp - } - rotationPeriodProp, err := expandKMSCryptoKeyRotationPeriod(d.Get("rotation_period"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rotation_period"); !isEmptyValue(reflect.ValueOf(rotationPeriodProp)) && (ok || !reflect.DeepEqual(v, rotationPeriodProp)) { - obj["rotationPeriod"] = rotationPeriodProp - } - versionTemplateProp, err := expandKMSCryptoKeyVersionTemplate(d.Get("version_template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_template"); !isEmptyValue(reflect.ValueOf(versionTemplateProp)) && (ok || !reflect.DeepEqual(v, versionTemplateProp)) { - obj["versionTemplate"] = versionTemplateProp - } - destroyScheduledDurationProp, err := expandKMSCryptoKeyDestroyScheduledDuration(d.Get("destroy_scheduled_duration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destroy_scheduled_duration"); !isEmptyValue(reflect.ValueOf(destroyScheduledDurationProp)) && (ok || !reflect.DeepEqual(v, destroyScheduledDurationProp)) { - obj["destroyScheduledDuration"] = 
destroyScheduledDurationProp - } - importOnlyProp, err := expandKMSCryptoKeyImportOnly(d.Get("import_only"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("import_only"); !isEmptyValue(reflect.ValueOf(importOnlyProp)) && (ok || !reflect.DeepEqual(v, importOnlyProp)) { - obj["importOnly"] = importOnlyProp - } - - obj, err = resourceKMSCryptoKeyEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{key_ring}}/cryptoKeys?cryptoKeyId={{name}}&skipInitialVersionCreation={{skip_initial_version_creation}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new CryptoKey: %#v", obj) - billingProject := "" - - if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating CryptoKey: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{key_ring}}/cryptoKeys/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating CryptoKey %q: %#v", d.Id(), res) - - return resourceKMSCryptoKeyRead(d, meta) -} - -func resourceKMSCryptoKeyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{key_ring}}/cryptoKeys/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - 
} - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("KMSCryptoKey %q", d.Id())) - } - - res, err = resourceKMSCryptoKeyDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing KMSCryptoKey because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("labels", flattenKMSCryptoKeyLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading CryptoKey: %s", err) - } - if err := d.Set("purpose", flattenKMSCryptoKeyPurpose(res["purpose"], d, config)); err != nil { - return fmt.Errorf("Error reading CryptoKey: %s", err) - } - if err := d.Set("rotation_period", flattenKMSCryptoKeyRotationPeriod(res["rotationPeriod"], d, config)); err != nil { - return fmt.Errorf("Error reading CryptoKey: %s", err) - } - if err := d.Set("version_template", flattenKMSCryptoKeyVersionTemplate(res["versionTemplate"], d, config)); err != nil { - return fmt.Errorf("Error reading CryptoKey: %s", err) - } - if err := d.Set("destroy_scheduled_duration", flattenKMSCryptoKeyDestroyScheduledDuration(res["destroyScheduledDuration"], d, config)); err != nil { - return fmt.Errorf("Error reading CryptoKey: %s", err) - } - if err := d.Set("import_only", flattenKMSCryptoKeyImportOnly(res["importOnly"], d, config)); err != nil { - return fmt.Errorf("Error reading CryptoKey: %s", err) - } - - return nil -} - -func resourceKMSCryptoKeyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - labelsProp, 
err := expandKMSCryptoKeyLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - rotationPeriodProp, err := expandKMSCryptoKeyRotationPeriod(d.Get("rotation_period"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rotation_period"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rotationPeriodProp)) { - obj["rotationPeriod"] = rotationPeriodProp - } - versionTemplateProp, err := expandKMSCryptoKeyVersionTemplate(d.Get("version_template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_template"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, versionTemplateProp)) { - obj["versionTemplate"] = versionTemplateProp - } - - obj, err = resourceKMSCryptoKeyUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{key_ring}}/cryptoKeys/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating CryptoKey %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("rotation_period") { - updateMask = append(updateMask, "rotationPeriod", - "nextRotationTime") - } - - if d.HasChange("version_template") { - updateMask = append(updateMask, "versionTemplate.algorithm") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); 
err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating CryptoKey %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating CryptoKey %q: %#v", d.Id(), res) - } - - return resourceKMSCryptoKeyRead(d, meta) -} - -func resourceKMSCryptoKeyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - cryptoKeyId, err := parseKmsCryptoKeyId(d.Id(), config) - if err != nil { - return err - } - - log.Printf(` -[WARNING] KMS CryptoKey resources cannot be deleted from GCP. The CryptoKey %s will be removed from Terraform state, -and all its CryptoKeyVersions will be destroyed, but it will still be present in the project.`, cryptoKeyId.cryptoKeyId()) - - // Delete all versions of the key - if err := clearCryptoKeyVersions(cryptoKeyId, userAgent, config); err != nil { - return err - } - - // Make sure automatic key rotation is disabled if set - if d.Get("rotation_period") != "" { - if err := disableCryptoKeyRotation(cryptoKeyId, userAgent, config); err != nil { - return fmt.Errorf( - "While cryptoKeyVersions were cleared, Terraform was unable to disable automatic rotation of key due to an error: %s."+ - "Please retry or manually disable automatic rotation to prevent creation of a new version of this key.", err) - } - } - - d.SetId("") - return nil -} - -func resourceKMSCryptoKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - cryptoKeyId, err := parseKmsCryptoKeyId(d.Id(), config) - if err != nil { - return nil, err - } - - if err := d.Set("key_ring", cryptoKeyId.KeyRingId.keyRingId()); err != nil { - return nil, fmt.Errorf("Error setting key_ring: %s", err) - } - if err := d.Set("name", cryptoKeyId.Name); err 
!= nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - if err := d.Set("skip_initial_version_creation", false); err != nil { - return nil, fmt.Errorf("Error setting skip_initial_version_creation: %s", err) - } - - id, err := replaceVars(d, config, "{{key_ring}}/cryptoKeys/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenKMSCryptoKeyLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSCryptoKeyPurpose(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSCryptoKeyRotationPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSCryptoKeyVersionTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["algorithm"] = - flattenKMSCryptoKeyVersionTemplateAlgorithm(original["algorithm"], d, config) - transformed["protection_level"] = - flattenKMSCryptoKeyVersionTemplateProtectionLevel(original["protectionLevel"], d, config) - return []interface{}{transformed} -} -func flattenKMSCryptoKeyVersionTemplateAlgorithm(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSCryptoKeyVersionTemplateProtectionLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSCryptoKeyDestroyScheduledDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSCryptoKeyImportOnly(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandKMSCryptoKeyLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) 
{ - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandKMSCryptoKeyPurpose(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSCryptoKeyRotationPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSCryptoKeyVersionTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAlgorithm, err := expandKMSCryptoKeyVersionTemplateAlgorithm(original["algorithm"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAlgorithm); val.IsValid() && !isEmptyValue(val) { - transformed["algorithm"] = transformedAlgorithm - } - - transformedProtectionLevel, err := expandKMSCryptoKeyVersionTemplateProtectionLevel(original["protection_level"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProtectionLevel); val.IsValid() && !isEmptyValue(val) { - transformed["protectionLevel"] = transformedProtectionLevel - } - - return transformed, nil -} - -func expandKMSCryptoKeyVersionTemplateAlgorithm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSCryptoKeyVersionTemplateProtectionLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSCryptoKeyDestroyScheduledDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSCryptoKeyImportOnly(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, 
nil -} - -func resourceKMSCryptoKeyEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // if rotationPeriod is set, nextRotationTime must also be set. - if d.Get("rotation_period") != "" { - rotationPeriod := d.Get("rotation_period").(string) - nextRotation, err := kmsCryptoKeyNextRotation(time.Now(), rotationPeriod) - - if err != nil { - return nil, fmt.Errorf("Error setting CryptoKey rotation period: %s", err.Error()) - } - - obj["nextRotationTime"] = nextRotation - } - - // set to false if it is not true explicitly - if !(d.Get("skip_initial_version_creation").(bool)) { - if err := d.Set("skip_initial_version_creation", false); err != nil { - return nil, fmt.Errorf("Error setting skip_initial_version_creation: %s", err) - } - } - - return obj, nil -} - -func resourceKMSCryptoKeyUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // if rotationPeriod is changed, nextRotationTime must also be set. - if d.HasChange("rotation_period") && d.Get("rotation_period") != "" { - rotationPeriod := d.Get("rotation_period").(string) - nextRotation, err := kmsCryptoKeyNextRotation(time.Now(), rotationPeriod) - - if err != nil { - return nil, fmt.Errorf("Error setting CryptoKey rotation period: %s", err.Error()) - } - - obj["nextRotationTime"] = nextRotation - } - - return obj, nil -} - -func resourceKMSCryptoKeyDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // Modify the name to be the user specified form. - // We can't just ignore_read on `name` as the linter will - // complain that the returned `res` is never used afterwards. - // Some field needs to be actually set, and we chose `name`. 
- res["name"] = d.Get("name").(string) - return res, nil -} - -func resourceKMSCryptoKeyResourceV0() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "key_ring": { - Type: schema.TypeString, - Required: true, - }, - "rotation_period": { - Type: schema.TypeString, - Optional: true, - }, - "version_template": { - Type: schema.TypeList, - Optional: true, - }, - "self_link": { - Type: schema.TypeString, - }, - }, - } -} - -func resourceKMSCryptoKeyUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - log.Printf("[DEBUG] Attributes before migration: %#v", rawState) - - config := meta.(*Config) - keyRingId := rawState["key_ring"].(string) - parsed, err := parseKmsKeyRingId(keyRingId, config) - if err != nil { - return nil, err - } - rawState["key_ring"] = parsed.keyRingId() - - log.Printf("[DEBUG] Attributes after migration: %#v", rawState) - return rawState, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_key_ring.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_key_ring.go deleted file mode 100644 index e650a6b02c..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_key_ring.go +++ /dev/null @@ -1,233 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceKMSKeyRing() *schema.Resource { - return &schema.Resource{ - Create: resourceKMSKeyRingCreate, - Read: resourceKMSKeyRingRead, - Delete: resourceKMSKeyRingDelete, - - Importer: &schema.ResourceImporter{ - State: resourceKMSKeyRingImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location for the KeyRing. -A full list of valid locations can be found by running 'gcloud kms locations list'.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name for the KeyRing.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceKMSKeyRingCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandKMSKeyRingName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - locationProp, err := expandKMSKeyRingLocation(d.Get("location"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("location"); !isEmptyValue(reflect.ValueOf(locationProp)) && (ok || !reflect.DeepEqual(v, locationProp)) { - obj["location"] = locationProp - } - - obj, err = resourceKMSKeyRingEncoder(d, meta, obj) 
- if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}projects/{{project}}/locations/{{location}}/keyRings?keyRingId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new KeyRing: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for KeyRing: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating KeyRing: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/keyRings/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating KeyRing %q: %#v", d.Id(), res) - - return resourceKMSKeyRingRead(d, meta) -} - -func resourceKMSKeyRingRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}projects/{{project}}/locations/{{location}}/keyRings/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for KeyRing: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("KMSKeyRing %q", d.Id())) 
- } - - res, err = resourceKMSKeyRingDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing KMSKeyRing because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading KeyRing: %s", err) - } - - if err := d.Set("name", flattenKMSKeyRingName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading KeyRing: %s", err) - } - - return nil -} - -func resourceKMSKeyRingDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARNING] KMS KeyRing resources"+ - " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceKMSKeyRingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/keyRings/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/keyRings/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenKMSKeyRingName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandKMSKeyRingName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSKeyRingLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceKMSKeyRingEncoder(d *schema.ResourceData, meta interface{}, obj 
map[string]interface{}) (map[string]interface{}, error) { - return nil, nil -} - -func resourceKMSKeyRingDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // Modify the name to be the user specified form. - // We can't just ignore_read on `name` as the linter will - // complain that the returned `res` is never used afterwards. - // Some field needs to be actually set, and we chose `name`. - res["name"] = d.Get("name").(string) - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_key_ring_import_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_key_ring_import_job.go deleted file mode 100644 index 0d51220d90..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_key_ring_import_job.go +++ /dev/null @@ -1,357 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceKMSKeyRingImportJob() *schema.Resource { - return &schema.Resource{ - Create: resourceKMSKeyRingImportJobCreate, - Read: resourceKMSKeyRingImportJobRead, - Delete: resourceKMSKeyRingImportJobDelete, - - Importer: &schema.ResourceImporter{ - State: resourceKMSKeyRingImportJobImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "import_job_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `It must be unique within a KeyRing and match the regular expression [a-zA-Z0-9_-]{1,63}`, - }, - "import_method": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"RSA_OAEP_3072_SHA1_AES_256", "RSA_OAEP_4096_SHA1_AES_256"}), - Description: `The wrapping method to be used for incoming key material. Possible values: ["RSA_OAEP_3072_SHA1_AES_256", "RSA_OAEP_4096_SHA1_AES_256"]`, - }, - "key_ring": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: kmsCryptoKeyRingsEquivalent, - Description: `The KeyRing that this import job belongs to. -Format: ''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}''.`, - }, - "protection_level": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"SOFTWARE", "HSM", "EXTERNAL"}), - Description: `The protection level of the ImportJob. This must match the protectionLevel of the -versionTemplate on the CryptoKey you attempt to import into. 
Possible values: ["SOFTWARE", "HSM", "EXTERNAL"]`, - }, - "attestation": { - Type: schema.TypeList, - Computed: true, - Description: `Statement that was generated and signed by the key creator (for example, an HSM) at key creation time. -Use this statement to verify attributes of the key as stored on the HSM, independently of Google. -Only present if the chosen ImportMethod is one with a protection level of HSM.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "content": { - Type: schema.TypeString, - Computed: true, - Description: `The attestation data provided by the HSM when the key operation was performed. -A base64-encoded string.`, - }, - "format": { - Type: schema.TypeString, - Computed: true, - Description: `The format of the attestation data.`, - }, - }, - }, - }, - "expire_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time at which this resource is scheduled for expiration and can no longer be used. -This is in RFC3339 text format.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name for this ImportJob in the format projects/*/locations/*/keyRings/*/importJobs/*.`, - }, - "public_key": { - Type: schema.TypeList, - Computed: true, - Description: `The public key with which to wrap key material prior to import. Only returned if state is 'ACTIVE'.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pem": { - Type: schema.TypeString, - Computed: true, - Description: `The public key, encoded in PEM format. 
For more information, see the RFC 7468 sections -for General Considerations and Textual Encoding of Subject Public Key Info.`, - }, - }, - }, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `The current state of the ImportJob, indicating if it can be used.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceKMSKeyRingImportJobCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - importMethodProp, err := expandKMSKeyRingImportJobImportMethod(d.Get("import_method"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("import_method"); !isEmptyValue(reflect.ValueOf(importMethodProp)) && (ok || !reflect.DeepEqual(v, importMethodProp)) { - obj["importMethod"] = importMethodProp - } - protectionLevelProp, err := expandKMSKeyRingImportJobProtectionLevel(d.Get("protection_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("protection_level"); !isEmptyValue(reflect.ValueOf(protectionLevelProp)) && (ok || !reflect.DeepEqual(v, protectionLevelProp)) { - obj["protectionLevel"] = protectionLevelProp - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{key_ring}}/importJobs?importJobId={{import_job_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new KeyRingImportJob: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating KeyRingImportJob: %s", err) - } - if err := d.Set("name", flattenKMSKeyRingImportJobName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error 
setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating KeyRingImportJob %q: %#v", d.Id(), res) - - return resourceKMSKeyRingImportJobRead(d, meta) -} - -func resourceKMSKeyRingImportJobRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("KMSKeyRingImportJob %q", d.Id())) - } - - if err := d.Set("name", flattenKMSKeyRingImportJobName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - if err := d.Set("import_method", flattenKMSKeyRingImportJobImportMethod(res["importMethod"], d, config)); err != nil { - return fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - if err := d.Set("protection_level", flattenKMSKeyRingImportJobProtectionLevel(res["protectionLevel"], d, config)); err != nil { - return fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - if err := d.Set("expire_time", flattenKMSKeyRingImportJobExpireTime(res["expireTime"], d, config)); err != nil { - return fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - if err := d.Set("state", flattenKMSKeyRingImportJobState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - if err := d.Set("public_key", 
flattenKMSKeyRingImportJobPublicKey(res["publicKey"], d, config)); err != nil { - return fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - if err := d.Set("attestation", flattenKMSKeyRingImportJobAttestation(res["attestation"], d, config)); err != nil { - return fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - - return nil -} - -func resourceKMSKeyRingImportJobDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting KeyRingImportJob %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "KeyRingImportJob") - } - - log.Printf("[DEBUG] Finished deleting KeyRingImportJob %q: %#v", d.Id(), res) - return nil -} - -func resourceKMSKeyRingImportJobImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := strings.Split(d.Get("name").(string), "/") - if len(stringParts) != 8 { - return nil, fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("name"), - "projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/importJobs/{{importJobId}}", - ) - } - - if err := d.Set("key_ring", stringParts[3]); err != nil { - return nil, fmt.Errorf("Error setting key_ring: %s", err) - } - if 
err := d.Set("import_job_id", stringParts[5]); err != nil { - return nil, fmt.Errorf("Error setting import_job_id: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenKMSKeyRingImportJobName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSKeyRingImportJobImportMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSKeyRingImportJobProtectionLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSKeyRingImportJobExpireTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSKeyRingImportJobState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSKeyRingImportJobPublicKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pem"] = - flattenKMSKeyRingImportJobPublicKeyPem(original["pem"], d, config) - return []interface{}{transformed} -} -func flattenKMSKeyRingImportJobPublicKeyPem(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSKeyRingImportJobAttestation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["format"] = - flattenKMSKeyRingImportJobAttestationFormat(original["format"], d, config) - transformed["content"] = - flattenKMSKeyRingImportJobAttestationContent(original["content"], d, config) - return []interface{}{transformed} -} -func flattenKMSKeyRingImportJobAttestationFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v 
-} - -func flattenKMSKeyRingImportJobAttestationContent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandKMSKeyRingImportJobImportMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSKeyRingImportJobProtectionLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_secret_ciphertext.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_secret_ciphertext.go deleted file mode 100644 index 5b9c6c2b60..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_secret_ciphertext.go +++ /dev/null @@ -1,211 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/base64" - "fmt" - "log" - "reflect" - "regexp" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceKMSSecretCiphertext() *schema.Resource { - return &schema.Resource{ - Create: resourceKMSSecretCiphertextCreate, - Read: resourceKMSSecretCiphertextRead, - Delete: resourceKMSSecretCiphertextDelete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "crypto_key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The full name of the CryptoKey that will be used to encrypt the provided plaintext. -Format: ''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}''`, - }, - "plaintext": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The plaintext to be encrypted.`, - Sensitive: true, - }, - "additional_authenticated_data": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The additional authenticated data used for integrity checks during encryption and decryption.`, - Sensitive: true, - }, - "ciphertext": { - Type: schema.TypeString, - Computed: true, - Description: `Contains the result of encrypting the provided plaintext, encoded in base64.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceKMSSecretCiphertextCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - plaintextProp, err := expandKMSSecretCiphertextPlaintext(d.Get("plaintext"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("plaintext"); 
!isEmptyValue(reflect.ValueOf(plaintextProp)) && (ok || !reflect.DeepEqual(v, plaintextProp)) { - obj["plaintext"] = plaintextProp - } - additionalAuthenticatedDataProp, err := expandKMSSecretCiphertextAdditionalAuthenticatedData(d.Get("additional_authenticated_data"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("additional_authenticated_data"); !isEmptyValue(reflect.ValueOf(additionalAuthenticatedDataProp)) && (ok || !reflect.DeepEqual(v, additionalAuthenticatedDataProp)) { - obj["additionalAuthenticatedData"] = additionalAuthenticatedDataProp - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}:encrypt") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new SecretCiphertext: %#v", obj) - billingProject := "" - - if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating SecretCiphertext: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{crypto_key}}/{{ciphertext}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // we don't set anything on read and instead do it all in create - ciphertext, ok := res["ciphertext"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - if err := d.Set("ciphertext", ciphertext.(string)); err != nil { - return fmt.Errorf("Error setting ciphertext: %s", err) - } - - id, err = replaceVars(d, config, "{{crypto_key}}/{{ciphertext}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating SecretCiphertext %q: %#v", d.Id(), res) - - return resourceKMSSecretCiphertextRead(d, meta) -} - -func resourceKMSSecretCiphertextRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}") - if err != nil { - return err - } - - billingProject := "" - - if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("KMSSecretCiphertext %q", d.Id())) - } - - res, err = resourceKMSSecretCiphertextDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing KMSSecretCiphertext because it no longer exists.") - d.SetId("") - return nil - } - - return nil -} - -func resourceKMSSecretCiphertextDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARNING] KMS SecretCiphertext resources"+ - " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func expandKMSSecretCiphertextPlaintext(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return nil, nil - } - - return base64.StdEncoding.EncodeToString([]byte(v.(string))), nil -} - -func expandKMSSecretCiphertextAdditionalAuthenticatedData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return nil, nil - } - - return base64.StdEncoding.EncodeToString([]byte(v.(string))), nil -} - -func resourceKMSSecretCiphertextDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_billing_account_sink.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_billing_account_sink.go deleted file mode 100644 index 62df4b790e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_billing_account_sink.go +++ /dev/null @@ -1,100 +0,0 @@ -package google - -import ( - "fmt" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceLoggingBillingAccountSink() *schema.Resource { - schm := &schema.Resource{ - Create: resourceLoggingBillingAccountSinkCreate, - Read: resourceLoggingBillingAccountSinkRead, - Delete: resourceLoggingBillingAccountSinkDelete, - Update: resourceLoggingBillingAccountSinkUpdate, - Schema: resourceLoggingSinkSchema(), - Importer: &schema.ResourceImporter{ - State: resourceLoggingSinkImportState("billing_account"), - }, - UseJSONNumber: true, - } - schm.Schema["billing_account"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The billing 
account exported to the sink.`, - } - return schm -} - -func resourceLoggingBillingAccountSinkCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - id, sink := expandResourceLoggingSink(d, "billingAccounts", d.Get("billing_account").(string)) - - // The API will reject any requests that don't explicitly set 'uniqueWriterIdentity' to true. - _, err = config.NewLoggingClient(userAgent).BillingAccounts.Sinks.Create(id.parent(), sink).UniqueWriterIdentity(true).Do() - if err != nil { - return err - } - - d.SetId(id.canonicalId()) - return resourceLoggingBillingAccountSinkRead(d, meta) -} - -func resourceLoggingBillingAccountSinkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - sink, err := config.NewLoggingClient(userAgent).BillingAccounts.Sinks.Get(d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Billing Logging Sink %s", d.Get("name").(string))) - } - - if err := flattenResourceLoggingSink(d, sink); err != nil { - return err - } - - return nil -} - -func resourceLoggingBillingAccountSinkUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - sink, updateMask := expandResourceLoggingSinkForUpdate(d) - - // The API will reject any requests that don't explicitly set 'uniqueWriterIdentity' to true. - _, err = config.NewLoggingClient(userAgent).BillingAccounts.Sinks.Patch(d.Id(), sink). 
- UpdateMask(updateMask).UniqueWriterIdentity(true).Do() - if err != nil { - return err - } - - return resourceLoggingBillingAccountSinkRead(d, meta) -} - -func resourceLoggingBillingAccountSinkDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - _, err = config.NewLoggingClient(userAgent).Projects.Sinks.Delete(d.Id()).Do() - if err != nil { - return err - } - - return nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_log_view.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_log_view.go deleted file mode 100644 index be3c5ecda3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_log_view.go +++ /dev/null @@ -1,315 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: DCL *** -// -// ---------------------------------------------------------------------------- -// -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. -// -// We are not currently able to accept contributions to this file. 
If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - logging "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging" -) - -func ResourceLoggingLogView() *schema.Resource { - return &schema.Resource{ - Create: resourceLoggingLogViewCreate, - Read: resourceLoggingLogViewRead, - Update: resourceLoggingLogViewUpdate, - Delete: resourceLoggingLogViewDelete, - - Importer: &schema.ResourceImporter{ - State: resourceLoggingLogViewImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The bucket of the resource", - }, - - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The resource name of the view. For example: `projects/my-project/locations/global/buckets/my-bucket/views/my-view`", - }, - - "description": { - Type: schema.TypeString, - Optional: true, - Description: "Describes this view.", - }, - - "filter": { - Type: schema.TypeString, - Optional: true, - Description: "Filter that restricts which log entries in a bucket are visible in this view. Filters are restricted to be a logical AND of ==/!= of any of the following: - originating project/folder/organization/billing account. 
- resource type - log id For example: SOURCE(\"projects/myproject\") AND resource.type = \"gce_instance\" AND LOG_ID(\"stdout\")", - }, - - "location": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: "The location of the resource. The supported locations are: global, us-central1, us-east1, us-west1, asia-east1, europe-west1.", - }, - - "parent": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The parent of the resource.", - }, - - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: "Output only. The creation timestamp of the view.", - }, - - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: "Output only. The last update timestamp of the view.", - }, - }, - } -} - -func resourceLoggingLogViewCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &logging.LogView{ - Bucket: dcl.String(d.Get("bucket").(string)), - Name: dcl.String(d.Get("name").(string)), - Description: dcl.String(d.Get("description").(string)), - Filter: dcl.String(d.Get("filter").(string)), - Location: dcl.StringOrNil(d.Get("location").(string)), - Parent: dcl.StringOrNil(d.Get("parent").(string)), - } - - id, err := obj.ID() - if err != nil { - return fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLLoggingClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", 
client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyLogView(context.Background(), obj, directive...) - - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error creating LogView: %s", err) - } - - log.Printf("[DEBUG] Finished creating LogView %q: %#v", d.Id(), res) - - return resourceLoggingLogViewRead(d, meta) -} - -func resourceLoggingLogViewRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &logging.LogView{ - Bucket: dcl.String(d.Get("bucket").(string)), - Name: dcl.String(d.Get("name").(string)), - Description: dcl.String(d.Get("description").(string)), - Filter: dcl.String(d.Get("filter").(string)), - Location: dcl.StringOrNil(d.Get("location").(string)), - Parent: dcl.StringOrNil(d.Get("parent").(string)), - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLLoggingClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.GetLogView(context.Background(), obj) - if err != nil { - resourceName := fmt.Sprintf("LoggingLogView %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("bucket", res.Bucket); err != nil { - return fmt.Errorf("error setting bucket in state: %s", err) - } - if err = d.Set("name", res.Name); err != nil { - return fmt.Errorf("error setting name in state: %s", err) 
- } - if err = d.Set("description", res.Description); err != nil { - return fmt.Errorf("error setting description in state: %s", err) - } - if err = d.Set("filter", res.Filter); err != nil { - return fmt.Errorf("error setting filter in state: %s", err) - } - if err = d.Set("location", res.Location); err != nil { - return fmt.Errorf("error setting location in state: %s", err) - } - if err = d.Set("parent", res.Parent); err != nil { - return fmt.Errorf("error setting parent in state: %s", err) - } - if err = d.Set("create_time", res.CreateTime); err != nil { - return fmt.Errorf("error setting create_time in state: %s", err) - } - if err = d.Set("update_time", res.UpdateTime); err != nil { - return fmt.Errorf("error setting update_time in state: %s", err) - } - - return nil -} -func resourceLoggingLogViewUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &logging.LogView{ - Bucket: dcl.String(d.Get("bucket").(string)), - Name: dcl.String(d.Get("name").(string)), - Description: dcl.String(d.Get("description").(string)), - Filter: dcl.String(d.Get("filter").(string)), - Location: dcl.StringOrNil(d.Get("location").(string)), - Parent: dcl.StringOrNil(d.Get("parent").(string)), - } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLLoggingClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyLogView(context.Background(), obj, directive...) 
- - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error updating LogView: %s", err) - } - - log.Printf("[DEBUG] Finished creating LogView %q: %#v", d.Id(), res) - - return resourceLoggingLogViewRead(d, meta) -} - -func resourceLoggingLogViewDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &logging.LogView{ - Bucket: dcl.String(d.Get("bucket").(string)), - Name: dcl.String(d.Get("name").(string)), - Description: dcl.String(d.Get("description").(string)), - Filter: dcl.String(d.Get("filter").(string)), - Location: dcl.StringOrNil(d.Get("location").(string)), - Parent: dcl.StringOrNil(d.Get("parent").(string)), - } - - log.Printf("[DEBUG] Deleting LogView %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLLoggingClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - if err := client.DeleteLogView(context.Background(), obj); err != nil { - return fmt.Errorf("Error deleting LogView: %s", err) - } - - log.Printf("[DEBUG] Finished deleting LogView %q", d.Id()) - return nil -} - -func resourceLoggingLogViewImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "(?P.+)/locations/(?P.+)/buckets/(?P.+)/views/(?P.+)", - }, d, config); err != nil { - return nil, err - } - - // Replace 
import id for the resource id - id, err := replaceVars(d, config, "{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_metric.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_metric.go deleted file mode 100644 index f6d7d05582..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_metric.go +++ /dev/null @@ -1,1106 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceLoggingMetric() *schema.Resource { - return &schema.Resource{ - Create: resourceLoggingMetricCreate, - Read: resourceLoggingMetricRead, - Update: resourceLoggingMetricUpdate, - Delete: resourceLoggingMetricDelete, - - Importer: &schema.ResourceImporter{ - State: resourceLoggingMetricImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "filter": { - Type: schema.TypeString, - Required: true, - Description: `An advanced logs filter (https://cloud.google.com/logging/docs/view/advanced-filters) which -is used to match log entries.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - Description: `The client-assigned metric identifier. Examples - "error_count", "nginx/requests". -Metric identifiers are limited to 100 characters and can include only the following -characters A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash -character (/) denotes a hierarchy of name pieces, and it cannot be the first character -of the name.`, - }, - "bucket_name": { - Type: schema.TypeString, - Optional: true, - Description: `The resource name of the Log Bucket that owns the Log Metric. Only Log Buckets in projects -are supported. 
The bucket has to be in the same project as the metric.`, - }, - "bucket_options": { - Type: schema.TypeList, - Optional: true, - Description: `The bucketOptions are required when the logs-based metric is using a DISTRIBUTION value type and it -describes the bucket boundaries used to create a histogram of the extracted values.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "explicit_buckets": { - Type: schema.TypeList, - Optional: true, - Description: `Specifies a set of buckets with arbitrary widths.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bounds": { - Type: schema.TypeList, - Required: true, - Description: `The values must be monotonically increasing.`, - Elem: &schema.Schema{ - Type: schema.TypeFloat, - }, - }, - }, - }, - AtLeastOneOf: []string{"bucket_options.0.linear_buckets", "bucket_options.0.exponential_buckets", "bucket_options.0.explicit_buckets"}, - }, - "exponential_buckets": { - Type: schema.TypeList, - Optional: true, - Description: `Specifies an exponential sequence of buckets that have a width that is proportional to the value of -the lower bound. 
Each bucket represents a constant relative uncertainty on a specific value in the bucket.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "growth_factor": { - Type: schema.TypeFloat, - Optional: true, - Description: `Must be greater than 1.`, - AtLeastOneOf: []string{"bucket_options.0.exponential_buckets.0.num_finite_buckets", "bucket_options.0.exponential_buckets.0.growth_factor", "bucket_options.0.exponential_buckets.0.scale"}, - }, - "num_finite_buckets": { - Type: schema.TypeInt, - Optional: true, - Description: `Must be greater than 0.`, - AtLeastOneOf: []string{"bucket_options.0.exponential_buckets.0.num_finite_buckets", "bucket_options.0.exponential_buckets.0.growth_factor", "bucket_options.0.exponential_buckets.0.scale"}, - }, - "scale": { - Type: schema.TypeFloat, - Optional: true, - Description: `Must be greater than 0.`, - AtLeastOneOf: []string{"bucket_options.0.exponential_buckets.0.num_finite_buckets", "bucket_options.0.exponential_buckets.0.growth_factor", "bucket_options.0.exponential_buckets.0.scale"}, - }, - }, - }, - AtLeastOneOf: []string{"bucket_options.0.linear_buckets", "bucket_options.0.exponential_buckets", "bucket_options.0.explicit_buckets"}, - }, - "linear_buckets": { - Type: schema.TypeList, - Optional: true, - Description: `Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). 
-Each bucket represents a constant absolute uncertainty on the specific value in the bucket.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "num_finite_buckets": { - Type: schema.TypeInt, - Optional: true, - Description: `Must be greater than 0.`, - AtLeastOneOf: []string{"bucket_options.0.linear_buckets.0.num_finite_buckets", "bucket_options.0.linear_buckets.0.width", "bucket_options.0.linear_buckets.0.offset"}, - }, - "offset": { - Type: schema.TypeFloat, - Optional: true, - Description: `Lower bound of the first bucket.`, - AtLeastOneOf: []string{"bucket_options.0.linear_buckets.0.num_finite_buckets", "bucket_options.0.linear_buckets.0.width", "bucket_options.0.linear_buckets.0.offset"}, - }, - "width": { - Type: schema.TypeFloat, - Optional: true, - Description: `Must be greater than 0.`, - AtLeastOneOf: []string{"bucket_options.0.linear_buckets.0.num_finite_buckets", "bucket_options.0.linear_buckets.0.width", "bucket_options.0.linear_buckets.0.offset"}, - }, - }, - }, - AtLeastOneOf: []string{"bucket_options.0.linear_buckets", "bucket_options.0.exponential_buckets", "bucket_options.0.explicit_buckets"}, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A description of this metric, which is used in documentation. The maximum length of the -description is 8000 characters.`, - }, - "label_extractors": { - Type: schema.TypeMap, - Optional: true, - Description: `A map from a label key string to an extractor expression which is used to extract data from a log -entry field and assign as the label value. Each label key specified in the LabelDescriptor must -have an associated extractor expression in this map. 
The syntax of the extractor expression is -the same as for the valueExtractor field.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "metric_descriptor": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `The optional metric descriptor associated with the logs-based metric. -If unspecified, it uses a default metric descriptor with a DELTA metric kind, -INT64 value type, with no labels and a unit of "1". Such a metric counts the -number of log entries matching the filter expression.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "metric_kind": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"DELTA", "GAUGE", "CUMULATIVE"}), - Description: `Whether the metric records instantaneous values, changes to a value, etc. -Some combinations of metricKind and valueType might not be supported. -For counter metrics, set this to DELTA. Possible values: ["DELTA", "GAUGE", "CUMULATIVE"]`, - }, - "value_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION", "MONEY"}), - Description: `Whether the measurement is an integer, a floating-point number, etc. -Some combinations of metricKind and valueType might not be supported. -For counter metrics, set this to INT64. Possible values: ["BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION", "MONEY"]`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `A concise name for the metric, which can be displayed in user interfaces. Use sentence case -without an ending period, for example "Request count". This field is optional but it is -recommended to be set for any metrics associated with user-visible concepts, such as Quota.`, - }, - "labels": { - Type: schema.TypeSet, - Optional: true, - Description: `The set of labels that can be used to describe a specific instance of this metric type. 
For -example, the appengine.googleapis.com/http/server/response_latencies metric type has a label -for the HTTP response code, response_code, so you can look at latencies for successful responses -or just for responses that failed.`, - Elem: loggingMetricMetricDescriptorLabelsSchema(), - // Default schema.HashSchema is used. - }, - "unit": { - Type: schema.TypeString, - Optional: true, - Description: `The unit in which the metric value is reported. It is only applicable if the valueType is -'INT64', 'DOUBLE', or 'DISTRIBUTION'. The supported units are a subset of -[The Unified Code for Units of Measure](http://unitsofmeasure.org/ucum.html) standard`, - Default: "1", - }, - }, - }, - }, - "value_extractor": { - Type: schema.TypeString, - Optional: true, - Description: `A valueExtractor is required when using a distribution logs-based metric to extract the values to -record from a log entry. Two functions are supported for value extraction - EXTRACT(field) or -REGEXP_EXTRACT(field, regex). The argument are 1. field - The name of the log entry field from which -the value is to be extracted. 2. regex - A regular expression using the Google RE2 syntax -(https://github.com/google/re2/wiki/Syntax) with a single capture group to extract data from the specified -log entry field. The value of the field is converted to a string before applying the regex. 
It is an -error to specify a regex that does not include exactly one capture group.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func loggingMetricMetricDescriptorLabelsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The label key.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A human-readable description for the label.`, - }, - "value_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"BOOL", "INT64", "STRING", ""}), - Description: `The type of data that can be assigned to the label. Default value: "STRING" Possible values: ["BOOL", "INT64", "STRING"]`, - Default: "STRING", - }, - }, - } -} - -func resourceLoggingMetricCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandLoggingMetricName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandLoggingMetricDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - bucketNameProp, err := expandLoggingMetricBucketName(d.Get("bucket_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket_name"); !isEmptyValue(reflect.ValueOf(bucketNameProp)) && (ok || 
!reflect.DeepEqual(v, bucketNameProp)) { - obj["bucketName"] = bucketNameProp - } - filterProp, err := expandLoggingMetricFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - metricDescriptorProp, err := expandLoggingMetricMetricDescriptor(d.Get("metric_descriptor"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metric_descriptor"); !isEmptyValue(reflect.ValueOf(metricDescriptorProp)) && (ok || !reflect.DeepEqual(v, metricDescriptorProp)) { - obj["metricDescriptor"] = metricDescriptorProp - } - labelExtractorsProp, err := expandLoggingMetricLabelExtractors(d.Get("label_extractors"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_extractors"); !isEmptyValue(reflect.ValueOf(labelExtractorsProp)) && (ok || !reflect.DeepEqual(v, labelExtractorsProp)) { - obj["labelExtractors"] = labelExtractorsProp - } - valueExtractorProp, err := expandLoggingMetricValueExtractor(d.Get("value_extractor"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("value_extractor"); !isEmptyValue(reflect.ValueOf(valueExtractorProp)) && (ok || !reflect.DeepEqual(v, valueExtractorProp)) { - obj["valueExtractor"] = valueExtractorProp - } - bucketOptionsProp, err := expandLoggingMetricBucketOptions(d.Get("bucket_options"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket_options"); !isEmptyValue(reflect.ValueOf(bucketOptionsProp)) && (ok || !reflect.DeepEqual(v, bucketOptionsProp)) { - obj["bucketOptions"] = bucketOptionsProp - } - - lockName, err := replaceVars(d, config, "customMetric/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/metrics") - if 
err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Metric: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Metric: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Metric: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - log.Printf("[DEBUG] Finished creating Metric %q: %#v", d.Id(), res) - - return resourceLoggingMetricRead(d, meta) -} - -func resourceLoggingMetricRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/metrics/{{%name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Metric: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("LoggingMetric %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Metric: %s", err) - } - - if err := d.Set("name", flattenLoggingMetricName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Metric: %s", err) - } - if err := d.Set("description", flattenLoggingMetricDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Metric: %s", err) - } - if err := d.Set("bucket_name", flattenLoggingMetricBucketName(res["bucketName"], d, config)); err != nil { - return fmt.Errorf("Error reading Metric: %s", err) - } - if err := d.Set("filter", flattenLoggingMetricFilter(res["filter"], d, config)); err != nil { - return fmt.Errorf("Error reading Metric: %s", err) - } - if err := d.Set("metric_descriptor", 
flattenLoggingMetricMetricDescriptor(res["metricDescriptor"], d, config)); err != nil { - return fmt.Errorf("Error reading Metric: %s", err) - } - if err := d.Set("label_extractors", flattenLoggingMetricLabelExtractors(res["labelExtractors"], d, config)); err != nil { - return fmt.Errorf("Error reading Metric: %s", err) - } - if err := d.Set("value_extractor", flattenLoggingMetricValueExtractor(res["valueExtractor"], d, config)); err != nil { - return fmt.Errorf("Error reading Metric: %s", err) - } - if err := d.Set("bucket_options", flattenLoggingMetricBucketOptions(res["bucketOptions"], d, config)); err != nil { - return fmt.Errorf("Error reading Metric: %s", err) - } - - return nil -} - -func resourceLoggingMetricUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Metric: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandLoggingMetricName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandLoggingMetricDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - bucketNameProp, err := expandLoggingMetricBucketName(d.Get("bucket_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bucketNameProp)) { - obj["bucketName"] = bucketNameProp - } - filterProp, err 
:= expandLoggingMetricFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - metricDescriptorProp, err := expandLoggingMetricMetricDescriptor(d.Get("metric_descriptor"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metric_descriptor"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metricDescriptorProp)) { - obj["metricDescriptor"] = metricDescriptorProp - } - labelExtractorsProp, err := expandLoggingMetricLabelExtractors(d.Get("label_extractors"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_extractors"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelExtractorsProp)) { - obj["labelExtractors"] = labelExtractorsProp - } - valueExtractorProp, err := expandLoggingMetricValueExtractor(d.Get("value_extractor"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("value_extractor"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, valueExtractorProp)) { - obj["valueExtractor"] = valueExtractorProp - } - bucketOptionsProp, err := expandLoggingMetricBucketOptions(d.Get("bucket_options"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket_options"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bucketOptionsProp)) { - obj["bucketOptions"] = bucketOptionsProp - } - - lockName, err := replaceVars(d, config, "customMetric/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/metrics/{{%name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Metric %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, 
err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Metric %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Metric %q: %#v", d.Id(), res) - } - - return resourceLoggingMetricRead(d, meta) -} - -func resourceLoggingMetricDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Metric: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "customMetric/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/metrics/{{%name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Metric %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Metric") - } - - log.Printf("[DEBUG] Finished deleting Metric %q: %#v", d.Id(), res) - return nil -} - -func resourceLoggingMetricImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return 
[]*schema.ResourceData{d}, nil -} - -func flattenLoggingMetricName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricBucketName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricMetricDescriptor(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["unit"] = - flattenLoggingMetricMetricDescriptorUnit(original["unit"], d, config) - transformed["value_type"] = - flattenLoggingMetricMetricDescriptorValueType(original["valueType"], d, config) - transformed["metric_kind"] = - flattenLoggingMetricMetricDescriptorMetricKind(original["metricKind"], d, config) - transformed["labels"] = - flattenLoggingMetricMetricDescriptorLabels(original["labels"], d, config) - transformed["display_name"] = - flattenLoggingMetricMetricDescriptorDisplayName(original["displayName"], d, config) - return []interface{}{transformed} -} -func flattenLoggingMetricMetricDescriptorUnit(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricMetricDescriptorValueType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricMetricDescriptorMetricKind(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricMetricDescriptorLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := 
schema.NewSet(schema.HashResource(loggingMetricMetricDescriptorLabelsSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "key": flattenLoggingMetricMetricDescriptorLabelsKey(original["key"], d, config), - "description": flattenLoggingMetricMetricDescriptorLabelsDescription(original["description"], d, config), - "value_type": flattenLoggingMetricMetricDescriptorLabelsValueType(original["valueType"], d, config), - }) - } - return transformed -} -func flattenLoggingMetricMetricDescriptorLabelsKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricMetricDescriptorLabelsDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricMetricDescriptorLabelsValueType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { - return "STRING" - } - - return v -} - -func flattenLoggingMetricMetricDescriptorDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricLabelExtractors(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricValueExtractor(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricBucketOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["linear_buckets"] = - flattenLoggingMetricBucketOptionsLinearBuckets(original["linearBuckets"], d, config) - transformed["exponential_buckets"] = - 
flattenLoggingMetricBucketOptionsExponentialBuckets(original["exponentialBuckets"], d, config) - transformed["explicit_buckets"] = - flattenLoggingMetricBucketOptionsExplicitBuckets(original["explicitBuckets"], d, config) - return []interface{}{transformed} -} -func flattenLoggingMetricBucketOptionsLinearBuckets(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["num_finite_buckets"] = - flattenLoggingMetricBucketOptionsLinearBucketsNumFiniteBuckets(original["numFiniteBuckets"], d, config) - transformed["width"] = - flattenLoggingMetricBucketOptionsLinearBucketsWidth(original["width"], d, config) - transformed["offset"] = - flattenLoggingMetricBucketOptionsLinearBucketsOffset(original["offset"], d, config) - return []interface{}{transformed} -} -func flattenLoggingMetricBucketOptionsLinearBucketsNumFiniteBuckets(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenLoggingMetricBucketOptionsLinearBucketsWidth(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricBucketOptionsLinearBucketsOffset(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricBucketOptionsExponentialBuckets(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["num_finite_buckets"] = - flattenLoggingMetricBucketOptionsExponentialBucketsNumFiniteBuckets(original["numFiniteBuckets"], d, config) - transformed["growth_factor"] = - flattenLoggingMetricBucketOptionsExponentialBucketsGrowthFactor(original["growthFactor"], d, config) - transformed["scale"] = - flattenLoggingMetricBucketOptionsExponentialBucketsScale(original["scale"], d, config) - return []interface{}{transformed} -} -func flattenLoggingMetricBucketOptionsExponentialBucketsNumFiniteBuckets(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenLoggingMetricBucketOptionsExponentialBucketsGrowthFactor(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricBucketOptionsExponentialBucketsScale(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricBucketOptionsExplicitBuckets(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["bounds"] = - flattenLoggingMetricBucketOptionsExplicitBucketsBounds(original["bounds"], d, config) - return []interface{}{transformed} -} -func flattenLoggingMetricBucketOptionsExplicitBucketsBounds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandLoggingMetricName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandLoggingMetricDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricBucketName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUnit, err := expandLoggingMetricMetricDescriptorUnit(original["unit"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUnit); val.IsValid() && !isEmptyValue(val) { - transformed["unit"] = transformedUnit - } - - transformedValueType, err := expandLoggingMetricMetricDescriptorValueType(original["value_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValueType); val.IsValid() && !isEmptyValue(val) { - transformed["valueType"] = transformedValueType - } - - transformedMetricKind, err := expandLoggingMetricMetricDescriptorMetricKind(original["metric_kind"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMetricKind); val.IsValid() && !isEmptyValue(val) { - transformed["metricKind"] = transformedMetricKind - } - - transformedLabels, err := expandLoggingMetricMetricDescriptorLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - transformedDisplayName, err := expandLoggingMetricMetricDescriptorDisplayName(original["display_name"], d, config) - if err != nil { - 
return nil, err - } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !isEmptyValue(val) { - transformed["displayName"] = transformedDisplayName - } - - return transformed, nil -} - -func expandLoggingMetricMetricDescriptorUnit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptorValueType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptorMetricKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptorLabels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandLoggingMetricMetricDescriptorLabelsKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - transformedDescription, err := expandLoggingMetricMetricDescriptorLabelsDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedValueType, err := expandLoggingMetricMetricDescriptorLabelsValueType(original["value_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValueType); val.IsValid() && !isEmptyValue(val) { - transformed["valueType"] = transformedValueType - } - - req = append(req, transformed) - } - return req, nil -} - -func 
expandLoggingMetricMetricDescriptorLabelsKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptorLabelsDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptorLabelsValueType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptorDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricLabelExtractors(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandLoggingMetricValueExtractor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricBucketOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLinearBuckets, err := expandLoggingMetricBucketOptionsLinearBuckets(original["linear_buckets"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLinearBuckets); val.IsValid() && !isEmptyValue(val) { - transformed["linearBuckets"] = transformedLinearBuckets - } - - transformedExponentialBuckets, err := expandLoggingMetricBucketOptionsExponentialBuckets(original["exponential_buckets"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExponentialBuckets); val.IsValid() && !isEmptyValue(val) { - transformed["exponentialBuckets"] 
= transformedExponentialBuckets - } - - transformedExplicitBuckets, err := expandLoggingMetricBucketOptionsExplicitBuckets(original["explicit_buckets"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedExplicitBuckets); val.IsValid() && !isEmptyValue(val) { - transformed["explicitBuckets"] = transformedExplicitBuckets - } - - return transformed, nil -} - -func expandLoggingMetricBucketOptionsLinearBuckets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNumFiniteBuckets, err := expandLoggingMetricBucketOptionsLinearBucketsNumFiniteBuckets(original["num_finite_buckets"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNumFiniteBuckets); val.IsValid() && !isEmptyValue(val) { - transformed["numFiniteBuckets"] = transformedNumFiniteBuckets - } - - transformedWidth, err := expandLoggingMetricBucketOptionsLinearBucketsWidth(original["width"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWidth); val.IsValid() && !isEmptyValue(val) { - transformed["width"] = transformedWidth - } - - transformedOffset, err := expandLoggingMetricBucketOptionsLinearBucketsOffset(original["offset"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOffset); val.IsValid() && !isEmptyValue(val) { - transformed["offset"] = transformedOffset - } - - return transformed, nil -} - -func expandLoggingMetricBucketOptionsLinearBucketsNumFiniteBuckets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricBucketOptionsLinearBucketsWidth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func expandLoggingMetricBucketOptionsLinearBucketsOffset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricBucketOptionsExponentialBuckets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNumFiniteBuckets, err := expandLoggingMetricBucketOptionsExponentialBucketsNumFiniteBuckets(original["num_finite_buckets"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNumFiniteBuckets); val.IsValid() && !isEmptyValue(val) { - transformed["numFiniteBuckets"] = transformedNumFiniteBuckets - } - - transformedGrowthFactor, err := expandLoggingMetricBucketOptionsExponentialBucketsGrowthFactor(original["growth_factor"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGrowthFactor); val.IsValid() && !isEmptyValue(val) { - transformed["growthFactor"] = transformedGrowthFactor - } - - transformedScale, err := expandLoggingMetricBucketOptionsExponentialBucketsScale(original["scale"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { - transformed["scale"] = transformedScale - } - - return transformed, nil -} - -func expandLoggingMetricBucketOptionsExponentialBucketsNumFiniteBuckets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricBucketOptionsExponentialBucketsGrowthFactor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricBucketOptionsExponentialBucketsScale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandLoggingMetricBucketOptionsExplicitBuckets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBounds, err := expandLoggingMetricBucketOptionsExplicitBucketsBounds(original["bounds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBounds); val.IsValid() && !isEmptyValue(val) { - transformed["bounds"] = transformedBounds - } - - return transformed, nil -} - -func expandLoggingMetricBucketOptionsExplicitBucketsBounds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_project_bucket_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_project_bucket_config.go deleted file mode 100644 index 84dbd74de9..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_project_bucket_config.go +++ /dev/null @@ -1,322 +0,0 @@ -package google - -import ( - "fmt" - "log" - "strconv" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -var loggingProjectBucketConfigSchema = map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent project that contains the logging bucket.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the bucket`, - }, - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location of the bucket.`, - }, - "bucket_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of 
the logging bucket. Logging automatically creates two log buckets: _Required and _Default.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: `An optional description for this bucket.`, - }, - "retention_days": { - Type: schema.TypeInt, - Optional: true, - Default: 30, - Description: `Logs will be retained by default for this amount of time, after which they will automatically be deleted. The minimum retention period is 1 day. If this value is set to zero at bucket creation time, the default time of 30 days will be used.`, - }, - "enable_analytics": { - Type: schema.TypeBool, - Optional: true, - Description: `Enable log analytics for the bucket. Cannot be disabled once enabled.`, - DiffSuppressFunc: enableAnalyticsBackwardsChangeDiffSuppress, - }, - "lifecycle_state": { - Type: schema.TypeString, - Computed: true, - Description: `The bucket's lifecycle such as active or deleted.`, - }, - "cmek_settings": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Description: `The CMEK settings of the log bucket. If present, new log entries written to this log bucket are encrypted using the CMEK key provided in this configuration. If a log bucket has CMEK settings, the CMEK settings cannot be disabled later by updating the log bucket. Changing the KMS key is allowed.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the CMEK settings.`, - }, - "kms_key_name": { - Type: schema.TypeString, - Required: true, - Description: `The resource name for the configured Cloud KMS key. -KMS key name format: -"projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" -To enable CMEK for the bucket, set this field to a valid kmsKeyName for which the associated service account has the required cloudkms.cryptoKeyEncrypterDecrypter roles assigned for the key. 
-The Cloud KMS key used by the bucket can be updated by changing the kmsKeyName to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked. -See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/routing/managed-encryption-storage) for more information.`, - }, - "kms_key_version_name": { - Type: schema.TypeString, - Computed: true, - Description: `The CryptoKeyVersion resource name for the configured Cloud KMS key. -KMS key name format: -"projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]/cryptoKeyVersions/[VERSION]" -For example: -"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1" -This is a read-only field used to convey the specific configured CryptoKeyVersion of kms_key that has been configured. It will be populated in cases where the CMEK settings are bound to a single key version.`, - }, - "service_account_id": { - Type: schema.TypeString, - Computed: true, - Description: `The service account associated with a project for which CMEK will apply. -Before enabling CMEK for a logging bucket, you must first assign the cloudkms.cryptoKeyEncrypterDecrypter role to the service account associated with the project for which CMEK will apply. Use [v2.getCmekSettings](https://cloud.google.com/logging/docs/reference/v2/rest/v2/TopLevel/getCmekSettings#google.logging.v2.ConfigServiceV2.GetCmekSettings) to obtain the service account ID. 
-See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/routing/managed-encryption-storage) for more information.`, - }, - }, - }, - }, -} - -func projectBucketConfigID(d *schema.ResourceData, config *Config) (string, error) { - project := d.Get("project").(string) - location := d.Get("location").(string) - bucketID := d.Get("bucket_id").(string) - - if !strings.HasPrefix(project, "project") { - project = "projects/" + project - } - - id := fmt.Sprintf("%s/locations/%s/buckets/%s", project, location, bucketID) - return id, nil -} - -// Create Logging Bucket config -func ResourceLoggingProjectBucketConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceLoggingProjectBucketConfigAcquireOrCreate("project", projectBucketConfigID), - Read: resourceLoggingProjectBucketConfigRead, - Update: resourceLoggingProjectBucketConfigUpdate, - Delete: resourceLoggingBucketConfigDelete, - Importer: &schema.ResourceImporter{ - State: resourceLoggingBucketConfigImportState("project"), - }, - Schema: loggingProjectBucketConfigSchema, - UseJSONNumber: true, - } -} - -func resourceLoggingProjectBucketConfigAcquireOrCreate(parentType string, iDFunc loggingBucketConfigIDFunc) func(*schema.ResourceData, interface{}) error { - return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - id, err := iDFunc(d, config) - if err != nil { - return err - } - - if parentType == "project" { - //logging bucket can be created only at the project level, in future api may allow for folder, org and other parent resources - - log.Printf("[DEBUG] Fetching logging bucket config: %#v", id) - url, err := replaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", id)) - if err != nil { - return err - } - - res, _ := SendRequest(config, "GET", "", url, userAgent, nil) - if res == nil { - log.Printf("[DEGUG] Loggin Bucket not exist %s", id) - 
// we need to pass the id in here because we don't want to set it in state - // until we know there won't be any errors on create - return resourceLoggingProjectBucketConfigCreate(d, meta, id) - } - } - - d.SetId(id) - - return resourceLoggingProjectBucketConfigUpdate(d, meta) - } -} - -func resourceLoggingProjectBucketConfigCreate(d *schema.ResourceData, meta interface{}, id string) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["name"] = d.Get("name") - obj["description"] = d.Get("description") - obj["retentionDays"] = d.Get("retention_days") - obj["analyticsEnabled"] = d.Get("enable_analytics") - obj["locked"] = d.Get("locked") - obj["cmekSettings"] = expandCmekSettings(d.Get("cmek_settings")) - - url, err := replaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/locations/{{location}}/buckets?bucketId={{bucket_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Bucket: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return err - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Bucket: %s", err) - } - - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Bucket %q: %#v", d.Id(), res) - - return resourceLoggingProjectBucketConfigRead(d, meta) -} - -func resourceLoggingProjectBucketConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - log.Printf("[DEBUG] Fetching logging bucket config: %#v", d.Id()) - - 
url, err := replaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", d.Id())) - if err != nil { - return err - } - - res, err := SendRequest(config, "GET", "", url, userAgent, nil) - if err != nil { - log.Printf("[WARN] Unable to acquire logging bucket config at %s", d.Id()) - - d.SetId("") - return err - } - - if err := d.Set("name", res["name"]); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("description", res["description"]); err != nil { - return fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("lifecycle_state", res["lifecycleState"]); err != nil { - return fmt.Errorf("Error setting lifecycle_state: %s", err) - } - if err := d.Set("retention_days", res["retentionDays"]); err != nil { - return fmt.Errorf("Error setting retention_days: %s", err) - } - if err := d.Set("enable_analytics", res["analyticsEnabled"]); err != nil { - return fmt.Errorf("Error setting enable_analytics: %s", err) - } - - if err := d.Set("cmek_settings", flattenCmekSettings(res["cmekSettings"])); err != nil { - return fmt.Errorf("Error setting cmek_settings: %s", err) - } - - return nil -} - -func resourceLoggingProjectBucketConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - - url, err := replaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", d.Id())) - if err != nil { - return err - } - - updateMaskAnalytics := []string{} - // Check if analytics is being enabled. Analytics enablement is an atomic operation and can not be performed while other fields - // are being updated, so we enable analytics before updating the rest of the fields. 
- if d.HasChange("enable_analytics") { - obj["analyticsEnabled"] = d.Get("enable_analytics") - updateMaskAnalytics = append(updateMaskAnalytics, "analyticsEnabled") - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMaskAnalytics, ",")}) - if err != nil { - return err - } - _, err = SendRequestWithTimeout(config, "PATCH", "", url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Logging Bucket Config %q: %s", d.Id(), err) - } - } - - obj["retentionDays"] = d.Get("retention_days") - obj["description"] = d.Get("description") - obj["cmekSettings"] = expandCmekSettings(d.Get("cmek_settings")) - updateMask := []string{} - if d.HasChange("retention_days") { - updateMask = append(updateMask, "retentionDays") - } - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - if d.HasChange("cmek_settings") { - updateMask = append(updateMask, "cmekSettings") - } - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - if len(updateMask) > 0 { - _, err = SendRequestWithTimeout(config, "PATCH", "", url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - } - if err != nil { - return fmt.Errorf("Error updating Logging Bucket Config %q: %s", d.Id(), err) - } - - return resourceLoggingProjectBucketConfigRead(d, meta) -} - -func enableAnalyticsBackwardsChangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - oldValue, _ := strconv.ParseBool(old) - if oldValue { - return true - } - return false -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_manager_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_manager_operation.go deleted file mode 100644 index 6dd25ba4b7..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_manager_operation.go +++ /dev/null @@ -1,73 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type ResourceManagerOperationWaiter struct { - Config *Config - UserAgent string - CommonOperationWaiter -} - -func (w *ResourceManagerOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.ResourceManagerBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", "", url, w.UserAgent, nil) -} - -func createResourceManagerWaiter(config *Config, op map[string]interface{}, activity, userAgent string) (*ResourceManagerOperationWaiter, error) { - w := &ResourceManagerOperationWaiter{ - Config: config, - UserAgent: userAgent, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func ResourceManagerOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout time.Duration) error { - w, err := createResourceManagerWaiter(config, op, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func ResourceManagerOperationWaitTime(config *Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createResourceManagerWaiter(config, op, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_ml_engine_model.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_ml_engine_model.go deleted file mode 100644 index 78465f5a95..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_ml_engine_model.go +++ /dev/null @@ -1,420 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceMLEngineModel() *schema.Resource { - return &schema.Resource{ - Create: resourceMLEngineModelCreate, - Read: resourceMLEngineModelRead, - Delete: resourceMLEngineModelDelete, - - Importer: &schema.ResourceImporter{ - State: resourceMLEngineModelImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name specified for the model.`, - }, - "default_version": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The default version of the model. 
This version will be used to handle -prediction requests that do not specify a version.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name specified for the version when it was created.`, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The description specified for the model when it was created.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `One or more labels that you can add, to organize your models.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "online_prediction_console_logging": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If true, online prediction nodes send stderr and stdout streams to Stackdriver Logging`, - }, - "online_prediction_logging": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If true, online prediction access logs are sent to StackDriver Logging.`, - }, - "regions": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The list of regions where the model is going to be deployed. 
-Currently only one region per model is supported`, - MaxItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMLEngineModelCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandMLEngineModelName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandMLEngineModelDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - defaultVersionProp, err := expandMLEngineModelDefaultVersion(d.Get("default_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_version"); !isEmptyValue(reflect.ValueOf(defaultVersionProp)) && (ok || !reflect.DeepEqual(v, defaultVersionProp)) { - obj["defaultVersion"] = defaultVersionProp - } - regionsProp, err := expandMLEngineModelRegions(d.Get("regions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("regions"); !isEmptyValue(reflect.ValueOf(regionsProp)) && (ok || !reflect.DeepEqual(v, regionsProp)) { - obj["regions"] = regionsProp - } - onlinePredictionLoggingProp, err := expandMLEngineModelOnlinePredictionLogging(d.Get("online_prediction_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("online_prediction_logging"); !isEmptyValue(reflect.ValueOf(onlinePredictionLoggingProp)) 
&& (ok || !reflect.DeepEqual(v, onlinePredictionLoggingProp)) { - obj["onlinePredictionLogging"] = onlinePredictionLoggingProp - } - onlinePredictionConsoleLoggingProp, err := expandMLEngineModelOnlinePredictionConsoleLogging(d.Get("online_prediction_console_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("online_prediction_console_logging"); !isEmptyValue(reflect.ValueOf(onlinePredictionConsoleLoggingProp)) && (ok || !reflect.DeepEqual(v, onlinePredictionConsoleLoggingProp)) { - obj["onlinePredictionConsoleLogging"] = onlinePredictionConsoleLoggingProp - } - labelsProp, err := expandMLEngineModelLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{MLEngineBasePath}}projects/{{project}}/models") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Model: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Model: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Model: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/models/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Model %q: %#v", d.Id(), res) - - return resourceMLEngineModelRead(d, meta) -} - -func resourceMLEngineModelRead(d *schema.ResourceData, meta interface{}) error { - config := 
meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MLEngineBasePath}}projects/{{project}}/models/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Model: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("MLEngineModel %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Model: %s", err) - } - - if err := d.Set("name", flattenMLEngineModelName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Model: %s", err) - } - if err := d.Set("description", flattenMLEngineModelDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Model: %s", err) - } - if err := d.Set("default_version", flattenMLEngineModelDefaultVersion(res["defaultVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading Model: %s", err) - } - if err := d.Set("regions", flattenMLEngineModelRegions(res["regions"], d, config)); err != nil { - return fmt.Errorf("Error reading Model: %s", err) - } - if err := d.Set("online_prediction_logging", flattenMLEngineModelOnlinePredictionLogging(res["onlinePredictionLogging"], d, config)); err != nil { - return fmt.Errorf("Error reading Model: %s", err) - } - if err := d.Set("online_prediction_console_logging", flattenMLEngineModelOnlinePredictionConsoleLogging(res["onlinePredictionConsoleLogging"], d, config)); err != nil { - return fmt.Errorf("Error reading Model: %s", err) - } - if err := d.Set("labels", 
flattenMLEngineModelLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Model: %s", err) - } - - return nil -} - -func resourceMLEngineModelDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Model: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{MLEngineBasePath}}projects/{{project}}/models/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Model %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Model") - } - - err = MLEngineOperationWaitTime( - config, res, project, "Deleting Model", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Model %q: %#v", d.Id(), res) - return nil -} - -func resourceMLEngineModelImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/models/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/models/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenMLEngineModelName(v interface{}, d *schema.ResourceData, 
config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenMLEngineModelDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMLEngineModelDefaultVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenMLEngineModelDefaultVersionName(original["name"], d, config) - return []interface{}{transformed} -} -func flattenMLEngineModelDefaultVersionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMLEngineModelRegions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMLEngineModelOnlinePredictionLogging(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMLEngineModelOnlinePredictionConsoleLogging(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMLEngineModelLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMLEngineModelName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMLEngineModelDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMLEngineModelDefaultVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandMLEngineModelDefaultVersionName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandMLEngineModelDefaultVersionName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMLEngineModelRegions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMLEngineModelOnlinePredictionLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMLEngineModelOnlinePredictionConsoleLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMLEngineModelLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_alert_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_alert_policy.go deleted file mode 100644 index c335dbe13a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_alert_policy.go +++ /dev/null @@ -1,2427 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// API does not return a value for REDUCE_NONE -func crossSeriesReducerDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - return (new == "" && old == "REDUCE_NONE") || (new == "REDUCE_NONE" && old == "") -} - -func ResourceMonitoringAlertPolicy() *schema.Resource { - return &schema.Resource{ - Create: resourceMonitoringAlertPolicyCreate, - Read: resourceMonitoringAlertPolicyRead, - Update: resourceMonitoringAlertPolicyUpdate, - Delete: resourceMonitoringAlertPolicyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceMonitoringAlertPolicyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "combiner": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"AND", "OR", "AND_WITH_MATCHING_RESOURCE"}), - Description: `How to combine the results of multiple conditions to -determine if an incident should be opened. Possible values: ["AND", "OR", "AND_WITH_MATCHING_RESOURCE"]`, - }, - "conditions": { - Type: schema.TypeList, - Required: true, - Description: `A list of conditions for the policy. The conditions are combined by -AND or OR according to the combiner field. If the combined conditions -evaluate to true, then an incident is created. A policy can have from -one to six conditions.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `A short name or phrase used to identify the -condition in dashboards, notifications, and -incidents. 
To avoid confusion, don't use the same -display name for multiple conditions in the same -policy.`, - }, - "condition_absent": { - Type: schema.TypeList, - Optional: true, - Description: `A condition that checks that a time series -continues to receive new data points.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "duration": { - Type: schema.TypeString, - Required: true, - Description: `The amount of time that a time series must -fail to report new data to be considered -failing. Currently, only values that are a -multiple of a minute--e.g. 60s, 120s, or 300s ---are supported.`, - }, - "aggregations": { - Type: schema.TypeList, - Optional: true, - Description: `Specifies the alignment of data points in -individual time series as well as how to -combine the retrieved time series together -(such as when aggregating multiple streams -on each resource to a single stream for each -resource or when aggregating streams across -all members of a group of resources). -Multiple aggregations are applied in the -order specified.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "alignment_period": { - Type: schema.TypeString, - Optional: true, - Description: `The alignment period for per-time -series alignment. If present, -alignmentPeriod must be at least -60 seconds. After per-time series -alignment, each time series will -contain data points only on the -period boundaries. If -perSeriesAligner is not specified -or equals ALIGN_NONE, then this -field is ignored. 
If -perSeriesAligner is specified and -does not equal ALIGN_NONE, then -this field must be defined; -otherwise an error is returned.`, - }, - "cross_series_reducer": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", ""}), - DiffSuppressFunc: crossSeriesReducerDiffSuppress, - Description: `The approach to be used to combine -time series. Not all reducer -functions may be applied to all -time series, depending on the -metric type and the value type of -the original time series. -Reduction may change the metric -type of value type of the time -series.Time series data must be -aligned in order to perform cross- -time series reduction. If -crossSeriesReducer is specified, -then perSeriesAligner must be -specified and not equal ALIGN_NONE -and alignmentPeriod must be -specified; otherwise, an error is -returned. Possible values: ["REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05"]`, - }, - "group_by_fields": { - Type: schema.TypeList, - Optional: true, - Description: `The set of fields to preserve when -crossSeriesReducer is specified. -The groupByFields determine how -the time series are partitioned -into subsets prior to applying the -aggregation function. Each subset -contains time series that have the -same value for each of the -grouping fields. Each individual -time series is a member of exactly -one subset. The crossSeriesReducer -is applied to each subset of time -series. 
It is not possible to -reduce across different resource -types, so this field implicitly -contains resource.type. Fields not -specified in groupByFields are -aggregated away. If groupByFields -is not specified and all the time -series have the same resource -type, then the time series are -aggregated into a single output -time series. If crossSeriesReducer -is not defined, this field is -ignored.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "per_series_aligner": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE", ""}), - Description: `The approach to be used to align -individual time series. Not all -alignment functions may be applied -to all time series, depending on -the metric type and value type of -the original time series. -Alignment may change the metric -type or the value type of the time -series.Time series data must be -aligned in order to perform cross- -time series reduction. If -crossSeriesReducer is specified, -then perSeriesAligner must be -specified and not equal ALIGN_NONE -and alignmentPeriod must be -specified; otherwise, an error is -returned. 
Possible values: ["ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE"]`, - }, - }, - }, - }, - "filter": { - Type: schema.TypeString, - Optional: true, - Description: `A filter that identifies which time series -should be compared with the threshold.The -filter is similar to the one that is -specified in the -MetricService.ListTimeSeries request (that -call is useful to verify the time series -that will be retrieved / processed) and must -specify the metric type and optionally may -contain restrictions on resource type, -resource labels, and metric labels. This -field may not exceed 2048 Unicode characters -in length.`, - }, - "trigger": { - Type: schema.TypeList, - Optional: true, - Description: `The number/percent of time series for which -the comparison must hold in order for the -condition to trigger. If unspecified, then -the condition will trigger if the comparison -is true for any of the time series that have -been identified by filter and aggregations.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "count": { - Type: schema.TypeInt, - Optional: true, - Description: `The absolute number of time series -that must fail the predicate for the -condition to be triggered.`, - }, - "percent": { - Type: schema.TypeFloat, - Optional: true, - Description: `The percentage of time series that -must fail the predicate for the -condition to be triggered.`, - }, - }, - }, - }, - }, - }, - }, - "condition_matched_log": { - Type: schema.TypeList, - Optional: true, - Description: `A condition that checks for log messages matching given constraints. 
-If set, no other conditions can be present.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "filter": { - Type: schema.TypeString, - Required: true, - Description: `A logs-based filter.`, - }, - "label_extractors": { - Type: schema.TypeMap, - Optional: true, - Description: `A map from a label key to an extractor expression, which is used to -extract the value for this label key. Each entry in this map is -a specification for how data should be extracted from log entries that -match filter. Each combination of extracted values is treated as -a separate rule for the purposes of triggering notifications. -Label keys and corresponding values can be used in notifications -generated by this condition.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "condition_monitoring_query_language": { - Type: schema.TypeList, - Optional: true, - Description: `A Monitoring Query Language query that outputs a boolean stream`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "duration": { - Type: schema.TypeString, - Required: true, - Description: `The amount of time that a time series must -violate the threshold to be considered -failing. Currently, only values that are a -multiple of a minute--e.g., 0, 60, 120, or -300 seconds--are supported. If an invalid -value is given, an error will be returned. 
-When choosing a duration, it is useful to -keep in mind the frequency of the underlying -time series data (which may also be affected -by any alignments specified in the -aggregations field); a good duration is long -enough so that a single outlier does not -generate spurious alerts, but short enough -that unhealthy states are detected and -alerted on quickly.`, - }, - "query": { - Type: schema.TypeString, - Required: true, - Description: `Monitoring Query Language query that outputs a boolean stream.`, - }, - "evaluation_missing_data": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP", ""}), - Description: `A condition control that determines how -metric-threshold conditions are evaluated when -data stops arriving. Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"]`, - }, - "trigger": { - Type: schema.TypeList, - Optional: true, - Description: `The number/percent of time series for which -the comparison must hold in order for the -condition to trigger. 
If unspecified, then -the condition will trigger if the comparison -is true for any of the time series that have -been identified by filter and aggregations, -or by the ratio, if denominator_filter and -denominator_aggregations are specified.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "count": { - Type: schema.TypeInt, - Optional: true, - Description: `The absolute number of time series -that must fail the predicate for the -condition to be triggered.`, - }, - "percent": { - Type: schema.TypeFloat, - Optional: true, - Description: `The percentage of time series that -must fail the predicate for the -condition to be triggered.`, - }, - }, - }, - }, - }, - }, - }, - "condition_threshold": { - Type: schema.TypeList, - Optional: true, - Description: `A condition that compares a time series against a -threshold.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "comparison": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"COMPARISON_GT", "COMPARISON_GE", "COMPARISON_LT", "COMPARISON_LE", "COMPARISON_EQ", "COMPARISON_NE"}), - Description: `The comparison to apply between the time -series (indicated by filter and aggregation) -and the threshold (indicated by -threshold_value). The comparison is applied -on each time series, with the time series on -the left-hand side and the threshold on the -right-hand side. Only COMPARISON_LT and -COMPARISON_GT are supported currently. Possible values: ["COMPARISON_GT", "COMPARISON_GE", "COMPARISON_LT", "COMPARISON_LE", "COMPARISON_EQ", "COMPARISON_NE"]`, - }, - "duration": { - Type: schema.TypeString, - Required: true, - Description: `The amount of time that a time series must -violate the threshold to be considered -failing. Currently, only values that are a -multiple of a minute--e.g., 0, 60, 120, or -300 seconds--are supported. If an invalid -value is given, an error will be returned. 
-When choosing a duration, it is useful to -keep in mind the frequency of the underlying -time series data (which may also be affected -by any alignments specified in the -aggregations field); a good duration is long -enough so that a single outlier does not -generate spurious alerts, but short enough -that unhealthy states are detected and -alerted on quickly.`, - }, - "aggregations": { - Type: schema.TypeList, - Optional: true, - Description: `Specifies the alignment of data points in -individual time series as well as how to -combine the retrieved time series together -(such as when aggregating multiple streams -on each resource to a single stream for each -resource or when aggregating streams across -all members of a group of resources). -Multiple aggregations are applied in the -order specified.This field is similar to the -one in the MetricService.ListTimeSeries -request. It is advisable to use the -ListTimeSeries method when debugging this -field.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "alignment_period": { - Type: schema.TypeString, - Optional: true, - Description: `The alignment period for per-time -series alignment. If present, -alignmentPeriod must be at least -60 seconds. After per-time series -alignment, each time series will -contain data points only on the -period boundaries. If -perSeriesAligner is not specified -or equals ALIGN_NONE, then this -field is ignored. 
If -perSeriesAligner is specified and -does not equal ALIGN_NONE, then -this field must be defined; -otherwise an error is returned.`, - }, - "cross_series_reducer": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", ""}), - DiffSuppressFunc: crossSeriesReducerDiffSuppress, - Description: `The approach to be used to combine -time series. Not all reducer -functions may be applied to all -time series, depending on the -metric type and the value type of -the original time series. -Reduction may change the metric -type of value type of the time -series.Time series data must be -aligned in order to perform cross- -time series reduction. If -crossSeriesReducer is specified, -then perSeriesAligner must be -specified and not equal ALIGN_NONE -and alignmentPeriod must be -specified; otherwise, an error is -returned. Possible values: ["REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05"]`, - }, - "group_by_fields": { - Type: schema.TypeList, - Optional: true, - Description: `The set of fields to preserve when -crossSeriesReducer is specified. -The groupByFields determine how -the time series are partitioned -into subsets prior to applying the -aggregation function. Each subset -contains time series that have the -same value for each of the -grouping fields. Each individual -time series is a member of exactly -one subset. The crossSeriesReducer -is applied to each subset of time -series. 
It is not possible to -reduce across different resource -types, so this field implicitly -contains resource.type. Fields not -specified in groupByFields are -aggregated away. If groupByFields -is not specified and all the time -series have the same resource -type, then the time series are -aggregated into a single output -time series. If crossSeriesReducer -is not defined, this field is -ignored.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "per_series_aligner": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE", ""}), - Description: `The approach to be used to align -individual time series. Not all -alignment functions may be applied -to all time series, depending on -the metric type and value type of -the original time series. -Alignment may change the metric -type or the value type of the time -series.Time series data must be -aligned in order to perform cross- -time series reduction. If -crossSeriesReducer is specified, -then perSeriesAligner must be -specified and not equal ALIGN_NONE -and alignmentPeriod must be -specified; otherwise, an error is -returned. 
Possible values: ["ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE"]`, - }, - }, - }, - }, - "denominator_aggregations": { - Type: schema.TypeList, - Optional: true, - Description: `Specifies the alignment of data points in -individual time series selected by -denominatorFilter as well as how to combine -the retrieved time series together (such as -when aggregating multiple streams on each -resource to a single stream for each -resource or when aggregating streams across -all members of a group of resources).When -computing ratios, the aggregations and -denominator_aggregations fields must use the -same alignment period and produce time -series that have the same periodicity and -labels.This field is similar to the one in -the MetricService.ListTimeSeries request. It -is advisable to use the ListTimeSeries -method when debugging this field.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "alignment_period": { - Type: schema.TypeString, - Optional: true, - Description: `The alignment period for per-time -series alignment. If present, -alignmentPeriod must be at least -60 seconds. After per-time series -alignment, each time series will -contain data points only on the -period boundaries. If -perSeriesAligner is not specified -or equals ALIGN_NONE, then this -field is ignored. 
If -perSeriesAligner is specified and -does not equal ALIGN_NONE, then -this field must be defined; -otherwise an error is returned.`, - }, - "cross_series_reducer": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", ""}), - DiffSuppressFunc: crossSeriesReducerDiffSuppress, - Description: `The approach to be used to combine -time series. Not all reducer -functions may be applied to all -time series, depending on the -metric type and the value type of -the original time series. -Reduction may change the metric -type of value type of the time -series.Time series data must be -aligned in order to perform cross- -time series reduction. If -crossSeriesReducer is specified, -then perSeriesAligner must be -specified and not equal ALIGN_NONE -and alignmentPeriod must be -specified; otherwise, an error is -returned. Possible values: ["REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05"]`, - }, - "group_by_fields": { - Type: schema.TypeList, - Optional: true, - Description: `The set of fields to preserve when -crossSeriesReducer is specified. -The groupByFields determine how -the time series are partitioned -into subsets prior to applying the -aggregation function. Each subset -contains time series that have the -same value for each of the -grouping fields. Each individual -time series is a member of exactly -one subset. The crossSeriesReducer -is applied to each subset of time -series. 
It is not possible to -reduce across different resource -types, so this field implicitly -contains resource.type. Fields not -specified in groupByFields are -aggregated away. If groupByFields -is not specified and all the time -series have the same resource -type, then the time series are -aggregated into a single output -time series. If crossSeriesReducer -is not defined, this field is -ignored.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "per_series_aligner": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE", ""}), - Description: `The approach to be used to align -individual time series. Not all -alignment functions may be applied -to all time series, depending on -the metric type and value type of -the original time series. -Alignment may change the metric -type or the value type of the time -series.Time series data must be -aligned in order to perform cross- -time series reduction. If -crossSeriesReducer is specified, -then perSeriesAligner must be -specified and not equal ALIGN_NONE -and alignmentPeriod must be -specified; otherwise, an error is -returned. 
Possible values: ["ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE"]`, - }, - }, - }, - }, - "denominator_filter": { - Type: schema.TypeString, - Optional: true, - Description: `A filter that identifies a time series that -should be used as the denominator of a ratio -that will be compared with the threshold. If -a denominator_filter is specified, the time -series specified by the filter field will be -used as the numerator.The filter is similar -to the one that is specified in the -MetricService.ListTimeSeries request (that -call is useful to verify the time series -that will be retrieved / processed) and must -specify the metric type and optionally may -contain restrictions on resource type, -resource labels, and metric labels. This -field may not exceed 2048 Unicode characters -in length.`, - }, - "evaluation_missing_data": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP", ""}), - Description: `A condition control that determines how -metric-threshold conditions are evaluated when -data stops arriving. 
Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"]`, - }, - "filter": { - Type: schema.TypeString, - Optional: true, - Description: `A filter that identifies which time series -should be compared with the threshold.The -filter is similar to the one that is -specified in the -MetricService.ListTimeSeries request (that -call is useful to verify the time series -that will be retrieved / processed) and must -specify the metric type and optionally may -contain restrictions on resource type, -resource labels, and metric labels. This -field may not exceed 2048 Unicode characters -in length.`, - }, - "threshold_value": { - Type: schema.TypeFloat, - Optional: true, - Description: `A value against which to compare the time -series.`, - }, - "trigger": { - Type: schema.TypeList, - Optional: true, - Description: `The number/percent of time series for which -the comparison must hold in order for the -condition to trigger. If unspecified, then -the condition will trigger if the comparison -is true for any of the time series that have -been identified by filter and aggregations, -or by the ratio, if denominator_filter and -denominator_aggregations are specified.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "count": { - Type: schema.TypeInt, - Optional: true, - Description: `The absolute number of time series -that must fail the predicate for the -condition to be triggered.`, - }, - "percent": { - Type: schema.TypeFloat, - Optional: true, - Description: `The percentage of time series that -must fail the predicate for the -condition to be triggered.`, - }, - }, - }, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The unique resource name for this condition. 
-Its syntax is: -projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] -[CONDITION_ID] is assigned by Stackdriver Monitoring when -the condition is created as part of a new or updated alerting -policy.`, - }, - }, - }, - }, - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `A short name or phrase used to identify the policy in -dashboards, notifications, and incidents. To avoid confusion, don't use -the same display name for multiple policies in the same project. The -name is limited to 512 Unicode characters.`, - }, - "alert_strategy": { - Type: schema.TypeList, - Optional: true, - Description: `Control over how this alert policy's notification channels are notified.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "auto_close": { - Type: schema.TypeString, - Optional: true, - Description: `If an alert policy that was active has no data for this long, any open incidents will close.`, - }, - "notification_rate_limit": { - Type: schema.TypeList, - Optional: true, - Description: `Required for alert policies with a LogMatch condition. -This limit is not implemented for alert policies that are not log-based.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "period": { - Type: schema.TypeString, - Optional: true, - Description: `Not more than one notification per period.`, - }, - }, - }, - }, - }, - }, - }, - "documentation": { - Type: schema.TypeList, - Optional: true, - Description: `Documentation that is included with notifications and incidents related -to this policy. Best practice is for the documentation to include information -to help responders understand, mitigate, escalate, and correct the underlying -problems detected by the alerting policy. 
Notification channels that have -limited capacity might not show this documentation.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "content": { - Type: schema.TypeString, - Optional: true, - Description: `The text of the documentation, interpreted according to mimeType. -The content may not exceed 8,192 Unicode characters and may not -exceed more than 10,240 bytes when encoded in UTF-8 format, -whichever is smaller.`, - AtLeastOneOf: []string{"documentation.0.content", "documentation.0.mime_type"}, - }, - "mime_type": { - Type: schema.TypeString, - Optional: true, - Description: `The format of the content field. Presently, only the value -"text/markdown" is supported.`, - Default: "text/markdown", - AtLeastOneOf: []string{"documentation.0.content", "documentation.0.mime_type"}, - }, - }, - }, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether or not the policy is enabled. The default is true.`, - Default: true, - }, - "notification_channels": { - Type: schema.TypeList, - Optional: true, - Description: `Identifies the notification channels to which notifications should be -sent when incidents are opened or closed or when new violations occur -on an already opened incident. Each element of this array corresponds -to the name field in each of the NotificationChannel objects that are -returned from the notificationChannels.list method. The syntax of the -entries in this field is -'projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]'`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "user_labels": { - Type: schema.TypeMap, - Optional: true, - Description: `This field is intended to be used for organizing and identifying the AlertPolicy -objects.The field can contain up to 64 entries. Each key and value is limited -to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values -can contain only lowercase letters, numerals, underscores, and dashes. 
Keys -must begin with a letter.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "creation_record": { - Type: schema.TypeList, - Computed: true, - Description: `A read-only record of the creation of the alerting policy. -If provided in a call to create or update, this field will -be ignored.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "mutate_time": { - Type: schema.TypeString, - Computed: true, - Description: `When the change occurred.`, - }, - "mutated_by": { - Type: schema.TypeString, - Computed: true, - Description: `The email address of the user making the change.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The unique resource name for this policy. -Its syntax is: projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringAlertPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringAlertPolicyDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - combinerProp, err := expandMonitoringAlertPolicyCombiner(d.Get("combiner"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("combiner"); !isEmptyValue(reflect.ValueOf(combinerProp)) && (ok || !reflect.DeepEqual(v, combinerProp)) { - obj["combiner"] = combinerProp - } - enabledProp, err := expandMonitoringAlertPolicyEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("enabled"); ok || !reflect.DeepEqual(v, enabledProp) { - obj["enabled"] = enabledProp - } - conditionsProp, err := expandMonitoringAlertPolicyConditions(d.Get("conditions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("conditions"); !isEmptyValue(reflect.ValueOf(conditionsProp)) && (ok || !reflect.DeepEqual(v, conditionsProp)) { - obj["conditions"] = conditionsProp - } - notificationChannelsProp, err := expandMonitoringAlertPolicyNotificationChannels(d.Get("notification_channels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_channels"); !isEmptyValue(reflect.ValueOf(notificationChannelsProp)) && (ok || !reflect.DeepEqual(v, notificationChannelsProp)) { - obj["notificationChannels"] = notificationChannelsProp - } - alertStrategyProp, err := expandMonitoringAlertPolicyAlertStrategy(d.Get("alert_strategy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("alert_strategy"); !isEmptyValue(reflect.ValueOf(alertStrategyProp)) && (ok || !reflect.DeepEqual(v, alertStrategyProp)) { - obj["alertStrategy"] = alertStrategyProp - } - userLabelsProp, err := expandMonitoringAlertPolicyUserLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_labels"); !isEmptyValue(reflect.ValueOf(userLabelsProp)) && (ok || !reflect.DeepEqual(v, userLabelsProp)) { - obj["userLabels"] = userLabelsProp - } - documentationProp, err := expandMonitoringAlertPolicyDocumentation(d.Get("documentation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("documentation"); !isEmptyValue(reflect.ValueOf(documentationProp)) && (ok || !reflect.DeepEqual(v, documentationProp)) { - obj["documentation"] = documentationProp - } - - lockName, err := replaceVars(d, config, "alertPolicy/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := 
replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/alertPolicies") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new AlertPolicy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AlertPolicy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return fmt.Errorf("Error creating AlertPolicy: %s", err) - } - if err := d.Set("name", flattenMonitoringAlertPolicyName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - log.Printf("[DEBUG] Finished creating AlertPolicy %q: %#v", d.Id(), res) - - return resourceMonitoringAlertPolicyRead(d, meta) -} - -func resourceMonitoringAlertPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AlertPolicy: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("MonitoringAlertPolicy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading AlertPolicy: %s", err) - } - - if err := d.Set("name", flattenMonitoringAlertPolicyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("display_name", flattenMonitoringAlertPolicyDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("combiner", flattenMonitoringAlertPolicyCombiner(res["combiner"], d, config)); err != nil { - return fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("creation_record", flattenMonitoringAlertPolicyCreationRecord(res["creationRecord"], d, config)); err != nil { - return fmt.Errorf("Error reading 
AlertPolicy: %s", err) - } - if err := d.Set("enabled", flattenMonitoringAlertPolicyEnabled(res["enabled"], d, config)); err != nil { - return fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("conditions", flattenMonitoringAlertPolicyConditions(res["conditions"], d, config)); err != nil { - return fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("notification_channels", flattenMonitoringAlertPolicyNotificationChannels(res["notificationChannels"], d, config)); err != nil { - return fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("alert_strategy", flattenMonitoringAlertPolicyAlertStrategy(res["alertStrategy"], d, config)); err != nil { - return fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("user_labels", flattenMonitoringAlertPolicyUserLabels(res["userLabels"], d, config)); err != nil { - return fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("documentation", flattenMonitoringAlertPolicyDocumentation(res["documentation"], d, config)); err != nil { - return fmt.Errorf("Error reading AlertPolicy: %s", err) - } - - return nil -} - -func resourceMonitoringAlertPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AlertPolicy: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringAlertPolicyDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - combinerProp, err := expandMonitoringAlertPolicyCombiner(d.Get("combiner"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("combiner"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, combinerProp)) { - obj["combiner"] = combinerProp - } - enabledProp, err := expandMonitoringAlertPolicyEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); ok || !reflect.DeepEqual(v, enabledProp) { - obj["enabled"] = enabledProp - } - conditionsProp, err := expandMonitoringAlertPolicyConditions(d.Get("conditions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("conditions"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, conditionsProp)) { - obj["conditions"] = conditionsProp - } - notificationChannelsProp, err := expandMonitoringAlertPolicyNotificationChannels(d.Get("notification_channels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_channels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationChannelsProp)) { - obj["notificationChannels"] = notificationChannelsProp - } - alertStrategyProp, err := expandMonitoringAlertPolicyAlertStrategy(d.Get("alert_strategy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("alert_strategy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, alertStrategyProp)) { - obj["alertStrategy"] = alertStrategyProp - } - userLabelsProp, err := expandMonitoringAlertPolicyUserLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, userLabelsProp)) { - obj["userLabels"] = userLabelsProp - } - documentationProp, err := expandMonitoringAlertPolicyDocumentation(d.Get("documentation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("documentation"); !isEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, documentationProp)) { - obj["documentation"] = documentationProp - } - - lockName, err := replaceVars(d, config, "alertPolicy/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating AlertPolicy %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("combiner") { - updateMask = append(updateMask, "combiner") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - - if d.HasChange("conditions") { - updateMask = append(updateMask, "conditions") - } - - if d.HasChange("notification_channels") { - updateMask = append(updateMask, "notificationChannels") - } - - if d.HasChange("alert_strategy") { - updateMask = append(updateMask, "alertStrategy") - } - - if d.HasChange("user_labels") { - updateMask = append(updateMask, "userLabels") - } - - if d.HasChange("documentation") { - updateMask = append(updateMask, "documentation") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isMonitoringConcurrentEditError) - - if err != nil { - return fmt.Errorf("Error updating AlertPolicy %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating AlertPolicy %q: %#v", d.Id(), res) - } - - return resourceMonitoringAlertPolicyRead(d, meta) -} - -func 
resourceMonitoringAlertPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AlertPolicy: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "alertPolicy/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting AlertPolicy %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, "AlertPolicy") - } - - log.Printf("[DEBUG] Finished deleting AlertPolicy %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringAlertPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*schema.ResourceData{d}, nil -} - -func flattenMonitoringAlertPolicyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyCombiner(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyCreationRecord(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["mutate_time"] = - flattenMonitoringAlertPolicyCreationRecordMutateTime(original["mutateTime"], d, config) - transformed["mutated_by"] = - flattenMonitoringAlertPolicyCreationRecordMutatedBy(original["mutatedBy"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringAlertPolicyCreationRecordMutateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyCreationRecordMutatedBy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "condition_absent": flattenMonitoringAlertPolicyConditionsConditionAbsent(original["conditionAbsent"], d, config), - "name": flattenMonitoringAlertPolicyConditionsName(original["name"], d, config), - "condition_monitoring_query_language": flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(original["conditionMonitoringQueryLanguage"], d, config), - "condition_threshold": flattenMonitoringAlertPolicyConditionsConditionThreshold(original["conditionThreshold"], d, config), - "display_name": 
flattenMonitoringAlertPolicyConditionsDisplayName(original["displayName"], d, config), - "condition_matched_log": flattenMonitoringAlertPolicyConditionsConditionMatchedLog(original["conditionMatchedLog"], d, config), - }) - } - return transformed -} -func flattenMonitoringAlertPolicyConditionsConditionAbsent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["aggregations"] = - flattenMonitoringAlertPolicyConditionsConditionAbsentAggregations(original["aggregations"], d, config) - transformed["trigger"] = - flattenMonitoringAlertPolicyConditionsConditionAbsentTrigger(original["trigger"], d, config) - transformed["duration"] = - flattenMonitoringAlertPolicyConditionsConditionAbsentDuration(original["duration"], d, config) - transformed["filter"] = - flattenMonitoringAlertPolicyConditionsConditionAbsentFilter(original["filter"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "per_series_aligner": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(original["perSeriesAligner"], d, config), - "group_by_fields": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(original["groupByFields"], d, config), - "alignment_period": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(original["alignmentPeriod"], d, config), - 
"cross_series_reducer": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(original["crossSeriesReducer"], d, config), - }) - } - return transformed -} -func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentTrigger(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["percent"] = - flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(original["percent"], d, config) - transformed["count"] = - flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(original["count"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := 
v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["query"] = - flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(original["query"], d, config) - transformed["duration"] = - flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(original["duration"], d, config) - transformed["trigger"] = - flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(original["trigger"], d, config) - transformed["evaluation_missing_data"] = - flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(original["evaluationMissingData"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(v interface{}, d *schema.ResourceData, config *Config) interface{} { - 
if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["percent"] = - flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerPercent(original["percent"], d, config) - transformed["count"] = - flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerCount(original["count"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerPercent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["threshold_value"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(original["thresholdValue"], d, config) - transformed["denominator_filter"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(original["denominatorFilter"], d, config) - transformed["denominator_aggregations"] = - 
flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(original["denominatorAggregations"], d, config) - transformed["duration"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdDuration(original["duration"], d, config) - transformed["comparison"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdComparison(original["comparison"], d, config) - transformed["trigger"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdTrigger(original["trigger"], d, config) - transformed["aggregations"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdAggregations(original["aggregations"], d, config) - transformed["filter"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdFilter(original["filter"], d, config) - transformed["evaluation_missing_data"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(original["evaluationMissingData"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "per_series_aligner": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(original["perSeriesAligner"], d, config), - "group_by_fields": 
flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(original["groupByFields"], d, config), - "alignment_period": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(original["alignmentPeriod"], d, config), - "cross_series_reducer": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(original["crossSeriesReducer"], d, config), - }) - } - return transformed -} -func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdComparison(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdTrigger(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["percent"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(original["percent"], d, config) - 
transformed["count"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(original["count"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregations(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "per_series_aligner": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(original["perSeriesAligner"], d, config), - "group_by_fields": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(original["groupByFields"], d, config), - "alignment_period": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(original["alignmentPeriod"], d, config), - "cross_series_reducer": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(original["crossSeriesReducer"], d, config), - }) - } - return transformed -} -func 
flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionMatchedLog(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["filter"] = - flattenMonitoringAlertPolicyConditionsConditionMatchedLogFilter(original["filter"], d, config) - transformed["label_extractors"] = - flattenMonitoringAlertPolicyConditionsConditionMatchedLogLabelExtractors(original["labelExtractors"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringAlertPolicyConditionsConditionMatchedLogFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenMonitoringAlertPolicyConditionsConditionMatchedLogLabelExtractors(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyNotificationChannels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyAlertStrategy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["notification_rate_limit"] = - flattenMonitoringAlertPolicyAlertStrategyNotificationRateLimit(original["notificationRateLimit"], d, config) - transformed["auto_close"] = - flattenMonitoringAlertPolicyAlertStrategyAutoClose(original["autoClose"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringAlertPolicyAlertStrategyNotificationRateLimit(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["period"] = - flattenMonitoringAlertPolicyAlertStrategyNotificationRateLimitPeriod(original["period"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringAlertPolicyAlertStrategyNotificationRateLimitPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyAlertStrategyAutoClose(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyUserLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyDocumentation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) 
== 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["content"] = - flattenMonitoringAlertPolicyDocumentationContent(original["content"], d, config) - transformed["mime_type"] = - flattenMonitoringAlertPolicyDocumentationMimeType(original["mimeType"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringAlertPolicyDocumentationContent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyDocumentationMimeType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMonitoringAlertPolicyDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyCombiner(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedConditionAbsent, err := expandMonitoringAlertPolicyConditionsConditionAbsent(original["condition_absent"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConditionAbsent); val.IsValid() && !isEmptyValue(val) { - transformed["conditionAbsent"] = transformedConditionAbsent - } - - transformedName, err := expandMonitoringAlertPolicyConditionsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - 
transformedConditionMonitoringQueryLanguage, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(original["condition_monitoring_query_language"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConditionMonitoringQueryLanguage); val.IsValid() && !isEmptyValue(val) { - transformed["conditionMonitoringQueryLanguage"] = transformedConditionMonitoringQueryLanguage - } - - transformedConditionThreshold, err := expandMonitoringAlertPolicyConditionsConditionThreshold(original["condition_threshold"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConditionThreshold); val.IsValid() && !isEmptyValue(val) { - transformed["conditionThreshold"] = transformedConditionThreshold - } - - transformedDisplayName, err := expandMonitoringAlertPolicyConditionsDisplayName(original["display_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !isEmptyValue(val) { - transformed["displayName"] = transformedDisplayName - } - - transformedConditionMatchedLog, err := expandMonitoringAlertPolicyConditionsConditionMatchedLog(original["condition_matched_log"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConditionMatchedLog); val.IsValid() && !isEmptyValue(val) { - transformed["conditionMatchedLog"] = transformedConditionMatchedLog - } - - req = append(req, transformed) - } - return req, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAggregations, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregations(original["aggregations"], d, config) - if err 
!= nil { - return nil, err - } else if val := reflect.ValueOf(transformedAggregations); val.IsValid() && !isEmptyValue(val) { - transformed["aggregations"] = transformedAggregations - } - - transformedTrigger, err := expandMonitoringAlertPolicyConditionsConditionAbsentTrigger(original["trigger"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTrigger); val.IsValid() && !isEmptyValue(val) { - transformed["trigger"] = transformedTrigger - } - - transformedDuration, err := expandMonitoringAlertPolicyConditionsConditionAbsentDuration(original["duration"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDuration); val.IsValid() && !isEmptyValue(val) { - transformed["duration"] = transformedDuration - } - - transformedFilter, err := expandMonitoringAlertPolicyConditionsConditionAbsentFilter(original["filter"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !isEmptyValue(val) { - transformed["filter"] = transformedFilter - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentAggregations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPerSeriesAligner, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(original["per_series_aligner"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPerSeriesAligner); val.IsValid() && !isEmptyValue(val) { - transformed["perSeriesAligner"] = transformedPerSeriesAligner - } - - transformedGroupByFields, err := 
expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(original["group_by_fields"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGroupByFields); val.IsValid() && !isEmptyValue(val) { - transformed["groupByFields"] = transformedGroupByFields - } - - transformedAlignmentPeriod, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(original["alignment_period"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAlignmentPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["alignmentPeriod"] = transformedAlignmentPeriod - } - - transformedCrossSeriesReducer, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(original["cross_series_reducer"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCrossSeriesReducer); val.IsValid() && !isEmptyValue(val) { - transformed["crossSeriesReducer"] = transformedCrossSeriesReducer - } - - req = append(req, transformed) - } - return req, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentTrigger(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPercent, err := expandMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(original["percent"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { - transformed["percent"] = transformedPercent - } - - transformedCount, err := expandMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(original["count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCount); val.IsValid() && !isEmptyValue(val) { - transformed["count"] = transformedCount - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedQuery, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(original["query"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedQuery); val.IsValid() && !isEmptyValue(val) { - transformed["query"] = transformedQuery - } - - transformedDuration, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(original["duration"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDuration); val.IsValid() && !isEmptyValue(val) { - transformed["duration"] = transformedDuration - } - - transformedTrigger, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(original["trigger"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTrigger); val.IsValid() && !isEmptyValue(val) { - transformed["trigger"] = transformedTrigger - } - - transformedEvaluationMissingData, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(original["evaluation_missing_data"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEvaluationMissingData); val.IsValid() && !isEmptyValue(val) { - transformed["evaluationMissingData"] = transformedEvaluationMissingData - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] 
== nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPercent, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerPercent(original["percent"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { - transformed["percent"] = transformedPercent - } - - transformedCount, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerCount(original["count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCount); val.IsValid() && !isEmptyValue(val) { - transformed["count"] = transformedCount - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedThresholdValue, err := expandMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(original["threshold_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedThresholdValue); val.IsValid() && !isEmptyValue(val) { - 
transformed["thresholdValue"] = transformedThresholdValue - } - - transformedDenominatorFilter, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(original["denominator_filter"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDenominatorFilter); val.IsValid() && !isEmptyValue(val) { - transformed["denominatorFilter"] = transformedDenominatorFilter - } - - transformedDenominatorAggregations, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(original["denominator_aggregations"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDenominatorAggregations); val.IsValid() && !isEmptyValue(val) { - transformed["denominatorAggregations"] = transformedDenominatorAggregations - } - - transformedDuration, err := expandMonitoringAlertPolicyConditionsConditionThresholdDuration(original["duration"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDuration); val.IsValid() && !isEmptyValue(val) { - transformed["duration"] = transformedDuration - } - - transformedComparison, err := expandMonitoringAlertPolicyConditionsConditionThresholdComparison(original["comparison"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedComparison); val.IsValid() && !isEmptyValue(val) { - transformed["comparison"] = transformedComparison - } - - transformedTrigger, err := expandMonitoringAlertPolicyConditionsConditionThresholdTrigger(original["trigger"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTrigger); val.IsValid() && !isEmptyValue(val) { - transformed["trigger"] = transformedTrigger - } - - transformedAggregations, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregations(original["aggregations"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedAggregations); val.IsValid() && !isEmptyValue(val) { - transformed["aggregations"] = transformedAggregations - } - - transformedFilter, err := expandMonitoringAlertPolicyConditionsConditionThresholdFilter(original["filter"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !isEmptyValue(val) { - transformed["filter"] = transformedFilter - } - - transformedEvaluationMissingData, err := expandMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(original["evaluation_missing_data"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEvaluationMissingData); val.IsValid() && !isEmptyValue(val) { - transformed["evaluationMissingData"] = transformedEvaluationMissingData - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPerSeriesAligner, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(original["per_series_aligner"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPerSeriesAligner); val.IsValid() && !isEmptyValue(val) { - transformed["perSeriesAligner"] = transformedPerSeriesAligner - } - - 
transformedGroupByFields, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(original["group_by_fields"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGroupByFields); val.IsValid() && !isEmptyValue(val) { - transformed["groupByFields"] = transformedGroupByFields - } - - transformedAlignmentPeriod, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(original["alignment_period"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAlignmentPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["alignmentPeriod"] = transformedAlignmentPeriod - } - - transformedCrossSeriesReducer, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(original["cross_series_reducer"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCrossSeriesReducer); val.IsValid() && !isEmptyValue(val) { - transformed["crossSeriesReducer"] = transformedCrossSeriesReducer - } - - req = append(req, transformed) - } - return req, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} 
- -func expandMonitoringAlertPolicyConditionsConditionThresholdDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdComparison(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdTrigger(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPercent, err := expandMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(original["percent"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { - transformed["percent"] = transformedPercent - } - - transformedCount, err := expandMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(original["count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCount); val.IsValid() && !isEmptyValue(val) { - transformed["count"] = transformedCount - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdAggregations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - 
transformed := make(map[string]interface{}) - - transformedPerSeriesAligner, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(original["per_series_aligner"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPerSeriesAligner); val.IsValid() && !isEmptyValue(val) { - transformed["perSeriesAligner"] = transformedPerSeriesAligner - } - - transformedGroupByFields, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(original["group_by_fields"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGroupByFields); val.IsValid() && !isEmptyValue(val) { - transformed["groupByFields"] = transformedGroupByFields - } - - transformedAlignmentPeriod, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(original["alignment_period"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAlignmentPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["alignmentPeriod"] = transformedAlignmentPeriod - } - - transformedCrossSeriesReducer, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(original["cross_series_reducer"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCrossSeriesReducer); val.IsValid() && !isEmptyValue(val) { - transformed["crossSeriesReducer"] = transformedCrossSeriesReducer - } - - req = append(req, transformed) - } - return req, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMatchedLog(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFilter, err := expandMonitoringAlertPolicyConditionsConditionMatchedLogFilter(original["filter"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !isEmptyValue(val) { - transformed["filter"] = transformedFilter - } - - transformedLabelExtractors, err := expandMonitoringAlertPolicyConditionsConditionMatchedLogLabelExtractors(original["label_extractors"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLabelExtractors); val.IsValid() && !isEmptyValue(val) { - transformed["labelExtractors"] = transformedLabelExtractors - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMatchedLogFilter(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMatchedLogLabelExtractors(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandMonitoringAlertPolicyNotificationChannels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyAlertStrategy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNotificationRateLimit, err := expandMonitoringAlertPolicyAlertStrategyNotificationRateLimit(original["notification_rate_limit"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNotificationRateLimit); val.IsValid() && !isEmptyValue(val) { - transformed["notificationRateLimit"] = transformedNotificationRateLimit - } - - transformedAutoClose, err := expandMonitoringAlertPolicyAlertStrategyAutoClose(original["auto_close"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAutoClose); val.IsValid() && !isEmptyValue(val) { - transformed["autoClose"] = transformedAutoClose - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyAlertStrategyNotificationRateLimit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPeriod, err := 
expandMonitoringAlertPolicyAlertStrategyNotificationRateLimitPeriod(original["period"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["period"] = transformedPeriod - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyAlertStrategyNotificationRateLimitPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyAlertStrategyAutoClose(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyUserLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandMonitoringAlertPolicyDocumentation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedContent, err := expandMonitoringAlertPolicyDocumentationContent(original["content"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContent); val.IsValid() && !isEmptyValue(val) { - transformed["content"] = transformedContent - } - - transformedMimeType, err := expandMonitoringAlertPolicyDocumentationMimeType(original["mime_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMimeType); val.IsValid() && !isEmptyValue(val) { - transformed["mimeType"] = transformedMimeType - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyDocumentationContent(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyDocumentationMimeType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_custom_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_custom_service.go deleted file mode 100644 index 7e4c1d8ac3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_custom_service.go +++ /dev/null @@ -1,454 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceMonitoringService() *schema.Resource { - return &schema.Resource{ - Create: resourceMonitoringServiceCreate, - Read: resourceMonitoringServiceRead, - Update: resourceMonitoringServiceUpdate, - Delete: resourceMonitoringServiceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceMonitoringServiceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `Name used for UI elements listing this Service.`, - }, - "service_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z0-9\-]+$`), - Description: `An optional service ID to use. If not given, the server will generate a -service ID.`, - }, - "telemetry": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration for how to query telemetry on a Service.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "resource_name": { - Type: schema.TypeString, - Optional: true, - Description: `The full name of the resource that defines this service. -Formatted as described in -https://cloud.google.com/apis/design/resource_names.`, - }, - }, - }, - }, - "user_labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Labels which have been used to annotate the service. Label keys must start -with a letter. Label keys and values may contain lowercase letters, -numbers, underscores, and dashes. 
Label keys and values have a maximum -length of 63 characters, and must be less than 128 bytes in size. Up to 64 -label entries may be stored. For labels which do not have a semantic value, -the empty string may be supplied for the label value.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The full resource name for this service. The syntax is: -projects/[PROJECT_ID]/services/[SERVICE_ID].`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringServiceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringServiceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - userLabelsProp, err := expandMonitoringServiceUserLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_labels"); ok || !reflect.DeepEqual(v, userLabelsProp) { - obj["userLabels"] = userLabelsProp - } - telemetryProp, err := expandMonitoringServiceTelemetry(d.Get("telemetry"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("telemetry"); !isEmptyValue(reflect.ValueOf(telemetryProp)) && (ok || !reflect.DeepEqual(v, telemetryProp)) { - obj["telemetry"] = telemetryProp - } - nameProp, err := expandMonitoringServiceServiceId(d.Get("service_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_id"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, 
nameProp)) { - obj["name"] = nameProp - } - - obj, err = resourceMonitoringServiceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/services?serviceId={{service_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Service: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return fmt.Errorf("Error creating Service: %s", err) - } - if err := d.Set("name", flattenMonitoringServiceName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Service %q: %#v", d.Id(), res) - - return resourceMonitoringServiceRead(d, meta) -} - -func resourceMonitoringServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("MonitoringService %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - - if err := d.Set("name", flattenMonitoringServiceName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("display_name", flattenMonitoringServiceDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("user_labels", flattenMonitoringServiceUserLabels(res["userLabels"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("telemetry", flattenMonitoringServiceTelemetry(res["telemetry"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("service_id", flattenMonitoringServiceServiceId(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Service: %s", err) - } - - return nil -} - -func resourceMonitoringServiceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringServiceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - 
userLabelsProp, err := expandMonitoringServiceUserLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_labels"); ok || !reflect.DeepEqual(v, userLabelsProp) { - obj["userLabels"] = userLabelsProp - } - telemetryProp, err := expandMonitoringServiceTelemetry(d.Get("telemetry"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("telemetry"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, telemetryProp)) { - obj["telemetry"] = telemetryProp - } - - obj, err = resourceMonitoringServiceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Service %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("user_labels") { - updateMask = append(updateMask, "userLabels") - } - - if d.HasChange("telemetry") { - updateMask = append(updateMask, "telemetry") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isMonitoringConcurrentEditError) - - if err != nil { - return fmt.Errorf("Error updating Service %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Service %q: %#v", d.Id(), res) - } - - return resourceMonitoringServiceRead(d, meta) -} - -func resourceMonitoringServiceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - 
userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Service %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, "Service") - } - - log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringServiceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*schema.ResourceData{d}, nil -} - -func flattenMonitoringServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringServiceDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringServiceUserLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringServiceTelemetry(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["resource_name"] = - flattenMonitoringServiceTelemetryResourceName(original["resourceName"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringServiceTelemetryResourceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringServiceServiceId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandMonitoringServiceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringServiceUserLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandMonitoringServiceTelemetry(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResourceName, err := expandMonitoringServiceTelemetryResourceName(original["resource_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedResourceName); val.IsValid() && !isEmptyValue(val) { - transformed["resourceName"] = transformedResourceName - } - - return transformed, nil -} - -func expandMonitoringServiceTelemetryResourceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringServiceServiceId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceMonitoringServiceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, 
error) { - // Currently only CUSTOM service types can be created, but the - // custom identifier block does not actually have fields right now. - // Set to empty to indicate manually-created service type is CUSTOM. - if _, ok := obj["custom"]; !ok { - obj["custom"] = map[string]interface{}{} - } - // Name/Service ID is a query parameter only - delete(obj, "name") - - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_dashboard.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_dashboard.go deleted file mode 100644 index 63f958e1d0..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_dashboard.go +++ /dev/null @@ -1,214 +0,0 @@ -package google - -import ( - "fmt" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func monitoringDashboardDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - computedFields := []string{"etag", "name"} - - oldMap, err := structure.ExpandJsonFromString(old) - if err != nil { - return false - } - - newMap, err := structure.ExpandJsonFromString(new) - if err != nil { - return false - } - - for _, f := range computedFields { - delete(oldMap, f) - delete(newMap, f) - } - - return reflect.DeepEqual(oldMap, newMap) -} - -func ResourceMonitoringDashboard() *schema.Resource { - return &schema.Resource{ - Create: resourceMonitoringDashboardCreate, - Read: resourceMonitoringDashboardRead, - Update: resourceMonitoringDashboardUpdate, - Delete: resourceMonitoringDashboardDelete, - - Importer: &schema.ResourceImporter{ - State: resourceMonitoringDashboardImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(4 * 
time.Minute), - Update: schema.DefaultTimeout(4 * time.Minute), - Delete: schema.DefaultTimeout(4 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "dashboard_json": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: monitoringDashboardDiffSuppress, - StateFunc: func(v interface{}) string { - json, _ := structure.NormalizeJsonString(v) - return json - }, - Description: `The JSON representation of a dashboard, following the format at https://cloud.google.com/monitoring/api/ref_v3/rest/v1/projects.dashboards.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringDashboardCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj, err := structure.ExpandJsonFromString(d.Get("dashboard_json").(string)) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v1/projects/{{project}}/dashboards") - if err != nil { - return err - } - res, err := SendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return fmt.Errorf("Error creating Dashboard: %s", err) - } - - name, ok := res["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - d.SetId(name.(string)) - - return resourceMonitoringDashboardRead(d, config) -} - -func resourceMonitoringDashboardRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url := config.MonitoringBasePath + "v1/" + d.Id() - - project, err := getProject(d, config) - if err != nil { - return err - } - - res, err := SendRequest(config, "GET", project, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("MonitoringDashboard %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting Dashboard: %s", err) - } - - str, err := structure.FlattenJsonToString(res) - if err != nil { - return fmt.Errorf("Error reading Dashboard: %s", err) - } - if err = d.Set("dashboard_json", str); err != nil { - return fmt.Errorf("Error reading Dashboard: %s", err) - } - - return nil -} - -func resourceMonitoringDashboardUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - o, n := d.GetChange("dashboard_json") - oObj, err := structure.ExpandJsonFromString(o.(string)) - if err != nil { - return err - } - nObj, err := structure.ExpandJsonFromString(n.(string)) - if err != nil { - return err - } - - nObj["etag"] = oObj["etag"] - - project, err := getProject(d, config) - if err != nil { - return err - } - - url := config.MonitoringBasePath + "v1/" + d.Id() - _, err = SendRequestWithTimeout(config, "PATCH", project, url, userAgent, nObj, d.Timeout(schema.TimeoutUpdate), isMonitoringConcurrentEditError) - if err != nil { - return fmt.Errorf("Error updating Dashboard %q: %s", d.Id(), err) - } - - return resourceMonitoringDashboardRead(d, config) -} - -func 
resourceMonitoringDashboardDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url := config.MonitoringBasePath + "v1/" + d.Id() - - project, err := getProject(d, config) - if err != nil { - return err - } - - _, err = SendRequestWithTimeout(config, "DELETE", project, url, userAgent, nil, d.Timeout(schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("MonitoringDashboard %q", d.Id())) - } - - return nil -} - -func resourceMonitoringDashboardImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - parts, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/dashboards/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return nil, err - } - - if err := d.Set("project", parts["project"]); err != nil { - return nil, fmt.Errorf("Error setting project: %s", err) - } - d.SetId(fmt.Sprintf("projects/%s/dashboards/%s", parts["project"], parts["id"])) - - return []*schema.ResourceData{d}, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_group.go deleted file mode 100644 index cf90152660..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_group.go +++ /dev/null @@ -1,399 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic 
Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceMonitoringGroup() *schema.Resource { - return &schema.Resource{ - Create: resourceMonitoringGroupCreate, - Read: resourceMonitoringGroupRead, - Update: resourceMonitoringGroupUpdate, - Delete: resourceMonitoringGroupDelete, - - Importer: &schema.ResourceImporter{ - State: resourceMonitoringGroupImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `A user-assigned name for this group, used only for display -purposes.`, - }, - "filter": { - Type: schema.TypeString, - Required: true, - Description: `The filter used to determine which monitored resources -belong to this group.`, - }, - "is_cluster": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, the members of this group are considered to be a -cluster. The system can perform additional analysis on -groups that are clusters.`, - }, - "parent_name": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The name of the group's parent, if it has one. The format is -"projects/{project_id_or_number}/groups/{group_id}". For -groups with no parent, parentName is the empty string, "".`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `A unique identifier for this group. 
The format is -"projects/{project_id_or_number}/groups/{group_id}".`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringGroupCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - parentNameProp, err := expandMonitoringGroupParentName(d.Get("parent_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent_name"); !isEmptyValue(reflect.ValueOf(parentNameProp)) && (ok || !reflect.DeepEqual(v, parentNameProp)) { - obj["parentName"] = parentNameProp - } - isClusterProp, err := expandMonitoringGroupIsCluster(d.Get("is_cluster"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_cluster"); !isEmptyValue(reflect.ValueOf(isClusterProp)) && (ok || !reflect.DeepEqual(v, isClusterProp)) { - obj["isCluster"] = isClusterProp - } - displayNameProp, err := expandMonitoringGroupDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - filterProp, err := expandMonitoringGroupFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - - lockName, err := replaceVars(d, config, "stackdriver/groups/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/groups") - if err != nil { - return err - } - - 
log.Printf("[DEBUG] Creating new Group: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Group: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return fmt.Errorf("Error creating Group: %s", err) - } - if err := d.Set("name", flattenMonitoringGroupName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - log.Printf("[DEBUG] Finished creating Group %q: %#v", d.Id(), res) - - return resourceMonitoringGroupRead(d, meta) -} - -func resourceMonitoringGroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Group: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("MonitoringGroup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - - if err := d.Set("parent_name", flattenMonitoringGroupParentName(res["parentName"], d, config)); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("name", flattenMonitoringGroupName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("is_cluster", flattenMonitoringGroupIsCluster(res["isCluster"], d, config)); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("display_name", flattenMonitoringGroupDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("filter", 
flattenMonitoringGroupFilter(res["filter"], d, config)); err != nil { - return fmt.Errorf("Error reading Group: %s", err) - } - - return nil -} - -func resourceMonitoringGroupUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Group: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - parentNameProp, err := expandMonitoringGroupParentName(d.Get("parent_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, parentNameProp)) { - obj["parentName"] = parentNameProp - } - isClusterProp, err := expandMonitoringGroupIsCluster(d.Get("is_cluster"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_cluster"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, isClusterProp)) { - obj["isCluster"] = isClusterProp - } - displayNameProp, err := expandMonitoringGroupDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - filterProp, err := expandMonitoringGroupFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - - lockName, err := replaceVars(d, config, "stackdriver/groups/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err 
!= nil { - return err - } - - log.Printf("[DEBUG] Updating Group %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isMonitoringConcurrentEditError) - - if err != nil { - return fmt.Errorf("Error updating Group %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Group %q: %#v", d.Id(), res) - } - - return resourceMonitoringGroupRead(d, meta) -} - -func resourceMonitoringGroupDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Group: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "stackdriver/groups/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Group %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, "Group") - } - - log.Printf("[DEBUG] Finished deleting Group %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // 
current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*schema.ResourceData{d}, nil -} - -func flattenMonitoringGroupParentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringGroupName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringGroupIsCluster(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringGroupDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringGroupFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMonitoringGroupParentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringGroupIsCluster(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringGroupDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringGroupFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_metric_descriptor.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_metric_descriptor.go deleted file mode 100644 index 52b37a4ce5..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_metric_descriptor.go +++ /dev/null @@ -1,723 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO 
GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceMonitoringMetricDescriptor() *schema.Resource { - return &schema.Resource{ - Create: resourceMonitoringMetricDescriptorCreate, - Read: resourceMonitoringMetricDescriptorRead, - Update: resourceMonitoringMetricDescriptorUpdate, - Delete: resourceMonitoringMetricDescriptorDelete, - - Importer: &schema.ResourceImporter{ - State: resourceMonitoringMetricDescriptorImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "description": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A detailed description of the metric, which can be used in documentation.`, - }, - "display_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example "Request count".`, - }, - "metric_kind": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"METRIC_KIND_UNSPECIFIED", "GAUGE", "DELTA", "CUMULATIVE"}), - Description: `Whether the metric records instantaneous values, changes to a value, etc. Some combinations of metricKind and valueType might not be supported. 
Possible values: ["METRIC_KIND_UNSPECIFIED", "GAUGE", "DELTA", "CUMULATIVE"]`, - }, - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The metric type, including its DNS name prefix. The type is not URL-encoded. All service defined metrics must be prefixed with the service name, in the format of {service name}/{relative metric name}, such as cloudsql.googleapis.com/database/cpu/utilization. The relative metric name must have only upper and lower-case letters, digits, '/' and underscores '_' are allowed. Additionally, the maximum number of characters allowed for the relative_metric_name is 100. All user-defined metric types have the DNS name custom.googleapis.com, external.googleapis.com, or logging.googleapis.com/user/.`, - }, - "value_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION"}), - Description: `Whether the measurement is an integer, a floating-point number, etc. Some combinations of metricKind and valueType might not be supported. Possible values: ["BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION"]`, - }, - "labels": { - Type: schema.TypeSet, - Optional: true, - Description: `The set of labels that can be used to describe a specific instance of this metric type. In order to delete a label, the entire resource must be deleted, then created with the desired labels.`, - Elem: monitoringMetricDescriptorLabelsSchema(), - // Default schema.HashSchema is used. - }, - "launch_stage": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED", ""}), - Description: `The launch stage of the metric definition. 
Possible values: ["LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED"]`, - }, - "metadata": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Metadata which can be used to guide usage of the metric.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ingest_delay": { - Type: schema.TypeString, - Optional: true, - Description: `The delay of data points caused by ingestion. Data points older than this age are guaranteed to be ingested and available to be read, excluding data loss due to errors. In '[duration format](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?&_ga=2.264881487.1507873253.1593446723-935052455.1591817775#google.protobuf.Duration)'.`, - AtLeastOneOf: []string{"metadata.0.sample_period", "metadata.0.ingest_delay"}, - }, - "sample_period": { - Type: schema.TypeString, - Optional: true, - Description: `The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period. In '[duration format](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?&_ga=2.264881487.1507873253.1593446723-935052455.1591817775#google.protobuf.Duration)'.`, - AtLeastOneOf: []string{"metadata.0.sample_period", "metadata.0.ingest_delay"}, - }, - }, - }, - }, - "unit": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The units in which the metric value is reported. It is only applicable if the -valueType is INT64, DOUBLE, or DISTRIBUTION. The unit defines the representation of -the stored metric values. - -Different systems may scale the values to be more easily displayed (so a value of -0.02KBy might be displayed as 20By, and a value of 3523KBy might be displayed as -3.5MBy). 
However, if the unit is KBy, then the value of the metric is always in -thousands of bytes, no matter how it may be displayed. - -If you want a custom metric to record the exact number of CPU-seconds used by a job, -you can create an INT64 CUMULATIVE metric whose unit is s{CPU} (or equivalently -1s{CPU} or just s). If the job uses 12,005 CPU-seconds, then the value is written as -12005. - -Alternatively, if you want a custom metric to record data in a more granular way, you -can create a DOUBLE CUMULATIVE metric whose unit is ks{CPU}, and then write the value -12.005 (which is 12005/1000), or use Kis{CPU} and write 11.723 (which is 12005/1024). -The supported units are a subset of The Unified Code for Units of Measure standard. -More info can be found in the API documentation -(https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors).`, - }, - "monitored_resource_types": { - Type: schema.TypeList, - Computed: true, - Description: `If present, then a time series, which is identified partially by a metric type and a MonitoredResourceDescriptor, that is associated with this metric type can only be associated with one of the monitored resource types listed here. This field allows time series to be associated with the intersection of this metric type and the monitored resource types in this list.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the metric descriptor.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func monitoringMetricDescriptorLabelsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - Description: `The key for this label. The key must not exceed 100 characters. 
The first character of the key must be an upper- or lower-case letter, the remaining characters must be letters, digits or underscores, and the key must match the regular expression [a-zA-Z][a-zA-Z0-9_]*`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A human-readable description for the label.`, - }, - "value_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"STRING", "BOOL", "INT64", ""}), - Description: `The type of data that can be assigned to the label. Default value: "STRING" Possible values: ["STRING", "BOOL", "INT64"]`, - Default: "STRING", - }, - }, - } -} - -func resourceMonitoringMetricDescriptorCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - typeProp, err := expandMonitoringMetricDescriptorType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - labelsProp, err := expandMonitoringMetricDescriptorLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - metricKindProp, err := expandMonitoringMetricDescriptorMetricKind(d.Get("metric_kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metric_kind"); !isEmptyValue(reflect.ValueOf(metricKindProp)) && (ok || !reflect.DeepEqual(v, metricKindProp)) { - obj["metricKind"] = metricKindProp - } - valueTypeProp, err := expandMonitoringMetricDescriptorValueType(d.Get("value_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("value_type"); 
!isEmptyValue(reflect.ValueOf(valueTypeProp)) && (ok || !reflect.DeepEqual(v, valueTypeProp)) { - obj["valueType"] = valueTypeProp - } - unitProp, err := expandMonitoringMetricDescriptorUnit(d.Get("unit"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unit"); !isEmptyValue(reflect.ValueOf(unitProp)) && (ok || !reflect.DeepEqual(v, unitProp)) { - obj["unit"] = unitProp - } - descriptionProp, err := expandMonitoringMetricDescriptorDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandMonitoringMetricDescriptorDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - metadataProp, err := expandMonitoringMetricDescriptorMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(reflect.ValueOf(metadataProp)) && (ok || !reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - launchStageProp, err := expandMonitoringMetricDescriptorLaunchStage(d.Get("launch_stage"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("launch_stage"); !isEmptyValue(reflect.ValueOf(launchStageProp)) && (ok || !reflect.DeepEqual(v, launchStageProp)) { - obj["launchStage"] = launchStageProp - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/metricDescriptors") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new MetricDescriptor: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return fmt.Errorf("Error creating MetricDescriptor: %s", err) - } - if err := d.Set("name", flattenMonitoringMetricDescriptorName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourceMonitoringMetricDescriptorPollRead(d, meta), PollCheckForExistence, "Creating MetricDescriptor", d.Timeout(schema.TimeoutCreate), 20) - if err != nil { - return fmt.Errorf("Error waiting to create MetricDescriptor: %s", err) - } - - log.Printf("[DEBUG] Finished creating MetricDescriptor %q: %#v", d.Id(), res) - - return resourceMonitoringMetricDescriptorRead(d, meta) -} - -func resourceMonitoringMetricDescriptorPollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, 
err - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return res, err - } - return res, nil - } -} - -func resourceMonitoringMetricDescriptorRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("MonitoringMetricDescriptor %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - - if err := d.Set("name", flattenMonitoringMetricDescriptorName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("type", flattenMonitoringMetricDescriptorType(res["type"], d, config)); err != nil { - return fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("labels", flattenMonitoringMetricDescriptorLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("metric_kind", flattenMonitoringMetricDescriptorMetricKind(res["metricKind"], d, config)); err != nil { - return fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("value_type", 
flattenMonitoringMetricDescriptorValueType(res["valueType"], d, config)); err != nil { - return fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("unit", flattenMonitoringMetricDescriptorUnit(res["unit"], d, config)); err != nil { - return fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("description", flattenMonitoringMetricDescriptorDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("display_name", flattenMonitoringMetricDescriptorDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("monitored_resource_types", flattenMonitoringMetricDescriptorMonitoredResourceTypes(res["monitoredResourceTypes"], d, config)); err != nil { - return fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - - return nil -} - -func resourceMonitoringMetricDescriptorUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - typeProp, err := expandMonitoringMetricDescriptorType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - labelsProp, err := expandMonitoringMetricDescriptorLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - metricKindProp, err := 
expandMonitoringMetricDescriptorMetricKind(d.Get("metric_kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metric_kind"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metricKindProp)) { - obj["metricKind"] = metricKindProp - } - valueTypeProp, err := expandMonitoringMetricDescriptorValueType(d.Get("value_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("value_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, valueTypeProp)) { - obj["valueType"] = valueTypeProp - } - unitProp, err := expandMonitoringMetricDescriptorUnit(d.Get("unit"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unit"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, unitProp)) { - obj["unit"] = unitProp - } - descriptionProp, err := expandMonitoringMetricDescriptorDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandMonitoringMetricDescriptorDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - metadataProp, err := expandMonitoringMetricDescriptorMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - launchStageProp, err := expandMonitoringMetricDescriptorLaunchStage(d.Get("launch_stage"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("launch_stage"); !isEmptyValue(reflect.ValueOf(v)) && 
(ok || !reflect.DeepEqual(v, launchStageProp)) { - obj["launchStage"] = launchStageProp - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/metricDescriptors") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating MetricDescriptor %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isMonitoringConcurrentEditError) - - if err != nil { - return fmt.Errorf("Error updating MetricDescriptor %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating MetricDescriptor %q: %#v", d.Id(), res) - } - - err = PollingWaitTime(resourceMonitoringMetricDescriptorPollRead(d, meta), PollCheckForExistence, "Updating MetricDescriptor", d.Timeout(schema.TimeoutUpdate), 20) - if err != nil { - return err - } - - return resourceMonitoringMetricDescriptorRead(d, meta) -} - -func resourceMonitoringMetricDescriptorDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting MetricDescriptor %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), 
isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, "MetricDescriptor") - } - - err = PollingWaitTime(resourceMonitoringMetricDescriptorPollRead(d, meta), PollCheckForAbsence, "Deleting MetricDescriptor", d.Timeout(schema.TimeoutCreate), 20) - if err != nil { - return fmt.Errorf("Error waiting to delete MetricDescriptor: %s", err) - } - - log.Printf("[DEBUG] Finished deleting MetricDescriptor %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringMetricDescriptorImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*schema.ResourceData{d}, nil -} - -func flattenMonitoringMetricDescriptorName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(schema.HashResource(monitoringMetricDescriptorLabelsSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "key": flattenMonitoringMetricDescriptorLabelsKey(original["key"], d, config), - "value_type": flattenMonitoringMetricDescriptorLabelsValueType(original["valueType"], d, config), - "description": flattenMonitoringMetricDescriptorLabelsDescription(original["description"], d, config), - }) - } - return transformed -} -func 
flattenMonitoringMetricDescriptorLabelsKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorLabelsValueType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { - return "STRING" - } - - return v -} - -func flattenMonitoringMetricDescriptorLabelsDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorMetricKind(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorValueType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorUnit(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorMonitoredResourceTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMonitoringMetricDescriptorType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorLabels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandMonitoringMetricDescriptorLabelsKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKey); 
val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - transformedValueType, err := expandMonitoringMetricDescriptorLabelsValueType(original["value_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValueType); val.IsValid() && !isEmptyValue(val) { - transformed["valueType"] = transformedValueType - } - - transformedDescription, err := expandMonitoringMetricDescriptorLabelsDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - req = append(req, transformed) - } - return req, nil -} - -func expandMonitoringMetricDescriptorLabelsKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorLabelsValueType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorLabelsDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorMetricKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorValueType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorUnit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandMonitoringMetricDescriptorMetadata(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSamplePeriod, err := expandMonitoringMetricDescriptorMetadataSamplePeriod(original["sample_period"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSamplePeriod); val.IsValid() && !isEmptyValue(val) { - transformed["samplePeriod"] = transformedSamplePeriod - } - - transformedIngestDelay, err := expandMonitoringMetricDescriptorMetadataIngestDelay(original["ingest_delay"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIngestDelay); val.IsValid() && !isEmptyValue(val) { - transformed["ingestDelay"] = transformedIngestDelay - } - - return transformed, nil -} - -func expandMonitoringMetricDescriptorMetadataSamplePeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorMetadataIngestDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorLaunchStage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_monitored_project.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_monitored_project.go deleted file mode 100644 index b8cc97c245..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_monitored_project.go +++ /dev/null @@ -1,211 +0,0 @@ -// 
---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: DCL *** -// -// ---------------------------------------------------------------------------- -// -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. -// -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - monitoring "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring" -) - -func ResourceMonitoringMonitoredProject() *schema.Resource { - return &schema.Resource{ - Create: resourceMonitoringMonitoredProjectCreate, - Read: resourceMonitoringMonitoredProjectRead, - Delete: resourceMonitoringMonitoredProjectDelete, - - Importer: &schema.ResourceImporter{ - State: resourceMonitoringMonitoredProjectImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "metrics_scope": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Required. The resource name of the existing Metrics Scope that will monitor this project. 
Example: locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}", - }, - - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Immutable. The resource name of the `MonitoredProject`. On input, the resource name includes the scoping project ID and monitored project ID. On output, it contains the equivalent project numbers. Example: `locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}`", - }, - - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: "Output only. The time when this `MonitoredProject` was created.", - }, - }, - } -} - -func resourceMonitoringMonitoredProjectCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &monitoring.MonitoredProject{ - MetricsScope: dcl.String(d.Get("metrics_scope").(string)), - Name: dcl.String(d.Get("name").(string)), - } - - id, err := obj.ID() - if err != nil { - return fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLMonitoringClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - client.Config.BasePath += "v1" - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyMonitoredProject(context.Background(), obj, directive...) 
- - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error creating MonitoredProject: %s", err) - } - - log.Printf("[DEBUG] Finished creating MonitoredProject %q: %#v", d.Id(), res) - - return resourceMonitoringMonitoredProjectRead(d, meta) -} - -func resourceMonitoringMonitoredProjectRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &monitoring.MonitoredProject{ - MetricsScope: dcl.String(d.Get("metrics_scope").(string)), - Name: dcl.String(d.Get("name").(string)), - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLMonitoringClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - client.Config.BasePath += "v1" - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.GetMonitoredProject(context.Background(), obj) - if err != nil { - resourceName := fmt.Sprintf("MonitoringMonitoredProject %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("metrics_scope", res.MetricsScope); err != nil { - return fmt.Errorf("error setting metrics_scope in state: %s", err) - } - if err = d.Set("name", res.Name); err != nil { - return fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("create_time", res.CreateTime); err != nil { - return fmt.Errorf("error setting create_time in state: %s", err) - } - - return nil -} - -func resourceMonitoringMonitoredProjectDelete(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &monitoring.MonitoredProject{ - MetricsScope: dcl.String(d.Get("metrics_scope").(string)), - Name: dcl.String(d.Get("name").(string)), - } - - log.Printf("[DEBUG] Deleting MonitoredProject %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLMonitoringClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - client.Config.BasePath += "v1" - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - if err := client.DeleteMonitoredProject(context.Background(), obj); err != nil { - return fmt.Errorf("Error deleting MonitoredProject: %s", err) - } - - log.Printf("[DEBUG] Finished deleting MonitoredProject %q", d.Id()) - return nil -} - -func resourceMonitoringMonitoredProjectImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "locations/global/metricsScopes/(?P[^/]+)/projects/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "locations/global/metricsScopes/{{metrics_scope}}/projects/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_notification_channel.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_notification_channel.go deleted file mode 100644 index 91434735fa..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_notification_channel.go +++ /dev/null @@ -1,612 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -var sensitiveLabels = []string{"auth_token", "service_key", "password"} - -func sensitiveLabelCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { - for _, sl := range sensitiveLabels { - mapLabel := diff.Get("labels." + sl).(string) - authLabel := diff.Get("sensitive_labels.0." 
+ sl).(string) - if mapLabel != "" && authLabel != "" { - return fmt.Errorf("Sensitive label [%s] cannot be set in both `labels` and the `sensitive_labels` block.", sl) - } - } - return nil -} - -func ResourceMonitoringNotificationChannel() *schema.Resource { - return &schema.Resource{ - Create: resourceMonitoringNotificationChannelCreate, - Read: resourceMonitoringNotificationChannelRead, - Update: resourceMonitoringNotificationChannelUpdate, - Delete: resourceMonitoringNotificationChannelDelete, - - Importer: &schema.ResourceImporter{ - State: resourceMonitoringNotificationChannelImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - CustomizeDiff: sensitiveLabelCustomizeDiff, - - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - Description: `The type of the notification channel. This field matches the value of the NotificationChannelDescriptor.type field. See https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.notificationChannelDescriptors/list to get the list of valid values such as "email", "slack", etc...`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `An optional human-readable description of this notification channel. This description may provide additional details, beyond the display name, for the channel. This may not exceed 1024 Unicode characters.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `An optional human-readable name for this notification channel. It is recommended that you specify a non-empty and unique name in order to make it easier to identify the channels in your project, though this is not enforced. 
The display name is limited to 512 Unicode characters.`, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether notifications are forwarded to the described channel. This makes it possible to disable delivery of notifications to a particular channel without removing the channel from all alerting policies that reference the channel. This is a more convenient approach when the change is temporary and you want to receive notifications from the same set of alerting policies on the channel at some point in the future.`, - Default: true, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Configuration fields that define the channel and its behavior. The -permissible and required labels are specified in the -NotificationChannelDescriptor corresponding to the type field. - -Labels with sensitive data are obfuscated by the API and therefore Terraform cannot -determine if there are upstream changes to these fields. They can also be configured via -the sensitive_labels block, but cannot be configured in both places.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "sensitive_labels": { - Type: schema.TypeList, - Optional: true, - Description: `Different notification type behaviors are configured primarily using the the 'labels' field on this -resource. This block contains the labels which contain secrets or passwords so that they can be marked -sensitive and hidden from plan output. The name of the field, eg: password, will be the key -in the 'labels' map in the api request. - -Credentials may not be specified in both locations and will cause an error. Changing from one location -to a different credential configuration in the config will require an apply to update state.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "auth_token": { - Type: schema.TypeString, - Optional: true, - Description: `An authorization token for a notification channel. 
Channel types that support this field include: slack`, - Sensitive: true, - ExactlyOneOf: []string{"sensitive_labels.0.auth_token", "sensitive_labels.0.password", "sensitive_labels.0.service_key"}, - }, - "password": { - Type: schema.TypeString, - Optional: true, - Description: `An password for a notification channel. Channel types that support this field include: webhook_basicauth`, - Sensitive: true, - ExactlyOneOf: []string{"sensitive_labels.0.auth_token", "sensitive_labels.0.password", "sensitive_labels.0.service_key"}, - }, - "service_key": { - Type: schema.TypeString, - Optional: true, - Description: `An servicekey token for a notification channel. Channel types that support this field include: pagerduty`, - Sensitive: true, - ExactlyOneOf: []string{"sensitive_labels.0.auth_token", "sensitive_labels.0.password", "sensitive_labels.0.service_key"}, - }, - }, - }, - }, - "user_labels": { - Type: schema.TypeMap, - Optional: true, - Description: `User-supplied key/value data that does not need to conform to the corresponding NotificationChannelDescriptor's schema, unlike the labels field. This field is intended to be used for organizing and identifying the NotificationChannel objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The full REST resource name for this channel. The syntax is: -projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID] -The [CHANNEL_ID] is automatically assigned by the server on creation.`, - }, - "verification_status": { - Type: schema.TypeString, - Computed: true, - Description: `Indicates whether this channel has been verified or not. 
On a ListNotificationChannels or GetNotificationChannel operation, this field is expected to be populated.If the value is UNVERIFIED, then it indicates that the channel is non-functioning (it both requires verification and lacks verification); otherwise, it is assumed that the channel works.If the channel is neither VERIFIED nor UNVERIFIED, it implies that the channel is of a type that does not require verification or that this specific channel has been exempted from verification because it was created prior to verification being required for channels of this type.This field cannot be modified using a standard UpdateNotificationChannel operation. To change the value of this field, you must call VerifyNotificationChannel.`, - }, - "force_delete": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `If true, the notification channel will be deleted regardless -of its use in alert policies (the policies will be updated -to remove the channel). If false, channels that are still -referenced by an existing alerting policy will fail to be -deleted in a delete operation.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringNotificationChannelCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandMonitoringNotificationChannelLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - typeProp, err := expandMonitoringNotificationChannelType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); 
!isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - userLabelsProp, err := expandMonitoringNotificationChannelUserLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_labels"); !isEmptyValue(reflect.ValueOf(userLabelsProp)) && (ok || !reflect.DeepEqual(v, userLabelsProp)) { - obj["userLabels"] = userLabelsProp - } - descriptionProp, err := expandMonitoringNotificationChannelDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandMonitoringNotificationChannelDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandMonitoringNotificationChannelEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); ok || !reflect.DeepEqual(v, enabledProp) { - obj["enabled"] = enabledProp - } - - obj, err = resourceMonitoringNotificationChannelEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "stackdriver/notifications/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/notificationChannels") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new NotificationChannel: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for 
NotificationChannel: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return fmt.Errorf("Error creating NotificationChannel: %s", err) - } - if err := d.Set("name", flattenMonitoringNotificationChannelName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - log.Printf("[DEBUG] Finished creating NotificationChannel %q: %#v", d.Id(), res) - - return resourceMonitoringNotificationChannelRead(d, meta) -} - -func resourceMonitoringNotificationChannelRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NotificationChannel: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("MonitoringNotificationChannel %q", d.Id())) - } - - res, err = resourceMonitoringNotificationChannelDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing MonitoringNotificationChannel because it no longer exists.") - d.SetId("") - return nil - } - - // Explicitly set virtual fields to default values if unset - if _, ok := d.GetOkExists("force_delete"); !ok { - if err := d.Set("force_delete", false); err != nil { - return fmt.Errorf("Error setting force_delete: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading NotificationChannel: %s", err) - } - - if err := d.Set("labels", flattenMonitoringNotificationChannelLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("name", flattenMonitoringNotificationChannelName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("verification_status", flattenMonitoringNotificationChannelVerificationStatus(res["verificationStatus"], d, config)); err != nil { - return fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("type", flattenMonitoringNotificationChannelType(res["type"], d, config)); err != nil { - return fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("user_labels", flattenMonitoringNotificationChannelUserLabels(res["userLabels"], d, config)); err != nil { - return fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("description", flattenMonitoringNotificationChannelDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("display_name", flattenMonitoringNotificationChannelDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("enabled", flattenMonitoringNotificationChannelEnabled(res["enabled"], d, config)); err != nil { - return fmt.Errorf("Error 
reading NotificationChannel: %s", err) - } - - return nil -} - -func resourceMonitoringNotificationChannelUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NotificationChannel: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandMonitoringNotificationChannelLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - typeProp, err := expandMonitoringNotificationChannelType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - userLabelsProp, err := expandMonitoringNotificationChannelUserLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, userLabelsProp)) { - obj["userLabels"] = userLabelsProp - } - descriptionProp, err := expandMonitoringNotificationChannelDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandMonitoringNotificationChannelDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - 
obj["displayName"] = displayNameProp - } - enabledProp, err := expandMonitoringNotificationChannelEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); ok || !reflect.DeepEqual(v, enabledProp) { - obj["enabled"] = enabledProp - } - - obj, err = resourceMonitoringNotificationChannelEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "stackdriver/notifications/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating NotificationChannel %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isMonitoringConcurrentEditError) - - if err != nil { - return fmt.Errorf("Error updating NotificationChannel %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating NotificationChannel %q: %#v", d.Id(), res) - } - - return resourceMonitoringNotificationChannelRead(d, meta) -} - -func resourceMonitoringNotificationChannelDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for NotificationChannel: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "stackdriver/notifications/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, 
"{{MonitoringBasePath}}v3/{{name}}?force={{force_delete}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting NotificationChannel %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, "NotificationChannel") - } - - log.Printf("[DEBUG] Finished deleting NotificationChannel %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringNotificationChannelImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*schema.ResourceData{d}, nil -} - -func flattenMonitoringNotificationChannelLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelVerificationStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelUserLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelDisplayName(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMonitoringNotificationChannelLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandMonitoringNotificationChannelType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringNotificationChannelUserLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandMonitoringNotificationChannelDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringNotificationChannelDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringNotificationChannelEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceMonitoringNotificationChannelEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - labelmap, ok := obj["labels"] - if !ok { - labelmap = make(map[string]string) - } - - var labels map[string]string - labels = labelmap.(map[string]string) - - for _, sl := range sensitiveLabels { - if auth, _ := d.GetOkExists("sensitive_labels.0." 
+ sl); auth != "" { - labels[sl] = auth.(string) - } - } - - obj["labels"] = labels - - return obj, nil -} - -func resourceMonitoringNotificationChannelDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if labelmap, ok := res["labels"]; ok { - labels := labelmap.(map[string]interface{}) - for _, sl := range sensitiveLabels { - if _, apiOk := labels[sl]; apiOk { - if _, exists := d.GetOkExists("sensitive_labels.0." + sl); exists { - delete(labels, sl) - } else { - labels[sl] = d.Get("labels." + sl) - } - } - } - } - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_service.go deleted file mode 100644 index f21fd59fa6..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_service.go +++ /dev/null @@ -1,477 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceMonitoringGenericService() *schema.Resource { - return &schema.Resource{ - Create: resourceMonitoringGenericServiceCreate, - Read: resourceMonitoringGenericServiceRead, - Update: resourceMonitoringGenericServiceUpdate, - Delete: resourceMonitoringGenericServiceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceMonitoringGenericServiceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "service_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `An optional service ID to use. If not given, the server will generate a -service ID.`, - }, - "basic_service": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A well-known service type, defined by its service type and service labels. -Valid values of service types and services labels are described at -https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "service_labels": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Labels that specify the resource that emits the monitoring data -which is used for SLO reporting of this 'Service'.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "service_type": { - Type: schema.TypeString, - Optional: true, - Description: `The type of service that this basic service defines, e.g. 
-APP_ENGINE service type`, - }, - }, - }, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `Name used for UI elements listing this Service.`, - }, - "user_labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Labels which have been used to annotate the service. Label keys must start -with a letter. Label keys and values may contain lowercase letters, -numbers, underscores, and dashes. Label keys and values have a maximum -length of 63 characters, and must be less than 128 bytes in size. Up to 64 -label entries may be stored. For labels which do not have a semantic value, -the empty string may be supplied for the label value.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The full resource name for this service. The syntax is: -projects/[PROJECT_ID]/services/[SERVICE_ID].`, - }, - "telemetry": { - Type: schema.TypeList, - Computed: true, - Description: `Configuration for how to query telemetry on a Service.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "resource_name": { - Type: schema.TypeString, - Optional: true, - Description: `The full name of the resource that defines this service. 
-Formatted as described in -https://cloud.google.com/apis/design/resource_names.`, - }, - }, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringGenericServiceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringGenericServiceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - userLabelsProp, err := expandMonitoringGenericServiceUserLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_labels"); ok || !reflect.DeepEqual(v, userLabelsProp) { - obj["userLabels"] = userLabelsProp - } - basicServiceProp, err := expandMonitoringGenericServiceBasicService(d.Get("basic_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("basic_service"); !isEmptyValue(reflect.ValueOf(basicServiceProp)) && (ok || !reflect.DeepEqual(v, basicServiceProp)) { - obj["basicService"] = basicServiceProp - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/services?serviceId={{service_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new GenericService: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GenericService: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - 
- res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return fmt.Errorf("Error creating GenericService: %s", err) - } - if err := d.Set("name", flattenMonitoringGenericServiceName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/services/{{service_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating GenericService %q: %#v", d.Id(), res) - - return resourceMonitoringGenericServiceRead(d, meta) -} - -func resourceMonitoringGenericServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/services/{{service_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GenericService: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("MonitoringGenericService %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading GenericService: %s", err) - } - - if err := d.Set("name", flattenMonitoringGenericServiceName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading GenericService: %s", err) - } - if err := 
d.Set("display_name", flattenMonitoringGenericServiceDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading GenericService: %s", err) - } - if err := d.Set("user_labels", flattenMonitoringGenericServiceUserLabels(res["userLabels"], d, config)); err != nil { - return fmt.Errorf("Error reading GenericService: %s", err) - } - if err := d.Set("telemetry", flattenMonitoringGenericServiceTelemetry(res["telemetry"], d, config)); err != nil { - return fmt.Errorf("Error reading GenericService: %s", err) - } - if err := d.Set("basic_service", flattenMonitoringGenericServiceBasicService(res["basicService"], d, config)); err != nil { - return fmt.Errorf("Error reading GenericService: %s", err) - } - - return nil -} - -func resourceMonitoringGenericServiceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GenericService: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringGenericServiceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - userLabelsProp, err := expandMonitoringGenericServiceUserLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_labels"); ok || !reflect.DeepEqual(v, userLabelsProp) { - obj["userLabels"] = userLabelsProp - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/services/{{service_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating GenericService %q: %#v", d.Id(), 
obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("user_labels") { - updateMask = append(updateMask, "userLabels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isMonitoringConcurrentEditError) - - if err != nil { - return fmt.Errorf("Error updating GenericService %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating GenericService %q: %#v", d.Id(), res) - } - - return resourceMonitoringGenericServiceRead(d, meta) -} - -func resourceMonitoringGenericServiceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GenericService: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/services/{{service_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting GenericService %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return 
handleNotFoundError(err, d, "GenericService") - } - - log.Printf("[DEBUG] Finished deleting GenericService %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringGenericServiceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/services/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/services/{{service_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenMonitoringGenericServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringGenericServiceDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringGenericServiceUserLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringGenericServiceTelemetry(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resource_name"] = - flattenMonitoringGenericServiceTelemetryResourceName(original["resourceName"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringGenericServiceTelemetryResourceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringGenericServiceBasicService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["service_type"] = - flattenMonitoringGenericServiceBasicServiceServiceType(original["serviceType"], d, config) - transformed["service_labels"] = - flattenMonitoringGenericServiceBasicServiceServiceLabels(original["serviceLabels"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringGenericServiceBasicServiceServiceType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringGenericServiceBasicServiceServiceLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMonitoringGenericServiceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringGenericServiceUserLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandMonitoringGenericServiceBasicService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceType, err := expandMonitoringGenericServiceBasicServiceServiceType(original["service_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedServiceType); val.IsValid() && !isEmptyValue(val) { - transformed["serviceType"] = transformedServiceType - } - - transformedServiceLabels, err := expandMonitoringGenericServiceBasicServiceServiceLabels(original["service_labels"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedServiceLabels); val.IsValid() && !isEmptyValue(val) { - 
transformed["serviceLabels"] = transformedServiceLabels - } - - return transformed, nil -} - -func expandMonitoringGenericServiceBasicServiceServiceType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringGenericServiceBasicServiceServiceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_uptime_check_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_uptime_check_config.go deleted file mode 100644 index b327783781..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_uptime_check_config.go +++ /dev/null @@ -1,1403 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func resourceMonitoringUptimeCheckConfigHttpCheckPathDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - return old == "/"+new -} - -func ResourceMonitoringUptimeCheckConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceMonitoringUptimeCheckConfigCreate, - Read: resourceMonitoringUptimeCheckConfigRead, - Update: resourceMonitoringUptimeCheckConfigUpdate, - Delete: resourceMonitoringUptimeCheckConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceMonitoringUptimeCheckConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `A human-friendly name for the uptime check configuration. The display name should be unique within a Stackdriver Workspace in order to make it easier to identify; however, uniqueness is not enforced.`, - }, - "timeout": { - Type: schema.TypeString, - Required: true, - Description: `The maximum amount of time to wait for the request to complete (must be between 1 and 60 seconds). Accepted formats https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration`, - }, - "checker_type": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"STATIC_IP_CHECKERS", "VPC_CHECKERS", ""}), - Description: `The checker type to use for the check. If the monitored resource type is servicedirectory_service, checkerType must be set to VPC_CHECKERS. 
Possible values: ["STATIC_IP_CHECKERS", "VPC_CHECKERS"]`, - }, - "content_matchers": { - Type: schema.TypeList, - Optional: true, - Description: `The expected content on the page the check is run against. Currently, only the first entry in the list is supported, and other entries will be ignored. The server will look for an exact match of the string in the page response's content. This field is optional and should only be specified if a content match is required.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "content": { - Type: schema.TypeString, - Required: true, - Description: `String or regex content to match (max 1024 bytes)`, - }, - "json_path_matcher": { - Type: schema.TypeList, - Optional: true, - Description: `Information needed to perform a JSONPath content match. Used for 'ContentMatcherOption::MATCHES_JSON_PATH' and 'ContentMatcherOption::NOT_MATCHES_JSON_PATH'.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "json_path": { - Type: schema.TypeString, - Required: true, - Description: `JSONPath within the response output pointing to the expected 'ContentMatcher::content' to match against.`, - }, - "json_matcher": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"EXACT_MATCH", "REGEX_MATCH", ""}), - Description: `Options to perform JSONPath content matching. Default value: "EXACT_MATCH" Possible values: ["EXACT_MATCH", "REGEX_MATCH"]`, - Default: "EXACT_MATCH", - }, - }, - }, - }, - "matcher": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"CONTAINS_STRING", "NOT_CONTAINS_STRING", "MATCHES_REGEX", "NOT_MATCHES_REGEX", "MATCHES_JSON_PATH", "NOT_MATCHES_JSON_PATH", ""}), - Description: `The type of content matcher that will be applied to the server output, compared to the content string when the check is run. 
Default value: "CONTAINS_STRING" Possible values: ["CONTAINS_STRING", "NOT_CONTAINS_STRING", "MATCHES_REGEX", "NOT_MATCHES_REGEX", "MATCHES_JSON_PATH", "NOT_MATCHES_JSON_PATH"]`, - Default: "CONTAINS_STRING", - }, - }, - }, - }, - "http_check": { - Type: schema.TypeList, - Optional: true, - Description: `Contains information needed to make an HTTP or HTTPS check.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "accepted_response_status_codes": { - Type: schema.TypeList, - Optional: true, - Description: `If present, the check will only pass if the HTTP response status code is in this set of status codes. If empty, the HTTP status code will only pass if the HTTP status code is 200-299.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "status_class": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"STATUS_CLASS_1XX", "STATUS_CLASS_2XX", "STATUS_CLASS_3XX", "STATUS_CLASS_4XX", "STATUS_CLASS_5XX", "STATUS_CLASS_ANY", ""}), - Description: `A class of status codes to accept. Possible values: ["STATUS_CLASS_1XX", "STATUS_CLASS_2XX", "STATUS_CLASS_3XX", "STATUS_CLASS_4XX", "STATUS_CLASS_5XX", "STATUS_CLASS_ANY"]`, - }, - "status_value": { - Type: schema.TypeInt, - Optional: true, - Description: `A status code to accept.`, - }, - }, - }, - }, - "auth_info": { - Type: schema.TypeList, - Optional: true, - Description: `The authentication information. 
Optional when creating an HTTP check; defaults to empty.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "password": { - Type: schema.TypeString, - Required: true, - Description: `The password to authenticate.`, - Sensitive: true, - }, - "username": { - Type: schema.TypeString, - Required: true, - Description: `The username to authenticate.`, - }, - }, - }, - AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, - }, - "body": { - Type: schema.TypeString, - Optional: true, - Description: `The request body associated with the HTTP POST request. If contentType is URL_ENCODED, the body passed in must be URL-encoded. Users can provide a Content-Length header via the headers field or the API will do so. If the requestMethod is GET and body is not empty, the API will return an error. The maximum byte size is 1 megabyte. Note - As with all bytes fields JSON representations are base64 encoded. e.g. "foo=bar" in URL-encoded form is "foo%3Dbar" and in base64 encoding is "Zm9vJTI1M0RiYXI=".`, - }, - "content_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"TYPE_UNSPECIFIED", "URL_ENCODED", ""}), - Description: `The content type to use for the check. Possible values: ["TYPE_UNSPECIFIED", "URL_ENCODED"]`, - }, - "headers": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - Description: `The list of headers to send as part of the uptime check request. If two headers have the same key and different values, they should be entered as a single header, with the value being a comma-separated list of all the desired values as described at https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). Entering two separate headers with the same key in a Create call will cause the first to be overwritten by the second. 
The maximum number of headers allowed is 100.`, - Elem: &schema.Schema{Type: schema.TypeString}, - AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, - }, - "mask_headers": { - Type: schema.TypeBool, - Optional: true, - Description: `Boolean specifying whether to encrypt the header information. Encryption should be specified for any headers related to authentication that you do not wish to be seen when retrieving the configuration. The server will be responsible for encrypting the headers. On Get/List calls, if mask_headers is set to True then the headers will be obscured with ******.`, - AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, - }, - "path": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: resourceMonitoringUptimeCheckConfigHttpCheckPathDiffSuppress, - Description: `The path to the page to run the check against. Will be combined with the host (specified within the MonitoredResource) and port to construct the full URL. If the provided path does not begin with "/", a "/" will be prepended automatically. Optional (defaults to "/").`, - Default: "/", - AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, - }, - "port": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The port to the page to run the check against. Will be combined with host (specified within the MonitoredResource) and path to construct the full URL. 
Optional (defaults to 80 without SSL, or 443 with SSL).`, - AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, - }, - "request_method": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"METHOD_UNSPECIFIED", "GET", "POST", ""}), - Description: `The HTTP request method to use for the check. If set to METHOD_UNSPECIFIED then requestMethod defaults to GET. Default value: "GET" Possible values: ["METHOD_UNSPECIFIED", "GET", "POST"]`, - Default: "GET", - }, - "use_ssl": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, use HTTPS instead of HTTP to run the check.`, - AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, - }, - "validate_ssl": { - Type: schema.TypeBool, - Optional: true, - Description: `Boolean specifying whether to include SSL certificate validation as a part of the Uptime check. Only applies to checks where monitoredResource is set to uptime_url. If useSsl is false, setting validateSsl to true has no effect.`, - }, - }, - }, - ExactlyOneOf: []string{"http_check", "tcp_check"}, - }, - "monitored_resource": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The monitored resource (https://cloud.google.com/monitoring/api/resources) associated with the configuration. The following monitored resource types are supported for uptime checks: uptime_url gce_instance gae_app aws_ec2_instance aws_elb_load_balancer k8s_service servicedirectory_service`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "labels": { - Type: schema.TypeMap, - Required: true, - ForceNew: true, - Description: `Values for all of the labels listed in the associated monitored resource descriptor. 
For example, Compute Engine VM instances use the labels "project_id", "instance_id", and "zone".`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.monitoredResourceDescriptors#MonitoredResourceDescriptor) object. For example, the type of a Compute Engine VM instance is gce_instance. For a list of types, see Monitoring resource types (https://cloud.google.com/monitoring/api/resources) and Logging resource types (https://cloud.google.com/logging/docs/api/v2/resource-list).`, - }, - }, - }, - ExactlyOneOf: []string{"monitored_resource", "resource_group"}, - }, - "period": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `How often, in seconds, the uptime check is performed. Currently, the only supported values are 60s (1 minute), 300s (5 minutes), 600s (10 minutes), and 900s (15 minutes). Optional, defaults to 300s.`, - Default: "300s", - }, - "resource_group": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The group resource associated with the configuration.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "group_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The group of resources being monitored. Should be the 'name' of a group`, - AtLeastOneOf: []string{"resource_group.0.resource_type", "resource_group.0.group_id"}, - }, - "resource_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"RESOURCE_TYPE_UNSPECIFIED", "INSTANCE", "AWS_ELB_LOAD_BALANCER", ""}), - Description: `The resource type of the group members. 
Possible values: ["RESOURCE_TYPE_UNSPECIFIED", "INSTANCE", "AWS_ELB_LOAD_BALANCER"]`, - AtLeastOneOf: []string{"resource_group.0.resource_type", "resource_group.0.group_id"}, - }, - }, - }, - ExactlyOneOf: []string{"monitored_resource", "resource_group"}, - }, - "selected_regions": { - Type: schema.TypeList, - Optional: true, - Description: `The list of regions from which the check will be run. Some regions contain one location, and others contain more than one. If this field is specified, enough regions to include a minimum of 3 locations must be provided, or an error message is returned. Not specifying this field will result in uptime checks running from all regions.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "tcp_check": { - Type: schema.TypeList, - Optional: true, - Description: `Contains information needed to make a TCP check.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "port": { - Type: schema.TypeInt, - Required: true, - Description: `The port to the page to run the check against. Will be combined with host (specified within the MonitoredResource) to construct the full URL.`, - }, - }, - }, - ExactlyOneOf: []string{"http_check", "tcp_check"}, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `A unique resource name for this UptimeCheckConfig. 
The format is projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].`, - }, - "uptime_check_id": { - Type: schema.TypeString, - Computed: true, - Description: `The id of the uptime check`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringUptimeCheckConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringUptimeCheckConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - periodProp, err := expandMonitoringUptimeCheckConfigPeriod(d.Get("period"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("period"); !isEmptyValue(reflect.ValueOf(periodProp)) && (ok || !reflect.DeepEqual(v, periodProp)) { - obj["period"] = periodProp - } - timeoutProp, err := expandMonitoringUptimeCheckConfigTimeout(d.Get("timeout"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout"); !isEmptyValue(reflect.ValueOf(timeoutProp)) && (ok || !reflect.DeepEqual(v, timeoutProp)) { - obj["timeout"] = timeoutProp - } - contentMatchersProp, err := expandMonitoringUptimeCheckConfigContentMatchers(d.Get("content_matchers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_matchers"); !isEmptyValue(reflect.ValueOf(contentMatchersProp)) && (ok || !reflect.DeepEqual(v, contentMatchersProp)) { - obj["contentMatchers"] = contentMatchersProp - } - selectedRegionsProp, err := expandMonitoringUptimeCheckConfigSelectedRegions(d.Get("selected_regions"), d, config) 
- if err != nil { - return err - } else if v, ok := d.GetOkExists("selected_regions"); !isEmptyValue(reflect.ValueOf(selectedRegionsProp)) && (ok || !reflect.DeepEqual(v, selectedRegionsProp)) { - obj["selectedRegions"] = selectedRegionsProp - } - checkerTypeProp, err := expandMonitoringUptimeCheckConfigCheckerType(d.Get("checker_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("checker_type"); !isEmptyValue(reflect.ValueOf(checkerTypeProp)) && (ok || !reflect.DeepEqual(v, checkerTypeProp)) { - obj["checkerType"] = checkerTypeProp - } - httpCheckProp, err := expandMonitoringUptimeCheckConfigHttpCheck(d.Get("http_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http_check"); !isEmptyValue(reflect.ValueOf(httpCheckProp)) && (ok || !reflect.DeepEqual(v, httpCheckProp)) { - obj["httpCheck"] = httpCheckProp - } - tcpCheckProp, err := expandMonitoringUptimeCheckConfigTcpCheck(d.Get("tcp_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tcp_check"); !isEmptyValue(reflect.ValueOf(tcpCheckProp)) && (ok || !reflect.DeepEqual(v, tcpCheckProp)) { - obj["tcpCheck"] = tcpCheckProp - } - resourceGroupProp, err := expandMonitoringUptimeCheckConfigResourceGroup(d.Get("resource_group"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("resource_group"); !isEmptyValue(reflect.ValueOf(resourceGroupProp)) && (ok || !reflect.DeepEqual(v, resourceGroupProp)) { - obj["resourceGroup"] = resourceGroupProp - } - monitoredResourceProp, err := expandMonitoringUptimeCheckConfigMonitoredResource(d.Get("monitored_resource"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("monitored_resource"); !isEmptyValue(reflect.ValueOf(monitoredResourceProp)) && (ok || !reflect.DeepEqual(v, monitoredResourceProp)) { - obj["monitoredResource"] = monitoredResourceProp - } - - lockName, err := replaceVars(d, config, 
"stackdriver/groups/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/uptimeCheckConfigs") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new UptimeCheckConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for UptimeCheckConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return fmt.Errorf("Error creating UptimeCheckConfig: %s", err) - } - if err := d.Set("name", flattenMonitoringUptimeCheckConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - log.Printf("[DEBUG] Finished creating UptimeCheckConfig %q: %#v", d.Id(), res) - - return resourceMonitoringUptimeCheckConfigRead(d, meta) -} - -func resourceMonitoringUptimeCheckConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for UptimeCheckConfig: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("MonitoringUptimeCheckConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - - if err := d.Set("name", flattenMonitoringUptimeCheckConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("uptime_check_id", flattenMonitoringUptimeCheckConfigUptimeCheckId(res["id"], d, config)); err != nil { - return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("display_name", flattenMonitoringUptimeCheckConfigDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("period", flattenMonitoringUptimeCheckConfigPeriod(res["period"], d, 
config)); err != nil { - return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("timeout", flattenMonitoringUptimeCheckConfigTimeout(res["timeout"], d, config)); err != nil { - return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("content_matchers", flattenMonitoringUptimeCheckConfigContentMatchers(res["contentMatchers"], d, config)); err != nil { - return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("selected_regions", flattenMonitoringUptimeCheckConfigSelectedRegions(res["selectedRegions"], d, config)); err != nil { - return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("checker_type", flattenMonitoringUptimeCheckConfigCheckerType(res["checkerType"], d, config)); err != nil { - return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("http_check", flattenMonitoringUptimeCheckConfigHttpCheck(res["httpCheck"], d, config)); err != nil { - return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("tcp_check", flattenMonitoringUptimeCheckConfigTcpCheck(res["tcpCheck"], d, config)); err != nil { - return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("resource_group", flattenMonitoringUptimeCheckConfigResourceGroup(res["resourceGroup"], d, config)); err != nil { - return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("monitored_resource", flattenMonitoringUptimeCheckConfigMonitoredResource(res["monitoredResource"], d, config)); err != nil { - return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - - return nil -} - -func resourceMonitoringUptimeCheckConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - 
return fmt.Errorf("Error fetching project for UptimeCheckConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringUptimeCheckConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - timeoutProp, err := expandMonitoringUptimeCheckConfigTimeout(d.Get("timeout"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutProp)) { - obj["timeout"] = timeoutProp - } - contentMatchersProp, err := expandMonitoringUptimeCheckConfigContentMatchers(d.Get("content_matchers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_matchers"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, contentMatchersProp)) { - obj["contentMatchers"] = contentMatchersProp - } - selectedRegionsProp, err := expandMonitoringUptimeCheckConfigSelectedRegions(d.Get("selected_regions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("selected_regions"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, selectedRegionsProp)) { - obj["selectedRegions"] = selectedRegionsProp - } - httpCheckProp, err := expandMonitoringUptimeCheckConfigHttpCheck(d.Get("http_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, httpCheckProp)) { - obj["httpCheck"] = httpCheckProp - } - tcpCheckProp, err := expandMonitoringUptimeCheckConfigTcpCheck(d.Get("tcp_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tcp_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tcpCheckProp)) 
{ - obj["tcpCheck"] = tcpCheckProp - } - - lockName, err := replaceVars(d, config, "stackdriver/groups/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating UptimeCheckConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("timeout") { - updateMask = append(updateMask, "timeout") - } - - if d.HasChange("content_matchers") { - updateMask = append(updateMask, "contentMatchers") - } - - if d.HasChange("selected_regions") { - updateMask = append(updateMask, "selectedRegions") - } - - if d.HasChange("http_check") { - updateMask = append(updateMask, "httpCheck") - } - - if d.HasChange("tcp_check") { - updateMask = append(updateMask, "tcpCheck") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isMonitoringConcurrentEditError) - - if err != nil { - return fmt.Errorf("Error updating UptimeCheckConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating UptimeCheckConfig %q: %#v", d.Id(), res) - } - - return resourceMonitoringUptimeCheckConfigRead(d, meta) -} - -func resourceMonitoringUptimeCheckConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for UptimeCheckConfig: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "stackdriver/groups/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting UptimeCheckConfig %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, "UptimeCheckConfig") - } - - log.Printf("[DEBUG] Finished deleting UptimeCheckConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringUptimeCheckConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*schema.ResourceData{d}, nil -} - -func flattenMonitoringUptimeCheckConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigUptimeCheckId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - parts := strings.Split(d.Get("name").(string), "/") - return parts[len(parts)-1] -} - -func flattenMonitoringUptimeCheckConfigDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigPeriod(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigContentMatchers(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "content": flattenMonitoringUptimeCheckConfigContentMatchersContent(original["content"], d, config), - "matcher": flattenMonitoringUptimeCheckConfigContentMatchersMatcher(original["matcher"], d, config), - "json_path_matcher": flattenMonitoringUptimeCheckConfigContentMatchersJsonPathMatcher(original["jsonPathMatcher"], d, config), - }) - } - return transformed -} -func flattenMonitoringUptimeCheckConfigContentMatchersContent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigContentMatchersMatcher(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigContentMatchersJsonPathMatcher(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["json_path"] = - flattenMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonPath(original["jsonPath"], d, config) - transformed["json_matcher"] = - flattenMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonMatcher(original["jsonMatcher"], d, config) - return []interface{}{transformed} -} -func 
flattenMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonMatcher(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigSelectedRegions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigCheckerType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request_method"] = - flattenMonitoringUptimeCheckConfigHttpCheckRequestMethod(original["requestMethod"], d, config) - transformed["content_type"] = - flattenMonitoringUptimeCheckConfigHttpCheckContentType(original["contentType"], d, config) - transformed["auth_info"] = - flattenMonitoringUptimeCheckConfigHttpCheckAuthInfo(original["authInfo"], d, config) - transformed["port"] = - flattenMonitoringUptimeCheckConfigHttpCheckPort(original["port"], d, config) - transformed["headers"] = - flattenMonitoringUptimeCheckConfigHttpCheckHeaders(original["headers"], d, config) - transformed["path"] = - flattenMonitoringUptimeCheckConfigHttpCheckPath(original["path"], d, config) - transformed["use_ssl"] = - flattenMonitoringUptimeCheckConfigHttpCheckUseSsl(original["useSsl"], d, config) - transformed["validate_ssl"] = - flattenMonitoringUptimeCheckConfigHttpCheckValidateSsl(original["validateSsl"], d, config) - transformed["mask_headers"] = - flattenMonitoringUptimeCheckConfigHttpCheckMaskHeaders(original["maskHeaders"], d, config) - transformed["body"] = - 
flattenMonitoringUptimeCheckConfigHttpCheckBody(original["body"], d, config) - transformed["accepted_response_status_codes"] = - flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(original["acceptedResponseStatusCodes"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringUptimeCheckConfigHttpCheckRequestMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckContentType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckAuthInfo(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["password"] = - flattenMonitoringUptimeCheckConfigHttpCheckAuthInfoPassword(original["password"], d, config) - transformed["username"] = - flattenMonitoringUptimeCheckConfigHttpCheckAuthInfoUsername(original["username"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringUptimeCheckConfigHttpCheckAuthInfoPassword(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return d.Get("http_check.0.auth_info.0.password") -} - -func flattenMonitoringUptimeCheckConfigHttpCheckAuthInfoUsername(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func 
flattenMonitoringUptimeCheckConfigHttpCheckHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckUseSsl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckValidateSsl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckMaskHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckBody(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "status_value": flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(original["statusValue"], d, config), - "status_class": flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(original["statusClass"], d, config), - }) - } - return transformed -} -func flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 
- if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigTcpCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["port"] = - flattenMonitoringUptimeCheckConfigTcpCheckPort(original["port"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringUptimeCheckConfigTcpCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenMonitoringUptimeCheckConfigResourceGroup(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resource_type"] = - flattenMonitoringUptimeCheckConfigResourceGroupResourceType(original["resourceType"], d, config) - transformed["group_id"] = - flattenMonitoringUptimeCheckConfigResourceGroupGroupId(original["groupId"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringUptimeCheckConfigResourceGroupResourceType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigResourceGroupGroupId(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - project := d.Get("project").(string) - return fmt.Sprintf("projects/%s/groups/%s", project, v) -} - -func flattenMonitoringUptimeCheckConfigMonitoredResource(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["type"] = - flattenMonitoringUptimeCheckConfigMonitoredResourceType(original["type"], d, config) - transformed["labels"] = - flattenMonitoringUptimeCheckConfigMonitoredResourceLabels(original["labels"], d, config) - return []interface{}{transformed} -} -func flattenMonitoringUptimeCheckConfigMonitoredResourceType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigMonitoredResourceLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMonitoringUptimeCheckConfigDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigContentMatchers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedContent, err := expandMonitoringUptimeCheckConfigContentMatchersContent(original["content"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedContent); val.IsValid() && !isEmptyValue(val) { - transformed["content"] = transformedContent - } - - transformedMatcher, err := expandMonitoringUptimeCheckConfigContentMatchersMatcher(original["matcher"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMatcher); val.IsValid() && !isEmptyValue(val) { - transformed["matcher"] = transformedMatcher - } - - transformedJsonPathMatcher, err := expandMonitoringUptimeCheckConfigContentMatchersJsonPathMatcher(original["json_path_matcher"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedJsonPathMatcher); val.IsValid() && !isEmptyValue(val) { - transformed["jsonPathMatcher"] = transformedJsonPathMatcher - } - - req = append(req, transformed) - } - return req, nil -} - -func expandMonitoringUptimeCheckConfigContentMatchersContent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigContentMatchersMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigContentMatchersJsonPathMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedJsonPath, err := expandMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonPath(original["json_path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedJsonPath); val.IsValid() && !isEmptyValue(val) { - transformed["jsonPath"] = transformedJsonPath - } - - transformedJsonMatcher, err := expandMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonMatcher(original["json_matcher"], d, config) - if err != nil { - return nil, err - } 
else if val := reflect.ValueOf(transformedJsonMatcher); val.IsValid() && !isEmptyValue(val) { - transformed["jsonMatcher"] = transformedJsonMatcher - } - - return transformed, nil -} - -func expandMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigSelectedRegions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigCheckerType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequestMethod, err := expandMonitoringUptimeCheckConfigHttpCheckRequestMethod(original["request_method"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRequestMethod); val.IsValid() && !isEmptyValue(val) { - transformed["requestMethod"] = transformedRequestMethod - } - - transformedContentType, err := expandMonitoringUptimeCheckConfigHttpCheckContentType(original["content_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContentType); val.IsValid() && !isEmptyValue(val) { - transformed["contentType"] = transformedContentType - } - - transformedAuthInfo, err := expandMonitoringUptimeCheckConfigHttpCheckAuthInfo(original["auth_info"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedAuthInfo); val.IsValid() && !isEmptyValue(val) { - transformed["authInfo"] = transformedAuthInfo - } - - transformedPort, err := expandMonitoringUptimeCheckConfigHttpCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedHeaders, err := expandMonitoringUptimeCheckConfigHttpCheckHeaders(original["headers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["headers"] = transformedHeaders - } - - transformedPath, err := expandMonitoringUptimeCheckConfigHttpCheckPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedUseSsl, err := expandMonitoringUptimeCheckConfigHttpCheckUseSsl(original["use_ssl"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUseSsl); val.IsValid() && !isEmptyValue(val) { - transformed["useSsl"] = transformedUseSsl - } - - transformedValidateSsl, err := expandMonitoringUptimeCheckConfigHttpCheckValidateSsl(original["validate_ssl"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValidateSsl); val.IsValid() && !isEmptyValue(val) { - transformed["validateSsl"] = transformedValidateSsl - } - - transformedMaskHeaders, err := expandMonitoringUptimeCheckConfigHttpCheckMaskHeaders(original["mask_headers"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaskHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["maskHeaders"] = transformedMaskHeaders - } - - transformedBody, err := expandMonitoringUptimeCheckConfigHttpCheckBody(original["body"], d, config) 
- if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBody); val.IsValid() && !isEmptyValue(val) { - transformed["body"] = transformedBody - } - - transformedAcceptedResponseStatusCodes, err := expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(original["accepted_response_status_codes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAcceptedResponseStatusCodes); val.IsValid() && !isEmptyValue(val) { - transformed["acceptedResponseStatusCodes"] = transformedAcceptedResponseStatusCodes - } - - return transformed, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckRequestMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckContentType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckAuthInfo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPassword, err := expandMonitoringUptimeCheckConfigHttpCheckAuthInfoPassword(original["password"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !isEmptyValue(val) { - transformed["password"] = transformedPassword - } - - transformedUsername, err := expandMonitoringUptimeCheckConfigHttpCheckAuthInfoUsername(original["username"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !isEmptyValue(val) { - transformed["username"] = transformedUsername - } - - return transformed, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckAuthInfoPassword(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckAuthInfoUsername(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckUseSsl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckValidateSsl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckMaskHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckBody(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedStatusValue, err := 
expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(original["status_value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStatusValue); val.IsValid() && !isEmptyValue(val) { - transformed["statusValue"] = transformedStatusValue - } - - transformedStatusClass, err := expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(original["status_class"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStatusClass); val.IsValid() && !isEmptyValue(val) { - transformed["statusClass"] = transformedStatusClass - } - - req = append(req, transformed) - } - return req, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigTcpCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPort, err := expandMonitoringUptimeCheckConfigTcpCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - return transformed, nil -} - -func expandMonitoringUptimeCheckConfigTcpCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigResourceGroup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := 
v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResourceType, err := expandMonitoringUptimeCheckConfigResourceGroupResourceType(original["resource_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedResourceType); val.IsValid() && !isEmptyValue(val) { - transformed["resourceType"] = transformedResourceType - } - - transformedGroupId, err := expandMonitoringUptimeCheckConfigResourceGroupGroupId(original["group_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedGroupId); val.IsValid() && !isEmptyValue(val) { - transformed["groupId"] = transformedGroupId - } - - return transformed, nil -} - -func expandMonitoringUptimeCheckConfigResourceGroupResourceType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigResourceGroupGroupId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return GetResourceNameFromSelfLink(v.(string)), nil -} - -func expandMonitoringUptimeCheckConfigMonitoredResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedType, err := expandMonitoringUptimeCheckConfigMonitoredResourceType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - transformedLabels, err := expandMonitoringUptimeCheckConfigMonitoredResourceLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - return transformed, nil -} - -func expandMonitoringUptimeCheckConfigMonitoredResourceType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigMonitoredResourceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_management_connectivity_test_resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_management_connectivity_test_resource.go deleted file mode 100644 index cdf16cda68..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_management_connectivity_test_resource.go +++ /dev/null @@ -1,898 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceNetworkManagementConnectivityTest() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkManagementConnectivityTestCreate, - Read: resourceNetworkManagementConnectivityTestRead, - Update: resourceNetworkManagementConnectivityTestUpdate, - Delete: resourceNetworkManagementConnectivityTestDelete, - - Importer: &schema.ResourceImporter{ - State: resourceNetworkManagementConnectivityTestImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "destination": { - Type: schema.TypeList, - Required: true, - Description: `Required. Destination specification of the Connectivity Test. - -You can use a combination of destination IP address, Compute -Engine VM instance, or VPC network to uniquely identify the -destination location. - -Even if the destination IP address is not unique, the source IP -location is unique. Usually, the analysis can infer the destination -endpoint from route information. - -If the destination you specify is a VM instance and the instance has -multiple network interfaces, then you must also specify either a -destination IP address or VPC network to identify the destination -interface. - -A reachability analysis proceeds even if the destination location -is ambiguous. 
However, the result can include endpoints that you -don't intend to test.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance": { - Type: schema.TypeString, - Optional: true, - Description: `A Compute Engine instance URI.`, - }, - "ip_address": { - Type: schema.TypeString, - Optional: true, - Description: `The IP address of the endpoint, which can be an external or -internal IP. An IPv6 address is only allowed when the test's -destination is a global load balancer VIP.`, - }, - "network": { - Type: schema.TypeString, - Optional: true, - Description: `A Compute Engine network URI.`, - }, - "port": { - Type: schema.TypeInt, - Optional: true, - Description: `The IP protocol port of the endpoint. Only applicable when -protocol is TCP or UDP.`, - }, - "project_id": { - Type: schema.TypeString, - Optional: true, - Description: `Project ID where the endpoint is located. The Project ID can be -derived from the URI if you provide a VM instance or network URI. -The following are two cases where you must provide the project ID: -1. Only the IP address is specified, and the IP address is within -a GCP project. 2. When you are using Shared VPC and the IP address -that you provide is from the service project. In this case, the -network that the IP address resides in is defined in the host -project.`, - }, - }, - }, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Unique name for the connectivity test.`, - }, - "source": { - Type: schema.TypeList, - Required: true, - Description: `Required. Source specification of the Connectivity Test. - -You can use a combination of source IP address, virtual machine -(VM) instance, or Compute Engine network to uniquely identify the -source location. - -Examples: If the source IP address is an internal IP address within -a Google Cloud Virtual Private Cloud (VPC) network, then you must -also specify the VPC network. 
Otherwise, specify the VM instance, -which already contains its internal IP address and VPC network -information. - -If the source of the test is within an on-premises network, then -you must provide the destination VPC network. - -If the source endpoint is a Compute Engine VM instance with multiple -network interfaces, the instance itself is not sufficient to -identify the endpoint. So, you must also specify the source IP -address or VPC network. - -A reachability analysis proceeds even if the source location is -ambiguous. However, the test result may include endpoints that -you don't intend to test.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance": { - Type: schema.TypeString, - Optional: true, - Description: `A Compute Engine instance URI.`, - }, - "ip_address": { - Type: schema.TypeString, - Optional: true, - Description: `The IP address of the endpoint, which can be an external or -internal IP. An IPv6 address is only allowed when the test's -destination is a global load balancer VIP.`, - }, - "network": { - Type: schema.TypeString, - Optional: true, - Description: `A Compute Engine network URI.`, - }, - "network_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"GCP_NETWORK", "NON_GCP_NETWORK", ""}), - Description: `Type of the network where the endpoint is located. Possible values: ["GCP_NETWORK", "NON_GCP_NETWORK"]`, - }, - "port": { - Type: schema.TypeInt, - Optional: true, - Description: `The IP protocol port of the endpoint. Only applicable when -protocol is TCP or UDP.`, - }, - "project_id": { - Type: schema.TypeString, - Optional: true, - Description: `Project ID where the endpoint is located. The Project ID can be -derived from the URI if you provide a VM instance or network URI. -The following are two cases where you must provide the project ID: - -1. Only the IP address is specified, and the IP address is - within a GCP project. -2. 
When you are using Shared VPC and the IP address - that you provide is from the service project. In this case, - the network that the IP address resides in is defined in the - host project.`, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `The user-supplied description of the Connectivity Test. -Maximum of 512 characters.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Resource labels to represent user-provided metadata.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "protocol": { - Type: schema.TypeString, - Optional: true, - Description: `IP Protocol of the test. When not provided, "TCP" is assumed.`, - Default: "TCP", - }, - "related_projects": { - Type: schema.TypeList, - Optional: true, - Description: `Other projects that may be relevant for reachability analysis. -This is applicable to scenarios where a test can cross project -boundaries.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceNetworkManagementConnectivityTestCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNetworkManagementConnectivityTestName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandNetworkManagementConnectivityTestDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - 
obj["description"] = descriptionProp - } - sourceProp, err := expandNetworkManagementConnectivityTestSource(d.Get("source"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source"); !isEmptyValue(reflect.ValueOf(sourceProp)) && (ok || !reflect.DeepEqual(v, sourceProp)) { - obj["source"] = sourceProp - } - destinationProp, err := expandNetworkManagementConnectivityTestDestination(d.Get("destination"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destination"); !isEmptyValue(reflect.ValueOf(destinationProp)) && (ok || !reflect.DeepEqual(v, destinationProp)) { - obj["destination"] = destinationProp - } - protocolProp, err := expandNetworkManagementConnectivityTestProtocol(d.Get("protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(reflect.ValueOf(protocolProp)) && (ok || !reflect.DeepEqual(v, protocolProp)) { - obj["protocol"] = protocolProp - } - relatedProjectsProp, err := expandNetworkManagementConnectivityTestRelatedProjects(d.Get("related_projects"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("related_projects"); !isEmptyValue(reflect.ValueOf(relatedProjectsProp)) && (ok || !reflect.DeepEqual(v, relatedProjectsProp)) { - obj["relatedProjects"] = relatedProjectsProp - } - labelsProp, err := expandNetworkManagementConnectivityTestLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{NetworkManagementBasePath}}projects/{{project}}/locations/global/connectivityTests?testId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ConnectivityTest: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error 
fetching project for ConnectivityTest: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating ConnectivityTest: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/connectivityTests/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = NetworkManagementOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating ConnectivityTest", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create ConnectivityTest: %s", err) - } - - if err := d.Set("name", flattenNetworkManagementConnectivityTestName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/global/connectivityTests/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating ConnectivityTest %q: %#v", d.Id(), res) - - return resourceNetworkManagementConnectivityTestRead(d, meta) -} - -func resourceNetworkManagementConnectivityTestRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{NetworkManagementBasePath}}projects/{{project}}/locations/global/connectivityTests/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ConnectivityTest: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("NetworkManagementConnectivityTest %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - - if err := d.Set("name", flattenNetworkManagementConnectivityTestName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - if err := d.Set("description", flattenNetworkManagementConnectivityTestDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - if err := d.Set("source", flattenNetworkManagementConnectivityTestSource(res["source"], d, config)); err != nil { - return fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - if err := 
d.Set("destination", flattenNetworkManagementConnectivityTestDestination(res["destination"], d, config)); err != nil { - return fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - if err := d.Set("protocol", flattenNetworkManagementConnectivityTestProtocol(res["protocol"], d, config)); err != nil { - return fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - if err := d.Set("related_projects", flattenNetworkManagementConnectivityTestRelatedProjects(res["relatedProjects"], d, config)); err != nil { - return fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - if err := d.Set("labels", flattenNetworkManagementConnectivityTestLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - - return nil -} - -func resourceNetworkManagementConnectivityTestUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ConnectivityTest: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandNetworkManagementConnectivityTestDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - sourceProp, err := expandNetworkManagementConnectivityTestSource(d.Get("source"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceProp)) { - obj["source"] = sourceProp - } - destinationProp, err := expandNetworkManagementConnectivityTestDestination(d.Get("destination"), d, config) - if err != nil { - return err 
- } else if v, ok := d.GetOkExists("destination"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, destinationProp)) { - obj["destination"] = destinationProp - } - protocolProp, err := expandNetworkManagementConnectivityTestProtocol(d.Get("protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, protocolProp)) { - obj["protocol"] = protocolProp - } - relatedProjectsProp, err := expandNetworkManagementConnectivityTestRelatedProjects(d.Get("related_projects"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("related_projects"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, relatedProjectsProp)) { - obj["relatedProjects"] = relatedProjectsProp - } - labelsProp, err := expandNetworkManagementConnectivityTestLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{NetworkManagementBasePath}}projects/{{project}}/locations/global/connectivityTests/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating ConnectivityTest %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("source") { - updateMask = append(updateMask, "source.ipAddress", - "source.port", - "source.instance", - "source.network", - "source.networkType", - "source.projectId") - } - - if d.HasChange("destination") { - updateMask = append(updateMask, "destination.ipAddress", - "destination.port", - "destination.instance", - "destination.network", - "destination.projectId") - } - - if d.HasChange("protocol") { - updateMask = append(updateMask, "protocol") - } - - if d.HasChange("related_projects") { - 
updateMask = append(updateMask, "relatedProjects") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating ConnectivityTest %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating ConnectivityTest %q: %#v", d.Id(), res) - } - - err = NetworkManagementOperationWaitTime( - config, res, project, "Updating ConnectivityTest", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceNetworkManagementConnectivityTestRead(d, meta) -} - -func resourceNetworkManagementConnectivityTestDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ConnectivityTest: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{NetworkManagementBasePath}}projects/{{project}}/locations/global/connectivityTests/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting ConnectivityTest %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", 
billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ConnectivityTest") - } - - err = NetworkManagementOperationWaitTime( - config, res, project, "Deleting ConnectivityTest", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting ConnectivityTest %q: %#v", d.Id(), res) - return nil -} - -func resourceNetworkManagementConnectivityTestImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/connectivityTests/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/connectivityTests/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNetworkManagementConnectivityTestName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenNetworkManagementConnectivityTestDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["ip_address"] = - flattenNetworkManagementConnectivityTestSourceIpAddress(original["ipAddress"], d, config) - transformed["port"] = - flattenNetworkManagementConnectivityTestSourcePort(original["port"], d, config) - transformed["instance"] = - 
flattenNetworkManagementConnectivityTestSourceInstance(original["instance"], d, config) - transformed["network"] = - flattenNetworkManagementConnectivityTestSourceNetwork(original["network"], d, config) - transformed["network_type"] = - flattenNetworkManagementConnectivityTestSourceNetworkType(original["networkType"], d, config) - transformed["project_id"] = - flattenNetworkManagementConnectivityTestSourceProjectId(original["projectId"], d, config) - return []interface{}{transformed} -} -func flattenNetworkManagementConnectivityTestSourceIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestSourcePort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenNetworkManagementConnectivityTestSourceInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestSourceNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestSourceNetworkType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestSourceProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestDestination(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["ip_address"] = - flattenNetworkManagementConnectivityTestDestinationIpAddress(original["ipAddress"], d, config) - transformed["port"] = - flattenNetworkManagementConnectivityTestDestinationPort(original["port"], d, config) - transformed["instance"] = - flattenNetworkManagementConnectivityTestDestinationInstance(original["instance"], d, config) - transformed["network"] = - flattenNetworkManagementConnectivityTestDestinationNetwork(original["network"], d, config) - transformed["project_id"] = - flattenNetworkManagementConnectivityTestDestinationProjectId(original["projectId"], d, config) - return []interface{}{transformed} -} -func flattenNetworkManagementConnectivityTestDestinationIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestDestinationPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenNetworkManagementConnectivityTestDestinationInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestDestinationNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestDestinationProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestRelatedProjects(v interface{}, d *schema.ResourceData, config *Config) interface{} 
{ - return v -} - -func flattenNetworkManagementConnectivityTestLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNetworkManagementConnectivityTestName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - // projects/X/tests/Y - note not "connectivityTests" - f, err := parseGlobalFieldValue("tests", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} - -func expandNetworkManagementConnectivityTestDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIpAddress, err := expandNetworkManagementConnectivityTestSourceIpAddress(original["ip_address"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIpAddress); val.IsValid() && !isEmptyValue(val) { - transformed["ipAddress"] = transformedIpAddress - } - - transformedPort, err := expandNetworkManagementConnectivityTestSourcePort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedInstance, err := expandNetworkManagementConnectivityTestSourceInstance(original["instance"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInstance); val.IsValid() && !isEmptyValue(val) { - transformed["instance"] = transformedInstance - } - - transformedNetwork, err := 
expandNetworkManagementConnectivityTestSourceNetwork(original["network"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNetwork); val.IsValid() && !isEmptyValue(val) { - transformed["network"] = transformedNetwork - } - - transformedNetworkType, err := expandNetworkManagementConnectivityTestSourceNetworkType(original["network_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNetworkType); val.IsValid() && !isEmptyValue(val) { - transformed["networkType"] = transformedNetworkType - } - - transformedProjectId, err := expandNetworkManagementConnectivityTestSourceProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - return transformed, nil -} - -func expandNetworkManagementConnectivityTestSourceIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestSourcePort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestSourceInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestSourceNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestSourceNetworkType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestSourceProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestDestination(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIpAddress, err := expandNetworkManagementConnectivityTestDestinationIpAddress(original["ip_address"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIpAddress); val.IsValid() && !isEmptyValue(val) { - transformed["ipAddress"] = transformedIpAddress - } - - transformedPort, err := expandNetworkManagementConnectivityTestDestinationPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedInstance, err := expandNetworkManagementConnectivityTestDestinationInstance(original["instance"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedInstance); val.IsValid() && !isEmptyValue(val) { - transformed["instance"] = transformedInstance - } - - transformedNetwork, err := expandNetworkManagementConnectivityTestDestinationNetwork(original["network"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNetwork); val.IsValid() && !isEmptyValue(val) { - transformed["network"] = transformedNetwork - } - - transformedProjectId, err := expandNetworkManagementConnectivityTestDestinationProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - return transformed, nil -} - -func expandNetworkManagementConnectivityTestDestinationIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestDestinationPort(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestDestinationInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestDestinationNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestDestinationProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestRelatedProjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_services_edge_cache_keyset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_services_edge_cache_keyset.go deleted file mode 100644 index b63af96dbe..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_services_edge_cache_keyset.go +++ /dev/null @@ -1,576 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated 
by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceNetworkServicesEdgeCacheKeyset() *schema.Resource { - return &schema.Resource{ - Create: resourceNetworkServicesEdgeCacheKeysetCreate, - Read: resourceNetworkServicesEdgeCacheKeysetRead, - Update: resourceNetworkServicesEdgeCacheKeysetUpdate, - Delete: resourceNetworkServicesEdgeCacheKeysetDelete, - - Importer: &schema.ResourceImporter{ - State: resourceNetworkServicesEdgeCacheKeysetImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource; provided by the client when the resource is created. -The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, -and all following characters must be a dash, underscore, letter or digit.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A human-readable description of the resource.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Set of label tags associated with the EdgeCache resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "public_key": { - Type: schema.TypeList, - Optional: true, - Description: `An ordered list of Ed25519 public keys to use for validating signed requests. -You must specify 'public_keys' or 'validation_shared_keys' (or both). 
The keys in 'public_keys' are checked first. -You may specify no more than one Google-managed public key. -If you specify 'public_keys', you must specify at least one (1) key and may specify up to three (3) keys. - -Ed25519 public keys are not secret, and only allow Google to validate a request was signed by your corresponding private key. -Ensure that the private key is kept secret, and that only authorized users can add public keys to a keyset.`, - MinItems: 1, - MaxItems: 3, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the public key. The ID must be 1-63 characters long, and comply with RFC1035. -The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* -which means the first character must be a letter, and all following characters must be a dash, underscore, letter or digit.`, - }, - "managed": { - Type: schema.TypeBool, - Optional: true, - Description: `Set to true to have the CDN automatically manage this public key value.`, - }, - "value": { - Type: schema.TypeString, - Optional: true, - Description: `The base64-encoded value of the Ed25519 public key. The base64 encoding can be padded (44 bytes) or unpadded (43 bytes). -Representations or encodings of the public key other than this will be rejected with an error.`, - Sensitive: true, - }, - }, - }, - AtLeastOneOf: []string{"public_key", "validation_shared_keys"}, - }, - "validation_shared_keys": { - Type: schema.TypeList, - Optional: true, - Description: `An ordered list of shared keys to use for validating signed requests. -Shared keys are secret. Ensure that only authorized users can add 'validation_shared_keys' to a keyset. -You can rotate keys by appending (pushing) a new key to the list of 'validation_shared_keys' and removing any superseded keys. -You must specify 'public_keys' or 'validation_shared_keys' (or both). 
The keys in 'public_keys' are checked first.`, - MinItems: 1, - MaxItems: 3, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secret_version": { - Type: schema.TypeString, - Required: true, - Description: `The name of the secret version in Secret Manager. - -The resource name of the secret version must be in the format 'projects/*/secrets/*/versions/*' where the '*' values are replaced by the secrets themselves. -The secrets must be at least 16 bytes large. The recommended secret size depends on the signature algorithm you are using. -* If you are using HMAC-SHA1, we suggest 20-byte secrets. -* If you are using HMAC-SHA256, we suggest 32-byte secrets. -See RFC 2104, Section 3 for more details on these recommendations.`, - }, - }, - }, - AtLeastOneOf: []string{"public_key", "validation_shared_keys"}, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceNetworkServicesEdgeCacheKeysetCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandNetworkServicesEdgeCacheKeysetDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandNetworkServicesEdgeCacheKeysetLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - publicKeysProp, err := expandNetworkServicesEdgeCacheKeysetPublicKey(d.Get("public_key"), d, config) - if err != nil { - return err - 
} else if v, ok := d.GetOkExists("public_key"); !isEmptyValue(reflect.ValueOf(publicKeysProp)) && (ok || !reflect.DeepEqual(v, publicKeysProp)) { - obj["publicKeys"] = publicKeysProp - } - validationSharedKeysProp, err := expandNetworkServicesEdgeCacheKeysetValidationSharedKeys(d.Get("validation_shared_keys"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("validation_shared_keys"); !isEmptyValue(reflect.ValueOf(validationSharedKeysProp)) && (ok || !reflect.DeepEqual(v, validationSharedKeysProp)) { - obj["validationSharedKeys"] = validationSharedKeysProp - } - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheKeysets?edgeCacheKeysetId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new EdgeCacheKeyset: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for EdgeCacheKeyset: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating EdgeCacheKeyset: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = NetworkServicesOperationWaitTime( - config, res, project, "Creating EdgeCacheKeyset", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create EdgeCacheKeyset: %s", err) - } - - log.Printf("[DEBUG] Finished creating EdgeCacheKeyset %q: %#v", d.Id(), res) - - 
return resourceNetworkServicesEdgeCacheKeysetRead(d, meta) -} - -func resourceNetworkServicesEdgeCacheKeysetRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for EdgeCacheKeyset: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("NetworkServicesEdgeCacheKeyset %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) - } - - if err := d.Set("description", flattenNetworkServicesEdgeCacheKeysetDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) - } - if err := d.Set("labels", flattenNetworkServicesEdgeCacheKeysetLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) - } - if err := d.Set("public_key", flattenNetworkServicesEdgeCacheKeysetPublicKey(res["publicKeys"], d, config)); err != nil { - return fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) - } - if err := d.Set("validation_shared_keys", flattenNetworkServicesEdgeCacheKeysetValidationSharedKeys(res["validationSharedKeys"], d, config)); err != nil { - return fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) - } - - return nil -} - -func resourceNetworkServicesEdgeCacheKeysetUpdate(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for EdgeCacheKeyset: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandNetworkServicesEdgeCacheKeysetDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandNetworkServicesEdgeCacheKeysetLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - publicKeysProp, err := expandNetworkServicesEdgeCacheKeysetPublicKey(d.Get("public_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("public_key"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, publicKeysProp)) { - obj["publicKeys"] = publicKeysProp - } - validationSharedKeysProp, err := expandNetworkServicesEdgeCacheKeysetValidationSharedKeys(d.Get("validation_shared_keys"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("validation_shared_keys"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, validationSharedKeysProp)) { - obj["validationSharedKeys"] = validationSharedKeysProp - } - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating EdgeCacheKeyset %q: %#v", d.Id(), obj) - updateMask := []string{} - - if 
d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("public_key") { - updateMask = append(updateMask, "publicKeys") - } - - if d.HasChange("validation_shared_keys") { - updateMask = append(updateMask, "validationSharedKeys") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating EdgeCacheKeyset %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating EdgeCacheKeyset %q: %#v", d.Id(), res) - } - - err = NetworkServicesOperationWaitTime( - config, res, project, "Updating EdgeCacheKeyset", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceNetworkServicesEdgeCacheKeysetRead(d, meta) -} - -func resourceNetworkServicesEdgeCacheKeysetDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for EdgeCacheKeyset: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting EdgeCacheKeyset %q", d.Id()) - - // err == 
nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EdgeCacheKeyset") - } - - err = NetworkServicesOperationWaitTime( - config, res, project, "Deleting EdgeCacheKeyset", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting EdgeCacheKeyset %q: %#v", d.Id(), res) - return nil -} - -func resourceNetworkServicesEdgeCacheKeysetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/edgeCacheKeysets/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNetworkServicesEdgeCacheKeysetDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheKeysetLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheKeysetPublicKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - 
"id": flattenNetworkServicesEdgeCacheKeysetPublicKeyId(original["id"], d, config), - "value": flattenNetworkServicesEdgeCacheKeysetPublicKeyValue(original["value"], d, config), - "managed": flattenNetworkServicesEdgeCacheKeysetPublicKeyManaged(original["managed"], d, config), - }) - } - return transformed -} -func flattenNetworkServicesEdgeCacheKeysetPublicKeyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheKeysetPublicKeyValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheKeysetPublicKeyManaged(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheKeysetValidationSharedKeys(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "secret_version": flattenNetworkServicesEdgeCacheKeysetValidationSharedKeysSecretVersion(original["secretVersion"], d, config), - }) - } - return transformed -} -func flattenNetworkServicesEdgeCacheKeysetValidationSharedKeysSecretVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNetworkServicesEdgeCacheKeysetDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheKeysetLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil 
-} - -func expandNetworkServicesEdgeCacheKeysetPublicKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandNetworkServicesEdgeCacheKeysetPublicKeyId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedValue, err := expandNetworkServicesEdgeCacheKeysetPublicKeyValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - transformedManaged, err := expandNetworkServicesEdgeCacheKeysetPublicKeyManaged(original["managed"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedManaged); val.IsValid() && !isEmptyValue(val) { - transformed["managed"] = transformedManaged - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNetworkServicesEdgeCacheKeysetPublicKeyId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheKeysetPublicKeyValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheKeysetPublicKeyManaged(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheKeysetValidationSharedKeys(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - 
original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSecretVersion, err := expandNetworkServicesEdgeCacheKeysetValidationSharedKeysSecretVersion(original["secret_version"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecretVersion); val.IsValid() && !isEmptyValue(val) { - transformed["secretVersion"] = transformedSecretVersion - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNetworkServicesEdgeCacheKeysetValidationSharedKeysSecretVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_notebooks_environment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_notebooks_environment.go deleted file mode 100644 index 785f28902e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_notebooks_environment.go +++ /dev/null @@ -1,589 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceNotebooksEnvironment() *schema.Resource { - return &schema.Resource{ - Create: resourceNotebooksEnvironmentCreate, - Read: resourceNotebooksEnvironmentRead, - Update: resourceNotebooksEnvironmentUpdate, - Delete: resourceNotebooksEnvironmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceNotebooksEnvironmentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the zone where the machine resides.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name specified for the Environment instance. -Format: projects/{project_id}/locations/{location}/environments/{environmentId}`, - }, - "container_image": { - Type: schema.TypeList, - Optional: true, - Description: `Use a container image to start the notebook instance.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "repository": { - Type: schema.TypeString, - Required: true, - Description: `The path to the container image repository. -For example: gcr.io/{project_id}/{imageName}`, - }, - "tag": { - Type: schema.TypeString, - Optional: true, - Description: `The tag of the container image. 
If not specified, this defaults to the latest tag.`, - }, - }, - }, - ExactlyOneOf: []string{"vm_image", "container_image"}, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A brief description of this environment.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `Display name of this environment for the UI.`, - }, - "post_startup_script": { - Type: schema.TypeString, - Optional: true, - Description: `Path to a Bash script that automatically runs after a notebook instance fully boots up. -The path must be a URL or Cloud Storage path. Example: "gs://path-to-file/file-name"`, - }, - "vm_image": { - Type: schema.TypeList, - Optional: true, - Description: `Use a Compute Engine VM image to start the notebook instance.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Required: true, - Description: `The name of the Google Cloud project that this VM image belongs to. 
-Format: projects/{project_id}`, - }, - "image_family": { - Type: schema.TypeString, - Optional: true, - Description: `Use this VM image family to find the image; the newest image in this family will be used.`, - }, - "image_name": { - Type: schema.TypeString, - Optional: true, - Description: `Use VM image name to find the image.`, - }, - }, - }, - ExactlyOneOf: []string{"vm_image", "container_image"}, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Instance creation time`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceNotebooksEnvironmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandNotebooksEnvironmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandNotebooksEnvironmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - postStartupScriptProp, err := expandNotebooksEnvironmentPostStartupScript(d.Get("post_startup_script"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("post_startup_script"); !isEmptyValue(reflect.ValueOf(postStartupScriptProp)) && (ok || !reflect.DeepEqual(v, postStartupScriptProp)) { - obj["postStartupScript"] = postStartupScriptProp - } - vmImageProp, err := 
expandNotebooksEnvironmentVmImage(d.Get("vm_image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vm_image"); !isEmptyValue(reflect.ValueOf(vmImageProp)) && (ok || !reflect.DeepEqual(v, vmImageProp)) { - obj["vmImage"] = vmImageProp - } - containerImageProp, err := expandNotebooksEnvironmentContainerImage(d.Get("container_image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("container_image"); !isEmptyValue(reflect.ValueOf(containerImageProp)) && (ok || !reflect.DeepEqual(v, containerImageProp)) { - obj["containerImage"] = containerImageProp - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/environments?environmentId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Environment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Environment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Environment: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/environments/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = NotebooksOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Environment", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return 
fmt.Errorf("Error waiting to create Environment: %s", err) - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/environments/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Environment %q: %#v", d.Id(), res) - - return resourceNotebooksEnvironmentRead(d, meta) -} - -func resourceNotebooksEnvironmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/environments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Environment: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("NotebooksEnvironment %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - - if err := d.Set("display_name", flattenNotebooksEnvironmentDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("description", flattenNotebooksEnvironmentDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("post_startup_script", flattenNotebooksEnvironmentPostStartupScript(res["postStartupScript"], d, config)); err != nil { - return 
fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("create_time", flattenNotebooksEnvironmentCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("vm_image", flattenNotebooksEnvironmentVmImage(res["vmImage"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("container_image", flattenNotebooksEnvironmentContainerImage(res["containerImage"], d, config)); err != nil { - return fmt.Errorf("Error reading Environment: %s", err) - } - - return nil -} - -func resourceNotebooksEnvironmentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Environment: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandNotebooksEnvironmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandNotebooksEnvironmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - postStartupScriptProp, err := expandNotebooksEnvironmentPostStartupScript(d.Get("post_startup_script"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("post_startup_script"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, postStartupScriptProp)) { - obj["postStartupScript"] 
= postStartupScriptProp - } - vmImageProp, err := expandNotebooksEnvironmentVmImage(d.Get("vm_image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vm_image"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, vmImageProp)) { - obj["vmImage"] = vmImageProp - } - containerImageProp, err := expandNotebooksEnvironmentContainerImage(d.Get("container_image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("container_image"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, containerImageProp)) { - obj["containerImage"] = containerImageProp - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/environments/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Environment %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Environment %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Environment %q: %#v", d.Id(), res) - } - - err = NotebooksOperationWaitTime( - config, res, project, "Updating Environment", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceNotebooksEnvironmentRead(d, meta) -} - -func resourceNotebooksEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Environment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, 
config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/environments/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Environment %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Environment") - } - - err = NotebooksOperationWaitTime( - config, res, project, "Deleting Environment", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Environment %q: %#v", d.Id(), res) - return nil -} - -func resourceNotebooksEnvironmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/environments/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/environments/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNotebooksEnvironmentDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentPostStartupScript(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentCreateTime(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenNotebooksEnvironmentVmImage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project"] = - flattenNotebooksEnvironmentVmImageProject(original["project"], d, config) - transformed["image_name"] = - flattenNotebooksEnvironmentVmImageImageName(original["imageName"], d, config) - transformed["image_family"] = - flattenNotebooksEnvironmentVmImageImageFamily(original["imageFamily"], d, config) - return []interface{}{transformed} -} -func flattenNotebooksEnvironmentVmImageProject(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentVmImageImageName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentVmImageImageFamily(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentContainerImage(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["repository"] = - flattenNotebooksEnvironmentContainerImageRepository(original["repository"], d, config) - transformed["tag"] = - flattenNotebooksEnvironmentContainerImageTag(original["tag"], d, config) - return []interface{}{transformed} -} -func flattenNotebooksEnvironmentContainerImageRepository(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentContainerImageTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNotebooksEnvironmentDisplayName(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandNotebooksEnvironmentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksEnvironmentPostStartupScript(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksEnvironmentVmImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProject, err := expandNotebooksEnvironmentVmImageProject(original["project"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProject); val.IsValid() && !isEmptyValue(val) { - transformed["project"] = transformedProject - } - - transformedImageName, err := expandNotebooksEnvironmentVmImageImageName(original["image_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedImageName); val.IsValid() && !isEmptyValue(val) { - transformed["imageName"] = transformedImageName - } - - transformedImageFamily, err := expandNotebooksEnvironmentVmImageImageFamily(original["image_family"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedImageFamily); val.IsValid() && !isEmptyValue(val) { - transformed["imageFamily"] = transformedImageFamily - } - - return transformed, nil -} - -func expandNotebooksEnvironmentVmImageProject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksEnvironmentVmImageImageName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksEnvironmentVmImageImageFamily(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return 
v, nil -} - -func expandNotebooksEnvironmentContainerImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRepository, err := expandNotebooksEnvironmentContainerImageRepository(original["repository"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !isEmptyValue(val) { - transformed["repository"] = transformedRepository - } - - transformedTag, err := expandNotebooksEnvironmentContainerImageTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - return transformed, nil -} - -func expandNotebooksEnvironmentContainerImageRepository(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksEnvironmentContainerImageTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_notebooks_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_notebooks_instance.go deleted file mode 100644 index a4579a119f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_notebooks_instance.go +++ /dev/null @@ -1,1376 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// 
changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -const notebooksInstanceGoogleProvidedLabel = "goog-caip-notebook" - -func NotebooksInstanceLabelDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // Suppress diffs for the label provided by Google - if strings.Contains(k, notebooksInstanceGoogleProvidedLabel) && new == "" { - return true - } - - // Let diff be determined by labels (above) - if strings.Contains(k, "labels.%") { - return true - } - - // For other keys, don't suppress diff. - return false -} - -func ResourceNotebooksInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceNotebooksInstanceCreate, - Read: resourceNotebooksInstanceRead, - Update: resourceNotebooksInstanceUpdate, - Delete: resourceNotebooksInstanceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceNotebooksInstanceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the zone where the machine resides.`, - }, - "machine_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to a machine type which defines VM kind.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name specified for the Notebook instance.`, - }, - 
"accelerator_config": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The hardware accelerator used on this instance. If you use accelerators, -make sure that your configuration has enough vCPUs and memory to support the -machineType you have selected.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "core_count": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: `Count of cores of this accelerator.`, - }, - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"ACCELERATOR_TYPE_UNSPECIFIED", "NVIDIA_TESLA_K80", "NVIDIA_TESLA_P100", "NVIDIA_TESLA_V100", "NVIDIA_TESLA_P4", "NVIDIA_TESLA_T4", "NVIDIA_TESLA_T4_VWS", "NVIDIA_TESLA_P100_VWS", "NVIDIA_TESLA_P4_VWS", "NVIDIA_TESLA_A100", "TPU_V2", "TPU_V3"}), - Description: `Type of this accelerator. Possible values: ["ACCELERATOR_TYPE_UNSPECIFIED", "NVIDIA_TESLA_K80", "NVIDIA_TESLA_P100", "NVIDIA_TESLA_V100", "NVIDIA_TESLA_P4", "NVIDIA_TESLA_T4", "NVIDIA_TESLA_T4_VWS", "NVIDIA_TESLA_P100_VWS", "NVIDIA_TESLA_P4_VWS", "NVIDIA_TESLA_A100", "TPU_V2", "TPU_V3"]`, - }, - }, - }, - }, - "boot_disk_size_gb": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The size of the boot disk in GB attached to this instance, -up to a maximum of 64000 GB (64 TB). The minimum recommended value is 100 GB. -If not specified, this defaults to 100.`, - }, - "boot_disk_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME", ""}), - Description: `Possible disk types for notebook instances. 
Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME"]`, - }, - "container_image": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Use a container image to start the notebook instance.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "repository": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The path to the container image repository. -For example: gcr.io/{project_id}/{imageName}`, - }, - "tag": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The tag of the container image. If not specified, this defaults to the latest tag.`, - }, - }, - }, - ExactlyOneOf: []string{"vm_image", "container_image"}, - }, - "custom_gpu_driver_path": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specify a custom Cloud Storage path where the GPU driver is stored. -If not specified, we'll automatically choose from official GPU drivers.`, - }, - "data_disk_size_gb": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The size of the data disk in GB attached to this instance, -up to a maximum of 64000 GB (64 TB). -You can choose the size of the data disk based on how big your notebooks and data are. -If not specified, this defaults to 100.`, - }, - "data_disk_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME", ""}), - Description: `Possible disk types for notebook instances. 
Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME"]`, - }, - "disk_encryption": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"DISK_ENCRYPTION_UNSPECIFIED", "GMEK", "CMEK", ""}), - DiffSuppressFunc: emptyOrDefaultStringSuppress("DISK_ENCRYPTION_UNSPECIFIED"), - Description: `Disk encryption method used on the boot and data disks, defaults to GMEK. Possible values: ["DISK_ENCRYPTION_UNSPECIFIED", "GMEK", "CMEK"]`, - }, - "install_gpu_driver": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether the end user authorizes Google Cloud to install GPU driver -on this instance. If this field is empty or set to false, the GPU driver -won't be installed. Only applicable to instances with GPUs.`, - }, - "instance_owners": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The list of owners of this instance after creation. -Format: alias@example.com. -Currently supports one owner only. -If not specified, all of the service account users of -your VM instance's service account can use the instance.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "kms_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The KMS key used to encrypt the disks, only applicable if diskEncryption is CMEK. -Format: projects/{project_id}/locations/{location}/keyRings/{key_ring_id}/cryptoKeys/{key_id}`, - }, - "labels": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - DiffSuppressFunc: NotebooksInstanceLabelDiffSuppress, - Description: `Labels to apply to this instance. These can be later modified by the setLabels method. -An object containing a list of "key": value pairs. 
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "metadata": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Custom metadata to apply to this instance. -An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "network": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the VPC that this instance is in. -Format: projects/{project_id}/global/networks/{network_id}`, - }, - "nic_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"UNSPECIFIED_NIC_TYPE", "VIRTIO_NET", "GVNIC", ""}), - Description: `The type of vNIC driver. Possible values: ["UNSPECIFIED_NIC_TYPE", "VIRTIO_NET", "GVNIC"]`, - }, - "no_proxy_access": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The notebook instance will not register with the proxy..`, - }, - "no_public_ip": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `No public IP will be assigned to this instance.`, - }, - "no_remove_data_disk": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If true, the data disk will not be auto deleted when deleting the instance.`, - }, - "post_startup_script": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Path to a Bash script that automatically runs after a -notebook instance fully boots up. 
The path must be a URL -or Cloud Storage path (gs://path-to-file/file-name).`, - }, - "reservation_affinity": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Reservation Affinity for consuming Zonal reservation.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "consume_reservation_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"}), - Description: `The type of Compute Reservation. Possible values: ["NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"]`, - }, - "key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Corresponds to the label key of reservation resource.`, - }, - "values": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Corresponds to the label values of reservation resource.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "service_account": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The service account on this instance, giving access to other -Google Cloud services. You can use any service account within -the same project, but you must have the service account user -permission to use the instance. If not specified, -the Compute Engine default service account is used.`, - }, - "service_account_scopes": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Optional. The URIs of service account scopes to be included in Compute Engine instances. 
-If not specified, the following scopes are defined: -- https://www.googleapis.com/auth/cloud-platform -- https://www.googleapis.com/auth/userinfo.email`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "shielded_instance_config": { - Type: schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `A set of Shielded Instance options. Check [Images using supported Shielded VM features] -Not all combinations are valid`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enable_integrity_monitoring": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Defines whether the instance has integrity monitoring enabled. Enables monitoring and attestation of the -boot integrity of the instance. The attestation is performed against the integrity policy baseline. -This baseline is initially derived from the implicitly trusted boot image when the instance is created. -Enabled by default.`, - Default: true, - }, - "enable_secure_boot": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Defines whether the instance has Secure Boot enabled. Secure Boot helps ensure that the system only runs -authentic software by verifying the digital signature of all boot components, and halting the boot process -if signature verification fails. -Disabled by default.`, - }, - "enable_vtpm": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Defines whether the instance has the vTPM enabled. -Enabled by default.`, - Default: true, - }, - }, - }, - }, - "subnet": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the subnet that this instance is in. 
-Format: projects/{project_id}/regions/{region}/subnetworks/{subnetwork_id}`, - }, - "tags": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The Compute Engine tags to add to instance.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "vm_image": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Use a Compute Engine VM image to start the notebook instance.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the Google Cloud project that this VM image belongs to. -Format: projects/{project_id}`, - }, - "image_family": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Use this VM image family to find the image; the newest image in this family will be used.`, - }, - "image_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Use VM image name to find the image.`, - }, - }, - }, - ExactlyOneOf: []string{"vm_image", "container_image"}, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Instance creation time`, - }, - "proxy_uri": { - Type: schema.TypeString, - Computed: true, - Description: `The proxy endpoint that is used to access the Jupyter notebook. -Only returned when the resource is in a 'PROVISIONED' state. 
If -needed you can utilize 'terraform apply -refresh-only' to await -the population of this value.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `The state of this instance.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Instance update time.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceNotebooksInstanceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - machineTypeProp, err := expandNotebooksInstanceMachineType(d.Get("machine_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("machine_type"); !isEmptyValue(reflect.ValueOf(machineTypeProp)) && (ok || !reflect.DeepEqual(v, machineTypeProp)) { - obj["machineType"] = machineTypeProp - } - postStartupScriptProp, err := expandNotebooksInstancePostStartupScript(d.Get("post_startup_script"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("post_startup_script"); !isEmptyValue(reflect.ValueOf(postStartupScriptProp)) && (ok || !reflect.DeepEqual(v, postStartupScriptProp)) { - obj["postStartupScript"] = postStartupScriptProp - } - instanceOwnersProp, err := expandNotebooksInstanceInstanceOwners(d.Get("instance_owners"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance_owners"); !isEmptyValue(reflect.ValueOf(instanceOwnersProp)) && (ok || !reflect.DeepEqual(v, instanceOwnersProp)) { - obj["instanceOwners"] = instanceOwnersProp - } - serviceAccountProp, err := expandNotebooksInstanceServiceAccount(d.Get("service_account"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account"); 
!isEmptyValue(reflect.ValueOf(serviceAccountProp)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { - obj["serviceAccount"] = serviceAccountProp - } - serviceAccountScopesProp, err := expandNotebooksInstanceServiceAccountScopes(d.Get("service_account_scopes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account_scopes"); !isEmptyValue(reflect.ValueOf(serviceAccountScopesProp)) && (ok || !reflect.DeepEqual(v, serviceAccountScopesProp)) { - obj["serviceAccountScopes"] = serviceAccountScopesProp - } - acceleratorConfigProp, err := expandNotebooksInstanceAcceleratorConfig(d.Get("accelerator_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("accelerator_config"); !isEmptyValue(reflect.ValueOf(acceleratorConfigProp)) && (ok || !reflect.DeepEqual(v, acceleratorConfigProp)) { - obj["acceleratorConfig"] = acceleratorConfigProp - } - shieldedInstanceConfigProp, err := expandNotebooksInstanceShieldedInstanceConfig(d.Get("shielded_instance_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("shielded_instance_config"); !isEmptyValue(reflect.ValueOf(shieldedInstanceConfigProp)) && (ok || !reflect.DeepEqual(v, shieldedInstanceConfigProp)) { - obj["shieldedInstanceConfig"] = shieldedInstanceConfigProp - } - nicTypeProp, err := expandNotebooksInstanceNicType(d.Get("nic_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nic_type"); !isEmptyValue(reflect.ValueOf(nicTypeProp)) && (ok || !reflect.DeepEqual(v, nicTypeProp)) { - obj["nicType"] = nicTypeProp - } - reservationAffinityProp, err := expandNotebooksInstanceReservationAffinity(d.Get("reservation_affinity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reservation_affinity"); !isEmptyValue(reflect.ValueOf(reservationAffinityProp)) && (ok || !reflect.DeepEqual(v, reservationAffinityProp)) { - obj["reservationAffinity"] = reservationAffinityProp 
- } - installGpuDriverProp, err := expandNotebooksInstanceInstallGpuDriver(d.Get("install_gpu_driver"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("install_gpu_driver"); !isEmptyValue(reflect.ValueOf(installGpuDriverProp)) && (ok || !reflect.DeepEqual(v, installGpuDriverProp)) { - obj["installGpuDriver"] = installGpuDriverProp - } - customGpuDriverPathProp, err := expandNotebooksInstanceCustomGpuDriverPath(d.Get("custom_gpu_driver_path"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_gpu_driver_path"); !isEmptyValue(reflect.ValueOf(customGpuDriverPathProp)) && (ok || !reflect.DeepEqual(v, customGpuDriverPathProp)) { - obj["customGpuDriverPath"] = customGpuDriverPathProp - } - bootDiskTypeProp, err := expandNotebooksInstanceBootDiskType(d.Get("boot_disk_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("boot_disk_type"); !isEmptyValue(reflect.ValueOf(bootDiskTypeProp)) && (ok || !reflect.DeepEqual(v, bootDiskTypeProp)) { - obj["bootDiskType"] = bootDiskTypeProp - } - bootDiskSizeGbProp, err := expandNotebooksInstanceBootDiskSizeGb(d.Get("boot_disk_size_gb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("boot_disk_size_gb"); !isEmptyValue(reflect.ValueOf(bootDiskSizeGbProp)) && (ok || !reflect.DeepEqual(v, bootDiskSizeGbProp)) { - obj["bootDiskSizeGb"] = bootDiskSizeGbProp - } - dataDiskTypeProp, err := expandNotebooksInstanceDataDiskType(d.Get("data_disk_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_disk_type"); !isEmptyValue(reflect.ValueOf(dataDiskTypeProp)) && (ok || !reflect.DeepEqual(v, dataDiskTypeProp)) { - obj["dataDiskType"] = dataDiskTypeProp - } - dataDiskSizeGbProp, err := expandNotebooksInstanceDataDiskSizeGb(d.Get("data_disk_size_gb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_disk_size_gb"); 
!isEmptyValue(reflect.ValueOf(dataDiskSizeGbProp)) && (ok || !reflect.DeepEqual(v, dataDiskSizeGbProp)) { - obj["dataDiskSizeGb"] = dataDiskSizeGbProp - } - noRemoveDataDiskProp, err := expandNotebooksInstanceNoRemoveDataDisk(d.Get("no_remove_data_disk"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("no_remove_data_disk"); !isEmptyValue(reflect.ValueOf(noRemoveDataDiskProp)) && (ok || !reflect.DeepEqual(v, noRemoveDataDiskProp)) { - obj["noRemoveDataDisk"] = noRemoveDataDiskProp - } - diskEncryptionProp, err := expandNotebooksInstanceDiskEncryption(d.Get("disk_encryption"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disk_encryption"); !isEmptyValue(reflect.ValueOf(diskEncryptionProp)) && (ok || !reflect.DeepEqual(v, diskEncryptionProp)) { - obj["diskEncryption"] = diskEncryptionProp - } - kmsKeyProp, err := expandNotebooksInstanceKmsKey(d.Get("kms_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kms_key"); !isEmptyValue(reflect.ValueOf(kmsKeyProp)) && (ok || !reflect.DeepEqual(v, kmsKeyProp)) { - obj["kmsKey"] = kmsKeyProp - } - noPublicIpProp, err := expandNotebooksInstanceNoPublicIp(d.Get("no_public_ip"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("no_public_ip"); !isEmptyValue(reflect.ValueOf(noPublicIpProp)) && (ok || !reflect.DeepEqual(v, noPublicIpProp)) { - obj["noPublicIp"] = noPublicIpProp - } - noProxyAccessProp, err := expandNotebooksInstanceNoProxyAccess(d.Get("no_proxy_access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("no_proxy_access"); !isEmptyValue(reflect.ValueOf(noProxyAccessProp)) && (ok || !reflect.DeepEqual(v, noProxyAccessProp)) { - obj["noProxyAccess"] = noProxyAccessProp - } - networkProp, err := expandNotebooksInstanceNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); 
!isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - subnetProp, err := expandNotebooksInstanceSubnet(d.Get("subnet"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("subnet"); !isEmptyValue(reflect.ValueOf(subnetProp)) && (ok || !reflect.DeepEqual(v, subnetProp)) { - obj["subnet"] = subnetProp - } - labelsProp, err := expandNotebooksInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - tagsProp, err := expandNotebooksInstanceTags(d.Get("tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tags"); !isEmptyValue(reflect.ValueOf(tagsProp)) && (ok || !reflect.DeepEqual(v, tagsProp)) { - obj["tags"] = tagsProp - } - metadataProp, err := expandNotebooksInstanceMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(reflect.ValueOf(metadataProp)) && (ok || !reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - vmImageProp, err := expandNotebooksInstanceVmImage(d.Get("vm_image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vm_image"); !isEmptyValue(reflect.ValueOf(vmImageProp)) && (ok || !reflect.DeepEqual(v, vmImageProp)) { - obj["vmImage"] = vmImageProp - } - containerImageProp, err := expandNotebooksInstanceContainerImage(d.Get("container_image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("container_image"); !isEmptyValue(reflect.ValueOf(containerImageProp)) && (ok || !reflect.DeepEqual(v, containerImageProp)) { - obj["containerImage"] = containerImageProp - } - - url, err := replaceVars(d, config, 
"{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances?instanceId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Instance: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Instance: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = NotebooksOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Instance", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Instance: %s", err) - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) - - return resourceNotebooksInstanceRead(d, meta) -} - -func resourceNotebooksInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("NotebooksInstance %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - - if err := d.Set("machine_type", flattenNotebooksInstanceMachineType(res["machineType"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("post_startup_script", flattenNotebooksInstancePostStartupScript(res["postStartupScript"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("proxy_uri", flattenNotebooksInstanceProxyUri(res["proxyUri"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("service_account", flattenNotebooksInstanceServiceAccount(res["serviceAccount"], d, config)); err != nil { - return 
fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("service_account_scopes", flattenNotebooksInstanceServiceAccountScopes(res["serviceAccountScopes"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("accelerator_config", flattenNotebooksInstanceAcceleratorConfig(res["acceleratorConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("shielded_instance_config", flattenNotebooksInstanceShieldedInstanceConfig(res["shieldedInstanceConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("nic_type", flattenNotebooksInstanceNicType(res["nicType"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("reservation_affinity", flattenNotebooksInstanceReservationAffinity(res["reservationAffinity"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("state", flattenNotebooksInstanceState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("install_gpu_driver", flattenNotebooksInstanceInstallGpuDriver(res["installGpuDriver"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("custom_gpu_driver_path", flattenNotebooksInstanceCustomGpuDriverPath(res["customGpuDriverPath"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("disk_encryption", flattenNotebooksInstanceDiskEncryption(res["diskEncryption"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("kms_key", flattenNotebooksInstanceKmsKey(res["kmsKey"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("no_public_ip", flattenNotebooksInstanceNoPublicIp(res["noPublicIp"], d, 
config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("no_proxy_access", flattenNotebooksInstanceNoProxyAccess(res["noProxyAccess"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("network", flattenNotebooksInstanceNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("subnet", flattenNotebooksInstanceSubnet(res["subnet"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("labels", flattenNotebooksInstanceLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("tags", flattenNotebooksInstanceTags(res["tags"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("create_time", flattenNotebooksInstanceCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("update_time", flattenNotebooksInstanceUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - - return nil -} - -func resourceNotebooksInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("labels") { - obj := make(map[string]interface{}) - - labelsProp, err := expandNotebooksInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - 
obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}:setLabels") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) - } - - err = NotebooksOperationWaitTime( - config, res, project, "Updating Instance", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceNotebooksInstanceRead(d, meta) -} - -func resourceNotebooksInstanceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Instance %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Instance") - } - - err = NotebooksOperationWaitTime( - config, res, project, "Deleting Instance", userAgent, - 
d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) - return nil -} - -func resourceNotebooksInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNotebooksInstanceMachineType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenNotebooksInstancePostStartupScript(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceProxyUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceServiceAccountScopes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceAcceleratorConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["type"] = - flattenNotebooksInstanceAcceleratorConfigType(original["type"], d, config) - transformed["core_count"] = - flattenNotebooksInstanceAcceleratorConfigCoreCount(original["coreCount"], 
d, config) - return []interface{}{transformed} -} -func flattenNotebooksInstanceAcceleratorConfigType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceAcceleratorConfigCoreCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenNotebooksInstanceShieldedInstanceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enable_integrity_monitoring"] = - flattenNotebooksInstanceShieldedInstanceConfigEnableIntegrityMonitoring(original["enableIntegrityMonitoring"], d, config) - transformed["enable_secure_boot"] = - flattenNotebooksInstanceShieldedInstanceConfigEnableSecureBoot(original["enableSecureBoot"], d, config) - transformed["enable_vtpm"] = - flattenNotebooksInstanceShieldedInstanceConfigEnableVtpm(original["enableVtpm"], d, config) - return []interface{}{transformed} -} -func flattenNotebooksInstanceShieldedInstanceConfigEnableIntegrityMonitoring(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceShieldedInstanceConfigEnableSecureBoot(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceShieldedInstanceConfigEnableVtpm(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceNicType(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
- return v -} - -func flattenNotebooksInstanceReservationAffinity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["consume_reservation_type"] = - flattenNotebooksInstanceReservationAffinityConsumeReservationType(original["consumeReservationType"], d, config) - transformed["key"] = - flattenNotebooksInstanceReservationAffinityKey(original["key"], d, config) - transformed["values"] = - flattenNotebooksInstanceReservationAffinityValues(original["values"], d, config) - return []interface{}{transformed} -} -func flattenNotebooksInstanceReservationAffinityConsumeReservationType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceReservationAffinityKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceReservationAffinityValues(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceInstallGpuDriver(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceCustomGpuDriverPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceDiskEncryption(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceKmsKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceNoPublicIp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceNoProxyAccess(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenNotebooksInstanceNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceSubnet(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceTags(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNotebooksInstanceMachineType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstancePostStartupScript(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceInstanceOwners(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceServiceAccountScopes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceAcceleratorConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedType, err := expandNotebooksInstanceAcceleratorConfigType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - transformedCoreCount, err := expandNotebooksInstanceAcceleratorConfigCoreCount(original["core_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCoreCount); val.IsValid() && !isEmptyValue(val) { - transformed["coreCount"] = transformedCoreCount - } - - return transformed, nil -} - -func expandNotebooksInstanceAcceleratorConfigType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceAcceleratorConfigCoreCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceShieldedInstanceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnableIntegrityMonitoring, err := expandNotebooksInstanceShieldedInstanceConfigEnableIntegrityMonitoring(original["enable_integrity_monitoring"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnableIntegrityMonitoring); val.IsValid() && !isEmptyValue(val) { - transformed["enableIntegrityMonitoring"] = transformedEnableIntegrityMonitoring - } - - transformedEnableSecureBoot, err := expandNotebooksInstanceShieldedInstanceConfigEnableSecureBoot(original["enable_secure_boot"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnableSecureBoot); val.IsValid() && !isEmptyValue(val) { - transformed["enableSecureBoot"] = transformedEnableSecureBoot - } - - transformedEnableVtpm, err := expandNotebooksInstanceShieldedInstanceConfigEnableVtpm(original["enable_vtpm"], d, config) - if err != nil { - return nil, err - } 
else if val := reflect.ValueOf(transformedEnableVtpm); val.IsValid() && !isEmptyValue(val) { - transformed["enableVtpm"] = transformedEnableVtpm - } - - return transformed, nil -} - -func expandNotebooksInstanceShieldedInstanceConfigEnableIntegrityMonitoring(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceShieldedInstanceConfigEnableSecureBoot(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceShieldedInstanceConfigEnableVtpm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceNicType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceReservationAffinity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedConsumeReservationType, err := expandNotebooksInstanceReservationAffinityConsumeReservationType(original["consume_reservation_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConsumeReservationType); val.IsValid() && !isEmptyValue(val) { - transformed["consumeReservationType"] = transformedConsumeReservationType - } - - transformedKey, err := expandNotebooksInstanceReservationAffinityKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - transformedValues, err := expandNotebooksInstanceReservationAffinityValues(original["values"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValues); 
val.IsValid() && !isEmptyValue(val) { - transformed["values"] = transformedValues - } - - return transformed, nil -} - -func expandNotebooksInstanceReservationAffinityConsumeReservationType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceReservationAffinityKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceReservationAffinityValues(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceInstallGpuDriver(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceCustomGpuDriverPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceBootDiskType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceBootDiskSizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceDataDiskType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceDataDiskSizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceNoRemoveDataDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceDiskEncryption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceKmsKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceNoPublicIp(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandNotebooksInstanceNoProxyAccess(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceSubnet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandNotebooksInstanceTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceMetadata(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandNotebooksInstanceVmImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProject, err := expandNotebooksInstanceVmImageProject(original["project"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProject); val.IsValid() && !isEmptyValue(val) { - transformed["project"] = transformedProject - } - - transformedImageFamily, err := expandNotebooksInstanceVmImageImageFamily(original["image_family"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedImageFamily); val.IsValid() && 
!isEmptyValue(val) { - transformed["imageFamily"] = transformedImageFamily - } - - transformedImageName, err := expandNotebooksInstanceVmImageImageName(original["image_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedImageName); val.IsValid() && !isEmptyValue(val) { - transformed["imageName"] = transformedImageName - } - - return transformed, nil -} - -func expandNotebooksInstanceVmImageProject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceVmImageImageFamily(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceVmImageImageName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceContainerImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRepository, err := expandNotebooksInstanceContainerImageRepository(original["repository"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !isEmptyValue(val) { - transformed["repository"] = transformedRepository - } - - transformedTag, err := expandNotebooksInstanceContainerImageTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - return transformed, nil -} - -func expandNotebooksInstanceContainerImageRepository(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceContainerImageTag(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_notebooks_location.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_notebooks_location.go deleted file mode 100644 index 90fc1e2aa7..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_notebooks_location.go +++ /dev/null @@ -1,311 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceNotebooksLocation() *schema.Resource { - return &schema.Resource{ - Create: resourceNotebooksLocationCreate, - Read: resourceNotebooksLocationRead, - Update: resourceNotebooksLocationUpdate, - Delete: resourceNotebooksLocationDelete, - - Importer: &schema.ResourceImporter{ - State: resourceNotebooksLocationImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the Location resource.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: 
schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceNotebooksLocationCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNotebooksLocationName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Location: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Location: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Location: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = NotebooksOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Location", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Location: %s", err) - } - - if err := d.Set("name", 
flattenNotebooksLocationName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Location %q: %#v", d.Id(), res) - - return resourceNotebooksLocationRead(d, meta) -} - -func resourceNotebooksLocationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Location: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("NotebooksLocation %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Location: %s", err) - } - - if err := d.Set("name", flattenNotebooksLocationName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Location: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading Location: %s", err) - } - - return nil -} - -func resourceNotebooksLocationUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" 
- - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Location: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandNotebooksLocationName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Location %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Location %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Location %q: %#v", d.Id(), res) - } - - err = NotebooksOperationWaitTime( - config, res, project, "Updating Location", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceNotebooksLocationRead(d, meta) -} - -func resourceNotebooksLocationDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Location: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Location %q", d.Id()) - - // err == nil indicates that the 
billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Location") - } - - err = NotebooksOperationWaitTime( - config, res, project, "Deleting Location", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Location %q: %#v", d.Id(), res) - return nil -} - -func resourceNotebooksLocationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNotebooksLocationName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandNotebooksLocationName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_organization_access_approval_settings.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_organization_access_approval_settings.go deleted file mode 100644 index 94ba2946c6..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_organization_access_approval_settings.go +++ /dev/null @@ -1,496 +0,0 @@ -// 
---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAccessApprovalOrganizationSettings() *schema.Resource { - return &schema.Resource{ - Create: resourceAccessApprovalOrganizationSettingsCreate, - Read: resourceAccessApprovalOrganizationSettingsRead, - Update: resourceAccessApprovalOrganizationSettingsUpdate, - Delete: resourceAccessApprovalOrganizationSettingsDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAccessApprovalOrganizationSettingsImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "enrolled_services": { - Type: schema.TypeSet, - Required: true, - Description: `A list of Google Cloud Services for which the given resource has Access Approval enrolled. -Access requests for the resource given by name against any of these services contained here will be required -to have explicit approval. Enrollment can be done for individual services. 
- -A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded.`, - Elem: accessapprovalOrganizationSettingsEnrolledServicesSchema(), - Set: accessApprovalEnrolledServicesHash, - }, - "organization_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the organization of the access approval settings.`, - }, - "active_key_version": { - Type: schema.TypeString, - Optional: true, - Description: `The asymmetric crypto key version to use for signing approval requests. -Empty active_key_version indicates that a Google-managed key should be used for signing.`, - }, - "notification_emails": { - Type: schema.TypeSet, - Computed: true, - Optional: true, - Description: `A list of email addresses to which notifications relating to approval requests should be sent. -Notifications relating to a resource will be sent to all emails in the settings of ancestor -resources of that resource. A maximum of 50 email addresses are allowed.`, - MaxItems: 50, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "ancestor_has_active_key_version": { - Type: schema.TypeBool, - Computed: true, - Description: `This field will always be unset for the organization since organizations do not have ancestors.`, - }, - "enrolled_ancestor": { - Type: schema.TypeBool, - Computed: true, - Description: `This field will always be unset for the organization since organizations do not have ancestors.`, - }, - "invalid_key_version": { - Type: schema.TypeBool, - Computed: true, - Description: `If the field is true, that indicates that there is some configuration issue with the active_key_version -configured on this Organization (e.g. it doesn't exist or the Access Approval service account doesn't have the -correct permissions on it, etc.).`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the settings. 
Format is "organizations/{organization_id}/accessApprovalSettings"`, - }, - }, - UseJSONNumber: true, - } -} - -func accessapprovalOrganizationSettingsEnrolledServicesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cloud_product": { - Type: schema.TypeString, - Required: true, - Description: `The product for which Access Approval will be enrolled. Allowed values are listed (case-sensitive): - all - appengine.googleapis.com - bigquery.googleapis.com - bigtable.googleapis.com - cloudkms.googleapis.com - compute.googleapis.com - dataflow.googleapis.com - iam.googleapis.com - pubsub.googleapis.com - storage.googleapis.com`, - }, - "enrollment_level": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"BLOCK_ALL", ""}), - Description: `The enrollment level of the service. Default value: "BLOCK_ALL" Possible values: ["BLOCK_ALL"]`, - Default: "BLOCK_ALL", - }, - }, - } -} - -func resourceAccessApprovalOrganizationSettingsCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - notificationEmailsProp, err := expandAccessApprovalOrganizationSettingsNotificationEmails(d.Get("notification_emails"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_emails"); !isEmptyValue(reflect.ValueOf(notificationEmailsProp)) && (ok || !reflect.DeepEqual(v, notificationEmailsProp)) { - obj["notificationEmails"] = notificationEmailsProp - } - enrolledServicesProp, err := expandAccessApprovalOrganizationSettingsEnrolledServices(d.Get("enrolled_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enrolled_services"); !isEmptyValue(reflect.ValueOf(enrolledServicesProp)) && (ok || !reflect.DeepEqual(v, enrolledServicesProp)) { - obj["enrolledServices"] = 
enrolledServicesProp - } - activeKeyVersionProp, err := expandAccessApprovalOrganizationSettingsActiveKeyVersion(d.Get("active_key_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("active_key_version"); !isEmptyValue(reflect.ValueOf(activeKeyVersionProp)) && (ok || !reflect.DeepEqual(v, activeKeyVersionProp)) { - obj["activeKeyVersion"] = activeKeyVersionProp - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}organizations/{{organization_id}}/accessApprovalSettings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new OrganizationSettings: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - updateMask := []string{} - - if d.HasChange("notification_emails") { - updateMask = append(updateMask, "notificationEmails") - } - - if d.HasChange("enrolled_services") { - updateMask = append(updateMask, "enrolledServices") - } - - if d.HasChange("active_key_version") { - updateMask = append(updateMask, "activeKeyVersion") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating OrganizationSettings: %s", err) - } - if err := d.Set("name", flattenAccessApprovalOrganizationSettingsName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "organizations/{{organization_id}}/accessApprovalSettings") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - 
log.Printf("[DEBUG] Finished creating OrganizationSettings %q: %#v", d.Id(), res) - - return resourceAccessApprovalOrganizationSettingsRead(d, meta) -} - -func resourceAccessApprovalOrganizationSettingsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}organizations/{{organization_id}}/accessApprovalSettings") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AccessApprovalOrganizationSettings %q", d.Id())) - } - - if err := d.Set("name", flattenAccessApprovalOrganizationSettingsName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading OrganizationSettings: %s", err) - } - if err := d.Set("notification_emails", flattenAccessApprovalOrganizationSettingsNotificationEmails(res["notificationEmails"], d, config)); err != nil { - return fmt.Errorf("Error reading OrganizationSettings: %s", err) - } - if err := d.Set("enrolled_services", flattenAccessApprovalOrganizationSettingsEnrolledServices(res["enrolledServices"], d, config)); err != nil { - return fmt.Errorf("Error reading OrganizationSettings: %s", err) - } - if err := d.Set("enrolled_ancestor", flattenAccessApprovalOrganizationSettingsEnrolledAncestor(res["enrolledAncestor"], d, config)); err != nil { - return fmt.Errorf("Error reading OrganizationSettings: %s", err) - } - if err := d.Set("active_key_version", flattenAccessApprovalOrganizationSettingsActiveKeyVersion(res["activeKeyVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading OrganizationSettings: %s", err) - } - if err 
:= d.Set("ancestor_has_active_key_version", flattenAccessApprovalOrganizationSettingsAncestorHasActiveKeyVersion(res["ancestorHasActiveKeyVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading OrganizationSettings: %s", err) - } - if err := d.Set("invalid_key_version", flattenAccessApprovalOrganizationSettingsInvalidKeyVersion(res["invalidKeyVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading OrganizationSettings: %s", err) - } - - return nil -} - -func resourceAccessApprovalOrganizationSettingsUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - notificationEmailsProp, err := expandAccessApprovalOrganizationSettingsNotificationEmails(d.Get("notification_emails"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_emails"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationEmailsProp)) { - obj["notificationEmails"] = notificationEmailsProp - } - enrolledServicesProp, err := expandAccessApprovalOrganizationSettingsEnrolledServices(d.Get("enrolled_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enrolled_services"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enrolledServicesProp)) { - obj["enrolledServices"] = enrolledServicesProp - } - activeKeyVersionProp, err := expandAccessApprovalOrganizationSettingsActiveKeyVersion(d.Get("active_key_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("active_key_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, activeKeyVersionProp)) { - obj["activeKeyVersion"] = activeKeyVersionProp - } - - url, err := replaceVars(d, config, 
"{{AccessApprovalBasePath}}organizations/{{organization_id}}/accessApprovalSettings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating OrganizationSettings %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("notification_emails") { - updateMask = append(updateMask, "notificationEmails") - } - - if d.HasChange("enrolled_services") { - updateMask = append(updateMask, "enrolledServices") - } - - if d.HasChange("active_key_version") { - updateMask = append(updateMask, "activeKeyVersion") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating OrganizationSettings %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating OrganizationSettings %q: %#v", d.Id(), res) - } - - return resourceAccessApprovalOrganizationSettingsRead(d, meta) -} - -func resourceAccessApprovalOrganizationSettingsDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["notificationEmails"] = []string{} - obj["enrolledServices"] = []string{} - obj["activeKeyVersion"] = "" - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}organizations/{{organization_id}}/accessApprovalSettings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Emptying OrganizationSettings %q: %#v", d.Id(), obj) - updateMask := []string{} - - updateMask = append(updateMask, 
"notificationEmails") - updateMask = append(updateMask, "enrolledServices") - updateMask = append(updateMask, "activeKeyVersion") - - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - res, err := SendRequestWithTimeout(config, "PATCH", "", url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error emptying OrganizationSettings %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished emptying OrganizationSettings %q: %#v", d.Id(), res) - } - - return nil -} - -func resourceAccessApprovalOrganizationSettingsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "organizations/(?P[^/]+)/accessApprovalSettings", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "organizations/{{organization_id}}/accessApprovalSettings") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenAccessApprovalOrganizationSettingsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalOrganizationSettingsNotificationEmails(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenAccessApprovalOrganizationSettingsEnrolledServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(accessApprovalEnrolledServicesHash, []interface{}{}) - for _, raw := range l { - original := 
raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "cloud_product": flattenAccessApprovalOrganizationSettingsEnrolledServicesCloudProduct(original["cloudProduct"], d, config), - "enrollment_level": flattenAccessApprovalOrganizationSettingsEnrolledServicesEnrollmentLevel(original["enrollmentLevel"], d, config), - }) - } - return transformed -} -func flattenAccessApprovalOrganizationSettingsEnrolledServicesCloudProduct(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalOrganizationSettingsEnrolledServicesEnrollmentLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalOrganizationSettingsEnrolledAncestor(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalOrganizationSettingsActiveKeyVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalOrganizationSettingsAncestorHasActiveKeyVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalOrganizationSettingsInvalidKeyVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessApprovalOrganizationSettingsNotificationEmails(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandAccessApprovalOrganizationSettingsEnrolledServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - 
transformedCloudProduct, err := expandAccessApprovalOrganizationSettingsEnrolledServicesCloudProduct(original["cloud_product"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCloudProduct); val.IsValid() && !isEmptyValue(val) { - transformed["cloudProduct"] = transformedCloudProduct - } - - transformedEnrollmentLevel, err := expandAccessApprovalOrganizationSettingsEnrolledServicesEnrollmentLevel(original["enrollment_level"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnrollmentLevel); val.IsValid() && !isEmptyValue(val) { - transformed["enrollmentLevel"] = transformedEnrollmentLevel - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessApprovalOrganizationSettingsEnrolledServicesCloudProduct(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessApprovalOrganizationSettingsEnrolledServicesEnrollmentLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessApprovalOrganizationSettingsActiveKeyVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_os_config_os_policy_assignment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_os_config_os_policy_assignment.go deleted file mode 100644 index 9c91e23273..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_os_config_os_policy_assignment.go +++ /dev/null @@ -1,3137 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: DCL *** -// -// ---------------------------------------------------------------------------- -// -// This 
file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. -// -// We are not currently able to accept contributions to this file. If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - osconfig "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig" -) - -func ResourceOsConfigOsPolicyAssignment() *schema.Resource { - return &schema.Resource{ - Create: resourceOsConfigOsPolicyAssignmentCreate, - Read: resourceOsConfigOsPolicyAssignmentRead, - Update: resourceOsConfigOsPolicyAssignmentUpdate, - Delete: resourceOsConfigOsPolicyAssignmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceOsConfigOsPolicyAssignmentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "instance_filter": { - Type: schema.TypeList, - Required: true, - Description: "Required. 
Filter to select VMs.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentInstanceFilterSchema(), - }, - - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The location for the resource", - }, - - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Resource name.", - }, - - "os_policies": { - Type: schema.TypeList, - Required: true, - Description: "Required. List of OS policies to be applied to the VMs.", - Elem: OsConfigOsPolicyAssignmentOSPoliciesSchema(), - }, - - "rollout": { - Type: schema.TypeList, - Required: true, - Description: "Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentRolloutSchema(), - }, - - "description": { - Type: schema.TypeString, - Optional: true, - Description: "OS policy assignment description. Length of the description is limited to 1024 characters.", - }, - - "project": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The project for the resource", - }, - - "skip_await_rollout": { - Type: schema.TypeBool, - Optional: true, - Description: "Set to true to skip awaiting rollout during resource creation and update.", - }, - - "baseline": { - Type: schema.TypeBool, - Computed: true, - Description: "Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.", - }, - - "deleted": { - Type: schema.TypeBool, - Computed: true, - Description: "Output only. 
Indicates that this revision deletes the OS policy assignment.", - }, - - "etag": { - Type: schema.TypeString, - Computed: true, - Description: "The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.", - }, - - "reconciling": { - Type: schema.TypeBool, - Computed: true, - Description: "Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING", - }, - - "revision_create_time": { - Type: schema.TypeString, - Computed: true, - Description: "Output only. The timestamp that the revision was created.", - }, - - "revision_id": { - Type: schema.TypeString, - Computed: true, - Description: "Output only. The assignment revision ID A new revision is committed whenever a rollout is triggered for a OS policy assignment", - }, - - "rollout_state": { - Type: schema.TypeString, - Computed: true, - Description: "Output only. OS policy assignment rollout state Possible values: ROLLOUT_STATE_UNSPECIFIED, IN_PROGRESS, CANCELLING, CANCELLED, SUCCEEDED", - }, - - "uid": { - Type: schema.TypeString, - Computed: true, - Description: "Output only. Server generated unique id for the OS policy assignment resource.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentInstanceFilterSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "all": { - Type: schema.TypeBool, - Optional: true, - Description: "Target all VMs in the project. If true, no other criteria is permitted.", - }, - - "exclusion_labels": { - Type: schema.TypeList, - Optional: true, - Description: "List of label sets used for VM exclusion. 
If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.", - Elem: OsConfigOsPolicyAssignmentInstanceFilterExclusionLabelsSchema(), - }, - - "inclusion_labels": { - Type: schema.TypeList, - Optional: true, - Description: "List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.", - Elem: OsConfigOsPolicyAssignmentInstanceFilterInclusionLabelsSchema(), - }, - - "inventories": { - Type: schema.TypeList, - Optional: true, - Description: "List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.", - Elem: OsConfigOsPolicyAssignmentInstanceFilterInventoriesSchema(), - }, - }, - } -} - -func OsConfigOsPolicyAssignmentInstanceFilterExclusionLabelsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: "Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func OsConfigOsPolicyAssignmentInstanceFilterInclusionLabelsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: "Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func OsConfigOsPolicyAssignmentInstanceFilterInventoriesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "os_short_name": { - Type: schema.TypeString, - Required: true, - Description: "Required. 
The OS short name", - }, - - "os_version": { - Type: schema.TypeString, - Optional: true, - Description: "The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - Description: "Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.", - }, - - "mode": { - Type: schema.TypeString, - Required: true, - Description: "Required. Policy mode Possible values: MODE_UNSPECIFIED, VALIDATION, ENFORCEMENT", - }, - - "resource_groups": { - Type: schema.TypeList, - Required: true, - Description: "Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`", - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsSchema(), - }, - - "allow_no_resource_group_match": { - Type: schema.TypeBool, - Optional: true, - Description: "This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. 
Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.", - }, - - "description": { - Type: schema.TypeString, - Optional: true, - Description: "Policy description. Length of the description is limited to 1024 characters.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "resources": { - Type: schema.TypeList, - Required: true, - Description: "Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.", - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesSchema(), - }, - - "inventory_filters": { - Type: schema.TypeList, - Optional: true, - Description: "List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally.", - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSchema(), - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - Description: "Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. 
* Must be unique within the OS policy.", - }, - - "exec": { - Type: schema.TypeList, - Optional: true, - Description: "Exec resource", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecSchema(), - }, - - "file": { - Type: schema.TypeList, - Optional: true, - Description: "File resource", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileSchema(), - }, - - "pkg": { - Type: schema.TypeList, - Optional: true, - Description: "Package resource", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgSchema(), - }, - - "repository": { - Type: schema.TypeList, - Optional: true, - Description: "Package repository resource", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositorySchema(), - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "validate": { - Type: schema.TypeList, - Required: true, - Description: "Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates \"in desired state\", and exit code of 101 indicates \"not in desired state\". Any other exit code indicates a failure running validate.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateSchema(), - }, - - "enforce": { - Type: schema.TypeList, - Optional: true, - Description: "What to run to bring this resource into the desired state. 
An exit code of 100 indicates \"success\", any other exit code indicates a failure running enforce.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceSchema(), - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "interpreter": { - Type: schema.TypeString, - Required: true, - Description: "Required. The script interpreter to use. Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL, POWERSHELL", - }, - - "args": { - Type: schema.TypeList, - Optional: true, - Description: "Optional arguments to pass to the source during execution.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "file": { - Type: schema.TypeList, - Optional: true, - Description: "A remote or local file.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileSchema(), - }, - - "output_file_path": { - Type: schema.TypeString, - Optional: true, - Description: "Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 100K bytes.", - }, - - "script": { - Type: schema.TypeString, - Optional: true, - Description: "An inline script. The size of the script is limited to 1024 characters.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allow_insecure": { - Type: schema.TypeBool, - Optional: true, - Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.", - }, - - "gcs": { - Type: schema.TypeList, - Optional: true, - Description: "A Cloud Storage object.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsSchema(), - }, - - "local_path": { - Type: schema.TypeString, - Optional: true, - Description: "A local path within the VM to use.", - }, - - "remote": { - Type: schema.TypeList, - Optional: true, - Description: "A generic remote file.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteSchema(), - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - Description: "Required. Bucket of the Cloud Storage object.", - }, - - "object": { - Type: schema.TypeString, - Required: true, - Description: "Required. Name of the Cloud Storage object.", - }, - - "generation": { - Type: schema.TypeInt, - Optional: true, - Description: "Generation number of the Cloud Storage object.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uri": { - Type: schema.TypeString, - Required: true, - Description: "Required. URI from which to fetch the object. 
It should contain both the protocol and path following the format `{protocol}://{location}`.", - }, - - "sha256_checksum": { - Type: schema.TypeString, - Optional: true, - Description: "SHA256 checksum of the remote file.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "interpreter": { - Type: schema.TypeString, - Required: true, - Description: "Required. The script interpreter to use. Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL, POWERSHELL", - }, - - "args": { - Type: schema.TypeList, - Optional: true, - Description: "Optional arguments to pass to the source during execution.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "file": { - Type: schema.TypeList, - Optional: true, - Description: "A remote or local file.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileSchema(), - }, - - "output_file_path": { - Type: schema.TypeString, - Optional: true, - Description: "Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 100K bytes.", - }, - - "script": { - Type: schema.TypeString, - Optional: true, - Description: "An inline script. The size of the script is limited to 1024 characters.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allow_insecure": { - Type: schema.TypeBool, - Optional: true, - Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.", - }, - - "gcs": { - Type: schema.TypeList, - Optional: true, - Description: "A Cloud Storage object.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsSchema(), - }, - - "local_path": { - Type: schema.TypeString, - Optional: true, - Description: "A local path within the VM to use.", - }, - - "remote": { - Type: schema.TypeList, - Optional: true, - Description: "A generic remote file.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteSchema(), - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - Description: "Required. Bucket of the Cloud Storage object.", - }, - - "object": { - Type: schema.TypeString, - Required: true, - Description: "Required. Name of the Cloud Storage object.", - }, - - "generation": { - Type: schema.TypeInt, - Optional: true, - Description: "Generation number of the Cloud Storage object.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uri": { - Type: schema.TypeString, - Required: true, - Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", - }, - - "sha256_checksum": { - Type: schema.TypeString, - Optional: true, - Description: "SHA256 checksum of the remote file.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "path": { - Type: schema.TypeString, - Required: true, - Description: "Required. 
The absolute path of the file within the VM.", - }, - - "state": { - Type: schema.TypeString, - Required: true, - Description: "Required. Desired state of the file. Possible values: OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED, COMPLIANT, NON_COMPLIANT, UNKNOWN, NO_OS_POLICIES_APPLICABLE", - }, - - "content": { - Type: schema.TypeString, - Optional: true, - Description: "A a file with this content. The size of the content is limited to 1024 characters.", - }, - - "file": { - Type: schema.TypeList, - Optional: true, - Description: "A remote or local source.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileSchema(), - }, - - "permissions": { - Type: schema.TypeString, - Computed: true, - Description: "Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one bit corresponds to the execute permission. Default behavior is 755. Below are some examples of permissions and their associated values: read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allow_insecure": { - Type: schema.TypeBool, - Optional: true, - Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.", - }, - - "gcs": { - Type: schema.TypeList, - Optional: true, - Description: "A Cloud Storage object.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsSchema(), - }, - - "local_path": { - Type: schema.TypeString, - Optional: true, - Description: "A local path within the VM to use.", - }, - - "remote": { - Type: schema.TypeList, - Optional: true, - Description: "A generic remote file.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteSchema(), - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - Description: "Required. Bucket of the Cloud Storage object.", - }, - - "object": { - Type: schema.TypeString, - Required: true, - Description: "Required. Name of the Cloud Storage object.", - }, - - "generation": { - Type: schema.TypeInt, - Optional: true, - Description: "Generation number of the Cloud Storage object.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uri": { - Type: schema.TypeString, - Required: true, - Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", - }, - - "sha256_checksum": { - Type: schema.TypeString, - Optional: true, - Description: "SHA256 checksum of the remote file.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "desired_state": { - Type: schema.TypeString, - Required: true, - Description: "Required. 
The desired state the agent should maintain for this package. Possible values: DESIRED_STATE_UNSPECIFIED, INSTALLED, REMOVED", - }, - - "apt": { - Type: schema.TypeList, - Optional: true, - Description: "A package managed by Apt.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptSchema(), - }, - - "deb": { - Type: schema.TypeList, - Optional: true, - Description: "A deb package file.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSchema(), - }, - - "googet": { - Type: schema.TypeList, - Optional: true, - Description: "A package managed by GooGet.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetSchema(), - }, - - "msi": { - Type: schema.TypeList, - Optional: true, - Description: "An MSI package.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSchema(), - }, - - "rpm": { - Type: schema.TypeList, - Optional: true, - Description: "An rpm package file.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSchema(), - }, - - "yum": { - Type: schema.TypeList, - Optional: true, - Description: "A package managed by YUM.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumSchema(), - }, - - "zypper": { - Type: schema.TypeList, - Optional: true, - Description: "A package managed by Zypper.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperSchema(), - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Required. 
Package name.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source": { - Type: schema.TypeList, - Required: true, - Description: "Required. A deb package.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceSchema(), - }, - - "pull_deps": { - Type: schema.TypeBool, - Optional: true, - Description: "Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allow_insecure": { - Type: schema.TypeBool, - Optional: true, - Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.", - }, - - "gcs": { - Type: schema.TypeList, - Optional: true, - Description: "A Cloud Storage object.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsSchema(), - }, - - "local_path": { - Type: schema.TypeString, - Optional: true, - Description: "A local path within the VM to use.", - }, - - "remote": { - Type: schema.TypeList, - Optional: true, - Description: "A generic remote file.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteSchema(), - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - Description: "Required. 
Bucket of the Cloud Storage object.", - }, - - "object": { - Type: schema.TypeString, - Required: true, - Description: "Required. Name of the Cloud Storage object.", - }, - - "generation": { - Type: schema.TypeInt, - Optional: true, - Description: "Generation number of the Cloud Storage object.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uri": { - Type: schema.TypeString, - Required: true, - Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", - }, - - "sha256_checksum": { - Type: schema.TypeString, - Optional: true, - Description: "SHA256 checksum of the remote file.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Required. Package name.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source": { - Type: schema.TypeList, - Required: true, - Description: "Required. The MSI package.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceSchema(), - }, - - "properties": { - Type: schema.TypeList, - Optional: true, - Description: "Additional properties to use during installation. This should be in the format of Property=Setting. 
Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allow_insecure": { - Type: schema.TypeBool, - Optional: true, - Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.", - }, - - "gcs": { - Type: schema.TypeList, - Optional: true, - Description: "A Cloud Storage object.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsSchema(), - }, - - "local_path": { - Type: schema.TypeString, - Optional: true, - Description: "A local path within the VM to use.", - }, - - "remote": { - Type: schema.TypeList, - Optional: true, - Description: "A generic remote file.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSchema(), - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - Description: "Required. Bucket of the Cloud Storage object.", - }, - - "object": { - Type: schema.TypeString, - Required: true, - Description: "Required. Name of the Cloud Storage object.", - }, - - "generation": { - Type: schema.TypeInt, - Optional: true, - Description: "Generation number of the Cloud Storage object.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uri": { - Type: schema.TypeString, - Required: true, - Description: "Required. 
URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", - }, - - "sha256_checksum": { - Type: schema.TypeString, - Optional: true, - Description: "SHA256 checksum of the remote file.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source": { - Type: schema.TypeList, - Required: true, - Description: "Required. An rpm package.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceSchema(), - }, - - "pull_deps": { - Type: schema.TypeBool, - Optional: true, - Description: "Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allow_insecure": { - Type: schema.TypeBool, - Optional: true, - Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.", - }, - - "gcs": { - Type: schema.TypeList, - Optional: true, - Description: "A Cloud Storage object.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsSchema(), - }, - - "local_path": { - Type: schema.TypeString, - Optional: true, - Description: "A local path within the VM to use.", - }, - - "remote": { - Type: schema.TypeList, - Optional: true, - Description: "A generic remote file.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSchema(), - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - Description: "Required. Bucket of the Cloud Storage object.", - }, - - "object": { - Type: schema.TypeString, - Required: true, - Description: "Required. Name of the Cloud Storage object.", - }, - - "generation": { - Type: schema.TypeInt, - Optional: true, - Description: "Generation number of the Cloud Storage object.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "uri": { - Type: schema.TypeString, - Required: true, - Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", - }, - - "sha256_checksum": { - Type: schema.TypeString, - Optional: true, - Description: "SHA256 checksum of the remote file.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Required. 
Package name.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Required. Package name.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositorySchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "apt": { - Type: schema.TypeList, - Optional: true, - Description: "An Apt Repository.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptSchema(), - }, - - "goo": { - Type: schema.TypeList, - Optional: true, - Description: "A Goo Repository.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooSchema(), - }, - - "yum": { - Type: schema.TypeList, - Optional: true, - Description: "A Yum Repository.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumSchema(), - }, - - "zypper": { - Type: schema.TypeList, - Optional: true, - Description: "A Zypper Repository.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperSchema(), - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "archive_type": { - Type: schema.TypeString, - Required: true, - Description: "Required. Type of archive files in this repository. Possible values: ARCHIVE_TYPE_UNSPECIFIED, DEB, DEB_SRC", - }, - - "components": { - Type: schema.TypeList, - Required: true, - Description: "Required. List of components for this repository. Must contain at least one item.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "distribution": { - Type: schema.TypeString, - Required: true, - Description: "Required. 
Distribution of this repository.", - }, - - "uri": { - Type: schema.TypeString, - Required: true, - Description: "Required. URI for this repository.", - }, - - "gpg_key": { - Type: schema.TypeString, - Optional: true, - Description: "URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "Required. The name of the repository.", - }, - - "url": { - Type: schema.TypeString, - Required: true, - Description: "Required. The url of the repository.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "base_url": { - Type: schema.TypeString, - Required: true, - Description: "Required. The location of the repository directory.", - }, - - "id": { - Type: schema.TypeString, - Required: true, - Description: "Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts.", - }, - - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: "The display name of the repository.", - }, - - "gpg_keys": { - Type: schema.TypeList, - Optional: true, - Description: "URIs of GPG keys.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "base_url": { - Type: schema.TypeString, - Required: true, - Description: "Required. 
The location of the repository directory.", - }, - - "id": { - Type: schema.TypeString, - Required: true, - Description: "Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.", - }, - - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: "The display name of the repository.", - }, - - "gpg_keys": { - Type: schema.TypeList, - Optional: true, - Description: "URIs of GPG keys.", - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "os_short_name": { - Type: schema.TypeString, - Required: true, - Description: "Required. The OS short name", - }, - - "os_version": { - Type: schema.TypeString, - Optional: true, - Description: "The OS version Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field `7.*` An empty string matches all OS versions.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentRolloutSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "disruption_budget": { - Type: schema.TypeList, - Required: true, - Description: "Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.", - MaxItems: 1, - Elem: OsConfigOsPolicyAssignmentRolloutDisruptionBudgetSchema(), - }, - - "min_wait_duration": { - Type: schema.TypeString, - Required: true, - Description: "Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. 
A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.", - }, - }, - } -} - -func OsConfigOsPolicyAssignmentRolloutDisruptionBudgetSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "fixed": { - Type: schema.TypeInt, - Optional: true, - Description: "Specifies a fixed value.", - }, - - "percent": { - Type: schema.TypeInt, - Optional: true, - Description: "Specifies the relative value defined as a percentage, which will be multiplied by a reference value.", - }, - }, - } -} - -func resourceOsConfigOsPolicyAssignmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &osconfig.OSPolicyAssignment{ - InstanceFilter: expandOsConfigOsPolicyAssignmentInstanceFilter(d.Get("instance_filter")), - Location: dcl.String(d.Get("location").(string)), - Name: dcl.String(d.Get("name").(string)), - OSPolicies: expandOsConfigOsPolicyAssignmentOSPoliciesArray(d.Get("os_policies")), - Rollout: expandOsConfigOsPolicyAssignmentRollout(d.Get("rollout")), - Description: dcl.String(d.Get("description").(string)), - Project: dcl.String(project), - SkipAwaitRollout: dcl.Bool(d.Get("skip_await_rollout").(bool)), - } - - id, err := obj.ID() - if err != nil { - return fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := project - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLOsConfigClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could 
not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyOSPolicyAssignment(context.Background(), obj, directive...) - - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error creating OSPolicyAssignment: %s", err) - } - - log.Printf("[DEBUG] Finished creating OSPolicyAssignment %q: %#v", d.Id(), res) - - return resourceOsConfigOsPolicyAssignmentRead(d, meta) -} - -func resourceOsConfigOsPolicyAssignmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &osconfig.OSPolicyAssignment{ - InstanceFilter: expandOsConfigOsPolicyAssignmentInstanceFilter(d.Get("instance_filter")), - Location: dcl.String(d.Get("location").(string)), - Name: dcl.String(d.Get("name").(string)), - OSPolicies: expandOsConfigOsPolicyAssignmentOSPoliciesArray(d.Get("os_policies")), - Rollout: expandOsConfigOsPolicyAssignmentRollout(d.Get("rollout")), - Description: dcl.String(d.Get("description").(string)), - Project: dcl.String(project), - SkipAwaitRollout: dcl.Bool(d.Get("skip_await_rollout").(bool)), - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - billingProject := project - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLOsConfigClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := 
client.GetOSPolicyAssignment(context.Background(), obj) - if err != nil { - resourceName := fmt.Sprintf("OsConfigOsPolicyAssignment %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("instance_filter", flattenOsConfigOsPolicyAssignmentInstanceFilter(res.InstanceFilter)); err != nil { - return fmt.Errorf("error setting instance_filter in state: %s", err) - } - if err = d.Set("location", res.Location); err != nil { - return fmt.Errorf("error setting location in state: %s", err) - } - if err = d.Set("name", res.Name); err != nil { - return fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("os_policies", flattenOsConfigOsPolicyAssignmentOSPoliciesArray(res.OSPolicies)); err != nil { - return fmt.Errorf("error setting os_policies in state: %s", err) - } - if err = d.Set("rollout", flattenOsConfigOsPolicyAssignmentRollout(res.Rollout)); err != nil { - return fmt.Errorf("error setting rollout in state: %s", err) - } - if err = d.Set("description", res.Description); err != nil { - return fmt.Errorf("error setting description in state: %s", err) - } - if err = d.Set("project", res.Project); err != nil { - return fmt.Errorf("error setting project in state: %s", err) - } - if err = d.Set("skip_await_rollout", res.SkipAwaitRollout); err != nil { - return fmt.Errorf("error setting skip_await_rollout in state: %s", err) - } - if err = d.Set("baseline", res.Baseline); err != nil { - return fmt.Errorf("error setting baseline in state: %s", err) - } - if err = d.Set("deleted", res.Deleted); err != nil { - return fmt.Errorf("error setting deleted in state: %s", err) - } - if err = d.Set("etag", res.Etag); err != nil { - return fmt.Errorf("error setting etag in state: %s", err) - } - if err = d.Set("reconciling", res.Reconciling); err != nil { - return fmt.Errorf("error setting reconciling in state: %s", err) - } - if err = d.Set("revision_create_time", res.RevisionCreateTime); err != nil { - return fmt.Errorf("error 
setting revision_create_time in state: %s", err) - } - if err = d.Set("revision_id", res.RevisionId); err != nil { - return fmt.Errorf("error setting revision_id in state: %s", err) - } - if err = d.Set("rollout_state", res.RolloutState); err != nil { - return fmt.Errorf("error setting rollout_state in state: %s", err) - } - if err = d.Set("uid", res.Uid); err != nil { - return fmt.Errorf("error setting uid in state: %s", err) - } - - return nil -} -func resourceOsConfigOsPolicyAssignmentUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &osconfig.OSPolicyAssignment{ - InstanceFilter: expandOsConfigOsPolicyAssignmentInstanceFilter(d.Get("instance_filter")), - Location: dcl.String(d.Get("location").(string)), - Name: dcl.String(d.Get("name").(string)), - OSPolicies: expandOsConfigOsPolicyAssignmentOSPoliciesArray(d.Get("os_policies")), - Rollout: expandOsConfigOsPolicyAssignmentRollout(d.Get("rollout")), - Description: dcl.String(d.Get("description").(string)), - Project: dcl.String(project), - SkipAwaitRollout: dcl.Bool(d.Get("skip_await_rollout").(bool)), - } - // Construct state hint from old values - old := &osconfig.OSPolicyAssignment{ - InstanceFilter: expandOsConfigOsPolicyAssignmentInstanceFilter(oldValue(d.GetChange("instance_filter"))), - Location: dcl.String(oldValue(d.GetChange("location")).(string)), - Name: dcl.String(oldValue(d.GetChange("name")).(string)), - OSPolicies: expandOsConfigOsPolicyAssignmentOSPoliciesArray(oldValue(d.GetChange("os_policies"))), - Rollout: expandOsConfigOsPolicyAssignmentRollout(oldValue(d.GetChange("rollout"))), - Description: dcl.String(oldValue(d.GetChange("description")).(string)), - Project: dcl.StringOrNil(oldValue(d.GetChange("project")).(string)), - SkipAwaitRollout: dcl.Bool(oldValue(d.GetChange("skip_await_rollout")).(bool)), - } - directive := UpdateDirective - directive = append(directive, 
dcl.WithStateHint(old)) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLOsConfigClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - res, err := client.ApplyOSPolicyAssignment(context.Background(), obj, directive...) - - if _, ok := err.(dcl.DiffAfterApplyError); ok { - log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error updating OSPolicyAssignment: %s", err) - } - - log.Printf("[DEBUG] Finished creating OSPolicyAssignment %q: %#v", d.Id(), res) - - return resourceOsConfigOsPolicyAssignmentRead(d, meta) -} - -func resourceOsConfigOsPolicyAssignmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &osconfig.OSPolicyAssignment{ - InstanceFilter: expandOsConfigOsPolicyAssignmentInstanceFilter(d.Get("instance_filter")), - Location: dcl.String(d.Get("location").(string)), - Name: dcl.String(d.Get("name").(string)), - OSPolicies: expandOsConfigOsPolicyAssignmentOSPoliciesArray(d.Get("os_policies")), - Rollout: expandOsConfigOsPolicyAssignmentRollout(d.Get("rollout")), - Description: dcl.String(d.Get("description").(string)), - Project: dcl.String(project), - SkipAwaitRollout: dcl.Bool(d.Get("skip_await_rollout").(bool)), - } - - log.Printf("[DEBUG] Deleting OSPolicyAssignment %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if 
err != nil { - return err - } - billingProject := project - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLOsConfigClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { - d.SetId("") - return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) - } else { - client.Config.BasePath = bp - } - if err := client.DeleteOSPolicyAssignment(context.Background(), obj); err != nil { - return fmt.Errorf("Error deleting OSPolicyAssignment: %s", err) - } - - log.Printf("[DEBUG] Finished deleting OSPolicyAssignment %q", d.Id()) - return nil -} - -func resourceOsConfigOsPolicyAssignmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/osPolicyAssignments/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func expandOsConfigOsPolicyAssignmentInstanceFilter(o interface{}) *osconfig.OSPolicyAssignmentInstanceFilter { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentInstanceFilter - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentInstanceFilter - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentInstanceFilter{ - All: dcl.Bool(obj["all"].(bool)), - ExclusionLabels: 
expandOsConfigOsPolicyAssignmentInstanceFilterExclusionLabelsArray(obj["exclusion_labels"]), - InclusionLabels: expandOsConfigOsPolicyAssignmentInstanceFilterInclusionLabelsArray(obj["inclusion_labels"]), - Inventories: expandOsConfigOsPolicyAssignmentInstanceFilterInventoriesArray(obj["inventories"]), - } -} - -func flattenOsConfigOsPolicyAssignmentInstanceFilter(obj *osconfig.OSPolicyAssignmentInstanceFilter) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "all": obj.All, - "exclusion_labels": flattenOsConfigOsPolicyAssignmentInstanceFilterExclusionLabelsArray(obj.ExclusionLabels), - "inclusion_labels": flattenOsConfigOsPolicyAssignmentInstanceFilterInclusionLabelsArray(obj.InclusionLabels), - "inventories": flattenOsConfigOsPolicyAssignmentInstanceFilterInventoriesArray(obj.Inventories), - } - - return []interface{}{transformed} - -} -func expandOsConfigOsPolicyAssignmentInstanceFilterExclusionLabelsArray(o interface{}) []osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels { - if o == nil { - return make([]osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels, 0) - } - - items := make([]osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels, 0, len(objs)) - for _, item := range objs { - i := expandOsConfigOsPolicyAssignmentInstanceFilterExclusionLabels(item) - items = append(items, *i) - } - - return items -} - -func expandOsConfigOsPolicyAssignmentInstanceFilterExclusionLabels(o interface{}) *osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentInstanceFilterExclusionLabels - } - - obj := o.(map[string]interface{}) - return &osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels{ - Labels: checkStringMap(obj["labels"]), - } -} - -func 
flattenOsConfigOsPolicyAssignmentInstanceFilterExclusionLabelsArray(objs []osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenOsConfigOsPolicyAssignmentInstanceFilterExclusionLabels(&item) - items = append(items, i) - } - - return items -} - -func flattenOsConfigOsPolicyAssignmentInstanceFilterExclusionLabels(obj *osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "labels": obj.Labels, - } - - return transformed - -} -func expandOsConfigOsPolicyAssignmentInstanceFilterInclusionLabelsArray(o interface{}) []osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels { - if o == nil { - return make([]osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels, 0) - } - - items := make([]osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels, 0, len(objs)) - for _, item := range objs { - i := expandOsConfigOsPolicyAssignmentInstanceFilterInclusionLabels(item) - items = append(items, *i) - } - - return items -} - -func expandOsConfigOsPolicyAssignmentInstanceFilterInclusionLabels(o interface{}) *osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentInstanceFilterInclusionLabels - } - - obj := o.(map[string]interface{}) - return &osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels{ - Labels: checkStringMap(obj["labels"]), - } -} - -func flattenOsConfigOsPolicyAssignmentInstanceFilterInclusionLabelsArray(objs []osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := 
flattenOsConfigOsPolicyAssignmentInstanceFilterInclusionLabels(&item) - items = append(items, i) - } - - return items -} - -func flattenOsConfigOsPolicyAssignmentInstanceFilterInclusionLabels(obj *osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "labels": obj.Labels, - } - - return transformed - -} -func expandOsConfigOsPolicyAssignmentInstanceFilterInventoriesArray(o interface{}) []osconfig.OSPolicyAssignmentInstanceFilterInventories { - if o == nil { - return make([]osconfig.OSPolicyAssignmentInstanceFilterInventories, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]osconfig.OSPolicyAssignmentInstanceFilterInventories, 0) - } - - items := make([]osconfig.OSPolicyAssignmentInstanceFilterInventories, 0, len(objs)) - for _, item := range objs { - i := expandOsConfigOsPolicyAssignmentInstanceFilterInventories(item) - items = append(items, *i) - } - - return items -} - -func expandOsConfigOsPolicyAssignmentInstanceFilterInventories(o interface{}) *osconfig.OSPolicyAssignmentInstanceFilterInventories { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentInstanceFilterInventories - } - - obj := o.(map[string]interface{}) - return &osconfig.OSPolicyAssignmentInstanceFilterInventories{ - OSShortName: dcl.String(obj["os_short_name"].(string)), - OSVersion: dcl.String(obj["os_version"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentInstanceFilterInventoriesArray(objs []osconfig.OSPolicyAssignmentInstanceFilterInventories) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenOsConfigOsPolicyAssignmentInstanceFilterInventories(&item) - items = append(items, i) - } - - return items -} - -func flattenOsConfigOsPolicyAssignmentInstanceFilterInventories(obj *osconfig.OSPolicyAssignmentInstanceFilterInventories) interface{} { - if obj 
== nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "os_short_name": obj.OSShortName, - "os_version": obj.OSVersion, - } - - return transformed - -} -func expandOsConfigOsPolicyAssignmentOSPoliciesArray(o interface{}) []osconfig.OSPolicyAssignmentOSPolicies { - if o == nil { - return make([]osconfig.OSPolicyAssignmentOSPolicies, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]osconfig.OSPolicyAssignmentOSPolicies, 0) - } - - items := make([]osconfig.OSPolicyAssignmentOSPolicies, 0, len(objs)) - for _, item := range objs { - i := expandOsConfigOsPolicyAssignmentOSPolicies(item) - items = append(items, *i) - } - - return items -} - -func expandOsConfigOsPolicyAssignmentOSPolicies(o interface{}) *osconfig.OSPolicyAssignmentOSPolicies { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPolicies - } - - obj := o.(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPolicies{ - Id: dcl.String(obj["id"].(string)), - Mode: osconfig.OSPolicyAssignmentOSPoliciesModeEnumRef(obj["mode"].(string)), - ResourceGroups: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsArray(obj["resource_groups"]), - AllowNoResourceGroupMatch: dcl.Bool(obj["allow_no_resource_group_match"].(bool)), - Description: dcl.String(obj["description"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesArray(objs []osconfig.OSPolicyAssignmentOSPolicies) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenOsConfigOsPolicyAssignmentOSPolicies(&item) - items = append(items, i) - } - - return items -} - -func flattenOsConfigOsPolicyAssignmentOSPolicies(obj *osconfig.OSPolicyAssignmentOSPolicies) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "id": obj.Id, - "mode": obj.Mode, - "resource_groups": 
flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsArray(obj.ResourceGroups), - "allow_no_resource_group_match": obj.AllowNoResourceGroupMatch, - "description": obj.Description, - } - - return transformed - -} -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsArray(o interface{}) []osconfig.OSPolicyAssignmentOSPoliciesResourceGroups { - if o == nil { - return make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroups, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroups, 0) - } - - items := make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroups, 0, len(objs)) - for _, item := range objs { - i := expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroups(item) - items = append(items, *i) - } - - return items -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroups(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroups { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroups - } - - obj := o.(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroups{ - Resources: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesArray(obj["resources"]), - InventoryFilters: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersArray(obj["inventory_filters"]), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsArray(objs []osconfig.OSPolicyAssignmentOSPoliciesResourceGroups) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroups(&item) - items = append(items, i) - } - - return items -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroups(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroups) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "resources": 
flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesArray(obj.Resources), - "inventory_filters": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersArray(obj.InventoryFilters), - } - - return transformed - -} -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesArray(o interface{}) []osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources { - if o == nil { - return make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources, 0) - } - - items := make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources, 0, len(objs)) - for _, item := range objs { - i := expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResources(item) - items = append(items, *i) - } - - return items -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResources(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResources - } - - obj := o.(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources{ - Id: dcl.String(obj["id"].(string)), - Exec: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(obj["exec"]), - File: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(obj["file"]), - Pkg: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(obj["pkg"]), - Repository: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(obj["repository"]), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesArray(objs []osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := 
flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResources(&item) - items = append(items, i) - } - - return items -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResources(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "id": obj.Id, - "exec": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(obj.Exec), - "file": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(obj.File), - "pkg": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(obj.Pkg), - "repository": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(obj.Repository), - } - - return transformed - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec{ - Validate: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(obj["validate"]), - Enforce: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(obj["enforce"]), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "validate": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(obj.Validate), - "enforce": 
flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(obj.Enforce), - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate{ - Interpreter: osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnumRef(obj["interpreter"].(string)), - Args: expandStringArray(obj["args"]), - File: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(obj["file"]), - OutputFilePath: dcl.String(obj["output_file_path"].(string)), - Script: dcl.String(obj["script"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "interpreter": obj.Interpreter, - "args": obj.Args, - "file": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(obj.File), - "output_file_path": obj.OutputFilePath, - "script": obj.Script, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile - } - objArr := o.([]interface{}) - if len(objArr) 
== 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile{ - AllowInsecure: dcl.Bool(obj["allow_insecure"].(bool)), - Gcs: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(obj["gcs"]), - LocalPath: dcl.String(obj["local_path"].(string)), - Remote: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(obj["remote"]), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "allow_insecure": obj.AllowInsecure, - "gcs": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(obj.Gcs), - "local_path": obj.LocalPath, - "remote": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(obj.Remote), - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs{ - Bucket: dcl.String(obj["bucket"].(string)), - Object: dcl.String(obj["object"].(string)), - Generation: dcl.Int64(int64(obj["generation"].(int))), - } -} - -func 
flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "bucket": obj.Bucket, - "object": obj.Object, - "generation": obj.Generation, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote{ - Uri: dcl.String(obj["uri"].(string)), - Sha256Checksum: dcl.String(obj["sha256_checksum"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "uri": obj.Uri, - "sha256_checksum": obj.Sha256Checksum, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce - } - obj := 
objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce{ - Interpreter: osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnumRef(obj["interpreter"].(string)), - Args: expandStringArray(obj["args"]), - File: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(obj["file"]), - OutputFilePath: dcl.String(obj["output_file_path"].(string)), - Script: dcl.String(obj["script"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "interpreter": obj.Interpreter, - "args": obj.Args, - "file": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(obj.File), - "output_file_path": obj.OutputFilePath, - "script": obj.Script, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile{ - AllowInsecure: dcl.Bool(obj["allow_insecure"].(bool)), - Gcs: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(obj["gcs"]), - LocalPath: dcl.String(obj["local_path"].(string)), - Remote: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(obj["remote"]), - } -} - -func 
flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "allow_insecure": obj.AllowInsecure, - "gcs": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(obj.Gcs), - "local_path": obj.LocalPath, - "remote": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(obj.Remote), - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs{ - Bucket: dcl.String(obj["bucket"].(string)), - Object: dcl.String(obj["object"].(string)), - Generation: dcl.Int64(int64(obj["generation"].(int))), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "bucket": obj.Bucket, - "object": obj.Object, - "generation": obj.Generation, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote { - if o == nil { - return 
osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote{ - Uri: dcl.String(obj["uri"].(string)), - Sha256Checksum: dcl.String(obj["sha256_checksum"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "uri": obj.Uri, - "sha256_checksum": obj.Sha256Checksum, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile{ - Path: dcl.String(obj["path"].(string)), - State: osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnumRef(obj["state"].(string)), - Content: dcl.String(obj["content"].(string)), - File: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(obj["file"]), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := 
map[string]interface{}{ - "path": obj.Path, - "state": obj.State, - "content": obj.Content, - "file": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(obj.File), - "permissions": obj.Permissions, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile{ - AllowInsecure: dcl.Bool(obj["allow_insecure"].(bool)), - Gcs: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(obj["gcs"]), - LocalPath: dcl.String(obj["local_path"].(string)), - Remote: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(obj["remote"]), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "allow_insecure": obj.AllowInsecure, - "gcs": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(obj.Gcs), - "local_path": obj.LocalPath, - "remote": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(obj.Remote), - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs { - if o == nil { - return 
osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs{ - Bucket: dcl.String(obj["bucket"].(string)), - Object: dcl.String(obj["object"].(string)), - Generation: dcl.Int64(int64(obj["generation"].(int))), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "bucket": obj.Bucket, - "object": obj.Object, - "generation": obj.Generation, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote{ - Uri: dcl.String(obj["uri"].(string)), - Sha256Checksum: dcl.String(obj["sha256_checksum"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "uri": obj.Uri, - "sha256_checksum": obj.Sha256Checksum, - } - - return []interface{}{transformed} - 
-} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg{ - DesiredState: osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnumRef(obj["desired_state"].(string)), - Apt: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(obj["apt"]), - Deb: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(obj["deb"]), - Googet: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(obj["googet"]), - Msi: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(obj["msi"]), - Rpm: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(obj["rpm"]), - Yum: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(obj["yum"]), - Zypper: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(obj["zypper"]), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "desired_state": obj.DesiredState, - "apt": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(obj.Apt), - "deb": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(obj.Deb), - "googet": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(obj.Googet), - "msi": 
flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(obj.Msi), - "rpm": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(obj.Rpm), - "yum": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(obj.Yum), - "zypper": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(obj.Zypper), - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt{ - Name: dcl.String(obj["name"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "name": obj.Name, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb{ - Source: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(obj["source"]), - PullDeps: 
dcl.Bool(obj["pull_deps"].(bool)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "source": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(obj.Source), - "pull_deps": obj.PullDeps, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource{ - AllowInsecure: dcl.Bool(obj["allow_insecure"].(bool)), - Gcs: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(obj["gcs"]), - LocalPath: dcl.String(obj["local_path"].(string)), - Remote: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(obj["remote"]), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "allow_insecure": obj.AllowInsecure, - "gcs": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(obj.Gcs), - "local_path": obj.LocalPath, - "remote": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(obj.Remote), - } - - return []interface{}{transformed} - -} - -func 
expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs{ - Bucket: dcl.String(obj["bucket"].(string)), - Object: dcl.String(obj["object"].(string)), - Generation: dcl.Int64(int64(obj["generation"].(int))), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "bucket": obj.Bucket, - "object": obj.Object, - "generation": obj.Generation, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote{ - Uri: dcl.String(obj["uri"].(string)), - Sha256Checksum: dcl.String(obj["sha256_checksum"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(obj 
*osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "uri": obj.Uri, - "sha256_checksum": obj.Sha256Checksum, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget{ - Name: dcl.String(obj["name"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "name": obj.Name, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi{ - Source: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(obj["source"]), - Properties: expandStringArray(obj["properties"]), - } -} - -func 
flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "source": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(obj.Source), - "properties": obj.Properties, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource{ - AllowInsecure: dcl.Bool(obj["allow_insecure"].(bool)), - Gcs: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(obj["gcs"]), - LocalPath: dcl.String(obj["local_path"].(string)), - Remote: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(obj["remote"]), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "allow_insecure": obj.AllowInsecure, - "gcs": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(obj.Gcs), - "local_path": obj.LocalPath, - "remote": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(obj.Remote), - } - - return []interface{}{transformed} - -} - -func 
expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs{ - Bucket: dcl.String(obj["bucket"].(string)), - Object: dcl.String(obj["object"].(string)), - Generation: dcl.Int64(int64(obj["generation"].(int))), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "bucket": obj.Bucket, - "object": obj.Object, - "generation": obj.Generation, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote{ - Uri: dcl.String(obj["uri"].(string)), - Sha256Checksum: dcl.String(obj["sha256_checksum"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(obj 
*osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "uri": obj.Uri, - "sha256_checksum": obj.Sha256Checksum, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm{ - Source: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(obj["source"]), - PullDeps: dcl.Bool(obj["pull_deps"].(bool)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "source": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(obj.Source), - "pull_deps": obj.PullDeps, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource - } - obj := objArr[0].(map[string]interface{}) - return 
&osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource{ - AllowInsecure: dcl.Bool(obj["allow_insecure"].(bool)), - Gcs: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(obj["gcs"]), - LocalPath: dcl.String(obj["local_path"].(string)), - Remote: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(obj["remote"]), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "allow_insecure": obj.AllowInsecure, - "gcs": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(obj.Gcs), - "local_path": obj.LocalPath, - "remote": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(obj.Remote), - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs{ - Bucket: dcl.String(obj["bucket"].(string)), - Object: dcl.String(obj["object"].(string)), - Generation: dcl.Int64(int64(obj["generation"].(int))), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed 
:= map[string]interface{}{ - "bucket": obj.Bucket, - "object": obj.Object, - "generation": obj.Generation, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote{ - Uri: dcl.String(obj["uri"].(string)), - Sha256Checksum: dcl.String(obj["sha256_checksum"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "uri": obj.Uri, - "sha256_checksum": obj.Sha256Checksum, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum{ - Name: dcl.String(obj["name"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(obj 
*osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "name": obj.Name, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper{ - Name: dcl.String(obj["name"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "name": obj.Name, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository{ - Apt: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(obj["apt"]), - Goo: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(obj["goo"]), - Yum: 
expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(obj["yum"]), - Zypper: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(obj["zypper"]), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "apt": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(obj.Apt), - "goo": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(obj.Goo), - "yum": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(obj.Yum), - "zypper": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(obj.Zypper), - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt{ - ArchiveType: osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnumRef(obj["archive_type"].(string)), - Components: expandStringArray(obj["components"]), - Distribution: dcl.String(obj["distribution"].(string)), - Uri: dcl.String(obj["uri"].(string)), - GpgKey: dcl.String(obj["gpg_key"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(obj 
*osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "archive_type": obj.ArchiveType, - "components": obj.Components, - "distribution": obj.Distribution, - "uri": obj.Uri, - "gpg_key": obj.GpgKey, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo{ - Name: dcl.String(obj["name"].(string)), - Url: dcl.String(obj["url"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "name": obj.Name, - "url": obj.Url, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum{ - BaseUrl: 
dcl.String(obj["base_url"].(string)), - Id: dcl.String(obj["id"].(string)), - DisplayName: dcl.String(obj["display_name"].(string)), - GpgKeys: expandStringArray(obj["gpg_keys"]), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "base_url": obj.BaseUrl, - "id": obj.Id, - "display_name": obj.DisplayName, - "gpg_keys": obj.GpgKeys, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper{ - BaseUrl: dcl.String(obj["base_url"].(string)), - Id: dcl.String(obj["id"].(string)), - DisplayName: dcl.String(obj["display_name"].(string)), - GpgKeys: expandStringArray(obj["gpg_keys"]), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "base_url": obj.BaseUrl, - "id": obj.Id, - "display_name": obj.DisplayName, - "gpg_keys": obj.GpgKeys, - } - - return []interface{}{transformed} - -} -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersArray(o interface{}) 
[]osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters { - if o == nil { - return make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 || objs[0] == nil { - return make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, 0) - } - - items := make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, 0, len(objs)) - for _, item := range objs { - i := expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(item) - items = append(items, *i) - } - - return items -} - -func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters - } - - obj := o.(map[string]interface{}) - return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters{ - OSShortName: dcl.String(obj["os_short_name"].(string)), - OSVersion: dcl.String(obj["os_version"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersArray(objs []osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(&item) - items = append(items, i) - } - - return items -} - -func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "os_short_name": obj.OSShortName, - "os_version": obj.OSVersion, - } - - return transformed - -} - -func expandOsConfigOsPolicyAssignmentRollout(o interface{}) *osconfig.OSPolicyAssignmentRollout { - if o 
== nil { - return osconfig.EmptyOSPolicyAssignmentRollout - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentRollout - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentRollout{ - DisruptionBudget: expandOsConfigOsPolicyAssignmentRolloutDisruptionBudget(obj["disruption_budget"]), - MinWaitDuration: dcl.String(obj["min_wait_duration"].(string)), - } -} - -func flattenOsConfigOsPolicyAssignmentRollout(obj *osconfig.OSPolicyAssignmentRollout) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "disruption_budget": flattenOsConfigOsPolicyAssignmentRolloutDisruptionBudget(obj.DisruptionBudget), - "min_wait_duration": obj.MinWaitDuration, - } - - return []interface{}{transformed} - -} - -func expandOsConfigOsPolicyAssignmentRolloutDisruptionBudget(o interface{}) *osconfig.OSPolicyAssignmentRolloutDisruptionBudget { - if o == nil { - return osconfig.EmptyOSPolicyAssignmentRolloutDisruptionBudget - } - objArr := o.([]interface{}) - if len(objArr) == 0 || objArr[0] == nil { - return osconfig.EmptyOSPolicyAssignmentRolloutDisruptionBudget - } - obj := objArr[0].(map[string]interface{}) - return &osconfig.OSPolicyAssignmentRolloutDisruptionBudget{ - Fixed: dcl.Int64(int64(obj["fixed"].(int))), - Percent: dcl.Int64(int64(obj["percent"].(int))), - } -} - -func flattenOsConfigOsPolicyAssignmentRolloutDisruptionBudget(obj *osconfig.OSPolicyAssignmentRolloutDisruptionBudget) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "fixed": obj.Fixed, - "percent": obj.Percent, - } - - return []interface{}{transformed} - -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_os_login_ssh_public_key.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_os_login_ssh_public_key.go deleted file mode 100644 index 80e71d86aa..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_os_login_ssh_public_key.go +++ /dev/null @@ -1,316 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceOSLoginSSHPublicKey() *schema.Resource { - return &schema.Resource{ - Create: resourceOSLoginSSHPublicKeyCreate, - Read: resourceOSLoginSSHPublicKeyRead, - Update: resourceOSLoginSSHPublicKeyUpdate, - Delete: resourceOSLoginSSHPublicKeyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceOSLoginSSHPublicKeyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Public key text in SSH format, defined by RFC4253 section 6.6.`, - }, - "user": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The user email.`, - }, - "expiration_time_usec": { - Type: schema.TypeString, - Optional: true, - Description: `An expiration time in microseconds since 
epoch.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The project ID of the Google Cloud Platform project.`, - }, - "fingerprint": { - Type: schema.TypeString, - Computed: true, - Description: `The SHA-256 fingerprint of the SSH public key.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceOSLoginSSHPublicKeyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - keyProp, err := expandOSLoginSSHPublicKeyKey(d.Get("key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("key"); !isEmptyValue(reflect.ValueOf(keyProp)) && (ok || !reflect.DeepEqual(v, keyProp)) { - obj["key"] = keyProp - } - expirationTimeUsecProp, err := expandOSLoginSSHPublicKeyExpirationTimeUsec(d.Get("expiration_time_usec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("expiration_time_usec"); !isEmptyValue(reflect.ValueOf(expirationTimeUsecProp)) && (ok || !reflect.DeepEqual(v, expirationTimeUsecProp)) { - obj["expirationTimeUsec"] = expirationTimeUsecProp - } - - url, err := replaceVars(d, config, "{{OSLoginBasePath}}users/{{user}}:importSshPublicKey") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new SSHPublicKey: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // Don't use `getProject()` because we only want to set the project in the URL - // if the user set it explicitly on the resource. 
- if p, ok := d.GetOk("project"); ok { - url, err = addQueryParams(url, map[string]string{"projectId": p.(string)}) - if err != nil { - return err - } - } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating SSHPublicKey: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "users/{{user}}/sshPublicKeys/{{fingerprint}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - loginProfile, ok := res["loginProfile"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - // `fingerprint` is autogenerated from the api so needs to be set post-create - sshPublicKeys := loginProfile.(map[string]interface{})["sshPublicKeys"] - for _, sshPublicKey := range sshPublicKeys.(map[string]interface{}) { - if sshPublicKey.(map[string]interface{})["key"].(string) == d.Get("key") { - if err := d.Set("fingerprint", sshPublicKey.(map[string]interface{})["fingerprint"].(string)); err != nil { - return fmt.Errorf("Error setting fingerprint: %s", err) - } - break - } - } - - // Store the ID now - id, err = replaceVars(d, config, "users/{{user}}/sshPublicKeys/{{fingerprint}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating SSHPublicKey %q: %#v", d.Id(), res) - - return resourceOSLoginSSHPublicKeyRead(d, meta) -} - -func resourceOSLoginSSHPublicKeyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{OSLoginBasePath}}users/{{user}}/sshPublicKeys/{{fingerprint}}/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was 
found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("OSLoginSSHPublicKey %q", d.Id())) - } - - if err := d.Set("key", flattenOSLoginSSHPublicKeyKey(res["key"], d, config)); err != nil { - return fmt.Errorf("Error reading SSHPublicKey: %s", err) - } - if err := d.Set("expiration_time_usec", flattenOSLoginSSHPublicKeyExpirationTimeUsec(res["expirationTimeUsec"], d, config)); err != nil { - return fmt.Errorf("Error reading SSHPublicKey: %s", err) - } - if err := d.Set("fingerprint", flattenOSLoginSSHPublicKeyFingerprint(res["fingerprint"], d, config)); err != nil { - return fmt.Errorf("Error reading SSHPublicKey: %s", err) - } - - return nil -} - -func resourceOSLoginSSHPublicKeyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - expirationTimeUsecProp, err := expandOSLoginSSHPublicKeyExpirationTimeUsec(d.Get("expiration_time_usec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("expiration_time_usec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, expirationTimeUsecProp)) { - obj["expirationTimeUsec"] = expirationTimeUsecProp - } - - url, err := replaceVars(d, config, "{{OSLoginBasePath}}users/{{user}}/sshPublicKeys/{{fingerprint}}/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating SSHPublicKey %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("expiration_time_usec") { - updateMask = append(updateMask, "expirationTimeUsec") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": 
strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating SSHPublicKey %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating SSHPublicKey %q: %#v", d.Id(), res) - } - - return resourceOSLoginSSHPublicKeyRead(d, meta) -} - -func resourceOSLoginSSHPublicKeyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{OSLoginBasePath}}users/{{user}}/sshPublicKeys/{{fingerprint}}/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting SSHPublicKey %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "SSHPublicKey") - } - - log.Printf("[DEBUG] Finished deleting SSHPublicKey %q: %#v", d.Id(), res) - return nil -} - -func resourceOSLoginSSHPublicKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "users/(?P[^/]+)/sshPublicKeys/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "users/{{user}}/sshPublicKeys/{{fingerprint}}") - if err != nil { - return nil, 
fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenOSLoginSSHPublicKeyKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSLoginSSHPublicKeyExpirationTimeUsec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSLoginSSHPublicKeyFingerprint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandOSLoginSSHPublicKeyKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSLoginSSHPublicKeyExpirationTimeUsec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_project_access_approval_settings.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_project_access_approval_settings.go deleted file mode 100644 index a019553c53..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_project_access_approval_settings.go +++ /dev/null @@ -1,535 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceAccessApprovalProjectSettings() *schema.Resource { - return &schema.Resource{ - Create: resourceAccessApprovalProjectSettingsCreate, - Read: resourceAccessApprovalProjectSettingsRead, - Update: resourceAccessApprovalProjectSettingsUpdate, - Delete: resourceAccessApprovalProjectSettingsDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAccessApprovalProjectSettingsImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "enrolled_services": { - Type: schema.TypeSet, - Required: true, - Description: `A list of Google Cloud Services for which the given resource has Access Approval enrolled. -Access requests for the resource given by name against any of these services contained here will be required -to have explicit approval. Enrollment can only be done on an all or nothing basis. - -A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded.`, - Elem: accessapprovalProjectSettingsEnrolledServicesSchema(), - Set: accessApprovalEnrolledServicesHash, - }, - "project_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the project of the access approval settings.`, - }, - "active_key_version": { - Type: schema.TypeString, - Optional: true, - Description: `The asymmetric crypto key version to use for signing approval requests. -Empty active_key_version indicates that a Google-managed key should be used for signing. 
-This property will be ignored if set by an ancestor of the resource, and new non-empty values may not be set.`, - }, - "notification_emails": { - Type: schema.TypeSet, - Computed: true, - Optional: true, - Description: `A list of email addresses to which notifications relating to approval requests should be sent. -Notifications relating to a resource will be sent to all emails in the settings of ancestor -resources of that resource. A maximum of 50 email addresses are allowed.`, - MaxItems: 50, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Deprecated: "Deprecated in favor of `project_id`", - Description: `Deprecated in favor of 'project_id'`, - }, - "ancestor_has_active_key_version": { - Type: schema.TypeBool, - Computed: true, - Description: `If the field is true, that indicates that an ancestor of this Project has set active_key_version.`, - }, - "enrolled_ancestor": { - Type: schema.TypeBool, - Computed: true, - Description: `If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project.`, - }, - "invalid_key_version": { - Type: schema.TypeBool, - Computed: true, - Description: `If the field is true, that indicates that there is some configuration issue with the active_key_version -configured on this Project (e.g. it doesn't exist or the Access Approval service account doesn't have the -correct permissions on it, etc.) This key version is not necessarily the effective key version at this level, -as key versions are inherited top-down.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the settings. 
Format is "projects/{project_id}/accessApprovalSettings"`, - }, - }, - UseJSONNumber: true, - } -} - -func accessapprovalProjectSettingsEnrolledServicesSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cloud_product": { - Type: schema.TypeString, - Required: true, - Description: `The product for which Access Approval will be enrolled. Allowed values are listed (case-sensitive): - all - appengine.googleapis.com - bigquery.googleapis.com - bigtable.googleapis.com - cloudkms.googleapis.com - compute.googleapis.com - dataflow.googleapis.com - iam.googleapis.com - pubsub.googleapis.com - storage.googleapis.com`, - }, - "enrollment_level": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"BLOCK_ALL", ""}), - Description: `The enrollment level of the service. Default value: "BLOCK_ALL" Possible values: ["BLOCK_ALL"]`, - Default: "BLOCK_ALL", - }, - }, - } -} - -func resourceAccessApprovalProjectSettingsCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - notificationEmailsProp, err := expandAccessApprovalProjectSettingsNotificationEmails(d.Get("notification_emails"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_emails"); !isEmptyValue(reflect.ValueOf(notificationEmailsProp)) && (ok || !reflect.DeepEqual(v, notificationEmailsProp)) { - obj["notificationEmails"] = notificationEmailsProp - } - enrolledServicesProp, err := expandAccessApprovalProjectSettingsEnrolledServices(d.Get("enrolled_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enrolled_services"); !isEmptyValue(reflect.ValueOf(enrolledServicesProp)) && (ok || !reflect.DeepEqual(v, enrolledServicesProp)) { - obj["enrolledServices"] = enrolledServicesProp - } - activeKeyVersionProp, 
err := expandAccessApprovalProjectSettingsActiveKeyVersion(d.Get("active_key_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("active_key_version"); !isEmptyValue(reflect.ValueOf(activeKeyVersionProp)) && (ok || !reflect.DeepEqual(v, activeKeyVersionProp)) { - obj["activeKeyVersion"] = activeKeyVersionProp - } - projectProp, err := expandAccessApprovalProjectSettingsProject(d.Get("project"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("project"); !isEmptyValue(reflect.ValueOf(projectProp)) && (ok || !reflect.DeepEqual(v, projectProp)) { - obj["project"] = projectProp - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/accessApprovalSettings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ProjectSettings: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - updateMask := []string{} - - if d.HasChange("notification_emails") { - updateMask = append(updateMask, "notificationEmails") - } - - if d.HasChange("enrolled_services") { - updateMask = append(updateMask, "enrolledServices") - } - - if d.HasChange("active_key_version") { - updateMask = append(updateMask, "activeKeyVersion") - } - - if d.HasChange("project") { - updateMask = append(updateMask, "project") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating ProjectSettings: %s", err) - } - if err := d.Set("name", flattenAccessApprovalProjectSettingsName(res["name"], d, config)); err != 
nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project_id}}/accessApprovalSettings") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating ProjectSettings %q: %#v", d.Id(), res) - - return resourceAccessApprovalProjectSettingsRead(d, meta) -} - -func resourceAccessApprovalProjectSettingsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/accessApprovalSettings") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AccessApprovalProjectSettings %q", d.Id())) - } - - if err := d.Set("name", flattenAccessApprovalProjectSettingsName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectSettings: %s", err) - } - if err := d.Set("notification_emails", flattenAccessApprovalProjectSettingsNotificationEmails(res["notificationEmails"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectSettings: %s", err) - } - if err := d.Set("enrolled_services", flattenAccessApprovalProjectSettingsEnrolledServices(res["enrolledServices"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectSettings: %s", err) - } - if err := d.Set("enrolled_ancestor", flattenAccessApprovalProjectSettingsEnrolledAncestor(res["enrolledAncestor"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectSettings: %s", err) - } - if 
err := d.Set("active_key_version", flattenAccessApprovalProjectSettingsActiveKeyVersion(res["activeKeyVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectSettings: %s", err) - } - if err := d.Set("ancestor_has_active_key_version", flattenAccessApprovalProjectSettingsAncestorHasActiveKeyVersion(res["ancestorHasActiveKeyVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectSettings: %s", err) - } - if err := d.Set("invalid_key_version", flattenAccessApprovalProjectSettingsInvalidKeyVersion(res["invalidKeyVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectSettings: %s", err) - } - if err := d.Set("project", flattenAccessApprovalProjectSettingsProject(res["project"], d, config)); err != nil { - return fmt.Errorf("Error reading ProjectSettings: %s", err) - } - - return nil -} - -func resourceAccessApprovalProjectSettingsUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - notificationEmailsProp, err := expandAccessApprovalProjectSettingsNotificationEmails(d.Get("notification_emails"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_emails"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationEmailsProp)) { - obj["notificationEmails"] = notificationEmailsProp - } - enrolledServicesProp, err := expandAccessApprovalProjectSettingsEnrolledServices(d.Get("enrolled_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enrolled_services"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enrolledServicesProp)) { - obj["enrolledServices"] = enrolledServicesProp - } - activeKeyVersionProp, err := expandAccessApprovalProjectSettingsActiveKeyVersion(d.Get("active_key_version"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("active_key_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, activeKeyVersionProp)) { - obj["activeKeyVersion"] = activeKeyVersionProp - } - projectProp, err := expandAccessApprovalProjectSettingsProject(d.Get("project"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("project"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, projectProp)) { - obj["project"] = projectProp - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/accessApprovalSettings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating ProjectSettings %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("notification_emails") { - updateMask = append(updateMask, "notificationEmails") - } - - if d.HasChange("enrolled_services") { - updateMask = append(updateMask, "enrolledServices") - } - - if d.HasChange("active_key_version") { - updateMask = append(updateMask, "activeKeyVersion") - } - - if d.HasChange("project") { - updateMask = append(updateMask, "project") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating ProjectSettings %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating ProjectSettings %q: %#v", d.Id(), res) - } - - return resourceAccessApprovalProjectSettingsRead(d, meta) -} - -func resourceAccessApprovalProjectSettingsDelete(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["notificationEmails"] = []string{} - obj["enrolledServices"] = []string{} - obj["activeKeyVersion"] = "" - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/accessApprovalSettings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Emptying ProjectSettings %q: %#v", d.Id(), obj) - updateMask := []string{} - - updateMask = append(updateMask, "notificationEmails") - updateMask = append(updateMask, "enrolledServices") - updateMask = append(updateMask, "activeKeyVersion") - - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - res, err := SendRequestWithTimeout(config, "PATCH", "", url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error emptying ProjectSettings %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished emptying ProjectSettings %q: %#v", d.Id(), res) - } - - return nil -} - -func resourceAccessApprovalProjectSettingsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/accessApprovalSettings", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project_id}}/accessApprovalSettings") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenAccessApprovalProjectSettingsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v 
-} - -func flattenAccessApprovalProjectSettingsNotificationEmails(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenAccessApprovalProjectSettingsEnrolledServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := schema.NewSet(accessApprovalEnrolledServicesHash, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed.Add(map[string]interface{}{ - "cloud_product": flattenAccessApprovalProjectSettingsEnrolledServicesCloudProduct(original["cloudProduct"], d, config), - "enrollment_level": flattenAccessApprovalProjectSettingsEnrolledServicesEnrollmentLevel(original["enrollmentLevel"], d, config), - }) - } - return transformed -} -func flattenAccessApprovalProjectSettingsEnrolledServicesCloudProduct(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalProjectSettingsEnrolledServicesEnrollmentLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalProjectSettingsEnrolledAncestor(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalProjectSettingsActiveKeyVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalProjectSettingsAncestorHasActiveKeyVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalProjectSettingsInvalidKeyVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalProjectSettingsProject(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - return v -} - -func expandAccessApprovalProjectSettingsNotificationEmails(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - return v, nil -} - -func expandAccessApprovalProjectSettingsEnrolledServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCloudProduct, err := expandAccessApprovalProjectSettingsEnrolledServicesCloudProduct(original["cloud_product"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCloudProduct); val.IsValid() && !isEmptyValue(val) { - transformed["cloudProduct"] = transformedCloudProduct - } - - transformedEnrollmentLevel, err := expandAccessApprovalProjectSettingsEnrolledServicesEnrollmentLevel(original["enrollment_level"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEnrollmentLevel); val.IsValid() && !isEmptyValue(val) { - transformed["enrollmentLevel"] = transformedEnrollmentLevel - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessApprovalProjectSettingsEnrolledServicesCloudProduct(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessApprovalProjectSettingsEnrolledServicesEnrollmentLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessApprovalProjectSettingsActiveKeyVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessApprovalProjectSettingsProject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil 
-} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_project_service_identity.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_project_service_identity.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_project_service_identity.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_lite_reservation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_lite_reservation.go deleted file mode 100644 index f48be26e9a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_lite_reservation.go +++ /dev/null @@ -1,301 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourcePubsubLiteReservation() *schema.Resource { - return &schema.Resource{ - Create: resourcePubsubLiteReservationCreate, - Read: resourcePubsubLiteReservationRead, - Update: resourcePubsubLiteReservationUpdate, - Delete: resourcePubsubLiteReservationDelete, - - Importer: &schema.ResourceImporter{ - State: resourcePubsubLiteReservationImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of the reservation.`, - }, - "throughput_capacity": { - Type: schema.TypeInt, - Required: true, - Description: `The reserved throughput capacity. 
Every unit of throughput capacity is -equivalent to 1 MiB/s of published messages or 2 MiB/s of subscribed -messages.`, - }, - "region": { - Type: schema.TypeString, - Optional: true, - Description: `The region of the pubsub lite reservation.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourcePubsubLiteReservationCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - throughputCapacityProp, err := expandPubsubLiteReservationThroughputCapacity(d.Get("throughput_capacity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("throughput_capacity"); !isEmptyValue(reflect.ValueOf(throughputCapacityProp)) && (ok || !reflect.DeepEqual(v, throughputCapacityProp)) { - obj["throughputCapacity"] = throughputCapacityProp - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{region}}/reservations?reservationId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Reservation: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Reservation: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/reservations/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: 
%s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Reservation %q: %#v", d.Id(), res) - - return resourcePubsubLiteReservationRead(d, meta) -} - -func resourcePubsubLiteReservationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{region}}/reservations/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("PubsubLiteReservation %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Reservation: %s", err) - } - - if err := d.Set("throughput_capacity", flattenPubsubLiteReservationThroughputCapacity(res["throughputCapacity"], d, config)); err != nil { - return fmt.Errorf("Error reading Reservation: %s", err) - } - - return nil -} - -func resourcePubsubLiteReservationUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - throughputCapacityProp, err := expandPubsubLiteReservationThroughputCapacity(d.Get("throughput_capacity"), d, config) - if err != nil { - 
return err - } else if v, ok := d.GetOkExists("throughput_capacity"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, throughputCapacityProp)) { - obj["throughputCapacity"] = throughputCapacityProp - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{region}}/reservations/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Reservation %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("throughput_capacity") { - updateMask = append(updateMask, "throughputCapacity") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Reservation %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Reservation %q: %#v", d.Id(), res) - } - - return resourcePubsubLiteReservationRead(d, meta) -} - -func resourcePubsubLiteReservationDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{region}}/reservations/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Reservation %q", d.Id()) - - // 
err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Reservation") - } - - log.Printf("[DEBUG] Finished deleting Reservation %q: %#v", d.Id(), res) - return nil -} - -func resourcePubsubLiteReservationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/reservations/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/reservations/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenPubsubLiteReservationThroughputCapacity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func expandPubsubLiteReservationThroughputCapacity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_lite_subscription.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_lite_subscription.go 
deleted file mode 100644 index 0693491d7b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_lite_subscription.go +++ /dev/null @@ -1,422 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourcePubsubLiteSubscription() *schema.Resource { - return &schema.Resource{ - Create: resourcePubsubLiteSubscriptionCreate, - Read: resourcePubsubLiteSubscriptionRead, - Update: resourcePubsubLiteSubscriptionUpdate, - Delete: resourcePubsubLiteSubscriptionDelete, - - Importer: &schema.ResourceImporter{ - State: resourcePubsubLiteSubscriptionImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of the subscription.`, - }, - "topic": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to a Topic resource.`, - }, - "delivery_config": { - Type: schema.TypeList, - Optional: true, - Description: `The settings for this subscription's message delivery.`, - 
MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "delivery_requirement": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"DELIVER_IMMEDIATELY", "DELIVER_AFTER_STORED", "DELIVERY_REQUIREMENT_UNSPECIFIED"}), - Description: `When this subscription should send messages to subscribers relative to messages persistence in storage. Possible values: ["DELIVER_IMMEDIATELY", "DELIVER_AFTER_STORED", "DELIVERY_REQUIREMENT_UNSPECIFIED"]`, - }, - }, - }, - }, - "region": { - Type: schema.TypeString, - Optional: true, - Description: `The region of the pubsub lite topic.`, - }, - "zone": { - Type: schema.TypeString, - Optional: true, - Description: `The zone of the pubsub lite topic.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourcePubsubLiteSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - topicProp, err := expandPubsubLiteSubscriptionTopic(d.Get("topic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("topic"); !isEmptyValue(reflect.ValueOf(topicProp)) && (ok || !reflect.DeepEqual(v, topicProp)) { - obj["topic"] = topicProp - } - deliveryConfigProp, err := expandPubsubLiteSubscriptionDeliveryConfig(d.Get("delivery_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("delivery_config"); !isEmptyValue(reflect.ValueOf(deliveryConfigProp)) && (ok || !reflect.DeepEqual(v, deliveryConfigProp)) { - obj["deliveryConfig"] = deliveryConfigProp - } - - obj, err = resourcePubsubLiteSubscriptionEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/subscriptions?subscriptionId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Subscription: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Subscription: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Subscription: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Subscription %q: %#v", d.Id(), res) - - return resourcePubsubLiteSubscriptionRead(d, meta) -} - -func resourcePubsubLiteSubscriptionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Subscription: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("PubsubLiteSubscription %q", 
d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Subscription: %s", err) - } - - if err := d.Set("topic", flattenPubsubLiteSubscriptionTopic(res["topic"], d, config)); err != nil { - return fmt.Errorf("Error reading Subscription: %s", err) - } - if err := d.Set("delivery_config", flattenPubsubLiteSubscriptionDeliveryConfig(res["deliveryConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Subscription: %s", err) - } - - return nil -} - -func resourcePubsubLiteSubscriptionUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Subscription: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - deliveryConfigProp, err := expandPubsubLiteSubscriptionDeliveryConfig(d.Get("delivery_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("delivery_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deliveryConfigProp)) { - obj["deliveryConfig"] = deliveryConfigProp - } - - obj, err = resourcePubsubLiteSubscriptionEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Subscription %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("delivery_config") { - updateMask = append(updateMask, "deliveryConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil 
indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Subscription %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Subscription %q: %#v", d.Id(), res) - } - - return resourcePubsubLiteSubscriptionRead(d, meta) -} - -func resourcePubsubLiteSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Subscription: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Subscription %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Subscription") - } - - log.Printf("[DEBUG] Finished deleting Subscription %q: %#v", d.Id(), res) - return nil -} - -func resourcePubsubLiteSubscriptionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/subscriptions/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - 
// Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenPubsubLiteSubscriptionTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenPubsubLiteSubscriptionDeliveryConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["delivery_requirement"] = - flattenPubsubLiteSubscriptionDeliveryConfigDeliveryRequirement(original["deliveryRequirement"], d, config) - return []interface{}{transformed} -} -func flattenPubsubLiteSubscriptionDeliveryConfigDeliveryRequirement(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandPubsubLiteSubscriptionTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - project, err := getProject(d, config) - if err != nil { - return "", err - } - - zone, err := getZone(d, config) - if err != nil { - return nil, err - } - - if zone == "" { - return nil, fmt.Errorf("zone must be non-empty - set in resource or at provider-level") - } - - topic := d.Get("topic").(string) - - re := regexp.MustCompile(`projects\/(.*)\/locations\/(.*)\/topics\/(.*)`) - match := re.FindStringSubmatch(topic) - if len(match) == 4 { - return topic, nil - } else { - // If no full topic given, we expand it to a full topic on the same project - fullTopic := fmt.Sprintf("projects/%s/locations/%s/topics/%s", project, zone, topic) - if err := d.Set("topic", fullTopic); err != nil { - return nil, fmt.Errorf("Error setting topic: %s", err) - } - return fullTopic, nil - } 
-} - -func expandPubsubLiteSubscriptionDeliveryConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDeliveryRequirement, err := expandPubsubLiteSubscriptionDeliveryConfigDeliveryRequirement(original["delivery_requirement"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDeliveryRequirement); val.IsValid() && !isEmptyValue(val) { - transformed["deliveryRequirement"] = transformedDeliveryRequirement - } - - return transformed, nil -} - -func expandPubsubLiteSubscriptionDeliveryConfigDeliveryRequirement(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourcePubsubLiteSubscriptionEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - - zone, err := getZone(d, config) - if err != nil { - return nil, err - } - - if zone == "" { - return nil, fmt.Errorf("zone must be non-empty - set in resource or at provider-level") - } - - // API Endpoint requires region in the URL. We infer it from the zone. 
- - region := getRegionFromZone(zone) - - if region == "" { - return nil, fmt.Errorf("invalid zone %q, unable to infer region from zone", zone) - } - - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_lite_topic.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_lite_topic.go deleted file mode 100644 index c8b2c32bdc..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_lite_topic.go +++ /dev/null @@ -1,675 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourcePubsubLiteTopic() *schema.Resource { - return &schema.Resource{ - Create: resourcePubsubLiteTopicCreate, - Read: resourcePubsubLiteTopicRead, - Update: resourcePubsubLiteTopicUpdate, - Delete: resourcePubsubLiteTopicDelete, - - Importer: &schema.ResourceImporter{ - State: resourcePubsubLiteTopicImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of the topic.`, - }, - "partition_config": { - Type: schema.TypeList, - Optional: true, - Description: `The settings for this topic's partitions.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "count": { - Type: schema.TypeInt, - Required: true, - Description: `The number of partitions in the topic. Must be at least 1.`, - }, - "capacity": { - Type: schema.TypeList, - Optional: true, - Description: `The capacity configuration.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "publish_mib_per_sec": { - Type: schema.TypeInt, - Required: true, - Description: `Subscribe throughput capacity per partition in MiB/s. Must be >= 4 and <= 16.`, - }, - "subscribe_mib_per_sec": { - Type: schema.TypeInt, - Required: true, - Description: `Publish throughput capacity per partition in MiB/s. 
Must be >= 4 and <= 16.`, - }, - }, - }, - }, - }, - }, - }, - "region": { - Type: schema.TypeString, - Optional: true, - Description: `The region of the pubsub lite topic.`, - }, - "reservation_config": { - Type: schema.TypeList, - Optional: true, - Description: `The settings for this topic's Reservation usage.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "throughput_reservation": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Reservation to use for this topic's throughput capacity.`, - }, - }, - }, - }, - "retention_config": { - Type: schema.TypeList, - Optional: true, - Description: `The settings for a topic's message retention.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "per_partition_bytes": { - Type: schema.TypeString, - Required: true, - Description: `The provisioned storage, in bytes, per partition. If the number of bytes stored -in any of the topic's partitions grows beyond this value, older messages will be -dropped to make room for newer ones, regardless of the value of period.`, - }, - "period": { - Type: schema.TypeString, - Optional: true, - Description: `How long a published message is retained. If unset, messages will be retained as -long as the bytes retained for each partition is below perPartitionBytes. A -duration in seconds with up to nine fractional digits, terminated by 's'. 
-Example: "3.5s".`, - }, - }, - }, - }, - "zone": { - Type: schema.TypeString, - Optional: true, - Description: `The zone of the pubsub lite topic.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourcePubsubLiteTopicCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - partitionConfigProp, err := expandPubsubLiteTopicPartitionConfig(d.Get("partition_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("partition_config"); !isEmptyValue(reflect.ValueOf(partitionConfigProp)) && (ok || !reflect.DeepEqual(v, partitionConfigProp)) { - obj["partitionConfig"] = partitionConfigProp - } - retentionConfigProp, err := expandPubsubLiteTopicRetentionConfig(d.Get("retention_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retention_config"); !isEmptyValue(reflect.ValueOf(retentionConfigProp)) && (ok || !reflect.DeepEqual(v, retentionConfigProp)) { - obj["retentionConfig"] = retentionConfigProp - } - reservationConfigProp, err := expandPubsubLiteTopicReservationConfig(d.Get("reservation_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reservation_config"); !isEmptyValue(reflect.ValueOf(reservationConfigProp)) && (ok || !reflect.DeepEqual(v, reservationConfigProp)) { - obj["reservationConfig"] = reservationConfigProp - } - - obj, err = resourcePubsubLiteTopicEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/topics?topicId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Topic: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if 
err != nil { - return fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Topic: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/topics/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Topic %q: %#v", d.Id(), res) - - return resourcePubsubLiteTopicRead(d, meta) -} - -func resourcePubsubLiteTopicRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/topics/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("PubsubLiteTopic %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Topic: %s", err) - } - - if err := d.Set("partition_config", flattenPubsubLiteTopicPartitionConfig(res["partitionConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Topic: %s", err) - } - if err := 
d.Set("retention_config", flattenPubsubLiteTopicRetentionConfig(res["retentionConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Topic: %s", err) - } - if err := d.Set("reservation_config", flattenPubsubLiteTopicReservationConfig(res["reservationConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Topic: %s", err) - } - - return nil -} - -func resourcePubsubLiteTopicUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - partitionConfigProp, err := expandPubsubLiteTopicPartitionConfig(d.Get("partition_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("partition_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, partitionConfigProp)) { - obj["partitionConfig"] = partitionConfigProp - } - retentionConfigProp, err := expandPubsubLiteTopicRetentionConfig(d.Get("retention_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retention_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retentionConfigProp)) { - obj["retentionConfig"] = retentionConfigProp - } - reservationConfigProp, err := expandPubsubLiteTopicReservationConfig(d.Get("reservation_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reservation_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, reservationConfigProp)) { - obj["reservationConfig"] = reservationConfigProp - } - - obj, err = resourcePubsubLiteTopicEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/topics/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Topic %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("partition_config") { - updateMask = append(updateMask, "partitionConfig") - } - - if d.HasChange("retention_config") { - updateMask = append(updateMask, "retentionConfig") - } - - if d.HasChange("reservation_config") { - updateMask = append(updateMask, "reservationConfig") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Topic %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Topic %q: %#v", d.Id(), res) - } - - return resourcePubsubLiteTopicRead(d, meta) -} - -func resourcePubsubLiteTopicDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/topics/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Topic %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - 
billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Topic") - } - - log.Printf("[DEBUG] Finished deleting Topic %q: %#v", d.Id(), res) - return nil -} - -func resourcePubsubLiteTopicImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/topics/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/topics/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenPubsubLiteTopicPartitionConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["count"] = - flattenPubsubLiteTopicPartitionConfigCount(original["count"], d, config) - transformed["capacity"] = - flattenPubsubLiteTopicPartitionConfigCapacity(original["capacity"], d, config) - return []interface{}{transformed} -} -func flattenPubsubLiteTopicPartitionConfigCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenPubsubLiteTopicPartitionConfigCapacity(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["publish_mib_per_sec"] = - flattenPubsubLiteTopicPartitionConfigCapacityPublishMibPerSec(original["publishMibPerSec"], d, config) - transformed["subscribe_mib_per_sec"] = - flattenPubsubLiteTopicPartitionConfigCapacitySubscribeMibPerSec(original["subscribeMibPerSec"], d, config) - return []interface{}{transformed} -} -func flattenPubsubLiteTopicPartitionConfigCapacityPublishMibPerSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenPubsubLiteTopicPartitionConfigCapacitySubscribeMibPerSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenPubsubLiteTopicRetentionConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["per_partition_bytes"] = - flattenPubsubLiteTopicRetentionConfigPerPartitionBytes(original["perPartitionBytes"], d, config) - transformed["period"] = - 
flattenPubsubLiteTopicRetentionConfigPeriod(original["period"], d, config) - return []interface{}{transformed} -} -func flattenPubsubLiteTopicRetentionConfigPerPartitionBytes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubLiteTopicRetentionConfigPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubLiteTopicReservationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["throughput_reservation"] = - flattenPubsubLiteTopicReservationConfigThroughputReservation(original["throughputReservation"], d, config) - return []interface{}{transformed} -} -func flattenPubsubLiteTopicReservationConfigThroughputReservation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandPubsubLiteTopicPartitionConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCount, err := expandPubsubLiteTopicPartitionConfigCount(original["count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCount); val.IsValid() && !isEmptyValue(val) { - transformed["count"] = transformedCount - } - - transformedCapacity, err := expandPubsubLiteTopicPartitionConfigCapacity(original["capacity"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCapacity); val.IsValid() && !isEmptyValue(val) { - transformed["capacity"] = transformedCapacity - } - - return transformed, nil -} - -func 
expandPubsubLiteTopicPartitionConfigCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubLiteTopicPartitionConfigCapacity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPublishMibPerSec, err := expandPubsubLiteTopicPartitionConfigCapacityPublishMibPerSec(original["publish_mib_per_sec"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPublishMibPerSec); val.IsValid() && !isEmptyValue(val) { - transformed["publishMibPerSec"] = transformedPublishMibPerSec - } - - transformedSubscribeMibPerSec, err := expandPubsubLiteTopicPartitionConfigCapacitySubscribeMibPerSec(original["subscribe_mib_per_sec"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSubscribeMibPerSec); val.IsValid() && !isEmptyValue(val) { - transformed["subscribeMibPerSec"] = transformedSubscribeMibPerSec - } - - return transformed, nil -} - -func expandPubsubLiteTopicPartitionConfigCapacityPublishMibPerSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubLiteTopicPartitionConfigCapacitySubscribeMibPerSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubLiteTopicRetentionConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPerPartitionBytes, err := expandPubsubLiteTopicRetentionConfigPerPartitionBytes(original["per_partition_bytes"], d, config) - if err != 
nil { - return nil, err - } else if val := reflect.ValueOf(transformedPerPartitionBytes); val.IsValid() && !isEmptyValue(val) { - transformed["perPartitionBytes"] = transformedPerPartitionBytes - } - - transformedPeriod, err := expandPubsubLiteTopicRetentionConfigPeriod(original["period"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["period"] = transformedPeriod - } - - return transformed, nil -} - -func expandPubsubLiteTopicRetentionConfigPerPartitionBytes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubLiteTopicRetentionConfigPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubLiteTopicReservationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedThroughputReservation, err := expandPubsubLiteTopicReservationConfigThroughputReservation(original["throughput_reservation"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedThroughputReservation); val.IsValid() && !isEmptyValue(val) { - transformed["throughputReservation"] = transformedThroughputReservation - } - - return transformed, nil -} - -func expandPubsubLiteTopicReservationConfigThroughputReservation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("reservations", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for throughput_reservation: %s", err) - } - // Custom due to "locations" rather than "regions". 
- return fmt.Sprintf("projects/%s/locations/%s/reservations/%s", f.Project, f.Region, f.Name), nil -} - -func resourcePubsubLiteTopicEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - - zone, err := getZone(d, config) - if err != nil { - return nil, err - } - - if zone == "" { - return nil, fmt.Errorf("zone must be non-empty - set in resource or at provider-level") - } - - // API Endpoint requires region in the URL. We infer it from the zone. - - region := getRegionFromZone(zone) - - if region == "" { - return nil, fmt.Errorf("invalid zone %q, unable to infer region from zone", zone) - } - - return obj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_schema.go deleted file mode 100644 index 7759ed6140..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_schema.go +++ /dev/null @@ -1,301 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourcePubsubSchema() *schema.Resource { - return &schema.Resource{ - Create: resourcePubsubSchemaCreate, - Read: resourcePubsubSchemaRead, - Delete: resourcePubsubSchemaDelete, - - Importer: &schema.ResourceImporter{ - State: resourcePubsubSchemaImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The ID to use for the schema, which will become the final component of the schema's resource name.`, - }, - "definition": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The definition of the schema. 
-This should contain a string representing the full definition of the schema -that is a valid schema definition of the type specified in type.`, - }, - "type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"TYPE_UNSPECIFIED", "PROTOCOL_BUFFER", "AVRO", ""}), - Description: `The type of the schema definition Default value: "TYPE_UNSPECIFIED" Possible values: ["TYPE_UNSPECIFIED", "PROTOCOL_BUFFER", "AVRO"]`, - Default: "TYPE_UNSPECIFIED", - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourcePubsubSchemaCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - typeProp, err := expandPubsubSchemaType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - definitionProp, err := expandPubsubSchemaDefinition(d.Get("definition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("definition"); !isEmptyValue(reflect.ValueOf(definitionProp)) && (ok || !reflect.DeepEqual(v, definitionProp)) { - obj["definition"] = definitionProp - } - nameProp, err := expandPubsubSchemaName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/schemas?schemaId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Schema: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - 
return fmt.Errorf("Error fetching project for Schema: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Schema: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/schemas/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Schema %q: %#v", d.Id(), res) - - return resourcePubsubSchemaRead(d, meta) -} - -func resourcePubsubSchemaPollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/schemas/{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, fmt.Errorf("Error fetching project for Schema: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - return res, nil - } -} - -func resourcePubsubSchemaRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/schemas/{{name}}") - if err != nil { - return 
err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Schema: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("PubsubSchema %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Schema: %s", err) - } - - if err := d.Set("type", flattenPubsubSchemaType(res["type"], d, config)); err != nil { - return fmt.Errorf("Error reading Schema: %s", err) - } - if err := d.Set("name", flattenPubsubSchemaName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Schema: %s", err) - } - - return nil -} - -func resourcePubsubSchemaDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Schema: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/schemas/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Schema %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Schema") - } - - err = PollingWaitTime(resourcePubsubSchemaPollRead(d, meta), PollCheckForAbsence, 
"Deleting Schema", d.Timeout(schema.TimeoutCreate), 10) - if err != nil { - return fmt.Errorf("Error waiting to delete Schema: %s", err) - } - - log.Printf("[DEBUG] Finished deleting Schema %q: %#v", d.Id(), res) - return nil -} - -func resourcePubsubSchemaImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/schemas/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/schemas/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenPubsubSchemaType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubSchemaName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandPubsubSchemaType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubSchemaDefinition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubSchemaName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return GetResourceNameFromSelfLink(v.(string)), nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_topic.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_topic.go deleted file mode 100644 index 39fa874247..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_topic.go +++ /dev/null @@ -1,632 +0,0 @@ -// 
---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourcePubsubTopic() *schema.Resource { - return &schema.Resource{ - Create: resourcePubsubTopicCreate, - Read: resourcePubsubTopicRead, - Update: resourcePubsubTopicUpdate, - Delete: resourcePubsubTopicDelete, - - Importer: &schema.ResourceImporter{ - State: resourcePubsubTopicImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of the topic.`, - }, - "kms_key_name": { - Type: schema.TypeString, - Optional: true, - Description: `The resource name of the Cloud KMS CryptoKey to be used to protect access -to messages published on this topic. Your project's PubSub service account -('service-{{PROJECT_NUMBER}}@gcp-sa-pubsub.iam.gserviceaccount.com') must have -'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. 
-The expected format is 'projects/*/locations/*/keyRings/*/cryptoKeys/*'`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `A set of key/value label pairs to assign to this Topic.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "message_retention_duration": { - Type: schema.TypeString, - Optional: true, - Description: `Indicates the minimum duration to retain a message after it is published -to the topic. If this field is set, messages published to the topic in -the last messageRetentionDuration are always available to subscribers. -For instance, it allows any attached subscription to seek to a timestamp -that is up to messageRetentionDuration in the past. If this field is not -set, message retention is controlled by settings on individual subscriptions. -Cannot be more than 31 days or less than 10 minutes.`, - }, - "message_storage_policy": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Policy constraining the set of Google Cloud Platform regions where -messages published to the topic may be stored. If not present, then no -constraints are in effect.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "allowed_persistence_regions": { - Type: schema.TypeList, - Required: true, - Description: `A list of IDs of GCP regions where messages that are published to -the topic may be persisted in storage. Messages published by -publishers running in non-allowed GCP regions (or running outside -of GCP altogether) will be routed for storage in one of the -allowed regions. 
An empty list means that no regions are allowed, -and is not a valid configuration.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "schema_settings": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Settings for validating messages published against a schema.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "schema": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the schema that messages published should be -validated against. Format is projects/{project}/schemas/{schema}. -The value of this field will be _deleted-schema_ -if the schema has been deleted.`, - }, - "encoding": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"ENCODING_UNSPECIFIED", "JSON", "BINARY", ""}), - Description: `The encoding of messages validated against schema. Default value: "ENCODING_UNSPECIFIED" Possible values: ["ENCODING_UNSPECIFIED", "JSON", "BINARY"]`, - Default: "ENCODING_UNSPECIFIED", - }, - }, - }, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourcePubsubTopicCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandPubsubTopicName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - kmsKeyNameProp, err := expandPubsubTopicKmsKeyName(d.Get("kms_key_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kms_key_name"); !isEmptyValue(reflect.ValueOf(kmsKeyNameProp)) && (ok || !reflect.DeepEqual(v, 
kmsKeyNameProp)) { - obj["kmsKeyName"] = kmsKeyNameProp - } - labelsProp, err := expandPubsubTopicLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - messageStoragePolicyProp, err := expandPubsubTopicMessageStoragePolicy(d.Get("message_storage_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("message_storage_policy"); !isEmptyValue(reflect.ValueOf(messageStoragePolicyProp)) && (ok || !reflect.DeepEqual(v, messageStoragePolicyProp)) { - obj["messageStoragePolicy"] = messageStoragePolicyProp - } - schemaSettingsProp, err := expandPubsubTopicSchemaSettings(d.Get("schema_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schema_settings"); !isEmptyValue(reflect.ValueOf(schemaSettingsProp)) && (ok || !reflect.DeepEqual(v, schemaSettingsProp)) { - obj["schemaSettings"] = schemaSettingsProp - } - messageRetentionDurationProp, err := expandPubsubTopicMessageRetentionDuration(d.Get("message_retention_duration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("message_retention_duration"); !isEmptyValue(reflect.ValueOf(messageRetentionDurationProp)) && (ok || !reflect.DeepEqual(v, messageRetentionDurationProp)) { - obj["messageRetentionDuration"] = messageRetentionDurationProp - } - - obj, err = resourcePubsubTopicEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Topic: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value 
was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), pubsubTopicProjectNotReady) - if err != nil { - return fmt.Errorf("Error creating Topic: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/topics/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourcePubsubTopicPollRead(d, meta), PollCheckForExistence, "Creating Topic", d.Timeout(schema.TimeoutCreate), 1) - if err != nil { - log.Printf("[ERROR] Unable to confirm eventually consistent Topic %q finished updating: %q", d.Id(), err) - } - - log.Printf("[DEBUG] Finished creating Topic %q: %#v", d.Id(), res) - - return resourcePubsubTopicRead(d, meta) -} - -func resourcePubsubTopicPollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, pubsubTopicProjectNotReady) - if err != nil { - return res, err - } - return res, nil - } -} - -func resourcePubsubTopicRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, pubsubTopicProjectNotReady) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("PubsubTopic %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Topic: %s", err) - } - - if err := d.Set("name", flattenPubsubTopicName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Topic: %s", err) - } - if err := d.Set("kms_key_name", flattenPubsubTopicKmsKeyName(res["kmsKeyName"], d, config)); err != nil { - return fmt.Errorf("Error reading Topic: %s", err) - } - if err := d.Set("labels", flattenPubsubTopicLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Topic: %s", err) - } - if err := d.Set("message_storage_policy", flattenPubsubTopicMessageStoragePolicy(res["messageStoragePolicy"], d, config)); err != nil { - return fmt.Errorf("Error reading Topic: %s", err) - } - if err := d.Set("schema_settings", flattenPubsubTopicSchemaSettings(res["schemaSettings"], d, config)); err != nil { - return fmt.Errorf("Error reading Topic: %s", err) - } - if err := d.Set("message_retention_duration", flattenPubsubTopicMessageRetentionDuration(res["messageRetentionDuration"], d, config)); err != nil { - return fmt.Errorf("Error reading Topic: %s", err) - } - - return nil -} - -func resourcePubsubTopicUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) 
- userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - kmsKeyNameProp, err := expandPubsubTopicKmsKeyName(d.Get("kms_key_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kms_key_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, kmsKeyNameProp)) { - obj["kmsKeyName"] = kmsKeyNameProp - } - labelsProp, err := expandPubsubTopicLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - messageStoragePolicyProp, err := expandPubsubTopicMessageStoragePolicy(d.Get("message_storage_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("message_storage_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, messageStoragePolicyProp)) { - obj["messageStoragePolicy"] = messageStoragePolicyProp - } - schemaSettingsProp, err := expandPubsubTopicSchemaSettings(d.Get("schema_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schema_settings"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, schemaSettingsProp)) { - obj["schemaSettings"] = schemaSettingsProp - } - messageRetentionDurationProp, err := expandPubsubTopicMessageRetentionDuration(d.Get("message_retention_duration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("message_retention_duration"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, messageRetentionDurationProp)) { - obj["messageRetentionDuration"] = messageRetentionDurationProp - } - - obj, err = 
resourcePubsubTopicUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Topic %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("kms_key_name") { - updateMask = append(updateMask, "kmsKeyName") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("message_storage_policy") { - updateMask = append(updateMask, "messageStoragePolicy") - } - - if d.HasChange("schema_settings") { - updateMask = append(updateMask, "schemaSettings") - } - - if d.HasChange("message_retention_duration") { - updateMask = append(updateMask, "messageRetentionDuration") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), pubsubTopicProjectNotReady) - - if err != nil { - return fmt.Errorf("Error updating Topic %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Topic %q: %#v", d.Id(), res) - } - - return resourcePubsubTopicRead(d, meta) -} - -func resourcePubsubTopicDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, 
"{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Topic %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), pubsubTopicProjectNotReady) - if err != nil { - return handleNotFoundError(err, d, "Topic") - } - - log.Printf("[DEBUG] Finished deleting Topic %q: %#v", d.Id(), res) - return nil -} - -func resourcePubsubTopicImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/topics/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/topics/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenPubsubTopicName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenPubsubTopicKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubTopicLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubTopicMessageStoragePolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allowed_persistence_regions"] = - 
flattenPubsubTopicMessageStoragePolicyAllowedPersistenceRegions(original["allowedPersistenceRegions"], d, config) - return []interface{}{transformed} -} -func flattenPubsubTopicMessageStoragePolicyAllowedPersistenceRegions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubTopicSchemaSettings(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["schema"] = - flattenPubsubTopicSchemaSettingsSchema(original["schema"], d, config) - transformed["encoding"] = - flattenPubsubTopicSchemaSettingsEncoding(original["encoding"], d, config) - return []interface{}{transformed} -} -func flattenPubsubTopicSchemaSettingsSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubTopicSchemaSettingsEncoding(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubTopicMessageRetentionDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandPubsubTopicName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return GetResourceNameFromSelfLink(v.(string)), nil -} - -func expandPubsubTopicKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubTopicLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandPubsubTopicMessageStoragePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - 
return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowedPersistenceRegions, err := expandPubsubTopicMessageStoragePolicyAllowedPersistenceRegions(original["allowed_persistence_regions"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAllowedPersistenceRegions); val.IsValid() && !isEmptyValue(val) { - transformed["allowedPersistenceRegions"] = transformedAllowedPersistenceRegions - } - - return transformed, nil -} - -func expandPubsubTopicMessageStoragePolicyAllowedPersistenceRegions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubTopicSchemaSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSchema, err := expandPubsubTopicSchemaSettingsSchema(original["schema"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { - transformed["schema"] = transformedSchema - } - - transformedEncoding, err := expandPubsubTopicSchemaSettingsEncoding(original["encoding"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !isEmptyValue(val) { - transformed["encoding"] = transformedEncoding - } - - return transformed, nil -} - -func expandPubsubTopicSchemaSettingsSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubTopicSchemaSettingsEncoding(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubTopicMessageRetentionDuration(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func resourcePubsubTopicEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - delete(obj, "name") - return obj, nil -} - -func resourcePubsubTopicUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - newObj := make(map[string]interface{}) - newObj["topic"] = obj - return newObj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_redis_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_redis_instance.go deleted file mode 100644 index 9976b427bb..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_redis_instance.go +++ /dev/null @@ -1,2017 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -// Is the new redis version less than the old one? 
-func isRedisVersionDecreasing(_ context.Context, old, new, _ interface{}) bool { - return isRedisVersionDecreasingFunc(old, new) -} - -// separate function for unit testing -func isRedisVersionDecreasingFunc(old, new interface{}) bool { - if old == nil || new == nil { - return false - } - re := regexp.MustCompile(`REDIS_(\d+)_(\d+)`) - oldParsed := re.FindSubmatch([]byte(old.(string))) - newParsed := re.FindSubmatch([]byte(new.(string))) - - if oldParsed == nil || newParsed == nil { - return false - } - - oldVersion, err := strconv.ParseFloat(fmt.Sprintf("%s.%s", oldParsed[1], oldParsed[2]), 32) - if err != nil { - return false - } - newVersion, err := strconv.ParseFloat(fmt.Sprintf("%s.%s", newParsed[1], newParsed[2]), 32) - if err != nil { - return false - } - - return newVersion < oldVersion -} - -// returns true if old=new or old='auto' -func secondaryIpDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - if (strings.ToLower(new) == "auto" && old != "") || old == new { - return true - } - return false -} - -func ResourceRedisInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceRedisInstanceCreate, - Read: resourceRedisInstanceRead, - Update: resourceRedisInstanceUpdate, - Delete: resourceRedisInstanceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceRedisInstanceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - CustomizeDiff: customdiff.All( - customdiff.ForceNewIfChange("redis_version", isRedisVersionDecreasing)), - - Schema: map[string]*schema.Schema{ - "memory_size_gb": { - Type: schema.TypeInt, - Required: true, - Description: `Redis memory size in GiB.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z][a-z0-9-]{0,39}[a-z0-9]$`), - Description: `The ID of the instance or a 
fully qualified identifier for the instance.`, - }, - "alternative_location_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Only applicable to STANDARD_HA tier which protects the instance -against zonal failures by provisioning it across two zones. -If provided, it must be a different zone from the one provided in -[locationId].`, - }, - "auth_enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `Optional. Indicates whether OSS Redis AUTH is enabled for the -instance. If set to "true" AUTH is enabled on the instance. -Default value is "false" meaning AUTH is disabled.`, - Default: false, - }, - "authorized_network": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The full name of the Google Compute Engine network to which the -instance is connected. If left unspecified, the default network -will be used.`, - }, - "connect_mode": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS", ""}), - Description: `The connection mode of the Redis instance. Default value: "DIRECT_PEERING" Possible values: ["DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS"]`, - Default: "DIRECT_PEERING", - }, - "customer_managed_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Optional. The KMS key reference that you want to use to encrypt the data at rest for this Redis -instance. 
If this is provided, CMEK is enabled.`, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `An arbitrary and optional user-provided name for the instance.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Resource labels to represent user provided metadata.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "location_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The zone where the instance will be provisioned. If not provided, -the service will choose a zone for the instance. For STANDARD_HA tier, -instances will be created across two zones for protection against -zonal failures. If [alternativeLocationId] is also provided, it must -be different from [locationId].`, - }, - "maintenance_policy": { - Type: schema.TypeList, - Optional: true, - Description: `Maintenance policy for an instance.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Optional. Description of what this policy is for. -Create/Update methods return INVALID_ARGUMENT if the -length is greater than 512.`, - }, - "weekly_maintenance_window": { - Type: schema.TypeList, - Optional: true, - Description: `Optional. Maintenance window that is applied to resources covered by this policy. -Minimum 1. For the current version, the maximum number -of weekly_window is expected to be one.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "day": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), - Description: `Required. The day of week that maintenance updates occur. - -- DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. 
-- MONDAY: Monday -- TUESDAY: Tuesday -- WEDNESDAY: Wednesday -- THURSDAY: Thursday -- FRIDAY: Friday -- SATURDAY: Saturday -- SUNDAY: Sunday Possible values: ["DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "start_time": { - Type: schema.TypeList, - Required: true, - Description: `Required. Start time of the window in UTC time.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hours": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 23), - Description: `Hours of day in 24 hour format. Should be from 0 to 23. -An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, - }, - "minutes": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 59), - Description: `Minutes of hour of day. Must be from 0 to 59.`, - }, - "nanos": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 999999999), - Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, - }, - "seconds": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 60), - Description: `Seconds of minutes of the time. Must normally be from 0 to 59. -An API may allow the value 60 if it allows leap-seconds.`, - }, - }, - }, - }, - "duration": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Duration of the maintenance window. -The current window is fixed at 1 hour. -A duration in seconds with up to nine fractional digits, -terminated by 's'. Example: "3.5s".`, - }, - }, - }, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. The time when the policy was created. -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond -resolution and up to nine fractional digits.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. 
The time when the policy was last updated. -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond -resolution and up to nine fractional digits.`, - }, - }, - }, - }, - "maintenance_schedule": { - Type: schema.TypeList, - Optional: true, - Description: `Upcoming maintenance schedule.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "end_time": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. The end time of any upcoming scheduled maintenance for this instance. -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond -resolution and up to nine fractional digits.`, - }, - "schedule_deadline_time": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. The deadline that the maintenance schedule start time -can not go beyond, including reschedule. -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond -resolution and up to nine fractional digits.`, - }, - "start_time": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. The start time of any upcoming scheduled maintenance for this instance. -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond -resolution and up to nine fractional digits.`, - }, - }, - }, - }, - "persistence_config": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Description: `Persistence configuration for an instance.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "persistence_mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"DISABLED", "RDB"}), - Description: `Optional. Controls whether Persistence features are enabled. If not provided, the existing value will be used. - -- DISABLED: Persistence is disabled for the instance, and any existing snapshots are deleted. -- RDB: RDB based Persistence is enabled. 
Possible values: ["DISABLED", "RDB"]`, - }, - "rdb_snapshot_period": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"ONE_HOUR", "SIX_HOURS", "TWELVE_HOURS", "TWENTY_FOUR_HOURS", ""}), - Description: `Optional. Available snapshot periods for scheduling. - -- ONE_HOUR: Snapshot every 1 hour. -- SIX_HOURS: Snapshot every 6 hours. -- TWELVE_HOURS: Snapshot every 12 hours. -- TWENTY_FOUR_HOURS: Snapshot every 24 hours. Possible values: ["ONE_HOUR", "SIX_HOURS", "TWELVE_HOURS", "TWENTY_FOUR_HOURS"]`, - }, - "rdb_snapshot_start_time": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Optional. Date and time that the first snapshot was/will be attempted, -and to which future snapshots will be aligned. If not provided, -the current time will be used. -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution -and up to nine fractional digits. -Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "rdb_next_snapshot_time": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. The next time that a snapshot attempt is scheduled to occur. -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up -to nine fractional digits. -Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - "read_replicas_mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateEnum([]string{"READ_REPLICAS_DISABLED", "READ_REPLICAS_ENABLED", ""}), - Description: `Optional. Read replica mode. Can only be specified when trying to create the instance. -If not set, Memorystore Redis backend will default to READ_REPLICAS_DISABLED. -- READ_REPLICAS_DISABLED: If disabled, read endpoint will not be provided and the -instance cannot scale up or down the number of replicas. 
-- READ_REPLICAS_ENABLED: If enabled, read endpoint will be provided and the instance -can scale up and down the number of replicas. Possible values: ["READ_REPLICAS_DISABLED", "READ_REPLICAS_ENABLED"]`, - }, - "redis_configs": { - Type: schema.TypeMap, - Optional: true, - Description: `Redis configuration parameters, according to http://redis.io/topics/config. -Please check Memorystore documentation for the list of supported parameters: -https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "redis_version": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `The version of Redis software. If not provided, latest supported -version will be used. Please check the API documentation linked -at the top for the latest valid values.`, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The name of the Redis region of the instance.`, - }, - "replica_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `Optional. The number of replica nodes. The valid range for the Standard Tier with -read replicas enabled is [1-5] and defaults to 2. If read replicas are not enabled -for a Standard Tier instance, the only valid value is 1 and the default is 1. -The valid value for basic tier is 0 and the default is also 0.`, - }, - "reserved_ip_range": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The CIDR range of internal addresses that are reserved for this -instance. If not provided, the service will choose an unused /29 -block, for example, 10.0.0.0/29 or 192.168.0.0/29. 
Ranges must be -unique and non-overlapping with existing subnets in an authorized -network.`, - }, - "secondary_ip_range": { - Type: schema.TypeString, - Computed: true, - Optional: true, - DiffSuppressFunc: secondaryIpDiffSuppress, - Description: `Optional. Additional IP range for node placement. Required when enabling read replicas on -an existing instance. For DIRECT_PEERING mode value must be a CIDR range of size /28, or -"auto". For PRIVATE_SERVICE_ACCESS mode value must be the name of an allocated address -range associated with the private service access connection, or "auto".`, - }, - "tier": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"BASIC", "STANDARD_HA", ""}), - Description: `The service tier of the instance. Must be one of these values: - -- BASIC: standalone instance -- STANDARD_HA: highly available primary/replica instances Default value: "BASIC" Possible values: ["BASIC", "STANDARD_HA"]`, - Default: "BASIC", - }, - "transit_encryption_mode": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"SERVER_AUTHENTICATION", "DISABLED", ""}), - Description: `The TLS mode of the Redis instance, If not provided, TLS is disabled for the instance. - -- SERVER_AUTHENTICATION: Client to Server traffic encryption enabled with server authentication Default value: "DISABLED" Possible values: ["SERVER_AUTHENTICATION", "DISABLED"]`, - Default: "DISABLED", - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time the instance was created in RFC3339 UTC "Zulu" format, -accurate to nanoseconds.`, - }, - "current_location_id": { - Type: schema.TypeString, - Computed: true, - Description: `The current zone where the Redis endpoint is placed. -For Basic Tier instances, this will always be the same as the -[locationId] provided by the user at creation time. 
For Standard Tier -instances, this can be either [locationId] or [alternativeLocationId] -and can change after a failover event.`, - }, - "host": { - Type: schema.TypeString, - Computed: true, - Description: `Hostname or IP address of the exposed Redis endpoint used by clients -to connect to the service.`, - }, - "nodes": { - Type: schema.TypeList, - Computed: true, - Description: `Output only. Info per node.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - Description: `Node identifying string. e.g. 'node-0', 'node-1'`, - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Description: `Location of the node.`, - }, - }, - }, - }, - "persistence_iam_identity": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Cloud IAM identity used by import / export operations -to transfer data to/from Cloud Storage. Format is "serviceAccount:". -The value may change over time for a given instance so should be -checked before each import/export operation.`, - }, - "port": { - Type: schema.TypeInt, - Computed: true, - Description: `The port number of the exposed Redis endpoint.`, - }, - "read_endpoint": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Hostname or IP address of the exposed readonly Redis endpoint. Standard tier only. -Targets all healthy replica nodes in instance. Replication is asynchronous and replica nodes -will exhibit some lag behind the primary. Write requests must target 'host'.`, - }, - "read_endpoint_port": { - Type: schema.TypeInt, - Computed: true, - Description: `Output only. The port number of the exposed readonly redis endpoint. Standard tier only. 
-Write requests should target 'port'.`, - }, - "server_ca_certs": { - Type: schema.TypeList, - Computed: true, - Description: `List of server CA certificates for the instance.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cert": { - Type: schema.TypeString, - Computed: true, - Description: `The certificate data in PEM format.`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time when the certificate was created.`, - }, - "expire_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time when the certificate expires.`, - }, - "serial_number": { - Type: schema.TypeString, - Computed: true, - Description: `Serial number, as extracted from the certificate.`, - }, - "sha1_fingerprint": { - Type: schema.TypeString, - Computed: true, - Description: `Sha1 Fingerprint of the certificate.`, - }, - }, - }, - }, - "auth_string": { - Type: schema.TypeString, - Description: "AUTH String set on the instance. This field will only be populated if auth_enabled is true.", - Computed: true, - Sensitive: true, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceRedisInstanceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - alternativeLocationIdProp, err := expandRedisInstanceAlternativeLocationId(d.Get("alternative_location_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("alternative_location_id"); !isEmptyValue(reflect.ValueOf(alternativeLocationIdProp)) && (ok || !reflect.DeepEqual(v, alternativeLocationIdProp)) { - obj["alternativeLocationId"] = alternativeLocationIdProp - } - authEnabledProp, err := expandRedisInstanceAuthEnabled(d.Get("auth_enabled"), d, config) - if err != nil { - return 
err - } else if v, ok := d.GetOkExists("auth_enabled"); !isEmptyValue(reflect.ValueOf(authEnabledProp)) && (ok || !reflect.DeepEqual(v, authEnabledProp)) { - obj["authEnabled"] = authEnabledProp - } - authorizedNetworkProp, err := expandRedisInstanceAuthorizedNetwork(d.Get("authorized_network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authorized_network"); !isEmptyValue(reflect.ValueOf(authorizedNetworkProp)) && (ok || !reflect.DeepEqual(v, authorizedNetworkProp)) { - obj["authorizedNetwork"] = authorizedNetworkProp - } - connectModeProp, err := expandRedisInstanceConnectMode(d.Get("connect_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("connect_mode"); !isEmptyValue(reflect.ValueOf(connectModeProp)) && (ok || !reflect.DeepEqual(v, connectModeProp)) { - obj["connectMode"] = connectModeProp - } - displayNameProp, err := expandRedisInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandRedisInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - redisConfigsProp, err := expandRedisInstanceRedisConfigs(d.Get("redis_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("redis_configs"); !isEmptyValue(reflect.ValueOf(redisConfigsProp)) && (ok || !reflect.DeepEqual(v, redisConfigsProp)) { - obj["redisConfigs"] = redisConfigsProp - } - locationIdProp, err := expandRedisInstanceLocationId(d.Get("location_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("location_id"); 
!isEmptyValue(reflect.ValueOf(locationIdProp)) && (ok || !reflect.DeepEqual(v, locationIdProp)) { - obj["locationId"] = locationIdProp - } - nameProp, err := expandRedisInstanceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - persistenceConfigProp, err := expandRedisInstancePersistenceConfig(d.Get("persistence_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("persistence_config"); !isEmptyValue(reflect.ValueOf(persistenceConfigProp)) && (ok || !reflect.DeepEqual(v, persistenceConfigProp)) { - obj["persistenceConfig"] = persistenceConfigProp - } - maintenancePolicyProp, err := expandRedisInstanceMaintenancePolicy(d.Get("maintenance_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("maintenance_policy"); !isEmptyValue(reflect.ValueOf(maintenancePolicyProp)) && (ok || !reflect.DeepEqual(v, maintenancePolicyProp)) { - obj["maintenancePolicy"] = maintenancePolicyProp - } - maintenanceScheduleProp, err := expandRedisInstanceMaintenanceSchedule(d.Get("maintenance_schedule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("maintenance_schedule"); !isEmptyValue(reflect.ValueOf(maintenanceScheduleProp)) && (ok || !reflect.DeepEqual(v, maintenanceScheduleProp)) { - obj["maintenanceSchedule"] = maintenanceScheduleProp - } - memorySizeGbProp, err := expandRedisInstanceMemorySizeGb(d.Get("memory_size_gb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("memory_size_gb"); !isEmptyValue(reflect.ValueOf(memorySizeGbProp)) && (ok || !reflect.DeepEqual(v, memorySizeGbProp)) { - obj["memorySizeGb"] = memorySizeGbProp - } - redisVersionProp, err := expandRedisInstanceRedisVersion(d.Get("redis_version"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("redis_version"); !isEmptyValue(reflect.ValueOf(redisVersionProp)) && (ok || !reflect.DeepEqual(v, redisVersionProp)) { - obj["redisVersion"] = redisVersionProp - } - reservedIpRangeProp, err := expandRedisInstanceReservedIpRange(d.Get("reserved_ip_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reserved_ip_range"); !isEmptyValue(reflect.ValueOf(reservedIpRangeProp)) && (ok || !reflect.DeepEqual(v, reservedIpRangeProp)) { - obj["reservedIpRange"] = reservedIpRangeProp - } - tierProp, err := expandRedisInstanceTier(d.Get("tier"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tier"); !isEmptyValue(reflect.ValueOf(tierProp)) && (ok || !reflect.DeepEqual(v, tierProp)) { - obj["tier"] = tierProp - } - transitEncryptionModeProp, err := expandRedisInstanceTransitEncryptionMode(d.Get("transit_encryption_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("transit_encryption_mode"); !isEmptyValue(reflect.ValueOf(transitEncryptionModeProp)) && (ok || !reflect.DeepEqual(v, transitEncryptionModeProp)) { - obj["transitEncryptionMode"] = transitEncryptionModeProp - } - replicaCountProp, err := expandRedisInstanceReplicaCount(d.Get("replica_count"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("replica_count"); !isEmptyValue(reflect.ValueOf(replicaCountProp)) && (ok || !reflect.DeepEqual(v, replicaCountProp)) { - obj["replicaCount"] = replicaCountProp - } - readReplicasModeProp, err := expandRedisInstanceReadReplicasMode(d.Get("read_replicas_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("read_replicas_mode"); !isEmptyValue(reflect.ValueOf(readReplicasModeProp)) && (ok || !reflect.DeepEqual(v, readReplicasModeProp)) { - obj["readReplicasMode"] = readReplicasModeProp - } - secondaryIpRangeProp, err := expandRedisInstanceSecondaryIpRange(d.Get("secondary_ip_range"), d, config) - if err != nil { - 
return err - } else if v, ok := d.GetOkExists("secondary_ip_range"); !isEmptyValue(reflect.ValueOf(secondaryIpRangeProp)) && (ok || !reflect.DeepEqual(v, secondaryIpRangeProp)) { - obj["secondaryIpRange"] = secondaryIpRangeProp - } - customerManagedKeyProp, err := expandRedisInstanceCustomerManagedKey(d.Get("customer_managed_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("customer_managed_key"); !isEmptyValue(reflect.ValueOf(customerManagedKeyProp)) && (ok || !reflect.DeepEqual(v, customerManagedKeyProp)) { - obj["customerManagedKey"] = customerManagedKeyProp - } - - obj, err = resourceRedisInstanceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances?instanceId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Instance: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Instance: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = RedisOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Instance", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource 
didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Instance: %s", err) - } - - opRes, err = resourceRedisInstanceDecoder(d, meta, opRes) - if err != nil { - return fmt.Errorf("Error decoding response from operation: %s", err) - } - if opRes == nil { - return fmt.Errorf("Error decoding response from operation, could not find object") - } - - if err := d.Set("name", flattenRedisInstanceName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) - - return resourceRedisInstanceRead(d, meta) -} - -func resourceRedisInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("RedisInstance %q", d.Id())) - } - - res, err = resourceRedisInstanceDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing RedisInstance because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - - if err := d.Set("alternative_location_id", flattenRedisInstanceAlternativeLocationId(res["alternativeLocationId"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("auth_enabled", flattenRedisInstanceAuthEnabled(res["authEnabled"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("authorized_network", flattenRedisInstanceAuthorizedNetwork(res["authorizedNetwork"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("connect_mode", flattenRedisInstanceConnectMode(res["connectMode"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("create_time", flattenRedisInstanceCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("current_location_id", flattenRedisInstanceCurrentLocationId(res["currentLocationId"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("display_name", flattenRedisInstanceDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("host", flattenRedisInstanceHost(res["host"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("labels", flattenRedisInstanceLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - 
if err := d.Set("redis_configs", flattenRedisInstanceRedisConfigs(res["redisConfigs"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("location_id", flattenRedisInstanceLocationId(res["locationId"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("name", flattenRedisInstanceName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("persistence_config", flattenRedisInstancePersistenceConfig(res["persistenceConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("maintenance_policy", flattenRedisInstanceMaintenancePolicy(res["maintenancePolicy"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("maintenance_schedule", flattenRedisInstanceMaintenanceSchedule(res["maintenanceSchedule"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("memory_size_gb", flattenRedisInstanceMemorySizeGb(res["memorySizeGb"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("port", flattenRedisInstancePort(res["port"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("persistence_iam_identity", flattenRedisInstancePersistenceIamIdentity(res["persistenceIamIdentity"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("redis_version", flattenRedisInstanceRedisVersion(res["redisVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("tier", flattenRedisInstanceTier(res["tier"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("transit_encryption_mode", 
flattenRedisInstanceTransitEncryptionMode(res["transitEncryptionMode"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("server_ca_certs", flattenRedisInstanceServerCaCerts(res["serverCaCerts"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("replica_count", flattenRedisInstanceReplicaCount(res["replicaCount"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("nodes", flattenRedisInstanceNodes(res["nodes"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("read_endpoint", flattenRedisInstanceReadEndpoint(res["readEndpoint"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("read_endpoint_port", flattenRedisInstanceReadEndpointPort(res["readEndpointPort"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("read_replicas_mode", flattenRedisInstanceReadReplicasMode(res["readReplicasMode"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("secondary_ip_range", flattenRedisInstanceSecondaryIpRange(res["secondaryIpRange"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("customer_managed_key", flattenRedisInstanceCustomerManagedKey(res["customerManagedKey"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - - return nil -} - -func resourceRedisInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = 
project - - obj := make(map[string]interface{}) - authEnabledProp, err := expandRedisInstanceAuthEnabled(d.Get("auth_enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("auth_enabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, authEnabledProp)) { - obj["authEnabled"] = authEnabledProp - } - displayNameProp, err := expandRedisInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandRedisInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - redisConfigsProp, err := expandRedisInstanceRedisConfigs(d.Get("redis_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("redis_configs"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, redisConfigsProp)) { - obj["redisConfigs"] = redisConfigsProp - } - persistenceConfigProp, err := expandRedisInstancePersistenceConfig(d.Get("persistence_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("persistence_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, persistenceConfigProp)) { - obj["persistenceConfig"] = persistenceConfigProp - } - maintenancePolicyProp, err := expandRedisInstanceMaintenancePolicy(d.Get("maintenance_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("maintenance_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maintenancePolicyProp)) { - obj["maintenancePolicy"] = maintenancePolicyProp - } - maintenanceScheduleProp, err := 
expandRedisInstanceMaintenanceSchedule(d.Get("maintenance_schedule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("maintenance_schedule"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maintenanceScheduleProp)) { - obj["maintenanceSchedule"] = maintenanceScheduleProp - } - memorySizeGbProp, err := expandRedisInstanceMemorySizeGb(d.Get("memory_size_gb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("memory_size_gb"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, memorySizeGbProp)) { - obj["memorySizeGb"] = memorySizeGbProp - } - replicaCountProp, err := expandRedisInstanceReplicaCount(d.Get("replica_count"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("replica_count"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, replicaCountProp)) { - obj["replicaCount"] = replicaCountProp - } - readReplicasModeProp, err := expandRedisInstanceReadReplicasMode(d.Get("read_replicas_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("read_replicas_mode"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, readReplicasModeProp)) { - obj["readReplicasMode"] = readReplicasModeProp - } - secondaryIpRangeProp, err := expandRedisInstanceSecondaryIpRange(d.Get("secondary_ip_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("secondary_ip_range"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, secondaryIpRangeProp)) { - obj["secondaryIpRange"] = secondaryIpRangeProp - } - - obj, err = resourceRedisInstanceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) - updateMask := []string{} - - if 
d.HasChange("auth_enabled") { - updateMask = append(updateMask, "authEnabled") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("redis_configs") { - updateMask = append(updateMask, "redisConfigs") - } - - if d.HasChange("persistence_config") { - updateMask = append(updateMask, "persistenceConfig") - } - - if d.HasChange("maintenance_policy") { - updateMask = append(updateMask, "maintenancePolicy") - } - - if d.HasChange("maintenance_schedule") { - updateMask = append(updateMask, "maintenanceSchedule") - } - - if d.HasChange("memory_size_gb") { - updateMask = append(updateMask, "memorySizeGb") - } - - if d.HasChange("replica_count") { - updateMask = append(updateMask, "replicaCount") - } - - if d.HasChange("read_replicas_mode") { - updateMask = append(updateMask, "readReplicasMode") - } - - if d.HasChange("secondary_ip_range") { - updateMask = append(updateMask, "secondaryIpRange") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - // if updateMask is empty we are not updating anything so skip the post - if len(updateMask) > 0 { - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) - } - - err = RedisOperationWaitTime( - config, res, project, "Updating Instance", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - } - 
d.Partial(true) - - if d.HasChange("redis_version") { - obj := make(map[string]interface{}) - - redisVersionProp, err := expandRedisInstanceRedisVersion(d.Get("redis_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("redis_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, redisVersionProp)) { - obj["redisVersion"] = redisVersionProp - } - - url, err := replaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}:upgrade") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) - } - - err = RedisOperationWaitTime( - config, res, project, "Updating Instance", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceRedisInstanceRead(d, meta) -} - -func resourceRedisInstanceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Instance %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, 
config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Instance") - } - - err = RedisOperationWaitTime( - config, res, project, "Deleting Instance", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) - return nil -} - -func resourceRedisInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenRedisInstanceAlternativeLocationId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceAuthEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceAuthorizedNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceConnectMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceCurrentLocationId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceDisplayName(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenRedisInstanceHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceRedisConfigs(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceLocationId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenRedisInstancePersistenceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["persistence_mode"] = - flattenRedisInstancePersistenceConfigPersistenceMode(original["persistenceMode"], d, config) - transformed["rdb_snapshot_period"] = - flattenRedisInstancePersistenceConfigRdbSnapshotPeriod(original["rdbSnapshotPeriod"], d, config) - transformed["rdb_next_snapshot_time"] = - flattenRedisInstancePersistenceConfigRdbNextSnapshotTime(original["rdbNextSnapshotTime"], d, config) - transformed["rdb_snapshot_start_time"] = - flattenRedisInstancePersistenceConfigRdbSnapshotStartTime(original["rdbSnapshotStartTime"], d, config) - return []interface{}{transformed} -} -func flattenRedisInstancePersistenceConfigPersistenceMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstancePersistenceConfigRdbSnapshotPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstancePersistenceConfigRdbNextSnapshotTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenRedisInstancePersistenceConfigRdbSnapshotStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceMaintenancePolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["create_time"] = - flattenRedisInstanceMaintenancePolicyCreateTime(original["createTime"], d, config) - transformed["update_time"] = - flattenRedisInstanceMaintenancePolicyUpdateTime(original["updateTime"], d, config) - transformed["description"] = - flattenRedisInstanceMaintenancePolicyDescription(original["description"], d, config) - transformed["weekly_maintenance_window"] = - flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindow(original["weeklyMaintenanceWindow"], d, config) - return []interface{}{transformed} -} -func flattenRedisInstanceMaintenancePolicyCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceMaintenancePolicyUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceMaintenancePolicyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindow(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "day": flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDay(original["day"], d, config), - "duration": 
flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDuration(original["duration"], d, config), - "start_time": flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(original["startTime"], d, config), - }) - } - return transformed -} -func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["hours"] = - flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(original["hours"], d, config) - transformed["minutes"] = - flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(original["minutes"], d, config) - transformed["seconds"] = - flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(original["nanos"], d, config) - return []interface{}{transformed} -} -func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func 
flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenRedisInstanceMaintenanceSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["start_time"] = - flattenRedisInstanceMaintenanceScheduleStartTime(original["startTime"], d, config) - transformed["end_time"] = - flattenRedisInstanceMaintenanceScheduleEndTime(original["endTime"], d, config) 
- transformed["schedule_deadline_time"] = - flattenRedisInstanceMaintenanceScheduleScheduleDeadlineTime(original["scheduleDeadlineTime"], d, config) - return []interface{}{transformed} -} -func flattenRedisInstanceMaintenanceScheduleStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceMaintenanceScheduleEndTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceMaintenanceScheduleScheduleDeadlineTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceMemorySizeGb(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenRedisInstancePort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenRedisInstancePersistenceIamIdentity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceRedisVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceTier(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceTransitEncryptionMode(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenRedisInstanceServerCaCerts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "serial_number": flattenRedisInstanceServerCaCertsSerialNumber(original["serialNumber"], d, config), - "cert": flattenRedisInstanceServerCaCertsCert(original["cert"], d, config), - "create_time": flattenRedisInstanceServerCaCertsCreateTime(original["createTime"], d, config), - "expire_time": flattenRedisInstanceServerCaCertsExpireTime(original["expireTime"], d, config), - "sha1_fingerprint": flattenRedisInstanceServerCaCertsSha1Fingerprint(original["sha1Fingerprint"], d, config), - }) - } - return transformed -} -func flattenRedisInstanceServerCaCertsSerialNumber(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceServerCaCertsCert(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceServerCaCertsCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceServerCaCertsExpireTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceServerCaCertsSha1Fingerprint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceReplicaCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if 
floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenRedisInstanceNodes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "id": flattenRedisInstanceNodesId(original["id"], d, config), - "zone": flattenRedisInstanceNodesZone(original["zone"], d, config), - }) - } - return transformed -} -func flattenRedisInstanceNodesId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceNodesZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceReadEndpoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceReadEndpointPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenRedisInstanceReadReplicasMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceSecondaryIpRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceCustomerManagedKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
expandRedisInstanceAlternativeLocationId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceAuthEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceAuthorizedNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - fv, err := ParseNetworkFieldValue(v.(string), d, config) - if err != nil { - return nil, err - } - return fv.RelativeLink(), nil -} - -func expandRedisInstanceConnectMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandRedisInstanceRedisConfigs(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandRedisInstanceLocationId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") -} - -func expandRedisInstancePersistenceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPersistenceMode, err := expandRedisInstancePersistenceConfigPersistenceMode(original["persistence_mode"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPersistenceMode); val.IsValid() && !isEmptyValue(val) { - transformed["persistenceMode"] = transformedPersistenceMode - } - - transformedRdbSnapshotPeriod, err := expandRedisInstancePersistenceConfigRdbSnapshotPeriod(original["rdb_snapshot_period"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRdbSnapshotPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["rdbSnapshotPeriod"] = transformedRdbSnapshotPeriod - } - - transformedRdbNextSnapshotTime, err := expandRedisInstancePersistenceConfigRdbNextSnapshotTime(original["rdb_next_snapshot_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRdbNextSnapshotTime); val.IsValid() && !isEmptyValue(val) { - transformed["rdbNextSnapshotTime"] = transformedRdbNextSnapshotTime - } - - transformedRdbSnapshotStartTime, err := expandRedisInstancePersistenceConfigRdbSnapshotStartTime(original["rdb_snapshot_start_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRdbSnapshotStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["rdbSnapshotStartTime"] = transformedRdbSnapshotStartTime - } - - return transformed, nil -} - -func expandRedisInstancePersistenceConfigPersistenceMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstancePersistenceConfigRdbSnapshotPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstancePersistenceConfigRdbNextSnapshotTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil 
-} - -func expandRedisInstancePersistenceConfigRdbSnapshotStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceMaintenancePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCreateTime, err := expandRedisInstanceMaintenancePolicyCreateTime(original["create_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCreateTime); val.IsValid() && !isEmptyValue(val) { - transformed["createTime"] = transformedCreateTime - } - - transformedUpdateTime, err := expandRedisInstanceMaintenancePolicyUpdateTime(original["update_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUpdateTime); val.IsValid() && !isEmptyValue(val) { - transformed["updateTime"] = transformedUpdateTime - } - - transformedDescription, err := expandRedisInstanceMaintenancePolicyDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedWeeklyMaintenanceWindow, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindow(original["weekly_maintenance_window"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedWeeklyMaintenanceWindow); val.IsValid() && !isEmptyValue(val) { - transformed["weeklyMaintenanceWindow"] = transformedWeeklyMaintenanceWindow - } - - return transformed, nil -} - -func expandRedisInstanceMaintenancePolicyCreateTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandRedisInstanceMaintenancePolicyUpdateTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceMaintenancePolicyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDay, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDay(original["day"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { - transformed["day"] = transformedDay - } - - transformedDuration, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDuration(original["duration"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDuration); val.IsValid() && !isEmptyValue(val) { - transformed["duration"] = transformedDuration - } - - transformedStartTime, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else { - transformed["startTime"] = transformedStartTime - } - - req = append(req, transformed) - } - return req, nil -} - -func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHours, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(original["hours"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { - transformed["hours"] = transformedHours - } - - transformedMinutes, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(original["minutes"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { - transformed["minutes"] = transformedMinutes - } - - transformedSeconds, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceMaintenanceSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedStartTime, err := expandRedisInstanceMaintenanceScheduleStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - transformedEndTime, err := expandRedisInstanceMaintenanceScheduleEndTime(original["end_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedEndTime); val.IsValid() && !isEmptyValue(val) { - transformed["endTime"] = transformedEndTime - } - - transformedScheduleDeadlineTime, err := expandRedisInstanceMaintenanceScheduleScheduleDeadlineTime(original["schedule_deadline_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedScheduleDeadlineTime); val.IsValid() && !isEmptyValue(val) { - transformed["scheduleDeadlineTime"] = transformedScheduleDeadlineTime - } - - return transformed, nil -} - -func expandRedisInstanceMaintenanceScheduleStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceMaintenanceScheduleEndTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandRedisInstanceMaintenanceScheduleScheduleDeadlineTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceMemorySizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceRedisVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceReservedIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceTier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceTransitEncryptionMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceReplicaCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceReadReplicasMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceSecondaryIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceCustomerManagedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceRedisInstanceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - region, err := getRegionFromSchema("region", "location_id", d, config) - if err != nil { - return nil, err - } - if err := d.Set("region", region); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - return obj, nil -} - -func resourceRedisInstanceDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) 
- - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - if v, ok := res["authEnabled"].(bool); ok { - if v { - url, err := replaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}/authString") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, fmt.Errorf("Error fetching project for Instance: %s", err) - } - - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return nil, fmt.Errorf("Error reading AuthString: %s", err) - } - - if err := d.Set("auth_string", res["authString"]); err != nil { - return nil, fmt.Errorf("Error reading Instance: %s", err) - } - } - } else { - if err := d.Set("auth_string", ""); err != nil { - return nil, fmt.Errorf("Error reading Instance: %s", err) - } - } - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_resource_manager_lien.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_resource_manager_lien.go deleted file mode 100644 index 2aaced0817..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_resource_manager_lien.go +++ /dev/null @@ -1,466 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. 
-// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceResourceManagerLien() *schema.Resource { - return &schema.Resource{ - Create: resourceResourceManagerLienCreate, - Read: resourceResourceManagerLienRead, - Delete: resourceResourceManagerLienDelete, - - Importer: &schema.ResourceImporter{ - State: resourceResourceManagerLienImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "origin": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A stable, user-visible/meaningful string identifying the origin -of the Lien, intended to be inspected programmatically. Maximum length of -200 characters.`, - }, - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A reference to the resource this Lien is attached to. -The server will validate the parent against those for which Liens are supported. -Since a variety of objects can have Liens against them, you must provide the type -prefix (e.g. "projects/my-project-name").`, - }, - "reason": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Concise user-visible strings indicating why an action cannot be performed -on a resource. Maximum length of 200 characters.`, - }, - "restrictions": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `The types of operations which should be blocked as a result of this Lien. -Each value should correspond to an IAM permission. The server will validate -the permissions against those for which Liens are supported. 
An empty -list is meaningless and will be rejected. -e.g. ['resourcemanager.projects.delete']`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Time of creation`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `A system-generated unique identifier for this Lien.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceResourceManagerLienCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - reasonProp, err := expandNestedResourceManagerLienReason(d.Get("reason"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reason"); !isEmptyValue(reflect.ValueOf(reasonProp)) && (ok || !reflect.DeepEqual(v, reasonProp)) { - obj["reason"] = reasonProp - } - originProp, err := expandNestedResourceManagerLienOrigin(d.Get("origin"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("origin"); !isEmptyValue(reflect.ValueOf(originProp)) && (ok || !reflect.DeepEqual(v, originProp)) { - obj["origin"] = originProp - } - parentProp, err := expandNestedResourceManagerLienParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - restrictionsProp, err := expandNestedResourceManagerLienRestrictions(d.Get("restrictions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("restrictions"); !isEmptyValue(reflect.ValueOf(restrictionsProp)) && (ok || !reflect.DeepEqual(v, restrictionsProp)) { - obj["restrictions"] = restrictionsProp - } - - url, err := replaceVars(d, config, "{{ResourceManagerBasePath}}liens") - if err != nil { - return err - } - - 
log.Printf("[DEBUG] Creating new Lien: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Lien: %s", err) - } - if err := d.Set("name", flattenNestedResourceManagerLienName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // This resource is unusual - instead of returning an Operation from - // Create, it returns the created object itself. We don't parse - // any of the values there, preferring to centralize that logic in - // Read(). In this resource, Read is also unusual - it requires - // us to know the server-side generated name of the object we're - // trying to fetch, and the only way to know that is to capture - // it here. The following two lines do that. 
- d.SetId(flattenNestedResourceManagerLienName(res["name"], d, config).(string)) - if err := d.Set("name", flattenNestedResourceManagerLienName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - - log.Printf("[DEBUG] Finished creating Lien %q: %#v", d.Id(), res) - - return resourceResourceManagerLienRead(d, meta) -} - -func resourceResourceManagerLienRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ResourceManagerBasePath}}liens?parent={{parent}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ResourceManagerLien %q", d.Id())) - } - - res, err = flattenNestedResourceManagerLien(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing ResourceManagerLien because it couldn't be matched.") - d.SetId("") - return nil - } - - res, err = resourceResourceManagerLienDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing ResourceManagerLien because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenNestedResourceManagerLienName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Lien: %s", err) - } - if err := d.Set("reason", flattenNestedResourceManagerLienReason(res["reason"], d, config)); err != nil { - return fmt.Errorf("Error reading Lien: %s", err) - } - if err := d.Set("origin", flattenNestedResourceManagerLienOrigin(res["origin"], d, config)); err != nil { - return fmt.Errorf("Error reading Lien: %s", err) - } - if err := d.Set("create_time", flattenNestedResourceManagerLienCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Lien: %s", err) - } - if err := d.Set("parent", flattenNestedResourceManagerLienParent(res["parent"], d, config)); err != nil { - return fmt.Errorf("Error reading Lien: %s", err) - } - if err := d.Set("restrictions", flattenNestedResourceManagerLienRestrictions(res["restrictions"], d, config)); err != nil { - return fmt.Errorf("Error reading Lien: %s", err) - } - - return nil -} - -func resourceResourceManagerLienDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ResourceManagerBasePath}}liens?parent={{parent}}") - if err != nil { - return err - } - - var obj map[string]interface{} - // log the old URL to make the ineffassign linter happy - // in theory, we should find a way to disable the default URL and not construct - // both, but that's a problem for another day. Today, we cheat. 
- log.Printf("[DEBUG] replacing URL %q with a custom delete URL", url) - url, err = replaceVars(d, config, "{{ResourceManagerBasePath}}liens/{{name}}") - if err != nil { - return err - } - log.Printf("[DEBUG] Deleting Lien %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Lien") - } - - log.Printf("[DEBUG] Finished deleting Lien %q: %#v", d.Id(), res) - return nil -} - -func resourceResourceManagerLienImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - parent, err := replaceVars(d, config, "projects/{{parent}}") - if err != nil { - return nil, err - } - if err := d.Set("parent", parent); err != nil { - return nil, fmt.Errorf("Error setting parent: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenNestedResourceManagerLienName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenNestedResourceManagerLienReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedResourceManagerLienOrigin(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedResourceManagerLienCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenNestedResourceManagerLienParent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedResourceManagerLienRestrictions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedResourceManagerLienReason(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedResourceManagerLienOrigin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedResourceManagerLienParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedResourceManagerLienRestrictions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenNestedResourceManagerLien(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["liens"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value liens. Actual value: %v", v) - } - - _, item, err := resourceResourceManagerLienFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceResourceManagerLienFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName := d.Get("name") - expectedFlattenedName := flattenNestedResourceManagerLienName(expectedName, d, meta.(*Config)) - - // Search list for this resource. 
- for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - // Decode list item before comparing. - item, err := resourceResourceManagerLienDecoder(d, meta, item) - if err != nil { - return -1, nil, err - } - - itemName := flattenNestedResourceManagerLienName(item["name"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { - log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} -func resourceResourceManagerLienDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // The problem we're trying to solve here is that this property is a Project, - // and there are a lot of ways to specify a Project, including the ID vs - // Number, which is something that we can't address in a diffsuppress. - // Since we can't enforce a particular method of entering the project, - // we're just going to have to use whatever the user entered, whether - // it's project/projectName, project/12345, projectName, or 12345. - // The normal behavior of this method would be 'return res' - and that's - // what we'll fall back to if any of our conditions aren't met. Those - // conditions are: - // 1) if the new or old values contain '/', the prefix of that is 'projects'. - // 2) if either is non-numeric, a project with that ID exists. - // 3) the project IDs represented by both the new and old values are the same. 
- config := meta.(*Config) - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - new := res["parent"].(string) - old := d.Get("parent").(string) - if strings.HasPrefix(new, "projects/") { - new = strings.Split(new, "/")[1] - } - if strings.HasPrefix(old, "projects/") { - old = strings.Split(old, "/")[1] - } - log.Printf("[DEBUG] Trying to figure out whether to use %s or %s", old, new) - // If there's still a '/' in there, the value must not be a project ID. - if strings.Contains(old, "/") || strings.Contains(new, "/") { - return res, nil - } - // If 'old' isn't entirely numeric, let's assume it's a project ID. - // If it's a project ID - var oldProjId int64 - var newProjId int64 - if oldVal, err := StringToFixed64(old); err == nil { - log.Printf("[DEBUG] The old value was a real number: %d", oldVal) - oldProjId = oldVal - } else { - pOld, err := config.NewResourceManagerClient(userAgent).Projects.Get(old).Do() - if err != nil { - return res, nil - } - oldProjId = pOld.ProjectNumber - } - if newVal, err := StringToFixed64(new); err == nil { - log.Printf("[DEBUG] The new value was a real number: %d", newVal) - newProjId = newVal - } else { - pNew, err := config.NewResourceManagerClient(userAgent).Projects.Get(new).Do() - if err != nil { - return res, nil - } - newProjId = pNew.ProjectNumber - } - if newProjId == oldProjId { - res["parent"] = d.Get("parent") - } - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_runtimeconfig_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_runtimeconfig_config.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_runtimeconfig_config.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_runtimeconfig_variable.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_runtimeconfig_variable.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_runtimeconfig_variable.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_scc_mute_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_scc_mute_config.go deleted file mode 100644 index 86e47616b6..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_scc_mute_config.go +++ /dev/null @@ -1,366 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceSecurityCenterMuteConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceSecurityCenterMuteConfigCreate, - Read: resourceSecurityCenterMuteConfigRead, - Update: resourceSecurityCenterMuteConfigUpdate, - Delete: resourceSecurityCenterMuteConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceSecurityCenterMuteConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "filter": { - Type: schema.TypeString, - Required: true, - Description: `An expression that defines the filter to apply across create/update -events of findings. While creating a filter string, be mindful of -the scope in which the mute configuration is being created. E.g., -If a filter contains project = X but is created under the -project = Y scope, it might not match any findings.`, - }, - "mute_config_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Unique identifier provided by the client within the parent scope.`, - }, - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Resource name of the new mute configs's parent. Its format is -"organizations/[organization_id]", "folders/[folder_id]", or -"projects/[project_id]".`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `A description of the mute config.`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time at which the mute config was created. 
This field is set by -the server and will be ignored if provided on config creation.`, - }, - "most_recent_editor": { - Type: schema.TypeString, - Computed: true, - Description: `Email address of the user who last edited the mute config. This -field is set by the server and will be ignored if provided on -config creation or update.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Name of the mute config. Its format is -organizations/{organization}/muteConfigs/{configId}, -folders/{folder}/muteConfigs/{configId}, -or projects/{project}/muteConfigs/{configId}`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. The most recent time at which the mute config was -updated. This field is set by the server and will be ignored if -provided on config creation or update.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSecurityCenterMuteConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandSecurityCenterMuteConfigDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - filterProp, err := expandSecurityCenterMuteConfigFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{parent}}/muteConfigs?muteConfigId={{mute_config_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new MuteConfig: %#v", obj) - 
billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating MuteConfig: %s", err) - } - if err := d.Set("name", flattenSecurityCenterMuteConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating MuteConfig %q: %#v", d.Id(), res) - - return resourceSecurityCenterMuteConfigRead(d, meta) -} - -func resourceSecurityCenterMuteConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SecurityCenterMuteConfig %q", d.Id())) - } - - if err := d.Set("name", flattenSecurityCenterMuteConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading MuteConfig: %s", err) - } - if err := d.Set("description", flattenSecurityCenterMuteConfigDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading MuteConfig: %s", err) - } - if err := d.Set("filter", flattenSecurityCenterMuteConfigFilter(res["filter"], d, 
config)); err != nil { - return fmt.Errorf("Error reading MuteConfig: %s", err) - } - if err := d.Set("create_time", flattenSecurityCenterMuteConfigCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading MuteConfig: %s", err) - } - if err := d.Set("update_time", flattenSecurityCenterMuteConfigUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading MuteConfig: %s", err) - } - if err := d.Set("most_recent_editor", flattenSecurityCenterMuteConfigMostRecentEditor(res["mostRecentEditor"], d, config)); err != nil { - return fmt.Errorf("Error reading MuteConfig: %s", err) - } - - return nil -} - -func resourceSecurityCenterMuteConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandSecurityCenterMuteConfigDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - filterProp, err := expandSecurityCenterMuteConfigFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating MuteConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("filter") { - updateMask = append(updateMask, "filter") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - 
// won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating MuteConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating MuteConfig %q: %#v", d.Id(), res) - } - - return resourceSecurityCenterMuteConfigRead(d, meta) -} - -func resourceSecurityCenterMuteConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting MuteConfig %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "MuteConfig") - } - - log.Printf("[DEBUG] Finished deleting MuteConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceSecurityCenterMuteConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - // current import_formats can't import fields with forward slashes in their value - name := d.Get("name").(string) - - matched, err := 
regexp.MatchString("(organizations|folders|projects)/.+/muteConfigs/.+", name) - if err != nil { - return nil, fmt.Errorf("error validating import name: %s", err) - } - - if !matched { - return nil, fmt.Errorf("error validating import name: %s does not fit naming for muteConfigs. Expected %s", - name, "organizations/{organization}/muteConfigs/{configId}, folders/{folder}/muteConfigs/{configId} or projects/{project}/muteConfigs/{configId}") - } - - if err := d.Set("name", name); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - // mute_config_id and parent are not returned by the API and therefore need to be set manually - stringParts := strings.Split(d.Get("name").(string), "/") - if err := d.Set("mute_config_id", stringParts[3]); err != nil { - return nil, fmt.Errorf("Error setting mute_config_id: %s", err) - } - - if err := d.Set("parent", fmt.Sprintf("%s/%s", stringParts[0], stringParts[1])); err != nil { - return nil, fmt.Errorf("Error setting mute_config_id: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenSecurityCenterMuteConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterMuteConfigDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterMuteConfigFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterMuteConfigCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterMuteConfigUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterMuteConfigMostRecentEditor(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandSecurityCenterMuteConfigDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, 
nil -} - -func expandSecurityCenterMuteConfigFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_scc_notification_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_scc_notification_config.go deleted file mode 100644 index 66e5e84946..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_scc_notification_config.go +++ /dev/null @@ -1,435 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceSecurityCenterNotificationConfig() *schema.Resource { - return &schema.Resource{ - Create: resourceSecurityCenterNotificationConfigCreate, - Read: resourceSecurityCenterNotificationConfigRead, - Update: resourceSecurityCenterNotificationConfigUpdate, - Delete: resourceSecurityCenterNotificationConfigDelete, - - Importer: &schema.ResourceImporter{ - State: resourceSecurityCenterNotificationConfigImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "config_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `This must be unique within the organization.`, - }, - "organization": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The organization whose Cloud Security Command Center the Notification -Config lives in.`, - }, - "pubsub_topic": { - Type: schema.TypeString, - Required: true, - Description: `The Pub/Sub topic to send notifications to. Its format is -"projects/[project_id]/topics/[topic]".`, - }, - "streaming_config": { - Type: schema.TypeList, - Required: true, - Description: `The config for triggering streaming-based notifications.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "filter": { - Type: schema.TypeString, - Required: true, - Description: `Expression that defines the filter to apply across create/update -events of assets or findings as specified by the event type. 
The -expression is a list of zero or more restrictions combined via -logical operators AND and OR. Parentheses are supported, and OR -has higher precedence than AND. - -Restrictions have the form and may have -a - character in front of them to indicate negation. The fields -map to those defined in the corresponding resource. - -The supported operators are: - -* = for all value types. -* >, <, >=, <= for integer values. -* :, meaning substring matching, for strings. - -The supported value types are: - -* string literals in quotes. -* integer literals without quotes. -* boolean literals true and false without quotes. - -See -[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) -for information on how to write a filter.`, - }, - }, - }, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 1024), - Description: `The description of the notification config (max of 1024 characters).`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of this notification config, in the format -'organizations/{{organization}}/notificationConfigs/{{config_id}}'.`, - }, - "service_account": { - Type: schema.TypeString, - Computed: true, - Description: `The service account that needs "pubsub.topics.publish" permission to -publish to the Pub/Sub topic.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSecurityCenterNotificationConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandSecurityCenterNotificationConfigDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, 
descriptionProp)) { - obj["description"] = descriptionProp - } - pubsubTopicProp, err := expandSecurityCenterNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_topic"); !isEmptyValue(reflect.ValueOf(pubsubTopicProp)) && (ok || !reflect.DeepEqual(v, pubsubTopicProp)) { - obj["pubsubTopic"] = pubsubTopicProp - } - streamingConfigProp, err := expandSecurityCenterNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("streaming_config"); !isEmptyValue(reflect.ValueOf(streamingConfigProp)) && (ok || !reflect.DeepEqual(v, streamingConfigProp)) { - obj["streamingConfig"] = streamingConfigProp - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/notificationConfigs?configId={{config_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new NotificationConfig: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating NotificationConfig: %s", err) - } - if err := d.Set("name", flattenSecurityCenterNotificationConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - log.Printf("[DEBUG] Finished creating NotificationConfig %q: %#v", d.Id(), res) - - return resourceSecurityCenterNotificationConfigRead(d, meta) -} - -func resourceSecurityCenterNotificationConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SecurityCenterNotificationConfig %q", d.Id())) - } - - if err := d.Set("name", flattenSecurityCenterNotificationConfigName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading NotificationConfig: %s", err) - } - if err := d.Set("description", flattenSecurityCenterNotificationConfigDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading NotificationConfig: %s", err) - } - if err := d.Set("pubsub_topic", flattenSecurityCenterNotificationConfigPubsubTopic(res["pubsubTopic"], d, config)); err != nil { - return fmt.Errorf("Error reading NotificationConfig: %s", err) - } - if err := d.Set("service_account", flattenSecurityCenterNotificationConfigServiceAccount(res["serviceAccount"], d, config)); err != nil { - return fmt.Errorf("Error reading NotificationConfig: 
%s", err) - } - if err := d.Set("streaming_config", flattenSecurityCenterNotificationConfigStreamingConfig(res["streamingConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading NotificationConfig: %s", err) - } - - return nil -} - -func resourceSecurityCenterNotificationConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandSecurityCenterNotificationConfigDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - pubsubTopicProp, err := expandSecurityCenterNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_topic"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pubsubTopicProp)) { - obj["pubsubTopic"] = pubsubTopicProp - } - streamingConfigProp, err := expandSecurityCenterNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("streaming_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, streamingConfigProp)) { - obj["streamingConfig"] = streamingConfigProp - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating NotificationConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("pubsub_topic") { - updateMask = append(updateMask, "pubsubTopic") - } - - if d.HasChange("streaming_config") { - updateMask = 
append(updateMask, "streamingConfig.filter") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating NotificationConfig %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating NotificationConfig %q: %#v", d.Id(), res) - } - - return resourceSecurityCenterNotificationConfigRead(d, meta) -} - -func resourceSecurityCenterNotificationConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting NotificationConfig %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "NotificationConfig") - } - - log.Printf("[DEBUG] Finished deleting NotificationConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceSecurityCenterNotificationConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if 
err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := strings.Split(d.Get("name").(string), "/") - if len(stringParts) != 4 { - return nil, fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("name"), - "organizations/{{organization}}/sources/{{source}}", - ) - } - - if err := d.Set("organization", stringParts[1]); err != nil { - return nil, fmt.Errorf("Error setting organization: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenSecurityCenterNotificationConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterNotificationConfigDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterNotificationConfigPubsubTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterNotificationConfigServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterNotificationConfigStreamingConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["filter"] = - flattenSecurityCenterNotificationConfigStreamingConfigFilter(original["filter"], d, config) - return []interface{}{transformed} -} -func flattenSecurityCenterNotificationConfigStreamingConfigFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandSecurityCenterNotificationConfigDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecurityCenterNotificationConfigPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func expandSecurityCenterNotificationConfigStreamingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFilter, err := expandSecurityCenterNotificationConfigStreamingConfigFilter(original["filter"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !isEmptyValue(val) { - transformed["filter"] = transformedFilter - } - - return transformed, nil -} - -func expandSecurityCenterNotificationConfigStreamingConfigFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_scc_source.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_scc_source.go deleted file mode 100644 index 7c1b21a863..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_scc_source.go +++ /dev/null @@ -1,300 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceSecurityCenterSource() *schema.Resource { - return &schema.Resource{ - Create: resourceSecurityCenterSourceCreate, - Read: resourceSecurityCenterSourceRead, - Update: resourceSecurityCenterSourceUpdate, - Delete: resourceSecurityCenterSourceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceSecurityCenterSourceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateRegexp(`[\p{L}\p{N}]({\p{L}\p{N}_- ]{0,30}[\p{L}\p{N}])?`), - Description: `The source’s display name. A source’s display name must be unique -amongst its siblings, for example, two sources with the same parent -can't share the same display name. 
The display name must start and end -with a letter or digit, may contain letters, digits, spaces, hyphens, -and underscores, and can be no longer than 32 characters.`, - }, - "organization": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The organization whose Cloud Security Command Center the Source -lives in.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 1024), - Description: `The description of the source (max of 1024 characters).`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of this source, in the format -'organizations/{{organization}}/sources/{{source}}'.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSecurityCenterSourceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandSecurityCenterSourceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandSecurityCenterSourceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/sources") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Source: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Source: %s", err) - } - if err := d.Set("name", flattenSecurityCenterSourceName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - log.Printf("[DEBUG] Finished creating Source %q: %#v", d.Id(), res) - - return resourceSecurityCenterSourceRead(d, meta) -} - -func resourceSecurityCenterSourceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SecurityCenterSource %q", d.Id())) - } - - if err := d.Set("name", flattenSecurityCenterSourceName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Source: %s", err) - } - if err := d.Set("description", flattenSecurityCenterSourceDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Source: %s", err) - } - if err := d.Set("display_name", flattenSecurityCenterSourceDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Source: %s", err) - } - - return nil -} - -func resourceSecurityCenterSourceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandSecurityCenterSourceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandSecurityCenterSourceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Source %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Source %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Source %q: %#v", d.Id(), res) - } - - return resourceSecurityCenterSourceRead(d, meta) -} - -func resourceSecurityCenterSourceDelete(d *schema.ResourceData, meta interface{}) error { - log.Printf("[WARNING] SecurityCenter Source resources"+ - " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceSecurityCenterSourceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := strings.Split(d.Get("name").(string), "/") - if len(stringParts) != 4 { - return nil, fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("name"), - "organizations/{{organization}}/sources/{{source}}", - ) - } - - if err := d.Set("organization", stringParts[1]); err != nil { - return nil, fmt.Errorf("Error setting organization: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -func flattenSecurityCenterSourceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterSourceDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterSourceDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandSecurityCenterSourceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecurityCenterSourceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_secret_manager_secret.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_secret_manager_secret.go deleted file mode 100644 index 42a494f048..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_secret_manager_secret.go +++ /dev/null @@ -1,815 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceSecretManagerSecret() *schema.Resource { - return &schema.Resource{ - Create: resourceSecretManagerSecretCreate, - Read: resourceSecretManagerSecretRead, - Update: resourceSecretManagerSecretUpdate, - Delete: resourceSecretManagerSecretDelete, - - Importer: &schema.ResourceImporter{ - State: resourceSecretManagerSecretImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "replication": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `The replication policy of the secret data attached to the Secret. 
It cannot be changed -after the Secret has been created.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "automatic": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The Secret will automatically be replicated without any restrictions.`, - ExactlyOneOf: []string{"replication.0.automatic", "replication.0.user_managed"}, - }, - "user_managed": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The Secret will automatically be replicated without any restrictions.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "replicas": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `The list of Replicas for this Secret. Cannot be empty.`, - MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "location": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The canonical IDs of the location to replicate data. For example: "us-east1".`, - }, - "customer_managed_encryption": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Customer Managed Encryption for the secret.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Describes the Cloud KMS encryption key that will be used to protect destination secret.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{"replication.0.automatic", "replication.0.user_managed"}, - }, - }, - }, - }, - "secret_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `This must be unique within the project.`, - }, - "expire_time": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Timestamp in UTC when the Secret is scheduled to expire. This is always provided on output, regardless of what was sent on input. 
-A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `The labels assigned to this Secret. - -Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, -and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - -Label values must be between 0 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, -and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - -No more than 64 labels can be assigned to a given resource. - -An object containing a list of "key": value pairs. Example: -{ "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "rotation": { - Type: schema.TypeList, - Optional: true, - Description: `The rotation time and period for a Secret. At 'next_rotation_time', Secret Manager will send a Pub/Sub notification to the topics configured on the Secret. 'topics' must be set to configure rotation.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "next_rotation_time": { - Type: schema.TypeString, - Optional: true, - Description: `Timestamp in UTC at which the Secret is scheduled to rotate. -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - RequiredWith: []string{"rotation.0.rotation_period"}, - }, - "rotation_period": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) and at most 3153600000s (100 years). -If rotationPeriod is set, 'next_rotation_time' must be set. 
'next_rotation_time' will be advanced by this period when the service automatically sends rotation notifications.`, - }, - }, - }, - RequiredWith: []string{"topics"}, - }, - "topics": { - Type: schema.TypeList, - Optional: true, - Description: `A list of up to 10 Pub/Sub topics to which messages are published when control plane operations are called on the secret or its versions.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: `The resource name of the Pub/Sub topic that will be published to, in the following format: projects/*/topics/*. -For publication to succeed, the Secret Manager Service Agent service account must have pubsub.publisher permissions on the topic.`, - }, - }, - }, - }, - "ttl": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The TTL for the Secret. -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time at which the Secret was created.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the Secret. 
Format: -'projects/{{project}}/secrets/{{secret_id}}'`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSecretManagerSecretCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandSecretManagerSecretLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - replicationProp, err := expandSecretManagerSecretReplication(d.Get("replication"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("replication"); !isEmptyValue(reflect.ValueOf(replicationProp)) && (ok || !reflect.DeepEqual(v, replicationProp)) { - obj["replication"] = replicationProp - } - topicsProp, err := expandSecretManagerSecretTopics(d.Get("topics"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("topics"); !isEmptyValue(reflect.ValueOf(topicsProp)) && (ok || !reflect.DeepEqual(v, topicsProp)) { - obj["topics"] = topicsProp - } - expireTimeProp, err := expandSecretManagerSecretExpireTime(d.Get("expire_time"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("expire_time"); !isEmptyValue(reflect.ValueOf(expireTimeProp)) && (ok || !reflect.DeepEqual(v, expireTimeProp)) { - obj["expireTime"] = expireTimeProp - } - ttlProp, err := expandSecretManagerSecretTtl(d.Get("ttl"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ttl"); !isEmptyValue(reflect.ValueOf(ttlProp)) && (ok || !reflect.DeepEqual(v, ttlProp)) { - obj["ttl"] = ttlProp - } - rotationProp, err := expandSecretManagerSecretRotation(d.Get("rotation"), d, config) - 
if err != nil { - return err - } else if v, ok := d.GetOkExists("rotation"); !isEmptyValue(reflect.ValueOf(rotationProp)) && (ok || !reflect.DeepEqual(v, rotationProp)) { - obj["rotation"] = rotationProp - } - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets?secretId={{secret_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Secret: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Secret: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Secret: %s", err) - } - if err := d.Set("name", flattenSecretManagerSecretName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/secrets/{{secret_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Secret %q: %#v", d.Id(), res) - - return resourceSecretManagerSecretRead(d, meta) -} - -func resourceSecretManagerSecretRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Secret: %s", err) - } - billingProject = project - 
- // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SecretManagerSecret %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Secret: %s", err) - } - - if err := d.Set("name", flattenSecretManagerSecretName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Secret: %s", err) - } - if err := d.Set("create_time", flattenSecretManagerSecretCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Secret: %s", err) - } - if err := d.Set("labels", flattenSecretManagerSecretLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Secret: %s", err) - } - if err := d.Set("replication", flattenSecretManagerSecretReplication(res["replication"], d, config)); err != nil { - return fmt.Errorf("Error reading Secret: %s", err) - } - if err := d.Set("topics", flattenSecretManagerSecretTopics(res["topics"], d, config)); err != nil { - return fmt.Errorf("Error reading Secret: %s", err) - } - if err := d.Set("expire_time", flattenSecretManagerSecretExpireTime(res["expireTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Secret: %s", err) - } - if err := d.Set("rotation", flattenSecretManagerSecretRotation(res["rotation"], d, config)); err != nil { - return fmt.Errorf("Error reading Secret: %s", err) - } - - return nil -} - -func resourceSecretManagerSecretUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Secret: %s", err) - } - 
billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandSecretManagerSecretLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - topicsProp, err := expandSecretManagerSecretTopics(d.Get("topics"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("topics"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, topicsProp)) { - obj["topics"] = topicsProp - } - expireTimeProp, err := expandSecretManagerSecretExpireTime(d.Get("expire_time"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("expire_time"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, expireTimeProp)) { - obj["expireTime"] = expireTimeProp - } - rotationProp, err := expandSecretManagerSecretRotation(d.Get("rotation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rotation"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rotationProp)) { - obj["rotation"] = rotationProp - } - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Secret %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("topics") { - updateMask = append(updateMask, "topics") - } - - if d.HasChange("expire_time") { - updateMask = append(updateMask, "expireTime") - } - - if d.HasChange("rotation") { - updateMask = append(updateMask, "rotation") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // 
err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Secret %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Secret %q: %#v", d.Id(), res) - } - - return resourceSecretManagerSecretRead(d, meta) -} - -func resourceSecretManagerSecretDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Secret: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Secret %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Secret") - } - - log.Printf("[DEBUG] Finished deleting Secret %q: %#v", d.Id(), res) - return nil -} - -func resourceSecretManagerSecretImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/secrets/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, 
"projects/{{project}}/secrets/{{secret_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenSecretManagerSecretName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretReplication(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["automatic"] = - flattenSecretManagerSecretReplicationAutomatic(original["automatic"], d, config) - transformed["user_managed"] = - flattenSecretManagerSecretReplicationUserManaged(original["userManaged"], d, config) - return []interface{}{transformed} -} -func flattenSecretManagerSecretReplicationAutomatic(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v != nil -} - -func flattenSecretManagerSecretReplicationUserManaged(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["replicas"] = - flattenSecretManagerSecretReplicationUserManagedReplicas(original["replicas"], d, config) - return []interface{}{transformed} -} -func flattenSecretManagerSecretReplicationUserManagedReplicas(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := 
raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "location": flattenSecretManagerSecretReplicationUserManagedReplicasLocation(original["location"], d, config), - "customer_managed_encryption": flattenSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryption(original["customerManagedEncryption"], d, config), - }) - } - return transformed -} -func flattenSecretManagerSecretReplicationUserManagedReplicasLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryption(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["kms_key_name"] = - flattenSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryptionKmsKeyName(original["kmsKeyName"], d, config) - return []interface{}{transformed} -} -func flattenSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryptionKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretTopics(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenSecretManagerSecretTopicsName(original["name"], d, config), - }) - } - return transformed -} -func flattenSecretManagerSecretTopicsName(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretExpireTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretRotation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["next_rotation_time"] = - flattenSecretManagerSecretRotationNextRotationTime(original["nextRotationTime"], d, config) - transformed["rotation_period"] = - flattenSecretManagerSecretRotationRotationPeriod(original["rotationPeriod"], d, config) - return []interface{}{transformed} -} -func flattenSecretManagerSecretRotationNextRotationTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretRotationRotationPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandSecretManagerSecretLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandSecretManagerSecretReplication(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAutomatic, err := expandSecretManagerSecretReplicationAutomatic(original["automatic"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedAutomatic); val.IsValid() && !isEmptyValue(val) { - transformed["automatic"] = transformedAutomatic - } - - transformedUserManaged, err := 
expandSecretManagerSecretReplicationUserManaged(original["user_managed"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUserManaged); val.IsValid() && !isEmptyValue(val) { - transformed["userManaged"] = transformedUserManaged - } - - return transformed, nil -} - -func expandSecretManagerSecretReplicationAutomatic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || !v.(bool) { - return nil, nil - } - - return struct{}{}, nil -} - -func expandSecretManagerSecretReplicationUserManaged(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedReplicas, err := expandSecretManagerSecretReplicationUserManagedReplicas(original["replicas"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedReplicas); val.IsValid() && !isEmptyValue(val) { - transformed["replicas"] = transformedReplicas - } - - return transformed, nil -} - -func expandSecretManagerSecretReplicationUserManagedReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLocation, err := expandSecretManagerSecretReplicationUserManagedReplicasLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - transformedCustomerManagedEncryption, err := 
expandSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryption(original["customer_managed_encryption"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCustomerManagedEncryption); val.IsValid() && !isEmptyValue(val) { - transformed["customerManagedEncryption"] = transformedCustomerManagedEncryption - } - - req = append(req, transformed) - } - return req, nil -} - -func expandSecretManagerSecretReplicationUserManagedReplicasLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryptionKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - return transformed, nil -} - -func expandSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryptionKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecretManagerSecretTopics(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandSecretManagerSecretTopicsName(original["name"], d, config) - if err != nil { - return nil, err - } else 
if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandSecretManagerSecretTopicsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecretManagerSecretExpireTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecretManagerSecretTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecretManagerSecretRotation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNextRotationTime, err := expandSecretManagerSecretRotationNextRotationTime(original["next_rotation_time"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNextRotationTime); val.IsValid() && !isEmptyValue(val) { - transformed["nextRotationTime"] = transformedNextRotationTime - } - - transformedRotationPeriod, err := expandSecretManagerSecretRotationRotationPeriod(original["rotation_period"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedRotationPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["rotationPeriod"] = transformedRotationPeriod - } - - return transformed, nil -} - -func expandSecretManagerSecretRotationNextRotationTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecretManagerSecretRotationRotationPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_secret_manager_secret_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_secret_manager_secret_version.go deleted file mode 100644 index 5492d25df9..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_secret_manager_secret_version.go +++ /dev/null @@ -1,431 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/base64" - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/googleapi" -) - -func resourceSecretManagerSecretVersionUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - _, err := expandSecretManagerSecretVersionEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } - - return resourceSecretManagerSecretVersionRead(d, meta) -} - -func ResourceSecretManagerSecretVersion() *schema.Resource { - return &schema.Resource{ - Create: resourceSecretManagerSecretVersionCreate, - Read: resourceSecretManagerSecretVersionRead, - Delete: resourceSecretManagerSecretVersionDelete, - - Importer: &schema.ResourceImporter{ - State: resourceSecretManagerSecretVersionImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * 
time.Minute), - }, - - Update: resourceSecretManagerSecretVersionUpdate, - - Schema: map[string]*schema.Schema{ - "secret_data": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The secret data. Must be no larger than 64KiB.`, - Sensitive: true, - }, - - "secret": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Secret Manager secret resource`, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Description: `The current state of the SecretVersion.`, - Default: true, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time at which the Secret was created.`, - }, - "destroy_time": { - Type: schema.TypeString, - Computed: true, - Description: `The time at which the Secret was destroyed. Only present if state is DESTROYED.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the SecretVersion. 
Format: -'projects/{{project}}/secrets/{{secret_id}}/versions/{{version}}'`, - }, - "version": { - Type: schema.TypeString, - Computed: true, - Description: `The version of the Secret.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSecretManagerSecretVersionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - stateProp, err := expandSecretManagerSecretVersionEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(stateProp)) && (ok || !reflect.DeepEqual(v, stateProp)) { - obj["state"] = stateProp - } - payloadProp, err := expandSecretManagerSecretVersionPayload(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(reflect.ValueOf(payloadProp)) { - obj["payload"] = payloadProp - } - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}{{secret}}:addVersion") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new SecretVersion: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating SecretVersion: %s", err) - } - if err := d.Set("name", flattenSecretManagerSecretVersionName(res["name"], d, config)); err != nil { - return fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `name` is autogenerated from the api so needs to be set post-create - name, ok := res["name"] 
- if !ok { - return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - if err := d.Set("name", name.(string)); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - _, err = expandSecretManagerSecretVersionEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished creating SecretVersion %q: %#v", d.Id(), res) - - return resourceSecretManagerSecretVersionRead(d, meta) -} - -func resourceSecretManagerSecretVersionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SecretManagerSecretVersion %q", d.Id())) - } - - res, err = resourceSecretManagerSecretVersionDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing SecretManagerSecretVersion because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("enabled", flattenSecretManagerSecretVersionEnabled(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading SecretVersion: %s", err) - } - if err := d.Set("name", flattenSecretManagerSecretVersionName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading SecretVersion: %s", err) - } - if err := d.Set("version", flattenSecretManagerSecretVersionVersion(res["version"], d, config)); err != nil { - return fmt.Errorf("Error reading SecretVersion: %s", err) - } - if err := d.Set("create_time", flattenSecretManagerSecretVersionCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading SecretVersion: %s", err) - } - if err := d.Set("destroy_time", flattenSecretManagerSecretVersionDestroyTime(res["destroyTime"], d, config)); err != nil { - return fmt.Errorf("Error reading SecretVersion: %s", err) - } - // Terraform must set the top level schema field, but since this object contains collapsed properties - // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. 
- if flattenedProp := flattenSecretManagerSecretVersionPayload(res["payload"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*googleapi.Error); ok { - return fmt.Errorf("Error reading SecretVersion: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - - return nil -} - -func resourceSecretManagerSecretVersionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}{{name}}:destroy") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting SecretVersion %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "SecretVersion") - } - - log.Printf("[DEBUG] Finished deleting SecretVersion %q: %#v", d.Id(), res) - return nil -} - -func resourceSecretManagerSecretVersionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) - secretRegex := regexp.MustCompile("(projects/.+/secrets/.+)/versions/.+$") - versionRegex := regexp.MustCompile("projects/(.+)/secrets/(.+)/versions/(.+)$") - - parts := secretRegex.FindStringSubmatch(name) - if len(parts) != 2 { - 
panic(fmt.Sprintf("Version name does not fit the format `projects/{{project}}/secrets/{{secret}}/versions/{{version}}`")) - } - if err := d.Set("secret", parts[1]); err != nil { - return nil, fmt.Errorf("Error setting secret: %s", err) - } - - parts = versionRegex.FindStringSubmatch(name) - - if err := d.Set("version", parts[3]); err != nil { - return nil, fmt.Errorf("Error setting version: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenSecretManagerSecretVersionEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v.(string) == "ENABLED" { - return true - } - - return false -} - -func flattenSecretManagerSecretVersionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretVersionVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - name := d.Get("name").(string) - secretRegex := regexp.MustCompile("projects/(.+)/secrets/(.+)/versions/(.+)$") - - parts := secretRegex.FindStringSubmatch(name) - if len(parts) != 4 { - panic(fmt.Sprintf("Version name does not fit the format `projects/{{project}}/secrets/{{secret}}/versions/{{version}}`")) - } - - return parts[3] -} - -func flattenSecretManagerSecretVersionCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretVersionDestroyTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretVersionPayload(v interface{}, d *schema.ResourceData, config *Config) interface{} { - transformed := make(map[string]interface{}) - - // if this secret version is disabled, the api will return an error, as the value cannot be accessed, return what we have - if d.Get("enabled").(bool) == false { - transformed["secret_data"] = d.Get("secret_data") - return []interface{}{transformed} - } - - url, err := replaceVars(d, config, 
"{{SecretManagerBasePath}}{{name}}:access") - if err != nil { - return err - } - - parts := strings.Split(d.Get("name").(string), "/") - project := parts[1] - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - accessRes, err := SendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return err - } - - data, err := base64.StdEncoding.DecodeString(accessRes["payload"].(map[string]interface{})["data"].(string)) - if err != nil { - return err - } - transformed["secret_data"] = string(data) - return []interface{}{transformed} -} - -func expandSecretManagerSecretVersionEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - name := d.Get("name").(string) - if name == "" { - return "", nil - } - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}{{name}}") - if err != nil { - return nil, err - } - - if v == true { - url = fmt.Sprintf("%s:enable", url) - } else { - url = fmt.Sprintf("%s:disable", url) - } - - parts := strings.Split(name, "/") - project := parts[1] - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - _, err = SendRequest(config, "POST", project, url, userAgent, nil) - if err != nil { - return nil, err - } - - return nil, nil -} - -func expandSecretManagerSecretVersionPayload(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedSecretData, err := expandSecretManagerSecretVersionPayloadSecretData(d.Get("secret_data"), d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSecretData); val.IsValid() && !isEmptyValue(val) { - transformed["data"] = transformedSecretData - } - - return transformed, nil -} - -func expandSecretManagerSecretVersionPayloadSecretData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return nil, 
nil - } - - return base64.StdEncoding.EncodeToString([]byte(v.(string))), nil -} - -func resourceSecretManagerSecretVersionDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v := res["state"]; v == "DESTROYED" { - return nil, nil - } - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sourcerepo_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sourcerepo_repository.go deleted file mode 100644 index d12b3ea546..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sourcerepo_repository.go +++ /dev/null @@ -1,468 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "bytes" - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func resourceSourceRepoRepositoryPubSubConfigsHash(v interface{}) int { - if v == nil { - return 0 - } - - var buf bytes.Buffer - m := v.(map[string]interface{}) - - buf.WriteString(fmt.Sprintf("%s-", GetResourceNameFromSelfLink(m["topic"].(string)))) - buf.WriteString(fmt.Sprintf("%s-", m["message_format"].(string))) - if v, ok := m["service_account_email"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - - return hashcode(buf.String()) -} - -func ResourceSourceRepoRepository() *schema.Resource { - return &schema.Resource{ - Create: resourceSourceRepoRepositoryCreate, - Read: resourceSourceRepoRepositoryRead, - Update: resourceSourceRepoRepositoryUpdate, - Delete: resourceSourceRepoRepositoryDelete, - - Importer: &schema.ResourceImporter{ - State: resourceSourceRepoRepositoryImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Resource name of the repository, of the form '{{repo}}'. -The repo name may contain slashes. eg, 'name/with/slash'`, - }, - "pubsub_configs": { - Type: schema.TypeSet, - Optional: true, - Description: `How this repository publishes a change in the repository through Cloud Pub/Sub. 
-Keyed by the topic names.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "topic": { - Type: schema.TypeString, - Required: true, - }, - "message_format": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"PROTOBUF", "JSON"}), - Description: `The format of the Cloud Pub/Sub messages. -- PROTOBUF: The message payload is a serialized protocol buffer of SourceRepoEvent. -- JSON: The message payload is a JSON string of SourceRepoEvent. Possible values: ["PROTOBUF", "JSON"]`, - }, - "service_account_email": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Email address of the service account used for publishing Cloud Pub/Sub messages. -This service account needs to be in the same project as the PubsubConfig. When added, -the caller needs to have iam.serviceAccounts.actAs permission on this service account. -If unspecified, it defaults to the compute engine default service account.`, - }, - }, - }, - Set: resourceSourceRepoRepositoryPubSubConfigsHash, - }, - "size": { - Type: schema.TypeInt, - Computed: true, - Description: `The disk usage of the repo, in bytes.`, - }, - "url": { - Type: schema.TypeString, - Computed: true, - Description: `URL to clone the repository from Google Cloud Source Repositories.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSourceRepoRepositoryCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandSourceRepoRepositoryName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - pubsubConfigsProp, err 
:= expandSourceRepoRepositoryPubsubConfigs(d.Get("pubsub_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_configs"); !isEmptyValue(reflect.ValueOf(pubsubConfigsProp)) && (ok || !reflect.DeepEqual(v, pubsubConfigsProp)) { - obj["pubsubConfigs"] = pubsubConfigsProp - } - - url, err := replaceVars(d, config, "{{SourceRepoBasePath}}projects/{{project}}/repos") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Repository: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Repository: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Repository: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/repos/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if v, ok := d.GetOkExists("pubsub_configs"); !isEmptyValue(reflect.ValueOf(pubsubConfigsProp)) && (ok || !reflect.DeepEqual(v, pubsubConfigsProp)) { - log.Printf("[DEBUG] Calling update after create to patch in pubsub_configs") - // pubsub_configs cannot be added on create - return resourceSourceRepoRepositoryUpdate(d, meta) - } - - log.Printf("[DEBUG] Finished creating Repository %q: %#v", d.Id(), res) - - return resourceSourceRepoRepositoryRead(d, meta) -} - -func resourceSourceRepoRepositoryRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{SourceRepoBasePath}}projects/{{project}}/repos/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Repository: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SourceRepoRepository %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - - if err := d.Set("name", flattenSourceRepoRepositoryName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - if err := d.Set("url", flattenSourceRepoRepositoryUrl(res["url"], d, config)); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - if err := d.Set("size", flattenSourceRepoRepositorySize(res["size"], d, config)); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - if err := d.Set("pubsub_configs", flattenSourceRepoRepositoryPubsubConfigs(res["pubsubConfigs"], d, config)); err != nil { - return fmt.Errorf("Error reading Repository: %s", err) - } - - return nil -} - -func resourceSourceRepoRepositoryUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Repository: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - pubsubConfigsProp, err := expandSourceRepoRepositoryPubsubConfigs(d.Get("pubsub_configs"), d, config) - if err != nil { - return err - } 
else if v, ok := d.GetOkExists("pubsub_configs"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pubsubConfigsProp)) { - obj["pubsubConfigs"] = pubsubConfigsProp - } - - obj, err = resourceSourceRepoRepositoryUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SourceRepoBasePath}}projects/{{project}}/repos/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Repository %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("pubsub_configs") { - updateMask = append(updateMask, "pubsubConfigs") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Repository %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Repository %q: %#v", d.Id(), res) - } - - return resourceSourceRepoRepositoryRead(d, meta) -} - -func resourceSourceRepoRepositoryDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Repository: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{SourceRepoBasePath}}projects/{{project}}/repos/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Repository %q", d.Id()) - - // 
err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Repository") - } - - log.Printf("[DEBUG] Finished deleting Repository %q: %#v", d.Id(), res) - return nil -} - -func resourceSourceRepoRepositoryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/repos/(?P.+)", - "(?P.+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/repos/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenSourceRepoRepositoryName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - - // We can't use a standard name_from_self_link because the name can include /'s - parts := strings.SplitAfterN(v.(string), "/", 4) - return parts[3] -} - -func flattenSourceRepoRepositoryUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSourceRepoRepositorySize(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenSourceRepoRepositoryPubsubConfigs(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return 
v - } - l := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(l)) - for k, raw := range l { - original := raw.(map[string]interface{}) - transformed = append(transformed, map[string]interface{}{ - "topic": k, - "message_format": flattenSourceRepoRepositoryPubsubConfigsMessageFormat(original["messageFormat"], d, config), - "service_account_email": flattenSourceRepoRepositoryPubsubConfigsServiceAccountEmail(original["serviceAccountEmail"], d, config), - }) - } - return transformed -} -func flattenSourceRepoRepositoryPubsubConfigsMessageFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSourceRepoRepositoryPubsubConfigsServiceAccountEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandSourceRepoRepositoryName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/repos/{{name}}") -} - -func expandSourceRepoRepositoryPubsubConfigs(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - for _, raw := range v.(*schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMessageFormat, err := expandSourceRepoRepositoryPubsubConfigsMessageFormat(original["message_format"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMessageFormat); val.IsValid() && !isEmptyValue(val) { - transformed["messageFormat"] = transformedMessageFormat - } - - transformedServiceAccountEmail, err := expandSourceRepoRepositoryPubsubConfigsServiceAccountEmail(original["service_account_email"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { - 
transformed["serviceAccountEmail"] = transformedServiceAccountEmail - } - - transformedTopic, err := expandSourceRepoRepositoryPubsubConfigsTopic(original["topic"], d, config) - if err != nil { - return nil, err - } - m[transformedTopic] = transformed - } - return m, nil -} - -func expandSourceRepoRepositoryPubsubConfigsMessageFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSourceRepoRepositoryPubsubConfigsServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceSourceRepoRepositoryUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // Add "topic" field using pubsubConfig map key - pubsubConfigsVal := obj["pubsubConfigs"] - if pubsubConfigsVal != nil { - pubsubConfigs := pubsubConfigsVal.(map[string]interface{}) - for key := range pubsubConfigs { - config := pubsubConfigs[key].(map[string]interface{}) - config["topic"] = key - } - } - - // Nest request body in "repo" field - newObj := make(map[string]interface{}) - newObj["repo"] = obj - return newObj, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_spanner_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_spanner_database.go deleted file mode 100644 index e8d78a38a0..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_spanner_database.go +++ /dev/null @@ -1,794 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. 
-// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "regexp" - "strconv" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// customizeDiff func for additional checks on google_spanner_database properties: -func resourceSpannerDBDdlCustomDiffFunc(diff TerraformResourceDiff) error { - old, new := diff.GetChange("ddl") - oldDdls := old.([]interface{}) - newDdls := new.([]interface{}) - var err error - - if len(newDdls) < len(oldDdls) { - err = diff.ForceNew("ddl") - if err != nil { - return fmt.Errorf("ForceNew failed for ddl, old - %v and new - %v", oldDdls, newDdls) - } - return nil - } - - for i := range oldDdls { - if newDdls[i].(string) != oldDdls[i].(string) { - err = diff.ForceNew("ddl") - if err != nil { - return fmt.Errorf("ForceNew failed for ddl, old - %v and new - %v", oldDdls, newDdls) - } - return nil - } - } - return nil -} - -func resourceSpannerDBDdlCustomDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { - // separate func to allow unit testing - return resourceSpannerDBDdlCustomDiffFunc(diff) -} - -func validateDatabaseRetentionPeriod(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - valueError := fmt.Errorf("version_retention_period should be in range [1h, 7d], in a format resembling 1d, 24h, 1440m, or 86400s") - - r := regexp.MustCompile("^(\\d{1}d|\\d{1,3}h|\\d{2,5}m|\\d{4,6}s)$") - if !r.MatchString(value) { - errors = append(errors, valueError) - return - } - - unit := value[len(value)-1:] - multiple := value[:len(value)-1] - num, err := strconv.Atoi(multiple) - if err != nil { - errors = append(errors, valueError) - return - } - - if unit == "d" && (num < 1 || num > 7) { - errors = append(errors, valueError) - return - } - if unit == "h" && (num < 1 || num > 7*24) { - 
errors = append(errors, valueError) - return - } - if unit == "m" && (num < 1*60 || num > 7*24*60) { - errors = append(errors, valueError) - return - } - if unit == "s" && (num < 1*60*60 || num > 7*24*60*60) { - errors = append(errors, valueError) - return - } - - return -} - -func resourceSpannerDBVirtualUpdate(d *schema.ResourceData, resourceSchema map[string]*schema.Schema) bool { - // deletion_protection is the only virtual field - if d.HasChange("deletion_protection") { - for field := range resourceSchema { - if field == "deletion_protection" { - continue - } - if d.HasChange(field) { - return false - } - } - return true - } - return false -} - -func ResourceSpannerDatabase() *schema.Resource { - return &schema.Resource{ - Create: resourceSpannerDatabaseCreate, - Read: resourceSpannerDatabaseRead, - Update: resourceSpannerDatabaseUpdate, - Delete: resourceSpannerDatabaseDelete, - - Importer: &schema.ResourceImporter{ - State: resourceSpannerDatabaseImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - CustomizeDiff: resourceSpannerDBDdlCustomDiff, - - Schema: map[string]*schema.Schema{ - "instance": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The instance to create the database on.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z][a-z0-9_\-]*[a-z0-9]$`), - Description: `A unique identifier for the database, which cannot be changed after -the instance is created. 
Values are of the form [a-z][-a-z0-9]*[a-z0-9].`, - }, - "database_dialect": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"GOOGLE_STANDARD_SQL", "POSTGRESQL", ""}), - Description: `The dialect of the Cloud Spanner Database. -If it is not provided, "GOOGLE_STANDARD_SQL" will be used. Possible values: ["GOOGLE_STANDARD_SQL", "POSTGRESQL"]`, - }, - "ddl": { - Type: schema.TypeList, - Optional: true, - Description: `An optional list of DDL statements to run inside the newly created -database. Statements can create tables, indexes, etc. These statements -execute atomically with the creation of the database: if there is an -error in any statement, the database is not created.`, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "encryption_config": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Encryption configuration for the database`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Fully qualified name of the KMS key to use to encrypt this database. This key must exist -in the same location as the Spanner Database.`, - }, - }, - }, - }, - "version_retention_period": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validateDatabaseRetentionPeriod, - Description: `The retention period for the database. The retention period must be between 1 hour -and 7 days, and can be specified in days, hours, minutes, or seconds. For example, -the values 1d, 24h, 1440m, and 86400s are equivalent. Default value is 1h. 
-If this property is used, you must avoid adding new DDL statements to 'ddl' that -update the database's version_retention_period.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `An explanation of the status of the database.`, - }, - "deletion_protection": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: `Whether or not to allow Terraform to destroy the instance. Unless this field is set to false -in Terraform state, a 'terraform destroy' or 'terraform apply' that would delete the instance will fail.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSpannerDatabaseCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandSpannerDatabaseName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - versionRetentionPeriodProp, err := expandSpannerDatabaseVersionRetentionPeriod(d.Get("version_retention_period"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_retention_period"); !isEmptyValue(reflect.ValueOf(versionRetentionPeriodProp)) && (ok || !reflect.DeepEqual(v, versionRetentionPeriodProp)) { - obj["versionRetentionPeriod"] = versionRetentionPeriodProp - } - extraStatementsProp, err := expandSpannerDatabaseDdl(d.Get("ddl"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ddl"); !isEmptyValue(reflect.ValueOf(extraStatementsProp)) && (ok || !reflect.DeepEqual(v, extraStatementsProp)) { - obj["extraStatements"] = extraStatementsProp - } - encryptionConfigProp, err := 
expandSpannerDatabaseEncryptionConfig(d.Get("encryption_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("encryption_config"); !isEmptyValue(reflect.ValueOf(encryptionConfigProp)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { - obj["encryptionConfig"] = encryptionConfigProp - } - databaseDialectProp, err := expandSpannerDatabaseDatabaseDialect(d.Get("database_dialect"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("database_dialect"); !isEmptyValue(reflect.ValueOf(databaseDialectProp)) && (ok || !reflect.DeepEqual(v, databaseDialectProp)) { - obj["databaseDialect"] = databaseDialectProp - } - instanceProp, err := expandSpannerDatabaseInstance(d.Get("instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance"); !isEmptyValue(reflect.ValueOf(instanceProp)) && (ok || !reflect.DeepEqual(v, instanceProp)) { - obj["instance"] = instanceProp - } - - obj, err = resourceSpannerDatabaseEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Database: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Database: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{instance}}/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use 
the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = SpannerOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Database", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Database: %s", err) - } - - opRes, err = resourceSpannerDatabaseDecoder(d, meta, opRes) - if err != nil { - return fmt.Errorf("Error decoding response from operation: %s", err) - } - if opRes == nil { - return fmt.Errorf("Error decoding response from operation, could not find object") - } - - if err := d.Set("name", flattenSpannerDatabaseName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "{{instance}}/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Note: Databases that are created with POSTGRESQL dialect do not support extra DDL - // statements at the time of database creation. To avoid users needing to run - // `terraform apply` twice to get their desired outcome, the provider does not set - // `extraStatements` in the call to the `create` endpoint and all DDL (other than - // ) is run post-create, by calling the `updateDdl` endpoint - - _, ok := opRes["name"] - if !ok { - return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - - retention, retentionPeriodOk := d.GetOk("version_retention_period") - retentionPeriod := retention.(string) - ddl, ddlOk := d.GetOk("ddl") - ddlStatements := ddl.([]interface{}) - - if retentionPeriodOk || ddlOk { - - obj := make(map[string]interface{}) - updateDdls := []string{} - - if ddlOk { - for i := 0; i < len(ddlStatements); i++ { - if ddlStatements[i] != nil { - updateDdls = append(updateDdls, ddlStatements[i].(string)) - } - } - } - - if retentionPeriodOk { - dbName := d.Get("name") - retentionDdl := fmt.Sprintf("ALTER DATABASE `%s` SET OPTIONS (version_retention_period=\"%s\")", dbName, retentionPeriod) - if dialect, ok := d.GetOk("database_dialect"); ok && dialect == "POSTGRESQL" { - retentionDdl = fmt.Sprintf("ALTER DATABASE \"%s\" SET spanner.version_retention_period TO \"%s\"", dbName, retentionPeriod) - } - updateDdls = append(updateDdls, retentionDdl) - } - - // Skip API call if there are no new ddl entries (due to ignoring nil values) - if len(updateDdls) > 0 { - log.Printf("[DEBUG] Applying extra DDL statements to the new Database: %#v", updateDdls) - - obj["statements"] = updateDdls - - url, err = replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}/ddl") - if err != nil { - return err - } - - res, err = SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error executing DDL statements on Database: %s", err) - } - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = SpannerOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Database", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to run DDL against newly-created Database: %s", err) - } - } 
- } - - log.Printf("[DEBUG] Finished creating Database %q: %#v", d.Id(), res) - - return resourceSpannerDatabaseRead(d, meta) -} - -func resourceSpannerDatabaseRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SpannerDatabase %q", d.Id())) - } - - res, err = resourceSpannerDatabaseDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing SpannerDatabase because it no longer exists.") - d.SetId("") - return nil - } - - // Explicitly set virtual fields to default values if unset - if _, ok := d.GetOkExists("deletion_protection"); !ok { - if err := d.Set("deletion_protection", true); err != nil { - return fmt.Errorf("Error setting deletion_protection: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - - if err := d.Set("name", flattenSpannerDatabaseName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("version_retention_period", flattenSpannerDatabaseVersionRetentionPeriod(res["versionRetentionPeriod"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("state", flattenSpannerDatabaseState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("encryption_config", flattenSpannerDatabaseEncryptionConfig(res["encryptionConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("database_dialect", flattenSpannerDatabaseDatabaseDialect(res["databaseDialect"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("instance", flattenSpannerDatabaseInstance(res["instance"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - - return nil -} - -func resourceSpannerDatabaseUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - d.Partial(true) - - if 
d.HasChange("version_retention_period") || d.HasChange("ddl") { - obj := make(map[string]interface{}) - - versionRetentionPeriodProp, err := expandSpannerDatabaseVersionRetentionPeriod(d.Get("version_retention_period"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_retention_period"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, versionRetentionPeriodProp)) { - obj["versionRetentionPeriod"] = versionRetentionPeriodProp - } - extraStatementsProp, err := expandSpannerDatabaseDdl(d.Get("ddl"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ddl"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, extraStatementsProp)) { - obj["extraStatements"] = extraStatementsProp - } - - obj, err = resourceSpannerDatabaseUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}/ddl") - if err != nil { - return err - } - - if len(obj["statements"].([]string)) == 0 { - // Return early to avoid making an API call that errors, - // due to containing no DDL SQL statements - return resourceSpannerDatabaseRead(d, meta) - } - - if resourceSpannerDBVirtualUpdate(d, ResourceSpannerDatabase().Schema) { - if d.Get("deletion_protection") != nil { - if err := d.Set("deletion_protection", d.Get("deletion_protection")); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - } - return nil - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Database %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Database %q: %#v", d.Id(), res) - } - - err = 
SpannerOperationWaitTime( - config, res, project, "Updating Database", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceSpannerDatabaseRead(d, meta) -} - -func resourceSpannerDatabaseDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - if d.Get("deletion_protection").(bool) { - return fmt.Errorf("cannot destroy instance without setting deletion_protection=false and running `terraform apply`") - } - log.Printf("[DEBUG] Deleting Database %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Database") - } - - err = SpannerOperationWaitTime( - config, res, project, "Deleting Database", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Database %q: %#v", d.Id(), res) - return nil -} - -func resourceSpannerDatabaseImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)/databases/(?P[^/]+)", - "instances/(?P[^/]+)/databases/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, 
config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{instance}}/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Explicitly set virtual fields to default values on import - if err := d.Set("deletion_protection", true); err != nil { - return nil, fmt.Errorf("Error setting deletion_protection: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenSpannerDatabaseName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenSpannerDatabaseVersionRetentionPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSpannerDatabaseState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSpannerDatabaseEncryptionConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["kms_key_name"] = - flattenSpannerDatabaseEncryptionConfigKmsKeyName(original["kmsKeyName"], d, config) - return []interface{}{transformed} -} -func flattenSpannerDatabaseEncryptionConfigKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSpannerDatabaseDatabaseDialect(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSpannerDatabaseInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandSpannerDatabaseName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerDatabaseVersionRetentionPeriod(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerDatabaseDdl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerDatabaseEncryptionConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandSpannerDatabaseEncryptionConfigKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - return transformed, nil -} - -func expandSpannerDatabaseEncryptionConfigKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerDatabaseDatabaseDialect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerDatabaseInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("instances", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for instance: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceSpannerDatabaseEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - obj["createStatement"] = fmt.Sprintf("CREATE DATABASE `%s`", obj["name"]) - if dialect, ok := obj["databaseDialect"]; ok && dialect == "POSTGRESQL" { - obj["createStatement"] = fmt.Sprintf("CREATE DATABASE \"%s\"", obj["name"]) - } - - // Extra DDL statements are removed from the create request and instead applied to the database in - // a post-create 
action, to accommodate retrictions when creating PostgreSQL-enabled databases. - // https://cloud.google.com/spanner/docs/create-manage-databases#create_a_database - log.Printf("[DEBUG] Preparing to create new Database. Any extra DDL statements will be applied to the Database in a separate API call") - - delete(obj, "name") - delete(obj, "instance") - - delete(obj, "versionRetentionPeriod") - delete(obj, "extraStatements") - return obj, nil -} - -func resourceSpannerDatabaseUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - old, new := d.GetChange("ddl") - oldDdls := old.([]interface{}) - newDdls := new.([]interface{}) - updateDdls := []string{} - - //Only new ddl statments to be add to update call - for i := len(oldDdls); i < len(newDdls); i++ { - if newDdls[i] != nil { - updateDdls = append(updateDdls, newDdls[i].(string)) - } - } - - //Add statement to update version_retention_period property, if needed - if d.HasChange("version_retention_period") { - dbName := d.Get("name") - retentionDdl := fmt.Sprintf("ALTER DATABASE `%s` SET OPTIONS (version_retention_period=\"%s\")", dbName, obj["versionRetentionPeriod"]) - if dialect, ok := d.GetOk("database_dialect"); ok && dialect == "POSTGRESQL" { - retentionDdl = fmt.Sprintf("ALTER DATABASE \"%s\" SET spanner.version_retention_period TO \"%s\"", dbName, obj["versionRetentionPeriod"]) - } - updateDdls = append(updateDdls, retentionDdl) - } - - obj["statements"] = updateDdls - delete(obj, "name") - delete(obj, "versionRetentionPeriod") - delete(obj, "instance") - delete(obj, "extraStatements") - return obj, nil -} - -func resourceSpannerDatabaseDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - d.SetId(res["name"].(string)) - if err := parseImportId([]string{"projects/(?P[^/]+)/instances/(?P[^/]+)/databases/(?P[^/]+)"}, d, config); err != nil { - return nil, err - 
} - res["project"] = d.Get("project").(string) - res["instance"] = d.Get("instance").(string) - res["name"] = d.Get("name").(string) - id, err := replaceVars(d, config, "{{instance}}/{{name}}") - if err != nil { - return nil, err - } - d.SetId(id) - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_spanner_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_spanner_instance.go deleted file mode 100644 index 7cc03057e3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_spanner_instance.go +++ /dev/null @@ -1,699 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func deleteSpannerBackups(d *schema.ResourceData, config *Config, res map[string]interface{}, userAgent string, billingProject string) error { - var v interface{} - var ok bool - - v, ok = res["backups"] - if !ok || v == nil { - return nil - } - - // Iterate over the list and delete each backup. 
- for _, itemRaw := range v.([]interface{}) { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - backupName := item["name"].(string) - - log.Printf("[DEBUG] Found backups for resource %q: %#v)", d.Id(), item) - - path := "{{SpannerBasePath}}" + backupName - - url, err := replaceVars(d, config, path) - if err != nil { - return err - } - - _, err = SendRequest(config, "DELETE", billingProject, url, userAgent, nil) - if err != nil { - return err - } - } - return nil -} - -func resourceSpannerInstanceVirtualUpdate(d *schema.ResourceData, resourceSchema map[string]*schema.Schema) bool { - // force_destroy is the only virtual field - if d.HasChange("force_destroy") { - for field := range resourceSchema { - if field == "force_destroy" { - continue - } - if d.HasChange(field) { - return false - } - } - return true - } - return false -} - -func ResourceSpannerInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceSpannerInstanceCreate, - Read: resourceSpannerInstanceRead, - Update: resourceSpannerInstanceUpdate, - Delete: resourceSpannerInstanceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceSpannerInstanceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "config": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the instance's configuration (similar but not -quite the same as a region) which defines the geographic placement and -replication of your databases in this instance. It determines where your data -is stored. Values are typically of the form 'regional-europe-west1' , 'us-central' etc. 
-In order to obtain a valid list please consult the -[Configuration section of the docs](https://cloud.google.com/spanner/docs/instances).`, - }, - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `The descriptive name for this instance as it appears in UIs. Must be -unique per project and between 4 and 30 characters in length.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z][-a-z0-9]*[a-z0-9]$`), - Description: `A unique identifier for the instance, which cannot be changed after -the instance is created. The name must be between 6 and 30 characters -in length. - - -If not provided, a random string starting with 'tf-' will be selected.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "num_nodes": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The number of nodes allocated to this instance. Exactly one of either node_count or processing_units -must be present in terraform.`, - ExactlyOneOf: []string{"num_nodes", "processing_units"}, - }, - "processing_units": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - Description: `The number of processing units allocated to this instance. Exactly one of processing_units -or node_count must be present in terraform.`, - ExactlyOneOf: []string{"num_nodes", "processing_units"}, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `Instance status: 'CREATING' or 'READY'.`, - }, - "force_destroy": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `When deleting a spanner instance, this boolean option will delete all backups of this instance. 
-This must be set to true if you created a backup manually in the console.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSpannerInstanceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandSpannerInstanceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - configProp, err := expandSpannerInstanceConfig(d.Get("config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("config"); !isEmptyValue(reflect.ValueOf(configProp)) && (ok || !reflect.DeepEqual(v, configProp)) { - obj["config"] = configProp - } - displayNameProp, err := expandSpannerInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - nodeCountProp, err := expandSpannerInstanceNumNodes(d.Get("num_nodes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("num_nodes"); !isEmptyValue(reflect.ValueOf(nodeCountProp)) && (ok || !reflect.DeepEqual(v, nodeCountProp)) { - obj["nodeCount"] = nodeCountProp - } - processingUnitsProp, err := expandSpannerInstanceProcessingUnits(d.Get("processing_units"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("processing_units"); !isEmptyValue(reflect.ValueOf(processingUnitsProp)) && (ok || !reflect.DeepEqual(v, processingUnitsProp)) { - obj["processingUnits"] = processingUnitsProp - } - labelsProp, err 
:= expandSpannerInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - obj, err = resourceSpannerInstanceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Instance: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Instance: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = SpannerOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Instance", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Instance: %s", err) - } - - opRes, err = resourceSpannerInstanceDecoder(d, meta, opRes) - if err != nil { - return fmt.Errorf("Error decoding response from operation: %s", err) - } - if opRes == nil { - return fmt.Errorf("Error decoding response from operation, could not find object") - } - - if err := d.Set("name", 
flattenSpannerInstanceName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "{{project}}/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // This is useful if the resource in question doesn't have a perfectly consistent API - // That is, the Operation for Create might return before the Get operation shows the - // completed state of the resource. - time.Sleep(5 * time.Second) - - log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) - - return resourceSpannerInstanceRead(d, meta) -} - -func resourceSpannerInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SpannerInstance %q", d.Id())) - } - - res, err = resourceSpannerInstanceDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing SpannerInstance because it no longer exists.") - d.SetId("") - return nil - } - - // Explicitly set virtual fields to default values if unset - if _, ok := d.GetOkExists("force_destroy"); !ok { - if err := d.Set("force_destroy", false); err != nil { - return fmt.Errorf("Error setting force_destroy: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - - if err := d.Set("name", flattenSpannerInstanceName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("config", flattenSpannerInstanceConfig(res["config"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("display_name", flattenSpannerInstanceDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("num_nodes", flattenSpannerInstanceNumNodes(res["nodeCount"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("processing_units", flattenSpannerInstanceProcessingUnits(res["processingUnits"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("labels", flattenSpannerInstanceLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("state", flattenSpannerInstanceState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - - return nil -} - -func resourceSpannerInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for 
Instance: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandSpannerInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - nodeCountProp, err := expandSpannerInstanceNumNodes(d.Get("num_nodes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("num_nodes"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nodeCountProp)) { - obj["nodeCount"] = nodeCountProp - } - processingUnitsProp, err := expandSpannerInstanceProcessingUnits(d.Get("processing_units"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("processing_units"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, processingUnitsProp)) { - obj["processingUnits"] = processingUnitsProp - } - labelsProp, err := expandSpannerInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - obj, err = resourceSpannerInstanceUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) - if resourceSpannerInstanceVirtualUpdate(d, ResourceSpannerInstance().Schema) { - if d.Get("force_destroy") != nil { - if err := d.Set("force_destroy", d.Get("force_destroy")); err != nil { - return fmt.Errorf("Error reading Instance: %s", err) - } - } - return nil - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - 
billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) - } - - err = SpannerOperationWaitTime( - config, res, project, "Updating Instance", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceSpannerInstanceRead(d, meta) -} - -func resourceSpannerInstanceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - if d.Get("force_destroy").(bool) { - backupsUrl, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{name}}/backups") - if err != nil { - return err - } - - resp, err := SendRequest(config, "GET", billingProject, backupsUrl, userAgent, nil) - if err != nil { - // API returns 200 if no backups exist but the instance still exists, hence the error check. 
- return handleNotFoundError(err, d, fmt.Sprintf("SpannerInstance %q", d.Id())) - } - - err = deleteSpannerBackups(d, config, resp, billingProject, userAgent) - if err != nil { - return err - } - } - log.Printf("[DEBUG] Deleting Instance %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Instance") - } - - log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) - return nil -} - -func resourceSpannerInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Explicitly set virtual fields to default values on import - if err := d.Set("force_destroy", false); err != nil { - return nil, fmt.Errorf("Error setting force_destroy: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenSpannerInstanceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenSpannerInstanceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenSpannerInstanceDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSpannerInstanceNumNodes(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenSpannerInstanceProcessingUnits(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenSpannerInstanceLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSpannerInstanceState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandSpannerInstanceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerInstanceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - r := regexp.MustCompile("projects/(.+)/instanceConfigs/(.+)") - if r.MatchString(v.(string)) { - return v.(string), nil - } - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - return fmt.Sprintf("projects/%s/instanceConfigs/%s", project, v.(string)), nil -} - -func expandSpannerInstanceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerInstanceNumNodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerInstanceProcessingUnits(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerInstanceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceSpannerInstanceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - // Temp Logic to accommodate processing_units and num_nodes - if obj["processingUnits"] == nil && obj["nodeCount"] == nil { - obj["nodeCount"] = 1 - } - newObj := make(map[string]interface{}) - newObj["instance"] = obj - if obj["name"] == nil { - if err := d.Set("name", resource.PrefixedUniqueId("tfgen-spanid-")[:30]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - newObj["instanceId"] = d.Get("name").(string) - } else { - newObj["instanceId"] = obj["name"] - } - delete(obj, "name") - return newObj, nil -} - -func resourceSpannerInstanceUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - project, err := getProject(d, meta.(*Config)) - if err != nil { - return nil, err - } - obj["name"] = fmt.Sprintf("projects/%s/instances/%s", project, obj["name"]) - newObj := make(map[string]interface{}) - newObj["instance"] = obj - updateMask := make([]string, 0) - if d.HasChange("num_nodes") { - updateMask = append(updateMask, "nodeCount") - } - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - if d.HasChange("processing_units") { - updateMask = append(updateMask, "processingUnits") - } - newObj["fieldMask"] = strings.Join(updateMask, ",") - return newObj, nil -} - -func resourceSpannerInstanceDecoder(d *schema.ResourceData, meta interface{}, res 
map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - d.SetId(res["name"].(string)) - if err := parseImportId([]string{"projects/(?P[^/]+)/instances/(?P[^/]+)"}, d, config); err != nil { - return nil, err - } - res["project"] = d.Get("project").(string) - res["name"] = d.Get("name").(string) - id, err := replaceVars(d, config, "{{project}}/{{name}}") - if err != nil { - return nil, err - } - d.SetId(id) - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_database.go deleted file mode 100644 index 9e9bb560ac..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_database.go +++ /dev/null @@ -1,441 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceSQLDatabase() *schema.Resource { - return &schema.Resource{ - Create: resourceSQLDatabaseCreate, - Read: resourceSQLDatabaseRead, - Update: resourceSQLDatabaseUpdate, - Delete: resourceSQLDatabaseDelete, - - Importer: &schema.ResourceImporter{ - State: resourceSQLDatabaseImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "instance": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the Cloud SQL instance. This does not include the project -ID.`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the database in the Cloud SQL instance. -This does not include the project ID or instance name.`, - }, - "charset": { - Type: schema.TypeString, - Computed: true, - Optional: true, - DiffSuppressFunc: caseDiffSuppress, - Description: `The charset value. See MySQL's -[Supported Character Sets and Collations](https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html) -and Postgres' [Character Set Support](https://www.postgresql.org/docs/9.6/static/multibyte.html) -for more details and supported values. Postgres databases only support -a value of 'UTF8' at creation time.`, - }, - "collation": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `The collation value. See MySQL's -[Supported Character Sets and Collations](https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html) -and Postgres' [Collation Support](https://www.postgresql.org/docs/9.6/static/collation.html) -for more details and supported values. 
Postgres databases only support -a value of 'en_US.UTF8' at creation time.`, - }, - "deletion_policy": { - Type: schema.TypeString, - Optional: true, - Default: "DELETE", - Description: `The deletion policy for the database. Setting ABANDON allows the resource -to be abandoned rather than deleted. This is useful for Postgres, where databases cannot be -deleted from the API if there are users other than cloudsqlsuperuser with access. Possible -values are: "ABANDON", "DELETE". Defaults to "DELETE".`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSQLDatabaseCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - charsetProp, err := expandSQLDatabaseCharset(d.Get("charset"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("charset"); !isEmptyValue(reflect.ValueOf(charsetProp)) && (ok || !reflect.DeepEqual(v, charsetProp)) { - obj["charset"] = charsetProp - } - collationProp, err := expandSQLDatabaseCollation(d.Get("collation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("collation"); !isEmptyValue(reflect.ValueOf(collationProp)) && (ok || !reflect.DeepEqual(v, collationProp)) { - obj["collation"] = collationProp - } - nameProp, err := expandSQLDatabaseName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - instanceProp, err := expandSQLDatabaseInstance(d.Get("instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance"); 
!isEmptyValue(reflect.ValueOf(instanceProp)) && (ok || !reflect.DeepEqual(v, instanceProp)) { - obj["instance"] = instanceProp - } - - lockName, err := replaceVars(d, config, "google-sql-database-instance-{{project}}-{{instance}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{instance}}/databases") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Database: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Database: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = SqlAdminOperationWaitTime( - config, res, project, "Creating Database", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create Database: %s", err) - } - - log.Printf("[DEBUG] Finished creating Database %q: %#v", d.Id(), res) - - return resourceSQLDatabaseRead(d, meta) -} - -func resourceSQLDatabaseRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{SQLBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(transformSQLDatabaseReadError(err), d, fmt.Sprintf("SQLDatabase %q", d.Id())) - } - - // Explicitly set virtual fields to default values if unset - if _, ok := d.GetOkExists("deletion_policy"); !ok { - if err := d.Set("deletion_policy", "DELETE"); err != nil { - return fmt.Errorf("Error setting deletion_policy: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - - if err := d.Set("charset", flattenSQLDatabaseCharset(res["charset"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("collation", flattenSQLDatabaseCollation(res["collation"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("name", flattenSQLDatabaseName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("instance", flattenSQLDatabaseInstance(res["instance"], d, config)); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading Database: %s", err) - } - - return nil -} - -func resourceSQLDatabaseUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if 
err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - charsetProp, err := expandSQLDatabaseCharset(d.Get("charset"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("charset"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, charsetProp)) { - obj["charset"] = charsetProp - } - collationProp, err := expandSQLDatabaseCollation(d.Get("collation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("collation"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, collationProp)) { - obj["collation"] = collationProp - } - nameProp, err := expandSQLDatabaseName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - instanceProp, err := expandSQLDatabaseInstance(d.Get("instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, instanceProp)) { - obj["instance"] = instanceProp - } - - lockName, err := replaceVars(d, config, "google-sql-database-instance-{{project}}-{{instance}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Database %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, 
d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Database %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Database %q: %#v", d.Id(), res) - } - - err = SqlAdminOperationWaitTime( - config, res, project, "Updating Database", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceSQLDatabaseRead(d, meta) -} - -func resourceSQLDatabaseDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "google-sql-database-instance-{{project}}-{{instance}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - if deletionPolicy := d.Get("deletion_policy"); deletionPolicy == "ABANDON" { - // Allows for database to be abandoned without deletion to avoid deletion failing - // for Postgres databases in some circumstances due to existing SQL users - return nil - } - log.Printf("[DEBUG] Deleting Database %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Database") - } - - err = SqlAdminOperationWaitTime( - config, res, project, "Deleting Database", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err 
!= nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Database %q: %#v", d.Id(), res) - return nil -} - -func resourceSQLDatabaseImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)/databases/(?P[^/]+)", - "instances/(?P[^/]+)/databases/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Explicitly set virtual fields to default values on import - if err := d.Set("deletion_policy", "DELETE"); err != nil { - return nil, fmt.Errorf("Error setting deletion_policy: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenSQLDatabaseCharset(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLDatabaseCollation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLDatabaseName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLDatabaseInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandSQLDatabaseCharset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLDatabaseCollation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLDatabaseName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLDatabaseInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil 
-} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_source_representation_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_source_representation_instance.go deleted file mode 100644 index 3d4a757c02..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_source_representation_instance.go +++ /dev/null @@ -1,569 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strconv" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "google.golang.org/api/googleapi" -) - -func ResourceSQLSourceRepresentationInstance() *schema.Resource { - return &schema.Resource{ - Create: resourceSQLSourceRepresentationInstanceCreate, - Read: resourceSQLSourceRepresentationInstanceRead, - Delete: resourceSQLSourceRepresentationInstanceDelete, - - Importer: &schema.ResourceImporter{ - State: resourceSQLSourceRepresentationInstanceImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "database_version": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"MYSQL_5_5", "MYSQL_5_6", 
"MYSQL_5_7", "MYSQL_8_0"}), - Description: `The MySQL version running on your source database server. Possible values: ["MYSQL_5_5", "MYSQL_5_6", "MYSQL_5_7", "MYSQL_8_0"]`, - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the source representation instance. Use any valid Cloud SQL instance name.`, - }, - "host": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateIpAddress, - Description: `The externally accessible IPv4 address for the source database server.`, - }, - "ca_certificate": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The CA certificate on the external server. Include only if SSL/TLS is used on the external server.`, - }, - "client_certificate": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The client certificate on the external server. Required only for server-client authentication. Include only if SSL/TLS is used on the external server.`, - }, - "client_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The private key file for the client certificate on the external server. Required only for server-client authentication. Include only if SSL/TLS is used on the external server.`, - }, - "dump_file_path": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A file in the bucket that contains the data from the external server.`, - }, - "password": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The password for the replication user account.`, - Sensitive: true, - }, - "port": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(0, 65535), - Description: `The externally accessible port for the source database server. 
-Defaults to 3306.`, - Default: 3306, - }, - "username": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The replication user account on the external server.`, - }, - - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The Region in which the created instance should reside. -If it is not provided, the provider region is used.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSQLSourceRepresentationInstanceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandSQLSourceRepresentationInstanceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - regionProp, err := expandSQLSourceRepresentationInstanceRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - databaseVersionProp, err := expandSQLSourceRepresentationInstanceDatabaseVersion(d.Get("database_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("database_version"); !isEmptyValue(reflect.ValueOf(databaseVersionProp)) && (ok || !reflect.DeepEqual(v, databaseVersionProp)) { - obj["databaseVersion"] = databaseVersionProp - } - onPremisesConfigurationProp, err := expandSQLSourceRepresentationInstanceOnPremisesConfiguration(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(reflect.ValueOf(onPremisesConfigurationProp)) { - 
obj["onPremisesConfiguration"] = onPremisesConfigurationProp - } - - obj, err = resourceSQLSourceRepresentationInstanceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new SourceRepresentationInstance: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for SourceRepresentationInstance: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating SourceRepresentationInstance: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = SqlAdminOperationWaitTime( - config, res, project, "Creating SourceRepresentationInstance", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - // The resource didn't actually create - d.SetId("") - return fmt.Errorf("Error waiting to create SourceRepresentationInstance: %s", err) - } - - log.Printf("[DEBUG] Finished creating SourceRepresentationInstance %q: %#v", d.Id(), res) - - return resourceSQLSourceRepresentationInstanceRead(d, meta) -} - -func resourceSQLSourceRepresentationInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{name}}") - if err != nil { - return err - } - - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for SourceRepresentationInstance: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SQLSourceRepresentationInstance %q", d.Id())) - } - - res, err = resourceSQLSourceRepresentationInstanceDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing SQLSourceRepresentationInstance because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading SourceRepresentationInstance: %s", err) - } - - if err := d.Set("name", flattenSQLSourceRepresentationInstanceName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading SourceRepresentationInstance: %s", err) - } - if err := d.Set("region", flattenSQLSourceRepresentationInstanceRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading SourceRepresentationInstance: %s", err) - } - if err := d.Set("database_version", flattenSQLSourceRepresentationInstanceDatabaseVersion(res["databaseVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading SourceRepresentationInstance: %s", err) - } - // Terraform must set the top level schema field, but since this object contains collapsed properties - // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. 
- if flattenedProp := flattenSQLSourceRepresentationInstanceOnPremisesConfiguration(res["onPremisesConfiguration"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*googleapi.Error); ok { - return fmt.Errorf("Error reading SourceRepresentationInstance: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - - return nil -} - -func resourceSQLSourceRepresentationInstanceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for SourceRepresentationInstance: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting SourceRepresentationInstance %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "SourceRepresentationInstance") - } - - err = SqlAdminOperationWaitTime( - config, res, project, "Deleting SourceRepresentationInstance", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting SourceRepresentationInstance %q: %#v", d.Id(), res) - return nil -} - -func resourceSQLSourceRepresentationInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, 
error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenSQLSourceRepresentationInstanceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLSourceRepresentationInstanceRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLSourceRepresentationInstanceDatabaseVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLSourceRepresentationInstanceOnPremisesConfiguration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host"] = - flattenSQLSourceRepresentationInstanceOnPremisesConfigurationHost(original["host"], d, config) - transformed["port"] = - flattenSQLSourceRepresentationInstanceOnPremisesConfigurationPort(original["port"], d, config) - transformed["username"] = - flattenSQLSourceRepresentationInstanceOnPremisesConfigurationUsername(original["username"], d, config) - transformed["password"] = - flattenSQLSourceRepresentationInstanceOnPremisesConfigurationPassword(original["password"], d, config) - transformed["dump_file_path"] = - flattenSQLSourceRepresentationInstanceOnPremisesConfigurationDumpFilePath(original["dumpFilePath"], d, config) - transformed["ca_certificate"] = - flattenSQLSourceRepresentationInstanceOnPremisesConfigurationCaCertificate(original["caCertificate"], d, config) - 
transformed["client_certificate"] = - flattenSQLSourceRepresentationInstanceOnPremisesConfigurationClientCertificate(original["clientCertificate"], d, config) - transformed["client_key"] = - flattenSQLSourceRepresentationInstanceOnPremisesConfigurationClientKey(original["clientKey"], d, config) - return []interface{}{transformed} -} -func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationUsername(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationPassword(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return d.Get("password") -} - -func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationDumpFilePath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationCaCertificate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationClientCertificate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationClientKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
expandSQLSourceRepresentationInstanceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLSourceRepresentationInstanceRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLSourceRepresentationInstanceDatabaseVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLSourceRepresentationInstanceOnPremisesConfiguration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedHost, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationHost(d.Get("host"), d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - transformedPort, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationPort(d.Get("port"), d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedUsername, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationUsername(d.Get("username"), d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !isEmptyValue(val) { - transformed["username"] = transformedUsername - } - - transformedPassword, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationPassword(d.Get("password"), d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !isEmptyValue(val) { - transformed["password"] = transformedPassword - } - - transformedDumpFilePath, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationDumpFilePath(d.Get("dump_file_path"), d, config) - if 
err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDumpFilePath); val.IsValid() && !isEmptyValue(val) { - transformed["dumpFilePath"] = transformedDumpFilePath - } - - transformedCaCertificate, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationCaCertificate(d.Get("ca_certificate"), d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCaCertificate); val.IsValid() && !isEmptyValue(val) { - transformed["caCertificate"] = transformedCaCertificate - } - - transformedClientCertificate, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationClientCertificate(d.Get("client_certificate"), d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedClientCertificate); val.IsValid() && !isEmptyValue(val) { - transformed["clientCertificate"] = transformedClientCertificate - } - - transformedClientKey, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationClientKey(d.Get("client_key"), d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedClientKey); val.IsValid() && !isEmptyValue(val) { - transformed["clientKey"] = transformedClientKey - } - - return transformed, nil -} - -func expandSQLSourceRepresentationInstanceOnPremisesConfigurationHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLSourceRepresentationInstanceOnPremisesConfigurationPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLSourceRepresentationInstanceOnPremisesConfigurationUsername(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLSourceRepresentationInstanceOnPremisesConfigurationPassword(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandSQLSourceRepresentationInstanceOnPremisesConfigurationDumpFilePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLSourceRepresentationInstanceOnPremisesConfigurationCaCertificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLSourceRepresentationInstanceOnPremisesConfigurationClientCertificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLSourceRepresentationInstanceOnPremisesConfigurationClientKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceSQLSourceRepresentationInstanceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - opc := obj["onPremisesConfiguration"].(map[string]interface{}) - opc["hostPort"] = fmt.Sprintf("%v:%v", opc["host"], opc["port"]) - delete(opc, "host") - delete(opc, "port") - return obj, nil -} - -func resourceSQLSourceRepresentationInstanceDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v, ok := res["onPremisesConfiguration"]; ok { - opc := v.(map[string]interface{}) - hostPort := opc["hostPort"] - spl := strings.Split(hostPort.(string), ":") - if len(spl) != 2 { - return nil, fmt.Errorf("unexpected value for hostPort, expected [host]:[port], got %q", hostPort) - } - opc["host"] = spl[0] - p, err := strconv.Atoi(spl[1]) - if err != nil { - return nil, fmt.Errorf("error converting port %q to int: %v", spl[1], err) - } - opc["port"] = p - delete(opc, "hostPort") - } - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_bucket_access_control.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_bucket_access_control.go deleted file mode 100644 index 61ef050a19..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_bucket_access_control.go +++ /dev/null @@ -1,347 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceStorageBucketAccessControl() *schema.Resource { - return &schema.Resource{ - Create: resourceStorageBucketAccessControlCreate, - Read: resourceStorageBucketAccessControlRead, - Update: resourceStorageBucketAccessControlUpdate, - Delete: resourceStorageBucketAccessControlDelete, - - Importer: &schema.ResourceImporter{ - State: resourceStorageBucketAccessControlImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the bucket.`, - }, - "entity": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The entity holding the permission, in one of the following forms: - user-userId - user-email 
- group-groupId - group-email - domain-domain - project-team-projectId - allUsers - allAuthenticatedUsers -Examples: - The user liz@example.com would be user-liz@example.com. - The group example@googlegroups.com would be - group-example@googlegroups.com. - To refer to all members of the Google Apps for Business domain - example.com, the entity would be domain-example.com.`, - }, - "role": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"OWNER", "READER", "WRITER", ""}), - Description: `The access permission for the entity. Possible values: ["OWNER", "READER", "WRITER"]`, - }, - "domain": { - Type: schema.TypeString, - Computed: true, - Description: `The domain associated with the entity.`, - }, - "email": { - Type: schema.TypeString, - Computed: true, - Description: `The email address associated with the entity.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageBucketAccessControlCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - bucketProp, err := expandStorageBucketAccessControlBucket(d.Get("bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket"); !isEmptyValue(reflect.ValueOf(bucketProp)) && (ok || !reflect.DeepEqual(v, bucketProp)) { - obj["bucket"] = bucketProp - } - entityProp, err := expandStorageBucketAccessControlEntity(d.Get("entity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entity"); !isEmptyValue(reflect.ValueOf(entityProp)) && (ok || !reflect.DeepEqual(v, entityProp)) { - obj["entity"] = entityProp - } - roleProp, err := expandStorageBucketAccessControlRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(reflect.ValueOf(roleProp)) && (ok || !reflect.DeepEqual(v, roleProp)) { - 
obj["role"] = roleProp - } - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/acl") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new BucketAccessControl: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating BucketAccessControl: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{bucket}}/{{entity}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating BucketAccessControl %q: %#v", d.Id(), res) - - return resourceStorageBucketAccessControlRead(d, meta) -} - -func resourceStorageBucketAccessControlRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/acl/{{entity}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("StorageBucketAccessControl %q", d.Id())) - } - - if err := d.Set("bucket", flattenStorageBucketAccessControlBucket(res["bucket"], d, config)); err != nil { - return fmt.Errorf("Error reading BucketAccessControl: %s", err) - } - if err 
:= d.Set("domain", flattenStorageBucketAccessControlDomain(res["domain"], d, config)); err != nil { - return fmt.Errorf("Error reading BucketAccessControl: %s", err) - } - if err := d.Set("email", flattenStorageBucketAccessControlEmail(res["email"], d, config)); err != nil { - return fmt.Errorf("Error reading BucketAccessControl: %s", err) - } - if err := d.Set("entity", flattenStorageBucketAccessControlEntity(res["entity"], d, config)); err != nil { - return fmt.Errorf("Error reading BucketAccessControl: %s", err) - } - if err := d.Set("role", flattenStorageBucketAccessControlRole(res["role"], d, config)); err != nil { - return fmt.Errorf("Error reading BucketAccessControl: %s", err) - } - - return nil -} - -func resourceStorageBucketAccessControlUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - bucketProp, err := expandStorageBucketAccessControlBucket(d.Get("bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bucketProp)) { - obj["bucket"] = bucketProp - } - entityProp, err := expandStorageBucketAccessControlEntity(d.Get("entity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entity"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entityProp)) { - obj["entity"] = entityProp - } - roleProp, err := expandStorageBucketAccessControlRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - 
- url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/acl/{{entity}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating BucketAccessControl %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating BucketAccessControl %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating BucketAccessControl %q: %#v", d.Id(), res) - } - - return resourceStorageBucketAccessControlRead(d, meta) -} - -func resourceStorageBucketAccessControlDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/acl/{{entity}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting BucketAccessControl %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "BucketAccessControl") - } - - log.Printf("[DEBUG] Finished deleting BucketAccessControl %q: %#v", d.Id(), res) - return nil -} - -func resourceStorageBucketAccessControlImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err 
:= parseImportId([]string{ - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{bucket}}/{{entity}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenStorageBucketAccessControlBucket(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenStorageBucketAccessControlDomain(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageBucketAccessControlEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageBucketAccessControlEntity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageBucketAccessControlRole(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandStorageBucketAccessControlBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageBucketAccessControlEntity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageBucketAccessControlRole(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_default_object_access_control.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_default_object_access_control.go deleted file mode 100644 index 96381847fc..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_default_object_access_control.go +++ /dev/null 
@@ -1,440 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceStorageDefaultObjectAccessControl() *schema.Resource { - return &schema.Resource{ - Create: resourceStorageDefaultObjectAccessControlCreate, - Read: resourceStorageDefaultObjectAccessControlRead, - Update: resourceStorageDefaultObjectAccessControlUpdate, - Delete: resourceStorageDefaultObjectAccessControlDelete, - - Importer: &schema.ResourceImporter{ - State: resourceStorageDefaultObjectAccessControlImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the bucket.`, - }, - "entity": { - Type: schema.TypeString, - Required: true, - Description: `The entity holding the permission, in one of the following forms: - * user-{{userId}} - * user-{{email}} (such as "user-liz@example.com") - * group-{{groupId}} - * group-{{email}} (such as "group-example@googlegroups.com") - * domain-{{domain}} (such as "domain-example.com") - * project-team-{{projectId}} - * allUsers - * allAuthenticatedUsers`, - }, - "role": { - Type: schema.TypeString, - Required: true, - 
ValidateFunc: validateEnum([]string{"OWNER", "READER"}), - Description: `The access permission for the entity. Possible values: ["OWNER", "READER"]`, - }, - "object": { - Type: schema.TypeString, - Optional: true, - Description: `The name of the object, if applied to an object.`, - }, - "domain": { - Type: schema.TypeString, - Computed: true, - Description: `The domain associated with the entity.`, - }, - "email": { - Type: schema.TypeString, - Computed: true, - Description: `The email address associated with the entity.`, - }, - "entity_id": { - Type: schema.TypeString, - Computed: true, - Description: `The ID for the entity`, - }, - "generation": { - Type: schema.TypeInt, - Computed: true, - Description: `The content generation of the object, if applied to an object.`, - }, - "project_team": { - Type: schema.TypeList, - Computed: true, - Description: `The project team associated with the entity`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "project_number": { - Type: schema.TypeString, - Optional: true, - Description: `The project team associated with the entity`, - }, - "team": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"editors", "owners", "viewers", ""}), - Description: `The team. 
Possible values: ["editors", "owners", "viewers"]`, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageDefaultObjectAccessControlCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - bucketProp, err := expandStorageDefaultObjectAccessControlBucket(d.Get("bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket"); !isEmptyValue(reflect.ValueOf(bucketProp)) && (ok || !reflect.DeepEqual(v, bucketProp)) { - obj["bucket"] = bucketProp - } - entityProp, err := expandStorageDefaultObjectAccessControlEntity(d.Get("entity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entity"); !isEmptyValue(reflect.ValueOf(entityProp)) && (ok || !reflect.DeepEqual(v, entityProp)) { - obj["entity"] = entityProp - } - objectProp, err := expandStorageDefaultObjectAccessControlObject(d.Get("object"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("object"); !isEmptyValue(reflect.ValueOf(objectProp)) && (ok || !reflect.DeepEqual(v, objectProp)) { - obj["object"] = objectProp - } - roleProp, err := expandStorageDefaultObjectAccessControlRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(reflect.ValueOf(roleProp)) && (ok || !reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/defaultObjectAcl") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new DefaultObjectAccessControl: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was 
found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating DefaultObjectAccessControl: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{bucket}}/{{entity}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating DefaultObjectAccessControl %q: %#v", d.Id(), res) - - return resourceStorageDefaultObjectAccessControlRead(d, meta) -} - -func resourceStorageDefaultObjectAccessControlRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/defaultObjectAcl/{{entity}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("StorageDefaultObjectAccessControl %q", d.Id())) - } - - if err := d.Set("domain", flattenStorageDefaultObjectAccessControlDomain(res["domain"], d, config)); err != nil { - return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := d.Set("email", flattenStorageDefaultObjectAccessControlEmail(res["email"], d, config)); err != nil { - return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := d.Set("entity", flattenStorageDefaultObjectAccessControlEntity(res["entity"], d, config)); err != nil { - return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := 
d.Set("entity_id", flattenStorageDefaultObjectAccessControlEntityId(res["entityId"], d, config)); err != nil { - return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := d.Set("generation", flattenStorageDefaultObjectAccessControlGeneration(res["generation"], d, config)); err != nil { - return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := d.Set("object", flattenStorageDefaultObjectAccessControlObject(res["object"], d, config)); err != nil { - return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := d.Set("project_team", flattenStorageDefaultObjectAccessControlProjectTeam(res["projectTeam"], d, config)); err != nil { - return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := d.Set("role", flattenStorageDefaultObjectAccessControlRole(res["role"], d, config)); err != nil { - return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - - return nil -} - -func resourceStorageDefaultObjectAccessControlUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - bucketProp, err := expandStorageDefaultObjectAccessControlBucket(d.Get("bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bucketProp)) { - obj["bucket"] = bucketProp - } - entityProp, err := expandStorageDefaultObjectAccessControlEntity(d.Get("entity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entity"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entityProp)) { - obj["entity"] = entityProp - } - objectProp, err := expandStorageDefaultObjectAccessControlObject(d.Get("object"), d, config) - if err != nil { - return err - } 
else if v, ok := d.GetOkExists("object"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, objectProp)) { - obj["object"] = objectProp - } - roleProp, err := expandStorageDefaultObjectAccessControlRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/defaultObjectAcl/{{entity}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating DefaultObjectAccessControl %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating DefaultObjectAccessControl %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating DefaultObjectAccessControl %q: %#v", d.Id(), res) - } - - return resourceStorageDefaultObjectAccessControlRead(d, meta) -} - -func resourceStorageDefaultObjectAccessControlDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/defaultObjectAcl/{{entity}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting 
DefaultObjectAccessControl %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DefaultObjectAccessControl") - } - - log.Printf("[DEBUG] Finished deleting DefaultObjectAccessControl %q: %#v", d.Id(), res) - return nil -} - -func resourceStorageDefaultObjectAccessControlImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{bucket}}/{{entity}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenStorageDefaultObjectAccessControlDomain(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlEntity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlEntityId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlGeneration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return 
intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenStorageDefaultObjectAccessControlObject(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlProjectTeam(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_number"] = - flattenStorageDefaultObjectAccessControlProjectTeamProjectNumber(original["projectNumber"], d, config) - transformed["team"] = - flattenStorageDefaultObjectAccessControlProjectTeamTeam(original["team"], d, config) - return []interface{}{transformed} -} -func flattenStorageDefaultObjectAccessControlProjectTeamProjectNumber(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlProjectTeamTeam(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlRole(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandStorageDefaultObjectAccessControlBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageDefaultObjectAccessControlEntity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageDefaultObjectAccessControlObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageDefaultObjectAccessControlRole(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_hmac_key.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_hmac_key.go deleted file mode 100644 index 9a5c132604..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_hmac_key.go +++ /dev/null @@ -1,471 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceStorageHmacKey() *schema.Resource { - return &schema.Resource{ - Create: resourceStorageHmacKeyCreate, - Read: resourceStorageHmacKeyRead, - Update: resourceStorageHmacKeyUpdate, - Delete: resourceStorageHmacKeyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceStorageHmacKeyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "service_account_email": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The email address of the key's associated service account.`, - }, - "state": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"ACTIVE", "INACTIVE", ""}), - Description: `The state of the key. Can be set to one of ACTIVE, INACTIVE. 
Default value: "ACTIVE" Possible values: ["ACTIVE", "INACTIVE"]`, - Default: "ACTIVE", - }, - "access_id": { - Type: schema.TypeString, - Computed: true, - Description: `The access ID of the HMAC Key.`, - }, - "secret": { - Type: schema.TypeString, - Computed: true, - Description: `HMAC secret key material.`, - Sensitive: true, - }, - "time_created": { - Type: schema.TypeString, - Computed: true, - Description: `'The creation time of the HMAC key in RFC 3339 format. '`, - }, - "updated": { - Type: schema.TypeString, - Computed: true, - Description: `'The last modification time of the HMAC key metadata in RFC 3339 format.'`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageHmacKeyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - serviceAccountEmailProp, err := expandStorageHmacKeyServiceAccountEmail(d.Get("service_account_email"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account_email"); !isEmptyValue(reflect.ValueOf(serviceAccountEmailProp)) && (ok || !reflect.DeepEqual(v, serviceAccountEmailProp)) { - obj["serviceAccountEmail"] = serviceAccountEmailProp - } - stateProp, err := expandStorageHmacKeyState(d.Get("state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("state"); !isEmptyValue(reflect.ValueOf(stateProp)) && (ok || !reflect.DeepEqual(v, stateProp)) { - obj["state"] = stateProp - } - - url, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys?serviceAccountEmail={{service_account_email}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new HmacKey: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
fmt.Errorf("Error fetching project for HmacKey: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating HmacKey: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // `secret` and `access_id` are generated by the API upon successful CREATE. The following - // ensures terraform has the correct values based on the Projects.hmacKeys response object. - secret, ok := res["secret"].(string) - if !ok { - return fmt.Errorf("The response to CREATE was missing an expected field. Your create did not work.") - } - - if err := d.Set("secret", secret); err != nil { - return fmt.Errorf("Error setting secret: %s", err) - } - - metadata := res["metadata"].(map[string]interface{}) - accessId, ok := metadata["accessId"].(string) - if !ok { - return fmt.Errorf("The response to CREATE was missing an expected field. 
Your create did not work.") - } - - if err := d.Set("access_id", accessId); err != nil { - return fmt.Errorf("Error setting access_id: %s", err) - } - - id, err = replaceVars(d, config, "projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - - d.SetId(id) - - err = PollingWaitTime(resourceStorageHmacKeyPollRead(d, meta), PollCheckForExistence, "Creating HmacKey", d.Timeout(schema.TimeoutCreate), 1) - if err != nil { - return fmt.Errorf("Error waiting to create HmacKey: %s", err) - } - - log.Printf("[DEBUG] Finished creating HmacKey %q: %#v", d.Id(), res) - - return resourceStorageHmacKeyRead(d, meta) -} - -func resourceStorageHmacKeyPollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, fmt.Errorf("Error fetching project for HmacKey: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - res, err = resourceStorageHmacKeyDecoder(d, meta, res) - if err != nil { - return nil, err - } - if res == nil { - return nil, fake404("decoded", "StorageHmacKey") - } - - return res, nil - } -} - -func resourceStorageHmacKeyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := 
replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for HmacKey: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("StorageHmacKey %q", d.Id())) - } - - res, err = resourceStorageHmacKeyDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing StorageHmacKey because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading HmacKey: %s", err) - } - - if err := d.Set("service_account_email", flattenStorageHmacKeyServiceAccountEmail(res["serviceAccountEmail"], d, config)); err != nil { - return fmt.Errorf("Error reading HmacKey: %s", err) - } - if err := d.Set("state", flattenStorageHmacKeyState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading HmacKey: %s", err) - } - if err := d.Set("access_id", flattenStorageHmacKeyAccessId(res["accessId"], d, config)); err != nil { - return fmt.Errorf("Error reading HmacKey: %s", err) - } - if err := d.Set("time_created", flattenStorageHmacKeyTimeCreated(res["timeCreated"], d, config)); err != nil { - return fmt.Errorf("Error reading HmacKey: %s", err) - } - if err := d.Set("updated", flattenStorageHmacKeyUpdated(res["updated"], d, config)); err != nil { - return fmt.Errorf("Error reading HmacKey: %s", err) - } - - return nil -} - -func resourceStorageHmacKeyUpdate(d *schema.ResourceData, meta interface{}) 
error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for HmacKey: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("state") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := SendRequest(config, "GET", billingProject, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("StorageHmacKey %q", d.Id())) - } - - obj["etag"] = getRes["etag"] - - stateProp, err := expandStorageHmacKeyState(d.Get("state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("state"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, stateProp)) { - obj["state"] = stateProp - } - - url, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating HmacKey %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating HmacKey %q: %#v", d.Id(), res) - } - - } - - d.Partial(false) - - return resourceStorageHmacKeyRead(d, meta) -} - -func resourceStorageHmacKeyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for HmacKey: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - getUrl, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return err - } - - getRes, err := SendRequest(config, "GET", project, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("StorageHmacKey %q", d.Id())) - } - - // HmacKeys need to be INACTIVE to be deleted and the API doesn't accept noop - // updates - if v := getRes["state"]; v == "ACTIVE" { - getRes["state"] = "INACTIVE" - updateUrl, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Deactivating HmacKey %q: %#v", d.Id(), getRes) - _, err = SendRequestWithTimeout(config, "PUT", project, updateUrl, userAgent, getRes, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error deactivating HmacKey %q: %s", d.Id(), err) - } - } - log.Printf("[DEBUG] Deleting HmacKey %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "HmacKey") - } - - log.Printf("[DEBUG] Finished deleting HmacKey %q: %#v", d.Id(), res) - return nil -} - -func resourceStorageHmacKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if 
err := parseImportId([]string{ - "projects/(?P[^/]+)/hmacKeys/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenStorageHmacKeyServiceAccountEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageHmacKeyState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageHmacKeyAccessId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageHmacKeyTimeCreated(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageHmacKeyUpdated(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandStorageHmacKeyServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageHmacKeyState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceStorageHmacKeyDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v := res["state"]; v == "DELETED" { - return nil, nil - } - - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_object_access_control.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_object_access_control.go deleted file mode 100644 index 76ee6fd5b7..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_object_access_control.go +++ /dev/null @@ -1,450 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceStorageObjectAccessControl() *schema.Resource { - return &schema.Resource{ - Create: resourceStorageObjectAccessControlCreate, - Read: resourceStorageObjectAccessControlRead, - Update: resourceStorageObjectAccessControlUpdate, - Delete: resourceStorageObjectAccessControlDelete, - - Importer: &schema.ResourceImporter{ - State: resourceStorageObjectAccessControlImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "bucket": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the bucket.`, - }, - "entity": { - Type: schema.TypeString, - Required: true, - Description: `The entity holding the permission, in one of the following forms: - * user-{{userId}} - * user-{{email}} (such as "user-liz@example.com") - * group-{{groupId}} - * group-{{email}} (such as "group-example@googlegroups.com") - * domain-{{domain}} (such as "domain-example.com") - * project-team-{{projectId}} - * allUsers 
- * allAuthenticatedUsers`, - }, - "object": { - Type: schema.TypeString, - Required: true, - Description: `The name of the object to apply the access control to.`, - }, - "role": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateEnum([]string{"OWNER", "READER"}), - Description: `The access permission for the entity. Possible values: ["OWNER", "READER"]`, - }, - "domain": { - Type: schema.TypeString, - Computed: true, - Description: `The domain associated with the entity.`, - }, - "email": { - Type: schema.TypeString, - Computed: true, - Description: `The email address associated with the entity.`, - }, - "entity_id": { - Type: schema.TypeString, - Computed: true, - Description: `The ID for the entity`, - }, - "generation": { - Type: schema.TypeInt, - Computed: true, - Description: `The content generation of the object, if applied to an object.`, - }, - "project_team": { - Type: schema.TypeList, - Computed: true, - Description: `The project team associated with the entity`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "project_number": { - Type: schema.TypeString, - Optional: true, - Description: `The project team associated with the entity`, - }, - "team": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateEnum([]string{"editors", "owners", "viewers", ""}), - Description: `The team. 
Possible values: ["editors", "owners", "viewers"]`, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageObjectAccessControlCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - bucketProp, err := expandStorageObjectAccessControlBucket(d.Get("bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket"); !isEmptyValue(reflect.ValueOf(bucketProp)) && (ok || !reflect.DeepEqual(v, bucketProp)) { - obj["bucket"] = bucketProp - } - entityProp, err := expandStorageObjectAccessControlEntity(d.Get("entity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entity"); !isEmptyValue(reflect.ValueOf(entityProp)) && (ok || !reflect.DeepEqual(v, entityProp)) { - obj["entity"] = entityProp - } - objectProp, err := expandStorageObjectAccessControlObject(d.Get("object"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("object"); !isEmptyValue(reflect.ValueOf(objectProp)) && (ok || !reflect.DeepEqual(v, objectProp)) { - obj["object"] = objectProp - } - roleProp, err := expandStorageObjectAccessControlRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(reflect.ValueOf(roleProp)) && (ok || !reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/o/{{%object}}/acl") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new ObjectAccessControl: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating ObjectAccessControl: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{bucket}}/{{object}}/{{entity}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating ObjectAccessControl %q: %#v", d.Id(), res) - - return resourceStorageObjectAccessControlRead(d, meta) -} - -func resourceStorageObjectAccessControlRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/o/{{%object}}/acl/{{entity}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("StorageObjectAccessControl %q", d.Id())) - } - - if err := d.Set("bucket", flattenStorageObjectAccessControlBucket(res["bucket"], d, config)); err != nil { - return fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("domain", flattenStorageObjectAccessControlDomain(res["domain"], d, config)); err != nil { - return fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("email", flattenStorageObjectAccessControlEmail(res["email"], d, config)); err != nil { - return fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("entity", flattenStorageObjectAccessControlEntity(res["entity"], d, config)); err != nil { - 
return fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("entity_id", flattenStorageObjectAccessControlEntityId(res["entityId"], d, config)); err != nil { - return fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("generation", flattenStorageObjectAccessControlGeneration(res["generation"], d, config)); err != nil { - return fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("object", flattenStorageObjectAccessControlObject(res["object"], d, config)); err != nil { - return fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("project_team", flattenStorageObjectAccessControlProjectTeam(res["projectTeam"], d, config)); err != nil { - return fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("role", flattenStorageObjectAccessControlRole(res["role"], d, config)); err != nil { - return fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - - return nil -} - -func resourceStorageObjectAccessControlUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - bucketProp, err := expandStorageObjectAccessControlBucket(d.Get("bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bucketProp)) { - obj["bucket"] = bucketProp - } - entityProp, err := expandStorageObjectAccessControlEntity(d.Get("entity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entity"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entityProp)) { - obj["entity"] = entityProp - } - objectProp, err := expandStorageObjectAccessControlObject(d.Get("object"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("object"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, objectProp)) { - obj["object"] = objectProp - } - roleProp, err := expandStorageObjectAccessControlRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/o/{{%object}}/acl/{{entity}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating ObjectAccessControl %q: %#v", d.Id(), obj) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating ObjectAccessControl %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating ObjectAccessControl %q: %#v", d.Id(), res) - } - - return resourceStorageObjectAccessControlRead(d, meta) -} - -func resourceStorageObjectAccessControlDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/o/{{%object}}/acl/{{entity}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting 
ObjectAccessControl %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ObjectAccessControl") - } - - log.Printf("[DEBUG] Finished deleting ObjectAccessControl %q: %#v", d.Id(), res) - return nil -} - -func resourceStorageObjectAccessControlImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)/(?P.+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{bucket}}/{{object}}/{{entity}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenStorageObjectAccessControlBucket(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenStorageObjectAccessControlDomain(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlEntity(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlEntityId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlGeneration(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - 
return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenStorageObjectAccessControlObject(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlProjectTeam(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_number"] = - flattenStorageObjectAccessControlProjectTeamProjectNumber(original["projectNumber"], d, config) - transformed["team"] = - flattenStorageObjectAccessControlProjectTeamTeam(original["team"], d, config) - return []interface{}{transformed} -} -func flattenStorageObjectAccessControlProjectTeamProjectNumber(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlProjectTeamTeam(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlRole(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandStorageObjectAccessControlBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageObjectAccessControlEntity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageObjectAccessControlObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageObjectAccessControlRole(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_transfer_agent_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_transfer_agent_pool.go deleted file mode 100644 index 35046e07b3..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_transfer_agent_pool.go +++ /dev/null @@ -1,407 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// waitForAgentPoolReady waits for an agent pool to leave the -// "CREATING" state and become "CREATED", to indicate that it's ready. 
-func waitForAgentPoolReady(d *schema.ResourceData, config *Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { - if err := resourceStorageTransferAgentPoolRead(d, config); err != nil { - return resource.NonRetryableError(err) - } - - name := d.Get("name").(string) - state := d.Get("state").(string) - if state == "CREATING" { - return resource.RetryableError(fmt.Errorf("AgentPool %q has state %q.", name, state)) - } else if state == "CREATED" { - log.Printf("[DEBUG] AgentPool %q has state %q.", name, state) - return nil - } else { - return resource.NonRetryableError(fmt.Errorf("AgentPool %q has state %q.", name, state)) - } - }) -} - -func ResourceStorageTransferAgentPool() *schema.Resource { - return &schema.Resource{ - Create: resourceStorageTransferAgentPoolCreate, - Read: resourceStorageTransferAgentPoolRead, - Update: resourceStorageTransferAgentPoolUpdate, - Delete: resourceStorageTransferAgentPoolDelete, - - Importer: &schema.ResourceImporter{ - State: resourceStorageTransferAgentPoolImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(30 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the agent pool to create. - -The agentPoolId must meet the following requirements: -* Length of 128 characters or less. -* Not start with the string goog. -* Start with a lowercase ASCII character, followed by: - * Zero or more: lowercase Latin alphabet characters, numerals, hyphens (-), periods (.), underscores (_), or tildes (~). - * One or more numerals or lowercase ASCII characters. 
- -As expressed by the regular expression: ^(?!goog)[a-z]([a-z0-9-._~]*[a-z0-9])?$.`, - }, - "bandwidth_limit": { - Type: schema.TypeList, - Optional: true, - Description: `Specifies the bandwidth limit details. If this field is unspecified, the default value is set as 'No Limit'.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "limit_mbps": { - Type: schema.TypeString, - Required: true, - Description: `Bandwidth rate in megabytes per second, distributed across all the agents in the pool.`, - }, - }, - }, - }, - "display_name": { - Type: schema.TypeString, - Optional: true, - Description: `Specifies the client-specified AgentPool description.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `Specifies the state of the AgentPool.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageTransferAgentPoolCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandStorageTransferAgentPoolDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - bandwidthLimitProp, err := expandStorageTransferAgentPoolBandwidthLimit(d.Get("bandwidth_limit"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bandwidth_limit"); !isEmptyValue(reflect.ValueOf(bandwidthLimitProp)) && (ok || !reflect.DeepEqual(v, bandwidthLimitProp)) { - obj["bandwidthLimit"] = bandwidthLimitProp - } - - url, err := replaceVars(d, config, 
"{{StorageTransferBasePath}}projects/{{project}}/agentPools?agentPoolId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new AgentPool: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AgentPool: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating AgentPool: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/agentPools/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := waitForAgentPoolReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { - return fmt.Errorf("Error waiting for AgentPool %q to be CREATED during creation: %q", d.Get("name").(string), err) - } - - log.Printf("[DEBUG] Finished creating AgentPool %q: %#v", d.Id(), res) - - return resourceStorageTransferAgentPoolRead(d, meta) -} - -func resourceStorageTransferAgentPoolRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{StorageTransferBasePath}}projects/{{project}}/agentPools/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AgentPool: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("StorageTransferAgentPool %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading AgentPool: %s", err) - } - - if err := d.Set("display_name", flattenStorageTransferAgentPoolDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading AgentPool: %s", err) - } - if err := d.Set("state", flattenStorageTransferAgentPoolState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading AgentPool: %s", err) - } - if err := d.Set("bandwidth_limit", flattenStorageTransferAgentPoolBandwidthLimit(res["bandwidthLimit"], d, config)); err != nil { - return fmt.Errorf("Error reading AgentPool: %s", err) - } - - return nil -} - -func resourceStorageTransferAgentPoolUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AgentPool: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandStorageTransferAgentPoolDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - bandwidthLimitProp, err := expandStorageTransferAgentPoolBandwidthLimit(d.Get("bandwidth_limit"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bandwidth_limit"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bandwidthLimitProp)) { - obj["bandwidthLimit"] = bandwidthLimitProp - } - - url, err := replaceVars(d, config, 
"{{StorageTransferBasePath}}projects/{{project}}/agentPools/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating AgentPool %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("bandwidth_limit") { - updateMask = append(updateMask, "bandwidthLimit") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - if err := waitForAgentPoolReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { - return fmt.Errorf("Error waiting for AgentPool %q to be CREATED before updating: %q", d.Get("name").(string), err) - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating AgentPool %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating AgentPool %q: %#v", d.Id(), res) - } - - return resourceStorageTransferAgentPoolRead(d, meta) -} - -func resourceStorageTransferAgentPoolDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for AgentPool: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{StorageTransferBasePath}}projects/{{project}}/agentPools/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting AgentPool %q", d.Id()) 
- - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "AgentPool") - } - - log.Printf("[DEBUG] Finished deleting AgentPool %q: %#v", d.Id(), res) - return nil -} - -func resourceStorageTransferAgentPoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/agentPools/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/agentPools/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := waitForAgentPoolReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { - return nil, fmt.Errorf("Error waiting for AgentPool %q to be CREATED during importing: %q", d.Get("name").(string), err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenStorageTransferAgentPoolDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageTransferAgentPoolState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageTransferAgentPoolBandwidthLimit(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["limit_mbps"] = - flattenStorageTransferAgentPoolBandwidthLimitLimitMbps(original["limitMbps"], d, config) - return []interface{}{transformed} -} -func 
flattenStorageTransferAgentPoolBandwidthLimitLimitMbps(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandStorageTransferAgentPoolDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageTransferAgentPoolBandwidthLimit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLimitMbps, err := expandStorageTransferAgentPoolBandwidthLimitLimitMbps(original["limit_mbps"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLimitMbps); val.IsValid() && !isEmptyValue(val) { - transformed["limitMbps"] = transformedLimitMbps - } - - return transformed, nil -} - -func expandStorageTransferAgentPoolBandwidthLimitLimitMbps(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tags_location_tag_bindings.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tags_location_tag_bindings.go deleted file mode 100644 index 005e9026ee..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tags_location_tag_bindings.go +++ /dev/null @@ -1,343 +0,0 @@ -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceTagsLocationTagBinding() *schema.Resource { - return &schema.Resource{ - Create: resourceTagsLocationTagBindingCreate, - Read: resourceTagsLocationTagBindingRead, - Delete: resourceTagsLocationTagBindingDelete, - - Importer: 
&schema.ResourceImporter{ - State: resourceTagsLocationTagBindingImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The full resource name of the resource the TagValue is bound to. E.g. //cloudresourcemanager.googleapis.com/projects/123`, - }, - "tag_value": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The TagValue of the TagBinding. Must be of the form tagValues/456.`, - }, - "location": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Description: `The geographic location where the transfer config should reside. -Examples: US, EU, asia-northeast1. The default value is US.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The generated id for the TagBinding. This is a string of the form: 'tagBindings/{full-resource-name}/{tag-value-name}'`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceTagsLocationTagBindingCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - parentProp, err := expandNestedTagsLocationTagBindingParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - tagValueProp, err := expandNestedTagsLocationTagBindingTagValue(d.Get("tag_value"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tag_value"); !isEmptyValue(reflect.ValueOf(tagValueProp)) && (ok || !reflect.DeepEqual(v, tagValueProp)) { - obj["tagValue"] = tagValueProp - } - - url, err := 
replaceVars(d, config, "{{TagsLocationBasePath}}tagBindings") - log.Printf("url for TagsLocation: %s", url) - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new LocationTagBinding: %#v", obj) - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating LocationTagBinding: %s", err) - } - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - - var opRes map[string]interface{} - err = TagsLocationOperationWaitTimeWithResponse( - config, res, &opRes, "Creating LocationTagBinding", userAgent, - d.Timeout(schema.TimeoutCreate)) - - if err != nil { - d.SetId("") - return fmt.Errorf("Error waiting to create LocationTagBinding: %s", err) - } - - if _, ok := opRes["tagBindings"]; ok { - opRes, err = flattenNestedTagsLocationTagBinding(d, meta, opRes) - if err != nil { - return fmt.Errorf("Error getting nested object from operation response: %s", err) - } - if opRes == nil { - // Object isn't there any more - remove it from the state. 
- d.SetId("") - return fmt.Errorf("Error decoding response from operation, could not find nested object") - } - } - if err := d.Set("name", flattenNestedTagsLocationTagBindingName(opRes["name"], d, config)); err != nil { - return err - } - - id, err := replaceVars(d, config, "{{location}}/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating LocationTagBinding %q: %#v", d.Id(), res) - - return resourceTagsLocationTagBindingRead(d, meta) -} - -func resourceTagsLocationTagBindingRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{TagsLocationBasePath}}tagBindings/?parent={{parent}}&pageSize=300") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("TagsLocationTagBinding %q", d.Id())) - } - log.Printf("[DEBUG] Skipping res with name for import = %#v,)", res) - - p, ok := res["tagBindings"] - if !ok || p == nil { - return nil - } - pView := p.([]interface{}) - - //if there are more than 300 bindings - handling pagination over here - if pageToken, ok := res["nextPageToken"].(string); ok { - for pageToken != "" { - url, err = addQueryParams(url, map[string]string{"pageToken": fmt.Sprintf("%s", res["nextPageToken"])}) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("TagsLocationTagBinding %q", d.Id())) - } - resp, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("TagsLocationTagBinding %q", d.Id())) - } - 
if resp == nil { - d.SetId("") - return nil - } - v, ok := resp["tagBindings"] - if !ok || v == nil { - return nil - } - pView = append(pView, v.([]interface{})...) - if token, ok := res["nextPageToken"]; ok { - pageToken = token.(string) - } else { - pageToken = "" - } - } - } - - newMap := make(map[string]interface{}, 1) - newMap["tagBindings"] = pView - - res, err = flattenNestedTagsLocationTagBinding(d, meta, newMap) - if err != nil { - return err - } - - if err := d.Set("name", flattenNestedTagsLocationTagBindingName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading LocationTagBinding: %s", err) - } - if err := d.Set("parent", flattenNestedTagsLocationTagBindingParent(res["parent"], d, config)); err != nil { - return fmt.Errorf("Error reading LocationTagBinding: %s", err) - } - if err := d.Set("tag_value", flattenNestedTagsLocationTagBindingTagValue(res["tagValue"], d, config)); err != nil { - return fmt.Errorf("Error reading LocationTagBinding: %s", err) - } - - return nil -} - -func resourceTagsLocationTagBindingDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{TagsLocationBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting LocationTagBinding %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "LocationTagBinding") - } - - err = TagsLocationOperationWaitTime( - config, res, "Deleting LocationTagBinding", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return 
err - } - - log.Printf("[DEBUG] Finished deleting LocationTagBinding %q: %#v", d.Id(), res) - return nil -} - -func resourceTagsLocationTagBindingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{"(?P[^/]+)/tagBindings/(?P[^/]+)/tagValues/(?P[^/]+)"}, d, config); err != nil { - return nil, err - } - - parent := d.Get("parent").(string) - parentProper := strings.ReplaceAll(parent, "%2F", "/") - d.Set("parent", parentProper) - d.Set("name", fmt.Sprintf("tagBindings/%s/tagValues/%s", parent, d.Get("tag_value").(string))) - id, err := replaceVars(d, config, "{{location}}/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenNestedTagsLocationTagBindingName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedTagsLocationTagBindingParent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedTagsLocationTagBindingTagValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedTagsLocationTagBindingParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedTagsLocationTagBindingTagValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenNestedTagsLocationTagBinding(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["tagBindings"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - log.Printf("[DEBUG] Hey it's in break = %#v,)", v) - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - 
return nil, fmt.Errorf("expected list or map for value tagBindings. Actual value: %v", v) - } - - _, item, err := resourceTagsLocationTagBindingFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceTagsLocationTagBindingFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName := d.Get("name") - expectedFlattenedName := flattenNestedTagsLocationTagBindingName(expectedName, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - - item := itemRaw.(map[string]interface{}) - itemName := flattenNestedTagsLocationTagBindingName(item["name"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { - log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - return idx, item, nil - } - return -1, nil, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tags_tag_binding.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tags_tag_binding.go deleted file mode 100644 index d0c0ff6b3e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tags_tag_binding.go +++ /dev/null @@ -1,340 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes 
will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceTagsTagBinding() *schema.Resource { - return &schema.Resource{ - Create: resourceTagsTagBindingCreate, - Read: resourceTagsTagBindingRead, - Delete: resourceTagsTagBindingDelete, - - Importer: &schema.ResourceImporter{ - State: resourceTagsTagBindingImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The full resource name of the resource the TagValue is bound to. E.g. //cloudresourcemanager.googleapis.com/projects/123`, - }, - "tag_value": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The TagValue of the TagBinding. Must be of the form tagValues/456.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The generated id for the TagBinding. 
This is a string of the form: 'tagBindings/{full-resource-name}/{tag-value-name}'`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceTagsTagBindingCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - parentProp, err := expandNestedTagsTagBindingParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - tagValueProp, err := expandNestedTagsTagBindingTagValue(d.Get("tag_value"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tag_value"); !isEmptyValue(reflect.ValueOf(tagValueProp)) && (ok || !reflect.DeepEqual(v, tagValueProp)) { - obj["tagValue"] = tagValueProp - } - - lockName, err := replaceVars(d, config, "tagBindings/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagBindings") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new TagBinding: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TagBinding: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "tagBindings/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = 
TagsOperationWaitTimeWithResponse( - config, res, &opRes, "Creating TagBinding", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create TagBinding: %s", err) - } - - if _, ok := opRes["tagBindings"]; ok { - opRes, err = flattenNestedTagsTagBinding(d, meta, opRes) - if err != nil { - return fmt.Errorf("Error getting nested object from operation response: %s", err) - } - if opRes == nil { - // Object isn't there any more - remove it from the state. - return fmt.Errorf("Error decoding response from operation, could not find nested object") - } - } - if err := d.Set("name", flattenNestedTagsTagBindingName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "tagBindings/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating TagBinding %q: %#v", d.Id(), res) - - return resourceTagsTagBindingRead(d, meta) -} - -func resourceTagsTagBindingRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagBindings/?parent={{parent}}&pageSize=300") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("TagsTagBinding %q", d.Id())) - } - - res, err = flattenNestedTagsTagBinding(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Object isn't there any more - remove it from the state. 
- log.Printf("[DEBUG] Removing TagsTagBinding because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenNestedTagsTagBindingName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading TagBinding: %s", err) - } - if err := d.Set("parent", flattenNestedTagsTagBindingParent(res["parent"], d, config)); err != nil { - return fmt.Errorf("Error reading TagBinding: %s", err) - } - if err := d.Set("tag_value", flattenNestedTagsTagBindingTagValue(res["tagValue"], d, config)); err != nil { - return fmt.Errorf("Error reading TagBinding: %s", err) - } - - return nil -} - -func resourceTagsTagBindingDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "tagBindings/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagBindings/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting TagBinding %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TagBinding") - } - - err = TagsOperationWaitTime( - config, res, "Deleting TagBinding", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting TagBinding %q: %#v", d.Id(), res) - return nil -} - -func resourceTagsTagBindingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - - // current import_formats 
can't import fields with forward slashes in their value - if err := parseImportId([]string{ - "tagBindings/(?P.+)", - "(?P.+)", - }, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) - d.SetId(name) - - return []*schema.ResourceData{d}, nil -} - -func flattenNestedTagsTagBindingName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - parts := strings.Split(v.(string), "/") - return strings.Join(parts[len(parts)-3:], "/") -} - -func flattenNestedTagsTagBindingParent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedTagsTagBindingTagValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedTagsTagBindingParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedTagsTagBindingTagValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenNestedTagsTagBinding(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["tagBindings"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - // Construct list out of single nested resource - v = []interface{}{v} - default: - return nil, fmt.Errorf("expected list or map for value tagBindings. 
Actual value: %v", v) - } - - _, item, err := resourceTagsTagBindingFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceTagsTagBindingFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName := d.Get("name") - expectedFlattenedName := flattenNestedTagsTagBindingName(expectedName, d, meta.(*Config)) - - // Search list for this resource. - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemName := flattenNestedTagsTagBindingName(item["name"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { - log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tags_tag_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tags_tag_key.go deleted file mode 100644 index 6e58520d28..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tags_tag_key.go +++ /dev/null @@ -1,457 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is 
regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceTagsTagKey() *schema.Resource { - return &schema.Resource{ - Create: resourceTagsTagKeyCreate, - Read: resourceTagsTagKeyRead, - Update: resourceTagsTagKeyUpdate, - Delete: resourceTagsTagKeyDelete, - - Importer: &schema.ResourceImporter{ - State: resourceTagsTagKeyImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Input only. The resource name of the new TagKey's parent. Must be of the form organizations/{org_id}.`, - }, - "short_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 63), - Description: `Input only. The user friendly name for a TagKey. The short name should be unique for TagKeys within the same tag namespace. - -The short name must be 1-63 characters, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 256), - Description: `User-assigned description of the TagKey. Must not exceed 256 characters.`, - }, - "purpose": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateEnum([]string{"GCE_FIREWALL", ""}), - Description: `Optional. 
A purpose cannot be changed once set. - -A purpose denotes that this Tag is intended for use in policies of a specific policy engine, and will involve that policy engine in management operations involving this Tag. Possible values: ["GCE_FIREWALL"]`, - }, - "purpose_data": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Optional. Purpose data cannot be changed once set. - -Purpose data corresponds to the policy system that the tag is intended for. For example, the GCE_FIREWALL purpose expects data in the following format: 'network = "/"'.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Creation time. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The generated numeric id for the TagKey.`, - }, - "namespaced_name": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Namespaced name of the TagKey.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Update time. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceTagsTagKeyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - parentProp, err := expandTagsTagKeyParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - shortNameProp, err := expandTagsTagKeyShortName(d.Get("short_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("short_name"); !isEmptyValue(reflect.ValueOf(shortNameProp)) && (ok || !reflect.DeepEqual(v, shortNameProp)) { - obj["shortName"] = shortNameProp - } - descriptionProp, err := expandTagsTagKeyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - purposeProp, err := expandTagsTagKeyPurpose(d.Get("purpose"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("purpose"); !isEmptyValue(reflect.ValueOf(purposeProp)) && (ok || !reflect.DeepEqual(v, purposeProp)) { - obj["purpose"] = purposeProp - } - purposeDataProp, err := expandTagsTagKeyPurposeData(d.Get("purpose_data"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("purpose_data"); !isEmptyValue(reflect.ValueOf(purposeDataProp)) && (ok || !reflect.DeepEqual(v, purposeDataProp)) { - obj["purposeData"] = purposeDataProp - } - - lockName, err := replaceVars(d, config, "tagKeys/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - 
- url, err := replaceVars(d, config, "{{TagsBasePath}}tagKeys") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new TagKey: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TagKey: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "tagKeys/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = TagsOperationWaitTimeWithResponse( - config, res, &opRes, "Creating TagKey", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create TagKey: %s", err) - } - - if err := d.Set("name", flattenTagsTagKeyName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "tagKeys/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating TagKey %q: %#v", d.Id(), res) - - return resourceTagsTagKeyRead(d, meta) -} - -func resourceTagsTagKeyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagKeys/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("TagsTagKey %q", d.Id())) - } - - if err := d.Set("name", flattenTagsTagKeyName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading TagKey: %s", err) - } - if err := d.Set("parent", flattenTagsTagKeyParent(res["parent"], d, config)); err != nil { - return fmt.Errorf("Error reading TagKey: %s", err) - } - if err := d.Set("short_name", flattenTagsTagKeyShortName(res["shortName"], d, config)); err != nil { - return fmt.Errorf("Error reading TagKey: %s", err) - } - if err := d.Set("namespaced_name", flattenTagsTagKeyNamespacedName(res["namespacedName"], d, config)); err != nil { - return fmt.Errorf("Error reading TagKey: %s", err) - } - if err := d.Set("description", flattenTagsTagKeyDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading TagKey: %s", err) - } - if err := d.Set("create_time", flattenTagsTagKeyCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading TagKey: %s", err) - } - if err := d.Set("update_time", flattenTagsTagKeyUpdateTime(res["updateTime"], d, config)); err != nil 
{ - return fmt.Errorf("Error reading TagKey: %s", err) - } - if err := d.Set("purpose", flattenTagsTagKeyPurpose(res["purpose"], d, config)); err != nil { - return fmt.Errorf("Error reading TagKey: %s", err) - } - - return nil -} - -func resourceTagsTagKeyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandTagsTagKeyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - lockName, err := replaceVars(d, config, "tagKeys/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagKeys/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating TagKey %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating TagKey %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TagKey %q: %#v", d.Id(), res) - } - - err = TagsOperationWaitTime( - config, res, "Updating TagKey", 
userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceTagsTagKeyRead(d, meta) -} - -func resourceTagsTagKeyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "tagKeys/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagKeys/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting TagKey %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TagKey") - } - - err = TagsOperationWaitTime( - config, res, "Deleting TagKey", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting TagKey %q: %#v", d.Id(), res) - return nil -} - -func resourceTagsTagKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "tagKeys/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "tagKeys/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenTagsTagKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func 
flattenTagsTagKeyParent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagKeyShortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagKeyNamespacedName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagKeyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagKeyCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagKeyUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagKeyPurpose(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandTagsTagKeyParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTagsTagKeyShortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTagsTagKeyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTagsTagKeyPurpose(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTagsTagKeyPurposeData(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tags_tag_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tags_tag_value.go deleted file mode 100644 index 2a892394b8..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tags_tag_value.go +++ /dev/null @@ -1,404 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceTagsTagValue() *schema.Resource { - return &schema.Resource{ - Create: resourceTagsTagValueCreate, - Read: resourceTagsTagValueRead, - Update: resourceTagsTagValueUpdate, - Delete: resourceTagsTagValueDelete, - - Importer: &schema.ResourceImporter{ - State: resourceTagsTagValueImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "parent": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Input only. The resource name of the new TagValue's parent. Must be of the form tagKeys/{tag_key_id}.`, - }, - "short_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 63), - Description: `Input only. User-assigned short name for TagValue. The short name should be unique for TagValues within the same parent TagKey. 
- -The short name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 256), - Description: `User-assigned description of the TagValue. Must not exceed 256 characters.`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Creation time. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The generated numeric id for the TagValue.`, - }, - "namespaced_name": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Namespaced name of the TagValue. Will be in the format {organizationId}/{tag_key_short_name}/{shortName}.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Output only. Update time. -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceTagsTagValueCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - parentProp, err := expandTagsTagValueParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - shortNameProp, err := expandTagsTagValueShortName(d.Get("short_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("short_name"); !isEmptyValue(reflect.ValueOf(shortNameProp)) && (ok || !reflect.DeepEqual(v, shortNameProp)) { - obj["shortName"] = shortNameProp - } - descriptionProp, err := expandTagsTagValueDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - lockName, err := replaceVars(d, config, "tagValues/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagValues") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new TagValue: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating TagValue: %s", err) - } - - // Store the ID now - id, err := 
replaceVars(d, config, "tagValues/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = TagsOperationWaitTimeWithResponse( - config, res, &opRes, "Creating TagValue", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create TagValue: %s", err) - } - - if err := d.Set("name", flattenTagsTagValueName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "tagValues/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating TagValue %q: %#v", d.Id(), res) - - return resourceTagsTagValueRead(d, meta) -} - -func resourceTagsTagValueRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagValues/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("TagsTagValue %q", d.Id())) - } - - if err := d.Set("name", flattenTagsTagValueName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading TagValue: %s", err) - } - if err := d.Set("parent", flattenTagsTagValueParent(res["parent"], d, config)); err != nil { - return fmt.Errorf("Error reading TagValue: %s", err) - } - if err := 
d.Set("short_name", flattenTagsTagValueShortName(res["shortName"], d, config)); err != nil { - return fmt.Errorf("Error reading TagValue: %s", err) - } - if err := d.Set("namespaced_name", flattenTagsTagValueNamespacedName(res["namespacedName"], d, config)); err != nil { - return fmt.Errorf("Error reading TagValue: %s", err) - } - if err := d.Set("description", flattenTagsTagValueDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading TagValue: %s", err) - } - if err := d.Set("create_time", flattenTagsTagValueCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading TagValue: %s", err) - } - if err := d.Set("update_time", flattenTagsTagValueUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading TagValue: %s", err) - } - - return nil -} - -func resourceTagsTagValueUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandTagsTagValueDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - lockName, err := replaceVars(d, config, "tagValues/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagValues/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating TagValue %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = 
addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating TagValue %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating TagValue %q: %#v", d.Id(), res) - } - - err = TagsOperationWaitTime( - config, res, "Updating TagValue", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceTagsTagValueRead(d, meta) -} - -func resourceTagsTagValueDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "tagValues/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagValues/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting TagValue %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TagValue") - } - - err = TagsOperationWaitTime( - config, res, "Deleting TagValue", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting TagValue %q: %#v", d.Id(), res) - return nil -} - -func 
resourceTagsTagValueImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "tagValues/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "tagValues/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenTagsTagValueName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenTagsTagValueParent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagValueShortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagValueNamespacedName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagValueDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagValueCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagValueUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandTagsTagValueParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTagsTagValueShortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTagsTagValueDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tpu_node.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tpu_node.go deleted file mode 100644 index 399a480176..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_tpu_node.go +++ /dev/null @@ -1,702 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "regexp" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// compareTpuNodeSchedulingConfig diff suppresses for the default -// scheduling, i.e. if preemptible is false, the API may either return no -// schedulingConfig or an empty schedulingConfig. 
-func compareTpuNodeSchedulingConfig(k, old, new string, d *schema.ResourceData) bool { - if k == "scheduling_config.0.preemptible" { - return old == "" && new == "false" - } - if k == "scheduling_config.#" { - o, n := d.GetChange("scheduling_config.0.preemptible") - return o.(bool) == n.(bool) - } - return false -} - -func tpuNodeCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { - old, new := diff.GetChange("network") - config := meta.(*Config) - - networkLinkRegex := regexp.MustCompile("projects/(.+)/global/networks/(.+)") - - var pid string - - if networkLinkRegex.MatchString(new.(string)) { - parts := networkLinkRegex.FindStringSubmatch(new.(string)) - pid = parts[1] - } - - project, err := config.NewResourceManagerClient(config.UserAgent).Projects.Get(pid).Do() - if err != nil { - return fmt.Errorf("Failed to retrieve project, pid: %s, err: %s", pid, err) - } - - if networkLinkRegex.MatchString(old.(string)) { - parts := networkLinkRegex.FindStringSubmatch(old.(string)) - i, err := StringToFixed64(parts[1]) - if err == nil { - if project.ProjectNumber == i { - if err := diff.SetNew("network", old); err != nil { - return err - } - return nil - } - } - } - return nil -} - -func ResourceTPUNode() *schema.Resource { - return &schema.Resource{ - Create: resourceTPUNodeCreate, - Read: resourceTPUNodeRead, - Update: resourceTPUNodeUpdate, - Delete: resourceTPUNodeDelete, - - Importer: &schema.ResourceImporter{ - State: resourceTPUNodeImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - CustomizeDiff: tpuNodeCustomizeDiff, - - Schema: map[string]*schema.Schema{ - "accelerator_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The type of hardware accelerators associated with this node.`, - }, - "name": { - Type: schema.TypeString, - 
Required: true, - ForceNew: true, - Description: `The immutable name of the TPU.`, - }, - "tensorflow_version": { - Type: schema.TypeString, - Required: true, - Description: `The version of Tensorflow running in the Node.`, - }, - "cidr_block": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The CIDR block that the TPU node will use when selecting an IP -address. This CIDR block must be a /29 block; the Compute Engine -networks API forbids a smaller block, and using a larger block would -be wasteful (a node can only consume one IP address). - -Errors will occur if the CIDR block has already been used for a -currently existing TPU node, the CIDR block conflicts with any -subnetworks in the user's provided network, or the provided network -is peered with another network that is using that CIDR block.`, - ConflictsWith: []string{"use_service_networking"}, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The user-supplied description of the TPU. Maximum of 512 characters.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Resource labels to represent user provided metadata.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "network": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of a network to peer the TPU node to. It must be a -preexisting Compute Engine network inside of the project on which -this API has been activated. 
If none is provided, "default" will be -used.`, - }, - "scheduling_config": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareTpuNodeSchedulingConfig, - Description: `Sets the scheduling options for this TPU instance.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "preemptible": { - Type: schema.TypeBool, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareTpuNodeSchedulingConfig, - Description: `Defines whether the TPU instance is preemptible.`, - }, - }, - }, - }, - "use_service_networking": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether the VPC peering for the node is set up through Service Networking API. -The VPC Peering should be set up before provisioning the node. If this field is set, -cidr_block field should not be specified. If the network that you want to peer the -TPU Node to is a Shared VPC network, the node must be created with this this field enabled.`, - Default: false, - ConflictsWith: []string{"cidr_block"}, - }, - "zone": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The GCP location for the TPU. If it is not provided, the provider zone is used.`, - }, - "network_endpoints": { - Type: schema.TypeList, - Computed: true, - Description: `The network endpoints where TPU workers can be accessed and sent work. 
-It is recommended that Tensorflow clients of the node first reach out -to the first (index 0) entry.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_address": { - Type: schema.TypeString, - Computed: true, - Description: `The IP address of this network endpoint.`, - }, - "port": { - Type: schema.TypeInt, - Computed: true, - Description: `The port of this network endpoint.`, - }, - }, - }, - }, - "service_account": { - Type: schema.TypeString, - Computed: true, - Description: `The service account used to run the tensor flow services within the -node. To share resources, including Google Cloud Storage data, with -the Tensorflow job running in the Node, this account must have -permissions to that data.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceTPUNodeCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandTPUNodeName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandTPUNodeDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - acceleratorTypeProp, err := expandTPUNodeAcceleratorType(d.Get("accelerator_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("accelerator_type"); !isEmptyValue(reflect.ValueOf(acceleratorTypeProp)) && (ok || !reflect.DeepEqual(v, acceleratorTypeProp)) { - 
obj["acceleratorType"] = acceleratorTypeProp - } - tensorflowVersionProp, err := expandTPUNodeTensorflowVersion(d.Get("tensorflow_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tensorflow_version"); !isEmptyValue(reflect.ValueOf(tensorflowVersionProp)) && (ok || !reflect.DeepEqual(v, tensorflowVersionProp)) { - obj["tensorflowVersion"] = tensorflowVersionProp - } - networkProp, err := expandTPUNodeNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - cidrBlockProp, err := expandTPUNodeCidrBlock(d.Get("cidr_block"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cidr_block"); !isEmptyValue(reflect.ValueOf(cidrBlockProp)) && (ok || !reflect.DeepEqual(v, cidrBlockProp)) { - obj["cidrBlock"] = cidrBlockProp - } - useServiceNetworkingProp, err := expandTPUNodeUseServiceNetworking(d.Get("use_service_networking"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("use_service_networking"); !isEmptyValue(reflect.ValueOf(useServiceNetworkingProp)) && (ok || !reflect.DeepEqual(v, useServiceNetworkingProp)) { - obj["useServiceNetworking"] = useServiceNetworkingProp - } - schedulingConfigProp, err := expandTPUNodeSchedulingConfig(d.Get("scheduling_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("scheduling_config"); !isEmptyValue(reflect.ValueOf(schedulingConfigProp)) && (ok || !reflect.DeepEqual(v, schedulingConfigProp)) { - obj["schedulingConfig"] = schedulingConfigProp - } - labelsProp, err := expandTPUNodeLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err 
:= replaceVars(d, config, "{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/nodes?nodeId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Node: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Node: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Node: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/nodes/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = TPUOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Node", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Node: %s", err) - } - - if err := d.Set("name", flattenTPUNodeName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/nodes/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Node %q: %#v", d.Id(), res) - - return resourceTPUNodeRead(d, meta) -} - -func resourceTPUNodeRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/nodes/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Node: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("TPUNode %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Node: %s", err) - } - - if err := d.Set("name", flattenTPUNodeName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("description", flattenTPUNodeDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("accelerator_type", flattenTPUNodeAcceleratorType(res["acceleratorType"], d, config)); err != nil { - return fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("tensorflow_version", flattenTPUNodeTensorflowVersion(res["tensorflowVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("network", flattenTPUNodeNetwork(res["network"], d, config)); 
err != nil { - return fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("cidr_block", flattenTPUNodeCidrBlock(res["cidrBlock"], d, config)); err != nil { - return fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("service_account", flattenTPUNodeServiceAccount(res["serviceAccount"], d, config)); err != nil { - return fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("use_service_networking", flattenTPUNodeUseServiceNetworking(res["useServiceNetworking"], d, config)); err != nil { - return fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("scheduling_config", flattenTPUNodeSchedulingConfig(res["schedulingConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("network_endpoints", flattenTPUNodeNetworkEndpoints(res["networkEndpoints"], d, config)); err != nil { - return fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("labels", flattenTPUNodeLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Node: %s", err) - } - - return nil -} - -func resourceTPUNodeUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Node: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("tensorflow_version") { - obj := make(map[string]interface{}) - - tensorflowVersionProp, err := expandTPUNodeTensorflowVersion(d.Get("tensorflow_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tensorflow_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tensorflowVersionProp)) { - obj["tensorflowVersion"] = tensorflowVersionProp - } - - url, err := replaceVars(d, config, 
"{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/nodes/{{name}}:reimage") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating Node %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Node %q: %#v", d.Id(), res) - } - - err = TPUOperationWaitTime( - config, res, project, "Updating Node", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceTPUNodeRead(d, meta) -} - -func resourceTPUNodeDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Node: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/nodes/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Node %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Node") - } - - err = TPUOperationWaitTime( - config, res, project, "Deleting Node", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Node %q: %#v", d.Id(), res) - return nil -} - -func 
resourceTPUNodeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/nodes/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/nodes/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenTPUNodeName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenTPUNodeDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeAcceleratorType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeTensorflowVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeCidrBlock(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeUseServiceNetworking(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeSchedulingConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["preemptible"] = - 
flattenTPUNodeSchedulingConfigPreemptible(original["preemptible"], d, config) - return []interface{}{transformed} -} -func flattenTPUNodeSchedulingConfigPreemptible(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeNetworkEndpoints(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "ip_address": flattenTPUNodeNetworkEndpointsIpAddress(original["ipAddress"], d, config), - "port": flattenTPUNodeNetworkEndpointsPort(original["port"], d, config), - }) - } - return transformed -} -func flattenTPUNodeNetworkEndpointsIpAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeNetworkEndpointsPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenTPUNodeLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandTPUNodeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeAcceleratorType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandTPUNodeTensorflowVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeCidrBlock(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeUseServiceNetworking(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeSchedulingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPreemptible, err := expandTPUNodeSchedulingConfigPreemptible(original["preemptible"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPreemptible); val.IsValid() && !isEmptyValue(val) { - transformed["preemptible"] = transformedPreemptible - } - - return transformed, nil -} - -func expandTPUNodeSchedulingConfigPreemptible(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_dataset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_dataset.go deleted file mode 100644 index d64db18dfd..0000000000 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_dataset.go +++ /dev/null @@ -1,455 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceVertexAIDataset() *schema.Resource { - return &schema.Resource{ - Create: resourceVertexAIDatasetCreate, - Read: resourceVertexAIDatasetRead, - Update: resourceVertexAIDatasetUpdate, - Delete: resourceVertexAIDatasetDelete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `The user-defined name of the Dataset. The name can be up to 128 characters long and can be consist of any UTF-8 characters.`, - }, - "metadata_schema_uri": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Points to a YAML file stored on Google Cloud Storage describing additional information about the Dataset. The schema is defined as an OpenAPI 3.0.2 Schema Object. 
The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/metadata/.`, - }, - "encryption_spec": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Customer-managed encryption key spec for a Dataset. If set, this Dataset and all sub-resources of this Dataset will be secured by this key.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. -Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created.`, - }, - }, - }, - }, - "labels": { - Type: schema.TypeMap, - Computed: true, - Optional: true, - Description: `A set of key/value label pairs to assign to this Workflow.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The region of the dataset. eg us-central1`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of when the dataset was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the Dataset. 
This value is set by Google.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of when the dataset was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceVertexAIDatasetCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandVertexAIDatasetDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandVertexAIDatasetLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - encryptionSpecProp, err := expandVertexAIDatasetEncryptionSpec(d.Get("encryption_spec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("encryption_spec"); !isEmptyValue(reflect.ValueOf(encryptionSpecProp)) && (ok || !reflect.DeepEqual(v, encryptionSpecProp)) { - obj["encryptionSpec"] = encryptionSpecProp - } - metadataSchemaUriProp, err := expandVertexAIDatasetMetadataSchemaUri(d.Get("metadata_schema_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata_schema_uri"); !isEmptyValue(reflect.ValueOf(metadataSchemaUriProp)) && (ok || !reflect.DeepEqual(v, metadataSchemaUriProp)) { - obj["metadataSchemaUri"] = metadataSchemaUriProp - } - - url, err := replaceVars(d, 
config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/datasets") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Dataset: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Dataset: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = VertexAIOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Dataset", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Dataset: %s", err) - } - - if err := d.Set("name", flattenVertexAIDatasetName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Dataset %q: %#v", d.Id(), res) - - return resourceVertexAIDatasetRead(d, meta) -} - -func resourceVertexAIDatasetRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("VertexAIDataset %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - - if err := d.Set("name", flattenVertexAIDatasetName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("display_name", flattenVertexAIDatasetDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("create_time", flattenVertexAIDatasetCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("update_time", flattenVertexAIDatasetUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("labels", flattenVertexAIDatasetLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error 
reading Dataset: %s", err) - } - if err := d.Set("encryption_spec", flattenVertexAIDatasetEncryptionSpec(res["encryptionSpec"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("metadata_schema_uri", flattenVertexAIDatasetMetadataSchemaUri(res["metadataSchemaUri"], d, config)); err != nil { - return fmt.Errorf("Error reading Dataset: %s", err) - } - - return nil -} - -func resourceVertexAIDatasetUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandVertexAIDatasetDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandVertexAIDatasetLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Dataset %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if 
err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Dataset %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Dataset %q: %#v", d.Id(), res) - } - - return resourceVertexAIDatasetRead(d, meta) -} - -func resourceVertexAIDatasetDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Dataset %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Dataset") - } - - err = VertexAIOperationWaitTime( - config, res, project, "Deleting Dataset", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Dataset %q: %#v", d.Id(), res) - return nil -} - -func flattenVertexAIDatasetName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIDatasetDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenVertexAIDatasetCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIDatasetUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIDatasetLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIDatasetEncryptionSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["kms_key_name"] = - flattenVertexAIDatasetEncryptionSpecKmsKeyName(original["kmsKeyName"], d, config) - return []interface{}{transformed} -} -func flattenVertexAIDatasetEncryptionSpecKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIDatasetMetadataSchemaUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandVertexAIDatasetDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIDatasetLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandVertexAIDatasetEncryptionSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandVertexAIDatasetEncryptionSpecKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - return transformed, nil -} - -func expandVertexAIDatasetEncryptionSpecKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIDatasetMetadataSchemaUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_featurestore.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_featurestore.go deleted file mode 100644 index 5c68f48c5f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_featurestore.go +++ /dev/null @@ -1,659 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceVertexAIFeaturestore() *schema.Resource { - return &schema.Resource{ - Create: resourceVertexAIFeaturestoreCreate, - Read: resourceVertexAIFeaturestoreRead, - Update: resourceVertexAIFeaturestoreUpdate, - Delete: resourceVertexAIFeaturestoreDelete, - - Importer: &schema.ResourceImporter{ - State: resourceVertexAIFeaturestoreImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "encryption_spec": { - Type: schema.TypeList, - Optional: true, - Description: `If set, both of the online and offline data storage will be secured by this key.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_name": { - Type: schema.TypeString, - Required: true, - Description: `The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the compute resource is created.`, - }, - }, - }, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `A set of key/value label pairs to assign to this Featurestore.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name of the Featurestore. This value may be up to 60 characters, and valid characters are [a-z0-9_]. 
The first character cannot be a number.`, - }, - "online_serving_config": { - Type: schema.TypeList, - Optional: true, - Description: `Config for online serving resources.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "fixed_node_count": { - Type: schema.TypeInt, - Optional: true, - Description: `The number of nodes for each cluster. The number of nodes will not scale automatically but can be scaled manually by providing different values when updating.`, - ExactlyOneOf: []string{"online_serving_config.0.fixed_node_count", "online_serving_config.0.scaling"}, - }, - "scaling": { - Type: schema.TypeList, - Optional: true, - Description: `Online serving scaling configuration. Only one of fixedNodeCount and scaling can be set. Setting one will reset the other.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_node_count": { - Type: schema.TypeInt, - Required: true, - Description: `The maximum number of nodes to scale up to. Must be greater than minNodeCount, and less than or equal to 10 times of 'minNodeCount'.`, - }, - "min_node_count": { - Type: schema.TypeInt, - Required: true, - Description: `The minimum number of nodes to scale down to. Must be greater than or equal to 1.`, - }, - }, - }, - ExactlyOneOf: []string{"online_serving_config.0.fixed_node_count", "online_serving_config.0.scaling"}, - }, - }, - }, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The region of the dataset. 
eg us-central1`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of when the featurestore was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "etag": { - Type: schema.TypeString, - Computed: true, - Description: `Used to perform consistent read-modify-write updates.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of when the featurestore was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "force_destroy": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `If set to true, any EntityTypes and Features for this Featurestore will also be deleted`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceVertexAIFeaturestoreCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandVertexAIFeaturestoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - onlineServingConfigProp, err := expandVertexAIFeaturestoreOnlineServingConfig(d.Get("online_serving_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("online_serving_config"); !isEmptyValue(reflect.ValueOf(onlineServingConfigProp)) && (ok || !reflect.DeepEqual(v, onlineServingConfigProp)) { - obj["onlineServingConfig"] = onlineServingConfigProp - } - encryptionSpecProp, err := expandVertexAIFeaturestoreEncryptionSpec(d.Get("encryption_spec"), d, config) - if err != 
nil { - return err - } else if v, ok := d.GetOkExists("encryption_spec"); !isEmptyValue(reflect.ValueOf(encryptionSpecProp)) && (ok || !reflect.DeepEqual(v, encryptionSpecProp)) { - obj["encryptionSpec"] = encryptionSpecProp - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/featurestores?featurestoreId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Featurestore: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Featurestore: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Featurestore: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/featurestores/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = VertexAIOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Featurestore", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Featurestore: %s", err) - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/featurestores/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Featurestore %q: %#v", d.Id(), res) - - return resourceVertexAIFeaturestoreRead(d, meta) -} - -func resourceVertexAIFeaturestoreRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/featurestores/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Featurestore: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("VertexAIFeaturestore %q", d.Id())) - } - - // Explicitly set virtual fields to default values if unset - if _, ok := d.GetOkExists("force_destroy"); !ok { - if err := d.Set("force_destroy", false); err != nil { - return fmt.Errorf("Error setting force_destroy: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Featurestore: %s", err) - } - - if err := d.Set("create_time", flattenVertexAIFeaturestoreCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Featurestore: %s", err) - } - if err := d.Set("update_time", flattenVertexAIFeaturestoreUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Featurestore: %s", err) - } - if err := d.Set("labels", 
flattenVertexAIFeaturestoreLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Featurestore: %s", err) - } - if err := d.Set("online_serving_config", flattenVertexAIFeaturestoreOnlineServingConfig(res["onlineServingConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Featurestore: %s", err) - } - if err := d.Set("encryption_spec", flattenVertexAIFeaturestoreEncryptionSpec(res["encryptionSpec"], d, config)); err != nil { - return fmt.Errorf("Error reading Featurestore: %s", err) - } - - return nil -} - -func resourceVertexAIFeaturestoreUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Featurestore: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandVertexAIFeaturestoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - onlineServingConfigProp, err := expandVertexAIFeaturestoreOnlineServingConfig(d.Get("online_serving_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("online_serving_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, onlineServingConfigProp)) { - obj["onlineServingConfig"] = onlineServingConfigProp - } - encryptionSpecProp, err := expandVertexAIFeaturestoreEncryptionSpec(d.Get("encryption_spec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("encryption_spec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, encryptionSpecProp)) { - obj["encryptionSpec"] = encryptionSpecProp - } - - url, err := 
replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/featurestores/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Featurestore %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("online_serving_config") { - updateMask = append(updateMask, "onlineServingConfig") - } - - if d.HasChange("encryption_spec") { - updateMask = append(updateMask, "encryptionSpec") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Featurestore %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Featurestore %q: %#v", d.Id(), res) - } - - err = VertexAIOperationWaitTime( - config, res, project, "Updating Featurestore", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceVertexAIFeaturestoreRead(d, meta) -} - -func resourceVertexAIFeaturestoreDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Featurestore: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/featurestores/{{name}}") - if err != nil { - 
return err - } - - var obj map[string]interface{} - - if v, ok := d.GetOk("force_destroy"); ok { - url, err = addQueryParams(url, map[string]string{"force": fmt.Sprintf("%v", v)}) - if err != nil { - return err - } - } - log.Printf("[DEBUG] Deleting Featurestore %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Featurestore") - } - - err = VertexAIOperationWaitTime( - config, res, project, "Deleting Featurestore", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Featurestore %q: %#v", d.Id(), res) - return nil -} - -func resourceVertexAIFeaturestoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/featurestores/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/featurestores/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Explicitly set virtual fields to default values on import - if err := d.Set("force_destroy", false); err != nil { - return nil, fmt.Errorf("Error setting force_destroy: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenVertexAIFeaturestoreCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIFeaturestoreUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenVertexAIFeaturestoreLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIFeaturestoreOnlineServingConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["fixed_node_count"] = - flattenVertexAIFeaturestoreOnlineServingConfigFixedNodeCount(original["fixedNodeCount"], d, config) - transformed["scaling"] = - flattenVertexAIFeaturestoreOnlineServingConfigScaling(original["scaling"], d, config) - return []interface{}{transformed} -} -func flattenVertexAIFeaturestoreOnlineServingConfigFixedNodeCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenVertexAIFeaturestoreOnlineServingConfigScaling(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["min_node_count"] = - flattenVertexAIFeaturestoreOnlineServingConfigScalingMinNodeCount(original["minNodeCount"], d, config) - transformed["max_node_count"] = - flattenVertexAIFeaturestoreOnlineServingConfigScalingMaxNodeCount(original["maxNodeCount"], d, config) - return []interface{}{transformed} -} -func flattenVertexAIFeaturestoreOnlineServingConfigScalingMinNodeCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if 
strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenVertexAIFeaturestoreOnlineServingConfigScalingMaxNodeCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenVertexAIFeaturestoreEncryptionSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["kms_key_name"] = - flattenVertexAIFeaturestoreEncryptionSpecKmsKeyName(original["kmsKeyName"], d, config) - return []interface{}{transformed} -} -func flattenVertexAIFeaturestoreEncryptionSpecKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandVertexAIFeaturestoreLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandVertexAIFeaturestoreOnlineServingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedFixedNodeCount, err := expandVertexAIFeaturestoreOnlineServingConfigFixedNodeCount(original["fixed_node_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFixedNodeCount); val.IsValid() && !isEmptyValue(val) { - transformed["fixedNodeCount"] = transformedFixedNodeCount - } - - transformedScaling, err := expandVertexAIFeaturestoreOnlineServingConfigScaling(original["scaling"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedScaling); val.IsValid() && !isEmptyValue(val) { - transformed["scaling"] = transformedScaling - } - - return transformed, nil -} - -func expandVertexAIFeaturestoreOnlineServingConfigFixedNodeCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIFeaturestoreOnlineServingConfigScaling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinNodeCount, err := expandVertexAIFeaturestoreOnlineServingConfigScalingMinNodeCount(original["min_node_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMinNodeCount); val.IsValid() && !isEmptyValue(val) { - transformed["minNodeCount"] = transformedMinNodeCount - } - - transformedMaxNodeCount, err := expandVertexAIFeaturestoreOnlineServingConfigScalingMaxNodeCount(original["max_node_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMaxNodeCount); val.IsValid() && !isEmptyValue(val) { - transformed["maxNodeCount"] = transformedMaxNodeCount - } - - return transformed, nil -} - -func expandVertexAIFeaturestoreOnlineServingConfigScalingMinNodeCount(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIFeaturestoreOnlineServingConfigScalingMaxNodeCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIFeaturestoreEncryptionSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandVertexAIFeaturestoreEncryptionSpecKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - return transformed, nil -} - -func expandVertexAIFeaturestoreEncryptionSpecKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_featurestore_entitytype_feature.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_featurestore_entitytype_feature.go deleted file mode 100644 index 6961cced63..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_featurestore_entitytype_feature.go +++ /dev/null @@ -1,390 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. 
-// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceVertexAIFeaturestoreEntitytypeFeature() *schema.Resource { - return &schema.Resource{ - Create: resourceVertexAIFeaturestoreEntitytypeFeatureCreate, - Read: resourceVertexAIFeaturestoreEntitytypeFeatureRead, - Update: resourceVertexAIFeaturestoreEntitytypeFeatureUpdate, - Delete: resourceVertexAIFeaturestoreEntitytypeFeatureDelete, - - Importer: &schema.ResourceImporter{ - State: resourceVertexAIFeaturestoreEntitytypeFeatureImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "entitytype": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the Featurestore to use, in the format projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entitytype}.`, - }, - "value_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Type of Feature value. Immutable. https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.featurestores.entityTypes.features#ValueType`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Description of the feature.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `A set of key/value label pairs to assign to the feature.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name of the feature. 
The feature can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given an entity type.`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of when the entity type was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "etag": { - Type: schema.TypeString, - Computed: true, - Description: `Used to perform consistent read-modify-write updates.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp when the entity type was most recently updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceVertexAIFeaturestoreEntitytypeFeatureCreate(d *schema.ResourceData, meta interface{}) error { - var project string - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandVertexAIFeaturestoreEntitytypeFeatureLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - descriptionProp, err := expandVertexAIFeaturestoreEntitytypeFeatureDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - valueTypeProp, err := expandVertexAIFeaturestoreEntitytypeFeatureValueType(d.Get("value_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("value_type"); 
!isEmptyValue(reflect.ValueOf(valueTypeProp)) && (ok || !reflect.DeepEqual(v, valueTypeProp)) { - obj["valueType"] = valueTypeProp - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{entitytype}}/features?featureId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new FeaturestoreEntitytypeFeature: %#v", obj) - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - if v, ok := d.GetOk("entitytype"); ok { - re := regexp.MustCompile("projects/([a-zA-Z0-9-]*)/(?:locations|regions)/([a-zA-Z0-9-]*)") - switch { - case re.MatchString(v.(string)): - if res := re.FindStringSubmatch(v.(string)); len(res) == 3 && res[1] != "" { - project = res[1] - } - } - } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating FeaturestoreEntitytypeFeature: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{entitytype}}/features/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = VertexAIOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating FeaturestoreEntitytypeFeature", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create FeaturestoreEntitytypeFeature: %s", err) - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "{{entitytype}}/features/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating FeaturestoreEntitytypeFeature %q: %#v", d.Id(), res) - - return resourceVertexAIFeaturestoreEntitytypeFeatureRead(d, meta) -} - -func resourceVertexAIFeaturestoreEntitytypeFeatureRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{entitytype}}/features/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("VertexAIFeaturestoreEntitytypeFeature %q", d.Id())) - } - - if err := d.Set("create_time", flattenVertexAIFeaturestoreEntitytypeFeatureCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading FeaturestoreEntitytypeFeature: %s", err) - } - if err := d.Set("update_time", flattenVertexAIFeaturestoreEntitytypeFeatureUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading FeaturestoreEntitytypeFeature: %s", err) - } - if err := d.Set("labels", flattenVertexAIFeaturestoreEntitytypeFeatureLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading FeaturestoreEntitytypeFeature: %s", err) - } - if err := d.Set("description", flattenVertexAIFeaturestoreEntitytypeFeatureDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading FeaturestoreEntitytypeFeature: %s", err) - } - if err := d.Set("value_type", 
flattenVertexAIFeaturestoreEntitytypeFeatureValueType(res["valueType"], d, config)); err != nil { - return fmt.Errorf("Error reading FeaturestoreEntitytypeFeature: %s", err) - } - - return nil -} - -func resourceVertexAIFeaturestoreEntitytypeFeatureUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - labelsProp, err := expandVertexAIFeaturestoreEntitytypeFeatureLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - descriptionProp, err := expandVertexAIFeaturestoreEntitytypeFeatureDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{entitytype}}/features/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating FeaturestoreEntitytypeFeature %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, 
obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating FeaturestoreEntitytypeFeature %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating FeaturestoreEntitytypeFeature %q: %#v", d.Id(), res) - } - - return resourceVertexAIFeaturestoreEntitytypeFeatureRead(d, meta) -} - -func resourceVertexAIFeaturestoreEntitytypeFeatureDelete(d *schema.ResourceData, meta interface{}) error { - var project string - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{entitytype}}/features/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - if v, ok := d.GetOk("entitytype"); ok { - re := regexp.MustCompile("projects/([a-zA-Z0-9-]*)/(?:locations|regions)/([a-zA-Z0-9-]*)") - switch { - case re.MatchString(v.(string)): - if res := re.FindStringSubmatch(v.(string)); len(res) == 3 && res[1] != "" { - project = res[1] - } - } - } - log.Printf("[DEBUG] Deleting FeaturestoreEntitytypeFeature %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "FeaturestoreEntitytypeFeature") - } - - err = VertexAIOperationWaitTime( - config, res, project, "Deleting FeaturestoreEntitytypeFeature", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting FeaturestoreEntitytypeFeature %q: %#v", d.Id(), res) - return nil -} - -func resourceVertexAIFeaturestoreEntitytypeFeatureImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := 
parseImportId([]string{ - "(?P.+)/features/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "{{entitytype}}/features/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenVertexAIFeaturestoreEntitytypeFeatureCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIFeaturestoreEntitytypeFeatureUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIFeaturestoreEntitytypeFeatureLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIFeaturestoreEntitytypeFeatureDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIFeaturestoreEntitytypeFeatureValueType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandVertexAIFeaturestoreEntitytypeFeatureLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandVertexAIFeaturestoreEntitytypeFeatureDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIFeaturestoreEntitytypeFeatureValueType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_index.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_index.go deleted file mode 
100644 index c6efe5ca35..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_index.go +++ /dev/null @@ -1,1078 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceVertexAIIndex() *schema.Resource { - return &schema.Resource{ - Create: resourceVertexAIIndexCreate, - Read: resourceVertexAIIndexRead, - Update: resourceVertexAIIndexUpdate, - Delete: resourceVertexAIIndexDelete, - - Importer: &schema.ResourceImporter{ - State: resourceVertexAIIndexImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(60 * time.Minute), - Update: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(60 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `The display name of the Index. The name can be up to 128 characters long and can consist of any UTF-8 characters.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `The description of the Index.`, - }, - "index_update_method": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The update method to use with this Index. The value must be the followings. If not set, BATCH_UPDATE will be used by default. 
-* BATCH_UPDATE: user can call indexes.patch with files on Cloud Storage of datapoints to update. -* STREAM_UPDATE: user can call indexes.upsertDatapoints/DeleteDatapoints to update the Index and the updates will be applied in corresponding DeployedIndexes in nearly real-time.`, - Default: "BATCH_UPDATE", - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `The labels with user-defined metadata to organize your Indexes.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "metadata": { - Type: schema.TypeList, - Optional: true, - Description: `An additional information about the Index`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "config": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The configuration of the Matching Engine Index.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dimensions": { - Type: schema.TypeInt, - Required: true, - Description: `The number of dimensions of the input vectors.`, - }, - "algorithm_config": { - Type: schema.TypeList, - Optional: true, - Description: `The configuration with regard to the algorithms used for efficient search.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "brute_force_config": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration options for using brute force search, which simply implements the -standard linear search in the database for each query.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{}, - }, - ExactlyOneOf: []string{}, - }, - "tree_ah_config": { - Type: schema.TypeList, - Optional: true, - Description: `Configuration options for using the tree-AH algorithm (Shallow tree + Asymmetric Hashing). 
-Please refer to this paper for more details: https://arxiv.org/abs/1908.10396`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "leaf_node_embedding_count": { - Type: schema.TypeInt, - Optional: true, - Description: `Number of embeddings on each leaf node. The default value is 1000 if not set.`, - Default: 1000, - }, - "leaf_nodes_to_search_percent": { - Type: schema.TypeInt, - Optional: true, - Description: `The default percentage of leaf nodes that any query may be searched. Must be in -range 1-100, inclusive. The default value is 10 (means 10%) if not set.`, - Default: 10, - }, - }, - }, - ExactlyOneOf: []string{}, - }, - }, - }, - }, - "approximate_neighbors_count": { - Type: schema.TypeInt, - Optional: true, - Description: `The default number of neighbors to find via approximate search before exact reordering is -performed. Exact reordering is a procedure where results returned by an -approximate search algorithm are reordered via a more expensive distance computation. -Required if tree-AH algorithm is used.`, - }, - "distance_measure_type": { - Type: schema.TypeString, - Optional: true, - Description: `The distance measure used in nearest neighbor search. The value must be one of the followings: -* SQUARED_L2_DISTANCE: Euclidean (L_2) Distance -* L1_DISTANCE: Manhattan (L_1) Distance -* COSINE_DISTANCE: Cosine Distance. Defined as 1 - cosine similarity. -* DOT_PRODUCT_DISTANCE: Dot Product Distance. Defined as a negative of the dot product`, - Default: "DOT_PRODUCT_DISTANCE", - }, - "feature_norm_type": { - Type: schema.TypeString, - Optional: true, - Description: `Type of normalization to be carried out on each vector. 
The value must be one of the followings: -* UNIT_L2_NORM: Unit L2 normalization type -* NONE: No normalization type is specified.`, - Default: "NONE", - }, - }, - }, - }, - "contents_delta_uri": { - Type: schema.TypeString, - Optional: true, - Description: `Allows inserting, updating or deleting the contents of the Matching Engine Index. -The string must be a valid Cloud Storage directory path. If this -field is set when calling IndexService.UpdateIndex, then no other -Index field can be also updated as part of the same call. -The expected structure and format of the files this URI points to is -described at https://cloud.google.com/vertex-ai/docs/matching-engine/using-matching-engine#input-data-format`, - }, - "is_complete_overwrite": { - Type: schema.TypeBool, - Optional: true, - Description: `If this field is set together with contentsDeltaUri when calling IndexService.UpdateIndex, -then existing content of the Index will be replaced by the data from the contentsDeltaUri.`, - Default: false, - }, - }, - }, - }, - "region": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The region of the index. eg us-central1`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of when the Index was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "deployed_indexes": { - Type: schema.TypeList, - Computed: true, - Description: `The pointers to DeployedIndexes created from this Index. 
An Index can be only deleted if all its DeployedIndexes had been undeployed first.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "deployed_index_id": { - Type: schema.TypeString, - Computed: true, - Description: `The ID of the DeployedIndex in the above IndexEndpoint.`, - }, - "index_endpoint": { - Type: schema.TypeString, - Computed: true, - Description: `A resource name of the IndexEndpoint.`, - }, - }, - }, - }, - "etag": { - Type: schema.TypeString, - Computed: true, - Description: `Used to perform consistent read-modify-write updates.`, - }, - "index_stats": { - Type: schema.TypeList, - Computed: true, - Description: `Stats of the index resource.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "shards_count": { - Type: schema.TypeInt, - Computed: true, - Description: `The number of shards in the Index.`, - }, - "vectors_count": { - Type: schema.TypeString, - Computed: true, - Description: `The number of vectors in the Index.`, - }, - }, - }, - }, - "metadata_schema_uri": { - Type: schema.TypeString, - Computed: true, - Description: `Points to a YAML file stored on Google Cloud Storage describing additional information about the Index, that is specific to it. 
Unset if the Index does not have any additional information.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The resource name of the Index.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of when the Index was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceVertexAIIndexCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandVertexAIIndexDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandVertexAIIndexDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - metadataProp, err := expandVertexAIIndexMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(reflect.ValueOf(metadataProp)) && (ok || !reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - labelsProp, err := expandVertexAIIndexLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - 
indexUpdateMethodProp, err := expandVertexAIIndexIndexUpdateMethod(d.Get("index_update_method"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("index_update_method"); !isEmptyValue(reflect.ValueOf(indexUpdateMethodProp)) && (ok || !reflect.DeepEqual(v, indexUpdateMethodProp)) { - obj["indexUpdateMethod"] = indexUpdateMethodProp - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/indexes") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Index: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Index: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/indexes/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = VertexAIOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Index", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Index: %s", err) - } - - if err := d.Set("name", flattenVertexAIIndexName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/indexes/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Index %q: %#v", d.Id(), res) - - return resourceVertexAIIndexRead(d, meta) -} - -func resourceVertexAIIndexRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/indexes/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("VertexAIIndex %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - - if err := d.Set("name", flattenVertexAIIndexName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("display_name", flattenVertexAIIndexDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("description", flattenVertexAIIndexDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("metadata", flattenVertexAIIndexMetadata(res["metadata"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("metadata_schema_uri", 
flattenVertexAIIndexMetadataSchemaUri(res["metadataSchemaUri"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("deployed_indexes", flattenVertexAIIndexDeployedIndexes(res["deployedIndexes"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("labels", flattenVertexAIIndexLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("create_time", flattenVertexAIIndexCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("update_time", flattenVertexAIIndexUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("index_stats", flattenVertexAIIndexIndexStats(res["indexStats"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("index_update_method", flattenVertexAIIndexIndexUpdateMethod(res["indexUpdateMethod"], d, config)); err != nil { - return fmt.Errorf("Error reading Index: %s", err) - } - - return nil -} - -func resourceVertexAIIndexUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandVertexAIIndexDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandVertexAIIndexDescription(d.Get("description"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - metadataProp, err := expandVertexAIIndexMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - labelsProp, err := expandVertexAIIndexLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/indexes/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Index %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("metadata") { - updateMask = append(updateMask, "metadata") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - newUpdateMask := []string{} - - if d.HasChange("metadata.0.contents_delta_uri") { - // Use the current value of isCompleteOverwrite when updating contentsDeltaUri - newUpdateMask = append(newUpdateMask, "metadata.contentsDeltaUri") - newUpdateMask = append(newUpdateMask, "metadata.isCompleteOverwrite") - } - - for _, mask := range updateMask { - // Use granular update masks instead of 'metadata' to avoid the 
following error: - // 'If `contents_delta_gcs_uri` is set as part of `index.metadata`, then no other Index fields can be also updated as part of the same update call.' - if mask == "metadata" { - continue - } - newUpdateMask = append(newUpdateMask, mask) - } - - // Refreshing updateMask after adding extra schema entries - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Index %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Index %q: %#v", d.Id(), res) - } - - err = VertexAIOperationWaitTime( - config, res, project, "Updating Index", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceVertexAIIndexRead(d, meta) -} - -func resourceVertexAIIndexDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/indexes/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Index %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, 
d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Index") - } - - err = VertexAIOperationWaitTime( - config, res, project, "Deleting Index", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Index %q: %#v", d.Id(), res) - return nil -} - -func resourceVertexAIIndexImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/indexes/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/indexes/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenVertexAIIndexName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenVertexAIIndexDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIIndexDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIIndexMetadata(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["contents_delta_uri"] = - flattenVertexAIIndexMetadataContentsDeltaUri(original["contentsDeltaUri"], d, config) - transformed["is_complete_overwrite"] = - flattenVertexAIIndexMetadataIsCompleteOverwrite(original["isCompleteOverwrite"], d, config) - transformed["config"] = - 
flattenVertexAIIndexMetadataConfig(original["config"], d, config) - return []interface{}{transformed} -} -func flattenVertexAIIndexMetadataContentsDeltaUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // We want to ignore read on this field, but cannot because it is nested - return d.Get("metadata.0.contents_delta_uri") -} - -func flattenVertexAIIndexMetadataIsCompleteOverwrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // We want to ignore read on this field, but cannot because it is nested - return d.Get("metadata.0.is_complete_overwrite") -} - -func flattenVertexAIIndexMetadataConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dimensions"] = - flattenVertexAIIndexMetadataConfigDimensions(original["dimensions"], d, config) - transformed["approximate_neighbors_count"] = - flattenVertexAIIndexMetadataConfigApproximateNeighborsCount(original["approximateNeighborsCount"], d, config) - transformed["distance_measure_type"] = - flattenVertexAIIndexMetadataConfigDistanceMeasureType(original["distanceMeasureType"], d, config) - transformed["feature_norm_type"] = - flattenVertexAIIndexMetadataConfigFeatureNormType(original["featureNormType"], d, config) - transformed["algorithm_config"] = - flattenVertexAIIndexMetadataConfigAlgorithmConfig(original["algorithmConfig"], d, config) - return []interface{}{transformed} -} -func flattenVertexAIIndexMetadataConfigDimensions(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - 
- return v // let terraform core handle it otherwise -} - -func flattenVertexAIIndexMetadataConfigApproximateNeighborsCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenVertexAIIndexMetadataConfigDistanceMeasureType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIIndexMetadataConfigFeatureNormType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIIndexMetadataConfigAlgorithmConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["tree_ah_config"] = - flattenVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfig(original["treeAhConfig"], d, config) - transformed["brute_force_config"] = - flattenVertexAIIndexMetadataConfigAlgorithmConfigBruteForceConfig(original["bruteForceConfig"], d, config) - return []interface{}{transformed} -} -func flattenVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["leaf_node_embedding_count"] = - flattenVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodeEmbeddingCount(original["leafNodeEmbeddingCount"], d, config) - transformed["leaf_nodes_to_search_percent"] = - 
flattenVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodesToSearchPercent(original["leafNodesToSearchPercent"], d, config) - return []interface{}{transformed} -} -func flattenVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodeEmbeddingCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodesToSearchPercent(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenVertexAIIndexMetadataConfigAlgorithmConfigBruteForceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - transformed := make(map[string]interface{}) - return []interface{}{transformed} -} - -func flattenVertexAIIndexMetadataSchemaUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIIndexDeployedIndexes(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = 
append(transformed, map[string]interface{}{ - "index_endpoint": flattenVertexAIIndexDeployedIndexesIndexEndpoint(original["indexEndpoint"], d, config), - "deployed_index_id": flattenVertexAIIndexDeployedIndexesDeployedIndexId(original["deployedIndexId"], d, config), - }) - } - return transformed -} -func flattenVertexAIIndexDeployedIndexesIndexEndpoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIIndexDeployedIndexesDeployedIndexId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIIndexLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIIndexCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIIndexUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIIndexIndexStats(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["vectors_count"] = - flattenVertexAIIndexIndexStatsVectorsCount(original["vectorsCount"], d, config) - transformed["shards_count"] = - flattenVertexAIIndexIndexStatsShardsCount(original["shardsCount"], d, config) - return []interface{}{transformed} -} -func flattenVertexAIIndexIndexStatsVectorsCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIIndexIndexStatsShardsCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) 
- return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenVertexAIIndexIndexUpdateMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandVertexAIIndexDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIIndexDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIIndexMetadata(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedContentsDeltaUri, err := expandVertexAIIndexMetadataContentsDeltaUri(original["contents_delta_uri"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedContentsDeltaUri); val.IsValid() && !isEmptyValue(val) { - transformed["contentsDeltaUri"] = transformedContentsDeltaUri - } - - transformedIsCompleteOverwrite, err := expandVertexAIIndexMetadataIsCompleteOverwrite(original["is_complete_overwrite"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedIsCompleteOverwrite); val.IsValid() && !isEmptyValue(val) { - transformed["isCompleteOverwrite"] = transformedIsCompleteOverwrite - } - - transformedConfig, err := expandVertexAIIndexMetadataConfig(original["config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedConfig); val.IsValid() && !isEmptyValue(val) { - transformed["config"] = transformedConfig - } - - return transformed, nil -} - -func expandVertexAIIndexMetadataContentsDeltaUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIIndexMetadataIsCompleteOverwrite(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIIndexMetadataConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDimensions, err := expandVertexAIIndexMetadataConfigDimensions(original["dimensions"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDimensions); val.IsValid() && !isEmptyValue(val) { - transformed["dimensions"] = transformedDimensions - } - - transformedApproximateNeighborsCount, err := expandVertexAIIndexMetadataConfigApproximateNeighborsCount(original["approximate_neighbors_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedApproximateNeighborsCount); val.IsValid() && !isEmptyValue(val) { - transformed["approximateNeighborsCount"] = transformedApproximateNeighborsCount - } - - transformedDistanceMeasureType, err := expandVertexAIIndexMetadataConfigDistanceMeasureType(original["distance_measure_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDistanceMeasureType); val.IsValid() && !isEmptyValue(val) { - transformed["distanceMeasureType"] = transformedDistanceMeasureType - } - - transformedFeatureNormType, err := expandVertexAIIndexMetadataConfigFeatureNormType(original["feature_norm_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFeatureNormType); val.IsValid() && !isEmptyValue(val) { - transformed["featureNormType"] = transformedFeatureNormType - } - - transformedAlgorithmConfig, err := expandVertexAIIndexMetadataConfigAlgorithmConfig(original["algorithm_config"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedAlgorithmConfig); val.IsValid() && !isEmptyValue(val) { - transformed["algorithmConfig"] = transformedAlgorithmConfig - } - - return transformed, nil -} - -func expandVertexAIIndexMetadataConfigDimensions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIIndexMetadataConfigApproximateNeighborsCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIIndexMetadataConfigDistanceMeasureType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIIndexMetadataConfigFeatureNormType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIIndexMetadataConfigAlgorithmConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTreeAhConfig, err := expandVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfig(original["tree_ah_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTreeAhConfig); val.IsValid() && !isEmptyValue(val) { - transformed["treeAhConfig"] = transformedTreeAhConfig - } - - transformedBruteForceConfig, err := expandVertexAIIndexMetadataConfigAlgorithmConfigBruteForceConfig(original["brute_force_config"], d, config) - if err != nil { - return nil, err - } else { - transformed["bruteForceConfig"] = transformedBruteForceConfig - } - - return transformed, nil -} - -func expandVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - 
original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLeafNodeEmbeddingCount, err := expandVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodeEmbeddingCount(original["leaf_node_embedding_count"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLeafNodeEmbeddingCount); val.IsValid() && !isEmptyValue(val) { - transformed["leafNodeEmbeddingCount"] = transformedLeafNodeEmbeddingCount - } - - transformedLeafNodesToSearchPercent, err := expandVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodesToSearchPercent(original["leaf_nodes_to_search_percent"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLeafNodesToSearchPercent); val.IsValid() && !isEmptyValue(val) { - transformed["leafNodesToSearchPercent"] = transformedLeafNodesToSearchPercent - } - - return transformed, nil -} - -func expandVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodeEmbeddingCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodesToSearchPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIIndexMetadataConfigAlgorithmConfigBruteForceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - transformed := make(map[string]interface{}) - - return transformed, nil -} - -func expandVertexAIIndexLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - 
return m, nil -} - -func expandVertexAIIndexIndexUpdateMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_tensorboard.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_tensorboard.go deleted file mode 100644 index 951cbc0e31..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_tensorboard.go +++ /dev/null @@ -1,524 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceVertexAITensorboard() *schema.Resource { - return &schema.Resource{ - Create: resourceVertexAITensorboardCreate, - Read: resourceVertexAITensorboardRead, - Update: resourceVertexAITensorboardUpdate, - Delete: resourceVertexAITensorboardDelete, - - Importer: &schema.ResourceImporter{ - State: resourceVertexAITensorboardImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "display_name": { - Type: schema.TypeString, - Required: true, - Description: `User provided name of this Tensorboard.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Description of this Tensorboard.`, - }, - "encryption_spec": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Customer-managed encryption key spec for a Tensorboard. If set, this Tensorboard and all sub-resources of this Tensorboard will be secured by this key.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. -Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. 
The key needs to be in the same region as where the resource is created.`, - }, - }, - }, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `The labels with user-defined metadata to organize your Tensorboards.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The region of the tensorboard. eg us-central1`, - }, - "blob_storage_path_prefix": { - Type: schema.TypeString, - Computed: true, - Description: `Consumer project Cloud Storage path prefix used to store blob data, which can either be a bucket or directory. Does not end with a '/'.`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of when the Tensorboard was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `Name of the Tensorboard.`, - }, - "run_count": { - Type: schema.TypeString, - Computed: true, - Description: `The number of Runs stored in this Tensorboard.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of when the Tensorboard was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceVertexAITensorboardCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandVertexAITensorboardDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); 
!isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandVertexAITensorboardDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - encryptionSpecProp, err := expandVertexAITensorboardEncryptionSpec(d.Get("encryption_spec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("encryption_spec"); !isEmptyValue(reflect.ValueOf(encryptionSpecProp)) && (ok || !reflect.DeepEqual(v, encryptionSpecProp)) { - obj["encryptionSpec"] = encryptionSpecProp - } - labelsProp, err := expandVertexAITensorboardLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/tensorboards") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Tensorboard: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Tensorboard: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Tensorboard: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - 
d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = VertexAIOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Tensorboard", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Tensorboard: %s", err) - } - - if err := d.Set("name", flattenVertexAITensorboardName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Tensorboard %q: %#v", d.Id(), res) - - return resourceVertexAITensorboardRead(d, meta) -} - -func resourceVertexAITensorboardRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Tensorboard: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("VertexAITensorboard %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Tensorboard: %s", err) - } - - if err := d.Set("name", flattenVertexAITensorboardName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading 
Tensorboard: %s", err) - } - if err := d.Set("display_name", flattenVertexAITensorboardDisplayName(res["displayName"], d, config)); err != nil { - return fmt.Errorf("Error reading Tensorboard: %s", err) - } - if err := d.Set("description", flattenVertexAITensorboardDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Tensorboard: %s", err) - } - if err := d.Set("encryption_spec", flattenVertexAITensorboardEncryptionSpec(res["encryptionSpec"], d, config)); err != nil { - return fmt.Errorf("Error reading Tensorboard: %s", err) - } - if err := d.Set("blob_storage_path_prefix", flattenVertexAITensorboardBlobStoragePathPrefix(res["blobStoragePathPrefix"], d, config)); err != nil { - return fmt.Errorf("Error reading Tensorboard: %s", err) - } - if err := d.Set("run_count", flattenVertexAITensorboardRunCount(res["runCount"], d, config)); err != nil { - return fmt.Errorf("Error reading Tensorboard: %s", err) - } - if err := d.Set("create_time", flattenVertexAITensorboardCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Tensorboard: %s", err) - } - if err := d.Set("update_time", flattenVertexAITensorboardUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Tensorboard: %s", err) - } - if err := d.Set("labels", flattenVertexAITensorboardLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Tensorboard: %s", err) - } - - return nil -} - -func resourceVertexAITensorboardUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Tensorboard: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := 
expandVertexAITensorboardDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandVertexAITensorboardDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandVertexAITensorboardLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Tensorboard %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Tensorboard %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Tensorboard %q: %#v", 
d.Id(), res) - } - - err = VertexAIOperationWaitTime( - config, res, project, "Updating Tensorboard", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceVertexAITensorboardRead(d, meta) -} - -func resourceVertexAITensorboardDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Tensorboard: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Tensorboard %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Tensorboard") - } - - err = VertexAIOperationWaitTime( - config, res, project, "Deleting Tensorboard", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Tensorboard %q: %#v", d.Id(), res) - return nil -} - -func resourceVertexAITensorboardImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/tensorboards/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/tensorboards/{{name}}") - if err != nil { 
- return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := d.Set("name", id); err != nil { - return nil, fmt.Errorf("Error setting name for import: %s", err) - } - - return []*schema.ResourceData{d}, nil -} - -func flattenVertexAITensorboardName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAITensorboardDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAITensorboardDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAITensorboardEncryptionSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["kms_key_name"] = - flattenVertexAITensorboardEncryptionSpecKmsKeyName(original["kmsKeyName"], d, config) - return []interface{}{transformed} -} -func flattenVertexAITensorboardEncryptionSpecKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAITensorboardBlobStoragePathPrefix(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAITensorboardRunCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAITensorboardCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAITensorboardUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAITensorboardLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandVertexAITensorboardDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandVertexAITensorboardDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAITensorboardEncryptionSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandVertexAITensorboardEncryptionSpecKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - return transformed, nil -} - -func expandVertexAITensorboardEncryptionSpecKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAITensorboardLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vpc_access_connector.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vpc_access_connector.go deleted file mode 100644 index df9d9446ee..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vpc_access_connector.go +++ /dev/null @@ -1,642 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules 
and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "fmt" - "log" - "reflect" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func ResourceVPCAccessConnector() *schema.Resource { - return &schema.Resource{ - Create: resourceVPCAccessConnectorCreate, - Read: resourceVPCAccessConnectorRead, - Delete: resourceVPCAccessConnectorDelete, - - Importer: &schema.ResourceImporter{ - State: resourceVPCAccessConnectorImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the resource (Max 25 characters).`, - }, - "ip_cidr_range": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The range of internal addresses that follows RFC 4632 notation. Example: '10.132.0.0/28'.`, - RequiredWith: []string{"network"}, - }, - "machine_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Machine type of VM Instance underlying connector. Default is e2-micro`, - Default: "e2-micro", - }, - "max_instances": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Maximum value of instances in autoscaling group underlying the connector.`, - }, - "max_throughput": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(200, 1000), - Description: `Maximum throughput of the connector in Mbps, must be greater than 'min_throughput'. 
Default is 300.`, - Default: 300, - }, - "min_instances": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Minimum value of instances in autoscaling group underlying the connector.`, - }, - "min_throughput": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IntBetween(200, 1000), - Description: `Minimum throughput of the connector in Mbps. Default and min is 200.`, - Default: 200, - }, - "network": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - Description: `Name or self_link of the VPC network. Required if 'ip_cidr_range' is set.`, - ExactlyOneOf: []string{"network", "subnet.0.name"}, - }, - "region": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Region where the VPC Access connector resides. If it is not provided, the provider region is used.`, - }, - "subnet": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The subnet in which to house the connector`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Subnet name (relative, not fully qualified). E.g. if the full subnet selfLink is -https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetName} the correct input for this field would be {subnetName}"`, - ExactlyOneOf: []string{"network", "subnet.0.name"}, - }, - "project_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Project in which the subnet exists. 
If not set, this project is assumed to be the project for which the connector create request was issued.`, - }, - }, - }, - }, - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: `The fully qualified name of this VPC connector`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `State of the VPC access connector.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceVPCAccessConnectorCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandVPCAccessConnectorName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandVPCAccessConnectorNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - ipCidrRangeProp, err := expandVPCAccessConnectorIpCidrRange(d.Get("ip_cidr_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_cidr_range"); !isEmptyValue(reflect.ValueOf(ipCidrRangeProp)) && (ok || !reflect.DeepEqual(v, ipCidrRangeProp)) { - obj["ipCidrRange"] = ipCidrRangeProp - } - machineTypeProp, err := expandVPCAccessConnectorMachineType(d.Get("machine_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("machine_type"); !isEmptyValue(reflect.ValueOf(machineTypeProp)) && (ok || !reflect.DeepEqual(v, machineTypeProp)) { - obj["machineType"] = machineTypeProp - } - minThroughputProp, err := 
expandVPCAccessConnectorMinThroughput(d.Get("min_throughput"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("min_throughput"); !isEmptyValue(reflect.ValueOf(minThroughputProp)) && (ok || !reflect.DeepEqual(v, minThroughputProp)) { - obj["minThroughput"] = minThroughputProp - } - minInstancesProp, err := expandVPCAccessConnectorMinInstances(d.Get("min_instances"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("min_instances"); !isEmptyValue(reflect.ValueOf(minInstancesProp)) && (ok || !reflect.DeepEqual(v, minInstancesProp)) { - obj["minInstances"] = minInstancesProp - } - maxInstancesProp, err := expandVPCAccessConnectorMaxInstances(d.Get("max_instances"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("max_instances"); !isEmptyValue(reflect.ValueOf(maxInstancesProp)) && (ok || !reflect.DeepEqual(v, maxInstancesProp)) { - obj["maxInstances"] = maxInstancesProp - } - maxThroughputProp, err := expandVPCAccessConnectorMaxThroughput(d.Get("max_throughput"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("max_throughput"); !isEmptyValue(reflect.ValueOf(maxThroughputProp)) && (ok || !reflect.DeepEqual(v, maxThroughputProp)) { - obj["maxThroughput"] = maxThroughputProp - } - subnetProp, err := expandVPCAccessConnectorSubnet(d.Get("subnet"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("subnet"); !isEmptyValue(reflect.ValueOf(subnetProp)) && (ok || !reflect.DeepEqual(v, subnetProp)) { - obj["subnet"] = subnetProp - } - - obj, err = resourceVPCAccessConnectorEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{VPCAccessBasePath}}projects/{{project}}/locations/{{region}}/connectors?connectorId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Connector: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { 
- return fmt.Errorf("Error fetching project for Connector: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating Connector: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/connectors/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = VPCAccessOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Connector", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Connector: %s", err) - } - - opRes, err = resourceVPCAccessConnectorDecoder(d, meta, opRes) - if err != nil { - return fmt.Errorf("Error decoding response from operation: %s", err) - } - if opRes == nil { - return fmt.Errorf("Error decoding response from operation, could not find object") - } - - if err := d.Set("name", flattenVPCAccessConnectorName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/connectors/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // This is useful if the resource in question doesn't have a perfectly consistent API - // That is, the Operation for Create might return before the Get operation shows the - // completed state of the resource. 
- time.Sleep(5 * time.Second) - - log.Printf("[DEBUG] Finished creating Connector %q: %#v", d.Id(), res) - - return resourceVPCAccessConnectorRead(d, meta) -} - -func resourceVPCAccessConnectorRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{VPCAccessBasePath}}projects/{{project}}/locations/{{region}}/connectors/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Connector: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("VPCAccessConnector %q", d.Id())) - } - - res, err = resourceVPCAccessConnectorDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - // Decoding the object has resulted in it being gone. 
It may be marked deleted - log.Printf("[DEBUG] Removing VPCAccessConnector because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Connector: %s", err) - } - - if err := d.Set("name", flattenVPCAccessConnectorName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("network", flattenVPCAccessConnectorNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("ip_cidr_range", flattenVPCAccessConnectorIpCidrRange(res["ipCidrRange"], d, config)); err != nil { - return fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("state", flattenVPCAccessConnectorState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("machine_type", flattenVPCAccessConnectorMachineType(res["machineType"], d, config)); err != nil { - return fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("min_throughput", flattenVPCAccessConnectorMinThroughput(res["minThroughput"], d, config)); err != nil { - return fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("min_instances", flattenVPCAccessConnectorMinInstances(res["minInstances"], d, config)); err != nil { - return fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("max_instances", flattenVPCAccessConnectorMaxInstances(res["maxInstances"], d, config)); err != nil { - return fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("max_throughput", flattenVPCAccessConnectorMaxThroughput(res["maxThroughput"], d, config)); err != nil { - return fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("subnet", flattenVPCAccessConnectorSubnet(res["subnet"], d, config)); err != nil { - return fmt.Errorf("Error reading Connector: %s", err) - } - - return nil -} - -func 
resourceVPCAccessConnectorDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Connector: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{VPCAccessBasePath}}projects/{{project}}/locations/{{region}}/connectors/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Connector %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Connector") - } - - err = VPCAccessOperationWaitTime( - config, res, project, "Deleting Connector", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Connector %q: %#v", d.Id(), res) - return nil -} - -func resourceVPCAccessConnectorImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/connectors/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/connectors/{{name}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func flattenVPCAccessConnectorName(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenVPCAccessConnectorNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenVPCAccessConnectorIpCidrRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVPCAccessConnectorState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVPCAccessConnectorMachineType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVPCAccessConnectorMinThroughput(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenVPCAccessConnectorMinInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenVPCAccessConnectorMaxInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v 
// let terraform core handle it otherwise -} - -func flattenVPCAccessConnectorMaxThroughput(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { - return intVal - } - } - - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v // let terraform core handle it otherwise -} - -func flattenVPCAccessConnectorSubnet(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenVPCAccessConnectorSubnetName(original["name"], d, config) - transformed["project_id"] = - flattenVPCAccessConnectorSubnetProjectId(original["projectId"], d, config) - return []interface{}{transformed} -} -func flattenVPCAccessConnectorSubnetName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVPCAccessConnectorSubnetProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandVPCAccessConnectorName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVPCAccessConnectorNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return GetResourceNameFromSelfLink(v.(string)), nil -} - -func expandVPCAccessConnectorIpCidrRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVPCAccessConnectorMachineType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVPCAccessConnectorMinThroughput(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandVPCAccessConnectorMinInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVPCAccessConnectorMaxInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVPCAccessConnectorMaxThroughput(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVPCAccessConnectorSubnet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandVPCAccessConnectorSubnetName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedProjectId, err := expandVPCAccessConnectorSubnetProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - return transformed, nil -} - -func expandVPCAccessConnectorSubnetName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVPCAccessConnectorSubnetProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceVPCAccessConnectorEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - delete(obj, "name") - return obj, nil -} - -func resourceVPCAccessConnectorDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - // Take the returned long form of the 
name and use it as `self_link`. - // Then modify the name to be the user specified form. - // We can't just ignore_read on `name` as the linter will - // complain that the returned `res` is never used afterwards. - // Some field needs to be actually set, and we chose `name`. - if err := d.Set("self_link", res["name"].(string)); err != nil { - return nil, fmt.Errorf("Error setting self_link: %s", err) - } - res["name"] = d.Get("name").(string) - return res, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_workflows_workflow.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_workflows_workflow.go deleted file mode 100644 index 6ea1fc0de2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_workflows_workflow.go +++ /dev/null @@ -1,616 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "context" - "fmt" - "log" - "reflect" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func ResourceWorkflowsWorkflow() *schema.Resource { - return &schema.Resource{ - Create: resourceWorkflowsWorkflowCreate, - Read: resourceWorkflowsWorkflowRead, - Update: resourceWorkflowsWorkflowUpdate, - Delete: resourceWorkflowsWorkflowDelete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(20 * time.Minute), - Update: schema.DefaultTimeout(20 * time.Minute), - Delete: schema.DefaultTimeout(20 * time.Minute), - }, - - SchemaVersion: 1, - StateUpgraders: []schema.StateUpgrader{ - { - Type: resourceWorkflowsWorkflowResourceV0().CoreConfigSchema().ImpliedType(), - Upgrade: resourceWorkflowsWorkflowUpgradeV0, - Version: 0, - }, - }, - - Schema: map[string]*schema.Schema{ - "description": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Description of the workflow provided by the user. Must be at most 1000 unicode characters long.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `A set of key/value label pairs to assign to this Workflow.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Name of the Workflow.`, - }, - "region": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The region of the workflow.`, - }, - "service_account": { - Type: schema.TypeString, - Computed: true, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of the service account associated with the latest workflow version. 
This service -account represents the identity of the workflow and determines what permissions the workflow has. - -Format: projects/{project}/serviceAccounts/{account}.`, - }, - "source_contents": { - Type: schema.TypeString, - Optional: true, - Description: `Workflow code to be executed. The size limit is 32KB.`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of when the workflow was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "revision_id": { - Type: schema.TypeString, - Computed: true, - Description: `The revision of the workflow. A new one is generated if the service account or source contents is changed.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `State of the workflow deployment.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of when the workflow was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "name_prefix": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name"}, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceWorkflowsWorkflowCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandWorkflowsWorkflowName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandWorkflowsWorkflowDescription(d.Get("description"), d, config) - if err != nil { - 
return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandWorkflowsWorkflowLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - serviceAccountProp, err := expandWorkflowsWorkflowServiceAccount(d.Get("service_account"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account"); !isEmptyValue(reflect.ValueOf(serviceAccountProp)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { - obj["serviceAccount"] = serviceAccountProp - } - sourceContentsProp, err := expandWorkflowsWorkflowSourceContents(d.Get("source_contents"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_contents"); !isEmptyValue(reflect.ValueOf(sourceContentsProp)) && (ok || !reflect.DeepEqual(v, sourceContentsProp)) { - obj["sourceContents"] = sourceContentsProp - } - - obj, err = resourceWorkflowsWorkflowEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{WorkflowsBasePath}}projects/{{project}}/locations/{{region}}/workflows?workflowId={{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Creating new Workflow: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Workflow: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return 
fmt.Errorf("Error creating Workflow: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/workflows/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = WorkflowsOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Workflow", userAgent, - d.Timeout(schema.TimeoutCreate)) - if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Workflow: %s", err) - } - - if err := d.Set("name", flattenWorkflowsWorkflowName(opRes["name"], d, config)); err != nil { - return err - } - - // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/workflows/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - log.Printf("[DEBUG] Finished creating Workflow %q: %#v", d.Id(), res) - - return resourceWorkflowsWorkflowRead(d, meta) -} - -func resourceWorkflowsWorkflowRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{WorkflowsBasePath}}projects/{{project}}/locations/{{region}}/workflows/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Workflow: %s", err) - } - billingProject = project - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil 
{ - return handleNotFoundError(err, d, fmt.Sprintf("WorkflowsWorkflow %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Workflow: %s", err) - } - - if err := d.Set("name", flattenWorkflowsWorkflowName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("description", flattenWorkflowsWorkflowDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("create_time", flattenWorkflowsWorkflowCreateTime(res["createTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("update_time", flattenWorkflowsWorkflowUpdateTime(res["updateTime"], d, config)); err != nil { - return fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("state", flattenWorkflowsWorkflowState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("labels", flattenWorkflowsWorkflowLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("service_account", flattenWorkflowsWorkflowServiceAccount(res["serviceAccount"], d, config)); err != nil { - return fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("source_contents", flattenWorkflowsWorkflowSourceContents(res["sourceContents"], d, config)); err != nil { - return fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("revision_id", flattenWorkflowsWorkflowRevisionId(res["revisionId"], d, config)); err != nil { - return fmt.Errorf("Error reading Workflow: %s", err) - } - - return nil -} - -func resourceWorkflowsWorkflowUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - 
project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Workflow: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandWorkflowsWorkflowDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandWorkflowsWorkflowLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - serviceAccountProp, err := expandWorkflowsWorkflowServiceAccount(d.Get("service_account"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { - obj["serviceAccount"] = serviceAccountProp - } - sourceContentsProp, err := expandWorkflowsWorkflowSourceContents(d.Get("source_contents"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_contents"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceContentsProp)) { - obj["sourceContents"] = sourceContentsProp - } - - obj, err = resourceWorkflowsWorkflowEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{WorkflowsBasePath}}projects/{{project}}/locations/{{region}}/workflows/{{name}}") - if err != nil { - return err - } - - log.Printf("[DEBUG] Updating Workflow %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("service_account") { - updateMask = 
append(updateMask, "serviceAccount") - } - - if d.HasChange("source_contents") { - updateMask = append(updateMask, "sourceContents") - } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return fmt.Errorf("Error updating Workflow %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Workflow %q: %#v", d.Id(), res) - } - - err = WorkflowsOperationWaitTime( - config, res, project, "Updating Workflow", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceWorkflowsWorkflowRead(d, meta) -} - -func resourceWorkflowsWorkflowDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Workflow: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{WorkflowsBasePath}}projects/{{project}}/locations/{{region}}/workflows/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Workflow %q", d.Id()) - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return 
handleNotFoundError(err, d, "Workflow") - } - - err = WorkflowsOperationWaitTime( - config, res, project, "Deleting Workflow", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err - } - - log.Printf("[DEBUG] Finished deleting Workflow %q: %#v", d.Id(), res) - return nil -} - -func flattenWorkflowsWorkflowName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenWorkflowsWorkflowDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenWorkflowsWorkflowCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenWorkflowsWorkflowUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenWorkflowsWorkflowState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenWorkflowsWorkflowLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenWorkflowsWorkflowServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenWorkflowsWorkflowSourceContents(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenWorkflowsWorkflowRevisionId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func expandWorkflowsWorkflowName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandWorkflowsWorkflowDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandWorkflowsWorkflowLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range 
v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandWorkflowsWorkflowServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandWorkflowsWorkflowSourceContents(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceWorkflowsWorkflowEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - var ResName string - if v, ok := d.GetOk("name"); ok { - ResName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - ResName = resource.PrefixedUniqueId(v.(string)) - } else { - ResName = resource.UniqueId() - } - - if err := d.Set("name", ResName); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) - } - - return obj, nil -} - -func resourceWorkflowsWorkflowResourceV0() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "description": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Description: `Description of the workflow provided by the user. Must be at most 1000 unicode characters long.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `A set of key/value label pairs to assign to this Workflow.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Name of the Workflow.`, - }, - "region": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The region of the workflow.`, - }, - "service_account": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of the service account associated with the latest workflow version. 
This service -account represents the identity of the workflow and determines what permissions the workflow has. - -Format: projects/{project}/serviceAccounts/{account}.`, - }, - "source_contents": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Workflow code to be executed. The size limit is 32KB.`, - }, - "create_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of when the workflow was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "revision_id": { - Type: schema.TypeString, - Computed: true, - Description: `The revision of the workflow. A new one is generated if the service account or source contents is changed.`, - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: `State of the workflow deployment.`, - }, - "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `The timestamp of when the workflow was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "name_prefix": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name"}, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - } -} - -func resourceWorkflowsWorkflowUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - log.Printf("[DEBUG] Attributes before migration: %#v", rawState) - - rawState["name"] = GetResourceNameFromSelfLink(rawState["name"].(string)) - - log.Printf("[DEBUG] Attributes after migration: %#v", rawState) - return rawState, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/retry_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/retry_utils.go deleted file mode 100644 index 
5d67c6e99e..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/retry_utils.go +++ /dev/null @@ -1,85 +0,0 @@ -package google - -import ( - "log" - "time" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" -) - -func retry(retryFunc func() error) error { - return retryTime(retryFunc, 1) -} - -func retryTime(retryFunc func() error, minutes int) error { - return RetryTimeDuration(retryFunc, time.Duration(minutes)*time.Minute) -} - -func RetryTimeDuration(retryFunc func() error, duration time.Duration, errorRetryPredicates ...RetryErrorPredicateFunc) error { - return resource.Retry(duration, func() *resource.RetryError { - err := retryFunc() - if err == nil { - return nil - } - if isRetryableError(err, errorRetryPredicates...) { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - }) -} - -func isRetryableError(topErr error, customPredicates ...RetryErrorPredicateFunc) bool { - if topErr == nil { - return false - } - - retryPredicates := append( - // Global error retry predicates are registered in this default list. - defaultErrorRetryPredicates, - customPredicates...) - - // Check all wrapped errors for a retryable error status. - isRetryable := false - errwrap.Walk(topErr, func(werr error) { - for _, pred := range retryPredicates { - if predRetry, predReason := pred(werr); predRetry { - log.Printf("[DEBUG] Dismissed an error as retryable. %s - %s", predReason, werr) - isRetryable = true - return - } - } - }) - return isRetryable -} - -// The polling overrides the default backoff logic with max backoff of 10s. The poll interval can be greater than 10s. 
-func retryWithPolling(retryFunc func() (interface{}, error), timeout time.Duration, pollInterval time.Duration, errorRetryPredicates ...RetryErrorPredicateFunc) (interface{}, error) { - refreshFunc := func() (interface{}, string, error) { - result, err := retryFunc() - if err == nil { - return result, "done", nil - } - - // Check if it is a retryable error. - if isRetryableError(err, errorRetryPredicates...) { - return result, "retrying", nil - } - - // The error is not retryable. - return result, "done", err - } - stateChange := &resource.StateChangeConf{ - Pending: []string{ - "retrying", - }, - Target: []string{ - "done", - }, - Refresh: refreshFunc, - Timeout: timeout, - PollInterval: pollInterval, - } - - return stateChange.WaitForState() -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/runadminv3_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/runadminv3_operation.go deleted file mode 100644 index d555b477b6..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/runadminv3_operation.go +++ /dev/null @@ -1,59 +0,0 @@ -package google - -import ( - "encoding/json" - "fmt" - "time" - - "google.golang.org/api/run/v2" -) - -type RunAdminV2OperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *RunAdminV2OperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - url := fmt.Sprintf("%s%s", w.Config.CloudRunV2BasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createRunAdminV2Waiter(config *Config, op *run.GoogleLongrunningOperation, project, activity, userAgent string) (*RunAdminV2OperationWaiter, error) { - w := &RunAdminV2OperationWaiter{ - Config: config, - UserAgent: userAgent, - 
Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func runAdminV2OperationWaitTimeWithResponse(config *Config, op *run.GoogleLongrunningOperation, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createRunAdminV2Waiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func runAdminV2OperationWaitTime(config *Config, op *run.GoogleLongrunningOperation, project, activity, userAgent string, timeout time.Duration) error { - if op.Done { - return nil - } - w, err := createRunAdminV2Waiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/security_policy_association_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/security_policy_association_utils.go deleted file mode 100644 index 71664db3c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/security_policy_association_utils.go +++ /dev/null @@ -1 +0,0 @@ -package google diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/self_link_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/self_link_helpers.go deleted file mode 100644 index 16767600e5..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/self_link_helpers.go +++ /dev/null @@ -1,183 +0,0 @@ -package google - -import ( - "errors" - "fmt" - "net/url" - "regexp" - "strings" - 
- "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// Compare only the resource name of two self links/paths. -func compareResourceNames(_, old, new string, _ *schema.ResourceData) bool { - return GetResourceNameFromSelfLink(old) == GetResourceNameFromSelfLink(new) -} - -// Compare only the relative path of two self links. -func compareSelfLinkRelativePaths(_, old, new string, _ *schema.ResourceData) bool { - oldStripped, err := getRelativePath(old) - if err != nil { - return false - } - - newStripped, err := getRelativePath(new) - if err != nil { - return false - } - - if oldStripped == newStripped { - return true - } - - return false -} - -// compareSelfLinkOrResourceName checks if two resources are the same resource -// -// Use this method when the field accepts either a name or a self_link referencing a resource. -// The value we store (i.e. `old` in this method), must be a self_link. -func compareSelfLinkOrResourceName(_, old, new string, _ *schema.ResourceData) bool { - newParts := strings.Split(new, "/") - - if len(newParts) == 1 { - // `new` is a name - // `old` is always a self_link - if GetResourceNameFromSelfLink(old) == newParts[0] { - return true - } - } - - // The `new` string is a self_link - return compareSelfLinkRelativePaths("", old, new, nil) -} - -// Hash the relative path of a self link. -func selfLinkRelativePathHash(selfLink interface{}) int { - path, _ := getRelativePath(selfLink.(string)) - return hashcode(path) -} - -func getRelativePath(selfLink string) (string, error) { - stringParts := strings.SplitAfterN(selfLink, "projects/", 2) - if len(stringParts) != 2 { - return "", fmt.Errorf("String was not a self link: %s", selfLink) - } - - return "projects/" + stringParts[1], nil -} - -// Hash the name path of a self link. 
-func selfLinkNameHash(selfLink interface{}) int { - name := GetResourceNameFromSelfLink(selfLink.(string)) - return hashcode(name) -} - -func ConvertSelfLinkToV1(link string) string { - reg := regexp.MustCompile("/compute/[a-zA-Z0-9]*/projects/") - return reg.ReplaceAllString(link, "/compute/v1/projects/") -} - -func GetResourceNameFromSelfLink(link string) string { - parts := strings.Split(link, "/") - return parts[len(parts)-1] -} - -func NameFromSelfLinkStateFunc(v interface{}) string { - return GetResourceNameFromSelfLink(v.(string)) -} - -func StoreResourceName(resourceLink interface{}) string { - return GetResourceNameFromSelfLink(resourceLink.(string)) -} - -type LocationType int - -const ( - Zonal LocationType = iota - Regional - Global -) - -func GetZonalResourcePropertiesFromSelfLinkOrSchema(d *schema.ResourceData, config *Config) (string, string, string, error) { - return getResourcePropertiesFromSelfLinkOrSchema(d, config, Zonal) -} - -func GetRegionalResourcePropertiesFromSelfLinkOrSchema(d *schema.ResourceData, config *Config) (string, string, string, error) { - return getResourcePropertiesFromSelfLinkOrSchema(d, config, Regional) -} - -func getResourcePropertiesFromSelfLinkOrSchema(d *schema.ResourceData, config *Config, locationType LocationType) (string, string, string, error) { - if selfLink, ok := d.GetOk("self_link"); ok { - return GetLocationalResourcePropertiesFromSelfLinkString(selfLink.(string)) - } else { - project, err := getProject(d, config) - if err != nil { - return "", "", "", err - } - - location := "" - if locationType == Regional { - location, err = getRegion(d, config) - if err != nil { - return "", "", "", err - } - } else if locationType == Zonal { - location, err = getZone(d, config) - if err != nil { - return "", "", "", err - } - } - - n, ok := d.GetOk("name") - name := n.(string) - if !ok { - return "", "", "", errors.New("must provide either `self_link` or `name`") - } - return project, location, name, nil - } -} - -// 
given a full locational (non-global) self link, returns the project + region/zone + name or an error -func GetLocationalResourcePropertiesFromSelfLinkString(selfLink string) (string, string, string, error) { - parsed, err := url.Parse(selfLink) - if err != nil { - return "", "", "", err - } - - s := strings.Split(parsed.Path, "/") - - // This is a pretty bad way to tell if this is a self link, but stops us - // from accessing an index out of bounds and causing a panic. generally, we - // expect bad values to be partial URIs and names, so this will catch them - if len(s) < 9 { - return "", "", "", fmt.Errorf("value %s was not a self link", selfLink) - } - - return s[4], s[6], s[8], nil -} - -// return the region a selfLink is referring to -func GetRegionFromRegionSelfLink(selfLink string) string { - re := regexp.MustCompile("/compute/[a-zA-Z0-9]*/projects/[a-zA-Z0-9-]*/regions/([a-zA-Z0-9-]*)") - switch { - case re.MatchString(selfLink): - if res := re.FindStringSubmatch(selfLink); len(res) == 2 && res[1] != "" { - return res[1] - } - } - return selfLink -} - -// This function supports selflinks that have regions and locations in their paths -func GetRegionFromRegionalSelfLink(selfLink string) string { - re := regexp.MustCompile("projects/[a-zA-Z0-9-]*/(?:locations|regions)/([a-zA-Z0-9-]*)") - switch { - case re.MatchString(selfLink): - if res := re.FindStringSubmatch(selfLink); len(res) == 2 && res[1] != "" { - return res[1] - } - } - return selfLink -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/service_networking_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/service_networking_operation.go deleted file mode 100644 index 8f88f6b271..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/service_networking_operation.go +++ /dev/null @@ -1,35 +0,0 @@ -package google - -import ( - "time" - - 
"google.golang.org/api/servicenetworking/v1" -) - -type ServiceNetworkingOperationWaiter struct { - Service *servicenetworking.APIService - Project string - UserProjectOverride bool - CommonOperationWaiter -} - -func (w *ServiceNetworkingOperationWaiter) QueryOp() (interface{}, error) { - opGetCall := w.Service.Operations.Get(w.Op.Name) - if w.UserProjectOverride { - opGetCall.Header().Add("X-Goog-User-Project", w.Project) - } - return opGetCall.Do() -} - -func ServiceNetworkingOperationWaitTime(config *Config, op *servicenetworking.Operation, activity, userAgent, project string, timeout time.Duration) error { - w := &ServiceNetworkingOperationWaiter{ - Service: config.NewServiceNetworkingClient(userAgent), - Project: project, - UserProjectOverride: config.UserProjectOverride, - } - - if err := w.SetOp(op); err != nil { - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/service_usage_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/service_usage_operation.go deleted file mode 100644 index 4c5d713c96..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/service_usage_operation.go +++ /dev/null @@ -1,94 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "log" - "strings" - "time" - - "google.golang.org/api/googleapi" -) - -type ServiceUsageOperationWaiter struct { - Config *Config - UserAgent string - Project string - retryCount int - CommonOperationWaiter -} - -func (w *ServiceUsageOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. - url := fmt.Sprintf("%s%s", w.Config.ServiceUsageBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func (w *ServiceUsageOperationWaiter) IsRetryable(err error) bool { - // Retries errors on 403 3 times if the error message - // returned contains `has not been used in project` - maxRetries := 3 - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 403 { - if w.retryCount < maxRetries && strings.Contains(gerr.Body, "has not been used in project") { - w.retryCount += 1 - log.Printf("[DEBUG] retrying on 403 %v more times", w.retryCount-maxRetries-1) - return true - } - } - return false -} - -func createServiceUsageWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*ServiceUsageOperationWaiter, error) { - w := &ServiceUsageOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func ServiceUsageOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createServiceUsageWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - 
return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func ServiceUsageOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createServiceUsageWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/serviceman_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/serviceman_operation.go deleted file mode 100644 index 5f6a9e737a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/serviceman_operation.go +++ /dev/null @@ -1,36 +0,0 @@ -package google - -import ( - "fmt" - "time" - - "google.golang.org/api/googleapi" - "google.golang.org/api/servicemanagement/v1" -) - -type ServiceManagementOperationWaiter struct { - Service *servicemanagement.APIService - CommonOperationWaiter -} - -func (w *ServiceManagementOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - return w.Service.Operations.Get(w.Op.Name).Do() -} - -func ServiceManagementOperationWaitTime(config *Config, op *servicemanagement.Operation, activity, userAgent string, timeout time.Duration) (googleapi.RawMessage, error) { - w := &ServiceManagementOperationWaiter{ - Service: config.NewServiceManClient(userAgent), - } - - if err := w.SetOp(op); err != nil { - return nil, err - } - - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return nil, err - } - return w.Op.Response, nil -} diff 
--git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/data_source_access_approval_folder_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/data_source_access_approval_folder_service_account.go new file mode 100644 index 0000000000..33e3406d66 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/data_source_access_approval_folder_service_account.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package accessapproval + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceAccessApprovalFolderServiceAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAccessApprovalFolderServiceAccountRead, + Schema: map[string]*schema.Schema{ + "folder_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "account_email": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAccessApprovalFolderServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}folders/{{folder_id}}/serviceAccount") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessApprovalFolderServiceAccount %q", d.Id())) + } + + if err := d.Set("name", res["name"]); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("account_email", res["accountEmail"]); err != nil { + return fmt.Errorf("Error setting account_email: %s", err) + } + d.SetId(res["name"].(string)) + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/data_source_access_approval_organization_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/data_source_access_approval_organization_service_account.go new file mode 100644 index 0000000000..7d6011a9d1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/data_source_access_approval_organization_service_account.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package accessapproval + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceAccessApprovalOrganizationServiceAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAccessApprovalOrganizationServiceAccountRead, + Schema: map[string]*schema.Schema{ + "organization_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "account_email": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAccessApprovalOrganizationServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}organizations/{{organization_id}}/serviceAccount") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessApprovalOrganizationServiceAccount %q", d.Id())) + } + + if err := d.Set("name", res["name"]); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("account_email", res["accountEmail"]); err != nil { + return fmt.Errorf("Error setting account_email: %s", err) + } + d.SetId(res["name"].(string)) + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/data_source_access_approval_project_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/data_source_access_approval_project_service_account.go new file mode 100644 index 0000000000..761f3ccc75 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/data_source_access_approval_project_service_account.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package accessapproval + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceAccessApprovalProjectServiceAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAccessApprovalProjectServiceAccountRead, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "account_email": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAccessApprovalProjectServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/serviceAccount") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessApprovalProjectServiceAccount %q", d.Id())) + } + + if err := d.Set("name", res["name"]); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("account_email", res["accountEmail"]); err != nil { + return fmt.Errorf("Error setting account_email: %s", err) + } + d.SetId(res["name"].(string)) + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/resource_folder_access_approval_settings.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/resource_folder_access_approval_settings.go new file mode 100644 index 0000000000..3aad595b38 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/resource_folder_access_approval_settings.go @@ -0,0 +1,572 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package accessapproval + +import ( + "bytes" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +var accessApprovalCloudProductMapping = map[string]string{ + "appengine.googleapis.com": "App Engine", + "bigquery.googleapis.com": "BigQuery", + "bigtable.googleapis.com": "Cloud Bigtable", + "cloudkms.googleapis.com": "Cloud Key Management Service", + "compute.googleapis.com": "Compute Engine", + "dataflow.googleapis.com": "Cloud Dataflow", + "iam.googleapis.com": "Cloud Identity and Access Management", + "pubsub.googleapis.com": "Cloud Pub/Sub", + "storage.googleapis.com": "Cloud Storage", +} + +func accessApprovalEnrolledServicesHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + cp := m["cloud_product"].(string) + if n, ok := accessApprovalCloudProductMapping[cp]; ok { + cp = n + } + buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(cp))) // ToLower just in case + buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["enrollment_level"].(string)))) + return tpgresource.Hashcode(buf.String()) +} + +func ResourceAccessApprovalFolderSettings() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessApprovalFolderSettingsCreate, + Read: resourceAccessApprovalFolderSettingsRead, + Update: resourceAccessApprovalFolderSettingsUpdate, + Delete: resourceAccessApprovalFolderSettingsDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAccessApprovalFolderSettingsImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * 
time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "enrolled_services": { + Type: schema.TypeSet, + Required: true, + Description: `A list of Google Cloud Services for which the given resource has Access Approval enrolled. +Access requests for the resource given by name against any of these services contained here will be required +to have explicit approval. Enrollment can only be done on an all or nothing basis. + +A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded.`, + Elem: accessapprovalFolderSettingsEnrolledServicesSchema(), + Set: accessApprovalEnrolledServicesHash, + }, + "folder_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `ID of the folder of the access approval settings.`, + }, + "active_key_version": { + Type: schema.TypeString, + Optional: true, + Description: `The asymmetric crypto key version to use for signing approval requests. +Empty active_key_version indicates that a Google-managed key should be used for signing. +This property will be ignored if set by an ancestor of the resource, and new non-empty values may not be set.`, + }, + "notification_emails": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + Description: `A list of email addresses to which notifications relating to approval requests should be sent. +Notifications relating to a resource will be sent to all emails in the settings of ancestor +resources of that resource. 
A maximum of 50 email addresses are allowed.`, + MaxItems: 50, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "ancestor_has_active_key_version": { + Type: schema.TypeBool, + Computed: true, + Description: `If the field is true, that indicates that an ancestor of this Folder has set active_key_version.`, + }, + "enrolled_ancestor": { + Type: schema.TypeBool, + Computed: true, + Description: `If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Folder.`, + }, + "invalid_key_version": { + Type: schema.TypeBool, + Computed: true, + Description: `If the field is true, that indicates that there is some configuration issue with the active_key_version +configured on this Folder (e.g. it doesn't exist or the Access Approval service account doesn't have the +correct permissions on it, etc.) This key version is not necessarily the effective key version at this level, +as key versions are inherited top-down.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the settings. Format is "folders/{folder_id}/accessApprovalSettings"`, + }, + }, + UseJSONNumber: true, + } +} + +func accessapprovalFolderSettingsEnrolledServicesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_product": { + Type: schema.TypeString, + Required: true, + Description: `The product for which Access Approval will be enrolled. 
Allowed values are listed (case-sensitive): + * all + * App Engine + * BigQuery + * Cloud Bigtable + * Cloud Key Management Service + * Compute Engine + * Cloud Dataflow + * Cloud Identity and Access Management + * Cloud Pub/Sub + * Cloud Storage + * Persistent Disk + +Note: These values are supported as input, but considered a legacy format: + * all + * appengine.googleapis.com + * bigquery.googleapis.com + * bigtable.googleapis.com + * cloudkms.googleapis.com + * compute.googleapis.com + * dataflow.googleapis.com + * iam.googleapis.com + * pubsub.googleapis.com + * storage.googleapis.com`, + }, + "enrollment_level": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"BLOCK_ALL", ""}), + Description: `The enrollment level of the service. Default value: "BLOCK_ALL" Possible values: ["BLOCK_ALL"]`, + Default: "BLOCK_ALL", + }, + }, + } +} + +func resourceAccessApprovalFolderSettingsCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + notificationEmailsProp, err := expandAccessApprovalFolderSettingsNotificationEmails(d.Get("notification_emails"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_emails"); !tpgresource.IsEmptyValue(reflect.ValueOf(notificationEmailsProp)) && (ok || !reflect.DeepEqual(v, notificationEmailsProp)) { + obj["notificationEmails"] = notificationEmailsProp + } + enrolledServicesProp, err := expandAccessApprovalFolderSettingsEnrolledServices(d.Get("enrolled_services"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enrolled_services"); !tpgresource.IsEmptyValue(reflect.ValueOf(enrolledServicesProp)) && (ok || !reflect.DeepEqual(v, enrolledServicesProp)) { + obj["enrolledServices"] = enrolledServicesProp + } + activeKeyVersionProp, err := 
expandAccessApprovalFolderSettingsActiveKeyVersion(d.Get("active_key_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("active_key_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(activeKeyVersionProp)) && (ok || !reflect.DeepEqual(v, activeKeyVersionProp)) { + obj["activeKeyVersion"] = activeKeyVersionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}folders/{{folder_id}}/accessApprovalSettings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new FolderSettings: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + updateMask := []string{} + + if d.HasChange("notification_emails") { + updateMask = append(updateMask, "notificationEmails") + } + + if d.HasChange("enrolled_services") { + updateMask = append(updateMask, "enrolledServices") + } + + if d.HasChange("active_key_version") { + updateMask = append(updateMask, "activeKeyVersion") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating FolderSettings: %s", err) + } + if err := d.Set("name", flattenAccessApprovalFolderSettingsName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder_id}}/accessApprovalSettings") + if err != nil { + 
return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating FolderSettings %q: %#v", d.Id(), res) + + return resourceAccessApprovalFolderSettingsRead(d, meta) +} + +func resourceAccessApprovalFolderSettingsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}folders/{{folder_id}}/accessApprovalSettings") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessApprovalFolderSettings %q", d.Id())) + } + + if err := d.Set("name", flattenAccessApprovalFolderSettingsName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSettings: %s", err) + } + if err := d.Set("notification_emails", flattenAccessApprovalFolderSettingsNotificationEmails(res["notificationEmails"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSettings: %s", err) + } + if err := d.Set("enrolled_services", flattenAccessApprovalFolderSettingsEnrolledServices(res["enrolledServices"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSettings: %s", err) + } + if err := d.Set("enrolled_ancestor", flattenAccessApprovalFolderSettingsEnrolledAncestor(res["enrolledAncestor"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSettings: %s", err) + } + if err := d.Set("active_key_version", 
flattenAccessApprovalFolderSettingsActiveKeyVersion(res["activeKeyVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSettings: %s", err) + } + if err := d.Set("ancestor_has_active_key_version", flattenAccessApprovalFolderSettingsAncestorHasActiveKeyVersion(res["ancestorHasActiveKeyVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSettings: %s", err) + } + if err := d.Set("invalid_key_version", flattenAccessApprovalFolderSettingsInvalidKeyVersion(res["invalidKeyVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderSettings: %s", err) + } + + return nil +} + +func resourceAccessApprovalFolderSettingsUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + notificationEmailsProp, err := expandAccessApprovalFolderSettingsNotificationEmails(d.Get("notification_emails"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_emails"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationEmailsProp)) { + obj["notificationEmails"] = notificationEmailsProp + } + enrolledServicesProp, err := expandAccessApprovalFolderSettingsEnrolledServices(d.Get("enrolled_services"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enrolled_services"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enrolledServicesProp)) { + obj["enrolledServices"] = enrolledServicesProp + } + activeKeyVersionProp, err := expandAccessApprovalFolderSettingsActiveKeyVersion(d.Get("active_key_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("active_key_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
activeKeyVersionProp)) { + obj["activeKeyVersion"] = activeKeyVersionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}folders/{{folder_id}}/accessApprovalSettings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating FolderSettings %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("notification_emails") { + updateMask = append(updateMask, "notificationEmails") + } + + if d.HasChange("enrolled_services") { + updateMask = append(updateMask, "enrolledServices") + } + + if d.HasChange("active_key_version") { + updateMask = append(updateMask, "activeKeyVersion") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating FolderSettings %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating FolderSettings %q: %#v", d.Id(), res) + } + + return resourceAccessApprovalFolderSettingsRead(d, meta) +} + +func resourceAccessApprovalFolderSettingsDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["notificationEmails"] = []string{} + obj["enrolledServices"] = []string{} + obj["activeKeyVersion"] = "" + + url, err := tpgresource.ReplaceVars(d, config, 
"{{AccessApprovalBasePath}}folders/{{folder_id}}/accessApprovalSettings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Emptying FolderSettings %q: %#v", d.Id(), obj) + updateMask := []string{} + + updateMask = append(updateMask, "notificationEmails") + updateMask = append(updateMask, "enrolledServices") + updateMask = append(updateMask, "activeKeyVersion") + + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error emptying FolderSettings %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished emptying FolderSettings %q: %#v", d.Id(), res) + } + + return nil +} + +func resourceAccessApprovalFolderSettingsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "folders/(?P[^/]+)/accessApprovalSettings", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder_id}}/accessApprovalSettings") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAccessApprovalFolderSettingsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalFolderSettingsNotificationEmails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return 
schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessApprovalFolderSettingsEnrolledServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(accessApprovalEnrolledServicesHash, []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "cloud_product": flattenAccessApprovalFolderSettingsEnrolledServicesCloudProduct(original["cloudProduct"], d, config), + "enrollment_level": flattenAccessApprovalFolderSettingsEnrolledServicesEnrollmentLevel(original["enrollmentLevel"], d, config), + }) + } + return transformed +} +func flattenAccessApprovalFolderSettingsEnrolledServicesCloudProduct(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalFolderSettingsEnrolledServicesEnrollmentLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalFolderSettingsEnrolledAncestor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalFolderSettingsActiveKeyVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalFolderSettingsAncestorHasActiveKeyVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalFolderSettingsInvalidKeyVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandAccessApprovalFolderSettingsNotificationEmails(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = 
v.(*schema.Set).List() + return v, nil +} + +func expandAccessApprovalFolderSettingsEnrolledServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCloudProduct, err := expandAccessApprovalFolderSettingsEnrolledServicesCloudProduct(original["cloud_product"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudProduct); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudProduct"] = transformedCloudProduct + } + + transformedEnrollmentLevel, err := expandAccessApprovalFolderSettingsEnrolledServicesEnrollmentLevel(original["enrollment_level"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnrollmentLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enrollmentLevel"] = transformedEnrollmentLevel + } + + req = append(req, transformed) + } + return req, nil +} + +func expandAccessApprovalFolderSettingsEnrolledServicesCloudProduct(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessApprovalFolderSettingsEnrolledServicesEnrollmentLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessApprovalFolderSettingsActiveKeyVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/resource_organization_access_approval_settings.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/resource_organization_access_approval_settings.go new file mode 100644 index 0000000000..fa5f477051 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/resource_organization_access_approval_settings.go @@ -0,0 +1,532 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package accessapproval + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceAccessApprovalOrganizationSettings() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessApprovalOrganizationSettingsCreate, + Read: resourceAccessApprovalOrganizationSettingsRead, + Update: resourceAccessApprovalOrganizationSettingsUpdate, + Delete: resourceAccessApprovalOrganizationSettingsDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAccessApprovalOrganizationSettingsImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * 
time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "enrolled_services": { + Type: schema.TypeSet, + Required: true, + Description: `A list of Google Cloud Services for which the given resource has Access Approval enrolled. +Access requests for the resource given by name against any of these services contained here will be required +to have explicit approval. Enrollment can be done for individual services. + +A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded.`, + Elem: accessapprovalOrganizationSettingsEnrolledServicesSchema(), + Set: accessApprovalEnrolledServicesHash, + }, + "organization_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `ID of the organization of the access approval settings.`, + }, + "active_key_version": { + Type: schema.TypeString, + Optional: true, + Description: `The asymmetric crypto key version to use for signing approval requests. +Empty active_key_version indicates that a Google-managed key should be used for signing.`, + }, + "notification_emails": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + Description: `A list of email addresses to which notifications relating to approval requests should be sent. +Notifications relating to a resource will be sent to all emails in the settings of ancestor +resources of that resource. 
A maximum of 50 email addresses are allowed.`, + MaxItems: 50, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "ancestor_has_active_key_version": { + Type: schema.TypeBool, + Computed: true, + Description: `This field will always be unset for the organization since organizations do not have ancestors.`, + }, + "enrolled_ancestor": { + Type: schema.TypeBool, + Computed: true, + Description: `This field will always be unset for the organization since organizations do not have ancestors.`, + }, + "invalid_key_version": { + Type: schema.TypeBool, + Computed: true, + Description: `If the field is true, that indicates that there is some configuration issue with the active_key_version +configured on this Organization (e.g. it doesn't exist or the Access Approval service account doesn't have the +correct permissions on it, etc.).`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the settings. Format is "organizations/{organization_id}/accessApprovalSettings"`, + }, + }, + UseJSONNumber: true, + } +} + +func accessapprovalOrganizationSettingsEnrolledServicesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_product": { + Type: schema.TypeString, + Required: true, + Description: `The product for which Access Approval will be enrolled. Allowed values are listed (case-sensitive): + all + appengine.googleapis.com + bigquery.googleapis.com + bigtable.googleapis.com + cloudkms.googleapis.com + compute.googleapis.com + dataflow.googleapis.com + iam.googleapis.com + pubsub.googleapis.com + storage.googleapis.com`, + }, + "enrollment_level": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"BLOCK_ALL", ""}), + Description: `The enrollment level of the service. 
Default value: "BLOCK_ALL" Possible values: ["BLOCK_ALL"]`, + Default: "BLOCK_ALL", + }, + }, + } +} + +func resourceAccessApprovalOrganizationSettingsCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + notificationEmailsProp, err := expandAccessApprovalOrganizationSettingsNotificationEmails(d.Get("notification_emails"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_emails"); !tpgresource.IsEmptyValue(reflect.ValueOf(notificationEmailsProp)) && (ok || !reflect.DeepEqual(v, notificationEmailsProp)) { + obj["notificationEmails"] = notificationEmailsProp + } + enrolledServicesProp, err := expandAccessApprovalOrganizationSettingsEnrolledServices(d.Get("enrolled_services"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enrolled_services"); !tpgresource.IsEmptyValue(reflect.ValueOf(enrolledServicesProp)) && (ok || !reflect.DeepEqual(v, enrolledServicesProp)) { + obj["enrolledServices"] = enrolledServicesProp + } + activeKeyVersionProp, err := expandAccessApprovalOrganizationSettingsActiveKeyVersion(d.Get("active_key_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("active_key_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(activeKeyVersionProp)) && (ok || !reflect.DeepEqual(v, activeKeyVersionProp)) { + obj["activeKeyVersion"] = activeKeyVersionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}organizations/{{organization_id}}/accessApprovalSettings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new OrganizationSettings: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + 
billingProject = bp + } + + updateMask := []string{} + + if d.HasChange("notification_emails") { + updateMask = append(updateMask, "notificationEmails") + } + + if d.HasChange("enrolled_services") { + updateMask = append(updateMask, "enrolledServices") + } + + if d.HasChange("active_key_version") { + updateMask = append(updateMask, "activeKeyVersion") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating OrganizationSettings: %s", err) + } + if err := d.Set("name", flattenAccessApprovalOrganizationSettingsName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization_id}}/accessApprovalSettings") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating OrganizationSettings %q: %#v", d.Id(), res) + + return resourceAccessApprovalOrganizationSettingsRead(d, meta) +} + +func resourceAccessApprovalOrganizationSettingsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}organizations/{{organization_id}}/accessApprovalSettings") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the 
billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessApprovalOrganizationSettings %q", d.Id())) + } + + if err := d.Set("name", flattenAccessApprovalOrganizationSettingsName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSettings: %s", err) + } + if err := d.Set("notification_emails", flattenAccessApprovalOrganizationSettingsNotificationEmails(res["notificationEmails"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSettings: %s", err) + } + if err := d.Set("enrolled_services", flattenAccessApprovalOrganizationSettingsEnrolledServices(res["enrolledServices"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSettings: %s", err) + } + if err := d.Set("enrolled_ancestor", flattenAccessApprovalOrganizationSettingsEnrolledAncestor(res["enrolledAncestor"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSettings: %s", err) + } + if err := d.Set("active_key_version", flattenAccessApprovalOrganizationSettingsActiveKeyVersion(res["activeKeyVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSettings: %s", err) + } + if err := d.Set("ancestor_has_active_key_version", flattenAccessApprovalOrganizationSettingsAncestorHasActiveKeyVersion(res["ancestorHasActiveKeyVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSettings: %s", err) + } + if err := d.Set("invalid_key_version", flattenAccessApprovalOrganizationSettingsInvalidKeyVersion(res["invalidKeyVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationSettings: %s", err) + } + + 
return nil +} + +func resourceAccessApprovalOrganizationSettingsUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + notificationEmailsProp, err := expandAccessApprovalOrganizationSettingsNotificationEmails(d.Get("notification_emails"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_emails"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationEmailsProp)) { + obj["notificationEmails"] = notificationEmailsProp + } + enrolledServicesProp, err := expandAccessApprovalOrganizationSettingsEnrolledServices(d.Get("enrolled_services"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enrolled_services"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enrolledServicesProp)) { + obj["enrolledServices"] = enrolledServicesProp + } + activeKeyVersionProp, err := expandAccessApprovalOrganizationSettingsActiveKeyVersion(d.Get("active_key_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("active_key_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, activeKeyVersionProp)) { + obj["activeKeyVersion"] = activeKeyVersionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}organizations/{{organization_id}}/accessApprovalSettings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating OrganizationSettings %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("notification_emails") { + updateMask = append(updateMask, "notificationEmails") + } + + if d.HasChange("enrolled_services") { + updateMask = append(updateMask, "enrolledServices") + } + + if d.HasChange("active_key_version") { + 
updateMask = append(updateMask, "activeKeyVersion") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating OrganizationSettings %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating OrganizationSettings %q: %#v", d.Id(), res) + } + + return resourceAccessApprovalOrganizationSettingsRead(d, meta) +} + +func resourceAccessApprovalOrganizationSettingsDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["notificationEmails"] = []string{} + obj["enrolledServices"] = []string{} + obj["activeKeyVersion"] = "" + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}organizations/{{organization_id}}/accessApprovalSettings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Emptying OrganizationSettings %q: %#v", d.Id(), obj) + updateMask := []string{} + + updateMask = append(updateMask, "notificationEmails") + updateMask = append(updateMask, "enrolledServices") + updateMask = append(updateMask, "activeKeyVersion") + + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, 
map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error emptying OrganizationSettings %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished emptying OrganizationSettings %q: %#v", d.Id(), res) + } + + return nil +} + +func resourceAccessApprovalOrganizationSettingsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "organizations/(?P[^/]+)/accessApprovalSettings", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization_id}}/accessApprovalSettings") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAccessApprovalOrganizationSettingsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalOrganizationSettingsNotificationEmails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessApprovalOrganizationSettingsEnrolledServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(accessApprovalEnrolledServicesHash, []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from 
the api + continue + } + transformed.Add(map[string]interface{}{ + "cloud_product": flattenAccessApprovalOrganizationSettingsEnrolledServicesCloudProduct(original["cloudProduct"], d, config), + "enrollment_level": flattenAccessApprovalOrganizationSettingsEnrolledServicesEnrollmentLevel(original["enrollmentLevel"], d, config), + }) + } + return transformed +} +func flattenAccessApprovalOrganizationSettingsEnrolledServicesCloudProduct(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalOrganizationSettingsEnrolledServicesEnrollmentLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalOrganizationSettingsEnrolledAncestor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalOrganizationSettingsActiveKeyVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalOrganizationSettingsAncestorHasActiveKeyVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalOrganizationSettingsInvalidKeyVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandAccessApprovalOrganizationSettingsNotificationEmails(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandAccessApprovalOrganizationSettingsEnrolledServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := 
make(map[string]interface{}) + + transformedCloudProduct, err := expandAccessApprovalOrganizationSettingsEnrolledServicesCloudProduct(original["cloud_product"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudProduct); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudProduct"] = transformedCloudProduct + } + + transformedEnrollmentLevel, err := expandAccessApprovalOrganizationSettingsEnrolledServicesEnrollmentLevel(original["enrollment_level"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnrollmentLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enrollmentLevel"] = transformedEnrollmentLevel + } + + req = append(req, transformed) + } + return req, nil +} + +func expandAccessApprovalOrganizationSettingsEnrolledServicesCloudProduct(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessApprovalOrganizationSettingsEnrolledServicesEnrollmentLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessApprovalOrganizationSettingsActiveKeyVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/resource_project_access_approval_settings.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/resource_project_access_approval_settings.go new file mode 100644 index 0000000000..776f1cf7a6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accessapproval/resource_project_access_approval_settings.go @@ -0,0 +1,571 @@ +// Copyright (c) 
HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package accessapproval + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceAccessApprovalProjectSettings() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessApprovalProjectSettingsCreate, + Read: resourceAccessApprovalProjectSettingsRead, + Update: resourceAccessApprovalProjectSettingsUpdate, + Delete: resourceAccessApprovalProjectSettingsDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAccessApprovalProjectSettingsImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "enrolled_services": { + Type: schema.TypeSet, + Required: true, + Description: `A list of Google Cloud Services for which the given resource has Access Approval enrolled. +Access requests for the resource given by name against any of these services contained here will be required +to have explicit approval. Enrollment can only be done on an all or nothing basis. 
+ +A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded.`, + Elem: accessapprovalProjectSettingsEnrolledServicesSchema(), + Set: accessApprovalEnrolledServicesHash, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `ID of the project of the access approval settings.`, + }, + "active_key_version": { + Type: schema.TypeString, + Optional: true, + Description: `The asymmetric crypto key version to use for signing approval requests. +Empty active_key_version indicates that a Google-managed key should be used for signing. +This property will be ignored if set by an ancestor of the resource, and new non-empty values may not be set.`, + }, + "notification_emails": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + Description: `A list of email addresses to which notifications relating to approval requests should be sent. +Notifications relating to a resource will be sent to all emails in the settings of ancestor +resources of that resource. 
A maximum of 50 email addresses are allowed.`, + MaxItems: 50, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Deprecated: "Deprecated in favor of `project_id`", + Description: `Deprecated in favor of 'project_id'`, + }, + "ancestor_has_active_key_version": { + Type: schema.TypeBool, + Computed: true, + Description: `If the field is true, that indicates that an ancestor of this Project has set active_key_version.`, + }, + "enrolled_ancestor": { + Type: schema.TypeBool, + Computed: true, + Description: `If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project.`, + }, + "invalid_key_version": { + Type: schema.TypeBool, + Computed: true, + Description: `If the field is true, that indicates that there is some configuration issue with the active_key_version +configured on this Project (e.g. it doesn't exist or the Access Approval service account doesn't have the +correct permissions on it, etc.) This key version is not necessarily the effective key version at this level, +as key versions are inherited top-down.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the settings. Format is "projects/{project_id}/accessApprovalSettings"`, + }, + }, + UseJSONNumber: true, + } +} + +func accessapprovalProjectSettingsEnrolledServicesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_product": { + Type: schema.TypeString, + Required: true, + Description: `The product for which Access Approval will be enrolled. 
Allowed values are listed (case-sensitive): + all + appengine.googleapis.com + bigquery.googleapis.com + bigtable.googleapis.com + cloudkms.googleapis.com + compute.googleapis.com + dataflow.googleapis.com + iam.googleapis.com + pubsub.googleapis.com + storage.googleapis.com`, + }, + "enrollment_level": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"BLOCK_ALL", ""}), + Description: `The enrollment level of the service. Default value: "BLOCK_ALL" Possible values: ["BLOCK_ALL"]`, + Default: "BLOCK_ALL", + }, + }, + } +} + +func resourceAccessApprovalProjectSettingsCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + notificationEmailsProp, err := expandAccessApprovalProjectSettingsNotificationEmails(d.Get("notification_emails"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_emails"); !tpgresource.IsEmptyValue(reflect.ValueOf(notificationEmailsProp)) && (ok || !reflect.DeepEqual(v, notificationEmailsProp)) { + obj["notificationEmails"] = notificationEmailsProp + } + enrolledServicesProp, err := expandAccessApprovalProjectSettingsEnrolledServices(d.Get("enrolled_services"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enrolled_services"); !tpgresource.IsEmptyValue(reflect.ValueOf(enrolledServicesProp)) && (ok || !reflect.DeepEqual(v, enrolledServicesProp)) { + obj["enrolledServices"] = enrolledServicesProp + } + activeKeyVersionProp, err := expandAccessApprovalProjectSettingsActiveKeyVersion(d.Get("active_key_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("active_key_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(activeKeyVersionProp)) && (ok || !reflect.DeepEqual(v, activeKeyVersionProp)) { + 
obj["activeKeyVersion"] = activeKeyVersionProp + } + projectProp, err := expandAccessApprovalProjectSettingsProject(d.Get("project"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("project"); !tpgresource.IsEmptyValue(reflect.ValueOf(projectProp)) && (ok || !reflect.DeepEqual(v, projectProp)) { + obj["project"] = projectProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/accessApprovalSettings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ProjectSettings: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + updateMask := []string{} + + if d.HasChange("notification_emails") { + updateMask = append(updateMask, "notificationEmails") + } + + if d.HasChange("enrolled_services") { + updateMask = append(updateMask, "enrolledServices") + } + + if d.HasChange("active_key_version") { + updateMask = append(updateMask, "activeKeyVersion") + } + + if d.HasChange("project") { + updateMask = append(updateMask, "project") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ProjectSettings: %s", err) + } + if err := d.Set("name", flattenAccessApprovalProjectSettingsName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := 
tpgresource.ReplaceVars(d, config, "projects/{{project_id}}/accessApprovalSettings") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ProjectSettings %q: %#v", d.Id(), res) + + return resourceAccessApprovalProjectSettingsRead(d, meta) +} + +func resourceAccessApprovalProjectSettingsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/accessApprovalSettings") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessApprovalProjectSettings %q", d.Id())) + } + + if err := d.Set("name", flattenAccessApprovalProjectSettingsName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSettings: %s", err) + } + if err := d.Set("notification_emails", flattenAccessApprovalProjectSettingsNotificationEmails(res["notificationEmails"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSettings: %s", err) + } + if err := d.Set("enrolled_services", flattenAccessApprovalProjectSettingsEnrolledServices(res["enrolledServices"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSettings: %s", err) + } + if err := d.Set("enrolled_ancestor", flattenAccessApprovalProjectSettingsEnrolledAncestor(res["enrolledAncestor"], d, config)); err != nil { + return 
fmt.Errorf("Error reading ProjectSettings: %s", err) + } + if err := d.Set("active_key_version", flattenAccessApprovalProjectSettingsActiveKeyVersion(res["activeKeyVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSettings: %s", err) + } + if err := d.Set("ancestor_has_active_key_version", flattenAccessApprovalProjectSettingsAncestorHasActiveKeyVersion(res["ancestorHasActiveKeyVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSettings: %s", err) + } + if err := d.Set("invalid_key_version", flattenAccessApprovalProjectSettingsInvalidKeyVersion(res["invalidKeyVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSettings: %s", err) + } + if err := d.Set("project", flattenAccessApprovalProjectSettingsProject(res["project"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectSettings: %s", err) + } + + return nil +} + +func resourceAccessApprovalProjectSettingsUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + notificationEmailsProp, err := expandAccessApprovalProjectSettingsNotificationEmails(d.Get("notification_emails"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_emails"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationEmailsProp)) { + obj["notificationEmails"] = notificationEmailsProp + } + enrolledServicesProp, err := expandAccessApprovalProjectSettingsEnrolledServices(d.Get("enrolled_services"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enrolled_services"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enrolledServicesProp)) { + obj["enrolledServices"] = enrolledServicesProp + } + 
activeKeyVersionProp, err := expandAccessApprovalProjectSettingsActiveKeyVersion(d.Get("active_key_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("active_key_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, activeKeyVersionProp)) { + obj["activeKeyVersion"] = activeKeyVersionProp + } + projectProp, err := expandAccessApprovalProjectSettingsProject(d.Get("project"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("project"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, projectProp)) { + obj["project"] = projectProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/accessApprovalSettings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ProjectSettings %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("notification_emails") { + updateMask = append(updateMask, "notificationEmails") + } + + if d.HasChange("enrolled_services") { + updateMask = append(updateMask, "enrolledServices") + } + + if d.HasChange("active_key_version") { + updateMask = append(updateMask, "activeKeyVersion") + } + + if d.HasChange("project") { + updateMask = append(updateMask, "project") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return 
fmt.Errorf("Error updating ProjectSettings %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ProjectSettings %q: %#v", d.Id(), res) + } + + return resourceAccessApprovalProjectSettingsRead(d, meta) +} + +func resourceAccessApprovalProjectSettingsDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["notificationEmails"] = []string{} + obj["enrolledServices"] = []string{} + obj["activeKeyVersion"] = "" + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/accessApprovalSettings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Emptying ProjectSettings %q: %#v", d.Id(), obj) + updateMask := []string{} + + updateMask = append(updateMask, "notificationEmails") + updateMask = append(updateMask, "enrolledServices") + updateMask = append(updateMask, "activeKeyVersion") + + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error emptying ProjectSettings %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished emptying ProjectSettings %q: %#v", d.Id(), res) + } + + return nil +} + +func resourceAccessApprovalProjectSettingsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/accessApprovalSettings", + 
"(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project_id}}/accessApprovalSettings") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAccessApprovalProjectSettingsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalProjectSettingsNotificationEmails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessApprovalProjectSettingsEnrolledServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(accessApprovalEnrolledServicesHash, []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "cloud_product": flattenAccessApprovalProjectSettingsEnrolledServicesCloudProduct(original["cloudProduct"], d, config), + "enrollment_level": flattenAccessApprovalProjectSettingsEnrolledServicesEnrollmentLevel(original["enrollmentLevel"], d, config), + }) + } + return transformed +} +func flattenAccessApprovalProjectSettingsEnrolledServicesCloudProduct(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalProjectSettingsEnrolledServicesEnrollmentLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalProjectSettingsEnrolledAncestor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenAccessApprovalProjectSettingsActiveKeyVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalProjectSettingsAncestorHasActiveKeyVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalProjectSettingsInvalidKeyVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessApprovalProjectSettingsProject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandAccessApprovalProjectSettingsNotificationEmails(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandAccessApprovalProjectSettingsEnrolledServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCloudProduct, err := expandAccessApprovalProjectSettingsEnrolledServicesCloudProduct(original["cloud_product"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudProduct); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudProduct"] = transformedCloudProduct + } + + transformedEnrollmentLevel, err := expandAccessApprovalProjectSettingsEnrolledServicesEnrollmentLevel(original["enrollment_level"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnrollmentLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enrollmentLevel"] = transformedEnrollmentLevel + } + + 
req = append(req, transformed) + } + return req, nil +} + +func expandAccessApprovalProjectSettingsEnrolledServicesCloudProduct(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessApprovalProjectSettingsEnrolledServicesEnrollmentLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessApprovalProjectSettingsActiveKeyVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessApprovalProjectSettingsProject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/access_context_manager_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/access_context_manager_operation.go new file mode 100644 index 0000000000..033a68c2a2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/access_context_manager_operation.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type AccessContextManagerOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + tpgresource.CommonOperationWaiter +} + +func (w *AccessContextManagerOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.AccessContextManagerBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createAccessContextManagerWaiter(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string) (*AccessContextManagerOperationWaiter, error) { + w := &AccessContextManagerOperationWaiter{ + Config: config, + UserAgent: userAgent, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func AccessContextManagerOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + w, err := createAccessContextManagerWaiter(config, op, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func AccessContextManagerOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + if 
val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createAccessContextManagerWaiter(config, op, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/iam_access_context_manager_access_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/iam_access_context_manager_access_policy.go new file mode 100644 index 0000000000..a16a628fdc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/iam_access_context_manager_access_policy.go @@ -0,0 +1,187 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var AccessContextManagerAccessPolicyIamSchema = map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type AccessContextManagerAccessPolicyIamUpdater struct { + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func AccessContextManagerAccessPolicyIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &AccessContextManagerAccessPolicyIamUpdater{ + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func AccessContextManagerAccessPolicyIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + m, err := tpgresource.GetImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + 
if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &AccessContextManagerAccessPolicyIamUpdater{ + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *AccessContextManagerAccessPolicyIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyAccessPolicyUrl("getIamPolicy") + if err != nil { + return nil, err + } + + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *AccessContextManagerAccessPolicyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyAccessPolicyUrl("setIamPolicy") + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return 
errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *AccessContextManagerAccessPolicyIamUpdater) qualifyAccessPolicyUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{AccessContextManagerBasePath}}%s:%s", fmt.Sprintf("accessPolicies/%s", u.name), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *AccessContextManagerAccessPolicyIamUpdater) GetResourceId() string { + return fmt.Sprintf("accessPolicies/%s", u.name) +} + +func (u *AccessContextManagerAccessPolicyIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-accesscontextmanager-accesspolicy-%s", u.GetResourceId()) +} + +func (u *AccessContextManagerAccessPolicyIamUpdater) DescribeResource() string { + return fmt.Sprintf("accesscontextmanager accesspolicy %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_access_level.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_level.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_access_level.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_level.go index 23a9f2edc1..07855facc3 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_access_level.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_level.go @@ -1,3 +1,6 @@ +// 
Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package accesscontextmanager import ( "fmt" @@ -22,6 +25,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceAccessContextManagerAccessLevel() *schema.Resource { @@ -92,7 +99,7 @@ allowed.`, An empty list allows all management levels. Possible values: ["MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"]`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"}), + ValidateFunc: verify.ValidateEnum([]string{"MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"}), }, }, "allowed_encryption_statuses": { @@ -102,7 +109,7 @@ An empty list allows all management levels. Possible values: ["MANAGEMENT_UNSPEC An empty list allows all statuses. 
Possible values: ["ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"]`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"}), + ValidateFunc: verify.ValidateEnum([]string{"ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"}), }, }, "os_constraints": { @@ -115,7 +122,7 @@ An empty list allows all types and all versions.`, "os_type": { Type: schema.TypeString, Required: true, - ValidateFunc: validateEnum([]string{"OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"}), + ValidateFunc: verify.ValidateEnum([]string{"OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"}), Description: `The operating system type of the device. Possible values: ["OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"]`, }, "minimum_version": { @@ -219,7 +226,7 @@ Format: accessPolicies/{policy_id}/accessLevels/{short_name}`, "combining_function": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"AND", "OR", ""}), + ValidateFunc: verify.ValidateEnum([]string{"AND", "OR", ""}), Description: `How the conditions list should be combined to determine if a request is granted this AccessLevel. If AND is used, each Condition in conditions must be satisfied for the AccessLevel to be applied. If @@ -234,7 +241,7 @@ for the AccessLevel to be applied. Default value: "AND" Possible values: ["AND", "custom": { Type: schema.TypeList, Optional: true, - Description: `Custom access level conditions are set using the Cloud Common Expression Language to represent the necessary conditions for the level to apply to a request. 
+ Description: `Custom access level conditions are set using the Cloud Common Expression Language to represent the necessary conditions for the level to apply to a request. See CEL spec at: https://github.com/google/cel-spec.`, MaxItems: 1, Elem: &schema.Resource{ @@ -243,7 +250,7 @@ See CEL spec at: https://github.com/google/cel-spec.`, Type: schema.TypeList, Required: true, Description: `Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. -This page details the objects and attributes that are used to the build the CEL expressions for +This page details the objects and attributes that are used to the build the CEL expressions for custom access levels - https://cloud.google.com/access-context-manager/docs/custom-access-level-spec.`, MaxItems: 1, Elem: &schema.Resource{ @@ -286,8 +293,8 @@ custom access levels - https://cloud.google.com/access-context-manager/docs/cust } func resourceAccessContextManagerAccessLevelCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -296,37 +303,37 @@ func resourceAccessContextManagerAccessLevelCreate(d *schema.ResourceData, meta titleProp, err := expandAccessContextManagerAccessLevelTitle(d.Get("title"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("title"); !isEmptyValue(reflect.ValueOf(titleProp)) && (ok || !reflect.DeepEqual(v, titleProp)) { + } else if v, ok := d.GetOkExists("title"); !tpgresource.IsEmptyValue(reflect.ValueOf(titleProp)) && (ok || !reflect.DeepEqual(v, titleProp)) { obj["title"] = titleProp } descriptionProp, err := expandAccessContextManagerAccessLevelDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); 
!isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } basicProp, err := expandAccessContextManagerAccessLevelBasic(d.Get("basic"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("basic"); !isEmptyValue(reflect.ValueOf(basicProp)) && (ok || !reflect.DeepEqual(v, basicProp)) { + } else if v, ok := d.GetOkExists("basic"); !tpgresource.IsEmptyValue(reflect.ValueOf(basicProp)) && (ok || !reflect.DeepEqual(v, basicProp)) { obj["basic"] = basicProp } customProp, err := expandAccessContextManagerAccessLevelCustom(d.Get("custom"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("custom"); !isEmptyValue(reflect.ValueOf(customProp)) && (ok || !reflect.DeepEqual(v, customProp)) { + } else if v, ok := d.GetOkExists("custom"); !tpgresource.IsEmptyValue(reflect.ValueOf(customProp)) && (ok || !reflect.DeepEqual(v, customProp)) { obj["custom"] = customProp } parentProp, err := expandAccessContextManagerAccessLevelParent(d.Get("parent"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { + } else if v, ok := d.GetOkExists("parent"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { obj["parent"] = parentProp } nameProp, err := expandAccessContextManagerAccessLevelName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } @@ 
-335,7 +342,7 @@ func resourceAccessContextManagerAccessLevelCreate(d *schema.ResourceData, meta return err } - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels") + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels") if err != nil { return err } @@ -344,17 +351,25 @@ func resourceAccessContextManagerAccessLevelCreate(d *schema.ResourceData, meta billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating AccessLevel: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -378,7 +393,7 @@ func resourceAccessContextManagerAccessLevelCreate(d *schema.ResourceData, meta } // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "{{name}}") + id, err = tpgresource.ReplaceVars(d, config, "{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -390,13 +405,13 @@ func resourceAccessContextManagerAccessLevelCreate(d *schema.ResourceData, meta } func resourceAccessContextManagerAccessLevelRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") if err != nil { return err } @@ -404,13 +419,19 @@ func resourceAccessContextManagerAccessLevelRead(d *schema.ResourceData, meta in billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerAccessLevel %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerAccessLevel %q", d.Id())) } if err := d.Set("title", flattenAccessContextManagerAccessLevelTitle(res["title"], d, config)); err != nil { @@ -433,8 +454,8 @@ func resourceAccessContextManagerAccessLevelRead(d *schema.ResourceData, meta in } func resourceAccessContextManagerAccessLevelUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, 
err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -445,25 +466,25 @@ func resourceAccessContextManagerAccessLevelUpdate(d *schema.ResourceData, meta titleProp, err := expandAccessContextManagerAccessLevelTitle(d.Get("title"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("title"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, titleProp)) { + } else if v, ok := d.GetOkExists("title"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, titleProp)) { obj["title"] = titleProp } descriptionProp, err := expandAccessContextManagerAccessLevelDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } basicProp, err := expandAccessContextManagerAccessLevelBasic(d.Get("basic"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("basic"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, basicProp)) { + } else if v, ok := d.GetOkExists("basic"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, basicProp)) { obj["basic"] = basicProp } customProp, err := expandAccessContextManagerAccessLevelCustom(d.Get("custom"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("custom"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, customProp)) { + } else if v, ok := d.GetOkExists("custom"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, customProp)) { obj["custom"] = customProp } @@ -472,7 +493,7 @@ 
func resourceAccessContextManagerAccessLevelUpdate(d *schema.ResourceData, meta return err } - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") if err != nil { return err } @@ -495,19 +516,27 @@ func resourceAccessContextManagerAccessLevelUpdate(d *schema.ResourceData, meta if d.HasChange("custom") { updateMask = append(updateMask, "custom") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating AccessLevel %q: %s", d.Id(), err) @@ -527,15 +556,15 @@ func resourceAccessContextManagerAccessLevelUpdate(d *schema.ResourceData, meta } func resourceAccessContextManagerAccessLevelDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } 
billingProject := "" - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") if err != nil { return err } @@ -544,13 +573,21 @@ func resourceAccessContextManagerAccessLevelDelete(d *schema.ResourceData, meta log.Printf("[DEBUG] Deleting AccessLevel %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "AccessLevel") + return transport_tpg.HandleNotFoundError(err, d, "AccessLevel") } err = AccessContextManagerOperationWaitTime( @@ -566,10 +603,10 @@ func resourceAccessContextManagerAccessLevelDelete(d *schema.ResourceData, meta } func resourceAccessContextManagerAccessLevelImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { return nil, err } stringParts := strings.Split(d.Get("name").(string), "/") @@ -582,15 +619,15 @@ func resourceAccessContextManagerAccessLevelImport(d *schema.ResourceData, meta return []*schema.ResourceData{d}, nil } -func flattenAccessContextManagerAccessLevelTitle(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { +func flattenAccessContextManagerAccessLevelTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelBasic(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -605,15 +642,15 @@ func flattenAccessContextManagerAccessLevelBasic(v interface{}, d *schema.Resour flattenAccessContextManagerAccessLevelBasicConditions(original["conditions"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerAccessLevelBasicCombiningFunction(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenAccessContextManagerAccessLevelBasicCombiningFunction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return "AND" } return v } -func flattenAccessContextManagerAccessLevelBasicConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -636,23 +673,23 @@ func flattenAccessContextManagerAccessLevelBasicConditions(v interface{}, d *sch } return transformed } -func flattenAccessContextManagerAccessLevelBasicConditionsIpSubnetworks(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenAccessContextManagerAccessLevelBasicConditionsIpSubnetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelBasicConditionsRequiredAccessLevels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsRequiredAccessLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelBasicConditionsMembers(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsMembers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelBasicConditionsNegate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsNegate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -675,19 +712,19 @@ func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicy(v interfa flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireCorpOwned(original["requireCorpOwned"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireScreenLock(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireScreenLock(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v 
} -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedEncryptionStatuses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedEncryptionStatuses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedDeviceManagementLevels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedDeviceManagementLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraints(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -707,31 +744,31 @@ func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstrai } return transformed } -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsMinimumVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsMinimumVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsRequireVerifiedChromeOs(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsRequireVerifiedChromeOs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsOsType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsOsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireAdminApproval(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireAdminApproval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireCorpOwned(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireCorpOwned(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelBasicConditionsRegions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelBasicConditionsRegions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelCustom(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelCustom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -744,7 +781,7 @@ func flattenAccessContextManagerAccessLevelCustom(v interface{}, d *schema.Resou flattenAccessContextManagerAccessLevelCustomExpr(original["expr"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerAccessLevelCustomExpr(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelCustomExpr(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -763,35 +800,35 @@ func flattenAccessContextManagerAccessLevelCustomExpr(v interface{}, d *schema.R flattenAccessContextManagerAccessLevelCustomExprLocation(original["location"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerAccessLevelCustomExprExpression(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelCustomExprExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelCustomExprTitle(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelCustomExprTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelCustomExprDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelCustomExprDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelCustomExprLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelCustomExprLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandAccessContextManagerAccessLevelTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelTitle(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelBasic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -803,25 +840,25 @@ func expandAccessContextManagerAccessLevelBasic(v interface{}, d TerraformResour transformedCombiningFunction, err := expandAccessContextManagerAccessLevelBasicCombiningFunction(original["combining_function"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCombiningFunction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCombiningFunction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["combiningFunction"] = transformedCombiningFunction } transformedConditions, err := expandAccessContextManagerAccessLevelBasicConditions(original["conditions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedConditions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["conditions"] = transformedConditions } return transformed, nil } -func expandAccessContextManagerAccessLevelBasicCombiningFunction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicCombiningFunction(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelBasicConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -834,42 +871,42 @@ func expandAccessContextManagerAccessLevelBasicConditions(v interface{}, d Terra transformedIpSubnetworks, err := expandAccessContextManagerAccessLevelBasicConditionsIpSubnetworks(original["ip_subnetworks"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIpSubnetworks); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIpSubnetworks); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ipSubnetworks"] = transformedIpSubnetworks } transformedRequiredAccessLevels, err := expandAccessContextManagerAccessLevelBasicConditionsRequiredAccessLevels(original["required_access_levels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequiredAccessLevels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequiredAccessLevels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requiredAccessLevels"] = transformedRequiredAccessLevels } transformedMembers, err := expandAccessContextManagerAccessLevelBasicConditionsMembers(original["members"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMembers); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMembers); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["members"] = transformedMembers } transformedNegate, err := expandAccessContextManagerAccessLevelBasicConditionsNegate(original["negate"], d, config) if 
err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNegate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNegate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["negate"] = transformedNegate } transformedDevicePolicy, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicy(original["device_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDevicePolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDevicePolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["devicePolicy"] = transformedDevicePolicy } transformedRegions, err := expandAccessContextManagerAccessLevelBasicConditionsRegions(original["regions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRegions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRegions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["regions"] = transformedRegions } @@ -878,23 +915,23 @@ func expandAccessContextManagerAccessLevelBasicConditions(v interface{}, d Terra return req, nil } -func expandAccessContextManagerAccessLevelBasicConditionsIpSubnetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsIpSubnetworks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelBasicConditionsRequiredAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsRequiredAccessLevels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelBasicConditionsMembers(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsMembers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelBasicConditionsNegate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsNegate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -906,61 +943,61 @@ func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicy(v interfac transformedRequireScreenLock, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireScreenLock(original["require_screen_lock"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequireScreenLock); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequireScreenLock); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requireScreenlock"] = transformedRequireScreenLock } transformedAllowedEncryptionStatuses, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedEncryptionStatuses(original["allowed_encryption_statuses"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedEncryptionStatuses); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedEncryptionStatuses); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["allowedEncryptionStatuses"] = transformedAllowedEncryptionStatuses } transformedAllowedDeviceManagementLevels, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedDeviceManagementLevels(original["allowed_device_management_levels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedDeviceManagementLevels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedDeviceManagementLevels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowedDeviceManagementLevels"] = transformedAllowedDeviceManagementLevels } transformedOsConstraints, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraints(original["os_constraints"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOsConstraints); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOsConstraints); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["osConstraints"] = transformedOsConstraints } transformedRequireAdminApproval, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireAdminApproval(original["require_admin_approval"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequireAdminApproval); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequireAdminApproval); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requireAdminApproval"] = transformedRequireAdminApproval } transformedRequireCorpOwned, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireCorpOwned(original["require_corp_owned"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequireCorpOwned); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequireCorpOwned); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requireCorpOwned"] = transformedRequireCorpOwned } return transformed, nil } -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireScreenLock(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireScreenLock(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedEncryptionStatuses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedEncryptionStatuses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedDeviceManagementLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedDeviceManagementLevels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraints(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraints(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -973,21 +1010,21 @@ func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstrain transformedMinimumVersion, err := 
expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsMinimumVersion(original["minimum_version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinimumVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinimumVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minimumVersion"] = transformedMinimumVersion } transformedRequireVerifiedChromeOs, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsRequireVerifiedChromeOs(original["require_verified_chrome_os"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequireVerifiedChromeOs); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequireVerifiedChromeOs); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requireVerifiedChromeOs"] = transformedRequireVerifiedChromeOs } transformedOsType, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsOsType(original["os_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOsType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOsType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["osType"] = transformedOsType } @@ -996,31 +1033,31 @@ func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstrain return req, nil } -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsMinimumVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsMinimumVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsRequireVerifiedChromeOs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsRequireVerifiedChromeOs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsOsType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsOsType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireAdminApproval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireAdminApproval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireCorpOwned(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireCorpOwned(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelBasicConditionsRegions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelBasicConditionsRegions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelCustom(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { +func expandAccessContextManagerAccessLevelCustom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1032,14 +1069,14 @@ func expandAccessContextManagerAccessLevelCustom(v interface{}, d TerraformResou transformedExpr, err := expandAccessContextManagerAccessLevelCustomExpr(original["expr"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExpr); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExpr); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["expr"] = transformedExpr } return transformed, nil } -func expandAccessContextManagerAccessLevelCustomExpr(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelCustomExpr(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1051,55 +1088,55 @@ func expandAccessContextManagerAccessLevelCustomExpr(v interface{}, d TerraformR transformedExpression, err := expandAccessContextManagerAccessLevelCustomExprExpression(original["expression"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["expression"] = transformedExpression } transformedTitle, err := expandAccessContextManagerAccessLevelCustomExprTitle(original["title"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["title"] = transformedTitle } 
transformedDescription, err := expandAccessContextManagerAccessLevelCustomExprDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedLocation, err := expandAccessContextManagerAccessLevelCustomExprLocation(original["location"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["location"] = transformedLocation } return transformed, nil } -func expandAccessContextManagerAccessLevelCustomExprExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelCustomExprExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelCustomExprTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelCustomExprTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelCustomExprDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelCustomExprDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelCustomExprLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandAccessContextManagerAccessLevelCustomExprLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelParent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_level_condition.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_level_condition.go new file mode 100644 index 0000000000..34e0c4a8fd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_level_condition.go @@ -0,0 +1,904 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceAccessContextManagerAccessLevelCondition() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessContextManagerAccessLevelConditionCreate, + Read: resourceAccessContextManagerAccessLevelConditionRead, + Delete: resourceAccessContextManagerAccessLevelConditionDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "access_level": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Access Level to add this condition to.`, + }, + "device_policy": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Device specific restrictions, all restrictions must hold for +the Condition to be true. If not specified, all devices are +allowed.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_device_management_levels": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of allowed device management levels. +An empty list allows all management levels. 
Possible values: ["MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"}), + }, + }, + "allowed_encryption_statuses": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of allowed encryptions statuses. +An empty list allows all statuses. Possible values: ["ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"}), + }, + }, + "os_constraints": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of allowed OS versions. +An empty list allows all types and all versions.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "os_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"}), + Description: `The operating system type of the device. Possible values: ["OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"]`, + }, + "minimum_version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The minimum allowed OS version. If not set, any version +of this OS satisfies the constraint. 
+Format: "major.minor.patch" such as "10.5.301", "9.2.1".`, + }, + }, + }, + }, + "require_admin_approval": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether the device needs to be approved by the customer admin.`, + }, + "require_corp_owned": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether the device needs to be corp owned.`, + }, + "require_screen_lock": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether or not screenlock is required for the DevicePolicy +to be true. Defaults to false.`, + }, + }, + }, + }, + "ip_subnetworks": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of CIDR block IP subnetwork specification. May be IPv4 +or IPv6. +Note that for a CIDR IP address block, the specified IP address +portion must be properly truncated (i.e. all the host bits must +be zero) or the input is considered malformed. For example, +"192.0.2.0/24" is accepted but "192.0.2.1/24" is not. Similarly, +for IPv6, "2001:db8::/32" is accepted whereas "2001:db8::1/32" +is not. The originating IP of a request must be in one of the +listed subnets in order for this Condition to be true. +If empty, all IP addresses are allowed.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "members": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `An allowed list of members (users, service accounts). +Using groups is not supported yet. + +The signed-in user originating the request must be a part of one +of the provided members. If not specified, a request may come +from any user (logged in/not logged in, not present in any +groups, etc.). +Formats: 'user:{emailid}', 'serviceAccount:{emailid}'`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "negate": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to negate the Condition. 
If true, the Condition becomes +a NAND over its non-empty fields, each field must be false for +the Condition overall to be satisfied. Defaults to false.`, + }, + "regions": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The request must originate from one of the provided +countries/regions. +Format: A valid ISO 3166-1 alpha-2 code.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "required_access_levels": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of other access levels defined in the same Policy, +referenced by resource name. Referencing an AccessLevel which +does not exist is an error. All access levels listed must be +granted for the Condition to be true. +Format: accessPolicies/{policy_id}/accessLevels/{short_name}`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAccessContextManagerAccessLevelConditionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + ipSubnetworksProp, err := expandNestedAccessContextManagerAccessLevelConditionIpSubnetworks(d.Get("ip_subnetworks"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_subnetworks"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipSubnetworksProp)) && (ok || !reflect.DeepEqual(v, ipSubnetworksProp)) { + obj["ipSubnetworks"] = ipSubnetworksProp + } + requiredAccessLevelsProp, err := expandNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(d.Get("required_access_levels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("required_access_levels"); !tpgresource.IsEmptyValue(reflect.ValueOf(requiredAccessLevelsProp)) && (ok || !reflect.DeepEqual(v, requiredAccessLevelsProp)) { + 
obj["requiredAccessLevels"] = requiredAccessLevelsProp + } + membersProp, err := expandNestedAccessContextManagerAccessLevelConditionMembers(d.Get("members"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("members"); !tpgresource.IsEmptyValue(reflect.ValueOf(membersProp)) && (ok || !reflect.DeepEqual(v, membersProp)) { + obj["members"] = membersProp + } + negateProp, err := expandNestedAccessContextManagerAccessLevelConditionNegate(d.Get("negate"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("negate"); !tpgresource.IsEmptyValue(reflect.ValueOf(negateProp)) && (ok || !reflect.DeepEqual(v, negateProp)) { + obj["negate"] = negateProp + } + devicePolicyProp, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicy(d.Get("device_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("device_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(devicePolicyProp)) && (ok || !reflect.DeepEqual(v, devicePolicyProp)) { + obj["devicePolicy"] = devicePolicyProp + } + regionsProp, err := expandNestedAccessContextManagerAccessLevelConditionRegions(d.Get("regions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("regions"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionsProp)) && (ok || !reflect.DeepEqual(v, regionsProp)) { + obj["regions"] = regionsProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "{{access_level}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{access_level}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AccessLevelCondition: %#v", obj) + + obj, err = resourceAccessContextManagerAccessLevelConditionPatchCreateEncoder(d, meta, obj) + if err != nil { + return err + } + url, err = transport_tpg.AddQueryParams(url, 
map[string]string{"updateMask": "basic.conditions"}) + if err != nil { + return err + } + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating AccessLevelCondition: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{access_level}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = transport_tpg.PollingWaitTime(resourceAccessContextManagerAccessLevelConditionPollRead(d, meta), transport_tpg.PollCheckForExistence, "Creating AccessLevelCondition", d.Timeout(schema.TimeoutCreate), 1) + if err != nil { + return fmt.Errorf("Error waiting to create AccessLevelCondition: %s", err) + } + + log.Printf("[DEBUG] Finished creating AccessLevelCondition %q: %#v", d.Id(), res) + + return resourceAccessContextManagerAccessLevelConditionRead(d, meta) +} + +func resourceAccessContextManagerAccessLevelConditionPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{access_level}}") + if err != nil { + return nil, err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return res, err + } + res, err = flattenNestedAccessContextManagerAccessLevelCondition(d, meta, res) + if err != nil { + return nil, err + } + + if res == nil { + return nil, tpgresource.Fake404("nested", "AccessContextManagerAccessLevelCondition") + } + + return res, nil + } +} + +func resourceAccessContextManagerAccessLevelConditionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{access_level}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerAccessLevelCondition %q", d.Id())) + } + + res, err = flattenNestedAccessContextManagerAccessLevelCondition(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. 
+ log.Printf("[DEBUG] Removing AccessContextManagerAccessLevelCondition because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("ip_subnetworks", flattenNestedAccessContextManagerAccessLevelConditionIpSubnetworks(res["ipSubnetworks"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessLevelCondition: %s", err) + } + if err := d.Set("required_access_levels", flattenNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(res["requiredAccessLevels"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessLevelCondition: %s", err) + } + if err := d.Set("members", flattenNestedAccessContextManagerAccessLevelConditionMembers(res["members"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessLevelCondition: %s", err) + } + if err := d.Set("negate", flattenNestedAccessContextManagerAccessLevelConditionNegate(res["negate"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessLevelCondition: %s", err) + } + if err := d.Set("device_policy", flattenNestedAccessContextManagerAccessLevelConditionDevicePolicy(res["devicePolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessLevelCondition: %s", err) + } + if err := d.Set("regions", flattenNestedAccessContextManagerAccessLevelConditionRegions(res["regions"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessLevelCondition: %s", err) + } + + return nil +} + +func resourceAccessContextManagerAccessLevelConditionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "{{access_level}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, 
"{{AccessContextManagerBasePath}}{{access_level}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceAccessContextManagerAccessLevelConditionPatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AccessLevelCondition") + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "basic.conditions"}) + if err != nil { + return err + } + log.Printf("[DEBUG] Deleting AccessLevelCondition %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AccessLevelCondition") + } + + log.Printf("[DEBUG] Finished deleting AccessLevelCondition %q: %#v", d.Id(), res) + return nil +} + +func flattenNestedAccessContextManagerAccessLevelConditionIpSubnetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerAccessLevelConditionMembers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerAccessLevelConditionNegate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + 
original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["require_screen_lock"] = + flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireScreenLock(original["requireScreenlock"], d, config) + transformed["allowed_encryption_statuses"] = + flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedEncryptionStatuses(original["allowedEncryptionStatuses"], d, config) + transformed["allowed_device_management_levels"] = + flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedDeviceManagementLevels(original["allowedDeviceManagementLevels"], d, config) + transformed["os_constraints"] = + flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraints(original["osConstraints"], d, config) + transformed["require_admin_approval"] = + flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireAdminApproval(original["requireAdminApproval"], d, config) + transformed["require_corp_owned"] = + flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireCorpOwned(original["requireCorpOwned"], d, config) + return []interface{}{transformed} +} +func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireScreenLock(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedEncryptionStatuses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedDeviceManagementLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } 
+ l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "minimum_version": flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsMinimumVersion(original["minimumVersion"], d, config), + "os_type": flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsOsType(original["osType"], d, config), + }) + } + return transformed +} +func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsMinimumVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsOsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireAdminApproval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireCorpOwned(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerAccessLevelConditionRegions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedAccessContextManagerAccessLevelConditionIpSubnetworks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandNestedAccessContextManagerAccessLevelConditionMembers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerAccessLevelConditionNegate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerAccessLevelConditionDevicePolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRequireScreenLock, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireScreenLock(original["require_screen_lock"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequireScreenLock); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requireScreenlock"] = transformedRequireScreenLock + } + + transformedAllowedEncryptionStatuses, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedEncryptionStatuses(original["allowed_encryption_statuses"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowedEncryptionStatuses); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowedEncryptionStatuses"] = transformedAllowedEncryptionStatuses + } + + transformedAllowedDeviceManagementLevels, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedDeviceManagementLevels(original["allowed_device_management_levels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowedDeviceManagementLevels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowedDeviceManagementLevels"] = 
transformedAllowedDeviceManagementLevels + } + + transformedOsConstraints, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraints(original["os_constraints"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOsConstraints); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["osConstraints"] = transformedOsConstraints + } + + transformedRequireAdminApproval, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireAdminApproval(original["require_admin_approval"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequireAdminApproval); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requireAdminApproval"] = transformedRequireAdminApproval + } + + transformedRequireCorpOwned, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireCorpOwned(original["require_corp_owned"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequireCorpOwned); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requireCorpOwned"] = transformedRequireCorpOwned + } + + return transformed, nil +} + +func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireScreenLock(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedEncryptionStatuses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedDeviceManagementLevels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraints(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMinimumVersion, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsMinimumVersion(original["minimum_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinimumVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minimumVersion"] = transformedMinimumVersion + } + + transformedOsType, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsOsType(original["os_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOsType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["osType"] = transformedOsType + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsMinimumVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsOsType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireAdminApproval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireCorpOwned(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandNestedAccessContextManagerAccessLevelConditionRegions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedAccessContextManagerAccessLevelCondition(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["basic"] + if !ok || v == nil { + return nil, nil + } + res = v.(map[string]interface{}) + + v, ok = res["conditions"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value basic.conditions. Actual value: %v", v) + } + + _, item, err := resourceAccessContextManagerAccessLevelConditionFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceAccessContextManagerAccessLevelConditionFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedIpSubnetworks, err := expandNestedAccessContextManagerAccessLevelConditionIpSubnetworks(d.Get("ip_subnetworks"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedIpSubnetworks := flattenNestedAccessContextManagerAccessLevelConditionIpSubnetworks(expectedIpSubnetworks, d, meta.(*transport_tpg.Config)) + expectedRequiredAccessLevels, err := expandNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(d.Get("required_access_levels"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedRequiredAccessLevels := flattenNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(expectedRequiredAccessLevels, d, meta.(*transport_tpg.Config)) + expectedMembers, err := 
expandNestedAccessContextManagerAccessLevelConditionMembers(d.Get("members"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedMembers := flattenNestedAccessContextManagerAccessLevelConditionMembers(expectedMembers, d, meta.(*transport_tpg.Config)) + expectedNegate, err := expandNestedAccessContextManagerAccessLevelConditionNegate(d.Get("negate"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedNegate := flattenNestedAccessContextManagerAccessLevelConditionNegate(expectedNegate, d, meta.(*transport_tpg.Config)) + expectedDevicePolicy, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicy(d.Get("device_policy"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedDevicePolicy := flattenNestedAccessContextManagerAccessLevelConditionDevicePolicy(expectedDevicePolicy, d, meta.(*transport_tpg.Config)) + expectedRegions, err := expandNestedAccessContextManagerAccessLevelConditionRegions(d.Get("regions"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedRegions := flattenNestedAccessContextManagerAccessLevelConditionRegions(expectedRegions, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. 
+ for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemIpSubnetworks := flattenNestedAccessContextManagerAccessLevelConditionIpSubnetworks(item["ipSubnetworks"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemIpSubnetworks)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedIpSubnetworks))) && !reflect.DeepEqual(itemIpSubnetworks, expectedFlattenedIpSubnetworks) { + log.Printf("[DEBUG] Skipping item with ipSubnetworks= %#v, looking for %#v)", itemIpSubnetworks, expectedFlattenedIpSubnetworks) + continue + } + itemRequiredAccessLevels := flattenNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(item["requiredAccessLevels"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemRequiredAccessLevels)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedRequiredAccessLevels))) && !reflect.DeepEqual(itemRequiredAccessLevels, expectedFlattenedRequiredAccessLevels) { + log.Printf("[DEBUG] Skipping item with requiredAccessLevels= %#v, looking for %#v)", itemRequiredAccessLevels, expectedFlattenedRequiredAccessLevels) + continue + } + itemMembers := flattenNestedAccessContextManagerAccessLevelConditionMembers(item["members"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemMembers)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedMembers))) && !reflect.DeepEqual(itemMembers, expectedFlattenedMembers) { + log.Printf("[DEBUG] Skipping item with members= %#v, looking for %#v)", itemMembers, expectedFlattenedMembers) + continue + } + itemNegate := 
flattenNestedAccessContextManagerAccessLevelConditionNegate(item["negate"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemNegate)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedNegate))) && !reflect.DeepEqual(itemNegate, expectedFlattenedNegate) { + log.Printf("[DEBUG] Skipping item with negate= %#v, looking for %#v)", itemNegate, expectedFlattenedNegate) + continue + } + itemDevicePolicy := flattenNestedAccessContextManagerAccessLevelConditionDevicePolicy(item["devicePolicy"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemDevicePolicy)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedDevicePolicy))) && !reflect.DeepEqual(itemDevicePolicy, expectedFlattenedDevicePolicy) { + log.Printf("[DEBUG] Skipping item with devicePolicy= %#v, looking for %#v)", itemDevicePolicy, expectedFlattenedDevicePolicy) + continue + } + itemRegions := flattenNestedAccessContextManagerAccessLevelConditionRegions(item["regions"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemRegions)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedRegions))) && !reflect.DeepEqual(itemRegions, expectedFlattenedRegions) { + log.Printf("[DEBUG] Skipping item with regions= %#v, looking for %#v)", itemRegions, expectedFlattenedRegions) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. 
+func resourceAccessContextManagerAccessLevelConditionPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerAccessLevelConditionListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceAccessContextManagerAccessLevelConditionFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. + if found != nil { + return nil, fmt.Errorf("Unable to create AccessLevelCondition, existing object already found: %+v", found) + } + + // Return list with the resource to create appended + res := map[string]interface{}{ + "conditions": append(currItems, obj), + } + wrapped := map[string]interface{}{ + "basic": res, + } + res = wrapped + + return res, nil +} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. +func resourceAccessContextManagerAccessLevelConditionPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerAccessLevelConditionListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceAccessContextManagerAccessLevelConditionFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "AccessContextManagerAccessLevelCondition") + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) + res := map[string]interface{}{ + "conditions": updatedItems, + } + wrapped := map[string]interface{}{ + "basic": res, + } + res = wrapped + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. 
+func resourceAccessContextManagerAccessLevelConditionListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{access_level}}") + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + var v interface{} + var ok bool + if v, ok = res["basic"]; ok && v != nil { + res = v.(map[string]interface{}) + } else { + return nil, nil + } + + v, ok = res["conditions"] + if ok && v != nil { + ls, lsOk := v.([]interface{}) + if !lsOk { + return nil, fmt.Errorf(`expected list for nested field "conditions"`) + } + return ls, nil + } + return nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_access_levels.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_levels.go similarity index 77% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_access_levels.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_levels.go index a6838cae8a..fa47844873 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_access_levels.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_levels.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package accesscontextmanager import ( "fmt" @@ -21,6 +24,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceAccessContextManagerAccessLevels() *schema.Resource { @@ -106,7 +113,7 @@ allowed.`, An empty list allows all management levels. Possible values: ["MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"]`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"}), + ValidateFunc: verify.ValidateEnum([]string{"MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"}), }, }, "allowed_encryption_statuses": { @@ -116,7 +123,7 @@ An empty list allows all management levels. Possible values: ["MANAGEMENT_UNSPEC An empty list allows all statuses. 
Possible values: ["ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"]`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"}), + ValidateFunc: verify.ValidateEnum([]string{"ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"}), }, }, "os_constraints": { @@ -129,7 +136,7 @@ An empty list allows all types and all versions.`, "os_type": { Type: schema.TypeString, Required: true, - ValidateFunc: validateEnum([]string{"OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"}), + ValidateFunc: verify.ValidateEnum([]string{"OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"}), Description: `The operating system type of the device. Possible values: ["OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"]`, }, "minimum_version": { @@ -228,7 +235,7 @@ Format: accessPolicies/{policy_id}/accessLevels/{short_name}`, "combining_function": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"AND", "OR", ""}), + ValidateFunc: verify.ValidateEnum([]string{"AND", "OR", ""}), Description: `How the conditions list should be combined to determine if a request is granted this AccessLevel. If AND is used, each Condition in conditions must be satisfied for the AccessLevel to be applied. If @@ -242,7 +249,7 @@ for the AccessLevel to be applied. Default value: "AND" Possible values: ["AND", "custom": { Type: schema.TypeList, Optional: true, - Description: `Custom access level conditions are set using the Cloud Common Expression Language to represent the necessary conditions for the level to apply to a request. 
+ Description: `Custom access level conditions are set using the Cloud Common Expression Language to represent the necessary conditions for the level to apply to a request. See CEL spec at: https://github.com/google/cel-spec.`, MaxItems: 1, Elem: &schema.Resource{ @@ -251,7 +258,7 @@ See CEL spec at: https://github.com/google/cel-spec.`, Type: schema.TypeList, Required: true, Description: `Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. -This page details the objects and attributes that are used to the build the CEL expressions for +This page details the objects and attributes that are used to the build the CEL expressions for custom access levels - https://cloud.google.com/access-context-manager/docs/custom-access-level-spec.`, MaxItems: 1, Elem: &schema.Resource{ @@ -292,8 +299,8 @@ custom access levels - https://cloud.google.com/access-context-manager/docs/cust } func resourceAccessContextManagerAccessLevelsCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -302,11 +309,11 @@ func resourceAccessContextManagerAccessLevelsCreate(d *schema.ResourceData, meta accessLevelsProp, err := expandAccessContextManagerAccessLevelsAccessLevels(d.Get("access_levels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("access_levels"); !isEmptyValue(reflect.ValueOf(accessLevelsProp)) && (ok || !reflect.DeepEqual(v, accessLevelsProp)) { + } else if v, ok := d.GetOkExists("access_levels"); !tpgresource.IsEmptyValue(reflect.ValueOf(accessLevelsProp)) && (ok || !reflect.DeepEqual(v, accessLevelsProp)) { obj["accessLevels"] = accessLevelsProp } - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels:replaceAll") 
+ url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels:replaceAll") if err != nil { return err } @@ -315,17 +322,25 @@ func resourceAccessContextManagerAccessLevelsCreate(d *schema.ResourceData, meta billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating AccessLevels: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "{{parent}}/accessLevels") + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/accessLevels") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -347,13 +362,13 @@ func resourceAccessContextManagerAccessLevelsCreate(d *schema.ResourceData, meta } func resourceAccessContextManagerAccessLevelsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels") + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels") if err != nil { return err } @@ -361,13 +376,19 @@ func resourceAccessContextManagerAccessLevelsRead(d *schema.ResourceData, meta i billingProject := "" // err == nil indicates that the 
billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerAccessLevels %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerAccessLevels %q", d.Id())) } if err := d.Set("access_levels", flattenAccessContextManagerAccessLevelsAccessLevels(res["accessLevels"], d, config)); err != nil { @@ -378,8 +399,8 @@ func resourceAccessContextManagerAccessLevelsRead(d *schema.ResourceData, meta i } func resourceAccessContextManagerAccessLevelsUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -390,11 +411,11 @@ func resourceAccessContextManagerAccessLevelsUpdate(d *schema.ResourceData, meta accessLevelsProp, err := expandAccessContextManagerAccessLevelsAccessLevels(d.Get("access_levels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("access_levels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, accessLevelsProp)) { + } else if v, ok := d.GetOkExists("access_levels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, accessLevelsProp)) { obj["accessLevels"] = accessLevelsProp } - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels:replaceAll") + url, err := tpgresource.ReplaceVars(d, config, 
"{{AccessContextManagerBasePath}}{{parent}}/accessLevels:replaceAll") if err != nil { return err } @@ -402,11 +423,19 @@ func resourceAccessContextManagerAccessLevelsUpdate(d *schema.ResourceData, meta log.Printf("[DEBUG] Updating AccessLevels %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating AccessLevels %q: %s", d.Id(), err) @@ -426,8 +455,8 @@ func resourceAccessContextManagerAccessLevelsUpdate(d *schema.ResourceData, meta } func resourceAccessContextManagerAccessLevelsDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -435,13 +464,20 @@ func resourceAccessContextManagerAccessLevelsDelete(d *schema.ResourceData, meta obj := make(map[string]interface{}) obj["accessLevels"] = []string{} - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels:replaceAll") + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels:replaceAll") if err != nil { return err } log.Printf("[DEBUG] Deleting AccessLevels %q: %#v", d.Id(), obj) - res, err := SendRequestWithTimeout(config, "POST", "", url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error deleting AccessLevels %q: %s", d.Id(), err) @@ -461,10 +497,10 @@ func resourceAccessContextManagerAccessLevelsDelete(d *schema.ResourceData, meta } func resourceAccessContextManagerAccessLevelsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) // current import_formats can't import fields with forward slashes in their value - parts, err := getImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/(.+)"}, d, config, d.Id()) + parts, err := tpgresource.GetImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/(.+)"}, d, config, d.Id()) if err != nil { return nil, err } @@ -475,7 +511,7 @@ func resourceAccessContextManagerAccessLevelsImport(d *schema.ResourceData, meta return []*schema.ResourceData{d}, nil } -func flattenAccessContextManagerAccessLevelsAccessLevels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -497,19 +533,19 @@ func flattenAccessContextManagerAccessLevelsAccessLevels(v interface{}, d *schem } return transformed } -func flattenAccessContextManagerAccessLevelsAccessLevelsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsTitle(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsTitle(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasic(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -524,15 +560,15 @@ func flattenAccessContextManagerAccessLevelsAccessLevelsBasic(v interface{}, d * flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditions(original["conditions"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicCombiningFunction(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicCombiningFunction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return "AND" } return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -555,23 +591,23 @@ func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditions(v interf } return transformed } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsIpSubnetworks(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsIpSubnetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRequiredAccessLevels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRequiredAccessLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsMembers(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsMembers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsNegate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsNegate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -594,19 +630,19 @@ func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePol flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireCorpOwned(original["requireCorpOwned"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireScreenLock(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireScreenLock(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedEncryptionStatuses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedEncryptionStatuses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedDeviceManagementLevels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedDeviceManagementLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraints(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -625,27 +661,27 @@ func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePol } return transformed } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsMinimumVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsMinimumVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsOsType(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsOsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireAdminApproval(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireAdminApproval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireCorpOwned(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireCorpOwned(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRegions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRegions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsCustom(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsCustom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -658,7 +694,7 @@ func flattenAccessContextManagerAccessLevelsAccessLevelsCustom(v interface{}, d flattenAccessContextManagerAccessLevelsAccessLevelsCustomExpr(original["expr"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExpr(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenAccessContextManagerAccessLevelsAccessLevelsCustomExpr(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -677,23 +713,23 @@ func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExpr(v interface{} flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprLocation(original["location"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprExpression(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprTitle(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandAccessContextManagerAccessLevelsAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = 
v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -707,35 +743,35 @@ func expandAccessContextManagerAccessLevelsAccessLevels(v interface{}, d Terrafo transformedName, err := expandAccessContextManagerAccessLevelsAccessLevelsName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedTitle, err := expandAccessContextManagerAccessLevelsAccessLevelsTitle(original["title"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["title"] = transformedTitle } transformedDescription, err := expandAccessContextManagerAccessLevelsAccessLevelsDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedBasic, err := expandAccessContextManagerAccessLevelsAccessLevelsBasic(original["basic"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBasic); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBasic); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["basic"] = transformedBasic } transformedCustom, err := expandAccessContextManagerAccessLevelsAccessLevelsCustom(original["custom"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCustom); val.IsValid() && !isEmptyValue(val) { + } else if 
val := reflect.ValueOf(transformedCustom); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["custom"] = transformedCustom } @@ -744,19 +780,19 @@ func expandAccessContextManagerAccessLevelsAccessLevels(v interface{}, d Terrafo return req, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -768,25 +804,25 @@ func expandAccessContextManagerAccessLevelsAccessLevelsBasic(v interface{}, d Te transformedCombiningFunction, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicCombiningFunction(original["combining_function"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCombiningFunction); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCombiningFunction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["combiningFunction"] = transformedCombiningFunction } transformedConditions, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditions(original["conditions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedConditions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["conditions"] = transformedConditions } return transformed, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicCombiningFunction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicCombiningFunction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -799,42 +835,42 @@ func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditions(v interfa transformedIpSubnetworks, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsIpSubnetworks(original["ip_subnetworks"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIpSubnetworks); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIpSubnetworks); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ipSubnetworks"] = transformedIpSubnetworks } 
transformedRequiredAccessLevels, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRequiredAccessLevels(original["required_access_levels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequiredAccessLevels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequiredAccessLevels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requiredAccessLevels"] = transformedRequiredAccessLevels } transformedMembers, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsMembers(original["members"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMembers); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMembers); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["members"] = transformedMembers } transformedNegate, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsNegate(original["negate"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNegate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNegate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["negate"] = transformedNegate } transformedDevicePolicy, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicy(original["device_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDevicePolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDevicePolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["devicePolicy"] = transformedDevicePolicy } transformedRegions, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRegions(original["regions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRegions); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRegions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["regions"] = transformedRegions } @@ -843,23 +879,23 @@ func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditions(v interfa return req, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsIpSubnetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsIpSubnetworks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRequiredAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRequiredAccessLevels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsMembers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsMembers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsNegate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsNegate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -871,61 +907,61 @@ func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePoli transformedRequireScreenLock, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireScreenLock(original["require_screen_lock"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequireScreenLock); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequireScreenLock); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requireScreenlock"] = transformedRequireScreenLock } transformedAllowedEncryptionStatuses, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedEncryptionStatuses(original["allowed_encryption_statuses"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedEncryptionStatuses); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedEncryptionStatuses); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowedEncryptionStatuses"] = transformedAllowedEncryptionStatuses } transformedAllowedDeviceManagementLevels, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedDeviceManagementLevels(original["allowed_device_management_levels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedDeviceManagementLevels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedDeviceManagementLevels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowedDeviceManagementLevels"] = transformedAllowedDeviceManagementLevels } 
transformedOsConstraints, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraints(original["os_constraints"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOsConstraints); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOsConstraints); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["osConstraints"] = transformedOsConstraints } transformedRequireAdminApproval, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireAdminApproval(original["require_admin_approval"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequireAdminApproval); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequireAdminApproval); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requireAdminApproval"] = transformedRequireAdminApproval } transformedRequireCorpOwned, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireCorpOwned(original["require_corp_owned"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequireCorpOwned); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequireCorpOwned); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requireCorpOwned"] = transformedRequireCorpOwned } return transformed, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireScreenLock(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireScreenLock(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedEncryptionStatuses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedEncryptionStatuses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedDeviceManagementLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedDeviceManagementLevels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraints(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraints(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -938,14 +974,14 @@ func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePoli transformedMinimumVersion, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsMinimumVersion(original["minimum_version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinimumVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinimumVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minimumVersion"] = transformedMinimumVersion } transformedOsType, err := 
expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsOsType(original["os_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOsType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOsType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["osType"] = transformedOsType } @@ -954,27 +990,27 @@ func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePoli return req, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsMinimumVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsMinimumVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsOsType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsOsType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireAdminApproval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireAdminApproval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireCorpOwned(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireCorpOwned(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRegions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRegions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsCustom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsCustom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -986,14 +1022,14 @@ func expandAccessContextManagerAccessLevelsAccessLevelsCustom(v interface{}, d T transformedExpr, err := expandAccessContextManagerAccessLevelsAccessLevelsCustomExpr(original["expr"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExpr); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExpr); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["expr"] = transformedExpr } return transformed, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsCustomExpr(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsCustomExpr(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1005,46 +1041,46 @@ func expandAccessContextManagerAccessLevelsAccessLevelsCustomExpr(v interface{}, transformedExpression, err := 
expandAccessContextManagerAccessLevelsAccessLevelsCustomExprExpression(original["expression"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["expression"] = transformedExpression } transformedTitle, err := expandAccessContextManagerAccessLevelsAccessLevelsCustomExprTitle(original["title"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["title"] = transformedTitle } transformedDescription, err := expandAccessContextManagerAccessLevelsAccessLevelsCustomExprDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedLocation, err := expandAccessContextManagerAccessLevelsAccessLevelsCustomExprLocation(original["location"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["location"] = transformedLocation } return transformed, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsCustomExprExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsCustomExprExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { 
return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsCustomExprTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsCustomExprTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsCustomExprDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsCustomExprDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerAccessLevelsAccessLevelsCustomExprLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerAccessLevelsAccessLevelsCustomExprLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_policy.go new file mode 100644 index 0000000000..46ba93d541 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_policy.go @@ -0,0 +1,425 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceAccessContextManagerAccessPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessContextManagerAccessPolicyCreate, + Read: resourceAccessContextManagerAccessPolicyRead, + Update: resourceAccessContextManagerAccessPolicyUpdate, + Delete: resourceAccessContextManagerAccessPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAccessContextManagerAccessPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The parent of this AccessPolicy in the Cloud Resource Hierarchy. +Format: organizations/{organization_id}`, + }, + "title": { + Type: schema.TypeString, + Required: true, + Description: `Human readable title. 
Does not affect behavior.`, + }, + "scopes": { + Type: schema.TypeList, + Optional: true, + Description: `Folder or project on which this policy is applicable. +Format: folders/{{folder_id}} or projects/{{project_id}}`, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the AccessPolicy was created in UTC.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Resource name of the AccessPolicy. Format: {policy_id}`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the AccessPolicy was updated in UTC.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAccessContextManagerAccessPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + parentProp, err := expandAccessContextManagerAccessPolicyParent(d.Get("parent"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { + obj["parent"] = parentProp + } + titleProp, err := expandAccessContextManagerAccessPolicyTitle(d.Get("title"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("title"); !tpgresource.IsEmptyValue(reflect.ValueOf(titleProp)) && (ok || !reflect.DeepEqual(v, titleProp)) { + obj["title"] = titleProp + } + scopesProp, err := expandAccessContextManagerAccessPolicyScopes(d.Get("scopes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("scopes"); !tpgresource.IsEmptyValue(reflect.ValueOf(scopesProp)) && (ok || !reflect.DeepEqual(v, scopesProp)) { + obj["scopes"] = scopesProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{AccessContextManagerBasePath}}accessPolicies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AccessPolicy: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating AccessPolicy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = AccessContextManagerOperationWaitTimeWithResponse( + config, res, &opRes, "Creating AccessPolicy", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create AccessPolicy: %s", err) + } + + if err := d.Set("name", flattenAccessContextManagerAccessPolicyName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // The operation for this resource contains the generated name that we need + // in order to perform a READ. We need to access the object inside of it as + // a map[string]interface, so let's do that. 
+ + resp := res["response"].(map[string]interface{}) + name := tpgresource.GetResourceNameFromSelfLink(resp["name"].(string)) + log.Printf("[DEBUG] Setting AccessPolicy name, id to %s", name) + if err := d.Set("name", name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name) + + log.Printf("[DEBUG] Finished creating AccessPolicy %q: %#v", d.Id(), res) + + return resourceAccessContextManagerAccessPolicyRead(d, meta) +} + +func resourceAccessContextManagerAccessPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}accessPolicies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerAccessPolicy %q", d.Id())) + } + + if err := d.Set("name", flattenAccessContextManagerAccessPolicyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessPolicy: %s", err) + } + if err := d.Set("create_time", flattenAccessContextManagerAccessPolicyCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessPolicy: %s", err) + } + if err := d.Set("update_time", flattenAccessContextManagerAccessPolicyUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessPolicy: %s", err) + } + if err := d.Set("parent", 
flattenAccessContextManagerAccessPolicyParent(res["parent"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessPolicy: %s", err) + } + if err := d.Set("title", flattenAccessContextManagerAccessPolicyTitle(res["title"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessPolicy: %s", err) + } + if err := d.Set("scopes", flattenAccessContextManagerAccessPolicyScopes(res["scopes"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessPolicy: %s", err) + } + + return nil +} + +func resourceAccessContextManagerAccessPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + titleProp, err := expandAccessContextManagerAccessPolicyTitle(d.Get("title"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("title"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, titleProp)) { + obj["title"] = titleProp + } + scopesProp, err := expandAccessContextManagerAccessPolicyScopes(d.Get("scopes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("scopes"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, scopesProp)) { + obj["scopes"] = scopesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}accessPolicies/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating AccessPolicy %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("title") { + updateMask = append(updateMask, "title") + } + + if d.HasChange("scopes") { + updateMask = append(updateMask, "scopes") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, 
map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating AccessPolicy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating AccessPolicy %q: %#v", d.Id(), res) + } + + err = AccessContextManagerOperationWaitTime( + config, res, "Updating AccessPolicy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAccessContextManagerAccessPolicyRead(d, meta) +} + +func resourceAccessContextManagerAccessPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}accessPolicies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AccessPolicy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AccessPolicy") + } + + err = AccessContextManagerOperationWaitTime( + 
config, res, "Deleting AccessPolicy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting AccessPolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceAccessContextManagerAccessPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAccessContextManagerAccessPolicyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenAccessContextManagerAccessPolicyCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerAccessPolicyUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerAccessPolicyParent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerAccessPolicyTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerAccessPolicyScopes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandAccessContextManagerAccessPolicyParent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessContextManagerAccessPolicyTitle(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessContextManagerAccessPolicyScopes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_policy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_policy_sweeper.go new file mode 100644 index 0000000000..2133ad6092 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_access_policy_sweeper.go @@ -0,0 +1,83 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package accesscontextmanager + +import ( + "context" + "fmt" + "log" + neturl "net/url" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("gcp_access_context_manager_policy", testSweepAccessContextManagerPolicies) +} + +func testSweepAccessContextManagerPolicies(region string) error { + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Fatalf("error getting shared config for region %q: %s", region, err) + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Fatalf("error loading and validating shared config for region %q: %s", region, err) + } + + testOrg := envvar.GetTestOrgFromEnv(nil) + if testOrg == "" { + log.Printf("test org not set for test environment, skip sweep") + return nil + } + + log.Printf("[DEBUG] Listing Access Policies for 
org %q", testOrg) + + parent := neturl.QueryEscape(fmt.Sprintf("organizations/%s", testOrg)) + listUrl := fmt.Sprintf("%saccessPolicies?parent=%s", config.AccessContextManagerBasePath, parent) + + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil && !transport_tpg.IsGoogleApiErrorWithCode(err, 404) { + log.Printf("unable to list AccessPolicies for organization %q: %v", testOrg, err) + return nil + } + var policies []interface{} + if resp != nil { + if v, ok := resp["accessPolicies"]; ok { + policies = v.([]interface{}) + } + } + + if len(policies) == 0 { + log.Printf("[DEBUG] no access policies found, exiting sweeper") + return nil + } + if len(policies) > 1 { + log.Printf("unexpected - more than one access policies found, change the tests") + return nil + } + + policy := policies[0].(map[string]interface{}) + log.Printf("[DEBUG] Deleting test Access Policies %q", policy["name"]) + + policyUrl := config.AccessContextManagerBasePath + policy["name"].(string) + if _, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + RawURL: policyUrl, + UserAgent: config.UserAgent, + }); err != nil && !transport_tpg.IsGoogleApiErrorWithCode(err, 404) { + log.Printf("unable to delete access policy %q", policy["name"].(string)) + return nil + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_authorized_orgs_desc.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_authorized_orgs_desc.go new file mode 100644 index 0000000000..53b0ac2d42 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_authorized_orgs_desc.go @@ -0,0 +1,495 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceAccessContextManagerAuthorizedOrgsDesc() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessContextManagerAuthorizedOrgsDescCreate, + Read: resourceAccessContextManagerAuthorizedOrgsDescRead, + Update: resourceAccessContextManagerAuthorizedOrgsDescUpdate, + Delete: resourceAccessContextManagerAuthorizedOrgsDescDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAccessContextManagerAuthorizedOrgsDescImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Resource name for the 
'AuthorizedOrgsDesc'. Format: +'accessPolicies/{access_policy}/authorizedOrgsDescs/{authorized_orgs_desc}'. +The 'authorized_orgs_desc' component must begin with a letter, followed by +alphanumeric characters or '_'. +After you create an 'AuthorizedOrgsDesc', you cannot change its 'name'.`, + }, + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. Resource name for the access policy which owns this 'AuthorizedOrgsDesc'.`, + }, + "asset_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ASSET_TYPE_DEVICE", "ASSET_TYPE_CREDENTIAL_STRENGTH", ""}), + Description: `The type of entities that need to use the authorization relationship during +evaluation, such as a device. Valid values are "ASSET_TYPE_DEVICE" and +"ASSET_TYPE_CREDENTIAL_STRENGTH". Possible values: ["ASSET_TYPE_DEVICE", "ASSET_TYPE_CREDENTIAL_STRENGTH"]`, + }, + "authorization_direction": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"AUTHORIZATION_DIRECTION_TO", "AUTHORIZATION_DIRECTION_FROM", ""}), + Description: `The direction of the authorization relationship between this organization +and the organizations listed in the "orgs" field. The valid values for this +field include the following: + +AUTHORIZATION_DIRECTION_FROM: Allows this organization to evaluate traffic +in the organizations listed in the 'orgs' field. + +AUTHORIZATION_DIRECTION_TO: Allows the organizations listed in the 'orgs' +field to evaluate the traffic in this organization. + +For the authorization relationship to take effect, all of the organizations +must authorize and specify the appropriate relationship direction. 
For +example, if organization A authorized organization B and C to evaluate its +traffic, by specifying "AUTHORIZATION_DIRECTION_TO" as the authorization +direction, organizations B and C must specify +"AUTHORIZATION_DIRECTION_FROM" as the authorization direction in their +"AuthorizedOrgsDesc" resource. Possible values: ["AUTHORIZATION_DIRECTION_TO", "AUTHORIZATION_DIRECTION_FROM"]`, + }, + "authorization_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"AUTHORIZATION_TYPE_TRUST", ""}), + Description: `A granular control type for authorization levels. Valid value is "AUTHORIZATION_TYPE_TRUST". Possible values: ["AUTHORIZATION_TYPE_TRUST"]`, + }, + "orgs": { + Type: schema.TypeList, + Optional: true, + Description: `The list of organization ids in this AuthorizedOrgsDesc. +Format: 'organizations/' +Example: 'organizations/123456'`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the AuthorizedOrgsDesc was created in UTC.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the AuthorizedOrgsDesc was updated in UTC.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAccessContextManagerAuthorizedOrgsDescCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + parentProp, err := expandAccessContextManagerAuthorizedOrgsDescParent(d.Get("parent"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { + obj["parent"] = parentProp + } + nameProp, err := expandAccessContextManagerAuthorizedOrgsDescName(d.Get("name"), d, config) + if err != 
nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + orgsProp, err := expandAccessContextManagerAuthorizedOrgsDescOrgs(d.Get("orgs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("orgs"); !tpgresource.IsEmptyValue(reflect.ValueOf(orgsProp)) && (ok || !reflect.DeepEqual(v, orgsProp)) { + obj["orgs"] = orgsProp + } + assetTypeProp, err := expandAccessContextManagerAuthorizedOrgsDescAssetType(d.Get("asset_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("asset_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(assetTypeProp)) && (ok || !reflect.DeepEqual(v, assetTypeProp)) { + obj["assetType"] = assetTypeProp + } + authorizationDirectionProp, err := expandAccessContextManagerAuthorizedOrgsDescAuthorizationDirection(d.Get("authorization_direction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("authorization_direction"); !tpgresource.IsEmptyValue(reflect.ValueOf(authorizationDirectionProp)) && (ok || !reflect.DeepEqual(v, authorizationDirectionProp)) { + obj["authorizationDirection"] = authorizationDirectionProp + } + authorizationTypeProp, err := expandAccessContextManagerAuthorizedOrgsDescAuthorizationType(d.Get("authorization_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("authorization_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(authorizationTypeProp)) && (ok || !reflect.DeepEqual(v, authorizationTypeProp)) { + obj["authorizationType"] = authorizationTypeProp + } + + obj, err = resourceAccessContextManagerAuthorizedOrgsDescEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/authorizedOrgsDescs") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AuthorizedOrgsDesc: 
%#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating AuthorizedOrgsDesc: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = AccessContextManagerOperationWaitTimeWithResponse( + config, res, &opRes, "Creating AuthorizedOrgsDesc", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create AuthorizedOrgsDesc: %s", err) + } + + if err := d.Set("name", flattenAccessContextManagerAuthorizedOrgsDescName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // This is useful if the resource in question doesn't have a perfectly consistent API + // That is, the Operation for Create might return before the Get operation shows the + // completed state of the resource. 
+ time.Sleep(2 * time.Minute) + + log.Printf("[DEBUG] Finished creating AuthorizedOrgsDesc %q: %#v", d.Id(), res) + + return resourceAccessContextManagerAuthorizedOrgsDescRead(d, meta) +} + +func resourceAccessContextManagerAuthorizedOrgsDescRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerAuthorizedOrgsDesc %q", d.Id())) + } + + if err := d.Set("create_time", flattenAccessContextManagerAuthorizedOrgsDescCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) + } + if err := d.Set("update_time", flattenAccessContextManagerAuthorizedOrgsDescUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) + } + if err := d.Set("name", flattenAccessContextManagerAuthorizedOrgsDescName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) + } + if err := d.Set("orgs", flattenAccessContextManagerAuthorizedOrgsDescOrgs(res["orgs"], d, config)); err != nil { + return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) + } + if err := d.Set("asset_type", flattenAccessContextManagerAuthorizedOrgsDescAssetType(res["assetType"], d, config)); err != nil { 
+ return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) + } + if err := d.Set("authorization_direction", flattenAccessContextManagerAuthorizedOrgsDescAuthorizationDirection(res["authorizationDirection"], d, config)); err != nil { + return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) + } + if err := d.Set("authorization_type", flattenAccessContextManagerAuthorizedOrgsDescAuthorizationType(res["authorizationType"], d, config)); err != nil { + return fmt.Errorf("Error reading AuthorizedOrgsDesc: %s", err) + } + + return nil +} + +func resourceAccessContextManagerAuthorizedOrgsDescUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + orgsProp, err := expandAccessContextManagerAuthorizedOrgsDescOrgs(d.Get("orgs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("orgs"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, orgsProp)) { + obj["orgs"] = orgsProp + } + + obj, err = resourceAccessContextManagerAuthorizedOrgsDescEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating AuthorizedOrgsDesc %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("orgs") { + updateMask = append(updateMask, "orgs") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp 
+ } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating AuthorizedOrgsDesc %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating AuthorizedOrgsDesc %q: %#v", d.Id(), res) + } + + err = AccessContextManagerOperationWaitTime( + config, res, "Updating AuthorizedOrgsDesc", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAccessContextManagerAuthorizedOrgsDescRead(d, meta) +} + +func resourceAccessContextManagerAuthorizedOrgsDescDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AuthorizedOrgsDesc %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AuthorizedOrgsDesc") + } + + err = AccessContextManagerOperationWaitTime( + config, res, "Deleting AuthorizedOrgsDesc", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting AuthorizedOrgsDesc %q: %#v", d.Id(), res) + return nil +} + +func 
resourceAccessContextManagerAuthorizedOrgsDescImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + stringParts := strings.Split(d.Get("name").(string), "/") + if len(stringParts) < 2 { + return nil, fmt.Errorf("Error parsing parent name. Should be in form accessPolicies/{{policy_id}}/authorizedOrgsDescs/{{short_name}}") + } + if err := d.Set("parent", fmt.Sprintf("%s/%s", stringParts[0], stringParts[1])); err != nil { + return nil, fmt.Errorf("Error setting parent, %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenAccessContextManagerAuthorizedOrgsDescCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerAuthorizedOrgsDescUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerAuthorizedOrgsDescName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerAuthorizedOrgsDescOrgs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerAuthorizedOrgsDescAssetType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerAuthorizedOrgsDescAuthorizationDirection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerAuthorizedOrgsDescAuthorizationType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandAccessContextManagerAuthorizedOrgsDescParent(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessContextManagerAuthorizedOrgsDescName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessContextManagerAuthorizedOrgsDescOrgs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessContextManagerAuthorizedOrgsDescAssetType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessContextManagerAuthorizedOrgsDescAuthorizationDirection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessContextManagerAuthorizedOrgsDescAuthorizationType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceAccessContextManagerAuthorizedOrgsDescEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + delete(obj, "parent") + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_egress_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_egress_policy.go new file mode 100644 index 0000000000..22f2811d12 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_egress_policy.go @@ -0,0 +1,451 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceAccessContextManagerEgressPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessContextManagerEgressPolicyCreate, + Read: resourceAccessContextManagerEgressPolicyRead, + Delete: resourceAccessContextManagerEgressPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAccessContextManagerEgressPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "egress_policy_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Service Perimeter to add this resource to.`, + }, + "resource": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A GCP resource that is inside of the service perimeter.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAccessContextManagerEgressPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + 
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + resourceProp, err := expandNestedAccessContextManagerEgressPolicyResource(d.Get("resource"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("resource"); !tpgresource.IsEmptyValue(reflect.ValueOf(resourceProp)) && (ok || !reflect.DeepEqual(v, resourceProp)) { + obj["resource"] = resourceProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{egress_policy_name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new EgressPolicy: %#v", obj) + + obj, err = resourceAccessContextManagerEgressPolicyPatchCreateEncoder(d, meta, obj) + if err != nil { + return err + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "status.resources"}) + if err != nil { + return err + } + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating EgressPolicy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{egress_policy_name}}/{{resource}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = AccessContextManagerOperationWaitTimeWithResponse( + config, res, &opRes, "Creating EgressPolicy", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't 
actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create EgressPolicy: %s", err) + } + + if _, ok := opRes["status"]; ok { + opRes, err = flattenNestedAccessContextManagerEgressPolicy(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error getting nested object from operation response: %s", err) + } + if opRes == nil { + // Object isn't there any more - remove it from the state. + return fmt.Errorf("Error decoding response from operation, could not find nested object") + } + } + if err := d.Set("resource", flattenNestedAccessContextManagerEgressPolicyResource(opRes["resource"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{egress_policy_name}}/{{resource}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating EgressPolicy %q: %#v", d.Id(), res) + + return resourceAccessContextManagerEgressPolicyRead(d, meta) +} + +func resourceAccessContextManagerEgressPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{egress_policy_name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerEgressPolicy %q", d.Id())) + } + + res, err = 
flattenNestedAccessContextManagerEgressPolicy(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing AccessContextManagerEgressPolicy because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("resource", flattenNestedAccessContextManagerEgressPolicyResource(res["resource"], d, config)); err != nil { + return fmt.Errorf("Error reading EgressPolicy: %s", err) + } + + return nil +} + +func resourceAccessContextManagerEgressPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{egress_policy_name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceAccessContextManagerEgressPolicyPatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "EgressPolicy") + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "status.resources"}) + if err != nil { + return err + } + log.Printf("[DEBUG] Deleting EgressPolicy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "EgressPolicy") + } + + err = AccessContextManagerOperationWaitTime( + config, res, "Deleting EgressPolicy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + 
return err + } + + log.Printf("[DEBUG] Finished deleting EgressPolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceAccessContextManagerEgressPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + parts, err := tpgresource.GetImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/servicePerimeters/(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return nil, err + } + + if err := d.Set("perimeter", fmt.Sprintf("accessPolicies/%s/servicePerimeters/%s", parts["accessPolicy"], parts["perimeter"])); err != nil { + return nil, fmt.Errorf("Error setting perimeter: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenNestedAccessContextManagerEgressPolicyResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedAccessContextManagerEgressPolicyResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedAccessContextManagerEgressPolicy(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["status"] + if !ok || v == nil { + return nil, nil + } + res = v.(map[string]interface{}) + + v, ok = res["resources"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value status.resources. 
Actual value: %v", v) + } + + _, item, err := resourceAccessContextManagerEgressPolicyFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceAccessContextManagerEgressPolicyFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedResource, err := expandNestedAccessContextManagerEgressPolicyResource(d.Get("resource"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedResource := flattenNestedAccessContextManagerEgressPolicyResource(expectedResource, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + // List response only contains the ID - construct a response object. + item := map[string]interface{}{ + "resource": itemRaw, + } + + itemResource := flattenNestedAccessContextManagerEgressPolicyResource(item["resource"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemResource)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedResource))) && !reflect.DeepEqual(itemResource, expectedFlattenedResource) { + log.Printf("[DEBUG] Skipping item with resource= %#v, looking for %#v)", itemResource, expectedFlattenedResource) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. 
+func resourceAccessContextManagerEgressPolicyPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerEgressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceAccessContextManagerEgressPolicyFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. + if found != nil { + return nil, fmt.Errorf("Unable to create EgressPolicy, existing object already found: %+v", found) + } + + // Return list with the resource to create appended + res := map[string]interface{}{ + "resources": append(currItems, obj["resource"]), + } + wrapped := map[string]interface{}{ + "status": res, + } + res = wrapped + + return res, nil +} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. +func resourceAccessContextManagerEgressPolicyPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerEgressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceAccessContextManagerEgressPolicyFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "AccessContextManagerEgressPolicy") + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) + res := map[string]interface{}{ + "resources": updatedItems, + } + wrapped := map[string]interface{}{ + "status": res, + } + res = wrapped + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. 
+func resourceAccessContextManagerEgressPolicyListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{egress_policy_name}}") + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + var v interface{} + var ok bool + if v, ok = res["status"]; ok && v != nil { + res = v.(map[string]interface{}) + } else { + return nil, nil + } + + v, ok = res["resources"] + if ok && v != nil { + ls, lsOk := v.([]interface{}) + if !lsOk { + return nil, fmt.Errorf(`expected list for nested field "resources"`) + } + return ls, nil + } + return nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_gcp_user_access_binding.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_gcp_user_access_binding.go new file mode 100644 index 0000000000..21325530c6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_gcp_user_access_binding.go @@ -0,0 +1,358 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceAccessContextManagerGcpUserAccessBinding() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessContextManagerGcpUserAccessBindingCreate, + Read: resourceAccessContextManagerGcpUserAccessBindingRead, + Update: resourceAccessContextManagerGcpUserAccessBindingUpdate, + Delete: resourceAccessContextManagerGcpUserAccessBindingDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAccessContextManagerGcpUserAccessBindingImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "access_levels": { + Type: schema.TypeList, + Required: true, + Description: `Required. Access level that a user must have to be granted access. Only one access level is supported, not multiple. This repeated field must have exactly one element. 
Example: "accessPolicies/9522/accessLevels/device_trusted"`, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "group_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. Immutable. Google Group id whose members are subject to this binding's restrictions. See "id" in the G Suite Directory API's Groups resource. If a group's email address/alias is changed, this resource will continue to point at the changed group. This field does not accept group email addresses or aliases. Example: "01d520gv4vjcrht"`, + }, + "organization_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. ID of the parent organization.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Immutable. Assigned by the server during creation. The last segment has an arbitrary length and has only URI unreserved characters (as defined by RFC 3986 Section 2.3). Should not be specified by the client during creation. 
Example: "organizations/256/gcpUserAccessBindings/b3-BhcX_Ud5N"`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAccessContextManagerGcpUserAccessBindingCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + groupKeyProp, err := expandAccessContextManagerGcpUserAccessBindingGroupKey(d.Get("group_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("group_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(groupKeyProp)) && (ok || !reflect.DeepEqual(v, groupKeyProp)) { + obj["groupKey"] = groupKeyProp + } + accessLevelsProp, err := expandAccessContextManagerGcpUserAccessBindingAccessLevels(d.Get("access_levels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("access_levels"); !tpgresource.IsEmptyValue(reflect.ValueOf(accessLevelsProp)) && (ok || !reflect.DeepEqual(v, accessLevelsProp)) { + obj["accessLevels"] = accessLevelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}organizations/{{organization_id}}/gcpUserAccessBindings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new GcpUserAccessBinding: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating GcpUserAccessBinding: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return 
fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = AccessContextManagerOperationWaitTimeWithResponse( + config, res, &opRes, "Creating GcpUserAccessBinding", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create GcpUserAccessBinding: %s", err) + } + + if err := d.Set("name", flattenAccessContextManagerGcpUserAccessBindingName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating GcpUserAccessBinding %q: %#v", d.Id(), res) + + return resourceAccessContextManagerGcpUserAccessBindingRead(d, meta) +} + +func resourceAccessContextManagerGcpUserAccessBindingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerGcpUserAccessBinding %q", d.Id())) + } + + if err := d.Set("name", 
flattenAccessContextManagerGcpUserAccessBindingName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading GcpUserAccessBinding: %s", err) + } + if err := d.Set("group_key", flattenAccessContextManagerGcpUserAccessBindingGroupKey(res["groupKey"], d, config)); err != nil { + return fmt.Errorf("Error reading GcpUserAccessBinding: %s", err) + } + if err := d.Set("access_levels", flattenAccessContextManagerGcpUserAccessBindingAccessLevels(res["accessLevels"], d, config)); err != nil { + return fmt.Errorf("Error reading GcpUserAccessBinding: %s", err) + } + + return nil +} + +func resourceAccessContextManagerGcpUserAccessBindingUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + accessLevelsProp, err := expandAccessContextManagerGcpUserAccessBindingAccessLevels(d.Get("access_levels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("access_levels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, accessLevelsProp)) { + obj["accessLevels"] = accessLevelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating GcpUserAccessBinding %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("access_levels") { + updateMask = append(updateMask, "accessLevels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + 
billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating GcpUserAccessBinding %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating GcpUserAccessBinding %q: %#v", d.Id(), res) + } + + err = AccessContextManagerOperationWaitTime( + config, res, "Updating GcpUserAccessBinding", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAccessContextManagerGcpUserAccessBindingRead(d, meta) +} + +func resourceAccessContextManagerGcpUserAccessBindingDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting GcpUserAccessBinding %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "GcpUserAccessBinding") + } + + err = AccessContextManagerOperationWaitTime( + config, res, "Deleting GcpUserAccessBinding", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting GcpUserAccessBinding %q: %#v", 
d.Id(), res) + return nil +} + +func resourceAccessContextManagerGcpUserAccessBindingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + name := d.Get("name").(string) + + if err := d.Set("name", name); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name) + return []*schema.ResourceData{d}, nil +} + +func flattenAccessContextManagerGcpUserAccessBindingName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerGcpUserAccessBindingGroupKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerGcpUserAccessBindingAccessLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandAccessContextManagerGcpUserAccessBindingGroupKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAccessContextManagerGcpUserAccessBindingAccessLevels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_gcp_user_access_binding_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_gcp_user_access_binding_sweeper.go new file mode 100644 index 0000000000..bc7a691885 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_gcp_user_access_binding_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("AccessContextManagerGcpUserAccessBinding", testSweepAccessContextManagerGcpUserAccessBinding) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepAccessContextManagerGcpUserAccessBinding(region string) error { + resourceName := "AccessContextManagerGcpUserAccessBinding" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := 
envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://accesscontextmanager.googleapis.com/v1/organizations/{{organization_id}}/gcpUserAccessBindings", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["gcpUserAccessBindings"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://accesscontextmanager.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_ingress_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_ingress_policy.go new file mode 100644 index 0000000000..7a6b4570da --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_ingress_policy.go @@ -0,0 +1,451 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceAccessContextManagerIngressPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessContextManagerIngressPolicyCreate, + Read: resourceAccessContextManagerIngressPolicyRead, + Delete: resourceAccessContextManagerIngressPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAccessContextManagerIngressPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "ingress_policy_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Service Perimeter to add this resource to.`, + }, + "resource": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A GCP resource that is inside of the service perimeter.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAccessContextManagerIngressPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + 
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + resourceProp, err := expandNestedAccessContextManagerIngressPolicyResource(d.Get("resource"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("resource"); !tpgresource.IsEmptyValue(reflect.ValueOf(resourceProp)) && (ok || !reflect.DeepEqual(v, resourceProp)) { + obj["resource"] = resourceProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{ingress_policy_name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new IngressPolicy: %#v", obj) + + obj, err = resourceAccessContextManagerIngressPolicyPatchCreateEncoder(d, meta, obj) + if err != nil { + return err + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "status.resources"}) + if err != nil { + return err + } + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating IngressPolicy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{ingress_policy_name}}/{{resource}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = AccessContextManagerOperationWaitTimeWithResponse( + config, res, &opRes, "Creating IngressPolicy", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't 
actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create IngressPolicy: %s", err) + } + + if _, ok := opRes["status"]; ok { + opRes, err = flattenNestedAccessContextManagerIngressPolicy(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error getting nested object from operation response: %s", err) + } + if opRes == nil { + // Object isn't there any more - remove it from the state. + return fmt.Errorf("Error decoding response from operation, could not find nested object") + } + } + if err := d.Set("resource", flattenNestedAccessContextManagerIngressPolicyResource(opRes["resource"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{ingress_policy_name}}/{{resource}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating IngressPolicy %q: %#v", d.Id(), res) + + return resourceAccessContextManagerIngressPolicyRead(d, meta) +} + +func resourceAccessContextManagerIngressPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{ingress_policy_name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerIngressPolicy %q", d.Id())) + } + + res, err = 
flattenNestedAccessContextManagerIngressPolicy(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing AccessContextManagerIngressPolicy because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("resource", flattenNestedAccessContextManagerIngressPolicyResource(res["resource"], d, config)); err != nil { + return fmt.Errorf("Error reading IngressPolicy: %s", err) + } + + return nil +} + +func resourceAccessContextManagerIngressPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{ingress_policy_name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceAccessContextManagerIngressPolicyPatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "IngressPolicy") + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "status.resources"}) + if err != nil { + return err + } + log.Printf("[DEBUG] Deleting IngressPolicy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "IngressPolicy") + } + + err = AccessContextManagerOperationWaitTime( + config, res, "Deleting IngressPolicy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err 
!= nil { + return err + } + + log.Printf("[DEBUG] Finished deleting IngressPolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceAccessContextManagerIngressPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + parts, err := tpgresource.GetImportIdQualifiers([]string{"accessPolicies/(?P<accessPolicy>[^/]+)/servicePerimeters/(?P<perimeter>[^/]+)"}, d, config, d.Id()) + if err != nil { + return nil, err + } + + if err := d.Set("perimeter", fmt.Sprintf("accessPolicies/%s/servicePerimeters/%s", parts["accessPolicy"], parts["perimeter"])); err != nil { + return nil, fmt.Errorf("Error setting perimeter: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenNestedAccessContextManagerIngressPolicyResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedAccessContextManagerIngressPolicyResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedAccessContextManagerIngressPolicy(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["status"] + if !ok || v == nil { + return nil, nil + } + res = v.(map[string]interface{}) + + v, ok = res["resources"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value status.resources. 
Actual value: %v", v) + } + + _, item, err := resourceAccessContextManagerIngressPolicyFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceAccessContextManagerIngressPolicyFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedResource, err := expandNestedAccessContextManagerIngressPolicyResource(d.Get("resource"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedResource := flattenNestedAccessContextManagerIngressPolicyResource(expectedResource, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + // List response only contains the ID - construct a response object. + item := map[string]interface{}{ + "resource": itemRaw, + } + + itemResource := flattenNestedAccessContextManagerIngressPolicyResource(item["resource"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemResource)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedResource))) && !reflect.DeepEqual(itemResource, expectedFlattenedResource) { + log.Printf("[DEBUG] Skipping item with resource= %#v, looking for %#v)", itemResource, expectedFlattenedResource) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. 
+func resourceAccessContextManagerIngressPolicyPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerIngressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceAccessContextManagerIngressPolicyFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. + if found != nil { + return nil, fmt.Errorf("Unable to create IngressPolicy, existing object already found: %+v", found) + } + + // Return list with the resource to create appended + res := map[string]interface{}{ + "resources": append(currItems, obj["resource"]), + } + wrapped := map[string]interface{}{ + "status": res, + } + res = wrapped + + return res, nil +} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. +func resourceAccessContextManagerIngressPolicyPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerIngressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceAccessContextManagerIngressPolicyFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "AccessContextManagerIngressPolicy") + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) + res := map[string]interface{}{ + "resources": updatedItems, + } + wrapped := map[string]interface{}{ + "status": res, + } + res = wrapped + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. 
+func resourceAccessContextManagerIngressPolicyListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{ingress_policy_name}}") + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + var v interface{} + var ok bool + if v, ok = res["status"]; ok && v != nil { + res = v.(map[string]interface{}) + } else { + return nil, nil + } + + v, ok = res["resources"] + if ok && v != nil { + ls, lsOk := v.([]interface{}) + if !lsOk { + return nil, fmt.Errorf(`expected list for nested field "resources"`) + } + return ls, nil + } + return nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_service_perimeter.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_service_perimeter.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter.go index 04eb7c1041..395dd70a61 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_service_perimeter.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package accesscontextmanager import ( "fmt" @@ -22,6 +25,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceAccessContextManagerServicePerimeter() *schema.Resource { @@ -72,7 +79,7 @@ behavior.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"PERIMETER_TYPE_REGULAR", "PERIMETER_TYPE_BRIDGE", ""}), + ValidateFunc: verify.ValidateEnum([]string{"PERIMETER_TYPE_REGULAR", "PERIMETER_TYPE_BRIDGE", ""}), Description: `Specifies the type of the Perimeter. There are two types: regular and bridge. Regular Service Perimeter contains resources, access levels, and restricted services. Every resource can be in at most @@ -117,14 +124,14 @@ Format: accessPolicies/{policy_id}/accessLevels/{access_level_name}`, Elem: &schema.Schema{ Type: schema.TypeString, }, - AtLeastOneOf: []string{"status.0.resources", "status.0.access_levels", "status.0.restricted_services"}, + AtLeastOneOf: []string{"spec.0.resources", "spec.0.access_levels", "spec.0.restricted_services"}, }, "egress_policies": { Type: schema.TypeList, Optional: true, - Description: `List of EgressPolicies to apply to the perimeter. A perimeter may + Description: `List of EgressPolicies to apply to the perimeter. 
A perimeter may have multiple EgressPolicies, each of which is evaluated separately. -Access is granted if any EgressPolicy grants it. Must be empty for +Access is granted if any EgressPolicy grants it. Must be empty for a perimeter bridge.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -138,8 +145,8 @@ a perimeter bridge.`, "identities": { Type: schema.TypeList, Optional: true, - Description: `A list of identities that are allowed access through this 'EgressPolicy'. -Should be in the format of email address. The email address should + Description: `A list of identities that are allowed access through this 'EgressPolicy'. +Should be in the format of email address. The email address should represent individual user or service account only.`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -148,9 +155,9 @@ represent individual user or service account only.`, "identity_type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), - Description: `Specifies the type of identities that are allowed access to outside the -perimeter. If left unspecified, then members of 'identities' field will + ValidateFunc: verify.ValidateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), + Description: `Specifies the type of identities that are allowed access to outside the +perimeter. If left unspecified, then members of 'identities' field will be allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, }, }, @@ -159,7 +166,7 @@ be allowed access. 
Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY" "egress_to": { Type: schema.TypeList, Optional: true, - Description: `Defines the conditions on the 'ApiOperation' and destination resources that + Description: `Defines the conditions on the 'ApiOperation' and destination resources that cause this 'EgressPolicy' to apply.`, MaxItems: 1, Elem: &schema.Resource{ @@ -177,30 +184,30 @@ s3://bucket/path). Currently '*' is not allowed.`, "operations": { Type: schema.TypeList, Optional: true, - Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches + Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches if it contains an operation/service in this list.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method_selectors": { Type: schema.TypeList, Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong -to the service specified by 'serviceName' field. A single MethodSelector -entry with '*' specified for the 'method' field will allow all methods + Description: `API methods or permissions to allow. Method or permission must belong +to the service specified by 'serviceName' field. A single MethodSelector +entry with '*' specified for the 'method' field will allow all methods AND permissions for the service specified in 'serviceName'.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method": { Type: schema.TypeString, Optional: true, - Description: `Value for 'method' should be a valid method name for the corresponding -'serviceName' in 'ApiOperation'. If '*' used as value for method, + Description: `Value for 'method' should be a valid method name for the corresponding +'serviceName' in 'ApiOperation'. 
If '*' used as value for method, then ALL methods and permissions are allowed.`, }, "permission": { Type: schema.TypeString, Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the + Description: `Value for permission should be a valid Cloud IAM permission for the corresponding 'serviceName' in 'ApiOperation'.`, }, }, @@ -209,8 +216,8 @@ corresponding 'serviceName' in 'ApiOperation'.`, "service_name": { Type: schema.TypeString, Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with serviceName + Description: `The name of the API whose methods or permissions the 'IngressPolicy' or +'EgressPolicy' want to allow. A single 'ApiOperation' with serviceName field set to '*' will allow all methods AND permissions for all services.`, }, }, @@ -219,10 +226,10 @@ field set to '*' will allow all methods AND permissions for all services.`, "resources": { Type: schema.TypeList, Optional: true, - Description: `A list of resources, currently only projects in the form -'projects/', that match this to stanza. A request matches -if it contains a resource in this list. If * is specified for resources, -then this 'EgressTo' rule will authorize access to all resources outside + Description: `A list of resources, currently only projects in the form +'projects/', that match this to stanza. A request matches +if it contains a resource in this list. If * is specified for resources, +then this 'EgressTo' rule will authorize access to all resources outside the perimeter.`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -255,7 +262,7 @@ to apply.`, Type: schema.TypeList, Optional: true, Description: `A list of identities that are allowed access through this ingress policy. -Should be in the format of email address. The email address should represent +Should be in the format of email address. 
The email address should represent individual user or service account only.`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -264,9 +271,9 @@ individual user or service account only.`, "identity_type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), - Description: `Specifies the type of identities that are allowed access from outside the -perimeter. If left unspecified, then members of 'identities' field will be + ValidateFunc: verify.ValidateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), + Description: `Specifies the type of identities that are allowed access from outside the +perimeter. If left unspecified, then members of 'identities' field will be allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, }, "sources": { @@ -278,23 +285,23 @@ allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", " "access_level": { Type: schema.TypeString, Optional: true, - Description: `An 'AccessLevel' resource name that allow resources within the -'ServicePerimeters' to be accessed from the internet. 'AccessLevels' listed + Description: `An 'AccessLevel' resource name that allow resources within the +'ServicePerimeters' to be accessed from the internet. 'AccessLevels' listed must be in the same policy as this 'ServicePerimeter'. Referencing a nonexistent -'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, -resources within the perimeter can only be accessed via Google Cloud calls -with request origins within the perimeter. -Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' +'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, +resources within the perimeter can only be accessed via Google Cloud calls +with request origins within the perimeter. 
+Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' If * is specified, then all IngressSources will be allowed.`, }, "resource": { Type: schema.TypeString, Optional: true, - Description: `A Google Cloud resource that is allowed to ingress the perimeter. -Requests from these resources will be allowed to access perimeter data. -Currently only projects are allowed. Format 'projects/{project_number}' -The project may be in any Google Cloud organization, not just the -organization that the perimeter is defined in. '*' is not allowed, the case + Description: `A Google Cloud resource that is allowed to ingress the perimeter. +Requests from these resources will be allowed to access perimeter data. +Currently only projects are allowed. Format 'projects/{project_number}' +The project may be in any Google Cloud organization, not just the +organization that the perimeter is defined in. '*' is not allowed, the case of allowing all Google Cloud resources only is not supported.`, }, }, @@ -314,30 +321,30 @@ this 'IngressPolicy' to apply.`, "operations": { Type: schema.TypeList, Optional: true, - Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' + Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' are allowed to perform in this 'ServicePerimeter'.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method_selectors": { Type: schema.TypeList, Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong to -the service specified by serviceName field. A single 'MethodSelector' entry -with '*' specified for the method field will allow all methods AND + Description: `API methods or permissions to allow. Method or permission must belong to +the service specified by serviceName field. 
A single 'MethodSelector' entry +with '*' specified for the method field will allow all methods AND permissions for the service specified in 'serviceName'.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method": { Type: schema.TypeString, Optional: true, - Description: `Value for method should be a valid method name for the corresponding -serviceName in 'ApiOperation'. If '*' used as value for 'method', then + Description: `Value for method should be a valid method name for the corresponding +serviceName in 'ApiOperation'. If '*' used as value for 'method', then ALL methods and permissions are allowed.`, }, "permission": { Type: schema.TypeString, Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the + Description: `Value for permission should be a valid Cloud IAM permission for the corresponding 'serviceName' in 'ApiOperation'.`, }, }, @@ -346,8 +353,8 @@ corresponding 'serviceName' in 'ApiOperation'.`, "service_name": { Type: schema.TypeString, Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with 'serviceName' + Description: `The name of the API whose methods or permissions the 'IngressPolicy' or +'EgressPolicy' want to allow. A single 'ApiOperation' with 'serviceName' field set to '*' will allow all methods AND permissions for all services.`, }, }, @@ -356,12 +363,12 @@ field set to '*' will allow all methods AND permissions for all services.`, "resources": { Type: schema.TypeList, Optional: true, - Description: `A list of resources, currently only projects in the form + Description: `A list of resources, currently only projects in the form 'projects/', protected by this 'ServicePerimeter' that are allowed to be accessed by sources defined in the corresponding 'IngressFrom'. A request matches if it contains a resource in this list. 
If '*' is specified for resources, -then this 'IngressTo' rule will authorize access to all +then this 'IngressTo' rule will authorize access to all resources inside the perimeter, provided that the request also matches the 'operations' field.`, Elem: &schema.Schema{ @@ -383,7 +390,7 @@ Format: projects/{project_number}`, Elem: &schema.Schema{ Type: schema.TypeString, }, - AtLeastOneOf: []string{"status.0.resources", "status.0.access_levels", "status.0.restricted_services"}, + AtLeastOneOf: []string{"spec.0.resources", "spec.0.access_levels", "spec.0.restricted_services"}, }, "restricted_services": { Type: schema.TypeList, @@ -396,7 +403,7 @@ restrictions.`, Elem: &schema.Schema{ Type: schema.TypeString, }, - AtLeastOneOf: []string{"status.0.resources", "status.0.access_levels", "status.0.restricted_services"}, + AtLeastOneOf: []string{"spec.0.resources", "spec.0.access_levels", "spec.0.restricted_services"}, }, "vpc_accessible_services": { Type: schema.TypeList, @@ -457,9 +464,9 @@ Format: accessPolicies/{policy_id}/accessLevels/{access_level_name}`, "egress_policies": { Type: schema.TypeList, Optional: true, - Description: `List of EgressPolicies to apply to the perimeter. A perimeter may + Description: `List of EgressPolicies to apply to the perimeter. A perimeter may have multiple EgressPolicies, each of which is evaluated separately. -Access is granted if any EgressPolicy grants it. Must be empty for +Access is granted if any EgressPolicy grants it. Must be empty for a perimeter bridge.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -473,8 +480,8 @@ a perimeter bridge.`, "identities": { Type: schema.TypeList, Optional: true, - Description: `A list of identities that are allowed access through this 'EgressPolicy'. -Should be in the format of email address. The email address should + Description: `A list of identities that are allowed access through this 'EgressPolicy'. +Should be in the format of email address. 
The email address should represent individual user or service account only.`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -483,9 +490,9 @@ represent individual user or service account only.`, "identity_type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), - Description: `Specifies the type of identities that are allowed access to outside the -perimeter. If left unspecified, then members of 'identities' field will + ValidateFunc: verify.ValidateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), + Description: `Specifies the type of identities that are allowed access to outside the +perimeter. If left unspecified, then members of 'identities' field will be allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, }, }, @@ -494,7 +501,7 @@ be allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY" "egress_to": { Type: schema.TypeList, Optional: true, - Description: `Defines the conditions on the 'ApiOperation' and destination resources that + Description: `Defines the conditions on the 'ApiOperation' and destination resources that cause this 'EgressPolicy' to apply.`, MaxItems: 1, Elem: &schema.Resource{ @@ -512,30 +519,30 @@ s3://bucket/path). Currently '*' is not allowed.`, "operations": { Type: schema.TypeList, Optional: true, - Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches + Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches if it contains an operation/service in this list.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method_selectors": { Type: schema.TypeList, Optional: true, - Description: `API methods or permissions to allow. 
Method or permission must belong -to the service specified by 'serviceName' field. A single MethodSelector -entry with '*' specified for the 'method' field will allow all methods + Description: `API methods or permissions to allow. Method or permission must belong +to the service specified by 'serviceName' field. A single MethodSelector +entry with '*' specified for the 'method' field will allow all methods AND permissions for the service specified in 'serviceName'.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method": { Type: schema.TypeString, Optional: true, - Description: `Value for 'method' should be a valid method name for the corresponding -'serviceName' in 'ApiOperation'. If '*' used as value for method, + Description: `Value for 'method' should be a valid method name for the corresponding +'serviceName' in 'ApiOperation'. If '*' used as value for method, then ALL methods and permissions are allowed.`, }, "permission": { Type: schema.TypeString, Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the + Description: `Value for permission should be a valid Cloud IAM permission for the corresponding 'serviceName' in 'ApiOperation'.`, }, }, @@ -544,8 +551,8 @@ corresponding 'serviceName' in 'ApiOperation'.`, "service_name": { Type: schema.TypeString, Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with serviceName + Description: `The name of the API whose methods or permissions the 'IngressPolicy' or +'EgressPolicy' want to allow. 
A single 'ApiOperation' with serviceName field set to '*' will allow all methods AND permissions for all services.`, }, }, @@ -554,10 +561,10 @@ field set to '*' will allow all methods AND permissions for all services.`, "resources": { Type: schema.TypeList, Optional: true, - Description: `A list of resources, currently only projects in the form -'projects/', that match this to stanza. A request matches -if it contains a resource in this list. If * is specified for resources, -then this 'EgressTo' rule will authorize access to all resources outside + Description: `A list of resources, currently only projects in the form +'projects/', that match this to stanza. A request matches +if it contains a resource in this list. If * is specified for resources, +then this 'EgressTo' rule will authorize access to all resources outside the perimeter.`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -590,7 +597,7 @@ to apply.`, Type: schema.TypeList, Optional: true, Description: `A list of identities that are allowed access through this ingress policy. -Should be in the format of email address. The email address should represent +Should be in the format of email address. The email address should represent individual user or service account only.`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -599,9 +606,9 @@ individual user or service account only.`, "identity_type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), - Description: `Specifies the type of identities that are allowed access from outside the -perimeter. If left unspecified, then members of 'identities' field will be + ValidateFunc: verify.ValidateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), + Description: `Specifies the type of identities that are allowed access from outside the +perimeter. 
If left unspecified, then members of 'identities' field will be allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, }, "sources": { @@ -613,23 +620,23 @@ allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", " "access_level": { Type: schema.TypeString, Optional: true, - Description: `An 'AccessLevel' resource name that allow resources within the -'ServicePerimeters' to be accessed from the internet. 'AccessLevels' listed + Description: `An 'AccessLevel' resource name that allow resources within the +'ServicePerimeters' to be accessed from the internet. 'AccessLevels' listed must be in the same policy as this 'ServicePerimeter'. Referencing a nonexistent -'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, -resources within the perimeter can only be accessed via Google Cloud calls -with request origins within the perimeter. -Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' +'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, +resources within the perimeter can only be accessed via Google Cloud calls +with request origins within the perimeter. +Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' If * is specified, then all IngressSources will be allowed.`, }, "resource": { Type: schema.TypeString, Optional: true, - Description: `A Google Cloud resource that is allowed to ingress the perimeter. -Requests from these resources will be allowed to access perimeter data. -Currently only projects are allowed. Format 'projects/{project_number}' -The project may be in any Google Cloud organization, not just the -organization that the perimeter is defined in. '*' is not allowed, the case + Description: `A Google Cloud resource that is allowed to ingress the perimeter. +Requests from these resources will be allowed to access perimeter data. +Currently only projects are allowed. 
Format 'projects/{project_number}' +The project may be in any Google Cloud organization, not just the +organization that the perimeter is defined in. '*' is not allowed, the case of allowing all Google Cloud resources only is not supported.`, }, }, @@ -649,30 +656,30 @@ this 'IngressPolicy' to apply.`, "operations": { Type: schema.TypeList, Optional: true, - Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' + Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' are allowed to perform in this 'ServicePerimeter'.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method_selectors": { Type: schema.TypeList, Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong to -the service specified by serviceName field. A single 'MethodSelector' entry -with '*' specified for the method field will allow all methods AND + Description: `API methods or permissions to allow. Method or permission must belong to +the service specified by serviceName field. A single 'MethodSelector' entry +with '*' specified for the method field will allow all methods AND permissions for the service specified in 'serviceName'.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method": { Type: schema.TypeString, Optional: true, - Description: `Value for method should be a valid method name for the corresponding -serviceName in 'ApiOperation'. If '*' used as value for 'method', then + Description: `Value for method should be a valid method name for the corresponding +serviceName in 'ApiOperation'. 
If '*' used as value for 'method', then ALL methods and permissions are allowed.`, }, "permission": { Type: schema.TypeString, Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the + Description: `Value for permission should be a valid Cloud IAM permission for the corresponding 'serviceName' in 'ApiOperation'.`, }, }, @@ -681,8 +688,8 @@ corresponding 'serviceName' in 'ApiOperation'.`, "service_name": { Type: schema.TypeString, Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with 'serviceName' + Description: `The name of the API whose methods or permissions the 'IngressPolicy' or +'EgressPolicy' want to allow. A single 'ApiOperation' with 'serviceName' field set to '*' will allow all methods AND permissions for all services.`, }, }, @@ -691,12 +698,12 @@ field set to '*' will allow all methods AND permissions for all services.`, "resources": { Type: schema.TypeList, Optional: true, - Description: `A list of resources, currently only projects in the form + Description: `A list of resources, currently only projects in the form 'projects/', protected by this 'ServicePerimeter' that are allowed to be accessed by sources defined in the corresponding 'IngressFrom'. A request matches if it contains a resource in this list. 
If '*' is specified for resources, -then this 'IngressTo' rule will authorize access to all +then this 'IngressTo' rule will authorize access to all resources inside the perimeter, provided that the request also matches the 'operations' field.`, Elem: &schema.Schema{ @@ -793,8 +800,8 @@ bet set to True if any of the fields in the spec are set to non-default values.` } func resourceAccessContextManagerServicePerimeterCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -803,49 +810,49 @@ func resourceAccessContextManagerServicePerimeterCreate(d *schema.ResourceData, titleProp, err := expandAccessContextManagerServicePerimeterTitle(d.Get("title"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("title"); !isEmptyValue(reflect.ValueOf(titleProp)) && (ok || !reflect.DeepEqual(v, titleProp)) { + } else if v, ok := d.GetOkExists("title"); !tpgresource.IsEmptyValue(reflect.ValueOf(titleProp)) && (ok || !reflect.DeepEqual(v, titleProp)) { obj["title"] = titleProp } descriptionProp, err := expandAccessContextManagerServicePerimeterDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } perimeterTypeProp, err := expandAccessContextManagerServicePerimeterPerimeterType(d.Get("perimeter_type"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("perimeter_type"); !isEmptyValue(reflect.ValueOf(perimeterTypeProp)) && (ok || 
!reflect.DeepEqual(v, perimeterTypeProp)) { + } else if v, ok := d.GetOkExists("perimeter_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(perimeterTypeProp)) && (ok || !reflect.DeepEqual(v, perimeterTypeProp)) { obj["perimeterType"] = perimeterTypeProp } statusProp, err := expandAccessContextManagerServicePerimeterStatus(d.Get("status"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("status"); !isEmptyValue(reflect.ValueOf(statusProp)) && (ok || !reflect.DeepEqual(v, statusProp)) { + } else if v, ok := d.GetOkExists("status"); !tpgresource.IsEmptyValue(reflect.ValueOf(statusProp)) && (ok || !reflect.DeepEqual(v, statusProp)) { obj["status"] = statusProp } specProp, err := expandAccessContextManagerServicePerimeterSpec(d.Get("spec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("spec"); !isEmptyValue(reflect.ValueOf(specProp)) && (ok || !reflect.DeepEqual(v, specProp)) { + } else if v, ok := d.GetOkExists("spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(specProp)) && (ok || !reflect.DeepEqual(v, specProp)) { obj["spec"] = specProp } useExplicitDryRunSpecProp, err := expandAccessContextManagerServicePerimeterUseExplicitDryRunSpec(d.Get("use_explicit_dry_run_spec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("use_explicit_dry_run_spec"); !isEmptyValue(reflect.ValueOf(useExplicitDryRunSpecProp)) && (ok || !reflect.DeepEqual(v, useExplicitDryRunSpecProp)) { + } else if v, ok := d.GetOkExists("use_explicit_dry_run_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(useExplicitDryRunSpecProp)) && (ok || !reflect.DeepEqual(v, useExplicitDryRunSpecProp)) { obj["useExplicitDryRunSpec"] = useExplicitDryRunSpecProp } parentProp, err := expandAccessContextManagerServicePerimeterParent(d.Get("parent"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { + } else if v, ok := 
d.GetOkExists("parent"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { obj["parent"] = parentProp } nameProp, err := expandAccessContextManagerServicePerimeterName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } @@ -854,14 +861,14 @@ func resourceAccessContextManagerServicePerimeterCreate(d *schema.ResourceData, return err } - lockName, err := replaceVars(d, config, "{{name}}") + lockName, err := tpgresource.ReplaceVars(d, config, "{{name}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters") + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters") if err != nil { return err } @@ -870,17 +877,25 @@ func resourceAccessContextManagerServicePerimeterCreate(d *schema.ResourceData, billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating ServicePerimeter: 
%s", err) } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -904,7 +919,7 @@ func resourceAccessContextManagerServicePerimeterCreate(d *schema.ResourceData, } // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "{{name}}") + id, err = tpgresource.ReplaceVars(d, config, "{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -916,13 +931,13 @@ func resourceAccessContextManagerServicePerimeterCreate(d *schema.ResourceData, } func resourceAccessContextManagerServicePerimeterRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") if err != nil { return err } @@ -930,13 +945,19 @@ func resourceAccessContextManagerServicePerimeterRead(d *schema.ResourceData, me billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerServicePerimeter %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, 
fmt.Sprintf("AccessContextManagerServicePerimeter %q", d.Id())) } if err := d.Set("title", flattenAccessContextManagerServicePerimeterTitle(res["title"], d, config)); err != nil { @@ -971,8 +992,8 @@ func resourceAccessContextManagerServicePerimeterRead(d *schema.ResourceData, me } func resourceAccessContextManagerServicePerimeterUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -983,31 +1004,31 @@ func resourceAccessContextManagerServicePerimeterUpdate(d *schema.ResourceData, titleProp, err := expandAccessContextManagerServicePerimeterTitle(d.Get("title"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("title"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, titleProp)) { + } else if v, ok := d.GetOkExists("title"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, titleProp)) { obj["title"] = titleProp } descriptionProp, err := expandAccessContextManagerServicePerimeterDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } statusProp, err := expandAccessContextManagerServicePerimeterStatus(d.Get("status"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("status"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, statusProp)) { + } else if v, ok := d.GetOkExists("status"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, statusProp)) { 
obj["status"] = statusProp } specProp, err := expandAccessContextManagerServicePerimeterSpec(d.Get("spec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("spec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, specProp)) { + } else if v, ok := d.GetOkExists("spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, specProp)) { obj["spec"] = specProp } useExplicitDryRunSpecProp, err := expandAccessContextManagerServicePerimeterUseExplicitDryRunSpec(d.Get("use_explicit_dry_run_spec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("use_explicit_dry_run_spec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, useExplicitDryRunSpecProp)) { + } else if v, ok := d.GetOkExists("use_explicit_dry_run_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, useExplicitDryRunSpecProp)) { obj["useExplicitDryRunSpec"] = useExplicitDryRunSpecProp } @@ -1016,14 +1037,14 @@ func resourceAccessContextManagerServicePerimeterUpdate(d *schema.ResourceData, return err } - lockName, err := replaceVars(d, config, "{{name}}") + lockName, err := tpgresource.ReplaceVars(d, config, "{{name}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") if err != nil { return err } @@ -1050,19 +1071,27 @@ func resourceAccessContextManagerServicePerimeterUpdate(d *schema.ResourceData, if d.HasChange("use_explicit_dry_run_spec") { updateMask = append(updateMask, "useExplicitDryRunSpec") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it 
- url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating ServicePerimeter %q: %s", d.Id(), err) @@ -1082,22 +1111,22 @@ func resourceAccessContextManagerServicePerimeterUpdate(d *schema.ResourceData, } func resourceAccessContextManagerServicePerimeterDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - lockName, err := replaceVars(d, config, "{{name}}") + lockName, err := tpgresource.ReplaceVars(d, config, "{{name}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") if err != nil { return err } @@ -1106,13 +1135,21 @@ func resourceAccessContextManagerServicePerimeterDelete(d *schema.ResourceData, 
log.Printf("[DEBUG] Deleting ServicePerimeter %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "ServicePerimeter") + return transport_tpg.HandleNotFoundError(err, d, "ServicePerimeter") } err = AccessContextManagerOperationWaitTime( @@ -1128,10 +1165,10 @@ func resourceAccessContextManagerServicePerimeterDelete(d *schema.ResourceData, } func resourceAccessContextManagerServicePerimeterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { return nil, err } stringParts := strings.Split(d.Get("name").(string), "/") @@ -1144,31 +1181,31 @@ func resourceAccessContextManagerServicePerimeterImport(d *schema.ResourceData, return []*schema.ResourceData{d}, nil } -func flattenAccessContextManagerServicePerimeterTitle(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
+func flattenAccessContextManagerServicePerimeterDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterPerimeterType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenAccessContextManagerServicePerimeterPerimeterType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return "PERIMETER_TYPE_REGULAR" } return v } -func flattenAccessContextManagerServicePerimeterStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1184,29 +1221,29 @@ func flattenAccessContextManagerServicePerimeterStatus(v interface{}, d *schema. 
transformed["restricted_services"] = flattenAccessContextManagerServicePerimeterStatusRestrictedServices(original["restrictedServices"], d, config) transformed["vpc_accessible_services"] = - flattenAccessContextManagerServicePerimeterStatusVPCAccessibleServices(original["vpcAccessibleServices"], d, config) + flattenAccessContextManagerServicePerimeterStatusVpcAccessibleServices(original["vpcAccessibleServices"], d, config) transformed["ingress_policies"] = flattenAccessContextManagerServicePerimeterStatusIngressPolicies(original["ingressPolicies"], d, config) transformed["egress_policies"] = flattenAccessContextManagerServicePerimeterStatusEgressPolicies(original["egressPolicies"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimeterStatusResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusAccessLevels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusAccessLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusRestrictedServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusRestrictedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenAccessContextManagerServicePerimeterStatusVPCAccessibleServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusVpcAccessibleServices(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1216,23 +1253,23 @@ func flattenAccessContextManagerServicePerimeterStatusVPCAccessibleServices(v in } transformed := make(map[string]interface{}) transformed["enable_restriction"] = - flattenAccessContextManagerServicePerimeterStatusVPCAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) + flattenAccessContextManagerServicePerimeterStatusVpcAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) transformed["allowed_services"] = - flattenAccessContextManagerServicePerimeterStatusVPCAccessibleServicesAllowedServices(original["allowedServices"], d, config) + flattenAccessContextManagerServicePerimeterStatusVpcAccessibleServicesAllowedServices(original["allowedServices"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimeterStatusVPCAccessibleServicesEnableRestriction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusVpcAccessibleServicesEnableRestriction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusVPCAccessibleServicesAllowedServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusVpcAccessibleServicesAllowedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenAccessContextManagerServicePerimeterStatusIngressPolicies(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1251,7 +1288,7 @@ func 
flattenAccessContextManagerServicePerimeterStatusIngressPolicies(v interfac } return transformed } -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFrom(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1268,15 +1305,15 @@ func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFrom flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSources(original["sources"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentityType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentities(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1295,15 +1332,15 @@ func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFrom } return transformed } -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressTo(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1318,11 +1355,11 @@ func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressTo(v flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperations(original["operations"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperations(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1341,11 +1378,11 @@ func 
flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOp } return transformed } -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1364,15 +1401,15 @@ func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOp } return transformed } -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusEgressPolicies(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusEgressPolicies(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1391,7 +1428,7 @@ func flattenAccessContextManagerServicePerimeterStatusEgressPolicies(v interface } return transformed } -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFrom(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1406,15 +1443,15 @@ func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFrom(v flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentities(original["identities"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentityType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentities(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressTo(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1431,15 +1468,15 @@ func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressTo(v i 
flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperations(original["operations"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToExternalResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToExternalResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperations(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1458,11 +1495,11 @@ func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOper } return transformed } -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1481,15 +1518,15 @@ func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOper } return transformed } -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1505,26 +1542,26 @@ func flattenAccessContextManagerServicePerimeterSpec(v interface{}, d *schema.Re transformed["restricted_services"] = flattenAccessContextManagerServicePerimeterSpecRestrictedServices(original["restrictedServices"], d, config) transformed["vpc_accessible_services"] = - flattenAccessContextManagerServicePerimeterSpecVPCAccessibleServices(original["vpcAccessibleServices"], d, config) + flattenAccessContextManagerServicePerimeterSpecVpcAccessibleServices(original["vpcAccessibleServices"], d, config) transformed["ingress_policies"] = flattenAccessContextManagerServicePerimeterSpecIngressPolicies(original["ingressPolicies"], d, config) transformed["egress_policies"] = 
flattenAccessContextManagerServicePerimeterSpecEgressPolicies(original["egressPolicies"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimeterSpecResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecAccessLevels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecAccessLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecRestrictedServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecRestrictedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecVPCAccessibleServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecVpcAccessibleServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1534,20 +1571,20 @@ func flattenAccessContextManagerServicePerimeterSpecVPCAccessibleServices(v inte } transformed := make(map[string]interface{}) transformed["enable_restriction"] = - flattenAccessContextManagerServicePerimeterSpecVPCAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) + flattenAccessContextManagerServicePerimeterSpecVpcAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) transformed["allowed_services"] = - flattenAccessContextManagerServicePerimeterSpecVPCAccessibleServicesAllowedServices(original["allowedServices"], d, config) + 
flattenAccessContextManagerServicePerimeterSpecVpcAccessibleServicesAllowedServices(original["allowedServices"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimeterSpecVPCAccessibleServicesEnableRestriction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecVpcAccessibleServicesEnableRestriction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecVPCAccessibleServicesAllowedServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecVpcAccessibleServicesAllowedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecIngressPolicies(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1566,7 +1603,7 @@ func flattenAccessContextManagerServicePerimeterSpecIngressPolicies(v interface{ } return transformed } -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFrom(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1583,15 +1620,15 @@ func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFrom(v flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSources(original["sources"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentityType(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentities(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1610,15 +1647,15 @@ func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSo } return transformed } -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressTo(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressTo(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1633,11 +1670,11 @@ func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressTo(v i flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperations(original["operations"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperations(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1656,11 +1693,11 @@ func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOper } return transformed } -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1679,15 +1716,15 @@ func 
flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOper } return transformed } -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecEgressPolicies(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecEgressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1706,7 +1743,7 @@ func flattenAccessContextManagerServicePerimeterSpecEgressPolicies(v interface{} } return transformed } -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFrom(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1721,15 +1758,15 @@ func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFrom(v i flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentities(original["identities"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentityType(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentities(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressTo(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1746,15 +1783,15 @@ func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressTo(v int flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperations(original["operations"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToExternalResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToExternalResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperations(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1773,11 +1810,11 @@ func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperat } return transformed } -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1796,35 +1833,35 @@ func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperat } return transformed } -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenAccessContextManagerServicePerimeterUseExplicitDryRunSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterUseExplicitDryRunSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimeterName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimeterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandAccessContextManagerServicePerimeterTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterPerimeterType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterPerimeterType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1836,62 +1873,62 @@ func expandAccessContextManagerServicePerimeterStatus(v interface{}, d Terraform 
transformedResources, err := expandAccessContextManagerServicePerimeterStatusResources(original["resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resources"] = transformedResources } transformedAccessLevels, err := expandAccessContextManagerServicePerimeterStatusAccessLevels(original["access_levels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAccessLevels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAccessLevels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["accessLevels"] = transformedAccessLevels } transformedRestrictedServices, err := expandAccessContextManagerServicePerimeterStatusRestrictedServices(original["restricted_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRestrictedServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRestrictedServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["restrictedServices"] = transformedRestrictedServices } - transformedVPCAccessibleServices, err := expandAccessContextManagerServicePerimeterStatusVPCAccessibleServices(original["vpc_accessible_services"], d, config) + transformedVpcAccessibleServices, err := expandAccessContextManagerServicePerimeterStatusVpcAccessibleServices(original["vpc_accessible_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVPCAccessibleServices); val.IsValid() && !isEmptyValue(val) { - transformed["vpcAccessibleServices"] = transformedVPCAccessibleServices + } else if val := reflect.ValueOf(transformedVpcAccessibleServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["vpcAccessibleServices"] = transformedVpcAccessibleServices } transformedIngressPolicies, err := expandAccessContextManagerServicePerimeterStatusIngressPolicies(original["ingress_policies"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIngressPolicies); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIngressPolicies); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ingressPolicies"] = transformedIngressPolicies } transformedEgressPolicies, err := expandAccessContextManagerServicePerimeterStatusEgressPolicies(original["egress_policies"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEgressPolicies); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEgressPolicies); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["egressPolicies"] = transformedEgressPolicies } return transformed, nil } -func expandAccessContextManagerServicePerimeterStatusResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusAccessLevels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusRestrictedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusRestrictedServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = 
v.(*schema.Set).List() return v, nil } -func expandAccessContextManagerServicePerimeterStatusVPCAccessibleServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusVpcAccessibleServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1900,33 +1937,33 @@ func expandAccessContextManagerServicePerimeterStatusVPCAccessibleServices(v int original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedEnableRestriction, err := expandAccessContextManagerServicePerimeterStatusVPCAccessibleServicesEnableRestriction(original["enable_restriction"], d, config) + transformedEnableRestriction, err := expandAccessContextManagerServicePerimeterStatusVpcAccessibleServicesEnableRestriction(original["enable_restriction"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnableRestriction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnableRestriction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enableRestriction"] = transformedEnableRestriction } - transformedAllowedServices, err := expandAccessContextManagerServicePerimeterStatusVPCAccessibleServicesAllowedServices(original["allowed_services"], d, config) + transformedAllowedServices, err := expandAccessContextManagerServicePerimeterStatusVpcAccessibleServicesAllowedServices(original["allowed_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowedServices"] = transformedAllowedServices } return transformed, nil } -func 
expandAccessContextManagerServicePerimeterStatusVPCAccessibleServicesEnableRestriction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusVpcAccessibleServicesEnableRestriction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusVPCAccessibleServicesAllowedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusVpcAccessibleServicesAllowedServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPolicies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1939,14 +1976,14 @@ func expandAccessContextManagerServicePerimeterStatusIngressPolicies(v interface transformedIngressFrom, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFrom(original["ingress_from"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIngressFrom); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIngressFrom); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ingressFrom"] = transformedIngressFrom } transformedIngressTo, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressTo(original["ingress_to"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIngressTo); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIngressTo); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ingressTo"] = transformedIngressTo } @@ -1955,7 +1992,7 @@ func expandAccessContextManagerServicePerimeterStatusIngressPolicies(v interface return req, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1967,36 +2004,36 @@ func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFrom( transformedIdentityType, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentityType(original["identity_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identityType"] = transformedIdentityType } transformedIdentities, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentities(original["identities"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identities"] = transformedIdentities } transformedSources, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSources(original["sources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSources); val.IsValid() && !isEmptyValue(val) { + } else if val 
:= reflect.ValueOf(transformedSources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sources"] = transformedSources } return transformed, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2009,14 +2046,14 @@ func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromS transformedAccessLevel, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesAccessLevel(original["access_level"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAccessLevel); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAccessLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["accessLevel"] = transformedAccessLevel } transformedResource, err := 
expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesResource(original["resource"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResource); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResource); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resource"] = transformedResource } @@ -2025,15 +2062,15 @@ func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromS return req, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressTo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2045,25 +2082,25 @@ func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressTo(v transformedResources, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToResources(original["resources"], d, config) if err != nil { 
return nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resources"] = transformedResources } transformedOperations, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperations(original["operations"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["operations"] = transformedOperations } return transformed, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2076,14 +2113,14 @@ func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOpe transformedServiceName, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsServiceName(original["service_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedServiceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["serviceName"] = transformedServiceName } transformedMethodSelectors, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectors(original["method_selectors"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["methodSelectors"] = transformedMethodSelectors } @@ -2092,11 +2129,11 @@ func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOpe return req, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2109,14 +2146,14 @@ func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOpe transformedMethod, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["method"] = transformedMethod } transformedPermission, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["permission"] = transformedPermission } @@ -2125,15 +2162,15 @@ func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOpe return req, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusEgressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusEgressPolicies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw 
:= range l { @@ -2146,14 +2183,14 @@ func expandAccessContextManagerServicePerimeterStatusEgressPolicies(v interface{ transformedEgressFrom, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFrom(original["egress_from"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEgressFrom); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEgressFrom); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["egressFrom"] = transformedEgressFrom } transformedEgressTo, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressTo(original["egress_to"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEgressTo); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEgressTo); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["egressTo"] = transformedEgressTo } @@ -2162,7 +2199,7 @@ func expandAccessContextManagerServicePerimeterStatusEgressPolicies(v interface{ return req, nil } -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2174,29 +2211,29 @@ func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFrom(v transformedIdentityType, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentityType(original["identity_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["identityType"] = transformedIdentityType } transformedIdentities, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentities(original["identities"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identities"] = transformedIdentities } return transformed, nil } -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressTo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2208,36 +2245,36 @@ func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressTo(v in transformedResources, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToResources(original["resources"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resources"] = transformedResources } transformedExternalResources, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToExternalResources(original["external_resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExternalResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExternalResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["externalResources"] = transformedExternalResources } transformedOperations, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperations(original["operations"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["operations"] = transformedOperations } return transformed, nil } -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToExternalResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToExternalResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2250,14 +2287,14 @@ func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOpera transformedServiceName, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsServiceName(original["service_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["serviceName"] = transformedServiceName } transformedMethodSelectors, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectors(original["method_selectors"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["methodSelectors"] = transformedMethodSelectors } @@ -2266,11 +2303,11 @@ func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOpera return req, nil } -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } 
-func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2283,14 +2320,14 @@ func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOpera transformedMethod, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["method"] = transformedMethod } transformedPermission, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["permission"] = transformedPermission } @@ -2299,15 +2336,15 @@ func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOpera return req, nil } -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { 
return v, nil } -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2319,61 +2356,61 @@ func expandAccessContextManagerServicePerimeterSpec(v interface{}, d TerraformRe transformedResources, err := expandAccessContextManagerServicePerimeterSpecResources(original["resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resources"] = transformedResources } transformedAccessLevels, err := expandAccessContextManagerServicePerimeterSpecAccessLevels(original["access_levels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAccessLevels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAccessLevels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["accessLevels"] = transformedAccessLevels } transformedRestrictedServices, err := expandAccessContextManagerServicePerimeterSpecRestrictedServices(original["restricted_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRestrictedServices); val.IsValid() 
&& !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRestrictedServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["restrictedServices"] = transformedRestrictedServices } - transformedVPCAccessibleServices, err := expandAccessContextManagerServicePerimeterSpecVPCAccessibleServices(original["vpc_accessible_services"], d, config) + transformedVpcAccessibleServices, err := expandAccessContextManagerServicePerimeterSpecVpcAccessibleServices(original["vpc_accessible_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVPCAccessibleServices); val.IsValid() && !isEmptyValue(val) { - transformed["vpcAccessibleServices"] = transformedVPCAccessibleServices + } else if val := reflect.ValueOf(transformedVpcAccessibleServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vpcAccessibleServices"] = transformedVpcAccessibleServices } transformedIngressPolicies, err := expandAccessContextManagerServicePerimeterSpecIngressPolicies(original["ingress_policies"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIngressPolicies); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIngressPolicies); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ingressPolicies"] = transformedIngressPolicies } transformedEgressPolicies, err := expandAccessContextManagerServicePerimeterSpecEgressPolicies(original["egress_policies"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEgressPolicies); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEgressPolicies); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["egressPolicies"] = transformedEgressPolicies } return transformed, nil } -func expandAccessContextManagerServicePerimeterSpecResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandAccessContextManagerServicePerimeterSpecResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecAccessLevels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecRestrictedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecRestrictedServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecVPCAccessibleServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecVpcAccessibleServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2382,32 +2419,32 @@ func expandAccessContextManagerServicePerimeterSpecVPCAccessibleServices(v inter original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedEnableRestriction, err := expandAccessContextManagerServicePerimeterSpecVPCAccessibleServicesEnableRestriction(original["enable_restriction"], d, config) + transformedEnableRestriction, err := expandAccessContextManagerServicePerimeterSpecVpcAccessibleServicesEnableRestriction(original["enable_restriction"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnableRestriction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnableRestriction); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["enableRestriction"] = transformedEnableRestriction } - transformedAllowedServices, err := expandAccessContextManagerServicePerimeterSpecVPCAccessibleServicesAllowedServices(original["allowed_services"], d, config) + transformedAllowedServices, err := expandAccessContextManagerServicePerimeterSpecVpcAccessibleServicesAllowedServices(original["allowed_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowedServices"] = transformedAllowedServices } return transformed, nil } -func expandAccessContextManagerServicePerimeterSpecVPCAccessibleServicesEnableRestriction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecVpcAccessibleServicesEnableRestriction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecVPCAccessibleServicesAllowedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecVpcAccessibleServicesAllowedServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecIngressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecIngressPolicies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2420,14 +2457,14 @@ func 
expandAccessContextManagerServicePerimeterSpecIngressPolicies(v interface{} transformedIngressFrom, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFrom(original["ingress_from"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIngressFrom); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIngressFrom); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ingressFrom"] = transformedIngressFrom } transformedIngressTo, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressTo(original["ingress_to"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIngressTo); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIngressTo); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ingressTo"] = transformedIngressTo } @@ -2436,7 +2473,7 @@ func expandAccessContextManagerServicePerimeterSpecIngressPolicies(v interface{} return req, nil } -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2448,36 +2485,36 @@ func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFrom(v transformedIdentityType, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentityType(original["identity_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identityType"] = 
transformedIdentityType } transformedIdentities, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentities(original["identities"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identities"] = transformedIdentities } transformedSources, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSources(original["sources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sources"] = transformedSources } return transformed, nil } -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2490,14 +2527,14 @@ func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSou transformedAccessLevel, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesAccessLevel(original["access_level"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAccessLevel); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAccessLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["accessLevel"] = transformedAccessLevel } transformedResource, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesResource(original["resource"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResource); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResource); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resource"] = transformedResource } @@ -2506,15 +2543,15 @@ func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSou return req, nil } -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil 
} -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressTo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2526,25 +2563,25 @@ func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressTo(v in transformedResources, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToResources(original["resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resources"] = transformedResources } transformedOperations, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperations(original["operations"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["operations"] = transformedOperations } return transformed, nil } -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2557,14 +2594,14 @@ func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOpera transformedServiceName, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsServiceName(original["service_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["serviceName"] = transformedServiceName } transformedMethodSelectors, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectors(original["method_selectors"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["methodSelectors"] = transformedMethodSelectors } @@ -2573,11 +2610,11 @@ func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOpera return req, nil } -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2590,14 +2627,14 @@ func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOpera transformedMethod, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["method"] = transformedMethod } transformedPermission, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["permission"] = transformedPermission } @@ -2606,15 +2643,15 @@ func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOpera return req, nil } -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecEgressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecEgressPolicies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2627,14 +2664,14 @@ func expandAccessContextManagerServicePerimeterSpecEgressPolicies(v interface{}, transformedEgressFrom, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFrom(original["egress_from"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEgressFrom); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEgressFrom); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["egressFrom"] = transformedEgressFrom } transformedEgressTo, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressTo(original["egress_to"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEgressTo); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEgressTo); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["egressTo"] = transformedEgressTo } @@ -2643,7 +2680,7 @@ func expandAccessContextManagerServicePerimeterSpecEgressPolicies(v interface{}, return req, nil } -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFrom(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2655,29 +2692,29 @@ func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFrom(v in transformedIdentityType, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentityType(original["identity_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identityType"] = transformedIdentityType } transformedIdentities, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentities(original["identities"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identities"] = transformedIdentities } return transformed, nil } -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressTo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2689,36 +2726,36 @@ func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressTo(v inte transformedResources, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToResources(original["resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resources"] = transformedResources } transformedExternalResources, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToExternalResources(original["external_resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExternalResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExternalResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["externalResources"] = transformedExternalResources } transformedOperations, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperations(original["operations"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["operations"] = transformedOperations } return transformed, nil } -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToResources(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToExternalResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToExternalResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2731,14 +2768,14 @@ func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperati transformedServiceName, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsServiceName(original["service_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["serviceName"] = transformedServiceName } transformedMethodSelectors, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectors(original["method_selectors"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethodSelectors); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["methodSelectors"] = transformedMethodSelectors } @@ -2747,11 +2784,11 @@ func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperati return req, nil } -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2764,14 +2801,14 @@ func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperati transformedMethod, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["method"] = transformedMethod } transformedPermission, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPermission); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["permission"] = transformedPermission } @@ -2780,23 +2817,23 @@ func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperati return req, nil } -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterUseExplicitDryRunSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterUseExplicitDryRunSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterParent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimeterName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimeterName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { 
return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_egress_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_egress_policy.go new file mode 100644 index 0000000000..e05d708d3a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_egress_policy.go @@ -0,0 +1,946 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceAccessContextManagerServicePerimeterEgressPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessContextManagerServicePerimeterEgressPolicyCreate, + Read: resourceAccessContextManagerServicePerimeterEgressPolicyRead, + Update: resourceAccessContextManagerServicePerimeterEgressPolicyUpdate, + Delete: resourceAccessContextManagerServicePerimeterEgressPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAccessContextManagerServicePerimeterEgressPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "perimeter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Service Perimeter to add this resource to.`, + }, + "egress_from": { + Type: schema.TypeList, + Optional: true, + Description: `Defines conditions on the source of a request causing this 'EgressPolicy' to apply.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "identities": { + Type: schema.TypeList, + Optional: true, + Description: `A list of identities that are allowed access through this 'EgressPolicy'. +Should be in the format of email address. 
The email address should +represent individual user or service account only.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "identity_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), + Description: `Specifies the type of identities that are allowed access to outside the +perimeter. If left unspecified, then members of 'identities' field will +be allowed access. Possible values: ["ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, + }, + }, + }, + }, + "egress_to": { + Type: schema.TypeList, + Optional: true, + Description: `Defines the conditions on the 'ApiOperation' and destination resources that +cause this 'EgressPolicy' to apply.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "external_resources": { + Type: schema.TypeList, + Optional: true, + Description: `A list of external resources that are allowed to be accessed. A request +matches if it contains an external resource in this list (Example: +s3://bucket/path). Currently '*' is not allowed.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "operations": { + Type: schema.TypeList, + Optional: true, + Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches +if it contains an operation/service in this list.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "method_selectors": { + Type: schema.TypeList, + Optional: true, + Description: `API methods or permissions to allow. Method or permission must belong +to the service specified by 'serviceName' field. 
A single MethodSelector +entry with '*' specified for the 'method' field will allow all methods +AND permissions for the service specified in 'serviceName'.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "method": { + Type: schema.TypeString, + Optional: true, + Description: `Value for 'method' should be a valid method name for the corresponding +'serviceName' in 'ApiOperation'. If '*' used as value for method, +then ALL methods and permissions are allowed.`, + }, + "permission": { + Type: schema.TypeString, + Optional: true, + Description: `Value for permission should be a valid Cloud IAM permission for the +corresponding 'serviceName' in 'ApiOperation'.`, + }, + }, + }, + }, + "service_name": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the API whose methods or permissions the 'IngressPolicy' or +'EgressPolicy' want to allow. A single 'ApiOperation' with serviceName +field set to '*' will allow all methods AND permissions for all services.`, + }, + }, + }, + }, + "resources": { + Type: schema.TypeList, + Optional: true, + Description: `A list of resources, currently only projects in the form +'projects/', that match this to stanza. A request matches +if it contains a resource in this list. 
If * is specified for resources, +then this 'EgressTo' rule will authorize access to all resources outside +the perimeter.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAccessContextManagerServicePerimeterEgressPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + egressFromProp, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressFrom(d.Get("egress_from"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("egress_from"); !tpgresource.IsEmptyValue(reflect.ValueOf(egressFromProp)) && (ok || !reflect.DeepEqual(v, egressFromProp)) { + obj["egressFrom"] = egressFromProp + } + egressToProp, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressTo(d.Get("egress_to"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("egress_to"); !tpgresource.IsEmptyValue(reflect.ValueOf(egressToProp)) && (ok || !reflect.DeepEqual(v, egressToProp)) { + obj["egressTo"] = egressToProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ServicePerimeterEgressPolicy: %#v", obj) + + obj, err = resourceAccessContextManagerServicePerimeterEgressPolicyPatchCreateEncoder(d, meta, obj) + if err != nil { + return err + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "status.egressPolicies"}) + if err != nil { + return err + } + billingProject := "" + + // err == nil 
indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ServicePerimeterEgressPolicy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = AccessContextManagerOperationWaitTimeWithResponse( + config, res, &opRes, "Creating ServicePerimeterEgressPolicy", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create ServicePerimeterEgressPolicy: %s", err) + } + + if _, ok := opRes["status"]; ok { + opRes, err = flattenNestedAccessContextManagerServicePerimeterEgressPolicy(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error getting nested object from operation response: %s", err) + } + if opRes == nil { + // Object isn't there any more - remove it from the state. + return fmt.Errorf("Error decoding response from operation, could not find nested object") + } + } + if err := d.Set("egress_from", flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressFrom(opRes["egressFrom"], d, config)); err != nil { + return err + } + if err := d.Set("egress_to", flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressTo(opRes["egressTo"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ServicePerimeterEgressPolicy %q: %#v", d.Id(), res) + + return resourceAccessContextManagerServicePerimeterEgressPolicyRead(d, meta) +} + +func resourceAccessContextManagerServicePerimeterEgressPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerServicePerimeterEgressPolicy %q", d.Id())) + } + + res, err = flattenNestedAccessContextManagerServicePerimeterEgressPolicy(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. 
+ log.Printf("[DEBUG] Removing AccessContextManagerServicePerimeterEgressPolicy because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("egress_from", flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressFrom(res["egressFrom"], d, config)); err != nil { + return fmt.Errorf("Error reading ServicePerimeterEgressPolicy: %s", err) + } + if err := d.Set("egress_to", flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressTo(res["egressTo"], d, config)); err != nil { + return fmt.Errorf("Error reading ServicePerimeterEgressPolicy: %s", err) + } + + return nil +} + +func resourceAccessContextManagerServicePerimeterEgressPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + egressFromProp, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressFrom(d.Get("egress_from"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("egress_from"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, egressFromProp)) { + obj["egressFrom"] = egressFromProp + } + egressToProp, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressTo(d.Get("egress_to"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("egress_to"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, egressToProp)) { + obj["egressTo"] = egressToProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] 
Updating ServicePerimeterEgressPolicy %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("egress_from") { + updateMask = append(updateMask, "egressFrom") + } + + if d.HasChange("egress_to") { + updateMask = append(updateMask, "egressTo") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + obj, err = resourceAccessContextManagerServicePerimeterEgressPolicyPatchUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating ServicePerimeterEgressPolicy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ServicePerimeterEgressPolicy %q: %#v", d.Id(), res) + } + + err = AccessContextManagerOperationWaitTime( + config, res, "Updating ServicePerimeterEgressPolicy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAccessContextManagerServicePerimeterEgressPolicyRead(d, meta) +} + +func resourceAccessContextManagerServicePerimeterEgressPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer 
transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceAccessContextManagerServicePerimeterEgressPolicyPatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ServicePerimeterEgressPolicy") + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "status.egressPolicies"}) + if err != nil { + return err + } + log.Printf("[DEBUG] Deleting ServicePerimeterEgressPolicy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ServicePerimeterEgressPolicy") + } + + err = AccessContextManagerOperationWaitTime( + config, res, "Deleting ServicePerimeterEgressPolicy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ServicePerimeterEgressPolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceAccessContextManagerServicePerimeterEgressPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + parts, err := tpgresource.GetImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/servicePerimeters/(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return nil, err + } + + if err := d.Set("perimeter", fmt.Sprintf("accessPolicies/%s/servicePerimeters/%s", 
parts["accessPolicy"], parts["perimeter"])); err != nil { + return nil, fmt.Errorf("Error setting perimeter: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["identity_type"] = + flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressFromIdentityType(original["identityType"], d, config) + transformed["identities"] = + flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressFromIdentities(original["identities"], d, config) + return []interface{}{transformed} +} +func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToResources(original["resources"], d, config) + transformed["external_resources"] = + flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToExternalResources(original["externalResources"], d, config) + transformed["operations"] = + flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperations(original["operations"], d, config) + 
return []interface{}{transformed} +} +func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToExternalResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "service_name": flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsServiceName(original["serviceName"], d, config), + "method_selectors": flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsMethodSelectors(original["methodSelectors"], d, config), + }) + } + return transformed +} +func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "method": 
flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsMethodSelectorsMethod(original["method"], d, config), + "permission": flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsMethodSelectorsPermission(original["permission"], d, config), + }) + } + return transformed +} +func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIdentityType, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressFromIdentityType(original["identity_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identityType"] = transformedIdentityType + } + + transformedIdentities, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressFromIdentities(original["identities"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identities"] = transformedIdentities + } + + return transformed, nil +} + +func expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressFromIdentityType(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressFromIdentities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressTo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedResources, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToResources(original["resources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resources"] = transformedResources + } + + transformedExternalResources, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToExternalResources(original["external_resources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExternalResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["externalResources"] = transformedExternalResources + } + + transformedOperations, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperations(original["operations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["operations"] = transformedOperations + } + + return transformed, nil +} + +func expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToExternalResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceName, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsServiceName(original["service_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceName"] = transformedServiceName + } + + transformedMethodSelectors, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsMethodSelectors(original["method_selectors"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["methodSelectors"] = transformedMethodSelectors + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsMethodSelectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMethod, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsMethodSelectorsMethod(original["method"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["method"] = transformedMethod + } + + transformedPermission, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsMethodSelectorsPermission(original["permission"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["permission"] = transformedPermission + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsMethodSelectorsMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressToOperationsMethodSelectorsPermission(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedAccessContextManagerServicePerimeterEgressPolicy(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["status"] + if !ok || v == nil { + return nil, nil + } + res = v.(map[string]interface{}) + + v, ok = res["egressPolicies"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value status.egressPolicies. 
Actual value: %v", v) + } + + _, item, err := resourceAccessContextManagerServicePerimeterEgressPolicyFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceAccessContextManagerServicePerimeterEgressPolicyFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedEgressFrom, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressFrom(d.Get("egress_from"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedEgressFrom := flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressFrom(expectedEgressFrom, d, meta.(*transport_tpg.Config)) + expectedEgressTo, err := expandNestedAccessContextManagerServicePerimeterEgressPolicyEgressTo(d.Get("egress_to"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedEgressTo := flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressTo(expectedEgressTo, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. 
+ for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemEgressFrom := flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressFrom(item["egressFrom"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemEgressFrom)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedEgressFrom))) && !reflect.DeepEqual(itemEgressFrom, expectedFlattenedEgressFrom) { + log.Printf("[DEBUG] Skipping item with egressFrom= %#v, looking for %#v)", itemEgressFrom, expectedFlattenedEgressFrom) + continue + } + itemEgressTo := flattenNestedAccessContextManagerServicePerimeterEgressPolicyEgressTo(item["egressTo"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemEgressTo)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedEgressTo))) && !reflect.DeepEqual(itemEgressTo, expectedFlattenedEgressTo) { + log.Printf("[DEBUG] Skipping item with egressTo= %#v, looking for %#v)", itemEgressTo, expectedFlattenedEgressTo) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. 
+func resourceAccessContextManagerServicePerimeterEgressPolicyPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerServicePerimeterEgressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceAccessContextManagerServicePerimeterEgressPolicyFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. + if found != nil { + return nil, fmt.Errorf("Unable to create ServicePerimeterEgressPolicy, existing object already found: %+v", found) + } + + // Return list with the resource to create appended + res := map[string]interface{}{ + "egressPolicies": append(currItems, obj), + } + wrapped := map[string]interface{}{ + "status": res, + } + res = wrapped + + return res, nil +} + +// PatchUpdateEncoder handles creating request data to PATCH parent resource +// with list including updated object. +func resourceAccessContextManagerServicePerimeterEgressPolicyPatchUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + items, err := resourceAccessContextManagerServicePerimeterEgressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceAccessContextManagerServicePerimeterEgressPolicyFindNestedObjectInList(d, meta, items) + if err != nil { + return nil, err + } + + // Return error if item to update does not exist. + if item == nil { + return nil, fmt.Errorf("Unable to update ServicePerimeterEgressPolicy %q - not found in list", d.Id()) + } + + // Merge new object into old. 
+ for k, v := range obj { + item[k] = v + } + items[idx] = item + + // Return list with new item added + res := map[string]interface{}{ + "egressPolicies": items, + } + wrapped := map[string]interface{}{ + "status": res, + } + res = wrapped + + return res, nil +} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. +func resourceAccessContextManagerServicePerimeterEgressPolicyPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerServicePerimeterEgressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceAccessContextManagerServicePerimeterEgressPolicyFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "AccessContextManagerServicePerimeterEgressPolicy") + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) + res := map[string]interface{}{ + "egressPolicies": updatedItems, + } + wrapped := map[string]interface{}{ + "status": res, + } + res = wrapped + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. 
+func resourceAccessContextManagerServicePerimeterEgressPolicyListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + var v interface{} + var ok bool + if v, ok = res["status"]; ok && v != nil { + res = v.(map[string]interface{}) + } else { + return nil, nil + } + + v, ok = res["egressPolicies"] + if ok && v != nil { + ls, lsOk := v.([]interface{}) + if !lsOk { + return nil, fmt.Errorf(`expected list for nested field "egressPolicies"`) + } + return ls, nil + } + return nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_ingress_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_ingress_policy.go new file mode 100644 index 0000000000..e7e1fe0605 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_ingress_policy.go @@ -0,0 +1,1027 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceAccessContextManagerServicePerimeterIngressPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessContextManagerServicePerimeterIngressPolicyCreate, + Read: resourceAccessContextManagerServicePerimeterIngressPolicyRead, + Update: resourceAccessContextManagerServicePerimeterIngressPolicyUpdate, + Delete: resourceAccessContextManagerServicePerimeterIngressPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAccessContextManagerServicePerimeterIngressPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "perimeter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Service Perimeter to add this resource to.`, + }, + "ingress_from": { + Type: schema.TypeList, + Optional: true, + 
Description: `Defines the conditions on the source of a request causing this 'IngressPolicy' +to apply.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "identities": { + Type: schema.TypeList, + Optional: true, + Description: `A list of identities that are allowed access through this ingress policy. +Should be in the format of email address. The email address should represent +individual user or service account only.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "identity_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), + Description: `Specifies the type of identities that are allowed access from outside the +perimeter. If left unspecified, then members of 'identities' field will be +allowed access. Possible values: ["ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, + }, + "sources": { + Type: schema.TypeList, + Optional: true, + Description: `Sources that this 'IngressPolicy' authorizes access from.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_level": { + Type: schema.TypeString, + Optional: true, + Description: `An 'AccessLevel' resource name that allow resources within the +'ServicePerimeters' to be accessed from the internet. 'AccessLevels' listed +must be in the same policy as this 'ServicePerimeter'. Referencing a nonexistent +'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, +resources within the perimeter can only be accessed via Google Cloud calls +with request origins within the perimeter. +Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' +If * is specified, then all IngressSources will be allowed.`, + }, + "resource": { + Type: schema.TypeString, + Optional: true, + Description: `A Google Cloud resource that is allowed to ingress the perimeter. +Requests from these resources will be allowed to access perimeter data. 
+Currently only projects are allowed. Format 'projects/{project_number}' +The project may be in any Google Cloud organization, not just the +organization that the perimeter is defined in. '*' is not allowed, the case +of allowing all Google Cloud resources only is not supported.`, + }, + }, + }, + }, + }, + }, + }, + "ingress_to": { + Type: schema.TypeList, + Optional: true, + Description: `Defines the conditions on the 'ApiOperation' and request destination that cause +this 'IngressPolicy' to apply.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "operations": { + Type: schema.TypeList, + Optional: true, + Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' +are allowed to perform in this 'ServicePerimeter'.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "method_selectors": { + Type: schema.TypeList, + Optional: true, + Description: `API methods or permissions to allow. Method or permission must belong to +the service specified by serviceName field. A single 'MethodSelector' entry +with '*' specified for the method field will allow all methods AND +permissions for the service specified in 'serviceName'.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "method": { + Type: schema.TypeString, + Optional: true, + Description: `Value for method should be a valid method name for the corresponding +serviceName in 'ApiOperation'. If '*' used as value for 'method', then +ALL methods and permissions are allowed.`, + }, + "permission": { + Type: schema.TypeString, + Optional: true, + Description: `Value for permission should be a valid Cloud IAM permission for the +corresponding 'serviceName' in 'ApiOperation'.`, + }, + }, + }, + }, + "service_name": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the API whose methods or permissions the 'IngressPolicy' or +'EgressPolicy' want to allow. 
A single 'ApiOperation' with 'serviceName' +field set to '*' will allow all methods AND permissions for all services.`, + }, + }, + }, + }, + "resources": { + Type: schema.TypeList, + Optional: true, + Description: `A list of resources, currently only projects in the form +'projects/', protected by this 'ServicePerimeter' +that are allowed to be accessed by sources defined in the +corresponding 'IngressFrom'. A request matches if it contains +a resource in this list. If '*' is specified for resources, +then this 'IngressTo' rule will authorize access to all +resources inside the perimeter, provided that the request +also matches the 'operations' field.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAccessContextManagerServicePerimeterIngressPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + ingressFromProp, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFrom(d.Get("ingress_from"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ingress_from"); !tpgresource.IsEmptyValue(reflect.ValueOf(ingressFromProp)) && (ok || !reflect.DeepEqual(v, ingressFromProp)) { + obj["ingressFrom"] = ingressFromProp + } + ingressToProp, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressTo(d.Get("ingress_to"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ingress_to"); !tpgresource.IsEmptyValue(reflect.ValueOf(ingressToProp)) && (ok || !reflect.DeepEqual(v, ingressToProp)) { + obj["ingressTo"] = ingressToProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer 
transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ServicePerimeterIngressPolicy: %#v", obj) + + obj, err = resourceAccessContextManagerServicePerimeterIngressPolicyPatchCreateEncoder(d, meta, obj) + if err != nil { + return err + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "status.ingressPolicies"}) + if err != nil { + return err + } + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ServicePerimeterIngressPolicy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = AccessContextManagerOperationWaitTimeWithResponse( + config, res, &opRes, "Creating ServicePerimeterIngressPolicy", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create ServicePerimeterIngressPolicy: %s", err) + } + + if _, ok := opRes["status"]; ok { + opRes, err = flattenNestedAccessContextManagerServicePerimeterIngressPolicy(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error getting nested object from operation response: %s", err) + } + if opRes == nil { + // Object isn't 
there any more - remove it from the state. + return fmt.Errorf("Error decoding response from operation, could not find nested object") + } + } + if err := d.Set("ingress_from", flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFrom(opRes["ingressFrom"], d, config)); err != nil { + return err + } + if err := d.Set("ingress_to", flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressTo(opRes["ingressTo"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ServicePerimeterIngressPolicy %q: %#v", d.Id(), res) + + return resourceAccessContextManagerServicePerimeterIngressPolicyRead(d, meta) +} + +func resourceAccessContextManagerServicePerimeterIngressPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerServicePerimeterIngressPolicy %q", d.Id())) + } + + res, err = flattenNestedAccessContextManagerServicePerimeterIngressPolicy(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - 
remove it from the state. + log.Printf("[DEBUG] Removing AccessContextManagerServicePerimeterIngressPolicy because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("ingress_from", flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFrom(res["ingressFrom"], d, config)); err != nil { + return fmt.Errorf("Error reading ServicePerimeterIngressPolicy: %s", err) + } + if err := d.Set("ingress_to", flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressTo(res["ingressTo"], d, config)); err != nil { + return fmt.Errorf("Error reading ServicePerimeterIngressPolicy: %s", err) + } + + return nil +} + +func resourceAccessContextManagerServicePerimeterIngressPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + ingressFromProp, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFrom(d.Get("ingress_from"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ingress_from"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ingressFromProp)) { + obj["ingressFrom"] = ingressFromProp + } + ingressToProp, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressTo(d.Get("ingress_to"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ingress_to"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ingressToProp)) { + obj["ingressTo"] = ingressToProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if 
err != nil { + return err + } + + log.Printf("[DEBUG] Updating ServicePerimeterIngressPolicy %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("ingress_from") { + updateMask = append(updateMask, "ingressFrom") + } + + if d.HasChange("ingress_to") { + updateMask = append(updateMask, "ingressTo") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + obj, err = resourceAccessContextManagerServicePerimeterIngressPolicyPatchUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating ServicePerimeterIngressPolicy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ServicePerimeterIngressPolicy %q: %#v", d.Id(), res) + } + + err = AccessContextManagerOperationWaitTime( + config, res, "Updating ServicePerimeterIngressPolicy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAccessContextManagerServicePerimeterIngressPolicyRead(d, meta) +} + +func resourceAccessContextManagerServicePerimeterIngressPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "{{perimeter}}") + if err != nil { + return 
err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceAccessContextManagerServicePerimeterIngressPolicyPatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ServicePerimeterIngressPolicy") + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "status.ingressPolicies"}) + if err != nil { + return err + } + log.Printf("[DEBUG] Deleting ServicePerimeterIngressPolicy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ServicePerimeterIngressPolicy") + } + + err = AccessContextManagerOperationWaitTime( + config, res, "Deleting ServicePerimeterIngressPolicy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ServicePerimeterIngressPolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceAccessContextManagerServicePerimeterIngressPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + parts, err := tpgresource.GetImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/servicePerimeters/(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return nil, err + } + + if err := d.Set("perimeter", 
fmt.Sprintf("accessPolicies/%s/servicePerimeters/%s", parts["accessPolicy"], parts["perimeter"])); err != nil { + return nil, fmt.Errorf("Error setting perimeter: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["identity_type"] = + flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromIdentityType(original["identityType"], d, config) + transformed["identities"] = + flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromIdentities(original["identities"], d, config) + transformed["sources"] = + flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromSources(original["sources"], d, config) + return []interface{}{transformed} +} +func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "access_level": 
flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromSourcesAccessLevel(original["accessLevel"], d, config), + "resource": flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromSourcesResource(original["resource"], d, config), + }) + } + return transformed +} +func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToResources(original["resources"], d, config) + transformed["operations"] = + flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperations(original["operations"], d, config) + return []interface{}{transformed} +} +func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = 
append(transformed, map[string]interface{}{ + "service_name": flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsServiceName(original["serviceName"], d, config), + "method_selectors": flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsMethodSelectors(original["methodSelectors"], d, config), + }) + } + return transformed +} +func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "method": flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsMethodSelectorsMethod(original["method"], d, config), + "permission": flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsMethodSelectorsPermission(original["permission"], d, config), + }) + } + return transformed +} +func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFrom(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIdentityType, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromIdentityType(original["identity_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identityType"] = transformedIdentityType + } + + transformedIdentities, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromIdentities(original["identities"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identities"] = transformedIdentities + } + + transformedSources, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromSources(original["sources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sources"] = transformedSources + } + + return transformed, nil +} + +func expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromIdentityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromIdentities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromSources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := 
make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAccessLevel, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromSourcesAccessLevel(original["access_level"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAccessLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accessLevel"] = transformedAccessLevel + } + + transformedResource, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromSourcesResource(original["resource"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resource"] = transformedResource + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromSourcesAccessLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFromSourcesResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressTo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedResources, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressToResources(original["resources"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resources"] = transformedResources + } + + transformedOperations, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperations(original["operations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["operations"] = transformedOperations + } + + return transformed, nil +} + +func expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressToResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceName, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsServiceName(original["service_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceName"] = transformedServiceName + } + + transformedMethodSelectors, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsMethodSelectors(original["method_selectors"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["methodSelectors"] = transformedMethodSelectors + } + + req = append(req, transformed) + } + return req, nil +} + +func 
expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsMethodSelectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMethod, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsMethodSelectorsMethod(original["method"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["method"] = transformedMethod + } + + transformedPermission, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsMethodSelectorsPermission(original["permission"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["permission"] = transformedPermission + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsMethodSelectorsMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressToOperationsMethodSelectorsPermission(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedAccessContextManagerServicePerimeterIngressPolicy(d *schema.ResourceData, meta 
interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["status"] + if !ok || v == nil { + return nil, nil + } + res = v.(map[string]interface{}) + + v, ok = res["ingressPolicies"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value status.ingressPolicies. Actual value: %v", v) + } + + _, item, err := resourceAccessContextManagerServicePerimeterIngressPolicyFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceAccessContextManagerServicePerimeterIngressPolicyFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedIngressFrom, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressFrom(d.Get("ingress_from"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedIngressFrom := flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFrom(expectedIngressFrom, d, meta.(*transport_tpg.Config)) + expectedIngressTo, err := expandNestedAccessContextManagerServicePerimeterIngressPolicyIngressTo(d.Get("ingress_to"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedIngressTo := flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressTo(expectedIngressTo, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. 
+ for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemIngressFrom := flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressFrom(item["ingressFrom"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemIngressFrom)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedIngressFrom))) && !reflect.DeepEqual(itemIngressFrom, expectedFlattenedIngressFrom) { + log.Printf("[DEBUG] Skipping item with ingressFrom= %#v, looking for %#v)", itemIngressFrom, expectedFlattenedIngressFrom) + continue + } + itemIngressTo := flattenNestedAccessContextManagerServicePerimeterIngressPolicyIngressTo(item["ingressTo"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemIngressTo)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedIngressTo))) && !reflect.DeepEqual(itemIngressTo, expectedFlattenedIngressTo) { + log.Printf("[DEBUG] Skipping item with ingressTo= %#v, looking for %#v)", itemIngressTo, expectedFlattenedIngressTo) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. 
+func resourceAccessContextManagerServicePerimeterIngressPolicyPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerServicePerimeterIngressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceAccessContextManagerServicePerimeterIngressPolicyFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. + if found != nil { + return nil, fmt.Errorf("Unable to create ServicePerimeterIngressPolicy, existing object already found: %+v", found) + } + + // Return list with the resource to create appended + res := map[string]interface{}{ + "ingressPolicies": append(currItems, obj), + } + wrapped := map[string]interface{}{ + "status": res, + } + res = wrapped + + return res, nil +} + +// PatchUpdateEncoder handles creating request data to PATCH parent resource +// with list including updated object. +func resourceAccessContextManagerServicePerimeterIngressPolicyPatchUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + items, err := resourceAccessContextManagerServicePerimeterIngressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceAccessContextManagerServicePerimeterIngressPolicyFindNestedObjectInList(d, meta, items) + if err != nil { + return nil, err + } + + // Return error if item to update does not exist. + if item == nil { + return nil, fmt.Errorf("Unable to update ServicePerimeterIngressPolicy %q - not found in list", d.Id()) + } + + // Merge new object into old. 
+ for k, v := range obj { + item[k] = v + } + items[idx] = item + + // Return list with new item added + res := map[string]interface{}{ + "ingressPolicies": items, + } + wrapped := map[string]interface{}{ + "status": res, + } + res = wrapped + + return res, nil +} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. +func resourceAccessContextManagerServicePerimeterIngressPolicyPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerServicePerimeterIngressPolicyListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceAccessContextManagerServicePerimeterIngressPolicyFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "AccessContextManagerServicePerimeterIngressPolicy") + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) + res := map[string]interface{}{ + "ingressPolicies": updatedItems, + } + wrapped := map[string]interface{}{ + "status": res, + } + res = wrapped + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. 
+func resourceAccessContextManagerServicePerimeterIngressPolicyListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter}}") + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + var v interface{} + var ok bool + if v, ok = res["status"]; ok && v != nil { + res = v.(map[string]interface{}) + } else { + return nil, nil + } + + v, ok = res["ingressPolicies"] + if ok && v != nil { + ls, lsOk := v.([]interface{}) + if !lsOk { + return nil, fmt.Errorf(`expected list for nested field "ingressPolicies"`) + } + return ls, nil + } + return nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_resource.go new file mode 100644 index 0000000000..b93a5b6f8f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeter_resource.go @@ -0,0 +1,470 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package accesscontextmanager + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceAccessContextManagerServicePerimeterResource() *schema.Resource { + return &schema.Resource{ + Create: resourceAccessContextManagerServicePerimeterResourceCreate, + Read: resourceAccessContextManagerServicePerimeterResourceRead, + Delete: resourceAccessContextManagerServicePerimeterResourceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAccessContextManagerServicePerimeterResourceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "perimeter_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Service Perimeter to add this resource to.`, + }, + "resource": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A GCP resource that is inside of the service perimeter. +Currently only projects are allowed. 
+Format: projects/{project_number}`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAccessContextManagerServicePerimeterResourceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + resourceProp, err := expandNestedAccessContextManagerServicePerimeterResourceResource(d.Get("resource"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("resource"); !tpgresource.IsEmptyValue(reflect.ValueOf(resourceProp)) && (ok || !reflect.DeepEqual(v, resourceProp)) { + obj["resource"] = resourceProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "{{perimeter_name}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter_name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ServicePerimeterResource: %#v", obj) + + obj, err = resourceAccessContextManagerServicePerimeterResourcePatchCreateEncoder(d, meta, obj) + if err != nil { + return err + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "status.resources"}) + if err != nil { + return err + } + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ServicePerimeterResource: %s", err) + } + + // Store the ID now + id, err := 
tpgresource.ReplaceVars(d, config, "{{perimeter_name}}/{{resource}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = AccessContextManagerOperationWaitTimeWithResponse( + config, res, &opRes, "Creating ServicePerimeterResource", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create ServicePerimeterResource: %s", err) + } + + if _, ok := opRes["status"]; ok { + opRes, err = flattenNestedAccessContextManagerServicePerimeterResource(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error getting nested object from operation response: %s", err) + } + if opRes == nil { + // Object isn't there any more - remove it from the state. + return fmt.Errorf("Error decoding response from operation, could not find nested object") + } + } + if err := d.Set("resource", flattenNestedAccessContextManagerServicePerimeterResourceResource(opRes["resource"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "{{perimeter_name}}/{{resource}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ServicePerimeterResource %q: %#v", d.Id(), res) + + return resourceAccessContextManagerServicePerimeterResourceRead(d, meta) +} + +func resourceAccessContextManagerServicePerimeterResourceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter_name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerServicePerimeterResource %q", d.Id())) + } + + res, err = flattenNestedAccessContextManagerServicePerimeterResource(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. 
+ log.Printf("[DEBUG] Removing AccessContextManagerServicePerimeterResource because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("resource", flattenNestedAccessContextManagerServicePerimeterResourceResource(res["resource"], d, config)); err != nil { + return fmt.Errorf("Error reading ServicePerimeterResource: %s", err) + } + + return nil +} + +func resourceAccessContextManagerServicePerimeterResourceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "{{perimeter_name}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter_name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceAccessContextManagerServicePerimeterResourcePatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ServicePerimeterResource") + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "status.resources"}) + if err != nil { + return err + } + log.Printf("[DEBUG] Deleting ServicePerimeterResource %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ServicePerimeterResource") + } + + err = 
AccessContextManagerOperationWaitTime( + config, res, "Deleting ServicePerimeterResource", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ServicePerimeterResource %q: %#v", d.Id(), res) + return nil +} + +func resourceAccessContextManagerServicePerimeterResourceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + parts, err := tpgresource.GetImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/servicePerimeters/(?P[^/]+)/(?P.+)"}, d, config, d.Id()) + if err != nil { + return nil, err + } + + if err := d.Set("perimeter_name", fmt.Sprintf("accessPolicies/%s/servicePerimeters/%s", parts["accessPolicy"], parts["perimeter"])); err != nil { + return nil, fmt.Errorf("Error setting perimeter_name: %s", err) + } + if err := d.Set("resource", parts["resource"]); err != nil { + return nil, fmt.Errorf("Error setting resource: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenNestedAccessContextManagerServicePerimeterResourceResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedAccessContextManagerServicePerimeterResourceResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedAccessContextManagerServicePerimeterResource(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["status"] + if !ok || v == nil { + return nil, nil + } + res = v.(map[string]interface{}) + + v, ok = res["resources"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested 
resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value status.resources. Actual value: %v", v) + } + + _, item, err := resourceAccessContextManagerServicePerimeterResourceFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceAccessContextManagerServicePerimeterResourceFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedResource, err := expandNestedAccessContextManagerServicePerimeterResourceResource(d.Get("resource"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedResource := flattenNestedAccessContextManagerServicePerimeterResourceResource(expectedResource, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + // List response only contains the ID - construct a response object. + item := map[string]interface{}{ + "resource": itemRaw, + } + + itemResource := flattenNestedAccessContextManagerServicePerimeterResourceResource(item["resource"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemResource)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedResource))) && !reflect.DeepEqual(itemResource, expectedFlattenedResource) { + log.Printf("[DEBUG] Skipping item with resource= %#v, looking for %#v)", itemResource, expectedFlattenedResource) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. 
+func resourceAccessContextManagerServicePerimeterResourcePatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerServicePerimeterResourceListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceAccessContextManagerServicePerimeterResourceFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. + if found != nil { + return nil, fmt.Errorf("Unable to create ServicePerimeterResource, existing object already found: %+v", found) + } + + // Return list with the resource to create appended + res := map[string]interface{}{ + "resources": append(currItems, obj["resource"]), + } + wrapped := map[string]interface{}{ + "status": res, + } + res = wrapped + + return res, nil +} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. +func resourceAccessContextManagerServicePerimeterResourcePatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceAccessContextManagerServicePerimeterResourceListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceAccessContextManagerServicePerimeterResourceFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "AccessContextManagerServicePerimeterResource") + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) 
+ res := map[string]interface{}{ + "resources": updatedItems, + } + wrapped := map[string]interface{}{ + "status": res, + } + res = wrapped + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. +func resourceAccessContextManagerServicePerimeterResourceListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter_name}}") + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + var v interface{} + var ok bool + if v, ok = res["status"]; ok && v != nil { + res = v.(map[string]interface{}) + } else { + return nil, nil + } + + v, ok = res["resources"] + if ok && v != nil { + ls, lsOk := v.([]interface{}) + if !lsOk { + return nil, fmt.Errorf(`expected list for nested field "resources"`) + } + return ls, nil + } + return nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_service_perimeters.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeters.go similarity index 80% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_service_perimeters.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeters.go index c1951711a9..6daad1c47b 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_access_context_manager_service_perimeters.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager/resource_access_context_manager_service_perimeters.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package accesscontextmanager import ( "fmt" @@ -21,6 +24,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceAccessContextManagerServicePerimeters() *schema.Resource { @@ -86,7 +93,7 @@ behavior.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"PERIMETER_TYPE_REGULAR", "PERIMETER_TYPE_BRIDGE", ""}), + ValidateFunc: verify.ValidateEnum([]string{"PERIMETER_TYPE_REGULAR", "PERIMETER_TYPE_BRIDGE", ""}), Description: `Specifies the type of the Perimeter. There are two types: regular and bridge. Regular Service Perimeter contains resources, access levels, and restricted services. Every resource can be in at most @@ -135,9 +142,9 @@ Format: accessPolicies/{policy_id}/accessLevels/{access_level_name}`, "egress_policies": { Type: schema.TypeList, Optional: true, - Description: `List of EgressPolicies to apply to the perimeter. A perimeter may + Description: `List of EgressPolicies to apply to the perimeter. A perimeter may have multiple EgressPolicies, each of which is evaluated separately. 
-Access is granted if any EgressPolicy grants it. Must be empty for +Access is granted if any EgressPolicy grants it. Must be empty for a perimeter bridge.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -151,8 +158,8 @@ a perimeter bridge.`, "identities": { Type: schema.TypeList, Optional: true, - Description: `A list of identities that are allowed access through this 'EgressPolicy'. -Should be in the format of email address. The email address should + Description: `A list of identities that are allowed access through this 'EgressPolicy'. +Should be in the format of email address. The email address should represent individual user or service account only.`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -161,9 +168,9 @@ represent individual user or service account only.`, "identity_type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), - Description: `Specifies the type of identities that are allowed access to outside the -perimeter. If left unspecified, then members of 'identities' field will + ValidateFunc: verify.ValidateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), + Description: `Specifies the type of identities that are allowed access to outside the +perimeter. If left unspecified, then members of 'identities' field will be allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, }, }, @@ -172,7 +179,7 @@ be allowed access. 
Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY" "egress_to": { Type: schema.TypeList, Optional: true, - Description: `Defines the conditions on the 'ApiOperation' and destination resources that + Description: `Defines the conditions on the 'ApiOperation' and destination resources that cause this 'EgressPolicy' to apply.`, MaxItems: 1, Elem: &schema.Resource{ @@ -190,30 +197,30 @@ s3://bucket/path). Currently '*' is not allowed.`, "operations": { Type: schema.TypeList, Optional: true, - Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches + Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches if it contains an operation/service in this list.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method_selectors": { Type: schema.TypeList, Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong -to the service specified by 'serviceName' field. A single MethodSelector -entry with '*' specified for the 'method' field will allow all methods + Description: `API methods or permissions to allow. Method or permission must belong +to the service specified by 'serviceName' field. A single MethodSelector +entry with '*' specified for the 'method' field will allow all methods AND permissions for the service specified in 'serviceName'.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method": { Type: schema.TypeString, Optional: true, - Description: `Value for 'method' should be a valid method name for the corresponding -'serviceName' in 'ApiOperation'. If '*' used as value for method, + Description: `Value for 'method' should be a valid method name for the corresponding +'serviceName' in 'ApiOperation'. 
If '*' used as value for method, then ALL methods and permissions are allowed.`, }, "permission": { Type: schema.TypeString, Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the + Description: `Value for permission should be a valid Cloud IAM permission for the corresponding 'serviceName' in 'ApiOperation'.`, }, }, @@ -222,8 +229,8 @@ corresponding 'serviceName' in 'ApiOperation'.`, "service_name": { Type: schema.TypeString, Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with serviceName + Description: `The name of the API whose methods or permissions the 'IngressPolicy' or +'EgressPolicy' want to allow. A single 'ApiOperation' with serviceName field set to '*' will allow all methods AND permissions for all services.`, }, }, @@ -232,10 +239,10 @@ field set to '*' will allow all methods AND permissions for all services.`, "resources": { Type: schema.TypeList, Optional: true, - Description: `A list of resources, currently only projects in the form -'projects/', that match this to stanza. A request matches -if it contains a resource in this list. If * is specified for resources, -then this 'EgressTo' rule will authorize access to all resources outside + Description: `A list of resources, currently only projects in the form +'projects/', that match this to stanza. A request matches +if it contains a resource in this list. If * is specified for resources, +then this 'EgressTo' rule will authorize access to all resources outside the perimeter.`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -268,7 +275,7 @@ to apply.`, Type: schema.TypeList, Optional: true, Description: `A list of identities that are allowed access through this ingress policy. -Should be in the format of email address. The email address should represent +Should be in the format of email address. 
The email address should represent individual user or service account only.`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -277,9 +284,9 @@ individual user or service account only.`, "identity_type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), - Description: `Specifies the type of identities that are allowed access from outside the -perimeter. If left unspecified, then members of 'identities' field will be + ValidateFunc: verify.ValidateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), + Description: `Specifies the type of identities that are allowed access from outside the +perimeter. If left unspecified, then members of 'identities' field will be allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, }, "sources": { @@ -291,23 +298,23 @@ allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", " "access_level": { Type: schema.TypeString, Optional: true, - Description: `An 'AccessLevel' resource name that allow resources within the -'ServicePerimeters' to be accessed from the internet. 'AccessLevels' listed + Description: `An 'AccessLevel' resource name that allow resources within the +'ServicePerimeters' to be accessed from the internet. 'AccessLevels' listed must be in the same policy as this 'ServicePerimeter'. Referencing a nonexistent -'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, -resources within the perimeter can only be accessed via Google Cloud calls -with request origins within the perimeter. -Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' +'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, +resources within the perimeter can only be accessed via Google Cloud calls +with request origins within the perimeter. 
+Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' If * is specified, then all IngressSources will be allowed.`, }, "resource": { Type: schema.TypeString, Optional: true, - Description: `A Google Cloud resource that is allowed to ingress the perimeter. -Requests from these resources will be allowed to access perimeter data. -Currently only projects are allowed. Format 'projects/{project_number}' -The project may be in any Google Cloud organization, not just the -organization that the perimeter is defined in. '*' is not allowed, the case + Description: `A Google Cloud resource that is allowed to ingress the perimeter. +Requests from these resources will be allowed to access perimeter data. +Currently only projects are allowed. Format 'projects/{project_number}' +The project may be in any Google Cloud organization, not just the +organization that the perimeter is defined in. '*' is not allowed, the case of allowing all Google Cloud resources only is not supported.`, }, }, @@ -327,30 +334,30 @@ this 'IngressPolicy' to apply.`, "operations": { Type: schema.TypeList, Optional: true, - Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' + Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' are allowed to perform in this 'ServicePerimeter'.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method_selectors": { Type: schema.TypeList, Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong to -the service specified by serviceName field. A single 'MethodSelector' entry -with '*' specified for the method field will allow all methods AND + Description: `API methods or permissions to allow. Method or permission must belong to +the service specified by serviceName field. 
A single 'MethodSelector' entry +with '*' specified for the method field will allow all methods AND permissions for the service specified in 'serviceName'.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method": { Type: schema.TypeString, Optional: true, - Description: `Value for method should be a valid method name for the corresponding -serviceName in 'ApiOperation'. If '*' used as value for 'method', then + Description: `Value for method should be a valid method name for the corresponding +serviceName in 'ApiOperation'. If '*' used as value for 'method', then ALL methods and permissions are allowed.`, }, "permission": { Type: schema.TypeString, Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the + Description: `Value for permission should be a valid Cloud IAM permission for the corresponding 'serviceName' in 'ApiOperation'.`, }, }, @@ -359,8 +366,8 @@ corresponding 'serviceName' in 'ApiOperation'.`, "service_name": { Type: schema.TypeString, Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with 'serviceName' + Description: `The name of the API whose methods or permissions the 'IngressPolicy' or +'EgressPolicy' want to allow. A single 'ApiOperation' with 'serviceName' field set to '*' will allow all methods AND permissions for all services.`, }, }, @@ -369,12 +376,12 @@ field set to '*' will allow all methods AND permissions for all services.`, "resources": { Type: schema.TypeList, Optional: true, - Description: `A list of resources, currently only projects in the form + Description: `A list of resources, currently only projects in the form 'projects/', protected by this 'ServicePerimeter' that are allowed to be accessed by sources defined in the corresponding 'IngressFrom'. A request matches if it contains a resource in this list. 
If '*' is specified for resources, -then this 'IngressTo' rule will authorize access to all +then this 'IngressTo' rule will authorize access to all resources inside the perimeter, provided that the request also matches the 'operations' field.`, Elem: &schema.Schema{ @@ -467,9 +474,9 @@ Format: accessPolicies/{policy_id}/accessLevels/{access_level_name}`, "egress_policies": { Type: schema.TypeList, Optional: true, - Description: `List of EgressPolicies to apply to the perimeter. A perimeter may + Description: `List of EgressPolicies to apply to the perimeter. A perimeter may have multiple EgressPolicies, each of which is evaluated separately. -Access is granted if any EgressPolicy grants it. Must be empty for +Access is granted if any EgressPolicy grants it. Must be empty for a perimeter bridge.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -483,8 +490,8 @@ a perimeter bridge.`, "identities": { Type: schema.TypeList, Optional: true, - Description: `A list of identities that are allowed access through this 'EgressPolicy'. -Should be in the format of email address. The email address should + Description: `A list of identities that are allowed access through this 'EgressPolicy'. +Should be in the format of email address. The email address should represent individual user or service account only.`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -493,9 +500,9 @@ represent individual user or service account only.`, "identity_type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), - Description: `Specifies the type of identities that are allowed access to outside the -perimeter. 
If left unspecified, then members of 'identities' field will + ValidateFunc: verify.ValidateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), + Description: `Specifies the type of identities that are allowed access to outside the +perimeter. If left unspecified, then members of 'identities' field will be allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, }, }, @@ -504,7 +511,7 @@ be allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY" "egress_to": { Type: schema.TypeList, Optional: true, - Description: `Defines the conditions on the 'ApiOperation' and destination resources that + Description: `Defines the conditions on the 'ApiOperation' and destination resources that cause this 'EgressPolicy' to apply.`, MaxItems: 1, Elem: &schema.Resource{ @@ -522,30 +529,30 @@ s3://bucket/path). Currently '*' is not allowed.`, "operations": { Type: schema.TypeList, Optional: true, - Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches + Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches if it contains an operation/service in this list.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method_selectors": { Type: schema.TypeList, Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong -to the service specified by 'serviceName' field. A single MethodSelector -entry with '*' specified for the 'method' field will allow all methods + Description: `API methods or permissions to allow. Method or permission must belong +to the service specified by 'serviceName' field. 
A single MethodSelector +entry with '*' specified for the 'method' field will allow all methods AND permissions for the service specified in 'serviceName'.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method": { Type: schema.TypeString, Optional: true, - Description: `Value for 'method' should be a valid method name for the corresponding -'serviceName' in 'ApiOperation'. If '*' used as value for method, + Description: `Value for 'method' should be a valid method name for the corresponding +'serviceName' in 'ApiOperation'. If '*' used as value for method, then ALL methods and permissions are allowed.`, }, "permission": { Type: schema.TypeString, Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the + Description: `Value for permission should be a valid Cloud IAM permission for the corresponding 'serviceName' in 'ApiOperation'.`, }, }, @@ -554,8 +561,8 @@ corresponding 'serviceName' in 'ApiOperation'.`, "service_name": { Type: schema.TypeString, Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with serviceName + Description: `The name of the API whose methods or permissions the 'IngressPolicy' or +'EgressPolicy' want to allow. A single 'ApiOperation' with serviceName field set to '*' will allow all methods AND permissions for all services.`, }, }, @@ -564,10 +571,10 @@ field set to '*' will allow all methods AND permissions for all services.`, "resources": { Type: schema.TypeList, Optional: true, - Description: `A list of resources, currently only projects in the form -'projects/', that match this to stanza. A request matches -if it contains a resource in this list. If * is specified for resources, -then this 'EgressTo' rule will authorize access to all resources outside + Description: `A list of resources, currently only projects in the form +'projects/', that match this to stanza. 
A request matches +if it contains a resource in this list. If * is specified for resources, +then this 'EgressTo' rule will authorize access to all resources outside the perimeter.`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -600,7 +607,7 @@ to apply.`, Type: schema.TypeList, Optional: true, Description: `A list of identities that are allowed access through this ingress policy. -Should be in the format of email address. The email address should represent +Should be in the format of email address. The email address should represent individual user or service account only.`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -609,9 +616,9 @@ individual user or service account only.`, "identity_type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), - Description: `Specifies the type of identities that are allowed access from outside the -perimeter. If left unspecified, then members of 'identities' field will be + ValidateFunc: verify.ValidateEnum([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}), + Description: `Specifies the type of identities that are allowed access from outside the +perimeter. If left unspecified, then members of 'identities' field will be allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, }, "sources": { @@ -623,23 +630,23 @@ allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", " "access_level": { Type: schema.TypeString, Optional: true, - Description: `An 'AccessLevel' resource name that allow resources within the -'ServicePerimeters' to be accessed from the internet. 'AccessLevels' listed + Description: `An 'AccessLevel' resource name that allow resources within the +'ServicePerimeters' to be accessed from the internet. 
'AccessLevels' listed must be in the same policy as this 'ServicePerimeter'. Referencing a nonexistent -'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, -resources within the perimeter can only be accessed via Google Cloud calls -with request origins within the perimeter. -Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' +'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, +resources within the perimeter can only be accessed via Google Cloud calls +with request origins within the perimeter. +Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' If * is specified, then all IngressSources will be allowed.`, }, "resource": { Type: schema.TypeString, Optional: true, - Description: `A Google Cloud resource that is allowed to ingress the perimeter. -Requests from these resources will be allowed to access perimeter data. -Currently only projects are allowed. Format 'projects/{project_number}' -The project may be in any Google Cloud organization, not just the -organization that the perimeter is defined in. '*' is not allowed, the case + Description: `A Google Cloud resource that is allowed to ingress the perimeter. +Requests from these resources will be allowed to access perimeter data. +Currently only projects are allowed. Format 'projects/{project_number}' +The project may be in any Google Cloud organization, not just the +organization that the perimeter is defined in. 
'*' is not allowed, the case of allowing all Google Cloud resources only is not supported.`, }, }, @@ -659,30 +666,30 @@ this 'IngressPolicy' to apply.`, "operations": { Type: schema.TypeList, Optional: true, - Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' + Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' are allowed to perform in this 'ServicePerimeter'.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method_selectors": { Type: schema.TypeList, Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong to -the service specified by serviceName field. A single 'MethodSelector' entry -with '*' specified for the method field will allow all methods AND + Description: `API methods or permissions to allow. Method or permission must belong to +the service specified by serviceName field. A single 'MethodSelector' entry +with '*' specified for the method field will allow all methods AND permissions for the service specified in 'serviceName'.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "method": { Type: schema.TypeString, Optional: true, - Description: `Value for method should be a valid method name for the corresponding -serviceName in 'ApiOperation'. If '*' used as value for 'method', then + Description: `Value for method should be a valid method name for the corresponding +serviceName in 'ApiOperation'. 
If '*' used as value for 'method', then ALL methods and permissions are allowed.`, }, "permission": { Type: schema.TypeString, Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the + Description: `Value for permission should be a valid Cloud IAM permission for the corresponding 'serviceName' in 'ApiOperation'.`, }, }, @@ -691,8 +698,8 @@ corresponding 'serviceName' in 'ApiOperation'.`, "service_name": { Type: schema.TypeString, Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with 'serviceName' + Description: `The name of the API whose methods or permissions the 'IngressPolicy' or +'EgressPolicy' want to allow. A single 'ApiOperation' with 'serviceName' field set to '*' will allow all methods AND permissions for all services.`, }, }, @@ -701,12 +708,12 @@ field set to '*' will allow all methods AND permissions for all services.`, "resources": { Type: schema.TypeList, Optional: true, - Description: `A list of resources, currently only projects in the form + Description: `A list of resources, currently only projects in the form 'projects/', protected by this 'ServicePerimeter' that are allowed to be accessed by sources defined in the corresponding 'IngressFrom'. A request matches if it contains a resource in this list. 
If '*' is specified for resources, -then this 'IngressTo' rule will authorize access to all +then this 'IngressTo' rule will authorize access to all resources inside the perimeter, provided that the request also matches the 'operations' field.`, Elem: &schema.Schema{ @@ -800,8 +807,8 @@ bet set to True if any of the fields in the spec are set to non-default values.` } func resourceAccessContextManagerServicePerimetersCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -810,17 +817,17 @@ func resourceAccessContextManagerServicePerimetersCreate(d *schema.ResourceData, servicePerimetersProp, err := expandAccessContextManagerServicePerimetersServicePerimeters(d.Get("service_perimeters"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("service_perimeters"); !isEmptyValue(reflect.ValueOf(servicePerimetersProp)) && (ok || !reflect.DeepEqual(v, servicePerimetersProp)) { + } else if v, ok := d.GetOkExists("service_perimeters"); !tpgresource.IsEmptyValue(reflect.ValueOf(servicePerimetersProp)) && (ok || !reflect.DeepEqual(v, servicePerimetersProp)) { obj["servicePerimeters"] = servicePerimetersProp } parentProp, err := expandAccessContextManagerServicePerimetersParent(d.Get("parent"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { + } else if v, ok := d.GetOkExists("parent"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { obj["parent"] = parentProp } - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters:replaceAll") + url, err := tpgresource.ReplaceVars(d, config, 
"{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters:replaceAll") if err != nil { return err } @@ -829,17 +836,25 @@ func resourceAccessContextManagerServicePerimetersCreate(d *schema.ResourceData, billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating ServicePerimeters: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "{{parent}}/servicePerimeters") + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/servicePerimeters") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -861,13 +876,13 @@ func resourceAccessContextManagerServicePerimetersCreate(d *schema.ResourceData, } func resourceAccessContextManagerServicePerimetersRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters") + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters") if err != nil { return err } @@ -875,13 +890,19 @@ func resourceAccessContextManagerServicePerimetersRead(d *schema.ResourceData, m billingProject := "" // err == nil indicates that the billing_project value was 
found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerServicePerimeters %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AccessContextManagerServicePerimeters %q", d.Id())) } if err := d.Set("service_perimeters", flattenAccessContextManagerServicePerimetersServicePerimeters(res["servicePerimeters"], d, config)); err != nil { @@ -892,8 +913,8 @@ func resourceAccessContextManagerServicePerimetersRead(d *schema.ResourceData, m } func resourceAccessContextManagerServicePerimetersUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -904,17 +925,17 @@ func resourceAccessContextManagerServicePerimetersUpdate(d *schema.ResourceData, servicePerimetersProp, err := expandAccessContextManagerServicePerimetersServicePerimeters(d.Get("service_perimeters"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("service_perimeters"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, servicePerimetersProp)) { + } else if v, ok := d.GetOkExists("service_perimeters"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, servicePerimetersProp)) { obj["servicePerimeters"] = servicePerimetersProp } parentProp, err := expandAccessContextManagerServicePerimetersParent(d.Get("parent"), d, config) if err != nil { return 
err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, parentProp)) { + } else if v, ok := d.GetOkExists("parent"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, parentProp)) { obj["parent"] = parentProp } - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters:replaceAll") + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters:replaceAll") if err != nil { return err } @@ -922,11 +943,19 @@ func resourceAccessContextManagerServicePerimetersUpdate(d *schema.ResourceData, log.Printf("[DEBUG] Updating ServicePerimeters %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating ServicePerimeters %q: %s", d.Id(), err) @@ -946,8 +975,8 @@ func resourceAccessContextManagerServicePerimetersUpdate(d *schema.ResourceData, } func resourceAccessContextManagerServicePerimetersDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -955,13 +984,20 @@ func resourceAccessContextManagerServicePerimetersDelete(d *schema.ResourceData, obj := make(map[string]interface{}) 
obj["servicePerimeters"] = []string{} - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters:replaceAll") + url, err := tpgresource.ReplaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters:replaceAll") if err != nil { return err } log.Printf("[DEBUG] Deleting servicePerimeters %q: %#v", d.Id(), obj) - res, err := SendRequestWithTimeout(config, "POST", "", url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error deleting ServicePerimeters %q: %s", d.Id(), err) @@ -981,10 +1017,10 @@ func resourceAccessContextManagerServicePerimetersDelete(d *schema.ResourceData, } func resourceAccessContextManagerServicePerimetersImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) // current import_formats can't import fields with forward slashes in their value - parts, err := getImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/(.+)"}, d, config, d.Id()) + parts, err := tpgresource.GetImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/(.+)"}, d, config, d.Id()) if err != nil { return nil, err } @@ -995,7 +1031,7 @@ func resourceAccessContextManagerServicePerimetersImport(d *schema.ResourceData, return []*schema.ResourceData{d}, nil } -func flattenAccessContextManagerServicePerimetersServicePerimeters(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimeters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1021,35 +1057,35 @@ func flattenAccessContextManagerServicePerimetersServicePerimeters(v interface{} } return 
transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersTitle(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersPerimeterType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenAccessContextManagerServicePerimetersServicePerimetersPerimeterType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || 
tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return "PERIMETER_TYPE_REGULAR" } return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1065,29 +1101,29 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersStatus(v inter transformed["restricted_services"] = flattenAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(original["restrictedServices"], d, config) transformed["vpc_accessible_services"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServices(original["vpcAccessibleServices"], d, config) + flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServices(original["vpcAccessibleServices"], d, config) transformed["ingress_policies"] = flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(original["ingressPolicies"], d, config) transformed["egress_policies"] = flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(original["egressPolicies"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusAccessLevels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusAccessLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1097,23 +1133,23 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVPCAcces } transformed := make(map[string]interface{}) transformed["enable_restriction"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) + flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) transformed["allowed_services"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesAllowedServices(original["allowedServices"], d, config) + flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesAllowedServices(original["allowedServices"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesEnableRestriction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesEnableRestriction(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesAllowedServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesAllowedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1132,7 +1168,7 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressP } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFrom(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1149,15 +1185,15 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressP flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(original["sources"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentityType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentityType(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentities(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1176,15 +1212,15 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressP } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressTo(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1199,11 +1235,11 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressP flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(original["operations"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1222,11 +1258,11 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressP } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1245,15 +1281,15 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressP } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1272,7 +1308,7 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPo } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFrom(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFrom(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1287,15 +1323,15 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPo flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(original["identities"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentityType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressTo(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1312,15 +1348,15 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPo flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(original["operations"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToExternalResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToExternalResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1339,11 +1375,11 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPo } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1362,15 +1398,15 @@ func 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPo } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1386,26 +1422,26 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersSpec(v interfa transformed["restricted_services"] = flattenAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(original["restrictedServices"], d, config) transformed["vpc_accessible_services"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServices(original["vpcAccessibleServices"], d, config) + flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServices(original["vpcAccessibleServices"], d, config) transformed["ingress_policies"] = flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(original["ingressPolicies"], d, config) 
transformed["egress_policies"] = flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(original["egressPolicies"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecAccessLevels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecAccessLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1415,20 +1451,20 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessi } transformed := make(map[string]interface{}) transformed["enable_restriction"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) + 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) transformed["allowed_services"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesAllowedServices(original["allowedServices"], d, config) + flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesAllowedServices(original["allowedServices"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesEnableRestriction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesEnableRestriction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesAllowedServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesAllowedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1447,7 +1483,7 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPol } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFrom(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1464,15 +1500,15 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPol flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(original["sources"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentityType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentities(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1491,15 +1527,15 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPol } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressTo(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1514,11 +1550,11 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPol flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(original["operations"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v 
} @@ -1537,11 +1573,11 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPol } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1560,15 +1596,15 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPol } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1587,7 +1623,7 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoli } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFrom(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1602,15 +1638,15 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoli flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(original["identities"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentityType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressTo(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1627,15 +1663,15 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoli flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(original["operations"], d, config) return []interface{}{transformed} } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToExternalResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToExternalResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1654,11 +1690,11 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoli } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1677,19 +1713,19 @@ func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoli } return transformed } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandAccessContextManagerServicePerimetersServicePerimeters(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimeters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -1703,63 +1739,63 @@ func expandAccessContextManagerServicePerimetersServicePerimeters(v interface{}, transformedName, err := expandAccessContextManagerServicePerimetersServicePerimetersName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedTitle, err := expandAccessContextManagerServicePerimetersServicePerimetersTitle(original["title"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["title"] = transformedTitle } transformedDescription, err := expandAccessContextManagerServicePerimetersServicePerimetersDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedCreateTime, err := expandAccessContextManagerServicePerimetersServicePerimetersCreateTime(original["create_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCreateTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCreateTime); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["createTime"] = transformedCreateTime } transformedUpdateTime, err := expandAccessContextManagerServicePerimetersServicePerimetersUpdateTime(original["update_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUpdateTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUpdateTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["updateTime"] = transformedUpdateTime } transformedPerimeterType, err := expandAccessContextManagerServicePerimetersServicePerimetersPerimeterType(original["perimeter_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPerimeterType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPerimeterType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["perimeterType"] = transformedPerimeterType } transformedStatus, err := expandAccessContextManagerServicePerimetersServicePerimetersStatus(original["status"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStatus); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStatus); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["status"] = transformedStatus } transformedSpec, err := expandAccessContextManagerServicePerimetersServicePerimetersSpec(original["spec"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSpec); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSpec); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["spec"] = transformedSpec } transformedUseExplicitDryRunSpec, err := expandAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(original["use_explicit_dry_run_spec"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedUseExplicitDryRunSpec); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUseExplicitDryRunSpec); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["useExplicitDryRunSpec"] = transformedUseExplicitDryRunSpec } @@ -1768,31 +1804,31 @@ func expandAccessContextManagerServicePerimetersServicePerimeters(v interface{}, return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersCreateTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersCreateTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersUpdateTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandAccessContextManagerServicePerimetersServicePerimetersUpdateTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersPerimeterType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersPerimeterType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1804,62 +1840,62 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatus(v interf transformedResources, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusResources(original["resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resources"] = transformedResources } transformedAccessLevels, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusAccessLevels(original["access_levels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAccessLevels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAccessLevels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["accessLevels"] = transformedAccessLevels } transformedRestrictedServices, err := 
expandAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(original["restricted_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRestrictedServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRestrictedServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["restrictedServices"] = transformedRestrictedServices } - transformedVPCAccessibleServices, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServices(original["vpc_accessible_services"], d, config) + transformedVpcAccessibleServices, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServices(original["vpc_accessible_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVPCAccessibleServices); val.IsValid() && !isEmptyValue(val) { - transformed["vpcAccessibleServices"] = transformedVPCAccessibleServices + } else if val := reflect.ValueOf(transformedVpcAccessibleServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vpcAccessibleServices"] = transformedVpcAccessibleServices } transformedIngressPolicies, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(original["ingress_policies"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIngressPolicies); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIngressPolicies); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ingressPolicies"] = transformedIngressPolicies } transformedEgressPolicies, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(original["egress_policies"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEgressPolicies); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedEgressPolicies); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["egressPolicies"] = transformedEgressPolicies } return transformed, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusAccessLevels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1868,33 +1904,33 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccess original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedEnableRestriction, err := 
expandAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesEnableRestriction(original["enable_restriction"], d, config) + transformedEnableRestriction, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesEnableRestriction(original["enable_restriction"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnableRestriction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnableRestriction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enableRestriction"] = transformedEnableRestriction } - transformedAllowedServices, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesAllowedServices(original["allowed_services"], d, config) + transformedAllowedServices, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesAllowedServices(original["allowed_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowedServices"] = transformedAllowedServices } return transformed, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesEnableRestriction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesEnableRestriction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesAllowedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesAllowedServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1907,14 +1943,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPo transformedIngressFrom, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFrom(original["ingress_from"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIngressFrom); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIngressFrom); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ingressFrom"] = transformedIngressFrom } transformedIngressTo, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressTo(original["ingress_to"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIngressTo); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIngressTo); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ingressTo"] = transformedIngressTo } @@ -1923,7 +1959,7 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPo return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { 
+func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1935,36 +1971,36 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPo transformedIdentityType, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentityType(original["identity_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identityType"] = transformedIdentityType } transformedIdentities, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentities(original["identities"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identities"] = transformedIdentities } transformedSources, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(original["sources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sources"] = transformedSources } return transformed, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1977,14 +2013,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPo transformedAccessLevel, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesAccessLevel(original["access_level"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAccessLevel); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAccessLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["accessLevel"] = transformedAccessLevel } transformedResource, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesResource(original["resource"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResource); val.IsValid() && !isEmptyValue(val) { 
+ } else if val := reflect.ValueOf(transformedResource); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resource"] = transformedResource } @@ -1993,15 +2029,15 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPo return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressTo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2013,25 +2049,25 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPo transformedResources, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToResources(original["resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + } else if 
val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resources"] = transformedResources } transformedOperations, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(original["operations"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["operations"] = transformedOperations } return transformed, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2044,14 +2080,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPo transformedServiceName, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsServiceName(original["service_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedServiceName); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["serviceName"] = transformedServiceName } transformedMethodSelectors, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectors(original["method_selectors"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["methodSelectors"] = transformedMethodSelectors } @@ -2060,11 +2096,11 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPo return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2077,14 +2113,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPo transformedMethod, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config) if err != nil { return nil, err - } 
else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["method"] = transformedMethod } transformedPermission, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["permission"] = transformedPermission } @@ -2093,15 +2129,15 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPo return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2114,14 +2150,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPol transformedEgressFrom, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFrom(original["egress_from"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEgressFrom); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEgressFrom); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["egressFrom"] = transformedEgressFrom } transformedEgressTo, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressTo(original["egress_to"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEgressTo); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEgressTo); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["egressTo"] = transformedEgressTo } @@ -2130,7 +2166,7 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPol return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2142,29 +2178,29 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPol transformedIdentityType, err := 
expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentityType(original["identity_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identityType"] = transformedIdentityType } transformedIdentities, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(original["identities"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identities"] = transformedIdentities } return transformed, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressTo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2176,36 +2212,36 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPol transformedResources, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToResources(original["resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resources"] = transformedResources } transformedExternalResources, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToExternalResources(original["external_resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExternalResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExternalResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["externalResources"] = transformedExternalResources } transformedOperations, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(original["operations"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["operations"] = transformedOperations } return transformed, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToExternalResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToExternalResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2218,14 +2254,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPol transformedServiceName, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsServiceName(original["service_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["serviceName"] = transformedServiceName } transformedMethodSelectors, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectors(original["method_selectors"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() 
&& !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["methodSelectors"] = transformedMethodSelectors } @@ -2234,11 +2270,11 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPol return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2251,14 +2287,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPol transformedMethod, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["method"] = transformedMethod } transformedPermission, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], 
d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["permission"] = transformedPermission } @@ -2267,15 +2303,15 @@ func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPol return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2287,61 +2323,61 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpec(v interfac transformedResources, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecResources(original["resources"], d, config) if err != nil { return 
nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resources"] = transformedResources } transformedAccessLevels, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecAccessLevels(original["access_levels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAccessLevels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAccessLevels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["accessLevels"] = transformedAccessLevels } transformedRestrictedServices, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(original["restricted_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRestrictedServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRestrictedServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["restrictedServices"] = transformedRestrictedServices } - transformedVPCAccessibleServices, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServices(original["vpc_accessible_services"], d, config) + transformedVpcAccessibleServices, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServices(original["vpc_accessible_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVPCAccessibleServices); val.IsValid() && !isEmptyValue(val) { - transformed["vpcAccessibleServices"] = transformedVPCAccessibleServices + } else if val := reflect.ValueOf(transformedVpcAccessibleServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vpcAccessibleServices"] = transformedVpcAccessibleServices } transformedIngressPolicies, err := 
expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(original["ingress_policies"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIngressPolicies); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIngressPolicies); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ingressPolicies"] = transformedIngressPolicies } transformedEgressPolicies, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(original["egress_policies"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEgressPolicies); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEgressPolicies); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["egressPolicies"] = transformedEgressPolicies } return transformed, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecAccessLevels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2350,32 +2386,32 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessib original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedEnableRestriction, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesEnableRestriction(original["enable_restriction"], d, config) + transformedEnableRestriction, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesEnableRestriction(original["enable_restriction"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnableRestriction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnableRestriction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enableRestriction"] = transformedEnableRestriction } - transformedAllowedServices, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesAllowedServices(original["allowed_services"], d, config) + transformedAllowedServices, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesAllowedServices(original["allowed_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowedServices"] = 
transformedAllowedServices } return transformed, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesEnableRestriction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesEnableRestriction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesAllowedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesAllowedServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2388,14 +2424,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoli transformedIngressFrom, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFrom(original["ingress_from"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIngressFrom); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIngressFrom); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ingressFrom"] = transformedIngressFrom } transformedIngressTo, err := 
expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressTo(original["ingress_to"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIngressTo); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIngressTo); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ingressTo"] = transformedIngressTo } @@ -2404,7 +2440,7 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoli return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2416,36 +2452,36 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoli transformedIdentityType, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentityType(original["identity_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identityType"] = transformedIdentityType } transformedIdentities, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentities(original["identities"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identities"] = 
transformedIdentities } transformedSources, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(original["sources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sources"] = transformedSources } return transformed, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2458,14 +2494,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoli transformedAccessLevel, err := 
expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesAccessLevel(original["access_level"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAccessLevel); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAccessLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["accessLevel"] = transformedAccessLevel } transformedResource, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesResource(original["resource"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResource); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResource); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resource"] = transformedResource } @@ -2474,15 +2510,15 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoli return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressTo(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressTo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2494,25 +2530,25 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoli transformedResources, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToResources(original["resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resources"] = transformedResources } transformedOperations, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(original["operations"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["operations"] = transformedOperations } return transformed, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2525,14 +2561,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoli transformedServiceName, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsServiceName(original["service_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["serviceName"] = transformedServiceName } transformedMethodSelectors, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectors(original["method_selectors"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["methodSelectors"] = transformedMethodSelectors } @@ -2541,11 +2577,11 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoli return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2558,14 +2594,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoli transformedMethod, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["method"] = transformedMethod } transformedPermission, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["permission"] = transformedPermission } @@ -2574,15 +2610,15 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoli return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2595,14 +2631,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolic transformedEgressFrom, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFrom(original["egress_from"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEgressFrom); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEgressFrom); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["egressFrom"] = transformedEgressFrom } transformedEgressTo, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressTo(original["egress_to"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEgressTo); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEgressTo); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["egressTo"] = transformedEgressTo } 
@@ -2611,7 +2647,7 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolic return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2623,29 +2659,29 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolic transformedIdentityType, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentityType(original["identity_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identityType"] = transformedIdentityType } transformedIdentities, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(original["identities"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentities); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identities"] = transformedIdentities } return transformed, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressTo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2657,36 +2693,36 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolic transformedResources, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToResources(original["resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resources"] = transformedResources } transformedExternalResources, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToExternalResources(original["external_resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExternalResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExternalResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["externalResources"] = transformedExternalResources } transformedOperations, err := 
expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(original["operations"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOperations); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["operations"] = transformedOperations } return transformed, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToExternalResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToExternalResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2699,14 +2735,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolic transformedServiceName, err := 
expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsServiceName(original["service_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedServiceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["serviceName"] = transformedServiceName } transformedMethodSelectors, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectors(original["method_selectors"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["methodSelectors"] = transformedMethodSelectors } @@ -2715,11 +2751,11 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolic return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ 
-2732,14 +2768,14 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolic transformedMethod, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["method"] = transformedMethod } transformedPermission, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPermission); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["permission"] = transformedPermission } @@ -2748,18 +2784,18 @@ func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolic return req, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAccessContextManagerServicePerimetersParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAccessContextManagerServicePerimetersParent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/active_directory_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/active_directory_operation.go new file mode 100644 index 0000000000..1603d1bd41 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/active_directory_operation.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package activedirectory + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type ActiveDirectoryOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *ActiveDirectoryOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.ActiveDirectoryBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) +} + +func createActiveDirectoryWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*ActiveDirectoryOperationWaiter, error) { + w := &ActiveDirectoryOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func ActiveDirectoryOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createActiveDirectoryWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func 
ActiveDirectoryOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createActiveDirectoryWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain.go new file mode 100644 index 0000000000..b3dc711844 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain.go @@ -0,0 +1,513 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package activedirectory + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceActiveDirectoryDomain() *schema.Resource { + return &schema.Resource{ + Create: resourceActiveDirectoryDomainCreate, + Read: resourceActiveDirectoryDomainRead, + Update: resourceActiveDirectoryDomainUpdate, + Delete: resourceActiveDirectoryDomainDelete, + + Importer: &schema.ResourceImporter{ + State: resourceActiveDirectoryDomainImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "domain_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateADDomainName(), + Description: `The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions, +https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains.`, + }, + "locations": { + Type: schema.TypeList, + Required: true, + Description: `Locations where domain needs to be provisioned. [regions][compute/docs/regions-zones/] +e.g. us-west1 or us-east4 Service supports up to 4 locations at once. Each location will use a /26 block.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "reserved_ip_range": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The CIDR range of internal addresses that are reserved for this domain. Reserved networks must be /24 or larger. 
+Ranges must be unique and non-overlapping with existing subnets in authorizedNetworks`, + }, + "admin": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of delegated administrator account used to perform Active Directory operations. +If not specified, setupadmin will be used.`, + Default: "setupadmin", + }, + "authorized_networks": { + Type: schema.TypeSet, + Optional: true, + Description: `The full names of the Google Compute Engine networks the domain instance is connected to. The domain is only available on networks listed in authorizedNetworks. +If CIDR subnets overlap between networks, domain creation will fail.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Resource labels that can contain user-provided metadata`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "fqdn": { + Type: schema.TypeString, + Computed: true, + Description: `The fully-qualified domain name of the exposed domain used by clients to connect to the service. 
+Similar to what would be chosen for an Active Directory set up on an internal network.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique name of the domain using the format: 'projects/{project}/locations/global/domains/{domainName}'.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceActiveDirectoryDomainCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandActiveDirectoryDomainLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + authorizedNetworksProp, err := expandActiveDirectoryDomainAuthorizedNetworks(d.Get("authorized_networks"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("authorized_networks"); !tpgresource.IsEmptyValue(reflect.ValueOf(authorizedNetworksProp)) && (ok || !reflect.DeepEqual(v, authorizedNetworksProp)) { + obj["authorizedNetworks"] = authorizedNetworksProp + } + reservedIpRangeProp, err := expandActiveDirectoryDomainReservedIpRange(d.Get("reserved_ip_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reserved_ip_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(reservedIpRangeProp)) && (ok || !reflect.DeepEqual(v, reservedIpRangeProp)) { + obj["reservedIpRange"] = reservedIpRangeProp + } + locationsProp, err := expandActiveDirectoryDomainLocations(d.Get("locations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("locations"); !tpgresource.IsEmptyValue(reflect.ValueOf(locationsProp)) && 
(ok || !reflect.DeepEqual(v, locationsProp)) { + obj["locations"] = locationsProp + } + adminProp, err := expandActiveDirectoryDomainAdmin(d.Get("admin"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("admin"); !tpgresource.IsEmptyValue(reflect.ValueOf(adminProp)) && (ok || !reflect.DeepEqual(v, adminProp)) { + obj["admin"] = adminProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains?domainName={{domain_name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Domain: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Domain: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return fmt.Errorf("Error creating Domain: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = ActiveDirectoryOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Domain", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Domain: %s", err) + } + + 
if err := d.Set("name", flattenActiveDirectoryDomainName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Domain %q: %#v", d.Id(), res) + + return resourceActiveDirectoryDomainRead(d, meta) +} + +func resourceActiveDirectoryDomainRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ActiveDirectoryBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Domain: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ActiveDirectoryDomain %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Domain: %s", err) + } + + if err := d.Set("name", flattenActiveDirectoryDomainName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Domain: %s", err) + } + if err := d.Set("labels", flattenActiveDirectoryDomainLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading 
Domain: %s", err) + } + if err := d.Set("authorized_networks", flattenActiveDirectoryDomainAuthorizedNetworks(res["authorizedNetworks"], d, config)); err != nil { + return fmt.Errorf("Error reading Domain: %s", err) + } + if err := d.Set("reserved_ip_range", flattenActiveDirectoryDomainReservedIpRange(res["reservedIpRange"], d, config)); err != nil { + return fmt.Errorf("Error reading Domain: %s", err) + } + if err := d.Set("locations", flattenActiveDirectoryDomainLocations(res["locations"], d, config)); err != nil { + return fmt.Errorf("Error reading Domain: %s", err) + } + if err := d.Set("admin", flattenActiveDirectoryDomainAdmin(res["admin"], d, config)); err != nil { + return fmt.Errorf("Error reading Domain: %s", err) + } + if err := d.Set("fqdn", flattenActiveDirectoryDomainFqdn(res["fqdn"], d, config)); err != nil { + return fmt.Errorf("Error reading Domain: %s", err) + } + + return nil +} + +func resourceActiveDirectoryDomainUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Domain: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandActiveDirectoryDomainLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + authorizedNetworksProp, err := expandActiveDirectoryDomainAuthorizedNetworks(d.Get("authorized_networks"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("authorized_networks"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, authorizedNetworksProp)) { + 
obj["authorizedNetworks"] = authorizedNetworksProp + } + locationsProp, err := expandActiveDirectoryDomainLocations(d.Get("locations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("locations"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, locationsProp)) { + obj["locations"] = locationsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ActiveDirectoryBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Domain %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("authorized_networks") { + updateMask = append(updateMask, "authorizedNetworks") + } + + if d.HasChange("locations") { + updateMask = append(updateMask, "locations") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + + if err != nil { + return fmt.Errorf("Error updating Domain %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Domain %q: %#v", d.Id(), res) + } + + err = ActiveDirectoryOperationWaitTime( + config, res, project, "Updating Domain", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceActiveDirectoryDomainRead(d, meta) +} + +func 
resourceActiveDirectoryDomainDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Domain: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain_name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Domain %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Domain") + } + + err = ActiveDirectoryOperationWaitTime( + config, res, project, "Deleting Domain", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Domain %q: %#v", d.Id(), res) + return nil +} + +func resourceActiveDirectoryDomainImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { + return nil, err + } + + return []*schema.ResourceData{d}, nil +} + +func 
flattenActiveDirectoryDomainName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenActiveDirectoryDomainLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenActiveDirectoryDomainAuthorizedNetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenActiveDirectoryDomainReservedIpRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenActiveDirectoryDomainLocations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenActiveDirectoryDomainAdmin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenActiveDirectoryDomainFqdn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandActiveDirectoryDomainLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandActiveDirectoryDomainAuthorizedNetworks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandActiveDirectoryDomainReservedIpRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandActiveDirectoryDomainLocations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandActiveDirectoryDomainAdmin(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain_sweeper.go new file mode 100644 index 0000000000..8189a43e18 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package activedirectory + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ActiveDirectoryDomain", testSweepActiveDirectoryDomain) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepActiveDirectoryDomain(region string) error { + resourceName := "ActiveDirectoryDomain" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://managedidentities.googleapis.com/v1/projects/{{project}}/locations/global/domains", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["domains"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://managedidentities.googleapis.com/v1/projects/{{project}}/locations/global/domains/{{domain_name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain_trust.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain_trust.go new file mode 100644 index 0000000000..125362997a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/activedirectory/resource_active_directory_domain_trust.go @@ -0,0 +1,661 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package activedirectory + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceActiveDirectoryDomainTrust() *schema.Resource { + return &schema.Resource{ + Create: resourceActiveDirectoryDomainTrustCreate, + Read: resourceActiveDirectoryDomainTrustRead, + Update: resourceActiveDirectoryDomainTrustUpdate, + Delete: resourceActiveDirectoryDomainTrustDelete, + + Importer: &schema.ResourceImporter{ + State: resourceActiveDirectoryDomainTrustImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "domain": { 
+ Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions, +https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains.`, + }, + "target_dns_ip_addresses": { + Type: schema.TypeSet, + Required: true, + Description: `The target DNS server IP addresses which can resolve the remote domain involved in the trust.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "target_domain_name": { + Type: schema.TypeString, + Required: true, + Description: `The fully qualified target domain name which will be in trust with the current domain.`, + }, + "trust_direction": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"INBOUND", "OUTBOUND", "BIDIRECTIONAL"}), + Description: `The trust direction, which decides if the current domain is trusted, trusting, or both. Possible values: ["INBOUND", "OUTBOUND", "BIDIRECTIONAL"]`, + }, + "trust_handshake_secret": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The trust secret used for the handshake with the target domain. This will not be stored.`, + Sensitive: true, + }, + "trust_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"FOREST", "EXTERNAL"}), + Description: `The type of trust represented by the trust resource. 
Possible values: ["FOREST", "EXTERNAL"]`, + }, + "selective_authentication": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether the trusted side has forest/domain wide access or selective access to an approved set of resources.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceActiveDirectoryDomainTrustCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + targetDomainNameProp, err := expandNestedActiveDirectoryDomainTrustTargetDomainName(d.Get("target_domain_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_domain_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(targetDomainNameProp)) && (ok || !reflect.DeepEqual(v, targetDomainNameProp)) { + obj["targetDomainName"] = targetDomainNameProp + } + trustTypeProp, err := expandNestedActiveDirectoryDomainTrustTrustType(d.Get("trust_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("trust_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(trustTypeProp)) && (ok || !reflect.DeepEqual(v, trustTypeProp)) { + obj["trustType"] = trustTypeProp + } + trustDirectionProp, err := expandNestedActiveDirectoryDomainTrustTrustDirection(d.Get("trust_direction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("trust_direction"); !tpgresource.IsEmptyValue(reflect.ValueOf(trustDirectionProp)) && (ok || !reflect.DeepEqual(v, trustDirectionProp)) { + obj["trustDirection"] = trustDirectionProp + } + selectiveAuthenticationProp, err := expandNestedActiveDirectoryDomainTrustSelectiveAuthentication(d.Get("selective_authentication"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("selective_authentication"); !tpgresource.IsEmptyValue(reflect.ValueOf(selectiveAuthenticationProp)) && (ok || !reflect.DeepEqual(v, selectiveAuthenticationProp)) { + obj["selectiveAuthentication"] = selectiveAuthenticationProp + } + targetDnsIpAddressesProp, err := expandNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(d.Get("target_dns_ip_addresses"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_dns_ip_addresses"); !tpgresource.IsEmptyValue(reflect.ValueOf(targetDnsIpAddressesProp)) && (ok || !reflect.DeepEqual(v, targetDnsIpAddressesProp)) { + obj["targetDnsIpAddresses"] = targetDnsIpAddressesProp + } + trustHandshakeSecretProp, err := expandNestedActiveDirectoryDomainTrustTrustHandshakeSecret(d.Get("trust_handshake_secret"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("trust_handshake_secret"); !tpgresource.IsEmptyValue(reflect.ValueOf(trustHandshakeSecretProp)) && (ok || !reflect.DeepEqual(v, trustHandshakeSecretProp)) { + obj["trustHandshakeSecret"] = trustHandshakeSecretProp + } + + obj, err = resourceActiveDirectoryDomainTrustEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain}}:attachTrust") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new DomainTrust: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DomainTrust: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + 
Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating DomainTrust: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/domains/{{domain}}/{{target_domain_name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = ActiveDirectoryOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating DomainTrust", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create DomainTrust: %s", err) + } + + opRes, err = resourceActiveDirectoryDomainTrustDecoder(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error decoding response from operation: %s", err) + } + if opRes == nil { + return fmt.Errorf("Error decoding response from operation, could not find object") + } + + if _, ok := opRes["trusts"]; ok { + opRes, err = flattenNestedActiveDirectoryDomainTrust(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error getting nested object from operation response: %s", err) + } + if opRes == nil { + // Object isn't there any more - remove it from the state. + return fmt.Errorf("Error decoding response from operation, could not find nested object") + } + } + if err := d.Set("target_domain_name", flattenNestedActiveDirectoryDomainTrustTargetDomainName(opRes["targetDomainName"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/domains/{{domain}}/{{target_domain_name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating DomainTrust %q: %#v", d.Id(), res) + + return resourceActiveDirectoryDomainTrustRead(d, meta) +} + +func resourceActiveDirectoryDomainTrustRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DomainTrust: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ActiveDirectoryDomainTrust %q", d.Id())) + } + + res, err = flattenNestedActiveDirectoryDomainTrust(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing ActiveDirectoryDomainTrust because it couldn't be matched.") + d.SetId("") + return nil + } + + res, err = resourceActiveDirectoryDomainTrustDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing ActiveDirectoryDomainTrust because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading DomainTrust: %s", err) + } + + if err := d.Set("target_domain_name", flattenNestedActiveDirectoryDomainTrustTargetDomainName(res["targetDomainName"], d, config)); err != nil { + return fmt.Errorf("Error reading DomainTrust: %s", err) + } + if err := d.Set("trust_type", flattenNestedActiveDirectoryDomainTrustTrustType(res["trustType"], d, config)); err != nil { + return fmt.Errorf("Error reading DomainTrust: %s", err) + } + if err := d.Set("trust_direction", flattenNestedActiveDirectoryDomainTrustTrustDirection(res["trustDirection"], d, config)); err != nil { + return fmt.Errorf("Error reading DomainTrust: %s", err) + } + if err := d.Set("selective_authentication", flattenNestedActiveDirectoryDomainTrustSelectiveAuthentication(res["selectiveAuthentication"], d, config)); err != nil { + return fmt.Errorf("Error reading DomainTrust: %s", err) + } + if err := d.Set("target_dns_ip_addresses", flattenNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(res["targetDnsIpAddresses"], d, config)); err != nil { + return fmt.Errorf("Error reading DomainTrust: %s", err) + } + + return nil +} + +func resourceActiveDirectoryDomainTrustUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DomainTrust: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + targetDomainNameProp, err := expandNestedActiveDirectoryDomainTrustTargetDomainName(d.Get("target_domain_name"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("target_domain_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetDomainNameProp)) { + obj["targetDomainName"] = targetDomainNameProp + } + trustTypeProp, err := expandNestedActiveDirectoryDomainTrustTrustType(d.Get("trust_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("trust_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trustTypeProp)) { + obj["trustType"] = trustTypeProp + } + trustDirectionProp, err := expandNestedActiveDirectoryDomainTrustTrustDirection(d.Get("trust_direction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("trust_direction"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trustDirectionProp)) { + obj["trustDirection"] = trustDirectionProp + } + selectiveAuthenticationProp, err := expandNestedActiveDirectoryDomainTrustSelectiveAuthentication(d.Get("selective_authentication"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("selective_authentication"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, selectiveAuthenticationProp)) { + obj["selectiveAuthentication"] = selectiveAuthenticationProp + } + targetDnsIpAddressesProp, err := expandNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(d.Get("target_dns_ip_addresses"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_dns_ip_addresses"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetDnsIpAddressesProp)) { + obj["targetDnsIpAddresses"] = targetDnsIpAddressesProp + } + trustHandshakeSecretProp, err := expandNestedActiveDirectoryDomainTrustTrustHandshakeSecret(d.Get("trust_handshake_secret"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("trust_handshake_secret"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
trustHandshakeSecretProp)) { + obj["trustHandshakeSecret"] = trustHandshakeSecretProp + } + + obj, err = resourceActiveDirectoryDomainTrustUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain}}:reconfigureTrust") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating DomainTrust %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating DomainTrust %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating DomainTrust %q: %#v", d.Id(), res) + } + + err = ActiveDirectoryOperationWaitTime( + config, res, project, "Updating DomainTrust", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceActiveDirectoryDomainTrustRead(d, meta) +} + +func resourceActiveDirectoryDomainTrustDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain}}:detachTrust") + if err != nil { + return err + } + + obj := make(map[string]interface{}) + targetDomainNameProp, err := expandNestedActiveDirectoryDomainTrustTargetDomainName(d.Get("target_domain_name"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("target_domain_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetDomainNameProp)) { + obj["targetDomainName"] = targetDomainNameProp + } + trustTypeProp, err := expandNestedActiveDirectoryDomainTrustTrustType(d.Get("trust_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("trust_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trustTypeProp)) { + obj["trustType"] = trustTypeProp + } + trustDirectionProp, err := expandNestedActiveDirectoryDomainTrustTrustDirection(d.Get("trust_direction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("trust_direction"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trustDirectionProp)) { + obj["trustDirection"] = trustDirectionProp + } + selectiveAuthenticationProp, err := expandNestedActiveDirectoryDomainTrustSelectiveAuthentication(d.Get("selective_authentication"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("selective_authentication"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, selectiveAuthenticationProp)) { + obj["selectiveAuthentication"] = selectiveAuthenticationProp + } + targetDnsIpAddressesProp, err := expandNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(d.Get("target_dns_ip_addresses"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_dns_ip_addresses"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetDnsIpAddressesProp)) { + obj["targetDnsIpAddresses"] = targetDnsIpAddressesProp + } + trustHandshakeSecretProp, err := expandNestedActiveDirectoryDomainTrustTrustHandshakeSecret(d.Get("trust_handshake_secret"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("trust_handshake_secret"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && 
(ok || !reflect.DeepEqual(v, trustHandshakeSecretProp)) { + obj["trustHandshakeSecret"] = trustHandshakeSecretProp + } + + obj, err = resourceActiveDirectoryDomainTrustEncoder(d, meta, obj) + if err != nil { + return err + } + + log.Printf("[DEBUG] Deleting DomainTrust %q", d.Id()) + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DomainTrust") + } + + err = ActiveDirectoryOperationWaitTime( + config, res, project, "Deleting DomainTrust", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting DomainTrust %q: %#v", d.Id(), res) + return nil +} + +func resourceActiveDirectoryDomainTrustImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/global/domains/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/domains/{{domain}}/{{target_domain_name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedActiveDirectoryDomainTrustTargetDomainName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedActiveDirectoryDomainTrustTrustType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedActiveDirectoryDomainTrustTrustDirection(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedActiveDirectoryDomainTrustSelectiveAuthentication(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func expandNestedActiveDirectoryDomainTrustTargetDomainName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedActiveDirectoryDomainTrustTrustType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedActiveDirectoryDomainTrustTrustDirection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedActiveDirectoryDomainTrustSelectiveAuthentication(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandNestedActiveDirectoryDomainTrustTrustHandshakeSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceActiveDirectoryDomainTrustEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + + wrappedReq := map[string]interface{}{ + "trust": obj, + } + return wrappedReq, nil +} + +func resourceActiveDirectoryDomainTrustUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) 
(map[string]interface{}, error) { + wrappedReq := map[string]interface{}{ + "targetDomainName": obj["targetDomainName"], + "targetDnsIpAddresses": obj["targetDnsIpAddresses"], + } + return wrappedReq, nil +} + +func flattenNestedActiveDirectoryDomainTrust(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["trusts"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value trusts. Actual value: %v", v) + } + + _, item, err := resourceActiveDirectoryDomainTrustFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceActiveDirectoryDomainTrustFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedTargetDomainName, err := expandNestedActiveDirectoryDomainTrustTargetDomainName(d.Get("target_domain_name"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedTargetDomainName := flattenNestedActiveDirectoryDomainTrustTargetDomainName(expectedTargetDomainName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + // Decode list item before comparing. 
+ item, err := resourceActiveDirectoryDomainTrustDecoder(d, meta, item) + if err != nil { + return -1, nil, err + } + + itemTargetDomainName := flattenNestedActiveDirectoryDomainTrustTargetDomainName(item["targetDomainName"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemTargetDomainName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedTargetDomainName))) && !reflect.DeepEqual(itemTargetDomainName, expectedFlattenedTargetDomainName) { + log.Printf("[DEBUG] Skipping item with targetDomainName= %#v, looking for %#v)", itemTargetDomainName, expectedFlattenedTargetDomainName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} +func resourceActiveDirectoryDomainTrustDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + v, ok := res["domainTrust"] + if !ok || v == nil { + return res, nil + } + + return v.(map[string]interface{}), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/alloydb_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/alloydb_operation.go new file mode 100644 index 0000000000..997cd598dd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/alloydb_operation.go @@ -0,0 +1,74 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package alloydb + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type AlloydbOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *AlloydbOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.AlloydbBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createAlloydbWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*AlloydbOperationWaiter, error) { + w := &AlloydbOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +func AlloydbOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. 
+ return nil + } + w, err := createAlloydbWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/data_source_alloydb_locations.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/data_source_alloydb_locations.go new file mode 100644 index 0000000000..8351f5bce2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/data_source_alloydb_locations.go @@ -0,0 +1,156 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package alloydb + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceAlloydbLocations() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceAlloydbLocationsRead, + + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Description: `Project ID of the project.`, + }, + "locations": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Resource name for the location, which may vary between implementations. For example: "projects/example-project/locations/us-east1`, + }, + "location_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The canonical id for this location. 
For example: "us-east1".`, + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The friendly name for this location, typically a nearby city name. For example, "Tokyo".`, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `Cross-service attributes for the location. For example {"cloud.googleapis.com/region": "us-east1"}`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "metadata": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `Service-specific metadata. For example the available capacity at the given location.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + } +} + +func dataSourceAlloydbLocationsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations") + if err != nil { + return fmt.Errorf("Error setting api endpoint") + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Locations %q", d.Id())) + } + var locations []map[string]interface{} + for { + fetchedLocations := res["locations"].([]interface{}) + for _, loc := range fetchedLocations { + locationDetails := make(map[string]interface{}) + l := 
loc.(map[string]interface{}) + if l["name"] != nil { + locationDetails["name"] = l["name"].(string) + } + if l["locationId"] != nil { + locationDetails["location_id"] = l["locationId"].(string) + } + if l["displayName"] != nil { + locationDetails["display_name"] = l["displayName"].(string) + } + if l["labels"] != nil { + labels := make(map[string]string) + for k, v := range l["labels"].(map[string]interface{}) { + labels[k] = v.(string) + } + locationDetails["labels"] = labels + } + if l["metadata"] != nil { + metadata := make(map[string]string) + for k, v := range l["metadata"].(map[string]interface{}) { + metadata[k] = v.(string) + } + locationDetails["metadata"] = metadata + } + locations = append(locations, locationDetails) + } + if res["nextPageToken"] == nil || res["nextPageToken"].(string) == "" { + break + } + url, err = tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations?pageToken="+res["nextPageToken"].(string)) + if err != nil { + return fmt.Errorf("Error setting api endpoint") + } + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Locations %q", d.Id())) + } + } + + if err := d.Set("locations", locations); err != nil { + return fmt.Errorf("Error setting locations: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/locations", project)) + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/data_source_alloydb_supported_database_flags.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/data_source_alloydb_supported_database_flags.go new file mode 100644 index 0000000000..3687efd7c7 --- /dev/null +++
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/data_source_alloydb_supported_database_flags.go @@ -0,0 +1,234 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package alloydb + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceAlloydbSupportedDatabaseFlags() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceAlloydbSupportedDatabaseFlagsRead, + + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Description: `Project ID of the project.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + Description: `The canonical id for the location. For example: "us-east1".`, + }, + "supported_database_flags": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The name of the flag resource, following Google Cloud conventions, e.g.: * projects/{project}/locations/{location}/flags/{flag} This field currently has no semantic meaning.`, + }, + "flag_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The name of the database flag, e.g. "max_allowed_packets". The is a possibly key for the Instance.database_flags map field.`, + }, + "value_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `ValueType describes the semantic type of the value that the flag accepts. 
The supported values are:- 'VALUE_TYPE_UNSPECIFIED', 'STRING', 'INTEGER', 'FLOAT', 'NONE'.`, + }, + "accepts_multiple_values": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `Whether the database flag accepts multiple values. If true, a comma-separated list of stringified values may be specified.`, + }, + "supported_db_versions": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Major database engine versions for which this flag is supported. Supported values are:- 'DATABASE_VERSION_UNSPECIFIED', and 'POSTGRES_14'.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "requires_db_restart": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `Whether setting or updating this flag on an Instance requires a database restart. If a flag that requires database restart is set, the backend will automatically restart the database (making sure to satisfy any availability SLO's).`, + }, + "string_restrictions": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Restriction on STRING type value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_values": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `The list of allowed values, if bounded. 
This field will be empty if there is a unbounded number of allowed values.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "integer_restrictions": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Restriction on INTEGER type value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "min_value": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The minimum value that can be specified, if applicable.`, + }, + "max_value": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The maximum value that can be specified, if applicable.`, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceAlloydbSupportedDatabaseFlagsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + location := d.Get("location").(string) + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Cluster: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/supportedDatabaseFlags") + if err != nil { + return fmt.Errorf("Error setting api endpoint") + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SupportedDatabaseFlags %q", d.Id())) + } + var supportedDatabaseFlags []map[string]interface{} + for { + result := 
res["supportedDatabaseFlags"].([]interface{}) + for _, dbFlag := range result { + supportedDatabaseFlag := make(map[string]interface{}) + flag := dbFlag.(map[string]interface{}) + if flag["name"] != nil { + supportedDatabaseFlag["name"] = flag["name"].(string) + } + if flag["flagName"] != nil { + supportedDatabaseFlag["flag_name"] = flag["flagName"].(string) + } + if flag["valueType"] != nil { + supportedDatabaseFlag["value_type"] = flag["valueType"].(string) + } + if flag["acceptsMultipleValues"] != nil { + supportedDatabaseFlag["accepts_multiple_values"] = flag["acceptsMultipleValues"].(bool) + } + if flag["requiresDbRestart"] != nil { + supportedDatabaseFlag["requires_db_restart"] = flag["requiresDbRestart"].(bool) + } + if flag["supportedDbVersions"] != nil { + dbVersions := make([]string, 0, len(flag["supportedDbVersions"].([]interface{}))) + for _, supDbVer := range flag["supportedDbVersions"].([]interface{}) { + dbVersions = append(dbVersions, supDbVer.(string)) + } + supportedDatabaseFlag["supported_db_versions"] = dbVersions + } + + if flag["stringRestrictions"] != nil { + restrictions := make([]map[string][]string, 0, 1) + fetchedAllowedValues := flag["stringRestrictions"].(map[string]interface{})["allowedValues"] + if fetchedAllowedValues != nil { + allowedValues := make([]string, 0, len(fetchedAllowedValues.([]interface{}))) + for _, val := range fetchedAllowedValues.([]interface{}) { + allowedValues = append(allowedValues, val.(string)) + } + stringRestrictions := map[string][]string{ + "allowed_values": allowedValues, + } + restrictions = append(restrictions, stringRestrictions) + supportedDatabaseFlag["string_restrictions"] = restrictions + } + } + if flag["integerRestrictions"] != nil { + restrictions := make([]map[string]string, 0, 1) + minValue := flag["integerRestrictions"].(map[string]interface{})["minValue"].(string) + maxValue := flag["integerRestrictions"].(map[string]interface{})["maxValue"].(string) + integerRestrictions := 
map[string]string{ + "min_value": minValue, + "max_value": maxValue, + } + restrictions = append(restrictions, integerRestrictions) + supportedDatabaseFlag["integer_restrictions"] = restrictions + } + supportedDatabaseFlags = append(supportedDatabaseFlags, supportedDatabaseFlag) + } + if res["nextPageToken"] == nil || res["nextPageToken"].(string) == "" { + break + } + url, err = tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/supportedDatabaseFlags?pageToken="+res["nextPageToken"].(string)) + if err != nil { + return fmt.Errorf("Error setting api endpoint") + } + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SupportedDatabaseFlags %q", d.Id())) + } + } + if err := d.Set("supported_database_flags", supportedDatabaseFlags); err != nil { + return fmt.Errorf("Error setting supported_database_flags: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/locations/%s/supportedDbFlags", project, location)) + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_backup.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_backup.go new file mode 100644 index 0000000000..600d9b9d91 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_backup.go @@ -0,0 +1,625 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package alloydb + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceAlloydbBackup() *schema.Resource { + return &schema.Resource{ + Create: resourceAlloydbBackupCreate, + Read: resourceAlloydbBackupRead, + Update: resourceAlloydbBackupUpdate, + Delete: resourceAlloydbBackupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAlloydbBackupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "backup_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the alloydb backup.`, + }, + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `The full resource name of the backup source cluster (e.g., projects/{project}/locations/{location}/clusters/{clusterId}).`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location where the alloydb backup should reside.`, + }, + 
"description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `User-provided description of the backup.`, + }, + "encryption_config": { + Type: schema.TypeList, + Optional: true, + Description: `EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME].`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-defined labels for the alloydb backup.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the Backup was created in UTC.`, + }, + "encryption_info": { + Type: schema.TypeList, + Computed: true, + Description: `EncryptionInfo describes the encryption information of a cluster or a backup.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "encryption_type": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Type of encryption.`, + }, + "kms_key_versions": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. Cloud KMS key versions that are being used to protect the database or the backup.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `A hash of the resource.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backupId}`, + }, + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: `If true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the backup.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the Backup was updated in UTC.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAlloydbBackupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + clusterNameProp, err := expandAlloydbBackupClusterName(d.Get("cluster_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cluster_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(clusterNameProp)) && (ok || !reflect.DeepEqual(v, clusterNameProp)) { + obj["clusterName"] = clusterNameProp + } + labelsProp, err := expandAlloydbBackupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + descriptionProp, err := expandAlloydbBackupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + encryptionConfigProp, err := expandAlloydbBackupEncryptionConfig(d.Get("encryption_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionConfigProp)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { + obj["encryptionConfig"] = encryptionConfigProp + } + + obj, err = resourceAlloydbBackupEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/backups?backupId={{backup_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Backup: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Backup: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = AlloydbOperationWaitTime( + config, res, project, "Creating Backup", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return 
fmt.Errorf("Error waiting to create Backup: %s", err) + } + + log.Printf("[DEBUG] Finished creating Backup %q: %#v", d.Id(), res) + + return resourceAlloydbBackupRead(d, meta) +} + +func resourceAlloydbBackupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AlloydbBackup %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + + if err := d.Set("name", flattenAlloydbBackupName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("uid", flattenAlloydbBackupUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("cluster_name", flattenAlloydbBackupClusterName(res["clusterName"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("labels", flattenAlloydbBackupLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := 
d.Set("create_time", flattenAlloydbBackupCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("update_time", flattenAlloydbBackupUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("state", flattenAlloydbBackupState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("description", flattenAlloydbBackupDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("reconciling", flattenAlloydbBackupReconciling(res["reconciling"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("etag", flattenAlloydbBackupEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("encryption_config", flattenAlloydbBackupEncryptionConfig(res["encryptionConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("encryption_info", flattenAlloydbBackupEncryptionInfo(res["encryptionInfo"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + + return nil +} + +func resourceAlloydbBackupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandAlloydbBackupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + encryptionConfigProp, err := expandAlloydbBackupEncryptionConfig(d.Get("encryption_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { + obj["encryptionConfig"] = encryptionConfigProp + } + + obj, err = resourceAlloydbBackupEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Backup %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("encryption_config") { + updateMask = append(updateMask, "encryptionConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Backup %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Backup %q: %#v", d.Id(), res) + } + + err = AlloydbOperationWaitTime( + config, res, project, "Updating Backup", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + 
return resourceAlloydbBackupRead(d, meta) +} + +func resourceAlloydbBackupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Backup %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Backup") + } + + err = AlloydbOperationWaitTime( + config, res, project, "Deleting Backup", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Backup %q: %#v", d.Id(), res) + return nil +} + +func resourceAlloydbBackupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/backups/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/locations/{{location}}/backups/{{backup_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAlloydbBackupName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupClusterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupReconciling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupEncryptionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenAlloydbBackupEncryptionConfigKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func 
flattenAlloydbBackupEncryptionConfigKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupEncryptionInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["encryption_type"] = + flattenAlloydbBackupEncryptionInfoEncryptionType(original["encryptionType"], d, config) + transformed["kms_key_versions"] = + flattenAlloydbBackupEncryptionInfoKmsKeyVersions(original["kmsKeyVersions"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbBackupEncryptionInfoEncryptionType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupEncryptionInfoKmsKeyVersions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandAlloydbBackupClusterName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbBackupLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAlloydbBackupDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbBackupEncryptionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := 
make(map[string]interface{}) + + transformedKmsKeyName, err := expandAlloydbBackupEncryptionConfigKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandAlloydbBackupEncryptionConfigKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceAlloydbBackupEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // The only other available type is AUTOMATED which cannot be set manually + obj["type"] = "ON_DEMAND" + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_backup_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_backup_sweeper.go new file mode 100644 index 0000000000..d32a21226e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_backup_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package alloydb + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("AlloydbBackup", testSweepAlloydbBackup) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepAlloydbBackup(region string) error { + resourceName := "AlloydbBackup" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://alloydb.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backups", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in 
response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["backups"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://alloydb.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backups/{{backup_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_cluster.go new file mode 100644 index 0000000000..cc1e4ed9e5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_cluster.go @@ -0,0 +1,1364 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package alloydb + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceAlloydbCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceAlloydbClusterCreate, + Read: resourceAlloydbClusterRead, + Update: resourceAlloydbClusterUpdate, + Delete: resourceAlloydbClusterDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAlloydbClusterImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the alloydb cluster.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location where the alloydb cluster should reside.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: + +"projects/{projectNumber}/global/networks/{network_id}".`, + }, + "automated_backup_policy": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `The automated backup policy for this cluster. + +If no policy is provided then the default policy will be used. 
The default policy takes one backup a day, has a backup window of 1 hour, and retains backups for 14 days.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backup_window": { + Type: schema.TypeString, + Optional: true, + Description: `The length of the time window during which a backup can be taken. If a backup does not succeed within this time window, it will be canceled and considered failed. + +The backup window must be at least 5 minutes long. There is no upper bound on the window. If not set, it will default to 1 hour. + +A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether automated backups are enabled.`, + }, + "encryption_config": { + Type: schema.TypeList, + Optional: true, + Description: `EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + Description: `The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME].`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels to apply to backups created using this configuration.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `The location where the backup will be stored. Currently, the only supported option is to store the backup in the same region as the cluster.`, + }, + "quantity_based_retention": { + Type: schema.TypeList, + Optional: true, + Description: `Quantity-based Backup retention policy to retain recent backups. 
Conflicts with 'time_based_retention', both can't be set together.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of backups to retain.`, + }, + }, + }, + ConflictsWith: []string{"automated_backup_policy.0.time_based_retention"}, + }, + "time_based_retention": { + Type: schema.TypeList, + Optional: true, + Description: `Time-based Backup retention policy. Conflicts with 'quantity_based_retention', both can't be set together.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retention_period": { + Type: schema.TypeString, + Optional: true, + Description: `The retention period. +A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + }, + }, + }, + ConflictsWith: []string{"automated_backup_policy.0.quantity_based_retention"}, + }, + "weekly_schedule": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Weekly schedule for the Backup.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_times": { + Type: schema.TypeList, + Required: true, + Description: `The times during the day to start a backup. At least one start time must be provided. The start times are assumed to be in UTC and to be an exact hour (e.g., 04:00:00).`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Currently, only the value 0 is supported.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. 
Currently, only the value 0 is supported.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Currently, only the value 0 is supported.`, + }, + }, + }, + }, + "days_of_week": { + Type: schema.TypeList, + Optional: true, + Description: `The days of the week to perform a backup. At least one day of the week must be provided. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), + }, + }, + }, + }, + }, + }, + }, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `User-settable and human-readable display name for the Cluster.`, + }, + "encryption_config": { + Type: schema.TypeList, + Optional: true, + Description: `EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The fully-qualified resource name of the KMS key. 
Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME].`, + }, + }, + }, + }, + "initial_user": { + Type: schema.TypeList, + Optional: true, + Description: `Initial user to setup during cluster creation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "password": { + Type: schema.TypeString, + Required: true, + Description: `The initial password for the user.`, + Sensitive: true, + }, + "user": { + Type: schema.TypeString, + Optional: true, + Description: `The database username.`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-defined labels for the alloydb cluster.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "backup_source": { + Type: schema.TypeList, + Computed: true, + Description: `Cluster created from backup.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backup_name": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the backup resource.`, + }, + }, + }, + }, + "database_version": { + Type: schema.TypeString, + Computed: true, + Description: `The database engine major version. This is an output-only field and it's populated at the Cluster creation time. This field cannot be changed after cluster creation.`, + }, + "encryption_info": { + Type: schema.TypeList, + Computed: true, + Description: `EncryptionInfo describes the encryption information of a cluster or a backup.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "encryption_type": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Type of encryption.`, + }, + "kms_key_versions": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. 
Cloud KMS key versions that are being used to protect the database or the backup.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "migration_source": { + Type: schema.TypeList, + Computed: true, + Description: `Cluster created via DMS migration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host_port": { + Type: schema.TypeString, + Optional: true, + Description: `The host and port of the on-premises instance in host:port format`, + }, + "reference_id": { + Type: schema.TypeString, + Optional: true, + Description: `Place holder for the external source identifier(e.g DMS job name) that created the cluster.`, + }, + "source_type": { + Type: schema.TypeString, + Optional: true, + Description: `Type of migration source.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the cluster resource.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `The system-generated UID of the resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAlloydbClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandAlloydbClusterLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + encryptionConfigProp, err := expandAlloydbClusterEncryptionConfig(d.Get("encryption_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_config"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(encryptionConfigProp)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { + obj["encryptionConfig"] = encryptionConfigProp + } + networkProp, err := expandAlloydbClusterNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + displayNameProp, err := expandAlloydbClusterDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + initialUserProp, err := expandAlloydbClusterInitialUser(d.Get("initial_user"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("initial_user"); !tpgresource.IsEmptyValue(reflect.ValueOf(initialUserProp)) && (ok || !reflect.DeepEqual(v, initialUserProp)) { + obj["initialUser"] = initialUserProp + } + automatedBackupPolicyProp, err := expandAlloydbClusterAutomatedBackupPolicy(d.Get("automated_backup_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("automated_backup_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(automatedBackupPolicyProp)) && (ok || !reflect.DeepEqual(v, automatedBackupPolicyProp)) { + obj["automatedBackupPolicy"] = automatedBackupPolicyProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters?clusterId={{cluster_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Cluster: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Cluster: %s", err) + } + billingProject = project + + // err == nil 
indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Cluster: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = AlloydbOperationWaitTime( + config, res, project, "Creating Cluster", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished creating Cluster %q: %#v", d.Id(), res) + + return resourceAlloydbClusterRead(d, meta) +} + +func resourceAlloydbClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Cluster: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: 
billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AlloydbCluster %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + + if err := d.Set("name", flattenAlloydbClusterName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("uid", flattenAlloydbClusterUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("labels", flattenAlloydbClusterLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("encryption_config", flattenAlloydbClusterEncryptionConfig(res["encryptionConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("encryption_info", flattenAlloydbClusterEncryptionInfo(res["encryptionInfo"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("network", flattenAlloydbClusterNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("display_name", flattenAlloydbClusterDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("database_version", flattenAlloydbClusterDatabaseVersion(res["databaseVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("automated_backup_policy", flattenAlloydbClusterAutomatedBackupPolicy(res["automatedBackupPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("backup_source", flattenAlloydbClusterBackupSource(res["backupSource"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if 
err := d.Set("migration_source", flattenAlloydbClusterMigrationSource(res["migrationSource"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + + return nil +} + +func resourceAlloydbClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Cluster: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandAlloydbClusterLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + encryptionConfigProp, err := expandAlloydbClusterEncryptionConfig(d.Get("encryption_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { + obj["encryptionConfig"] = encryptionConfigProp + } + networkProp, err := expandAlloydbClusterNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + displayNameProp, err := expandAlloydbClusterDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + initialUserProp, err := expandAlloydbClusterInitialUser(d.Get("initial_user"), d, config) + if 
err != nil { + return err + } else if v, ok := d.GetOkExists("initial_user"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, initialUserProp)) { + obj["initialUser"] = initialUserProp + } + automatedBackupPolicyProp, err := expandAlloydbClusterAutomatedBackupPolicy(d.Get("automated_backup_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("automated_backup_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, automatedBackupPolicyProp)) { + obj["automatedBackupPolicy"] = automatedBackupPolicyProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Cluster %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("encryption_config") { + updateMask = append(updateMask, "encryptionConfig") + } + + if d.HasChange("network") { + updateMask = append(updateMask, "network") + } + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("initial_user") { + updateMask = append(updateMask, "initialUser") + } + + if d.HasChange("automated_backup_policy") { + updateMask = append(updateMask, "automatedBackupPolicy") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + 
UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Cluster %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Cluster %q: %#v", d.Id(), res) + } + + err = AlloydbOperationWaitTime( + config, res, project, "Updating Cluster", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAlloydbClusterRead(d, meta) +} + +func resourceAlloydbClusterDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Cluster: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Cluster %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Cluster") + } + + err = AlloydbOperationWaitTime( + config, res, project, "Deleting Cluster", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Cluster %q: %#v", d.Id(), res) + return nil +} + +func resourceAlloydbClusterImport(d *schema.ResourceData, meta interface{}) 
([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAlloydbClusterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterEncryptionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenAlloydbClusterEncryptionConfigKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterEncryptionConfigKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterEncryptionInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["encryption_type"] = + 
flattenAlloydbClusterEncryptionInfoEncryptionType(original["encryptionType"], d, config) + transformed["kms_key_versions"] = + flattenAlloydbClusterEncryptionInfoKmsKeyVersions(original["kmsKeyVersions"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterEncryptionInfoEncryptionType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterEncryptionInfoKmsKeyVersions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterDatabaseVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterAutomatedBackupPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["backup_window"] = + flattenAlloydbClusterAutomatedBackupPolicyBackupWindow(original["backupWindow"], d, config) + transformed["location"] = + flattenAlloydbClusterAutomatedBackupPolicyLocation(original["location"], d, config) + transformed["labels"] = + flattenAlloydbClusterAutomatedBackupPolicyLabels(original["labels"], d, config) + transformed["encryption_config"] = + flattenAlloydbClusterAutomatedBackupPolicyEncryptionConfig(original["encryptionConfig"], d, config) + transformed["weekly_schedule"] = + flattenAlloydbClusterAutomatedBackupPolicyWeeklySchedule(original["weeklySchedule"], d, config) + transformed["time_based_retention"] = + 
flattenAlloydbClusterAutomatedBackupPolicyTimeBasedRetention(original["timeBasedRetention"], d, config) + transformed["quantity_based_retention"] = + flattenAlloydbClusterAutomatedBackupPolicyQuantityBasedRetention(original["quantityBasedRetention"], d, config) + transformed["enabled"] = + flattenAlloydbClusterAutomatedBackupPolicyEnabled(original["enabled"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterAutomatedBackupPolicyBackupWindow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterAutomatedBackupPolicyLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterAutomatedBackupPolicyLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterAutomatedBackupPolicyEncryptionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenAlloydbClusterAutomatedBackupPolicyEncryptionConfigKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterAutomatedBackupPolicyEncryptionConfigKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterAutomatedBackupPolicyWeeklySchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["days_of_week"] = + flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleDaysOfWeek(original["daysOfWeek"], d, config) + 
transformed["start_times"] = + flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimes(original["startTimes"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleDaysOfWeek(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // If no start times exist, that means we take backups at midnight. This is represented as 0's all around. + return append(transformed, map[string]interface{}{}) + } + transformed = append(transformed, map[string]interface{}{ + "hours": flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesHours(original["hours"], d, config), + "minutes": flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesMinutes(original["minutes"], d, config), + "seconds": flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesSeconds(original["seconds"], d, config), + "nanos": flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesNanos(original["nanos"], d, config), + }) + } + return transformed +} + +func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbClusterAutomatedBackupPolicyTimeBasedRetention(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["retention_period"] = + 
flattenAlloydbClusterAutomatedBackupPolicyTimeBasedRetentionRetentionPeriod(original["retentionPeriod"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterAutomatedBackupPolicyTimeBasedRetentionRetentionPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterAutomatedBackupPolicyQuantityBasedRetention(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["count"] = + flattenAlloydbClusterAutomatedBackupPolicyQuantityBasedRetentionCount(original["count"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterAutomatedBackupPolicyQuantityBasedRetentionCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbClusterAutomatedBackupPolicyEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterBackupSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["backup_name"] = + flattenAlloydbClusterBackupSourceBackupName(original["backupName"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterBackupSourceBackupName(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterMigrationSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["host_port"] = + flattenAlloydbClusterMigrationSourceHostPort(original["hostPort"], d, config) + transformed["reference_id"] = + flattenAlloydbClusterMigrationSourceReferenceId(original["referenceId"], d, config) + transformed["source_type"] = + flattenAlloydbClusterMigrationSourceSourceType(original["sourceType"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbClusterMigrationSourceHostPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterMigrationSourceReferenceId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterMigrationSourceSourceType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandAlloydbClusterLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAlloydbClusterEncryptionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandAlloydbClusterEncryptionConfigKmsKeyName(original["kms_key_name"], d, config) + if err != nil 
{ + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandAlloydbClusterEncryptionConfigKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterInitialUser(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUser, err := expandAlloydbClusterInitialUserUser(original["user"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUser); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["user"] = transformedUser + } + + transformedPassword, err := expandAlloydbClusterInitialUserPassword(original["password"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["password"] = transformedPassword + } + + return transformed, nil +} + +func expandAlloydbClusterInitialUserUser(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterInitialUserPassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + 
+func expandAlloydbClusterAutomatedBackupPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBackupWindow, err := expandAlloydbClusterAutomatedBackupPolicyBackupWindow(original["backup_window"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBackupWindow); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["backupWindow"] = transformedBackupWindow + } + + transformedLocation, err := expandAlloydbClusterAutomatedBackupPolicyLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["location"] = transformedLocation + } + + transformedLabels, err := expandAlloydbClusterAutomatedBackupPolicyLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labels"] = transformedLabels + } + + transformedEncryptionConfig, err := expandAlloydbClusterAutomatedBackupPolicyEncryptionConfig(original["encryption_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncryptionConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["encryptionConfig"] = transformedEncryptionConfig + } + + transformedWeeklySchedule, err := expandAlloydbClusterAutomatedBackupPolicyWeeklySchedule(original["weekly_schedule"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWeeklySchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["weeklySchedule"] = transformedWeeklySchedule + } + + 
transformedTimeBasedRetention, err := expandAlloydbClusterAutomatedBackupPolicyTimeBasedRetention(original["time_based_retention"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeBasedRetention); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeBasedRetention"] = transformedTimeBasedRetention + } + + transformedQuantityBasedRetention, err := expandAlloydbClusterAutomatedBackupPolicyQuantityBasedRetention(original["quantity_based_retention"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedQuantityBasedRetention); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["quantityBasedRetention"] = transformedQuantityBasedRetention + } + + transformedEnabled, err := expandAlloydbClusterAutomatedBackupPolicyEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enabled"] = transformedEnabled + } + + return transformed, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyBackupWindow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyEncryptionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := 
v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandAlloydbClusterAutomatedBackupPolicyEncryptionConfigKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyEncryptionConfigKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyWeeklySchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDaysOfWeek, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleDaysOfWeek(original["days_of_week"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDaysOfWeek); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["daysOfWeek"] = transformedDaysOfWeek + } + + transformedStartTimes, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimes(original["start_times"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTimes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startTimes"] = transformedStartTimes + } + + return transformed, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleDaysOfWeek(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, 
nil +} + +func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + req = append(req, transformed) + } + return req, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyWeeklyScheduleStartTimesNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyTimeBasedRetention(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRetentionPeriod, err := expandAlloydbClusterAutomatedBackupPolicyTimeBasedRetentionRetentionPeriod(original["retention_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRetentionPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["retentionPeriod"] = transformedRetentionPeriod + } + + return transformed, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyTimeBasedRetentionRetentionPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyQuantityBasedRetention(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCount, err := 
expandAlloydbClusterAutomatedBackupPolicyQuantityBasedRetentionCount(original["count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["count"] = transformedCount + } + + return transformed, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyQuantityBasedRetentionCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAutomatedBackupPolicyEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_cluster_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_cluster_sweeper.go new file mode 100644 index 0000000000..cba154abee --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_cluster_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package alloydb + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("AlloydbCluster", testSweepAlloydbCluster) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepAlloydbCluster(region string) error { + resourceName := "AlloydbCluster" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://alloydb.googleapis.com/v1/projects/{{project}}/locations/{{location}}/clusters", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in 
response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["clusters"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://alloydb.googleapis.com/v1/projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_instance.go new file mode 100644 index 0000000000..2c2d43e340 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_instance.go @@ -0,0 +1,787 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package alloydb + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceAlloydbInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceAlloydbInstanceCreate, + Read: resourceAlloydbInstanceRead, + Update: resourceAlloydbInstanceUpdate, + Delete: resourceAlloydbInstanceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAlloydbInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(40 * time.Minute), + Update: schema.DefaultTimeout(40 * time.Minute), + Delete: schema.DefaultTimeout(40 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Identifies the alloydb cluster. Must be in the format +'projects/{project}/locations/{location}/clusters/{cluster_id}'`, + }, + "instance_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the alloydb instance.`, + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"PRIMARY", "READ_POOL"}), + Description: `The type of the instance. If the instance type is READ_POOL, provide the associated PRIMARY instance in the 'depends_on' meta-data attribute. Possible values: ["PRIMARY", "READ_POOL"]`, + }, + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: `Annotations to allow client tools to store small amount of arbitrary data. 
This is distinct from labels.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "availability_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"AVAILABILITY_TYPE_UNSPECIFIED", "ZONAL", "REGIONAL", ""}), + Description: `'Availability type of an Instance. Defaults to REGIONAL for both primary and read instances. +Note that primary and read instances can have different availability types. +Only READ_POOL instance supports ZONAL type. Users can't specify the zone for READ_POOL instance. +Zone is automatically chosen from the list of zones in the region specified. +Read pool of size 1 can only have zonal availability. Read pools with node count of 2 or more +can have regional availability (nodes are present in 2 or more zones in a region).' Possible values: ["AVAILABILITY_TYPE_UNSPECIFIED", "ZONAL", "REGIONAL"]`, + }, + "database_flags": { + Type: schema.TypeMap, + Optional: true, + Description: `Database flags. Set at instance level. * They are copied from primary instance on read instance creation. * Read instances can set new or override existing flags that are relevant for reads, e.g. for enabling columnar cache on a read instance. Flags set on read instance may or may not be present on primary.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `User-settable and human-readable display name for the Instance.`, + }, + "gce_zone": { + Type: schema.TypeString, + Optional: true, + Description: `The Compute Engine zone that the instance should serve from, per https://cloud.google.com/compute/docs/regions-zones This can ONLY be specified for ZONAL instances. If present for a REGIONAL instance, an error will be thrown. 
If this is absent for a ZONAL instance, instance is created in a random zone with available capacity.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-defined labels for the alloydb instance.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "machine_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Configurations for the machines that host the underlying database engine.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The number of CPU's in the VM instance.`, + }, + }, + }, + }, + "read_pool_config": { + Type: schema.TypeList, + Optional: true, + Description: `Read pool specific config.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Read capacity, i.e. number of nodes in a read pool instance.`, + }, + }, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the Instance was created in UTC.`, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `The IP address for the Instance. This is the connection endpoint for an end-user application.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the instance resource.`, + }, + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: `Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. 
This can happen due to user-triggered updates or system actions like failover or maintenance.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the alloydb instance.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `The system-generated UID of the resource.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the Instance was updated in UTC.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAlloydbInstanceCreate(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandAlloydbInstanceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + annotationsProp, err := expandAlloydbInstanceAnnotations(d.Get("annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(annotationsProp)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } + displayNameProp, err := expandAlloydbInstanceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + gceZoneProp, err := expandAlloydbInstanceGceZone(d.Get("gce_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("gce_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(gceZoneProp)) && (ok || 
!reflect.DeepEqual(v, gceZoneProp)) { + obj["gceZone"] = gceZoneProp + } + databaseFlagsProp, err := expandAlloydbInstanceDatabaseFlags(d.Get("database_flags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("database_flags"); !tpgresource.IsEmptyValue(reflect.ValueOf(databaseFlagsProp)) && (ok || !reflect.DeepEqual(v, databaseFlagsProp)) { + obj["databaseFlags"] = databaseFlagsProp + } + availabilityTypeProp, err := expandAlloydbInstanceAvailabilityType(d.Get("availability_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("availability_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(availabilityTypeProp)) && (ok || !reflect.DeepEqual(v, availabilityTypeProp)) { + obj["availabilityType"] = availabilityTypeProp + } + instanceTypeProp, err := expandAlloydbInstanceInstanceType(d.Get("instance_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("instance_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceTypeProp)) && (ok || !reflect.DeepEqual(v, instanceTypeProp)) { + obj["instanceType"] = instanceTypeProp + } + readPoolConfigProp, err := expandAlloydbInstanceReadPoolConfig(d.Get("read_pool_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("read_pool_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(readPoolConfigProp)) && (ok || !reflect.DeepEqual(v, readPoolConfigProp)) { + obj["readPoolConfig"] = readPoolConfigProp + } + machineConfigProp, err := expandAlloydbInstanceMachineConfig(d.Get("machine_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("machine_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(machineConfigProp)) && (ok || !reflect.DeepEqual(v, machineConfigProp)) { + obj["machineConfig"] = machineConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances?instanceId={{instance_id}}") + if err != nil { + return err + } + + 
log.Printf("[DEBUG] Creating new Instance: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Instance: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{cluster}}/instances/{{instance_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = AlloydbOperationWaitTime( + config, res, project, "Creating Instance", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Instance: %s", err) + } + + log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) + + return resourceAlloydbInstanceRead(d, meta) +} + +func resourceAlloydbInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, 
fmt.Sprintf("AlloydbInstance %q", d.Id())) + } + + if err := d.Set("name", flattenAlloydbInstanceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("create_time", flattenAlloydbInstanceCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("update_time", flattenAlloydbInstanceUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("uid", flattenAlloydbInstanceUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("labels", flattenAlloydbInstanceLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("annotations", flattenAlloydbInstanceAnnotations(res["annotations"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("state", flattenAlloydbInstanceState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("gce_zone", flattenAlloydbInstanceGceZone(res["gceZone"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("reconciling", flattenAlloydbInstanceReconciling(res["reconciling"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("database_flags", flattenAlloydbInstanceDatabaseFlags(res["databaseFlags"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("availability_type", flattenAlloydbInstanceAvailabilityType(res["availabilityType"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("instance_type", flattenAlloydbInstanceInstanceType(res["instanceType"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("ip_address", flattenAlloydbInstanceIpAddress(res["ipAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("read_pool_config", flattenAlloydbInstanceReadPoolConfig(res["readPoolConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("machine_config", flattenAlloydbInstanceMachineConfig(res["machineConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + return nil +} + +func resourceAlloydbInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + labelsProp, err := expandAlloydbInstanceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + annotationsProp, err := expandAlloydbInstanceAnnotations(d.Get("annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } + displayNameProp, err := expandAlloydbInstanceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + gceZoneProp, err := expandAlloydbInstanceGceZone(d.Get("gce_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("gce_zone"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gceZoneProp)) { + obj["gceZone"] = gceZoneProp + } + databaseFlagsProp, err := expandAlloydbInstanceDatabaseFlags(d.Get("database_flags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("database_flags"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, databaseFlagsProp)) { + obj["databaseFlags"] = databaseFlagsProp + } + availabilityTypeProp, err := expandAlloydbInstanceAvailabilityType(d.Get("availability_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("availability_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, availabilityTypeProp)) { + obj["availabilityType"] = availabilityTypeProp + } + readPoolConfigProp, err := expandAlloydbInstanceReadPoolConfig(d.Get("read_pool_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("read_pool_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, readPoolConfigProp)) { + obj["readPoolConfig"] = readPoolConfigProp + } + machineConfigProp, err := expandAlloydbInstanceMachineConfig(d.Get("machine_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("machine_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, machineConfigProp)) { + obj["machineConfig"] = machineConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("annotations") { + updateMask = append(updateMask, "annotations") + } + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if 
d.HasChange("gce_zone") { + updateMask = append(updateMask, "gceZone") + } + + if d.HasChange("database_flags") { + updateMask = append(updateMask, "databaseFlags") + } + + if d.HasChange("availability_type") { + updateMask = append(updateMask, "availabilityType") + } + + if d.HasChange("read_pool_config") { + updateMask = append(updateMask, "readPoolConfig") + } + + if d.HasChange("machine_config") { + updateMask = append(updateMask, "machineConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) + } + + err = AlloydbOperationWaitTime( + config, res, project, "Updating Instance", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAlloydbInstanceRead(d, meta) +} + +func resourceAlloydbInstanceDelete(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting 
Instance %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Instance") + } + + err = AlloydbOperationWaitTime( + config, res, project, "Deleting Instance", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) + return nil +} + +func resourceAlloydbInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/instances/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{cluster}}/instances/{{instance_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAlloydbInstanceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceLabels(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceGceZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceReconciling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceDatabaseFlags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceAvailabilityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceInstanceType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceReadPoolConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["node_count"] = + flattenAlloydbInstanceReadPoolConfigNodeCount(original["nodeCount"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbInstanceReadPoolConfigNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + 
if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbInstanceMachineConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cpu_count"] = + flattenAlloydbInstanceMachineConfigCpuCount(original["cpuCount"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbInstanceMachineConfigCpuCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandAlloydbInstanceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAlloydbInstanceAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAlloydbInstanceDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceGceZone(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceDatabaseFlags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAlloydbInstanceAvailabilityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceInstanceType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceReadPoolConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNodeCount, err := expandAlloydbInstanceReadPoolConfigNodeCount(original["node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNodeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nodeCount"] = transformedNodeCount + } + + return transformed, nil +} + +func expandAlloydbInstanceReadPoolConfigNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceMachineConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCpuCount, err := 
expandAlloydbInstanceMachineConfigCpuCount(original["cpu_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCpuCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cpuCount"] = transformedCpuCount + } + + return transformed, nil +} + +func expandAlloydbInstanceMachineConfigCpuCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_instance_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_instance_sweeper.go new file mode 100644 index 0000000000..adb0025e6e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/alloydb/resource_alloydb_instance_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package alloydb + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("AlloydbInstance", testSweepAlloydbInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepAlloydbInstance(region string) error { + resourceName := "AlloydbInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://alloydb.googleapis.com/v1/{{cluster}}/instances?instanceId={{instance_id}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in 
response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["instances"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://alloydb.googleapis.com/v1/{{cluster}}/instances/{{instance_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/apigee_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/apigee_operation.go new file mode 100644 index 0000000000..7c25197098 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/apigee_operation.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type ApigeeOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + tpgresource.CommonOperationWaiter +} + +func (w *ApigeeOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.ApigeeBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createApigeeWaiter(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string) (*ApigeeOperationWaiter, error) { + w := &ApigeeOperationWaiter{ + Config: config, + UserAgent: userAgent, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func ApigeeOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + w, err := createApigeeWaiter(config, op, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func ApigeeOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createApigeeWaiter(config, op, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/iam_apigee_environment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/iam_apigee_environment.go new file mode 100644 index 0000000000..10f4085dbc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/iam_apigee_environment.go @@ -0,0 +1,202 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var ApigeeEnvironmentIamSchema = map[string]*schema.Schema{ + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "env_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type ApigeeEnvironmentIamUpdater struct { + orgId string + envId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func ApigeeEnvironmentIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + if v, ok := d.GetOk("org_id"); ok { + values["org_id"] = v.(string) + } + + if v, ok := d.GetOk("env_id"); ok { + values["env_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"(?P.+)/environments/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("env_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ApigeeEnvironmentIamUpdater{ + orgId: values["org_id"], + envId: values["env_id"], + d: d, + Config: config, + } + + if err := d.Set("org_id", u.orgId); err != nil { + return nil, fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("env_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting env_id: %s", err) + } + + return u, nil +} + +func 
ApigeeEnvironmentIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + m, err := tpgresource.GetImportIdQualifiers([]string{"(?P.+)/environments/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ApigeeEnvironmentIamUpdater{ + orgId: values["org_id"], + envId: values["env_id"], + d: d, + Config: config, + } + if err := d.Set("env_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting env_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ApigeeEnvironmentIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyEnvironmentUrl("getIamPolicy") + if err != nil { + return nil, err + } + + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ApigeeEnvironmentIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyEnvironmentUrl("setIamPolicy") + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ApigeeEnvironmentIamUpdater) qualifyEnvironmentUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ApigeeBasePath}}%s:%s", fmt.Sprintf("%s/environments/%s", u.orgId, u.envId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ApigeeEnvironmentIamUpdater) GetResourceId() string { + return fmt.Sprintf("%s/environments/%s", u.orgId, u.envId) +} + +func (u *ApigeeEnvironmentIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-apigee-environment-%s", u.GetResourceId()) +} + +func (u *ApigeeEnvironmentIamUpdater) DescribeResource() string { + return fmt.Sprintf("apigee environment %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_addons_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_addons_config.go new file mode 100644 index 0000000000..ac9b2051a9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_addons_config.go @@ -0,0 +1,699 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeAddonsConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeAddonsConfigCreate, + Read: resourceApigeeAddonsConfigRead, + Update: resourceApigeeAddonsConfigUpdate, + Delete: resourceApigeeAddonsConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeAddonsConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "org": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the Apigee organization.`, + }, + "addons_config": { + Type: schema.TypeList, + Optional: true, + Description: `Addon configurations of the Apigee organization.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "advanced_api_ops_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for the Monetization add-on.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + 
"enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Flag that specifies whether the Advanced API Ops add-on is enabled.`, + }, + }, + }, + }, + "api_security_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for the Monetization add-on.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Flag that specifies whether the Advanced API Ops add-on is enabled.`, + }, + "expires_at": { + Type: schema.TypeString, + Computed: true, + Description: `Flag that specifies whether the Advanced API Ops add-on is enabled.`, + }, + }, + }, + }, + "connectors_platform_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for the Monetization add-on.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Flag that specifies whether the Advanced API Ops add-on is enabled.`, + }, + "expires_at": { + Type: schema.TypeString, + Computed: true, + Description: `Flag that specifies whether the Advanced API Ops add-on is enabled.`, + }, + }, + }, + }, + "integration_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for the Monetization add-on.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Flag that specifies whether the Advanced API Ops add-on is enabled.`, + }, + }, + }, + }, + "monetization_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for the Monetization add-on.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Flag that specifies whether the Advanced API Ops add-on is enabled.`, + }, + }, + }, + }, + }, + }, + }, + }, + 
UseJSONNumber: true, + } +} + +func resourceApigeeAddonsConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + addonsConfigProp, err := expandApigeeAddonsConfigAddonsConfig(d.Get("addons_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("addons_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(addonsConfigProp)) && (ok || !reflect.DeepEqual(v, addonsConfigProp)) { + obj["addonsConfig"] = addonsConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org}}:setAddons") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AddonsConfig: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating AddonsConfig: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ApigeeOperationWaitTime( + config, res, "Creating AddonsConfig", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create AddonsConfig: %s", err) + } + + log.Printf("[DEBUG] Finished creating AddonsConfig %q: %#v", d.Id(), res) + + return resourceApigeeAddonsConfigRead(d, meta) +} + +func resourceApigeeAddonsConfigRead(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeAddonsConfig %q", d.Id())) + } + + if err := d.Set("addons_config", flattenApigeeAddonsConfigAddonsConfig(res["addonsConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading AddonsConfig: %s", err) + } + + return nil +} + +func resourceApigeeAddonsConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + addonsConfigProp, err := expandApigeeAddonsConfigAddonsConfig(d.Get("addons_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("addons_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, addonsConfigProp)) { + obj["addonsConfig"] = addonsConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org}}:setAddons") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating AddonsConfig %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err 
== nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating AddonsConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating AddonsConfig %q: %#v", d.Id(), res) + } + + err = ApigeeOperationWaitTime( + config, res, "Updating AddonsConfig", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceApigeeAddonsConfigRead(d, meta) +} + +func resourceApigeeAddonsConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org}}:setAddons") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AddonsConfig %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AddonsConfig") + } + + err = ApigeeOperationWaitTime( + config, res, "Deleting AddonsConfig", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting AddonsConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeAddonsConfigImport(d *schema.ResourceData, meta interface{}) 
([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + parts := strings.Split(d.Get("org").(string), "/") + + var projectId string + switch len(parts) { + case 1: + projectId = parts[0] + case 2: + projectId = parts[1] + default: + return nil, fmt.Errorf( + "Saw %s when the org is expected to have shape %s or %s", + d.Get("org"), + "{{org}}", + "organizations/{{org}}", + ) + } + + if err := d.Set("org", projectId); err != nil { + return nil, fmt.Errorf("Error setting organization: %s", err) + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeAddonsConfigAddonsConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["advanced_api_ops_config"] = + flattenApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfig(original["advancedApiOpsConfig"], d, config) + transformed["integration_config"] = + flattenApigeeAddonsConfigAddonsConfigIntegrationConfig(original["integrationConfig"], d, config) + transformed["monetization_config"] = + flattenApigeeAddonsConfigAddonsConfigMonetizationConfig(original["monetizationConfig"], d, config) + transformed["api_security_config"] = + flattenApigeeAddonsConfigAddonsConfigApiSecurityConfig(original["apiSecurityConfig"], d, config) + transformed["connectors_platform_config"] = + flattenApigeeAddonsConfigAddonsConfigConnectorsPlatformConfig(original["connectorsPlatformConfig"], d, config) + return 
[]interface{}{transformed} +} +func flattenApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfigEnabled(original["enabled"], d, config) + return []interface{}{transformed} +} +func flattenApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAddonsConfigAddonsConfigIntegrationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenApigeeAddonsConfigAddonsConfigIntegrationConfigEnabled(original["enabled"], d, config) + return []interface{}{transformed} +} +func flattenApigeeAddonsConfigAddonsConfigIntegrationConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAddonsConfigAddonsConfigMonetizationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenApigeeAddonsConfigAddonsConfigMonetizationConfigEnabled(original["enabled"], d, config) + return []interface{}{transformed} +} +func flattenApigeeAddonsConfigAddonsConfigMonetizationConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenApigeeAddonsConfigAddonsConfigApiSecurityConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenApigeeAddonsConfigAddonsConfigApiSecurityConfigEnabled(original["enabled"], d, config) + transformed["expires_at"] = + flattenApigeeAddonsConfigAddonsConfigApiSecurityConfigExpiresAt(original["expiresAt"], d, config) + return []interface{}{transformed} +} +func flattenApigeeAddonsConfigAddonsConfigApiSecurityConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAddonsConfigAddonsConfigApiSecurityConfigExpiresAt(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAddonsConfigAddonsConfigConnectorsPlatformConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigEnabled(original["enabled"], d, config) + transformed["expires_at"] = + flattenApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigExpiresAt(original["expiresAt"], d, config) + return []interface{}{transformed} +} +func flattenApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigExpiresAt(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeAddonsConfigAddonsConfig(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAdvancedApiOpsConfig, err := expandApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfig(original["advanced_api_ops_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAdvancedApiOpsConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["advancedApiOpsConfig"] = transformedAdvancedApiOpsConfig + } + + transformedIntegrationConfig, err := expandApigeeAddonsConfigAddonsConfigIntegrationConfig(original["integration_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegrationConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integrationConfig"] = transformedIntegrationConfig + } + + transformedMonetizationConfig, err := expandApigeeAddonsConfigAddonsConfigMonetizationConfig(original["monetization_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonetizationConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["monetizationConfig"] = transformedMonetizationConfig + } + + transformedApiSecurityConfig, err := expandApigeeAddonsConfigAddonsConfigApiSecurityConfig(original["api_security_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedApiSecurityConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["apiSecurityConfig"] = transformedApiSecurityConfig + } + + transformedConnectorsPlatformConfig, err := expandApigeeAddonsConfigAddonsConfigConnectorsPlatformConfig(original["connectors_platform_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConnectorsPlatformConfig); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["connectorsPlatformConfig"] = transformedConnectorsPlatformConfig + } + + return transformed, nil +} + +func expandApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnabled, err := expandApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfigEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enabled"] = transformedEnabled + } + + return transformed, nil +} + +func expandApigeeAddonsConfigAddonsConfigAdvancedApiOpsConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeAddonsConfigAddonsConfigIntegrationConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnabled, err := expandApigeeAddonsConfigAddonsConfigIntegrationConfigEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enabled"] = transformedEnabled + } + + return transformed, nil +} + +func expandApigeeAddonsConfigAddonsConfigIntegrationConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandApigeeAddonsConfigAddonsConfigMonetizationConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnabled, err := expandApigeeAddonsConfigAddonsConfigMonetizationConfigEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enabled"] = transformedEnabled + } + + return transformed, nil +} + +func expandApigeeAddonsConfigAddonsConfigMonetizationConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeAddonsConfigAddonsConfigApiSecurityConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnabled, err := expandApigeeAddonsConfigAddonsConfigApiSecurityConfigEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enabled"] = transformedEnabled + } + + transformedExpiresAt, err := expandApigeeAddonsConfigAddonsConfigApiSecurityConfigExpiresAt(original["expires_at"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpiresAt); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expiresAt"] = transformedExpiresAt + } + + return transformed, nil +} + +func expandApigeeAddonsConfigAddonsConfigApiSecurityConfigEnabled(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeAddonsConfigAddonsConfigApiSecurityConfigExpiresAt(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeAddonsConfigAddonsConfigConnectorsPlatformConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnabled, err := expandApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enabled"] = transformedEnabled + } + + transformedExpiresAt, err := expandApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigExpiresAt(original["expires_at"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpiresAt); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expiresAt"] = transformedExpiresAt + } + + return transformed, nil +} + +func expandApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeAddonsConfigAddonsConfigConnectorsPlatformConfigExpiresAt(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_addons_config_sweeper.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_addons_config_sweeper.go new file mode 100644 index 0000000000..34d74ac58f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_addons_config_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ApigeeAddonsConfig", testSweepApigeeAddonsConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepApigeeAddonsConfig(region string) error { + resourceName := "ApigeeAddonsConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err 
+ } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://apigee.googleapis.com/v1/organizations", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["addonsConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://apigee.googleapis.com/v1/organizations/{{org}}:setAddons" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_endpoint_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_endpoint_attachment.go new file mode 100644 index 0000000000..07872ed98d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_endpoint_attachment.go @@ -0,0 +1,335 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeEndpointAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeEndpointAttachmentCreate, + Read: resourceApigeeEndpointAttachmentRead, + Delete: resourceApigeeEndpointAttachmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeEndpointAttachmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "endpoint_attachment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `ID of the endpoint attachment.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Location of the endpoint attachment.`, + }, + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee Organization associated with the Apigee instance, +in the format 'organizations/{{org_name}}'.`, + }, + "service_attachment": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Format: 
projects/*/regions/*/serviceAttachments/*`, + }, + "connection_state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the endpoint attachment connection to the service attachment.`, + }, + "host": { + Type: schema.TypeString, + Computed: true, + Description: `Host that can be used in either HTTP Target Endpoint directly, or as the host in Target Server.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Name of the Endpoint Attachment in the following format: +organizations/{organization}/endpointAttachments/{endpointAttachment}.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeEndpointAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + locationProp, err := expandApigeeEndpointAttachmentLocation(d.Get("location"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("location"); !tpgresource.IsEmptyValue(reflect.ValueOf(locationProp)) && (ok || !reflect.DeepEqual(v, locationProp)) { + obj["location"] = locationProp + } + serviceAttachmentProp, err := expandApigeeEndpointAttachmentServiceAttachment(d.Get("service_attachment"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_attachment"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceAttachmentProp)) && (ok || !reflect.DeepEqual(v, serviceAttachmentProp)) { + obj["serviceAttachment"] = serviceAttachmentProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/endpointAttachments?endpointAttachmentId={{endpoint_attachment_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new EndpointAttachment: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating EndpointAttachment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{org_id}}/endpointAttachments/{{endpoint_attachment_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = ApigeeOperationWaitTimeWithResponse( + config, res, &opRes, "Creating EndpointAttachment", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create EndpointAttachment: %s", err) + } + + if err := d.Set("name", flattenApigeeEndpointAttachmentName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "{{org_id}}/endpointAttachments/{{endpoint_attachment_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating EndpointAttachment %q: %#v", d.Id(), res) + + return resourceApigeeEndpointAttachmentRead(d, meta) +} + +func resourceApigeeEndpointAttachmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/endpointAttachments/{{endpoint_attachment_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeEndpointAttachment %q", d.Id())) + } + + if err := d.Set("name", flattenApigeeEndpointAttachmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading EndpointAttachment: %s", err) + } + if err := d.Set("location", flattenApigeeEndpointAttachmentLocation(res["location"], d, config)); err != nil { + return fmt.Errorf("Error reading EndpointAttachment: %s", err) + } + if err := d.Set("host", flattenApigeeEndpointAttachmentHost(res["host"], d, config)); err != nil { + return fmt.Errorf("Error reading EndpointAttachment: %s", err) + } + if err := d.Set("service_attachment", flattenApigeeEndpointAttachmentServiceAttachment(res["serviceAttachment"], d, config)); err != nil { + return fmt.Errorf("Error reading EndpointAttachment: %s", err) + } + if err := 
d.Set("connection_state", flattenApigeeEndpointAttachmentConnectionState(res["connectionState"], d, config)); err != nil { + return fmt.Errorf("Error reading EndpointAttachment: %s", err) + } + + return nil +} + +func resourceApigeeEndpointAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/endpointAttachments/{{endpoint_attachment_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting EndpointAttachment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "EndpointAttachment") + } + + err = ApigeeOperationWaitTime( + config, res, "Deleting EndpointAttachment", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting EndpointAttachment %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeEndpointAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats cannot import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + nameParts := strings.Split(d.Get("name").(string), "/") + if len(nameParts) == 4 { + // 
`organizations/{{org_name}}/endpointAttachment/{{endpoint_attachment_id}}` + orgId := fmt.Sprintf("organizations/%s", nameParts[1]) + if err := d.Set("org_id", orgId); err != nil { + return nil, fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("endpoint_attachment_id", nameParts[3]); err != nil { + return nil, fmt.Errorf("Error setting endpoint_attachment_id: %s", err) + } + } else { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s", + d.Get("name"), + "organizations/{{org_name}}/environments/{{name}}") + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeEndpointAttachmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEndpointAttachmentLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEndpointAttachmentHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEndpointAttachmentServiceAttachment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEndpointAttachmentConnectionState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeEndpointAttachmentLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeEndpointAttachmentServiceAttachment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_env_keystore.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_env_keystore.go new file mode 100644 index 0000000000..8cd54ee17f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_env_keystore.go @@ -0,0 +1,237 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeEnvKeystore() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeEnvKeystoreCreate, + Read: resourceApigeeEnvKeystoreRead, + Delete: resourceApigeeEnvKeystoreDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeEnvKeystoreImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(1 * time.Minute), + Delete: schema.DefaultTimeout(1 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "env_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee environment group associated with the Apigee environment, +in the format 'organizations/{{org_name}}/environments/{{env_name}}'.`, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the newly created keystore.`, + }, + "aliases": { + Type: schema.TypeList, + Computed: true, + Description: `Aliases in this keystore.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeEnvKeystoreCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandApigeeEnvKeystoreName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + url, err 
:= tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/keystores") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new EnvKeystore: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating EnvKeystore: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{env_id}}/keystores/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating EnvKeystore %q: %#v", d.Id(), res) + + return resourceApigeeEnvKeystoreRead(d, meta) +} + +func resourceApigeeEnvKeystoreRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/keystores/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeEnvKeystore %q", d.Id())) + } + + if err := d.Set("aliases", flattenApigeeEnvKeystoreAliases(res["aliases"], d, config)); err != nil { + return 
fmt.Errorf("Error reading EnvKeystore: %s", err) + } + if err := d.Set("name", flattenApigeeEnvKeystoreName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading EnvKeystore: %s", err) + } + + return nil +} + +func resourceApigeeEnvKeystoreDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/keystores/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting EnvKeystore %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "EnvKeystore") + } + + log.Printf("[DEBUG] Finished deleting EnvKeystore %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeEnvKeystoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats cannot import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/keystores/(?P.+)", + "(?P.+)/(?P.+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{env_id}}/keystores/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func 
flattenApigeeEnvKeystoreAliases(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEnvKeystoreName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeEnvKeystoreName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_env_keystore_alias_pkcs12.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_env_keystore_alias_pkcs12.go new file mode 100644 index 0000000000..e86b7661bc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_env_keystore_alias_pkcs12.go @@ -0,0 +1,631 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package apigee + +import ( + "bytes" + "fmt" + "io" + "log" + "mime/multipart" + "os" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeKeystoresAliasesPkcs12() *schema.Resource { + return &schema.Resource{ + Create: ResourceApigeeKeystoresAliasesPkcs12Create, + Read: ResourceApigeeKeystoresAliasesPkcs12Read, + Delete: ResourceApigeeKeystoresAliasesPkcs12Delete, + + Importer: &schema.ResourceImporter{ + State: ResourceApigeeKeystoresAliasesPkcs12Import, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "alias": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Alias Name`, + }, + "file": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Cert content`, + }, + "environment": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: `Environment associated with the alias`, + }, + "keystore": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Keystore Name`, + }, + "org_id": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: `Organization ID associated with the alias`, + }, + "filehash": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Hash of the pkcs file", + }, + "certs_info": { + Type: schema.TypeList, + Computed: true, + Description: `Chain of certificates under this alias.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cert_info": { + Type: schema.TypeList, + Computed: true, + Description: `List of all properties in the object.`, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "basic_constraints": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 basic constraints extension.`, + }, + "expiry_date": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 notAfter validity period in milliseconds since epoch.`, + }, + "is_valid": { + Type: schema.TypeString, + Computed: true, + Description: `Flag that specifies whether the certificate is valid. +Flag is set to Yes if the certificate is valid, No if expired, or Not yet if not yet valid.`, + }, + "issuer": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 issuer.`, + }, + "public_key": { + Type: schema.TypeString, + Computed: true, + Description: `Public key component of the X.509 subject public key info.`, + }, + "serial_number": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 serial number.`, + }, + "sig_alg_name": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 signatureAlgorithm.`, + }, + "subject": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 subject.`, + }, + "subject_alternative_names": { + Type: schema.TypeList, + Computed: true, + Description: `X.509 subject alternative names (SANs) extension.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "valid_from": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 notBefore validity period in milliseconds since epoch.`, + }, + "version": { + Type: schema.TypeInt, + Computed: true, + Description: `X.509 version.`, + }, + }, + }, + }, + }, + }, + }, + "password": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Password for the Private Key if it's encrypted`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `Optional.Type of Alias`, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceApigeeKeystoresAliasesPkcs12Create(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + filePath, _ := d.GetOk("file") + file, err := os.Open(filePath.(string)) + if err != nil { + return err + } + buf := new(bytes.Buffer) + bw := multipart.NewWriter(buf) + if password, ok := d.GetOkExists("password"); ok { + keyFilePartWriter, _ := bw.CreateFormField("password") + keyFilePartWriter.Write([]byte(password.(string))) + } + certFilePartWriter, _ := bw.CreateFormField("file") + _, err = io.Copy(certFilePartWriter, file) + bw.Close() + file.Close() + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases?format=pkcs12&alias={{alias}}&ignoreExpiryValidation=true") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new KeystoresAliasesPkcs12") + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestRawBodyWithTimeout(config, "POST", billingProject, url, userAgent, buf, "multipart/form-data; boundary="+bw.Boundary(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating KeystoresAliasesPkcs12: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating KeystoreAliasesPkcs %q: %#v", d.Id(), res) + + return ResourceApigeeKeystoresAliasesPkcs12Read(d, meta) +} + +func ResourceApigeeKeystoresAliasesPkcs12Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeKeystoreAliasesPkcs %q", d.Id())) + } + + if err := d.Set("alias", flattenApigeeKeystoreAliasesPkcsAlias(res["alias"], d, config)); err != nil { + return fmt.Errorf("Error reading KeystoreAliasesPkcs: %s", err) + } + + if err := d.Set("certs_info", flattenApigeeKeystoreAliasesPkcsCertsInfo(res["certsInfo"], d, config)); err != nil { + return fmt.Errorf("Error reading KeystoreAliasesPkcs: %s", err) + } + + if err := d.Set("type", flattenApigeeKeystoreAliasesPkcsType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading KeystoreAliasesPkcs: %s", err) + } + + return nil +} + +func ResourceApigeeKeystoresAliasesPkcs12Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting KeystoreAliasesPkcs %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "KeystoreAliasesPkcs") + } + + log.Printf("[DEBUG] Finished deleting KeystoreAliasesPkcs %q: %#v", d.Id(), res) + return nil +} + +func ResourceApigeeKeystoresAliasesPkcs12Import(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "organizations/(?P[^/]+)/environments/(?P[^/]+)/keystores/(?P[^/]+)/aliases/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeKeystoreAliasesPkcsOrgId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsEnvironment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsKeystore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsAlias(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenApigeeKeystoreAliasesPkcsPassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsCert(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsCertsInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cert_info"] = + flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfo(original["certInfo"], d, config) + return []interface{}{transformed} +} +func flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "version": flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoVersion(original["version"], d, config), + "subject": flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoSubject(original["subject"], d, config), + "issuer": flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoIssuer(original["issuer"], d, config), + "expiry_date": flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoExpiryDate(original["expiryDate"], d, config), + "valid_from": flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoValidFrom(original["validFrom"], d, config), + "is_valid": flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoIsValid(original["isValid"], d, config), + "subject_alternative_names": flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoSubjectAlternativeNames(original["subjectAlternativeNames"], d, config), + 
"sig_alg_name": flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoSigAlgName(original["sigAlgName"], d, config), + "public_key": flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoPublicKey(original["publicKey"], d, config), + "basic_constraints": flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoBasicConstraints(original["basicConstraints"], d, config), + "serial_number": flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoSerialNumber(original["serialNumber"], d, config), + }) + } + return transformed +} +func flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoSubject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoIssuer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoExpiryDate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoValidFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoIsValid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoSubjectAlternativeNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v 
+} + +func flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoSigAlgName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoPublicKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoBasicConstraints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsCertsInfoCertInfoSerialNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoreAliasesPkcsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeKeystoreAliasesPkcsOrgId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsEnvironment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsKeystore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsAlias(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsPassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsCert(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, 
nil +} + +func expandApigeeKeystoreAliasesPkcsCertsInfo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCertInfo, err := expandApigeeKeystoreAliasesPkcsCertsInfoCertInfo(original["cert_info"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCertInfo); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["certInfo"] = transformedCertInfo + } + + return transformed, nil +} + +func expandApigeeKeystoreAliasesPkcsCertsInfoCertInfo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVersion, err := expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSubject, err := expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoSubject(original["subject"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubject); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subject"] = transformedSubject + } + + transformedIssuer, err := expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoIssuer(original["issuer"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIssuer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["issuer"] = transformedIssuer + } + + 
transformedExpiryDate, err := expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoExpiryDate(original["expiry_date"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpiryDate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expiryDate"] = transformedExpiryDate + } + + transformedValidFrom, err := expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoValidFrom(original["valid_from"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValidFrom); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["validFrom"] = transformedValidFrom + } + + transformedIsValid, err := expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoIsValid(original["is_valid"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIsValid); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["isValid"] = transformedIsValid + } + + transformedSubjectAlternativeNames, err := expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoSubjectAlternativeNames(original["subject_alternative_names"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubjectAlternativeNames); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subjectAlternativeNames"] = transformedSubjectAlternativeNames + } + + transformedSigAlgName, err := expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoSigAlgName(original["sig_alg_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSigAlgName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sigAlgName"] = transformedSigAlgName + } + + transformedPublicKey, err := expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoPublicKey(original["public_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicKey"] 
= transformedPublicKey + } + + transformedBasicConstraints, err := expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoBasicConstraints(original["basic_constraints"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBasicConstraints); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["basicConstraints"] = transformedBasicConstraints + } + + transformedSerialNumber, err := expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoSerialNumber(original["serial_number"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSerialNumber); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serialNumber"] = transformedSerialNumber + } + + req = append(req, transformed) + } + return req, nil +} + +func expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoSubject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoIssuer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoExpiryDate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoValidFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoIsValid(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoSubjectAlternativeNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoSigAlgName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoPublicKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoBasicConstraints(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoreAliasesPkcsCertsInfoCertInfoSerialNumber(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_env_references.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_env_references.go new file mode 100644 index 0000000000..4cd34a4321 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_env_references.go @@ -0,0 +1,291 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeEnvReferences() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeEnvReferencesCreate, + Read: resourceApigeeEnvReferencesRead, + Delete: resourceApigeeEnvReferencesDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeEnvReferencesImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(1 * time.Minute), + Delete: schema.DefaultTimeout(1 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "env_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee environment group associated with the Apigee environment, +in the format 'organizations/{{org_name}}/environments/{{env_name}}'.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. The resource id of this reference. Values must match the regular expression [\w\s-.]+.`, + }, + "refers": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. The id of the resource to which this reference refers. Must be the id of a resource that exists in the parent environment and is of the given resourceType.`, + }, + "resource_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The type of resource referred to by this reference. Valid values are 'KeyStore' or 'TrustStore'.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Optional. 
A human-readable description of this reference.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeEnvReferencesCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandApigeeEnvReferencesName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandApigeeEnvReferencesDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + resourceTypeProp, err := expandApigeeEnvReferencesResourceType(d.Get("resource_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("resource_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(resourceTypeProp)) && (ok || !reflect.DeepEqual(v, resourceTypeProp)) { + obj["resourceType"] = resourceTypeProp + } + refersProp, err := expandApigeeEnvReferencesRefers(d.Get("refers"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("refers"); !tpgresource.IsEmptyValue(reflect.ValueOf(refersProp)) && (ok || !reflect.DeepEqual(v, refersProp)) { + obj["refers"] = refersProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/references/") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new EnvReferences: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp 
+ } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating EnvReferences: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{env_id}}/references/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating EnvReferences %q: %#v", d.Id(), res) + + return resourceApigeeEnvReferencesRead(d, meta) +} + +func resourceApigeeEnvReferencesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/references/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeEnvReferences %q", d.Id())) + } + + if err := d.Set("name", flattenApigeeEnvReferencesName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading EnvReferences: %s", err) + } + if err := d.Set("description", flattenApigeeEnvReferencesDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading EnvReferences: %s", err) + } + if err := d.Set("resource_type", flattenApigeeEnvReferencesResourceType(res["resourceType"], d, config)); err != nil { + 
return fmt.Errorf("Error reading EnvReferences: %s", err) + } + if err := d.Set("refers", flattenApigeeEnvReferencesRefers(res["refers"], d, config)); err != nil { + return fmt.Errorf("Error reading EnvReferences: %s", err) + } + + return nil +} + +func resourceApigeeEnvReferencesDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{env_id}}/references/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting EnvReferences %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "EnvReferences") + } + + log.Printf("[DEBUG] Finished deleting EnvReferences %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeEnvReferencesImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats cannot import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/references/(?P.+)", + "(?P.+)/(?P.+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{env_id}}/references/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil 
+} + +func flattenApigeeEnvReferencesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEnvReferencesDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEnvReferencesResourceType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEnvReferencesRefers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeEnvReferencesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeEnvReferencesDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeEnvReferencesResourceType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeEnvReferencesRefers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_envgroup.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_envgroup.go new file mode 100644 index 0000000000..59bcb5c09c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_envgroup.go @@ -0,0 +1,373 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeEnvgroup() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeEnvgroupCreate, + Read: resourceApigeeEnvgroupRead, + Update: resourceApigeeEnvgroupUpdate, + Delete: resourceApigeeEnvgroupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeEnvgroupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource ID of the environment group.`, + }, + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee Organization associated with the Apigee environment group, +in the format 'organizations/{{org_name}}'.`, + }, + "hostnames": { + Type: schema.TypeList, + Optional: true, + Description: `Hostnames of the environment group.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + UseJSONNumber: true, + } +} + +func 
resourceApigeeEnvgroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandApigeeEnvgroupName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + hostnamesProp, err := expandApigeeEnvgroupHostnames(d.Get("hostnames"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("hostnames"); !tpgresource.IsEmptyValue(reflect.ValueOf(hostnamesProp)) && (ok || !reflect.DeepEqual(v, hostnamesProp)) { + obj["hostnames"] = hostnamesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/envgroups") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Envgroup: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Envgroup: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{org_id}}/envgroups/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = ApigeeOperationWaitTimeWithResponse( + config, res, &opRes, "Creating Envgroup", userAgent, + 
d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Envgroup: %s", err) + } + + if err := d.Set("name", flattenApigeeEnvgroupName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{org_id}}/envgroups/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Envgroup %q: %#v", d.Id(), res) + + return resourceApigeeEnvgroupRead(d, meta) +} + +func resourceApigeeEnvgroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/envgroups/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeEnvgroup %q", d.Id())) + } + + if err := d.Set("name", flattenApigeeEnvgroupName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Envgroup: %s", err) + } + if err := d.Set("hostnames", flattenApigeeEnvgroupHostnames(res["hostnames"], d, config)); err != nil { + return fmt.Errorf("Error reading Envgroup: %s", err) + } + + return nil +} + +func resourceApigeeEnvgroupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + hostnamesProp, err := expandApigeeEnvgroupHostnames(d.Get("hostnames"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("hostnames"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hostnamesProp)) { + obj["hostnames"] = hostnamesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/envgroups/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Envgroup %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("hostnames") { + updateMask = append(updateMask, "hostnames") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Envgroup %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Envgroup %q: %#v", d.Id(), res) + } + + err = ApigeeOperationWaitTime( + config, res, "Updating Envgroup", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceApigeeEnvgroupRead(d, meta) +} + +func resourceApigeeEnvgroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/envgroups/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Envgroup %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Envgroup") + } + + err = ApigeeOperationWaitTime( + config, res, "Deleting Envgroup", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Envgroup %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeEnvgroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats cannot import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + nameParts := strings.Split(d.Get("name").(string), "/") + if len(nameParts) == 4 { + // `organizations/{{org_name}}/envgroups/{{name}}` + orgId := fmt.Sprintf("organizations/%s", nameParts[1]) + if err := d.Set("org_id", orgId); err != nil { + return nil, fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("name", nameParts[3]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else if len(nameParts) == 3 { + // `organizations/{{org_name}}/{{name}}` + orgId := fmt.Sprintf("organizations/%s", nameParts[1]) + if err := d.Set("org_id", orgId); err != nil { + return 
nil, fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("name", nameParts[2]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s or %s", + d.Get("name"), + "organizations/{{org_name}}/envgroups/{{name}}", + "organizations/{{org_name}}/{{name}}") + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{org_id}}/envgroups/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeEnvgroupName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEnvgroupHostnames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeEnvgroupName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeEnvgroupHostnames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_envgroup_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_envgroup_attachment.go new file mode 100644 index 0000000000..ebea1cebeb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_envgroup_attachment.go @@ -0,0 +1,266 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeEnvgroupAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeEnvgroupAttachmentCreate, + Read: resourceApigeeEnvgroupAttachmentRead, + Delete: resourceApigeeEnvgroupAttachmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeEnvgroupAttachmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "envgroup_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee environment group associated with the Apigee environment, +in the format 'organizations/{{org_name}}/envgroups/{{envgroup_name}}'.`, + }, + "environment": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource ID of the environment.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the newly created attachment (output parameter).`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeEnvgroupAttachmentCreate(d *schema.ResourceData, 
meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + environmentProp, err := expandApigeeEnvgroupAttachmentEnvironment(d.Get("environment"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("environment"); !tpgresource.IsEmptyValue(reflect.ValueOf(environmentProp)) && (ok || !reflect.DeepEqual(v, environmentProp)) { + obj["environment"] = environmentProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{envgroup_id}}/attachments") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new EnvgroupAttachment: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating EnvgroupAttachment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{envgroup_id}}/attachments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = ApigeeOperationWaitTimeWithResponse( + config, res, &opRes, "Creating EnvgroupAttachment", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create EnvgroupAttachment: %s", err) + } + + if err := d.Set("name", flattenApigeeEnvgroupAttachmentName(opRes["name"], 
d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{envgroup_id}}/attachments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating EnvgroupAttachment %q: %#v", d.Id(), res) + + return resourceApigeeEnvgroupAttachmentRead(d, meta) +} + +func resourceApigeeEnvgroupAttachmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{envgroup_id}}/attachments/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeEnvgroupAttachment %q", d.Id())) + } + + if err := d.Set("environment", flattenApigeeEnvgroupAttachmentEnvironment(res["environment"], d, config)); err != nil { + return fmt.Errorf("Error reading EnvgroupAttachment: %s", err) + } + if err := d.Set("name", flattenApigeeEnvgroupAttachmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading EnvgroupAttachment: %s", err) + } + + return nil +} + +func resourceApigeeEnvgroupAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := 
tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{envgroup_id}}/attachments/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting EnvgroupAttachment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "EnvgroupAttachment") + } + + err = ApigeeOperationWaitTime( + config, res, "Deleting EnvgroupAttachment", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting EnvgroupAttachment %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeEnvgroupAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats cannot import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/attachments/(?P.+)", + "(?P.+)/(?P.+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{envgroup_id}}/attachments/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeEnvgroupAttachmentEnvironment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEnvgroupAttachmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
expandApigeeEnvgroupAttachmentEnvironment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_envgroup_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_envgroup_sweeper.go new file mode 100644 index 0000000000..d5fd877216 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_envgroup_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ApigeeEnvgroup", testSweepApigeeEnvgroup) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepApigeeEnvgroup(region string) error { + resourceName := "ApigeeEnvgroup" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://apigee.googleapis.com/v1/envgroups", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) 
+ return nil + } + + resourceList, ok := res["envgroups"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://apigee.googleapis.com/v1/{{org_id}}/envgroups/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment.go new file mode 100644 index 0000000000..58447a30b8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment.go @@ -0,0 +1,566 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceApigeeEnvironment() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeEnvironmentCreate, + Read: resourceApigeeEnvironmentRead, + Update: resourceApigeeEnvironmentUpdate, + Delete: resourceApigeeEnvironmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeEnvironmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource 
ID of the environment.`, + }, + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee Organization associated with the Apigee environment, +in the format 'organizations/{{org_name}}'.`, + }, + "api_proxy_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"API_PROXY_TYPE_UNSPECIFIED", "PROGRAMMABLE", "CONFIGURABLE", ""}), + Description: `Optional. API Proxy type supported by the environment. The type can be set when creating +the Environment and cannot be changed. Possible values: ["API_PROXY_TYPE_UNSPECIFIED", "PROGRAMMABLE", "CONFIGURABLE"]`, + }, + "deployment_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"DEPLOYMENT_TYPE_UNSPECIFIED", "PROXY", "ARCHIVE", ""}), + Description: `Optional. Deployment type supported by the environment. The deployment type can be +set when creating the environment and cannot be changed. When you enable archive +deployment, you will be prevented from performing a subset of actions within the +environment, including: +Managing the deployment of API proxy or shared flow revisions; +Creating, updating, or deleting resource files; +Creating, updating, or deleting target servers. 
Possible values: ["DEPLOYMENT_TYPE_UNSPECIFIED", "PROXY", "ARCHIVE"]`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Description of the environment.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Display name of the environment.`, + }, + "node_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `NodeConfig for setting the min/max number of nodes associated with the environment.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_node_count": { + Type: schema.TypeString, + Optional: true, + Description: `The maximum total number of gateway nodes that the is reserved for all instances that +has the specified environment. If not specified, the default is determined by the +recommended maximum number of nodes for that gateway.`, + }, + "min_node_count": { + Type: schema.TypeString, + Optional: true, + Description: `The minimum total number of gateway nodes that the is reserved for all instances that +has the specified environment. 
If not specified, the default is determined by the +recommended minimum number of nodes for that gateway.`, + }, + "current_aggregate_node_count": { + Type: schema.TypeString, + Computed: true, + Description: `The current total number of gateway nodes that each environment currently has across +all instances.`, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeEnvironmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandApigeeEnvironmentName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + displayNameProp, err := expandApigeeEnvironmentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandApigeeEnvironmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + deploymentTypeProp, err := expandApigeeEnvironmentDeploymentType(d.Get("deployment_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployment_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(deploymentTypeProp)) && (ok || !reflect.DeepEqual(v, deploymentTypeProp)) { + obj["deploymentType"] = deploymentTypeProp + } + apiProxyTypeProp, err := 
expandApigeeEnvironmentApiProxyType(d.Get("api_proxy_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("api_proxy_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(apiProxyTypeProp)) && (ok || !reflect.DeepEqual(v, apiProxyTypeProp)) { + obj["apiProxyType"] = apiProxyTypeProp + } + nodeConfigProp, err := expandApigeeEnvironmentNodeConfig(d.Get("node_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("node_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(nodeConfigProp)) && (ok || !reflect.DeepEqual(v, nodeConfigProp)) { + obj["nodeConfig"] = nodeConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/environments") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Environment: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Environment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{org_id}}/environments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = ApigeeOperationWaitTimeWithResponse( + config, res, &opRes, "Creating Environment", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Environment: %s", err) + } + + if err := d.Set("name", 
flattenApigeeEnvironmentName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{org_id}}/environments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Environment %q: %#v", d.Id(), res) + + return resourceApigeeEnvironmentRead(d, meta) +} + +func resourceApigeeEnvironmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/environments/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeEnvironment %q", d.Id())) + } + + if err := d.Set("name", flattenApigeeEnvironmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + if err := d.Set("display_name", flattenApigeeEnvironmentDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + if err := d.Set("description", flattenApigeeEnvironmentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + if err := d.Set("deployment_type", flattenApigeeEnvironmentDeploymentType(res["deploymentType"], d, config)); err != nil { + return fmt.Errorf("Error reading 
Environment: %s", err) + } + if err := d.Set("api_proxy_type", flattenApigeeEnvironmentApiProxyType(res["apiProxyType"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + if err := d.Set("node_config", flattenApigeeEnvironmentNodeConfig(res["nodeConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + + return nil +} + +func resourceApigeeEnvironmentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + nodeConfigProp, err := expandApigeeEnvironmentNodeConfig(d.Get("node_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("node_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nodeConfigProp)) { + obj["nodeConfig"] = nodeConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/environments/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Environment %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("node_config") { + updateMask = append(updateMask, "nodeConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + 
+ if err != nil { + return fmt.Errorf("Error updating Environment %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Environment %q: %#v", d.Id(), res) + } + + err = ApigeeOperationWaitTime( + config, res, "Updating Environment", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceApigeeEnvironmentRead(d, meta) +} + +func resourceApigeeEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/environments/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Environment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Environment") + } + + err = ApigeeOperationWaitTime( + config, res, "Deleting Environment", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Environment %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeEnvironmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats cannot import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + nameParts := 
strings.Split(d.Get("name").(string), "/") + if len(nameParts) == 4 { + // `organizations/{{org_name}}/environments/{{name}}` + orgId := fmt.Sprintf("organizations/%s", nameParts[1]) + if err := d.Set("org_id", orgId); err != nil { + return nil, fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("name", nameParts[3]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else if len(nameParts) == 3 { + // `organizations/{{org_name}}/{{name}}` + orgId := fmt.Sprintf("organizations/%s", nameParts[1]) + if err := d.Set("org_id", orgId); err != nil { + return nil, fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("name", nameParts[2]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s or %s", + d.Get("name"), + "organizations/{{org_name}}/environments/{{name}}", + "organizations/{{org_name}}/{{name}}") + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{org_id}}/environments/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeEnvironmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEnvironmentDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEnvironmentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEnvironmentDeploymentType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEnvironmentApiProxyType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEnvironmentNodeConfig(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["min_node_count"] = + flattenApigeeEnvironmentNodeConfigMinNodeCount(original["minNodeCount"], d, config) + transformed["max_node_count"] = + flattenApigeeEnvironmentNodeConfigMaxNodeCount(original["maxNodeCount"], d, config) + transformed["current_aggregate_node_count"] = + flattenApigeeEnvironmentNodeConfigCurrentAggregateNodeCount(original["currentAggregateNodeCount"], d, config) + return []interface{}{transformed} +} +func flattenApigeeEnvironmentNodeConfigMinNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEnvironmentNodeConfigMaxNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeEnvironmentNodeConfigCurrentAggregateNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeEnvironmentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeEnvironmentDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeEnvironmentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeEnvironmentDeploymentType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeEnvironmentApiProxyType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandApigeeEnvironmentNodeConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMinNodeCount, err := expandApigeeEnvironmentNodeConfigMinNodeCount(original["min_node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinNodeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minNodeCount"] = transformedMinNodeCount + } + + transformedMaxNodeCount, err := expandApigeeEnvironmentNodeConfigMaxNodeCount(original["max_node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxNodeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxNodeCount"] = transformedMaxNodeCount + } + + transformedCurrentAggregateNodeCount, err := expandApigeeEnvironmentNodeConfigCurrentAggregateNodeCount(original["current_aggregate_node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCurrentAggregateNodeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["currentAggregateNodeCount"] = transformedCurrentAggregateNodeCount + } + + return transformed, nil +} + +func expandApigeeEnvironmentNodeConfigMinNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeEnvironmentNodeConfigMaxNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeEnvironmentNodeConfigCurrentAggregateNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment_sweeper.go new file mode 100644 index 0000000000..02ac78ae5d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_environment_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ApigeeEnvironment", testSweepApigeeEnvironment) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepApigeeEnvironment(region string) error { + resourceName := "ApigeeEnvironment" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://apigee.googleapis.com/v1/environments", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", 
listUrl, err) + return nil + } + + resourceList, ok := res["environments"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://apigee.googleapis.com/v1/{{org_id}}/environments/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_flowhook.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_flowhook.go new file mode 100644 index 0000000000..78dfaba8fe --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_flowhook.go @@ -0,0 +1,266 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package apigee + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeFlowhook() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeFlowhookCreate, + Read: resourceApigeeFlowhookRead, + Delete: resourceApigeeFlowhookDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeFlowhookImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Description of the flow hook.`, + }, + "environment": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource ID of the environment.`, + }, + "flow_hook_point": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Where in the API call flow the flow hook is invoked. 
Must be one of PreProxyFlowHook, PostProxyFlowHook, PreTargetFlowHook, or PostTargetFlowHook.`, + }, + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee Organization associated with the environment`, + }, + "sharedflow": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Id of the Sharedflow attaching to a flowhook point.`, + }, + "continue_on_error": { + Type: schema.TypeBool, + ForceNew: true, + Optional: true, + Default: true, + Description: `Flag that specifies whether execution should continue if the flow hook throws an exception. Set to true to continue execution. Set to false to stop execution if the flow hook throws an exception. Defaults to true.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeFlowhookCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandApigeeFlowhookDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + sharedflowProp, err := expandApigeeFlowhookSharedflow(d.Get("sharedflow"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("sharedflow"); !tpgresource.IsEmptyValue(reflect.ValueOf(sharedflowProp)) && (ok || !reflect.DeepEqual(v, sharedflowProp)) { + obj["sharedFlow"] = sharedflowProp + } + continue_on_errorProp, err := expandApigeeFlowhookContinueOnError(d.Get("continue_on_error"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("continue_on_error"); !tpgresource.IsEmptyValue(reflect.ValueOf(continue_on_errorProp)) && (ok || 
!reflect.DeepEqual(v, continue_on_errorProp)) { + obj["continueOnError"] = continue_on_errorProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/flowhooks/{{flow_hook_point}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Flowhook: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Flowhook: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/flowhooks/{{flow_hook_point}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Flowhook %q: %#v", d.Id(), res) + + return resourceApigeeFlowhookRead(d, meta) +} + +func resourceApigeeFlowhookRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/flowhooks/{{flow_hook_point}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: 
url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeFlowhook %q", d.Id())) + } + if res["sharedFlow"] == nil || res["sharedFlow"].(string) == "" { + //if response does not contain shared_flow field, then nothing is attached to this flowhook, we treat this "binding" resource non-existent + d.SetId("") + return nil + } + if err := d.Set("description", flattenApigeeFlowhookDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Flowhook: %s", err) + } + if err := d.Set("sharedflow", flattenApigeeFlowhookSharedflow(res["sharedFlow"], d, config)); err != nil { + return fmt.Errorf("Error reading Flowhook: %s", err) + } + if err := d.Set("continue_on_error", flattenApigeeFlowhookContinueOnError(res["continueOnError"], d, config)); err != nil { + return fmt.Errorf("Error reading Flowhook: %s", err) + } + + return nil +} + +func resourceApigeeFlowhookDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/flowhooks/{{flow_hook_point}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Flowhook %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Flowhook") + } + + log.Printf("[DEBUG] 
Finished deleting Flowhook %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeFlowhookImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "organizations/(?P[^/]+)/environments/(?P[^/]+)/flowhooks/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/flowhooks/{{flow_hook_point}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeFlowhookDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeFlowhookSharedflow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeFlowhookContinueOnError(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeFlowhookDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeFlowhookSharedflow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeFlowhookContinueOnError(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_instance.go new file mode 100644 index 0000000000..95e74df6e2 --- 
/dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_instance.go @@ -0,0 +1,530 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Supress diffs when the lists of project have the same number of entries to handle the case that +// API does not return what the user originally provided. Instead, API does some transformation. +// For example, user provides a list of project number, but API returns a list of project Id. 
+func projectListDiffSuppress(_, _, _ string, d *schema.ResourceData) bool { + return ProjectListDiffSuppressFunc(d) +} + +func ProjectListDiffSuppressFunc(d tpgresource.TerraformResourceDataChange) bool { + kLength := "consumer_accept_list.#" + oldLength, newLength := d.GetChange(kLength) + + oldInt, ok := oldLength.(int) + if !ok { + return false + } + + newInt, ok := newLength.(int) + if !ok { + return false + } + log.Printf("[DEBUG] - suppressing diff with oldInt %d, newInt %d", oldInt, newInt) + + return oldInt == newInt +} + +func ResourceApigeeInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeInstanceCreate, + Read: resourceApigeeInstanceRead, + Delete: resourceApigeeInstanceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. Compute Engine location where the instance resides.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Resource ID of the instance.`, + }, + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee Organization associated with the Apigee instance, +in the format 'organizations/{{org_name}}'.`, + }, + "consumer_accept_list": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: projectListDiffSuppress, + Description: `Optional. Customer accept list represents the list of projects (id/number) on customer +side that can privately connect to the service attachment. It is an optional field +which the customers can provide during the instance creation. 
By default, the customer +project associated with the Apigee organization will be included to the list.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Description of the instance.`, + }, + "disk_encryption_key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Customer Managed Encryption Key (CMEK) used for disk and volume encryption. Required for Apigee paid subscriptions only. +Use the following format: 'projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)'`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Display name of the instance.`, + }, + "ip_range": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `IP range represents the customer-provided CIDR block of length 22 that will be used for +the Apigee instance creation. This optional range, if provided, should be freely +available as part of larger named range the customer has allocated to the Service +Networking peering. If this is not provided, Apigee will automatically request for any +available /22 CIDR block from Service Networking. The customer should use this CIDR block +for configuring their firewall needs to allow traffic from Apigee. +Input format: "a.b.c.d/22"`, + }, + "peering_cidr_range": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The size of the CIDR block range that will be reserved by the instance. For valid values, +see [CidrRange](https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.instances#CidrRange) on the documentation.`, + }, + "host": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
Hostname or IP address of the exposed Apigee endpoint used by clients to connect to the service.`, + }, + "port": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Port number of the exposed Apigee endpoint.`, + }, + "service_attachment": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Resource name of the service attachment created for the instance in +the format: projects/*/regions/*/serviceAttachments/* Apigee customers can privately +forward traffic to this service attachment using the PSC endpoints.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandApigeeInstanceName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + locationProp, err := expandApigeeInstanceLocation(d.Get("location"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("location"); !tpgresource.IsEmptyValue(reflect.ValueOf(locationProp)) && (ok || !reflect.DeepEqual(v, locationProp)) { + obj["location"] = locationProp + } + peeringCidrRangeProp, err := expandApigeeInstancePeeringCidrRange(d.Get("peering_cidr_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peering_cidr_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(peeringCidrRangeProp)) && (ok || !reflect.DeepEqual(v, peeringCidrRangeProp)) { + obj["peeringCidrRange"] = peeringCidrRangeProp + } + ipRangeProp, err := expandApigeeInstanceIpRange(d.Get("ip_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_range"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(ipRangeProp)) && (ok || !reflect.DeepEqual(v, ipRangeProp)) { + obj["ipRange"] = ipRangeProp + } + descriptionProp, err := expandApigeeInstanceDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandApigeeInstanceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + diskEncryptionKeyNameProp, err := expandApigeeInstanceDiskEncryptionKeyName(d.Get("disk_encryption_key_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disk_encryption_key_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(diskEncryptionKeyNameProp)) && (ok || !reflect.DeepEqual(v, diskEncryptionKeyNameProp)) { + obj["diskEncryptionKeyName"] = diskEncryptionKeyNameProp + } + consumerAcceptListProp, err := expandApigeeInstanceConsumerAcceptList(d.Get("consumer_accept_list"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("consumer_accept_list"); !tpgresource.IsEmptyValue(reflect.ValueOf(consumerAcceptListProp)) && (ok || !reflect.DeepEqual(v, consumerAcceptListProp)) { + obj["consumerAcceptList"] = consumerAcceptListProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "{{org_id}}/apigeeInstances") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/instances") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Instance: %#v", obj) + 
billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsApigeeRetryableError}, + }) + if err != nil { + return fmt.Errorf("Error creating Instance: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{org_id}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = ApigeeOperationWaitTimeWithResponse( + config, res, &opRes, "Creating Instance", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Instance: %s", err) + } + + if err := d.Set("name", flattenApigeeInstanceName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "{{org_id}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) + + return resourceApigeeInstanceRead(d, meta) +} + +func resourceApigeeInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/instances/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsApigeeRetryableError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeInstance %q", d.Id())) + } + + if err := d.Set("name", flattenApigeeInstanceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("location", flattenApigeeInstanceLocation(res["location"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("peering_cidr_range", flattenApigeeInstancePeeringCidrRange(res["peeringCidrRange"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("description", flattenApigeeInstanceDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("display_name", 
flattenApigeeInstanceDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("disk_encryption_key_name", flattenApigeeInstanceDiskEncryptionKeyName(res["diskEncryptionKeyName"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("host", flattenApigeeInstanceHost(res["host"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("port", flattenApigeeInstancePort(res["port"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("consumer_accept_list", flattenApigeeInstanceConsumerAcceptList(res["consumerAcceptList"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("service_attachment", flattenApigeeInstanceServiceAttachment(res["serviceAttachment"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + return nil +} + +func resourceApigeeInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "{{org_id}}/apigeeInstances") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/instances/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Instance %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: 
config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsApigeeRetryableError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Instance") + } + + err = ApigeeOperationWaitTime( + config, res, "Deleting Instance", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats cannot import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + nameParts := strings.Split(d.Get("name").(string), "/") + if len(nameParts) == 4 { + // `organizations/{{org_name}}/instances/{{name}}` + orgId := fmt.Sprintf("organizations/%s", nameParts[1]) + if err := d.Set("org_id", orgId); err != nil { + return nil, fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("name", nameParts[3]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else if len(nameParts) == 3 { + // `organizations/{{org_name}}/{{name}}` + orgId := fmt.Sprintf("organizations/%s", nameParts[1]) + if err := d.Set("org_id", orgId); err != nil { + return nil, fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("name", nameParts[2]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s or %s", + d.Get("name"), + "organizations/{{org_name}}/instances/{{name}}", + "organizations/{{org_name}}/{{name}}") + } + + // Replace import id for the resource id + id, err := 
tpgresource.ReplaceVars(d, config, "{{org_id}}/instances/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeInstanceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeInstanceLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeInstancePeeringCidrRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeInstanceDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeInstanceDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeInstanceDiskEncryptionKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeInstanceHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeInstancePort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeInstanceConsumerAcceptList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeInstanceServiceAttachment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeInstanceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeInstanceLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeInstancePeeringCidrRange(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeInstanceIpRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeInstanceDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeInstanceDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeInstanceDiskEncryptionKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeInstanceConsumerAcceptList(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_instance_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_instance_attachment.go new file mode 100644 index 0000000000..32e32e084b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_instance_attachment.go @@ -0,0 +1,280 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeInstanceAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeInstanceAttachmentCreate, + Read: resourceApigeeInstanceAttachmentRead, + Delete: resourceApigeeInstanceAttachmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeInstanceAttachmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "environment": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource ID of the environment.`, + }, + "instance_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee instance associated with the Apigee environment, +in the format 'organisations/{{org_name}}/instances/{{instance_name}}'.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the newly created attachment (output parameter).`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeInstanceAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + environmentProp, err := expandApigeeInstanceAttachmentEnvironment(d.Get("environment"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("environment"); !tpgresource.IsEmptyValue(reflect.ValueOf(environmentProp)) && (ok || 
!reflect.DeepEqual(v, environmentProp)) { + obj["environment"] = environmentProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "apigeeInstanceAttachments") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/attachments") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new InstanceAttachment: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating InstanceAttachment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{instance_id}}/attachments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = ApigeeOperationWaitTimeWithResponse( + config, res, &opRes, "Creating InstanceAttachment", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create InstanceAttachment: %s", err) + } + + if err := d.Set("name", flattenApigeeInstanceAttachmentName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "{{instance_id}}/attachments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating InstanceAttachment %q: %#v", d.Id(), res) + + return resourceApigeeInstanceAttachmentRead(d, meta) +} + +func resourceApigeeInstanceAttachmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/attachments/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeInstanceAttachment %q", d.Id())) + } + + if err := d.Set("environment", flattenApigeeInstanceAttachmentEnvironment(res["environment"], d, config)); err != nil { + return fmt.Errorf("Error reading InstanceAttachment: %s", err) + } + if err := d.Set("name", flattenApigeeInstanceAttachmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading InstanceAttachment: %s", err) + } + + return nil +} + +func resourceApigeeInstanceAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "apigeeInstanceAttachments") + if err != nil { + return err + } + 
transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/attachments/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting InstanceAttachment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "InstanceAttachment") + } + + err = ApigeeOperationWaitTime( + config, res, "Deleting InstanceAttachment", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting InstanceAttachment %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeInstanceAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats cannot import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/attachments/(?P.+)", + "(?P.+)/(?P.+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{instance_id}}/attachments/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeInstanceAttachmentEnvironment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeInstanceAttachmentName(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeInstanceAttachmentEnvironment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_instance_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_instance_sweeper.go new file mode 100644 index 0000000000..8e0cfee74a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_instance_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ApigeeInstance", testSweepApigeeInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepApigeeInstance(region string) error { + resourceName := "ApigeeInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://apigee.googleapis.com/v1/instances", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) 
+ return nil + } + + resourceList, ok := res["instances"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://apigee.googleapis.com/v1/{{org_id}}/instances/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_keystores_aliases_key_cert_file.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_keystores_aliases_key_cert_file.go new file mode 100644 index 0000000000..7117593ee8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_keystores_aliases_key_cert_file.go @@ -0,0 +1,694 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package apigee + +import ( + "bytes" + "context" + "fmt" + "log" + "mime/multipart" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeKeystoresAliasesKeyCertFile() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeKeystoresAliasesKeyCertFileCreate, + Read: resourceApigeeKeystoresAliasesKeyCertFileRead, + Update: resourceApigeeKeystoresAliasesKeyCertFileUpdate, + Delete: resourceApigeeKeystoresAliasesKeyCertFileDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeKeystoresAliasesKeyCertFileImport, + }, + + CustomizeDiff: customdiff.All( + /* + If cert is changed then an update is expected, so we tell Terraform core to expect update on certs_info + */ + + customdiff.ComputedIf("certs_info", func(_ context.Context, diff *schema.ResourceDiff, v interface{}) bool { + return diff.HasChange("cert") + }), + ), + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Read: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * 
time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "alias": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: `Alias Name`, + }, + "cert": { + Type: schema.TypeString, + Required: true, + Description: `Cert content`, + }, + "environment": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: `Environment associated with the alias`, + }, + "keystore": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: `Keystore Name`, + }, + "org_id": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: `Organization ID associated with the alias`, + }, + "certs_info": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: `Chain of certificates under this alias.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cert_info": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: `List of all properties in the object.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "basic_constraints": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `X.509 basic constraints extension.`, + }, + "expiry_date": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `X.509 notAfter validity period in milliseconds since epoch.`, + }, + "is_valid": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Flag that specifies whether the certificate is valid. 
+Flag is set to Yes if the certificate is valid, No if expired, or Not yet if not yet valid.`, + }, + "issuer": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `X.509 issuer.`, + }, + "public_key": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Public key component of the X.509 subject public key info.`, + }, + "serial_number": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `X.509 serial number.`, + }, + "sig_alg_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `X.509 signatureAlgorithm.`, + }, + "subject": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `X.509 subject.`, + }, + "subject_alternative_names": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: `X.509 subject alternative names (SANs) extension.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "valid_from": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `X.509 notBefore validity period in milliseconds since epoch.`, + }, + "version": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: `X.509 version.`, + }, + }, + }, + }, + }, + }, + }, + "key": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Sensitive: true, + Description: `Private Key content, omit if uploading to truststore`, + }, + "password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: `Password for the Private Key if it's encrypted`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `Optional.Type of Alias`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeKeystoresAliasesKeyCertFileCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } 
+ + buf := new(bytes.Buffer) + bw := multipart.NewWriter(buf) + if key, ok := d.GetOkExists("key"); ok { + keyFilePartWriter, _ := bw.CreateFormField("keyFile") + keyFilePartWriter.Write([]byte(key.(string))) + } + if password, ok := d.GetOkExists("password"); ok { + keyFilePartWriter, _ := bw.CreateFormField("password") + keyFilePartWriter.Write([]byte(password.(string))) + } + certFilePartWriter, _ := bw.CreateFormField("certFile") + certFilePartWriter.Write([]byte(d.Get("cert").(string))) + bw.Close() + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases?format=keycertfile&alias={{alias}}&ignoreExpiryValidation=true") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new KeystoresAliasesKeyCertFile") + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestRawBodyWithTimeout(config, "POST", billingProject, url, userAgent, buf, "multipart/form-data; boundary="+bw.Boundary(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating KeystoresAliasesKeyCertFile: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating KeystoresAliasesKeyCertFile %q: %#v", d.Id(), res) + + return resourceApigeeKeystoresAliasesKeyCertFileRead(d, meta) +} + +func resourceApigeeKeystoresAliasesKeyCertFileRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeKeystoresAliasesKeyCertFile %q", d.Id())) + } + + if err := d.Set("alias", flattenApigeeKeystoresAliasesKeyCertFileAlias(res["alias"], d, config)); err != nil { + return fmt.Errorf("Error reading KeystoresAliasesKeyCertFile: %s", err) + } + + if err := d.Set("certs_info", flattenApigeeKeystoresAliasesKeyCertFileCertsInfo(res["certsInfo"], d, config)); err != nil { + return fmt.Errorf("Error reading KeystoresAliasesKeyCertFile: %s", err) + } + if err := d.Set("type", flattenApigeeKeystoresAliasesKeyCertFileType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading KeystoresAliasesKeyCertFile: %s", err) + } + + return nil +} + +func resourceApigeeKeystoresAliasesKeyCertFileUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}?ignoreExpiryValidation=true") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating KeystoresAliasesKeyCertFile %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + buf := new(bytes.Buffer) + bw := multipart.NewWriter(buf) + certFilePartWriter, _ := bw.CreateFormField("certFile") + certFilePartWriter.Write([]byte(d.Get("cert").(string))) + bw.Close() + + res, err := sendRequestRawBodyWithTimeout(config, "PUT", billingProject, url, userAgent, buf, "multipart/form-data; boundary="+bw.Boundary(), d.Timeout(schema.TimeoutCreate)) + + if err != nil { + return fmt.Errorf("Error updating KeystoresAliasesKeyCertFile %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating KeystoresAliasesKeyCertFile %q: %#v", d.Id(), res) + } + + return resourceApigeeKeystoresAliasesKeyCertFileRead(d, meta) +} + +func resourceApigeeKeystoresAliasesKeyCertFileDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting KeystoresAliasesKeyCertFile %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "KeystoresAliasesKeyCertFile") + } + + log.Printf("[DEBUG] Finished deleting KeystoresAliasesKeyCertFile %q: %#v", d.Id(), res) + return nil +} + +func 
resourceApigeeKeystoresAliasesKeyCertFileImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "organizations/(?P[^/]+)/environments/(?P[^/]+)/keystores/(?P[^/]+)/aliases/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeKeystoresAliasesKeyCertFileOrgId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileEnvironment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileKeystore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileAlias(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFilePassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileCert(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileCertsInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cert_info"] = + flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfo(original["certInfo"], d, config) + return []interface{}{transformed} +} +func flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "version": flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoVersion(original["version"], d, config), + "subject": flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSubject(original["subject"], d, config), + "issuer": flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoIssuer(original["issuer"], d, config), + "expiry_date": flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoExpiryDate(original["expiryDate"], d, config), + "valid_from": flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoValidFrom(original["validFrom"], d, config), + "is_valid": flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoIsValid(original["isValid"], d, config), + "subject_alternative_names": flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSubjectAlternativeNames(original["subjectAlternativeNames"], d, config), + "sig_alg_name": flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSigAlgName(original["sigAlgName"], d, config), + "public_key": flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoPublicKey(original["publicKey"], d, config), + "basic_constraints": flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoBasicConstraints(original["basicConstraints"], d, config), + 
"serial_number": flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSerialNumber(original["serialNumber"], d, config), + }) + } + return transformed +} +func flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSubject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoIssuer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoExpiryDate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoValidFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoIsValid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSubjectAlternativeNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSigAlgName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoPublicKey(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoBasicConstraints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSerialNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesKeyCertFileType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeKeystoresAliasesKeyCertFileOrgId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileEnvironment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileKeystore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileAlias(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFilePassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileCert(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileCertsInfo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + 
if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCertInfo, err := expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfo(original["cert_info"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCertInfo); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["certInfo"] = transformedCertInfo + } + + return transformed, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVersion, err := expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSubject, err := expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSubject(original["subject"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubject); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subject"] = transformedSubject + } + + transformedIssuer, err := expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoIssuer(original["issuer"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIssuer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["issuer"] = transformedIssuer + } + + transformedExpiryDate, err := expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoExpiryDate(original["expiry_date"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedExpiryDate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expiryDate"] = transformedExpiryDate + } + + transformedValidFrom, err := expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoValidFrom(original["valid_from"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValidFrom); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["validFrom"] = transformedValidFrom + } + + transformedIsValid, err := expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoIsValid(original["is_valid"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIsValid); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["isValid"] = transformedIsValid + } + + transformedSubjectAlternativeNames, err := expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSubjectAlternativeNames(original["subject_alternative_names"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubjectAlternativeNames); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subjectAlternativeNames"] = transformedSubjectAlternativeNames + } + + transformedSigAlgName, err := expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSigAlgName(original["sig_alg_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSigAlgName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sigAlgName"] = transformedSigAlgName + } + + transformedPublicKey, err := expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoPublicKey(original["public_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicKey"] = transformedPublicKey + } + + transformedBasicConstraints, err := 
expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoBasicConstraints(original["basic_constraints"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBasicConstraints); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["basicConstraints"] = transformedBasicConstraints + } + + transformedSerialNumber, err := expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSerialNumber(original["serial_number"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSerialNumber); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serialNumber"] = transformedSerialNumber + } + + req = append(req, transformed) + } + return req, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSubject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoIssuer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoExpiryDate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoValidFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoIsValid(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSubjectAlternativeNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSigAlgName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoPublicKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoBasicConstraints(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesKeyCertFileCertsInfoCertInfoSerialNumber(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_keystores_aliases_key_cert_file_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_keystores_aliases_key_cert_file_sweeper.go new file mode 100644 index 0000000000..8f90819205 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_keystores_aliases_key_cert_file_sweeper.go @@ -0,0 +1,128 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package apigee + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ApigeeKeystoresAliasesKeyCertFile", testSweepApigeeKeystoresAliasesKeyCertFile) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepApigeeKeystoresAliasesKeyCertFile(region string) error { + resourceName := "ApigeeKeystoresAliasesKeyCertFile" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://apigee.googleapis.com/v1/organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, 
+ }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["keystoresAliasesKeyCertFiles"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["alias"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["alias"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://apigee.googleapis.com/v1/organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and 
skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_keystores_aliases_self_signed_cert.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_keystores_aliases_self_signed_cert.go new file mode 100644 index 0000000000..c5b6cf937b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_keystores_aliases_self_signed_cert.go @@ -0,0 +1,687 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeKeystoresAliasesSelfSignedCert() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeKeystoresAliasesSelfSignedCertCreate, + Read: resourceApigeeKeystoresAliasesSelfSignedCertRead, + Delete: resourceApigeeKeystoresAliasesSelfSignedCertDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeKeystoresAliasesSelfSignedCertImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "alias": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Alias for the key/certificate pair. Values must match the regular expression [\w\s-.]{1,255}. +This must be provided for all formats except selfsignedcert; self-signed certs may specify the alias in either +this parameter or the JSON body.`, + }, + "environment": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee environment name`, + }, + "keystore": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee keystore name associated in an Apigee environment`, + }, + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee Organization name associated with the Apigee environment`, + }, + "sig_alg": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Signature algorithm to generate private key. 
Valid values are SHA512withRSA, SHA384withRSA, and SHA256withRSA`, + }, + "subject": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `Subject details.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "common_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Common name of the organization. Maximum length is 64 characters.`, + }, + "country_code": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Two-letter country code. Example, IN for India, US for United States of America.`, + }, + "email": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Email address. Max 255 characters.`, + }, + "locality": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `City or town name. Maximum length is 128 characters.`, + }, + "org": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Organization name. Maximum length is 64 characters.`, + }, + "org_unit": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Organization team name. Maximum length is 64 characters.`, + }, + "state": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `State or district name. Maximum length is 128 characters.`, + }, + }, + }, + }, + "cert_validity_in_days": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Validity duration of certificate, in days. Accepts positive non-zero value. Defaults to 365.`, + }, + "key_size": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Key size. Default and maximum value is 2048 bits.`, + }, + "subject_alternative_dns_names": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `List of alternative host names. 
Maximum length is 255 characters for each value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "subject_alternative_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Subject Alternative Name`, + }, + }, + }, + }, + "certs_info": { + Type: schema.TypeList, + Computed: true, + Description: `Chain of certificates under this alias.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cert_info": { + Type: schema.TypeList, + Computed: true, + Description: `List of all properties in the object.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "basic_constraints": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 basic constraints extension.`, + }, + "expiry_date": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 notAfter validity period in milliseconds since epoch.`, + }, + "is_valid": { + Type: schema.TypeString, + Computed: true, + Description: `Flag that specifies whether the certificate is valid. 
+Flag is set to Yes if the certificate is valid, No if expired, or Not yet if not yet valid.`, + }, + "issuer": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 issuer.`, + }, + "public_key": { + Type: schema.TypeString, + Computed: true, + Description: `Public key component of the X.509 subject public key info.`, + }, + "serial_number": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 serial number.`, + }, + "sig_alg_name": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 signatureAlgorithm.`, + }, + "subject": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 subject.`, + }, + "subject_alternative_names": { + Type: schema.TypeList, + Computed: true, + Description: `X.509 subject alternative names (SANs) extension.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "valid_from": { + Type: schema.TypeString, + Computed: true, + Description: `X.509 notBefore validity period in milliseconds since epoch.`, + }, + "version": { + Type: schema.TypeInt, + Computed: true, + Description: `X.509 version.`, + }, + }, + }, + }, + }, + }, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `Optional.Type of Alias`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeKeystoresAliasesSelfSignedCertCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + aliasProp, err := expandApigeeKeystoresAliasesSelfSignedCertAlias(d.Get("alias"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("alias"); !tpgresource.IsEmptyValue(reflect.ValueOf(aliasProp)) && (ok || !reflect.DeepEqual(v, aliasProp)) { + obj["alias"] = aliasProp + } + subjectAlternativeDnsNamesProp, err := 
expandApigeeKeystoresAliasesSelfSignedCertSubjectAlternativeDnsNames(d.Get("subject_alternative_dns_names"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subject_alternative_dns_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(subjectAlternativeDnsNamesProp)) && (ok || !reflect.DeepEqual(v, subjectAlternativeDnsNamesProp)) { + obj["subjectAlternativeDnsNames"] = subjectAlternativeDnsNamesProp + } + keySizeProp, err := expandApigeeKeystoresAliasesSelfSignedCertKeySize(d.Get("key_size"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("key_size"); !tpgresource.IsEmptyValue(reflect.ValueOf(keySizeProp)) && (ok || !reflect.DeepEqual(v, keySizeProp)) { + obj["keySize"] = keySizeProp + } + sigAlgProp, err := expandApigeeKeystoresAliasesSelfSignedCertSigAlg(d.Get("sig_alg"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("sig_alg"); !tpgresource.IsEmptyValue(reflect.ValueOf(sigAlgProp)) && (ok || !reflect.DeepEqual(v, sigAlgProp)) { + obj["sigAlg"] = sigAlgProp + } + subjectProp, err := expandApigeeKeystoresAliasesSelfSignedCertSubject(d.Get("subject"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subject"); !tpgresource.IsEmptyValue(reflect.ValueOf(subjectProp)) && (ok || !reflect.DeepEqual(v, subjectProp)) { + obj["subject"] = subjectProp + } + certValidityInDaysProp, err := expandApigeeKeystoresAliasesSelfSignedCertCertValidityInDays(d.Get("cert_validity_in_days"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cert_validity_in_days"); !tpgresource.IsEmptyValue(reflect.ValueOf(certValidityInDaysProp)) && (ok || !reflect.DeepEqual(v, certValidityInDaysProp)) { + obj["certValidityInDays"] = certValidityInDaysProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases?alias={{alias}}&format=selfsignedcert") + if 
err != nil { + return err + } + + log.Printf("[DEBUG] Creating new KeystoresAliasesSelfSignedCert: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating KeystoresAliasesSelfSignedCert: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating KeystoresAliasesSelfSignedCert %q: %#v", d.Id(), res) + + return resourceApigeeKeystoresAliasesSelfSignedCertRead(d, meta) +} + +func resourceApigeeKeystoresAliasesSelfSignedCertRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, 
fmt.Sprintf("ApigeeKeystoresAliasesSelfSignedCert %q", d.Id())) + } + + if err := d.Set("certs_info", flattenApigeeKeystoresAliasesSelfSignedCertCertsInfo(res["certsInfo"], d, config)); err != nil { + return fmt.Errorf("Error reading KeystoresAliasesSelfSignedCert: %s", err) + } + if err := d.Set("type", flattenApigeeKeystoresAliasesSelfSignedCertType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading KeystoresAliasesSelfSignedCert: %s", err) + } + if err := d.Set("alias", flattenApigeeKeystoresAliasesSelfSignedCertAlias(res["alias"], d, config)); err != nil { + return fmt.Errorf("Error reading KeystoresAliasesSelfSignedCert: %s", err) + } + if err := d.Set("subject_alternative_dns_names", flattenApigeeKeystoresAliasesSelfSignedCertSubjectAlternativeDnsNames(res["subjectAlternativeDnsNames"], d, config)); err != nil { + return fmt.Errorf("Error reading KeystoresAliasesSelfSignedCert: %s", err) + } + + return nil +} + +func resourceApigeeKeystoresAliasesSelfSignedCertDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting KeystoresAliasesSelfSignedCert %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return 
transport_tpg.HandleNotFoundError(err, d, "KeystoresAliasesSelfSignedCert") + } + + log.Printf("[DEBUG] Finished deleting KeystoresAliasesSelfSignedCert %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeKeystoresAliasesSelfSignedCertImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats cannot import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{ + "organizations/(?P[^/]+)/environments/(?P[^/]+)/keystores/(?P[^/]+)/aliases/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeKeystoresAliasesSelfSignedCertCertsInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cert_info"] = + flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfo(original["certInfo"], d, config) + return []interface{}{transformed} +} +func flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "version": 
flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoVersion(original["version"], d, config), + "subject": flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoSubject(original["subject"], d, config), + "issuer": flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoIssuer(original["issuer"], d, config), + "expiry_date": flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoExpiryDate(original["expiryDate"], d, config), + "valid_from": flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoValidFrom(original["validFrom"], d, config), + "is_valid": flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoIsValid(original["isValid"], d, config), + "subject_alternative_names": flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoSubjectAlternativeNames(original["subjectAlternativeNames"], d, config), + "sig_alg_name": flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoSigAlgName(original["sigAlgName"], d, config), + "public_key": flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoPublicKey(original["publicKey"], d, config), + "basic_constraints": flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoBasicConstraints(original["basicConstraints"], d, config), + "serial_number": flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoSerialNumber(original["serialNumber"], d, config), + }) + } + return transformed +} +func flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoSubject(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoIssuer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoExpiryDate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoValidFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoIsValid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoSubjectAlternativeNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoSigAlgName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoPublicKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoBasicConstraints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesSelfSignedCertCertsInfoCertInfoSerialNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesSelfSignedCertType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeKeystoresAliasesSelfSignedCertAlias(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + 
+func flattenApigeeKeystoresAliasesSelfSignedCertSubjectAlternativeDnsNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["subject_alternative_name"] = + flattenApigeeKeystoresAliasesSelfSignedCertSubjectAlternativeDnsNamesSubjectAlternativeName(original["subjectAlternativeName"], d, config) + return []interface{}{transformed} +} +func flattenApigeeKeystoresAliasesSelfSignedCertSubjectAlternativeDnsNamesSubjectAlternativeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeKeystoresAliasesSelfSignedCertAlias(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesSelfSignedCertSubjectAlternativeDnsNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSubjectAlternativeName, err := expandApigeeKeystoresAliasesSelfSignedCertSubjectAlternativeDnsNamesSubjectAlternativeName(original["subject_alternative_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubjectAlternativeName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subjectAlternativeName"] = transformedSubjectAlternativeName + } + + return transformed, nil +} + +func expandApigeeKeystoresAliasesSelfSignedCertSubjectAlternativeDnsNamesSubjectAlternativeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandApigeeKeystoresAliasesSelfSignedCertKeySize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesSelfSignedCertSigAlg(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesSelfSignedCertSubject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCountryCode, err := expandApigeeKeystoresAliasesSelfSignedCertSubjectCountryCode(original["country_code"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCountryCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["countryCode"] = transformedCountryCode + } + + transformedState, err := expandApigeeKeystoresAliasesSelfSignedCertSubjectState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["state"] = transformedState + } + + transformedLocality, err := expandApigeeKeystoresAliasesSelfSignedCertSubjectLocality(original["locality"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocality); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["locality"] = transformedLocality + } + + transformedOrg, err := expandApigeeKeystoresAliasesSelfSignedCertSubjectOrg(original["org"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOrg); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["org"] = transformedOrg + } + + transformedOrgUnit, err := 
expandApigeeKeystoresAliasesSelfSignedCertSubjectOrgUnit(original["org_unit"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOrgUnit); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["orgUnit"] = transformedOrgUnit + } + + transformedCommonName, err := expandApigeeKeystoresAliasesSelfSignedCertSubjectCommonName(original["common_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCommonName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["commonName"] = transformedCommonName + } + + transformedEmail, err := expandApigeeKeystoresAliasesSelfSignedCertSubjectEmail(original["email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["email"] = transformedEmail + } + + return transformed, nil +} + +func expandApigeeKeystoresAliasesSelfSignedCertSubjectCountryCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesSelfSignedCertSubjectState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesSelfSignedCertSubjectLocality(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesSelfSignedCertSubjectOrg(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesSelfSignedCertSubjectOrgUnit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesSelfSignedCertSubjectCommonName(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesSelfSignedCertSubjectEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeKeystoresAliasesSelfSignedCertCertValidityInDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_nat_address.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_nat_address.go new file mode 100644 index 0000000000..bba1126cc7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_nat_address.go @@ -0,0 +1,278 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeNatAddress() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeNatAddressCreate, + Read: resourceApigeeNatAddressRead, + Delete: resourceApigeeNatAddressDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeNatAddressImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee instance associated with the Apigee environment, +in the format 'organizations/{{org_name}}/instances/{{instance_name}}'.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Resource ID of the NAT address.`, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `The allocated NAT IP address.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the NAT IP address.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeNatAddressCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandApigeeNatAddressName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, 
nameProp)) { + obj["name"] = nameProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/natAddresses") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new NatAddress: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating NatAddress: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{instance_id}}/natAddresses/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = ApigeeOperationWaitTimeWithResponse( + config, res, &opRes, "Creating NatAddress", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create NatAddress: %s", err) + } + + if err := d.Set("name", flattenApigeeNatAddressName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "{{instance_id}}/natAddresses/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating NatAddress %q: %#v", d.Id(), res) + + return resourceApigeeNatAddressRead(d, meta) +} + +func resourceApigeeNatAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/natAddresses/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeNatAddress %q", d.Id())) + } + + if err := d.Set("name", flattenApigeeNatAddressName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading NatAddress: %s", err) + } + if err := d.Set("ip_address", flattenApigeeNatAddressIpAddress(res["ipAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading NatAddress: %s", err) + } + if err := d.Set("state", flattenApigeeNatAddressState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading NatAddress: %s", err) + } + + return nil +} + +func resourceApigeeNatAddressDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ApigeeBasePath}}{{instance_id}}/natAddresses/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting NatAddress %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "NatAddress") + } + + err = ApigeeOperationWaitTime( + config, res, "Deleting NatAddress", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting NatAddress %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeNatAddressImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats cannot import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/natAddresses/(?P.+)", + "(?P.+)/(?P.+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{instance_id}}/natAddresses/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeNatAddressName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeNatAddressIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeNatAddressState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} 
+ +func expandApigeeNatAddressName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_organization.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_organization.go new file mode 100644 index 0000000000..b304f9fd3c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_organization.go @@ -0,0 +1,754 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceApigeeOrganization() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeOrganizationCreate, + Read: resourceApigeeOrganizationRead, + Update: resourceApigeeOrganizationUpdate, + Delete: resourceApigeeOrganizationDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeOrganizationImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(45 * time.Minute), + Update: schema.DefaultTimeout(45 * time.Minute), + Delete: schema.DefaultTimeout(45 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The project ID associated with the Apigee organization.`, + }, + "analytics_region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Primary GCP region for analytics data storage. For valid values, see [Create an Apigee organization](https://cloud.google.com/apigee/docs/api-platform/get-started/create-org).`, + }, + "authorized_network": { + Type: schema.TypeString, + Optional: true, + Description: `Compute Engine network used for Service Networking to be peered with Apigee runtime instances. +See [Getting started with the Service Networking API](https://cloud.google.com/service-infrastructure/docs/service-networking/getting-started). +Valid only when 'RuntimeType' is set to CLOUD. The value can be updated only when there are no runtime instances. 
For example: "default".`, + }, + "billing_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Billing type of the Apigee organization. See [Apigee pricing](https://cloud.google.com/apigee/pricing).`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of the Apigee organization.`, + }, + "disable_vpc_peering": { + Type: schema.TypeBool, + Optional: true, + Description: `Flag that specifies whether the VPC Peering through Private Google Access should be +disabled between the consumer network and Apigee. Required if an 'authorizedNetwork' +on the consumer project is not provided, in which case the flag should be set to 'true'. +Valid only when 'RuntimeType' is set to CLOUD. The value must be set before the creation +of any Apigee runtime instance and can be updated only when there are no runtime instances.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `The display name of the Apigee organization.`, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Properties defined in the Apigee organization profile.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "property": { + Type: schema.TypeList, + Optional: true, + Description: `List of all properties in the object.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the property.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `Value of the property.`, + }, + }, + }, + }, + }, + }, + }, + "retention": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"DELETION_RETENTION_UNSPECIFIED", "MINIMUM", ""}), + Description: `Optional. This setting is applicable only for organizations that are soft-deleted (i.e., BillingType +is not EVALUATION). 
It controls how long Organization data will be retained after the initial delete +operation completes. During this period, the Organization may be restored to its last known state. +After this period, the Organization will no longer be able to be restored. Default value: "DELETION_RETENTION_UNSPECIFIED" Possible values: ["DELETION_RETENTION_UNSPECIFIED", "MINIMUM"]`, + Default: "DELETION_RETENTION_UNSPECIFIED", + }, + "runtime_database_encryption_key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Cloud KMS key name used for encrypting the data that is stored and replicated across runtime instances. +Update is not allowed after the organization is created. +If not specified, a Google-Managed encryption key will be used. +Valid only when 'RuntimeType' is CLOUD. For example: 'projects/foo/locations/us/keyRings/bar/cryptoKeys/baz'.`, + }, + "runtime_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"CLOUD", "HYBRID", ""}), + Description: `Runtime type of the Apigee organization based on the Apigee subscription purchased. Default value: "CLOUD" Possible values: ["CLOUD", "HYBRID"]`, + Default: "CLOUD", + }, + "apigee_project_id": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Project ID of the Apigee Tenant Project.`, + }, + "ca_certificate": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Base64-encoded public certificate for the root CA of the Apigee organization. +Valid only when 'RuntimeType' is CLOUD. A base64-encoded string.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Name of the Apigee organization.`, + }, + "subscription_type": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Subscription type of the Apigee organization. 
+Valid values include trial (free, limited, and for evaluation purposes only) or paid (full subscription has been purchased).`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeOrganizationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandApigeeOrganizationDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandApigeeOrganizationDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + analyticsRegionProp, err := expandApigeeOrganizationAnalyticsRegion(d.Get("analytics_region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("analytics_region"); !tpgresource.IsEmptyValue(reflect.ValueOf(analyticsRegionProp)) && (ok || !reflect.DeepEqual(v, analyticsRegionProp)) { + obj["analyticsRegion"] = analyticsRegionProp + } + authorizedNetworkProp, err := expandApigeeOrganizationAuthorizedNetwork(d.Get("authorized_network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("authorized_network"); !tpgresource.IsEmptyValue(reflect.ValueOf(authorizedNetworkProp)) && (ok || !reflect.DeepEqual(v, authorizedNetworkProp)) { + obj["authorizedNetwork"] = authorizedNetworkProp + } + disableVpcPeeringProp, err := expandApigeeOrganizationDisableVpcPeering(d.Get("disable_vpc_peering"), d, config) + if err != nil { 
+ return err + } else if v, ok := d.GetOkExists("disable_vpc_peering"); !tpgresource.IsEmptyValue(reflect.ValueOf(disableVpcPeeringProp)) && (ok || !reflect.DeepEqual(v, disableVpcPeeringProp)) { + obj["disableVpcPeering"] = disableVpcPeeringProp + } + runtimeTypeProp, err := expandApigeeOrganizationRuntimeType(d.Get("runtime_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("runtime_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(runtimeTypeProp)) && (ok || !reflect.DeepEqual(v, runtimeTypeProp)) { + obj["runtimeType"] = runtimeTypeProp + } + billingTypeProp, err := expandApigeeOrganizationBillingType(d.Get("billing_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("billing_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(billingTypeProp)) && (ok || !reflect.DeepEqual(v, billingTypeProp)) { + obj["billingType"] = billingTypeProp + } + runtimeDatabaseEncryptionKeyNameProp, err := expandApigeeOrganizationRuntimeDatabaseEncryptionKeyName(d.Get("runtime_database_encryption_key_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("runtime_database_encryption_key_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(runtimeDatabaseEncryptionKeyNameProp)) && (ok || !reflect.DeepEqual(v, runtimeDatabaseEncryptionKeyNameProp)) { + obj["runtimeDatabaseEncryptionKeyName"] = runtimeDatabaseEncryptionKeyNameProp + } + propertiesProp, err := expandApigeeOrganizationProperties(d.Get("properties"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("properties"); !tpgresource.IsEmptyValue(reflect.ValueOf(propertiesProp)) && (ok || !reflect.DeepEqual(v, propertiesProp)) { + obj["properties"] = propertiesProp + } + + obj, err = resourceApigeeOrganizationEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations?parent=projects/{{project_id}}") + if err != nil { + return err + 
} + + log.Printf("[DEBUG] Creating new Organization: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Organization: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = ApigeeOperationWaitTimeWithResponse( + config, res, &opRes, "Creating Organization", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Organization: %s", err) + } + + if err := d.Set("name", flattenApigeeOrganizationName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "organizations/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Organization %q: %#v", d.Id(), res) + + return resourceApigeeOrganizationRead(d, meta) +} + +func resourceApigeeOrganizationRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeOrganization %q", d.Id())) + } + + if err := d.Set("name", flattenApigeeOrganizationName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Organization: %s", err) + } + if err := d.Set("display_name", flattenApigeeOrganizationDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Organization: %s", err) + } + if err := d.Set("description", flattenApigeeOrganizationDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Organization: %s", err) + } + if err := d.Set("analytics_region", flattenApigeeOrganizationAnalyticsRegion(res["analyticsRegion"], d, config)); err != nil { + return fmt.Errorf("Error reading Organization: %s", err) + } + if err := d.Set("authorized_network", flattenApigeeOrganizationAuthorizedNetwork(res["authorizedNetwork"], d, config)); err != 
nil { + return fmt.Errorf("Error reading Organization: %s", err) + } + if err := d.Set("disable_vpc_peering", flattenApigeeOrganizationDisableVpcPeering(res["disableVpcPeering"], d, config)); err != nil { + return fmt.Errorf("Error reading Organization: %s", err) + } + if err := d.Set("runtime_type", flattenApigeeOrganizationRuntimeType(res["runtimeType"], d, config)); err != nil { + return fmt.Errorf("Error reading Organization: %s", err) + } + if err := d.Set("subscription_type", flattenApigeeOrganizationSubscriptionType(res["subscriptionType"], d, config)); err != nil { + return fmt.Errorf("Error reading Organization: %s", err) + } + if err := d.Set("billing_type", flattenApigeeOrganizationBillingType(res["billingType"], d, config)); err != nil { + return fmt.Errorf("Error reading Organization: %s", err) + } + if err := d.Set("ca_certificate", flattenApigeeOrganizationCaCertificate(res["caCertificate"], d, config)); err != nil { + return fmt.Errorf("Error reading Organization: %s", err) + } + if err := d.Set("runtime_database_encryption_key_name", flattenApigeeOrganizationRuntimeDatabaseEncryptionKeyName(res["runtimeDatabaseEncryptionKeyName"], d, config)); err != nil { + return fmt.Errorf("Error reading Organization: %s", err) + } + if err := d.Set("properties", flattenApigeeOrganizationProperties(res["properties"], d, config)); err != nil { + return fmt.Errorf("Error reading Organization: %s", err) + } + if err := d.Set("apigee_project_id", flattenApigeeOrganizationApigeeProjectId(res["apigeeProjectId"], d, config)); err != nil { + return fmt.Errorf("Error reading Organization: %s", err) + } + + return nil +} + +func resourceApigeeOrganizationUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + displayNameProp, err := 
expandApigeeOrganizationDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandApigeeOrganizationDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + analyticsRegionProp, err := expandApigeeOrganizationAnalyticsRegion(d.Get("analytics_region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("analytics_region"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, analyticsRegionProp)) { + obj["analyticsRegion"] = analyticsRegionProp + } + authorizedNetworkProp, err := expandApigeeOrganizationAuthorizedNetwork(d.Get("authorized_network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("authorized_network"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, authorizedNetworkProp)) { + obj["authorizedNetwork"] = authorizedNetworkProp + } + disableVpcPeeringProp, err := expandApigeeOrganizationDisableVpcPeering(d.Get("disable_vpc_peering"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disable_vpc_peering"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disableVpcPeeringProp)) { + obj["disableVpcPeering"] = disableVpcPeeringProp + } + runtimeTypeProp, err := expandApigeeOrganizationRuntimeType(d.Get("runtime_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("runtime_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeTypeProp)) { + obj["runtimeType"] = 
runtimeTypeProp + } + billingTypeProp, err := expandApigeeOrganizationBillingType(d.Get("billing_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("billing_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, billingTypeProp)) { + obj["billingType"] = billingTypeProp + } + runtimeDatabaseEncryptionKeyNameProp, err := expandApigeeOrganizationRuntimeDatabaseEncryptionKeyName(d.Get("runtime_database_encryption_key_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("runtime_database_encryption_key_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeDatabaseEncryptionKeyNameProp)) { + obj["runtimeDatabaseEncryptionKeyName"] = runtimeDatabaseEncryptionKeyNameProp + } + propertiesProp, err := expandApigeeOrganizationProperties(d.Get("properties"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("properties"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, propertiesProp)) { + obj["properties"] = propertiesProp + } + + obj, err = resourceApigeeOrganizationEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Organization %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Organization %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Organization %q: %#v", d.Id(), 
res) + } + + err = ApigeeOperationWaitTime( + config, res, "Updating Organization", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceApigeeOrganizationRead(d, meta) +} + +func resourceApigeeOrganizationDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}?retention={{retention}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Organization %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Organization") + } + + log.Printf("[DEBUG] Finished deleting Organization %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeOrganizationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + parts := strings.Split(d.Get("name").(string), "/") + + var projectId string + switch len(parts) { + case 1: + projectId = parts[0] + case 2: + projectId = parts[1] + default: + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s or %s", + d.Get("name"), + "{{name}}", + 
"organizations/{{name}}", + ) + } + + if err := d.Set("name", projectId); err != nil { + return nil, fmt.Errorf("Error setting organization: %s", err) + } + + if err := d.Set("project_id", projectId); err != nil { + return nil, fmt.Errorf("Error setting organization: %s", err) + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeOrganizationName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeOrganizationDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeOrganizationDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeOrganizationAnalyticsRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeOrganizationAuthorizedNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeOrganizationDisableVpcPeering(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeOrganizationRuntimeType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeOrganizationSubscriptionType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeOrganizationBillingType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeOrganizationCaCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenApigeeOrganizationRuntimeDatabaseEncryptionKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeOrganizationProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["property"] = + flattenApigeeOrganizationPropertiesProperty(original["property"], d, config) + return []interface{}{transformed} +} +func flattenApigeeOrganizationPropertiesProperty(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenApigeeOrganizationPropertiesPropertyName(original["name"], d, config), + "value": flattenApigeeOrganizationPropertiesPropertyValue(original["value"], d, config), + }) + } + return transformed +} +func flattenApigeeOrganizationPropertiesPropertyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeOrganizationPropertiesPropertyValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeOrganizationApigeeProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeOrganizationDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeOrganizationDescription(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeOrganizationAnalyticsRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeOrganizationAuthorizedNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeOrganizationDisableVpcPeering(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeOrganizationRuntimeType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeOrganizationBillingType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeOrganizationRuntimeDatabaseEncryptionKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeOrganizationProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProperty, err := expandApigeeOrganizationPropertiesProperty(original["property"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProperty); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["property"] = transformedProperty + } + + return transformed, nil +} + +func expandApigeeOrganizationPropertiesProperty(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + 
for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandApigeeOrganizationPropertiesPropertyName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandApigeeOrganizationPropertiesPropertyValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandApigeeOrganizationPropertiesPropertyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeOrganizationPropertiesPropertyValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceApigeeOrganizationEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + obj["name"] = d.Get("project_id").(string) + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_organization_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_organization_sweeper.go new file mode 100644 index 0000000000..2275dee643 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_organization_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ApigeeOrganization", testSweepApigeeOrganization) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepApigeeOrganization(region string) error { + resourceName := "ApigeeOrganization" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://apigee.googleapis.com/v1/organizations", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["organizations"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://apigee.googleapis.com/v1/organizations/{{name}}?retention={{retention}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + 
log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_sharedflow.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_sharedflow.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_sharedflow.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_sharedflow.go index e0c136552d..9022a0ea0f 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apigee_sharedflow.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_sharedflow.go @@ -1,3 +1,5 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 // ---------------------------------------------------------------------------- // // This file is partially automatically generated by Magic Modules and with manual @@ -5,7 +7,7 @@ // // ---------------------------------------------------------------------------- -package google +package apigee import ( "context" @@ -20,6 +22,8 @@ import ( "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/googleapi" ) @@ -60,11 +64,10 @@ func ResourceApigeeSharedFlow() *schema.Resource { Description: `The ID of the shared flow.`, }, "org_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee Organization associated with the Apigee instance, -in the format 'organizations/{{org_name}}'.`, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee Organization name associated with the Apigee instance.`, }, "latest_revision_id": { Type: schema.TypeString, @@ -106,7 +109,7 @@ in the format 'organizations/{{org_name}}'.`, "config_bundle": { Type: schema.TypeString, Required: true, - Description: `A path to the config bundle zip you want to upload. 
Must be defined if content is not.`, + Description: `Path to the config zip bundle`, }, "md5hash": { Type: schema.TypeString, @@ -121,7 +124,7 @@ in the format 'organizations/{{org_name}}'.`, DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { localMd5Hash := "" if config_bundle, ok := d.GetOkExists("config_bundle"); ok { - localMd5Hash = getFileMd5Hash(config_bundle.(string)) + localMd5Hash = tpgresource.GetFileMd5Hash(config_bundle.(string)) } if localMd5Hash == "" { return false @@ -150,8 +153,8 @@ func resourceApigeeSharedFlowCreate(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] resourceApigeeSharedFlowCreate, org_id=, %s", d.Get("org_id").(string)) log.Printf("[DEBUG] resourceApigeeSharedFlowCreate, config_bundle=, %s", d.Get("config_bundle").(string)) - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -164,19 +167,19 @@ func resourceApigeeSharedFlowCreate(d *schema.ResourceData, meta interface{}) er if err != nil { return err } - localMd5Hash = getFileMd5Hash(configBundlePath.(string)) + localMd5Hash = tpgresource.GetFileMd5Hash(configBundlePath.(string)) } else { return fmt.Errorf("Error, \"config_bundle\" must be specified") } - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/sharedflows?name={{name}}&action=import") + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/sharedflows?name={{name}}&action=import") if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } @@ -189,7 +192,7 @@ func resourceApigeeSharedFlowCreate(d *schema.ResourceData, meta 
interface{}) er } // Store the ID now - id, err := replaceVars(d, config, "organizations/{{org_id}}/sharedflows/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org_id}}/sharedflows/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -212,13 +215,13 @@ func resourceApigeeSharedFlowUpdate(d *schema.ResourceData, meta interface{}) er } func resourceApigeeSharedFlowRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/sharedflows/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/sharedflows/{{name}}") if err != nil { return err } @@ -227,14 +230,20 @@ func resourceApigeeSharedFlowRead(d *schema.ResourceData, meta interface{}) erro billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } log.Printf("[DEBUG] resourceApigeeSharedFlowRead sendRequest") log.Printf("[DEBUG] resourceApigeeSharedFlowRead, url=, %s", url) - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ApigeeSharedFlow %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeSharedFlow %q", d.Id())) } log.Printf("[DEBUG] resourceApigeeSharedFlowRead sendRequest completed") previousLastModifiedAt := 
getApigeeSharedFlowLastModifiedAt(d) @@ -280,15 +289,15 @@ func getApigeeSharedFlowLastModifiedAt(d *schema.ResourceData) string { func resourceApigeeSharedFlowDelete(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] resourceApigeeSharedFlowDelete") - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/sharedflows/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/sharedflows/{{name}}") if err != nil { return err } @@ -297,13 +306,21 @@ func resourceApigeeSharedFlowDelete(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Deleting SharedFlow %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "SharedFlow") + return transport_tpg.HandleNotFoundError(err, d, "SharedFlow") } log.Printf("[DEBUG] Finished deleting SharedFlow %q: %#v", d.Id(), res) @@ -311,8 +328,8 @@ func resourceApigeeSharedFlowDelete(d *schema.ResourceData, meta interface{}) er } func resourceApigeeSharedFlowImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := 
parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "organizations/(?P[^/]+)/sharedflows/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", }, d, config); err != nil { @@ -320,7 +337,7 @@ func resourceApigeeSharedFlowImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "organizations/{{org_id}}/sharedflows/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org_id}}/sharedflows/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) @@ -331,7 +348,7 @@ func resourceApigeeSharedFlowImport(d *schema.ResourceData, meta interface{}) ([ return []*schema.ResourceData{d}, nil } -func flattenApigeeSharedFlowMetaData(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenApigeeSharedFlowMetaData(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -348,36 +365,36 @@ func flattenApigeeSharedFlowMetaData(v interface{}, d *schema.ResourceData, conf flattenApigeeSharedFlowMetaDataSubType(original["subType"], d, config) return []interface{}{transformed} } -func flattenApigeeSharedFlowMetaDataCreatedAt(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenApigeeSharedFlowMetaDataCreatedAt(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenApigeeSharedFlowMetaDataLastModifiedAt(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenApigeeSharedFlowMetaDataLastModifiedAt(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenApigeeSharedFlowMetaDataSubType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenApigeeSharedFlowMetaDataSubType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } 
-func flattenApigeeSharedFlowName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenApigeeSharedFlowName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenApigeeSharedFlowRevision(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenApigeeSharedFlowRevision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenApigeeSharedFlowLatestRevisionId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenApigeeSharedFlowLatestRevisionId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandApigeeSharedFlowName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandApigeeSharedFlowName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } // sendRequestRawBodyWithTimeout is derived from sendRequestWithTimeout with direct pass through of request body -func sendRequestRawBodyWithTimeout(config *Config, method, project, rawurl, userAgent string, body io.Reader, contentType string, timeout time.Duration, errorRetryPredicates ...RetryErrorPredicateFunc) (map[string]interface{}, error) { +func sendRequestRawBodyWithTimeout(config *transport_tpg.Config, method, project, rawurl, userAgent string, body io.Reader, contentType string, timeout time.Duration, errorRetryPredicates ...transport_tpg.RetryErrorPredicateFunc) (map[string]interface{}, error) { log.Printf("[DEBUG] sendRequestRawBodyWithTimeout start") reqHeaders := make(http.Header) reqHeaders.Set("User-Agent", userAgent) @@ -397,8 +414,8 @@ func sendRequestRawBodyWithTimeout(config *Config, method, project, rawurl, user log.Printf("[DEBUG] sendRequestRawBodyWithTimeout sending request") - err := RetryTimeDuration( - func() error { + err := 
transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { req, err := http.NewRequest(method, rawurl, body) if err != nil { return err @@ -417,9 +434,9 @@ func sendRequestRawBodyWithTimeout(config *Config, method, project, rawurl, user return nil }, - timeout, - errorRetryPredicates..., - ) + Timeout: timeout, + ErrorRetryPredicates: errorRetryPredicates, + }) if err != nil { return nil, err } @@ -449,7 +466,7 @@ func apigeeSharedflowDetectBundleUpdate(_ context.Context, diff *schema.Resource oldBundleHash := tmp.(string) currentBundleHash := "" if config_bundle, ok := diff.GetOkExists("config_bundle"); ok { - currentBundleHash = getFileMd5Hash(config_bundle.(string)) + currentBundleHash = tpgresource.GetFileMd5Hash(config_bundle.(string)) } log.Printf("[DEBUG] apigeeSharedflowDetectUpdate detect_md5hash: %s -> %s", oldBundleHash, currentBundleHash) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_sharedflow_deployment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_sharedflow_deployment.go new file mode 100644 index 0000000000..95f1c16687 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_sharedflow_deployment.go @@ -0,0 +1,267 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package apigee + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeSharedFlowDeployment() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeSharedflowDeploymentCreate, + Read: resourceApigeeSharedflowDeploymentRead, + Update: resourceApigeeSharedflowDeploymentUpdate, + Delete: resourceApigeeSharedflowDeploymentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeSharedflowDeploymentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "environment": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource ID of the environment.`, + }, + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Apigee Organization associated with the Apigee instance`, + }, + "revision": { + Type: schema.TypeString, + Required: true, + Description: `Revision of the Sharedflow to be deployed.`, + }, + "service_account": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: `The service account represents the identity of the deployed proxy, and determines what permissions it has. 
The format must be {ACCOUNT_ID}@{PROJECT}.iam.gserviceaccount.com.`, + }, + "sharedflow_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Id of the Sharedflow to be deployed.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeSharedflowDeploymentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/sharedflows/{{sharedflow_id}}/revisions/{{revision}}/deployments?override=true&serviceAccount={{service_account}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new SharedflowDeployment at %s", url) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating SharedflowDeployment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/sharedflows/{{sharedflow_id}}/revisions/{{revision}}/deployments") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating SharedflowDeployment %q: %#v", d.Id(), res) + + return resourceApigeeSharedflowDeploymentRead(d, meta) +} + +func resourceApigeeSharedflowDeploymentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/sharedflows/{{sharedflow_id}}/revisions/{{revision}}/deployments") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + log.Printf("[DEBUG] Reading SharedflowDeployment at %s", url) + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeSharedflowDeployment %q", d.Id())) + } + log.Printf("[DEBUG] ApigeeSharedflowDeployment deployStartTime %s", res["deployStartTime"]) + + return nil +} + +func resourceApigeeSharedflowDeploymentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/sharedflows/{{sharedflow_id}}/revisions/{{revision}}/deployments?override=true&serviceAccount={{service_account}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating new SharedflowDeployment at %s", url) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return 
fmt.Errorf("Error updating SharedflowDeployment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/sharedflows/{{sharedflow_id}}/revisions/{{revision}}/deployments") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished updating SharedflowDeployment %q: %#v", d.Id(), res) + + return resourceApigeeSharedflowDeploymentRead(d, meta) +} + +func resourceApigeeSharedflowDeploymentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{org_id}}/environments/{{environment}}/sharedflows/{{sharedflow_id}}/revisions/{{revision}}/deployments") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting SharedflowDeployment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "SharedflowDeployment") + } + + log.Printf("[DEBUG] Finished deleting SharedflowDeployment %q: %#v", d.Id(), res) + return nil +} + +func resourceApigeeSharedflowDeploymentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + 
"organizations/(?P[^/]+)/environments/(?P[^/]+)/sharedflows/(?P[^/]+)/revisions/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{org_id}}/environments/{{environment}}/sharedflows/{{sharedflow_id}}/revisions/{{revision}}/deployments") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeSharedflowDeploymentOrgId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeSharedflowDeploymentEnvironment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeSharedflowDeploymentSharedflowId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeSharedflowDeploymentRevision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeSharedflowDeploymentServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_sharedflow_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_sharedflow_sweeper.go new file mode 100644 index 0000000000..a131623225 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_sharedflow_sweeper.go @@ -0,0 +1,128 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package apigee + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ApigeeSharedFlow", testSweepApigeeSharedFlow) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepApigeeSharedFlow(region string) error { + resourceName := "ApigeeSharedFlow" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://apigee.googleapis.com/v1/organizations/{{org_id}}/sharedflows/{{name}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + 
return nil + } + + resourceList, ok := res["sharedFlows"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://apigee.googleapis.com/v1/organizations/{{org_id}}/sharedflows/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_sync_authorization.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_sync_authorization.go new file mode 100644 index 0000000000..bc988ecbd1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apigee/resource_apigee_sync_authorization.go @@ -0,0 +1,276 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package apigee + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceApigeeSyncAuthorization() *schema.Resource { + return &schema.Resource{ + Create: resourceApigeeSyncAuthorizationCreate, + Read: resourceApigeeSyncAuthorizationRead, + Update: resourceApigeeSyncAuthorizationUpdate, + Delete: resourceApigeeSyncAuthorizationDelete, + + Importer: &schema.ResourceImporter{ + State: resourceApigeeSyncAuthorizationImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "identities": { + Type: schema.TypeList, + Required: true, + Description: `Array of service accounts to grant access to control plane resources, each specified using the following format: 'serviceAccount:service-account-name'. + +The 'service-account-name' is formatted like an email address. For example: my-synchronizer-manager-serviceAccount@my_project_id.iam.gserviceaccount.com + +You might specify multiple service accounts, for example, if you have multiple environments and wish to assign a unique service account to each one. + +The service accounts must have **Apigee Synchronizer Manager** role. 
See also [Create service accounts](https://cloud.google.com/apigee/docs/hybrid/v1.8/sa-about#create-the-service-accounts).`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the Apigee organization.`, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `Entity tag (ETag) used for optimistic concurrency control as a way to help prevent simultaneous updates from overwriting each other. +Used internally during updates.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceApigeeSyncAuthorizationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + identitiesProp, err := expandApigeeSyncAuthorizationIdentities(d.Get("identities"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("identities"); ok || !reflect.DeepEqual(v, identitiesProp) { + obj["identities"] = identitiesProp + } + etagProp, err := expandApigeeSyncAuthorizationEtag(d.Get("etag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(etagProp)) && (ok || !reflect.DeepEqual(v, etagProp)) { + obj["etag"] = etagProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}:setSyncAuthorization") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new SyncAuthorization: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + 
UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating SyncAuthorization: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{name}}/syncAuthorization") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating SyncAuthorization %q: %#v", d.Id(), res) + + return resourceApigeeSyncAuthorizationRead(d, meta) +} + +func resourceApigeeSyncAuthorizationRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}:getSyncAuthorization") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ApigeeSyncAuthorization %q", d.Id())) + } + + if err := d.Set("identities", flattenApigeeSyncAuthorizationIdentities(res["identities"], d, config)); err != nil { + return fmt.Errorf("Error reading SyncAuthorization: %s", err) + } + if err := d.Set("etag", flattenApigeeSyncAuthorizationEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading SyncAuthorization: %s", err) + } + + return nil +} + +func resourceApigeeSyncAuthorizationUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + identitiesProp, err := expandApigeeSyncAuthorizationIdentities(d.Get("identities"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("identities"); ok || !reflect.DeepEqual(v, identitiesProp) { + obj["identities"] = identitiesProp + } + etagProp, err := expandApigeeSyncAuthorizationEtag(d.Get("etag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, etagProp)) { + obj["etag"] = etagProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}:setSyncAuthorization") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating SyncAuthorization %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating SyncAuthorization %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating SyncAuthorization %q: %#v", d.Id(), res) + } + + return resourceApigeeSyncAuthorizationRead(d, meta) +} + +func resourceApigeeSyncAuthorizationDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] Apigee SyncAuthorization resources"+ + " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func resourceApigeeSyncAuthorizationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "organizations/(?P[^/]+)/syncAuthorization", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{name}}/syncAuthorization") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenApigeeSyncAuthorizationIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenApigeeSyncAuthorizationEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandApigeeSyncAuthorizationIdentities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApigeeSyncAuthorizationEtag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apikeys_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apikeys/resource_apikeys_key.go similarity index 88% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apikeys_key.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apikeys/resource_apikeys_key.go index 1dc0f671fa..7c02665746 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_apikeys_key.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apikeys/resource_apikeys_key.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package apikeys import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" apikeys "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apikeys" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceApikeysKey() *schema.Resource { @@ -63,7 +70,7 @@ func ResourceApikeysKey() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -226,8 +233,8 @@ func ApikeysKeyRestrictionsServerKeyRestrictionsSchema() *schema.Resource { } func resourceApikeysKeyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -244,18 +251,18 @@ func resourceApikeysKeyCreate(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, 
config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -277,8 +284,8 @@ func resourceApikeysKeyCreate(d *schema.ResourceData, meta interface{}) error { } func resourceApikeysKeyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -290,17 +297,17 @@ func resourceApikeysKeyRead(d *schema.ResourceData, meta interface{}) error { Restrictions: expandApikeysKeyRestrictions(d.Get("restrictions")), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, 
client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -309,7 +316,7 @@ func resourceApikeysKeyRead(d *schema.ResourceData, meta interface{}) error { res, err := client.GetKey(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ApikeysKey %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("name", res.Name); err != nil { @@ -334,8 +341,8 @@ func resourceApikeysKeyRead(d *schema.ResourceData, meta interface{}) error { return nil } func resourceApikeysKeyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -346,19 +353,19 @@ func resourceApikeysKeyUpdate(d *schema.ResourceData, meta interface{}) error { Project: dcl.String(project), Restrictions: expandApikeysKeyRestrictions(d.Get("restrictions")), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := 
transport_tpg.NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -380,8 +387,8 @@ func resourceApikeysKeyUpdate(d *schema.ResourceData, meta interface{}) error { } func resourceApikeysKeyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -394,17 +401,17 @@ func resourceApikeysKeyDelete(d *schema.ResourceData, meta interface{}) error { } log.Printf("[DEBUG] Deleting Key %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLApikeysClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -419,9 +426,9 @@ func resourceApikeysKeyDelete(d *schema.ResourceData, meta interface{}) error { } func resourceApikeysKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := 
parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/global/keys/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -430,7 +437,7 @@ func resourceApikeysKeyImport(d *schema.ResourceData, meta interface{}) ([]*sche } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/global/keys/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/global/keys/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -582,7 +589,7 @@ func expandApikeysKeyRestrictionsApiTargets(o interface{}) *apikeys.KeyRestricti obj := o.(map[string]interface{}) return &apikeys.KeyRestrictionsApiTargets{ Service: dcl.String(obj["service"].(string)), - Methods: expandStringArray(obj["methods"]), + Methods: tpgdclresource.ExpandStringArray(obj["methods"]), } } @@ -623,7 +630,7 @@ func expandApikeysKeyRestrictionsBrowserKeyRestrictions(o interface{}) *apikeys. 
} obj := objArr[0].(map[string]interface{}) return &apikeys.KeyRestrictionsBrowserKeyRestrictions{ - AllowedReferrers: expandStringArray(obj["allowed_referrers"]), + AllowedReferrers: tpgdclresource.ExpandStringArray(obj["allowed_referrers"]), } } @@ -649,7 +656,7 @@ func expandApikeysKeyRestrictionsIosKeyRestrictions(o interface{}) *apikeys.KeyR } obj := objArr[0].(map[string]interface{}) return &apikeys.KeyRestrictionsIosKeyRestrictions{ - AllowedBundleIds: expandStringArray(obj["allowed_bundle_ids"]), + AllowedBundleIds: tpgdclresource.ExpandStringArray(obj["allowed_bundle_ids"]), } } @@ -675,7 +682,7 @@ func expandApikeysKeyRestrictionsServerKeyRestrictions(o interface{}) *apikeys.K } obj := objArr[0].(map[string]interface{}) return &apikeys.KeyRestrictionsServerKeyRestrictions{ - AllowedIps: expandStringArray(obj["allowed_ips"]), + AllowedIps: tpgdclresource.ExpandStringArray(obj["allowed_ips"]), } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apikeys/resource_apikeys_key_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apikeys/resource_apikeys_key_sweeper.go new file mode 100644 index 0000000000..f22ee9ecc1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/apikeys/resource_apikeys_key_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). 
+// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package apikeys + +import ( + "context" + "log" + "testing" + + apikeys "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apikeys" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ApikeysKey", testSweepApikeysKey) +} + +func testSweepApikeysKey(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ApikeysKey") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLApikeysClient(config, config.UserAgent, "", 0) + err = client.DeleteAllKey(context.Background(), d["project"], isDeletableApikeysKey) + if err != nil { + return err + } + return nil +} + +func isDeletableApikeysKey(r *apikeys.Key) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/appengine_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/appengine_operation.go new file mode 100644 index 0000000000..ae86a96dbc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/appengine_operation.go @@ -0,0 +1,76 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package appengine + +import ( + "encoding/json" + "fmt" + "regexp" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "google.golang.org/api/appengine/v1" +) + +var ( + appEngineOperationIdRegexp = regexp.MustCompile(fmt.Sprintf("apps/%s/operations/(.*)", verify.ProjectRegex)) +) + +type AppEngineOperationWaiter struct { + Service *appengine.APIService + AppId string + tpgresource.CommonOperationWaiter +} + +func (w *AppEngineOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + matches := appEngineOperationIdRegexp.FindStringSubmatch(w.Op.Name) + if len(matches) != 2 { + return nil, fmt.Errorf("Expected %d results of parsing operation name, got %d from %s", 2, len(matches), 
w.Op.Name) + } + return w.Service.Apps.Operations.Get(w.AppId, matches[1]).Do() +} + +func AppEngineOperationWaitTimeWithResponse(config *transport_tpg.Config, res interface{}, response *map[string]interface{}, appId, activity, userAgent string, timeout time.Duration) error { + op := &appengine.Operation{} + err := tpgresource.Convert(res, op) + if err != nil { + return err + } + + w := &AppEngineOperationWaiter{ + Service: config.NewAppEngineClient(userAgent), + AppId: appId, + } + + if err := w.SetOp(op); err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func AppEngineOperationWaitTime(config *transport_tpg.Config, res interface{}, appId, activity, userAgent string, timeout time.Duration) error { + op := &appengine.Operation{} + err := tpgresource.Convert(res, op) + if err != nil { + return err + } + + w := &AppEngineOperationWaiter{ + Service: config.NewAppEngineClient(userAgent), + AppId: appId, + } + + if err := w.SetOp(op); err != nil { + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_app_engine_default_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/data_source_google_app_engine_default_service_account.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_app_engine_default_service_account.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/data_source_google_app_engine_default_service_account.go index 45ddec68e7..33695e4d25 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_app_engine_default_service_account.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/data_source_google_app_engine_default_service_account.go @@ -1,9 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package appengine import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleAppEngineDefaultServiceAccount() *schema.Resource { @@ -40,27 +44,27 @@ func DataSourceGoogleAppEngineDefaultServiceAccount() *schema.Resource { } func dataSourceGoogleAppEngineDefaultServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } serviceAccountEmail := fmt.Sprintf("%s@appspot.gserviceaccount.com", project) - serviceAccountName, err := serviceAccountFQN(serviceAccountEmail, d, config) + serviceAccountName, err := tpgresource.ServiceAccountFQN(serviceAccountEmail, d, config) if err != nil { return err } sa, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(serviceAccountName).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Service Account %q", serviceAccountName)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Service Account %q", serviceAccountName)) } d.SetId(sa.Name) diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_app_version_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_app_version_sweeper.go new file mode 100644 index 0000000000..d835951cd2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_app_version_sweeper.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package appengine + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// This will sweep both Standard and Flexible App Engine App Versions +func init() { + sweeper.AddTestSweepers("AppEngineAppVersion", testSweepAppEngineAppVersion) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepAppEngineAppVersion(region string) error { + resourceName := "AppEngineAppVersion" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + servicesUrl := "https://appengine.googleapis.com/v1/apps/" + config.Project + "/services" + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: servicesUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", servicesUrl, err) + return nil + } + + resourceList, ok := 
res["services"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Count items that weren't sweeped. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["id"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource id was nil", resourceName) + return nil + } + + id := obj["id"].(string) + // Increment count and skip if resource is not sweepable. + if !sweeper.IsSweepableTestResource(id) { + nonPrefixCount++ + continue + } + + deleteUrl := servicesUrl + "/" + id + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, id) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items without tf_test prefix remain.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_application.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_application.go similarity index 91% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_application.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_application.go index 7d7f181149..7b69cd52d0 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_application.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_application.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package appengine import ( "context" @@ -9,6 +11,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" appengine "google.golang.org/api/appengine/v1" ) @@ -38,7 +43,7 @@ func ResourceAppEngineApplication() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ValidateFunc: validateProjectID(), + ValidateFunc: verify.ValidateProjectID(), Description: `The project ID to create the application under.`, }, "auth_domain": { @@ -199,13 +204,13 @@ func appEngineApplicationLocationIDCustomizeDiff(_ context.Context, d *schema.Re } func resourceAppEngineApplicationCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -214,12 +219,12 @@ func resourceAppEngineApplicationCreate(d *schema.ResourceData, meta interface{} return err } - lockName, err := replaceVars(d, config, "apps/{{project}}") + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") if err != 
nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) log.Printf("[DEBUG] Creating App Engine App") op, err := config.NewAppEngineClient(userAgent).Apps.Create(app).Do() @@ -241,8 +246,8 @@ func resourceAppEngineApplicationCreate(d *schema.ResourceData, meta interface{} } func resourceAppEngineApplicationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -250,7 +255,7 @@ func resourceAppEngineApplicationRead(d *schema.ResourceData, meta interface{}) app, err := config.NewAppEngineClient(userAgent).Apps.Get(pid).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("App Engine Application %q", pid)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("App Engine Application %q", pid)) } if err := d.Set("auth_domain", app.AuthDomain); err != nil { return fmt.Errorf("Error setting auth_domain: %s", err) @@ -313,8 +318,8 @@ func resourceAppEngineApplicationRead(d *schema.ResourceData, meta interface{}) } func resourceAppEngineApplicationUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -324,12 +329,12 @@ func resourceAppEngineApplicationUpdate(d *schema.ResourceData, meta interface{} return err } - lockName, err := replaceVars(d, config, "apps/{{project}}") + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + 
transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) log.Printf("[DEBUG] Updating App Engine App") op, err := config.NewAppEngineClient(userAgent).Apps.Patch(pid, app).UpdateMask("authDomain,databaseType,servingStatus,featureSettings.splitHealthChecks,iap").Do() diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_application_url_dispatch_rules.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_application_url_dispatch_rules.go new file mode 100644 index 0000000000..6001b82685 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_application_url_dispatch_rules.go @@ -0,0 +1,441 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package appengine + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceAppEngineApplicationUrlDispatchRules() *schema.Resource { + return &schema.Resource{ + Create: resourceAppEngineApplicationUrlDispatchRulesCreate, + Read: resourceAppEngineApplicationUrlDispatchRulesRead, + Update: resourceAppEngineApplicationUrlDispatchRulesUpdate, + Delete: resourceAppEngineApplicationUrlDispatchRulesDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAppEngineApplicationUrlDispatchRulesImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "dispatch_rules": { + Type: schema.TypeList, + Required: true, + Description: `Rules to match an HTTP request and dispatch that request to a service.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: `Pathname within the host. Must start with a "/". A single "*" can be included at the end of the path. +The sum of the lengths of the domain and path may not exceed 100 characters.`, + }, + "service": { + Type: schema.TypeString, + Required: true, + Description: `Pathname within the host. Must start with a "/". A single "*" can be included at the end of the path. +The sum of the lengths of the domain and path may not exceed 100 characters.`, + }, + "domain": { + Type: schema.TypeString, + Optional: true, + Description: `Domain name to match against. The wildcard "*" is supported if specified before a period: "*.". 
+Defaults to matching all domains: "*".`, + Default: "*", + }, + }, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAppEngineApplicationUrlDispatchRulesCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + dispatchRulesProp, err := expandAppEngineApplicationUrlDispatchRulesDispatchRules(d.Get("dispatch_rules"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dispatch_rules"); !tpgresource.IsEmptyValue(reflect.ValueOf(dispatchRulesProp)) && (ok || !reflect.DeepEqual(v, dispatchRulesProp)) { + obj["dispatchRules"] = dispatchRulesProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}?updateMask=dispatch_rules") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ApplicationUrlDispatchRules: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ApplicationUrlDispatchRules: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: 
[]transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) + if err != nil { + return fmt.Errorf("Error creating ApplicationUrlDispatchRules: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{project}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = AppEngineOperationWaitTime( + config, res, project, "Creating ApplicationUrlDispatchRules", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create ApplicationUrlDispatchRules: %s", err) + } + + log.Printf("[DEBUG] Finished creating ApplicationUrlDispatchRules %q: %#v", d.Id(), res) + + return resourceAppEngineApplicationUrlDispatchRulesRead(d, meta) +} + +func resourceAppEngineApplicationUrlDispatchRulesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ApplicationUrlDispatchRules: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, 
fmt.Sprintf("AppEngineApplicationUrlDispatchRules %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ApplicationUrlDispatchRules: %s", err) + } + + if err := d.Set("dispatch_rules", flattenAppEngineApplicationUrlDispatchRulesDispatchRules(res["dispatchRules"], d, config)); err != nil { + return fmt.Errorf("Error reading ApplicationUrlDispatchRules: %s", err) + } + + return nil +} + +func resourceAppEngineApplicationUrlDispatchRulesUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ApplicationUrlDispatchRules: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + dispatchRulesProp, err := expandAppEngineApplicationUrlDispatchRulesDispatchRules(d.Get("dispatch_rules"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dispatch_rules"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dispatchRulesProp)) { + obj["dispatchRules"] = dispatchRulesProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}?updateMask=dispatch_rules") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ApplicationUrlDispatchRules %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: 
config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) + + if err != nil { + return fmt.Errorf("Error updating ApplicationUrlDispatchRules %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ApplicationUrlDispatchRules %q: %#v", d.Id(), res) + } + + err = AppEngineOperationWaitTime( + config, res, project, "Updating ApplicationUrlDispatchRules", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAppEngineApplicationUrlDispatchRulesRead(d, meta) +} + +func resourceAppEngineApplicationUrlDispatchRulesDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ApplicationUrlDispatchRules: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}?updateMask=dispatch_rules") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting ApplicationUrlDispatchRules %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + 
UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ApplicationUrlDispatchRules") + } + + err = AppEngineOperationWaitTime( + config, res, project, "Deleting ApplicationUrlDispatchRules", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ApplicationUrlDispatchRules %q: %#v", d.Id(), res) + return nil +} + +func resourceAppEngineApplicationUrlDispatchRulesImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{project}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAppEngineApplicationUrlDispatchRulesDispatchRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "domain": flattenAppEngineApplicationUrlDispatchRulesDispatchRulesDomain(original["domain"], d, config), + "path": flattenAppEngineApplicationUrlDispatchRulesDispatchRulesPath(original["path"], d, config), + "service": flattenAppEngineApplicationUrlDispatchRulesDispatchRulesService(original["service"], d, config), + }) + } + return transformed +} +func 
flattenAppEngineApplicationUrlDispatchRulesDispatchRulesDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineApplicationUrlDispatchRulesDispatchRulesPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineApplicationUrlDispatchRulesDispatchRulesService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandAppEngineApplicationUrlDispatchRulesDispatchRules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDomain, err := expandAppEngineApplicationUrlDispatchRulesDispatchRulesDomain(original["domain"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDomain); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["domain"] = transformedDomain + } + + transformedPath, err := expandAppEngineApplicationUrlDispatchRulesDispatchRulesPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedService, err := expandAppEngineApplicationUrlDispatchRulesDispatchRulesService(original["service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["service"] = transformedService + } + + req = append(req, transformed) + } + return req, nil +} + +func expandAppEngineApplicationUrlDispatchRulesDispatchRulesDomain(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineApplicationUrlDispatchRulesDispatchRulesPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineApplicationUrlDispatchRulesDispatchRulesService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_domain_mapping.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_domain_mapping.go new file mode 100644 index 0000000000..1321e14990 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_domain_mapping.go @@ -0,0 +1,589 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package appengine + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func sslSettingsDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // If certificate id is empty, and ssl management type is `MANUAL`, then + // ssl settings will not be configured, and ssl_settings block is not returned + + if k == "ssl_settings.#" && + old == "0" && new == "1" && + d.Get("ssl_settings.0.certificate_id") == "" && + d.Get("ssl_settings.0.ssl_management_type") == "MANUAL" { + return true + } + + return false +} + +func ResourceAppEngineDomainMapping() *schema.Resource { + return &schema.Resource{ + Create: resourceAppEngineDomainMappingCreate, + Read: resourceAppEngineDomainMappingRead, + Update: resourceAppEngineDomainMappingUpdate, + Delete: resourceAppEngineDomainMappingDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAppEngineDomainMappingImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "domain_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Relative name of the domain serving the application. Example: example.com.`, + }, + "override_strategy": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"STRICT", "OVERRIDE", ""}), + Description: `Whether the domain creation should override any existing mappings for this domain. +By default, overrides are rejected. 
Default value: "STRICT" Possible values: ["STRICT", "OVERRIDE"]`, + Default: "STRICT", + }, + "ssl_settings": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: sslSettingsDiffSuppress, + Description: `SSL configuration for this domain. If unconfigured, this domain will not serve with SSL.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ssl_management_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"AUTOMATIC", "MANUAL"}), + Description: `SSL management type for this domain. If 'AUTOMATIC', a managed certificate is automatically provisioned. +If 'MANUAL', 'certificateId' must be manually specified in order to configure SSL for this domain. Possible values: ["AUTOMATIC", "MANUAL"]`, + }, + "certificate_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `ID of the AuthorizedCertificate resource configuring SSL for the application. Clearing this field will +remove SSL support. +By default, a managed certificate is automatically created for every domain mapping. To omit SSL support +or to configure SSL manually, specify 'SslManagementType.MANUAL' on a 'CREATE' or 'UPDATE' request. You must be +authorized to administer the 'AuthorizedCertificate' resource to manually map it to a DomainMapping resource. +Example: 12345.`, + }, + "pending_managed_certificate_id": { + Type: schema.TypeString, + Computed: true, + Description: `ID of the managed 'AuthorizedCertificate' resource currently being provisioned, if applicable. Until the new +managed certificate has been successfully provisioned, the previous SSL state will be preserved. Once the +provisioning process completes, the 'certificateId' field will reflect the new managed certificate and this +field will be left empty. 
To remove SSL support while there is still a pending managed certificate, clear the +'certificateId' field with an update request.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Full path to the DomainMapping resource in the API. Example: apps/myapp/domainMapping/example.com.`, + }, + "resource_records": { + Type: schema.TypeList, + Computed: true, + Description: `The resource records required to configure this domain mapping. These records must be added to the domain's DNS +configuration in order to serve the application via this domain mapping.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Relative name of the object affected by this record. Only applicable for CNAME records. Example: 'www'.`, + }, + "rrdata": { + Type: schema.TypeString, + Optional: true, + Description: `Data for this record. Values vary by record type, as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1).`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"A", "AAAA", "CNAME", ""}), + Description: `Resource record type. Example: 'AAAA'. 
Possible values: ["A", "AAAA", "CNAME"]`, + }, + }, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAppEngineDomainMappingCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + sslSettingsProp, err := expandAppEngineDomainMappingSslSettings(d.Get("ssl_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ssl_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(sslSettingsProp)) && (ok || !reflect.DeepEqual(v, sslSettingsProp)) { + obj["sslSettings"] = sslSettingsProp + } + idProp, err := expandAppEngineDomainMappingDomainName(d.Get("domain_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("domain_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/domainMappings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new DomainMapping: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DomainMapping: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", 
+ Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating DomainMapping: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/domainMappings/{{domain_name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = AppEngineOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating DomainMapping", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create DomainMapping: %s", err) + } + + if err := d.Set("name", flattenAppEngineDomainMappingName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "apps/{{project}}/domainMappings/{{domain_name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating DomainMapping %q: %#v", d.Id(), res) + + return resourceAppEngineDomainMappingRead(d, meta) +} + +func resourceAppEngineDomainMappingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/domainMappings/{{domain_name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DomainMapping: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AppEngineDomainMapping %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading DomainMapping: %s", err) + } + + if err := d.Set("name", flattenAppEngineDomainMappingName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading DomainMapping: %s", err) + } + if err := d.Set("ssl_settings", flattenAppEngineDomainMappingSslSettings(res["sslSettings"], d, config)); err != nil { + return fmt.Errorf("Error reading DomainMapping: %s", err) + } + if err := d.Set("resource_records", flattenAppEngineDomainMappingResourceRecords(res["resourceRecords"], d, config)); err != nil 
{ + return fmt.Errorf("Error reading DomainMapping: %s", err) + } + if err := d.Set("domain_name", flattenAppEngineDomainMappingDomainName(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading DomainMapping: %s", err) + } + + return nil +} + +func resourceAppEngineDomainMappingUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DomainMapping: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + sslSettingsProp, err := expandAppEngineDomainMappingSslSettings(d.Get("ssl_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ssl_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslSettingsProp)) { + obj["sslSettings"] = sslSettingsProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/domainMappings/{{domain_name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating DomainMapping %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("ssl_settings") { + updateMask = append(updateMask, "ssl_settings.certificate_id", + "ssl_settings.ssl_management_type") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err 
:= tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating DomainMapping %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating DomainMapping %q: %#v", d.Id(), res) + } + + err = AppEngineOperationWaitTime( + config, res, project, "Updating DomainMapping", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAppEngineDomainMappingRead(d, meta) +} + +func resourceAppEngineDomainMappingDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DomainMapping: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/domainMappings/{{domain_name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting DomainMapping %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, 
+ Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DomainMapping") + } + + err = AppEngineOperationWaitTime( + config, res, project, "Deleting DomainMapping", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting DomainMapping %q: %#v", d.Id(), res) + return nil +} + +func resourceAppEngineDomainMappingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "apps/(?P[^/]+)/domainMappings/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/domainMappings/{{domain_name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAppEngineDomainMappingName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineDomainMappingSslSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["certificate_id"] = + flattenAppEngineDomainMappingSslSettingsCertificateId(original["certificateId"], d, config) + transformed["ssl_management_type"] = + flattenAppEngineDomainMappingSslSettingsSslManagementType(original["sslManagementType"], d, config) + transformed["pending_managed_certificate_id"] = + flattenAppEngineDomainMappingSslSettingsPendingManagedCertificateId(original["pendingManagedCertificateId"], d, config) + return []interface{}{transformed} +} +func 
flattenAppEngineDomainMappingSslSettingsCertificateId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineDomainMappingSslSettingsSslManagementType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineDomainMappingSslSettingsPendingManagedCertificateId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineDomainMappingResourceRecords(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenAppEngineDomainMappingResourceRecordsName(original["name"], d, config), + "rrdata": flattenAppEngineDomainMappingResourceRecordsRrdata(original["rrdata"], d, config), + "type": flattenAppEngineDomainMappingResourceRecordsType(original["type"], d, config), + }) + } + return transformed +} +func flattenAppEngineDomainMappingResourceRecordsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineDomainMappingResourceRecordsRrdata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineDomainMappingResourceRecordsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineDomainMappingDomainName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandAppEngineDomainMappingSslSettings(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCertificateId, err := expandAppEngineDomainMappingSslSettingsCertificateId(original["certificate_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCertificateId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["certificateId"] = transformedCertificateId + } + + transformedSslManagementType, err := expandAppEngineDomainMappingSslSettingsSslManagementType(original["ssl_management_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSslManagementType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sslManagementType"] = transformedSslManagementType + } + + transformedPendingManagedCertificateId, err := expandAppEngineDomainMappingSslSettingsPendingManagedCertificateId(original["pending_managed_certificate_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPendingManagedCertificateId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pendingManagedCertificateId"] = transformedPendingManagedCertificateId + } + + return transformed, nil +} + +func expandAppEngineDomainMappingSslSettingsCertificateId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineDomainMappingSslSettingsSslManagementType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineDomainMappingSslSettingsPendingManagedCertificateId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandAppEngineDomainMappingDomainName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_domain_mapping_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_domain_mapping_sweeper.go new file mode 100644 index 0000000000..56f44dcfd8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_domain_mapping_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package appengine + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("AppEngineDomainMapping", testSweepAppEngineDomainMapping) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepAppEngineDomainMapping(region string) error { + resourceName := "AppEngineDomainMapping" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://appengine.googleapis.com/v1/apps/{{project}}/domainMappings", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["domainMappings"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://appengine.googleapis.com/v1/apps/{{project}}/domainMappings/{{domain_name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_firewall_rule.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_firewall_rule.go new file mode 100644 index 0000000000..c53246e650 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_firewall_rule.go @@ -0,0 +1,492 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package appengine + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceAppEngineFirewallRule() *schema.Resource { + return &schema.Resource{ + Create: resourceAppEngineFirewallRuleCreate, + Read: resourceAppEngineFirewallRuleRead, + Update: resourceAppEngineFirewallRuleUpdate, + Delete: resourceAppEngineFirewallRuleDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAppEngineFirewallRuleImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "action": { + Type: schema.TypeString, + Required: true, + 
ValidateFunc: verify.ValidateEnum([]string{"UNSPECIFIED_ACTION", "ALLOW", "DENY"}), + Description: `The action to take if this rule matches. Possible values: ["UNSPECIFIED_ACTION", "ALLOW", "DENY"]`, + }, + "source_range": { + Type: schema.TypeString, + Required: true, + Description: `IP address or range, defined using CIDR notation, of requests that this rule applies to.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An optional string description of this rule.`, + }, + "priority": { + Type: schema.TypeInt, + Optional: true, + Description: `A positive integer that defines the order of rule evaluation. +Rules with the lowest priority are evaluated first. + +A default rule at priority Int32.MaxValue matches all IPv4 and +IPv6 traffic when no previous rule matches. Only the action of +this rule can be modified by the user.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAppEngineFirewallRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandAppEngineFirewallRuleDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + sourceRangeProp, err := expandAppEngineFirewallRuleSourceRange(d.Get("source_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceRangeProp)) && (ok || !reflect.DeepEqual(v, sourceRangeProp)) { + obj["sourceRange"] = sourceRangeProp + } + actionProp, err := 
expandAppEngineFirewallRuleAction(d.Get("action"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("action"); !tpgresource.IsEmptyValue(reflect.ValueOf(actionProp)) && (ok || !reflect.DeepEqual(v, actionProp)) { + obj["action"] = actionProp + } + priorityProp, err := expandAppEngineFirewallRulePriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(priorityProp)) && (ok || !reflect.DeepEqual(v, priorityProp)) { + obj["priority"] = priorityProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new FirewallRule: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for FirewallRule: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating FirewallRule: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/firewall/ingressRules/{{priority}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = transport_tpg.PollingWaitTime(resourceAppEngineFirewallRulePollRead(d, meta), 
transport_tpg.PollCheckForExistence, "Creating FirewallRule", d.Timeout(schema.TimeoutCreate), 1) + if err != nil { + return fmt.Errorf("Error waiting to create FirewallRule: %s", err) + } + + log.Printf("[DEBUG] Finished creating FirewallRule %q: %#v", d.Id(), res) + + return resourceAppEngineFirewallRuleRead(d, meta) +} + +func resourceAppEngineFirewallRulePollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules/{{priority}}") + if err != nil { + return nil, err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, fmt.Errorf("Error fetching project for FirewallRule: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return res, err + } + return res, nil + } +} + +func resourceAppEngineFirewallRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules/{{priority}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for 
FirewallRule: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AppEngineFirewallRule %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading FirewallRule: %s", err) + } + + if err := d.Set("description", flattenAppEngineFirewallRuleDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading FirewallRule: %s", err) + } + if err := d.Set("source_range", flattenAppEngineFirewallRuleSourceRange(res["sourceRange"], d, config)); err != nil { + return fmt.Errorf("Error reading FirewallRule: %s", err) + } + if err := d.Set("action", flattenAppEngineFirewallRuleAction(res["action"], d, config)); err != nil { + return fmt.Errorf("Error reading FirewallRule: %s", err) + } + if err := d.Set("priority", flattenAppEngineFirewallRulePriority(res["priority"], d, config)); err != nil { + return fmt.Errorf("Error reading FirewallRule: %s", err) + } + + return nil +} + +func resourceAppEngineFirewallRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for FirewallRule: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandAppEngineFirewallRuleDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, 
ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + sourceRangeProp, err := expandAppEngineFirewallRuleSourceRange(d.Get("source_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceRangeProp)) { + obj["sourceRange"] = sourceRangeProp + } + actionProp, err := expandAppEngineFirewallRuleAction(d.Get("action"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("action"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, actionProp)) { + obj["action"] = actionProp + } + priorityProp, err := expandAppEngineFirewallRulePriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, priorityProp)) { + obj["priority"] = priorityProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules/{{priority}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating FirewallRule %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("source_range") { + updateMask = append(updateMask, "sourceRange") + } + + if d.HasChange("action") { + updateMask = append(updateMask, "action") + } + + if d.HasChange("priority") { + updateMask = append(updateMask, "priority") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = 
transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating FirewallRule %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating FirewallRule %q: %#v", d.Id(), res) + } + + return resourceAppEngineFirewallRuleRead(d, meta) +} + +func resourceAppEngineFirewallRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for FirewallRule: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules/{{priority}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting FirewallRule %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: 
url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "FirewallRule") + } + + log.Printf("[DEBUG] Finished deleting FirewallRule %q: %#v", d.Id(), res) + return nil +} + +func resourceAppEngineFirewallRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "apps/(?P[^/]+)/firewall/ingressRules/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/firewall/ingressRules/{{priority}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAppEngineFirewallRuleDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineFirewallRuleSourceRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineFirewallRuleAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineFirewallRulePriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandAppEngineFirewallRuleDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, 
nil +} + +func expandAppEngineFirewallRuleSourceRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineFirewallRuleAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineFirewallRulePriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_flexible_app_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_flexible_app_version.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_flexible_app_version.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_flexible_app_version.go index 32a2fa6dad..adb00f5a19 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_app_engine_flexible_app_version.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_flexible_app_version.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package appengine import ( "fmt" @@ -21,6 +24,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceAppEngineFlexibleAppVersion() *schema.Resource { @@ -150,7 +157,7 @@ replies to a healthcheck until it is ready to serve traffic. Default: "300s"`, "service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `AppEngine service resource. Can contain numbers, letters, and hyphens.`, }, "api_config": { @@ -168,21 +175,21 @@ replies to a healthcheck until it is ready to serve traffic. Default: "300s"`, "auth_fail_action": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED", ""}), Description: `Action to take when users access resources that require authentication. 
Default value: "AUTH_FAIL_ACTION_REDIRECT" Possible values: ["AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED"]`, Default: "AUTH_FAIL_ACTION_REDIRECT", }, "login": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED", ""}), Description: `Level of login required to access this resource. Default value: "LOGIN_OPTIONAL" Possible values: ["LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED"]`, Default: "LOGIN_OPTIONAL", }, "security_level": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS", ""}), + ValidateFunc: verify.ValidateEnum([]string{"SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS", ""}), Description: `Security (HTTPS) enforcement for this URL. Possible values: ["SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS"]`, }, "url": { @@ -505,7 +512,7 @@ the configuration ID. In this case, configId must be omitted.`, "rollout_strategy": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"FIXED", "MANAGED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"FIXED", "MANAGED", ""}), Description: `Endpoints rollout strategy. If FIXED, configId must be specified. If MANAGED, configId must be omitted. Default value: "FIXED" Possible values: ["FIXED", "MANAGED"]`, Default: "FIXED", }, @@ -544,19 +551,19 @@ The first matching URL handles the request and other request handlers are not at "auth_fail_action": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED", ""}), Description: `Actions to take when the user is not logged in. 
Possible values: ["AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED"]`, }, "login": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED", ""}), Description: `Methods to restrict access to a URL based on login status. Possible values: ["LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED"]`, }, "redirect_http_response_code": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"REDIRECT_HTTP_RESPONSE_CODE_301", "REDIRECT_HTTP_RESPONSE_CODE_302", "REDIRECT_HTTP_RESPONSE_CODE_303", "REDIRECT_HTTP_RESPONSE_CODE_307", ""}), + ValidateFunc: verify.ValidateEnum([]string{"REDIRECT_HTTP_RESPONSE_CODE_301", "REDIRECT_HTTP_RESPONSE_CODE_302", "REDIRECT_HTTP_RESPONSE_CODE_303", "REDIRECT_HTTP_RESPONSE_CODE_307", ""}), Description: `30x code to use when performing redirects for the secure field. Possible values: ["REDIRECT_HTTP_RESPONSE_CODE_301", "REDIRECT_HTTP_RESPONSE_CODE_302", "REDIRECT_HTTP_RESPONSE_CODE_303", "REDIRECT_HTTP_RESPONSE_CODE_307"]`, }, "script": { @@ -578,7 +585,7 @@ Only the auto value is supported for Node.js in the App Engine standard environm "security_level": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS", ""}), + ValidateFunc: verify.ValidateEnum([]string{"SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS", ""}), Description: `Security (HTTPS) enforcement for this URL. Possible values: ["SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS"]`, }, "static_files": { @@ -651,7 +658,7 @@ All URLs that begin with this prefix are handled by this handler, using the port Description: `A list of the types of messages that this application is able to receive. 
Possible values: ["INBOUND_SERVICE_MAIL", "INBOUND_SERVICE_MAIL_BOUNCE", "INBOUND_SERVICE_XMPP_ERROR", "INBOUND_SERVICE_XMPP_MESSAGE", "INBOUND_SERVICE_XMPP_SUBSCRIBE", "INBOUND_SERVICE_XMPP_PRESENCE", "INBOUND_SERVICE_CHANNEL_PRESENCE", "INBOUND_SERVICE_WARMUP"]`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"INBOUND_SERVICE_MAIL", "INBOUND_SERVICE_MAIL_BOUNCE", "INBOUND_SERVICE_XMPP_ERROR", "INBOUND_SERVICE_XMPP_MESSAGE", "INBOUND_SERVICE_XMPP_SUBSCRIBE", "INBOUND_SERVICE_XMPP_PRESENCE", "INBOUND_SERVICE_CHANNEL_PRESENCE", "INBOUND_SERVICE_WARMUP"}), + ValidateFunc: verify.ValidateEnum([]string{"INBOUND_SERVICE_MAIL", "INBOUND_SERVICE_MAIL_BOUNCE", "INBOUND_SERVICE_XMPP_ERROR", "INBOUND_SERVICE_XMPP_MESSAGE", "INBOUND_SERVICE_XMPP_SUBSCRIBE", "INBOUND_SERVICE_XMPP_PRESENCE", "INBOUND_SERVICE_CHANNEL_PRESENCE", "INBOUND_SERVICE_WARMUP"}), }, Set: schema.HashString, }, @@ -811,7 +818,7 @@ default if this field is neither provided in app.yaml file nor through CLI flag. "serving_status": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"SERVING", "STOPPED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"SERVING", "STOPPED", ""}), Description: `Current serving status of this version. Only the versions with a SERVING status create instances and can be billed. 
Default value: "SERVING" Possible values: ["SERVING", "STOPPED"]`, Default: "SERVING", }, @@ -866,8 +873,8 @@ Reserved names,"default", "latest", and any name with the prefix "ah-".`, } func resourceAppEngineFlexibleAppVersionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -876,151 +883,151 @@ func resourceAppEngineFlexibleAppVersionCreate(d *schema.ResourceData, meta inte idProp, err := expandAppEngineFlexibleAppVersionVersionId(d.Get("version_id"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("version_id"); !isEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { + } else if v, ok := d.GetOkExists("version_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { obj["id"] = idProp } inboundServicesProp, err := expandAppEngineFlexibleAppVersionInboundServices(d.Get("inbound_services"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("inbound_services"); !isEmptyValue(reflect.ValueOf(inboundServicesProp)) && (ok || !reflect.DeepEqual(v, inboundServicesProp)) { + } else if v, ok := d.GetOkExists("inbound_services"); !tpgresource.IsEmptyValue(reflect.ValueOf(inboundServicesProp)) && (ok || !reflect.DeepEqual(v, inboundServicesProp)) { obj["inboundServices"] = inboundServicesProp } instanceClassProp, err := expandAppEngineFlexibleAppVersionInstanceClass(d.Get("instance_class"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("instance_class"); !isEmptyValue(reflect.ValueOf(instanceClassProp)) && (ok || !reflect.DeepEqual(v, instanceClassProp)) { + } else if v, ok := d.GetOkExists("instance_class"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceClassProp)) && (ok || 
!reflect.DeepEqual(v, instanceClassProp)) { obj["instanceClass"] = instanceClassProp } networkProp, err := expandAppEngineFlexibleAppVersionNetwork(d.Get("network"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { obj["network"] = networkProp } resourcesProp, err := expandAppEngineFlexibleAppVersionResources(d.Get("resources"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("resources"); !isEmptyValue(reflect.ValueOf(resourcesProp)) && (ok || !reflect.DeepEqual(v, resourcesProp)) { + } else if v, ok := d.GetOkExists("resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(resourcesProp)) && (ok || !reflect.DeepEqual(v, resourcesProp)) { obj["resources"] = resourcesProp } runtimeProp, err := expandAppEngineFlexibleAppVersionRuntime(d.Get("runtime"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("runtime"); !isEmptyValue(reflect.ValueOf(runtimeProp)) && (ok || !reflect.DeepEqual(v, runtimeProp)) { + } else if v, ok := d.GetOkExists("runtime"); !tpgresource.IsEmptyValue(reflect.ValueOf(runtimeProp)) && (ok || !reflect.DeepEqual(v, runtimeProp)) { obj["runtime"] = runtimeProp } runtimeChannelProp, err := expandAppEngineFlexibleAppVersionRuntimeChannel(d.Get("runtime_channel"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("runtime_channel"); !isEmptyValue(reflect.ValueOf(runtimeChannelProp)) && (ok || !reflect.DeepEqual(v, runtimeChannelProp)) { + } else if v, ok := d.GetOkExists("runtime_channel"); !tpgresource.IsEmptyValue(reflect.ValueOf(runtimeChannelProp)) && (ok || !reflect.DeepEqual(v, runtimeChannelProp)) { obj["runtimeChannel"] = runtimeChannelProp } betaSettingsProp, err := 
expandAppEngineFlexibleAppVersionBetaSettings(d.Get("beta_settings"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("beta_settings"); !isEmptyValue(reflect.ValueOf(betaSettingsProp)) && (ok || !reflect.DeepEqual(v, betaSettingsProp)) { + } else if v, ok := d.GetOkExists("beta_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(betaSettingsProp)) && (ok || !reflect.DeepEqual(v, betaSettingsProp)) { obj["betaSettings"] = betaSettingsProp } servingStatusProp, err := expandAppEngineFlexibleAppVersionServingStatus(d.Get("serving_status"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("serving_status"); !isEmptyValue(reflect.ValueOf(servingStatusProp)) && (ok || !reflect.DeepEqual(v, servingStatusProp)) { + } else if v, ok := d.GetOkExists("serving_status"); !tpgresource.IsEmptyValue(reflect.ValueOf(servingStatusProp)) && (ok || !reflect.DeepEqual(v, servingStatusProp)) { obj["servingStatus"] = servingStatusProp } runtimeApiVersionProp, err := expandAppEngineFlexibleAppVersionRuntimeApiVersion(d.Get("runtime_api_version"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("runtime_api_version"); !isEmptyValue(reflect.ValueOf(runtimeApiVersionProp)) && (ok || !reflect.DeepEqual(v, runtimeApiVersionProp)) { + } else if v, ok := d.GetOkExists("runtime_api_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(runtimeApiVersionProp)) && (ok || !reflect.DeepEqual(v, runtimeApiVersionProp)) { obj["runtimeApiVersion"] = runtimeApiVersionProp } handlersProp, err := expandAppEngineFlexibleAppVersionHandlers(d.Get("handlers"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("handlers"); !isEmptyValue(reflect.ValueOf(handlersProp)) && (ok || !reflect.DeepEqual(v, handlersProp)) { + } else if v, ok := d.GetOkExists("handlers"); !tpgresource.IsEmptyValue(reflect.ValueOf(handlersProp)) && (ok || !reflect.DeepEqual(v, handlersProp)) { obj["handlers"] = handlersProp } 
runtimeMainExecutablePathProp, err := expandAppEngineFlexibleAppVersionRuntimeMainExecutablePath(d.Get("runtime_main_executable_path"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("runtime_main_executable_path"); !isEmptyValue(reflect.ValueOf(runtimeMainExecutablePathProp)) && (ok || !reflect.DeepEqual(v, runtimeMainExecutablePathProp)) { + } else if v, ok := d.GetOkExists("runtime_main_executable_path"); !tpgresource.IsEmptyValue(reflect.ValueOf(runtimeMainExecutablePathProp)) && (ok || !reflect.DeepEqual(v, runtimeMainExecutablePathProp)) { obj["runtimeMainExecutablePath"] = runtimeMainExecutablePathProp } serviceAccountProp, err := expandAppEngineFlexibleAppVersionServiceAccount(d.Get("service_account"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("service_account"); !isEmptyValue(reflect.ValueOf(serviceAccountProp)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { + } else if v, ok := d.GetOkExists("service_account"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceAccountProp)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { obj["serviceAccount"] = serviceAccountProp } apiConfigProp, err := expandAppEngineFlexibleAppVersionApiConfig(d.Get("api_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("api_config"); !isEmptyValue(reflect.ValueOf(apiConfigProp)) && (ok || !reflect.DeepEqual(v, apiConfigProp)) { + } else if v, ok := d.GetOkExists("api_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(apiConfigProp)) && (ok || !reflect.DeepEqual(v, apiConfigProp)) { obj["apiConfig"] = apiConfigProp } envVariablesProp, err := expandAppEngineFlexibleAppVersionEnvVariables(d.Get("env_variables"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("env_variables"); !isEmptyValue(reflect.ValueOf(envVariablesProp)) && (ok || !reflect.DeepEqual(v, envVariablesProp)) { + } else if v, ok := d.GetOkExists("env_variables"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(envVariablesProp)) && (ok || !reflect.DeepEqual(v, envVariablesProp)) { obj["envVariables"] = envVariablesProp } defaultExpirationProp, err := expandAppEngineFlexibleAppVersionDefaultExpiration(d.Get("default_expiration"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_expiration"); !isEmptyValue(reflect.ValueOf(defaultExpirationProp)) && (ok || !reflect.DeepEqual(v, defaultExpirationProp)) { + } else if v, ok := d.GetOkExists("default_expiration"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultExpirationProp)) && (ok || !reflect.DeepEqual(v, defaultExpirationProp)) { obj["defaultExpiration"] = defaultExpirationProp } readinessCheckProp, err := expandAppEngineFlexibleAppVersionReadinessCheck(d.Get("readiness_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("readiness_check"); !isEmptyValue(reflect.ValueOf(readinessCheckProp)) && (ok || !reflect.DeepEqual(v, readinessCheckProp)) { + } else if v, ok := d.GetOkExists("readiness_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(readinessCheckProp)) && (ok || !reflect.DeepEqual(v, readinessCheckProp)) { obj["readinessCheck"] = readinessCheckProp } livenessCheckProp, err := expandAppEngineFlexibleAppVersionLivenessCheck(d.Get("liveness_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("liveness_check"); !isEmptyValue(reflect.ValueOf(livenessCheckProp)) && (ok || !reflect.DeepEqual(v, livenessCheckProp)) { + } else if v, ok := d.GetOkExists("liveness_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(livenessCheckProp)) && (ok || !reflect.DeepEqual(v, livenessCheckProp)) { obj["livenessCheck"] = livenessCheckProp } nobuildFilesRegexProp, err := expandAppEngineFlexibleAppVersionNobuildFilesRegex(d.Get("nobuild_files_regex"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("nobuild_files_regex"); !isEmptyValue(reflect.ValueOf(nobuildFilesRegexProp)) && (ok 
|| !reflect.DeepEqual(v, nobuildFilesRegexProp)) { + } else if v, ok := d.GetOkExists("nobuild_files_regex"); !tpgresource.IsEmptyValue(reflect.ValueOf(nobuildFilesRegexProp)) && (ok || !reflect.DeepEqual(v, nobuildFilesRegexProp)) { obj["nobuildFilesRegex"] = nobuildFilesRegexProp } deploymentProp, err := expandAppEngineFlexibleAppVersionDeployment(d.Get("deployment"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("deployment"); !isEmptyValue(reflect.ValueOf(deploymentProp)) && (ok || !reflect.DeepEqual(v, deploymentProp)) { + } else if v, ok := d.GetOkExists("deployment"); !tpgresource.IsEmptyValue(reflect.ValueOf(deploymentProp)) && (ok || !reflect.DeepEqual(v, deploymentProp)) { obj["deployment"] = deploymentProp } endpointsApiServiceProp, err := expandAppEngineFlexibleAppVersionEndpointsApiService(d.Get("endpoints_api_service"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("endpoints_api_service"); !isEmptyValue(reflect.ValueOf(endpointsApiServiceProp)) && (ok || !reflect.DeepEqual(v, endpointsApiServiceProp)) { + } else if v, ok := d.GetOkExists("endpoints_api_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(endpointsApiServiceProp)) && (ok || !reflect.DeepEqual(v, endpointsApiServiceProp)) { obj["endpointsApiService"] = endpointsApiServiceProp } entrypointProp, err := expandAppEngineFlexibleAppVersionEntrypoint(d.Get("entrypoint"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("entrypoint"); !isEmptyValue(reflect.ValueOf(entrypointProp)) && (ok || !reflect.DeepEqual(v, entrypointProp)) { + } else if v, ok := d.GetOkExists("entrypoint"); !tpgresource.IsEmptyValue(reflect.ValueOf(entrypointProp)) && (ok || !reflect.DeepEqual(v, entrypointProp)) { obj["entrypoint"] = entrypointProp } - vpcAccessConnectorProp, err := expandAppEngineFlexibleAppVersionVPCAccessConnector(d.Get("vpc_access_connector"), d, config) + vpcAccessConnectorProp, err := 
expandAppEngineFlexibleAppVersionVpcAccessConnector(d.Get("vpc_access_connector"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("vpc_access_connector"); !isEmptyValue(reflect.ValueOf(vpcAccessConnectorProp)) && (ok || !reflect.DeepEqual(v, vpcAccessConnectorProp)) { + } else if v, ok := d.GetOkExists("vpc_access_connector"); !tpgresource.IsEmptyValue(reflect.ValueOf(vpcAccessConnectorProp)) && (ok || !reflect.DeepEqual(v, vpcAccessConnectorProp)) { obj["vpcAccessConnector"] = vpcAccessConnectorProp } automaticScalingProp, err := expandAppEngineFlexibleAppVersionAutomaticScaling(d.Get("automatic_scaling"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("automatic_scaling"); !isEmptyValue(reflect.ValueOf(automaticScalingProp)) && (ok || !reflect.DeepEqual(v, automaticScalingProp)) { + } else if v, ok := d.GetOkExists("automatic_scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(automaticScalingProp)) && (ok || !reflect.DeepEqual(v, automaticScalingProp)) { obj["automaticScaling"] = automaticScalingProp } manualScalingProp, err := expandAppEngineFlexibleAppVersionManualScaling(d.Get("manual_scaling"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("manual_scaling"); !isEmptyValue(reflect.ValueOf(manualScalingProp)) && (ok || !reflect.DeepEqual(v, manualScalingProp)) { + } else if v, ok := d.GetOkExists("manual_scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(manualScalingProp)) && (ok || !reflect.DeepEqual(v, manualScalingProp)) { obj["manualScaling"] = manualScalingProp } @@ -1029,14 +1036,14 @@ func resourceAppEngineFlexibleAppVersionCreate(d *schema.ResourceData, meta inte return err } - lockName, err := replaceVars(d, config, "apps/{{project}}") + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer 
transport_tpg.MutexStore.Unlock(lockName) - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions") + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions") if err != nil { return err } @@ -1044,24 +1051,33 @@ func resourceAppEngineFlexibleAppVersionCreate(d *schema.ResourceData, meta inte log.Printf("[DEBUG] Creating new FlexibleAppVersion: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for FlexibleAppVersion: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isAppEngineRetryableError) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) if err != nil { return fmt.Errorf("Error creating FlexibleAppVersion: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}/versions/{{version_id}}") + id, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/services/{{service}}/versions/{{version_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -1083,33 +1099,40 @@ func resourceAppEngineFlexibleAppVersionCreate(d *schema.ResourceData, meta inte } func resourceAppEngineFlexibleAppVersionRead(d *schema.ResourceData, meta interface{}) error { 
- config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions/{{version_id}}?view=FULL") + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions/{{version_id}}?view=FULL") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for FlexibleAppVersion: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isAppEngineRetryableError) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AppEngineFlexibleAppVersion %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AppEngineFlexibleAppVersion %q", d.Id())) } // Explicitly set virtual fields to default values if unset @@ -1187,7 +1210,7 @@ func resourceAppEngineFlexibleAppVersionRead(d *schema.ResourceData, meta interf if err := d.Set("endpoints_api_service", flattenAppEngineFlexibleAppVersionEndpointsApiService(res["endpointsApiService"], d, config)); err != nil { return fmt.Errorf("Error reading FlexibleAppVersion: %s", err) } - if err := 
d.Set("vpc_access_connector", flattenAppEngineFlexibleAppVersionVPCAccessConnector(res["vpcAccessConnector"], d, config)); err != nil { + if err := d.Set("vpc_access_connector", flattenAppEngineFlexibleAppVersionVpcAccessConnector(res["vpcAccessConnector"], d, config)); err != nil { return fmt.Errorf("Error reading FlexibleAppVersion: %s", err) } if err := d.Set("automatic_scaling", flattenAppEngineFlexibleAppVersionAutomaticScaling(res["automaticScaling"], d, config)); err != nil { @@ -1201,15 +1224,15 @@ func resourceAppEngineFlexibleAppVersionRead(d *schema.ResourceData, meta interf } func resourceAppEngineFlexibleAppVersionUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for FlexibleAppVersion: %s", err) } @@ -1219,151 +1242,151 @@ func resourceAppEngineFlexibleAppVersionUpdate(d *schema.ResourceData, meta inte idProp, err := expandAppEngineFlexibleAppVersionVersionId(d.Get("version_id"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("version_id"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idProp)) { + } else if v, ok := d.GetOkExists("version_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idProp)) { obj["id"] = idProp } inboundServicesProp, err := expandAppEngineFlexibleAppVersionInboundServices(d.Get("inbound_services"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("inbound_services"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, inboundServicesProp)) { + } else if v, ok := d.GetOkExists("inbound_services"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, inboundServicesProp)) { obj["inboundServices"] = inboundServicesProp } instanceClassProp, err := expandAppEngineFlexibleAppVersionInstanceClass(d.Get("instance_class"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("instance_class"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, instanceClassProp)) { + } else if v, ok := d.GetOkExists("instance_class"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, instanceClassProp)) { obj["instanceClass"] = instanceClassProp } networkProp, err := expandAppEngineFlexibleAppVersionNetwork(d.Get("network"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkProp)) { + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkProp)) { obj["network"] = networkProp } resourcesProp, err := expandAppEngineFlexibleAppVersionResources(d.Get("resources"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("resources"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, resourcesProp)) { + } else if v, ok := d.GetOkExists("resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, resourcesProp)) { obj["resources"] = resourcesProp } runtimeProp, err := expandAppEngineFlexibleAppVersionRuntime(d.Get("runtime"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("runtime"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeProp)) { + } else if v, ok := d.GetOkExists("runtime"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeProp)) { obj["runtime"] = runtimeProp } runtimeChannelProp, err := expandAppEngineFlexibleAppVersionRuntimeChannel(d.Get("runtime_channel"), d, 
config) if err != nil { return err - } else if v, ok := d.GetOkExists("runtime_channel"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeChannelProp)) { + } else if v, ok := d.GetOkExists("runtime_channel"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeChannelProp)) { obj["runtimeChannel"] = runtimeChannelProp } betaSettingsProp, err := expandAppEngineFlexibleAppVersionBetaSettings(d.Get("beta_settings"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("beta_settings"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, betaSettingsProp)) { + } else if v, ok := d.GetOkExists("beta_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, betaSettingsProp)) { obj["betaSettings"] = betaSettingsProp } servingStatusProp, err := expandAppEngineFlexibleAppVersionServingStatus(d.Get("serving_status"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("serving_status"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, servingStatusProp)) { + } else if v, ok := d.GetOkExists("serving_status"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, servingStatusProp)) { obj["servingStatus"] = servingStatusProp } runtimeApiVersionProp, err := expandAppEngineFlexibleAppVersionRuntimeApiVersion(d.Get("runtime_api_version"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("runtime_api_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeApiVersionProp)) { + } else if v, ok := d.GetOkExists("runtime_api_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeApiVersionProp)) { obj["runtimeApiVersion"] = runtimeApiVersionProp } handlersProp, err := expandAppEngineFlexibleAppVersionHandlers(d.Get("handlers"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("handlers"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, handlersProp)) { + } else if v, ok := d.GetOkExists("handlers"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, handlersProp)) { obj["handlers"] = handlersProp } runtimeMainExecutablePathProp, err := expandAppEngineFlexibleAppVersionRuntimeMainExecutablePath(d.Get("runtime_main_executable_path"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("runtime_main_executable_path"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeMainExecutablePathProp)) { + } else if v, ok := d.GetOkExists("runtime_main_executable_path"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeMainExecutablePathProp)) { obj["runtimeMainExecutablePath"] = runtimeMainExecutablePathProp } serviceAccountProp, err := expandAppEngineFlexibleAppVersionServiceAccount(d.Get("service_account"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("service_account"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { + } else if v, ok := d.GetOkExists("service_account"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { obj["serviceAccount"] = serviceAccountProp } apiConfigProp, err := expandAppEngineFlexibleAppVersionApiConfig(d.Get("api_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("api_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, apiConfigProp)) { + } else if v, ok := d.GetOkExists("api_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, apiConfigProp)) { obj["apiConfig"] = apiConfigProp } envVariablesProp, err := expandAppEngineFlexibleAppVersionEnvVariables(d.Get("env_variables"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("env_variables"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, envVariablesProp)) { + } else if v, ok := d.GetOkExists("env_variables"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, envVariablesProp)) { obj["envVariables"] = envVariablesProp } defaultExpirationProp, err := expandAppEngineFlexibleAppVersionDefaultExpiration(d.Get("default_expiration"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_expiration"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultExpirationProp)) { + } else if v, ok := d.GetOkExists("default_expiration"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultExpirationProp)) { obj["defaultExpiration"] = defaultExpirationProp } readinessCheckProp, err := expandAppEngineFlexibleAppVersionReadinessCheck(d.Get("readiness_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("readiness_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, readinessCheckProp)) { + } else if v, ok := d.GetOkExists("readiness_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, readinessCheckProp)) { obj["readinessCheck"] = readinessCheckProp } livenessCheckProp, err := expandAppEngineFlexibleAppVersionLivenessCheck(d.Get("liveness_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("liveness_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, livenessCheckProp)) { + } else if v, ok := d.GetOkExists("liveness_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, livenessCheckProp)) { obj["livenessCheck"] = livenessCheckProp } nobuildFilesRegexProp, err := expandAppEngineFlexibleAppVersionNobuildFilesRegex(d.Get("nobuild_files_regex"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("nobuild_files_regex"); !isEmptyValue(reflect.ValueOf(v)) && (ok 
|| !reflect.DeepEqual(v, nobuildFilesRegexProp)) { + } else if v, ok := d.GetOkExists("nobuild_files_regex"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nobuildFilesRegexProp)) { obj["nobuildFilesRegex"] = nobuildFilesRegexProp } deploymentProp, err := expandAppEngineFlexibleAppVersionDeployment(d.Get("deployment"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("deployment"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deploymentProp)) { + } else if v, ok := d.GetOkExists("deployment"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deploymentProp)) { obj["deployment"] = deploymentProp } endpointsApiServiceProp, err := expandAppEngineFlexibleAppVersionEndpointsApiService(d.Get("endpoints_api_service"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("endpoints_api_service"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, endpointsApiServiceProp)) { + } else if v, ok := d.GetOkExists("endpoints_api_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, endpointsApiServiceProp)) { obj["endpointsApiService"] = endpointsApiServiceProp } entrypointProp, err := expandAppEngineFlexibleAppVersionEntrypoint(d.Get("entrypoint"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("entrypoint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entrypointProp)) { + } else if v, ok := d.GetOkExists("entrypoint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entrypointProp)) { obj["entrypoint"] = entrypointProp } - vpcAccessConnectorProp, err := expandAppEngineFlexibleAppVersionVPCAccessConnector(d.Get("vpc_access_connector"), d, config) + vpcAccessConnectorProp, err := expandAppEngineFlexibleAppVersionVpcAccessConnector(d.Get("vpc_access_connector"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("vpc_access_connector"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, vpcAccessConnectorProp)) { + } else if v, ok := d.GetOkExists("vpc_access_connector"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, vpcAccessConnectorProp)) { obj["vpcAccessConnector"] = vpcAccessConnectorProp } automaticScalingProp, err := expandAppEngineFlexibleAppVersionAutomaticScaling(d.Get("automatic_scaling"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("automatic_scaling"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, automaticScalingProp)) { + } else if v, ok := d.GetOkExists("automatic_scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, automaticScalingProp)) { obj["automaticScaling"] = automaticScalingProp } manualScalingProp, err := expandAppEngineFlexibleAppVersionManualScaling(d.Get("manual_scaling"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("manual_scaling"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, manualScalingProp)) { + } else if v, ok := d.GetOkExists("manual_scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, manualScalingProp)) { obj["manualScaling"] = manualScalingProp } @@ -1372,14 +1395,14 @@ func resourceAppEngineFlexibleAppVersionUpdate(d *schema.ResourceData, meta inte return err } - lockName, err := replaceVars(d, config, "apps/{{project}}") + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions") + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions") if err != 
nil { return err } @@ -1387,11 +1410,20 @@ func resourceAppEngineFlexibleAppVersionUpdate(d *schema.ResourceData, meta inte log.Printf("[DEBUG] Updating FlexibleAppVersion %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isAppEngineRetryableError) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) if err != nil { return fmt.Errorf("Error updating FlexibleAppVersion %q: %s", d.Id(), err) @@ -1411,8 +1443,8 @@ func resourceAppEngineFlexibleAppVersionUpdate(d *schema.ResourceData, meta inte } func resourceAppEngineFlexibleAppVersionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -1422,28 +1454,37 @@ func resourceAppEngineFlexibleAppVersionDelete(d *schema.ResourceData, meta inte return nil } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - lockName, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}") + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/services/{{service}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer 
transport_tpg.MutexStore.Unlock(lockName) if d.Get("delete_service_on_destroy") == true { - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") if err != nil { return err } var obj map[string]interface{} log.Printf("[DEBUG] Deleting Service %q", d.Id()) - res, err := SendRequestWithTimeout(config, "DELETE", project, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isAppEngineRetryableError) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) if err != nil { - return handleNotFoundError(err, d, "Service") + return transport_tpg.HandleNotFoundError(err, d, "Service") } err = AppEngineOperationWaitTime( config, res, project, "Deleting Service", userAgent, @@ -1455,15 +1496,24 @@ func resourceAppEngineFlexibleAppVersionDelete(d *schema.ResourceData, meta inte log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) return nil } else { - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions/{{version_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions/{{version_id}}") if err != nil { return err } var obj map[string]interface{} log.Printf("[DEBUG] Deleting AppVersion %q", d.Id()) - res, err := SendRequestWithTimeout(config, "DELETE", project, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isAppEngineRetryableError) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: 
obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) if err != nil { - return handleNotFoundError(err, d, "AppVersion") + return transport_tpg.HandleNotFoundError(err, d, "AppVersion") } err = AppEngineOperationWaitTime( config, res, project, "Deleting AppVersion", userAgent, @@ -1479,8 +1529,8 @@ func resourceAppEngineFlexibleAppVersionDelete(d *schema.ResourceData, meta inte } func resourceAppEngineFlexibleAppVersionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "apps/(?P[^/]+)/services/(?P[^/]+)/versions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -1489,7 +1539,7 @@ func resourceAppEngineFlexibleAppVersionImport(d *schema.ResourceData, meta inte } // Replace import id for the resource id - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}/versions/{{version_id}}") + id, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/services/{{service}}/versions/{{version_id}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -1506,26 +1556,26 @@ func resourceAppEngineFlexibleAppVersionImport(d *schema.ResourceData, meta inte return []*schema.ResourceData{d}, nil } -func flattenAppEngineFlexibleAppVersionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionVersionId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionVersionId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenAppEngineFlexibleAppVersionInboundServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionInboundServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenAppEngineFlexibleAppVersionInstanceClass(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionInstanceClass(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1546,27 +1596,27 @@ func flattenAppEngineFlexibleAppVersionNetwork(v interface{}, d *schema.Resource flattenAppEngineFlexibleAppVersionNetworkSessionAffinity(original["sessionAffinity"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionNetworkForwardedPorts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionNetworkForwardedPorts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionNetworkInstanceTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionNetworkInstanceTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionNetworkName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionNetworkName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenAppEngineFlexibleAppVersionNetworkSubnetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionNetworkSubnetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionNetworkSessionAffinity(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionNetworkSessionAffinity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1585,10 +1635,10 @@ func flattenAppEngineFlexibleAppVersionResources(v interface{}, d *schema.Resour flattenAppEngineFlexibleAppVersionResourcesVolumes(original["volumes"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionResourcesCpu(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionResourcesCpu(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1602,10 +1652,10 @@ func flattenAppEngineFlexibleAppVersionResourcesCpu(v interface{}, d *schema.Res return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionResourcesDiskGb(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionResourcesDiskGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format 
if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1619,11 +1669,11 @@ func flattenAppEngineFlexibleAppVersionResourcesDiskGb(v interface{}, d *schema. return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionResourcesMemoryGb(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionResourcesMemoryGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionResourcesVolumes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionResourcesVolumes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1643,18 +1693,18 @@ func flattenAppEngineFlexibleAppVersionResourcesVolumes(v interface{}, d *schema } return transformed } -func flattenAppEngineFlexibleAppVersionResourcesVolumesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionResourcesVolumesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionResourcesVolumesVolumeType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionResourcesVolumesVolumeType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionResourcesVolumesSizeGb(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionResourcesVolumesSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := 
StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1668,23 +1718,23 @@ func flattenAppEngineFlexibleAppVersionResourcesVolumesSizeGb(v interface{}, d * return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionRuntime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionRuntime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionRuntimeChannel(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionRuntimeChannel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionServingStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionServingStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionRuntimeApiVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionRuntimeApiVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionHandlers(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1708,27 +1758,27 @@ func flattenAppEngineFlexibleAppVersionHandlers(v interface{}, d *schema.Resourc } return transformed } -func flattenAppEngineFlexibleAppVersionHandlersUrlRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersUrlRegex(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionHandlersSecurityLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersSecurityLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionHandlersLogin(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersLogin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionHandlersAuthFailAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersAuthFailAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionHandlersRedirectHttpResponseCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersRedirectHttpResponseCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionHandlersScript(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersScript(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1741,11 +1791,11 @@ func flattenAppEngineFlexibleAppVersionHandlersScript(v interface{}, d *schema.R flattenAppEngineFlexibleAppVersionHandlersScriptScriptPath(original["scriptPath"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionHandlersScriptScriptPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersScriptScriptPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { return v } -func flattenAppEngineFlexibleAppVersionHandlersStaticFiles(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersStaticFiles(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1770,43 +1820,43 @@ func flattenAppEngineFlexibleAppVersionHandlersStaticFiles(v interface{}, d *sch flattenAppEngineFlexibleAppVersionHandlersStaticFilesApplicationReadable(original["applicationReadable"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersStaticFilesPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesUploadPathRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersStaticFilesUploadPathRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesHttpHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersStaticFilesHttpHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesMimeType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersStaticFilesMimeType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesExpiration(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenAppEngineFlexibleAppVersionHandlersStaticFilesExpiration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesRequireMatchingFile(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersStaticFilesRequireMatchingFile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesApplicationReadable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionHandlersStaticFilesApplicationReadable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionRuntimeMainExecutablePath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionRuntimeMainExecutablePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionApiConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionApiConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1827,31 +1877,31 @@ func flattenAppEngineFlexibleAppVersionApiConfig(v interface{}, d *schema.Resour flattenAppEngineFlexibleAppVersionApiConfigUrl(original["url"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionApiConfigAuthFailAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
+func flattenAppEngineFlexibleAppVersionApiConfigAuthFailAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionApiConfigLogin(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionApiConfigLogin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionApiConfigScript(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionApiConfigScript(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionApiConfigSecurityLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionApiConfigSecurityLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionApiConfigUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionApiConfigUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionDefaultExpiration(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionDefaultExpiration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionReadinessCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionReadinessCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1876,35 +1926,35 @@ func flattenAppEngineFlexibleAppVersionReadinessCheck(v interface{}, d *schema.R 
flattenAppEngineFlexibleAppVersionReadinessCheckAppStartTimeout(original["appStartTimeout"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionReadinessCheckPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionReadinessCheckPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionReadinessCheckHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionReadinessCheckHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionReadinessCheckFailureThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionReadinessCheckFailureThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionReadinessCheckSuccessThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionReadinessCheckSuccessThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionReadinessCheckCheckInterval(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionReadinessCheckCheckInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionReadinessCheckTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionReadinessCheckTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionReadinessCheckAppStartTimeout(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionReadinessCheckAppStartTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionLivenessCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionLivenessCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1929,39 +1979,39 @@ func flattenAppEngineFlexibleAppVersionLivenessCheck(v interface{}, d *schema.Re flattenAppEngineFlexibleAppVersionLivenessCheckInitialDelay(original["initialDelay"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionLivenessCheckPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionLivenessCheckPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionLivenessCheckHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionLivenessCheckHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionLivenessCheckFailureThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionLivenessCheckFailureThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionLivenessCheckSuccessThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionLivenessCheckSuccessThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionLivenessCheckCheckInterval(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionLivenessCheckCheckInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionLivenessCheckTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionLivenessCheckTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionLivenessCheckInitialDelay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionLivenessCheckInitialDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionNobuildFilesRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionNobuildFilesRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionDeployment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionDeployment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { original := v.(map[string]interface{}) transformed := make(map[string]interface{}) transformed["zip"] = d.Get("deployment.0.zip") @@ -1974,7 +2024,7 @@ func flattenAppEngineFlexibleAppVersionDeployment(v interface{}, d *schema.Resou return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionDeploymentContainer(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionDeploymentContainer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1988,11 +2038,11 @@ func flattenAppEngineFlexibleAppVersionDeploymentContainer(v interface{}, d *sch 
return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionDeploymentContainerImage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionDeploymentContainerImage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionDeploymentCloudBuildOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionDeploymentCloudBuildOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2008,15 +2058,15 @@ func flattenAppEngineFlexibleAppVersionDeploymentCloudBuildOptions(v interface{} return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsAppYamlPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsAppYamlPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsCloudBuildTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsCloudBuildTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionEndpointsApiService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionEndpointsApiService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2035,23 +2085,23 @@ func flattenAppEngineFlexibleAppVersionEndpointsApiService(v interface{}, d *sch flattenAppEngineFlexibleAppVersionEndpointsApiServiceDisableTraceSampling(original["disableTraceSampling"], d, config) return []interface{}{transformed} } 
-func flattenAppEngineFlexibleAppVersionEndpointsApiServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionEndpointsApiServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionEndpointsApiServiceConfigId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionEndpointsApiServiceConfigId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionEndpointsApiServiceRolloutStrategy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionEndpointsApiServiceRolloutStrategy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionEndpointsApiServiceDisableTraceSampling(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionEndpointsApiServiceDisableTraceSampling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionVPCAccessConnector(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionVpcAccessConnector(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2061,14 +2111,14 @@ func flattenAppEngineFlexibleAppVersionVPCAccessConnector(v interface{}, d *sche } transformed := make(map[string]interface{}) transformed["name"] = - flattenAppEngineFlexibleAppVersionVPCAccessConnectorName(original["name"], d, config) + flattenAppEngineFlexibleAppVersionVpcAccessConnectorName(original["name"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionVPCAccessConnectorName(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionVpcAccessConnectorName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionAutomaticScaling(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScaling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2103,11 +2153,11 @@ func flattenAppEngineFlexibleAppVersionAutomaticScaling(v interface{}, d *schema flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilization(original["networkUtilization"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionAutomaticScalingCoolDownPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingCoolDownPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2122,18 +2172,18 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilization(v interfac flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationTargetUtilization(original["targetUtilization"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationAggregationWindowLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationAggregationWindowLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } 
-func flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationTargetUtilization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationTargetUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxConcurrentRequests(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxConcurrentRequests(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2147,10 +2197,10 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxConcurrentRequests(v i return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxIdleInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxIdleInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2164,10 +2214,10 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxIdleInstances(v interf return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxTotalInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxTotalInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // 
Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2181,14 +2231,14 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxTotalInstances(v inter return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxPendingLatency(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxPendingLatency(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionAutomaticScalingMinIdleInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingMinIdleInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2202,10 +2252,10 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingMinIdleInstances(v interf return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionAutomaticScalingMinTotalInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingMinTotalInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2219,11 +2269,11 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingMinTotalInstances(v inter return v // let 
terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionAutomaticScalingMinPendingLatency(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingMinPendingLatency(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2238,15 +2288,15 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilization(v inte flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetConcurrentRequests(original["targetConcurrentRequests"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetRequestCountPerSecond(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetRequestCountPerSecond(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetConcurrentRequests(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetConcurrentRequests(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { 
return nil } @@ -2265,10 +2315,10 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilization(v interfa flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadOpsPerSecond(original["targetReadOpsPerSecond"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteBytesPerSecond(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteBytesPerSecond(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2282,10 +2332,10 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWrit return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteOpsPerSecond(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteOpsPerSecond(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2299,10 +2349,10 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWrit return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadBytesPerSecond(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadBytesPerSecond(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2316,10 +2366,10 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetRead return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadOpsPerSecond(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadOpsPerSecond(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2333,7 +2383,7 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetRead return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2352,10 +2402,10 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilization(v inte flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedPacketsPerSecond(original["targetReceivedPacketsPerSecond"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentBytesPerSecond(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentBytesPerSecond(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2369,10 +2419,10 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetS return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentPacketsPerSecond(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentPacketsPerSecond(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2386,10 +2436,10 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetS return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedBytesPerSecond(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedBytesPerSecond(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2403,10 +2453,10 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetR return v // let terraform core handle 
it otherwise } -func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedPacketsPerSecond(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedPacketsPerSecond(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2420,7 +2470,7 @@ func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetR return v // let terraform core handle it otherwise } -func flattenAppEngineFlexibleAppVersionManualScaling(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionManualScaling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2433,10 +2483,10 @@ func flattenAppEngineFlexibleAppVersionManualScaling(v interface{}, d *schema.Re flattenAppEngineFlexibleAppVersionManualScalingInstances(original["instances"], d, config) return []interface{}{transformed} } -func flattenAppEngineFlexibleAppVersionManualScalingInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenAppEngineFlexibleAppVersionManualScalingInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2450,20 +2500,20 @@ func flattenAppEngineFlexibleAppVersionManualScalingInstances(v interface{}, d * return v // let terraform core handle it otherwise } -func expandAppEngineFlexibleAppVersionVersionId(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionVersionId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionInboundServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionInboundServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandAppEngineFlexibleAppVersionInstanceClass(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionInstanceClass(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2475,62 +2525,62 @@ func expandAppEngineFlexibleAppVersionNetwork(v interface{}, d TerraformResource transformedForwardedPorts, err := expandAppEngineFlexibleAppVersionNetworkForwardedPorts(original["forwarded_ports"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedForwardedPorts); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedForwardedPorts); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["forwardedPorts"] = transformedForwardedPorts } transformedInstanceTag, err := expandAppEngineFlexibleAppVersionNetworkInstanceTag(original["instance_tag"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInstanceTag); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInstanceTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["instanceTag"] = transformedInstanceTag } transformedName, err := expandAppEngineFlexibleAppVersionNetworkName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedSubnetwork, err := expandAppEngineFlexibleAppVersionNetworkSubnetwork(original["subnetwork"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSubnetwork); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSubnetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["subnetworkName"] = transformedSubnetwork } transformedSessionAffinity, err := expandAppEngineFlexibleAppVersionNetworkSessionAffinity(original["session_affinity"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSessionAffinity); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSessionAffinity); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sessionAffinity"] = transformedSessionAffinity } return transformed, nil } -func expandAppEngineFlexibleAppVersionNetworkForwardedPorts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionNetworkForwardedPorts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionNetworkInstanceTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionNetworkInstanceTag(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionNetworkName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionNetworkName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionNetworkSubnetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionNetworkSubnetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionNetworkSessionAffinity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionNetworkSessionAffinity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2542,47 +2592,47 @@ func expandAppEngineFlexibleAppVersionResources(v interface{}, d TerraformResour transformedCpu, err := expandAppEngineFlexibleAppVersionResourcesCpu(original["cpu"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCpu); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCpu); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cpu"] = transformedCpu } transformedDiskGb, err := expandAppEngineFlexibleAppVersionResourcesDiskGb(original["disk_gb"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedDiskGb); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDiskGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["diskGb"] = transformedDiskGb } transformedMemoryGb, err := expandAppEngineFlexibleAppVersionResourcesMemoryGb(original["memory_gb"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMemoryGb); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMemoryGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["memoryGb"] = transformedMemoryGb } transformedVolumes, err := expandAppEngineFlexibleAppVersionResourcesVolumes(original["volumes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVolumes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVolumes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["volumes"] = transformedVolumes } return transformed, nil } -func expandAppEngineFlexibleAppVersionResourcesCpu(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionResourcesCpu(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionResourcesDiskGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionResourcesDiskGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionResourcesMemoryGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionResourcesMemoryGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandAppEngineFlexibleAppVersionResourcesVolumes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionResourcesVolumes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2595,21 +2645,21 @@ func expandAppEngineFlexibleAppVersionResourcesVolumes(v interface{}, d Terrafor transformedName, err := expandAppEngineFlexibleAppVersionResourcesVolumesName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedVolumeType, err := expandAppEngineFlexibleAppVersionResourcesVolumesVolumeType(original["volume_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVolumeType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVolumeType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["volumeType"] = transformedVolumeType } transformedSizeGb, err := expandAppEngineFlexibleAppVersionResourcesVolumesSizeGb(original["size_gb"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSizeGb); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sizeGb"] = transformedSizeGb } @@ -2618,27 +2668,27 @@ func expandAppEngineFlexibleAppVersionResourcesVolumes(v interface{}, d Terrafor return req, nil } -func expandAppEngineFlexibleAppVersionResourcesVolumesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionResourcesVolumesName(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionResourcesVolumesVolumeType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionResourcesVolumesVolumeType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionResourcesVolumesSizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionResourcesVolumesSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionRuntime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionRuntime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionRuntimeChannel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionRuntimeChannel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionBetaSettings(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandAppEngineFlexibleAppVersionBetaSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -2649,15 +2699,15 @@ func expandAppEngineFlexibleAppVersionBetaSettings(v interface{}, d TerraformRes return m, nil } -func expandAppEngineFlexibleAppVersionServingStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandAppEngineFlexibleAppVersionServingStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionRuntimeApiVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionRuntimeApiVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionHandlers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2670,49 +2720,49 @@ func expandAppEngineFlexibleAppVersionHandlers(v interface{}, d TerraformResourc transformedUrlRegex, err := expandAppEngineFlexibleAppVersionHandlersUrlRegex(original["url_regex"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRegex); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRegex"] = transformedUrlRegex } transformedSecurityLevel, err := expandAppEngineFlexibleAppVersionHandlersSecurityLevel(original["security_level"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSecurityLevel); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSecurityLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["securityLevel"] = transformedSecurityLevel } transformedLogin, err := expandAppEngineFlexibleAppVersionHandlersLogin(original["login"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLogin); val.IsValid() && !isEmptyValue(val) 
{ + } else if val := reflect.ValueOf(transformedLogin); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["login"] = transformedLogin } transformedAuthFailAction, err := expandAppEngineFlexibleAppVersionHandlersAuthFailAction(original["auth_fail_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAuthFailAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAuthFailAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["authFailAction"] = transformedAuthFailAction } transformedRedirectHttpResponseCode, err := expandAppEngineFlexibleAppVersionHandlersRedirectHttpResponseCode(original["redirect_http_response_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRedirectHttpResponseCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRedirectHttpResponseCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["redirectHttpResponseCode"] = transformedRedirectHttpResponseCode } transformedScript, err := expandAppEngineFlexibleAppVersionHandlersScript(original["script"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScript); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScript); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["script"] = transformedScript } transformedStaticFiles, err := expandAppEngineFlexibleAppVersionHandlersStaticFiles(original["static_files"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStaticFiles); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStaticFiles); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["staticFiles"] = transformedStaticFiles } @@ -2721,27 +2771,27 @@ func expandAppEngineFlexibleAppVersionHandlers(v interface{}, d TerraformResourc return 
req, nil } -func expandAppEngineFlexibleAppVersionHandlersUrlRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersUrlRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionHandlersSecurityLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersSecurityLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionHandlersLogin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersLogin(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionHandlersAuthFailAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersAuthFailAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionHandlersRedirectHttpResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersRedirectHttpResponseCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionHandlersScript(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersScript(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2753,18 
+2803,18 @@ func expandAppEngineFlexibleAppVersionHandlersScript(v interface{}, d TerraformR transformedScriptPath, err := expandAppEngineFlexibleAppVersionHandlersScriptScriptPath(original["script_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScriptPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScriptPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scriptPath"] = transformedScriptPath } return transformed, nil } -func expandAppEngineFlexibleAppVersionHandlersScriptScriptPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersScriptScriptPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionHandlersStaticFiles(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersStaticFiles(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2776,64 +2826,64 @@ func expandAppEngineFlexibleAppVersionHandlersStaticFiles(v interface{}, d Terra transformedPath, err := expandAppEngineFlexibleAppVersionHandlersStaticFilesPath(original["path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["path"] = transformedPath } transformedUploadPathRegex, err := expandAppEngineFlexibleAppVersionHandlersStaticFilesUploadPathRegex(original["upload_path_regex"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUploadPathRegex); val.IsValid() && !isEmptyValue(val) { + } 
else if val := reflect.ValueOf(transformedUploadPathRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["uploadPathRegex"] = transformedUploadPathRegex } transformedHttpHeaders, err := expandAppEngineFlexibleAppVersionHandlersStaticFilesHttpHeaders(original["http_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpHeaders"] = transformedHttpHeaders } transformedMimeType, err := expandAppEngineFlexibleAppVersionHandlersStaticFilesMimeType(original["mime_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMimeType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMimeType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mimeType"] = transformedMimeType } transformedExpiration, err := expandAppEngineFlexibleAppVersionHandlersStaticFilesExpiration(original["expiration"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExpiration); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExpiration); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["expiration"] = transformedExpiration } transformedRequireMatchingFile, err := expandAppEngineFlexibleAppVersionHandlersStaticFilesRequireMatchingFile(original["require_matching_file"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequireMatchingFile); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequireMatchingFile); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requireMatchingFile"] = transformedRequireMatchingFile } transformedApplicationReadable, err := 
expandAppEngineFlexibleAppVersionHandlersStaticFilesApplicationReadable(original["application_readable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedApplicationReadable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedApplicationReadable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["applicationReadable"] = transformedApplicationReadable } return transformed, nil } -func expandAppEngineFlexibleAppVersionHandlersStaticFilesPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersStaticFilesPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionHandlersStaticFilesUploadPathRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersStaticFilesUploadPathRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionHandlersStaticFilesHttpHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandAppEngineFlexibleAppVersionHandlersStaticFilesHttpHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -2844,31 +2894,31 @@ func expandAppEngineFlexibleAppVersionHandlersStaticFilesHttpHeaders(v interface return m, nil } -func expandAppEngineFlexibleAppVersionHandlersStaticFilesMimeType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersStaticFilesMimeType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandAppEngineFlexibleAppVersionHandlersStaticFilesExpiration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersStaticFilesExpiration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionHandlersStaticFilesRequireMatchingFile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersStaticFilesRequireMatchingFile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionHandlersStaticFilesApplicationReadable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionHandlersStaticFilesApplicationReadable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionRuntimeMainExecutablePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionRuntimeMainExecutablePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionApiConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionApiConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 
0 || l[0] == nil { return nil, nil @@ -2880,62 +2930,62 @@ func expandAppEngineFlexibleAppVersionApiConfig(v interface{}, d TerraformResour transformedAuthFailAction, err := expandAppEngineFlexibleAppVersionApiConfigAuthFailAction(original["auth_fail_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAuthFailAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAuthFailAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["authFailAction"] = transformedAuthFailAction } transformedLogin, err := expandAppEngineFlexibleAppVersionApiConfigLogin(original["login"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLogin); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLogin); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["login"] = transformedLogin } transformedScript, err := expandAppEngineFlexibleAppVersionApiConfigScript(original["script"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScript); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScript); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["script"] = transformedScript } transformedSecurityLevel, err := expandAppEngineFlexibleAppVersionApiConfigSecurityLevel(original["security_level"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSecurityLevel); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSecurityLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["securityLevel"] = transformedSecurityLevel } transformedUrl, err := expandAppEngineFlexibleAppVersionApiConfigUrl(original["url"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["url"] = transformedUrl } return transformed, nil } -func expandAppEngineFlexibleAppVersionApiConfigAuthFailAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionApiConfigAuthFailAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionApiConfigLogin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionApiConfigLogin(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionApiConfigScript(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionApiConfigScript(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionApiConfigSecurityLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionApiConfigSecurityLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionApiConfigUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionApiConfigUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionEnvVariables(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandAppEngineFlexibleAppVersionEnvVariables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -2946,11 +2996,11 @@ func expandAppEngineFlexibleAppVersionEnvVariables(v interface{}, d TerraformRes return m, nil } -func expandAppEngineFlexibleAppVersionDefaultExpiration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionDefaultExpiration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionReadinessCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionReadinessCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2962,84 +3012,84 @@ func expandAppEngineFlexibleAppVersionReadinessCheck(v interface{}, d TerraformR transformedPath, err := expandAppEngineFlexibleAppVersionReadinessCheckPath(original["path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["path"] = transformedPath } transformedHost, err := expandAppEngineFlexibleAppVersionReadinessCheckHost(original["host"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["host"] = transformedHost } transformedFailureThreshold, err := expandAppEngineFlexibleAppVersionReadinessCheckFailureThreshold(original["failure_threshold"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !isEmptyValue(val) { 
+ } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["failureThreshold"] = transformedFailureThreshold } transformedSuccessThreshold, err := expandAppEngineFlexibleAppVersionReadinessCheckSuccessThreshold(original["success_threshold"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSuccessThreshold); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSuccessThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["successThreshold"] = transformedSuccessThreshold } transformedCheckInterval, err := expandAppEngineFlexibleAppVersionReadinessCheckCheckInterval(original["check_interval"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCheckInterval); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCheckInterval); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["checkInterval"] = transformedCheckInterval } transformedTimeout, err := expandAppEngineFlexibleAppVersionReadinessCheckTimeout(original["timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeout"] = transformedTimeout } transformedAppStartTimeout, err := expandAppEngineFlexibleAppVersionReadinessCheckAppStartTimeout(original["app_start_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAppStartTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAppStartTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["appStartTimeout"] = transformedAppStartTimeout } return transformed, nil } -func expandAppEngineFlexibleAppVersionReadinessCheckPath(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionReadinessCheckPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionReadinessCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionReadinessCheckHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionReadinessCheckFailureThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionReadinessCheckFailureThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionReadinessCheckSuccessThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionReadinessCheckSuccessThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionReadinessCheckCheckInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionReadinessCheckCheckInterval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionReadinessCheckTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionReadinessCheckTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionReadinessCheckAppStartTimeout(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionReadinessCheckAppStartTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionLivenessCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionLivenessCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3051,88 +3101,88 @@ func expandAppEngineFlexibleAppVersionLivenessCheck(v interface{}, d TerraformRe transformedPath, err := expandAppEngineFlexibleAppVersionLivenessCheckPath(original["path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["path"] = transformedPath } transformedHost, err := expandAppEngineFlexibleAppVersionLivenessCheckHost(original["host"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["host"] = transformedHost } transformedFailureThreshold, err := expandAppEngineFlexibleAppVersionLivenessCheckFailureThreshold(original["failure_threshold"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["failureThreshold"] = transformedFailureThreshold } transformedSuccessThreshold, err := 
expandAppEngineFlexibleAppVersionLivenessCheckSuccessThreshold(original["success_threshold"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSuccessThreshold); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSuccessThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["successThreshold"] = transformedSuccessThreshold } transformedCheckInterval, err := expandAppEngineFlexibleAppVersionLivenessCheckCheckInterval(original["check_interval"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCheckInterval); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCheckInterval); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["checkInterval"] = transformedCheckInterval } transformedTimeout, err := expandAppEngineFlexibleAppVersionLivenessCheckTimeout(original["timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeout"] = transformedTimeout } transformedInitialDelay, err := expandAppEngineFlexibleAppVersionLivenessCheckInitialDelay(original["initial_delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInitialDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInitialDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["initialDelay"] = transformedInitialDelay } return transformed, nil } -func expandAppEngineFlexibleAppVersionLivenessCheckPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionLivenessCheckPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { 
return v, nil } -func expandAppEngineFlexibleAppVersionLivenessCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionLivenessCheckHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionLivenessCheckFailureThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionLivenessCheckFailureThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionLivenessCheckSuccessThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionLivenessCheckSuccessThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionLivenessCheckCheckInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionLivenessCheckCheckInterval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionLivenessCheckTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionLivenessCheckTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionLivenessCheckInitialDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionLivenessCheckInitialDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandAppEngineFlexibleAppVersionNobuildFilesRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionNobuildFilesRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionDeployment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionDeployment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3144,35 +3194,35 @@ func expandAppEngineFlexibleAppVersionDeployment(v interface{}, d TerraformResou transformedZip, err := expandAppEngineFlexibleAppVersionDeploymentZip(original["zip"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedZip); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedZip); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["zip"] = transformedZip } transformedFiles, err := expandAppEngineFlexibleAppVersionDeploymentFiles(original["files"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFiles); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFiles); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["files"] = transformedFiles } transformedContainer, err := expandAppEngineFlexibleAppVersionDeploymentContainer(original["container"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedContainer); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedContainer); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["container"] = transformedContainer } transformedCloudBuildOptions, err := 
expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptions(original["cloud_build_options"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCloudBuildOptions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCloudBuildOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cloudBuildOptions"] = transformedCloudBuildOptions } return transformed, nil } -func expandAppEngineFlexibleAppVersionDeploymentZip(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionDeploymentZip(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3184,29 +3234,29 @@ func expandAppEngineFlexibleAppVersionDeploymentZip(v interface{}, d TerraformRe transformedSourceUrl, err := expandAppEngineFlexibleAppVersionDeploymentZipSourceUrl(original["source_url"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSourceUrl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSourceUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sourceUrl"] = transformedSourceUrl } transformedFilesCount, err := expandAppEngineFlexibleAppVersionDeploymentZipFilesCount(original["files_count"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFilesCount); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFilesCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["filesCount"] = transformedFilesCount } return transformed, nil } -func expandAppEngineFlexibleAppVersionDeploymentZipSourceUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionDeploymentZipSourceUrl(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionDeploymentZipFilesCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionDeploymentZipFilesCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionDeploymentFiles(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { +func expandAppEngineFlexibleAppVersionDeploymentFiles(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { if v == nil { return map[string]interface{}{}, nil } @@ -3218,18 +3268,18 @@ func expandAppEngineFlexibleAppVersionDeploymentFiles(v interface{}, d Terraform transformedSha1Sum, err := expandAppEngineFlexibleAppVersionDeploymentFilesSha1Sum(original["sha1_sum"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSha1Sum); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSha1Sum); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sha1Sum"] = transformedSha1Sum } transformedSourceUrl, err := expandAppEngineFlexibleAppVersionDeploymentFilesSourceUrl(original["source_url"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSourceUrl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSourceUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sourceUrl"] = transformedSourceUrl } - transformedName, err := expandString(original["name"], d, config) + transformedName, err := tpgresource.ExpandString(original["name"], d, config) if err != nil { return nil, err } @@ -3238,15 +3288,15 @@ func expandAppEngineFlexibleAppVersionDeploymentFiles(v interface{}, d Terraform return 
m, nil } -func expandAppEngineFlexibleAppVersionDeploymentFilesSha1Sum(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionDeploymentFilesSha1Sum(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionDeploymentFilesSourceUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionDeploymentFilesSourceUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionDeploymentContainer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionDeploymentContainer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3258,18 +3308,18 @@ func expandAppEngineFlexibleAppVersionDeploymentContainer(v interface{}, d Terra transformedImage, err := expandAppEngineFlexibleAppVersionDeploymentContainerImage(original["image"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedImage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedImage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["image"] = transformedImage } return transformed, nil } -func expandAppEngineFlexibleAppVersionDeploymentContainerImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionDeploymentContainerImage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptions(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3281,29 +3331,29 @@ func expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptions(v interface{}, transformedAppYamlPath, err := expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsAppYamlPath(original["app_yaml_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAppYamlPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAppYamlPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["appYamlPath"] = transformedAppYamlPath } transformedCloudBuildTimeout, err := expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsCloudBuildTimeout(original["cloud_build_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCloudBuildTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCloudBuildTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cloudBuildTimeout"] = transformedCloudBuildTimeout } return transformed, nil } -func expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsAppYamlPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsAppYamlPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsCloudBuildTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsCloudBuildTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionEndpointsApiService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionEndpointsApiService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3315,51 +3365,51 @@ func expandAppEngineFlexibleAppVersionEndpointsApiService(v interface{}, d Terra transformedName, err := expandAppEngineFlexibleAppVersionEndpointsApiServiceName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedConfigId, err := expandAppEngineFlexibleAppVersionEndpointsApiServiceConfigId(original["config_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedConfigId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedConfigId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["configId"] = transformedConfigId } transformedRolloutStrategy, err := expandAppEngineFlexibleAppVersionEndpointsApiServiceRolloutStrategy(original["rollout_strategy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRolloutStrategy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRolloutStrategy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["rolloutStrategy"] = transformedRolloutStrategy } transformedDisableTraceSampling, err := expandAppEngineFlexibleAppVersionEndpointsApiServiceDisableTraceSampling(original["disable_trace_sampling"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedDisableTraceSampling); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisableTraceSampling); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disableTraceSampling"] = transformedDisableTraceSampling } return transformed, nil } -func expandAppEngineFlexibleAppVersionEndpointsApiServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionEndpointsApiServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionEndpointsApiServiceConfigId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionEndpointsApiServiceConfigId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionEndpointsApiServiceRolloutStrategy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionEndpointsApiServiceRolloutStrategy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionEndpointsApiServiceDisableTraceSampling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionEndpointsApiServiceDisableTraceSampling(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionEntrypoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionEntrypoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || 
l[0] == nil { return nil, nil @@ -3371,18 +3421,18 @@ func expandAppEngineFlexibleAppVersionEntrypoint(v interface{}, d TerraformResou transformedShell, err := expandAppEngineFlexibleAppVersionEntrypointShell(original["shell"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedShell); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedShell); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["shell"] = transformedShell } return transformed, nil } -func expandAppEngineFlexibleAppVersionEntrypointShell(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionEntrypointShell(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionVPCAccessConnector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionVpcAccessConnector(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3391,21 +3441,21 @@ func expandAppEngineFlexibleAppVersionVPCAccessConnector(v interface{}, d Terraf original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedName, err := expandAppEngineFlexibleAppVersionVPCAccessConnectorName(original["name"], d, config) + transformedName, err := expandAppEngineFlexibleAppVersionVpcAccessConnectorName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } return transformed, nil } -func expandAppEngineFlexibleAppVersionVPCAccessConnectorName(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionVpcAccessConnectorName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScaling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScaling(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3417,95 +3467,95 @@ func expandAppEngineFlexibleAppVersionAutomaticScaling(v interface{}, d Terrafor transformedCoolDownPeriod, err := expandAppEngineFlexibleAppVersionAutomaticScalingCoolDownPeriod(original["cool_down_period"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCoolDownPeriod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCoolDownPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["coolDownPeriod"] = transformedCoolDownPeriod } transformedCpuUtilization, err := expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilization(original["cpu_utilization"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCpuUtilization); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCpuUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cpuUtilization"] = transformedCpuUtilization } transformedMaxConcurrentRequests, err := expandAppEngineFlexibleAppVersionAutomaticScalingMaxConcurrentRequests(original["max_concurrent_requests"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxConcurrentRequests); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxConcurrentRequests); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["maxConcurrentRequests"] = transformedMaxConcurrentRequests } transformedMaxIdleInstances, err := expandAppEngineFlexibleAppVersionAutomaticScalingMaxIdleInstances(original["max_idle_instances"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxIdleInstances); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxIdleInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxIdleInstances"] = transformedMaxIdleInstances } transformedMaxTotalInstances, err := expandAppEngineFlexibleAppVersionAutomaticScalingMaxTotalInstances(original["max_total_instances"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxTotalInstances); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxTotalInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxTotalInstances"] = transformedMaxTotalInstances } transformedMaxPendingLatency, err := expandAppEngineFlexibleAppVersionAutomaticScalingMaxPendingLatency(original["max_pending_latency"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxPendingLatency); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxPendingLatency); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxPendingLatency"] = transformedMaxPendingLatency } transformedMinIdleInstances, err := expandAppEngineFlexibleAppVersionAutomaticScalingMinIdleInstances(original["min_idle_instances"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinIdleInstances); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinIdleInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minIdleInstances"] = transformedMinIdleInstances } transformedMinTotalInstances, err := 
expandAppEngineFlexibleAppVersionAutomaticScalingMinTotalInstances(original["min_total_instances"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinTotalInstances); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinTotalInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minTotalInstances"] = transformedMinTotalInstances } transformedMinPendingLatency, err := expandAppEngineFlexibleAppVersionAutomaticScalingMinPendingLatency(original["min_pending_latency"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinPendingLatency); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinPendingLatency); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minPendingLatency"] = transformedMinPendingLatency } transformedRequestUtilization, err := expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilization(original["request_utilization"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestUtilization); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestUtilization"] = transformedRequestUtilization } transformedDiskUtilization, err := expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilization(original["disk_utilization"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDiskUtilization); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDiskUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["diskUtilization"] = transformedDiskUtilization } transformedNetworkUtilization, err := expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilization(original["network_utilization"], d, config) if err != nil { return nil, err 
- } else if val := reflect.ValueOf(transformedNetworkUtilization); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNetworkUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["networkUtilization"] = transformedNetworkUtilization } return transformed, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingCoolDownPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingCoolDownPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3517,57 +3567,57 @@ func expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilization(v interface transformedAggregationWindowLength, err := expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationAggregationWindowLength(original["aggregation_window_length"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAggregationWindowLength); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAggregationWindowLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["aggregationWindowLength"] = transformedAggregationWindowLength } transformedTargetUtilization, err := expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationTargetUtilization(original["target_utilization"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetUtilization); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedTargetUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetUtilization"] = transformedTargetUtilization } return transformed, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationAggregationWindowLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationAggregationWindowLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationTargetUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationTargetUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingMaxConcurrentRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingMaxConcurrentRequests(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingMaxIdleInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingMaxIdleInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingMaxTotalInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingMaxTotalInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandAppEngineFlexibleAppVersionAutomaticScalingMaxPendingLatency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingMaxPendingLatency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingMinIdleInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingMinIdleInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingMinTotalInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingMinTotalInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingMinPendingLatency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingMinPendingLatency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3579,29 +3629,29 @@ func expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilization(v inter transformedTargetRequestCountPerSecond, err := 
expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetRequestCountPerSecond(original["target_request_count_per_second"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetRequestCountPerSecond); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetRequestCountPerSecond); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetRequestCountPerSecond"] = transformedTargetRequestCountPerSecond } transformedTargetConcurrentRequests, err := expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetConcurrentRequests(original["target_concurrent_requests"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetConcurrentRequests); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetConcurrentRequests); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetConcurrentRequests"] = transformedTargetConcurrentRequests } return transformed, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetRequestCountPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetRequestCountPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetConcurrentRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetConcurrentRequests(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilization(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3613,51 +3663,51 @@ func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilization(v interfac transformedTargetWriteBytesPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteBytesPerSecond(original["target_write_bytes_per_second"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetWriteBytesPerSecond); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetWriteBytesPerSecond); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetWriteBytesPerSecond"] = transformedTargetWriteBytesPerSecond } transformedTargetWriteOpsPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteOpsPerSecond(original["target_write_ops_per_second"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetWriteOpsPerSecond); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetWriteOpsPerSecond); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetWriteOpsPerSecond"] = transformedTargetWriteOpsPerSecond } transformedTargetReadBytesPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadBytesPerSecond(original["target_read_bytes_per_second"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetReadBytesPerSecond); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetReadBytesPerSecond); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetReadBytesPerSecond"] = transformedTargetReadBytesPerSecond } 
transformedTargetReadOpsPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadOpsPerSecond(original["target_read_ops_per_second"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetReadOpsPerSecond); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetReadOpsPerSecond); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetReadOpsPerSecond"] = transformedTargetReadOpsPerSecond } return transformed, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteBytesPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteBytesPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteOpsPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteOpsPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadBytesPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadBytesPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadOpsPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadOpsPerSecond(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3669,51 +3719,51 @@ func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilization(v inter transformedTargetSentBytesPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentBytesPerSecond(original["target_sent_bytes_per_second"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetSentBytesPerSecond); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetSentBytesPerSecond); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetSentBytesPerSecond"] = transformedTargetSentBytesPerSecond } transformedTargetSentPacketsPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentPacketsPerSecond(original["target_sent_packets_per_second"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetSentPacketsPerSecond); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetSentPacketsPerSecond); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetSentPacketsPerSecond"] = transformedTargetSentPacketsPerSecond } transformedTargetReceivedBytesPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedBytesPerSecond(original["target_received_bytes_per_second"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedTargetReceivedBytesPerSecond); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetReceivedBytesPerSecond); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetReceivedBytesPerSecond"] = transformedTargetReceivedBytesPerSecond } transformedTargetReceivedPacketsPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedPacketsPerSecond(original["target_received_packets_per_second"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetReceivedPacketsPerSecond); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetReceivedPacketsPerSecond); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetReceivedPacketsPerSecond"] = transformedTargetReceivedPacketsPerSecond } return transformed, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentBytesPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentBytesPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentPacketsPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentPacketsPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedBytesPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedBytesPerSecond(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedPacketsPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedPacketsPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandAppEngineFlexibleAppVersionManualScaling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionManualScaling(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3725,14 +3775,14 @@ func expandAppEngineFlexibleAppVersionManualScaling(v interface{}, d TerraformRe transformedInstances, err := expandAppEngineFlexibleAppVersionManualScalingInstances(original["instances"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["instances"] = transformedInstances } return transformed, nil } -func expandAppEngineFlexibleAppVersionManualScalingInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandAppEngineFlexibleAppVersionManualScalingInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_service_network_settings.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_service_network_settings.go new file mode 100644 index 0000000000..abf2790d18 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_service_network_settings.go @@ -0,0 +1,382 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package appengine + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceAppEngineServiceNetworkSettings() *schema.Resource { + return &schema.Resource{ + Create: resourceAppEngineServiceNetworkSettingsCreate, + Read: resourceAppEngineServiceNetworkSettingsRead, + Update: resourceAppEngineServiceNetworkSettingsUpdate, + Delete: resourceAppEngineServiceNetworkSettingsDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAppEngineServiceNetworkSettingsImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: 
map[string]*schema.Schema{ + "network_settings": { + Type: schema.TypeList, + Required: true, + Description: `Ingress settings for this service. Will apply to all versions.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ingress_traffic_allowed": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED", "INGRESS_TRAFFIC_ALLOWED_ALL", "INGRESS_TRAFFIC_ALLOWED_INTERNAL_ONLY", "INGRESS_TRAFFIC_ALLOWED_INTERNAL_AND_LB", ""}), + Description: `The ingress settings for version or service. Default value: "INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED" Possible values: ["INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED", "INGRESS_TRAFFIC_ALLOWED_ALL", "INGRESS_TRAFFIC_ALLOWED_INTERNAL_ONLY", "INGRESS_TRAFFIC_ALLOWED_INTERNAL_AND_LB"]`, + Default: "INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED", + }, + }, + }, + }, + "service": { + Type: schema.TypeString, + Required: true, + Description: `The name of the service these settings apply to.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAppEngineServiceNetworkSettingsCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + idProp, err := expandAppEngineServiceNetworkSettingsService(d.Get("service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service"); !tpgresource.IsEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + networkSettingsProp, err := expandAppEngineServiceNetworkSettingsNetworkSettings(d.Get("network_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network_settings"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(networkSettingsProp)) && (ok || !reflect.DeepEqual(v, networkSettingsProp)) { + obj["networkSettings"] = networkSettingsProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}?updateMask=networkSettings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ServiceNetworkSettings: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ServiceNetworkSettings: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ServiceNetworkSettings: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/services/{{service}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = AppEngineOperationWaitTime( + config, res, project, "Creating ServiceNetworkSettings", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create ServiceNetworkSettings: %s", err) + } + + log.Printf("[DEBUG] Finished creating ServiceNetworkSettings %q: %#v", d.Id(), res) + + return resourceAppEngineServiceNetworkSettingsRead(d, meta) +} + +func 
resourceAppEngineServiceNetworkSettingsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ServiceNetworkSettings: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AppEngineServiceNetworkSettings %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ServiceNetworkSettings: %s", err) + } + + if err := d.Set("service", flattenAppEngineServiceNetworkSettingsService(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceNetworkSettings: %s", err) + } + if err := d.Set("network_settings", flattenAppEngineServiceNetworkSettingsNetworkSettings(res["networkSettings"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceNetworkSettings: %s", err) + } + + return nil +} + +func resourceAppEngineServiceNetworkSettingsUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return 
fmt.Errorf("Error fetching project for ServiceNetworkSettings: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + idProp, err := expandAppEngineServiceNetworkSettingsService(d.Get("service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + networkSettingsProp, err := expandAppEngineServiceNetworkSettingsNetworkSettings(d.Get("network_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkSettingsProp)) { + obj["networkSettings"] = networkSettingsProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ServiceNetworkSettings %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("service") { + updateMask = append(updateMask, "id") + } + + if d.HasChange("network_settings") { + updateMask = append(updateMask, "networkSettings") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: 
userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating ServiceNetworkSettings %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ServiceNetworkSettings %q: %#v", d.Id(), res) + } + + err = AppEngineOperationWaitTime( + config, res, project, "Updating ServiceNetworkSettings", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAppEngineServiceNetworkSettingsRead(d, meta) +} + +func resourceAppEngineServiceNetworkSettingsDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] AppEngine ServiceNetworkSettings resources"+ + " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func resourceAppEngineServiceNetworkSettingsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "apps/(?P[^/]+)/services/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/services/{{service}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAppEngineServiceNetworkSettingsService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineServiceNetworkSettingsNetworkSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + 
transformed["ingress_traffic_allowed"] = + flattenAppEngineServiceNetworkSettingsNetworkSettingsIngressTrafficAllowed(original["ingressTrafficAllowed"], d, config) + return []interface{}{transformed} +} +func flattenAppEngineServiceNetworkSettingsNetworkSettingsIngressTrafficAllowed(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandAppEngineServiceNetworkSettingsService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineServiceNetworkSettingsNetworkSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIngressTrafficAllowed, err := expandAppEngineServiceNetworkSettingsNetworkSettingsIngressTrafficAllowed(original["ingress_traffic_allowed"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIngressTrafficAllowed); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ingressTrafficAllowed"] = transformedIngressTrafficAllowed + } + + return transformed, nil +} + +func expandAppEngineServiceNetworkSettingsNetworkSettingsIngressTrafficAllowed(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_service_split_traffic.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_service_split_traffic.go new file mode 100644 index 0000000000..df5584deef --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_service_split_traffic.go @@ -0,0 +1,390 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package appengine + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceAppEngineServiceSplitTraffic() *schema.Resource { + return &schema.Resource{ + Create: resourceAppEngineServiceSplitTrafficCreate, + Read: resourceAppEngineServiceSplitTrafficRead, + Update: resourceAppEngineServiceSplitTrafficUpdate, + Delete: resourceAppEngineServiceSplitTrafficDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAppEngineServiceSplitTrafficImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Required: true, + Description: `The name of the service these settings apply to.`, + }, + "split": { + Type: schema.TypeList, + Required: true, + Description: `Mapping that defines 
fractional HTTP traffic diversion to different versions within the service.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allocations": { + Type: schema.TypeMap, + Required: true, + Description: `Mapping from version IDs within the service to fractional (0.000, 1] allocations of traffic for that version. Each version can be specified only once, but some versions in the service may not have any traffic allocation. Services that have traffic allocated cannot be deleted until either the service is deleted or their traffic allocation is removed. Allocations must sum to 1. Up to two decimal place precision is supported for IP-based splits and up to three decimal places is supported for cookie-based splits.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "shard_by": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"UNSPECIFIED", "COOKIE", "IP", "RANDOM", ""}), + Description: `Mechanism used to determine which version a request is sent to. The traffic selection algorithm will be stable for either type until allocations are changed. 
Possible values: ["UNSPECIFIED", "COOKIE", "IP", "RANDOM"]`, + }, + }, + }, + }, + "migrate_traffic": { + Type: schema.TypeBool, + Optional: true, + Description: `If set to true traffic will be migrated to this version.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAppEngineServiceSplitTrafficCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + idProp, err := expandAppEngineServiceSplitTrafficService(d.Get("service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service"); !tpgresource.IsEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + splitProp, err := expandAppEngineServiceSplitTrafficSplit(d.Get("split"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("split"); !tpgresource.IsEmptyValue(reflect.ValueOf(splitProp)) && (ok || !reflect.DeepEqual(v, splitProp)) { + obj["split"] = splitProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}?migrateTraffic={{migrate_traffic}}&updateMask=split") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ServiceSplitTraffic: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ServiceSplitTraffic: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ServiceSplitTraffic: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/services/{{service}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = AppEngineOperationWaitTime( + config, res, project, "Creating ServiceSplitTraffic", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create ServiceSplitTraffic: %s", err) + } + + log.Printf("[DEBUG] Finished creating ServiceSplitTraffic %q: %#v", d.Id(), res) + + return resourceAppEngineServiceSplitTrafficRead(d, meta) +} + +func resourceAppEngineServiceSplitTrafficRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ServiceSplitTraffic: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: 
userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AppEngineServiceSplitTraffic %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ServiceSplitTraffic: %s", err) + } + + if err := d.Set("service", flattenAppEngineServiceSplitTrafficService(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceSplitTraffic: %s", err) + } + + return nil +} + +func resourceAppEngineServiceSplitTrafficUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ServiceSplitTraffic: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + idProp, err := expandAppEngineServiceSplitTrafficService(d.Get("service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + splitProp, err := expandAppEngineServiceSplitTrafficSplit(d.Get("split"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("split"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, splitProp)) { + obj["split"] = splitProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}?migrateTraffic={{migrate_traffic}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ServiceSplitTraffic %q: %#v", d.Id(), obj) + 
updateMask := []string{} + + if d.HasChange("service") { + updateMask = append(updateMask, "id") + } + + if d.HasChange("split") { + updateMask = append(updateMask, "split") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating ServiceSplitTraffic %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ServiceSplitTraffic %q: %#v", d.Id(), res) + } + + err = AppEngineOperationWaitTime( + config, res, project, "Updating ServiceSplitTraffic", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAppEngineServiceSplitTrafficRead(d, meta) +} + +func resourceAppEngineServiceSplitTrafficDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] AppEngine ServiceSplitTraffic resources"+ + " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func resourceAppEngineServiceSplitTrafficImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "apps/(?P[^/]+)/services/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/services/{{service}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenAppEngineServiceSplitTrafficService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandAppEngineServiceSplitTrafficService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineServiceSplitTrafficSplit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedShardBy, err := expandAppEngineServiceSplitTrafficSplitShardBy(original["shard_by"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedShardBy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["shardBy"] = transformedShardBy + } + + transformedAllocations, err := expandAppEngineServiceSplitTrafficSplitAllocations(original["allocations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllocations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["allocations"] = transformedAllocations + } + + return transformed, nil +} + +func expandAppEngineServiceSplitTrafficSplitShardBy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineServiceSplitTrafficSplitAllocations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_standard_app_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_standard_app_version.go new file mode 100644 index 0000000000..d9b276c81f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/appengine/resource_app_engine_standard_app_version.go @@ -0,0 +1,2059 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package appengine + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceAppEngineStandardAppVersion() *schema.Resource { + return &schema.Resource{ + Create: resourceAppEngineStandardAppVersionCreate, + Read: resourceAppEngineStandardAppVersionRead, + Update: resourceAppEngineStandardAppVersionUpdate, + Delete: resourceAppEngineStandardAppVersionDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAppEngineStandardAppVersionImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "deployment": { + Type: schema.TypeList, + Required: true, + Description: `Code and application artifacts that make up this version.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "files": { + Type: schema.TypeSet, + Optional: true, + Description: `Manifest of the files stored in Google Cloud Storage that are included as part of this version. 
+All files must be readable using the credentials supplied with this call.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "source_url": { + Type: schema.TypeString, + Required: true, + Description: `Source URL`, + }, + "sha1_sum": { + Type: schema.TypeString, + Optional: true, + Description: `SHA1 checksum of the file`, + }, + }, + }, + AtLeastOneOf: []string{"deployment.0.zip", "deployment.0.files"}, + }, + "zip": { + Type: schema.TypeList, + Optional: true, + Description: `Zip File`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_url": { + Type: schema.TypeString, + Required: true, + Description: `Source URL`, + }, + "files_count": { + Type: schema.TypeInt, + Optional: true, + Description: `files count`, + }, + }, + }, + AtLeastOneOf: []string{"deployment.0.zip", "deployment.0.files"}, + }, + }, + }, + }, + "entrypoint": { + Type: schema.TypeList, + Required: true, + Description: `The entrypoint for the application.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "shell": { + Type: schema.TypeString, + Required: true, + Description: `The format should be a shell command that can be fed to bash -c.`, + }, + }, + }, + }, + "runtime": { + Type: schema.TypeString, + Required: true, + Description: `Desired runtime. 
Example python27.`, + }, + "service": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `AppEngine service resource`, + }, + "app_engine_apis": { + Type: schema.TypeBool, + Optional: true, + Description: `Allows App Engine second generation runtimes to access the legacy bundled services.`, + }, + "automatic_scaling": { + Type: schema.TypeList, + Optional: true, + Description: `Automatic scaling is based on request rate, response latencies, and other application metrics.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_concurrent_requests": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of concurrent requests an automatic scaling instance can accept before the scheduler spawns a new instance. + +Defaults to a runtime-specific value.`, + }, + "max_idle_instances": { + Type: schema.TypeInt, + Optional: true, + Description: `Maximum number of idle instances that should be maintained for this version.`, + }, + "max_pending_latency": { + Type: schema.TypeString, + Optional: true, + Description: `Maximum amount of time that a request should wait in the pending queue before starting a new instance to handle it. +A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + }, + "min_idle_instances": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum number of idle instances that should be maintained for this version. Only applicable for the default version of a service.`, + }, + "min_pending_latency": { + Type: schema.TypeString, + Optional: true, + Description: `Minimum amount of time a request should wait in the pending queue before starting a new instance to handle it. +A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, + }, + "standard_scheduler_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Scheduler settings for standard environment.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_instances": { + Type: schema.TypeInt, + Optional: true, + Description: `Maximum number of instances to run for this version. Set to zero to disable maxInstances configuration.`, + }, + "min_instances": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum number of instances to run for this version. Set to zero to disable minInstances configuration.`, + }, + "target_cpu_utilization": { + Type: schema.TypeFloat, + Optional: true, + Description: `Target CPU utilization ratio to maintain when scaling. Should be a value in the range [0.50, 0.95], zero, or a negative value.`, + }, + "target_throughput_utilization": { + Type: schema.TypeFloat, + Optional: true, + Description: `Target throughput utilization ratio to maintain when scaling. Should be a value in the range [0.50, 0.95], zero, or a negative value.`, + }, + }, + }, + }, + }, + }, + ConflictsWith: []string{"basic_scaling", "manual_scaling"}, + }, + "basic_scaling": { + Type: schema.TypeList, + Optional: true, + Description: `Basic scaling creates instances when your application receives requests. Each instance will be shut down when the application becomes idle. Basic scaling is ideal for work that is intermittent or driven by user activity.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_instances": { + Type: schema.TypeInt, + Required: true, + Description: `Maximum number of instances to create for this version. Must be in the range [1.0, 200.0].`, + }, + "idle_timeout": { + Type: schema.TypeString, + Optional: true, + Description: `Duration of time after the last request that an instance must wait before the instance is shut down. +A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s". Defaults to 900s.`, + Default: "900s", + }, + }, + }, + ConflictsWith: []string{"automatic_scaling", "manual_scaling"}, + }, + "env_variables": { + Type: schema.TypeMap, + Optional: true, + Description: `Environment variables available to the application.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "handlers": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `An ordered list of URL-matching patterns that should be applied to incoming requests. +The first matching URL handles the request and other request handlers are not attempted.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auth_fail_action": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED", ""}), + Description: `Actions to take when the user is not logged in. Possible values: ["AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED"]`, + }, + "login": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED", ""}), + Description: `Methods to restrict access to a URL based on login status. Possible values: ["LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED"]`, + }, + "redirect_http_response_code": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"REDIRECT_HTTP_RESPONSE_CODE_301", "REDIRECT_HTTP_RESPONSE_CODE_302", "REDIRECT_HTTP_RESPONSE_CODE_303", "REDIRECT_HTTP_RESPONSE_CODE_307", ""}), + Description: `30x code to use when performing redirects for the secure field. Possible values: ["REDIRECT_HTTP_RESPONSE_CODE_301", "REDIRECT_HTTP_RESPONSE_CODE_302", "REDIRECT_HTTP_RESPONSE_CODE_303", "REDIRECT_HTTP_RESPONSE_CODE_307"]`, + }, + "script": { + Type: schema.TypeList, + Optional: true, + Description: `Executes a script to handle the requests that match this URL pattern. 
+Only the auto value is supported for Node.js in the App Engine standard environment, for example "script:" "auto".`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "script_path": { + Type: schema.TypeString, + Required: true, + Description: `Path to the script from the application root directory.`, + }, + }, + }, + }, + "security_level": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS", ""}), + Description: `Security (HTTPS) enforcement for this URL. Possible values: ["SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS"]`, + }, + "static_files": { + Type: schema.TypeList, + Optional: true, + Description: `Files served directly to the user for a given URL, such as images, CSS stylesheets, or JavaScript source files. Static file handlers describe which files in the application directory are static files, and which URLs serve them.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "application_readable": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether files should also be uploaded as code data. By default, files declared in static file handlers are uploaded as +static data and are only served to end users; they cannot be read by the application. If enabled, uploads are charged +against both your code and static data storage resource quotas.`, + }, + "expiration": { + Type: schema.TypeString, + Optional: true, + Description: `Time a static file served by this handler should be cached by web proxies and browsers. +A duration in seconds with up to nine fractional digits, terminated by 's'. Example "3.5s".`, + }, + "http_headers": { + Type: schema.TypeMap, + Optional: true, + Description: `HTTP headers to use for all responses from these URLs. 
+An object containing a list of "key:value" value pairs.".`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "mime_type": { + Type: schema.TypeString, + Optional: true, + Description: `MIME type used to serve all files served by this handler. +Defaults to file-specific MIME types, which are derived from each file's filename extension.`, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path to the static files matched by the URL pattern, from the application root directory. The path can refer to text matched in groupings in the URL pattern.`, + }, + "require_matching_file": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether this handler should match the request if the file referenced by the handler does not exist.`, + }, + "upload_path_regex": { + Type: schema.TypeString, + Optional: true, + Description: `Regular expression that matches the file paths for all files that should be referenced by this handler.`, + }, + }, + }, + }, + "url_regex": { + Type: schema.TypeString, + Optional: true, + Description: `URL prefix. Uses regular expression syntax, which means regexp special characters must be escaped, but should not contain groupings. +All URLs that begin with this prefix are handled by this handler, using the portion of the URL after the prefix as part of the file path.`, + }, + }, + }, + }, + "inbound_services": { + Type: schema.TypeSet, + Optional: true, + Description: `A list of the types of messages that this application is able to receive. 
Possible values: ["INBOUND_SERVICE_MAIL", "INBOUND_SERVICE_MAIL_BOUNCE", "INBOUND_SERVICE_XMPP_ERROR", "INBOUND_SERVICE_XMPP_MESSAGE", "INBOUND_SERVICE_XMPP_SUBSCRIBE", "INBOUND_SERVICE_XMPP_PRESENCE", "INBOUND_SERVICE_CHANNEL_PRESENCE", "INBOUND_SERVICE_WARMUP"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"INBOUND_SERVICE_MAIL", "INBOUND_SERVICE_MAIL_BOUNCE", "INBOUND_SERVICE_XMPP_ERROR", "INBOUND_SERVICE_XMPP_MESSAGE", "INBOUND_SERVICE_XMPP_SUBSCRIBE", "INBOUND_SERVICE_XMPP_PRESENCE", "INBOUND_SERVICE_CHANNEL_PRESENCE", "INBOUND_SERVICE_WARMUP"}), + }, + Set: schema.HashString, + }, + "instance_class": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Instance class that is used to run this version. Valid values are +AutomaticScaling: F1, F2, F4, F4_1G +BasicScaling or ManualScaling: B1, B2, B4, B4_1G, B8 +Defaults to F1 for AutomaticScaling and B2 for ManualScaling and BasicScaling. If no scaling is specified, AutomaticScaling is chosen.`, + }, + "libraries": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for third-party Python runtime libraries that are required by the application.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the library. Example "django".`, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version of the library to select, or "latest".`, + }, + }, + }, + }, + "manual_scaling": { + Type: schema.TypeList, + Optional: true, + Description: `A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instances": { + Type: schema.TypeInt, + Required: true, + Description: `Number of instances to assign to the service at the start. 
+ +**Note:** When managing the number of instances at runtime through the App Engine Admin API or the (now deprecated) Python 2 +Modules API set_num_instances() you must use 'lifecycle.ignore_changes = ["manual_scaling"[0].instances]' to prevent drift detection.`, + }, + }, + }, + ConflictsWith: []string{"automatic_scaling", "basic_scaling"}, + }, + "runtime_api_version": { + Type: schema.TypeString, + Optional: true, + Description: `The version of the API in the given runtime environment. +Please see the app.yaml reference for valid values at 'https://cloud.google.com/appengine/docs/standard//config/appref'\ +Substitute '' with 'python', 'java', 'php', 'ruby', 'go' or 'nodejs'.`, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The identity that the deployed version will run as. Admin API will use the App Engine Appspot service account as default if this field is neither provided in app.yaml file nor through CLI flag.`, + }, + "threadsafe": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether multiple requests can be dispatched to this version at once.`, + }, + "version_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Relative name of the version within the service. For example, 'v1'. Version names can contain only lowercase letters, numbers, or hyphens. Reserved names,"default", "latest", and any name with the prefix "ah-".`, + }, + "vpc_access_connector": { + Type: schema.TypeList, + Optional: true, + Description: `Enables VPC connectivity for standard apps.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Full Serverless VPC Access Connector name e.g. 
/projects/my-project/locations/us-central1/connectors/c1.`, + }, + "egress_setting": { + Type: schema.TypeString, + Optional: true, + Description: `The egress setting for the connector, controlling what traffic is diverted through it.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Full path to the Version resource in the API. Example, "v1".`, + }, + "noop_on_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If set to 'true', the application version will not be deleted.`, + }, + "delete_service_on_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If set to 'true', the service will be deleted if it is the last version.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceAppEngineStandardAppVersionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + idProp, err := expandAppEngineStandardAppVersionVersionId(d.Get("version_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + runtimeProp, err := expandAppEngineStandardAppVersionRuntime(d.Get("runtime"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("runtime"); !tpgresource.IsEmptyValue(reflect.ValueOf(runtimeProp)) && (ok || !reflect.DeepEqual(v, runtimeProp)) { + obj["runtime"] = runtimeProp + } + serviceAccountProp, err := expandAppEngineStandardAppVersionServiceAccount(d.Get("service_account"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_account"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(serviceAccountProp)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { + obj["serviceAccount"] = serviceAccountProp + } + threadsafeProp, err := expandAppEngineStandardAppVersionThreadsafe(d.Get("threadsafe"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("threadsafe"); !tpgresource.IsEmptyValue(reflect.ValueOf(threadsafeProp)) && (ok || !reflect.DeepEqual(v, threadsafeProp)) { + obj["threadsafe"] = threadsafeProp + } + appEngineApisProp, err := expandAppEngineStandardAppVersionAppEngineApis(d.Get("app_engine_apis"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("app_engine_apis"); !tpgresource.IsEmptyValue(reflect.ValueOf(appEngineApisProp)) && (ok || !reflect.DeepEqual(v, appEngineApisProp)) { + obj["appEngineApis"] = appEngineApisProp + } + runtimeApiVersionProp, err := expandAppEngineStandardAppVersionRuntimeApiVersion(d.Get("runtime_api_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("runtime_api_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(runtimeApiVersionProp)) && (ok || !reflect.DeepEqual(v, runtimeApiVersionProp)) { + obj["runtimeApiVersion"] = runtimeApiVersionProp + } + handlersProp, err := expandAppEngineStandardAppVersionHandlers(d.Get("handlers"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("handlers"); !tpgresource.IsEmptyValue(reflect.ValueOf(handlersProp)) && (ok || !reflect.DeepEqual(v, handlersProp)) { + obj["handlers"] = handlersProp + } + librariesProp, err := expandAppEngineStandardAppVersionLibraries(d.Get("libraries"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("libraries"); !tpgresource.IsEmptyValue(reflect.ValueOf(librariesProp)) && (ok || !reflect.DeepEqual(v, librariesProp)) { + obj["libraries"] = librariesProp + } + envVariablesProp, err := expandAppEngineStandardAppVersionEnvVariables(d.Get("env_variables"), d, 
config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("env_variables"); !tpgresource.IsEmptyValue(reflect.ValueOf(envVariablesProp)) && (ok || !reflect.DeepEqual(v, envVariablesProp)) { + obj["envVariables"] = envVariablesProp + } + deploymentProp, err := expandAppEngineStandardAppVersionDeployment(d.Get("deployment"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployment"); !tpgresource.IsEmptyValue(reflect.ValueOf(deploymentProp)) && (ok || !reflect.DeepEqual(v, deploymentProp)) { + obj["deployment"] = deploymentProp + } + entrypointProp, err := expandAppEngineStandardAppVersionEntrypoint(d.Get("entrypoint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("entrypoint"); !tpgresource.IsEmptyValue(reflect.ValueOf(entrypointProp)) && (ok || !reflect.DeepEqual(v, entrypointProp)) { + obj["entrypoint"] = entrypointProp + } + vpcAccessConnectorProp, err := expandAppEngineStandardAppVersionVpcAccessConnector(d.Get("vpc_access_connector"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("vpc_access_connector"); !tpgresource.IsEmptyValue(reflect.ValueOf(vpcAccessConnectorProp)) && (ok || !reflect.DeepEqual(v, vpcAccessConnectorProp)) { + obj["vpcAccessConnector"] = vpcAccessConnectorProp + } + inboundServicesProp, err := expandAppEngineStandardAppVersionInboundServices(d.Get("inbound_services"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("inbound_services"); !tpgresource.IsEmptyValue(reflect.ValueOf(inboundServicesProp)) && (ok || !reflect.DeepEqual(v, inboundServicesProp)) { + obj["inboundServices"] = inboundServicesProp + } + instanceClassProp, err := expandAppEngineStandardAppVersionInstanceClass(d.Get("instance_class"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("instance_class"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceClassProp)) && (ok || !reflect.DeepEqual(v, 
instanceClassProp)) { + obj["instanceClass"] = instanceClassProp + } + automaticScalingProp, err := expandAppEngineStandardAppVersionAutomaticScaling(d.Get("automatic_scaling"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("automatic_scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(automaticScalingProp)) && (ok || !reflect.DeepEqual(v, automaticScalingProp)) { + obj["automaticScaling"] = automaticScalingProp + } + basicScalingProp, err := expandAppEngineStandardAppVersionBasicScaling(d.Get("basic_scaling"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("basic_scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(basicScalingProp)) && (ok || !reflect.DeepEqual(v, basicScalingProp)) { + obj["basicScaling"] = basicScalingProp + } + manualScalingProp, err := expandAppEngineStandardAppVersionManualScaling(d.Get("manual_scaling"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("manual_scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(manualScalingProp)) && (ok || !reflect.DeepEqual(v, manualScalingProp)) { + obj["manualScaling"] = manualScalingProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new StandardAppVersion: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for StandardAppVersion: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) + if err != nil { + return fmt.Errorf("Error creating StandardAppVersion: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/services/{{service}}/versions/{{version_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = AppEngineOperationWaitTime( + config, res, project, "Creating StandardAppVersion", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create StandardAppVersion: %s", err) + } + + log.Printf("[DEBUG] Finished creating StandardAppVersion %q: %#v", d.Id(), res) + + return resourceAppEngineStandardAppVersionRead(d, meta) +} + +func resourceAppEngineStandardAppVersionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions/{{version_id}}?view=FULL") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for StandardAppVersion: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: 
"GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AppEngineStandardAppVersion %q", d.Id())) + } + + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("noop_on_destroy"); !ok { + if err := d.Set("noop_on_destroy", false); err != nil { + return fmt.Errorf("Error setting noop_on_destroy: %s", err) + } + } + if _, ok := d.GetOkExists("delete_service_on_destroy"); !ok { + if err := d.Set("delete_service_on_destroy", false); err != nil { + return fmt.Errorf("Error setting delete_service_on_destroy: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + + if err := d.Set("name", flattenAppEngineStandardAppVersionName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + if err := d.Set("version_id", flattenAppEngineStandardAppVersionVersionId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + if err := d.Set("runtime", flattenAppEngineStandardAppVersionRuntime(res["runtime"], d, config)); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + if err := d.Set("service_account", flattenAppEngineStandardAppVersionServiceAccount(res["serviceAccount"], d, config)); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + if err := d.Set("app_engine_apis", flattenAppEngineStandardAppVersionAppEngineApis(res["appEngineApis"], d, config)); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + if err := d.Set("runtime_api_version", flattenAppEngineStandardAppVersionRuntimeApiVersion(res["runtimeApiVersion"], d, config)); err != nil { + return 
fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + if err := d.Set("handlers", flattenAppEngineStandardAppVersionHandlers(res["handlers"], d, config)); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + if err := d.Set("libraries", flattenAppEngineStandardAppVersionLibraries(res["libraries"], d, config)); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + if err := d.Set("vpc_access_connector", flattenAppEngineStandardAppVersionVpcAccessConnector(res["vpcAccessConnector"], d, config)); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + if err := d.Set("inbound_services", flattenAppEngineStandardAppVersionInboundServices(res["inboundServices"], d, config)); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + if err := d.Set("instance_class", flattenAppEngineStandardAppVersionInstanceClass(res["instanceClass"], d, config)); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + if err := d.Set("automatic_scaling", flattenAppEngineStandardAppVersionAutomaticScaling(res["automaticScaling"], d, config)); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + if err := d.Set("basic_scaling", flattenAppEngineStandardAppVersionBasicScaling(res["basicScaling"], d, config)); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + if err := d.Set("manual_scaling", flattenAppEngineStandardAppVersionManualScaling(res["manualScaling"], d, config)); err != nil { + return fmt.Errorf("Error reading StandardAppVersion: %s", err) + } + + return nil +} + +func resourceAppEngineStandardAppVersionUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := 
tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for StandardAppVersion: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + idProp, err := expandAppEngineStandardAppVersionVersionId(d.Get("version_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + runtimeProp, err := expandAppEngineStandardAppVersionRuntime(d.Get("runtime"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("runtime"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeProp)) { + obj["runtime"] = runtimeProp + } + serviceAccountProp, err := expandAppEngineStandardAppVersionServiceAccount(d.Get("service_account"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_account"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { + obj["serviceAccount"] = serviceAccountProp + } + threadsafeProp, err := expandAppEngineStandardAppVersionThreadsafe(d.Get("threadsafe"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("threadsafe"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, threadsafeProp)) { + obj["threadsafe"] = threadsafeProp + } + appEngineApisProp, err := expandAppEngineStandardAppVersionAppEngineApis(d.Get("app_engine_apis"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("app_engine_apis"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, appEngineApisProp)) { + obj["appEngineApis"] = appEngineApisProp + } + runtimeApiVersionProp, err := expandAppEngineStandardAppVersionRuntimeApiVersion(d.Get("runtime_api_version"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("runtime_api_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, runtimeApiVersionProp)) { + obj["runtimeApiVersion"] = runtimeApiVersionProp + } + handlersProp, err := expandAppEngineStandardAppVersionHandlers(d.Get("handlers"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("handlers"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, handlersProp)) { + obj["handlers"] = handlersProp + } + librariesProp, err := expandAppEngineStandardAppVersionLibraries(d.Get("libraries"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("libraries"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, librariesProp)) { + obj["libraries"] = librariesProp + } + envVariablesProp, err := expandAppEngineStandardAppVersionEnvVariables(d.Get("env_variables"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("env_variables"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, envVariablesProp)) { + obj["envVariables"] = envVariablesProp + } + deploymentProp, err := expandAppEngineStandardAppVersionDeployment(d.Get("deployment"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deployment"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deploymentProp)) { + obj["deployment"] = deploymentProp + } + entrypointProp, err := expandAppEngineStandardAppVersionEntrypoint(d.Get("entrypoint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("entrypoint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entrypointProp)) { + obj["entrypoint"] = entrypointProp + } + vpcAccessConnectorProp, err := expandAppEngineStandardAppVersionVpcAccessConnector(d.Get("vpc_access_connector"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("vpc_access_connector"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, vpcAccessConnectorProp)) { + obj["vpcAccessConnector"] = vpcAccessConnectorProp + } + inboundServicesProp, err := expandAppEngineStandardAppVersionInboundServices(d.Get("inbound_services"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("inbound_services"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, inboundServicesProp)) { + obj["inboundServices"] = inboundServicesProp + } + instanceClassProp, err := expandAppEngineStandardAppVersionInstanceClass(d.Get("instance_class"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("instance_class"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, instanceClassProp)) { + obj["instanceClass"] = instanceClassProp + } + automaticScalingProp, err := expandAppEngineStandardAppVersionAutomaticScaling(d.Get("automatic_scaling"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("automatic_scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, automaticScalingProp)) { + obj["automaticScaling"] = automaticScalingProp + } + basicScalingProp, err := expandAppEngineStandardAppVersionBasicScaling(d.Get("basic_scaling"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("basic_scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, basicScalingProp)) { + obj["basicScaling"] = basicScalingProp + } + manualScalingProp, err := expandAppEngineStandardAppVersionManualScaling(d.Get("manual_scaling"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("manual_scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, manualScalingProp)) { + obj["manualScaling"] = manualScalingProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, 
"apps/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating StandardAppVersion %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) + + if err != nil { + return fmt.Errorf("Error updating StandardAppVersion %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating StandardAppVersion %q: %#v", d.Id(), res) + } + + err = AppEngineOperationWaitTime( + config, res, project, "Updating StandardAppVersion", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceAppEngineStandardAppVersionRead(d, meta) +} + +func resourceAppEngineStandardAppVersionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + if d.Get("noop_on_destroy") == true { + log.Printf("[DEBUG] Keeping the AppVersion %q", d.Id()) + return nil + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/services/{{service}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer 
transport_tpg.MutexStore.Unlock(lockName) + + if d.Get("delete_service_on_destroy") == true { + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") + if err != nil { + return err + } + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Service %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Service") + } + err = AppEngineOperationWaitTime( + config, res, project, "Deleting Service", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) + return nil + } else { + url, err := tpgresource.ReplaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions/{{version_id}}") + if err != nil { + return err + } + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AppVersion %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsAppEngineRetryableError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AppVersion") + } + err = AppEngineOperationWaitTime( + config, res, project, "Deleting AppVersion", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + log.Printf("[DEBUG] Finished deleting AppVersion %q: %#v", d.Id(), res) + return nil + + } +} + +func 
resourceAppEngineStandardAppVersionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "apps/(?P[^/]+)/services/(?P[^/]+)/versions/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "apps/{{project}}/services/{{service}}/versions/{{version_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Explicitly set virtual fields to default values on import + if err := d.Set("noop_on_destroy", false); err != nil { + return nil, fmt.Errorf("Error setting noop_on_destroy: %s", err) + } + if err := d.Set("delete_service_on_destroy", false); err != nil { + return nil, fmt.Errorf("Error setting delete_service_on_destroy: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenAppEngineStandardAppVersionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionVersionId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionRuntime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionAppEngineApis(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionRuntimeApiVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionHandlers(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "url_regex": flattenAppEngineStandardAppVersionHandlersUrlRegex(original["urlRegex"], d, config), + "security_level": flattenAppEngineStandardAppVersionHandlersSecurityLevel(original["securityLevel"], d, config), + "login": flattenAppEngineStandardAppVersionHandlersLogin(original["login"], d, config), + "auth_fail_action": flattenAppEngineStandardAppVersionHandlersAuthFailAction(original["authFailAction"], d, config), + "redirect_http_response_code": flattenAppEngineStandardAppVersionHandlersRedirectHttpResponseCode(original["redirectHttpResponseCode"], d, config), + "script": flattenAppEngineStandardAppVersionHandlersScript(original["script"], d, config), + "static_files": flattenAppEngineStandardAppVersionHandlersStaticFiles(original["staticFiles"], d, config), + }) + } + return transformed +} +func flattenAppEngineStandardAppVersionHandlersUrlRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionHandlersSecurityLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionHandlersLogin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionHandlersAuthFailAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionHandlersRedirectHttpResponseCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { 
+ return v +} + +func flattenAppEngineStandardAppVersionHandlersScript(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["script_path"] = + flattenAppEngineStandardAppVersionHandlersScriptScriptPath(original["scriptPath"], d, config) + return []interface{}{transformed} +} +func flattenAppEngineStandardAppVersionHandlersScriptScriptPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionHandlersStaticFiles(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["path"] = + flattenAppEngineStandardAppVersionHandlersStaticFilesPath(original["path"], d, config) + transformed["upload_path_regex"] = + flattenAppEngineStandardAppVersionHandlersStaticFilesUploadPathRegex(original["uploadPathRegex"], d, config) + transformed["http_headers"] = + flattenAppEngineStandardAppVersionHandlersStaticFilesHttpHeaders(original["httpHeaders"], d, config) + transformed["mime_type"] = + flattenAppEngineStandardAppVersionHandlersStaticFilesMimeType(original["mimeType"], d, config) + transformed["expiration"] = + flattenAppEngineStandardAppVersionHandlersStaticFilesExpiration(original["expiration"], d, config) + transformed["require_matching_file"] = + flattenAppEngineStandardAppVersionHandlersStaticFilesRequireMatchingFile(original["requireMatchingFile"], d, config) + transformed["application_readable"] = + flattenAppEngineStandardAppVersionHandlersStaticFilesApplicationReadable(original["applicationReadable"], d, config) + return []interface{}{transformed} +} +func 
flattenAppEngineStandardAppVersionHandlersStaticFilesPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionHandlersStaticFilesUploadPathRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionHandlersStaticFilesHttpHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionHandlersStaticFilesMimeType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionHandlersStaticFilesExpiration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionHandlersStaticFilesRequireMatchingFile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionHandlersStaticFilesApplicationReadable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionLibraries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenAppEngineStandardAppVersionLibrariesName(original["name"], d, config), + "version": flattenAppEngineStandardAppVersionLibrariesVersion(original["version"], d, config), + }) + } + return transformed +} +func flattenAppEngineStandardAppVersionLibrariesName(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionLibrariesVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionVpcAccessConnector(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenAppEngineStandardAppVersionVpcAccessConnectorName(original["name"], d, config) + transformed["egress_setting"] = + flattenAppEngineStandardAppVersionVpcAccessConnectorEgressSetting(original["egressSetting"], d, config) + return []interface{}{transformed} +} +func flattenAppEngineStandardAppVersionVpcAccessConnectorName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionVpcAccessConnectorEgressSetting(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionInboundServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAppEngineStandardAppVersionInstanceClass(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionAutomaticScaling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["max_concurrent_requests"] = + flattenAppEngineStandardAppVersionAutomaticScalingMaxConcurrentRequests(original["maxConcurrentRequests"], d, config) + 
transformed["max_idle_instances"] = + flattenAppEngineStandardAppVersionAutomaticScalingMaxIdleInstances(original["maxIdleInstances"], d, config) + transformed["max_pending_latency"] = + flattenAppEngineStandardAppVersionAutomaticScalingMaxPendingLatency(original["maxPendingLatency"], d, config) + transformed["min_idle_instances"] = + flattenAppEngineStandardAppVersionAutomaticScalingMinIdleInstances(original["minIdleInstances"], d, config) + transformed["min_pending_latency"] = + flattenAppEngineStandardAppVersionAutomaticScalingMinPendingLatency(original["minPendingLatency"], d, config) + transformed["standard_scheduler_settings"] = + flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettings(original["standardSchedulerSettings"], d, config) + return []interface{}{transformed} +} +func flattenAppEngineStandardAppVersionAutomaticScalingMaxConcurrentRequests(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAppEngineStandardAppVersionAutomaticScalingMaxIdleInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAppEngineStandardAppVersionAutomaticScalingMaxPendingLatency(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenAppEngineStandardAppVersionAutomaticScalingMinIdleInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAppEngineStandardAppVersionAutomaticScalingMinPendingLatency(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["target_cpu_utilization"] = + flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetCpuUtilization(original["targetCpuUtilization"], d, config) + transformed["target_throughput_utilization"] = + flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetThroughputUtilization(original["targetThroughputUtilization"], d, config) + transformed["min_instances"] = + flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMinInstances(original["minInstances"], d, config) + transformed["max_instances"] = + flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMaxInstances(original["maxInstances"], d, config) + return []interface{}{transformed} +} +func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetCpuUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + 
+func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetThroughputUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMinInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMaxInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAppEngineStandardAppVersionBasicScaling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["idle_timeout"] = + flattenAppEngineStandardAppVersionBasicScalingIdleTimeout(original["idleTimeout"], d, config) + transformed["max_instances"] = + flattenAppEngineStandardAppVersionBasicScalingMaxInstances(original["maxInstances"], d, config) + return []interface{}{transformed} +} +func flattenAppEngineStandardAppVersionBasicScalingIdleTimeout(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenAppEngineStandardAppVersionBasicScalingMaxInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAppEngineStandardAppVersionManualScaling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["instances"] = + flattenAppEngineStandardAppVersionManualScalingInstances(original["instances"], d, config) + return []interface{}{transformed} +} +func flattenAppEngineStandardAppVersionManualScalingInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandAppEngineStandardAppVersionVersionId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionRuntime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionThreadsafe(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionAppEngineApis(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionRuntimeApiVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionHandlers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUrlRegex, err := expandAppEngineStandardAppVersionHandlersUrlRegex(original["url_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUrlRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["urlRegex"] = transformedUrlRegex + } + + transformedSecurityLevel, err := expandAppEngineStandardAppVersionHandlersSecurityLevel(original["security_level"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecurityLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["securityLevel"] = transformedSecurityLevel + } + + transformedLogin, err := expandAppEngineStandardAppVersionHandlersLogin(original["login"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLogin); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["login"] = transformedLogin + } + + transformedAuthFailAction, err := 
expandAppEngineStandardAppVersionHandlersAuthFailAction(original["auth_fail_action"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAuthFailAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["authFailAction"] = transformedAuthFailAction + } + + transformedRedirectHttpResponseCode, err := expandAppEngineStandardAppVersionHandlersRedirectHttpResponseCode(original["redirect_http_response_code"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRedirectHttpResponseCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["redirectHttpResponseCode"] = transformedRedirectHttpResponseCode + } + + transformedScript, err := expandAppEngineStandardAppVersionHandlersScript(original["script"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScript); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["script"] = transformedScript + } + + transformedStaticFiles, err := expandAppEngineStandardAppVersionHandlersStaticFiles(original["static_files"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStaticFiles); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["staticFiles"] = transformedStaticFiles + } + + req = append(req, transformed) + } + return req, nil +} + +func expandAppEngineStandardAppVersionHandlersUrlRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionHandlersSecurityLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionHandlersLogin(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandAppEngineStandardAppVersionHandlersAuthFailAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionHandlersRedirectHttpResponseCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionHandlersScript(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScriptPath, err := expandAppEngineStandardAppVersionHandlersScriptScriptPath(original["script_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScriptPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scriptPath"] = transformedScriptPath + } + + return transformed, nil +} + +func expandAppEngineStandardAppVersionHandlersScriptScriptPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionHandlersStaticFiles(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandAppEngineStandardAppVersionHandlersStaticFilesPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedUploadPathRegex, err := 
expandAppEngineStandardAppVersionHandlersStaticFilesUploadPathRegex(original["upload_path_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUploadPathRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uploadPathRegex"] = transformedUploadPathRegex + } + + transformedHttpHeaders, err := expandAppEngineStandardAppVersionHandlersStaticFilesHttpHeaders(original["http_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["httpHeaders"] = transformedHttpHeaders + } + + transformedMimeType, err := expandAppEngineStandardAppVersionHandlersStaticFilesMimeType(original["mime_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMimeType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mimeType"] = transformedMimeType + } + + transformedExpiration, err := expandAppEngineStandardAppVersionHandlersStaticFilesExpiration(original["expiration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpiration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expiration"] = transformedExpiration + } + + transformedRequireMatchingFile, err := expandAppEngineStandardAppVersionHandlersStaticFilesRequireMatchingFile(original["require_matching_file"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequireMatchingFile); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requireMatchingFile"] = transformedRequireMatchingFile + } + + transformedApplicationReadable, err := expandAppEngineStandardAppVersionHandlersStaticFilesApplicationReadable(original["application_readable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedApplicationReadable); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["applicationReadable"] = transformedApplicationReadable + } + + return transformed, nil +} + +func expandAppEngineStandardAppVersionHandlersStaticFilesPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionHandlersStaticFilesUploadPathRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionHandlersStaticFilesHttpHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAppEngineStandardAppVersionHandlersStaticFilesMimeType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionHandlersStaticFilesExpiration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionHandlersStaticFilesRequireMatchingFile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionHandlersStaticFilesApplicationReadable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionLibraries(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandAppEngineStandardAppVersionLibrariesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandAppEngineStandardAppVersionLibrariesVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + req = append(req, transformed) + } + return req, nil +} + +func expandAppEngineStandardAppVersionLibrariesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionLibrariesVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionEnvVariables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAppEngineStandardAppVersionDeployment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedZip, err := expandAppEngineStandardAppVersionDeploymentZip(original["zip"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedZip); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["zip"] = transformedZip + } + + transformedFiles, err := expandAppEngineStandardAppVersionDeploymentFiles(original["files"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFiles); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["files"] = transformedFiles + } + + return transformed, nil +} + +func expandAppEngineStandardAppVersionDeploymentZip(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSourceUrl, err := expandAppEngineStandardAppVersionDeploymentZipSourceUrl(original["source_url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSourceUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sourceUrl"] = transformedSourceUrl + } + + transformedFilesCount, err := expandAppEngineStandardAppVersionDeploymentZipFilesCount(original["files_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFilesCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filesCount"] = transformedFilesCount + } + + return transformed, nil +} + +func expandAppEngineStandardAppVersionDeploymentZipSourceUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionDeploymentZipFilesCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionDeploymentFiles(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + if v == nil { + return 
map[string]interface{}{}, nil + } + m := make(map[string]interface{}) + for _, raw := range v.(*schema.Set).List() { + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSha1Sum, err := expandAppEngineStandardAppVersionDeploymentFilesSha1Sum(original["sha1_sum"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha1Sum); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha1Sum"] = transformedSha1Sum + } + + transformedSourceUrl, err := expandAppEngineStandardAppVersionDeploymentFilesSourceUrl(original["source_url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSourceUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sourceUrl"] = transformedSourceUrl + } + + transformedName, err := tpgresource.ExpandString(original["name"], d, config) + if err != nil { + return nil, err + } + m[transformedName] = transformed + } + return m, nil +} + +func expandAppEngineStandardAppVersionDeploymentFilesSha1Sum(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionDeploymentFilesSourceUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionEntrypoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedShell, err := expandAppEngineStandardAppVersionEntrypointShell(original["shell"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedShell); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["shell"] = transformedShell + } + + return transformed, nil +} + +func expandAppEngineStandardAppVersionEntrypointShell(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionVpcAccessConnector(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandAppEngineStandardAppVersionVpcAccessConnectorName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedEgressSetting, err := expandAppEngineStandardAppVersionVpcAccessConnectorEgressSetting(original["egress_setting"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEgressSetting); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["egressSetting"] = transformedEgressSetting + } + + return transformed, nil +} + +func expandAppEngineStandardAppVersionVpcAccessConnectorName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionVpcAccessConnectorEgressSetting(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionInboundServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandAppEngineStandardAppVersionInstanceClass(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionAutomaticScaling(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMaxConcurrentRequests, err := expandAppEngineStandardAppVersionAutomaticScalingMaxConcurrentRequests(original["max_concurrent_requests"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxConcurrentRequests); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxConcurrentRequests"] = transformedMaxConcurrentRequests + } + + transformedMaxIdleInstances, err := expandAppEngineStandardAppVersionAutomaticScalingMaxIdleInstances(original["max_idle_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxIdleInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxIdleInstances"] = transformedMaxIdleInstances + } + + transformedMaxPendingLatency, err := expandAppEngineStandardAppVersionAutomaticScalingMaxPendingLatency(original["max_pending_latency"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxPendingLatency); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxPendingLatency"] = transformedMaxPendingLatency + } + + transformedMinIdleInstances, err := expandAppEngineStandardAppVersionAutomaticScalingMinIdleInstances(original["min_idle_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinIdleInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minIdleInstances"] = transformedMinIdleInstances + } + + transformedMinPendingLatency, err := 
expandAppEngineStandardAppVersionAutomaticScalingMinPendingLatency(original["min_pending_latency"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinPendingLatency); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minPendingLatency"] = transformedMinPendingLatency + } + + transformedStandardSchedulerSettings, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettings(original["standard_scheduler_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStandardSchedulerSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["standardSchedulerSettings"] = transformedStandardSchedulerSettings + } + + return transformed, nil +} + +func expandAppEngineStandardAppVersionAutomaticScalingMaxConcurrentRequests(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionAutomaticScalingMaxIdleInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionAutomaticScalingMaxPendingLatency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionAutomaticScalingMinIdleInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionAutomaticScalingMinPendingLatency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + 
if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTargetCpuUtilization, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetCpuUtilization(original["target_cpu_utilization"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTargetCpuUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["targetCpuUtilization"] = transformedTargetCpuUtilization + } + + transformedTargetThroughputUtilization, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetThroughputUtilization(original["target_throughput_utilization"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTargetThroughputUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["targetThroughputUtilization"] = transformedTargetThroughputUtilization + } + + transformedMinInstances, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMinInstances(original["min_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minInstances"] = transformedMinInstances + } + + transformedMaxInstances, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMaxInstances(original["max_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxInstances"] = transformedMaxInstances + } + + return transformed, nil +} + +func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetCpuUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + return v, nil +} + +func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetThroughputUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMinInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMaxInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionBasicScaling(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIdleTimeout, err := expandAppEngineStandardAppVersionBasicScalingIdleTimeout(original["idle_timeout"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdleTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["idleTimeout"] = transformedIdleTimeout + } + + transformedMaxInstances, err := expandAppEngineStandardAppVersionBasicScalingMaxInstances(original["max_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxInstances"] = transformedMaxInstances + } + + return transformed, nil +} + +func expandAppEngineStandardAppVersionBasicScalingIdleTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandAppEngineStandardAppVersionBasicScalingMaxInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineStandardAppVersionManualScaling(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInstances, err := expandAppEngineStandardAppVersionManualScalingInstances(original["instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["instances"] = transformedInstances + } + + return transformed, nil +} + +func expandAppEngineStandardAppVersionManualScalingInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/artifact_registry_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/artifact_registry_operation.go new file mode 100644 index 0000000000..f4d23cf8ca --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/artifact_registry_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package artifactregistry + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type ArtifactRegistryOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *ArtifactRegistryOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.ArtifactRegistryBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createArtifactRegistryWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*ArtifactRegistryOperationWaiter, error) { + w := &ArtifactRegistryOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func ArtifactRegistryOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createArtifactRegistryWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func ArtifactRegistryOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createArtifactRegistryWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/data_source_artifact_registry_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/data_source_artifact_registry_repository.go new file mode 100644 index 0000000000..ab16924a05 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/data_source_artifact_registry_repository.go @@ -0,0 +1,51 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package artifactregistry + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceArtifactRegistryRepository() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceArtifactRegistryRepository().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "repository_id", "location") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceArtifactRegistryRepositoryRead, + Schema: dsSchema, + } +} + +func dataSourceArtifactRegistryRepositoryRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + location, err := tpgresource.GetLocation(d, config) + if err != nil { + return err + } + + repository_id := d.Get("repository_id").(string) + d.SetId(fmt.Sprintf("projects/%s/locations/%s/repositories/%s", project, 
location, repository_id)) + + err = resourceArtifactRegistryRepositoryRead(d, meta) + if err != nil { + return err + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/iam_artifact_registry_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/iam_artifact_registry_repository.go new file mode 100644 index 0000000000..a3cb5b247d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/iam_artifact_registry_repository.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package artifactregistry + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var ArtifactRegistryRepositoryIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "repository": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type ArtifactRegistryRepositoryIamUpdater struct { + project string + location string + repository string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func ArtifactRegistryRepositoryIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("repository"); ok { + values["repository"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/repositories/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("repository").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ArtifactRegistryRepositoryIamUpdater{ + project: values["project"], + location: values["location"], + repository: values["repository"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("repository", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting repository: %s", err) + } + + return u, nil +} + +func ArtifactRegistryRepositoryIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/repositories/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ArtifactRegistryRepositoryIamUpdater{ + project: values["project"], + location: values["location"], + repository: values["repository"], + d: d, + Config: config, + } + if err := d.Set("repository", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting repository: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ArtifactRegistryRepositoryIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err 
:= u.qualifyRepositoryUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ArtifactRegistryRepositoryIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyRepositoryUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ArtifactRegistryRepositoryIamUpdater) qualifyRepositoryUrl(methodIdentifier string) (string, error) { + urlTemplate := 
fmt.Sprintf("{{ArtifactRegistryBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/repositories/%s", u.project, u.location, u.repository), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ArtifactRegistryRepositoryIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/repositories/%s", u.project, u.location, u.repository) +} + +func (u *ArtifactRegistryRepositoryIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-artifactregistry-repository-%s", u.GetResourceId()) +} + +func (u *ArtifactRegistryRepositoryIamUpdater) DescribeResource() string { + return fmt.Sprintf("artifactregistry repository %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/resource_artifact_registry_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/resource_artifact_registry_repository.go new file mode 100644 index 0000000000..aca457fc70 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/resource_artifact_registry_repository.go @@ -0,0 +1,1248 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package artifactregistry + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceArtifactRegistryRepository() *schema.Resource { + return &schema.Resource{ + Create: resourceArtifactRegistryRepositoryCreate, + Read: resourceArtifactRegistryRepositoryRead, + Update: resourceArtifactRegistryRepositoryUpdate, + Delete: resourceArtifactRegistryRepositoryDelete, + + Importer: &schema.ResourceImporter{ + State: resourceArtifactRegistryRepositoryImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "format": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareCaseInsensitive, + Description: `The format of packages that are stored in the repository. Supported formats +can be found [here](https://cloud.google.com/artifact-registry/docs/supported-formats). 
+You can only create alpha formats if you are a member of the +[alpha user group](https://cloud.google.com/artifact-registry/docs/supported-formats#alpha-access).`, + }, + "repository_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The last part of the repository name, for example: +"repo1"`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `The user-provided description of the repository.`, + }, + "docker_config": { + Type: schema.TypeList, + Optional: true, + Description: `Docker repository config contains repository level configuration for the repositories of docker type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "immutable_tags": { + Type: schema.TypeBool, + Optional: true, + Description: `The repository which enabled this flag prevents all tags from being modified, moved or deleted. This does not prevent tags from being created.`, + }, + }, + }, + }, + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Cloud KMS resource name of the customer managed encryption key that’s +used to encrypt the contents of the Repository. Has the form: +'projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key'. +This value may not be changed after the Repository has been created.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels with user-defined metadata. +This field may contain up to 64 entries. Label keys and values may be no +longer than 63 characters. 
Label keys must begin with a lowercase letter +and may only contain lowercase letters, numeric characters, underscores, +and dashes.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The name of the location this repository is located in.`, + }, + "maven_config": { + Type: schema.TypeList, + Optional: true, + Description: `MavenRepositoryConfig is maven related repository details. +Provides additional configuration details for repositories of the maven +format type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_snapshot_overwrites": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `The repository with this flag will allow publishing the same +snapshot versions.`, + }, + "version_policy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"VERSION_POLICY_UNSPECIFIED", "RELEASE", "SNAPSHOT", ""}), + Description: `Version policy defines the versions that the registry will accept. Default value: "VERSION_POLICY_UNSPECIFIED" Possible values: ["VERSION_POLICY_UNSPECIFIED", "RELEASE", "SNAPSHOT"]`, + Default: "VERSION_POLICY_UNSPECIFIED", + }, + }, + }, + }, + "mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"STANDARD_REPOSITORY", "VIRTUAL_REPOSITORY", "REMOTE_REPOSITORY", ""}), + Description: `The mode configures the repository to serve artifacts from different sources. 
Default value: "STANDARD_REPOSITORY" Possible values: ["STANDARD_REPOSITORY", "VIRTUAL_REPOSITORY", "REMOTE_REPOSITORY"]`, + Default: "STANDARD_REPOSITORY", + }, + "remote_repository_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Configuration specific for a Remote Repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The description of the remote source.`, + }, + "docker_repository": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Specific settings for a Docker remote repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "public_repository": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"DOCKER_HUB", ""}), + Description: `Address of the remote repository. Default value: "DOCKER_HUB" Possible values: ["DOCKER_HUB"]`, + Default: "DOCKER_HUB", + ExactlyOneOf: []string{"remote_repository_config.0.docker_repository.0.public_repository"}, + }, + }, + }, + ExactlyOneOf: []string{"remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository"}, + }, + "maven_repository": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Specific settings for a Maven remote repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "public_repository": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"MAVEN_CENTRAL", ""}), + Description: `Address of the remote repository. 
Default value: "MAVEN_CENTRAL" Possible values: ["MAVEN_CENTRAL"]`, + Default: "MAVEN_CENTRAL", + ExactlyOneOf: []string{"remote_repository_config.0.maven_repository.0.public_repository"}, + }, + }, + }, + ExactlyOneOf: []string{"remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository"}, + }, + "npm_repository": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Specific settings for an Npm remote repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "public_repository": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"NPMJS", ""}), + Description: `Address of the remote repository. Default value: "NPMJS" Possible values: ["NPMJS"]`, + Default: "NPMJS", + ExactlyOneOf: []string{"remote_repository_config.0.npm_repository.0.public_repository"}, + }, + }, + }, + ExactlyOneOf: []string{"remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository"}, + }, + "python_repository": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Specific settings for a Python remote repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "public_repository": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"PYPI", ""}), + Description: `Address of the remote repository. 
Default value: "PYPI" Possible values: ["PYPI"]`, + Default: "PYPI", + ExactlyOneOf: []string{"remote_repository_config.0.python_repository.0.public_repository"}, + }, + }, + }, + ExactlyOneOf: []string{"remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository"}, + }, + }, + }, + ConflictsWith: []string{"virtual_repository_config"}, + }, + "virtual_repository_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration specific for a Virtual Repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "upstream_policies": { + Type: schema.TypeList, + Optional: true, + Description: `Policies that configure the upstream artifacts distributed by the Virtual +Repository. Upstream policies cannot be set on a standard repository.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + Description: `The user-provided ID of the upstream policy.`, + }, + "priority": { + Type: schema.TypeInt, + Optional: true, + Description: `Entries with a greater priority value take precedence in the pull order.`, + }, + "repository": { + Type: schema.TypeString, + Optional: true, + Description: `A reference to the repository resource, for example: +"projects/p1/locations/us-central1/repository/repo1".`, + }, + }, + }, + }, + }, + }, + ConflictsWith: []string{"remote_repository_config"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the repository was created.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the repository, for example: +"repo1"`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the repository was last updated.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + 
Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceArtifactRegistryRepositoryCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + formatProp, err := expandArtifactRegistryRepositoryFormat(d.Get("format"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("format"); !tpgresource.IsEmptyValue(reflect.ValueOf(formatProp)) && (ok || !reflect.DeepEqual(v, formatProp)) { + obj["format"] = formatProp + } + descriptionProp, err := expandArtifactRegistryRepositoryDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandArtifactRegistryRepositoryLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + kmsKeyNameProp, err := expandArtifactRegistryRepositoryKmsKeyName(d.Get("kms_key_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("kms_key_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(kmsKeyNameProp)) && (ok || !reflect.DeepEqual(v, kmsKeyNameProp)) { + obj["kmsKeyName"] = kmsKeyNameProp + } + dockerConfigProp, err := expandArtifactRegistryRepositoryDockerConfig(d.Get("docker_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("docker_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(dockerConfigProp)) && (ok || !reflect.DeepEqual(v, dockerConfigProp)) { + obj["dockerConfig"] = dockerConfigProp + } + 
mavenConfigProp, err := expandArtifactRegistryRepositoryMavenConfig(d.Get("maven_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("maven_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(mavenConfigProp)) && (ok || !reflect.DeepEqual(v, mavenConfigProp)) { + obj["mavenConfig"] = mavenConfigProp + } + modeProp, err := expandArtifactRegistryRepositoryMode(d.Get("mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(modeProp)) && (ok || !reflect.DeepEqual(v, modeProp)) { + obj["mode"] = modeProp + } + virtualRepositoryConfigProp, err := expandArtifactRegistryRepositoryVirtualRepositoryConfig(d.Get("virtual_repository_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("virtual_repository_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(virtualRepositoryConfigProp)) && (ok || !reflect.DeepEqual(v, virtualRepositoryConfigProp)) { + obj["virtualRepositoryConfig"] = virtualRepositoryConfigProp + } + remoteRepositoryConfigProp, err := expandArtifactRegistryRepositoryRemoteRepositoryConfig(d.Get("remote_repository_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("remote_repository_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(remoteRepositoryConfigProp)) && (ok || !reflect.DeepEqual(v, remoteRepositoryConfigProp)) { + obj["remoteRepositoryConfig"] = remoteRepositoryConfigProp + } + + obj, err = resourceArtifactRegistryRepositoryEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ArtifactRegistryBasePath}}projects/{{project}}/locations/{{location}}/repositories?repository_id={{repository_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Repository: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching 
project for Repository: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Repository: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = ArtifactRegistryOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Repository", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Repository: %s", err) + } + + if err := d.Set("name", flattenArtifactRegistryRepositoryName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Repository %q: %#v", d.Id(), res) + + return resourceArtifactRegistryRepositoryRead(d, meta) +} + +func resourceArtifactRegistryRepositoryRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ArtifactRegistryBasePath}}projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Repository: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ArtifactRegistryRepository %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + + if err := d.Set("name", flattenArtifactRegistryRepositoryName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("format", flattenArtifactRegistryRepositoryFormat(res["format"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("description", 
flattenArtifactRegistryRepositoryDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("labels", flattenArtifactRegistryRepositoryLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("kms_key_name", flattenArtifactRegistryRepositoryKmsKeyName(res["kmsKeyName"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("create_time", flattenArtifactRegistryRepositoryCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("update_time", flattenArtifactRegistryRepositoryUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("docker_config", flattenArtifactRegistryRepositoryDockerConfig(res["dockerConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("maven_config", flattenArtifactRegistryRepositoryMavenConfig(res["mavenConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("mode", flattenArtifactRegistryRepositoryMode(res["mode"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("virtual_repository_config", flattenArtifactRegistryRepositoryVirtualRepositoryConfig(res["virtualRepositoryConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("remote_repository_config", flattenArtifactRegistryRepositoryRemoteRepositoryConfig(res["remoteRepositoryConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + + return nil +} + +func resourceArtifactRegistryRepositoryUpdate(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Repository: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandArtifactRegistryRepositoryDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandArtifactRegistryRepositoryLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + dockerConfigProp, err := expandArtifactRegistryRepositoryDockerConfig(d.Get("docker_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("docker_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dockerConfigProp)) { + obj["dockerConfig"] = dockerConfigProp + } + mavenConfigProp, err := expandArtifactRegistryRepositoryMavenConfig(d.Get("maven_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("maven_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mavenConfigProp)) { + obj["mavenConfig"] = mavenConfigProp + } + virtualRepositoryConfigProp, err := expandArtifactRegistryRepositoryVirtualRepositoryConfig(d.Get("virtual_repository_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("virtual_repository_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
virtualRepositoryConfigProp)) { + obj["virtualRepositoryConfig"] = virtualRepositoryConfigProp + } + + obj, err = resourceArtifactRegistryRepositoryEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ArtifactRegistryBasePath}}projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Repository %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("docker_config") { + updateMask = append(updateMask, "dockerConfig") + } + + if d.HasChange("maven_config") { + updateMask = append(updateMask, "mavenConfig") + } + + if d.HasChange("virtual_repository_config") { + updateMask = append(updateMask, "virtualRepositoryConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Repository %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Repository %q: %#v", d.Id(), res) + } + + return resourceArtifactRegistryRepositoryRead(d, meta) +} + +func resourceArtifactRegistryRepositoryDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + 
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Repository: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ArtifactRegistryBasePath}}projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Repository %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Repository") + } + + err = ArtifactRegistryOperationWaitTime( + config, res, project, "Deleting Repository", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Repository %q: %#v", d.Id(), res) + return nil +} + +func resourceArtifactRegistryRepositoryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/repositories/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: 
%s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenArtifactRegistryRepositoryName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenArtifactRegistryRepositoryFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryDockerConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["immutable_tags"] = + flattenArtifactRegistryRepositoryDockerConfigImmutableTags(original["immutableTags"], d, config) + return []interface{}{transformed} +} +func flattenArtifactRegistryRepositoryDockerConfigImmutableTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryMavenConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["allow_snapshot_overwrites"] = + flattenArtifactRegistryRepositoryMavenConfigAllowSnapshotOverwrites(original["allowSnapshotOverwrites"], d, config) + transformed["version_policy"] = + flattenArtifactRegistryRepositoryMavenConfigVersionPolicy(original["versionPolicy"], d, config) + return []interface{}{transformed} +} +func flattenArtifactRegistryRepositoryMavenConfigAllowSnapshotOverwrites(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryMavenConfigVersionPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryVirtualRepositoryConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["upstream_policies"] = + flattenArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPolicies(original["upstreamPolicies"], d, config) + return []interface{}{transformed} +} +func flattenArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "id": 
flattenArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPoliciesId(original["id"], d, config), + "repository": flattenArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPoliciesRepository(original["repository"], d, config), + "priority": flattenArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPoliciesPriority(original["priority"], d, config), + }) + } + return transformed +} +func flattenArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPoliciesId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPoliciesRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPoliciesPriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenArtifactRegistryRepositoryRemoteRepositoryConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["description"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigDescription(original["description"], d, config) + transformed["docker_repository"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigDockerRepository(original["dockerRepository"], d, config) + transformed["maven_repository"] = + 
flattenArtifactRegistryRepositoryRemoteRepositoryConfigMavenRepository(original["mavenRepository"], d, config) + transformed["npm_repository"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigNpmRepository(original["npmRepository"], d, config) + transformed["python_repository"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigPythonRepository(original["pythonRepository"], d, config) + return []interface{}{transformed} +} +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigDockerRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["public_repository"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigDockerRepositoryPublicRepository(original["publicRepository"], d, config) + return []interface{}{transformed} +} +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigDockerRepositoryPublicRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigMavenRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["public_repository"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigMavenRepositoryPublicRepository(original["publicRepository"], d, config) + return []interface{}{transformed} +} +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigMavenRepositoryPublicRepository(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigNpmRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["public_repository"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigNpmRepositoryPublicRepository(original["publicRepository"], d, config) + return []interface{}{transformed} +} +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigNpmRepositoryPublicRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigPythonRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["public_repository"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigPythonRepositoryPublicRepository(original["publicRepository"], d, config) + return []interface{}{transformed} +} +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigPythonRepositoryPublicRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandArtifactRegistryRepositoryFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandArtifactRegistryRepositoryKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryDockerConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedImmutableTags, err := expandArtifactRegistryRepositoryDockerConfigImmutableTags(original["immutable_tags"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImmutableTags); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["immutableTags"] = transformedImmutableTags + } + + return transformed, nil +} + +func expandArtifactRegistryRepositoryDockerConfigImmutableTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryMavenConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAllowSnapshotOverwrites, err := expandArtifactRegistryRepositoryMavenConfigAllowSnapshotOverwrites(original["allow_snapshot_overwrites"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowSnapshotOverwrites); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowSnapshotOverwrites"] = 
transformedAllowSnapshotOverwrites + } + + transformedVersionPolicy, err := expandArtifactRegistryRepositoryMavenConfigVersionPolicy(original["version_policy"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersionPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["versionPolicy"] = transformedVersionPolicy + } + + return transformed, nil +} + +func expandArtifactRegistryRepositoryMavenConfigAllowSnapshotOverwrites(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryMavenConfigVersionPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryVirtualRepositoryConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUpstreamPolicies, err := expandArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPolicies(original["upstream_policies"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUpstreamPolicies); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["upstreamPolicies"] = transformedUpstreamPolicies + } + + return transformed, nil +} + +func expandArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPolicies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range 
l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPoliciesId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedRepository, err := expandArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPoliciesRepository(original["repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repository"] = transformedRepository + } + + transformedPriority, err := expandArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPoliciesPriority(original["priority"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPriority); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["priority"] = transformedPriority + } + + req = append(req, transformed) + } + return req, nil +} + +func expandArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPoliciesId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPoliciesRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryVirtualRepositoryConfigUpstreamPoliciesPriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := 
v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDescription, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedDockerRepository, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigDockerRepository(original["docker_repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDockerRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dockerRepository"] = transformedDockerRepository + } + + transformedMavenRepository, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigMavenRepository(original["maven_repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMavenRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mavenRepository"] = transformedMavenRepository + } + + transformedNpmRepository, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigNpmRepository(original["npm_repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNpmRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["npmRepository"] = transformedNpmRepository + } + + transformedPythonRepository, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigPythonRepository(original["python_repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPythonRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pythonRepository"] = transformedPythonRepository + } + + return 
transformed, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigDockerRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPublicRepository, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigDockerRepositoryPublicRepository(original["public_repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicRepository"] = transformedPublicRepository + } + + return transformed, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigDockerRepositoryPublicRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigMavenRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPublicRepository, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigMavenRepositoryPublicRepository(original["public_repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicRepository"] = transformedPublicRepository + } + + return 
transformed, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigMavenRepositoryPublicRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigNpmRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPublicRepository, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigNpmRepositoryPublicRepository(original["public_repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicRepository"] = transformedPublicRepository + } + + return transformed, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigNpmRepositoryPublicRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigPythonRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPublicRepository, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigPythonRepositoryPublicRepository(original["public_repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicRepository"] = transformedPublicRepository + } + + 
return transformed, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigPythonRepositoryPublicRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceArtifactRegistryRepositoryEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + if _, ok := d.GetOk("location"); !ok { + location, err := tpgresource.GetRegionFromSchema("region", "zone", d, config) + if err != nil { + return nil, fmt.Errorf("Cannot determine location: set in this resource, or set provider-level 'region' or 'zone'.") + } + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/resource_artifact_registry_repository_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/resource_artifact_registry_repository_sweeper.go new file mode 100644 index 0000000000..71c399177b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/artifactregistry/resource_artifact_registry_repository_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package artifactregistry + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ArtifactRegistryRepository", testSweepArtifactRegistryRepository) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepArtifactRegistryRepository(region string) error { + resourceName := "ArtifactRegistryRepository" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://artifactregistry.googleapis.com/v1/projects/{{project}}/locations/{{location}}/repositories", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: 
config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["repositories"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://artifactregistry.googleapis.com/v1/projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", 
nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_assured_workloads_workload.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/assuredworkloads/resource_assured_workloads_workload.go similarity index 83% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_assured_workloads_workload.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/assuredworkloads/resource_assured_workloads_workload.go index edda61daa3..0ea8fa83d5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_assured_workloads_workload.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/assuredworkloads/resource_assured_workloads_workload.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package assuredworkloads import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" assuredworkloads "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceAssuredWorkloadsWorkload() *schema.Resource { @@ -49,7 +56,7 @@ func ResourceAssuredWorkloadsWorkload() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, 'billingAccounts/012345-567890-ABCDEF`.", }, @@ -57,7 +64,7 @@ func ResourceAssuredWorkloadsWorkload() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS", + Description: "Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS", }, "display_name": { @@ -77,7 +84,7 @@ func ResourceAssuredWorkloadsWorkload() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The organization for the resource", }, @@ -193,7 +200,7 @@ func AssuredWorkloadsWorkloadResourcesSchema() *schema.Resource { } func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &assuredworkloads.Workload{ BillingAccount: dcl.String(d.Get("billing_account").(string)), @@ -202,7 +209,7 @@ func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interfa Location: dcl.String(d.Get("location").(string)), Organization: dcl.String(d.Get("organization").(string)), KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), } @@ -212,18 +219,18 @@ func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interfa return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -256,7 +263,7 @@ func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interfa } func resourceAssuredWorkloadsWorkloadRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &assuredworkloads.Workload{ BillingAccount: dcl.String(d.Get("billing_account").(string)), @@ -265,23 +272,23 @@ func resourceAssuredWorkloadsWorkloadRead(d *schema.ResourceData, meta interface Location: dcl.String(d.Get("location").(string)), Organization: dcl.String(d.Get("organization").(string)), KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), Name: dcl.StringOrNil(d.Get("name").(string)), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil 
{ billingProject = bp } - client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -290,7 +297,7 @@ func resourceAssuredWorkloadsWorkloadRead(d *schema.ResourceData, meta interface res, err := client.GetWorkload(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("AssuredWorkloadsWorkload %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("billing_account", res.BillingAccount); err != nil { @@ -333,7 +340,7 @@ func resourceAssuredWorkloadsWorkloadRead(d *schema.ResourceData, meta interface return nil } func resourceAssuredWorkloadsWorkloadUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &assuredworkloads.Workload{ BillingAccount: dcl.String(d.Get("billing_account").(string)), @@ -342,38 +349,38 @@ func resourceAssuredWorkloadsWorkloadUpdate(d *schema.ResourceData, meta interfa Location: dcl.String(d.Get("location").(string)), Organization: dcl.String(d.Get("organization").(string)), KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), Name: dcl.StringOrNil(d.Get("name").(string)), } // Construct state hint from old values old := 
&assuredworkloads.Workload{ - BillingAccount: dcl.String(oldValue(d.GetChange("billing_account")).(string)), - ComplianceRegime: assuredworkloads.WorkloadComplianceRegimeEnumRef(oldValue(d.GetChange("compliance_regime")).(string)), - DisplayName: dcl.String(oldValue(d.GetChange("display_name")).(string)), - Location: dcl.String(oldValue(d.GetChange("location")).(string)), - Organization: dcl.String(oldValue(d.GetChange("organization")).(string)), - KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(oldValue(d.GetChange("kms_settings"))), - Labels: checkStringMap(oldValue(d.GetChange("labels"))), - ProvisionedResourcesParent: dcl.String(oldValue(d.GetChange("provisioned_resources_parent")).(string)), - ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(oldValue(d.GetChange("resource_settings"))), - Name: dcl.StringOrNil(oldValue(d.GetChange("name")).(string)), - } - directive := UpdateDirective + BillingAccount: dcl.String(tpgdclresource.OldValue(d.GetChange("billing_account")).(string)), + ComplianceRegime: assuredworkloads.WorkloadComplianceRegimeEnumRef(tpgdclresource.OldValue(d.GetChange("compliance_regime")).(string)), + DisplayName: dcl.String(tpgdclresource.OldValue(d.GetChange("display_name")).(string)), + Location: dcl.String(tpgdclresource.OldValue(d.GetChange("location")).(string)), + Organization: dcl.String(tpgdclresource.OldValue(d.GetChange("organization")).(string)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(tpgdclresource.OldValue(d.GetChange("kms_settings"))), + Labels: tpgresource.CheckStringMap(tpgdclresource.OldValue(d.GetChange("labels"))), + ProvisionedResourcesParent: dcl.String(tpgdclresource.OldValue(d.GetChange("provisioned_resources_parent")).(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(tpgdclresource.OldValue(d.GetChange("resource_settings"))), + Name: dcl.StringOrNil(tpgdclresource.OldValue(d.GetChange("name")).(string)), + } + directive := 
tpgdclresource.UpdateDirective directive = append(directive, dcl.WithStateHint(old)) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -395,7 +402,7 @@ func resourceAssuredWorkloadsWorkloadUpdate(d *schema.ResourceData, meta interfa } func resourceAssuredWorkloadsWorkloadDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &assuredworkloads.Workload{ BillingAccount: dcl.String(d.Get("billing_account").(string)), @@ -404,24 +411,24 @@ func resourceAssuredWorkloadsWorkloadDelete(d *schema.ResourceData, meta interfa Location: dcl.String(d.Get("location").(string)), Organization: dcl.String(d.Get("organization").(string)), KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), Name: dcl.StringOrNil(d.Get("name").(string)), } log.Printf("[DEBUG] 
Deleting Workload %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -436,9 +443,9 @@ func resourceAssuredWorkloadsWorkloadDelete(d *schema.ResourceData, meta interfa } func resourceAssuredWorkloadsWorkloadImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "organizations/(?P[^/]+)/locations/(?P[^/]+)/workloads/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", }, d, config); err != nil { @@ -446,7 +453,7 @@ func resourceAssuredWorkloadsWorkloadImport(d *schema.ResourceData, meta interfa } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/beyondcorp_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/beyondcorp_operation.go new file mode 100644 index 0000000000..bfbc9c5aa5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/beyondcorp_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package beyondcorp + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type BeyondcorpOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *BeyondcorpOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.BeyondcorpBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createBeyondcorpWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*BeyondcorpOperationWaiter, error) { + w := &BeyondcorpOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func BeyondcorpOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createBeyondcorpWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func BeyondcorpOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createBeyondcorpWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/data_source_google_beyondcorp_app_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/data_source_google_beyondcorp_app_connection.go new file mode 100644 index 0000000000..0089ac003b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/data_source_google_beyondcorp_app_connection.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package beyondcorp + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleBeyondcorpAppConnection() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceBeyondcorpAppConnection().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "region") + + return &schema.Resource{ + Read: dataSourceGoogleBeyondcorpAppConnectionRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleBeyondcorpAppConnectionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + name := d.Get("name").(string) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/locations/%s/appConnections/%s", project, region, name)) + + return resourceBeyondcorpAppConnectionRead(d, meta) +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/data_source_google_beyondcorp_app_connector.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/data_source_google_beyondcorp_app_connector.go new file mode 100644 index 0000000000..6fbd2af55d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/data_source_google_beyondcorp_app_connector.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package beyondcorp + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleBeyondcorpAppConnector() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceBeyondcorpAppConnector().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "region") + + return &schema.Resource{ + Read: dataSourceGoogleBeyondcorpAppConnectorRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleBeyondcorpAppConnectorRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + name := d.Get("name").(string) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/locations/%s/appConnectors/%s", project, region, name)) + + return resourceBeyondcorpAppConnectorRead(d, meta) +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/data_source_google_beyondcorp_app_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/data_source_google_beyondcorp_app_gateway.go new file mode 100644 index 0000000000..4960307fa1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/data_source_google_beyondcorp_app_gateway.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package beyondcorp + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleBeyondcorpAppGateway() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceBeyondcorpAppGateway().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "region") + + return &schema.Resource{ + Read: dataSourceGoogleBeyondcorpAppGatewayRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleBeyondcorpAppGatewayRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + name := d.Get("name").(string) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/locations/%s/appGateways/%s", project, region, name)) + + return resourceBeyondcorpAppGatewayRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_connection.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_connection.go new file mode 100644 index 0000000000..5c928322ec --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_connection.go @@ -0,0 +1,723 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package beyondcorp + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceBeyondcorpAppConnection() *schema.Resource { + return &schema.Resource{ + Create: resourceBeyondcorpAppConnectionCreate, + Read: resourceBeyondcorpAppConnectionRead, + Update: resourceBeyondcorpAppConnectionUpdate, + Delete: resourceBeyondcorpAppConnectionDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBeyondcorpAppConnectionImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "application_endpoint": { + Type: schema.TypeList, + Required: true, + Description: `Address of the remote 
application endpoint for the BeyondCorp AppConnection.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Required: true, + Description: `Hostname or IP address of the remote application endpoint.`, + }, + "port": { + Type: schema.TypeInt, + Required: true, + Description: `Port of the remote application endpoint.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `ID of the AppConnection.`, + }, + "connectors": { + Type: schema.TypeList, + Optional: true, + Description: `List of AppConnectors that are authorised to be associated with this AppConnection`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `An arbitrary user-provided name for the AppConnection.`, + }, + "gateway": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Gateway used by the AppConnection.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "app_gateway": { + Type: schema.TypeString, + Required: true, + Description: `AppGateway name in following format: projects/{project_id}/locations/{locationId}/appgateways/{gateway_id}.`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + Description: `The type of hosting used by the gateway. 
Refer to +https://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#Type_1 +for a list of possible values.`, + }, + "ingress_port": { + Type: schema.TypeInt, + Computed: true, + Description: `Ingress port reserved on the gateways for this AppConnection, if not specified or zero, the default port is 19443.`, + }, + "uri": { + Type: schema.TypeString, + Computed: true, + Description: `Server-defined URI for this resource.`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Resource labels to represent user provided metadata.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The region of the AppConnection.`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The type of network connectivity used by the AppConnection. Refer to +https://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#type +for a list of possible values.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBeyondcorpAppConnectionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandBeyondcorpAppConnectionDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandBeyondcorpAppConnectionLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + typeProp, err := expandBeyondcorpAppConnectionType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + applicationEndpointProp, err := expandBeyondcorpAppConnectionApplicationEndpoint(d.Get("application_endpoint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("application_endpoint"); !tpgresource.IsEmptyValue(reflect.ValueOf(applicationEndpointProp)) && (ok || !reflect.DeepEqual(v, applicationEndpointProp)) { + obj["applicationEndpoint"] = applicationEndpointProp + } + connectorsProp, err := expandBeyondcorpAppConnectionConnectors(d.Get("connectors"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("connectors"); !tpgresource.IsEmptyValue(reflect.ValueOf(connectorsProp)) && (ok || !reflect.DeepEqual(v, connectorsProp)) { + obj["connectors"] = connectorsProp + } + gatewayProp, err := expandBeyondcorpAppConnectionGateway(d.Get("gateway"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("gateway"); !tpgresource.IsEmptyValue(reflect.ValueOf(gatewayProp)) && (ok || !reflect.DeepEqual(v, gatewayProp)) { + obj["gateway"] = gatewayProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnections?app_connection_id={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AppConnection: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AppConnection: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was 
found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating AppConnection: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/appConnections/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = BeyondcorpOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating AppConnection", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create AppConnection: %s", err) + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/appConnections/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating AppConnection %q: %#v", d.Id(), res) + + return resourceBeyondcorpAppConnectionRead(d, meta) +} + +func resourceBeyondcorpAppConnectionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnections/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AppConnection: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BeyondcorpAppConnection %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading AppConnection: %s", err) + } + + if err := d.Set("display_name", flattenBeyondcorpAppConnectionDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading AppConnection: %s", err) + } + if err := d.Set("labels", flattenBeyondcorpAppConnectionLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading AppConnection: %s", err) + } + if err := d.Set("type", flattenBeyondcorpAppConnectionType(res["type"], d, 
config)); err != nil { + return fmt.Errorf("Error reading AppConnection: %s", err) + } + if err := d.Set("application_endpoint", flattenBeyondcorpAppConnectionApplicationEndpoint(res["applicationEndpoint"], d, config)); err != nil { + return fmt.Errorf("Error reading AppConnection: %s", err) + } + if err := d.Set("connectors", flattenBeyondcorpAppConnectionConnectors(res["connectors"], d, config)); err != nil { + return fmt.Errorf("Error reading AppConnection: %s", err) + } + if err := d.Set("gateway", flattenBeyondcorpAppConnectionGateway(res["gateway"], d, config)); err != nil { + return fmt.Errorf("Error reading AppConnection: %s", err) + } + + return nil +} + +func resourceBeyondcorpAppConnectionUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AppConnection: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandBeyondcorpAppConnectionDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandBeyondcorpAppConnectionLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + applicationEndpointProp, err := expandBeyondcorpAppConnectionApplicationEndpoint(d.Get("application_endpoint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("application_endpoint"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, applicationEndpointProp)) { + obj["applicationEndpoint"] = applicationEndpointProp + } + connectorsProp, err := expandBeyondcorpAppConnectionConnectors(d.Get("connectors"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("connectors"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, connectorsProp)) { + obj["connectors"] = connectorsProp + } + gatewayProp, err := expandBeyondcorpAppConnectionGateway(d.Get("gateway"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("gateway"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gatewayProp)) { + obj["gateway"] = gatewayProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnections/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating AppConnection %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("application_endpoint") { + updateMask = append(updateMask, "applicationEndpoint") + } + + if d.HasChange("connectors") { + updateMask = append(updateMask, "connectors") + } + + if d.HasChange("gateway") { + updateMask = append(updateMask, "gateway") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + 
Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating AppConnection %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating AppConnection %q: %#v", d.Id(), res) + } + + err = BeyondcorpOperationWaitTime( + config, res, project, "Updating AppConnection", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceBeyondcorpAppConnectionRead(d, meta) +} + +func resourceBeyondcorpAppConnectionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AppConnection: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnections/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AppConnection %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AppConnection") + } + + err = BeyondcorpOperationWaitTime( + config, res, project, "Deleting AppConnection", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting 
AppConnection %q: %#v", d.Id(), res) + return nil +} + +func resourceBeyondcorpAppConnectionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/appConnections/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/appConnections/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenBeyondcorpAppConnectionDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppConnectionLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppConnectionType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppConnectionApplicationEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["host"] = + flattenBeyondcorpAppConnectionApplicationEndpointHost(original["host"], d, config) + transformed["port"] = + flattenBeyondcorpAppConnectionApplicationEndpointPort(original["port"], d, config) + return []interface{}{transformed} +} +func flattenBeyondcorpAppConnectionApplicationEndpointHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppConnectionApplicationEndpointPort(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBeyondcorpAppConnectionConnectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppConnectionGateway(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["app_gateway"] = + flattenBeyondcorpAppConnectionGatewayAppGateway(original["appGateway"], d, config) + transformed["type"] = + flattenBeyondcorpAppConnectionGatewayType(original["type"], d, config) + transformed["uri"] = + flattenBeyondcorpAppConnectionGatewayUri(original["uri"], d, config) + transformed["ingress_port"] = + flattenBeyondcorpAppConnectionGatewayIngressPort(original["ingressPort"], d, config) + return []interface{}{transformed} +} +func flattenBeyondcorpAppConnectionGatewayAppGateway(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppConnectionGatewayType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppConnectionGatewayUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppConnectionGatewayIngressPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if 
intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandBeyondcorpAppConnectionDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBeyondcorpAppConnectionLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandBeyondcorpAppConnectionType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBeyondcorpAppConnectionApplicationEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHost, err := expandBeyondcorpAppConnectionApplicationEndpointHost(original["host"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["host"] = transformedHost + } + + transformedPort, err := expandBeyondcorpAppConnectionApplicationEndpointPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + return transformed, nil +} + +func expandBeyondcorpAppConnectionApplicationEndpointHost(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBeyondcorpAppConnectionApplicationEndpointPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBeyondcorpAppConnectionConnectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBeyondcorpAppConnectionGateway(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAppGateway, err := expandBeyondcorpAppConnectionGatewayAppGateway(original["app_gateway"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAppGateway); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["appGateway"] = transformedAppGateway + } + + transformedType, err := expandBeyondcorpAppConnectionGatewayType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedUri, err := expandBeyondcorpAppConnectionGatewayUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedIngressPort, err := expandBeyondcorpAppConnectionGatewayIngressPort(original["ingress_port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIngressPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ingressPort"] = transformedIngressPort + } + + 
return transformed, nil +} + +func expandBeyondcorpAppConnectionGatewayAppGateway(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBeyondcorpAppConnectionGatewayType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBeyondcorpAppConnectionGatewayUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBeyondcorpAppConnectionGatewayIngressPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_connection_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_connection_sweeper.go new file mode 100644 index 0000000000..14b57d100c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_connection_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package beyondcorp + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("BeyondcorpAppConnection", testSweepBeyondcorpAppConnection) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepBeyondcorpAppConnection(region string) error { + resourceName := "BeyondcorpAppConnection" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://beyondcorp.googleapis.com/v1/projects/{{project}}/locations/{{region}}/appConnections", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil 
{ + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["appConnections"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://beyondcorp.googleapis.com/v1/projects/{{project}}/locations/{{region}}/appConnections/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_connector.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_connector.go new file mode 100644 index 0000000000..b7ba55e72d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_connector.go @@ -0,0 +1,526 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package beyondcorp + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceBeyondcorpAppConnector() *schema.Resource { + return &schema.Resource{ + Create: resourceBeyondcorpAppConnectorCreate, + Read: resourceBeyondcorpAppConnectorRead, + Update: resourceBeyondcorpAppConnectorUpdate, + Delete: resourceBeyondcorpAppConnectorDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBeyondcorpAppConnectorImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `ID of the AppConnector.`, 
+ }, + "principal_info": { + Type: schema.TypeList, + Required: true, + Description: `Principal information about the Identity of the AppConnector.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service_account": { + Type: schema.TypeList, + Required: true, + Description: `ServiceAccount represents a GCP service account.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Required: true, + Description: `Email address of the service account.`, + }, + }, + }, + }, + }, + }, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `An arbitrary user-provided name for the AppConnector.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Resource labels to represent user provided metadata.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The region of the AppConnector.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Represents the different states of a AppConnector.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBeyondcorpAppConnectorCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandBeyondcorpAppConnectorDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := 
expandBeyondcorpAppConnectorLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + principalInfoProp, err := expandBeyondcorpAppConnectorPrincipalInfo(d.Get("principal_info"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("principal_info"); !tpgresource.IsEmptyValue(reflect.ValueOf(principalInfoProp)) && (ok || !reflect.DeepEqual(v, principalInfoProp)) { + obj["principalInfo"] = principalInfoProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnectors?app_connector_id={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AppConnector: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AppConnector: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating AppConnector: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/appConnectors/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = BeyondcorpOperationWaitTimeWithResponse( + config, 
res, &opRes, project, "Creating AppConnector", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create AppConnector: %s", err) + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/appConnectors/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating AppConnector %q: %#v", d.Id(), res) + + return resourceBeyondcorpAppConnectorRead(d, meta) +} + +func resourceBeyondcorpAppConnectorRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnectors/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AppConnector: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BeyondcorpAppConnector %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading AppConnector: %s", err) + } + + if err := d.Set("display_name", flattenBeyondcorpAppConnectorDisplayName(res["displayName"], d, config)); err != nil { + return 
fmt.Errorf("Error reading AppConnector: %s", err) + } + if err := d.Set("labels", flattenBeyondcorpAppConnectorLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading AppConnector: %s", err) + } + if err := d.Set("principal_info", flattenBeyondcorpAppConnectorPrincipalInfo(res["principalInfo"], d, config)); err != nil { + return fmt.Errorf("Error reading AppConnector: %s", err) + } + if err := d.Set("state", flattenBeyondcorpAppConnectorState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading AppConnector: %s", err) + } + + return nil +} + +func resourceBeyondcorpAppConnectorUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AppConnector: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandBeyondcorpAppConnectorDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandBeyondcorpAppConnectorLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + principalInfoProp, err := expandBeyondcorpAppConnectorPrincipalInfo(d.Get("principal_info"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("principal_info"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, principalInfoProp)) { + 
obj["principalInfo"] = principalInfoProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnectors/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating AppConnector %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("principal_info") { + updateMask = append(updateMask, "principalInfo") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating AppConnector %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating AppConnector %q: %#v", d.Id(), res) + } + + err = BeyondcorpOperationWaitTime( + config, res, project, "Updating AppConnector", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceBeyondcorpAppConnectorRead(d, meta) +} + +func resourceBeyondcorpAppConnectorDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return 
fmt.Errorf("Error fetching project for AppConnector: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appConnectors/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AppConnector %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AppConnector") + } + + err = BeyondcorpOperationWaitTime( + config, res, project, "Deleting AppConnector", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting AppConnector %q: %#v", d.Id(), res) + return nil +} + +func resourceBeyondcorpAppConnectorImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/appConnectors/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/appConnectors/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenBeyondcorpAppConnectorDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenBeyondcorpAppConnectorLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppConnectorPrincipalInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["service_account"] = + flattenBeyondcorpAppConnectorPrincipalInfoServiceAccount(original["serviceAccount"], d, config) + return []interface{}{transformed} +} +func flattenBeyondcorpAppConnectorPrincipalInfoServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["email"] = + flattenBeyondcorpAppConnectorPrincipalInfoServiceAccountEmail(original["email"], d, config) + return []interface{}{transformed} +} +func flattenBeyondcorpAppConnectorPrincipalInfoServiceAccountEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppConnectorState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandBeyondcorpAppConnectorDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBeyondcorpAppConnectorLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandBeyondcorpAppConnectorPrincipalInfo(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceAccount, err := expandBeyondcorpAppConnectorPrincipalInfoServiceAccount(original["service_account"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAccount"] = transformedServiceAccount + } + + return transformed, nil +} + +func expandBeyondcorpAppConnectorPrincipalInfoServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEmail, err := expandBeyondcorpAppConnectorPrincipalInfoServiceAccountEmail(original["email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["email"] = transformedEmail + } + + return transformed, nil +} + +func expandBeyondcorpAppConnectorPrincipalInfoServiceAccountEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_connector_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_connector_sweeper.go new file mode 100644 index 0000000000..b6329fc1f9 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_connector_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package beyondcorp + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("BeyondcorpAppConnector", testSweepBeyondcorpAppConnector) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepBeyondcorpAppConnector(region string) error { + resourceName := "BeyondcorpAppConnector" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: 
map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://beyondcorp.googleapis.com/v1/projects/{{project}}/locations/{{region}}/appConnectors", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["appConnectors"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://beyondcorp.googleapis.com/v1/projects/{{project}}/locations/{{region}}/appConnectors/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_gateway.go new file mode 100644 index 0000000000..94bd19cb8d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_gateway.go @@ -0,0 +1,447 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package beyondcorp + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceBeyondcorpAppGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceBeyondcorpAppGatewayCreate, + Read: resourceBeyondcorpAppGatewayRead, + Delete: resourceBeyondcorpAppGatewayDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBeyondcorpAppGatewayImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `ID of the AppGateway.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An arbitrary user-provided name for the AppGateway.`, + }, + "host_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"HOST_TYPE_UNSPECIFIED", "GCP_REGIONAL_MIG", ""}), + Description: `The type of hosting used by the AppGateway. 
Default value: "HOST_TYPE_UNSPECIFIED" Possible values: ["HOST_TYPE_UNSPECIFIED", "GCP_REGIONAL_MIG"]`, + Default: "HOST_TYPE_UNSPECIFIED", + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Resource labels to represent user provided metadata.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The region of the AppGateway.`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"TYPE_UNSPECIFIED", "TCP_PROXY", ""}), + Description: `The type of network connectivity used by the AppGateway. Default value: "TYPE_UNSPECIFIED" Possible values: ["TYPE_UNSPECIFIED", "TCP_PROXY"]`, + Default: "TYPE_UNSPECIFIED", + }, + "allocated_connections": { + Type: schema.TypeList, + Computed: true, + Description: `A list of connections allocated for the Gateway.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ingress_port": { + Type: schema.TypeInt, + Optional: true, + Description: `The ingress port of an allocated connection.`, + }, + "psc_uri": { + Type: schema.TypeString, + Optional: true, + Description: `The PSC uri of an allocated connection.`, + }, + }, + }, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Represents the different states of a AppGateway.`, + }, + "uri": { + Type: schema.TypeString, + Computed: true, + Description: `Server-defined URI for this resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBeyondcorpAppGatewayCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + typeProp, err := 
expandBeyondcorpAppGatewayType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + hostTypeProp, err := expandBeyondcorpAppGatewayHostType(d.Get("host_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("host_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(hostTypeProp)) && (ok || !reflect.DeepEqual(v, hostTypeProp)) { + obj["hostType"] = hostTypeProp + } + displayNameProp, err := expandBeyondcorpAppGatewayDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandBeyondcorpAppGatewayLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appGateways?app_gateway_id={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AppGateway: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AppGateway: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: 
obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating AppGateway: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/appGateways/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = BeyondcorpOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating AppGateway", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create AppGateway: %s", err) + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/appGateways/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating AppGateway %q: %#v", d.Id(), res) + + return resourceBeyondcorpAppGatewayRead(d, meta) +} + +func resourceBeyondcorpAppGatewayRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appGateways/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AppGateway: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BeyondcorpAppGateway %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading AppGateway: %s", err) + } + + if err := d.Set("type", flattenBeyondcorpAppGatewayType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGateway: %s", err) + } + if err := d.Set("host_type", flattenBeyondcorpAppGatewayHostType(res["hostType"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGateway: %s", err) + } + if err := d.Set("display_name", flattenBeyondcorpAppGatewayDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGateway: %s", err) + } + if err := d.Set("labels", flattenBeyondcorpAppGatewayLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGateway: %s", err) + } + if err := d.Set("state", flattenBeyondcorpAppGatewayState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGateway: %s", err) + } + if err := d.Set("uri", flattenBeyondcorpAppGatewayUri(res["uri"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGateway: %s", err) + } + if err := d.Set("allocated_connections", flattenBeyondcorpAppGatewayAllocatedConnections(res["allocatedConnections"], d, config)); err != nil { + return fmt.Errorf("Error reading AppGateway: %s", err) + } + + return nil +} + +func resourceBeyondcorpAppGatewayDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching 
project for AppGateway: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{BeyondcorpBasePath}}projects/{{project}}/locations/{{region}}/appGateways/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AppGateway %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AppGateway") + } + + err = BeyondcorpOperationWaitTime( + config, res, project, "Deleting AppGateway", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting AppGateway %q: %#v", d.Id(), res) + return nil +} + +func resourceBeyondcorpAppGatewayImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/appGateways/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/appGateways/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenBeyondcorpAppGatewayType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppGatewayHostType(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppGatewayDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppGatewayLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppGatewayState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppGatewayUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppGatewayAllocatedConnections(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["psc_uri"] = + flattenBeyondcorpAppGatewayAllocatedConnectionsPscUri(original["pscUri"], d, config) + transformed["ingress_port"] = + flattenBeyondcorpAppGatewayAllocatedConnectionsIngressPort(original["ingressPort"], d, config) + return []interface{}{transformed} +} +func flattenBeyondcorpAppGatewayAllocatedConnectionsPscUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBeyondcorpAppGatewayAllocatedConnectionsIngressPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandBeyondcorpAppGatewayType(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBeyondcorpAppGatewayHostType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBeyondcorpAppGatewayDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBeyondcorpAppGatewayLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_gateway_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_gateway_sweeper.go new file mode 100644 index 0000000000..b2ebe59e30 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/beyondcorp/resource_beyondcorp_app_gateway_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package beyondcorp + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("BeyondcorpAppGateway", testSweepBeyondcorpAppGateway) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepBeyondcorpAppGateway(region string) error { + resourceName := "BeyondcorpAppGateway" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://beyondcorp.googleapis.com/v1/projects/{{project}}/locations/{{region}}/appGateways", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["appGateways"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://beyondcorp.googleapis.com/v1/projects/{{project}}/locations/{{region}}/appGateways/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/data_source_google_bigquery_default_service_account.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/data_source_google_bigquery_default_service_account.go new file mode 100644 index 0000000000..2e5eb0e604 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/data_source_google_bigquery_default_service_account.go @@ -0,0 +1,62 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package bigquery + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleBigqueryDefaultServiceAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleBigqueryDefaultServiceAccountRead, + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Computed: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "member": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleBigqueryDefaultServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + projectResource, err := config.NewBigQueryClient(userAgent).Projects.GetServiceAccount(project).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "BigQuery service account not found") + } + + d.SetId(projectResource.Email) + if err := d.Set("email", projectResource.Email); err != nil { + return fmt.Errorf("Error setting email: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err 
:= d.Set("member", "serviceAccount:"+projectResource.Email); err != nil { + return fmt.Errorf("Error setting member: %s", err) + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_dataset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_dataset.go similarity index 86% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_dataset.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_dataset.go index af4d5811cc..b8bdaa28cd 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigquery_dataset.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_dataset.go @@ -1,10 +1,16 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package bigquery import ( "errors" "fmt" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/cloudresourcemanager/v1" @@ -33,12 +39,12 @@ var bigqueryAccessPrimitiveToRoleMap = map[string]string{ type BigqueryDatasetIamUpdater struct { project string datasetId string - d TerraformResourceData - Config *Config + d tpgresource.TerraformResourceData + Config *transport_tpg.Config } -func NewBigqueryDatasetIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) +func NewBigqueryDatasetIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } @@ -55,8 +61,8 @@ func NewBigqueryDatasetIamUpdater(d TerraformResourceData, config *Config) (Reso }, nil } -func BigqueryDatasetIdParseFunc(d *schema.ResourceData, config *Config) error { - fv, err := parseProjectFieldValue("datasets", d.Id(), "project", d, config, false) +func BigqueryDatasetIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + fv, err := tpgresource.ParseProjectFieldValue("datasets", d.Id(), "project", d, config, false) if err != nil { return err } @@ -76,12 +82,18 @@ func BigqueryDatasetIdParseFunc(d *schema.ResourceData, config *Config) error { func (u *BigqueryDatasetIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { url := fmt.Sprintf("%s%s", u.Config.BigQueryBasePath, u.GetResourceId()) - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(u.d, 
u.Config.UserAgent) if err != nil { return nil, err } - res, err := SendRequest(u.Config, "GET", u.project, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: u.project, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) } @@ -104,12 +116,19 @@ func (u *BigqueryDatasetIamUpdater) SetResourceIamPolicy(policy *cloudresourcema "access": access, } - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) if err != nil { return err } - _, err = SendRequest(u.Config, "PATCH", u.project, url, userAgent, obj) + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "PATCH", + Project: u.project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) if err != nil { return fmt.Errorf("Error creating DatasetAccess: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_table.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_table.go new file mode 100644 index 0000000000..ed2b37a6d8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/iam_bigquery_table.go @@ -0,0 +1,243 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package bigquery + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var BigQueryTableIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "table_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type BigQueryTableIamUpdater struct { + project string + datasetId string + tableId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func BigQueryTableIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("dataset_id"); ok { + values["dataset_id"] = v.(string) + } + + if v, ok := d.GetOk("table_id"); ok { + values["table_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/datasets/(?P[^/]+)/tables/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", 
"(?P[^/]+)"}, d, config, d.Get("table_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &BigQueryTableIamUpdater{ + project: values["project"], + datasetId: values["dataset_id"], + tableId: values["table_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("dataset_id", u.datasetId); err != nil { + return nil, fmt.Errorf("Error setting dataset_id: %s", err) + } + if err := d.Set("table_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting table_id: %s", err) + } + + return u, nil +} + +func BigQueryTableIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/datasets/(?P[^/]+)/tables/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &BigQueryTableIamUpdater{ + project: values["project"], + datasetId: values["dataset_id"], + tableId: values["table_id"], + d: d, + Config: config, + } + if err := d.Set("table_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting table_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *BigQueryTableIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyTableUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + obj = map[string]interface{}{ + "options": map[string]interface{}{ + "requestedPolicyVersion": 1, + }, + } + + userAgent, err := 
tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *BigQueryTableIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + // This is an override of the existing version that might have been set in the resource_iam_member|policy|binding code + json["version"] = 1 + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyTableUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *BigQueryTableIamUpdater) qualifyTableUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{BigQueryBasePath}}%s:%s", fmt.Sprintf("projects/%s/datasets/%s/tables/%s", u.project, u.datasetId, u.tableId), methodIdentifier) + url, err := 
tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *BigQueryTableIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/datasets/%s/tables/%s", u.project, u.datasetId, u.tableId) +} + +func (u *BigQueryTableIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-bigquery-table-%s", u.GetResourceId()) +} + +func (u *BigQueryTableIamUpdater) DescribeResource() string { + return fmt.Sprintf("bigquery table %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset.go new file mode 100644 index 0000000000..0c3ff4deb2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset.go @@ -0,0 +1,1479 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigquery + +import ( + "fmt" + "log" + "reflect" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/googleapi" +) + +const datasetIdRegexp = `[0-9A-Za-z_]+` + +func validateDatasetId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(datasetIdRegexp).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_)", k)) + } + + if len(value) > 1024 { + errors = append(errors, fmt.Errorf( + "%q cannot be greater than 1,024 characters", k)) + } + + return +} + +func validateDefaultTableExpirationMs(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 3600000 { + errors = append(errors, fmt.Errorf("%q cannot be shorter than 3600000 milliseconds (one hour)", k)) + } + + return +} + +func ResourceBigQueryDataset() *schema.Resource { + return &schema.Resource{ + Create: resourceBigQueryDatasetCreate, + Read: resourceBigQueryDatasetRead, + Update: resourceBigQueryDatasetUpdate, + Delete: resourceBigQueryDatasetDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBigQueryDatasetImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateDatasetId, + Description: `A unique ID for this dataset, without the project name. The ID +must contain only letters (a-z, A-Z), numbers (0-9), or +underscores (_). 
The maximum length is 1,024 characters.`, + }, + + "access": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + Description: `An array of objects that define dataset access for one or more entities.`, + Elem: bigqueryDatasetAccessSchema(), + // Default schema.HashSchema is used. + }, + "default_collation": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Defines the default collation specification of future tables created +in the dataset. If a table is created in this dataset without table-level +default collation, then the table inherits the dataset default collation, +which is applied to the string fields that do not have explicit collation +specified. A change to this field affects only tables created afterwards, +and does not alter the existing tables. + +The following values are supported: +- 'und:ci': undetermined locale, case insensitive. +- '': empty string. Default to case-sensitive behavior.`, + }, + "default_encryption_configuration": { + Type: schema.TypeList, + Optional: true, + Description: `The default encryption key for all tables in the dataset. Once this property is set, +all newly-created partitioned tables in the dataset will have encryption key set to +this value, unless table creation request (or query) overrides the key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + Description: `Describes the Cloud KMS encryption key that will be used to protect destination +BigQuery table. The BigQuery Service Account associated with your project requires +access to this encryption key.`, + }, + }, + }, + }, + "default_partition_expiration_ms": { + Type: schema.TypeInt, + Optional: true, + Description: `The default partition expiration for all partitioned tables in +the dataset, in milliseconds. 
+ + +Once this property is set, all newly-created partitioned tables in +the dataset will have an 'expirationMs' property in the 'timePartitioning' +settings set to this value, and changing the value will only +affect new tables, not existing ones. The storage in a partition will +have an expiration time of its partition time plus this value. +Setting this property overrides the use of 'defaultTableExpirationMs' +for partitioned tables: only one of 'defaultTableExpirationMs' and +'defaultPartitionExpirationMs' will be used for any new partitioned +table. If you provide an explicit 'timePartitioning.expirationMs' when +creating or updating a partitioned table, that value takes precedence +over the default partition expiration time indicated by this property.`, + }, + "default_table_expiration_ms": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateDefaultTableExpirationMs, + Description: `The default lifetime of all tables in the dataset, in milliseconds. +The minimum value is 3600000 milliseconds (one hour). + + +Once this property is set, all newly-created tables in the dataset +will have an 'expirationTime' property set to the creation time plus +the value in this property, and changing the value will only affect +new tables, not existing ones. When the 'expirationTime' for a given +table is reached, that table will be deleted automatically. 
+If a table's 'expirationTime' is modified or removed before the +table expires, or if you provide an explicit 'expirationTime' when +creating a table, that value takes precedence over the default +expiration time indicated by this property.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A user-friendly description of the dataset`, + }, + "friendly_name": { + Type: schema.TypeString, + Optional: true, + Description: `A descriptive name for the dataset`, + }, + "is_case_insensitive": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `TRUE if the dataset and its table names are case-insensitive, otherwise FALSE. +By default, this is FALSE, which means the dataset and its table names are +case-sensitive. This field does not affect routine references.`, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `The labels associated with this dataset. You can use these to +organize and group your datasets`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: `The geographic location where the dataset should reside. +See [official docs](https://cloud.google.com/bigquery/docs/dataset-locations). + + +There are two types of locations, regional or multi-regional. A regional +location is a specific geographic place, such as Tokyo, and a multi-regional +location is a large geographic area, such as the United States, that +contains at least two geographic places. + + +The default value is multi-regional location 'US'. +Changing this forces a new resource to be created.`, + Default: "US", + }, + "max_time_travel_hours": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Defines the time travel window in hours. 
The value can be from 48 to 168 hours (2 to 7 days).`, + }, + "storage_billing_model": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Specifies the storage billing model for the dataset. +Set this flag value to LOGICAL to use logical bytes for storage billing, +or to PHYSICAL to use physical bytes instead. + +LOGICAL is the default if this flag isn't specified.`, + }, + "creation_time": { + Type: schema.TypeInt, + Computed: true, + Description: `The time when this dataset was created, in milliseconds since the +epoch.`, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `A hash of the resource.`, + }, + "last_modified_time": { + Type: schema.TypeInt, + Computed: true, + Description: `The date when this dataset or any of its tables was last modified, in +milliseconds since the epoch.`, + }, + "delete_contents_on_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If set to 'true', delete all the tables in the +dataset when destroying the resource; otherwise, +destroying the resource will fail if tables are present.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func bigqueryDatasetAccessSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset": { + Type: schema.TypeList, + Optional: true, + Description: `Grants all resources of particular types in a particular dataset read access to the current dataset.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset": { + Type: schema.TypeList, + Required: true, + Description: `The dataset this entry applies to`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the 
dataset containing this table.`, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the project containing this table.`, + }, + }, + }, + }, + "target_types": { + Type: schema.TypeList, + Required: true, + Description: `Which resources in the dataset this entry applies to. Currently, only views are supported, +but additional target types may be added in the future. Possible values: VIEWS`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "domain": { + Type: schema.TypeString, + Optional: true, + Description: `A domain to grant access to. Any users signed in with the +domain specified will be granted the specified access`, + }, + "group_by_email": { + Type: schema.TypeString, + Optional: true, + Description: `An email address of a Google Group to grant access to.`, + }, + "role": { + Type: schema.TypeString, + Optional: true, + Description: `Describes the rights granted to the user specified by the other +member of the access object. Basic, predefined, and custom roles +are supported. Predefined roles that have equivalent basic roles +are swapped by the API to their basic counterparts. See +[official docs](https://cloud.google.com/bigquery/docs/access-control).`, + }, + "routine": { + Type: schema.TypeList, + Optional: true, + Description: `A routine from a different dataset to grant access to. Queries +executed against that routine will have read access to tables in +this dataset. The role field is not required when this field is +set. 
If that routine is updated by any user, access to the routine +needs to be granted again via an update operation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the dataset containing this table.`, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the project containing this table.`, + }, + "routine_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the routine. The ID must contain only letters (a-z, +A-Z), numbers (0-9), or underscores (_). The maximum length +is 256 characters.`, + }, + }, + }, + }, + "special_group": { + Type: schema.TypeString, + Optional: true, + Description: `A special group to grant access to. Possible values include: + + +* 'projectOwners': Owners of the enclosing project. + + +* 'projectReaders': Readers of the enclosing project. + + +* 'projectWriters': Writers of the enclosing project. + + +* 'allAuthenticatedUsers': All authenticated BigQuery users.`, + }, + "user_by_email": { + Type: schema.TypeString, + Optional: true, + Description: `An email address of a user to grant access to. For example: +fred@example.com`, + }, + "view": { + Type: schema.TypeList, + Optional: true, + Description: `A view from a different dataset to grant access to. Queries +executed against that view will have read access to tables in +this dataset. The role field is not required when this field is +set. 
If that view is updated by any user, access to the view +needs to be granted again via an update operation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the dataset containing this table.`, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the project containing this table.`, + }, + "table_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the table. The ID must contain only letters (a-z, +A-Z), numbers (0-9), or underscores (_). The maximum length +is 1,024 characters.`, + }, + }, + }, + }, + }, + } +} + +func resourceBigQueryDatasetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + maxTimeTravelHoursProp, err := expandBigQueryDatasetMaxTimeTravelHours(d.Get("max_time_travel_hours"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("max_time_travel_hours"); !tpgresource.IsEmptyValue(reflect.ValueOf(maxTimeTravelHoursProp)) && (ok || !reflect.DeepEqual(v, maxTimeTravelHoursProp)) { + obj["maxTimeTravelHours"] = maxTimeTravelHoursProp + } + accessProp, err := expandBigQueryDatasetAccess(d.Get("access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("access"); !tpgresource.IsEmptyValue(reflect.ValueOf(accessProp)) && (ok || !reflect.DeepEqual(v, accessProp)) { + obj["access"] = accessProp + } + datasetReferenceProp, err := expandBigQueryDatasetDatasetReference(nil, d, config) + if err != nil { + return err + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(datasetReferenceProp)) { + obj["datasetReference"] = datasetReferenceProp + } + defaultTableExpirationMsProp, err := 
expandBigQueryDatasetDefaultTableExpirationMs(d.Get("default_table_expiration_ms"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_table_expiration_ms"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultTableExpirationMsProp)) && (ok || !reflect.DeepEqual(v, defaultTableExpirationMsProp)) { + obj["defaultTableExpirationMs"] = defaultTableExpirationMsProp + } + defaultPartitionExpirationMsProp, err := expandBigQueryDatasetDefaultPartitionExpirationMs(d.Get("default_partition_expiration_ms"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_partition_expiration_ms"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultPartitionExpirationMsProp)) && (ok || !reflect.DeepEqual(v, defaultPartitionExpirationMsProp)) { + obj["defaultPartitionExpirationMs"] = defaultPartitionExpirationMsProp + } + descriptionProp, err := expandBigQueryDatasetDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + friendlyNameProp, err := expandBigQueryDatasetFriendlyName(d.Get("friendly_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("friendly_name"); ok || !reflect.DeepEqual(v, friendlyNameProp) { + obj["friendlyName"] = friendlyNameProp + } + labelsProp, err := expandBigQueryDatasetLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + locationProp, err := expandBigQueryDatasetLocation(d.Get("location"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("location"); !tpgresource.IsEmptyValue(reflect.ValueOf(locationProp)) && (ok || 
!reflect.DeepEqual(v, locationProp)) { + obj["location"] = locationProp + } + defaultEncryptionConfigurationProp, err := expandBigQueryDatasetDefaultEncryptionConfiguration(d.Get("default_encryption_configuration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_encryption_configuration"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultEncryptionConfigurationProp)) && (ok || !reflect.DeepEqual(v, defaultEncryptionConfigurationProp)) { + obj["defaultEncryptionConfiguration"] = defaultEncryptionConfigurationProp + } + isCaseInsensitiveProp, err := expandBigQueryDatasetIsCaseInsensitive(d.Get("is_case_insensitive"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("is_case_insensitive"); !tpgresource.IsEmptyValue(reflect.ValueOf(isCaseInsensitiveProp)) && (ok || !reflect.DeepEqual(v, isCaseInsensitiveProp)) { + obj["isCaseInsensitive"] = isCaseInsensitiveProp + } + defaultCollationProp, err := expandBigQueryDatasetDefaultCollation(d.Get("default_collation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_collation"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultCollationProp)) && (ok || !reflect.DeepEqual(v, defaultCollationProp)) { + obj["defaultCollation"] = defaultCollationProp + } + storageBillingModelProp, err := expandBigQueryDatasetStorageBillingModel(d.Get("storage_billing_model"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("storage_billing_model"); !tpgresource.IsEmptyValue(reflect.ValueOf(storageBillingModelProp)) && (ok || !reflect.DeepEqual(v, storageBillingModelProp)) { + obj["storageBillingModel"] = storageBillingModelProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Dataset: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil 
{ + return fmt.Errorf("Error fetching project for Dataset: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Dataset: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Dataset %q: %#v", d.Id(), res) + + return resourceBigQueryDatasetRead(d, meta) +} + +func resourceBigQueryDatasetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Dataset: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigQueryDataset %q", d.Id())) + } + + // Explicitly set virtual 
fields to default values if unset + if _, ok := d.GetOkExists("delete_contents_on_destroy"); !ok { + if err := d.Set("delete_contents_on_destroy", false); err != nil { + return fmt.Errorf("Error setting delete_contents_on_destroy: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + + if err := d.Set("max_time_travel_hours", flattenBigQueryDatasetMaxTimeTravelHours(res["maxTimeTravelHours"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("access", flattenBigQueryDatasetAccess(res["access"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("creation_time", flattenBigQueryDatasetCreationTime(res["creationTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + // Terraform must set the top level schema field, but since this object contains collapsed properties + // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. 
+ if flattenedProp := flattenBigQueryDatasetDatasetReference(res["datasetReference"], d, config); flattenedProp != nil { + if gerr, ok := flattenedProp.(*googleapi.Error); ok { + return fmt.Errorf("Error reading Dataset: %s", gerr) + } + casted := flattenedProp.([]interface{})[0] + if casted != nil { + for k, v := range casted.(map[string]interface{}) { + if err := d.Set(k, v); err != nil { + return fmt.Errorf("Error setting %s: %s", k, err) + } + } + } + } + if err := d.Set("default_table_expiration_ms", flattenBigQueryDatasetDefaultTableExpirationMs(res["defaultTableExpirationMs"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("default_partition_expiration_ms", flattenBigQueryDatasetDefaultPartitionExpirationMs(res["defaultPartitionExpirationMs"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("description", flattenBigQueryDatasetDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("etag", flattenBigQueryDatasetEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("friendly_name", flattenBigQueryDatasetFriendlyName(res["friendlyName"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("labels", flattenBigQueryDatasetLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("last_modified_time", flattenBigQueryDatasetLastModifiedTime(res["lastModifiedTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("location", flattenBigQueryDatasetLocation(res["location"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("default_encryption_configuration", 
flattenBigQueryDatasetDefaultEncryptionConfiguration(res["defaultEncryptionConfiguration"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("is_case_insensitive", flattenBigQueryDatasetIsCaseInsensitive(res["isCaseInsensitive"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("default_collation", flattenBigQueryDatasetDefaultCollation(res["defaultCollation"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("storage_billing_model", flattenBigQueryDatasetStorageBillingModel(res["storageBillingModel"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + + return nil +} + +func resourceBigQueryDatasetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Dataset: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + maxTimeTravelHoursProp, err := expandBigQueryDatasetMaxTimeTravelHours(d.Get("max_time_travel_hours"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("max_time_travel_hours"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maxTimeTravelHoursProp)) { + obj["maxTimeTravelHours"] = maxTimeTravelHoursProp + } + accessProp, err := expandBigQueryDatasetAccess(d.Get("access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("access"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, accessProp)) { + obj["access"] = accessProp + } + datasetReferenceProp, err := expandBigQueryDatasetDatasetReference(nil, d, config) + if err != nil { + return err + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(datasetReferenceProp)) { + obj["datasetReference"] = datasetReferenceProp + } + defaultTableExpirationMsProp, err := expandBigQueryDatasetDefaultTableExpirationMs(d.Get("default_table_expiration_ms"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_table_expiration_ms"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultTableExpirationMsProp)) { + obj["defaultTableExpirationMs"] = defaultTableExpirationMsProp + } + defaultPartitionExpirationMsProp, err := expandBigQueryDatasetDefaultPartitionExpirationMs(d.Get("default_partition_expiration_ms"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_partition_expiration_ms"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultPartitionExpirationMsProp)) { + obj["defaultPartitionExpirationMs"] = defaultPartitionExpirationMsProp + } + descriptionProp, err := expandBigQueryDatasetDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + friendlyNameProp, err := expandBigQueryDatasetFriendlyName(d.Get("friendly_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("friendly_name"); ok || !reflect.DeepEqual(v, friendlyNameProp) { + obj["friendlyName"] = friendlyNameProp + } + labelsProp, err := expandBigQueryDatasetLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
labelsProp)) { + obj["labels"] = labelsProp + } + locationProp, err := expandBigQueryDatasetLocation(d.Get("location"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("location"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, locationProp)) { + obj["location"] = locationProp + } + defaultEncryptionConfigurationProp, err := expandBigQueryDatasetDefaultEncryptionConfiguration(d.Get("default_encryption_configuration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_encryption_configuration"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultEncryptionConfigurationProp)) { + obj["defaultEncryptionConfiguration"] = defaultEncryptionConfigurationProp + } + isCaseInsensitiveProp, err := expandBigQueryDatasetIsCaseInsensitive(d.Get("is_case_insensitive"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("is_case_insensitive"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, isCaseInsensitiveProp)) { + obj["isCaseInsensitive"] = isCaseInsensitiveProp + } + defaultCollationProp, err := expandBigQueryDatasetDefaultCollation(d.Get("default_collation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_collation"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultCollationProp)) { + obj["defaultCollation"] = defaultCollationProp + } + storageBillingModelProp, err := expandBigQueryDatasetStorageBillingModel(d.Get("storage_billing_model"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("storage_billing_model"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, storageBillingModelProp)) { + obj["storageBillingModel"] = storageBillingModelProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Dataset %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Dataset %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Dataset %q: %#v", d.Id(), res) + } + + return resourceBigQueryDatasetRead(d, meta) +} + +func resourceBigQueryDatasetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Dataset: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}?deleteContents={{delete_contents_on_destroy}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Dataset %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, 
"Dataset") + } + + log.Printf("[DEBUG] Finished deleting Dataset %q: %#v", d.Id(), res) + return nil +} + +func resourceBigQueryDatasetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/datasets/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Explicitly set virtual fields to default values on import + if err := d.Set("delete_contents_on_destroy", false); err != nil { + return nil, fmt.Errorf("Error setting delete_contents_on_destroy: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenBigQueryDatasetMaxTimeTravelHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(bigqueryDatasetAccessSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "domain": flattenBigQueryDatasetAccessDomain(original["domain"], d, config), + "group_by_email": flattenBigQueryDatasetAccessGroupByEmail(original["groupByEmail"], d, config), + "role": flattenBigQueryDatasetAccessRole(original["role"], d, config), + "special_group": flattenBigQueryDatasetAccessSpecialGroup(original["specialGroup"], d, config), + "user_by_email": 
flattenBigQueryDatasetAccessUserByEmail(original["userByEmail"], d, config), + "view": flattenBigQueryDatasetAccessView(original["view"], d, config), + "dataset": flattenBigQueryDatasetAccessDataset(original["dataset"], d, config), + "routine": flattenBigQueryDatasetAccessRoutine(original["routine"], d, config), + }) + } + return transformed +} +func flattenBigQueryDatasetAccessDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetAccessGroupByEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetAccessRole(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetAccessSpecialGroup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetAccessUserByEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetAccessView(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset_id"] = + flattenBigQueryDatasetAccessViewDatasetId(original["datasetId"], d, config) + transformed["project_id"] = + flattenBigQueryDatasetAccessViewProjectId(original["projectId"], d, config) + transformed["table_id"] = + flattenBigQueryDatasetAccessViewTableId(original["tableId"], d, config) + return []interface{}{transformed} +} +func flattenBigQueryDatasetAccessViewDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetAccessViewProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + 
+func flattenBigQueryDatasetAccessViewTableId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetAccessDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset"] = + flattenBigQueryDatasetAccessDatasetDataset(original["dataset"], d, config) + transformed["target_types"] = + flattenBigQueryDatasetAccessDatasetTargetTypes(original["targetTypes"], d, config) + return []interface{}{transformed} +} +func flattenBigQueryDatasetAccessDatasetDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset_id"] = + flattenBigQueryDatasetAccessDatasetDatasetDatasetId(original["datasetId"], d, config) + transformed["project_id"] = + flattenBigQueryDatasetAccessDatasetDatasetProjectId(original["projectId"], d, config) + return []interface{}{transformed} +} +func flattenBigQueryDatasetAccessDatasetDatasetDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetAccessDatasetDatasetProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetAccessDatasetTargetTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetAccessRoutine(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := 
make(map[string]interface{}) + transformed["dataset_id"] = + flattenBigQueryDatasetAccessRoutineDatasetId(original["datasetId"], d, config) + transformed["project_id"] = + flattenBigQueryDatasetAccessRoutineProjectId(original["projectId"], d, config) + transformed["routine_id"] = + flattenBigQueryDatasetAccessRoutineRoutineId(original["routineId"], d, config) + return []interface{}{transformed} +} +func flattenBigQueryDatasetAccessRoutineDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetAccessRoutineProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetAccessRoutineRoutineId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetCreationTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBigQueryDatasetDatasetReference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset_id"] = + flattenBigQueryDatasetDatasetReferenceDatasetId(original["datasetId"], d, config) + return []interface{}{transformed} +} +func flattenBigQueryDatasetDatasetReferenceDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetDefaultTableExpirationMs(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBigQueryDatasetDefaultPartitionExpirationMs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBigQueryDatasetDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetFriendlyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetLastModifiedTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +// Older Datasets 
in BigQuery have no Location set in the API response. This may be an issue when importing +// datasets created before BigQuery was available in multiple zones. We can safely assume that these datasets +// are in the US, as this was the default at the time. +func flattenBigQueryDatasetLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return "US" + } + return v +} + +func flattenBigQueryDatasetDefaultEncryptionConfiguration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetIsCaseInsensitive(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetDefaultCollation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryDatasetStorageBillingModel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandBigQueryDatasetMaxTimeTravelHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDomain, err := expandBigQueryDatasetAccessDomain(original["domain"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDomain); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["domain"] = transformedDomain + } + + transformedGroupByEmail, err := expandBigQueryDatasetAccessGroupByEmail(original["group_by_email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupByEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupByEmail"] = transformedGroupByEmail + } + + transformedRole, err := expandBigQueryDatasetAccessRole(original["role"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRole); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["role"] = transformedRole + } + + transformedSpecialGroup, err := expandBigQueryDatasetAccessSpecialGroup(original["special_group"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSpecialGroup); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["specialGroup"] = transformedSpecialGroup + } + + transformedUserByEmail, err := expandBigQueryDatasetAccessUserByEmail(original["user_by_email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUserByEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["userByEmail"] = transformedUserByEmail + } + + transformedView, err := expandBigQueryDatasetAccessView(original["view"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedView); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["view"] = transformedView + } + + transformedDataset, err := expandBigQueryDatasetAccessDataset(original["dataset"], d, config) + if err != nil { + return nil, err + } 
else if val := reflect.ValueOf(transformedDataset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataset"] = transformedDataset + } + + transformedRoutine, err := expandBigQueryDatasetAccessRoutine(original["routine"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRoutine); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["routine"] = transformedRoutine + } + + req = append(req, transformed) + } + return req, nil +} + +func expandBigQueryDatasetAccessDomain(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccessGroupByEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccessRole(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccessSpecialGroup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccessUserByEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccessView(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDatasetId, err := expandBigQueryDatasetAccessViewDatasetId(original["dataset_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["datasetId"] = transformedDatasetId + } + + 
transformedProjectId, err := expandBigQueryDatasetAccessViewProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + transformedTableId, err := expandBigQueryDatasetAccessViewTableId(original["table_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTableId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tableId"] = transformedTableId + } + + return transformed, nil +} + +func expandBigQueryDatasetAccessViewDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccessViewProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccessViewTableId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccessDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDataset, err := expandBigQueryDatasetAccessDatasetDataset(original["dataset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataset"] = transformedDataset + } + + transformedTargetTypes, err := expandBigQueryDatasetAccessDatasetTargetTypes(original["target_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTargetTypes); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["targetTypes"] = transformedTargetTypes + } + + return transformed, nil +} + +func expandBigQueryDatasetAccessDatasetDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDatasetId, err := expandBigQueryDatasetAccessDatasetDatasetDatasetId(original["dataset_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["datasetId"] = transformedDatasetId + } + + transformedProjectId, err := expandBigQueryDatasetAccessDatasetDatasetProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + return transformed, nil +} + +func expandBigQueryDatasetAccessDatasetDatasetDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccessDatasetDatasetProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccessDatasetTargetTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccessRoutine(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := 
make(map[string]interface{}) + + transformedDatasetId, err := expandBigQueryDatasetAccessRoutineDatasetId(original["dataset_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["datasetId"] = transformedDatasetId + } + + transformedProjectId, err := expandBigQueryDatasetAccessRoutineProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + transformedRoutineId, err := expandBigQueryDatasetAccessRoutineRoutineId(original["routine_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRoutineId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["routineId"] = transformedRoutineId + } + + return transformed, nil +} + +func expandBigQueryDatasetAccessRoutineDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccessRoutineProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetAccessRoutineRoutineId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetDatasetReference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + transformed := make(map[string]interface{}) + transformedDatasetId, err := expandBigQueryDatasetDatasetReferenceDatasetId(d.Get("dataset_id"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["datasetId"] = 
transformedDatasetId + } + + return transformed, nil +} + +func expandBigQueryDatasetDatasetReferenceDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetDefaultTableExpirationMs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetDefaultPartitionExpirationMs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetFriendlyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandBigQueryDatasetLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetDefaultEncryptionConfiguration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetIsCaseInsensitive(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetDefaultCollation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryDatasetStorageBillingModel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset_access.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset_access.go new file mode 100644 index 0000000000..1cdb89fe4a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_dataset_access.go @@ -0,0 +1,1210 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigquery + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var bigqueryAccessRoleToPrimitiveMap = map[string]string{ + "roles/bigquery.dataOwner": "OWNER", + "roles/bigquery.dataEditor": "WRITER", + "roles/bigquery.dataViewer": "READER", +} + +func resourceBigQueryDatasetAccessRoleDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + if primitiveRole, ok := bigqueryAccessRoleToPrimitiveMap[new]; ok { + return primitiveRole == old + } + return false +} + +// we want to diff suppress any iam_members that are configured as `iam_member`, but stored in state as a different member type +func resourceBigQueryDatasetAccessIamMemberDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + if primitiveRole, ok := bigqueryAccessRoleToPrimitiveMap[new]; ok { + return primitiveRole == old + } + + if d.Get("api_updated_member") == true { + expectedIamMember := d.Get("iam_member").(string) + parts := strings.SplitAfter(expectedIamMember, ":") + + strippedIamMember := parts[0] + if len(parts) > 1 { + strippedIamMember = parts[1] + } + + if memberInState := d.Get("user_by_email").(string); memberInState != "" { + return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + } + + if memberInState := d.Get("group_by_email").(string); memberInState != "" { + return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + } + + if memberInState := d.Get("domain").(string); memberInState != "" { + return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + } + + if memberInState := d.Get("special_group").(string); memberInState != "" { + return strings.ToUpper(memberInState) == 
strings.ToUpper(strippedIamMember) + } + } + + return false +} + +// this function will go through a response's access list and see if the iam_member has been reassigned to a different member_type +// if it has, it will return the member type, and the member +func resourceBigQueryDatasetAccessReassignIamMemberInNestedObjectList(d *schema.ResourceData, meta interface{}, items []interface{}) (member_type string, member interface{}, err error) { + expectedRole, err := expandNestedBigQueryDatasetAccessRole(d.Get("role"), d, meta.(*transport_tpg.Config)) + if err != nil { + return "", nil, err + } + expectedFlattenedRole := flattenNestedBigQueryDatasetAccessRole(expectedRole, d, meta.(*transport_tpg.Config)) + + expectedIamMember, err := expandNestedBigQueryDatasetAccessIamMember(d.Get("iam_member"), d, meta.(*transport_tpg.Config)) + if err != nil { + return "", nil, err + } + expectedFlattenedIamMember := flattenNestedBigQueryDatasetAccessIamMember(expectedIamMember, d, meta.(*transport_tpg.Config)) + + parts := strings.SplitAfter(expectedFlattenedIamMember.(string), ":") + + expectedStrippedIamMember := parts[0] + if len(parts) > 1 { + expectedStrippedIamMember = parts[1] + } + + // Search list for this resource. 
+ for _, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemRole := flattenNestedBigQueryDatasetAccessRole(item["role"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemRole)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedRole))) && !reflect.DeepEqual(itemRole, expectedFlattenedRole) { + log.Printf("[DEBUG] Skipping item with role= %#v, looking for %#v)", itemRole, expectedFlattenedRole) + continue + } + + itemUserByEmail := flattenNestedBigQueryDatasetAccessUserByEmail(item["userByEmail"], d, meta.(*transport_tpg.Config)) + if reflect.DeepEqual(itemUserByEmail, expectedStrippedIamMember) { + log.Printf("[DEBUG] Iam Member changed to userByEmail= %#v)", itemUserByEmail) + return "user_by_email", itemUserByEmail, nil + } + itemGroupByEmail := flattenNestedBigQueryDatasetAccessGroupByEmail(item["groupByEmail"], d, meta.(*transport_tpg.Config)) + if reflect.DeepEqual(itemGroupByEmail, expectedStrippedIamMember) { + log.Printf("[DEBUG] Iam Member changed to groupByEmail= %#v)", itemGroupByEmail) + return "group_by_email", itemGroupByEmail, nil + } + itemDomain := flattenNestedBigQueryDatasetAccessDomain(item["domain"], d, meta.(*transport_tpg.Config)) + if reflect.DeepEqual(itemDomain, expectedStrippedIamMember) { + log.Printf("[DEBUG] Iam Member changed to domain= %#v)", itemDomain) + return "domain", itemDomain, nil + } + itemSpecialGroup := flattenNestedBigQueryDatasetAccessSpecialGroup(item["specialGroup"], d, meta.(*transport_tpg.Config)) + if reflect.DeepEqual(itemSpecialGroup, expectedStrippedIamMember) { + log.Printf("[DEBUG] Iam Member changed to specialGroup= %#v)", itemSpecialGroup) + return "special_group", itemSpecialGroup, nil + } + itemIamMember := flattenNestedBigQueryDatasetAccessIamMember(item["iamMember"], d, meta.(*transport_tpg.Config)) + if 
reflect.DeepEqual(itemIamMember, expectedFlattenedIamMember) { + log.Printf("[DEBUG] Iam Member stayed as iamMember= %#v)", itemIamMember) + return "", nil, nil + } + continue + } + log.Printf("[DEBUG] Did not find item for resource %q)", d.Id()) + return "", nil, nil +} + +func ResourceBigQueryDatasetAccess() *schema.Resource { + return &schema.Resource{ + Create: resourceBigQueryDatasetAccessCreate, + Read: resourceBigQueryDatasetAccessRead, + Delete: resourceBigQueryDatasetAccessDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A unique ID for this dataset, without the project name. The ID +must contain only letters (a-z, A-Z), numbers (0-9), or +underscores (_). The maximum length is 1,024 characters.`, + }, + "dataset": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Grants all resources of particular types in a particular dataset read access to the current dataset.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The dataset this entry applies to`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the dataset containing this table.`, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the project containing this table.`, + }, + }, + }, + }, + "target_types": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `Which resources in the dataset this entry applies to. Currently, only views are supported, +but additional target types may be added in the future. 
Possible values: VIEWS`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, + }, + "domain": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, + Description: `A domain to grant access to. Any users signed in with the +domain specified will be granted the specified access`, + ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, + }, + "group_by_email": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, + Description: `An email address of a Google Group to grant access to.`, + ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, + }, + "iam_member": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, + Description: `Some other type of member that appears in the IAM Policy but isn't a user, +group, domain, or special group. For example: 'allUsers'`, + ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, + }, + "role": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: resourceBigQueryDatasetAccessRoleDiffSuppress, + Description: `Describes the rights granted to the user specified by the other +member of the access object. Basic, predefined, and custom roles are +supported. Predefined roles that have equivalent basic roles are +swapped by the API to their basic counterparts, and will show a diff +post-create. 
See +[official docs](https://cloud.google.com/bigquery/docs/access-control).`, + }, + "routine": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A routine from a different dataset to grant access to. Queries +executed against that routine will have read access to tables in +this dataset. The role field is not required when this field is +set. If that routine is updated by any user, access to the routine +needs to be granted again via an update operation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the dataset containing this table.`, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the project containing this table.`, + }, + "routine_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the routine. The ID must contain only letters (a-z, +A-Z), numbers (0-9), or underscores (_). The maximum length +is 256 characters.`, + }, + }, + }, + ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, + }, + "special_group": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, + Description: `A special group to grant access to. Possible values include: + + +* 'projectOwners': Owners of the enclosing project. + + +* 'projectReaders': Readers of the enclosing project. + + +* 'projectWriters': Writers of the enclosing project. 
+ + +* 'allAuthenticatedUsers': All authenticated BigQuery users.`, + ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, + }, + "user_by_email": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, + Description: `An email address of a user to grant access to. For example: +fred@example.com`, + ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, + }, + "view": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A view from a different dataset to grant access to. Queries +executed against that view will have read access to tables in +this dataset. The role field is not required when this field is +set. If that view is updated by any user, access to the view +needs to be granted again via an update operation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the dataset containing this table.`, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the project containing this table.`, + }, + "table_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the table. The ID must contain only letters (a-z, +A-Z), numbers (0-9), or underscores (_). 
The maximum length +is 1,024 characters.`, + }, + }, + }, + ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view", "dataset", "routine"}, + }, + "api_updated_member": { + Type: schema.TypeBool, + Computed: true, + Description: "If true, represents that that the iam_member in the config was translated to a different member type by the API, and is stored in state as a different member type", + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBigQueryDatasetAccessCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + datasetIdProp, err := expandNestedBigQueryDatasetAccessDatasetId(d.Get("dataset_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataset_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(datasetIdProp)) && (ok || !reflect.DeepEqual(v, datasetIdProp)) { + obj["datasetId"] = datasetIdProp + } + roleProp, err := expandNestedBigQueryDatasetAccessRole(d.Get("role"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("role"); !tpgresource.IsEmptyValue(reflect.ValueOf(roleProp)) && (ok || !reflect.DeepEqual(v, roleProp)) { + obj["role"] = roleProp + } + userByEmailProp, err := expandNestedBigQueryDatasetAccessUserByEmail(d.Get("user_by_email"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("user_by_email"); !tpgresource.IsEmptyValue(reflect.ValueOf(userByEmailProp)) && (ok || !reflect.DeepEqual(v, userByEmailProp)) { + obj["userByEmail"] = userByEmailProp + } + groupByEmailProp, err := expandNestedBigQueryDatasetAccessGroupByEmail(d.Get("group_by_email"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("group_by_email"); !tpgresource.IsEmptyValue(reflect.ValueOf(groupByEmailProp)) && (ok || !reflect.DeepEqual(v, groupByEmailProp)) { + obj["groupByEmail"] = groupByEmailProp + } + domainProp, err := expandNestedBigQueryDatasetAccessDomain(d.Get("domain"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("domain"); !tpgresource.IsEmptyValue(reflect.ValueOf(domainProp)) && (ok || !reflect.DeepEqual(v, domainProp)) { + obj["domain"] = domainProp + } + specialGroupProp, err := expandNestedBigQueryDatasetAccessSpecialGroup(d.Get("special_group"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("special_group"); !tpgresource.IsEmptyValue(reflect.ValueOf(specialGroupProp)) && (ok || !reflect.DeepEqual(v, specialGroupProp)) { + obj["specialGroup"] = specialGroupProp + } + iamMemberProp, err := expandNestedBigQueryDatasetAccessIamMember(d.Get("iam_member"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("iam_member"); !tpgresource.IsEmptyValue(reflect.ValueOf(iamMemberProp)) && (ok || !reflect.DeepEqual(v, iamMemberProp)) { + obj["iamMember"] = iamMemberProp + } + viewProp, err := expandNestedBigQueryDatasetAccessView(d.Get("view"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("view"); !tpgresource.IsEmptyValue(reflect.ValueOf(viewProp)) && (ok || !reflect.DeepEqual(v, viewProp)) { + obj["view"] = viewProp + } + datasetProp, err := expandNestedBigQueryDatasetAccessDataset(d.Get("dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(datasetProp)) && (ok || !reflect.DeepEqual(v, datasetProp)) { + obj["dataset"] = datasetProp + } + routineProp, err := expandNestedBigQueryDatasetAccessRoutine(d.Get("routine"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("routine"); !tpgresource.IsEmptyValue(reflect.ValueOf(routineProp)) && (ok 
|| !reflect.DeepEqual(v, routineProp)) { + obj["routine"] = routineProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "{{dataset_id}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new DatasetAccess: %#v", obj) + + obj, err = resourceBigQueryDatasetAccessPatchCreateEncoder(d, meta, obj) + if err != nil { + return err + } + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DatasetAccess: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsBigqueryIAMQuotaError}, + }) + if err != nil { + return fmt.Errorf("Error creating DatasetAccess: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // by default, we are not updating the member + if err := d.Set("api_updated_member", false); err != nil { + return fmt.Errorf("Error setting api_updated_member: %s", err) + } + + // iam_member is a generalized attribute, if the API can map it to a different member type on the backend, it will return + // the correct member_type in the response. 
If it cannot be mapped to a different member type, it will stay in iam_member. + if iamMemberProp != "" { + member_type, member, err := resourceBigQueryDatasetAccessReassignIamMemberInNestedObjectList(d, meta, res["access"].([]interface{})) + if err != nil { + fmt.Println(err) + } + + // if the member type changed, we set that member_type in state (it's already in the response) and we clear iam_member + // and we set "api_updated_member" to true to acknowledge that we are making this change + if member_type != "" { + if err := d.Set(member_type, member.(string)); err != nil { + return fmt.Errorf("Error setting member_type: %s", err) + } + if err := d.Set("iam_member", ""); err != nil { + return fmt.Errorf("Error setting iam_member: %s", err) + } + if err := d.Set("api_updated_member", true); err != nil { + return fmt.Errorf("Error setting api_updated_member: %s", err) + } + } + } + + log.Printf("[DEBUG] Finished creating DatasetAccess %q: %#v", d.Id(), res) + + return resourceBigQueryDatasetAccessRead(d, meta) +} + +func resourceBigQueryDatasetAccessRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DatasetAccess: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: 
[]transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsBigqueryIAMQuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigQueryDatasetAccess %q", d.Id())) + } + + res, err = flattenNestedBigQueryDatasetAccess(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing BigQueryDatasetAccess because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading DatasetAccess: %s", err) + } + + if err := d.Set("role", flattenNestedBigQueryDatasetAccessRole(res["role"], d, config)); err != nil { + return fmt.Errorf("Error reading DatasetAccess: %s", err) + } + if err := d.Set("user_by_email", flattenNestedBigQueryDatasetAccessUserByEmail(res["userByEmail"], d, config)); err != nil { + return fmt.Errorf("Error reading DatasetAccess: %s", err) + } + if err := d.Set("group_by_email", flattenNestedBigQueryDatasetAccessGroupByEmail(res["groupByEmail"], d, config)); err != nil { + return fmt.Errorf("Error reading DatasetAccess: %s", err) + } + if err := d.Set("domain", flattenNestedBigQueryDatasetAccessDomain(res["domain"], d, config)); err != nil { + return fmt.Errorf("Error reading DatasetAccess: %s", err) + } + if err := d.Set("special_group", flattenNestedBigQueryDatasetAccessSpecialGroup(res["specialGroup"], d, config)); err != nil { + return fmt.Errorf("Error reading DatasetAccess: %s", err) + } + if err := d.Set("iam_member", flattenNestedBigQueryDatasetAccessIamMember(res["iamMember"], d, config)); err != nil { + return fmt.Errorf("Error reading DatasetAccess: %s", err) + } + if err := d.Set("view", flattenNestedBigQueryDatasetAccessView(res["view"], d, config)); err != nil { + return fmt.Errorf("Error reading DatasetAccess: %s", err) + } + if err := d.Set("dataset", flattenNestedBigQueryDatasetAccessDataset(res["dataset"], d, config)); 
err != nil { + return fmt.Errorf("Error reading DatasetAccess: %s", err) + } + if err := d.Set("routine", flattenNestedBigQueryDatasetAccessRoutine(res["routine"], d, config)); err != nil { + return fmt.Errorf("Error reading DatasetAccess: %s", err) + } + + return nil +} + +func resourceBigQueryDatasetAccessDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DatasetAccess: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "{{dataset_id}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceBigQueryDatasetAccessPatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DatasetAccess") + } + log.Printf("[DEBUG] Deleting DatasetAccess %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsBigqueryIAMQuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DatasetAccess") + } + + log.Printf("[DEBUG] Finished deleting DatasetAccess %q: 
%#v", d.Id(), res) + return nil +} + +func flattenNestedBigQueryDatasetAccessRole(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessUserByEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessGroupByEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessSpecialGroup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessIamMember(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessView(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset_id"] = + flattenNestedBigQueryDatasetAccessViewDatasetId(original["datasetId"], d, config) + transformed["project_id"] = + flattenNestedBigQueryDatasetAccessViewProjectId(original["projectId"], d, config) + transformed["table_id"] = + flattenNestedBigQueryDatasetAccessViewTableId(original["tableId"], d, config) + return []interface{}{transformed} +} +func flattenNestedBigQueryDatasetAccessViewDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessViewProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessViewTableId(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset"] = + flattenNestedBigQueryDatasetAccessDatasetDataset(original["dataset"], d, config) + transformed["target_types"] = + flattenNestedBigQueryDatasetAccessDatasetTargetTypes(original["targetTypes"], d, config) + return []interface{}{transformed} +} +func flattenNestedBigQueryDatasetAccessDatasetDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset_id"] = + flattenNestedBigQueryDatasetAccessDatasetDatasetDatasetId(original["datasetId"], d, config) + transformed["project_id"] = + flattenNestedBigQueryDatasetAccessDatasetDatasetProjectId(original["projectId"], d, config) + return []interface{}{transformed} +} +func flattenNestedBigQueryDatasetAccessDatasetDatasetDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessDatasetDatasetProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessDatasetTargetTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessRoutine(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + 
transformed["dataset_id"] = + flattenNestedBigQueryDatasetAccessRoutineDatasetId(original["datasetId"], d, config) + transformed["project_id"] = + flattenNestedBigQueryDatasetAccessRoutineProjectId(original["projectId"], d, config) + transformed["routine_id"] = + flattenNestedBigQueryDatasetAccessRoutineRoutineId(original["routineId"], d, config) + return []interface{}{transformed} +} +func flattenNestedBigQueryDatasetAccessRoutineDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessRoutineProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedBigQueryDatasetAccessRoutineRoutineId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedBigQueryDatasetAccessDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessRole(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + if primitiveRole, ok := bigqueryAccessRoleToPrimitiveMap[v.(string)]; ok { + return primitiveRole, nil + } + return v, nil +} + +func expandNestedBigQueryDatasetAccessUserByEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessGroupByEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessDomain(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessSpecialGroup(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessIamMember(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessView(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDatasetId, err := expandNestedBigQueryDatasetAccessViewDatasetId(original["dataset_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["datasetId"] = transformedDatasetId + } + + transformedProjectId, err := expandNestedBigQueryDatasetAccessViewProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + transformedTableId, err := expandNestedBigQueryDatasetAccessViewTableId(original["table_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTableId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tableId"] = transformedTableId + } + + return transformed, nil +} + +func expandNestedBigQueryDatasetAccessViewDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessViewProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessViewTableId(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDataset, err := expandNestedBigQueryDatasetAccessDatasetDataset(original["dataset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataset"] = transformedDataset + } + + transformedTargetTypes, err := expandNestedBigQueryDatasetAccessDatasetTargetTypes(original["target_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTargetTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["targetTypes"] = transformedTargetTypes + } + + return transformed, nil +} + +func expandNestedBigQueryDatasetAccessDatasetDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDatasetId, err := expandNestedBigQueryDatasetAccessDatasetDatasetDatasetId(original["dataset_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["datasetId"] = transformedDatasetId + } + + transformedProjectId, err := expandNestedBigQueryDatasetAccessDatasetDatasetProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + return transformed, nil +} + +func expandNestedBigQueryDatasetAccessDatasetDatasetDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessDatasetDatasetProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessDatasetTargetTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessRoutine(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDatasetId, err := expandNestedBigQueryDatasetAccessRoutineDatasetId(original["dataset_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["datasetId"] = transformedDatasetId + } + + transformedProjectId, err := expandNestedBigQueryDatasetAccessRoutineProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + transformedRoutineId, err := expandNestedBigQueryDatasetAccessRoutineRoutineId(original["routine_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRoutineId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["routineId"] = transformedRoutineId + } + + return transformed, nil +} + 
+func expandNestedBigQueryDatasetAccessRoutineDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessRoutineProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedBigQueryDatasetAccessRoutineRoutineId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedBigQueryDatasetAccess(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["access"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value access. 
Actual value: %v", v) + } + + _, item, err := resourceBigQueryDatasetAccessFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceBigQueryDatasetAccessFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedRole, err := expandNestedBigQueryDatasetAccessRole(d.Get("role"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedRole := flattenNestedBigQueryDatasetAccessRole(expectedRole, d, meta.(*transport_tpg.Config)) + expectedUserByEmail, err := expandNestedBigQueryDatasetAccessUserByEmail(d.Get("user_by_email"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedUserByEmail := flattenNestedBigQueryDatasetAccessUserByEmail(expectedUserByEmail, d, meta.(*transport_tpg.Config)) + expectedGroupByEmail, err := expandNestedBigQueryDatasetAccessGroupByEmail(d.Get("group_by_email"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedGroupByEmail := flattenNestedBigQueryDatasetAccessGroupByEmail(expectedGroupByEmail, d, meta.(*transport_tpg.Config)) + expectedDomain, err := expandNestedBigQueryDatasetAccessDomain(d.Get("domain"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedDomain := flattenNestedBigQueryDatasetAccessDomain(expectedDomain, d, meta.(*transport_tpg.Config)) + expectedSpecialGroup, err := expandNestedBigQueryDatasetAccessSpecialGroup(d.Get("special_group"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedSpecialGroup := flattenNestedBigQueryDatasetAccessSpecialGroup(expectedSpecialGroup, d, meta.(*transport_tpg.Config)) + expectedIamMember, err := expandNestedBigQueryDatasetAccessIamMember(d.Get("iam_member"), d, meta.(*transport_tpg.Config)) + if err != nil { + 
return -1, nil, err + } + expectedFlattenedIamMember := flattenNestedBigQueryDatasetAccessIamMember(expectedIamMember, d, meta.(*transport_tpg.Config)) + expectedView, err := expandNestedBigQueryDatasetAccessView(d.Get("view"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedView := flattenNestedBigQueryDatasetAccessView(expectedView, d, meta.(*transport_tpg.Config)) + expectedDataset, err := expandNestedBigQueryDatasetAccessDataset(d.Get("dataset"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedDataset := flattenNestedBigQueryDatasetAccessDataset(expectedDataset, d, meta.(*transport_tpg.Config)) + expectedRoutine, err := expandNestedBigQueryDatasetAccessRoutine(d.Get("routine"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedRoutine := flattenNestedBigQueryDatasetAccessRoutine(expectedRoutine, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. 
+ for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemRole := flattenNestedBigQueryDatasetAccessRole(item["role"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemRole)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedRole))) && !reflect.DeepEqual(itemRole, expectedFlattenedRole) { + log.Printf("[DEBUG] Skipping item with role= %#v, looking for %#v)", itemRole, expectedFlattenedRole) + continue + } + itemUserByEmail := flattenNestedBigQueryDatasetAccessUserByEmail(item["userByEmail"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemUserByEmail)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedUserByEmail))) && !reflect.DeepEqual(itemUserByEmail, expectedFlattenedUserByEmail) { + log.Printf("[DEBUG] Skipping item with userByEmail= %#v, looking for %#v)", itemUserByEmail, expectedFlattenedUserByEmail) + continue + } + itemGroupByEmail := flattenNestedBigQueryDatasetAccessGroupByEmail(item["groupByEmail"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemGroupByEmail)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedGroupByEmail))) && !reflect.DeepEqual(itemGroupByEmail, expectedFlattenedGroupByEmail) { + log.Printf("[DEBUG] Skipping item with groupByEmail= %#v, looking for %#v)", itemGroupByEmail, expectedFlattenedGroupByEmail) + continue + } + itemDomain := flattenNestedBigQueryDatasetAccessDomain(item["domain"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if 
!(tpgresource.IsEmptyValue(reflect.ValueOf(itemDomain)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedDomain))) && !reflect.DeepEqual(itemDomain, expectedFlattenedDomain) { + log.Printf("[DEBUG] Skipping item with domain= %#v, looking for %#v)", itemDomain, expectedFlattenedDomain) + continue + } + itemSpecialGroup := flattenNestedBigQueryDatasetAccessSpecialGroup(item["specialGroup"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemSpecialGroup)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedSpecialGroup))) && !reflect.DeepEqual(itemSpecialGroup, expectedFlattenedSpecialGroup) { + log.Printf("[DEBUG] Skipping item with specialGroup= %#v, looking for %#v)", itemSpecialGroup, expectedFlattenedSpecialGroup) + continue + } + itemIamMember := flattenNestedBigQueryDatasetAccessIamMember(item["iamMember"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemIamMember)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedIamMember))) && !reflect.DeepEqual(itemIamMember, expectedFlattenedIamMember) { + log.Printf("[DEBUG] Skipping item with iamMember= %#v, looking for %#v)", itemIamMember, expectedFlattenedIamMember) + continue + } + itemView := flattenNestedBigQueryDatasetAccessView(item["view"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemView)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedView))) && !reflect.DeepEqual(itemView, expectedFlattenedView) { + log.Printf("[DEBUG] Skipping item with view= %#v, looking for %#v)", itemView, expectedFlattenedView) + continue + } + itemDataset := 
flattenNestedBigQueryDatasetAccessDataset(item["dataset"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemDataset)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedDataset))) && !reflect.DeepEqual(itemDataset, expectedFlattenedDataset) { + log.Printf("[DEBUG] Skipping item with dataset= %#v, looking for %#v)", itemDataset, expectedFlattenedDataset) + continue + } + itemRoutine := flattenNestedBigQueryDatasetAccessRoutine(item["routine"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemRoutine)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedRoutine))) && !reflect.DeepEqual(itemRoutine, expectedFlattenedRoutine) { + log.Printf("[DEBUG] Skipping item with routine= %#v, looking for %#v)", itemRoutine, expectedFlattenedRoutine) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. +func resourceBigQueryDatasetAccessPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceBigQueryDatasetAccessListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceBigQueryDatasetAccessFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. 
+ if found != nil { + return nil, fmt.Errorf("Unable to create DatasetAccess, existing object already found: %+v", found) + } + + // Return list with the resource to create appended + res := map[string]interface{}{ + "access": append(currItems, obj), + } + + return res, nil +} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. +func resourceBigQueryDatasetAccessPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceBigQueryDatasetAccessListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceBigQueryDatasetAccessFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "BigQueryDatasetAccess") + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) + res := map[string]interface{}{ + "access": updatedItems, + } + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. 
+func resourceBigQueryDatasetAccessListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") + if err != nil { + return nil, err + } + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsBigqueryIAMQuotaError}, + }) + if err != nil { + return nil, err + } + + var v interface{} + var ok bool + + v, ok = res["access"] + if ok && v != nil { + ls, lsOk := v.([]interface{}) + if !lsOk { + return nil, fmt.Errorf(`expected list for nested field "access"`) + } + return ls, nil + } + return nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_job.go similarity index 80% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_job.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_job.go index 9fd31edceb..cb99214f27 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_job.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_job.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package bigquery import ( "fmt" @@ -23,6 +26,11 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/googleapi" ) @@ -66,7 +74,7 @@ func ResourceBigQueryJob() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Description: `The table. Can be specified '{{table_id}}' if 'project_id' and 'dataset_id' are also set, or of the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}' if not.`, }, @@ -91,7 +99,7 @@ or of the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"CREATE_IF_NEEDED", "CREATE_NEVER", ""}), + ValidateFunc: verify.ValidateEnum([]string{"CREATE_IF_NEEDED", "CREATE_NEVER", ""}), Description: `Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. 
@@ -133,7 +141,7 @@ The BigQuery Service Account associated with your project requires access to thi Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Description: `The table. Can be specified '{{table_id}}' if 'project_id' and 'dataset_id' are also set, or of the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}' if not.`, }, @@ -158,7 +166,7 @@ or of the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY", ""}), + ValidateFunc: verify.ValidateEnum([]string{"WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY", ""}), Description: `Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. @@ -262,7 +270,7 @@ Default is ','`, Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Description: `The table. Can be specified '{{table_id}}' if 'project_id' and 'dataset_id' are also set, or of the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}' if not.`, }, @@ -327,7 +335,7 @@ or of the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Description: `The table. 
Can be specified '{{table_id}}' if 'project_id' and 'dataset_id' are also set, or of the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}' if not.`, }, @@ -389,7 +397,7 @@ The default value is false.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"CREATE_IF_NEEDED", "CREATE_NEVER", ""}), + ValidateFunc: verify.ValidateEnum([]string{"CREATE_IF_NEEDED", "CREATE_NEVER", ""}), Description: `Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. @@ -477,6 +485,30 @@ empty string is present for all data types except for STRING and BYTE. For STRIN an empty value.`, Default: "", }, + "parquet_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Parquet Options for load and make external tables.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_list_inference": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type.`, + AtLeastOneOf: []string{"load.0.parquet_options.0.enum_as_string", "load.0.parquet_options.0.enable_list_inference"}, + }, + "enum_as_string": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.`, + }, + }, + }, + }, "projection_fields": { Type: schema.TypeList, Optional: true, @@ -573,7 +605,7 @@ A wrapper is used here because an empty string is an invalid value.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"WRITE_TRUNCATE", "WRITE_APPEND", 
"WRITE_EMPTY", ""}), + ValidateFunc: verify.ValidateEnum([]string{"WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY", ""}), Description: `Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. @@ -614,7 +646,7 @@ However, you must still set destinationTable when result size exceeds the allowe Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"CREATE_IF_NEEDED", "CREATE_NEVER", ""}), + ValidateFunc: verify.ValidateEnum([]string{"CREATE_IF_NEEDED", "CREATE_NEVER", ""}), Description: `Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. @@ -633,7 +665,7 @@ Creation, truncation and append actions occur as one atomic update upon job comp Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Description: `The dataset. Can be specified '{{dataset_id}}' if 'project_id' is also set, or of the form 'projects/{{project}}/datasets/{{dataset_id}}' if not.`, }, @@ -685,7 +717,7 @@ For queries that produce anonymous (cached) results, this field will be populate Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Description: `The table. 
Can be specified '{{table_id}}' if 'project_id' and 'dataset_id' are also set, or of the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}' if not.`, }, @@ -737,7 +769,7 @@ If unspecified, this will be set to your project default.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"INTERACTIVE", "BATCH", ""}), + ValidateFunc: verify.ValidateEnum([]string{"INTERACTIVE", "BATCH", ""}), Description: `Specifies a priority for the query. Default value: "INTERACTIVE" Possible values: ["INTERACTIVE", "BATCH"]`, Default: "INTERACTIVE", }, @@ -768,7 +800,7 @@ ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema t Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"LAST", "FIRST_SELECT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"LAST", "FIRST_SELECT", ""}), Description: `Determines which statement in the script represents the "key result", used to populate the schema and query results of the script job. Possible values: ["LAST", "FIRST_SELECT"]`, AtLeastOneOf: []string{"query.0.script_options.0.statement_timeout_ms", "query.0.script_options.0.statement_byte_budget", "query.0.script_options.0.key_result_statement"}, @@ -833,7 +865,7 @@ Providing a inline code resource is equivalent to providing a URI for a file con Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY", ""}), + ValidateFunc: verify.ValidateEnum([]string{"WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY", ""}), Description: `Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. 
@@ -947,8 +979,8 @@ not necessarily mean that the job has not completed or was unsuccessful.`, } func resourceBigQueryJobCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -957,13 +989,13 @@ func resourceBigQueryJobCreate(d *schema.ResourceData, meta interface{}) error { configurationProp, err := expandBigQueryJobConfiguration(nil, d, config) if err != nil { return err - } else if !isEmptyValue(reflect.ValueOf(configurationProp)) { + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(configurationProp)) { obj["configuration"] = configurationProp } jobReferenceProp, err := expandBigQueryJobJobReference(nil, d, config) if err != nil { return err - } else if !isEmptyValue(reflect.ValueOf(jobReferenceProp)) { + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(jobReferenceProp)) { obj["jobReference"] = jobReferenceProp } @@ -972,7 +1004,7 @@ func resourceBigQueryJobCreate(d *schema.ResourceData, meta interface{}) error { return err } - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/jobs") + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/jobs") if err != nil { return err } @@ -980,30 +1012,38 @@ func resourceBigQueryJobCreate(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] Creating new Job: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Job: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - 
res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Job: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/jobs/{{job_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/jobs/{{job_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } d.SetId(id) - err = PollingWaitTime(resourceBigQueryJobPollRead(d, meta), PollCheckForExistence, "Creating Job", d.Timeout(schema.TimeoutCreate), 1) + err = transport_tpg.PollingWaitTime(resourceBigQueryJobPollRead(d, meta), transport_tpg.PollCheckForExistence, "Creating Job", d.Timeout(schema.TimeoutCreate), 1) if err != nil { return fmt.Errorf("Error waiting to create Job: %s", err) } @@ -1013,34 +1053,40 @@ func resourceBigQueryJobCreate(d *schema.ResourceData, meta interface{}) error { return resourceBigQueryJobRead(d, meta) } -func resourceBigQueryJobPollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { +func resourceBigQueryJobPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { return func() (map[string]interface{}, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/jobs/{{job_id}}?location={{location}}") + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/jobs/{{job_id}}?location={{location}}") if err != nil { return nil, err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, fmt.Errorf("Error fetching project for Job: 
%s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { return res, err } @@ -1049,33 +1095,39 @@ func resourceBigQueryJobPollRead(d *schema.ResourceData, meta interface{}) PollR } func resourceBigQueryJobRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/jobs/{{job_id}}?location={{location}}") + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/jobs/{{job_id}}?location={{location}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Job: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigQueryJob %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigQueryJob %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -1132,8 +1184,8 @@ func resourceBigQueryJobDelete(d *schema.ResourceData, meta interface{}) error { } func resourceBigQueryJobImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/jobs/(?P[^/]+)/location/(?P[^/]+)", "projects/(?P[^/]+)/jobs/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", @@ -1145,7 +1197,7 @@ func resourceBigQueryJobImport(d *schema.ResourceData, meta interface{}) ([]*sch } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/jobs/{{job_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/jobs/{{job_id}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -1154,11 +1206,11 @@ func resourceBigQueryJobImport(d *schema.ResourceData, meta interface{}) ([]*sch return []*schema.ResourceData{d}, nil } -func flattenBigQueryJobUserEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobUserEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfiguration(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfiguration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1183,19 +1235,19 @@ func flattenBigQueryJobConfiguration(v interface{}, d 
*schema.ResourceData, conf flattenBigQueryJobConfigurationExtract(original["extract"], d, config) return []interface{}{transformed} } -func flattenBigQueryJobConfigurationJobType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationJobType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationJobTimeoutMs(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationJobTimeoutMs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQuery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1240,11 +1292,11 @@ func flattenBigQueryJobConfigurationQuery(v interface{}, d *schema.ResourceData, flattenBigQueryJobConfigurationQueryScriptOptions(original["scriptOptions"], d, config) return []interface{}{transformed} } -func flattenBigQueryJobConfigurationQueryQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryQuery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQueryDestinationTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryDestinationTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1264,7 +1316,7 @@ func 
flattenBigQueryJobConfigurationQueryDestinationTable(v interface{}, d *sche return []interface{}{transformed} } -func flattenBigQueryJobConfigurationQueryUserDefinedFunctionResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryUserDefinedFunctionResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1283,23 +1335,23 @@ func flattenBigQueryJobConfigurationQueryUserDefinedFunctionResources(v interfac } return transformed } -func flattenBigQueryJobConfigurationQueryUserDefinedFunctionResourcesResourceUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryUserDefinedFunctionResourcesResourceUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQueryUserDefinedFunctionResourcesInlineCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryUserDefinedFunctionResourcesInlineCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQueryCreateDisposition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryCreateDisposition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQueryWriteDisposition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryWriteDisposition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQueryDefaultDataset(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryDefaultDataset(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1318,26 +1370,26 @@ func flattenBigQueryJobConfigurationQueryDefaultDataset(v interface{}, d *schema return []interface{}{transformed} } -func flattenBigQueryJobConfigurationQueryPriority(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryPriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQueryAllowLargeResults(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryAllowLargeResults(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQueryUseQueryCache(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryUseQueryCache(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQueryFlattenResults(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryFlattenResults(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQueryMaximumBillingTier(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryMaximumBillingTier(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1351,25 +1403,25 @@ func flattenBigQueryJobConfigurationQueryMaximumBillingTier(v interface{}, d *sc return v // let terraform core handle it otherwise } -func 
flattenBigQueryJobConfigurationQueryMaximumBytesBilled(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryMaximumBytesBilled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQueryUseLegacySql(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryUseLegacySql(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQueryParameterMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryParameterMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQuerySchemaUpdateOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQuerySchemaUpdateOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } // KmsKeyName switched from using a key name to a key version, this will separate the key name from the key version and save them // separately in state. 
https://github.com/hashicorp/terraform-provider-google/issues/9208 -func flattenBigQueryJobConfigurationQueryDestinationEncryptionConfiguration(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryDestinationEncryptionConfiguration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return []map[string]interface{}{} } @@ -1392,7 +1444,7 @@ func flattenBigQueryJobConfigurationQueryDestinationEncryptionConfiguration(v in } -func flattenBigQueryJobConfigurationQueryScriptOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryScriptOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1409,19 +1461,19 @@ func flattenBigQueryJobConfigurationQueryScriptOptions(v interface{}, d *schema. flattenBigQueryJobConfigurationQueryScriptOptionsKeyResultStatement(original["keyResultStatement"], d, config) return []interface{}{transformed} } -func flattenBigQueryJobConfigurationQueryScriptOptionsStatementTimeoutMs(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryScriptOptionsStatementTimeoutMs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQueryScriptOptionsStatementByteBudget(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryScriptOptionsStatementByteBudget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationQueryScriptOptionsKeyResultStatement(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationQueryScriptOptionsKeyResultStatement(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { 
return v } -func flattenBigQueryJobConfigurationLoad(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoad(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1470,13 +1522,15 @@ func flattenBigQueryJobConfigurationLoad(v interface{}, d *schema.ResourceData, flattenBigQueryJobConfigurationLoadTimePartitioning(original["timePartitioning"], d, config) transformed["destination_encryption_configuration"] = flattenBigQueryJobConfigurationLoadDestinationEncryptionConfiguration(original["destinationEncryptionConfiguration"], d, config) + transformed["parquet_options"] = + flattenBigQueryJobConfigurationLoadParquetOptions(original["parquetOptions"], d, config) return []interface{}{transformed} } -func flattenBigQueryJobConfigurationLoadSourceUris(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadSourceUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadDestinationTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadDestinationTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1496,26 +1550,26 @@ func flattenBigQueryJobConfigurationLoadDestinationTable(v interface{}, d *schem return []interface{}{transformed} } -func flattenBigQueryJobConfigurationLoadCreateDisposition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadCreateDisposition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadWriteDisposition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadWriteDisposition(v interface{}, 
d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadNullMarker(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadNullMarker(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadFieldDelimiter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadFieldDelimiter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadSkipLeadingRows(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadSkipLeadingRows(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1529,18 +1583,18 @@ func flattenBigQueryJobConfigurationLoadSkipLeadingRows(v interface{}, d *schema return v // let terraform core handle it otherwise } -func flattenBigQueryJobConfigurationLoadEncoding(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadEncoding(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadQuote(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadQuote(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadMaxBadRecords(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadMaxBadRecords(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1554,39 +1608,39 @@ func flattenBigQueryJobConfigurationLoadMaxBadRecords(v interface{}, d *schema.R return v // let terraform core handle it otherwise } -func flattenBigQueryJobConfigurationLoadAllowQuotedNewlines(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadAllowQuotedNewlines(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadSourceFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadSourceFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadJsonExtension(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadJsonExtension(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadAllowJaggedRows(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadAllowJaggedRows(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadIgnoreUnknownValues(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadIgnoreUnknownValues(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadProjectionFields(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadProjectionFields(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadAutodetect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadAutodetect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadSchemaUpdateOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadSchemaUpdateOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadTimePartitioning(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadTimePartitioning(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1603,21 +1657,21 @@ func flattenBigQueryJobConfigurationLoadTimePartitioning(v interface{}, d *schem flattenBigQueryJobConfigurationLoadTimePartitioningField(original["field"], d, config) return []interface{}{transformed} } -func flattenBigQueryJobConfigurationLoadTimePartitioningType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadTimePartitioningType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadTimePartitioningExpirationMs(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadTimePartitioningExpirationMs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationLoadTimePartitioningField(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadTimePartitioningField(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } // KmsKeyName switched from using a key name to a key version, this will separate the key name from the key version and save them // separately in state. https://github.com/hashicorp/terraform-provider-google/issues/9208 -func flattenBigQueryJobConfigurationLoadDestinationEncryptionConfiguration(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadDestinationEncryptionConfiguration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return []map[string]interface{}{} } @@ -1640,7 +1694,30 @@ func flattenBigQueryJobConfigurationLoadDestinationEncryptionConfiguration(v int } -func flattenBigQueryJobConfigurationCopy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationLoadParquetOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enum_as_string"] = + flattenBigQueryJobConfigurationLoadParquetOptionsEnumAsString(original["enumAsString"], d, config) + transformed["enable_list_inference"] = + flattenBigQueryJobConfigurationLoadParquetOptionsEnableListInference(original["enableListInference"], d, config) + return []interface{}{transformed} +} +func flattenBigQueryJobConfigurationLoadParquetOptionsEnumAsString(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryJobConfigurationLoadParquetOptionsEnableListInference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryJobConfigurationCopy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1661,7 +1738,7 @@ func 
flattenBigQueryJobConfigurationCopy(v interface{}, d *schema.ResourceData, flattenBigQueryJobConfigurationCopyDestinationEncryptionConfiguration(original["destinationEncryptionConfiguration"], d, config) return []interface{}{transformed} } -func flattenBigQueryJobConfigurationCopySourceTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationCopySourceTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1689,7 +1766,7 @@ func flattenBigQueryJobConfigurationCopySourceTables(v interface{}, d *schema.Re return transformed } -func flattenBigQueryJobConfigurationCopyDestinationTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationCopyDestinationTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1709,17 +1786,17 @@ func flattenBigQueryJobConfigurationCopyDestinationTable(v interface{}, d *schem return []interface{}{transformed} } -func flattenBigQueryJobConfigurationCopyCreateDisposition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationCopyCreateDisposition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationCopyWriteDisposition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationCopyWriteDisposition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } // KmsKeyName switched from using a key name to a key version, this will separate the key name from the key version and save them // separately in state. 
https://github.com/hashicorp/terraform-provider-google/issues/9208 -func flattenBigQueryJobConfigurationCopyDestinationEncryptionConfiguration(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationCopyDestinationEncryptionConfiguration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return []map[string]interface{}{} } @@ -1742,7 +1819,7 @@ func flattenBigQueryJobConfigurationCopyDestinationEncryptionConfiguration(v int } -func flattenBigQueryJobConfigurationExtract(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationExtract(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1769,31 +1846,31 @@ func flattenBigQueryJobConfigurationExtract(v interface{}, d *schema.ResourceDat flattenBigQueryJobConfigurationExtractSourceModel(original["sourceModel"], d, config) return []interface{}{transformed} } -func flattenBigQueryJobConfigurationExtractDestinationUris(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationExtractDestinationUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationExtractPrintHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationExtractPrintHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationExtractFieldDelimiter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationExtractFieldDelimiter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationExtractDestinationFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenBigQueryJobConfigurationExtractDestinationFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationExtractCompression(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationExtractCompression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationExtractUseAvroLogicalTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationExtractUseAvroLogicalTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationExtractSourceTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationExtractSourceTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1813,7 +1890,7 @@ func flattenBigQueryJobConfigurationExtractSourceTable(v interface{}, d *schema. return []interface{}{transformed} } -func flattenBigQueryJobConfigurationExtractSourceModel(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationExtractSourceModel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1830,19 +1907,19 @@ func flattenBigQueryJobConfigurationExtractSourceModel(v interface{}, d *schema. 
flattenBigQueryJobConfigurationExtractSourceModelModelId(original["modelId"], d, config) return []interface{}{transformed} } -func flattenBigQueryJobConfigurationExtractSourceModelProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationExtractSourceModelProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationExtractSourceModelDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationExtractSourceModelDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobConfigurationExtractSourceModelModelId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobConfigurationExtractSourceModelModelId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobJobReference(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobJobReference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1857,15 +1934,15 @@ func flattenBigQueryJobJobReference(v interface{}, d *schema.ResourceData, confi flattenBigQueryJobJobReferenceLocation(original["location"], d, config) return []interface{}{transformed} } -func flattenBigQueryJobJobReferenceJobId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobJobReferenceJobId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobJobReferenceLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobJobReferenceLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobStatus(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1882,7 +1959,7 @@ func flattenBigQueryJobStatus(v interface{}, d *schema.ResourceData, config *Con flattenBigQueryJobStatusState(original["state"], d, config) return []interface{}{transformed} } -func flattenBigQueryJobStatusErrorResult(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobStatusErrorResult(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1899,19 +1976,19 @@ func flattenBigQueryJobStatusErrorResult(v interface{}, d *schema.ResourceData, flattenBigQueryJobStatusErrorResultMessage(original["message"], d, config) return []interface{}{transformed} } -func flattenBigQueryJobStatusErrorResultReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobStatusErrorResultReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobStatusErrorResultLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobStatusErrorResultLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobStatusErrorResultMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobStatusErrorResultMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobStatusErrors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobStatusErrors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1931,85 +2008,85 @@ func flattenBigQueryJobStatusErrors(v interface{}, d 
*schema.ResourceData, confi } return transformed } -func flattenBigQueryJobStatusErrorsReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobStatusErrorsReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobStatusErrorsLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobStatusErrorsLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobStatusErrorsMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobStatusErrorsMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBigQueryJobStatusState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBigQueryJobStatusState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandBigQueryJobConfiguration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfiguration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { transformed := make(map[string]interface{}) transformedJobType, err := expandBigQueryJobConfigurationJobType(d.Get("job_type"), d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedJobType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedJobType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["jobType"] = transformedJobType } transformedJobTimeoutMs, err := expandBigQueryJobConfigurationJobTimeoutMs(d.Get("job_timeout_ms"), d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedJobTimeoutMs); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedJobTimeoutMs); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["jobTimeoutMs"] = transformedJobTimeoutMs } transformedLabels, err := expandBigQueryJobConfigurationLabels(d.Get("labels"), d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["labels"] = transformedLabels } transformedQuery, err := expandBigQueryJobConfigurationQuery(d.Get("query"), d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedQuery); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedQuery); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["query"] = transformedQuery } transformedLoad, err := expandBigQueryJobConfigurationLoad(d.Get("load"), d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLoad); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLoad); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["load"] = transformedLoad } transformedCopy, err := expandBigQueryJobConfigurationCopy(d.Get("copy"), d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCopy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCopy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["copy"] = transformedCopy } transformedExtract, err := expandBigQueryJobConfigurationExtract(d.Get("extract"), d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExtract); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExtract); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["extract"] = transformedExtract } return transformed, nil } -func 
expandBigQueryJobConfigurationJobType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationJobType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationJobTimeoutMs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationJobTimeoutMs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandBigQueryJobConfigurationLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -2020,7 +2097,7 @@ func expandBigQueryJobConfigurationLabels(v interface{}, d TerraformResourceData return m, nil } -func expandBigQueryJobConfigurationQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQuery(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2032,84 +2109,84 @@ func expandBigQueryJobConfigurationQuery(v interface{}, d TerraformResourceData, transformedQuery, err := expandBigQueryJobConfigurationQueryQuery(original["query"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedQuery); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedQuery); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["query"] = transformedQuery } transformedDestinationTable, err := expandBigQueryJobConfigurationQueryDestinationTable(original["destination_table"], d, config) if err != nil { return nil, 
err - } else if val := reflect.ValueOf(transformedDestinationTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDestinationTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["destinationTable"] = transformedDestinationTable } transformedUserDefinedFunctionResources, err := expandBigQueryJobConfigurationQueryUserDefinedFunctionResources(original["user_defined_function_resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUserDefinedFunctionResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUserDefinedFunctionResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["userDefinedFunctionResources"] = transformedUserDefinedFunctionResources } transformedCreateDisposition, err := expandBigQueryJobConfigurationQueryCreateDisposition(original["create_disposition"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCreateDisposition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCreateDisposition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["createDisposition"] = transformedCreateDisposition } transformedWriteDisposition, err := expandBigQueryJobConfigurationQueryWriteDisposition(original["write_disposition"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWriteDisposition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWriteDisposition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["writeDisposition"] = transformedWriteDisposition } transformedDefaultDataset, err := expandBigQueryJobConfigurationQueryDefaultDataset(original["default_dataset"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDefaultDataset); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedDefaultDataset); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["defaultDataset"] = transformedDefaultDataset } transformedPriority, err := expandBigQueryJobConfigurationQueryPriority(original["priority"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPriority); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPriority); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["priority"] = transformedPriority } transformedAllowLargeResults, err := expandBigQueryJobConfigurationQueryAllowLargeResults(original["allow_large_results"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowLargeResults); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowLargeResults); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowLargeResults"] = transformedAllowLargeResults } transformedUseQueryCache, err := expandBigQueryJobConfigurationQueryUseQueryCache(original["use_query_cache"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUseQueryCache); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUseQueryCache); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["useQueryCache"] = transformedUseQueryCache } transformedFlattenResults, err := expandBigQueryJobConfigurationQueryFlattenResults(original["flatten_results"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFlattenResults); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFlattenResults); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["flattenResults"] = transformedFlattenResults } transformedMaximumBillingTier, err := expandBigQueryJobConfigurationQueryMaximumBillingTier(original["maximum_billing_tier"], d, config) if err != nil { 
return nil, err - } else if val := reflect.ValueOf(transformedMaximumBillingTier); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaximumBillingTier); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maximumBillingTier"] = transformedMaximumBillingTier } transformedMaximumBytesBilled, err := expandBigQueryJobConfigurationQueryMaximumBytesBilled(original["maximum_bytes_billed"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaximumBytesBilled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaximumBytesBilled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maximumBytesBilled"] = transformedMaximumBytesBilled } @@ -2123,39 +2200,39 @@ func expandBigQueryJobConfigurationQuery(v interface{}, d TerraformResourceData, transformedParameterMode, err := expandBigQueryJobConfigurationQueryParameterMode(original["parameter_mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedParameterMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedParameterMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["parameterMode"] = transformedParameterMode } transformedSchemaUpdateOptions, err := expandBigQueryJobConfigurationQuerySchemaUpdateOptions(original["schema_update_options"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchemaUpdateOptions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchemaUpdateOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schemaUpdateOptions"] = transformedSchemaUpdateOptions } transformedDestinationEncryptionConfiguration, err := expandBigQueryJobConfigurationQueryDestinationEncryptionConfiguration(original["destination_encryption_configuration"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedDestinationEncryptionConfiguration); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDestinationEncryptionConfiguration); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["destinationEncryptionConfiguration"] = transformedDestinationEncryptionConfiguration } transformedScriptOptions, err := expandBigQueryJobConfigurationQueryScriptOptions(original["script_options"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScriptOptions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScriptOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scriptOptions"] = transformedScriptOptions } return transformed, nil } -func expandBigQueryJobConfigurationQueryQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryQuery(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryDestinationTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryDestinationTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2165,17 +2242,17 @@ func expandBigQueryJobConfigurationQueryDestinationTable(v interface{}, d Terraf transformed := make(map[string]interface{}) transformedProjectId := original["project_id"] - if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projectId"] = transformedProjectId } transformedDatasetId := original["dataset_id"] - if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && 
!isEmptyValue(val) { + if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["datasetId"] = transformedDatasetId } transformedTableId := original["table_id"] - if val := reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedTableId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tableId"] = transformedTableId } @@ -2188,7 +2265,7 @@ func expandBigQueryJobConfigurationQueryDestinationTable(v interface{}, d Terraf return transformed, nil } -func expandBigQueryJobConfigurationQueryUserDefinedFunctionResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryUserDefinedFunctionResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2201,14 +2278,14 @@ func expandBigQueryJobConfigurationQueryUserDefinedFunctionResources(v interface transformedResourceUri, err := expandBigQueryJobConfigurationQueryUserDefinedFunctionResourcesResourceUri(original["resource_uri"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResourceUri); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResourceUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resourceUri"] = transformedResourceUri } transformedInlineCode, err := expandBigQueryJobConfigurationQueryUserDefinedFunctionResourcesInlineCode(original["inline_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInlineCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInlineCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["inlineCode"] = transformedInlineCode } @@ -2217,23 +2294,23 @@ func 
expandBigQueryJobConfigurationQueryUserDefinedFunctionResources(v interface return req, nil } -func expandBigQueryJobConfigurationQueryUserDefinedFunctionResourcesResourceUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryUserDefinedFunctionResourcesResourceUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryUserDefinedFunctionResourcesInlineCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryUserDefinedFunctionResourcesInlineCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryCreateDisposition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryCreateDisposition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryWriteDisposition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryWriteDisposition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryDefaultDataset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryDefaultDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2243,12 +2320,12 @@ func expandBigQueryJobConfigurationQueryDefaultDataset(v interface{}, d Terrafor transformed := make(map[string]interface{}) transformedProjectId := 
original["project_id"] - if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projectId"] = transformedProjectId } transformedDatasetId := original["dataset_id"] - if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["datasetId"] = transformedDatasetId } @@ -2260,43 +2337,43 @@ func expandBigQueryJobConfigurationQueryDefaultDataset(v interface{}, d Terrafor return transformed, nil } -func expandBigQueryJobConfigurationQueryPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryPriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryAllowLargeResults(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryAllowLargeResults(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryUseQueryCache(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryUseQueryCache(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryFlattenResults(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryFlattenResults(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryMaximumBillingTier(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryMaximumBillingTier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryMaximumBytesBilled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryMaximumBytesBilled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryUseLegacySql(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryUseLegacySql(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryParameterMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryParameterMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQuerySchemaUpdateOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQuerySchemaUpdateOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryDestinationEncryptionConfiguration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryDestinationEncryptionConfiguration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2308,29 +2385,29 @@ func 
expandBigQueryJobConfigurationQueryDestinationEncryptionConfiguration(v int transformedKmsKeyName, err := expandBigQueryJobConfigurationQueryDestinationEncryptionConfigurationKmsKeyName(original["kms_key_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["kmsKeyName"] = transformedKmsKeyName } transformedKmsKeyVersion, err := expandBigQueryJobConfigurationQueryDestinationEncryptionConfigurationKmsKeyVersion(original["kms_key_version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKmsKeyVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["kmsKeyVersion"] = transformedKmsKeyVersion } return transformed, nil } -func expandBigQueryJobConfigurationQueryDestinationEncryptionConfigurationKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryDestinationEncryptionConfigurationKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryDestinationEncryptionConfigurationKmsKeyVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryDestinationEncryptionConfigurationKmsKeyVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryScriptOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryScriptOptions(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2342,40 +2419,40 @@ func expandBigQueryJobConfigurationQueryScriptOptions(v interface{}, d Terraform transformedStatementTimeoutMs, err := expandBigQueryJobConfigurationQueryScriptOptionsStatementTimeoutMs(original["statement_timeout_ms"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStatementTimeoutMs); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStatementTimeoutMs); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["statementTimeoutMs"] = transformedStatementTimeoutMs } transformedStatementByteBudget, err := expandBigQueryJobConfigurationQueryScriptOptionsStatementByteBudget(original["statement_byte_budget"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStatementByteBudget); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStatementByteBudget); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["statementByteBudget"] = transformedStatementByteBudget } transformedKeyResultStatement, err := expandBigQueryJobConfigurationQueryScriptOptionsKeyResultStatement(original["key_result_statement"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKeyResultStatement); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKeyResultStatement); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["keyResultStatement"] = transformedKeyResultStatement } return transformed, nil } -func expandBigQueryJobConfigurationQueryScriptOptionsStatementTimeoutMs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryScriptOptionsStatementTimeoutMs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) 
{ return v, nil } -func expandBigQueryJobConfigurationQueryScriptOptionsStatementByteBudget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryScriptOptionsStatementByteBudget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationQueryScriptOptionsKeyResultStatement(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationQueryScriptOptionsKeyResultStatement(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoad(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoad(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2387,151 +2464,158 @@ func expandBigQueryJobConfigurationLoad(v interface{}, d TerraformResourceData, transformedSourceUris, err := expandBigQueryJobConfigurationLoadSourceUris(original["source_uris"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSourceUris); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSourceUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sourceUris"] = transformedSourceUris } transformedDestinationTable, err := expandBigQueryJobConfigurationLoadDestinationTable(original["destination_table"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDestinationTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDestinationTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["destinationTable"] = transformedDestinationTable } 
transformedCreateDisposition, err := expandBigQueryJobConfigurationLoadCreateDisposition(original["create_disposition"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCreateDisposition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCreateDisposition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["createDisposition"] = transformedCreateDisposition } transformedWriteDisposition, err := expandBigQueryJobConfigurationLoadWriteDisposition(original["write_disposition"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWriteDisposition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWriteDisposition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["writeDisposition"] = transformedWriteDisposition } transformedNullMarker, err := expandBigQueryJobConfigurationLoadNullMarker(original["null_marker"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNullMarker); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNullMarker); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullMarker"] = transformedNullMarker } transformedFieldDelimiter, err := expandBigQueryJobConfigurationLoadFieldDelimiter(original["field_delimiter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFieldDelimiter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFieldDelimiter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fieldDelimiter"] = transformedFieldDelimiter } transformedSkipLeadingRows, err := expandBigQueryJobConfigurationLoadSkipLeadingRows(original["skip_leading_rows"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSkipLeadingRows); val.IsValid() && !isEmptyValue(val) { + } else if val 
:= reflect.ValueOf(transformedSkipLeadingRows); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["skipLeadingRows"] = transformedSkipLeadingRows } transformedEncoding, err := expandBigQueryJobConfigurationLoadEncoding(original["encoding"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["encoding"] = transformedEncoding } transformedQuote, err := expandBigQueryJobConfigurationLoadQuote(original["quote"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedQuote); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedQuote); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["quote"] = transformedQuote } transformedMaxBadRecords, err := expandBigQueryJobConfigurationLoadMaxBadRecords(original["max_bad_records"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxBadRecords); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxBadRecords); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxBadRecords"] = transformedMaxBadRecords } transformedAllowQuotedNewlines, err := expandBigQueryJobConfigurationLoadAllowQuotedNewlines(original["allow_quoted_newlines"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowQuotedNewlines); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowQuotedNewlines); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowQuotedNewlines"] = transformedAllowQuotedNewlines } transformedSourceFormat, err := expandBigQueryJobConfigurationLoadSourceFormat(original["source_format"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedSourceFormat); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSourceFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sourceFormat"] = transformedSourceFormat } transformedJsonExtension, err := expandBigQueryJobConfigurationLoadJsonExtension(original["json_extension"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedJsonExtension); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedJsonExtension); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["jsonExtension"] = transformedJsonExtension } transformedAllowJaggedRows, err := expandBigQueryJobConfigurationLoadAllowJaggedRows(original["allow_jagged_rows"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowJaggedRows); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowJaggedRows); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowJaggedRows"] = transformedAllowJaggedRows } transformedIgnoreUnknownValues, err := expandBigQueryJobConfigurationLoadIgnoreUnknownValues(original["ignore_unknown_values"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIgnoreUnknownValues); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIgnoreUnknownValues); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ignoreUnknownValues"] = transformedIgnoreUnknownValues } transformedProjectionFields, err := expandBigQueryJobConfigurationLoadProjectionFields(original["projection_fields"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProjectionFields); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProjectionFields); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projectionFields"] = 
transformedProjectionFields } transformedAutodetect, err := expandBigQueryJobConfigurationLoadAutodetect(original["autodetect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAutodetect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAutodetect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["autodetect"] = transformedAutodetect } transformedSchemaUpdateOptions, err := expandBigQueryJobConfigurationLoadSchemaUpdateOptions(original["schema_update_options"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchemaUpdateOptions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchemaUpdateOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schemaUpdateOptions"] = transformedSchemaUpdateOptions } transformedTimePartitioning, err := expandBigQueryJobConfigurationLoadTimePartitioning(original["time_partitioning"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimePartitioning); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimePartitioning); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timePartitioning"] = transformedTimePartitioning } transformedDestinationEncryptionConfiguration, err := expandBigQueryJobConfigurationLoadDestinationEncryptionConfiguration(original["destination_encryption_configuration"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDestinationEncryptionConfiguration); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDestinationEncryptionConfiguration); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["destinationEncryptionConfiguration"] = transformedDestinationEncryptionConfiguration } + transformedParquetOptions, err := 
expandBigQueryJobConfigurationLoadParquetOptions(original["parquet_options"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedParquetOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["parquetOptions"] = transformedParquetOptions + } + return transformed, nil } -func expandBigQueryJobConfigurationLoadSourceUris(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadSourceUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadDestinationTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadDestinationTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2541,17 +2625,17 @@ func expandBigQueryJobConfigurationLoadDestinationTable(v interface{}, d Terrafo transformed := make(map[string]interface{}) transformedProjectId := original["project_id"] - if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projectId"] = transformedProjectId } transformedDatasetId := original["dataset_id"] - if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["datasetId"] = transformedDatasetId } transformedTableId := original["table_id"] - if val := reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedTableId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tableId"] = transformedTableId } 
@@ -2564,71 +2648,71 @@ func expandBigQueryJobConfigurationLoadDestinationTable(v interface{}, d Terrafo return transformed, nil } -func expandBigQueryJobConfigurationLoadCreateDisposition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadCreateDisposition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadWriteDisposition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadWriteDisposition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadNullMarker(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadNullMarker(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadFieldDelimiter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadFieldDelimiter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadSkipLeadingRows(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadSkipLeadingRows(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadEncoding(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadEncoding(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandBigQueryJobConfigurationLoadQuote(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadQuote(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadMaxBadRecords(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadMaxBadRecords(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadAllowQuotedNewlines(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadAllowQuotedNewlines(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadSourceFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadSourceFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadJsonExtension(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadJsonExtension(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadAllowJaggedRows(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadAllowJaggedRows(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadIgnoreUnknownValues(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandBigQueryJobConfigurationLoadIgnoreUnknownValues(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadProjectionFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadProjectionFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadAutodetect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadAutodetect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadSchemaUpdateOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadSchemaUpdateOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadTimePartitioning(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadTimePartitioning(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2640,40 +2724,40 @@ func expandBigQueryJobConfigurationLoadTimePartitioning(v interface{}, d Terrafo transformedType, err := expandBigQueryJobConfigurationLoadTimePartitioningType(original["type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["type"] = transformedType } transformedExpirationMs, err := 
expandBigQueryJobConfigurationLoadTimePartitioningExpirationMs(original["expiration_ms"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExpirationMs); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExpirationMs); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["expirationMs"] = transformedExpirationMs } transformedField, err := expandBigQueryJobConfigurationLoadTimePartitioningField(original["field"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedField); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedField); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["field"] = transformedField } return transformed, nil } -func expandBigQueryJobConfigurationLoadTimePartitioningType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadTimePartitioningType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadTimePartitioningExpirationMs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadTimePartitioningExpirationMs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadTimePartitioningField(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadTimePartitioningField(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadDestinationEncryptionConfiguration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandBigQueryJobConfigurationLoadDestinationEncryptionConfiguration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2685,29 +2769,63 @@ func expandBigQueryJobConfigurationLoadDestinationEncryptionConfiguration(v inte transformedKmsKeyName, err := expandBigQueryJobConfigurationLoadDestinationEncryptionConfigurationKmsKeyName(original["kms_key_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["kmsKeyName"] = transformedKmsKeyName } transformedKmsKeyVersion, err := expandBigQueryJobConfigurationLoadDestinationEncryptionConfigurationKmsKeyVersion(original["kms_key_version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKmsKeyVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["kmsKeyVersion"] = transformedKmsKeyVersion } return transformed, nil } -func expandBigQueryJobConfigurationLoadDestinationEncryptionConfigurationKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadDestinationEncryptionConfigurationKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryJobConfigurationLoadDestinationEncryptionConfigurationKmsKeyVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryJobConfigurationLoadParquetOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnumAsString, err := expandBigQueryJobConfigurationLoadParquetOptionsEnumAsString(original["enum_as_string"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnumAsString); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enumAsString"] = transformedEnumAsString + } + + transformedEnableListInference, err := expandBigQueryJobConfigurationLoadParquetOptionsEnableListInference(original["enable_list_inference"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableListInference); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableListInference"] = transformedEnableListInference + } + + return transformed, nil +} + +func expandBigQueryJobConfigurationLoadParquetOptionsEnumAsString(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationLoadDestinationEncryptionConfigurationKmsKeyVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationLoadParquetOptionsEnableListInference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationCopy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationCopy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2719,42 +2837,42 @@ func expandBigQueryJobConfigurationCopy(v interface{}, d TerraformResourceData, transformedSourceTables, err := 
expandBigQueryJobConfigurationCopySourceTables(original["source_tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSourceTables); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSourceTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sourceTables"] = transformedSourceTables } transformedDestinationTable, err := expandBigQueryJobConfigurationCopyDestinationTable(original["destination_table"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDestinationTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDestinationTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["destinationTable"] = transformedDestinationTable } transformedCreateDisposition, err := expandBigQueryJobConfigurationCopyCreateDisposition(original["create_disposition"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCreateDisposition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCreateDisposition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["createDisposition"] = transformedCreateDisposition } transformedWriteDisposition, err := expandBigQueryJobConfigurationCopyWriteDisposition(original["write_disposition"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWriteDisposition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWriteDisposition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["writeDisposition"] = transformedWriteDisposition } transformedDestinationEncryptionConfiguration, err := expandBigQueryJobConfigurationCopyDestinationEncryptionConfiguration(original["destination_encryption_configuration"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedDestinationEncryptionConfiguration); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDestinationEncryptionConfiguration); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["destinationEncryptionConfiguration"] = transformedDestinationEncryptionConfiguration } return transformed, nil } -func expandBigQueryJobConfigurationCopySourceTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationCopySourceTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2765,17 +2883,17 @@ func expandBigQueryJobConfigurationCopySourceTables(v interface{}, d TerraformRe transformed := make(map[string]interface{}) transformedProjectId := original["project_id"] - if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projectId"] = transformedProjectId } transformedDatasetId := original["dataset_id"] - if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["datasetId"] = transformedDatasetId } transformedTableId := original["table_id"] - if val := reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedTableId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tableId"] = transformedTableId } @@ -2791,7 +2909,7 @@ func expandBigQueryJobConfigurationCopySourceTables(v interface{}, d TerraformRe return req, nil } -func expandBigQueryJobConfigurationCopyDestinationTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandBigQueryJobConfigurationCopyDestinationTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2801,17 +2919,17 @@ func expandBigQueryJobConfigurationCopyDestinationTable(v interface{}, d Terrafo transformed := make(map[string]interface{}) transformedProjectId := original["project_id"] - if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projectId"] = transformedProjectId } transformedDatasetId := original["dataset_id"] - if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["datasetId"] = transformedDatasetId } transformedTableId := original["table_id"] - if val := reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedTableId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tableId"] = transformedTableId } @@ -2824,15 +2942,15 @@ func expandBigQueryJobConfigurationCopyDestinationTable(v interface{}, d Terrafo return transformed, nil } -func expandBigQueryJobConfigurationCopyCreateDisposition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationCopyCreateDisposition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationCopyWriteDisposition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationCopyWriteDisposition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandBigQueryJobConfigurationCopyDestinationEncryptionConfiguration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationCopyDestinationEncryptionConfiguration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2844,29 +2962,29 @@ func expandBigQueryJobConfigurationCopyDestinationEncryptionConfiguration(v inte transformedKmsKeyName, err := expandBigQueryJobConfigurationCopyDestinationEncryptionConfigurationKmsKeyName(original["kms_key_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["kmsKeyName"] = transformedKmsKeyName } transformedKmsKeyVersion, err := expandBigQueryJobConfigurationCopyDestinationEncryptionConfigurationKmsKeyVersion(original["kms_key_version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKmsKeyVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["kmsKeyVersion"] = transformedKmsKeyVersion } return transformed, nil } -func expandBigQueryJobConfigurationCopyDestinationEncryptionConfigurationKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationCopyDestinationEncryptionConfigurationKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationCopyDestinationEncryptionConfigurationKmsKeyVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandBigQueryJobConfigurationCopyDestinationEncryptionConfigurationKmsKeyVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationExtract(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationExtract(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2878,87 +2996,87 @@ func expandBigQueryJobConfigurationExtract(v interface{}, d TerraformResourceDat transformedDestinationUris, err := expandBigQueryJobConfigurationExtractDestinationUris(original["destination_uris"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDestinationUris); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDestinationUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["destinationUris"] = transformedDestinationUris } transformedPrintHeader, err := expandBigQueryJobConfigurationExtractPrintHeader(original["print_header"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrintHeader); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrintHeader); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["printHeader"] = transformedPrintHeader } transformedFieldDelimiter, err := expandBigQueryJobConfigurationExtractFieldDelimiter(original["field_delimiter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFieldDelimiter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFieldDelimiter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fieldDelimiter"] = transformedFieldDelimiter } transformedDestinationFormat, err := 
expandBigQueryJobConfigurationExtractDestinationFormat(original["destination_format"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDestinationFormat); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDestinationFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["destinationFormat"] = transformedDestinationFormat } transformedCompression, err := expandBigQueryJobConfigurationExtractCompression(original["compression"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCompression); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCompression); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["compression"] = transformedCompression } transformedUseAvroLogicalTypes, err := expandBigQueryJobConfigurationExtractUseAvroLogicalTypes(original["use_avro_logical_types"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUseAvroLogicalTypes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUseAvroLogicalTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["useAvroLogicalTypes"] = transformedUseAvroLogicalTypes } transformedSourceTable, err := expandBigQueryJobConfigurationExtractSourceTable(original["source_table"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSourceTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSourceTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sourceTable"] = transformedSourceTable } transformedSourceModel, err := expandBigQueryJobConfigurationExtractSourceModel(original["source_model"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSourceModel); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedSourceModel); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sourceModel"] = transformedSourceModel } return transformed, nil } -func expandBigQueryJobConfigurationExtractDestinationUris(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationExtractDestinationUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationExtractPrintHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationExtractPrintHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationExtractFieldDelimiter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationExtractFieldDelimiter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationExtractDestinationFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationExtractDestinationFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationExtractCompression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationExtractCompression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationExtractUseAvroLogicalTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationExtractUseAvroLogicalTypes(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationExtractSourceTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationExtractSourceTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2968,17 +3086,17 @@ func expandBigQueryJobConfigurationExtractSourceTable(v interface{}, d Terraform transformed := make(map[string]interface{}) transformedProjectId := original["project_id"] - if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projectId"] = transformedProjectId } transformedDatasetId := original["dataset_id"] - if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["datasetId"] = transformedDatasetId } transformedTableId := original["table_id"] - if val := reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedTableId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tableId"] = transformedTableId } @@ -2991,7 +3109,7 @@ func expandBigQueryJobConfigurationExtractSourceTable(v interface{}, d Terraform return transformed, nil } -func expandBigQueryJobConfigurationExtractSourceModel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationExtractSourceModel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3003,68 +3121,68 @@ func 
expandBigQueryJobConfigurationExtractSourceModel(v interface{}, d Terraform transformedProjectId, err := expandBigQueryJobConfigurationExtractSourceModelProjectId(original["project_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projectId"] = transformedProjectId } transformedDatasetId, err := expandBigQueryJobConfigurationExtractSourceModelDatasetId(original["dataset_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["datasetId"] = transformedDatasetId } transformedModelId, err := expandBigQueryJobConfigurationExtractSourceModelModelId(original["model_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedModelId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedModelId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["modelId"] = transformedModelId } return transformed, nil } -func expandBigQueryJobConfigurationExtractSourceModelProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationExtractSourceModelProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobConfigurationExtractSourceModelDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationExtractSourceModelDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandBigQueryJobConfigurationExtractSourceModelModelId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobConfigurationExtractSourceModelModelId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobJobReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobJobReference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { transformed := make(map[string]interface{}) transformedJobId, err := expandBigQueryJobJobReferenceJobId(d.Get("job_id"), d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedJobId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedJobId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["jobId"] = transformedJobId } transformedLocation, err := expandBigQueryJobJobReferenceLocation(d.Get("location"), d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["location"] = transformedLocation } return transformed, nil } -func expandBigQueryJobJobReferenceJobId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobJobReferenceJobId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBigQueryJobJobReferenceLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBigQueryJobJobReferenceLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } func resourceBigQueryJobEncoder(d 
*schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - project, err := getProject(d, meta.(*Config)) + project, err := tpgresource.GetProject(d, meta.(*transport_tpg.Config)) if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_routine.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_routine.go new file mode 100644 index 0000000000..f80c78dd53 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_routine.go @@ -0,0 +1,849 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigquery + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "google.golang.org/api/googleapi" +) + +func ResourceBigQueryRoutine() *schema.Resource { + return &schema.Resource{ + Create: resourceBigQueryRoutineCreate, + Read: resourceBigQueryRoutineRead, + Update: resourceBigQueryRoutineUpdate, + Delete: resourceBigQueryRoutineDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBigQueryRoutineImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "definition_body": { + Type: schema.TypeString, + Required: true, + Description: `The body of the routine. For functions, this is the expression in the AS clause. +If language=SQL, it is the substring inside (but excluding) the parentheses.`, + }, + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the dataset containing this routine`, + }, + "routine_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). 
The maximum length is 256 characters.`, + }, + + "arguments": { + Type: schema.TypeList, + Optional: true, + Description: `Input/output argument of a function or a stored procedure.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "argument_kind": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"FIXED_TYPE", "ANY_TYPE", ""}), + Description: `Defaults to FIXED_TYPE. Default value: "FIXED_TYPE" Possible values: ["FIXED_TYPE", "ANY_TYPE"]`, + Default: "FIXED_TYPE", + }, + "data_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `A JSON schema for the data type. Required unless argumentKind = ANY_TYPE. +~>**NOTE**: Because this field expects a JSON string, any changes to the string +will create a diff, even if the JSON itself hasn't changed. If the API returns +a different value for the same schema, e.g. it switched the order of values +or replaced STRUCT field type with RECORD field type, we currently cannot +suppress the recurring diff this causes. As a workaround, we recommend using +the schema as returned by the API.`, + }, + "mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"IN", "OUT", "INOUT", ""}), + Description: `Specifies whether the argument is input or output. Can be set for procedures only. Possible values: ["IN", "OUT", "INOUT"]`, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: `The name of this argument. 
Can be absent for function return argument.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `The description of the routine if defined.`, + }, + "determinism_level": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"DETERMINISM_LEVEL_UNSPECIFIED", "DETERMINISTIC", "NOT_DETERMINISTIC", ""}), + Description: `The determinism level of the JavaScript UDF if defined. Possible values: ["DETERMINISM_LEVEL_UNSPECIFIED", "DETERMINISTIC", "NOT_DETERMINISTIC"]`, + }, + "imported_libraries": { + Type: schema.TypeList, + Optional: true, + Description: `Optional. If language = "JAVASCRIPT", this field stores the path of the +imported JAVASCRIPT libraries.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "language": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"SQL", "JAVASCRIPT", ""}), + Description: `The language of the routine. Possible values: ["SQL", "JAVASCRIPT"]`, + }, + "return_table_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". + +If absent, the return table type is inferred from definitionBody at query time in each query +that references this routine. If present, then the columns in the evaluated table result will +be cast to match the column types specificed in return table type, at query time.`, + }, + "return_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `A JSON schema for the return type. Optional if language = "SQL"; required otherwise. 
+If absent, the return type is inferred from definitionBody at query time in each query +that references this routine. If present, then the evaluated result will be cast to +the specified returned type at query time. ~>**NOTE**: Because this field expects a JSON +string, any changes to the string will create a diff, even if the JSON itself hasn't +changed. If the API returns a different value for the same schema, e.g. it switche +d the order of values or replaced STRUCT field type with RECORD field type, we currently +cannot suppress the recurring diff this causes. As a workaround, we recommend using +the schema as returned by the API.`, + }, + "routine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"SCALAR_FUNCTION", "PROCEDURE", "TABLE_VALUED_FUNCTION", ""}), + Description: `The type of routine. Possible values: ["SCALAR_FUNCTION", "PROCEDURE", "TABLE_VALUED_FUNCTION"]`, + }, + "creation_time": { + Type: schema.TypeInt, + Computed: true, + Description: `The time when this routine was created, in milliseconds since the +epoch.`, + }, + "last_modified_time": { + Type: schema.TypeInt, + Computed: true, + Description: `The time when this routine was modified, in milliseconds since the +epoch.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBigQueryRoutineCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + routineReferenceProp, err := expandBigQueryRoutineRoutineReference(nil, d, config) + if err != nil { + return err + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(routineReferenceProp)) { + obj["routineReference"] = routineReferenceProp + } + routineTypeProp, err := 
expandBigQueryRoutineRoutineType(d.Get("routine_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("routine_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(routineTypeProp)) && (ok || !reflect.DeepEqual(v, routineTypeProp)) { + obj["routineType"] = routineTypeProp + } + languageProp, err := expandBigQueryRoutineLanguage(d.Get("language"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("language"); !tpgresource.IsEmptyValue(reflect.ValueOf(languageProp)) && (ok || !reflect.DeepEqual(v, languageProp)) { + obj["language"] = languageProp + } + argumentsProp, err := expandBigQueryRoutineArguments(d.Get("arguments"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("arguments"); !tpgresource.IsEmptyValue(reflect.ValueOf(argumentsProp)) && (ok || !reflect.DeepEqual(v, argumentsProp)) { + obj["arguments"] = argumentsProp + } + returnTypeProp, err := expandBigQueryRoutineReturnType(d.Get("return_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("return_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(returnTypeProp)) && (ok || !reflect.DeepEqual(v, returnTypeProp)) { + obj["returnType"] = returnTypeProp + } + returnTableTypeProp, err := expandBigQueryRoutineReturnTableType(d.Get("return_table_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("return_table_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(returnTableTypeProp)) && (ok || !reflect.DeepEqual(v, returnTableTypeProp)) { + obj["returnTableType"] = returnTableTypeProp + } + importedLibrariesProp, err := expandBigQueryRoutineImportedLibraries(d.Get("imported_libraries"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("imported_libraries"); !tpgresource.IsEmptyValue(reflect.ValueOf(importedLibrariesProp)) && (ok || !reflect.DeepEqual(v, importedLibrariesProp)) { + obj["importedLibraries"] = importedLibrariesProp + } + 
definitionBodyProp, err := expandBigQueryRoutineDefinitionBody(d.Get("definition_body"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("definition_body"); !tpgresource.IsEmptyValue(reflect.ValueOf(definitionBodyProp)) && (ok || !reflect.DeepEqual(v, definitionBodyProp)) { + obj["definitionBody"] = definitionBodyProp + } + descriptionProp, err := expandBigQueryRoutineDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + determinismLevelProp, err := expandBigQueryRoutineDeterminismLevel(d.Get("determinism_level"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("determinism_level"); !tpgresource.IsEmptyValue(reflect.ValueOf(determinismLevelProp)) && (ok || !reflect.DeepEqual(v, determinismLevelProp)) { + obj["determinismLevel"] = determinismLevelProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Routine: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Routine: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Routine: %s", err) + } + + // Store the ID now + id, err := 
tpgresource.ReplaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Routine %q: %#v", d.Id(), res) + + return resourceBigQueryRoutineRead(d, meta) +} + +func resourceBigQueryRoutineRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Routine: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigQueryRoutine %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Routine: %s", err) + } + + // Terraform must set the top level schema field, but since this object contains collapsed properties + // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. 
+ if flattenedProp := flattenBigQueryRoutineRoutineReference(res["routineReference"], d, config); flattenedProp != nil { + if gerr, ok := flattenedProp.(*googleapi.Error); ok { + return fmt.Errorf("Error reading Routine: %s", gerr) + } + casted := flattenedProp.([]interface{})[0] + if casted != nil { + for k, v := range casted.(map[string]interface{}) { + if err := d.Set(k, v); err != nil { + return fmt.Errorf("Error setting %s: %s", k, err) + } + } + } + } + if err := d.Set("routine_type", flattenBigQueryRoutineRoutineType(res["routineType"], d, config)); err != nil { + return fmt.Errorf("Error reading Routine: %s", err) + } + if err := d.Set("creation_time", flattenBigQueryRoutineCreationTime(res["creationTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Routine: %s", err) + } + if err := d.Set("last_modified_time", flattenBigQueryRoutineLastModifiedTime(res["lastModifiedTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Routine: %s", err) + } + if err := d.Set("language", flattenBigQueryRoutineLanguage(res["language"], d, config)); err != nil { + return fmt.Errorf("Error reading Routine: %s", err) + } + if err := d.Set("arguments", flattenBigQueryRoutineArguments(res["arguments"], d, config)); err != nil { + return fmt.Errorf("Error reading Routine: %s", err) + } + if err := d.Set("return_type", flattenBigQueryRoutineReturnType(res["returnType"], d, config)); err != nil { + return fmt.Errorf("Error reading Routine: %s", err) + } + if err := d.Set("return_table_type", flattenBigQueryRoutineReturnTableType(res["returnTableType"], d, config)); err != nil { + return fmt.Errorf("Error reading Routine: %s", err) + } + if err := d.Set("imported_libraries", flattenBigQueryRoutineImportedLibraries(res["importedLibraries"], d, config)); err != nil { + return fmt.Errorf("Error reading Routine: %s", err) + } + if err := d.Set("definition_body", flattenBigQueryRoutineDefinitionBody(res["definitionBody"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Routine: %s", err) + } + if err := d.Set("description", flattenBigQueryRoutineDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Routine: %s", err) + } + if err := d.Set("determinism_level", flattenBigQueryRoutineDeterminismLevel(res["determinismLevel"], d, config)); err != nil { + return fmt.Errorf("Error reading Routine: %s", err) + } + + return nil +} + +func resourceBigQueryRoutineUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Routine: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + routineReferenceProp, err := expandBigQueryRoutineRoutineReference(nil, d, config) + if err != nil { + return err + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(routineReferenceProp)) { + obj["routineReference"] = routineReferenceProp + } + routineTypeProp, err := expandBigQueryRoutineRoutineType(d.Get("routine_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("routine_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, routineTypeProp)) { + obj["routineType"] = routineTypeProp + } + languageProp, err := expandBigQueryRoutineLanguage(d.Get("language"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("language"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, languageProp)) { + obj["language"] = languageProp + } + argumentsProp, err := expandBigQueryRoutineArguments(d.Get("arguments"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("arguments"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, argumentsProp)) { + obj["arguments"] = argumentsProp + } + returnTypeProp, err := expandBigQueryRoutineReturnType(d.Get("return_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("return_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, returnTypeProp)) { + obj["returnType"] = returnTypeProp + } + returnTableTypeProp, err := expandBigQueryRoutineReturnTableType(d.Get("return_table_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("return_table_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, returnTableTypeProp)) { + obj["returnTableType"] = returnTableTypeProp + } + importedLibrariesProp, err := expandBigQueryRoutineImportedLibraries(d.Get("imported_libraries"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("imported_libraries"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, importedLibrariesProp)) { + obj["importedLibraries"] = importedLibrariesProp + } + definitionBodyProp, err := expandBigQueryRoutineDefinitionBody(d.Get("definition_body"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("definition_body"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, definitionBodyProp)) { + obj["definitionBody"] = definitionBodyProp + } + descriptionProp, err := expandBigQueryRoutineDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + determinismLevelProp, err := expandBigQueryRoutineDeterminismLevel(d.Get("determinism_level"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("determinism_level"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, determinismLevelProp)) { + obj["determinismLevel"] = determinismLevelProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Routine %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Routine %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Routine %q: %#v", d.Id(), res) + } + + return resourceBigQueryRoutineRead(d, meta) +} + +func resourceBigQueryRoutineDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Routine: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Routine %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + 
UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Routine") + } + + log.Printf("[DEBUG] Finished deleting Routine %q: %#v", d.Id(), res) + return nil +} + +func resourceBigQueryRoutineImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/datasets/(?P<dataset_id>[^/]+)/routines/(?P<routine_id>[^/]+)", + "(?P<project>[^/]+)/(?P<dataset_id>[^/]+)/(?P<routine_id>[^/]+)", + "(?P<dataset_id>[^/]+)/(?P<routine_id>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenBigQueryRoutineRoutineReference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset_id"] = + flattenBigQueryRoutineRoutineReferenceDatasetId(original["datasetId"], d, config) + transformed["routine_id"] = + flattenBigQueryRoutineRoutineReferenceRoutineId(original["routineId"], d, config) + return []interface{}{transformed} +} +func flattenBigQueryRoutineRoutineReferenceDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryRoutineRoutineReferenceRoutineId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryRoutineRoutineType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryRoutineCreationTime(v interface{}, 
d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBigQueryRoutineLastModifiedTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBigQueryRoutineLanguage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryRoutineArguments(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenBigQueryRoutineArgumentsName(original["name"], d, config), + "argument_kind": flattenBigQueryRoutineArgumentsArgumentKind(original["argumentKind"], d, config), + "mode": flattenBigQueryRoutineArgumentsMode(original["mode"], d, config), + "data_type": flattenBigQueryRoutineArgumentsDataType(original["dataType"], d, config), + }) + } + return transformed +} +func flattenBigQueryRoutineArgumentsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenBigQueryRoutineArgumentsArgumentKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryRoutineArgumentsMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryRoutineArgumentsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + b, err := json.Marshal(v) + if err != nil { + // TODO: return error once https://github.com/GoogleCloudPlatform/magic-modules/issues/3257 is fixed. + log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) + } + return string(b) +} + +func flattenBigQueryRoutineReturnType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + b, err := json.Marshal(v) + if err != nil { + // TODO: return error once https://github.com/GoogleCloudPlatform/magic-modules/issues/3257 is fixed. + log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) + } + return string(b) +} + +func flattenBigQueryRoutineReturnTableType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + b, err := json.Marshal(v) + if err != nil { + // TODO: return error once https://github.com/GoogleCloudPlatform/magic-modules/issues/3257 is fixed. 
+ log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) + } + return string(b) +} + +func flattenBigQueryRoutineImportedLibraries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryRoutineDefinitionBody(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryRoutineDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigQueryRoutineDeterminismLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandBigQueryRoutineRoutineReference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + + transformed := make(map[string]interface{}) + transformed["datasetId"] = d.Get("dataset_id") + project, _ := tpgresource.GetProject(d, config) + transformed["projectId"] = project + transformed["routineId"] = d.Get("routine_id") + + return transformed, nil +} + +func expandBigQueryRoutineRoutineType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryRoutineLanguage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryRoutineArguments(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandBigQueryRoutineArgumentsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["name"] = transformedName + } + + transformedArgumentKind, err := expandBigQueryRoutineArgumentsArgumentKind(original["argument_kind"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArgumentKind); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["argumentKind"] = transformedArgumentKind + } + + transformedMode, err := expandBigQueryRoutineArgumentsMode(original["mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mode"] = transformedMode + } + + transformedDataType, err := expandBigQueryRoutineArgumentsDataType(original["data_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataType"] = transformedDataType + } + + req = append(req, transformed) + } + return req, nil +} + +func expandBigQueryRoutineArgumentsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryRoutineArgumentsArgumentKind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryRoutineArgumentsMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryRoutineArgumentsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + b := []byte(v.(string)) + if len(b) == 0 { + return nil, nil + } + m := make(map[string]interface{}) + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return m, nil +} + +func expandBigQueryRoutineReturnType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { 
+ b := []byte(v.(string)) + if len(b) == 0 { + return nil, nil + } + m := make(map[string]interface{}) + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return m, nil +} + +func expandBigQueryRoutineReturnTableType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + b := []byte(v.(string)) + if len(b) == 0 { + return nil, nil + } + m := make(map[string]interface{}) + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return m, nil +} + +func expandBigQueryRoutineImportedLibraries(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryRoutineDefinitionBody(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryRoutineDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigQueryRoutineDeterminismLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_routine_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_routine_sweeper.go new file mode 100644 index 0000000000..f5c7b06593 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_routine_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package bigquery + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("BigQueryRoutine", testSweepBigQueryRoutine) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepBigQueryRoutine(region string) error { + resourceName := "BigQueryRoutine" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://bigquery.googleapis.com/bigquery/v2/projects/{{project}}/datasets/{{dataset_id}}/routines", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["routines"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://bigquery.googleapis.com/bigquery/v2/projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_table.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_table.go similarity index 90% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_table.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_table.go index a194a72c2a..3c979ec31f 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_table.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquery/resource_bigquery_table.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package bigquery import ( "context" @@ -16,6 +18,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/bigquery/v2" ) @@ -179,7 +184,7 @@ func bigQueryTableConnectionIdSuppress(name, old, new string, _ *schema.Resource // "projects/{{project}}/locations/{{location}}/connections/{{connection_id}}". // but always returns "{{project}}.{{location}}.{{connection_id}}" - if isEmptyValue(reflect.ValueOf(old)) || isEmptyValue(reflect.ValueOf(new)) { + if tpgresource.IsEmptyValue(reflect.ValueOf(old)) || tpgresource.IsEmptyValue(reflect.ValueOf(new)) { return false } @@ -320,7 +325,7 @@ func resourceBigQueryTableSchemaIsChangeable(old, new interface{}) (bool, error) } } -func resourceBigQueryTableSchemaCustomizeDiffFunc(d TerraformResourceDiff) error { +func resourceBigQueryTableSchemaCustomizeDiffFunc(d tpgresource.TerraformResourceDiff) error { if _, hasSchema := d.GetOk("schema"); hasSchema { oldSchema, newSchema := d.GetChange("schema") oldSchemaText := oldSchema.(string) @@ -439,10 +444,10 @@ func ResourceBigQueryTable() *schema.Resource { // SourceFormat [Required] The data format. "source_format": { Type: schema.TypeString, - Required: true, - Description: `The data format. Supported values are: "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "PARQUET", "ORC" and "DATASTORE_BACKUP". 
To use "GOOGLE_SHEETS" the scopes must include "googleapis.com/auth/drive.readonly".`, + Optional: true, + Description: ` Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation (https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externaldataconfiguration) for supported formats. To use "GOOGLE_SHEETS" the scopes must include "googleapis.com/auth/drive.readonly".`, ValidateFunc: validation.StringInSlice([]string{ - "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "DATASTORE_BACKUP", "PARQUET", "ORC", "BIGTABLE", + "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "ICEBERG", "DATASTORE_BACKUP", "PARQUET", "ORC", "BIGTABLE", }, false), }, // SourceURIs [Required] The fully-qualified URIs that point to your data in Google Cloud. @@ -462,7 +467,7 @@ func ResourceBigQueryTable() *schema.Resource { }, // Schema: Optional] The schema for the data. // Schema is required for CSV and JSON formats if autodetect is not on. - // Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, ORC and Parquet formats. + // Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC, and Parquet formats. "schema": { Type: schema.TypeString, Optional: true, @@ -535,6 +540,45 @@ func ResourceBigQueryTable() *schema.Resource { }, }, }, + // jsonOptions: [Optional] Additional properties to set if sourceFormat is set to JSON. + "json_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Additional properties to set if sourceFormat is set to JSON."`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "encoding": { + Type: schema.TypeString, + Optional: true, + Default: "UTF-8", + ValidateFunc: validation.StringInSlice([]string{"UTF-8", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE"}, false), + Description: `The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. 
The default value is UTF-8.`, + }, + }, + }, + }, + + "parquet_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Additional properties to set if sourceFormat is set to PARQUET."`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enum_as_string": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.`, + }, + "enable_list_inference": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether to use schema inference specifically for Parquet LIST logical type.`, + }, + }, + }, + }, // GoogleSheetsOptions: [Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS. "google_sheets_options": { Type: schema.TypeList, @@ -655,6 +699,18 @@ func ResourceBigQueryTable() *schema.Resource { Optional: true, Description: `When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.`, }, + "metadata_cache_mode": { + Type: schema.TypeString, + Optional: true, + Description: `Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.`, + ValidateFunc: validation.StringInSlice([]string{"AUTOMATIC", "MANUAL"}, false), + }, + "object_metadata": { + Type: schema.TypeString, + Optional: true, + Description: `Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. 
If ObjectMetadata is set, sourceFormat should be omitted.`, + ConflictsWith: []string{"external_data_configuration.0.source_format"}, + }, }, }, }, @@ -980,9 +1036,9 @@ func ResourceBigQueryTable() *schema.Resource { } func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } @@ -1063,7 +1119,7 @@ func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, e if v, ok := d.GetOk("clustering"); ok { table.Clustering = &bigquery.Clustering{ - Fields: convertStringArr(v.([]interface{})), + Fields: tpgresource.ConvertStringArr(v.([]interface{})), ForceSendFields: []string{"Fields"}, } } @@ -1072,13 +1128,13 @@ func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, e } func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -1129,15 +1185,15 @@ func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error } func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } log.Printf("[INFO] Reading BigQuery table: %s", d.Id()) - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -1147,7 
+1203,7 @@ func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error { res, err := config.NewBigQueryClient(userAgent).Tables.Get(project, datasetID, tableID).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BigQuery table %q", tableID)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigQuery table %q", tableID)) } if err := d.Set("project", project); err != nil { @@ -1280,8 +1336,8 @@ func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error { } func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -1293,7 +1349,7 @@ func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error log.Printf("[INFO] Updating BigQuery table: %s", d.Id()) - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -1312,15 +1368,15 @@ func resourceBigQueryTableDelete(d *schema.ResourceData, meta interface{}) error if d.Get("deletion_protection").(bool) { return fmt.Errorf("cannot destroy instance without setting deletion_protection=false and running `terraform apply`") } - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } log.Printf("[INFO] Deleting BigQuery table: %s", d.Id()) - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -1355,9 +1411,13 @@ func expandExternalDataConfiguration(cfg interface{}) (*bigquery.ExternalDataCon if v, ok := raw["compression"]; ok { edc.Compression = v.(string) } + if v, ok 
:= raw["csv_options"]; ok { edc.CsvOptions = expandCsvOptions(v) } + if v, ok := raw["json_options"]; ok { + edc.JsonOptions = expandJsonOptions(v) + } if v, ok := raw["google_sheets_options"]; ok { edc.GoogleSheetsOptions = expandGoogleSheetsOptions(v) } @@ -1367,6 +1427,10 @@ func expandExternalDataConfiguration(cfg interface{}) (*bigquery.ExternalDataCon if v, ok := raw["avro_options"]; ok { edc.AvroOptions = expandAvroOptions(v) } + if v, ok := raw["parquet_options"]; ok { + edc.ParquetOptions = expandParquetOptions(v) + } + if v, ok := raw["ignore_unknown_values"]; ok { edc.IgnoreUnknownValues = v.(bool) } @@ -1389,6 +1453,12 @@ func expandExternalDataConfiguration(cfg interface{}) (*bigquery.ExternalDataCon if v, ok := raw["reference_file_schema_uri"]; ok { edc.ReferenceFileSchemaUri = v.(string) } + if v, ok := raw["metadata_cache_mode"]; ok { + edc.MetadataCacheMode = v.(string) + } + if v, ok := raw["object_metadata"]; ok { + edc.ObjectMetadata = v.(string) + } return edc, nil @@ -1420,6 +1490,14 @@ func flattenExternalDataConfiguration(edc *bigquery.ExternalDataConfiguration) ( result["avro_options"] = flattenAvroOptions(edc.AvroOptions) } + if edc.ParquetOptions != nil { + result["parquet_options"] = flattenParquetOptions(edc.ParquetOptions) + } + + if edc.JsonOptions != nil { + result["json_options"] = flattenJsonOptions(edc.JsonOptions) + } + if edc.IgnoreUnknownValues { result["ignore_unknown_values"] = edc.IgnoreUnknownValues } @@ -1438,6 +1516,13 @@ func flattenExternalDataConfiguration(edc *bigquery.ExternalDataConfiguration) ( if edc.ReferenceFileSchemaUri != "" { result["reference_file_schema_uri"] = edc.ReferenceFileSchemaUri } + if edc.MetadataCacheMode != "" { + result["metadata_cache_mode"] = edc.MetadataCacheMode + } + + if edc.ObjectMetadata != "" { + result["object_metadata"] = edc.ObjectMetadata + } return []map[string]interface{}{result}, nil } @@ -1610,6 +1695,64 @@ func flattenAvroOptions(opts *bigquery.AvroOptions) 
[]map[string]interface{} { return []map[string]interface{}{result} } +func expandParquetOptions(configured interface{}) *bigquery.ParquetOptions { + if len(configured.([]interface{})) == 0 { + return nil + } + + raw := configured.([]interface{})[0].(map[string]interface{}) + opts := &bigquery.ParquetOptions{} + + if v, ok := raw["enum_as_string"]; ok { + opts.EnumAsString = v.(bool) + } + + if v, ok := raw["enable_list_inference"]; ok { + opts.EnableListInference = v.(bool) + } + + return opts +} + +func flattenParquetOptions(opts *bigquery.ParquetOptions) []map[string]interface{} { + result := map[string]interface{}{} + + if opts.EnumAsString { + result["enum_as_string"] = opts.EnumAsString + } + + if opts.EnableListInference { + result["enable_list_inference"] = opts.EnableListInference + } + + return []map[string]interface{}{result} +} + +func expandJsonOptions(configured interface{}) *bigquery.JsonOptions { + if len(configured.([]interface{})) == 0 { + return nil + } + + raw := configured.([]interface{})[0].(map[string]interface{}) + opts := &bigquery.JsonOptions{} + + if v, ok := raw["encoding"]; ok { + opts.Encoding = v.(string) + } + + return opts +} + +func flattenJsonOptions(opts *bigquery.JsonOptions) []map[string]interface{} { + result := map[string]interface{}{} + + if opts.Encoding != "" { + result["encoding"] = opts.Encoding + } + + return []map[string]interface{}{result} +} + func expandSchema(raw interface{}) (*bigquery.TableSchema, error) { var fields []*bigquery.TableFieldSchema @@ -1780,8 +1923,8 @@ func flattenMaterializedView(mvd *bigquery.MaterializedViewDefinition) []map[str } func resourceBigQueryTableImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/datasets/(?P[^/]+)/tables/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", 
"(?P[^/]+)/(?P[^/]+)", @@ -1795,7 +1938,7 @@ func resourceBigQueryTableImport(d *schema.ResourceData, meta interface{}) ([]*s } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/iam_bigquery_analytics_hub_data_exchange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/iam_bigquery_analytics_hub_data_exchange.go new file mode 100644 index 0000000000..dad175a7ac --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/iam_bigquery_analytics_hub_data_exchange.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigqueryanalyticshub + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var BigqueryAnalyticsHubDataExchangeIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "data_exchange_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type BigqueryAnalyticsHubDataExchangeIamUpdater struct { + project string + location string + dataExchangeId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func BigqueryAnalyticsHubDataExchangeIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("data_exchange_id"); ok { + values["data_exchange_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, 
err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataExchanges/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("data_exchange_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &BigqueryAnalyticsHubDataExchangeIamUpdater{ + project: values["project"], + location: values["location"], + dataExchangeId: values["data_exchange_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("data_exchange_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting data_exchange_id: %s", err) + } + + return u, nil +} + +func BigqueryAnalyticsHubDataExchangeIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataExchanges/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &BigqueryAnalyticsHubDataExchangeIamUpdater{ + project: values["project"], + location: values["location"], + dataExchangeId: values["data_exchange_id"], + d: d, + Config: config, + } + if err := d.Set("data_exchange_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting data_exchange_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u 
*BigqueryAnalyticsHubDataExchangeIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyDataExchangeUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *BigqueryAnalyticsHubDataExchangeIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyDataExchangeUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u 
*BigqueryAnalyticsHubDataExchangeIamUpdater) qualifyDataExchangeUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{BigqueryAnalyticsHubBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/dataExchanges/%s", u.project, u.location, u.dataExchangeId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *BigqueryAnalyticsHubDataExchangeIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/dataExchanges/%s", u.project, u.location, u.dataExchangeId) +} + +func (u *BigqueryAnalyticsHubDataExchangeIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-bigqueryanalyticshub-dataexchange-%s", u.GetResourceId()) +} + +func (u *BigqueryAnalyticsHubDataExchangeIamUpdater) DescribeResource() string { + return fmt.Sprintf("bigqueryanalyticshub dataexchange %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/iam_bigquery_analytics_hub_listing.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/iam_bigquery_analytics_hub_listing.go new file mode 100644 index 0000000000..8f10ec43ff --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/iam_bigquery_analytics_hub_listing.go @@ -0,0 +1,260 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package bigqueryanalyticshub + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var BigqueryAnalyticsHubListingIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "data_exchange_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "listing_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type BigqueryAnalyticsHubListingIamUpdater struct { + project string + location string + dataExchangeId string + listingId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func BigqueryAnalyticsHubListingIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := 
d.GetOk("data_exchange_id"); ok { + values["data_exchange_id"] = v.(string) + } + + if v, ok := d.GetOk("listing_id"); ok { + values["listing_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataExchanges/(?P[^/]+)/listings/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("listing_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &BigqueryAnalyticsHubListingIamUpdater{ + project: values["project"], + location: values["location"], + dataExchangeId: values["data_exchange_id"], + listingId: values["listing_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("data_exchange_id", u.dataExchangeId); err != nil { + return nil, fmt.Errorf("Error setting data_exchange_id: %s", err) + } + if err := d.Set("listing_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting listing_id: %s", err) + } + + return u, nil +} + +func BigqueryAnalyticsHubListingIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataExchanges/(?P[^/]+)/listings/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != 
nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &BigqueryAnalyticsHubListingIamUpdater{ + project: values["project"], + location: values["location"], + dataExchangeId: values["data_exchange_id"], + listingId: values["listing_id"], + d: d, + Config: config, + } + if err := d.Set("listing_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting listing_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *BigqueryAnalyticsHubListingIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyListingUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *BigqueryAnalyticsHubListingIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyListingUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, 
u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *BigqueryAnalyticsHubListingIamUpdater) qualifyListingUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{BigqueryAnalyticsHubBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/dataExchanges/%s/listings/%s", u.project, u.location, u.dataExchangeId, u.listingId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *BigqueryAnalyticsHubListingIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/dataExchanges/%s/listings/%s", u.project, u.location, u.dataExchangeId, u.listingId) +} + +func (u *BigqueryAnalyticsHubListingIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-bigqueryanalyticshub-listing-%s", u.GetResourceId()) +} + +func (u *BigqueryAnalyticsHubListingIamUpdater) DescribeResource() string { + return fmt.Sprintf("bigqueryanalyticshub listing %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_data_exchange.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_data_exchange.go new file mode 100644 index 0000000000..cb9a8de3de --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_data_exchange.go @@ 
-0,0 +1,491 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package bigqueryanalyticshub + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceBigqueryAnalyticsHubDataExchange() *schema.Resource { + return &schema.Resource{ + Create: resourceBigqueryAnalyticsHubDataExchangeCreate, + Read: resourceBigqueryAnalyticsHubDataExchangeRead, + Update: resourceBigqueryAnalyticsHubDataExchangeUpdate, + Delete: resourceBigqueryAnalyticsHubDataExchangeDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBigqueryAnalyticsHubDataExchangeImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "data_exchange_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the data exchange. Must contain only Unicode letters, numbers (0-9), underscores (_). 
Should not use characters that require URL-escaping, or characters outside of ASCII, spaces.`, + }, + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `Human-readable display name of the data exchange. The display name must contain only Unicode letters, numbers (0-9), underscores (_), dashes (-), spaces ( ), and must not start or end with spaces.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the location this data exchange.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of the data exchange.`, + }, + "documentation": { + Type: schema.TypeString, + Optional: true, + Description: `Documentation describing the data exchange.`, + }, + "icon": { + Type: schema.TypeString, + Optional: true, + Description: `Base64 encoded image representing the data exchange.`, + }, + "primary_contact": { + Type: schema.TypeString, + Optional: true, + Description: `Email or URL of the primary point of contact of the data exchange.`, + }, + "listing_count": { + Type: schema.TypeInt, + Computed: true, + Description: `Number of listings contained in the data exchange.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the data exchange, for example: +"projects/myproject/locations/US/dataExchanges/123"`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBigqueryAnalyticsHubDataExchangeCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandBigqueryAnalyticsHubDataExchangeDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandBigqueryAnalyticsHubDataExchangeDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + primaryContactProp, err := expandBigqueryAnalyticsHubDataExchangePrimaryContact(d.Get("primary_contact"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("primary_contact"); !tpgresource.IsEmptyValue(reflect.ValueOf(primaryContactProp)) && (ok || !reflect.DeepEqual(v, primaryContactProp)) { + obj["primaryContact"] = primaryContactProp + } + documentationProp, err := expandBigqueryAnalyticsHubDataExchangeDocumentation(d.Get("documentation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("documentation"); !tpgresource.IsEmptyValue(reflect.ValueOf(documentationProp)) && (ok || !reflect.DeepEqual(v, documentationProp)) { + obj["documentation"] = documentationProp + } + iconProp, err := expandBigqueryAnalyticsHubDataExchangeIcon(d.Get("icon"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("icon"); !tpgresource.IsEmptyValue(reflect.ValueOf(iconProp)) && (ok || !reflect.DeepEqual(v, iconProp)) { + obj["icon"] = iconProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges?data_exchange_id={{data_exchange_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new DataExchange: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DataExchange: %s", 
err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating DataExchange: %s", err) + } + if err := d.Set("name", flattenBigqueryAnalyticsHubDataExchangeName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating DataExchange %q: %#v", d.Id(), res) + + return resourceBigqueryAnalyticsHubDataExchangeRead(d, meta) +} + +func resourceBigqueryAnalyticsHubDataExchangeRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DataExchange: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigqueryAnalyticsHubDataExchange %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading DataExchange: %s", err) + } + + if err := d.Set("name", flattenBigqueryAnalyticsHubDataExchangeName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading DataExchange: %s", err) + } + if err := d.Set("display_name", flattenBigqueryAnalyticsHubDataExchangeDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading DataExchange: %s", err) + } + if err := d.Set("description", flattenBigqueryAnalyticsHubDataExchangeDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading DataExchange: %s", err) + } + if err := d.Set("primary_contact", flattenBigqueryAnalyticsHubDataExchangePrimaryContact(res["primaryContact"], d, config)); err != nil { + return fmt.Errorf("Error reading DataExchange: %s", err) + } + if err := d.Set("documentation", flattenBigqueryAnalyticsHubDataExchangeDocumentation(res["documentation"], d, config)); err != nil { + return fmt.Errorf("Error reading DataExchange: %s", err) + } + if err := d.Set("listing_count", flattenBigqueryAnalyticsHubDataExchangeListingCount(res["listingCount"], d, config)); err != nil { + return fmt.Errorf("Error reading DataExchange: %s", err) + } + if err := d.Set("icon", flattenBigqueryAnalyticsHubDataExchangeIcon(res["icon"], d, config)); err != nil { + return fmt.Errorf("Error reading DataExchange: %s", err) + } + + return nil +} + +func resourceBigqueryAnalyticsHubDataExchangeUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err 
!= nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DataExchange: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandBigqueryAnalyticsHubDataExchangeDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandBigqueryAnalyticsHubDataExchangeDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + primaryContactProp, err := expandBigqueryAnalyticsHubDataExchangePrimaryContact(d.Get("primary_contact"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("primary_contact"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, primaryContactProp)) { + obj["primaryContact"] = primaryContactProp + } + documentationProp, err := expandBigqueryAnalyticsHubDataExchangeDocumentation(d.Get("documentation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("documentation"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, documentationProp)) { + obj["documentation"] = documentationProp + } + iconProp, err := expandBigqueryAnalyticsHubDataExchangeIcon(d.Get("icon"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("icon"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, iconProp)) { + obj["icon"] = iconProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating DataExchange %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("primary_contact") { + updateMask = append(updateMask, "primaryContact") + } + + if d.HasChange("documentation") { + updateMask = append(updateMask, "documentation") + } + + if d.HasChange("icon") { + updateMask = append(updateMask, "icon") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating DataExchange %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating DataExchange %q: %#v", d.Id(), res) + } + + return resourceBigqueryAnalyticsHubDataExchangeRead(d, meta) +} + +func resourceBigqueryAnalyticsHubDataExchangeDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for 
DataExchange: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting DataExchange %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DataExchange") + } + + log.Printf("[DEBUG] Finished deleting DataExchange %q: %#v", d.Id(), res) + return nil +} + +func resourceBigqueryAnalyticsHubDataExchangeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/dataExchanges/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenBigqueryAnalyticsHubDataExchangeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubDataExchangeDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenBigqueryAnalyticsHubDataExchangeDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubDataExchangePrimaryContact(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubDataExchangeDocumentation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubDataExchangeListingCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBigqueryAnalyticsHubDataExchangeIcon(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandBigqueryAnalyticsHubDataExchangeDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubDataExchangeDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubDataExchangePrimaryContact(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubDataExchangeDocumentation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubDataExchangeIcon(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_data_exchange_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_data_exchange_sweeper.go new file mode 100644 index 0000000000..f201f2cda2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_data_exchange_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigqueryanalyticshub + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("BigqueryAnalyticsHubDataExchange", testSweepBigqueryAnalyticsHubDataExchange) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepBigqueryAnalyticsHubDataExchange(region string) error { + resourceName := "BigqueryAnalyticsHubDataExchange" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://analyticshub.googleapis.com/v1/projects/{{project}}/locations/{{location}}/dataExchanges", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + 
UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["dataExchanges"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://analyticshub.googleapis.com/v1/projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and 
skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_listing.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_listing.go new file mode 100644 index 0000000000..f33262c3cd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub/resource_bigquery_analytics_hub_listing.go @@ -0,0 +1,804 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigqueryanalyticshub + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceBigqueryAnalyticsHubListing() *schema.Resource { + return &schema.Resource{ + Create: resourceBigqueryAnalyticsHubListingCreate, + Read: resourceBigqueryAnalyticsHubListingRead, + Update: resourceBigqueryAnalyticsHubListingUpdate, + Delete: resourceBigqueryAnalyticsHubListingDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBigqueryAnalyticsHubListingImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "bigquery_dataset": { + Type: schema.TypeList, + Required: true, + Description: `Shared dataset i.e. BigQuery dataset source.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123`, + }, + }, + }, + }, + "data_exchange_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the data exchange. Must contain only Unicode letters, numbers (0-9), underscores (_). Should not use characters that require URL-escaping, or characters outside of ASCII, spaces.`, + }, + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `Human-readable display name of the listing. 
The display name must contain only Unicode letters, numbers (0-9), underscores (_), dashes (-), spaces ( ), ampersands (&) and can't start or end with spaces.`, + }, + "listing_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the listing. Must contain only Unicode letters, numbers (0-9), underscores (_). Should not use characters that require URL-escaping, or characters outside of ASCII, spaces.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the location this data exchange listing.`, + }, + "categories": { + Type: schema.TypeList, + Optional: true, + Description: `Categories of the listing. Up to two categories are allowed.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "data_provider": { + Type: schema.TypeList, + Optional: true, + Description: `Details of the data provider who owns the source data.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the data provider.`, + }, + "primary_contact": { + Type: schema.TypeString, + Optional: true, + Description: `Email or URL of the data provider.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Short description of the listing. 
The description must not contain Unicode non-characters and C0 and C1 control codes except tabs (HT), new lines (LF), carriage returns (CR), and page breaks (FF).`, + }, + "documentation": { + Type: schema.TypeString, + Optional: true, + Description: `Documentation describing the listing.`, + }, + "icon": { + Type: schema.TypeString, + Optional: true, + Description: `Base64 encoded image representing the listing.`, + }, + "primary_contact": { + Type: schema.TypeString, + Optional: true, + Description: `Email or URL of the primary point of contact of the listing.`, + }, + "publisher": { + Type: schema.TypeList, + Optional: true, + Description: `Details of the publisher who owns the listing and who can share the source data.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the listing publisher.`, + }, + "primary_contact": { + Type: schema.TypeString, + Optional: true, + Description: `Email or URL of the listing publisher.`, + }, + }, + }, + }, + "request_access": { + Type: schema.TypeString, + Optional: true, + Description: `Email or URL of the request access of the listing. Subscribers can use this reference to request access.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the listing. e.g. 
"projects/myproject/locations/US/dataExchanges/123/listings/456"`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBigqueryAnalyticsHubListingCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandBigqueryAnalyticsHubListingDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandBigqueryAnalyticsHubListingDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + primaryContactProp, err := expandBigqueryAnalyticsHubListingPrimaryContact(d.Get("primary_contact"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("primary_contact"); !tpgresource.IsEmptyValue(reflect.ValueOf(primaryContactProp)) && (ok || !reflect.DeepEqual(v, primaryContactProp)) { + obj["primaryContact"] = primaryContactProp + } + documentationProp, err := expandBigqueryAnalyticsHubListingDocumentation(d.Get("documentation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("documentation"); !tpgresource.IsEmptyValue(reflect.ValueOf(documentationProp)) && (ok || !reflect.DeepEqual(v, documentationProp)) { + obj["documentation"] = documentationProp + } + iconProp, err := expandBigqueryAnalyticsHubListingIcon(d.Get("icon"), d, config) + if 
err != nil { + return err + } else if v, ok := d.GetOkExists("icon"); !tpgresource.IsEmptyValue(reflect.ValueOf(iconProp)) && (ok || !reflect.DeepEqual(v, iconProp)) { + obj["icon"] = iconProp + } + requestAccessProp, err := expandBigqueryAnalyticsHubListingRequestAccess(d.Get("request_access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("request_access"); !tpgresource.IsEmptyValue(reflect.ValueOf(requestAccessProp)) && (ok || !reflect.DeepEqual(v, requestAccessProp)) { + obj["requestAccess"] = requestAccessProp + } + dataProviderProp, err := expandBigqueryAnalyticsHubListingDataProvider(d.Get("data_provider"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_provider"); !tpgresource.IsEmptyValue(reflect.ValueOf(dataProviderProp)) && (ok || !reflect.DeepEqual(v, dataProviderProp)) { + obj["dataProvider"] = dataProviderProp + } + publisherProp, err := expandBigqueryAnalyticsHubListingPublisher(d.Get("publisher"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("publisher"); !tpgresource.IsEmptyValue(reflect.ValueOf(publisherProp)) && (ok || !reflect.DeepEqual(v, publisherProp)) { + obj["publisher"] = publisherProp + } + categoriesProp, err := expandBigqueryAnalyticsHubListingCategories(d.Get("categories"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("categories"); !tpgresource.IsEmptyValue(reflect.ValueOf(categoriesProp)) && (ok || !reflect.DeepEqual(v, categoriesProp)) { + obj["categories"] = categoriesProp + } + bigqueryDatasetProp, err := expandBigqueryAnalyticsHubListingBigqueryDataset(d.Get("bigquery_dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bigquery_dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(bigqueryDatasetProp)) && (ok || !reflect.DeepEqual(v, bigqueryDatasetProp)) { + obj["bigqueryDataset"] = bigqueryDatasetProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings?listing_id={{listing_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Listing: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Listing: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Listing: %s", err) + } + if err := d.Set("name", flattenBigqueryAnalyticsHubListingName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Listing %q: %#v", d.Id(), res) + + return resourceBigqueryAnalyticsHubListingRead(d, meta) +} + +func resourceBigqueryAnalyticsHubListingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}") + if err != nil { + return err + } + + billingProject 
:= "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Listing: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigqueryAnalyticsHubListing %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Listing: %s", err) + } + + if err := d.Set("name", flattenBigqueryAnalyticsHubListingName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Listing: %s", err) + } + if err := d.Set("display_name", flattenBigqueryAnalyticsHubListingDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Listing: %s", err) + } + if err := d.Set("description", flattenBigqueryAnalyticsHubListingDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Listing: %s", err) + } + if err := d.Set("primary_contact", flattenBigqueryAnalyticsHubListingPrimaryContact(res["primaryContact"], d, config)); err != nil { + return fmt.Errorf("Error reading Listing: %s", err) + } + if err := d.Set("documentation", flattenBigqueryAnalyticsHubListingDocumentation(res["documentation"], d, config)); err != nil { + return fmt.Errorf("Error reading Listing: %s", err) + } + if err := d.Set("icon", flattenBigqueryAnalyticsHubListingIcon(res["icon"], d, config)); err != nil { + return fmt.Errorf("Error reading Listing: %s", err) + } + if err := d.Set("request_access", flattenBigqueryAnalyticsHubListingRequestAccess(res["requestAccess"], d, config)); err != nil { + return fmt.Errorf("Error 
reading Listing: %s", err) + } + if err := d.Set("data_provider", flattenBigqueryAnalyticsHubListingDataProvider(res["dataProvider"], d, config)); err != nil { + return fmt.Errorf("Error reading Listing: %s", err) + } + if err := d.Set("publisher", flattenBigqueryAnalyticsHubListingPublisher(res["publisher"], d, config)); err != nil { + return fmt.Errorf("Error reading Listing: %s", err) + } + if err := d.Set("categories", flattenBigqueryAnalyticsHubListingCategories(res["categories"], d, config)); err != nil { + return fmt.Errorf("Error reading Listing: %s", err) + } + if err := d.Set("bigquery_dataset", flattenBigqueryAnalyticsHubListingBigqueryDataset(res["bigqueryDataset"], d, config)); err != nil { + return fmt.Errorf("Error reading Listing: %s", err) + } + + return nil +} + +func resourceBigqueryAnalyticsHubListingUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Listing: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandBigqueryAnalyticsHubListingDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandBigqueryAnalyticsHubListingDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + primaryContactProp, err := 
expandBigqueryAnalyticsHubListingPrimaryContact(d.Get("primary_contact"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("primary_contact"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, primaryContactProp)) { + obj["primaryContact"] = primaryContactProp + } + documentationProp, err := expandBigqueryAnalyticsHubListingDocumentation(d.Get("documentation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("documentation"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, documentationProp)) { + obj["documentation"] = documentationProp + } + iconProp, err := expandBigqueryAnalyticsHubListingIcon(d.Get("icon"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("icon"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, iconProp)) { + obj["icon"] = iconProp + } + requestAccessProp, err := expandBigqueryAnalyticsHubListingRequestAccess(d.Get("request_access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("request_access"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, requestAccessProp)) { + obj["requestAccess"] = requestAccessProp + } + dataProviderProp, err := expandBigqueryAnalyticsHubListingDataProvider(d.Get("data_provider"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_provider"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dataProviderProp)) { + obj["dataProvider"] = dataProviderProp + } + publisherProp, err := expandBigqueryAnalyticsHubListingPublisher(d.Get("publisher"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("publisher"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, publisherProp)) { + obj["publisher"] = publisherProp + } + categoriesProp, err := 
expandBigqueryAnalyticsHubListingCategories(d.Get("categories"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("categories"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, categoriesProp)) { + obj["categories"] = categoriesProp + } + bigqueryDatasetProp, err := expandBigqueryAnalyticsHubListingBigqueryDataset(d.Get("bigquery_dataset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bigquery_dataset"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bigqueryDatasetProp)) { + obj["bigqueryDataset"] = bigqueryDatasetProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Listing %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("primary_contact") { + updateMask = append(updateMask, "primaryContact") + } + + if d.HasChange("documentation") { + updateMask = append(updateMask, "documentation") + } + + if d.HasChange("icon") { + updateMask = append(updateMask, "icon") + } + + if d.HasChange("request_access") { + updateMask = append(updateMask, "requestAccess") + } + + if d.HasChange("data_provider") { + updateMask = append(updateMask, "dataProvider") + } + + if d.HasChange("publisher") { + updateMask = append(updateMask, "publisher") + } + + if d.HasChange("categories") { + updateMask = append(updateMask, "categories") + } + + if d.HasChange("bigquery_dataset") { + updateMask = append(updateMask, "bigqueryDataset") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = 
transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Listing %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Listing %q: %#v", d.Id(), res) + } + + return resourceBigqueryAnalyticsHubListingRead(d, meta) +} + +func resourceBigqueryAnalyticsHubListingDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Listing: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryAnalyticsHubBasePath}}projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Listing %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return 
transport_tpg.HandleNotFoundError(err, d, "Listing") + } + + log.Printf("[DEBUG] Finished deleting Listing %q: %#v", d.Id(), res) + return nil +} + +func resourceBigqueryAnalyticsHubListingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/dataExchanges/(?P[^/]+)/listings/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenBigqueryAnalyticsHubListingName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubListingDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubListingDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubListingPrimaryContact(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubListingDocumentation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubListingIcon(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubListingRequestAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenBigqueryAnalyticsHubListingDataProvider(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenBigqueryAnalyticsHubListingDataProviderName(original["name"], d, config) + transformed["primary_contact"] = + flattenBigqueryAnalyticsHubListingDataProviderPrimaryContact(original["primaryContact"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryAnalyticsHubListingDataProviderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubListingDataProviderPrimaryContact(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubListingPublisher(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenBigqueryAnalyticsHubListingPublisherName(original["name"], d, config) + transformed["primary_contact"] = + flattenBigqueryAnalyticsHubListingPublisherPrimaryContact(original["primaryContact"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryAnalyticsHubListingPublisherName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubListingPublisherPrimaryContact(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubListingCategories(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryAnalyticsHubListingBigqueryDataset(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset"] = + flattenBigqueryAnalyticsHubListingBigqueryDatasetDataset(original["dataset"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryAnalyticsHubListingBigqueryDatasetDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandBigqueryAnalyticsHubListingDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubListingDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubListingPrimaryContact(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubListingDocumentation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubListingIcon(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubListingRequestAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubListingDataProvider(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := 
expandBigqueryAnalyticsHubListingDataProviderName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedPrimaryContact, err := expandBigqueryAnalyticsHubListingDataProviderPrimaryContact(original["primary_contact"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimaryContact); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["primaryContact"] = transformedPrimaryContact + } + + return transformed, nil +} + +func expandBigqueryAnalyticsHubListingDataProviderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubListingDataProviderPrimaryContact(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubListingPublisher(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandBigqueryAnalyticsHubListingPublisherName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedPrimaryContact, err := expandBigqueryAnalyticsHubListingPublisherPrimaryContact(original["primary_contact"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimaryContact); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["primaryContact"] = transformedPrimaryContact + } + + 
return transformed, nil +} + +func expandBigqueryAnalyticsHubListingPublisherName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubListingPublisherPrimaryContact(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubListingCategories(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryAnalyticsHubListingBigqueryDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDataset, err := expandBigqueryAnalyticsHubListingBigqueryDatasetDataset(original["dataset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataset"] = transformedDataset + } + + return transformed, nil +} + +func expandBigqueryAnalyticsHubListingBigqueryDatasetDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryconnection/iam_bigquery_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryconnection/iam_bigquery_connection.go new file mode 100644 index 0000000000..d796c87387 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryconnection/iam_bigquery_connection.go @@ -0,0 +1,245 @@ +// Copyright (c) 
HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package bigqueryconnection + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var BigqueryConnectionConnectionIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "connection_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type BigqueryConnectionConnectionIamUpdater struct { + project string + location string + connectionId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func BigqueryConnectionConnectionIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: 
%s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("connection_id"); ok { + values["connection_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("connection_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &BigqueryConnectionConnectionIamUpdater{ + project: values["project"], + location: values["location"], + connectionId: values["connection_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("connection_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting connection_id: %s", err) + } + + return u, nil +} + +func BigqueryConnectionConnectionIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + 
values[k] = v + } + + u := &BigqueryConnectionConnectionIamUpdater{ + project: values["project"], + location: values["location"], + connectionId: values["connection_id"], + d: d, + Config: config, + } + if err := d.Set("connection_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting connection_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *BigqueryConnectionConnectionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyConnectionUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *BigqueryConnectionConnectionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyConnectionUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *BigqueryConnectionConnectionIamUpdater) qualifyConnectionUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{BigqueryConnectionBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/connections/%s", u.project, u.location, u.connectionId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *BigqueryConnectionConnectionIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/connections/%s", u.project, u.location, u.connectionId) +} + +func (u *BigqueryConnectionConnectionIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-bigqueryconnection-connection-%s", u.GetResourceId()) +} + +func (u *BigqueryConnectionConnectionIamUpdater) DescribeResource() string { + return fmt.Sprintf("bigqueryconnection connection %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryconnection/resource_bigquery_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryconnection/resource_bigquery_connection.go new file mode 100644 index 0000000000..bbc851a07c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryconnection/resource_bigquery_connection.go @@ -0,0 +1,1194 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package bigqueryconnection + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceBigqueryConnectionConnection() *schema.Resource { + return &schema.Resource{ + Create: resourceBigqueryConnectionConnectionCreate, + Read: resourceBigqueryConnectionConnectionRead, + Update: resourceBigqueryConnectionConnectionUpdate, + Delete: resourceBigqueryConnectionConnectionDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBigqueryConnectionConnectionImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "aws": { + Type: schema.TypeList, + Optional: true, + Description: `Connection properties specific to Amazon Web Services.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_role": { + Type: schema.TypeList, + Required: true, + Description: `Authentication using Google owned service account to assume into customer's AWS IAM Role.`, + MaxItems: 1, + Elem: &schema.Resource{ + 
Schema: map[string]*schema.Schema{ + "iam_role_id": { + Type: schema.TypeString, + Required: true, + Description: `The user’s AWS IAM Role that trusts the Google-owned AWS IAM user Connection.`, + }, + "identity": { + Type: schema.TypeString, + Computed: true, + Description: `A unique Google-owned and Google-generated identity for the Connection. This identity will be used to access the user's AWS IAM Role.`, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"}, + }, + "azure": { + Type: schema.TypeList, + Optional: true, + Description: `Container for connection properties specific to Azure.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "customer_tenant_id": { + Type: schema.TypeString, + Required: true, + Description: `The id of customer's directory that host the data.`, + }, + "federated_application_client_id": { + Type: schema.TypeString, + Optional: true, + Description: `The Azure Application (client) ID where the federated credentials will be hosted.`, + }, + "application": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the Azure Active Directory Application.`, + }, + "client_id": { + Type: schema.TypeString, + Computed: true, + Description: `The client id of the Azure Active Directory Application.`, + }, + "identity": { + Type: schema.TypeString, + Computed: true, + Description: `A unique Google-owned and Google-generated identity for the Connection. 
This identity will be used to access the user's Azure Active Directory Application.`, + }, + "object_id": { + Type: schema.TypeString, + Computed: true, + Description: `The object id of the Azure Active Directory Application.`, + }, + "redirect_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The URL user will be redirected to after granting consent during connection setup.`, + }, + }, + }, + ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"}, + }, + "cloud_resource": { + Type: schema.TypeList, + Optional: true, + Description: `Container for connection properties for delegation of access to GCP resources.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service_account_id": { + Type: schema.TypeString, + Computed: true, + Description: `The account ID of the service created for the purpose of this connection.`, + }, + }, + }, + ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"}, + }, + "cloud_spanner": { + Type: schema.TypeList, + Optional: true, + Description: `Connection properties specific to Cloud Spanner`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "database": { + Type: schema.TypeString, + Required: true, + Description: `Cloud Spanner database in the form 'project/instance/database'`, + }, + "use_parallelism": { + Type: schema.TypeBool, + Optional: true, + Description: `If parallelism should be used when reading from Cloud Spanner`, + }, + "use_serverless_analytics": { + Type: schema.TypeBool, + Optional: true, + Description: `If the serverless analytics service should be used to read data from Cloud Spanner. 
useParallelism must be set when using serverless analytics`, + }, + }, + }, + ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"}, + }, + "cloud_sql": { + Type: schema.TypeList, + Optional: true, + Description: `Connection properties specific to the Cloud SQL.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "credential": { + Type: schema.TypeList, + Required: true, + Description: `Cloud SQL properties.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "password": { + Type: schema.TypeString, + Required: true, + Description: `Password for database.`, + Sensitive: true, + }, + "username": { + Type: schema.TypeString, + Required: true, + Description: `Username for database.`, + }, + }, + }, + }, + "database": { + Type: schema.TypeString, + Required: true, + Description: `Database name.`, + }, + "instance_id": { + Type: schema.TypeString, + Required: true, + Description: `Cloud SQL instance ID in the form project:location:instance.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"DATABASE_TYPE_UNSPECIFIED", "POSTGRES", "MYSQL"}), + Description: `Type of the Cloud SQL database. 
Possible values: ["DATABASE_TYPE_UNSPECIFIED", "POSTGRES", "MYSQL"]`, + }, + "service_account_id": { + Type: schema.TypeString, + Computed: true, + Description: `When the connection is used in the context of an operation in BigQuery, this service account will serve as the identity being used for connecting to the CloudSQL instance specified in this connection.`, + }, + }, + }, + ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"}, + }, + "connection_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Optional connection id that should be assigned to the created connection.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A descriptive description for the connection`, + }, + "friendly_name": { + Type: schema.TypeString, + Optional: true, + Description: `A descriptive name for the connection`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The geographic location where the connection should reside. +Cloud SQL instance must be in the same location as the connection +with following exceptions: Cloud SQL us-central1 maps to BigQuery US, Cloud SQL europe-west1 maps to BigQuery EU. +Examples: US, EU, asia-northeast1, us-central1, europe-west1. 
+Spanner Connections same as spanner region +AWS allowed regions are aws-us-east-1 +Azure allowed regions are azure-eastus2`, + }, + "has_credential": { + Type: schema.TypeBool, + Computed: true, + Description: `True if the connection has credential assigned.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the connection in the form of: +"projects/{project_id}/locations/{location_id}/connections/{connectionId}"`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBigqueryConnectionConnectionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + connection_idProp, err := expandBigqueryConnectionConnectionConnectionId(d.Get("connection_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("connection_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(connection_idProp)) && (ok || !reflect.DeepEqual(v, connection_idProp)) { + obj["connection_id"] = connection_idProp + } + friendlyNameProp, err := expandBigqueryConnectionConnectionFriendlyName(d.Get("friendly_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("friendly_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(friendlyNameProp)) && (ok || !reflect.DeepEqual(v, friendlyNameProp)) { + obj["friendlyName"] = friendlyNameProp + } + descriptionProp, err := expandBigqueryConnectionConnectionDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + cloudSqlProp, err := 
expandBigqueryConnectionConnectionCloudSql(d.Get("cloud_sql"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cloud_sql"); !tpgresource.IsEmptyValue(reflect.ValueOf(cloudSqlProp)) && (ok || !reflect.DeepEqual(v, cloudSqlProp)) { + obj["cloudSql"] = cloudSqlProp + } + awsProp, err := expandBigqueryConnectionConnectionAws(d.Get("aws"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("aws"); !tpgresource.IsEmptyValue(reflect.ValueOf(awsProp)) && (ok || !reflect.DeepEqual(v, awsProp)) { + obj["aws"] = awsProp + } + azureProp, err := expandBigqueryConnectionConnectionAzure(d.Get("azure"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("azure"); !tpgresource.IsEmptyValue(reflect.ValueOf(azureProp)) && (ok || !reflect.DeepEqual(v, azureProp)) { + obj["azure"] = azureProp + } + cloudSpannerProp, err := expandBigqueryConnectionConnectionCloudSpanner(d.Get("cloud_spanner"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cloud_spanner"); !tpgresource.IsEmptyValue(reflect.ValueOf(cloudSpannerProp)) && (ok || !reflect.DeepEqual(v, cloudSpannerProp)) { + obj["cloudSpanner"] = cloudSpannerProp + } + cloudResourceProp, err := expandBigqueryConnectionConnectionCloudResource(d.Get("cloud_resource"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cloud_resource"); ok || !reflect.DeepEqual(v, cloudResourceProp) { + obj["cloudResource"] = cloudResourceProp + } + + obj, err = resourceBigqueryConnectionConnectionEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryConnectionBasePath}}projects/{{project}}/locations/{{location}}/connections?connectionId={{connection_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Connection: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return 
fmt.Errorf("Error fetching project for Connection: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Connection: %s", err) + } + if err := d.Set("name", flattenBigqueryConnectionConnectionName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/connections/{{connection_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + if tpgresource.IsEmptyValue(reflect.ValueOf(d.Get("connection_id"))) { + // connection id is set by API when unset and required to GET the connection + // it is set by reading the "name" field rather than a field in the response + if err := d.Set("connection_id", flattenBigqueryConnectionConnectionConnectionId("", d, config)); err != nil { + return fmt.Errorf("Error reading Connection: %s", err) + } + } + + // Reset id to make sure connection_id is not empty + id2, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/connections/{{connection_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id2) + + log.Printf("[DEBUG] Finished creating Connection %q: %#v", d.Id(), res) + + return resourceBigqueryConnectionConnectionRead(d, meta) +} + +func resourceBigqueryConnectionConnectionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryConnectionBasePath}}projects/{{project}}/locations/{{location}}/connections/{{connection_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Connection: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigqueryConnectionConnection %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Connection: %s", err) + } + + if err := d.Set("name", flattenBigqueryConnectionConnectionName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Connection: %s", err) + } + if err := d.Set("connection_id", flattenBigqueryConnectionConnectionConnectionId(res["connection_id"], d, config)); err != nil { + return fmt.Errorf("Error reading Connection: %s", err) + } + if err := d.Set("friendly_name", flattenBigqueryConnectionConnectionFriendlyName(res["friendlyName"], d, config)); err != nil { + return fmt.Errorf("Error reading Connection: %s", err) + } + if err := d.Set("description", flattenBigqueryConnectionConnectionDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Connection: %s", err) + } + if err := d.Set("has_credential", flattenBigqueryConnectionConnectionHasCredential(res["hasCredential"], d, config)); err != nil { + return fmt.Errorf("Error reading Connection: %s", 
err) + } + if err := d.Set("cloud_sql", flattenBigqueryConnectionConnectionCloudSql(res["cloudSql"], d, config)); err != nil { + return fmt.Errorf("Error reading Connection: %s", err) + } + if err := d.Set("aws", flattenBigqueryConnectionConnectionAws(res["aws"], d, config)); err != nil { + return fmt.Errorf("Error reading Connection: %s", err) + } + if err := d.Set("azure", flattenBigqueryConnectionConnectionAzure(res["azure"], d, config)); err != nil { + return fmt.Errorf("Error reading Connection: %s", err) + } + if err := d.Set("cloud_spanner", flattenBigqueryConnectionConnectionCloudSpanner(res["cloudSpanner"], d, config)); err != nil { + return fmt.Errorf("Error reading Connection: %s", err) + } + if err := d.Set("cloud_resource", flattenBigqueryConnectionConnectionCloudResource(res["cloudResource"], d, config)); err != nil { + return fmt.Errorf("Error reading Connection: %s", err) + } + + return nil +} + +func resourceBigqueryConnectionConnectionUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Connection: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + friendlyNameProp, err := expandBigqueryConnectionConnectionFriendlyName(d.Get("friendly_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("friendly_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, friendlyNameProp)) { + obj["friendlyName"] = friendlyNameProp + } + descriptionProp, err := expandBigqueryConnectionConnectionDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + cloudSqlProp, err := expandBigqueryConnectionConnectionCloudSql(d.Get("cloud_sql"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cloud_sql"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cloudSqlProp)) { + obj["cloudSql"] = cloudSqlProp + } + awsProp, err := expandBigqueryConnectionConnectionAws(d.Get("aws"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("aws"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, awsProp)) { + obj["aws"] = awsProp + } + azureProp, err := expandBigqueryConnectionConnectionAzure(d.Get("azure"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("azure"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, azureProp)) { + obj["azure"] = azureProp + } + cloudSpannerProp, err := expandBigqueryConnectionConnectionCloudSpanner(d.Get("cloud_spanner"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cloud_spanner"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cloudSpannerProp)) { + obj["cloudSpanner"] = cloudSpannerProp + } + cloudResourceProp, err := expandBigqueryConnectionConnectionCloudResource(d.Get("cloud_resource"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cloud_resource"); ok || !reflect.DeepEqual(v, cloudResourceProp) { + obj["cloudResource"] = cloudResourceProp + } + + obj, err = resourceBigqueryConnectionConnectionEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryConnectionBasePath}}projects/{{project}}/locations/{{location}}/connections/{{connection_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Connection %q: %#v", d.Id(), obj) + updateMask := []string{} + + if 
d.HasChange("friendly_name") { + updateMask = append(updateMask, "friendlyName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("cloud_sql") { + updateMask = append(updateMask, "cloudSql") + } + + if d.HasChange("aws") { + updateMask = append(updateMask, "aws.access_role.iam_role_id") + } + + if d.HasChange("azure") { + updateMask = append(updateMask, "azure.customer_tenant_id", + "azure.federated_application_client_id") + } + + if d.HasChange("cloud_spanner") { + updateMask = append(updateMask, "cloudSpanner") + } + + if d.HasChange("cloud_resource") { + updateMask = append(updateMask, "cloudResource") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Connection %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Connection %q: %#v", d.Id(), res) + } + + return resourceBigqueryConnectionConnectionRead(d, meta) +} + +func resourceBigqueryConnectionConnectionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Connection: %s", err) + } 
+ billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryConnectionBasePath}}projects/{{project}}/locations/{{location}}/connections/{{connection_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Connection %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Connection") + } + + log.Printf("[DEBUG] Finished deleting Connection %q: %#v", d.Id(), res) + return nil +} + +func resourceBigqueryConnectionConnectionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/connections/{{connection_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenBigqueryConnectionConnectionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionConnectionId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + parts := strings.Split(d.Get("name").(string), "/") + return parts[len(parts)-1] +} + +func 
flattenBigqueryConnectionConnectionFriendlyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionHasCredential(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionCloudSql(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["instance_id"] = + flattenBigqueryConnectionConnectionCloudSqlInstanceId(original["instanceId"], d, config) + transformed["database"] = + flattenBigqueryConnectionConnectionCloudSqlDatabase(original["database"], d, config) + transformed["credential"] = + flattenBigqueryConnectionConnectionCloudSqlCredential(original["credential"], d, config) + transformed["type"] = + flattenBigqueryConnectionConnectionCloudSqlType(original["type"], d, config) + transformed["service_account_id"] = + flattenBigqueryConnectionConnectionCloudSqlServiceAccountId(original["serviceAccountId"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryConnectionConnectionCloudSqlInstanceId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionCloudSqlDatabase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionCloudSqlCredential(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return []interface{}{ + map[string]interface{}{ + "username": d.Get("cloud_sql.0.credential.0.username"), + "password": 
d.Get("cloud_sql.0.credential.0.password"), + }, + } +} + +func flattenBigqueryConnectionConnectionCloudSqlType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionCloudSqlServiceAccountId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionAws(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["access_role"] = + flattenBigqueryConnectionConnectionAwsAccessRole(original["accessRole"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryConnectionConnectionAwsAccessRole(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["iam_role_id"] = + flattenBigqueryConnectionConnectionAwsAccessRoleIamRoleId(original["iamRoleId"], d, config) + transformed["identity"] = + flattenBigqueryConnectionConnectionAwsAccessRoleIdentity(original["identity"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryConnectionConnectionAwsAccessRoleIamRoleId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionAwsAccessRoleIdentity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionAzure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := 
make(map[string]interface{}) + transformed["application"] = + flattenBigqueryConnectionConnectionAzureApplication(original["application"], d, config) + transformed["client_id"] = + flattenBigqueryConnectionConnectionAzureClientId(original["clientId"], d, config) + transformed["object_id"] = + flattenBigqueryConnectionConnectionAzureObjectId(original["objectId"], d, config) + transformed["customer_tenant_id"] = + flattenBigqueryConnectionConnectionAzureCustomerTenantId(original["customerTenantId"], d, config) + transformed["federated_application_client_id"] = + flattenBigqueryConnectionConnectionAzureFederatedApplicationClientId(original["federatedApplicationClientId"], d, config) + transformed["redirect_uri"] = + flattenBigqueryConnectionConnectionAzureRedirectUri(original["redirectUri"], d, config) + transformed["identity"] = + flattenBigqueryConnectionConnectionAzureIdentity(original["identity"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryConnectionConnectionAzureApplication(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionAzureClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionAzureObjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionAzureCustomerTenantId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionAzureFederatedApplicationClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionAzureRedirectUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionAzureIdentity(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionCloudSpanner(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["database"] = + flattenBigqueryConnectionConnectionCloudSpannerDatabase(original["database"], d, config) + transformed["use_parallelism"] = + flattenBigqueryConnectionConnectionCloudSpannerUseParallelism(original["useParallelism"], d, config) + transformed["use_serverless_analytics"] = + flattenBigqueryConnectionConnectionCloudSpannerUseServerlessAnalytics(original["useServerlessAnalytics"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryConnectionConnectionCloudSpannerDatabase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionCloudSpannerUseParallelism(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionCloudSpannerUseServerlessAnalytics(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryConnectionConnectionCloudResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["service_account_id"] = + flattenBigqueryConnectionConnectionCloudResourceServiceAccountId(original["serviceAccountId"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryConnectionConnectionCloudResourceServiceAccountId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + 
+func expandBigqueryConnectionConnectionConnectionId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionFriendlyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionCloudSql(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInstanceId, err := expandBigqueryConnectionConnectionCloudSqlInstanceId(original["instance_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInstanceId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["instanceId"] = transformedInstanceId + } + + transformedDatabase, err := expandBigqueryConnectionConnectionCloudSqlDatabase(original["database"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["database"] = transformedDatabase + } + + transformedCredential, err := expandBigqueryConnectionConnectionCloudSqlCredential(original["credential"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCredential); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["credential"] = transformedCredential + } + + transformedType, err := expandBigqueryConnectionConnectionCloudSqlType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedServiceAccountId, err := expandBigqueryConnectionConnectionCloudSqlServiceAccountId(original["service_account_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccountId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAccountId"] = transformedServiceAccountId + } + + return transformed, nil +} + +func expandBigqueryConnectionConnectionCloudSqlInstanceId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionCloudSqlDatabase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionCloudSqlCredential(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUsername, err := expandBigqueryConnectionConnectionCloudSqlCredentialUsername(original["username"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["username"] = transformedUsername + } + + transformedPassword, err := expandBigqueryConnectionConnectionCloudSqlCredentialPassword(original["password"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["password"] = transformedPassword + } + + return transformed, nil +} + +func expandBigqueryConnectionConnectionCloudSqlCredentialUsername(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionCloudSqlCredentialPassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionCloudSqlType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionCloudSqlServiceAccountId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionAws(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAccessRole, err := expandBigqueryConnectionConnectionAwsAccessRole(original["access_role"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAccessRole); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accessRole"] = transformedAccessRole + } + + return transformed, nil +} + +func expandBigqueryConnectionConnectionAwsAccessRole(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIamRoleId, err := expandBigqueryConnectionConnectionAwsAccessRoleIamRoleId(original["iam_role_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIamRoleId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["iamRoleId"] = 
transformedIamRoleId + } + + transformedIdentity, err := expandBigqueryConnectionConnectionAwsAccessRoleIdentity(original["identity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identity"] = transformedIdentity + } + + return transformed, nil +} + +func expandBigqueryConnectionConnectionAwsAccessRoleIamRoleId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionAwsAccessRoleIdentity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionAzure(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedApplication, err := expandBigqueryConnectionConnectionAzureApplication(original["application"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedApplication); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["application"] = transformedApplication + } + + transformedClientId, err := expandBigqueryConnectionConnectionAzureClientId(original["client_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientId"] = transformedClientId + } + + transformedObjectId, err := expandBigqueryConnectionConnectionAzureObjectId(original["object_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedObjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["objectId"] = transformedObjectId + } + + transformedCustomerTenantId, err := expandBigqueryConnectionConnectionAzureCustomerTenantId(original["customer_tenant_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCustomerTenantId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["customerTenantId"] = transformedCustomerTenantId + } + + transformedFederatedApplicationClientId, err := expandBigqueryConnectionConnectionAzureFederatedApplicationClientId(original["federated_application_client_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFederatedApplicationClientId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["federatedApplicationClientId"] = transformedFederatedApplicationClientId + } + + transformedRedirectUri, err := expandBigqueryConnectionConnectionAzureRedirectUri(original["redirect_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRedirectUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["redirectUri"] = transformedRedirectUri + } + + transformedIdentity, err := expandBigqueryConnectionConnectionAzureIdentity(original["identity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identity"] = transformedIdentity + } + + return transformed, nil +} + +func expandBigqueryConnectionConnectionAzureApplication(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionAzureClientId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionAzureObjectId(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionAzureCustomerTenantId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionAzureFederatedApplicationClientId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionAzureRedirectUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionAzureIdentity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionCloudSpanner(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDatabase, err := expandBigqueryConnectionConnectionCloudSpannerDatabase(original["database"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["database"] = transformedDatabase + } + + transformedUseParallelism, err := expandBigqueryConnectionConnectionCloudSpannerUseParallelism(original["use_parallelism"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUseParallelism); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["useParallelism"] = transformedUseParallelism + } + + transformedUseServerlessAnalytics, err := expandBigqueryConnectionConnectionCloudSpannerUseServerlessAnalytics(original["use_serverless_analytics"], d, config) + if err 
!= nil { + return nil, err + } else if val := reflect.ValueOf(transformedUseServerlessAnalytics); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["useServerlessAnalytics"] = transformedUseServerlessAnalytics + } + + return transformed, nil +} + +func expandBigqueryConnectionConnectionCloudSpannerDatabase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionCloudSpannerUseParallelism(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionCloudSpannerUseServerlessAnalytics(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryConnectionConnectionCloudResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceAccountId, err := expandBigqueryConnectionConnectionCloudResourceServiceAccountId(original["service_account_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccountId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAccountId"] = transformedServiceAccountId + } + + return transformed, nil +} + +func expandBigqueryConnectionConnectionCloudResourceServiceAccountId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceBigqueryConnectionConnectionEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // connection_id is needed to qualify the URL but cannot be 
sent in the body + delete(obj, "connection_id") + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryconnection/resource_bigquery_connection_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryconnection/resource_bigquery_connection_sweeper.go new file mode 100644 index 0000000000..543120e799 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryconnection/resource_bigquery_connection_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigqueryconnection + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("BigqueryConnectionConnection", testSweepBigqueryConnectionConnection) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepBigqueryConnectionConnection(region string) error { + resourceName := "BigqueryConnectionConnection" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://bigqueryconnection.googleapis.com/v1/projects/{{project}}/locations/{{location}}/connections", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: 
config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["connections"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://bigqueryconnection.googleapis.com/v1/projects/{{project}}/locations/{{location}}/connections/{{connection_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", 
nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy/iam_bigquery_datapolicy_data_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy/iam_bigquery_datapolicy_data_policy.go new file mode 100644 index 0000000000..3128516bfa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy/iam_bigquery_datapolicy_data_policy.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigquerydatapolicy + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var BigqueryDatapolicyDataPolicyIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "data_policy_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type BigqueryDatapolicyDataPolicyIamUpdater struct { + project string + location string + dataPolicyId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func BigqueryDatapolicyDataPolicyIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("data_policy_id"); ok { + values["data_policy_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("data_policy_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &BigqueryDatapolicyDataPolicyIamUpdater{ + project: values["project"], + location: values["location"], + dataPolicyId: values["data_policy_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("data_policy_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting data_policy_id: %s", err) + } + + return u, nil +} + +func BigqueryDatapolicyDataPolicyIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &BigqueryDatapolicyDataPolicyIamUpdater{ + project: values["project"], + location: values["location"], + dataPolicyId: values["data_policy_id"], + d: d, + Config: config, + } + if err := d.Set("data_policy_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting data_policy_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *BigqueryDatapolicyDataPolicyIamUpdater) GetResourceIamPolicy() 
(*cloudresourcemanager.Policy, error) { + url, err := u.qualifyDataPolicyUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *BigqueryDatapolicyDataPolicyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyDataPolicyUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *BigqueryDatapolicyDataPolicyIamUpdater) qualifyDataPolicyUrl(methodIdentifier string) (string, 
error) { + urlTemplate := fmt.Sprintf("{{BigqueryDatapolicyBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/dataPolicies/%s", u.project, u.location, u.dataPolicyId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *BigqueryDatapolicyDataPolicyIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/dataPolicies/%s", u.project, u.location, u.dataPolicyId) +} + +func (u *BigqueryDatapolicyDataPolicyIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-bigquerydatapolicy-datapolicy-%s", u.GetResourceId()) +} + +func (u *BigqueryDatapolicyDataPolicyIamUpdater) DescribeResource() string { + return fmt.Sprintf("bigquerydatapolicy datapolicy %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy/resource_bigquery_datapolicy_data_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy/resource_bigquery_datapolicy_data_policy.go new file mode 100644 index 0000000000..c73d0a939c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy/resource_bigquery_datapolicy_data_policy.go @@ -0,0 +1,463 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigquerydatapolicy + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceBigqueryDatapolicyDataPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceBigqueryDatapolicyDataPolicyCreate, + Read: resourceBigqueryDatapolicyDataPolicyRead, + Update: resourceBigqueryDatapolicyDataPolicyUpdate, + Delete: resourceBigqueryDatapolicyDataPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBigqueryDatapolicyDataPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "data_policy_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `User-assigned (human readable) ID of the data policy that needs to be unique within a project. Used as {dataPolicyId} in part of the resource name.`, + }, + "data_policy_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"COLUMN_LEVEL_SECURITY_POLICY", "DATA_MASKING_POLICY"}), + Description: `The enrollment level of the service. 
Possible values: ["COLUMN_LEVEL_SECURITY_POLICY", "DATA_MASKING_POLICY"]`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the location of the data policy.`, + }, + "policy_tag": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `Policy tag resource name, in the format of projects/{project_number}/locations/{locationId}/taxonomies/{taxonomyId}/policyTags/{policyTag_id}.`, + }, + "data_masking_policy": { + Type: schema.TypeList, + Optional: true, + Description: `The data masking policy that specifies the data masking rule to use.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "predefined_expression": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SHA256", "ALWAYS_NULL", "DEFAULT_MASKING_VALUE", "LAST_FOUR_CHARACTERS", "FIRST_FOUR_CHARACTERS", "EMAIL_MASK", "DATE_YEAR_MASK"}), + Description: `The available masking rules. Learn more here: https://cloud.google.com/bigquery/docs/column-data-masking-intro#masking_options. 
Possible values: ["SHA256", "ALWAYS_NULL", "DEFAULT_MASKING_VALUE", "LAST_FOUR_CHARACTERS", "FIRST_FOUR_CHARACTERS", "EMAIL_MASK", "DATE_YEAR_MASK"]`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Resource name of this data policy, in the format of projects/{project_number}/locations/{locationId}/dataPolicies/{dataPolicyId}.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBigqueryDatapolicyDataPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + dataPolicyIdProp, err := expandBigqueryDatapolicyDataPolicyDataPolicyId(d.Get("data_policy_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_policy_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(dataPolicyIdProp)) && (ok || !reflect.DeepEqual(v, dataPolicyIdProp)) { + obj["dataPolicyId"] = dataPolicyIdProp + } + policyTagProp, err := expandBigqueryDatapolicyDataPolicyPolicyTag(d.Get("policy_tag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("policy_tag"); !tpgresource.IsEmptyValue(reflect.ValueOf(policyTagProp)) && (ok || !reflect.DeepEqual(v, policyTagProp)) { + obj["policyTag"] = policyTagProp + } + dataPolicyTypeProp, err := expandBigqueryDatapolicyDataPolicyDataPolicyType(d.Get("data_policy_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_policy_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(dataPolicyTypeProp)) && (ok || !reflect.DeepEqual(v, dataPolicyTypeProp)) { + obj["dataPolicyType"] = dataPolicyTypeProp + } + dataMaskingPolicyProp, err := expandBigqueryDatapolicyDataPolicyDataMaskingPolicy(d.Get("data_masking_policy"), d, config) + if err != 
nil { + return err + } else if v, ok := d.GetOkExists("data_masking_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(dataMaskingPolicyProp)) && (ok || !reflect.DeepEqual(v, dataMaskingPolicyProp)) { + obj["dataMaskingPolicy"] = dataMaskingPolicyProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryDatapolicyBasePath}}projects/{{project}}/locations/{{location}}/dataPolicies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new DataPolicy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DataPolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating DataPolicy: %s", err) + } + if err := d.Set("name", flattenBigqueryDatapolicyDataPolicyName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/dataPolicies/{{data_policy_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating DataPolicy %q: %#v", d.Id(), res) + + return resourceBigqueryDatapolicyDataPolicyRead(d, meta) +} + +func resourceBigqueryDatapolicyDataPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + 
url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryDatapolicyBasePath}}projects/{{project}}/locations/{{location}}/dataPolicies/{{data_policy_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DataPolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigqueryDatapolicyDataPolicy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading DataPolicy: %s", err) + } + + if err := d.Set("name", flattenBigqueryDatapolicyDataPolicyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading DataPolicy: %s", err) + } + if err := d.Set("data_policy_id", flattenBigqueryDatapolicyDataPolicyDataPolicyId(res["dataPolicyId"], d, config)); err != nil { + return fmt.Errorf("Error reading DataPolicy: %s", err) + } + if err := d.Set("policy_tag", flattenBigqueryDatapolicyDataPolicyPolicyTag(res["policyTag"], d, config)); err != nil { + return fmt.Errorf("Error reading DataPolicy: %s", err) + } + if err := d.Set("data_policy_type", flattenBigqueryDatapolicyDataPolicyDataPolicyType(res["dataPolicyType"], d, config)); err != nil { + return fmt.Errorf("Error reading DataPolicy: %s", err) + } + if err := d.Set("data_masking_policy", flattenBigqueryDatapolicyDataPolicyDataMaskingPolicy(res["dataMaskingPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading DataPolicy: %s", err) + } + + return nil +} + +func resourceBigqueryDatapolicyDataPolicyUpdate(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DataPolicy: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + policyTagProp, err := expandBigqueryDatapolicyDataPolicyPolicyTag(d.Get("policy_tag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("policy_tag"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, policyTagProp)) { + obj["policyTag"] = policyTagProp + } + dataPolicyTypeProp, err := expandBigqueryDatapolicyDataPolicyDataPolicyType(d.Get("data_policy_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_policy_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dataPolicyTypeProp)) { + obj["dataPolicyType"] = dataPolicyTypeProp + } + dataMaskingPolicyProp, err := expandBigqueryDatapolicyDataPolicyDataMaskingPolicy(d.Get("data_masking_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_masking_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dataMaskingPolicyProp)) { + obj["dataMaskingPolicy"] = dataMaskingPolicyProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryDatapolicyBasePath}}projects/{{project}}/locations/{{location}}/dataPolicies/{{data_policy_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating DataPolicy %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("policy_tag") { + updateMask = append(updateMask, "policyTag") + } + + if d.HasChange("data_policy_type") { + updateMask = append(updateMask, "dataPolicyType") + } + + if d.HasChange("data_masking_policy") { + 
updateMask = append(updateMask, "dataMaskingPolicy") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating DataPolicy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating DataPolicy %q: %#v", d.Id(), res) + } + + return resourceBigqueryDatapolicyDataPolicyRead(d, meta) +} + +func resourceBigqueryDatapolicyDataPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DataPolicy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryDatapolicyBasePath}}projects/{{project}}/locations/{{location}}/dataPolicies/{{data_policy_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting DataPolicy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + 
RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DataPolicy") + } + + log.Printf("[DEBUG] Finished deleting DataPolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceBigqueryDatapolicyDataPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/dataPolicies/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/dataPolicies/{{data_policy_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenBigqueryDatapolicyDataPolicyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDatapolicyDataPolicyDataPolicyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDatapolicyDataPolicyPolicyTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDatapolicyDataPolicyDataPolicyType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDatapolicyDataPolicyDataMaskingPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["predefined_expression"] = + 
flattenBigqueryDatapolicyDataPolicyDataMaskingPolicyPredefinedExpression(original["predefinedExpression"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryDatapolicyDataPolicyDataMaskingPolicyPredefinedExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandBigqueryDatapolicyDataPolicyDataPolicyId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryDatapolicyDataPolicyPolicyTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryDatapolicyDataPolicyDataPolicyType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryDatapolicyDataPolicyDataMaskingPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPredefinedExpression, err := expandBigqueryDatapolicyDataPolicyDataMaskingPolicyPredefinedExpression(original["predefined_expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPredefinedExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["predefinedExpression"] = transformedPredefinedExpression + } + + return transformed, nil +} + +func expandBigqueryDatapolicyDataPolicyDataMaskingPolicyPredefinedExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy/resource_bigquery_datapolicy_data_policy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy/resource_bigquery_datapolicy_data_policy_sweeper.go new file mode 100644 index 0000000000..e70afd9221 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy/resource_bigquery_datapolicy_data_policy_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigquerydatapolicy + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("BigqueryDatapolicyDataPolicy", testSweepBigqueryDatapolicyDataPolicy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepBigqueryDatapolicyDataPolicy(region string) error { + resourceName := "BigqueryDatapolicyDataPolicy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://bigquerydatapolicy.googleapis.com/v1/projects/{{project}}/locations/{{location}}/dataPolicies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: 
config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["dataPolicies"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://bigquerydatapolicy.googleapis.com/v1/projects/{{project}}/locations/{{location}}/dataPolicies/{{data_policy_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", 
nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatatransfer/resource_bigquery_data_transfer_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatatransfer/resource_bigquery_data_transfer_config.go new file mode 100644 index 0000000000..ee5c29fa20 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatatransfer/resource_bigquery_data_transfer_config.go @@ -0,0 +1,961 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package bigquerydatatransfer + +import ( + "context" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var sensitiveParams = []string{"secret_access_key"} + +func sensitiveParamCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + for _, sp := range sensitiveParams { + mapLabel := diff.Get("params." + sp).(string) + authLabel := diff.Get("sensitive_params.0." 
+ sp).(string) + if mapLabel != "" && authLabel != "" { + return fmt.Errorf("Sensitive param [%s] cannot be set in both `params` and the `sensitive_params` block.", sp) + } + } + return nil +} + +// This customizeDiff is to use ForceNew for params fields data_path_template and +// destination_table_name_template only if the value of "data_source_id" is "google_cloud_storage". +func ParamsCustomizeDiffFunc(diff tpgresource.TerraformResourceDiff) error { + old, new := diff.GetChange("params") + dsId := diff.Get("data_source_id").(string) + oldParams := old.(map[string]interface{}) + newParams := new.(map[string]interface{}) + var err error + + if dsId == "google_cloud_storage" { + if oldParams["data_path_template"] != nil && newParams["data_path_template"] != nil && oldParams["data_path_template"].(string) != newParams["data_path_template"].(string) { + err = diff.ForceNew("params") + if err != nil { + return fmt.Errorf("ForceNew failed for params, old - %v and new - %v", oldParams, newParams) + } + return nil + } + + if oldParams["destination_table_name_template"] != nil && newParams["destination_table_name_template"] != nil && oldParams["destination_table_name_template"].(string) != newParams["destination_table_name_template"].(string) { + err = diff.ForceNew("params") + if err != nil { + return fmt.Errorf("ForceNew failed for params, old - %v and new - %v", oldParams, newParams) + } + return nil + } + } + + return nil +} + +func paramsCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + return ParamsCustomizeDiffFunc(diff) +} + +func ResourceBigqueryDataTransferConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceBigqueryDataTransferConfigCreate, + Read: resourceBigqueryDataTransferConfigRead, + Update: resourceBigqueryDataTransferConfigUpdate, + Delete: resourceBigqueryDataTransferConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBigqueryDataTransferConfigImport, + }, + + Timeouts: 
&schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + sensitiveParamCustomizeDiff, + paramsCustomizeDiff, + ), + + Schema: map[string]*schema.Schema{ + "data_source_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The data source id. Cannot be changed once the transfer config is created.`, + }, + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `The user specified display name for the transfer config.`, + }, + "params": { + Type: schema.TypeMap, + Required: true, + Description: `Parameters specific to each data source. For more information see the bq tab in the 'Setting up a data transfer' +section for each data source. For example the parameters for Cloud Storage transfers are listed here: +https://cloud.google.com/bigquery-transfer/docs/cloud-storage-transfer#bq + +**NOTE** : If you are attempting to update a parameter that cannot be updated (due to api limitations) [please force recreation of the resource](https://www.terraform.io/cli/state/taint#forcing-re-creation-of-resources).`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "data_refresh_window_days": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of days to look back to automatically refresh the data. +For example, if dataRefreshWindowDays = 10, then every day BigQuery +reingests data for [today-10, today-1], rather than ingesting data for +just [today-1]. Only valid if the data source supports the feature. 
+Set the value to 0 to use the default value.`, + }, + "destination_dataset_id": { + Type: schema.TypeString, + Optional: true, + Description: `The BigQuery target dataset id.`, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `When set to true, no runs are scheduled for a given transfer.`, + }, + "email_preferences": { + Type: schema.TypeList, + Optional: true, + Description: `Email notifications will be sent according to these preferences to the +email address of the user who owns this transfer config.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_failure_email": { + Type: schema.TypeBool, + Required: true, + Description: `If true, email notifications will be sent on transfer run failures.`, + }, + }, + }, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The geographic location where the transfer config should reside. +Examples: US, EU, asia-northeast1. The default value is US.`, + Default: "US", + }, + "notification_pubsub_topic": { + Type: schema.TypeString, + Optional: true, + Description: `Pub/Sub topic where notifications will be sent after transfer runs +associated with this transfer config finish.`, + }, + "schedule": { + Type: schema.TypeString, + Optional: true, + Description: `Data transfer schedule. If the data source does not support a custom +schedule, this should be empty. If it is empty, the default value for +the data source will be used. The specified times are in UTC. Examples +of valid format: 1st,3rd monday of month 15:30, every wed,fri of jan, +jun 13:15, and first sunday of quarter 00:00. 
See more explanation +about the format here: +https://cloud.google.com/appengine/docs/flexible/python/scheduling-jobs-with-cron-yaml#the_schedule_format +NOTE: the granularity should be at least 8 hours, or less frequent.`, + }, + "schedule_options": { + Type: schema.TypeList, + Optional: true, + Description: `Options customizing the data transfer schedule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disable_auto_scheduling": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, automatic scheduling of data transfer runs for this +configuration will be disabled. The runs can be started on ad-hoc +basis using transferConfigs.startManualRuns API. When automatic +scheduling is disabled, the TransferConfig.schedule field will +be ignored.`, + AtLeastOneOf: []string{"schedule_options.0.disable_auto_scheduling", "schedule_options.0.start_time", "schedule_options.0.end_time"}, + }, + "end_time": { + Type: schema.TypeString, + Optional: true, + Description: `Defines time to stop scheduling transfer runs. A transfer run cannot be +scheduled at or after the end time. The end time can be changed at any +moment. The time when a data transfer can be triggered manually is not +limited by this option.`, + AtLeastOneOf: []string{"schedule_options.0.disable_auto_scheduling", "schedule_options.0.start_time", "schedule_options.0.end_time"}, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + Description: `Specifies time to start scheduling transfer runs. The first run will be +scheduled at or after the start time according to a recurrence pattern +defined in the schedule string. The start time can be changed at any +moment. 
The time when a data transfer can be triggered manually is not +limited by this option.`, + AtLeastOneOf: []string{"schedule_options.0.disable_auto_scheduling", "schedule_options.0.start_time", "schedule_options.0.end_time"}, + }, + }, + }, + }, + "sensitive_params": { + Type: schema.TypeList, + Optional: true, + Description: `Different parameters are configured primarily using the the 'params' field on this +resource. This block contains the parameters which contain secrets or passwords so that they can be marked +sensitive and hidden from plan output. The name of the field, eg: secret_access_key, will be the key +in the 'params' map in the api request. + +Credentials may not be specified in both locations and will cause an error. Changing from one location +to a different credential configuration in the config will require an apply to update state.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_access_key": { + Type: schema.TypeString, + Required: true, + Description: `The Secret Access Key of the AWS account transferring data from.`, + Sensitive: true, + }, + }, + }, + }, + "service_account_name": { + Type: schema.TypeString, + Optional: true, + Description: `Service account email. If this field is set, transfer config will +be created with this service account credentials. It requires that +requesting user calling this API has permissions to act as this service account.`, + Default: "", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the transfer config. Transfer config names have the +form projects/{projectId}/locations/{location}/transferConfigs/{configId}. +Where configId is usually a uuid, but this is not required. 
+The name is ignored when creating a transfer config.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBigqueryDataTransferConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandBigqueryDataTransferConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + destinationDatasetIdProp, err := expandBigqueryDataTransferConfigDestinationDatasetId(d.Get("destination_dataset_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("destination_dataset_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(destinationDatasetIdProp)) && (ok || !reflect.DeepEqual(v, destinationDatasetIdProp)) { + obj["destinationDatasetId"] = destinationDatasetIdProp + } + dataSourceIdProp, err := expandBigqueryDataTransferConfigDataSourceId(d.Get("data_source_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_source_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(dataSourceIdProp)) && (ok || !reflect.DeepEqual(v, dataSourceIdProp)) { + obj["dataSourceId"] = dataSourceIdProp + } + scheduleProp, err := expandBigqueryDataTransferConfigSchedule(d.Get("schedule"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("schedule"); !tpgresource.IsEmptyValue(reflect.ValueOf(scheduleProp)) && (ok || !reflect.DeepEqual(v, scheduleProp)) { + obj["schedule"] = scheduleProp + } + scheduleOptionsProp, err := 
expandBigqueryDataTransferConfigScheduleOptions(d.Get("schedule_options"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("schedule_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(scheduleOptionsProp)) && (ok || !reflect.DeepEqual(v, scheduleOptionsProp)) { + obj["scheduleOptions"] = scheduleOptionsProp + } + emailPreferencesProp, err := expandBigqueryDataTransferConfigEmailPreferences(d.Get("email_preferences"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("email_preferences"); !tpgresource.IsEmptyValue(reflect.ValueOf(emailPreferencesProp)) && (ok || !reflect.DeepEqual(v, emailPreferencesProp)) { + obj["emailPreferences"] = emailPreferencesProp + } + notificationPubsubTopicProp, err := expandBigqueryDataTransferConfigNotificationPubsubTopic(d.Get("notification_pubsub_topic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_pubsub_topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(notificationPubsubTopicProp)) && (ok || !reflect.DeepEqual(v, notificationPubsubTopicProp)) { + obj["notificationPubsubTopic"] = notificationPubsubTopicProp + } + dataRefreshWindowDaysProp, err := expandBigqueryDataTransferConfigDataRefreshWindowDays(d.Get("data_refresh_window_days"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_refresh_window_days"); !tpgresource.IsEmptyValue(reflect.ValueOf(dataRefreshWindowDaysProp)) && (ok || !reflect.DeepEqual(v, dataRefreshWindowDaysProp)) { + obj["dataRefreshWindowDays"] = dataRefreshWindowDaysProp + } + disabledProp, err := expandBigqueryDataTransferConfigDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + paramsProp, err := expandBigqueryDataTransferConfigParams(d.Get("params"), d, config) + 
if err != nil { + return err + } else if v, ok := d.GetOkExists("params"); !tpgresource.IsEmptyValue(reflect.ValueOf(paramsProp)) && (ok || !reflect.DeepEqual(v, paramsProp)) { + obj["params"] = paramsProp + } + + obj, err = resourceBigqueryDataTransferConfigEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryDataTransferBasePath}}projects/{{project}}/locations/{{location}}/transferConfigs?serviceAccountName={{service_account_name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Config: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Config: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IamMemberMissing}, + }) + if err != nil { + return fmt.Errorf("Error creating Config: %s", err) + } + if err := d.Set("name", flattenBigqueryDataTransferConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating Config %q: %#v", d.Id(), res) + + return resourceBigqueryDataTransferConfigRead(d, meta) +} + +func resourceBigqueryDataTransferConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryDataTransferBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Config: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IamMemberMissing}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigqueryDataTransferConfig %q", d.Id())) + } + + res, err = resourceBigqueryDataTransferConfigDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing BigqueryDataTransferConfig because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + + if err := d.Set("display_name", flattenBigqueryDataTransferConfigDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + if err := d.Set("name", flattenBigqueryDataTransferConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + if err := d.Set("destination_dataset_id", flattenBigqueryDataTransferConfigDestinationDatasetId(res["destinationDatasetId"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + if err := d.Set("data_source_id", flattenBigqueryDataTransferConfigDataSourceId(res["dataSourceId"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + if err := d.Set("schedule", flattenBigqueryDataTransferConfigSchedule(res["schedule"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + if err := d.Set("schedule_options", flattenBigqueryDataTransferConfigScheduleOptions(res["scheduleOptions"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + if err := d.Set("email_preferences", flattenBigqueryDataTransferConfigEmailPreferences(res["emailPreferences"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + if err := d.Set("notification_pubsub_topic", flattenBigqueryDataTransferConfigNotificationPubsubTopic(res["notificationPubsubTopic"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + if err := d.Set("data_refresh_window_days", flattenBigqueryDataTransferConfigDataRefreshWindowDays(res["dataRefreshWindowDays"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + if 
err := d.Set("disabled", flattenBigqueryDataTransferConfigDisabled(res["disabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + if err := d.Set("params", flattenBigqueryDataTransferConfigParams(res["params"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + + return nil +} + +func resourceBigqueryDataTransferConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Config: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandBigqueryDataTransferConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + destinationDatasetIdProp, err := expandBigqueryDataTransferConfigDestinationDatasetId(d.Get("destination_dataset_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("destination_dataset_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, destinationDatasetIdProp)) { + obj["destinationDatasetId"] = destinationDatasetIdProp + } + scheduleProp, err := expandBigqueryDataTransferConfigSchedule(d.Get("schedule"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("schedule"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, scheduleProp)) { + obj["schedule"] = scheduleProp + } + scheduleOptionsProp, err := expandBigqueryDataTransferConfigScheduleOptions(d.Get("schedule_options"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("schedule_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, scheduleOptionsProp)) { + obj["scheduleOptions"] = scheduleOptionsProp + } + emailPreferencesProp, err := expandBigqueryDataTransferConfigEmailPreferences(d.Get("email_preferences"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("email_preferences"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, emailPreferencesProp)) { + obj["emailPreferences"] = emailPreferencesProp + } + notificationPubsubTopicProp, err := expandBigqueryDataTransferConfigNotificationPubsubTopic(d.Get("notification_pubsub_topic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_pubsub_topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationPubsubTopicProp)) { + obj["notificationPubsubTopic"] = notificationPubsubTopicProp + } + dataRefreshWindowDaysProp, err := expandBigqueryDataTransferConfigDataRefreshWindowDays(d.Get("data_refresh_window_days"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_refresh_window_days"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dataRefreshWindowDaysProp)) { + obj["dataRefreshWindowDays"] = dataRefreshWindowDaysProp + } + disabledProp, err := expandBigqueryDataTransferConfigDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + paramsProp, err := expandBigqueryDataTransferConfigParams(d.Get("params"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("params"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, paramsProp)) { + obj["params"] = paramsProp + } + + 
obj, err = resourceBigqueryDataTransferConfigEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryDataTransferBasePath}}{{name}}?serviceAccountName={{service_account_name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Config %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("destination_dataset_id") { + updateMask = append(updateMask, "destinationDatasetId") + } + + if d.HasChange("schedule") { + updateMask = append(updateMask, "schedule") + } + + if d.HasChange("schedule_options") { + updateMask = append(updateMask, "scheduleOptions") + } + + if d.HasChange("email_preferences") { + updateMask = append(updateMask, "emailPreferences") + } + + if d.HasChange("notification_pubsub_topic") { + updateMask = append(updateMask, "notificationPubsubTopic") + } + + if d.HasChange("data_refresh_window_days") { + updateMask = append(updateMask, "dataRefreshWindowDays") + } + + if d.HasChange("disabled") { + updateMask = append(updateMask, "disabled") + } + + if d.HasChange("params") { + updateMask = append(updateMask, "params") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IamMemberMissing}, + }) + + if err != 
nil { + return fmt.Errorf("Error updating Config %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Config %q: %#v", d.Id(), res) + } + + return resourceBigqueryDataTransferConfigRead(d, meta) +} + +func resourceBigqueryDataTransferConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Config: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryDataTransferBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Config %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IamMemberMissing}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Config") + } + + log.Printf("[DEBUG] Finished deleting Config %q: %#v", d.Id(), res) + return nil +} + +func resourceBigqueryDataTransferConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { + return nil, err + } + + return []*schema.ResourceData{d}, nil +} + +func 
flattenBigqueryDataTransferConfigDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDataTransferConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDataTransferConfigDestinationDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDataTransferConfigDataSourceId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDataTransferConfigSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDataTransferConfigScheduleOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["disable_auto_scheduling"] = + flattenBigqueryDataTransferConfigScheduleOptionsDisableAutoScheduling(original["disableAutoScheduling"], d, config) + transformed["start_time"] = + flattenBigqueryDataTransferConfigScheduleOptionsStartTime(original["startTime"], d, config) + transformed["end_time"] = + flattenBigqueryDataTransferConfigScheduleOptionsEndTime(original["endTime"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryDataTransferConfigScheduleOptionsDisableAutoScheduling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDataTransferConfigScheduleOptionsStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDataTransferConfigScheduleOptionsEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenBigqueryDataTransferConfigEmailPreferences(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enable_failure_email"] = + flattenBigqueryDataTransferConfigEmailPreferencesEnableFailureEmail(original["enableFailureEmail"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryDataTransferConfigEmailPreferencesEnableFailureEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDataTransferConfigNotificationPubsubTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDataTransferConfigDataRefreshWindowDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBigqueryDataTransferConfigDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryDataTransferConfigParams(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + kv := v.(map[string]interface{}) + + res := make(map[string]string) + for key, value := range kv { + res[key] = fmt.Sprintf("%v", value) + } + return res +} + +func expandBigqueryDataTransferConfigDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandBigqueryDataTransferConfigDestinationDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryDataTransferConfigDataSourceId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryDataTransferConfigSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryDataTransferConfigScheduleOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDisableAutoScheduling, err := expandBigqueryDataTransferConfigScheduleOptionsDisableAutoScheduling(original["disable_auto_scheduling"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisableAutoScheduling); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["disableAutoScheduling"] = transformedDisableAutoScheduling + } + + transformedStartTime, err := expandBigqueryDataTransferConfigScheduleOptionsStartTime(original["start_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startTime"] = transformedStartTime + } + + transformedEndTime, err := expandBigqueryDataTransferConfigScheduleOptionsEndTime(original["end_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["endTime"] = transformedEndTime + } + + return transformed, nil +} + +func 
expandBigqueryDataTransferConfigScheduleOptionsDisableAutoScheduling(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryDataTransferConfigScheduleOptionsStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryDataTransferConfigScheduleOptionsEndTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryDataTransferConfigEmailPreferences(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnableFailureEmail, err := expandBigqueryDataTransferConfigEmailPreferencesEnableFailureEmail(original["enable_failure_email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableFailureEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableFailureEmail"] = transformedEnableFailureEmail + } + + return transformed, nil +} + +func expandBigqueryDataTransferConfigEmailPreferencesEnableFailureEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryDataTransferConfigNotificationPubsubTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryDataTransferConfigDataRefreshWindowDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryDataTransferConfigDisabled(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryDataTransferConfigParams(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func resourceBigqueryDataTransferConfigEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + paramMap, ok := obj["params"] + if !ok { + paramMap = make(map[string]string) + } + + var params map[string]string + params = paramMap.(map[string]string) + + for _, sp := range sensitiveParams { + if auth, _ := d.GetOkExists("sensitive_params.0." + sp); auth != "" { + params[sp] = auth.(string) + } + } + + obj["params"] = params + + return obj, nil +} + +func resourceBigqueryDataTransferConfigDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if paramMap, ok := res["params"]; ok { + params := paramMap.(map[string]interface{}) + for _, sp := range sensitiveParams { + if _, apiOk := params[sp]; apiOk { + if _, exists := d.GetOkExists("sensitive_params.0." + sp); exists { + delete(params, sp) + } else { + params[sp] = d.Get("params." 
+ sp) + } + } + } + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatatransfer/resource_bigquery_data_transfer_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatatransfer/resource_bigquery_data_transfer_config_sweeper.go new file mode 100644 index 0000000000..789a580f72 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigquerydatatransfer/resource_bigquery_data_transfer_config_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigquerydatatransfer + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("BigqueryDataTransferConfig", testSweepBigqueryDataTransferConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepBigqueryDataTransferConfig(region string) error { + resourceName := "BigqueryDataTransferConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://bigquerydatatransfer.googleapis.com/v1/projects/{{project}}/locations/{{location}}/transferConfigs?serviceAccountName={{service_account_name}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: 
config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["configs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://bigquerydatatransfer.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_capacity_commitment.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_capacity_commitment.go new file mode 100644 index 0000000000..9a5a010dfb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_capacity_commitment.go @@ -0,0 +1,482 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package bigqueryreservation + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func bigqueryReservationCapacityCommitmentPlanDiffSuppressFunc(_, old, new string, _ *schema.ResourceData) bool { + if (old == "FLEX" || old == "MONTHLY" || old == "ANNUAL") && new == old+"_FLAT_RATE" { + return true + } + return false +} + +func ResourceBigqueryReservationCapacityCommitment() *schema.Resource { + return &schema.Resource{ + Create: resourceBigqueryReservationCapacityCommitmentCreate, + Read: resourceBigqueryReservationCapacityCommitmentRead, + Update: resourceBigqueryReservationCapacityCommitmentUpdate, + Delete: resourceBigqueryReservationCapacityCommitmentDelete, + + Importer: &schema.ResourceImporter{ + State: 
resourceBigqueryReservationCapacityCommitmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "plan": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: bigqueryReservationCapacityCommitmentPlanDiffSuppressFunc, + Description: `Capacity commitment plan. Valid values are at https://cloud.google.com/bigquery/docs/reference/reservations/rpc/google.cloud.bigquery.reservation.v1#commitmentplan`, + }, + "slot_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Number of slots in this commitment.`, + }, + "capacity_commitment_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The optional capacity commitment ID. Capacity commitment name will be generated automatically if this field is +empty. This field must only contain lower case alphanumeric characters or dashes. The first and last character +cannot be a dash. Max length is 64 characters. NOTE: this ID won't be kept if the capacity commitment is split +or merged.`, + }, + "edition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The edition type. Valid values are STANDARD, ENTERPRISE, ENTERPRISE_PLUS`, + }, + "enforce_single_admin_project_per_org": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `If true, fail the request if another project in the organization has a capacity commitment.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The geographic location where the transfer config should reside. +Examples: US, EU, asia-northeast1. 
The default value is US.`, + Default: "US", + }, + "renewal_plan": { + Type: schema.TypeString, + Optional: true, + Description: `The plan this capacity commitment is converted to after commitmentEndTime passes. Once the plan is changed, committed period is extended according to commitment plan. Only applicable some commitment plans.`, + }, + "commitment_end_time": { + Type: schema.TypeString, + Computed: true, + Description: `The start of the current commitment period. It is applicable only for ACTIVE capacity commitments.`, + }, + "commitment_start_time": { + Type: schema.TypeString, + Computed: true, + Description: `The start of the current commitment period. It is applicable only for ACTIVE capacity commitments.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the capacity commitment, e.g., projects/myproject/locations/US/capacityCommitments/123`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the commitment`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBigqueryReservationCapacityCommitmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + slotCountProp, err := expandBigqueryReservationCapacityCommitmentSlotCount(d.Get("slot_count"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("slot_count"); !tpgresource.IsEmptyValue(reflect.ValueOf(slotCountProp)) && (ok || !reflect.DeepEqual(v, slotCountProp)) { + obj["slotCount"] = slotCountProp + } + planProp, err := expandBigqueryReservationCapacityCommitmentPlan(d.Get("plan"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("plan"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(planProp)) && (ok || !reflect.DeepEqual(v, planProp)) { + obj["plan"] = planProp + } + renewalPlanProp, err := expandBigqueryReservationCapacityCommitmentRenewalPlan(d.Get("renewal_plan"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("renewal_plan"); !tpgresource.IsEmptyValue(reflect.ValueOf(renewalPlanProp)) && (ok || !reflect.DeepEqual(v, renewalPlanProp)) { + obj["renewalPlan"] = renewalPlanProp + } + editionProp, err := expandBigqueryReservationCapacityCommitmentEdition(d.Get("edition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("edition"); !tpgresource.IsEmptyValue(reflect.ValueOf(editionProp)) && (ok || !reflect.DeepEqual(v, editionProp)) { + obj["edition"] = editionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/capacityCommitments?capacityCommitmentId={{capacity_commitment_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new CapacityCommitment: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CapacityCommitment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating CapacityCommitment: %s", err) + } + if err := d.Set("name", flattenBigqueryReservationCapacityCommitmentName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // 
Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/capacityCommitments/{{capacity_commitment_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating CapacityCommitment %q: %#v", d.Id(), res) + + return resourceBigqueryReservationCapacityCommitmentRead(d, meta) +} + +func resourceBigqueryReservationCapacityCommitmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/capacityCommitments/{{capacity_commitment_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CapacityCommitment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigqueryReservationCapacityCommitment %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading CapacityCommitment: %s", err) + } + + if err := d.Set("name", flattenBigqueryReservationCapacityCommitmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading CapacityCommitment: %s", err) + } + if err := d.Set("slot_count", flattenBigqueryReservationCapacityCommitmentSlotCount(res["slotCount"], d, config)); err 
!= nil { + return fmt.Errorf("Error reading CapacityCommitment: %s", err) + } + if err := d.Set("plan", flattenBigqueryReservationCapacityCommitmentPlan(res["plan"], d, config)); err != nil { + return fmt.Errorf("Error reading CapacityCommitment: %s", err) + } + if err := d.Set("state", flattenBigqueryReservationCapacityCommitmentState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading CapacityCommitment: %s", err) + } + if err := d.Set("commitment_start_time", flattenBigqueryReservationCapacityCommitmentCommitmentStartTime(res["commitmentStartTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CapacityCommitment: %s", err) + } + if err := d.Set("commitment_end_time", flattenBigqueryReservationCapacityCommitmentCommitmentEndTime(res["commitmentEndTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CapacityCommitment: %s", err) + } + if err := d.Set("renewal_plan", flattenBigqueryReservationCapacityCommitmentRenewalPlan(res["renewalPlan"], d, config)); err != nil { + return fmt.Errorf("Error reading CapacityCommitment: %s", err) + } + if err := d.Set("edition", flattenBigqueryReservationCapacityCommitmentEdition(res["edition"], d, config)); err != nil { + return fmt.Errorf("Error reading CapacityCommitment: %s", err) + } + + return nil +} + +func resourceBigqueryReservationCapacityCommitmentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CapacityCommitment: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + planProp, err := expandBigqueryReservationCapacityCommitmentPlan(d.Get("plan"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("plan"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, planProp)) { + obj["plan"] = planProp + } + renewalPlanProp, err := expandBigqueryReservationCapacityCommitmentRenewalPlan(d.Get("renewal_plan"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("renewal_plan"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, renewalPlanProp)) { + obj["renewalPlan"] = renewalPlanProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/capacityCommitments/{{capacity_commitment_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating CapacityCommitment %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("plan") { + updateMask = append(updateMask, "plan") + } + + if d.HasChange("renewal_plan") { + updateMask = append(updateMask, "renewalPlan") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating CapacityCommitment %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating CapacityCommitment %q: %#v", d.Id(), res) + } + + return resourceBigqueryReservationCapacityCommitmentRead(d, meta) +} + +func resourceBigqueryReservationCapacityCommitmentDelete(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CapacityCommitment: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/capacityCommitments/{{capacity_commitment_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting CapacityCommitment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "CapacityCommitment") + } + + log.Printf("[DEBUG] Finished deleting CapacityCommitment %q: %#v", d.Id(), res) + return nil +} + +func resourceBigqueryReservationCapacityCommitmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/capacityCommitments/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/capacityCommitments/{{capacity_commitment_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + 
+func flattenBigqueryReservationCapacityCommitmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryReservationCapacityCommitmentSlotCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBigqueryReservationCapacityCommitmentPlan(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryReservationCapacityCommitmentState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryReservationCapacityCommitmentCommitmentStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryReservationCapacityCommitmentCommitmentEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryReservationCapacityCommitmentRenewalPlan(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryReservationCapacityCommitmentEdition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandBigqueryReservationCapacityCommitmentSlotCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryReservationCapacityCommitmentPlan(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandBigqueryReservationCapacityCommitmentRenewalPlan(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryReservationCapacityCommitmentEdition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_capacity_commitment_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_capacity_commitment_sweeper.go new file mode 100644 index 0000000000..e851907270 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_capacity_commitment_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigqueryreservation + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("BigqueryReservationCapacityCommitment", testSweepBigqueryReservationCapacityCommitment) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepBigqueryReservationCapacityCommitment(region string) error { + resourceName := "BigqueryReservationCapacityCommitment" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://bigqueryreservation.googleapis.com/v1/projects/{{project}}/locations/{{location}}/capacityCommitments", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: 
config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["capacityCommitments"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://bigqueryreservation.googleapis.com/v1/projects/{{project}}/locations/{{location}}/capacityCommitments/{{capacity_commitment_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + 
log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_reservation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_reservation.go new file mode 100644 index 0000000000..5f90fc14fa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_reservation.go @@ -0,0 +1,597 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigqueryreservation + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceBigqueryReservationReservation() *schema.Resource { + return &schema.Resource{ + Create: resourceBigqueryReservationReservationCreate, + Read: resourceBigqueryReservationReservationRead, + Update: resourceBigqueryReservationReservationUpdate, + Delete: resourceBigqueryReservationReservationDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBigqueryReservationReservationImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the reservation. This field must only contain alphanumeric characters or dash.`, + }, + "slot_capacity": { + Type: schema.TypeInt, + Required: true, + Description: `Minimum slots available to this reservation. A slot is a unit of computational power in BigQuery, and serves as the +unit of parallelism. 
Queries using this reservation might use more slots during runtime if ignoreIdleSlots is set to false.`, + }, + "autoscale": { + Type: schema.TypeList, + Optional: true, + Description: `The configuration parameters for the auto scaling feature.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_slots": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of slots to be scaled when needed.`, + }, + "current_slots": { + Type: schema.TypeInt, + Computed: true, + Description: `The slot capacity added to this reservation when autoscale happens. Will be between [0, max_slots].`, + }, + }, + }, + }, + "concurrency": { + Type: schema.TypeInt, + Optional: true, + Description: `Maximum number of queries that are allowed to run concurrently in this reservation. This is a soft limit due to asynchronous nature of the system and various optimizations for small queries. Default value is 0 which means that concurrency will be automatically set based on the reservation size.`, + Default: 0, + }, + "edition": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The edition type. Valid values are STANDARD, ENTERPRISE, ENTERPRISE_PLUS`, + }, + "ignore_idle_slots": { + Type: schema.TypeBool, + Optional: true, + Description: `If false, any query using this reservation will use idle slots from other reservations within +the same admin project. If true, a query using this reservation will execute with the slot +capacity specified above at most.`, + Default: false, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The geographic location where the transfer config should reside. +Examples: US, EU, asia-northeast1. The default value is US.`, + Default: "US", + }, + "multi_region_auxiliary": { + Type: schema.TypeBool, + Optional: true, + Description: `Applicable only for reservations located within one of the BigQuery multi-regions (US or EU). 
+If set to true, this reservation is placed in the organization's secondary region which is designated for disaster recovery purposes. If false, this reservation is placed in the organization's default region.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBigqueryReservationReservationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + slotCapacityProp, err := expandBigqueryReservationReservationSlotCapacity(d.Get("slot_capacity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("slot_capacity"); !tpgresource.IsEmptyValue(reflect.ValueOf(slotCapacityProp)) && (ok || !reflect.DeepEqual(v, slotCapacityProp)) { + obj["slotCapacity"] = slotCapacityProp + } + ignoreIdleSlotsProp, err := expandBigqueryReservationReservationIgnoreIdleSlots(d.Get("ignore_idle_slots"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ignore_idle_slots"); !tpgresource.IsEmptyValue(reflect.ValueOf(ignoreIdleSlotsProp)) && (ok || !reflect.DeepEqual(v, ignoreIdleSlotsProp)) { + obj["ignoreIdleSlots"] = ignoreIdleSlotsProp + } + concurrencyProp, err := expandBigqueryReservationReservationConcurrency(d.Get("concurrency"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("concurrency"); !tpgresource.IsEmptyValue(reflect.ValueOf(concurrencyProp)) && (ok || !reflect.DeepEqual(v, concurrencyProp)) { + obj["concurrency"] = concurrencyProp + } + multiRegionAuxiliaryProp, err := expandBigqueryReservationReservationMultiRegionAuxiliary(d.Get("multi_region_auxiliary"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("multi_region_auxiliary"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(multiRegionAuxiliaryProp)) && (ok || !reflect.DeepEqual(v, multiRegionAuxiliaryProp)) { + obj["multiRegionAuxiliary"] = multiRegionAuxiliaryProp + } + editionProp, err := expandBigqueryReservationReservationEdition(d.Get("edition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("edition"); !tpgresource.IsEmptyValue(reflect.ValueOf(editionProp)) && (ok || !reflect.DeepEqual(v, editionProp)) { + obj["edition"] = editionProp + } + autoscaleProp, err := expandBigqueryReservationReservationAutoscale(d.Get("autoscale"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("autoscale"); !tpgresource.IsEmptyValue(reflect.ValueOf(autoscaleProp)) && (ok || !reflect.DeepEqual(v, autoscaleProp)) { + obj["autoscale"] = autoscaleProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/reservations?reservationId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Reservation: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Reservation: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Reservation: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/reservations/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + 
d.SetId(id) + + log.Printf("[DEBUG] Finished creating Reservation %q: %#v", d.Id(), res) + + return resourceBigqueryReservationReservationRead(d, meta) +} + +func resourceBigqueryReservationReservationRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/reservations/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Reservation: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigqueryReservationReservation %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Reservation: %s", err) + } + + if err := d.Set("slot_capacity", flattenBigqueryReservationReservationSlotCapacity(res["slotCapacity"], d, config)); err != nil { + return fmt.Errorf("Error reading Reservation: %s", err) + } + if err := d.Set("ignore_idle_slots", flattenBigqueryReservationReservationIgnoreIdleSlots(res["ignoreIdleSlots"], d, config)); err != nil { + return fmt.Errorf("Error reading Reservation: %s", err) + } + if err := d.Set("concurrency", flattenBigqueryReservationReservationConcurrency(res["concurrency"], d, config)); err != nil { + return fmt.Errorf("Error reading Reservation: %s", err) + } + if err := 
d.Set("multi_region_auxiliary", flattenBigqueryReservationReservationMultiRegionAuxiliary(res["multiRegionAuxiliary"], d, config)); err != nil { + return fmt.Errorf("Error reading Reservation: %s", err) + } + if err := d.Set("edition", flattenBigqueryReservationReservationEdition(res["edition"], d, config)); err != nil { + return fmt.Errorf("Error reading Reservation: %s", err) + } + if err := d.Set("autoscale", flattenBigqueryReservationReservationAutoscale(res["autoscale"], d, config)); err != nil { + return fmt.Errorf("Error reading Reservation: %s", err) + } + + return nil +} + +func resourceBigqueryReservationReservationUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Reservation: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + slotCapacityProp, err := expandBigqueryReservationReservationSlotCapacity(d.Get("slot_capacity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("slot_capacity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, slotCapacityProp)) { + obj["slotCapacity"] = slotCapacityProp + } + ignoreIdleSlotsProp, err := expandBigqueryReservationReservationIgnoreIdleSlots(d.Get("ignore_idle_slots"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ignore_idle_slots"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ignoreIdleSlotsProp)) { + obj["ignoreIdleSlots"] = ignoreIdleSlotsProp + } + concurrencyProp, err := expandBigqueryReservationReservationConcurrency(d.Get("concurrency"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("concurrency"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, concurrencyProp)) { + obj["concurrency"] = concurrencyProp + } + multiRegionAuxiliaryProp, err := expandBigqueryReservationReservationMultiRegionAuxiliary(d.Get("multi_region_auxiliary"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("multi_region_auxiliary"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, multiRegionAuxiliaryProp)) { + obj["multiRegionAuxiliary"] = multiRegionAuxiliaryProp + } + autoscaleProp, err := expandBigqueryReservationReservationAutoscale(d.Get("autoscale"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("autoscale"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autoscaleProp)) { + obj["autoscale"] = autoscaleProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/reservations/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Reservation %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("slot_capacity") { + updateMask = append(updateMask, "slotCapacity") + } + + if d.HasChange("ignore_idle_slots") { + updateMask = append(updateMask, "ignoreIdleSlots") + } + + if d.HasChange("concurrency") { + updateMask = append(updateMask, "concurrency") + } + + if d.HasChange("multi_region_auxiliary") { + updateMask = append(updateMask, "multiRegionAuxiliary") + } + + if d.HasChange("autoscale") { + updateMask = append(updateMask, "autoscale") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + 
billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Reservation %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Reservation %q: %#v", d.Id(), res) + } + + return resourceBigqueryReservationReservationRead(d, meta) +} + +func resourceBigqueryReservationReservationDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Reservation: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/reservations/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Reservation %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Reservation") + } + + log.Printf("[DEBUG] Finished deleting Reservation %q: %#v", d.Id(), res) + return nil +} + +func resourceBigqueryReservationReservationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := 
meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/reservations/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/reservations/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenBigqueryReservationReservationSlotCapacity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBigqueryReservationReservationIgnoreIdleSlots(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryReservationReservationConcurrency(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBigqueryReservationReservationMultiRegionAuxiliary(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryReservationReservationEdition(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenBigqueryReservationReservationAutoscale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["current_slots"] = + flattenBigqueryReservationReservationAutoscaleCurrentSlots(original["currentSlots"], d, config) + transformed["max_slots"] = + flattenBigqueryReservationReservationAutoscaleMaxSlots(original["maxSlots"], d, config) + return []interface{}{transformed} +} +func flattenBigqueryReservationReservationAutoscaleCurrentSlots(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenBigqueryReservationReservationAutoscaleMaxSlots(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandBigqueryReservationReservationSlotCapacity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryReservationReservationIgnoreIdleSlots(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) 
{ + return v, nil +} + +func expandBigqueryReservationReservationConcurrency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryReservationReservationMultiRegionAuxiliary(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryReservationReservationEdition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryReservationReservationAutoscale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCurrentSlots, err := expandBigqueryReservationReservationAutoscaleCurrentSlots(original["current_slots"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCurrentSlots); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["currentSlots"] = transformedCurrentSlots + } + + transformedMaxSlots, err := expandBigqueryReservationReservationAutoscaleMaxSlots(original["max_slots"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxSlots); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxSlots"] = transformedMaxSlots + } + + return transformed, nil +} + +func expandBigqueryReservationReservationAutoscaleCurrentSlots(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigqueryReservationReservationAutoscaleMaxSlots(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_reservation_assignment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_reservation_assignment.go similarity index 79% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_reservation_assignment.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_reservation_assignment.go index 352eb1eccd..b0bb5d2fd3 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigquery_reservation_assignment.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_reservation_assignment.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package bigqueryreservation import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" bigqueryreservation "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceBigqueryReservationAssignment() *schema.Resource { @@ -47,7 +54,7 @@ func ResourceBigqueryReservationAssignment() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The resource which will use the reservation. E.g. 
projects/myproject, folders/123, organizations/456.", }, @@ -62,7 +69,7 @@ func ResourceBigqueryReservationAssignment() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The reservation for the resource", }, @@ -79,7 +86,7 @@ func ResourceBigqueryReservationAssignment() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -99,8 +106,8 @@ func ResourceBigqueryReservationAssignment() *schema.Resource { } func resourceBigqueryReservationAssignmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -118,18 +125,18 @@ func resourceBigqueryReservationAssignmentCreate(d *schema.ResourceData, meta in return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLBigqueryReservationClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLBigqueryReservationClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -162,8 +169,8 @@ func resourceBigqueryReservationAssignmentCreate(d *schema.ResourceData, meta in } func resourceBigqueryReservationAssignmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -177,17 +184,17 @@ func resourceBigqueryReservationAssignmentRead(d *schema.ResourceData, meta inte Name: dcl.StringOrNil(d.Get("name").(string)), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLBigqueryReservationClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLBigqueryReservationClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -196,7 +203,7 @@ func resourceBigqueryReservationAssignmentRead(d *schema.ResourceData, meta inte res, err := client.GetAssignment(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("BigqueryReservationAssignment %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return 
tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("assignee", res.Assignee); err != nil { @@ -225,8 +232,8 @@ func resourceBigqueryReservationAssignmentRead(d *schema.ResourceData, meta inte } func resourceBigqueryReservationAssignmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -241,17 +248,17 @@ func resourceBigqueryReservationAssignmentDelete(d *schema.ResourceData, meta in } log.Printf("[DEBUG] Deleting Assignment %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLBigqueryReservationClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLBigqueryReservationClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -266,9 +273,9 @@ func resourceBigqueryReservationAssignmentDelete(d *schema.ResourceData, meta in } func resourceBigqueryReservationAssignmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ 
"projects/(?P[^/]+)/locations/(?P[^/]+)/reservations/(?P[^/]+)/assignments/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", @@ -277,7 +284,7 @@ func resourceBigqueryReservationAssignmentImport(d *schema.ResourceData, meta in } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/reservations/{{reservation}}/assignments/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/reservations/{{reservation}}/assignments/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_reservation_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_reservation_sweeper.go new file mode 100644 index 0000000000..85e531d1b3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation/resource_bigquery_reservation_sweeper.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package bigqueryreservation + +import ( + "context" + "log" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// This will sweep BigqueryReservation Reservation and Assignment resources +func init() { + sweeper.AddTestSweepers("BigqueryReservation", testSweepBigqueryReservation) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepBigqueryReservation(region string) error { + resourceName := "BigqueryReservation" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + servicesUrl := config.BigqueryReservationBasePath + "projects/" + config.Project + "/locations/" + region + "/reservations" + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: servicesUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", servicesUrl, err) + return nil + } + + resourceList, ok := res["reservations"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Count items that weren't sweeped. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + reservationName := obj["name"].(string) + reservationNameParts := strings.Split(reservationName, "/") + reservationShortName := reservationNameParts[len(reservationNameParts)-1] + // Increment count and skip if resource is not sweepable. + if !sweeper.IsSweepableTestResource(reservationShortName) { + nonPrefixCount++ + continue + } + + deleteAllAssignments(config, reservationName) + + deleteUrl := servicesUrl + "/" + reservationShortName + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, reservationShortName) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items without tf-test prefix remain.", nonPrefixCount) + } + + return nil +} + +func deleteAllAssignments(config *transport_tpg.Config, reservationName string) { + assignmentListUrl := config.BigqueryReservationBasePath + reservationName + "/assignments" + + assignmentRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: assignmentListUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", assignmentListUrl, err) + return + } + + assignmentList, ok := assignmentRes["assignments"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in assignment response.") + return + } + + al := assignmentList.([]interface{}) + + for 
_, ri := range al { + obj := ri.(map[string]interface{}) + name := obj["name"].(string) + + deleteUrl := config.BigqueryReservationBasePath + name + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for bigquery reservation assignment resource: %s", name) + } + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigtable_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/iam_bigtable_instance.go similarity index 79% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigtable_instance.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/iam_bigtable_instance.go index 9f716fa049..4d62d15206 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigtable_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/iam_bigtable_instance.go @@ -1,8 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package bigtable import ( "fmt" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/bigtableadmin/v2" "github.com/hashicorp/errwrap" @@ -27,12 +32,12 @@ var IamBigtableInstanceSchema = map[string]*schema.Schema{ type BigtableInstanceIamUpdater struct { project string instance string - d TerraformResourceData - Config *Config + d tpgresource.TerraformResourceData + Config *transport_tpg.Config } -func NewBigtableInstanceUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) +func NewBigtableInstanceUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } @@ -49,8 +54,8 @@ func NewBigtableInstanceUpdater(d TerraformResourceData, config *Config) (Resour }, nil } -func BigtableInstanceIdParseFunc(d *schema.ResourceData, config *Config) error { - fv, err := parseProjectFieldValue("instances", d.Id(), "project", d, config, false) +func BigtableInstanceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + fv, err := tpgresource.ParseProjectFieldValue("instances", d.Id(), "project", d, config, false) if err != nil { return err } @@ -70,7 +75,7 @@ func BigtableInstanceIdParseFunc(d *schema.ResourceData, config *Config) error { func (u *BigtableInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { req := &bigtableadmin.GetIamPolicyRequest{} - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) if err != nil { return nil, err } @@ -96,7 +101,7 @@ func (u *BigtableInstanceIamUpdater) 
SetResourceIamPolicy(policy *cloudresourcem req := &bigtableadmin.SetIamPolicyRequest{Policy: bigtablePolicy} - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) if err != nil { return err } @@ -123,7 +128,7 @@ func (u *BigtableInstanceIamUpdater) DescribeResource() string { func resourceManagerToBigtablePolicy(p *cloudresourcemanager.Policy) (*bigtableadmin.Policy, error) { out := &bigtableadmin.Policy{} - err := Convert(p, out) + err := tpgresource.Convert(p, out) if err != nil { return nil, errwrap.Wrapf("Cannot convert a bigtable policy to a cloudresourcemanager policy: {{err}}", err) } @@ -132,7 +137,7 @@ func resourceManagerToBigtablePolicy(p *cloudresourcemanager.Policy) (*bigtablea func bigtableToResourceManagerPolicy(p *bigtableadmin.Policy) (*cloudresourcemanager.Policy, error) { out := &cloudresourcemanager.Policy{} - err := Convert(p, out) + err := tpgresource.Convert(p, out) if err != nil { return nil, errwrap.Wrapf("Cannot convert a cloudresourcemanager policy to a bigtable policy: {{err}}", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigtable_table.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/iam_bigtable_table.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigtable_table.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/iam_bigtable_table.go index 04a01939bf..73c7e5d0ab 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_bigtable_table.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/iam_bigtable_table.go @@ -1,8 +1,13 @@ -package google +// Copyright (c) 
HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package bigtable import ( "fmt" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/bigtableadmin/v2" "github.com/hashicorp/errwrap" @@ -33,12 +38,12 @@ type BigtableTableIamUpdater struct { project string instance string table string - d TerraformResourceData - Config *Config + d tpgresource.TerraformResourceData + Config *transport_tpg.Config } -func NewBigtableTableUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) +func NewBigtableTableUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } @@ -56,15 +61,15 @@ func NewBigtableTableUpdater(d TerraformResourceData, config *Config) (ResourceI }, nil } -func BigtableTableIdParseFunc(d *schema.ResourceData, config *Config) error { +func BigtableTableIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { values := make(map[string]string) - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/instances/(?P[^/]+)/tables/(?P[^/]+)"}, d, config, d.Id()) + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/instances/(?P[^/]+)/tables/(?P
[^/]+)"}, d, config, d.Id()) if err != nil { return err } - project, _ := getProject(d, config) + project, _ := tpgresource.GetProject(d, config) for k, v := range m { values[k] = v @@ -90,7 +95,7 @@ func BigtableTableIdParseFunc(d *schema.ResourceData, config *Config) error { func (u *BigtableTableIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { req := &bigtableadmin.GetIamPolicyRequest{} - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) if err != nil { return nil, err } @@ -116,7 +121,7 @@ func (u *BigtableTableIamUpdater) SetResourceIamPolicy(policy *cloudresourcemana req := &bigtableadmin.SetIamPolicyRequest{Policy: bigtablePolicy} - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_app_profile.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_app_profile.go new file mode 100644 index 0000000000..64d2dd6325 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_app_profile.go @@ -0,0 +1,556 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package bigtable + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/bigtableadmin/v2" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceBigtableAppProfile() *schema.Resource { + return &schema.Resource{ + Create: resourceBigtableAppProfileCreate, + Read: resourceBigtableAppProfileRead, + Update: resourceBigtableAppProfileUpdate, + Delete: resourceBigtableAppProfileDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBigtableAppProfileImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "app_profile_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The unique name of the app profile in the form '[_a-zA-Z0-9][-_.a-zA-Z0-9]*'.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Long form description of the use case for this app profile.`, + }, + "ignore_warnings": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, ignore safety checks when deleting/updating the app profile.`, + Default: false, + }, + "instance": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The name of the instance to create the app profile within.`, + }, + "multi_cluster_routing_use_any": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, read/write requests are routed to the nearest cluster in the instance, and will fail over to the nearest cluster that is available +in the event of transient 
errors or delays. Clusters in a region are considered equidistant. Choosing this option sacrifices read-your-writes +consistency to improve availability.`, + ExactlyOneOf: []string{"single_cluster_routing", "multi_cluster_routing_use_any"}, + }, + "single_cluster_routing": { + Type: schema.TypeList, + Optional: true, + Description: `Use a single-cluster routing policy.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + Description: `The cluster to which read/write requests should be routed.`, + }, + "allow_transactional_writes": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, CheckAndMutateRow and ReadModifyWriteRow requests are allowed by this app profile. +It is unsafe to send these requests to the same table/row/column in multiple clusters.`, + }, + }, + }, + ExactlyOneOf: []string{"single_cluster_routing", "multi_cluster_routing_use_any"}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique name of the requested app profile. Values are of the form 'projects//instances//appProfiles/'.`, + }, + "multi_cluster_routing_cluster_ids": { + Type: schema.TypeList, + Optional: true, + Description: `The set of clusters to route to. The order is ignored; clusters will be tried in order of distance. 
If left empty, all clusters are eligible.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + ConflictsWith: []string{"single_cluster_routing"}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBigtableAppProfileCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandBigtableAppProfileDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + multiClusterRoutingUseAnyProp, err := expandBigtableAppProfileMultiClusterRoutingUseAny(d.Get("multi_cluster_routing_use_any"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("multi_cluster_routing_use_any"); !tpgresource.IsEmptyValue(reflect.ValueOf(multiClusterRoutingUseAnyProp)) && (ok || !reflect.DeepEqual(v, multiClusterRoutingUseAnyProp)) { + obj["multiClusterRoutingUseAny"] = multiClusterRoutingUseAnyProp + } + singleClusterRoutingProp, err := expandBigtableAppProfileSingleClusterRouting(d.Get("single_cluster_routing"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("single_cluster_routing"); !tpgresource.IsEmptyValue(reflect.ValueOf(singleClusterRoutingProp)) && (ok || !reflect.DeepEqual(v, singleClusterRoutingProp)) { + obj["singleClusterRouting"] = singleClusterRoutingProp + } + + obj, err = resourceBigtableAppProfileEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{BigtableBasePath}}projects/{{project}}/instances/{{instance}}/appProfiles?appProfileId={{app_profile_id}}&ignoreWarnings={{ignore_warnings}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AppProfile: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AppProfile: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating AppProfile: %s", err) + } + if err := d.Set("name", flattenBigtableAppProfileName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating AppProfile %q: %#v", d.Id(), res) + + return resourceBigtableAppProfileRead(d, meta) +} + +func resourceBigtableAppProfileRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigtableBasePath}}projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + 
return fmt.Errorf("Error fetching project for AppProfile: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigtableAppProfile %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading AppProfile: %s", err) + } + + if err := d.Set("name", flattenBigtableAppProfileName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading AppProfile: %s", err) + } + if err := d.Set("description", flattenBigtableAppProfileDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading AppProfile: %s", err) + } + if err := d.Set("multi_cluster_routing_use_any", flattenBigtableAppProfileMultiClusterRoutingUseAny(res["multiClusterRoutingUseAny"], d, config)); err != nil { + return fmt.Errorf("Error reading AppProfile: %s", err) + } + if err := d.Set("single_cluster_routing", flattenBigtableAppProfileSingleClusterRouting(res["singleClusterRouting"], d, config)); err != nil { + return fmt.Errorf("Error reading AppProfile: %s", err) + } + + return nil +} + +func resourceBigtableAppProfileUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AppProfile: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := 
expandBigtableAppProfileDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + multiClusterRoutingUseAnyProp, err := expandBigtableAppProfileMultiClusterRoutingUseAny(d.Get("multi_cluster_routing_use_any"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("multi_cluster_routing_use_any"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, multiClusterRoutingUseAnyProp)) { + obj["multiClusterRoutingUseAny"] = multiClusterRoutingUseAnyProp + } + singleClusterRoutingProp, err := expandBigtableAppProfileSingleClusterRouting(d.Get("single_cluster_routing"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("single_cluster_routing"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, singleClusterRoutingProp)) { + obj["singleClusterRouting"] = singleClusterRoutingProp + } + + obj, err = resourceBigtableAppProfileEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BigtableBasePath}}projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}?ignoreWarnings={{ignore_warnings}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating AppProfile %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("multi_cluster_routing_use_any") { + updateMask = append(updateMask, "multiClusterRoutingUseAny") + } + + if d.HasChange("single_cluster_routing") { + updateMask = append(updateMask, "singleClusterRouting") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, 
map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + if d.HasChange("multi_cluster_routing_cluster_ids") && !tpgresource.StringInSlice(updateMask, "multiClusterRoutingUseAny") { + updateMask = append(updateMask, "multiClusterRoutingUseAny") + } + + // this api requires the body to define something for all values passed into + // the update mask, however, multi-cluster routing and single-cluster routing + // are conflicting, so we can't have them both in the update mask, despite + // both of them registering as changing. thus, we need to remove whichever + // one is not defined. + newRouting, oldRouting := d.GetChange("multi_cluster_routing_use_any") + if newRouting != oldRouting { + for i, val := range updateMask { + if val == "multiClusterRoutingUseAny" && newRouting.(bool) || + val == "singleClusterRouting" && oldRouting.(bool) { + updateMask = append(updateMask[0:i], updateMask[i+1:]...) + break + } + } + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating AppProfile %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating AppProfile %q: %#v", d.Id(), res) + } + + return resourceBigtableAppProfileRead(d, meta) +} + +func resourceBigtableAppProfileDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + 
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AppProfile: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{BigtableBasePath}}projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}?ignoreWarnings={{ignore_warnings}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AppProfile %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AppProfile") + } + + log.Printf("[DEBUG] Finished deleting AppProfile %q: %#v", d.Id(), res) + return nil +} + +func resourceBigtableAppProfileImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/instances/(?P[^/]+)/appProfiles/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenBigtableAppProfileName(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenBigtableAppProfileDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigtableAppProfileMultiClusterRoutingUseAny(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return false + } + + if v.(map[string]interface{})["clusterIds"] == nil { + return true + } + + if len(v.(map[string]interface{})["clusterIds"].([]interface{})) > 0 { + if err := d.Set("multi_cluster_routing_cluster_ids", v.(map[string]interface{})["clusterIds"]); err != nil { + return true + } + } + + return true +} + +func flattenBigtableAppProfileSingleClusterRouting(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cluster_id"] = + flattenBigtableAppProfileSingleClusterRoutingClusterId(original["clusterId"], d, config) + transformed["allow_transactional_writes"] = + flattenBigtableAppProfileSingleClusterRoutingAllowTransactionalWrites(original["allowTransactionalWrites"], d, config) + return []interface{}{transformed} +} +func flattenBigtableAppProfileSingleClusterRoutingClusterId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBigtableAppProfileSingleClusterRoutingAllowTransactionalWrites(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandBigtableAppProfileDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigtableAppProfileMultiClusterRoutingUseAny(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil || !v.(bool) { + 
return nil, nil + } + + obj := bigtableadmin.MultiClusterRoutingUseAny{} + + clusterIds := d.Get("multi_cluster_routing_cluster_ids").([]interface{}) + + for _, id := range clusterIds { + obj.ClusterIds = append(obj.ClusterIds, id.(string)) + } + + return obj, nil +} + +func expandBigtableAppProfileSingleClusterRouting(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedClusterId, err := expandBigtableAppProfileSingleClusterRoutingClusterId(original["cluster_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClusterId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clusterId"] = transformedClusterId + } + + transformedAllowTransactionalWrites, err := expandBigtableAppProfileSingleClusterRoutingAllowTransactionalWrites(original["allow_transactional_writes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowTransactionalWrites); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowTransactionalWrites"] = transformedAllowTransactionalWrites + } + + return transformed, nil +} + +func expandBigtableAppProfileSingleClusterRoutingClusterId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBigtableAppProfileSingleClusterRoutingAllowTransactionalWrites(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceBigtableAppProfileEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Instance is a URL parameter only, so replace self-link/path with resource name only. 
+ if err := d.Set("instance", tpgresource.GetResourceNameFromSelfLink(d.Get("instance").(string))); err != nil { + return nil, fmt.Errorf("Error setting instance: %s", err) + } + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_gc_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_gc_policy.go similarity index 86% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_gc_policy.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_gc_policy.go index 595fc7cfd3..d54fd84f83 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_gc_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_gc_policy.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package bigtable import ( "context" @@ -12,6 +14,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) const ( @@ -19,7 +25,7 @@ const ( GCPolicyModeUnion = "UNION" ) -func resourceBigtableGCPolicyCustomizeDiffFunc(diff TerraformResourceDiff) error { +func resourceBigtableGCPolicyCustomizeDiffFunc(diff tpgresource.TerraformResourceDiff) error { count := diff.Get("max_age.#").(int) if count < 1 { return nil @@ -65,12 +71,17 @@ func ResourceBigtableGCPolicy() *schema.Resource { Update: resourceBigtableGCPolicyUpsert, CustomizeDiff: resourceBigtableGCPolicyCustomizeDiff, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + Schema: map[string]*schema.Schema{ "instance_name": { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareResourceNames, + DiffSuppressFunc: tpgresource.CompareResourceNames, Description: `The name of the Bigtable instance.`, }, @@ -131,7 +142,7 @@ func ResourceBigtableGCPolicy() *schema.Resource { Computed: true, ForceNew: true, Description: `Duration before applying GC policy`, - ValidateFunc: validateDuration(), + ValidateFunc: verify.ValidateDuration(), ExactlyOneOf: []string{"max_age.0.days", "max_age.0.duration"}, }, }, @@ -179,20 +190,20 @@ func ResourceBigtableGCPolicy() *schema.Resource { } func resourceBigtableGCPolicyUpsert(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + 
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } ctx := context.Background() - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) if err != nil { return fmt.Errorf("Error starting admin client. %s", err) @@ -211,9 +222,9 @@ func resourceBigtableGCPolicyUpsert(d *schema.ResourceData, meta interface{}) er tableName := d.Get("table").(string) columnFamily := d.Get("column_family").(string) - retryFunc := func() (interface{}, error) { + retryFunc := func() error { reqErr := c.SetGCPolicy(ctx, tableName, columnFamily, gcPolicy) - return "", reqErr + return reqErr } // The default create timeout is 20 minutes. timeout := d.Timeout(schema.TimeoutCreate) @@ -221,7 +232,12 @@ func resourceBigtableGCPolicyUpsert(d *schema.ResourceData, meta interface{}) er // Mutations to gc policies can only happen one-at-a-time and take some amount of time. // Use a fixed polling rate of 30s based on the RetryInfo returned by the server rather than // the standard up-to-10s exponential backoff for those operations. 
- _, err = retryWithPolling(retryFunc, timeout, pollInterval, isBigTableRetryableError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: retryFunc, + Timeout: timeout, + PollInterval: pollInterval, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsBigTableRetryableError}, + }) if err != nil { return err } @@ -241,19 +257,19 @@ func resourceBigtableGCPolicyUpsert(d *schema.ResourceData, meta interface{}) er } func resourceBigtableGCPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } ctx := context.Background() - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) if err != nil { return fmt.Errorf("Error starting admin client. 
%s", err) @@ -265,7 +281,7 @@ func resourceBigtableGCPolicyRead(d *schema.ResourceData, meta interface{}) erro columnFamily := d.Get("column_family").(string) ti, err := c.TableInfo(ctx, name) if err != nil { - if isNotFoundGrpcError(err) { + if tpgresource.IsNotFoundGrpcError(err) { log.Printf("[WARN] Removing the GC policy because the parent table %s is gone", name) d.SetId("") return nil @@ -289,7 +305,7 @@ func resourceBigtableGCPolicyRead(d *schema.ResourceData, meta interface{}) erro maxAge := d.Get("max_age") maxVersion := d.Get("max_version") if d.Get("mode") == "" && len(maxAge.([]interface{})) == 0 && len(maxVersion.([]interface{})) == 0 { - gcRuleString, err := gcPolicyToGCRuleString(fi.FullGCPolicy, true) + gcRuleString, err := GcPolicyToGCRuleString(fi.FullGCPolicy, true) if err != nil { return err } @@ -310,7 +326,7 @@ func resourceBigtableGCPolicyRead(d *schema.ResourceData, meta interface{}) erro } // Recursively convert Bigtable GC policy to JSON format in a map. -func gcPolicyToGCRuleString(gc bigtable.GCPolicy, topLevel bool) (map[string]interface{}, error) { +func GcPolicyToGCRuleString(gc bigtable.GCPolicy, topLevel bool) (map[string]interface{}, error) { result := make(map[string]interface{}) switch bigtable.GetPolicyType(gc) { case bigtable.PolicyMaxAge: @@ -344,7 +360,7 @@ func gcPolicyToGCRuleString(gc bigtable.GCPolicy, topLevel bool) (map[string]int result["mode"] = "union" rules := []interface{}{} for _, c := range gc.(bigtable.UnionGCPolicy).Children { - gcRuleString, err := gcPolicyToGCRuleString(c, false) + gcRuleString, err := GcPolicyToGCRuleString(c, false) if err != nil { return nil, err } @@ -356,7 +372,7 @@ func gcPolicyToGCRuleString(gc bigtable.GCPolicy, topLevel bool) (map[string]int result["mode"] = "intersection" rules := []interface{}{} for _, c := range gc.(bigtable.IntersectionGCPolicy).Children { - gcRuleString, err := gcPolicyToGCRuleString(c, false) + gcRuleString, err := GcPolicyToGCRuleString(c, false) if err != nil 
{ return nil, err } @@ -375,7 +391,7 @@ func gcPolicyToGCRuleString(gc bigtable.GCPolicy, topLevel bool) (map[string]int } func resourceBigtableGCPolicyDestroy(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) if deletionPolicy := d.Get("deletion_policy"); deletionPolicy == "ABANDON" { // Allows for the GC policy to be abandoned without deletion to avoid possible @@ -384,18 +400,18 @@ func resourceBigtableGCPolicyDestroy(d *schema.ResourceData, meta interface{}) e return nil } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } ctx := context.Background() - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) if err != nil { return fmt.Errorf("Error starting admin client. %s", err) @@ -403,14 +419,19 @@ func resourceBigtableGCPolicyDestroy(d *schema.ResourceData, meta interface{}) e defer c.Close() - retryFunc := func() (interface{}, error) { + retryFunc := func() error { reqErr := c.SetGCPolicy(ctx, d.Get("table").(string), d.Get("column_family").(string), bigtable.NoGcPolicy()) - return "", reqErr + return reqErr } // The default delete timeout is 20 minutes. 
timeout := d.Timeout(schema.TimeoutDelete) pollInterval := time.Duration(30) * time.Second - _, err = retryWithPolling(retryFunc, timeout, pollInterval, isBigTableRetryableError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: retryFunc, + Timeout: timeout, + PollInterval: pollInterval, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsBigTableRetryableError}, + }) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_instance.go similarity index 83% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_instance.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_instance.go index 8c6a8d2397..2b95fc0a89 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_instance.go @@ -1,14 +1,21 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package bigtable import ( "context" "fmt" "log" + "strings" + "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "cloud.google.com/go/bigtable" ) @@ -23,15 +30,21 @@ func ResourceBigtableInstance() *schema.Resource { State: resourceBigtableInstanceImport, }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + }, + CustomizeDiff: customdiff.All( resourceBigtableInstanceClusterReorderTypeList, + resourceBigtableInstanceUniqueClusterID, ), SchemaVersion: 1, StateUpgraders: []schema.StateUpgrader{ { Type: resourceBigtableInstanceResourceV0().CoreConfigSchema().ImpliedType(), - Upgrade: resourceBigtableInstanceUpgradeV0, + Upgrade: ResourceBigtableInstanceUpgradeV0, Version: 0, }, }, @@ -69,7 +82,7 @@ func ResourceBigtableInstance() *schema.Resource { // so mark as computed. Computed: true, ValidateFunc: validation.IntAtLeast(1), - Description: `The number of nodes in your Cloud Bigtable cluster. Required, with a minimum of 1 for each cluster in an instance.`, + Description: `The number of nodes in the cluster. 
If no value is set, Cloud Bigtable automatically allocates nodes based on your data footprint and optimized for 50% storage utilization.`, }, "storage_type": { Type: schema.TypeString, @@ -161,15 +174,15 @@ func ResourceBigtableInstance() *schema.Resource { } func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } ctx := context.Background() - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -185,7 +198,7 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) er conf.DisplayName = displayName.(string) if _, ok := d.GetOk("labels"); ok { - conf.Labels = expandLabels(d) + conf.Labels = tpgresource.ExpandLabels(d) } switch d.Get("instance_type").(string) { @@ -207,12 +220,13 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) er defer c.Close() - err = c.CreateInstanceWithClusters(ctx, conf) - if err != nil { + ctxWithTimeout, cancel := context.WithTimeout(ctx, d.Timeout(schema.TimeoutCreate)) + defer cancel() + if err := c.CreateInstanceWithClusters(ctxWithTimeout, conf); err != nil { return fmt.Errorf("Error creating instance. 
%s", err) } - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -222,14 +236,14 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) er } func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } ctx := context.Background() - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -245,7 +259,7 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro instance, err := c.InstanceInfo(ctx, instanceName) if err != nil { - if isNotFoundGrpcError(err) { + if tpgresource.IsNotFoundGrpcError(err) { log.Printf("[WARN] Removing %s because it's gone", instanceName) d.SetId("") return nil @@ -259,7 +273,17 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro clusters, err := c.Clusters(ctx, instance.Name) if err != nil { - return fmt.Errorf("Error retrieving instance clusters. %s", err) + partiallyUnavailableErr, ok := err.(bigtable.ErrPartiallyUnavailable) + + if !ok { + return fmt.Errorf("Error retrieving instance clusters. %s", err) + } + + unavailableClusterZones := getUnavailableClusterZones(d.Get("cluster").([]interface{}), partiallyUnavailableErr.Locations) + + if len(unavailableClusterZones) > 0 { + return fmt.Errorf("Error retrieving instance clusters. 
The following zones are unavailable: %s", strings.Join(unavailableClusterZones, ", ")) + } } clustersNewState := []map[string]interface{}{} @@ -289,14 +313,14 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro } func resourceBigtableInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } ctx := context.Background() - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -318,7 +342,7 @@ func resourceBigtableInstanceUpdate(d *schema.ResourceData, meta interface{}) er conf.DisplayName = displayName.(string) if d.HasChange("labels") { - conf.Labels = expandLabels(d) + conf.Labels = tpgresource.ExpandLabels(d) } switch d.Get("instance_type").(string) { @@ -333,8 +357,9 @@ func resourceBigtableInstanceUpdate(d *schema.ResourceData, meta interface{}) er return err } - _, err = bigtable.UpdateInstanceAndSyncClusters(ctx, c, conf) - if err != nil { + ctxWithTimeout, cancel := context.WithTimeout(ctx, d.Timeout(schema.TimeoutUpdate)) + defer cancel() + if _, err := bigtable.UpdateInstanceAndSyncClusters(ctxWithTimeout, c, conf); err != nil { return fmt.Errorf("Error updating instance. 
%s", err) } @@ -345,15 +370,15 @@ func resourceBigtableInstanceDestroy(d *schema.ResourceData, meta interface{}) e if d.Get("deletion_protection").(bool) { return fmt.Errorf("cannot destroy instance without setting deletion_protection=false and running `terraform apply`") } - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } ctx := context.Background() - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -404,7 +429,24 @@ func flattenBigtableCluster(c *bigtable.ClusterInfo) map[string]interface{} { return cluster } -func expandBigtableClusters(clusters []interface{}, instanceID string, config *Config) ([]bigtable.ClusterConfig, error) { +func getUnavailableClusterZones(clusters []interface{}, unavailableZones []string) []string { + var zones []string + + for _, c := range clusters { + cluster := c.(map[string]interface{}) + zone := cluster["zone"].(string) + + for _, unavailableZone := range unavailableZones { + if zone == unavailableZone { + zones = append(zones, zone) + break + } + } + } + return zones +} + +func expandBigtableClusters(clusters []interface{}, instanceID string, config *transport_tpg.Config) ([]bigtable.ClusterConfig, error) { results := make([]bigtable.ClusterConfig, 0, len(clusters)) for _, c := range clusters { cluster := c.(map[string]interface{}) @@ -445,14 +487,31 @@ func expandBigtableClusters(clusters []interface{}, instanceID string, config *C // getBigtableZone reads the "zone" value from the given resource data and falls back // to provider's value if not given. If neither is provided, returns an error. 
-func getBigtableZone(z string, config *Config) (string, error) { +func getBigtableZone(z string, config *transport_tpg.Config) (string, error) { if z == "" { if config.Zone != "" { return config.Zone, nil } return "", fmt.Errorf("cannot determine zone: set in cluster.0.zone, or set provider-level zone") } - return GetResourceNameFromSelfLink(z), nil + return tpgresource.GetResourceNameFromSelfLink(z), nil +} + +// resourceBigtableInstanceUniqueClusterID asserts cluster ID uniqueness. +func resourceBigtableInstanceUniqueClusterID(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + _, newCount := diff.GetChange("cluster.#") + clusters := map[string]bool{} + + for i := 0; i < newCount.(int); i++ { + _, newId := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i)) + clusterID := newId.(string) + if clusters[clusterID] { + return fmt.Errorf("duplicated cluster_id: %q", clusterID) + } + clusters[clusterID] = true + } + + return nil } // resourceBigtableInstanceClusterReorderTypeList causes the cluster block to @@ -576,8 +635,8 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch } func resourceBigtableInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -586,7 +645,7 @@ func resourceBigtableInstanceImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_instance_migrate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_instance_migrate.go similarity index 92% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_instance_migrate.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_instance_migrate.go index 6a3ca7d39b..f798f5075e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_bigtable_instance_migrate.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_instance_migrate.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package bigtable import ( "context" @@ -72,7 +74,7 @@ func resourceBigtableInstanceResourceV0() *schema.Resource { } } -func resourceBigtableInstanceUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { +func ResourceBigtableInstanceUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { log.Printf("[DEBUG] Attributes before migration: %#v", rawState) rawState["deletion_protection"] = true diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_instance_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_instance_sweeper.go new file mode 100644 index 0000000000..ffcd483200 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_instance_sweeper.go @@ -0,0 +1,95 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package bigtable + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// This will sweep GCE Disk resources +func init() { + sweeper.AddTestSweepers("BigtableInstance", testSweepBigtableInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region +// We don't have a way to filter the list by zone, and it's not clear it's worth the +// effort as we only create within us-central1. +func testSweepBigtableInstance(region string) error { + resourceName := "BigtableInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + servicesUrl := "https://bigtableadmin.googleapis.com/v2/projects/" + config.Project + "/instances" + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: servicesUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", servicesUrl, err) + return nil + } + + resourceList, ok := res["instances"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Count items that weren't sweeped. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource id was nil", resourceName) + return nil + } + + id := obj["displayName"].(string) + // Increment count and skip if resource is not sweepable. + if !sweeper.IsSweepableTestResource(id) { + nonPrefixCount++ + continue + } + + deleteUrl := servicesUrl + "/" + id + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, id) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items without tf-test prefix remain.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_table.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_table.go new file mode 100644 index 0000000000..5272c99056 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/bigtable/resource_bigtable_table.go @@ -0,0 +1,403 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package bigtable + +import ( + "context" + "fmt" + "log" + "time" + + "cloud.google.com/go/bigtable" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceBigtableTable() *schema.Resource { + return &schema.Resource{ + Create: resourceBigtableTableCreate, + Read: resourceBigtableTableRead, + Update: resourceBigtableTableUpdate, + Delete: resourceBigtableTableDestroy, + + Importer: &schema.ResourceImporter{ + State: resourceBigtableTableImport, + }, + + // Set a longer timeout for table creation as adding column families can be slow. + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(45 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + }, + + // ---------------------------------------------------------------------- + // IMPORTANT: Do not add any additional ForceNew fields to this resource. + // Destroying/recreating tables can lead to data loss for users. + // ---------------------------------------------------------------------- + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the table. Must be 1-50 characters and must only contain hyphens, underscores, periods, letters and numbers.`, + }, + + "column_family": { + Type: schema.TypeSet, + Optional: true, + Description: `A group of columns within a table which share a common configuration. 
This can be specified multiple times.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "family": { + Type: schema.TypeString, + Required: true, + Description: `The name of the column family.`, + }, + }, + }, + }, + + "instance_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The name of the Bigtable instance.`, + }, + + "split_keys": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A list of predefined keys to split the table on. !> Warning: Modifying the split_keys of an existing table will cause Terraform to delete/recreate the entire google_bigtable_table resource.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "deletion_protection": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"PROTECTED", "UNPROTECTED"}, false), + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A field to make the table protected against data loss i.e. when set to PROTECTED, deleting the table, the column families in the table, and the instance containing the table would be prohibited. If not provided, currently deletion protection will be set to UNPROTECTED as it is the API default value.`, + }, + + "change_stream_retention": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: verify.ValidateDuration(), + Description: `Duration to retain change stream data for the table. Set to 0 to disable. 
Must be between 1 and 7 days.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBigtableTableCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + ctx := context.Background() + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. %s", err) + } + if err := d.Set("instance_name", instanceName); err != nil { + return fmt.Errorf("Error setting instance_name: %s", err) + } + + defer c.Close() + + tableId := d.Get("name").(string) + tblConf := bigtable.TableConf{TableID: tableId} + + // Check if deletion protection is given + // If not given, currently tblConf.DeletionProtection will be set to false in the API + deletionProtection := d.Get("deletion_protection") + if deletionProtection == "PROTECTED" { + tblConf.DeletionProtection = bigtable.Protected + } else if deletionProtection == "UNPROTECTED" { + tblConf.DeletionProtection = bigtable.Unprotected + } + + if changeStreamRetention, ok := d.GetOk("change_stream_retention"); ok { + tblConf.ChangeStreamRetention, err = time.ParseDuration(changeStreamRetention.(string)) + if err != nil { + return fmt.Errorf("Error parsing change stream retention: %s", err) + } + } + + // Set the split keys if given. + if v, ok := d.GetOk("split_keys"); ok { + tblConf.SplitKeys = tpgresource.ConvertStringArr(v.([]interface{})) + } + + // Set the column families if given. 
+ columnFamilies := make(map[string]bigtable.GCPolicy) + if d.Get("column_family.#").(int) > 0 { + columns := d.Get("column_family").(*schema.Set).List() + + for _, co := range columns { + column := co.(map[string]interface{}) + + if v, ok := column["family"]; ok { + // By default, there is no GC rules. + columnFamilies[v.(string)] = bigtable.NoGcPolicy() + } + } + } + tblConf.Families = columnFamilies + + // This method may return before the table's creation is complete - we may need to wait until + // it exists in the future. + // Set a longer timeout as creating table and adding column families can be pretty slow. + ctxWithTimeout, cancel := context.WithTimeout(ctx, d.Timeout(schema.TimeoutCreate)) + defer cancel() // Always call cancel. + err = c.CreateTableFromConf(ctxWithTimeout, &tblConf) + if err != nil { + return fmt.Errorf("Error creating table. %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return resourceBigtableTableRead(d, meta) +} + +func resourceBigtableTableRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + ctx := context.Background() + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. 
%s", err) + } + + defer c.Close() + + name := d.Get("name").(string) + table, err := c.TableInfo(ctx, name) + if err != nil { + if tpgresource.IsNotFoundGrpcError(err) { + log.Printf("[WARN] Removing %s because it's gone", name) + d.SetId("") + return nil + } + return err + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("column_family", FlattenColumnFamily(table.Families)); err != nil { + return fmt.Errorf("Error setting column_family: %s", err) + } + + deletionProtection := table.DeletionProtection + if deletionProtection == bigtable.Protected { + if err := d.Set("deletion_protection", "PROTECTED"); err != nil { + return fmt.Errorf("Error setting deletion_protection: %s", err) + } + } else if deletionProtection == bigtable.Unprotected { + if err := d.Set("deletion_protection", "UNPROTECTED"); err != nil { + return fmt.Errorf("Error setting deletion_protection: %s", err) + } + } else { + return fmt.Errorf("Error setting deletion_protection, it should be either PROTECTED or UNPROTECTED") + } + + changeStreamRetention := table.ChangeStreamRetention + if changeStreamRetention != nil { + if err := d.Set("change_stream_retention", changeStreamRetention.(time.Duration).String()); err != nil { + return fmt.Errorf("Error setting change_stream_retention: %s", err) + } + } + + return nil +} + +func resourceBigtableTableUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + ctx := context.Background() + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. 
%s", err) + } + defer c.Close() + + o, n := d.GetChange("column_family") + oSet := o.(*schema.Set) + nSet := n.(*schema.Set) + name := d.Get("name").(string) + + // Add column families that are in new but not in old + for _, new := range nSet.Difference(oSet).List() { + column := new.(map[string]interface{}) + + if v, ok := column["family"]; ok { + log.Printf("[DEBUG] adding column family %q", v) + if err := c.CreateColumnFamily(ctx, name, v.(string)); err != nil { + return fmt.Errorf("Error creating column family %q: %s", v, err) + } + } + } + + // Remove column families that are in old but not in new + for _, old := range oSet.Difference(nSet).List() { + column := old.(map[string]interface{}) + + if v, ok := column["family"]; ok { + log.Printf("[DEBUG] removing column family %q", v) + if err := c.DeleteColumnFamily(ctx, name, v.(string)); err != nil { + return fmt.Errorf("Error deleting column family %q: %s", v, err) + } + } + } + + ctxWithTimeout, cancel := context.WithTimeout(ctx, d.Timeout(schema.TimeoutCreate)) + defer cancel() + if d.HasChange("deletion_protection") { + deletionProtection := d.Get("deletion_protection") + if deletionProtection == "PROTECTED" { + if err := c.UpdateTableWithDeletionProtection(ctxWithTimeout, name, bigtable.Protected); err != nil { + return fmt.Errorf("Error updating deletion protection in table %v: %s", name, err) + } + } else if deletionProtection == "UNPROTECTED" { + if err := c.UpdateTableWithDeletionProtection(ctxWithTimeout, name, bigtable.Unprotected); err != nil { + return fmt.Errorf("Error updating deletion protection in table %v: %s", name, err) + } + } + } + + if d.HasChange("change_stream_retention") { + changeStreamRetention := d.Get("change_stream_retention") + changeStream, err := time.ParseDuration(changeStreamRetention.(string)) + if err != nil { + return fmt.Errorf("Error parsing change stream retention: %s", err) + } + if changeStream == 0 { + if err := c.UpdateTableDisableChangeStream(ctxWithTimeout, name); 
err != nil { + return fmt.Errorf("Error disabling change stream retention in table %v: %s", name, err) + } + } else { + if err := c.UpdateTableWithChangeStream(ctxWithTimeout, name, changeStream); err != nil { + return fmt.Errorf("Error updating change stream retention in table %v: %s", name, err) + } + } + } + + return resourceBigtableTableRead(d, meta) +} + +func resourceBigtableTableDestroy(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + ctx := context.Background() + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + instanceName := tpgresource.GetResourceNameFromSelfLink(d.Get("instance_name").(string)) + c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. %s", err) + } + + defer c.Close() + + name := d.Get("name").(string) + err = c.DeleteTable(ctx, name) + if err != nil { + return fmt.Errorf("Error deleting table. 
%s", err) + } + + d.SetId("") + + return nil +} + +func FlattenColumnFamily(families []string) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(families)) + + for _, f := range families { + data := make(map[string]interface{}) + data["family"] = f + result = append(result, data) + } + + return result +} + +// TODO(rileykarson): Fix the stored import format after rebasing 3.0.0 +func resourceBigtableTableImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/instances/(?P[^/]+)/tables/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/billing/data_source_google_billing_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/billing/data_source_google_billing_account.go new file mode 100644 index 0000000000..7372503fc6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/billing/data_source_google_billing_account.go @@ -0,0 +1,149 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package billing + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/cloudbilling/v1" +) + +func DataSourceGoogleBillingAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceBillingAccountRead, + Schema: map[string]*schema.Schema{ + "billing_account": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"display_name"}, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"billing_account"}, + }, + "open": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "project_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "lookup_projects": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + }, + } +} + +func dataSourceBillingAccountRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + open, openOk := d.GetOkExists("open") + + var billingAccount *cloudbilling.BillingAccount + if v, ok := d.GetOk("billing_account"); ok { + resp, err := config.NewBillingClient(userAgent).BillingAccounts.Get(CanonicalBillingAccountName(v.(string))).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Billing Account Not Found : %s", v)) + } + + if openOk && resp.Open != open.(bool) { + return fmt.Errorf("Billing account not found: %s", v) + } + + billingAccount = resp + } else if v, ok := d.GetOk("display_name"); ok { + token := "" + for paginate := true; paginate; { + resp, err := 
config.NewBillingClient(userAgent).BillingAccounts.List().PageToken(token).Do() + if err != nil { + return fmt.Errorf("Error reading billing accounts: %s", err) + } + + for _, ba := range resp.BillingAccounts { + if ba.DisplayName == v.(string) { + if openOk && ba.Open != open.(bool) { + continue + } + if billingAccount != nil { + return fmt.Errorf("More than one matching billing account found") + } + billingAccount = ba + } + } + + token = resp.NextPageToken + paginate = token != "" + } + + if billingAccount == nil { + return fmt.Errorf("Billing account not found: %s", v) + } + } else { + return fmt.Errorf("one of billing_account or display_name must be set") + } + + if d.Get("lookup_projects").(bool) { + resp, err := config.NewBillingClient(userAgent).BillingAccounts.Projects.List(billingAccount.Name).Do() + if err != nil { + return fmt.Errorf("Error reading billing account projects: %s", err) + } + projectIds := flattenBillingProjects(resp.ProjectBillingInfo) + + if err := d.Set("project_ids", projectIds); err != nil { + return fmt.Errorf("Error setting project_ids: %s", err) + } + } + + d.SetId(tpgresource.GetResourceNameFromSelfLink(billingAccount.Name)) + if err := d.Set("name", billingAccount.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("display_name", billingAccount.DisplayName); err != nil { + return fmt.Errorf("Error setting display_name: %s", err) + } + if err := d.Set("open", billingAccount.Open); err != nil { + return fmt.Errorf("Error setting open: %s", err) + } + + return nil +} + +func CanonicalBillingAccountName(ba string) string { + if strings.HasPrefix(ba, "billingAccounts/") { + return ba + } + + return "billingAccounts/" + ba +} + +func flattenBillingProjects(billingProjects []*cloudbilling.ProjectBillingInfo) []string { + projectIds := make([]string, len(billingProjects)) + for i, billingProject := range billingProjects { + projectIds[i] = billingProject.ProjectId + } + + return projectIds +} 
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_billing_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/billing/iam_billing_account.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_billing_account.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/billing/iam_billing_account.go index 977e415528..8f9871f9ba 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_billing_account.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/billing/iam_billing_account.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package billing import ( "fmt" "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/cloudbilling/v1" "google.golang.org/api/cloudresourcemanager/v1" ) @@ -19,11 +24,11 @@ var IamBillingAccountSchema = map[string]*schema.Schema{ type BillingAccountIamUpdater struct { billingAccountId string - d TerraformResourceData - Config *Config + d tpgresource.TerraformResourceData + Config *transport_tpg.Config } -func NewBillingAccountIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { +func NewBillingAccountIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { return &BillingAccountIamUpdater{ billingAccountId: canonicalBillingAccountId(d.Get("billing_account_id").(string)), d: d, @@ 
-31,7 +36,7 @@ func NewBillingAccountIamUpdater(d TerraformResourceData, config *Config) (Resou }, nil } -func BillingAccountIdParseFunc(d *schema.ResourceData, _ *Config) error { +func BillingAccountIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { if err := d.Set("billing_account_id", d.Id()); err != nil { return fmt.Errorf("Error setting billing_account_id: %s", err) } @@ -39,7 +44,7 @@ func BillingAccountIdParseFunc(d *schema.ResourceData, _ *Config) error { } func (u *BillingAccountIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) if err != nil { return nil, err } @@ -53,7 +58,7 @@ func (u *BillingAccountIamUpdater) SetResourceIamPolicy(policy *cloudresourceman return err } - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) if err != nil { return err } @@ -87,7 +92,7 @@ func canonicalBillingAccountId(resource string) string { func resourceManagerToBillingPolicy(p *cloudresourcemanager.Policy) (*cloudbilling.Policy, error) { out := &cloudbilling.Policy{} - err := Convert(p, out) + err := tpgresource.Convert(p, out) if err != nil { return nil, errwrap.Wrapf("Cannot convert a v1 policy to a billing policy: {{err}}", err) } @@ -96,7 +101,7 @@ func resourceManagerToBillingPolicy(p *cloudresourcemanager.Policy) (*cloudbilli func billingToResourceManagerPolicy(p *cloudbilling.Policy) (*cloudresourcemanager.Policy, error) { out := &cloudresourcemanager.Policy{} - err := Convert(p, out) + err := tpgresource.Convert(p, out) if err != nil { return nil, errwrap.Wrapf("Cannot convert a billing policy to a v1 policy: {{err}}", err) } @@ -104,7 +109,7 @@ func billingToResourceManagerPolicy(p *cloudbilling.Policy) (*cloudresourcemanag } // Retrieve the existing IAM Policy for a billing 
account -func getBillingAccountIamPolicyByBillingAccountName(resource string, config *Config, userAgent string) (*cloudresourcemanager.Policy, error) { +func getBillingAccountIamPolicyByBillingAccountName(resource string, config *transport_tpg.Config, userAgent string) (*cloudresourcemanager.Policy, error) { p, err := config.NewBillingClient(userAgent).BillingAccounts.GetIamPolicy("billingAccounts/" + resource).Do() if err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_billing_budget.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/billing/resource_billing_budget.go similarity index 80% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_billing_budget.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/billing/resource_billing_budget.go index ca3cdc65ba..aead141dc0 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_billing_budget.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/billing/resource_billing_budget.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package billing import ( "context" @@ -24,9 +27,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) -// Check to see if a specified value in the config exists and suppress diffs if so. Otherwise run emptyOrDefaultStringSuppress. +// Check to see if a specified value in the config exists and suppress diffs if so. Otherwise run EmptyOrDefaultStringSuppress. func checkValAndDefaultStringSuppress(defaultVal string, checkVal string) schema.SchemaDiffSuppressFunc { return func(k, old, new string, d *schema.ResourceData) bool { @@ -55,10 +62,11 @@ func ResourceBillingBudget() *schema.Resource { }, SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ { Type: resourceBillingBudgetResourceV0().CoreConfigSchema().ImpliedType(), - Upgrade: resourceBillingBudgetUpgradeV0, + Upgrade: ResourceBillingBudgetUpgradeV0, Version: 0, }, }, @@ -188,7 +196,7 @@ spend against the budget.`, "calendar_period": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"MONTH", "QUARTER", "YEAR", "CALENDAR_PERIOD_UNSPECIFIED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"MONTH", "QUARTER", "YEAR", "CALENDAR_PERIOD_UNSPECIFIED", ""}), DiffSuppressFunc: checkValAndDefaultStringSuppress("MONTH", "budget_filter.0.custom_period.0.start_date"), Description: `A CalendarPeriod represents the abstract concept of a recurring time period that has a canonical start. 
Grammatically, "the start of the current CalendarPeriod". @@ -214,7 +222,7 @@ If creditTypesTreatment is not INCLUDE_SPECIFIED_CREDITS, this field must be emp "credit_types_treatment": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"INCLUDE_ALL_CREDITS", "EXCLUDE_ALL_CREDITS", "INCLUDE_SPECIFIED_CREDITS", ""}), + ValidateFunc: verify.ValidateEnum([]string{"INCLUDE_ALL_CREDITS", "EXCLUDE_ALL_CREDITS", "INCLUDE_SPECIFIED_CREDITS", ""}), Description: `Specifies how credits should be treated when determining spend for threshold calculations. Default value: "INCLUDE_ALL_CREDITS" Possible values: ["INCLUDE_ALL_CREDITS", "EXCLUDE_ALL_CREDITS", "INCLUDE_SPECIFIED_CREDITS"]`, Default: "INCLUDE_ALL_CREDITS", @@ -261,7 +269,7 @@ Exactly one of 'calendar_period', 'custom_period' must be provided.`, "end_date": { Type: schema.TypeList, Optional: true, - Description: `Optional. The end date of the time period. Budgets with elapsed end date won't be processed. + Description: `Optional. The end date of the time period. Budgets with elapsed end date won't be processed. If unset, specifies to track all usage incurred since the startDate.`, MaxItems: 1, Elem: &schema.Resource{ @@ -371,7 +379,7 @@ budget.`, "spend_basis": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"CURRENT_SPEND", "FORECASTED_SPEND", ""}), + ValidateFunc: verify.ValidateEnum([]string{"CURRENT_SPEND", "FORECASTED_SPEND", ""}), Description: `The type of basis used to determine if spend has passed the threshold. 
Default value: "CURRENT_SPEND" Possible values: ["CURRENT_SPEND", "FORECASTED_SPEND"]`, Default: "CURRENT_SPEND", @@ -392,8 +400,8 @@ billingAccounts/{billingAccountId}/budgets/{budgetId}.`, } func resourceBillingBudgetCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -402,35 +410,35 @@ func resourceBillingBudgetCreate(d *schema.ResourceData, meta interface{}) error displayNameProp, err := expandBillingBudgetDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } budgetFilterProp, err := expandBillingBudgetBudgetFilter(d.Get("budget_filter"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("budget_filter"); !isEmptyValue(reflect.ValueOf(budgetFilterProp)) && (ok || !reflect.DeepEqual(v, budgetFilterProp)) { + } else if v, ok := d.GetOkExists("budget_filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(budgetFilterProp)) && (ok || !reflect.DeepEqual(v, budgetFilterProp)) { obj["budgetFilter"] = budgetFilterProp } amountProp, err := expandBillingBudgetAmount(d.Get("amount"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("amount"); !isEmptyValue(reflect.ValueOf(amountProp)) && (ok || !reflect.DeepEqual(v, amountProp)) { + } else if v, ok := d.GetOkExists("amount"); !tpgresource.IsEmptyValue(reflect.ValueOf(amountProp)) && (ok || !reflect.DeepEqual(v, amountProp)) { obj["amount"] = amountProp } 
thresholdRulesProp, err := expandBillingBudgetThresholdRules(d.Get("threshold_rules"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("threshold_rules"); !isEmptyValue(reflect.ValueOf(thresholdRulesProp)) && (ok || !reflect.DeepEqual(v, thresholdRulesProp)) { + } else if v, ok := d.GetOkExists("threshold_rules"); !tpgresource.IsEmptyValue(reflect.ValueOf(thresholdRulesProp)) && (ok || !reflect.DeepEqual(v, thresholdRulesProp)) { obj["thresholdRules"] = thresholdRulesProp } notificationsRuleProp, err := expandBillingBudgetAllUpdatesRule(d.Get("all_updates_rule"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("all_updates_rule"); !isEmptyValue(reflect.ValueOf(notificationsRuleProp)) && (ok || !reflect.DeepEqual(v, notificationsRuleProp)) { + } else if v, ok := d.GetOkExists("all_updates_rule"); !tpgresource.IsEmptyValue(reflect.ValueOf(notificationsRuleProp)) && (ok || !reflect.DeepEqual(v, notificationsRuleProp)) { obj["notificationsRule"] = notificationsRuleProp } - url, err := replaceVars(d, config, "{{BillingBasePath}}billingAccounts/{{billing_account}}/budgets") + url, err := tpgresource.ReplaceVars(d, config, "{{BillingBasePath}}billingAccounts/{{billing_account}}/budgets") if err != nil { return err } @@ -439,11 +447,19 @@ func resourceBillingBudgetCreate(d *schema.ResourceData, meta interface{}) error billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { 
return fmt.Errorf("Error creating Budget: %s", err) } @@ -452,7 +468,7 @@ func resourceBillingBudgetCreate(d *schema.ResourceData, meta interface{}) error } // Store the ID now - id, err := replaceVars(d, config, "billingAccounts/{{billing_account}}/budgets/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "billingAccounts/{{billing_account}}/budgets/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -464,13 +480,13 @@ func resourceBillingBudgetCreate(d *schema.ResourceData, meta interface{}) error } func resourceBillingBudgetRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{BillingBasePath}}billingAccounts/{{billing_account}}/budgets/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{BillingBasePath}}billingAccounts/{{billing_account}}/budgets/{{name}}") if err != nil { return err } @@ -478,13 +494,19 @@ func resourceBillingBudgetRead(d *schema.ResourceData, meta interface{}) error { billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("BillingBudget %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BillingBudget %q", d.Id())) } if err := d.Set("name", flattenBillingBudgetName(res["name"], d, config)); err 
!= nil { @@ -510,8 +532,8 @@ func resourceBillingBudgetRead(d *schema.ResourceData, meta interface{}) error { } func resourceBillingBudgetUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -522,35 +544,35 @@ func resourceBillingBudgetUpdate(d *schema.ResourceData, meta interface{}) error displayNameProp, err := expandBillingBudgetDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } budgetFilterProp, err := expandBillingBudgetBudgetFilter(d.Get("budget_filter"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("budget_filter"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, budgetFilterProp)) { + } else if v, ok := d.GetOkExists("budget_filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, budgetFilterProp)) { obj["budgetFilter"] = budgetFilterProp } amountProp, err := expandBillingBudgetAmount(d.Get("amount"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("amount"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, amountProp)) { + } else if v, ok := d.GetOkExists("amount"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, amountProp)) { obj["amount"] = amountProp } thresholdRulesProp, err := expandBillingBudgetThresholdRules(d.Get("threshold_rules"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("threshold_rules"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, thresholdRulesProp)) { + } else if v, ok := d.GetOkExists("threshold_rules"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, thresholdRulesProp)) { obj["thresholdRules"] = thresholdRulesProp } notificationsRuleProp, err := expandBillingBudgetAllUpdatesRule(d.Get("all_updates_rule"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("all_updates_rule"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationsRuleProp)) { + } else if v, ok := d.GetOkExists("all_updates_rule"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationsRuleProp)) { obj["notificationsRule"] = notificationsRuleProp } - url, err := replaceVars(d, config, "{{BillingBasePath}}billingAccounts/{{billing_account}}/budgets/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{BillingBasePath}}billingAccounts/{{billing_account}}/budgets/{{name}}") if err != nil { return err } @@ -589,19 +611,27 @@ func resourceBillingBudgetUpdate(d *schema.ResourceData, meta interface{}) error "notificationsRule.monitoringNotificationChannels", "notificationsRule.disableDefaultIamRecipients") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, 
userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Budget %q: %s", d.Id(), err) @@ -613,15 +643,15 @@ func resourceBillingBudgetUpdate(d *schema.ResourceData, meta interface{}) error } func resourceBillingBudgetDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - url, err := replaceVars(d, config, "{{BillingBasePath}}billingAccounts/{{billing_account}}/budgets/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{BillingBasePath}}billingAccounts/{{billing_account}}/budgets/{{name}}") if err != nil { return err } @@ -630,13 +660,21 @@ func resourceBillingBudgetDelete(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Deleting Budget %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "Budget") + return transport_tpg.HandleNotFoundError(err, d, "Budget") } log.Printf("[DEBUG] Finished deleting Budget %q: %#v", 
d.Id(), res) @@ -644,8 +682,8 @@ func resourceBillingBudgetDelete(d *schema.ResourceData, meta interface{}) error } func resourceBillingBudgetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "billingAccounts/(?P[^/]+)/budgets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -654,7 +692,7 @@ func resourceBillingBudgetImport(d *schema.ResourceData, meta interface{}) ([]*s } // Replace import id for the resource id - id, err := replaceVars(d, config, "billingAccounts/{{billing_account}}/budgets/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "billingAccounts/{{billing_account}}/budgets/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -663,18 +701,18 @@ func resourceBillingBudgetImport(d *schema.ResourceData, meta interface{}) ([]*s return []*schema.ResourceData{d}, nil } -func flattenBillingBudgetName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return NameFromSelfLinkStateFunc(v) + return tpgresource.NameFromSelfLinkStateFunc(v) } -func flattenBillingBudgetDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBillingBudgetBudgetFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -701,30 +739,30 @@ func flattenBillingBudgetBudgetFilter(v interface{}, d *schema.ResourceData, con 
flattenBillingBudgetBudgetFilterCustomPeriod(original["customPeriod"], d, config) return []interface{}{transformed} } -func flattenBillingBudgetBudgetFilterProjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterProjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenBillingBudgetBudgetFilterCreditTypesTreatment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterCreditTypesTreatment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBillingBudgetBudgetFilterServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBillingBudgetBudgetFilterCreditTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterCreditTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBillingBudgetBudgetFilterSubaccounts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterSubaccounts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBillingBudgetBudgetFilterLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { /* note: api only accepts below format. 
Also only takes a single element in the array labels = { @@ -749,11 +787,11 @@ func flattenBillingBudgetBudgetFilterLabels(v interface{}, d *schema.ResourceDat return transformed } -func flattenBillingBudgetBudgetFilterCalendarPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterCalendarPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBillingBudgetBudgetFilterCustomPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterCustomPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -768,7 +806,7 @@ func flattenBillingBudgetBudgetFilterCustomPeriod(v interface{}, d *schema.Resou flattenBillingBudgetBudgetFilterCustomPeriodEndDate(original["endDate"], d, config) return []interface{}{transformed} } -func flattenBillingBudgetBudgetFilterCustomPeriodStartDate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterCustomPeriodStartDate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -785,10 +823,10 @@ func flattenBillingBudgetBudgetFilterCustomPeriodStartDate(v interface{}, d *sch flattenBillingBudgetBudgetFilterCustomPeriodStartDateDay(original["day"], d, config) return []interface{}{transformed} } -func flattenBillingBudgetBudgetFilterCustomPeriodStartDateYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterCustomPeriodStartDateYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -802,10 +840,10 @@ func 
flattenBillingBudgetBudgetFilterCustomPeriodStartDateYear(v interface{}, d return v // let terraform core handle it otherwise } -func flattenBillingBudgetBudgetFilterCustomPeriodStartDateMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterCustomPeriodStartDateMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -819,10 +857,10 @@ func flattenBillingBudgetBudgetFilterCustomPeriodStartDateMonth(v interface{}, d return v // let terraform core handle it otherwise } -func flattenBillingBudgetBudgetFilterCustomPeriodStartDateDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterCustomPeriodStartDateDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -836,7 +874,7 @@ func flattenBillingBudgetBudgetFilterCustomPeriodStartDateDay(v interface{}, d * return v // let terraform core handle it otherwise } -func flattenBillingBudgetBudgetFilterCustomPeriodEndDate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterCustomPeriodEndDate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -853,10 +891,10 @@ func flattenBillingBudgetBudgetFilterCustomPeriodEndDate(v interface{}, d *schem flattenBillingBudgetBudgetFilterCustomPeriodEndDateDay(original["day"], d, config) return []interface{}{transformed} } -func 
flattenBillingBudgetBudgetFilterCustomPeriodEndDateYear(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterCustomPeriodEndDateYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -870,10 +908,10 @@ func flattenBillingBudgetBudgetFilterCustomPeriodEndDateYear(v interface{}, d *s return v // let terraform core handle it otherwise } -func flattenBillingBudgetBudgetFilterCustomPeriodEndDateMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterCustomPeriodEndDateMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -887,10 +925,10 @@ func flattenBillingBudgetBudgetFilterCustomPeriodEndDateMonth(v interface{}, d * return v // let terraform core handle it otherwise } -func flattenBillingBudgetBudgetFilterCustomPeriodEndDateDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetBudgetFilterCustomPeriodEndDateDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -904,7 +942,7 @@ func flattenBillingBudgetBudgetFilterCustomPeriodEndDateDay(v interface{}, d *sc return v // let terraform core handle it otherwise } -func flattenBillingBudgetAmount(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetAmount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -919,7 +957,7 @@ func flattenBillingBudgetAmount(v interface{}, d *schema.ResourceData, config *C flattenBillingBudgetAmountLastPeriodAmount(original["lastPeriodAmount"], d, config) return []interface{}{transformed} } -func flattenBillingBudgetAmountSpecifiedAmount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetAmountSpecifiedAmount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -936,18 +974,18 @@ func flattenBillingBudgetAmountSpecifiedAmount(v interface{}, d *schema.Resource flattenBillingBudgetAmountSpecifiedAmountNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenBillingBudgetAmountSpecifiedAmountCurrencyCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetAmountSpecifiedAmountCurrencyCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBillingBudgetAmountSpecifiedAmountUnits(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetAmountSpecifiedAmountUnits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBillingBudgetAmountSpecifiedAmountNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetAmountSpecifiedAmountNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -961,11 +999,11 @@ func 
flattenBillingBudgetAmountSpecifiedAmountNanos(v interface{}, d *schema.Res return v // let terraform core handle it otherwise } -func flattenBillingBudgetAmountLastPeriodAmount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetAmountLastPeriodAmount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v != nil } -func flattenBillingBudgetThresholdRules(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetThresholdRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -984,15 +1022,15 @@ func flattenBillingBudgetThresholdRules(v interface{}, d *schema.ResourceData, c } return transformed } -func flattenBillingBudgetThresholdRulesThresholdPercent(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetThresholdRulesThresholdPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBillingBudgetThresholdRulesSpendBasis(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetThresholdRulesSpendBasis(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBillingBudgetAllUpdatesRule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetAllUpdatesRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1011,31 +1049,31 @@ func flattenBillingBudgetAllUpdatesRule(v interface{}, d *schema.ResourceData, c flattenBillingBudgetAllUpdatesRuleDisableDefaultIamRecipients(original["disableDefaultIamRecipients"], d, config) return []interface{}{transformed} } -func flattenBillingBudgetAllUpdatesRulePubsubTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenBillingBudgetAllUpdatesRulePubsubTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBillingBudgetAllUpdatesRuleSchemaVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenBillingBudgetAllUpdatesRuleSchemaVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return "1.0" } return v } -func flattenBillingBudgetAllUpdatesRuleMonitoringNotificationChannels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetAllUpdatesRuleMonitoringNotificationChannels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenBillingBudgetAllUpdatesRuleDisableDefaultIamRecipients(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenBillingBudgetAllUpdatesRuleDisableDefaultIamRecipients(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandBillingBudgetDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetBudgetFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1047,84 +1085,84 @@ func expandBillingBudgetBudgetFilter(v interface{}, d TerraformResourceData, con transformedProjects, err := expandBillingBudgetBudgetFilterProjects(original["projects"], d, config) if err != nil { return nil, err - } else 
if val := reflect.ValueOf(transformedProjects); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projects"] = transformedProjects } transformedCreditTypesTreatment, err := expandBillingBudgetBudgetFilterCreditTypesTreatment(original["credit_types_treatment"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCreditTypesTreatment); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCreditTypesTreatment); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["creditTypesTreatment"] = transformedCreditTypesTreatment } transformedServices, err := expandBillingBudgetBudgetFilterServices(original["services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["services"] = transformedServices } transformedCreditTypes, err := expandBillingBudgetBudgetFilterCreditTypes(original["credit_types"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCreditTypes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCreditTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["creditTypes"] = transformedCreditTypes } transformedSubaccounts, err := expandBillingBudgetBudgetFilterSubaccounts(original["subaccounts"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSubaccounts); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSubaccounts); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["subaccounts"] = transformedSubaccounts } transformedLabels, err := expandBillingBudgetBudgetFilterLabels(original["labels"], d, config) if 
err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["labels"] = transformedLabels } transformedCalendarPeriod, err := expandBillingBudgetBudgetFilterCalendarPeriod(original["calendar_period"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCalendarPeriod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCalendarPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["calendarPeriod"] = transformedCalendarPeriod } transformedCustomPeriod, err := expandBillingBudgetBudgetFilterCustomPeriod(original["custom_period"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCustomPeriod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCustomPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["customPeriod"] = transformedCustomPeriod } return transformed, nil } -func expandBillingBudgetBudgetFilterProjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterProjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandBillingBudgetBudgetFilterCreditTypesTreatment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterCreditTypesTreatment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetBudgetFilterServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterServices(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetBudgetFilterCreditTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterCreditTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetBudgetFilterSubaccounts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterSubaccounts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetBudgetFilterLabels(v interface{}, d TerraformResourceData, config *Config) (map[string][]string, error) { +func expandBillingBudgetBudgetFilterLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string][]string, error) { if v == nil { return map[string][]string{}, nil } @@ -1135,11 +1173,11 @@ func expandBillingBudgetBudgetFilterLabels(v interface{}, d TerraformResourceDat return m, nil } -func expandBillingBudgetBudgetFilterCalendarPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterCalendarPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetBudgetFilterCustomPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterCustomPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1151,21 +1189,21 @@ func expandBillingBudgetBudgetFilterCustomPeriod(v interface{}, d TerraformResou transformedStartDate, err := expandBillingBudgetBudgetFilterCustomPeriodStartDate(original["start_date"], d, config) 
if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStartDate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStartDate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["startDate"] = transformedStartDate } transformedEndDate, err := expandBillingBudgetBudgetFilterCustomPeriodEndDate(original["end_date"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEndDate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEndDate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["endDate"] = transformedEndDate } return transformed, nil } -func expandBillingBudgetBudgetFilterCustomPeriodStartDate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterCustomPeriodStartDate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1177,40 +1215,40 @@ func expandBillingBudgetBudgetFilterCustomPeriodStartDate(v interface{}, d Terra transformedYear, err := expandBillingBudgetBudgetFilterCustomPeriodStartDateYear(original["year"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["year"] = transformedYear } transformedMonth, err := expandBillingBudgetBudgetFilterCustomPeriodStartDateMonth(original["month"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["month"] = transformedMonth } transformedDay, err := 
expandBillingBudgetBudgetFilterCustomPeriodStartDateDay(original["day"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["day"] = transformedDay } return transformed, nil } -func expandBillingBudgetBudgetFilterCustomPeriodStartDateYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterCustomPeriodStartDateYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetBudgetFilterCustomPeriodStartDateMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterCustomPeriodStartDateMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetBudgetFilterCustomPeriodStartDateDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterCustomPeriodStartDateDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetBudgetFilterCustomPeriodEndDate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterCustomPeriodEndDate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1222,40 +1260,40 @@ func expandBillingBudgetBudgetFilterCustomPeriodEndDate(v interface{}, d Terrafo transformedYear, err := expandBillingBudgetBudgetFilterCustomPeriodEndDateYear(original["year"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["year"] = transformedYear } transformedMonth, err := expandBillingBudgetBudgetFilterCustomPeriodEndDateMonth(original["month"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["month"] = transformedMonth } transformedDay, err := expandBillingBudgetBudgetFilterCustomPeriodEndDateDay(original["day"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["day"] = transformedDay } return transformed, nil } -func expandBillingBudgetBudgetFilterCustomPeriodEndDateYear(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterCustomPeriodEndDateYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetBudgetFilterCustomPeriodEndDateMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterCustomPeriodEndDateMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetBudgetFilterCustomPeriodEndDateDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetBudgetFilterCustomPeriodEndDateDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetAmount(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetAmount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1267,21 +1305,21 @@ func expandBillingBudgetAmount(v interface{}, d TerraformResourceData, config *C transformedSpecifiedAmount, err := expandBillingBudgetAmountSpecifiedAmount(original["specified_amount"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSpecifiedAmount); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSpecifiedAmount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["specifiedAmount"] = transformedSpecifiedAmount } transformedLastPeriodAmount, err := expandBillingBudgetAmountLastPeriodAmount(original["last_period_amount"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLastPeriodAmount); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLastPeriodAmount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["lastPeriodAmount"] = transformedLastPeriodAmount } return transformed, nil } -func expandBillingBudgetAmountSpecifiedAmount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetAmountSpecifiedAmount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1293,40 +1331,40 @@ func expandBillingBudgetAmountSpecifiedAmount(v interface{}, d TerraformResource transformedCurrencyCode, err := expandBillingBudgetAmountSpecifiedAmountCurrencyCode(original["currency_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCurrencyCode); val.IsValid() && !isEmptyValue(val) { + } else if 
val := reflect.ValueOf(transformedCurrencyCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["currencyCode"] = transformedCurrencyCode } transformedUnits, err := expandBillingBudgetAmountSpecifiedAmountUnits(original["units"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUnits); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUnits); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["units"] = transformedUnits } transformedNanos, err := expandBillingBudgetAmountSpecifiedAmountNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandBillingBudgetAmountSpecifiedAmountCurrencyCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetAmountSpecifiedAmountCurrencyCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetAmountSpecifiedAmountUnits(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetAmountSpecifiedAmountUnits(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetAmountSpecifiedAmountNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetAmountSpecifiedAmountNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetAmountLastPeriodAmount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandBillingBudgetAmountLastPeriodAmount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil || !v.(bool) { return nil, nil } @@ -1334,7 +1372,7 @@ func expandBillingBudgetAmountLastPeriodAmount(v interface{}, d TerraformResourc return struct{}{}, nil } -func expandBillingBudgetThresholdRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetThresholdRules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1354,7 +1392,7 @@ func expandBillingBudgetThresholdRules(v interface{}, d TerraformResourceData, c transformedSpendBasis, err := expandBillingBudgetThresholdRulesSpendBasis(original["spend_basis"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSpendBasis); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSpendBasis); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["spendBasis"] = transformedSpendBasis } @@ -1363,15 +1401,15 @@ func expandBillingBudgetThresholdRules(v interface{}, d TerraformResourceData, c return req, nil } -func expandBillingBudgetThresholdRulesThresholdPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetThresholdRulesThresholdPercent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetThresholdRulesSpendBasis(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetThresholdRulesSpendBasis(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetAllUpdatesRule(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { +func expandBillingBudgetAllUpdatesRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1383,47 +1421,47 @@ func expandBillingBudgetAllUpdatesRule(v interface{}, d TerraformResourceData, c transformedPubsubTopic, err := expandBillingBudgetAllUpdatesRulePubsubTopic(original["pubsub_topic"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pubsubTopic"] = transformedPubsubTopic } transformedSchemaVersion, err := expandBillingBudgetAllUpdatesRuleSchemaVersion(original["schema_version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchemaVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchemaVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schemaVersion"] = transformedSchemaVersion } transformedMonitoringNotificationChannels, err := expandBillingBudgetAllUpdatesRuleMonitoringNotificationChannels(original["monitoring_notification_channels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMonitoringNotificationChannels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMonitoringNotificationChannels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["monitoringNotificationChannels"] = transformedMonitoringNotificationChannels } transformedDisableDefaultIamRecipients, err := expandBillingBudgetAllUpdatesRuleDisableDefaultIamRecipients(original["disable_default_iam_recipients"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisableDefaultIamRecipients); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisableDefaultIamRecipients); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disableDefaultIamRecipients"] = transformedDisableDefaultIamRecipients } return transformed, nil } -func expandBillingBudgetAllUpdatesRulePubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetAllUpdatesRulePubsubTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetAllUpdatesRuleSchemaVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetAllUpdatesRuleSchemaVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetAllUpdatesRuleMonitoringNotificationChannels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetAllUpdatesRuleMonitoringNotificationChannels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandBillingBudgetAllUpdatesRuleDisableDefaultIamRecipients(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandBillingBudgetAllUpdatesRuleDisableDefaultIamRecipients(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -1670,10 +1708,10 @@ billingAccounts/{billingAccountId}/budgets/{budgetId}.`, } } -func resourceBillingBudgetUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { +func ResourceBillingBudgetUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { log.Printf("[DEBUG] Attributes before migration: %#v", rawState) - 
rawState["name"] = GetResourceNameFromSelfLink(rawState["name"].(string)) + rawState["name"] = tpgresource.GetResourceNameFromSelfLink(rawState["name"].(string)) log.Printf("[DEBUG] Attributes after migration: %#v", rawState) return rawState, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/billing/resource_billing_budget_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/billing/resource_billing_budget_sweeper.go new file mode 100644 index 0000000000..b0537b8d01 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/billing/resource_billing_budget_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package billing + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("BillingBudget", testSweepBillingBudget) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepBillingBudget(region string) error { + resourceName := "BillingBudget" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://billingbudgets.googleapis.com/v1/billingAccounts/{{billing_account}}/budgets", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in 
response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["budgets"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://billingbudgets.googleapis.com/v1/billingAccounts/{{billing_account}}/budgets/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization/iam_binary_authorization_attestor.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization/iam_binary_authorization_attestor.go new file mode 100644 index 0000000000..1e4588856b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization/iam_binary_authorization_attestor.go @@ -0,0 +1,221 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package binaryauthorization + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var BinaryAuthorizationAttestorIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "attestor": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type BinaryAuthorizationAttestorIamUpdater struct { + project string + attestor string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func BinaryAuthorizationAttestorIamUpdaterProducer(d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("attestor"); ok { + values["attestor"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/attestors/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("attestor").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &BinaryAuthorizationAttestorIamUpdater{ + project: values["project"], + attestor: values["attestor"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("attestor", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting attestor: %s", err) + } + + return u, nil +} + +func BinaryAuthorizationAttestorIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/attestors/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &BinaryAuthorizationAttestorIamUpdater{ + project: values["project"], + attestor: values["attestor"], + d: d, + Config: config, + } + if err := d.Set("attestor", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting attestor: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u 
*BinaryAuthorizationAttestorIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyAttestorUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *BinaryAuthorizationAttestorIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyAttestorUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *BinaryAuthorizationAttestorIamUpdater) 
qualifyAttestorUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{BinaryAuthorizationBasePath}}%s:%s", fmt.Sprintf("projects/%s/attestors/%s", u.project, u.attestor), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *BinaryAuthorizationAttestorIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/attestors/%s", u.project, u.attestor) +} + +func (u *BinaryAuthorizationAttestorIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-binaryauthorization-attestor-%s", u.GetResourceId()) +} + +func (u *BinaryAuthorizationAttestorIamUpdater) DescribeResource() string { + return fmt.Sprintf("binaryauthorization attestor %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization/resource_binary_authorization_attestor.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization/resource_binary_authorization_attestor.go new file mode 100644 index 0000000000..186e2b0b18 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization/resource_binary_authorization_attestor.go @@ -0,0 +1,706 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package binaryauthorization + +import ( + "fmt" + "log" + "reflect" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func CompareSignatureAlgorithm(_, old, new string, _ *schema.ResourceData) bool { + // See https://cloud.google.com/binary-authorization/docs/reference/rest/v1/projects.attestors#signaturealgorithm + normalizedAlgorithms := map[string]string{ + "ECDSA_P256_SHA256": "ECDSA_P256_SHA256", + "EC_SIGN_P256_SHA256": "ECDSA_P256_SHA256", + "ECDSA_P384_SHA384": "ECDSA_P384_SHA384", + "EC_SIGN_P384_SHA384": "ECDSA_P384_SHA384", + "ECDSA_P521_SHA512": "ECDSA_P521_SHA512", + "EC_SIGN_P521_SHA512": "ECDSA_P521_SHA512", + } + + normalizedOld := old + normalizedNew := new + + if normalized, ok := normalizedAlgorithms[old]; ok { + normalizedOld = normalized + } + if normalized, ok := normalizedAlgorithms[new]; ok { + normalizedNew = normalized + } + + if normalizedNew == normalizedOld { + return true + } + + return false +} + +func ResourceBinaryAuthorizationAttestor() *schema.Resource { + return &schema.Resource{ + Create: resourceBinaryAuthorizationAttestorCreate, + Read: resourceBinaryAuthorizationAttestorRead, + Update: resourceBinaryAuthorizationAttestorUpdate, + Delete: resourceBinaryAuthorizationAttestorDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBinaryAuthorizationAttestorImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "attestation_authority_note": { + Type: schema.TypeList, + Required: true, + Description: `A Container Analysis ATTESTATION_AUTHORITY Note, 
created by the user.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "note_reference": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The resource name of a ATTESTATION_AUTHORITY Note, created by the +user. If the Note is in a different project from the Attestor, it +should be specified in the format 'projects/*/notes/*' (or the legacy +'providers/*/notes/*'). This field may not be updated. +An attestation by this attestor is stored as a Container Analysis +ATTESTATION_AUTHORITY Occurrence that names a container image +and that links to this Note.`, + }, + "public_keys": { + Type: schema.TypeList, + Optional: true, + Description: `Public keys that verify attestations signed by this attestor. This +field may be updated. +If this field is non-empty, one of the specified public keys must +verify that an attestation was signed by this attestor for the +image specified in the admission request. +If this field is empty, this attestor always returns that no valid +attestations exist.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ascii_armored_pgp_public_key": { + Type: schema.TypeString, + Optional: true, + Description: `ASCII-armored representation of a PGP public key, as the +entire output by the command +'gpg --export --armor foo@example.com' (either LF or CRLF +line endings). When using this field, id should be left +blank. The BinAuthz API handlers will calculate the ID +and fill it in automatically. BinAuthz computes this ID +as the OpenPGP RFC4880 V4 fingerprint, represented as +upper-case hex. If id is provided by the caller, it will +be overwritten by the API-calculated ID.`, + }, + "comment": { + Type: schema.TypeString, + Optional: true, + Description: `A descriptive comment. 
This field may be updated.`, + }, + "id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The ID of this public key. Signatures verified by BinAuthz +must include the ID of the public key that can be used to +verify them, and that ID must match the contents of this +field exactly. Additional restrictions on this field can +be imposed based on which public key type is encapsulated. +See the documentation on publicKey cases below for details.`, + }, + "pkix_public_key": { + Type: schema.TypeList, + Optional: true, + Description: `A raw PKIX SubjectPublicKeyInfo format public key. + +NOTE: id may be explicitly provided by the caller when using this +type of public key, but it MUST be a valid RFC3986 URI. If id is left +blank, a default one will be computed based on the digest of the DER +encoding of the public key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "public_key_pem": { + Type: schema.TypeString, + Optional: true, + Description: `A PEM-encoded public key, as described in +'https://tools.ietf.org/html/rfc7468#section-13'`, + }, + "signature_algorithm": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: CompareSignatureAlgorithm, + Description: `The signature algorithm used to verify a message against +a signature using this key. These signature algorithm must +match the structure and any object identifiers encoded in +publicKeyPem (i.e. this algorithm must match that of the +public key).`, + }, + }, + }, + }, + }, + }, + }, + "delegation_service_account_email": { + Type: schema.TypeString, + Computed: true, + Description: `This field will contain the service account email address that +this Attestor will use as the principal when querying Container +Analysis. Attestor administrators must grant this service account +the IAM role needed to read attestations from the noteReference in +Container Analysis (containeranalysis.notes.occurrences.viewer). 
+This email address is fixed for the lifetime of the Attestor, but +callers should not make any other assumptions about the service +account email; future versions may use an email based on a +different naming pattern.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A descriptive comment. This field may be updated. The field may be +displayed in chooser dialogs.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBinaryAuthorizationAttestorCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandBinaryAuthorizationAttestorName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandBinaryAuthorizationAttestorDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + userOwnedGrafeasNoteProp, err := expandBinaryAuthorizationAttestorAttestationAuthorityNote(d.Get("attestation_authority_note"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attestation_authority_note"); !tpgresource.IsEmptyValue(reflect.ValueOf(userOwnedGrafeasNoteProp)) && (ok || !reflect.DeepEqual(v, userOwnedGrafeasNoteProp)) { + obj["userOwnedGrafeasNote"] = 
userOwnedGrafeasNoteProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/attestors?attestorId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Attestor: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Attestor: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Attestor: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/attestors/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Attestor %q: %#v", d.Id(), res) + + return resourceBinaryAuthorizationAttestorRead(d, meta) +} + +func resourceBinaryAuthorizationAttestorRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/attestors/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Attestor: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BinaryAuthorizationAttestor %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Attestor: %s", err) + } + + if err := d.Set("name", flattenBinaryAuthorizationAttestorName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Attestor: %s", err) + } + if err := d.Set("description", flattenBinaryAuthorizationAttestorDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Attestor: %s", err) + } + if err := d.Set("attestation_authority_note", flattenBinaryAuthorizationAttestorAttestationAuthorityNote(res["userOwnedGrafeasNote"], d, config)); err != nil { + return fmt.Errorf("Error reading Attestor: %s", err) + } + + return nil +} + +func resourceBinaryAuthorizationAttestorUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Attestor: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + nameProp, err := expandBinaryAuthorizationAttestorName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandBinaryAuthorizationAttestorDescription(d.Get("description"), d, config) + if err != nil { + return err + } 
else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + userOwnedGrafeasNoteProp, err := expandBinaryAuthorizationAttestorAttestationAuthorityNote(d.Get("attestation_authority_note"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attestation_authority_note"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, userOwnedGrafeasNoteProp)) { + obj["userOwnedGrafeasNote"] = userOwnedGrafeasNoteProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/attestors/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Attestor %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Attestor %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Attestor %q: %#v", d.Id(), res) + } + + return resourceBinaryAuthorizationAttestorRead(d, meta) +} + +func resourceBinaryAuthorizationAttestorDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Attestor: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, 
"{{BinaryAuthorizationBasePath}}projects/{{project}}/attestors/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Attestor %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Attestor") + } + + log.Printf("[DEBUG] Finished deleting Attestor %q: %#v", d.Id(), res) + return nil +} + +func resourceBinaryAuthorizationAttestorImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/attestors/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/attestors/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenBinaryAuthorizationAttestorName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenBinaryAuthorizationAttestorDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBinaryAuthorizationAttestorAttestationAuthorityNote(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + 
if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["note_reference"] = + flattenBinaryAuthorizationAttestorAttestationAuthorityNoteNoteReference(original["noteReference"], d, config) + transformed["public_keys"] = + flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(original["publicKeys"], d, config) + transformed["delegation_service_account_email"] = + flattenBinaryAuthorizationAttestorAttestationAuthorityNoteDelegationServiceAccountEmail(original["delegationServiceAccountEmail"], d, config) + return []interface{}{transformed} +} +func flattenBinaryAuthorizationAttestorAttestationAuthorityNoteNoteReference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "comment": flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysComment(original["comment"], d, config), + "id": flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysId(original["id"], d, config), + "ascii_armored_pgp_public_key": flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysAsciiArmoredPgpPublicKey(original["asciiArmoredPgpPublicKey"], d, config), + "pkix_public_key": flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKey(original["pkixPublicKey"], d, config), + }) + } + return transformed +} +func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysComment(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysAsciiArmoredPgpPublicKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["public_key_pem"] = + flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeyPublicKeyPem(original["publicKeyPem"], d, config) + transformed["signature_algorithm"] = + flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeySignatureAlgorithm(original["signatureAlgorithm"], d, config) + return []interface{}{transformed} +} +func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeyPublicKeyPem(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeySignatureAlgorithm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBinaryAuthorizationAttestorAttestationAuthorityNoteDelegationServiceAccountEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandBinaryAuthorizationAttestorName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.ReplaceVars(d, config, "projects/{{project}}/attestors/{{name}}") +} + +func 
expandBinaryAuthorizationAttestorDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBinaryAuthorizationAttestorAttestationAuthorityNote(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNoteReference, err := expandBinaryAuthorizationAttestorAttestationAuthorityNoteNoteReference(original["note_reference"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNoteReference); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["noteReference"] = transformedNoteReference + } + + transformedPublicKeys, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(original["public_keys"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicKeys); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicKeys"] = transformedPublicKeys + } + + transformedDelegationServiceAccountEmail, err := expandBinaryAuthorizationAttestorAttestationAuthorityNoteDelegationServiceAccountEmail(original["delegation_service_account_email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDelegationServiceAccountEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["delegationServiceAccountEmail"] = transformedDelegationServiceAccountEmail + } + + return transformed, nil +} + +func expandBinaryAuthorizationAttestorAttestationAuthorityNoteNoteReference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + r := regexp.MustCompile("projects/(.+)/notes/(.+)") + if r.MatchString(v.(string)) { + return v.(string), 
nil + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + return fmt.Sprintf("projects/%s/notes/%s", project, v.(string)), nil +} + +func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedComment, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysComment(original["comment"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedComment); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["comment"] = transformedComment + } + + transformedId, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedAsciiArmoredPgpPublicKey, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysAsciiArmoredPgpPublicKey(original["ascii_armored_pgp_public_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAsciiArmoredPgpPublicKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["asciiArmoredPgpPublicKey"] = transformedAsciiArmoredPgpPublicKey + } + + transformedPkixPublicKey, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKey(original["pkix_public_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPkixPublicKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pkixPublicKey"] = 
transformedPkixPublicKey + } + + req = append(req, transformed) + } + return req, nil +} + +func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysComment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysAsciiArmoredPgpPublicKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPublicKeyPem, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeyPublicKeyPem(original["public_key_pem"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicKeyPem); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicKeyPem"] = transformedPublicKeyPem + } + + transformedSignatureAlgorithm, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeySignatureAlgorithm(original["signature_algorithm"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSignatureAlgorithm); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["signatureAlgorithm"] = transformedSignatureAlgorithm + } + + return transformed, nil +} + +func 
expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeyPublicKeyPem(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeySignatureAlgorithm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBinaryAuthorizationAttestorAttestationAuthorityNoteDelegationServiceAccountEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization/resource_binary_authorization_attestor_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization/resource_binary_authorization_attestor_sweeper.go new file mode 100644 index 0000000000..60794cf7f1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization/resource_binary_authorization_attestor_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package binaryauthorization + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("BinaryAuthorizationAttestor", testSweepBinaryAuthorizationAttestor) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepBinaryAuthorizationAttestor(region string) error { + resourceName := "BinaryAuthorizationAttestor" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://binaryauthorization.googleapis.com/v1/projects/{{project}}/attestors?attestorId={{name}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: 
config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["attestors"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://binaryauthorization.googleapis.com/v1/projects/{{project}}/attestors/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization/resource_binary_authorization_policy.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization/resource_binary_authorization_policy.go new file mode 100644 index 0000000000..8455443872 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization/resource_binary_authorization_policy.go @@ -0,0 +1,781 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package binaryauthorization + +import ( + "bytes" + "fmt" + "log" + "reflect" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func DefaultBinaryAuthorizationPolicy(project string) map[string]interface{} { + return map[string]interface{}{ + "name": fmt.Sprintf("projects/%s/policy", project), + "admissionWhitelistPatterns": []interface{}{ + map[string]interface{}{ + "namePattern": "gcr.io/google_containers/*", + }, + }, + "defaultAdmissionRule": map[string]interface{}{ + "evaluationMode": "ALWAYS_ALLOW", + "enforcementMode": "ENFORCED_BLOCK_AND_AUDIT_LOG", + }, + } +} + +func ResourceBinaryAuthorizationPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceBinaryAuthorizationPolicyCreate, + Read: 
resourceBinaryAuthorizationPolicyRead, + Update: resourceBinaryAuthorizationPolicyUpdate, + Delete: resourceBinaryAuthorizationPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBinaryAuthorizationPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "default_admission_rule": { + Type: schema.TypeList, + Required: true, + Description: `Default admission rule for a cluster without a per-cluster admission +rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enforcement_mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"ENFORCED_BLOCK_AND_AUDIT_LOG", "DRYRUN_AUDIT_LOG_ONLY"}), + Description: `The action when a pod creation is denied by the admission rule. Possible values: ["ENFORCED_BLOCK_AND_AUDIT_LOG", "DRYRUN_AUDIT_LOG_ONLY"]`, + }, + "evaluation_mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"ALWAYS_ALLOW", "REQUIRE_ATTESTATION", "ALWAYS_DENY"}), + Description: `How this admission rule will be evaluated. Possible values: ["ALWAYS_ALLOW", "REQUIRE_ATTESTATION", "ALWAYS_DENY"]`, + }, + "require_attestations_by": { + Type: schema.TypeSet, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The resource names of the attestors that must attest to a +container image. If the attestor is in a different project from the +policy, it should be specified in the format 'projects/*/attestors/*'. +Each attestor must exist before a policy can reference it. To add an +attestor to a policy the principal issuing the policy change +request must be able to read the attestor resource. 
+ +Note: this field must be non-empty when the evaluation_mode field +specifies REQUIRE_ATTESTATION, otherwise it must be empty.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: tpgresource.SelfLinkNameHash, + }, + }, + }, + }, + "admission_whitelist_patterns": { + Type: schema.TypeList, + Optional: true, + Description: `A whitelist of image patterns to exclude from admission rules. If an +image's name matches a whitelist pattern, the image's admission +requests will always be permitted regardless of your admission rules.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name_pattern": { + Type: schema.TypeString, + Required: true, + Description: `An image name pattern to whitelist, in the form +'registry/path/to/image'. This supports a trailing * as a +wildcard, but this is allowed only in text after the registry/ +part.`, + }, + }, + }, + }, + "cluster_admission_rules": { + Type: schema.TypeSet, + Optional: true, + Description: `Per-cluster admission rules. An admission rule specifies either that +all container images used in a pod creation request must be attested +to by one or more attestors, that all pod creations will be allowed, +or that all pod creations will be denied. There can be at most one +admission rule per cluster spec. + + +Identifier format: '{{location}}.{{clusterId}}'. +A location is either a compute zone (e.g. 'us-central1-a') or a region +(e.g. 'us-central1').`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Required: true, + }, + "enforcement_mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"ENFORCED_BLOCK_AND_AUDIT_LOG", "DRYRUN_AUDIT_LOG_ONLY"}), + Description: `The action when a pod creation is denied by the admission rule. 
Possible values: ["ENFORCED_BLOCK_AND_AUDIT_LOG", "DRYRUN_AUDIT_LOG_ONLY"]`, + }, + "evaluation_mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"ALWAYS_ALLOW", "REQUIRE_ATTESTATION", "ALWAYS_DENY"}), + Description: `How this admission rule will be evaluated. Possible values: ["ALWAYS_ALLOW", "REQUIRE_ATTESTATION", "ALWAYS_DENY"]`, + }, + "require_attestations_by": { + Type: schema.TypeSet, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The resource names of the attestors that must attest to a +container image. If the attestor is in a different project from the +policy, it should be specified in the format 'projects/*/attestors/*'. +Each attestor must exist before a policy can reference it. To add an +attestor to a policy the principal issuing the policy change +request must be able to read the attestor resource. + +Note: this field must be non-empty when the evaluation_mode field +specifies REQUIRE_ATTESTATION, otherwise it must be empty.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: tpgresource.SelfLinkNameHash, + }, + }, + }, + Set: func(v interface{}) int { + // require_attestations_by is a set of strings that can have the format + // projects/{project}/attestors/{attestor} or {attestor}. We diffsuppress + // and hash that set on the name, but now we need to make sure that the + // overall hash here respects that so changing the attestor format doesn't + // change the hash code of cluster_admission_rules. + raw := v.(map[string]interface{}) + + // modifying raw actually modifies the values passed to the provider. + // Use a copy to avoid that. 
+ copy := make((map[string]interface{})) + for key, value := range raw { + copy[key] = value + } + at := copy["require_attestations_by"].(*schema.Set) + if at != nil { + t := tpgresource.ConvertAndMapStringArr(at.List(), tpgresource.GetResourceNameFromSelfLink) + copy["require_attestations_by"] = schema.NewSet(tpgresource.SelfLinkNameHash, tpgresource.ConvertStringArrToInterface(t)) + } + var buf bytes.Buffer + schema.SerializeResourceForHash(&buf, copy, ResourceBinaryAuthorizationPolicy().Schema["cluster_admission_rules"].Elem.(*schema.Resource)) + return tpgresource.Hashcode(buf.String()) + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A descriptive comment.`, + }, + "global_policy_evaluation_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ENABLE", "DISABLE", ""}), + Description: `Controls the evaluation of a Google-maintained global admission policy +for common system-level images. Images not covered by the global +policy will be subject to the project admission policy. 
Possible values: ["ENABLE", "DISABLE"]`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBinaryAuthorizationPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandBinaryAuthorizationPolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + globalPolicyEvaluationModeProp, err := expandBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(d.Get("global_policy_evaluation_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("global_policy_evaluation_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(globalPolicyEvaluationModeProp)) && (ok || !reflect.DeepEqual(v, globalPolicyEvaluationModeProp)) { + obj["globalPolicyEvaluationMode"] = globalPolicyEvaluationModeProp + } + admissionWhitelistPatternsProp, err := expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(d.Get("admission_whitelist_patterns"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("admission_whitelist_patterns"); !tpgresource.IsEmptyValue(reflect.ValueOf(admissionWhitelistPatternsProp)) && (ok || !reflect.DeepEqual(v, admissionWhitelistPatternsProp)) { + obj["admissionWhitelistPatterns"] = admissionWhitelistPatternsProp + } + clusterAdmissionRulesProp, err := expandBinaryAuthorizationPolicyClusterAdmissionRules(d.Get("cluster_admission_rules"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cluster_admission_rules"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(clusterAdmissionRulesProp)) && (ok || !reflect.DeepEqual(v, clusterAdmissionRulesProp)) { + obj["clusterAdmissionRules"] = clusterAdmissionRulesProp + } + defaultAdmissionRuleProp, err := expandBinaryAuthorizationPolicyDefaultAdmissionRule(d.Get("default_admission_rule"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_admission_rule"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultAdmissionRuleProp)) && (ok || !reflect.DeepEqual(v, defaultAdmissionRuleProp)) { + obj["defaultAdmissionRule"] = defaultAdmissionRuleProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/policy") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Policy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Policy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Policy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Policy %q: %#v", d.Id(), res) + + return resourceBinaryAuthorizationPolicyRead(d, meta) +} + +func resourceBinaryAuthorizationPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/policy") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Policy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BinaryAuthorizationPolicy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Policy: %s", err) + } + + if err := d.Set("description", flattenBinaryAuthorizationPolicyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Policy: %s", err) + } + if err := d.Set("global_policy_evaluation_mode", flattenBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(res["globalPolicyEvaluationMode"], d, config)); err != nil { + return fmt.Errorf("Error reading Policy: %s", err) + } + if err := d.Set("admission_whitelist_patterns", flattenBinaryAuthorizationPolicyAdmissionWhitelistPatterns(res["admissionWhitelistPatterns"], d, config)); err != nil { + return fmt.Errorf("Error reading Policy: %s", err) + } + if err := d.Set("cluster_admission_rules", flattenBinaryAuthorizationPolicyClusterAdmissionRules(res["clusterAdmissionRules"], d, config)); err != nil { + return fmt.Errorf("Error reading Policy: %s", err) + } + if err := d.Set("default_admission_rule", flattenBinaryAuthorizationPolicyDefaultAdmissionRule(res["defaultAdmissionRule"], d, 
config)); err != nil { + return fmt.Errorf("Error reading Policy: %s", err) + } + + return nil +} + +func resourceBinaryAuthorizationPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Policy: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandBinaryAuthorizationPolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + globalPolicyEvaluationModeProp, err := expandBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(d.Get("global_policy_evaluation_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("global_policy_evaluation_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, globalPolicyEvaluationModeProp)) { + obj["globalPolicyEvaluationMode"] = globalPolicyEvaluationModeProp + } + admissionWhitelistPatternsProp, err := expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(d.Get("admission_whitelist_patterns"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("admission_whitelist_patterns"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, admissionWhitelistPatternsProp)) { + obj["admissionWhitelistPatterns"] = admissionWhitelistPatternsProp + } + clusterAdmissionRulesProp, err := expandBinaryAuthorizationPolicyClusterAdmissionRules(d.Get("cluster_admission_rules"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("cluster_admission_rules"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clusterAdmissionRulesProp)) { + obj["clusterAdmissionRules"] = clusterAdmissionRulesProp + } + defaultAdmissionRuleProp, err := expandBinaryAuthorizationPolicyDefaultAdmissionRule(d.Get("default_admission_rule"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_admission_rule"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultAdmissionRuleProp)) { + obj["defaultAdmissionRule"] = defaultAdmissionRuleProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/policy") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Policy %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Policy %q: %#v", d.Id(), res) + } + + return resourceBinaryAuthorizationPolicyRead(d, meta) +} + +func resourceBinaryAuthorizationPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Policy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, 
"{{BinaryAuthorizationBasePath}}projects/{{project}}/policy") + if err != nil { + return err + } + + var obj map[string]interface{} + obj = DefaultBinaryAuthorizationPolicy(d.Get("project").(string)) + log.Printf("[DEBUG] Deleting Policy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Policy") + } + + log.Printf("[DEBUG] Finished deleting Policy %q: %#v", d.Id(), res) + return nil +} + +func resourceBinaryAuthorizationPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenBinaryAuthorizationPolicyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBinaryAuthorizationPolicyAdmissionWhitelistPatterns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := 
raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name_pattern": flattenBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(original["namePattern"], d, config), + }) + } + return transformed +} +func flattenBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBinaryAuthorizationPolicyClusterAdmissionRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.(map[string]interface{}) + transformed := make([]interface{}, 0, len(l)) + for k, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "cluster": k, + "evaluation_mode": flattenBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(original["evaluationMode"], d, config), + "require_attestations_by": flattenBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(original["requireAttestationsBy"], d, config), + "enforcement_mode": flattenBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(original["enforcementMode"], d, config), + }) + } + return transformed +} +func flattenBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(tpgresource.SelfLinkNameHash, v.([]interface{})) +} + +func flattenBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenBinaryAuthorizationPolicyDefaultAdmissionRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["evaluation_mode"] = + flattenBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(original["evaluationMode"], d, config) + transformed["require_attestations_by"] = + flattenBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(original["requireAttestationsBy"], d, config) + transformed["enforcement_mode"] = + flattenBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(original["enforcementMode"], d, config) + return []interface{}{transformed} +} +func flattenBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(tpgresource.SelfLinkNameHash, v.([]interface{})) +} + +func flattenBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandBinaryAuthorizationPolicyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, 
len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNamePattern, err := expandBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(original["name_pattern"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNamePattern); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["namePattern"] = transformedNamePattern + } + + req = append(req, transformed) + } + return req, nil +} + +func expandBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBinaryAuthorizationPolicyClusterAdmissionRules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + if v == nil { + return map[string]interface{}{}, nil + } + m := make(map[string]interface{}) + for _, raw := range v.(*schema.Set).List() { + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEvaluationMode, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(original["evaluation_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEvaluationMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["evaluationMode"] = transformedEvaluationMode + } + + transformedRequireAttestationsBy, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(original["require_attestations_by"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequireAttestationsBy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requireAttestationsBy"] = transformedRequireAttestationsBy + } + + transformedEnforcementMode, err := 
expandBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(original["enforcement_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnforcementMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enforcementMode"] = transformedEnforcementMode + } + + transformedCluster, err := tpgresource.ExpandString(original["cluster"], d, config) + if err != nil { + return nil, err + } + m[transformedCluster] = transformed + } + return m, nil +} + +func expandBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + r := regexp.MustCompile("projects/(.+)/attestors/(.+)") + + // It's possible that all entries in the list will specify a project, in + // which case the user wouldn't necessarily have to specify a provider + // project. 
+ var project string + var err error + for _, s := range v.(*schema.Set).List() { + if !r.MatchString(s.(string)) { + project, err = tpgresource.GetProject(d, config) + if err != nil { + return []interface{}{}, err + } + break + } + } + + return tpgresource.ConvertAndMapStringArr(v.(*schema.Set).List(), func(s string) string { + if r.MatchString(s) { + return s + } + + return fmt.Sprintf("projects/%s/attestors/%s", project, s) + }), nil +} + +func expandBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBinaryAuthorizationPolicyDefaultAdmissionRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEvaluationMode, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(original["evaluation_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEvaluationMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["evaluationMode"] = transformedEvaluationMode + } + + transformedRequireAttestationsBy, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(original["require_attestations_by"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequireAttestationsBy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requireAttestationsBy"] = transformedRequireAttestationsBy + } + + transformedEnforcementMode, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(original["enforcement_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnforcementMode); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enforcementMode"] = transformedEnforcementMode + } + + return transformed, nil +} + +func expandBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + r := regexp.MustCompile("projects/(.+)/attestors/(.+)") + + // It's possible that all entries in the list will specify a project, in + // which case the user wouldn't necessarily have to specify a provider + // project. + var project string + var err error + for _, s := range v.(*schema.Set).List() { + if !r.MatchString(s.(string)) { + project, err = tpgresource.GetProject(d, config) + if err != nil { + return []interface{}{}, err + } + break + } + } + + return tpgresource.ConvertAndMapStringArr(v.(*schema.Set).List(), func(s string) string { + if r.MatchString(s) { + return s + } + + return fmt.Sprintf("projects/%s/attestors/%s", project, s) + }), nil +} + +func expandBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/certificate_manager_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/certificate_manager_operation.go new file mode 100644 index 0000000000..d927511f74 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/certificate_manager_operation.go @@ -0,0 +1,74 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package certificatemanager + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type CertificateManagerOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *CertificateManagerOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.CertificateManagerBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createCertificateManagerWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*CertificateManagerOperationWaiter, error) { + w := &CertificateManagerOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +func CertificateManagerOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createCertificateManagerWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate.go new file mode 100644 index 0000000000..668bdbf301 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate.go @@ -0,0 +1,1134 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package certificatemanager + +import ( + "context" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func certManagerDefaultScopeDiffSuppress(_, old, new string, diff *schema.ResourceData) bool { + if old == "" && new == "DEFAULT" || old == "DEFAULT" && new == "" { + return true + } + return false +} + +func ResourceCertificateManagerCertificate() *schema.Resource { + return &schema.Resource{ + Create: resourceCertificateManagerCertificateCreate, + Read: resourceCertificateManagerCertificateRead, + Update: resourceCertificateManagerCertificateUpdate, + Delete: resourceCertificateManagerCertificateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCertificateManagerCertificateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + SchemaVersion: 1, + + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceCertificateManagerCertificateResourceV0().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceCertificateManagerCertificateUpgradeV0, + Version: 0, + }, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + Description: `A user-defined name of the certificate. Certificate names must be unique +The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, +and all following characters must be a dash, underscore, letter or digit.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the resource.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Set of label tags associated with the Certificate resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `The Certificate Manager location. If not specified, "global" is used.`, + Default: "global", + }, + "managed": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Configuration and state of a Managed Certificate. +Certificate Manager provisions and renews Managed Certificates +automatically, for as long as it's authorized to do so.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dns_authorizations": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `Authorizations that will be used for performing domain authorization. Either issuanceConfig or dnsAuthorizations should be specificed, but not both.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "domains": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The domains for which a managed SSL certificate will be generated. 
+Wildcard domains are only supported with DNS challenge resolution`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "issuance_config": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format projects/*/locations/*/certificateIssuanceConfigs/*. +If this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa. +Either issuanceConfig or dnsAuthorizations should be specificed, but not both.`, + }, + "authorization_attempt_info": { + Type: schema.TypeList, + Computed: true, + Description: `Detailed state of the latest authorization attempt for each domain +specified for this Managed Certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "details": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable explanation for reaching the state. Provided to help +address the configuration issues. +Not guaranteed to be stable. 
For programmatic access use 'failure_reason' field.`, + }, + "domain": { + Type: schema.TypeString, + Computed: true, + Description: `Domain name of the authorization attempt.`, + }, + "failure_reason": { + Type: schema.TypeString, + Computed: true, + Description: `Reason for failure of the authorization attempt for the domain.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the domain for managed certificate issuance.`, + }, + }, + }, + }, + "provisioning_issue": { + Type: schema.TypeList, + Computed: true, + Description: `Information about issues with provisioning this Managed Certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "details": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable explanation about the issue. Provided to help address +the configuration issues. +Not guaranteed to be stable. For programmatic access use 'reason' field.`, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `Reason for provisioning failures.`, + }, + }, + }, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `A state of this Managed Certificate.`, + }, + }, + }, + ExactlyOneOf: []string{"self_managed", "managed"}, + }, + "scope": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: certManagerDefaultScopeDiffSuppress, + Description: `The scope of the certificate. + +DEFAULT: Certificates with default scope are served from core Google data centers. +If unsure, choose this option. + +EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, +served from non-core Google data centers. +Currently allowed only for managed certificates.`, + Default: "DEFAULT", + }, + "self_managed": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Certificate data for a SelfManaged Certificate. +SelfManaged Certificates are uploaded by the user. 
Updating such +certificates before they expire remains the user's responsibility.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_pem": { + Type: schema.TypeString, + Optional: true, + Deprecated: "Deprecated in favor of `pem_certificate`", + ForceNew: true, + Description: `**Deprecated** The certificate chain in PEM-encoded form. + +Leaf certificate comes first, followed by intermediate ones if any.`, + Sensitive: true, + ExactlyOneOf: []string{"self_managed.0.certificate_pem", "self_managed.0.pem_certificate"}, + }, + "pem_certificate": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The certificate chain in PEM-encoded form. + +Leaf certificate comes first, followed by intermediate ones if any.`, + ExactlyOneOf: []string{"self_managed.0.certificate_pem", "self_managed.0.pem_certificate"}, + }, + "pem_private_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The private key of the leaf certificate in PEM-encoded form.`, + Sensitive: true, + ExactlyOneOf: []string{"self_managed.0.private_key_pem", "self_managed.0.pem_private_key"}, + }, + "private_key_pem": { + Type: schema.TypeString, + Optional: true, + Deprecated: "Deprecated in favor of `pem_private_key`", + ForceNew: true, + Description: `**Deprecated** The private key of the leaf certificate in PEM-encoded form.`, + Sensitive: true, + ExactlyOneOf: []string{"self_managed.0.private_key_pem", "self_managed.0.pem_private_key"}, + }, + }, + }, + ExactlyOneOf: []string{"self_managed", "managed"}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCertificateManagerCertificateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := 
make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + scopeProp, err := expandCertificateManagerCertificateScope(d.Get("scope"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("scope"); !tpgresource.IsEmptyValue(reflect.ValueOf(scopeProp)) && (ok || !reflect.DeepEqual(v, scopeProp)) { + obj["scope"] = scopeProp + } + selfManagedProp, err := expandCertificateManagerCertificateSelfManaged(d.Get("self_managed"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("self_managed"); !tpgresource.IsEmptyValue(reflect.ValueOf(selfManagedProp)) && (ok || !reflect.DeepEqual(v, selfManagedProp)) { + obj["selfManaged"] = selfManagedProp + } + managedProp, err := expandCertificateManagerCertificateManaged(d.Get("managed"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("managed"); !tpgresource.IsEmptyValue(reflect.ValueOf(managedProp)) && (ok || !reflect.DeepEqual(v, managedProp)) { + obj["managed"] = managedProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/{{location}}/certificates?certificateId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Certificate: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return 
fmt.Errorf("Error fetching project for Certificate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Certificate: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/certificates/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = CertificateManagerOperationWaitTime( + config, res, project, "Creating Certificate", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Certificate: %s", err) + } + + log.Printf("[DEBUG] Finished creating Certificate %q: %#v", d.Id(), res) + + return resourceCertificateManagerCertificateRead(d, meta) +} + +func resourceCertificateManagerCertificateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/{{location}}/certificates/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Certificate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, 
config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CertificateManagerCertificate %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Certificate: %s", err) + } + + if err := d.Set("description", flattenCertificateManagerCertificateDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Certificate: %s", err) + } + if err := d.Set("labels", flattenCertificateManagerCertificateLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Certificate: %s", err) + } + if err := d.Set("scope", flattenCertificateManagerCertificateScope(res["scope"], d, config)); err != nil { + return fmt.Errorf("Error reading Certificate: %s", err) + } + if err := d.Set("managed", flattenCertificateManagerCertificateManaged(res["managed"], d, config)); err != nil { + return fmt.Errorf("Error reading Certificate: %s", err) + } + + return nil +} + +func resourceCertificateManagerCertificateUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Certificate: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + 
obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/{{location}}/certificates/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Certificate %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Certificate %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Certificate %q: %#v", d.Id(), res) + } + + err = CertificateManagerOperationWaitTime( + config, res, project, "Updating Certificate", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceCertificateManagerCertificateRead(d, meta) +} + +func resourceCertificateManagerCertificateDelete(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Certificate: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/{{location}}/certificates/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Certificate %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Certificate") + } + + err = CertificateManagerOperationWaitTime( + config, res, project, "Deleting Certificate", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Certificate %q: %#v", d.Id(), res) + return nil +} + +func resourceCertificateManagerCertificateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/certificates/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/certificates/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error 
constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCertificateManagerCertificateDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateScope(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManaged(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["domains"] = + flattenCertificateManagerCertificateManagedDomains(original["domains"], d, config) + transformed["dns_authorizations"] = + flattenCertificateManagerCertificateManagedDnsAuthorizations(original["dnsAuthorizations"], d, config) + transformed["issuance_config"] = + flattenCertificateManagerCertificateManagedIssuanceConfig(original["issuanceConfig"], d, config) + transformed["state"] = + flattenCertificateManagerCertificateManagedState(original["state"], d, config) + transformed["provisioning_issue"] = + flattenCertificateManagerCertificateManagedProvisioningIssue(original["provisioningIssue"], d, config) + transformed["authorization_attempt_info"] = + flattenCertificateManagerCertificateManagedAuthorizationAttemptInfo(original["authorizationAttemptInfo"], d, config) + return []interface{}{transformed} +} +func flattenCertificateManagerCertificateManagedDomains(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedDnsAuthorizations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + 
+func flattenCertificateManagerCertificateManagedIssuanceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedProvisioningIssue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["reason"] = + flattenCertificateManagerCertificateManagedProvisioningIssueReason(original["reason"], d, config) + transformed["details"] = + flattenCertificateManagerCertificateManagedProvisioningIssueDetails(original["details"], d, config) + return []interface{}{transformed} +} +func flattenCertificateManagerCertificateManagedProvisioningIssueReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedProvisioningIssueDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "domain": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(original["domain"], d, config), + "state": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoState(original["state"], d, config), + 
"failure_reason": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(original["failureReason"], d, config), + "details": flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(original["details"], d, config), + }) + } + return transformed +} +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCertificateManagerCertificateDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCertificateManagerCertificateScope(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateSelfManaged(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCertificatePem, err := expandCertificateManagerCertificateSelfManagedCertificatePem(original["certificate_pem"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCertificatePem); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["certificatePem"] = transformedCertificatePem + } + + transformedPrivateKeyPem, err := expandCertificateManagerCertificateSelfManagedPrivateKeyPem(original["private_key_pem"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrivateKeyPem); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["privateKeyPem"] = transformedPrivateKeyPem + } + + transformedPemCertificate, err := expandCertificateManagerCertificateSelfManagedPemCertificate(original["pem_certificate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPemCertificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pemCertificate"] = transformedPemCertificate + } + + transformedPemPrivateKey, err := expandCertificateManagerCertificateSelfManagedPemPrivateKey(original["pem_private_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPemPrivateKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pemPrivateKey"] = transformedPemPrivateKey + } + + return transformed, nil +} + +func expandCertificateManagerCertificateSelfManagedCertificatePem(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateSelfManagedPrivateKeyPem(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateSelfManagedPemCertificate(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateSelfManagedPemPrivateKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManaged(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDomains, err := expandCertificateManagerCertificateManagedDomains(original["domains"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDomains); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["domains"] = transformedDomains + } + + transformedDnsAuthorizations, err := expandCertificateManagerCertificateManagedDnsAuthorizations(original["dns_authorizations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDnsAuthorizations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dnsAuthorizations"] = transformedDnsAuthorizations + } + + transformedIssuanceConfig, err := expandCertificateManagerCertificateManagedIssuanceConfig(original["issuance_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIssuanceConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["issuanceConfig"] = transformedIssuanceConfig + } + + transformedState, err := expandCertificateManagerCertificateManagedState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["state"] = transformedState + } + + transformedProvisioningIssue, err := 
expandCertificateManagerCertificateManagedProvisioningIssue(original["provisioning_issue"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProvisioningIssue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["provisioningIssue"] = transformedProvisioningIssue + } + + transformedAuthorizationAttemptInfo, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfo(original["authorization_attempt_info"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAuthorizationAttemptInfo); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["authorizationAttemptInfo"] = transformedAuthorizationAttemptInfo + } + + return transformed, nil +} + +func expandCertificateManagerCertificateManagedDomains(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedDnsAuthorizations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedIssuanceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedProvisioningIssue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedReason, err := expandCertificateManagerCertificateManagedProvisioningIssueReason(original["reason"], d, config) + if 
err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReason); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["reason"] = transformedReason + } + + transformedDetails, err := expandCertificateManagerCertificateManagedProvisioningIssueDetails(original["details"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDetails); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["details"] = transformedDetails + } + + return transformed, nil +} + +func expandCertificateManagerCertificateManagedProvisioningIssueReason(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedProvisioningIssueDetails(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDomain, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(original["domain"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDomain); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["domain"] = transformedDomain + } + + transformedState, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["state"] = transformedState + } + + transformedFailureReason, 
err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(original["failure_reason"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFailureReason); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["failureReason"] = transformedFailureReason + } + + transformedDetails, err := expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(original["details"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDetails); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["details"] = transformedDetails + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDomain(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoFailureReason(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateManagedAuthorizationAttemptInfoDetails(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func ResourceCertificateManagerCertificateUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + // Version 0 didn't support location. Default it to global. 
+ rawState["location"] = "global" + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} + +func resourceCertificateManagerCertificateResourceV0() *schema.Resource { + return &schema.Resource{ + Create: resourceCertificateManagerCertificateCreate, + Read: resourceCertificateManagerCertificateRead, + Update: resourceCertificateManagerCertificateUpdate, + Delete: resourceCertificateManagerCertificateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCertificateManagerCertificateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A user-defined name of the certificate. Certificate names must be unique +The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, +and all following characters must be a dash, underscore, letter or digit.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the resource.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Set of label tags associated with the Certificate resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "managed": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Configuration and state of a Managed Certificate. 
+Certificate Manager provisions and renews Managed Certificates +automatically, for as long as it's authorized to do so.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dns_authorizations": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `Authorizations that will be used for performing domain authorization`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "domains": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The domains for which a managed SSL certificate will be generated. +Wildcard domains are only supported with DNS challenge resolution`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "authorization_attempt_info": { + Type: schema.TypeList, + Computed: true, + Description: `Detailed state of the latest authorization attempt for each domain +specified for this Managed Certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "details": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable explanation for reaching the state. Provided to help +address the configuration issues. +Not guaranteed to be stable. 
For programmatic access use 'failure_reason' field.`, + }, + "domain": { + Type: schema.TypeString, + Computed: true, + Description: `Domain name of the authorization attempt.`, + }, + "failure_reason": { + Type: schema.TypeString, + Computed: true, + Description: `Reason for failure of the authorization attempt for the domain.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the domain for managed certificate issuance.`, + }, + }, + }, + }, + "provisioning_issue": { + Type: schema.TypeList, + Computed: true, + Description: `Information about issues with provisioning this Managed Certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "details": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable explanation about the issue. Provided to help address +the configuration issues. +Not guaranteed to be stable. For programmatic access use 'reason' field.`, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `Reason for provisioning failures.`, + }, + }, + }, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `A state of this Managed Certificate.`, + }, + }, + }, + ExactlyOneOf: []string{"self_managed", "managed"}, + }, + "scope": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: certManagerDefaultScopeDiffSuppress, + Description: `The scope of the certificate. + +DEFAULT: Certificates with default scope are served from core Google data centers. +If unsure, choose this option. + +EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, +served from non-core Google data centers. +Currently allowed only for managed certificates.`, + Default: "DEFAULT", + }, + "self_managed": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Certificate data for a SelfManaged Certificate. +SelfManaged Certificates are uploaded by the user. 
Updating such +certificates before they expire remains the user's responsibility.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_pem": { + Type: schema.TypeString, + Optional: true, + Deprecated: "Deprecated in favor of `pem_certificate`", + Description: `**Deprecated** The certificate chain in PEM-encoded form. + +Leaf certificate comes first, followed by intermediate ones if any.`, + Sensitive: true, + ExactlyOneOf: []string{"self_managed.0.certificate_pem", "self_managed.0.pem_certificate"}, + }, + "pem_certificate": { + Type: schema.TypeString, + Optional: true, + Description: `The certificate chain in PEM-encoded form. + +Leaf certificate comes first, followed by intermediate ones if any.`, + ExactlyOneOf: []string{"self_managed.0.certificate_pem", "self_managed.0.pem_certificate"}, + }, + "pem_private_key": { + Type: schema.TypeString, + Optional: true, + Description: `The private key of the leaf certificate in PEM-encoded form.`, + Sensitive: true, + ExactlyOneOf: []string{"self_managed.0.private_key_pem", "self_managed.0.pem_private_key"}, + }, + "private_key_pem": { + Type: schema.TypeString, + Optional: true, + Deprecated: "Deprecated in favor of `pem_private_key`", + Description: `**Deprecated** The private key of the leaf certificate in PEM-encoded form.`, + Sensitive: true, + ExactlyOneOf: []string{"self_managed.0.private_key_pem", "self_managed.0.pem_private_key"}, + }, + }, + }, + ExactlyOneOf: []string{"self_managed", "managed"}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_issuance_config.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_issuance_config.go new file mode 100644 index 0000000000..0b9a4a061c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_issuance_config.go @@ -0,0 +1,534 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package certificatemanager + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceCertificateManagerCertificateIssuanceConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceCertificateManagerCertificateIssuanceConfigCreate, + Read: resourceCertificateManagerCertificateIssuanceConfigRead, + Delete: resourceCertificateManagerCertificateIssuanceConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCertificateManagerCertificateIssuanceConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: 
map[string]*schema.Schema{ + "certificate_authority_config": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The CA that issues the workload certificate. It includes the CA address, type, authentication to CA service, etc.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_authority_service_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Defines a CertificateAuthorityServiceConfig.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ca_pool": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `A CA pool resource used to issue a certificate. +The CA pool string has a relative resource path following the form +"projects/{project}/locations/{location}/caPools/{caPool}".`, + }, + }, + }, + }, + }, + }, + }, + "key_algorithm": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"RSA_2048", "ECDSA_P256"}), + Description: `Key algorithm to use when generating the private key. Possible values: ["RSA_2048", "ECDSA_P256"]`, + }, + "lifetime": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Lifetime of issued certificates. A duration in seconds with up to nine fractional digits, ending with 's'. +Example: "1814400s". Valid values are from 21 days (1814400s) to 30 days (2592000s)`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A user-defined name of the certificate issuance config. +CertificateIssuanceConfig names must be unique globally.`, + }, + "rotation_window_percentage": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `It specifies the percentage of elapsed time of the certificate lifetime to wait before renewing the certificate. +Must be a number between 1-99, inclusive. 
+You must set the rotation window percentage in relation to the certificate lifetime so that certificate renewal occurs at least 7 days after +the certificate has been issued and at least 7 days before it expires.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `One or more paragraphs of text description of a CertificateIssuanceConfig.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `'Set of label tags associated with the CertificateIssuanceConfig resource. + An object containing a list of "key": value pairs. Example: { "name": "wrench", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Certificate Manager location. If not specified, "global" is used.`, + Default: "global", + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The creation timestamp of a CertificateIssuanceConfig. Timestamp is in RFC3339 UTC "Zulu" format, +accurate to nanoseconds with up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The last update timestamp of a CertificateIssuanceConfig. Timestamp is in RFC3339 UTC "Zulu" format, +accurate to nanoseconds with up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCertificateManagerCertificateIssuanceConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateIssuanceConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + rotationWindowPercentageProp, err := expandCertificateManagerCertificateIssuanceConfigRotationWindowPercentage(d.Get("rotation_window_percentage"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rotation_window_percentage"); !tpgresource.IsEmptyValue(reflect.ValueOf(rotationWindowPercentageProp)) && (ok || !reflect.DeepEqual(v, rotationWindowPercentageProp)) { + obj["rotationWindowPercentage"] = rotationWindowPercentageProp + } + keyAlgorithmProp, err := expandCertificateManagerCertificateIssuanceConfigKeyAlgorithm(d.Get("key_algorithm"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("key_algorithm"); !tpgresource.IsEmptyValue(reflect.ValueOf(keyAlgorithmProp)) && (ok || !reflect.DeepEqual(v, keyAlgorithmProp)) { + obj["keyAlgorithm"] = keyAlgorithmProp + } + lifetimeProp, err := expandCertificateManagerCertificateIssuanceConfigLifetime(d.Get("lifetime"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("lifetime"); !tpgresource.IsEmptyValue(reflect.ValueOf(lifetimeProp)) && (ok || !reflect.DeepEqual(v, 
lifetimeProp)) { + obj["lifetime"] = lifetimeProp + } + labelsProp, err := expandCertificateManagerCertificateIssuanceConfigLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + certificateAuthorityConfigProp, err := expandCertificateManagerCertificateIssuanceConfigCertificateAuthorityConfig(d.Get("certificate_authority_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate_authority_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(certificateAuthorityConfigProp)) && (ok || !reflect.DeepEqual(v, certificateAuthorityConfigProp)) { + obj["certificateAuthorityConfig"] = certificateAuthorityConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/{{location}}/certificateIssuanceConfigs?certificateIssuanceConfigId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new CertificateIssuanceConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateIssuanceConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating CertificateIssuanceConfig: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/locations/{{location}}/certificateIssuanceConfigs/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = CertificateManagerOperationWaitTime( + config, res, project, "Creating CertificateIssuanceConfig", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create CertificateIssuanceConfig: %s", err) + } + + log.Printf("[DEBUG] Finished creating CertificateIssuanceConfig %q: %#v", d.Id(), res) + + return resourceCertificateManagerCertificateIssuanceConfigRead(d, meta) +} + +func resourceCertificateManagerCertificateIssuanceConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/{{location}}/certificateIssuanceConfigs/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateIssuanceConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CertificateManagerCertificateIssuanceConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading CertificateIssuanceConfig: %s", err) + } + + if err := d.Set("description", 
flattenCertificateManagerCertificateIssuanceConfigDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateIssuanceConfig: %s", err) + } + if err := d.Set("rotation_window_percentage", flattenCertificateManagerCertificateIssuanceConfigRotationWindowPercentage(res["rotationWindowPercentage"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateIssuanceConfig: %s", err) + } + if err := d.Set("key_algorithm", flattenCertificateManagerCertificateIssuanceConfigKeyAlgorithm(res["keyAlgorithm"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateIssuanceConfig: %s", err) + } + if err := d.Set("lifetime", flattenCertificateManagerCertificateIssuanceConfigLifetime(res["lifetime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateIssuanceConfig: %s", err) + } + if err := d.Set("create_time", flattenCertificateManagerCertificateIssuanceConfigCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateIssuanceConfig: %s", err) + } + if err := d.Set("update_time", flattenCertificateManagerCertificateIssuanceConfigUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateIssuanceConfig: %s", err) + } + if err := d.Set("labels", flattenCertificateManagerCertificateIssuanceConfigLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateIssuanceConfig: %s", err) + } + if err := d.Set("certificate_authority_config", flattenCertificateManagerCertificateIssuanceConfigCertificateAuthorityConfig(res["certificateAuthorityConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateIssuanceConfig: %s", err) + } + + return nil +} + +func resourceCertificateManagerCertificateIssuanceConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateIssuanceConfig: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/{{location}}/certificateIssuanceConfigs/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting CertificateIssuanceConfig %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "CertificateIssuanceConfig") + } + + err = CertificateManagerOperationWaitTime( + config, res, project, "Deleting CertificateIssuanceConfig", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting CertificateIssuanceConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceCertificateManagerCertificateIssuanceConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/certificateIssuanceConfigs/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/locations/{{location}}/certificateIssuanceConfigs/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCertificateManagerCertificateIssuanceConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateIssuanceConfigRotationWindowPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCertificateManagerCertificateIssuanceConfigKeyAlgorithm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateIssuanceConfigLifetime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateIssuanceConfigCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateIssuanceConfigUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateIssuanceConfigLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateIssuanceConfigCertificateAuthorityConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if 
len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["certificate_authority_service_config"] = + flattenCertificateManagerCertificateIssuanceConfigCertificateAuthorityConfigCertificateAuthorityServiceConfig(original["certificateAuthorityServiceConfig"], d, config) + return []interface{}{transformed} +} +func flattenCertificateManagerCertificateIssuanceConfigCertificateAuthorityConfigCertificateAuthorityServiceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["ca_pool"] = + flattenCertificateManagerCertificateIssuanceConfigCertificateAuthorityConfigCertificateAuthorityServiceConfigCaPool(original["caPool"], d, config) + return []interface{}{transformed} +} +func flattenCertificateManagerCertificateIssuanceConfigCertificateAuthorityConfigCertificateAuthorityServiceConfigCaPool(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCertificateManagerCertificateIssuanceConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateIssuanceConfigRotationWindowPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateIssuanceConfigKeyAlgorithm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateIssuanceConfigLifetime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateIssuanceConfigLabels(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCertificateManagerCertificateIssuanceConfigCertificateAuthorityConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCertificateAuthorityServiceConfig, err := expandCertificateManagerCertificateIssuanceConfigCertificateAuthorityConfigCertificateAuthorityServiceConfig(original["certificate_authority_service_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCertificateAuthorityServiceConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["certificateAuthorityServiceConfig"] = transformedCertificateAuthorityServiceConfig + } + + return transformed, nil +} + +func expandCertificateManagerCertificateIssuanceConfigCertificateAuthorityConfigCertificateAuthorityServiceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCaPool, err := expandCertificateManagerCertificateIssuanceConfigCertificateAuthorityConfigCertificateAuthorityServiceConfigCaPool(original["ca_pool"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCaPool); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["caPool"] = transformedCaPool + } + + return transformed, nil +} + 
+func expandCertificateManagerCertificateIssuanceConfigCertificateAuthorityConfigCertificateAuthorityServiceConfigCaPool(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_issuance_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_issuance_config_sweeper.go new file mode 100644 index 0000000000..67ca320fe3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_issuance_config_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package certificatemanager + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CertificateManagerCertificateIssuanceConfig", testSweepCertificateManagerCertificateIssuanceConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCertificateManagerCertificateIssuanceConfig(region string) error { + resourceName := "CertificateManagerCertificateIssuanceConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/{{location}}/certificateIssuanceConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + 
Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["certificateIssuanceConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/{{location}}/certificateIssuanceConfigs/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_map.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_map.go new file mode 100644 index 0000000000..5d6e4a2cc5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_map.go @@ -0,0 +1,514 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package certificatemanager + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceCertificateManagerCertificateMap() *schema.Resource { + return &schema.Resource{ + Create: resourceCertificateManagerCertificateMapCreate, + Read: resourceCertificateManagerCertificateMapRead, + Update: resourceCertificateManagerCertificateMapUpdate, + Delete: resourceCertificateManagerCertificateMapDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCertificateManagerCertificateMapImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A user-defined name of the Certificate Map. Certificate Map names must be unique +globally and match the pattern 'projects/*/locations/*/certificateMaps/*'.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the resource.`, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `Set of labels associated with a Certificate Map resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp of a Certificate Map. Timestamp is in RFC3339 UTC "Zulu" format, +accurate to nanoseconds with up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "gclb_targets": { + Type: schema.TypeList, + Computed: true, + Description: `A list of target proxies that use this Certificate Map`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_configs": { + Type: schema.TypeList, + Optional: true, + Description: `An IP configuration where this Certificate Map is serving`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Optional: true, + Description: `An external IP address`, + }, + "ports": { + Type: schema.TypeList, + Optional: true, + Description: `A list of ports`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "target_https_proxy": { + Type: schema.TypeString, + Optional: true, + Description: `Proxy name must be in the format projects/*/locations/*/targetHttpsProxies/*. +This field is part of a union field 'target_proxy': Only one of 'targetHttpsProxy' or +'targetSslProxy' may be set.`, + }, + "target_ssl_proxy": { + Type: schema.TypeString, + Optional: true, + Description: `Proxy name must be in the format projects/*/locations/*/targetSslProxies/*. +This field is part of a union field 'target_proxy': Only one of 'targetHttpsProxy' or +'targetSslProxy' may be set.`, + }, + }, + }, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Update timestamp of a Certificate Map. Timestamp is in RFC3339 UTC "Zulu" format, +accurate to nanoseconds with up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCertificateManagerCertificateMapCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateMapDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateMapLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps?certificateMapId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new CertificateMap: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMap: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating CertificateMap: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = CertificateManagerOperationWaitTime( + config, res, project, "Creating CertificateMap", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create CertificateMap: %s", err) + } + + log.Printf("[DEBUG] Finished creating CertificateMap %q: %#v", d.Id(), res) + + return resourceCertificateManagerCertificateMapRead(d, meta) +} + +func resourceCertificateManagerCertificateMapRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMap: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CertificateManagerCertificateMap %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error 
reading CertificateMap: %s", err) + } + + if err := d.Set("description", flattenCertificateManagerCertificateMapDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + if err := d.Set("create_time", flattenCertificateManagerCertificateMapCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + if err := d.Set("update_time", flattenCertificateManagerCertificateMapUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + if err := d.Set("labels", flattenCertificateManagerCertificateMapLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + if err := d.Set("gclb_targets", flattenCertificateManagerCertificateMapGclbTargets(res["gclbTargets"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMap: %s", err) + } + + return nil +} + +func resourceCertificateManagerCertificateMapUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMap: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateMapDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateMapLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating CertificateMap %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating CertificateMap %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating CertificateMap %q: %#v", d.Id(), res) + } + + err = CertificateManagerOperationWaitTime( + config, res, project, "Updating CertificateMap", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceCertificateManagerCertificateMapRead(d, meta) +} + +func resourceCertificateManagerCertificateMapDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := 
tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMap: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting CertificateMap %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "CertificateMap") + } + + err = CertificateManagerOperationWaitTime( + config, res, project, "Deleting CertificateMap", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting CertificateMap %q: %#v", d.Id(), res) + return nil +} + +func resourceCertificateManagerCertificateMapImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/global/certificateMaps/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCertificateManagerCertificateMapDescription(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapGclbTargets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "ip_configs": flattenCertificateManagerCertificateMapGclbTargetsIpConfigs(original["ipConfigs"], d, config), + "target_https_proxy": flattenCertificateManagerCertificateMapGclbTargetsTargetHttpsProxy(original["targetHttpsProxy"], d, config), + "target_ssl_proxy": flattenCertificateManagerCertificateMapGclbTargetsTargetSslProxy(original["targetSslProxy"], d, config), + }) + } + return transformed +} +func flattenCertificateManagerCertificateMapGclbTargetsIpConfigs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "ip_address": flattenCertificateManagerCertificateMapGclbTargetsIpConfigsIpAddress(original["ipAddress"], d, 
config), + "ports": flattenCertificateManagerCertificateMapGclbTargetsIpConfigsPorts(original["ports"], d, config), + }) + } + return transformed +} +func flattenCertificateManagerCertificateMapGclbTargetsIpConfigsIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapGclbTargetsIpConfigsPorts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapGclbTargetsTargetHttpsProxy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapGclbTargetsTargetSslProxy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCertificateManagerCertificateMapDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_map_entry.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_map_entry.go new file mode 100644 index 0000000000..145dea7c75 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_map_entry.go @@ -0,0 +1,541 @@ +// Copyright (c) 
HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package certificatemanager + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceCertificateManagerCertificateMapEntry() *schema.Resource { + return &schema.Resource{ + Create: resourceCertificateManagerCertificateMapEntryCreate, + Read: resourceCertificateManagerCertificateMapEntryRead, + Update: resourceCertificateManagerCertificateMapEntryUpdate, + Delete: resourceCertificateManagerCertificateMapEntryDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCertificateManagerCertificateMapEntryImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "certificates": { + Type: schema.TypeList, + Required: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `A set of Certificates defines for the given hostname. +There can be defined up to fifteen certificates in each Certificate Map Entry. 
+Each certificate must match pattern projects/*/locations/*/certificates/*.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "map": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A map entry that is inputted into the cetrificate map`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A user-defined name of the Certificate Map Entry. Certificate Map Entry +names must be unique globally and match pattern +'projects/*/locations/*/certificateMaps/*/certificateMapEntries/*'`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the resource.`, + }, + "hostname": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A Hostname (FQDN, e.g. example.com) or a wildcard hostname expression (*.example.com) +for a set of hostnames with common suffix. Used as Server Name Indication (SNI) for +selecting a proper certificate.`, + ExactlyOneOf: []string{"hostname", "matcher"}, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `Set of labels associated with a Certificate Map Entry. +An object containing a list of "key": value pairs. +Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "matcher": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A predefined matcher for particular cases, other than SNI selection`, + ExactlyOneOf: []string{"hostname", "matcher"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp of a Certificate Map Entry. Timestamp in RFC3339 UTC "Zulu" format, +with nanosecond resolution and up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `A serving state of this Certificate Map Entry.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Update timestamp of a Certificate Map Entry. Timestamp in RFC3339 UTC "Zulu" format, +with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCertificateManagerCertificateMapEntryCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateMapEntryDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateMapEntryLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + certificatesProp, err := expandCertificateManagerCertificateMapEntryCertificates(d.Get("certificates"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificates"); !tpgresource.IsEmptyValue(reflect.ValueOf(certificatesProp)) && (ok || !reflect.DeepEqual(v, certificatesProp)) { + obj["certificates"] = certificatesProp + } + hostnameProp, err := 
expandCertificateManagerCertificateMapEntryHostname(d.Get("hostname"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("hostname"); !tpgresource.IsEmptyValue(reflect.ValueOf(hostnameProp)) && (ok || !reflect.DeepEqual(v, hostnameProp)) { + obj["hostname"] = hostnameProp + } + matcherProp, err := expandCertificateManagerCertificateMapEntryMatcher(d.Get("matcher"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("matcher"); !tpgresource.IsEmptyValue(reflect.ValueOf(matcherProp)) && (ok || !reflect.DeepEqual(v, matcherProp)) { + obj["matcher"] = matcherProp + } + nameProp, err := expandCertificateManagerCertificateMapEntryName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries?certificateMapEntryId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new CertificateMapEntry: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating CertificateMapEntry: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = CertificateManagerOperationWaitTime( + config, res, project, "Creating CertificateMapEntry", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create CertificateMapEntry: %s", err) + } + + log.Printf("[DEBUG] Finished creating CertificateMapEntry %q: %#v", d.Id(), res) + + return resourceCertificateManagerCertificateMapEntryRead(d, meta) +} + +func resourceCertificateManagerCertificateMapEntryRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CertificateManagerCertificateMapEntry %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + + if err := d.Set("description", 
flattenCertificateManagerCertificateMapEntryDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("create_time", flattenCertificateManagerCertificateMapEntryCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("update_time", flattenCertificateManagerCertificateMapEntryUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("labels", flattenCertificateManagerCertificateMapEntryLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("certificates", flattenCertificateManagerCertificateMapEntryCertificates(res["certificates"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("state", flattenCertificateManagerCertificateMapEntryState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("hostname", flattenCertificateManagerCertificateMapEntryHostname(res["hostname"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("matcher", flattenCertificateManagerCertificateMapEntryMatcher(res["matcher"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + if err := d.Set("name", flattenCertificateManagerCertificateMapEntryName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading CertificateMapEntry: %s", err) + } + + return nil +} + +func resourceCertificateManagerCertificateMapEntryUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { 
+ return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerCertificateMapEntryDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerCertificateMapEntryLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + certificatesProp, err := expandCertificateManagerCertificateMapEntryCertificates(d.Get("certificates"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificates"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, certificatesProp)) { + obj["certificates"] = certificatesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating CertificateMapEntry %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("certificates") { + updateMask = append(updateMask, "certificates") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": 
strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating CertificateMapEntry %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating CertificateMapEntry %q: %#v", d.Id(), res) + } + + err = CertificateManagerOperationWaitTime( + config, res, project, "Updating CertificateMapEntry", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceCertificateManagerCertificateMapEntryRead(d, meta) +} + +func resourceCertificateManagerCertificateMapEntryDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateMapEntry: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting CertificateMapEntry %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + 
Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "CertificateMapEntry") + } + + err = CertificateManagerOperationWaitTime( + config, res, project, "Deleting CertificateMapEntry", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting CertificateMapEntry %q: %#v", d.Id(), res) + return nil +} + +func resourceCertificateManagerCertificateMapEntryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/global/certificateMaps/(?P[^/]+)/certificateMapEntries/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCertificateManagerCertificateMapEntryDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryCertificates(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryHostname(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryMatcher(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerCertificateMapEntryName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandCertificateManagerCertificateMapEntryDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapEntryLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCertificateManagerCertificateMapEntryCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapEntryHostname(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapEntryMatcher(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerCertificateMapEntryName(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return tpgresource.GetResourceNameFromSelfLink(v.(string)), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_map_entry_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_map_entry_sweeper.go new file mode 100644 index 0000000000..99a2eabe9e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_map_entry_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package certificatemanager + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CertificateManagerCertificateMapEntry", testSweepCertificateManagerCertificateMapEntry) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCertificateManagerCertificateMapEntry(region string) error { + resourceName := "CertificateManagerCertificateMapEntry" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: 
"GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["certificateMapEntries"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/global/certificateMaps/{{map}}/certificateMapEntries/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_map_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_map_sweeper.go new file mode 100644 index 0000000000..8d87d36082 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_map_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package certificatemanager + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CertificateManagerCertificateMap", testSweepCertificateManagerCertificateMap) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCertificateManagerCertificateMap(region string) error { + resourceName := "CertificateManagerCertificateMap" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/global/certificateMaps", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + 
UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["certificateMaps"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/global/certificateMaps/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_sweeper.go new file mode 100644 index 0000000000..8ebbe1b77b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_certificate_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package certificatemanager + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CertificateManagerCertificate", testSweepCertificateManagerCertificate) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCertificateManagerCertificate(region string) error { + resourceName := "CertificateManagerCertificate" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/{{location}}/certificates", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + 
UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["certificates"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/{{location}}/certificates/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_dns_authorization.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_dns_authorization.go new file mode 100644 index 0000000000..ad95ea6899 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_dns_authorization.go @@ -0,0 +1,467 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package certificatemanager + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceCertificateManagerDnsAuthorization() *schema.Resource { + return &schema.Resource{ + Create: resourceCertificateManagerDnsAuthorizationCreate, + Read: resourceCertificateManagerDnsAuthorizationRead, + Update: resourceCertificateManagerDnsAuthorizationUpdate, + Delete: resourceCertificateManagerDnsAuthorizationDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCertificateManagerDnsAuthorizationImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "domain": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A domain which is being authorized. A DnsAuthorization resource covers a +single domain and its wildcard, e.g. authorization for "example.com" can +be used to issue certificates for "example.com" and "*.example.com".`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource; provided by the client when the resource is created. 
+The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, +and all following characters must be a dash, underscore, letter or digit.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the resource.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Set of label tags associated with the DNS Authorization resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "dns_resource_record": { + Type: schema.TypeList, + Computed: true, + Description: `The structure describing the DNS Resource Record that needs to be added +to DNS configuration for the authorization to be usable by +certificate.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data": { + Type: schema.TypeString, + Computed: true, + Description: `Data of the DNS Resource Record.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Fully qualified name of the DNS Resource Record. +E.g. 
'_acme-challenge.example.com'.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `Type of the DNS Resource Record.`, + }, + }, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCertificateManagerDnsAuthorizationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerDnsAuthorizationDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerDnsAuthorizationLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + domainProp, err := expandCertificateManagerDnsAuthorizationDomain(d.Get("domain"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("domain"); !tpgresource.IsEmptyValue(reflect.ValueOf(domainProp)) && (ok || !reflect.DeepEqual(v, domainProp)) { + obj["domain"] = domainProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/dnsAuthorizations?dnsAuthorizationId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new DnsAuthorization: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DnsAuthorization: 
%s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating DnsAuthorization: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = CertificateManagerOperationWaitTime( + config, res, project, "Creating DnsAuthorization", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create DnsAuthorization: %s", err) + } + + log.Printf("[DEBUG] Finished creating DnsAuthorization %q: %#v", d.Id(), res) + + return resourceCertificateManagerDnsAuthorizationRead(d, meta) +} + +func resourceCertificateManagerDnsAuthorizationRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DnsAuthorization: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { 
+ billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CertificateManagerDnsAuthorization %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading DnsAuthorization: %s", err) + } + + if err := d.Set("description", flattenCertificateManagerDnsAuthorizationDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading DnsAuthorization: %s", err) + } + if err := d.Set("labels", flattenCertificateManagerDnsAuthorizationLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading DnsAuthorization: %s", err) + } + if err := d.Set("domain", flattenCertificateManagerDnsAuthorizationDomain(res["domain"], d, config)); err != nil { + return fmt.Errorf("Error reading DnsAuthorization: %s", err) + } + if err := d.Set("dns_resource_record", flattenCertificateManagerDnsAuthorizationDnsResourceRecord(res["dnsResourceRecord"], d, config)); err != nil { + return fmt.Errorf("Error reading DnsAuthorization: %s", err) + } + + return nil +} + +func resourceCertificateManagerDnsAuthorizationUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DnsAuthorization: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandCertificateManagerDnsAuthorizationDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCertificateManagerDnsAuthorizationLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating DnsAuthorization %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating DnsAuthorization %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating DnsAuthorization %q: %#v", d.Id(), res) + } + + err = CertificateManagerOperationWaitTime( + config, res, project, "Updating DnsAuthorization", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceCertificateManagerDnsAuthorizationRead(d, meta) +} + +func 
resourceCertificateManagerDnsAuthorizationDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DnsAuthorization: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CertificateManagerBasePath}}projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting DnsAuthorization %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DnsAuthorization") + } + + err = CertificateManagerOperationWaitTime( + config, res, project, "Deleting DnsAuthorization", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting DnsAuthorization %q: %#v", d.Id(), res) + return nil +} + +func resourceCertificateManagerDnsAuthorizationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/global/dnsAuthorizations/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<name>[^/]+)", + "(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/locations/global/dnsAuthorizations/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCertificateManagerDnsAuthorizationDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerDnsAuthorizationLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerDnsAuthorizationDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerDnsAuthorizationDnsResourceRecord(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenCertificateManagerDnsAuthorizationDnsResourceRecordName(original["name"], d, config) + transformed["type"] = + flattenCertificateManagerDnsAuthorizationDnsResourceRecordType(original["type"], d, config) + transformed["data"] = + flattenCertificateManagerDnsAuthorizationDnsResourceRecordData(original["data"], d, config) + return []interface{}{transformed} +} +func flattenCertificateManagerDnsAuthorizationDnsResourceRecordName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerDnsAuthorizationDnsResourceRecordType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCertificateManagerDnsAuthorizationDnsResourceRecordData(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCertificateManagerDnsAuthorizationDescription(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCertificateManagerDnsAuthorizationLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCertificateManagerDnsAuthorizationDomain(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_dns_authorization_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_dns_authorization_sweeper.go new file mode 100644 index 0000000000..562519e04e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/certificatemanager/resource_certificate_manager_dns_authorization_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package certificatemanager + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CertificateManagerDnsAuthorization", testSweepCertificateManagerDnsAuthorization) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCertificateManagerDnsAuthorization(region string) error { + resourceName := "CertificateManagerDnsAuthorization" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/global/dnsAuthorizations", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: 
listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["dnsAuthorizations"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://certificatemanager.googleapis.com/v1/projects/{{project}}/locations/global/dnsAuthorizations/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/data_source_google_cloud_asset_resources_search_all.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/data_source_google_cloud_asset_resources_search_all.go new file mode 100644 index 0000000000..acdb2a6138 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/data_source_google_cloud_asset_resources_search_all.go @@ -0,0 +1,3 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package cloudasset diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_folder_feed.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_folder_feed.go new file mode 100644 index 0000000000..5867e40c2d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_folder_feed.go @@ -0,0 +1,698 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudasset + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceCloudAssetFolderFeed() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudAssetFolderFeedCreate, + Read: resourceCloudAssetFolderFeedRead, + Update: resourceCloudAssetFolderFeedUpdate, + Delete: resourceCloudAssetFolderFeedDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudAssetFolderFeedImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "billing_project": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The project whose identity will be used when sending messages to the +destination pubsub topic. 
It also specifies the project for API +enablement check, quota, and billing.`, + }, + "feed_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `This is the client-assigned asset feed identifier and it needs to be unique under a specific parent.`, + }, + "feed_output_config": { + Type: schema.TypeList, + Required: true, + Description: `Output configuration for asset feed destination.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pubsub_destination": { + Type: schema.TypeList, + Required: true, + Description: `Destination on Cloud Pubsub.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic": { + Type: schema.TypeString, + Required: true, + Description: `Destination on Cloud Pubsub topic.`, + }, + }, + }, + }, + }, + }, + }, + "folder": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The folder this feed should be created in.`, + }, + "asset_names": { + Type: schema.TypeList, + Optional: true, + Description: `A list of the full names of the assets to receive updates. You must specify either or both of +assetNames and assetTypes. Only asset updates matching specified assetNames and assetTypes are +exported to the feed. For example: //compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1. +See https://cloud.google.com/apis/design/resourceNames#fullResourceName for more info.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "asset_types": { + Type: schema.TypeList, + Optional: true, + Description: `A list of types of the assets to receive updates. You must specify either or both of assetNames +and assetTypes. Only asset updates matching specified assetNames and assetTypes are exported to +the feed. 
For example: "compute.googleapis.com/Disk" +See https://cloud.google.com/asset-inventory/docs/supported-asset-types for a list of all +supported asset types.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "condition": { + Type: schema.TypeList, + Optional: true, + Description: `A condition which determines whether an asset update should be published. If specified, an asset +will be returned only when the expression evaluates to true. When set, expression field +must be a valid CEL expression on a TemporalAsset with name temporal_asset. Example: a Feed with +expression "temporal_asset.deleted == true" will only publish Asset deletions. Other fields of +condition are optional.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Required: true, + Description: `Textual representation of an expression in Common Expression Language syntax.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of the expression. This is a longer text which describes the expression, +e.g. when hovered over it in a UI.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `String indicating the location of the expression for error reporting, e.g. a file +name and a position in the file.`, + }, + "title": { + Type: schema.TypeString, + Optional: true, + Description: `Title for the expression, i.e. a short string describing its purpose. +This can be used e.g. in UIs which allow to enter the expression.`, + }, + }, + }, + }, + "content_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "OS_INVENTORY", "ACCESS_POLICY", ""}), + Description: `Asset content type. If not specified, no content but the asset name and type will be returned. 
Possible values: ["CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "OS_INVENTORY", "ACCESS_POLICY"]`, + }, + "folder_id": { + Type: schema.TypeString, + Computed: true, + Description: `The ID of the folder where this feed has been created. Both [FOLDER_NUMBER] +and folders/[FOLDER_NUMBER] are accepted.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The format will be folders/{folder_number}/feeds/{client-assigned_feed_identifier}.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudAssetFolderFeedCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + assetNamesProp, err := expandCloudAssetFolderFeedAssetNames(d.Get("asset_names"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("asset_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(assetNamesProp)) && (ok || !reflect.DeepEqual(v, assetNamesProp)) { + obj["assetNames"] = assetNamesProp + } + assetTypesProp, err := expandCloudAssetFolderFeedAssetTypes(d.Get("asset_types"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("asset_types"); !tpgresource.IsEmptyValue(reflect.ValueOf(assetTypesProp)) && (ok || !reflect.DeepEqual(v, assetTypesProp)) { + obj["assetTypes"] = assetTypesProp + } + contentTypeProp, err := expandCloudAssetFolderFeedContentType(d.Get("content_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("content_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(contentTypeProp)) && (ok || !reflect.DeepEqual(v, contentTypeProp)) { + obj["contentType"] = contentTypeProp + } + feedOutputConfigProp, err := expandCloudAssetFolderFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("feed_output_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(feedOutputConfigProp)) && (ok || !reflect.DeepEqual(v, feedOutputConfigProp)) { + obj["feedOutputConfig"] = feedOutputConfigProp + } + conditionProp, err := expandCloudAssetFolderFeedCondition(d.Get("condition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("condition"); !tpgresource.IsEmptyValue(reflect.ValueOf(conditionProp)) && (ok || !reflect.DeepEqual(v, conditionProp)) { + obj["condition"] = conditionProp + } + + obj, err = resourceCloudAssetFolderFeedEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudAssetBasePath}}folders/{{folder_id}}/feeds?feedId={{feed_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new FolderFeed: %#v", obj) + billingProject := "" + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // Send the project ID in the X-Goog-User-Project header. 
+ origUserProjectOverride := config.UserProjectOverride + config.UserProjectOverride = true + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating FolderFeed: %s", err) + } + if err := d.Set("name", flattenCloudAssetFolderFeedName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Restore the original value of user_project_override. + config.UserProjectOverride = origUserProjectOverride + + log.Printf("[DEBUG] Finished creating FolderFeed %q: %#v", d.Id(), res) + + return resourceCloudAssetFolderFeedRead(d, meta) +} + +func resourceCloudAssetFolderFeedRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudAssetFolderFeed %q", d.Id())) + } + 
+ if err := d.Set("folder_id", flattenCloudAssetFolderFeedFolderId(res["folder_id"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderFeed: %s", err) + } + if err := d.Set("name", flattenCloudAssetFolderFeedName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderFeed: %s", err) + } + if err := d.Set("asset_names", flattenCloudAssetFolderFeedAssetNames(res["assetNames"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderFeed: %s", err) + } + if err := d.Set("asset_types", flattenCloudAssetFolderFeedAssetTypes(res["assetTypes"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderFeed: %s", err) + } + if err := d.Set("content_type", flattenCloudAssetFolderFeedContentType(res["contentType"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderFeed: %s", err) + } + if err := d.Set("feed_output_config", flattenCloudAssetFolderFeedFeedOutputConfig(res["feedOutputConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderFeed: %s", err) + } + if err := d.Set("condition", flattenCloudAssetFolderFeedCondition(res["condition"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderFeed: %s", err) + } + + return nil +} + +func resourceCloudAssetFolderFeedUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + assetNamesProp, err := expandCloudAssetFolderFeedAssetNames(d.Get("asset_names"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("asset_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, assetNamesProp)) { + obj["assetNames"] = assetNamesProp + } + assetTypesProp, err := expandCloudAssetFolderFeedAssetTypes(d.Get("asset_types"), d, config) + if err != nil { + return 
err + } else if v, ok := d.GetOkExists("asset_types"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, assetTypesProp)) { + obj["assetTypes"] = assetTypesProp + } + contentTypeProp, err := expandCloudAssetFolderFeedContentType(d.Get("content_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("content_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, contentTypeProp)) { + obj["contentType"] = contentTypeProp + } + feedOutputConfigProp, err := expandCloudAssetFolderFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("feed_output_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, feedOutputConfigProp)) { + obj["feedOutputConfig"] = feedOutputConfigProp + } + conditionProp, err := expandCloudAssetFolderFeedCondition(d.Get("condition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("condition"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, conditionProp)) { + obj["condition"] = conditionProp + } + + obj, err = resourceCloudAssetFolderFeedEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating FolderFeed %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("asset_names") { + updateMask = append(updateMask, "assetNames") + } + + if d.HasChange("asset_types") { + updateMask = append(updateMask, "assetTypes") + } + + if d.HasChange("content_type") { + updateMask = append(updateMask, "contentType") + } + + if d.HasChange("feed_output_config") { + updateMask = append(updateMask, "feedOutputConfig") + } + + if d.HasChange("condition") { + updateMask = append(updateMask, "condition") + } + // updateMask is a URL parameter but not present in 
the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating FolderFeed %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating FolderFeed %q: %#v", d.Id(), res) + } + + return resourceCloudAssetFolderFeedRead(d, meta) +} + +func resourceCloudAssetFolderFeedDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + log.Printf("[DEBUG] Deleting FolderFeed %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil 
{ + return transport_tpg.HandleNotFoundError(err, d, "FolderFeed") + } + + log.Printf("[DEBUG] Finished deleting FolderFeed %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudAssetFolderFeedImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + if err := d.Set("name", d.Id()); err != nil { + return nil, err + } + return []*schema.ResourceData{d}, nil +} + +func flattenCloudAssetFolderFeedFolderId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetFolderFeedName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetFolderFeedAssetNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetFolderFeedAssetTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetFolderFeedContentType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetFolderFeedFeedOutputConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pubsub_destination"] = + flattenCloudAssetFolderFeedFeedOutputConfigPubsubDestination(original["pubsubDestination"], d, config) + return []interface{}{transformed} +} +func flattenCloudAssetFolderFeedFeedOutputConfigPubsubDestination(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["topic"] = + 
flattenCloudAssetFolderFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) + return []interface{}{transformed} +} +func flattenCloudAssetFolderFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetFolderFeedCondition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["expression"] = + flattenCloudAssetFolderFeedConditionExpression(original["expression"], d, config) + transformed["title"] = + flattenCloudAssetFolderFeedConditionTitle(original["title"], d, config) + transformed["description"] = + flattenCloudAssetFolderFeedConditionDescription(original["description"], d, config) + transformed["location"] = + flattenCloudAssetFolderFeedConditionLocation(original["location"], d, config) + return []interface{}{transformed} +} +func flattenCloudAssetFolderFeedConditionExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetFolderFeedConditionTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetFolderFeedConditionDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetFolderFeedConditionLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCloudAssetFolderFeedAssetNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetFolderFeedAssetTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return 
v, nil +} + +func expandCloudAssetFolderFeedContentType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetFolderFeedFeedOutputConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPubsubDestination, err := expandCloudAssetFolderFeedFeedOutputConfigPubsubDestination(original["pubsub_destination"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPubsubDestination); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pubsubDestination"] = transformedPubsubDestination + } + + return transformed, nil +} + +func expandCloudAssetFolderFeedFeedOutputConfigPubsubDestination(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTopic, err := expandCloudAssetFolderFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["topic"] = transformedTopic + } + + return transformed, nil +} + +func expandCloudAssetFolderFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetFolderFeedCondition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) 
== 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExpression, err := expandCloudAssetFolderFeedConditionExpression(original["expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expression"] = transformedExpression + } + + transformedTitle, err := expandCloudAssetFolderFeedConditionTitle(original["title"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["title"] = transformedTitle + } + + transformedDescription, err := expandCloudAssetFolderFeedConditionDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedLocation, err := expandCloudAssetFolderFeedConditionLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["location"] = transformedLocation + } + + return transformed, nil +} + +func expandCloudAssetFolderFeedConditionExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetFolderFeedConditionTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetFolderFeedConditionDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetFolderFeedConditionLocation(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceCloudAssetFolderFeedEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Remove the "folders/" prefix from the folder ID + if folder, ok := d.GetOkExists("folder"); ok { + if err := d.Set("folder_id", strings.TrimPrefix(folder.(string), "folders/")); err != nil { + return nil, fmt.Errorf("Error setting folder_id: %s", err) + } + } + // The feed object must be under the "feed" attribute on the request. + newObj := make(map[string]interface{}) + newObj["feed"] = obj + return newObj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_folder_feed_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_folder_feed_sweeper.go new file mode 100644 index 0000000000..60554da854 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_folder_feed_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudasset + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudAssetFolderFeed", testSweepCloudAssetFolderFeed) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudAssetFolderFeed(region string) error { + resourceName := "CloudAssetFolderFeed" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://cloudasset.googleapis.com/v1/folders/{{folder_id}}/feeds", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] 
Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["feeds"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudasset.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_organization_feed.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_organization_feed.go new file mode 100644 index 0000000000..ef63bc72aa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_organization_feed.go @@ -0,0 +1,685 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudasset + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceCloudAssetOrganizationFeed() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudAssetOrganizationFeedCreate, + Read: resourceCloudAssetOrganizationFeedRead, + Update: resourceCloudAssetOrganizationFeedUpdate, + Delete: resourceCloudAssetOrganizationFeedDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudAssetOrganizationFeedImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + 
"billing_project": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The project whose identity will be used when sending messages to the +destination pubsub topic. It also specifies the project for API +enablement check, quota, and billing.`, + }, + "feed_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `This is the client-assigned asset feed identifier and it needs to be unique under a specific parent.`, + }, + "feed_output_config": { + Type: schema.TypeList, + Required: true, + Description: `Output configuration for asset feed destination.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pubsub_destination": { + Type: schema.TypeList, + Required: true, + Description: `Destination on Cloud Pubsub.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic": { + Type: schema.TypeString, + Required: true, + Description: `Destination on Cloud Pubsub topic.`, + }, + }, + }, + }, + }, + }, + }, + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The organization this feed should be created in.`, + }, + "asset_names": { + Type: schema.TypeList, + Optional: true, + Description: `A list of the full names of the assets to receive updates. You must specify either or both of +assetNames and assetTypes. Only asset updates matching specified assetNames and assetTypes are +exported to the feed. For example: //compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1. +See https://cloud.google.com/apis/design/resourceNames#fullResourceName for more info.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "asset_types": { + Type: schema.TypeList, + Optional: true, + Description: `A list of types of the assets to receive updates. You must specify either or both of assetNames +and assetTypes. 
Only asset updates matching specified assetNames and assetTypes are exported to +the feed. For example: "compute.googleapis.com/Disk" +See https://cloud.google.com/asset-inventory/docs/supported-asset-types for a list of all +supported asset types.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "condition": { + Type: schema.TypeList, + Optional: true, + Description: `A condition which determines whether an asset update should be published. If specified, an asset +will be returned only when the expression evaluates to true. When set, expression field +must be a valid CEL expression on a TemporalAsset with name temporal_asset. Example: a Feed with +expression "temporal_asset.deleted == true" will only publish Asset deletions. Other fields of +condition are optional.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Required: true, + Description: `Textual representation of an expression in Common Expression Language syntax.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of the expression. This is a longer text which describes the expression, +e.g. when hovered over it in a UI.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `String indicating the location of the expression for error reporting, e.g. a file +name and a position in the file.`, + }, + "title": { + Type: schema.TypeString, + Optional: true, + Description: `Title for the expression, i.e. a short string describing its purpose. +This can be used e.g. in UIs which allow to enter the expression.`, + }, + }, + }, + }, + "content_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "OS_INVENTORY", "ACCESS_POLICY", ""}), + Description: `Asset content type. 
If not specified, no content but the asset name and type will be returned. Possible values: ["CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "OS_INVENTORY", "ACCESS_POLICY"]`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The format will be organizations/{organization_number}/feeds/{client-assigned_feed_identifier}.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudAssetOrganizationFeedCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + assetNamesProp, err := expandCloudAssetOrganizationFeedAssetNames(d.Get("asset_names"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("asset_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(assetNamesProp)) && (ok || !reflect.DeepEqual(v, assetNamesProp)) { + obj["assetNames"] = assetNamesProp + } + assetTypesProp, err := expandCloudAssetOrganizationFeedAssetTypes(d.Get("asset_types"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("asset_types"); !tpgresource.IsEmptyValue(reflect.ValueOf(assetTypesProp)) && (ok || !reflect.DeepEqual(v, assetTypesProp)) { + obj["assetTypes"] = assetTypesProp + } + contentTypeProp, err := expandCloudAssetOrganizationFeedContentType(d.Get("content_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("content_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(contentTypeProp)) && (ok || !reflect.DeepEqual(v, contentTypeProp)) { + obj["contentType"] = contentTypeProp + } + feedOutputConfigProp, err := expandCloudAssetOrganizationFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("feed_output_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(feedOutputConfigProp)) && (ok || 
!reflect.DeepEqual(v, feedOutputConfigProp)) { + obj["feedOutputConfig"] = feedOutputConfigProp + } + conditionProp, err := expandCloudAssetOrganizationFeedCondition(d.Get("condition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("condition"); !tpgresource.IsEmptyValue(reflect.ValueOf(conditionProp)) && (ok || !reflect.DeepEqual(v, conditionProp)) { + obj["condition"] = conditionProp + } + + obj, err = resourceCloudAssetOrganizationFeedEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudAssetBasePath}}organizations/{{org_id}}/feeds?feedId={{feed_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new OrganizationFeed: %#v", obj) + billingProject := "" + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // Send the project ID in the X-Goog-User-Project header. + origUserProjectOverride := config.UserProjectOverride + config.UserProjectOverride = true + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating OrganizationFeed: %s", err) + } + if err := d.Set("name", flattenCloudAssetOrganizationFeedName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Restore the original value of user_project_override. 
+ config.UserProjectOverride = origUserProjectOverride + + log.Printf("[DEBUG] Finished creating OrganizationFeed %q: %#v", d.Id(), res) + + return resourceCloudAssetOrganizationFeedRead(d, meta) +} + +func resourceCloudAssetOrganizationFeedRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudAssetOrganizationFeed %q", d.Id())) + } + + if err := d.Set("name", flattenCloudAssetOrganizationFeedName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationFeed: %s", err) + } + if err := d.Set("asset_names", flattenCloudAssetOrganizationFeedAssetNames(res["assetNames"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationFeed: %s", err) + } + if err := d.Set("asset_types", flattenCloudAssetOrganizationFeedAssetTypes(res["assetTypes"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationFeed: %s", err) + } + if err := d.Set("content_type", flattenCloudAssetOrganizationFeedContentType(res["contentType"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationFeed: %s", err) + } + if err := d.Set("feed_output_config", 
flattenCloudAssetOrganizationFeedFeedOutputConfig(res["feedOutputConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationFeed: %s", err) + } + if err := d.Set("condition", flattenCloudAssetOrganizationFeedCondition(res["condition"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationFeed: %s", err) + } + + return nil +} + +func resourceCloudAssetOrganizationFeedUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + assetNamesProp, err := expandCloudAssetOrganizationFeedAssetNames(d.Get("asset_names"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("asset_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, assetNamesProp)) { + obj["assetNames"] = assetNamesProp + } + assetTypesProp, err := expandCloudAssetOrganizationFeedAssetTypes(d.Get("asset_types"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("asset_types"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, assetTypesProp)) { + obj["assetTypes"] = assetTypesProp + } + contentTypeProp, err := expandCloudAssetOrganizationFeedContentType(d.Get("content_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("content_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, contentTypeProp)) { + obj["contentType"] = contentTypeProp + } + feedOutputConfigProp, err := expandCloudAssetOrganizationFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("feed_output_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, feedOutputConfigProp)) { + obj["feedOutputConfig"] = 
feedOutputConfigProp + } + conditionProp, err := expandCloudAssetOrganizationFeedCondition(d.Get("condition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("condition"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, conditionProp)) { + obj["condition"] = conditionProp + } + + obj, err = resourceCloudAssetOrganizationFeedEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating OrganizationFeed %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("asset_names") { + updateMask = append(updateMask, "assetNames") + } + + if d.HasChange("asset_types") { + updateMask = append(updateMask, "assetTypes") + } + + if d.HasChange("content_type") { + updateMask = append(updateMask, "contentType") + } + + if d.HasChange("feed_output_config") { + updateMask = append(updateMask, "feedOutputConfig") + } + + if d.HasChange("condition") { + updateMask = append(updateMask, "condition") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating OrganizationFeed %q: %s", d.Id(), err) + } else 
{ + log.Printf("[DEBUG] Finished updating OrganizationFeed %q: %#v", d.Id(), res) + } + + return resourceCloudAssetOrganizationFeedRead(d, meta) +} + +func resourceCloudAssetOrganizationFeedDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + log.Printf("[DEBUG] Deleting OrganizationFeed %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "OrganizationFeed") + } + + log.Printf("[DEBUG] Finished deleting OrganizationFeed %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudAssetOrganizationFeedImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + if err := d.Set("name", d.Id()); err != nil { + return nil, err + } + return []*schema.ResourceData{d}, nil +} + +func flattenCloudAssetOrganizationFeedName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetOrganizationFeedAssetNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetOrganizationFeedAssetTypes(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetOrganizationFeedContentType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetOrganizationFeedFeedOutputConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pubsub_destination"] = + flattenCloudAssetOrganizationFeedFeedOutputConfigPubsubDestination(original["pubsubDestination"], d, config) + return []interface{}{transformed} +} +func flattenCloudAssetOrganizationFeedFeedOutputConfigPubsubDestination(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["topic"] = + flattenCloudAssetOrganizationFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) + return []interface{}{transformed} +} +func flattenCloudAssetOrganizationFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetOrganizationFeedCondition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["expression"] = + flattenCloudAssetOrganizationFeedConditionExpression(original["expression"], d, config) + transformed["title"] = + flattenCloudAssetOrganizationFeedConditionTitle(original["title"], d, config) + transformed["description"] = + flattenCloudAssetOrganizationFeedConditionDescription(original["description"], d, 
config) + transformed["location"] = + flattenCloudAssetOrganizationFeedConditionLocation(original["location"], d, config) + return []interface{}{transformed} +} +func flattenCloudAssetOrganizationFeedConditionExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetOrganizationFeedConditionTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetOrganizationFeedConditionDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetOrganizationFeedConditionLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCloudAssetOrganizationFeedAssetNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetOrganizationFeedAssetTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetOrganizationFeedContentType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetOrganizationFeedFeedOutputConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPubsubDestination, err := expandCloudAssetOrganizationFeedFeedOutputConfigPubsubDestination(original["pubsub_destination"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPubsubDestination); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pubsubDestination"] = 
transformedPubsubDestination + } + + return transformed, nil +} + +func expandCloudAssetOrganizationFeedFeedOutputConfigPubsubDestination(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTopic, err := expandCloudAssetOrganizationFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["topic"] = transformedTopic + } + + return transformed, nil +} + +func expandCloudAssetOrganizationFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetOrganizationFeedCondition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExpression, err := expandCloudAssetOrganizationFeedConditionExpression(original["expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expression"] = transformedExpression + } + + transformedTitle, err := expandCloudAssetOrganizationFeedConditionTitle(original["title"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["title"] = transformedTitle + } + + transformedDescription, err := 
expandCloudAssetOrganizationFeedConditionDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedLocation, err := expandCloudAssetOrganizationFeedConditionLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["location"] = transformedLocation + } + + return transformed, nil +} + +func expandCloudAssetOrganizationFeedConditionExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetOrganizationFeedConditionTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetOrganizationFeedConditionDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetOrganizationFeedConditionLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceCloudAssetOrganizationFeedEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Remove the "folders/" prefix from the folder ID + if folder, ok := d.GetOkExists("folder"); ok { + if err := d.Set("folder_id", strings.TrimPrefix(folder.(string), "folders/")); err != nil { + return nil, fmt.Errorf("Error setting folder_id: %s", err) + } + } + // The feed object must be under the "feed" attribute on the request. 
+ newObj := make(map[string]interface{}) + newObj["feed"] = obj + return newObj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_organization_feed_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_organization_feed_sweeper.go new file mode 100644 index 0000000000..5e4c684d95 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_organization_feed_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudasset + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudAssetOrganizationFeed", testSweepCloudAssetOrganizationFeed) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudAssetOrganizationFeed(region string) error { + resourceName := "CloudAssetOrganizationFeed" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://cloudasset.googleapis.com/v1/organizations/{{org_id}}/feeds", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["feeds"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudasset.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_project_feed.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_project_feed.go new file mode 100644 index 0000000000..cace9ba787 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_project_feed.go @@ -0,0 +1,697 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudasset + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceCloudAssetProjectFeed() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudAssetProjectFeedCreate, + Read: resourceCloudAssetProjectFeedRead, + Update: resourceCloudAssetProjectFeedUpdate, + Delete: resourceCloudAssetProjectFeedDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudAssetProjectFeedImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "feed_id": { + Type: schema.TypeString, + Required: 
true, + ForceNew: true, + Description: `This is the client-assigned asset feed identifier and it needs to be unique under a specific parent.`, + }, + "feed_output_config": { + Type: schema.TypeList, + Required: true, + Description: `Output configuration for asset feed destination.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pubsub_destination": { + Type: schema.TypeList, + Required: true, + Description: `Destination on Cloud Pubsub.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic": { + Type: schema.TypeString, + Required: true, + Description: `Destination on Cloud Pubsub topic.`, + }, + }, + }, + }, + }, + }, + }, + "asset_names": { + Type: schema.TypeList, + Optional: true, + Description: `A list of the full names of the assets to receive updates. You must specify either or both of +assetNames and assetTypes. Only asset updates matching specified assetNames and assetTypes are +exported to the feed. For example: //compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1. +See https://cloud.google.com/apis/design/resourceNames#fullResourceName for more info.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "asset_types": { + Type: schema.TypeList, + Optional: true, + Description: `A list of types of the assets to receive updates. You must specify either or both of assetNames +and assetTypes. Only asset updates matching specified assetNames and assetTypes are exported to +the feed. For example: "compute.googleapis.com/Disk" +See https://cloud.google.com/asset-inventory/docs/supported-asset-types for a list of all +supported asset types.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "billing_project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The project whose identity will be used when sending messages to the +destination pubsub topic. 
It also specifies the project for API +enablement check, quota, and billing. If not specified, the resource's +project will be used.`, + }, + "condition": { + Type: schema.TypeList, + Optional: true, + Description: `A condition which determines whether an asset update should be published. If specified, an asset +will be returned only when the expression evaluates to true. When set, expression field +must be a valid CEL expression on a TemporalAsset with name temporal_asset. Example: a Feed with +expression "temporal_asset.deleted == true" will only publish Asset deletions. Other fields of +condition are optional.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Required: true, + Description: `Textual representation of an expression in Common Expression Language syntax.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of the expression. This is a longer text which describes the expression, +e.g. when hovered over it in a UI.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `String indicating the location of the expression for error reporting, e.g. a file +name and a position in the file.`, + }, + "title": { + Type: schema.TypeString, + Optional: true, + Description: `Title for the expression, i.e. a short string describing its purpose. +This can be used e.g. in UIs which allow to enter the expression.`, + }, + }, + }, + }, + "content_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "OS_INVENTORY", "ACCESS_POLICY", ""}), + Description: `Asset content type. If not specified, no content but the asset name and type will be returned. 
Possible values: ["CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "OS_INVENTORY", "ACCESS_POLICY"]`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The format will be projects/{projectNumber}/feeds/{client-assigned_feed_identifier}.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudAssetProjectFeedCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + assetNamesProp, err := expandCloudAssetProjectFeedAssetNames(d.Get("asset_names"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("asset_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(assetNamesProp)) && (ok || !reflect.DeepEqual(v, assetNamesProp)) { + obj["assetNames"] = assetNamesProp + } + assetTypesProp, err := expandCloudAssetProjectFeedAssetTypes(d.Get("asset_types"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("asset_types"); !tpgresource.IsEmptyValue(reflect.ValueOf(assetTypesProp)) && (ok || !reflect.DeepEqual(v, assetTypesProp)) { + obj["assetTypes"] = assetTypesProp + } + contentTypeProp, err := expandCloudAssetProjectFeedContentType(d.Get("content_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("content_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(contentTypeProp)) && (ok || !reflect.DeepEqual(v, contentTypeProp)) { + obj["contentType"] = contentTypeProp + } + feedOutputConfigProp, err := expandCloudAssetProjectFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("feed_output_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(feedOutputConfigProp)) && (ok || 
!reflect.DeepEqual(v, feedOutputConfigProp)) { + obj["feedOutputConfig"] = feedOutputConfigProp + } + conditionProp, err := expandCloudAssetProjectFeedCondition(d.Get("condition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("condition"); !tpgresource.IsEmptyValue(reflect.ValueOf(conditionProp)) && (ok || !reflect.DeepEqual(v, conditionProp)) { + obj["condition"] = conditionProp + } + + obj, err = resourceCloudAssetProjectFeedEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudAssetBasePath}}projects/{{project}}/feeds?feedId={{feed_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ProjectFeed: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectFeed: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // Send the project ID in the X-Goog-User-Project header. + origUserProjectOverride := config.UserProjectOverride + config.UserProjectOverride = true + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ProjectFeed: %s", err) + } + if err := d.Set("name", flattenCloudAssetProjectFeedName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Restore the original value of user_project_override. 
+ config.UserProjectOverride = origUserProjectOverride + + log.Printf("[DEBUG] Finished creating ProjectFeed %q: %#v", d.Id(), res) + + return resourceCloudAssetProjectFeedRead(d, meta) +} + +func resourceCloudAssetProjectFeedRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectFeed: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudAssetProjectFeed %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ProjectFeed: %s", err) + } + + if err := d.Set("name", flattenCloudAssetProjectFeedName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectFeed: %s", err) + } + if err := d.Set("asset_names", flattenCloudAssetProjectFeedAssetNames(res["assetNames"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectFeed: %s", err) + } + if err := d.Set("asset_types", flattenCloudAssetProjectFeedAssetTypes(res["assetTypes"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectFeed: %s", err) + } + if err := d.Set("content_type", flattenCloudAssetProjectFeedContentType(res["contentType"], d, config)); err != nil { + return 
fmt.Errorf("Error reading ProjectFeed: %s", err) + } + if err := d.Set("feed_output_config", flattenCloudAssetProjectFeedFeedOutputConfig(res["feedOutputConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectFeed: %s", err) + } + if err := d.Set("condition", flattenCloudAssetProjectFeedCondition(res["condition"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectFeed: %s", err) + } + + return nil +} + +func resourceCloudAssetProjectFeedUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectFeed: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + assetNamesProp, err := expandCloudAssetProjectFeedAssetNames(d.Get("asset_names"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("asset_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, assetNamesProp)) { + obj["assetNames"] = assetNamesProp + } + assetTypesProp, err := expandCloudAssetProjectFeedAssetTypes(d.Get("asset_types"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("asset_types"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, assetTypesProp)) { + obj["assetTypes"] = assetTypesProp + } + contentTypeProp, err := expandCloudAssetProjectFeedContentType(d.Get("content_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("content_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, contentTypeProp)) { + obj["contentType"] = contentTypeProp + } + feedOutputConfigProp, err := expandCloudAssetProjectFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) 
+ if err != nil { + return err + } else if v, ok := d.GetOkExists("feed_output_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, feedOutputConfigProp)) { + obj["feedOutputConfig"] = feedOutputConfigProp + } + conditionProp, err := expandCloudAssetProjectFeedCondition(d.Get("condition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("condition"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, conditionProp)) { + obj["condition"] = conditionProp + } + + obj, err = resourceCloudAssetProjectFeedEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ProjectFeed %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("asset_names") { + updateMask = append(updateMask, "assetNames") + } + + if d.HasChange("asset_types") { + updateMask = append(updateMask, "assetTypes") + } + + if d.HasChange("content_type") { + updateMask = append(updateMask, "contentType") + } + + if d.HasChange("feed_output_config") { + updateMask = append(updateMask, "feedOutputConfig") + } + + if d.HasChange("condition") { + updateMask = append(updateMask, "condition") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + 
return fmt.Errorf("Error updating ProjectFeed %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ProjectFeed %q: %#v", d.Id(), res) + } + + return resourceCloudAssetProjectFeedRead(d, meta) +} + +func resourceCloudAssetProjectFeedDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectFeed: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting ProjectFeed %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ProjectFeed") + } + + log.Printf("[DEBUG] Finished deleting ProjectFeed %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudAssetProjectFeedImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + if err := d.Set("name", d.Id()); err != nil { + return nil, err + } + return []*schema.ResourceData{d}, nil +} + +func flattenCloudAssetProjectFeedName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetProjectFeedAssetNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenCloudAssetProjectFeedAssetTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetProjectFeedContentType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetProjectFeedFeedOutputConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pubsub_destination"] = + flattenCloudAssetProjectFeedFeedOutputConfigPubsubDestination(original["pubsubDestination"], d, config) + return []interface{}{transformed} +} +func flattenCloudAssetProjectFeedFeedOutputConfigPubsubDestination(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["topic"] = + flattenCloudAssetProjectFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) + return []interface{}{transformed} +} +func flattenCloudAssetProjectFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetProjectFeedCondition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["expression"] = + flattenCloudAssetProjectFeedConditionExpression(original["expression"], d, config) + transformed["title"] = + flattenCloudAssetProjectFeedConditionTitle(original["title"], d, config) + transformed["description"] = + 
flattenCloudAssetProjectFeedConditionDescription(original["description"], d, config) + transformed["location"] = + flattenCloudAssetProjectFeedConditionLocation(original["location"], d, config) + return []interface{}{transformed} +} +func flattenCloudAssetProjectFeedConditionExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetProjectFeedConditionTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetProjectFeedConditionDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudAssetProjectFeedConditionLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCloudAssetProjectFeedAssetNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetProjectFeedAssetTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetProjectFeedContentType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetProjectFeedFeedOutputConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPubsubDestination, err := expandCloudAssetProjectFeedFeedOutputConfigPubsubDestination(original["pubsub_destination"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPubsubDestination); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["pubsubDestination"] = transformedPubsubDestination + } + + return transformed, nil +} + +func expandCloudAssetProjectFeedFeedOutputConfigPubsubDestination(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTopic, err := expandCloudAssetProjectFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["topic"] = transformedTopic + } + + return transformed, nil +} + +func expandCloudAssetProjectFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetProjectFeedCondition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExpression, err := expandCloudAssetProjectFeedConditionExpression(original["expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expression"] = transformedExpression + } + + transformedTitle, err := expandCloudAssetProjectFeedConditionTitle(original["title"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["title"] = transformedTitle + } + + transformedDescription, err := 
expandCloudAssetProjectFeedConditionDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedLocation, err := expandCloudAssetProjectFeedConditionLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["location"] = transformedLocation + } + + return transformed, nil +} + +func expandCloudAssetProjectFeedConditionExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetProjectFeedConditionTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetProjectFeedConditionDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudAssetProjectFeedConditionLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceCloudAssetProjectFeedEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Remove the "folders/" prefix from the folder ID + if folder, ok := d.GetOkExists("folder"); ok { + if err := d.Set("folder_id", strings.TrimPrefix(folder.(string), "folders/")); err != nil { + return nil, fmt.Errorf("Error setting folder_id: %s", err) + } + } + // The feed object must be under the "feed" attribute on the request. 
+ newObj := make(map[string]interface{}) + newObj["feed"] = obj + return newObj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_project_feed_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_project_feed_sweeper.go new file mode 100644 index 0000000000..04b7cefb7b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudasset/resource_cloud_asset_project_feed_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudasset + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudAssetProjectFeed", testSweepCloudAssetProjectFeed) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudAssetProjectFeed(region string) error { + resourceName := "CloudAssetProjectFeed" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://cloudasset.googleapis.com/v1/projects/{{project}}/feeds", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] 
Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["feeds"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudasset.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/cloud_build_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/cloud_build_operation.go new file mode 100644 index 
0000000000..1c3fa8dcaa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/cloud_build_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudbuild + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type CloudBuildOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *CloudBuildOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.CloudBuildBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createCloudBuildWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*CloudBuildOperationWaiter, error) { + w := &CloudBuildOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func CloudBuildOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createCloudBuildWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func CloudBuildOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createCloudBuildWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/data_source_google_cloudbuild_trigger.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/data_source_google_cloudbuild_trigger.go new file mode 100644 index 0000000000..47bbe2e836 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/data_source_google_cloudbuild_trigger.go @@ -0,0 +1,40 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package cloudbuild + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleCloudBuildTrigger() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceCloudBuildTrigger().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "trigger_id", "location") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleCloudBuildTriggerRead, + Schema: dsSchema, + } + +} + +func dataSourceGoogleCloudBuildTriggerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + + id = strings.ReplaceAll(id, "/locations/global/", "/") + + d.SetId(id) + return resourceCloudBuildTriggerRead(d, meta) +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_bitbucket_server_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_bitbucket_server_config.go new file mode 100644 index 0000000000..1dd59d7b81 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_bitbucket_server_config.go @@ -0,0 +1,906 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudbuild + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceCloudBuildBitbucketServerConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudBuildBitbucketServerConfigCreate, + Read: resourceCloudBuildBitbucketServerConfigRead, + Update: resourceCloudBuildBitbucketServerConfigUpdate, + Delete: resourceCloudBuildBitbucketServerConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudBuildBitbucketServerConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "api_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Immutable. API Key that will be attached to webhook. Once this field has been set, it cannot be changed. +Changing this field will result in deleting/ recreating the resource.`, + }, + "config_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID to use for the BitbucketServerConfig, which will become the final component of the BitbucketServerConfig's resource name.`, + }, + "host_uri": { + Type: schema.TypeString, + Required: true, + Description: `Immutable. The URI of the Bitbucket Server host. Once this field has been set, it cannot be changed. 
+If you need to change it, please create another BitbucketServerConfig.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location of this bitbucket server config.`, + }, + "secrets": { + Type: schema.TypeList, + Required: true, + Description: `Secret Manager secrets needed by the config.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_access_token_version_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name for the admin access token's secret version.`, + }, + "read_access_token_version_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name for the read access token's secret version.`, + }, + "webhook_secret_version_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Immutable. The resource name for the webhook secret's secret version. Once this field has been set, it cannot be changed. +Changing this field will result in deleting/ recreating the resource.`, + }, + }, + }, + }, + "username": { + Type: schema.TypeString, + Required: true, + Description: `Username of the account Cloud Build will use on Bitbucket Server.`, + }, + "connected_repositories": { + Type: schema.TypeSet, + Optional: true, + Description: `Connected Bitbucket Server repositories for this config.`, + Elem: cloudbuildBitbucketServerConfigConnectedRepositoriesSchema(), + // Default schema.HashSchema is used. + }, + "peered_network": { + Type: schema.TypeString, + Optional: true, + Description: `The network to be used when reaching out to the Bitbucket Server instance. The VPC network must be enabled for private service connection. +This should be set if the Bitbucket Server instance is hosted on-premises and not reachable by public internet. If this field is left empty, +no network peering will occur and calls to the Bitbucket Server instance will be made over the public internet. 
Must be in the format +projects/{project}/global/networks/{network}, where {project} is a project number or id and {network} is the name of a VPC network in the project.`, + }, + "ssl_ca": { + Type: schema.TypeString, + Optional: true, + Description: `SSL certificate to use for requests to Bitbucket Server. The format should be PEM format but the extension can be one of .pem, .cer, or .crt.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name for the config.`, + }, + "webhook_key": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. UUID included in webhook requests. The UUID is used to look up the corresponding config.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func cloudbuildBitbucketServerConfigConnectedRepositoriesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project_key": { + Type: schema.TypeString, + Required: true, + Description: `Identifier for the project storing the repository.`, + }, + "repo_slug": { + Type: schema.TypeString, + Required: true, + Description: `Identifier for the repository.`, + }, + }, + } +} + +func resourceCloudBuildBitbucketServerConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + hostUriProp, err := expandCloudBuildBitbucketServerConfigHostUri(d.Get("host_uri"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("host_uri"); !tpgresource.IsEmptyValue(reflect.ValueOf(hostUriProp)) && (ok || !reflect.DeepEqual(v, hostUriProp)) { + obj["hostUri"] = hostUriProp + } + secretsProp, err := expandCloudBuildBitbucketServerConfigSecrets(d.Get("secrets"), d, config) + if err != nil { + return err + 
} else if v, ok := d.GetOkExists("secrets"); !tpgresource.IsEmptyValue(reflect.ValueOf(secretsProp)) && (ok || !reflect.DeepEqual(v, secretsProp)) { + obj["secrets"] = secretsProp + } + usernameProp, err := expandCloudBuildBitbucketServerConfigUsername(d.Get("username"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("username"); !tpgresource.IsEmptyValue(reflect.ValueOf(usernameProp)) && (ok || !reflect.DeepEqual(v, usernameProp)) { + obj["username"] = usernameProp + } + apiKeyProp, err := expandCloudBuildBitbucketServerConfigApiKey(d.Get("api_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("api_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(apiKeyProp)) && (ok || !reflect.DeepEqual(v, apiKeyProp)) { + obj["apiKey"] = apiKeyProp + } + connectedRepositoriesProp, err := expandCloudBuildBitbucketServerConfigConnectedRepositories(d.Get("connected_repositories"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("connected_repositories"); !tpgresource.IsEmptyValue(reflect.ValueOf(connectedRepositoriesProp)) && (ok || !reflect.DeepEqual(v, connectedRepositoriesProp)) { + obj["connectedRepositories"] = connectedRepositoriesProp + } + peeredNetworkProp, err := expandCloudBuildBitbucketServerConfigPeeredNetwork(d.Get("peered_network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peered_network"); !tpgresource.IsEmptyValue(reflect.ValueOf(peeredNetworkProp)) && (ok || !reflect.DeepEqual(v, peeredNetworkProp)) { + obj["peeredNetwork"] = peeredNetworkProp + } + sslCaProp, err := expandCloudBuildBitbucketServerConfigSslCa(d.Get("ssl_ca"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ssl_ca"); !tpgresource.IsEmptyValue(reflect.ValueOf(sslCaProp)) && (ok || !reflect.DeepEqual(v, sslCaProp)) { + obj["sslCa"] = sslCaProp + } + + obj, err = resourceCloudBuildBitbucketServerConfigEncoder(d, meta, obj) + if err != nil { + 
return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs?bitbucketServerConfigId={{config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new BitbucketServerConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BitbucketServerConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating BitbucketServerConfig: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = CloudBuildOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating BitbucketServerConfig", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create BitbucketServerConfig: %s", err) + } + + if err := d.Set("name", flattenCloudBuildBitbucketServerConfigName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating BitbucketServerConfig without connected repos: %q: %#v", d.Id(), res) + + if v, ok := d.GetOkExists("connected_repositories"); !tpgresource.IsEmptyValue(reflect.ValueOf(connectedRepositoriesProp)) && (ok || !reflect.DeepEqual(v, connectedRepositoriesProp)) { + connectedReposPropArray, ok := connectedRepositoriesProp.([]interface{}) + if !ok { + return fmt.Errorf("Error reading connected_repositories") + } + + requests := make([]interface{}, len(connectedReposPropArray)) + for i := 0; i < len(connectedReposPropArray); i++ { + connectedRepo := make(map[string]interface{}) + connectedRepo["parent"] = id + connectedRepo["repo"] = connectedReposPropArray[i] + + connectedRepoRequest := make(map[string]interface{}) + connectedRepoRequest["parent"] = id + connectedRepoRequest["bitbucketServerConnectedRepository"] = connectedRepo + + requests[i] = connectedRepoRequest + } + obj = make(map[string]interface{}) + obj["requests"] = requests + + url, err = tpgresource.ReplaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}/connectedRepositories:batchCreate") + if err != nil { + return err + } + + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating connected_repositories: %s", err) + } + + err = CloudBuildOperationWaitTime( + config, res, project, "Creating connected_repositories on BitbucketServerConfig", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error waiting to create 
connected_repositories: %s", err) + } + } else { + log.Printf("[DEBUG] No connected repositories found to create: %#v", connectedRepositoriesProp) + } + + log.Printf("[DEBUG] Finished creating BitbucketServerConfig %q: %#v", d.Id(), res) + + return resourceCloudBuildBitbucketServerConfigRead(d, meta) +} + +func resourceCloudBuildBitbucketServerConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BitbucketServerConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudBuildBitbucketServerConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) + } + + if err := d.Set("name", flattenCloudBuildBitbucketServerConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) + } + if err := d.Set("host_uri", flattenCloudBuildBitbucketServerConfigHostUri(res["hostUri"], d, config)); err != nil { + return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) + } + if err := d.Set("secrets", 
flattenCloudBuildBitbucketServerConfigSecrets(res["secrets"], d, config)); err != nil { + return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) + } + if err := d.Set("username", flattenCloudBuildBitbucketServerConfigUsername(res["username"], d, config)); err != nil { + return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) + } + if err := d.Set("webhook_key", flattenCloudBuildBitbucketServerConfigWebhookKey(res["webhookKey"], d, config)); err != nil { + return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) + } + if err := d.Set("api_key", flattenCloudBuildBitbucketServerConfigApiKey(res["apiKey"], d, config)); err != nil { + return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) + } + if err := d.Set("connected_repositories", flattenCloudBuildBitbucketServerConfigConnectedRepositories(res["connectedRepositories"], d, config)); err != nil { + return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) + } + if err := d.Set("peered_network", flattenCloudBuildBitbucketServerConfigPeeredNetwork(res["peeredNetwork"], d, config)); err != nil { + return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) + } + if err := d.Set("ssl_ca", flattenCloudBuildBitbucketServerConfigSslCa(res["sslCa"], d, config)); err != nil { + return fmt.Errorf("Error reading BitbucketServerConfig: %s", err) + } + + return nil +} + +func resourceCloudBuildBitbucketServerConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BitbucketServerConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + hostUriProp, err := expandCloudBuildBitbucketServerConfigHostUri(d.Get("host_uri"), d, config) + if err != nil 
{ + return err + } else if v, ok := d.GetOkExists("host_uri"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hostUriProp)) { + obj["hostUri"] = hostUriProp + } + secretsProp, err := expandCloudBuildBitbucketServerConfigSecrets(d.Get("secrets"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("secrets"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, secretsProp)) { + obj["secrets"] = secretsProp + } + usernameProp, err := expandCloudBuildBitbucketServerConfigUsername(d.Get("username"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("username"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, usernameProp)) { + obj["username"] = usernameProp + } + connectedRepositoriesProp, err := expandCloudBuildBitbucketServerConfigConnectedRepositories(d.Get("connected_repositories"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("connected_repositories"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, connectedRepositoriesProp)) { + obj["connectedRepositories"] = connectedRepositoriesProp + } + peeredNetworkProp, err := expandCloudBuildBitbucketServerConfigPeeredNetwork(d.Get("peered_network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peered_network"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peeredNetworkProp)) { + obj["peeredNetwork"] = peeredNetworkProp + } + sslCaProp, err := expandCloudBuildBitbucketServerConfigSslCa(d.Get("ssl_ca"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ssl_ca"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslCaProp)) { + obj["sslCa"] = sslCaProp + } + + obj, err = resourceCloudBuildBitbucketServerConfigEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, 
config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating BitbucketServerConfig %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("host_uri") { + updateMask = append(updateMask, "hostUri") + } + + if d.HasChange("secrets") { + updateMask = append(updateMask, "secrets") + } + + if d.HasChange("username") { + updateMask = append(updateMask, "username") + } + + if d.HasChange("connected_repositories") { + updateMask = append(updateMask, "connectedRepositories") + } + + if d.HasChange("peered_network") { + updateMask = append(updateMask, "peeredNetwork") + } + + if d.HasChange("ssl_ca") { + updateMask = append(updateMask, "sslCa") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + // remove connectedRepositories from updateMask + for i, field := range updateMask { + if field == "connectedRepositories" { + updateMask = append(updateMask[:i], updateMask[i+1:]...) 
+ break + } + } + // reconstruct url + url, err = tpgresource.ReplaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") + if err != nil { + return err + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating BitbucketServerConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating BitbucketServerConfig %q: %#v", d.Id(), res) + } + + err = CloudBuildOperationWaitTime( + config, res, project, "Updating BitbucketServerConfig", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + if d.HasChange("connected_repositories") { + o, n := d.GetChange("connected_repositories") + oReposSet, ok := o.(*schema.Set) + if !ok { + return fmt.Errorf("Error reading old connected repositories") + } + nReposSet, ok := n.(*schema.Set) + if !ok { + return fmt.Errorf("Error reading new connected repositories") + } + + removeRepos := oReposSet.Difference(nReposSet).List() + createRepos := nReposSet.Difference(oReposSet).List() + + url, err = tpgresource.ReplaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}:removeBitbucketServerConnectedRepository") + if err != nil { + return err + } + + // send remove repo requests. 
+ for _, repo := range removeRepos { + obj := make(map[string]interface{}) + obj["connectedRepository"] = repo + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error removing connected_repositories: %s", err) + } + } + + // if repos to create, prepare and send batchCreate request + if len(createRepos) > 0 { + parent, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + var requests []interface{} + for _, repo := range createRepos { + connectedRepo := make(map[string]interface{}) + connectedRepo["parent"] = parent + connectedRepo["repo"] = repo + + connectedRepoRequest := make(map[string]interface{}) + connectedRepoRequest["parent"] = parent + connectedRepoRequest["bitbucketServerConnectedRepository"] = connectedRepo + + requests = append(requests, connectedRepoRequest) + } + obj = make(map[string]interface{}) + obj["requests"] = requests + + url, err = tpgresource.ReplaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}/connectedRepositories:batchCreate") + if err != nil { + return err + } + + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating connected_repositories: %s", err) + } + + err = CloudBuildOperationWaitTime( + config, res, project, "Updating connected_repositories on BitbucketServerConfig", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("Error 
waiting to create connected_repositories: %s", err) + } + } + } else { + log.Printf("[DEBUG] connected_repositories have no changes") + } + return resourceCloudBuildBitbucketServerConfigRead(d, meta) +} + +func resourceCloudBuildBitbucketServerConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BitbucketServerConfig: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting BitbucketServerConfig %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "BitbucketServerConfig") + } + + err = CloudBuildOperationWaitTime( + config, res, project, "Deleting BitbucketServerConfig", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting BitbucketServerConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudBuildBitbucketServerConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + 
"projects/(?P[^/]+)/locations/(?P[^/]+)/bitbucketServerConfigs/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCloudBuildBitbucketServerConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildBitbucketServerConfigHostUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildBitbucketServerConfigSecrets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["admin_access_token_version_name"] = + flattenCloudBuildBitbucketServerConfigSecretsAdminAccessTokenVersionName(original["adminAccessTokenVersionName"], d, config) + transformed["read_access_token_version_name"] = + flattenCloudBuildBitbucketServerConfigSecretsReadAccessTokenVersionName(original["readAccessTokenVersionName"], d, config) + transformed["webhook_secret_version_name"] = + flattenCloudBuildBitbucketServerConfigSecretsWebhookSecretVersionName(original["webhookSecretVersionName"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildBitbucketServerConfigSecretsAdminAccessTokenVersionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildBitbucketServerConfigSecretsReadAccessTokenVersionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ + return v +} + +func flattenCloudBuildBitbucketServerConfigSecretsWebhookSecretVersionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildBitbucketServerConfigUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildBitbucketServerConfigWebhookKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildBitbucketServerConfigApiKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildBitbucketServerConfigConnectedRepositories(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(cloudbuildBitbucketServerConfigConnectedRepositoriesSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "project_key": flattenCloudBuildBitbucketServerConfigConnectedRepositoriesProjectKey(original["projectKey"], d, config), + "repo_slug": flattenCloudBuildBitbucketServerConfigConnectedRepositoriesRepoSlug(original["repoSlug"], d, config), + }) + } + return transformed +} +func flattenCloudBuildBitbucketServerConfigConnectedRepositoriesProjectKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildBitbucketServerConfigConnectedRepositoriesRepoSlug(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildBitbucketServerConfigPeeredNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenCloudBuildBitbucketServerConfigSslCa(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCloudBuildBitbucketServerConfigHostUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildBitbucketServerConfigSecrets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAdminAccessTokenVersionName, err := expandCloudBuildBitbucketServerConfigSecretsAdminAccessTokenVersionName(original["admin_access_token_version_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAdminAccessTokenVersionName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["adminAccessTokenVersionName"] = transformedAdminAccessTokenVersionName + } + + transformedReadAccessTokenVersionName, err := expandCloudBuildBitbucketServerConfigSecretsReadAccessTokenVersionName(original["read_access_token_version_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReadAccessTokenVersionName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["readAccessTokenVersionName"] = transformedReadAccessTokenVersionName + } + + transformedWebhookSecretVersionName, err := expandCloudBuildBitbucketServerConfigSecretsWebhookSecretVersionName(original["webhook_secret_version_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWebhookSecretVersionName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["webhookSecretVersionName"] = transformedWebhookSecretVersionName + } + + return transformed, nil +} + +func 
expandCloudBuildBitbucketServerConfigSecretsAdminAccessTokenVersionName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildBitbucketServerConfigSecretsReadAccessTokenVersionName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildBitbucketServerConfigSecretsWebhookSecretVersionName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildBitbucketServerConfigUsername(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildBitbucketServerConfigApiKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildBitbucketServerConfigConnectedRepositories(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProjectKey, err := expandCloudBuildBitbucketServerConfigConnectedRepositoriesProjectKey(original["project_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectKey"] = transformedProjectKey + } + + transformedRepoSlug, err := expandCloudBuildBitbucketServerConfigConnectedRepositoriesRepoSlug(original["repo_slug"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepoSlug); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["repoSlug"] = transformedRepoSlug + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudBuildBitbucketServerConfigConnectedRepositoriesProjectKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildBitbucketServerConfigConnectedRepositoriesRepoSlug(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildBitbucketServerConfigPeeredNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildBitbucketServerConfigSslCa(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceCloudBuildBitbucketServerConfigEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // connectedRepositories is needed for batchCreate on the config after creation. + delete(obj, "connectedRepositories") + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_bitbucket_server_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_bitbucket_server_config_sweeper.go new file mode 100644 index 0000000000..d09e0af07d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_bitbucket_server_config_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudbuild + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudBuildBitbucketServerConfig", testSweepCloudBuildBitbucketServerConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudBuildBitbucketServerConfig(region string) error { + resourceName := "CloudBuildBitbucketServerConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://cloudbuild.googleapis.com/v1/projects/{{project}}/locations/{{location}}/bitbucketServerConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["bitbucketServerConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudbuild.googleapis.com/v1/projects/{{project}}/locations/{{location}}/bitbucketServerConfigs/{{config_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_trigger.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_trigger.go new file mode 100644 index 0000000000..f62075d435 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_trigger.go @@ -0,0 +1,5631 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudbuild + +import ( + "context" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func stepTimeoutCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + buildList := diff.Get("build").([]interface{}) + if len(buildList) == 0 || buildList[0] == nil { + return nil + } + build := buildList[0].(map[string]interface{}) + buildTimeoutString := build["timeout"].(string) + + buildTimeout, err := time.ParseDuration(buildTimeoutString) + if err != nil { + return fmt.Errorf("Error parsing build timeout : %s", err) + } + + var stepTimeoutSum time.Duration = 0 + steps := build["step"].([]interface{}) + for _, rawstep := range steps { + if rawstep == nil { + continue + } + step := rawstep.(map[string]interface{}) + timeoutString := step["timeout"].(string) + if len(timeoutString) == 0 { + continue + } + + timeout, err := time.ParseDuration(timeoutString) + if err != nil { + return fmt.Errorf("Error parsing build step timeout: %s", err) + } + stepTimeoutSum += timeout + } + if stepTimeoutSum > buildTimeout { + return fmt.Errorf("Step 
timeout sum (%v) cannot be greater than build timeout (%v)", stepTimeoutSum, buildTimeout) + } + return nil +} + +func ResourceCloudBuildTrigger() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudBuildTriggerCreate, + Read: resourceCloudBuildTriggerRead, + Update: resourceCloudBuildTriggerUpdate, + Delete: resourceCloudBuildTriggerDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudBuildTriggerImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + SchemaVersion: 2, + + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceCloudBuildTriggerResourceV0().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceCloudBuildTriggerUpgradeV0, + Version: 0, + }, + { + Type: resourceCloudBuildTriggerResourceV1().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceCloudBuildTriggerUpgradeV1, + Version: 1, + }, + }, + CustomizeDiff: customdiff.All( + stepTimeoutCustomizeDiff, + ), + + Schema: map[string]*schema.Schema{ + "approval_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Configuration for manual approval to start a build invocation of this BuildTrigger. +Builds created by this trigger will require approval before they execute. +Any user with a Cloud Build Approver role for the project can approve a build.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "approval_required": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not approval is needed. 
If this is set on a build, it will become pending when run, +and will need to be explicitly approved to start.`, + Default: false, + }, + }, + }, + }, + "bitbucket_server_trigger_config": { + Type: schema.TypeList, + Optional: true, + Description: `BitbucketServerTriggerConfig describes the configuration of a trigger that creates a build whenever a Bitbucket Server event is received.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bitbucket_server_config_resource": { + Type: schema.TypeString, + Required: true, + Description: `The Bitbucket server config resource that this trigger config maps to.`, + }, + "project_key": { + Type: schema.TypeString, + Required: true, + Description: `Key of the project that the repo is in. For example: The key for https://mybitbucket.server/projects/TEST/repos/test-repo is "TEST".`, + }, + "repo_slug": { + Type: schema.TypeString, + Required: true, + Description: `Slug of the repository. A repository slug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL. +For example, if the repository name is 'test repo', in the URL it would become 'test-repo' as in https://mybitbucket.server/projects/TEST/repos/test-repo.`, + }, + "pull_request": { + Type: schema.TypeList, + Optional: true, + Description: `Filter to match changes in pull requests.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch": { + Type: schema.TypeString, + Required: true, + Description: `Regex of branches to match. 
+The syntax of the regular expressions accepted is the syntax accepted by RE2 and described at https://github.com/google/re2/wiki/Syntax`, + }, + "comment_control": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY", ""}), + Description: `Configure builds to run whether a repository owner or collaborator need to comment /gcbrun. Possible values: ["COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"]`, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, branches that do NOT match the git_ref will trigger a build.`, + }, + }, + }, + ExactlyOneOf: []string{"bitbucket_server_trigger_config.0.pull_request", "bitbucket_server_trigger_config.0.push"}, + }, + "push": { + Type: schema.TypeList, + Optional: true, + Description: `Filter to match changes in refs like branches, tags.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch": { + Type: schema.TypeString, + Optional: true, + Description: `Regex of branches to match. Specify only one of branch or tag.`, + ExactlyOneOf: []string{"bitbucket_server_trigger_config.0.push.0.branch", "bitbucket_server_trigger_config.0.push.0.tag"}, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `When true, only trigger a build if the revision regex does NOT match the gitRef regex.`, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + Description: `Regex of tags to match. 
Specify only one of branch or tag.`, + ExactlyOneOf: []string{"bitbucket_server_trigger_config.0.push.0.branch", "bitbucket_server_trigger_config.0.push.0.tag"}, + }, + }, + }, + ExactlyOneOf: []string{"bitbucket_server_trigger_config.0.pull_request", "bitbucket_server_trigger_config.0.push"}, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "bitbucket_server_trigger_config", "pubsub_config", "webhook_config", "source_to_build", "repository_event_config"}, + }, + "build": { + Type: schema.TypeList, + Optional: true, + Description: `Contents of the build template. Either a filename or build template must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "step": { + Type: schema.TypeList, + Required: true, + Description: `The operations to be performed on the workspace.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the container image that will run this particular build step. + +If the image is available in the host's Docker daemon's cache, it will be +run directly. If not, the host will attempt to pull the image first, using +the builder service account's credentials if necessary. + +The Docker daemon's cache will already have the latest versions of all of +the officially supported build steps (see https://github.com/GoogleCloudPlatform/cloud-builders +for images and examples). +The Docker daemon will also have cached many of the layers for some popular +images, like "ubuntu", "debian", but they will be refreshed at the time +you attempt to use them. 
+ +If you built an image in a previous build step, it will be stored in the +host's Docker daemon's cache and is available to use as the name for a +later build step.`, + }, + "allow_exit_codes": { + Type: schema.TypeList, + Optional: true, + Description: `Allow this build step to fail without failing the entire build if and +only if the exit code is one of the specified codes. + +If 'allowFailure' is also specified, this field will take precedence.`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "allow_failure": { + Type: schema.TypeBool, + Optional: true, + Description: `Allow this build step to fail without failing the entire build. +If false, the entire build will fail if this step fails. Otherwise, the +build will succeed, but this step will still have a failure status. +Error information will be reported in the 'failureDetail' field. + +'allowExitCodes' takes precedence over this field.`, + }, + "args": { + Type: schema.TypeList, + Optional: true, + Description: `A list of arguments that will be presented to the step when it is started. + +If the image used to run the step's container has an entrypoint, the args +are used as arguments to that entrypoint. If the image does not define an +entrypoint, the first element in args is used as the entrypoint, and the +remainder will be used as arguments.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "dir": { + Type: schema.TypeString, + Optional: true, + Description: `Working directory to use when running this step's container. + +If this value is a relative path, it is relative to the build's working +directory. If this value is absolute, it may be outside the build's working +directory, in which case the contents of the path may not be persisted +across build step executions, unless a 'volume' for that path is specified. 
+ +If the build specifies a 'RepoSource' with 'dir' and a step with a +'dir', +which specifies an absolute path, the 'RepoSource' 'dir' is ignored +for the step's execution.`, + }, + "entrypoint": { + Type: schema.TypeString, + Optional: true, + Description: `Entrypoint to be used instead of the build step image's +default entrypoint. +If unset, the image's default entrypoint is used`, + }, + "env": { + Type: schema.TypeList, + Optional: true, + Description: `A list of environment variable definitions to be used when +running a step. + +The elements are of the form "KEY=VALUE" for the environment variable +"KEY" being given the value "VALUE".`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "id": { + Type: schema.TypeString, + Optional: true, + Description: `Unique identifier for this build step, used in 'wait_for' to +reference this build step as a dependency.`, + }, + "script": { + Type: schema.TypeString, + Optional: true, + Description: `A shell script to be executed in the step. +When script is provided, the user cannot specify the entrypoint or args.`, + }, + "secret_env": { + Type: schema.TypeList, + Optional: true, + Description: `A list of environment variables which are encrypted using +a Cloud Key +Management Service crypto key. These values must be specified in +the build's 'Secret'.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "timeout": { + Type: schema.TypeString, + Optional: true, + Description: `Time limit for executing this build step. If not defined, +the step has no +time limit and will be allowed to continue to run until either it +completes or the build itself times out.`, + }, + "timing": { + Type: schema.TypeString, + Optional: true, + Description: `Output only. Stores timing information for executing this +build step.`, + }, + "volumes": { + Type: schema.TypeList, + Optional: true, + Description: `List of volumes to mount into the build step. 
+ +Each volume is created as an empty volume prior to execution of the +build step. Upon completion of the build, volumes and their contents +are discarded. + +Using a named volume in only one step is not valid as it is +indicative of a build request with an incorrect configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the volume to mount. + +Volume names must be unique per build step and must be valid names for +Docker volumes. Each named volume must be used by at least two build steps.`, + }, + "path": { + Type: schema.TypeString, + Required: true, + Description: `Path at which to mount the volume. + +Paths must be absolute and cannot conflict with other volume paths on +the same build step or with certain reserved volume paths.`, + }, + }, + }, + }, + "wait_for": { + Type: schema.TypeList, + Optional: true, + Description: `The ID(s) of the step(s) that this build step depends on. + +This build step will not start until all the build steps in 'wait_for' +have completed successfully. If 'wait_for' is empty, this build step +will start when all previous build steps in the 'Build.Steps' list +have completed successfully.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "artifacts": { + Type: schema.TypeList, + Optional: true, + Description: `Artifacts produced by the build that should be uploaded upon successful completion of all build steps.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "images": { + Type: schema.TypeList, + Optional: true, + Description: `A list of images to be pushed upon the successful completion of all build steps. + +The images will be pushed using the builder service account's credentials. + +The digests of the pushed images will be stored in the Build resource's results field. 
+ +If any of the images fail to be pushed, the build is marked FAILURE.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "objects": { + Type: schema.TypeList, + Optional: true, + Description: `A list of objects to be uploaded to Cloud Storage upon successful completion of all build steps. + +Files in the workspace matching specified paths globs will be uploaded to the +Cloud Storage location using the builder service account's credentials. + +The location and generation of the uploaded objects will be stored in the Build resource's results field. + +If any objects fail to be pushed, the build is marked FAILURE.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Optional: true, + Description: `Cloud Storage bucket and optional object path, in the form "gs://bucket/path/to/somewhere/". + +Files in the workspace matching any path pattern will be uploaded to Cloud Storage with +this location as a prefix.`, + }, + "paths": { + Type: schema.TypeList, + Optional: true, + Description: `Path globs used to match files in the build's workspace.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "timing": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. Stores timing information for pushing all artifact objects.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "end_time": { + Type: schema.TypeString, + Optional: true, + Description: `End of time span. + +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to +nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + Description: `Start of time span. + +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to +nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "available_secrets": { + Type: schema.TypeList, + Optional: true, + Description: `Secrets and secret environment variables.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_manager": { + Type: schema.TypeList, + Required: true, + Description: `Pairs a secret environment variable with a SecretVersion in Secret Manager.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "env": { + Type: schema.TypeString, + Required: true, + Description: `Environment variable name to associate with the secret. Secret environment +variables must be unique across all of a build's secrets, and must be used +by at least one build step.`, + }, + "version_name": { + Type: schema.TypeString, + Required: true, + Description: `Resource name of the SecretVersion. In format: projects/*/secrets/*/versions/*`, + }, + }, + }, + }, + }, + }, + }, + "images": { + Type: schema.TypeList, + Optional: true, + Description: `A list of images to be pushed upon the successful completion of all build steps. +The images are pushed using the builder service account's credentials. +The digests of the pushed images will be stored in the Build resource's results field. +If any of the images fail to be pushed, the build status is marked FAILURE.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "logs_bucket": { + Type: schema.TypeString, + Optional: true, + Description: `Google Cloud Storage bucket where logs should be written. +Logs file names will be of the format ${logsBucket}/log-${build_id}.txt.`, + }, + "options": { + Type: schema.TypeList, + Optional: true, + Description: `Special options for this build.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + Description: `Requested disk size for the VM that runs the build. 
Note that this is NOT "disk free"; +some of the space will be used by the operating system and build utilities. +Also note that this is the minimum disk size that will be allocated for the build -- +the build may run with a larger disk than requested. At present, the maximum disk size +is 1000GB; builds that request more than the maximum are rejected with an error.`, + }, + "dynamic_substitutions": { + Type: schema.TypeBool, + Optional: true, + Description: `Option to specify whether or not to apply bash style string operations to the substitutions. + +NOTE this is always enabled for triggered builds and cannot be overridden in the build configuration file.`, + }, + "env": { + Type: schema.TypeList, + Optional: true, + Description: `A list of global environment variable definitions that will exist for all build steps +in this build. If a variable is defined in both globally and in a build step, +the variable will use the build step value. + +The elements are of the form "KEY=VALUE" for the environment variable "KEY" being given the value "VALUE".`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "log_streaming_option": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF", ""}), + Description: `Option to define build log streaming behavior to Google Cloud Storage. Possible values: ["STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF"]`, + }, + "logging": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", "STACKDRIVER_ONLY", "CLOUD_LOGGING_ONLY", "NONE", ""}), + Description: `Option to specify the logging mode, which determines if and where build logs are stored. 
Possible values: ["LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", "STACKDRIVER_ONLY", "CLOUD_LOGGING_ONLY", "NONE"]`, + }, + "machine_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"UNSPECIFIED", "N1_HIGHCPU_8", "N1_HIGHCPU_32", "E2_HIGHCPU_8", "E2_HIGHCPU_32", ""}), + Description: `Compute Engine machine type on which to run the build. Possible values: ["UNSPECIFIED", "N1_HIGHCPU_8", "N1_HIGHCPU_32", "E2_HIGHCPU_8", "E2_HIGHCPU_32"]`, + }, + "requested_verify_option": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NOT_VERIFIED", "VERIFIED", ""}), + Description: `Requested verifiability options. Possible values: ["NOT_VERIFIED", "VERIFIED"]`, + }, + "secret_env": { + Type: schema.TypeList, + Optional: true, + Description: `A list of global environment variables, which are encrypted using a Cloud Key Management +Service crypto key. These values must be specified in the build's Secret. These variables +will be available to all build steps in this build.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "source_provenance_hash": { + Type: schema.TypeList, + Optional: true, + Description: `Requested hash for SourceProvenance. Possible values: ["NONE", "SHA256", "MD5"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"NONE", "SHA256", "MD5"}), + }, + }, + "substitution_option": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MUST_MATCH", "ALLOW_LOOSE", ""}), + Description: `Option to specify behavior when there is an error in the substitution checks. + +NOTE this is always set to ALLOW_LOOSE for triggered builds and cannot be overridden +in the build configuration file. 
Possible values: ["MUST_MATCH", "ALLOW_LOOSE"]`, + }, + "volumes": { + Type: schema.TypeList, + Optional: true, + Description: `Global list of volumes to mount for ALL build steps + +Each volume is created as an empty volume prior to starting the build process. +Upon completion of the build, volumes and their contents are discarded. Global +volume names and paths cannot conflict with the volumes defined a build step. + +Using a global volume in a build with only one step is not valid as it is indicative +of a build request with an incorrect configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the volume to mount. + +Volume names must be unique per build step and must be valid names for Docker volumes. +Each named volume must be used by at least two build steps.`, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path at which to mount the volume. + +Paths must be absolute and cannot conflict with other volume paths on the same +build step or with certain reserved volume paths.`, + }, + }, + }, + }, + "worker_pool": { + Type: schema.TypeString, + Optional: true, + Description: `Option to specify a WorkerPool for the build. Format projects/{project}/workerPools/{workerPool} + +This field is experimental.`, + }, + }, + }, + }, + "queue_ttl": { + Type: schema.TypeString, + Optional: true, + Description: `TTL in queue for this build. If provided and the build is enqueued longer than this value, +the build will expire and the build status will be EXPIRED. +The TTL starts ticking from createTime. +A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, + }, + "secret": { + Type: schema.TypeList, + Optional: true, + Description: `Secrets to decrypt using Cloud Key Management Service.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + Description: `Cloud KMS key name to use to decrypt these envs.`, + }, + "secret_env": { + Type: schema.TypeMap, + Optional: true, + Description: `Map of environment variable name to its encrypted value. +Secret environment variables must be unique across all of a build's secrets, +and must be used by at least one build step. Values can be at most 64 KB in size. +There can be at most 100 secret values across all of a build's secrets.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "source": { + Type: schema.TypeList, + Optional: true, + Description: `The location of the source files to build. + +One of 'storageSource' or 'repoSource' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "repo_source": { + Type: schema.TypeList, + Optional: true, + Description: `Location of the source in a Google Cloud Source Repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "repo_name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the Cloud Source Repository.`, + }, + "branch_name": { + Type: schema.TypeString, + Optional: true, + Description: `Regex matching branches to build. Exactly one a of branch name, tag, or commit SHA must be provided. +The syntax of the regular expressions accepted is the syntax accepted by RE2 and +described at https://github.com/google/re2/wiki/Syntax`, + ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, + }, + "commit_sha": { + Type: schema.TypeString, + Optional: true, + Description: `Explicit commit SHA to build. 
Exactly one a of branch name, tag, or commit SHA must be provided.`, + ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, + }, + "dir": { + Type: schema.TypeString, + Optional: true, + Description: `Directory, relative to the source root, in which to run the build. +This must be a relative path. If a step's dir is specified and is an absolute path, +this value is ignored for that step's execution.`, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `Only trigger a build if the revision regex does NOT match the revision regex.`, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, + Description: `ID of the project that owns the Cloud Source Repository. +If omitted, the project ID requesting the build is assumed.`, + }, + "substitutions": { + Type: schema.TypeMap, + Optional: true, + Description: `Substitutions to use in a triggered build. Should only be used with triggers.run`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tag_name": { + Type: schema.TypeString, + Optional: true, + Description: `Regex matching tags to build. Exactly one a of branch name, tag, or commit SHA must be provided. 
+The syntax of the regular expressions accepted is the syntax accepted by RE2 and +described at https://github.com/google/re2/wiki/Syntax`, + ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, + }, + }, + }, + }, + "storage_source": { + Type: schema.TypeList, + Optional: true, + Description: `Location of the source in an archive file in Google Cloud Storage.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: `Google Cloud Storage bucket containing the source.`, + }, + "object": { + Type: schema.TypeString, + Required: true, + Description: `Google Cloud Storage object containing the source. +This object must be a gzipped archive file (.tar.gz) containing source to build.`, + }, + "generation": { + Type: schema.TypeString, + Optional: true, + Description: `Google Cloud Storage generation for the object. +If the generation is omitted, the latest generation will be used`, + }, + }, + }, + }, + }, + }, + }, + "substitutions": { + Type: schema.TypeMap, + Optional: true, + Description: `Substitutions data for Build resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": { + Type: schema.TypeList, + Optional: true, + Description: `Tags for annotation of a Build. These are not docker tags.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "timeout": { + Type: schema.TypeString, + Optional: true, + Description: `Amount of time that this build should be allowed to run, to second granularity. +If this amount of time elapses, work on the build will cease and the build status will be TIMEOUT. +This timeout must be equal to or greater than the sum of the timeouts for build steps within the build. +The expected format is the number of seconds followed by s. 
+Default time is ten minutes (600s).`, + Default: "600s", + }, + }, + }, + ExactlyOneOf: []string{"filename", "build", "git_file_source"}, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Human-readable description of the trigger.`, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether the trigger is disabled or not. If true, the trigger will never result in a build.`, + }, + "filename": { + Type: schema.TypeString, + Optional: true, + Description: `Path, from the source root, to a file whose contents is used for the template. +Either a filename or build template must be provided. Set this only when using trigger_template or github. +When using Pub/Sub, Webhook or Manual set the file name using git_file_source instead.`, + ExactlyOneOf: []string{"filename", "build", "git_file_source"}, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: `A Common Expression Language string. Used only with Pub/Sub and Webhook.`, + }, + "git_file_source": { + Type: schema.TypeList, + Optional: true, + Description: `The file source describing the local or remote Build template.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: `The path of the file, with the repo root as the root of the path.`, + }, + "repo_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET_SERVER"}), + Description: `The type of the repo, since it may not be explicit from the repo field (e.g from a URL). 
+Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB, BITBUCKET_SERVER Possible values: ["UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET_SERVER"]`, + }, + "github_enterprise_config": { + Type: schema.TypeString, + Optional: true, + Description: `The full resource name of the github enterprise config. +Format: projects/{project}/locations/{location}/githubEnterpriseConfigs/{id}. projects/{project}/githubEnterpriseConfigs/{id}.`, + }, + "repository": { + Type: schema.TypeString, + Optional: true, + Description: `The fully qualified resource name of the Repo API repository. The fully qualified resource name of the Repo API repository. +If unspecified, the repo from which the trigger invocation originated is assumed to be the repo from which to read the specified path.`, + }, + "revision": { + Type: schema.TypeString, + Optional: true, + Description: `The branch, tag, arbitrary ref, or SHA version of the repo to use when resolving the +filename (optional). This field respects the same syntax/resolution as described here: https://git-scm.com/docs/gitrevisions +If unspecified, the revision from which the trigger invocation originated is assumed to be the revision from which to read the specified path.`, + }, + "uri": { + Type: schema.TypeString, + Optional: true, + Description: `The URI of the repo (optional). If unspecified, the repo from which the trigger +invocation originated is assumed to be the repo from which to read the specified path.`, + }, + }, + }, + ExactlyOneOf: []string{"filename", "git_file_source", "build"}, + }, + "github": { + Type: schema.TypeList, + Optional: true, + Description: `Describes the configuration of a trigger that creates a build whenever a GitHub event is received. 
+ +One of 'trigger_template', 'github', 'pubsub_config' or 'webhook_config' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enterprise_config_resource_name": { + Type: schema.TypeString, + Optional: true, + Description: `The resource name of the github enterprise config that should be applied to this installation. +For example: "projects/{$projectId}/locations/{$locationId}/githubEnterpriseConfigs/{$configId}"`, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the repository. For example: The name for +https://github.com/googlecloudplatform/cloud-builders is "cloud-builders".`, + }, + "owner": { + Type: schema.TypeString, + Optional: true, + Description: `Owner of the repository. For example: The owner for +https://github.com/googlecloudplatform/cloud-builders is "googlecloudplatform".`, + }, + "pull_request": { + Type: schema.TypeList, + Optional: true, + Description: `filter to match changes in pull requests. Specify only one of 'pull_request' or 'push'.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch": { + Type: schema.TypeString, + Required: true, + Description: `Regex of branches to match.`, + }, + "comment_control": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY", ""}), + Description: `Whether to block builds on a "/gcbrun" comment from a repository owner or collaborator. 
Possible values: ["COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"]`, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, branches that do NOT match the git_ref will trigger a build.`, + }, + }, + }, + ExactlyOneOf: []string{"github.0.pull_request", "github.0.push"}, + }, + "push": { + Type: schema.TypeList, + Optional: true, + Description: `filter to match changes in refs, like branches or tags. Specify only one of 'pull_request' or 'push'.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch": { + Type: schema.TypeString, + Optional: true, + Description: `Regex of branches to match. Specify only one of branch or tag.`, + ExactlyOneOf: []string{"github.0.push.0.branch", "github.0.push.0.tag"}, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `When true, only trigger a build if the revision regex does NOT match the git_ref regex.`, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + Description: `Regex of tags to match. Specify only one of branch or tag.`, + ExactlyOneOf: []string{"github.0.push.0.branch", "github.0.push.0.tag"}, + }, + }, + }, + ExactlyOneOf: []string{"github.0.pull_request", "github.0.push"}, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "bitbucket_server_trigger_config", "pubsub_config", "webhook_config", "source_to_build", "repository_event_config"}, + }, + "ignored_files": { + Type: schema.TypeList, + Optional: true, + Description: `ignoredFiles and includedFiles are file glob matches using https://golang.org/pkg/path/filepath/#Match +extended with support for '**'. + +If ignoredFiles and changed files are both empty, then they are not +used to determine whether or not to trigger a build. + +If ignoredFiles is not empty, then we ignore any files that match any +of the ignored_file globs. 
If the change has no files that are outside +of the ignoredFiles globs, then we do not trigger a build.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "include_build_logs": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"INCLUDE_BUILD_LOGS_UNSPECIFIED", "INCLUDE_BUILD_LOGS_WITH_STATUS", ""}), + Description: `Build logs will be sent back to GitHub as part of the checkrun +result. Values can be INCLUDE_BUILD_LOGS_UNSPECIFIED or +INCLUDE_BUILD_LOGS_WITH_STATUS Possible values: ["INCLUDE_BUILD_LOGS_UNSPECIFIED", "INCLUDE_BUILD_LOGS_WITH_STATUS"]`, + }, + "included_files": { + Type: schema.TypeList, + Optional: true, + Description: `ignoredFiles and includedFiles are file glob matches using https://golang.org/pkg/path/filepath/#Match +extended with support for '**'. + +If any of the files altered in the commit pass the ignoredFiles filter +and includedFiles is empty, then as far as this filter is concerned, we +should trigger the build. + +If any of the files altered in the commit pass the ignoredFiles filter +and includedFiles is not empty, then we make sure that at least one of +those files matches a includedFiles glob. If not, then we do not trigger +a build.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The [Cloud Build location](https://cloud.google.com/build/docs/locations) for the trigger. +If not specified, "global" is used.`, + Default: "global", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Name of the trigger. Must be unique within the project.`, + }, + "pubsub_config": { + Type: schema.TypeList, + Optional: true, + Description: `PubsubConfig describes the configuration of a trigger that creates +a build whenever a Pub/Sub message is published. 
+ +One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic": { + Type: schema.TypeString, + Required: true, + Description: `The name of the topic from which this subscription is receiving messages.`, + }, + "service_account_email": { + Type: schema.TypeString, + Optional: true, + Description: `Service account that will make the push request.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Potential issues with the underlying Pub/Sub subscription configuration. +Only populated on get requests.`, + }, + "subscription": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Name of the subscription.`, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "bitbucket_server_trigger_config", "pubsub_config", "webhook_config", "source_to_build", "repository_event_config"}, + }, + "repository_event_config": { + Type: schema.TypeList, + Optional: true, + Description: `The configuration of a trigger that creates a build whenever an event from Repo API is received.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pull_request": { + Type: schema.TypeList, + Optional: true, + Description: `Contains filter properties for matching Pull Requests.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch": { + Type: schema.TypeString, + Optional: true, + Description: `Regex of branches to match. 
+ +The syntax of the regular expressions accepted is the syntax accepted by +RE2 and described at https://github.com/google/re2/wiki/Syntax`, + ExactlyOneOf: []string{}, + }, + "comment_control": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY", ""}), + Description: `Configure builds to run whether a repository owner or collaborator need to comment '/gcbrun'. Possible values: ["COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"]`, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, branches that do NOT match the git_ref will trigger a build.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "push": { + Type: schema.TypeList, + Optional: true, + Description: `Contains filter properties for matching git pushes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch": { + Type: schema.TypeString, + Optional: true, + Description: `Regex of branches to match. + +The syntax of the regular expressions accepted is the syntax accepted by +RE2 and described at https://github.com/google/re2/wiki/Syntax`, + ExactlyOneOf: []string{}, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, only trigger a build if the revision regex does NOT match the git_ref regex.`, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + Description: `Regex of tags to match. 
+ +The syntax of the regular expressions accepted is the syntax accepted by +RE2 and described at https://github.com/google/re2/wiki/Syntax`, + ExactlyOneOf: []string{}, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "repository": { + Type: schema.TypeString, + Optional: true, + Description: `The resource name of the Repo API resource.`, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "bitbucket_server_trigger_config", "pubsub_config", "webhook_config", "source_to_build", "repository_event_config"}, + }, + "service_account": { + Type: schema.TypeString, + Optional: true, + Description: `The service account used for all user-controlled operations including +triggers.patch, triggers.run, builds.create, and builds.cancel. + +If no service account is set, then the standard Cloud Build service account +([PROJECT_NUM]@system.gserviceaccount.com) will be used instead. + +Format: projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_ID_OR_EMAIL}`, + }, + "source_to_build": { + Type: schema.TypeList, + Optional: true, + Description: `The repo and ref of the repository from which to build. +This field is used only for those triggers that do not respond to SCM events. +Triggers that respond to such events build source at whatever commit caused the event. +This field is currently only used by Webhook, Pub/Sub, Manual, and Cron triggers. + +One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ref": { + Type: schema.TypeString, + Required: true, + Description: `The branch or tag to use. Must start with "refs/" (required).`, + }, + "repo_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET_SERVER"}), + Description: `The type of the repo, since it may not be explicit from the repo field (e.g from a URL). 
+Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB, BITBUCKET_SERVER Possible values: ["UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET_SERVER"]`, + }, + "github_enterprise_config": { + Type: schema.TypeString, + Optional: true, + Description: `The full resource name of the github enterprise config. +Format: projects/{project}/locations/{location}/githubEnterpriseConfigs/{id}. projects/{project}/githubEnterpriseConfigs/{id}.`, + }, + "repository": { + Type: schema.TypeString, + Optional: true, + Description: `The qualified resource name of the Repo API repository. +Either uri or repository can be specified and is required.`, + }, + "uri": { + Type: schema.TypeString, + Optional: true, + Description: `The URI of the repo.`, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "bitbucket_server_trigger_config", "pubsub_config", "webhook_config", "source_to_build", "repository_event_config"}, + }, + "substitutions": { + Type: schema.TypeMap, + Optional: true, + Description: `Substitutions data for Build resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": { + Type: schema.TypeList, + Optional: true, + Description: `Tags for annotation of a BuildTrigger`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "trigger_template": { + Type: schema.TypeList, + Optional: true, + Description: `Template describing the types of source changes to trigger a build. + +Branch and tag names in trigger templates are interpreted as regular +expressions. Any branch or tag change that matches that regular +expression will trigger a build. + +One of 'trigger_template', 'github', 'pubsub_config', 'webhook_config' or 'source_to_build' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch_name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the branch to build. Exactly one a of branch name, tag, or commit SHA must be provided. 
+This field is a regular expression.`, + ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, + }, + "commit_sha": { + Type: schema.TypeString, + Optional: true, + Description: `Explicit commit SHA to build. Exactly one of a branch name, tag, or commit SHA must be provided.`, + ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, + }, + "dir": { + Type: schema.TypeString, + Optional: true, + Description: `Directory, relative to the source root, in which to run the build. + +This must be a relative path. If a step's dir is specified and +is an absolute path, this value is ignored for that step's +execution.`, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `Only trigger a build if the revision regex does NOT match the revision regex.`, + }, + "project_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `ID of the project that owns the Cloud Source Repository. If +omitted, the project ID requesting the build is assumed.`, + }, + "repo_name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the Cloud Source Repository. If omitted, the name "default" is assumed.`, + Default: "default", + }, + "tag_name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the tag to build. Exactly one of a branch name, tag, or commit SHA must be provided. 
+This field is a regular expression.`, + ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "bitbucket_server_trigger_config", "pubsub_config", "webhook_config", "source_to_build", "repository_event_config"}, + }, + "webhook_config": { + Type: schema.TypeList, + Optional: true, + Description: `WebhookConfig describes the configuration of a trigger that creates +a build whenever a webhook is sent to a trigger's webhook URL. + +One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret": { + Type: schema.TypeString, + Required: true, + Description: `Resource name for the secret required as a URL parameter.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Potential issues with the underlying Pub/Sub subscription configuration. 
+Only populated on get requests.`, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "bitbucket_server_trigger_config", "pubsub_config", "webhook_config", "source_to_build", "repository_event_config"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time when the trigger was created.`, + }, + "trigger_id": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier for the trigger.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudBuildTriggerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandCloudBuildTriggerName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandCloudBuildTriggerDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + tagsProp, err := expandCloudBuildTriggerTags(d.Get("tags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tags"); !tpgresource.IsEmptyValue(reflect.ValueOf(tagsProp)) && (ok || !reflect.DeepEqual(v, tagsProp)) { + obj["tags"] = tagsProp + } + disabledProp, err := expandCloudBuildTriggerDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(disabledProp)) && 
(ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + substitutionsProp, err := expandCloudBuildTriggerSubstitutions(d.Get("substitutions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("substitutions"); !tpgresource.IsEmptyValue(reflect.ValueOf(substitutionsProp)) && (ok || !reflect.DeepEqual(v, substitutionsProp)) { + obj["substitutions"] = substitutionsProp + } + serviceAccountProp, err := expandCloudBuildTriggerServiceAccount(d.Get("service_account"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_account"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceAccountProp)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { + obj["serviceAccount"] = serviceAccountProp + } + includeBuildLogsProp, err := expandCloudBuildTriggerIncludeBuildLogs(d.Get("include_build_logs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("include_build_logs"); !tpgresource.IsEmptyValue(reflect.ValueOf(includeBuildLogsProp)) && (ok || !reflect.DeepEqual(v, includeBuildLogsProp)) { + obj["includeBuildLogs"] = includeBuildLogsProp + } + filenameProp, err := expandCloudBuildTriggerFilename(d.Get("filename"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filename"); !tpgresource.IsEmptyValue(reflect.ValueOf(filenameProp)) && (ok || !reflect.DeepEqual(v, filenameProp)) { + obj["filename"] = filenameProp + } + filterProp, err := expandCloudBuildTriggerFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + gitFileSourceProp, err := expandCloudBuildTriggerGitFileSource(d.Get("git_file_source"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("git_file_source"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(gitFileSourceProp)) && (ok || !reflect.DeepEqual(v, gitFileSourceProp)) { + obj["gitFileSource"] = gitFileSourceProp + } + repositoryEventConfigProp, err := expandCloudBuildTriggerRepositoryEventConfig(d.Get("repository_event_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("repository_event_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(repositoryEventConfigProp)) && (ok || !reflect.DeepEqual(v, repositoryEventConfigProp)) { + obj["repositoryEventConfig"] = repositoryEventConfigProp + } + sourceToBuildProp, err := expandCloudBuildTriggerSourceToBuild(d.Get("source_to_build"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_to_build"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceToBuildProp)) && (ok || !reflect.DeepEqual(v, sourceToBuildProp)) { + obj["sourceToBuild"] = sourceToBuildProp + } + ignoredFilesProp, err := expandCloudBuildTriggerIgnoredFiles(d.Get("ignored_files"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ignored_files"); !tpgresource.IsEmptyValue(reflect.ValueOf(ignoredFilesProp)) && (ok || !reflect.DeepEqual(v, ignoredFilesProp)) { + obj["ignoredFiles"] = ignoredFilesProp + } + includedFilesProp, err := expandCloudBuildTriggerIncludedFiles(d.Get("included_files"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("included_files"); !tpgresource.IsEmptyValue(reflect.ValueOf(includedFilesProp)) && (ok || !reflect.DeepEqual(v, includedFilesProp)) { + obj["includedFiles"] = includedFilesProp + } + triggerTemplateProp, err := expandCloudBuildTriggerTriggerTemplate(d.Get("trigger_template"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("trigger_template"); !tpgresource.IsEmptyValue(reflect.ValueOf(triggerTemplateProp)) && (ok || !reflect.DeepEqual(v, triggerTemplateProp)) { + obj["triggerTemplate"] = triggerTemplateProp + } + githubProp, 
err := expandCloudBuildTriggerGithub(d.Get("github"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("github"); !tpgresource.IsEmptyValue(reflect.ValueOf(githubProp)) && (ok || !reflect.DeepEqual(v, githubProp)) { + obj["github"] = githubProp + } + bitbucketServerTriggerConfigProp, err := expandCloudBuildTriggerBitbucketServerTriggerConfig(d.Get("bitbucket_server_trigger_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bitbucket_server_trigger_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(bitbucketServerTriggerConfigProp)) && (ok || !reflect.DeepEqual(v, bitbucketServerTriggerConfigProp)) { + obj["bitbucketServerTriggerConfig"] = bitbucketServerTriggerConfigProp + } + pubsubConfigProp, err := expandCloudBuildTriggerPubsubConfig(d.Get("pubsub_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(pubsubConfigProp)) && (ok || !reflect.DeepEqual(v, pubsubConfigProp)) { + obj["pubsubConfig"] = pubsubConfigProp + } + webhookConfigProp, err := expandCloudBuildTriggerWebhookConfig(d.Get("webhook_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("webhook_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(webhookConfigProp)) && (ok || !reflect.DeepEqual(v, webhookConfigProp)) { + obj["webhookConfig"] = webhookConfigProp + } + approvalConfigProp, err := expandCloudBuildTriggerApprovalConfig(d.Get("approval_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("approval_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(approvalConfigProp)) && (ok || !reflect.DeepEqual(v, approvalConfigProp)) { + obj["approvalConfig"] = approvalConfigProp + } + buildProp, err := expandCloudBuildTriggerBuild(d.Get("build"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("build"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(buildProp)) && (ok || !reflect.DeepEqual(v, buildProp)) { + obj["build"] = buildProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/triggers") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Trigger: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Trigger: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Trigger: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + triggerId, ok := res["id"] + if !ok { + return fmt.Errorf("Create response didn't contain id. Create may not have succeeded.") + } + if err := d.Set("trigger_id", triggerId.(string)); err != nil { + return fmt.Errorf("Error setting trigger_id: %s", err) + } + + // Store the ID now. We tried to set it before and it failed because + // trigger_id didn't exist yet. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + // Force legacy id format for global triggers. 
+ id = strings.ReplaceAll(id, "/locations/global/", "/") + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Trigger %q: %#v", d.Id(), res) + + return resourceCloudBuildTriggerRead(d, meta) +} + +func resourceCloudBuildTriggerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Trigger: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // To support import with the legacy id format. 
+ url = strings.ReplaceAll(url, "/locations//", "/locations/global/") + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudBuildTrigger %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + + if err := d.Set("trigger_id", flattenCloudBuildTriggerTriggerId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("name", flattenCloudBuildTriggerName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("description", flattenCloudBuildTriggerDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("tags", flattenCloudBuildTriggerTags(res["tags"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("disabled", flattenCloudBuildTriggerDisabled(res["disabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("create_time", flattenCloudBuildTriggerCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("substitutions", flattenCloudBuildTriggerSubstitutions(res["substitutions"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("service_account", flattenCloudBuildTriggerServiceAccount(res["serviceAccount"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("include_build_logs", flattenCloudBuildTriggerIncludeBuildLogs(res["includeBuildLogs"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", 
err) + } + if err := d.Set("filename", flattenCloudBuildTriggerFilename(res["filename"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("filter", flattenCloudBuildTriggerFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("git_file_source", flattenCloudBuildTriggerGitFileSource(res["gitFileSource"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("repository_event_config", flattenCloudBuildTriggerRepositoryEventConfig(res["repositoryEventConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("source_to_build", flattenCloudBuildTriggerSourceToBuild(res["sourceToBuild"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("ignored_files", flattenCloudBuildTriggerIgnoredFiles(res["ignoredFiles"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("included_files", flattenCloudBuildTriggerIncludedFiles(res["includedFiles"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("trigger_template", flattenCloudBuildTriggerTriggerTemplate(res["triggerTemplate"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("github", flattenCloudBuildTriggerGithub(res["github"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("bitbucket_server_trigger_config", flattenCloudBuildTriggerBitbucketServerTriggerConfig(res["bitbucketServerTriggerConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("pubsub_config", flattenCloudBuildTriggerPubsubConfig(res["pubsubConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) 
+ } + if err := d.Set("webhook_config", flattenCloudBuildTriggerWebhookConfig(res["webhookConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("approval_config", flattenCloudBuildTriggerApprovalConfig(res["approvalConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + if err := d.Set("build", flattenCloudBuildTriggerBuild(res["build"], d, config)); err != nil { + return fmt.Errorf("Error reading Trigger: %s", err) + } + + return nil +} + +func resourceCloudBuildTriggerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Trigger: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + nameProp, err := expandCloudBuildTriggerName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandCloudBuildTriggerDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + tagsProp, err := expandCloudBuildTriggerTags(d.Get("tags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tags"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tagsProp)) { + obj["tags"] = tagsProp + } + disabledProp, err := expandCloudBuildTriggerDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if 
v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + substitutionsProp, err := expandCloudBuildTriggerSubstitutions(d.Get("substitutions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("substitutions"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, substitutionsProp)) { + obj["substitutions"] = substitutionsProp + } + serviceAccountProp, err := expandCloudBuildTriggerServiceAccount(d.Get("service_account"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_account"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { + obj["serviceAccount"] = serviceAccountProp + } + includeBuildLogsProp, err := expandCloudBuildTriggerIncludeBuildLogs(d.Get("include_build_logs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("include_build_logs"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, includeBuildLogsProp)) { + obj["includeBuildLogs"] = includeBuildLogsProp + } + filenameProp, err := expandCloudBuildTriggerFilename(d.Get("filename"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filename"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filenameProp)) { + obj["filename"] = filenameProp + } + filterProp, err := expandCloudBuildTriggerFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + gitFileSourceProp, err := expandCloudBuildTriggerGitFileSource(d.Get("git_file_source"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("git_file_source"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gitFileSourceProp)) { + obj["gitFileSource"] = gitFileSourceProp + } + repositoryEventConfigProp, err := expandCloudBuildTriggerRepositoryEventConfig(d.Get("repository_event_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("repository_event_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, repositoryEventConfigProp)) { + obj["repositoryEventConfig"] = repositoryEventConfigProp + } + sourceToBuildProp, err := expandCloudBuildTriggerSourceToBuild(d.Get("source_to_build"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_to_build"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceToBuildProp)) { + obj["sourceToBuild"] = sourceToBuildProp + } + ignoredFilesProp, err := expandCloudBuildTriggerIgnoredFiles(d.Get("ignored_files"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ignored_files"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ignoredFilesProp)) { + obj["ignoredFiles"] = ignoredFilesProp + } + includedFilesProp, err := expandCloudBuildTriggerIncludedFiles(d.Get("included_files"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("included_files"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, includedFilesProp)) { + obj["includedFiles"] = includedFilesProp + } + triggerTemplateProp, err := expandCloudBuildTriggerTriggerTemplate(d.Get("trigger_template"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("trigger_template"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, triggerTemplateProp)) { + obj["triggerTemplate"] = triggerTemplateProp + } + githubProp, err := expandCloudBuildTriggerGithub(d.Get("github"), d, config) + if err != nil { + return err + } else 
if v, ok := d.GetOkExists("github"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, githubProp)) { + obj["github"] = githubProp + } + bitbucketServerTriggerConfigProp, err := expandCloudBuildTriggerBitbucketServerTriggerConfig(d.Get("bitbucket_server_trigger_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bitbucket_server_trigger_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bitbucketServerTriggerConfigProp)) { + obj["bitbucketServerTriggerConfig"] = bitbucketServerTriggerConfigProp + } + pubsubConfigProp, err := expandCloudBuildTriggerPubsubConfig(d.Get("pubsub_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pubsubConfigProp)) { + obj["pubsubConfig"] = pubsubConfigProp + } + webhookConfigProp, err := expandCloudBuildTriggerWebhookConfig(d.Get("webhook_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("webhook_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, webhookConfigProp)) { + obj["webhookConfig"] = webhookConfigProp + } + approvalConfigProp, err := expandCloudBuildTriggerApprovalConfig(d.Get("approval_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("approval_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, approvalConfigProp)) { + obj["approvalConfig"] = approvalConfigProp + } + buildProp, err := expandCloudBuildTriggerBuild(d.Get("build"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("build"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, buildProp)) { + obj["build"] = buildProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Trigger %q: %#v", d.Id(), obj) + obj["id"] = d.Get("trigger_id") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Trigger %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Trigger %q: %#v", d.Id(), res) + } + + return resourceCloudBuildTriggerRead(d, meta) +} + +func resourceCloudBuildTriggerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Trigger: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Trigger %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return 
transport_tpg.HandleNotFoundError(err, d, "Trigger") + } + + log.Printf("[DEBUG] Finished deleting Trigger %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudBuildTriggerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/triggers/(?P[^/]+)", + "projects/(?P[^/]+)/triggers/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Force legacy id format for global triggers. + id = strings.ReplaceAll(id, "/locations//", "/") + id = strings.ReplaceAll(id, "/locations/global/", "/") + d.SetId(id) + if d.Get("location") == "" { + // Happens when imported with legacy import format. 
+ d.Set("location", "global") + } + + return []*schema.ResourceData{d}, nil +} + +func flattenCloudBuildTriggerTriggerId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerSubstitutions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerIncludeBuildLogs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerFilename(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGitFileSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["path"] = + flattenCloudBuildTriggerGitFileSourcePath(original["path"], d, 
config) + transformed["uri"] = + flattenCloudBuildTriggerGitFileSourceUri(original["uri"], d, config) + transformed["repository"] = + flattenCloudBuildTriggerGitFileSourceRepository(original["repository"], d, config) + transformed["repo_type"] = + flattenCloudBuildTriggerGitFileSourceRepoType(original["repoType"], d, config) + transformed["revision"] = + flattenCloudBuildTriggerGitFileSourceRevision(original["revision"], d, config) + transformed["github_enterprise_config"] = + flattenCloudBuildTriggerGitFileSourceGithubEnterpriseConfig(original["githubEnterpriseConfig"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerGitFileSourcePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGitFileSourceUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGitFileSourceRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGitFileSourceRepoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGitFileSourceRevision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGitFileSourceGithubEnterpriseConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerRepositoryEventConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["repository"] = + flattenCloudBuildTriggerRepositoryEventConfigRepository(original["repository"], d, config) + transformed["pull_request"] = + 
flattenCloudBuildTriggerRepositoryEventConfigPullRequest(original["pullRequest"], d, config) + transformed["push"] = + flattenCloudBuildTriggerRepositoryEventConfigPush(original["push"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerRepositoryEventConfigRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerRepositoryEventConfigPullRequest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["branch"] = + flattenCloudBuildTriggerRepositoryEventConfigPullRequestBranch(original["branch"], d, config) + transformed["invert_regex"] = + flattenCloudBuildTriggerRepositoryEventConfigPullRequestInvertRegex(original["invertRegex"], d, config) + transformed["comment_control"] = + flattenCloudBuildTriggerRepositoryEventConfigPullRequestCommentControl(original["commentControl"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerRepositoryEventConfigPullRequestBranch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerRepositoryEventConfigPullRequestInvertRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerRepositoryEventConfigPullRequestCommentControl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerRepositoryEventConfigPush(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["branch"] = + 
flattenCloudBuildTriggerRepositoryEventConfigPushBranch(original["branch"], d, config) + transformed["tag"] = + flattenCloudBuildTriggerRepositoryEventConfigPushTag(original["tag"], d, config) + transformed["invert_regex"] = + flattenCloudBuildTriggerRepositoryEventConfigPushInvertRegex(original["invertRegex"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerRepositoryEventConfigPushBranch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerRepositoryEventConfigPushTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerRepositoryEventConfigPushInvertRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerSourceToBuild(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenCloudBuildTriggerSourceToBuildUri(original["uri"], d, config) + transformed["repository"] = + flattenCloudBuildTriggerSourceToBuildRepository(original["repository"], d, config) + transformed["ref"] = + flattenCloudBuildTriggerSourceToBuildRef(original["ref"], d, config) + transformed["repo_type"] = + flattenCloudBuildTriggerSourceToBuildRepoType(original["repoType"], d, config) + transformed["github_enterprise_config"] = + flattenCloudBuildTriggerSourceToBuildGithubEnterpriseConfig(original["githubEnterpriseConfig"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerSourceToBuildUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerSourceToBuildRepository(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerSourceToBuildRef(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerSourceToBuildRepoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerSourceToBuildGithubEnterpriseConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerIgnoredFiles(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerIncludedFiles(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerTriggerTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["project_id"] = + flattenCloudBuildTriggerTriggerTemplateProjectId(original["projectId"], d, config) + transformed["repo_name"] = + flattenCloudBuildTriggerTriggerTemplateRepoName(original["repoName"], d, config) + transformed["dir"] = + flattenCloudBuildTriggerTriggerTemplateDir(original["dir"], d, config) + transformed["invert_regex"] = + flattenCloudBuildTriggerTriggerTemplateInvertRegex(original["invertRegex"], d, config) + transformed["branch_name"] = + flattenCloudBuildTriggerTriggerTemplateBranchName(original["branchName"], d, config) + transformed["tag_name"] = + flattenCloudBuildTriggerTriggerTemplateTagName(original["tagName"], d, config) + transformed["commit_sha"] = + flattenCloudBuildTriggerTriggerTemplateCommitSha(original["commitSha"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerTriggerTemplateProjectId(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerTriggerTemplateRepoName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerTriggerTemplateDir(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerTriggerTemplateInvertRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerTriggerTemplateBranchName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerTriggerTemplateTagName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerTriggerTemplateCommitSha(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGithub(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["owner"] = + flattenCloudBuildTriggerGithubOwner(original["owner"], d, config) + transformed["name"] = + flattenCloudBuildTriggerGithubName(original["name"], d, config) + transformed["pull_request"] = + flattenCloudBuildTriggerGithubPullRequest(original["pullRequest"], d, config) + transformed["push"] = + flattenCloudBuildTriggerGithubPush(original["push"], d, config) + transformed["enterprise_config_resource_name"] = + flattenCloudBuildTriggerGithubEnterpriseConfigResourceName(original["enterpriseConfigResourceName"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerGithubOwner(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + 
return v +} + +func flattenCloudBuildTriggerGithubName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGithubPullRequest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["branch"] = + flattenCloudBuildTriggerGithubPullRequestBranch(original["branch"], d, config) + transformed["comment_control"] = + flattenCloudBuildTriggerGithubPullRequestCommentControl(original["commentControl"], d, config) + transformed["invert_regex"] = + flattenCloudBuildTriggerGithubPullRequestInvertRegex(original["invertRegex"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerGithubPullRequestBranch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGithubPullRequestCommentControl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGithubPullRequestInvertRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGithubPush(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["invert_regex"] = + flattenCloudBuildTriggerGithubPushInvertRegex(original["invertRegex"], d, config) + transformed["branch"] = + flattenCloudBuildTriggerGithubPushBranch(original["branch"], d, config) + transformed["tag"] = + flattenCloudBuildTriggerGithubPushTag(original["tag"], d, config) + return []interface{}{transformed} +} +func 
flattenCloudBuildTriggerGithubPushInvertRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGithubPushBranch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGithubPushTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerGithubEnterpriseConfigResourceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBitbucketServerTriggerConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["repo_slug"] = + flattenCloudBuildTriggerBitbucketServerTriggerConfigRepoSlug(original["repoSlug"], d, config) + transformed["project_key"] = + flattenCloudBuildTriggerBitbucketServerTriggerConfigProjectKey(original["projectKey"], d, config) + transformed["bitbucket_server_config_resource"] = + flattenCloudBuildTriggerBitbucketServerTriggerConfigBitbucketServerConfigResource(original["bitbucketServerConfigResource"], d, config) + transformed["pull_request"] = + flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequest(original["pullRequest"], d, config) + transformed["push"] = + flattenCloudBuildTriggerBitbucketServerTriggerConfigPush(original["push"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerBitbucketServerTriggerConfigRepoSlug(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBitbucketServerTriggerConfigProjectKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenCloudBuildTriggerBitbucketServerTriggerConfigBitbucketServerConfigResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["branch"] = + flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequestBranch(original["branch"], d, config) + transformed["comment_control"] = + flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequestCommentControl(original["commentControl"], d, config) + transformed["invert_regex"] = + flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequestInvertRegex(original["invertRegex"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequestBranch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequestCommentControl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBitbucketServerTriggerConfigPullRequestInvertRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBitbucketServerTriggerConfigPush(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["invert_regex"] = + flattenCloudBuildTriggerBitbucketServerTriggerConfigPushInvertRegex(original["invertRegex"], d, config) + transformed["branch"] = + 
flattenCloudBuildTriggerBitbucketServerTriggerConfigPushBranch(original["branch"], d, config) + transformed["tag"] = + flattenCloudBuildTriggerBitbucketServerTriggerConfigPushTag(original["tag"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerBitbucketServerTriggerConfigPushInvertRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBitbucketServerTriggerConfigPushBranch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBitbucketServerTriggerConfigPushTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerPubsubConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["subscription"] = + flattenCloudBuildTriggerPubsubConfigSubscription(original["subscription"], d, config) + transformed["topic"] = + flattenCloudBuildTriggerPubsubConfigTopic(original["topic"], d, config) + transformed["service_account_email"] = + flattenCloudBuildTriggerPubsubConfigServiceAccountEmail(original["service_account_email"], d, config) + transformed["state"] = + flattenCloudBuildTriggerPubsubConfigState(original["state"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerPubsubConfigSubscription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerPubsubConfigTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerPubsubConfigServiceAccountEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v 
+} + +func flattenCloudBuildTriggerPubsubConfigState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerWebhookConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["secret"] = + flattenCloudBuildTriggerWebhookConfigSecret(original["secret"], d, config) + transformed["state"] = + flattenCloudBuildTriggerWebhookConfigState(original["state"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerWebhookConfigSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerWebhookConfigState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerApprovalConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + transformed := make(map[string]interface{}) + if v == nil { + // Disabled by default, but API will not return object if value is false + transformed["approval_required"] = false + return []interface{}{transformed} + } + + original := v.(map[string]interface{}) + transformed["approval_required"] = original["approvalRequired"] + return []interface{}{transformed} +} + +func flattenCloudBuildTriggerBuild(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["source"] = + flattenCloudBuildTriggerBuildSource(original["source"], d, config) + transformed["tags"] = + flattenCloudBuildTriggerBuildTags(original["tags"], d, config) + transformed["images"] = + 
flattenCloudBuildTriggerBuildImages(original["images"], d, config) + transformed["substitutions"] = + flattenCloudBuildTriggerBuildSubstitutions(original["substitutions"], d, config) + transformed["queue_ttl"] = + flattenCloudBuildTriggerBuildQueueTtl(original["queueTtl"], d, config) + transformed["logs_bucket"] = + flattenCloudBuildTriggerBuildLogsBucket(original["logsBucket"], d, config) + transformed["timeout"] = + flattenCloudBuildTriggerBuildTimeout(original["timeout"], d, config) + transformed["secret"] = + flattenCloudBuildTriggerBuildSecret(original["secrets"], d, config) + transformed["available_secrets"] = + flattenCloudBuildTriggerBuildAvailableSecrets(original["availableSecrets"], d, config) + transformed["step"] = + flattenCloudBuildTriggerBuildStep(original["steps"], d, config) + transformed["artifacts"] = + flattenCloudBuildTriggerBuildArtifacts(original["artifacts"], d, config) + transformed["options"] = + flattenCloudBuildTriggerBuildOptions(original["options"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerBuildSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["storage_source"] = + flattenCloudBuildTriggerBuildSourceStorageSource(original["storageSource"], d, config) + transformed["repo_source"] = + flattenCloudBuildTriggerBuildSourceRepoSource(original["repoSource"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerBuildSourceStorageSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["bucket"] = + 
flattenCloudBuildTriggerBuildSourceStorageSourceBucket(original["bucket"], d, config) + transformed["object"] = + flattenCloudBuildTriggerBuildSourceStorageSourceObject(original["object"], d, config) + transformed["generation"] = + flattenCloudBuildTriggerBuildSourceStorageSourceGeneration(original["generation"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerBuildSourceStorageSourceBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildSourceStorageSourceObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildSourceStorageSourceGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildSourceRepoSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["project_id"] = + flattenCloudBuildTriggerBuildSourceRepoSourceProjectId(original["projectId"], d, config) + transformed["repo_name"] = + flattenCloudBuildTriggerBuildSourceRepoSourceRepoName(original["repoName"], d, config) + transformed["dir"] = + flattenCloudBuildTriggerBuildSourceRepoSourceDir(original["dir"], d, config) + transformed["invert_regex"] = + flattenCloudBuildTriggerBuildSourceRepoSourceInvertRegex(original["invertRegex"], d, config) + transformed["substitutions"] = + flattenCloudBuildTriggerBuildSourceRepoSourceSubstitutions(original["substitutions"], d, config) + transformed["branch_name"] = + flattenCloudBuildTriggerBuildSourceRepoSourceBranchName(original["branchName"], d, config) + transformed["tag_name"] = + flattenCloudBuildTriggerBuildSourceRepoSourceTagName(original["tagName"], d, config) + 
transformed["commit_sha"] = + flattenCloudBuildTriggerBuildSourceRepoSourceCommitSha(original["commitSha"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerBuildSourceRepoSourceProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildSourceRepoSourceRepoName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildSourceRepoSourceDir(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildSourceRepoSourceInvertRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildSourceRepoSourceSubstitutions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildSourceRepoSourceBranchName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildSourceRepoSourceTagName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildSourceRepoSourceCommitSha(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildImages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildSubstitutions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildQueueTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return 
v +} + +func flattenCloudBuildTriggerBuildLogsBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "kms_key_name": flattenCloudBuildTriggerBuildSecretKmsKeyName(original["kmsKeyName"], d, config), + "secret_env": flattenCloudBuildTriggerBuildSecretSecretEnv(original["secretEnv"], d, config), + }) + } + return transformed +} +func flattenCloudBuildTriggerBuildSecretKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildSecretSecretEnv(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildAvailableSecrets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["secret_manager"] = + flattenCloudBuildTriggerBuildAvailableSecretsSecretManager(original["secretManager"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerBuildAvailableSecretsSecretManager(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, 
len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "version_name": flattenCloudBuildTriggerBuildAvailableSecretsSecretManagerVersionName(original["versionName"], d, config), + "env": flattenCloudBuildTriggerBuildAvailableSecretsSecretManagerEnv(original["env"], d, config), + }) + } + return transformed +} +func flattenCloudBuildTriggerBuildAvailableSecretsSecretManagerVersionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildAvailableSecretsSecretManagerEnv(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStep(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudBuildTriggerBuildStepName(original["name"], d, config), + "args": flattenCloudBuildTriggerBuildStepArgs(original["args"], d, config), + "env": flattenCloudBuildTriggerBuildStepEnv(original["env"], d, config), + "id": flattenCloudBuildTriggerBuildStepId(original["id"], d, config), + "entrypoint": flattenCloudBuildTriggerBuildStepEntrypoint(original["entrypoint"], d, config), + "dir": flattenCloudBuildTriggerBuildStepDir(original["dir"], d, config), + "secret_env": flattenCloudBuildTriggerBuildStepSecretEnv(original["secretEnv"], d, config), + "timeout": flattenCloudBuildTriggerBuildStepTimeout(original["timeout"], d, config), + "timing": 
flattenCloudBuildTriggerBuildStepTiming(original["timing"], d, config), + "volumes": flattenCloudBuildTriggerBuildStepVolumes(original["volumes"], d, config), + "wait_for": flattenCloudBuildTriggerBuildStepWaitFor(original["waitFor"], d, config), + "script": flattenCloudBuildTriggerBuildStepScript(original["script"], d, config), + "allow_failure": flattenCloudBuildTriggerBuildStepAllowFailure(original["allowFailure"], d, config), + "allow_exit_codes": flattenCloudBuildTriggerBuildStepAllowExitCodes(original["allowExitCodes"], d, config), + }) + } + return transformed +} +func flattenCloudBuildTriggerBuildStepName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepArgs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepEnv(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepEntrypoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepDir(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepSecretEnv(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepTiming(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepVolumes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudBuildTriggerBuildStepVolumesName(original["name"], d, config), + "path": flattenCloudBuildTriggerBuildStepVolumesPath(original["path"], d, config), + }) + } + return transformed +} +func flattenCloudBuildTriggerBuildStepVolumesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepVolumesPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepWaitFor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepScript(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepAllowFailure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildStepAllowExitCodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildArtifacts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["images"] = + flattenCloudBuildTriggerBuildArtifactsImages(original["images"], d, config) + transformed["objects"] = + flattenCloudBuildTriggerBuildArtifactsObjects(original["objects"], d, config) + return []interface{}{transformed} +} +func 
flattenCloudBuildTriggerBuildArtifactsImages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildArtifactsObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["location"] = + flattenCloudBuildTriggerBuildArtifactsObjectsLocation(original["location"], d, config) + transformed["paths"] = + flattenCloudBuildTriggerBuildArtifactsObjectsPaths(original["paths"], d, config) + transformed["timing"] = + flattenCloudBuildTriggerBuildArtifactsObjectsTiming(original["timing"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerBuildArtifactsObjectsLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildArtifactsObjectsPaths(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildArtifactsObjectsTiming(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["start_time"] = + flattenCloudBuildTriggerBuildArtifactsObjectsTimingStartTime(original["startTime"], d, config) + transformed["end_time"] = + flattenCloudBuildTriggerBuildArtifactsObjectsTimingEndTime(original["endTime"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerBuildArtifactsObjectsTimingStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildArtifactsObjectsTimingEndTime(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["source_provenance_hash"] = + flattenCloudBuildTriggerBuildOptionsSourceProvenanceHash(original["sourceProvenanceHash"], d, config) + transformed["requested_verify_option"] = + flattenCloudBuildTriggerBuildOptionsRequestedVerifyOption(original["requestedVerifyOption"], d, config) + transformed["machine_type"] = + flattenCloudBuildTriggerBuildOptionsMachineType(original["machineType"], d, config) + transformed["disk_size_gb"] = + flattenCloudBuildTriggerBuildOptionsDiskSizeGb(original["diskSizeGb"], d, config) + transformed["substitution_option"] = + flattenCloudBuildTriggerBuildOptionsSubstitutionOption(original["substitutionOption"], d, config) + transformed["dynamic_substitutions"] = + flattenCloudBuildTriggerBuildOptionsDynamicSubstitutions(original["dynamicSubstitutions"], d, config) + transformed["log_streaming_option"] = + flattenCloudBuildTriggerBuildOptionsLogStreamingOption(original["logStreamingOption"], d, config) + transformed["worker_pool"] = + flattenCloudBuildTriggerBuildOptionsWorkerPool(original["workerPool"], d, config) + transformed["logging"] = + flattenCloudBuildTriggerBuildOptionsLogging(original["logging"], d, config) + transformed["env"] = + flattenCloudBuildTriggerBuildOptionsEnv(original["env"], d, config) + transformed["secret_env"] = + flattenCloudBuildTriggerBuildOptionsSecretEnv(original["secretEnv"], d, config) + transformed["volumes"] = + flattenCloudBuildTriggerBuildOptionsVolumes(original["volumes"], d, config) + return []interface{}{transformed} +} +func flattenCloudBuildTriggerBuildOptionsSourceProvenanceHash(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenCloudBuildTriggerBuildOptionsRequestedVerifyOption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildOptionsMachineType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildOptionsDiskSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudBuildTriggerBuildOptionsSubstitutionOption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildOptionsDynamicSubstitutions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildOptionsLogStreamingOption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildOptionsWorkerPool(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildOptionsLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildOptionsEnv(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildOptionsSecretEnv(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildOptionsVolumes(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudBuildTriggerBuildOptionsVolumesName(original["name"], d, config), + "path": flattenCloudBuildTriggerBuildOptionsVolumesPath(original["path"], d, config), + }) + } + return transformed +} +func flattenCloudBuildTriggerBuildOptionsVolumesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudBuildTriggerBuildOptionsVolumesPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCloudBuildTriggerName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerSubstitutions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudBuildTriggerServiceAccount(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerIncludeBuildLogs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerFilename(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGitFileSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandCloudBuildTriggerGitFileSourcePath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedUri, err := expandCloudBuildTriggerGitFileSourceUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedRepository, err := expandCloudBuildTriggerGitFileSourceRepository(original["repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repository"] = transformedRepository + } + + transformedRepoType, err := expandCloudBuildTriggerGitFileSourceRepoType(original["repo_type"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedRepoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repoType"] = transformedRepoType + } + + transformedRevision, err := expandCloudBuildTriggerGitFileSourceRevision(original["revision"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRevision); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["revision"] = transformedRevision + } + + transformedGithubEnterpriseConfig, err := expandCloudBuildTriggerGitFileSourceGithubEnterpriseConfig(original["github_enterprise_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGithubEnterpriseConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["githubEnterpriseConfig"] = transformedGithubEnterpriseConfig + } + + return transformed, nil +} + +func expandCloudBuildTriggerGitFileSourcePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGitFileSourceUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGitFileSourceRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGitFileSourceRepoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGitFileSourceRevision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGitFileSourceGithubEnterpriseConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerRepositoryEventConfig(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRepository, err := expandCloudBuildTriggerRepositoryEventConfigRepository(original["repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repository"] = transformedRepository + } + + transformedPullRequest, err := expandCloudBuildTriggerRepositoryEventConfigPullRequest(original["pull_request"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPullRequest); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pullRequest"] = transformedPullRequest + } + + transformedPush, err := expandCloudBuildTriggerRepositoryEventConfigPush(original["push"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPush); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["push"] = transformedPush + } + + return transformed, nil +} + +func expandCloudBuildTriggerRepositoryEventConfigRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerRepositoryEventConfigPullRequest(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBranch, err := expandCloudBuildTriggerRepositoryEventConfigPullRequestBranch(original["branch"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedBranch); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["branch"] = transformedBranch + } + + transformedInvertRegex, err := expandCloudBuildTriggerRepositoryEventConfigPullRequestInvertRegex(original["invert_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["invertRegex"] = transformedInvertRegex + } + + transformedCommentControl, err := expandCloudBuildTriggerRepositoryEventConfigPullRequestCommentControl(original["comment_control"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCommentControl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["commentControl"] = transformedCommentControl + } + + return transformed, nil +} + +func expandCloudBuildTriggerRepositoryEventConfigPullRequestBranch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerRepositoryEventConfigPullRequestInvertRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerRepositoryEventConfigPullRequestCommentControl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerRepositoryEventConfigPush(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBranch, err := expandCloudBuildTriggerRepositoryEventConfigPushBranch(original["branch"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedBranch); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["branch"] = transformedBranch + } + + transformedTag, err := expandCloudBuildTriggerRepositoryEventConfigPushTag(original["tag"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tag"] = transformedTag + } + + transformedInvertRegex, err := expandCloudBuildTriggerRepositoryEventConfigPushInvertRegex(original["invert_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["invertRegex"] = transformedInvertRegex + } + + return transformed, nil +} + +func expandCloudBuildTriggerRepositoryEventConfigPushBranch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerRepositoryEventConfigPushTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerRepositoryEventConfigPushInvertRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerSourceToBuild(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandCloudBuildTriggerSourceToBuildUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedRepository, err := 
expandCloudBuildTriggerSourceToBuildRepository(original["repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repository"] = transformedRepository + } + + transformedRef, err := expandCloudBuildTriggerSourceToBuildRef(original["ref"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRef); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ref"] = transformedRef + } + + transformedRepoType, err := expandCloudBuildTriggerSourceToBuildRepoType(original["repo_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repoType"] = transformedRepoType + } + + transformedGithubEnterpriseConfig, err := expandCloudBuildTriggerSourceToBuildGithubEnterpriseConfig(original["github_enterprise_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGithubEnterpriseConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["githubEnterpriseConfig"] = transformedGithubEnterpriseConfig + } + + return transformed, nil +} + +func expandCloudBuildTriggerSourceToBuildUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerSourceToBuildRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerSourceToBuildRef(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerSourceToBuildRepoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandCloudBuildTriggerSourceToBuildGithubEnterpriseConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerIgnoredFiles(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerIncludedFiles(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerTriggerTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProjectId, err := expandCloudBuildTriggerTriggerTemplateProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + transformedRepoName, err := expandCloudBuildTriggerTriggerTemplateRepoName(original["repo_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepoName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repoName"] = transformedRepoName + } + + transformedDir, err := expandCloudBuildTriggerTriggerTemplateDir(original["dir"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDir); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dir"] = transformedDir + } + + transformedInvertRegex, err := expandCloudBuildTriggerTriggerTemplateInvertRegex(original["invert_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["invertRegex"] = transformedInvertRegex + } + + transformedBranchName, err := expandCloudBuildTriggerTriggerTemplateBranchName(original["branch_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBranchName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["branchName"] = transformedBranchName + } + + transformedTagName, err := expandCloudBuildTriggerTriggerTemplateTagName(original["tag_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTagName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tagName"] = transformedTagName + } + + transformedCommitSha, err := expandCloudBuildTriggerTriggerTemplateCommitSha(original["commit_sha"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCommitSha); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["commitSha"] = transformedCommitSha + } + + return transformed, nil +} + +func expandCloudBuildTriggerTriggerTemplateProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerTriggerTemplateRepoName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerTriggerTemplateDir(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerTriggerTemplateInvertRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerTriggerTemplateBranchName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandCloudBuildTriggerTriggerTemplateTagName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerTriggerTemplateCommitSha(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGithub(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedOwner, err := expandCloudBuildTriggerGithubOwner(original["owner"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOwner); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["owner"] = transformedOwner + } + + transformedName, err := expandCloudBuildTriggerGithubName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedPullRequest, err := expandCloudBuildTriggerGithubPullRequest(original["pull_request"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPullRequest); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pullRequest"] = transformedPullRequest + } + + transformedPush, err := expandCloudBuildTriggerGithubPush(original["push"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPush); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["push"] = transformedPush + } + + transformedEnterpriseConfigResourceName, err := expandCloudBuildTriggerGithubEnterpriseConfigResourceName(original["enterprise_config_resource_name"], d, config) + if err != 
nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnterpriseConfigResourceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enterpriseConfigResourceName"] = transformedEnterpriseConfigResourceName + } + + return transformed, nil +} + +func expandCloudBuildTriggerGithubOwner(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGithubName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGithubPullRequest(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBranch, err := expandCloudBuildTriggerGithubPullRequestBranch(original["branch"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBranch); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["branch"] = transformedBranch + } + + transformedCommentControl, err := expandCloudBuildTriggerGithubPullRequestCommentControl(original["comment_control"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCommentControl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["commentControl"] = transformedCommentControl + } + + transformedInvertRegex, err := expandCloudBuildTriggerGithubPullRequestInvertRegex(original["invert_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["invertRegex"] = transformedInvertRegex + } + + return transformed, nil +} + +func 
expandCloudBuildTriggerGithubPullRequestBranch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGithubPullRequestCommentControl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGithubPullRequestInvertRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGithubPush(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInvertRegex, err := expandCloudBuildTriggerGithubPushInvertRegex(original["invert_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["invertRegex"] = transformedInvertRegex + } + + transformedBranch, err := expandCloudBuildTriggerGithubPushBranch(original["branch"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBranch); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["branch"] = transformedBranch + } + + transformedTag, err := expandCloudBuildTriggerGithubPushTag(original["tag"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tag"] = transformedTag + } + + return transformed, nil +} + +func expandCloudBuildTriggerGithubPushInvertRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandCloudBuildTriggerGithubPushBranch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGithubPushTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerGithubEnterpriseConfigResourceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBitbucketServerTriggerConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRepoSlug, err := expandCloudBuildTriggerBitbucketServerTriggerConfigRepoSlug(original["repo_slug"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepoSlug); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repoSlug"] = transformedRepoSlug + } + + transformedProjectKey, err := expandCloudBuildTriggerBitbucketServerTriggerConfigProjectKey(original["project_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectKey"] = transformedProjectKey + } + + transformedBitbucketServerConfigResource, err := expandCloudBuildTriggerBitbucketServerTriggerConfigBitbucketServerConfigResource(original["bitbucket_server_config_resource"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBitbucketServerConfigResource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bitbucketServerConfigResource"] = transformedBitbucketServerConfigResource + } + + 
transformedPullRequest, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequest(original["pull_request"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPullRequest); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pullRequest"] = transformedPullRequest + } + + transformedPush, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPush(original["push"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPush); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["push"] = transformedPush + } + + return transformed, nil +} + +func expandCloudBuildTriggerBitbucketServerTriggerConfigRepoSlug(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBitbucketServerTriggerConfigProjectKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBitbucketServerTriggerConfigBitbucketServerConfigResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequest(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBranch, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequestBranch(original["branch"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBranch); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["branch"] = transformedBranch + } + + transformedCommentControl, err := 
expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequestCommentControl(original["comment_control"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCommentControl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["commentControl"] = transformedCommentControl + } + + transformedInvertRegex, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequestInvertRegex(original["invert_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["invertRegex"] = transformedInvertRegex + } + + return transformed, nil +} + +func expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequestBranch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequestCommentControl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBitbucketServerTriggerConfigPullRequestInvertRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBitbucketServerTriggerConfigPush(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInvertRegex, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPushInvertRegex(original["invert_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["invertRegex"] = 
transformedInvertRegex + } + + transformedBranch, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPushBranch(original["branch"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBranch); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["branch"] = transformedBranch + } + + transformedTag, err := expandCloudBuildTriggerBitbucketServerTriggerConfigPushTag(original["tag"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tag"] = transformedTag + } + + return transformed, nil +} + +func expandCloudBuildTriggerBitbucketServerTriggerConfigPushInvertRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBitbucketServerTriggerConfigPushBranch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBitbucketServerTriggerConfigPushTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerPubsubConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSubscription, err := expandCloudBuildTriggerPubsubConfigSubscription(original["subscription"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubscription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subscription"] = transformedSubscription + } + + transformedTopic, err := expandCloudBuildTriggerPubsubConfigTopic(original["topic"], 
d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["topic"] = transformedTopic + } + + transformedServiceAccountEmail, err := expandCloudBuildTriggerPubsubConfigServiceAccountEmail(original["service_account_email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["service_account_email"] = transformedServiceAccountEmail + } + + transformedState, err := expandCloudBuildTriggerPubsubConfigState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["state"] = transformedState + } + + return transformed, nil +} + +func expandCloudBuildTriggerPubsubConfigSubscription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerPubsubConfigTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerPubsubConfigServiceAccountEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerPubsubConfigState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerWebhookConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSecret, err := 
expandCloudBuildTriggerWebhookConfigSecret(original["secret"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secret"] = transformedSecret + } + + transformedState, err := expandCloudBuildTriggerWebhookConfigState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["state"] = transformedState + } + + return transformed, nil +} + +func expandCloudBuildTriggerWebhookConfigSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerWebhookConfigState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerApprovalConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedApprovalRequired, err := expandCloudBuildTriggerApprovalConfigApprovalRequired(original["approval_required"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedApprovalRequired); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["approvalRequired"] = transformedApprovalRequired + } + + return transformed, nil +} + +func expandCloudBuildTriggerApprovalConfigApprovalRequired(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuild(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := 
v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSource, err := expandCloudBuildTriggerBuildSource(original["source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["source"] = transformedSource + } + + transformedTags, err := expandCloudBuildTriggerBuildTags(original["tags"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTags); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tags"] = transformedTags + } + + transformedImages, err := expandCloudBuildTriggerBuildImages(original["images"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImages); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["images"] = transformedImages + } + + transformedSubstitutions, err := expandCloudBuildTriggerBuildSubstitutions(original["substitutions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubstitutions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["substitutions"] = transformedSubstitutions + } + + transformedQueueTtl, err := expandCloudBuildTriggerBuildQueueTtl(original["queue_ttl"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedQueueTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["queueTtl"] = transformedQueueTtl + } + + transformedLogsBucket, err := expandCloudBuildTriggerBuildLogsBucket(original["logs_bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLogsBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["logsBucket"] = transformedLogsBucket + } + + transformedTimeout, err := 
expandCloudBuildTriggerBuildTimeout(original["timeout"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeout"] = transformedTimeout + } + + transformedSecret, err := expandCloudBuildTriggerBuildSecret(original["secret"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secrets"] = transformedSecret + } + + transformedAvailableSecrets, err := expandCloudBuildTriggerBuildAvailableSecrets(original["available_secrets"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAvailableSecrets); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["availableSecrets"] = transformedAvailableSecrets + } + + transformedStep, err := expandCloudBuildTriggerBuildStep(original["step"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStep); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["steps"] = transformedStep + } + + transformedArtifacts, err := expandCloudBuildTriggerBuildArtifacts(original["artifacts"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArtifacts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["artifacts"] = transformedArtifacts + } + + transformedOptions, err := expandCloudBuildTriggerBuildOptions(original["options"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["options"] = transformedOptions + } + + return transformed, nil +} + +func expandCloudBuildTriggerBuildSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + 
return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedStorageSource, err := expandCloudBuildTriggerBuildSourceStorageSource(original["storage_source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStorageSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["storageSource"] = transformedStorageSource + } + + transformedRepoSource, err := expandCloudBuildTriggerBuildSourceRepoSource(original["repo_source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepoSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repoSource"] = transformedRepoSource + } + + return transformed, nil +} + +func expandCloudBuildTriggerBuildSourceStorageSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBucket, err := expandCloudBuildTriggerBuildSourceStorageSourceBucket(original["bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucket"] = transformedBucket + } + + transformedObject, err := expandCloudBuildTriggerBuildSourceStorageSourceObject(original["object"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["object"] = transformedObject + } + + transformedGeneration, err := expandCloudBuildTriggerBuildSourceStorageSourceGeneration(original["generation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["generation"] = transformedGeneration + } + + return transformed, nil +} + +func expandCloudBuildTriggerBuildSourceStorageSourceBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildSourceStorageSourceObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildSourceStorageSourceGeneration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildSourceRepoSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProjectId, err := expandCloudBuildTriggerBuildSourceRepoSourceProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + transformedRepoName, err := expandCloudBuildTriggerBuildSourceRepoSourceRepoName(original["repo_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepoName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repoName"] = transformedRepoName + } + + transformedDir, err := expandCloudBuildTriggerBuildSourceRepoSourceDir(original["dir"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDir); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dir"] = transformedDir + } + + transformedInvertRegex, err := 
expandCloudBuildTriggerBuildSourceRepoSourceInvertRegex(original["invert_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["invertRegex"] = transformedInvertRegex + } + + transformedSubstitutions, err := expandCloudBuildTriggerBuildSourceRepoSourceSubstitutions(original["substitutions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubstitutions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["substitutions"] = transformedSubstitutions + } + + transformedBranchName, err := expandCloudBuildTriggerBuildSourceRepoSourceBranchName(original["branch_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBranchName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["branchName"] = transformedBranchName + } + + transformedTagName, err := expandCloudBuildTriggerBuildSourceRepoSourceTagName(original["tag_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTagName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tagName"] = transformedTagName + } + + transformedCommitSha, err := expandCloudBuildTriggerBuildSourceRepoSourceCommitSha(original["commit_sha"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCommitSha); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["commitSha"] = transformedCommitSha + } + + return transformed, nil +} + +func expandCloudBuildTriggerBuildSourceRepoSourceProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildSourceRepoSourceRepoName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + 
+func expandCloudBuildTriggerBuildSourceRepoSourceDir(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildSourceRepoSourceInvertRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildSourceRepoSourceSubstitutions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudBuildTriggerBuildSourceRepoSourceBranchName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildSourceRepoSourceTagName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildSourceRepoSourceCommitSha(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildImages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildSubstitutions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func 
expandCloudBuildTriggerBuildQueueTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildLogsBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandCloudBuildTriggerBuildSecretKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + transformedSecretEnv, err := expandCloudBuildTriggerBuildSecretSecretEnv(original["secret_env"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretEnv); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secretEnv"] = transformedSecretEnv + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudBuildTriggerBuildSecretKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildSecretSecretEnv(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range 
v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudBuildTriggerBuildAvailableSecrets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSecretManager, err := expandCloudBuildTriggerBuildAvailableSecretsSecretManager(original["secret_manager"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretManager); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secretManager"] = transformedSecretManager + } + + return transformed, nil +} + +func expandCloudBuildTriggerBuildAvailableSecretsSecretManager(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVersionName, err := expandCloudBuildTriggerBuildAvailableSecretsSecretManagerVersionName(original["version_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersionName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["versionName"] = transformedVersionName + } + + transformedEnv, err := expandCloudBuildTriggerBuildAvailableSecretsSecretManagerEnv(original["env"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnv); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["env"] = transformedEnv + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudBuildTriggerBuildAvailableSecretsSecretManagerVersionName(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildAvailableSecretsSecretManagerEnv(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStep(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudBuildTriggerBuildStepName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedArgs, err := expandCloudBuildTriggerBuildStepArgs(original["args"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["args"] = transformedArgs + } + + transformedEnv, err := expandCloudBuildTriggerBuildStepEnv(original["env"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnv); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["env"] = transformedEnv + } + + transformedId, err := expandCloudBuildTriggerBuildStepId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedEntrypoint, err := expandCloudBuildTriggerBuildStepEntrypoint(original["entrypoint"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEntrypoint); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["entrypoint"] = transformedEntrypoint + } + + transformedDir, err := expandCloudBuildTriggerBuildStepDir(original["dir"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDir); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dir"] = transformedDir + } + + transformedSecretEnv, err := expandCloudBuildTriggerBuildStepSecretEnv(original["secret_env"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretEnv); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secretEnv"] = transformedSecretEnv + } + + transformedTimeout, err := expandCloudBuildTriggerBuildStepTimeout(original["timeout"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeout"] = transformedTimeout + } + + transformedTiming, err := expandCloudBuildTriggerBuildStepTiming(original["timing"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTiming); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timing"] = transformedTiming + } + + transformedVolumes, err := expandCloudBuildTriggerBuildStepVolumes(original["volumes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVolumes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["volumes"] = transformedVolumes + } + + transformedWaitFor, err := expandCloudBuildTriggerBuildStepWaitFor(original["wait_for"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWaitFor); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["waitFor"] = transformedWaitFor + } + + transformedScript, err := expandCloudBuildTriggerBuildStepScript(original["script"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedScript); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["script"] = transformedScript + } + + transformedAllowFailure, err := expandCloudBuildTriggerBuildStepAllowFailure(original["allow_failure"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowFailure); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowFailure"] = transformedAllowFailure + } + + transformedAllowExitCodes, err := expandCloudBuildTriggerBuildStepAllowExitCodes(original["allow_exit_codes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowExitCodes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowExitCodes"] = transformedAllowExitCodes + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudBuildTriggerBuildStepName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepArgs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepEnv(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepEntrypoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepDir(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepSecretEnv(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepTiming(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepVolumes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudBuildTriggerBuildStepVolumesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedPath, err := expandCloudBuildTriggerBuildStepVolumesPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudBuildTriggerBuildStepVolumesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepVolumesPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepWaitFor(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepScript(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepAllowFailure(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildStepAllowExitCodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildArtifacts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedImages, err := expandCloudBuildTriggerBuildArtifactsImages(original["images"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImages); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["images"] = transformedImages + } + + transformedObjects, err := expandCloudBuildTriggerBuildArtifactsObjects(original["objects"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["objects"] = transformedObjects + } + + return transformed, nil +} + +func expandCloudBuildTriggerBuildArtifactsImages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildArtifactsObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLocation, err := 
expandCloudBuildTriggerBuildArtifactsObjectsLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["location"] = transformedLocation + } + + transformedPaths, err := expandCloudBuildTriggerBuildArtifactsObjectsPaths(original["paths"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPaths); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["paths"] = transformedPaths + } + + transformedTiming, err := expandCloudBuildTriggerBuildArtifactsObjectsTiming(original["timing"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTiming); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timing"] = transformedTiming + } + + return transformed, nil +} + +func expandCloudBuildTriggerBuildArtifactsObjectsLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildArtifactsObjectsPaths(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildArtifactsObjectsTiming(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedStartTime, err := expandCloudBuildTriggerBuildArtifactsObjectsTimingStartTime(original["start_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startTime"] = transformedStartTime + } + + transformedEndTime, err := 
expandCloudBuildTriggerBuildArtifactsObjectsTimingEndTime(original["end_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["endTime"] = transformedEndTime + } + + return transformed, nil +} + +func expandCloudBuildTriggerBuildArtifactsObjectsTimingStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildArtifactsObjectsTimingEndTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSourceProvenanceHash, err := expandCloudBuildTriggerBuildOptionsSourceProvenanceHash(original["source_provenance_hash"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSourceProvenanceHash); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sourceProvenanceHash"] = transformedSourceProvenanceHash + } + + transformedRequestedVerifyOption, err := expandCloudBuildTriggerBuildOptionsRequestedVerifyOption(original["requested_verify_option"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequestedVerifyOption); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requestedVerifyOption"] = transformedRequestedVerifyOption + } + + transformedMachineType, err := expandCloudBuildTriggerBuildOptionsMachineType(original["machine_type"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedMachineType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["machineType"] = transformedMachineType + } + + transformedDiskSizeGb, err := expandCloudBuildTriggerBuildOptionsDiskSizeGb(original["disk_size_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDiskSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["diskSizeGb"] = transformedDiskSizeGb + } + + transformedSubstitutionOption, err := expandCloudBuildTriggerBuildOptionsSubstitutionOption(original["substitution_option"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubstitutionOption); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["substitutionOption"] = transformedSubstitutionOption + } + + transformedDynamicSubstitutions, err := expandCloudBuildTriggerBuildOptionsDynamicSubstitutions(original["dynamic_substitutions"], d, config) + if err != nil { + return nil, err + } else { + transformed["dynamicSubstitutions"] = transformedDynamicSubstitutions + } + + transformedLogStreamingOption, err := expandCloudBuildTriggerBuildOptionsLogStreamingOption(original["log_streaming_option"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLogStreamingOption); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["logStreamingOption"] = transformedLogStreamingOption + } + + transformedWorkerPool, err := expandCloudBuildTriggerBuildOptionsWorkerPool(original["worker_pool"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWorkerPool); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["workerPool"] = transformedWorkerPool + } + + transformedLogging, err := expandCloudBuildTriggerBuildOptionsLogging(original["logging"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLogging); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["logging"] = transformedLogging + } + + transformedEnv, err := expandCloudBuildTriggerBuildOptionsEnv(original["env"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnv); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["env"] = transformedEnv + } + + transformedSecretEnv, err := expandCloudBuildTriggerBuildOptionsSecretEnv(original["secret_env"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretEnv); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secretEnv"] = transformedSecretEnv + } + + transformedVolumes, err := expandCloudBuildTriggerBuildOptionsVolumes(original["volumes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVolumes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["volumes"] = transformedVolumes + } + + return transformed, nil +} + +func expandCloudBuildTriggerBuildOptionsSourceProvenanceHash(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildOptionsRequestedVerifyOption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildOptionsMachineType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildOptionsDiskSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildOptionsSubstitutionOption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildOptionsDynamicSubstitutions(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildOptionsLogStreamingOption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildOptionsWorkerPool(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildOptionsLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildOptionsEnv(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildOptionsSecretEnv(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildOptionsVolumes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudBuildTriggerBuildOptionsVolumesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedPath, err := expandCloudBuildTriggerBuildOptionsVolumesPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + req = append(req, transformed) + } + return req, nil +} + 
+func expandCloudBuildTriggerBuildOptionsVolumesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudBuildTriggerBuildOptionsVolumesPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func ResourceCloudBuildTriggerUpgradeV1(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + // Versions 0 and 1 didn't support location. Default them to global. + rawState["location"] = "global" + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} + +func resourceCloudBuildTriggerResourceV1() *schema.Resource { + // Cloud Build Triggers started with V1 since its beginnings. + return resourceCloudBuildTriggerResourceV0() +} + +func ResourceCloudBuildTriggerUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + // Do nothing as V0 and V1 are exactly the same. 
+ return rawState, nil +} + +func resourceCloudBuildTriggerResourceV0() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudBuildTriggerCreate, + Read: resourceCloudBuildTriggerRead, + Update: resourceCloudBuildTriggerUpdate, + Delete: resourceCloudBuildTriggerDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudBuildTriggerImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + SchemaVersion: 1, + CustomizeDiff: stepTimeoutCustomizeDiff, + + Schema: map[string]*schema.Schema{ + "approval_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Configuration for manual approval to start a build invocation of this BuildTrigger. +Builds created by this trigger will require approval before they execute. +Any user with a Cloud Build Approver role for the project can approve a build.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "approval_required": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not approval is needed. If this is set on a build, it will become pending when run, +and will need to be explicitly approved to start.`, + Default: false, + }, + }, + }, + }, + "build": { + Type: schema.TypeList, + Optional: true, + Description: `Contents of the build template. Either a filename or build template must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "step": { + Type: schema.TypeList, + Required: true, + Description: `The operations to be performed on the workspace.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the container image that will run this particular build step. 
+If the image is available in the host's Docker daemon's cache, it will be +run directly. If not, the host will attempt to pull the image first, using +the builder service account's credentials if necessary. +The Docker daemon's cache will already have the latest versions of all of +the officially supported build steps (see https://github.com/GoogleCloudPlatform/cloud-builders +for images and examples). +The Docker daemon will also have cached many of the layers for some popular +images, like "ubuntu", "debian", but they will be refreshed at the time +you attempt to use them. +If you built an image in a previous build step, it will be stored in the +host's Docker daemon's cache and is available to use as the name for a +later build step.`, + }, + "args": { + Type: schema.TypeList, + Optional: true, + Description: `A list of arguments that will be presented to the step when it is started. +If the image used to run the step's container has an entrypoint, the args +are used as arguments to that entrypoint. If the image does not define an +entrypoint, the first element in args is used as the entrypoint, and the +remainder will be used as arguments.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "dir": { + Type: schema.TypeString, + Optional: true, + Description: `Working directory to use when running this step's container. +If this value is a relative path, it is relative to the build's working +directory. If this value is absolute, it may be outside the build's working +directory, in which case the contents of the path may not be persisted +across build step executions, unless a 'volume' for that path is specified. +If the build specifies a 'RepoSource' with 'dir' and a step with a +'dir', +which specifies an absolute path, the 'RepoSource' 'dir' is ignored +for the step's execution.`, + }, + "entrypoint": { + Type: schema.TypeString, + Optional: true, + Description: `Entrypoint to be used instead of the build step image's +default entrypoint. 
+If unset, the image's default entrypoint is used`, + }, + "env": { + Type: schema.TypeList, + Optional: true, + Description: `A list of environment variable definitions to be used when +running a step. +The elements are of the form "KEY=VALUE" for the environment variable +"KEY" being given the value "VALUE".`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "id": { + Type: schema.TypeString, + Optional: true, + Description: `Unique identifier for this build step, used in 'wait_for' to +reference this build step as a dependency.`, + }, + "secret_env": { + Type: schema.TypeList, + Optional: true, + Description: `A list of environment variables which are encrypted using +a Cloud Key +Management Service crypto key. These values must be specified in +the build's 'Secret'.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "timeout": { + Type: schema.TypeString, + Optional: true, + Description: `Time limit for executing this build step. If not defined, +the step has no +time limit and will be allowed to continue to run until either it +completes or the build itself times out.`, + }, + "timing": { + Type: schema.TypeString, + Optional: true, + Description: `Output only. Stores timing information for executing this +build step.`, + }, + "volumes": { + Type: schema.TypeList, + Optional: true, + Description: `List of volumes to mount into the build step. +Each volume is created as an empty volume prior to execution of the +build step. Upon completion of the build, volumes and their contents +are discarded. +Using a named volume in only one step is not valid as it is +indicative of a build request with an incorrect configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the volume to mount. +Volume names must be unique per build step and must be valid names for +Docker volumes. 
Each named volume must be used by at least two build steps.`, + }, + "path": { + Type: schema.TypeString, + Required: true, + Description: `Path at which to mount the volume. +Paths must be absolute and cannot conflict with other volume paths on +the same build step or with certain reserved volume paths.`, + }, + }, + }, + }, + "wait_for": { + Type: schema.TypeList, + Optional: true, + Description: `The ID(s) of the step(s) that this build step depends on. +This build step will not start until all the build steps in 'wait_for' +have completed successfully. If 'wait_for' is empty, this build step +will start when all previous build steps in the 'Build.Steps' list +have completed successfully.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "artifacts": { + Type: schema.TypeList, + Optional: true, + Description: `Artifacts produced by the build that should be uploaded upon successful completion of all build steps.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "images": { + Type: schema.TypeList, + Optional: true, + Description: `A list of images to be pushed upon the successful completion of all build steps. +The images will be pushed using the builder service account's credentials. +The digests of the pushed images will be stored in the Build resource's results field. +If any of the images fail to be pushed, the build is marked FAILURE.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "objects": { + Type: schema.TypeList, + Optional: true, + Description: `A list of objects to be uploaded to Cloud Storage upon successful completion of all build steps. +Files in the workspace matching specified paths globs will be uploaded to the +Cloud Storage location using the builder service account's credentials. +The location and generation of the uploaded objects will be stored in the Build resource's results field. 
+If any objects fail to be pushed, the build is marked FAILURE.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Optional: true, + Description: `Cloud Storage bucket and optional object path, in the form "gs://bucket/path/to/somewhere/". +Files in the workspace matching any path pattern will be uploaded to Cloud Storage with +this location as a prefix.`, + }, + "paths": { + Type: schema.TypeList, + Optional: true, + Description: `Path globs used to match files in the build's workspace.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "timing": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. Stores timing information for pushing all artifact objects.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "end_time": { + Type: schema.TypeString, + Optional: true, + Description: `End of time span. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to +nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + Description: `Start of time span. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to +nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "available_secrets": { + Type: schema.TypeList, + Optional: true, + Description: `Secrets and secret environment variables.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_manager": { + Type: schema.TypeList, + Required: true, + Description: `Pairs a secret environment variable with a SecretVersion in Secret Manager.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "env": { + Type: schema.TypeString, + Required: true, + Description: `Environment variable name to associate with the secret. Secret environment +variables must be unique across all of a build's secrets, and must be used +by at least one build step.`, + }, + "version_name": { + Type: schema.TypeString, + Required: true, + Description: `Resource name of the SecretVersion. In format: projects/*/secrets/*/versions/*`, + }, + }, + }, + }, + }, + }, + }, + "images": { + Type: schema.TypeList, + Optional: true, + Description: `A list of images to be pushed upon the successful completion of all build steps. +The images are pushed using the builder service account's credentials. +The digests of the pushed images will be stored in the Build resource's results field. +If any of the images fail to be pushed, the build status is marked FAILURE.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "logs_bucket": { + Type: schema.TypeString, + Optional: true, + Description: `Google Cloud Storage bucket where logs should be written. +Logs file names will be of the format ${logsBucket}/log-${build_id}.txt.`, + }, + "options": { + Type: schema.TypeList, + Optional: true, + Description: `Special options for this build.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + Description: `Requested disk size for the VM that runs the build. 
Note that this is NOT "disk free"; +some of the space will be used by the operating system and build utilities. +Also note that this is the minimum disk size that will be allocated for the build -- +the build may run with a larger disk than requested. At present, the maximum disk size +is 1000GB; builds that request more than the maximum are rejected with an error.`, + }, + "dynamic_substitutions": { + Type: schema.TypeBool, + Optional: true, + Description: `Option to specify whether or not to apply bash style string operations to the substitutions. +NOTE this is always enabled for triggered builds and cannot be overridden in the build configuration file.`, + }, + "env": { + Type: schema.TypeList, + Optional: true, + Description: `A list of global environment variable definitions that will exist for all build steps +in this build. If a variable is defined in both globally and in a build step, +the variable will use the build step value. +The elements are of the form "KEY=VALUE" for the environment variable "KEY" being given the value "VALUE".`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "log_streaming_option": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF", ""}), + Description: `Option to define build log streaming behavior to Google Cloud Storage. Possible values: ["STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF"]`, + }, + "logging": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", "STACKDRIVER_ONLY", "CLOUD_LOGGING_ONLY", "NONE", ""}), + Description: `Option to specify the logging mode, which determines if and where build logs are stored. 
Possible values: ["LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", "STACKDRIVER_ONLY", "CLOUD_LOGGING_ONLY", "NONE"]`, + }, + "machine_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"UNSPECIFIED", "N1_HIGHCPU_8", "N1_HIGHCPU_32", "E2_HIGHCPU_8", "E2_HIGHCPU_32", ""}), + Description: `Compute Engine machine type on which to run the build. Possible values: ["UNSPECIFIED", "N1_HIGHCPU_8", "N1_HIGHCPU_32", "E2_HIGHCPU_8", "E2_HIGHCPU_32"]`, + }, + "requested_verify_option": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NOT_VERIFIED", "VERIFIED", ""}), + Description: `Requested verifiability options. Possible values: ["NOT_VERIFIED", "VERIFIED"]`, + }, + "secret_env": { + Type: schema.TypeList, + Optional: true, + Description: `A list of global environment variables, which are encrypted using a Cloud Key Management +Service crypto key. These values must be specified in the build's Secret. These variables +will be available to all build steps in this build.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "source_provenance_hash": { + Type: schema.TypeList, + Optional: true, + Description: `Requested hash for SourceProvenance. Possible values: ["NONE", "SHA256", "MD5"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"NONE", "SHA256", "MD5"}), + }, + }, + "substitution_option": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MUST_MATCH", "ALLOW_LOOSE", ""}), + Description: `Option to specify behavior when there is an error in the substitution checks. +NOTE this is always set to ALLOW_LOOSE for triggered builds and cannot be overridden +in the build configuration file. 
Possible values: ["MUST_MATCH", "ALLOW_LOOSE"]`, + }, + "volumes": { + Type: schema.TypeList, + Optional: true, + Description: `Global list of volumes to mount for ALL build steps +Each volume is created as an empty volume prior to starting the build process. +Upon completion of the build, volumes and their contents are discarded. Global +volume names and paths cannot conflict with the volumes defined a build step. +Using a global volume in a build with only one step is not valid as it is indicative +of a build request with an incorrect configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the volume to mount. +Volume names must be unique per build step and must be valid names for Docker volumes. +Each named volume must be used by at least two build steps.`, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path at which to mount the volume. +Paths must be absolute and cannot conflict with other volume paths on the same +build step or with certain reserved volume paths.`, + }, + }, + }, + }, + "worker_pool": { + Type: schema.TypeString, + Optional: true, + Description: `Option to specify a WorkerPool for the build. Format projects/{project}/workerPools/{workerPool} +This field is experimental.`, + }, + }, + }, + }, + "queue_ttl": { + Type: schema.TypeString, + Optional: true, + Description: `TTL in queue for this build. If provided and the build is enqueued longer than this value, +the build will expire and the build status will be EXPIRED. +The TTL starts ticking from createTime. +A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, + }, + "secret": { + Type: schema.TypeList, + Optional: true, + Description: `Secrets to decrypt using Cloud Key Management Service.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + Description: `Cloud KMS key name to use to decrypt these envs.`, + }, + "secret_env": { + Type: schema.TypeMap, + Optional: true, + Description: `Map of environment variable name to its encrypted value. +Secret environment variables must be unique across all of a build's secrets, +and must be used by at least one build step. Values can be at most 64 KB in size. +There can be at most 100 secret values across all of a build's secrets.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "source": { + Type: schema.TypeList, + Optional: true, + Description: `The location of the source files to build. +One of 'storageSource' or 'repoSource' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "repo_source": { + Type: schema.TypeList, + Optional: true, + Description: `Location of the source in a Google Cloud Source Repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "repo_name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the Cloud Source Repository.`, + }, + "branch_name": { + Type: schema.TypeString, + Optional: true, + Description: `Regex matching branches to build. Exactly one a of branch name, tag, or commit SHA must be provided. +The syntax of the regular expressions accepted is the syntax accepted by RE2 and +described at https://github.com/google/re2/wiki/Syntax`, + ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, + }, + "commit_sha": { + Type: schema.TypeString, + Optional: true, + Description: `Explicit commit SHA to build. 
Exactly one a of branch name, tag, or commit SHA must be provided.`, + ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, + }, + "dir": { + Type: schema.TypeString, + Optional: true, + Description: `Directory, relative to the source root, in which to run the build. +This must be a relative path. If a step's dir is specified and is an absolute path, +this value is ignored for that step's execution.`, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `Only trigger a build if the revision regex does NOT match the revision regex.`, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, + Description: `ID of the project that owns the Cloud Source Repository. +If omitted, the project ID requesting the build is assumed.`, + }, + "substitutions": { + Type: schema.TypeMap, + Optional: true, + Description: `Substitutions to use in a triggered build. Should only be used with triggers.run`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tag_name": { + Type: schema.TypeString, + Optional: true, + Description: `Regex matching tags to build. Exactly one a of branch name, tag, or commit SHA must be provided. 
+The syntax of the regular expressions accepted is the syntax accepted by RE2 and +described at https://github.com/google/re2/wiki/Syntax`, + ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, + }, + }, + }, + }, + "storage_source": { + Type: schema.TypeList, + Optional: true, + Description: `Location of the source in an archive file in Google Cloud Storage.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: `Google Cloud Storage bucket containing the source.`, + }, + "object": { + Type: schema.TypeString, + Required: true, + Description: `Google Cloud Storage object containing the source. +This object must be a gzipped archive file (.tar.gz) containing source to build.`, + }, + "generation": { + Type: schema.TypeString, + Optional: true, + Description: `Google Cloud Storage generation for the object. +If the generation is omitted, the latest generation will be used`, + }, + }, + }, + }, + }, + }, + }, + "substitutions": { + Type: schema.TypeMap, + Optional: true, + Description: `Substitutions data for Build resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": { + Type: schema.TypeList, + Optional: true, + Description: `Tags for annotation of a Build. These are not docker tags.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "timeout": { + Type: schema.TypeString, + Optional: true, + Description: `Amount of time that this build should be allowed to run, to second granularity. +If this amount of time elapses, work on the build will cease and the build status will be TIMEOUT. +This timeout must be equal to or greater than the sum of the timeouts for build steps within the build. +The expected format is the number of seconds followed by s. 
+Default time is ten minutes (600s).`, + Default: "600s", + }, + }, + }, + ExactlyOneOf: []string{"filename", "build", "git_file_source"}, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Human-readable description of the trigger.`, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether the trigger is disabled or not. If true, the trigger will never result in a build.`, + }, + "filename": { + Type: schema.TypeString, + Optional: true, + Description: `Path, from the source root, to a file whose contents is used for the template. +Either a filename or build template must be provided. Set this only when using trigger_template or github. +When using Pub/Sub, Webhook or Manual set the file name using git_file_source instead.`, + ExactlyOneOf: []string{"filename", "build", "git_file_source"}, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: `A Common Expression Language string. Used only with Pub/Sub and Webhook.`, + }, + "git_file_source": { + Type: schema.TypeList, + Optional: true, + Description: `The file source describing the local or remote Build template.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: `The path of the file, with the repo root as the root of the path.`, + }, + "repo_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET"}), + Description: `The type of the repo, since it may not be explicit from the repo field (e.g from a URL). 
+Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB Possible values: ["UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET"]`, + }, + "revision": { + Type: schema.TypeString, + Optional: true, + Description: `The branch, tag, arbitrary ref, or SHA version of the repo to use when resolving the +filename (optional). This field respects the same syntax/resolution as described here: https://git-scm.com/docs/gitrevisions +If unspecified, the revision from which the trigger invocation originated is assumed to be the revision from which to read the specified path.`, + }, + "uri": { + Type: schema.TypeString, + Optional: true, + Description: `The URI of the repo (optional). If unspecified, the repo from which the trigger +invocation originated is assumed to be the repo from which to read the specified path.`, + }, + }, + }, + ExactlyOneOf: []string{"filename", "git_file_source", "build"}, + }, + "github": { + Type: schema.TypeList, + Optional: true, + Description: `Describes the configuration of a trigger that creates a build whenever a GitHub event is received. +One of 'trigger_template', 'github', 'pubsub_config' or 'webhook_config' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the repository. For example: The name for +https://github.com/googlecloudplatform/cloud-builders is "cloud-builders".`, + }, + "owner": { + Type: schema.TypeString, + Optional: true, + Description: `Owner of the repository. For example: The owner for +https://github.com/googlecloudplatform/cloud-builders is "googlecloudplatform".`, + }, + "pull_request": { + Type: schema.TypeList, + Optional: true, + Description: `filter to match changes in pull requests. 
Specify only one of 'pull_request' or 'push'.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch": { + Type: schema.TypeString, + Required: true, + Description: `Regex of branches to match.`, + }, + "comment_control": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY", ""}), + Description: `Whether to block builds on a "/gcbrun" comment from a repository owner or collaborator. Possible values: ["COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"]`, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, branches that do NOT match the git_ref will trigger a build.`, + }, + }, + }, + ExactlyOneOf: []string{"github.0.pull_request", "github.0.push"}, + }, + "push": { + Type: schema.TypeList, + Optional: true, + Description: `filter to match changes in refs, like branches or tags. Specify only one of 'pull_request' or 'push'.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch": { + Type: schema.TypeString, + Optional: true, + Description: `Regex of branches to match. Specify only one of branch or tag.`, + ExactlyOneOf: []string{"github.0.push.0.branch", "github.0.push.0.tag"}, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `When true, only trigger a build if the revision regex does NOT match the git_ref regex.`, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + Description: `Regex of tags to match. 
Specify only one of branch or tag.`, + ExactlyOneOf: []string{"github.0.push.0.branch", "github.0.push.0.tag"}, + }, + }, + }, + ExactlyOneOf: []string{"github.0.pull_request", "github.0.push"}, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, + }, + "ignored_files": { + Type: schema.TypeList, + Optional: true, + Description: `ignoredFiles and includedFiles are file glob matches using https://golang.org/pkg/path/filepath/#Match +extended with support for '**'. +If ignoredFiles and changed files are both empty, then they are not +used to determine whether or not to trigger a build. +If ignoredFiles is not empty, then we ignore any files that match any +of the ignored_file globs. If the change has no files that are outside +of the ignoredFiles globs, then we do not trigger a build.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "include_build_logs": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"INCLUDE_BUILD_LOGS_UNSPECIFIED", "INCLUDE_BUILD_LOGS_WITH_STATUS", ""}), + Description: `Build logs will be sent back to GitHub as part of the checkrun +result. Values can be INCLUDE_BUILD_LOGS_UNSPECIFIED or +INCLUDE_BUILD_LOGS_WITH_STATUS Possible values: ["INCLUDE_BUILD_LOGS_UNSPECIFIED", "INCLUDE_BUILD_LOGS_WITH_STATUS"]`, + }, + "included_files": { + Type: schema.TypeList, + Optional: true, + Description: `ignoredFiles and includedFiles are file glob matches using https://golang.org/pkg/path/filepath/#Match +extended with support for '**'. +If any of the files altered in the commit pass the ignoredFiles filter +and includedFiles is empty, then as far as this filter is concerned, we +should trigger the build. +If any of the files altered in the commit pass the ignoredFiles filter +and includedFiles is not empty, then we make sure that at least one of +those files matches a includedFiles glob. 
If not, then we do not trigger +a build.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Name of the trigger. Must be unique within the project.`, + }, + "pubsub_config": { + Type: schema.TypeList, + Optional: true, + Description: `PubsubConfig describes the configuration of a trigger that creates +a build whenever a Pub/Sub message is published. +One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic": { + Type: schema.TypeString, + Required: true, + Description: `The name of the topic from which this subscription is receiving messages.`, + }, + "service_account_email": { + Type: schema.TypeString, + Optional: true, + Description: `Service account that will make the push request.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Potential issues with the underlying Pub/Sub subscription configuration. +Only populated on get requests.`, + }, + "subscription": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Name of the subscription.`, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, + }, + "service_account": { + Type: schema.TypeString, + Optional: true, + Description: `The service account used for all user-controlled operations including +triggers.patch, triggers.run, builds.create, and builds.cancel. +If no service account is set, then the standard Cloud Build service account +([PROJECT_NUM]@system.gserviceaccount.com) will be used instead. +Format: projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_ID_OR_EMAIL}`, + }, + "source_to_build": { + Type: schema.TypeList, + Optional: true, + Description: `The repo and ref of the repository from which to build. 
+This field is used only for those triggers that do not respond to SCM events. +Triggers that respond to such events build source at whatever commit caused the event. +This field is currently only used by Webhook, Pub/Sub, Manual, and Cron triggers. +One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ref": { + Type: schema.TypeString, + Required: true, + Description: `The branch or tag to use. Must start with "refs/" (required).`, + }, + "repo_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET"}), + Description: `The type of the repo, since it may not be explicit from the repo field (e.g from a URL). +Values can be UNKNOWN, CLOUD_SOURCE_REPOSITORIES, GITHUB, BITBUCKET Possible values: ["UNKNOWN", "CLOUD_SOURCE_REPOSITORIES", "GITHUB", "BITBUCKET"]`, + }, + "uri": { + Type: schema.TypeString, + Required: true, + Description: `The URI of the repo (required).`, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, + }, + "substitutions": { + Type: schema.TypeMap, + Optional: true, + Description: `Substitutions data for Build resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": { + Type: schema.TypeList, + Optional: true, + Description: `Tags for annotation of a BuildTrigger`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "trigger_template": { + Type: schema.TypeList, + Optional: true, + Description: `Template describing the types of source changes to trigger a build. +Branch and tag names in trigger templates are interpreted as regular +expressions. Any branch or tag change that matches that regular +expression will trigger a build. 
+One of 'trigger_template', 'github', 'pubsub_config', 'webhook_config' or 'source_to_build' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch_name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the branch to build. Exactly one a of branch name, tag, or commit SHA must be provided. +This field is a regular expression.`, + ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, + }, + "commit_sha": { + Type: schema.TypeString, + Optional: true, + Description: `Explicit commit SHA to build. Exactly one of a branch name, tag, or commit SHA must be provided.`, + ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, + }, + "dir": { + Type: schema.TypeString, + Optional: true, + Description: `Directory, relative to the source root, in which to run the build. +This must be a relative path. If a step's dir is specified and +is an absolute path, this value is ignored for that step's +execution.`, + }, + "invert_regex": { + Type: schema.TypeBool, + Optional: true, + Description: `Only trigger a build if the revision regex does NOT match the revision regex.`, + }, + "project_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `ID of the project that owns the Cloud Source Repository. If +omitted, the project ID requesting the build is assumed.`, + }, + "repo_name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the Cloud Source Repository. If omitted, the name "default" is assumed.`, + Default: "default", + }, + "tag_name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the tag to build. Exactly one of a branch name, tag, or commit SHA must be provided. 
+This field is a regular expression.`, + ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, + }, + "webhook_config": { + Type: schema.TypeList, + Optional: true, + Description: `WebhookConfig describes the configuration of a trigger that creates +a build whenever a webhook is sent to a trigger's webhook URL. +One of 'trigger_template', 'github', 'pubsub_config' 'webhook_config' or 'source_to_build' must be provided.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret": { + Type: schema.TypeString, + Required: true, + Description: `Resource name for the secret required as a URL parameter.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Potential issues with the underlying Pub/Sub subscription configuration. +Only populated on get requests.`, + }, + }, + }, + AtLeastOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config", "source_to_build"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time when the trigger was created.`, + }, + "trigger_id": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier for the trigger.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_trigger_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_trigger_sweeper.go new file mode 100644 index 0000000000..27e526c2a2 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_trigger_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudbuild + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudBuildTrigger", testSweepCloudBuildTrigger) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudBuildTrigger(region string) error { + resourceName := "CloudBuildTrigger" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + 
"project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://cloudbuild.googleapis.com/v1/projects/{{project}}/locations/{{location}}/triggers", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["triggers"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudbuild.googleapis.com/v1/projects/{{project}}/locations/{{location}}/triggers/{{trigger_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudbuild_worker_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_worker_pool.go similarity index 84% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudbuild_worker_pool.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_worker_pool.go index 
b43208979e..c7d2c6c2d1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudbuild_worker_pool.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_worker_pool.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package cloudbuild import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" cloudbuild "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuild" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceCloudbuildWorkerPool() *schema.Resource { @@ -86,7 +93,7 @@ func ResourceCloudbuildWorkerPool() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -139,7 +146,7 @@ func CloudbuildWorkerPoolNetworkConfigSchema() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareResourceNames, + DiffSuppressFunc: tpgresource.CompareResourceNames, Description: "Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. 
Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options)", }, @@ -179,8 +186,8 @@ func CloudbuildWorkerPoolWorkerConfigSchema() *schema.Resource { } func resourceCloudbuildWorkerPoolCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -188,7 +195,7 @@ func resourceCloudbuildWorkerPoolCreate(d *schema.ResourceData, meta interface{} obj := &cloudbuild.WorkerPool{ Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), DisplayName: dcl.String(d.Get("display_name").(string)), NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), Project: dcl.String(project), @@ -200,18 +207,18 @@ func resourceCloudbuildWorkerPoolCreate(d *schema.ResourceData, meta interface{} return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLCloudbuildClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -233,8 +240,8 @@ func resourceCloudbuildWorkerPoolCreate(d *schema.ResourceData, meta interface{} } func resourceCloudbuildWorkerPoolRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -242,24 +249,24 @@ func resourceCloudbuildWorkerPoolRead(d *schema.ResourceData, meta interface{}) obj := &cloudbuild.WorkerPool{ Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), DisplayName: dcl.String(d.Get("display_name").(string)), NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), Project: dcl.String(project), WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := 
transport_tpg.NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -268,7 +275,7 @@ func resourceCloudbuildWorkerPoolRead(d *schema.ResourceData, meta interface{}) res, err := client.GetWorkerPool(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("CloudbuildWorkerPool %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("location", res.Location); err != nil { @@ -311,8 +318,8 @@ func resourceCloudbuildWorkerPoolRead(d *schema.ResourceData, meta interface{}) return nil } func resourceCloudbuildWorkerPoolUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -320,25 +327,25 @@ func resourceCloudbuildWorkerPoolUpdate(d *schema.ResourceData, meta interface{} obj := &cloudbuild.WorkerPool{ Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), DisplayName: dcl.String(d.Get("display_name").(string)), NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), Project: dcl.String(project), WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the 
billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -360,8 +367,8 @@ func resourceCloudbuildWorkerPoolUpdate(d *schema.ResourceData, meta interface{} } func resourceCloudbuildWorkerPoolDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -369,7 +376,7 @@ func resourceCloudbuildWorkerPoolDelete(d *schema.ResourceData, meta interface{} obj := &cloudbuild.WorkerPool{ Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), DisplayName: dcl.String(d.Get("display_name").(string)), NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), Project: dcl.String(project), @@ -377,17 +384,17 @@ func resourceCloudbuildWorkerPoolDelete(d *schema.ResourceData, meta interface{} } log.Printf("[DEBUG] Deleting WorkerPool %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -402,9 +409,9 @@ func resourceCloudbuildWorkerPoolDelete(d *schema.ResourceData, meta interface{} } func resourceCloudbuildWorkerPoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/workerPools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -413,7 +420,7 @@ func resourceCloudbuildWorkerPoolImport(d *schema.ResourceData, meta interface{} } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workerPools/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workerPools/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_worker_pool_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_worker_pool_sweeper.go new file mode 100644 index 0000000000..b2f5ea2f89 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuild/resource_cloudbuild_worker_pool_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package cloudbuild + +import ( + "context" + "log" + "testing" + + cloudbuild "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuild" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudbuildWorkerPool", testSweepCloudbuildWorkerPool) +} + +func testSweepCloudbuildWorkerPool(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for CloudbuildWorkerPool") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := 
&testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLCloudbuildClient(config, config.UserAgent, "", 0) + err = client.DeleteAllWorkerPool(context.Background(), d["project"], d["location"], isDeletableCloudbuildWorkerPool) + if err != nil { + return err + } + return nil +} + +func isDeletableCloudbuildWorkerPool(r *cloudbuild.WorkerPool) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2/iam_cloudbuildv2_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2/iam_cloudbuildv2_connection.go new file mode 100644 index 0000000000..97ccbd4656 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2/iam_cloudbuildv2_connection.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudbuildv2 + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var Cloudbuildv2ConnectionIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type Cloudbuildv2ConnectionIamUpdater struct { + project string + location string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func Cloudbuildv2ConnectionIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &Cloudbuildv2ConnectionIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func Cloudbuildv2ConnectionIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &Cloudbuildv2ConnectionIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *Cloudbuildv2ConnectionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyConnectionUrl("getIamPolicy") + if err != nil { + return nil, 
err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *Cloudbuildv2ConnectionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyConnectionUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *Cloudbuildv2ConnectionIamUpdater) qualifyConnectionUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{Cloudbuildv2BasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/connections/%s", u.project, 
u.location, u.name), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *Cloudbuildv2ConnectionIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/connections/%s", u.project, u.location, u.name) +} + +func (u *Cloudbuildv2ConnectionIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-cloudbuildv2-connection-%s", u.GetResourceId()) +} + +func (u *Cloudbuildv2ConnectionIamUpdater) DescribeResource() string { + return fmt.Sprintf("cloudbuildv2 connection %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2/resource_cloudbuildv2_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2/resource_cloudbuildv2_connection.go new file mode 100644 index 0000000000..d786561f9b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2/resource_cloudbuildv2_connection.go @@ -0,0 +1,894 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package cloudbuildv2 + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + cloudbuildv2 "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceCloudbuildv2Connection() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudbuildv2ConnectionCreate, + Read: resourceCloudbuildv2ConnectionRead, + Update: resourceCloudbuildv2ConnectionUpdate, + Delete: resourceCloudbuildv2ConnectionDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudbuildv2ConnectionImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Immutable. 
The resource name of the connection, in the format `projects/{project}/locations/{location}/connections/{connection_id}`.", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "Allows clients to store small amounts of arbitrary data.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: "If disabled is set to true, functionality is disabled for this connection. Repository based API methods and webhooks processing for repositories in this connection will be disabled.", + }, + + "github_config": { + Type: schema.TypeList, + Optional: true, + Description: "Configuration for connections to github.com.", + MaxItems: 1, + Elem: Cloudbuildv2ConnectionGithubConfigSchema(), + ConflictsWith: []string{"github_enterprise_config", "gitlab_config"}, + }, + + "github_enterprise_config": { + Type: schema.TypeList, + Optional: true, + Description: "Configuration for connections to an instance of GitHub Enterprise.", + MaxItems: 1, + Elem: Cloudbuildv2ConnectionGithubEnterpriseConfigSchema(), + ConflictsWith: []string{"github_config", "gitlab_config"}, + }, + + "gitlab_config": { + Type: schema.TypeList, + Optional: true, + Description: "Configuration for connections to gitlab.com or an instance of GitLab Enterprise.", + MaxItems: 1, + Elem: Cloudbuildv2ConnectionGitlabConfigSchema(), + ConflictsWith: []string{"github_config", "github_enterprise_config"}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Server assigned timestamp for when the connection was created.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "installation_state": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Installation state of the Connection.", + Elem: Cloudbuildv2ConnectionInstallationStateSchema(), + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. Set to true when the connection is being set up or updated in the background.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Server assigned timestamp for when the connection was updated.", + }, + }, + } +} + +func Cloudbuildv2ConnectionGithubConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "app_installation_id": { + Type: schema.TypeInt, + Optional: true, + Description: "GitHub App installation id.", + }, + + "authorizer_credential": { + Type: schema.TypeList, + Optional: true, + Description: "OAuth credential of the account that authorized the Cloud Build GitHub App. It is recommended to use a robot account instead of a human user account. The OAuth token must be tied to the Cloud Build GitHub App.", + MaxItems: 1, + Elem: Cloudbuildv2ConnectionGithubConfigAuthorizerCredentialSchema(), + }, + }, + } +} + +func Cloudbuildv2ConnectionGithubConfigAuthorizerCredentialSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "oauth_token_secret_version": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "A SecretManager resource containing the OAuth token that authorizes the Cloud Build connection. 
Format: `projects/*/secrets/*/versions/*`.", + }, + + "username": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The username associated to this token.", + }, + }, + } +} + +func Cloudbuildv2ConnectionGithubEnterpriseConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host_uri": { + Type: schema.TypeString, + Required: true, + Description: "Required. The URI of the GitHub Enterprise host this connection is for.", + }, + + "app_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Id of the GitHub App created from the manifest.", + }, + + "app_installation_id": { + Type: schema.TypeInt, + Optional: true, + Description: "ID of the installation of the GitHub App.", + }, + + "app_slug": { + Type: schema.TypeString, + Optional: true, + Description: "The URL-friendly name of the GitHub App.", + }, + + "private_key_secret_version": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "SecretManager resource containing the private key of the GitHub App, formatted as `projects/*/secrets/*/versions/*`.", + }, + + "service_directory_config": { + Type: schema.TypeList, + Optional: true, + Description: "Configuration for using Service Directory to privately connect to a GitHub Enterprise server. This should only be set if the GitHub Enterprise server is hosted on-premises and not reachable by public internet. 
If this field is left empty, calls to the GitHub Enterprise server will be made over the public internet.", + MaxItems: 1, + Elem: Cloudbuildv2ConnectionGithubEnterpriseConfigServiceDirectoryConfigSchema(), + }, + + "ssl_ca": { + Type: schema.TypeString, + Optional: true, + Description: "SSL certificate to use for requests to GitHub Enterprise.", + }, + + "webhook_secret_secret_version": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "SecretManager resource containing the webhook secret of the GitHub App, formatted as `projects/*/secrets/*/versions/*`.", + }, + }, + } +} + +func Cloudbuildv2ConnectionGithubEnterpriseConfigServiceDirectoryConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. The Service Directory service name. Format: projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.", + }, + }, + } +} + +func Cloudbuildv2ConnectionGitlabConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorizer_credential": { + Type: schema.TypeList, + Required: true, + Description: "Required. A GitLab personal access token with the `api` scope access.", + MaxItems: 1, + Elem: Cloudbuildv2ConnectionGitlabConfigAuthorizerCredentialSchema(), + }, + + "read_authorizer_credential": { + Type: schema.TypeList, + Required: true, + Description: "Required. A GitLab personal access token with the minimum `read_api` scope access.", + MaxItems: 1, + Elem: Cloudbuildv2ConnectionGitlabConfigReadAuthorizerCredentialSchema(), + }, + + "webhook_secret_secret_version": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. Immutable. 
SecretManager resource containing the webhook secret of a GitLab Enterprise project, formatted as `projects/*/secrets/*/versions/*`.", + }, + + "host_uri": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "The URI of the GitLab Enterprise host this connection is for. If not specified, the default value is https://gitlab.com.", + }, + + "service_directory_config": { + Type: schema.TypeList, + Optional: true, + Description: "Configuration for using Service Directory to privately connect to a GitLab Enterprise server. This should only be set if the GitLab Enterprise server is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the GitLab Enterprise server will be made over the public internet.", + MaxItems: 1, + Elem: Cloudbuildv2ConnectionGitlabConfigServiceDirectoryConfigSchema(), + }, + + "ssl_ca": { + Type: schema.TypeString, + Optional: true, + Description: "SSL certificate to use for requests to GitLab Enterprise.", + }, + + "server_version": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Version of the GitLab Enterprise server running on the `host_uri`.", + }, + }, + } +} + +func Cloudbuildv2ConnectionGitlabConfigAuthorizerCredentialSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "user_token_secret_version": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.", + }, + + "username": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
The username associated to this token.", + }, + }, + } +} + +func Cloudbuildv2ConnectionGitlabConfigReadAuthorizerCredentialSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "user_token_secret_version": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.", + }, + + "username": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The username associated to this token.", + }, + }, + } +} + +func Cloudbuildv2ConnectionGitlabConfigServiceDirectoryConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. The Service Directory service name. Format: projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.", + }, + }, + } +} + +func Cloudbuildv2ConnectionInstallationStateSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action_uri": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Link to follow for next action. Empty string if the installation is already complete.", + }, + + "message": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Message of what the user should do next to continue the installation. Empty string if the installation is already complete.", + }, + + "stage": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Current step of the installation process. 
Possible values: STAGE_UNSPECIFIED, PENDING_CREATE_APP, PENDING_USER_OAUTH, PENDING_INSTALL_APP, COMPLETE", + }, + }, + } +} + +func resourceCloudbuildv2ConnectionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &cloudbuildv2.Connection{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), + Disabled: dcl.Bool(d.Get("disabled").(bool)), + GithubConfig: expandCloudbuildv2ConnectionGithubConfig(d.Get("github_config")), + GithubEnterpriseConfig: expandCloudbuildv2ConnectionGithubEnterpriseConfig(d.Get("github_enterprise_config")), + GitlabConfig: expandCloudbuildv2ConnectionGitlabConfig(d.Get("gitlab_config")), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLCloudbuildv2Client(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyConnection(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Connection: %s", err) + } + + log.Printf("[DEBUG] Finished creating Connection %q: %#v", d.Id(), res) + + return resourceCloudbuildv2ConnectionRead(d, meta) +} + +func resourceCloudbuildv2ConnectionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &cloudbuildv2.Connection{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), + Disabled: dcl.Bool(d.Get("disabled").(bool)), + GithubConfig: expandCloudbuildv2ConnectionGithubConfig(d.Get("github_config")), + GithubEnterpriseConfig: expandCloudbuildv2ConnectionGithubEnterpriseConfig(d.Get("github_enterprise_config")), + GitlabConfig: expandCloudbuildv2ConnectionGitlabConfig(d.Get("gitlab_config")), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLCloudbuildv2Client(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetConnection(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("Cloudbuildv2Connection %q", d.Id()) + return 
tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("disabled", res.Disabled); err != nil { + return fmt.Errorf("error setting disabled in state: %s", err) + } + if err = d.Set("github_config", flattenCloudbuildv2ConnectionGithubConfig(res.GithubConfig)); err != nil { + return fmt.Errorf("error setting github_config in state: %s", err) + } + if err = d.Set("github_enterprise_config", flattenCloudbuildv2ConnectionGithubEnterpriseConfig(res.GithubEnterpriseConfig)); err != nil { + return fmt.Errorf("error setting github_enterprise_config in state: %s", err) + } + if err = d.Set("gitlab_config", flattenCloudbuildv2ConnectionGitlabConfig(res.GitlabConfig)); err != nil { + return fmt.Errorf("error setting gitlab_config in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("installation_state", flattenCloudbuildv2ConnectionInstallationState(res.InstallationState)); err != nil { + return fmt.Errorf("error setting installation_state in state: %s", err) + } + if err = d.Set("reconciling", res.Reconciling); err != nil { + return fmt.Errorf("error setting reconciling in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return 
nil +} +func resourceCloudbuildv2ConnectionUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &cloudbuildv2.Connection{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), + Disabled: dcl.Bool(d.Get("disabled").(bool)), + GithubConfig: expandCloudbuildv2ConnectionGithubConfig(d.Get("github_config")), + GithubEnterpriseConfig: expandCloudbuildv2ConnectionGithubEnterpriseConfig(d.Get("github_enterprise_config")), + GitlabConfig: expandCloudbuildv2ConnectionGitlabConfig(d.Get("gitlab_config")), + Project: dcl.String(project), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLCloudbuildv2Client(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyConnection(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating Connection: %s", err) + } + + log.Printf("[DEBUG] Finished creating Connection %q: %#v", d.Id(), res) + + return resourceCloudbuildv2ConnectionRead(d, meta) +} + +func resourceCloudbuildv2ConnectionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &cloudbuildv2.Connection{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), + Disabled: dcl.Bool(d.Get("disabled").(bool)), + GithubConfig: expandCloudbuildv2ConnectionGithubConfig(d.Get("github_config")), + GithubEnterpriseConfig: expandCloudbuildv2ConnectionGithubEnterpriseConfig(d.Get("github_enterprise_config")), + GitlabConfig: expandCloudbuildv2ConnectionGitlabConfig(d.Get("gitlab_config")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Connection %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLCloudbuildv2Client(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteConnection(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting 
Connection: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Connection %q", d.Id()) + return nil +} + +func resourceCloudbuildv2ConnectionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/connections/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandCloudbuildv2ConnectionGithubConfig(o interface{}) *cloudbuildv2.ConnectionGithubConfig { + if o == nil { + return cloudbuildv2.EmptyConnectionGithubConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return cloudbuildv2.EmptyConnectionGithubConfig + } + obj := objArr[0].(map[string]interface{}) + return &cloudbuildv2.ConnectionGithubConfig{ + AppInstallationId: dcl.Int64(int64(obj["app_installation_id"].(int))), + AuthorizerCredential: expandCloudbuildv2ConnectionGithubConfigAuthorizerCredential(obj["authorizer_credential"]), + } +} + +func flattenCloudbuildv2ConnectionGithubConfig(obj *cloudbuildv2.ConnectionGithubConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "app_installation_id": obj.AppInstallationId, + "authorizer_credential": flattenCloudbuildv2ConnectionGithubConfigAuthorizerCredential(obj.AuthorizerCredential), + } + + return []interface{}{transformed} + +} + +func expandCloudbuildv2ConnectionGithubConfigAuthorizerCredential(o interface{}) *cloudbuildv2.ConnectionGithubConfigAuthorizerCredential { + if o == nil { + return 
cloudbuildv2.EmptyConnectionGithubConfigAuthorizerCredential + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return cloudbuildv2.EmptyConnectionGithubConfigAuthorizerCredential + } + obj := objArr[0].(map[string]interface{}) + return &cloudbuildv2.ConnectionGithubConfigAuthorizerCredential{ + OAuthTokenSecretVersion: dcl.String(obj["oauth_token_secret_version"].(string)), + } +} + +func flattenCloudbuildv2ConnectionGithubConfigAuthorizerCredential(obj *cloudbuildv2.ConnectionGithubConfigAuthorizerCredential) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "oauth_token_secret_version": obj.OAuthTokenSecretVersion, + "username": obj.Username, + } + + return []interface{}{transformed} + +} + +func expandCloudbuildv2ConnectionGithubEnterpriseConfig(o interface{}) *cloudbuildv2.ConnectionGithubEnterpriseConfig { + if o == nil { + return cloudbuildv2.EmptyConnectionGithubEnterpriseConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return cloudbuildv2.EmptyConnectionGithubEnterpriseConfig + } + obj := objArr[0].(map[string]interface{}) + return &cloudbuildv2.ConnectionGithubEnterpriseConfig{ + HostUri: dcl.String(obj["host_uri"].(string)), + AppId: dcl.Int64(int64(obj["app_id"].(int))), + AppInstallationId: dcl.Int64(int64(obj["app_installation_id"].(int))), + AppSlug: dcl.String(obj["app_slug"].(string)), + PrivateKeySecretVersion: dcl.String(obj["private_key_secret_version"].(string)), + ServiceDirectoryConfig: expandCloudbuildv2ConnectionGithubEnterpriseConfigServiceDirectoryConfig(obj["service_directory_config"]), + SslCa: dcl.String(obj["ssl_ca"].(string)), + WebhookSecretSecretVersion: dcl.String(obj["webhook_secret_secret_version"].(string)), + } +} + +func flattenCloudbuildv2ConnectionGithubEnterpriseConfig(obj *cloudbuildv2.ConnectionGithubEnterpriseConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := 
map[string]interface{}{ + "host_uri": obj.HostUri, + "app_id": obj.AppId, + "app_installation_id": obj.AppInstallationId, + "app_slug": obj.AppSlug, + "private_key_secret_version": obj.PrivateKeySecretVersion, + "service_directory_config": flattenCloudbuildv2ConnectionGithubEnterpriseConfigServiceDirectoryConfig(obj.ServiceDirectoryConfig), + "ssl_ca": obj.SslCa, + "webhook_secret_secret_version": obj.WebhookSecretSecretVersion, + } + + return []interface{}{transformed} + +} + +func expandCloudbuildv2ConnectionGithubEnterpriseConfigServiceDirectoryConfig(o interface{}) *cloudbuildv2.ConnectionGithubEnterpriseConfigServiceDirectoryConfig { + if o == nil { + return cloudbuildv2.EmptyConnectionGithubEnterpriseConfigServiceDirectoryConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return cloudbuildv2.EmptyConnectionGithubEnterpriseConfigServiceDirectoryConfig + } + obj := objArr[0].(map[string]interface{}) + return &cloudbuildv2.ConnectionGithubEnterpriseConfigServiceDirectoryConfig{ + Service: dcl.String(obj["service"].(string)), + } +} + +func flattenCloudbuildv2ConnectionGithubEnterpriseConfigServiceDirectoryConfig(obj *cloudbuildv2.ConnectionGithubEnterpriseConfigServiceDirectoryConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "service": obj.Service, + } + + return []interface{}{transformed} + +} + +func expandCloudbuildv2ConnectionGitlabConfig(o interface{}) *cloudbuildv2.ConnectionGitlabConfig { + if o == nil { + return cloudbuildv2.EmptyConnectionGitlabConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return cloudbuildv2.EmptyConnectionGitlabConfig + } + obj := objArr[0].(map[string]interface{}) + return &cloudbuildv2.ConnectionGitlabConfig{ + AuthorizerCredential: expandCloudbuildv2ConnectionGitlabConfigAuthorizerCredential(obj["authorizer_credential"]), + ReadAuthorizerCredential: 
expandCloudbuildv2ConnectionGitlabConfigReadAuthorizerCredential(obj["read_authorizer_credential"]), + WebhookSecretSecretVersion: dcl.String(obj["webhook_secret_secret_version"].(string)), + HostUri: dcl.StringOrNil(obj["host_uri"].(string)), + ServiceDirectoryConfig: expandCloudbuildv2ConnectionGitlabConfigServiceDirectoryConfig(obj["service_directory_config"]), + SslCa: dcl.String(obj["ssl_ca"].(string)), + } +} + +func flattenCloudbuildv2ConnectionGitlabConfig(obj *cloudbuildv2.ConnectionGitlabConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "authorizer_credential": flattenCloudbuildv2ConnectionGitlabConfigAuthorizerCredential(obj.AuthorizerCredential), + "read_authorizer_credential": flattenCloudbuildv2ConnectionGitlabConfigReadAuthorizerCredential(obj.ReadAuthorizerCredential), + "webhook_secret_secret_version": obj.WebhookSecretSecretVersion, + "host_uri": obj.HostUri, + "service_directory_config": flattenCloudbuildv2ConnectionGitlabConfigServiceDirectoryConfig(obj.ServiceDirectoryConfig), + "ssl_ca": obj.SslCa, + "server_version": obj.ServerVersion, + } + + return []interface{}{transformed} + +} + +func expandCloudbuildv2ConnectionGitlabConfigAuthorizerCredential(o interface{}) *cloudbuildv2.ConnectionGitlabConfigAuthorizerCredential { + if o == nil { + return cloudbuildv2.EmptyConnectionGitlabConfigAuthorizerCredential + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return cloudbuildv2.EmptyConnectionGitlabConfigAuthorizerCredential + } + obj := objArr[0].(map[string]interface{}) + return &cloudbuildv2.ConnectionGitlabConfigAuthorizerCredential{ + UserTokenSecretVersion: dcl.String(obj["user_token_secret_version"].(string)), + } +} + +func flattenCloudbuildv2ConnectionGitlabConfigAuthorizerCredential(obj *cloudbuildv2.ConnectionGitlabConfigAuthorizerCredential) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := 
map[string]interface{}{ + "user_token_secret_version": obj.UserTokenSecretVersion, + "username": obj.Username, + } + + return []interface{}{transformed} + +} + +func expandCloudbuildv2ConnectionGitlabConfigReadAuthorizerCredential(o interface{}) *cloudbuildv2.ConnectionGitlabConfigReadAuthorizerCredential { + if o == nil { + return cloudbuildv2.EmptyConnectionGitlabConfigReadAuthorizerCredential + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return cloudbuildv2.EmptyConnectionGitlabConfigReadAuthorizerCredential + } + obj := objArr[0].(map[string]interface{}) + return &cloudbuildv2.ConnectionGitlabConfigReadAuthorizerCredential{ + UserTokenSecretVersion: dcl.String(obj["user_token_secret_version"].(string)), + } +} + +func flattenCloudbuildv2ConnectionGitlabConfigReadAuthorizerCredential(obj *cloudbuildv2.ConnectionGitlabConfigReadAuthorizerCredential) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "user_token_secret_version": obj.UserTokenSecretVersion, + "username": obj.Username, + } + + return []interface{}{transformed} + +} + +func expandCloudbuildv2ConnectionGitlabConfigServiceDirectoryConfig(o interface{}) *cloudbuildv2.ConnectionGitlabConfigServiceDirectoryConfig { + if o == nil { + return cloudbuildv2.EmptyConnectionGitlabConfigServiceDirectoryConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return cloudbuildv2.EmptyConnectionGitlabConfigServiceDirectoryConfig + } + obj := objArr[0].(map[string]interface{}) + return &cloudbuildv2.ConnectionGitlabConfigServiceDirectoryConfig{ + Service: dcl.String(obj["service"].(string)), + } +} + +func flattenCloudbuildv2ConnectionGitlabConfigServiceDirectoryConfig(obj *cloudbuildv2.ConnectionGitlabConfigServiceDirectoryConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "service": obj.Service, + } + + return 
[]interface{}{transformed} + +} + +func flattenCloudbuildv2ConnectionInstallationState(obj *cloudbuildv2.ConnectionInstallationState) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "action_uri": obj.ActionUri, + "message": obj.Message, + "stage": obj.Stage, + } + + return []interface{}{transformed} + +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2/resource_cloudbuildv2_connection_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2/resource_cloudbuildv2_connection_sweeper.go new file mode 100644 index 0000000000..6694cbaf9f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2/resource_cloudbuildv2_connection_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package cloudbuildv2 + +import ( + "context" + "log" + "testing" + + cloudbuildv2 "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("Cloudbuildv2Connection", testSweepCloudbuildv2Connection) +} + +func testSweepCloudbuildv2Connection(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for Cloudbuildv2Connection") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLCloudbuildv2Client(config, config.UserAgent, "", 0) + err = client.DeleteAllConnection(context.Background(), d["project"], d["location"], isDeletableCloudbuildv2Connection) + if err != nil { + return err + } + return nil +} + +func isDeletableCloudbuildv2Connection(r *cloudbuildv2.Connection) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2/resource_cloudbuildv2_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2/resource_cloudbuildv2_repository.go new file mode 100644 index 0000000000..667dbda998 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2/resource_cloudbuildv2_repository.go @@ -0,0 +1,303 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package cloudbuildv2 + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + cloudbuildv2 "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceCloudbuildv2Repository() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudbuildv2RepositoryCreate, + Read: resourceCloudbuildv2RepositoryRead, + Delete: resourceCloudbuildv2RepositoryDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudbuildv2RepositoryImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the repository.", + }, + + "parent_connection": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The connection for the resource", + }, + + "remote_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. 
Git Clone HTTPS URI.", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Allows clients to store small amounts of arbitrary data.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Server assigned timestamp for when the connection was created.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Server assigned timestamp for when the connection was updated.", + }, + }, + } +} + +func resourceCloudbuildv2RepositoryCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &cloudbuildv2.Repository{ + Name: dcl.String(d.Get("name").(string)), + Connection: dcl.String(d.Get("parent_connection").(string)), + RemoteUri: dcl.String(d.Get("remote_uri").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), + Location: dcl.StringOrNil(d.Get("location").(string)), + Project: dcl.String(project), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLCloudbuildv2Client(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyRepository(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Repository: %s", err) + } + + log.Printf("[DEBUG] Finished creating Repository %q: %#v", d.Id(), res) + + return resourceCloudbuildv2RepositoryRead(d, meta) +} + +func resourceCloudbuildv2RepositoryRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &cloudbuildv2.Repository{ + Name: dcl.String(d.Get("name").(string)), + Connection: dcl.String(d.Get("parent_connection").(string)), + RemoteUri: dcl.String(d.Get("remote_uri").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), + Location: dcl.StringOrNil(d.Get("location").(string)), + Project: dcl.String(project), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLCloudbuildv2Client(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetRepository(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("Cloudbuildv2Repository %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("parent_connection", 
res.Connection); err != nil { + return fmt.Errorf("error setting parent_connection in state: %s", err) + } + if err = d.Set("remote_uri", res.RemoteUri); err != nil { + return fmt.Errorf("error setting remote_uri in state: %s", err) + } + if err = d.Set("annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} + +func resourceCloudbuildv2RepositoryDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &cloudbuildv2.Repository{ + Name: dcl.String(d.Get("name").(string)), + Connection: dcl.String(d.Get("parent_connection").(string)), + RemoteUri: dcl.String(d.Get("remote_uri").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), + Location: dcl.StringOrNil(d.Get("location").(string)), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Repository %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := 
transport_tpg.NewDCLCloudbuildv2Client(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteRepository(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Repository: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Repository %q", d.Id()) + return nil +} + +func resourceCloudbuildv2RepositoryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/connections/(?P[^/]+)/repositories/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/connections/{{parent_connection}}/repositories/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go new file mode 100644 index 0000000000..aa72917931 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_delivery_pipeline.go @@ -0,0 +1,1320 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package clouddeploy + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceClouddeployDeliveryPipeline() *schema.Resource { + return &schema.Resource{ + Create: resourceClouddeployDeliveryPipelineCreate, + Read: resourceClouddeployDeliveryPipelineRead, + Update: resourceClouddeployDeliveryPipelineUpdate, + Delete: resourceClouddeployDeliveryPipelineDelete, + + Importer: &schema.ResourceImporter{ + State: resourceClouddeployDeliveryPipelineImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: 
map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the `DeliveryPipeline`. Format is [a-z][a-z0-9\\-]{0,62}.", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "User annotations. These attributes can only be set and used by the user, and not by Google Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Description of the `DeliveryPipeline`. Max length is 255 characters.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Labels are attributes that can be set and used by both the user and by Google Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. 
Both keys and values are additionally constrained to be <= 128 bytes.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "serial_pipeline": { + Type: schema.TypeList, + Optional: true, + Description: "SerialPipeline defines a sequential set of stages for a `DeliveryPipeline`.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineSchema(), + }, + + "suspended": { + Type: schema.TypeBool, + Optional: true, + Description: "When suspended, no new releases or rollouts can be created, but in-progress ones will complete.", + }, + + "condition": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Information around the state of the Delivery Pipeline.", + Elem: ClouddeployDeliveryPipelineConditionSchema(), + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Time at which the pipeline was created.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Unique identifier of the `DeliveryPipeline`.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Most recent time at which the pipeline was updated.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "stages": { + Type: schema.TypeList, + Optional: true, + Description: "Each stage specifies configuration for a `Target`. 
The ordering of this list defines the promotion flow.", + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deploy_parameters": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The deploy parameters to use for the target in this stage.", + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersSchema(), + }, + + "profiles": { + Type: schema.TypeList, + Optional: true, + Description: "Skaffold profiles to use when rendering the manifest for this stage's `Target`.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "strategy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The strategy to use for a `Rollout` to this stage.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategySchema(), + }, + + "target_id": { + Type: schema.TypeString, + Optional: true, + Description: "The target_id to which this stage points. This field refers exclusively to the last segment of a target name. For example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`). The location of the `Target` is inferred to be the same as the location of the `DeliveryPipeline` that contains this `Stage`.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "values": { + Type: schema.TypeMap, + Required: true, + Description: "Required. Values are deploy parameters in key-value pairs.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "match_target_labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Deploy parameters are applied to targets with match labels. 
If unspecified, deploy parameters are applied to all targets (including child targets of a multi-target).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "canary": { + Type: schema.TypeList, + Optional: true, + Description: "Canary deployment strategy provides progressive percentage based deployments to a Target.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanarySchema(), + }, + + "standard": { + Type: schema.TypeList, + Optional: true, + Description: "Standard deployment strategy executes a single deploy and allows verifying the deployment.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanarySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "canary_deployment": { + Type: schema.TypeList, + Optional: true, + Description: "Configures the progressive based deployment for a Target.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSchema(), + }, + + "custom_canary_deployment": { + Type: schema.TypeList, + Optional: true, + Description: "Configures the progressive based deployment for a Target, but allows customizing at the phase level where a phase represents each of the percentage deployments.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSchema(), + }, + + "runtime_config": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Runtime specific configurations for the deployment strategy. 
The runtime configuration is used to determine how Cloud Deploy will split traffic to enable a progressive deployment.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeploymentSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "percentages": { + Type: schema.TypeList, + Required: true, + Description: "Required. The percentage based deployments that will occur as a part of a `Rollout`. List is expected in ascending order and each integer n is 0 <= n < 100.", + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + + "verify": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to run verify tests after each percentage deployment.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "phase_configs": { + Type: schema.TypeList, + Required: true, + Description: "Required. Configuration for each phase in the canary deployment in the order executed.", + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "percentage": { + Type: schema.TypeInt, + Required: true, + Description: "Required. Percentage deployment for the phase.", + }, + + "phase_id": { + Type: schema.TypeString, + Required: true, + Description: "Required. The ID to assign to the `Rollout` phase. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. 
In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", + }, + + "profiles": { + Type: schema.TypeList, + Optional: true, + Description: "Skaffold profiles to use when rendering the manifest for this phase. These are in addition to the profiles list specified in the `DeliveryPipeline` stage.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "verify": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to run verify tests after the deployment.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_run": { + Type: schema.TypeList, + Optional: true, + Description: "Cloud Run runtime configuration.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSchema(), + }, + + "kubernetes": { + Type: schema.TypeList, + Optional: true, + Description: "Kubernetes runtime configuration.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRunSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "automatic_traffic_control": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether Cloud Deploy should update the traffic stanza in a Cloud Run Service on the user's behalf to facilitate traffic splitting. 
This is required to be true for CanaryDeployments, but optional for CustomCanaryDeployments.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gateway_service_mesh": { + Type: schema.TypeList, + Optional: true, + Description: "Kubernetes Gateway API service mesh configuration.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSchema(), + }, + + "service_networking": { + Type: schema.TypeList, + Optional: true, + Description: "Kubernetes Service networking configuration.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployment": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Kubernetes Deployment whose traffic is managed by the specified HTTPRoute and Service.", + }, + + "http_route": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Gateway API HTTPRoute.", + }, + + "service": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Kubernetes Service.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworkingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployment": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Kubernetes Deployment whose traffic is managed by the specified Service.", + }, + + "service": { + Type: schema.TypeString, + Required: true, + Description: "Required. 
Name of the Kubernetes Service.", + }, + + "disable_pod_overprovisioning": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Whether to disable Pod overprovisioning. If Pod overprovisioning is disabled then Cloud Deploy will limit the number of total Pods used for the deployment strategy to the number of Pods the Deployment has on the cluster.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "verify": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to verify a deployment.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineConditionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pipeline_ready_condition": { + Type: schema.TypeList, + Computed: true, + Description: "Details around the Pipeline's overall status.", + Elem: ClouddeployDeliveryPipelineConditionPipelineReadyConditionSchema(), + }, + + "targets_present_condition": { + Type: schema.TypeList, + Computed: true, + Description: "Details around targets enumerated in the pipeline.", + Elem: ClouddeployDeliveryPipelineConditionTargetsPresentConditionSchema(), + }, + + "targets_type_condition": { + Type: schema.TypeList, + Computed: true, + Description: "Details on the whether the targets enumerated in the pipeline are of the same type.", + Elem: ClouddeployDeliveryPipelineConditionTargetsTypeConditionSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineConditionPipelineReadyConditionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "True if the Pipeline is in a valid state. Otherwise at least one condition in `PipelineCondition` is in an invalid state. 
Iterate over those conditions and see which condition(s) has status = false to find out what is wrong with the Pipeline.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last time the condition was updated.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineConditionTargetsPresentConditionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "missing_targets": { + Type: schema.TypeList, + Computed: true, + Description: "The list of Target names that are missing. For example, projects/{project_id}/locations/{location_name}/targets/{target_name}.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "True if there aren't any missing Targets.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Last time the condition was updated.", + }, + }, + } +} + +func ClouddeployDeliveryPipelineConditionTargetsTypeConditionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "error_details": { + Type: schema.TypeString, + Computed: true, + Description: "Human readable error message.", + }, + + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "True if the targets are all a comparable type. For example this is true if all targets are GKE clusters. 
This is false if some targets are Cloud Run targets and others are GKE clusters.", + }, + }, + } +} + +func resourceClouddeployDeliveryPipelineCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &clouddeploy.DeliveryPipeline{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), + Description: dcl.String(d.Get("description").(string)), + Labels: tpgresource.CheckStringMap(d.Get("labels")), + Project: dcl.String(project), + SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), + Suspended: dcl.Bool(d.Get("suspended").(bool)), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyDeliveryPipeline(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating DeliveryPipeline: %s", err) + } + + log.Printf("[DEBUG] Finished creating DeliveryPipeline %q: %#v", d.Id(), res) + + return resourceClouddeployDeliveryPipelineRead(d, meta) +} + +func resourceClouddeployDeliveryPipelineRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &clouddeploy.DeliveryPipeline{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), + Description: dcl.String(d.Get("description").(string)), + Labels: tpgresource.CheckStringMap(d.Get("labels")), + Project: dcl.String(project), + SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), + Suspended: dcl.Bool(d.Get("suspended").(bool)), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetDeliveryPipeline(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ClouddeployDeliveryPipeline %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, 
resourceName) + } + + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("labels", res.Labels); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("serial_pipeline", flattenClouddeployDeliveryPipelineSerialPipeline(res.SerialPipeline)); err != nil { + return fmt.Errorf("error setting serial_pipeline in state: %s", err) + } + if err = d.Set("suspended", res.Suspended); err != nil { + return fmt.Errorf("error setting suspended in state: %s", err) + } + if err = d.Set("condition", flattenClouddeployDeliveryPipelineCondition(res.Condition)); err != nil { + return fmt.Errorf("error setting condition in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceClouddeployDeliveryPipelineUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + 
obj := &clouddeploy.DeliveryPipeline{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), + Description: dcl.String(d.Get("description").(string)), + Labels: tpgresource.CheckStringMap(d.Get("labels")), + Project: dcl.String(project), + SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), + Suspended: dcl.Bool(d.Get("suspended").(bool)), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyDeliveryPipeline(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating DeliveryPipeline: %s", err) + } + + log.Printf("[DEBUG] Finished creating DeliveryPipeline %q: %#v", d.Id(), res) + + return resourceClouddeployDeliveryPipelineRead(d, meta) +} + +func resourceClouddeployDeliveryPipelineDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + obj := &clouddeploy.DeliveryPipeline{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), + Description: dcl.String(d.Get("description").(string)), + Labels: tpgresource.CheckStringMap(d.Get("labels")), + Project: dcl.String(project), + SerialPipeline: expandClouddeployDeliveryPipelineSerialPipeline(d.Get("serial_pipeline")), + Suspended: dcl.Bool(d.Get("suspended").(bool)), + } + + log.Printf("[DEBUG] Deleting DeliveryPipeline %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteDeliveryPipeline(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting DeliveryPipeline: %s", err) + } 
+ + log.Printf("[DEBUG] Finished deleting DeliveryPipeline %q", d.Id()) + return nil +} + +func resourceClouddeployDeliveryPipelineImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/deliveryPipelines/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/deliveryPipelines/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandClouddeployDeliveryPipelineSerialPipeline(o interface{}) *clouddeploy.DeliveryPipelineSerialPipeline { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipeline + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipeline + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipeline{ + Stages: expandClouddeployDeliveryPipelineSerialPipelineStagesArray(obj["stages"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipeline(obj *clouddeploy.DeliveryPipelineSerialPipeline) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "stages": flattenClouddeployDeliveryPipelineSerialPipelineStagesArray(obj.Stages), + } + + return []interface{}{transformed} + +} +func expandClouddeployDeliveryPipelineSerialPipelineStagesArray(o interface{}) []clouddeploy.DeliveryPipelineSerialPipelineStages { + if o == nil { + return make([]clouddeploy.DeliveryPipelineSerialPipelineStages, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return 
make([]clouddeploy.DeliveryPipelineSerialPipelineStages, 0) + } + + items := make([]clouddeploy.DeliveryPipelineSerialPipelineStages, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployDeliveryPipelineSerialPipelineStages(item) + items = append(items, *i) + } + + return items +} + +func expandClouddeployDeliveryPipelineSerialPipelineStages(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStages { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStages + } + + obj := o.(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStages{ + DeployParameters: expandClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersArray(obj["deploy_parameters"]), + Profiles: tpgdclresource.ExpandStringArray(obj["profiles"]), + Strategy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategy(obj["strategy"]), + TargetId: dcl.String(obj["target_id"].(string)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesArray(objs []clouddeploy.DeliveryPipelineSerialPipelineStages) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployDeliveryPipelineSerialPipelineStages(&item) + items = append(items, i) + } + + return items +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStages(obj *clouddeploy.DeliveryPipelineSerialPipelineStages) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "deploy_parameters": flattenClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersArray(obj.DeployParameters), + "profiles": obj.Profiles, + "strategy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategy(obj.Strategy), + "target_id": obj.TargetId, + } + + return transformed + +} +func expandClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersArray(o interface{}) []clouddeploy.DeliveryPipelineSerialPipelineStagesDeployParameters { + if o == 
nil { + return make([]clouddeploy.DeliveryPipelineSerialPipelineStagesDeployParameters, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]clouddeploy.DeliveryPipelineSerialPipelineStagesDeployParameters, 0) + } + + items := make([]clouddeploy.DeliveryPipelineSerialPipelineStagesDeployParameters, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployDeliveryPipelineSerialPipelineStagesDeployParameters(item) + items = append(items, *i) + } + + return items +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesDeployParameters(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStagesDeployParameters { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesDeployParameters + } + + obj := o.(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesDeployParameters{ + Values: tpgresource.CheckStringMap(obj["values"]), + MatchTargetLabels: tpgresource.CheckStringMap(obj["match_target_labels"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesDeployParametersArray(objs []clouddeploy.DeliveryPipelineSerialPipelineStagesDeployParameters) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployDeliveryPipelineSerialPipelineStagesDeployParameters(&item) + items = append(items, i) + } + + return items +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesDeployParameters(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesDeployParameters) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "values": obj.Values, + "match_target_labels": obj.MatchTargetLabels, + } + + return transformed + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategy(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategy { + if o == nil { + return 
clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategy + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesStrategy{ + Canary: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanary(obj["canary"]), + Standard: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(obj["standard"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategy(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "canary": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanary(obj.Canary), + "standard": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(obj.Standard), + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanary(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanary { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanary + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanary{ + CanaryDeployment: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(obj["canary_deployment"]), + CustomCanaryDeployment: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(obj["custom_canary_deployment"]), + RuntimeConfig: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(obj["runtime_config"]), + } +} + +func 
flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanary(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanary) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "canary_deployment": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(obj.CanaryDeployment), + "custom_canary_deployment": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(obj.CustomCanaryDeployment), + "runtime_config": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(obj.RuntimeConfig), + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment{ + Percentages: tpgdclresource.ExpandIntegerArray(obj["percentages"]), + Verify: dcl.Bool(obj["verify"].(bool)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCanaryDeployment) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "percentages": obj.Percentages, + "verify": obj.Verify, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(o interface{}) 
*clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment{ + PhaseConfigs: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsArray(obj["phase_configs"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeployment) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "phase_configs": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsArray(obj.PhaseConfigs), + } + + return []interface{}{transformed} + +} +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsArray(o interface{}) []clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if o == nil { + return make([]clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0) + } + + items := make([]clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs, 0, len(objs)) + for _, item := range objs { + i := expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(item) 
+ items = append(items, *i) + } + + return items +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs + } + + obj := o.(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs{ + Percentage: dcl.Int64(int64(obj["percentage"].(int))), + PhaseId: dcl.String(obj["phase_id"].(string)), + Profiles: tpgdclresource.ExpandStringArray(obj["profiles"]), + Verify: dcl.Bool(obj["verify"].(bool)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigsArray(objs []clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(&item) + items = append(items, i) + } + + return items +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryCustomCanaryDeploymentPhaseConfigs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "percentage": obj.Percentage, + "phase_id": obj.PhaseId, + "profiles": obj.Profiles, + "verify": obj.Verify, + } + + return transformed + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig { + if o == nil { + return 
clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig{ + CloudRun: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(obj["cloud_run"]), + Kubernetes: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(obj["kubernetes"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cloud_run": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(obj.CloudRun), + "kubernetes": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(obj.Kubernetes), + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun{ + AutomaticTrafficControl: dcl.Bool(obj["automatic_traffic_control"].(bool)), + } +} + +func 
flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigCloudRun) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "automatic_traffic_control": obj.AutomaticTrafficControl, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes{ + GatewayServiceMesh: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(obj["gateway_service_mesh"]), + ServiceNetworking: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(obj["service_networking"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetes) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "gateway_service_mesh": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(obj.GatewayServiceMesh), + "service_networking": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(obj.ServiceNetworking), + } + + return 
[]interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh{ + Deployment: dcl.String(obj["deployment"].(string)), + HttpRoute: dcl.String(obj["http_route"].(string)), + Service: dcl.String(obj["service"].(string)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "deployment": obj.Deployment, + "http_route": obj.HttpRoute, + "service": obj.Service, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking + } + obj := 
objArr[0].(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking{ + Deployment: dcl.String(obj["deployment"].(string)), + Service: dcl.String(obj["service"].(string)), + DisablePodOverprovisioning: dcl.Bool(obj["disable_pod_overprovisioning"].(bool)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyCanaryRuntimeConfigKubernetesServiceNetworking) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "deployment": obj.Deployment, + "service": obj.Service, + "disable_pod_overprovisioning": obj.DisablePodOverprovisioning, + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyStandard { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyStandard{ + Verify: dcl.Bool(obj["verify"].(bool)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyStandard) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "verify": obj.Verify, + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineCondition(obj *clouddeploy.DeliveryPipelineCondition) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "pipeline_ready_condition": 
flattenClouddeployDeliveryPipelineConditionPipelineReadyCondition(obj.PipelineReadyCondition), + "targets_present_condition": flattenClouddeployDeliveryPipelineConditionTargetsPresentCondition(obj.TargetsPresentCondition), + "targets_type_condition": flattenClouddeployDeliveryPipelineConditionTargetsTypeCondition(obj.TargetsTypeCondition), + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineConditionPipelineReadyCondition(obj *clouddeploy.DeliveryPipelineConditionPipelineReadyCondition) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "status": obj.Status, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineConditionTargetsPresentCondition(obj *clouddeploy.DeliveryPipelineConditionTargetsPresentCondition) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "missing_targets": obj.MissingTargets, + "status": obj.Status, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} + +func flattenClouddeployDeliveryPipelineConditionTargetsTypeCondition(obj *clouddeploy.DeliveryPipelineConditionTargetsTypeCondition) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "error_details": obj.ErrorDetails, + "status": obj.Status, + } + + return []interface{}{transformed} + +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_delivery_pipeline_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_delivery_pipeline_sweeper.go new file mode 100644 index 0000000000..b1fa63a592 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_delivery_pipeline_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package clouddeploy + +import ( + "context" + "log" + "testing" + + clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ClouddeployDeliveryPipeline", testSweepClouddeployDeliveryPipeline) +} + +func testSweepClouddeployDeliveryPipeline(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ClouddeployDeliveryPipeline") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: 
%s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLClouddeployClient(config, config.UserAgent, "", 0) + err = client.DeleteAllDeliveryPipeline(context.Background(), d["project"], d["location"], isDeletableClouddeployDeliveryPipeline) + if err != nil { + return err + } + return nil +} + +func isDeletableClouddeployDeliveryPipeline(r *clouddeploy.DeliveryPipeline) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_clouddeploy_target.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_target.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_clouddeploy_target.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_target.go index 66368b5cd9..b6074a3eeb 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_clouddeploy_target.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_target.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package clouddeploy import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceClouddeployTarget() *schema.Resource { @@ -72,7 +79,14 @@ func ResourceClouddeployTarget() *schema.Resource { Description: "Information specifying an Anthos Cluster.", MaxItems: 1, Elem: ClouddeployTargetAnthosClusterSchema(), - ConflictsWith: []string{"gke"}, + ConflictsWith: []string{"gke", "run"}, + }, + + "deploy_parameters": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. The deploy parameters to use for this target.", + Elem: &schema.Schema{Type: schema.TypeString}, }, "description": { @@ -95,7 +109,7 @@ func ResourceClouddeployTarget() *schema.Resource { Description: "Information specifying a GKE Cluster.", MaxItems: 1, Elem: ClouddeployTargetGkeSchema(), - ConflictsWith: []string{"anthos_cluster"}, + ConflictsWith: []string{"anthos_cluster", "run"}, }, "labels": { @@ -110,7 +124,7 @@ func ResourceClouddeployTarget() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -120,6 +134,15 @@ func ResourceClouddeployTarget() *schema.Resource { Description: "Optional. 
Whether or not the `Target` requires approval.", }, + "run": { + Type: schema.TypeList, + Optional: true, + Description: "Information specifying a Cloud Run deployment target.", + MaxItems: 1, + Elem: ClouddeployTargetRunSchema(), + ConflictsWith: []string{"gke", "anthos_cluster"}, + }, + "create_time": { Type: schema.TypeString, Computed: true, @@ -159,7 +182,7 @@ func ClouddeployTargetAnthosClusterSchema() *schema.Resource { "membership": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Membership of the GKE Hub-registered cluster to which to apply the Skaffold configuration. Format is `projects/{project}/locations/{location}/memberships/{membership_name}`.", }, }, @@ -200,7 +223,7 @@ func ClouddeployTargetExecutionConfigsSchema() *schema.Resource { "worker_pool": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. The resource name of the `WorkerPool`, with the format `projects/{project}/locations/{location}/workerPools/{worker_pool}`. If this optional field is unspecified, the default Cloud Build pool will be used.", }, }, @@ -213,7 +236,7 @@ func ClouddeployTargetGkeSchema() *schema.Resource { "cluster": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Information specifying a GKE Cluster. Format is `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}.", }, @@ -226,9 +249,21 @@ func ClouddeployTargetGkeSchema() *schema.Resource { } } +func ClouddeployTargetRunSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + Description: "Required. 
The location where the Cloud Run Service should be located. Format is `projects/{project}/locations/{location}`.", + }, + }, + } +} + func resourceClouddeployTargetCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -236,14 +271,16 @@ func resourceClouddeployTargetCreate(d *schema.ResourceData, meta interface{}) e obj := &clouddeploy.Target{ Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), AnthosCluster: expandClouddeployTargetAnthosCluster(d.Get("anthos_cluster")), + DeployParameters: tpgresource.CheckStringMap(d.Get("deploy_parameters")), Description: dcl.String(d.Get("description").(string)), ExecutionConfigs: expandClouddeployTargetExecutionConfigsArray(d.Get("execution_configs")), Gke: expandClouddeployTargetGke(d.Get("gke")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), } id, err := obj.ID() @@ -251,18 +288,18 @@ func resourceClouddeployTargetCreate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { 
billingProject = bp } - client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -284,8 +321,8 @@ func resourceClouddeployTargetCreate(d *schema.ResourceData, meta interface{}) e } func resourceClouddeployTargetRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -293,27 +330,29 @@ func resourceClouddeployTargetRead(d *schema.ResourceData, meta interface{}) err obj := &clouddeploy.Target{ Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), AnthosCluster: expandClouddeployTargetAnthosCluster(d.Get("anthos_cluster")), + DeployParameters: tpgresource.CheckStringMap(d.Get("deploy_parameters")), Description: dcl.String(d.Get("description").(string)), ExecutionConfigs: expandClouddeployTargetExecutionConfigsArray(d.Get("execution_configs")), Gke: expandClouddeployTargetGke(d.Get("gke")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject 
:= project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -322,7 +361,7 @@ func resourceClouddeployTargetRead(d *schema.ResourceData, meta interface{}) err res, err := client.GetTarget(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ClouddeployTarget %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("location", res.Location); err != nil { @@ -337,6 +376,9 @@ func resourceClouddeployTargetRead(d *schema.ResourceData, meta interface{}) err if err = d.Set("anthos_cluster", flattenClouddeployTargetAnthosCluster(res.AnthosCluster)); err != nil { return fmt.Errorf("error setting anthos_cluster in state: %s", err) } + if err = d.Set("deploy_parameters", res.DeployParameters); err != nil { + return fmt.Errorf("error setting deploy_parameters in state: %s", err) + } if err = d.Set("description", res.Description); err != nil { return fmt.Errorf("error setting description in state: %s", err) } @@ -355,6 +397,9 @@ func resourceClouddeployTargetRead(d *schema.ResourceData, meta interface{}) err if err = d.Set("require_approval", res.RequireApproval); err != nil { return fmt.Errorf("error setting require_approval in state: %s", err) } + if err = d.Set("run", flattenClouddeployTargetRun(res.Run)); err != nil { + 
return fmt.Errorf("error setting run in state: %s", err) + } if err = d.Set("create_time", res.CreateTime); err != nil { return fmt.Errorf("error setting create_time in state: %s", err) } @@ -374,8 +419,8 @@ func resourceClouddeployTargetRead(d *schema.ResourceData, meta interface{}) err return nil } func resourceClouddeployTargetUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -383,28 +428,30 @@ func resourceClouddeployTargetUpdate(d *schema.ResourceData, meta interface{}) e obj := &clouddeploy.Target{ Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), AnthosCluster: expandClouddeployTargetAnthosCluster(d.Get("anthos_cluster")), + DeployParameters: tpgresource.CheckStringMap(d.Get("deploy_parameters")), Description: dcl.String(d.Get("description").(string)), ExecutionConfigs: expandClouddeployTargetExecutionConfigsArray(d.Get("execution_configs")), Gke: expandClouddeployTargetGke(d.Get("gke")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - 
client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -426,8 +473,8 @@ func resourceClouddeployTargetUpdate(d *schema.ResourceData, meta interface{}) e } func resourceClouddeployTargetDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -435,28 +482,30 @@ func resourceClouddeployTargetDelete(d *schema.ResourceData, meta interface{}) e obj := &clouddeploy.Target{ Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), AnthosCluster: expandClouddeployTargetAnthosCluster(d.Get("anthos_cluster")), + DeployParameters: tpgresource.CheckStringMap(d.Get("deploy_parameters")), Description: dcl.String(d.Get("description").(string)), ExecutionConfigs: expandClouddeployTargetExecutionConfigsArray(d.Get("execution_configs")), Gke: expandClouddeployTargetGke(d.Get("gke")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), } log.Printf("[DEBUG] Deleting Target %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { 
return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLClouddeployClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -471,9 +520,9 @@ func resourceClouddeployTargetDelete(d *schema.ResourceData, meta interface{}) e } func resourceClouddeployTargetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/targets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -482,7 +531,7 @@ func resourceClouddeployTargetImport(d *schema.ResourceData, meta interface{}) ( } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/targets/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/targets/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -606,6 +655,32 @@ func flattenClouddeployTargetGke(obj *clouddeploy.TargetGke) interface{} { return []interface{}{transformed} +} + +func expandClouddeployTargetRun(o interface{}) *clouddeploy.TargetRun { + if o == nil { + return clouddeploy.EmptyTargetRun + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil 
{ + return clouddeploy.EmptyTargetRun + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.TargetRun{ + Location: dcl.String(obj["location"].(string)), + } +} + +func flattenClouddeployTargetRun(obj *clouddeploy.TargetRun) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "location": obj.Location, + } + + return []interface{}{transformed} + } func flattenClouddeployTargetExecutionConfigsUsagesArray(obj []clouddeploy.TargetExecutionConfigsUsagesEnum) interface{} { if obj == nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_target_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_target_sweeper.go new file mode 100644 index 0000000000..f1ab2d2f23 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/clouddeploy/resource_clouddeploy_target_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package clouddeploy + +import ( + "context" + "log" + "testing" + + clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ClouddeployTarget", testSweepClouddeployTarget) +} + +func testSweepClouddeployTarget(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ClouddeployTarget") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLClouddeployClient(config, config.UserAgent, "", 0) + err = client.DeleteAllTarget(context.Background(), d["project"], d["location"], isDeletableClouddeployTarget) + if err != nil { + return err + } + return nil +} + +func isDeletableClouddeployTarget(r *clouddeploy.Target) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/cloudfunctions_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/cloudfunctions_operation.go new file mode 100644 index 0000000000..ead74c81f3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/cloudfunctions_operation.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package cloudfunctions + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudfunctions/v1" +) + +type CloudFunctionsOperationWaiter struct { + Service *cloudfunctions.Service + tpgresource.CommonOperationWaiter +} + +func (w *CloudFunctionsOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + return w.Service.Operations.Get(w.Op.Name).Do() +} + +func CloudFunctionsOperationWait(config *transport_tpg.Config, op *cloudfunctions.Operation, activity, userAgent string, timeout time.Duration) error { + w := &CloudFunctionsOperationWaiter{ + Service: config.NewCloudFunctionsClient(userAgent), + } + if err := w.SetOp(op); err != nil { + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} + +func IsCloudFunctionsSourceCodeError(err error) (bool, string) { + if operr, ok := err.(*tpgresource.CommonOpError); ok { + if operr.Code == 3 && operr.Message == "Failed to retrieve function source code" { + return true, fmt.Sprintf("Retry on Function failing to pull code from GCS") + } + } + return false, "" +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/data_source_google_cloudfunctions_function.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/data_source_google_cloudfunctions_function.go new file mode 100644 index 0000000000..9414e533fb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/data_source_google_cloudfunctions_function.go @@ -0,0 +1,54 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package cloudfunctions + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleCloudFunctionsFunction() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceCloudFunctionsFunction().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project", "region") + + return &schema.Resource{ + Read: dataSourceGoogleCloudFunctionsFunctionRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleCloudFunctionsFunctionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + cloudFuncId := &CloudFunctionId{ + Project: project, + Region: region, + Name: d.Get("name").(string), + } + + d.SetId(cloudFuncId.CloudFunctionId()) + + err = resourceCloudFunctionsRead(d, meta) + if err != nil { + return err + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/iam_cloudfunctions_function.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/iam_cloudfunctions_function.go new file mode 100644 index 0000000000..62736ef7aa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/iam_cloudfunctions_function.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudfunctions + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var CloudFunctionsCloudFunctionIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "cloud_function": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type CloudFunctionsCloudFunctionIamUpdater struct { + project string + region string + cloudFunction string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func CloudFunctionsCloudFunctionIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + 
values["project"] = project + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + } + values["region"] = region + if v, ok := d.GetOk("cloud_function"); ok { + values["cloud_function"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("cloud_function").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudFunctionsCloudFunctionIamUpdater{ + project: values["project"], + region: values["region"], + cloudFunction: values["cloud_function"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", u.region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("cloud_function", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting cloud_function: %s", err) + } + + return u, nil +} + +func CloudFunctionsCloudFunctionIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + values["region"] = region + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := 
&CloudFunctionsCloudFunctionIamUpdater{ + project: values["project"], + region: values["region"], + cloudFunction: values["cloud_function"], + d: d, + Config: config, + } + if err := d.Set("cloud_function", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting cloud_function: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *CloudFunctionsCloudFunctionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyCloudFunctionUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *CloudFunctionsCloudFunctionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyCloudFunctionUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *CloudFunctionsCloudFunctionIamUpdater) qualifyCloudFunctionUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{CloudFunctionsBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/functions/%s", u.project, u.region, u.cloudFunction), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *CloudFunctionsCloudFunctionIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/functions/%s", u.project, u.region, u.cloudFunction) +} + +func (u *CloudFunctionsCloudFunctionIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-cloudfunctions-cloudfunction-%s", u.GetResourceId()) +} + +func (u *CloudFunctionsCloudFunctionIamUpdater) DescribeResource() string { + return fmt.Sprintf("cloudfunctions cloudfunction %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudfunctions_function.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/resource_cloudfunctions_function.go similarity index 91% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudfunctions_function.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/resource_cloudfunctions_function.go index 3edf0f2635..6cc6808d2e 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudfunctions_function.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/resource_cloudfunctions_function.go @@ -1,10 +1,17 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package cloudfunctions import ( "regexp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/cloudfunctions/v1" "fmt" @@ -26,13 +33,13 @@ var allowedVpcConnectorEgressSettings = []string{ "PRIVATE_RANGES_ONLY", } -type cloudFunctionId struct { +type CloudFunctionId struct { Project string Region string Name string } -func (s *cloudFunctionId) cloudFunctionId() string { +func (s *CloudFunctionId) CloudFunctionId() string { return fmt.Sprintf("projects/%s/locations/%s/functions/%s", s.Project, s.Region, s.Name) } @@ -53,19 +60,19 @@ func labelKeyValidator(val interface{}, key string) (warns []string, errs []erro return } -func (s *cloudFunctionId) locationId() string { +func (s *CloudFunctionId) locationId() string { return fmt.Sprintf("projects/%s/locations/%s", s.Project, s.Region) } -func parseCloudFunctionId(d *schema.ResourceData, config *Config) (*cloudFunctionId, error) { - if err := parseImportId([]string{ +func parseCloudFunctionId(d *schema.ResourceData, config *transport_tpg.Config) (*CloudFunctionId, error) { + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", }, d, config); err != nil { return nil, err } - return &cloudFunctionId{ + return &CloudFunctionId{ 
Project: d.Get("project").(string), Region: d.Get("region").(string), Name: d.Get("name").(string), @@ -76,7 +83,7 @@ func parseCloudFunctionId(d *schema.ResourceData, config *Config) (*cloudFunctio // at start/end func validateResourceCloudFunctionsFunctionName(v interface{}, k string) (ws []string, errors []error) { re := `^(?:[a-zA-Z](?:[-_a-zA-Z0-9]{0,61}[a-zA-Z0-9])?)$` - return validateRegexp(re)(v, k) + return verify.ValidateRegexp(re)(v, k) } func partsCompare(a, b, reg string) bool { @@ -109,7 +116,7 @@ func partsCompare(a, b, reg string) bool { return true } -// based on compareSelfLinkOrResourceName, but less reusable and allows multi-/ +// based on CompareSelfLinkOrResourceName, but less reusable and allows multi-/ // strings in the new state (config) part func compareSelfLinkOrResourceNameWithMultipleParts(_, old, new string, _ *schema.ResourceData) bool { // two formats based on expandEventTrigger() @@ -272,7 +279,7 @@ func ResourceCloudFunctionsFunction() *schema.Resource { "vpc_connector": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The VPC Network Connector that this cloud function can connect to. It can be either the fully-qualified URI, or the short name of the network connector resource. 
The format of this field is projects/*/locations/*/connectors/*.`, }, @@ -456,36 +463,41 @@ func ResourceCloudFunctionsFunction() *schema.Resource { }, }, }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: `Describes the current stage of a deployment.`, + }, }, UseJSONNumber: true, } } func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } - cloudFuncId := &cloudFunctionId{ + cloudFuncId := &CloudFunctionId{ Project: project, Region: region, Name: d.Get("name").(string), } function := &cloudfunctions.CloudFunction{ - Name: cloudFuncId.cloudFunctionId(), + Name: cloudFuncId.CloudFunctionId(), Runtime: d.Get("runtime").(string), ServiceAccountEmail: d.Get("service_account_email").(string), ForceSendFields: []string{}, @@ -549,15 +561,15 @@ func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) erro } if _, ok := d.GetOk("labels"); ok { - function.Labels = expandLabels(d) + function.Labels = tpgresource.ExpandLabels(d) } if _, ok := d.GetOk("environment_variables"); ok { - function.EnvironmentVariables = expandEnvironmentVariables(d) + function.EnvironmentVariables = tpgresource.ExpandEnvironmentVariables(d) } if _, ok := d.GetOk("build_environment_variables"); ok { - function.BuildEnvironmentVariables = expandBuildEnvironmentVariables(d) + function.BuildEnvironmentVariables = tpgresource.ExpandBuildEnvironmentVariables(d) } if v, ok := d.GetOk("vpc_connector"); ok { @@ -593,19 +605,23 @@ func resourceCloudFunctionsCreate(d 
*schema.ResourceData, meta interface{}) erro // We retry the whole create-and-wait because Cloud Functions // will sometimes fail a creation operation entirely if it fails to pull // source code and we need to try the whole creation again. - rerr := RetryTimeDuration(func() error { - op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Create( - cloudFuncId.locationId(), function).Do() - if err != nil { - return err - } + rerr := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Create( + cloudFuncId.locationId(), function).Do() + if err != nil { + return err + } - // Name of function should be unique - d.SetId(cloudFuncId.cloudFunctionId()) + // Name of function should be unique + d.SetId(cloudFuncId.CloudFunctionId()) - return cloudFunctionsOperationWait(config, op, "Creating CloudFunctions Function", userAgent, - d.Timeout(schema.TimeoutCreate)) - }, d.Timeout(schema.TimeoutCreate), isCloudFunctionsSourceCodeError) + return CloudFunctionsOperationWait(config, op, "Creating CloudFunctions Function", userAgent, + d.Timeout(schema.TimeoutCreate)) + }, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{IsCloudFunctionsSourceCodeError}, + }) if rerr != nil { return rerr } @@ -614,8 +630,8 @@ func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) erro } func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -625,9 +641,9 @@ func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error return err } - function, err := 
config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Get(cloudFuncId.cloudFunctionId()).Do() + function, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Get(cloudFuncId.CloudFunctionId()).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Target CloudFunctions Function %q", cloudFuncId.Name)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Target CloudFunctions Function %q", cloudFuncId.Name)) } if err := d.Set("name", cloudFuncId.Name); err != nil { @@ -702,6 +718,10 @@ func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error setting secret_volumes: %s", err) } + if err := d.Set("status", function.Status); err != nil { + return fmt.Errorf("Error setting status: %s", err) + } + if function.HttpsTrigger != nil { if err := d.Set("trigger_http", true); err != nil { return fmt.Errorf("Error setting trigger_http: %s", err) @@ -744,13 +764,13 @@ func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG]: Updating google_cloudfunctions_function") - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -761,9 +781,9 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro } // The full function needs to supplied in the PATCH call to evaluate some Organization Policies. 
https://github.com/hashicorp/terraform-provider-google/issues/6603 - function, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Get(cloudFuncId.cloudFunctionId()).Do() + function, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Get(cloudFuncId.CloudFunctionId()).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Target CloudFunctions Function %q", cloudFuncId.Name)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Target CloudFunctions Function %q", cloudFuncId.Name)) } // The full function may contain a reference to manually uploaded code if the function was imported from gcloud @@ -822,7 +842,7 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro } if d.HasChange("labels") { - function.Labels = expandLabels(d) + function.Labels = tpgresource.ExpandLabels(d) updateMaskArr = append(updateMaskArr, "labels") } @@ -832,12 +852,12 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro } if d.HasChange("environment_variables") { - function.EnvironmentVariables = expandEnvironmentVariables(d) + function.EnvironmentVariables = tpgresource.ExpandEnvironmentVariables(d) updateMaskArr = append(updateMaskArr, "environmentVariables") } if d.HasChange("build_environment_variables") { - function.BuildEnvironmentVariables = expandBuildEnvironmentVariables(d) + function.BuildEnvironmentVariables = tpgresource.ExpandBuildEnvironmentVariables(d) updateMaskArr = append(updateMaskArr, "buildEnvironmentVariables") } @@ -892,16 +912,19 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro if len(updateMaskArr) > 0 { log.Printf("[DEBUG] Send Patch CloudFunction Configuration request: %#v", function) updateMask := strings.Join(updateMaskArr, ",") - rerr := RetryTimeDuration(func() error { - op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Patch(function.Name, function). 
- UpdateMask(updateMask).Do() - if err != nil { - return err - } - - return cloudFunctionsOperationWait(config, op, "Updating CloudFunctions Function", userAgent, - d.Timeout(schema.TimeoutUpdate)) - }, d.Timeout(schema.TimeoutUpdate)) + rerr := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Patch(function.Name, function). + UpdateMask(updateMask).Do() + if err != nil { + return err + } + + return CloudFunctionsOperationWait(config, op, "Updating CloudFunctions Function", userAgent, + d.Timeout(schema.TimeoutUpdate)) + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if rerr != nil { return fmt.Errorf("Error while updating cloudfunction configuration: %s", rerr) } @@ -912,8 +935,8 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro } func resourceCloudFunctionsDestroy(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -923,11 +946,11 @@ func resourceCloudFunctionsDestroy(d *schema.ResourceData, meta interface{}) err return err } - op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Delete(cloudFuncId.cloudFunctionId()).Do() + op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Delete(cloudFuncId.CloudFunctionId()).Do() if err != nil { return err } - err = cloudFunctionsOperationWait(config, op, "Deleting CloudFunctions Function", userAgent, + err = CloudFunctionsOperationWait(config, op, "Deleting CloudFunctions Function", userAgent, d.Timeout(schema.TimeoutDelete)) if err != nil { return err diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/resource_cloudfunctions_function_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/resource_cloudfunctions_function_sweeper.go new file mode 100644 index 0000000000..45db50cf35 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions/resource_cloudfunctions_function_sweeper.go @@ -0,0 +1,42 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package cloudfunctions + +import ( + "fmt" + "log" + "os" + "strings" + + "io/ioutil" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +const testFunctionsSourceArchivePrefix = "cloudfunczip" + +func init() { + sweeper.AddTestSweepers("gcp_cloud_function_source_archive", sweepCloudFunctionSourceZipArchives) +} + +func sweepCloudFunctionSourceZipArchives(_ string) error { + files, err := ioutil.ReadDir(os.TempDir()) + if err != nil { + log.Printf("Error reading files: %s", err) + return nil + } + for _, f := range files { + if f.IsDir() { + continue + } + if strings.HasPrefix(f.Name(), testFunctionsSourceArchivePrefix) { + filepath := fmt.Sprintf("%s/%s", os.TempDir(), f.Name()) + if err := os.Remove(filepath); err != nil { + log.Printf("Error removing files: %s", err) + return nil + } + log.Printf("[INFO] cloud functions sweeper removed old file %s", filepath) + } + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/cloudfunctions2_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/cloudfunctions2_operation.go new file mode 100644 index 0000000000..a6c758ac63 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/cloudfunctions2_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudfunctions2 + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type Cloudfunctions2OperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *Cloudfunctions2OperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.Cloudfunctions2BasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createCloudfunctions2Waiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*Cloudfunctions2OperationWaiter, error) { + w := &Cloudfunctions2OperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func Cloudfunctions2OperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createCloudfunctions2Waiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func Cloudfunctions2OperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createCloudfunctions2Waiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/data_source_google_cloudfunctions2_function.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/data_source_google_cloudfunctions2_function.go new file mode 100644 index 0000000000..c604aa179c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/data_source_google_cloudfunctions2_function.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package cloudfunctions2 + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleCloudFunctions2Function() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceCloudfunctions2function().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name", "location") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleCloudFunctions2FunctionRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleCloudFunctions2FunctionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/locations/%s/functions/%s", project, d.Get("location").(string), d.Get("name").(string))) + + err = resourceCloudfunctions2functionRead(d, meta) + if err != nil { + return err + } 
+ + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/iam_cloudfunctions2_function.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/iam_cloudfunctions2_function.go new file mode 100644 index 0000000000..725285142a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/iam_cloudfunctions2_function.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudfunctions2 + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var Cloudfunctions2functionIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "cloud_function": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type Cloudfunctions2functionIamUpdater struct { + project string + location string + cloudFunction string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func Cloudfunctions2functionIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("cloud_function"); ok { + values["cloud_function"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("cloud_function").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &Cloudfunctions2functionIamUpdater{ + project: values["project"], + location: values["location"], + cloudFunction: values["cloud_function"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("cloud_function", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting cloud_function: %s", err) + } + + return u, nil +} + +func Cloudfunctions2functionIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &Cloudfunctions2functionIamUpdater{ + project: values["project"], + location: values["location"], + cloudFunction: values["cloud_function"], + d: d, + Config: config, + } + if err := d.Set("cloud_function", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting cloud_function: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *Cloudfunctions2functionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, 
error) { + url, err := u.qualifyfunctionUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *Cloudfunctions2functionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyfunctionUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *Cloudfunctions2functionIamUpdater) qualifyfunctionUrl(methodIdentifier string) (string, error) { + urlTemplate := 
fmt.Sprintf("{{Cloudfunctions2BasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/functions/%s", u.project, u.location, u.cloudFunction), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *Cloudfunctions2functionIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/functions/%s", u.project, u.location, u.cloudFunction) +} + +func (u *Cloudfunctions2functionIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-cloudfunctions2-function-%s", u.GetResourceId()) +} + +func (u *Cloudfunctions2functionIamUpdater) DescribeResource() string { + return fmt.Sprintf("cloudfunctions2 function %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudfunctions2_function.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/resource_cloudfunctions2_function.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudfunctions2_function.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/resource_cloudfunctions2_function.go index 08d0b4ea71..9d9b38f5ac 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloudfunctions2_function.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/resource_cloudfunctions2_function.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package cloudfunctions2 import ( "fmt" @@ -22,6 +25,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceCloudfunctions2function() *schema.Resource { @@ -118,14 +125,14 @@ function, optional when updating an existing function.`, "invert_regex": { Type: schema.TypeBool, Optional: true, - Description: `Only trigger a build if the revision regex does + Description: `Only trigger a build if the revision regex does NOT match the revision regex.`, }, "project_id": { Type: schema.TypeString, Optional: true, ForceNew: true, - Description: `ID of the project that owns the Cloud Source Repository. If omitted, the + Description: `ID of the project that owns the Cloud Source Repository. If omitted, the project ID requesting the build is assumed.`, }, "repo_name": { @@ -158,7 +165,7 @@ project ID requesting the build is assumed.`, "generation": { Type: schema.TypeInt, Optional: true, - Description: `Google Cloud Storage generation for the object. If the generation + Description: `Google Cloud Storage generation for the object. 
If the generation is omitted, the latest generation will be used.`, }, "object": { @@ -222,7 +229,7 @@ as the transport topic for the event delivery.`, "retry_policy": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"RETRY_POLICY_UNSPECIFIED", "RETRY_POLICY_DO_NOT_RETRY", "RETRY_POLICY_RETRY", ""}), + ValidateFunc: verify.ValidateEnum([]string{"RETRY_POLICY_UNSPECIFIED", "RETRY_POLICY_DO_NOT_RETRY", "RETRY_POLICY_RETRY", ""}), Description: `Describes the retry policy in case of function's execution failure. Retried execution is charged as any other execution. Possible values: ["RETRY_POLICY_UNSPECIFIED", "RETRY_POLICY_DO_NOT_RETRY", "RETRY_POLICY_RETRY"]`, }, @@ -281,6 +288,7 @@ region. If not provided, defaults to the same region as the function.`, }, "available_memory": { Type: schema.TypeString, + Computed: true, Optional: true, Description: `The amount of memory available for a function. Defaults to 256M. Supported units are k, M, G, Mi, Gi. If no unit is @@ -295,12 +303,13 @@ supplied the value is interpreted as bytes.`, "ingress_settings": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"ALLOW_ALL", "ALLOW_INTERNAL_ONLY", "ALLOW_INTERNAL_AND_GCLB", ""}), + ValidateFunc: verify.ValidateEnum([]string{"ALLOW_ALL", "ALLOW_INTERNAL_ONLY", "ALLOW_INTERNAL_AND_GCLB", ""}), Description: `Available ingress settings. Defaults to "ALLOW_ALL" if unspecified. Default value: "ALLOW_ALL" Possible values: ["ALLOW_ALL", "ALLOW_INTERNAL_ONLY", "ALLOW_INTERNAL_AND_GCLB"]`, Default: "ALLOW_ALL", }, "max_instance_count": { Type: schema.TypeInt, + Computed: true, Optional: true, Description: `The limit on the maximum number of function instances that may coexist at a given time.`, @@ -404,6 +413,7 @@ given time.`, }, "timeout_seconds": { Type: schema.TypeInt, + Computed: true, Optional: true, Description: `The function execution timeout. 
Execution is considered failed and can be terminated if the function is not completed at the end of the @@ -417,7 +427,7 @@ timeout period. Defaults to 60 seconds.`, "vpc_connector_egress_settings": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"VPC_CONNECTOR_EGRESS_SETTINGS_UNSPECIFIED", "PRIVATE_RANGES_ONLY", "ALL_TRAFFIC", ""}), + ValidateFunc: verify.ValidateEnum([]string{"VPC_CONNECTOR_EGRESS_SETTINGS_UNSPECIFIED", "PRIVATE_RANGES_ONLY", "ALL_TRAFFIC", ""}), Description: `Available egress settings. Possible values: ["VPC_CONNECTOR_EGRESS_SETTINGS_UNSPECIFIED", "PRIVATE_RANGES_ONLY", "ALL_TRAFFIC"]`, }, "gcf_uri": { @@ -448,6 +458,11 @@ timeout period. Defaults to 60 seconds.`, Computed: true, Description: `The last update timestamp of a Cloud Function.`, }, + "url": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The deployed url for the function.`, + }, "project": { Type: schema.TypeString, Optional: true, @@ -489,8 +504,8 @@ The only allowed value is 'match-path-pattern'. 
} func resourceCloudfunctions2functionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -499,41 +514,41 @@ func resourceCloudfunctions2functionCreate(d *schema.ResourceData, meta interfac nameProp, err := expandCloudfunctions2functionName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } descriptionProp, err := expandCloudfunctions2functionDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } buildConfigProp, err := expandCloudfunctions2functionBuildConfig(d.Get("build_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("build_config"); !isEmptyValue(reflect.ValueOf(buildConfigProp)) && (ok || !reflect.DeepEqual(v, buildConfigProp)) { + } else if v, ok := d.GetOkExists("build_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(buildConfigProp)) && (ok || !reflect.DeepEqual(v, buildConfigProp)) { obj["buildConfig"] = buildConfigProp } serviceConfigProp, err := expandCloudfunctions2functionServiceConfig(d.Get("service_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("service_config"); 
!isEmptyValue(reflect.ValueOf(serviceConfigProp)) && (ok || !reflect.DeepEqual(v, serviceConfigProp)) { + } else if v, ok := d.GetOkExists("service_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceConfigProp)) && (ok || !reflect.DeepEqual(v, serviceConfigProp)) { obj["serviceConfig"] = serviceConfigProp } eventTriggerProp, err := expandCloudfunctions2functionEventTrigger(d.Get("event_trigger"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("event_trigger"); !isEmptyValue(reflect.ValueOf(eventTriggerProp)) && (ok || !reflect.DeepEqual(v, eventTriggerProp)) { + } else if v, ok := d.GetOkExists("event_trigger"); !tpgresource.IsEmptyValue(reflect.ValueOf(eventTriggerProp)) && (ok || !reflect.DeepEqual(v, eventTriggerProp)) { obj["eventTrigger"] = eventTriggerProp } labelsProp, err := expandCloudfunctions2functionLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } - url, err := replaceVars(d, config, "{{Cloudfunctions2BasePath}}projects/{{project}}/locations/{{location}}/functions?functionId={{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{Cloudfunctions2BasePath}}projects/{{project}}/locations/{{location}}/functions?functionId={{name}}") if err != nil { return err } @@ -541,24 +556,32 @@ func resourceCloudfunctions2functionCreate(d *schema.ResourceData, meta interfac log.Printf("[DEBUG] Creating new function: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for function: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found 
- if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating function: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/functions/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/functions/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -582,7 +605,7 @@ func resourceCloudfunctions2functionCreate(d *schema.ResourceData, meta interfac } // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/functions/{{name}}") + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/functions/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -594,33 +617,39 @@ func resourceCloudfunctions2functionCreate(d *schema.ResourceData, meta interfac } func resourceCloudfunctions2functionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{Cloudfunctions2BasePath}}projects/{{project}}/locations/{{location}}/functions/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{Cloudfunctions2BasePath}}projects/{{project}}/locations/{{location}}/functions/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for function: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Cloudfunctions2function %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Cloudfunctions2function %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -636,6 +665,9 @@ func 
resourceCloudfunctions2functionRead(d *schema.ResourceData, meta interface{ if err := d.Set("environment", flattenCloudfunctions2functionEnvironment(res["environment"], d, config)); err != nil { return fmt.Errorf("Error reading function: %s", err) } + if err := d.Set("url", flattenCloudfunctions2functionUrl(res["url"], d, config)); err != nil { + return fmt.Errorf("Error reading function: %s", err) + } if err := d.Set("state", flattenCloudfunctions2functionState(res["state"], d, config)); err != nil { return fmt.Errorf("Error reading function: %s", err) } @@ -659,15 +691,15 @@ func resourceCloudfunctions2functionRead(d *schema.ResourceData, meta interface{ } func resourceCloudfunctions2functionUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for function: %s", err) } @@ -677,35 +709,35 @@ func resourceCloudfunctions2functionUpdate(d *schema.ResourceData, meta interfac descriptionProp, err := expandCloudfunctions2functionDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } buildConfigProp, err := expandCloudfunctions2functionBuildConfig(d.Get("build_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("build_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
buildConfigProp)) { + } else if v, ok := d.GetOkExists("build_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, buildConfigProp)) { obj["buildConfig"] = buildConfigProp } serviceConfigProp, err := expandCloudfunctions2functionServiceConfig(d.Get("service_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("service_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceConfigProp)) { + } else if v, ok := d.GetOkExists("service_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceConfigProp)) { obj["serviceConfig"] = serviceConfigProp } eventTriggerProp, err := expandCloudfunctions2functionEventTrigger(d.Get("event_trigger"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("event_trigger"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, eventTriggerProp)) { + } else if v, ok := d.GetOkExists("event_trigger"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, eventTriggerProp)) { obj["eventTrigger"] = eventTriggerProp } labelsProp, err := expandCloudfunctions2functionLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } - url, err := replaceVars(d, config, "{{Cloudfunctions2BasePath}}projects/{{project}}/locations/{{location}}/functions/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{Cloudfunctions2BasePath}}projects/{{project}}/locations/{{location}}/functions/{{name}}") if err != nil { return err } @@ -732,19 +764,27 @@ func resourceCloudfunctions2functionUpdate(d *schema.ResourceData, meta interfac if d.HasChange("labels") { updateMask = 
append(updateMask, "labels") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating function %q: %s", d.Id(), err) @@ -764,21 +804,21 @@ func resourceCloudfunctions2functionUpdate(d *schema.ResourceData, meta interfac } func resourceCloudfunctions2functionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for function: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{Cloudfunctions2BasePath}}projects/{{project}}/locations/{{location}}/functions/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, 
"{{Cloudfunctions2BasePath}}projects/{{project}}/locations/{{location}}/functions/{{name}}") if err != nil { return err } @@ -787,13 +827,21 @@ func resourceCloudfunctions2functionDelete(d *schema.ResourceData, meta interfac log.Printf("[DEBUG] Deleting function %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "function") + return transport_tpg.HandleNotFoundError(err, d, "function") } err = Cloudfunctions2OperationWaitTime( @@ -809,8 +857,8 @@ func resourceCloudfunctions2functionDelete(d *schema.ResourceData, meta interfac } func resourceCloudfunctions2functionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -819,7 +867,7 @@ func resourceCloudfunctions2functionImport(d *schema.ResourceData, meta interfac } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/functions/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/functions/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -828,26 +876,30 @@ func 
resourceCloudfunctions2functionImport(d *schema.ResourceData, meta interfac return []*schema.ResourceData{d}, nil } -func flattenCloudfunctions2functionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return NameFromSelfLinkStateFunc(v) + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenCloudfunctions2functionDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func flattenCloudfunctions2functionDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionEnvironment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionEnvironment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionBuildConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -872,19 +924,19 @@ func flattenCloudfunctions2functionBuildConfig(v interface{}, d *schema.Resource flattenCloudfunctions2functionBuildConfigDockerRepository(original["dockerRepository"], d, config) return []interface{}{transformed} } -func flattenCloudfunctions2functionBuildConfigBuild(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenCloudfunctions2functionBuildConfigBuild(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionBuildConfigRuntime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigRuntime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionBuildConfigEntryPoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigEntryPoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionBuildConfigSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -899,7 +951,7 @@ func flattenCloudfunctions2functionBuildConfigSource(v interface{}, d *schema.Re flattenCloudfunctions2functionBuildConfigSourceRepoSource(original["repoSource"], d, config) return []interface{}{transformed} } -func flattenCloudfunctions2functionBuildConfigSourceStorageSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigSourceStorageSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -917,7 +969,7 @@ func flattenCloudfunctions2functionBuildConfigSourceStorageSource(v interface{}, return []interface{}{transformed} } -func flattenCloudfunctions2functionBuildConfigSourceStorageSourceBucket(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigSourceStorageSourceBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // This flatten 
function is shared between the resource and the datasource. // TF Input format: {bucket-name} // GET Response format: gcf-v2-sources-{Project-number}-{location} @@ -934,7 +986,7 @@ func flattenCloudfunctions2functionBuildConfigSourceStorageSourceBucket(v interf return v } -func flattenCloudfunctions2functionBuildConfigSourceStorageSourceObject(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigSourceStorageSourceObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // This flatten function is shared between the resource and the datasource. // TF Input format: {object-name} // GET Response format: {function-name}/{object-name} @@ -951,10 +1003,10 @@ func flattenCloudfunctions2functionBuildConfigSourceStorageSourceObject(v interf return v } -func flattenCloudfunctions2functionBuildConfigSourceStorageSourceGeneration(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigSourceStorageSourceGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -968,7 +1020,7 @@ func flattenCloudfunctions2functionBuildConfigSourceStorageSourceGeneration(v in return v // let terraform core handle it otherwise } -func flattenCloudfunctions2functionBuildConfigSourceRepoSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigSourceRepoSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -993,47 +1045,47 @@ func flattenCloudfunctions2functionBuildConfigSourceRepoSource(v interface{}, d 
flattenCloudfunctions2functionBuildConfigSourceRepoSourceInvertRegex(original["invertRegex"], d, config) return []interface{}{transformed} } -func flattenCloudfunctions2functionBuildConfigSourceRepoSourceProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigSourceRepoSourceProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionBuildConfigSourceRepoSourceRepoName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigSourceRepoSourceRepoName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionBuildConfigSourceRepoSourceBranchName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigSourceRepoSourceBranchName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionBuildConfigSourceRepoSourceTagName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigSourceRepoSourceTagName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionBuildConfigSourceRepoSourceCommitSha(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigSourceRepoSourceCommitSha(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionBuildConfigSourceRepoSourceDir(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigSourceRepoSourceDir(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenCloudfunctions2functionBuildConfigSourceRepoSourceInvertRegex(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigSourceRepoSourceInvertRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionBuildConfigWorkerPool(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigWorkerPool(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionBuildConfigEnvironmentVariables(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigEnvironmentVariables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionBuildConfigDockerRepository(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionBuildConfigDockerRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1059,9 +1111,9 @@ func flattenCloudfunctions2functionServiceConfig(v interface{}, d *schema.Resour transformed["min_instance_count"] = flattenCloudfunctions2functionServiceConfigMinInstanceCount(original["minInstanceCount"], d, config) transformed["vpc_connector"] = - flattenCloudfunctions2functionServiceConfigVPCConnector(original["vpcConnector"], d, config) + flattenCloudfunctions2functionServiceConfigVpcConnector(original["vpcConnector"], d, config) transformed["vpc_connector_egress_settings"] = - 
flattenCloudfunctions2functionServiceConfigVPCConnectorEgressSettings(original["vpcConnectorEgressSettings"], d, config) + flattenCloudfunctions2functionServiceConfigVpcConnectorEgressSettings(original["vpcConnectorEgressSettings"], d, config) transformed["ingress_settings"] = flattenCloudfunctions2functionServiceConfigIngressSettings(original["ingressSettings"], d, config) transformed["uri"] = @@ -1078,14 +1130,14 @@ func flattenCloudfunctions2functionServiceConfig(v interface{}, d *schema.Resour flattenCloudfunctions2functionServiceConfigSecretVolumes(original["secretVolumes"], d, config) return []interface{}{transformed} } -func flattenCloudfunctions2functionServiceConfigService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1099,14 +1151,14 @@ func flattenCloudfunctions2functionServiceConfigTimeoutSeconds(v interface{}, d return v // let terraform core handle it otherwise } -func flattenCloudfunctions2functionServiceConfigAvailableMemory(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigAvailableMemory(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigMaxInstanceRequestConcurrency(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenCloudfunctions2functionServiceConfigMaxInstanceRequestConcurrency(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1120,18 +1172,18 @@ func flattenCloudfunctions2functionServiceConfigMaxInstanceRequestConcurrency(v return v // let terraform core handle it otherwise } -func flattenCloudfunctions2functionServiceConfigAvailableCpu(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigAvailableCpu(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigEnvironmentVariables(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigEnvironmentVariables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigMaxInstanceCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigMaxInstanceCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1145,10 +1197,10 @@ func flattenCloudfunctions2functionServiceConfigMaxInstanceCount(v interface{}, return v // let terraform core handle it otherwise } -func flattenCloudfunctions2functionServiceConfigMinInstanceCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenCloudfunctions2functionServiceConfigMinInstanceCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1162,35 +1214,35 @@ func flattenCloudfunctions2functionServiceConfigMinInstanceCount(v interface{}, return v // let terraform core handle it otherwise } -func flattenCloudfunctions2functionServiceConfigVPCConnector(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigVpcConnector(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigVPCConnectorEgressSettings(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigVpcConnectorEgressSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigIngressSettings(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigIngressSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigGcfUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigGcfUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenCloudfunctions2functionServiceConfigServiceAccountEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigServiceAccountEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigAllTrafficOnLatestRevision(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigAllTrafficOnLatestRevision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigSecretEnvironmentVariables(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigSecretEnvironmentVariables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1211,23 +1263,23 @@ func flattenCloudfunctions2functionServiceConfigSecretEnvironmentVariables(v int } return transformed } -func flattenCloudfunctions2functionServiceConfigSecretEnvironmentVariablesKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigSecretEnvironmentVariablesKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigSecretEnvironmentVariablesProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigSecretEnvironmentVariablesProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigSecretEnvironmentVariablesSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigSecretEnvironmentVariablesSecret(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigSecretEnvironmentVariablesVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigSecretEnvironmentVariablesVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigSecretVolumes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigSecretVolumes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1248,19 +1300,19 @@ func flattenCloudfunctions2functionServiceConfigSecretVolumes(v interface{}, d * } return transformed } -func flattenCloudfunctions2functionServiceConfigSecretVolumesMountPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigSecretVolumesMountPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigSecretVolumesProjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigSecretVolumesProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigSecretVolumesSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigSecretVolumesSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigSecretVolumesVersions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigSecretVolumesVersions(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { if v == nil { return v } @@ -1279,15 +1331,15 @@ func flattenCloudfunctions2functionServiceConfigSecretVolumesVersions(v interfac } return transformed } -func flattenCloudfunctions2functionServiceConfigSecretVolumesVersionsVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigSecretVolumesVersionsVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionServiceConfigSecretVolumesVersionsPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionServiceConfigSecretVolumesVersionsPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionEventTrigger(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionEventTrigger(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1312,19 +1364,19 @@ func flattenCloudfunctions2functionEventTrigger(v interface{}, d *schema.Resourc flattenCloudfunctions2functionEventTriggerRetryPolicy(original["retryPolicy"], d, config) return []interface{}{transformed} } -func flattenCloudfunctions2functionEventTriggerTrigger(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionEventTriggerTrigger(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionEventTriggerTriggerRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionEventTriggerTriggerRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionEventTriggerEventType(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { +func flattenCloudfunctions2functionEventTriggerEventType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionEventTriggerEventFilters(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionEventTriggerEventFilters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1344,47 +1396,47 @@ func flattenCloudfunctions2functionEventTriggerEventFilters(v interface{}, d *sc } return transformed } -func flattenCloudfunctions2functionEventTriggerEventFiltersAttribute(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionEventTriggerEventFiltersAttribute(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionEventTriggerEventFiltersValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionEventTriggerEventFiltersValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionEventTriggerEventFiltersOperator(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionEventTriggerEventFiltersOperator(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionEventTriggerPubsubTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionEventTriggerPubsubTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionEventTriggerServiceAccountEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionEventTriggerServiceAccountEmail(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionEventTriggerRetryPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionEventTriggerRetryPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudfunctions2functionLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudfunctions2functionLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandCloudfunctions2functionName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/locations/{{location}}/functions/{{name}}") +func expandCloudfunctions2functionName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/functions/{{name}}") } -func expandCloudfunctions2functionDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil 
@@ -1396,68 +1448,68 @@ func expandCloudfunctions2functionBuildConfig(v interface{}, d TerraformResource transformedBuild, err := expandCloudfunctions2functionBuildConfigBuild(original["build"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBuild); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBuild); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["build"] = transformedBuild } transformedRuntime, err := expandCloudfunctions2functionBuildConfigRuntime(original["runtime"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRuntime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRuntime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["runtime"] = transformedRuntime } transformedEntryPoint, err := expandCloudfunctions2functionBuildConfigEntryPoint(original["entry_point"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEntryPoint); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEntryPoint); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["entryPoint"] = transformedEntryPoint } transformedSource, err := expandCloudfunctions2functionBuildConfigSource(original["source"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSource); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["source"] = transformedSource } transformedWorkerPool, err := expandCloudfunctions2functionBuildConfigWorkerPool(original["worker_pool"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWorkerPool); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWorkerPool); val.IsValid() && !tpgresource.IsEmptyValue(val) 
{ transformed["workerPool"] = transformedWorkerPool } transformedEnvironmentVariables, err := expandCloudfunctions2functionBuildConfigEnvironmentVariables(original["environment_variables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnvironmentVariables); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnvironmentVariables); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["environmentVariables"] = transformedEnvironmentVariables } transformedDockerRepository, err := expandCloudfunctions2functionBuildConfigDockerRepository(original["docker_repository"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDockerRepository); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDockerRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dockerRepository"] = transformedDockerRepository } return transformed, nil } -func expandCloudfunctions2functionBuildConfigBuild(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigBuild(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigRuntime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigRuntime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigEntryPoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigEntryPoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigSource(v interface{}, 
d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1469,21 +1521,21 @@ func expandCloudfunctions2functionBuildConfigSource(v interface{}, d TerraformRe transformedStorageSource, err := expandCloudfunctions2functionBuildConfigSourceStorageSource(original["storage_source"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStorageSource); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStorageSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["storageSource"] = transformedStorageSource } transformedRepoSource, err := expandCloudfunctions2functionBuildConfigSourceRepoSource(original["repo_source"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRepoSource); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRepoSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["repoSource"] = transformedRepoSource } return transformed, nil } -func expandCloudfunctions2functionBuildConfigSourceStorageSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigSourceStorageSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1495,40 +1547,40 @@ func expandCloudfunctions2functionBuildConfigSourceStorageSource(v interface{}, transformedBucket, err := expandCloudfunctions2functionBuildConfigSourceStorageSourceBucket(original["bucket"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["bucket"] = transformedBucket } transformedObject, err := expandCloudfunctions2functionBuildConfigSourceStorageSourceObject(original["object"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["object"] = transformedObject } transformedGeneration, err := expandCloudfunctions2functionBuildConfigSourceStorageSourceGeneration(original["generation"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["generation"] = transformedGeneration } return transformed, nil } -func expandCloudfunctions2functionBuildConfigSourceStorageSourceBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigSourceStorageSourceBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigSourceStorageSourceObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigSourceStorageSourceObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigSourceStorageSourceGeneration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigSourceStorageSourceGeneration(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigSourceRepoSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigSourceRepoSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1540,88 +1592,88 @@ func expandCloudfunctions2functionBuildConfigSourceRepoSource(v interface{}, d T transformedProjectId, err := expandCloudfunctions2functionBuildConfigSourceRepoSourceProjectId(original["project_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projectId"] = transformedProjectId } transformedRepoName, err := expandCloudfunctions2functionBuildConfigSourceRepoSourceRepoName(original["repo_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRepoName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRepoName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["repoName"] = transformedRepoName } transformedBranchName, err := expandCloudfunctions2functionBuildConfigSourceRepoSourceBranchName(original["branch_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBranchName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBranchName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["branchName"] = transformedBranchName } transformedTagName, err := expandCloudfunctions2functionBuildConfigSourceRepoSourceTagName(original["tag_name"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedTagName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTagName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tagName"] = transformedTagName } transformedCommitSha, err := expandCloudfunctions2functionBuildConfigSourceRepoSourceCommitSha(original["commit_sha"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCommitSha); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCommitSha); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["commitSha"] = transformedCommitSha } transformedDir, err := expandCloudfunctions2functionBuildConfigSourceRepoSourceDir(original["dir"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDir); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDir); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dir"] = transformedDir } transformedInvertRegex, err := expandCloudfunctions2functionBuildConfigSourceRepoSourceInvertRegex(original["invert_regex"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInvertRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["invertRegex"] = transformedInvertRegex } return transformed, nil } -func expandCloudfunctions2functionBuildConfigSourceRepoSourceProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigSourceRepoSourceProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigSourceRepoSourceRepoName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandCloudfunctions2functionBuildConfigSourceRepoSourceRepoName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigSourceRepoSourceBranchName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigSourceRepoSourceBranchName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigSourceRepoSourceTagName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigSourceRepoSourceTagName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigSourceRepoSourceCommitSha(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigSourceRepoSourceCommitSha(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigSourceRepoSourceDir(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigSourceRepoSourceDir(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigSourceRepoSourceInvertRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigSourceRepoSourceInvertRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigWorkerPool(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigWorkerPool(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionBuildConfigEnvironmentVariables(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandCloudfunctions2functionBuildConfigEnvironmentVariables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1632,11 +1684,11 @@ func expandCloudfunctions2functionBuildConfigEnvironmentVariables(v interface{}, return m, nil } -func expandCloudfunctions2functionBuildConfigDockerRepository(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionBuildConfigDockerRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1648,146 +1700,146 @@ func expandCloudfunctions2functionServiceConfig(v interface{}, d TerraformResour transformedService, err := expandCloudfunctions2functionServiceConfigService(original["service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["service"] = transformedService } transformedTimeoutSeconds, err := 
expandCloudfunctions2functionServiceConfigTimeoutSeconds(original["timeout_seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeoutSeconds"] = transformedTimeoutSeconds } transformedAvailableMemory, err := expandCloudfunctions2functionServiceConfigAvailableMemory(original["available_memory"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAvailableMemory); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAvailableMemory); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["availableMemory"] = transformedAvailableMemory } transformedMaxInstanceRequestConcurrency, err := expandCloudfunctions2functionServiceConfigMaxInstanceRequestConcurrency(original["max_instance_request_concurrency"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxInstanceRequestConcurrency); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxInstanceRequestConcurrency); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxInstanceRequestConcurrency"] = transformedMaxInstanceRequestConcurrency } transformedAvailableCpu, err := expandCloudfunctions2functionServiceConfigAvailableCpu(original["available_cpu"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAvailableCpu); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAvailableCpu); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["availableCpu"] = transformedAvailableCpu } transformedEnvironmentVariables, err := expandCloudfunctions2functionServiceConfigEnvironmentVariables(original["environment_variables"], d, config) if err != nil { return nil, err - } 
else if val := reflect.ValueOf(transformedEnvironmentVariables); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnvironmentVariables); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["environmentVariables"] = transformedEnvironmentVariables } transformedMaxInstanceCount, err := expandCloudfunctions2functionServiceConfigMaxInstanceCount(original["max_instance_count"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxInstanceCount); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxInstanceCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxInstanceCount"] = transformedMaxInstanceCount } transformedMinInstanceCount, err := expandCloudfunctions2functionServiceConfigMinInstanceCount(original["min_instance_count"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinInstanceCount); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinInstanceCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minInstanceCount"] = transformedMinInstanceCount } - transformedVPCConnector, err := expandCloudfunctions2functionServiceConfigVPCConnector(original["vpc_connector"], d, config) + transformedVpcConnector, err := expandCloudfunctions2functionServiceConfigVpcConnector(original["vpc_connector"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVPCConnector); val.IsValid() && !isEmptyValue(val) { - transformed["vpcConnector"] = transformedVPCConnector + } else if val := reflect.ValueOf(transformedVpcConnector); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vpcConnector"] = transformedVpcConnector } - transformedVPCConnectorEgressSettings, err := expandCloudfunctions2functionServiceConfigVPCConnectorEgressSettings(original["vpc_connector_egress_settings"], d, config) + 
transformedVpcConnectorEgressSettings, err := expandCloudfunctions2functionServiceConfigVpcConnectorEgressSettings(original["vpc_connector_egress_settings"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVPCConnectorEgressSettings); val.IsValid() && !isEmptyValue(val) { - transformed["vpcConnectorEgressSettings"] = transformedVPCConnectorEgressSettings + } else if val := reflect.ValueOf(transformedVpcConnectorEgressSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vpcConnectorEgressSettings"] = transformedVpcConnectorEgressSettings } transformedIngressSettings, err := expandCloudfunctions2functionServiceConfigIngressSettings(original["ingress_settings"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIngressSettings); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIngressSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ingressSettings"] = transformedIngressSettings } transformedUri, err := expandCloudfunctions2functionServiceConfigUri(original["uri"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["uri"] = transformedUri } transformedGcfUri, err := expandCloudfunctions2functionServiceConfigGcfUri(original["gcf_uri"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGcfUri); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGcfUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["gcfUri"] = transformedGcfUri } transformedServiceAccountEmail, err := expandCloudfunctions2functionServiceConfigServiceAccountEmail(original["service_account_email"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["serviceAccountEmail"] = transformedServiceAccountEmail } transformedAllTrafficOnLatestRevision, err := expandCloudfunctions2functionServiceConfigAllTrafficOnLatestRevision(original["all_traffic_on_latest_revision"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllTrafficOnLatestRevision); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllTrafficOnLatestRevision); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allTrafficOnLatestRevision"] = transformedAllTrafficOnLatestRevision } transformedSecretEnvironmentVariables, err := expandCloudfunctions2functionServiceConfigSecretEnvironmentVariables(original["secret_environment_variables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSecretEnvironmentVariables); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSecretEnvironmentVariables); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["secretEnvironmentVariables"] = transformedSecretEnvironmentVariables } transformedSecretVolumes, err := expandCloudfunctions2functionServiceConfigSecretVolumes(original["secret_volumes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSecretVolumes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSecretVolumes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["secretVolumes"] = transformedSecretVolumes } return transformed, nil } -func expandCloudfunctions2functionServiceConfigService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigService(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigAvailableMemory(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigAvailableMemory(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigMaxInstanceRequestConcurrency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigMaxInstanceRequestConcurrency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigAvailableCpu(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigAvailableCpu(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigEnvironmentVariables(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandCloudfunctions2functionServiceConfigEnvironmentVariables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1798,43 +1850,43 @@ func expandCloudfunctions2functionServiceConfigEnvironmentVariables(v interface{ return m, nil } -func 
expandCloudfunctions2functionServiceConfigMaxInstanceCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigMaxInstanceCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigMinInstanceCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigMinInstanceCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigVPCConnector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigVpcConnector(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigVPCConnectorEgressSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigVpcConnectorEgressSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigIngressSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigIngressSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandCloudfunctions2functionServiceConfigGcfUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigGcfUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigServiceAccountEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigAllTrafficOnLatestRevision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigAllTrafficOnLatestRevision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigSecretEnvironmentVariables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigSecretEnvironmentVariables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1847,28 +1899,28 @@ func expandCloudfunctions2functionServiceConfigSecretEnvironmentVariables(v inte transformedKey, err := expandCloudfunctions2functionServiceConfigSecretEnvironmentVariablesKey(original["key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["key"] = transformedKey } transformedProjectId, err := 
expandCloudfunctions2functionServiceConfigSecretEnvironmentVariablesProjectId(original["project_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projectId"] = transformedProjectId } transformedSecret, err := expandCloudfunctions2functionServiceConfigSecretEnvironmentVariablesSecret(original["secret"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["secret"] = transformedSecret } transformedVersion, err := expandCloudfunctions2functionServiceConfigSecretEnvironmentVariablesVersion(original["version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["version"] = transformedVersion } @@ -1877,23 +1929,23 @@ func expandCloudfunctions2functionServiceConfigSecretEnvironmentVariables(v inte return req, nil } -func expandCloudfunctions2functionServiceConfigSecretEnvironmentVariablesKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigSecretEnvironmentVariablesKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigSecretEnvironmentVariablesProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigSecretEnvironmentVariablesProjectId(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigSecretEnvironmentVariablesSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigSecretEnvironmentVariablesSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigSecretEnvironmentVariablesVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigSecretEnvironmentVariablesVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigSecretVolumes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigSecretVolumes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1906,28 +1958,28 @@ func expandCloudfunctions2functionServiceConfigSecretVolumes(v interface{}, d Te transformedMountPath, err := expandCloudfunctions2functionServiceConfigSecretVolumesMountPath(original["mount_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMountPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMountPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mountPath"] = transformedMountPath } transformedProjectId, err := expandCloudfunctions2functionServiceConfigSecretVolumesProjectId(original["project_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projectId"] = transformedProjectId } transformedSecret, err := expandCloudfunctions2functionServiceConfigSecretVolumesSecret(original["secret"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["secret"] = transformedSecret } transformedVersions, err := expandCloudfunctions2functionServiceConfigSecretVolumesVersions(original["versions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVersions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVersions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["versions"] = transformedVersions } @@ -1936,19 +1988,19 @@ func expandCloudfunctions2functionServiceConfigSecretVolumes(v interface{}, d Te return req, nil } -func expandCloudfunctions2functionServiceConfigSecretVolumesMountPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigSecretVolumesMountPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigSecretVolumesProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigSecretVolumesProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigSecretVolumesSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigSecretVolumesSecret(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigSecretVolumesVersions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigSecretVolumesVersions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1961,14 +2013,14 @@ func expandCloudfunctions2functionServiceConfigSecretVolumesVersions(v interface transformedVersion, err := expandCloudfunctions2functionServiceConfigSecretVolumesVersionsVersion(original["version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["version"] = transformedVersion } transformedPath, err := expandCloudfunctions2functionServiceConfigSecretVolumesVersionsPath(original["path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["path"] = transformedPath } @@ -1977,15 +2029,15 @@ func expandCloudfunctions2functionServiceConfigSecretVolumesVersions(v interface return req, nil } -func expandCloudfunctions2functionServiceConfigSecretVolumesVersionsVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigSecretVolumesVersionsVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionServiceConfigSecretVolumesVersionsPath(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionServiceConfigSecretVolumesVersionsPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionEventTrigger(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionEventTrigger(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1997,68 +2049,68 @@ func expandCloudfunctions2functionEventTrigger(v interface{}, d TerraformResourc transformedTrigger, err := expandCloudfunctions2functionEventTriggerTrigger(original["trigger"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTrigger); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTrigger); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["trigger"] = transformedTrigger } transformedTriggerRegion, err := expandCloudfunctions2functionEventTriggerTriggerRegion(original["trigger_region"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTriggerRegion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTriggerRegion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["triggerRegion"] = transformedTriggerRegion } transformedEventType, err := expandCloudfunctions2functionEventTriggerEventType(original["event_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEventType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEventType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["eventType"] = transformedEventType } transformedEventFilters, err := 
expandCloudfunctions2functionEventTriggerEventFilters(original["event_filters"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEventFilters); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEventFilters); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["eventFilters"] = transformedEventFilters } transformedPubsubTopic, err := expandCloudfunctions2functionEventTriggerPubsubTopic(original["pubsub_topic"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pubsubTopic"] = transformedPubsubTopic } transformedServiceAccountEmail, err := expandCloudfunctions2functionEventTriggerServiceAccountEmail(original["service_account_email"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["serviceAccountEmail"] = transformedServiceAccountEmail } transformedRetryPolicy, err := expandCloudfunctions2functionEventTriggerRetryPolicy(original["retry_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryPolicy"] = transformedRetryPolicy } return transformed, nil } -func expandCloudfunctions2functionEventTriggerTrigger(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionEventTriggerTrigger(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandCloudfunctions2functionEventTriggerTriggerRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionEventTriggerTriggerRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionEventTriggerEventType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionEventTriggerEventType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionEventTriggerEventFilters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionEventTriggerEventFilters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -2072,21 +2124,21 @@ func expandCloudfunctions2functionEventTriggerEventFilters(v interface{}, d Terr transformedAttribute, err := expandCloudfunctions2functionEventTriggerEventFiltersAttribute(original["attribute"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAttribute); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAttribute); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["attribute"] = transformedAttribute } transformedValue, err := expandCloudfunctions2functionEventTriggerEventFiltersValue(original["value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["value"] = transformedValue } transformedOperator, err := 
expandCloudfunctions2functionEventTriggerEventFiltersOperator(original["operator"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOperator); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOperator); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["operator"] = transformedOperator } @@ -2095,31 +2147,31 @@ func expandCloudfunctions2functionEventTriggerEventFilters(v interface{}, d Terr return req, nil } -func expandCloudfunctions2functionEventTriggerEventFiltersAttribute(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionEventTriggerEventFiltersAttribute(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionEventTriggerEventFiltersValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionEventTriggerEventFiltersValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionEventTriggerEventFiltersOperator(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionEventTriggerEventFiltersOperator(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionEventTriggerPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionEventTriggerPubsubTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionEventTriggerServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandCloudfunctions2functionEventTriggerServiceAccountEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionEventTriggerRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudfunctions2functionEventTriggerRetryPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudfunctions2functionLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandCloudfunctions2functionLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/resource_cloudfunctions2_function_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/resource_cloudfunctions2_function_sweeper.go new file mode 100644 index 0000000000..79e8309f93 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2/resource_cloudfunctions2_function_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudfunctions2 + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("Cloudfunctions2function", testSweepCloudfunctions2function) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudfunctions2function(region string) error { + resourceName := "Cloudfunctions2function" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://cloudfunctions.googleapis.com/v2/projects/{{project}}/locations/{{location}}/functions", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err 
!= nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["functions"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudfunctions.googleapis.com/v2/projects/{{project}}/locations/{{location}}/functions/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/cloud_identity_group_membership_utils.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/cloud_identity_group_membership_utils.go similarity index 85% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/cloud_identity_group_membership_utils.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/cloud_identity_group_membership_utils.go index e2a1e22b81..c13586c279 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/cloud_identity_group_membership_utils.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/cloud_identity_group_membership_utils.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package cloudidentity import ( "log" @@ -14,7 +16,7 @@ func transformCloudIdentityGroupMembershipReadError(err error) error { // This error occurs when either the group membership does not exist, or permission is denied. It is // deliberately ambiguous so that existence information is not revealed to the caller. However, for // the Read function, we can only assume that the membership does not exist, and proceed with attempting - // other operations. Since handleNotFoundError(...) expects an error code of 404 when a resource does not + // other operations. Since HandleNotFoundError(...) expects an error code of 404 when a resource does not // exist, to get the desired behavior, we modify the error code to be 404. 
gErr.Code = 404 } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/data_source_cloud_identity_group_memberships.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/data_source_cloud_identity_group_memberships.go new file mode 100644 index 0000000000..caec9cd1f0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/data_source_cloud_identity_group_memberships.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package cloudidentity + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudidentity/v1" +) + +func DataSourceGoogleCloudIdentityGroupMemberships() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceCloudIdentityGroupMembership().Schema) + + return &schema.Resource{ + Read: dataSourceGoogleCloudIdentityGroupMembershipsRead, + + Schema: map[string]*schema.Schema{ + "memberships": { + Type: schema.TypeList, + Computed: true, + Description: `List of Cloud Identity group memberships.`, + Elem: &schema.Resource{ + Schema: dsSchema, + }, + }, + "group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Group to get memberships from.`, + }, + }, + } +} + +func dataSourceGoogleCloudIdentityGroupMembershipsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + 
} + + result := []map[string]interface{}{} + membershipsCall := config.NewCloudIdentityClient(userAgent).Groups.Memberships.List(d.Get("group").(string)).View("FULL") + if config.UserProjectOverride { + billingProject := "" + // err may be nil - project isn't required for this resource + if project, err := tpgresource.GetProject(d, config); err == nil { + billingProject = project + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + if billingProject != "" { + membershipsCall.Header().Set("X-Goog-User-Project", billingProject) + } + } + + err = membershipsCall.Pages(config.Context, func(resp *cloudidentity.ListMembershipsResponse) error { + for _, member := range resp.Memberships { + result = append(result, map[string]interface{}{ + "name": member.Name, + "roles": flattenCloudIdentityGroupMembershipsRoles(member.Roles), + "preferred_member_key": flattenCloudIdentityGroupsEntityKey(member.PreferredMemberKey), + }) + } + + return nil + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudIdentityGroupMemberships %q", d.Id())) + } + + if err := d.Set("memberships", result); err != nil { + return fmt.Errorf("Error setting memberships: %s", err) + } + d.SetId(time.Now().UTC().String()) + return nil +} + +func flattenCloudIdentityGroupMembershipsRoles(roles []*cloudidentity.MembershipRole) []interface{} { + transformed := []interface{}{} + + for _, role := range roles { + transformed = append(transformed, map[string]interface{}{ + "name": role.Name, + }) + } + return transformed +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_cloud_identity_groups.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/data_source_cloud_identity_groups.go similarity index 75% rename from 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_cloud_identity_groups.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/data_source_cloud_identity_groups.go index 1dd38e8cb0..29e6b7029c 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_cloud_identity_groups.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/data_source_cloud_identity_groups.go @@ -1,7 +1,11 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package cloudidentity import ( "fmt" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -10,7 +14,7 @@ import ( func DataSourceGoogleCloudIdentityGroups() *schema.Resource { // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceCloudIdentityGroup().Schema) + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceCloudIdentityGroup().Schema) return &schema.Resource{ Read: dataSourceGoogleCloudIdentityGroupsRead, @@ -39,8 +43,8 @@ groups or customers/{customer_id} for Google Groups.`, } func dataSourceGoogleCloudIdentityGroupsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -50,12 +54,12 @@ func dataSourceGoogleCloudIdentityGroupsRead(d *schema.ResourceData, meta interf if config.UserProjectOverride { billingProject := "" // err may be nil - project isn't required for this resource - if project, err := 
getProject(d, config); err == nil { + if project, err := tpgresource.GetProject(d, config); err == nil { billingProject = project } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } @@ -63,7 +67,7 @@ func dataSourceGoogleCloudIdentityGroupsRead(d *schema.ResourceData, meta interf groupsCall.Header().Set("X-Goog-User-Project", billingProject) } } - err = groupsCall.Pages(config.context, func(resp *cloudidentity.ListGroupsResponse) error { + err = groupsCall.Pages(config.Context, func(resp *cloudidentity.ListGroupsResponse) error { for _, group := range resp.Groups { result = append(result, map[string]interface{}{ "name": group.Name, @@ -77,7 +81,7 @@ func dataSourceGoogleCloudIdentityGroupsRead(d *schema.ResourceData, meta interf return nil }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudIdentityGroups %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudIdentityGroups %q", d.Id())) } if err := d.Set("groups", result); err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/resource_cloud_identity_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/resource_cloud_identity_group.go new file mode 100644 index 0000000000..557e5f6fa9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/resource_cloud_identity_group.go @@ -0,0 +1,609 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudidentity + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceCloudIdentityGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudIdentityGroupCreate, + Read: resourceCloudIdentityGroupRead, + Update: resourceCloudIdentityGroupUpdate, + Delete: resourceCloudIdentityGroupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudIdentityGroupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "group_key": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `EntityKey of the Group.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the entity. + +For Google-managed entities, the id must be the email address of an existing +group or user. 
+ +For external-identity-mapped entities, the id must be a string conforming +to the Identity Source's requirements. + +Must be unique within a namespace.`, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The namespace in which the entity exists. + +If not specified, the EntityKey represents a Google-managed entity +such as a Google user or a Google Group. + +If specified, the EntityKey represents an external-identity-mapped group. +The namespace must correspond to an identity source created in Admin Console +and must be in the form of 'identitysources/{identity_source_id}'.`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Required: true, + Description: `One or more label entries that apply to the Group. Currently supported labels contain a key with an empty value. + +Google Groups are the default type of group and have a label with a key of cloudidentity.googleapis.com/groups.discussion_forum and an empty value. + +Existing Google Groups can have an additional label with a key of cloudidentity.googleapis.com/groups.security and an empty value added to them. This is an immutable change and the security label cannot be removed once added. + +Dynamic groups have a label with a key of cloudidentity.googleapis.com/groups.dynamic. + +Identity-mapped groups for Cloud Search have a label with a key of system/groups/external and an empty value.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name of the entity under which this Group resides in the +Cloud Identity resource hierarchy. + +Must be of the form identitysources/{identity_source_id} for external-identity-mapped +groups or customers/{customer_id} for Google Groups.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An extended description to help users determine the purpose of a Group. 
+Must not be longer than 4,096 characters.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `The display name of the Group.`, + }, + "initial_group_config": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"INITIAL_GROUP_CONFIG_UNSPECIFIED", "WITH_INITIAL_OWNER", "EMPTY", ""}), + Description: `The initial configuration options for creating a Group. + +See the +[API reference](https://cloud.google.com/identity/docs/reference/rest/v1beta1/groups/create#initialgroupconfig) +for possible values. Default value: "EMPTY" Possible values: ["INITIAL_GROUP_CONFIG_UNSPECIFIED", "WITH_INITIAL_OWNER", "EMPTY"]`, + Default: "EMPTY", + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the Group was created.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Resource name of the Group in the format: groups/{group_id}, where group_id +is the unique ID assigned to the Group.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the Group was last updated.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudIdentityGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + groupKeyProp, err := expandCloudIdentityGroupGroupKey(d.Get("group_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("group_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(groupKeyProp)) && (ok || !reflect.DeepEqual(v, groupKeyProp)) { + obj["groupKey"] = groupKeyProp + } + parentProp, err := expandCloudIdentityGroupParent(d.Get("parent"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { + obj["parent"] = parentProp + } + displayNameProp, err := expandCloudIdentityGroupDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandCloudIdentityGroupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCloudIdentityGroupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIdentityBasePath}}groups?initialGroupConfig={{initial_group_config}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Group: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Group: %s", err) + } + if err := d.Set("name", flattenCloudIdentityGroupName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // 
Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + err = transport_tpg.PollingWaitTime(resourceCloudIdentityGroupPollRead(d, meta), transport_tpg.PollCheckForExistenceWith403, "Creating Group", d.Timeout(schema.TimeoutCreate), 10) + if err != nil { + return fmt.Errorf("Error waiting to create Group: %s", err) + } + + log.Printf("[DEBUG] Finished creating Group %q: %#v", d.Id(), res) + + return resourceCloudIdentityGroupRead(d, meta) +} + +func resourceCloudIdentityGroupPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") + if err != nil { + return nil, err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return res, err + 
} + return res, nil + } +} + +func resourceCloudIdentityGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudIdentityGroup %q", d.Id())) + } + + if err := d.Set("name", flattenCloudIdentityGroupName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("group_key", flattenCloudIdentityGroupGroupKey(res["groupKey"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("parent", flattenCloudIdentityGroupParent(res["parent"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("display_name", flattenCloudIdentityGroupDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("description", flattenCloudIdentityGroupDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("create_time", flattenCloudIdentityGroupCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("update_time", flattenCloudIdentityGroupUpdateTime(res["updateTime"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("labels", flattenCloudIdentityGroupLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + + return nil +} + +func resourceCloudIdentityGroupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + displayNameProp, err := expandCloudIdentityGroupDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandCloudIdentityGroupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCloudIdentityGroupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Group %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so 
ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Group %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Group %q: %#v", d.Id(), res) + } + + err = transport_tpg.PollingWaitTime(resourceCloudIdentityGroupPollRead(d, meta), transport_tpg.PollCheckForExistenceWith403, "Updating Group", d.Timeout(schema.TimeoutUpdate), 10) + if err != nil { + return err + } + + return resourceCloudIdentityGroupRead(d, meta) +} + +func resourceCloudIdentityGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Group %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Group") + } + + err 
= transport_tpg.PollingWaitTime(resourceCloudIdentityGroupPollRead(d, meta), transport_tpg.PollCheckForAbsenceWith403, "Deleting Group", d.Timeout(schema.TimeoutCreate), 10) + if err != nil { + return fmt.Errorf("Error waiting to delete Group: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Group %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudIdentityGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + name := d.Get("name").(string) + + if d.Get("initial_group_config") == nil { + d.Set("initial_group_config", "EMPTY") + } + + if err := d.Set("name", name); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name) + return []*schema.ResourceData{d}, nil +} + +func flattenCloudIdentityGroupName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupGroupKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["id"] = + flattenCloudIdentityGroupGroupKeyId(original["id"], d, config) + transformed["namespace"] = + flattenCloudIdentityGroupGroupKeyNamespace(original["namespace"], d, config) + return []interface{}{transformed} +} +func flattenCloudIdentityGroupGroupKeyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupGroupKeyNamespace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupParent(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCloudIdentityGroupGroupKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandCloudIdentityGroupGroupKeyId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedNamespace, err := expandCloudIdentityGroupGroupKeyNamespace(original["namespace"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["namespace"] = transformedNamespace + } + + return transformed, nil +} + +func expandCloudIdentityGroupGroupKeyId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIdentityGroupGroupKeyNamespace(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIdentityGroupParent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIdentityGroupDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIdentityGroupDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIdentityGroupLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/resource_cloud_identity_group_membership.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/resource_cloud_identity_group_membership.go new file mode 100644 index 0000000000..1e7c042b22 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/resource_cloud_identity_group_membership.go @@ -0,0 +1,533 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudidentity + +import ( + "fmt" + "log" + "reflect" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceCloudIdentityGroupMembership() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudIdentityGroupMembershipCreate, + Read: resourceCloudIdentityGroupMembershipRead, + Update: resourceCloudIdentityGroupMembershipUpdate, + Delete: resourceCloudIdentityGroupMembershipDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudIdentityGroupMembershipImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Group to create this membership in.`, + }, + "roles": { + Type: schema.TypeSet, + Required: true, + Description: `The MembershipRoles that apply to the Membership. +Must not contain duplicate MembershipRoles with the same name.`, + Elem: cloudidentityGroupMembershipRolesSchema(), + // Default schema.HashSchema is used. + }, + "preferred_member_key": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `EntityKey of the member.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the entity. 
+ +For Google-managed entities, the id must be the email address of an existing +group or user. + +For external-identity-mapped entities, the id must be a string conforming +to the Identity Source's requirements. + +Must be unique within a namespace.`, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The namespace in which the entity exists. + +If not specified, the EntityKey represents a Google-managed entity +such as a Google user or a Google Group. + +If specified, the EntityKey represents an external-identity-mapped group. +The namespace must correspond to an identity source created in Admin Console +and must be in the form of 'identitysources/{identity_source_id}'.`, + }, + }, + }, + ExactlyOneOf: []string{"preferred_member_key"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the Membership was created.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the Membership, of the form groups/{group_id}/memberships/{membership_id}.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `The type of the membership.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the Membership was last updated.`, + }, + }, + UseJSONNumber: true, + } +} + +func cloudidentityGroupMembershipRolesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"OWNER", "MANAGER", "MEMBER"}), + Description: `The name of the MembershipRole. Must be one of OWNER, MANAGER, MEMBER. 
Possible values: ["OWNER", "MANAGER", "MEMBER"]`, + }, + }, + } +} + +func resourceCloudIdentityGroupMembershipCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + preferredMemberKeyProp, err := expandCloudIdentityGroupMembershipPreferredMemberKey(d.Get("preferred_member_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("preferred_member_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(preferredMemberKeyProp)) && (ok || !reflect.DeepEqual(v, preferredMemberKeyProp)) { + obj["preferredMemberKey"] = preferredMemberKeyProp + } + rolesProp, err := expandCloudIdentityGroupMembershipRoles(d.Get("roles"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("roles"); !tpgresource.IsEmptyValue(reflect.ValueOf(rolesProp)) && (ok || !reflect.DeepEqual(v, rolesProp)) { + obj["roles"] = rolesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIdentityBasePath}}{{group}}/memberships") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new GroupMembership: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating GroupMembership: %s", err) + } + if err := d.Set("name", flattenCloudIdentityGroupMembershipName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := 
tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating GroupMembership %q: %#v", d.Id(), res) + + return resourceCloudIdentityGroupMembershipRead(d, meta) +} + +func resourceCloudIdentityGroupMembershipRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(transformCloudIdentityGroupMembershipReadError(err), d, fmt.Sprintf("CloudIdentityGroupMembership %q", d.Id())) + } + + if err := d.Set("name", flattenCloudIdentityGroupMembershipName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading GroupMembership: %s", err) + } + if err := d.Set("preferred_member_key", 
flattenCloudIdentityGroupMembershipPreferredMemberKey(res["preferredMemberKey"], d, config)); err != nil { + return fmt.Errorf("Error reading GroupMembership: %s", err) + } + if err := d.Set("create_time", flattenCloudIdentityGroupMembershipCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading GroupMembership: %s", err) + } + if err := d.Set("update_time", flattenCloudIdentityGroupMembershipUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading GroupMembership: %s", err) + } + if err := d.Set("roles", flattenCloudIdentityGroupMembershipRoles(res["roles"], d, config)); err != nil { + return fmt.Errorf("Error reading GroupMembership: %s", err) + } + if err := d.Set("type", flattenCloudIdentityGroupMembershipType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading GroupMembership: %s", err) + } + + return nil +} + +func resourceCloudIdentityGroupMembershipUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + d.Partial(true) + + if d.HasChange("roles") { + obj := make(map[string]interface{}) + + rolesProp, err := expandCloudIdentityGroupMembershipRoles(d.Get("roles"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("roles"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rolesProp)) { + obj["roles"] = rolesProp + } + + obj, err = resourceCloudIdentityGroupMembershipUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}:modifyMembershipRoles") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + 
res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating GroupMembership %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating GroupMembership %q: %#v", d.Id(), res) + } + + } + + d.Partial(false) + + return resourceCloudIdentityGroupMembershipRead(d, meta) +} + +func resourceCloudIdentityGroupMembershipDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting GroupMembership %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "GroupMembership") + } + + log.Printf("[DEBUG] Finished deleting GroupMembership %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudIdentityGroupMembershipImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P.+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if 
err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Configure "group" property, which does not appear in the response body. + group := regexp.MustCompile(`groups/[^/]+`).FindString(id) + if err := d.Set("group", group); err != nil { + return nil, fmt.Errorf("Error setting group property: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenCloudIdentityGroupMembershipName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupMembershipPreferredMemberKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["id"] = + flattenCloudIdentityGroupMembershipPreferredMemberKeyId(original["id"], d, config) + transformed["namespace"] = + flattenCloudIdentityGroupMembershipPreferredMemberKeyNamespace(original["namespace"], d, config) + return []interface{}{transformed} +} +func flattenCloudIdentityGroupMembershipPreferredMemberKeyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupMembershipPreferredMemberKeyNamespace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupMembershipCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupMembershipUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupMembershipRoles(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := 
schema.NewSet(schema.HashResource(cloudidentityGroupMembershipRolesSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "name": flattenCloudIdentityGroupMembershipRolesName(original["name"], d, config), + }) + } + return transformed +} +func flattenCloudIdentityGroupMembershipRolesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdentityGroupMembershipType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCloudIdentityGroupMembershipPreferredMemberKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandCloudIdentityGroupMembershipPreferredMemberKeyId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedNamespace, err := expandCloudIdentityGroupMembershipPreferredMemberKeyNamespace(original["namespace"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["namespace"] = transformedNamespace + } + + return transformed, nil +} + +func expandCloudIdentityGroupMembershipPreferredMemberKeyId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIdentityGroupMembershipPreferredMemberKeyNamespace(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIdentityGroupMembershipRoles(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudIdentityGroupMembershipRolesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudIdentityGroupMembershipRolesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceCloudIdentityGroupMembershipUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Return object for modifyMembershipRoles (we build request object from scratch, without using `obj`) + b, a := d.GetChange("roles") + before := b.(*schema.Set) + after := a.(*schema.Set) + // ref: https://cloud.google.com/identity/docs/reference/rest/v1/groups.memberships/modifyMembershipRoles#request-body + addRoles := after.Difference(before).List() + var removeRoles []string + for _, r := range before.Difference(after).List() { + removeRoles = append(removeRoles, r.(map[string]interface{})["name"].(string)) + } + req := map[string]interface{}{"addRoles": addRoles, "removeRoles": removeRoles} + return req, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/resource_cloud_identity_group_sweeper.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/resource_cloud_identity_group_sweeper.go new file mode 100644 index 0000000000..f25b5e6993 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudidentity/resource_cloud_identity_group_sweeper.go @@ -0,0 +1,125 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package cloudidentity + +import ( + "context" + "fmt" + "log" + "net/url" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudIdentityGroup", testSweepCloudIdentityGroup) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudIdentityGroup(region string) error { + resourceName := "CloudIdentityGroup" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + custId := envvar.GetTestCustIdFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "parent": url.PathEscape(fmt.Sprintf("customers/%s", custId)), + }, + } + + listTemplate := "https://cloudidentity.googleapis.com/v1/groups?parent={{parent}}" + listUrl, err := tpgresource.ReplaceVars(d, 
config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["groups"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["displayName"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := obj["name"].(string) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(obj["displayName"].(string)) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudidentity.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were 
non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudids/cloud_ids_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudids/cloud_ids_operation.go new file mode 100644 index 0000000000..2217856ee9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudids/cloud_ids_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudids + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type CloudIdsOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *CloudIdsOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.CloudIdsBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createCloudIdsWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*CloudIdsOperationWaiter, error) { + w := &CloudIdsOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func CloudIdsOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createCloudIdsWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func CloudIdsOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createCloudIdsWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudids/resource_cloud_ids_endpoint.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudids/resource_cloud_ids_endpoint.go new file mode 100644 index 0000000000..6b4b3a9e4b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudids/resource_cloud_ids_endpoint.go @@ -0,0 +1,501 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudids + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceCloudIdsEndpoint() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudIdsEndpointCreate, + Read: resourceCloudIdsEndpointRead, + Update: resourceCloudIdsEndpointUpdate, + Delete: resourceCloudIdsEndpointDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudIdsEndpointImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location for the endpoint.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the endpoint in the format projects/{project_id}/locations/{locationId}/endpoints/{endpointId}.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the VPC network that is connected to the IDS endpoint. This can either contain the VPC network name itself (like "src-net") or the full URL to the network (like "projects/{project_id}/global/networks/src-net").`, + }, + "severity": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"INFORMATIONAL", "LOW", "MEDIUM", "HIGH", "CRITICAL"}), + Description: `The minimum alert severity level that is reported by the endpoint. 
Possible values: ["INFORMATIONAL", "LOW", "MEDIUM", "HIGH", "CRITICAL"]`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of the endpoint.`, + }, + "threat_exceptions": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for threat IDs excluded from generating alerts. Limit: 99 IDs.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC 3339 text format.`, + }, + "endpoint_forwarding_rule": { + Type: schema.TypeString, + Computed: true, + Description: `URL of the endpoint's network address to which traffic is to be sent by Packet Mirroring.`, + }, + "endpoint_ip": { + Type: schema.TypeString, + Computed: true, + Description: `Internal IP address of the endpoint's network entry point.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Last update timestamp in RFC 3339 text format.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudIdsEndpointCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandCloudIdsEndpointName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + networkProp, err := expandCloudIdsEndpointNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, 
networkProp)) { + obj["network"] = networkProp + } + descriptionProp, err := expandCloudIdsEndpointDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + severityProp, err := expandCloudIdsEndpointSeverity(d.Get("severity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("severity"); !tpgresource.IsEmptyValue(reflect.ValueOf(severityProp)) && (ok || !reflect.DeepEqual(v, severityProp)) { + obj["severity"] = severityProp + } + threatExceptionsProp, err := expandCloudIdsEndpointThreatExceptions(d.Get("threat_exceptions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("threat_exceptions"); !tpgresource.IsEmptyValue(reflect.ValueOf(threatExceptionsProp)) && (ok || !reflect.DeepEqual(v, threatExceptionsProp)) { + obj["threatExceptions"] = threatExceptionsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIdsBasePath}}projects/{{project}}/locations/{{location}}/endpoints?endpointId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Endpoint: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Endpoint: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Endpoint: %s", err) + } + + // Store the ID 
now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = CloudIdsOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Endpoint", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Endpoint: %s", err) + } + + if err := d.Set("name", flattenCloudIdsEndpointName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Endpoint %q: %#v", d.Id(), res) + + return resourceCloudIdsEndpointRead(d, meta) +} + +func resourceCloudIdsEndpointRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIdsBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Endpoint: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + 
Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudIdsEndpoint %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + + if err := d.Set("name", flattenCloudIdsEndpointName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("create_time", flattenCloudIdsEndpointCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("update_time", flattenCloudIdsEndpointUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("network", flattenCloudIdsEndpointNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("description", flattenCloudIdsEndpointDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("endpoint_forwarding_rule", flattenCloudIdsEndpointEndpointForwardingRule(res["endpointForwardingRule"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("endpoint_ip", flattenCloudIdsEndpointEndpointIp(res["endpointIp"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("severity", flattenCloudIdsEndpointSeverity(res["severity"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("threat_exceptions", flattenCloudIdsEndpointThreatExceptions(res["threatExceptions"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + + return nil +} + +func resourceCloudIdsEndpointUpdate(d *schema.ResourceData, meta interface{}) 
error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Endpoint: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + threatExceptionsProp, err := expandCloudIdsEndpointThreatExceptions(d.Get("threat_exceptions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("threat_exceptions"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, threatExceptionsProp)) { + obj["threatExceptions"] = threatExceptionsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIdsBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Endpoint %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("threat_exceptions") { + updateMask = append(updateMask, "threatExceptions") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Endpoint %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Endpoint %q: %#v", d.Id(), res) + } + + err = CloudIdsOperationWaitTime( + config, res, 
project, "Updating Endpoint", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceCloudIdsEndpointRead(d, meta) +} + +func resourceCloudIdsEndpointDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Endpoint: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIdsBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Endpoint %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Endpoint") + } + + err = CloudIdsOperationWaitTime( + config, res, project, "Deleting Endpoint", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Endpoint %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudIdsEndpointImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/endpoints/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // 
Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCloudIdsEndpointName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + parts := strings.Split(d.Get("name").(string), "/") + return parts[len(parts)-1] +} + +func flattenCloudIdsEndpointCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdsEndpointUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdsEndpointNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdsEndpointDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdsEndpointEndpointForwardingRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdsEndpointEndpointIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdsEndpointSeverity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIdsEndpointThreatExceptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCloudIdsEndpointName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") +} + +func expandCloudIdsEndpointNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandCloudIdsEndpointDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIdsEndpointSeverity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIdsEndpointThreatExceptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/iam_cloudiot_registry.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/iam_cloudiot_registry.go new file mode 100644 index 0000000000..84b3210e7b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/iam_cloudiot_registry.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudiot + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var CloudIotDeviceRegistryIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type CloudIotDeviceRegistryIamUpdater struct { + project string + region string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func CloudIotDeviceRegistryIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + } + values["region"] = region + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/registries/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudIotDeviceRegistryIamUpdater{ + project: values["project"], + region: values["region"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", u.region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func CloudIotDeviceRegistryIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + values["region"] = region + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/registries/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudIotDeviceRegistryIamUpdater{ + project: values["project"], + region: values["region"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *CloudIotDeviceRegistryIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyDeviceRegistryUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, 
err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *CloudIotDeviceRegistryIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyDeviceRegistryUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *CloudIotDeviceRegistryIamUpdater) qualifyDeviceRegistryUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{CloudIotBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/registries/%s", u.project, u.region, u.name), 
methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *CloudIotDeviceRegistryIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/registries/%s", u.project, u.region, u.name) +} + +func (u *CloudIotDeviceRegistryIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-cloudiot-deviceregistry-%s", u.GetResourceId()) +} + +func (u *CloudIotDeviceRegistryIamUpdater) DescribeResource() string { + return fmt.Sprintf("cloudiot deviceregistry %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/resource_cloudiot_device.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/resource_cloudiot_device.go new file mode 100644 index 0000000000..edd6a7cb6b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/resource_cloudiot_device.go @@ -0,0 +1,959 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudiot + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceCloudIotDevice() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudIotDeviceCreate, + Read: resourceCloudIotDeviceRead, + Update: resourceCloudIotDeviceUpdate, + Delete: resourceCloudIotDeviceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudIotDeviceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A unique name for the resource.`, + }, + "registry": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the device registry where this device should be created.`, + }, + "blocked": { + Type: schema.TypeBool, + Optional: true, + Description: `If a device is blocked, connections or requests from this device will fail.`, + }, + "credentials": { + Type: schema.TypeList, + Optional: true, + Description: `The credentials used to authenticate this device.`, + MaxItems: 3, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "public_key": { + Type: schema.TypeList, + Required: true, + Description: `A public key used to verify the signature of JSON Web Tokens (JWTs).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "format": { + Type: schema.TypeString, + Required: true, + ValidateFunc: 
verify.ValidateEnum([]string{"RSA_PEM", "RSA_X509_PEM", "ES256_PEM", "ES256_X509_PEM"}), + Description: `The format of the key. Possible values: ["RSA_PEM", "RSA_X509_PEM", "ES256_PEM", "ES256_X509_PEM"]`, + }, + "key": { + Type: schema.TypeString, + Required: true, + Description: `The key data.`, + }, + }, + }, + }, + "expiration_time": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The time at which this credential becomes invalid.`, + }, + }, + }, + }, + "gateway_config": { + Type: schema.TypeList, + Optional: true, + Description: `Gateway-related configuration and state.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gateway_auth_method": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ASSOCIATION_ONLY", "DEVICE_AUTH_TOKEN_ONLY", "ASSOCIATION_AND_DEVICE_AUTH_TOKEN", ""}), + Description: `Indicates whether the device is a gateway. Possible values: ["ASSOCIATION_ONLY", "DEVICE_AUTH_TOKEN_ONLY", "ASSOCIATION_AND_DEVICE_AUTH_TOKEN"]`, + }, + "gateway_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"GATEWAY", "NON_GATEWAY", ""}), + Description: `Indicates whether the device is a gateway. Default value: "NON_GATEWAY" Possible values: ["GATEWAY", "NON_GATEWAY"]`, + Default: "NON_GATEWAY", + }, + "last_accessed_gateway_id": { + Type: schema.TypeString, + Computed: true, + Description: `The ID of the gateway the device accessed most recently.`, + }, + "last_accessed_gateway_time": { + Type: schema.TypeString, + Computed: true, + Description: `The most recent time at which the device accessed the gateway specified in last_accessed_gateway.`, + }, + }, + }, + }, + "log_level": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NONE", "ERROR", "INFO", "DEBUG", ""}), + Description: `The logging verbosity for device activity. 
Possible values: ["NONE", "ERROR", "INFO", "DEBUG"]`, + }, + "metadata": { + Type: schema.TypeMap, + Optional: true, + Description: `The metadata key-value pairs assigned to the device.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "config": { + Type: schema.TypeList, + Computed: true, + Description: `The most recent device configuration, which is eventually sent from Cloud IoT Core to the device.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "binary_data": { + Type: schema.TypeString, + Optional: true, + Description: `The device configuration data.`, + }, + "cloud_update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which this configuration version was updated in Cloud IoT Core.`, + }, + "device_ack_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which Cloud IoT Core received the acknowledgment from the device, +indicating that the device has received this configuration version.`, + }, + "version": { + Type: schema.TypeString, + Computed: true, + Description: `The version of this update.`, + }, + }, + }, + }, + "last_config_ack_time": { + Type: schema.TypeString, + Computed: true, + Description: `The last time a cloud-to-device config version acknowledgment was received from the device.`, + }, + "last_config_send_time": { + Type: schema.TypeString, + Computed: true, + Description: `The last time a cloud-to-device config version was sent to the device.`, + }, + "last_error_status": { + Type: schema.TypeList, + Computed: true, + Description: `The error message of the most recent error, such as a failure to publish to Cloud Pub/Sub.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "details": { + Type: schema.TypeList, + Optional: true, + Description: `A list of messages that carry the error details.`, + Elem: &schema.Schema{ + Type: schema.TypeMap, + }, + }, + "message": { + Type: schema.TypeString, + Optional: true, + Description: `A 
developer-facing error message, which should be in English.`, + }, + "number": { + Type: schema.TypeInt, + Optional: true, + Description: `The status code, which should be an enum value of google.rpc.Code.`, + }, + }, + }, + }, + "last_error_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time the most recent error occurred, such as a failure to publish to Cloud Pub/Sub.`, + }, + "last_event_time": { + Type: schema.TypeString, + Computed: true, + Description: `The last time a telemetry event was received.`, + }, + "last_heartbeat_time": { + Type: schema.TypeString, + Computed: true, + Description: `The last time an MQTT PINGREQ was received.`, + }, + "last_state_time": { + Type: schema.TypeString, + Computed: true, + Description: `The last time a state event was received.`, + }, + "num_id": { + Type: schema.TypeString, + Computed: true, + Description: `A server-defined unique numeric ID for the device. +This is a more compact way to identify devices, and it is globally unique.`, + }, + "state": { + Type: schema.TypeList, + Computed: true, + Description: `The state most recently received from the device.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "binary_data": { + Type: schema.TypeString, + Optional: true, + Description: `The device state data.`, + }, + "update_time": { + Type: schema.TypeString, + Optional: true, + Description: `The time at which this state version was updated in Cloud IoT Core.`, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudIotDeviceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + idProp, err := expandCloudIotDeviceName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + credentialsProp, err := expandCloudIotDeviceCredentials(d.Get("credentials"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("credentials"); !tpgresource.IsEmptyValue(reflect.ValueOf(credentialsProp)) && (ok || !reflect.DeepEqual(v, credentialsProp)) { + obj["credentials"] = credentialsProp + } + blockedProp, err := expandCloudIotDeviceBlocked(d.Get("blocked"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("blocked"); !tpgresource.IsEmptyValue(reflect.ValueOf(blockedProp)) && (ok || !reflect.DeepEqual(v, blockedProp)) { + obj["blocked"] = blockedProp + } + logLevelProp, err := expandCloudIotDeviceLogLevel(d.Get("log_level"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("log_level"); !tpgresource.IsEmptyValue(reflect.ValueOf(logLevelProp)) && (ok || !reflect.DeepEqual(v, logLevelProp)) { + obj["logLevel"] = logLevelProp + } + metadataProp, err := expandCloudIotDeviceMetadata(d.Get("metadata"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(metadataProp)) && (ok || !reflect.DeepEqual(v, metadataProp)) { + obj["metadata"] = metadataProp + } + gatewayConfigProp, err := expandCloudIotDeviceGatewayConfig(d.Get("gateway_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("gateway_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(gatewayConfigProp)) && (ok || !reflect.DeepEqual(v, gatewayConfigProp)) { + obj["gatewayConfig"] = gatewayConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIotBasePath}}{{registry}}/devices") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Device: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Device: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{registry}}/devices/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Device %q: %#v", d.Id(), res) + + return resourceCloudIotDeviceRead(d, meta) +} + +func resourceCloudIotDeviceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIotBasePath}}{{registry}}/devices/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudIotDevice %q", d.Id())) + } + + if err := d.Set("name", flattenCloudIotDeviceName(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("num_id", flattenCloudIotDeviceNumId(res["numId"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("credentials", flattenCloudIotDeviceCredentials(res["credentials"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("last_heartbeat_time", flattenCloudIotDeviceLastHeartbeatTime(res["lastHeartbeatTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("last_event_time", flattenCloudIotDeviceLastEventTime(res["lastEventTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("last_state_time", flattenCloudIotDeviceLastStateTime(res["lastStateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("last_config_ack_time", flattenCloudIotDeviceLastConfigAckTime(res["lastConfigAckTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("last_config_send_time", flattenCloudIotDeviceLastConfigSendTime(res["lastConfigSendTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("blocked", flattenCloudIotDeviceBlocked(res["blocked"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("last_error_time", flattenCloudIotDeviceLastErrorTime(res["lastErrorTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("last_error_status", flattenCloudIotDeviceLastErrorStatus(res["lastErrorStatus"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("config", flattenCloudIotDeviceConfig(res["config"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("state", flattenCloudIotDeviceState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("log_level", flattenCloudIotDeviceLogLevel(res["logLevel"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("metadata", 
flattenCloudIotDeviceMetadata(res["metadata"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + if err := d.Set("gateway_config", flattenCloudIotDeviceGatewayConfig(res["gatewayConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Device: %s", err) + } + + return nil +} + +func resourceCloudIotDeviceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + credentialsProp, err := expandCloudIotDeviceCredentials(d.Get("credentials"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("credentials"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, credentialsProp)) { + obj["credentials"] = credentialsProp + } + blockedProp, err := expandCloudIotDeviceBlocked(d.Get("blocked"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("blocked"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, blockedProp)) { + obj["blocked"] = blockedProp + } + logLevelProp, err := expandCloudIotDeviceLogLevel(d.Get("log_level"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("log_level"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, logLevelProp)) { + obj["logLevel"] = logLevelProp + } + metadataProp, err := expandCloudIotDeviceMetadata(d.Get("metadata"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metadataProp)) { + obj["metadata"] = metadataProp + } + gatewayConfigProp, err := expandCloudIotDeviceGatewayConfig(d.Get("gateway_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("gateway_config"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gatewayConfigProp)) { + obj["gatewayConfig"] = gatewayConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIotBasePath}}{{registry}}/devices/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Device %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("credentials") { + updateMask = append(updateMask, "credentials") + } + + if d.HasChange("blocked") { + updateMask = append(updateMask, "blocked") + } + + if d.HasChange("log_level") { + updateMask = append(updateMask, "logLevel") + } + + if d.HasChange("metadata") { + updateMask = append(updateMask, "metadata") + } + + if d.HasChange("gateway_config") { + updateMask = append(updateMask, "gateway_config.gateway_auth_method") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Device %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Device %q: %#v", d.Id(), res) + } + + return resourceCloudIotDeviceRead(d, meta) +} + +func resourceCloudIotDeviceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := 
tpgresource.ReplaceVars(d, config, "{{CloudIotBasePath}}{{registry}}/devices/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Device %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Device") + } + + log.Printf("[DEBUG] Finished deleting Device %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudIotDeviceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/devices/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{registry}}/devices/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCloudIotDeviceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceNumId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceCredentials(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + 
} + transformed = append(transformed, map[string]interface{}{ + "expiration_time": flattenCloudIotDeviceCredentialsExpirationTime(original["expirationTime"], d, config), + "public_key": flattenCloudIotDeviceCredentialsPublicKey(original["publicKey"], d, config), + }) + } + return transformed +} +func flattenCloudIotDeviceCredentialsExpirationTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceCredentialsPublicKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["format"] = + flattenCloudIotDeviceCredentialsPublicKeyFormat(original["format"], d, config) + transformed["key"] = + flattenCloudIotDeviceCredentialsPublicKeyKey(original["key"], d, config) + return []interface{}{transformed} +} +func flattenCloudIotDeviceCredentialsPublicKeyFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceCredentialsPublicKeyKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceLastHeartbeatTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceLastEventTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceLastStateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceLastConfigAckTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceLastConfigSendTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} 
+ +func flattenCloudIotDeviceBlocked(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceLastErrorTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceLastErrorStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["number"] = + flattenCloudIotDeviceLastErrorStatusNumber(original["number"], d, config) + transformed["message"] = + flattenCloudIotDeviceLastErrorStatusMessage(original["message"], d, config) + transformed["details"] = + flattenCloudIotDeviceLastErrorStatusDetails(original["details"], d, config) + return []interface{}{transformed} +} +func flattenCloudIotDeviceLastErrorStatusNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudIotDeviceLastErrorStatusMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceLastErrorStatusDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + 
transformed["version"] = + flattenCloudIotDeviceConfigVersion(original["version"], d, config) + transformed["cloud_update_time"] = + flattenCloudIotDeviceConfigCloudUpdateTime(original["cloudUpdateTime"], d, config) + transformed["device_ack_time"] = + flattenCloudIotDeviceConfigDeviceAckTime(original["deviceAckTime"], d, config) + transformed["binary_data"] = + flattenCloudIotDeviceConfigBinaryData(original["binaryData"], d, config) + return []interface{}{transformed} +} +func flattenCloudIotDeviceConfigVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceConfigCloudUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceConfigDeviceAckTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceConfigBinaryData(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["update_time"] = + flattenCloudIotDeviceStateUpdateTime(original["updateTime"], d, config) + transformed["binary_data"] = + flattenCloudIotDeviceStateBinaryData(original["binaryData"], d, config) + return []interface{}{transformed} +} +func flattenCloudIotDeviceStateUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceStateBinaryData(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceLogLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} 
+ +func flattenCloudIotDeviceMetadata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceGatewayConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["gateway_type"] = + flattenCloudIotDeviceGatewayConfigGatewayType(original["gatewayType"], d, config) + transformed["gateway_auth_method"] = + flattenCloudIotDeviceGatewayConfigGatewayAuthMethod(original["gatewayAuthMethod"], d, config) + transformed["last_accessed_gateway_id"] = + flattenCloudIotDeviceGatewayConfigLastAccessedGatewayId(original["lastAccessedGatewayId"], d, config) + transformed["last_accessed_gateway_time"] = + flattenCloudIotDeviceGatewayConfigLastAccessedGatewayTime(original["lastAccessedGatewayTime"], d, config) + return []interface{}{transformed} +} +func flattenCloudIotDeviceGatewayConfigGatewayType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceGatewayConfigGatewayAuthMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceGatewayConfigLastAccessedGatewayId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceGatewayConfigLastAccessedGatewayTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCloudIotDeviceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceCredentials(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := 
make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExpirationTime, err := expandCloudIotDeviceCredentialsExpirationTime(original["expiration_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpirationTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expirationTime"] = transformedExpirationTime + } + + transformedPublicKey, err := expandCloudIotDeviceCredentialsPublicKey(original["public_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicKey"] = transformedPublicKey + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudIotDeviceCredentialsExpirationTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceCredentialsPublicKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFormat, err := expandCloudIotDeviceCredentialsPublicKeyFormat(original["format"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["format"] = transformedFormat + } + + transformedKey, err := expandCloudIotDeviceCredentialsPublicKeyKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + return transformed, 
nil +} + +func expandCloudIotDeviceCredentialsPublicKeyFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceCredentialsPublicKeyKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceBlocked(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceLogLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceMetadata(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudIotDeviceGatewayConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedGatewayType, err := expandCloudIotDeviceGatewayConfigGatewayType(original["gateway_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGatewayType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gatewayType"] = transformedGatewayType + } + + transformedGatewayAuthMethod, err := expandCloudIotDeviceGatewayConfigGatewayAuthMethod(original["gateway_auth_method"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGatewayAuthMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gatewayAuthMethod"] = 
transformedGatewayAuthMethod + } + + transformedLastAccessedGatewayId, err := expandCloudIotDeviceGatewayConfigLastAccessedGatewayId(original["last_accessed_gateway_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLastAccessedGatewayId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["lastAccessedGatewayId"] = transformedLastAccessedGatewayId + } + + transformedLastAccessedGatewayTime, err := expandCloudIotDeviceGatewayConfigLastAccessedGatewayTime(original["last_accessed_gateway_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLastAccessedGatewayTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["lastAccessedGatewayTime"] = transformedLastAccessedGatewayTime + } + + return transformed, nil +} + +func expandCloudIotDeviceGatewayConfigGatewayType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceGatewayConfigGatewayAuthMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceGatewayConfigLastAccessedGatewayId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceGatewayConfigLastAccessedGatewayTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/resource_cloudiot_device_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/resource_cloudiot_device_sweeper.go new file mode 100644 index 0000000000..21c95cd94f --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/resource_cloudiot_device_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudiot + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudIotDevice", testSweepCloudIotDevice) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudIotDevice(region string) error { + resourceName := "CloudIotDevice" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": 
config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://cloudiot.googleapis.com/v1/{{registry}}/devices", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["devices"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudiot.googleapis.com/v1/{{registry}}/devices/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/resource_cloudiot_registry.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/resource_cloudiot_registry.go new file mode 100644 index 0000000000..0e2879ba08 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/resource_cloudiot_registry.go @@ -0,0 +1,880 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudiot + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func expandCloudIotDeviceRegistryHTTPConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHTTPEnabledState, err := expandCloudIotDeviceRegistryHTTPEnabledState(original["http_enabled_state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHTTPEnabledState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["httpEnabledState"] = transformedHTTPEnabledState + } + + return transformed, nil +} + +func expandCloudIotDeviceRegistryHTTPEnabledState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceRegistryMqttConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + + 
transformedMqttEnabledState, err := expandCloudIotDeviceRegistryMqttEnabledState(original["mqtt_enabled_state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMqttEnabledState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mqttEnabledState"] = transformedMqttEnabledState + } + + return transformed, nil +} + +func expandCloudIotDeviceRegistryMqttEnabledState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceRegistryStateNotificationConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPubsubTopicName, err := expandCloudIotDeviceRegistryStateNotificationConfigPubsubTopicName(original["pubsub_topic_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPubsubTopicName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pubsubTopicName"] = transformedPubsubTopicName + } + + return transformed, nil +} + +func expandCloudIotDeviceRegistryStateNotificationConfigPubsubTopicName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceRegistryCredentials(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPublicKeyCertificate, err := expandCloudIotDeviceRegistryCredentialsPublicKeyCertificate(original["public_key_certificate"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedPublicKeyCertificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicKeyCertificate"] = transformedPublicKeyCertificate + } + + req = append(req, transformed) + } + + return req, nil +} + +func expandCloudIotDeviceRegistryCredentialsPublicKeyCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFormat, err := expandCloudIotDeviceRegistryPublicKeyCertificateFormat(original["format"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["format"] = transformedFormat + } + + transformedCertificate, err := expandCloudIotDeviceRegistryPublicKeyCertificateCertificate(original["certificate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCertificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["certificate"] = transformedCertificate + } + + return transformed, nil +} + +func expandCloudIotDeviceRegistryPublicKeyCertificateFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceRegistryPublicKeyCertificateCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenCloudIotDeviceRegistryCredentials(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + log.Printf("[DEBUG] Flattening device resitry credentials: %q", d.Id()) + if v == nil { + log.Printf("[DEBUG] The credentials array is nil: %q", d.Id()) + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + 
log.Printf("[DEBUG] Original credential: %+v", original) + if len(original) < 1 { + log.Printf("[DEBUG] Excluding empty credential that the API returned. %q", d.Id()) + continue + } + log.Printf("[DEBUG] Credentials array before appending a new credential: %+v", transformed) + transformed = append(transformed, map[string]interface{}{ + "public_key_certificate": flattenCloudIotDeviceRegistryCredentialsPublicKeyCertificate(original["publicKeyCertificate"], d, config), + }) + log.Printf("[DEBUG] Credentials array after appending a new credential: %+v", transformed) + } + return transformed +} + +func flattenCloudIotDeviceRegistryCredentialsPublicKeyCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + log.Printf("[DEBUG] Flattening device resitry credentials public key certificate: %q", d.Id()) + if v == nil { + log.Printf("[DEBUG] The public key certificate is nil: %q", d.Id()) + return v + } + + original := v.(map[string]interface{}) + log.Printf("[DEBUG] Original public key certificate: %+v", original) + transformed := make(map[string]interface{}) + + transformedPublicKeyCertificateFormat := flattenCloudIotDeviceRegistryPublicKeyCertificateFormat(original["format"], d, config) + transformed["format"] = transformedPublicKeyCertificateFormat + + transformedPublicKeyCertificateCertificate := flattenCloudIotDeviceRegistryPublicKeyCertificateCertificate(original["certificate"], d, config) + transformed["certificate"] = transformedPublicKeyCertificateCertificate + + log.Printf("[DEBUG] Transformed public key certificate: %+v", transformed) + + return transformed +} + +func flattenCloudIotDeviceRegistryPublicKeyCertificateFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceRegistryPublicKeyCertificateCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenCloudIotDeviceRegistryHTTPConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHTTPEnabledState := flattenCloudIotDeviceRegistryHTTPConfigHTTPEnabledState(original["httpEnabledState"], d, config) + transformed["http_enabled_state"] = transformedHTTPEnabledState + + return transformed +} + +func flattenCloudIotDeviceRegistryHTTPConfigHTTPEnabledState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceRegistryMqttConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMqttEnabledState := flattenCloudIotDeviceRegistryMqttConfigMqttEnabledState(original["mqttEnabledState"], d, config) + transformed["mqtt_enabled_state"] = transformedMqttEnabledState + + return transformed +} + +func flattenCloudIotDeviceRegistryMqttConfigMqttEnabledState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceRegistryStateNotificationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + log.Printf("[DEBUG] Flattening state notification config: %+v", v) + if v == nil { + return v + } + + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPubsubTopicName := flattenCloudIotDeviceRegistryStateNotificationConfigPubsubTopicName(original["pubsubTopicName"], d, config) + if val := reflect.ValueOf(transformedPubsubTopicName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + log.Printf("[DEBUG] pubsub topic name is not null: %v", d.Get("pubsub_topic_name")) + transformed["pubsub_topic_name"] = transformedPubsubTopicName + } 
+ + return transformed +} + +func flattenCloudIotDeviceRegistryStateNotificationConfigPubsubTopicName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func ValidateCloudIotDeviceRegistryID(v interface{}, k string) (warnings []string, errors []error) { + value := v.(string) + if strings.HasPrefix(value, "goog") { + errors = append(errors, fmt.Errorf( + "%q (%q) can not start with \"goog\"", k, value)) + } + if !regexp.MustCompile(verify.CloudIoTIdRegex).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) doesn't match regexp %q", k, value, verify.CloudIoTIdRegex)) + } + return +} + +func validateCloudIotDeviceRegistrySubfolderMatch(v interface{}, k string) (warnings []string, errors []error) { + value := v.(string) + if strings.HasPrefix(value, "/") { + errors = append(errors, fmt.Errorf( + "%q (%q) can not start with '/'", k, value)) + } + return +} + +func ResourceCloudIotDeviceRegistry() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudIotDeviceRegistryCreate, + Read: resourceCloudIotDeviceRegistryRead, + Update: resourceCloudIotDeviceRegistryUpdate, + Delete: resourceCloudIotDeviceRegistryDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudIotDeviceRegistryImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: ValidateCloudIotDeviceRegistryID, + Description: `A unique name for the resource, required by device registry.`, + }, + "event_notification_configs": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `List of configurations for event notifications, such as PubSub topics +to publish device events to.`, + MaxItems: 10, + Elem: &schema.Resource{ + 
Schema: map[string]*schema.Schema{ + "pubsub_topic_name": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `PubSub topic name to publish device events.`, + }, + "subfolder_matches": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateCloudIotDeviceRegistrySubfolderMatch, + Description: `If the subfolder name matches this string exactly, this +configuration will be used. The string must not include the +leading '/' character. If empty, all strings are matched. Empty +value can only be used for the last 'event_notification_configs' +item.`, + }, + }, + }, + }, + "log_level": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NONE", "ERROR", "INFO", "DEBUG", ""}), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("NONE"), + Description: `The default logging verbosity for activity from devices in this +registry. Specifies which events should be written to logs. For +example, if the LogLevel is ERROR, only events that terminate in +errors will be logged. LogLevel is inclusive; enabling INFO logging +will also enable ERROR logging. Default value: "NONE" Possible values: ["NONE", "ERROR", "INFO", "DEBUG"]`, + Default: "NONE", + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The region in which the created registry should reside. 
+If it is not provided, the provider region is used.`, + }, + "state_notification_config": { + Type: schema.TypeMap, + Description: `A PubSub topic to publish device state updates.`, + Optional: true, + }, + "mqtt_config": { + Type: schema.TypeMap, + Description: `Activate or deactivate MQTT.`, + Computed: true, + Optional: true, + }, + "http_config": { + Type: schema.TypeMap, + Description: `Activate or deactivate HTTP.`, + Computed: true, + Optional: true, + }, + "credentials": { + Type: schema.TypeList, + Description: `List of public key certificates to authenticate devices.`, + Optional: true, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "public_key_certificate": { + Type: schema.TypeMap, + Description: `A public key certificate format and data.`, + Required: true, + }, + }, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudIotDeviceRegistryCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + idProp, err := expandCloudIotDeviceRegistryName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + eventNotificationConfigsProp, err := expandCloudIotDeviceRegistryEventNotificationConfigs(d.Get("event_notification_configs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("event_notification_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(eventNotificationConfigsProp)) && (ok || !reflect.DeepEqual(v, eventNotificationConfigsProp)) { + obj["eventNotificationConfigs"] = eventNotificationConfigsProp + } + logLevelProp, err := 
expandCloudIotDeviceRegistryLogLevel(d.Get("log_level"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("log_level"); !tpgresource.IsEmptyValue(reflect.ValueOf(logLevelProp)) && (ok || !reflect.DeepEqual(v, logLevelProp)) { + obj["logLevel"] = logLevelProp + } + + obj, err = resourceCloudIotDeviceRegistryEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIotBasePath}}projects/{{project}}/locations/{{region}}/registries") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new DeviceRegistry: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DeviceRegistry: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating DeviceRegistry: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/registries/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating DeviceRegistry %q: %#v", d.Id(), res) + + return resourceCloudIotDeviceRegistryRead(d, meta) +} + +func resourceCloudIotDeviceRegistryRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{CloudIotBasePath}}projects/{{project}}/locations/{{region}}/registries/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DeviceRegistry: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudIotDeviceRegistry %q", d.Id())) + } + + res, err = resourceCloudIotDeviceRegistryDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing CloudIotDeviceRegistry because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading DeviceRegistry: %s", err) + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + if err := d.Set("region", region); err != nil { + return fmt.Errorf("Error reading DeviceRegistry: %s", err) + } + + if err := d.Set("name", flattenCloudIotDeviceRegistryName(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading DeviceRegistry: %s", err) + } + if err := d.Set("event_notification_configs", flattenCloudIotDeviceRegistryEventNotificationConfigs(res["eventNotificationConfigs"], d, config)); err != nil { + return fmt.Errorf("Error reading DeviceRegistry: %s", err) + } + if err := d.Set("log_level", flattenCloudIotDeviceRegistryLogLevel(res["logLevel"], d, config)); err != nil { + return fmt.Errorf("Error reading DeviceRegistry: %s", err) + } 
+ + return nil +} + +func resourceCloudIotDeviceRegistryUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DeviceRegistry: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + eventNotificationConfigsProp, err := expandCloudIotDeviceRegistryEventNotificationConfigs(d.Get("event_notification_configs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("event_notification_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, eventNotificationConfigsProp)) { + obj["eventNotificationConfigs"] = eventNotificationConfigsProp + } + logLevelProp, err := expandCloudIotDeviceRegistryLogLevel(d.Get("log_level"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("log_level"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, logLevelProp)) { + obj["logLevel"] = logLevelProp + } + + obj, err = resourceCloudIotDeviceRegistryEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIotBasePath}}projects/{{project}}/locations/{{region}}/registries/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating DeviceRegistry %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("event_notification_configs") { + updateMask = append(updateMask, "eventNotificationConfigs") + } + + if d.HasChange("log_level") { + updateMask = append(updateMask, "logLevel") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": 
strings.Join(updateMask, ",")}) + if err != nil { + return err + } + log.Printf("[DEBUG] updateMask before adding extra schema entries %q: %v", d.Id(), updateMask) + + log.Printf("[DEBUG] Pre-update on state notification config: %q", d.Id()) + if d.HasChange("state_notification_config") { + log.Printf("[DEBUG] %q stateNotificationConfig.pubsubTopicName has a change. Adding it to the update mask", d.Id()) + updateMask = append(updateMask, "stateNotificationConfig.pubsubTopicName") + } + + log.Printf("[DEBUG] Pre-update on MQTT config: %q", d.Id()) + if d.HasChange("mqtt_config") { + log.Printf("[DEBUG] %q mqttConfig.mqttEnabledState has a change. Adding it to the update mask", d.Id()) + updateMask = append(updateMask, "mqttConfig.mqttEnabledState") + } + + log.Printf("[DEBUG] Pre-update on HTTP config: %q", d.Id()) + if d.HasChange("http_config") { + log.Printf("[DEBUG] %q httpConfig.httpEnabledState has a change. Adding it to the update mask", d.Id()) + updateMask = append(updateMask, "httpConfig.httpEnabledState") + } + + log.Printf("[DEBUG] Pre-update on credentials: %q", d.Id()) + if d.HasChange("credentials") { + log.Printf("[DEBUG] %q credentials has a change. 
Adding it to the update mask", d.Id()) + updateMask = append(updateMask, "credentials") + } + + log.Printf("[DEBUG] updateMask after adding extra schema entries %q: %v", d.Id(), updateMask) + + // Refreshing updateMask after adding extra schema entries + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + log.Printf("[DEBUG] Update URL %q: %v", d.Id(), url) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating DeviceRegistry %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating DeviceRegistry %q: %#v", d.Id(), res) + } + + return resourceCloudIotDeviceRegistryRead(d, meta) +} + +func resourceCloudIotDeviceRegistryDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DeviceRegistry: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudIotBasePath}}projects/{{project}}/locations/{{region}}/registries/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting DeviceRegistry %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, 
err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DeviceRegistry") + } + + log.Printf("[DEBUG] Finished deleting DeviceRegistry %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudIotDeviceRegistryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P[^/]+)/locations/(?P[^/]+)/registries/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/registries/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCloudIotDeviceRegistryName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceRegistryEventNotificationConfigs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "subfolder_matches": flattenCloudIotDeviceRegistryEventNotificationConfigsSubfolderMatches(original["subfolderMatches"], d, config), + "pubsub_topic_name": flattenCloudIotDeviceRegistryEventNotificationConfigsPubsubTopicName(original["pubsubTopicName"], d, config), 
+ }) + } + return transformed +} +func flattenCloudIotDeviceRegistryEventNotificationConfigsSubfolderMatches(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceRegistryEventNotificationConfigsPubsubTopicName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudIotDeviceRegistryLogLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCloudIotDeviceRegistryName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceRegistryEventNotificationConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSubfolderMatches, err := expandCloudIotDeviceRegistryEventNotificationConfigsSubfolderMatches(original["subfolder_matches"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubfolderMatches); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subfolderMatches"] = transformedSubfolderMatches + } + + transformedPubsubTopicName, err := expandCloudIotDeviceRegistryEventNotificationConfigsPubsubTopicName(original["pubsub_topic_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPubsubTopicName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pubsubTopicName"] = transformedPubsubTopicName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudIotDeviceRegistryEventNotificationConfigsSubfolderMatches(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceRegistryEventNotificationConfigsPubsubTopicName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudIotDeviceRegistryLogLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceCloudIotDeviceRegistryEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + log.Printf("[DEBUG] Resource data before encoding extra schema entries %q: %#v", d.Id(), obj) + + log.Printf("[DEBUG] Encoding state notification config: %q", d.Id()) + stateNotificationConfigProp, err := expandCloudIotDeviceRegistryStateNotificationConfig(d.Get("state_notification_config"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("state_notification_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(stateNotificationConfigProp)) && (ok || !reflect.DeepEqual(v, stateNotificationConfigProp)) { + log.Printf("[DEBUG] Encoding %q. Setting stateNotificationConfig: %#v", d.Id(), stateNotificationConfigProp) + obj["stateNotificationConfig"] = stateNotificationConfigProp + } + + log.Printf("[DEBUG] Encoding HTTP config: %q", d.Id()) + httpConfigProp, err := expandCloudIotDeviceRegistryHTTPConfig(d.Get("http_config"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("http_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(httpConfigProp)) && (ok || !reflect.DeepEqual(v, httpConfigProp)) { + log.Printf("[DEBUG] Encoding %q. 
Setting httpConfig: %#v", d.Id(), httpConfigProp) + obj["httpConfig"] = httpConfigProp + } + + log.Printf("[DEBUG] Encoding MQTT config: %q", d.Id()) + mqttConfigProp, err := expandCloudIotDeviceRegistryMqttConfig(d.Get("mqtt_config"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("mqtt_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(mqttConfigProp)) && (ok || !reflect.DeepEqual(v, mqttConfigProp)) { + log.Printf("[DEBUG] Encoding %q. Setting mqttConfig: %#v", d.Id(), mqttConfigProp) + obj["mqttConfig"] = mqttConfigProp + } + + log.Printf("[DEBUG] Encoding credentials: %q", d.Id()) + credentialsProp, err := expandCloudIotDeviceRegistryCredentials(d.Get("credentials"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("credentials"); !tpgresource.IsEmptyValue(reflect.ValueOf(credentialsProp)) && (ok || !reflect.DeepEqual(v, credentialsProp)) { + log.Printf("[DEBUG] Encoding %q. Setting credentials: %#v", d.Id(), credentialsProp) + obj["credentials"] = credentialsProp + } + + log.Printf("[DEBUG] Resource data after encoding extra schema entries %q: %#v", d.Id(), obj) + + return obj, nil +} + +func resourceCloudIotDeviceRegistryDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + log.Printf("[DEBUG] Decoding state notification config: %q", d.Id()) + log.Printf("[DEBUG] State notification config before decoding: %v", d.Get("state_notification_config")) + if err := d.Set("state_notification_config", flattenCloudIotDeviceRegistryStateNotificationConfig(res["stateNotificationConfig"], d, config)); err != nil { + return nil, fmt.Errorf("Error reading DeviceRegistry: %s", err) + } + log.Printf("[DEBUG] State notification config after decoding: %v", d.Get("state_notification_config")) + + log.Printf("[DEBUG] Decoding HTTP config: %q", d.Id()) + log.Printf("[DEBUG] HTTP config before decoding: %v", 
d.Get("http_config")) + if err := d.Set("http_config", flattenCloudIotDeviceRegistryHTTPConfig(res["httpConfig"], d, config)); err != nil { + return nil, fmt.Errorf("Error reading DeviceRegistry: %s", err) + } + log.Printf("[DEBUG] HTTP config after decoding: %v", d.Get("http_config")) + + log.Printf("[DEBUG] Decoding MQTT config: %q", d.Id()) + log.Printf("[DEBUG] MQTT config before decoding: %v", d.Get("mqtt_config")) + if err := d.Set("mqtt_config", flattenCloudIotDeviceRegistryMqttConfig(res["mqttConfig"], d, config)); err != nil { + return nil, fmt.Errorf("Error reading DeviceRegistry: %s", err) + } + log.Printf("[DEBUG] MQTT config after decoding: %v", d.Get("mqtt_config")) + + log.Printf("[DEBUG] Decoding credentials: %q", d.Id()) + log.Printf("[DEBUG] credentials before decoding: %v", d.Get("credentials")) + if err := d.Set("credentials", flattenCloudIotDeviceRegistryCredentials(res["credentials"], d, config)); err != nil { + return nil, fmt.Errorf("Error reading DeviceRegistry: %s", err) + } + log.Printf("[DEBUG] credentials after decoding: %v", d.Get("credentials")) + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/resource_cloudiot_registry_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/resource_cloudiot_registry_sweeper.go new file mode 100644 index 0000000000..161314e0e6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudiot/resource_cloudiot_registry_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudiot + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudIotDeviceRegistry", testSweepCloudIotDeviceRegistry) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudIotDeviceRegistry(region string) error { + resourceName := "CloudIotDeviceRegistry" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://cloudiot.googleapis.com/v1/projects/{{project}}/locations/{{region}}/registries", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["deviceRegistries"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudiot.googleapis.com/v1/projects/{{project}}/locations/{{region}}/registries/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting 
for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/cloudrun_polling.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/cloudrun_polling.go new file mode 100644 index 0000000000..e25f50eaf9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/cloudrun_polling.go @@ -0,0 +1,92 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package cloudrun + +import ( + "fmt" + "log" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +const readyStatusType string = "Ready" +const pendingCertificateReason string = "CertificatePending" + +type Condition struct { + Type string + Status string + Reason string + Message string +} + +// KnativeStatus is a struct that can contain a Knative style resource's Status block. It is not +// intended to be used for anything other than polling for the success of the given resource. 
+type KnativeStatus struct { + Metadata struct { + Name string + Namespace string + SelfLink string + } + Status struct { + Conditions []Condition + ObservedGeneration float64 + } +} + +func getGeneration(res map[string]interface{}) (int, error) { + metadata, ok := res["metadata"] + if !ok { + return 0, fmt.Errorf("Unable to find knative metadata") + } + m, ok := metadata.(map[string]interface{}) + if !ok { + return 0, fmt.Errorf("Unable to find generation in knative metadata") + } + gen, ok := m["generation"] + if !ok { + return 0, fmt.Errorf("Unable to find generation in knative metadata") + } + return int(gen.(float64)), nil +} + +func PollCheckKnativeStatusFunc(knativeRestResponse map[string]interface{}) func(resp map[string]interface{}, respErr error) transport_tpg.PollResult { + return func(resp map[string]interface{}, respErr error) transport_tpg.PollResult { + if respErr != nil { + return transport_tpg.ErrorPollResult(respErr) + } + s := KnativeStatus{} + if err := tpgresource.Convert(resp, &s); err != nil { + return transport_tpg.ErrorPollResult(errwrap.Wrapf("unable to get KnativeStatus: {{err}}", err)) + } + + gen, err := getGeneration(knativeRestResponse) + if err != nil { + return transport_tpg.ErrorPollResult(errwrap.Wrapf("unable to find Knative generation: {{err}}", err)) + } + if int(s.Status.ObservedGeneration) < gen { + return transport_tpg.PendingStatusPollResult("waiting for observed generation to match") + } + for _, condition := range s.Status.Conditions { + if condition.Type == readyStatusType { + log.Printf("[DEBUG] checking KnativeStatus Ready condition %s: %s", condition.Status, condition.Message) + switch condition.Status { + case "True": + // Resource is ready + return transport_tpg.SuccessPollResult() + case "Unknown": + // DomainMapping can enter a 'terminal' state where "Ready" status is "Unknown" + // but the resource is waiting for external verification of DNS records. 
+ if condition.Reason == pendingCertificateReason { + return transport_tpg.SuccessPollResult() + } + return transport_tpg.PendingStatusPollResult(fmt.Sprintf("%s:%s", condition.Status, condition.Message)) + case "False": + return transport_tpg.ErrorPollResult(fmt.Errorf(`resource is in failed state "Ready:False", message: %s`, condition.Message)) + } + } + } + return transport_tpg.PendingStatusPollResult("no status yet") + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/data_source_cloud_run_locations.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/data_source_cloud_run_locations.go new file mode 100644 index 0000000000..b35e3304fd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/data_source_cloud_run_locations.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package cloudrun + +import ( + "fmt" + "log" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleCloudRunLocations() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleCloudRunLocationsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "locations": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceGoogleCloudRunLocationsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err 
!= nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "https://run.googleapis.com/v1/projects/{{project}}/locations") + if err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error listing Cloud Run Locations : %s", err) + } + + locationsRaw := flattenCloudRunLocations(res) + + locations := make([]string, len(locationsRaw)) + for i, loc := range locationsRaw { + locations[i] = loc.(string) + } + sort.Strings(locations) + + log.Printf("[DEBUG] Received Google Cloud Run Locations: %q", locations) + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("locations", locations); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + + d.SetId(fmt.Sprintf("projects/%s", project)) + + return nil +} + +func flattenCloudRunLocations(resp map[string]interface{}) []interface{} { + regionList := resp["locations"].([]interface{}) + regions := make([]interface{}, len(regionList)) + for i, v := range regionList { + regionObj := v.(map[string]interface{}) + regions[i] = regionObj["locationId"] + } + return regions +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/data_source_cloud_run_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/data_source_cloud_run_service.go new file mode 100644 index 0000000000..9f89acf3b4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/data_source_cloud_run_service.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package cloudrun + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleCloudRunService() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceCloudRunService().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name", "location") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleCloudRunServiceRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleCloudRunServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "locations/{{location}}/namespaces/{{project}}/services/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return resourceCloudRunServiceRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/iam_cloud_run_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/iam_cloud_run_service.go new file mode 100644 index 0000000000..a6ff513c58 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/iam_cloud_run_service.go @@ -0,0 +1,247 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudrun + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var CloudRunServiceIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type CloudRunServiceIamUpdater struct { + project string + location string + service string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func CloudRunServiceIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("service"); ok { + values["service"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("service").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudRunServiceIamUpdater{ + project: values["project"], + location: values["location"], + service: values["service"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("service", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting service: %s", err) + } + + return u, nil +} + +func CloudRunServiceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudRunServiceIamUpdater{ + project: values["project"], + location: values["location"], + service: values["service"], + d: d, + Config: config, + } + if err := d.Set("service", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting service: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *CloudRunServiceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyServiceUrl("getIamPolicy") + if err != nil { + return nil, err + } + 
+ project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCloudRunCreationConflict}, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *CloudRunServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyServiceUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCloudRunCreationConflict}, + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *CloudRunServiceIamUpdater) 
qualifyServiceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{CloudRunBasePath}}%s:%s", fmt.Sprintf("v1/projects/%s/locations/%s/services/%s", u.project, u.location, u.service), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *CloudRunServiceIamUpdater) GetResourceId() string { + return fmt.Sprintf("v1/projects/%s/locations/%s/services/%s", u.project, u.location, u.service) +} + +func (u *CloudRunServiceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-cloudrun-service-%s", u.GetResourceId()) +} + +func (u *CloudRunServiceIamUpdater) DescribeResource() string { + return fmt.Sprintf("cloudrun service %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_domain_mapping.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_domain_mapping.go new file mode 100644 index 0000000000..2ee55348a3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_domain_mapping.go @@ -0,0 +1,917 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudrun + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +var domainMappingGoogleProvidedLabels = []string{ + "cloud.googleapis.com/location", + "run.googleapis.com/overrideAt", +} + +func DomainMappingLabelDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // Suppress diffs for the labels provided by Google + for _, label := range domainMappingGoogleProvidedLabels { + if strings.Contains(k, label) && new == "" { + return true + } + } + + // Let diff be determined by labels (above) + if strings.Contains(k, "labels.%") { + return true + } + + // For other keys, don't suppress diff. + return false +} + +func ResourceCloudRunDomainMapping() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudRunDomainMappingCreate, + Read: resourceCloudRunDomainMappingRead, + Delete: resourceCloudRunDomainMappingDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudRunDomainMappingImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location of the cloud run instance. 
eg us-central1`, + }, + "metadata": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `Metadata associated with this DomainMapping.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "namespace": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `In Cloud Run the namespace must be equal to either the +project ID or project number.`, + }, + "annotations": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: cloudrunAnnotationDiffSuppress, + Description: `Annotations is a key value map stored with a resource that +may be set by external tools to store and retrieve arbitrary metadata. More +info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations + +**Note**: The Cloud Run API may add additional annotations that were not provided in your config. +If terraform plan shows a diff where a server-side annotation is added, you can add it to your config +or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: DomainMappingLabelDiffSuppress, + Description: `Map of string keys and values that can be used to organize and categorize +(scope and select) objects. May match selectors of replication controllers +and routes. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "generation": { + Type: schema.TypeInt, + Computed: true, + Description: `A sequence number representing a specific generation of the desired state.`, + }, + "resource_version": { + Type: schema.TypeString, + Computed: true, + Description: `An opaque value that represents the internal version of this object that +can be used by clients to determine when objects have changed. 
May be used +for optimistic concurrency, change detection, and the watch operation on a +resource or set of resources. They may only be valid for a +particular resource or set of resources. + +More info: +https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `SelfLink is a URL representing this object.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `UID is a unique id generated by the server on successful creation of a resource and is not +allowed to change on PUT operations. + +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name should be a [verified](https://support.google.com/webmasters/answer/9008080) domain`, + }, + "spec": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The spec for this DomainMapping.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "route_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Cloud Run Service that this DomainMapping applies to. +The route must exist.`, + }, + "certificate_mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"NONE", "AUTOMATIC", ""}), + Description: `The mode of the certificate. Default value: "AUTOMATIC" Possible values: ["NONE", "AUTOMATIC"]`, + Default: "AUTOMATIC", + }, + "force_override": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If set, the mapping will override any mapping set before this spec was set. 
+It is recommended that the user leaves this empty to receive an error +warning about a potential conflict and only set it once the respective UI +has given such a warning.`, + }, + }, + }, + }, + "status": { + Type: schema.TypeList, + Computed: true, + Description: `The current status of the DomainMapping.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_records": { + Type: schema.TypeList, + Optional: true, + Description: `The resource records required to configure this domain mapping. These +records must be added to the domain's DNS configuration in order to +serve the application via this domain mapping.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"A", "AAAA", "CNAME", ""}), + Description: `Resource record type. Example: 'AAAA'. Possible values: ["A", "AAAA", "CNAME"]`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Relative name of the object affected by this record. Only applicable for +'CNAME' records. Example: 'www'.`, + }, + "rrdata": { + Type: schema.TypeString, + Computed: true, + Description: `Data for this record. 
Values vary by record type, as defined in RFC 1035 +(section 5) and RFC 1034 (section 3.6.1).`, + }, + }, + }, + }, + "conditions": { + Type: schema.TypeList, + Computed: true, + Description: `Array of observed DomainMappingConditions, indicating the current state +of the DomainMapping.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable message indicating details about the current status.`, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `One-word CamelCase reason for the condition's current status.`, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: `Status of the condition, one of True, False, Unknown.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `Type of domain mapping condition.`, + }, + }, + }, + }, + "mapped_route_name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the route that the mapping currently points to.`, + }, + "observed_generation": { + Type: schema.TypeInt, + Computed: true, + Description: `ObservedGeneration is the 'Generation' of the DomainMapping that +was last processed by the controller.`, + }, + }, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudRunDomainMappingCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + specProp, err := expandCloudRunDomainMappingSpec(d.Get("spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(specProp)) && (ok || !reflect.DeepEqual(v, specProp)) { + obj["spec"] = specProp + } + metadataProp, err 
:= expandCloudRunDomainMappingMetadata(d.Get("metadata"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(metadataProp)) && (ok || !reflect.DeepEqual(v, metadataProp)) { + obj["metadata"] = metadataProp + } + + obj, err = resourceCloudRunDomainMappingEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunBasePath}}apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new DomainMapping: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DomainMapping: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCloudRunCreationConflict}, + }) + if err != nil { + return fmt.Errorf("Error creating DomainMapping: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "locations/{{location}}/namespaces/{{project}}/domainmappings/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = transport_tpg.PollingWaitTime(resourceCloudRunDomainMappingPollRead(d, meta), PollCheckKnativeStatusFunc(res), "Creating DomainMapping", d.Timeout(schema.TimeoutCreate), 1) + if err != nil { + return fmt.Errorf("Error waiting to create DomainMapping: %s", err) + } + + log.Printf("[DEBUG] Finished creating DomainMapping %q: 
%#v", d.Id(), res) + + return resourceCloudRunDomainMappingRead(d, meta) +} + +func resourceCloudRunDomainMappingPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunBasePath}}apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings/{{name}}") + if err != nil { + return nil, err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, fmt.Errorf("Error fetching project for DomainMapping: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCloudRunCreationConflict}, + }) + if err != nil { + return res, err + } + res, err = resourceCloudRunDomainMappingDecoder(d, meta, res) + if err != nil { + return nil, err + } + if res == nil { + return nil, tpgresource.Fake404("decoded", "CloudRunDomainMapping") + } + + return res, nil + } +} + +func resourceCloudRunDomainMappingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunBasePath}}apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := 
tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DomainMapping: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCloudRunCreationConflict}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudRunDomainMapping %q", d.Id())) + } + + res, err = resourceCloudRunDomainMappingDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing CloudRunDomainMapping because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading DomainMapping: %s", err) + } + + if err := d.Set("status", flattenCloudRunDomainMappingStatus(res["status"], d, config)); err != nil { + return fmt.Errorf("Error reading DomainMapping: %s", err) + } + if err := d.Set("spec", flattenCloudRunDomainMappingSpec(res["spec"], d, config)); err != nil { + return fmt.Errorf("Error reading DomainMapping: %s", err) + } + if err := d.Set("metadata", flattenCloudRunDomainMappingMetadata(res["metadata"], d, config)); err != nil { + return fmt.Errorf("Error reading DomainMapping: %s", err) + } + + return nil +} + +func resourceCloudRunDomainMappingDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := 
tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DomainMapping: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunBasePath}}apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting DomainMapping %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCloudRunCreationConflict}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DomainMapping") + } + + log.Printf("[DEBUG] Finished deleting DomainMapping %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudRunDomainMappingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "locations/(?P[^/]+)/namespaces/(?P[^/]+)/domainmappings/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "locations/{{location}}/namespaces/{{project}}/domainmappings/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCloudRunDomainMappingStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + 
original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["conditions"] = + flattenCloudRunDomainMappingStatusConditions(original["conditions"], d, config) + transformed["observed_generation"] = + flattenCloudRunDomainMappingStatusObservedGeneration(original["observedGeneration"], d, config) + transformed["resource_records"] = + flattenCloudRunDomainMappingStatusResourceRecords(original["resourceRecords"], d, config) + transformed["mapped_route_name"] = + flattenCloudRunDomainMappingStatusMappedRouteName(original["mappedRouteName"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunDomainMappingStatusConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "message": flattenCloudRunDomainMappingStatusConditionsMessage(original["message"], d, config), + "status": flattenCloudRunDomainMappingStatusConditionsStatus(original["status"], d, config), + "reason": flattenCloudRunDomainMappingStatusConditionsReason(original["reason"], d, config), + "type": flattenCloudRunDomainMappingStatusConditionsType(original["type"], d, config), + }) + } + return transformed +} +func flattenCloudRunDomainMappingStatusConditionsMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunDomainMappingStatusConditionsStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunDomainMappingStatusConditionsReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenCloudRunDomainMappingStatusConditionsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunDomainMappingStatusObservedGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunDomainMappingStatusResourceRecords(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "type": flattenCloudRunDomainMappingStatusResourceRecordsType(original["type"], d, config), + "rrdata": flattenCloudRunDomainMappingStatusResourceRecordsRrdata(original["rrdata"], d, config), + "name": flattenCloudRunDomainMappingStatusResourceRecordsName(original["name"], d, config), + }) + } + return transformed +} +func flattenCloudRunDomainMappingStatusResourceRecordsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunDomainMappingStatusResourceRecordsRrdata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunDomainMappingStatusResourceRecordsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenCloudRunDomainMappingStatusMappedRouteName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunDomainMappingSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["force_override"] = + flattenCloudRunDomainMappingSpecForceOverride(original["forceOverride"], d, config) + transformed["route_name"] = + flattenCloudRunDomainMappingSpecRouteName(original["routeName"], d, config) + transformed["certificate_mode"] = + flattenCloudRunDomainMappingSpecCertificateMode(original["certificateMode"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunDomainMappingSpecForceOverride(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // We want to ignore read on this field, but cannot because it is nested + return d.Get("spec.0.force_override") +} + +func flattenCloudRunDomainMappingSpecRouteName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunDomainMappingSpecCertificateMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunDomainMappingMetadata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["labels"] = + flattenCloudRunDomainMappingMetadataLabels(original["labels"], d, config) + transformed["generation"] = + flattenCloudRunDomainMappingMetadataGeneration(original["generation"], d, config) + transformed["resource_version"] = + 
flattenCloudRunDomainMappingMetadataResourceVersion(original["resourceVersion"], d, config) + transformed["self_link"] = + flattenCloudRunDomainMappingMetadataSelfLink(original["selfLink"], d, config) + transformed["uid"] = + flattenCloudRunDomainMappingMetadataUid(original["uid"], d, config) + transformed["namespace"] = + flattenCloudRunDomainMappingMetadataNamespace(original["namespace"], d, config) + transformed["annotations"] = + flattenCloudRunDomainMappingMetadataAnnotations(original["annotations"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunDomainMappingMetadataLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunDomainMappingMetadataGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunDomainMappingMetadataResourceVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunDomainMappingMetadataSelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunDomainMappingMetadataUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunDomainMappingMetadataNamespace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("project") +} + +func flattenCloudRunDomainMappingMetadataAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
expandCloudRunDomainMappingSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedForceOverride, err := expandCloudRunDomainMappingSpecForceOverride(original["force_override"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedForceOverride); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["forceOverride"] = transformedForceOverride + } + + transformedRouteName, err := expandCloudRunDomainMappingSpecRouteName(original["route_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRouteName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["routeName"] = transformedRouteName + } + + transformedCertificateMode, err := expandCloudRunDomainMappingSpecCertificateMode(original["certificate_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCertificateMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["certificateMode"] = transformedCertificateMode + } + + return transformed, nil +} + +func expandCloudRunDomainMappingSpecForceOverride(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunDomainMappingSpecRouteName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.GetResourceNameFromSelfLink(v.(string)), nil +} + +func expandCloudRunDomainMappingSpecCertificateMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunDomainMappingMetadata(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLabels, err := expandCloudRunDomainMappingMetadataLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labels"] = transformedLabels + } + + transformedGeneration, err := expandCloudRunDomainMappingMetadataGeneration(original["generation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["generation"] = transformedGeneration + } + + transformedResourceVersion, err := expandCloudRunDomainMappingMetadataResourceVersion(original["resource_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResourceVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resourceVersion"] = transformedResourceVersion + } + + transformedSelfLink, err := expandCloudRunDomainMappingMetadataSelfLink(original["self_link"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSelfLink); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["selfLink"] = transformedSelfLink + } + + transformedUid, err := expandCloudRunDomainMappingMetadataUid(original["uid"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUid); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uid"] = transformedUid + } + + transformedNamespace, err := expandCloudRunDomainMappingMetadataNamespace(original["namespace"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedNamespace); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["namespace"] = transformedNamespace + } + + transformedAnnotations, err := expandCloudRunDomainMappingMetadataAnnotations(original["annotations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAnnotations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["annotations"] = transformedAnnotations + } + + return transformed, nil +} + +func expandCloudRunDomainMappingMetadataLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunDomainMappingMetadataGeneration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunDomainMappingMetadataResourceVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunDomainMappingMetadataSelfLink(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunDomainMappingMetadataUid(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunDomainMappingMetadataNamespace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunDomainMappingMetadataAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range 
v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func resourceCloudRunDomainMappingEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + name := d.Get("name").(string) + metadata := obj["metadata"].(map[string]interface{}) + metadata["name"] = name + + // The only acceptable version/kind right now + obj["apiVersion"] = "domains.cloudrun.com/v1" + obj["kind"] = "DomainMapping" + return obj, nil +} + +func resourceCloudRunDomainMappingDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // metadata is not present if the API returns an error + if obj, ok := res["metadata"]; ok { + if meta, ok := obj.(map[string]interface{}); ok { + res["name"] = meta["name"] + } else { + return nil, fmt.Errorf("Unable to decode 'metadata' block from API response.") + } + } + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_domain_mapping_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_domain_mapping_sweeper.go new file mode 100644 index 0000000000..64f0b917c5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_domain_mapping_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
//
// ----------------------------------------------------------------------------

package cloudrun

import (
	"context"
	"log"
	"strings"
	"testing"

	"github.com/hashicorp/terraform-provider-google/google/envvar"
	"github.com/hashicorp/terraform-provider-google/google/sweeper"
	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
)

// init registers the domain-mapping sweeper under the resource's display name
// so the shared sweeper runner can find and invoke it.
func init() {
	sweeper.AddTestSweepers("CloudRunDomainMapping", testSweepCloudRunDomainMapping)
}

// testSweepCloudRunDomainMapping lists every Cloud Run domain mapping in the
// given region for the shared test project and issues a DELETE for each one
// whose name marks it as a sweepable test resource.
//
// It is deliberately best-effort: after config setup succeeds, all later
// failures are logged and swallowed (returning nil) so that one bad resource
// or malformed response does not abort the whole sweep run.
//
// At the time of writing, the CI only passes us-central1 as the region
func testSweepCloudRunDomainMapping(region string) error {
	resourceName := "CloudRunDomainMapping"
	log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName)

	// Config/auth failures ARE returned (not swallowed): nothing below can
	// work without a valid, loaded provider config.
	config, err := sweeper.SharedConfigForRegion(region)
	if err != nil {
		log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err)
		return err
	}

	err = config.LoadAndValidate(context.Background())
	if err != nil {
		log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err)
		return err
	}

	// Throwaway *testing.T: GetTestBillingAccountFromEnv takes one only so it
	// can report a missing env var; this sweeper ignores that reporting.
	t := &testing.T{}
	billingId := envvar.GetTestBillingAccountFromEnv(t)

	// Setup variables to replace in list template
	d := &tpgresource.ResourceDataMock{
		FieldsInSchema: map[string]interface{}{
			"project":         config.Project,
			"region":          region,
			"location":        region,
			"zone":            "-",
			"billing_account": billingId,
		},
	}

	// Strip any query string before interpolating the {{...}} placeholders.
	listTemplate := strings.Split("https://{{location}}-run.googleapis.com/apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings", "?")[0]
	listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate)
	if err != nil {
		log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err)
		return nil
	}

	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
		Config:    config,
		Method:    "GET",
		Project:   config.Project,
		RawURL:    listUrl,
		UserAgent: config.UserAgent,
	})
	if err != nil {
		log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err)
		return nil
	}

	// "domainMappings" is the list key in the API's JSON response.
	resourceList, ok := res["domainMappings"]
	if !ok {
		log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.")
		return nil
	}

	rl := resourceList.([]interface{})

	log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName)
	// Keep count of items that aren't sweepable for logging.
	nonPrefixCount := 0
	for _, ri := range rl {
		obj := ri.(map[string]interface{})
		// NOTE(review): a nil name aborts the REST of the sweep (return nil)
		// rather than skipping just this item — confirm that is intended and
		// not meant to be a `continue`.
		if obj["name"] == nil {
			log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName)
			return nil
		}

		name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string))
		// Skip resources that shouldn't be sweeped
		if !sweeper.IsSweepableTestResource(name) {
			nonPrefixCount++
			continue
		}

		// NOTE(review): the template ends in {{name}} AND `name` is appended
		// again below — verify ReplaceVars drops/empties the unmatched
		// {{name}} placeholder (it is not in ResourceDataMock), otherwise the
		// final URL is malformed.
		deleteTemplate := "https://{{location}}-run.googleapis.com/apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings/{{name}}"
		deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate)
		if err != nil {
			log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err)
			return nil
		}
		deleteUrl = deleteUrl + name

		// Don't wait on operations as we may have a lot to delete
		_, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
			Config:    config,
			Method:    "DELETE",
			Project:   config.Project,
			RawURL:    deleteUrl,
			UserAgent: config.UserAgent,
		})
		if err != nil {
			log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err)
		} else {
			log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name)
		}
	}

	if nonPrefixCount > 0 {
		log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount)
	}

	return nil
}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_service.go
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_service.go
new file mode 100644
index 0000000000..3634135383
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_service.go
@@ -0,0 +1,4135 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

// ----------------------------------------------------------------------------
//
// *** AUTO GENERATED CODE *** Type: MMv1 ***
//
// ----------------------------------------------------------------------------
//
// This file is automatically generated by Magic Modules and manual
// changes will be clobbered when the file is regenerated.
//
// Please read more about how to change this file in
// .github/CONTRIBUTING.md.
//
// ----------------------------------------------------------------------------

package cloudrun

import (
	"context"
	"fmt"
	"log"
	"reflect"
	"regexp"
	"strings"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"

	"google.golang.org/api/googleapi"
)

// revisionNameCustomizeDiff rejects plans that set an explicit revision name
// (template.metadata.name) while autogenerate_revision_name is true, since
// the two settings are mutually exclusive.
func revisionNameCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
	autogen := diff.Get("autogenerate_revision_name").(bool)
	if autogen && diff.HasChange("template.0.metadata.0.name") {
		return fmt.Errorf("google_cloud_run_service: `template.metadata.name` cannot be set while `autogenerate_revision_name` is true. Please remove the field or set `autogenerate_revision_name` to false.")
	}

	return nil
}

// Service-level annotation keys that the Cloud Run API sets on its own
// (creator, lastModifier, ingress-status, operation-id, location); diffs on
// these are suppressed below so server-populated values don't show as drift.
var cloudRunGoogleProvidedAnnotations = regexp.MustCompile(`serving\.knative\.dev/(?:(?:creator)|(?:lastModifier))$|run\.googleapis\.com/(?:(?:ingress-status)|(?:operation-id))$|cloud\.googleapis\.com/(?:(?:location))`)

// cloudrunAnnotationDiffSuppress is a schema.SchemaDiffSuppressFunc for the
// service metadata annotations map: it hides diffs caused by annotations that
// Google adds server-side rather than the user's config.
func cloudrunAnnotationDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
	// Suppress diffs for the annotations provided by Google
	if cloudRunGoogleProvidedAnnotations.MatchString(k) && new == "" {
		return true
	}

	// "all" is the server-side default for the ingress annotation, so
	// old=="all"/new=="" is not real drift.
	if strings.HasSuffix(k, "run.googleapis.com/ingress") {
		return old == "all" && new == ""
	}

	// Let diff be determined by annotations (above)
	if strings.Contains(k, "annotations.%") {
		return true
	}

	// For other keys, don't suppress diff.
	return false
}

// Revision-template annotation keys the API fills in by itself: the sandbox
// type (defaults to "gvisor") and the autoscaling maxScale.
var cloudRunGoogleProvidedTemplateAnnotations = regexp.MustCompile(`template\.0\.metadata\.0\.annotations\.run\.googleapis\.com/sandbox`)
var cloudRunGoogleProvidedTemplateAnnotations_autoscaling_maxscale = regexp.MustCompile(`template\.0\.metadata\.0\.annotations\.autoscaling\.knative\.dev/maxScale`)

// cloudrunTemplateAnnotationDiffSuppress hides diffs on revision-template
// annotations that are populated by the API rather than the user's config.
func cloudrunTemplateAnnotationDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
	// Suppress diffs for the annotations provided by API
	if cloudRunGoogleProvidedTemplateAnnotations.MatchString(k) &&
		old == "gvisor" && new == "" {
		return true
	}

	if cloudRunGoogleProvidedTemplateAnnotations_autoscaling_maxscale.MatchString(k) && new == "" {
		return true
	}

	// For other keys, don't suppress diff.
+ return false +} + +var cloudRunGoogleProvidedLabels = regexp.MustCompile(`cloud\.googleapis\.com/(?:(?:location))`) + +func cloudrunLabelDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // Suppress diffs for the labels provided by Google + if cloudRunGoogleProvidedLabels.MatchString(k) && new == "" { + return true + } + + // Let diff be determined by labels (above) + if strings.Contains(k, "labels.%") { + return true + } + + // For other keys, don't suppress diff. + return false +} + +func ResourceCloudRunService() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudRunServiceCreate, + Read: resourceCloudRunServiceRead, + Update: resourceCloudRunServiceUpdate, + Delete: resourceCloudRunServiceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudRunServiceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + SchemaVersion: 1, + CustomizeDiff: customdiff.All( + revisionNameCustomizeDiff, + ), + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location of the cloud run instance. eg us-central1`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name must be unique within a Google Cloud project and region. +Is required when creating resources. Name is primarily intended +for creation idempotence and configuration definition. Cannot be updated. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names`, + }, + "template": { + Type: schema.TypeList, + Optional: true, + Description: `template holds the latest specification for the Revision to +be stamped out. The template references the container image, and may also +include labels and annotations that should be attached to the Revision. 
+To correlate a Revision, and/or to force a Revision to be created when the +spec doesn't otherwise change, a nonce label may be provided in the +template metadata. For more details, see: +https://github.com/knative/serving/blob/main/docs/client-conventions.md#associate-modifications-with-revisions + +Cloud Run does not currently support referencing a build that is +responsible for materializing the container image from source.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "spec": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `RevisionSpec holds the desired state of the Revision (from the client).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "containers": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Containers defines the unit of execution for this Revision.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image": { + Type: schema.TypeString, + Required: true, + Description: `Docker image name. This is most often a reference to a container located +in the container registry, such as gcr.io/cloudrun/hello`, + }, + "args": { + Type: schema.TypeList, + Optional: true, + Description: `Arguments to the entrypoint. +The docker image's CMD is used if this is not provided.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "command": { + Type: schema.TypeList, + Optional: true, + Description: `Entrypoint array. Not executed within a shell. +The docker image's ENTRYPOINT is used if this is not provided.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "env": { + Type: schema.TypeSet, + Optional: true, + Description: `List of environment variables to set in the container.`, + Elem: cloudrunServiceSpecTemplateSpecContainersContainersEnvSchema(), + // Default schema.HashSchema is used. 
+ }, + "env_from": { + Type: schema.TypeList, + Optional: true, + Deprecated: "Not supported by Cloud Run fully managed", + ForceNew: true, + Description: `List of sources to populate environment variables in the container. +All invalid keys will be reported as an event when the container is starting. +When a key exists in multiple sources, the value associated with the last source will +take precedence. Values defined by an Env with a duplicate key will take +precedence.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config_map_ref": { + Type: schema.TypeList, + Optional: true, + Description: `The ConfigMap to select from.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "local_object_reference": { + Type: schema.TypeList, + Optional: true, + Description: `The ConfigMap to select from.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the referent.`, + }, + }, + }, + }, + "optional": { + Type: schema.TypeBool, + Optional: true, + Description: `Specify whether the ConfigMap must be defined`, + }, + }, + }, + }, + "prefix": { + Type: schema.TypeString, + Optional: true, + Description: `An optional identifier to prepend to each key in the ConfigMap.`, + }, + "secret_ref": { + Type: schema.TypeList, + Optional: true, + Description: `The Secret to select from.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "local_object_reference": { + Type: schema.TypeList, + Optional: true, + Description: `The Secret to select from.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the referent.`, + }, + }, + }, + }, + "optional": { + Type: schema.TypeBool, + Optional: true, + Description: `Specify whether the Secret must be defined`, + }, + }, + }, + }, + }, + }, + }, + 
"liveness_probe": { + Type: schema.TypeList, + Optional: true, + Description: `Periodic probe of container liveness. Container will be restarted if the probe fails.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "failure_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum consecutive failures for the probe to be considered failed after +having succeeded. Defaults to 3. Minimum value is 1.`, + Default: 3, + }, + "grpc": { + Type: schema.TypeList, + Optional: true, + Description: `GRPC specifies an action involving a GRPC port.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Number must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + "service": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the service to place in the gRPC HealthCheckRequest +(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). +If this is not specified, the default behavior is defined by gRPC.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "http_get": { + Type: schema.TypeList, + Optional: true, + Description: `HttpGet specifies the http request to perform.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Custom headers to set in the request. 
HTTP allows repeated headers.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The header field name.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `The header field value.`, + Default: "", + }, + }, + }, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path to access on the HTTP server. If set, it should not be empty string.`, + Default: "/", + }, + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Number must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "initial_delay_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after the container has started before the probe is +initiated. +Defaults to 0 seconds. Minimum value is 0. Maximum value is 3600.`, + Default: 0, + }, + "period_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `How often (in seconds) to perform the probe. +Default to 10 seconds. Minimum value is 1. Maximum value is 3600.`, + Default: 10, + }, + "timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after which the probe times out. +Defaults to 1 second. Minimum value is 1. Maximum value is 3600. +Must be smaller than period_seconds.`, + Default: 1, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Name of the container`, + }, + "ports": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `List of open ports in the container.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container_port": { + Type: schema.TypeInt, + Optional: true, + Description: `Port number the container listens on. 
This must be a valid port number (between 1 and 65535). Defaults to "8080".`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `If specified, used to specify which protocol to use. Allowed values are "http1" (HTTP/1) and "h2c" (HTTP/2 end-to-end). Defaults to "http1".`, + }, + "protocol": { + Type: schema.TypeString, + Optional: true, + Description: `Protocol for port. Must be "TCP". Defaults to "TCP".`, + }, + }, + }, + }, + "resources": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Compute Resources required by this container. Used to set values such as max memory`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "limits": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `Limits describes the maximum amount of compute resources allowed. +The values of the map is string form of the 'quantity' k8s type: +https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "requests": { + Type: schema.TypeMap, + Optional: true, + Description: `Requests describes the minimum amount of compute resources required. +If Requests is omitted for a container, it defaults to Limits if that is +explicitly specified, otherwise to an implementation-defined value. +The values of the map is string form of the 'quantity' k8s type: +https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "startup_probe": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Startup probe of application within the container. +All other probes are disabled if a startup probe is provided, until it +succeeds. 
Container will not be added to service endpoints if the probe fails.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "failure_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum consecutive failures for the probe to be considered failed after +having succeeded. Defaults to 3. Minimum value is 1.`, + Default: 3, + }, + "grpc": { + Type: schema.TypeList, + Optional: true, + Description: `GRPC specifies an action involving a GRPC port.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Number must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + "service": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the service to place in the gRPC HealthCheckRequest +(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). +If this is not specified, the default behavior is defined by gRPC.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "http_get": { + Type: schema.TypeList, + Optional: true, + Description: `HttpGet specifies the http request to perform.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Custom headers to set in the request. HTTP allows repeated headers.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The header field name.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `The header field value.`, + Default: "", + }, + }, + }, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path to access on the HTTP server. 
If set, it should not be empty string.`, + Default: "/", + }, + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Number must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "initial_delay_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after the container has started before the probe is +initiated. +Defaults to 0 seconds. Minimum value is 0. Maximum value is 240.`, + Default: 0, + }, + "period_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `How often (in seconds) to perform the probe. +Default to 10 seconds. Minimum value is 1. Maximum value is 240.`, + Default: 10, + }, + "tcp_socket": { + Type: schema.TypeList, + Optional: true, + Description: `TcpSocket specifies an action involving a TCP port.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Number must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + "timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after which the probe times out. +Defaults to 1 second. Minimum value is 1. Maximum value is 3600. +Must be smaller than periodSeconds.`, + Default: 1, + }, + }, + }, + }, + "volume_mounts": { + Type: schema.TypeList, + Optional: true, + Description: `Volume to mount into the container's filesystem. +Only supports SecretVolumeSources.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mount_path": { + Type: schema.TypeString, + Required: true, + Description: `Path within the container at which the volume should be mounted. 
Must +not contain ':'.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `This must match the Name of a Volume.`, + }, + }, + }, + }, + "working_dir": { + Type: schema.TypeString, + Optional: true, + Deprecated: "Not supported by Cloud Run fully managed", + ForceNew: true, + Description: `Container's working directory. +If not specified, the container runtime's default will be used, which +might be configured in the container image.`, + }, + }, + }, + }, + "container_concurrency": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `ContainerConcurrency specifies the maximum allowed in-flight (concurrent) +requests per container of the Revision. Values are: +- '0' thread-safe, the system should manage the max concurrency. This is + the default value. +- '1' not-thread-safe. Single concurrency +- '2-N' thread-safe, max concurrency of N`, + }, + "service_account_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Email address of the IAM service account associated with the revision of the +service. The service account represents the identity of the running revision, +and determines what permissions the revision has. If not provided, the revision +will use the project's default service account.`, + }, + "timeout_seconds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `TimeoutSeconds holds the max duration the instance is allowed for responding to a request.`, + }, + "volumes": { + Type: schema.TypeList, + Optional: true, + Description: `Volume represents a named volume in a container.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Volume's name.`, + }, + "secret": { + Type: schema.TypeList, + Optional: true, + Description: `The secret's value will be presented as the content of a file whose +name is defined in the item path. 
If no items are defined, the name of +the file is the secret_name.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the secret in Cloud Secret Manager. By default, the secret +is assumed to be in the same project. +If the secret is in another project, you must define an alias. +An alias definition has the form: +{alias}:projects/{project-id|project-number}/secrets/{secret-name}. +If multiple alias definitions are needed, they must be separated by +commas. +The alias definitions must be set on the run.googleapis.com/secrets +annotation.`, + }, + "default_mode": { + Type: schema.TypeInt, + Optional: true, + Description: `Mode bits to use on created files by default. Must be a value between 0000 +and 0777. Defaults to 0644. Directories within the path are not affected by +this setting. This might be in conflict with other options that affect the +file mode, like fsGroup, and the result can be other mode bits set.`, + }, + "items": { + Type: schema.TypeList, + Optional: true, + Description: `If unspecified, the volume will expose a file whose name is the +secret_name. +If specified, the key will be used as the version to fetch from Cloud +Secret Manager and the path will be the name of the file exposed in the +volume. When items are defined, they must specify a key and a path.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `The Cloud Secret Manager secret version. +Can be 'latest' for the latest value or an integer for a specific version.`, + }, + "path": { + Type: schema.TypeString, + Required: true, + Description: `The relative path of the file to map the key to. +May not be an absolute path. +May not contain the path element '..'. 
+May not start with the string '..'.`, + }, + "mode": { + Type: schema.TypeInt, + Optional: true, + Description: `Mode bits to use on this file, must be a value between 0000 and 0777. If +not specified, the volume defaultMode will be used. This might be in +conflict with other options that affect the file mode, like fsGroup, and +the result can be other mode bits set.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "serving_state": { + Type: schema.TypeString, + Computed: true, + Deprecated: "Not supported by Cloud Run fully managed", + Description: `ServingState holds a value describing the state the resources +are in for this Revision. +It is expected +that the system will manipulate this based on routability and load.`, + }, + }, + }, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Optional metadata for this Revision, including labels and annotations. +Name will be generated by the Configuration. To set minimum instances +for this revision, use the "autoscaling.knative.dev/minScale" annotation +key. To set maximum instances for this revision, use the +"autoscaling.knative.dev/maxScale" annotation key. To set Cloud SQL +connections for the revision, use the "run.googleapis.com/cloudsql-instances" +annotation key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "annotations": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + DiffSuppressFunc: cloudrunTemplateAnnotationDiffSuppress, + Description: `Annotations is a key value map stored with a resource that +may be set by external tools to store and retrieve arbitrary metadata. More +info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations + +**Note**: The Cloud Run API may add additional annotations that were not provided in your config. 
+If terraform plan shows a diff where a server-side annotation is added, you can add it to your config +or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field. + +Annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted. Use the following annotation +keys to configure features on a Revision template: + +- 'autoscaling.knative.dev/maxScale' sets the [maximum number of container + instances](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--max-instances) of the Revision to run. +- 'autoscaling.knative.dev/minScale' sets the [minimum number of container + instances](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--min-instances) of the Revision to run. +- 'run.googleapis.com/client-name' sets the client name calling the Cloud Run API. +- 'run.googleapis.com/cloudsql-instances' sets the [Cloud SQL + instances](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--add-cloudsql-instances) the Revision connects to. +- 'run.googleapis.com/cpu-throttling' sets whether to throttle the CPU when the container is not actively serving + requests. See https://cloud.google.com/sdk/gcloud/reference/run/deploy#--[no-]cpu-throttling. +- 'run.googleapis.com/encryption-key-shutdown-hours' sets the number of hours to wait before an automatic shutdown + server after CMEK key revocation is detected. +- 'run.googleapis.com/encryption-key' sets the [CMEK key](https://cloud.google.com/run/docs/securing/using-cmek) + reference to encrypt the container with. +- 'run.googleapis.com/execution-environment' sets the [execution + environment](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--execution-environment) + where the application will run. +- 'run.googleapis.com/post-key-revocation-action-type' sets the + [action type](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--post-key-revocation-action-type) + after CMEK key revocation. 
+- 'run.googleapis.com/secrets' sets a list of key-value pairs to set as + [secrets](https://cloud.google.com/run/docs/configuring/secrets#yaml). +- 'run.googleapis.com/sessionAffinity' sets whether to enable + [session affinity](https://cloud.google.com/sdk/gcloud/reference/beta/run/deploy#--[no-]session-affinity) + for connections to the Revision. +- 'run.googleapis.com/startup-cpu-boost' sets whether to allocate extra CPU to containers on startup. + See https://cloud.google.com/sdk/gcloud/reference/run/deploy#--[no-]cpu-boost. +- 'run.googleapis.com/vpc-access-connector' sets a [VPC connector](https://cloud.google.com/run/docs/configuring/connecting-vpc#terraform_1) + for the Revision. +- 'run.googleapis.com/vpc-access-egress' sets the outbound traffic to send through the VPC connector for this resource. + See https://cloud.google.com/sdk/gcloud/reference/run/deploy#--vpc-egress.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Map of string keys and values that can be used to organize and categorize +(scope and select) objects.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Name must be unique within a Google Cloud project and region. +Is required when creating resources. Name is primarily intended +for creation idempotence and configuration definition. Cannot be updated.`, + }, + "namespace": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `In Cloud Run the namespace must be equal to either the +project ID or project number. 
It will default to the resource's project.`, + }, + "generation": { + Type: schema.TypeInt, + Computed: true, + Description: `A sequence number representing a specific generation of the desired state.`, + }, + "resource_version": { + Type: schema.TypeString, + Computed: true, + Description: `An opaque value that represents the internal version of this object that +can be used by clients to determine when objects have changed. May be used +for optimistic concurrency, change detection, and the watch operation on a +resource or set of resources. They may only be valid for a +particular resource or set of resources.`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `SelfLink is a URL representing this object.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `UID is a unique id generated by the server on successful creation of a resource and is not +allowed to change on PUT operations.`, + }, + }, + }, + }, + }, + }, + }, + "traffic": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Traffic specifies how to distribute traffic over a collection of Knative Revisions +and Configurations`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "percent": { + Type: schema.TypeInt, + Required: true, + Description: `Percent specifies percent of the traffic to this Revision or Configuration.`, + }, + "latest_revision": { + Type: schema.TypeBool, + Optional: true, + Description: `LatestRevision may be optionally provided to indicate that the latest ready +Revision of the Configuration should be used for this traffic target. 
When +provided LatestRevision must be true if RevisionName is empty; it must be +false when RevisionName is non-empty.`, + }, + "revision_name": { + Type: schema.TypeString, + Optional: true, + Description: `RevisionName of a specific revision to which to send this portion of traffic.`, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + Description: `Tag is optionally used to expose a dedicated url for referencing this target exclusively.`, + }, + "url": { + Type: schema.TypeString, + Computed: true, + Description: `URL displays the URL for accessing tagged traffic targets. URL is displayed in status, +and is disallowed on spec. URL must contain a scheme (e.g. http://) and a hostname, +but may not contain anything else (e.g. basic auth, url path, etc.)`, + }, + }, + }, + }, + + "metadata": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Metadata associated with this Service, including name, namespace, labels, +and annotations.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "annotations": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + DiffSuppressFunc: cloudrunAnnotationDiffSuppress, + Description: `Annotations is a key value map stored with a resource that +may be set by external tools to store and retrieve arbitrary metadata. More +info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations + +**Note**: The Cloud Run API may add additional annotations that were not provided in your config. +If terraform plan shows a diff where a server-side annotation is added, you can add it to your config +or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field. + +Annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted. 
Use the following annotation +keys to configure features on a Service: + +- 'run.googleapis.com/binary-authorization-breakglass' sets the [Binary Authorization breakglass](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--breakglass). +- 'run.googleapis.com/binary-authorization' sets the [Binary Authorization](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--binary-authorization). +- 'run.googleapis.com/client-name' sets the client name calling the Cloud Run API. +- 'run.googleapis.com/custom-audiences' sets the [custom audiences](https://cloud.google.com/sdk/gcloud/reference/alpha/run/deploy#--add-custom-audiences) + that can be used in the audience field of ID token for authenticated requests. +- 'run.googleapis.com/description' sets a user defined description for the Service. +- 'run.googleapis.com/ingress' sets the [ingress settings](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--ingress) + for the Service. For example, '"run.googleapis.com/ingress" = "all"'. +- 'run.googleapis.com/launch-stage' sets the [launch stage](https://cloud.google.com/run/docs/troubleshooting#launch-stage-validation) + when a preview feature is used. For example, '"run.googleapis.com/launch-stage": "BETA"'`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + DiffSuppressFunc: cloudrunLabelDiffSuppress, + Description: `Map of string keys and values that can be used to organize and categorize +(scope and select) objects. 
May match selectors of replication controllers +and routes.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "namespace": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `In Cloud Run the namespace must be equal to either the +project ID or project number.`, + }, + "generation": { + Type: schema.TypeInt, + Computed: true, + Description: `A sequence number representing a specific generation of the desired state.`, + }, + "resource_version": { + Type: schema.TypeString, + Computed: true, + Description: `An opaque value that represents the internal version of this object that +can be used by clients to determine when objects have changed. May be used +for optimistic concurrency, change detection, and the watch operation on a +resource or set of resources. They may only be valid for a +particular resource or set of resources.`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `SelfLink is a URL representing this object.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `UID is a unique id generated by the server on successful creation of a resource and is not +allowed to change on PUT operations.`, + }, + }, + }, + }, + "status": { + Type: schema.TypeList, + Computed: true, + Description: `The current status of the Service.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "conditions": { + Type: schema.TypeList, + Computed: true, + Description: `Array of observed Service Conditions, indicating the current ready state of the service.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable message indicating details about the current status.`, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `One-word CamelCase reason for the condition's current status.`, + }, + "status": { + Type: schema.TypeString, + Computed: true, + 
Description: `Status of the condition, one of True, False, Unknown.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `Type of domain mapping condition.`, + }, + }, + }, + }, + "latest_created_revision_name": { + Type: schema.TypeString, + Computed: true, + Description: `From ConfigurationStatus. LatestCreatedRevisionName is the last revision that was created +from this Service's Configuration. It might not be ready yet, for that use +LatestReadyRevisionName.`, + }, + "latest_ready_revision_name": { + Type: schema.TypeString, + Computed: true, + Description: `From ConfigurationStatus. LatestReadyRevisionName holds the name of the latest Revision +stamped out from this Service's Configuration that has had its "Ready" condition become +"True".`, + }, + "observed_generation": { + Type: schema.TypeInt, + Computed: true, + Description: `ObservedGeneration is the 'Generation' of the Route that was last processed by the +controller. + +Clients polling for completed reconciliation should poll until observedGeneration = +metadata.generation and the Ready condition's status is True or False.`, + }, + "url": { + Type: schema.TypeString, + Computed: true, + Description: `From RouteStatus. URL holds the url that will distribute traffic over the provided traffic +targets. It generally has the form +https://{route-hash}-{project-hash}-{cluster-level-suffix}.a.run.app`, + }, + }, + }, + }, + "autogenerate_revision_name": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If set to 'true', the revision name (template.metadata.name) will be omitted and +autogenerated by Cloud Run. This cannot be set to 'true' while 'template.metadata.name' +is also set. 
+(For legacy support, if 'template.metadata.name' is unset in state while +this field is set to false, the revision name will still autogenerate.)`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func cloudrunServiceSpecTemplateSpecContainersContainersEnvSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the environment variable.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `Defaults to "".`, + }, + "value_from": { + Type: schema.TypeList, + Optional: true, + Description: `Source for the environment variable's value. Only supports secret_key_ref.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_key_ref": { + Type: schema.TypeList, + Required: true, + Description: `Selects a key (version) of a secret in Secret Manager.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `A Cloud Secret Manager secret version. Must be 'latest' for the latest +version or an integer for a specific version.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the secret in Cloud Secret Manager. By default, the secret is assumed to be in the same project. +If the secret is in another project, you must define an alias. +An alias definition has the form: :projects/{project-id|project-number}/secrets/. +If multiple alias definitions are needed, they must be separated by commas. 
+The alias definitions must be set on the run.googleapis.com/secrets annotation.`, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func resourceCloudRunServiceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + specProp, err := expandCloudRunServiceSpec(nil, d, config) + if err != nil { + return err + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(specProp)) { + obj["spec"] = specProp + } + metadataProp, err := expandCloudRunServiceMetadata(d.Get("metadata"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(metadataProp)) && (ok || !reflect.DeepEqual(v, metadataProp)) { + obj["metadata"] = metadataProp + } + + obj, err = resourceCloudRunServiceEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Service: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCloudRunCreationConflict}, + }) + if err != nil { + return fmt.Errorf("Error creating Service: %s", 
err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "locations/{{location}}/namespaces/{{project}}/services/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = transport_tpg.PollingWaitTime(resourceCloudRunServicePollRead(d, meta), PollCheckKnativeStatusFunc(res), "Creating Service", d.Timeout(schema.TimeoutCreate), 1) + if err != nil { + return fmt.Errorf("Error waiting to create Service: %s", err) + } + + log.Printf("[DEBUG] Finished creating Service %q: %#v", d.Id(), res) + + return resourceCloudRunServiceRead(d, meta) +} + +func resourceCloudRunServicePollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services/{{name}}") + if err != nil { + return nil, err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCloudRunCreationConflict}, + }) + if err != nil { + return res, err + } + res, err = resourceCloudRunServiceDecoder(d, meta, res) + if err != nil { + return nil, err + } + if res == nil { + return nil, tpgresource.Fake404("decoded", "CloudRunService") + 
} + + return res, nil + } +} + +func resourceCloudRunServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCloudRunCreationConflict}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudRunService %q", d.Id())) + } + + res, err = resourceCloudRunServiceDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing CloudRunService because it no longer exists.") + d.SetId("") + return nil + } + + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("autogenerate_revision_name"); !ok { + if err := d.Set("autogenerate_revision_name", false); err != nil { + return fmt.Errorf("Error setting autogenerate_revision_name: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + + // Terraform must set the top level schema field, but since this object contains collapsed properties + // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. + if flattenedProp := flattenCloudRunServiceSpec(res["spec"], d, config); flattenedProp != nil { + if gerr, ok := flattenedProp.(*googleapi.Error); ok { + return fmt.Errorf("Error reading Service: %s", gerr) + } + casted := flattenedProp.([]interface{})[0] + if casted != nil { + for k, v := range casted.(map[string]interface{}) { + if err := d.Set(k, v); err != nil { + return fmt.Errorf("Error setting %s: %s", k, err) + } + } + } + } + if err := d.Set("status", flattenCloudRunServiceStatus(res["status"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("metadata", flattenCloudRunServiceMetadata(res["metadata"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + + return nil +} + +func resourceCloudRunServiceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + 
specProp, err := expandCloudRunServiceSpec(nil, d, config) + if err != nil { + return err + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(specProp)) { + obj["spec"] = specProp + } + metadataProp, err := expandCloudRunServiceMetadata(d.Get("metadata"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metadataProp)) { + obj["metadata"] = metadataProp + } + + obj, err = resourceCloudRunServiceEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Service %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCloudRunCreationConflict}, + }) + + if err != nil { + return fmt.Errorf("Error updating Service %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Service %q: %#v", d.Id(), res) + } + + err = transport_tpg.PollingWaitTime(resourceCloudRunServicePollRead(d, meta), PollCheckKnativeStatusFunc(res), "Updating Service", d.Timeout(schema.TimeoutUpdate), 1) + if err != nil { + return err + } + + return resourceCloudRunServiceRead(d, meta) +} + +func resourceCloudRunServiceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } 
+ + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Service %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCloudRunCreationConflict}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Service") + } + + log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudRunServiceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "locations/(?P[^/]+)/namespaces/(?P[^/]+)/services/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "locations/{{location}}/namespaces/{{project}}/services/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Explicitly set virtual fields to default values on import + if err := d.Set("autogenerate_revision_name", false); err != nil { + return nil, fmt.Errorf("Error setting autogenerate_revision_name: %s", err) + } 
+ + return []*schema.ResourceData{d}, nil +} + +func flattenCloudRunServiceSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["traffic"] = + flattenCloudRunServiceSpecTraffic(original["traffic"], d, config) + transformed["template"] = + flattenCloudRunServiceSpecTemplate(original["template"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTraffic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "revision_name": flattenCloudRunServiceSpecTrafficRevisionName(original["revisionName"], d, config), + "percent": flattenCloudRunServiceSpecTrafficPercent(original["percent"], d, config), + "tag": flattenCloudRunServiceSpecTrafficTag(original["tag"], d, config), + "latest_revision": flattenCloudRunServiceSpecTrafficLatestRevision(original["latestRevision"], d, config), + "url": flattenCloudRunServiceSpecTrafficUrl(original["url"], d, config), + }) + } + return transformed +} +func flattenCloudRunServiceSpecTrafficRevisionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTrafficPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + 
if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTrafficTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTrafficLatestRevision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTrafficUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["metadata"] = + flattenCloudRunServiceSpecTemplateMetadata(original["metadata"], d, config) + transformed["spec"] = + flattenCloudRunServiceSpecTemplateSpec(original["spec"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateMetadata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["labels"] = + flattenCloudRunServiceSpecTemplateMetadataLabels(original["labels"], d, config) + transformed["generation"] = + flattenCloudRunServiceSpecTemplateMetadataGeneration(original["generation"], d, config) + transformed["resource_version"] = + flattenCloudRunServiceSpecTemplateMetadataResourceVersion(original["resourceVersion"], d, config) + transformed["self_link"] = + flattenCloudRunServiceSpecTemplateMetadataSelfLink(original["selfLink"], d, config) + transformed["uid"] = + flattenCloudRunServiceSpecTemplateMetadataUid(original["uid"], d, config) + 
transformed["namespace"] = + flattenCloudRunServiceSpecTemplateMetadataNamespace(original["namespace"], d, config) + transformed["annotations"] = + flattenCloudRunServiceSpecTemplateMetadataAnnotations(original["annotations"], d, config) + transformed["name"] = + flattenCloudRunServiceSpecTemplateMetadataName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateMetadataLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateMetadataGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateMetadataResourceVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateMetadataSelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateMetadataUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateMetadataNamespace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateMetadataAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateMetadataName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenCloudRunServiceSpecTemplateSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["containers"] = + flattenCloudRunServiceSpecTemplateSpecContainers(original["containers"], d, config) + transformed["container_concurrency"] = + flattenCloudRunServiceSpecTemplateSpecContainerConcurrency(original["containerConcurrency"], d, config) + transformed["timeout_seconds"] = + flattenCloudRunServiceSpecTemplateSpecTimeoutSeconds(original["timeoutSeconds"], d, config) + transformed["service_account_name"] = + flattenCloudRunServiceSpecTemplateSpecServiceAccountName(original["serviceAccountName"], d, config) + transformed["volumes"] = + flattenCloudRunServiceSpecTemplateSpecVolumes(original["volumes"], d, config) + transformed["serving_state"] = + flattenCloudRunServiceSpecTemplateSpecServingState(original["servingState"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecContainers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunServiceSpecTemplateSpecContainersName(original["name"], d, config), + "working_dir": flattenCloudRunServiceSpecTemplateSpecContainersWorkingDir(original["workingDir"], d, config), + "args": flattenCloudRunServiceSpecTemplateSpecContainersArgs(original["args"], d, config), + "env_from": flattenCloudRunServiceSpecTemplateSpecContainersEnvFrom(original["envFrom"], d, config), + "image": 
flattenCloudRunServiceSpecTemplateSpecContainersImage(original["image"], d, config), + "command": flattenCloudRunServiceSpecTemplateSpecContainersCommand(original["command"], d, config), + "env": flattenCloudRunServiceSpecTemplateSpecContainersEnv(original["env"], d, config), + "ports": flattenCloudRunServiceSpecTemplateSpecContainersPorts(original["ports"], d, config), + "resources": flattenCloudRunServiceSpecTemplateSpecContainersResources(original["resources"], d, config), + "volume_mounts": flattenCloudRunServiceSpecTemplateSpecContainersVolumeMounts(original["volumeMounts"], d, config), + "startup_probe": flattenCloudRunServiceSpecTemplateSpecContainersStartupProbe(original["startupProbe"], d, config), + "liveness_probe": flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbe(original["livenessProbe"], d, config), + }) + } + return transformed +} +func flattenCloudRunServiceSpecTemplateSpecContainersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersWorkingDir(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersArgs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersEnvFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "prefix": flattenCloudRunServiceSpecTemplateSpecContainersEnvFromPrefix(original["prefix"], d, config), + "config_map_ref": 
flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRef(original["configMapRef"], d, config), + "secret_ref": flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRef(original["secretRef"], d, config), + }) + } + return transformed +} +func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromPrefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRef(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["optional"] = + flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefOptional(original["optional"], d, config) + transformed["local_object_reference"] = + flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReference(original["localObjectReference"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefOptional(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReferenceName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReferenceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} 
+ +func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRef(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["local_object_reference"] = + flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReference(original["localObjectReference"], d, config) + transformed["optional"] = + flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefOptional(original["optional"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReferenceName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReferenceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefOptional(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersImage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersCommand(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersEnv(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(cloudrunServiceSpecTemplateSpecContainersContainersEnvSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "name": flattenCloudRunServiceSpecTemplateSpecContainersEnvName(original["name"], d, config), + "value": flattenCloudRunServiceSpecTemplateSpecContainersEnvValue(original["value"], d, config), + "value_from": flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFrom(original["valueFrom"], d, config), + }) + } + return transformed +} +func flattenCloudRunServiceSpecTemplateSpecContainersEnvName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersEnvValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["secret_key_ref"] = + flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRef(original["secretKeyRef"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRef(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key"] = + 
flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefKey(original["key"], d, config) + transformed["name"] = + flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersPorts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunServiceSpecTemplateSpecContainersPortsName(original["name"], d, config), + "protocol": flattenCloudRunServiceSpecTemplateSpecContainersPortsProtocol(original["protocol"], d, config), + "container_port": flattenCloudRunServiceSpecTemplateSpecContainersPortsContainerPort(original["containerPort"], d, config), + }) + } + return transformed +} +func flattenCloudRunServiceSpecTemplateSpecContainersPortsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersPortsProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersPortsContainerPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string 
fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["limits"] = + flattenCloudRunServiceSpecTemplateSpecContainersResourcesLimits(original["limits"], d, config) + transformed["requests"] = + flattenCloudRunServiceSpecTemplateSpecContainersResourcesRequests(original["requests"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecContainersResourcesLimits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersResourcesRequests(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersVolumeMounts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "mount_path": flattenCloudRunServiceSpecTemplateSpecContainersVolumeMountsMountPath(original["mountPath"], d, config), + "name": flattenCloudRunServiceSpecTemplateSpecContainersVolumeMountsName(original["name"], d, config), + }) + 
} + return transformed +} +func flattenCloudRunServiceSpecTemplateSpecContainersVolumeMountsMountPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersVolumeMountsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbe(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["initial_delay_seconds"] = + flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeInitialDelaySeconds(original["initialDelaySeconds"], d, config) + transformed["timeout_seconds"] = + flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeTimeoutSeconds(original["timeoutSeconds"], d, config) + transformed["period_seconds"] = + flattenCloudRunServiceSpecTemplateSpecContainersStartupProbePeriodSeconds(original["periodSeconds"], d, config) + transformed["failure_threshold"] = + flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeFailureThreshold(original["failureThreshold"], d, config) + transformed["tcp_socket"] = + flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeTcpSocket(original["tcpSocket"], d, config) + transformed["http_get"] = + flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGet(original["httpGet"], d, config) + transformed["grpc"] = + flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeGrpc(original["grpc"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeInitialDelaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbePeriodSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeFailureThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeTcpSocket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["port"] = + flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeTcpSocketPort(original["port"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeTcpSocketPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["path"] = + flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetPath(original["path"], d, config) + transformed["port"] = + flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetPort(original["port"], d, config) + transformed["http_headers"] = + flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetHttpHeaders(original["httpHeaders"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetHttpHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetHttpHeadersName(original["name"], d, config), + "value": flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetHttpHeadersValue(original["value"], d, config), + }) + } + return transformed +} +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetHttpHeadersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetHttpHeadersValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeGrpc(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["port"] = + flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeGrpcPort(original["port"], d, config) + transformed["service"] = + flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeGrpcService(original["service"], d, config) + return []interface{}{transformed} 
+} +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeGrpcPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersStartupProbeGrpcService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbe(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["initial_delay_seconds"] = + flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeInitialDelaySeconds(original["initialDelaySeconds"], d, config) + transformed["timeout_seconds"] = + flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeTimeoutSeconds(original["timeoutSeconds"], d, config) + transformed["period_seconds"] = + flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbePeriodSeconds(original["periodSeconds"], d, config) + transformed["failure_threshold"] = + flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeFailureThreshold(original["failureThreshold"], d, config) + transformed["http_get"] = + flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGet(original["httpGet"], d, config) + transformed["grpc"] = + flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeGrpc(original["grpc"], d, config) + return []interface{}{transformed} +} +func 
flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeInitialDelaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbePeriodSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeFailureThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok 
{ + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["path"] = + flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetPath(original["path"], d, config) + transformed["port"] = + flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetPort(original["port"], d, config) + transformed["http_headers"] = + flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetHttpHeaders(original["httpHeaders"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetHttpHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = 
append(transformed, map[string]interface{}{ + "name": flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetHttpHeadersName(original["name"], d, config), + "value": flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetHttpHeadersValue(original["value"], d, config), + }) + } + return transformed +} +func flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetHttpHeadersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetHttpHeadersValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeGrpc(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["port"] = + flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeGrpcPort(original["port"], d, config) + transformed["service"] = + flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeGrpcService(original["service"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeGrpcPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecContainersLivenessProbeGrpcService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenCloudRunServiceSpecTemplateSpecContainerConcurrency(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecServiceAccountName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecVolumes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunServiceSpecTemplateSpecVolumesName(original["name"], d, config), + "secret": flattenCloudRunServiceSpecTemplateSpecVolumesSecret(original["secret"], d, config), + }) + } + return transformed +} +func flattenCloudRunServiceSpecTemplateSpecVolumesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { 
+ return v +} + +func flattenCloudRunServiceSpecTemplateSpecVolumesSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["secret_name"] = + flattenCloudRunServiceSpecTemplateSpecVolumesSecretSecretName(original["secretName"], d, config) + transformed["default_mode"] = + flattenCloudRunServiceSpecTemplateSpecVolumesSecretDefaultMode(original["defaultMode"], d, config) + transformed["items"] = + flattenCloudRunServiceSpecTemplateSpecVolumesSecretItems(original["items"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceSpecTemplateSpecVolumesSecretSecretName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecVolumesSecretDefaultMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecVolumesSecretItems(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "key": flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsKey(original["key"], d, config), + 
"path": flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsPath(original["path"], d, config), + "mode": flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsMode(original["mode"], d, config), + }) + } + return transformed +} +func flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceSpecTemplateSpecServingState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["conditions"] = + flattenCloudRunServiceStatusConditions(original["conditions"], d, config) + transformed["url"] = + flattenCloudRunServiceStatusUrl(original["url"], d, config) + transformed["observed_generation"] = + flattenCloudRunServiceStatusObservedGeneration(original["observedGeneration"], d, config) + transformed["latest_created_revision_name"] = + flattenCloudRunServiceStatusLatestCreatedRevisionName(original["latestCreatedRevisionName"], d, config) + 
transformed["latest_ready_revision_name"] = + flattenCloudRunServiceStatusLatestReadyRevisionName(original["latestReadyRevisionName"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceStatusConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "message": flattenCloudRunServiceStatusConditionsMessage(original["message"], d, config), + "status": flattenCloudRunServiceStatusConditionsStatus(original["status"], d, config), + "reason": flattenCloudRunServiceStatusConditionsReason(original["reason"], d, config), + "type": flattenCloudRunServiceStatusConditionsType(original["type"], d, config), + }) + } + return transformed +} +func flattenCloudRunServiceStatusConditionsMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceStatusConditionsStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceStatusConditionsReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceStatusConditionsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceStatusUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceStatusObservedGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err 
:= tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceStatusLatestCreatedRevisionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceStatusLatestReadyRevisionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceMetadata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["labels"] = + flattenCloudRunServiceMetadataLabels(original["labels"], d, config) + transformed["generation"] = + flattenCloudRunServiceMetadataGeneration(original["generation"], d, config) + transformed["resource_version"] = + flattenCloudRunServiceMetadataResourceVersion(original["resourceVersion"], d, config) + transformed["self_link"] = + flattenCloudRunServiceMetadataSelfLink(original["selfLink"], d, config) + transformed["uid"] = + flattenCloudRunServiceMetadataUid(original["uid"], d, config) + transformed["namespace"] = + flattenCloudRunServiceMetadataNamespace(original["namespace"], d, config) + transformed["annotations"] = + flattenCloudRunServiceMetadataAnnotations(original["annotations"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunServiceMetadataLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceMetadataGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if 
intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunServiceMetadataResourceVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceMetadataSelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceMetadataUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunServiceMetadataNamespace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("project") +} + +func flattenCloudRunServiceMetadataAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCloudRunServiceSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + transformed := make(map[string]interface{}) + transformedTraffic, err := expandCloudRunServiceSpecTraffic(d.Get("traffic"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTraffic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["traffic"] = transformedTraffic + } + + transformedTemplate, err := expandCloudRunServiceSpecTemplate(d.Get("template"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTemplate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["template"] = transformedTemplate + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTraffic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := 
make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRevisionName, err := expandCloudRunServiceSpecTrafficRevisionName(original["revision_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRevisionName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["revisionName"] = transformedRevisionName + } + + transformedPercent, err := expandCloudRunServiceSpecTrafficPercent(original["percent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["percent"] = transformedPercent + } + + transformedTag, err := expandCloudRunServiceSpecTrafficTag(original["tag"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tag"] = transformedTag + } + + transformedLatestRevision, err := expandCloudRunServiceSpecTrafficLatestRevision(original["latest_revision"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLatestRevision); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["latestRevision"] = transformedLatestRevision + } + + transformedUrl, err := expandCloudRunServiceSpecTrafficUrl(original["url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["url"] = transformedUrl + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunServiceSpecTrafficRevisionName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTrafficPercent(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTrafficTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTrafficLatestRevision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTrafficUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMetadata, err := expandCloudRunServiceSpecTemplateMetadata(original["metadata"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMetadata); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["metadata"] = transformedMetadata + } + + transformedSpec, err := expandCloudRunServiceSpecTemplateSpec(original["spec"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSpec); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["spec"] = transformedSpec + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateMetadata(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLabels, err := 
expandCloudRunServiceSpecTemplateMetadataLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labels"] = transformedLabels + } + + transformedGeneration, err := expandCloudRunServiceSpecTemplateMetadataGeneration(original["generation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["generation"] = transformedGeneration + } + + transformedResourceVersion, err := expandCloudRunServiceSpecTemplateMetadataResourceVersion(original["resource_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResourceVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resourceVersion"] = transformedResourceVersion + } + + transformedSelfLink, err := expandCloudRunServiceSpecTemplateMetadataSelfLink(original["self_link"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSelfLink); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["selfLink"] = transformedSelfLink + } + + transformedUid, err := expandCloudRunServiceSpecTemplateMetadataUid(original["uid"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUid); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uid"] = transformedUid + } + + transformedNamespace, err := expandCloudRunServiceSpecTemplateMetadataNamespace(original["namespace"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["namespace"] = transformedNamespace + } + + transformedAnnotations, err := expandCloudRunServiceSpecTemplateMetadataAnnotations(original["annotations"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedAnnotations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["annotations"] = transformedAnnotations + } + + transformedName, err := expandCloudRunServiceSpecTemplateMetadataName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateMetadataLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunServiceSpecTemplateMetadataGeneration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateMetadataResourceVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateMetadataSelfLink(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateMetadataUid(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +// If the property hasn't been explicitly set in config use the project defined by the provider or env. 
+func expandCloudRunServiceSpecTemplateMetadataNamespace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return project, nil + } + } + return v, nil +} + +func expandCloudRunServiceSpecTemplateMetadataAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunServiceSpecTemplateMetadataName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if d.Get("autogenerate_revision_name") == true { + return nil, nil + } + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedContainers, err := expandCloudRunServiceSpecTemplateSpecContainers(original["containers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContainers); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["containers"] = transformedContainers + } + + transformedContainerConcurrency, err := expandCloudRunServiceSpecTemplateSpecContainerConcurrency(original["container_concurrency"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContainerConcurrency); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["containerConcurrency"] = transformedContainerConcurrency + } + + transformedTimeoutSeconds, err := 
expandCloudRunServiceSpecTemplateSpecTimeoutSeconds(original["timeout_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeoutSeconds"] = transformedTimeoutSeconds + } + + transformedServiceAccountName, err := expandCloudRunServiceSpecTemplateSpecServiceAccountName(original["service_account_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccountName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAccountName"] = transformedServiceAccountName + } + + transformedVolumes, err := expandCloudRunServiceSpecTemplateSpecVolumes(original["volumes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVolumes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["volumes"] = transformedVolumes + } + + transformedServingState, err := expandCloudRunServiceSpecTemplateSpecServingState(original["serving_state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServingState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["servingState"] = transformedServingState + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + 
} + + transformedWorkingDir, err := expandCloudRunServiceSpecTemplateSpecContainersWorkingDir(original["working_dir"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWorkingDir); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["workingDir"] = transformedWorkingDir + } + + transformedArgs, err := expandCloudRunServiceSpecTemplateSpecContainersArgs(original["args"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["args"] = transformedArgs + } + + transformedEnvFrom, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFrom(original["env_from"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnvFrom); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["envFrom"] = transformedEnvFrom + } + + transformedImage, err := expandCloudRunServiceSpecTemplateSpecContainersImage(original["image"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImage); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["image"] = transformedImage + } + + transformedCommand, err := expandCloudRunServiceSpecTemplateSpecContainersCommand(original["command"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCommand); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["command"] = transformedCommand + } + + transformedEnv, err := expandCloudRunServiceSpecTemplateSpecContainersEnv(original["env"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnv); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["env"] = transformedEnv + } + + transformedPorts, err := expandCloudRunServiceSpecTemplateSpecContainersPorts(original["ports"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedPorts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ports"] = transformedPorts + } + + transformedResources, err := expandCloudRunServiceSpecTemplateSpecContainersResources(original["resources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resources"] = transformedResources + } + + transformedVolumeMounts, err := expandCloudRunServiceSpecTemplateSpecContainersVolumeMounts(original["volume_mounts"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVolumeMounts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["volumeMounts"] = transformedVolumeMounts + } + + transformedStartupProbe, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbe(original["startup_probe"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartupProbe); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startupProbe"] = transformedStartupProbe + } + + transformedLivenessProbe, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbe(original["liveness_probe"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLivenessProbe); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["livenessProbe"] = transformedLivenessProbe + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersWorkingDir(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersArgs(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPrefix, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromPrefix(original["prefix"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrefix); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["prefix"] = transformedPrefix + } + + transformedConfigMapRef, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRef(original["config_map_ref"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConfigMapRef); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["configMapRef"] = transformedConfigMapRef + } + + transformedSecretRef, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRef(original["secret_ref"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretRef); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secretRef"] = transformedSecretRef + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvFromPrefix(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRef(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw 
:= l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedOptional, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefOptional(original["optional"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOptional); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["optional"] = transformedOptional + } + + transformedLocalObjectReference, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReference(original["local_object_reference"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocalObjectReference); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["localObjectReference"] = transformedLocalObjectReference + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefOptional(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReferenceName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReferenceName(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRef(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLocalObjectReference, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReference(original["local_object_reference"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocalObjectReference); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["localObjectReference"] = transformedLocalObjectReference + } + + transformedOptional, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefOptional(original["optional"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOptional); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["optional"] = transformedOptional + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReferenceName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func 
expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReferenceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefOptional(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersImage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersCommand(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnv(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersEnvName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + transformedValueFrom, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValueFrom(original["value_from"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedValueFrom); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["valueFrom"] = transformedValueFrom + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvValueFrom(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSecretKeyRef, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRef(original["secret_key_ref"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretKeyRef); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secretKeyRef"] = transformedSecretKeyRef + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRef(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } 
+ + transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersPorts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersPortsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedProtocol, err := expandCloudRunServiceSpecTemplateSpecContainersPortsProtocol(original["protocol"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProtocol); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["protocol"] = transformedProtocol + } + + transformedContainerPort, err := expandCloudRunServiceSpecTemplateSpecContainersPortsContainerPort(original["container_port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContainerPort); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["containerPort"] = transformedContainerPort + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersPortsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersPortsProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersPortsContainerPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLimits, err := expandCloudRunServiceSpecTemplateSpecContainersResourcesLimits(original["limits"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLimits); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["limits"] = transformedLimits + } + + transformedRequests, err := expandCloudRunServiceSpecTemplateSpecContainersResourcesRequests(original["requests"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequests); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requests"] = transformedRequests + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersResourcesLimits(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return 
map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersResourcesRequests(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersVolumeMounts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMountPath, err := expandCloudRunServiceSpecTemplateSpecContainersVolumeMountsMountPath(original["mount_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMountPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mountPath"] = transformedMountPath + } + + transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersVolumeMountsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersVolumeMountsMountPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersVolumeMountsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil 
+} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbe(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInitialDelaySeconds, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeInitialDelaySeconds(original["initial_delay_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInitialDelaySeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["initialDelaySeconds"] = transformedInitialDelaySeconds + } + + transformedTimeoutSeconds, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeTimeoutSeconds(original["timeout_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeoutSeconds"] = transformedTimeoutSeconds + } + + transformedPeriodSeconds, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbePeriodSeconds(original["period_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPeriodSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["periodSeconds"] = transformedPeriodSeconds + } + + transformedFailureThreshold, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeFailureThreshold(original["failure_threshold"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["failureThreshold"] = transformedFailureThreshold + } + + transformedTcpSocket, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeTcpSocket(original["tcp_socket"], d, config) + if 
err != nil { + return nil, err + } else { + transformed["tcpSocket"] = transformedTcpSocket + } + + transformedHttpGet, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGet(original["http_get"], d, config) + if err != nil { + return nil, err + } else { + transformed["httpGet"] = transformedHttpGet + } + + transformedGrpc, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeGrpc(original["grpc"], d, config) + if err != nil { + return nil, err + } else { + transformed["grpc"] = transformedGrpc + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeInitialDelaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbePeriodSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeFailureThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeTcpSocket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPort, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeTcpSocketPort(original["port"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeTcpSocketPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedPort, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedHttpHeaders, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetHttpHeaders(original["http_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["httpHeaders"] = transformedHttpHeaders + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetPath(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetHttpHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetHttpHeadersName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetHttpHeadersValue(original["value"], d, config) + if err != nil { + return nil, err + } else { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetHttpHeadersName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeHttpGetHttpHeadersValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeGrpc(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := 
make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPort, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeGrpcPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedService, err := expandCloudRunServiceSpecTemplateSpecContainersStartupProbeGrpcService(original["service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["service"] = transformedService + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeGrpcPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersStartupProbeGrpcService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersLivenessProbe(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInitialDelaySeconds, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeInitialDelaySeconds(original["initial_delay_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInitialDelaySeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["initialDelaySeconds"] = transformedInitialDelaySeconds + } + + 
transformedTimeoutSeconds, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeTimeoutSeconds(original["timeout_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeoutSeconds"] = transformedTimeoutSeconds + } + + transformedPeriodSeconds, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbePeriodSeconds(original["period_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPeriodSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["periodSeconds"] = transformedPeriodSeconds + } + + transformedFailureThreshold, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeFailureThreshold(original["failure_threshold"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["failureThreshold"] = transformedFailureThreshold + } + + transformedHttpGet, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGet(original["http_get"], d, config) + if err != nil { + return nil, err + } else { + transformed["httpGet"] = transformedHttpGet + } + + transformedGrpc, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeGrpc(original["grpc"], d, config) + if err != nil { + return nil, err + } else { + transformed["grpc"] = transformedGrpc + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeInitialDelaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandCloudRunServiceSpecTemplateSpecContainersLivenessProbePeriodSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeFailureThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedPort, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedHttpHeaders, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetHttpHeaders(original["http_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["httpHeaders"] = transformedHttpHeaders + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetPath(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetHttpHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetHttpHeadersName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetHttpHeadersValue(original["value"], d, config) + if err != nil { + return nil, err + } else { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetHttpHeadersName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeHttpGetHttpHeadersValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeGrpc(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + 
transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPort, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeGrpcPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedService, err := expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeGrpcService(original["service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["service"] = transformedService + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeGrpcPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainersLivenessProbeGrpcService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecContainerConcurrency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecServiceAccountName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req 
:= make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunServiceSpecTemplateSpecVolumesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedSecret, err := expandCloudRunServiceSpecTemplateSpecVolumesSecret(original["secret"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secret"] = transformedSecret + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSecretName, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretSecretName(original["secret_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secretName"] = transformedSecretName + } + + transformedDefaultMode, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretDefaultMode(original["default_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDefaultMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["defaultMode"] = transformedDefaultMode + } + + transformedItems, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretItems(original["items"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedItems); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["items"] = transformedItems + } + + return transformed, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesSecretSecretName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesSecretDefaultMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesSecretItems(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedPath, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedMode, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsMode(original["mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMode); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["mode"] = transformedMode + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceSpecTemplateSpecServingState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceMetadata(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLabels, err := expandCloudRunServiceMetadataLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labels"] = transformedLabels + } + + transformedGeneration, err := expandCloudRunServiceMetadataGeneration(original["generation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["generation"] = transformedGeneration + } + + transformedResourceVersion, err := expandCloudRunServiceMetadataResourceVersion(original["resource_version"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedResourceVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resourceVersion"] = transformedResourceVersion + } + + transformedSelfLink, err := expandCloudRunServiceMetadataSelfLink(original["self_link"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSelfLink); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["selfLink"] = transformedSelfLink + } + + transformedUid, err := expandCloudRunServiceMetadataUid(original["uid"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUid); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uid"] = transformedUid + } + + transformedNamespace, err := expandCloudRunServiceMetadataNamespace(original["namespace"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["namespace"] = transformedNamespace + } + + transformedAnnotations, err := expandCloudRunServiceMetadataAnnotations(original["annotations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAnnotations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["annotations"] = transformedAnnotations + } + + return transformed, nil +} + +func expandCloudRunServiceMetadataLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunServiceMetadataGeneration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceMetadataResourceVersion(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceMetadataSelfLink(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunServiceMetadataUid(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +// If the property hasn't been explicitly set in config use the project defined by the provider or env. +func expandCloudRunServiceMetadataNamespace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return project, nil + } + } + return v, nil +} + +func expandCloudRunServiceMetadataAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func resourceCloudRunServiceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + name := d.Get("name").(string) + if obj["metadata"] == nil { + obj["metadata"] = make(map[string]interface{}) + } + metadata := obj["metadata"].(map[string]interface{}) + metadata["name"] = name + + // The only acceptable version/kind right now + obj["apiVersion"] = "serving.knative.dev/v1" + obj["kind"] = "Service" + return obj, nil +} + +func resourceCloudRunServiceDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // metadata is not present if the API returns an error + if obj, ok := res["metadata"]; ok { + if meta, ok := obj.(map[string]interface{}); ok { + res["name"] = meta["name"] + } else { + return nil, fmt.Errorf("Unable to decode 
'metadata' block from API response.") + } + } + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_service_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_service_sweeper.go new file mode 100644 index 0000000000..4da66e5e6e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrun/resource_cloud_run_service_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudrun + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudRunService", testSweepCloudRunService) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudRunService(region string) error { + resourceName := "CloudRunService" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://{{location}}-run.googleapis.com/apis/serving.knative.dev/v1/namespaces/{{project}}/services", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["services"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://{{location}}-run.googleapis.com/apis/serving.knative.dev/v1/namespaces/{{project}}/services/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/cloud_run_v2_operation.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/cloud_run_v2_operation.go new file mode 100644 index 0000000000..5fd23754ff --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/cloud_run_v2_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudrunv2 + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type CloudRunV2OperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *CloudRunV2OperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.CloudRunV2BasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createCloudRunV2Waiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*CloudRunV2OperationWaiter, error) { + w := &CloudRunV2OperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func CloudRunV2OperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createCloudRunV2Waiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func CloudRunV2OperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createCloudRunV2Waiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/iam_cloud_run_v2_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/iam_cloud_run_v2_job.go new file mode 100644 index 0000000000..c3339d3baa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/iam_cloud_run_v2_job.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudrunv2 + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var CloudRunV2JobIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type CloudRunV2JobIamUpdater struct { + project string + location string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func CloudRunV2JobIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/jobs/(?P[^/]+)", 
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudRunV2JobIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func CloudRunV2JobIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/jobs/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudRunV2JobIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *CloudRunV2JobIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyJobUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, 
err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *CloudRunV2JobIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyJobUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *CloudRunV2JobIamUpdater) qualifyJobUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{CloudRunV2BasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/jobs/%s", u.project, u.location, u.name), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u 
*CloudRunV2JobIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/jobs/%s", u.project, u.location, u.name) +} + +func (u *CloudRunV2JobIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-cloudrunv2-job-%s", u.GetResourceId()) +} + +func (u *CloudRunV2JobIamUpdater) DescribeResource() string { + return fmt.Sprintf("cloudrunv2 job %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/iam_cloud_run_v2_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/iam_cloud_run_v2_service.go new file mode 100644 index 0000000000..03be7c8208 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/iam_cloud_run_v2_service.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudrunv2 + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var CloudRunV2ServiceIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type CloudRunV2ServiceIamUpdater struct { + project string + location string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func CloudRunV2ServiceIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudRunV2ServiceIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func CloudRunV2ServiceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudRunV2ServiceIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *CloudRunV2ServiceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyServiceUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := 
tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *CloudRunV2ServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyServiceUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *CloudRunV2ServiceIamUpdater) qualifyServiceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{CloudRunV2BasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/services/%s", u.project, u.location, u.name), methodIdentifier) + url, err := 
tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *CloudRunV2ServiceIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/services/%s", u.project, u.location, u.name) +} + +func (u *CloudRunV2ServiceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-cloudrunv2-service-%s", u.GetResourceId()) +} + +func (u *CloudRunV2ServiceIamUpdater) DescribeResource() string { + return fmt.Sprintf("cloudrunv2 service %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_run_v2_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_job.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_run_v2_job.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_job.go index 9cf8962268..a350c1a347 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_cloud_run_v2_job.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_job.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package cloudrunv2 import ( "fmt" @@ -21,6 +24,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceCloudRunV2Job() *schema.Resource { @@ -45,7 +52,7 @@ func ResourceCloudRunV2Job() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `Name of the Job.`, }, "template": { @@ -395,14 +402,14 @@ This field is not supported in Cloud Run Job currently.`, Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"EXECUTION_ENVIRONMENT_GEN1", "EXECUTION_ENVIRONMENT_GEN2", ""}), + ValidateFunc: verify.ValidateEnum([]string{"EXECUTION_ENVIRONMENT_GEN1", "EXECUTION_ENVIRONMENT_GEN2", ""}), Description: `The execution environment being used to host this Task. Possible values: ["EXECUTION_ENVIRONMENT_GEN1", "EXECUTION_ENVIRONMENT_GEN2"]`, }, "max_retries": { Type: schema.TypeInt, - Computed: true, Optional: true, Description: `Number of retries allowed per Task, before marking this Task failed.`, + Default: 3, }, "service_account": { Type: schema.TypeString, @@ -509,7 +516,7 @@ A duration in seconds with up to nine fractional digits, ending with 's'. 
Exampl "egress": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"ALL_TRAFFIC", "PRIVATE_RANGES_ONLY", ""}), + ValidateFunc: verify.ValidateEnum([]string{"ALL_TRAFFIC", "PRIVATE_RANGES_ONLY", ""}), Description: `Traffic VPC egress settings. Possible values: ["ALL_TRAFFIC", "PRIVATE_RANGES_ONLY"]`, }, }, @@ -518,11 +525,27 @@ A duration in seconds with up to nine fractional digits, ending with 's'. Exampl }, }, }, + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: `Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. + +Cloud Run API v2 does not support annotations with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected. +All system annotations in v1 now have a corresponding field in v2 ExecutionTemplate. + +This field follows Kubernetes annotations' namespacing, limits, and rules.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `KRM-style labels for the resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeMap, + Optional: true, + Description: `Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, +or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or +https://cloud.google.com/run/docs/configuring/labels. + +Cloud Run API v2 does not support labels with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected. 
+All system labels in v1 now have a corresponding field in v2 ExecutionTemplate.`, + Elem: &schema.Schema{Type: schema.TypeString}, }, "parallelism": { Type: schema.TypeInt, @@ -539,6 +562,17 @@ A duration in seconds with up to nine fractional digits, ending with 's'. Exampl }, }, }, + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: `Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. + +Cloud Run API v2 does not support annotations with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected on new resources. +All system annotations in v1 now have a corresponding field in v2 Job. + +This field follows Kubernetes annotations' namespacing, limits, and rules.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "binary_authorization": { Type: schema.TypeList, Optional: true, @@ -570,17 +604,24 @@ A duration in seconds with up to nine fractional digits, ending with 's'. Exampl Description: `Arbitrary version identifier for the API client.`, }, "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `KRM-style labels for the resource. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels Cloud Run will populate some labels with 'run.googleapis.com' or 'serving.knative.dev' namespaces. Those labels are read-only, and user changes will not be preserved.`, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeMap, + Optional: true, + Description: `Unstructured key value map that can be used to organize and categorize objects. 
User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, +environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. + +Cloud Run API v2 does not support labels with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected. +All system labels in v1 now have a corresponding field in v2 Job.`, + Elem: &schema.Schema{Type: schema.TypeString}, }, "launch_stage": { Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED", ""}), - Description: `The launch stage as defined by Google Cloud Platform Launch Stages. Cloud Run supports ALPHA, BETA, and GA. If no value is specified, GA is assumed. Possible values: ["UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED"]`, + ValidateFunc: verify.ValidateEnum([]string{"UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED", ""}), + Description: `The launch stage as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/products#product-launch-stages). Cloud Run supports ALPHA, BETA, and GA. +If no value is specified, GA is assumed. Set the launch stage to a preview stage on input to allow use of preview features in that stage. On read (or output), describes whether the resource uses preview features. + +For example, if ALPHA is provided as input, but only BETA and GA-level features are used, this field will be BETA on output. 
Possible values: ["UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED"]`, }, "location": { Type: schema.TypeString, @@ -766,8 +807,8 @@ A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to n } func resourceCloudRunV2JobCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -776,41 +817,47 @@ func resourceCloudRunV2JobCreate(d *schema.ResourceData, meta interface{}) error labelsProp, err := expandCloudRunV2JobLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } + annotationsProp, err := expandCloudRunV2JobAnnotations(d.Get("annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(annotationsProp)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } clientProp, err := expandCloudRunV2JobClient(d.Get("client"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("client"); !isEmptyValue(reflect.ValueOf(clientProp)) && (ok || !reflect.DeepEqual(v, clientProp)) { + } else if v, ok := d.GetOkExists("client"); !tpgresource.IsEmptyValue(reflect.ValueOf(clientProp)) && (ok || !reflect.DeepEqual(v, clientProp)) { obj["client"] = clientProp } clientVersionProp, err := expandCloudRunV2JobClientVersion(d.Get("client_version"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("client_version"); !isEmptyValue(reflect.ValueOf(clientVersionProp)) && (ok || !reflect.DeepEqual(v, clientVersionProp)) { + } else if v, ok := d.GetOkExists("client_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(clientVersionProp)) && (ok || !reflect.DeepEqual(v, clientVersionProp)) { obj["clientVersion"] = clientVersionProp } launchStageProp, err := expandCloudRunV2JobLaunchStage(d.Get("launch_stage"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("launch_stage"); !isEmptyValue(reflect.ValueOf(launchStageProp)) && (ok || !reflect.DeepEqual(v, launchStageProp)) { + } else if v, ok := d.GetOkExists("launch_stage"); !tpgresource.IsEmptyValue(reflect.ValueOf(launchStageProp)) && (ok || !reflect.DeepEqual(v, launchStageProp)) { obj["launchStage"] = launchStageProp } binaryAuthorizationProp, err := expandCloudRunV2JobBinaryAuthorization(d.Get("binary_authorization"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("binary_authorization"); !isEmptyValue(reflect.ValueOf(binaryAuthorizationProp)) && (ok || !reflect.DeepEqual(v, binaryAuthorizationProp)) { + } else if v, ok := d.GetOkExists("binary_authorization"); !tpgresource.IsEmptyValue(reflect.ValueOf(binaryAuthorizationProp)) && (ok || !reflect.DeepEqual(v, binaryAuthorizationProp)) { obj["binaryAuthorization"] = binaryAuthorizationProp } templateProp, err := expandCloudRunV2JobTemplate(d.Get("template"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("template"); !isEmptyValue(reflect.ValueOf(templateProp)) && (ok || !reflect.DeepEqual(v, templateProp)) { + } else if v, ok := d.GetOkExists("template"); !tpgresource.IsEmptyValue(reflect.ValueOf(templateProp)) && (ok || !reflect.DeepEqual(v, templateProp)) { obj["template"] = templateProp } - url, err := replaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/jobs?jobId={{name}}") + url, err := tpgresource.ReplaceVars(d, config, 
"{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/jobs?jobId={{name}}") if err != nil { return err } @@ -818,24 +865,32 @@ func resourceCloudRunV2JobCreate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Creating new Job: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Job: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Job: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/jobs/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/jobs/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -855,7 +910,7 @@ func resourceCloudRunV2JobCreate(d *schema.ResourceData, meta interface{}) error } // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/jobs/{{name}}") + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/jobs/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -867,33 +922,39 @@ func resourceCloudRunV2JobCreate(d *schema.ResourceData, meta interface{}) error } func resourceCloudRunV2JobRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/jobs/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/jobs/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Job: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("CloudRunV2Job %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudRunV2Job %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -909,6 +970,9 @@ func resourceCloudRunV2JobRead(d *schema.ResourceData, meta 
interface{}) error { if err := d.Set("labels", flattenCloudRunV2JobLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading Job: %s", err) } + if err := d.Set("annotations", flattenCloudRunV2JobAnnotations(res["annotations"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } if err := d.Set("client", flattenCloudRunV2JobClient(res["client"], d, config)); err != nil { return fmt.Errorf("Error reading Job: %s", err) } @@ -950,15 +1014,15 @@ func resourceCloudRunV2JobRead(d *schema.ResourceData, meta interface{}) error { } func resourceCloudRunV2JobUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Job: %s", err) } @@ -968,41 +1032,47 @@ func resourceCloudRunV2JobUpdate(d *schema.ResourceData, meta interface{}) error labelsProp, err := expandCloudRunV2JobLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } + annotationsProp, err := expandCloudRunV2JobAnnotations(d.Get("annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } clientProp, err := expandCloudRunV2JobClient(d.Get("client"), d, config) if err != nil { 
return err - } else if v, ok := d.GetOkExists("client"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientProp)) { + } else if v, ok := d.GetOkExists("client"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientProp)) { obj["client"] = clientProp } clientVersionProp, err := expandCloudRunV2JobClientVersion(d.Get("client_version"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("client_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientVersionProp)) { + } else if v, ok := d.GetOkExists("client_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientVersionProp)) { obj["clientVersion"] = clientVersionProp } launchStageProp, err := expandCloudRunV2JobLaunchStage(d.Get("launch_stage"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("launch_stage"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, launchStageProp)) { + } else if v, ok := d.GetOkExists("launch_stage"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, launchStageProp)) { obj["launchStage"] = launchStageProp } binaryAuthorizationProp, err := expandCloudRunV2JobBinaryAuthorization(d.Get("binary_authorization"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("binary_authorization"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, binaryAuthorizationProp)) { + } else if v, ok := d.GetOkExists("binary_authorization"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, binaryAuthorizationProp)) { obj["binaryAuthorization"] = binaryAuthorizationProp } templateProp, err := expandCloudRunV2JobTemplate(d.Get("template"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("template"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, templateProp)) { + } else if v, ok := 
d.GetOkExists("template"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, templateProp)) { obj["template"] = templateProp } - url, err := replaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/jobs/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/jobs/{{name}}") if err != nil { return err } @@ -1010,11 +1080,19 @@ func resourceCloudRunV2JobUpdate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Updating Job %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Job %q: %s", d.Id(), err) @@ -1034,21 +1112,21 @@ func resourceCloudRunV2JobUpdate(d *schema.ResourceData, meta interface{}) error } func resourceCloudRunV2JobDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Job: %s", err) } billingProject = project - url, err := replaceVars(d, config, 
"{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/jobs/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/jobs/{{name}}") if err != nil { return err } @@ -1057,13 +1135,21 @@ func resourceCloudRunV2JobDelete(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Deleting Job %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "Job") + return transport_tpg.HandleNotFoundError(err, d, "Job") } err = CloudRunV2OperationWaitTime( @@ -1079,8 +1165,8 @@ func resourceCloudRunV2JobDelete(d *schema.ResourceData, meta interface{}) error } func resourceCloudRunV2JobImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/jobs/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -1089,7 +1175,7 @@ func resourceCloudRunV2JobImport(d *schema.ResourceData, meta interface{}) ([]*s } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/jobs/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/jobs/{{name}}") if err != nil { return nil, 
fmt.Errorf("Error constructing id: %s", err) } @@ -1098,31 +1184,35 @@ func resourceCloudRunV2JobImport(d *schema.ResourceData, meta interface{}) ([]*s return []*schema.ResourceData{d}, nil } -func flattenCloudRunV2JobUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2JobGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobGeneration(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobClient(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobClient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobClientVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobClientVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobLaunchStage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobLaunchStage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobBinaryAuthorization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobBinaryAuthorization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } 
@@ -1137,15 +1227,15 @@ func flattenCloudRunV2JobBinaryAuthorization(v interface{}, d *schema.ResourceDa flattenCloudRunV2JobBinaryAuthorizationUseDefault(original["useDefault"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobBinaryAuthorizationBreakglassJustification(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobBinaryAuthorizationBreakglassJustification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobBinaryAuthorizationUseDefault(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobBinaryAuthorizationUseDefault(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1156,6 +1246,8 @@ func flattenCloudRunV2JobTemplate(v interface{}, d *schema.ResourceData, config transformed := make(map[string]interface{}) transformed["labels"] = flattenCloudRunV2JobTemplateLabels(original["labels"], d, config) + transformed["annotations"] = + flattenCloudRunV2JobTemplateAnnotations(original["annotations"], d, config) transformed["parallelism"] = flattenCloudRunV2JobTemplateParallelism(original["parallelism"], d, config) transformed["task_count"] = @@ -1164,14 +1256,18 @@ func flattenCloudRunV2JobTemplate(v interface{}, d *schema.ResourceData, config flattenCloudRunV2JobTemplateTemplate(original["template"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTemplateLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenCloudRunV2JobTemplateAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateParallelism(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateParallelism(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1185,10 +1281,10 @@ func flattenCloudRunV2JobTemplateParallelism(v interface{}, d *schema.ResourceDa return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTaskCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTaskCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1202,7 +1298,7 @@ func flattenCloudRunV2JobTemplateTaskCount(v interface{}, d *schema.ResourceData return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1224,12 +1320,12 @@ func flattenCloudRunV2JobTemplateTemplate(v interface{}, d *schema.ResourceData, transformed["encryption_key"] = flattenCloudRunV2JobTemplateTemplateEncryptionKey(original["encryptionKey"], d, config) transformed["vpc_access"] = - flattenCloudRunV2JobTemplateTemplateVPCAccess(original["vpcAccess"], d, config) + 
flattenCloudRunV2JobTemplateTemplateVpcAccess(original["vpcAccess"], d, config) transformed["max_retries"] = flattenCloudRunV2JobTemplateTemplateMaxRetries(original["maxRetries"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTemplateTemplateContainers(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1257,23 +1353,23 @@ func flattenCloudRunV2JobTemplateTemplateContainers(v interface{}, d *schema.Res } return transformed } -func flattenCloudRunV2JobTemplateTemplateContainersName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersImage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersImage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersCommand(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersCommand(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersArgs(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersArgs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersEnv(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersEnv(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { if v == nil { return v } @@ -1293,15 +1389,15 @@ func flattenCloudRunV2JobTemplateTemplateContainersEnv(v interface{}, d *schema. } return transformed } -func flattenCloudRunV2JobTemplateTemplateContainersEnvName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersEnvName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersEnvValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersEnvValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1314,7 +1410,7 @@ func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSource(v interface{}, flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(original["secretKeyRef"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1329,15 +1425,15 @@ func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(v flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefVersion(original["version"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefSecret(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1350,11 +1446,11 @@ func flattenCloudRunV2JobTemplateTemplateContainersResources(v interface{}, d *s flattenCloudRunV2JobTemplateTemplateContainersResourcesLimits(original["limits"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTemplateTemplateContainersResourcesLimits(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersResourcesLimits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersPorts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersPorts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1373,14 +1469,14 @@ func flattenCloudRunV2JobTemplateTemplateContainersPorts(v interface{}, d *schem } return transformed } -func flattenCloudRunV2JobTemplateTemplateContainersPortsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenCloudRunV2JobTemplateTemplateContainersPortsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersPortsContainerPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersPortsContainerPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1394,7 +1490,7 @@ func flattenCloudRunV2JobTemplateTemplateContainersPortsContainerPort(v interfac return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplateContainersVolumeMounts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersVolumeMounts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1413,19 +1509,19 @@ func flattenCloudRunV2JobTemplateTemplateContainersVolumeMounts(v interface{}, d } return transformed } -func flattenCloudRunV2JobTemplateTemplateContainersVolumeMountsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersVolumeMountsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersVolumeMountsMountPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersVolumeMountsMountPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersWorkingDir(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
+func flattenCloudRunV2JobTemplateTemplateContainersWorkingDir(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbe(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbe(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1448,10 +1544,10 @@ func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbe(v interface{}, flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocket(original["tcpSocket"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeInitialDelaySeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeInitialDelaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1465,10 +1561,10 @@ func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeInitialDelaySeco return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1482,10 +1578,10 @@ func 
flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTimeoutSeconds(v return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbePeriodSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbePeriodSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1499,10 +1595,10 @@ func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbePeriodSeconds(v return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeFailureThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeFailureThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1516,7 +1612,7 @@ func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeFailureThreshold return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGet(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1528,11 +1624,11 @@ func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGet(v interf 
flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeaders(original["httpHeaders"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1551,15 +1647,15 @@ func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeade } return transformed } -func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocket(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1569,10 +1665,10 @@ func 
flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocket(v inte flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocketPort(original["port"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocketPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocketPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1586,7 +1682,7 @@ func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocketPort(v return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplateContainersStartupProbe(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbe(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1609,10 +1705,10 @@ func flattenCloudRunV2JobTemplateTemplateContainersStartupProbe(v interface{}, d flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocket(original["tcpSocket"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeInitialDelaySeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeInitialDelaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1626,10 
+1722,10 @@ func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeInitialDelaySecon return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1643,10 +1739,10 @@ func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTimeoutSeconds(v return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplateContainersStartupProbePeriodSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbePeriodSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1660,10 +1756,10 @@ func flattenCloudRunV2JobTemplateTemplateContainersStartupProbePeriodSeconds(v i return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeFailureThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeFailureThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1677,7 +1773,7 @@ func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeFailureThreshold( return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGet(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1689,11 +1785,11 @@ func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGet(v interfa flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeaders(original["httpHeaders"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1712,15 +1808,15 @@ func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeader } return transformed } -func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocket(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1730,10 +1826,10 @@ func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocket(v inter flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocketPort(original["port"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocketPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocketPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1747,7 +1843,7 @@ func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocketPort(v i return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplateVolumes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVolumes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1767,11 +1863,11 @@ func flattenCloudRunV2JobTemplateTemplateVolumes(v interface{}, d *schema.Resour } return transformed } -func 
flattenCloudRunV2JobTemplateTemplateVolumesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVolumesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateVolumesSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVolumesSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1788,14 +1884,14 @@ func flattenCloudRunV2JobTemplateTemplateVolumesSecret(v interface{}, d *schema. flattenCloudRunV2JobTemplateTemplateVolumesSecretItems(original["items"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTemplateTemplateVolumesSecretSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVolumesSecretSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateVolumesSecretDefaultMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVolumesSecretDefaultMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1809,7 +1905,7 @@ func flattenCloudRunV2JobTemplateTemplateVolumesSecretDefaultMode(v interface{}, return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplateVolumesSecretItems(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVolumesSecretItems(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ if v == nil { return v } @@ -1829,18 +1925,18 @@ func flattenCloudRunV2JobTemplateTemplateVolumesSecretItems(v interface{}, d *sc } return transformed } -func flattenCloudRunV2JobTemplateTemplateVolumesSecretItemsPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVolumesSecretItemsPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateVolumesSecretItemsVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVolumesSecretItemsVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateVolumesSecretItemsMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVolumesSecretItemsMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1854,7 +1950,7 @@ func flattenCloudRunV2JobTemplateTemplateVolumesSecretItemsMode(v interface{}, d return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobTemplateTemplateVolumesCloudSqlInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVolumesCloudSqlInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1867,27 +1963,27 @@ func flattenCloudRunV2JobTemplateTemplateVolumesCloudSqlInstance(v interface{}, flattenCloudRunV2JobTemplateTemplateVolumesCloudSqlInstanceInstances(original["instances"], d, config) return []interface{}{transformed} } -func 
flattenCloudRunV2JobTemplateTemplateVolumesCloudSqlInstanceInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVolumesCloudSqlInstanceInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateExecutionEnvironment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateExecutionEnvironment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateVPCAccess(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVpcAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1897,23 +1993,23 @@ func flattenCloudRunV2JobTemplateTemplateVPCAccess(v interface{}, d *schema.Reso } transformed := make(map[string]interface{}) transformed["connector"] = - flattenCloudRunV2JobTemplateTemplateVPCAccessConnector(original["connector"], d, config) + 
flattenCloudRunV2JobTemplateTemplateVpcAccessConnector(original["connector"], d, config) transformed["egress"] = - flattenCloudRunV2JobTemplateTemplateVPCAccessEgress(original["egress"], d, config) + flattenCloudRunV2JobTemplateTemplateVpcAccessEgress(original["egress"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTemplateTemplateVPCAccessConnector(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVpcAccessConnector(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateVPCAccessEgress(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateVpcAccessEgress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTemplateTemplateMaxRetries(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTemplateTemplateMaxRetries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1927,11 +2023,11 @@ func flattenCloudRunV2JobTemplateTemplateMaxRetries(v interface{}, d *schema.Res return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobObservedGeneration(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobObservedGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTerminalCondition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTerminalCondition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { if v == nil { return nil } @@ -1958,39 +2054,39 @@ func flattenCloudRunV2JobTerminalCondition(v interface{}, d *schema.ResourceData flattenCloudRunV2JobTerminalConditionExecutionReason(original["executionReason"], d, config) return []interface{}{transformed} } -func flattenCloudRunV2JobTerminalConditionType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTerminalConditionType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTerminalConditionState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTerminalConditionState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTerminalConditionMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTerminalConditionMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTerminalConditionLastTransitionTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTerminalConditionLastTransitionTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTerminalConditionSeverity(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTerminalConditionSeverity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTerminalConditionReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTerminalConditionReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTerminalConditionRevisionReason(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenCloudRunV2JobTerminalConditionRevisionReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobTerminalConditionExecutionReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobTerminalConditionExecutionReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2015,42 +2111,42 @@ func flattenCloudRunV2JobConditions(v interface{}, d *schema.ResourceData, confi } return transformed } -func flattenCloudRunV2JobConditionsType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobConditionsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobConditionsState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobConditionsState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobConditionsMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobConditionsMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobConditionsLastTransitionTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobConditionsLastTransitionTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobConditionsSeverity(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenCloudRunV2JobConditionsSeverity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobConditionsReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobConditionsReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobConditionsRevisionReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobConditionsRevisionReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobConditionsExecutionReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobConditionsExecutionReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobExecutionCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobExecutionCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2064,7 +2160,7 @@ func flattenCloudRunV2JobExecutionCount(v interface{}, d *schema.ResourceData, c return v // let terraform core handle it otherwise } -func flattenCloudRunV2JobLatestCreatedExecution(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobLatestCreatedExecution(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2081,27 +2177,38 @@ func flattenCloudRunV2JobLatestCreatedExecution(v interface{}, d *schema.Resourc flattenCloudRunV2JobLatestCreatedExecutionCompletionTime(original["completionTime"], d, config) 
return []interface{}{transformed} } -func flattenCloudRunV2JobLatestCreatedExecutionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobLatestCreatedExecutionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobLatestCreatedExecutionCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobLatestCreatedExecutionCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobLatestCreatedExecutionCompletionTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobLatestCreatedExecutionCompletionTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobReconciling(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobReconciling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenCloudRunV2JobEtag(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenCloudRunV2JobEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandCloudRunV2JobLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandCloudRunV2JobLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunV2JobAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -2112,19 +2219,19 
@@ func expandCloudRunV2JobLabels(v interface{}, d TerraformResourceData, config *C return m, nil } -func expandCloudRunV2JobClient(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobClient(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobClientVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobClientVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobLaunchStage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobLaunchStage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobBinaryAuthorization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobBinaryAuthorization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2136,29 +2243,29 @@ func expandCloudRunV2JobBinaryAuthorization(v interface{}, d TerraformResourceDa transformedBreakglassJustification, err := expandCloudRunV2JobBinaryAuthorizationBreakglassJustification(original["breakglass_justification"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBreakglassJustification); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBreakglassJustification); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["breakglassJustification"] = transformedBreakglassJustification } transformedUseDefault, err := expandCloudRunV2JobBinaryAuthorizationUseDefault(original["use_default"], d, config) if err != nil { return 
nil, err - } else if val := reflect.ValueOf(transformedUseDefault); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUseDefault); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["useDefault"] = transformedUseDefault } return transformed, nil } -func expandCloudRunV2JobBinaryAuthorizationBreakglassJustification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobBinaryAuthorizationBreakglassJustification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobBinaryAuthorizationUseDefault(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobBinaryAuthorizationUseDefault(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2170,35 +2277,42 @@ func expandCloudRunV2JobTemplate(v interface{}, d TerraformResourceData, config transformedLabels, err := expandCloudRunV2JobTemplateLabels(original["labels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["labels"] = transformedLabels } + transformedAnnotations, err := expandCloudRunV2JobTemplateAnnotations(original["annotations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAnnotations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["annotations"] = transformedAnnotations + } + transformedParallelism, err := expandCloudRunV2JobTemplateParallelism(original["parallelism"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedParallelism); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedParallelism); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["parallelism"] = transformedParallelism } transformedTaskCount, err := expandCloudRunV2JobTemplateTaskCount(original["task_count"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTaskCount); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTaskCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["taskCount"] = transformedTaskCount } transformedTemplate, err := expandCloudRunV2JobTemplateTemplate(original["template"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTemplate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTemplate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["template"] = transformedTemplate } return transformed, nil } -func expandCloudRunV2JobTemplateLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandCloudRunV2JobTemplateLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -2209,15 +2323,26 @@ func expandCloudRunV2JobTemplateLabels(v interface{}, d TerraformResourceData, c return m, nil } -func expandCloudRunV2JobTemplateParallelism(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return 
map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunV2JobTemplateParallelism(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTaskCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTaskCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2229,63 +2354,63 @@ func expandCloudRunV2JobTemplateTemplate(v interface{}, d TerraformResourceData, transformedContainers, err := expandCloudRunV2JobTemplateTemplateContainers(original["containers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedContainers); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedContainers); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["containers"] = transformedContainers } transformedVolumes, err := expandCloudRunV2JobTemplateTemplateVolumes(original["volumes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVolumes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVolumes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["volumes"] = transformedVolumes } transformedTimeout, err := expandCloudRunV2JobTemplateTemplateTimeout(original["timeout"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeout"] = transformedTimeout } transformedServiceAccount, err := expandCloudRunV2JobTemplateTemplateServiceAccount(original["service_account"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedServiceAccount); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedServiceAccount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["serviceAccount"] = transformedServiceAccount } transformedExecutionEnvironment, err := expandCloudRunV2JobTemplateTemplateExecutionEnvironment(original["execution_environment"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExecutionEnvironment); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExecutionEnvironment); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["executionEnvironment"] = transformedExecutionEnvironment } transformedEncryptionKey, err := expandCloudRunV2JobTemplateTemplateEncryptionKey(original["encryption_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEncryptionKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEncryptionKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["encryptionKey"] = transformedEncryptionKey } - transformedVPCAccess, err := expandCloudRunV2JobTemplateTemplateVPCAccess(original["vpc_access"], d, config) + transformedVpcAccess, err := expandCloudRunV2JobTemplateTemplateVpcAccess(original["vpc_access"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVPCAccess); val.IsValid() && !isEmptyValue(val) { - transformed["vpcAccess"] = transformedVPCAccess + } else if val := 
reflect.ValueOf(transformedVpcAccess); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vpcAccess"] = transformedVpcAccess } transformedMaxRetries, err := expandCloudRunV2JobTemplateTemplateMaxRetries(original["max_retries"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRetries); val.IsValid() && !isEmptyValue(val) { + } else { transformed["maxRetries"] = transformedMaxRetries } return transformed, nil } -func expandCloudRunV2JobTemplateTemplateContainers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2298,77 +2423,77 @@ func expandCloudRunV2JobTemplateTemplateContainers(v interface{}, d TerraformRes transformedName, err := expandCloudRunV2JobTemplateTemplateContainersName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedImage, err := expandCloudRunV2JobTemplateTemplateContainersImage(original["image"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedImage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedImage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["image"] = transformedImage } transformedCommand, err := expandCloudRunV2JobTemplateTemplateContainersCommand(original["command"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCommand); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCommand); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["command"] = transformedCommand } transformedArgs, err := expandCloudRunV2JobTemplateTemplateContainersArgs(original["args"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["args"] = transformedArgs } transformedEnv, err := expandCloudRunV2JobTemplateTemplateContainersEnv(original["env"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnv); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnv); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["env"] = transformedEnv } transformedResources, err := expandCloudRunV2JobTemplateTemplateContainersResources(original["resources"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["resources"] = transformedResources } transformedPorts, err := expandCloudRunV2JobTemplateTemplateContainersPorts(original["ports"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPorts); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPorts); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ports"] = transformedPorts } transformedVolumeMounts, err := expandCloudRunV2JobTemplateTemplateContainersVolumeMounts(original["volume_mounts"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVolumeMounts); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVolumeMounts); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["volumeMounts"] = transformedVolumeMounts } transformedWorkingDir, err := expandCloudRunV2JobTemplateTemplateContainersWorkingDir(original["working_dir"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWorkingDir); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWorkingDir); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["workingDir"] = transformedWorkingDir } transformedLivenessProbe, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbe(original["liveness_probe"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLivenessProbe); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLivenessProbe); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["livenessProbe"] = transformedLivenessProbe } transformedStartupProbe, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbe(original["startup_probe"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStartupProbe); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStartupProbe); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["startupProbe"] = transformedStartupProbe } @@ -2377,23 +2502,23 @@ func expandCloudRunV2JobTemplateTemplateContainers(v interface{}, d TerraformRes return req, nil } -func expandCloudRunV2JobTemplateTemplateContainersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersImage(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersCommand(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersCommand(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersArgs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersArgs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersEnv(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2406,21 +2531,21 @@ func expandCloudRunV2JobTemplateTemplateContainersEnv(v interface{}, d Terraform transformedName, err := expandCloudRunV2JobTemplateTemplateContainersEnvName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedValue, err := expandCloudRunV2JobTemplateTemplateContainersEnvValue(original["value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["value"] = transformedValue } transformedValueSource, err := 
expandCloudRunV2JobTemplateTemplateContainersEnvValueSource(original["value_source"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedValueSource); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedValueSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["valueSource"] = transformedValueSource } @@ -2429,15 +2554,15 @@ func expandCloudRunV2JobTemplateTemplateContainersEnv(v interface{}, d Terraform return req, nil } -func expandCloudRunV2JobTemplateTemplateContainersEnvName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersEnvName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersEnvValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersEnvValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersEnvValueSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersEnvValueSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2449,14 +2574,14 @@ func expandCloudRunV2JobTemplateTemplateContainersEnvValueSource(v interface{}, transformedSecretKeyRef, err := expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(original["secret_key_ref"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSecretKeyRef); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSecretKeyRef); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["secretKeyRef"] = transformedSecretKeyRef } return transformed, nil } -func expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2468,29 +2593,29 @@ func expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(v i transformedSecret, err := expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefSecret(original["secret"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["secret"] = transformedSecret } transformedVersion, err := expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefVersion(original["version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["version"] = transformedVersion } return transformed, nil } -func expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefVersion(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2502,14 +2627,14 @@ func expandCloudRunV2JobTemplateTemplateContainersResources(v interface{}, d Ter transformedLimits, err := expandCloudRunV2JobTemplateTemplateContainersResourcesLimits(original["limits"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLimits); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLimits); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["limits"] = transformedLimits } return transformed, nil } -func expandCloudRunV2JobTemplateTemplateContainersResourcesLimits(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandCloudRunV2JobTemplateTemplateContainersResourcesLimits(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -2520,7 +2645,7 @@ func expandCloudRunV2JobTemplateTemplateContainersResourcesLimits(v interface{}, return m, nil } -func expandCloudRunV2JobTemplateTemplateContainersPorts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersPorts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := 
range l { @@ -2533,14 +2658,14 @@ func expandCloudRunV2JobTemplateTemplateContainersPorts(v interface{}, d Terrafo transformedName, err := expandCloudRunV2JobTemplateTemplateContainersPortsName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedContainerPort, err := expandCloudRunV2JobTemplateTemplateContainersPortsContainerPort(original["container_port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedContainerPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedContainerPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["containerPort"] = transformedContainerPort } @@ -2549,15 +2674,15 @@ func expandCloudRunV2JobTemplateTemplateContainersPorts(v interface{}, d Terrafo return req, nil } -func expandCloudRunV2JobTemplateTemplateContainersPortsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersPortsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersPortsContainerPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersPortsContainerPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersVolumeMounts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersVolumeMounts(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2570,14 +2695,14 @@ func expandCloudRunV2JobTemplateTemplateContainersVolumeMounts(v interface{}, d transformedName, err := expandCloudRunV2JobTemplateTemplateContainersVolumeMountsName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedMountPath, err := expandCloudRunV2JobTemplateTemplateContainersVolumeMountsMountPath(original["mount_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMountPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMountPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mountPath"] = transformedMountPath } @@ -2586,19 +2711,19 @@ func expandCloudRunV2JobTemplateTemplateContainersVolumeMounts(v interface{}, d return req, nil } -func expandCloudRunV2JobTemplateTemplateContainersVolumeMountsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersVolumeMountsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersVolumeMountsMountPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersVolumeMountsMountPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersWorkingDir(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandCloudRunV2JobTemplateTemplateContainersWorkingDir(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersLivenessProbe(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbe(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2610,28 +2735,28 @@ func expandCloudRunV2JobTemplateTemplateContainersLivenessProbe(v interface{}, d transformedInitialDelaySeconds, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeInitialDelaySeconds(original["initial_delay_seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInitialDelaySeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInitialDelaySeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["initialDelaySeconds"] = transformedInitialDelaySeconds } transformedTimeoutSeconds, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTimeoutSeconds(original["timeout_seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeoutSeconds"] = transformedTimeoutSeconds } transformedPeriodSeconds, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbePeriodSeconds(original["period_seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPeriodSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPeriodSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["periodSeconds"] = transformedPeriodSeconds } transformedFailureThreshold, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeFailureThreshold(original["failure_threshold"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["failureThreshold"] = transformedFailureThreshold } @@ -2652,23 +2777,23 @@ func expandCloudRunV2JobTemplateTemplateContainersLivenessProbe(v interface{}, d return transformed, nil } -func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeInitialDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeInitialDelaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersLivenessProbePeriodSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbePeriodSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeFailureThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeFailureThreshold(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -2685,25 +2810,25 @@ func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGet(v interfa transformedPath, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetPath(original["path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["path"] = transformedPath } transformedHttpHeaders, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeaders(original["http_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpHeaders"] = transformedHttpHeaders } return transformed, nil } -func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2716,7 +2841,7 @@ func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeader transformedName, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } @@ -2732,15 +2857,15 @@ func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeader return req, nil } -func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) 
if len(l) == 0 { return nil, nil @@ -2757,18 +2882,18 @@ func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocket(v inter transformedPort, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocketPort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } return transformed, nil } -func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocketPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocketPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersStartupProbe(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersStartupProbe(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2780,28 +2905,28 @@ func expandCloudRunV2JobTemplateTemplateContainersStartupProbe(v interface{}, d transformedInitialDelaySeconds, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeInitialDelaySeconds(original["initial_delay_seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInitialDelaySeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInitialDelaySeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["initialDelaySeconds"] = transformedInitialDelaySeconds } transformedTimeoutSeconds, err := 
expandCloudRunV2JobTemplateTemplateContainersStartupProbeTimeoutSeconds(original["timeout_seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeoutSeconds"] = transformedTimeoutSeconds } transformedPeriodSeconds, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbePeriodSeconds(original["period_seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPeriodSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPeriodSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["periodSeconds"] = transformedPeriodSeconds } transformedFailureThreshold, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeFailureThreshold(original["failure_threshold"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["failureThreshold"] = transformedFailureThreshold } @@ -2822,23 +2947,23 @@ func expandCloudRunV2JobTemplateTemplateContainersStartupProbe(v interface{}, d return transformed, nil } -func expandCloudRunV2JobTemplateTemplateContainersStartupProbeInitialDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeInitialDelaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersStartupProbeTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandCloudRunV2JobTemplateTemplateContainersStartupProbeTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersStartupProbePeriodSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersStartupProbePeriodSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersStartupProbeFailureThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeFailureThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -2855,25 +2980,25 @@ func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGet(v interfac transformedPath, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetPath(original["path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["path"] = transformedPath } transformedHttpHeaders, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeaders(original["http_headers"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpHeaders"] = transformedHttpHeaders } return transformed, nil } -func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2886,7 +3011,7 @@ func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeaders transformedName, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } @@ -2902,15 +3027,15 @@ func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeaders return req, nil } -func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersName(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -2927,18 +3052,18 @@ func expandCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocket(v interf transformedPort, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocketPort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } return transformed, nil } -func expandCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocketPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocketPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateVolumes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateVolumes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := 
v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2951,21 +3076,21 @@ func expandCloudRunV2JobTemplateTemplateVolumes(v interface{}, d TerraformResour transformedName, err := expandCloudRunV2JobTemplateTemplateVolumesName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedSecret, err := expandCloudRunV2JobTemplateTemplateVolumesSecret(original["secret"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["secret"] = transformedSecret } transformedCloudSqlInstance, err := expandCloudRunV2JobTemplateTemplateVolumesCloudSqlInstance(original["cloud_sql_instance"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCloudSqlInstance); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCloudSqlInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cloudSqlInstance"] = transformedCloudSqlInstance } @@ -2974,11 +3099,11 @@ func expandCloudRunV2JobTemplateTemplateVolumes(v interface{}, d TerraformResour return req, nil } -func expandCloudRunV2JobTemplateTemplateVolumesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateVolumesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateVolumesSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandCloudRunV2JobTemplateTemplateVolumesSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2990,36 +3115,36 @@ func expandCloudRunV2JobTemplateTemplateVolumesSecret(v interface{}, d Terraform transformedSecret, err := expandCloudRunV2JobTemplateTemplateVolumesSecretSecret(original["secret"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["secret"] = transformedSecret } transformedDefaultMode, err := expandCloudRunV2JobTemplateTemplateVolumesSecretDefaultMode(original["default_mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDefaultMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDefaultMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["defaultMode"] = transformedDefaultMode } transformedItems, err := expandCloudRunV2JobTemplateTemplateVolumesSecretItems(original["items"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedItems); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedItems); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["items"] = transformedItems } return transformed, nil } -func expandCloudRunV2JobTemplateTemplateVolumesSecretSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateVolumesSecretSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateVolumesSecretDefaultMode(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandCloudRunV2JobTemplateTemplateVolumesSecretDefaultMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateVolumesSecretItems(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateVolumesSecretItems(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3032,21 +3157,21 @@ func expandCloudRunV2JobTemplateTemplateVolumesSecretItems(v interface{}, d Terr transformedPath, err := expandCloudRunV2JobTemplateTemplateVolumesSecretItemsPath(original["path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["path"] = transformedPath } transformedVersion, err := expandCloudRunV2JobTemplateTemplateVolumesSecretItemsVersion(original["version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["version"] = transformedVersion } transformedMode, err := expandCloudRunV2JobTemplateTemplateVolumesSecretItemsMode(original["mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mode"] = transformedMode } @@ -3055,19 +3180,19 @@ func expandCloudRunV2JobTemplateTemplateVolumesSecretItems(v interface{}, d Terr return req, nil } -func 
expandCloudRunV2JobTemplateTemplateVolumesSecretItemsPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateVolumesSecretItemsPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateVolumesSecretItemsVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateVolumesSecretItemsVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateVolumesSecretItemsMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateVolumesSecretItemsMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateVolumesCloudSqlInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateVolumesCloudSqlInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3079,34 +3204,34 @@ func expandCloudRunV2JobTemplateTemplateVolumesCloudSqlInstance(v interface{}, d transformedInstances, err := expandCloudRunV2JobTemplateTemplateVolumesCloudSqlInstanceInstances(original["instances"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["instances"] = transformedInstances } return transformed, nil } -func expandCloudRunV2JobTemplateTemplateVolumesCloudSqlInstanceInstances(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateVolumesCloudSqlInstanceInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateExecutionEnvironment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateExecutionEnvironment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateEncryptionKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateVPCAccess(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateVpcAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3115,31 +3240,31 @@ func expandCloudRunV2JobTemplateTemplateVPCAccess(v interface{}, d TerraformReso original := 
raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedConnector, err := expandCloudRunV2JobTemplateTemplateVPCAccessConnector(original["connector"], d, config) + transformedConnector, err := expandCloudRunV2JobTemplateTemplateVpcAccessConnector(original["connector"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedConnector); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedConnector); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["connector"] = transformedConnector } - transformedEgress, err := expandCloudRunV2JobTemplateTemplateVPCAccessEgress(original["egress"], d, config) + transformedEgress, err := expandCloudRunV2JobTemplateTemplateVpcAccessEgress(original["egress"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEgress); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEgress); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["egress"] = transformedEgress } return transformed, nil } -func expandCloudRunV2JobTemplateTemplateVPCAccessConnector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateVpcAccessConnector(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateVPCAccessEgress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateVpcAccessEgress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandCloudRunV2JobTemplateTemplateMaxRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandCloudRunV2JobTemplateTemplateMaxRetries(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_job_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_job_sweeper.go new file mode 100644 index 0000000000..88c7e7d47c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_job_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudrunv2 + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudRunV2Job", testSweepCloudRunV2Job) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudRunV2Job(region string) error { + resourceName := "CloudRunV2Job" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://run.googleapis.com/v2/projects/{{project}}/locations/{{location}}/jobs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response 
from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["jobs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://run.googleapis.com/v2/projects/{{project}}/locations/{{location}}/jobs/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_service.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_service.go new file mode 100644 index 0000000000..6cbd04120b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_service.go @@ -0,0 +1,3894 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudrunv2 + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceCloudRunV2Service() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudRunV2ServiceCreate, + Read: resourceCloudRunV2ServiceRead, + Update: resourceCloudRunV2ServiceUpdate, + Delete: resourceCloudRunV2ServiceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudRunV2ServiceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: 
tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of the Service.`, + }, + "template": { + Type: schema.TypeList, + Required: true, + Description: `The template used to create revisions for this Service.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: `Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. + +Cloud Run API v2 does not support annotations with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected. +All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. + +This field follows Kubernetes annotations' namespacing, limits, and rules.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "containers": { + Type: schema.TypeList, + Optional: true, + Description: `Holds the containers that define the unit of execution for this Service.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image": { + Type: schema.TypeString, + Required: true, + Description: `URL of the Container image in Google Container Registry or Google Artifact Registry. More info: https://kubernetes.io/docs/concepts/containers/images`, + }, + "args": { + Type: schema.TypeList, + Optional: true, + Description: `Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "command": { + Type: schema.TypeList, + Optional: true, + Description: `Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "env": { + Type: schema.TypeList, + Optional: true, + Description: `List of environment variables to set in the container.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the environment variable. Must be a C_IDENTIFIER, and mnay not exceed 32768 characters.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. 
Defaults to "", and the maximum length is 32768 bytes`, + }, + "value_source": { + Type: schema.TypeList, + Optional: true, + Description: `Source for the environment variable's value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_key_ref": { + Type: schema.TypeList, + Optional: true, + Description: `Selects a secret and a specific version from Cloud Secret Manager.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret": { + Type: schema.TypeString, + Required: true, + Description: `The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project.`, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "liveness_probe": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "failure_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.`, + Default: 3, + }, + "grpc": { + Type: schema.TypeList, + Optional: true, + Description: `GRPC specifies an action involving a GRPC port.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Number must be in the range 1 to 65535. 
+If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + "service": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the service to place in the gRPC HealthCheckRequest +(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). +If this is not specified, the default behavior is defined by gRPC.`, + }, + }, + }, + }, + "http_get": { + Type: schema.TypeList, + Optional: true, + Description: `HTTPGet specifies the http request to perform.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Custom headers to set in the request. HTTP allows repeated headers.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The header field name`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `The header field value`, + Default: "", + }, + }, + }, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path to access on the HTTP server. Defaults to '/'.`, + Default: "/", + }, + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Number must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + }, + }, + }, + "initial_delay_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, + Default: 0, + }, + "period_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `How often (in seconds) to perform the probe. 
Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds`, + Default: 10, + }, + "tcp_socket": { + Type: schema.TypeList, + Optional: true, + Deprecated: "Cloud Run does not support tcp socket in liveness probe and `liveness_probe.tcp_socket` field will be removed in a future major release.", + Description: `TCPSocket specifies an action involving a TCP port. This field is not supported in liveness probe currently.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": { + Type: schema.TypeInt, + Optional: true, + Description: `Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to 8080.`, + }, + }, + }, + }, + "timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, + Default: 1, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the container specified as a DNS_LABEL.`, + }, + "ports": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. + +If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container_port": { + Type: schema.TypeInt, + Optional: true, + Description: `Port number the container listens on. 
This must be a valid TCP port number, 0 < containerPort < 65536.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c".`, + }, + }, + }, + }, + "resources": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Compute Resource requirements by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu_idle": { + Type: schema.TypeBool, + Optional: true, + Description: `Determines whether CPU should be throttled or not outside of requests.`, + }, + "limits": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "startup_cpu_boost": { + Type: schema.TypeBool, + Optional: true, + Description: `Determines whether CPU should be boosted on startup of a new container instance above the requested CPU threshold, this can help reduce cold-start latency.`, + }, + }, + }, + }, + "startup_probe": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "failure_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.`, + Default: 3, + }, + "grpc": { + Type: schema.TypeList, + Optional: true, + Description: `GRPC specifies an action involving a GRPC port.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Number must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + "service": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the service to place in the gRPC HealthCheckRequest +(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). +If this is not specified, the default behavior is defined by gRPC.`, + }, + }, + }, + }, + "http_get": { + Type: schema.TypeList, + Optional: true, + Description: `HTTPGet specifies the http request to perform. Exactly one of HTTPGet or TCPSocket must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Custom headers to set in the request. HTTP allows repeated headers.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The header field name`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `The header field value`, + Default: "", + }, + }, + }, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path to access on the HTTP server. 
Defaults to '/'.`, + Default: "/", + }, + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + }, + }, + }, + "initial_delay_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, + Default: 0, + }, + "period_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds`, + Default: 10, + }, + "tcp_socket": { + Type: schema.TypeList, + Optional: true, + Description: `TCPSocket specifies an action involving a TCP port. Exactly one of HTTPGet or TCPSocket must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Must be in the range 1 to 65535. +If not specified, defaults to the same value as container.ports[0].containerPort.`, + }, + }, + }, + }, + "timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, + Default: 1, + }, + }, + }, + }, + "volume_mounts": { + Type: schema.TypeList, + Optional: true, + Description: `Volume to mount into the container's filesystem.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mount_path": { + Type: schema.TypeString, + Required: true, + Description: `Path within the container at which the volume should be mounted. Must not contain ':'. For Cloud SQL volumes, it can be left empty, or must otherwise be /cloudsql. All instances defined in the Volume will be available as /cloudsql/[instance]. For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `This must match the Name of a Volume.`, + }, + }, + }, + }, + "working_dir": { + Type: schema.TypeString, + Optional: true, + Description: `Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image.`, + }, + }, + }, + }, + "encryption_key": { + Type: schema.TypeString, + Optional: true, + Description: `A reference to a customer managed encryption key (CMEK) to use to encrypt this container image. For more information, go to https://cloud.google.com/run/docs/securing/using-cmek`, + }, + "execution_environment": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"EXECUTION_ENVIRONMENT_GEN1", "EXECUTION_ENVIRONMENT_GEN2", ""}), + Description: `The sandbox environment to host this Revision. Possible values: ["EXECUTION_ENVIRONMENT_GEN1", "EXECUTION_ENVIRONMENT_GEN2"]`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Unstructured key value map that can be used to organize and categorize objects. 
User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. +For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. + +Cloud Run API v2 does not support labels with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected. +All system labels in v1 now have a corresponding field in v2 RevisionTemplate.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "max_instance_request_concurrency": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Sets the maximum number of requests that each serving instance can receive.`, + }, + "revision": { + Type: schema.TypeString, + Optional: true, + Description: `The unique name for the revision. If this field is omitted, it will be automatically generated based on the Service name.`, + }, + "scaling": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Scaling settings for this Revision.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_instance_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Maximum number of serving instances that this resource should have.`, + }, + "min_instance_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum number of serving instances that this resource should have.`, + }, + }, + }, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. 
If not provided, the revision will use the project's default service account.`, + }, + "session_affinity": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity`, + }, + "timeout": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Max allowed time for an instance to respond to a request. + +A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s".`, + }, + "volumes": { + Type: schema.TypeList, + Optional: true, + Description: `A list of Volumes to make available to containers.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Volume's name.`, + }, + "cloud_sql_instance": { + Type: schema.TypeList, + Optional: true, + Description: `For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instances": { + Type: schema.TypeList, + Optional: true, + Description: `The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance}`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "secret": { + Type: schema.TypeList, + Optional: true, + Description: `Secret represents a secret that should populate this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret": { + Type: schema.TypeString, + Required: true, + Description: `The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project.`, + }, + "default_mode": { + Type: schema.TypeInt, + Optional: true, + Description: `Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. Directories within the path are not affected by this setting.`, + }, + "items": { + Type: schema.TypeList, + Optional: true, + Description: `If unspecified, the volume will expose a file whose name is the secret, relative to VolumeMount.mount_path. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a path and a version.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeInt, + Required: true, + Description: `Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used.`, + }, + "path": { + Type: schema.TypeString, + Required: true, + Description: `The relative path of the secret in the container.`, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "vpc_access": { + Type: schema.TypeList, + Optional: true, + Description: `VPC Access configuration to use for this Task. 
For more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "connector": { + Type: schema.TypeString, + Optional: true, + Description: `VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number.`, + }, + "egress": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ALL_TRAFFIC", "PRIVATE_RANGES_ONLY", ""}), + Description: `Traffic VPC egress settings. Possible values: ["ALL_TRAFFIC", "PRIVATE_RANGES_ONLY"]`, + }, + }, + }, + }, + }, + }, + }, + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: `Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. + +Cloud Run API v2 does not support annotations with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected in new resources. +All system annotations in v1 now have a corresponding field in v2 Service. + +This field follows Kubernetes annotations' namespacing, limits, and rules.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "binary_authorization": { + Type: schema.TypeList, + Optional: true, + Description: `Settings for the Binary Authorization feature.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "breakglass_justification": { + Type: schema.TypeString, + Optional: true, + Description: `If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. 
For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass`, + }, + "use_default": { + Type: schema.TypeBool, + Optional: true, + Description: `If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled.`, + }, + }, + }, + }, + "client": { + Type: schema.TypeString, + Optional: true, + Description: `Arbitrary identifier for the API client.`, + }, + "client_version": { + Type: schema.TypeString, + Optional: true, + Description: `Arbitrary version identifier for the API client.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `User-provided description of the Service. This field currently has a 512-character limit.`, + }, + "ingress": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"INGRESS_TRAFFIC_ALL", "INGRESS_TRAFFIC_INTERNAL_ONLY", "INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER", ""}), + Description: `Provides the ingress settings for this Service. On output, returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED if no revision is active. Possible values: ["INGRESS_TRAFFIC_ALL", "INGRESS_TRAFFIC_INTERNAL_ONLY", "INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER"]`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, +environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. + +Cloud Run API v2 does not support labels with 'run.googleapis.com', 'cloud.googleapis.com', 'serving.knative.dev', or 'autoscaling.knative.dev' namespaces, and they will be rejected. 
+All system labels in v1 now have a corresponding field in v2 Service.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "launch_stage": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED", ""}), + Description: `The launch stage as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/products#product-launch-stages). Cloud Run supports ALPHA, BETA, and GA. +If no value is specified, GA is assumed. Set the launch stage to a preview stage on input to allow use of preview features in that stage. On read (or output), describes whether the resource uses preview features. + +For example, if ALPHA is provided as input, but only BETA and GA-level features are used, this field will be BETA on output. Possible values: ["UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED"]`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The location of the cloud run service`, + }, + "traffic": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Specifies how to distribute traffic over a collection of Revisions belonging to the Service. If traffic is empty or not provided, defaults to 100% traffic to the latest Ready Revision.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "percent": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Specifies percent of the traffic to this Revision. 
This defaults to zero if unspecified.`, + }, + "revision": { + Type: schema.TypeString, + Optional: true, + Description: `Revision to which to send this portion of traffic, if traffic allocation is by revision.`, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + Description: `Indicates a string to be part of the URI to exclusively reference this target.`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST", "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION", ""}), + Description: `The allocation type for this traffic target. Possible values: ["TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST", "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"]`, + }, + }, + }, + }, + "conditions": { + Type: schema.TypeList, + Computed: true, + Description: `The Conditions of all other associated sub-resources. They contain additional diagnostics information in case the Service does not reach its Serving state. See comments in reconciling for additional information on reconciliation process in Cloud Run.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "execution_reason": { + Type: schema.TypeString, + Computed: true, + Description: `A reason for the execution condition.`, + }, + "last_transition_time": { + Type: schema.TypeString, + Computed: true, + Description: `Last time the condition transitioned from one status to another. + +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "message": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable message indicating details about the current status.`, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `A common (service-level) reason for this condition.`, + }, + "revision_reason": { + Type: schema.TypeString, + Computed: true, + Description: `A reason for the revision condition.`, + }, + "severity": { + Type: schema.TypeString, + Computed: true, + Description: `How to interpret failures of this condition, one of Error, Warning, Info`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the condition.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready.`, + }, + }, + }, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `A system-generated fingerprint for this version of the resource. May be used to detect modification conflict during updates.`, + }, + "generation": { + Type: schema.TypeString, + Computed: true, + Description: `A number that monotonically increases every time the user modifies the desired state. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a string instead of an integer.`, + }, + "latest_created_revision": { + Type: schema.TypeString, + Computed: true, + Description: `Name of the last created revision. See comments in reconciling for additional information on reconciliation process in Cloud Run.`, + }, + "latest_ready_revision": { + Type: schema.TypeString, + Computed: true, + Description: `Name of the latest revision that is serving traffic. 
See comments in reconciling for additional information on reconciliation process in Cloud Run.`, + }, + "observed_generation": { + Type: schema.TypeString, + Computed: true, + Description: `The generation of this Service currently serving traffic. See comments in reconciling for additional information on reconciliation process in Cloud Run. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a string instead of an integer.`, + }, + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: `Returns true if the Service is currently being acted upon by the system to bring it into the desired state. + +When a new Service is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Service to the desired serving state. This process is called reconciliation. While reconciliation is in process, observedGeneration, latest_ready_revison, trafficStatuses, and uri will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the serving state matches the Service, or there was an error, and reconciliation failed. This state can be found in terminalCondition.state. + +If reconciliation succeeded, the following fields will match: traffic and trafficStatuses, observedGeneration and generation, latestReadyRevision and latestCreatedRevision. + +If reconciliation failed, trafficStatuses, observedGeneration, and latestReadyRevision will have the state of the last serving revision, or empty for newly created Services. Additional information on the failure can be found in terminalCondition and conditions.`, + }, + "terminal_condition": { + Type: schema.TypeList, + Computed: true, + Description: `The Condition of this Service, containing its readiness status, and detailed error information in case it did not reach a serving state. 
See comments in reconciling for additional information on reconciliation process in Cloud Run.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "execution_reason": { + Type: schema.TypeString, + Computed: true, + Description: `A reason for the execution condition.`, + }, + "last_transition_time": { + Type: schema.TypeString, + Computed: true, + Description: `Last time the condition transitioned from one status to another.`, + }, + "message": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable message indicating details about the current status.`, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `A common (service-level) reason for this condition.`, + }, + "revision_reason": { + Type: schema.TypeString, + Computed: true, + Description: `A reason for the revision condition.`, + }, + "severity": { + Type: schema.TypeString, + Computed: true, + Description: `How to interpret failures of this condition, one of Error, Warning, Info`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the condition.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready.`, + }, + }, + }, + }, + "traffic_statuses": { + Type: schema.TypeList, + Computed: true, + Description: `Detailed status information for corresponding traffic targets. 
See comments in reconciling for additional information on reconciliation process in Cloud Run.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "percent": { + Type: schema.TypeInt, + Computed: true, + Description: `Specifies percent of the traffic to this Revision.`, + }, + "revision": { + Type: schema.TypeString, + Computed: true, + Description: `Revision to which this traffic is sent.`, + }, + "tag": { + Type: schema.TypeString, + Computed: true, + Description: `Indicates the string used in the URI to exclusively reference this target.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `The allocation type for this traffic target.`, + }, + "uri": { + Type: schema.TypeString, + Computed: true, + Description: `Displays the target URI.`, + }, + }, + }, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Server assigned unique identifier for the trigger. The value is a UUID4 string and guaranteed to remain unchanged until the resource is deleted.`, + }, + "uri": { + Type: schema.TypeString, + Computed: true, + Description: `The main URI in which this Service is serving traffic.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudRunV2ServiceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandCloudRunV2ServiceDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCloudRunV2ServiceLabels(d.Get("labels"), d, config) 
+ if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + annotationsProp, err := expandCloudRunV2ServiceAnnotations(d.Get("annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(annotationsProp)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } + clientProp, err := expandCloudRunV2ServiceClient(d.Get("client"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client"); !tpgresource.IsEmptyValue(reflect.ValueOf(clientProp)) && (ok || !reflect.DeepEqual(v, clientProp)) { + obj["client"] = clientProp + } + clientVersionProp, err := expandCloudRunV2ServiceClientVersion(d.Get("client_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(clientVersionProp)) && (ok || !reflect.DeepEqual(v, clientVersionProp)) { + obj["clientVersion"] = clientVersionProp + } + ingressProp, err := expandCloudRunV2ServiceIngress(d.Get("ingress"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ingress"); !tpgresource.IsEmptyValue(reflect.ValueOf(ingressProp)) && (ok || !reflect.DeepEqual(v, ingressProp)) { + obj["ingress"] = ingressProp + } + launchStageProp, err := expandCloudRunV2ServiceLaunchStage(d.Get("launch_stage"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("launch_stage"); !tpgresource.IsEmptyValue(reflect.ValueOf(launchStageProp)) && (ok || !reflect.DeepEqual(v, launchStageProp)) { + obj["launchStage"] = launchStageProp + } + binaryAuthorizationProp, err := expandCloudRunV2ServiceBinaryAuthorization(d.Get("binary_authorization"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("binary_authorization"); !tpgresource.IsEmptyValue(reflect.ValueOf(binaryAuthorizationProp)) && (ok || !reflect.DeepEqual(v, binaryAuthorizationProp)) { + obj["binaryAuthorization"] = binaryAuthorizationProp + } + templateProp, err := expandCloudRunV2ServiceTemplate(d.Get("template"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("template"); !tpgresource.IsEmptyValue(reflect.ValueOf(templateProp)) && (ok || !reflect.DeepEqual(v, templateProp)) { + obj["template"] = templateProp + } + trafficProp, err := expandCloudRunV2ServiceTraffic(d.Get("traffic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("traffic"); !tpgresource.IsEmptyValue(reflect.ValueOf(trafficProp)) && (ok || !reflect.DeepEqual(v, trafficProp)) { + obj["traffic"] = trafficProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/services?serviceId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Service: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Service: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/services/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource 
in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = CloudRunV2OperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Service", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Service: %s", err) + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/services/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Service %q: %#v", d.Id(), res) + + return resourceCloudRunV2ServiceRead(d, meta) +} + +func resourceCloudRunV2ServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/services/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudRunV2Service %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + + if err := 
d.Set("description", flattenCloudRunV2ServiceDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("uid", flattenCloudRunV2ServiceUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("generation", flattenCloudRunV2ServiceGeneration(res["generation"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("labels", flattenCloudRunV2ServiceLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("annotations", flattenCloudRunV2ServiceAnnotations(res["annotations"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("client", flattenCloudRunV2ServiceClient(res["client"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("client_version", flattenCloudRunV2ServiceClientVersion(res["clientVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("ingress", flattenCloudRunV2ServiceIngress(res["ingress"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("launch_stage", flattenCloudRunV2ServiceLaunchStage(res["launchStage"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("binary_authorization", flattenCloudRunV2ServiceBinaryAuthorization(res["binaryAuthorization"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("template", flattenCloudRunV2ServiceTemplate(res["template"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("traffic", flattenCloudRunV2ServiceTraffic(res["traffic"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", 
err) + } + if err := d.Set("observed_generation", flattenCloudRunV2ServiceObservedGeneration(res["observedGeneration"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("terminal_condition", flattenCloudRunV2ServiceTerminalCondition(res["terminalCondition"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("conditions", flattenCloudRunV2ServiceConditions(res["conditions"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("latest_ready_revision", flattenCloudRunV2ServiceLatestReadyRevision(res["latestReadyRevision"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("latest_created_revision", flattenCloudRunV2ServiceLatestCreatedRevision(res["latestCreatedRevision"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("traffic_statuses", flattenCloudRunV2ServiceTrafficStatuses(res["trafficStatuses"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("uri", flattenCloudRunV2ServiceUri(res["uri"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("reconciling", flattenCloudRunV2ServiceReconciling(res["reconciling"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("etag", flattenCloudRunV2ServiceEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + + return nil +} + +func resourceCloudRunV2ServiceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return 
fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandCloudRunV2ServiceDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandCloudRunV2ServiceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + annotationsProp, err := expandCloudRunV2ServiceAnnotations(d.Get("annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } + clientProp, err := expandCloudRunV2ServiceClient(d.Get("client"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientProp)) { + obj["client"] = clientProp + } + clientVersionProp, err := expandCloudRunV2ServiceClientVersion(d.Get("client_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientVersionProp)) { + obj["clientVersion"] = clientVersionProp + } + ingressProp, err := expandCloudRunV2ServiceIngress(d.Get("ingress"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ingress"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ingressProp)) { + obj["ingress"] = ingressProp + } + launchStageProp, err := 
expandCloudRunV2ServiceLaunchStage(d.Get("launch_stage"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("launch_stage"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, launchStageProp)) { + obj["launchStage"] = launchStageProp + } + binaryAuthorizationProp, err := expandCloudRunV2ServiceBinaryAuthorization(d.Get("binary_authorization"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("binary_authorization"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, binaryAuthorizationProp)) { + obj["binaryAuthorization"] = binaryAuthorizationProp + } + templateProp, err := expandCloudRunV2ServiceTemplate(d.Get("template"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("template"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, templateProp)) { + obj["template"] = templateProp + } + trafficProp, err := expandCloudRunV2ServiceTraffic(d.Get("traffic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("traffic"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trafficProp)) { + obj["traffic"] = trafficProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/services/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Service %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Service %q: %s", d.Id(), err) + } else { + 
log.Printf("[DEBUG] Finished updating Service %q: %#v", d.Id(), res) + } + + err = CloudRunV2OperationWaitTime( + config, res, project, "Updating Service", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceCloudRunV2ServiceRead(d, meta) +} + +func resourceCloudRunV2ServiceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/services/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Service %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Service") + } + + err = CloudRunV2OperationWaitTime( + config, res, project, "Deleting Service", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudRunV2ServiceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + 
"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/services/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCloudRunV2ServiceDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceClient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceClientVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceIngress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceLaunchStage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceBinaryAuthorization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := 
make(map[string]interface{}) + transformed["breakglass_justification"] = + flattenCloudRunV2ServiceBinaryAuthorizationBreakglassJustification(original["breakglassJustification"], d, config) + transformed["use_default"] = + flattenCloudRunV2ServiceBinaryAuthorizationUseDefault(original["useDefault"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceBinaryAuthorizationBreakglassJustification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceBinaryAuthorizationUseDefault(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["revision"] = + flattenCloudRunV2ServiceTemplateRevision(original["revision"], d, config) + transformed["labels"] = + flattenCloudRunV2ServiceTemplateLabels(original["labels"], d, config) + transformed["annotations"] = + flattenCloudRunV2ServiceTemplateAnnotations(original["annotations"], d, config) + transformed["scaling"] = + flattenCloudRunV2ServiceTemplateScaling(original["scaling"], d, config) + transformed["vpc_access"] = + flattenCloudRunV2ServiceTemplateVpcAccess(original["vpcAccess"], d, config) + transformed["timeout"] = + flattenCloudRunV2ServiceTemplateTimeout(original["timeout"], d, config) + transformed["service_account"] = + flattenCloudRunV2ServiceTemplateServiceAccount(original["serviceAccount"], d, config) + transformed["containers"] = + flattenCloudRunV2ServiceTemplateContainers(original["containers"], d, config) + transformed["volumes"] = + flattenCloudRunV2ServiceTemplateVolumes(original["volumes"], d, config) + transformed["execution_environment"] = + 
flattenCloudRunV2ServiceTemplateExecutionEnvironment(original["executionEnvironment"], d, config) + transformed["encryption_key"] = + flattenCloudRunV2ServiceTemplateEncryptionKey(original["encryptionKey"], d, config) + transformed["max_instance_request_concurrency"] = + flattenCloudRunV2ServiceTemplateMaxInstanceRequestConcurrency(original["maxInstanceRequestConcurrency"], d, config) + transformed["session_affinity"] = + flattenCloudRunV2ServiceTemplateSessionAffinity(original["sessionAffinity"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateRevision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateScaling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["min_instance_count"] = + flattenCloudRunV2ServiceTemplateScalingMinInstanceCount(original["minInstanceCount"], d, config) + transformed["max_instance_count"] = + flattenCloudRunV2ServiceTemplateScalingMaxInstanceCount(original["maxInstanceCount"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateScalingMinInstanceCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + 
intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateScalingMaxInstanceCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateVpcAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["connector"] = + flattenCloudRunV2ServiceTemplateVpcAccessConnector(original["connector"], d, config) + transformed["egress"] = + flattenCloudRunV2ServiceTemplateVpcAccessEgress(original["egress"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateVpcAccessConnector(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateVpcAccessEgress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 
0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunV2ServiceTemplateContainersName(original["name"], d, config), + "image": flattenCloudRunV2ServiceTemplateContainersImage(original["image"], d, config), + "command": flattenCloudRunV2ServiceTemplateContainersCommand(original["command"], d, config), + "args": flattenCloudRunV2ServiceTemplateContainersArgs(original["args"], d, config), + "env": flattenCloudRunV2ServiceTemplateContainersEnv(original["env"], d, config), + "resources": flattenCloudRunV2ServiceTemplateContainersResources(original["resources"], d, config), + "ports": flattenCloudRunV2ServiceTemplateContainersPorts(original["ports"], d, config), + "volume_mounts": flattenCloudRunV2ServiceTemplateContainersVolumeMounts(original["volumeMounts"], d, config), + "working_dir": flattenCloudRunV2ServiceTemplateContainersWorkingDir(original["workingDir"], d, config), + "liveness_probe": flattenCloudRunV2ServiceTemplateContainersLivenessProbe(original["livenessProbe"], d, config), + "startup_probe": flattenCloudRunV2ServiceTemplateContainersStartupProbe(original["startupProbe"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2ServiceTemplateContainersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersImage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersCommand(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersArgs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenCloudRunV2ServiceTemplateContainersEnv(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunV2ServiceTemplateContainersEnvName(original["name"], d, config), + "value": flattenCloudRunV2ServiceTemplateContainersEnvValue(original["value"], d, config), + "value_source": flattenCloudRunV2ServiceTemplateContainersEnvValueSource(original["valueSource"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2ServiceTemplateContainersEnvName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersEnvValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersEnvValueSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["secret_key_ref"] = + flattenCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRef(original["secretKeyRef"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRef(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["secret"] = + 
flattenCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefSecret(original["secret"], d, config) + transformed["version"] = + flattenCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefVersion(original["version"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["limits"] = + flattenCloudRunV2ServiceTemplateContainersResourcesLimits(original["limits"], d, config) + transformed["cpu_idle"] = + flattenCloudRunV2ServiceTemplateContainersResourcesCpuIdle(original["cpuIdle"], d, config) + transformed["startup_cpu_boost"] = + flattenCloudRunV2ServiceTemplateContainersResourcesStartupCpuBoost(original["startupCpuBoost"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateContainersResourcesLimits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersResourcesCpuIdle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersResourcesStartupCpuBoost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersPorts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunV2ServiceTemplateContainersPortsName(original["name"], d, config), + "container_port": flattenCloudRunV2ServiceTemplateContainersPortsContainerPort(original["containerPort"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2ServiceTemplateContainersPortsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersPortsContainerPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersVolumeMounts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunV2ServiceTemplateContainersVolumeMountsName(original["name"], d, config), + "mount_path": flattenCloudRunV2ServiceTemplateContainersVolumeMountsMountPath(original["mountPath"], d, config), + }) + } + return transformed +} +func 
flattenCloudRunV2ServiceTemplateContainersVolumeMountsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersVolumeMountsMountPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersWorkingDir(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersLivenessProbe(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["initial_delay_seconds"] = + flattenCloudRunV2ServiceTemplateContainersLivenessProbeInitialDelaySeconds(original["initialDelaySeconds"], d, config) + transformed["timeout_seconds"] = + flattenCloudRunV2ServiceTemplateContainersLivenessProbeTimeoutSeconds(original["timeoutSeconds"], d, config) + transformed["period_seconds"] = + flattenCloudRunV2ServiceTemplateContainersLivenessProbePeriodSeconds(original["periodSeconds"], d, config) + transformed["failure_threshold"] = + flattenCloudRunV2ServiceTemplateContainersLivenessProbeFailureThreshold(original["failureThreshold"], d, config) + transformed["http_get"] = + flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGet(original["httpGet"], d, config) + transformed["tcp_socket"] = + flattenCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocket(original["tcpSocket"], d, config) + transformed["grpc"] = + flattenCloudRunV2ServiceTemplateContainersLivenessProbeGrpc(original["grpc"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeInitialDelaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string 
fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersLivenessProbePeriodSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeFailureThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGet(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["path"] = + flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetPath(original["path"], d, config) + transformed["port"] = + flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetPort(original["port"], d, config) + transformed["http_headers"] = + flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeaders(original["httpHeaders"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersName(original["name"], d, config), + "value": 
flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersValue(original["value"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["port"] = + flattenCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocketPort(original["port"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocketPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeGrpc(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["port"] = + flattenCloudRunV2ServiceTemplateContainersLivenessProbeGrpcPort(original["port"], d, config) + transformed["service"] = + flattenCloudRunV2ServiceTemplateContainersLivenessProbeGrpcService(original["service"], d, config) + return 
[]interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeGrpcPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersLivenessProbeGrpcService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersStartupProbe(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["initial_delay_seconds"] = + flattenCloudRunV2ServiceTemplateContainersStartupProbeInitialDelaySeconds(original["initialDelaySeconds"], d, config) + transformed["timeout_seconds"] = + flattenCloudRunV2ServiceTemplateContainersStartupProbeTimeoutSeconds(original["timeoutSeconds"], d, config) + transformed["period_seconds"] = + flattenCloudRunV2ServiceTemplateContainersStartupProbePeriodSeconds(original["periodSeconds"], d, config) + transformed["failure_threshold"] = + flattenCloudRunV2ServiceTemplateContainersStartupProbeFailureThreshold(original["failureThreshold"], d, config) + transformed["http_get"] = + flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGet(original["httpGet"], d, config) + transformed["tcp_socket"] = + flattenCloudRunV2ServiceTemplateContainersStartupProbeTcpSocket(original["tcpSocket"], d, config) + transformed["grpc"] = + flattenCloudRunV2ServiceTemplateContainersStartupProbeGrpc(original["grpc"], d, config) + return 
[]interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateContainersStartupProbeInitialDelaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersStartupProbeTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersStartupProbePeriodSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersStartupProbeFailureThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := 
v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["path"] = + flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetPath(original["path"], d, config) + transformed["port"] = + flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetPort(original["port"], d, config) + transformed["http_headers"] = + flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeaders(original["httpHeaders"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, 
map[string]interface{}{ + "name": flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersName(original["name"], d, config), + "value": flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersValue(original["value"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateContainersStartupProbeTcpSocket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["port"] = + flattenCloudRunV2ServiceTemplateContainersStartupProbeTcpSocketPort(original["port"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateContainersStartupProbeTcpSocketPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersStartupProbeGrpc(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["port"] = + flattenCloudRunV2ServiceTemplateContainersStartupProbeGrpcPort(original["port"], d, config) 
+ transformed["service"] = + flattenCloudRunV2ServiceTemplateContainersStartupProbeGrpcService(original["service"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateContainersStartupProbeGrpcPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateContainersStartupProbeGrpcService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateVolumes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunV2ServiceTemplateVolumesName(original["name"], d, config), + "secret": flattenCloudRunV2ServiceTemplateVolumesSecret(original["secret"], d, config), + "cloud_sql_instance": flattenCloudRunV2ServiceTemplateVolumesCloudSqlInstance(original["cloudSqlInstance"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2ServiceTemplateVolumesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateVolumesSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["secret"] = + flattenCloudRunV2ServiceTemplateVolumesSecretSecret(original["secret"], d, config) + transformed["default_mode"] = + flattenCloudRunV2ServiceTemplateVolumesSecretDefaultMode(original["defaultMode"], d, config) + transformed["items"] = + flattenCloudRunV2ServiceTemplateVolumesSecretItems(original["items"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateVolumesSecretSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateVolumesSecretDefaultMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateVolumesSecretItems(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "path": flattenCloudRunV2ServiceTemplateVolumesSecretItemsPath(original["path"], d, config), + "version": flattenCloudRunV2ServiceTemplateVolumesSecretItemsVersion(original["version"], d, config), + "mode": flattenCloudRunV2ServiceTemplateVolumesSecretItemsMode(original["mode"], d, config), + }) + } + return transformed +} +func 
flattenCloudRunV2ServiceTemplateVolumesSecretItemsPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateVolumesSecretItemsVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateVolumesSecretItemsMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateVolumesCloudSqlInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["instances"] = + flattenCloudRunV2ServiceTemplateVolumesCloudSqlInstanceInstances(original["instances"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTemplateVolumesCloudSqlInstanceInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateExecutionEnvironment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTemplateMaxInstanceRequestConcurrency(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); 
ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTemplateSessionAffinity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTraffic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "type": flattenCloudRunV2ServiceTrafficType(original["type"], d, config), + "revision": flattenCloudRunV2ServiceTrafficRevision(original["revision"], d, config), + "percent": flattenCloudRunV2ServiceTrafficPercent(original["percent"], d, config), + "tag": flattenCloudRunV2ServiceTrafficTag(original["tag"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2ServiceTrafficType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTrafficRevision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTrafficPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let 
terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTrafficTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceObservedGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTerminalCondition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = + flattenCloudRunV2ServiceTerminalConditionType(original["type"], d, config) + transformed["state"] = + flattenCloudRunV2ServiceTerminalConditionState(original["state"], d, config) + transformed["message"] = + flattenCloudRunV2ServiceTerminalConditionMessage(original["message"], d, config) + transformed["last_transition_time"] = + flattenCloudRunV2ServiceTerminalConditionLastTransitionTime(original["lastTransitionTime"], d, config) + transformed["severity"] = + flattenCloudRunV2ServiceTerminalConditionSeverity(original["severity"], d, config) + transformed["reason"] = + flattenCloudRunV2ServiceTerminalConditionReason(original["reason"], d, config) + transformed["revision_reason"] = + flattenCloudRunV2ServiceTerminalConditionRevisionReason(original["revisionReason"], d, config) + transformed["execution_reason"] = + flattenCloudRunV2ServiceTerminalConditionExecutionReason(original["executionReason"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2ServiceTerminalConditionType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTerminalConditionState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTerminalConditionMessage(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTerminalConditionLastTransitionTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTerminalConditionSeverity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTerminalConditionReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTerminalConditionRevisionReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTerminalConditionExecutionReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "type": flattenCloudRunV2ServiceConditionsType(original["type"], d, config), + "state": flattenCloudRunV2ServiceConditionsState(original["state"], d, config), + "message": flattenCloudRunV2ServiceConditionsMessage(original["message"], d, config), + "last_transition_time": flattenCloudRunV2ServiceConditionsLastTransitionTime(original["lastTransitionTime"], d, config), + "severity": flattenCloudRunV2ServiceConditionsSeverity(original["severity"], d, config), + "reason": flattenCloudRunV2ServiceConditionsReason(original["reason"], d, config), + "revision_reason": flattenCloudRunV2ServiceConditionsRevisionReason(original["revisionReason"], d, config), + 
"execution_reason": flattenCloudRunV2ServiceConditionsExecutionReason(original["executionReason"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2ServiceConditionsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceConditionsState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceConditionsMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceConditionsLastTransitionTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceConditionsSeverity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceConditionsReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceConditionsRevisionReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceConditionsExecutionReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceLatestReadyRevision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceLatestCreatedRevision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTrafficStatuses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json 
objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "type": flattenCloudRunV2ServiceTrafficStatusesType(original["type"], d, config), + "revision": flattenCloudRunV2ServiceTrafficStatusesRevision(original["revision"], d, config), + "percent": flattenCloudRunV2ServiceTrafficStatusesPercent(original["percent"], d, config), + "tag": flattenCloudRunV2ServiceTrafficStatusesTag(original["tag"], d, config), + "uri": flattenCloudRunV2ServiceTrafficStatusesUri(original["uri"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2ServiceTrafficStatusesType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTrafficStatusesRevision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTrafficStatusesPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2ServiceTrafficStatusesTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceTrafficStatusesUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceReconciling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudRunV2ServiceEtag(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandCloudRunV2ServiceDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunV2ServiceAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunV2ServiceClient(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceClientVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceIngress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceLaunchStage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceBinaryAuthorization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBreakglassJustification, err := 
expandCloudRunV2ServiceBinaryAuthorizationBreakglassJustification(original["breakglass_justification"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBreakglassJustification); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["breakglassJustification"] = transformedBreakglassJustification + } + + transformedUseDefault, err := expandCloudRunV2ServiceBinaryAuthorizationUseDefault(original["use_default"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUseDefault); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["useDefault"] = transformedUseDefault + } + + return transformed, nil +} + +func expandCloudRunV2ServiceBinaryAuthorizationBreakglassJustification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceBinaryAuthorizationUseDefault(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRevision, err := expandCloudRunV2ServiceTemplateRevision(original["revision"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRevision); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["revision"] = transformedRevision + } + + transformedLabels, err := expandCloudRunV2ServiceTemplateLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labels"] = 
transformedLabels + } + + transformedAnnotations, err := expandCloudRunV2ServiceTemplateAnnotations(original["annotations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAnnotations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["annotations"] = transformedAnnotations + } + + transformedScaling, err := expandCloudRunV2ServiceTemplateScaling(original["scaling"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaling); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scaling"] = transformedScaling + } + + transformedVpcAccess, err := expandCloudRunV2ServiceTemplateVpcAccess(original["vpc_access"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVpcAccess); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vpcAccess"] = transformedVpcAccess + } + + transformedTimeout, err := expandCloudRunV2ServiceTemplateTimeout(original["timeout"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeout"] = transformedTimeout + } + + transformedServiceAccount, err := expandCloudRunV2ServiceTemplateServiceAccount(original["service_account"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAccount"] = transformedServiceAccount + } + + transformedContainers, err := expandCloudRunV2ServiceTemplateContainers(original["containers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContainers); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["containers"] = transformedContainers + } + + transformedVolumes, err := expandCloudRunV2ServiceTemplateVolumes(original["volumes"], d, config) + if err != 
nil { + return nil, err + } else if val := reflect.ValueOf(transformedVolumes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["volumes"] = transformedVolumes + } + + transformedExecutionEnvironment, err := expandCloudRunV2ServiceTemplateExecutionEnvironment(original["execution_environment"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExecutionEnvironment); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["executionEnvironment"] = transformedExecutionEnvironment + } + + transformedEncryptionKey, err := expandCloudRunV2ServiceTemplateEncryptionKey(original["encryption_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncryptionKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["encryptionKey"] = transformedEncryptionKey + } + + transformedMaxInstanceRequestConcurrency, err := expandCloudRunV2ServiceTemplateMaxInstanceRequestConcurrency(original["max_instance_request_concurrency"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxInstanceRequestConcurrency); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxInstanceRequestConcurrency"] = transformedMaxInstanceRequestConcurrency + } + + transformedSessionAffinity, err := expandCloudRunV2ServiceTemplateSessionAffinity(original["session_affinity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSessionAffinity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sessionAffinity"] = transformedSessionAffinity + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateRevision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunV2ServiceTemplateAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunV2ServiceTemplateScaling(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMinInstanceCount, err := expandCloudRunV2ServiceTemplateScalingMinInstanceCount(original["min_instance_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinInstanceCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minInstanceCount"] = transformedMinInstanceCount + } + + transformedMaxInstanceCount, err := expandCloudRunV2ServiceTemplateScalingMaxInstanceCount(original["max_instance_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxInstanceCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxInstanceCount"] = transformedMaxInstanceCount + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateScalingMinInstanceCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateScalingMaxInstanceCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return 
v, nil +} + +func expandCloudRunV2ServiceTemplateVpcAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedConnector, err := expandCloudRunV2ServiceTemplateVpcAccessConnector(original["connector"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConnector); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["connector"] = transformedConnector + } + + transformedEgress, err := expandCloudRunV2ServiceTemplateVpcAccessEgress(original["egress"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEgress); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["egress"] = transformedEgress + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateVpcAccessConnector(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateVpcAccessEgress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunV2ServiceTemplateContainersName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedImage, err := expandCloudRunV2ServiceTemplateContainersImage(original["image"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImage); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["image"] = transformedImage + } + + transformedCommand, err := expandCloudRunV2ServiceTemplateContainersCommand(original["command"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCommand); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["command"] = transformedCommand + } + + transformedArgs, err := expandCloudRunV2ServiceTemplateContainersArgs(original["args"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["args"] = transformedArgs + } + + transformedEnv, err := expandCloudRunV2ServiceTemplateContainersEnv(original["env"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnv); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["env"] = transformedEnv + } + + transformedResources, err := expandCloudRunV2ServiceTemplateContainersResources(original["resources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resources"] = transformedResources + } + + transformedPorts, err := expandCloudRunV2ServiceTemplateContainersPorts(original["ports"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedPorts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ports"] = transformedPorts + } + + transformedVolumeMounts, err := expandCloudRunV2ServiceTemplateContainersVolumeMounts(original["volume_mounts"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVolumeMounts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["volumeMounts"] = transformedVolumeMounts + } + + transformedWorkingDir, err := expandCloudRunV2ServiceTemplateContainersWorkingDir(original["working_dir"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWorkingDir); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["workingDir"] = transformedWorkingDir + } + + transformedLivenessProbe, err := expandCloudRunV2ServiceTemplateContainersLivenessProbe(original["liveness_probe"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLivenessProbe); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["livenessProbe"] = transformedLivenessProbe + } + + transformedStartupProbe, err := expandCloudRunV2ServiceTemplateContainersStartupProbe(original["startup_probe"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartupProbe); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startupProbe"] = transformedStartupProbe + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2ServiceTemplateContainersName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersImage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersCommand(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersArgs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersEnv(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunV2ServiceTemplateContainersEnvName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandCloudRunV2ServiceTemplateContainersEnvValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + transformedValueSource, err := expandCloudRunV2ServiceTemplateContainersEnvValueSource(original["value_source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValueSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["valueSource"] = transformedValueSource + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2ServiceTemplateContainersEnvName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersEnvValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandCloudRunV2ServiceTemplateContainersEnvValueSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSecretKeyRef, err := expandCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRef(original["secret_key_ref"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretKeyRef); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secretKeyRef"] = transformedSecretKeyRef + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRef(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSecret, err := expandCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefSecret(original["secret"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secret"] = transformedSecret + } + + transformedVersion, err := expandCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandCloudRunV2ServiceTemplateContainersEnvValueSourceSecretKeyRefVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLimits, err := expandCloudRunV2ServiceTemplateContainersResourcesLimits(original["limits"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLimits); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["limits"] = transformedLimits + } + + transformedCpuIdle, err := expandCloudRunV2ServiceTemplateContainersResourcesCpuIdle(original["cpu_idle"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCpuIdle); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cpuIdle"] = transformedCpuIdle + } + + transformedStartupCpuBoost, err := expandCloudRunV2ServiceTemplateContainersResourcesStartupCpuBoost(original["startup_cpu_boost"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartupCpuBoost); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startupCpuBoost"] = transformedStartupCpuBoost + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateContainersResourcesLimits(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunV2ServiceTemplateContainersResourcesCpuIdle(v interface{}, 
d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersResourcesStartupCpuBoost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersPorts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunV2ServiceTemplateContainersPortsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedContainerPort, err := expandCloudRunV2ServiceTemplateContainersPortsContainerPort(original["container_port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContainerPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["containerPort"] = transformedContainerPort + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2ServiceTemplateContainersPortsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersPortsContainerPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersVolumeMounts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := 
range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunV2ServiceTemplateContainersVolumeMountsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedMountPath, err := expandCloudRunV2ServiceTemplateContainersVolumeMountsMountPath(original["mount_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMountPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mountPath"] = transformedMountPath + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2ServiceTemplateContainersVolumeMountsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersVolumeMountsMountPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersWorkingDir(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbe(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInitialDelaySeconds, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeInitialDelaySeconds(original["initial_delay_seconds"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedInitialDelaySeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["initialDelaySeconds"] = transformedInitialDelaySeconds + } + + transformedTimeoutSeconds, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeTimeoutSeconds(original["timeout_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeoutSeconds"] = transformedTimeoutSeconds + } + + transformedPeriodSeconds, err := expandCloudRunV2ServiceTemplateContainersLivenessProbePeriodSeconds(original["period_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPeriodSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["periodSeconds"] = transformedPeriodSeconds + } + + transformedFailureThreshold, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeFailureThreshold(original["failure_threshold"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["failureThreshold"] = transformedFailureThreshold + } + + transformedHttpGet, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGet(original["http_get"], d, config) + if err != nil { + return nil, err + } else { + transformed["httpGet"] = transformedHttpGet + } + + transformedTcpSocket, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocket(original["tcp_socket"], d, config) + if err != nil { + return nil, err + } else { + transformed["tcpSocket"] = transformedTcpSocket + } + + transformedGrpc, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeGrpc(original["grpc"], d, config) + if err != nil { + return nil, err + } else { + transformed["grpc"] = transformedGrpc + } + + return transformed, nil +} + +func 
expandCloudRunV2ServiceTemplateContainersLivenessProbeInitialDelaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbeTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbePeriodSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbeFailureThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedPort, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedHttpHeaders, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeaders(original["http_headers"], d, config) 
+ if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["httpHeaders"] = transformedHttpHeaders + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersValue(original["value"], d, config) + if err != nil { + return nil, err + } else { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbeHttpGetHttpHeadersValue(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPort, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocketPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbeTcpSocketPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbeGrpc(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPort, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeGrpcPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedService, err := expandCloudRunV2ServiceTemplateContainersLivenessProbeGrpcService(original["service"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["service"] = transformedService + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbeGrpcPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersLivenessProbeGrpcService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbe(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInitialDelaySeconds, err := expandCloudRunV2ServiceTemplateContainersStartupProbeInitialDelaySeconds(original["initial_delay_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInitialDelaySeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["initialDelaySeconds"] = transformedInitialDelaySeconds + } + + transformedTimeoutSeconds, err := expandCloudRunV2ServiceTemplateContainersStartupProbeTimeoutSeconds(original["timeout_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeoutSeconds"] = transformedTimeoutSeconds + } + + transformedPeriodSeconds, err := expandCloudRunV2ServiceTemplateContainersStartupProbePeriodSeconds(original["period_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPeriodSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["periodSeconds"] = 
transformedPeriodSeconds + } + + transformedFailureThreshold, err := expandCloudRunV2ServiceTemplateContainersStartupProbeFailureThreshold(original["failure_threshold"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["failureThreshold"] = transformedFailureThreshold + } + + transformedHttpGet, err := expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGet(original["http_get"], d, config) + if err != nil { + return nil, err + } else { + transformed["httpGet"] = transformedHttpGet + } + + transformedTcpSocket, err := expandCloudRunV2ServiceTemplateContainersStartupProbeTcpSocket(original["tcp_socket"], d, config) + if err != nil { + return nil, err + } else { + transformed["tcpSocket"] = transformedTcpSocket + } + + transformedGrpc, err := expandCloudRunV2ServiceTemplateContainersStartupProbeGrpc(original["grpc"], d, config) + if err != nil { + return nil, err + } else { + transformed["grpc"] = transformedGrpc + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbeInitialDelaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbeTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbePeriodSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbeFailureThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGet(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedPort, err := expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedHttpHeaders, err := expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeaders(original["http_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["httpHeaders"] = transformedHttpHeaders + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw 
:= range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersValue(original["value"], d, config) + if err != nil { + return nil, err + } else { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbeHttpGetHttpHeadersValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbeTcpSocket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPort, err := expandCloudRunV2ServiceTemplateContainersStartupProbeTcpSocketPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + return transformed, nil +} + +func 
expandCloudRunV2ServiceTemplateContainersStartupProbeTcpSocketPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbeGrpc(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPort, err := expandCloudRunV2ServiceTemplateContainersStartupProbeGrpcPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedService, err := expandCloudRunV2ServiceTemplateContainersStartupProbeGrpcService(original["service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["service"] = transformedService + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbeGrpcPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateContainersStartupProbeGrpcService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateVolumes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + 
transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunV2ServiceTemplateVolumesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedSecret, err := expandCloudRunV2ServiceTemplateVolumesSecret(original["secret"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secret"] = transformedSecret + } + + transformedCloudSqlInstance, err := expandCloudRunV2ServiceTemplateVolumesCloudSqlInstance(original["cloud_sql_instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudSqlInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudSqlInstance"] = transformedCloudSqlInstance + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2ServiceTemplateVolumesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateVolumesSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSecret, err := expandCloudRunV2ServiceTemplateVolumesSecretSecret(original["secret"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secret"] = transformedSecret + } + + transformedDefaultMode, err := expandCloudRunV2ServiceTemplateVolumesSecretDefaultMode(original["default_mode"], d, config) + if err != 
nil { + return nil, err + } else if val := reflect.ValueOf(transformedDefaultMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["defaultMode"] = transformedDefaultMode + } + + transformedItems, err := expandCloudRunV2ServiceTemplateVolumesSecretItems(original["items"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedItems); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["items"] = transformedItems + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateVolumesSecretSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateVolumesSecretDefaultMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateVolumesSecretItems(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandCloudRunV2ServiceTemplateVolumesSecretItemsPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedVersion, err := expandCloudRunV2ServiceTemplateVolumesSecretItemsVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedMode, err := expandCloudRunV2ServiceTemplateVolumesSecretItemsMode(original["mode"], d, config) + if err != nil { 
+ return nil, err + } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mode"] = transformedMode + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2ServiceTemplateVolumesSecretItemsPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateVolumesSecretItemsVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateVolumesSecretItemsMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateVolumesCloudSqlInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInstances, err := expandCloudRunV2ServiceTemplateVolumesCloudSqlInstanceInstances(original["instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["instances"] = transformedInstances + } + + return transformed, nil +} + +func expandCloudRunV2ServiceTemplateVolumesCloudSqlInstanceInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateExecutionEnvironment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateEncryptionKey(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateMaxInstanceRequestConcurrency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTemplateSessionAffinity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTraffic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandCloudRunV2ServiceTrafficType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedRevision, err := expandCloudRunV2ServiceTrafficRevision(original["revision"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRevision); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["revision"] = transformedRevision + } + + transformedPercent, err := expandCloudRunV2ServiceTrafficPercent(original["percent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["percent"] = transformedPercent + } + + transformedTag, err := expandCloudRunV2ServiceTrafficTag(original["tag"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tag"] = transformedTag + } + + req = append(req, transformed) + } + return req, nil +} + +func 
expandCloudRunV2ServiceTrafficType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTrafficRevision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTrafficPercent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2ServiceTrafficTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_service_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_service_sweeper.go new file mode 100644 index 0000000000..d90dbf6a3c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/resource_cloud_run_v2_service_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudrunv2 + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudRunV2Service", testSweepCloudRunV2Service) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudRunV2Service(region string) error { + resourceName := "CloudRunV2Service" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://run.googleapis.com/v2/projects/{{project}}/locations/{{location}}/services", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["services"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://run.googleapis.com/v2/projects/{{project}}/locations/{{location}}/services/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/runadminv3_operation.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/runadminv3_operation.go new file mode 100644 index 0000000000..4cd79db490 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2/runadminv3_operation.go @@ -0,0 +1,70 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package cloudrunv2 + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/run/v2" +) + +type RunAdminV2OperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *RunAdminV2OperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + url := fmt.Sprintf("%s%s", w.Config.CloudRunV2BasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createRunAdminV2Waiter(config *transport_tpg.Config, op *run.GoogleLongrunningOperation, project, activity, userAgent string) (*RunAdminV2OperationWaiter, error) { + w := &RunAdminV2OperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +func RunAdminV2OperationWaitTimeWithResponse(config *transport_tpg.Config, op *run.GoogleLongrunningOperation, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createRunAdminV2Waiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := 
tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func RunAdminV2OperationWaitTime(config *transport_tpg.Config, op *run.GoogleLongrunningOperation, project, activity, userAgent string, timeout time.Duration) error { + if op.Done { + return nil + } + w, err := createRunAdminV2Waiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudscheduler/resource_cloud_scheduler_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudscheduler/resource_cloud_scheduler_job.go new file mode 100644 index 0000000000..25a7978a16 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudscheduler/resource_cloud_scheduler_job.go @@ -0,0 +1,1621 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudscheduler + +import ( + "context" + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +// Both oidc and oauth headers cannot be set +func validateAuthHeaders(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + httpBlock := diff.Get("http_target.0").(map[string]interface{}) + + if httpBlock != nil { + oauth := httpBlock["oauth_token"] + oidc := httpBlock["oidc_token"] + + if oauth != nil && oidc != nil { + if len(oidc.([]interface{})) > 0 && len(oauth.([]interface{})) > 0 { + return fmt.Errorf("Error in http_target: only one of oauth_token or oidc_token can be specified, but not both.") + } + } + } + + return nil +} + +func authHeaderDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // If generating an `oauth_token` and `scope` is not provided in the configuration, + // the default "https://www.googleapis.com/auth/cloud-platform" scope will be used. + // Similarly, if generating an `oidc_token` and `audience` is not provided in the + // configuration, the URI specified in target will be used. Although not in the + // configuration, in both cases the default is returned in the object, but is not in. + // state. We suppress the diff if the values are these defaults but are not stored in state. 
+ + b := strings.Split(k, ".") + if b[0] == "http_target" && len(b) > 4 { + block := b[2] + attr := b[4] + + if block == "oauth_token" && attr == "scope" { + if old == tpgresource.CanonicalizeServiceScope("cloud-platform") && new == "" { + return true + } + } + + if block == "oidc_token" && attr == "audience" { + uri := d.Get(strings.Join(b[0:2], ".") + ".uri") + if old == uri && new == "" { + return true + } + } + + } + + return false +} + +func validateHttpHeaders() schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + headers := i.(map[string]interface{}) + if _, ok := headers["Content-Length"]; ok { + es = append(es, fmt.Errorf("Cannot set the Content-Length header on %s", k)) + return + } + r := regexp.MustCompile(`(X-Google-|X-AppEngine-).*`) + for key := range headers { + if r.MatchString(key) { + es = append(es, fmt.Errorf("Cannot set the %s header on %s", key, k)) + return + } + } + + return + } +} + +func ResourceCloudSchedulerJob() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudSchedulerJobCreate, + Read: resourceCloudSchedulerJobRead, + Update: resourceCloudSchedulerJobUpdate, + Delete: resourceCloudSchedulerJobDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudSchedulerJobImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + validateAuthHeaders, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the job.`, + }, + "app_engine_http_target": { + Type: schema.TypeList, + Optional: true, + Description: `App Engine HTTP target. 
+If the job providers a App Engine HTTP target the cron will +send a request to the service instance`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "relative_uri": { + Type: schema.TypeString, + Required: true, + Description: `The relative URI. +The relative URL must begin with "/" and must be a valid HTTP relative URL. +It can contain a path, query string arguments, and \# fragments. +If the relative URL is empty, then the root path "/" will be used. +No spaces are allowed, and the maximum length allowed is 2083 characters`, + }, + "app_engine_routing": { + Type: schema.TypeList, + Optional: true, + Description: `App Engine Routing setting for the job.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Optional: true, + Description: `App instance. +By default, the job is sent to an instance which is available when the job is attempted.`, + AtLeastOneOf: []string{"app_engine_http_target.0.app_engine_routing.0.service", "app_engine_http_target.0.app_engine_routing.0.version", "app_engine_http_target.0.app_engine_routing.0.instance"}, + }, + "service": { + Type: schema.TypeString, + Optional: true, + Description: `App service. +By default, the job is sent to the service which is the default service when the job is attempted.`, + AtLeastOneOf: []string{"app_engine_http_target.0.app_engine_routing.0.service", "app_engine_http_target.0.app_engine_routing.0.version", "app_engine_http_target.0.app_engine_routing.0.instance"}, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `App version. 
+By default, the job is sent to the version which is the default version when the job is attempted.`, + AtLeastOneOf: []string{"app_engine_http_target.0.app_engine_routing.0.service", "app_engine_http_target.0.app_engine_routing.0.version", "app_engine_http_target.0.app_engine_routing.0.instance"}, + }, + }, + }, + }, + "body": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateBase64String, + Description: `HTTP request body. +A request body is allowed only if the HTTP method is POST or PUT. +It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. + +A base64-encoded string.`, + }, + "headers": { + Type: schema.TypeMap, + Optional: true, + ValidateFunc: validateHttpHeaders(), + Description: `HTTP request headers. +This map contains the header field names and values. +Headers can be set when the job is created.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "http_method": { + Type: schema.TypeString, + Optional: true, + Description: `Which HTTP method to use for the request.`, + }, + }, + }, + ExactlyOneOf: []string{"pubsub_target", "http_target", "app_engine_http_target"}, + }, + "attempt_deadline": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("180s"), + Description: `The deadline for job attempts. If the request handler does not respond by this deadline then the request is +cancelled and the attempt is marked as a DEADLINE_EXCEEDED failure. The failed attempt can be viewed in +execution logs. Cloud Scheduler will retry the job according to the RetryConfig. +The allowed duration for this deadline is: +* For HTTP targets, between 15 seconds and 30 minutes. +* For App Engine HTTP targets, between 15 seconds and 24 hours. +* **Note**: For PubSub targets, this field is ignored - setting it will introduce an unresolvable diff. +A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s"`, + Default: "180s", + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description for the job. +This string must not contain more than 500 characters.`, + }, + "http_target": { + Type: schema.TypeList, + Optional: true, + Description: `HTTP target. +If the job providers a http_target the cron will +send a request to the targeted url`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.LastSlashDiffSuppress, + Description: `The full URI path that the request will be sent to.`, + }, + "body": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateBase64String, + Description: `HTTP request body. +A request body is allowed only if the HTTP method is POST, PUT, or PATCH. +It is an error to set body on a job with an incompatible HttpMethod. + +A base64-encoded string.`, + }, + "headers": { + Type: schema.TypeMap, + Optional: true, + ValidateFunc: validateHttpHeaders(), + Description: `This map contains the header field names and values. +Repeated headers are not supported, but a header value can contain commas.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "http_method": { + Type: schema.TypeString, + Optional: true, + Description: `Which HTTP method to use for the request.`, + }, + "oauth_token": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: authHeaderDiffSuppress, + Description: `Contains information needed for generating an OAuth token. +This type of authorization should be used when sending requests to a GCP endpoint.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service_account_email": { + Type: schema.TypeString, + Required: true, + Description: `Service account email to be used for generating OAuth token. 
+The service account must be within the same project as the job.`, + }, + "scope": { + Type: schema.TypeString, + Optional: true, + Description: `OAuth scope to be used for generating OAuth access token. If not specified, +"https://www.googleapis.com/auth/cloud-platform" will be used.`, + }, + }, + }, + }, + "oidc_token": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: authHeaderDiffSuppress, + Description: `Contains information needed for generating an OpenID Connect token. +This type of authorization should be used when sending requests to third party endpoints or Cloud Run.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service_account_email": { + Type: schema.TypeString, + Required: true, + Description: `Service account email to be used for generating OAuth token. +The service account must be within the same project as the job.`, + }, + "audience": { + Type: schema.TypeString, + Optional: true, + Description: `Audience to be used when generating OIDC token. If not specified, +the URI specified in target will be used.`, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"pubsub_target", "http_target", "app_engine_http_target"}, + }, + "paused": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `Sets the job to a paused state. Jobs default to being enabled when this property is not set.`, + }, + "pubsub_target": { + Type: schema.TypeList, + Optional: true, + Description: `Pub/Sub target +If the job providers a Pub/Sub target the cron will publish +a message to the provided topic`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic_name": { + Type: schema.TypeString, + Required: true, + Description: `The full resource name for the Cloud Pub/Sub topic to which +messages will be published when a job is delivered. ~>**NOTE:** +The topic name must be in the same format as required by PubSub's +PublishRequest.name, e.g. 
'projects/my-project/topics/my-topic'.`, + }, + "attributes": { + Type: schema.TypeMap, + Optional: true, + Description: `Attributes for PubsubMessage. +Pubsub message must contain either non-empty data, or at least one attribute.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "data": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateBase64String, + Description: `The message payload for PubsubMessage. +Pubsub message must contain either non-empty data, or at least one attribute. + + A base64-encoded string.`, + }, + }, + }, + ExactlyOneOf: []string{"pubsub_target", "http_target", "app_engine_http_target"}, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Region where the scheduler job resides. If it is not provided, Terraform will use the provider default.`, + }, + "retry_config": { + Type: schema.TypeList, + Optional: true, + Description: `By default, if a job does not complete successfully, +meaning that an acknowledgement is not received from the handler, +then it will be retried with exponential backoff according to the settings`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_backoff_duration": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The maximum amount of time to wait before retrying a job after it fails. +A duration in seconds with up to nine fractional digits, terminated by 's'.`, + AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, + }, + "max_doublings": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The time between retries will double maxDoublings times. 
+A job's retry interval starts at minBackoffDuration, +then doubles maxDoublings times, then increases linearly, +and finally retries retries at intervals of maxBackoffDuration up to retryCount times.`, + AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, + }, + "max_retry_duration": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The time limit for retrying a failed job, measured from time when an execution was first attempted. +If specified with retryCount, the job will be retried until both limits are reached. +A duration in seconds with up to nine fractional digits, terminated by 's'.`, + AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, + }, + "min_backoff_duration": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The minimum amount of time to wait before retrying a job after it fails. +A duration in seconds with up to nine fractional digits, terminated by 's'.`, + AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, + }, + "retry_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The number of attempts that the system will make to run a +job using the exponential backoff procedure described by maxDoublings. 
+Values greater than 5 and negative values are not allowed.`, + AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, + }, + }, + }, + }, + "schedule": { + Type: schema.TypeString, + Optional: true, + Description: `Describes the schedule on which the job will be executed.`, + }, + "time_zone": { + Type: schema.TypeString, + Optional: true, + Description: `Specifies the time zone to be used in interpreting schedule. +The value of this field must be a time zone name from the tz database.`, + Default: "Etc/UTC", + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the job.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudSchedulerJobCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandCloudSchedulerJobName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandCloudSchedulerJobDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + scheduleProp, err := expandCloudSchedulerJobSchedule(d.Get("schedule"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("schedule"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(scheduleProp)) && (ok || !reflect.DeepEqual(v, scheduleProp)) { + obj["schedule"] = scheduleProp + } + timeZoneProp, err := expandCloudSchedulerJobTimeZone(d.Get("time_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("time_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeZoneProp)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { + obj["timeZone"] = timeZoneProp + } + pausedProp, err := expandCloudSchedulerJobPaused(d.Get("paused"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("paused"); !tpgresource.IsEmptyValue(reflect.ValueOf(pausedProp)) && (ok || !reflect.DeepEqual(v, pausedProp)) { + obj["paused"] = pausedProp + } + attemptDeadlineProp, err := expandCloudSchedulerJobAttemptDeadline(d.Get("attempt_deadline"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attempt_deadline"); !tpgresource.IsEmptyValue(reflect.ValueOf(attemptDeadlineProp)) && (ok || !reflect.DeepEqual(v, attemptDeadlineProp)) { + obj["attemptDeadline"] = attemptDeadlineProp + } + retryConfigProp, err := expandCloudSchedulerJobRetryConfig(d.Get("retry_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("retry_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(retryConfigProp)) && (ok || !reflect.DeepEqual(v, retryConfigProp)) { + obj["retryConfig"] = retryConfigProp + } + pubsubTargetProp, err := expandCloudSchedulerJobPubsubTarget(d.Get("pubsub_target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_target"); !tpgresource.IsEmptyValue(reflect.ValueOf(pubsubTargetProp)) && (ok || !reflect.DeepEqual(v, pubsubTargetProp)) { + obj["pubsubTarget"] = pubsubTargetProp + } + appEngineHttpTargetProp, err := expandCloudSchedulerJobAppEngineHttpTarget(d.Get("app_engine_http_target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("app_engine_http_target"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(appEngineHttpTargetProp)) && (ok || !reflect.DeepEqual(v, appEngineHttpTargetProp)) { + obj["appEngineHttpTarget"] = appEngineHttpTargetProp + } + httpTargetProp, err := expandCloudSchedulerJobHttpTarget(d.Get("http_target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("http_target"); !tpgresource.IsEmptyValue(reflect.ValueOf(httpTargetProp)) && (ok || !reflect.DeepEqual(v, httpTargetProp)) { + obj["httpTarget"] = httpTargetProp + } + + obj, err = resourceCloudSchedulerJobEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Job: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Job: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Job: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/jobs/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + endpoint := "resume" // Default to enabled + logSuccessMsg := "Job state has been set to ENABLED" + if paused, pausedOk := d.GetOk("paused"); pausedOk && paused.(bool) { + endpoint = "pause" + logSuccessMsg = "Job state has been set to PAUSED" + } + + linkTmpl := 
fmt.Sprintf("{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}:%s", endpoint) + url, err = tpgresource.ReplaceVars(d, config, linkTmpl) + if err != nil { + return err + } + + emptyReqBody := make(map[string]interface{}) + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: emptyReqBody, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error setting Cloud Scheduler Job status: %s", err) + } + + log.Printf("[DEBUG] Finished updating Job %q status: %s", d.Id(), logSuccessMsg) + + log.Printf("[DEBUG] Finished creating Job %q: %#v", d.Id(), res) + + return resourceCloudSchedulerJobRead(d, meta) +} + +func resourceCloudSchedulerJobRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Job: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudSchedulerJob %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + + region, err := 
tpgresource.GetRegion(d, config) + if err != nil { + return err + } + if err := d.Set("region", region); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + + if err := d.Set("name", flattenCloudSchedulerJobName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("description", flattenCloudSchedulerJobDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("schedule", flattenCloudSchedulerJobSchedule(res["schedule"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("time_zone", flattenCloudSchedulerJobTimeZone(res["timeZone"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("state", flattenCloudSchedulerJobState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("paused", flattenCloudSchedulerJobPaused(res["paused"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("attempt_deadline", flattenCloudSchedulerJobAttemptDeadline(res["attemptDeadline"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("retry_config", flattenCloudSchedulerJobRetryConfig(res["retryConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("pubsub_target", flattenCloudSchedulerJobPubsubTarget(res["pubsubTarget"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("app_engine_http_target", flattenCloudSchedulerJobAppEngineHttpTarget(res["appEngineHttpTarget"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("http_target", flattenCloudSchedulerJobHttpTarget(res["httpTarget"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", 
err) + } + + return nil +} + +func resourceCloudSchedulerJobUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Job: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandCloudSchedulerJobDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + scheduleProp, err := expandCloudSchedulerJobSchedule(d.Get("schedule"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("schedule"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, scheduleProp)) { + obj["schedule"] = scheduleProp + } + timeZoneProp, err := expandCloudSchedulerJobTimeZone(d.Get("time_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("time_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { + obj["timeZone"] = timeZoneProp + } + pausedProp, err := expandCloudSchedulerJobPaused(d.Get("paused"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("paused"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pausedProp)) { + obj["paused"] = pausedProp + } + attemptDeadlineProp, err := expandCloudSchedulerJobAttemptDeadline(d.Get("attempt_deadline"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attempt_deadline"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attemptDeadlineProp)) { + 
obj["attemptDeadline"] = attemptDeadlineProp + } + retryConfigProp, err := expandCloudSchedulerJobRetryConfig(d.Get("retry_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("retry_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retryConfigProp)) { + obj["retryConfig"] = retryConfigProp + } + pubsubTargetProp, err := expandCloudSchedulerJobPubsubTarget(d.Get("pubsub_target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_target"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pubsubTargetProp)) { + obj["pubsubTarget"] = pubsubTargetProp + } + appEngineHttpTargetProp, err := expandCloudSchedulerJobAppEngineHttpTarget(d.Get("app_engine_http_target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("app_engine_http_target"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, appEngineHttpTargetProp)) { + obj["appEngineHttpTarget"] = appEngineHttpTargetProp + } + httpTargetProp, err := expandCloudSchedulerJobHttpTarget(d.Get("http_target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("http_target"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, httpTargetProp)) { + obj["httpTarget"] = httpTargetProp + } + + obj, err = resourceCloudSchedulerJobUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Job %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + 
Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Job %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Job %q: %#v", d.Id(), res) + } + + if d.HasChange("paused") { + endpoint := "resume" // Default to enabled + logSuccessMsg := "Job state has been set to ENABLED" + if paused, pausedOk := d.GetOk("paused"); pausedOk { + if paused.(bool) { + endpoint = "pause" + logSuccessMsg = "Job state has been set to PAUSED" + } + } + + linkTmpl := fmt.Sprintf("{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}:%s", endpoint) + url, err = tpgresource.ReplaceVars(d, config, linkTmpl) + if err != nil { + return err + } + + emptyReqBody := make(map[string]interface{}) + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: emptyReqBody, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error setting Cloud Scheduler Job status: %s", err) + } + + log.Printf("[DEBUG] Finished updating Job %q status: %s", d.Id(), logSuccessMsg) + } + return resourceCloudSchedulerJobRead(d, meta) +} + +func resourceCloudSchedulerJobDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Job: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting 
Job %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Job") + } + + log.Printf("[DEBUG] Finished deleting Job %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudSchedulerJobImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/jobs/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/jobs/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCloudSchedulerJobName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenCloudSchedulerJobDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobTimeZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobState(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobPaused(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + state := d.Get("state") + if state == "PAUSED" { + return true + } + if state == "ENABLED" { + return false + } + return false // Job has an error state that's not paused or enabled +} + +func flattenCloudSchedulerJobAttemptDeadline(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobRetryConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["retry_count"] = + flattenCloudSchedulerJobRetryConfigRetryCount(original["retryCount"], d, config) + transformed["max_retry_duration"] = + flattenCloudSchedulerJobRetryConfigMaxRetryDuration(original["maxRetryDuration"], d, config) + transformed["min_backoff_duration"] = + flattenCloudSchedulerJobRetryConfigMinBackoffDuration(original["minBackoffDuration"], d, config) + transformed["max_backoff_duration"] = + flattenCloudSchedulerJobRetryConfigMaxBackoffDuration(original["maxBackoffDuration"], d, config) + transformed["max_doublings"] = + flattenCloudSchedulerJobRetryConfigMaxDoublings(original["maxDoublings"], d, config) + return []interface{}{transformed} +} +func flattenCloudSchedulerJobRetryConfigRetryCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenCloudSchedulerJobRetryConfigMaxRetryDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobRetryConfigMinBackoffDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobRetryConfigMaxBackoffDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobRetryConfigMaxDoublings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudSchedulerJobPubsubTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["topic_name"] = + flattenCloudSchedulerJobPubsubTargetTopicName(original["topicName"], d, config) + transformed["data"] = + flattenCloudSchedulerJobPubsubTargetData(original["data"], d, config) + transformed["attributes"] = + flattenCloudSchedulerJobPubsubTargetAttributes(original["attributes"], d, config) + return []interface{}{transformed} +} +func flattenCloudSchedulerJobPubsubTargetTopicName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobPubsubTargetData(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobPubsubTargetAttributes(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobAppEngineHttpTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["http_method"] = + flattenCloudSchedulerJobAppEngineHttpTargetHttpMethod(original["httpMethod"], d, config) + transformed["app_engine_routing"] = + flattenCloudSchedulerJobAppEngineHttpTargetAppEngineRouting(original["appEngineRouting"], d, config) + transformed["relative_uri"] = + flattenCloudSchedulerJobAppEngineHttpTargetRelativeUri(original["relativeUri"], d, config) + transformed["body"] = + flattenCloudSchedulerJobAppEngineHttpTargetBody(original["body"], d, config) + transformed["headers"] = + flattenCloudSchedulerJobAppEngineHttpTargetHeaders(original["headers"], d, config) + return []interface{}{transformed} +} +func flattenCloudSchedulerJobAppEngineHttpTargetHttpMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +// An `appEngineRouting` in API response is useless, so we set config values rather than api response to state. 
+func flattenCloudSchedulerJobAppEngineHttpTargetAppEngineRouting(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + if stateV, ok := d.GetOk("app_engine_http_target"); ok && len(stateV.([]interface{})) > 0 { + return d.Get("app_engine_http_target.0.app_engine_routing") + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["service"] = original["service"] + transformed["version"] = original["version"] + transformed["instance"] = original["instance"] + return []interface{}{transformed} +} + +func flattenCloudSchedulerJobAppEngineHttpTargetRelativeUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobAppEngineHttpTargetBody(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobAppEngineHttpTargetHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + var headers = v.(map[string]interface{}) + if v, ok := headers["User-Agent"]; ok { + if v.(string) == "AppEngine-Google; (+http://code.google.com/appengine)" { + delete(headers, "User-Agent") + } else if v.(string) == "Google-Cloud-Scheduler" { + delete(headers, "User-Agent") + } else { + headers["User-Agent"] = strings.TrimSpace(strings.Replace(v.(string), "AppEngine-Google; (+http://code.google.com/appengine)", "", -1)) + } + } + if v, ok := headers["Content-Type"]; ok { + if v.(string) == "application/octet-stream" { + delete(headers, "Content-Type") + } + } + r := regexp.MustCompile(`(X-Google-|X-AppEngine-|Content-Length).*`) + for key := range headers { + if r.MatchString(key) { + delete(headers, key) + } + } + return headers +} + +func flattenCloudSchedulerJobHttpTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + 
return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenCloudSchedulerJobHttpTargetUri(original["uri"], d, config) + transformed["http_method"] = + flattenCloudSchedulerJobHttpTargetHttpMethod(original["httpMethod"], d, config) + transformed["body"] = + flattenCloudSchedulerJobHttpTargetBody(original["body"], d, config) + transformed["headers"] = + flattenCloudSchedulerJobHttpTargetHeaders(original["headers"], d, config) + transformed["oauth_token"] = + flattenCloudSchedulerJobHttpTargetOauthToken(original["oauthToken"], d, config) + transformed["oidc_token"] = + flattenCloudSchedulerJobHttpTargetOidcToken(original["oidcToken"], d, config) + return []interface{}{transformed} +} +func flattenCloudSchedulerJobHttpTargetUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobHttpTargetHttpMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobHttpTargetBody(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobHttpTargetHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + var headers = v.(map[string]interface{}) + if v, ok := headers["User-Agent"]; ok { + if v.(string) == "AppEngine-Google; (+http://code.google.com/appengine)" { + delete(headers, "User-Agent") + } else if v.(string) == "Google-Cloud-Scheduler" { + delete(headers, "User-Agent") + } else { + headers["User-Agent"] = strings.TrimSpace(strings.Replace(v.(string), "AppEngine-Google; (+http://code.google.com/appengine)", "", -1)) + } + } + if v, ok := headers["Content-Type"]; ok { + if v.(string) == "application/octet-stream" { + delete(headers, "Content-Type") + } + } + r := 
regexp.MustCompile(`(X-Google-|X-AppEngine-|Content-Length).*`) + for key := range headers { + if r.MatchString(key) { + delete(headers, key) + } + } + return headers +} + +func flattenCloudSchedulerJobHttpTargetOauthToken(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["service_account_email"] = + flattenCloudSchedulerJobHttpTargetOauthTokenServiceAccountEmail(original["serviceAccountEmail"], d, config) + transformed["scope"] = + flattenCloudSchedulerJobHttpTargetOauthTokenScope(original["scope"], d, config) + return []interface{}{transformed} +} +func flattenCloudSchedulerJobHttpTargetOauthTokenServiceAccountEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobHttpTargetOauthTokenScope(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobHttpTargetOidcToken(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["service_account_email"] = + flattenCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(original["serviceAccountEmail"], d, config) + transformed["audience"] = + flattenCloudSchedulerJobHttpTargetOidcTokenAudience(original["audience"], d, config) + return []interface{}{transformed} +} +func flattenCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenCloudSchedulerJobHttpTargetOidcTokenAudience(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { 
+ return v +} + +func expandCloudSchedulerJobName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/jobs/{{name}}") +} + +func expandCloudSchedulerJobDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobTimeZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobPaused(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobAttemptDeadline(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobRetryConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRetryCount, err := expandCloudSchedulerJobRetryConfigRetryCount(original["retry_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRetryCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["retryCount"] = transformedRetryCount + } + + transformedMaxRetryDuration, err := expandCloudSchedulerJobRetryConfigMaxRetryDuration(original["max_retry_duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxRetryDuration); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["maxRetryDuration"] = transformedMaxRetryDuration + } + + transformedMinBackoffDuration, err := expandCloudSchedulerJobRetryConfigMinBackoffDuration(original["min_backoff_duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinBackoffDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minBackoffDuration"] = transformedMinBackoffDuration + } + + transformedMaxBackoffDuration, err := expandCloudSchedulerJobRetryConfigMaxBackoffDuration(original["max_backoff_duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxBackoffDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxBackoffDuration"] = transformedMaxBackoffDuration + } + + transformedMaxDoublings, err := expandCloudSchedulerJobRetryConfigMaxDoublings(original["max_doublings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxDoublings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxDoublings"] = transformedMaxDoublings + } + + return transformed, nil +} + +func expandCloudSchedulerJobRetryConfigRetryCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobRetryConfigMaxRetryDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobRetryConfigMinBackoffDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobRetryConfigMaxBackoffDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobRetryConfigMaxDoublings(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobPubsubTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTopicName, err := expandCloudSchedulerJobPubsubTargetTopicName(original["topic_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTopicName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["topicName"] = transformedTopicName + } + + transformedData, err := expandCloudSchedulerJobPubsubTargetData(original["data"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedData); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["data"] = transformedData + } + + transformedAttributes, err := expandCloudSchedulerJobPubsubTargetAttributes(original["attributes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAttributes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["attributes"] = transformedAttributes + } + + return transformed, nil +} + +func expandCloudSchedulerJobPubsubTargetTopicName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobPubsubTargetData(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobPubsubTargetAttributes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range 
v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudSchedulerJobAppEngineHttpTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHttpMethod, err := expandCloudSchedulerJobAppEngineHttpTargetHttpMethod(original["http_method"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHttpMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["httpMethod"] = transformedHttpMethod + } + + transformedAppEngineRouting, err := expandCloudSchedulerJobAppEngineHttpTargetAppEngineRouting(original["app_engine_routing"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAppEngineRouting); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["appEngineRouting"] = transformedAppEngineRouting + } + + transformedRelativeUri, err := expandCloudSchedulerJobAppEngineHttpTargetRelativeUri(original["relative_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRelativeUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["relativeUri"] = transformedRelativeUri + } + + transformedBody, err := expandCloudSchedulerJobAppEngineHttpTargetBody(original["body"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBody); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["body"] = transformedBody + } + + transformedHeaders, err := expandCloudSchedulerJobAppEngineHttpTargetHeaders(original["headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["headers"] = 
transformedHeaders + } + + return transformed, nil +} + +func expandCloudSchedulerJobAppEngineHttpTargetHttpMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobAppEngineHttpTargetAppEngineRouting(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedService, err := expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingService(original["service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["service"] = transformedService + } + + transformedVersion, err := expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedInstance, err := expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingInstance(original["instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["instance"] = transformedInstance + } + + return transformed, nil +} + +func expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobAppEngineHttpTargetRelativeUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobAppEngineHttpTargetBody(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobAppEngineHttpTargetHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudSchedulerJobHttpTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandCloudSchedulerJobHttpTargetUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedHttpMethod, err := expandCloudSchedulerJobHttpTargetHttpMethod(original["http_method"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHttpMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["httpMethod"] = transformedHttpMethod + } + + transformedBody, err := expandCloudSchedulerJobHttpTargetBody(original["body"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedBody); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["body"] = transformedBody + } + + transformedHeaders, err := expandCloudSchedulerJobHttpTargetHeaders(original["headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["headers"] = transformedHeaders + } + + transformedOauthToken, err := expandCloudSchedulerJobHttpTargetOauthToken(original["oauth_token"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOauthToken); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["oauthToken"] = transformedOauthToken + } + + transformedOidcToken, err := expandCloudSchedulerJobHttpTargetOidcToken(original["oidc_token"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOidcToken); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["oidcToken"] = transformedOidcToken + } + + return transformed, nil +} + +func expandCloudSchedulerJobHttpTargetUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobHttpTargetHttpMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobHttpTargetBody(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobHttpTargetHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudSchedulerJobHttpTargetOauthToken(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceAccountEmail, err := expandCloudSchedulerJobHttpTargetOauthTokenServiceAccountEmail(original["service_account_email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAccountEmail"] = transformedServiceAccountEmail + } + + transformedScope, err := expandCloudSchedulerJobHttpTargetOauthTokenScope(original["scope"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScope); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scope"] = transformedScope + } + + return transformed, nil +} + +func expandCloudSchedulerJobHttpTargetOauthTokenServiceAccountEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobHttpTargetOauthTokenScope(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobHttpTargetOidcToken(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceAccountEmail, err := expandCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(original["service_account_email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["serviceAccountEmail"] = transformedServiceAccountEmail + } + + transformedAudience, err := expandCloudSchedulerJobHttpTargetOidcTokenAudience(original["audience"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAudience); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["audience"] = transformedAudience + } + + return transformed, nil +} + +func expandCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudSchedulerJobHttpTargetOidcTokenAudience(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceCloudSchedulerJobEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + delete(obj, "paused") // Field doesn't exist in API + return obj, nil +} + +func resourceCloudSchedulerJobUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + delete(obj, "paused") // Field doesn't exist in API + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudscheduler/resource_cloud_scheduler_job_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudscheduler/resource_cloud_scheduler_job_sweeper.go new file mode 100644 index 0000000000..ae2377993b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudscheduler/resource_cloud_scheduler_job_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package cloudscheduler + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudSchedulerJob", testSweepCloudSchedulerJob) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudSchedulerJob(region string) error { + resourceName := "CloudSchedulerJob" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://cloudscheduler.googleapis.com/v1/projects/{{project}}/locations/{{region}}/jobs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["jobs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudscheduler.googleapis.com/v1/projects/{{project}}/locations/{{region}}/jobs/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s 
: %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudtasks/iam_cloud_tasks_queue.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudtasks/iam_cloud_tasks_queue.go new file mode 100644 index 0000000000..7863fc0541 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudtasks/iam_cloud_tasks_queue.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudtasks + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var CloudTasksQueueIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type CloudTasksQueueIamUpdater struct { + project string + location string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func CloudTasksQueueIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/queues/(?P[^/]+)", 
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudTasksQueueIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func CloudTasksQueueIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/queues/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &CloudTasksQueueIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *CloudTasksQueueIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyQueueUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + 
userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *CloudTasksQueueIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyQueueUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *CloudTasksQueueIamUpdater) qualifyQueueUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{CloudTasksBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/queues/%s", u.project, u.location, u.name), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u 
*CloudTasksQueueIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/queues/%s", u.project, u.location, u.name) +} + +func (u *CloudTasksQueueIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-cloudtasks-queue-%s", u.GetResourceId()) +} + +func (u *CloudTasksQueueIamUpdater) DescribeResource() string { + return fmt.Sprintf("cloudtasks queue %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudtasks/resource_cloud_tasks_queue.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudtasks/resource_cloud_tasks_queue.go new file mode 100644 index 0000000000..560edccb41 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudtasks/resource_cloud_tasks_queue.go @@ -0,0 +1,907 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudtasks + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func suppressOmittedMaxDuration(_, old, new string, _ *schema.ResourceData) bool { + if old == "" && new == "0s" { + log.Printf("[INFO] max retry is 0s and api omitted field, suppressing diff") + return true + } + return false +} + +func ResourceCloudTasksQueue() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudTasksQueueCreate, + Read: resourceCloudTasksQueueRead, + Update: resourceCloudTasksQueueUpdate, + Delete: resourceCloudTasksQueueDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudTasksQueueImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location of the queue`, + }, + "app_engine_routing_override": { + Type: schema.TypeList, + Optional: true, + Description: `Overrides for task-level appEngineRouting. These settings apply only +to App Engine tasks in this queue`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Optional: true, + Description: `App instance. + +By default, the task is sent to an instance which is available when the task is attempted.`, + }, + "service": { + Type: schema.TypeString, + Optional: true, + Description: `App service. 
+ +By default, the task is sent to the service which is the default service when the task is attempted.`, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `App version. + +By default, the task is sent to the version which is the default version when the task is attempted.`, + }, + "host": { + Type: schema.TypeString, + Computed: true, + Description: `The host that the task is sent to.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The queue name.`, + }, + "rate_limits": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Rate limits for task dispatches. + +The queue's actual dispatch rate is the result of: + +* Number of tasks in the queue +* User-specified throttling: rateLimits, retryConfig, and the queue's state. +* System throttling due to 429 (Too Many Requests) or 503 (Service + Unavailable) responses from the worker, high error rates, or to + smooth sudden large traffic spikes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_concurrent_dispatches": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The maximum number of concurrent tasks that Cloud Tasks allows to +be dispatched for this queue. After this threshold has been +reached, Cloud Tasks stops dispatching tasks until the number of +concurrent requests decreases.`, + }, + "max_dispatches_per_second": { + Type: schema.TypeFloat, + Computed: true, + Optional: true, + Description: `The maximum rate at which tasks are dispatched from this queue. + +If unspecified when the queue is created, Cloud Tasks will pick the default.`, + }, + "max_burst_size": { + Type: schema.TypeInt, + Computed: true, + Description: `The max burst size. + +Max burst size limits how fast tasks in queue are processed when many tasks are +in the queue and the rate is high. 
This field allows the queue to have a high +rate so processing starts shortly after a task is enqueued, but still limits +resource usage when many tasks are enqueued in a short period of time.`, + }, + }, + }, + }, + "retry_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Settings that determine the retry behavior.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_attempts": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Number of attempts per task. + +Cloud Tasks will attempt the task maxAttempts times (that is, if +the first attempt fails, then there will be maxAttempts - 1 +retries). Must be >= -1. + +If unspecified when the queue is created, Cloud Tasks will pick +the default. + +-1 indicates unlimited attempts.`, + }, + "max_backoff": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `A task will be scheduled for retry between minBackoff and +maxBackoff duration after it fails, if the queue's RetryConfig +specifies that the task should be retried.`, + }, + "max_doublings": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The time between retries will double maxDoublings times. + +A task's retry interval starts at minBackoff, then doubles maxDoublings times, +then increases linearly, and finally retries retries at intervals of maxBackoff +up to maxAttempts times.`, + }, + "max_retry_duration": { + Type: schema.TypeString, + Computed: true, + Optional: true, + DiffSuppressFunc: suppressOmittedMaxDuration, + Description: `If positive, maxRetryDuration specifies the time limit for +retrying a failed task, measured from when the task was first +attempted. Once maxRetryDuration time has passed and the task has +been attempted maxAttempts times, no further attempts will be +made and the task will be deleted. 
+ +If zero, then the task age is unlimited.`, + }, + "min_backoff": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `A task will be scheduled for retry between minBackoff and +maxBackoff duration after it fails, if the queue's RetryConfig +specifies that the task should be retried.`, + }, + }, + }, + }, + "stackdriver_logging_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration options for writing logs to Stackdriver Logging.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sampling_ratio": { + Type: schema.TypeFloat, + Required: true, + Description: `Specifies the fraction of operations to write to Stackdriver Logging. +This field may contain any value between 0.0 and 1.0, inclusive. 0.0 is the +default and means that no operations are logged.`, + }, + }, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudTasksQueueCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandCloudTasksQueueName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + appEngineRoutingOverrideProp, err := expandCloudTasksQueueAppEngineRoutingOverride(d.Get("app_engine_routing_override"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("app_engine_routing_override"); !tpgresource.IsEmptyValue(reflect.ValueOf(appEngineRoutingOverrideProp)) && (ok || !reflect.DeepEqual(v, appEngineRoutingOverrideProp)) { + obj["appEngineRoutingOverride"] = appEngineRoutingOverrideProp 
+ } + rateLimitsProp, err := expandCloudTasksQueueRateLimits(d.Get("rate_limits"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rate_limits"); !tpgresource.IsEmptyValue(reflect.ValueOf(rateLimitsProp)) && (ok || !reflect.DeepEqual(v, rateLimitsProp)) { + obj["rateLimits"] = rateLimitsProp + } + retryConfigProp, err := expandCloudTasksQueueRetryConfig(d.Get("retry_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("retry_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(retryConfigProp)) && (ok || !reflect.DeepEqual(v, retryConfigProp)) { + obj["retryConfig"] = retryConfigProp + } + stackdriverLoggingConfigProp, err := expandCloudTasksQueueStackdriverLoggingConfig(d.Get("stackdriver_logging_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("stackdriver_logging_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(stackdriverLoggingConfigProp)) && (ok || !reflect.DeepEqual(v, stackdriverLoggingConfigProp)) { + obj["stackdriverLoggingConfig"] = stackdriverLoggingConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudTasksBasePath}}projects/{{project}}/locations/{{location}}/queues") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Queue: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Queue: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Queue: %s", err) + } + + // Store 
the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/queues/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Queue %q: %#v", d.Id(), res) + + return resourceCloudTasksQueueRead(d, meta) +} + +func resourceCloudTasksQueueRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudTasksBasePath}}projects/{{project}}/locations/{{location}}/queues/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Queue: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("CloudTasksQueue %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Queue: %s", err) + } + + if err := d.Set("name", flattenCloudTasksQueueName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Queue: %s", err) + } + if err := d.Set("app_engine_routing_override", flattenCloudTasksQueueAppEngineRoutingOverride(res["appEngineRoutingOverride"], d, config)); err != nil { + return fmt.Errorf("Error reading Queue: %s", err) + } + if err := d.Set("rate_limits", flattenCloudTasksQueueRateLimits(res["rateLimits"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Queue: %s", err) + } + if err := d.Set("retry_config", flattenCloudTasksQueueRetryConfig(res["retryConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Queue: %s", err) + } + if err := d.Set("stackdriver_logging_config", flattenCloudTasksQueueStackdriverLoggingConfig(res["stackdriverLoggingConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Queue: %s", err) + } + + return nil +} + +func resourceCloudTasksQueueUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Queue: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + appEngineRoutingOverrideProp, err := expandCloudTasksQueueAppEngineRoutingOverride(d.Get("app_engine_routing_override"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("app_engine_routing_override"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, appEngineRoutingOverrideProp)) { + obj["appEngineRoutingOverride"] = appEngineRoutingOverrideProp + } + rateLimitsProp, err := expandCloudTasksQueueRateLimits(d.Get("rate_limits"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rate_limits"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rateLimitsProp)) { + obj["rateLimits"] = rateLimitsProp + } + retryConfigProp, err := expandCloudTasksQueueRetryConfig(d.Get("retry_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("retry_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retryConfigProp)) { + obj["retryConfig"] = retryConfigProp + } + stackdriverLoggingConfigProp, err := 
expandCloudTasksQueueStackdriverLoggingConfig(d.Get("stackdriver_logging_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("stackdriver_logging_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, stackdriverLoggingConfigProp)) { + obj["stackdriverLoggingConfig"] = stackdriverLoggingConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudTasksBasePath}}projects/{{project}}/locations/{{location}}/queues/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Queue %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("app_engine_routing_override") { + updateMask = append(updateMask, "appEngineRoutingOverride") + } + + if d.HasChange("rate_limits") { + updateMask = append(updateMask, "rateLimits") + } + + if d.HasChange("retry_config") { + updateMask = append(updateMask, "retryConfig") + } + + if d.HasChange("stackdriver_logging_config") { + updateMask = append(updateMask, "stackdriverLoggingConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Queue %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Queue %q: %#v", d.Id(), res) + } + + return resourceCloudTasksQueueRead(d, meta) +} + +func resourceCloudTasksQueueDelete(d *schema.ResourceData, meta interface{}) 
error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Queue: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{CloudTasksBasePath}}projects/{{project}}/locations/{{location}}/queues/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Queue %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Queue") + } + + log.Printf("[DEBUG] Finished deleting Queue %q: %#v", d.Id(), res) + return nil +} + +func resourceCloudTasksQueueImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/queues/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/queues/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenCloudTasksQueueName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + 
		return v
	}
	return tpgresource.NameFromSelfLinkStateFunc(v)
}

// service, version, and instance are input-only. host is output-only.
// The API only returns host, so the other three sub-fields are echoed back
// from the user's configuration to avoid a permanent diff.
func flattenCloudTasksQueueAppEngineRoutingOverride(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	if v == nil {
		return nil
	}
	original := v.(map[string]interface{})
	if len(original) == 0 {
		return nil
	}
	transformed := make(map[string]interface{})
	transformed["host"] = original["host"]
	if override, ok := d.GetOk("app_engine_routing_override"); ok && len(override.([]interface{})) > 0 {
		transformed["service"] = d.Get("app_engine_routing_override.0.service")
		transformed["version"] = d.Get("app_engine_routing_override.0.version")
		transformed["instance"] = d.Get("app_engine_routing_override.0.instance")
	}
	return []interface{}{transformed}
}

// flattenCloudTasksQueueRateLimits maps the API rateLimits object onto the
// single-element rate_limits schema list.
func flattenCloudTasksQueueRateLimits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	if v == nil {
		return nil
	}
	original := v.(map[string]interface{})
	if len(original) == 0 {
		return nil
	}
	transformed := make(map[string]interface{})
	transformed["max_dispatches_per_second"] =
		flattenCloudTasksQueueRateLimitsMaxDispatchesPerSecond(original["maxDispatchesPerSecond"], d, config)
	transformed["max_concurrent_dispatches"] =
		flattenCloudTasksQueueRateLimitsMaxConcurrentDispatches(original["maxConcurrentDispatches"], d, config)
	transformed["max_burst_size"] =
		flattenCloudTasksQueueRateLimitsMaxBurstSize(original["maxBurstSize"], d, config)
	return []interface{}{transformed}
}

// Pass-through: value is used as-is.
func flattenCloudTasksQueueRateLimitsMaxDispatchesPerSecond(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

func flattenCloudTasksQueueRateLimitsMaxConcurrentDispatches(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	// Handles the string fixed64 format
	if strVal, ok := v.(string); ok {
		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
			return intVal
		}
	}

	// number values are represented as float64
	if floatVal, ok := v.(float64); ok {
		intVal := int(floatVal)
		return intVal
	}

	return v // let terraform core handle it otherwise
}

func flattenCloudTasksQueueRateLimitsMaxBurstSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	// Handles the string fixed64 format
	if strVal, ok := v.(string); ok {
		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
			return intVal
		}
	}

	// number values are represented as float64
	if floatVal, ok := v.(float64); ok {
		intVal := int(floatVal)
		return intVal
	}

	return v // let terraform core handle it otherwise
}

// flattenCloudTasksQueueRetryConfig maps the API retryConfig object onto the
// single-element retry_config schema list.
func flattenCloudTasksQueueRetryConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	if v == nil {
		return nil
	}
	original := v.(map[string]interface{})
	if len(original) == 0 {
		return nil
	}
	transformed := make(map[string]interface{})
	transformed["max_attempts"] =
		flattenCloudTasksQueueRetryConfigMaxAttempts(original["maxAttempts"], d, config)
	transformed["max_retry_duration"] =
		flattenCloudTasksQueueRetryConfigMaxRetryDuration(original["maxRetryDuration"], d, config)
	transformed["min_backoff"] =
		flattenCloudTasksQueueRetryConfigMinBackoff(original["minBackoff"], d, config)
	transformed["max_backoff"] =
		flattenCloudTasksQueueRetryConfigMaxBackoff(original["maxBackoff"], d, config)
	transformed["max_doublings"] =
		flattenCloudTasksQueueRetryConfigMaxDoublings(original["maxDoublings"], d, config)
	return []interface{}{transformed}
}

func flattenCloudTasksQueueRetryConfigMaxAttempts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	// Handles the string fixed64 format
	if strVal, ok := v.(string); ok {
		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
			return intVal
		}
	}

	// number values are represented as float64
	if floatVal, ok := v.(float64); ok {
		intVal := int(floatVal)
		return intVal
	}

	return v // let terraform core handle it otherwise
}

// Duration fields (e.g. "3.5s") are kept as the API's string form.
func flattenCloudTasksQueueRetryConfigMaxRetryDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

func flattenCloudTasksQueueRetryConfigMinBackoff(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

func flattenCloudTasksQueueRetryConfigMaxBackoff(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

func flattenCloudTasksQueueRetryConfigMaxDoublings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	// Handles the string fixed64 format
	if strVal, ok := v.(string); ok {
		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
			return intVal
		}
	}

	// number values are represented as float64
	if floatVal, ok := v.(float64); ok {
		intVal := int(floatVal)
		return intVal
	}

	return v // let terraform core handle it otherwise
}

// flattenCloudTasksQueueStackdriverLoggingConfig maps the API
// stackdriverLoggingConfig object onto the single-element schema list.
func flattenCloudTasksQueueStackdriverLoggingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	if v == nil {
		return nil
	}
	original := v.(map[string]interface{})
	if len(original) == 0 {
		return nil
	}
	transformed := make(map[string]interface{})
	transformed["sampling_ratio"] =
		flattenCloudTasksQueueStackdriverLoggingConfigSamplingRatio(original["samplingRatio"], d, config)
	return []interface{}{transformed}
}

func flattenCloudTasksQueueStackdriverLoggingConfigSamplingRatio(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// expandCloudTasksQueueName builds the full resource name for the API from
// the configured project/location/name (continues in the next chunk).
func expandCloudTasksQueueName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return tpgresource.ReplaceVars(d, config,
		"projects/{{project}}/locations/{{location}}/queues/{{name}}")
}

// expandCloudTasksQueueAppEngineRoutingOverride converts the single-element
// app_engine_routing_override schema list into the API object, dropping
// empty sub-fields.
func expandCloudTasksQueueAppEngineRoutingOverride(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	l := v.([]interface{})
	if len(l) == 0 || l[0] == nil {
		return nil, nil
	}
	raw := l[0]
	original := raw.(map[string]interface{})
	transformed := make(map[string]interface{})

	transformedService, err := expandCloudTasksQueueAppEngineRoutingOverrideService(original["service"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["service"] = transformedService
	}

	transformedVersion, err := expandCloudTasksQueueAppEngineRoutingOverrideVersion(original["version"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["version"] = transformedVersion
	}

	transformedInstance, err := expandCloudTasksQueueAppEngineRoutingOverrideInstance(original["instance"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["instance"] = transformedInstance
	}

	transformedHost, err := expandCloudTasksQueueAppEngineRoutingOverrideHost(original["host"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["host"] = transformedHost
	}

	return transformed, nil
}

// Leaf expanders below are identity transforms (value passed through).
func expandCloudTasksQueueAppEngineRoutingOverrideService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

func expandCloudTasksQueueAppEngineRoutingOverrideVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

func expandCloudTasksQueueAppEngineRoutingOverrideInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

func expandCloudTasksQueueAppEngineRoutingOverrideHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandCloudTasksQueueRateLimits converts the single-element rate_limits
// schema list into the API rateLimits object, dropping empty sub-fields.
func expandCloudTasksQueueRateLimits(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	l := v.([]interface{})
	if len(l) == 0 || l[0] == nil {
		return nil, nil
	}
	raw := l[0]
	original := raw.(map[string]interface{})
	transformed := make(map[string]interface{})

	transformedMaxDispatchesPerSecond, err := expandCloudTasksQueueRateLimitsMaxDispatchesPerSecond(original["max_dispatches_per_second"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedMaxDispatchesPerSecond); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["maxDispatchesPerSecond"] = transformedMaxDispatchesPerSecond
	}

	transformedMaxConcurrentDispatches, err := expandCloudTasksQueueRateLimitsMaxConcurrentDispatches(original["max_concurrent_dispatches"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedMaxConcurrentDispatches); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["maxConcurrentDispatches"] = transformedMaxConcurrentDispatches
	}

	transformedMaxBurstSize, err := expandCloudTasksQueueRateLimitsMaxBurstSize(original["max_burst_size"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedMaxBurstSize); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["maxBurstSize"] = transformedMaxBurstSize
	}

	return transformed, nil
}

func expandCloudTasksQueueRateLimitsMaxDispatchesPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

func expandCloudTasksQueueRateLimitsMaxConcurrentDispatches(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

func expandCloudTasksQueueRateLimitsMaxBurstSize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandCloudTasksQueueRetryConfig converts the single-element retry_config
// schema list into the API retryConfig object, dropping empty sub-fields.
func expandCloudTasksQueueRetryConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	l := v.([]interface{})
	if len(l) == 0 || l[0] == nil {
		return nil, nil
	}
	raw := l[0]
	original := raw.(map[string]interface{})
	transformed := make(map[string]interface{})

	transformedMaxAttempts, err := expandCloudTasksQueueRetryConfigMaxAttempts(original["max_attempts"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedMaxAttempts); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["maxAttempts"] = transformedMaxAttempts
	}

	transformedMaxRetryDuration, err := expandCloudTasksQueueRetryConfigMaxRetryDuration(original["max_retry_duration"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedMaxRetryDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["maxRetryDuration"] = transformedMaxRetryDuration
	}

	transformedMinBackoff, err := expandCloudTasksQueueRetryConfigMinBackoff(original["min_backoff"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedMinBackoff); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["minBackoff"] = transformedMinBackoff
	}

	transformedMaxBackoff, err := expandCloudTasksQueueRetryConfigMaxBackoff(original["max_backoff"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedMaxBackoff); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["maxBackoff"] = transformedMaxBackoff
	}

	transformedMaxDoublings, err := expandCloudTasksQueueRetryConfigMaxDoublings(original["max_doublings"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedMaxDoublings); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["maxDoublings"] = transformedMaxDoublings
	}

	return transformed, nil
}

func expandCloudTasksQueueRetryConfigMaxAttempts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

func expandCloudTasksQueueRetryConfigMaxRetryDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

func expandCloudTasksQueueRetryConfigMinBackoff(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

func expandCloudTasksQueueRetryConfigMaxBackoff(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

func expandCloudTasksQueueRetryConfigMaxDoublings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandCloudTasksQueueStackdriverLoggingConfig converts the single-element
// stackdriver_logging_config list into the API object (continues next chunk).
func expandCloudTasksQueueStackdriverLoggingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	l := v.([]interface{})
	if len(l) == 0 || l[0] == nil {
		return nil, nil
	}
	raw := l[0]
	original := raw.(map[string]interface{})
	transformed := make(map[string]interface{})

	transformedSamplingRatio, err := expandCloudTasksQueueStackdriverLoggingConfigSamplingRatio(original["sampling_ratio"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedSamplingRatio); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["samplingRatio"] = transformedSamplingRatio
	}

	return
transformed, nil +} + +func expandCloudTasksQueueStackdriverLoggingConfigSamplingRatio(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudtasks/resource_cloud_tasks_queue_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudtasks/resource_cloud_tasks_queue_sweeper.go new file mode 100644 index 0000000000..c496e95a89 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/cloudtasks/resource_cloud_tasks_queue_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package cloudtasks + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudTasksQueue", testSweepCloudTasksQueue) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudTasksQueue(region string) error { + resourceName := "CloudTasksQueue" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://cloudtasks.googleapis.com/v2/projects/{{project}}/locations/{{location}}/queues", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] 
Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["queues"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudtasks.googleapis.com/v2/projects/{{project}}/locations/{{location}}/queues/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/composer_operation.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/composer_operation.go
new file mode 100644
index 0000000000..4825137177
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/composer_operation.go
@@ -0,0 +1,35 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package composer

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"

	"google.golang.org/api/composer/v1"
)

// ComposerOperationWaiter polls a Composer long-running operation via the
// ProjectsLocations operations API; polling mechanics come from the embedded
// CommonOperationWaiter.
type ComposerOperationWaiter struct {
	Service *composer.ProjectsLocationsService
	tpgresource.CommonOperationWaiter
}

// QueryOp fetches the current state of the tracked operation.
func (w *ComposerOperationWaiter) QueryOp() (interface{}, error) {
	if w == nil {
		return nil, fmt.Errorf("Cannot query operation, it's unset or nil.")
	}
	return w.Service.Operations.Get(w.Op.Name).Do()
}

// ComposerOperationWaitTime blocks until op completes or timeout elapses,
// polling at config.PollInterval; activity labels the operation in errors.
func ComposerOperationWaitTime(config *transport_tpg.Config, op *composer.Operation, project, activity, userAgent string, timeout time.Duration) error {
	w := &ComposerOperationWaiter{
		Service: config.NewComposerClient(userAgent).Projects.Locations,
	}
	if err := w.SetOp(op); err != nil {
		return err
	}
	return tpgresource.OperationWait(w, activity, timeout, config.PollInterval)
}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/data_source_google_composer_environment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/data_source_google_composer_environment.go
new file mode 100644
index 0000000000..755b73cf8b
--- /dev/null
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/data_source_google_composer_environment.go
@@ -0,0 +1,43 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package composer

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
)

// DataSourceGoogleComposerEnvironment derives the data-source schema from the
// resource schema, requiring "name" and allowing "project"/"region".
func DataSourceGoogleComposerEnvironment() *schema.Resource {
	dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComposerEnvironment().Schema)

	// Set 'Required' schema elements
	tpgresource.AddRequiredFieldsToSchema(dsSchema, "name")

	// Set 'Optional' schema elements
	tpgresource.AddOptionalFieldsToSchema(dsSchema, "project", "region")

	return &schema.Resource{
		Read:   dataSourceGoogleComposerEnvironmentRead,
		Schema: dsSchema,
	}
}

// dataSourceGoogleComposerEnvironmentRead sets the canonical environment id
// and delegates the actual read to the resource's Read implementation.
func dataSourceGoogleComposerEnvironmentRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*transport_tpg.Config)
	project, err := tpgresource.GetProject(d, config)
	if err != nil {
		return err
	}
	region, err := tpgresource.GetRegion(d, config)
	if err != nil {
		return err
	}
	envName := d.Get("name").(string)

	d.SetId(fmt.Sprintf("projects/%s/locations/%s/environments/%s", project, region, envName))

	return resourceComposerEnvironmentRead(d, meta)
}
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_composer_image_versions.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/data_source_google_composer_image_versions.go
similarity index 75%
rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_composer_image_versions.go
rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/data_source_google_composer_image_versions.go
index 6b320ce9a2..3d6b973dbc 100644
---
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_composer_image_versions.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/data_source_google_composer_image_versions.go @@ -1,10 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package composer import ( "fmt" "log" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleComposerImageVersions() *schema.Resource { @@ -43,28 +47,28 @@ func DataSourceGoogleComposerImageVersions() *schema.Resource { } func dataSourceGoogleComposerImageVersionsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } - url, err := replaceVars(d, config, "{{ComposerBasePath}}projects/{{project}}/locations/{{region}}/imageVersions") + url, err := tpgresource.ReplaceVars(d, config, "{{ComposerBasePath}}projects/{{project}}/locations/{{region}}/imageVersions") if err != nil { return err } - versions, err := paginatedListRequest(project, url, userAgent, config, flattenGoogleComposerImageVersions) + versions, err := tpgresource.PaginatedListRequest(project, url, userAgent, config, flattenGoogleComposerImageVersions) if err != nil { return fmt.Errorf("Error listing Composer image versions: %s", err) } diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_composer_environment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/resource_composer_environment.go similarity index 92% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_composer_environment.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/resource_composer_environment.go index 44b14be8d8..703f024d45 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_composer_environment.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/resource_composer_environment.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package composer import ( "fmt" @@ -11,6 +13,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/composer/v1" ) @@ -60,6 +66,7 @@ var ( "config.0.workloads_config", "config.0.environment_size", "config.0.master_authorized_networks_config", + "config.0.resilience_mode", } recoveryConfigKeys = []string{ @@ -144,7 +151,7 @@ func ResourceComposerEnvironment() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCEName, + ValidateFunc: verify.ValidateGCEName, Description: `Name of the environment.`, }, "region": { @@ -191,7 +198,7 @@ func ResourceComposerEnvironment() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - 
DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The Compute Engine zone in which to deploy the VMs running the Apache Airflow software, specified as the zone name or relative resource name (e.g. "projects/{project}/zones/{zone}"). Must belong to the enclosing environment's project and region. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, }, "machine_type": { @@ -199,7 +206,7 @@ func ResourceComposerEnvironment() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The Compute Engine machine type used for cluster instances, specified as a name or relative resource name. For example: "projects/{project}/zones/{zone}/machineTypes/{machineType}". Must belong to the enclosing environment's project and region/zone. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, }, "network": { @@ -207,14 +214,14 @@ func ResourceComposerEnvironment() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The Compute Engine machine type used for cluster instances, specified as a name or relative resource name. For example: "projects/{project}/zones/{zone}/machineTypes/{machineType}". Must belong to the enclosing environment's project and region/zone. The network must belong to the environment's project. If unspecified, the "default" network ID in the environment's project is used. 
If a Custom Subnet Network is provided, subnetwork must also be provided.`, }, "subnetwork": { Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The Compute Engine subnetwork to be used for machine communications, , specified as a self-link, relative resource name (e.g. "projects/{project}/regions/{region}/subnetworks/{subnetwork}"), or by name. If subnetwork is provided, network must also be provided and the subnetwork must belong to the enclosing environment's project and region.`, }, "disk_size_gb": { @@ -301,7 +308,7 @@ func ResourceComposerEnvironment() *schema.Resource { ForceNew: true, AtLeastOneOf: composerIpAllocationPolicyKeys, Description: `The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. Specify either cluster_secondary_range_name or cluster_ipv4_cidr_block but not both.`, - DiffSuppressFunc: cidrOrSizeDiffSuppress, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, ConflictsWith: []string{"config.0.node_config.0.ip_allocation_policy.0.cluster_secondary_range_name"}, }, "services_ipv4_cidr_block": { @@ -310,7 +317,7 @@ func ResourceComposerEnvironment() *schema.Resource { ForceNew: true, AtLeastOneOf: composerIpAllocationPolicyKeys, Description: `The IP address range used to allocate IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true. 
Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. Specify either services_secondary_range_name or services_ipv4_cidr_block but not both.`, - DiffSuppressFunc: cidrOrSizeDiffSuppress, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, ConflictsWith: []string{"config.0.node_config.0.ip_allocation_policy.0.services_secondary_range_name"}, }, }, @@ -399,7 +406,7 @@ func ResourceComposerEnvironment() *schema.Resource { Optional: true, ForceNew: true, AtLeastOneOf: composerSoftwareConfigKeys, - ValidateFunc: validateRegexp(composerEnvironmentVersionRegexp), + ValidateFunc: verify.ValidateRegexp(composerEnvironmentVersionRegexp), DiffSuppressFunc: composerImageVersionDiffSuppress, Description: `The version of the software running in the environment. This encapsulates both the version of Cloud Composer functionality and the version of Apache Airflow. It must match the regular expression composer-([0-9]+(\.[0-9]+\.[0-9]+(-preview\.[0-9]+)?)?|latest)-airflow-([0-9]+(\.[0-9]+(\.[0-9]+)?)?). The Cloud Composer portion of the image version is a full semantic version, or an alias in the form of major version number or 'latest'. The Apache Airflow portion of the image version is a full semantic version that points to one of the supported Apache Airflow versions, or an alias in the form of only major or major.minor versions specified. 
See documentation for more details and version list.`, }, @@ -485,7 +492,7 @@ func ResourceComposerEnvironment() *schema.Resource { Computed: true, AtLeastOneOf: composerPrivateEnvironmentConfig, ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Description: `When specified, the environment will use Private Service Connect instead of VPC peerings to connect to Cloud SQL in the Tenant Project, and the PSC endpoint in the Customer Project will use an IP address from this subnetwork. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.`, }, }, @@ -733,6 +740,15 @@ func ResourceComposerEnvironment() *schema.Resource { ValidateFunc: validation.StringInSlice([]string{"ENVIRONMENT_SIZE_SMALL", "ENVIRONMENT_SIZE_MEDIUM", "ENVIRONMENT_SIZE_LARGE"}, false), Description: `The size of the Cloud Composer environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.`, }, + "resilience_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + AtLeastOneOf: composerConfigKeys, + ValidateFunc: validation.StringInSlice([]string{"HIGH_RESILIENCE"}, false), + Description: `Whether high resilience is enabled or not. 
This field is supported for Cloud Composer environments in versions composer-2.1.15-airflow-*.*.* and newer.`, + }, "master_authorized_networks_config": { Type: schema.TypeList, Optional: true, @@ -785,8 +801,8 @@ func ResourceComposerEnvironment() *schema.Resource { } func resourceComposerEnvironmentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -802,22 +818,22 @@ func resourceComposerEnvironmentCreate(d *schema.ResourceData, meta interface{}) } env := &composer.Environment{ - Name: envName.resourceName(), - Labels: expandLabels(d), + Name: envName.ResourceName(), + Labels: tpgresource.ExpandLabels(d), Config: transformedConfig, } // Some fields cannot be specified during create and must be updated post-creation. updateOnlyEnv := getComposerEnvironmentPostCreateUpdateObj(env) - log.Printf("[DEBUG] Creating new Environment %q", envName.parentName()) - op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Create(envName.parentName(), env).Do() + log.Printf("[DEBUG] Creating new Environment %q", envName.ParentName()) + op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Create(envName.ParentName(), env).Do() if err != nil { return err } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/environments/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/environments/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -851,8 +867,8 @@ func resourceComposerEnvironmentCreate(d *schema.ResourceData, meta interface{}) } func resourceComposerEnvironmentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err 
:= generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -862,20 +878,20 @@ func resourceComposerEnvironmentRead(d *schema.ResourceData, meta interface{}) e return err } - res, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Get(envName.resourceName()).Do() + res, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Get(envName.ResourceName()).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComposerEnvironment %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComposerEnvironment %q", d.Id())) } - // Set from getProject(d) + // Set from GetProject(d) if err := d.Set("project", envName.Project); err != nil { return fmt.Errorf("Error setting Environment: %s", err) } - // Set from getRegion(d) + // Set from GetRegion(d) if err := d.Set("region", envName.Region); err != nil { return fmt.Errorf("Error setting Environment: %s", err) } - if err := d.Set("name", GetResourceNameFromSelfLink(res.Name)); err != nil { + if err := d.Set("name", tpgresource.GetResourceNameFromSelfLink(res.Name)); err != nil { return fmt.Errorf("Error setting Environment: %s", err) } if err := d.Set("config", flattenComposerEnvironmentConfig(res.Config)); err != nil { @@ -888,8 +904,8 @@ func resourceComposerEnvironmentRead(d *schema.ResourceData, meta interface{}) e } func resourceComposerEnvironmentUpdate(d *schema.ResourceData, meta interface{}) error { - tfConfig := meta.(*Config) - userAgent, err := generateUserAgentString(d, tfConfig.UserAgent) + tfConfig := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, tfConfig.UserAgent) if err != nil { return err } @@ -1078,7 +1094,7 @@ func resourceComposerEnvironmentUpdate(d *schema.ResourceData, meta interface{}) } if d.HasChange("labels") { - patchEnv := 
&composer.Environment{Labels: expandLabels(d)} + patchEnv := &composer.Environment{Labels: tpgresource.ExpandLabels(d)} err := resourceComposerEnvironmentPatchField("labels", userAgent, patchEnv, d, tfConfig) if err != nil { return err @@ -1089,7 +1105,7 @@ func resourceComposerEnvironmentUpdate(d *schema.ResourceData, meta interface{}) return resourceComposerEnvironmentRead(d, tfConfig) } -func resourceComposerEnvironmentPostCreateUpdate(updateEnv *composer.Environment, d *schema.ResourceData, cfg *Config, userAgent string) error { +func resourceComposerEnvironmentPostCreateUpdate(updateEnv *composer.Environment, d *schema.ResourceData, cfg *transport_tpg.Config, userAgent string) error { if updateEnv == nil { return nil } @@ -1107,7 +1123,7 @@ func resourceComposerEnvironmentPostCreateUpdate(updateEnv *composer.Environment return resourceComposerEnvironmentRead(d, cfg) } -func resourceComposerEnvironmentPatchField(updateMask, userAgent string, env *composer.Environment, d *schema.ResourceData, config *Config) error { +func resourceComposerEnvironmentPatchField(updateMask, userAgent string, env *composer.Environment, d *schema.ResourceData, config *transport_tpg.Config) error { envJson, _ := env.MarshalJSON() log.Printf("[DEBUG] Updating Environment %q (updateMask = %q): %s", d.Id(), updateMask, string(envJson)) envName, err := resourceComposerEnvironmentName(d, config) @@ -1116,7 +1132,7 @@ func resourceComposerEnvironmentPatchField(updateMask, userAgent string, env *co } op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments. - Patch(envName.resourceName(), env). + Patch(envName.ResourceName(), env). 
UpdateMask(updateMask).Do() if err != nil { return err @@ -1135,8 +1151,8 @@ func resourceComposerEnvironmentPatchField(updateMask, userAgent string, env *co } func resourceComposerEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -1147,7 +1163,7 @@ func resourceComposerEnvironmentDelete(d *schema.ResourceData, meta interface{}) } log.Printf("[DEBUG] Deleting Environment %q", d.Id()) - op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Delete(envName.resourceName()).Do() + op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Delete(envName.ResourceName()).Do() if err != nil { return err } @@ -1164,13 +1180,13 @@ func resourceComposerEnvironmentDelete(d *schema.ResourceData, meta interface{}) } func resourceComposerEnvironmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/environments/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/environments/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/environments/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/environments/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -1199,6 +1215,7 @@ func flattenComposerEnvironmentConfig(envCfg 
*composer.EnvironmentConfig) interf transformed["workloads_config"] = flattenComposerEnvironmentConfigWorkloadsConfig(envCfg.WorkloadsConfig) transformed["recovery_config"] = flattenComposerEnvironmentConfigRecoveryConfig(envCfg.RecoveryConfig) transformed["environment_size"] = envCfg.EnvironmentSize + transformed["resilience_mode"] = envCfg.ResilienceMode transformed["master_authorized_networks_config"] = flattenComposerEnvironmentConfigMasterAuthorizedNetworksConfig(envCfg.MasterAuthorizedNetworksConfig) return []interface{}{transformed} } @@ -1395,14 +1412,14 @@ func flattenComposerEnvironmentConfigNodeConfigOauthScopes(v interface{}) interf if v == nil { return v } - return schema.NewSet(schema.HashString, convertStringArrToInterface(v.([]string))) + return schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(v.([]string))) } func flattenComposerEnvironmentConfigNodeConfigTags(v interface{}) interface{} { if v == nil { return v } - return schema.NewSet(schema.HashString, convertStringArrToInterface(v.([]string))) + return schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(v.([]string))) } func flattenComposerEnvironmentConfigSoftwareConfig(softwareCfg *composer.SoftwareConfig) interface{} { @@ -1440,7 +1457,7 @@ func flattenComposerEnvironmentConfigMasterAuthorizedNetworksConfig(masterAuthNe return []interface{}{masterAuthorizedNetworksConfig} } -func expandComposerEnvironmentConfig(v interface{}, d *schema.ResourceData, config *Config) (*composer.EnvironmentConfig, error) { +func expandComposerEnvironmentConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.EnvironmentConfig, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1514,6 +1531,13 @@ func expandComposerEnvironmentConfig(v interface{}, d *schema.ResourceData, conf return nil, err } transformed.EnvironmentSize = transformedEnvironmentSize + + transformedResilienceMode, err := 
expandComposerEnvironmentConfigResilienceMode(original["resilience_mode"], d, config) + if err != nil { + return nil, err + } + transformed.ResilienceMode = transformedResilienceMode + transformedMasterAuthorizedNetworksConfig, err := expandComposerEnvironmentConfigMasterAuthorizedNetworksConfig(original["master_authorized_networks_config"], d, config) if err != nil { return nil, err @@ -1529,14 +1553,14 @@ func expandComposerEnvironmentConfig(v interface{}, d *schema.ResourceData, conf return transformed, nil } -func expandComposerEnvironmentConfigNodeCount(v interface{}, d *schema.ResourceData, config *Config) (int64, error) { +func expandComposerEnvironmentConfigNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (int64, error) { if v == nil { return 0, nil } return int64(v.(int)), nil } -func expandComposerEnvironmentConfigWebServerNetworkAccessControl(v interface{}, d *schema.ResourceData, config *Config) (*composer.WebServerNetworkAccessControl, error) { +func expandComposerEnvironmentConfigWebServerNetworkAccessControl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.WebServerNetworkAccessControl, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1565,7 +1589,7 @@ func expandComposerEnvironmentConfigWebServerNetworkAccessControl(v interface{}, return transformed, nil } -func expandComposerEnvironmentConfigMasterAuthorizedNetworksConfig(v interface{}, d *schema.ResourceData, config *Config) (*composer.MasterAuthorizedNetworksConfig, error) { +func expandComposerEnvironmentConfigMasterAuthorizedNetworksConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.MasterAuthorizedNetworksConfig, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1592,7 +1616,7 @@ func expandComposerEnvironmentConfigMasterAuthorizedNetworksConfig(v interface{} return transformed, nil } -func expandComposerEnvironmentConfigDatabaseConfig(v interface{}, d 
*schema.ResourceData, config *Config) (*composer.DatabaseConfig, error) { +func expandComposerEnvironmentConfigDatabaseConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.DatabaseConfig, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1606,7 +1630,7 @@ func expandComposerEnvironmentConfigDatabaseConfig(v interface{}, d *schema.Reso return transformed, nil } -func expandComposerEnvironmentConfigWebServerConfig(v interface{}, d *schema.ResourceData, config *Config) (*composer.WebServerConfig, error) { +func expandComposerEnvironmentConfigWebServerConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.WebServerConfig, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1620,7 +1644,7 @@ func expandComposerEnvironmentConfigWebServerConfig(v interface{}, d *schema.Res return transformed, nil } -func expandComposerEnvironmentConfigEncryptionConfig(v interface{}, d *schema.ResourceData, config *Config) (*composer.EncryptionConfig, error) { +func expandComposerEnvironmentConfigEncryptionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.EncryptionConfig, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1634,7 +1658,7 @@ func expandComposerEnvironmentConfigEncryptionConfig(v interface{}, d *schema.Re return transformed, nil } -func expandComposerEnvironmentConfigMaintenanceWindow(v interface{}, d *schema.ResourceData, config *Config) (*composer.MaintenanceWindow, error) { +func expandComposerEnvironmentConfigMaintenanceWindow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.MaintenanceWindow, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1658,7 +1682,7 @@ func expandComposerEnvironmentConfigMaintenanceWindow(v interface{}, d *schema.R return transformed, nil } -func expandComposerEnvironmentConfigWorkloadsConfig(v 
interface{}, d *schema.ResourceData, config *Config) (*composer.WorkloadsConfig, error) { +func expandComposerEnvironmentConfigWorkloadsConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.WorkloadsConfig, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1706,7 +1730,7 @@ func expandComposerEnvironmentConfigWorkloadsConfig(v interface{}, d *schema.Res return transformed, nil } -func expandComposerEnvironmentConfigRecoveryConfig(v interface{}, d *schema.ResourceData, config *Config) (*composer.RecoveryConfig, error) { +func expandComposerEnvironmentConfigRecoveryConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.RecoveryConfig, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1730,14 +1754,21 @@ func expandComposerEnvironmentConfigRecoveryConfig(v interface{}, d *schema.Reso return transformed, nil } -func expandComposerEnvironmentConfigEnvironmentSize(v interface{}, d *schema.ResourceData, config *Config) (string, error) { +func expandComposerEnvironmentConfigEnvironmentSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (string, error) { + if v == nil { + return "", nil + } + return v.(string), nil +} + +func expandComposerEnvironmentConfigResilienceMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (string, error) { if v == nil { return "", nil } return v.(string), nil } -func expandComposerEnvironmentConfigPrivateEnvironmentConfig(v interface{}, d *schema.ResourceData, config *Config) (*composer.PrivateEnvironmentConfig, error) { +func expandComposerEnvironmentConfigPrivateEnvironmentConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.PrivateEnvironmentConfig, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1781,7 +1812,7 @@ func expandComposerEnvironmentConfigPrivateEnvironmentConfig(v interface{}, d *s return transformed, nil } -func 
expandComposerEnvironmentConfigNodeConfig(v interface{}, d *schema.ResourceData, config *Config) (*composer.NodeConfig, error) { +func expandComposerEnvironmentConfigNodeConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.NodeConfig, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1860,7 +1891,7 @@ func expandComposerEnvironmentConfigNodeConfig(v interface{}, d *schema.Resource return transformed, nil } -func expandComposerEnvironmentIPAllocationPolicy(v interface{}, d *schema.ResourceData, config *Config) (*composer.IPAllocationPolicy, error) { +func expandComposerEnvironmentIPAllocationPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.IPAllocationPolicy, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1892,49 +1923,49 @@ func expandComposerEnvironmentIPAllocationPolicy(v interface{}, d *schema.Resour } -func expandComposerEnvironmentServiceAccount(v interface{}, d *schema.ResourceData, config *Config) (string, error) { +func expandComposerEnvironmentServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (string, error) { serviceAccount := v.(string) if len(serviceAccount) == 0 { return "", nil } - return GetResourceNameFromSelfLink(serviceAccount), nil + return tpgresource.GetResourceNameFromSelfLink(serviceAccount), nil } -func expandComposerEnvironmentZone(v interface{}, d *schema.ResourceData, config *Config) (string, error) { +func expandComposerEnvironmentZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (string, error) { zone := v.(string) if len(zone) == 0 { return zone, nil } if !strings.Contains(zone, "/") { - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return "", err } return fmt.Sprintf("projects/%s/zones/%s", project, zone), nil } - return getRelativePath(zone) + return tpgresource.GetRelativePath(zone) } -func 
expandComposerEnvironmentMachineType(v interface{}, d *schema.ResourceData, config *Config, nodeCfgZone string) (string, error) { +func expandComposerEnvironmentMachineType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config, nodeCfgZone string) (string, error) { machineType := v.(string) - requiredZone := GetResourceNameFromSelfLink(nodeCfgZone) + requiredZone := tpgresource.GetResourceNameFromSelfLink(nodeCfgZone) - fv, err := ParseMachineTypesFieldValue(v.(string), d, config) + fv, err := tpgresource.ParseMachineTypesFieldValue(v.(string), d, config) if err != nil { // Try to construct machine type with zone/project given in config. - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return "", err } - fv = &ZonalFieldValue{ + fv = &tpgresource.ZonalFieldValue{ Project: project, Zone: requiredZone, - Name: GetResourceNameFromSelfLink(machineType), - resourceType: "machineTypes", + Name: tpgresource.GetResourceNameFromSelfLink(machineType), + ResourceType: "machineTypes", } } @@ -1946,30 +1977,30 @@ func expandComposerEnvironmentMachineType(v interface{}, d *schema.ResourceData, return fv.RelativeLink(), nil } -func expandComposerEnvironmentNetwork(v interface{}, d *schema.ResourceData, config *Config) (string, error) { - fv, err := ParseNetworkFieldValue(v.(string), d, config) +func expandComposerEnvironmentNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (string, error) { + fv, err := tpgresource.ParseNetworkFieldValue(v.(string), d, config) if err != nil { return "", err } return fv.RelativeLink(), nil } -func expandComposerEnvironmentSubnetwork(v interface{}, d *schema.ResourceData, config *Config) (string, error) { - fv, err := ParseSubnetworkFieldValue(v.(string), d, config) +func expandComposerEnvironmentSubnetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (string, error) { + fv, err := 
tpgresource.ParseSubnetworkFieldValue(v.(string), d, config) if err != nil { return "", err } return fv.RelativeLink(), nil } -func expandComposerEnvironmentSetList(v interface{}, d *schema.ResourceData, config *Config) ([]string, error) { +func expandComposerEnvironmentSetList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) ([]string, error) { if v == nil { return nil, nil } - return convertStringArr(v.(*schema.Set).List()), nil + return tpgresource.ConvertStringArr(v.(*schema.Set).List()), nil } -func expandComposerEnvironmentConfigSoftwareConfig(v interface{}, d *schema.ResourceData, config *Config) (*composer.SoftwareConfig, error) { +func expandComposerEnvironmentConfigSoftwareConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.SoftwareConfig, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1991,7 +2022,7 @@ func expandComposerEnvironmentConfigSoftwareConfig(v interface{}, d *schema.Reso func expandComposerEnvironmentConfigSoftwareConfigStringMap(softwareConfig map[string]interface{}, k string) map[string]string { v, ok := softwareConfig[k] if ok && v != nil { - return convertStringMap(v.(map[string]interface{})) + return tpgresource.ConvertStringMap(v.(map[string]interface{})) } return map[string]string{} } @@ -2035,19 +2066,19 @@ func validateComposerEnvironmentEnvVariables(v interface{}, k string) (ws []stri return ws, errors } -func handleComposerEnvironmentCreationOpFailure(id string, envName *composerEnvironmentName, d *schema.ResourceData, config *Config) error { - userAgent, err := generateUserAgentString(d, config.UserAgent) +func handleComposerEnvironmentCreationOpFailure(id string, envName *ComposerEnvironmentName, d *schema.ResourceData, config *transport_tpg.Config) error { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } log.Printf("[WARNING] Creation operation for Composer Environment %q failed, check Environment 
isn't still running", id) // Try to get possible created but invalid environment. - env, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Get(envName.resourceName()).Do() + env, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Get(envName.ResourceName()).Do() if err != nil { // If error is 401, we don't have to clean up environment, return nil. // Otherwise, we encountered another error. - return handleNotFoundError(err, d, fmt.Sprintf("Composer Environment %q", envName.resourceName())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Composer Environment %q", envName.ResourceName())) } if env.State == "CREATING" { @@ -2058,7 +2089,7 @@ func handleComposerEnvironmentCreationOpFailure(id string, envName *composerEnvi } log.Printf("[WARNING] Environment %q from failed creation operation was created, deleting.", id) - op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Delete(envName.resourceName()).Do() + op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Delete(envName.ResourceName()).Do() if err != nil { return fmt.Errorf("Could not delete the invalid created environment with state %q: %s", env.State, err) } @@ -2094,35 +2125,35 @@ func getComposerEnvironmentPostCreateUpdateObj(env *composer.Environment) (updat return updateEnv } -func resourceComposerEnvironmentName(d *schema.ResourceData, config *Config) (*composerEnvironmentName, error) { - project, err := getProject(d, config) +func resourceComposerEnvironmentName(d *schema.ResourceData, config *transport_tpg.Config) (*ComposerEnvironmentName, error) { + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return nil, err } - return &composerEnvironmentName{ + return &ComposerEnvironmentName{ Project: project, Region: region, Environment: 
d.Get("name").(string), }, nil } -type composerEnvironmentName struct { +type ComposerEnvironmentName struct { Project string Region string Environment string } -func (n *composerEnvironmentName) resourceName() string { +func (n *ComposerEnvironmentName) ResourceName() string { return fmt.Sprintf("projects/%s/locations/%s/environments/%s", n.Project, n.Region, n.Environment) } -func (n *composerEnvironmentName) parentName() string { +func (n *ComposerEnvironmentName) ParentName() string { return fmt.Sprintf("projects/%s/locations/%s", n.Project, n.Region) } @@ -2131,15 +2162,15 @@ func (n *composerEnvironmentName) parentName() string { func compareServiceAccountEmailToLink(_, old, new string, _ *schema.ResourceData) bool { // old is the service account email returned from the server. if !strings.HasPrefix("projects/", old) { - return old == GetResourceNameFromSelfLink(new) + return old == tpgresource.GetResourceNameFromSelfLink(new) } - return compareSelfLinkRelativePaths("", old, new, nil) + return tpgresource.CompareSelfLinkRelativePaths("", old, new, nil) } func validateServiceAccountRelativeNameOrEmail(v interface{}, k string) (ws []string, errors []error) { value := v.(string) - serviceAccountRe := "(" + strings.Join(PossibleServiceAccountNames, "|") + ")" + serviceAccountRe := "(" + strings.Join(verify.PossibleServiceAccountNames, "|") + ")" if strings.HasPrefix(value, "projects/") { serviceAccountRe = fmt.Sprintf("projects/(.+)/serviceAccounts/%s", serviceAccountRe) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/resource_composer_environment_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/resource_composer_environment_sweeper.go new file mode 100644 index 0000000000..197fdb1ef8 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/composer/resource_composer_environment_sweeper.go @@ -0,0 +1,168 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package composer + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "log" + "time" + + "github.com/hashicorp/go-multierror" + "google.golang.org/api/storage/v1" +) + +func init() { + sweeper.AddTestSweepers("gcp_composer_environment", testSweepComposerResources) +} + +/** + * CLEAN UP HELPER FUNCTIONS + * Because the environments are flaky and bucket deletion rates can be + * rate-limited, for now just warn instead of returning actual errors. + */ +func testSweepComposerResources(region string) error { + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + return fmt.Errorf("error getting shared config for region: %s", err) + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Fatalf("error loading: %s", err) + } + + // us-central is passed as the region for our sweepers, but there are also + // many tests that use the us-east1 region + regions := []string{"us-central1", "us-east1"} + for _, r := range regions { + // Environments need to be cleaned up because the service is flaky. + if err := testSweepComposerEnvironments(config, r); err != nil { + log.Printf("[WARNING] unable to clean up all environments: %s", err) + } + + // Buckets need to be cleaned up because they just don't get deleted on purpose. 
+ if err := testSweepComposerEnvironmentBuckets(config, r); err != nil { + log.Printf("[WARNING] unable to clean up all environment storage buckets: %s", err) + } + } + + return nil +} + +func testSweepComposerEnvironments(config *transport_tpg.Config, region string) error { + found, err := config.NewComposerClient(config.UserAgent).Projects.Locations.Environments.List( + fmt.Sprintf("projects/%s/locations/%s", config.Project, region)).Do() + if err != nil { + return fmt.Errorf("error listing storage buckets for composer environment: %s", err) + } + + if len(found.Environments) == 0 { + log.Printf("composer: no environments need to be cleaned up") + return nil + } + + log.Printf("composer: %d environments need to be cleaned up", len(found.Environments)) + + var allErrors error + for _, e := range found.Environments { + createdAt, err := time.Parse(time.RFC3339Nano, e.CreateTime) + if err != nil { + return fmt.Errorf("composer: environment %q has invalid create time %q", e.Name, e.CreateTime) + } + // Skip environments that were created in same day + // This sweeper should really only clean out very old environments. 
+ if time.Since(createdAt) < time.Hour*24 { + log.Printf("composer: skipped environment %q, it was created today", e.Name) + continue + } + + switch e.State { + case "CREATING": + fallthrough + case "UPDATING": + log.Printf("composer: skipping pending Environment %q with state %q", e.Name, e.State) + case "DELETING": + log.Printf("composer: skipping pending Environment %q that is currently deleting", e.Name) + case "RUNNING": + fallthrough + case "ERROR": + fallthrough + default: + op, deleteErr := config.NewComposerClient(config.UserAgent).Projects.Locations.Environments.Delete(e.Name).Do() + if deleteErr != nil { + allErrors = multierror.Append(allErrors, fmt.Errorf("composer: unable to delete environment %q: %s", e.Name, deleteErr)) + continue + } + waitErr := ComposerOperationWaitTime(config, op, config.Project, "Sweeping old test environments", config.UserAgent, 10*time.Minute) + if waitErr != nil { + allErrors = multierror.Append(allErrors, fmt.Errorf("composer: unable to delete environment %q: %s", e.Name, waitErr)) + } + } + } + return allErrors +} + +func testSweepComposerEnvironmentBuckets(config *transport_tpg.Config, region string) error { + artifactsBName := fmt.Sprintf("artifacts.%s.appspot.com", config.Project) + artifactBucket, err := config.NewStorageClient(config.UserAgent).Buckets.Get(artifactsBName).Do() + if err != nil { + if transport_tpg.IsGoogleApiErrorWithCode(err, 404) { + log.Printf("composer environment bucket %q not found, doesn't need to be cleaned up", artifactsBName) + } else { + return err + } + } else if err = testSweepComposerEnvironmentCleanUpBucket(config, artifactBucket); err != nil { + return err + } + + found, err := config.NewStorageClient(config.UserAgent).Buckets.List(config.Project).Prefix(region).Do() + if err != nil { + return fmt.Errorf("error listing storage buckets created when testing composer environment: %s", err) + } + if len(found.Items) == 0 { + log.Printf("No environment-specific buckets need to be cleaned 
up") + return nil + } + + for _, bucket := range found.Items { + if _, ok := bucket.Labels["goog-composer-environment"]; !ok { + continue + } + if err := testSweepComposerEnvironmentCleanUpBucket(config, bucket); err != nil { + return err + } + } + return nil +} + +func testSweepComposerEnvironmentCleanUpBucket(config *transport_tpg.Config, bucket *storage.Bucket) error { + var allErrors error + objList, err := config.NewStorageClient(config.UserAgent).Objects.List(bucket.Name).Do() + if err != nil { + allErrors = multierror.Append(allErrors, + fmt.Errorf("Unable to list objects to delete for bucket %q: %s", bucket.Name, err)) + } + + for _, o := range objList.Items { + if err := config.NewStorageClient(config.UserAgent).Objects.Delete(bucket.Name, o.Name).Do(); err != nil { + allErrors = multierror.Append(allErrors, + fmt.Errorf("Unable to delete object %q from bucket %q: %s", o.Name, bucket.Name, err)) + } + } + + if err := config.NewStorageClient(config.UserAgent).Buckets.Delete(bucket.Name).Do(); err != nil { + allErrors = multierror.Append(allErrors, fmt.Errorf("Unable to delete bucket %q: %s", bucket.Name, err)) + } + + if allErrors != nil { + return fmt.Errorf("Unable to clean up bucket %q: %v", bucket.Name, allErrors) + } + + log.Printf("Cleaned up bucket %q for composer environment tests", bucket.Name) + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/compute_backend_service_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_backend_service_helpers.go similarity index 86% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/compute_backend_service_helpers.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_backend_service_helpers.go index 350636fec0..3920c73f52 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/compute_backend_service_helpers.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_backend_service_helpers.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "google.golang.org/api/compute/v1" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/compute_instance_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_instance_helpers.go similarity index 85% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/compute_instance_helpers.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_instance_helpers.go index f40e276aec..33f43f25f3 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/compute_instance_helpers.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_instance_helpers.go @@ -1,9 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "reflect" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "google.golang.org/api/googleapi" @@ -108,7 +113,7 @@ func expandScheduling(v interface{}) (*compute.Scheduling, error) { transformed := &compute.SchedulingNodeAffinity{ Key: nodeAff["key"].(string), Operator: nodeAff["operator"].(string), - Values: convertStringArr(nodeAff["values"].(*schema.Set).List()), + Values: tpgresource.ConvertStringArr(nodeAff["values"].(*schema.Set).List()), } scheduling.NodeAffinities = append(scheduling.NodeAffinities, transformed) } @@ -146,7 +151,7 @@ func flattenScheduling(resp *compute.Scheduling) []map[string]interface{} { nodeAffinities.Add(map[string]interface{}{ "key": na.Key, "operator": na.Operator, - "values": schema.NewSet(schema.HashString, convertStringArrToInterface(na.Values)), + "values": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(na.Values)), }) } schedulingMap["node_affinities"] = nodeAffinities @@ -184,7 +189,7 @@ func flattenIpv6AccessConfigs(ipv6AccessConfigs []*compute.AccessConfig) []map[s return flattened } -func flattenNetworkInterfaces(d *schema.ResourceData, config *Config, networkInterfaces []*compute.NetworkInterface) ([]map[string]interface{}, string, string, string, error) { +func flattenNetworkInterfaces(d *schema.ResourceData, config *transport_tpg.Config, networkInterfaces []*compute.NetworkInterface) ([]map[string]interface{}, string, string, string, error) { flattened := make([]map[string]interface{}, len(networkInterfaces)) var region, internalIP, externalIP string @@ -192,7 +197,7 @@ func flattenNetworkInterfaces(d *schema.ResourceData, config *Config, networkInt var ac []map[string]interface{} ac, externalIP = 
flattenAccessConfigs(iface.AccessConfigs) - subnet, err := ParseSubnetworkFieldValue(iface.Subnetwork, d, config) + subnet, err := tpgresource.ParseSubnetworkFieldValue(iface.Subnetwork, d, config) if err != nil { return nil, "", "", "", err } @@ -200,8 +205,8 @@ func flattenNetworkInterfaces(d *schema.ResourceData, config *Config, networkInt flattened[i] = map[string]interface{}{ "network_ip": iface.NetworkIP, - "network": ConvertSelfLinkToV1(iface.Network), - "subnetwork": ConvertSelfLinkToV1(iface.Subnetwork), + "network": tpgresource.ConvertSelfLinkToV1(iface.Network), + "subnetwork": tpgresource.ConvertSelfLinkToV1(iface.Subnetwork), "subnetwork_project": subnet.Project, "access_config": ac, "alias_ip_range": flattenAliasIpRange(iface.AliasIpRanges), @@ -220,6 +225,7 @@ func flattenNetworkInterfaces(d *schema.ResourceData, config *Config, networkInt if internalIP == "" { internalIP = iface.NetworkIP } + } return flattened, region, internalIP, externalIP, nil } @@ -258,7 +264,7 @@ func expandIpv6AccessConfigs(configs []interface{}) []*compute.AccessConfig { return iacs } -func expandNetworkInterfaces(d TerraformResourceData, config *Config) ([]*compute.NetworkInterface, error) { +func expandNetworkInterfaces(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]*compute.NetworkInterface, error) { configs := d.Get("network_interface").([]interface{}) ifaces := make([]*compute.NetworkInterface, len(configs)) for i, raw := range configs { @@ -270,13 +276,13 @@ func expandNetworkInterfaces(d TerraformResourceData, config *Config) ([]*comput return nil, fmt.Errorf("exactly one of network or subnetwork must be provided") } - nf, err := ParseNetworkFieldValue(network, d, config) + nf, err := tpgresource.ParseNetworkFieldValue(network, d, config) if err != nil { return nil, fmt.Errorf("cannot determine self_link for network %q: %s", network, err) } subnetProjectField := fmt.Sprintf("network_interface.%d.subnetwork_project", i) - sf, err := 
ParseSubnetworkFieldValueWithProjectField(subnetwork, subnetProjectField, d, config) + sf, err := tpgresource.ParseSubnetworkFieldValueWithProjectField(subnetwork, subnetProjectField, d, config) if err != nil { return nil, fmt.Errorf("cannot determine self_link for subnetwork %q: %s", subnetwork, err) } @@ -301,7 +307,7 @@ func flattenServiceAccounts(serviceAccounts []*compute.ServiceAccount) []map[str for i, serviceAccount := range serviceAccounts { result[i] = map[string]interface{}{ "email": serviceAccount.Email, - "scopes": schema.NewSet(stringScopeHashcode, convertStringArrToInterface(serviceAccount.Scopes)), + "scopes": schema.NewSet(tpgresource.StringScopeHashcode, tpgresource.ConvertStringArrToInterface(serviceAccount.Scopes)), } } return result @@ -314,7 +320,7 @@ func expandServiceAccounts(configs []interface{}) []*compute.ServiceAccount { accounts[i] = &compute.ServiceAccount{ Email: data["email"].(string), - Scopes: canonicalizeServiceScopes(convertStringSet(data["scopes"].(*schema.Set))), + Scopes: tpgresource.CanonicalizeServiceScopes(tpgresource.ConvertStringSet(data["scopes"].(*schema.Set))), } if accounts[i].Email == "" { @@ -335,7 +341,7 @@ func flattenGuestAccelerators(accelerators []*compute.AcceleratorConfig) []map[s return acceleratorsSchema } -func resourceInstanceTags(d TerraformResourceData) *compute.Tags { +func resourceInstanceTags(d tpgresource.TerraformResourceData) *compute.Tags { // Calculate the tags var tags *compute.Tags if v := d.Get("tags"); v != nil { @@ -352,7 +358,7 @@ func resourceInstanceTags(d TerraformResourceData) *compute.Tags { return tags } -func expandShieldedVmConfigs(d TerraformResourceData) *compute.ShieldedInstanceConfig { +func expandShieldedVmConfigs(d tpgresource.TerraformResourceData) *compute.ShieldedInstanceConfig { if _, ok := d.GetOk("shielded_instance_config"); !ok { return nil } @@ -366,7 +372,7 @@ func expandShieldedVmConfigs(d TerraformResourceData) *compute.ShieldedInstanceC } } -func 
expandConfidentialInstanceConfig(d TerraformResourceData) *compute.ConfidentialInstanceConfig { +func expandConfidentialInstanceConfig(d tpgresource.TerraformResourceData) *compute.ConfidentialInstanceConfig { if _, ok := d.GetOk("confidential_instance_config"); !ok { return nil } @@ -387,7 +393,7 @@ func flattenConfidentialInstanceConfig(ConfidentialInstanceConfig *compute.Confi }} } -func expandAdvancedMachineFeatures(d TerraformResourceData) *compute.AdvancedMachineFeatures { +func expandAdvancedMachineFeatures(d tpgresource.TerraformResourceData) *compute.AdvancedMachineFeatures { if _, ok := d.GetOk("advanced_machine_features"); !ok { return nil } @@ -423,7 +429,7 @@ func flattenShieldedVmConfig(shieldedVmConfig *compute.ShieldedInstanceConfig) [ }} } -func expandDisplayDevice(d TerraformResourceData) *compute.DisplayDevice { +func expandDisplayDevice(d tpgresource.TerraformResourceData) *compute.DisplayDevice { if _, ok := d.GetOk("enable_display"); !ok { return nil } @@ -509,8 +515,8 @@ func hasNodeAffinitiesChanged(oScheduling, newScheduling map[string]interface{}) return true } - // convertStringSet will sort the set into a slice, allowing DeepEqual - if !reflect.DeepEqual(convertStringSet(oldNodeAffinity["values"].(*schema.Set)), convertStringSet(newNodeAffinity["values"].(*schema.Set))) { + // ConvertStringSet will sort the set into a slice, allowing DeepEqual + if !reflect.DeepEqual(tpgresource.ConvertStringSet(oldNodeAffinity["values"].(*schema.Set)), tpgresource.ConvertStringSet(newNodeAffinity["values"].(*schema.Set))) { return true } } @@ -568,3 +574,34 @@ func flattenReservationAffinity(affinity *compute.ReservationAffinity) []map[str return []map[string]interface{}{flattened} } + +func expandNetworkPerformanceConfig(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (*compute.NetworkPerformanceConfig, error) { + configs, ok := d.GetOk("network_performance_config") + if !ok { + return nil, nil + } + + npcSlice := 
configs.([]interface{}) + if len(npcSlice) > 1 { + return nil, fmt.Errorf("cannot specify multiple network_performance_configs") + } + + if len(npcSlice) == 0 || npcSlice[0] == nil { + return nil, nil + } + npc := npcSlice[0].(map[string]interface{}) + return &compute.NetworkPerformanceConfig{ + TotalEgressBandwidthTier: npc["total_egress_bandwidth_tier"].(string), + }, nil +} + +func flattenNetworkPerformanceConfig(c *compute.NetworkPerformanceConfig) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "total_egress_bandwidth_tier": c.TotalEgressBandwidthTier, + }, + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/compute_instance_network_interface_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_instance_network_interface_helpers.go similarity index 80% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/compute_instance_network_interface_helpers.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_instance_network_interface_helpers.go index 2ebccd7137..dc27f81d59 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/compute_instance_network_interface_helpers.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_instance_network_interface_helpers.go @@ -1,14 +1,17 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/compute/v1" ) -func computeInstanceDeleteAccessConfigs(d *schema.ResourceData, config *Config, instNetworkInterface *compute.NetworkInterface, project, zone, userAgent, instanceName string) error { +func computeInstanceDeleteAccessConfigs(d *schema.ResourceData, config *transport_tpg.Config, instNetworkInterface *compute.NetworkInterface, project, zone, userAgent, instanceName string) error { // Delete any accessConfig that currently exists in instNetworkInterface for _, ac := range instNetworkInterface.AccessConfigs { op, err := config.NewComputeClient(userAgent).Instances.DeleteAccessConfig( @@ -24,7 +27,7 @@ func computeInstanceDeleteAccessConfigs(d *schema.ResourceData, config *Config, return nil } -func computeInstanceAddAccessConfigs(d *schema.ResourceData, config *Config, instNetworkInterface *compute.NetworkInterface, accessConfigs []*compute.AccessConfig, project, zone, userAgent, instanceName string) error { +func computeInstanceAddAccessConfigs(d *schema.ResourceData, config *transport_tpg.Config, instNetworkInterface *compute.NetworkInterface, accessConfigs []*compute.AccessConfig, project, zone, userAgent, instanceName string) error { // Create new ones for _, ac := range accessConfigs { op, err := config.NewComputeClient(userAgent).Instances.AddAccessConfig(project, zone, instanceName, instNetworkInterface.Name, ac).Do() @@ -39,7 +42,7 @@ func computeInstanceAddAccessConfigs(d *schema.ResourceData, config *Config, ins return nil } -func computeInstanceCreateUpdateWhileStoppedCall(d *schema.ResourceData, config *Config, networkInterfacePatchObj *compute.NetworkInterface, accessConfigs []*compute.AccessConfig, accessConfigsHaveChanged bool, index int, project, zone, userAgent, instanceName 
string) func(inst *compute.Instance) error { +func computeInstanceCreateUpdateWhileStoppedCall(d *schema.ResourceData, config *transport_tpg.Config, networkInterfacePatchObj *compute.NetworkInterface, accessConfigs []*compute.AccessConfig, accessConfigsHaveChanged bool, index int, project, zone, userAgent, instanceName string) func(inst *compute.Instance) error { // Access configs' ip changes when the instance stops invalidating our fingerprint // expect caller to re-validate instance before calling patch this is why we expect diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/compute_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_operation.go similarity index 77% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/compute_operation.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_operation.go index fdcfcf9376..653368e885 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/compute_operation.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/compute_operation.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "bytes" @@ -9,6 +11,9 @@ import ( "log" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/compute/v1" ) @@ -68,10 +73,10 @@ func (w *ComputeOperationWaiter) QueryOp() (interface{}, error) { } } if w.Op.Zone != "" { - zone := GetResourceNameFromSelfLink(w.Op.Zone) + zone := tpgresource.GetResourceNameFromSelfLink(w.Op.Zone) return w.Service.ZoneOperations.Get(w.Project, zone, w.Op.Name).Do() } else if w.Op.Region != "" { - region := GetResourceNameFromSelfLink(w.Op.Region) + region := tpgresource.GetResourceNameFromSelfLink(w.Op.Region) return w.Service.RegionOperations.Get(w.Project, region, w.Op.Name).Do() } return w.Service.GlobalOperations.Get(w.Project, w.Op.Name).Do() @@ -93,16 +98,16 @@ func (w *ComputeOperationWaiter) TargetStates() []string { return []string{"DONE"} } -func ComputeOperationWaitTime(config *Config, res interface{}, project, activity, userAgent string, timeout time.Duration) error { +func ComputeOperationWaitTime(config *transport_tpg.Config, res interface{}, project, activity, userAgent string, timeout time.Duration) error { op := &compute.Operation{} - err := Convert(res, op) + err := tpgresource.Convert(res, op) if err != nil { return err } w := &ComputeOperationWaiter{ Service: config.NewComputeClient(userAgent), - Context: config.context, + Context: config.Context, Op: op, Project: project, } @@ -110,7 +115,7 @@ func ComputeOperationWaitTime(config *Config, res interface{}, project, activity if err := w.SetOp(op); err != nil { return err } - return OperationWait(w, activity, timeout, config.PollInterval) + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) } // ComputeOperationError wraps compute.OperationError and implements the @@ -135,6 +140,14 @@ func writeOperationError(w io.StringWriter, opError 
*compute.OperationErrorError var link *compute.HelpLink for _, ed := range opError.ErrorDetails { + if opError.Code == "QUOTA_EXCEEDED" && ed.QuotaInfo != nil { + w.WriteString("\tmetric name = " + ed.QuotaInfo.MetricName + "\n") + w.WriteString("\tlimit name = " + ed.QuotaInfo.LimitName + "\n") + if ed.QuotaInfo.Dimensions != nil { + w.WriteString("\tdimensions = " + fmt.Sprint(ed.QuotaInfo.Dimensions) + "\n") + } + break + } if lm == nil && ed.LocalizedMessage != nil { lm = ed.LocalizedMessage } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_health_check.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_health_check.go new file mode 100644 index 0000000000..5b9347e4cf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_health_check.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeHealthCheck() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeHealthCheck().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleComputeHealthCheckRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) error { + id, err := tpgresource.ReplaceVars(d, meta.(*transport_tpg.Config), "projects/{{project}}/global/healthChecks/{{name}}") + if err != nil { + return err + } + d.SetId(id) + + return resourceComputeHealthCheckRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_compute_lb_ip_ranges.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_lb_ip_ranges.go similarity index 93% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_compute_lb_ip_ranges.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_lb_ip_ranges.go index e5317a2540..8f030defaf 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_compute_lb_ip_ranges.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_lb_ip_ranges.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_network_endpoint_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_network_endpoint_group.go new file mode 100644 index 0000000000..181a75af57 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_network_endpoint_group.go @@ -0,0 +1,62 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeNetworkEndpointGroup() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeNetworkEndpointGroup().Schema) + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "zone") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "self_link") + + return &schema.Resource{ + Read: dataSourceComputeNetworkEndpointGroupRead, + Schema: dsSchema, + } +} + +func dataSourceComputeNetworkEndpointGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + if name, ok := d.GetOk("name"); ok { + project, err := 
tpgresource.GetProject(d, config) + if err != nil { + return err + } + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + d.SetId(fmt.Sprintf("projects/%s/zones/%s/networkEndpointGroups/%s", project, zone, name.(string))) + } else if selfLink, ok := d.GetOk("self_link"); ok { + parsed, err := tpgresource.ParseNetworkEndpointGroupFieldValue(selfLink.(string), d, config) + if err != nil { + return err + } + if err := d.Set("name", parsed.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("zone", parsed.Zone); err != nil { + return fmt.Errorf("Error setting zone: %s", err) + } + if err := d.Set("project", parsed.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/zones/%s/networkEndpointGroups/%s", parsed.Project, parsed.Zone, parsed.Name)) + } else { + return errors.New("Must provide either `self_link` or `zone/name`") + } + + return resourceComputeNetworkEndpointGroupRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_network_peering.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_network_peering.go new file mode 100644 index 0000000000..174c5e04eb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_compute_network_peering.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +const regexGCEName = "^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$" + +func DataSourceComputeNetworkPeering() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeNetworkPeering().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name", "network") + + dsSchema["name"].ValidateFunc = verify.ValidateRegexp(regexGCEName) + dsSchema["network"].ValidateFunc = verify.ValidateRegexp(peerNetworkLinkRegex) + return &schema.Resource{ + Read: dataSourceComputeNetworkPeeringRead, + Schema: dsSchema, + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(4 * time.Minute), + }, + } +} + +func dataSourceComputeNetworkPeeringRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(d.Get("network").(string), d, config) + if err != nil { + return err + } + d.SetId(fmt.Sprintf("%s/%s", networkFieldValue.Name, d.Get("name").(string))) + + return resourceComputeNetworkPeeringRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_address.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_address.go new file mode 100644 index 0000000000..6c36d08e5f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_address.go @@ -0,0 +1,214 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +var ( + computeAddressIdTemplate = "projects/%s/regions/%s/addresses/%s" + computeAddressLinkRegex = regexp.MustCompile("projects/(.+)/regions/(.+)/addresses/(.+)$") +) + +func DataSourceGoogleComputeAddress() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeAddressRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "address": { + Type: schema.TypeString, + Computed: true, + }, + + "address_type": { + Type: schema.TypeString, + Computed: true, + }, + + "network": { + Type: schema.TypeString, + Computed: true, + }, + + "network_tier": { + Type: schema.TypeString, + Computed: true, + }, + + "prefix_length": { + Type: schema.TypeInt, + Computed: true, + }, + + "purpose": { + Type: schema.TypeString, + Computed: true, + }, + + "subnetwork": { + Type: schema.TypeString, + Computed: true, + }, + + "users": { + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + } +} + +func dataSourceGoogleComputeAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + region, err := tpgresource.GetRegion(d, config) + if err != nil { + 
return err + } + name := d.Get("name").(string) + + address, err := config.NewComputeClient(userAgent).Addresses.Get(project, region, name).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Address Not Found : %s", name)) + } + + if err := d.Set("address", address.Address); err != nil { + return fmt.Errorf("Error setting address: %s", err) + } + if err := d.Set("address_type", address.AddressType); err != nil { + return fmt.Errorf("Error setting address_type: %s", err) + } + if err := d.Set("network", address.Network); err != nil { + return fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("network_tier", address.NetworkTier); err != nil { + return fmt.Errorf("Error setting network_tier: %s", err) + } + if err := d.Set("prefix_length", address.PrefixLength); err != nil { + return fmt.Errorf("Error setting prefix_length: %s", err) + } + if err := d.Set("purpose", address.Purpose); err != nil { + return fmt.Errorf("Error setting purpose: %s", err) + } + if err := d.Set("subnetwork", address.Subnetwork); err != nil { + return fmt.Errorf("Error setting subnetwork: %s", err) + } + if err := d.Set("status", address.Status); err != nil { + return fmt.Errorf("Error setting status: %s", err) + } + if err := d.Set("self_link", address.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + + d.SetId(fmt.Sprintf("projects/%s/regions/%s/addresses/%s", project, region, name)) + return nil +} + +type ComputeAddressId struct { + Project string + Region string + Name string +} + +func (s ComputeAddressId) CanonicalId() string { + return fmt.Sprintf(computeAddressIdTemplate, s.Project, s.Region, s.Name) +} + +func ParseComputeAddressId(id string, config *transport_tpg.Config) 
(*ComputeAddressId, error) { + var parts []string + if computeAddressLinkRegex.MatchString(id) { + parts = computeAddressLinkRegex.FindStringSubmatch(id) + + return &ComputeAddressId{ + Project: parts[1], + Region: parts[2], + Name: parts[3], + }, nil + } else { + parts = strings.Split(id, "/") + } + + if len(parts) == 3 { + return &ComputeAddressId{ + Project: parts[0], + Region: parts[1], + Name: parts[2], + }, nil + } else if len(parts) == 2 { + // Project is optional. + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{region}/{name}` id format.") + } + + return &ComputeAddressId{ + Project: config.Project, + Region: parts[0], + Name: parts[1], + }, nil + } else if len(parts) == 1 { + // Project and region is optional + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{name}` id format.") + } + if config.Region == "" { + return nil, fmt.Errorf("The default region for the provider must be set when using the `{name}` id format.") + } + + return &ComputeAddressId{ + Project: config.Project, + Region: config.Region, + Name: parts[0], + }, nil + } + + return nil, fmt.Errorf("Invalid compute address id. 
Expecting resource link, `{project}/{region}/{name}`, `{region}/{name}` or `{name}` format.") +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_addresses.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_addresses.go similarity index 93% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_addresses.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_addresses.go index dbac6fc5a6..a7ad6ad8d2 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_addresses.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_addresses.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "context" "fmt" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/compute/v1" @@ -100,13 +105,13 @@ AND (scheduling.automaticRestart = true) """`, } func dataSourceGoogleComputeAddressesRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return diag.FromErr(err) } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return diag.FromErr(err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_backend_bucket.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_backend_bucket.go new file mode 100644 index 0000000000..2c0acbb4b6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_backend_bucket.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeBackendBucket() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeBackendBucket().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceComputeBackendBucketRead, + Schema: dsSchema, + } +} + +func dataSourceComputeBackendBucketRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + backendBucketName := d.Get("name").(string) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/global/backendBuckets/%s", project, backendBucketName)) + + return resourceComputeBackendBucketRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_backend_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_backend_service.go new file mode 100644 index 0000000000..a906e270ea --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_backend_service.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeBackendService() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeBackendService().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceComputeBackendServiceRead, + Schema: dsSchema, + } +} + +func dataSourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + serviceName := d.Get("name").(string) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/global/backendServices/%s", project, serviceName)) + + return resourceComputeBackendServiceRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_default_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_default_service_account.go new file mode 100644 index 0000000000..877a1b0675 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_default_service_account.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeDefaultServiceAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeDefaultServiceAccountRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "email": { + Type: schema.TypeString, + Computed: true, + }, + "unique_id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleComputeDefaultServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + projectCompResource, err := config.NewComputeClient(userAgent).Projects.Get(project).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "GCE default service account") + } + + serviceAccountName, err := tpgresource.ServiceAccountFQN(projectCompResource.DefaultServiceAccount, d, config) + if err != nil { + return err + } + + sa, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(serviceAccountName).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Service Account %q", serviceAccountName)) + } + + d.SetId(sa.Name) + if err := d.Set("email", sa.Email); err != nil { + return fmt.Errorf("Error setting email: %s", err) + } + if err := d.Set("unique_id", sa.UniqueId); err != nil { + return fmt.Errorf("Error setting 
unique_id: %s", err) + } + if err := d.Set("project", sa.ProjectId); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("name", sa.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("display_name", sa.DisplayName); err != nil { + return fmt.Errorf("Error setting display_name: %s", err) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_disk.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_disk.go new file mode 100644 index 0000000000..a2d03cf26d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_disk.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeDisk() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeDisk().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "zone") + + return &schema.Resource{ + Read: dataSourceGoogleComputeDiskRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComputeDiskRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/disks/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return 
resourceComputeDiskRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_forwarding_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_forwarding_rule.go new file mode 100644 index 0000000000..f8d546310a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_forwarding_rule.go @@ -0,0 +1,47 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeForwardingRule() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeForwardingRule().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "region") + + return &schema.Resource{ + Read: dataSourceGoogleComputeForwardingRuleRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + name := d.Get("name").(string) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/regions/%s/forwardingRules/%s", project, region, name)) + + return resourceComputeForwardingRuleRead(d, meta) +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_global_address.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_global_address.go new file mode 100644 index 0000000000..9762f076e9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_global_address.go @@ -0,0 +1,131 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeGlobalAddress() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeGlobalAddressRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "address": { + Type: schema.TypeString, + Computed: true, + }, + + "address_type": { + Type: schema.TypeString, + Computed: true, + }, + + "network": { + Type: schema.TypeString, + Computed: true, + }, + + "network_tier": { + Type: schema.TypeString, + Computed: true, + }, + + "prefix_length": { + Type: schema.TypeInt, + Computed: true, + }, + + "purpose": { + Type: schema.TypeString, + Computed: true, + }, + + "subnetwork": { + Type: schema.TypeString, + Computed: true, + }, + + "users": { + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + } +} + +func dataSourceGoogleComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error { + 
config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + address, err := config.NewComputeClient(userAgent).GlobalAddresses.Get(project, name).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Global Address Not Found : %s", name)) + } + + if err := d.Set("address", address.Address); err != nil { + return fmt.Errorf("Error setting address: %s", err) + } + if err := d.Set("address_type", address.AddressType); err != nil { + return fmt.Errorf("Error setting address_type: %s", err) + } + if err := d.Set("network", address.Network); err != nil { + return fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("network_tier", address.NetworkTier); err != nil { + return fmt.Errorf("Error setting network_tier: %s", err) + } + if err := d.Set("prefix_length", address.PrefixLength); err != nil { + return fmt.Errorf("Error setting prefix_length: %s", err) + } + if err := d.Set("purpose", address.Purpose); err != nil { + return fmt.Errorf("Error setting purpose: %s", err) + } + if err := d.Set("subnetwork", address.Subnetwork); err != nil { + return fmt.Errorf("Error setting subnetwork: %s", err) + } + if err := d.Set("status", address.Status); err != nil { + return fmt.Errorf("Error setting status: %s", err) + } + if err := d.Set("self_link", address.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/global/addresses/%s", project, name)) + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_ha_vpn_gateway.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_ha_vpn_gateway.go new file mode 100644 index 0000000000..bfa88cee79 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_ha_vpn_gateway.go @@ -0,0 +1,47 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeHaVpnGateway() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeHaVpnGateway().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "region") + + return &schema.Resource{ + Read: dataSourceGoogleComputeHaVpnGatewayRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComputeHaVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + name := d.Get("name").(string) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/regions/%s/vpnGateways/%s", project, region, name)) + + return resourceComputeHaVpnGatewayRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_image.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_image.go similarity index 86% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_image.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_image.go index 23537d71ed..672d493743 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_image.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_image.go @@ -1,11 +1,16 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "log" "strconv" + "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/compute/v1" ) @@ -107,18 +112,23 @@ func DataSourceGoogleComputeImage() *schema.Resource { Optional: true, ForceNew: true, }, + "most_recent": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, }, } } func dataSourceGoogleComputeImageRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -142,6 +152,19 @@ func dataSourceGoogleComputeImageRead(d *schema.ResourceData, meta interface{}) for _, im := range images.Items { image = im } + } else if mr, ok := 
d.GetOk("most_recent"); len(images.Items) >= 1 && ok && mr.(bool) { + most_recent := time.UnixMicro(0) + for _, im := range images.Items { + parsedTS, err := time.Parse(time.RFC3339, im.CreationTimestamp) + if err != nil { + return fmt.Errorf("error parsing creation timestamp: %w", err) + } + + if parsedTS.After(most_recent) { + most_recent = parsedTS + image = im + } + } } else { return fmt.Errorf("your filter has returned more than one image or no image. Please refine your filter to return exactly one image") } @@ -218,7 +241,7 @@ func dataSourceGoogleComputeImageRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error setting status: %s", err) } - id, err := replaceVars(d, config, "projects/{{project}}/global/images/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/images/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance.go similarity index 80% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance.go index 8b0e6faa21..f50208fbb0 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance.go @@ -1,17 +1,21 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleComputeInstance() *schema.Resource { // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeInstance().Schema) + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeInstance().Schema) // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "name", "self_link", "project", "zone") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name", "self_link", "project", "zone") return &schema.Resource{ Read: dataSourceGoogleComputeInstanceRead, @@ -20,20 +24,20 @@ func DataSourceGoogleComputeInstance() *schema.Resource { } func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, zone, name, err := GetZonalResourcePropertiesFromSelfLinkOrSchema(d, config) + project, zone, name, err := tpgresource.GetZonalResourcePropertiesFromSelfLinkOrSchema(d, config) if err != nil { return err } instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, name).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Instance %s", name)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance %s", name)) } md := flattenMetadataBeta(instance.Metadata) @@ -44,7 +48,7 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{ if err := d.Set("can_ip_forward", instance.CanIpForward); err != nil { return fmt.Errorf("Error setting can_ip_forward: %s", 
err) } - if err := d.Set("machine_type", GetResourceNameFromSelfLink(instance.MachineType)); err != nil { + if err := d.Set("machine_type", tpgresource.GetResourceNameFromSelfLink(instance.MachineType)); err != nil { return fmt.Errorf("Error setting machine_type: %s", err) } @@ -84,7 +88,7 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{ if err := d.Set("tags_fingerprint", instance.Tags.Fingerprint); err != nil { return fmt.Errorf("Error setting tags_fingerprint: %s", err) } - if err := d.Set("tags", convertStringArrToInterface(instance.Tags.Items)); err != nil { + if err := d.Set("tags", tpgresource.ConvertStringArrToInterface(instance.Tags.Items)); err != nil { return fmt.Errorf("Error setting tags: %s", err) } } @@ -111,7 +115,7 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{ scratchDisks = append(scratchDisks, flattenScratchDisk(disk)) } else { di := map[string]interface{}{ - "source": ConvertSelfLinkToV1(disk.Source), + "source": tpgresource.ConvertSelfLinkToV1(disk.Source), "device_name": disk.DeviceName, "mode": disk.Mode, } @@ -173,7 +177,7 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{ if err := d.Set("deletion_protection", instance.DeletionProtection); err != nil { return fmt.Errorf("Error setting deletion_protection: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(instance.SelfLink)); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(instance.SelfLink)); err != nil { return fmt.Errorf("Error setting self_link: %s", err) } if err := d.Set("instance_id", fmt.Sprintf("%d", instance.Id)); err != nil { @@ -182,7 +186,7 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{ if err := d.Set("project", project); err != nil { return fmt.Errorf("Error setting project: %s", err) } - if err := d.Set("zone", GetResourceNameFromSelfLink(instance.Zone)); err != nil { + if err := d.Set("zone", 
tpgresource.GetResourceNameFromSelfLink(instance.Zone)); err != nil { return fmt.Errorf("Error setting zone: %s", err) } if err := d.Set("current_status", instance.Status); err != nil { @@ -191,6 +195,6 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{ if err := d.Set("name", instance.Name); err != nil { return fmt.Errorf("Error setting name: %s", err) } - d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, GetResourceNameFromSelfLink(instance.Zone), instance.Name)) + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, tpgresource.GetResourceNameFromSelfLink(instance.Zone), instance.Name)) return nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_group.go similarity index 84% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance_group.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_group.go index b164f7b4df..8dddeb27fc 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance_group.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_group.go @@ -1,10 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "errors" "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleComputeInstanceGroup() *schema.Resource { @@ -81,19 +85,19 @@ func DataSourceGoogleComputeInstanceGroup() *schema.Resource { } func dataSourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) if name, ok := d.GetOk("name"); ok { - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } d.SetId(fmt.Sprintf("projects/%s/zones/%s/instanceGroups/%s", project, zone, name.(string))) } else if selfLink, ok := d.GetOk("self_link"); ok { - parsed, err := ParseInstanceGroupFieldValue(selfLink.(string), d, config) + parsed, err := tpgresource.ParseInstanceGroupFieldValue(selfLink.(string), d, config) if err != nil { return fmt.Errorf("InstanceGroup name, zone or project could not be parsed from %s", selfLink) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_group_manager.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_group_manager.go new file mode 100644 index 0000000000..4a5a5ebda1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_group_manager.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeInstanceGroupManager() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeInstanceGroupManager().Schema) + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name", "self_link", "project", "zone") + + return &schema.Resource{ + Read: dataSourceComputeInstanceGroupManagerRead, + Schema: dsSchema, + } +} + +func dataSourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + if selfLink, ok := d.GetOk("self_link"); ok { + parsed, err := tpgresource.ParseInstanceGroupFieldValue(selfLink.(string), d, config) + if err != nil { + return fmt.Errorf("InstanceGroup name, zone or project could not be parsed from %s", selfLink) + } + if err := d.Set("name", parsed.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("zone", parsed.Zone); err != nil { + return fmt.Errorf("Error setting zone: %s", err) + } + if err := d.Set("project", parsed.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instanceGroupManagers/%s", parsed.Project, parsed.Zone, parsed.Name)) + } else if name, ok := d.GetOk("name"); ok { + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instanceGroupManagers/%s", project, zone, name.(string))) + } else { + return errors.New("Must provide either `self_link` or `zone/name`") + } + + err := resourceComputeInstanceGroupManagerRead(d, meta) + + if err 
!= nil { + return err + } + if d.Id() == "" { + return errors.New("Instance Manager Group not found") + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance_serial_port.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_serial_port.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance_serial_port.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_serial_port.go index 8d67aca419..131cd29429 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_instance_serial_port.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_serial_port.go @@ -1,9 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleComputeInstanceSerialPort() *schema.Resource { @@ -37,20 +41,20 @@ func DataSourceGoogleComputeInstanceSerialPort() *schema.Resource { } func computeInstanceSerialPortRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } if err := d.Set("project", project); err != nil { return fmt.Errorf("Error setting project: %s", err) } - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_template.go new file mode 100644 index 0000000000..5370c96484 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_instance_template.go @@ -0,0 +1,111 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "google.golang.org/api/compute/v1" +) + +func DataSourceGoogleComputeInstanceTemplate() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeInstanceTemplate().Schema) + + dsSchema["filter"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + } + dsSchema["self_link_unique"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + } + dsSchema["most_recent"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + } + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name", "filter", "most_recent", "project", "self_link_unique") + + mutuallyExclusive := []string{"name", "filter", "self_link_unique"} + for _, n := range mutuallyExclusive { + dsSchema[n].ExactlyOneOf = mutuallyExclusive + } + + return &schema.Resource{ + Read: datasourceComputeInstanceTemplateRead, + Schema: dsSchema, + } +} + +func datasourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + if v, ok := d.GetOk("name"); ok { + return retrieveInstance(d, meta, project, v.(string)) + } + if v, ok := d.GetOk("filter"); ok { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + templates, err := config.NewComputeClient(userAgent).InstanceTemplates.List(project).Filter(v.(string)).Do() + if err != nil { + return fmt.Errorf("error retrieving list of instance templates: %s", err) + } + + mostRecent := d.Get("most_recent").(bool) + if 
mostRecent { + sort.Sort(ByCreationTimestamp(templates.Items)) + } + + count := len(templates.Items) + if count == 1 || count > 1 && mostRecent { + return retrieveInstance(d, meta, project, templates.Items[0].Name) + } + + return fmt.Errorf("your filter has returned %d instance template(s). Please refine your filter or set most_recent to return exactly one instance template", len(templates.Items)) + } + if v, ok := d.GetOk("self_link_unique"); ok { + return retrieveInstanceFromUniqueId(d, meta, project, v.(string)) + } + + return fmt.Errorf("one of name, filters or self_link_unique must be set") +} + +func retrieveInstance(d *schema.ResourceData, meta interface{}, project, name string) error { + d.SetId("projects/" + project + "/global/instanceTemplates/" + name) + + return resourceComputeInstanceTemplateRead(d, meta) +} + +func retrieveInstanceFromUniqueId(d *schema.ResourceData, meta interface{}, project, self_link_unique string) error { + normalId, _ := parseUniqueId(self_link_unique) + d.SetId(normalId) + d.Set("self_link_unique", self_link_unique) + + return resourceComputeInstanceTemplateRead(d, meta) +} + +// ByCreationTimestamp implements sort.Interface for []*InstanceTemplate based on +// the CreationTimestamp field. 
+type ByCreationTimestamp []*compute.InstanceTemplate + +func (a ByCreationTimestamp) Len() int { return len(a) } +func (a ByCreationTimestamp) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ByCreationTimestamp) Less(i, j int) bool { + return a[i].CreationTimestamp > a[j].CreationTimestamp +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_network.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_network.go similarity index 77% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_network.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_network.go index 07fb531223..bc317f9d06 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_network.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_network.go @@ -1,9 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleComputeNetwork() *schema.Resource { @@ -46,20 +50,20 @@ func DataSourceGoogleComputeNetwork() *schema.Resource { } func dataSourceGoogleComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } name := d.Get("name").(string) network, err := config.NewComputeClient(userAgent).Networks.Get(project, name).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Network Not Found : %s", name)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Network Not Found : %s", name)) } if err := d.Set("gateway_ipv4", network.GatewayIPv4); err != nil { return fmt.Errorf("Error setting gateway_ipv4: %s", err) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_node_types.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_node_types.go similarity index 79% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_node_types.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_node_types.go index d73c01f9cc..6d5dd9b4ba 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_node_types.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_node_types.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" @@ -6,6 +8,8 @@ import ( "sort" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/compute/v1" ) @@ -34,18 +38,18 @@ func DataSourceGoogleComputeNodeTypes() *schema.Resource { } func dataSourceGoogleComputeNodeTypesRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return fmt.Errorf("Please specify zone to get appropriate node types for zone. 
Unable to get zone: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_region_instance_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_instance_group.go similarity index 87% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_region_instance_group.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_instance_group.go index e6c5948411..e957d114f8 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_region_instance_group.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_instance_group.go @@ -1,9 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "log" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/compute/v1" @@ -86,13 +91,13 @@ func DataSourceGoogleComputeRegionInstanceGroup() *schema.Resource { } func dataSourceComputeRegionInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, region, name, err := GetRegionalResourcePropertiesFromSelfLinkOrSchema(d, config) + project, region, name, err := tpgresource.GetRegionalResourcePropertiesFromSelfLinkOrSchema(d, config) if err != nil { return err } @@ -100,7 +105,7 @@ func dataSourceComputeRegionInstanceGroupRead(d *schema.ResourceData, meta inter instanceGroup, err := config.NewComputeClient(userAgent).RegionInstanceGroups.Get( project, region, name).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Region Instance Group %q", name)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Region Instance Group %q", name)) } members, err := config.NewComputeClient(userAgent).RegionInstanceGroups.ListInstances( diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_instance_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_instance_template.go new file mode 100644 index 0000000000..8db869f065 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_instance_template.go @@ -0,0 +1,3 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_network_endpoint_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_network_endpoint_group.go new file mode 100644 index 0000000000..ef7597b2e1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_network_endpoint_group.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeRegionNetworkEndpointGroup() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeRegionNetworkEndpointGroup().Schema) + + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "region") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "self_link") + + return &schema.Resource{ + Read: dataSourceComputeRegionNetworkEndpointGroupRead, + Schema: dsSchema, + } +} + +func dataSourceComputeRegionNetworkEndpointGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + if name, ok := d.GetOk("name"); ok { + project, err 
:= tpgresource.GetProject(d, config) + if err != nil { + return err + } + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/regions/%s/networkEndpointGroups/%s", project, region, name.(string))) + } else if selfLink, ok := d.GetOk("self_link"); ok { + parsed, err := tpgresource.ParseNetworkEndpointGroupRegionalFieldValue(selfLink.(string), d, config) + if err != nil { + return err + } + if err := d.Set("name", parsed.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("project", parsed.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", parsed.Region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + + d.SetId(fmt.Sprintf("projects/%s/regions/%s/networkEndpointGroups/%s", parsed.Project, parsed.Region, parsed.Name)) + } else { + return errors.New("Must provide either `self_link` or `region/name`") + } + + return resourceComputeRegionNetworkEndpointGroupRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_ssl_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_ssl_certificate.go new file mode 100644 index 0000000000..7ad067d252 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_region_ssl_certificate.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleRegionComputeSslCertificate() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeRegionSslCertificate().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "region") + + return &schema.Resource{ + Read: dataSourceComputeRegionSslCertificateRead, + Schema: dsSchema, + } +} + +func dataSourceComputeRegionSslCertificateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + project, region, name, err := tpgresource.GetRegionalResourcePropertiesFromSelfLinkOrSchema(d, config) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/regions/%s/sslCertificates/%s", project, region, name)) + + return resourceComputeRegionSslCertificateRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_regions.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_regions.go similarity index 81% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_regions.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_regions.go index d2b2176550..f73c4a7e47 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_regions.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_regions.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "log" "sort" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "google.golang.org/api/compute/v1" @@ -34,13 +39,13 @@ func DataSourceGoogleComputeRegions() *schema.Resource { } func dataSourceGoogleComputeRegionsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_resource_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_resource_policy.go new file mode 100644 index 0000000000..89fb4ea20c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_resource_policy.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeResourcePolicy() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeResourcePolicy().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "region") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleComputeResourcePolicyRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComputeResourcePolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + name := d.Get("name").(string) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, name)) + + return resourceComputeResourcePolicyRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_router.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_router.go new file mode 100644 index 0000000000..5b21f09e21 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_router.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func DataSourceGoogleComputeRouter() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeRouter().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "network") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "region") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceComputeRouterRead, + Schema: dsSchema, + } +} + +func dataSourceComputeRouterRead(d *schema.ResourceData, meta interface{}) error { + routerName := d.Get("name").(string) + + d.SetId(routerName) + return resourceComputeRouterRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_router_nat.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_router_nat.go new file mode 100644 index 0000000000..793abf2fcb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_router_nat.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeRouterNat() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeRouterNat().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name", "router") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project", "region") + + return &schema.Resource{ + Read: dataSourceGoogleComputeRouterNatRead, + Schema: dsSchema, + } + +} + +func dataSourceGoogleComputeRouterNatRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{region}}/{{router}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return resourceComputeRouterNatRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_router_status.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_router_status.go similarity index 82% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_router_status.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_router_status.go index c8291fb945..b8d73a0ea6 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_router_status.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_router_status.go @@ -1,14 +1,18 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/compute/v1" ) func DataSourceGoogleComputeRouterStatus() *schema.Resource { - routeElemSchema := datasourceSchemaFromResourceSchema(ResourceComputeRoute().Schema) + routeElemSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeRoute().Schema) return &schema.Resource{ Read: dataSourceComputeRouterStatusRead, @@ -57,18 +61,18 @@ func DataSourceGoogleComputeRouterStatus() *schema.Resource { } func dataSourceComputeRouterStatusRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -97,7 +101,7 @@ func dataSourceComputeRouterStatusRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error setting best_routes_for_router: %s", err) } - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_snapshot.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_snapshot.go similarity index 81% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_snapshot.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_snapshot.go index f0f75fa134..316f4d6b2e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_snapshot.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_snapshot.go @@ -1,9 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "sort" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/compute/v1" ) @@ -11,7 +16,7 @@ import ( func DataSourceGoogleComputeSnapshot() *schema.Resource { // Generate datasource schema from resource - dsSchema := datasourceSchemaFromResourceSchema(ResourceComputeSnapshot().Schema) + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeSnapshot().Schema) dsSchema["filter"] = &schema.Schema{ Type: schema.TypeString, @@ -23,7 +28,7 @@ func DataSourceGoogleComputeSnapshot() *schema.Resource { } // Set 'Optional' schema elements - addOptionalFieldsToSchema(dsSchema, "name", "filter", "most_recent", "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name", "filter", "most_recent", "project") dsSchema["name"].ExactlyOneOf = 
[]string{"name", "filter"} dsSchema["filter"].ExactlyOneOf = []string{"name", "filter"} @@ -35,9 +40,9 @@ func DataSourceGoogleComputeSnapshot() *schema.Resource { } func dataSourceGoogleComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -47,7 +52,7 @@ func dataSourceGoogleComputeSnapshotRead(d *schema.ResourceData, meta interface{ } if v, ok := d.GetOk("filter"); ok { - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -58,7 +63,7 @@ func dataSourceGoogleComputeSnapshotRead(d *schema.ResourceData, meta interface{ billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } projectGetCall.Header().Add("X-Goog-User-Project", billingProject) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_ssl_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_ssl_certificate.go new file mode 100644 index 0000000000..522f4e2c3b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_ssl_certificate.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeSslCertificate() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeSslCertificate().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceComputeSslCertificateRead, + Schema: dsSchema, + } +} + +func dataSourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + certificateName := d.Get("name").(string) + + d.SetId(fmt.Sprintf("projects/%s/global/sslCertificates/%s", project, certificateName)) + + return resourceComputeSslCertificateRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_ssl_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_ssl_policy.go new file mode 100644 index 0000000000..5fd1667374 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_ssl_policy.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeSslPolicy() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeSslPolicy().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: datasourceComputeSslPolicyRead, + Schema: dsSchema, + } +} + +func datasourceComputeSslPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + policyName := d.Get("name").(string) + + d.SetId(fmt.Sprintf("projects/%s/global/sslPolicies/%s", project, policyName)) + + return resourceComputeSslPolicyRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_subnetwork.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_subnetwork.go similarity index 86% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_subnetwork.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_subnetwork.go index d1a9fc7ddd..e94b5ecf1f 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_subnetwork.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_subnetwork.go @@ -1,9 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/compute/v1" ) @@ -73,20 +77,20 @@ func DataSourceGoogleComputeSubnetwork() *schema.Resource { } func dataSourceGoogleComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, region, name, err := GetRegionalResourcePropertiesFromSelfLinkOrSchema(d, config) + project, region, name, err := tpgresource.GetRegionalResourcePropertiesFromSelfLinkOrSchema(d, config) if err != nil { return err } subnetwork, err := config.NewComputeClient(userAgent).Subnetworks.Get(project, region, name).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Subnetwork Not Found : %s", name)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Subnetwork Not Found : %s", name)) } if err := d.Set("ip_cidr_range", subnetwork.IpCidrRange); err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_vpn_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_vpn_gateway.go new file mode 100644 index 0000000000..3fc6d85549 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_vpn_gateway.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/compute/v1" +) + +func DataSourceGoogleComputeVpnGateway() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeVpnGatewayRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "description": { + Type: schema.TypeString, + Computed: true, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + "network": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + + vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.NewComputeClient(userAgent)) + + gateway, err := vpnGatewaysService.Get(project, region, name).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("VPN Gateway Not Found : %s", name)) + } + if err := d.Set("network", tpgresource.ConvertSelfLinkToV1(gateway.Network)); err != nil 
{ + return fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("region", gateway.Region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("self_link", gateway.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("description", gateway.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/regions/%s/targetVpnGateways/%s", project, region, name)) + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_zones.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_zones.go similarity index 81% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_zones.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_zones.go index 8d9dca1fbc..97b8dfae1e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_compute_zones.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_compute_zones.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" @@ -6,6 +8,9 @@ import ( "sort" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "google.golang.org/api/compute/v1" @@ -39,8 +44,8 @@ func DataSourceGoogleComputeZones() *schema.Resource { } func dataSourceGoogleComputeZonesRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -50,7 +55,7 @@ func dataSourceGoogleComputeZonesRead(d *schema.ResourceData, meta interface{}) region = r.(string) } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -61,7 +66,7 @@ func dataSourceGoogleComputeZonesRead(d *schema.ResourceData, meta interface{}) } zones := []string{} - err = config.NewComputeClient(userAgent).Zones.List(project).Filter(filter).Pages(config.context, func(zl *compute.ZoneList) error { + err = config.NewComputeClient(userAgent).Zones.List(project).Filter(filter).Pages(config.Context, func(zl *compute.ZoneList) error { for _, zone := range zl.Items { // We have no way to guarantee a specific base path for the region, but the built-in API-level filtering // only lets us query on exact matches, so we do our own filtering here. 
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_global_compute_forwarding_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_global_compute_forwarding_rule.go new file mode 100644 index 0000000000..75b31c5da1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/data_source_google_global_compute_forwarding_rule.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeGlobalForwardingRule() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeGlobalForwardingRule().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleComputeGlobalForwardingRuleRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + name := d.Get("name").(string) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/global/forwardingRules/%s", project, name)) + + return resourceComputeGlobalForwardingRuleRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/disk_type.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/disk_type.go new file mode 100644 index 0000000000..c0be2b363d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/disk_type.go @@ -0,0 +1,18 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// readDiskType finds the disk type with the given name. +func readDiskType(c *transport_tpg.Config, d tpgresource.TerraformResourceData, name string) (*tpgresource.ZonalFieldValue, error) { + return tpgresource.ParseZonalFieldValue("diskTypes", name, "project", "zone", d, c, false) +} + +// readRegionDiskType finds the disk type with the given name. +func readRegionDiskType(c *transport_tpg.Config, d tpgresource.TerraformResourceData, name string) (*tpgresource.RegionalFieldValue, error) { + return tpgresource.ParseRegionalFieldValue("diskTypes", name, "project", "region", "zone", d, c, false) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_disk.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_disk.go new file mode 100644 index 0000000000..e72688a655 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_disk.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var ComputeDiskIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type ComputeDiskIamUpdater struct { + project string + zone string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func ComputeDiskIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + zone, _ := tpgresource.GetZone(d, config) + if zone 
!= "" { + if err := d.Set("zone", zone); err != nil { + return nil, fmt.Errorf("Error setting zone: %s", err) + } + } + values["zone"] = zone + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/disks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeDiskIamUpdater{ + project: values["project"], + zone: values["zone"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("zone", u.zone); err != nil { + return nil, fmt.Errorf("Error setting zone: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func ComputeDiskIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + zone, _ := tpgresource.GetZone(d, config) + if zone != "" { + values["zone"] = zone + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/disks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeDiskIamUpdater{ + project: values["project"], + zone: values["zone"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + 
d.SetId(u.GetResourceId()) + return nil +} + +func (u *ComputeDiskIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyDiskUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ComputeDiskIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyDiskUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ComputeDiskIamUpdater) 
qualifyDiskUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/zones/%s/disks/%s", u.project, u.zone, u.name), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ComputeDiskIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/zones/%s/disks/%s", u.project, u.zone, u.name) +} + +func (u *ComputeDiskIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-compute-disk-%s", u.GetResourceId()) +} + +func (u *ComputeDiskIamUpdater) DescribeResource() string { + return fmt.Sprintf("compute disk %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_image.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_image.go new file mode 100644 index 0000000000..818e84e2d8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_image.go @@ -0,0 +1,225 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var ComputeImageIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "image": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type ComputeImageIamUpdater struct { + project string + image string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func ComputeImageIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("image"); ok { + values["image"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/global/images/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("image").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeImageIamUpdater{ + project: values["project"], + image: values["image"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error 
setting project: %s", err) + } + if err := d.Set("image", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting image: %s", err) + } + + return u, nil +} + +func ComputeImageIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/global/images/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeImageIamUpdater{ + project: values["project"], + image: values["image"], + d: d, + Config: config, + } + if err := d.Set("image", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting image: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ComputeImageIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyImageUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + url, err = transport_tpg.AddQueryParams(url, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", tpgiamresource.IamPolicyVersion)}) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + 
return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ComputeImageIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyImageUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ComputeImageIamUpdater) qualifyImageUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/global/images/%s", u.project, u.image), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ComputeImageIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/global/images/%s", u.project, u.image) +} + +func (u *ComputeImageIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-compute-image-%s", u.GetResourceId()) +} + +func (u *ComputeImageIamUpdater) DescribeResource() string { + return fmt.Sprintf("compute image %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_instance.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_instance.go new file mode 100644 index 0000000000..f0e60b5aab --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_instance.go @@ -0,0 +1,249 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var ComputeInstanceIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "instance_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type ComputeInstanceIamUpdater struct { + project string + zone string + instanceName string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func ComputeInstanceIamUpdaterProducer(d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + zone, _ := tpgresource.GetZone(d, config) + if zone != "" { + if err := d.Set("zone", zone); err != nil { + return nil, fmt.Errorf("Error setting zone: %s", err) + } + } + values["zone"] = zone + if v, ok := d.GetOk("instance_name"); ok { + values["instance_name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("instance_name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeInstanceIamUpdater{ + project: values["project"], + zone: values["zone"], + instanceName: values["instance_name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("zone", u.zone); err != nil { + return nil, fmt.Errorf("Error setting zone: %s", err) + } + if err := d.Set("instance_name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting instance_name: %s", err) + } + + return u, nil +} + +func ComputeInstanceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + zone, _ := tpgresource.GetZone(d, config) + if zone != "" { + values["zone"] = zone + } + + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeInstanceIamUpdater{ + project: values["project"], + zone: values["zone"], + instanceName: values["instance_name"], + d: d, + Config: config, + } + if err := d.Set("instance_name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting instance_name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ComputeInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyInstanceUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + url, err = transport_tpg.AddQueryParams(url, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", tpgiamresource.IamPolicyVersion)}) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ComputeInstanceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj 
:= make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyInstanceUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ComputeInstanceIamUpdater) qualifyInstanceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/zones/%s/instances/%s", u.project, u.zone, u.instanceName), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ComputeInstanceIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/zones/%s/instances/%s", u.project, u.zone, u.instanceName) +} + +func (u *ComputeInstanceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-compute-instance-%s", u.GetResourceId()) +} + +func (u *ComputeInstanceIamUpdater) DescribeResource() string { + return fmt.Sprintf("compute instance %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_region_disk.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_region_disk.go new file mode 100644 index 0000000000..0b4aa6f13c --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_region_disk.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var ComputeRegionDiskIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type ComputeRegionDiskIamUpdater struct { + project string + region string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func ComputeRegionDiskIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != 
"" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + } + values["region"] = region + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/disks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeRegionDiskIamUpdater{ + project: values["project"], + region: values["region"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", u.region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func ComputeRegionDiskIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + values["region"] = region + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/disks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := 
&ComputeRegionDiskIamUpdater{ + project: values["project"], + region: values["region"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ComputeRegionDiskIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyRegionDiskUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ComputeRegionDiskIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyRegionDiskUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + 
RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ComputeRegionDiskIamUpdater) qualifyRegionDiskUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/regions/%s/disks/%s", u.project, u.region, u.name), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ComputeRegionDiskIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/regions/%s/disks/%s", u.project, u.region, u.name) +} + +func (u *ComputeRegionDiskIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-compute-regiondisk-%s", u.GetResourceId()) +} + +func (u *ComputeRegionDiskIamUpdater) DescribeResource() string { + return fmt.Sprintf("compute regiondisk %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_snapshot.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_snapshot.go new file mode 100644 index 0000000000..fa25b65d47 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_snapshot.go @@ -0,0 +1,221 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var ComputeSnapshotIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type ComputeSnapshotIamUpdater struct { + project string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func ComputeSnapshotIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/global/snapshots/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeSnapshotIamUpdater{ + project: values["project"], + name: values["name"], + d: d, + Config: 
config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func ComputeSnapshotIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/global/snapshots/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeSnapshotIamUpdater{ + project: values["project"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ComputeSnapshotIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifySnapshotUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: 
{{err}}", err) + } + + return out, nil +} + +func (u *ComputeSnapshotIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifySnapshotUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ComputeSnapshotIamUpdater) qualifySnapshotUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/global/snapshots/%s", u.project, u.name), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ComputeSnapshotIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/global/snapshots/%s", u.project, u.name) +} + +func (u *ComputeSnapshotIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-compute-snapshot-%s", u.GetResourceId()) +} + +func (u *ComputeSnapshotIamUpdater) DescribeResource() string { + return fmt.Sprintf("compute snapshot %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_subnetwork.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_subnetwork.go new file mode 100644 index 0000000000..02e90c8af1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/iam_compute_subnetwork.go @@ -0,0 +1,249 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var ComputeSubnetworkIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "subnetwork": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type ComputeSubnetworkIamUpdater struct { + project string + region string + subnetwork string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func ComputeSubnetworkIamUpdaterProducer(d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + } + values["region"] = region + if v, ok := d.GetOk("subnetwork"); ok { + values["subnetwork"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/subnetworks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("subnetwork").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeSubnetworkIamUpdater{ + project: values["project"], + region: values["region"], + subnetwork: values["subnetwork"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", u.region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("subnetwork", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting subnetwork: %s", err) + } + + return u, nil +} + +func ComputeSubnetworkIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + values["region"] = region + } + + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/subnetworks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ComputeSubnetworkIamUpdater{ + project: values["project"], + region: values["region"], + subnetwork: values["subnetwork"], + d: d, + Config: config, + } + if err := d.Set("subnetwork", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting subnetwork: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ComputeSubnetworkIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifySubnetworkUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + url, err = transport_tpg.AddQueryParams(url, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", tpgiamresource.IamPolicyVersion)}) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ComputeSubnetworkIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + 
+ obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifySubnetworkUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ComputeSubnetworkIamUpdater) qualifySubnetworkUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ComputeBasePath}}%s/%s", fmt.Sprintf("projects/%s/regions/%s/subnetworks/%s", u.project, u.region, u.subnetwork), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ComputeSubnetworkIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/regions/%s/subnetworks/%s", u.project, u.region, u.subnetwork) +} + +func (u *ComputeSubnetworkIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-compute-subnetwork-%s", u.GetResourceId()) +} + +func (u *ComputeSubnetworkIamUpdater) DescribeResource() string { + return fmt.Sprintf("compute subnetwork %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/image.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/image.go similarity index 89% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/image.go rename to 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/image.go index 21b17dc896..df985b333a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/image.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/image.go @@ -1,10 +1,16 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "regexp" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/googleapi" ) @@ -14,16 +20,16 @@ const ( ) var ( - resolveImageProjectImage = regexp.MustCompile(fmt.Sprintf("projects/(%s)/global/images/(%s)$", ProjectRegex, resolveImageImageRegex)) - resolveImageProjectFamily = regexp.MustCompile(fmt.Sprintf("projects/(%s)/global/images/family/(%s)$", ProjectRegex, resolveImageFamilyRegex)) + resolveImageProjectImage = regexp.MustCompile(fmt.Sprintf("projects/(%s)/global/images/(%s)$", verify.ProjectRegex, resolveImageImageRegex)) + resolveImageProjectFamily = regexp.MustCompile(fmt.Sprintf("projects/(%s)/global/images/family/(%s)$", verify.ProjectRegex, resolveImageFamilyRegex)) resolveImageGlobalImage = regexp.MustCompile(fmt.Sprintf("^global/images/(%s)$", resolveImageImageRegex)) resolveImageGlobalFamily = regexp.MustCompile(fmt.Sprintf("^global/images/family/(%s)$", resolveImageFamilyRegex)) resolveImageFamilyFamily = regexp.MustCompile(fmt.Sprintf("^family/(%s)$", resolveImageFamilyRegex)) - resolveImageProjectImageShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", ProjectRegex, resolveImageImageRegex)) - resolveImageProjectFamilyShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", ProjectRegex, resolveImageFamilyRegex)) + 
resolveImageProjectImageShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", verify.ProjectRegex, resolveImageImageRegex)) + resolveImageProjectFamilyShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", verify.ProjectRegex, resolveImageFamilyRegex)) resolveImageFamily = regexp.MustCompile(fmt.Sprintf("^(%s)$", resolveImageFamilyRegex)) resolveImageImage = regexp.MustCompile(fmt.Sprintf("^(%s)$", resolveImageImageRegex)) - resolveImageLink = regexp.MustCompile(fmt.Sprintf("^https://www.googleapis.com/compute/[a-z0-9]+/projects/(%s)/global/images/(%s)", ProjectRegex, resolveImageImageRegex)) + resolveImageLink = regexp.MustCompile(fmt.Sprintf("^https://www.googleapis.com/compute/[a-z0-9]+/projects/(%s)/global/images/(%s)", verify.ProjectRegex, resolveImageImageRegex)) windowsSqlImage = regexp.MustCompile("^sql-(?:server-)?([0-9]{4})-([a-z]+)-windows-(?:server-)?([0-9]{4})(?:-r([0-9]+))?-dc-v[0-9]+$") canonicalUbuntuLtsImage = regexp.MustCompile("^ubuntu-(minimal-)?([0-9]+)(?:.*(arm64))?.*$") @@ -32,7 +38,7 @@ var ( // built-in projects to look for images/families containing the string // on the left in -var imageMap = map[string]string{ +var ImageMap = map[string]string{ "centos": "centos-cloud", "coreos": "coreos-cloud", "debian": "debian-cloud", @@ -45,7 +51,7 @@ var imageMap = map[string]string{ "windows-sql": "windows-sql-cloud", } -func resolveImageImageExists(c *Config, project, name, userAgent string) (bool, error) { +func resolveImageImageExists(c *transport_tpg.Config, project, name, userAgent string) (bool, error) { if _, err := c.NewComputeClient(userAgent).Images.Get(project, name).Do(); err == nil { return true, nil } else if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { @@ -55,7 +61,7 @@ func resolveImageImageExists(c *Config, project, name, userAgent string) (bool, } } -func resolveImageFamilyExists(c *Config, project, name, userAgent string) (bool, error) { +func resolveImageFamilyExists(c *transport_tpg.Config, project, name, 
userAgent string) (bool, error) { if _, err := c.NewComputeClient(userAgent).Images.GetFromFamily(project, name).Do(); err == nil { return true, nil } else if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { @@ -90,9 +96,9 @@ func sanityTestRegexMatches(expected int, got []string, regexType, name string) // If not, check if it could be a GCP-provided image, and if it exists. If it does, return it as projects/{project}/global/images/{image}. // If not, check if it's a family in the current project. If it is, return it as global/images/family/{family}. // If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family} -func resolveImage(c *Config, project, name, userAgent string) (string, error) { +func ResolveImage(c *transport_tpg.Config, project, name, userAgent string) (string, error) { var builtInProject string - for k, v := range imageMap { + for k, v := range ImageMap { if strings.Contains(name, k) { builtInProject = v break @@ -204,14 +210,14 @@ func resolveImage(c *Config, project, name, userAgent string) (string, error) { return "", fmt.Errorf("Could not find image or family %s", name) } -// resolveImageRefToRelativeURI takes the output of resolveImage and coerces it +// resolveImageRefToRelativeURI takes the output of ResolveImage and coerces it // into a relative URI. In the event that a global/images/IMAGE or -// global/images/family/FAMILY reference is returned from resolveImage, +// global/images/family/FAMILY reference is returned from ResolveImage, // providerProject will be used as the project for the self_link. 
func resolveImageRefToRelativeURI(providerProject, name string) (string, error) { switch { case resolveImageLink.MatchString(name): // https://www.googleapis.com/compute/v1/projects/xyz/global/images/xyz - namePath, err := getRelativePath(name) + namePath, err := tpgresource.GetRelativePath(name) if err != nil { return "", err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/metadata.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/metadata.go new file mode 100644 index 0000000000..a767115575 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/metadata.go @@ -0,0 +1,160 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "errors" + "sort" + + "google.golang.org/api/compute/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Since the google compute API uses optimistic locking, there is a chance +// we need to resubmit our updated metadata. To do this, you need to provide +// an update function that attempts to submit your metadata +func MetadataRetryWrapper(update func() error) error { + return transport_tpg.MetadataRetryWrapper(update) +} + +// Update the metadata (serverMD) according to the provided diff (oldMDMap v +// newMDMap). 
+func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interface{}, serverMD *compute.Metadata) { + curMDMap := make(map[string]string) + // Load metadata on server into map + for _, kv := range serverMD.Items { + // If the server state has a key that we had in our old + // state, but not in our new state, we should delete it + _, okOld := oldMDMap[kv.Key] + _, okNew := newMDMap[kv.Key] + if okOld && !okNew { + continue + } else { + curMDMap[kv.Key] = *kv.Value + } + } + + // Insert new metadata into existing metadata (overwriting when needed) + for key, val := range newMDMap { + curMDMap[key] = val.(string) + } + + // Reformat old metadata into a list + serverMD.Items = nil + for key, val := range curMDMap { + v := val + serverMD.Items = append(serverMD.Items, &compute.MetadataItems{ + Key: key, + Value: &v, + }) + } +} + +// Update the beta metadata (serverMD) according to the provided diff (oldMDMap v +// newMDMap). +func BetaMetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interface{}, serverMD *compute.Metadata) { + curMDMap := make(map[string]string) + // Load metadata on server into map + for _, kv := range serverMD.Items { + // If the server state has a key that we had in our old + // state, but not in our new state, we should delete it + _, okOld := oldMDMap[kv.Key] + _, okNew := newMDMap[kv.Key] + if okOld && !okNew { + continue + } else { + curMDMap[kv.Key] = *kv.Value + } + } + + // Insert new metadata into existing metadata (overwriting when needed) + for key, val := range newMDMap { + curMDMap[key] = val.(string) + } + + // Reformat old metadata into a list + serverMD.Items = nil + for key, val := range curMDMap { + v := val + serverMD.Items = append(serverMD.Items, &compute.MetadataItems{ + Key: key, + Value: &v, + }) + } +} + +func expandComputeMetadata(m map[string]interface{}) []*compute.MetadataItems { + metadata := make([]*compute.MetadataItems, len(m)) + var keys []string + for key := range m { + keys = 
append(keys, key) + } + sort.Strings(keys) + // Append new metadata to existing metadata + for _, key := range keys { + v := m[key].(string) + metadata = append(metadata, &compute.MetadataItems{ + Key: key, + Value: &v, + }) + } + + return metadata +} + +func flattenMetadataBeta(metadata *compute.Metadata) map[string]string { + metadataMap := make(map[string]string) + for _, item := range metadata.Items { + metadataMap[item.Key] = *item.Value + } + return metadataMap +} + +// This function differs from flattenMetadataBeta only in that it takes +// compute.metadata rather than compute.metadata as an argument. It should +// be removed in favour of flattenMetadataBeta if/when all resources using it get +// beta support. +func FlattenMetadata(metadata *compute.Metadata) map[string]interface{} { + metadataMap := make(map[string]interface{}) + for _, item := range metadata.Items { + metadataMap[item.Key] = *item.Value + } + return metadataMap +} + +func resourceInstanceMetadata(d tpgresource.TerraformResourceData) (*compute.Metadata, error) { + m := &compute.Metadata{} + mdMap := d.Get("metadata").(map[string]interface{}) + if v, ok := d.GetOk("metadata_startup_script"); ok && v.(string) != "" { + if w, ok := mdMap["startup-script"]; ok { + // metadata.startup-script could be from metadata_startup_script in the first place + if v != w { + return nil, errors.New("Cannot provide both metadata_startup_script and metadata.startup-script.") + } + } + mdMap["startup-script"] = v + } + if len(mdMap) > 0 { + m.Items = make([]*compute.MetadataItems, 0, len(mdMap)) + var keys []string + for k := range mdMap { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := mdMap[k].(string) + m.Items = append(m.Items, &compute.MetadataItems{ + Key: k, + Value: &v, + }) + } + + // Set the fingerprint. If the metadata has never been set before + // then this will just be blank. 
+ m.Fingerprint = d.Get("metadata_fingerprint").(string) + } + + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_address.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_address.go new file mode 100644 index 0000000000..a8be79f1b5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_address.go @@ -0,0 +1,589 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeAddress() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeAddressCreate, + Read: resourceComputeAddressRead, + Delete: resourceComputeAddressDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeAddressImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`), + Description: `Name of the resource. The name must be 1-63 characters long, and +comply with RFC1035. Specifically, the name must be 1-63 characters +long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' +which means the first character must be a lowercase letter, and all +following characters must be a dash, lowercase letter, or digit, +except the last character, which cannot be a dash.`, + }, + "address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The static external IP address represented by this resource. Only +IPv4 is supported. An address may only be specified for INTERNAL +address types. The IP address must be inside the specified subnetwork, +if any. 
Set by the API if undefined.`, + }, + "address_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"INTERNAL", "EXTERNAL", ""}), + Description: `The type of address to reserve. +Note: if you set this argument's value as 'INTERNAL' you need to leave the 'network_tier' argument unset in that resource block. Default value: "EXTERNAL" Possible values: ["INTERNAL", "EXTERNAL"]`, + Default: "EXTERNAL", + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URL of the network in which to reserve the address. This field +can only be used with INTERNAL type with the VPC_PEERING and +IPSEC_INTERCONNECT purposes.`, + }, + "network_tier": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"PREMIUM", "STANDARD", ""}), + Description: `The networking tier used for configuring this address. If this field is not +specified, it is assumed to be PREMIUM. +This argument should not be used when configuring Internal addresses, because [network tier cannot be set for internal traffic; it's always Premium](https://cloud.google.com/network-tiers/docs/overview). Possible values: ["PREMIUM", "STANDARD"]`, + }, + "prefix_length": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The prefix length if the resource represents an IP range.`, + }, + "purpose": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The purpose of this resource, which can be one of the following values. + +* GCE_ENDPOINT for addresses that are used by VM instances, alias IP +ranges, load balancers, and similar resources. 
+ +* SHARED_LOADBALANCER_VIP for an address that can be used by multiple +internal load balancers. + +* VPC_PEERING for addresses that are reserved for VPC peer networks. + +* IPSEC_INTERCONNECT for addresses created from a private IP range that +are reserved for a VLAN attachment in an HA VPN over Cloud Interconnect +configuration. These addresses are regional resources. + +* PRIVATE_SERVICE_CONNECT for a private network address that is used to +configure Private Service Connect. Only global internal addresses can use +this purpose. + + +This should only be set when using an Internal address.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The Region in which the created address should reside. +If it is not provided, the provider region is used.`, + }, + "subnetwork": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URL of the subnetwork in which to reserve the address. If an IP +address is specified, it must be within the subnetwork's IP range. 
+This field can only be used with INTERNAL type with +GCE_ENDPOINT/DNS_RESOLVER purposes.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "users": { + Type: schema.TypeList, + Computed: true, + Description: `The URLs of the resources that are using this address.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + addressProp, err := expandComputeAddressAddress(d.Get("address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("address"); !tpgresource.IsEmptyValue(reflect.ValueOf(addressProp)) && (ok || !reflect.DeepEqual(v, addressProp)) { + obj["address"] = addressProp + } + addressTypeProp, err := expandComputeAddressAddressType(d.Get("address_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("address_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(addressTypeProp)) && (ok || !reflect.DeepEqual(v, addressTypeProp)) { + obj["addressType"] = addressTypeProp + } + descriptionProp, err := expandComputeAddressDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeAddressName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + purposeProp, err := expandComputeAddressPurpose(d.Get("purpose"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("purpose"); !tpgresource.IsEmptyValue(reflect.ValueOf(purposeProp)) && (ok || !reflect.DeepEqual(v, purposeProp)) { + obj["purpose"] = purposeProp + } + networkTierProp, err := expandComputeAddressNetworkTier(d.Get("network_tier"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network_tier"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkTierProp)) && (ok || !reflect.DeepEqual(v, networkTierProp)) { + obj["networkTier"] = networkTierProp + } + subnetworkProp, err := expandComputeAddressSubnetwork(d.Get("subnetwork"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subnetwork"); !tpgresource.IsEmptyValue(reflect.ValueOf(subnetworkProp)) && (ok || !reflect.DeepEqual(v, subnetworkProp)) { + obj["subnetwork"] = subnetworkProp + } + networkProp, err := expandComputeAddressNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + prefixLengthProp, err := expandComputeAddressPrefixLength(d.Get("prefix_length"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("prefix_length"); !tpgresource.IsEmptyValue(reflect.ValueOf(prefixLengthProp)) && (ok || !reflect.DeepEqual(v, prefixLengthProp)) { + obj["prefixLength"] = prefixLengthProp + } + regionProp, err := expandComputeAddressRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + 
obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/addresses") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Address: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Address: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Address: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/addresses/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating Address", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Address: %s", err) + } + + log.Printf("[DEBUG] Finished creating Address %q: %#v", d.Id(), res) + + return resourceComputeAddressRead(d, meta) +} + +func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/addresses/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := 
tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Address: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeAddress %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Address: %s", err) + } + + if err := d.Set("address", flattenComputeAddressAddress(res["address"], d, config)); err != nil { + return fmt.Errorf("Error reading Address: %s", err) + } + if err := d.Set("address_type", flattenComputeAddressAddressType(res["addressType"], d, config)); err != nil { + return fmt.Errorf("Error reading Address: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeAddressCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading Address: %s", err) + } + if err := d.Set("description", flattenComputeAddressDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Address: %s", err) + } + if err := d.Set("name", flattenComputeAddressName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Address: %s", err) + } + if err := d.Set("purpose", flattenComputeAddressPurpose(res["purpose"], d, config)); err != nil { + return fmt.Errorf("Error reading Address: %s", err) + } + if err := d.Set("network_tier", flattenComputeAddressNetworkTier(res["networkTier"], d, config)); err != nil { + return fmt.Errorf("Error reading Address: %s", err) + } + if err := d.Set("subnetwork", flattenComputeAddressSubnetwork(res["subnetwork"], d, config)); err != nil 
{ + return fmt.Errorf("Error reading Address: %s", err) + } + if err := d.Set("users", flattenComputeAddressUsers(res["users"], d, config)); err != nil { + return fmt.Errorf("Error reading Address: %s", err) + } + if err := d.Set("network", flattenComputeAddressNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Address: %s", err) + } + if err := d.Set("prefix_length", flattenComputeAddressPrefixLength(res["prefixLength"], d, config)); err != nil { + return fmt.Errorf("Error reading Address: %s", err) + } + if err := d.Set("region", flattenComputeAddressRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading Address: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading Address: %s", err) + } + + return nil +} + +func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Address: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/addresses/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Address %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + 
return transport_tpg.HandleNotFoundError(err, d, "Address") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting Address", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Address %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeAddressImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/addresses/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/addresses/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeAddressAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeAddressAddressType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "EXTERNAL" + } + + return v +} + +func flattenComputeAddressCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeAddressDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeAddressName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeAddressPurpose(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeAddressNetworkTier(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenComputeAddressSubnetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeAddressUsers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeAddressNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeAddressPrefixLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeAddressRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandComputeAddressAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeAddressAddressType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeAddressDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeAddressName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeAddressPurpose(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeAddressNetworkTier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeAddressSubnetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for subnetwork: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeAddressNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeAddressPrefixLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeAddressRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_address_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_address_sweeper.go new file mode 100644 index 0000000000..d251336cb6 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_address_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeAddress", testSweepComputeAddress) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeAddress(region string) error { + resourceName := "ComputeAddress" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": 
config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/addresses", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/addresses/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_attached_disk.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_attached_disk.go similarity index 77% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_attached_disk.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_attached_disk.go index b020e82c3f..7e5b610c2c 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_attached_disk.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_attached_disk.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" @@ -6,6 +8,9 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -33,14 +38,14 @@ func ResourceComputeAttachedDisk() *schema.Resource { Required: true, ForceNew: true, Description: `name or self_link of the disk that will be attached.`, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, }, "instance": { Type: schema.TypeString, Required: true, ForceNew: true, Description: `name or self_link of the compute instance that the disk will be attached to. If the self_link is provided then zone and project are extracted from the self link. 
If only the name is used then zone and project must be defined as properties on the resource or provider.`, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, }, "project": { Type: schema.TypeString, @@ -77,24 +82,24 @@ func ResourceComputeAttachedDisk() *schema.Resource { } func resourceAttachedDiskCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) + zv, err := tpgresource.ParseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) if err != nil { return err } disk := d.Get("disk").(string) - diskName := GetResourceNameFromSelfLink(disk) + diskName := tpgresource.GetResourceNameFromSelfLink(disk) diskSrc := fmt.Sprintf("projects/%s/zones/%s/disks/%s", zv.Project, zv.Zone, diskName) // Check if the disk is a regional disk if strings.Contains(disk, "regions") { - rv, err := ParseRegionDiskFieldValue(disk, d, config) + rv, err := tpgresource.ParseRegionDiskFieldValue(disk, d, config) if err != nil { return err } @@ -125,13 +130,13 @@ func resourceAttachedDiskCreate(d *schema.ResourceData, meta interface{}) error } func resourceAttachedDiskRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) + zv, err := tpgresource.ParseZonalFieldValue("instances", 
d.Get("instance").(string), "project", "zone", d, config, false) if err != nil { return err } @@ -142,16 +147,16 @@ func resourceAttachedDiskRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error setting zone: %s", err) } - diskName := GetResourceNameFromSelfLink(d.Get("disk").(string)) + diskName := tpgresource.GetResourceNameFromSelfLink(d.Get("disk").(string)) instance, err := config.NewComputeClient(userAgent).Instances.Get(zv.Project, zv.Zone, zv.Name).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AttachedDisk %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AttachedDisk %q", d.Id())) } // Iterate through the instance's attached disks as this is the only way to // confirm the disk is actually attached - ad := findDiskByName(instance.Disks, diskName) + ad := FindDiskByName(instance.Disks, diskName) if ad == nil { log.Printf("[WARN] Referenced disk wasn't found attached to this compute instance. Removing from state.") d.SetId("") @@ -166,14 +171,14 @@ func resourceAttachedDiskRead(d *schema.ResourceData, meta interface{}) error { } // Force the referenced resources to a self-link in state because it's more specific then name. 
- instancePath, err := getRelativePath(instance.SelfLink) + instancePath, err := tpgresource.GetRelativePath(instance.SelfLink) if err != nil { return err } if err := d.Set("instance", instancePath); err != nil { return fmt.Errorf("Error setting instance: %s", err) } - diskPath, err := getRelativePath(ad.Source) + diskPath, err := tpgresource.GetRelativePath(ad.Source) if err != nil { return err } @@ -185,18 +190,18 @@ func resourceAttachedDiskRead(d *schema.ResourceData, meta interface{}) error { } func resourceAttachedDiskDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) + zv, err := tpgresource.ParseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) if err != nil { return err } - diskName := GetResourceNameFromSelfLink(d.Get("disk").(string)) + diskName := tpgresource.GetResourceNameFromSelfLink(d.Get("disk").(string)) instance, err := config.NewComputeClient(userAgent).Instances.Get(zv.Project, zv.Zone, zv.Name).Do() if err != nil { @@ -205,7 +210,7 @@ func resourceAttachedDiskDelete(d *schema.ResourceData, meta interface{}) error // Confirm the disk is still attached before making the call to detach it. If the disk isn't listed as an attached // disk on the compute instance then return as though the delete call succeed since this is the desired state. 
- ad := findDiskByName(instance.Disks, diskName) + ad := FindDiskByName(instance.Disks, diskName) if ad == nil { return nil } @@ -225,16 +230,16 @@ func resourceAttachedDiskDelete(d *schema.ResourceData, meta interface{}) error } func resourceAttachedDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - err := parseImportId( + err := tpgresource.ParseImportId( []string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config) if err != nil { return nil, err } - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instances/{{instance}}/{{disk}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instances/{{instance}}/{{disk}}") if err != nil { return nil, err } @@ -243,9 +248,9 @@ func resourceAttachedDiskImport(d *schema.ResourceData, meta interface{}) ([]*sc return []*schema.ResourceData{d}, nil } -func findDiskByName(disks []*compute.AttachedDisk, id string) *compute.AttachedDisk { +func FindDiskByName(disks []*compute.AttachedDisk, id string) *compute.AttachedDisk { for _, disk := range disks { - if compareSelfLinkOrResourceName("", disk.Source, id, nil) { + if tpgresource.CompareSelfLinkOrResourceName("", disk.Source, id, nil) { return disk } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_autoscaler.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_autoscaler.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_autoscaler.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_autoscaler.go index 78da6ecae7..bd292c4c7c 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_autoscaler.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_autoscaler.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "fmt" @@ -21,6 +24,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceComputeAutoscaler() *schema.Resource { @@ -171,7 +178,7 @@ of the instances.`, "type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE", ""}), + ValidateFunc: verify.ValidateEnum([]string{"GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE", ""}), Description: `Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values: ["GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE"]`, }, @@ -181,7 +188,7 @@ Stackdriver Monitoring metric. Possible values: ["GAUGE", "DELTA_PER_SECOND", "D "mode": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"OFF", "ONLY_UP", "ON", ""}), + ValidateFunc: verify.ValidateEnum([]string{"OFF", "ONLY_UP", "ON", ""}), Description: `Defines operating mode for this policy. 
Default value: "ON" Possible values: ["OFF", "ONLY_UP", "ON"]`, Default: "ON", }, @@ -280,7 +287,7 @@ to include directives regarding slower scale down, as described above.`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCEName, + ValidateFunc: verify.ValidateGCEName, Description: `Name of the resource. The name must be 1-63 characters long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase letter, and all following @@ -290,7 +297,7 @@ character, which cannot be a dash.`, "target": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `URL of the managed instance group that this autoscaler will scale.`, }, "description": { @@ -303,7 +310,7 @@ character, which cannot be a dash.`, Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `URL of the zone where the instance group resides.`, }, "creation_timestamp": { @@ -327,8 +334,8 @@ character, which cannot be a dash.`, } func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -337,35 +344,35 @@ func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) e nameProp, err := expandComputeAutoscalerName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || 
!reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } descriptionProp, err := expandComputeAutoscalerDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } autoscalingPolicyProp, err := expandComputeAutoscalerAutoscalingPolicy(d.Get("autoscaling_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(reflect.ValueOf(autoscalingPolicyProp)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) { + } else if v, ok := d.GetOkExists("autoscaling_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(autoscalingPolicyProp)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) { obj["autoscalingPolicy"] = autoscalingPolicyProp } targetProp, err := expandComputeAutoscalerTarget(d.Get("target"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(targetProp)) && (ok || !reflect.DeepEqual(v, targetProp)) { + } else if v, ok := d.GetOkExists("target"); !tpgresource.IsEmptyValue(reflect.ValueOf(targetProp)) && (ok || !reflect.DeepEqual(v, targetProp)) { obj["target"] = targetProp } zoneProp, err := expandComputeAutoscalerZone(d.Get("zone"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + } else if v, ok := d.GetOkExists("zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { obj["zone"] = zoneProp } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/autoscalers") + url, 
err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/autoscalers") if err != nil { return err } @@ -373,24 +380,32 @@ func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Creating new Autoscaler: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Autoscaler: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Autoscaler: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -412,33 +427,39 @@ func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) e } func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Autoscaler: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeAutoscaler %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeAutoscaler %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -463,7 +484,7 @@ func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("zone", flattenComputeAutoscalerZone(res["zone"], d, config)); err != nil { return fmt.Errorf("Error reading Autoscaler: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { return fmt.Errorf("Error reading Autoscaler: %s", err) } @@ -471,15 +492,15 @@ func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) err } func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Autoscaler: %s", err) } @@ -489,35 +510,35 @@ func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) e nameProp, err := expandComputeAutoscalerName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } descriptionProp, err := expandComputeAutoscalerDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } autoscalingPolicyProp, err := expandComputeAutoscalerAutoscalingPolicy(d.Get("autoscaling_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) { + } else if v, ok := d.GetOkExists("autoscaling_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) { obj["autoscalingPolicy"] = autoscalingPolicyProp } targetProp, err := expandComputeAutoscalerTarget(d.Get("target"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetProp)) { + } 
else if v, ok := d.GetOkExists("target"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetProp)) { obj["target"] = targetProp } zoneProp, err := expandComputeAutoscalerZone(d.Get("zone"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + } else if v, ok := d.GetOkExists("zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, zoneProp)) { obj["zone"] = zoneProp } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/autoscalers?autoscaler={{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/autoscalers?autoscaler={{name}}") if err != nil { return err } @@ -525,11 +546,19 @@ func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Updating Autoscaler %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Autoscaler %q: %s", d.Id(), err) @@ -549,21 +578,21 @@ func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) e } func resourceComputeAutoscalerDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + 
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Autoscaler: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") if err != nil { return err } @@ -572,13 +601,21 @@ func resourceComputeAutoscalerDelete(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Deleting Autoscaler %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "Autoscaler") + return transport_tpg.HandleNotFoundError(err, d, "Autoscaler") } err = ComputeOperationWaitTime( @@ -594,8 +631,8 @@ func resourceComputeAutoscalerDelete(d *schema.ResourceData, meta interface{}) e } func resourceComputeAutoscalerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/zones/(?P[^/]+)/autoscalers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -605,7 
+642,7 @@ func resourceComputeAutoscalerImport(d *schema.ResourceData, meta interface{}) ( } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -614,19 +651,19 @@ func resourceComputeAutoscalerImport(d *schema.ResourceData, meta interface{}) ( return []*schema.ResourceData{d}, nil } -func flattenComputeAutoscalerCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeAutoscalerName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeAutoscalerDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeAutoscalerAutoscalingPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -655,10 +692,10 @@ func flattenComputeAutoscalerAutoscalingPolicy(v interface{}, d *schema.Resource flattenComputeAutoscalerAutoscalingPolicyScalingSchedules(original["scalingSchedules"], d, config) return []interface{}{transformed} } -func flattenComputeAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyMinReplicas(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -672,10 +709,10 @@ func flattenComputeAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d *sche return v // let terraform core handle it otherwise } -func flattenComputeAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -689,10 +726,10 @@ func flattenComputeAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d *sche return v // let terraform core handle it otherwise } -func flattenComputeAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -706,11 +743,11 @@ func flattenComputeAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d *s return v // let terraform core handle it otherwise } -func flattenComputeAutoscalerAutoscalingPolicyMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ return v } -func flattenComputeAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -725,7 +762,7 @@ func flattenComputeAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d *s flattenComputeAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(original["timeWindowSec"], d, config) return []interface{}{transformed} } -func flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -740,10 +777,10 @@ func flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas( flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(original["percent"], d, config) return []interface{}{transformed} } -func flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -757,10 +794,10 @@ func flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasF return v // let terraform core handle it otherwise } -func flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -774,10 +811,10 @@ func flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasP return v // let terraform core handle it otherwise } -func flattenComputeAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -791,7 +828,7 @@ func flattenComputeAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(v inte return v // let terraform core handle it otherwise } -func flattenComputeAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -806,19 +843,19 @@ func flattenComputeAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d *s flattenComputeAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(original["predictiveMethod"], d, config) return []interface{}{transformed} } -func flattenComputeAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyCpuUtilizationTarget(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenComputeAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return "NONE" } return v } -func flattenComputeAutoscalerAutoscalingPolicyMetric(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyMetric(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -838,19 +875,19 @@ func flattenComputeAutoscalerAutoscalingPolicyMetric(v interface{}, d *schema.Re } return transformed } -func flattenComputeAutoscalerAutoscalingPolicyMetricName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyMetricName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeAutoscalerAutoscalingPolicyMetricType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyMetricType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -863,11 +900,11 @@ func flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interfa flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(original["utilizationTarget"], d, config) return []interface{}{transformed} } -func flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -887,10 +924,10 @@ func flattenComputeAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d } return transformed } -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -904,18 +941,18 @@ func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplica return v // let terraform core handle it otherwise } -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesSchedule(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -929,37 +966,37 @@ func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(v inte return v // let terraform core handle it otherwise } -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeAutoscalerTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeAutoscalerZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeAutoscalerZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func expandComputeAutoscalerName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -978,79 +1015,79 @@ func expandComputeAutoscalerAutoscalingPolicy(v interface{}, d TerraformResource transformedMaxReplicas, err := expandComputeAutoscalerAutoscalingPolicyMaxReplicas(original["max_replicas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxReplicas); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxReplicas); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxNumReplicas"] = transformedMaxReplicas } transformedCooldownPeriod, err := 
expandComputeAutoscalerAutoscalingPolicyCooldownPeriod(original["cooldown_period"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCooldownPeriod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCooldownPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["coolDownPeriodSec"] = transformedCooldownPeriod } transformedMode, err := expandComputeAutoscalerAutoscalingPolicyMode(original["mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mode"] = transformedMode } transformedScaleInControl, err := expandComputeAutoscalerAutoscalingPolicyScaleInControl(original["scale_in_control"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScaleInControl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScaleInControl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scaleInControl"] = transformedScaleInControl } transformedCpuUtilization, err := expandComputeAutoscalerAutoscalingPolicyCpuUtilization(original["cpu_utilization"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCpuUtilization); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCpuUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cpuUtilization"] = transformedCpuUtilization } transformedMetric, err := expandComputeAutoscalerAutoscalingPolicyMetric(original["metric"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMetric); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMetric); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["customMetricUtilizations"] = transformedMetric } transformedLoadBalancingUtilization, err := expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(original["load_balancing_utilization"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLoadBalancingUtilization); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLoadBalancingUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["loadBalancingUtilization"] = transformedLoadBalancingUtilization } transformedScalingSchedules, err := expandComputeAutoscalerAutoscalingPolicyScalingSchedules(original["scaling_schedules"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScalingSchedules); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScalingSchedules); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scalingSchedules"] = transformedScalingSchedules } return transformed, nil } -func expandComputeAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } 
-func expandComputeAutoscalerAutoscalingPolicyMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1062,21 +1099,21 @@ func expandComputeAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d Ter transformedMaxScaledInReplicas, err := expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(original["max_scaled_in_replicas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxScaledInReplicas); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxScaledInReplicas); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxScaledInReplicas"] = transformedMaxScaledInReplicas } transformedTimeWindowSec, err := expandComputeAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(original["time_window_sec"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeWindowSec); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeWindowSec); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeWindowSec"] = transformedTimeWindowSec } return transformed, nil } -func expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1088,33 +1125,33 @@ func expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(v transformedFixed, err := expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(original["fixed"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFixed); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFixed); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fixed"] = transformedFixed } transformedPercent, err := expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(original["percent"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percent"] = transformedPercent } return transformed, nil } -func expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { +func expandComputeAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1126,29 +1163,29 @@ func expandComputeAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d Ter transformedTarget, err := expandComputeAutoscalerAutoscalingPolicyCpuUtilizationTarget(original["target"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTarget); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTarget); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["utilizationTarget"] = transformedTarget } transformedPredictiveMethod, err := expandComputeAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(original["predictive_method"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPredictiveMethod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPredictiveMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["predictiveMethod"] = transformedPredictiveMethod } return transformed, nil } -func expandComputeAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyMetric(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyMetric(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1161,21 +1198,21 @@ func expandComputeAutoscalerAutoscalingPolicyMetric(v interface{}, d TerraformRe transformedName, err := expandComputeAutoscalerAutoscalingPolicyMetricName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["metric"] = transformedName } transformedTarget, err := expandComputeAutoscalerAutoscalingPolicyMetricTarget(original["target"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTarget); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTarget); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["utilizationTarget"] = transformedTarget } transformedType, err := expandComputeAutoscalerAutoscalingPolicyMetricType(original["type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["utilizationTargetType"] = transformedType } @@ -1184,19 +1221,19 @@ func expandComputeAutoscalerAutoscalingPolicyMetric(v interface{}, 
d TerraformRe return req, nil } -func expandComputeAutoscalerAutoscalingPolicyMetricName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyMetricName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyMetricType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyMetricType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1208,18 +1245,18 @@ func expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interfac transformedTarget, err := expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(original["target"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTarget); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTarget); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["utilizationTarget"] = transformedTarget } return transformed, nil } -func expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { if v == nil { return map[string]interface{}{}, nil } @@ -1238,39 +1275,39 @@ func expandComputeAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d T transformedSchedule, err := expandComputeAutoscalerAutoscalingPolicyScalingSchedulesSchedule(original["schedule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchedule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schedule"] = transformedSchedule } transformedTimeZone, err := expandComputeAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(original["time_zone"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeZone); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeZone); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeZone"] = transformedTimeZone } transformedDurationSec, err := expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(original["duration_sec"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDurationSec); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDurationSec); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["durationSec"] = 
transformedDurationSec } transformedDisabled, err := expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDisabled(original["disabled"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disabled"] = transformedDisabled } transformedDescription, err := expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } - transformedName, err := expandString(original["name"], d, config) + transformedName, err := tpgresource.ExpandString(original["name"], d, config) if err != nil { return nil, err } @@ -1279,40 +1316,40 @@ func expandComputeAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d T return m, nil } -func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeAutoscalerTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeAutoscalerTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil || v.(string) == "" { return "", nil } - f, err := parseZonalFieldValue("instanceGroupManagers", v.(string), "project", "zone", d, config, true) + f, err := tpgresource.ParseZonalFieldValue("instanceGroupManagers", v.(string), "project", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for target: %s", err) } - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+f.RelativeLink()) + url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}"+f.RelativeLink()) if err != nil { return nil, err } @@ -1320,8 +1357,8 @@ func expandComputeAutoscalerTarget(v interface{}, d TerraformResourceData, confi return url, nil } -func expandComputeAutoscalerZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) +func expandComputeAutoscalerZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("zones", v.(string), "project", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for zone: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_autoscaler_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_autoscaler_sweeper.go new file mode 100644 index 0000000000..315f74bc72 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_autoscaler_sweeper.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeAutoscaler", testSweepComputeAutoscaler) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeAutoscaler(region string) error { + resourceName := "ComputeAutoscaler" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/aggregated/autoscalers", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + var rl []interface{} + zones := resourceList.(map[string]interface{}) + // Loop through every zone in the list response + for _, zonesValue := range zones { + zone := zonesValue.(map[string]interface{}) + for k, v := range zone { + // Zone map either has resources or a warning stating there were no resources found in the zone + if k != "warning" { + resourcesInZone := v.([]interface{}) + rl = append(rl, resourcesInZone...) + } + } + } + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}" + if obj["zone"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource zone was nil", resourceName) + return nil + } + zone := tpgresource.GetResourceNameFromSelfLink(obj["zone"].(string)) + deleteTemplate = strings.Replace(deleteTemplate, "{{zone}}", zone, -1) + + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: 
"DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_bucket.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_bucket.go new file mode 100644 index 0000000000..87558efed3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_bucket.go @@ -0,0 +1,1214 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeBackendBucket() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeBackendBucketCreate, + Read: resourceComputeBackendBucketRead, + Update: resourceComputeBackendBucketUpdate, + Delete: resourceComputeBackendBucketDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeBackendBucketImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "bucket_name": { + Type: schema.TypeString, + Required: true, + Description: `Cloud Storage bucket name.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`), + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and +match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means +the first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the +last character, which cannot be a dash.`, + }, + "cdn_policy": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Cloud CDN configuration for this Backend Bucket.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bypass_cache_on_request_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings.`, + MaxItems: 5, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_name": { + Type: schema.TypeString, + Optional: true, + Description: `The header field name to match on when bypassing cache. Values are case-insensitive.`, + }, + }, + }, + }, + "cache_key_policy": { + Type: schema.TypeList, + Optional: true, + Description: `The CacheKeyPolicy for this CdnPolicy.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "include_http_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Allows HTTP request headers (by name) to be used in the +cache key.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.query_string_whitelist", "cdn_policy.0.cache_key_policy.0.include_http_headers"}, + }, + "query_string_whitelist": { + Type: schema.TypeList, + Optional: true, + Description: `Names of query string parameters to include in cache keys. +Default parameters are always included. 
'&' and '=' will +be percent encoded and not treated as delimiters.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.query_string_whitelist", "cdn_policy.0.cache_key_policy.0.include_http_headers"}, + }, + }, + }, + }, + "cache_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC", ""}), + Description: `Specifies the cache setting for all responses from this backend. +The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC Possible values: ["USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC"]`, + }, + "client_ttl": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Specifies the maximum allowed TTL for cached content served by this origin.`, + }, + "default_ttl": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Specifies the default TTL for cached content served by this origin for responses +that do not have an existing valid TTL (max-age or s-max-age).`, + }, + "max_ttl": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Specifies the maximum allowed TTL for cached content served by this origin.`, + }, + "negative_caching": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects.`, + }, + "negative_caching_policy": { + Type: schema.TypeList, + Optional: true, + Description: `Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy. 
+Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's default cache TTLs.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": { + Type: schema.TypeInt, + Optional: true, + Description: `The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 +can be specified as values, and you cannot specify a status code more than once.`, + }, + "ttl": { + Type: schema.TypeInt, + Optional: true, + Description: `The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s +(30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.`, + }, + }, + }, + }, + "request_coalescing": { + Type: schema.TypeBool, + Optional: true, + Description: `If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin.`, + }, + "serve_while_stale": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache.`, + }, + "signed_url_cache_max_age_sec": { + Type: schema.TypeInt, + Optional: true, + Description: `Maximum number of seconds the response to a signed URL request will +be considered fresh. After this time period, +the response will be revalidated before being served. +When serving responses to signed URL requests, +Cloud CDN will internally behave as though +all responses from this backend had a "Cache-Control: public, +max-age=[TTL]" header, regardless of any existing Cache-Control +header. 
The actual headers served in responses will not be altered.`, + }, + }, + }, + }, + "compression_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"AUTOMATIC", "DISABLED", ""}), + Description: `Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header. Possible values: ["AUTOMATIC", "DISABLED"]`, + }, + "custom_response_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Headers that the HTTP/S load balancer should add to proxied responses.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An optional textual description of the resource; provided by the +client when the resource is created.`, + }, + "edge_security_policy": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The security policy associated with this backend bucket.`, + }, + "enable_cdn": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, enable Cloud CDN for this BackendBucket.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeBackendBucketCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + bucketNameProp, err := expandComputeBackendBucketBucketName(d.Get("bucket_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bucket_name"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(bucketNameProp)) && (ok || !reflect.DeepEqual(v, bucketNameProp)) { + obj["bucketName"] = bucketNameProp + } + cdnPolicyProp, err := expandComputeBackendBucketCdnPolicy(d.Get("cdn_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cdn_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(cdnPolicyProp)) && (ok || !reflect.DeepEqual(v, cdnPolicyProp)) { + obj["cdnPolicy"] = cdnPolicyProp + } + compressionModeProp, err := expandComputeBackendBucketCompressionMode(d.Get("compression_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("compression_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(compressionModeProp)) && (ok || !reflect.DeepEqual(v, compressionModeProp)) { + obj["compressionMode"] = compressionModeProp + } + edgeSecurityPolicyProp, err := expandComputeBackendBucketEdgeSecurityPolicy(d.Get("edge_security_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("edge_security_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(edgeSecurityPolicyProp)) && (ok || !reflect.DeepEqual(v, edgeSecurityPolicyProp)) { + obj["edgeSecurityPolicy"] = edgeSecurityPolicyProp + } + customResponseHeadersProp, err := expandComputeBackendBucketCustomResponseHeaders(d.Get("custom_response_headers"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("custom_response_headers"); !tpgresource.IsEmptyValue(reflect.ValueOf(customResponseHeadersProp)) && (ok || !reflect.DeepEqual(v, customResponseHeadersProp)) { + obj["customResponseHeaders"] = customResponseHeadersProp + } + descriptionProp, err := expandComputeBackendBucketDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + 
enableCdnProp, err := expandComputeBackendBucketEnableCdn(d.Get("enable_cdn"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_cdn"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableCdnProp)) && (ok || !reflect.DeepEqual(v, enableCdnProp)) { + obj["enableCdn"] = enableCdnProp + } + nameProp, err := expandComputeBackendBucketName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + obj, err = resourceComputeBackendBucketEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new BackendBucket: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackendBucket: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating BackendBucket: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/backendBuckets/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating BackendBucket", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't 
actually create + d.SetId("") + return fmt.Errorf("Error waiting to create BackendBucket: %s", err) + } + + // security_policy isn't set by Create / Update + if o, n := d.GetChange("edge_security_policy"); o.(string) != n.(string) { + pol, err := tpgresource.ParseSecurityPolicyFieldValue(n.(string), d, config) + if err != nil { + return errwrap.Wrapf("Error parsing Backend Service security policy: {{err}}", err) + } + + spr := emptySecurityPolicyReference() + spr.SecurityPolicy = pol.RelativeLink() + op, err := config.NewComputeClient(userAgent).BackendBuckets.SetEdgeSecurityPolicy(project, obj["name"].(string), spr).Do() + if err != nil { + return errwrap.Wrapf("Error setting Backend Service security policy: {{err}}", err) + } + // This uses the create timeout for simplicity, though technically this code appears in both create and update + waitErr := ComputeOperationWaitTime(config, op, project, "Setting Backend Service Security Policy", userAgent, d.Timeout(schema.TimeoutCreate)) + if waitErr != nil { + return waitErr + } + } + + log.Printf("[DEBUG] Finished creating BackendBucket %q: %#v", d.Id(), res) + + return resourceComputeBackendBucketRead(d, meta) +} + +func resourceComputeBackendBucketRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackendBucket: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeBackendBucket %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading BackendBucket: %s", err) + } + + if err := d.Set("bucket_name", flattenComputeBackendBucketBucketName(res["bucketName"], d, config)); err != nil { + return fmt.Errorf("Error reading BackendBucket: %s", err) + } + if err := d.Set("cdn_policy", flattenComputeBackendBucketCdnPolicy(res["cdnPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading BackendBucket: %s", err) + } + if err := d.Set("compression_mode", flattenComputeBackendBucketCompressionMode(res["compressionMode"], d, config)); err != nil { + return fmt.Errorf("Error reading BackendBucket: %s", err) + } + if err := d.Set("edge_security_policy", flattenComputeBackendBucketEdgeSecurityPolicy(res["edgeSecurityPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading BackendBucket: %s", err) + } + if err := d.Set("custom_response_headers", flattenComputeBackendBucketCustomResponseHeaders(res["customResponseHeaders"], d, config)); err != nil { + return fmt.Errorf("Error reading BackendBucket: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeBackendBucketCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading BackendBucket: %s", err) + } + if err := d.Set("description", flattenComputeBackendBucketDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading BackendBucket: %s", err) + } + if err := d.Set("enable_cdn", flattenComputeBackendBucketEnableCdn(res["enableCdn"], d, config)); err != nil { + return fmt.Errorf("Error reading BackendBucket: %s", err) + } + if err := d.Set("name", 
flattenComputeBackendBucketName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading BackendBucket: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading BackendBucket: %s", err) + } + + return nil +} + +func resourceComputeBackendBucketUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackendBucket: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + bucketNameProp, err := expandComputeBackendBucketBucketName(d.Get("bucket_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bucket_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bucketNameProp)) { + obj["bucketName"] = bucketNameProp + } + cdnPolicyProp, err := expandComputeBackendBucketCdnPolicy(d.Get("cdn_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cdn_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cdnPolicyProp)) { + obj["cdnPolicy"] = cdnPolicyProp + } + compressionModeProp, err := expandComputeBackendBucketCompressionMode(d.Get("compression_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("compression_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, compressionModeProp)) { + obj["compressionMode"] = compressionModeProp + } + edgeSecurityPolicyProp, err := expandComputeBackendBucketEdgeSecurityPolicy(d.Get("edge_security_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("edge_security_policy"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, edgeSecurityPolicyProp)) { + obj["edgeSecurityPolicy"] = edgeSecurityPolicyProp + } + customResponseHeadersProp, err := expandComputeBackendBucketCustomResponseHeaders(d.Get("custom_response_headers"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("custom_response_headers"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, customResponseHeadersProp)) { + obj["customResponseHeaders"] = customResponseHeadersProp + } + descriptionProp, err := expandComputeBackendBucketDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + enableCdnProp, err := expandComputeBackendBucketEnableCdn(d.Get("enable_cdn"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_cdn"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableCdnProp)) { + obj["enableCdn"] = enableCdnProp + } + nameProp, err := expandComputeBackendBucketName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + obj, err = resourceComputeBackendBucketEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating BackendBucket %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating BackendBucket %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating BackendBucket %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating BackendBucket", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + // security_policy isn't set by Create / Update + if o, n := d.GetChange("edge_security_policy"); o.(string) != n.(string) { + pol, err := tpgresource.ParseSecurityPolicyFieldValue(n.(string), d, config) + if err != nil { + return errwrap.Wrapf("Error parsing Backend Service security policy: {{err}}", err) + } + + spr := emptySecurityPolicyReference() + spr.SecurityPolicy = pol.RelativeLink() + op, err := config.NewComputeClient(userAgent).BackendBuckets.SetEdgeSecurityPolicy(project, obj["name"].(string), spr).Do() + if err != nil { + return errwrap.Wrapf("Error setting Backend Service security policy: {{err}}", err) + } + // This uses the create timeout for simplicity, though technically this code appears in both create and update + waitErr := ComputeOperationWaitTime(config, op, project, "Setting Backend Service Security Policy", userAgent, d.Timeout(schema.TimeoutCreate)) + if waitErr != nil { + return waitErr + } + } + return resourceComputeBackendBucketRead(d, meta) +} + +func resourceComputeBackendBucketDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackendBucket: %s", err) + } + 
billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting BackendBucket %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "BackendBucket") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting BackendBucket", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting BackendBucket %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeBackendBucketImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/backendBuckets/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/backendBuckets/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeBackendBucketBucketName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendBucketCdnPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original 
:= v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cache_key_policy"] = + flattenComputeBackendBucketCdnPolicyCacheKeyPolicy(original["cacheKeyPolicy"], d, config) + transformed["signed_url_cache_max_age_sec"] = + flattenComputeBackendBucketCdnPolicySignedUrlCacheMaxAgeSec(original["signedUrlCacheMaxAgeSec"], d, config) + transformed["default_ttl"] = + flattenComputeBackendBucketCdnPolicyDefaultTtl(original["defaultTtl"], d, config) + transformed["max_ttl"] = + flattenComputeBackendBucketCdnPolicyMaxTtl(original["maxTtl"], d, config) + transformed["client_ttl"] = + flattenComputeBackendBucketCdnPolicyClientTtl(original["clientTtl"], d, config) + transformed["negative_caching"] = + flattenComputeBackendBucketCdnPolicyNegativeCaching(original["negativeCaching"], d, config) + transformed["negative_caching_policy"] = + flattenComputeBackendBucketCdnPolicyNegativeCachingPolicy(original["negativeCachingPolicy"], d, config) + transformed["cache_mode"] = + flattenComputeBackendBucketCdnPolicyCacheMode(original["cacheMode"], d, config) + transformed["serve_while_stale"] = + flattenComputeBackendBucketCdnPolicyServeWhileStale(original["serveWhileStale"], d, config) + transformed["request_coalescing"] = + flattenComputeBackendBucketCdnPolicyRequestCoalescing(original["requestCoalescing"], d, config) + transformed["bypass_cache_on_request_headers"] = + flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(original["bypassCacheOnRequestHeaders"], d, config) + return []interface{}{transformed} +} +func flattenComputeBackendBucketCdnPolicyCacheKeyPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["query_string_whitelist"] = + 
flattenComputeBackendBucketCdnPolicyCacheKeyPolicyQueryStringWhitelist(original["queryStringWhitelist"], d, config) + transformed["include_http_headers"] = + flattenComputeBackendBucketCdnPolicyCacheKeyPolicyIncludeHttpHeaders(original["includeHttpHeaders"], d, config) + return []interface{}{transformed} +} +func flattenComputeBackendBucketCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendBucketCdnPolicyCacheKeyPolicyIncludeHttpHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendBucketCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeBackendBucketCdnPolicyDefaultTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeBackendBucketCdnPolicyMaxTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are 
represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeBackendBucketCdnPolicyClientTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeBackendBucketCdnPolicyNegativeCaching(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendBucketCdnPolicyNegativeCachingPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "code": flattenComputeBackendBucketCdnPolicyNegativeCachingPolicyCode(original["code"], d, config), + "ttl": flattenComputeBackendBucketCdnPolicyNegativeCachingPolicyTtl(original["ttl"], d, config), + }) + } + return transformed +} +func flattenComputeBackendBucketCdnPolicyNegativeCachingPolicyCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + 
return v // let terraform core handle it otherwise +} + +func flattenComputeBackendBucketCdnPolicyNegativeCachingPolicyTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeBackendBucketCdnPolicyCacheMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendBucketCdnPolicyServeWhileStale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeBackendBucketCdnPolicyRequestCoalescing(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "header_name": flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(original["headerName"], d, config), + 
}) + } + return transformed +} +func flattenComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendBucketCompressionMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendBucketEdgeSecurityPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendBucketCustomResponseHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendBucketCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendBucketDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendBucketEnableCdn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendBucketName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeBackendBucketBucketName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCacheKeyPolicy, err := expandComputeBackendBucketCdnPolicyCacheKeyPolicy(original["cache_key_policy"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCacheKeyPolicy); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["cacheKeyPolicy"] = transformedCacheKeyPolicy + } + + transformedSignedUrlCacheMaxAgeSec, err := expandComputeBackendBucketCdnPolicySignedUrlCacheMaxAgeSec(original["signed_url_cache_max_age_sec"], d, config) + if err != nil { + return nil, err + } else { + transformed["signedUrlCacheMaxAgeSec"] = transformedSignedUrlCacheMaxAgeSec + } + + transformedDefaultTtl, err := expandComputeBackendBucketCdnPolicyDefaultTtl(original["default_ttl"], d, config) + if err != nil { + return nil, err + } else { + transformed["defaultTtl"] = transformedDefaultTtl + } + + transformedMaxTtl, err := expandComputeBackendBucketCdnPolicyMaxTtl(original["max_ttl"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxTtl"] = transformedMaxTtl + } + + transformedClientTtl, err := expandComputeBackendBucketCdnPolicyClientTtl(original["client_ttl"], d, config) + if err != nil { + return nil, err + } else { + transformed["clientTtl"] = transformedClientTtl + } + + transformedNegativeCaching, err := expandComputeBackendBucketCdnPolicyNegativeCaching(original["negative_caching"], d, config) + if err != nil { + return nil, err + } else { + transformed["negativeCaching"] = transformedNegativeCaching + } + + transformedNegativeCachingPolicy, err := expandComputeBackendBucketCdnPolicyNegativeCachingPolicy(original["negative_caching_policy"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNegativeCachingPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["negativeCachingPolicy"] = transformedNegativeCachingPolicy + } + + transformedCacheMode, err := expandComputeBackendBucketCdnPolicyCacheMode(original["cache_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCacheMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["cacheMode"] = transformedCacheMode + } + + transformedServeWhileStale, err := expandComputeBackendBucketCdnPolicyServeWhileStale(original["serve_while_stale"], d, config) + if err != nil { + return nil, err + } else { + transformed["serveWhileStale"] = transformedServeWhileStale + } + + transformedRequestCoalescing, err := expandComputeBackendBucketCdnPolicyRequestCoalescing(original["request_coalescing"], d, config) + if err != nil { + return nil, err + } else { + transformed["requestCoalescing"] = transformedRequestCoalescing + } + + transformedBypassCacheOnRequestHeaders, err := expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(original["bypass_cache_on_request_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBypassCacheOnRequestHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bypassCacheOnRequestHeaders"] = transformedBypassCacheOnRequestHeaders + } + + return transformed, nil +} + +func expandComputeBackendBucketCdnPolicyCacheKeyPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedQueryStringWhitelist, err := expandComputeBackendBucketCdnPolicyCacheKeyPolicyQueryStringWhitelist(original["query_string_whitelist"], d, config) + if err != nil { + return nil, err + } else { + transformed["queryStringWhitelist"] = transformedQueryStringWhitelist + } + + transformedIncludeHttpHeaders, err := expandComputeBackendBucketCdnPolicyCacheKeyPolicyIncludeHttpHeaders(original["include_http_headers"], d, config) + if err != nil { + return nil, err + } else { + transformed["includeHttpHeaders"] = transformedIncludeHttpHeaders + } + + return transformed, nil +} + +func 
expandComputeBackendBucketCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicyCacheKeyPolicyIncludeHttpHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicyDefaultTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicyMaxTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicyClientTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicyNegativeCaching(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicyNegativeCachingPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCode, err := expandComputeBackendBucketCdnPolicyNegativeCachingPolicyCode(original["code"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["code"] = transformedCode + } + + 
transformedTtl, err := expandComputeBackendBucketCdnPolicyNegativeCachingPolicyTtl(original["ttl"], d, config) + if err != nil { + return nil, err + } else { + transformed["ttl"] = transformedTtl + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeBackendBucketCdnPolicyNegativeCachingPolicyCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicyNegativeCachingPolicyTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicyCacheMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicyServeWhileStale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicyRequestCoalescing(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHeaderName, err := expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(original["header_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["headerName"] = transformedHeaderName + } + + req = append(req, transformed) + } + return req, nil +} + +func 
expandComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCompressionMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketEdgeSecurityPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketCustomResponseHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketEnableCdn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendBucketName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceComputeBackendBucketEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // This custom encoder helps prevent sending 0 for clientTtl, defaultTtl and + // maxTtl in API calls to update these values when unset in the provider + // (doing so results in an API level error) + c, cdnPolicyOk := d.GetOk("cdn_policy") + + // Only apply during updates + if !cdnPolicyOk || obj["cdnPolicy"] == nil { + return obj, nil + } + + currentCdnPolicies := c.([]interface{}) + + // state does not contain cdnPolicy, so we can return early here as well + if len(currentCdnPolicies) == 0 { + return obj, nil + } + + futureCdnPolicy := obj["cdnPolicy"].(map[string]interface{}) + currentCdnPolicy := 
currentCdnPolicies[0].(map[string]interface{}) + + cacheMode, ok := futureCdnPolicy["cache_mode"].(string) + // Fallback to state if doesn't exist in object + if !ok { + cacheMode = currentCdnPolicy["cache_mode"].(string) + } + + switch cacheMode { + case "USE_ORIGIN_HEADERS": + if _, ok := futureCdnPolicy["clientTtl"]; ok { + delete(futureCdnPolicy, "clientTtl") + } + if _, ok := futureCdnPolicy["defaultTtl"]; ok { + delete(futureCdnPolicy, "defaultTtl") + } + if _, ok := futureCdnPolicy["maxTtl"]; ok { + delete(futureCdnPolicy, "maxTtl") + } + } + + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_bucket_signed_url_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_bucket_signed_url_key.go new file mode 100644 index 0000000000..e137af8207 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_bucket_signed_url_key.go @@ -0,0 +1,364 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeBackendBucketSignedUrlKey() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeBackendBucketSignedUrlKeyCreate, + Read: resourceComputeBackendBucketSignedUrlKeyRead, + Delete: resourceComputeBackendBucketSignedUrlKeyDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "backend_bucket": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The backend bucket this signed URL key belongs.`, + }, + "key_value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `128-bit key value used for signing the URL. 
The key value must be a +valid RFC 4648 Section 5 base64url encoded string.`, + Sensitive: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`), + Description: `Name of the signed URL key.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeBackendBucketSignedUrlKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + keyNameProp, err := expandNestedComputeBackendBucketSignedUrlKeyName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(keyNameProp)) && (ok || !reflect.DeepEqual(v, keyNameProp)) { + obj["keyName"] = keyNameProp + } + keyValueProp, err := expandNestedComputeBackendBucketSignedUrlKeyKeyValue(d.Get("key_value"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("key_value"); !tpgresource.IsEmptyValue(reflect.ValueOf(keyValueProp)) && (ok || !reflect.DeepEqual(v, keyValueProp)) { + obj["keyValue"] = keyValueProp + } + backendBucketProp, err := expandNestedComputeBackendBucketSignedUrlKeyBackendBucket(d.Get("backend_bucket"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backend_bucket"); !tpgresource.IsEmptyValue(reflect.ValueOf(backendBucketProp)) && (ok || !reflect.DeepEqual(v, backendBucketProp)) { + obj["backendBucket"] = backendBucketProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "signedUrlKey/{{project}}/backendBuckets/{{backend_bucket}}/") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer 
transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{backend_bucket}}/addSignedUrlKey") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new BackendBucketSignedUrlKey: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackendBucketSignedUrlKey: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating BackendBucketSignedUrlKey: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/backendBuckets/{{backend_bucket}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating BackendBucketSignedUrlKey", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create BackendBucketSignedUrlKey: %s", err) + } + + log.Printf("[DEBUG] Finished creating BackendBucketSignedUrlKey %q: %#v", d.Id(), res) + + return resourceComputeBackendBucketSignedUrlKeyRead(d, meta) +} + +func resourceComputeBackendBucketSignedUrlKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{backend_bucket}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackendBucketSignedUrlKey: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeBackendBucketSignedUrlKey %q", d.Id())) + } + + res, err = flattenNestedComputeBackendBucketSignedUrlKey(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. 
+ log.Printf("[DEBUG] Removing ComputeBackendBucketSignedUrlKey because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading BackendBucketSignedUrlKey: %s", err) + } + + if err := d.Set("name", flattenNestedComputeBackendBucketSignedUrlKeyName(res["keyName"], d, config)); err != nil { + return fmt.Errorf("Error reading BackendBucketSignedUrlKey: %s", err) + } + + return nil +} + +func resourceComputeBackendBucketSignedUrlKeyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackendBucketSignedUrlKey: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "signedUrlKey/{{project}}/backendBuckets/{{backend_bucket}}/") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{backend_bucket}}/deleteSignedUrlKey?keyName={{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting BackendBucketSignedUrlKey %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, 
"BackendBucketSignedUrlKey") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting BackendBucketSignedUrlKey", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting BackendBucketSignedUrlKey %q: %#v", d.Id(), res) + return nil +} + +func flattenNestedComputeBackendBucketSignedUrlKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedComputeBackendBucketSignedUrlKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeBackendBucketSignedUrlKeyKeyValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeBackendBucketSignedUrlKeyBackendBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("backendBuckets", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for backend_bucket: %s", err) + } + return f.RelativeLink(), nil +} + +func flattenNestedComputeBackendBucketSignedUrlKey(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["cdnPolicy"] + if !ok || v == nil { + return nil, nil + } + res = v.(map[string]interface{}) + + v, ok = res["signedUrlKeyNames"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value cdnPolicy.signedUrlKeyNames. 
Actual value: %v", v) + } + + _, item, err := resourceComputeBackendBucketSignedUrlKeyFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputeBackendBucketSignedUrlKeyFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName, err := expandNestedComputeBackendBucketSignedUrlKeyName(d.Get("name"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedName := flattenNestedComputeBackendBucketSignedUrlKeyName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + // List response only contains the ID - construct a response object. + item := map[string]interface{}{ + "keyName": itemRaw, + } + + itemName := flattenNestedComputeBackendBucketSignedUrlKeyName(item["keyName"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with keyName= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_bucket_signed_url_key_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_bucket_signed_url_key_sweeper.go new file mode 100644 index 0000000000..0c407d9525 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_bucket_signed_url_key_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeBackendBucketSignedUrlKey", testSweepComputeBackendBucketSignedUrlKey) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeBackendBucketSignedUrlKey(region string) error { + resourceName := "ComputeBackendBucketSignedUrlKey" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := 
&tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/backendBuckets/{{backend_bucket}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["cdnPolicy"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/backendBuckets/{{backend_bucket}}/deleteSignedUrlKey?keyName={{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_bucket_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_bucket_sweeper.go new file mode 100644 index 0000000000..ed96619afa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_bucket_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeBackendBucket", testSweepComputeBackendBucket) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeBackendBucket(region string) error { + resourceName := "ComputeBackendBucket" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/backendBuckets", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/backendBuckets/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : 
%s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_backend_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_backend_service.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service.go index bf475398d3..d37e582876 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_backend_service.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "bytes" @@ -24,13 +27,18 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/googleapi" ) // suppress changes on sample_rate if log_config is set to disabled. 
func suppressWhenDisabled(k, old, new string, d *schema.ResourceData) bool { _, n := d.GetChange("log_config.0.enable") - if isEmptyValue(reflect.ValueOf(n)) { + if tpgresource.IsEmptyValue(reflect.ValueOf(n)) { return true } return false @@ -60,7 +68,7 @@ func resourceGoogleComputeBackendServiceBackendHash(v interface{}) int { m := v.(map[string]interface{}) log.Printf("[DEBUG] hashing %v", m) - if group, err := getRelativePath(m["group"].(string)); err != nil { + if group, err := tpgresource.GetRelativePath(m["group"].(string)); err != nil { log.Printf("[WARN] Error on retrieving relative path of instance group: %s", err) buf.WriteString(fmt.Sprintf("%s-", m["group"].(string))) } else { @@ -170,8 +178,8 @@ func resourceGoogleComputeBackendServiceBackendHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%v-", v.(bool))) } - log.Printf("[DEBUG] computed hash value of %v from %v", hashcode(buf.String()), buf.String()) - return hashcode(buf.String()) + log.Printf("[DEBUG] computed hash value of %v from %v", tpgresource.Hashcode(buf.String()), buf.String()) + return tpgresource.Hashcode(buf.String()) } func ResourceComputeBackendService() *schema.Resource { @@ -231,6 +239,21 @@ When the load balancing scheme is INTERNAL, this field is not used.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "bypass_cache_on_request_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. +The cache is bypassed for all cdnPolicy.cacheMode settings.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_name": { + Type: schema.TypeString, + Required: true, + Description: `The header field name to match on when bypassing cache. 
Values are case-insensitive.`, + }, + }, + }, + }, "cache_key_policy": { Type: schema.TypeList, Optional: true, @@ -319,7 +342,7 @@ delimiters.`, Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC", ""}), Description: `Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC Possible values: ["USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC"]`, }, @@ -450,7 +473,7 @@ Defaults to 3.`, "compression_mode": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"AUTOMATIC", "DISABLED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"AUTOMATIC", "DISABLED", ""}), Description: `Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header. 
Possible values: ["AUTOMATIC", "DISABLED"]`, }, "connection_draining_timeout_sec": { @@ -576,7 +599,7 @@ responses.`, "edge_security_policy": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The resource URL for the edge security policy associated with this backend service.`, }, "enable_cdn": { @@ -600,7 +623,7 @@ For internal load balancing, a URL to a HealthCheck resource must be specified i Elem: &schema.Schema{ Type: schema.TypeString, }, - Set: selfLinkRelativePathHash, + Set: tpgresource.SelfLinkRelativePathHash, }, "iap": { Type: schema.TypeList, @@ -633,7 +656,7 @@ For internal load balancing, a URL to a HealthCheck resource must be specified i Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"EXTERNAL", "INTERNAL_SELF_MANAGED", "EXTERNAL_MANAGED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"EXTERNAL", "INTERNAL_SELF_MANAGED", "EXTERNAL_MANAGED", ""}), Description: `Indicates whether the backend service will be used with internal or external load balancing. A backend service created for one type of load balancing cannot be used with the other. For more information, refer to @@ -693,7 +716,7 @@ by a locally installed custom policy implementation.`, "name": { Type: schema.TypeString, Required: true, - ValidateFunc: validateEnum([]string{"ROUND_ROBIN", "LEAST_REQUEST", "RING_HASH", "RANDOM", "ORIGINAL_DESTINATION", "MAGLEV"}), + ValidateFunc: verify.ValidateEnum([]string{"ROUND_ROBIN", "LEAST_REQUEST", "RING_HASH", "RANDOM", "ORIGINAL_DESTINATION", "MAGLEV"}), Description: `The name of a locality load balancer policy to be used. The value should be one of the predefined ones as supported by localityLbPolicy, although at the moment only ROUND_ROBIN is supported. 
@@ -740,7 +763,7 @@ The possible values are: "locality_lb_policy": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"ROUND_ROBIN", "LEAST_REQUEST", "RING_HASH", "RANDOM", "ORIGINAL_DESTINATION", "MAGLEV", "WEIGHTED_MAGLEV", ""}), + ValidateFunc: verify.ValidateEnum([]string{"ROUND_ROBIN", "LEAST_REQUEST", "RING_HASH", "RANDOM", "ORIGINAL_DESTINATION", "MAGLEV", "WEIGHTED_MAGLEV", ""}), Description: `The load balancing algorithm used within the scope of the locality. The possible values are: @@ -822,6 +845,7 @@ If logging is enabled, logs will be exported to Stackdriver.`, the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0.`, + Default: 1.0, AtLeastOneOf: []string{"log_config.0.enable", "log_config.0.sample_rate"}, }, }, @@ -988,7 +1012,7 @@ scheme is EXTERNAL.`, Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"HTTP", "HTTPS", "HTTP2", "TCP", "SSL", "GRPC", ""}), + ValidateFunc: verify.ValidateEnum([]string{"HTTP", "HTTPS", "HTTP2", "TCP", "SSL", "GRPC", ""}), Description: `The protocol this BackendService uses to communicate with backends. The default is HTTP. **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer types and may result in errors if used with the GA API. Possible values: ["HTTP", "HTTPS", "HTTP2", "TCP", "SSL", "GRPC"]`, @@ -996,7 +1020,7 @@ types and may result in errors if used with the GA API. 
Possible values: ["HTTP" "security_policy": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The security policy associated with this backend service.`, }, "security_settings": { @@ -1012,7 +1036,7 @@ load_balancing_scheme set to INTERNAL_SELF_MANAGED.`, "client_tls_policy": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource.`, @@ -1034,7 +1058,7 @@ alt name matches one of the specified values.`, Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", ""}), Description: `Type of session affinity to use. The default is NONE. Session affinity is not applicable if the protocol is UDP. Possible values: ["NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE"]`, }, @@ -1082,7 +1106,7 @@ func computeBackendServiceBackendSchema() *schema.Resource { "group": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Description: `The fully-qualified URL of an Instance Group or Network Endpoint Group resource. In case of instance group this defines the list of instances that serve traffic. 
Member virtual machine @@ -1104,7 +1128,7 @@ partial URL.`, "balancing_mode": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"UTILIZATION", "RATE", "CONNECTION", ""}), + ValidateFunc: verify.ValidateEnum([]string{"UTILIZATION", "RATE", "CONNECTION", ""}), Description: `Specifies the balancing mode for this backend. For global HTTP(S) or TCP/SSL load balancing, the default is @@ -1209,8 +1233,8 @@ CPU utilization target for the group. Valid range is [0.0, 1.0].`, } func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -1219,79 +1243,79 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ affinityCookieTtlSecProp, err := expandComputeBackendServiceAffinityCookieTtlSec(d.Get("affinity_cookie_ttl_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("affinity_cookie_ttl_sec"); !isEmptyValue(reflect.ValueOf(affinityCookieTtlSecProp)) && (ok || !reflect.DeepEqual(v, affinityCookieTtlSecProp)) { + } else if v, ok := d.GetOkExists("affinity_cookie_ttl_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(affinityCookieTtlSecProp)) && (ok || !reflect.DeepEqual(v, affinityCookieTtlSecProp)) { obj["affinityCookieTtlSec"] = affinityCookieTtlSecProp } backendsProp, err := expandComputeBackendServiceBackend(d.Get("backend"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("backend"); !isEmptyValue(reflect.ValueOf(backendsProp)) && (ok || !reflect.DeepEqual(v, backendsProp)) { + } else if v, ok := d.GetOkExists("backend"); !tpgresource.IsEmptyValue(reflect.ValueOf(backendsProp)) && (ok || !reflect.DeepEqual(v, backendsProp)) { obj["backends"] = backendsProp } circuitBreakersProp, err := 
expandComputeBackendServiceCircuitBreakers(d.Get("circuit_breakers"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("circuit_breakers"); !isEmptyValue(reflect.ValueOf(circuitBreakersProp)) && (ok || !reflect.DeepEqual(v, circuitBreakersProp)) { + } else if v, ok := d.GetOkExists("circuit_breakers"); !tpgresource.IsEmptyValue(reflect.ValueOf(circuitBreakersProp)) && (ok || !reflect.DeepEqual(v, circuitBreakersProp)) { obj["circuitBreakers"] = circuitBreakersProp } compressionModeProp, err := expandComputeBackendServiceCompressionMode(d.Get("compression_mode"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("compression_mode"); !isEmptyValue(reflect.ValueOf(compressionModeProp)) && (ok || !reflect.DeepEqual(v, compressionModeProp)) { + } else if v, ok := d.GetOkExists("compression_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(compressionModeProp)) && (ok || !reflect.DeepEqual(v, compressionModeProp)) { obj["compressionMode"] = compressionModeProp } consistentHashProp, err := expandComputeBackendServiceConsistentHash(d.Get("consistent_hash"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("consistent_hash"); !isEmptyValue(reflect.ValueOf(consistentHashProp)) && (ok || !reflect.DeepEqual(v, consistentHashProp)) { + } else if v, ok := d.GetOkExists("consistent_hash"); !tpgresource.IsEmptyValue(reflect.ValueOf(consistentHashProp)) && (ok || !reflect.DeepEqual(v, consistentHashProp)) { obj["consistentHash"] = consistentHashProp } cdnPolicyProp, err := expandComputeBackendServiceCdnPolicy(d.Get("cdn_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("cdn_policy"); !isEmptyValue(reflect.ValueOf(cdnPolicyProp)) && (ok || !reflect.DeepEqual(v, cdnPolicyProp)) { + } else if v, ok := d.GetOkExists("cdn_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(cdnPolicyProp)) && (ok || !reflect.DeepEqual(v, cdnPolicyProp)) { obj["cdnPolicy"] = cdnPolicyProp } 
connectionDrainingProp, err := expandComputeBackendServiceConnectionDraining(nil, d, config) if err != nil { return err - } else if !isEmptyValue(reflect.ValueOf(connectionDrainingProp)) { + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(connectionDrainingProp)) { obj["connectionDraining"] = connectionDrainingProp } customRequestHeadersProp, err := expandComputeBackendServiceCustomRequestHeaders(d.Get("custom_request_headers"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("custom_request_headers"); !isEmptyValue(reflect.ValueOf(customRequestHeadersProp)) && (ok || !reflect.DeepEqual(v, customRequestHeadersProp)) { + } else if v, ok := d.GetOkExists("custom_request_headers"); !tpgresource.IsEmptyValue(reflect.ValueOf(customRequestHeadersProp)) && (ok || !reflect.DeepEqual(v, customRequestHeadersProp)) { obj["customRequestHeaders"] = customRequestHeadersProp } customResponseHeadersProp, err := expandComputeBackendServiceCustomResponseHeaders(d.Get("custom_response_headers"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("custom_response_headers"); !isEmptyValue(reflect.ValueOf(customResponseHeadersProp)) && (ok || !reflect.DeepEqual(v, customResponseHeadersProp)) { + } else if v, ok := d.GetOkExists("custom_response_headers"); !tpgresource.IsEmptyValue(reflect.ValueOf(customResponseHeadersProp)) && (ok || !reflect.DeepEqual(v, customResponseHeadersProp)) { obj["customResponseHeaders"] = customResponseHeadersProp } fingerprintProp, err := expandComputeBackendServiceFingerprint(d.Get("fingerprint"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { obj["fingerprint"] = fingerprintProp } descriptionProp, err := 
expandComputeBackendServiceDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } enableCDNProp, err := expandComputeBackendServiceEnableCDN(d.Get("enable_cdn"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("enable_cdn"); !isEmptyValue(reflect.ValueOf(enableCDNProp)) && (ok || !reflect.DeepEqual(v, enableCDNProp)) { + } else if v, ok := d.GetOkExists("enable_cdn"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableCDNProp)) && (ok || !reflect.DeepEqual(v, enableCDNProp)) { obj["enableCDN"] = enableCDNProp } healthChecksProp, err := expandComputeBackendServiceHealthChecks(d.Get("health_checks"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("health_checks"); !isEmptyValue(reflect.ValueOf(healthChecksProp)) && (ok || !reflect.DeepEqual(v, healthChecksProp)) { + } else if v, ok := d.GetOkExists("health_checks"); !tpgresource.IsEmptyValue(reflect.ValueOf(healthChecksProp)) && (ok || !reflect.DeepEqual(v, healthChecksProp)) { obj["healthChecks"] = healthChecksProp } iapProp, err := expandComputeBackendServiceIap(d.Get("iap"), d, config) @@ -1303,79 +1327,79 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ loadBalancingSchemeProp, err := expandComputeBackendServiceLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("load_balancing_scheme"); !isEmptyValue(reflect.ValueOf(loadBalancingSchemeProp)) && (ok || !reflect.DeepEqual(v, loadBalancingSchemeProp)) { + } else if v, ok := d.GetOkExists("load_balancing_scheme"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(loadBalancingSchemeProp)) && (ok || !reflect.DeepEqual(v, loadBalancingSchemeProp)) { obj["loadBalancingScheme"] = loadBalancingSchemeProp } localityLbPolicyProp, err := expandComputeBackendServiceLocalityLbPolicy(d.Get("locality_lb_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("locality_lb_policy"); !isEmptyValue(reflect.ValueOf(localityLbPolicyProp)) && (ok || !reflect.DeepEqual(v, localityLbPolicyProp)) { + } else if v, ok := d.GetOkExists("locality_lb_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(localityLbPolicyProp)) && (ok || !reflect.DeepEqual(v, localityLbPolicyProp)) { obj["localityLbPolicy"] = localityLbPolicyProp } localityLbPoliciesProp, err := expandComputeBackendServiceLocalityLbPolicies(d.Get("locality_lb_policies"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("locality_lb_policies"); !isEmptyValue(reflect.ValueOf(localityLbPoliciesProp)) && (ok || !reflect.DeepEqual(v, localityLbPoliciesProp)) { + } else if v, ok := d.GetOkExists("locality_lb_policies"); !tpgresource.IsEmptyValue(reflect.ValueOf(localityLbPoliciesProp)) && (ok || !reflect.DeepEqual(v, localityLbPoliciesProp)) { obj["localityLbPolicies"] = localityLbPoliciesProp } nameProp, err := expandComputeBackendServiceName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } outlierDetectionProp, err := expandComputeBackendServiceOutlierDetection(d.Get("outlier_detection"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("outlier_detection"); !isEmptyValue(reflect.ValueOf(outlierDetectionProp)) && (ok || !reflect.DeepEqual(v, outlierDetectionProp)) { + } else if v, 
ok := d.GetOkExists("outlier_detection"); !tpgresource.IsEmptyValue(reflect.ValueOf(outlierDetectionProp)) && (ok || !reflect.DeepEqual(v, outlierDetectionProp)) { obj["outlierDetection"] = outlierDetectionProp } portNameProp, err := expandComputeBackendServicePortName(d.Get("port_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("port_name"); !isEmptyValue(reflect.ValueOf(portNameProp)) && (ok || !reflect.DeepEqual(v, portNameProp)) { + } else if v, ok := d.GetOkExists("port_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(portNameProp)) && (ok || !reflect.DeepEqual(v, portNameProp)) { obj["portName"] = portNameProp } protocolProp, err := expandComputeBackendServiceProtocol(d.Get("protocol"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(reflect.ValueOf(protocolProp)) && (ok || !reflect.DeepEqual(v, protocolProp)) { + } else if v, ok := d.GetOkExists("protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(protocolProp)) && (ok || !reflect.DeepEqual(v, protocolProp)) { obj["protocol"] = protocolProp } securityPolicyProp, err := expandComputeBackendServiceSecurityPolicy(d.Get("security_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("security_policy"); !isEmptyValue(reflect.ValueOf(securityPolicyProp)) && (ok || !reflect.DeepEqual(v, securityPolicyProp)) { + } else if v, ok := d.GetOkExists("security_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(securityPolicyProp)) && (ok || !reflect.DeepEqual(v, securityPolicyProp)) { obj["securityPolicy"] = securityPolicyProp } edgeSecurityPolicyProp, err := expandComputeBackendServiceEdgeSecurityPolicy(d.Get("edge_security_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("edge_security_policy"); !isEmptyValue(reflect.ValueOf(edgeSecurityPolicyProp)) && (ok || !reflect.DeepEqual(v, edgeSecurityPolicyProp)) { + } else if v, ok := d.GetOkExists("edge_security_policy"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(edgeSecurityPolicyProp)) && (ok || !reflect.DeepEqual(v, edgeSecurityPolicyProp)) { obj["edgeSecurityPolicy"] = edgeSecurityPolicyProp } securitySettingsProp, err := expandComputeBackendServiceSecuritySettings(d.Get("security_settings"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("security_settings"); !isEmptyValue(reflect.ValueOf(securitySettingsProp)) && (ok || !reflect.DeepEqual(v, securitySettingsProp)) { + } else if v, ok := d.GetOkExists("security_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(securitySettingsProp)) && (ok || !reflect.DeepEqual(v, securitySettingsProp)) { obj["securitySettings"] = securitySettingsProp } sessionAffinityProp, err := expandComputeBackendServiceSessionAffinity(d.Get("session_affinity"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("session_affinity"); !isEmptyValue(reflect.ValueOf(sessionAffinityProp)) && (ok || !reflect.DeepEqual(v, sessionAffinityProp)) { + } else if v, ok := d.GetOkExists("session_affinity"); !tpgresource.IsEmptyValue(reflect.ValueOf(sessionAffinityProp)) && (ok || !reflect.DeepEqual(v, sessionAffinityProp)) { obj["sessionAffinity"] = sessionAffinityProp } timeoutSecProp, err := expandComputeBackendServiceTimeoutSec(d.Get("timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(reflect.ValueOf(timeoutSecProp)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { + } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeoutSecProp)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { obj["timeoutSec"] = timeoutSecProp } logConfigProp, err := expandComputeBackendServiceLogConfig(d.Get("log_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(reflect.ValueOf(logConfigProp)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { + } else if v, ok := 
d.GetOkExists("log_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(logConfigProp)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { obj["logConfig"] = logConfigProp } @@ -1384,7 +1408,7 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices") if err != nil { return err } @@ -1392,24 +1416,32 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Creating new BackendService: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for BackendService: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating BackendService: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/backendServices/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/backendServices/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -1427,7 +1459,7 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ // security_policy isn't 
set by Create / Update if o, n := d.GetChange("security_policy"); o.(string) != n.(string) { - pol, err := ParseSecurityPolicyFieldValue(n.(string), d, config) + pol, err := tpgresource.ParseSecurityPolicyFieldValue(n.(string), d, config) if err != nil { return errwrap.Wrapf("Error parsing Backend Service security policy: {{err}}", err) } @@ -1446,7 +1478,7 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ } // edge security_policy isn't set by Create / Update if o, n := d.GetChange("edge_security_policy"); o.(string) != n.(string) { - pol, err := ParseSecurityPolicyFieldValue(n.(string), d, config) + pol, err := tpgresource.ParseSecurityPolicyFieldValue(n.(string), d, config) if err != nil { return errwrap.Wrapf("Error parsing Backend Service edge security policy: {{err}}", err) } @@ -1470,33 +1502,39 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ } func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for BackendService: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := 
SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeBackendService %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeBackendService %q", d.Id())) } res, err = resourceComputeBackendServiceDecoder(d, meta, res) @@ -1614,7 +1652,7 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) if err := d.Set("log_config", flattenComputeBackendServiceLogConfig(res["logConfig"], d, config)); err != nil { return fmt.Errorf("Error reading BackendService: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { return fmt.Errorf("Error reading BackendService: %s", err) } @@ -1622,15 +1660,15 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) } func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for BackendService: %s", err) } @@ -1640,79 +1678,79 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ affinityCookieTtlSecProp, err := expandComputeBackendServiceAffinityCookieTtlSec(d.Get("affinity_cookie_ttl_sec"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("affinity_cookie_ttl_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, affinityCookieTtlSecProp)) { + } else if v, ok := d.GetOkExists("affinity_cookie_ttl_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, affinityCookieTtlSecProp)) { obj["affinityCookieTtlSec"] = affinityCookieTtlSecProp } backendsProp, err := expandComputeBackendServiceBackend(d.Get("backend"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("backend"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, backendsProp)) { + } else if v, ok := d.GetOkExists("backend"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, backendsProp)) { obj["backends"] = backendsProp } circuitBreakersProp, err := expandComputeBackendServiceCircuitBreakers(d.Get("circuit_breakers"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("circuit_breakers"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, circuitBreakersProp)) { + } else if v, ok := d.GetOkExists("circuit_breakers"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, circuitBreakersProp)) { obj["circuitBreakers"] = circuitBreakersProp } compressionModeProp, err := expandComputeBackendServiceCompressionMode(d.Get("compression_mode"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("compression_mode"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, compressionModeProp)) { + } else if v, ok := d.GetOkExists("compression_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, compressionModeProp)) { obj["compressionMode"] = compressionModeProp } consistentHashProp, err := expandComputeBackendServiceConsistentHash(d.Get("consistent_hash"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("consistent_hash"); !isEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, consistentHashProp)) { + } else if v, ok := d.GetOkExists("consistent_hash"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, consistentHashProp)) { obj["consistentHash"] = consistentHashProp } cdnPolicyProp, err := expandComputeBackendServiceCdnPolicy(d.Get("cdn_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("cdn_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cdnPolicyProp)) { + } else if v, ok := d.GetOkExists("cdn_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cdnPolicyProp)) { obj["cdnPolicy"] = cdnPolicyProp } connectionDrainingProp, err := expandComputeBackendServiceConnectionDraining(nil, d, config) if err != nil { return err - } else if !isEmptyValue(reflect.ValueOf(connectionDrainingProp)) { + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(connectionDrainingProp)) { obj["connectionDraining"] = connectionDrainingProp } customRequestHeadersProp, err := expandComputeBackendServiceCustomRequestHeaders(d.Get("custom_request_headers"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("custom_request_headers"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, customRequestHeadersProp)) { + } else if v, ok := d.GetOkExists("custom_request_headers"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, customRequestHeadersProp)) { obj["customRequestHeaders"] = customRequestHeadersProp } customResponseHeadersProp, err := expandComputeBackendServiceCustomResponseHeaders(d.Get("custom_response_headers"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("custom_response_headers"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, customResponseHeadersProp)) { + } else if v, ok := d.GetOkExists("custom_response_headers"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
customResponseHeadersProp)) { obj["customResponseHeaders"] = customResponseHeadersProp } fingerprintProp, err := expandComputeBackendServiceFingerprint(d.Get("fingerprint"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { obj["fingerprint"] = fingerprintProp } descriptionProp, err := expandComputeBackendServiceDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } enableCDNProp, err := expandComputeBackendServiceEnableCDN(d.Get("enable_cdn"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("enable_cdn"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableCDNProp)) { + } else if v, ok := d.GetOkExists("enable_cdn"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableCDNProp)) { obj["enableCDN"] = enableCDNProp } healthChecksProp, err := expandComputeBackendServiceHealthChecks(d.Get("health_checks"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("health_checks"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, healthChecksProp)) { + } else if v, ok := d.GetOkExists("health_checks"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, healthChecksProp)) { obj["healthChecks"] = healthChecksProp } iapProp, err := expandComputeBackendServiceIap(d.Get("iap"), d, config) @@ -1724,79 +1762,79 @@ func 
resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ loadBalancingSchemeProp, err := expandComputeBackendServiceLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("load_balancing_scheme"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, loadBalancingSchemeProp)) { + } else if v, ok := d.GetOkExists("load_balancing_scheme"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, loadBalancingSchemeProp)) { obj["loadBalancingScheme"] = loadBalancingSchemeProp } localityLbPolicyProp, err := expandComputeBackendServiceLocalityLbPolicy(d.Get("locality_lb_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("locality_lb_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, localityLbPolicyProp)) { + } else if v, ok := d.GetOkExists("locality_lb_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, localityLbPolicyProp)) { obj["localityLbPolicy"] = localityLbPolicyProp } localityLbPoliciesProp, err := expandComputeBackendServiceLocalityLbPolicies(d.Get("locality_lb_policies"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("locality_lb_policies"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, localityLbPoliciesProp)) { + } else if v, ok := d.GetOkExists("locality_lb_policies"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, localityLbPoliciesProp)) { obj["localityLbPolicies"] = localityLbPoliciesProp } nameProp, err := expandComputeBackendServiceName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = 
nameProp } outlierDetectionProp, err := expandComputeBackendServiceOutlierDetection(d.Get("outlier_detection"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("outlier_detection"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, outlierDetectionProp)) { + } else if v, ok := d.GetOkExists("outlier_detection"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, outlierDetectionProp)) { obj["outlierDetection"] = outlierDetectionProp } portNameProp, err := expandComputeBackendServicePortName(d.Get("port_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("port_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, portNameProp)) { + } else if v, ok := d.GetOkExists("port_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, portNameProp)) { obj["portName"] = portNameProp } protocolProp, err := expandComputeBackendServiceProtocol(d.Get("protocol"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, protocolProp)) { + } else if v, ok := d.GetOkExists("protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, protocolProp)) { obj["protocol"] = protocolProp } securityPolicyProp, err := expandComputeBackendServiceSecurityPolicy(d.Get("security_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("security_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, securityPolicyProp)) { + } else if v, ok := d.GetOkExists("security_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, securityPolicyProp)) { obj["securityPolicy"] = securityPolicyProp } edgeSecurityPolicyProp, err := expandComputeBackendServiceEdgeSecurityPolicy(d.Get("edge_security_policy"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("edge_security_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, edgeSecurityPolicyProp)) { + } else if v, ok := d.GetOkExists("edge_security_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, edgeSecurityPolicyProp)) { obj["edgeSecurityPolicy"] = edgeSecurityPolicyProp } securitySettingsProp, err := expandComputeBackendServiceSecuritySettings(d.Get("security_settings"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("security_settings"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, securitySettingsProp)) { + } else if v, ok := d.GetOkExists("security_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, securitySettingsProp)) { obj["securitySettings"] = securitySettingsProp } sessionAffinityProp, err := expandComputeBackendServiceSessionAffinity(d.Get("session_affinity"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("session_affinity"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sessionAffinityProp)) { + } else if v, ok := d.GetOkExists("session_affinity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sessionAffinityProp)) { obj["sessionAffinity"] = sessionAffinityProp } timeoutSecProp, err := expandComputeBackendServiceTimeoutSec(d.Get("timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { + } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { obj["timeoutSec"] = timeoutSecProp } logConfigProp, err := expandComputeBackendServiceLogConfig(d.Get("log_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, logConfigProp)) { + } else if v, ok := d.GetOkExists("log_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { obj["logConfig"] = logConfigProp } @@ -1805,7 +1843,7 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{name}}") if err != nil { return err } @@ -1813,11 +1851,19 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Updating BackendService %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating BackendService %q: %s", d.Id(), err) @@ -1835,7 +1881,7 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ // security_policy isn't set by Create / Update if o, n := d.GetChange("security_policy"); o.(string) != n.(string) { - pol, err := ParseSecurityPolicyFieldValue(n.(string), d, config) + pol, err := tpgresource.ParseSecurityPolicyFieldValue(n.(string), d, config) if err != nil { return errwrap.Wrapf("Error parsing Backend Service security policy: {{err}}", err) } @@ -1854,7 +1900,7 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta 
interface{ } // edge security_policy isn't set by Create / Update if o, n := d.GetChange("edge_security_policy"); o.(string) != n.(string) { - pol, err := ParseSecurityPolicyFieldValue(n.(string), d, config) + pol, err := tpgresource.ParseSecurityPolicyFieldValue(n.(string), d, config) if err != nil { return errwrap.Wrapf("Error parsing Backend Service edge security policy: {{err}}", err) } @@ -1875,21 +1921,21 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ } func resourceComputeBackendServiceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for BackendService: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{name}}") if err != nil { return err } @@ -1898,13 +1944,21 @@ func resourceComputeBackendServiceDelete(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Deleting BackendService %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: 
userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "BackendService") + return transport_tpg.HandleNotFoundError(err, d, "BackendService") } err = ComputeOperationWaitTime( @@ -1920,8 +1974,8 @@ func resourceComputeBackendServiceDelete(d *schema.ResourceData, meta interface{ } func resourceComputeBackendServiceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/global/backendServices/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -1930,7 +1984,7 @@ func resourceComputeBackendServiceImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/backendServices/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/backendServices/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -1939,10 +1993,10 @@ func resourceComputeBackendServiceImport(d *schema.ResourceData, meta interface{ return []*schema.ResourceData{d}, nil } -func flattenComputeBackendServiceAffinityCookieTtlSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceAffinityCookieTtlSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1956,7 +2010,7 @@ func flattenComputeBackendServiceAffinityCookieTtlSec(v interface{}, d *schema.R return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceBackend(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { +func flattenComputeBackendServiceBackend(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1984,29 +2038,29 @@ func flattenComputeBackendServiceBackend(v interface{}, d *schema.ResourceData, } return transformed } -func flattenComputeBackendServiceBackendBalancingMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceBackendBalancingMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceBackendCapacityScaler(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceBackendCapacityScaler(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceBackendDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceBackendDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceBackendGroup(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceBackendGroup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeBackendServiceBackendMaxConnections(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceBackendMaxConnections(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2020,10 +2074,10 @@ func 
flattenComputeBackendServiceBackendMaxConnections(v interface{}, d *schema. return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceBackendMaxConnectionsPerInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceBackendMaxConnectionsPerInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2037,10 +2091,10 @@ func flattenComputeBackendServiceBackendMaxConnectionsPerInstance(v interface{}, return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2054,10 +2108,10 @@ func flattenComputeBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceBackendMaxRate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceBackendMaxRate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2071,19 +2125,19 @@ func flattenComputeBackendServiceBackendMaxRate(v 
interface{}, d *schema.Resourc return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceBackendMaxRatePerInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceBackendMaxRatePerInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceBackendMaxRatePerEndpoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceBackendMaxRatePerEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceBackendMaxUtilization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceBackendMaxUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceCircuitBreakers(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCircuitBreakers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2104,10 +2158,10 @@ func flattenComputeBackendServiceCircuitBreakers(v interface{}, d *schema.Resour flattenComputeBackendServiceCircuitBreakersMaxRetries(original["maxRetries"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceCircuitBreakersMaxRequestsPerConnection(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCircuitBreakersMaxRequestsPerConnection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2121,10 +2175,10 @@ func 
flattenComputeBackendServiceCircuitBreakersMaxRequestsPerConnection(v inter return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceCircuitBreakersMaxConnections(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCircuitBreakersMaxConnections(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2138,10 +2192,10 @@ func flattenComputeBackendServiceCircuitBreakersMaxConnections(v interface{}, d return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceCircuitBreakersMaxPendingRequests(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCircuitBreakersMaxPendingRequests(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2155,10 +2209,10 @@ func flattenComputeBackendServiceCircuitBreakersMaxPendingRequests(v interface{} return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceCircuitBreakersMaxRequests(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCircuitBreakersMaxRequests(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2172,10 +2226,10 @@ func 
flattenComputeBackendServiceCircuitBreakersMaxRequests(v interface{}, d *sc return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceCircuitBreakersMaxRetries(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCircuitBreakersMaxRetries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2189,11 +2243,11 @@ func flattenComputeBackendServiceCircuitBreakersMaxRetries(v interface{}, d *sch return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceCompressionMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCompressionMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceConsistentHash(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceConsistentHash(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2210,7 +2264,7 @@ func flattenComputeBackendServiceConsistentHash(v interface{}, d *schema.Resourc flattenComputeBackendServiceConsistentHashMinimumRingSize(original["minimumRingSize"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceConsistentHashHttpCookie(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceConsistentHashHttpCookie(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2227,7 +2281,7 @@ func flattenComputeBackendServiceConsistentHashHttpCookie(v interface{}, d *sche 
flattenComputeBackendServiceConsistentHashHttpCookiePath(original["path"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceConsistentHashHttpCookieTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceConsistentHashHttpCookieTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2242,10 +2296,10 @@ func flattenComputeBackendServiceConsistentHashHttpCookieTtl(v interface{}, d *s flattenComputeBackendServiceConsistentHashHttpCookieTtlNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2259,10 +2313,10 @@ func flattenComputeBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{ return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2276,22 +2330,22 @@ func flattenComputeBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, return v // let terraform core handle it otherwise } -func 
flattenComputeBackendServiceConsistentHashHttpCookieName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceConsistentHashHttpCookieName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceConsistentHashHttpCookiePath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceConsistentHashHttpCookiePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceConsistentHashHttpHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceConsistentHashHttpHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceConsistentHashMinimumRingSize(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceConsistentHashMinimumRingSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2305,7 +2359,7 @@ func flattenComputeBackendServiceConsistentHashMinimumRingSize(v interface{}, d return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceCdnPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2332,9 +2386,11 @@ func flattenComputeBackendServiceCdnPolicy(v interface{}, d *schema.ResourceData flattenComputeBackendServiceCdnPolicyCacheMode(original["cacheMode"], d, config) transformed["serve_while_stale"] = 
flattenComputeBackendServiceCdnPolicyServeWhileStale(original["serveWhileStale"], d, config) + transformed["bypass_cache_on_request_headers"] = + flattenComputeBackendServiceCdnPolicyBypassCacheOnRequestHeaders(original["bypassCacheOnRequestHeaders"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2359,44 +2415,44 @@ func flattenComputeBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d *schem flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeNamedCookies(original["includeNamedCookies"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeHttpHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeHttpHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeNamedCookies(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeNamedCookies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2410,10 +2466,10 @@ func flattenComputeBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceCdnPolicyDefaultTtl(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyDefaultTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2427,10 +2483,10 @@ func flattenComputeBackendServiceCdnPolicyDefaultTtl(v interface{}, d *schema.Re return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceCdnPolicyMaxTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyMaxTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2444,10 +2500,10 @@ func flattenComputeBackendServiceCdnPolicyMaxTtl(v interface{}, d *schema.Resour return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceCdnPolicyClientTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyClientTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2461,11 +2517,11 @@ func flattenComputeBackendServiceCdnPolicyClientTtl(v interface{}, d *schema.Res return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceCdnPolicyNegativeCaching(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeBackendServiceCdnPolicyNegativeCaching(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2484,10 +2540,10 @@ func flattenComputeBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d } return transformed } -func flattenComputeBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2501,10 +2557,10 @@ func flattenComputeBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{ return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceCdnPolicyNegativeCachingPolicyTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyNegativeCachingPolicyTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2518,14 +2574,14 @@ func flattenComputeBackendServiceCdnPolicyNegativeCachingPolicyTtl(v interface{} return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceCdnPolicyCacheMode(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyCacheMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceCdnPolicyServeWhileStale(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyServeWhileStale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2539,7 +2595,29 @@ func flattenComputeBackendServiceCdnPolicyServeWhileStale(v interface{}, d *sche return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceConnectionDraining(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCdnPolicyBypassCacheOnRequestHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "header_name": flattenComputeBackendServiceCdnPolicyBypassCacheOnRequestHeadersHeaderName(original["headerName"], d, config), + }) + } + return transformed +} +func flattenComputeBackendServiceCdnPolicyBypassCacheOnRequestHeadersHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeBackendServiceConnectionDraining(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2552,10 +2630,10 @@ func 
flattenComputeBackendServiceConnectionDraining(v interface{}, d *schema.Res flattenComputeBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(original["drainingTimeoutSec"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2569,47 +2647,47 @@ func flattenComputeBackendServiceConnectionDrainingConnectionDrainingTimeoutSec( return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceCustomRequestHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCustomRequestHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenComputeBackendServiceCustomResponseHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceCustomResponseHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenComputeBackendServiceFingerprint(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
+func flattenComputeBackendServiceFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceEnableCDN(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceEnableCDN(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceHealthChecks(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceHealthChecks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) } -func flattenComputeBackendServiceGeneratedId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceGeneratedId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2623,7 +2701,7 @@ func flattenComputeBackendServiceGeneratedId(v interface{}, d *schema.ResourceDa return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceIap(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceIap(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2640,27 +2718,27 @@ func 
flattenComputeBackendServiceIap(v interface{}, d *schema.ResourceData, conf flattenComputeBackendServiceIapOauth2ClientSecretSha256(original["oauth2ClientSecretSha256"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceIapOauth2ClientId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceIapOauth2ClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceIapOauth2ClientSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceIapOauth2ClientSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("iap.0.oauth2_client_secret") } -func flattenComputeBackendServiceIapOauth2ClientSecretSha256(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceIapOauth2ClientSecretSha256(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceLoadBalancingScheme(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceLoadBalancingScheme(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceLocalityLbPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceLocalityLbPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceLocalityLbPolicies(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceLocalityLbPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2679,7 +2757,7 @@ func flattenComputeBackendServiceLocalityLbPolicies(v interface{}, d 
*schema.Res } return transformed } -func flattenComputeBackendServiceLocalityLbPoliciesPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceLocalityLbPoliciesPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2692,11 +2770,11 @@ func flattenComputeBackendServiceLocalityLbPoliciesPolicy(v interface{}, d *sche flattenComputeBackendServiceLocalityLbPoliciesPolicyName(original["name"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceLocalityLbPoliciesPolicyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceLocalityLbPoliciesPolicyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceLocalityLbPoliciesCustomPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceLocalityLbPoliciesCustomPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2711,19 +2789,19 @@ func flattenComputeBackendServiceLocalityLbPoliciesCustomPolicy(v interface{}, d flattenComputeBackendServiceLocalityLbPoliciesCustomPolicyData(original["data"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceLocalityLbPoliciesCustomPolicyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceLocalityLbPoliciesCustomPolicyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceLocalityLbPoliciesCustomPolicyData(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceLocalityLbPoliciesCustomPolicyData(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeBackendServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceOutlierDetection(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2756,7 +2834,7 @@ func flattenComputeBackendServiceOutlierDetection(v interface{}, d *schema.Resou flattenComputeBackendServiceOutlierDetectionSuccessRateStdevFactor(original["successRateStdevFactor"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2771,10 +2849,10 @@ func flattenComputeBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, flattenComputeBackendServiceOutlierDetectionBaseEjectionTimeNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2788,10 +2866,10 @@ func flattenComputeBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v inter return v // let terraform core handle it otherwise } -func 
flattenComputeBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2805,10 +2883,10 @@ func flattenComputeBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interfa return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceOutlierDetectionConsecutiveErrors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionConsecutiveErrors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2822,10 +2900,10 @@ func flattenComputeBackendServiceOutlierDetectionConsecutiveErrors(v interface{} return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceOutlierDetectionConsecutiveGatewayFailure(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionConsecutiveGatewayFailure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2839,10 +2917,10 @@ func flattenComputeBackendServiceOutlierDetectionConsecutiveGatewayFailure(v int return v // let terraform core handle it 
otherwise } -func flattenComputeBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2856,10 +2934,10 @@ func flattenComputeBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v in return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2873,10 +2951,10 @@ func flattenComputeBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFail return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceOutlierDetectionEnforcingSuccessRate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionEnforcingSuccessRate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2890,7 +2968,7 @@ func flattenComputeBackendServiceOutlierDetectionEnforcingSuccessRate(v 
interfac return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceOutlierDetectionInterval(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2905,10 +2983,10 @@ func flattenComputeBackendServiceOutlierDetectionInterval(v interface{}, d *sche flattenComputeBackendServiceOutlierDetectionIntervalNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceOutlierDetectionIntervalSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionIntervalSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2922,10 +3000,10 @@ func flattenComputeBackendServiceOutlierDetectionIntervalSeconds(v interface{}, return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceOutlierDetectionIntervalNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionIntervalNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2939,10 +3017,10 @@ func flattenComputeBackendServiceOutlierDetectionIntervalNanos(v interface{}, d return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceOutlierDetectionMaxEjectionPercent(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionMaxEjectionPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2956,10 +3034,10 @@ func flattenComputeBackendServiceOutlierDetectionMaxEjectionPercent(v interface{ return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceOutlierDetectionSuccessRateMinimumHosts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionSuccessRateMinimumHosts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2973,10 +3051,10 @@ func flattenComputeBackendServiceOutlierDetectionSuccessRateMinimumHosts(v inter return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceOutlierDetectionSuccessRateRequestVolume(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionSuccessRateRequestVolume(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2990,10 +3068,10 @@ func flattenComputeBackendServiceOutlierDetectionSuccessRateRequestVolume(v inte return v // let terraform core handle it otherwise } -func 
flattenComputeBackendServiceOutlierDetectionSuccessRateStdevFactor(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceOutlierDetectionSuccessRateStdevFactor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3007,23 +3085,23 @@ func flattenComputeBackendServiceOutlierDetectionSuccessRateStdevFactor(v interf return v // let terraform core handle it otherwise } -func flattenComputeBackendServicePortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServicePortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceSecurityPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceSecurityPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceEdgeSecurityPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceEdgeSecurityPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceSecuritySettings(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceSecuritySettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3038,25 +3116,25 @@ 
func flattenComputeBackendServiceSecuritySettings(v interface{}, d *schema.Resou flattenComputeBackendServiceSecuritySettingsSubjectAltNames(original["subjectAltNames"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceSecuritySettingsClientTlsPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceSecuritySettingsClientTlsPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeBackendServiceSecuritySettingsSubjectAltNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceSecuritySettingsSubjectAltNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceSessionAffinity(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceSessionAffinity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceTimeoutSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3070,7 +3148,7 @@ func flattenComputeBackendServiceTimeoutSec(v interface{}, d *schema.ResourceDat return v // let terraform core handle it otherwise } -func flattenComputeBackendServiceLogConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceLogConfig(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3085,19 +3163,19 @@ func flattenComputeBackendServiceLogConfig(v interface{}, d *schema.ResourceData flattenComputeBackendServiceLogConfigSampleRate(original["sampleRate"], d, config) return []interface{}{transformed} } -func flattenComputeBackendServiceLogConfigEnable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceLogConfigEnable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeBackendServiceLogConfigSampleRate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeBackendServiceLogConfigSampleRate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandComputeBackendServiceAffinityCookieTtlSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceAffinityCookieTtlSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceBackend(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceBackend(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -3111,7 +3189,7 @@ func expandComputeBackendServiceBackend(v interface{}, d TerraformResourceData, transformedBalancingMode, err := expandComputeBackendServiceBackendBalancingMode(original["balancing_mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBalancingMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBalancingMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["balancingMode"] = 
transformedBalancingMode } @@ -3125,63 +3203,63 @@ func expandComputeBackendServiceBackend(v interface{}, d TerraformResourceData, transformedDescription, err := expandComputeBackendServiceBackendDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedGroup, err := expandComputeBackendServiceBackendGroup(original["group"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGroup); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGroup); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["group"] = transformedGroup } transformedMaxConnections, err := expandComputeBackendServiceBackendMaxConnections(original["max_connections"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxConnections); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxConnections); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxConnections"] = transformedMaxConnections } transformedMaxConnectionsPerInstance, err := expandComputeBackendServiceBackendMaxConnectionsPerInstance(original["max_connections_per_instance"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxConnectionsPerInstance); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxConnectionsPerInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxConnectionsPerInstance"] = transformedMaxConnectionsPerInstance } transformedMaxConnectionsPerEndpoint, err := expandComputeBackendServiceBackendMaxConnectionsPerEndpoint(original["max_connections_per_endpoint"], d, config) 
if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxConnectionsPerEndpoint); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxConnectionsPerEndpoint); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxConnectionsPerEndpoint"] = transformedMaxConnectionsPerEndpoint } transformedMaxRate, err := expandComputeBackendServiceBackendMaxRate(original["max_rate"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxRate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxRate"] = transformedMaxRate } transformedMaxRatePerInstance, err := expandComputeBackendServiceBackendMaxRatePerInstance(original["max_rate_per_instance"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRatePerInstance); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxRatePerInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxRatePerInstance"] = transformedMaxRatePerInstance } transformedMaxRatePerEndpoint, err := expandComputeBackendServiceBackendMaxRatePerEndpoint(original["max_rate_per_endpoint"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRatePerEndpoint); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxRatePerEndpoint); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxRatePerEndpoint"] = transformedMaxRatePerEndpoint } transformedMaxUtilization, err := expandComputeBackendServiceBackendMaxUtilization(original["max_utilization"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxUtilization); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxUtilization); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["maxUtilization"] = transformedMaxUtilization } @@ -3190,51 +3268,51 @@ func expandComputeBackendServiceBackend(v interface{}, d TerraformResourceData, return req, nil } -func expandComputeBackendServiceBackendBalancingMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceBackendBalancingMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceBackendCapacityScaler(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceBackendCapacityScaler(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceBackendDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceBackendDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceBackendGroup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceBackendGroup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceBackendMaxConnections(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceBackendMaxConnections(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceBackendMaxConnectionsPerInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceBackendMaxConnectionsPerInstance(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceBackendMaxRate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceBackendMaxRate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceBackendMaxRatePerInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceBackendMaxRatePerInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceBackendMaxRatePerEndpoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceBackendMaxRatePerEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceBackendMaxUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceBackendMaxUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCircuitBreakers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCircuitBreakers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 
|| l[0] == nil { return nil, nil @@ -3246,66 +3324,66 @@ func expandComputeBackendServiceCircuitBreakers(v interface{}, d TerraformResour transformedMaxRequestsPerConnection, err := expandComputeBackendServiceCircuitBreakersMaxRequestsPerConnection(original["max_requests_per_connection"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRequestsPerConnection); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxRequestsPerConnection); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxRequestsPerConnection"] = transformedMaxRequestsPerConnection } transformedMaxConnections, err := expandComputeBackendServiceCircuitBreakersMaxConnections(original["max_connections"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxConnections); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxConnections); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxConnections"] = transformedMaxConnections } transformedMaxPendingRequests, err := expandComputeBackendServiceCircuitBreakersMaxPendingRequests(original["max_pending_requests"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxPendingRequests); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxPendingRequests); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxPendingRequests"] = transformedMaxPendingRequests } transformedMaxRequests, err := expandComputeBackendServiceCircuitBreakersMaxRequests(original["max_requests"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRequests); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxRequests); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxRequests"] = transformedMaxRequests } transformedMaxRetries, err := 
expandComputeBackendServiceCircuitBreakersMaxRetries(original["max_retries"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRetries); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxRetries); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxRetries"] = transformedMaxRetries } return transformed, nil } -func expandComputeBackendServiceCircuitBreakersMaxRequestsPerConnection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCircuitBreakersMaxRequestsPerConnection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCircuitBreakersMaxConnections(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCircuitBreakersMaxConnections(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCircuitBreakersMaxPendingRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCircuitBreakersMaxPendingRequests(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCircuitBreakersMaxRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCircuitBreakersMaxRequests(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCircuitBreakersMaxRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCircuitBreakersMaxRetries(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCompressionMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCompressionMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceConsistentHash(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceConsistentHash(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3317,28 +3395,28 @@ func expandComputeBackendServiceConsistentHash(v interface{}, d TerraformResourc transformedHttpCookie, err := expandComputeBackendServiceConsistentHashHttpCookie(original["http_cookie"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpCookie); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpCookie); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpCookie"] = transformedHttpCookie } transformedHttpHeaderName, err := expandComputeBackendServiceConsistentHashHttpHeaderName(original["http_header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpHeaderName"] = transformedHttpHeaderName } transformedMinimumRingSize, err := expandComputeBackendServiceConsistentHashMinimumRingSize(original["minimum_ring_size"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinimumRingSize); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinimumRingSize); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minimumRingSize"] = transformedMinimumRingSize } return transformed, nil } -func expandComputeBackendServiceConsistentHashHttpCookie(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceConsistentHashHttpCookie(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3350,28 +3428,28 @@ func expandComputeBackendServiceConsistentHashHttpCookie(v interface{}, d Terraf transformedTtl, err := expandComputeBackendServiceConsistentHashHttpCookieTtl(original["ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ttl"] = transformedTtl } transformedName, err := expandComputeBackendServiceConsistentHashHttpCookieName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedPath, err := expandComputeBackendServiceConsistentHashHttpCookiePath(original["path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["path"] = transformedPath } return transformed, nil } -func expandComputeBackendServiceConsistentHashHttpCookieTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceConsistentHashHttpCookieTtl(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3383,45 +3461,45 @@ func expandComputeBackendServiceConsistentHashHttpCookieTtl(v interface{}, d Ter transformedSeconds, err := expandComputeBackendServiceConsistentHashHttpCookieTtlSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandComputeBackendServiceConsistentHashHttpCookieTtlNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceConsistentHashHttpCookieName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceConsistentHashHttpCookieName(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceConsistentHashHttpCookiePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceConsistentHashHttpCookiePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceConsistentHashHttpHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceConsistentHashHttpHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceConsistentHashMinimumRingSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceConsistentHashMinimumRingSize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCdnPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3433,35 +3511,35 @@ func expandComputeBackendServiceCdnPolicy(v interface{}, d TerraformResourceData transformedCacheKeyPolicy, err := expandComputeBackendServiceCdnPolicyCacheKeyPolicy(original["cache_key_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCacheKeyPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCacheKeyPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cacheKeyPolicy"] = transformedCacheKeyPolicy } transformedSignedUrlCacheMaxAgeSec, err := 
expandComputeBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(original["signed_url_cache_max_age_sec"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSignedUrlCacheMaxAgeSec); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSignedUrlCacheMaxAgeSec); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["signedUrlCacheMaxAgeSec"] = transformedSignedUrlCacheMaxAgeSec } transformedDefaultTtl, err := expandComputeBackendServiceCdnPolicyDefaultTtl(original["default_ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDefaultTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDefaultTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["defaultTtl"] = transformedDefaultTtl } transformedMaxTtl, err := expandComputeBackendServiceCdnPolicyMaxTtl(original["max_ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxTtl"] = transformedMaxTtl } transformedClientTtl, err := expandComputeBackendServiceCdnPolicyClientTtl(original["client_ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedClientTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedClientTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["clientTtl"] = transformedClientTtl } @@ -3475,14 +3553,14 @@ func expandComputeBackendServiceCdnPolicy(v interface{}, d TerraformResourceData transformedNegativeCachingPolicy, err := expandComputeBackendServiceCdnPolicyNegativeCachingPolicy(original["negative_caching_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNegativeCachingPolicy); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNegativeCachingPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["negativeCachingPolicy"] = transformedNegativeCachingPolicy } transformedCacheMode, err := expandComputeBackendServiceCdnPolicyCacheMode(original["cache_mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCacheMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCacheMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cacheMode"] = transformedCacheMode } @@ -3493,10 +3571,17 @@ func expandComputeBackendServiceCdnPolicy(v interface{}, d TerraformResourceData transformed["serveWhileStale"] = transformedServeWhileStale } + transformedBypassCacheOnRequestHeaders, err := expandComputeBackendServiceCdnPolicyBypassCacheOnRequestHeaders(original["bypass_cache_on_request_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBypassCacheOnRequestHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bypassCacheOnRequestHeaders"] = transformedBypassCacheOnRequestHeaders + } + return transformed, nil } -func expandComputeBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3557,57 +3642,57 @@ func expandComputeBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d Terrafo return transformed, nil } -func expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeHttpHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeHttpHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeNamedCookies(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeNamedCookies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCdnPolicyDefaultTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyDefaultTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCdnPolicyMaxTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyMaxTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCdnPolicyClientTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyClientTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCdnPolicyNegativeCaching(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyNegativeCaching(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyNegativeCachingPolicy(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3620,7 +3705,7 @@ func expandComputeBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d transformedCode, err := expandComputeBackendServiceCdnPolicyNegativeCachingPolicyCode(original["code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["code"] = transformedCode } @@ -3636,66 +3721,92 @@ func expandComputeBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d return req, nil } -func expandComputeBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeBackendServiceCdnPolicyNegativeCachingPolicyTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCdnPolicyNegativeCachingPolicyTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyCacheMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCdnPolicyCacheMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyServeWhileStale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeBackendServiceCdnPolicyServeWhileStale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCdnPolicyBypassCacheOnRequestHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHeaderName, err := expandComputeBackendServiceCdnPolicyBypassCacheOnRequestHeadersHeaderName(original["header_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["headerName"] = transformedHeaderName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeBackendServiceCdnPolicyBypassCacheOnRequestHeadersHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceConnectionDraining(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceConnectionDraining(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { transformed := make(map[string]interface{}) transformedConnectionDrainingTimeoutSec, err := expandComputeBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(d.Get("connection_draining_timeout_sec"), d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedConnectionDrainingTimeoutSec); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedConnectionDrainingTimeoutSec); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["drainingTimeoutSec"] = transformedConnectionDrainingTimeoutSec } return 
transformed, nil } -func expandComputeBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceCustomRequestHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCustomRequestHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandComputeBackendServiceCustomResponseHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceCustomResponseHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandComputeBackendServiceFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceEnableCDN(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceEnableCDN(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceHealthChecks(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceHealthChecks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandComputeBackendServiceIap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceIap(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3707,7 +3818,7 @@ func expandComputeBackendServiceIap(v interface{}, d TerraformResourceData, conf transformedOauth2ClientId, err := expandComputeBackendServiceIapOauth2ClientId(original["oauth2_client_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOauth2ClientId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOauth2ClientId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["oauth2ClientId"] = transformedOauth2ClientId } @@ -3721,34 +3832,34 @@ func expandComputeBackendServiceIap(v interface{}, d TerraformResourceData, conf transformedOauth2ClientSecretSha256, err := expandComputeBackendServiceIapOauth2ClientSecretSha256(original["oauth2_client_secret_sha256"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOauth2ClientSecretSha256); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOauth2ClientSecretSha256); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["oauth2ClientSecretSha256"] = transformedOauth2ClientSecretSha256 } return transformed, nil } -func expandComputeBackendServiceIapOauth2ClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceIapOauth2ClientId(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceIapOauth2ClientSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceIapOauth2ClientSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceIapOauth2ClientSecretSha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceIapOauth2ClientSecretSha256(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceLoadBalancingScheme(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceLoadBalancingScheme(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceLocalityLbPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceLocalityLbPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceLocalityLbPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceLocalityLbPolicies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3761,14 +3872,14 @@ func expandComputeBackendServiceLocalityLbPolicies(v interface{}, d TerraformRes transformedPolicy, err := expandComputeBackendServiceLocalityLbPoliciesPolicy(original["policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPolicy); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["policy"] = transformedPolicy } transformedCustomPolicy, err := expandComputeBackendServiceLocalityLbPoliciesCustomPolicy(original["custom_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCustomPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCustomPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["customPolicy"] = transformedCustomPolicy } @@ -3777,7 +3888,7 @@ func expandComputeBackendServiceLocalityLbPolicies(v interface{}, d TerraformRes return req, nil } -func expandComputeBackendServiceLocalityLbPoliciesPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceLocalityLbPoliciesPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3789,18 +3900,18 @@ func expandComputeBackendServiceLocalityLbPoliciesPolicy(v interface{}, d Terraf transformedName, err := expandComputeBackendServiceLocalityLbPoliciesPolicyName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } return transformed, nil } -func expandComputeBackendServiceLocalityLbPoliciesPolicyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceLocalityLbPoliciesPolicyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceLocalityLbPoliciesCustomPolicy(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceLocalityLbPoliciesCustomPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3812,33 +3923,33 @@ func expandComputeBackendServiceLocalityLbPoliciesCustomPolicy(v interface{}, d transformedName, err := expandComputeBackendServiceLocalityLbPoliciesCustomPolicyName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedData, err := expandComputeBackendServiceLocalityLbPoliciesCustomPolicyData(original["data"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedData); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedData); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["data"] = transformedData } return transformed, nil } -func expandComputeBackendServiceLocalityLbPoliciesCustomPolicyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceLocalityLbPoliciesCustomPolicyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceLocalityLbPoliciesCustomPolicyData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceLocalityLbPoliciesCustomPolicyData(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeBackendServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceOutlierDetection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3850,84 +3961,84 @@ func expandComputeBackendServiceOutlierDetection(v interface{}, d TerraformResou transformedBaseEjectionTime, err := expandComputeBackendServiceOutlierDetectionBaseEjectionTime(original["base_ejection_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBaseEjectionTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBaseEjectionTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["baseEjectionTime"] = transformedBaseEjectionTime } transformedConsecutiveErrors, err := expandComputeBackendServiceOutlierDetectionConsecutiveErrors(original["consecutive_errors"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedConsecutiveErrors); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedConsecutiveErrors); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["consecutiveErrors"] = transformedConsecutiveErrors } transformedConsecutiveGatewayFailure, err := expandComputeBackendServiceOutlierDetectionConsecutiveGatewayFailure(original["consecutive_gateway_failure"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedConsecutiveGatewayFailure); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedConsecutiveGatewayFailure); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["consecutiveGatewayFailure"] 
= transformedConsecutiveGatewayFailure } transformedEnforcingConsecutiveErrors, err := expandComputeBackendServiceOutlierDetectionEnforcingConsecutiveErrors(original["enforcing_consecutive_errors"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnforcingConsecutiveErrors); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnforcingConsecutiveErrors); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enforcingConsecutiveErrors"] = transformedEnforcingConsecutiveErrors } transformedEnforcingConsecutiveGatewayFailure, err := expandComputeBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(original["enforcing_consecutive_gateway_failure"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnforcingConsecutiveGatewayFailure); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnforcingConsecutiveGatewayFailure); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enforcingConsecutiveGatewayFailure"] = transformedEnforcingConsecutiveGatewayFailure } transformedEnforcingSuccessRate, err := expandComputeBackendServiceOutlierDetectionEnforcingSuccessRate(original["enforcing_success_rate"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnforcingSuccessRate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnforcingSuccessRate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enforcingSuccessRate"] = transformedEnforcingSuccessRate } transformedInterval, err := expandComputeBackendServiceOutlierDetectionInterval(original["interval"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInterval); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInterval); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["interval"] = 
transformedInterval } transformedMaxEjectionPercent, err := expandComputeBackendServiceOutlierDetectionMaxEjectionPercent(original["max_ejection_percent"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxEjectionPercent); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxEjectionPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxEjectionPercent"] = transformedMaxEjectionPercent } transformedSuccessRateMinimumHosts, err := expandComputeBackendServiceOutlierDetectionSuccessRateMinimumHosts(original["success_rate_minimum_hosts"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSuccessRateMinimumHosts); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSuccessRateMinimumHosts); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["successRateMinimumHosts"] = transformedSuccessRateMinimumHosts } transformedSuccessRateRequestVolume, err := expandComputeBackendServiceOutlierDetectionSuccessRateRequestVolume(original["success_rate_request_volume"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSuccessRateRequestVolume); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSuccessRateRequestVolume); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["successRateRequestVolume"] = transformedSuccessRateRequestVolume } transformedSuccessRateStdevFactor, err := expandComputeBackendServiceOutlierDetectionSuccessRateStdevFactor(original["success_rate_stdev_factor"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSuccessRateStdevFactor); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSuccessRateStdevFactor); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["successRateStdevFactor"] = transformedSuccessRateStdevFactor } 
return transformed, nil } -func expandComputeBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3939,49 +4050,49 @@ func expandComputeBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, transformedSeconds, err := expandComputeBackendServiceOutlierDetectionBaseEjectionTimeSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandComputeBackendServiceOutlierDetectionBaseEjectionTimeNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { return v, nil } -func expandComputeBackendServiceOutlierDetectionConsecutiveErrors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionConsecutiveErrors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceOutlierDetectionConsecutiveGatewayFailure(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionConsecutiveGatewayFailure(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceOutlierDetectionEnforcingSuccessRate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionEnforcingSuccessRate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceOutlierDetectionInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionInterval(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3993,61 +4104,61 @@ func expandComputeBackendServiceOutlierDetectionInterval(v interface{}, d Terraf transformedSeconds, err := expandComputeBackendServiceOutlierDetectionIntervalSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandComputeBackendServiceOutlierDetectionIntervalNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeBackendServiceOutlierDetectionIntervalSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionIntervalSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceOutlierDetectionIntervalNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionIntervalNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceOutlierDetectionMaxEjectionPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionMaxEjectionPercent(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceOutlierDetectionSuccessRateMinimumHosts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionSuccessRateMinimumHosts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceOutlierDetectionSuccessRateRequestVolume(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionSuccessRateRequestVolume(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceOutlierDetectionSuccessRateStdevFactor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceOutlierDetectionSuccessRateStdevFactor(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServicePortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServicePortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceSecurityPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceSecurityPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeBackendServiceEdgeSecurityPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceEdgeSecurityPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceSecuritySettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceSecuritySettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -4059,41 +4170,41 @@ func expandComputeBackendServiceSecuritySettings(v interface{}, d TerraformResou transformedClientTlsPolicy, err := expandComputeBackendServiceSecuritySettingsClientTlsPolicy(original["client_tls_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedClientTlsPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedClientTlsPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["clientTlsPolicy"] = transformedClientTlsPolicy } transformedSubjectAltNames, err := expandComputeBackendServiceSecuritySettingsSubjectAltNames(original["subject_alt_names"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSubjectAltNames); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSubjectAltNames); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["subjectAltNames"] = transformedSubjectAltNames } return transformed, nil } -func expandComputeBackendServiceSecuritySettingsClientTlsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) +func expandComputeBackendServiceSecuritySettingsClientTlsPolicy(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for client_tls_policy: %s", err) } return f.RelativeLink(), nil } -func expandComputeBackendServiceSecuritySettingsSubjectAltNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceSecuritySettingsSubjectAltNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceSessionAffinity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceSessionAffinity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceLogConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -4112,18 +4223,18 @@ func expandComputeBackendServiceLogConfig(v interface{}, d TerraformResourceData transformedSampleRate, err := expandComputeBackendServiceLogConfigSampleRate(original["sample_rate"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSampleRate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSampleRate); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["sampleRate"] = transformedSampleRate } return transformed, nil } -func expandComputeBackendServiceLogConfigEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceLogConfigEnable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeBackendServiceLogConfigSampleRate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeBackendServiceLogConfigSampleRate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service_signed_url_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service_signed_url_key.go new file mode 100644 index 0000000000..332f95acc6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service_signed_url_key.go @@ -0,0 +1,364 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeBackendServiceSignedUrlKey() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeBackendServiceSignedUrlKeyCreate, + Read: resourceComputeBackendServiceSignedUrlKeyRead, + Delete: resourceComputeBackendServiceSignedUrlKeyDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "backend_service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The backend service this signed URL key belongs.`, + }, + "key_value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `128-bit key value used for signing the URL. 
The key value must be a +valid RFC 4648 Section 5 base64url encoded string.`, + Sensitive: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`), + Description: `Name of the signed URL key.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeBackendServiceSignedUrlKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + keyNameProp, err := expandNestedComputeBackendServiceSignedUrlKeyName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(keyNameProp)) && (ok || !reflect.DeepEqual(v, keyNameProp)) { + obj["keyName"] = keyNameProp + } + keyValueProp, err := expandNestedComputeBackendServiceSignedUrlKeyKeyValue(d.Get("key_value"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("key_value"); !tpgresource.IsEmptyValue(reflect.ValueOf(keyValueProp)) && (ok || !reflect.DeepEqual(v, keyValueProp)) { + obj["keyValue"] = keyValueProp + } + backendServiceProp, err := expandNestedComputeBackendServiceSignedUrlKeyBackendService(d.Get("backend_service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backend_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(backendServiceProp)) && (ok || !reflect.DeepEqual(v, backendServiceProp)) { + obj["backendService"] = backendServiceProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "signedUrlKey/{{project}}/backendServices/{{backend_service}}/") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer 
transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{backend_service}}/addSignedUrlKey") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new BackendServiceSignedUrlKey: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackendServiceSignedUrlKey: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating BackendServiceSignedUrlKey: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/backendServices/{{backend_service}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating BackendServiceSignedUrlKey", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create BackendServiceSignedUrlKey: %s", err) + } + + log.Printf("[DEBUG] Finished creating BackendServiceSignedUrlKey %q: %#v", d.Id(), res) + + return resourceComputeBackendServiceSignedUrlKeyRead(d, meta) +} + +func resourceComputeBackendServiceSignedUrlKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{backend_service}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackendServiceSignedUrlKey: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeBackendServiceSignedUrlKey %q", d.Id())) + } + + res, err = flattenNestedComputeBackendServiceSignedUrlKey(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. 
+ log.Printf("[DEBUG] Removing ComputeBackendServiceSignedUrlKey because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading BackendServiceSignedUrlKey: %s", err) + } + + if err := d.Set("name", flattenNestedComputeBackendServiceSignedUrlKeyName(res["keyName"], d, config)); err != nil { + return fmt.Errorf("Error reading BackendServiceSignedUrlKey: %s", err) + } + + return nil +} + +func resourceComputeBackendServiceSignedUrlKeyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackendServiceSignedUrlKey: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "signedUrlKey/{{project}}/backendServices/{{backend_service}}/") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{backend_service}}/deleteSignedUrlKey?keyName={{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting BackendServiceSignedUrlKey %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, 
"BackendServiceSignedUrlKey") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting BackendServiceSignedUrlKey", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting BackendServiceSignedUrlKey %q: %#v", d.Id(), res) + return nil +} + +func flattenNestedComputeBackendServiceSignedUrlKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedComputeBackendServiceSignedUrlKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeBackendServiceSignedUrlKeyKeyValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeBackendServiceSignedUrlKeyBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for backend_service: %s", err) + } + return f.RelativeLink(), nil +} + +func flattenNestedComputeBackendServiceSignedUrlKey(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["cdnPolicy"] + if !ok || v == nil { + return nil, nil + } + res = v.(map[string]interface{}) + + v, ok = res["signedUrlKeyNames"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value cdnPolicy.signedUrlKeyNames. 
Actual value: %v", v) + } + + _, item, err := resourceComputeBackendServiceSignedUrlKeyFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputeBackendServiceSignedUrlKeyFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName, err := expandNestedComputeBackendServiceSignedUrlKeyName(d.Get("name"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedName := flattenNestedComputeBackendServiceSignedUrlKeyName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + // List response only contains the ID - construct a response object. + item := map[string]interface{}{ + "keyName": itemRaw, + } + + itemName := flattenNestedComputeBackendServiceSignedUrlKeyName(item["keyName"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with keyName= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service_signed_url_key_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service_signed_url_key_sweeper.go new file mode 100644 index 0000000000..9187491181 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service_signed_url_key_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeBackendServiceSignedUrlKey", testSweepComputeBackendServiceSignedUrlKey) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeBackendServiceSignedUrlKey(region string) error { + resourceName := "ComputeBackendServiceSignedUrlKey" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d 
:= &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/backendServices/{{backend_service}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["cdnPolicy"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/backendServices/{{backend_service}}/deleteSignedUrlKey?keyName={{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service_sweeper.go new file mode 100644 index 0000000000..38615bf0b2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_backend_service_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeBackendService", testSweepComputeBackendService) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeBackendService(region string) error { + resourceName := "ComputeBackendService" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/backendServices", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/backendServices/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : 
%s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk.go new file mode 100644 index 0000000000..f36451f2f0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk.go @@ -0,0 +1,2127 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/googleapi" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +// diffsupress for hyperdisk provisioned_iops +func hyperDiskIopsUpdateDiffSupress(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + if !strings.Contains(d.Get("type").(string), "hyperdisk") { + resourceSchema := ResourceComputeDisk().Schema + for field := range resourceSchema { + if field == "provisioned_iops" && d.HasChange(field) { + if err := d.ForceNew(field); err != nil { + return err + } + } + } + } + + return nil +} + +// diffsupress for beta and to check change in source_disk attribute +func sourceDiskDiffSupress(_, old, new string, _ *schema.ResourceData) bool { + s1 := strings.TrimPrefix(old, "https://www.googleapis.com/compute/beta") + s2 := strings.TrimPrefix(new, "https://www.googleapis.com/compute/v1") + if strings.HasSuffix(s1, s2) { + return true + } + return false +} + +// Is the new disk size smaller than the old one? +func IsDiskShrinkage(_ context.Context, old, new, _ interface{}) bool { + // It's okay to remove size entirely. + if old == nil || new == nil { + return false + } + return new.(int) < old.(int) +} + +// We cannot suppress the diff for the case when family name is not part of the image name since we can't +// make a network call in a DiffSuppressFunc. 
+func DiskImageDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + // Understand that this function solves a messy problem ("how do we tell if the diff between two images + // is 'ForceNew-worthy', without making a network call?") in the best way we can: through a series of special + // cases and regexes. If you find yourself here because you are trying to add a new special case, + // you are probably looking for the diskImageFamilyEquals function and its subfunctions. + // In order to keep this maintainable, we need to ensure that the positive and negative examples + // in resource_compute_disk_test.go are as complete as possible. + + // 'old' is read from the API. + // It always has the format 'https://www.googleapis.com/compute/v1/projects/(%s)/global/images/(%s)' + matches := resolveImageLink.FindStringSubmatch(old) + if matches == nil { + // Image read from the API doesn't have the expected format. In practice, it should never happen + return false + } + oldProject := matches[1] + oldName := matches[2] + + // Partial or full self link family + if resolveImageProjectFamily.MatchString(new) { + // Value matches pattern "projects/{project}/global/images/family/{family-name}$" + matches := resolveImageProjectFamily.FindStringSubmatch(new) + newProject := matches[1] + newFamilyName := matches[2] + + return diskImageProjectNameEquals(oldProject, newProject) && diskImageFamilyEquals(oldName, newFamilyName) + } + + // Partial or full self link image + if resolveImageProjectImage.MatchString(new) { + // Value matches pattern "projects/{project}/global/images/{image-name}$" + matches := resolveImageProjectImage.FindStringSubmatch(new) + newProject := matches[1] + newImageName := matches[2] + + return diskImageProjectNameEquals(oldProject, newProject) && diskImageEquals(oldName, newImageName) + } + + // Partial link without project family + if resolveImageGlobalFamily.MatchString(new) { + // Value is "global/images/family/{family-name}" + matches := 
resolveImageGlobalFamily.FindStringSubmatch(new) + familyName := matches[1] + + return diskImageFamilyEquals(oldName, familyName) + } + + // Partial link without project image + if resolveImageGlobalImage.MatchString(new) { + // Value is "global/images/{image-name}" + matches := resolveImageGlobalImage.FindStringSubmatch(new) + imageName := matches[1] + + return diskImageEquals(oldName, imageName) + } + + // Family shorthand + if resolveImageFamilyFamily.MatchString(new) { + // Value is "family/{family-name}" + matches := resolveImageFamilyFamily.FindStringSubmatch(new) + familyName := matches[1] + + return diskImageFamilyEquals(oldName, familyName) + } + + // Shorthand for image or family + if resolveImageProjectImageShorthand.MatchString(new) { + // Value is "{project}/{image-name}" or "{project}/{family-name}" + matches := resolveImageProjectImageShorthand.FindStringSubmatch(new) + newProject := matches[1] + newName := matches[2] + + return diskImageProjectNameEquals(oldProject, newProject) && + (diskImageEquals(oldName, newName) || diskImageFamilyEquals(oldName, newName)) + } + + // Image or family only + if diskImageEquals(oldName, new) || diskImageFamilyEquals(oldName, new) { + // Value is "{image-name}" or "{family-name}" + return true + } + + return false +} + +func diskImageProjectNameEquals(project1, project2 string) bool { + // Convert short project name to full name + // For instance, centos => centos-cloud + fullProjectName, ok := ImageMap[project2] + if ok { + project2 = fullProjectName + } + + return project1 == project2 +} + +func diskImageEquals(oldImageName, newImageName string) bool { + return oldImageName == newImageName +} + +func diskImageFamilyEquals(imageName, familyName string) bool { + // Handles the case when the image name includes the family name + // e.g. 
image name: debian-11-bullseye-v20220719, family name: debian-11 + + // First condition is to check if image contains arm64 because of case like: + // image name: opensuse-leap-15-4-v20220713-arm64, family name: opensuse-leap (should not be evaluated during handling of amd64 cases) + // In second condition, we have to check for amd64 because of cases like: + // image name: ubuntu-2210-kinetic-amd64-v20221022, family name: ubuntu-2210 (should not suppress) + if !strings.Contains(imageName, "-arm64") && strings.Contains(imageName, strings.TrimSuffix(familyName, "-amd64")) { + if strings.Contains(imageName, "-amd64") { + return strings.HasSuffix(familyName, "-amd64") + } else { + return !strings.HasSuffix(familyName, "-amd64") + } + } + + // We have to check for arm64 because of cases like: + // image name: opensuse-leap-15-4-v20220713-arm64, family name: opensuse-leap (should not suppress) + if strings.Contains(imageName, strings.TrimSuffix(familyName, "-arm64")) { + if strings.Contains(imageName, "-arm64") { + return strings.HasSuffix(familyName, "-arm64") + } else { + return !strings.HasSuffix(familyName, "-arm64") + } + } + + if suppressCanonicalFamilyDiff(imageName, familyName) { + return true + } + + if suppressCosFamilyDiff(imageName, familyName) { + return true + } + + if suppressWindowsSqlFamilyDiff(imageName, familyName) { + return true + } + + if suppressWindowsFamilyDiff(imageName, familyName) { + return true + } + + return false +} + +// e.g. image: ubuntu-1404-trusty-v20180122, family: ubuntu-1404-lts +func suppressCanonicalFamilyDiff(imageName, familyName string) bool { + parts := canonicalUbuntuLtsImage.FindStringSubmatch(imageName) + if len(parts) == 4 { + var f string + if parts[3] == "" { + f = fmt.Sprintf("ubuntu-%s%s-lts", parts[1], parts[2]) + } else { + f = fmt.Sprintf("ubuntu-%s%s-lts-%s", parts[1], parts[2], parts[3]) + } + if f == familyName { + return true + } + } + + return false +} + +// e.g. 
image: cos-NN-*, family: cos-NN-lts +func suppressCosFamilyDiff(imageName, familyName string) bool { + parts := cosLtsImage.FindStringSubmatch(imageName) + if len(parts) == 2 { + f := fmt.Sprintf("cos-%s-lts", parts[1]) + if f == familyName { + return true + } + } + + return false +} + +// e.g. image: sql-2017-standard-windows-2016-dc-v20180109, family: sql-std-2017-win-2016 +// e.g. image: sql-2017-express-windows-2012-r2-dc-v20180109, family: sql-exp-2017-win-2012-r2 +func suppressWindowsSqlFamilyDiff(imageName, familyName string) bool { + parts := windowsSqlImage.FindStringSubmatch(imageName) + if len(parts) == 5 { + edition := parts[2] // enterprise, standard or web. + sqlVersion := parts[1] + windowsVersion := parts[3] + + // Translate edition + switch edition { + case "enterprise": + edition = "ent" + case "standard": + edition = "std" + case "express": + edition = "exp" + } + + var f string + if revision := parts[4]; revision != "" { + // With revision + f = fmt.Sprintf("sql-%s-%s-win-%s-r%s", edition, sqlVersion, windowsVersion, revision) + } else { + // No revision + f = fmt.Sprintf("sql-%s-%s-win-%s", edition, sqlVersion, windowsVersion) + } + + if f == familyName { + return true + } + } + + return false +} + +// e.g. image: windows-server-1709-dc-core-v20180109, family: windows-1709-core +// e.g. 
image: windows-server-1709-dc-core-for-containers-v20180109, family: "windows-1709-core-for-containers +func suppressWindowsFamilyDiff(imageName, familyName string) bool { + updatedFamilyString := strings.Replace(familyName, "windows-", "windows-server-", 1) + updatedImageName := strings.Replace(imageName, "-dc-", "-", 1) + + return strings.Contains(updatedImageName, updatedFamilyString) +} + +func ResourceComputeDisk() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeDiskCreate, + Read: resourceComputeDiskRead, + Update: resourceComputeDiskUpdate, + Delete: resourceComputeDiskDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeDiskImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + customdiff.ForceNewIfChange("size", IsDiskShrinkage), + hyperDiskIopsUpdateDiffSupress, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "async_primary_disk": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `A nested object resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Primary disk for asynchronous disk replication.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource. Provide this property when +you create the resource.`, + }, + "disk_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Encrypts the disk using a customer-supplied encryption key. + +After you encrypt a disk with a customer-supplied key, you must +provide the same key if you use the disk later (e.g. to create a disk +snapshot or an image, or to attach the disk to a virtual machine). + +Customer-supplied encryption keys do not protect access to metadata of +the disk. + +If you do not provide an encryption key when creating the disk, then +the disk will be encrypted using an automatically generated key and +you do not need to provide a key to use the disk later.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_self_link": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The self link of the encryption key used to encrypt the disk. Also called KmsKeyName +in the cloud console. 
Your project's Compute Engine System service account +('service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com') must have +'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. +See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys`, + }, + "kms_key_service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The service account used for the encryption request for the given KMS key. +If absent, the Compute Engine Service Agent service account is used.`, + }, + "raw_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies a 256-bit customer-supplied encryption key, encoded in +RFC 4648 base64 to either encrypt or decrypt this resource.`, + Sensitive: true, + }, + "rsa_encrypted_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit +customer-supplied encryption key to either encrypt or decrypt +this resource. You can provide either the rawKey or the rsaEncryptedKey.`, + Sensitive: true, + }, + "sha256": { + Type: schema.TypeString, + Computed: true, + Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied +encryption key that protects this resource.`, + }, + }, + }, + }, + "guest_os_features": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A list of features to enable on the guest operating system. +Applicable only for bootable disks.`, + Elem: computeDiskGuestOsFeaturesSchema(), + // Default schema.HashSchema is used. + }, + "image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: DiskImageDiffSuppress, + Description: `The image from which to initialize this disk. 
This can be +one of: the image's 'self_link', 'projects/{project}/global/images/{image}', +'projects/{project}/global/images/family/{family}', 'global/images/{image}', +'global/images/family/{family}', 'family/{family}', '{project}/{family}', +'{project}/{image}', '{family}', or '{image}'. If referred by family, the +images names must include the family name. If they don't, use the +[google_compute_image data source](/docs/providers/google/d/compute_image.html). +For instance, the image 'centos-6-v20180104' includes its family name 'centos-6'. +These images can be referred by family name here.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels to apply to this disk. A list of key->value pairs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "licenses": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Any applicable license URI.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + "physical_block_size_bytes": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Physical block size of the persistent disk, in bytes. If not present +in a request, a default value is used. Currently supported sizes +are 4096 and 16384, other sizes may be added in the future. +If an unsupported value is requested, the error message will list +the supported values for the caller's project.`, + }, + "provisioned_iops": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Indicates how many IOPS must be provisioned for the disk. +Note: Updating currently is only supported by hyperdisk skus without the need to delete and recreate the disk, hyperdisk +allows for an update of IOPS every 4 hours. 
To update your hyperdisk more frequently, you'll need to manually delete and recreate it`, + }, + "provisioned_throughput": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Indicates how much Throughput must be provisioned for the disk. +Note: Updating currently is only supported by hyperdisk skus without the need to delete and recreate the disk, hyperdisk +allows for an update of Throughput every 4 hours. To update your hyperdisk more frequently, you'll need to manually delete and recreate it`, + }, + "size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Size of the persistent disk, specified in GB. You can specify this +field when creating a persistent disk using the 'image' or +'snapshot' parameter, or specify it alone to create an empty +persistent disk. + +If you specify this field along with 'image' or 'snapshot', +the value must not be less than the size of the image +or the size of the snapshot. + +~>**NOTE** If you change the size, Terraform updates the disk size +if upsizing is detected but recreates the disk if downsizing is requested. +You can add 'lifecycle.prevent_destroy' in the config to prevent destroying +and recreating.`, + }, + "snapshot": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The source snapshot used to create this disk. You can provide this as +a partial or full URL to the resource. If the snapshot is in another +project than this disk, you must supply a full URL. For example, the +following are valid values: + +* 'https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot' +* 'projects/project/global/snapshots/snapshot' +* 'global/snapshots/snapshot' +* 'snapshot'`, + }, + "source_disk": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: sourceDiskDiffSupress, + Description: `The source disk used to create this disk. 
You can provide this as a partial or full URL to the resource. +For example, the following are valid values: + +* https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks/{disk} +* https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks/{disk} +* projects/{project}/zones/{zone}/disks/{disk} +* projects/{project}/regions/{region}/disks/{disk} +* zones/{zone}/disks/{disk} +* regions/{region}/disks/{disk}`, + }, + "source_image_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The customer-supplied encryption key of the source image. Required if +the source image is protected by a customer-supplied encryption key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_self_link": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The self link of the encryption key used to encrypt the disk. Also called KmsKeyName +in the cloud console. Your project's Compute Engine System service account +('service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com') must have +'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. +See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys`, + }, + "kms_key_service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The service account used for the encryption request for the given KMS key. 
+If absent, the Compute Engine Service Agent service account is used.`, + }, + "raw_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies a 256-bit customer-supplied encryption key, encoded in +RFC 4648 base64 to either encrypt or decrypt this resource.`, + }, + "sha256": { + Type: schema.TypeString, + Computed: true, + Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied +encryption key that protects this resource.`, + }, + }, + }, + }, + "source_snapshot_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The customer-supplied encryption key of the source snapshot. Required +if the source snapshot is protected by a customer-supplied encryption +key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_self_link": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The self link of the encryption key used to encrypt the disk. Also called KmsKeyName +in the cloud console. Your project's Compute Engine System service account +('service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com') must have +'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. +See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys`, + }, + "kms_key_service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The service account used for the encryption request for the given KMS key. 
+If absent, the Compute Engine Service Agent service account is used.`, + }, + "raw_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies a 256-bit customer-supplied encryption key, encoded in +RFC 4648 base64 to either encrypt or decrypt this resource.`, + }, + "sha256": { + Type: schema.TypeString, + Computed: true, + Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied +encryption key that protects this resource.`, + }, + }, + }, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the disk type resource describing which disk type to use to +create the disk. Provide this when creating the disk.`, + Default: "pd-standard", + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the zone where the disk resides.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The fingerprint used for optimistic locking of this resource. Used +internally during updates.`, + }, + "last_attach_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Last attach timestamp in RFC3339 text format.`, + }, + "last_detach_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Last detach timestamp in RFC3339 text format.`, + }, + "source_disk_id": { + Type: schema.TypeString, + Computed: true, + Description: `The ID value of the disk used to create this image. 
This value may +be used to determine whether the image was taken from the current +or a previous instance of a given disk name.`, + }, + "source_image_id": { + Type: schema.TypeString, + Computed: true, + Description: `The ID value of the image used to create this disk. This value +identifies the exact image that was used to create this persistent +disk. For example, if you created the persistent disk from an image +that was later deleted and recreated under the same name, the source +image ID would identify the exact version of the image that was used.`, + }, + "source_snapshot_id": { + Type: schema.TypeString, + Computed: true, + Description: `The unique ID of the snapshot used to create this disk. This value +identifies the exact snapshot that was used to create this persistent +disk. For example, if you created the persistent disk from a snapshot +that was later deleted and recreated under the same name, the source +snapshot ID would identify the exact version of the snapshot that was +used.`, + }, + "users": { + Type: schema.TypeList, + Computed: true, + Description: `Links to the users of the disk (attached instances) in form: +project/zones/zone/instances/instance`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func computeDiskGuestOsFeaturesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS", "GVNIC", "SEV_LIVE_MIGRATABLE", "SEV_SNP_CAPABLE", "SUSPEND_RESUME_COMPATIBLE", "TDX_CAPABLE"}), + Description: `The type of 
supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. Possible values: ["MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS", "GVNIC", "SEV_LIVE_MIGRATABLE", "SEV_SNP_CAPABLE", "SUSPEND_RESUME_COMPATIBLE", "TDX_CAPABLE"]`, + }, + }, + } +} + +func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelFingerprintProp, err := expandComputeDiskLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + descriptionProp, err := expandComputeDiskDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandComputeDiskLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + nameProp, err := expandComputeDiskName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + sizeGbProp, err := 
expandComputeDiskSize(d.Get("size"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("size"); !tpgresource.IsEmptyValue(reflect.ValueOf(sizeGbProp)) && (ok || !reflect.DeepEqual(v, sizeGbProp)) { + obj["sizeGb"] = sizeGbProp + } + physicalBlockSizeBytesProp, err := expandComputeDiskPhysicalBlockSizeBytes(d.Get("physical_block_size_bytes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("physical_block_size_bytes"); !tpgresource.IsEmptyValue(reflect.ValueOf(physicalBlockSizeBytesProp)) && (ok || !reflect.DeepEqual(v, physicalBlockSizeBytesProp)) { + obj["physicalBlockSizeBytes"] = physicalBlockSizeBytesProp + } + sourceDiskProp, err := expandComputeDiskSourceDisk(d.Get("source_disk"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_disk"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceDiskProp)) && (ok || !reflect.DeepEqual(v, sourceDiskProp)) { + obj["sourceDisk"] = sourceDiskProp + } + typeProp, err := expandComputeDiskType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + sourceImageProp, err := expandComputeDiskImage(d.Get("image"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("image"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceImageProp)) && (ok || !reflect.DeepEqual(v, sourceImageProp)) { + obj["sourceImage"] = sourceImageProp + } + provisionedIopsProp, err := expandComputeDiskProvisionedIops(d.Get("provisioned_iops"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("provisioned_iops"); !tpgresource.IsEmptyValue(reflect.ValueOf(provisionedIopsProp)) && (ok || !reflect.DeepEqual(v, provisionedIopsProp)) { + obj["provisionedIops"] = provisionedIopsProp + } + provisionedThroughputProp, err := 
expandComputeDiskProvisionedThroughput(d.Get("provisioned_throughput"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("provisioned_throughput"); !tpgresource.IsEmptyValue(reflect.ValueOf(provisionedThroughputProp)) && (ok || !reflect.DeepEqual(v, provisionedThroughputProp)) { + obj["provisionedThroughput"] = provisionedThroughputProp + } + asyncPrimaryDiskProp, err := expandComputeDiskAsyncPrimaryDisk(d.Get("async_primary_disk"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("async_primary_disk"); !tpgresource.IsEmptyValue(reflect.ValueOf(asyncPrimaryDiskProp)) && (ok || !reflect.DeepEqual(v, asyncPrimaryDiskProp)) { + obj["asyncPrimaryDisk"] = asyncPrimaryDiskProp + } + guestOsFeaturesProp, err := expandComputeDiskGuestOsFeatures(d.Get("guest_os_features"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("guest_os_features"); !tpgresource.IsEmptyValue(reflect.ValueOf(guestOsFeaturesProp)) && (ok || !reflect.DeepEqual(v, guestOsFeaturesProp)) { + obj["guestOsFeatures"] = guestOsFeaturesProp + } + licensesProp, err := expandComputeDiskLicenses(d.Get("licenses"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("licenses"); !tpgresource.IsEmptyValue(reflect.ValueOf(licensesProp)) && (ok || !reflect.DeepEqual(v, licensesProp)) { + obj["licenses"] = licensesProp + } + zoneProp, err := expandComputeDiskZone(d.Get("zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + obj["zone"] = zoneProp + } + sourceImageEncryptionKeyProp, err := expandComputeDiskSourceImageEncryptionKey(d.Get("source_image_encryption_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_image_encryption_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceImageEncryptionKeyProp)) && (ok || 
!reflect.DeepEqual(v, sourceImageEncryptionKeyProp)) { + obj["sourceImageEncryptionKey"] = sourceImageEncryptionKeyProp + } + diskEncryptionKeyProp, err := expandComputeDiskDiskEncryptionKey(d.Get("disk_encryption_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disk_encryption_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(diskEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, diskEncryptionKeyProp)) { + obj["diskEncryptionKey"] = diskEncryptionKeyProp + } + sourceSnapshotProp, err := expandComputeDiskSnapshot(d.Get("snapshot"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("snapshot"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceSnapshotProp)) && (ok || !reflect.DeepEqual(v, sourceSnapshotProp)) { + obj["sourceSnapshot"] = sourceSnapshotProp + } + sourceSnapshotEncryptionKeyProp, err := expandComputeDiskSourceSnapshotEncryptionKey(d.Get("source_snapshot_encryption_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_snapshot_encryption_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceSnapshotEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, sourceSnapshotEncryptionKeyProp)) { + obj["sourceSnapshotEncryptionKey"] = sourceSnapshotEncryptionKeyProp + } + + obj, err = resourceComputeDiskEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Disk: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Disk: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Disk: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/disks/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating Disk", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Disk: %s", err) + } + + log.Printf("[DEBUG] Finished creating Disk %q: %#v", d.Id(), res) + + return resourceComputeDiskRead(d, meta) +} + +func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Disk: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeDisk %q", d.Id())) + } + + res, err = resourceComputeDiskDecoder(d, meta, res) + 
if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing ComputeDisk because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + + if err := d.Set("label_fingerprint", flattenComputeDiskLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeDiskCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("description", flattenComputeDiskDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("last_attach_timestamp", flattenComputeDiskLastAttachTimestamp(res["lastAttachTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("last_detach_timestamp", flattenComputeDiskLastDetachTimestamp(res["lastDetachTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("labels", flattenComputeDiskLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("name", flattenComputeDiskName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("size", flattenComputeDiskSize(res["sizeGb"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("users", flattenComputeDiskUsers(res["users"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("physical_block_size_bytes", flattenComputeDiskPhysicalBlockSizeBytes(res["physicalBlockSizeBytes"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("source_disk", flattenComputeDiskSourceDisk(res["sourceDisk"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("source_disk_id", flattenComputeDiskSourceDiskId(res["sourceDiskId"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("type", flattenComputeDiskType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("image", flattenComputeDiskImage(res["sourceImage"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("provisioned_iops", flattenComputeDiskProvisionedIops(res["provisionedIops"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("provisioned_throughput", flattenComputeDiskProvisionedThroughput(res["provisionedThroughput"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("async_primary_disk", flattenComputeDiskAsyncPrimaryDisk(res["asyncPrimaryDisk"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("guest_os_features", flattenComputeDiskGuestOsFeatures(res["guestOsFeatures"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("licenses", flattenComputeDiskLicenses(res["licenses"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("zone", flattenComputeDiskZone(res["zone"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("source_image_encryption_key", flattenComputeDiskSourceImageEncryptionKey(res["sourceImageEncryptionKey"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("source_image_id", flattenComputeDiskSourceImageId(res["sourceImageId"], 
d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("disk_encryption_key", flattenComputeDiskDiskEncryptionKey(res["diskEncryptionKey"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("snapshot", flattenComputeDiskSnapshot(res["sourceSnapshot"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("source_snapshot_encryption_key", flattenComputeDiskSourceSnapshotEncryptionKey(res["sourceSnapshotEncryptionKey"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("source_snapshot_id", flattenComputeDiskSourceSnapshotId(res["sourceSnapshotId"], d, config)); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading Disk: %s", err) + } + + return nil +} + +func resourceComputeDiskUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Disk: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("label_fingerprint") || d.HasChange("labels") { + obj := make(map[string]interface{}) + + labelFingerprintProp, err := expandComputeDiskLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + labelsProp, err := expandComputeDiskLabels(d.Get("labels"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + obj, err = resourceComputeDiskUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}/setLabels") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Disk %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Disk %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Disk", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("size") { + obj := make(map[string]interface{}) + + sizeGbProp, err := expandComputeDiskSize(d.Get("size"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("size"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sizeGbProp)) { + obj["sizeGb"] = sizeGbProp + } + + obj, err = resourceComputeDiskUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}/resize") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Disk %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Disk %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Disk", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("provisioned_iops") { + obj := make(map[string]interface{}) + + provisionedIopsProp, err := expandComputeDiskProvisionedIops(d.Get("provisioned_iops"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("provisioned_iops"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, provisionedIopsProp)) { + obj["provisionedIops"] = provisionedIopsProp + } + + obj, err = resourceComputeDiskUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}?paths=provisionedIops") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Disk %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Disk %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Disk", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if 
d.HasChange("provisioned_throughput") { + obj := make(map[string]interface{}) + + provisionedThroughputProp, err := expandComputeDiskProvisionedThroughput(d.Get("provisioned_throughput"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("provisioned_throughput"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, provisionedThroughputProp)) { + obj["provisionedThroughput"] = provisionedThroughputProp + } + + obj, err = resourceComputeDiskUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}?paths=provisionedThroughput") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Disk %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Disk %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Disk", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeDiskRead(d, meta) +} + +func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Disk: %s", err) + } + billingProject = project + + url, err := 
tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + readRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeDisk %q", d.Id())) + } + + // if disks are attached to instances, they must be detached before the disk can be deleted + if v, ok := readRes["users"].([]interface{}); ok { + type detachArgs struct{ project, zone, instance, deviceName string } + var detachCalls []detachArgs + + for _, instance := range tpgresource.ConvertStringArr(v) { + self := d.Get("self_link").(string) + instanceProject, instanceZone, instanceName, err := tpgresource.GetLocationalResourcePropertiesFromSelfLinkString(instance) + if err != nil { + return err + } + + i, err := config.NewComputeClient(userAgent).Instances.Get(instanceProject, instanceZone, instanceName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] instance %q not found, not bothering to detach disks", instance) + continue + } + return fmt.Errorf("Error retrieving instance %s: %s", instance, err.Error()) + } + for _, disk := range i.Disks { + if tpgresource.CompareSelfLinkOrResourceName("", disk.Source, self, nil) { + detachCalls = append(detachCalls, detachArgs{ + project: instanceProject, + zone: tpgresource.GetResourceNameFromSelfLink(i.Zone), + instance: i.Name, + deviceName: disk.DeviceName, + }) + } + } + } + + for _, call := range detachCalls { + op, err := config.NewComputeClient(userAgent).Instances.DetachDisk(call.project, call.zone, call.instance, call.deviceName).Do() + if err != nil { + return fmt.Errorf("Error detaching disk %s from instance %s/%s/%s: %s", call.deviceName, call.project, + call.zone, call.instance, 
err.Error()) + } + err = ComputeOperationWaitTime(config, op, call.project, + fmt.Sprintf("Detaching disk from %s/%s/%s", call.project, call.zone, call.instance), userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + if opErr, ok := err.(ComputeOperationError); ok && len(opErr.Errors) == 1 && opErr.Errors[0].Code == "RESOURCE_NOT_FOUND" { + log.Printf("[WARN] instance %q was deleted while awaiting detach", call.instance) + continue + } + return err + } + } + } + log.Printf("[DEBUG] Deleting Disk %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Disk") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting Disk", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Disk %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/zones/(?P[^/]+)/disks/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/disks/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeDiskLabelFingerprint(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskLastAttachTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskLastDetachTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeDiskUsers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeDiskPhysicalBlockSizeBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := 
v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeDiskSourceDisk(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskSourceDiskId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenComputeDiskImage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskProvisionedIops(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeDiskProvisionedThroughput(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeDiskAsyncPrimaryDisk(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + 
transformed["disk"] = + flattenComputeDiskAsyncPrimaryDiskDisk(original["disk"], d, config) + return []interface{}{transformed} +} +func flattenComputeDiskAsyncPrimaryDiskDisk(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskGuestOsFeatures(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(computeDiskGuestOsFeaturesSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "type": flattenComputeDiskGuestOsFeaturesType(original["type"], d, config), + }) + } + return transformed +} +func flattenComputeDiskGuestOsFeaturesType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskLicenses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeDiskZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenComputeDiskSourceImageEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["raw_key"] = + flattenComputeDiskSourceImageEncryptionKeyRawKey(original["rawKey"], d, config) + transformed["sha256"] = + flattenComputeDiskSourceImageEncryptionKeySha256(original["sha256"], d, config) + 
transformed["kms_key_self_link"] = + flattenComputeDiskSourceImageEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) + transformed["kms_key_service_account"] = + flattenComputeDiskSourceImageEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) + return []interface{}{transformed} +} +func flattenComputeDiskSourceImageEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskSourceImageEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskSourceImageEncryptionKeyKmsKeySelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskSourceImageEncryptionKeyKmsKeyServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskSourceImageId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskDiskEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["raw_key"] = + flattenComputeDiskDiskEncryptionKeyRawKey(original["rawKey"], d, config) + transformed["rsa_encrypted_key"] = + flattenComputeDiskDiskEncryptionKeyRsaEncryptedKey(original["rsaEncryptedKey"], d, config) + transformed["sha256"] = + flattenComputeDiskDiskEncryptionKeySha256(original["sha256"], d, config) + transformed["kms_key_self_link"] = + flattenComputeDiskDiskEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) + transformed["kms_key_service_account"] = + flattenComputeDiskDiskEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) + return 
[]interface{}{transformed} +} +func flattenComputeDiskDiskEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskDiskEncryptionKeyRsaEncryptedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskDiskEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskDiskEncryptionKeyKmsKeySelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskDiskEncryptionKeyKmsKeyServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskSnapshot(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeDiskSourceSnapshotEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["raw_key"] = + flattenComputeDiskSourceSnapshotEncryptionKeyRawKey(original["rawKey"], d, config) + transformed["kms_key_self_link"] = + flattenComputeDiskSourceSnapshotEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) + transformed["sha256"] = + flattenComputeDiskSourceSnapshotEncryptionKeySha256(original["sha256"], d, config) + transformed["kms_key_service_account"] = + flattenComputeDiskSourceSnapshotEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) + return []interface{}{transformed} +} +func flattenComputeDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + 
return v +} + +func flattenComputeDiskSourceSnapshotEncryptionKeyKmsKeySelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskSourceSnapshotEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskSourceSnapshotEncryptionKeyKmsKeyServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeDiskSourceSnapshotId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeDiskLabelFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandComputeDiskName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskSize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskPhysicalBlockSizeBytes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskSourceDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskType(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseZonalFieldValue("diskTypes", v.(string), "project", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for type: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeDiskImage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskProvisionedIops(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskProvisionedThroughput(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskAsyncPrimaryDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDisk, err := expandComputeDiskAsyncPrimaryDiskDisk(original["disk"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisk); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["disk"] = transformedDisk + } + + return transformed, nil +} + +func expandComputeDiskAsyncPrimaryDiskDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskGuestOsFeatures(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) 
+ + transformedType, err := expandComputeDiskGuestOsFeaturesType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeDiskGuestOsFeaturesType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskLicenses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for licenses: nil") + } + f, err := tpgresource.ParseGlobalFieldValue("licenses", raw.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for licenses: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func expandComputeDiskZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("zones", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for zone: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeDiskSourceImageEncryptionKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRawKey, err := expandComputeDiskSourceImageEncryptionKeyRawKey(original["raw_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRawKey); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["rawKey"] = transformedRawKey + } + + transformedSha256, err := expandComputeDiskSourceImageEncryptionKeySha256(original["sha256"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha256); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha256"] = transformedSha256 + } + + transformedKmsKeySelfLink, err := expandComputeDiskSourceImageEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeySelfLink + } + + transformedKmsKeyServiceAccount, err := expandComputeDiskSourceImageEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount + } + + return transformed, nil +} + +func expandComputeDiskSourceImageEncryptionKeyRawKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskSourceImageEncryptionKeySha256(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskSourceImageEncryptionKeyKmsKeySelfLink(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskSourceImageEncryptionKeyKmsKeyServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskDiskEncryptionKey(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRawKey, err := expandComputeDiskDiskEncryptionKeyRawKey(original["raw_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRawKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rawKey"] = transformedRawKey + } + + transformedRsaEncryptedKey, err := expandComputeDiskDiskEncryptionKeyRsaEncryptedKey(original["rsa_encrypted_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRsaEncryptedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rsaEncryptedKey"] = transformedRsaEncryptedKey + } + + transformedSha256, err := expandComputeDiskDiskEncryptionKeySha256(original["sha256"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha256); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha256"] = transformedSha256 + } + + transformedKmsKeySelfLink, err := expandComputeDiskDiskEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeySelfLink + } + + transformedKmsKeyServiceAccount, err := expandComputeDiskDiskEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount + } + + return transformed, nil +} + +func expandComputeDiskDiskEncryptionKeyRawKey(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskDiskEncryptionKeyRsaEncryptedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskDiskEncryptionKeySha256(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskDiskEncryptionKeyKmsKeySelfLink(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskDiskEncryptionKeyKmsKeyServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskSnapshot(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("snapshots", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for snapshot: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeDiskSourceSnapshotEncryptionKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRawKey, err := expandComputeDiskSourceSnapshotEncryptionKeyRawKey(original["raw_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRawKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rawKey"] = transformedRawKey + } + + transformedKmsKeySelfLink, err := expandComputeDiskSourceSnapshotEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) + if err != nil { + return 
nil, err + } else if val := reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeySelfLink + } + + transformedSha256, err := expandComputeDiskSourceSnapshotEncryptionKeySha256(original["sha256"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha256); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha256"] = transformedSha256 + } + + transformedKmsKeyServiceAccount, err := expandComputeDiskSourceSnapshotEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount + } + + return transformed, nil +} + +func expandComputeDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskSourceSnapshotEncryptionKeyKmsKeySelfLink(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskSourceSnapshotEncryptionKeySha256(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeDiskSourceSnapshotEncryptionKeyKmsKeyServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceComputeDiskEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) + if err != nil { + return nil, err + } + + if v, ok := d.GetOk("type"); ok { + log.Printf("[DEBUG] Loading disk type: %s", v.(string)) + diskType, err := readDiskType(config, d, v.(string)) + if err != nil { + return nil, fmt.Errorf( + "Error loading disk type '%s': %s", + v.(string), err) + } + + obj["type"] = diskType.RelativeLink() + } + + if v, ok := d.GetOk("image"); ok { + log.Printf("[DEBUG] Resolving image name: %s", v.(string)) + imageUrl, err := ResolveImage(config, project, v.(string), userAgent) + if err != nil { + return nil, fmt.Errorf( + "Error resolving image name '%s': %s", + v.(string), err) + } + + obj["sourceImage"] = imageUrl + log.Printf("[DEBUG] Image name resolved to: %s", imageUrl) + } + + return obj, nil +} + +func resourceComputeDiskUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + + if (d.HasChange("provisioned_iops") && strings.Contains(d.Get("type").(string), "hyperdisk")) || (d.HasChange("provisioned_throughput") && strings.Contains(d.Get("type").(string), "hyperdisk")) { + nameProp := d.Get("name") + if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + } + return obj, nil +} + +func resourceComputeDiskDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if v, ok := res["diskEncryptionKey"]; ok { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + // The raw key won't be returned, so we need to use the original. 
+ transformed["rawKey"] = d.Get("disk_encryption_key.0.raw_key") + transformed["rsaEncryptedKey"] = d.Get("disk_encryption_key.0.rsa_encrypted_key") + transformed["sha256"] = original["sha256"] + + if kmsKeyName, ok := original["kmsKeyName"]; ok { + // The response for crypto keys often includes the version of the key which needs to be removed + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] + } + + if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { + transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount + } + + res["diskEncryptionKey"] = transformed + } + + if v, ok := res["sourceImageEncryptionKey"]; ok { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + // The raw key won't be returned, so we need to use the original. + transformed["rawKey"] = d.Get("source_image_encryption_key.0.raw_key") + transformed["sha256"] = original["sha256"] + + if kmsKeyName, ok := original["kmsKeyName"]; ok { + // The response for crypto keys often includes the version of the key which needs to be removed + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] + } + + if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { + transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount + } + + res["sourceImageEncryptionKey"] = transformed + } + + if v, ok := res["sourceSnapshotEncryptionKey"]; ok { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + // The raw key won't be returned, so we need to use the original. 
+ transformed["rawKey"] = d.Get("source_snapshot_encryption_key.0.raw_key") + transformed["sha256"] = original["sha256"] + + if kmsKeyName, ok := original["kmsKeyName"]; ok { + // The response for crypto keys often includes the version of the key which needs to be removed + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] + } + + if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { + transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount + } + + res["sourceSnapshotEncryptionKey"] = transformed + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk_async_replication.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk_async_replication.go new file mode 100644 index 0000000000..ea81d823e6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk_async_replication.go @@ -0,0 +1,301 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/compute/v1" +) + +func ResourceComputeDiskAsyncReplication() *schema.Resource { + return &schema.Resource{ + Create: resourceDiskAsyncReplicationCreate, + Read: resourceDiskAsyncReplicationRead, + Delete: resourceDiskAsyncReplicationDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "primary_disk": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Primary disk for asynchronous replication.`, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + "secondary_disk": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Description: `Secondary disk for asynchronous replication.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Secondary disk for asynchronous replication.`, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Output-only. 
Status of replication on the secondary disk.`, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func asyncReplicationGetComputeClient(d *schema.ResourceData, meta interface{}) (*compute.Service, error) { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + clientCompute := config.NewComputeClient(userAgent) + return clientCompute, nil +} + +func asyncReplicationGetDiskFromConfig(disk string, d *schema.ResourceData, meta interface{}) (zv *tpgresource.ZonalFieldValue, rv *tpgresource.RegionalFieldValue, resourceId string, err error) { + config := meta.(*transport_tpg.Config) + + var zonalMatch bool + zonalMatch, err = regexp.MatchString(fmt.Sprintf(tpgresource.ZonalLinkBasePattern, "disks"), disk) + if err != nil { + return + } + zv, parseErr := tpgresource.ParseDiskFieldValue(disk, d, config) + if !zonalMatch || parseErr != nil { + rv, err = tpgresource.ParseRegionDiskFieldValue(disk, d, config) + if err != nil { + return + } + var regionalMatch bool + regionalMatch, err = regexp.MatchString(fmt.Sprintf(tpgresource.RegionalLinkBasePattern, "disks"), disk) + if !regionalMatch || err != nil { + err = fmt.Errorf("regional disk expected: %s", disk) + return + } + resourceId = fmt.Sprintf(tpgresource.RegionalLinkTemplate, rv.Project, rv.Region, "disks", rv.Name) + } else { + resourceId = fmt.Sprintf(tpgresource.ZonalLinkTemplate, zv.Project, zv.Zone, "disks", zv.Name) + } + return +} + +func asyncReplicationGetDiskStatus(client *compute.Service, zv *tpgresource.ZonalFieldValue, rv *tpgresource.RegionalFieldValue) (diskStatus *compute.Disk, err error) { + if rv == nil { // Zonal disk + diskStatus, err = client.Disks.Get(zv.Project, zv.Zone, zv.Name).Do() + log.Printf("[DEBUG] Get disk zones/%s/%s: %v", zv.Zone, zv.Name, diskStatus) + } else { + diskStatus, err = client.RegionDisks.Get(rv.Project, rv.Region, rv.Name).Do() + log.Printf("[DEBUG] Get disk 
regions/%s/%s: %v", rv.Region, rv.Name, diskStatus) + } + return +} + +func resourceDiskAsyncReplicationCreate(d *schema.ResourceData, meta interface{}) error { + clientCompute, err := asyncReplicationGetComputeClient(d, meta) + if err != nil { + return err + } + + zv, rv, resourceId, err := asyncReplicationGetDiskFromConfig(d.Get("primary_disk").(string), d, meta) + if err != nil { + return err + } + + secondaryDiskList := d.Get("secondary_disk").([]interface{}) + secondaryDiskMap := secondaryDiskList[0].(map[string]interface{}) + secondaryDisk := secondaryDiskMap["disk"].(string) + if rv == nil { // Zonal disk + replicationRequest := compute.DisksStartAsyncReplicationRequest{ + AsyncSecondaryDisk: secondaryDisk, + } + _, err = clientCompute.Disks.StartAsyncReplication(zv.Project, zv.Zone, zv.Name, &replicationRequest).Do() + if err != nil { + return err + } + } else { + replicationRequest := compute.RegionDisksStartAsyncReplicationRequest{ + AsyncSecondaryDisk: secondaryDisk, + } + _, err = clientCompute.RegionDisks.StartAsyncReplication(rv.Project, rv.Region, rv.Name, &replicationRequest).Do() + if err != nil { + return err + } + } + err = resource.Retry(time.Minute*time.Duration(5), func() *resource.RetryError { + diskStatus, err := asyncReplicationGetDiskStatus(clientCompute, zv, rv) + if err != nil { + return resource.NonRetryableError(err) + } + if diskStatus.ResourceStatus == nil { + return resource.NonRetryableError(fmt.Errorf("no resource status for disk: %s", resourceId)) + } + if secondaryState, ok := diskStatus.ResourceStatus.AsyncSecondaryDisks[secondaryDisk]; ok { + if secondaryState.State != "ACTIVE" { + time.Sleep(5 * time.Second) + return resource.RetryableError(fmt.Errorf("secondary disk %s state (%s) is not: ACTIVE", secondaryDisk, secondaryState)) + } + return nil + } + time.Sleep(5 * time.Second) + return resource.RetryableError(fmt.Errorf("secondary disk %s state not available", secondaryDisk)) + }) + if err != nil { + return err + } + 
d.SetId(resourceId) + return resourceDiskAsyncReplicationRead(d, meta) +} + +func resourceDiskAsyncReplicationRead(d *schema.ResourceData, meta interface{}) error { + clientCompute, err := asyncReplicationGetComputeClient(d, meta) + if err != nil { + return err + } + + primaryDisk := d.Get("primary_disk").(string) + if primaryDisk == "" { + primaryDisk = d.Id() + d.Set("primary_disk", primaryDisk) + } + + zv, rv, resourceId, err := asyncReplicationGetDiskFromConfig(primaryDisk, d, meta) + if err != nil { + return err + } + + diskStatus, err := asyncReplicationGetDiskStatus(clientCompute, zv, rv) + if err != nil { + return err + } + + secondaryDisks := make([]map[string]string, 0) + existingSecondaryDisks := make(map[string]bool, 0) + for _, disk := range diskStatus.AsyncSecondaryDisks { + secondaryDisk := make(map[string]string) + + _, _, resourceName, err := asyncReplicationGetDiskFromConfig(disk.AsyncReplicationDisk.Disk, d, meta) + if err != nil { + return err + } + + if diskStatus.ResourceStatus == nil { + return fmt.Errorf("no resource status for disk: %s", resourceId) + } + + secondaryDisk["disk"] = resourceName + existingSecondaryDisks[resourceName] = true + if secondaryState, ok := diskStatus.ResourceStatus.AsyncSecondaryDisks[resourceName]; ok { + // Note this might be other than ACTIVE or STOPPED, but we wait for proper state + // on replication start/stop so it shouldnt affect Terraform + log.Printf("[DEBUG] Secondary disk %s is in state: %s", resourceName, secondaryState.State) + secondaryDisk["state"] = secondaryState.State + } + secondaryDisks = append(secondaryDisks, secondaryDisk) + } + + log.Printf("[DEBUG] Secondary disks: %v", secondaryDisks) + if err = d.Set("secondary_disk", secondaryDisks); err != nil { + return fmt.Errorf("Error setting secondary_disk: %s", err) + } + d.SetId(resourceId) + return nil +} + +func resourceDiskAsyncReplicationDelete(d *schema.ResourceData, meta interface{}) error { + clientCompute, err := 
asyncReplicationGetComputeClient(d, meta) + if err != nil { + return err + } + + zv, rv, _, err := asyncReplicationGetDiskFromConfig(d.Get("primary_disk").(string), d, meta) + if err != nil { + return err + } + + var replicationStopped bool = false + secondaryDiskList := d.Get("secondary_disk").([]interface{}) + secondaryDiskMap := secondaryDiskList[0].(map[string]interface{}) + secondaryDisk := secondaryDiskMap["disk"].(string) + _, _, resourceName, err := asyncReplicationGetDiskFromConfig(secondaryDisk, d, meta) + if err != nil { + return err + } + + diskStatus, err := asyncReplicationGetDiskStatus(clientCompute, zv, rv) + if err != nil { + return err + } + + if diskStatus.ResourceStatus == nil { + // Nothing to do, replication not running + return nil + } + + if secondaryState, ok := diskStatus.ResourceStatus.AsyncSecondaryDisks[resourceName]; ok { + if secondaryState.State != "STOPPED" { + replicationStopped = true + if rv == nil { // Zonal disk + _, err = clientCompute.Disks.StopAsyncReplication(zv.Project, zv.Zone, zv.Name).Do() + if err != nil { + return err + } + } else { + _, err = clientCompute.RegionDisks.StopAsyncReplication(rv.Project, rv.Region, rv.Name).Do() + if err != nil { + return err + } + } + err = resource.Retry(time.Minute*time.Duration(5), func() *resource.RetryError { + diskStatus, err := asyncReplicationGetDiskStatus(clientCompute, zv, rv) + if err != nil { + return resource.NonRetryableError(err) + } + if secondaryState, ok := diskStatus.ResourceStatus.AsyncSecondaryDisks[resourceName]; ok { + if secondaryState.State != "STOPPED" { + time.Sleep(5 * time.Second) + return resource.RetryableError(fmt.Errorf("secondary disk %s state (%s) is not STOPPED", secondaryDisk, secondaryState)) + } + return nil + } + return resource.NonRetryableError(fmt.Errorf("secondary disk %s state not available", secondaryDisk)) + }) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("could not find secondary disk: %s", secondaryDisk) + } + + 
if replicationStopped { + // Allow the replication to quiescence + time.Sleep(5000 * time.Millisecond) + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk_resource_policy_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk_resource_policy_attachment.go new file mode 100644 index 0000000000..bbfd5c9d26 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk_resource_policy_attachment.go @@ -0,0 +1,425 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeDiskResourcePolicyAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeDiskResourcePolicyAttachmentCreate, + Read: resourceComputeDiskResourcePolicyAttachmentRead, + Delete: resourceComputeDiskResourcePolicyAttachmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeDiskResourcePolicyAttachmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "disk": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the disk in which the resource policies are attached to.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource policy to be attached to the disk for scheduling snapshot +creation. 
Do not specify the self link.`, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the zone where the disk resides.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeDiskResourcePolicyAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + obj, err = resourceComputeDiskResourcePolicyAttachmentEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{disk}}/addResourcePolicies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new DiskResourcePolicyAttachment: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DiskResourcePolicyAttachment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if 
err != nil { + return fmt.Errorf("Error creating DiskResourcePolicyAttachment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{zone}}/{{disk}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating DiskResourcePolicyAttachment", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create DiskResourcePolicyAttachment: %s", err) + } + + log.Printf("[DEBUG] Finished creating DiskResourcePolicyAttachment %q: %#v", d.Id(), res) + + return resourceComputeDiskResourcePolicyAttachmentRead(d, meta) +} + +func resourceComputeDiskResourcePolicyAttachmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{disk}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DiskResourcePolicyAttachment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeDiskResourcePolicyAttachment %q", d.Id())) + } + + res, err = flattenNestedComputeDiskResourcePolicyAttachment(d, meta, res) + if err != nil { + return err 
+ } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing ComputeDiskResourcePolicyAttachment because it couldn't be matched.") + d.SetId("") + return nil + } + + res, err = resourceComputeDiskResourcePolicyAttachmentDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing ComputeDiskResourcePolicyAttachment because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading DiskResourcePolicyAttachment: %s", err) + } + + if err := d.Set("name", flattenNestedComputeDiskResourcePolicyAttachmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading DiskResourcePolicyAttachment: %s", err) + } + + return nil +} + +func resourceComputeDiskResourcePolicyAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DiskResourcePolicyAttachment: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{disk}}/removeResourcePolicies") + if err != nil { + return err + } + + var obj map[string]interface{} + obj = make(map[string]interface{}) + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + if zone == "" { + return fmt.Errorf("zone must be non-empty - set in resource or at provider-level") + } + + // resourcePolicies are referred to by region but affixed to zonal disks. 
+ // We construct the regional name from the zone: + // + // projects/{project}/regions/{region}/resourcePolicies/{resourceId} + region := tpgresource.GetRegionFromZone(zone) + if region == "" { + return fmt.Errorf("invalid zone %q, unable to infer region from zone", zone) + } + + name, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(name)) && (ok || !reflect.DeepEqual(v, name)) { + obj["resourcePolicies"] = []interface{}{fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, name)} + } + log.Printf("[DEBUG] Deleting DiskResourcePolicyAttachment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DiskResourcePolicyAttachment") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting DiskResourcePolicyAttachment", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting DiskResourcePolicyAttachment %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeDiskResourcePolicyAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/zones/(?P[^/]+)/disks/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // 
Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{zone}}/{{disk}}/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedComputeDiskResourcePolicyAttachmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedComputeDiskResourcePolicyAttachmentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceComputeDiskResourcePolicyAttachmentEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return nil, err + } + if zone == "" { + return nil, fmt.Errorf("zone must be non-empty - set in resource or at provider-level") + } + + // resourcePolicies are referred to by region but affixed to zonal disks. 
+ // We construct the regional name from the zone: + // + // projects/{project}/regions/{region}/resourcePolicies/{resourceId} + region := tpgresource.GetRegionFromZone(zone) + if region == "" { + return nil, fmt.Errorf("invalid zone %q, unable to infer region from zone", zone) + } + + obj["resourcePolicies"] = []interface{}{fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, obj["name"])} + delete(obj, "name") + return obj, nil +} + +func flattenNestedComputeDiskResourcePolicyAttachment(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["resourcePolicies"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value resourcePolicies. Actual value: %v", v) + } + + _, item, err := resourceComputeDiskResourcePolicyAttachmentFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputeDiskResourcePolicyAttachmentFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedName := flattenNestedComputeDiskResourcePolicyAttachmentName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + // List response only contains the ID - construct a response object. + item := map[string]interface{}{ + "name": itemRaw, + } + + // Decode list item before comparing. 
+ item, err := resourceComputeDiskResourcePolicyAttachmentDecoder(d, meta, item) + if err != nil { + return -1, nil, err + } + + itemName := flattenNestedComputeDiskResourcePolicyAttachmentName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} +func resourceComputeDiskResourcePolicyAttachmentDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + res["name"] = tpgresource.GetResourceNameFromSelfLink(res["name"].(string)) + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk_sweeper.go new file mode 100644 index 0000000000..e5aadd6823 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_disk_sweeper.go @@ -0,0 +1,98 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// This will sweep GCE Disk resources +func init() { + sweeper.AddTestSweepers("ComputeDisk", testSweepDisk) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDisk(region string) error { + resourceName := "ComputeDisk" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + zones := []string{"us-central1-a", "us-central1-b", "us-central1-c", "us-central1-f", "us-east1-b", "us-east1-c", "us-east1-d", "us-west1-a", "us-west1-b", "us-west1-c"} + for _, zone := range zones { + servicesUrl := "https://compute.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks" + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: servicesUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", servicesUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Count items that weren't sweeped. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["id"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource id was nil", resourceName) + return nil + } + + id := obj["name"].(string) + // Increment count and skip if resource is not sweepable. + if !sweeper.IsSweepableTestResource(id) { + nonPrefixCount++ + continue + } + + deleteUrl := servicesUrl + "/" + id + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, id) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items without tf-test prefix remain for zone %s", nonPrefixCount, zone) + } + + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_external_vpn_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_external_vpn_gateway.go new file mode 100644 index 0000000000..c0a64161e6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_external_vpn_gateway.go @@ -0,0 +1,562 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeExternalVpnGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeExternalVpnGatewayCreate, + Read: resourceComputeExternalVpnGatewayRead, + Update: resourceComputeExternalVpnGatewayUpdate, + Delete: resourceComputeExternalVpnGatewayDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeExternalVpnGatewayImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and +match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means +the first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "interface": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of interfaces on this external VPN gateway.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The numeric ID for this interface. Allowed values are based on the redundancy type +of this external VPN gateway +* '0 - SINGLE_IP_INTERNALLY_REDUNDANT' +* '0, 1 - TWO_IPS_REDUNDANCY' +* '0, 1, 2, 3 - FOUR_IPS_REDUNDANCY'`, + }, + "ip_address": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `IP address of the interface in the external VPN gateway. +Only IPv4 is supported. This IP address can be either from +your on-premise gateway or another Cloud provider's VPN gateway, +it cannot be an IP address from Google Compute Engine.`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels for the external VPN gateway resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "redundancy_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"FOUR_IPS_REDUNDANCY", "SINGLE_IP_INTERNALLY_REDUNDANT", "TWO_IPS_REDUNDANCY", ""}), + Description: `Indicates the redundancy type of this external VPN gateway Possible values: ["FOUR_IPS_REDUNDANCY", "SINGLE_IP_INTERNALLY_REDUNDANT", "TWO_IPS_REDUNDANCY"]`, + }, + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The fingerprint used for optimistic locking of this resource. 
Used +internally during updates.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeExternalVpnGatewayCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeExternalVpnGatewayDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandComputeExternalVpnGatewayLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + labelFingerprintProp, err := expandComputeExternalVpnGatewayLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + nameProp, err := expandComputeExternalVpnGatewayName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + redundancyTypeProp, err := expandComputeExternalVpnGatewayRedundancyType(d.Get("redundancy_type"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("redundancy_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(redundancyTypeProp)) && (ok || !reflect.DeepEqual(v, redundancyTypeProp)) { + obj["redundancyType"] = redundancyTypeProp + } + interfacesProp, err := expandComputeExternalVpnGatewayInterface(d.Get("interface"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("interface"); !tpgresource.IsEmptyValue(reflect.ValueOf(interfacesProp)) && (ok || !reflect.DeepEqual(v, interfacesProp)) { + obj["interfaces"] = interfacesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/externalVpnGateways") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ExternalVpnGateway: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ExternalVpnGateway: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ExternalVpnGateway: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/externalVpnGateways/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating ExternalVpnGateway", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create ExternalVpnGateway: %s", err) + } + + log.Printf("[DEBUG] Finished 
creating ExternalVpnGateway %q: %#v", d.Id(), res) + + return resourceComputeExternalVpnGatewayRead(d, meta) +} + +func resourceComputeExternalVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/externalVpnGateways/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ExternalVpnGateway: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeExternalVpnGateway %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) + } + + if err := d.Set("description", flattenComputeExternalVpnGatewayDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) + } + if err := d.Set("labels", flattenComputeExternalVpnGatewayLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) + } + if err := d.Set("label_fingerprint", flattenComputeExternalVpnGatewayLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) + } + if err := d.Set("name", flattenComputeExternalVpnGatewayName(res["name"], d, config)); 
err != nil { + return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) + } + if err := d.Set("redundancy_type", flattenComputeExternalVpnGatewayRedundancyType(res["redundancyType"], d, config)); err != nil { + return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) + } + if err := d.Set("interface", flattenComputeExternalVpnGatewayInterface(res["interfaces"], d, config)); err != nil { + return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading ExternalVpnGateway: %s", err) + } + + return nil +} + +func resourceComputeExternalVpnGatewayUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ExternalVpnGateway: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("labels") || d.HasChange("label_fingerprint") { + obj := make(map[string]interface{}) + + labelsProp, err := expandComputeExternalVpnGatewayLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + labelFingerprintProp, err := expandComputeExternalVpnGatewayLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/global/externalVpnGateways/{{name}}/setLabels") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating ExternalVpnGateway %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ExternalVpnGateway %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating ExternalVpnGateway", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeExternalVpnGatewayRead(d, meta) +} + +func resourceComputeExternalVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ExternalVpnGateway: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/externalVpnGateways/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting ExternalVpnGateway %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: 
billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ExternalVpnGateway") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting ExternalVpnGateway", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ExternalVpnGateway %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeExternalVpnGatewayImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/externalVpnGateways/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/externalVpnGateways/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeExternalVpnGatewayDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeExternalVpnGatewayLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeExternalVpnGatewayLabelFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeExternalVpnGatewayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeExternalVpnGatewayRedundancyType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeExternalVpnGatewayInterface(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "id": flattenComputeExternalVpnGatewayInterfaceId(original["id"], d, config), + "ip_address": flattenComputeExternalVpnGatewayInterfaceIpAddress(original["ipAddress"], d, config), + }) + } + return transformed +} +func flattenComputeExternalVpnGatewayInterfaceId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeExternalVpnGatewayInterfaceIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeExternalVpnGatewayDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeExternalVpnGatewayLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandComputeExternalVpnGatewayLabelFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeExternalVpnGatewayName(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeExternalVpnGatewayRedundancyType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeExternalVpnGatewayInterface(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandComputeExternalVpnGatewayInterfaceId(original["id"], d, config) + if err != nil { + return nil, err + } else { + transformed["id"] = transformedId + } + + transformedIpAddress, err := expandComputeExternalVpnGatewayInterfaceIpAddress(original["ip_address"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpAddress); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipAddress"] = transformedIpAddress + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeExternalVpnGatewayInterfaceId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeExternalVpnGatewayInterfaceIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_external_vpn_gateway_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_external_vpn_gateway_sweeper.go new file mode 100644 index 0000000000..d26234d768 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_external_vpn_gateway_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeExternalVpnGateway", testSweepComputeExternalVpnGateway) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeExternalVpnGateway(region string) error { + resourceName := "ComputeExternalVpnGateway" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + 
FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/externalVpnGateways", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/externalVpnGateways/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall.go new file mode 100644 index 0000000000..4dad3fdfb4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall.go @@ -0,0 +1,1210 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "bytes" + "context" + "fmt" + "log" + "reflect" + "sort" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func resourceComputeFirewallRuleHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["protocol"].(string)))) + + // We need to make sure to sort the strings below so that we always + // generate the same hash code no matter what is in the set. 
+ if v, ok := m["ports"]; ok && v != nil { + s := tpgresource.ConvertStringArr(v.([]interface{})) + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + + return tpgresource.Hashcode(buf.String()) +} + +func diffSuppressEnableLogging(k, old, new string, d *schema.ResourceData) bool { + if k == "log_config.#" { + if new == "0" && d.Get("enable_logging").(bool) { + return true + } + } + + return false +} + +func resourceComputeFirewallEnableLoggingCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + enableLogging, enableExists := diff.GetOkExists("enable_logging") + if !enableExists { + return nil + } + + logConfigExists := diff.Get("log_config.#").(int) != 0 + if logConfigExists && enableLogging == false { + return fmt.Errorf("log_config cannot be defined when enable_logging is false") + } + + return nil +} + +// Per https://github.com/hashicorp/terraform-provider-google/issues/2924 +// Make one of the source_ parameters Required in ingress google_compute_firewall +func resourceComputeFirewallSourceFieldsCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + direction := diff.Get("direction").(string) + + if direction != "EGRESS" { + _, tagsOk := diff.GetOk("source_tags") + _, rangesOk := diff.GetOk("source_ranges") + _, sasOk := diff.GetOk("source_service_accounts") + + _, tagsExist := diff.GetOkExists("source_tags") + _, rangesExist := diff.GetOkExists("source_ranges") + _, sasExist := diff.GetOkExists("source_service_accounts") + + if !tagsOk && !rangesOk && !sasOk && !tagsExist && !rangesExist && !sasExist { + return fmt.Errorf("one of source_tags, source_ranges, or source_service_accounts must be defined") + } + } + + return nil +} + +func diffSuppressSourceRanges(k, old, new string, d *schema.ResourceData) bool { + if k == "source_ranges.#" { + if old == "1" && new == "0" { + // Allow diffing on the individual element if we are going from 1 -> 0 + // this 
allows for diff suppress on ["0.0.0.0/0"] -> [] + return true + } + // For any other source_ranges.# diff, don't suppress + return false + } + kLength := "source_ranges.#" + oldLength, newLength := d.GetChange(kLength) + oldInt, ok := oldLength.(int) + + if !ok { + return false + } + + newInt, ok := newLength.(int) + if !ok { + return false + } + + // Diff suppress only should suppress removing the default range + // This should probably be newInt == 0, but due to Terraform core internals + // (bug?) values found via GetChange may not have the correct new value + // in some circumstances + if oldInt == 1 && newInt == 1 { + if old == "0.0.0.0/0" && new == "" { + return true + } + } + // For any other source_ranges value diff, don't suppress + return false +} + +func ResourceComputeFirewall() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeFirewallCreate, + Read: resourceComputeFirewallRead, + Update: resourceComputeFirewallUpdate, + Delete: resourceComputeFirewallDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeFirewallImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + SchemaVersion: 1, + MigrateState: resourceComputeFirewallMigrateState, + CustomizeDiff: customdiff.All( + resourceComputeFirewallEnableLoggingCustomizeDiff, + resourceComputeFirewallSourceFieldsCustomizeDiff, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name or self_link of the network to attach this firewall to.`, + }, + "allow": { + Type: schema.TypeSet, + Optional: true, + Description: `The list of ALLOW rules specified by this firewall. Each rule +specifies a protocol and port-range tuple that describes a permitted +connection.`, + Elem: computeFirewallAllowSchema(), + Set: resourceComputeFirewallRuleHash, + ExactlyOneOf: []string{"allow", "deny"}, + }, + "deny": { + Type: schema.TypeSet, + Optional: true, + Description: `The list of DENY rules specified by this firewall. Each rule specifies +a protocol and port-range tuple that describes a denied connection.`, + Elem: computeFirewallDenySchema(), + Set: resourceComputeFirewallRuleHash, + ExactlyOneOf: []string{"allow", "deny"}, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An optional description of this resource. Provide this property when +you create the resource.`, + }, + "destination_ranges": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + Description: `If destination ranges are specified, the firewall will apply only to +traffic that has destination IP address in these ranges. These ranges +must be expressed in CIDR format. IPv4 or IPv6 ranges are supported.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "direction": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"INGRESS", "EGRESS", ""}), + Description: `Direction of traffic to which this firewall applies; default is +INGRESS. 
Note: For INGRESS traffic, one of 'source_ranges', +'source_tags' or 'source_service_accounts' is required. Possible values: ["INGRESS", "EGRESS"]`, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Denotes whether the firewall rule is disabled, i.e not applied to the +network it is associated with. When set to true, the firewall rule is +not enforced and the network behaves as if it did not exist. If this +is unspecified, the firewall rule will be enabled.`, + }, + "log_config": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: diffSuppressEnableLogging, + Description: `This field denotes the logging options for a particular firewall rule. +If defined, logging is enabled, and logs will be exported to Cloud Logging.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA"}), + Description: `This field denotes whether to include or exclude metadata for firewall logs. Possible values: ["EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA"]`, + }, + }, + }, + }, + "priority": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 65535), + Description: `Priority for this rule. This is an integer between 0 and 65535, both +inclusive. When not specified, the value assumed is 1000. Relative +priorities determine precedence of conflicting rules. Lower value of +priority implies higher precedence (eg, a rule with priority 0 has +higher precedence than a rule with priority 1). DENY rules take +precedence over ALLOW rules having equal priority.`, + Default: 1000, + }, + "source_ranges": { + Type: schema.TypeSet, + Optional: true, + DiffSuppressFunc: diffSuppressSourceRanges, + Description: `If source ranges are specified, the firewall will apply only to +traffic that has source IP address in these ranges. 
These ranges must +be expressed in CIDR format. One or both of sourceRanges and +sourceTags may be set. If both properties are set, the firewall will +apply to traffic that has source IP address within sourceRanges OR the +source IP that belongs to a tag listed in the sourceTags property. The +connection does not need to match both properties for the firewall to +apply. IPv4 or IPv6 ranges are supported. For INGRESS traffic, one of +'source_ranges', 'source_tags' or 'source_service_accounts' is required.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "source_service_accounts": { + Type: schema.TypeSet, + Optional: true, + Description: `If source service accounts are specified, the firewall will apply only +to traffic originating from an instance with a service account in this +list. Source service accounts cannot be used to control traffic to an +instance's external IP address because service accounts are associated +with an instance, not an IP address. sourceRanges can be set at the +same time as sourceServiceAccounts. If both are set, the firewall will +apply to traffic that has source IP address within sourceRanges OR the +source IP belongs to an instance with service account listed in +sourceServiceAccount. The connection does not need to match both +properties for the firewall to apply. sourceServiceAccounts cannot be +used at the same time as sourceTags or targetTags. For INGRESS traffic, +one of 'source_ranges', 'source_tags' or 'source_service_accounts' is required.`, + MaxItems: 10, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + ConflictsWith: []string{"source_tags", "target_tags"}, + }, + "source_tags": { + Type: schema.TypeSet, + Optional: true, + Description: `If source tags are specified, the firewall will apply only to traffic +with source IP that belongs to a tag listed in source tags. Source +tags cannot be used to control traffic to an instance's external IP +address. 
Because tags are associated with an instance, not an IP +address. One or both of sourceRanges and sourceTags may be set. If +both properties are set, the firewall will apply to traffic that has +source IP address within sourceRanges OR the source IP that belongs to +a tag listed in the sourceTags property. The connection does not need +to match both properties for the firewall to apply. For INGRESS traffic, +one of 'source_ranges', 'source_tags' or 'source_service_accounts' is required.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + ConflictsWith: []string{"source_service_accounts", "target_service_accounts"}, + }, + "target_service_accounts": { + Type: schema.TypeSet, + Optional: true, + Description: `A list of service accounts indicating sets of instances located in the +network that may make network connections as specified in allowed[]. +targetServiceAccounts cannot be used at the same time as targetTags or +sourceTags. If neither targetServiceAccounts nor targetTags are +specified, the firewall rule applies to all instances on the specified +network.`, + MaxItems: 10, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + ConflictsWith: []string{"source_tags", "target_tags"}, + }, + "target_tags": { + Type: schema.TypeSet, + Optional: true, + Description: `A list of instance tags indicating sets of instances located in the +network that may make network connections as specified in allowed[]. 
+If no targetTags are specified, the firewall rule applies to all +instances on the specified network.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + ConflictsWith: []string{"source_service_accounts", "target_service_accounts"}, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "enable_logging": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Deprecated: "Deprecated in favor of log_config", + Description: "This field denotes whether to enable logging for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver.", + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func computeFirewallAllowSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "protocol": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareCaseInsensitive, + Description: `The IP protocol to which this rule applies. The protocol type is +required when creating a firewall rule. This value can either be +one of the following well known protocol strings (tcp, udp, +icmp, esp, ah, sctp, ipip, all), or the IP protocol number.`, + }, + "ports": { + Type: schema.TypeList, + Optional: true, + Description: `An optional list of ports to which this rule applies. This field +is only applicable for UDP or TCP protocol. Each entry must be +either an integer or a range. If not specified, this rule +applies to connections through any port. 
+ +Example inputs include: ["22"], ["80","443"], and +["12345-12349"].`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func computeFirewallDenySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "protocol": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareCaseInsensitive, + Description: `The IP protocol to which this rule applies. The protocol type is +required when creating a firewall rule. This value can either be +one of the following well known protocol strings (tcp, udp, +icmp, esp, ah, sctp, ipip, all), or the IP protocol number.`, + }, + "ports": { + Type: schema.TypeList, + Optional: true, + Description: `An optional list of ports to which this rule applies. This field +is only applicable for UDP or TCP protocol. Each entry must be +either an integer or a range. If not specified, this rule +applies to connections through any port. + +Example inputs include: ["22"], ["80","443"], and +["12345-12349"].`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + allowedProp, err := expandComputeFirewallAllow(d.Get("allow"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("allow"); !tpgresource.IsEmptyValue(reflect.ValueOf(allowedProp)) && (ok || !reflect.DeepEqual(v, allowedProp)) { + obj["allowed"] = allowedProp + } + deniedProp, err := expandComputeFirewallDeny(d.Get("deny"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deny"); !tpgresource.IsEmptyValue(reflect.ValueOf(deniedProp)) && (ok || !reflect.DeepEqual(v, deniedProp)) { + obj["denied"] = deniedProp + } + descriptionProp, err := 
expandComputeFirewallDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + destinationRangesProp, err := expandComputeFirewallDestinationRanges(d.Get("destination_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("destination_ranges"); !tpgresource.IsEmptyValue(reflect.ValueOf(destinationRangesProp)) && (ok || !reflect.DeepEqual(v, destinationRangesProp)) { + obj["destinationRanges"] = destinationRangesProp + } + directionProp, err := expandComputeFirewallDirection(d.Get("direction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("direction"); !tpgresource.IsEmptyValue(reflect.ValueOf(directionProp)) && (ok || !reflect.DeepEqual(v, directionProp)) { + obj["direction"] = directionProp + } + disabledProp, err := expandComputeFirewallDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); ok || !reflect.DeepEqual(v, disabledProp) { + obj["disabled"] = disabledProp + } + logConfigProp, err := expandComputeFirewallLogConfig(d.Get("log_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("log_config"); ok || !reflect.DeepEqual(v, logConfigProp) { + obj["logConfig"] = logConfigProp + } + nameProp, err := expandComputeFirewallName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + networkProp, err := expandComputeFirewallNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || 
!reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + priorityProp, err := expandComputeFirewallPriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); ok || !reflect.DeepEqual(v, priorityProp) { + obj["priority"] = priorityProp + } + sourceRangesProp, err := expandComputeFirewallSourceRanges(d.Get("source_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_ranges"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceRangesProp)) && (ok || !reflect.DeepEqual(v, sourceRangesProp)) { + obj["sourceRanges"] = sourceRangesProp + } + sourceServiceAccountsProp, err := expandComputeFirewallSourceServiceAccounts(d.Get("source_service_accounts"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_service_accounts"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceServiceAccountsProp)) && (ok || !reflect.DeepEqual(v, sourceServiceAccountsProp)) { + obj["sourceServiceAccounts"] = sourceServiceAccountsProp + } + sourceTagsProp, err := expandComputeFirewallSourceTags(d.Get("source_tags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_tags"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceTagsProp)) && (ok || !reflect.DeepEqual(v, sourceTagsProp)) { + obj["sourceTags"] = sourceTagsProp + } + targetServiceAccountsProp, err := expandComputeFirewallTargetServiceAccounts(d.Get("target_service_accounts"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_service_accounts"); !tpgresource.IsEmptyValue(reflect.ValueOf(targetServiceAccountsProp)) && (ok || !reflect.DeepEqual(v, targetServiceAccountsProp)) { + obj["targetServiceAccounts"] = targetServiceAccountsProp + } + targetTagsProp, err := expandComputeFirewallTargetTags(d.Get("target_tags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_tags"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(targetTagsProp)) && (ok || !reflect.DeepEqual(v, targetTagsProp)) { + obj["targetTags"] = targetTagsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewalls") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Firewall: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Firewall: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Firewall: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/firewalls/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating Firewall", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Firewall: %s", err) + } + + log.Printf("[DEBUG] Finished creating Firewall %q: %#v", d.Id(), res) + + return resourceComputeFirewallRead(d, meta) +} + +func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewalls/{{name}}") 
+ if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Firewall: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeFirewall %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + + if err := d.Set("allow", flattenComputeFirewallAllow(res["allowed"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeFirewallCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("deny", flattenComputeFirewallDeny(res["denied"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("description", flattenComputeFirewallDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("destination_ranges", flattenComputeFirewallDestinationRanges(res["destinationRanges"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("direction", flattenComputeFirewallDirection(res["direction"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("disabled", flattenComputeFirewallDisabled(res["disabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + 
} + if err := d.Set("log_config", flattenComputeFirewallLogConfig(res["logConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("name", flattenComputeFirewallName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("network", flattenComputeFirewallNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("priority", flattenComputeFirewallPriority(res["priority"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("source_ranges", flattenComputeFirewallSourceRanges(res["sourceRanges"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("source_service_accounts", flattenComputeFirewallSourceServiceAccounts(res["sourceServiceAccounts"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("source_tags", flattenComputeFirewallSourceTags(res["sourceTags"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("target_service_accounts", flattenComputeFirewallTargetServiceAccounts(res["targetServiceAccounts"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("target_tags", flattenComputeFirewallTargetTags(res["targetTags"], d, config)); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading Firewall: %s", err) + } + + return nil +} + +func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + 
billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Firewall: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + allowedProp, err := expandComputeFirewallAllow(d.Get("allow"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("allow"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, allowedProp)) { + obj["allowed"] = allowedProp + } + deniedProp, err := expandComputeFirewallDeny(d.Get("deny"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deny"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deniedProp)) { + obj["denied"] = deniedProp + } + descriptionProp, err := expandComputeFirewallDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + destinationRangesProp, err := expandComputeFirewallDestinationRanges(d.Get("destination_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("destination_ranges"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, destinationRangesProp)) { + obj["destinationRanges"] = destinationRangesProp + } + disabledProp, err := expandComputeFirewallDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); ok || !reflect.DeepEqual(v, disabledProp) { + obj["disabled"] = disabledProp + } + logConfigProp, err := expandComputeFirewallLogConfig(d.Get("log_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("log_config"); ok || !reflect.DeepEqual(v, logConfigProp) { + obj["logConfig"] = logConfigProp + } + networkProp, err := 
expandComputeFirewallNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + priorityProp, err := expandComputeFirewallPriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); ok || !reflect.DeepEqual(v, priorityProp) { + obj["priority"] = priorityProp + } + sourceRangesProp, err := expandComputeFirewallSourceRanges(d.Get("source_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_ranges"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceRangesProp)) { + obj["sourceRanges"] = sourceRangesProp + } + sourceServiceAccountsProp, err := expandComputeFirewallSourceServiceAccounts(d.Get("source_service_accounts"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_service_accounts"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceServiceAccountsProp)) { + obj["sourceServiceAccounts"] = sourceServiceAccountsProp + } + sourceTagsProp, err := expandComputeFirewallSourceTags(d.Get("source_tags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_tags"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceTagsProp)) { + obj["sourceTags"] = sourceTagsProp + } + targetServiceAccountsProp, err := expandComputeFirewallTargetServiceAccounts(d.Get("target_service_accounts"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_service_accounts"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetServiceAccountsProp)) { + obj["targetServiceAccounts"] = targetServiceAccountsProp + } + targetTagsProp, err := expandComputeFirewallTargetTags(d.Get("target_tags"), 
d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_tags"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetTagsProp)) { + obj["targetTags"] = targetTagsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewalls/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Firewall %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Firewall %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Firewall %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Firewall", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeFirewallRead(d, meta) +} + +func resourceComputeFirewallDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Firewall: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewalls/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Firewall %q", d.Id()) + + // err == nil indicates that the billing_project value was found + 
if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Firewall") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting Firewall", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Firewall %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeFirewallImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/firewalls/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/firewalls/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeFirewallAllow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(resourceComputeFirewallRuleHash, []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "protocol": flattenComputeFirewallAllowProtocol(original["IPProtocol"], d, config), + "ports": flattenComputeFirewallAllowPorts(original["ports"], d, config), + }) + } + return transformed +} +func 
flattenComputeFirewallAllowProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeFirewallAllowPorts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeFirewallCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeFirewallDeny(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(resourceComputeFirewallRuleHash, []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "protocol": flattenComputeFirewallDenyProtocol(original["IPProtocol"], d, config), + "ports": flattenComputeFirewallDenyPorts(original["ports"], d, config), + }) + } + return transformed +} +func flattenComputeFirewallDenyProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeFirewallDenyPorts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeFirewallDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeFirewallDestinationRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenComputeFirewallDirection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeFirewallDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenComputeFirewallLogConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + + v, ok := original["enable"] + if ok && !v.(bool) { + return nil + } + + transformed := make(map[string]interface{}) + transformed["metadata"] = original["metadata"] + return []interface{}{transformed} +} + +func flattenComputeFirewallName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeFirewallNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeFirewallPriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeFirewallSourceRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenComputeFirewallSourceServiceAccounts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenComputeFirewallSourceTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenComputeFirewallTargetServiceAccounts(v interface{}, 
d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenComputeFirewallTargetTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func expandComputeFirewallAllow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProtocol, err := expandComputeFirewallAllowProtocol(original["protocol"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProtocol); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["IPProtocol"] = transformedProtocol + } + + transformedPorts, err := expandComputeFirewallAllowPorts(original["ports"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPorts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ports"] = transformedPorts + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeFirewallAllowProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeFirewallAllowPorts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeFirewallDeny(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == 
nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProtocol, err := expandComputeFirewallDenyProtocol(original["protocol"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProtocol); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["IPProtocol"] = transformedProtocol + } + + transformedPorts, err := expandComputeFirewallDenyPorts(original["ports"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPorts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ports"] = transformedPorts + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeFirewallDenyProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeFirewallDenyPorts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeFirewallDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeFirewallDestinationRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandComputeFirewallDirection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeFirewallDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeFirewallLogConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + transformed := make(map[string]interface{}) + + if len(l) == 0 || 
l[0] == nil { + // send enable = enable_logging value to ensure correct logging status if there is no config + transformed["enable"] = d.Get("enable_logging").(bool) + return transformed, nil + } + + raw := l[0] + original := raw.(map[string]interface{}) + + // The log_config block is specified, so logging should be enabled + transformed["enable"] = true + transformed["metadata"] = original["metadata"] + + return transformed, nil +} + +func expandComputeFirewallName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeFirewallNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeFirewallPriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeFirewallSourceRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandComputeFirewallSourceServiceAccounts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandComputeFirewallSourceTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandComputeFirewallTargetServiceAccounts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandComputeFirewallTargetTags(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall_migrate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_migrate.go similarity index 96% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall_migrate.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_migrate.go index d228813324..8fb2e780fc 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall_migrate.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_migrate.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_policy.go similarity index 81% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall_policy.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_policy.go index e8e1cfecf3..55d00b7dac 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_policy.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceComputeFirewallPolicy() *schema.Resource { @@ -49,7 +56,7 @@ func ResourceComputeFirewallPolicy() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The parent of the firewall policy.", }, @@ -112,7 +119,7 @@ func ResourceComputeFirewallPolicy() *schema.Resource { } func resourceComputeFirewallPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &compute.FirewallPolicy{ Parent: dcl.String(d.Get("parent").(string)), @@ -125,18 +132,18 @@ func resourceComputeFirewallPolicyCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err 
:= tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -169,7 +176,7 @@ func resourceComputeFirewallPolicyCreate(d *schema.ResourceData, meta interface{ } func resourceComputeFirewallPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &compute.FirewallPolicy{ Parent: dcl.String(d.Get("parent").(string)), @@ -178,17 +185,17 @@ func resourceComputeFirewallPolicyRead(d *schema.ResourceData, meta interface{}) Name: dcl.StringOrNil(d.Get("name").(string)), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -197,7 +204,7 @@ func resourceComputeFirewallPolicyRead(d 
*schema.ResourceData, meta interface{}) res, err := client.GetFirewallPolicy(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ComputeFirewallPolicy %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("parent", res.Parent); err != nil { @@ -234,7 +241,7 @@ func resourceComputeFirewallPolicyRead(d *schema.ResourceData, meta interface{}) return nil } func resourceComputeFirewallPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &compute.FirewallPolicy{ Parent: dcl.String(d.Get("parent").(string)), @@ -242,19 +249,19 @@ func resourceComputeFirewallPolicyUpdate(d *schema.ResourceData, meta interface{ Description: dcl.String(d.Get("description").(string)), Name: dcl.StringOrNil(d.Get("name").(string)), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -276,7 +283,7 @@ func resourceComputeFirewallPolicyUpdate(d *schema.ResourceData, meta interface{ } func 
resourceComputeFirewallPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &compute.FirewallPolicy{ Parent: dcl.String(d.Get("parent").(string)), @@ -286,17 +293,17 @@ func resourceComputeFirewallPolicyDelete(d *schema.ResourceData, meta interface{ } log.Printf("[DEBUG] Deleting FirewallPolicy %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -311,9 +318,9 @@ func resourceComputeFirewallPolicyDelete(d *schema.ResourceData, meta interface{ } func resourceComputeFirewallPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "locations/global/firewallPolicies/(?P[^/]+)", "(?P[^/]+)", }, d, config); err != nil { @@ -321,7 +328,7 @@ func resourceComputeFirewallPolicyImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "locations/global/firewallPolicies/{{name}}") + id, err := 
tpgresource.ReplaceVars(d, config, "locations/global/firewallPolicies/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall_policy_association.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_policy_association.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall_policy_association.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_policy_association.go index 8ef110972f..9bd518717c 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_firewall_policy_association.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_policy_association.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceComputeFirewallPolicyAssociation() *schema.Resource { @@ -47,7 +54,7 @@ func ResourceComputeFirewallPolicyAssociation() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The target that the firewall policy is attached to.", }, @@ -55,7 +62,7 @@ func ResourceComputeFirewallPolicyAssociation() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The firewall policy ID of the association.", }, @@ -76,7 +83,7 @@ func ResourceComputeFirewallPolicyAssociation() *schema.Resource { } func resourceComputeFirewallPolicyAssociationCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &compute.FirewallPolicyAssociation{ AttachmentTarget: dcl.String(d.Get("attachment_target").(string)), @@ -89,18 +96,18 @@ func resourceComputeFirewallPolicyAssociationCreate(d *schema.ResourceData, meta return fmt.Errorf("error constructing id: %s", err) } 
d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -122,7 +129,7 @@ func resourceComputeFirewallPolicyAssociationCreate(d *schema.ResourceData, meta } func resourceComputeFirewallPolicyAssociationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &compute.FirewallPolicyAssociation{ AttachmentTarget: dcl.String(d.Get("attachment_target").(string)), @@ -130,17 +137,17 @@ func resourceComputeFirewallPolicyAssociationRead(d *schema.ResourceData, meta i Name: dcl.String(d.Get("name").(string)), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -149,7 +156,7 @@ func resourceComputeFirewallPolicyAssociationRead(d *schema.ResourceData, meta i res, err := client.GetFirewallPolicyAssociation(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ComputeFirewallPolicyAssociation %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("attachment_target", res.AttachmentTarget); err != nil { @@ -169,7 +176,7 @@ func resourceComputeFirewallPolicyAssociationRead(d *schema.ResourceData, meta i } func resourceComputeFirewallPolicyAssociationDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &compute.FirewallPolicyAssociation{ AttachmentTarget: dcl.String(d.Get("attachment_target").(string)), @@ -178,17 +185,17 @@ func resourceComputeFirewallPolicyAssociationDelete(d *schema.ResourceData, meta } log.Printf("[DEBUG] Deleting FirewallPolicyAssociation %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); 
err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -203,9 +210,9 @@ func resourceComputeFirewallPolicyAssociationDelete(d *schema.ResourceData, meta } func resourceComputeFirewallPolicyAssociationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "locations/global/firewallPolicies/(?P[^/]+)/associations/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", }, d, config); err != nil { @@ -213,7 +220,7 @@ func resourceComputeFirewallPolicyAssociationImport(d *schema.ResourceData, meta } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_policy_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_policy_rule.go new file mode 100644 index 0000000000..f6dd7c060f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_policy_rule.go @@ -0,0 +1,580 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeFirewallPolicyRule() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeFirewallPolicyRuleCreate, + Read: resourceComputeFirewallPolicyRuleRead, + Update: resourceComputeFirewallPolicyRuleUpdate, + Delete: resourceComputeFirewallPolicyRuleDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeFirewallPolicyRuleImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + 
"action": { + Type: schema.TypeString, + Required: true, + Description: "The Action to perform when the client connection triggers the rule. Valid actions are \"allow\", \"deny\" and \"goto_next\".", + }, + + "direction": { + Type: schema.TypeString, + Required: true, + Description: "The direction in which this rule applies. Possible values: INGRESS, EGRESS", + }, + + "firewall_policy": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The firewall policy of the resource.", + }, + + "match": { + Type: schema.TypeList, + Required: true, + Description: "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced.", + MaxItems: 1, + Elem: ComputeFirewallPolicyRuleMatchSchema(), + }, + + "priority": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "An optional description for this resource.", + }, + + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled.", + }, + + "enable_logging": { + Type: schema.TypeBool, + Optional: true, + Description: "Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. 
Note: you cannot enable logging on \"goto_next\" rules.", + }, + + "target_resources": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "target_service_accounts": { + Type: schema.TypeList, + Optional: true, + Description: "A list of service accounts indicating the sets of instances that are applied with this rule.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "kind": { + Type: schema.TypeString, + Computed: true, + Description: "Type of the resource. Always `compute#firewallPolicyRule` for firewall policy rules", + }, + + "rule_tuple_count": { + Type: schema.TypeInt, + Computed: true, + Description: "Calculation of the complexity of a single firewall policy rule.", + }, + }, + } +} + +func ComputeFirewallPolicyRuleMatchSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "layer4_configs": { + Type: schema.TypeList, + Required: true, + Description: "Pairs of IP protocols and ports that the rule should match.", + Elem: ComputeFirewallPolicyRuleMatchLayer4ConfigsSchema(), + }, + + "dest_address_groups": { + Type: schema.TypeList, + Optional: true, + Description: "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "dest_fqdns": { + Type: schema.TypeList, + Optional: true, + Description: "Domain names that will be used to match against the resolved domain name of destination of traffic. 
Can only be specified if DIRECTION is egress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "dest_ip_ranges": { + Type: schema.TypeList, + Optional: true, + Description: "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 256.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "dest_region_codes": { + Type: schema.TypeList, + Optional: true, + Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is egress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "dest_threat_intelligences": { + Type: schema.TypeList, + Optional: true, + Description: "Name of the Google Cloud Threat Intelligence list.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "src_address_groups": { + Type: schema.TypeList, + Optional: true, + Description: "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "src_fqdns": { + Type: schema.TypeList, + Optional: true, + Description: "Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "src_ip_ranges": { + Type: schema.TypeList, + Optional: true, + Description: "CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 256.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "src_region_codes": { + Type: schema.TypeList, + Optional: true, + Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. 
Can only be specified if DIRECTION is ingress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "src_threat_intelligences": { + Type: schema.TypeList, + Optional: true, + Description: "Name of the Google Cloud Threat Intelligence list.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ComputeFirewallPolicyRuleMatchLayer4ConfigsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_protocol": { + Type: schema.TypeString, + Required: true, + Description: "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (`tcp`, `udp`, `icmp`, `esp`, `ah`, `ipip`, `sctp`), or the IP protocol number.", + }, + + "ports": { + Type: schema.TypeList, + Optional: true, + Description: "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. 
Example inputs include: ``.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceComputeFirewallPolicyRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + obj := &compute.FirewallPolicyRule{ + Action: dcl.String(d.Get("action").(string)), + Direction: compute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), + FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), + Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), + Priority: dcl.Int64(int64(d.Get("priority").(int))), + Description: dcl.String(d.Get("description").(string)), + Disabled: dcl.Bool(d.Get("disabled").(bool)), + EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), + TargetResources: tpgdclresource.ExpandStringArray(d.Get("target_resources")), + TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyFirewallPolicyRule(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating FirewallPolicyRule: %s", err) + } + + log.Printf("[DEBUG] Finished creating FirewallPolicyRule %q: %#v", d.Id(), res) + + return resourceComputeFirewallPolicyRuleRead(d, meta) +} + +func resourceComputeFirewallPolicyRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + obj := &compute.FirewallPolicyRule{ + Action: dcl.String(d.Get("action").(string)), + Direction: compute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), + FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), + Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), + Priority: dcl.Int64(int64(d.Get("priority").(int))), + Description: dcl.String(d.Get("description").(string)), + Disabled: dcl.Bool(d.Get("disabled").(bool)), + EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), + TargetResources: tpgdclresource.ExpandStringArray(d.Get("target_resources")), + TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetFirewallPolicyRule(context.Background(), obj) + if err != nil { + resourceName := 
fmt.Sprintf("ComputeFirewallPolicyRule %q", d.Id()) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("action", res.Action); err != nil { + return fmt.Errorf("error setting action in state: %s", err) + } + if err = d.Set("direction", res.Direction); err != nil { + return fmt.Errorf("error setting direction in state: %s", err) + } + if err = d.Set("firewall_policy", res.FirewallPolicy); err != nil { + return fmt.Errorf("error setting firewall_policy in state: %s", err) + } + if err = d.Set("match", flattenComputeFirewallPolicyRuleMatch(res.Match)); err != nil { + return fmt.Errorf("error setting match in state: %s", err) + } + if err = d.Set("priority", res.Priority); err != nil { + return fmt.Errorf("error setting priority in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("disabled", res.Disabled); err != nil { + return fmt.Errorf("error setting disabled in state: %s", err) + } + if err = d.Set("enable_logging", res.EnableLogging); err != nil { + return fmt.Errorf("error setting enable_logging in state: %s", err) + } + if err = d.Set("target_resources", res.TargetResources); err != nil { + return fmt.Errorf("error setting target_resources in state: %s", err) + } + if err = d.Set("target_service_accounts", res.TargetServiceAccounts); err != nil { + return fmt.Errorf("error setting target_service_accounts in state: %s", err) + } + if err = d.Set("kind", res.Kind); err != nil { + return fmt.Errorf("error setting kind in state: %s", err) + } + if err = d.Set("rule_tuple_count", res.RuleTupleCount); err != nil { + return fmt.Errorf("error setting rule_tuple_count in state: %s", err) + } + + return nil +} +func resourceComputeFirewallPolicyRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + obj := &compute.FirewallPolicyRule{ + Action: 
dcl.String(d.Get("action").(string)), + Direction: compute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), + FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), + Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), + Priority: dcl.Int64(int64(d.Get("priority").(int))), + Description: dcl.String(d.Get("description").(string)), + Disabled: dcl.Bool(d.Get("disabled").(bool)), + EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), + TargetResources: tpgdclresource.ExpandStringArray(d.Get("target_resources")), + TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyFirewallPolicyRule(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error updating FirewallPolicyRule: %s", err) + } + + log.Printf("[DEBUG] Finished creating FirewallPolicyRule %q: %#v", d.Id(), res) + + return resourceComputeFirewallPolicyRuleRead(d, meta) +} + +func resourceComputeFirewallPolicyRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + obj := &compute.FirewallPolicyRule{ + Action: dcl.String(d.Get("action").(string)), + Direction: compute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), + FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), + Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), + Priority: dcl.Int64(int64(d.Get("priority").(int))), + Description: dcl.String(d.Get("description").(string)), + Disabled: dcl.Bool(d.Get("disabled").(bool)), + EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), + TargetResources: tpgdclresource.ExpandStringArray(d.Get("target_resources")), + TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), + } + + log.Printf("[DEBUG] Deleting FirewallPolicyRule %q", d.Id()) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := 
client.DeleteFirewallPolicyRule(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting FirewallPolicyRule: %s", err) + } + + log.Printf("[DEBUG] Finished deleting FirewallPolicyRule %q", d.Id()) + return nil +} + +func resourceComputeFirewallPolicyRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "locations/global/firewallPolicies/(?P[^/]+)/rules/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandComputeFirewallPolicyRuleMatch(o interface{}) *compute.FirewallPolicyRuleMatch { + if o == nil { + return compute.EmptyFirewallPolicyRuleMatch + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return compute.EmptyFirewallPolicyRuleMatch + } + obj := objArr[0].(map[string]interface{}) + return &compute.FirewallPolicyRuleMatch{ + Layer4Configs: expandComputeFirewallPolicyRuleMatchLayer4ConfigsArray(obj["layer4_configs"]), + DestAddressGroups: tpgdclresource.ExpandStringArray(obj["dest_address_groups"]), + DestFqdns: tpgdclresource.ExpandStringArray(obj["dest_fqdns"]), + DestIPRanges: tpgdclresource.ExpandStringArray(obj["dest_ip_ranges"]), + DestRegionCodes: tpgdclresource.ExpandStringArray(obj["dest_region_codes"]), + DestThreatIntelligences: tpgdclresource.ExpandStringArray(obj["dest_threat_intelligences"]), + SrcAddressGroups: tpgdclresource.ExpandStringArray(obj["src_address_groups"]), + SrcFqdns: tpgdclresource.ExpandStringArray(obj["src_fqdns"]), + SrcIPRanges: tpgdclresource.ExpandStringArray(obj["src_ip_ranges"]), + 
SrcRegionCodes: tpgdclresource.ExpandStringArray(obj["src_region_codes"]), + SrcThreatIntelligences: tpgdclresource.ExpandStringArray(obj["src_threat_intelligences"]), + } +} + +func flattenComputeFirewallPolicyRuleMatch(obj *compute.FirewallPolicyRuleMatch) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "layer4_configs": flattenComputeFirewallPolicyRuleMatchLayer4ConfigsArray(obj.Layer4Configs), + "dest_address_groups": obj.DestAddressGroups, + "dest_fqdns": obj.DestFqdns, + "dest_ip_ranges": obj.DestIPRanges, + "dest_region_codes": obj.DestRegionCodes, + "dest_threat_intelligences": obj.DestThreatIntelligences, + "src_address_groups": obj.SrcAddressGroups, + "src_fqdns": obj.SrcFqdns, + "src_ip_ranges": obj.SrcIPRanges, + "src_region_codes": obj.SrcRegionCodes, + "src_threat_intelligences": obj.SrcThreatIntelligences, + } + + return []interface{}{transformed} + +} +func expandComputeFirewallPolicyRuleMatchLayer4ConfigsArray(o interface{}) []compute.FirewallPolicyRuleMatchLayer4Configs { + if o == nil { + return make([]compute.FirewallPolicyRuleMatchLayer4Configs, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 || objs[0] == nil { + return make([]compute.FirewallPolicyRuleMatchLayer4Configs, 0) + } + + items := make([]compute.FirewallPolicyRuleMatchLayer4Configs, 0, len(objs)) + for _, item := range objs { + i := expandComputeFirewallPolicyRuleMatchLayer4Configs(item) + items = append(items, *i) + } + + return items +} + +func expandComputeFirewallPolicyRuleMatchLayer4Configs(o interface{}) *compute.FirewallPolicyRuleMatchLayer4Configs { + if o == nil { + return compute.EmptyFirewallPolicyRuleMatchLayer4Configs + } + + obj := o.(map[string]interface{}) + return &compute.FirewallPolicyRuleMatchLayer4Configs{ + IPProtocol: dcl.String(obj["ip_protocol"].(string)), + Ports: tpgdclresource.ExpandStringArray(obj["ports"]), + } +} + +func flattenComputeFirewallPolicyRuleMatchLayer4ConfigsArray(objs 
[]compute.FirewallPolicyRuleMatchLayer4Configs) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenComputeFirewallPolicyRuleMatchLayer4Configs(&item) + items = append(items, i) + } + + return items +} + +func flattenComputeFirewallPolicyRuleMatchLayer4Configs(obj *compute.FirewallPolicyRuleMatchLayer4Configs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ip_protocol": obj.IPProtocol, + "ports": obj.Ports, + } + + return transformed + +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_sweeper.go new file mode 100644 index 0000000000..e801efe9ad --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_firewall_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeFirewall", testSweepComputeFirewall) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeFirewall(region string) error { + resourceName := "ComputeFirewall" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/firewalls", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in 
response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/firewalls/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_forwarding_rule.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_forwarding_rule.go new file mode 100644 index 0000000000..6d3bc5147d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_forwarding_rule.go @@ -0,0 +1,1420 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeForwardingRule() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeForwardingRuleCreate, + Read: resourceComputeForwardingRuleRead, + Update: resourceComputeForwardingRuleUpdate, + Delete: resourceComputeForwardingRuleDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeForwardingRuleImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + 
ForceNew: true, + Description: `Name of the resource; provided by the client when the resource is created. +The name must be 1-63 characters long, and comply with +[RFC1035](https://www.ietf.org/rfc/rfc1035.txt). + +Specifically, the name must be 1-63 characters long and match the regular +expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the first +character must be a lowercase letter, and all following characters must +be a dash, lowercase letter, or digit, except the last character, which +cannot be a dash. + +For Private Service Connect forwarding rules that forward traffic to Google +APIs, the forwarding rule name must be a 1-20 characters string with +lowercase letters and numbers and must start with a letter.`, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.InternalIpDiffSuppress, + Description: `IP address for which this forwarding rule accepts traffic. When a client +sends traffic to this IP address, the forwarding rule directs the traffic +to the referenced 'target' or 'backendService'. + +While creating a forwarding rule, specifying an 'IPAddress' is +required under the following circumstances: + +* When the 'target' is set to 'targetGrpcProxy' and +'validateForProxyless' is set to 'true', the +'IPAddress' should be set to '0.0.0.0'. +* When the 'target' is a Private Service Connect Google APIs +bundle, you must specify an 'IPAddress'. + + +Otherwise, you can optionally specify an IP address that references an +existing static (reserved) IP address resource. When omitted, Google Cloud +assigns an ephemeral IP address. 
+ +Use one of the following formats to specify an IP address while creating a +forwarding rule: + +* IP address number, as in '100.1.2.3' +* IPv6 address range, as in '2600:1234::/96' +* Full resource URL, as in +'https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name' +* Partial URL or by name, as in: + * 'projects/project_id/regions/region/addresses/address-name' + * 'regions/region/addresses/address-name' + * 'global/addresses/address-name' + * 'address-name' + + +The forwarding rule's 'target' or 'backendService', +and in most cases, also the 'loadBalancingScheme', determine the +type of IP address that you can use. For detailed information, see +[IP address +specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). + +When reading an 'IPAddress', the API always returns the IP +address number.`, + }, + "ip_protocol": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"TCP", "UDP", "ESP", "AH", "SCTP", "ICMP", "L3_DEFAULT", ""}), + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: `The IP protocol to which this rule applies. + +For protocol forwarding, valid +options are 'TCP', 'UDP', 'ESP', +'AH', 'SCTP', 'ICMP' and +'L3_DEFAULT'. + +The valid IP protocols are different for different load balancing products +as described in [Load balancing +features](https://cloud.google.com/load-balancing/docs/features#protocols_from_the_load_balancer_to_the_backends). Possible values: ["TCP", "UDP", "ESP", "AH", "SCTP", "ICMP", "L3_DEFAULT"]`, + }, + "all_ports": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `This field can only be used: +* If 'IPProtocol' is one of TCP, UDP, or SCTP. +* By internal TCP/UDP load balancers, backend service-based network load +balancers, and internal and external protocol forwarding. 
+ + +Set this field to true to allow packets addressed to any port or packets +lacking destination port information (for example, UDP fragments after the +first fragment) to be forwarded to the backends configured with this +forwarding rule. + +The 'ports', 'port_range', and +'allPorts' fields are mutually exclusive.`, + }, + "allow_global_access": { + Type: schema.TypeBool, + Optional: true, + Description: `This field is used along with the 'backend_service' field for +internal load balancing or with the 'target' field for internal +TargetInstance. + +If the field is set to 'TRUE', clients can access ILB from all +regions. + +Otherwise only allows access from clients in the same region as the +internal load balancer.`, + }, + "allow_psc_global_access": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `This is used in PSC consumer ForwardingRule to control whether the PSC endpoint can be accessed from another region.`, + }, + "backend_service": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Identifies the backend service to which the forwarding rule sends traffic. + +Required for Internal TCP/UDP Load Balancing and Network Load Balancing; +must be omitted for all other load balancer types.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource. Provide this property when +you create the resource.`, + }, + "is_mirroring_collector": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Indicates whether or not this load balancer can be used as a collector for +packet mirroring. To prevent mirroring loops, instances behind this +load balancer will not have their traffic mirrored even if a +'PacketMirroring' rule applies to them. 
+ +This can only be set to true for load balancers that have their +'loadBalancingScheme' set to 'INTERNAL'.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels to apply to this forwarding rule. A list of key->value pairs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "load_balancing_scheme": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"EXTERNAL", "EXTERNAL_MANAGED", "INTERNAL", "INTERNAL_MANAGED", ""}), + Description: `Specifies the forwarding rule type. + +For more information about forwarding rules, refer to +[Forwarding rule concepts](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts). Default value: "EXTERNAL" Possible values: ["EXTERNAL", "EXTERNAL_MANAGED", "INTERNAL", "INTERNAL_MANAGED"]`, + Default: "EXTERNAL", + }, + "network": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `This field is not used for external load balancing. + +For Internal TCP/UDP Load Balancing, this field identifies the network that +the load balanced IP should belong to for this Forwarding Rule. +If the subnetwork is specified, the network of the subnetwork will be used. +If neither subnetwork nor this field is specified, the default network will +be used. + +For Private Service Connect forwarding rules that forward traffic to Google +APIs, a network must be provided.`, + }, + "network_tier": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"PREMIUM", "STANDARD", ""}), + Description: `This signifies the networking tier used for configuring +this load balancer and can only take the following values: +'PREMIUM', 'STANDARD'. + +For regional ForwardingRule, the valid values are 'PREMIUM' and +'STANDARD'. For GlobalForwardingRule, the valid value is +'PREMIUM'. 
+ +If this field is not specified, it is assumed to be 'PREMIUM'. +If 'IPAddress' is specified, this value must be equal to the +networkTier of the Address. Possible values: ["PREMIUM", "STANDARD"]`, + }, + "no_automate_dns_zone": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field.`, + }, + "port_range": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.PortRangeDiffSuppress, + Description: `This field can only be used: + +* If 'IPProtocol' is one of TCP, UDP, or SCTP. +* By backend service-based network load balancers, target pool-based +network load balancers, internal proxy load balancers, external proxy load +balancers, Traffic Director, external protocol forwarding, and Classic VPN. +Some products have restrictions on what ports can be used. See +[port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications) +for details. + + +Only packets addressed to ports in the specified range will be forwarded to +the backends configured with this forwarding rule. + +The 'ports' and 'port_range' fields are mutually exclusive. + +For external forwarding rules, two or more forwarding rules cannot use the +same '[IPAddress, IPProtocol]' pair, and cannot have +overlapping 'portRange's. + +For internal forwarding rules within the same VPC network, two or more +forwarding rules cannot use the same '[IPAddress, IPProtocol]' +pair, and cannot have overlapping 'portRange's.`, + }, + "ports": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: `This field can only be used: + +* If 'IPProtocol' is one of TCP, UDP, or SCTP. +* By internal TCP/UDP load balancers, backend service-based network load +balancers, and internal protocol forwarding. 
+ + +You can specify a list of up to five ports by number, separated by commas. +The ports can be contiguous or discontiguous. Only packets addressed to +these ports will be forwarded to the backends configured with this +forwarding rule. + +For external forwarding rules, two or more forwarding rules cannot use the +same '[IPAddress, IPProtocol]' pair, and cannot share any values +defined in 'ports'. + +For internal forwarding rules within the same VPC network, two or more +forwarding rules cannot use the same '[IPAddress, IPProtocol]' +pair, and cannot share any values defined in 'ports'. + +The 'ports' and 'port_range' fields are mutually exclusive.`, + MaxItems: 5, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the region where the regional forwarding rule resides. + +This field is not applicable to global forwarding rules.`, + }, + "service_directory_registrations": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Service Directory resources to register this forwarding rule with. 
+ +Currently, only supports a single Service Directory resource.`, + MinItems: 0, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "namespace": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Service Directory namespace to register the forwarding rule under.`, + }, + "service": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Service Directory service to register the forwarding rule under.`, + }, + }, + }, + }, + "service_label": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `An optional prefix to the service name for this Forwarding Rule. +If specified, will be the first label of the fully qualified service +name. + +The label must be 1-63 characters long, and comply with RFC1035. +Specifically, the label must be 1-63 characters long and match the +regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the first +character must be a lowercase letter, and all following characters +must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash. + +This field is only used for INTERNAL load balancing.`, + }, + "source_ip_ranges": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. 
Each sourceIpRange entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "subnetwork": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `This field identifies the subnetwork that the load balanced IP should +belong to for this Forwarding Rule, used in internal load balancing and +network load balancing with IPv6. + +If the network specified is in auto subnet mode, this field is optional. +However, a subnetwork must be specified if the network is in custom subnet +mode or when creating external forwarding rule with IPv6.`, + }, + "target": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The URL of the target resource to receive the matched traffic. For +regional forwarding rules, this target must be in the same region as the +forwarding rule. For global forwarding rules, this target must be a global +load balancing resource. + +The forwarded traffic must be of a type appropriate to the target object. +* For load balancers, see the "Target" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). +* For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: + * 'vpc-sc' - [ APIs that support VPC Service Controls](https://cloud.google.com/vpc-service-controls/docs/supported-products). + * 'all-apis' - [All supported Google APIs](https://cloud.google.com/vpc/docs/private-service-connect#supported-apis). 
+ + +For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment.`, + }, + "base_forwarding_rule": { + Type: schema.TypeString, + Computed: true, + Description: `[Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The fingerprint used for optimistic locking of this resource. Used +internally during updates.`, + }, + "psc_connection_id": { + Type: schema.TypeString, + Computed: true, + Description: `The PSC connection id of the PSC Forwarding Rule.`, + }, + "psc_connection_status": { + Type: schema.TypeString, + Computed: true, + Description: `The PSC connection status of the PSC Forwarding Rule. Possible values: 'STATUS_UNSPECIFIED', 'PENDING', 'ACCEPTED', 'REJECTED', 'CLOSED'`, + }, + "service_name": { + Type: schema.TypeString, + Computed: true, + Description: `The internal fully qualified service name for this Forwarding Rule. 
+ +This field is only used for INTERNAL load balancing.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + isMirroringCollectorProp, err := expandComputeForwardingRuleIsMirroringCollector(d.Get("is_mirroring_collector"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("is_mirroring_collector"); !tpgresource.IsEmptyValue(reflect.ValueOf(isMirroringCollectorProp)) && (ok || !reflect.DeepEqual(v, isMirroringCollectorProp)) { + obj["isMirroringCollector"] = isMirroringCollectorProp + } + descriptionProp, err := expandComputeForwardingRuleDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + IPAddressProp, err := expandComputeForwardingRuleIPAddress(d.Get("ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(IPAddressProp)) && (ok || !reflect.DeepEqual(v, IPAddressProp)) { + obj["IPAddress"] = IPAddressProp + } + IPProtocolProp, err := expandComputeForwardingRuleIPProtocol(d.Get("ip_protocol"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(IPProtocolProp)) && (ok || !reflect.DeepEqual(v, IPProtocolProp)) { + obj["IPProtocol"] = IPProtocolProp + } + backendServiceProp, err := 
expandComputeForwardingRuleBackendService(d.Get("backend_service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backend_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(backendServiceProp)) && (ok || !reflect.DeepEqual(v, backendServiceProp)) { + obj["backendService"] = backendServiceProp + } + loadBalancingSchemeProp, err := expandComputeForwardingRuleLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("load_balancing_scheme"); !tpgresource.IsEmptyValue(reflect.ValueOf(loadBalancingSchemeProp)) && (ok || !reflect.DeepEqual(v, loadBalancingSchemeProp)) { + obj["loadBalancingScheme"] = loadBalancingSchemeProp + } + nameProp, err := expandComputeForwardingRuleName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + networkProp, err := expandComputeForwardingRuleNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + portRangeProp, err := expandComputeForwardingRulePortRange(d.Get("port_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("port_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(portRangeProp)) && (ok || !reflect.DeepEqual(v, portRangeProp)) { + obj["portRange"] = portRangeProp + } + portsProp, err := expandComputeForwardingRulePorts(d.Get("ports"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ports"); !tpgresource.IsEmptyValue(reflect.ValueOf(portsProp)) && (ok || !reflect.DeepEqual(v, portsProp)) { + obj["ports"] = portsProp + } + subnetworkProp, err := 
expandComputeForwardingRuleSubnetwork(d.Get("subnetwork"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subnetwork"); !tpgresource.IsEmptyValue(reflect.ValueOf(subnetworkProp)) && (ok || !reflect.DeepEqual(v, subnetworkProp)) { + obj["subnetwork"] = subnetworkProp + } + targetProp, err := expandComputeForwardingRuleTarget(d.Get("target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target"); !tpgresource.IsEmptyValue(reflect.ValueOf(targetProp)) && (ok || !reflect.DeepEqual(v, targetProp)) { + obj["target"] = targetProp + } + allowGlobalAccessProp, err := expandComputeForwardingRuleAllowGlobalAccess(d.Get("allow_global_access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("allow_global_access"); ok || !reflect.DeepEqual(v, allowGlobalAccessProp) { + obj["allowGlobalAccess"] = allowGlobalAccessProp + } + labelsProp, err := expandComputeForwardingRuleLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + labelFingerprintProp, err := expandComputeForwardingRuleLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + allPortsProp, err := expandComputeForwardingRuleAllPorts(d.Get("all_ports"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("all_ports"); !tpgresource.IsEmptyValue(reflect.ValueOf(allPortsProp)) && (ok || !reflect.DeepEqual(v, allPortsProp)) { + obj["allPorts"] = allPortsProp + } + networkTierProp, err := expandComputeForwardingRuleNetworkTier(d.Get("network_tier"), d, config) + 
if err != nil { + return err + } else if v, ok := d.GetOkExists("network_tier"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkTierProp)) && (ok || !reflect.DeepEqual(v, networkTierProp)) { + obj["networkTier"] = networkTierProp + } + serviceDirectoryRegistrationsProp, err := expandComputeForwardingRuleServiceDirectoryRegistrations(d.Get("service_directory_registrations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_directory_registrations"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceDirectoryRegistrationsProp)) && (ok || !reflect.DeepEqual(v, serviceDirectoryRegistrationsProp)) { + obj["serviceDirectoryRegistrations"] = serviceDirectoryRegistrationsProp + } + serviceLabelProp, err := expandComputeForwardingRuleServiceLabel(d.Get("service_label"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_label"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceLabelProp)) && (ok || !reflect.DeepEqual(v, serviceLabelProp)) { + obj["serviceLabel"] = serviceLabelProp + } + sourceIpRangesProp, err := expandComputeForwardingRuleSourceIpRanges(d.Get("source_ip_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_ip_ranges"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceIpRangesProp)) && (ok || !reflect.DeepEqual(v, sourceIpRangesProp)) { + obj["sourceIpRanges"] = sourceIpRangesProp + } + allowPscGlobalAccessProp, err := expandComputeForwardingRuleAllowPscGlobalAccess(d.Get("allow_psc_global_access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("allow_psc_global_access"); ok || !reflect.DeepEqual(v, allowPscGlobalAccessProp) { + obj["allowPscGlobalAccess"] = allowPscGlobalAccessProp + } + noAutomateDnsZoneProp, err := expandComputeForwardingRuleNoAutomateDnsZone(d.Get("no_automate_dns_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("no_automate_dns_zone"); ok || 
!reflect.DeepEqual(v, noAutomateDnsZoneProp) { + obj["noAutomateDnsZone"] = noAutomateDnsZoneProp + } + regionProp, err := expandComputeForwardingRuleRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + log.Printf("[DEBUG] Creating new ForwardingRule: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ForwardingRule: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ForwardingRule: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + id = strings.ReplaceAll(id, "projects/projects/", "projects/") + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating ForwardingRule", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create ForwardingRule: %s", err) + } + + if 
v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + // Labels cannot be set in a create. We'll have to set them here. + err = resourceComputeForwardingRuleRead(d, meta) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + // d.Get("labels") will have been overridden by the Read call. + labelsProp, err := expandComputeForwardingRuleLabels(v, d, config) + if err != nil { + return err + } + obj["labels"] = labelsProp + labelFingerprintProp := d.Get("label_fingerprint") + obj["labelFingerprint"] = labelFingerprintProp + + url, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setLabels") + if err != nil { + return err + } + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return fmt.Errorf("Error adding labels to ComputeForwardingRule %q: %s", d.Id(), err) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating ComputeForwardingRule Labels", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + } + + log.Printf("[DEBUG] Finished creating ForwardingRule %q: %#v", d.Id(), res) + + return resourceComputeForwardingRuleRead(d, meta) +} + +func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err 
!= nil { + return fmt.Errorf("Error fetching project for ForwardingRule: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeForwardingRule %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeForwardingRuleCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("is_mirroring_collector", flattenComputeForwardingRuleIsMirroringCollector(res["isMirroringCollector"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("psc_connection_id", flattenComputeForwardingRulePscConnectionId(res["pscConnectionId"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("psc_connection_status", flattenComputeForwardingRulePscConnectionStatus(res["pscConnectionStatus"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("description", flattenComputeForwardingRuleDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("ip_address", flattenComputeForwardingRuleIPAddress(res["IPAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("ip_protocol", 
flattenComputeForwardingRuleIPProtocol(res["IPProtocol"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("backend_service", flattenComputeForwardingRuleBackendService(res["backendService"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("load_balancing_scheme", flattenComputeForwardingRuleLoadBalancingScheme(res["loadBalancingScheme"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("name", flattenComputeForwardingRuleName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("network", flattenComputeForwardingRuleNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("port_range", flattenComputeForwardingRulePortRange(res["portRange"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("ports", flattenComputeForwardingRulePorts(res["ports"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("subnetwork", flattenComputeForwardingRuleSubnetwork(res["subnetwork"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("target", flattenComputeForwardingRuleTarget(res["target"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("allow_global_access", flattenComputeForwardingRuleAllowGlobalAccess(res["allowGlobalAccess"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("labels", flattenComputeForwardingRuleLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("label_fingerprint", 
flattenComputeForwardingRuleLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("all_ports", flattenComputeForwardingRuleAllPorts(res["allPorts"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("network_tier", flattenComputeForwardingRuleNetworkTier(res["networkTier"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("service_directory_registrations", flattenComputeForwardingRuleServiceDirectoryRegistrations(res["serviceDirectoryRegistrations"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("service_label", flattenComputeForwardingRuleServiceLabel(res["serviceLabel"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("service_name", flattenComputeForwardingRuleServiceName(res["serviceName"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("source_ip_ranges", flattenComputeForwardingRuleSourceIpRanges(res["sourceIpRanges"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("base_forwarding_rule", flattenComputeForwardingRuleBaseForwardingRule(res["baseForwardingRule"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("allow_psc_global_access", flattenComputeForwardingRuleAllowPscGlobalAccess(res["allowPscGlobalAccess"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("region", flattenComputeForwardingRuleRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + if err := d.Set("self_link", 
tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + + return nil +} + +func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ForwardingRule: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + d.Partial(true) + + if d.HasChange("target") { + obj := make(map[string]interface{}) + + targetProp, err := expandComputeForwardingRuleTarget(d.Get("target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetProp)) { + obj["target"] = targetProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setTarget") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating ForwardingRule %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ForwardingRule %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating ForwardingRule", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != 
nil { + return err + } + } + if d.HasChange("allow_global_access") { + obj := make(map[string]interface{}) + + allowGlobalAccessProp, err := expandComputeForwardingRuleAllowGlobalAccess(d.Get("allow_global_access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("allow_global_access"); ok || !reflect.DeepEqual(v, allowGlobalAccessProp) { + obj["allowGlobalAccess"] = allowGlobalAccessProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating ForwardingRule %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ForwardingRule %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating ForwardingRule", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("labels") || d.HasChange("label_fingerprint") { + obj := make(map[string]interface{}) + + labelsProp, err := expandComputeForwardingRuleLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + labelFingerprintProp, err := expandComputeForwardingRuleLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setLabels") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating ForwardingRule %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ForwardingRule %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating ForwardingRule", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeForwardingRuleRead(d, meta) +} + +func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ForwardingRule: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", 
"projects/") + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting ForwardingRule %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ForwardingRule") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting ForwardingRule", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ForwardingRule %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeForwardingRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/forwardingRules/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + id = strings.ReplaceAll(id, "projects/projects/", "projects/") + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeForwardingRuleCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleIsMirroringCollector(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenComputeForwardingRulePscConnectionId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRulePscConnectionStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleIPAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleIPProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeForwardingRuleLoadBalancingScheme(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeForwardingRulePortRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRulePorts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenComputeForwardingRuleSubnetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return 
tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeForwardingRuleTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleAllowGlobalAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleLabelFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleAllPorts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleNetworkTier(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleServiceDirectoryRegistrations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "namespace": flattenComputeForwardingRuleServiceDirectoryRegistrationsNamespace(original["namespace"], d, config), + "service": flattenComputeForwardingRuleServiceDirectoryRegistrationsService(original["service"], d, config), + }) + } + return transformed +} +func flattenComputeForwardingRuleServiceDirectoryRegistrationsNamespace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleServiceDirectoryRegistrationsService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v 
+} + +func flattenComputeForwardingRuleServiceLabel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleSourceIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleBaseForwardingRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleAllowPscGlobalAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeForwardingRuleRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandComputeForwardingRuleIsMirroringCollector(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleIPAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleIPProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + // This method returns a full self link from a partial self link. + if v == nil || v.(string) == "" { + // It does not try to construct anything from empty. 
+ return "", nil + } else if strings.HasPrefix(v.(string), "https://") { + // Anything that starts with a URL scheme is assumed to be a self link worth using. + return v, nil + } else if strings.HasPrefix(v.(string), "projects/") { + // If the self link references a project, we'll just stuck the compute prefix on it + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + if err != nil { + return "", err + } + return url, nil + } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { + // For regional or zonal resources which include their region or zone, just put the project in front. + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + if err != nil { + return nil, err + } + return url + v.(string), nil + } + // Anything else is assumed to be a regional resource, with a partial link that begins with the resource name. + // This isn't very likely - it's a last-ditch effort to extract something useful here. We can do a better job + // as soon as MultiResourceRefs are working since we'll know the types that this field is supposed to point to. 
+ url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/") + if err != nil { + return nil, err + } + return url + v.(string), nil +} + +func expandComputeForwardingRuleLoadBalancingScheme(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeForwardingRulePortRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRulePorts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v.(*schema.Set).List(), nil +} + +func expandComputeForwardingRuleSubnetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for subnetwork: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeForwardingRuleTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + // This method returns a full self link from a partial self link. + if v == nil || v.(string) == "" { + // It does not try to construct anything from empty. 
+ return "", nil + } else if strings.HasPrefix(v.(string), "https://") { + // Anything that starts with a URL scheme is assumed to be a self link worth using. + return v, nil + } else if strings.HasPrefix(v.(string), "projects/") { + // If the self link references a project, we'll just stuck the compute prefix on it + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + if err != nil { + return "", err + } + return url, nil + } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { + // For regional or zonal resources which include their region or zone, just put the project in front. + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + if err != nil { + return nil, err + } + return url + v.(string), nil + } + // Anything else is assumed to be a regional resource, with a partial link that begins with the resource name. + // This isn't very likely - it's a last-ditch effort to extract something useful here. We can do a better job + // as soon as MultiResourceRefs are working since we'll know the types that this field is supposed to point to. 
+ url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/") + if err != nil { + return nil, err + } + return url + v.(string), nil +} + +func expandComputeForwardingRuleAllowGlobalAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandComputeForwardingRuleLabelFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleAllPorts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleNetworkTier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleServiceDirectoryRegistrations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNamespace, err := expandComputeForwardingRuleServiceDirectoryRegistrationsNamespace(original["namespace"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["namespace"] = transformedNamespace + } + + transformedService, err := 
expandComputeForwardingRuleServiceDirectoryRegistrationsService(original["service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["service"] = transformedService + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeForwardingRuleServiceDirectoryRegistrationsNamespace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleServiceDirectoryRegistrationsService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleServiceLabel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleSourceIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleAllowPscGlobalAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleNoAutomateDnsZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeForwardingRuleRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_forwarding_rule_sweeper.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_forwarding_rule_sweeper.go new file mode 100644 index 0000000000..708ba3d89f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_forwarding_rule_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeForwardingRule", testSweepComputeForwardingRule) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeForwardingRule(region string) error { + resourceName := "ComputeForwardingRule" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: 
%s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/forwardingRules", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_address.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_address.go new file mode 100644 index 0000000000..c0b180a4c9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_address.go @@ -0,0 +1,491 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeGlobalAddress() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeGlobalAddressCreate, + Read: resourceComputeGlobalAddressRead, + Delete: resourceComputeGlobalAddressDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeGlobalAddressImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and +match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means +the first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The IP address or beginning of the address range represented by this +resource. This can be supplied as an input to reserve a specific +address or omitted to allow GCP to choose a valid one for you.`, + }, + "address_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"EXTERNAL", "INTERNAL", ""}), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("EXTERNAL"), + Description: `The type of the address to reserve. + +* EXTERNAL indicates public/external single IP address. +* INTERNAL indicates internal IP ranges belonging to some network. Default value: "EXTERNAL" Possible values: ["EXTERNAL", "INTERNAL"]`, + Default: "EXTERNAL", + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "ip_version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"IPV4", "IPV6", ""}), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("IPV4"), + Description: `The IP Version that will be used by this address. The default value is 'IPV4'. Possible values: ["IPV4", "IPV6"]`, + }, + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URL of the network in which to reserve the IP range. The IP range +must be in RFC1918 space. The network cannot be deleted if there are +any reserved IP ranges referring to it. 
+ +This should only be set when using an Internal address.`, + }, + "prefix_length": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The prefix length of the IP range. If not present, it means the +address field is a single IP address. + +This field is not applicable to addresses with addressType=EXTERNAL, +or addressType=INTERNAL when purpose=PRIVATE_SERVICE_CONNECT`, + }, + "purpose": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The purpose of the resource. Possible values include: + +* VPC_PEERING - for peer networks + +* PRIVATE_SERVICE_CONNECT - for ([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) only) Private Service Connect networks`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + addressProp, err := expandComputeGlobalAddressAddress(d.Get("address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("address"); !tpgresource.IsEmptyValue(reflect.ValueOf(addressProp)) && (ok || !reflect.DeepEqual(v, addressProp)) { + obj["address"] = addressProp + } + descriptionProp, err := expandComputeGlobalAddressDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = 
descriptionProp + } + nameProp, err := expandComputeGlobalAddressName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + ipVersionProp, err := expandComputeGlobalAddressIpVersion(d.Get("ip_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipVersionProp)) && (ok || !reflect.DeepEqual(v, ipVersionProp)) { + obj["ipVersion"] = ipVersionProp + } + prefixLengthProp, err := expandComputeGlobalAddressPrefixLength(d.Get("prefix_length"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("prefix_length"); !tpgresource.IsEmptyValue(reflect.ValueOf(prefixLengthProp)) && (ok || !reflect.DeepEqual(v, prefixLengthProp)) { + obj["prefixLength"] = prefixLengthProp + } + addressTypeProp, err := expandComputeGlobalAddressAddressType(d.Get("address_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("address_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(addressTypeProp)) && (ok || !reflect.DeepEqual(v, addressTypeProp)) { + obj["addressType"] = addressTypeProp + } + purposeProp, err := expandComputeGlobalAddressPurpose(d.Get("purpose"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("purpose"); !tpgresource.IsEmptyValue(reflect.ValueOf(purposeProp)) && (ok || !reflect.DeepEqual(v, purposeProp)) { + obj["purpose"] = purposeProp + } + networkProp, err := expandComputeGlobalAddressNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/global/addresses") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new GlobalAddress: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GlobalAddress: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating GlobalAddress: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/addresses/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating GlobalAddress", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create GlobalAddress: %s", err) + } + + log.Printf("[DEBUG] Finished creating GlobalAddress %q: %#v", d.Id(), res) + + return resourceComputeGlobalAddressRead(d, meta) +} + +func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/addresses/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error 
fetching project for GlobalAddress: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeGlobalAddress %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } + + if err := d.Set("address", flattenComputeGlobalAddressAddress(res["address"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeGlobalAddressCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } + if err := d.Set("description", flattenComputeGlobalAddressDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } + if err := d.Set("name", flattenComputeGlobalAddressName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } + if err := d.Set("ip_version", flattenComputeGlobalAddressIpVersion(res["ipVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } + if err := d.Set("prefix_length", flattenComputeGlobalAddressPrefixLength(res["prefixLength"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } + if err := d.Set("address_type", flattenComputeGlobalAddressAddressType(res["addressType"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } + if err := d.Set("purpose", 
flattenComputeGlobalAddressPurpose(res["purpose"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } + if err := d.Set("network", flattenComputeGlobalAddressNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading GlobalAddress: %s", err) + } + + return nil +} + +func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GlobalAddress: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/addresses/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting GlobalAddress %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "GlobalAddress") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting GlobalAddress", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting GlobalAddress %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeGlobalAddressImport(d 
*schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/addresses/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/addresses/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeGlobalAddressAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalAddressCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalAddressDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalAddressName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalAddressIpVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalAddressPrefixLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeGlobalAddressAddressType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalAddressPurpose(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalAddressNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func expandComputeGlobalAddressAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalAddressDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalAddressName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalAddressIpVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalAddressPrefixLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalAddressAddressType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalAddressPurpose(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalAddressNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_address_sweeper.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_address_sweeper.go new file mode 100644 index 0000000000..0f30f43b0d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_address_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeGlobalAddress", testSweepComputeGlobalAddress) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeGlobalAddress(region string) error { + resourceName := "ComputeGlobalAddress" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", 
err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/addresses", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/addresses/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_forwarding_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_forwarding_rule.go new file mode 100644 index 0000000000..d9154e7c26 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_forwarding_rule.go @@ -0,0 +1,1066 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeGlobalForwardingRule() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeGlobalForwardingRuleCreate, + Read: resourceComputeGlobalForwardingRuleRead, + Update: resourceComputeGlobalForwardingRuleUpdate, + Delete: resourceComputeGlobalForwardingRuleDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeGlobalForwardingRuleImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource; provided by the client when the resource is created. +The name must be 1-63 characters long, and comply with +[RFC1035](https://www.ietf.org/rfc/rfc1035.txt). + +Specifically, the name must be 1-63 characters long and match the regular +expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the first +character must be a lowercase letter, and all following characters must +be a dash, lowercase letter, or digit, except the last character, which +cannot be a dash. + +For Private Service Connect forwarding rules that forward traffic to Google +APIs, the forwarding rule name must be a 1-20 characters string with +lowercase letters and numbers and must start with a letter.`, + }, + "target": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The URL of the target resource to receive the matched traffic. For +regional forwarding rules, this target must be in the same region as the +forwarding rule. For global forwarding rules, this target must be a global +load balancing resource. + +The forwarded traffic must be of a type appropriate to the target object. +* For load balancers, see the "Target" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). +* For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: + * 'vpc-sc' - [ APIs that support VPC Service Controls](https://cloud.google.com/vpc-service-controls/docs/supported-products). + * 'all-apis' - [All supported Google APIs](https://cloud.google.com/vpc/docs/private-service-connect#supported-apis). + + +For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment.`, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.InternalIpDiffSuppress, + Description: `IP address for which this forwarding rule accepts traffic. When a client +sends traffic to this IP address, the forwarding rule directs the traffic +to the referenced 'target'. 
+ +While creating a forwarding rule, specifying an 'IPAddress' is +required under the following circumstances: + +* When the 'target' is set to 'targetGrpcProxy' and +'validateForProxyless' is set to 'true', the +'IPAddress' should be set to '0.0.0.0'. +* When the 'target' is a Private Service Connect Google APIs +bundle, you must specify an 'IPAddress'. + + +Otherwise, you can optionally specify an IP address that references an +existing static (reserved) IP address resource. When omitted, Google Cloud +assigns an ephemeral IP address. + +Use one of the following formats to specify an IP address while creating a +forwarding rule: + +* IP address number, as in '100.1.2.3' +* IPv6 address range, as in '2600:1234::/96' +* Full resource URL, as in +'https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name' +* Partial URL or by name, as in: + * 'projects/project_id/regions/region/addresses/address-name' + * 'regions/region/addresses/address-name' + * 'global/addresses/address-name' + * 'address-name' + + +The forwarding rule's 'target', +and in most cases, also the 'loadBalancingScheme', determine the +type of IP address that you can use. For detailed information, see +[IP address +specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). + +When reading an 'IPAddress', the API always returns the IP +address number.`, + }, + "ip_protocol": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"TCP", "UDP", "ESP", "AH", "SCTP", "ICMP", ""}), + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: `The IP protocol to which this rule applies. + +For protocol forwarding, valid +options are 'TCP', 'UDP', 'ESP', +'AH', 'SCTP', 'ICMP' and +'L3_DEFAULT'. 
+ +The valid IP protocols are different for different load balancing products +as described in [Load balancing +features](https://cloud.google.com/load-balancing/docs/features#protocols_from_the_load_balancer_to_the_backends). Possible values: ["TCP", "UDP", "ESP", "AH", "SCTP", "ICMP"]`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource. Provide this property when +you create the resource.`, + }, + "ip_version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"IPV4", "IPV6", ""}), + Description: `The IP Version that will be used by this global forwarding rule. Possible values: ["IPV4", "IPV6"]`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels to apply to this forwarding rule. A list of key->value pairs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "load_balancing_scheme": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"EXTERNAL", "EXTERNAL_MANAGED", "INTERNAL_SELF_MANAGED", ""}), + Description: `Specifies the forwarding rule type. + +For more information about forwarding rules, refer to +[Forwarding rule concepts](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts). Default value: "EXTERNAL" Possible values: ["EXTERNAL", "EXTERNAL_MANAGED", "INTERNAL_SELF_MANAGED"]`, + Default: "EXTERNAL", + }, + "metadata_filters": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Opaque filter criteria used by Loadbalancer to restrict routing +configuration to a limited set xDS compliant clients. In their xDS +requests to Loadbalancer, xDS clients present node metadata. If a +match takes place, the relevant routing configuration is made available +to those proxies. 
+ +For each metadataFilter in this list, if its filterMatchCriteria is set +to MATCH_ANY, at least one of the filterLabels must match the +corresponding label provided in the metadata. If its filterMatchCriteria +is set to MATCH_ALL, then all of its filterLabels must match with +corresponding labels in the provided metadata. + +metadataFilters specified here can be overridden by those specified in +the UrlMap that this ForwardingRule references. + +metadataFilters only applies to Loadbalancers that have their +loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "filter_labels": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The list of label value pairs that must match labels in the +provided metadata based on filterMatchCriteria + +This list must not be empty and can have at the most 64 entries.`, + MinItems: 1, + MaxItems: 64, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the metadata label. The length must be between +1 and 1024 characters, inclusive.`, + }, + "value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The value that the label must match. The value has a maximum +length of 1024 characters.`, + }, + }, + }, + }, + "filter_match_criteria": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"MATCH_ANY", "MATCH_ALL"}), + Description: `Specifies how individual filterLabel matches within the list of +filterLabels contribute towards the overall metadataFilter match. + +MATCH_ANY - At least one of the filterLabels must have a matching +label in the provided metadata. +MATCH_ALL - All filterLabels must have matching labels in the +provided metadata. 
Possible values: ["MATCH_ANY", "MATCH_ALL"]`, + }, + }, + }, + }, + "network": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `This field is not used for external load balancing. + +For Internal TCP/UDP Load Balancing, this field identifies the network that +the load balanced IP should belong to for this Forwarding Rule. +If the subnetwork is specified, the network of the subnetwork will be used. +If neither subnetwork nor this field is specified, the default network will +be used. + +For Private Service Connect forwarding rules that forward traffic to Google +APIs, a network must be provided.`, + }, + "no_automate_dns_zone": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field.`, + }, + "port_range": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.PortRangeDiffSuppress, + Description: `This field can only be used: + +* If 'IPProtocol' is one of TCP, UDP, or SCTP. +* By backend service-based network load balancers, target pool-based +network load balancers, internal proxy load balancers, external proxy load +balancers, Traffic Director, external protocol forwarding, and Classic VPN. +Some products have restrictions on what ports can be used. See +[port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications) +for details. 
+ + +* TargetHttpProxy: 80, 8080 +* TargetHttpsProxy: 443 +* TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, + 1883, 5222 +* TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, + 1883, 5222 +* TargetVpnGateway: 500, 4500`, + }, + "source_ip_ranges": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. Each sourceIpRange entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "base_forwarding_rule": { + Type: schema.TypeString, + Computed: true, + Description: `[Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified.`, + }, + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The fingerprint used for optimistic locking of this resource. Used +internally during updates.`, + }, + "psc_connection_id": { + Type: schema.TypeString, + Computed: true, + Description: `The PSC connection id of the PSC Forwarding Rule.`, + }, + "psc_connection_status": { + Type: schema.TypeString, + Computed: true, + Description: `The PSC connection status of the PSC Forwarding Rule. 
Possible values: 'STATUS_UNSPECIFIED', 'PENDING', 'ACCEPTED', 'REJECTED', 'CLOSED'`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeGlobalForwardingRuleDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + IPAddressProp, err := expandComputeGlobalForwardingRuleIPAddress(d.Get("ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(IPAddressProp)) && (ok || !reflect.DeepEqual(v, IPAddressProp)) { + obj["IPAddress"] = IPAddressProp + } + IPProtocolProp, err := expandComputeGlobalForwardingRuleIPProtocol(d.Get("ip_protocol"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(IPProtocolProp)) && (ok || !reflect.DeepEqual(v, IPProtocolProp)) { + obj["IPProtocol"] = IPProtocolProp + } + ipVersionProp, err := expandComputeGlobalForwardingRuleIpVersion(d.Get("ip_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipVersionProp)) && (ok || !reflect.DeepEqual(v, ipVersionProp)) { + obj["ipVersion"] = ipVersionProp + } + labelsProp, err := expandComputeGlobalForwardingRuleLabels(d.Get("labels"), d, 
config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + labelFingerprintProp, err := expandComputeGlobalForwardingRuleLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + loadBalancingSchemeProp, err := expandComputeGlobalForwardingRuleLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("load_balancing_scheme"); !tpgresource.IsEmptyValue(reflect.ValueOf(loadBalancingSchemeProp)) && (ok || !reflect.DeepEqual(v, loadBalancingSchemeProp)) { + obj["loadBalancingScheme"] = loadBalancingSchemeProp + } + metadataFiltersProp, err := expandComputeGlobalForwardingRuleMetadataFilters(d.Get("metadata_filters"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata_filters"); !tpgresource.IsEmptyValue(reflect.ValueOf(metadataFiltersProp)) && (ok || !reflect.DeepEqual(v, metadataFiltersProp)) { + obj["metadataFilters"] = metadataFiltersProp + } + nameProp, err := expandComputeGlobalForwardingRuleName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + networkProp, err := expandComputeGlobalForwardingRuleNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + portRangeProp, 
err := expandComputeGlobalForwardingRulePortRange(d.Get("port_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("port_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(portRangeProp)) && (ok || !reflect.DeepEqual(v, portRangeProp)) { + obj["portRange"] = portRangeProp + } + targetProp, err := expandComputeGlobalForwardingRuleTarget(d.Get("target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target"); !tpgresource.IsEmptyValue(reflect.ValueOf(targetProp)) && (ok || !reflect.DeepEqual(v, targetProp)) { + obj["target"] = targetProp + } + sourceIpRangesProp, err := expandComputeGlobalForwardingRuleSourceIpRanges(d.Get("source_ip_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_ip_ranges"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceIpRangesProp)) && (ok || !reflect.DeepEqual(v, sourceIpRangesProp)) { + obj["sourceIpRanges"] = sourceIpRangesProp + } + noAutomateDnsZoneProp, err := expandComputeGlobalForwardingRuleNoAutomateDnsZone(d.Get("no_automate_dns_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("no_automate_dns_zone"); ok || !reflect.DeepEqual(v, noAutomateDnsZoneProp) { + obj["noAutomateDnsZone"] = noAutomateDnsZoneProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/forwardingRules") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + log.Printf("[DEBUG] Creating new GlobalForwardingRule: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GlobalForwardingRule: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + 
+ res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating GlobalForwardingRule: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/forwardingRules/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + id = strings.ReplaceAll(id, "projects/projects/", "projects/") + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating GlobalForwardingRule", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create GlobalForwardingRule: %s", err) + } + + if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + // Labels cannot be set in a create. We'll have to set them here. + err = resourceComputeGlobalForwardingRuleRead(d, meta) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + // d.Get("labels") will have been overridden by the Read call. 
+ labelsProp, err := expandComputeGlobalForwardingRuleLabels(v, d, config) + if err != nil { + return err + } + obj["labels"] = labelsProp + labelFingerprintProp := d.Get("label_fingerprint") + obj["labelFingerprint"] = labelFingerprintProp + + url, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/forwardingRules/{{name}}/setLabels") + if err != nil { + return err + } + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return fmt.Errorf("Error adding labels to ComputeGlobalForwardingRule %q: %s", d.Id(), err) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating ComputeGlobalForwardingRule Labels", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + } + + log.Printf("[DEBUG] Finished creating GlobalForwardingRule %q: %#v", d.Id(), res) + + return resourceComputeGlobalForwardingRuleRead(d, meta) +} + +func resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/forwardingRules/{{name}}") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GlobalForwardingRule: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeGlobalForwardingRule %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + + if err := d.Set("psc_connection_id", flattenComputeGlobalForwardingRulePscConnectionId(res["pscConnectionId"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("psc_connection_status", flattenComputeGlobalForwardingRulePscConnectionStatus(res["pscConnectionStatus"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("description", flattenComputeGlobalForwardingRuleDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("ip_address", flattenComputeGlobalForwardingRuleIPAddress(res["IPAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("ip_protocol", flattenComputeGlobalForwardingRuleIPProtocol(res["IPProtocol"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("ip_version", flattenComputeGlobalForwardingRuleIpVersion(res["ipVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("labels", flattenComputeGlobalForwardingRuleLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("label_fingerprint", flattenComputeGlobalForwardingRuleLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading 
GlobalForwardingRule: %s", err) + } + if err := d.Set("load_balancing_scheme", flattenComputeGlobalForwardingRuleLoadBalancingScheme(res["loadBalancingScheme"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("metadata_filters", flattenComputeGlobalForwardingRuleMetadataFilters(res["metadataFilters"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("name", flattenComputeGlobalForwardingRuleName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("network", flattenComputeGlobalForwardingRuleNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("port_range", flattenComputeGlobalForwardingRulePortRange(res["portRange"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("target", flattenComputeGlobalForwardingRuleTarget(res["target"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("source_ip_ranges", flattenComputeGlobalForwardingRuleSourceIpRanges(res["sourceIpRanges"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("base_forwarding_rule", flattenComputeGlobalForwardingRuleBaseForwardingRule(res["baseForwardingRule"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + + return nil +} + +func resourceComputeGlobalForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GlobalForwardingRule: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + d.Partial(true) + + if d.HasChange("labels") || d.HasChange("label_fingerprint") { + obj := make(map[string]interface{}) + + labelsProp, err := expandComputeGlobalForwardingRuleLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + labelFingerprintProp, err := expandComputeGlobalForwardingRuleLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/forwardingRules/{{name}}/setLabels") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating GlobalForwardingRule %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating GlobalForwardingRule %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + 
config, res, project, "Updating GlobalForwardingRule", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("target") { + obj := make(map[string]interface{}) + + targetProp, err := expandComputeGlobalForwardingRuleTarget(d.Get("target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetProp)) { + obj["target"] = targetProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/forwardingRules/{{name}}/setTarget") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating GlobalForwardingRule %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating GlobalForwardingRule %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating GlobalForwardingRule", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeGlobalForwardingRuleRead(d, meta) +} + +func resourceComputeGlobalForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project 
for GlobalForwardingRule: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/forwardingRules/{{name}}") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting GlobalForwardingRule %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "GlobalForwardingRule") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting GlobalForwardingRule", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting GlobalForwardingRule %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeGlobalForwardingRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/forwardingRules/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/forwardingRules/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + id = strings.ReplaceAll(id, "projects/projects/", "projects/") + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeGlobalForwardingRulePscConnectionId(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRulePscConnectionStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleIPAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleIPProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleIpVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleLabelFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleLoadBalancingScheme(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleMetadataFilters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "filter_match_criteria": flattenComputeGlobalForwardingRuleMetadataFiltersFilterMatchCriteria(original["filterMatchCriteria"], d, config), + "filter_labels": 
flattenComputeGlobalForwardingRuleMetadataFiltersFilterLabels(original["filterLabels"], d, config), + }) + } + return transformed +} +func flattenComputeGlobalForwardingRuleMetadataFiltersFilterMatchCriteria(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleMetadataFiltersFilterLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenComputeGlobalForwardingRuleMetadataFiltersFilterLabelsName(original["name"], d, config), + "value": flattenComputeGlobalForwardingRuleMetadataFiltersFilterLabelsValue(original["value"], d, config), + }) + } + return transformed +} +func flattenComputeGlobalForwardingRuleMetadataFiltersFilterLabelsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleMetadataFiltersFilterLabelsValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeGlobalForwardingRulePortRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleTarget(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleSourceIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalForwardingRuleBaseForwardingRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeGlobalForwardingRuleDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalForwardingRuleIPAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalForwardingRuleIPProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalForwardingRuleIpVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalForwardingRuleLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandComputeGlobalForwardingRuleLabelFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalForwardingRuleLoadBalancingScheme(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalForwardingRuleMetadataFilters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) 
+ for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFilterMatchCriteria, err := expandComputeGlobalForwardingRuleMetadataFiltersFilterMatchCriteria(original["filter_match_criteria"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFilterMatchCriteria); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filterMatchCriteria"] = transformedFilterMatchCriteria + } + + transformedFilterLabels, err := expandComputeGlobalForwardingRuleMetadataFiltersFilterLabels(original["filter_labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFilterLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filterLabels"] = transformedFilterLabels + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeGlobalForwardingRuleMetadataFiltersFilterMatchCriteria(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalForwardingRuleMetadataFiltersFilterLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandComputeGlobalForwardingRuleMetadataFiltersFilterLabelsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandComputeGlobalForwardingRuleMetadataFiltersFilterLabelsValue(original["value"], d, config) + if err != nil { + return nil, err + } else if 
val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeGlobalForwardingRuleMetadataFiltersFilterLabelsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalForwardingRuleMetadataFiltersFilterLabelsValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalForwardingRuleName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalForwardingRuleNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeGlobalForwardingRulePortRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalForwardingRuleTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalForwardingRuleSourceIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalForwardingRuleNoAutomateDnsZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_forwarding_rule_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_forwarding_rule_sweeper.go new file mode 100644 index 0000000000..52b3aa2340 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_forwarding_rule_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeGlobalForwardingRule", testSweepComputeGlobalForwardingRule) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeGlobalForwardingRule(region string) error { + resourceName := "ComputeGlobalForwardingRule" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/forwardingRules", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err 
!= nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/forwardingRules/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_network_endpoint.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_network_endpoint.go new file mode 100644 index 0000000000..d2cfa40c71 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_network_endpoint.go @@ -0,0 +1,491 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeGlobalNetworkEndpoint() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeGlobalNetworkEndpointCreate, + Read: resourceComputeGlobalNetworkEndpointRead, + Delete: resourceComputeGlobalNetworkEndpointDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeGlobalNetworkEndpointImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "global_network_endpoint_group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The global network 
endpoint group this endpoint is part of.`, + }, + "port": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Port number of the external endpoint.`, + }, + "fqdn": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Fully qualified domain name of network endpoint. +This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.`, + AtLeastOneOf: []string{"fqdn", "ip_address"}, + }, + "ip_address": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `IPv4 address external endpoint.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeGlobalNetworkEndpointCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + portProp, err := expandNestedComputeGlobalNetworkEndpointPort(d.Get("port"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("port"); !tpgresource.IsEmptyValue(reflect.ValueOf(portProp)) && (ok || !reflect.DeepEqual(v, portProp)) { + obj["port"] = portProp + } + ipAddressProp, err := expandNestedComputeGlobalNetworkEndpointIpAddress(d.Get("ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipAddressProp)) && (ok || !reflect.DeepEqual(v, ipAddressProp)) { + obj["ipAddress"] = ipAddressProp + } + fqdnProp, err := expandNestedComputeGlobalNetworkEndpointFqdn(d.Get("fqdn"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fqdn"); !tpgresource.IsEmptyValue(reflect.ValueOf(fqdnProp)) && (ok || !reflect.DeepEqual(v, fqdnProp)) { + obj["fqdn"] = fqdnProp + } + + obj, err = 
resourceComputeGlobalNetworkEndpointEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "networkEndpoint/{{project}}/{{global_network_endpoint_group}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/attachNetworkEndpoints") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new GlobalNetworkEndpoint: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GlobalNetworkEndpoint: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating GlobalNetworkEndpoint: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating GlobalNetworkEndpoint", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create GlobalNetworkEndpoint: %s", err) + } + + log.Printf("[DEBUG] Finished creating GlobalNetworkEndpoint %q: %#v", d.Id(), res) + + return 
resourceComputeGlobalNetworkEndpointRead(d, meta) +} + +func resourceComputeGlobalNetworkEndpointRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/listNetworkEndpoints") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GlobalNetworkEndpoint: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeGlobalNetworkEndpoint %q", d.Id())) + } + + res, err = flattenNestedComputeGlobalNetworkEndpoint(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing ComputeGlobalNetworkEndpoint because it couldn't be matched.") + d.SetId("") + return nil + } + + res, err = resourceComputeGlobalNetworkEndpointDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing ComputeGlobalNetworkEndpoint because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading GlobalNetworkEndpoint: %s", err) + } + + if err := d.Set("port", flattenNestedComputeGlobalNetworkEndpointPort(res["port"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalNetworkEndpoint: %s", err) + } + if err := d.Set("ip_address", flattenNestedComputeGlobalNetworkEndpointIpAddress(res["ipAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalNetworkEndpoint: %s", err) + } + if err := d.Set("fqdn", flattenNestedComputeGlobalNetworkEndpointFqdn(res["fqdn"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalNetworkEndpoint: %s", err) + } + + return nil +} + +func resourceComputeGlobalNetworkEndpointDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GlobalNetworkEndpoint: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "networkEndpoint/{{project}}/{{global_network_endpoint_group}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/detachNetworkEndpoints") + if err != nil { + return err + } + + var obj map[string]interface{} + toDelete := make(map[string]interface{}) + portProp, err := expandNestedComputeGlobalNetworkEndpointPort(d.Get("port"), d, config) + if err != nil { + return err + } + if 
portProp != "" { + toDelete["port"] = portProp + } + + ipAddressProp, err := expandNestedComputeGlobalNetworkEndpointIpAddress(d.Get("ip_address"), d, config) + if err != nil { + return err + } + if ipAddressProp != "" { + toDelete["ipAddress"] = ipAddressProp + } + + fqdnProp, err := expandNestedComputeGlobalNetworkEndpointFqdn(d.Get("fqdn"), d, config) + if err != nil { + return err + } + if fqdnProp != "" { + toDelete["fqdn"] = fqdnProp + } + + obj = map[string]interface{}{ + "networkEndpoints": []map[string]interface{}{toDelete}, + } + log.Printf("[DEBUG] Deleting GlobalNetworkEndpoint %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "GlobalNetworkEndpoint") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting GlobalNetworkEndpoint", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting GlobalNetworkEndpoint %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeGlobalNetworkEndpointImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + // FQDN, port and ip_address are optional, so use * instead of + when reading the import id + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/networkEndpointGroups/(?P[^/]+)/(?P[^/]*)/(?P[^/]*)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)", + "(?P[^/]+)/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, 
err := tpgresource.ReplaceVars(d, config, "{{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedComputeGlobalNetworkEndpointPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles int given in float64 format + if floatVal, ok := v.(float64); ok { + return int(floatVal) + } + return v +} + +func flattenNestedComputeGlobalNetworkEndpointIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeGlobalNetworkEndpointFqdn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedComputeGlobalNetworkEndpointPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeGlobalNetworkEndpointIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeGlobalNetworkEndpointFqdn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceComputeGlobalNetworkEndpointEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Network Endpoint Group is a URL parameter only, so replace self-link/path with resource name only. 
+ if err := d.Set("global_network_endpoint_group", tpgresource.GetResourceNameFromSelfLink(d.Get("global_network_endpoint_group").(string))); err != nil { + return nil, fmt.Errorf("Error setting global_network_endpoint_group: %s", err) + } + + wrappedReq := map[string]interface{}{ + "networkEndpoints": []interface{}{obj}, + } + return wrappedReq, nil +} + +func flattenNestedComputeGlobalNetworkEndpoint(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["items"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value items. Actual value: %v", v) + } + + _, item, err := resourceComputeGlobalNetworkEndpointFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputeGlobalNetworkEndpointFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedIpAddress, err := expandNestedComputeGlobalNetworkEndpointIpAddress(d.Get("ip_address"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedIpAddress := flattenNestedComputeGlobalNetworkEndpointIpAddress(expectedIpAddress, d, meta.(*transport_tpg.Config)) + expectedFqdn, err := expandNestedComputeGlobalNetworkEndpointFqdn(d.Get("fqdn"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedFqdn := flattenNestedComputeGlobalNetworkEndpointFqdn(expectedFqdn, d, meta.(*transport_tpg.Config)) + expectedPort, err := expandNestedComputeGlobalNetworkEndpointPort(d.Get("port"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedPort 
:= flattenNestedComputeGlobalNetworkEndpointPort(expectedPort, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + // Decode list item before comparing. + item, err := resourceComputeGlobalNetworkEndpointDecoder(d, meta, item) + if err != nil { + return -1, nil, err + } + + itemIpAddress := flattenNestedComputeGlobalNetworkEndpointIpAddress(item["ipAddress"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemIpAddress)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedIpAddress))) && !reflect.DeepEqual(itemIpAddress, expectedFlattenedIpAddress) { + log.Printf("[DEBUG] Skipping item with ipAddress= %#v, looking for %#v)", itemIpAddress, expectedFlattenedIpAddress) + continue + } + itemFqdn := flattenNestedComputeGlobalNetworkEndpointFqdn(item["fqdn"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemFqdn)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedFqdn))) && !reflect.DeepEqual(itemFqdn, expectedFlattenedFqdn) { + log.Printf("[DEBUG] Skipping item with fqdn= %#v, looking for %#v)", itemFqdn, expectedFlattenedFqdn) + continue + } + itemPort := flattenNestedComputeGlobalNetworkEndpointPort(item["port"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemPort)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedPort))) && !reflect.DeepEqual(itemPort, expectedFlattenedPort) { + log.Printf("[DEBUG] Skipping item with port= %#v, looking for %#v)", itemPort, expectedFlattenedPort) + continue + } + log.Printf("[DEBUG] 
Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} +func resourceComputeGlobalNetworkEndpointDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + v, ok := res["networkEndpoint"] + if !ok || v == nil { + return res, nil + } + + return v.(map[string]interface{}), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_network_endpoint_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_network_endpoint_group.go new file mode 100644 index 0000000000..c344761368 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_network_endpoint_group.go @@ -0,0 +1,360 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeGlobalNetworkEndpointGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeGlobalNetworkEndpointGroupCreate, + Read: resourceComputeGlobalNetworkEndpointGroupRead, + Delete: resourceComputeGlobalNetworkEndpointGroupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeGlobalNetworkEndpointGroupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the resource; provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "network_endpoint_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"INTERNET_IP_PORT", "INTERNET_FQDN_PORT"}), + Description: `Type of network endpoints in this network endpoint group. 
Possible values: ["INTERNET_IP_PORT", "INTERNET_FQDN_PORT"]`, + }, + "default_port": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The default port used if the port number is not specified in the +network endpoint.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource. Provide this property when +you create the resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeGlobalNetworkEndpointGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandComputeGlobalNetworkEndpointGroupName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandComputeGlobalNetworkEndpointGroupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + networkEndpointTypeProp, err := expandComputeGlobalNetworkEndpointGroupNetworkEndpointType(d.Get("network_endpoint_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network_endpoint_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkEndpointTypeProp)) && (ok || !reflect.DeepEqual(v, networkEndpointTypeProp)) { + obj["networkEndpointType"] = 
networkEndpointTypeProp + } + defaultPortProp, err := expandComputeGlobalNetworkEndpointGroupDefaultPort(d.Get("default_port"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_port"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultPortProp)) && (ok || !reflect.DeepEqual(v, defaultPortProp)) { + obj["defaultPort"] = defaultPortProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new GlobalNetworkEndpointGroup: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GlobalNetworkEndpointGroup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating GlobalNetworkEndpointGroup: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/networkEndpointGroups/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating GlobalNetworkEndpointGroup", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create GlobalNetworkEndpointGroup: %s", err) + } + + log.Printf("[DEBUG] Finished creating GlobalNetworkEndpointGroup %q: %#v", d.Id(), res) + + return 
resourceComputeGlobalNetworkEndpointGroupRead(d, meta) +} + +func resourceComputeGlobalNetworkEndpointGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GlobalNetworkEndpointGroup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeGlobalNetworkEndpointGroup %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) + } + + if err := d.Set("name", flattenComputeGlobalNetworkEndpointGroupName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) + } + if err := d.Set("description", flattenComputeGlobalNetworkEndpointGroupDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) + } + if err := d.Set("network_endpoint_type", flattenComputeGlobalNetworkEndpointGroupNetworkEndpointType(res["networkEndpointType"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) + } + if err := d.Set("default_port", 
flattenComputeGlobalNetworkEndpointGroupDefaultPort(res["defaultPort"], d, config)); err != nil { + return fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) + } + + return nil +} + +func resourceComputeGlobalNetworkEndpointGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GlobalNetworkEndpointGroup: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting GlobalNetworkEndpointGroup %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "GlobalNetworkEndpointGroup") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting GlobalNetworkEndpointGroup", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting GlobalNetworkEndpointGroup %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeGlobalNetworkEndpointGroupImport(d 
*schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/networkEndpointGroups/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/networkEndpointGroups/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeGlobalNetworkEndpointGroupName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalNetworkEndpointGroupDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalNetworkEndpointGroupNetworkEndpointType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeGlobalNetworkEndpointGroupDefaultPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandComputeGlobalNetworkEndpointGroupName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalNetworkEndpointGroupDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandComputeGlobalNetworkEndpointGroupNetworkEndpointType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeGlobalNetworkEndpointGroupDefaultPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_network_endpoint_group_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_network_endpoint_group_sweeper.go new file mode 100644 index 0000000000..7e4e97c4b3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_global_network_endpoint_group_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeGlobalNetworkEndpointGroup", testSweepComputeGlobalNetworkEndpointGroup) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeGlobalNetworkEndpointGroup(region string) error { + resourceName := "ComputeGlobalNetworkEndpointGroup" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/networkEndpointGroups", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: 
config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/networkEndpointGroups/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ha_vpn_gateway.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ha_vpn_gateway.go new file mode 100644 index 0000000000..b61d31026a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ha_vpn_gateway.go @@ -0,0 +1,536 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeHaVpnGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeHaVpnGatewayCreate, + Read: resourceComputeHaVpnGatewayRead, + Delete: resourceComputeHaVpnGatewayDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeHaVpnGatewayImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the resource. 
Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and +match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means +the first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The network this VPN gateway is accepting traffic for.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The region this gateway should sit in.`, + }, + "stack_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"IPV4_ONLY", "IPV4_IPV6", ""}), + Description: `The stack type for this VPN gateway to identify the IP protocols that are enabled. +If not specified, IPV4_ONLY will be used. Default value: "IPV4_ONLY" Possible values: ["IPV4_ONLY", "IPV4_IPV6"]`, + Default: "IPV4_ONLY", + }, + "vpn_interfaces": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A list of interfaces on this VPN gateway.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The numeric ID of this VPN gateway interface.`, + }, + "interconnect_attachment": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the interconnect attachment resource. 
When the value +of this field is present, the VPN Gateway will be used for +IPsec-encrypted Cloud Interconnect; all Egress or Ingress +traffic for this VPN Gateway interface will go through the +specified interconnect attachment resource. + +Not currently available publicly.`, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `The external IP address for this VPN gateway interface.`, + }, + }, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeHaVpnGatewayCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeHaVpnGatewayDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeHaVpnGatewayName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + networkProp, err := expandComputeHaVpnGatewayNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + stackTypeProp, err := expandComputeHaVpnGatewayStackType(d.Get("stack_type"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("stack_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(stackTypeProp)) && (ok || !reflect.DeepEqual(v, stackTypeProp)) { + obj["stackType"] = stackTypeProp + } + vpnInterfacesProp, err := expandComputeHaVpnGatewayVpnInterfaces(d.Get("vpn_interfaces"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("vpn_interfaces"); !tpgresource.IsEmptyValue(reflect.ValueOf(vpnInterfacesProp)) && (ok || !reflect.DeepEqual(v, vpnInterfacesProp)) { + obj["vpnInterfaces"] = vpnInterfacesProp + } + regionProp, err := expandComputeHaVpnGatewayRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnGateways") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new HaVpnGateway: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HaVpnGateway: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating HaVpnGateway: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnGateways/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, 
res, project, "Creating HaVpnGateway", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create HaVpnGateway: %s", err) + } + + log.Printf("[DEBUG] Finished creating HaVpnGateway %q: %#v", d.Id(), res) + + return resourceComputeHaVpnGatewayRead(d, meta) +} + +func resourceComputeHaVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnGateways/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HaVpnGateway: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeHaVpnGateway %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading HaVpnGateway: %s", err) + } + + if err := d.Set("description", flattenComputeHaVpnGatewayDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading HaVpnGateway: %s", err) + } + if err := d.Set("name", flattenComputeHaVpnGatewayName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading HaVpnGateway: %s", err) + } + if err := d.Set("network", flattenComputeHaVpnGatewayNetwork(res["network"], d, config)); err 
!= nil { + return fmt.Errorf("Error reading HaVpnGateway: %s", err) + } + if err := d.Set("stack_type", flattenComputeHaVpnGatewayStackType(res["stackType"], d, config)); err != nil { + return fmt.Errorf("Error reading HaVpnGateway: %s", err) + } + if err := d.Set("vpn_interfaces", flattenComputeHaVpnGatewayVpnInterfaces(res["vpnInterfaces"], d, config)); err != nil { + return fmt.Errorf("Error reading HaVpnGateway: %s", err) + } + if err := d.Set("region", flattenComputeHaVpnGatewayRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading HaVpnGateway: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading HaVpnGateway: %s", err) + } + + return nil +} + +func resourceComputeHaVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HaVpnGateway: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnGateways/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting HaVpnGateway %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "HaVpnGateway") + } + + err = 
ComputeOperationWaitTime( + config, res, project, "Deleting HaVpnGateway", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting HaVpnGateway %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeHaVpnGatewayImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/vpnGateways/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnGateways/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeHaVpnGatewayDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeHaVpnGatewayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeHaVpnGatewayNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeHaVpnGatewayStackType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "IPV4_ONLY" + } + + return v +} + +func flattenComputeHaVpnGatewayVpnInterfaces(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { 
+ // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "id": flattenComputeHaVpnGatewayVpnInterfacesId(original["id"], d, config), + "ip_address": flattenComputeHaVpnGatewayVpnInterfacesIpAddress(original["ipAddress"], d, config), + "interconnect_attachment": flattenComputeHaVpnGatewayVpnInterfacesInterconnectAttachment(original["interconnectAttachment"], d, config), + }) + } + return transformed +} +func flattenComputeHaVpnGatewayVpnInterfacesId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeHaVpnGatewayVpnInterfacesIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeHaVpnGatewayVpnInterfacesInterconnectAttachment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeHaVpnGatewayRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandComputeHaVpnGatewayDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHaVpnGatewayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHaVpnGatewayNetwork(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeHaVpnGatewayStackType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHaVpnGatewayVpnInterfaces(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandComputeHaVpnGatewayVpnInterfacesId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedIpAddress, err := expandComputeHaVpnGatewayVpnInterfacesIpAddress(original["ip_address"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpAddress); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipAddress"] = transformedIpAddress + } + + transformedInterconnectAttachment, err := expandComputeHaVpnGatewayVpnInterfacesInterconnectAttachment(original["interconnect_attachment"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInterconnectAttachment); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["interconnectAttachment"] = transformedInterconnectAttachment + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeHaVpnGatewayVpnInterfacesId(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHaVpnGatewayVpnInterfacesIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHaVpnGatewayVpnInterfacesInterconnectAttachment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("interconnectAttachments", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for interconnect_attachment: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeHaVpnGatewayRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ha_vpn_gateway_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ha_vpn_gateway_sweeper.go new file mode 100644 index 0000000000..f740df3588 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ha_vpn_gateway_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeHaVpnGateway", testSweepComputeHaVpnGateway) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeHaVpnGateway(region string) error { + resourceName := "ComputeHaVpnGateway" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/vpnGateways", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/vpnGateways/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error 
deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_health_check.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_health_check.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_health_check.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_health_check.go index 7437d67a0d..96ccb4a2db 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_health_check.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_health_check.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "context" @@ -23,7 +26,12 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) // Whether the port should be set or not @@ -126,7 +134,9 @@ func ResourceComputeHealthCheck() *schema.Resource { Delete: schema.DefaultTimeout(20 * time.Minute), }, - CustomizeDiff: healthCheckCustomizeDiff, + CustomizeDiff: customdiff.All( + healthCheckCustomizeDiff, + ), Schema: map[string]*schema.Schema{ "name": { @@ -190,7 +200,7 @@ port_name are defined, port takes precedence.`, "port_specification": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), Description: `Specifies how port is selected for health checking, can be one of the following values: @@ -251,7 +261,7 @@ port_name are defined, port takes precedence.`, "port_specification": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), Description: `Specifies how port is selected for health checking, can be one of the following values: @@ -271,7 +281,7 @@ If not specified, HTTP2 health check follows behavior 
specified in 'port' and "proxy_header": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "PROXY_V1", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "PROXY_V1", ""}), Description: `Specifies the type of proxy header to append before sending data to the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, Default: "NONE", @@ -330,7 +340,7 @@ port_name are defined, port takes precedence.`, "port_specification": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), Description: `Specifies how port is selected for health checking, can be one of the following values: @@ -350,7 +360,7 @@ If not specified, HTTP health check follows behavior specified in 'port' and "proxy_header": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "PROXY_V1", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "PROXY_V1", ""}), Description: `Specifies the type of proxy header to append before sending data to the backend. 
Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, Default: "NONE", @@ -409,7 +419,7 @@ port_name are defined, port takes precedence.`, "port_specification": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), Description: `Specifies how port is selected for health checking, can be one of the following values: @@ -429,7 +439,7 @@ If not specified, HTTPS health check follows behavior specified in 'port' and "proxy_header": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "PROXY_V1", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "PROXY_V1", ""}), Description: `Specifies the type of proxy header to append before sending data to the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, Default: "NONE", @@ -498,7 +508,7 @@ port_name are defined, port takes precedence.`, "port_specification": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), Description: `Specifies how port is selected for health checking, can be one of the following values: @@ -518,7 +528,7 @@ If not specified, SSL health check follows behavior specified in 'port' and "proxy_header": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "PROXY_V1", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "PROXY_V1", ""}), Description: `Specifies the type of proxy header to append before sending data to the backend. 
Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, Default: "NONE", @@ -570,7 +580,7 @@ port_name are defined, port takes precedence.`, "port_specification": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), Description: `Specifies how port is selected for health checking, can be one of the following values: @@ -590,7 +600,7 @@ If not specified, TCP health check follows behavior specified in 'port' and "proxy_header": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "PROXY_V1", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "PROXY_V1", ""}), Description: `Specifies the type of proxy header to append before sending data to the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, Default: "NONE", @@ -658,8 +668,8 @@ consecutive failures. 
The default value is 2.`, } func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -668,7 +678,7 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) checkIntervalSecProp, err := expandComputeHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(reflect.ValueOf(checkIntervalSecProp)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { + } else if v, ok := d.GetOkExists("check_interval_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(checkIntervalSecProp)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { obj["checkIntervalSec"] = checkIntervalSecProp } descriptionProp, err := expandComputeHealthCheckDescription(d.Get("description"), d, config) @@ -680,67 +690,67 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) healthyThresholdProp, err := expandComputeHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(reflect.ValueOf(healthyThresholdProp)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { + } else if v, ok := d.GetOkExists("healthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(healthyThresholdProp)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { obj["healthyThreshold"] = healthyThresholdProp } nameProp, err := expandComputeHealthCheckName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } timeoutSecProp, err := expandComputeHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(reflect.ValueOf(timeoutSecProp)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { + } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeoutSecProp)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { obj["timeoutSec"] = timeoutSecProp } unhealthyThresholdProp, err := expandComputeHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(reflect.ValueOf(unhealthyThresholdProp)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { + } else if v, ok := d.GetOkExists("unhealthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(unhealthyThresholdProp)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { obj["unhealthyThreshold"] = unhealthyThresholdProp } httpHealthCheckProp, err := expandComputeHealthCheckHttpHealthCheck(d.Get("http_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("http_health_check"); !isEmptyValue(reflect.ValueOf(httpHealthCheckProp)) && (ok || !reflect.DeepEqual(v, httpHealthCheckProp)) { + } else if v, ok := d.GetOkExists("http_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(httpHealthCheckProp)) && (ok || !reflect.DeepEqual(v, httpHealthCheckProp)) { obj["httpHealthCheck"] = httpHealthCheckProp } httpsHealthCheckProp, err := expandComputeHealthCheckHttpsHealthCheck(d.Get("https_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("https_health_check"); !isEmptyValue(reflect.ValueOf(httpsHealthCheckProp)) && (ok || !reflect.DeepEqual(v, httpsHealthCheckProp)) { + } else if v, ok := 
d.GetOkExists("https_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(httpsHealthCheckProp)) && (ok || !reflect.DeepEqual(v, httpsHealthCheckProp)) { obj["httpsHealthCheck"] = httpsHealthCheckProp } tcpHealthCheckProp, err := expandComputeHealthCheckTcpHealthCheck(d.Get("tcp_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("tcp_health_check"); !isEmptyValue(reflect.ValueOf(tcpHealthCheckProp)) && (ok || !reflect.DeepEqual(v, tcpHealthCheckProp)) { + } else if v, ok := d.GetOkExists("tcp_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(tcpHealthCheckProp)) && (ok || !reflect.DeepEqual(v, tcpHealthCheckProp)) { obj["tcpHealthCheck"] = tcpHealthCheckProp } sslHealthCheckProp, err := expandComputeHealthCheckSslHealthCheck(d.Get("ssl_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("ssl_health_check"); !isEmptyValue(reflect.ValueOf(sslHealthCheckProp)) && (ok || !reflect.DeepEqual(v, sslHealthCheckProp)) { + } else if v, ok := d.GetOkExists("ssl_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(sslHealthCheckProp)) && (ok || !reflect.DeepEqual(v, sslHealthCheckProp)) { obj["sslHealthCheck"] = sslHealthCheckProp } http2HealthCheckProp, err := expandComputeHealthCheckHttp2HealthCheck(d.Get("http2_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("http2_health_check"); !isEmptyValue(reflect.ValueOf(http2HealthCheckProp)) && (ok || !reflect.DeepEqual(v, http2HealthCheckProp)) { + } else if v, ok := d.GetOkExists("http2_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(http2HealthCheckProp)) && (ok || !reflect.DeepEqual(v, http2HealthCheckProp)) { obj["http2HealthCheck"] = http2HealthCheckProp } grpcHealthCheckProp, err := expandComputeHealthCheckGrpcHealthCheck(d.Get("grpc_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("grpc_health_check"); 
!isEmptyValue(reflect.ValueOf(grpcHealthCheckProp)) && (ok || !reflect.DeepEqual(v, grpcHealthCheckProp)) { + } else if v, ok := d.GetOkExists("grpc_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(grpcHealthCheckProp)) && (ok || !reflect.DeepEqual(v, grpcHealthCheckProp)) { obj["grpcHealthCheck"] = grpcHealthCheckProp } logConfigProp, err := expandComputeHealthCheckLogConfig(d.Get("log_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(reflect.ValueOf(logConfigProp)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { + } else if v, ok := d.GetOkExists("log_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(logConfigProp)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { obj["logConfig"] = logConfigProp } @@ -749,7 +759,7 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/healthChecks") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/healthChecks") if err != nil { return err } @@ -757,24 +767,32 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) log.Printf("[DEBUG] Creating new HealthCheck: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for HealthCheck: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: 
billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating HealthCheck: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/healthChecks/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/healthChecks/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -796,33 +814,39 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) } func resourceComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/healthChecks/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/healthChecks/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for HealthCheck: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeHealthCheck %q", d.Id())) + return 
transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeHealthCheck %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -874,7 +898,7 @@ func resourceComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) er if err := d.Set("log_config", flattenComputeHealthCheckLogConfig(res["logConfig"], d, config)); err != nil { return fmt.Errorf("Error reading HealthCheck: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { return fmt.Errorf("Error reading HealthCheck: %s", err) } @@ -882,15 +906,15 @@ func resourceComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) er } func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for HealthCheck: %s", err) } @@ -900,7 +924,7 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) checkIntervalSecProp, err := expandComputeHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { + } else if v, ok := d.GetOkExists("check_interval_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { obj["checkIntervalSec"] = checkIntervalSecProp } descriptionProp, err := expandComputeHealthCheckDescription(d.Get("description"), d, config) @@ -912,67 
+936,67 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) healthyThresholdProp, err := expandComputeHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { + } else if v, ok := d.GetOkExists("healthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { obj["healthyThreshold"] = healthyThresholdProp } nameProp, err := expandComputeHealthCheckName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } timeoutSecProp, err := expandComputeHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { + } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { obj["timeoutSec"] = timeoutSecProp } unhealthyThresholdProp, err := expandComputeHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { + } else if v, ok := d.GetOkExists("unhealthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { obj["unhealthyThreshold"] = unhealthyThresholdProp } httpHealthCheckProp, err := 
expandComputeHealthCheckHttpHealthCheck(d.Get("http_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("http_health_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, httpHealthCheckProp)) { + } else if v, ok := d.GetOkExists("http_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, httpHealthCheckProp)) { obj["httpHealthCheck"] = httpHealthCheckProp } httpsHealthCheckProp, err := expandComputeHealthCheckHttpsHealthCheck(d.Get("https_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("https_health_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, httpsHealthCheckProp)) { + } else if v, ok := d.GetOkExists("https_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, httpsHealthCheckProp)) { obj["httpsHealthCheck"] = httpsHealthCheckProp } tcpHealthCheckProp, err := expandComputeHealthCheckTcpHealthCheck(d.Get("tcp_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("tcp_health_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tcpHealthCheckProp)) { + } else if v, ok := d.GetOkExists("tcp_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tcpHealthCheckProp)) { obj["tcpHealthCheck"] = tcpHealthCheckProp } sslHealthCheckProp, err := expandComputeHealthCheckSslHealthCheck(d.Get("ssl_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("ssl_health_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslHealthCheckProp)) { + } else if v, ok := d.GetOkExists("ssl_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslHealthCheckProp)) { obj["sslHealthCheck"] = sslHealthCheckProp } http2HealthCheckProp, err := 
expandComputeHealthCheckHttp2HealthCheck(d.Get("http2_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("http2_health_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, http2HealthCheckProp)) { + } else if v, ok := d.GetOkExists("http2_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, http2HealthCheckProp)) { obj["http2HealthCheck"] = http2HealthCheckProp } grpcHealthCheckProp, err := expandComputeHealthCheckGrpcHealthCheck(d.Get("grpc_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("grpc_health_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, grpcHealthCheckProp)) { + } else if v, ok := d.GetOkExists("grpc_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, grpcHealthCheckProp)) { obj["grpcHealthCheck"] = grpcHealthCheckProp } logConfigProp, err := expandComputeHealthCheckLogConfig(d.Get("log_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { + } else if v, ok := d.GetOkExists("log_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { obj["logConfig"] = logConfigProp } @@ -981,7 +1005,7 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/healthChecks/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/healthChecks/{{name}}") if err != nil { return err } @@ -989,11 +1013,19 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) log.Printf("[DEBUG] Updating HealthCheck %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, 
err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating HealthCheck %q: %s", d.Id(), err) @@ -1013,21 +1045,21 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) } func resourceComputeHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for HealthCheck: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/healthChecks/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/healthChecks/{{name}}") if err != nil { return err } @@ -1036,13 +1068,21 @@ func resourceComputeHealthCheckDelete(d *schema.ResourceData, meta interface{}) log.Printf("[DEBUG] Deleting HealthCheck %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, 
d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "HealthCheck") + return transport_tpg.HandleNotFoundError(err, d, "HealthCheck") } err = ComputeOperationWaitTime( @@ -1058,8 +1098,8 @@ func resourceComputeHealthCheckDelete(d *schema.ResourceData, meta interface{}) } func resourceComputeHealthCheckImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/global/healthChecks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -1068,7 +1108,7 @@ func resourceComputeHealthCheckImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/healthChecks/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/healthChecks/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -1077,10 +1117,10 @@ func resourceComputeHealthCheckImport(d *schema.ResourceData, meta interface{}) return []*schema.ResourceData{d}, nil } -func flattenComputeHealthCheckCheckIntervalSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckCheckIntervalSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1094,18 +1134,18 @@ func flattenComputeHealthCheckCheckIntervalSec(v 
interface{}, d *schema.Resource return v // let terraform core handle it otherwise } -func flattenComputeHealthCheckCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHealthyThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHealthyThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1119,14 +1159,14 @@ func flattenComputeHealthCheckHealthyThreshold(v interface{}, d *schema.Resource return v // let terraform core handle it otherwise } -func flattenComputeHealthCheckName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckTimeoutSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1140,10 +1180,10 @@ func flattenComputeHealthCheckTimeoutSec(v interface{}, d 
*schema.ResourceData, return v // let terraform core handle it otherwise } -func flattenComputeHealthCheckUnhealthyThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckUnhealthyThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1157,11 +1197,11 @@ func flattenComputeHealthCheckUnhealthyThreshold(v interface{}, d *schema.Resour return v // let terraform core handle it otherwise } -func flattenComputeHealthCheckType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttpHealthCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpHealthCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1186,22 +1226,22 @@ func flattenComputeHealthCheckHttpHealthCheck(v interface{}, d *schema.ResourceD flattenComputeHealthCheckHttpHealthCheckPortSpecification(original["portSpecification"], d, config) return []interface{}{transformed} } -func flattenComputeHealthCheckHttpHealthCheckHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpHealthCheckHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttpHealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpHealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } 
-func flattenComputeHealthCheckHttpHealthCheckResponse(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpHealthCheckResponse(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttpHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpHealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1215,19 +1255,19 @@ func flattenComputeHealthCheckHttpHealthCheckPort(v interface{}, d *schema.Resou return v // let terraform core handle it otherwise } -func flattenComputeHealthCheckHttpHealthCheckPortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpHealthCheckPortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttpHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttpHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttpsHealthCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpsHealthCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v 
== nil { return nil } @@ -1252,22 +1292,22 @@ func flattenComputeHealthCheckHttpsHealthCheck(v interface{}, d *schema.Resource flattenComputeHealthCheckHttpsHealthCheckPortSpecification(original["portSpecification"], d, config) return []interface{}{transformed} } -func flattenComputeHealthCheckHttpsHealthCheckHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpsHealthCheckHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttpsHealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpsHealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttpsHealthCheckResponse(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpsHealthCheckResponse(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttpsHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpsHealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1281,19 +1321,19 @@ func flattenComputeHealthCheckHttpsHealthCheckPort(v interface{}, d *schema.Reso return v // let terraform core handle it otherwise } -func flattenComputeHealthCheckHttpsHealthCheckPortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpsHealthCheckPortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeHealthCheckHttpsHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpsHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttpsHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttpsHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckTcpHealthCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckTcpHealthCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1316,18 +1356,18 @@ func flattenComputeHealthCheckTcpHealthCheck(v interface{}, d *schema.ResourceDa flattenComputeHealthCheckTcpHealthCheckPortSpecification(original["portSpecification"], d, config) return []interface{}{transformed} } -func flattenComputeHealthCheckTcpHealthCheckRequest(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckTcpHealthCheckRequest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckTcpHealthCheckResponse(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckTcpHealthCheckResponse(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckTcpHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckTcpHealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { 
+ if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1341,19 +1381,19 @@ func flattenComputeHealthCheckTcpHealthCheckPort(v interface{}, d *schema.Resour return v // let terraform core handle it otherwise } -func flattenComputeHealthCheckTcpHealthCheckPortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckTcpHealthCheckPortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckTcpHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckTcpHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckTcpHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckTcpHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckSslHealthCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckSslHealthCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1376,18 +1416,18 @@ func flattenComputeHealthCheckSslHealthCheck(v interface{}, d *schema.ResourceDa flattenComputeHealthCheckSslHealthCheckPortSpecification(original["portSpecification"], d, config) return []interface{}{transformed} } -func flattenComputeHealthCheckSslHealthCheckRequest(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckSslHealthCheckRequest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckSslHealthCheckResponse(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeHealthCheckSslHealthCheckResponse(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckSslHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckSslHealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1401,19 +1441,19 @@ func flattenComputeHealthCheckSslHealthCheckPort(v interface{}, d *schema.Resour return v // let terraform core handle it otherwise } -func flattenComputeHealthCheckSslHealthCheckPortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckSslHealthCheckPortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckSslHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckSslHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckSslHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckSslHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttp2HealthCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttp2HealthCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1438,22 +1478,22 @@ func flattenComputeHealthCheckHttp2HealthCheck(v interface{}, d *schema.Resource 
flattenComputeHealthCheckHttp2HealthCheckPortSpecification(original["portSpecification"], d, config) return []interface{}{transformed} } -func flattenComputeHealthCheckHttp2HealthCheckHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttp2HealthCheckHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttp2HealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttp2HealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttp2HealthCheckResponse(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttp2HealthCheckResponse(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttp2HealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttp2HealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1467,19 +1507,19 @@ func flattenComputeHealthCheckHttp2HealthCheckPort(v interface{}, d *schema.Reso return v // let terraform core handle it otherwise } -func flattenComputeHealthCheckHttp2HealthCheckPortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttp2HealthCheckPortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttp2HealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeHealthCheckHttp2HealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckHttp2HealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckHttp2HealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckGrpcHealthCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckGrpcHealthCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1498,10 +1538,10 @@ func flattenComputeHealthCheckGrpcHealthCheck(v interface{}, d *schema.ResourceD flattenComputeHealthCheckGrpcHealthCheckGrpcServiceName(original["grpcServiceName"], d, config) return []interface{}{transformed} } -func flattenComputeHealthCheckGrpcHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckGrpcHealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1515,19 +1555,19 @@ func flattenComputeHealthCheckGrpcHealthCheckPort(v interface{}, d *schema.Resou return v // let terraform core handle it otherwise } -func flattenComputeHealthCheckGrpcHealthCheckPortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckGrpcHealthCheckPortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckGrpcHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeHealthCheckGrpcHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckGrpcHealthCheckGrpcServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckGrpcHealthCheckGrpcServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeHealthCheckLogConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeHealthCheckLogConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { transformed := make(map[string]interface{}) if v == nil { // Disabled by default, but API will not return object if value is false @@ -1540,31 +1580,31 @@ func flattenComputeHealthCheckLogConfig(v interface{}, d *schema.ResourceData, c return []interface{}{transformed} } -func expandComputeHealthCheckCheckIntervalSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckCheckIntervalSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHealthyThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckName(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckUnhealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckUnhealthyThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpHealthCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1576,84 +1616,84 @@ func expandComputeHealthCheckHttpHealthCheck(v interface{}, d TerraformResourceD transformedHost, err := expandComputeHealthCheckHttpHealthCheckHost(original["host"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["host"] = transformedHost } transformedRequestPath, err := expandComputeHealthCheckHttpHealthCheckRequestPath(original["request_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestPath"] = transformedRequestPath } transformedResponse, err := 
expandComputeHealthCheckHttpHealthCheckResponse(original["response"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["response"] = transformedResponse } transformedPort, err := expandComputeHealthCheckHttpHealthCheckPort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedPortName, err := expandComputeHealthCheckHttpHealthCheckPortName(original["port_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portName"] = transformedPortName } transformedProxyHeader, err := expandComputeHealthCheckHttpHealthCheckProxyHeader(original["proxy_header"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["proxyHeader"] = transformedProxyHeader } transformedPortSpecification, err := expandComputeHealthCheckHttpHealthCheckPortSpecification(original["port_specification"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portSpecification"] = transformedPortSpecification } return 
transformed, nil } -func expandComputeHealthCheckHttpHealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpHealthCheckHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpHealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpHealthCheckRequestPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpHealthCheckResponse(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpHealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpHealthCheckPortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpHealthCheckProxyHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { +func expandComputeHealthCheckHttpHealthCheckPortSpecification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpsHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpsHealthCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1665,84 +1705,84 @@ func expandComputeHealthCheckHttpsHealthCheck(v interface{}, d TerraformResource transformedHost, err := expandComputeHealthCheckHttpsHealthCheckHost(original["host"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["host"] = transformedHost } transformedRequestPath, err := expandComputeHealthCheckHttpsHealthCheckRequestPath(original["request_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestPath"] = transformedRequestPath } transformedResponse, err := expandComputeHealthCheckHttpsHealthCheckResponse(original["response"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["response"] = transformedResponse } transformedPort, err := expandComputeHealthCheckHttpsHealthCheckPort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedPortName, err := expandComputeHealthCheckHttpsHealthCheckPortName(original["port_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portName"] = transformedPortName } transformedProxyHeader, err := expandComputeHealthCheckHttpsHealthCheckProxyHeader(original["proxy_header"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["proxyHeader"] = transformedProxyHeader } transformedPortSpecification, err := expandComputeHealthCheckHttpsHealthCheckPortSpecification(original["port_specification"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portSpecification"] = transformedPortSpecification } return transformed, nil } -func expandComputeHealthCheckHttpsHealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpsHealthCheckHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpsHealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpsHealthCheckRequestPath(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpsHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpsHealthCheckResponse(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpsHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpsHealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpsHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpsHealthCheckPortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpsHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpsHealthCheckProxyHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttpsHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttpsHealthCheckPortSpecification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckTcpHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckTcpHealthCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { 
return nil, nil @@ -1754,73 +1794,73 @@ func expandComputeHealthCheckTcpHealthCheck(v interface{}, d TerraformResourceDa transformedRequest, err := expandComputeHealthCheckTcpHealthCheckRequest(original["request"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequest); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequest); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["request"] = transformedRequest } transformedResponse, err := expandComputeHealthCheckTcpHealthCheckResponse(original["response"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["response"] = transformedResponse } transformedPort, err := expandComputeHealthCheckTcpHealthCheckPort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedPortName, err := expandComputeHealthCheckTcpHealthCheckPortName(original["port_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portName"] = transformedPortName } transformedProxyHeader, err := expandComputeHealthCheckTcpHealthCheckProxyHeader(original["proxy_header"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["proxyHeader"] = transformedProxyHeader } transformedPortSpecification, err := expandComputeHealthCheckTcpHealthCheckPortSpecification(original["port_specification"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portSpecification"] = transformedPortSpecification } return transformed, nil } -func expandComputeHealthCheckTcpHealthCheckRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckTcpHealthCheckRequest(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckTcpHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckTcpHealthCheckResponse(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckTcpHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckTcpHealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckTcpHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckTcpHealthCheckPortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckTcpHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckTcpHealthCheckProxyHeader(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckTcpHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckTcpHealthCheckPortSpecification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckSslHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckSslHealthCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1832,73 +1872,73 @@ func expandComputeHealthCheckSslHealthCheck(v interface{}, d TerraformResourceDa transformedRequest, err := expandComputeHealthCheckSslHealthCheckRequest(original["request"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequest); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequest); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["request"] = transformedRequest } transformedResponse, err := expandComputeHealthCheckSslHealthCheckResponse(original["response"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["response"] = transformedResponse } transformedPort, err := expandComputeHealthCheckSslHealthCheckPort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = 
transformedPort } transformedPortName, err := expandComputeHealthCheckSslHealthCheckPortName(original["port_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portName"] = transformedPortName } transformedProxyHeader, err := expandComputeHealthCheckSslHealthCheckProxyHeader(original["proxy_header"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["proxyHeader"] = transformedProxyHeader } transformedPortSpecification, err := expandComputeHealthCheckSslHealthCheckPortSpecification(original["port_specification"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portSpecification"] = transformedPortSpecification } return transformed, nil } -func expandComputeHealthCheckSslHealthCheckRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckSslHealthCheckRequest(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckSslHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckSslHealthCheckResponse(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckSslHealthCheckPort(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { +func expandComputeHealthCheckSslHealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckSslHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckSslHealthCheckPortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckSslHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckSslHealthCheckProxyHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckSslHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckSslHealthCheckPortSpecification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttp2HealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttp2HealthCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1910,84 +1950,84 @@ func expandComputeHealthCheckHttp2HealthCheck(v interface{}, d TerraformResource transformedHost, err := expandComputeHealthCheckHttp2HealthCheckHost(original["host"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["host"] = transformedHost } transformedRequestPath, err := 
expandComputeHealthCheckHttp2HealthCheckRequestPath(original["request_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestPath"] = transformedRequestPath } transformedResponse, err := expandComputeHealthCheckHttp2HealthCheckResponse(original["response"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["response"] = transformedResponse } transformedPort, err := expandComputeHealthCheckHttp2HealthCheckPort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedPortName, err := expandComputeHealthCheckHttp2HealthCheckPortName(original["port_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portName"] = transformedPortName } transformedProxyHeader, err := expandComputeHealthCheckHttp2HealthCheckProxyHeader(original["proxy_header"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["proxyHeader"] = transformedProxyHeader } transformedPortSpecification, err := 
expandComputeHealthCheckHttp2HealthCheckPortSpecification(original["port_specification"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portSpecification"] = transformedPortSpecification } return transformed, nil } -func expandComputeHealthCheckHttp2HealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttp2HealthCheckHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttp2HealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttp2HealthCheckRequestPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttp2HealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttp2HealthCheckResponse(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttp2HealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttp2HealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttp2HealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttp2HealthCheckPortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeHealthCheckHttp2HealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttp2HealthCheckProxyHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckHttp2HealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckHttp2HealthCheckPortSpecification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckGrpcHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckGrpcHealthCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1999,51 +2039,51 @@ func expandComputeHealthCheckGrpcHealthCheck(v interface{}, d TerraformResourceD transformedPort, err := expandComputeHealthCheckGrpcHealthCheckPort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedPortName, err := expandComputeHealthCheckGrpcHealthCheckPortName(original["port_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portName"] = transformedPortName } transformedPortSpecification, err := expandComputeHealthCheckGrpcHealthCheckPortSpecification(original["port_specification"], d, config) if err != nil { 
return nil, err - } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portSpecification"] = transformedPortSpecification } transformedGrpcServiceName, err := expandComputeHealthCheckGrpcHealthCheckGrpcServiceName(original["grpc_service_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGrpcServiceName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGrpcServiceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["grpcServiceName"] = transformedGrpcServiceName } return transformed, nil } -func expandComputeHealthCheckGrpcHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckGrpcHealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckGrpcHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckGrpcHealthCheckPortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckGrpcHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckGrpcHealthCheckPortSpecification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeHealthCheckGrpcHealthCheckGrpcServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckGrpcHealthCheckGrpcServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandComputeHealthCheckLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckLogConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2055,14 +2095,14 @@ func expandComputeHealthCheckLogConfig(v interface{}, d TerraformResourceData, c transformedEnable, err := expandComputeHealthCheckLogConfigEnable(original["enable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enable"] = transformedEnable } return transformed, nil } -func expandComputeHealthCheckLogConfigEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeHealthCheckLogConfigEnable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_health_check_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_health_check_sweeper.go new file mode 100644 index 0000000000..055c0df0fa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_health_check_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeHealthCheck", testSweepComputeHealthCheck) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeHealthCheck(region string) error { + resourceName := "ComputeHealthCheck" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/healthChecks", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/healthChecks/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", 
deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_http_health_check.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_http_health_check.go new file mode 100644 index 0000000000..98ecb4dd95 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_http_health_check.go @@ -0,0 +1,654 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeHttpHealthCheck() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeHttpHealthCheckCreate, + Read: resourceComputeHttpHealthCheckRead, + Update: resourceComputeHttpHealthCheckUpdate, + Delete: resourceComputeHttpHealthCheckDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeHttpHealthCheckImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and +match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means +the first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the +last character, which cannot be a dash.`, + }, + "check_interval_sec": { + Type: schema.TypeInt, + Optional: true, + Description: `How often (in seconds) to send a health check. The default value is 5 +seconds.`, + Default: 5, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An optional description of this resource. 
Provide this property when +you create the resource.`, + }, + "healthy_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: `A so-far unhealthy instance will be marked healthy after this many +consecutive successes. The default value is 2.`, + Default: 2, + }, + "host": { + Type: schema.TypeString, + Optional: true, + Description: `The value of the host header in the HTTP health check request. If +left empty (default value), the public IP on behalf of which this +health check is performed will be used.`, + }, + "port": { + Type: schema.TypeInt, + Optional: true, + Description: `The TCP port number for the HTTP health check request. +The default value is 80.`, + Default: 80, + }, + "request_path": { + Type: schema.TypeString, + Optional: true, + Description: `The request path of the HTTP health check request. +The default value is /.`, + Default: "/", + }, + "timeout_sec": { + Type: schema.TypeInt, + Optional: true, + Description: `How long (in seconds) to wait before claiming failure. +The default value is 5 seconds. It is invalid for timeoutSec to have +greater value than checkIntervalSec.`, + Default: 5, + }, + "unhealthy_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: `A so-far healthy instance will be marked unhealthy after this many +consecutive failures. 
The default value is 2.`, + Default: 2, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + checkIntervalSecProp, err := expandComputeHttpHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("check_interval_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(checkIntervalSecProp)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { + obj["checkIntervalSec"] = checkIntervalSecProp + } + descriptionProp, err := expandComputeHttpHealthCheckDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + healthyThresholdProp, err := expandComputeHttpHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("healthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(healthyThresholdProp)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { + obj["healthyThreshold"] = healthyThresholdProp + } + hostProp, err := expandComputeHttpHealthCheckHost(d.Get("host"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("host"); !tpgresource.IsEmptyValue(reflect.ValueOf(hostProp)) && (ok || 
!reflect.DeepEqual(v, hostProp)) { + obj["host"] = hostProp + } + nameProp, err := expandComputeHttpHealthCheckName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + portProp, err := expandComputeHttpHealthCheckPort(d.Get("port"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("port"); !tpgresource.IsEmptyValue(reflect.ValueOf(portProp)) && (ok || !reflect.DeepEqual(v, portProp)) { + obj["port"] = portProp + } + requestPathProp, err := expandComputeHttpHealthCheckRequestPath(d.Get("request_path"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("request_path"); !tpgresource.IsEmptyValue(reflect.ValueOf(requestPathProp)) && (ok || !reflect.DeepEqual(v, requestPathProp)) { + obj["requestPath"] = requestPathProp + } + timeoutSecProp, err := expandComputeHttpHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeoutSecProp)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { + obj["timeoutSec"] = timeoutSecProp + } + unhealthyThresholdProp, err := expandComputeHttpHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("unhealthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(unhealthyThresholdProp)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { + obj["unhealthyThreshold"] = unhealthyThresholdProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpHealthChecks") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new HttpHealthCheck: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { 
+ return fmt.Errorf("Error fetching project for HttpHealthCheck: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating HttpHealthCheck: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/httpHealthChecks/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating HttpHealthCheck", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create HttpHealthCheck: %s", err) + } + + log.Printf("[DEBUG] Finished creating HttpHealthCheck %q: %#v", d.Id(), res) + + return resourceComputeHttpHealthCheckRead(d, meta) +} + +func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpHealthChecks/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HttpHealthCheck: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + 
billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeHttpHealthCheck %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + + if err := d.Set("check_interval_sec", flattenComputeHttpHealthCheckCheckIntervalSec(res["checkIntervalSec"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeHttpHealthCheckCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + if err := d.Set("description", flattenComputeHttpHealthCheckDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + if err := d.Set("healthy_threshold", flattenComputeHttpHealthCheckHealthyThreshold(res["healthyThreshold"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + if err := d.Set("host", flattenComputeHttpHealthCheckHost(res["host"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + if err := d.Set("name", flattenComputeHttpHealthCheckName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + if err := d.Set("port", flattenComputeHttpHealthCheckPort(res["port"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + if err := d.Set("request_path", flattenComputeHttpHealthCheckRequestPath(res["requestPath"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + if err := d.Set("timeout_sec", 
flattenComputeHttpHealthCheckTimeoutSec(res["timeoutSec"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + if err := d.Set("unhealthy_threshold", flattenComputeHttpHealthCheckUnhealthyThreshold(res["unhealthyThreshold"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + + return nil +} + +func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HttpHealthCheck: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + checkIntervalSecProp, err := expandComputeHttpHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("check_interval_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { + obj["checkIntervalSec"] = checkIntervalSecProp + } + descriptionProp, err := expandComputeHttpHealthCheckDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + healthyThresholdProp, err := expandComputeHttpHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("healthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, healthyThresholdProp)) { + obj["healthyThreshold"] = healthyThresholdProp + } + hostProp, err := expandComputeHttpHealthCheckHost(d.Get("host"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("host"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hostProp)) { + obj["host"] = hostProp + } + nameProp, err := expandComputeHttpHealthCheckName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + portProp, err := expandComputeHttpHealthCheckPort(d.Get("port"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("port"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, portProp)) { + obj["port"] = portProp + } + requestPathProp, err := expandComputeHttpHealthCheckRequestPath(d.Get("request_path"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("request_path"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, requestPathProp)) { + obj["requestPath"] = requestPathProp + } + timeoutSecProp, err := expandComputeHttpHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { + obj["timeoutSec"] = timeoutSecProp + } + unhealthyThresholdProp, err := expandComputeHttpHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("unhealthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { + obj["unhealthyThreshold"] = unhealthyThresholdProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/global/httpHealthChecks/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating HttpHealthCheck %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating HttpHealthCheck %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating HttpHealthCheck %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating HttpHealthCheck", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeHttpHealthCheckRead(d, meta) +} + +func resourceComputeHttpHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HttpHealthCheck: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpHealthChecks/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting HttpHealthCheck %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + 
Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "HttpHealthCheck") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting HttpHealthCheck", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting HttpHealthCheck %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeHttpHealthCheckImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/httpHealthChecks/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/httpHealthChecks/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeHttpHealthCheckCheckIntervalSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeHttpHealthCheckCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeHttpHealthCheckDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeHttpHealthCheckHealthyThreshold(v interface{}, 
d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeHttpHealthCheckHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeHttpHealthCheckName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeHttpHealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeHttpHealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeHttpHealthCheckTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeHttpHealthCheckUnhealthyThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 
format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandComputeHttpHealthCheckCheckIntervalSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpHealthCheckDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpHealthCheckHealthyThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpHealthCheckHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpHealthCheckName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpHealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpHealthCheckRequestPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpHealthCheckTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpHealthCheckUnhealthyThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_http_health_check_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_http_health_check_sweeper.go new file mode 100644 index 0000000000..ef5c59c240 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_http_health_check_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeHttpHealthCheck", testSweepComputeHttpHealthCheck) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeHttpHealthCheck(region string) error { + resourceName := "ComputeHttpHealthCheck" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/httpHealthChecks", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/httpHealthChecks/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_https_health_check.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_https_health_check.go new file mode 100644 index 0000000000..e3eeda14a5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_https_health_check.go @@ -0,0 +1,654 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeHttpsHealthCheck() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeHttpsHealthCheckCreate, + Read: resourceComputeHttpsHealthCheckRead, + Update: resourceComputeHttpsHealthCheckUpdate, + Delete: resourceComputeHttpsHealthCheckDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeHttpsHealthCheckImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. 
Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and +match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means +the first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the +last character, which cannot be a dash.`, + }, + "check_interval_sec": { + Type: schema.TypeInt, + Optional: true, + Description: `How often (in seconds) to send a health check. The default value is 5 +seconds.`, + Default: 5, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An optional description of this resource. Provide this property when +you create the resource.`, + }, + "healthy_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: `A so-far unhealthy instance will be marked healthy after this many +consecutive successes. The default value is 2.`, + Default: 2, + }, + "host": { + Type: schema.TypeString, + Optional: true, + Description: `The value of the host header in the HTTPS health check request. If +left empty (default value), the public IP on behalf of which this +health check is performed will be used.`, + }, + "port": { + Type: schema.TypeInt, + Optional: true, + Description: `The TCP port number for the HTTPS health check request. +The default value is 443.`, + Default: 443, + }, + "request_path": { + Type: schema.TypeString, + Optional: true, + Description: `The request path of the HTTPS health check request. +The default value is /.`, + Default: "/", + }, + "timeout_sec": { + Type: schema.TypeInt, + Optional: true, + Description: `How long (in seconds) to wait before claiming failure. +The default value is 5 seconds. 
It is invalid for timeoutSec to have +greater value than checkIntervalSec.`, + Default: 5, + }, + "unhealthy_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: `A so-far healthy instance will be marked unhealthy after this many +consecutive failures. The default value is 2.`, + Default: 2, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeHttpsHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + checkIntervalSecProp, err := expandComputeHttpsHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("check_interval_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(checkIntervalSecProp)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { + obj["checkIntervalSec"] = checkIntervalSecProp + } + descriptionProp, err := expandComputeHttpsHealthCheckDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + healthyThresholdProp, err := expandComputeHttpsHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("healthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(healthyThresholdProp)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { + 
obj["healthyThreshold"] = healthyThresholdProp + } + hostProp, err := expandComputeHttpsHealthCheckHost(d.Get("host"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("host"); !tpgresource.IsEmptyValue(reflect.ValueOf(hostProp)) && (ok || !reflect.DeepEqual(v, hostProp)) { + obj["host"] = hostProp + } + nameProp, err := expandComputeHttpsHealthCheckName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + portProp, err := expandComputeHttpsHealthCheckPort(d.Get("port"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("port"); !tpgresource.IsEmptyValue(reflect.ValueOf(portProp)) && (ok || !reflect.DeepEqual(v, portProp)) { + obj["port"] = portProp + } + requestPathProp, err := expandComputeHttpsHealthCheckRequestPath(d.Get("request_path"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("request_path"); !tpgresource.IsEmptyValue(reflect.ValueOf(requestPathProp)) && (ok || !reflect.DeepEqual(v, requestPathProp)) { + obj["requestPath"] = requestPathProp + } + timeoutSecProp, err := expandComputeHttpsHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeoutSecProp)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { + obj["timeoutSec"] = timeoutSecProp + } + unhealthyThresholdProp, err := expandComputeHttpsHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("unhealthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(unhealthyThresholdProp)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { + obj["unhealthyThreshold"] = unhealthyThresholdProp + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpsHealthChecks") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new HttpsHealthCheck: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HttpsHealthCheck: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating HttpsHealthCheck: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/httpsHealthChecks/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating HttpsHealthCheck", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create HttpsHealthCheck: %s", err) + } + + log.Printf("[DEBUG] Finished creating HttpsHealthCheck %q: %#v", d.Id(), res) + + return resourceComputeHttpsHealthCheckRead(d, meta) +} + +func resourceComputeHttpsHealthCheckRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpsHealthChecks/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err 
:= tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HttpsHealthCheck: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeHttpsHealthCheck %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + } + + if err := d.Set("check_interval_sec", flattenComputeHttpsHealthCheckCheckIntervalSec(res["checkIntervalSec"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeHttpsHealthCheckCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + } + if err := d.Set("description", flattenComputeHttpsHealthCheckDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + } + if err := d.Set("healthy_threshold", flattenComputeHttpsHealthCheckHealthyThreshold(res["healthyThreshold"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + } + if err := d.Set("host", flattenComputeHttpsHealthCheckHost(res["host"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + } + if err := d.Set("name", flattenComputeHttpsHealthCheckName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + } + if err := d.Set("port", flattenComputeHttpsHealthCheckPort(res["port"], d, config)); err != nil { + 
return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + } + if err := d.Set("request_path", flattenComputeHttpsHealthCheckRequestPath(res["requestPath"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + } + if err := d.Set("timeout_sec", flattenComputeHttpsHealthCheckTimeoutSec(res["timeoutSec"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + } + if err := d.Set("unhealthy_threshold", flattenComputeHttpsHealthCheckUnhealthyThreshold(res["unhealthyThreshold"], d, config)); err != nil { + return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + } + + return nil +} + +func resourceComputeHttpsHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HttpsHealthCheck: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + checkIntervalSecProp, err := expandComputeHttpsHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("check_interval_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { + obj["checkIntervalSec"] = checkIntervalSecProp + } + descriptionProp, err := expandComputeHttpsHealthCheckDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = 
descriptionProp + } + healthyThresholdProp, err := expandComputeHttpsHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("healthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { + obj["healthyThreshold"] = healthyThresholdProp + } + hostProp, err := expandComputeHttpsHealthCheckHost(d.Get("host"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("host"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hostProp)) { + obj["host"] = hostProp + } + nameProp, err := expandComputeHttpsHealthCheckName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + portProp, err := expandComputeHttpsHealthCheckPort(d.Get("port"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("port"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, portProp)) { + obj["port"] = portProp + } + requestPathProp, err := expandComputeHttpsHealthCheckRequestPath(d.Get("request_path"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("request_path"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, requestPathProp)) { + obj["requestPath"] = requestPathProp + } + timeoutSecProp, err := expandComputeHttpsHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { + obj["timeoutSec"] = timeoutSecProp + } + unhealthyThresholdProp, err := expandComputeHttpsHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) + if err != nil { + return err + 
} else if v, ok := d.GetOkExists("unhealthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { + obj["unhealthyThreshold"] = unhealthyThresholdProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpsHealthChecks/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating HttpsHealthCheck %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating HttpsHealthCheck %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating HttpsHealthCheck %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating HttpsHealthCheck", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeHttpsHealthCheckRead(d, meta) +} + +func resourceComputeHttpsHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HttpsHealthCheck: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpsHealthChecks/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting HttpsHealthCheck %q", d.Id()) + + // 
err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "HttpsHealthCheck") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting HttpsHealthCheck", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting HttpsHealthCheck %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeHttpsHealthCheckImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/httpsHealthChecks/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/httpsHealthChecks/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeHttpsHealthCheckCheckIntervalSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeHttpsHealthCheckCreationTimestamp(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeHttpsHealthCheckDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeHttpsHealthCheckHealthyThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeHttpsHealthCheckHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeHttpsHealthCheckName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeHttpsHealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeHttpsHealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeHttpsHealthCheckTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if 
floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeHttpsHealthCheckUnhealthyThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandComputeHttpsHealthCheckCheckIntervalSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpsHealthCheckDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpsHealthCheckHealthyThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpsHealthCheckHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpsHealthCheckName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpsHealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpsHealthCheckRequestPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeHttpsHealthCheckTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return 
v, nil +} + +func expandComputeHttpsHealthCheckUnhealthyThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_https_health_check_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_https_health_check_sweeper.go new file mode 100644 index 0000000000..1c256cc5d5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_https_health_check_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeHttpsHealthCheck", testSweepComputeHttpsHealthCheck) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeHttpsHealthCheck(region string) error { + resourceName := "ComputeHttpsHealthCheck" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/httpsHealthChecks", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/httpsHealthChecks/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_image.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_image.go new file mode 100644 index 0000000000..da26bd119f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_image.go @@ -0,0 +1,979 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeImage() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeImageCreate, + Read: resourceComputeImageRead, + Update: resourceComputeImageUpdate, + Delete: resourceComputeImageDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeImageImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource; provided by the client when 
the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and +match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means +the first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the +last character, which cannot be a dash.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource. Provide this property when +you create the resource.`, + }, + "disk_size_gb": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Size of the image when restored onto a persistent disk (in GB).`, + }, + "family": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the image family to which this image belongs. You can +create disks by specifying an image family instead of a specific +image name. The image family always returns its latest image that is +not deprecated. The name of the image family must comply with +RFC1035.`, + }, + "guest_os_features": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A list of features to enable on the guest operating system. +Applicable only for bootable images.`, + Elem: computeImageGuestOsFeaturesSchema(), + // Default schema.HashSchema is used. + }, + "image_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Encrypts the image using a customer-supplied encryption key. + +After you encrypt an image with a customer-supplied key, you must +provide the same key if you use the image later (e.g. 
to create a +disk from the image)`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_self_link": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The self link of the encryption key that is stored in Google Cloud +KMS.`, + }, + "kms_key_service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The service account being used for the encryption request for the +given KMS key. If absent, the Compute Engine default service +account is used.`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels to apply to this Image.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "licenses": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Any applicable license URI.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + "raw_disk": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The parameters of the raw disk image.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The full Google Cloud Storage URL where disk storage is stored +You must provide either this property or the sourceDisk property +but not both.`, + }, + "container_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"TAR", ""}), + Description: `The format used to encode and transmit the block device, which +should be TAR. This is just a container and transmission format +and not a runtime format. Provided by the client when the disk +image is created. 
Default value: "TAR" Possible values: ["TAR"]`, + Default: "TAR", + }, + "sha1": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional SHA1 checksum of the disk image before unpackaging. +This is provided by the client when the disk image is created.`, + }, + }, + }, + }, + "source_disk": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The source disk to create this image based on. +You must provide either this property or the +rawDisk.source property but not both to create an image.`, + }, + "source_image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the source image used to create this image. In order to create an image, you must provide the full or partial +URL of one of the following: + +* The selfLink URL +* This property +* The rawDisk.source URL +* The sourceDisk URL`, + }, + "source_snapshot": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the source snapshot used to create this image. + +In order to create an image, you must provide the full or partial URL of one of the following: + +* The selfLink URL +* This property +* The sourceImage URL +* The rawDisk.source URL +* The sourceDisk URL`, + }, + "storage_locations": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Cloud Storage bucket storage location of the image +(regional or multi-regional). 
+Reference link: https://cloud.google.com/compute/docs/reference/rest/v1/images`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "archive_size_bytes": { + Type: schema.TypeInt, + Computed: true, + Description: `Size of the image tar.gz archive stored in Google Cloud Storage (in +bytes).`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The fingerprint used for optimistic locking of this resource. Used +internally during updates.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func computeImageGuestOsFeaturesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS", "GVNIC", "SEV_LIVE_MIGRATABLE", "SEV_SNP_CAPABLE", "SUSPEND_RESUME_COMPATIBLE", "TDX_CAPABLE"}), + Description: `The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. 
Possible values: ["MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS", "GVNIC", "SEV_LIVE_MIGRATABLE", "SEV_SNP_CAPABLE", "SUSPEND_RESUME_COMPATIBLE", "TDX_CAPABLE"]`, + }, + }, + } +} + +func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeImageDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + storageLocationsProp, err := expandComputeImageStorageLocations(d.Get("storage_locations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("storage_locations"); !tpgresource.IsEmptyValue(reflect.ValueOf(storageLocationsProp)) && (ok || !reflect.DeepEqual(v, storageLocationsProp)) { + obj["storageLocations"] = storageLocationsProp + } + diskSizeGbProp, err := expandComputeImageDiskSizeGb(d.Get("disk_size_gb"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disk_size_gb"); !tpgresource.IsEmptyValue(reflect.ValueOf(diskSizeGbProp)) && (ok || !reflect.DeepEqual(v, diskSizeGbProp)) { + obj["diskSizeGb"] = diskSizeGbProp + } + familyProp, err := expandComputeImageFamily(d.Get("family"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("family"); !tpgresource.IsEmptyValue(reflect.ValueOf(familyProp)) && (ok || !reflect.DeepEqual(v, familyProp)) { + obj["family"] = familyProp + } + guestOsFeaturesProp, err := expandComputeImageGuestOsFeatures(d.Get("guest_os_features"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("guest_os_features"); !tpgresource.IsEmptyValue(reflect.ValueOf(guestOsFeaturesProp)) && (ok || !reflect.DeepEqual(v, guestOsFeaturesProp)) { + obj["guestOsFeatures"] = guestOsFeaturesProp + } + imageEncryptionKeyProp, err := expandComputeImageImageEncryptionKey(d.Get("image_encryption_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("image_encryption_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(imageEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, imageEncryptionKeyProp)) { + obj["imageEncryptionKey"] = imageEncryptionKeyProp + } + labelsProp, err := expandComputeImageLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + labelFingerprintProp, err := expandComputeImageLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + licensesProp, err := expandComputeImageLicenses(d.Get("licenses"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("licenses"); !tpgresource.IsEmptyValue(reflect.ValueOf(licensesProp)) && (ok || !reflect.DeepEqual(v, licensesProp)) { + obj["licenses"] = licensesProp + } + nameProp, err := expandComputeImageName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + rawDiskProp, err := expandComputeImageRawDisk(d.Get("raw_disk"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("raw_disk"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(rawDiskProp)) && (ok || !reflect.DeepEqual(v, rawDiskProp)) { + obj["rawDisk"] = rawDiskProp + } + sourceDiskProp, err := expandComputeImageSourceDisk(d.Get("source_disk"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_disk"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceDiskProp)) && (ok || !reflect.DeepEqual(v, sourceDiskProp)) { + obj["sourceDisk"] = sourceDiskProp + } + sourceImageProp, err := expandComputeImageSourceImage(d.Get("source_image"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_image"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceImageProp)) && (ok || !reflect.DeepEqual(v, sourceImageProp)) { + obj["sourceImage"] = sourceImageProp + } + sourceSnapshotProp, err := expandComputeImageSourceSnapshot(d.Get("source_snapshot"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_snapshot"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceSnapshotProp)) && (ok || !reflect.DeepEqual(v, sourceSnapshotProp)) { + obj["sourceSnapshot"] = sourceSnapshotProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/images") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Image: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Image: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Image: %s", 
err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/images/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating Image", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Image: %s", err) + } + + log.Printf("[DEBUG] Finished creating Image %q: %#v", d.Id(), res) + + return resourceComputeImageRead(d, meta) +} + +func resourceComputeImageRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/images/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Image: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeImage %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + + if err := d.Set("archive_size_bytes", flattenComputeImageArchiveSizeBytes(res["archiveSizeBytes"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("creation_timestamp", 
flattenComputeImageCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("description", flattenComputeImageDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("storage_locations", flattenComputeImageStorageLocations(res["storageLocations"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("disk_size_gb", flattenComputeImageDiskSizeGb(res["diskSizeGb"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("family", flattenComputeImageFamily(res["family"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("guest_os_features", flattenComputeImageGuestOsFeatures(res["guestOsFeatures"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("image_encryption_key", flattenComputeImageImageEncryptionKey(res["imageEncryptionKey"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("labels", flattenComputeImageLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("label_fingerprint", flattenComputeImageLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("licenses", flattenComputeImageLicenses(res["licenses"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("name", flattenComputeImageName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("source_disk", flattenComputeImageSourceDisk(res["sourceDisk"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := 
d.Set("source_image", flattenComputeImageSourceImage(res["sourceImage"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("source_snapshot", flattenComputeImageSourceSnapshot(res["sourceSnapshot"], d, config)); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading Image: %s", err) + } + + return nil +} + +func resourceComputeImageUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Image: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("labels") || d.HasChange("label_fingerprint") { + obj := make(map[string]interface{}) + + labelsProp, err := expandComputeImageLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + labelFingerprintProp, err := expandComputeImageLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/images/{{name}}/setLabels") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + 
billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Image %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Image %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Image", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeImageRead(d, meta) +} + +func resourceComputeImageDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Image: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/images/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Image %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Image") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting Image", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + 
log.Printf("[DEBUG] Finished deleting Image %q: %#v", d.Id(), res)
+	return nil
+}
+
+func resourceComputeImageImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+	if err := tpgresource.ParseImportId([]string{
+		"projects/(?P<project>[^/]+)/global/images/(?P<name>[^/]+)",
+		"(?P<project>[^/]+)/(?P<name>[^/]+)",
+		"(?P<name>[^/]+)",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/images/{{name}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func flattenComputeImageArchiveSizeBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := int(floatVal)
+		return intVal
+	}
+
+	return v // let terraform core handle it otherwise
+}
+
+func flattenComputeImageCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenComputeImageDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenComputeImageStorageLocations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenComputeImageDiskSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	// Handles the string fixed64 format
+	if strVal, ok := v.(string); ok {
+		if intVal, err := tpgresource.StringToFixed64(strVal); err == nil {
+			return intVal
+		}
+	}
+
+	// number values are represented as float64
+	if floatVal, ok := v.(float64); ok {
+		intVal := 
int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeImageFamily(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeImageGuestOsFeatures(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(computeImageGuestOsFeaturesSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "type": flattenComputeImageGuestOsFeaturesType(original["type"], d, config), + }) + } + return transformed +} +func flattenComputeImageGuestOsFeaturesType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeImageImageEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_self_link"] = + flattenComputeImageImageEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) + transformed["kms_key_service_account"] = + flattenComputeImageImageEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) + return []interface{}{transformed} +} +func flattenComputeImageImageEncryptionKeyKmsKeySelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + vStr := v.(string) + return strings.Split(vStr, "/cryptoKeyVersions/")[0] +} + +func flattenComputeImageImageEncryptionKeyKmsKeyServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} 
+ +func flattenComputeImageLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeImageLabelFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeImageLicenses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeImageName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeImageSourceDisk(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeImageSourceImage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeImageSourceSnapshot(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func expandComputeImageDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeImageStorageLocations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeImageDiskSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeImageFamily(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeImageGuestOsFeatures(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandComputeImageGuestOsFeaturesType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeImageGuestOsFeaturesType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeImageImageEncryptionKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeySelfLink, err := expandComputeImageImageEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeySelfLink + } + + transformedKmsKeyServiceAccount, err := expandComputeImageImageEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount + } + + return transformed, nil +} + +func expandComputeImageImageEncryptionKeyKmsKeySelfLink(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeImageImageEncryptionKeyKmsKeyServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeImageLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandComputeImageLabelFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeImageLicenses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for licenses: nil") + } + f, err := tpgresource.ParseGlobalFieldValue("licenses", raw.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for licenses: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func expandComputeImageName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeImageRawDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedContainerType, err := expandComputeImageRawDiskContainerType(original["container_type"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedContainerType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["containerType"] = transformedContainerType + } + + transformedSha1, err := expandComputeImageRawDiskSha1(original["sha1"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha1); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha1Checksum"] = transformedSha1 + } + + transformedSource, err := expandComputeImageRawDiskSource(original["source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["source"] = transformedSource + } + + return transformed, nil +} + +func expandComputeImageRawDiskContainerType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeImageRawDiskSha1(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeImageRawDiskSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeImageSourceDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseZonalFieldValue("disks", v.(string), "project", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for source_disk: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeImageSourceImage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("images", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for source_image: %s", err) + } + return f.RelativeLink(), nil +} + +func 
expandComputeImageSourceSnapshot(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("snapshots", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for source_snapshot: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_image_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_image_sweeper.go new file mode 100644 index 0000000000..98536e88e4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_image_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeImage", testSweepComputeImage) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeImage(region string) error { + resourceName := "ComputeImage" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/images", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from 
request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/images/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance.go similarity index 87% 
rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance.go index 611da4b158..b3ffe4c5db 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "context" @@ -18,6 +20,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/mitchellh/hashstructure" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/compute/v1" ) @@ -37,6 +42,7 @@ var ( "boot_disk.0.initialize_params.0.type", "boot_disk.0.initialize_params.0.image", "boot_disk.0.initialize_params.0.labels", + "boot_disk.0.initialize_params.0.resource_manager_tags", } schedulingKeys = []string{ @@ -64,7 +70,7 @@ func forceNewIfNetworkIPNotUpdatable(ctx context.Context, d *schema.ResourceDiff return forceNewIfNetworkIPNotUpdatableFunc(d) } -func forceNewIfNetworkIPNotUpdatableFunc(d TerraformResourceDiff) error { +func forceNewIfNetworkIPNotUpdatableFunc(d tpgresource.TerraformResourceDiff) error { oldCount, newCount := d.GetChange("network_interface.#") if oldCount.(int) != newCount.(int) { return nil @@ -88,6 +94,14 @@ func forceNewIfNetworkIPNotUpdatableFunc(d TerraformResourceDiff) error { return nil } +// User may specify AUTOMATIC using any case; the API will accept it and return an empty string. 
+func ComputeInstanceMinCpuPlatformEmptyOrAutomaticDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + old = strings.ToLower(old) + new = strings.ToLower(new) + defaultVal := "automatic" + return (old == "" && new == defaultVal) || (new == "" && old == defaultVal) +} + func ResourceComputeInstance() *schema.Resource { return &schema.Resource{ Create: resourceComputeInstanceCreate, @@ -99,7 +113,7 @@ func ResourceComputeInstance() *schema.Resource { }, SchemaVersion: 6, - MigrateState: resourceComputeInstanceMigrateState, + MigrateState: ResourceComputeInstanceMigrateState, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(20 * time.Minute), @@ -159,7 +173,7 @@ func ResourceComputeInstance() *schema.Resource { AtLeastOneOf: bootDiskKeys, ForceNew: true, ConflictsWith: []string{"boot_disk.0.disk_encryption_key_raw"}, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Computed: true, Description: `The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set.`, }, @@ -199,7 +213,7 @@ func ResourceComputeInstance() *schema.Resource { AtLeastOneOf: initializeParamsKeys, Computed: true, ForceNew: true, - DiffSuppressFunc: diskImageDiffSuppress, + DiffSuppressFunc: DiskImageDiffSuppress, Description: `The image from which this disk was initialised.`, }, @@ -211,6 +225,14 @@ func ResourceComputeInstance() *schema.Resource { ForceNew: true, Description: `A set of key/value label pairs assigned to the disk.`, }, + + "resource_manager_tags": { + Type: schema.TypeMap, + Optional: true, + AtLeastOneOf: initializeParamsKeys, + ForceNew: true, + Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. 
The field is ignored (both PUT & PATCH) when empty.`, + }, }, }, }, @@ -232,7 +254,7 @@ func ResourceComputeInstance() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: []string{"boot_disk.initialize_params"}, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name or self_link of the disk attached to this instance.`, }, }, @@ -263,7 +285,7 @@ func ResourceComputeInstance() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name or self_link of the network attached to this interface.`, }, @@ -271,7 +293,7 @@ func ResourceComputeInstance() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name or self_link of the subnetwork attached to this interface.`, }, @@ -339,7 +361,7 @@ func ResourceComputeInstance() *schema.Resource { "ip_cidr_range": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: ipCidrRangeDiffSuppress, + DiffSuppressFunc: tpgresource.IpCidrRangeDiffSuppress, Description: `The IP CIDR range represented by this alias IP range.`, }, "subnetwork_range_name": { @@ -404,6 +426,24 @@ func ResourceComputeInstance() *schema.Resource { }, }, }, + "network_performance_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Configures network performance settings for the instance. 
If not specified, the instance will be created with its default network performance configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "total_egress_bandwidth_tier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"TIER_1", "DEFAULT"}, false), + Description: `The egress bandwidth tier to enable. Possible values:TIER_1, DEFAULT`, + }, + }, + }, + }, "allow_stopping_for_update": { Type: schema.TypeBool, Optional: true, @@ -419,7 +459,7 @@ func ResourceComputeInstance() *schema.Resource { "source": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name or self_link of the disk attached to this instance.`, }, @@ -448,7 +488,7 @@ func ResourceComputeInstance() *schema.Resource { "kms_key_self_link": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Computed: true, Description: `The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set.`, }, @@ -508,13 +548,32 @@ func ResourceComputeInstance() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The accelerator type resource exposed to this instance. E.g. 
nvidia-tesla-k80.`, }, }, }, }, + "params": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Stores additional params passed with the request, but not persisted as part of resource payload.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_manager_tags": { + Type: schema.TypeMap, + Optional: true, + // This field is intentionally not updatable. The API overrides all existing tags on the field when updated. See go/gce-tags-terraform-support for details. + ForceNew: true, + Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.`, + }, + }, + }, + }, + "labels": { Type: schema.TypeMap, Optional: true, @@ -537,10 +596,11 @@ func ResourceComputeInstance() *schema.Resource { }, "min_cpu_platform": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: `The minimum CPU platform specified for the VM instance.`, + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The minimum CPU platform specified for the VM instance.`, + DiffSuppressFunc: ComputeInstanceMinCpuPlatformEmptyOrAutomaticDiffSuppress, }, "project": { @@ -593,7 +653,7 @@ func ResourceComputeInstance() *schema.Resource { Optional: true, AtLeastOneOf: schedulingKeys, Elem: instanceSchedulingNodeAffinitiesElemSchema(), - DiffSuppressFunc: emptyOrDefaultStringSuppress(""), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress(""), Description: `Specifies node affinities or anti-affinities to determine which sole-tenant nodes your instances and managed instance groups will use as host systems.`, }, @@ -669,10 +729,10 @@ func ResourceComputeInstance() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, StateFunc: func(v interface{}) string { - return 
canonicalizeServiceScope(v.(string)) + return tpgresource.CanonicalizeServiceScope(v.(string)) }, }, - Set: stringScopeHashcode, + Set: tpgresource.StringScopeHashcode, }, }, }, @@ -685,7 +745,7 @@ func ResourceComputeInstance() *schema.Resource { // Since this block is used by the API based on which // image being used, the field needs to be marked as Computed. Computed: true, - DiffSuppressFunc: emptyOrDefaultStringSuppress(""), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress(""), Description: `The shielded vm config being used by the instance.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -767,9 +827,12 @@ func ResourceComputeInstance() *schema.Resource { Description: `Desired status of the instance. Either "RUNNING" or "TERMINATED".`, }, "current_status": { - Type: schema.TypeString, - Computed: true, - Description: `Current status of the instance.`, + Type: schema.TypeString, + Computed: true, + Description: ` + Current status of the instance. + This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. + For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, }, "tags": { Type: schema.TypeSet, @@ -833,7 +896,7 @@ func ResourceComputeInstance() *schema.Resource { "resource_policies": { Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeString}, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Optional: true, MaxItems: 1, Description: `A list of self_links of resource policies to attach to the instance. 
Currently a max of 1 resource policy is supported.`, @@ -899,33 +962,33 @@ func ResourceComputeInstance() *schema.Resource { } } -func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, error) { - project, err := getProject(d, config) +func getInstance(config *transport_tpg.Config, d *schema.ResourceData) (*compute.Instance, error) { + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return nil, err } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, d.Get("name").(string)).Do() if err != nil { - return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) + return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) } return instance, nil } -func getDisk(diskUri string, d *schema.ResourceData, config *Config) (*compute.Disk, error) { - source, err := ParseDiskFieldValue(diskUri, d, config) +func getDisk(diskUri string, d *schema.ResourceData, config *transport_tpg.Config) (*compute.Disk, error) { + source, err := tpgresource.ParseDiskFieldValue(diskUri, d, config) if err != nil { return nil, err } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } @@ -938,11 +1001,11 @@ func getDisk(diskUri string, d *schema.ResourceData, config *Config) (*compute.D return disk, err } -func expandComputeInstance(project string, d *schema.ResourceData, config *Config) (*compute.Instance, error) { +func expandComputeInstance(project string, d *schema.ResourceData, config *transport_tpg.Config) (*compute.Instance, error) { // Get the 
machine type var machineTypeUrl string if mt, ok := d.GetOk("machine_type"); ok { - machineType, err := ParseMachineTypesFieldValue(mt.(string), d, config) + machineType, err := tpgresource.ParseMachineTypesFieldValue(mt.(string), d, config) if err != nil { return nil, fmt.Errorf( "Error loading machine type: %s", @@ -987,6 +1050,11 @@ func expandComputeInstance(project string, d *schema.ResourceData, config *Confi return nil, fmt.Errorf("Error creating scheduling: %s", err) } + params, err := expandParams(d) + if err != nil { + return nil, fmt.Errorf("Error creating params: %s", err) + } + metadata, err := resourceInstanceMetadata(d) if err != nil { return nil, fmt.Errorf("Error creating metadata: %s", err) @@ -996,6 +1064,10 @@ func expandComputeInstance(project string, d *schema.ResourceData, config *Confi if err != nil { return nil, fmt.Errorf("Error creating network interfaces: %s", err) } + networkPerformanceConfig, err := expandNetworkPerformanceConfig(d, config) + if err != nil { + return nil, fmt.Errorf("Error creating network performance config: %s", err) + } accels, err := expandInstanceGuestAccelerators(d, config) if err != nil { return nil, fmt.Errorf("Error creating guest accelerators: %s", err) @@ -1015,8 +1087,10 @@ func expandComputeInstance(project string, d *schema.ResourceData, config *Confi Metadata: metadata, Name: d.Get("name").(string), NetworkInterfaces: networkInterfaces, + NetworkPerformanceConfig: networkPerformanceConfig, Tags: resourceInstanceTags(d), - Labels: expandLabels(d), + Params: params, + Labels: tpgresource.ExpandLabels(d), ServiceAccounts: expandServiceAccounts(d.Get("service_account").([]interface{})), GuestAccelerators: accels, MinCpuPlatform: d.Get("min_cpu_platform").(string), @@ -1028,7 +1102,7 @@ func expandComputeInstance(project string, d *schema.ResourceData, config *Confi AdvancedMachineFeatures: expandAdvancedMachineFeatures(d), ShieldedInstanceConfig: expandShieldedVmConfigs(d), DisplayDevice: 
expandDisplayDevice(d), - ResourcePolicies: convertStringArr(d.Get("resource_policies").([]interface{})), + ResourcePolicies: tpgresource.ConvertStringArr(d.Get("resource_policies").([]interface{})), ReservationAffinity: reservationAffinity, }, nil } @@ -1055,7 +1129,7 @@ func getAllStatusBut(status string) []string { return computeInstanceStatus } -func waitUntilInstanceHasDesiredStatus(config *Config, d *schema.ResourceData) error { +func waitUntilInstanceHasDesiredStatus(config *transport_tpg.Config, d *schema.ResourceData) error { desiredStatus := d.Get("desired_status").(string) if desiredStatus != "" { @@ -1087,19 +1161,19 @@ func waitUntilInstanceHasDesiredStatus(config *Config, d *schema.ResourceData) e } func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } // Get the zone - z, err := getZone(d, config) + z, err := tpgresource.GetZone(d, config) if err != nil { return err } @@ -1141,9 +1215,9 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err } func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -1176,9 +1250,12 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("can_ip_forward", instance.CanIpForward); err != nil { return fmt.Errorf("Error setting can_ip_forward: %s", err) } - if err := d.Set("machine_type", GetResourceNameFromSelfLink(instance.MachineType)); err != nil { 
+ if err := d.Set("machine_type", tpgresource.GetResourceNameFromSelfLink(instance.MachineType)); err != nil { return fmt.Errorf("Error setting machine_type: %s", err) } + if err := d.Set("network_performance_config", flattenNetworkPerformanceConfig(instance.NetworkPerformanceConfig)); err != nil { + return err + } // Set the networks // Use the first external IP found for the default connection info. networkInterfaces, _, internalIP, externalIP, err := flattenNetworkInterfaces(d, config, instance.NetworkInterfaces) @@ -1208,7 +1285,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("tags_fingerprint", instance.Tags.Fingerprint); err != nil { return fmt.Errorf("Error setting tags_fingerprint: %s", err) } - if err := d.Set("tags", convertStringArrToInterface(instance.Tags.Items)); err != nil { + if err := d.Set("tags", tpgresource.ConvertStringArrToInterface(instance.Tags.Items)); err != nil { return fmt.Errorf("Error setting tags: %s", err) } } @@ -1235,13 +1312,13 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error s := disk["source"].(string) var sourceLink string if strings.Contains(s, "regions/") { - source, err := ParseRegionDiskFieldValue(disk["source"].(string), d, config) + source, err := tpgresource.ParseRegionDiskFieldValue(disk["source"].(string), d, config) if err != nil { return err } sourceLink = source.RelativeLink() } else { - source, err := ParseDiskFieldValue(disk["source"].(string), d, config) + source, err := tpgresource.ParseDiskFieldValue(disk["source"].(string), d, config) if err != nil { return err } @@ -1262,13 +1339,13 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } else { var sourceLink string if strings.Contains(disk.Source, "regions/") { - source, err := ParseRegionDiskFieldValue(disk.Source, d, config) + source, err := tpgresource.ParseRegionDiskFieldValue(disk.Source, d, config) if err != nil { return err } 
sourceLink = source.RelativeLink() } else { - source, err := ParseDiskFieldValue(disk.Source, d, config) + source, err := tpgresource.ParseDiskFieldValue(disk.Source, d, config) if err != nil { return err } @@ -1276,7 +1353,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } adIndex, inConfig := attachedDiskSources[sourceLink] di := map[string]interface{}{ - "source": ConvertSelfLinkToV1(disk.Source), + "source": tpgresource.ConvertSelfLinkToV1(disk.Source), "device_name": disk.DeviceName, "mode": disk.Mode, } @@ -1319,7 +1396,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } } - zone := GetResourceNameFromSelfLink(instance.Zone) + zone := tpgresource.GetResourceNameFromSelfLink(instance.Zone) if err := d.Set("service_account", flattenServiceAccounts(instance.ServiceAccounts)); err != nil { return fmt.Errorf("Error setting service_account: %s", err) @@ -1351,7 +1428,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("deletion_protection", instance.DeletionProtection); err != nil { return fmt.Errorf("Error setting deletion_protection: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(instance.SelfLink)); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(instance.SelfLink)); err != nil { return fmt.Errorf("Error setting self_link: %s", err) } if err := d.Set("instance_id", fmt.Sprintf("%d", instance.Id)); err != nil { @@ -1396,18 +1473,18 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := 
tpgresource.GetProject(d, config) if err != nil { return err } - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return err } @@ -1416,7 +1493,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // Change back to getInstance(config, d) once updating alias ips is GA. instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, d.Get("name").(string)).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) } // Enable partial mode for the resource since it is possible @@ -1429,13 +1506,13 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err } metadataV1 := &compute.Metadata{} - if err := Convert(metadata, metadataV1); err != nil { + if err := tpgresource.Convert(metadata, metadataV1); err != nil { return err } // We're retrying for an error 412 where the metadata fingerprint is out of date - err = retry( - func() error { + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { // retrieve up-to-date metadata from the API in case several updates hit simultaneously. instances // sometimes but not always share metadata fingerprints. 
instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() @@ -1457,7 +1534,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return nil }, - ) + }) if err != nil { return err @@ -1467,7 +1544,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("tags") { tags := resourceInstanceTags(d) tagsV1 := &compute.Tags{} - if err := Convert(tags, tagsV1); err != nil { + if err := tpgresource.Convert(tags, tagsV1); err != nil { return err } op, err := config.NewComputeClient(userAgent).Instances.SetTags( @@ -1483,7 +1560,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err } if d.HasChange("labels") { - labels := expandLabels(d) + labels := tpgresource.ExpandLabels(d) labelFingerprint := d.Get("label_fingerprint").(string) req := compute.InstancesSetLabelsRequest{Labels: labels, LabelFingerprint: labelFingerprint} @@ -1513,7 +1590,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err } } - resourcePolicies := convertStringArr(d.Get("resource_policies").([]interface{})) + resourcePolicies := tpgresource.ConvertStringArr(d.Get("resource_policies").([]interface{})) if len(resourcePolicies) > 0 { req := compute.InstancesAddResourcePoliciesRequest{ResourcePolicies: resourcePolicies} @@ -1586,7 +1663,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange(prefix + ".subnetwork") { if !d.HasChange(prefix + ".network") { subnetProjectField := prefix + ".subnetwork_project" - sf, err := ParseSubnetworkFieldValueWithProjectField(subnetwork, subnetProjectField, d, config) + sf, err := tpgresource.ParseSubnetworkFieldValueWithProjectField(subnetwork, subnetProjectField, d, config) if err != nil { return fmt.Errorf("Cannot determine self_link for subnetwork %q: %s", subnetwork, err) } @@ -1594,7 +1671,7 @@ func resourceComputeInstanceUpdate(d 
*schema.ResourceData, meta interface{}) err if err != nil { return errwrap.Wrapf("Error getting subnetwork value: {{err}}", err) } - nf, err := ParseNetworkFieldValue(resp.Network, d, config) + nf, err := tpgresource.ParseNetworkFieldValue(resp.Network, d, config) if err != nil { return fmt.Errorf("Cannot determine self_link for network %q: %s", resp.Network, err) } @@ -1746,7 +1823,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if _, ok := oDisks[hash]; !ok { computeDiskV1 := &compute.AttachedDisk{} - err = Convert(computeDisk, computeDiskV1) + err = tpgresource.Convert(computeDisk, computeDiskV1) if err != nil { return err } @@ -1817,8 +1894,8 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err } if d.HasChange("can_ip_forward") { - err = retry( - func() error { + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() if err != nil { return fmt.Errorf("Error retrieving instance: %s", err) @@ -1838,7 +1915,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return nil }, - ) + }) if err != nil { return err @@ -1897,40 +1974,34 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err } } - if d.HasChange("machine_type") { - mt, err := ParseMachineTypesFieldValue(d.Get("machine_type").(string), d, config) - if err != nil { - return err - } - req := &compute.InstancesSetMachineTypeRequest{ - MachineType: mt.RelativeLink(), + if d.HasChange("min_cpu_platform") { + minCpuPlatform := d.Get("min_cpu_platform") + req := &compute.InstancesSetMinCpuPlatformRequest{ + MinCpuPlatform: minCpuPlatform.(string), } - op, err := config.NewComputeClient(userAgent).Instances.SetMachineType(project, zone, instance.Name, req).Do() + op, err := config.NewComputeClient(userAgent).Instances.SetMinCpuPlatform(project, zone, 
instance.Name, req).Do() if err != nil { return err } - opErr := ComputeOperationWaitTime(config, op, project, "updating machinetype", userAgent, d.Timeout(schema.TimeoutUpdate)) + opErr := ComputeOperationWaitTime(config, op, project, "updating min cpu platform", userAgent, d.Timeout(schema.TimeoutUpdate)) if opErr != nil { return opErr } } - if d.HasChange("min_cpu_platform") { - minCpuPlatform, ok := d.GetOk("min_cpu_platform") - // Even though you don't have to set minCpuPlatform on create, you do have to set it to an - // actual value on update. "Automatic" is the default. This will be read back from the API as empty, - // so we don't need to worry about diffs. - if !ok { - minCpuPlatform = "Automatic" + if d.HasChange("machine_type") { + mt, err := tpgresource.ParseMachineTypesFieldValue(d.Get("machine_type").(string), d, config) + if err != nil { + return err } - req := &compute.InstancesSetMinCpuPlatformRequest{ - MinCpuPlatform: minCpuPlatform.(string), + req := &compute.InstancesSetMachineTypeRequest{ + MachineType: mt.RelativeLink(), } - op, err := config.NewComputeClient(userAgent).Instances.SetMinCpuPlatform(project, zone, instance.Name, req).Do() + op, err := config.NewComputeClient(userAgent).Instances.SetMachineType(project, zone, instance.Name, req).Do() if err != nil { return err } - opErr := ComputeOperationWaitTime(config, op, project, "updating min cpu platform", userAgent, d.Timeout(schema.TimeoutUpdate)) + opErr := ComputeOperationWaitTime(config, op, project, "updating machinetype", userAgent, d.Timeout(schema.TimeoutUpdate)) if opErr != nil { return opErr } @@ -1942,7 +2013,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if len(sa) > 0 && sa[0] != nil { saMap := sa[0].(map[string]interface{}) req.Email = saMap["email"].(string) - req.Scopes = canonicalizeServiceScopes(convertStringSet(saMap["scopes"].(*schema.Set))) + req.Scopes = 
tpgresource.CanonicalizeServiceScopes(tpgresource.ConvertStringSet(saMap["scopes"].(*schema.Set))) } op, err := config.NewComputeClient(userAgent).Instances.SetServiceAccount(project, zone, instance.Name, req).Do() if err != nil { @@ -2005,8 +2076,8 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err } if d.HasChange("advanced_machine_features") { - err = retry( - func() error { + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { // retrieve up-to-date instance from the API in case several updates hit simultaneously. instances // sometimes but not always share metadata fingerprints. instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() @@ -2028,7 +2099,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return nil }, - ) + }) if err != nil { return err @@ -2071,18 +2142,18 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return resourceComputeInstanceRead(d, meta) } -func startInstanceOperation(d *schema.ResourceData, config *Config) (*compute.Operation, error) { - project, err := getProject(d, config) +func startInstanceOperation(d *schema.ResourceData, config *transport_tpg.Config) (*compute.Operation, error) { + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return nil, err } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } @@ -2091,7 +2162,7 @@ func startInstanceOperation(d *schema.ResourceData, config *Config) (*compute.Op // Change back to getInstance(config, d) once updating alias ips is GA. 
instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, d.Get("name").(string)).Do() if err != nil { - return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", instance.Name)) + return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance %s", instance.Name)) } // Retrieve instance from config to pull encryption keys if necessary @@ -2122,18 +2193,18 @@ func startInstanceOperation(d *schema.ResourceData, config *Config) (*compute.Op } func expandAttachedDisk(diskConfig map[string]interface{}, d *schema.ResourceData, meta interface{}) (*compute.AttachedDisk, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) s := diskConfig["source"].(string) var sourceLink string if strings.Contains(s, "regions/") { - source, err := ParseRegionDiskFieldValue(s, d, config) + source, err := tpgresource.ParseRegionDiskFieldValue(s, d, config) if err != nil { return nil, err } sourceLink = source.RelativeLink() } else { - source, err := ParseDiskFieldValue(s, d, config) + source, err := tpgresource.ParseDiskFieldValue(s, d, config) if err != nil { return nil, err } @@ -2177,7 +2248,7 @@ func expandAttachedDisk(diskConfig map[string]interface{}, d *schema.ResourceDat // See comment on expandInstanceTemplateGuestAccelerators regarding why this // code is duplicated. 
-func expandInstanceGuestAccelerators(d TerraformResourceData, config *Config) ([]*compute.AcceleratorConfig, error) { +func expandInstanceGuestAccelerators(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]*compute.AcceleratorConfig, error) { configs, ok := d.GetOk("guest_accelerator") if !ok { return nil, nil @@ -2189,7 +2260,7 @@ func expandInstanceGuestAccelerators(d TerraformResourceData, config *Config) ([ if data["count"].(int) == 0 { continue } - at, err := ParseAcceleratorFieldValue(data["type"].(string), d, config) + at, err := tpgresource.ParseAcceleratorFieldValue(data["type"].(string), d, config) if err != nil { return nil, fmt.Errorf("cannot parse accelerator type: %v", err) } @@ -2266,18 +2337,18 @@ func desiredStatusDiff(_ context.Context, diff *schema.ResourceDiff, meta interf } func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return err } @@ -2308,8 +2379,8 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err } func resourceComputeInstanceImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -2318,7 +2389,7 @@ func resourceComputeInstanceImportState(d *schema.ResourceData, meta interface{} } // Replace import id for the 
resource id - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instances/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instances/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -2327,8 +2398,18 @@ func resourceComputeInstanceImportState(d *schema.ResourceData, meta interface{} return []*schema.ResourceData{d}, nil } -func expandBootDisk(d *schema.ResourceData, config *Config, project string) (*compute.AttachedDisk, error) { - userAgent, err := generateUserAgentString(d, config.UserAgent) +func expandParams(d *schema.ResourceData) (*compute.InstanceParams, error) { + params := &compute.InstanceParams{} + + if _, ok := d.GetOk("params.0.resource_manager_tags"); ok { + params.ResourceManagerTags = tpgresource.ExpandStringMap(d, "params.0.resource_manager_tags") + } + + return params, nil +} + +func expandBootDisk(d *schema.ResourceData, config *transport_tpg.Config, project string) (*compute.AttachedDisk, error) { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } @@ -2359,7 +2440,7 @@ func expandBootDisk(d *schema.ResourceData, config *Config, project string) (*co } if v, ok := d.GetOk("boot_disk.0.source"); ok { - source, err := ParseDiskFieldValue(v.(string), d, config) + source, err := tpgresource.ParseDiskFieldValue(v.(string), d, config) if err != nil { return nil, err } @@ -2384,7 +2465,7 @@ func expandBootDisk(d *schema.ResourceData, config *Config, project string) (*co if v, ok := d.GetOk("boot_disk.0.initialize_params.0.image"); ok { imageName := v.(string) - imageUrl, err := resolveImage(config, project, imageName, userAgent) + imageUrl, err := ResolveImage(config, project, imageName, userAgent) if err != nil { return nil, fmt.Errorf("Error resolving image name '%s': %s", imageName, err) } @@ -2393,7 +2474,11 @@ func expandBootDisk(d *schema.ResourceData, config *Config, project string) 
(*co } if _, ok := d.GetOk("boot_disk.0.initialize_params.0.labels"); ok { - disk.InitializeParams.Labels = expandStringMap(d, "boot_disk.0.initialize_params.0.labels") + disk.InitializeParams.Labels = tpgresource.ExpandStringMap(d, "boot_disk.0.initialize_params.0.labels") + } + + if _, ok := d.GetOk("boot_disk.0.initialize_params.0.resource_manager_tags"); ok { + disk.InitializeParams.ResourceManagerTags = tpgresource.ExpandStringMap(d, "boot_disk.0.initialize_params.0.resource_manager_tags") } } @@ -2404,12 +2489,12 @@ func expandBootDisk(d *schema.ResourceData, config *Config, project string) (*co return disk, nil } -func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk, config *Config) []map[string]interface{} { +func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk, config *transport_tpg.Config) []map[string]interface{} { result := map[string]interface{}{ "auto_delete": disk.AutoDelete, "device_name": disk.DeviceName, "mode": disk.Mode, - "source": ConvertSelfLinkToV1(disk.Source), + "source": tpgresource.ConvertSelfLinkToV1(disk.Source), // disk_encryption_key_raw is not returned from the API, so copy it from what the user // originally specified to avoid diffs. "disk_encryption_key_raw": d.Get("boot_disk.0.disk_encryption_key_raw"), @@ -2427,12 +2512,13 @@ func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk, config } } else { result["initialize_params"] = []map[string]interface{}{{ - "type": GetResourceNameFromSelfLink(diskDetails.Type), + "type": tpgresource.GetResourceNameFromSelfLink(diskDetails.Type), // If the config specifies a family name that doesn't match the image name, then // the diff won't be properly suppressed. See DiffSuppressFunc for this field. 
- "image": diskDetails.SourceImage, - "size": diskDetails.SizeGb, - "labels": diskDetails.Labels, + "image": diskDetails.SourceImage, + "size": diskDetails.SizeGb, + "labels": diskDetails.Labels, + "resource_manager_tags": d.Get("boot_disk.0.initialize_params.0.resource_manager_tags"), }} } @@ -2450,7 +2536,7 @@ func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk, config return []map[string]interface{}{result} } -func expandScratchDisks(d *schema.ResourceData, config *Config, project string) ([]*compute.AttachedDisk, error) { +func expandScratchDisks(d *schema.ResourceData, config *transport_tpg.Config, project string) ([]*compute.AttachedDisk, error) { diskType, err := readDiskType(config, d, "local-ssd") if err != nil { return nil, fmt.Errorf("Error loading disk type 'local-ssd': %s", err) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_from_machine_image.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_from_machine_image.go new file mode 100644 index 0000000000..d88474082e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_from_machine_image.go @@ -0,0 +1,5 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +// Magic Modules doesn't let us remove files - blank out beta-only common-compile files for now. 
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_from_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_from_template.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_from_template.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_from_template.go index 05396a652c..c0fe895232 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_from_template.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_from_template.go @@ -1,11 +1,18 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( + "encoding/json" "fmt" "log" + "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/compute/v1" ) @@ -89,19 +96,19 @@ func recurseOnSchema(s map[string]*schema.Schema, f func(*schema.Schema)) { } func resourceComputeInstanceFromTemplateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } // Get the zone - z, err := getZone(d, config) + z, err := tpgresource.GetZone(d, config) if err != nil { return err } @@ -116,19 +123,65 @@ func resourceComputeInstanceFromTemplateCreate(d *schema.ResourceData, meta inte return err } - tpl, err := ParseInstanceTemplateFieldValue(d.Get("source_instance_template").(string), d, config) + sourceInstanceTemplate := ConvertToUniqueIdWhenPresent(d.Get("source_instance_template").(string)) + tpl, err := tpgresource.ParseInstanceTemplateFieldValue(sourceInstanceTemplate, d, config) if err != nil { return err } - it, err := config.NewComputeClient(userAgent).InstanceTemplates.Get(project, tpl.Name).Do() - if err != nil { - return err - } + it := compute.InstanceTemplate{} + var relativeUrl string - instance.Disks, err = adjustInstanceFromTemplateDisks(d, config, it, zone, project) - if err != nil { - return err + if strings.Contains(sourceInstanceTemplate, "global/instanceTemplates") { + instanceTemplate, err := config.NewComputeClient(userAgent).InstanceTemplates.Get(project, tpl.Name).Do() + if err != nil { + return err + } + + 
it = *instanceTemplate + relativeUrl = tpl.RelativeLink() + + instance.Disks, err = adjustInstanceFromTemplateDisks(d, config, &it, zone, project, false) + if err != nil { + return err + } + } else { + relativeUrl, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/instanceTemplates/"+tpl.Name) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceTemplates/"+tpl.Name) + if err != nil { + return err + } + + instanceTemplate, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return err + } + + instancePropertiesObj, err := json.Marshal(instanceTemplate) + if err != nil { + fmt.Println(err) + return err + } + + if err := json.Unmarshal(instancePropertiesObj, &it); err != nil { + fmt.Println(err) + return err + } + + instance.Disks, err = adjustInstanceFromTemplateDisks(d, config, &it, zone, project, true) + if err != nil { + return err + } } // when we make the original call to expandComputeInstance expandScheduling is called, which sets default values. @@ -153,12 +206,12 @@ func resourceComputeInstanceFromTemplateCreate(d *schema.ResourceData, meta inte // Assume for now that all fields are exact snake_case versions of the API fields. // This won't necessarily always be true, but it serves as a good approximation and // can be adjusted later as we discover issues. 
- instance.ForceSendFields = append(instance.ForceSendFields, SnakeToPascalCase(f)) + instance.ForceSendFields = append(instance.ForceSendFields, tpgresource.SnakeToPascalCase(f)) } } log.Printf("[INFO] Requesting instance creation") - op, err := config.NewComputeClient(userAgent).Instances.Insert(project, zone.Name, instance).SourceInstanceTemplate(tpl.RelativeLink()).Do() + op, err := config.NewComputeClient(userAgent).Instances.Insert(project, zone.Name, instance).SourceInstanceTemplate(relativeUrl).Do() if err != nil { return fmt.Errorf("Error creating instance: %s", err) } @@ -180,7 +233,7 @@ func resourceComputeInstanceFromTemplateCreate(d *schema.ResourceData, meta inte // Instances have disks spread across multiple schema properties. This function // ensures that overriding one of these properties does not override the others. -func adjustInstanceFromTemplateDisks(d *schema.ResourceData, config *Config, it *compute.InstanceTemplate, zone *compute.Zone, project string) ([]*compute.AttachedDisk, error) { +func adjustInstanceFromTemplateDisks(d *schema.ResourceData, config *transport_tpg.Config, it *compute.InstanceTemplate, zone *compute.Zone, project string, isFromRegionalTemplate bool) ([]*compute.AttachedDisk, error) { disks := []*compute.AttachedDisk{} if _, hasBootDisk := d.GetOk("boot_disk"); hasBootDisk { bootDisk, err := expandBootDisk(d, config, project) @@ -192,7 +245,7 @@ func adjustInstanceFromTemplateDisks(d *schema.ResourceData, config *Config, it // boot disk was not overridden, so use the one from the instance template for _, disk := range it.Properties.Disks { if disk.Boot { - if disk.Source != "" { + if disk.Source != "" && !isFromRegionalTemplate { // Instances need a URL for the disk, but instance templates only have the name disk.Source = fmt.Sprintf("projects/%s/zones/%s/disks/%s", project, zone.Name, disk.Source) } @@ -246,7 +299,7 @@ func adjustInstanceFromTemplateDisks(d *schema.ResourceData, config *Config, it // attached disks were 
not overridden, so use the ones from the instance template for _, disk := range it.Properties.Disks { if !disk.Boot && disk.Type != "SCRATCH" { - if s := disk.Source; s != "" { + if s := disk.Source; s != "" && !isFromRegionalTemplate { // Instances need a URL for the disk source, but instance templates // only have the name (since they're global). disk.Source = fmt.Sprintf("zones/%s/disks/%s", zone.Name, s) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group.go similarity index 86% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_group.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group.go index c2a34c1795..80f09d825f 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_group.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" @@ -6,6 +8,9 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/googleapi" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -60,7 +65,7 @@ func ResourceComputeInstanceGroup() *schema.Resource { Optional: true, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: selfLinkRelativePathHash, + Set: tpgresource.SelfLinkRelativePathHash, Description: `The list of instances in the group, in self_link format. When adding instances they must all be in the same network and zone as the instance group.`, }, @@ -89,7 +94,7 @@ func ResourceComputeInstanceGroup() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, ForceNew: true, Description: `The URL of the network the instance group is in. If this is different from the network where the instances are in, the creation fails. 
Defaults to the network where the instances are in (if neither network nor instances is specified, this field will be blank).`, }, @@ -137,18 +142,18 @@ func validInstanceURLs(instanceUrls []string) bool { } func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return err } @@ -190,14 +195,14 @@ func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{} } if v, ok := d.GetOk("instances"); ok { - tmpUrls := convertStringArr(v.(*schema.Set).List()) + tmpUrls := tpgresource.ConvertStringArr(v.(*schema.Set).List()) var instanceUrls []string for _, v := range tmpUrls { if strings.HasPrefix(v, "https://") { instanceUrls = append(instanceUrls, v) } else { - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v) if err != nil { return err } @@ -227,18 +232,18 @@ func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{} } func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return err } 
@@ -248,7 +253,7 @@ func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) instanceGroup, err := config.NewComputeClient(userAgent).InstanceGroups.Get( project, zone, name).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Instance Group %q", name)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance Group %q", name)) } // retrieve instance group members @@ -304,18 +309,18 @@ func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) return nil } func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return err } @@ -327,8 +332,8 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} // to-do check for no instances from_, to_ := d.GetChange("instances") - from := convertStringArr(from_.(*schema.Set).List()) - to := convertStringArr(to_.(*schema.Set).List()) + from := tpgresource.ConvertStringArr(from_.(*schema.Set).List()) + to := tpgresource.ConvertStringArr(to_.(*schema.Set).List()) if !validInstanceURLs(from) { return fmt.Errorf("Error invalid instance URLs: %v", from) @@ -337,7 +342,7 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error invalid instance URLs: %v", to) } - add, remove := calcAddRemove(from, to) + add, remove := tpgresource.CalcAddRemove(from, to) if len(remove) > 0 { removeReq := &compute.InstanceGroupsRemoveInstancesRequest{ @@ -409,18 +414,18 @@ func resourceComputeInstanceGroupUpdate(d 
*schema.ResourceData, meta interface{} } func resourceComputeInstanceGroupDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return err } @@ -440,15 +445,15 @@ func resourceComputeInstanceGroupDelete(d *schema.ResourceData, meta interface{} } func resourceComputeInstanceGroupImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroups/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", }, d, config); err != nil { return nil, err } - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{name}}") if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_group_manager.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_manager.go similarity index 90% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_group_manager.go rename to 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_manager.go index eba69357ee..da96e5fdf3 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_group_manager.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_manager.go @@ -1,8 +1,11 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "log" + "regexp" "strings" "time" @@ -10,6 +13,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/compute/v1" ) @@ -50,7 +56,7 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { "instance_template": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: compareSelfLinkRelativePathsIgnoreParams, Description: `The full URL to an instance template from which all new instances of this version will be created.`, }, @@ -154,7 +160,7 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, - Set: selfLinkRelativePathHash, + Set: tpgresource.SelfLinkRelativePathHash, Description: `The full URL of all target pools to which new instances in the group are added. 
Updating the target pools attribute does not affect existing instances.`, }, @@ -183,7 +189,7 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { "health_check": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Description: `The health check resource that signals autohealing.`, }, @@ -262,7 +268,7 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{"RECREATE", "SUBSTITUTE", ""}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("SUBSTITUTE"), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("SUBSTITUTE"), Description: `The instance replacement method for managed instance groups. Valid values are: "RECREATE", "SUBSTITUTE". If SUBSTITUTE (default), the group replaces VM instances with new instances that have randomly generated names. If RECREATE, instance names are preserved. 
You must also set max_unavailable_fixed or max_unavailable_percent to be greater than 0.`, }, }, @@ -371,6 +377,33 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { } } +func parseUniqueId(s string) (string, string) { + splits := strings.SplitN(s, "?uniqueId=", 2) + if len(splits) == 2 { + return splits[0], splits[1] + } + return s, "" +} + +func compareSelfLinkRelativePathsIgnoreParams(_unused1, old, new string, _unused2 *schema.ResourceData) bool { + oldName, oldUniqueId := parseUniqueId(old) + newName, newUniqueId := parseUniqueId(new) + if oldUniqueId != "" && newUniqueId != "" && oldUniqueId != newUniqueId { + return false + } + return tpgresource.CompareSelfLinkRelativePaths(_unused1, oldName, newName, _unused2) +} + +func ConvertToUniqueIdWhenPresent(s string) string { + original, uniqueId := parseUniqueId(s) + if uniqueId != "" { + splits := strings.Split(original, "/") + splits[len(splits)-1] = uniqueId + return strings.Join(splits, "/") + } + return s +} + func getNamedPorts(nps []interface{}) []*compute.NamedPort { namedPorts := make([]*compute.NamedPort, 0, len(nps)) for _, v := range nps { @@ -398,18 +431,18 @@ func getNamedPortsBeta(nps []interface{}) []*compute.NamedPort { } func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return err } @@ -422,7 +455,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte TargetSize: int64(d.Get("target_size").(int)), ListManagedInstancesResults: 
d.Get("list_managed_instances_results").(string), NamedPorts: getNamedPortsBeta(d.Get("named_port").(*schema.Set).List()), - TargetPools: convertStringSet(d.Get("target_pools").(*schema.Set)), + TargetPools: tpgresource.ConvertStringSet(d.Get("target_pools").(*schema.Set)), AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), Versions: expandVersions(d.Get("version").([]interface{})), UpdatePolicy: expandUpdatePolicy(d.Get("update_policy").([]interface{})), @@ -441,7 +474,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte } // It probably maybe worked, so store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}}") if err != nil { return err } @@ -455,7 +488,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte // before attempting to Read the state of the manager. This allows a graceful resumption of a Create that was killed // by the upstream Terraform process exiting early such as a sigterm. 
select { - case <-config.context.Done(): + case <-config.Context.Done(): log.Printf("[DEBUG] Persisting %s so this operation can be resumed \n", op.Name) if err := d.Set("operation", op.Name); err != nil { return fmt.Errorf("Error setting operation: %s", err) @@ -494,7 +527,7 @@ func flattenVersions(versions []*compute.InstanceGroupManagerVersion) []map[stri for _, version := range versions { versionMap := make(map[string]interface{}) versionMap["name"] = version.Name - versionMap["instance_template"] = ConvertSelfLinkToV1(version.InstanceTemplate) + versionMap["instance_template"] = tpgresource.ConvertSelfLinkToV1(version.InstanceTemplate) versionMap["target_size"] = flattenFixedOrPercent(version.TargetSize) result = append(result, versionMap) } @@ -515,24 +548,24 @@ func flattenFixedOrPercent(fixedOrPercent *compute.FixedOrPercent) []map[string] } func getManager(d *schema.ResourceData, meta interface{}) (*compute.InstanceGroupManager, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } - zone, _ := getZone(d, config) + zone, _ := tpgresource.GetZone(d, config) name := d.Get("name").(string) manager, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Get(project, zone, name).Do() if err != nil { - return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance Group Manager %q", name)) + return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance Group Manager %q", name)) } if manager == nil { @@ -547,13 +580,13 @@ func getManager(d *schema.ResourceData, meta interface{}) (*compute.InstanceGrou } func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) 
- userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -561,7 +594,7 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf operation := d.Get("operation").(string) if operation != "" { log.Printf("[DEBUG] in progress operation detected at %v, attempting to resume", operation) - zone, _ := getZone(d, config) + zone, _ := tpgresource.GetZone(d, config) op := &compute.Operation{ Name: operation, Zone: zone, @@ -594,7 +627,7 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf if err := d.Set("name", manager.Name); err != nil { return fmt.Errorf("Error setting name: %s", err) } - if err := d.Set("zone", GetResourceNameFromSelfLink(manager.Zone)); err != nil { + if err := d.Set("zone", tpgresource.GetResourceNameFromSelfLink(manager.Zone)); err != nil { return fmt.Errorf("Error setting zone: %s", err) } if err := d.Set("description", manager.Description); err != nil { @@ -609,7 +642,7 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf if err := d.Set("list_managed_instances_results", manager.ListManagedInstancesResults); err != nil { return fmt.Errorf("Error setting list_managed_instances_results: %s", err) } - if err = d.Set("target_pools", mapStringArr(manager.TargetPools, ConvertSelfLinkToV1)); err != nil { + if err = d.Set("target_pools", tpgresource.MapStringArr(manager.TargetPools, tpgresource.ConvertSelfLinkToV1)); err != nil { return fmt.Errorf("Error setting target_pools in state: %s", err.Error()) } if err = d.Set("named_port", flattenNamedPortsBeta(manager.NamedPorts)); err != nil { @@ -621,10 +654,10 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf if err 
:= d.Set("fingerprint", manager.Fingerprint); err != nil { return fmt.Errorf("Error setting fingerprint: %s", err) } - if err := d.Set("instance_group", ConvertSelfLinkToV1(manager.InstanceGroup)); err != nil { + if err := d.Set("instance_group", tpgresource.ConvertSelfLinkToV1(manager.InstanceGroup)); err != nil { return fmt.Errorf("Error setting instance_group: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(manager.SelfLink)); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(manager.SelfLink)); err != nil { return fmt.Errorf("Error setting self_link: %s", err) } @@ -652,19 +685,19 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf } func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return err } @@ -681,7 +714,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } if d.HasChange("target_pools") { - updatedManager.TargetPools = convertStringSet(d.Get("target_pools").(*schema.Set)) + updatedManager.TargetPools = tpgresource.ConvertStringSet(d.Get("target_pools").(*schema.Set)) updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetPools") change = true } @@ -782,26 +815,34 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) if 
d.Get("wait_for_instances").(bool) { err := computeIGMWaitForInstanceStatus(d, meta) if err != nil { + notFound, reErr := regexp.MatchString(`not found`, err.Error()) + if reErr != nil { + return reErr + } + if notFound { + // manager was not found, we can exit gracefully + return nil + } return err } } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - zone, _ := getZone(d, config) + zone, _ := tpgresource.GetZone(d, config) name := d.Get("name").(string) op, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Delete(project, zone, name).Do() @@ -935,7 +976,7 @@ func expandVersions(configured []interface{}) []*compute.InstanceGroupManagerVer version := compute.InstanceGroupManagerVersion{ Name: data["name"].(string), - InstanceTemplate: data["instance_template"].(string), + InstanceTemplate: ConvertToUniqueIdWhenPresent(data["instance_template"].(string)), TargetSize: expandFixedOrPercent(data["target_size"].([]interface{})), } @@ -1111,13 +1152,13 @@ func resourceInstanceGroupManagerStateImporter(d *schema.ResourceData, meta inte if err := d.Set("wait_for_instances_status", "STABLE"); err != nil { return nil, fmt.Errorf("Error setting wait_for_instances_status: %s", err) } - config := meta.(*Config) - if err := parseImportId([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err 
:= replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_manager_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_manager_sweeper.go new file mode 100644 index 0000000000..e5c00c1394 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_manager_sweeper.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func init() { + sweeper.AddTestSweepers("ComputeInstanceGroupManager", testSweepComputeInstanceGroupManager) +} + +// At the time of writing, the CI only passes us-central1 as the region. +// Since we can read all instances across zones, we don't really use this param. 
+func testSweepComputeInstanceGroupManager(region string) error { + resourceName := "ComputeInstanceGroupManager" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + found, err := config.NewComputeClient(config.UserAgent).InstanceGroupManagers.AggregatedList(config.Project).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request: %s", err) + return nil + } + + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for zone, itemList := range found.Items { + for _, igm := range itemList.InstanceGroupManagers { + if !sweeper.IsSweepableTestResource(igm.Name) { + nonPrefixCount++ + continue + } + + // Don't wait on operations as we may have a lot to delete + _, err := config.NewComputeClient(config.UserAgent).InstanceGroupManagers.Delete(config.Project, tpgresource.GetResourceNameFromSelfLink(zone), igm.Name).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting %s resource %s : %s", resourceName, igm.Name, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, igm.Name) + } + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_group_migrate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_migrate.go similarity index 96% rename from 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_group_migrate.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_migrate.go index 4c71597354..4241896068 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_group_migrate.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_migrate.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_named_port.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_named_port.go new file mode 100644 index 0000000000..8852fd0b5d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_named_port.go @@ -0,0 +1,531 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeInstanceGroupNamedPort() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceGroupNamedPortCreate, + Read: resourceComputeInstanceGroupNamedPortRead, + Delete: resourceComputeInstanceGroupNamedPortDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeInstanceGroupNamedPortImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The name of the instance group.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name for this named port. 
The name must be 1-63 characters +long, and comply with RFC1035.`, + }, + "port": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The port number, which can be a value between 1 and 65535.`, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The zone of the instance group.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeInstanceGroupNamedPortCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandNestedComputeInstanceGroupNamedPortName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + portProp, err := expandNestedComputeInstanceGroupNamedPortPort(d.Get("port"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("port"); !tpgresource.IsEmptyValue(reflect.ValueOf(portProp)) && (ok || !reflect.DeepEqual(v, portProp)) { + obj["port"] = portProp + } + + obj, err = resourceComputeInstanceGroupNamedPortEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/setNamedPorts") + if err != nil { + return err + } + + 
log.Printf("[DEBUG] Creating new InstanceGroupNamedPort: %#v", obj) + + obj, err = resourceComputeInstanceGroupNamedPortPatchCreateEncoder(d, meta, obj) + if err != nil { + return err + } + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for InstanceGroupNamedPort: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating InstanceGroupNamedPort: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/{{port}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating InstanceGroupNamedPort", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create InstanceGroupNamedPort: %s", err) + } + + log.Printf("[DEBUG] Finished creating InstanceGroupNamedPort %q: %#v", d.Id(), res) + + return resourceComputeInstanceGroupNamedPortRead(d, meta) +} + +func resourceComputeInstanceGroupNamedPortRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}") + if err != nil { + 
return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for InstanceGroupNamedPort: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeInstanceGroupNamedPort %q", d.Id())) + } + + res, err = flattenNestedComputeInstanceGroupNamedPort(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing ComputeInstanceGroupNamedPort because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading InstanceGroupNamedPort: %s", err) + } + + if err := d.Set("name", flattenNestedComputeInstanceGroupNamedPortName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading InstanceGroupNamedPort: %s", err) + } + if err := d.Set("port", flattenNestedComputeInstanceGroupNamedPortPort(res["port"], d, config)); err != nil { + return fmt.Errorf("Error reading InstanceGroupNamedPort: %s", err) + } + + return nil +} + +func resourceComputeInstanceGroupNamedPortDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for InstanceGroupNamedPort: %s", err) + } + billingProject = project 
+ + lockName, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/setNamedPorts") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceComputeInstanceGroupNamedPortPatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "InstanceGroupNamedPort") + } + log.Printf("[DEBUG] Deleting InstanceGroupNamedPort %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "InstanceGroupNamedPort") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting InstanceGroupNamedPort", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting InstanceGroupNamedPort %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeInstanceGroupNamedPortImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/zones/(?P<zone>[^/]+)/instanceGroups/(?P<group>[^/]+)/(?P<port>[^/]+)/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<zone>[^/]+)/(?P<group>[^/]+)/(?P<port>[^/]+)/(?P<name>[^/]+)", + "(?P<zone>[^/]+)/(?P<group>[^/]+)/(?P<port>[^/]+)/(?P<name>[^/]+)", + "(?P<group>[^/]+)/(?P<port>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + //
Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/{{port}}/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedComputeInstanceGroupNamedPortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeInstanceGroupNamedPortPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandNestedComputeInstanceGroupNamedPortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeInstanceGroupNamedPortPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceComputeInstanceGroupNamedPortEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + ig, err := tpgresource.ParseInstanceGroupFieldValue(d.Get("group").(string), d, config) + if err != nil { + return nil, err + } + + if err := d.Set("group", ig.Name); err != nil { + return nil, fmt.Errorf("Error setting group: %s", err) + } + if err := d.Set("zone", ig.Zone); err != nil { + return nil, fmt.Errorf("Error setting zone: %s", err) + } + if err := d.Set("project", ig.Project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + + return obj, 
nil +} + +func flattenNestedComputeInstanceGroupNamedPort(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["namedPorts"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value namedPorts. Actual value: %v", v) + } + + _, item, err := resourceComputeInstanceGroupNamedPortFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputeInstanceGroupNamedPortFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedPort, err := expandNestedComputeInstanceGroupNamedPortPort(d.Get("port"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedPort := flattenNestedComputeInstanceGroupNamedPortPort(expectedPort, d, meta.(*transport_tpg.Config)) + expectedName, err := expandNestedComputeInstanceGroupNamedPortName(d.Get("name"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedName := flattenNestedComputeInstanceGroupNamedPortName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. 
+ for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemPort := flattenNestedComputeInstanceGroupNamedPortPort(item["port"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemPort)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedPort))) && !reflect.DeepEqual(itemPort, expectedFlattenedPort) { + log.Printf("[DEBUG] Skipping item with port= %#v, looking for %#v)", itemPort, expectedFlattenedPort) + continue + } + itemName := flattenNestedComputeInstanceGroupNamedPortName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. +func resourceComputeInstanceGroupNamedPortPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceComputeInstanceGroupNamedPortListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceComputeInstanceGroupNamedPortFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. 
+ if found != nil { + return nil, fmt.Errorf("Unable to create InstanceGroupNamedPort, existing object already found: %+v", found) + } + + // Return list with the resource to create appended + res := map[string]interface{}{ + "namedPorts": append(currItems, obj), + } + + return res, nil +} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. +func resourceComputeInstanceGroupNamedPortPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceComputeInstanceGroupNamedPortListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceComputeInstanceGroupNamedPortFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "ComputeInstanceGroupNamedPort") + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) + res := map[string]interface{}{ + "namedPorts": updatedItems, + } + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. 
+func resourceComputeInstanceGroupNamedPortListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}") + if err != nil { + return nil, err + } + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + var v interface{} + var ok bool + + v, ok = res["namedPorts"] + if ok && v != nil { + ls, lsOk := v.([]interface{}) + if !lsOk { + return nil, fmt.Errorf(`expected list for nested field "namedPorts"`) + } + return ls, nil + } + return nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_named_port_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_named_port_sweeper.go new file mode 100644 index 0000000000..6fe251db81 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_group_named_port_sweeper.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeInstanceGroupNamedPort", testSweepComputeInstanceGroupNamedPort) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeInstanceGroupNamedPort(region string) error { + resourceName := "ComputeInstanceGroupNamedPort" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/aggregated/instanceGroups/{{group}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["namedPorts"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + var rl []interface{} + zones := resourceList.(map[string]interface{}) + // Loop through every zone in the list response + for _, zonesValue := range zones { + zone := zonesValue.(map[string]interface{}) + for k, v := range zone { + // Zone map either has resources or a warning stating there were no resources found in the zone + if k != "warning" { + resourcesInZone := v.([]interface{}) + rl = append(rl, resourcesInZone...) + } + } + } + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/setNamedPorts" + if obj["zone"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource zone was nil", resourceName) + return nil + } + zone := tpgresource.GetResourceNameFromSelfLink(obj["zone"].(string)) + deleteTemplate = strings.Replace(deleteTemplate, "{{zone}}", zone, -1) + + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_migrate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_migrate.go similarity index 91% rename from 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_migrate.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_migrate.go index ad89e55ad0..6f39fd5a37 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_migrate.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_migrate.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" @@ -6,12 +8,15 @@ import ( "strconv" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "google.golang.org/api/compute/v1" ) -func resourceComputeInstanceMigrateState( +func ResourceComputeInstanceMigrateState( v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { if is.Empty() { log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") @@ -161,7 +166,7 @@ func migrateStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState, for service_acct_index, newScopes := range newScopesMap { for _, newScope := range newScopes { - hash := hashcode(canonicalizeServiceScope(newScope)) + hash := tpgresource.Hashcode(tpgresource.CanonicalizeServiceScope(newScope)) newKey := fmt.Sprintf("service_account.%s.scopes.%d", service_acct_index, hash) is.Attributes[newKey] = newScope } @@ -185,7 +190,7 @@ func migrateStateV3toV4(is *terraform.InstanceState, meta interface{}) (*terrafo // we have no other way to know which source belongs to which attached disk. 
// Also note that the following code modifies the returned instance- if you need immutability, please change // this to make a copy of the needed data. - config := meta.(*Config) + config := meta.(*transport_tpg.Config) instance, err := getInstanceFromInstanceState(config, is) if err != nil { return is, fmt.Errorf("migration error: %s", err) @@ -237,7 +242,7 @@ func migrateStateV3toV4(is *terraform.InstanceState, meta interface{}) (*terrafo for _, disk := range instance.Disks { if disk.Boot { - is.Attributes["boot_disk.0.source"] = GetResourceNameFromSelfLink(disk.Source) + is.Attributes["boot_disk.0.source"] = tpgresource.GetResourceNameFromSelfLink(disk.Source) is.Attributes["boot_disk.0.device_name"] = disk.DeviceName break } @@ -310,7 +315,7 @@ func migrateStateV4toV5(is *terraform.InstanceState, meta interface{}) (*terrafo return is, nil } -func getInstanceFromInstanceState(config *Config, is *terraform.InstanceState) (*compute.Instance, error) { +func getInstanceFromInstanceState(config *transport_tpg.Config, is *terraform.InstanceState) (*compute.Instance, error) { project, ok := is.Attributes["project"] if !ok { if config.Project == "" { @@ -338,7 +343,7 @@ func getInstanceFromInstanceState(config *Config, is *terraform.InstanceState) ( return instance, nil } -func getAllDisksFromInstanceState(config *Config, is *terraform.InstanceState) ([]*compute.Disk, error) { +func getAllDisksFromInstanceState(config *transport_tpg.Config, is *terraform.InstanceState) ([]*compute.Disk, error) { project, ok := is.Attributes["project"] if !ok { if config.Project == "" { @@ -374,7 +379,7 @@ func getAllDisksFromInstanceState(config *Config, is *terraform.InstanceState) ( return diskList, nil } -func getDiskFromAttributes(config *Config, instance *compute.Instance, allDisks map[string]*compute.Disk, attributes map[string]string, i int) (*compute.AttachedDisk, error) { +func getDiskFromAttributes(config *transport_tpg.Config, instance *compute.Instance, allDisks 
map[string]*compute.Disk, attributes map[string]string, i int) (*compute.AttachedDisk, error) { if diskSource := attributes[fmt.Sprintf("disk.%d.disk", i)]; diskSource != "" { return getDiskFromSource(instance, diskSource) } @@ -447,8 +452,8 @@ func getDiskFromEncryptionKey(instance *compute.Instance, encryptionKey string) return nil, fmt.Errorf("could not find attached disk with encryption hash %q", encryptionSha) } -func getDiskFromAutoDeleteAndImage(config *Config, instance *compute.Instance, allDisks map[string]*compute.Disk, autoDelete bool, image, project, zone string) (*compute.AttachedDisk, error) { - img, err := resolveImage(config, project, image, config.UserAgent) +func getDiskFromAutoDeleteAndImage(config *transport_tpg.Config, instance *compute.Instance, allDisks map[string]*compute.Disk, autoDelete bool, image, project, zone string) (*compute.AttachedDisk, error) { + img, err := ResolveImage(config, project, image, config.UserAgent) if err != nil { return nil, err } @@ -462,8 +467,8 @@ func getDiskFromAutoDeleteAndImage(config *Config, instance *compute.Instance, a } if disk.AutoDelete == autoDelete { // Read the disk to check if its image matches - fullDisk := allDisks[GetResourceNameFromSelfLink(disk.Source)] - sourceImage, err := getRelativePath(fullDisk.SourceImage) + fullDisk := allDisks[tpgresource.GetResourceNameFromSelfLink(disk.Source)] + sourceImage, err := tpgresource.GetRelativePath(fullDisk.SourceImage) if err != nil { return nil, err } @@ -487,8 +492,8 @@ func getDiskFromAutoDeleteAndImage(config *Config, instance *compute.Instance, a } if disk.AutoDelete == autoDelete { // Read the disk to check if its image matches - fullDisk := allDisks[GetResourceNameFromSelfLink(disk.Source)] - sourceImage, err := getRelativePath(fullDisk.SourceImage) + fullDisk := allDisks[tpgresource.GetResourceNameFromSelfLink(disk.Source)] + sourceImage, err := tpgresource.GetRelativePath(fullDisk.SourceImage) if err != nil { return nil, err } diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_sweeper.go new file mode 100644 index 0000000000..9ae4ef2fb5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_sweeper.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func init() { + sweeper.AddTestSweepers("ComputeInstance", testSweepComputeInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region. +// Since we can read all instances across zones, we don't really use this param. +func testSweepComputeInstance(region string) error { + resourceName := "ComputeInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + found, err := config.NewComputeClient(config.UserAgent).Instances.AggregatedList(config.Project).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request: %s", err) + return nil + } + + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for zone, itemList := range found.Items { + for _, instance := range itemList.Instances { + if !sweeper.IsSweepableTestResource(instance.Name) { + nonPrefixCount++ + continue + } + + // Don't wait on operations as we may have a lot to delete + _, err := config.NewComputeClient(config.UserAgent).Instances.Delete(config.Project, tpgresource.GetResourceNameFromSelfLink(zone), instance.Name).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting %s resource %s : %s", resourceName, instance.Name, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, instance.Name) + } + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_template.go similarity index 91% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_template.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_template.go index fd2b133a43..db6ebe5805 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_template.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_template.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "context" @@ -13,6 +15,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/compute/v1" ) @@ -68,7 +74,7 @@ func ResourceComputeInstanceTemplate() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: []string{"name_prefix"}, - ValidateFunc: validateGCEName, + ValidateFunc: verify.ValidateGCEName, Description: `The name of the instance template. If you leave this blank, Terraform will auto-generate a unique name.`, }, @@ -273,7 +279,7 @@ Google Cloud KMS.`, Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Description: `The self link of the encryption key that is stored in Google Cloud KMS.`, }, }, @@ -288,7 +294,7 @@ Google Cloud KMS.`, Description: `A list (short name or id) of resource policies to attach to this disk. Currently a max of 1 resource policy is supported.`, Elem: &schema.Schema{ Type: schema.TypeString, - DiffSuppressFunc: compareResourceNames, + DiffSuppressFunc: tpgresource.CompareResourceNames, }, }, }, @@ -343,6 +349,24 @@ Google Cloud KMS.`, Computed: true, Description: `The unique fingerprint of the metadata.`, }, + "network_performance_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Configures network performance settings for the instance. 
If not specified, the instance will be created with its default network performance configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "total_egress_bandwidth_tier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"TIER_1", "DEFAULT"}, false), + Description: `The egress bandwidth tier to enable. Possible values:TIER_1, DEFAULT`, + }, + }, + }, + }, "network_interface": { Type: schema.TypeList, Optional: true, @@ -355,7 +379,7 @@ Google Cloud KMS.`, Optional: true, ForceNew: true, Computed: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name or self_link of the network to attach this interface to. Use network attribute for Legacy or Auto subnetted networks and subnetwork for custom subnetted networks.`, }, @@ -364,7 +388,7 @@ Google Cloud KMS.`, Optional: true, ForceNew: true, Computed: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name of the subnetwork to attach this interface to. The subnetwork must exist in the same region this instance will be created in. Either network or subnetwork must be provided.`, }, @@ -437,7 +461,7 @@ Google Cloud KMS.`, Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: ipCidrRangeDiffSuppress, + DiffSuppressFunc: tpgresource.IpCidrRangeDiffSuppress, Description: `The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. At the time of writing only a netmask (e.g. 
/24) may be supplied, with a CIDR format resulting in an API error.`, }, "subnetwork_range_name": { @@ -563,7 +587,7 @@ Google Cloud KMS.`, AtLeastOneOf: schedulingInstTemplateKeys, ForceNew: true, Elem: instanceSchedulingNodeAffinitiesElemSchema(), - DiffSuppressFunc: emptyOrDefaultStringSuppress(""), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress(""), Description: `Specifies node affinities or anti-affinities to determine which sole-tenant nodes your instances and managed instance groups will use as host systems.`, }, "min_node_cpus": { @@ -597,6 +621,12 @@ Google Cloud KMS.`, Description: `The URI of the created resource.`, }, + "self_link_unique": { + Type: schema.TypeString, + Computed: true, + Description: `A special URI of the created resource that uniquely identifies this instance template.`, + }, + "service_account": { Type: schema.TypeList, MaxItems: 1, @@ -621,10 +651,10 @@ Google Cloud KMS.`, Elem: &schema.Schema{ Type: schema.TypeString, StateFunc: func(v interface{}) string { - return canonicalizeServiceScope(v.(string)) + return tpgresource.CanonicalizeServiceScope(v.(string)) }, }, - Set: stringScopeHashcode, + Set: tpgresource.StringScopeHashcode, }, }, }, @@ -639,7 +669,7 @@ Google Cloud KMS.`, // Since this block is used by the API based on which // image being used, the field needs to be marked as Computed. Computed: true, - DiffSuppressFunc: emptyOrDefaultStringSuppress(""), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress(""), Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enable_secure_boot": { @@ -737,7 +767,7 @@ Google Cloud KMS.`, Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The accelerator type resource to expose to this instance. E.g. 
nvidia-tesla-k80.`, }, }, @@ -783,7 +813,7 @@ Google Cloud KMS.`, Description: `A list of self_links of resource policies to attach to the instance. Currently a max of 1 resource policy is supported.`, Elem: &schema.Schema{ Type: schema.TypeString, - DiffSuppressFunc: compareResourceNames, + DiffSuppressFunc: tpgresource.CompareResourceNames, }, }, @@ -837,7 +867,7 @@ Google Cloud KMS.`, } func resourceComputeInstanceTemplateSourceImageCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) numDisks := diff.Get("disk.#").(int) for i := 0; i < numDisks; i++ { @@ -851,11 +881,11 @@ func resourceComputeInstanceTemplateSourceImageCustomizeDiff(_ context.Context, // project must be retrieved once we know there is a diff to resolve, otherwise it will // attempt to retrieve project during `plan` before all calculated fields are ready // see https://github.com/hashicorp/terraform-provider-google/issues/2878 - project, err := getProjectFromDiff(diff, config) + project, err := tpgresource.GetProjectFromDiff(diff, config) if err != nil { return err } - oldResolved, err := resolveImage(config, project, old.(string), config.UserAgent) + oldResolved, err := ResolveImage(config, project, old.(string), config.UserAgent) if err != nil { return err } @@ -863,7 +893,7 @@ func resourceComputeInstanceTemplateSourceImageCustomizeDiff(_ context.Context, if err != nil { return err } - newResolved, err := resolveImage(config, project, new.(string), config.UserAgent) + newResolved, err := ResolveImage(config, project, new.(string), config.UserAgent) if err != nil { return err } @@ -888,7 +918,7 @@ func resourceComputeInstanceTemplateScratchDiskCustomizeDiff(_ context.Context, return resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(diff) } -func resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(diff TerraformResourceDiff) error { +func 
resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(diff tpgresource.TerraformResourceDiff) error { numDisks := diff.Get("disk.#").(int) for i := 0; i < numDisks; i++ { // misspelled on purpose, type is a special symbol @@ -930,13 +960,13 @@ func resourceComputeInstanceTemplateBootDiskCustomizeDiff(_ context.Context, dif return nil } -func buildDisks(d *schema.ResourceData, config *Config) ([]*compute.AttachedDisk, error) { - project, err := getProject(d, config) +func buildDisks(d *schema.ResourceData, config *transport_tpg.Config) ([]*compute.AttachedDisk, error) { + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } @@ -969,7 +999,10 @@ func buildDisks(d *schema.ResourceData, config *Config) ([]*compute.AttachedDisk disk.DiskEncryptionKey.KmsKeyName = v.(string) } } - + // Assign disk.DiskSizeGb and disk.InitializeParams.DiskSizeGb the same value + if v, ok := d.GetOk(prefix + ".disk_size_gb"); ok { + disk.DiskSizeGb = int64(v.(int)) + } if v, ok := d.GetOk(prefix + ".source"); ok { disk.Source = v.(string) conflicts := []string{"disk_size_gb", "disk_name", "disk_type", "source_image", "source_snapshot", "labels"} @@ -984,6 +1017,7 @@ func buildDisks(d *schema.ResourceData, config *Config) ([]*compute.AttachedDisk if v, ok := d.GetOk(prefix + ".disk_name"); ok { disk.InitializeParams.DiskName = v.(string) } + // Assign disk.DiskSizeGb and disk.InitializeParams.DiskSizeGb the same value if v, ok := d.GetOk(prefix + ".disk_size_gb"); ok { disk.InitializeParams.DiskSizeGb = int64(v.(int)) } @@ -992,11 +1026,11 @@ func buildDisks(d *schema.ResourceData, config *Config) ([]*compute.AttachedDisk disk.InitializeParams.DiskType = v.(string) } - disk.InitializeParams.Labels = expandStringMap(d, prefix+".labels") + disk.InitializeParams.Labels = 
tpgresource.ExpandStringMap(d, prefix+".labels") if v, ok := d.GetOk(prefix + ".source_image"); ok { imageName := v.(string) - imageUrl, err := resolveImage(config, project, imageName, userAgent) + imageUrl, err := ResolveImage(config, project, imageName, userAgent) if err != nil { return nil, fmt.Errorf( "Error resolving image name '%s': %s", @@ -1062,7 +1096,7 @@ func buildDisks(d *schema.ResourceData, config *Config) ([]*compute.AttachedDisk // 'zones/us-east1-b/acceleratorTypes/nvidia-tesla-k80'. // Accelerator type 'zones/us-east1-b/acceleratorTypes/nvidia-tesla-k80' // must be a valid resource name (not an url). -func expandInstanceTemplateGuestAccelerators(d TerraformResourceData, config *Config) []*compute.AcceleratorConfig { +func expandInstanceTemplateGuestAccelerators(d tpgresource.TerraformResourceData, config *transport_tpg.Config) []*compute.AcceleratorConfig { configs, ok := d.GetOk("guest_accelerator") if !ok { return nil @@ -1085,18 +1119,18 @@ func expandInstanceTemplateGuestAccelerators(d TerraformResourceData, config *Co return guestAccelerators } -func expandInstanceTemplateResourcePolicies(d TerraformResourceData, dataKey string) []string { - return convertAndMapStringArr(d.Get(dataKey).([]interface{}), GetResourceNameFromSelfLink) +func expandInstanceTemplateResourcePolicies(d tpgresource.TerraformResourceData, dataKey string) []string { + return tpgresource.ConvertAndMapStringArr(d.Get(dataKey).([]interface{}), tpgresource.GetResourceNameFromSelfLink) } func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -1120,6 +1154,10 @@ func 
resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac if err != nil { return err } + networkPerformanceConfig, err := expandNetworkPerformanceConfig(d, config) + if err != nil { + return nil + } reservationAffinity, err := expandReservationAffinity(d) if err != nil { return err @@ -1135,6 +1173,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac Disks: disks, Metadata: metadata, NetworkInterfaces: networks, + NetworkPerformanceConfig: networkPerformanceConfig, Scheduling: scheduling, ServiceAccounts: expandServiceAccounts(d.Get("service_account").([]interface{})), Tags: resourceInstanceTags(d), @@ -1146,7 +1185,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac } if _, ok := d.GetOk("labels"); ok { - instanceProperties.Labels = expandLabels(d) + instanceProperties.Labels = tpgresource.ExpandLabels(d) } var itName string @@ -1170,6 +1209,8 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac // Store the ID now d.SetId(fmt.Sprintf("projects/%s/global/instanceTemplates/%s", project, instanceTemplate.Name)) + // And also the unique ID + d.Set("self_link_unique", fmt.Sprintf("%v?uniqueId=%v", d.Id(), op.TargetId)) err = ComputeOperationWaitTime(config, op, project, "Creating Instance Template", userAgent, d.Timeout(schema.TimeoutCreate)) if err != nil { @@ -1237,10 +1278,13 @@ func flattenDisk(disk *compute.AttachedDisk, configDisk map[string]any, defaultP diskMap["disk_type"] = disk.InitializeParams.DiskType diskMap["disk_name"] = disk.InitializeParams.DiskName diskMap["labels"] = disk.InitializeParams.Labels - // The API does not return a disk size value for scratch disks. They can only be one size, - // so we can assume that size here. - if disk.InitializeParams.DiskSizeGb == 0 && disk.Type == "SCRATCH" { + // The API does not return a disk size value for scratch disks. They are largely only one size, + // so we can assume that size here. 
Prefer disk.DiskSizeGb over the deprecated + // disk.InitializeParams.DiskSizeGb. + if disk.DiskSizeGb == 0 && disk.InitializeParams.DiskSizeGb == 0 && disk.Type == "SCRATCH" { diskMap["disk_size_gb"] = DEFAULT_SCRATCH_DISK_SIZE_GB + } else if disk.DiskSizeGb != 0 { + diskMap["disk_size_gb"] = disk.DiskSizeGb } else { diskMap["disk_size_gb"] = disk.InitializeParams.DiskSizeGb } @@ -1259,7 +1303,7 @@ func flattenDisk(disk *compute.AttachedDisk, configDisk map[string]any, defaultP diskMap["boot"] = disk.Boot diskMap["device_name"] = disk.DeviceName diskMap["interface"] = disk.Interface - diskMap["source"] = ConvertSelfLinkToV1(disk.Source) + diskMap["source"] = tpgresource.ConvertSelfLinkToV1(disk.Source) diskMap["mode"] = disk.Mode diskMap["type"] = disk.Type @@ -1400,23 +1444,27 @@ func flattenDisks(disks []*compute.AttachedDisk, d *schema.ResourceData, default } func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - splits := strings.Split(d.Id(), "/") + idStr := d.Id() + if v, ok := d.GetOk("self_link_unique"); ok && v != "" { + idStr = ConvertToUniqueIdWhenPresent(v.(string)) + } + + splits := strings.Split(idStr, "/") instanceTemplate, err := config.NewComputeClient(userAgent).InstanceTemplates.Get(project, splits[len(splits)-1]).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string))) } - // Set the metadata fingerprint if there is one. 
if instanceTemplate.Properties.Metadata != nil { if err = d.Set("metadata_fingerprint", instanceTemplate.Properties.Metadata.Fingerprint); err != nil { @@ -1458,6 +1506,9 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ if err = d.Set("self_link", instanceTemplate.SelfLink); err != nil { return fmt.Errorf("Error setting self_link: %s", err) } + if err = d.Set("self_link_unique", fmt.Sprintf("%v?uniqueId=%v", instanceTemplate.SelfLink, instanceTemplate.Id)); err != nil { + return fmt.Errorf("Error setting self_link_unique: %s", err) + } if err = d.Set("name", instanceTemplate.Name); err != nil { return fmt.Errorf("Error setting name: %s", err) } @@ -1490,6 +1541,9 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ if err = d.Set("project", project); err != nil { return fmt.Errorf("Error setting project: %s", err) } + if err := d.Set("network_performance_config", flattenNetworkPerformanceConfig(instanceTemplate.Properties.NetworkPerformanceConfig)); err != nil { + return err + } if instanceTemplate.Properties.NetworkInterfaces != nil { networkInterfaces, region, _, _, err := flattenNetworkInterfaces(d, config, instanceTemplate.Properties.NetworkInterfaces) if err != nil { @@ -1563,13 +1617,13 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ } func resourceComputeInstanceTemplateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -1615,13 +1669,13 @@ func expandResourceComputeInstanceTemplateScheduling(d *schema.ResourceData, met } func resourceComputeInstanceTemplateImportState(d *schema.ResourceData, 
meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{"projects/(?P[^/]+)/global/instanceTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/global/instanceTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/instanceTemplates/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/instanceTemplates/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_template_migrate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_template_migrate.go similarity index 96% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_template_migrate.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_template_migrate.go index df648d62db..f30d54d834 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_instance_template_migrate.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_template_migrate.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_template_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_template_sweeper.go new file mode 100644 index 0000000000..5a5299d992 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_instance_template_sweeper.go @@ -0,0 +1,70 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +// This will sweep Compute Instance Templates +func init() { + sweeper.AddTestSweepers("ComputeInstanceTemplate", testSweepComputeInstanceTemplate) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeInstanceTemplate(region string) error { + resourceName := "ComputeInstanceTemplate" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + instanceTemplates, err := config.NewComputeClient(config.UserAgent).InstanceTemplates.List(config.Project).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request instance templates LIST: %s", err) + return nil + } + + numTemplates := len(instanceTemplates.Items) + if numTemplates == 0 { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + log.Printf("[INFO][SWEEPER_LOG] Found %d 
items in %s list response.", numTemplates, resourceName) + // Count items that weren't sweeped. + nonPrefixCount := 0 + for _, instanceTemplate := range instanceTemplates.Items { + // Increment count and skip if resource is not sweepable. + if !sweeper.IsSweepableTestResource(instanceTemplate.Name) { + nonPrefixCount++ + continue + } + + // Don't wait on operations as we may have a lot to delete + _, err := config.NewComputeClient(config.UserAgent).InstanceTemplates.Delete(config.Project, instanceTemplate.Name).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting instance template: %s", instanceTemplate.Name) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, instanceTemplate.Name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items without tf-test prefix remain.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_interconnect_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_interconnect_attachment.go new file mode 100644 index 0000000000..54477f31ea --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_interconnect_attachment.go @@ -0,0 +1,941 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +// waitForAttachmentToBeProvisioned waits for an attachment to leave the +// "UNPROVISIONED" state, to indicate that it's either ready or awaiting partner +// activity. +func waitForAttachmentToBeProvisioned(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { + return resource.Retry(timeout, func() *resource.RetryError { + if err := resourceComputeInterconnectAttachmentRead(d, config); err != nil { + return resource.NonRetryableError(err) + } + + name := d.Get("name").(string) + state := d.Get("state").(string) + if state == "UNPROVISIONED" { + return resource.RetryableError(fmt.Errorf("InterconnectAttachment %q has state %q.", name, state)) + } + log.Printf("InterconnectAttachment %q has state %q.", name, state) + return nil + }) +} + +func ResourceComputeInterconnectAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInterconnectAttachmentCreate, + Read: resourceComputeInterconnectAttachmentRead, + Update: resourceComputeInterconnectAttachmentUpdate, + Delete: resourceComputeInterconnectAttachmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeInterconnectAttachmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: 
true, + ValidateFunc: verify.ValidateRegexp(`^[a-z]([-a-z0-9]*[a-z0-9])?$`), + Description: `Name of the resource. Provided by the client when the resource is created. The +name must be 1-63 characters long, and comply with RFC1035. Specifically, the +name must be 1-63 characters long and match the regular expression +'[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a +lowercase letter, and all following characters must be a dash, lowercase +letter, or digit, except the last character, which cannot be a dash.`, + }, + "router": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the cloud router to be used for dynamic routing. This router must be in +the same region as this InterconnectAttachment. The InterconnectAttachment will +automatically connect the Interconnect to the network & region within which the +Cloud Router is configured.`, + }, + "admin_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether the VLAN attachment is enabled or disabled. When using +PARTNER type this will Pre-Activate the interconnect attachment`, + Default: true, + }, + "bandwidth": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"BPS_50M", "BPS_100M", "BPS_200M", "BPS_300M", "BPS_400M", "BPS_500M", "BPS_1G", "BPS_2G", "BPS_5G", "BPS_10G", "BPS_20G", "BPS_50G", ""}), + Description: `Provisioned bandwidth capacity for the interconnect attachment. +For attachments of type DEDICATED, the user can set the bandwidth. +For attachments of type PARTNER, the Google Partner that is operating the interconnect must set the bandwidth. 
+Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, +Defaults to BPS_10G Possible values: ["BPS_50M", "BPS_100M", "BPS_200M", "BPS_300M", "BPS_400M", "BPS_500M", "BPS_1G", "BPS_2G", "BPS_5G", "BPS_10G", "BPS_20G", "BPS_50G"]`, + }, + "candidate_subnets": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Up to 16 candidate prefixes that can be used to restrict the allocation +of cloudRouterIpAddress and customerRouterIpAddress for this attachment. +All prefixes must be within link-local address space (169.254.0.0/16) +and must be /29 or shorter (/28, /27, etc). Google will attempt to select +an unused /29 from the supplied candidate prefix(es). The request will +fail if all possible /29s are in use on Google's edge. If not supplied, +Google will randomly select an unused /29 from all of link-local space.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An optional description of this resource.`, + }, + "edge_availability_domain": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Desired availability domain for the attachment. Only available for type +PARTNER, at creation time. For improved reliability, customers should +configure a pair of attachments with one per availability domain. The +selected availability domain will be provided to the Partner via the +pairing key so that the provisioned circuit will lie in the specified +domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.`, + }, + "encryption": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"NONE", "IPSEC", ""}), + Description: `Indicates the user-supplied encryption option of this interconnect +attachment. Can only be specified at attachment creation for PARTNER or +DEDICATED attachments. 
+ +* NONE - This is the default value, which means that the VLAN attachment +carries unencrypted traffic. VMs are able to send traffic to, or receive +traffic from, such a VLAN attachment. + +* IPSEC - The VLAN attachment carries only encrypted traffic that is +encrypted by an IPsec device, such as an HA VPN gateway or third-party +IPsec VPN. VMs cannot directly send traffic to, or receive traffic from, +such a VLAN attachment. To use HA VPN over Cloud Interconnect, the VLAN +attachment must be created with this option. Default value: "NONE" Possible values: ["NONE", "IPSEC"]`, + Default: "NONE", + }, + "interconnect": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the underlying Interconnect object that this attachment's +traffic will traverse through. Required if type is DEDICATED, must not +be set if type is PARTNER.`, + }, + "ipsec_internal_addresses": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `URL of addresses that have been reserved for the interconnect attachment, +Used only for interconnect attachment that has the encryption option as +IPSEC. + +The addresses must be RFC 1918 IP address ranges. When creating HA VPN +gateway over the interconnect attachment, if the attachment is configured +to use an RFC 1918 IP address, then the VPN gateway's IP address will be +allocated from the IP address range specified here. + +For example, if the HA VPN gateway's interface 0 is paired to this +interconnect attachment, then an RFC 1918 IP address for the VPN gateway +interface 0 will be allocated from the IP address specified for this +interconnect attachment. 
+ +If this field is not specified for interconnect attachment that has +encryption option as IPSEC, later on when creating HA VPN gateway on this +interconnect attachment, the HA VPN gateway's IP address will be +allocated from regional external IP address pool.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + "mtu": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Maximum Transmission Unit (MTU), in bytes, of packets passing through +this interconnect attachment. Currently, only 1440 and 1500 are allowed. If not specified, the value will default to 1440.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Region where the regional interconnect attachment resides.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"DEDICATED", "PARTNER", "PARTNER_PROVIDER", ""}), + Description: `The type of InterconnectAttachment you wish to create. Defaults to +DEDICATED. Possible values: ["DEDICATED", "PARTNER", "PARTNER_PROVIDER"]`, + }, + "vlan_tag8021q": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. 
When +using PARTNER type this will be managed upstream.`, + }, + "cloud_router_ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `IPv4 address + prefix length to be configured on Cloud Router +Interface for this interconnect attachment.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "customer_router_ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `IPv4 address + prefix length to be configured on the customer +router subinterface for this interconnect attachment.`, + }, + "google_reference_id": { + Type: schema.TypeString, + Computed: true, + Description: `Google reference ID, to be used when raising support tickets with +Google or otherwise to debug backend connectivity issues.`, + }, + "pairing_key": { + Type: schema.TypeString, + Computed: true, + Description: `[Output only for type PARTNER. Not present for DEDICATED]. The opaque +identifier of an PARTNER attachment used to initiate provisioning with +a selected partner. Of the form "XXXXX/region/domain"`, + }, + "partner_asn": { + Type: schema.TypeString, + Computed: true, + Description: `[Output only for type PARTNER. Not present for DEDICATED]. Optional +BGP ASN for the router that should be supplied by a layer 3 Partner if +they configured BGP on behalf of the customer.`, + }, + "private_interconnect_info": { + Type: schema.TypeList, + Computed: true, + Description: `Information specific to an InterconnectAttachment. 
This property +is populated if the interconnect that this is attached to is of type DEDICATED.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tag8021q": { + Type: schema.TypeInt, + Computed: true, + Description: `802.1q encapsulation tag to be used for traffic between +Google and the customer, going to and from this network and region.`, + }, + }, + }, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `[Output Only] The current state of this attachment's functionality.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeInterconnectAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + adminEnabledProp, err := expandComputeInterconnectAttachmentAdminEnabled(d.Get("admin_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("admin_enabled"); ok || !reflect.DeepEqual(v, adminEnabledProp) { + obj["adminEnabled"] = adminEnabledProp + } + interconnectProp, err := expandComputeInterconnectAttachmentInterconnect(d.Get("interconnect"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("interconnect"); !tpgresource.IsEmptyValue(reflect.ValueOf(interconnectProp)) && (ok || !reflect.DeepEqual(v, interconnectProp)) { + obj["interconnect"] = interconnectProp + } + descriptionProp, err := expandComputeInterconnectAttachmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = 
descriptionProp + } + mtuProp, err := expandComputeInterconnectAttachmentMtu(d.Get("mtu"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("mtu"); !tpgresource.IsEmptyValue(reflect.ValueOf(mtuProp)) && (ok || !reflect.DeepEqual(v, mtuProp)) { + obj["mtu"] = mtuProp + } + bandwidthProp, err := expandComputeInterconnectAttachmentBandwidth(d.Get("bandwidth"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bandwidth"); !tpgresource.IsEmptyValue(reflect.ValueOf(bandwidthProp)) && (ok || !reflect.DeepEqual(v, bandwidthProp)) { + obj["bandwidth"] = bandwidthProp + } + edgeAvailabilityDomainProp, err := expandComputeInterconnectAttachmentEdgeAvailabilityDomain(d.Get("edge_availability_domain"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("edge_availability_domain"); !tpgresource.IsEmptyValue(reflect.ValueOf(edgeAvailabilityDomainProp)) && (ok || !reflect.DeepEqual(v, edgeAvailabilityDomainProp)) { + obj["edgeAvailabilityDomain"] = edgeAvailabilityDomainProp + } + typeProp, err := expandComputeInterconnectAttachmentType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + routerProp, err := expandComputeInterconnectAttachmentRouter(d.Get("router"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("router"); !tpgresource.IsEmptyValue(reflect.ValueOf(routerProp)) && (ok || !reflect.DeepEqual(v, routerProp)) { + obj["router"] = routerProp + } + nameProp, err := expandComputeInterconnectAttachmentName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + candidateSubnetsProp, err := 
expandComputeInterconnectAttachmentCandidateSubnets(d.Get("candidate_subnets"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("candidate_subnets"); !tpgresource.IsEmptyValue(reflect.ValueOf(candidateSubnetsProp)) && (ok || !reflect.DeepEqual(v, candidateSubnetsProp)) { + obj["candidateSubnets"] = candidateSubnetsProp + } + vlanTag8021qProp, err := expandComputeInterconnectAttachmentVlanTag8021q(d.Get("vlan_tag8021q"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("vlan_tag8021q"); !tpgresource.IsEmptyValue(reflect.ValueOf(vlanTag8021qProp)) && (ok || !reflect.DeepEqual(v, vlanTag8021qProp)) { + obj["vlanTag8021q"] = vlanTag8021qProp + } + ipsecInternalAddressesProp, err := expandComputeInterconnectAttachmentIpsecInternalAddresses(d.Get("ipsec_internal_addresses"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ipsec_internal_addresses"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipsecInternalAddressesProp)) && (ok || !reflect.DeepEqual(v, ipsecInternalAddressesProp)) { + obj["ipsecInternalAddresses"] = ipsecInternalAddressesProp + } + encryptionProp, err := expandComputeInterconnectAttachmentEncryption(d.Get("encryption"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionProp)) && (ok || !reflect.DeepEqual(v, encryptionProp)) { + obj["encryption"] = encryptionProp + } + regionProp, err := expandComputeInterconnectAttachmentRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/interconnectAttachments") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new 
InterconnectAttachment: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for InterconnectAttachment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating InterconnectAttachment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating InterconnectAttachment", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create InterconnectAttachment: %s", err) + } + + if err := waitForAttachmentToBeProvisioned(d, config, d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("Error waiting for InterconnectAttachment %q to be provisioned: %q", d.Get("name").(string), err) + } + + log.Printf("[DEBUG] Finished creating InterconnectAttachment %q: %#v", d.Id(), res) + + return resourceComputeInterconnectAttachmentRead(d, meta) +} + +func resourceComputeInterconnectAttachmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for InterconnectAttachment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeInterconnectAttachment %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + + if err := d.Set("admin_enabled", flattenComputeInterconnectAttachmentAdminEnabled(res["adminEnabled"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("cloud_router_ip_address", flattenComputeInterconnectAttachmentCloudRouterIpAddress(res["cloudRouterIpAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("customer_router_ip_address", flattenComputeInterconnectAttachmentCustomerRouterIpAddress(res["customerRouterIpAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("interconnect", flattenComputeInterconnectAttachmentInterconnect(res["interconnect"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("description", flattenComputeInterconnectAttachmentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading 
InterconnectAttachment: %s", err) + } + if err := d.Set("mtu", flattenComputeInterconnectAttachmentMtu(res["mtu"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("bandwidth", flattenComputeInterconnectAttachmentBandwidth(res["bandwidth"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("edge_availability_domain", flattenComputeInterconnectAttachmentEdgeAvailabilityDomain(res["edgeAvailabilityDomain"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("pairing_key", flattenComputeInterconnectAttachmentPairingKey(res["pairingKey"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("partner_asn", flattenComputeInterconnectAttachmentPartnerAsn(res["partnerAsn"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("private_interconnect_info", flattenComputeInterconnectAttachmentPrivateInterconnectInfo(res["privateInterconnectInfo"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("type", flattenComputeInterconnectAttachmentType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("state", flattenComputeInterconnectAttachmentState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("google_reference_id", flattenComputeInterconnectAttachmentGoogleReferenceId(res["googleReferenceId"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("router", flattenComputeInterconnectAttachmentRouter(res["router"], d, config)); err != nil { + return fmt.Errorf("Error 
reading InterconnectAttachment: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeInterconnectAttachmentCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("name", flattenComputeInterconnectAttachmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("vlan_tag8021q", flattenComputeInterconnectAttachmentVlanTag8021q(res["vlanTag8021q"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("ipsec_internal_addresses", flattenComputeInterconnectAttachmentIpsecInternalAddresses(res["ipsecInternalAddresses"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("encryption", flattenComputeInterconnectAttachmentEncryption(res["encryption"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("region", flattenComputeInterconnectAttachmentRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading InterconnectAttachment: %s", err) + } + + return nil +} + +func resourceComputeInterconnectAttachmentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for InterconnectAttachment: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + adminEnabledProp, err := 
expandComputeInterconnectAttachmentAdminEnabled(d.Get("admin_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("admin_enabled"); ok || !reflect.DeepEqual(v, adminEnabledProp) { + obj["adminEnabled"] = adminEnabledProp + } + descriptionProp, err := expandComputeInterconnectAttachmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + mtuProp, err := expandComputeInterconnectAttachmentMtu(d.Get("mtu"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("mtu"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mtuProp)) { + obj["mtu"] = mtuProp + } + bandwidthProp, err := expandComputeInterconnectAttachmentBandwidth(d.Get("bandwidth"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bandwidth"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bandwidthProp)) { + obj["bandwidth"] = bandwidthProp + } + regionProp, err := expandComputeInterconnectAttachmentRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating InterconnectAttachment %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + 
Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating InterconnectAttachment %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating InterconnectAttachment %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating InterconnectAttachment", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeInterconnectAttachmentRead(d, meta) +} + +func resourceComputeInterconnectAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for InterconnectAttachment: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + if err := waitForAttachmentToBeProvisioned(d, config, d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("Error waiting for InterconnectAttachment %q to be provisioned: %q", d.Get("name").(string), err) + } + log.Printf("[DEBUG] Deleting InterconnectAttachment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return 
transport_tpg.HandleNotFoundError(err, d, "InterconnectAttachment")
+	}
+
+	err = ComputeOperationWaitTime(
+		config, res, project, "Deleting InterconnectAttachment", userAgent,
+		d.Timeout(schema.TimeoutDelete))
+
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] Finished deleting InterconnectAttachment %q: %#v", d.Id(), res)
+	return nil
+}
+
+func resourceComputeInterconnectAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+	if err := tpgresource.ParseImportId([]string{
+		"projects/(?P<project>[^/]+)/regions/(?P<region>[^/]+)/interconnectAttachments/(?P<name>[^/]+)",
+		"(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<name>[^/]+)",
+		"(?P<region>[^/]+)/(?P<name>[^/]+)",
+		"(?P<name>[^/]+)",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func flattenComputeInterconnectAttachmentAdminEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenComputeInterconnectAttachmentCloudRouterIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenComputeInterconnectAttachmentCustomerRouterIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenComputeInterconnectAttachmentInterconnect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenComputeInterconnectAttachmentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenComputeInterconnectAttachmentMtu(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ + // Handles int given in float64 format + if floatVal, ok := v.(float64); ok { + return fmt.Sprintf("%d", int(floatVal)) + } + return v +} + +func flattenComputeInterconnectAttachmentBandwidth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeInterconnectAttachmentEdgeAvailabilityDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeInterconnectAttachmentPairingKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeInterconnectAttachmentPartnerAsn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeInterconnectAttachmentPrivateInterconnectInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["tag8021q"] = + flattenComputeInterconnectAttachmentPrivateInterconnectInfoTag8021q(original["tag8021q"], d, config) + return []interface{}{transformed} +} +func flattenComputeInterconnectAttachmentPrivateInterconnectInfoTag8021q(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeInterconnectAttachmentType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeInterconnectAttachmentState(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeInterconnectAttachmentGoogleReferenceId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeInterconnectAttachmentRouter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeInterconnectAttachmentCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeInterconnectAttachmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeInterconnectAttachmentVlanTag8021q(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeInterconnectAttachmentIpsecInternalAddresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeInterconnectAttachmentEncryption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "NONE" + } + + return v +} + +func flattenComputeInterconnectAttachmentRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func 
expandComputeInterconnectAttachmentAdminEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeInterconnectAttachmentInterconnect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeInterconnectAttachmentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeInterconnectAttachmentMtu(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeInterconnectAttachmentBandwidth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeInterconnectAttachmentEdgeAvailabilityDomain(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeInterconnectAttachmentType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeInterconnectAttachmentRouter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("routers", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for router: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeInterconnectAttachmentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeInterconnectAttachmentCandidateSubnets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandComputeInterconnectAttachmentVlanTag8021q(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeInterconnectAttachmentIpsecInternalAddresses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for ipsec_internal_addresses: nil") + } + f, err := tpgresource.ParseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for ipsec_internal_addresses: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func expandComputeInterconnectAttachmentEncryption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeInterconnectAttachmentRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_managed_ssl_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_managed_ssl_certificate.go new file mode 100644 index 0000000000..f9e8256a44 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_managed_ssl_certificate.go @@ -0,0 +1,464 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeManagedSslCertificate() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeManagedSslCertificateCreate, + Read: resourceComputeManagedSslCertificateRead, + Delete: resourceComputeManagedSslCertificateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeManagedSslCertificateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "managed": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Properties relevant to a managed certificate. 
These will be used if the +certificate is managed (as indicated by a value of 'MANAGED' in 'type').`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "domains": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.AbsoluteDomainSuppress, + Description: `Domains for which a managed SSL certificate will be valid. Currently, +there can be up to 100 domains in this list.`, + MaxItems: 100, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash. + + +These are in the same namespace as the managed SSL certificates.`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"MANAGED", ""}), + Description: `Enum field whose value is always 'MANAGED' - used to signal to the API +which type this is. 
Default value: "MANAGED" Possible values: ["MANAGED"]`, + Default: "MANAGED", + }, + "certificate_id": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The unique identifier for the resource.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "expire_time": { + Type: schema.TypeString, + Computed: true, + Description: `Expire time of the certificate in RFC3339 text format.`, + }, + "subject_alternative_names": { + Type: schema.TypeList, + Computed: true, + Description: `Domains associated with the certificate via Subject Alternative Name.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeManagedSslCertificateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeManagedSslCertificateDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeManagedSslCertificateName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + managedProp, err := expandComputeManagedSslCertificateManaged(d.Get("managed"), d, config) + if err != nil { + return err + } else if v, ok 
:= d.GetOkExists("managed"); !tpgresource.IsEmptyValue(reflect.ValueOf(managedProp)) && (ok || !reflect.DeepEqual(v, managedProp)) { + obj["managed"] = managedProp + } + typeProp, err := expandComputeManagedSslCertificateType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ManagedSslCertificate: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ManagedSslCertificate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ManagedSslCertificate: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/sslCertificates/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating ManagedSslCertificate", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create ManagedSslCertificate: %s", err) + } + + log.Printf("[DEBUG] Finished creating ManagedSslCertificate %q: %#v", d.Id(), res) + + return 
resourceComputeManagedSslCertificateRead(d, meta) +} + +func resourceComputeManagedSslCertificateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ManagedSslCertificate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeManagedSslCertificate %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeManagedSslCertificateCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) + } + if err := d.Set("description", flattenComputeManagedSslCertificateDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) + } + if err := d.Set("certificate_id", flattenComputeManagedSslCertificateCertificateId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) + } + if err := d.Set("name", flattenComputeManagedSslCertificateName(res["name"], d, config)); err != nil { + 
return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) + } + if err := d.Set("managed", flattenComputeManagedSslCertificateManaged(res["managed"], d, config)); err != nil { + return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) + } + if err := d.Set("type", flattenComputeManagedSslCertificateType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) + } + if err := d.Set("subject_alternative_names", flattenComputeManagedSslCertificateSubjectAlternativeNames(res["subjectAlternativeNames"], d, config)); err != nil { + return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) + } + if err := d.Set("expire_time", flattenComputeManagedSslCertificateExpireTime(res["expireTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading ManagedSslCertificate: %s", err) + } + + return nil +} + +func resourceComputeManagedSslCertificateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ManagedSslCertificate: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting ManagedSslCertificate %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ManagedSslCertificate") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting ManagedSslCertificate", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ManagedSslCertificate %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeManagedSslCertificateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/sslCertificates/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/sslCertificates/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeManagedSslCertificateCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeManagedSslCertificateDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeManagedSslCertificateCertificateId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return 
intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeManagedSslCertificateName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeManagedSslCertificateManaged(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["domains"] = + flattenComputeManagedSslCertificateManagedDomains(original["domains"], d, config) + return []interface{}{transformed} +} +func flattenComputeManagedSslCertificateManagedDomains(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeManagedSslCertificateType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeManagedSslCertificateSubjectAlternativeNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeManagedSslCertificateExpireTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeManagedSslCertificateDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeManagedSslCertificateName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeManagedSslCertificateManaged(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDomains, err := 
expandComputeManagedSslCertificateManagedDomains(original["domains"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDomains); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["domains"] = transformedDomains + } + + return transformed, nil +} + +func expandComputeManagedSslCertificateManagedDomains(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeManagedSslCertificateType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_managed_ssl_certificate_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_managed_ssl_certificate_sweeper.go new file mode 100644 index 0000000000..0d195fe04d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_managed_ssl_certificate_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeManagedSslCertificate", testSweepComputeManagedSslCertificate) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeManagedSslCertificate(region string) error { + resourceName := "ComputeManagedSslCertificate" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/sslCertificates", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if 
err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/sslCertificates/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network.go new file mode 100644 index 0000000000..f3f2efcf7b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network.go @@ -0,0 +1,649 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "google.golang.org/api/googleapi" +) + +func ResourceComputeNetwork() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNetworkCreate, + Read: resourceComputeNetworkRead, + Update: resourceComputeNetworkUpdate, + Delete: resourceComputeNetworkDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeNetworkImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: 
verify.ValidateGCEName, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "auto_create_subnetworks": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `When set to 'true', the network is created in "auto subnet mode" and +it will create a subnet for each region automatically across the +'10.128.0.0/9' address range. + +When set to 'false', the network is created in "custom subnet mode" so +the user can explicitly connect subnetwork resources.`, + Default: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource. The resource must be +recreated to modify this field.`, + }, + "enable_ula_internal_ipv6": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Enable ULA internal ipv6 on this network. Enabling this feature will assign +a /48 from google defined ULA prefix fd20::/20.`, + }, + "internal_ipv6_range": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `When enabling ula internal ipv6, caller optionally can specify the /48 range +they want from the google defined ULA prefix fd20::/20. The input must be a +valid /48 ULA IPv6 address and must be within the fd20::/20. Operation will +fail if the speficied /48 is already in used by another resource. 
+If the field is not speficied, then a /48 range will be randomly allocated from fd20::/20 and returned via this field.`, + }, + "mtu": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Maximum Transmission Unit in bytes. The default value is 1460 bytes. +The minimum value for this field is 1300 and the maximum value is 8896 bytes (jumbo frames). +Note that packets larger than 1500 bytes (standard Ethernet) can be subject to TCP-MSS clamping or dropped +with an ICMP 'Fragmentation-Needed' message if the packets are routed to the Internet or other VPCs +with varying MTUs.`, + }, + "network_firewall_policy_enforcement_order": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"BEFORE_CLASSIC_FIREWALL", "AFTER_CLASSIC_FIREWALL", ""}), + Description: `Set the order that Firewall Rules and Firewall Policies are evaluated. Default value: "AFTER_CLASSIC_FIREWALL" Possible values: ["BEFORE_CLASSIC_FIREWALL", "AFTER_CLASSIC_FIREWALL"]`, + Default: "AFTER_CLASSIC_FIREWALL", + }, + "routing_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"REGIONAL", "GLOBAL", ""}), + Description: `The network-wide routing mode to use. If set to 'REGIONAL', this +network's cloud routers will only advertise routes with subnetworks +of this network in the same region as the router. If set to 'GLOBAL', +this network's cloud routers will advertise routes with all +subnetworks of this network, across regions. Possible values: ["REGIONAL", "GLOBAL"]`, + }, + + "gateway_ipv4": { + Type: schema.TypeString, + Computed: true, + Description: `The gateway address for default routing out of the network. This value +is selected by GCP.`, + }, + "delete_default_routes_on_create": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If set to 'true', default routes ('0.0.0.0/0') will be deleted +immediately after network creation. 
Defaults to 'false'.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeNetworkDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeNetworkName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + autoCreateSubnetworksProp, err := expandComputeNetworkAutoCreateSubnetworks(d.Get("auto_create_subnetworks"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("auto_create_subnetworks"); ok || !reflect.DeepEqual(v, autoCreateSubnetworksProp) { + obj["autoCreateSubnetworks"] = autoCreateSubnetworksProp + } + routingConfigProp, err := expandComputeNetworkRoutingConfig(nil, d, config) + if err != nil { + return err + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(routingConfigProp)) { + obj["routingConfig"] = routingConfigProp + } + mtuProp, err := expandComputeNetworkMtu(d.Get("mtu"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("mtu"); !tpgresource.IsEmptyValue(reflect.ValueOf(mtuProp)) && (ok || !reflect.DeepEqual(v, mtuProp)) { + obj["mtu"] = mtuProp + } + enableUlaInternalIpv6Prop, err := 
expandComputeNetworkEnableUlaInternalIpv6(d.Get("enable_ula_internal_ipv6"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_ula_internal_ipv6"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableUlaInternalIpv6Prop)) && (ok || !reflect.DeepEqual(v, enableUlaInternalIpv6Prop)) { + obj["enableUlaInternalIpv6"] = enableUlaInternalIpv6Prop + } + internalIpv6RangeProp, err := expandComputeNetworkInternalIpv6Range(d.Get("internal_ipv6_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("internal_ipv6_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(internalIpv6RangeProp)) && (ok || !reflect.DeepEqual(v, internalIpv6RangeProp)) { + obj["internalIpv6Range"] = internalIpv6RangeProp + } + networkFirewallPolicyEnforcementOrderProp, err := expandComputeNetworkNetworkFirewallPolicyEnforcementOrder(d.Get("network_firewall_policy_enforcement_order"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network_firewall_policy_enforcement_order"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkFirewallPolicyEnforcementOrderProp)) && (ok || !reflect.DeepEqual(v, networkFirewallPolicyEnforcementOrderProp)) { + obj["networkFirewallPolicyEnforcementOrder"] = networkFirewallPolicyEnforcementOrderProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Network: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Network: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: 
billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Network: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/networks/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating Network", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Network: %s", err) + } + + if d.Get("delete_default_routes_on_create").(bool) { + token := "" + for paginate := true; paginate; { + network, err := config.NewComputeClient(userAgent).Networks.Get(project, d.Get("name").(string)).Do() + if err != nil { + return fmt.Errorf("Error finding network in proj: %s", err) + } + filter := fmt.Sprintf("(network=\"%s\") AND (destRange=\"0.0.0.0/0\")", network.SelfLink) + log.Printf("[DEBUG] Getting routes for network %q with filter '%q'", d.Get("name").(string), filter) + resp, err := config.NewComputeClient(userAgent).Routes.List(project).Filter(filter).Do() + if err != nil { + return fmt.Errorf("Error listing routes in proj: %s", err) + } + + log.Printf("[DEBUG] Found %d routes rules in %q network", len(resp.Items), d.Get("name").(string)) + + for _, route := range resp.Items { + op, err := config.NewComputeClient(userAgent).Routes.Delete(project, route.Name).Do() + if err != nil { + return fmt.Errorf("Error deleting route: %s", err) + } + err = ComputeOperationWaitTime(config, op, project, "Deleting Route", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + } + + token = resp.NextPageToken + paginate = token != "" + } + } + + log.Printf("[DEBUG] Finished creating Network %q: %#v", d.Id(), res) + + return resourceComputeNetworkRead(d, meta) +} + +func 
resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Network: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeNetwork %q", d.Id())) + } + + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("delete_default_routes_on_create"); !ok { + if err := d.Set("delete_default_routes_on_create", false); err != nil { + return fmt.Errorf("Error setting delete_default_routes_on_create: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Network: %s", err) + } + + if err := d.Set("description", flattenComputeNetworkDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Network: %s", err) + } + if err := d.Set("gateway_ipv4", flattenComputeNetworkGatewayIpv4(res["gatewayIPv4"], d, config)); err != nil { + return fmt.Errorf("Error reading Network: %s", err) + } + if err := d.Set("name", flattenComputeNetworkName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Network: %s", err) + } + if err := d.Set("auto_create_subnetworks", 
flattenComputeNetworkAutoCreateSubnetworks(res["autoCreateSubnetworks"], d, config)); err != nil { + return fmt.Errorf("Error reading Network: %s", err) + } + // Terraform must set the top level schema field, but since this object contains collapsed properties + // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. + if flattenedProp := flattenComputeNetworkRoutingConfig(res["routingConfig"], d, config); flattenedProp != nil { + if gerr, ok := flattenedProp.(*googleapi.Error); ok { + return fmt.Errorf("Error reading Network: %s", gerr) + } + casted := flattenedProp.([]interface{})[0] + if casted != nil { + for k, v := range casted.(map[string]interface{}) { + if err := d.Set(k, v); err != nil { + return fmt.Errorf("Error setting %s: %s", k, err) + } + } + } + } + if err := d.Set("mtu", flattenComputeNetworkMtu(res["mtu"], d, config)); err != nil { + return fmt.Errorf("Error reading Network: %s", err) + } + if err := d.Set("enable_ula_internal_ipv6", flattenComputeNetworkEnableUlaInternalIpv6(res["enableUlaInternalIpv6"], d, config)); err != nil { + return fmt.Errorf("Error reading Network: %s", err) + } + if err := d.Set("internal_ipv6_range", flattenComputeNetworkInternalIpv6Range(res["internalIpv6Range"], d, config)); err != nil { + return fmt.Errorf("Error reading Network: %s", err) + } + if err := d.Set("network_firewall_policy_enforcement_order", flattenComputeNetworkNetworkFirewallPolicyEnforcementOrder(res["networkFirewallPolicyEnforcementOrder"], d, config)); err != nil { + return fmt.Errorf("Error reading Network: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading Network: %s", err) + } + + return nil +} + +func resourceComputeNetworkUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Network: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("routing_mode") || d.HasChange("network_firewall_policy_enforcement_order") { + obj := make(map[string]interface{}) + + routingConfigProp, err := expandComputeNetworkRoutingConfig(nil, d, config) + if err != nil { + return err + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(routingConfigProp)) { + obj["routingConfig"] = routingConfigProp + } + networkFirewallPolicyEnforcementOrderProp, err := expandComputeNetworkNetworkFirewallPolicyEnforcementOrder(d.Get("network_firewall_policy_enforcement_order"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network_firewall_policy_enforcement_order"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkFirewallPolicyEnforcementOrderProp)) { + obj["networkFirewallPolicyEnforcementOrder"] = networkFirewallPolicyEnforcementOrderProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Network %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Network %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Network", userAgent, + d.Timeout(schema.TimeoutUpdate)) + 
if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeNetworkRead(d, meta) +} + +func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Network: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Network %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Network") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting Network", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Network %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeNetworkImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/networks/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/global/networks/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Explicitly set virtual fields to default values on import + if err := d.Set("delete_default_routes_on_create", false); err != nil { + return nil, fmt.Errorf("Error setting delete_default_routes_on_create: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeNetworkDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkGatewayIpv4(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAutoCreateSubnetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkRoutingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["routing_mode"] = + flattenComputeNetworkRoutingConfigRoutingMode(original["routingMode"], d, config) + return []interface{}{transformed} +} +func flattenComputeNetworkRoutingConfigRoutingMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkMtu(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // 
let terraform core handle it otherwise +} + +func flattenComputeNetworkEnableUlaInternalIpv6(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkInternalIpv6Range(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkNetworkFirewallPolicyEnforcementOrder(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeNetworkDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkAutoCreateSubnetworks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkRoutingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + transformed := make(map[string]interface{}) + transformedRoutingMode, err := expandComputeNetworkRoutingConfigRoutingMode(d.Get("routing_mode"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRoutingMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["routingMode"] = transformedRoutingMode + } + + return transformed, nil +} + +func expandComputeNetworkRoutingConfigRoutingMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkMtu(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkEnableUlaInternalIpv6(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandComputeNetworkInternalIpv6Range(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkNetworkFirewallPolicyEnforcementOrder(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoint.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoint.go new file mode 100644 index 0000000000..f1c6abae7d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoint.go @@ -0,0 +1,506 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeNetworkEndpoint() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNetworkEndpointCreate, + Read: resourceComputeNetworkEndpointRead, + Delete: resourceComputeNetworkEndpointDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeNetworkEndpointImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `IPv4 address of network endpoint. The IP address must belong +to a VM in GCE (either the primary IP or as part of an aliased IP +range).`, + }, + "network_endpoint_group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The network endpoint group this endpoint is part of.`, + }, + "instance": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name for a specific VM instance that the IP address belongs to. +This is required for network endpoints of type GCE_VM_IP_PORT. +The instance must be in the same zone of network endpoint group.`, + }, + "port": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Port number of network endpoint. 
+**Note** 'port' is required unless the Network Endpoint Group is created +with the type of 'GCE_VM_IP'`, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Zone where the containing network endpoint group is located.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeNetworkEndpointCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + instanceProp, err := expandNestedComputeNetworkEndpointInstance(d.Get("instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceProp)) && (ok || !reflect.DeepEqual(v, instanceProp)) { + obj["instance"] = instanceProp + } + portProp, err := expandNestedComputeNetworkEndpointPort(d.Get("port"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("port"); !tpgresource.IsEmptyValue(reflect.ValueOf(portProp)) && (ok || !reflect.DeepEqual(v, portProp)) { + obj["port"] = portProp + } + ipAddressProp, err := expandNestedComputeNetworkEndpointIpAddress(d.Get("ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipAddressProp)) && (ok || !reflect.DeepEqual(v, ipAddressProp)) { + obj["ipAddress"] = ipAddressProp + } + + obj, err = resourceComputeNetworkEndpointEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}}") + if err != nil { + return err + } + 
transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/attachNetworkEndpoints") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new NetworkEndpoint: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkEndpoint: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating NetworkEndpoint: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{zone}}/{{network_endpoint_group}}/{{instance}}/{{ip_address}}/{{port}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating NetworkEndpoint", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create NetworkEndpoint: %s", err) + } + + log.Printf("[DEBUG] Finished creating NetworkEndpoint %q: %#v", d.Id(), res) + + return resourceComputeNetworkEndpointRead(d, meta) +} + +func resourceComputeNetworkEndpointRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/listNetworkEndpoints") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkEndpoint: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeNetworkEndpoint %q", d.Id())) + } + + res, err = flattenNestedComputeNetworkEndpoint(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing ComputeNetworkEndpoint because it couldn't be matched.") + d.SetId("") + return nil + } + + res, err = resourceComputeNetworkEndpointDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing ComputeNetworkEndpoint because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading NetworkEndpoint: %s", err) + } + + if err := d.Set("instance", flattenNestedComputeNetworkEndpointInstance(res["instance"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkEndpoint: %s", err) + } + if err := d.Set("port", flattenNestedComputeNetworkEndpointPort(res["port"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkEndpoint: %s", err) + } + if err := d.Set("ip_address", flattenNestedComputeNetworkEndpointIpAddress(res["ipAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkEndpoint: %s", err) + } + + return nil +} + +func resourceComputeNetworkEndpointDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkEndpoint: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/detachNetworkEndpoints") + if err != nil { + return err + } + + var obj map[string]interface{} + toDelete := make(map[string]interface{}) + instanceProp, err := expandNestedComputeNetworkEndpointInstance(d.Get("instance"), d, config) + if err != nil { + return err + } + if instanceProp != "" { + toDelete["instance"] = 
instanceProp + } + + portProp, err := expandNestedComputeNetworkEndpointPort(d.Get("port"), d, config) + if err != nil { + return err + } + if portProp != 0 { + toDelete["port"] = portProp + } + + ipAddressProp, err := expandNestedComputeNetworkEndpointIpAddress(d.Get("ip_address"), d, config) + if err != nil { + return err + } + toDelete["ipAddress"] = ipAddressProp + + obj = map[string]interface{}{ + "networkEndpoints": []map[string]interface{}{toDelete}, + } + log.Printf("[DEBUG] Deleting NetworkEndpoint %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "NetworkEndpoint") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting NetworkEndpoint", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting NetworkEndpoint %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeNetworkEndpointImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + // instance is optional, so use * instead of + when reading the import id + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/zones/(?P[^/]+)/networkEndpointGroups/(?P[^/]+)/(?P[^/]*)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]*)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]*)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]*)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, 
"{{project}}/{{zone}}/{{network_endpoint_group}}/{{instance}}/{{ip_address}}/{{port}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedComputeNetworkEndpointInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenNestedComputeNetworkEndpointPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles int given in float64 format + if floatVal, ok := v.(float64); ok { + return int(floatVal) + } + return v +} + +func flattenNestedComputeNetworkEndpointIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedComputeNetworkEndpointInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.GetResourceNameFromSelfLink(v.(string)), nil +} + +func expandNestedComputeNetworkEndpointPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeNetworkEndpointIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceComputeNetworkEndpointEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Network Endpoint Group is a URL parameter only, so replace self-link/path with resource name only. 
+ if err := d.Set("network_endpoint_group", tpgresource.GetResourceNameFromSelfLink(d.Get("network_endpoint_group").(string))); err != nil { + return nil, fmt.Errorf("Error setting network_endpoint_group: %s", err) + } + + wrappedReq := map[string]interface{}{ + "networkEndpoints": []interface{}{obj}, + } + return wrappedReq, nil +} + +func flattenNestedComputeNetworkEndpoint(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["items"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value items. Actual value: %v", v) + } + + _, item, err := resourceComputeNetworkEndpointFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputeNetworkEndpointFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedInstance, err := expandNestedComputeNetworkEndpointInstance(d.Get("instance"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedInstance := flattenNestedComputeNetworkEndpointInstance(expectedInstance, d, meta.(*transport_tpg.Config)) + expectedIpAddress, err := expandNestedComputeNetworkEndpointIpAddress(d.Get("ip_address"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedIpAddress := flattenNestedComputeNetworkEndpointIpAddress(expectedIpAddress, d, meta.(*transport_tpg.Config)) + expectedPort, err := expandNestedComputeNetworkEndpointPort(d.Get("port"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedPort := 
flattenNestedComputeNetworkEndpointPort(expectedPort, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + // Decode list item before comparing. + item, err := resourceComputeNetworkEndpointDecoder(d, meta, item) + if err != nil { + return -1, nil, err + } + + itemInstance := flattenNestedComputeNetworkEndpointInstance(item["instance"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemInstance)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedInstance))) && !reflect.DeepEqual(itemInstance, expectedFlattenedInstance) { + log.Printf("[DEBUG] Skipping item with instance= %#v, looking for %#v)", itemInstance, expectedFlattenedInstance) + continue + } + itemIpAddress := flattenNestedComputeNetworkEndpointIpAddress(item["ipAddress"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemIpAddress)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedIpAddress))) && !reflect.DeepEqual(itemIpAddress, expectedFlattenedIpAddress) { + log.Printf("[DEBUG] Skipping item with ipAddress= %#v, looking for %#v)", itemIpAddress, expectedFlattenedIpAddress) + continue + } + itemPort := flattenNestedComputeNetworkEndpointPort(item["port"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemPort)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedPort))) && !reflect.DeepEqual(itemPort, expectedFlattenedPort) { + log.Printf("[DEBUG] Skipping item with port= %#v, looking for %#v)", itemPort, expectedFlattenedPort) + continue + } + 
log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} +func resourceComputeNetworkEndpointDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + v, ok := res["networkEndpoint"] + if !ok || v == nil { + return res, nil + } + + return v.(map[string]interface{}), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoint_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoint_group.go new file mode 100644 index 0000000000..1d8b9ca1f3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoint_group.go @@ -0,0 +1,490 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeNetworkEndpointGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNetworkEndpointGroupCreate, + Read: resourceComputeNetworkEndpointGroupRead, + Delete: resourceComputeNetworkEndpointGroupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeNetworkEndpointGroupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the resource; provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The network to which all network endpoints in the NEG belong. 
+Uses "default" project network if unspecified.`, + }, + "default_port": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The default port used if the port number is not specified in the +network endpoint.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource. Provide this property when +you create the resource.`, + }, + "network_endpoint_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"GCE_VM_IP", "GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT", ""}), + Description: `Type of network endpoints in this network endpoint group. +NON_GCP_PRIVATE_IP_PORT is used for hybrid connectivity network +endpoint groups (see https://cloud.google.com/load-balancing/docs/hybrid). +Note that NON_GCP_PRIVATE_IP_PORT can only be used with Backend Services +that 1) have the following load balancing schemes: EXTERNAL, EXTERNAL_MANAGED, +INTERNAL_MANAGED, and INTERNAL_SELF_MANAGED and 2) support the RATE or +CONNECTION balancing modes. + +Possible values include: GCE_VM_IP, GCE_VM_IP_PORT, and NON_GCP_PRIVATE_IP_PORT. 
Default value: "GCE_VM_IP_PORT" Possible values: ["GCE_VM_IP", "GCE_VM_IP_PORT", "NON_GCP_PRIVATE_IP_PORT"]`, + Default: "GCE_VM_IP_PORT", + }, + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareOptionalSubnet, + Description: `Optional subnetwork to which all network endpoints in the NEG belong.`, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Zone where the network endpoint group is located.`, + }, + "size": { + Type: schema.TypeInt, + Computed: true, + Description: `Number of network endpoints in the network endpoint group.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeNetworkEndpointGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandComputeNetworkEndpointGroupName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandComputeNetworkEndpointGroupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + networkEndpointTypeProp, err := expandComputeNetworkEndpointGroupNetworkEndpointType(d.Get("network_endpoint_type"), d, config) + if err 
!= nil { + return err + } else if v, ok := d.GetOkExists("network_endpoint_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkEndpointTypeProp)) && (ok || !reflect.DeepEqual(v, networkEndpointTypeProp)) { + obj["networkEndpointType"] = networkEndpointTypeProp + } + networkProp, err := expandComputeNetworkEndpointGroupNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + subnetworkProp, err := expandComputeNetworkEndpointGroupSubnetwork(d.Get("subnetwork"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subnetwork"); !tpgresource.IsEmptyValue(reflect.ValueOf(subnetworkProp)) && (ok || !reflect.DeepEqual(v, subnetworkProp)) { + obj["subnetwork"] = subnetworkProp + } + defaultPortProp, err := expandComputeNetworkEndpointGroupDefaultPort(d.Get("default_port"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_port"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultPortProp)) && (ok || !reflect.DeepEqual(v, defaultPortProp)) { + obj["defaultPort"] = defaultPortProp + } + zoneProp, err := expandComputeNetworkEndpointGroupZone(d.Get("zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + obj["zone"] = zoneProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new NetworkEndpointGroup: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkEndpointGroup: %s", err) + } + billingProject = project + + // err == nil 
indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating NetworkEndpointGroup: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating NetworkEndpointGroup", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create NetworkEndpointGroup: %s", err) + } + + log.Printf("[DEBUG] Finished creating NetworkEndpointGroup %q: %#v", d.Id(), res) + + return resourceComputeNetworkEndpointGroupRead(d, meta) +} + +func resourceComputeNetworkEndpointGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkEndpointGroup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeNetworkEndpointGroup %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) + } + + if err := d.Set("name", flattenComputeNetworkEndpointGroupName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) + } + if err := d.Set("description", flattenComputeNetworkEndpointGroupDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) + } + if err := d.Set("network_endpoint_type", flattenComputeNetworkEndpointGroupNetworkEndpointType(res["networkEndpointType"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) + } + if err := d.Set("size", flattenComputeNetworkEndpointGroupSize(res["size"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) + } + if err := d.Set("network", flattenComputeNetworkEndpointGroupNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) + } + if err := d.Set("subnetwork", flattenComputeNetworkEndpointGroupSubnetwork(res["subnetwork"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) + } + if err := d.Set("default_port", flattenComputeNetworkEndpointGroupDefaultPort(res["defaultPort"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) + } + if err := d.Set("zone", flattenComputeNetworkEndpointGroupZone(res["zone"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) + } + if err := d.Set("self_link", 
tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) + } + + return nil +} + +func resourceComputeNetworkEndpointGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkEndpointGroup: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting NetworkEndpointGroup %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "NetworkEndpointGroup") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting NetworkEndpointGroup", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting NetworkEndpointGroup %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeNetworkEndpointGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/zones/(?P<zone>[^/]+)/networkEndpointGroups/(?P<name>[^/]+)", + 
"(?P<project>[^/]+)/(?P<zone>[^/]+)/(?P<name>[^/]+)", + "(?P<zone>[^/]+)/(?P<name>[^/]+)", + "(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeNetworkEndpointGroupName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkEndpointGroupDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkEndpointGroupNetworkEndpointType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkEndpointGroupSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeNetworkEndpointGroupNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeNetworkEndpointGroupSubnetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeNetworkEndpointGroupDefaultPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if 
strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeNetworkEndpointGroupZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func expandComputeNetworkEndpointGroupName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkEndpointGroupDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkEndpointGroupNetworkEndpointType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkEndpointGroupNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeNetworkEndpointGroupSubnetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for subnetwork: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeNetworkEndpointGroupDefaultPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + 
return v, nil +} + +func expandComputeNetworkEndpointGroupZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("zones", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for zone: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoint_group_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoint_group_sweeper.go new file mode 100644 index 0000000000..3ac48ca275 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoint_group_sweeper.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeNetworkEndpointGroup", testSweepComputeNetworkEndpointGroup) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeNetworkEndpointGroup(region string) error { + resourceName := "ComputeNetworkEndpointGroup" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/aggregated/networkEndpointGroups", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) 
+ if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + var rl []interface{} + zones := resourceList.(map[string]interface{}) + // Loop through every zone in the list response + for _, zonesValue := range zones { + zone := zonesValue.(map[string]interface{}) + for k, v := range zone { + // Zone map either has resources or a warning stating there were no resources found in the zone + if k != "warning" { + resourcesInZone := v.([]interface{}) + rl = append(rl, resourcesInZone...) + } + } + } + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}" + if obj["zone"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource zone was nil", resourceName) + return nil + } + zone := tpgresource.GetResourceNameFromSelfLink(obj["zone"].(string)) + deleteTemplate = strings.Replace(deleteTemplate, "{{zone}}", zone, -1) + + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + 
Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoints.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoints.go new file mode 100644 index 0000000000..f49eda68e4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_endpoints.go @@ -0,0 +1,756 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type NetworkEndpointsNetworkEndpoint struct { + IPAddress string + Port int + Instance string +} + +func NetworkEndpointsNetworkEndpointConvertToStruct(endpoint interface{}) NetworkEndpointsNetworkEndpoint { + e := endpoint.(map[string]interface{}) + ipAddress := e["ip_address"].(string) + port := e["port"].(int) + instance, _ := e["instance"].(string) + return NetworkEndpointsNetworkEndpoint{ + IPAddress: ipAddress, + Port: port, + Instance: instance, + } +} + +func NetworkEndpointsNetworkEndpointConvertToAny(endpoint NetworkEndpointsNetworkEndpoint) interface{} { + m := make(map[string]interface{}) + m["ip_address"] = endpoint.IPAddress + m["port"] = endpoint.Port + m["instance"] = endpoint.Instance + return m +} + +// Continues to read network endpoints as long as there are unread pages remaining +func networkEndpointsPaginatedRead(d *schema.ResourceData, config *transport_tpg.Config, userAgent, url, project, billingProject, pt string) ([]interface{}, error) { + var allEndpoints []interface{} + for len(pt) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: fmt.Sprintf("%s?pageToken=%s", url, pt), + UserAgent: userAgent, + }) + if err != nil { + return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeNetworkEndpoints %q", d.Id())) + } + resEndpoints := res["items"].([]interface{}) + allEndpoints = append(allEndpoints, resEndpoints...) 
+ pt, _ = res["nextPageToken"].(string) + } + return allEndpoints, nil +} + +// Mutates the parent NEG by attaching or detaching endpoints in chunks. `url` determines if endpoints are attached or detached. +// The last page is not processed, but instead returned for the Create/Delete functions to write. +func networkEndpointsPaginatedMutate(d *schema.ResourceData, endpoints []interface{}, config *transport_tpg.Config, userAgent, url, project, billingProject string, chunkSize int, returnLastPage bool) ([]interface{}, error) { + // Pull out what this mutation is doing - either attachNetworkEndpoints or detachNetworkEndpoints + verb := url[len(url)-len("attachNetworkEndpoints"):] + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{zone}}/{{network_endpoint_group}}/endpoints") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + i := 0 + for ; i < len(endpoints); i += chunkSize { + j := i + chunkSize + if j > len(endpoints) { + if returnLastPage { + break + } + j = len(endpoints) + } + timeoutType := schema.TimeoutCreate + if verb != "attachNetworkEndpoints" { + timeoutType = schema.TimeoutDelete + } + body := map[string]interface{}{"networkEndpoints": endpoints[i:j]} + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: body, + Timeout: d.Timeout(timeoutType), + }) + if err != nil { + return nil, fmt.Errorf("Error during %s: %s", verb, err) + } + + err = ComputeOperationWaitTime( + config, res, project, verb, userAgent, + d.Timeout(schema.TimeoutDefault)) + + if err != nil { + // The mutation wasn't applied + return nil, fmt.Errorf("Error in %s operation: %s", verb, err) + } + + log.Printf("[DEBUG] Finished %s %q: %#v", verb, id, res) + } + if returnLastPage { + return endpoints[i:], nil + } + return nil, nil +} + +func ResourceComputeNetworkEndpoints() *schema.Resource { + return &schema.Resource{ + 
Create: resourceComputeNetworkEndpointsCreate, + Read: resourceComputeNetworkEndpointsRead, + Update: resourceComputeNetworkEndpointsUpdate, + Delete: resourceComputeNetworkEndpointsDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeNetworkEndpointsImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "network_endpoint_group": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The network endpoint group these endpoints are part of.`, + }, + "network_endpoints": { + Type: schema.TypeSet, + Optional: true, + Description: `The network endpoints to be added to the enclosing network endpoint group +(NEG). Each endpoint specifies an IP address and port, along with +additional information depending on the NEG type.`, + Elem: computeNetworkEndpointsNetworkEndpointsSchema(), + // Default schema.HashSchema is used. + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Zone where the containing network endpoint group is located.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func computeNetworkEndpointsNetworkEndpointsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Required: true, + Description: `IPv4 address of network endpoint. 
The IP address must belong +to a VM in GCE (either the primary IP or as part of an aliased IP +range).`, + }, + "instance": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name for a specific VM instance that the IP address belongs to. +This is required for network endpoints of type GCE_VM_IP_PORT. +The instance must be in the same zone as the network endpoint group.`, + }, + "port": { + Type: schema.TypeInt, + Optional: true, + Description: `Port number of network endpoint. +**Note** 'port' is required unless the Network Endpoint Group is created +with the type of 'GCE_VM_IP'`, + }, + }, + } +} + +func resourceComputeNetworkEndpointsCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + networkEndpointsProp, err := expandComputeNetworkEndpointsNetworkEndpoints(d.Get("network_endpoints"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network_endpoints"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkEndpointsProp)) && (ok || !reflect.DeepEqual(v, networkEndpointsProp)) { + obj["networkEndpoints"] = networkEndpointsProp + } + + obj, err = resourceComputeNetworkEndpointsEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/attachNetworkEndpoints") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new NetworkEndpoints: %#v", obj) + 
billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkEndpoints: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + chunkSize := 500 // API only accepts 500 endpoints at a time + lastPage, err := networkEndpointsPaginatedMutate(d, obj["networkEndpoints"].([]interface{}), config, userAgent, url, project, billingProject, chunkSize, true) + if err != nil { + // networkEndpointsPaginatedMutate already adds error description + return err + } + obj["networkEndpoints"] = lastPage + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating NetworkEndpoints: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{zone}}/{{network_endpoint_group}}/endpoints") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating NetworkEndpoints", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create NetworkEndpoints: %s", err) + } + + log.Printf("[DEBUG] Finished creating NetworkEndpoints %q: %#v", d.Id(), res) + + return resourceComputeNetworkEndpointsRead(d, meta) +} + +func resourceComputeNetworkEndpointsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, 
config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/listNetworkEndpoints") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkEndpoints: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeNetworkEndpoints %q", d.Id())) + } + + res, err = resourceComputeNetworkEndpointsDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing ComputeNetworkEndpoints because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading NetworkEndpoints: %s", err) + } + + if err := d.Set("network_endpoints", flattenComputeNetworkEndpointsNetworkEndpoints(res["networkEndpoints"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkEndpoints: %s", err) + } + + return nil +} + +func resourceComputeNetworkEndpointsUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkEndpoints: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + networkEndpointsProp, err := expandComputeNetworkEndpointsNetworkEndpoints(d.Get("network_endpoints"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network_endpoints"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkEndpointsProp)) { + obj["networkEndpoints"] = networkEndpointsProp + } + + obj, err = resourceComputeNetworkEndpointsEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/attachNetworkEndpoints") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating NetworkEndpoints %q: %#v", d.Id(), obj) + detachUrl, err 
:= tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/detachNetworkEndpoints") + o, n := d.GetChange("network_endpoints") + + oldEndpoints := make(map[NetworkEndpointsNetworkEndpoint]struct{}) + newEndpoints := make(map[NetworkEndpointsNetworkEndpoint]struct{}) + + for _, e := range o.(*schema.Set).List() { + oldEndpoints[NetworkEndpointsNetworkEndpointConvertToStruct(e)] = struct{}{} + } + + for _, e := range n.(*schema.Set).List() { + newEndpoints[NetworkEndpointsNetworkEndpointConvertToStruct(e)] = struct{}{} + } + + // We want to ignore any endpoints that are shared between the two. + endpointsToKeep := make(map[NetworkEndpointsNetworkEndpoint]struct{}) + for e := range oldEndpoints { + if _, ok := newEndpoints[e]; ok { + endpointsToKeep[e] = struct{}{} + } + } + log.Printf("number of old endpoints: %v\n", len(oldEndpoints)) + log.Printf("number of new endpoints: %v\n", len(newEndpoints)) + log.Printf("number of shared endpoints: %v\n", len(endpointsToKeep)) + + for e := range endpointsToKeep { + // Removing all shared endpoints from the old endpoints yields the list of endpoints to detach. + delete(oldEndpoints, e) + // Removing all shared endpoints from the new endpoints yields the list of endpoints to attach. 
+ delete(newEndpoints, e) + } + + var endpointsToDetach []interface{} + for e := range oldEndpoints { + endpointsToDetach = append(endpointsToDetach, NetworkEndpointsNetworkEndpointConvertToAny(e)) + } + var endpointsToAttach []interface{} + for e := range newEndpoints { + endpointsToAttach = append(endpointsToAttach, NetworkEndpointsNetworkEndpointConvertToAny(e)) + } + + log.Printf("number of endpoints to detach: %v\n", len(endpointsToDetach)) + log.Printf("number of endpoints to attach: %v\n", len(endpointsToAttach)) + + chunkSize := 500 // API only accepts 500 endpoints at a time + + _, err = networkEndpointsPaginatedMutate(d, endpointsToDetach, config, userAgent, detachUrl, project, billingProject, chunkSize, false) + if err != nil { + // networkEndpointsPaginatedMutate already adds error description + return err + } + + lastPage, err := networkEndpointsPaginatedMutate(d, endpointsToAttach, config, userAgent, url, project, billingProject, chunkSize, true) + if err != nil { + // networkEndpointsPaginatedMutate already adds error description + return err + } + + obj = map[string]interface{}{ + "networkEndpoints": lastPage, + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating NetworkEndpoints %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating NetworkEndpoints %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating NetworkEndpoints", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeNetworkEndpointsRead(d, meta) +} + +func 
resourceComputeNetworkEndpointsDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkEndpoints: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/detachNetworkEndpoints") + if err != nil { + return err + } + + var obj map[string]interface{} + var endpointsToDelete []interface{} + + endpoints := d.Get("network_endpoints").(*schema.Set).List() + + for _, e := range endpoints { + endpoint := e.(map[string]interface{}) + toDelete := make(map[string]interface{}) + instanceProp, err := expandNestedComputeNetworkEndpointInstance(endpoint["instance"], d, config) + if err != nil { + return err + } + if instanceProp != "" { + toDelete["instance"] = instanceProp + } + + portProp, err := expandNestedComputeNetworkEndpointPort(endpoint["port"], d, config) + if err != nil { + return err + } + if portProp != 0 { + toDelete["port"] = portProp + } + + ipAddressProp, err := expandNestedComputeNetworkEndpointIpAddress(endpoint["ip_address"], d, config) + if err != nil { + return err + } + toDelete["ipAddress"] = ipAddressProp + endpointsToDelete = append(endpointsToDelete, toDelete) + } + + chunkSize := 500 // API only accepts 500 endpoints at a time + lastPage, err := networkEndpointsPaginatedMutate(d, endpointsToDelete, config, userAgent, url, project, billingProject, chunkSize, true) + if 
err != nil { + // networkEndpointsPaginatedMutate already adds error description + return err + } + + obj = map[string]interface{}{ + "networkEndpoints": lastPage, + } + log.Printf("[DEBUG] Deleting NetworkEndpoints %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "NetworkEndpoints") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting NetworkEndpoints", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting NetworkEndpoints %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeNetworkEndpointsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/zones/(?P[^/]+)/networkEndpointGroups/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{zone}}/{{network_endpoint_group}}/endpoints") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeNetworkEndpointsNetworkEndpoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(computeNetworkEndpointsNetworkEndpointsSchema()), 
[]interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "instance": flattenComputeNetworkEndpointsNetworkEndpointsInstance(original["instance"], d, config), + "port": flattenComputeNetworkEndpointsNetworkEndpointsPort(original["port"], d, config), + "ip_address": flattenComputeNetworkEndpointsNetworkEndpointsIpAddress(original["ipAddress"], d, config), + }) + } + return transformed +} +func flattenComputeNetworkEndpointsNetworkEndpointsInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeNetworkEndpointsNetworkEndpointsPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles int given in float64 format + if floatVal, ok := v.(float64); ok { + return int(floatVal) + } + return v +} + +func flattenComputeNetworkEndpointsNetworkEndpointsIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeNetworkEndpointsNetworkEndpoints(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInstance, err := expandComputeNetworkEndpointsNetworkEndpointsInstance(original["instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["instance"] = transformedInstance + } + + transformedPort, err := 
expandComputeNetworkEndpointsNetworkEndpointsPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedIpAddress, err := expandComputeNetworkEndpointsNetworkEndpointsIpAddress(original["ip_address"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpAddress); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipAddress"] = transformedIpAddress + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeNetworkEndpointsNetworkEndpointsInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.GetResourceNameFromSelfLink(v.(string)), nil +} + +func expandComputeNetworkEndpointsNetworkEndpointsPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkEndpointsNetworkEndpointsIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceComputeNetworkEndpointsEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Network Endpoint Group is a URL parameter only, so replace self-link/path with resource name only. 
+ if err := d.Set("network_endpoint_group", tpgresource.GetResourceNameFromSelfLink(d.Get("network_endpoint_group").(string))); err != nil { + return nil, fmt.Errorf("Error setting network_endpoint_group: %s", err) + } + + return obj, nil +} + +func resourceComputeNetworkEndpointsDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/listNetworkEndpoints") + if err != nil { + return nil, err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, fmt.Errorf("Error fetching project for NetworkEndpoint: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + // Read past the first page to get all endpoints. + pt, _ := res["nextPageToken"].(string) + allEndpoints, err := networkEndpointsPaginatedRead(d, config, userAgent, url, project, billingProject, pt) + if err != nil { + // networkEndpointsPaginatedRead already adds error description + return nil, err + } + firstPage := res["items"].([]interface{}) + allEndpoints = append(firstPage, allEndpoints...) + + // listNetworkEndpoints returns data in a different structure, so we need to + // convert to the Terraform schema. 
+ var transformed []interface{} + for _, e := range allEndpoints { + t := e.(map[string]interface{})["networkEndpoint"] + transformed = append(transformed, t) + } + + return map[string]interface{}{"networkEndpoints": transformed}, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_firewall_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy.go similarity index 79% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_firewall_policy.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy.go index 6c56c0f9ac..b1b9897489 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_firewall_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceComputeNetworkFirewallPolicy() *schema.Resource { @@ -63,7 +70,7 @@ func ResourceComputeNetworkFirewallPolicy() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -107,8 +114,8 @@ func ResourceComputeNetworkFirewallPolicy() *schema.Resource { } func resourceComputeNetworkFirewallPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -124,18 +131,18 @@ func resourceComputeNetworkFirewallPolicyCreate(d *schema.ResourceData, meta int return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, 
err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -157,8 +164,8 @@ func resourceComputeNetworkFirewallPolicyCreate(d *schema.ResourceData, meta int } func resourceComputeNetworkFirewallPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -169,17 +176,17 @@ func resourceComputeNetworkFirewallPolicyRead(d *schema.ResourceData, meta inter Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", 
client.Config.BasePath, err) } else { @@ -188,7 +195,7 @@ func resourceComputeNetworkFirewallPolicyRead(d *schema.ResourceData, meta inter res, err := client.GetNetworkFirewallPolicy(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ComputeNetworkFirewallPolicy %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("name", res.Name); err != nil { @@ -222,8 +229,8 @@ func resourceComputeNetworkFirewallPolicyRead(d *schema.ResourceData, meta inter return nil } func resourceComputeNetworkFirewallPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -233,19 +240,19 @@ func resourceComputeNetworkFirewallPolicyUpdate(d *schema.ResourceData, meta int Description: dcl.String(d.Get("description").(string)), Project: dcl.String(project), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", 
client.Config.BasePath, err) } else { @@ -267,8 +274,8 @@ func resourceComputeNetworkFirewallPolicyUpdate(d *schema.ResourceData, meta int } func resourceComputeNetworkFirewallPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -280,17 +287,17 @@ func resourceComputeNetworkFirewallPolicyDelete(d *schema.ResourceData, meta int } log.Printf("[DEBUG] Deleting NetworkFirewallPolicy %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -305,9 +312,9 @@ func resourceComputeNetworkFirewallPolicyDelete(d *schema.ResourceData, meta int } func resourceComputeNetworkFirewallPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/global/firewallPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -316,7 +323,7 @@ func 
resourceComputeNetworkFirewallPolicyImport(d *schema.ResourceData, meta int } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/global/firewallPolicies/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/global/firewallPolicies/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_firewall_policy_association.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_association.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_firewall_policy_association.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_association.go index faffeaed69..41a81c8420 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_firewall_policy_association.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_association.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceComputeNetworkFirewallPolicyAssociation() *schema.Resource { @@ -47,7 +54,7 @@ func ResourceComputeNetworkFirewallPolicyAssociation() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The target that the firewall policy is attached to.", }, @@ -55,7 +62,7 @@ func ResourceComputeNetworkFirewallPolicyAssociation() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The firewall policy ID of the association.", }, @@ -71,7 +78,7 @@ func ResourceComputeNetworkFirewallPolicyAssociation() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -85,8 +92,8 @@ func ResourceComputeNetworkFirewallPolicyAssociation() *schema.Resource { } func resourceComputeNetworkFirewallPolicyAssociationCreate(d *schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -98,23 +105,23 @@ func resourceComputeNetworkFirewallPolicyAssociationCreate(d *schema.ResourceDat Project: dcl.String(project), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -136,8 +143,8 @@ func resourceComputeNetworkFirewallPolicyAssociationCreate(d *schema.ResourceDat } func resourceComputeNetworkFirewallPolicyAssociationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := 
tpgresource.GetProject(d, config) if err != nil { return err } @@ -149,17 +156,17 @@ func resourceComputeNetworkFirewallPolicyAssociationRead(d *schema.ResourceData, Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -168,7 +175,7 @@ func resourceComputeNetworkFirewallPolicyAssociationRead(d *schema.ResourceData, res, err := client.GetNetworkFirewallPolicyAssociation(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ComputeNetworkFirewallPolicyAssociation %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("attachment_target", res.AttachmentTarget); err != nil { @@ -191,8 +198,8 @@ func resourceComputeNetworkFirewallPolicyAssociationRead(d *schema.ResourceData, } func resourceComputeNetworkFirewallPolicyAssociationDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -205,17 +212,17 @@ func 
resourceComputeNetworkFirewallPolicyAssociationDelete(d *schema.ResourceDat } log.Printf("[DEBUG] Deleting NetworkFirewallPolicyAssociation %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -230,9 +237,9 @@ func resourceComputeNetworkFirewallPolicyAssociationDelete(d *schema.ResourceDat } func resourceComputeNetworkFirewallPolicyAssociationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/global/firewallPolicies/(?P[^/]+)/associations/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", }, d, config); err != nil { @@ -240,7 +247,7 @@ func resourceComputeNetworkFirewallPolicyAssociationImport(d *schema.ResourceDat } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") if err != 
nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_firewall_policy_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_rule.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_firewall_policy_rule.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_rule.go index ce1a7ebcda..a4b4ead678 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_network_firewall_policy_rule.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_rule.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceComputeNetworkFirewallPolicyRule() *schema.Resource { @@ -48,7 +55,7 @@ func ResourceComputeNetworkFirewallPolicyRule() *schema.Resource { "action": { Type: schema.TypeString, Required: true, - Description: "The Action to perform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", + Description: "The Action to perform when the client connection triggers the rule. 
Valid actions are \"allow\", \"deny\" and \"goto_next\".", }, "direction": { @@ -61,7 +68,7 @@ func ResourceComputeNetworkFirewallPolicyRule() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The firewall policy of the resource.", }, @@ -103,7 +110,7 @@ func ResourceComputeNetworkFirewallPolicyRule() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -152,6 +159,20 @@ func ComputeNetworkFirewallPolicyRuleMatchSchema() *schema.Resource { Elem: ComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsSchema(), }, + "dest_address_groups": { + Type: schema.TypeList, + Optional: true, + Description: "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "dest_fqdns": { + Type: schema.TypeList, + Optional: true, + Description: "Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "dest_ip_ranges": { Type: schema.TypeList, Optional: true, @@ -159,6 +180,34 @@ func ComputeNetworkFirewallPolicyRuleMatchSchema() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, + "dest_region_codes": { + Type: schema.TypeList, + Optional: true, + Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. 
Can only be specified if DIRECTION is egress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "dest_threat_intelligences": { + Type: schema.TypeList, + Optional: true, + Description: "Name of the Google Cloud Threat Intelligence list.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "src_address_groups": { + Type: schema.TypeList, + Optional: true, + Description: "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "src_fqdns": { + Type: schema.TypeList, + Optional: true, + Description: "Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "src_ip_ranges": { Type: schema.TypeList, Optional: true, @@ -166,12 +215,26 @@ func ComputeNetworkFirewallPolicyRuleMatchSchema() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, + "src_region_codes": { + Type: schema.TypeList, + Optional: true, + Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "src_secure_tags": { Type: schema.TypeList, Optional: true, Description: "List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. 
Maximum number of source tag values allowed is 256.", Elem: ComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsSchema(), }, + + "src_threat_intelligences": { + Type: schema.TypeList, + Optional: true, + Description: "Name of the Google Cloud Threat Intelligence list.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, } } @@ -201,7 +264,7 @@ func ComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsSchema() *schema.Resource "name": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+", }, @@ -220,7 +283,7 @@ func ComputeNetworkFirewallPolicyRuleTargetSecureTagsSchema() *schema.Resource { "name": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Name of the secure tag, created with TagManager's TagValue API. 
@pattern tagValues/[0-9]+", }, @@ -234,8 +297,8 @@ func ComputeNetworkFirewallPolicyRuleTargetSecureTagsSchema() *schema.Resource { } func resourceComputeNetworkFirewallPolicyRuleCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -252,7 +315,7 @@ func resourceComputeNetworkFirewallPolicyRuleCreate(d *schema.ResourceData, meta Project: dcl.String(project), RuleName: dcl.String(d.Get("rule_name").(string)), TargetSecureTags: expandComputeNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), + TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), } id, err := obj.ID() @@ -260,18 +323,18 @@ func resourceComputeNetworkFirewallPolicyRuleCreate(d *schema.ResourceData, meta return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: 
%w", client.Config.BasePath, err) } else { @@ -293,8 +356,8 @@ func resourceComputeNetworkFirewallPolicyRuleCreate(d *schema.ResourceData, meta } func resourceComputeNetworkFirewallPolicyRuleRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -311,20 +374,20 @@ func resourceComputeNetworkFirewallPolicyRuleRead(d *schema.ResourceData, meta i Project: dcl.String(project), RuleName: dcl.String(d.Get("rule_name").(string)), TargetSecureTags: expandComputeNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), + TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -333,7 +396,7 @@ func resourceComputeNetworkFirewallPolicyRuleRead(d *schema.ResourceData, meta i res, err := client.GetNetworkFirewallPolicyRule(context.Background(), obj) if err != nil { resourceName := 
fmt.Sprintf("ComputeNetworkFirewallPolicyRule %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("action", res.Action); err != nil { @@ -382,8 +445,8 @@ func resourceComputeNetworkFirewallPolicyRuleRead(d *schema.ResourceData, meta i return nil } func resourceComputeNetworkFirewallPolicyRuleUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -400,21 +463,21 @@ func resourceComputeNetworkFirewallPolicyRuleUpdate(d *schema.ResourceData, meta Project: dcl.String(project), RuleName: dcl.String(d.Get("rule_name").(string)), TargetSecureTags: expandComputeNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), + TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return 
fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -436,8 +499,8 @@ func resourceComputeNetworkFirewallPolicyRuleUpdate(d *schema.ResourceData, meta } func resourceComputeNetworkFirewallPolicyRuleDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -454,21 +517,21 @@ func resourceComputeNetworkFirewallPolicyRuleDelete(d *schema.ResourceData, meta Project: dcl.String(project), RuleName: dcl.String(d.Get("rule_name").(string)), TargetSecureTags: expandComputeNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), + TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), } log.Printf("[DEBUG] Deleting NetworkFirewallPolicyRule %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -483,9 +546,9 @@ func resourceComputeNetworkFirewallPolicyRuleDelete(d *schema.ResourceData, meta } func 
resourceComputeNetworkFirewallPolicyRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/global/firewallPolicies/(?P[^/]+)/rules/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -494,7 +557,7 @@ func resourceComputeNetworkFirewallPolicyRuleImport(d *schema.ResourceData, meta } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -513,10 +576,18 @@ func expandComputeNetworkFirewallPolicyRuleMatch(o interface{}) *compute.Network } obj := objArr[0].(map[string]interface{}) return &compute.NetworkFirewallPolicyRuleMatch{ - Layer4Configs: expandComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(obj["layer4_configs"]), - DestIPRanges: expandStringArray(obj["dest_ip_ranges"]), - SrcIPRanges: expandStringArray(obj["src_ip_ranges"]), - SrcSecureTags: expandComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(obj["src_secure_tags"]), + Layer4Configs: expandComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(obj["layer4_configs"]), + DestAddressGroups: tpgdclresource.ExpandStringArray(obj["dest_address_groups"]), + DestFqdns: tpgdclresource.ExpandStringArray(obj["dest_fqdns"]), + DestIPRanges: tpgdclresource.ExpandStringArray(obj["dest_ip_ranges"]), + DestRegionCodes: tpgdclresource.ExpandStringArray(obj["dest_region_codes"]), + DestThreatIntelligences: tpgdclresource.ExpandStringArray(obj["dest_threat_intelligences"]), + SrcAddressGroups: tpgdclresource.ExpandStringArray(obj["src_address_groups"]), + SrcFqdns: 
tpgdclresource.ExpandStringArray(obj["src_fqdns"]), + SrcIPRanges: tpgdclresource.ExpandStringArray(obj["src_ip_ranges"]), + SrcRegionCodes: tpgdclresource.ExpandStringArray(obj["src_region_codes"]), + SrcSecureTags: expandComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(obj["src_secure_tags"]), + SrcThreatIntelligences: tpgdclresource.ExpandStringArray(obj["src_threat_intelligences"]), } } @@ -525,10 +596,18 @@ func flattenComputeNetworkFirewallPolicyRuleMatch(obj *compute.NetworkFirewallPo return nil } transformed := map[string]interface{}{ - "layer4_configs": flattenComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(obj.Layer4Configs), - "dest_ip_ranges": obj.DestIPRanges, - "src_ip_ranges": obj.SrcIPRanges, - "src_secure_tags": flattenComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(obj.SrcSecureTags), + "layer4_configs": flattenComputeNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(obj.Layer4Configs), + "dest_address_groups": obj.DestAddressGroups, + "dest_fqdns": obj.DestFqdns, + "dest_ip_ranges": obj.DestIPRanges, + "dest_region_codes": obj.DestRegionCodes, + "dest_threat_intelligences": obj.DestThreatIntelligences, + "src_address_groups": obj.SrcAddressGroups, + "src_fqdns": obj.SrcFqdns, + "src_ip_ranges": obj.SrcIPRanges, + "src_region_codes": obj.SrcRegionCodes, + "src_secure_tags": flattenComputeNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(obj.SrcSecureTags), + "src_threat_intelligences": obj.SrcThreatIntelligences, } return []interface{}{transformed} @@ -561,7 +640,7 @@ func expandComputeNetworkFirewallPolicyRuleMatchLayer4Configs(o interface{}) *co obj := o.(map[string]interface{}) return &compute.NetworkFirewallPolicyRuleMatchLayer4Configs{ IPProtocol: dcl.String(obj["ip_protocol"].(string)), - Ports: expandStringArray(obj["ports"]), + Ports: tpgdclresource.ExpandStringArray(obj["ports"]), } } diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_sweeper.go new file mode 100644 index 0000000000..df9f06e352 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_firewall_policy_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "testing" + + compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeNetworkFirewallPolicy", testSweepComputeNetworkFirewallPolicy) +} + +func testSweepComputeNetworkFirewallPolicy(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ComputeNetworkFirewallPolicy") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLComputeClient(config, config.UserAgent, "", 0) + err = client.DeleteAllNetworkFirewallPolicy(context.Background(), d["project"], d["location"], isDeletableComputeNetworkFirewallPolicy) + if err != nil { + return err + } + return nil +} + +func isDeletableComputeNetworkFirewallPolicy(r *compute.NetworkFirewallPolicy) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_peering.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_peering.go new file mode 100644 index 0000000000..cf598da404 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_peering.go @@ -0,0 +1,380 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + "log" + "reflect" + "sort" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/googleapi" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "google.golang.org/api/compute/v1" +) + +const peerNetworkLinkRegex = "projects/(" + verify.ProjectRegex + ")/global/networks/((?:[a-z](?:[-a-z0-9]*[a-z0-9])?))$" + +func ResourceComputeNetworkPeering() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNetworkPeeringCreate, + Read: resourceComputeNetworkPeeringRead, + Update: resourceComputeNetworkPeeringUpdate, + Delete: resourceComputeNetworkPeeringDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeNetworkPeeringImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Update: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the peering.`, + }, + + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(peerNetworkLinkRegex), + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The primary network of the peering.`, + }, + + "peer_network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(peerNetworkLinkRegex), + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The peer network in the peering. 
The peer network may belong to a different project.`, + }, + + "export_custom_routes": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether to export the custom routes to the peer network. Defaults to false.`, + }, + + "import_custom_routes": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether to export the custom routes from the peer network. Defaults to false.`, + }, + + "export_subnet_routes_with_public_ip": { + Type: schema.TypeBool, + ForceNew: true, + Optional: true, + Default: true, + }, + + "import_subnet_routes_with_public_ip": { + Type: schema.TypeBool, + ForceNew: true, + Optional: true, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State for the peering, either ACTIVE or INACTIVE. The peering is ACTIVE when there's a matching configuration in the peer network.`, + }, + + "state_details": { + Type: schema.TypeString, + Computed: true, + Description: `Details about the current state of the peering.`, + }, + + "stack_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"IPV4_ONLY", "IPV4_IPV6"}), + Description: `Which IP version(s) of traffic and routes are allowed to be imported or exported between peer networks. The default value is IPV4_ONLY. 
Possible values: ["IPV4_ONLY", "IPV4_IPV6"]`, + Default: "IPV4_ONLY", + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeNetworkPeeringCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(d.Get("network").(string), d, config) + if err != nil { + return err + } + peerNetworkFieldValue, err := tpgresource.ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) + if err != nil { + return err + } + + request := &compute.NetworksAddPeeringRequest{} + request.NetworkPeering = expandNetworkPeering(d) + + // Only one peering operation at a time can be performed for a given network. + // Lock on both networks, sorted so we don't deadlock for A <--> B peering pairs. + peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) + for _, kn := range peeringLockNames { + transport_tpg.MutexStore.Lock(kn) + defer transport_tpg.MutexStore.Unlock(kn) + } + + addOp, err := config.NewComputeClient(userAgent).Networks.AddPeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() + if err != nil { + return fmt.Errorf("Error adding network peering: %s", err) + } + + err = ComputeOperationWaitTime(config, addOp, networkFieldValue.Project, "Adding Network Peering", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s/%s", networkFieldValue.Name, d.Get("name").(string))) + + return resourceComputeNetworkPeeringRead(d, meta) +} + +func resourceComputeNetworkPeeringRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + peeringName := d.Get("name").(string) + networkFieldValue, err := 
tpgresource.ParseNetworkFieldValue(d.Get("network").(string), d, config) + if err != nil { + return err + } + + network, err := config.NewComputeClient(userAgent).Networks.Get(networkFieldValue.Project, networkFieldValue.Name).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Network %q", networkFieldValue.Name)) + } + + peering := findPeeringFromNetwork(network, peeringName) + if peering == nil { + log.Printf("[WARN] Removing network peering %s from network %s because it's gone", peeringName, network.Name) + d.SetId("") + return nil + } + + if err := d.Set("peer_network", peering.Network); err != nil { + return fmt.Errorf("Error setting peer_network: %s", err) + } + if err := d.Set("name", peering.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("import_custom_routes", peering.ImportCustomRoutes); err != nil { + return fmt.Errorf("Error setting import_custom_routes: %s", err) + } + if err := d.Set("export_custom_routes", peering.ExportCustomRoutes); err != nil { + return fmt.Errorf("Error setting export_custom_routes: %s", err) + } + if err := d.Set("import_subnet_routes_with_public_ip", peering.ImportSubnetRoutesWithPublicIp); err != nil { + return fmt.Errorf("Error setting import_subnet_routes_with_public_ip: %s", err) + } + if err := d.Set("export_subnet_routes_with_public_ip", peering.ExportSubnetRoutesWithPublicIp); err != nil { + return fmt.Errorf("Error setting export_subnet_routes_with_public_ip: %s", err) + } + if err := d.Set("state", peering.State); err != nil { + return fmt.Errorf("Error setting state: %s", err) + } + if err := d.Set("state_details", peering.StateDetails); err != nil { + return fmt.Errorf("Error setting state_details: %s", err) + } + if err := d.Set("stack_type", flattenNetworkPeeringStackType(peering.StackType, d, config)); err != nil { + return fmt.Errorf("Error setting stack_type: %s", err) + } + + return nil +} + +func 
resourceComputeNetworkPeeringUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(d.Get("network").(string), d, config) + if err != nil { + return err + } + peerNetworkFieldValue, err := tpgresource.ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) + if err != nil { + return err + } + + request := &compute.NetworksUpdatePeeringRequest{} + request.NetworkPeering = expandNetworkPeering(d) + + // Only one peering operation at a time can be performed for a given network. + // Lock on both networks, sorted so we don't deadlock for A <--> B peering pairs. + peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) + for _, kn := range peeringLockNames { + transport_tpg.MutexStore.Lock(kn) + defer transport_tpg.MutexStore.Unlock(kn) + } + + updateOp, err := config.NewComputeClient(userAgent).Networks.UpdatePeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() + if err != nil { + return fmt.Errorf("Error updating network peering: %s", err) + } + + err = ComputeOperationWaitTime(config, updateOp, networkFieldValue.Project, "Updating Network Peering", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + return resourceComputeNetworkPeeringRead(d, meta) +} + +func resourceComputeNetworkPeeringDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + // Remove the `network` to `peer_network` peering + name := d.Get("name").(string) + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(d.Get("network").(string), d, config) + if err != nil { + return err + } + peerNetworkFieldValue, err := 
tpgresource.ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) + if err != nil { + return err + } + + request := &compute.NetworksRemovePeeringRequest{ + Name: name, + } + + // Only one peering operation at a time can be performed for a given network. + // Lock on both networks, sorted so we don't deadlock for A <--> B peering pairs. + peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) + for _, kn := range peeringLockNames { + transport_tpg.MutexStore.Lock(kn) + defer transport_tpg.MutexStore.Unlock(kn) + } + + removeOp, err := config.NewComputeClient(userAgent).Networks.RemovePeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Peering `%s` already removed from network `%s`", name, networkFieldValue.Name) + } else { + return fmt.Errorf("Error removing peering `%s` from network `%s`: %s", name, networkFieldValue.Name, err) + } + } else { + err = ComputeOperationWaitTime(config, removeOp, networkFieldValue.Project, "Removing Network Peering", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + } + + return nil +} + +func findPeeringFromNetwork(network *compute.Network, peeringName string) *compute.NetworkPeering { + for _, p := range network.Peerings { + if p.Name == peeringName { + return p + } + } + return nil +} +func expandNetworkPeering(d *schema.ResourceData) *compute.NetworkPeering { + return &compute.NetworkPeering{ + ExchangeSubnetRoutes: true, + Name: d.Get("name").(string), + Network: d.Get("peer_network").(string), + ExportCustomRoutes: d.Get("export_custom_routes").(bool), + ImportCustomRoutes: d.Get("import_custom_routes").(bool), + ExportSubnetRoutesWithPublicIp: d.Get("export_subnet_routes_with_public_ip").(bool), + ImportSubnetRoutesWithPublicIp: d.Get("import_subnet_routes_with_public_ip").(bool), + StackType: d.Get("stack_type").(string), + 
ForceSendFields: []string{"ExportSubnetRoutesWithPublicIp", "ImportCustomRoutes", "ExportCustomRoutes"}, + } +} + +func flattenNetworkPeeringStackType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // To prevent the perma-diff caused by the absence of `stack_type` in API responses for older resource + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "IPV4_ONLY" + } + + return v +} + +func sortedNetworkPeeringMutexKeys(networkName, peerNetworkName *tpgresource.GlobalFieldValue) []string { + // Whether you delete the peering from network A to B or the one from B to A, they + // cannot happen at the same time. + networks := []string{ + fmt.Sprintf("%s/peerings", networkName.RelativeLink()), + fmt.Sprintf("%s/peerings", peerNetworkName.RelativeLink()), + } + sort.Strings(networks) + return networks +} + +func resourceComputeNetworkPeeringImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + splits := strings.Split(d.Id(), "/") + if len(splits) != 3 { + return nil, fmt.Errorf("Error parsing network peering import format, expected: {project}/{network}/{name}") + } + project := splits[0] + network := splits[1] + name := splits[2] + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + // Since the format of the network URL in the peering might be different depending on the ComputeBasePath, + // just read the network self link from the API. 
+ net, err := config.NewComputeClient(userAgent).Networks.Get(project, network).Do() + if err != nil { + return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Network %q", splits[1])) + } + + if err := d.Set("network", tpgresource.ConvertSelfLinkToV1(net.SelfLink)); err != nil { + return nil, fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("name", name); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + // Replace import id for the resource id + id := fmt.Sprintf("%s/%s", network, name) + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_peering_routes_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_peering_routes_config.go new file mode 100644 index 0000000000..5b14077057 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_peering_routes_config.go @@ -0,0 +1,437 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeNetworkPeeringRoutesConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNetworkPeeringRoutesConfigCreate, + Read: resourceComputeNetworkPeeringRoutesConfigRead, + Update: resourceComputeNetworkPeeringRoutesConfigUpdate, + Delete: resourceComputeNetworkPeeringRoutesConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeNetworkPeeringRoutesConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "export_custom_routes": { + Type: schema.TypeBool, + Required: true, + Description: `Whether to export the custom routes to the peer network.`, + }, + "import_custom_routes": { + Type: schema.TypeBool, + Required: true, + Description: `Whether to import the custom routes to the peer network.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the primary network for the peering.`, + }, + "peering": { + Type: schema.TypeString, + Required: true, + Description: `Name of the peering.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeNetworkPeeringRoutesConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandNestedComputeNetworkPeeringRoutesConfigPeering(d.Get("peering"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peering"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + exportCustomRoutesProp, err := expandNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(d.Get("export_custom_routes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("export_custom_routes"); ok || !reflect.DeepEqual(v, exportCustomRoutesProp) { + obj["exportCustomRoutes"] = exportCustomRoutesProp + } + importCustomRoutesProp, err := expandNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(d.Get("import_custom_routes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("import_custom_routes"); ok || !reflect.DeepEqual(v, importCustomRoutesProp) { + obj["importCustomRoutes"] = importCustomRoutesProp + } + + obj, err = resourceComputeNetworkPeeringRoutesConfigEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/peerings") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{network}}/updatePeering") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new NetworkPeeringRoutesConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkPeeringRoutesConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating NetworkPeeringRoutesConfig: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/networkPeerings/{{peering}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating NetworkPeeringRoutesConfig", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create NetworkPeeringRoutesConfig: %s", err) + } + + log.Printf("[DEBUG] Finished creating NetworkPeeringRoutesConfig %q: %#v", d.Id(), res) + + return resourceComputeNetworkPeeringRoutesConfigRead(d, meta) +} + +func resourceComputeNetworkPeeringRoutesConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{network}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkPeeringRoutesConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + 
Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeNetworkPeeringRoutesConfig %q", d.Id())) + } + + res, err = flattenNestedComputeNetworkPeeringRoutesConfig(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing ComputeNetworkPeeringRoutesConfig because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading NetworkPeeringRoutesConfig: %s", err) + } + + if err := d.Set("peering", flattenNestedComputeNetworkPeeringRoutesConfigPeering(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkPeeringRoutesConfig: %s", err) + } + if err := d.Set("export_custom_routes", flattenNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(res["exportCustomRoutes"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkPeeringRoutesConfig: %s", err) + } + if err := d.Set("import_custom_routes", flattenNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(res["importCustomRoutes"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkPeeringRoutesConfig: %s", err) + } + + return nil +} + +func resourceComputeNetworkPeeringRoutesConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkPeeringRoutesConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + nameProp, err := expandNestedComputeNetworkPeeringRoutesConfigPeering(d.Get("peering"), d, config) + 
if err != nil { + return err + } else if v, ok := d.GetOkExists("peering"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + exportCustomRoutesProp, err := expandNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(d.Get("export_custom_routes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("export_custom_routes"); ok || !reflect.DeepEqual(v, exportCustomRoutesProp) { + obj["exportCustomRoutes"] = exportCustomRoutesProp + } + importCustomRoutesProp, err := expandNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(d.Get("import_custom_routes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("import_custom_routes"); ok || !reflect.DeepEqual(v, importCustomRoutesProp) { + obj["importCustomRoutes"] = importCustomRoutesProp + } + + obj, err = resourceComputeNetworkPeeringRoutesConfigEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/peerings") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{network}}/updatePeering") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating NetworkPeeringRoutesConfig %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating NetworkPeeringRoutesConfig %q: %s", d.Id(), 
err) + } else { + log.Printf("[DEBUG] Finished updating NetworkPeeringRoutesConfig %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating NetworkPeeringRoutesConfig", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeNetworkPeeringRoutesConfigRead(d, meta) +} + +func resourceComputeNetworkPeeringRoutesConfigDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] Compute NetworkPeeringRoutesConfig resources"+ + " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func resourceComputeNetworkPeeringRoutesConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/networks/(?P[^/]+)/networkPeerings/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/networkPeerings/{{peering}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedComputeNetworkPeeringRoutesConfigPeering(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
expandNestedComputeNetworkPeeringRoutesConfigPeering(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceComputeNetworkPeeringRoutesConfigEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Stick request in a networkPeering block as in + // https://cloud.google.com/compute/docs/reference/rest/v1/networks/updatePeering + newObj := make(map[string]interface{}) + newObj["networkPeering"] = obj + return newObj, nil +} + +func flattenNestedComputeNetworkPeeringRoutesConfig(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["peerings"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value peerings. 
Actual value: %v", v) + } + + _, item, err := resourceComputeNetworkPeeringRoutesConfigFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputeNetworkPeeringRoutesConfigFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedPeering, err := expandNestedComputeNetworkPeeringRoutesConfigPeering(d.Get("peering"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedPeering := flattenNestedComputeNetworkPeeringRoutesConfigPeering(expectedPeering, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemPeering := flattenNestedComputeNetworkPeeringRoutesConfigPeering(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemPeering)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedPeering))) && !reflect.DeepEqual(itemPeering, expectedFlattenedPeering) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemPeering, expectedFlattenedPeering) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_sweeper.go new file mode 100644 index 0000000000..b37b891df1 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_network_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeNetwork", testSweepComputeNetwork) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeNetwork(region string) error { + resourceName := "ComputeNetwork" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": 
config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/networks", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/networks/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_group.go new file mode 100644 index 0000000000..e795b4a34f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_group.go @@ -0,0 +1,874 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeNodeGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNodeGroupCreate, + Read: resourceComputeNodeGroupRead, + Update: resourceComputeNodeGroupUpdate, + Delete: resourceComputeNodeGroupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeNodeGroupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "node_template": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URL of the node template to which this node group belongs.`, + }, + "autoscaling_policy": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `If you use sole-tenant nodes for your workloads, you can use the node +group autoscaler to automatically manage the sizes of your node groups.`, + MaxItems: 1, + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_nodes": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Maximum size of the node group. Set to a value less than or equal +to 100 and greater than or equal to min-nodes.`, + }, + "mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"OFF", "ON", "ONLY_SCALE_OUT"}), + Description: `The autoscaling mode. Set to one of the following: + - OFF: Disables the autoscaler. + - ON: Enables scaling in and scaling out. + - ONLY_SCALE_OUT: Enables only scaling out. + You must use this mode if your node groups are configured to + restart their hosted VMs on minimal servers. Possible values: ["OFF", "ON", "ONLY_SCALE_OUT"]`, + }, + "min_nodes": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Minimum size of the node group. Must be less +than or equal to max-nodes. The default value is 0.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional textual description of the resource.`, + }, + "initial_size": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The initial number of nodes in the node group. One of 'initial_size' or 'size' must be specified.`, + ExactlyOneOf: []string{"size", "initial_size"}, + }, + "maintenance_policy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. 
The default value is DEFAULT.`, + Default: "DEFAULT", + }, + "maintenance_window": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `contains properties for the timeframe of maintenance`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_time": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `instances.start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the resource.`, + }, + "share_settings": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Share settings for the node group.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "share_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ORGANIZATION", "SPECIFIC_PROJECTS", "LOCAL"}), + Description: `Node group sharing type. Possible values: ["ORGANIZATION", "SPECIFIC_PROJECTS", "LOCAL"]`, + }, + "project_map": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: `A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The project id/number should be the same as the key of this project config in the project map.`, + }, + }, + }, + }, + }, + }, + }, + "size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The total number of nodes in the node group. 
One of 'initial_size' or 'size' must be specified.`, + ExactlyOneOf: []string{"size", "initial_size"}, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Zone where this node group is located`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeNodeGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeNodeGroupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeNodeGroupName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + nodeTemplateProp, err := expandComputeNodeGroupNodeTemplate(d.Get("node_template"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("node_template"); !tpgresource.IsEmptyValue(reflect.ValueOf(nodeTemplateProp)) && (ok || !reflect.DeepEqual(v, nodeTemplateProp)) { + obj["nodeTemplate"] = nodeTemplateProp + } + sizeProp, err := expandComputeNodeGroupSize(d.Get("size"), d, config) + if err != nil { + return err + } 
else if v, ok := d.GetOkExists("size"); ok || !reflect.DeepEqual(v, sizeProp) { + obj["size"] = sizeProp + } + maintenancePolicyProp, err := expandComputeNodeGroupMaintenancePolicy(d.Get("maintenance_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("maintenance_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(maintenancePolicyProp)) && (ok || !reflect.DeepEqual(v, maintenancePolicyProp)) { + obj["maintenancePolicy"] = maintenancePolicyProp + } + maintenanceWindowProp, err := expandComputeNodeGroupMaintenanceWindow(d.Get("maintenance_window"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("maintenance_window"); !tpgresource.IsEmptyValue(reflect.ValueOf(maintenanceWindowProp)) && (ok || !reflect.DeepEqual(v, maintenanceWindowProp)) { + obj["maintenanceWindow"] = maintenanceWindowProp + } + autoscalingPolicyProp, err := expandComputeNodeGroupAutoscalingPolicy(d.Get("autoscaling_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("autoscaling_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(autoscalingPolicyProp)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) { + obj["autoscalingPolicy"] = autoscalingPolicyProp + } + shareSettingsProp, err := expandComputeNodeGroupShareSettings(d.Get("share_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("share_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(shareSettingsProp)) && (ok || !reflect.DeepEqual(v, shareSettingsProp)) { + obj["shareSettings"] = shareSettingsProp + } + zoneProp, err := expandComputeNodeGroupZone(d.Get("zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + obj["zone"] = zoneProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/nodeGroups?initialNodeCount=PRE_CREATE_REPLACE_ME") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new NodeGroup: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NodeGroup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + var sizeParam string + if v, ok := d.GetOkExists("size"); ok { + sizeParam = fmt.Sprintf("%v", v) + } else if v, ok := d.GetOkExists("initial_size"); ok { + sizeParam = fmt.Sprintf("%v", v) + } + + url = regexp.MustCompile("PRE_CREATE_REPLACE_ME").ReplaceAllLiteralString(url, sizeParam) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating NodeGroup: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating NodeGroup", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create NodeGroup: %s", err) + } + + log.Printf("[DEBUG] Finished creating NodeGroup %q: %#v", d.Id(), res) + + return resourceComputeNodeGroupRead(d, meta) +} + +func resourceComputeNodeGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + 
if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NodeGroup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeNodeGroup %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading NodeGroup: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeNodeGroupCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeGroup: %s", err) + } + if err := d.Set("description", flattenComputeNodeGroupDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeGroup: %s", err) + } + if err := d.Set("name", flattenComputeNodeGroupName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeGroup: %s", err) + } + if err := d.Set("node_template", flattenComputeNodeGroupNodeTemplate(res["nodeTemplate"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeGroup: %s", err) + } + if err := d.Set("size", flattenComputeNodeGroupSize(res["size"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeGroup: %s", err) + } + if err := d.Set("maintenance_policy", flattenComputeNodeGroupMaintenancePolicy(res["maintenancePolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading 
NodeGroup: %s", err) + } + if err := d.Set("maintenance_window", flattenComputeNodeGroupMaintenanceWindow(res["maintenanceWindow"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeGroup: %s", err) + } + if err := d.Set("autoscaling_policy", flattenComputeNodeGroupAutoscalingPolicy(res["autoscalingPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeGroup: %s", err) + } + if err := d.Set("share_settings", flattenComputeNodeGroupShareSettings(res["shareSettings"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeGroup: %s", err) + } + if err := d.Set("zone", flattenComputeNodeGroupZone(res["zone"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeGroup: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading NodeGroup: %s", err) + } + + return nil +} + +func resourceComputeNodeGroupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NodeGroup: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("node_template") { + obj := make(map[string]interface{}) + + nodeTemplateProp, err := expandComputeNodeGroupNodeTemplate(d.Get("node_template"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("node_template"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nodeTemplateProp)) { + obj["nodeTemplate"] = nodeTemplateProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}/setNodeTemplate") + if err != nil { + return err + } + + // err == nil indicates 
that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating NodeGroup %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating NodeGroup %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating NodeGroup", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeNodeGroupRead(d, meta) +} + +func resourceComputeNodeGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NodeGroup: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting NodeGroup %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "NodeGroup") + } + + err = 
ComputeOperationWaitTime( + config, res, project, "Deleting NodeGroup", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting NodeGroup %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeNodeGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/zones/(?P[^/]+)/nodeGroups/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeNodeGroupCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeGroupDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeGroupName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeGroupNodeTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeNodeGroupSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // 
let terraform core handle it otherwise +} + +func flattenComputeNodeGroupMaintenancePolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeGroupMaintenanceWindow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["start_time"] = + flattenComputeNodeGroupMaintenanceWindowStartTime(original["startTime"], d, config) + return []interface{}{transformed} +} +func flattenComputeNodeGroupMaintenanceWindowStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeGroupAutoscalingPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["mode"] = + flattenComputeNodeGroupAutoscalingPolicyMode(original["mode"], d, config) + transformed["min_nodes"] = + flattenComputeNodeGroupAutoscalingPolicyMinNodes(original["minNodes"], d, config) + transformed["max_nodes"] = + flattenComputeNodeGroupAutoscalingPolicyMaxNodes(original["maxNodes"], d, config) + return []interface{}{transformed} +} +func flattenComputeNodeGroupAutoscalingPolicyMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeGroupAutoscalingPolicyMinNodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + 
intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeNodeGroupAutoscalingPolicyMaxNodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeNodeGroupShareSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["share_type"] = + flattenComputeNodeGroupShareSettingsShareType(original["shareType"], d, config) + transformed["project_map"] = + flattenComputeNodeGroupShareSettingsProjectMap(original["projectMap"], d, config) + return []interface{}{transformed} +} +func flattenComputeNodeGroupShareSettingsShareType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeGroupShareSettingsProjectMap(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.(map[string]interface{}) + transformed := make([]interface{}, 0, len(l)) + for k, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "id": k, + "project_id": flattenComputeNodeGroupShareSettingsProjectMapProjectId(original["projectId"], d, config), + }) + } + return transformed +} +func flattenComputeNodeGroupShareSettingsProjectMapProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + 
+func flattenComputeNodeGroupZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandComputeNodeGroupDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeGroupName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeGroupNodeTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("nodeTemplates", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for node_template: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeNodeGroupSize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeGroupMaintenancePolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeGroupMaintenanceWindow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedStartTime, err := expandComputeNodeGroupMaintenanceWindowStartTime(original["start_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startTime"] = transformedStartTime + } + + return transformed, nil +} + +func expandComputeNodeGroupMaintenanceWindowStartTime(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeGroupAutoscalingPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMode, err := expandComputeNodeGroupAutoscalingPolicyMode(original["mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mode"] = transformedMode + } + + transformedMinNodes, err := expandComputeNodeGroupAutoscalingPolicyMinNodes(original["min_nodes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinNodes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minNodes"] = transformedMinNodes + } + + transformedMaxNodes, err := expandComputeNodeGroupAutoscalingPolicyMaxNodes(original["max_nodes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxNodes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxNodes"] = transformedMaxNodes + } + + return transformed, nil +} + +func expandComputeNodeGroupAutoscalingPolicyMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeGroupAutoscalingPolicyMinNodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeGroupAutoscalingPolicyMaxNodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeGroupShareSettings(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedShareType, err := expandComputeNodeGroupShareSettingsShareType(original["share_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedShareType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["shareType"] = transformedShareType + } + + transformedProjectMap, err := expandComputeNodeGroupShareSettingsProjectMap(original["project_map"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectMap); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectMap"] = transformedProjectMap + } + + return transformed, nil +} + +func expandComputeNodeGroupShareSettingsShareType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeGroupShareSettingsProjectMap(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + if v == nil { + return map[string]interface{}{}, nil + } + m := make(map[string]interface{}) + for _, raw := range v.(*schema.Set).List() { + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProjectId, err := expandComputeNodeGroupShareSettingsProjectMapProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + transformedId, err := tpgresource.ExpandString(original["id"], d, config) + if err != nil { + return nil, err + } + m[transformedId] = transformed + } + return m, 
nil +} + +func expandComputeNodeGroupShareSettingsProjectMapProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeGroupZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("zones", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for zone: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_group_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_group_sweeper.go new file mode 100644 index 0000000000..e42ba405c5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_group_sweeper.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeNodeGroup", testSweepComputeNodeGroup) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeNodeGroup(region string) error { + resourceName := "ComputeNodeGroup" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/aggregated/nodeGroups", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] 
Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + var rl []interface{} + zones := resourceList.(map[string]interface{}) + // Loop through every zone in the list response + for _, zonesValue := range zones { + zone := zonesValue.(map[string]interface{}) + for k, v := range zone { + // Zone map either has resources or a warning stating there were no resources found in the zone + if k != "warning" { + resourcesInZone := v.([]interface{}) + rl = append(rl, resourcesInZone...) + } + } + } + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}" + if obj["zone"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource zone was nil", resourceName) + return nil + } + zone := tpgresource.GetResourceNameFromSelfLink(obj["zone"].(string)) + deleteTemplate = strings.Replace(deleteTemplate, "{{zone}}", zone, -1) + + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + 
RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_template.go new file mode 100644 index 0000000000..6a7dd66802 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_template.go @@ -0,0 +1,616 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeNodeTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNodeTemplateCreate, + Read: resourceComputeNodeTemplateRead, + Delete: resourceComputeNodeTemplateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeNodeTemplateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "cpu_overcommit_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ENABLED", "NONE", ""}), + Description: `CPU overcommit. Default value: "NONE" Possible values: ["ENABLED", "NONE"]`, + Default: "NONE", + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional textual description of the resource.`, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the resource.`, + }, + "node_affinity_labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Labels to use for node affinity, which will be used in +instance scheduling.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "node_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Node type to use for nodes group that are created from this template. 
+Only one of nodeTypeFlexibility and nodeType can be specified.`, + ConflictsWith: []string{"node_type_flexibility"}, + }, + "node_type_flexibility": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Flexible properties for the desired node type. Node groups that +use this node template will create nodes of a type that matches +these properties. Only one of nodeTypeFlexibility and nodeType can +be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpus": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Number of virtual CPUs to use.`, + AtLeastOneOf: []string{"node_type_flexibility.0.cpus", "node_type_flexibility.0.memory"}, + }, + "memory": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Physical memory available to the node, defined in MB.`, + AtLeastOneOf: []string{"node_type_flexibility.0.cpus", "node_type_flexibility.0.memory"}, + }, + "local_ssd": { + Type: schema.TypeString, + Computed: true, + Description: `Use local SSD`, + }, + }, + }, + ConflictsWith: []string{"node_type"}, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Region where nodes using the node template will be created. +If it is not provided, the provider region is used.`, + }, + "server_binding": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The server binding policy for nodes using this template. Determines +where the nodes should restart following a maintenance event.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"RESTART_NODE_ON_ANY_SERVER", "RESTART_NODE_ON_MINIMAL_SERVERS"}), + Description: `Type of server binding policy. 
If 'RESTART_NODE_ON_ANY_SERVER', +nodes using this template will restart on any physical server +following a maintenance event. + +If 'RESTART_NODE_ON_MINIMAL_SERVER', nodes using this template +will restart on the same physical server following a maintenance +event, instead of being live migrated to or restarted on a new +physical server. This option may be useful if you are using +software licenses tied to the underlying server characteristics +such as physical sockets or cores, to avoid the need for +additional licenses when maintenance occurs. However, VMs on such +nodes will experience outages while maintenance is applied. Possible values: ["RESTART_NODE_ON_ANY_SERVER", "RESTART_NODE_ON_MINIMAL_SERVERS"]`, + }, + }, + }, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeNodeTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeNodeTemplateDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeNodeTemplateName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + 
nodeAffinityLabelsProp, err := expandComputeNodeTemplateNodeAffinityLabels(d.Get("node_affinity_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("node_affinity_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(nodeAffinityLabelsProp)) && (ok || !reflect.DeepEqual(v, nodeAffinityLabelsProp)) { + obj["nodeAffinityLabels"] = nodeAffinityLabelsProp + } + nodeTypeProp, err := expandComputeNodeTemplateNodeType(d.Get("node_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("node_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(nodeTypeProp)) && (ok || !reflect.DeepEqual(v, nodeTypeProp)) { + obj["nodeType"] = nodeTypeProp + } + nodeTypeFlexibilityProp, err := expandComputeNodeTemplateNodeTypeFlexibility(d.Get("node_type_flexibility"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("node_type_flexibility"); !tpgresource.IsEmptyValue(reflect.ValueOf(nodeTypeFlexibilityProp)) && (ok || !reflect.DeepEqual(v, nodeTypeFlexibilityProp)) { + obj["nodeTypeFlexibility"] = nodeTypeFlexibilityProp + } + serverBindingProp, err := expandComputeNodeTemplateServerBinding(d.Get("server_binding"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("server_binding"); !tpgresource.IsEmptyValue(reflect.ValueOf(serverBindingProp)) && (ok || !reflect.DeepEqual(v, serverBindingProp)) { + obj["serverBinding"] = serverBindingProp + } + cpuOvercommitTypeProp, err := expandComputeNodeTemplateCpuOvercommitType(d.Get("cpu_overcommit_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cpu_overcommit_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(cpuOvercommitTypeProp)) && (ok || !reflect.DeepEqual(v, cpuOvercommitTypeProp)) { + obj["cpuOvercommitType"] = cpuOvercommitTypeProp + } + regionProp, err := expandComputeNodeTemplateRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/nodeTemplates") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new NodeTemplate: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NodeTemplate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating NodeTemplate: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating NodeTemplate", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create NodeTemplate: %s", err) + } + + log.Printf("[DEBUG] Finished creating NodeTemplate %q: %#v", d.Id(), res) + + return resourceComputeNodeTemplateRead(d, meta) +} + +func resourceComputeNodeTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NodeTemplate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeNodeTemplate %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading NodeTemplate: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeNodeTemplateCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeTemplate: %s", err) + } + if err := d.Set("description", flattenComputeNodeTemplateDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeTemplate: %s", err) + } + if err := d.Set("name", flattenComputeNodeTemplateName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeTemplate: %s", err) + } + if err := d.Set("node_affinity_labels", flattenComputeNodeTemplateNodeAffinityLabels(res["nodeAffinityLabels"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeTemplate: %s", err) + } + if err := d.Set("node_type", flattenComputeNodeTemplateNodeType(res["nodeType"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeTemplate: %s", err) + } + if err := d.Set("node_type_flexibility", flattenComputeNodeTemplateNodeTypeFlexibility(res["nodeTypeFlexibility"], d, config)); err != nil { + return fmt.Errorf("Error reading 
NodeTemplate: %s", err) + } + if err := d.Set("server_binding", flattenComputeNodeTemplateServerBinding(res["serverBinding"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeTemplate: %s", err) + } + if err := d.Set("cpu_overcommit_type", flattenComputeNodeTemplateCpuOvercommitType(res["cpuOvercommitType"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeTemplate: %s", err) + } + if err := d.Set("region", flattenComputeNodeTemplateRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading NodeTemplate: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading NodeTemplate: %s", err) + } + + return nil +} + +func resourceComputeNodeTemplateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NodeTemplate: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting NodeTemplate %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "NodeTemplate") + } + + err = 
ComputeOperationWaitTime( + config, res, project, "Deleting NodeTemplate", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting NodeTemplate %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeNodeTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/nodeTemplates/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeNodeTemplateCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeTemplateDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeTemplateName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeTemplateNodeAffinityLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeTemplateNodeType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeTemplateNodeTypeFlexibility(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cpus"] = + 
flattenComputeNodeTemplateNodeTypeFlexibilityCpus(original["cpus"], d, config) + transformed["memory"] = + flattenComputeNodeTemplateNodeTypeFlexibilityMemory(original["memory"], d, config) + transformed["local_ssd"] = + flattenComputeNodeTemplateNodeTypeFlexibilityLocalSsd(original["localSsd"], d, config) + return []interface{}{transformed} +} +func flattenComputeNodeTemplateNodeTypeFlexibilityCpus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeTemplateNodeTypeFlexibilityMemory(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeTemplateNodeTypeFlexibilityLocalSsd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeTemplateServerBinding(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = + flattenComputeNodeTemplateServerBindingType(original["type"], d, config) + return []interface{}{transformed} +} +func flattenComputeNodeTemplateServerBindingType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeTemplateCpuOvercommitType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNodeTemplateRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandComputeNodeTemplateDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeTemplateName(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeTemplateNodeAffinityLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandComputeNodeTemplateNodeType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeTemplateNodeTypeFlexibility(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCpus, err := expandComputeNodeTemplateNodeTypeFlexibilityCpus(original["cpus"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCpus); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cpus"] = transformedCpus + } + + transformedMemory, err := expandComputeNodeTemplateNodeTypeFlexibilityMemory(original["memory"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMemory); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["memory"] = transformedMemory + } + + transformedLocalSsd, err := expandComputeNodeTemplateNodeTypeFlexibilityLocalSsd(original["local_ssd"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocalSsd); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["localSsd"] = transformedLocalSsd + } + + return transformed, nil +} + +func expandComputeNodeTemplateNodeTypeFlexibilityCpus(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeTemplateNodeTypeFlexibilityMemory(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeTemplateNodeTypeFlexibilityLocalSsd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeTemplateServerBinding(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandComputeNodeTemplateServerBindingType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + return transformed, nil +} + +func expandComputeNodeTemplateServerBindingType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeTemplateCpuOvercommitType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNodeTemplateRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_template_sweeper.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_template_sweeper.go new file mode 100644 index 0000000000..b11a51743d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_node_template_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeNodeTemplate", testSweepComputeNodeTemplate) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeNodeTemplate(region string) error { + resourceName := "ComputeNodeTemplate" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + 
return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/nodeTemplates", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_packet_mirroring.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_packet_mirroring.go new file mode 100644 index 0000000000..1dd0a2e670 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_packet_mirroring.go @@ -0,0 +1,940 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputePacketMirroring() *schema.Resource { + return &schema.Resource{ + Create: resourceComputePacketMirroringCreate, + Read: resourceComputePacketMirroringRead, + Update: resourceComputePacketMirroringUpdate, + Delete: resourceComputePacketMirroringDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputePacketMirroringImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "collector_ilb": { + Type: schema.TypeList, + Required: true, + Description: `The Forwarding Rule resource (of type load_balancing_scheme=INTERNAL) +that will be used as collector for mirrored traffic. 
The +specified forwarding rule must have is_mirroring_collector +set to true.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URL of the forwarding rule.`, + }, + }, + }, + }, + "mirrored_resources": { + Type: schema.TypeList, + Required: true, + Description: `A means of specifying which resources to mirror.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instances": { + Type: schema.TypeList, + Optional: true, + Description: `All the listed instances will be mirrored. Specify at most 50.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URL of the instances where this rule should be active.`, + }, + }, + }, + AtLeastOneOf: []string{"mirrored_resources.0.subnetworks", "mirrored_resources.0.instances", "mirrored_resources.0.tags"}, + }, + "subnetworks": { + Type: schema.TypeList, + Optional: true, + Description: `All instances in one of these subnetworks will be mirrored.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URL of the subnetwork where this rule should be active.`, + }, + }, + }, + AtLeastOneOf: []string{"mirrored_resources.0.subnetworks", "mirrored_resources.0.instances", "mirrored_resources.0.tags"}, + }, + "tags": { + Type: schema.TypeList, + Optional: true, + Description: `All instances with these tags will be mirrored.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + AtLeastOneOf: []string{"mirrored_resources.0.subnetworks", "mirrored_resources.0.instances", "mirrored_resources.0.tags"}, + }, + }, + }, + }, + "name": { + Type: 
schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateGCEName, + Description: `The name of the packet mirroring rule`, + }, + "network": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `Specifies the mirrored VPC network. Only packets in this network +will be mirrored. All mirrored VMs should have a NIC in the given +network. All mirrored subnetworks should belong to the given network.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The full self_link URL of the network where this rule is active.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A human-readable description of the rule.`, + }, + "filter": { + Type: schema.TypeList, + Optional: true, + Description: `A filter for mirrored traffic. If unset, all traffic is mirrored.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr_ranges": { + Type: schema.TypeList, + Optional: true, + Description: `IP CIDR ranges that apply as a filter on the source (ingress) or +destination (egress) IP in the IP header. Only IPv4 is supported.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "direction": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"INGRESS", "EGRESS", "BOTH", ""}), + Description: `Direction of traffic to mirror. 
Default value: "BOTH" Possible values: ["INGRESS", "EGRESS", "BOTH"]`, + Default: "BOTH", + }, + "ip_protocols": { + Type: schema.TypeList, + Optional: true, + Description: `Possible IP protocols including tcp, udp, icmp and esp`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "priority": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Since only one rule can be active at a time, priority is +used to break ties in the case of two rules that apply to +the same instances.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The Region in which the created address should reside. +If it is not provided, the provider region is used.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputePacketMirroringCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandComputePacketMirroringName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandComputePacketMirroringDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + regionProp, err := expandComputePacketMirroringRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + networkProp, err := expandComputePacketMirroringNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + priorityProp, err := expandComputePacketMirroringPriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(priorityProp)) && (ok || !reflect.DeepEqual(v, priorityProp)) { + obj["priority"] = priorityProp + } + collectorIlbProp, err := expandComputePacketMirroringCollectorIlb(d.Get("collector_ilb"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("collector_ilb"); !tpgresource.IsEmptyValue(reflect.ValueOf(collectorIlbProp)) && (ok || !reflect.DeepEqual(v, collectorIlbProp)) { + obj["collectorIlb"] = collectorIlbProp + } + filterProp, err := expandComputePacketMirroringFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + mirroredResourcesProp, err := expandComputePacketMirroringMirroredResources(d.Get("mirrored_resources"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("mirrored_resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(mirroredResourcesProp)) && (ok || !reflect.DeepEqual(v, mirroredResourcesProp)) { + obj["mirroredResources"] = mirroredResourcesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/packetMirrorings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new PacketMirroring: 
%#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PacketMirroring: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating PacketMirroring: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating PacketMirroring", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create PacketMirroring: %s", err) + } + + log.Printf("[DEBUG] Finished creating PacketMirroring %q: %#v", d.Id(), res) + + return resourceComputePacketMirroringRead(d, meta) +} + +func resourceComputePacketMirroringRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PacketMirroring: %s", err) + } + billingProject = project + + // err == nil 
indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputePacketMirroring %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading PacketMirroring: %s", err) + } + + if err := d.Set("name", flattenComputePacketMirroringName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading PacketMirroring: %s", err) + } + if err := d.Set("description", flattenComputePacketMirroringDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading PacketMirroring: %s", err) + } + if err := d.Set("region", flattenComputePacketMirroringRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading PacketMirroring: %s", err) + } + if err := d.Set("network", flattenComputePacketMirroringNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading PacketMirroring: %s", err) + } + if err := d.Set("priority", flattenComputePacketMirroringPriority(res["priority"], d, config)); err != nil { + return fmt.Errorf("Error reading PacketMirroring: %s", err) + } + if err := d.Set("collector_ilb", flattenComputePacketMirroringCollectorIlb(res["collectorIlb"], d, config)); err != nil { + return fmt.Errorf("Error reading PacketMirroring: %s", err) + } + if err := d.Set("filter", flattenComputePacketMirroringFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading PacketMirroring: %s", err) + } + if err := d.Set("mirrored_resources", flattenComputePacketMirroringMirroredResources(res["mirroredResources"], d, config)); err != nil { + return fmt.Errorf("Error reading PacketMirroring: 
%s", err) + } + + return nil +} + +func resourceComputePacketMirroringUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PacketMirroring: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + nameProp, err := expandComputePacketMirroringName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + regionProp, err := expandComputePacketMirroringRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + priorityProp, err := expandComputePacketMirroringPriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, priorityProp)) { + obj["priority"] = priorityProp + } + collectorIlbProp, err := expandComputePacketMirroringCollectorIlb(d.Get("collector_ilb"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("collector_ilb"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, collectorIlbProp)) { + obj["collectorIlb"] = collectorIlbProp + } + filterProp, err := expandComputePacketMirroringFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + 
} + mirroredResourcesProp, err := expandComputePacketMirroringMirroredResources(d.Get("mirrored_resources"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("mirrored_resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mirroredResourcesProp)) { + obj["mirroredResources"] = mirroredResourcesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating PacketMirroring %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating PacketMirroring %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating PacketMirroring %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating PacketMirroring", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputePacketMirroringRead(d, meta) +} + +func resourceComputePacketMirroringDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PacketMirroring: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting PacketMirroring %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "PacketMirroring") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting PacketMirroring", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting PacketMirroring %q: %#v", d.Id(), res) + return nil +} + +func resourceComputePacketMirroringImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/packetMirrorings/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputePacketMirroringName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputePacketMirroringDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenComputePacketMirroringRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenComputePacketMirroringNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["url"] = + flattenComputePacketMirroringNetworkUrl(original["url"], d, config) + return []interface{}{transformed} +} +func flattenComputePacketMirroringNetworkUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputePacketMirroringPriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputePacketMirroringCollectorIlb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["url"] = + flattenComputePacketMirroringCollectorIlbUrl(original["url"], d, config) + return []interface{}{transformed} +} +func flattenComputePacketMirroringCollectorIlbUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func 
flattenComputePacketMirroringFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["ip_protocols"] = + flattenComputePacketMirroringFilterIpProtocols(original["IPProtocols"], d, config) + transformed["cidr_ranges"] = + flattenComputePacketMirroringFilterCidrRanges(original["cidrRanges"], d, config) + transformed["direction"] = + flattenComputePacketMirroringFilterDirection(original["direction"], d, config) + return []interface{}{transformed} +} +func flattenComputePacketMirroringFilterIpProtocols(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputePacketMirroringFilterCidrRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputePacketMirroringFilterDirection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputePacketMirroringMirroredResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["subnetworks"] = + flattenComputePacketMirroringMirroredResourcesSubnetworks(original["subnetworks"], d, config) + transformed["instances"] = + flattenComputePacketMirroringMirroredResourcesInstances(original["instances"], d, config) + transformed["tags"] = + flattenComputePacketMirroringMirroredResourcesTags(original["tags"], d, config) + return []interface{}{transformed} +} +func flattenComputePacketMirroringMirroredResourcesSubnetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := 
v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "url": flattenComputePacketMirroringMirroredResourcesSubnetworksUrl(original["url"], d, config), + }) + } + return transformed +} +func flattenComputePacketMirroringMirroredResourcesSubnetworksUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputePacketMirroringMirroredResourcesInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "url": flattenComputePacketMirroringMirroredResourcesInstancesUrl(original["url"], d, config), + }) + } + return transformed +} +func flattenComputePacketMirroringMirroredResourcesInstancesUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputePacketMirroringMirroredResourcesTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputePacketMirroringName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputePacketMirroringDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return 
v, nil +} + +func expandComputePacketMirroringRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputePacketMirroringNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUrl, err := expandComputePacketMirroringNetworkUrl(original["url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["url"] = transformedUrl + } + + return transformed, nil +} + +func expandComputePacketMirroringNetworkUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for url: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputePacketMirroringPriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputePacketMirroringCollectorIlb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUrl, err := expandComputePacketMirroringCollectorIlbUrl(original["url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["url"] = transformedUrl + } + + return transformed, nil +} + 
+func expandComputePacketMirroringCollectorIlbUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("forwardingRules", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for url: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputePacketMirroringFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIpProtocols, err := expandComputePacketMirroringFilterIpProtocols(original["ip_protocols"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpProtocols); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["IPProtocols"] = transformedIpProtocols + } + + transformedCidrRanges, err := expandComputePacketMirroringFilterCidrRanges(original["cidr_ranges"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCidrRanges); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cidrRanges"] = transformedCidrRanges + } + + transformedDirection, err := expandComputePacketMirroringFilterDirection(original["direction"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDirection); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["direction"] = transformedDirection + } + + return transformed, nil +} + +func expandComputePacketMirroringFilterIpProtocols(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputePacketMirroringFilterCidrRanges(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputePacketMirroringFilterDirection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputePacketMirroringMirroredResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSubnetworks, err := expandComputePacketMirroringMirroredResourcesSubnetworks(original["subnetworks"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubnetworks); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subnetworks"] = transformedSubnetworks + } + + transformedInstances, err := expandComputePacketMirroringMirroredResourcesInstances(original["instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["instances"] = transformedInstances + } + + transformedTags, err := expandComputePacketMirroringMirroredResourcesTags(original["tags"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTags); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tags"] = transformedTags + } + + return transformed, nil +} + +func expandComputePacketMirroringMirroredResourcesSubnetworks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUrl, err := 
expandComputePacketMirroringMirroredResourcesSubnetworksUrl(original["url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["url"] = transformedUrl + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputePacketMirroringMirroredResourcesSubnetworksUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for url: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputePacketMirroringMirroredResourcesInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUrl, err := expandComputePacketMirroringMirroredResourcesInstancesUrl(original["url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["url"] = transformedUrl + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputePacketMirroringMirroredResourcesInstancesUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseZonalFieldValue("instances", v.(string), "project", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for url: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputePacketMirroringMirroredResourcesTags(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_packet_mirroring_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_packet_mirroring_sweeper.go new file mode 100644 index 0000000000..2a7490a856 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_packet_mirroring_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputePacketMirroring", testSweepComputePacketMirroring) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputePacketMirroring(region string) error { + resourceName := "ComputePacketMirroring" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/packetMirrorings", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { 
+ log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["packetMirrorings"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_per_instance_config.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_per_instance_config.go new file mode 100644 index 0000000000..14f4ca9e85 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_per_instance_config.go @@ -0,0 +1,778 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputePerInstanceConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceComputePerInstanceConfigCreate, + Read: resourceComputePerInstanceConfigRead, + Update: resourceComputePerInstanceConfigUpdate, + Delete: resourceComputePerInstanceConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputePerInstanceConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "instance_group_manager": { + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The instance group manager this instance config is part of.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name for this per-instance config and its corresponding instance.`, + }, + "preserved_state": { + Type: schema.TypeList, + Optional: true, + Description: `The preserved state for this instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk": { + Type: schema.TypeSet, + Optional: true, + Description: `Stateful disks for the instance.`, + Elem: computePerInstanceConfigPreservedStateDiskSchema(), + // Default schema.HashSchema is used. + }, + "metadata": { + Type: schema.TypeMap, + Optional: true, + Description: `Preserved metadata defined for this instance. This is a list of key->value pairs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Zone where the containing instance group manager is located`, + }, + "minimal_action": { + Type: schema.TypeString, + Optional: true, + Default: "NONE", + Description: `The minimal action to perform on the instance during an update. +Default is 'NONE'. Possible values are: +* REPLACE +* RESTART +* REFRESH +* NONE`, + }, + "most_disruptive_allowed_action": { + Type: schema.TypeString, + Optional: true, + Default: "REPLACE", + Description: `The most disruptive action to perform on the instance during an update. +Default is 'REPLACE'. Possible values are: +* REPLACE +* RESTART +* REFRESH +* NONE`, + }, + "remove_instance_state_on_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `When true, deleting this config will immediately remove any specified state from the underlying instance. 
+When false, deleting this config will *not* immediately remove any state from the underlying instance. +State will be removed on the next instance recreation or update.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func computePerInstanceConfigPreservedStateDiskSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Required: true, + Description: `A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance.`, + }, + "source": { + Type: schema.TypeString, + Required: true, + Description: `The URI of an existing persistent disk to attach under the specified device-name in the format +'projects/project-id/zones/zone/disks/disk-name'.`, + }, + "delete_rule": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION", ""}), + Description: `A value that prescribes what should happen to the stateful disk when the VM instance is deleted. +The available options are 'NEVER' and 'ON_PERMANENT_INSTANCE_DELETION'. +'NEVER' - detach the disk when the VM is deleted, but do not delete the disk. +'ON_PERMANENT_INSTANCE_DELETION' will delete the stateful disk when the VM is permanently +deleted from the instance group. Default value: "NEVER" Possible values: ["NEVER", "ON_PERMANENT_INSTANCE_DELETION"]`, + Default: "NEVER", + }, + "mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"READ_ONLY", "READ_WRITE", ""}), + Description: `The mode of the disk. 
Default value: "READ_WRITE" Possible values: ["READ_ONLY", "READ_WRITE"]`, + Default: "READ_WRITE", + }, + }, + } +} + +func resourceComputePerInstanceConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandNestedComputePerInstanceConfigName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + preservedStateProp, err := expandNestedComputePerInstanceConfigPreservedState(d.Get("preserved_state"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("preserved_state"); !tpgresource.IsEmptyValue(reflect.ValueOf(preservedStateProp)) && (ok || !reflect.DeepEqual(v, preservedStateProp)) { + obj["preservedState"] = preservedStateProp + } + + obj, err = resourceComputePerInstanceConfigEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "instanceGroupManager/{{project}}/{{zone}}/{{instance_group_manager}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/createInstances") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new PerInstanceConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PerInstanceConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating PerInstanceConfig: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{zone}}/{{instance_group_manager}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating PerInstanceConfig", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create PerInstanceConfig: %s", err) + } + + log.Printf("[DEBUG] Finished creating PerInstanceConfig %q: %#v", d.Id(), res) + + return resourceComputePerInstanceConfigRead(d, meta) +} + +func resourceComputePerInstanceConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/listPerInstanceConfigs") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PerInstanceConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + 
Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputePerInstanceConfig %q", d.Id())) + } + + res, err = flattenNestedComputePerInstanceConfig(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing ComputePerInstanceConfig because it couldn't be matched.") + d.SetId("") + return nil + } + + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("minimal_action"); !ok { + if err := d.Set("minimal_action", "NONE"); err != nil { + return fmt.Errorf("Error setting minimal_action: %s", err) + } + } + if _, ok := d.GetOkExists("most_disruptive_allowed_action"); !ok { + if err := d.Set("most_disruptive_allowed_action", "REPLACE"); err != nil { + return fmt.Errorf("Error setting most_disruptive_allowed_action: %s", err) + } + } + if _, ok := d.GetOkExists("remove_instance_state_on_destroy"); !ok { + if err := d.Set("remove_instance_state_on_destroy", false); err != nil { + return fmt.Errorf("Error setting remove_instance_state_on_destroy: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading PerInstanceConfig: %s", err) + } + + if err := d.Set("name", flattenNestedComputePerInstanceConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading PerInstanceConfig: %s", err) + } + if err := d.Set("preserved_state", flattenNestedComputePerInstanceConfigPreservedState(res["preservedState"], d, config)); err != nil { + return fmt.Errorf("Error reading PerInstanceConfig: %s", err) + } + + return nil +} + +func resourceComputePerInstanceConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + 
billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PerInstanceConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + nameProp, err := expandNestedComputePerInstanceConfigName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + preservedStateProp, err := expandNestedComputePerInstanceConfigPreservedState(d.Get("preserved_state"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("preserved_state"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, preservedStateProp)) { + obj["preservedState"] = preservedStateProp + } + + obj, err = resourceComputePerInstanceConfigUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "instanceGroupManager/{{project}}/{{zone}}/{{instance_group_manager}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/updatePerInstanceConfigs") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating PerInstanceConfig %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating 
PerInstanceConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating PerInstanceConfig %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating PerInstanceConfig", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + // Instance name in applyUpdatesToInstances request must include zone + instanceName, err := tpgresource.ReplaceVars(d, config, "zones/{{zone}}/instances/{{name}}") + if err != nil { + return err + } + + obj = make(map[string]interface{}) + obj["instances"] = []string{instanceName} + + minAction := d.Get("minimal_action") + if minAction == "" { + minAction = "NONE" + } + obj["minimalAction"] = minAction + + mostDisruptiveAction := d.Get("most_disruptive_allowed_action") + if tpgresource.IsEmptyValue(reflect.ValueOf(mostDisruptiveAction)) { + mostDisruptiveAction = "REPLACE" + } + obj["mostDisruptiveAllowedAction"] = mostDisruptiveAction + + url, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/applyUpdatesToInstances") + if err != nil { + return err + } + + log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating PerInstanceConfig %q: %s", d.Id(), err) + } + + err = ComputeOperationWaitTime( + config, res, project, "Applying update to PerInstanceConfig", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + return resourceComputePerInstanceConfigRead(d, meta) +} + +func resourceComputePerInstanceConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "instanceGroupManager/{{project}}/{{zone}}/{{instance_group_manager}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/deletePerInstanceConfigs") + if err != nil { + return err + } + + var obj map[string]interface{} + obj = map[string]interface{}{ + "names": [1]string{d.Get("name").(string)}, + } + log.Printf("[DEBUG] Deleting PerInstanceConfig %q", d.Id()) + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "PerInstanceConfig") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting PerInstanceConfig", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + // Potentially delete the state managed by this config + if d.Get("remove_instance_state_on_destroy").(bool) { + // Instance name in applyUpdatesToInstances request must include zone + instanceName, err := tpgresource.ReplaceVars(d, config, "zones/{{zone}}/instances/{{name}}") + if err != nil { + return err + } + + obj = make(map[string]interface{}) + obj["instances"] = []string{instanceName} + + // The deletion must be applied to the instance after the PerInstanceConfig is deleted + url, err = tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/applyUpdatesToInstances") + if err != nil { + return err + } + + log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error deleting PerInstanceConfig %q: %s", d.Id(), err) + } + + err = ComputeOperationWaitTime( + config, res, project, "Applying update to PerInstanceConfig", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("Error deleting PerInstanceConfig %q: %s", d.Id(), err) + } + + // PerInstanceConfig goes into "DELETING" state while the instance is actually deleted + err = transport_tpg.PollingWaitTime(resourceComputePerInstanceConfigPollRead(d, meta), PollCheckInstanceConfigDeleted, "Deleting PerInstanceConfig", d.Timeout(schema.TimeoutDelete), 1) + if err != nil { + return fmt.Errorf("Error waiting for delete on PerInstanceConfig %q: %s", d.Id(), err) + } + } + + log.Printf("[DEBUG] Finished deleting PerInstanceConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceComputePerInstanceConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{zone}}/{{instance_group_manager}}/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + 
} + d.SetId(id) + + // Explicitly set virtual fields to default values on import + if err := d.Set("minimal_action", "NONE"); err != nil { + return nil, fmt.Errorf("Error setting minimal_action: %s", err) + } + if err := d.Set("most_disruptive_allowed_action", "REPLACE"); err != nil { + return nil, fmt.Errorf("Error setting most_disruptive_allowed_action: %s", err) + } + if err := d.Set("remove_instance_state_on_destroy", false); err != nil { + return nil, fmt.Errorf("Error setting remove_instance_state_on_destroy: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedComputePerInstanceConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputePerInstanceConfigPreservedState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["metadata"] = + flattenNestedComputePerInstanceConfigPreservedStateMetadata(original["metadata"], d, config) + transformed["disk"] = + flattenNestedComputePerInstanceConfigPreservedStateDisk(original["disks"], d, config) + return []interface{}{transformed} +} +func flattenNestedComputePerInstanceConfigPreservedStateMetadata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputePerInstanceConfigPreservedStateDisk(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + disks := v.(map[string]interface{}) + transformed := make([]interface{}, 0, len(disks)) + for devName, deleteRuleRaw := range disks { + diskObj := deleteRuleRaw.(map[string]interface{}) + source, err := tpgresource.GetRelativePath(diskObj["source"].(string)) + if err != nil { + source = diskObj["source"].(string) + } + transformed = 
append(transformed, map[string]interface{}{ + "device_name": devName, + "delete_rule": diskObj["autoDelete"], + "source": source, + "mode": diskObj["mode"], + }) + } + return transformed +} + +func expandNestedComputePerInstanceConfigName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputePerInstanceConfigPreservedState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMetadata, err := expandNestedComputePerInstanceConfigPreservedStateMetadata(original["metadata"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMetadata); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["metadata"] = transformedMetadata + } + + transformedDisk, err := expandNestedComputePerInstanceConfigPreservedStateDisk(original["disk"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisk); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["disks"] = transformedDisk + } + + return transformed, nil +} + +func expandNestedComputePerInstanceConfigPreservedStateMetadata(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandNestedComputePerInstanceConfigPreservedStateDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return map[string]interface{}{}, nil + } + l := v.(*schema.Set).List() + req := 
make(map[string]interface{}) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + deviceName := original["device_name"].(string) + diskObj := make(map[string]interface{}) + deleteRule := original["delete_rule"].(string) + if deleteRule != "" { + diskObj["autoDelete"] = deleteRule + } + source := original["source"] + if source != "" { + diskObj["source"] = source + } + mode := original["mode"] + if source != "" { + diskObj["mode"] = mode + } + req[deviceName] = diskObj + } + return req, nil +} + +func resourceComputePerInstanceConfigEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + wrappedReq := map[string]interface{}{ + "instances": []interface{}{obj}, + } + return wrappedReq, nil +} + +func resourceComputePerInstanceConfigUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // updates and creates use different wrapping object names + wrappedReq := map[string]interface{}{ + "perInstanceConfigs": []interface{}{obj}, + } + return wrappedReq, nil +} + +func flattenNestedComputePerInstanceConfig(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["items"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value items. 
Actual value: %v", v) + } + + _, item, err := resourceComputePerInstanceConfigFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputePerInstanceConfigFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName, err := expandNestedComputePerInstanceConfigName(d.Get("name"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedName := flattenNestedComputePerInstanceConfigName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemName := flattenNestedComputePerInstanceConfigName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_project_default_network_tier.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_project_default_network_tier.go similarity index 82% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_project_default_network_tier.go rename to 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_project_default_network_tier.go index 1dd52f8aee..f59bd43d1c 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_project_default_network_tier.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_project_default_network_tier.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "log" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -49,13 +54,13 @@ func ResourceComputeProjectDefaultNetworkTier() *schema.Resource { } func resourceComputeProjectDefaultNetworkTierCreateOrUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - projectID, err := getProject(d, config) + projectID, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -80,8 +85,8 @@ func resourceComputeProjectDefaultNetworkTierCreateOrUpdate(d *schema.ResourceDa } func resourceComputeProjectDefaultNetworkTierRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -90,7 +95,7 @@ func resourceComputeProjectDefaultNetworkTierRead(d 
*schema.ResourceData, meta i project, err := config.NewComputeClient(userAgent).Projects.Get(projectId).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Project data for project %q", projectId)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project data for project %q", projectId)) } err = d.Set("network_tier", project.DefaultNetworkTier) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_project_metadata.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_project_metadata.go similarity index 77% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_project_metadata.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_project_metadata.go index 71d5c93698..577d5dee93 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_project_metadata.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_project_metadata.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "log" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/compute/v1" @@ -48,13 +53,13 @@ func ResourceComputeProjectMetadata() *schema.Resource { } func resourceComputeProjectMetadataCreateOrUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - projectID, err := getProject(d, config) + projectID, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -74,16 +79,16 @@ func resourceComputeProjectMetadataCreateOrUpdate(d *schema.ResourceData, meta i } func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } // At import time, we have no state to draw from. We'll wrongly pull the - // provider default project if we use a normal getProject, so we need to + // provider default project if we use a normal GetProject, so we need to // rely on the `id` field being set to the project. - // At any other time we can use getProject, as state will have the correct + // At any other time we can use GetProject, as state will have the correct // value; the project pulled from config / the provider / at import time. 
// // Note that if a user imports a project other than their provider project @@ -93,10 +98,10 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{} project, err := config.NewComputeClient(userAgent).Projects.Get(projectId).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Project metadata for project %q", projectId)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project metadata for project %q", projectId)) } - err = d.Set("metadata", flattenMetadata(project.CommonInstanceMetadata)) + err = d.Set("metadata", FlattenMetadata(project.CommonInstanceMetadata)) if err != nil { return fmt.Errorf("Error setting metadata: %s", err) } @@ -109,13 +114,13 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{} } func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - projectID, err := getProject(d, config) + projectID, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -129,7 +134,7 @@ func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface return resourceComputeProjectMetadataRead(d, meta) } -func resourceComputeProjectMetadataSet(projectID, userAgent string, config *Config, md *compute.Metadata, timeout time.Duration) error { +func resourceComputeProjectMetadataSet(projectID, userAgent string, config *transport_tpg.Config, md *compute.Metadata, timeout time.Duration) error { createMD := func() error { log.Printf("[DEBUG] Loading project service: %s", projectID) project, err := config.NewComputeClient(userAgent).Projects.Get(projectID).Do() @@ -147,6 +152,6 @@ func resourceComputeProjectMetadataSet(projectID, userAgent string, config *Conf return 
ComputeOperationWaitTime(config, op, project.Name, "SetCommonMetadata", userAgent, timeout) } - err := MetadataRetryWrapper(createMD) + err := transport_tpg.MetadataRetryWrapper(createMD) return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_project_metadata_item.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_project_metadata_item.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_project_metadata_item.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_project_metadata_item.go index a48d2306a4..04ce3253c1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_project_metadata_item.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_project_metadata_item.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "log" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/compute/v1" @@ -58,13 +63,13 @@ func ResourceComputeProjectMetadataItem() *schema.Resource { } func resourceComputeProjectMetadataItemCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - projectID, err := getProject(d, config) + projectID, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -83,13 +88,13 @@ func resourceComputeProjectMetadataItemCreate(d *schema.ResourceData, meta inter } func resourceComputeProjectMetadataItemRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - projectID, err := getProject(d, config) + projectID, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -100,7 +105,7 @@ func resourceComputeProjectMetadataItemRead(d *schema.ResourceData, meta interfa return fmt.Errorf("Error loading project '%s': %s", projectID, err) } - md := flattenMetadata(project.CommonInstanceMetadata) + md := FlattenMetadata(project.CommonInstanceMetadata) val, ok := md[d.Id()] if !ok { // Resource no longer exists @@ -122,13 +127,13 @@ func resourceComputeProjectMetadataItemRead(d *schema.ResourceData, meta interfa } func resourceComputeProjectMetadataItemUpdate(d *schema.ResourceData, meta interface{}) error { - config := 
meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - projectID, err := getProject(d, config) + projectID, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -147,13 +152,13 @@ func resourceComputeProjectMetadataItemUpdate(d *schema.ResourceData, meta inter } func resourceComputeProjectMetadataItemDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - projectID, err := getProject(d, config) + projectID, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -169,11 +174,11 @@ func resourceComputeProjectMetadataItemDelete(d *schema.ResourceData, meta inter return nil } -func updateComputeCommonInstanceMetadata(config *Config, projectID, key, userAgent string, afterVal *string, timeout time.Duration, failIfPresent metadataPresentBehavior) error { +func updateComputeCommonInstanceMetadata(config *transport_tpg.Config, projectID, key, userAgent string, afterVal *string, timeout time.Duration, failIfPresent metadataPresentBehavior) error { updateMD := func() error { lockName := fmt.Sprintf("projects/%s/commoninstancemetadata", projectID) - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) log.Printf("[DEBUG] Loading project metadata: %s", projectID) project, err := config.NewComputeClient(userAgent).Projects.Get(projectID).Do() @@ -181,7 +186,7 @@ func updateComputeCommonInstanceMetadata(config *Config, projectID, key, userAge return fmt.Errorf("Error loading project '%s': %s", projectID, err) } - md := 
flattenMetadata(project.CommonInstanceMetadata) + md := FlattenMetadata(project.CommonInstanceMetadata) val, ok := md[key] @@ -224,5 +229,5 @@ func updateComputeCommonInstanceMetadata(config *Config, projectID, key, userAge return ComputeOperationWaitTime(config, op, project.Name, "SetCommonInstanceMetadata", userAgent, timeout) } - return MetadataRetryWrapper(updateMD) + return transport_tpg.MetadataRetryWrapper(updateMD) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_advertised_prefix.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_advertised_prefix.go new file mode 100644 index 0000000000..6b7b53fe75 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_advertised_prefix.go @@ -0,0 +1,341 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputePublicAdvertisedPrefix() *schema.Resource { + return &schema.Resource{ + Create: resourceComputePublicAdvertisedPrefixCreate, + Read: resourceComputePublicAdvertisedPrefixRead, + Delete: resourceComputePublicAdvertisedPrefixDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputePublicAdvertisedPrefixImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "dns_verification_ip": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The IPv4 address to be used for reverse DNS verification.`, + }, + "ip_cidr_range": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The IPv4 address range, in CIDR format, represented by this public advertised prefix.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. The name must be 1-63 characters long, and +comply with RFC1035. Specifically, the name must be 1-63 characters +long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
+which means the first character must be a lowercase letter, and all +following characters must be a dash, lowercase letter, or digit, +except the last character, which cannot be a dash.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputePublicAdvertisedPrefixCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputePublicAdvertisedPrefixDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputePublicAdvertisedPrefixName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + dnsVerificationIpProp, err := expandComputePublicAdvertisedPrefixDnsVerificationIp(d.Get("dns_verification_ip"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dns_verification_ip"); !tpgresource.IsEmptyValue(reflect.ValueOf(dnsVerificationIpProp)) && (ok || !reflect.DeepEqual(v, dnsVerificationIpProp)) { + obj["dnsVerificationIp"] = dnsVerificationIpProp + } + ipCidrRangeProp, err := expandComputePublicAdvertisedPrefixIpCidrRange(d.Get("ip_cidr_range"), d, config) + if err != nil { + return 
err + } else if v, ok := d.GetOkExists("ip_cidr_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipCidrRangeProp)) && (ok || !reflect.DeepEqual(v, ipCidrRangeProp)) { + obj["ipCidrRange"] = ipCidrRangeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/publicAdvertisedPrefixes") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new PublicAdvertisedPrefix: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PublicAdvertisedPrefix: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating PublicAdvertisedPrefix: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/publicAdvertisedPrefixes/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating PublicAdvertisedPrefix", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create PublicAdvertisedPrefix: %s", err) + } + + log.Printf("[DEBUG] Finished creating PublicAdvertisedPrefix %q: %#v", d.Id(), res) + + return resourceComputePublicAdvertisedPrefixRead(d, meta) +} + +func resourceComputePublicAdvertisedPrefixRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/publicAdvertisedPrefixes/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PublicAdvertisedPrefix: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputePublicAdvertisedPrefix %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading PublicAdvertisedPrefix: %s", err) + } + + if err := d.Set("description", flattenComputePublicAdvertisedPrefixDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading PublicAdvertisedPrefix: %s", err) + } + if err := d.Set("name", flattenComputePublicAdvertisedPrefixName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading PublicAdvertisedPrefix: %s", err) + } + if err := d.Set("dns_verification_ip", flattenComputePublicAdvertisedPrefixDnsVerificationIp(res["dnsVerificationIp"], d, config)); err != nil { + return fmt.Errorf("Error reading PublicAdvertisedPrefix: %s", err) + } + if err := d.Set("ip_cidr_range", flattenComputePublicAdvertisedPrefixIpCidrRange(res["ipCidrRange"], d, config)); err != nil { + return fmt.Errorf("Error reading PublicAdvertisedPrefix: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return 
fmt.Errorf("Error reading PublicAdvertisedPrefix: %s", err) + } + + return nil +} + +func resourceComputePublicAdvertisedPrefixDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PublicAdvertisedPrefix: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/publicAdvertisedPrefixes/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting PublicAdvertisedPrefix %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "PublicAdvertisedPrefix") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting PublicAdvertisedPrefix", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting PublicAdvertisedPrefix %q: %#v", d.Id(), res) + return nil +} + +func resourceComputePublicAdvertisedPrefixImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/publicAdvertisedPrefixes/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace 
import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/publicAdvertisedPrefixes/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputePublicAdvertisedPrefixDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputePublicAdvertisedPrefixName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputePublicAdvertisedPrefixDnsVerificationIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputePublicAdvertisedPrefixIpCidrRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputePublicAdvertisedPrefixDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputePublicAdvertisedPrefixName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputePublicAdvertisedPrefixDnsVerificationIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputePublicAdvertisedPrefixIpCidrRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_advertised_prefix_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_advertised_prefix_sweeper.go new file mode 100644 index 0000000000..c268188bfc --- 
/dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_advertised_prefix_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputePublicAdvertisedPrefix", testSweepComputePublicAdvertisedPrefix) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputePublicAdvertisedPrefix(region string) error { + resourceName := "ComputePublicAdvertisedPrefix" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := 
&tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/publicAdvertisedPrefixes", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["publicAdvertisedPrefixs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/publicAdvertisedPrefixes/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_delegated_prefix.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_delegated_prefix.go new file mode 100644 index 0000000000..0f217ff5d0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_delegated_prefix.go @@ -0,0 +1,372 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputePublicDelegatedPrefix() *schema.Resource { + return &schema.Resource{ + Create: resourceComputePublicDelegatedPrefixCreate, + Read: resourceComputePublicDelegatedPrefixRead, + Delete: resourceComputePublicDelegatedPrefixDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputePublicDelegatedPrefixImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "ip_cidr_range": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The IPv4 address range, in CIDR format, represented by this public advertised prefix.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. The name must be 1-63 characters long, and +comply with RFC1035. Specifically, the name must be 1-63 characters +long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
+which means the first character must be a lowercase letter, and all +following characters must be a dash, lowercase letter, or digit, +except the last character, which cannot be a dash.`, + }, + "parent_prefix": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URL of parent prefix. Either PublicAdvertisedPrefix or PublicDelegatedPrefix.`, + }, + "region": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A region where the prefix will reside.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "is_live_migration": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If true, the prefix will be live migrated.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputePublicDelegatedPrefixCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputePublicDelegatedPrefixDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + isLiveMigrationProp, err := expandComputePublicDelegatedPrefixIsLiveMigration(d.Get("is_live_migration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("is_live_migration"); !tpgresource.IsEmptyValue(reflect.ValueOf(isLiveMigrationProp)) && 
(ok || !reflect.DeepEqual(v, isLiveMigrationProp)) { + obj["isLiveMigration"] = isLiveMigrationProp + } + nameProp, err := expandComputePublicDelegatedPrefixName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + parentPrefixProp, err := expandComputePublicDelegatedPrefixParentPrefix(d.Get("parent_prefix"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent_prefix"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentPrefixProp)) && (ok || !reflect.DeepEqual(v, parentPrefixProp)) { + obj["parentPrefix"] = parentPrefixProp + } + ipCidrRangeProp, err := expandComputePublicDelegatedPrefixIpCidrRange(d.Get("ip_cidr_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_cidr_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipCidrRangeProp)) && (ok || !reflect.DeepEqual(v, ipCidrRangeProp)) { + obj["ipCidrRange"] = ipCidrRangeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/publicDelegatedPrefixes") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new PublicDelegatedPrefix: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PublicDelegatedPrefix: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating 
PublicDelegatedPrefix: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/publicDelegatedPrefixes/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating PublicDelegatedPrefix", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create PublicDelegatedPrefix: %s", err) + } + + log.Printf("[DEBUG] Finished creating PublicDelegatedPrefix %q: %#v", d.Id(), res) + + return resourceComputePublicDelegatedPrefixRead(d, meta) +} + +func resourceComputePublicDelegatedPrefixRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/publicDelegatedPrefixes/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PublicDelegatedPrefix: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputePublicDelegatedPrefix %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading PublicDelegatedPrefix: %s", err) + } + + if err := 
d.Set("description", flattenComputePublicDelegatedPrefixDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading PublicDelegatedPrefix: %s", err) + } + if err := d.Set("is_live_migration", flattenComputePublicDelegatedPrefixIsLiveMigration(res["isLiveMigration"], d, config)); err != nil { + return fmt.Errorf("Error reading PublicDelegatedPrefix: %s", err) + } + if err := d.Set("name", flattenComputePublicDelegatedPrefixName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading PublicDelegatedPrefix: %s", err) + } + if err := d.Set("parent_prefix", flattenComputePublicDelegatedPrefixParentPrefix(res["parentPrefix"], d, config)); err != nil { + return fmt.Errorf("Error reading PublicDelegatedPrefix: %s", err) + } + if err := d.Set("ip_cidr_range", flattenComputePublicDelegatedPrefixIpCidrRange(res["ipCidrRange"], d, config)); err != nil { + return fmt.Errorf("Error reading PublicDelegatedPrefix: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading PublicDelegatedPrefix: %s", err) + } + + return nil +} + +func resourceComputePublicDelegatedPrefixDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PublicDelegatedPrefix: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/publicDelegatedPrefixes/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting PublicDelegatedPrefix %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err 
:= tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "PublicDelegatedPrefix") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting PublicDelegatedPrefix", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting PublicDelegatedPrefix %q: %#v", d.Id(), res) + return nil +} + +func resourceComputePublicDelegatedPrefixImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/publicDelegatedPrefixes/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/publicDelegatedPrefixes/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputePublicDelegatedPrefixDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputePublicDelegatedPrefixIsLiveMigration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputePublicDelegatedPrefixName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputePublicDelegatedPrefixParentPrefix(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenComputePublicDelegatedPrefixIpCidrRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputePublicDelegatedPrefixDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputePublicDelegatedPrefixIsLiveMigration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputePublicDelegatedPrefixName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputePublicDelegatedPrefixParentPrefix(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputePublicDelegatedPrefixIpCidrRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_delegated_prefix_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_delegated_prefix_sweeper.go new file mode 100644 index 0000000000..3628e7183e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_public_delegated_prefix_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputePublicDelegatedPrefix", testSweepComputePublicDelegatedPrefix) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputePublicDelegatedPrefix(region string) error { + resourceName := "ComputePublicDelegatedPrefix" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/publicDelegatedPrefixes", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["publicDelegatedPrefixs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/publicDelegatedPrefixes/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_autoscaler.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_autoscaler.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_autoscaler.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_autoscaler.go index 569937fd7b..4bbf0b1942 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_autoscaler.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_autoscaler.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "fmt" @@ -21,6 +24,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceComputeRegionAutoscaler() *schema.Resource { @@ -171,7 +178,7 @@ of the instances.`, "type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE", ""}), + ValidateFunc: verify.ValidateEnum([]string{"GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE", ""}), Description: `Defines how target utilization value is expressed for a Stackdriver Monitoring metric. Possible values: ["GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE"]`, }, @@ -181,7 +188,7 @@ Stackdriver Monitoring metric. Possible values: ["GAUGE", "DELTA_PER_SECOND", "D "mode": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"OFF", "ONLY_UP", "ON", ""}), + ValidateFunc: verify.ValidateEnum([]string{"OFF", "ONLY_UP", "ON", ""}), Description: `Defines operating mode for this policy. Default value: "ON" Possible values: ["OFF", "ONLY_UP", "ON"]`, Default: "ON", }, @@ -280,7 +287,7 @@ to include directives regarding slower scale down, as described above.`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCEName, + ValidateFunc: verify.ValidateGCEName, Description: `Name of the resource. The name must be 1-63 characters long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the first character must be a lowercase letter, and all following @@ -290,7 +297,7 @@ character, which cannot be a dash.`, "target": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `URL of the managed instance group that this autoscaler will scale.`, }, "description": { @@ -303,7 +310,7 @@ character, which cannot be a dash.`, Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `URL of the region where the instance group resides.`, }, "creation_timestamp": { @@ -327,8 +334,8 @@ character, which cannot be a dash.`, } func resourceComputeRegionAutoscalerCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -337,35 +344,35 @@ func resourceComputeRegionAutoscalerCreate(d *schema.ResourceData, meta interfac nameProp, err := expandComputeRegionAutoscalerName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } descriptionProp, err := expandComputeRegionAutoscalerDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } autoscalingPolicyProp, err := expandComputeRegionAutoscalerAutoscalingPolicy(d.Get("autoscaling_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(reflect.ValueOf(autoscalingPolicyProp)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) { + } else if v, ok := d.GetOkExists("autoscaling_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(autoscalingPolicyProp)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) { obj["autoscalingPolicy"] = autoscalingPolicyProp } targetProp, err := expandComputeRegionAutoscalerTarget(d.Get("target"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(targetProp)) && (ok || !reflect.DeepEqual(v, targetProp)) { + } else if v, ok := d.GetOkExists("target"); !tpgresource.IsEmptyValue(reflect.ValueOf(targetProp)) && (ok || !reflect.DeepEqual(v, targetProp)) { obj["target"] = targetProp } regionProp, err := expandComputeRegionAutoscalerRegion(d.Get("region"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { obj["region"] = regionProp } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/autoscalers") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/autoscalers") if err != nil { return err } @@ -373,24 +380,32 @@ func resourceComputeRegionAutoscalerCreate(d *schema.ResourceData, meta interfac log.Printf("[DEBUG] Creating new RegionAutoscaler: %#v", obj) billingProject := "" - 
project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RegionAutoscaler: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating RegionAutoscaler: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -412,33 +427,39 @@ func resourceComputeRegionAutoscalerCreate(d *schema.ResourceData, meta interfac } func resourceComputeRegionAutoscalerRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if 
err != nil { return fmt.Errorf("Error fetching project for RegionAutoscaler: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionAutoscaler %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionAutoscaler %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -463,7 +484,7 @@ func resourceComputeRegionAutoscalerRead(d *schema.ResourceData, meta interface{ if err := d.Set("region", flattenComputeRegionAutoscalerRegion(res["region"], d, config)); err != nil { return fmt.Errorf("Error reading RegionAutoscaler: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { return fmt.Errorf("Error reading RegionAutoscaler: %s", err) } @@ -471,15 +492,15 @@ func resourceComputeRegionAutoscalerRead(d *schema.ResourceData, meta interface{ } func resourceComputeRegionAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for 
RegionAutoscaler: %s", err) } @@ -489,35 +510,35 @@ func resourceComputeRegionAutoscalerUpdate(d *schema.ResourceData, meta interfac nameProp, err := expandComputeRegionAutoscalerName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } descriptionProp, err := expandComputeRegionAutoscalerDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } autoscalingPolicyProp, err := expandComputeRegionAutoscalerAutoscalingPolicy(d.Get("autoscaling_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) { + } else if v, ok := d.GetOkExists("autoscaling_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autoscalingPolicyProp)) { obj["autoscalingPolicy"] = autoscalingPolicyProp } targetProp, err := expandComputeRegionAutoscalerTarget(d.Get("target"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetProp)) { + } else if v, ok := d.GetOkExists("target"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetProp)) { obj["target"] = targetProp } regionProp, err := expandComputeRegionAutoscalerRegion(d.Get("region"), d, config) if err != nil { return err - } 
else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regionProp)) { + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regionProp)) { obj["region"] = regionProp } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/autoscalers?autoscaler={{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/autoscalers?autoscaler={{name}}") if err != nil { return err } @@ -525,11 +546,19 @@ func resourceComputeRegionAutoscalerUpdate(d *schema.ResourceData, meta interfac log.Printf("[DEBUG] Updating RegionAutoscaler %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating RegionAutoscaler %q: %s", d.Id(), err) @@ -549,21 +578,21 @@ func resourceComputeRegionAutoscalerUpdate(d *schema.ResourceData, meta interfac } func resourceComputeRegionAutoscalerDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { 
return fmt.Errorf("Error fetching project for RegionAutoscaler: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") if err != nil { return err } @@ -572,13 +601,21 @@ func resourceComputeRegionAutoscalerDelete(d *schema.ResourceData, meta interfac log.Printf("[DEBUG] Deleting RegionAutoscaler %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "RegionAutoscaler") + return transport_tpg.HandleNotFoundError(err, d, "RegionAutoscaler") } err = ComputeOperationWaitTime( @@ -594,8 +631,8 @@ func resourceComputeRegionAutoscalerDelete(d *schema.ResourceData, meta interfac } func resourceComputeRegionAutoscalerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/regions/(?P[^/]+)/autoscalers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -605,7 +642,7 @@ func resourceComputeRegionAutoscalerImport(d *schema.ResourceData, meta interfac } // Replace import id for the resource id - id, err := replaceVars(d, config, 
"projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -614,19 +651,19 @@ func resourceComputeRegionAutoscalerImport(d *schema.ResourceData, meta interfac return []*schema.ResourceData{d}, nil } -func flattenComputeRegionAutoscalerCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionAutoscalerName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionAutoscalerDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionAutoscalerAutoscalingPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -655,10 +692,10 @@ func flattenComputeRegionAutoscalerAutoscalingPolicy(v interface{}, d *schema.Re flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedules(original["scalingSchedules"], d, config) return []interface{}{transformed} } -func flattenComputeRegionAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the 
string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -672,10 +709,10 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d return v // let terraform core handle it otherwise } -func flattenComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -689,10 +726,10 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d return v // let terraform core handle it otherwise } -func flattenComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -706,11 +743,11 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(v interface{} return v // let terraform core handle it otherwise } -func flattenComputeRegionAutoscalerAutoscalingPolicyMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -725,7 +762,7 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControl(v interface{} flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(original["timeWindowSec"], d, config) return []interface{}{transformed} } -func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -740,10 +777,10 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInRep flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(original["percent"], d, config) return []interface{}{transformed} } -func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -757,10 +794,10 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInRep return v // let terraform core handle it otherwise } -func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -774,10 +811,10 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInRep return v // let terraform core handle it otherwise } -func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -791,7 +828,7 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec( return v // let terraform core handle it otherwise } -func flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -806,19 +843,19 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(v interface{} flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(original["predictiveMethod"], d, config) return []interface{}{transformed} } -func flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return "NONE" } return v } -func flattenComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -838,19 +875,19 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d *sch } return transformed } -func flattenComputeRegionAutoscalerAutoscalingPolicyMetricName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyMetricName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionAutoscalerAutoscalingPolicyMetricType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyMetricType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -863,11 +900,11 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(v i flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(original["utilizationTarget"], d, config) return []interface{}{transformed} } -func flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -887,10 +924,10 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedules(v interface } return transformed } -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -904,18 +941,18 @@ func 
flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredR return v // let terraform core handle it otherwise } -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -929,34 +966,34 @@ func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDurationSec( return v // let terraform core handle it otherwise } -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionAutoscalerTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionAutoscalerRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionAutoscalerRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func expandComputeRegionAutoscalerName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -975,79 +1012,79 @@ func expandComputeRegionAutoscalerAutoscalingPolicy(v interface{}, d TerraformRe transformedMaxReplicas, err := expandComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(original["max_replicas"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedMaxReplicas); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxReplicas); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxNumReplicas"] = transformedMaxReplicas } transformedCooldownPeriod, err := expandComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(original["cooldown_period"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCooldownPeriod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCooldownPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["coolDownPeriodSec"] = transformedCooldownPeriod } transformedMode, err := expandComputeRegionAutoscalerAutoscalingPolicyMode(original["mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mode"] = transformedMode } transformedScaleInControl, err := expandComputeRegionAutoscalerAutoscalingPolicyScaleInControl(original["scale_in_control"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScaleInControl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScaleInControl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scaleInControl"] = transformedScaleInControl } transformedCpuUtilization, err := expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(original["cpu_utilization"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCpuUtilization); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCpuUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cpuUtilization"] = transformedCpuUtilization } transformedMetric, err := 
expandComputeRegionAutoscalerAutoscalingPolicyMetric(original["metric"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMetric); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMetric); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["customMetricUtilizations"] = transformedMetric } transformedLoadBalancingUtilization, err := expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(original["load_balancing_utilization"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLoadBalancingUtilization); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLoadBalancingUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["loadBalancingUtilization"] = transformedLoadBalancingUtilization } transformedScalingSchedules, err := expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedules(original["scaling_schedules"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScalingSchedules); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScalingSchedules); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scalingSchedules"] = transformedScalingSchedules } return transformed, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1059,21 +1096,21 @@ func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControl(v interface{}, transformedMaxScaledInReplicas, err := expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(original["max_scaled_in_replicas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxScaledInReplicas); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxScaledInReplicas); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxScaledInReplicas"] = transformedMaxScaledInReplicas } transformedTimeWindowSec, err := expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(original["time_window_sec"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeWindowSec); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeWindowSec); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeWindowSec"] = transformedTimeWindowSec } return transformed, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1085,33 +1122,33 @@ func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInRepl transformedFixed, err := expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(original["fixed"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFixed); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFixed); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fixed"] = transformedFixed } transformedPercent, err := expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(original["percent"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percent"] = transformedPercent } return transformed, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1123,29 +1160,29 @@ func expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, transformedTarget, err := expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationTarget(original["target"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTarget); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTarget); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["utilizationTarget"] = transformedTarget } transformedPredictiveMethod, err := expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(original["predictive_method"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPredictiveMethod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPredictiveMethod); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["predictiveMethod"] = transformedPredictiveMethod } return transformed, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1158,21 +1195,21 @@ func expandComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d Terra transformedName, err := expandComputeRegionAutoscalerAutoscalingPolicyMetricName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["metric"] = transformedName } transformedTarget, err := expandComputeRegionAutoscalerAutoscalingPolicyMetricTarget(original["target"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTarget); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedTarget); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["utilizationTarget"] = transformedTarget } transformedType, err := expandComputeRegionAutoscalerAutoscalingPolicyMetricType(original["type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["utilizationTargetType"] = transformedType } @@ -1181,19 +1218,19 @@ func expandComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d Terra return req, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyMetricName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyMetricName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyMetricType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyMetricType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) 
if len(l) == 0 || l[0] == nil { return nil, nil @@ -1205,18 +1242,18 @@ func expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(v in transformedTarget, err := expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(original["target"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTarget); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTarget); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["utilizationTarget"] = transformedTarget } return transformed, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { if v == nil { return map[string]interface{}{}, nil } @@ -1235,39 +1272,39 @@ func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedules(v interface{ transformedSchedule, err := expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesSchedule(original["schedule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchedule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schedule"] = transformedSchedule } transformedTimeZone, err := 
expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(original["time_zone"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeZone); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeZone); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeZone"] = transformedTimeZone } transformedDurationSec, err := expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(original["duration_sec"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDurationSec); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDurationSec); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["durationSec"] = transformedDurationSec } transformedDisabled, err := expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDisabled(original["disabled"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disabled"] = transformedDisabled } transformedDescription, err := expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } - transformedName, err := expandString(original["name"], d, config) + transformedName, err := tpgresource.ExpandString(original["name"], d, config) if err != nil { return nil, err } @@ -1276,36 +1313,36 @@ func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedules(v interface{ return m, nil } -func 
expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionAutoscalerTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionAutoscalerRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) +func expandComputeRegionAutoscalerRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for region: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_autoscaler_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_autoscaler_sweeper.go new file mode 100644 index 0000000000..7fe7056907 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_autoscaler_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRegionAutoscaler", testSweepComputeRegionAutoscaler) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRegionAutoscaler(region string) error { + resourceName := "ComputeRegionAutoscaler" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/autoscalers", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: 
config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/autoscalers/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_backend_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_backend_service.go similarity index 80% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_backend_service.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_backend_service.go index eac0389c2d..dc2e95e0fa 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_backend_service.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_backend_service.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "context" @@ -21,7 +24,13 @@ import ( "reflect" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/googleapi" ) @@ -82,7 +91,7 @@ func validateNonManagedBackendServiceBackends(backends []interface{}, d *schema. 
} backend := b.(map[string]interface{}) for _, fn := range backendServiceOnlyManagedFieldNames { - if v, ok := backend[fn]; ok && !isEmptyValue(reflect.ValueOf(v)) { + if v, ok := backend[fn]; ok && !tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return fmt.Errorf("%q cannot be set for non-managed backend service, found value %v", fn, v) } } @@ -132,8 +141,10 @@ func ResourceComputeRegionBackendService() *schema.Resource { }, SchemaVersion: 1, - MigrateState: migrateStateNoop, - CustomizeDiff: customDiffRegionBackendService, + MigrateState: tpgresource.MigrateStateNoop, + CustomizeDiff: customdiff.All( + customDiffRegionBackendService, + ), Schema: map[string]*schema.Schema{ "name": { @@ -251,7 +262,7 @@ delimiters.`, Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC", ""}), Description: `Specifies the cache setting for all responses from this backend. The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC Possible values: ["USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC"]`, }, @@ -542,7 +553,7 @@ or serverless NEG as a backend.`, Elem: &schema.Schema{ Type: schema.TypeString, }, - Set: selfLinkRelativePathHash, + Set: tpgresource.SelfLinkRelativePathHash, }, "iap": { Type: schema.TypeList, @@ -575,7 +586,7 @@ or serverless NEG as a backend.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"EXTERNAL", "EXTERNAL_MANAGED", "INTERNAL", "INTERNAL_MANAGED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"EXTERNAL", "EXTERNAL_MANAGED", "INTERNAL", "INTERNAL_MANAGED", ""}), Description: `Indicates what kind of load balancing this regional backend service will be used for. A backend service created for one type of load balancing cannot be used with the other(s). 
For more information, refer to @@ -585,7 +596,7 @@ balancing cannot be used with the other(s). For more information, refer to "locality_lb_policy": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"ROUND_ROBIN", "LEAST_REQUEST", "RING_HASH", "RANDOM", "ORIGINAL_DESTINATION", "MAGLEV", "WEIGHTED_MAGLEV", ""}), + ValidateFunc: verify.ValidateEnum([]string{"ROUND_ROBIN", "LEAST_REQUEST", "RING_HASH", "RANDOM", "ORIGINAL_DESTINATION", "MAGLEV", "WEIGHTED_MAGLEV", ""}), Description: `The load balancing algorithm used within the scope of the locality. The possible values are: @@ -667,6 +678,7 @@ If logging is enabled, logs will be exported to Stackdriver.`, the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0.`, + Default: 1.0, AtLeastOneOf: []string{"log_config.0.enable", "log_config.0.sample_rate"}, }, }, @@ -675,7 +687,7 @@ The default value is 1.0.`, "network": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The URL of the network to which this backend service belongs. This field can only be specified when the load balancing scheme is set to INTERNAL.`, }, @@ -844,7 +856,7 @@ Must be omitted when the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"HTTP", "HTTPS", "HTTP2", "SSL", "TCP", "UDP", "GRPC", "UNSPECIFIED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"HTTP", "HTTPS", "HTTP2", "SSL", "TCP", "UDP", "GRPC", "UNSPECIFIED", ""}), Description: `The protocol this RegionBackendService uses to communicate with backends. The default is HTTP. 
**NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer types and may result in errors if used with the GA API. Possible values: ["HTTP", "HTTPS", "HTTP2", "SSL", "TCP", "UDP", "GRPC", "UNSPECIFIED"]`, @@ -853,7 +865,7 @@ types and may result in errors if used with the GA API. Possible values: ["HTTP" Type: schema.TypeString, Computed: true, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The Region in which the created backend service should reside. If it is not provided, the provider region is used.`, }, @@ -861,7 +873,7 @@ If it is not provided, the provider region is used.`, Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", "CLIENT_IP_NO_DESTINATION", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", "CLIENT_IP_NO_DESTINATION", ""}), Description: `Type of session affinity to use. The default is NONE. Session affinity is not applicable if the protocol is UDP. Possible values: ["NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", "CLIENT_IP_NO_DESTINATION"]`, }, @@ -904,7 +916,7 @@ func computeRegionBackendServiceBackendSchema() *schema.Resource { "group": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Description: `The fully-qualified URL of an Instance Group or Network Endpoint Group resource. In case of instance group this defines the list of instances that serve traffic. 
Member virtual machine @@ -929,7 +941,7 @@ partial URL.`, "balancing_mode": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"UTILIZATION", "RATE", "CONNECTION", ""}), + ValidateFunc: verify.ValidateEnum([]string{"UTILIZATION", "RATE", "CONNECTION", ""}), Description: `Specifies the balancing mode for this backend. See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) @@ -1039,8 +1051,8 @@ Cannot be set for INTERNAL backend services.`, } func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -1049,67 +1061,67 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte affinityCookieTtlSecProp, err := expandComputeRegionBackendServiceAffinityCookieTtlSec(d.Get("affinity_cookie_ttl_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("affinity_cookie_ttl_sec"); !isEmptyValue(reflect.ValueOf(affinityCookieTtlSecProp)) && (ok || !reflect.DeepEqual(v, affinityCookieTtlSecProp)) { + } else if v, ok := d.GetOkExists("affinity_cookie_ttl_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(affinityCookieTtlSecProp)) && (ok || !reflect.DeepEqual(v, affinityCookieTtlSecProp)) { obj["affinityCookieTtlSec"] = affinityCookieTtlSecProp } backendsProp, err := expandComputeRegionBackendServiceBackend(d.Get("backend"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("backend"); !isEmptyValue(reflect.ValueOf(backendsProp)) && (ok || !reflect.DeepEqual(v, backendsProp)) { + } else if v, ok := d.GetOkExists("backend"); !tpgresource.IsEmptyValue(reflect.ValueOf(backendsProp)) && (ok || !reflect.DeepEqual(v, backendsProp)) { obj["backends"] = 
backendsProp } circuitBreakersProp, err := expandComputeRegionBackendServiceCircuitBreakers(d.Get("circuit_breakers"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("circuit_breakers"); !isEmptyValue(reflect.ValueOf(circuitBreakersProp)) && (ok || !reflect.DeepEqual(v, circuitBreakersProp)) { + } else if v, ok := d.GetOkExists("circuit_breakers"); !tpgresource.IsEmptyValue(reflect.ValueOf(circuitBreakersProp)) && (ok || !reflect.DeepEqual(v, circuitBreakersProp)) { obj["circuitBreakers"] = circuitBreakersProp } consistentHashProp, err := expandComputeRegionBackendServiceConsistentHash(d.Get("consistent_hash"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("consistent_hash"); !isEmptyValue(reflect.ValueOf(consistentHashProp)) && (ok || !reflect.DeepEqual(v, consistentHashProp)) { + } else if v, ok := d.GetOkExists("consistent_hash"); !tpgresource.IsEmptyValue(reflect.ValueOf(consistentHashProp)) && (ok || !reflect.DeepEqual(v, consistentHashProp)) { obj["consistentHash"] = consistentHashProp } cdnPolicyProp, err := expandComputeRegionBackendServiceCdnPolicy(d.Get("cdn_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("cdn_policy"); !isEmptyValue(reflect.ValueOf(cdnPolicyProp)) && (ok || !reflect.DeepEqual(v, cdnPolicyProp)) { + } else if v, ok := d.GetOkExists("cdn_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(cdnPolicyProp)) && (ok || !reflect.DeepEqual(v, cdnPolicyProp)) { obj["cdnPolicy"] = cdnPolicyProp } connectionDrainingProp, err := expandComputeRegionBackendServiceConnectionDraining(nil, d, config) if err != nil { return err - } else if !isEmptyValue(reflect.ValueOf(connectionDrainingProp)) { + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(connectionDrainingProp)) { obj["connectionDraining"] = connectionDrainingProp } descriptionProp, err := expandComputeRegionBackendServiceDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, 
ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } failoverPolicyProp, err := expandComputeRegionBackendServiceFailoverPolicy(d.Get("failover_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("failover_policy"); !isEmptyValue(reflect.ValueOf(failoverPolicyProp)) && (ok || !reflect.DeepEqual(v, failoverPolicyProp)) { + } else if v, ok := d.GetOkExists("failover_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(failoverPolicyProp)) && (ok || !reflect.DeepEqual(v, failoverPolicyProp)) { obj["failoverPolicy"] = failoverPolicyProp } enableCDNProp, err := expandComputeRegionBackendServiceEnableCDN(d.Get("enable_cdn"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("enable_cdn"); !isEmptyValue(reflect.ValueOf(enableCDNProp)) && (ok || !reflect.DeepEqual(v, enableCDNProp)) { + } else if v, ok := d.GetOkExists("enable_cdn"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableCDNProp)) && (ok || !reflect.DeepEqual(v, enableCDNProp)) { obj["enableCDN"] = enableCDNProp } fingerprintProp, err := expandComputeRegionBackendServiceFingerprint(d.Get("fingerprint"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { obj["fingerprint"] = fingerprintProp } healthChecksProp, err := expandComputeRegionBackendServiceHealthChecks(d.Get("health_checks"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("health_checks"); 
!isEmptyValue(reflect.ValueOf(healthChecksProp)) && (ok || !reflect.DeepEqual(v, healthChecksProp)) { + } else if v, ok := d.GetOkExists("health_checks"); !tpgresource.IsEmptyValue(reflect.ValueOf(healthChecksProp)) && (ok || !reflect.DeepEqual(v, healthChecksProp)) { obj["healthChecks"] = healthChecksProp } iapProp, err := expandComputeRegionBackendServiceIap(d.Get("iap"), d, config) @@ -1121,67 +1133,67 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte loadBalancingSchemeProp, err := expandComputeRegionBackendServiceLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("load_balancing_scheme"); !isEmptyValue(reflect.ValueOf(loadBalancingSchemeProp)) && (ok || !reflect.DeepEqual(v, loadBalancingSchemeProp)) { + } else if v, ok := d.GetOkExists("load_balancing_scheme"); !tpgresource.IsEmptyValue(reflect.ValueOf(loadBalancingSchemeProp)) && (ok || !reflect.DeepEqual(v, loadBalancingSchemeProp)) { obj["loadBalancingScheme"] = loadBalancingSchemeProp } localityLbPolicyProp, err := expandComputeRegionBackendServiceLocalityLbPolicy(d.Get("locality_lb_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("locality_lb_policy"); !isEmptyValue(reflect.ValueOf(localityLbPolicyProp)) && (ok || !reflect.DeepEqual(v, localityLbPolicyProp)) { + } else if v, ok := d.GetOkExists("locality_lb_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(localityLbPolicyProp)) && (ok || !reflect.DeepEqual(v, localityLbPolicyProp)) { obj["localityLbPolicy"] = localityLbPolicyProp } nameProp, err := expandComputeRegionBackendServiceName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { 
obj["name"] = nameProp } outlierDetectionProp, err := expandComputeRegionBackendServiceOutlierDetection(d.Get("outlier_detection"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("outlier_detection"); !isEmptyValue(reflect.ValueOf(outlierDetectionProp)) && (ok || !reflect.DeepEqual(v, outlierDetectionProp)) { + } else if v, ok := d.GetOkExists("outlier_detection"); !tpgresource.IsEmptyValue(reflect.ValueOf(outlierDetectionProp)) && (ok || !reflect.DeepEqual(v, outlierDetectionProp)) { obj["outlierDetection"] = outlierDetectionProp } portNameProp, err := expandComputeRegionBackendServicePortName(d.Get("port_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("port_name"); !isEmptyValue(reflect.ValueOf(portNameProp)) && (ok || !reflect.DeepEqual(v, portNameProp)) { + } else if v, ok := d.GetOkExists("port_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(portNameProp)) && (ok || !reflect.DeepEqual(v, portNameProp)) { obj["portName"] = portNameProp } protocolProp, err := expandComputeRegionBackendServiceProtocol(d.Get("protocol"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(reflect.ValueOf(protocolProp)) && (ok || !reflect.DeepEqual(v, protocolProp)) { + } else if v, ok := d.GetOkExists("protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(protocolProp)) && (ok || !reflect.DeepEqual(v, protocolProp)) { obj["protocol"] = protocolProp } sessionAffinityProp, err := expandComputeRegionBackendServiceSessionAffinity(d.Get("session_affinity"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("session_affinity"); !isEmptyValue(reflect.ValueOf(sessionAffinityProp)) && (ok || !reflect.DeepEqual(v, sessionAffinityProp)) { + } else if v, ok := d.GetOkExists("session_affinity"); !tpgresource.IsEmptyValue(reflect.ValueOf(sessionAffinityProp)) && (ok || !reflect.DeepEqual(v, sessionAffinityProp)) { obj["sessionAffinity"] = sessionAffinityProp } 
timeoutSecProp, err := expandComputeRegionBackendServiceTimeoutSec(d.Get("timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(reflect.ValueOf(timeoutSecProp)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { + } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeoutSecProp)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { obj["timeoutSec"] = timeoutSecProp } logConfigProp, err := expandComputeRegionBackendServiceLogConfig(d.Get("log_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(reflect.ValueOf(logConfigProp)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { + } else if v, ok := d.GetOkExists("log_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(logConfigProp)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { obj["logConfig"] = logConfigProp } networkProp, err := expandComputeRegionBackendServiceNetwork(d.Get("network"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { obj["network"] = networkProp } regionProp, err := expandComputeRegionBackendServiceRegion(d.Get("region"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { obj["region"] = regionProp } @@ -1190,7 +1202,7 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte return err } - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/backendServices") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/backendServices") if err != nil { return err } @@ -1198,24 +1210,32 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte log.Printf("[DEBUG] Creating new RegionBackendService: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RegionBackendService: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating RegionBackendService: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/backendServices/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/backendServices/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -1237,33 +1257,39 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte } func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/backendServices/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/backendServices/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RegionBackendService: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionBackendService %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionBackendService %q", d.Id())) } res, err = resourceComputeRegionBackendServiceDecoder(d, meta, res) @@ -1366,7 +1392,7 @@ func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interf if err := d.Set("region", flattenComputeRegionBackendServiceRegion(res["region"], d, config)); err != nil { return fmt.Errorf("Error reading RegionBackendService: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { return fmt.Errorf("Error reading RegionBackendService: %s", err) } @@ -1374,15 +1400,15 @@ func resourceComputeRegionBackendServiceRead(d 
*schema.ResourceData, meta interf } func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RegionBackendService: %s", err) } @@ -1392,67 +1418,67 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte affinityCookieTtlSecProp, err := expandComputeRegionBackendServiceAffinityCookieTtlSec(d.Get("affinity_cookie_ttl_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("affinity_cookie_ttl_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, affinityCookieTtlSecProp)) { + } else if v, ok := d.GetOkExists("affinity_cookie_ttl_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, affinityCookieTtlSecProp)) { obj["affinityCookieTtlSec"] = affinityCookieTtlSecProp } backendsProp, err := expandComputeRegionBackendServiceBackend(d.Get("backend"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("backend"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, backendsProp)) { + } else if v, ok := d.GetOkExists("backend"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, backendsProp)) { obj["backends"] = backendsProp } circuitBreakersProp, err := expandComputeRegionBackendServiceCircuitBreakers(d.Get("circuit_breakers"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("circuit_breakers"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, circuitBreakersProp)) { + } else if v, ok := d.GetOkExists("circuit_breakers"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, circuitBreakersProp)) { obj["circuitBreakers"] = circuitBreakersProp } consistentHashProp, err := expandComputeRegionBackendServiceConsistentHash(d.Get("consistent_hash"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("consistent_hash"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, consistentHashProp)) { + } else if v, ok := d.GetOkExists("consistent_hash"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, consistentHashProp)) { obj["consistentHash"] = consistentHashProp } cdnPolicyProp, err := expandComputeRegionBackendServiceCdnPolicy(d.Get("cdn_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("cdn_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cdnPolicyProp)) { + } else if v, ok := d.GetOkExists("cdn_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cdnPolicyProp)) { obj["cdnPolicy"] = cdnPolicyProp } connectionDrainingProp, err := expandComputeRegionBackendServiceConnectionDraining(nil, d, config) if err != nil { return err - } else if !isEmptyValue(reflect.ValueOf(connectionDrainingProp)) { + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(connectionDrainingProp)) { obj["connectionDraining"] = connectionDrainingProp } descriptionProp, err := expandComputeRegionBackendServiceDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } failoverPolicyProp, err := expandComputeRegionBackendServiceFailoverPolicy(d.Get("failover_policy"), d, config) if err != nil { return err - } 
else if v, ok := d.GetOkExists("failover_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, failoverPolicyProp)) { + } else if v, ok := d.GetOkExists("failover_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, failoverPolicyProp)) { obj["failoverPolicy"] = failoverPolicyProp } enableCDNProp, err := expandComputeRegionBackendServiceEnableCDN(d.Get("enable_cdn"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("enable_cdn"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableCDNProp)) { + } else if v, ok := d.GetOkExists("enable_cdn"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableCDNProp)) { obj["enableCDN"] = enableCDNProp } fingerprintProp, err := expandComputeRegionBackendServiceFingerprint(d.Get("fingerprint"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { obj["fingerprint"] = fingerprintProp } healthChecksProp, err := expandComputeRegionBackendServiceHealthChecks(d.Get("health_checks"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("health_checks"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, healthChecksProp)) { + } else if v, ok := d.GetOkExists("health_checks"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, healthChecksProp)) { obj["healthChecks"] = healthChecksProp } iapProp, err := expandComputeRegionBackendServiceIap(d.Get("iap"), d, config) @@ -1464,67 +1490,67 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte loadBalancingSchemeProp, err := expandComputeRegionBackendServiceLoadBalancingScheme(d.Get("load_balancing_scheme"), 
d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("load_balancing_scheme"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, loadBalancingSchemeProp)) { + } else if v, ok := d.GetOkExists("load_balancing_scheme"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, loadBalancingSchemeProp)) { obj["loadBalancingScheme"] = loadBalancingSchemeProp } localityLbPolicyProp, err := expandComputeRegionBackendServiceLocalityLbPolicy(d.Get("locality_lb_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("locality_lb_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, localityLbPolicyProp)) { + } else if v, ok := d.GetOkExists("locality_lb_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, localityLbPolicyProp)) { obj["localityLbPolicy"] = localityLbPolicyProp } nameProp, err := expandComputeRegionBackendServiceName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } outlierDetectionProp, err := expandComputeRegionBackendServiceOutlierDetection(d.Get("outlier_detection"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("outlier_detection"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, outlierDetectionProp)) { + } else if v, ok := d.GetOkExists("outlier_detection"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, outlierDetectionProp)) { obj["outlierDetection"] = outlierDetectionProp } portNameProp, err := expandComputeRegionBackendServicePortName(d.Get("port_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("port_name"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, portNameProp)) { + } else if v, ok := d.GetOkExists("port_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, portNameProp)) { obj["portName"] = portNameProp } protocolProp, err := expandComputeRegionBackendServiceProtocol(d.Get("protocol"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, protocolProp)) { + } else if v, ok := d.GetOkExists("protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, protocolProp)) { obj["protocol"] = protocolProp } sessionAffinityProp, err := expandComputeRegionBackendServiceSessionAffinity(d.Get("session_affinity"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("session_affinity"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sessionAffinityProp)) { + } else if v, ok := d.GetOkExists("session_affinity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sessionAffinityProp)) { obj["sessionAffinity"] = sessionAffinityProp } timeoutSecProp, err := expandComputeRegionBackendServiceTimeoutSec(d.Get("timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { + } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { obj["timeoutSec"] = timeoutSecProp } logConfigProp, err := expandComputeRegionBackendServiceLogConfig(d.Get("log_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { + } else if v, ok := d.GetOkExists("log_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, logConfigProp)) { obj["logConfig"] = logConfigProp } networkProp, err := expandComputeRegionBackendServiceNetwork(d.Get("network"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkProp)) { + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networkProp)) { obj["network"] = networkProp } regionProp, err := expandComputeRegionBackendServiceRegion(d.Get("region"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regionProp)) { + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regionProp)) { obj["region"] = regionProp } @@ -1533,7 +1559,7 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/backendServices/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/backendServices/{{name}}") if err != nil { return err } @@ -1541,11 +1567,19 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte log.Printf("[DEBUG] Updating RegionBackendService %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, 
+ Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating RegionBackendService %q: %s", d.Id(), err) @@ -1565,21 +1599,21 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte } func resourceComputeRegionBackendServiceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RegionBackendService: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/backendServices/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/backendServices/{{name}}") if err != nil { return err } @@ -1588,13 +1622,21 @@ func resourceComputeRegionBackendServiceDelete(d *schema.ResourceData, meta inte log.Printf("[DEBUG] Deleting RegionBackendService %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "RegionBackendService") + return transport_tpg.HandleNotFoundError(err, d, 
"RegionBackendService") } err = ComputeOperationWaitTime( @@ -1610,8 +1652,8 @@ func resourceComputeRegionBackendServiceDelete(d *schema.ResourceData, meta inte } func resourceComputeRegionBackendServiceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/regions/(?P[^/]+)/backendServices/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -1621,7 +1663,7 @@ func resourceComputeRegionBackendServiceImport(d *schema.ResourceData, meta inte } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/backendServices/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/backendServices/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -1630,10 +1672,10 @@ func resourceComputeRegionBackendServiceImport(d *schema.ResourceData, meta inte return []*schema.ResourceData{d}, nil } -func flattenComputeRegionBackendServiceAffinityCookieTtlSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceAffinityCookieTtlSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1647,7 +1689,7 @@ func flattenComputeRegionBackendServiceAffinityCookieTtlSec(v interface{}, d *sc return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceBackend(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceBackend(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1676,33 +1718,33 @@ func flattenComputeRegionBackendServiceBackend(v interface{}, d *schema.Resource } return transformed } -func flattenComputeRegionBackendServiceBackendBalancingMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceBackendBalancingMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceBackendCapacityScaler(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceBackendCapacityScaler(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceBackendDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceBackendDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceBackendFailover(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceBackendFailover(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceBackendGroup(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceBackendGroup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeRegionBackendServiceBackendMaxConnections(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceBackendMaxConnections(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the 
string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1716,10 +1758,10 @@ func flattenComputeRegionBackendServiceBackendMaxConnections(v interface{}, d *s return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceBackendMaxConnectionsPerInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceBackendMaxConnectionsPerInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1733,10 +1775,10 @@ func flattenComputeRegionBackendServiceBackendMaxConnectionsPerInstance(v interf return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1750,10 +1792,10 @@ func flattenComputeRegionBackendServiceBackendMaxConnectionsPerEndpoint(v interf return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceBackendMaxRate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceBackendMaxRate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 
format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1767,19 +1809,19 @@ func flattenComputeRegionBackendServiceBackendMaxRate(v interface{}, d *schema.R return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceBackendMaxRatePerInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceBackendMaxRatePerInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceBackendMaxRatePerEndpoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceBackendMaxRatePerEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceBackendMaxUtilization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceBackendMaxUtilization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceCircuitBreakers(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCircuitBreakers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1800,10 +1842,10 @@ func flattenComputeRegionBackendServiceCircuitBreakers(v interface{}, d *schema. 
flattenComputeRegionBackendServiceCircuitBreakersMaxRetries(original["maxRetries"], d, config) return []interface{}{transformed} } -func flattenComputeRegionBackendServiceCircuitBreakersMaxRequestsPerConnection(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCircuitBreakersMaxRequestsPerConnection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1817,10 +1859,10 @@ func flattenComputeRegionBackendServiceCircuitBreakersMaxRequestsPerConnection(v return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceCircuitBreakersMaxConnections(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCircuitBreakersMaxConnections(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1834,10 +1876,10 @@ func flattenComputeRegionBackendServiceCircuitBreakersMaxConnections(v interface return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceCircuitBreakersMaxPendingRequests(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCircuitBreakersMaxPendingRequests(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } 
} @@ -1851,10 +1893,10 @@ func flattenComputeRegionBackendServiceCircuitBreakersMaxPendingRequests(v inter return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceCircuitBreakersMaxRequests(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCircuitBreakersMaxRequests(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1868,10 +1910,10 @@ func flattenComputeRegionBackendServiceCircuitBreakersMaxRequests(v interface{}, return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceCircuitBreakersMaxRetries(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCircuitBreakersMaxRetries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1885,7 +1927,7 @@ func flattenComputeRegionBackendServiceCircuitBreakersMaxRetries(v interface{}, return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceConsistentHash(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceConsistentHash(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1902,7 +1944,7 @@ func flattenComputeRegionBackendServiceConsistentHash(v interface{}, d *schema.R flattenComputeRegionBackendServiceConsistentHashMinimumRingSize(original["minimumRingSize"], d, config) return 
[]interface{}{transformed} } -func flattenComputeRegionBackendServiceConsistentHashHttpCookie(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceConsistentHashHttpCookie(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1919,7 +1961,7 @@ func flattenComputeRegionBackendServiceConsistentHashHttpCookie(v interface{}, d flattenComputeRegionBackendServiceConsistentHashHttpCookiePath(original["path"], d, config) return []interface{}{transformed} } -func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1934,10 +1976,10 @@ func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtl(v interface{} flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1951,10 +1993,10 @@ func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlSeconds(v inte return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1968,22 +2010,22 @@ func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlNanos(v interf return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceConsistentHashHttpCookieName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceConsistentHashHttpCookieName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceConsistentHashHttpCookiePath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceConsistentHashHttpCookiePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceConsistentHashHttpHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceConsistentHashHttpHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceConsistentHashMinimumRingSize(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceConsistentHashMinimumRingSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1997,7 +2039,7 @@ func 
flattenComputeRegionBackendServiceConsistentHashMinimumRingSize(v interface return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceCdnPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2026,7 +2068,7 @@ func flattenComputeRegionBackendServiceCdnPolicy(v interface{}, d *schema.Resour flattenComputeRegionBackendServiceCdnPolicyServeWhileStale(original["serveWhileStale"], d, config) return []interface{}{transformed} } -func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2049,40 +2091,40 @@ func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeNamedCookies(original["includeNamedCookies"], d, config) return []interface{}{transformed} } -func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
+func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeNamedCookies(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeNamedCookies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2096,10 +2138,10 @@ func flattenComputeRegionBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interf return v // let terraform core handle it otherwise } -func 
flattenComputeRegionBackendServiceCdnPolicyDefaultTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyDefaultTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2113,10 +2155,10 @@ func flattenComputeRegionBackendServiceCdnPolicyDefaultTtl(v interface{}, d *sch return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceCdnPolicyMaxTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyMaxTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2130,10 +2172,10 @@ func flattenComputeRegionBackendServiceCdnPolicyMaxTtl(v interface{}, d *schema. 
return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceCdnPolicyClientTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyClientTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2147,11 +2189,11 @@ func flattenComputeRegionBackendServiceCdnPolicyClientTtl(v interface{}, d *sche return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceCdnPolicyNegativeCaching(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyNegativeCaching(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2169,10 +2211,10 @@ func flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(v interfac } return transformed } -func flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2186,14 +2228,14 @@ func 
flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyCode(v inte return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceCdnPolicyCacheMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyCacheMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceCdnPolicyServeWhileStale(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCdnPolicyServeWhileStale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2207,7 +2249,7 @@ func flattenComputeRegionBackendServiceCdnPolicyServeWhileStale(v interface{}, d return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceConnectionDraining(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceConnectionDraining(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2220,10 +2262,10 @@ func flattenComputeRegionBackendServiceConnectionDraining(v interface{}, d *sche flattenComputeRegionBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(original["drainingTimeoutSec"], d, config) return []interface{}{transformed} } -func flattenComputeRegionBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if 
strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2237,15 +2279,15 @@ func flattenComputeRegionBackendServiceConnectionDrainingConnectionDrainingTimeo return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceFailoverPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceFailoverPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2262,34 +2304,34 @@ func flattenComputeRegionBackendServiceFailoverPolicy(v interface{}, d *schema.R flattenComputeRegionBackendServiceFailoverPolicyFailoverRatio(original["failoverRatio"], d, config) return []interface{}{transformed} } -func flattenComputeRegionBackendServiceFailoverPolicyDisableConnectionDrainOnFailover(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceFailoverPolicyDisableConnectionDrainOnFailover(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceFailoverPolicyDropTrafficIfUnhealthy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceFailoverPolicyDropTrafficIfUnhealthy(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceFailoverPolicyFailoverRatio(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceFailoverPolicyFailoverRatio(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceEnableCDN(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceEnableCDN(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceFingerprint(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceHealthChecks(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceHealthChecks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) } -func flattenComputeRegionBackendServiceIap(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceIap(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2306,31 +2348,31 @@ func flattenComputeRegionBackendServiceIap(v interface{}, d *schema.ResourceData flattenComputeRegionBackendServiceIapOauth2ClientSecretSha256(original["oauth2ClientSecretSha256"], d, config) return []interface{}{transformed} } -func flattenComputeRegionBackendServiceIapOauth2ClientId(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenComputeRegionBackendServiceIapOauth2ClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceIapOauth2ClientSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceIapOauth2ClientSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("iap.0.oauth2_client_secret") } -func flattenComputeRegionBackendServiceIapOauth2ClientSecretSha256(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceIapOauth2ClientSecretSha256(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceLoadBalancingScheme(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceLoadBalancingScheme(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceLocalityLbPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceLocalityLbPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceOutlierDetection(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2363,7 +2405,7 @@ func flattenComputeRegionBackendServiceOutlierDetection(v interface{}, d *schema 
flattenComputeRegionBackendServiceOutlierDetectionSuccessRateStdevFactor(original["successRateStdevFactor"], d, config) return []interface{}{transformed} } -func flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2378,10 +2420,10 @@ func flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTime(v interf flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2395,10 +2437,10 @@ func flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2412,10 +2454,10 @@ func 
flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeNanos(v i return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceOutlierDetectionConsecutiveErrors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionConsecutiveErrors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2429,10 +2471,10 @@ func flattenComputeRegionBackendServiceOutlierDetectionConsecutiveErrors(v inter return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceOutlierDetectionConsecutiveGatewayFailure(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionConsecutiveGatewayFailure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2446,10 +2488,10 @@ func flattenComputeRegionBackendServiceOutlierDetectionConsecutiveGatewayFailure return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); 
err == nil { return intVal } } @@ -2463,10 +2505,10 @@ func flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveError return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2480,10 +2522,10 @@ func flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveGatew return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceOutlierDetectionEnforcingSuccessRate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionEnforcingSuccessRate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2497,7 +2539,7 @@ func flattenComputeRegionBackendServiceOutlierDetectionEnforcingSuccessRate(v in return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceOutlierDetectionInterval(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2512,10 +2554,10 @@ func flattenComputeRegionBackendServiceOutlierDetectionInterval(v interface{}, d 
flattenComputeRegionBackendServiceOutlierDetectionIntervalNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeRegionBackendServiceOutlierDetectionIntervalSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionIntervalSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2529,10 +2571,10 @@ func flattenComputeRegionBackendServiceOutlierDetectionIntervalSeconds(v interfa return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceOutlierDetectionIntervalNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionIntervalNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2546,10 +2588,10 @@ func flattenComputeRegionBackendServiceOutlierDetectionIntervalNanos(v interface return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceOutlierDetectionMaxEjectionPercent(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionMaxEjectionPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2563,10 
+2605,10 @@ func flattenComputeRegionBackendServiceOutlierDetectionMaxEjectionPercent(v inte return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateMinimumHosts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateMinimumHosts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2580,10 +2622,10 @@ func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateMinimumHosts(v return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateRequestVolume(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateRequestVolume(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2597,10 +2639,10 @@ func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateRequestVolume( return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateStdevFactor(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateStdevFactor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2614,22 +2656,22 @@ func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateStdevFactor(v return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServicePortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServicePortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceSessionAffinity(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceSessionAffinity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceTimeoutSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2643,7 +2685,7 @@ func flattenComputeRegionBackendServiceTimeoutSec(v interface{}, d *schema.Resou return v // let terraform core handle it otherwise } -func flattenComputeRegionBackendServiceLogConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceLogConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2658,33 +2700,33 @@ func 
flattenComputeRegionBackendServiceLogConfig(v interface{}, d *schema.Resour flattenComputeRegionBackendServiceLogConfigSampleRate(original["sampleRate"], d, config) return []interface{}{transformed} } -func flattenComputeRegionBackendServiceLogConfigEnable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceLogConfigEnable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceLogConfigSampleRate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceLogConfigSampleRate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionBackendServiceNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeRegionBackendServiceRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionBackendServiceRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return NameFromSelfLinkStateFunc(v) + return tpgresource.NameFromSelfLinkStateFunc(v) } -func expandComputeRegionBackendServiceAffinityCookieTtlSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceAffinityCookieTtlSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceBackend(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceBackend(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -2698,7 +2740,7 @@ func expandComputeRegionBackendServiceBackend(v interface{}, d TerraformResource transformedBalancingMode, err := expandComputeRegionBackendServiceBackendBalancingMode(original["balancing_mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBalancingMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBalancingMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["balancingMode"] = transformedBalancingMode } @@ -2712,70 +2754,70 @@ func expandComputeRegionBackendServiceBackend(v interface{}, d TerraformResource transformedDescription, err := expandComputeRegionBackendServiceBackendDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedFailover, err := expandComputeRegionBackendServiceBackendFailover(original["failover"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFailover); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFailover); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["failover"] = transformedFailover } transformedGroup, err := expandComputeRegionBackendServiceBackendGroup(original["group"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGroup); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGroup); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["group"] = transformedGroup } 
transformedMaxConnections, err := expandComputeRegionBackendServiceBackendMaxConnections(original["max_connections"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxConnections); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxConnections); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxConnections"] = transformedMaxConnections } transformedMaxConnectionsPerInstance, err := expandComputeRegionBackendServiceBackendMaxConnectionsPerInstance(original["max_connections_per_instance"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxConnectionsPerInstance); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxConnectionsPerInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxConnectionsPerInstance"] = transformedMaxConnectionsPerInstance } transformedMaxConnectionsPerEndpoint, err := expandComputeRegionBackendServiceBackendMaxConnectionsPerEndpoint(original["max_connections_per_endpoint"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxConnectionsPerEndpoint); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxConnectionsPerEndpoint); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxConnectionsPerEndpoint"] = transformedMaxConnectionsPerEndpoint } transformedMaxRate, err := expandComputeRegionBackendServiceBackendMaxRate(original["max_rate"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxRate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxRate"] = transformedMaxRate } transformedMaxRatePerInstance, err := expandComputeRegionBackendServiceBackendMaxRatePerInstance(original["max_rate_per_instance"], d, config) if err 
!= nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRatePerInstance); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxRatePerInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxRatePerInstance"] = transformedMaxRatePerInstance } transformedMaxRatePerEndpoint, err := expandComputeRegionBackendServiceBackendMaxRatePerEndpoint(original["max_rate_per_endpoint"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRatePerEndpoint); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxRatePerEndpoint); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxRatePerEndpoint"] = transformedMaxRatePerEndpoint } transformedMaxUtilization, err := expandComputeRegionBackendServiceBackendMaxUtilization(original["max_utilization"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxUtilization); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxUtilization"] = transformedMaxUtilization } @@ -2784,55 +2826,55 @@ func expandComputeRegionBackendServiceBackend(v interface{}, d TerraformResource return req, nil } -func expandComputeRegionBackendServiceBackendBalancingMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceBackendBalancingMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceBackendCapacityScaler(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceBackendCapacityScaler(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeRegionBackendServiceBackendDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceBackendDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceBackendFailover(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceBackendFailover(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceBackendGroup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceBackendGroup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceBackendMaxConnections(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceBackendMaxConnections(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceBackendMaxConnectionsPerInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceBackendMaxConnectionsPerInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceBackendMaxRate(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceBackendMaxRate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceBackendMaxRatePerInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceBackendMaxRatePerInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceBackendMaxRatePerEndpoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceBackendMaxRatePerEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceBackendMaxUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceBackendMaxUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCircuitBreakers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCircuitBreakers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2844,62 +2886,62 @@ func expandComputeRegionBackendServiceCircuitBreakers(v interface{}, d Terraform transformedMaxRequestsPerConnection, err := expandComputeRegionBackendServiceCircuitBreakersMaxRequestsPerConnection(original["max_requests_per_connection"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRequestsPerConnection); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxRequestsPerConnection); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxRequestsPerConnection"] = transformedMaxRequestsPerConnection } transformedMaxConnections, err := expandComputeRegionBackendServiceCircuitBreakersMaxConnections(original["max_connections"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxConnections); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxConnections); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxConnections"] = transformedMaxConnections } transformedMaxPendingRequests, err := expandComputeRegionBackendServiceCircuitBreakersMaxPendingRequests(original["max_pending_requests"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxPendingRequests); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxPendingRequests); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxPendingRequests"] = transformedMaxPendingRequests } transformedMaxRequests, err := expandComputeRegionBackendServiceCircuitBreakersMaxRequests(original["max_requests"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRequests); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxRequests); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxRequests"] = transformedMaxRequests } transformedMaxRetries, err := expandComputeRegionBackendServiceCircuitBreakersMaxRetries(original["max_retries"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRetries); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxRetries); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxRetries"] = transformedMaxRetries } return transformed, nil 
} -func expandComputeRegionBackendServiceCircuitBreakersMaxRequestsPerConnection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCircuitBreakersMaxRequestsPerConnection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCircuitBreakersMaxConnections(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCircuitBreakersMaxConnections(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCircuitBreakersMaxPendingRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCircuitBreakersMaxPendingRequests(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCircuitBreakersMaxRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCircuitBreakersMaxRequests(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCircuitBreakersMaxRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCircuitBreakersMaxRetries(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceConsistentHash(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceConsistentHash(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l 
:= v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2911,28 +2953,28 @@ func expandComputeRegionBackendServiceConsistentHash(v interface{}, d TerraformR transformedHttpCookie, err := expandComputeRegionBackendServiceConsistentHashHttpCookie(original["http_cookie"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpCookie); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpCookie); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpCookie"] = transformedHttpCookie } transformedHttpHeaderName, err := expandComputeRegionBackendServiceConsistentHashHttpHeaderName(original["http_header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpHeaderName"] = transformedHttpHeaderName } transformedMinimumRingSize, err := expandComputeRegionBackendServiceConsistentHashMinimumRingSize(original["minimum_ring_size"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinimumRingSize); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinimumRingSize); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minimumRingSize"] = transformedMinimumRingSize } return transformed, nil } -func expandComputeRegionBackendServiceConsistentHashHttpCookie(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceConsistentHashHttpCookie(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2944,28 +2986,28 @@ func expandComputeRegionBackendServiceConsistentHashHttpCookie(v interface{}, d 
transformedTtl, err := expandComputeRegionBackendServiceConsistentHashHttpCookieTtl(original["ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ttl"] = transformedTtl } transformedName, err := expandComputeRegionBackendServiceConsistentHashHttpCookieName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedPath, err := expandComputeRegionBackendServiceConsistentHashHttpCookiePath(original["path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["path"] = transformedPath } return transformed, nil } -func expandComputeRegionBackendServiceConsistentHashHttpCookieTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceConsistentHashHttpCookieTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2977,45 +3019,45 @@ func expandComputeRegionBackendServiceConsistentHashHttpCookieTtl(v interface{}, transformedSeconds, err := expandComputeRegionBackendServiceConsistentHashHttpCookieTtlSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandComputeRegionBackendServiceConsistentHashHttpCookieTtlNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeRegionBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceConsistentHashHttpCookieName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceConsistentHashHttpCookieName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceConsistentHashHttpCookiePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceConsistentHashHttpCookiePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceConsistentHashHttpHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { +func expandComputeRegionBackendServiceConsistentHashHttpHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceConsistentHashMinimumRingSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceConsistentHashMinimumRingSize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCdnPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3027,35 +3069,35 @@ func expandComputeRegionBackendServiceCdnPolicy(v interface{}, d TerraformResour transformedCacheKeyPolicy, err := expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicy(original["cache_key_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCacheKeyPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCacheKeyPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cacheKeyPolicy"] = transformedCacheKeyPolicy } transformedSignedUrlCacheMaxAgeSec, err := expandComputeRegionBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(original["signed_url_cache_max_age_sec"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSignedUrlCacheMaxAgeSec); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSignedUrlCacheMaxAgeSec); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["signedUrlCacheMaxAgeSec"] = transformedSignedUrlCacheMaxAgeSec } transformedDefaultTtl, err := 
expandComputeRegionBackendServiceCdnPolicyDefaultTtl(original["default_ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDefaultTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDefaultTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["defaultTtl"] = transformedDefaultTtl } transformedMaxTtl, err := expandComputeRegionBackendServiceCdnPolicyMaxTtl(original["max_ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxTtl"] = transformedMaxTtl } transformedClientTtl, err := expandComputeRegionBackendServiceCdnPolicyClientTtl(original["client_ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedClientTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedClientTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["clientTtl"] = transformedClientTtl } @@ -3069,14 +3111,14 @@ func expandComputeRegionBackendServiceCdnPolicy(v interface{}, d TerraformResour transformedNegativeCachingPolicy, err := expandComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(original["negative_caching_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNegativeCachingPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNegativeCachingPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["negativeCachingPolicy"] = transformedNegativeCachingPolicy } transformedCacheMode, err := expandComputeRegionBackendServiceCdnPolicyCacheMode(original["cache_mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCacheMode); val.IsValid() && !isEmptyValue(val) { + 
} else if val := reflect.ValueOf(transformedCacheMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cacheMode"] = transformedCacheMode } @@ -3090,7 +3132,7 @@ func expandComputeRegionBackendServiceCdnPolicy(v interface{}, d TerraformResour return transformed, nil } -func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3144,53 +3186,53 @@ func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d T return transformed, nil } -func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeNamedCookies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeNamedCookies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCdnPolicyDefaultTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyDefaultTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCdnPolicyMaxTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyMaxTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeRegionBackendServiceCdnPolicyClientTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyClientTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCdnPolicyNegativeCaching(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyNegativeCaching(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3203,7 +3245,7 @@ func expandComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(v interface transformedCode, err := expandComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyCode(original["code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["code"] = transformedCode } @@ -3212,19 +3254,19 @@ func expandComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(v interface return req, nil } -func expandComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return 
v, nil } -func expandComputeRegionBackendServiceCdnPolicyCacheMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyCacheMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceCdnPolicyServeWhileStale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceCdnPolicyServeWhileStale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceConnectionDraining(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceConnectionDraining(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { transformed := make(map[string]interface{}) transformedConnectionDrainingTimeoutSec, err := expandComputeRegionBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(d.Get("connection_draining_timeout_sec"), d, config) if err != nil { @@ -3236,15 +3278,15 @@ func expandComputeRegionBackendServiceConnectionDraining(v interface{}, d Terraf return transformed, nil } -func expandComputeRegionBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } 
-func expandComputeRegionBackendServiceFailoverPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceFailoverPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3256,7 +3298,7 @@ func expandComputeRegionBackendServiceFailoverPolicy(v interface{}, d TerraformR transformedDisableConnectionDrainOnFailover, err := expandComputeRegionBackendServiceFailoverPolicyDisableConnectionDrainOnFailover(original["disable_connection_drain_on_failover"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisableConnectionDrainOnFailover); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisableConnectionDrainOnFailover); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disableConnectionDrainOnFailover"] = transformedDisableConnectionDrainOnFailover } @@ -3270,39 +3312,39 @@ func expandComputeRegionBackendServiceFailoverPolicy(v interface{}, d TerraformR transformedFailoverRatio, err := expandComputeRegionBackendServiceFailoverPolicyFailoverRatio(original["failover_ratio"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFailoverRatio); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFailoverRatio); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["failoverRatio"] = transformedFailoverRatio } return transformed, nil } -func expandComputeRegionBackendServiceFailoverPolicyDisableConnectionDrainOnFailover(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceFailoverPolicyDisableConnectionDrainOnFailover(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeRegionBackendServiceFailoverPolicyDropTrafficIfUnhealthy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceFailoverPolicyDropTrafficIfUnhealthy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceFailoverPolicyFailoverRatio(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceFailoverPolicyFailoverRatio(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceEnableCDN(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceEnableCDN(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceHealthChecks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceHealthChecks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandComputeRegionBackendServiceIap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceIap(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3314,7 +3356,7 @@ func 
expandComputeRegionBackendServiceIap(v interface{}, d TerraformResourceData transformedOauth2ClientId, err := expandComputeRegionBackendServiceIapOauth2ClientId(original["oauth2_client_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOauth2ClientId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOauth2ClientId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["oauth2ClientId"] = transformedOauth2ClientId } @@ -3328,38 +3370,38 @@ func expandComputeRegionBackendServiceIap(v interface{}, d TerraformResourceData transformedOauth2ClientSecretSha256, err := expandComputeRegionBackendServiceIapOauth2ClientSecretSha256(original["oauth2_client_secret_sha256"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOauth2ClientSecretSha256); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOauth2ClientSecretSha256); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["oauth2ClientSecretSha256"] = transformedOauth2ClientSecretSha256 } return transformed, nil } -func expandComputeRegionBackendServiceIapOauth2ClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceIapOauth2ClientId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceIapOauth2ClientSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceIapOauth2ClientSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceIapOauth2ClientSecretSha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceIapOauth2ClientSecretSha256(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceLoadBalancingScheme(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceLoadBalancingScheme(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceLocalityLbPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceLocalityLbPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceOutlierDetection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3371,84 +3413,84 @@ func expandComputeRegionBackendServiceOutlierDetection(v interface{}, d Terrafor transformedBaseEjectionTime, err := expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTime(original["base_ejection_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBaseEjectionTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBaseEjectionTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["baseEjectionTime"] = transformedBaseEjectionTime } transformedConsecutiveErrors, 
err := expandComputeRegionBackendServiceOutlierDetectionConsecutiveErrors(original["consecutive_errors"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedConsecutiveErrors); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedConsecutiveErrors); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["consecutiveErrors"] = transformedConsecutiveErrors } transformedConsecutiveGatewayFailure, err := expandComputeRegionBackendServiceOutlierDetectionConsecutiveGatewayFailure(original["consecutive_gateway_failure"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedConsecutiveGatewayFailure); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedConsecutiveGatewayFailure); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["consecutiveGatewayFailure"] = transformedConsecutiveGatewayFailure } transformedEnforcingConsecutiveErrors, err := expandComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveErrors(original["enforcing_consecutive_errors"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnforcingConsecutiveErrors); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnforcingConsecutiveErrors); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enforcingConsecutiveErrors"] = transformedEnforcingConsecutiveErrors } transformedEnforcingConsecutiveGatewayFailure, err := expandComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(original["enforcing_consecutive_gateway_failure"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnforcingConsecutiveGatewayFailure); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnforcingConsecutiveGatewayFailure); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["enforcingConsecutiveGatewayFailure"] = transformedEnforcingConsecutiveGatewayFailure } transformedEnforcingSuccessRate, err := expandComputeRegionBackendServiceOutlierDetectionEnforcingSuccessRate(original["enforcing_success_rate"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnforcingSuccessRate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnforcingSuccessRate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enforcingSuccessRate"] = transformedEnforcingSuccessRate } transformedInterval, err := expandComputeRegionBackendServiceOutlierDetectionInterval(original["interval"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInterval); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInterval); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["interval"] = transformedInterval } transformedMaxEjectionPercent, err := expandComputeRegionBackendServiceOutlierDetectionMaxEjectionPercent(original["max_ejection_percent"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxEjectionPercent); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxEjectionPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxEjectionPercent"] = transformedMaxEjectionPercent } transformedSuccessRateMinimumHosts, err := expandComputeRegionBackendServiceOutlierDetectionSuccessRateMinimumHosts(original["success_rate_minimum_hosts"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSuccessRateMinimumHosts); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSuccessRateMinimumHosts); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["successRateMinimumHosts"] = transformedSuccessRateMinimumHosts } 
transformedSuccessRateRequestVolume, err := expandComputeRegionBackendServiceOutlierDetectionSuccessRateRequestVolume(original["success_rate_request_volume"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSuccessRateRequestVolume); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSuccessRateRequestVolume); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["successRateRequestVolume"] = transformedSuccessRateRequestVolume } transformedSuccessRateStdevFactor, err := expandComputeRegionBackendServiceOutlierDetectionSuccessRateStdevFactor(original["success_rate_stdev_factor"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSuccessRateStdevFactor); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSuccessRateStdevFactor); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["successRateStdevFactor"] = transformedSuccessRateStdevFactor } return transformed, nil } -func expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3460,49 +3502,49 @@ func expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTime(v interfa transformedSeconds, err := expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := 
expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceOutlierDetectionConsecutiveErrors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionConsecutiveErrors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceOutlierDetectionConsecutiveGatewayFailure(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionConsecutiveGatewayFailure(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceOutlierDetectionEnforcingSuccessRate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionEnforcingSuccessRate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceOutlierDetectionInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionInterval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3514,61 +3556,61 @@ func expandComputeRegionBackendServiceOutlierDetectionInterval(v interface{}, d transformedSeconds, err := expandComputeRegionBackendServiceOutlierDetectionIntervalSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandComputeRegionBackendServiceOutlierDetectionIntervalNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeRegionBackendServiceOutlierDetectionIntervalSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionIntervalSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceOutlierDetectionIntervalNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionIntervalNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceOutlierDetectionMaxEjectionPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionMaxEjectionPercent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceOutlierDetectionSuccessRateMinimumHosts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionSuccessRateMinimumHosts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceOutlierDetectionSuccessRateRequestVolume(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionSuccessRateRequestVolume(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeRegionBackendServiceOutlierDetectionSuccessRateStdevFactor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceOutlierDetectionSuccessRateStdevFactor(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServicePortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServicePortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceSessionAffinity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceSessionAffinity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceLogConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3587,31 +3629,31 @@ func expandComputeRegionBackendServiceLogConfig(v interface{}, d 
TerraformResour transformedSampleRate, err := expandComputeRegionBackendServiceLogConfigSampleRate(original["sample_rate"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSampleRate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSampleRate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sampleRate"] = transformedSampleRate } return transformed, nil } -func expandComputeRegionBackendServiceLogConfigEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceLogConfigEnable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceLogConfigSampleRate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionBackendServiceLogConfigSampleRate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionBackendServiceNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) +func expandComputeRegionBackendServiceNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for network: %s", err) } return f.RelativeLink(), nil } -func expandComputeRegionBackendServiceRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) +func expandComputeRegionBackendServiceRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for region: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_backend_service_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_backend_service_sweeper.go new file mode 100644 index 0000000000..dad6aa5bb7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_backend_service_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRegionBackendService", testSweepComputeRegionBackendService) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRegionBackendService(region string) error { + resourceName := "ComputeRegionBackendService" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/backendServices", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + 
}) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/backendServices/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_commitment.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_commitment.go new file mode 100644 index 0000000000..5f2bb08b95 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_commitment.go @@ -0,0 +1,713 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeRegionCommitment() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionCommitmentCreate, + Read: resourceComputeRegionCommitmentRead, + Delete: resourceComputeRegionCommitmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRegionCommitmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the resource. 
The name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "plan": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"TWELVE_MONTH", "THIRTY_SIX_MONTH"}), + Description: `The plan for this commitment, which determines duration and discount rate. +The currently supported plans are TWELVE_MONTH (1 year), and THIRTY_SIX_MONTH (3 years). Possible values: ["TWELVE_MONTH", "THIRTY_SIX_MONTH"]`, + }, + "auto_renew": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Specifies whether to enable automatic renewal for the commitment. +The default value is false if not specified. +If the field is set to true, the commitment will be automatically renewed for either +one or three years according to the terms of the existing commitment.`, + }, + "category": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"LICENSE", "MACHINE", ""}), + Description: `The category of the commitment. Category MACHINE specifies commitments composed of +machine resources such as VCPU or MEMORY, listed in resources. Category LICENSE +specifies commitments composed of software licenses, listed in licenseResources. +Note that only MACHINE commitments should have a Type specified. 
Possible values: ["LICENSE", "MACHINE"]`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "license_resource": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The license specification required as part of a license commitment.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "license": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Any applicable license URI.`, + }, + "amount": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The number of licenses purchased.`, + }, + "cores_per_license": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies the core range of the instance for which this license applies.`, + }, + }, + }, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the region where this commitment may be used.`, + }, + "resources": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A list of commitment amounts for particular resources. +Note that VCPU and MEMORY resource commitments must occur together.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the accelerator type resource. Applicable only when the type is ACCELERATOR.`, + }, + "amount": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The amount of the resource purchased (in a type-dependent unit, +such as bytes). For vCPUs, this can just be an integer. For memory, +this must be provided in MB. 
Memory must be a multiple of 256 MB, +with up to 6.5GB of memory per every vCPU.`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Type of resource for which this commitment applies. +Possible values are VCPU, MEMORY, LOCAL_SSD, and ACCELERATOR.`, + }, + }, + }, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The type of commitment, which affects the discount rate and the eligible resources. +The type could be one of the following value: 'MEMORY_OPTIMIZED', 'ACCELERATOR_OPTIMIZED', +'GENERAL_PURPOSE_N1', 'GENERAL_PURPOSE_N2', 'GENERAL_PURPOSE_N2D', 'GENERAL_PURPOSE_E2', +'GENERAL_PURPOSE_T2D', 'GENERAL_PURPOSE_C3', 'COMPUTE_OPTIMIZED_C2', 'COMPUTE_OPTIMIZED_C2D' and +'GRAPHICS_OPTIMIZED_G2'`, + }, + "commitment_id": { + Type: schema.TypeInt, + Computed: true, + Description: `Unique identifier for the resource.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "end_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Commitment end time in RFC3339 text format.`, + }, + "start_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Commitment start time in RFC3339 text format.`, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: `Status of the commitment with regards to eventual expiration +(each commitment has an end date defined).`, + }, + "status_message": { + Type: schema.TypeString, + Computed: true, + Description: `A human-readable explanation of the status.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRegionCommitmentCreate(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandComputeRegionCommitmentName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandComputeRegionCommitmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + planProp, err := expandComputeRegionCommitmentPlan(d.Get("plan"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("plan"); !tpgresource.IsEmptyValue(reflect.ValueOf(planProp)) && (ok || !reflect.DeepEqual(v, planProp)) { + obj["plan"] = planProp + } + resourcesProp, err := expandComputeRegionCommitmentResources(d.Get("resources"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(resourcesProp)) && (ok || !reflect.DeepEqual(v, resourcesProp)) { + obj["resources"] = resourcesProp + } + typeProp, err := expandComputeRegionCommitmentType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + categoryProp, err := expandComputeRegionCommitmentCategory(d.Get("category"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("category"); !tpgresource.IsEmptyValue(reflect.ValueOf(categoryProp)) && (ok || !reflect.DeepEqual(v, categoryProp)) { + obj["category"] = categoryProp + } + 
licenseResourceProp, err := expandComputeRegionCommitmentLicenseResource(d.Get("license_resource"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("license_resource"); !tpgresource.IsEmptyValue(reflect.ValueOf(licenseResourceProp)) && (ok || !reflect.DeepEqual(v, licenseResourceProp)) { + obj["licenseResource"] = licenseResourceProp + } + autoRenewProp, err := expandComputeRegionCommitmentAutoRenew(d.Get("auto_renew"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("auto_renew"); !tpgresource.IsEmptyValue(reflect.ValueOf(autoRenewProp)) && (ok || !reflect.DeepEqual(v, autoRenewProp)) { + obj["autoRenew"] = autoRenewProp + } + regionProp, err := expandComputeRegionCommitmentRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/commitments") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RegionCommitment: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionCommitment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating RegionCommitment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/regions/{{region}}/commitments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating RegionCommitment", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create RegionCommitment: %s", err) + } + + log.Printf("[DEBUG] Finished creating RegionCommitment %q: %#v", d.Id(), res) + + return resourceComputeRegionCommitmentRead(d, meta) +} + +func resourceComputeRegionCommitmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/commitments/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionCommitment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionCommitment %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + + if err := d.Set("commitment_id", flattenComputeRegionCommitmentCommitmentId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := 
d.Set("creation_timestamp", flattenComputeRegionCommitmentCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := d.Set("name", flattenComputeRegionCommitmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := d.Set("description", flattenComputeRegionCommitmentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := d.Set("status", flattenComputeRegionCommitmentStatus(res["status"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := d.Set("status_message", flattenComputeRegionCommitmentStatusMessage(res["statusMessage"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := d.Set("plan", flattenComputeRegionCommitmentPlan(res["plan"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := d.Set("start_timestamp", flattenComputeRegionCommitmentStartTimestamp(res["startTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := d.Set("end_timestamp", flattenComputeRegionCommitmentEndTimestamp(res["endTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := d.Set("resources", flattenComputeRegionCommitmentResources(res["resources"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := d.Set("type", flattenComputeRegionCommitmentType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := d.Set("category", flattenComputeRegionCommitmentCategory(res["category"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) 
+ } + if err := d.Set("license_resource", flattenComputeRegionCommitmentLicenseResource(res["licenseResource"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := d.Set("auto_renew", flattenComputeRegionCommitmentAutoRenew(res["autoRenew"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := d.Set("region", flattenComputeRegionCommitmentRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading RegionCommitment: %s", err) + } + + return nil +} + +func resourceComputeRegionCommitmentDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] Compute RegionCommitment resources"+ + " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func resourceComputeRegionCommitmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/commitments/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/commitments/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeRegionCommitmentCommitmentId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok 
{ + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionCommitmentCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentStatusMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentPlan(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentStartTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentEndTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "type": flattenComputeRegionCommitmentResourcesType(original["type"], d, 
config), + "amount": flattenComputeRegionCommitmentResourcesAmount(original["amount"], d, config), + "accelerator_type": flattenComputeRegionCommitmentResourcesAcceleratorType(original["acceleratorType"], d, config), + }) + } + return transformed +} +func flattenComputeRegionCommitmentResourcesType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentResourcesAmount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentResourcesAcceleratorType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentCategory(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentLicenseResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["license"] = + flattenComputeRegionCommitmentLicenseResourceLicense(original["license"], d, config) + transformed["amount"] = + flattenComputeRegionCommitmentLicenseResourceAmount(original["amount"], d, config) + transformed["cores_per_license"] = + flattenComputeRegionCommitmentLicenseResourceCoresPerLicense(original["coresPerLicense"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionCommitmentLicenseResourceLicense(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentLicenseResourceAmount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + 
return v +} + +func flattenComputeRegionCommitmentLicenseResourceCoresPerLicense(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentAutoRenew(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionCommitmentRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandComputeRegionCommitmentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionCommitmentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionCommitmentPlan(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionCommitmentResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandComputeRegionCommitmentResourcesType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedAmount, err := expandComputeRegionCommitmentResourcesAmount(original["amount"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAmount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["amount"] = transformedAmount + } + + 
transformedAcceleratorType, err := expandComputeRegionCommitmentResourcesAcceleratorType(original["accelerator_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceleratorType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["acceleratorType"] = transformedAcceleratorType + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeRegionCommitmentResourcesType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionCommitmentResourcesAmount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionCommitmentResourcesAcceleratorType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionCommitmentType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionCommitmentCategory(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionCommitmentLicenseResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLicense, err := expandComputeRegionCommitmentLicenseResourceLicense(original["license"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLicense); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["license"] = transformedLicense + } + + transformedAmount, err := 
expandComputeRegionCommitmentLicenseResourceAmount(original["amount"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAmount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["amount"] = transformedAmount + } + + transformedCoresPerLicense, err := expandComputeRegionCommitmentLicenseResourceCoresPerLicense(original["cores_per_license"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCoresPerLicense); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["coresPerLicense"] = transformedCoresPerLicense + } + + return transformed, nil +} + +func expandComputeRegionCommitmentLicenseResourceLicense(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionCommitmentLicenseResourceAmount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionCommitmentLicenseResourceCoresPerLicense(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionCommitmentAutoRenew(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionCommitmentRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_disk.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_disk.go new file mode 100644 index 0000000000..5e4e0f8007 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_disk.go @@ -0,0 +1,1417 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/googleapi" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeRegionDisk() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionDiskCreate, + Read: resourceComputeRegionDiskRead, + Update: resourceComputeRegionDiskUpdate, + Delete: resourceComputeRegionDiskDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRegionDiskImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( 
+ customdiff.ForceNewIfChange("size", IsDiskShrinkage), + hyperDiskIopsUpdateDiffSupress, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "replica_zones": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `URLs of the zones where the disk should be replicated to.`, + MinItems: 2, + MaxItems: 2, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + "async_primary_disk": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `A nested object resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Primary disk for asynchronous disk replication.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource. Provide this property when +you create the resource.`, + }, + "disk_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Encrypts the disk using a customer-supplied encryption key. + +After you encrypt a disk with a customer-supplied key, you must +provide the same key if you use the disk later (e.g. to create a disk +snapshot or an image, or to attach the disk to a virtual machine). 
+ +Customer-supplied encryption keys do not protect access to metadata of +the disk. + +If you do not provide an encryption key when creating the disk, then +the disk will be encrypted using an automatically generated key and +you do not need to provide a key to use the disk later.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the encryption key that is stored in Google Cloud KMS.`, + }, + "raw_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies a 256-bit customer-supplied encryption key, encoded in +RFC 4648 base64 to either encrypt or decrypt this resource.`, + Sensitive: true, + }, + "sha256": { + Type: schema.TypeString, + Computed: true, + Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied +encryption key that protects this resource.`, + }, + }, + }, + }, + "guest_os_features": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A list of features to enable on the guest operating system. +Applicable only for bootable disks.`, + Elem: computeRegionDiskGuestOsFeaturesSchema(), + // Default schema.HashSchema is used. + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels to apply to this disk. A list of key->value pairs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "licenses": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Any applicable license URI.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + "physical_block_size_bytes": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Physical block size of the persistent disk, in bytes. If not present +in a request, a default value is used. 
Currently supported sizes +are 4096 and 16384, other sizes may be added in the future. +If an unsupported value is requested, the error message will list +the supported values for the caller's project.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the region where the disk resides.`, + }, + "size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Size of the persistent disk, specified in GB. You can specify this +field when creating a persistent disk using the sourceImage or +sourceSnapshot parameter, or specify it alone to create an empty +persistent disk. + +If you specify this field along with sourceImage or sourceSnapshot, +the value of sizeGb must not be less than the size of the sourceImage +or the size of the snapshot.`, + }, + "snapshot": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The source snapshot used to create this disk. You can provide this as +a partial or full URL to the resource. For example, the following are +valid values: + +* 'https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot' +* 'projects/project/global/snapshots/snapshot' +* 'global/snapshots/snapshot' +* 'snapshot'`, + }, + "source_disk": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: sourceDiskDiffSupress, + Description: `The source disk used to create this disk. You can provide this as a partial or full URL to the resource. 
+For example, the following are valid values: + +* https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks/{disk} +* https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks/{disk} +* projects/{project}/zones/{zone}/disks/{disk} +* projects/{project}/regions/{region}/disks/{disk} +* zones/{zone}/disks/{disk} +* regions/{region}/disks/{disk}`, + }, + "source_snapshot_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The customer-supplied encryption key of the source snapshot. Required +if the source snapshot is protected by a customer-supplied encryption +key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "raw_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies a 256-bit customer-supplied encryption key, encoded in +RFC 4648 base64 to either encrypt or decrypt this resource.`, + }, + "sha256": { + Type: schema.TypeString, + Computed: true, + Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied +encryption key that protects this resource.`, + }, + }, + }, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the disk type resource describing which disk type to use to +create the disk. Provide this when creating the disk.`, + Default: "pd-standard", + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The fingerprint used for optimistic locking of this resource. 
Used +internally during updates.`, + }, + "last_attach_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Last attach timestamp in RFC3339 text format.`, + }, + "last_detach_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Last detach timestamp in RFC3339 text format.`, + }, + "source_disk_id": { + Type: schema.TypeString, + Computed: true, + Description: `The ID value of the disk used to create this image. This value may +be used to determine whether the image was taken from the current +or a previous instance of a given disk name.`, + }, + "source_snapshot_id": { + Type: schema.TypeString, + Computed: true, + Description: `The unique ID of the snapshot used to create this disk. This value +identifies the exact snapshot that was used to create this persistent +disk. For example, if you created the persistent disk from a snapshot +that was later deleted and recreated under the same name, the source +snapshot ID would identify the exact version of the snapshot that was +used.`, + }, + "users": { + Type: schema.TypeList, + Computed: true, + Description: `Links to the users of the disk (attached instances) in form: +project/zones/zone/instances/instance`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func computeRegionDiskGuestOsFeaturesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS", "GVNIC", "SEV_LIVE_MIGRATABLE", "SEV_SNP_CAPABLE", "SUSPEND_RESUME_COMPATIBLE", 
"TDX_CAPABLE"}), + Description: `The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. Possible values: ["MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS", "GVNIC", "SEV_LIVE_MIGRATABLE", "SEV_SNP_CAPABLE", "SUSPEND_RESUME_COMPATIBLE", "TDX_CAPABLE"]`, + }, + }, + } +} + +func resourceComputeRegionDiskCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelFingerprintProp, err := expandComputeRegionDiskLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + descriptionProp, err := expandComputeRegionDiskDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandComputeRegionDiskLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + nameProp, err := expandComputeRegionDiskName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, 
nameProp)) { + obj["name"] = nameProp + } + sizeGbProp, err := expandComputeRegionDiskSize(d.Get("size"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("size"); !tpgresource.IsEmptyValue(reflect.ValueOf(sizeGbProp)) && (ok || !reflect.DeepEqual(v, sizeGbProp)) { + obj["sizeGb"] = sizeGbProp + } + physicalBlockSizeBytesProp, err := expandComputeRegionDiskPhysicalBlockSizeBytes(d.Get("physical_block_size_bytes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("physical_block_size_bytes"); !tpgresource.IsEmptyValue(reflect.ValueOf(physicalBlockSizeBytesProp)) && (ok || !reflect.DeepEqual(v, physicalBlockSizeBytesProp)) { + obj["physicalBlockSizeBytes"] = physicalBlockSizeBytesProp + } + replicaZonesProp, err := expandComputeRegionDiskReplicaZones(d.Get("replica_zones"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("replica_zones"); !tpgresource.IsEmptyValue(reflect.ValueOf(replicaZonesProp)) && (ok || !reflect.DeepEqual(v, replicaZonesProp)) { + obj["replicaZones"] = replicaZonesProp + } + typeProp, err := expandComputeRegionDiskType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + sourceDiskProp, err := expandComputeRegionDiskSourceDisk(d.Get("source_disk"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_disk"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceDiskProp)) && (ok || !reflect.DeepEqual(v, sourceDiskProp)) { + obj["sourceDisk"] = sourceDiskProp + } + asyncPrimaryDiskProp, err := expandComputeRegionDiskAsyncPrimaryDisk(d.Get("async_primary_disk"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("async_primary_disk"); !tpgresource.IsEmptyValue(reflect.ValueOf(asyncPrimaryDiskProp)) && (ok || !reflect.DeepEqual(v, 
asyncPrimaryDiskProp)) { + obj["asyncPrimaryDisk"] = asyncPrimaryDiskProp + } + guestOsFeaturesProp, err := expandComputeRegionDiskGuestOsFeatures(d.Get("guest_os_features"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("guest_os_features"); !tpgresource.IsEmptyValue(reflect.ValueOf(guestOsFeaturesProp)) && (ok || !reflect.DeepEqual(v, guestOsFeaturesProp)) { + obj["guestOsFeatures"] = guestOsFeaturesProp + } + licensesProp, err := expandComputeRegionDiskLicenses(d.Get("licenses"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("licenses"); !tpgresource.IsEmptyValue(reflect.ValueOf(licensesProp)) && (ok || !reflect.DeepEqual(v, licensesProp)) { + obj["licenses"] = licensesProp + } + regionProp, err := expandComputeRegionDiskRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + diskEncryptionKeyProp, err := expandComputeRegionDiskDiskEncryptionKey(d.Get("disk_encryption_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disk_encryption_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(diskEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, diskEncryptionKeyProp)) { + obj["diskEncryptionKey"] = diskEncryptionKeyProp + } + sourceSnapshotProp, err := expandComputeRegionDiskSnapshot(d.Get("snapshot"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("snapshot"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceSnapshotProp)) && (ok || !reflect.DeepEqual(v, sourceSnapshotProp)) { + obj["sourceSnapshot"] = sourceSnapshotProp + } + sourceSnapshotEncryptionKeyProp, err := expandComputeRegionDiskSourceSnapshotEncryptionKey(d.Get("source_snapshot_encryption_key"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("source_snapshot_encryption_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceSnapshotEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, sourceSnapshotEncryptionKeyProp)) { + obj["sourceSnapshotEncryptionKey"] = sourceSnapshotEncryptionKeyProp + } + + obj, err = resourceComputeRegionDiskEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RegionDisk: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionDisk: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating RegionDisk: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/disks/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating RegionDisk", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create RegionDisk: %s", err) + } + + log.Printf("[DEBUG] Finished creating RegionDisk %q: %#v", d.Id(), res) + + return resourceComputeRegionDiskRead(d, meta) +} + +func resourceComputeRegionDiskRead(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionDisk: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionDisk %q", d.Id())) + } + + res, err = resourceComputeRegionDiskDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing ComputeRegionDisk because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + + if err := d.Set("label_fingerprint", flattenComputeRegionDiskLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeRegionDiskCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("description", flattenComputeRegionDiskDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("last_attach_timestamp", flattenComputeRegionDiskLastAttachTimestamp(res["lastAttachTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("last_detach_timestamp", flattenComputeRegionDiskLastDetachTimestamp(res["lastDetachTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("labels", flattenComputeRegionDiskLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("name", flattenComputeRegionDiskName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("size", flattenComputeRegionDiskSize(res["sizeGb"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("users", flattenComputeRegionDiskUsers(res["users"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("physical_block_size_bytes", flattenComputeRegionDiskPhysicalBlockSizeBytes(res["physicalBlockSizeBytes"], d, config)); err != nil 
{ + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("replica_zones", flattenComputeRegionDiskReplicaZones(res["replicaZones"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("type", flattenComputeRegionDiskType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("source_disk", flattenComputeRegionDiskSourceDisk(res["sourceDisk"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("source_disk_id", flattenComputeRegionDiskSourceDiskId(res["sourceDiskId"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("async_primary_disk", flattenComputeRegionDiskAsyncPrimaryDisk(res["asyncPrimaryDisk"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("guest_os_features", flattenComputeRegionDiskGuestOsFeatures(res["guestOsFeatures"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("licenses", flattenComputeRegionDiskLicenses(res["licenses"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("region", flattenComputeRegionDiskRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("disk_encryption_key", flattenComputeRegionDiskDiskEncryptionKey(res["diskEncryptionKey"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("snapshot", flattenComputeRegionDiskSnapshot(res["sourceSnapshot"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("source_snapshot_encryption_key", flattenComputeRegionDiskSourceSnapshotEncryptionKey(res["sourceSnapshotEncryptionKey"], d, config)); err != nil { 
+ return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("source_snapshot_id", flattenComputeRegionDiskSourceSnapshotId(res["sourceSnapshotId"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading RegionDisk: %s", err) + } + + return nil +} + +func resourceComputeRegionDiskUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionDisk: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("label_fingerprint") || d.HasChange("labels") { + obj := make(map[string]interface{}) + + labelFingerprintProp, err := expandComputeRegionDiskLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + labelsProp, err := expandComputeRegionDiskLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{name}}/setLabels") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating RegionDisk %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating RegionDisk %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating RegionDisk", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("size") { + obj := make(map[string]interface{}) + + sizeGbProp, err := expandComputeRegionDiskSize(d.Get("size"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("size"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sizeGbProp)) { + obj["sizeGb"] = sizeGbProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{name}}/resize") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating RegionDisk %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating RegionDisk %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating RegionDisk", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeRegionDiskRead(d, meta) +} + +func resourceComputeRegionDiskDelete(d *schema.ResourceData, meta interface{}) error { + 
config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionDisk: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + readRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeDisk %q", d.Id())) + } + + // if disks are attached to instances, they must be detached before the disk can be deleted + if v, ok := readRes["users"].([]interface{}); ok { + type detachArgs struct{ project, zone, instance, deviceName string } + var detachCalls []detachArgs + + for _, instance := range tpgresource.ConvertStringArr(v) { + self := d.Get("self_link").(string) + instanceProject, instanceZone, instanceName, err := tpgresource.GetLocationalResourcePropertiesFromSelfLinkString(instance) + if err != nil { + return err + } + + i, err := config.NewComputeClient(userAgent).Instances.Get(instanceProject, instanceZone, instanceName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] instance %q not found, not bothering to detach disks", instance) + continue + } + return fmt.Errorf("Error retrieving instance %s: %s", instance, err.Error()) + } + for _, disk := range i.Disks { + if tpgresource.CompareSelfLinkOrResourceName("", disk.Source, self, nil) { + detachCalls = append(detachCalls, detachArgs{ + project: instanceProject, + zone: tpgresource.GetResourceNameFromSelfLink(i.Zone), + instance: i.Name, + 
deviceName: disk.DeviceName, + }) + } + } + } + + for _, call := range detachCalls { + op, err := config.NewComputeClient(userAgent).Instances.DetachDisk(call.project, call.zone, call.instance, call.deviceName).Do() + if err != nil { + return fmt.Errorf("Error detaching disk %s from instance %s/%s/%s: %s", call.deviceName, call.project, + call.zone, call.instance, err.Error()) + } + err = ComputeOperationWaitTime(config, op, call.project, + fmt.Sprintf("Detaching disk from %s/%s/%s", call.project, call.zone, call.instance), userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + if opErr, ok := err.(ComputeOperationError); ok && len(opErr.Errors) == 1 && opErr.Errors[0].Code == "RESOURCE_NOT_FOUND" { + log.Printf("[WARN] instance %q was deleted while awaiting detach", call.instance) + continue + } + return err + } + } + } + log.Printf("[DEBUG] Deleting RegionDisk %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RegionDisk") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting RegionDisk", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting RegionDisk %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRegionDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/disks/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, 
config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/disks/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeRegionDiskLabelFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskLastAttachTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskLastDetachTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionDiskUsers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return 
tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeRegionDiskPhysicalBlockSizeBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionDiskReplicaZones(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeRegionDiskType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenComputeRegionDiskSourceDisk(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskSourceDiskId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskAsyncPrimaryDisk(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["disk"] = + flattenComputeRegionDiskAsyncPrimaryDiskDisk(original["disk"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionDiskAsyncPrimaryDiskDisk(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskGuestOsFeatures(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(computeRegionDiskGuestOsFeaturesSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "type": flattenComputeRegionDiskGuestOsFeaturesType(original["type"], d, config), + }) + } + return transformed +} +func flattenComputeRegionDiskGuestOsFeaturesType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskLicenses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeRegionDiskRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenComputeRegionDiskDiskEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["raw_key"] = + flattenComputeRegionDiskDiskEncryptionKeyRawKey(original["rawKey"], d, config) + transformed["sha256"] = + flattenComputeRegionDiskDiskEncryptionKeySha256(original["sha256"], d, config) + transformed["kms_key_name"] = + flattenComputeRegionDiskDiskEncryptionKeyKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionDiskDiskEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + 
+func flattenComputeRegionDiskDiskEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskDiskEncryptionKeyKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskSnapshot(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRegionDiskSourceSnapshotEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["raw_key"] = + flattenComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(original["rawKey"], d, config) + transformed["sha256"] = + flattenComputeRegionDiskSourceSnapshotEncryptionKeySha256(original["sha256"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskSourceSnapshotEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionDiskSourceSnapshotId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeRegionDiskLabelFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionDiskDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionDiskLabels(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandComputeRegionDiskName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionDiskSize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionDiskPhysicalBlockSizeBytes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionDiskReplicaZones(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for replica_zones: nil") + } + f, err := tpgresource.ParseGlobalFieldValue("zones", raw.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for replica_zones: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func expandComputeRegionDiskType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("diskTypes", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for type: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeRegionDiskSourceDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionDiskAsyncPrimaryDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDisk, err := expandComputeRegionDiskAsyncPrimaryDiskDisk(original["disk"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisk); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["disk"] = transformedDisk + } + + return transformed, nil +} + +func expandComputeRegionDiskAsyncPrimaryDiskDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionDiskGuestOsFeatures(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandComputeRegionDiskGuestOsFeaturesType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeRegionDiskGuestOsFeaturesType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionDiskLicenses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for licenses: nil") + } + f, err := tpgresource.ParseGlobalFieldValue("licenses", raw.(string), 
"project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for licenses: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func expandComputeRegionDiskRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeRegionDiskDiskEncryptionKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRawKey, err := expandComputeRegionDiskDiskEncryptionKeyRawKey(original["raw_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRawKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rawKey"] = transformedRawKey + } + + transformedSha256, err := expandComputeRegionDiskDiskEncryptionKeySha256(original["sha256"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha256); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha256"] = transformedSha256 + } + + transformedKmsKeyName, err := expandComputeRegionDiskDiskEncryptionKeyKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandComputeRegionDiskDiskEncryptionKeyRawKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil 
+} + +func expandComputeRegionDiskDiskEncryptionKeySha256(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionDiskDiskEncryptionKeyKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionDiskSnapshot(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("snapshots", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for snapshot: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeRegionDiskSourceSnapshotEncryptionKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRawKey, err := expandComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(original["raw_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRawKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rawKey"] = transformedRawKey + } + + transformedSha256, err := expandComputeRegionDiskSourceSnapshotEncryptionKeySha256(original["sha256"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha256); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha256"] = transformedSha256 + } + + return transformed, nil +} + +func expandComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionDiskSourceSnapshotEncryptionKeySha256(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceComputeRegionDiskEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + if v, ok := d.GetOk("type"); ok { + log.Printf("[DEBUG] Loading disk type: %s", v.(string)) + diskType, err := readRegionDiskType(config, d, v.(string)) + if err != nil { + return nil, fmt.Errorf( + "Error loading disk type '%s': %s", + v.(string), err) + } + + obj["type"] = diskType.RelativeLink() + } + + if v, ok := d.GetOk("image"); ok { + log.Printf("[DEBUG] Resolving image name: %s", v.(string)) + imageUrl, err := ResolveImage(config, project, v.(string), userAgent) + if err != nil { + return nil, fmt.Errorf( + "Error resolving image name '%s': %s", + v.(string), err) + } + + obj["sourceImage"] = imageUrl + log.Printf("[DEBUG] Image name resolved to: %s", imageUrl) + } + + return obj, nil +} + +func resourceComputeRegionDiskDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if v, ok := res["diskEncryptionKey"]; ok { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + // The raw key won't be returned, so we need to use the original. 
+ transformed["rawKey"] = d.Get("disk_encryption_key.0.raw_key") + transformed["rsaEncryptedKey"] = d.Get("disk_encryption_key.0.rsa_encrypted_key") + transformed["sha256"] = original["sha256"] + + if kmsKeyName, ok := original["kmsKeyName"]; ok { + // The response for crypto keys often includes the version of the key which needs to be removed + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] + } + + if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { + transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount + } + + res["diskEncryptionKey"] = transformed + } + + if v, ok := res["sourceImageEncryptionKey"]; ok { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + // The raw key won't be returned, so we need to use the original. + transformed["rawKey"] = d.Get("source_image_encryption_key.0.raw_key") + transformed["sha256"] = original["sha256"] + + if kmsKeyName, ok := original["kmsKeyName"]; ok { + // The response for crypto keys often includes the version of the key which needs to be removed + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] + } + + if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { + transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount + } + + res["sourceImageEncryptionKey"] = transformed + } + + if v, ok := res["sourceSnapshotEncryptionKey"]; ok { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + // The raw key won't be returned, so we need to use the original. 
+ transformed["rawKey"] = d.Get("source_snapshot_encryption_key.0.raw_key") + transformed["sha256"] = original["sha256"] + + if kmsKeyName, ok := original["kmsKeyName"]; ok { + // The response for crypto keys often includes the version of the key which needs to be removed + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] + } + + if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { + transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount + } + + res["sourceSnapshotEncryptionKey"] = transformed + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_disk_resource_policy_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_disk_resource_policy_attachment.go new file mode 100644 index 0000000000..8a2fa6420c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_disk_resource_policy_attachment.go @@ -0,0 +1,407 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeRegionDiskResourcePolicyAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionDiskResourcePolicyAttachmentCreate, + Read: resourceComputeRegionDiskResourcePolicyAttachmentRead, + Delete: resourceComputeRegionDiskResourcePolicyAttachmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRegionDiskResourcePolicyAttachmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "disk": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the regional disk in which the resource policies are attached to.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource policy to be attached to the disk for scheduling snapshot +creation. 
Do not specify the self link.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the region where the disk resides.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRegionDiskResourcePolicyAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandNestedComputeRegionDiskResourcePolicyAttachmentName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + obj, err = resourceComputeRegionDiskResourcePolicyAttachmentEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{disk}}/addResourcePolicies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RegionDiskResourcePolicyAttachment: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionDiskResourcePolicyAttachment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating RegionDiskResourcePolicyAttachment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{region}}/{{disk}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating RegionDiskResourcePolicyAttachment", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create RegionDiskResourcePolicyAttachment: %s", err) + } + + log.Printf("[DEBUG] Finished creating RegionDiskResourcePolicyAttachment %q: %#v", d.Id(), res) + + return resourceComputeRegionDiskResourcePolicyAttachmentRead(d, meta) +} + +func resourceComputeRegionDiskResourcePolicyAttachmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{disk}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionDiskResourcePolicyAttachment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionDiskResourcePolicyAttachment %q", d.Id())) + } + + res, 
err = flattenNestedComputeRegionDiskResourcePolicyAttachment(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing ComputeRegionDiskResourcePolicyAttachment because it couldn't be matched.") + d.SetId("") + return nil + } + + res, err = resourceComputeRegionDiskResourcePolicyAttachmentDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing ComputeRegionDiskResourcePolicyAttachment because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RegionDiskResourcePolicyAttachment: %s", err) + } + + if err := d.Set("name", flattenNestedComputeRegionDiskResourcePolicyAttachmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionDiskResourcePolicyAttachment: %s", err) + } + + return nil +} + +func resourceComputeRegionDiskResourcePolicyAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionDiskResourcePolicyAttachment: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{disk}}/removeResourcePolicies") + if err != nil { + return err + } + + var obj map[string]interface{} + obj = make(map[string]interface{}) + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + if region == "" { + return fmt.Errorf("region must be non-empty - set in resource or at provider-level") + } 
+ + name, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(name)) && (ok || !reflect.DeepEqual(v, name)) { + obj["resourcePolicies"] = []interface{}{fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, name)} + } + log.Printf("[DEBUG] Deleting RegionDiskResourcePolicyAttachment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RegionDiskResourcePolicyAttachment") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting RegionDiskResourcePolicyAttachment", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting RegionDiskResourcePolicyAttachment %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRegionDiskResourcePolicyAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/disks/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{region}}/{{disk}}/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return 
[]*schema.ResourceData{d}, nil +} + +func flattenNestedComputeRegionDiskResourcePolicyAttachmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedComputeRegionDiskResourcePolicyAttachmentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceComputeRegionDiskResourcePolicyAttachmentEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return nil, err + } + if region == "" { + return nil, fmt.Errorf("region must be non-empty - set in resource or at provider-level") + } + + obj["resourcePolicies"] = []interface{}{fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, obj["name"])} + delete(obj, "name") + return obj, nil +} + +func flattenNestedComputeRegionDiskResourcePolicyAttachment(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["resourcePolicies"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value resourcePolicies. 
Actual value: %v", v) + } + + _, item, err := resourceComputeRegionDiskResourcePolicyAttachmentFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputeRegionDiskResourcePolicyAttachmentFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName, err := expandNestedComputeRegionDiskResourcePolicyAttachmentName(d.Get("name"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedName := flattenNestedComputeRegionDiskResourcePolicyAttachmentName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + // List response only contains the ID - construct a response object. + item := map[string]interface{}{ + "name": itemRaw, + } + + // Decode list item before comparing. + item, err := resourceComputeRegionDiskResourcePolicyAttachmentDecoder(d, meta, item) + if err != nil { + return -1, nil, err + } + + itemName := flattenNestedComputeRegionDiskResourcePolicyAttachmentName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} +func resourceComputeRegionDiskResourcePolicyAttachmentDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + res["name"] = 
tpgresource.GetResourceNameFromSelfLink(res["name"].(string)) + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_health_check.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_health_check.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_health_check.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_health_check.go index 5108cf6391..a41b7eb4a4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_health_check.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_health_check.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "fmt" @@ -20,7 +23,12 @@ import ( "reflect" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceComputeRegionHealthCheck() *schema.Resource { @@ -40,7 +48,9 @@ func ResourceComputeRegionHealthCheck() *schema.Resource { Delete: schema.DefaultTimeout(20 * time.Minute), }, - CustomizeDiff: healthCheckCustomizeDiff, + CustomizeDiff: customdiff.All( + healthCheckCustomizeDiff, + ), Schema: map[string]*schema.Schema{ "name": { @@ -106,7 +116,7 @@ port_name are defined, port takes precedence.`, "port_specification": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), Description: `Specifies how port is selected for health checking, can be one of the following values: @@ -167,7 +177,7 @@ port_name are defined, port takes precedence.`, "port_specification": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), Description: `Specifies how port is selected for health checking, can be one of the following values: @@ -187,7 +197,7 @@ If not specified, HTTP2 health check 
follows behavior specified in 'port' and "proxy_header": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "PROXY_V1", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "PROXY_V1", ""}), Description: `Specifies the type of proxy header to append before sending data to the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, Default: "NONE", @@ -246,7 +256,7 @@ port_name are defined, port takes precedence.`, "port_specification": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), Description: `Specifies how port is selected for health checking, can be one of the following values: @@ -266,7 +276,7 @@ If not specified, HTTP health check follows behavior specified in 'port' and "proxy_header": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "PROXY_V1", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "PROXY_V1", ""}), Description: `Specifies the type of proxy header to append before sending data to the backend. 
Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, Default: "NONE", @@ -325,7 +335,7 @@ port_name are defined, port takes precedence.`, "port_specification": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), Description: `Specifies how port is selected for health checking, can be one of the following values: @@ -345,7 +355,7 @@ If not specified, HTTPS health check follows behavior specified in 'port' and "proxy_header": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "PROXY_V1", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "PROXY_V1", ""}), Description: `Specifies the type of proxy header to append before sending data to the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, Default: "NONE", @@ -394,7 +404,7 @@ which means no health check logging will be done.`, Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The Region in which the created health check should reside. 
If it is not provided, the provider region is used.`, }, @@ -423,7 +433,7 @@ port_name are defined, port takes precedence.`, "port_specification": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), Description: `Specifies how port is selected for health checking, can be one of the following values: @@ -443,7 +453,7 @@ If not specified, SSL health check follows behavior specified in 'port' and "proxy_header": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "PROXY_V1", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "PROXY_V1", ""}), Description: `Specifies the type of proxy header to append before sending data to the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, Default: "NONE", @@ -495,7 +505,7 @@ port_name are defined, port takes precedence.`, "port_specification": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}), Description: `Specifies how port is selected for health checking, can be one of the following values: @@ -515,7 +525,7 @@ If not specified, TCP health check follows behavior specified in 'port' and "proxy_header": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"NONE", "PROXY_V1", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NONE", "PROXY_V1", ""}), Description: `Specifies the type of proxy header to append before sending data to the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, Default: "NONE", @@ -583,8 +593,8 @@ consecutive failures. 
The default value is 2.`, } func resourceComputeRegionHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -593,7 +603,7 @@ func resourceComputeRegionHealthCheckCreate(d *schema.ResourceData, meta interfa checkIntervalSecProp, err := expandComputeRegionHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(reflect.ValueOf(checkIntervalSecProp)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { + } else if v, ok := d.GetOkExists("check_interval_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(checkIntervalSecProp)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { obj["checkIntervalSec"] = checkIntervalSecProp } descriptionProp, err := expandComputeRegionHealthCheckDescription(d.Get("description"), d, config) @@ -605,73 +615,73 @@ func resourceComputeRegionHealthCheckCreate(d *schema.ResourceData, meta interfa healthyThresholdProp, err := expandComputeRegionHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(reflect.ValueOf(healthyThresholdProp)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { + } else if v, ok := d.GetOkExists("healthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(healthyThresholdProp)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { obj["healthyThreshold"] = healthyThresholdProp } nameProp, err := expandComputeRegionHealthCheckName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := 
d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } unhealthyThresholdProp, err := expandComputeRegionHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(reflect.ValueOf(unhealthyThresholdProp)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { + } else if v, ok := d.GetOkExists("unhealthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(unhealthyThresholdProp)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { obj["unhealthyThreshold"] = unhealthyThresholdProp } timeoutSecProp, err := expandComputeRegionHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(reflect.ValueOf(timeoutSecProp)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { + } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeoutSecProp)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { obj["timeoutSec"] = timeoutSecProp } httpHealthCheckProp, err := expandComputeRegionHealthCheckHttpHealthCheck(d.Get("http_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("http_health_check"); !isEmptyValue(reflect.ValueOf(httpHealthCheckProp)) && (ok || !reflect.DeepEqual(v, httpHealthCheckProp)) { + } else if v, ok := d.GetOkExists("http_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(httpHealthCheckProp)) && (ok || !reflect.DeepEqual(v, httpHealthCheckProp)) { obj["httpHealthCheck"] = httpHealthCheckProp } httpsHealthCheckProp, err := expandComputeRegionHealthCheckHttpsHealthCheck(d.Get("https_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("https_health_check"); !isEmptyValue(reflect.ValueOf(httpsHealthCheckProp)) && (ok || !reflect.DeepEqual(v, 
httpsHealthCheckProp)) { + } else if v, ok := d.GetOkExists("https_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(httpsHealthCheckProp)) && (ok || !reflect.DeepEqual(v, httpsHealthCheckProp)) { obj["httpsHealthCheck"] = httpsHealthCheckProp } tcpHealthCheckProp, err := expandComputeRegionHealthCheckTcpHealthCheck(d.Get("tcp_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("tcp_health_check"); !isEmptyValue(reflect.ValueOf(tcpHealthCheckProp)) && (ok || !reflect.DeepEqual(v, tcpHealthCheckProp)) { + } else if v, ok := d.GetOkExists("tcp_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(tcpHealthCheckProp)) && (ok || !reflect.DeepEqual(v, tcpHealthCheckProp)) { obj["tcpHealthCheck"] = tcpHealthCheckProp } sslHealthCheckProp, err := expandComputeRegionHealthCheckSslHealthCheck(d.Get("ssl_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("ssl_health_check"); !isEmptyValue(reflect.ValueOf(sslHealthCheckProp)) && (ok || !reflect.DeepEqual(v, sslHealthCheckProp)) { + } else if v, ok := d.GetOkExists("ssl_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(sslHealthCheckProp)) && (ok || !reflect.DeepEqual(v, sslHealthCheckProp)) { obj["sslHealthCheck"] = sslHealthCheckProp } http2HealthCheckProp, err := expandComputeRegionHealthCheckHttp2HealthCheck(d.Get("http2_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("http2_health_check"); !isEmptyValue(reflect.ValueOf(http2HealthCheckProp)) && (ok || !reflect.DeepEqual(v, http2HealthCheckProp)) { + } else if v, ok := d.GetOkExists("http2_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(http2HealthCheckProp)) && (ok || !reflect.DeepEqual(v, http2HealthCheckProp)) { obj["http2HealthCheck"] = http2HealthCheckProp } grpcHealthCheckProp, err := expandComputeRegionHealthCheckGrpcHealthCheck(d.Get("grpc_health_check"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("grpc_health_check"); !isEmptyValue(reflect.ValueOf(grpcHealthCheckProp)) && (ok || !reflect.DeepEqual(v, grpcHealthCheckProp)) { + } else if v, ok := d.GetOkExists("grpc_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(grpcHealthCheckProp)) && (ok || !reflect.DeepEqual(v, grpcHealthCheckProp)) { obj["grpcHealthCheck"] = grpcHealthCheckProp } logConfigProp, err := expandComputeRegionHealthCheckLogConfig(d.Get("log_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(reflect.ValueOf(logConfigProp)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { + } else if v, ok := d.GetOkExists("log_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(logConfigProp)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { obj["logConfig"] = logConfigProp } regionProp, err := expandComputeRegionHealthCheckRegion(d.Get("region"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { obj["region"] = regionProp } @@ -680,7 +690,7 @@ func resourceComputeRegionHealthCheckCreate(d *schema.ResourceData, meta interfa return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/healthChecks") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/healthChecks") if err != nil { return err } @@ -688,24 +698,32 @@ func resourceComputeRegionHealthCheckCreate(d *schema.ResourceData, meta interfa log.Printf("[DEBUG] Creating new RegionHealthCheck: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RegionHealthCheck: %s", 
err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating RegionHealthCheck: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -727,33 +745,39 @@ func resourceComputeRegionHealthCheckCreate(d *schema.ResourceData, meta interfa } func resourceComputeRegionHealthCheckRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RegionHealthCheck: %s", err) } billingProject = project // err == nil indicates that the billing_project value 
was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionHealthCheck %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionHealthCheck %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -808,7 +832,7 @@ func resourceComputeRegionHealthCheckRead(d *schema.ResourceData, meta interface if err := d.Set("region", flattenComputeRegionHealthCheckRegion(res["region"], d, config)); err != nil { return fmt.Errorf("Error reading RegionHealthCheck: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { return fmt.Errorf("Error reading RegionHealthCheck: %s", err) } @@ -816,15 +840,15 @@ func resourceComputeRegionHealthCheckRead(d *schema.ResourceData, meta interface } func resourceComputeRegionHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RegionHealthCheck: %s", err) } @@ -834,7 +858,7 @@ func resourceComputeRegionHealthCheckUpdate(d *schema.ResourceData, meta interfa checkIntervalSecProp, err := 
expandComputeRegionHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { + } else if v, ok := d.GetOkExists("check_interval_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, checkIntervalSecProp)) { obj["checkIntervalSec"] = checkIntervalSecProp } descriptionProp, err := expandComputeRegionHealthCheckDescription(d.Get("description"), d, config) @@ -846,73 +870,73 @@ func resourceComputeRegionHealthCheckUpdate(d *schema.ResourceData, meta interfa healthyThresholdProp, err := expandComputeRegionHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { + } else if v, ok := d.GetOkExists("healthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, healthyThresholdProp)) { obj["healthyThreshold"] = healthyThresholdProp } nameProp, err := expandComputeRegionHealthCheckName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } unhealthyThresholdProp, err := expandComputeRegionHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, unhealthyThresholdProp)) { + } else if v, ok := d.GetOkExists("unhealthy_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
unhealthyThresholdProp)) { obj["unhealthyThreshold"] = unhealthyThresholdProp } timeoutSecProp, err := expandComputeRegionHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { + } else if v, ok := d.GetOkExists("timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutSecProp)) { obj["timeoutSec"] = timeoutSecProp } httpHealthCheckProp, err := expandComputeRegionHealthCheckHttpHealthCheck(d.Get("http_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("http_health_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, httpHealthCheckProp)) { + } else if v, ok := d.GetOkExists("http_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, httpHealthCheckProp)) { obj["httpHealthCheck"] = httpHealthCheckProp } httpsHealthCheckProp, err := expandComputeRegionHealthCheckHttpsHealthCheck(d.Get("https_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("https_health_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, httpsHealthCheckProp)) { + } else if v, ok := d.GetOkExists("https_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, httpsHealthCheckProp)) { obj["httpsHealthCheck"] = httpsHealthCheckProp } tcpHealthCheckProp, err := expandComputeRegionHealthCheckTcpHealthCheck(d.Get("tcp_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("tcp_health_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tcpHealthCheckProp)) { + } else if v, ok := d.GetOkExists("tcp_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tcpHealthCheckProp)) { obj["tcpHealthCheck"] = 
tcpHealthCheckProp } sslHealthCheckProp, err := expandComputeRegionHealthCheckSslHealthCheck(d.Get("ssl_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("ssl_health_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslHealthCheckProp)) { + } else if v, ok := d.GetOkExists("ssl_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslHealthCheckProp)) { obj["sslHealthCheck"] = sslHealthCheckProp } http2HealthCheckProp, err := expandComputeRegionHealthCheckHttp2HealthCheck(d.Get("http2_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("http2_health_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, http2HealthCheckProp)) { + } else if v, ok := d.GetOkExists("http2_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, http2HealthCheckProp)) { obj["http2HealthCheck"] = http2HealthCheckProp } grpcHealthCheckProp, err := expandComputeRegionHealthCheckGrpcHealthCheck(d.Get("grpc_health_check"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("grpc_health_check"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, grpcHealthCheckProp)) { + } else if v, ok := d.GetOkExists("grpc_health_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, grpcHealthCheckProp)) { obj["grpcHealthCheck"] = grpcHealthCheckProp } logConfigProp, err := expandComputeRegionHealthCheckLogConfig(d.Get("log_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { + } else if v, ok := d.GetOkExists("log_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { obj["logConfig"] = logConfigProp } regionProp, err := 
expandComputeRegionHealthCheckRegion(d.Get("region"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regionProp)) { + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regionProp)) { obj["region"] = regionProp } @@ -921,7 +945,7 @@ func resourceComputeRegionHealthCheckUpdate(d *schema.ResourceData, meta interfa return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") if err != nil { return err } @@ -929,11 +953,19 @@ func resourceComputeRegionHealthCheckUpdate(d *schema.ResourceData, meta interfa log.Printf("[DEBUG] Updating RegionHealthCheck %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating RegionHealthCheck %q: %s", d.Id(), err) @@ -953,21 +985,21 @@ func resourceComputeRegionHealthCheckUpdate(d *schema.ResourceData, meta interfa } func resourceComputeRegionHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RegionHealthCheck: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") if err != nil { return err } @@ -976,13 +1008,21 @@ func resourceComputeRegionHealthCheckDelete(d *schema.ResourceData, meta interfa log.Printf("[DEBUG] Deleting RegionHealthCheck %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "RegionHealthCheck") + return transport_tpg.HandleNotFoundError(err, d, "RegionHealthCheck") } err = ComputeOperationWaitTime( @@ -998,8 +1038,8 @@ func resourceComputeRegionHealthCheckDelete(d *schema.ResourceData, meta interfa } func resourceComputeRegionHealthCheckImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/regions/(?P[^/]+)/healthChecks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", 
"(?P[^/]+)/(?P[^/]+)", @@ -1009,7 +1049,7 @@ func resourceComputeRegionHealthCheckImport(d *schema.ResourceData, meta interfa } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -1018,10 +1058,10 @@ func resourceComputeRegionHealthCheckImport(d *schema.ResourceData, meta interfa return []*schema.ResourceData{d}, nil } -func flattenComputeRegionHealthCheckCheckIntervalSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckCheckIntervalSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1035,18 +1075,18 @@ func flattenComputeRegionHealthCheckCheckIntervalSec(v interface{}, d *schema.Re return v // let terraform core handle it otherwise } -func flattenComputeRegionHealthCheckCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHealthyThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHealthyThreshold(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1060,14 +1100,14 @@ func flattenComputeRegionHealthCheckHealthyThreshold(v interface{}, d *schema.Re return v // let terraform core handle it otherwise } -func flattenComputeRegionHealthCheckName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckUnhealthyThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckUnhealthyThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1081,10 +1121,10 @@ func flattenComputeRegionHealthCheckUnhealthyThreshold(v interface{}, d *schema. 
return v // let terraform core handle it otherwise } -func flattenComputeRegionHealthCheckTimeoutSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1098,11 +1138,11 @@ func flattenComputeRegionHealthCheckTimeoutSec(v interface{}, d *schema.Resource return v // let terraform core handle it otherwise } -func flattenComputeRegionHealthCheckType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttpHealthCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpHealthCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1127,22 +1167,22 @@ func flattenComputeRegionHealthCheckHttpHealthCheck(v interface{}, d *schema.Res flattenComputeRegionHealthCheckHttpHealthCheckPortSpecification(original["portSpecification"], d, config) return []interface{}{transformed} } -func flattenComputeRegionHealthCheckHttpHealthCheckHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpHealthCheckHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttpHealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpHealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { return v } -func flattenComputeRegionHealthCheckHttpHealthCheckResponse(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpHealthCheckResponse(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttpHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpHealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1156,19 +1196,19 @@ func flattenComputeRegionHealthCheckHttpHealthCheckPort(v interface{}, d *schema return v // let terraform core handle it otherwise } -func flattenComputeRegionHealthCheckHttpHealthCheckPortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpHealthCheckPortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttpHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttpHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttpsHealthCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionHealthCheckHttpsHealthCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1193,22 +1233,22 @@ func flattenComputeRegionHealthCheckHttpsHealthCheck(v interface{}, d *schema.Re flattenComputeRegionHealthCheckHttpsHealthCheckPortSpecification(original["portSpecification"], d, config) return []interface{}{transformed} } -func flattenComputeRegionHealthCheckHttpsHealthCheckHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpsHealthCheckHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttpsHealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpsHealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttpsHealthCheckResponse(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpsHealthCheckResponse(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttpsHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpsHealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1222,19 +1262,19 @@ func flattenComputeRegionHealthCheckHttpsHealthCheckPort(v interface{}, d *schem return v // let terraform core handle it otherwise } -func flattenComputeRegionHealthCheckHttpsHealthCheckPortName(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpsHealthCheckPortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttpsHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpsHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttpsHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttpsHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckTcpHealthCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckTcpHealthCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1257,18 +1297,18 @@ func flattenComputeRegionHealthCheckTcpHealthCheck(v interface{}, d *schema.Reso flattenComputeRegionHealthCheckTcpHealthCheckPortSpecification(original["portSpecification"], d, config) return []interface{}{transformed} } -func flattenComputeRegionHealthCheckTcpHealthCheckRequest(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckTcpHealthCheckRequest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckTcpHealthCheckResponse(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckTcpHealthCheckResponse(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckTcpHealthCheckPort(v interface{}, d *schema.ResourceData, 
config *Config) interface{} { +func flattenComputeRegionHealthCheckTcpHealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1282,19 +1322,19 @@ func flattenComputeRegionHealthCheckTcpHealthCheckPort(v interface{}, d *schema. return v // let terraform core handle it otherwise } -func flattenComputeRegionHealthCheckTcpHealthCheckPortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckTcpHealthCheckPortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckTcpHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckTcpHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckTcpHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckTcpHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckSslHealthCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckSslHealthCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1317,18 +1357,18 @@ func flattenComputeRegionHealthCheckSslHealthCheck(v interface{}, d *schema.Reso flattenComputeRegionHealthCheckSslHealthCheckPortSpecification(original["portSpecification"], d, config) return []interface{}{transformed} } -func 
flattenComputeRegionHealthCheckSslHealthCheckRequest(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckSslHealthCheckRequest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckSslHealthCheckResponse(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckSslHealthCheckResponse(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckSslHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckSslHealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1342,19 +1382,19 @@ func flattenComputeRegionHealthCheckSslHealthCheckPort(v interface{}, d *schema. 
return v // let terraform core handle it otherwise } -func flattenComputeRegionHealthCheckSslHealthCheckPortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckSslHealthCheckPortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckSslHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckSslHealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckSslHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckSslHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttp2HealthCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttp2HealthCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1379,22 +1419,22 @@ func flattenComputeRegionHealthCheckHttp2HealthCheck(v interface{}, d *schema.Re flattenComputeRegionHealthCheckHttp2HealthCheckPortSpecification(original["portSpecification"], d, config) return []interface{}{transformed} } -func flattenComputeRegionHealthCheckHttp2HealthCheckHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttp2HealthCheckHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttp2HealthCheckRequestPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttp2HealthCheckRequestPath(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttp2HealthCheckResponse(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttp2HealthCheckResponse(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttp2HealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttp2HealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1408,19 +1448,19 @@ func flattenComputeRegionHealthCheckHttp2HealthCheckPort(v interface{}, d *schem return v // let terraform core handle it otherwise } -func flattenComputeRegionHealthCheckHttp2HealthCheckPortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttp2HealthCheckPortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttp2HealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttp2HealthCheckProxyHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckHttp2HealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckHttp2HealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckGrpcHealthCheck(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionHealthCheckGrpcHealthCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1439,10 +1479,10 @@ func flattenComputeRegionHealthCheckGrpcHealthCheck(v interface{}, d *schema.Res flattenComputeRegionHealthCheckGrpcHealthCheckGrpcServiceName(original["grpcServiceName"], d, config) return []interface{}{transformed} } -func flattenComputeRegionHealthCheckGrpcHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckGrpcHealthCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1456,19 +1496,19 @@ func flattenComputeRegionHealthCheckGrpcHealthCheckPort(v interface{}, d *schema return v // let terraform core handle it otherwise } -func flattenComputeRegionHealthCheckGrpcHealthCheckPortName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckGrpcHealthCheckPortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckGrpcHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckGrpcHealthCheckPortSpecification(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckGrpcHealthCheckGrpcServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckGrpcHealthCheckGrpcServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionHealthCheckLogConfig(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckLogConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { transformed := make(map[string]interface{}) if v == nil { // Disabled by default, but API will not return object if value is false @@ -1481,38 +1521,38 @@ func flattenComputeRegionHealthCheckLogConfig(v interface{}, d *schema.ResourceD return []interface{}{transformed} } -func flattenComputeRegionHealthCheckRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionHealthCheckRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return NameFromSelfLinkStateFunc(v) + return tpgresource.NameFromSelfLinkStateFunc(v) } -func expandComputeRegionHealthCheckCheckIntervalSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckCheckIntervalSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHealthyThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } 
-func expandComputeRegionHealthCheckUnhealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckUnhealthyThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpHealthCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1524,84 +1564,84 @@ func expandComputeRegionHealthCheckHttpHealthCheck(v interface{}, d TerraformRes transformedHost, err := expandComputeRegionHealthCheckHttpHealthCheckHost(original["host"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["host"] = transformedHost } transformedRequestPath, err := expandComputeRegionHealthCheckHttpHealthCheckRequestPath(original["request_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestPath"] = transformedRequestPath } transformedResponse, err := expandComputeRegionHealthCheckHttpHealthCheckResponse(original["response"], d, config) if err != nil { return nil, 
err - } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["response"] = transformedResponse } transformedPort, err := expandComputeRegionHealthCheckHttpHealthCheckPort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedPortName, err := expandComputeRegionHealthCheckHttpHealthCheckPortName(original["port_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portName"] = transformedPortName } transformedProxyHeader, err := expandComputeRegionHealthCheckHttpHealthCheckProxyHeader(original["proxy_header"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["proxyHeader"] = transformedProxyHeader } transformedPortSpecification, err := expandComputeRegionHealthCheckHttpHealthCheckPortSpecification(original["port_specification"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portSpecification"] = transformedPortSpecification } return transformed, nil } -func expandComputeRegionHealthCheckHttpHealthCheckHost(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpHealthCheckHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpHealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpHealthCheckRequestPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpHealthCheckResponse(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpHealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpHealthCheckPortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpHealthCheckProxyHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeRegionHealthCheckHttpHealthCheckPortSpecification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpsHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpsHealthCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1613,84 +1653,84 @@ func expandComputeRegionHealthCheckHttpsHealthCheck(v interface{}, d TerraformRe transformedHost, err := expandComputeRegionHealthCheckHttpsHealthCheckHost(original["host"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["host"] = transformedHost } transformedRequestPath, err := expandComputeRegionHealthCheckHttpsHealthCheckRequestPath(original["request_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestPath"] = transformedRequestPath } transformedResponse, err := expandComputeRegionHealthCheckHttpsHealthCheckResponse(original["response"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["response"] = transformedResponse } transformedPort, err := expandComputeRegionHealthCheckHttpsHealthCheckPort(original["port"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedPortName, err := expandComputeRegionHealthCheckHttpsHealthCheckPortName(original["port_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portName"] = transformedPortName } transformedProxyHeader, err := expandComputeRegionHealthCheckHttpsHealthCheckProxyHeader(original["proxy_header"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["proxyHeader"] = transformedProxyHeader } transformedPortSpecification, err := expandComputeRegionHealthCheckHttpsHealthCheckPortSpecification(original["port_specification"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portSpecification"] = transformedPortSpecification } return transformed, nil } -func expandComputeRegionHealthCheckHttpsHealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpsHealthCheckHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpsHealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeRegionHealthCheckHttpsHealthCheckRequestPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpsHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpsHealthCheckResponse(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpsHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpsHealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpsHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpsHealthCheckPortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpsHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpsHealthCheckProxyHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttpsHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttpsHealthCheckPortSpecification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckTcpHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckTcpHealthCheck(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1702,73 +1742,73 @@ func expandComputeRegionHealthCheckTcpHealthCheck(v interface{}, d TerraformReso transformedRequest, err := expandComputeRegionHealthCheckTcpHealthCheckRequest(original["request"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequest); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequest); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["request"] = transformedRequest } transformedResponse, err := expandComputeRegionHealthCheckTcpHealthCheckResponse(original["response"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["response"] = transformedResponse } transformedPort, err := expandComputeRegionHealthCheckTcpHealthCheckPort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedPortName, err := expandComputeRegionHealthCheckTcpHealthCheckPortName(original["port_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portName"] = transformedPortName } transformedProxyHeader, err := expandComputeRegionHealthCheckTcpHealthCheckProxyHeader(original["proxy_header"], d, config) if err != nil { return nil, err - } else if 
val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["proxyHeader"] = transformedProxyHeader } transformedPortSpecification, err := expandComputeRegionHealthCheckTcpHealthCheckPortSpecification(original["port_specification"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portSpecification"] = transformedPortSpecification } return transformed, nil } -func expandComputeRegionHealthCheckTcpHealthCheckRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckTcpHealthCheckRequest(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckTcpHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckTcpHealthCheckResponse(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckTcpHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckTcpHealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckTcpHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckTcpHealthCheckPortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } 
-func expandComputeRegionHealthCheckTcpHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckTcpHealthCheckProxyHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckTcpHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckTcpHealthCheckPortSpecification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckSslHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckSslHealthCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1780,73 +1820,73 @@ func expandComputeRegionHealthCheckSslHealthCheck(v interface{}, d TerraformReso transformedRequest, err := expandComputeRegionHealthCheckSslHealthCheckRequest(original["request"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequest); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequest); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["request"] = transformedRequest } transformedResponse, err := expandComputeRegionHealthCheckSslHealthCheckResponse(original["response"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["response"] = transformedResponse } transformedPort, err := expandComputeRegionHealthCheckSslHealthCheckPort(original["port"], 
d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedPortName, err := expandComputeRegionHealthCheckSslHealthCheckPortName(original["port_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portName"] = transformedPortName } transformedProxyHeader, err := expandComputeRegionHealthCheckSslHealthCheckProxyHeader(original["proxy_header"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["proxyHeader"] = transformedProxyHeader } transformedPortSpecification, err := expandComputeRegionHealthCheckSslHealthCheckPortSpecification(original["port_specification"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portSpecification"] = transformedPortSpecification } return transformed, nil } -func expandComputeRegionHealthCheckSslHealthCheckRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckSslHealthCheckRequest(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckSslHealthCheckResponse(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { +func expandComputeRegionHealthCheckSslHealthCheckResponse(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckSslHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckSslHealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckSslHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckSslHealthCheckPortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckSslHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckSslHealthCheckProxyHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckSslHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckSslHealthCheckPortSpecification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttp2HealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttp2HealthCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1858,84 +1898,84 @@ func expandComputeRegionHealthCheckHttp2HealthCheck(v interface{}, d TerraformRe transformedHost, err := 
expandComputeRegionHealthCheckHttp2HealthCheckHost(original["host"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["host"] = transformedHost } transformedRequestPath, err := expandComputeRegionHealthCheckHttp2HealthCheckRequestPath(original["request_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestPath"] = transformedRequestPath } transformedResponse, err := expandComputeRegionHealthCheckHttp2HealthCheckResponse(original["response"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["response"] = transformedResponse } transformedPort, err := expandComputeRegionHealthCheckHttp2HealthCheckPort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedPortName, err := expandComputeRegionHealthCheckHttp2HealthCheckPortName(original["port_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portName"] = transformedPortName } transformedProxyHeader, err := 
expandComputeRegionHealthCheckHttp2HealthCheckProxyHeader(original["proxy_header"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProxyHeader); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["proxyHeader"] = transformedProxyHeader } transformedPortSpecification, err := expandComputeRegionHealthCheckHttp2HealthCheckPortSpecification(original["port_specification"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portSpecification"] = transformedPortSpecification } return transformed, nil } -func expandComputeRegionHealthCheckHttp2HealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttp2HealthCheckHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttp2HealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttp2HealthCheckRequestPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttp2HealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttp2HealthCheckResponse(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttp2HealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeRegionHealthCheckHttp2HealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttp2HealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttp2HealthCheckPortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttp2HealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttp2HealthCheckProxyHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckHttp2HealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckHttp2HealthCheckPortSpecification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckGrpcHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckGrpcHealthCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1947,51 +1987,51 @@ func expandComputeRegionHealthCheckGrpcHealthCheck(v interface{}, d TerraformRes transformedPort, err := expandComputeRegionHealthCheckGrpcHealthCheckPort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } 
transformedPortName, err := expandComputeRegionHealthCheckGrpcHealthCheckPortName(original["port_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portName"] = transformedPortName } transformedPortSpecification, err := expandComputeRegionHealthCheckGrpcHealthCheckPortSpecification(original["port_specification"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPortSpecification); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["portSpecification"] = transformedPortSpecification } transformedGrpcServiceName, err := expandComputeRegionHealthCheckGrpcHealthCheckGrpcServiceName(original["grpc_service_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGrpcServiceName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGrpcServiceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["grpcServiceName"] = transformedGrpcServiceName } return transformed, nil } -func expandComputeRegionHealthCheckGrpcHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckGrpcHealthCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckGrpcHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckGrpcHealthCheckPortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeRegionHealthCheckGrpcHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckGrpcHealthCheckPortSpecification(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckGrpcHealthCheckGrpcServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckGrpcHealthCheckGrpcServiceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckLogConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2003,19 +2043,19 @@ func expandComputeRegionHealthCheckLogConfig(v interface{}, d TerraformResourceD transformedEnable, err := expandComputeRegionHealthCheckLogConfigEnable(original["enable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enable"] = transformedEnable } return transformed, nil } -func expandComputeRegionHealthCheckLogConfigEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionHealthCheckLogConfigEnable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionHealthCheckRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", 
v.(string), "project", d, config, true) +func expandComputeRegionHealthCheckRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for region: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_health_check_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_health_check_sweeper.go new file mode 100644 index 0000000000..352f78729b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_health_check_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRegionHealthCheck", testSweepComputeRegionHealthCheck) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRegionHealthCheck(region string) error { + resourceName := "ComputeRegionHealthCheck" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/healthChecks", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != 
nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/healthChecks/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_instance_group_manager.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_group_manager.go similarity index 90% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_instance_group_manager.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_group_manager.go index 757bf640fd..058b538729 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_instance_group_manager.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_group_manager.go @@ -1,8 +1,11 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "log" + "regexp" "strings" "time" @@ -10,6 +13,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/compute/v1" ) @@ -51,7 +57,7 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource { "instance_template": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: compareSelfLinkRelativePathsIgnoreParams, Description: `The full URL to an instance template from which all new instances of this version will be created.`, }, @@ -156,7 +162,7 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, - Set: selfLinkRelativePathHash, + Set: tpgresource.SelfLinkRelativePathHash, Description: `The full URL of all target pools to which 
new instances in the group are added. Updating the target pools attribute does not affect existing instances.`, }, "target_size": { @@ -202,7 +208,7 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource { "health_check": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, Description: `The health check resource that signals autohealing.`, }, @@ -225,7 +231,7 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource { Set: hashZoneFromSelfLinkOrResourceName, Elem: &schema.Schema{ Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, }, }, @@ -302,14 +308,14 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{"PROACTIVE", "NONE", ""}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("PROACTIVE"), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("PROACTIVE"), Description: `The instance redistribution policy for regional managed instance groups. Valid values are: "PROACTIVE", "NONE". If PROACTIVE (default), the group attempts to maintain an even distribution of VM instances across zones in the region. If NONE, proactive redistribution is disabled.`, }, "replacement_method": { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{"RECREATE", "SUBSTITUTE", ""}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("SUBSTITUTE"), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("SUBSTITUTE"), Description: `The instance replacement method for regional managed instance groups. Valid values are: "RECREATE", "SUBSTITUTE". If SUBSTITUTE (default), the group replaces VM instances with new instances that have randomly generated names. If RECREATE, instance names are preserved. 
You must also set max_unavailable_fixed or max_unavailable_percent to be greater than 0.`, }, }, @@ -400,18 +406,18 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource { } func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -423,7 +429,7 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met TargetSize: int64(d.Get("target_size").(int)), ListManagedInstancesResults: d.Get("list_managed_instances_results").(string), NamedPorts: getNamedPortsBeta(d.Get("named_port").(*schema.Set).List()), - TargetPools: convertStringSet(d.Get("target_pools").(*schema.Set)), + TargetPools: tpgresource.ConvertStringSet(d.Get("target_pools").(*schema.Set)), AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), Versions: expandVersions(d.Get("version").([]interface{})), UpdatePolicy: expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})), @@ -439,7 +445,7 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met return fmt.Errorf("Error creating RegionInstanceGroupManager: %s", err) } - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -479,19 +485,19 @@ func computeRIGMWaitForInstanceStatus(d 
*schema.ResourceData, meta interface{}) type getInstanceManagerFunc func(*schema.ResourceData, interface{}) (*compute.InstanceGroupManager, error) func getRegionalManager(d *schema.ResourceData, meta interface{}) (*compute.InstanceGroupManager, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return nil, err } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } @@ -499,7 +505,7 @@ func getRegionalManager(d *schema.ResourceData, meta interface{}) (*compute.Inst name := d.Get("name").(string) manager, err := config.NewComputeClient(userAgent).RegionInstanceGroupManagers.Get(project, region, name).Do() if err != nil { - return nil, handleNotFoundError(err, d, fmt.Sprintf("Region Instance Manager %q", name)) + return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Region Instance Manager %q", name)) } return manager, nil @@ -512,6 +518,12 @@ func waitForInstancesRefreshFunc(f getInstanceManagerFunc, waitForUpdates bool, log.Printf("[WARNING] Error in fetching manager while waiting for instances to come up: %s\n", err) return nil, "error", err } + if m == nil { + // getManager/getRegional manager call handleNotFoundError, which will return a nil error and nil object in the case + // that the original error was a 404. 
if m == nil here, we will assume that it was not found return an "instance manager not found" + // error so that we can parse it later on and handle it there + return nil, "error", fmt.Errorf("instance manager not found") + } if m.Status.IsStable { if waitForUpdates { // waitForUpdates waits for versions to be reached and per instance configs to be updated (if present) @@ -535,7 +547,7 @@ func waitForInstancesRefreshFunc(f getInstanceManagerFunc, waitForUpdates bool, } func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) manager, err := getRegionalManager(d, meta) if err != nil { @@ -547,7 +559,7 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta return nil } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -558,7 +570,7 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta if err := d.Set("name", manager.Name); err != nil { return fmt.Errorf("Error setting name: %s", err) } - if err := d.Set("region", GetResourceNameFromSelfLink(manager.Region)); err != nil { + if err := d.Set("region", tpgresource.GetResourceNameFromSelfLink(manager.Region)); err != nil { return fmt.Errorf("Error setting region: %s", err) } if err := d.Set("description", manager.Description); err != nil { @@ -573,7 +585,7 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta if err := d.Set("list_managed_instances_results", manager.ListManagedInstancesResults); err != nil { return fmt.Errorf("Error setting list_managed_instances_results: %s", err) } - if err := d.Set("target_pools", mapStringArr(manager.TargetPools, ConvertSelfLinkToV1)); err != nil { + if err := d.Set("target_pools", tpgresource.MapStringArr(manager.TargetPools, tpgresource.ConvertSelfLinkToV1)); err != nil { return fmt.Errorf("Error setting 
target_pools in state: %s", err.Error()) } if err := d.Set("named_port", flattenNamedPortsBeta(manager.NamedPorts)); err != nil { @@ -582,7 +594,7 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta if err := d.Set("fingerprint", manager.Fingerprint); err != nil { return fmt.Errorf("Error setting fingerprint: %s", err) } - if err := d.Set("instance_group", ConvertSelfLinkToV1(manager.InstanceGroup)); err != nil { + if err := d.Set("instance_group", tpgresource.ConvertSelfLinkToV1(manager.InstanceGroup)); err != nil { return fmt.Errorf("Error setting instance_group: %s", err) } if err := d.Set("distribution_policy_zones", flattenDistributionPolicy(manager.DistributionPolicy)); err != nil { @@ -591,7 +603,7 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta if err := d.Set("distribution_policy_target_shape", manager.DistributionPolicy.TargetShape); err != nil { return err } - if err := d.Set("self_link", ConvertSelfLinkToV1(manager.SelfLink)); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(manager.SelfLink)); err != nil { return fmt.Errorf("Error setting self_link: %s", err) } @@ -621,19 +633,19 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta } func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -644,7 +656,7 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met var change bool if 
d.HasChange("target_pools") { - updatedManager.TargetPools = convertStringSet(d.Get("target_pools").(*schema.Set)) + updatedManager.TargetPools = tpgresource.ConvertStringSet(d.Get("target_pools").(*schema.Set)) updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetPools") change = true } @@ -739,26 +751,34 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met } func resourceComputeRegionInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) if d.Get("wait_for_instances").(bool) { err := computeRIGMWaitForInstanceStatus(d, meta) if err != nil { + notFound, reErr := regexp.MatchString(`not found`, err.Error()) + if reErr != nil { + return reErr + } + if notFound { + // manager was not found, we can exit gracefully + return nil + } return err } } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -885,7 +905,7 @@ func flattenDistributionPolicy(distributionPolicy *compute.DistributionPolicy) [ if distributionPolicy != nil { for _, zone := range distributionPolicy.Zones { - zones = append(zones, GetResourceNameFromSelfLink(zone.Zone)) + zones = append(zones, tpgresource.GetResourceNameFromSelfLink(zone.Zone)) } } @@ -896,7 +916,7 @@ func hashZoneFromSelfLinkOrResourceName(value interface{}) int { parts := strings.Split(value.(string), "/") resource := parts[len(parts)-1] - return hashcode(resource) + return tpgresource.Hashcode(resource) } func resourceRegionInstanceGroupManagerStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { @@ 
-906,13 +926,13 @@ func resourceRegionInstanceGroupManagerStateImporter(d *schema.ResourceData, met if err := d.Set("wait_for_instances_status", "STABLE"); err != nil { return nil, fmt.Errorf("Error setting wait_for_instances_status: %s", err) } - config := meta.(*Config) - if err := parseImportId([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_group_manager_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_group_manager_sweeper.go new file mode 100644 index 0000000000..282dc999c4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_group_manager_sweeper.go @@ -0,0 +1,62 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepers("ComputeRegionInstanceGroupManager", testSweepComputeRegionInstanceGroupManager) +} + +// At the time of writing, the CI only passes us-central1 as the region. +// Since we can read all instances across zones, we don't really use this param. +func testSweepComputeRegionInstanceGroupManager(region string) error { + resourceName := "ComputeRegionInstanceGroupManager" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + found, err := config.NewComputeClient(config.UserAgent).RegionInstanceGroupManagers.List(config.Project, region).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request: %s", err) + return nil + } + + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, rigm := range found.Items { + if !sweeper.IsSweepableTestResource(rigm.Name) { + nonPrefixCount++ + continue + } + + // Don't wait on operations as we may have a lot to delete + _, err := config.NewComputeClient(config.UserAgent).RegionInstanceGroupManagers.Delete(config.Project, region, rigm.Name).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting %s resource %s : %s", resourceName, rigm.Name, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, rigm.Name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_template.go new file mode 100644 index 0000000000..8db869f065 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_instance_template.go @@ -0,0 +1,3 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_endpoint_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_endpoint_group.go new file mode 100644 index 0000000000..73d12c8df8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_endpoint_group.go @@ -0,0 +1,801 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeRegionNetworkEndpointGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionNetworkEndpointGroupCreate, + Read: resourceComputeRegionNetworkEndpointGroupRead, + Delete: resourceComputeRegionNetworkEndpointGroupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRegionNetworkEndpointGroupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the resource; provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "region": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the region where the Serverless NEGs Reside.`, + }, + "app_engine": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Only valid when networkEndpointType is "SERVERLESS". +Only one of cloud_run, app_engine, cloud_function or serverless_deployment may be set.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Optional serving service. +The service name must be 1-63 characters long, and comply with RFC1035. +Example value: "default", "my-service".`, + }, + "url_mask": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A template to parse service and version fields from a request URL. +URL mask allows for routing to multiple App Engine services without +having to create multiple Network Endpoint Groups and backend services. + +For example, the request URLs "foo1-dot-appname.appspot.com/v1" and +"foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with +URL mask "-dot-appname.appspot.com/". The URL mask will parse +them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively.`, + }, + "version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Optional serving version. +The version must be 1-63 characters long, and comply with RFC1035. 
+Example value: "v1", "v2".`, + }, + }, + }, + ConflictsWith: []string{"cloud_run", "cloud_function"}, + }, + "cloud_function": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Only valid when networkEndpointType is "SERVERLESS". +Only one of cloud_run, app_engine, cloud_function or serverless_deployment may be set.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "function": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A user-defined name of the Cloud Function. +The function name is case-sensitive and must be 1-63 characters long. +Example value: "func1".`, + AtLeastOneOf: []string{"cloud_function.0.function", "cloud_function.0.url_mask"}, + }, + "url_mask": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A template to parse function field from a request URL. URL mask allows +for routing to multiple Cloud Functions without having to create +multiple Network Endpoint Groups and backend services. + +For example, request URLs "mydomain.com/function1" and "mydomain.com/function2" +can be backed by the same Serverless NEG with URL mask "/". The URL mask +will parse them to { function = "function1" } and { function = "function2" } respectively.`, + AtLeastOneOf: []string{"cloud_function.0.function", "cloud_function.0.url_mask"}, + }, + }, + }, + ConflictsWith: []string{"cloud_run", "app_engine"}, + }, + "cloud_run": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Only valid when networkEndpointType is "SERVERLESS". +Only one of cloud_run, app_engine, cloud_function or serverless_deployment may be set.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Cloud Run service is the main resource of Cloud Run. +The service must be 1-63 characters long, and comply with RFC1035. 
+Example value: "run-service".`, + AtLeastOneOf: []string{"cloud_run.0.service", "cloud_run.0.url_mask"}, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Cloud Run tag represents the "named-revision" to provide +additional fine-grained traffic routing information. +The tag must be 1-63 characters long, and comply with RFC1035. +Example value: "revision-0010".`, + }, + "url_mask": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A template to parse service and tag fields from a request URL. +URL mask allows for routing to multiple Run services without having +to create multiple network endpoint groups and backend services. + +For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" +an be backed by the same Serverless Network Endpoint Group (NEG) with +URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } +and { service="bar2", tag="foo2" } respectively.`, + AtLeastOneOf: []string{"cloud_run.0.service", "cloud_run.0.url_mask"}, + }, + }, + }, + ConflictsWith: []string{"cloud_function", "app_engine"}, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource. Provide this property when +you create the resource.`, + }, + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `This field is only used for PSC. +The URL of the network to which all network endpoints in the NEG belong. Uses +"default" project network if unspecified.`, + }, + "network_endpoint_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"SERVERLESS", "PRIVATE_SERVICE_CONNECT", ""}), + Description: `Type of network endpoints in this network endpoint group. 
Defaults to SERVERLESS Default value: "SERVERLESS" Possible values: ["SERVERLESS", "PRIVATE_SERVICE_CONNECT"]`, + Default: "SERVERLESS", + }, + "psc_target_service": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The target service url used to set up private service connection to +a Google API or a PSC Producer Service Attachment.`, + }, + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `This field is only used for PSC. +Optional URL of the subnetwork to which all network endpoints in the NEG belong.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRegionNetworkEndpointGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandComputeRegionNetworkEndpointGroupName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandComputeRegionNetworkEndpointGroupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + networkEndpointTypeProp, err := expandComputeRegionNetworkEndpointGroupNetworkEndpointType(d.Get("network_endpoint_type"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("network_endpoint_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkEndpointTypeProp)) && (ok || !reflect.DeepEqual(v, networkEndpointTypeProp)) { + obj["networkEndpointType"] = networkEndpointTypeProp + } + pscTargetServiceProp, err := expandComputeRegionNetworkEndpointGroupPscTargetService(d.Get("psc_target_service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("psc_target_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(pscTargetServiceProp)) && (ok || !reflect.DeepEqual(v, pscTargetServiceProp)) { + obj["pscTargetService"] = pscTargetServiceProp + } + networkProp, err := expandComputeRegionNetworkEndpointGroupNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + subnetworkProp, err := expandComputeRegionNetworkEndpointGroupSubnetwork(d.Get("subnetwork"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subnetwork"); !tpgresource.IsEmptyValue(reflect.ValueOf(subnetworkProp)) && (ok || !reflect.DeepEqual(v, subnetworkProp)) { + obj["subnetwork"] = subnetworkProp + } + cloudRunProp, err := expandComputeRegionNetworkEndpointGroupCloudRun(d.Get("cloud_run"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cloud_run"); !tpgresource.IsEmptyValue(reflect.ValueOf(cloudRunProp)) && (ok || !reflect.DeepEqual(v, cloudRunProp)) { + obj["cloudRun"] = cloudRunProp + } + appEngineProp, err := expandComputeRegionNetworkEndpointGroupAppEngine(d.Get("app_engine"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("app_engine"); !tpgresource.IsEmptyValue(reflect.ValueOf(appEngineProp)) && (ok || !reflect.DeepEqual(v, appEngineProp)) { + obj["appEngine"] = appEngineProp + } + cloudFunctionProp, err := 
expandComputeRegionNetworkEndpointGroupCloudFunction(d.Get("cloud_function"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cloud_function"); !tpgresource.IsEmptyValue(reflect.ValueOf(cloudFunctionProp)) && (ok || !reflect.DeepEqual(v, cloudFunctionProp)) { + obj["cloudFunction"] = cloudFunctionProp + } + regionProp, err := expandComputeRegionNetworkEndpointGroupRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkEndpointGroups") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RegionNetworkEndpointGroup: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionNetworkEndpointGroup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating RegionNetworkEndpointGroup: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating RegionNetworkEndpointGroup", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != 
nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create RegionNetworkEndpointGroup: %s", err) + } + + log.Printf("[DEBUG] Finished creating RegionNetworkEndpointGroup %q: %#v", d.Id(), res) + + return resourceComputeRegionNetworkEndpointGroupRead(d, meta) +} + +func resourceComputeRegionNetworkEndpointGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionNetworkEndpointGroup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionNetworkEndpointGroup %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + + if err := d.Set("name", flattenComputeRegionNetworkEndpointGroupName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + if err := d.Set("description", flattenComputeRegionNetworkEndpointGroupDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + if err := 
d.Set("network_endpoint_type", flattenComputeRegionNetworkEndpointGroupNetworkEndpointType(res["networkEndpointType"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + if err := d.Set("psc_target_service", flattenComputeRegionNetworkEndpointGroupPscTargetService(res["pscTargetService"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + if err := d.Set("network", flattenComputeRegionNetworkEndpointGroupNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + if err := d.Set("subnetwork", flattenComputeRegionNetworkEndpointGroupSubnetwork(res["subnetwork"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + if err := d.Set("cloud_run", flattenComputeRegionNetworkEndpointGroupCloudRun(res["cloudRun"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + if err := d.Set("app_engine", flattenComputeRegionNetworkEndpointGroupAppEngine(res["appEngine"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + if err := d.Set("cloud_function", flattenComputeRegionNetworkEndpointGroupCloudFunction(res["cloudFunction"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + if err := d.Set("region", flattenComputeRegionNetworkEndpointGroupRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) + } + + return nil +} + +func resourceComputeRegionNetworkEndpointGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionNetworkEndpointGroup: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting RegionNetworkEndpointGroup %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RegionNetworkEndpointGroup") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting RegionNetworkEndpointGroup", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting RegionNetworkEndpointGroup %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRegionNetworkEndpointGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/networkEndpointGroups/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeRegionNetworkEndpointGroupName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkEndpointGroupDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkEndpointGroupNetworkEndpointType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkEndpointGroupPscTargetService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkEndpointGroupNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRegionNetworkEndpointGroupSubnetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRegionNetworkEndpointGroupCloudRun(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["service"] = + flattenComputeRegionNetworkEndpointGroupCloudRunService(original["service"], d, config) + transformed["tag"] = + flattenComputeRegionNetworkEndpointGroupCloudRunTag(original["tag"], d, config) + transformed["url_mask"] = + flattenComputeRegionNetworkEndpointGroupCloudRunUrlMask(original["urlMask"], d, config) + return []interface{}{transformed} +} +func 
flattenComputeRegionNetworkEndpointGroupCloudRunService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkEndpointGroupCloudRunTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkEndpointGroupCloudRunUrlMask(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkEndpointGroupAppEngine(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["service"] = + flattenComputeRegionNetworkEndpointGroupAppEngineService(original["service"], d, config) + transformed["version"] = + flattenComputeRegionNetworkEndpointGroupAppEngineVersion(original["version"], d, config) + transformed["url_mask"] = + flattenComputeRegionNetworkEndpointGroupAppEngineUrlMask(original["urlMask"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionNetworkEndpointGroupAppEngineService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkEndpointGroupAppEngineVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkEndpointGroupAppEngineUrlMask(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkEndpointGroupCloudFunction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["function"] = + 
flattenComputeRegionNetworkEndpointGroupCloudFunctionFunction(original["function"], d, config) + transformed["url_mask"] = + flattenComputeRegionNetworkEndpointGroupCloudFunctionUrlMask(original["urlMask"], d, config) + return []interface{}{transformed} +} +func flattenComputeRegionNetworkEndpointGroupCloudFunctionFunction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkEndpointGroupCloudFunctionUrlMask(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionNetworkEndpointGroupRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func expandComputeRegionNetworkEndpointGroupName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkEndpointGroupDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkEndpointGroupNetworkEndpointType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkEndpointGroupPscTargetService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkEndpointGroupNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeRegionNetworkEndpointGroupSubnetwork(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for subnetwork: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeRegionNetworkEndpointGroupCloudRun(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedService, err := expandComputeRegionNetworkEndpointGroupCloudRunService(original["service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["service"] = transformedService + } + + transformedTag, err := expandComputeRegionNetworkEndpointGroupCloudRunTag(original["tag"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tag"] = transformedTag + } + + transformedUrlMask, err := expandComputeRegionNetworkEndpointGroupCloudRunUrlMask(original["url_mask"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUrlMask); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["urlMask"] = transformedUrlMask + } + + return transformed, nil +} + +func expandComputeRegionNetworkEndpointGroupCloudRunService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkEndpointGroupCloudRunTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} 
+ +func expandComputeRegionNetworkEndpointGroupCloudRunUrlMask(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkEndpointGroupAppEngine(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedService, err := expandComputeRegionNetworkEndpointGroupAppEngineService(original["service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["service"] = transformedService + } + + transformedVersion, err := expandComputeRegionNetworkEndpointGroupAppEngineVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedUrlMask, err := expandComputeRegionNetworkEndpointGroupAppEngineUrlMask(original["url_mask"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUrlMask); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["urlMask"] = transformedUrlMask + } + + return transformed, nil +} + +func expandComputeRegionNetworkEndpointGroupAppEngineService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkEndpointGroupAppEngineVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandComputeRegionNetworkEndpointGroupAppEngineUrlMask(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkEndpointGroupCloudFunction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFunction, err := expandComputeRegionNetworkEndpointGroupCloudFunctionFunction(original["function"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFunction); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["function"] = transformedFunction + } + + transformedUrlMask, err := expandComputeRegionNetworkEndpointGroupCloudFunctionUrlMask(original["url_mask"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUrlMask); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["urlMask"] = transformedUrlMask + } + + return transformed, nil +} + +func expandComputeRegionNetworkEndpointGroupCloudFunctionFunction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkEndpointGroupCloudFunctionUrlMask(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionNetworkEndpointGroupRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_endpoint_group_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_endpoint_group_sweeper.go new file mode 100644 index 0000000000..53815f68c3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_endpoint_group_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRegionNetworkEndpointGroup", testSweepComputeRegionNetworkEndpointGroup) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRegionNetworkEndpointGroup(region string) error { + resourceName := "ComputeRegionNetworkEndpointGroup" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/networkEndpointGroups", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + 
UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_network_firewall_policy.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_network_firewall_policy.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy.go index 5de5769f8e..4a597ffa2a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_network_firewall_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceComputeRegionNetworkFirewallPolicy() *schema.Resource { @@ -63,7 +70,7 @@ func ResourceComputeRegionNetworkFirewallPolicy() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the 
resource", }, @@ -115,12 +122,12 @@ func ResourceComputeRegionNetworkFirewallPolicy() *schema.Resource { } func resourceComputeRegionNetworkFirewallPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -132,23 +139,23 @@ func resourceComputeRegionNetworkFirewallPolicyCreate(d *schema.ResourceData, me Location: dcl.String(region), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{name}}") if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -170,12 +177,12 @@ func 
resourceComputeRegionNetworkFirewallPolicyCreate(d *schema.ResourceData, me } func resourceComputeRegionNetworkFirewallPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -187,17 +194,17 @@ func resourceComputeRegionNetworkFirewallPolicyRead(d *schema.ResourceData, meta Location: dcl.String(region), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -206,7 +213,7 @@ func resourceComputeRegionNetworkFirewallPolicyRead(d *schema.ResourceData, meta res, err := client.GetNetworkFirewallPolicy(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ComputeRegionNetworkFirewallPolicy %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("name", res.Name); err != nil { @@ -243,12 +250,12 @@ func 
resourceComputeRegionNetworkFirewallPolicyRead(d *schema.ResourceData, meta return nil } func resourceComputeRegionNetworkFirewallPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -259,19 +266,19 @@ func resourceComputeRegionNetworkFirewallPolicyUpdate(d *schema.ResourceData, me Project: dcl.String(project), Location: dcl.String(region), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -293,12 +300,12 @@ func resourceComputeRegionNetworkFirewallPolicyUpdate(d *schema.ResourceData, me } func resourceComputeRegionNetworkFirewallPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if 
err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -311,17 +318,17 @@ func resourceComputeRegionNetworkFirewallPolicyDelete(d *schema.ResourceData, me } log.Printf("[DEBUG] Deleting NetworkFirewallPolicy %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -336,9 +343,9 @@ func resourceComputeRegionNetworkFirewallPolicyDelete(d *schema.ResourceData, me } func resourceComputeRegionNetworkFirewallPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/regions/(?P[^/]+)/firewallPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -348,7 +355,7 @@ func resourceComputeRegionNetworkFirewallPolicyImport(d *schema.ResourceData, me } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{name}}") + id, err := 
tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_network_firewall_policy_association.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_association.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_network_firewall_policy_association.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_association.go index d725f71a67..7c18fb6ecb 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_network_firewall_policy_association.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_association.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceComputeRegionNetworkFirewallPolicyAssociation() *schema.Resource { @@ -47,7 +54,7 @@ func ResourceComputeRegionNetworkFirewallPolicyAssociation() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The target that the firewall policy is attached to.", }, @@ -55,7 +62,7 @@ func ResourceComputeRegionNetworkFirewallPolicyAssociation() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The firewall policy ID of the association.", }, @@ -71,7 +78,7 @@ func ResourceComputeRegionNetworkFirewallPolicyAssociation() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -93,12 +100,12 @@ func ResourceComputeRegionNetworkFirewallPolicyAssociation() *schema.Resource { } func resourceComputeRegionNetworkFirewallPolicyAssociationCreate(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -111,23 +118,23 @@ func resourceComputeRegionNetworkFirewallPolicyAssociationCreate(d *schema.Resou Location: dcl.String(region), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/associations/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/associations/{{name}}") if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -149,12 +156,12 @@ func resourceComputeRegionNetworkFirewallPolicyAssociationCreate(d *schema.Resou } func 
resourceComputeRegionNetworkFirewallPolicyAssociationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -167,17 +174,17 @@ func resourceComputeRegionNetworkFirewallPolicyAssociationRead(d *schema.Resourc Location: dcl.String(region), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -186,7 +193,7 @@ func resourceComputeRegionNetworkFirewallPolicyAssociationRead(d *schema.Resourc res, err := client.GetNetworkFirewallPolicyAssociation(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ComputeRegionNetworkFirewallPolicyAssociation %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("attachment_target", res.AttachmentTarget); err != nil { @@ -212,12 +219,12 @@ func 
resourceComputeRegionNetworkFirewallPolicyAssociationRead(d *schema.Resourc } func resourceComputeRegionNetworkFirewallPolicyAssociationDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -231,17 +238,17 @@ func resourceComputeRegionNetworkFirewallPolicyAssociationDelete(d *schema.Resou } log.Printf("[DEBUG] Deleting NetworkFirewallPolicyAssociation %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -256,9 +263,9 @@ func resourceComputeRegionNetworkFirewallPolicyAssociationDelete(d *schema.Resou } func resourceComputeRegionNetworkFirewallPolicyAssociationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ 
"projects/(?P[^/]+)/regions/(?P[^/]+)/firewallPolicies/(?P[^/]+)/associations/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", }, d, config); err != nil { @@ -266,7 +273,7 @@ func resourceComputeRegionNetworkFirewallPolicyAssociationImport(d *schema.Resou } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/associations/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/associations/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_network_firewall_policy_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_rule.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_network_firewall_policy_rule.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_rule.go index 558fc54cca..036329f79d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_network_firewall_policy_rule.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_rule.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceComputeRegionNetworkFirewallPolicyRule() *schema.Resource { @@ -48,7 +55,7 @@ func ResourceComputeRegionNetworkFirewallPolicyRule() *schema.Resource { "action": { Type: schema.TypeString, Required: true, - Description: "The Action to perform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", + Description: "The Action to perform when the client connection triggers the rule. 
Valid actions are \"allow\", \"deny\" and \"goto_next\".", }, "direction": { @@ -61,7 +68,7 @@ func ResourceComputeRegionNetworkFirewallPolicyRule() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The firewall policy of the resource.", }, @@ -103,7 +110,7 @@ func ResourceComputeRegionNetworkFirewallPolicyRule() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -160,6 +167,20 @@ func ComputeRegionNetworkFirewallPolicyRuleMatchSchema() *schema.Resource { Elem: ComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsSchema(), }, + "dest_address_groups": { + Type: schema.TypeList, + Optional: true, + Description: "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10. Destination address groups is only supported in Egress rules.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "dest_fqdns": { + Type: schema.TypeList, + Optional: true, + Description: "Domain names that will be used to match against the resolved domain name of destination of traffic. Can only be specified if DIRECTION is egress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "dest_ip_ranges": { Type: schema.TypeList, Optional: true, @@ -167,6 +188,34 @@ func ComputeRegionNetworkFirewallPolicyRuleMatchSchema() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, + "dest_region_codes": { + Type: schema.TypeList, + Optional: true, + Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. 
Can only be specified if DIRECTION is egress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "dest_threat_intelligences": { + Type: schema.TypeList, + Optional: true, + Description: "Name of the Google Cloud Threat Intelligence list.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "src_address_groups": { + Type: schema.TypeList, + Optional: true, + Description: "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10. Source address groups is only supported in Ingress rules.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "src_fqdns": { + Type: schema.TypeList, + Optional: true, + Description: "Domain names that will be used to match against the resolved domain name of source of traffic. Can only be specified if DIRECTION is ingress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "src_ip_ranges": { Type: schema.TypeList, Optional: true, @@ -174,12 +223,26 @@ func ComputeRegionNetworkFirewallPolicyRuleMatchSchema() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, + "src_region_codes": { + Type: schema.TypeList, + Optional: true, + Description: "The Unicode country codes whose IP addresses will be used to match against the source of traffic. Can only be specified if DIRECTION is ingress.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "src_secure_tags": { Type: schema.TypeList, Optional: true, Description: "List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. 
Maximum number of source tag values allowed is 256.", Elem: ComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsSchema(), }, + + "src_threat_intelligences": { + Type: schema.TypeList, + Optional: true, + Description: "Name of the Google Cloud Threat Intelligence list.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, } } @@ -209,7 +272,7 @@ func ComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsSchema() *schema.Re "name": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Name of the secure tag, created with TagManager's TagValue API. @pattern tagValues/[0-9]+", }, @@ -228,7 +291,7 @@ func ComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsSchema() *schema.Reso "name": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Name of the secure tag, created with TagManager's TagValue API. 
@pattern tagValues/[0-9]+", }, @@ -242,12 +305,12 @@ func ComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsSchema() *schema.Reso } func resourceComputeRegionNetworkFirewallPolicyRuleCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -265,26 +328,26 @@ func resourceComputeRegionNetworkFirewallPolicyRuleCreate(d *schema.ResourceData Location: dcl.String(region), RuleName: dcl.String(d.Get("rule_name").(string)), TargetSecureTags: expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), + TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), } - id, err := replaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/{{priority}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/{{priority}}") if err != nil { return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := 
replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -306,12 +369,12 @@ func resourceComputeRegionNetworkFirewallPolicyRuleCreate(d *schema.ResourceData } func resourceComputeRegionNetworkFirewallPolicyRuleRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -329,20 +392,20 @@ func resourceComputeRegionNetworkFirewallPolicyRuleRead(d *schema.ResourceData, Location: dcl.String(region), RuleName: dcl.String(d.Get("rule_name").(string)), TargetSecureTags: expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), + TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, 
billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -351,7 +414,7 @@ func resourceComputeRegionNetworkFirewallPolicyRuleRead(d *schema.ResourceData, res, err := client.GetNetworkFirewallPolicyRule(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ComputeRegionNetworkFirewallPolicyRule %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("action", res.Action); err != nil { @@ -403,12 +466,12 @@ func resourceComputeRegionNetworkFirewallPolicyRuleRead(d *schema.ResourceData, return nil } func resourceComputeRegionNetworkFirewallPolicyRuleUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -426,21 +489,21 @@ func resourceComputeRegionNetworkFirewallPolicyRuleUpdate(d *schema.ResourceData Location: dcl.String(region), RuleName: dcl.String(d.Get("rule_name").(string)), TargetSecureTags: expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), + TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project 
value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -462,12 +525,12 @@ func resourceComputeRegionNetworkFirewallPolicyRuleUpdate(d *schema.ResourceData } func resourceComputeRegionNetworkFirewallPolicyRuleDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -485,21 +548,21 @@ func resourceComputeRegionNetworkFirewallPolicyRuleDelete(d *schema.ResourceData Location: dcl.String(region), RuleName: dcl.String(d.Get("rule_name").(string)), TargetSecureTags: expandComputeRegionNetworkFirewallPolicyRuleTargetSecureTagsArray(d.Get("target_secure_tags")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), + TargetServiceAccounts: tpgdclresource.ExpandStringArray(d.Get("target_service_accounts")), } log.Printf("[DEBUG] Deleting NetworkFirewallPolicyRule %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, 
config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -514,9 +577,9 @@ func resourceComputeRegionNetworkFirewallPolicyRuleDelete(d *schema.ResourceData } func resourceComputeRegionNetworkFirewallPolicyRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/regions/(?P[^/]+)/firewallPolicies/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", @@ -526,7 +589,7 @@ func resourceComputeRegionNetworkFirewallPolicyRuleImport(d *schema.ResourceData } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/{{priority}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/firewallPolicies/{{firewall_policy}}/{{priority}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -545,10 +608,18 @@ func expandComputeRegionNetworkFirewallPolicyRuleMatch(o interface{}) *compute.N } obj := objArr[0].(map[string]interface{}) return &compute.NetworkFirewallPolicyRuleMatch{ - Layer4Configs: expandComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(obj["layer4_configs"]), - DestIPRanges: expandStringArray(obj["dest_ip_ranges"]), - 
SrcIPRanges: expandStringArray(obj["src_ip_ranges"]), - SrcSecureTags: expandComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(obj["src_secure_tags"]), + Layer4Configs: expandComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(obj["layer4_configs"]), + DestAddressGroups: tpgdclresource.ExpandStringArray(obj["dest_address_groups"]), + DestFqdns: tpgdclresource.ExpandStringArray(obj["dest_fqdns"]), + DestIPRanges: tpgdclresource.ExpandStringArray(obj["dest_ip_ranges"]), + DestRegionCodes: tpgdclresource.ExpandStringArray(obj["dest_region_codes"]), + DestThreatIntelligences: tpgdclresource.ExpandStringArray(obj["dest_threat_intelligences"]), + SrcAddressGroups: tpgdclresource.ExpandStringArray(obj["src_address_groups"]), + SrcFqdns: tpgdclresource.ExpandStringArray(obj["src_fqdns"]), + SrcIPRanges: tpgdclresource.ExpandStringArray(obj["src_ip_ranges"]), + SrcRegionCodes: tpgdclresource.ExpandStringArray(obj["src_region_codes"]), + SrcSecureTags: expandComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(obj["src_secure_tags"]), + SrcThreatIntelligences: tpgdclresource.ExpandStringArray(obj["src_threat_intelligences"]), } } @@ -557,10 +628,18 @@ func flattenComputeRegionNetworkFirewallPolicyRuleMatch(obj *compute.NetworkFire return nil } transformed := map[string]interface{}{ - "layer4_configs": flattenComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(obj.Layer4Configs), - "dest_ip_ranges": obj.DestIPRanges, - "src_ip_ranges": obj.SrcIPRanges, - "src_secure_tags": flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(obj.SrcSecureTags), + "layer4_configs": flattenComputeRegionNetworkFirewallPolicyRuleMatchLayer4ConfigsArray(obj.Layer4Configs), + "dest_address_groups": obj.DestAddressGroups, + "dest_fqdns": obj.DestFqdns, + "dest_ip_ranges": obj.DestIPRanges, + "dest_region_codes": obj.DestRegionCodes, + "dest_threat_intelligences": obj.DestThreatIntelligences, + "src_address_groups": obj.SrcAddressGroups, + 
"src_fqdns": obj.SrcFqdns, + "src_ip_ranges": obj.SrcIPRanges, + "src_region_codes": obj.SrcRegionCodes, + "src_secure_tags": flattenComputeRegionNetworkFirewallPolicyRuleMatchSrcSecureTagsArray(obj.SrcSecureTags), + "src_threat_intelligences": obj.SrcThreatIntelligences, } return []interface{}{transformed} @@ -593,7 +672,7 @@ func expandComputeRegionNetworkFirewallPolicyRuleMatchLayer4Configs(o interface{ obj := o.(map[string]interface{}) return &compute.NetworkFirewallPolicyRuleMatchLayer4Configs{ IPProtocol: dcl.String(obj["ip_protocol"].(string)), - Ports: expandStringArray(obj["ports"]), + Ports: tpgdclresource.ExpandStringArray(obj["ports"]), } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_sweeper.go new file mode 100644 index 0000000000..86472da376 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_network_firewall_policy_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "testing" + + compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRegionNetworkFirewallPolicy", testSweepComputeRegionNetworkFirewallPolicy) +} + +func testSweepComputeRegionNetworkFirewallPolicy(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ComputeRegionNetworkFirewallPolicy") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLComputeClient(config, config.UserAgent, "", 0) + err = client.DeleteAllNetworkFirewallPolicy(context.Background(), d["project"], d["location"], isDeletableComputeRegionNetworkFirewallPolicy) + if err != nil { + return err + } + return nil +} + +func isDeletableComputeRegionNetworkFirewallPolicy(r *compute.NetworkFirewallPolicy) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_per_instance_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_per_instance_config.go new file mode 100644 index 0000000000..1b6149f681 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_per_instance_config.go @@ -0,0 +1,788 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeRegionPerInstanceConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionPerInstanceConfigCreate, + Read: resourceComputeRegionPerInstanceConfigRead, + Update: resourceComputeRegionPerInstanceConfigUpdate, + Delete: resourceComputeRegionPerInstanceConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRegionPerInstanceConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name for this per-instance config and its corresponding instance.`, + }, + "region_instance_group_manager": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The region instance group manager this instance config is part of.`, + }, + "preserved_state": { + Type: schema.TypeList, + Optional: true, + Description: `The preserved state for this instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk": { + Type: schema.TypeSet, + Optional: true, + Description: `Stateful disks for the instance.`, + Elem: computeRegionPerInstanceConfigPreservedStateDiskSchema(), + // Default schema.HashSchema is used. 
+ }, + "metadata": { + Type: schema.TypeMap, + Optional: true, + Description: `Preserved metadata defined for this instance. This is a list of key->value pairs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Region where the containing instance group manager is located`, + }, + "minimal_action": { + Type: schema.TypeString, + Optional: true, + Default: "NONE", + Description: `The minimal action to perform on the instance during an update. +Default is 'NONE'. Possible values are: +* REPLACE +* RESTART +* REFRESH +* NONE`, + }, + "most_disruptive_allowed_action": { + Type: schema.TypeString, + Optional: true, + Default: "REPLACE", + Description: `The most disruptive action to perform on the instance during an update. +Default is 'REPLACE'. Possible values are: +* REPLACE +* RESTART +* REFRESH +* NONE`, + }, + "remove_instance_state_on_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `When true, deleting this config will immediately remove any specified state from the underlying instance. +When false, deleting this config will *not* immediately remove any state from the underlying instance. 
+State will be removed on the next instance recreation or update.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func computeRegionPerInstanceConfigPreservedStateDiskSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Required: true, + Description: `A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance.`, + }, + "source": { + Type: schema.TypeString, + Required: true, + Description: `The URI of an existing persistent disk to attach under the specified device-name in the format +'projects/project-id/zones/zone/disks/disk-name'.`, + }, + "delete_rule": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION", ""}), + Description: `A value that prescribes what should happen to the stateful disk when the VM instance is deleted. +The available options are 'NEVER' and 'ON_PERMANENT_INSTANCE_DELETION'. +'NEVER' - detach the disk when the VM is deleted, but do not delete the disk. +'ON_PERMANENT_INSTANCE_DELETION' will delete the stateful disk when the VM is permanently +deleted from the instance group. Default value: "NEVER" Possible values: ["NEVER", "ON_PERMANENT_INSTANCE_DELETION"]`, + Default: "NEVER", + }, + "mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"READ_ONLY", "READ_WRITE", ""}), + Description: `The mode of the disk. 
Default value: "READ_WRITE" Possible values: ["READ_ONLY", "READ_WRITE"]`, + Default: "READ_WRITE", + }, + }, + } +} + +func resourceComputeRegionPerInstanceConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandNestedComputeRegionPerInstanceConfigName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + preservedStateProp, err := expandNestedComputeRegionPerInstanceConfigPreservedState(d.Get("preserved_state"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("preserved_state"); !tpgresource.IsEmptyValue(reflect.ValueOf(preservedStateProp)) && (ok || !reflect.DeepEqual(v, preservedStateProp)) { + obj["preservedState"] = preservedStateProp + } + + obj, err = resourceComputeRegionPerInstanceConfigEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "instanceGroupManager/{{project}}/{{region}}/{{region_instance_group_manager}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/createInstances") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RegionPerInstanceConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionPerInstanceConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the 
billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating RegionPerInstanceConfig: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{region}}/{{region_instance_group_manager}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating RegionPerInstanceConfig", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create RegionPerInstanceConfig: %s", err) + } + + log.Printf("[DEBUG] Finished creating RegionPerInstanceConfig %q: %#v", d.Id(), res) + + return resourceComputeRegionPerInstanceConfigRead(d, meta) +} + +func resourceComputeRegionPerInstanceConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/listPerInstanceConfigs") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionPerInstanceConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = 
bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionPerInstanceConfig %q", d.Id())) + } + + res, err = flattenNestedComputeRegionPerInstanceConfig(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing ComputeRegionPerInstanceConfig because it couldn't be matched.") + d.SetId("") + return nil + } + + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("minimal_action"); !ok { + if err := d.Set("minimal_action", "NONE"); err != nil { + return fmt.Errorf("Error setting minimal_action: %s", err) + } + } + if _, ok := d.GetOkExists("most_disruptive_allowed_action"); !ok { + if err := d.Set("most_disruptive_allowed_action", "REPLACE"); err != nil { + return fmt.Errorf("Error setting most_disruptive_allowed_action: %s", err) + } + } + if _, ok := d.GetOkExists("remove_instance_state_on_destroy"); !ok { + if err := d.Set("remove_instance_state_on_destroy", false); err != nil { + return fmt.Errorf("Error setting remove_instance_state_on_destroy: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RegionPerInstanceConfig: %s", err) + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + if err := d.Set("region", region); err != nil { + return fmt.Errorf("Error reading RegionPerInstanceConfig: %s", err) + } + + if err := d.Set("name", flattenNestedComputeRegionPerInstanceConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionPerInstanceConfig: %s", err) + } + if err := d.Set("preserved_state", flattenNestedComputeRegionPerInstanceConfigPreservedState(res["preservedState"], d, 
config)); err != nil { + return fmt.Errorf("Error reading RegionPerInstanceConfig: %s", err) + } + + return nil +} + +func resourceComputeRegionPerInstanceConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionPerInstanceConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + nameProp, err := expandNestedComputeRegionPerInstanceConfigName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + preservedStateProp, err := expandNestedComputeRegionPerInstanceConfigPreservedState(d.Get("preserved_state"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("preserved_state"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, preservedStateProp)) { + obj["preservedState"] = preservedStateProp + } + + obj, err = resourceComputeRegionPerInstanceConfigUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "instanceGroupManager/{{project}}/{{region}}/{{region_instance_group_manager}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/updatePerInstanceConfigs") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating RegionPerInstanceConfig %q: %#v", d.Id(), obj) + + // err == nil indicates that the 
billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating RegionPerInstanceConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating RegionPerInstanceConfig %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating RegionPerInstanceConfig", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + // Instance name in applyUpdatesToInstances request must include zone + instanceName, err := findInstanceName(d, config) + if err != nil { + return err + } + + obj = make(map[string]interface{}) + obj["instances"] = []string{instanceName} + + minAction := d.Get("minimal_action") + if minAction == "" { + minAction = "NONE" + } + obj["minimalAction"] = minAction + + mostDisruptiveAction := d.Get("most_disruptive_allowed_action") + if tpgresource.IsEmptyValue(reflect.ValueOf(mostDisruptiveAction)) { + mostDisruptiveAction = "REPLACE" + } + obj["mostDisruptiveAllowedAction"] = mostDisruptiveAction + + url, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/applyUpdatesToInstances") + if err != nil { + return err + } + + log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating PerInstanceConfig %q: %s", d.Id(), err) + } 
+ + err = ComputeOperationWaitTime( + config, res, project, "Applying update to PerInstanceConfig", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + return resourceComputeRegionPerInstanceConfigRead(d, meta) +} + +func resourceComputeRegionPerInstanceConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "instanceGroupManager/{{project}}/{{region}}/{{region_instance_group_manager}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/deletePerInstanceConfigs") + if err != nil { + return err + } + + var obj map[string]interface{} + obj = map[string]interface{}{ + "names": [1]string{d.Get("name").(string)}, + } + log.Printf("[DEBUG] Deleting RegionPerInstanceConfig %q", d.Id()) + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RegionPerInstanceConfig") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting RegionPerInstanceConfig", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + // Potentially delete the state managed by this config + if d.Get("remove_instance_state_on_destroy").(bool) { + // Instance name in applyUpdatesToInstances request must include zone + instanceName, err := 
findInstanceName(d, config) + if err != nil { + return err + } + + obj = make(map[string]interface{}) + obj["instances"] = []string{instanceName} + + // Updates must be applied to the instance after deleting the PerInstanceConfig + url, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/applyUpdatesToInstances") + if err != nil { + return err + } + + log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating PerInstanceConfig %q: %s", d.Id(), err) + } + + err = ComputeOperationWaitTime( + config, res, project, "Applying update to PerInstanceConfig", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error deleting PerInstanceConfig %q: %s", d.Id(), err) + } + + // RegionPerInstanceConfig goes into "DELETING" state while the instance is actually deleted + err = transport_tpg.PollingWaitTime(resourceComputeRegionPerInstanceConfigPollRead(d, meta), PollCheckInstanceConfigDeleted, "Deleting RegionPerInstanceConfig", d.Timeout(schema.TimeoutDelete), 1) + if err != nil { + return fmt.Errorf("Error waiting for delete on RegionPerInstanceConfig %q: %s", d.Id(), err) + } + } + + log.Printf("[DEBUG] Finished deleting RegionPerInstanceConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRegionPerInstanceConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + 
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{region}}/{{region_instance_group_manager}}/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Explicitly set virtual fields to default values on import + if err := d.Set("minimal_action", "NONE"); err != nil { + return nil, fmt.Errorf("Error setting minimal_action: %s", err) + } + if err := d.Set("most_disruptive_allowed_action", "REPLACE"); err != nil { + return nil, fmt.Errorf("Error setting most_disruptive_allowed_action: %s", err) + } + if err := d.Set("remove_instance_state_on_destroy", false); err != nil { + return nil, fmt.Errorf("Error setting remove_instance_state_on_destroy: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedComputeRegionPerInstanceConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRegionPerInstanceConfigPreservedState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["metadata"] = + flattenNestedComputeRegionPerInstanceConfigPreservedStateMetadata(original["metadata"], d, config) + transformed["disk"] = + flattenNestedComputeRegionPerInstanceConfigPreservedStateDisk(original["disks"], d, config) + return []interface{}{transformed} +} +func flattenNestedComputeRegionPerInstanceConfigPreservedStateMetadata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRegionPerInstanceConfigPreservedStateDisk(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + if v == nil { + return v + } + disks := v.(map[string]interface{}) + transformed := make([]interface{}, 0, len(disks)) + for devName, deleteRuleRaw := range disks { + diskObj := deleteRuleRaw.(map[string]interface{}) + source, err := tpgresource.GetRelativePath(diskObj["source"].(string)) + if err != nil { + source = diskObj["source"].(string) + } + transformed = append(transformed, map[string]interface{}{ + "device_name": devName, + "delete_rule": diskObj["autoDelete"], + "source": source, + "mode": diskObj["mode"], + }) + } + return transformed +} + +func expandNestedComputeRegionPerInstanceConfigName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRegionPerInstanceConfigPreservedState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMetadata, err := expandNestedComputeRegionPerInstanceConfigPreservedStateMetadata(original["metadata"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMetadata); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["metadata"] = transformedMetadata + } + + transformedDisk, err := expandNestedComputeRegionPerInstanceConfigPreservedStateDisk(original["disk"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisk); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["disks"] = transformedDisk + } + + return transformed, nil +} + +func expandNestedComputeRegionPerInstanceConfigPreservedStateMetadata(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + 
} + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandNestedComputeRegionPerInstanceConfigPreservedStateDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return map[string]interface{}{}, nil + } + l := v.(*schema.Set).List() + req := make(map[string]interface{}) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + deviceName := original["device_name"].(string) + diskObj := make(map[string]interface{}) + deleteRule := original["delete_rule"].(string) + if deleteRule != "" { + diskObj["autoDelete"] = deleteRule + } + source := original["source"] + if source != "" { + diskObj["source"] = source + } + mode := original["mode"] + if source != "" { + diskObj["mode"] = mode + } + req[deviceName] = diskObj + } + return req, nil +} + +func resourceComputeRegionPerInstanceConfigEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + wrappedReq := map[string]interface{}{ + "instances": []interface{}{obj}, + } + return wrappedReq, nil +} + +func resourceComputeRegionPerInstanceConfigUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // updates and creates use different wrapping object names + wrappedReq := map[string]interface{}{ + "perInstanceConfigs": []interface{}{obj}, + } + return wrappedReq, nil +} + +func flattenNestedComputeRegionPerInstanceConfig(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["items"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, 
fmt.Errorf("expected list or map for value items. Actual value: %v", v) + } + + _, item, err := resourceComputeRegionPerInstanceConfigFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputeRegionPerInstanceConfigFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName, err := expandNestedComputeRegionPerInstanceConfigName(d.Get("name"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedName := flattenNestedComputeRegionPerInstanceConfigName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemName := flattenNestedComputeRegionPerInstanceConfigName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_ssl_certificate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_ssl_certificate.go new file mode 100644 index 0000000000..90fed441b9 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_ssl_certificate.go @@ -0,0 +1,462 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeRegionSslCertificate() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionSslCertificateCreate, + Read: resourceComputeRegionSslCertificateRead, + Delete: resourceComputeRegionSslCertificateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRegionSslCertificateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "certificate": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The certificate in PEM format. +The certificate chain must be no greater than 5 certs long. 
+The chain must include at least one intermediate cert.`, + Sensitive: true, + }, + "private_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.Sha256DiffSuppress, + Description: `The write-only private key in PEM format.`, + Sensitive: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash. + + +These are in the same namespace as the managed SSL certificates.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The Region in which the created regional ssl certificate should reside. 
+If it is not provided, the provider region is used.`, + }, + "certificate_id": { + Type: schema.TypeInt, + Computed: true, + Description: `The unique identifier for the resource.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "expire_time": { + Type: schema.TypeString, + Computed: true, + Description: `Expire time of the certificate in RFC3339 text format.`, + }, + "name_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name"}, + Description: "Creates a unique name beginning with the specified prefix. Conflicts with name.", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource + // uuid is 26 characters, limit the prefix to 37. + value := v.(string) + if len(value) > 37 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 37 characters, name is limited to 63", k)) + } + return + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRegionSslCertificateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + certificateProp, err := expandComputeRegionSslCertificateCertificate(d.Get("certificate"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate"); !tpgresource.IsEmptyValue(reflect.ValueOf(certificateProp)) && (ok || !reflect.DeepEqual(v, certificateProp)) { + obj["certificate"] = certificateProp + } + descriptionProp, err := 
expandComputeRegionSslCertificateDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeRegionSslCertificateName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + privateKeyProp, err := expandComputeRegionSslCertificatePrivateKey(d.Get("private_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("private_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(privateKeyProp)) && (ok || !reflect.DeepEqual(v, privateKeyProp)) { + obj["privateKey"] = privateKeyProp + } + regionProp, err := expandComputeRegionSslCertificateRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/sslCertificates") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RegionSslCertificate: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionSslCertificate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + 
UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating RegionSslCertificate: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/sslCertificates/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating RegionSslCertificate", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create RegionSslCertificate: %s", err) + } + + log.Printf("[DEBUG] Finished creating RegionSslCertificate %q: %#v", d.Id(), res) + + return resourceComputeRegionSslCertificateRead(d, meta) +} + +func resourceComputeRegionSslCertificateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/sslCertificates/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionSslCertificate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionSslCertificate %q", d.Id())) + } + + if err := d.Set("project", project); 
err != nil { + return fmt.Errorf("Error reading RegionSslCertificate: %s", err) + } + + if err := d.Set("certificate", flattenComputeRegionSslCertificateCertificate(res["certificate"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionSslCertificate: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeRegionSslCertificateCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionSslCertificate: %s", err) + } + if err := d.Set("description", flattenComputeRegionSslCertificateDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionSslCertificate: %s", err) + } + if err := d.Set("expire_time", flattenComputeRegionSslCertificateExpireTime(res["expireTime"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionSslCertificate: %s", err) + } + if err := d.Set("certificate_id", flattenComputeRegionSslCertificateCertificateId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionSslCertificate: %s", err) + } + if err := d.Set("name", flattenComputeRegionSslCertificateName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionSslCertificate: %s", err) + } + if err := d.Set("region", flattenComputeRegionSslCertificateRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionSslCertificate: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading RegionSslCertificate: %s", err) + } + + return nil +} + +func resourceComputeRegionSslCertificateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return 
fmt.Errorf("Error fetching project for RegionSslCertificate: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/sslCertificates/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting RegionSslCertificate %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RegionSslCertificate") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting RegionSslCertificate", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting RegionSslCertificate %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRegionSslCertificateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/sslCertificates/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/sslCertificates/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeRegionSslCertificateCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenComputeRegionSslCertificateCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionSslCertificateDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionSslCertificateExpireTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionSslCertificateCertificateId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionSslCertificateName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionSslCertificateRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandComputeRegionSslCertificateCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionSslCertificateDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionSslCertificateName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + var certName string + if v, ok := d.GetOk("name"); ok { + certName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + certName = 
resource.PrefixedUniqueId(v.(string)) + } else { + certName = resource.UniqueId() + } + + // We need to get the {{name}} into schema to set the ID using tpgresource.ReplaceVars + if err := d.Set("name", certName); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return certName, nil +} + +func expandComputeRegionSslCertificatePrivateKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionSslCertificateRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_ssl_certificate_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_ssl_certificate_sweeper.go new file mode 100644 index 0000000000..0867123bdd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_ssl_certificate_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRegionSslCertificate", testSweepComputeRegionSslCertificate) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRegionSslCertificate(region string) error { + resourceName := "ComputeRegionSslCertificate" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/sslCertificates", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + 
}) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/sslCertificates/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_http_proxy.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_http_proxy.go new file mode 100644 index 0000000000..0c1657ff0e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_http_proxy.go @@ -0,0 +1,465 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeRegionTargetHttpProxy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionTargetHttpProxyCreate, + Read: resourceComputeRegionTargetHttpProxyRead, + Update: resourceComputeRegionTargetHttpProxyUpdate, + Delete: resourceComputeRegionTargetHttpProxyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRegionTargetHttpProxyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + 
Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "url_map": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the RegionUrlMap resource that defines the mapping from URL +to the BackendService.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The Region in which the created target https proxy should reside. 
+If it is not provided, the provider region is used.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "proxy_id": { + Type: schema.TypeInt, + Computed: true, + Description: `The unique identifier for the resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRegionTargetHttpProxyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeRegionTargetHttpProxyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeRegionTargetHttpProxyName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + urlMapProp, err := expandComputeRegionTargetHttpProxyUrlMap(d.Get("url_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("url_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(urlMapProp)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { + obj["urlMap"] = urlMapProp + } + regionProp, err := expandComputeRegionTargetHttpProxyRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || 
!reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpProxies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RegionTargetHttpProxy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionTargetHttpProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating RegionTargetHttpProxy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating RegionTargetHttpProxy", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create RegionTargetHttpProxy: %s", err) + } + + log.Printf("[DEBUG] Finished creating RegionTargetHttpProxy %q: %#v", d.Id(), res) + + return resourceComputeRegionTargetHttpProxyRead(d, meta) +} + +func resourceComputeRegionTargetHttpProxyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionTargetHttpProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionTargetHttpProxy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeRegionTargetHttpProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) + } + if err := d.Set("description", flattenComputeRegionTargetHttpProxyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) + } + if err := d.Set("proxy_id", flattenComputeRegionTargetHttpProxyProxyId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) + } + if err := d.Set("name", flattenComputeRegionTargetHttpProxyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) + } + if err := d.Set("url_map", flattenComputeRegionTargetHttpProxyUrlMap(res["urlMap"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) + } + if err := d.Set("region", flattenComputeRegionTargetHttpProxyRegion(res["region"], d, config)); err 
!= nil { + return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) + } + + return nil +} + +func resourceComputeRegionTargetHttpProxyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionTargetHttpProxy: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("url_map") { + obj := make(map[string]interface{}) + + urlMapProp, err := expandComputeRegionTargetHttpProxyUrlMap(d.Get("url_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("url_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { + obj["urlMap"] = urlMapProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}/setUrlMap") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating RegionTargetHttpProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating RegionTargetHttpProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating 
RegionTargetHttpProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeRegionTargetHttpProxyRead(d, meta) +} + +func resourceComputeRegionTargetHttpProxyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionTargetHttpProxy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting RegionTargetHttpProxy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RegionTargetHttpProxy") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting RegionTargetHttpProxy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting RegionTargetHttpProxy %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRegionTargetHttpProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + 
"projects/(?P[^/]+)/regions/(?P[^/]+)/targetHttpProxies/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeRegionTargetHttpProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionTargetHttpProxyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionTargetHttpProxyProxyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionTargetHttpProxyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionTargetHttpProxyUrlMap(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRegionTargetHttpProxyRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandComputeRegionTargetHttpProxyDescription(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionTargetHttpProxyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionTargetHttpProxyUrlMap(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("urlMaps", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for url_map: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeRegionTargetHttpProxyRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_http_proxy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_http_proxy_sweeper.go new file mode 100644 index 0000000000..e3364ea0ef --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_http_proxy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRegionTargetHttpProxy", testSweepComputeRegionTargetHttpProxy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRegionTargetHttpProxy(region string) error { + resourceName := "ComputeRegionTargetHttpProxy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/targetHttpProxies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + 
Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["regionTargetHttpProxies"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_https_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_https_proxy.go new file mode 100644 index 0000000000..3a916276ee --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_https_proxy.go @@ -0,0 +1,550 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeRegionTargetHttpsProxy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionTargetHttpsProxyCreate, + Read: resourceComputeRegionTargetHttpsProxyRead, + Update: resourceComputeRegionTargetHttpsProxyUpdate, + Delete: resourceComputeRegionTargetHttpsProxyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRegionTargetHttpsProxyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "ssl_certificates": { + Type: schema.TypeList, + Required: true, + Description: `A list of RegionSslCertificate resources that are used to authenticate +connections between users and the load balancer. 
Currently, exactly +one SSL certificate must be specified.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + "url_map": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the RegionUrlMap resource that defines the mapping from URL +to the RegionBackendService.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The Region in which the created target https proxy should reside. +If it is not provided, the provider region is used.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "proxy_id": { + Type: schema.TypeInt, + Computed: true, + Description: `The unique identifier for the resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRegionTargetHttpsProxyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeRegionTargetHttpsProxyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err 
:= expandComputeRegionTargetHttpsProxyName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + sslCertificatesProp, err := expandComputeRegionTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ssl_certificates"); !tpgresource.IsEmptyValue(reflect.ValueOf(sslCertificatesProp)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { + obj["sslCertificates"] = sslCertificatesProp + } + urlMapProp, err := expandComputeRegionTargetHttpsProxyUrlMap(d.Get("url_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("url_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(urlMapProp)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { + obj["urlMap"] = urlMapProp + } + regionProp, err := expandComputeRegionTargetHttpsProxyRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RegionTargetHttpsProxy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionTargetHttpsProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: 
billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating RegionTargetHttpsProxy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating RegionTargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create RegionTargetHttpsProxy: %s", err) + } + + log.Printf("[DEBUG] Finished creating RegionTargetHttpsProxy %q: %#v", d.Id(), res) + + return resourceComputeRegionTargetHttpsProxyRead(d, meta) +} + +func resourceComputeRegionTargetHttpsProxyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionTargetHttpsProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionTargetHttpsProxy %q", 
d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeRegionTargetHttpsProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) + } + if err := d.Set("description", flattenComputeRegionTargetHttpsProxyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) + } + if err := d.Set("proxy_id", flattenComputeRegionTargetHttpsProxyProxyId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) + } + if err := d.Set("name", flattenComputeRegionTargetHttpsProxyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) + } + if err := d.Set("ssl_certificates", flattenComputeRegionTargetHttpsProxySslCertificates(res["sslCertificates"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) + } + if err := d.Set("url_map", flattenComputeRegionTargetHttpsProxyUrlMap(res["urlMap"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) + } + if err := d.Set("region", flattenComputeRegionTargetHttpsProxyRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) + } + + return nil +} + +func resourceComputeRegionTargetHttpsProxyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err 
:= tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionTargetHttpsProxy: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("ssl_certificates") { + obj := make(map[string]interface{}) + + sslCertificatesProp, err := expandComputeRegionTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ssl_certificates"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { + obj["sslCertificates"] = sslCertificatesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setSslCertificates") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating RegionTargetHttpsProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating RegionTargetHttpsProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating RegionTargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("url_map") { + obj := make(map[string]interface{}) + + urlMapProp, err := expandComputeRegionTargetHttpsProxyUrlMap(d.Get("url_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("url_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { + obj["urlMap"] = urlMapProp + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setUrlMap") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating RegionTargetHttpsProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating RegionTargetHttpsProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating RegionTargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeRegionTargetHttpsProxyRead(d, meta) +} + +func resourceComputeRegionTargetHttpsProxyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionTargetHttpsProxy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting RegionTargetHttpsProxy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RegionTargetHttpsProxy") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting RegionTargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting RegionTargetHttpsProxy %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRegionTargetHttpsProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/targetHttpsProxies/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeRegionTargetHttpsProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionTargetHttpsProxyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionTargetHttpsProxyProxyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if 
floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionTargetHttpsProxyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionTargetHttpsProxySslCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeRegionTargetHttpsProxyUrlMap(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRegionTargetHttpsProxyRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandComputeRegionTargetHttpsProxyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionTargetHttpsProxyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionTargetHttpsProxySslCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for ssl_certificates: nil") + } + f, err := tpgresource.ParseRegionalFieldValue("sslCertificates", raw.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for ssl_certificates: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func 
expandComputeRegionTargetHttpsProxyUrlMap(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("urlMaps", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for url_map: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeRegionTargetHttpsProxyRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_https_proxy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_https_proxy_sweeper.go new file mode 100644 index 0000000000..db39e1bfe7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_https_proxy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRegionTargetHttpsProxy", testSweepComputeRegionTargetHttpsProxy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRegionTargetHttpsProxy(region string) error { + resourceName := "ComputeRegionTargetHttpsProxy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/targetHttpsProxies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: 
config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["regionTargetHttpsProxies"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_tcp_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_tcp_proxy.go new file mode 100644 index 0000000000..e259ced6ca --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_tcp_proxy.go @@ -0,0 +1,450 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeRegionTargetTcpProxy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionTargetTcpProxyCreate, + Read: resourceComputeRegionTargetTcpProxyRead, + Delete: resourceComputeRegionTargetTcpProxyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRegionTargetTcpProxyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "backend_service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the BackendService resource.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "proxy_bind": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: `This field only applies when the forwarding rule that references +this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, + }, + "proxy_header": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"NONE", "PROXY_V1", ""}), + Description: `Specifies the type of proxy header to append before sending data to +the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, + Default: "NONE", + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The Region in which the created target TCP proxy should reside. 
+If it is not provided, the provider region is used.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "proxy_id": { + Type: schema.TypeInt, + Computed: true, + Description: `The unique identifier for the resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRegionTargetTcpProxyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeRegionTargetTcpProxyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeRegionTargetTcpProxyName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + proxyHeaderProp, err := expandComputeRegionTargetTcpProxyProxyHeader(d.Get("proxy_header"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("proxy_header"); !tpgresource.IsEmptyValue(reflect.ValueOf(proxyHeaderProp)) && (ok || !reflect.DeepEqual(v, proxyHeaderProp)) { + obj["proxyHeader"] = proxyHeaderProp + } + serviceProp, err := expandComputeRegionTargetTcpProxyBackendService(d.Get("backend_service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backend_service"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(serviceProp)) && (ok || !reflect.DeepEqual(v, serviceProp)) { + obj["service"] = serviceProp + } + proxyBindProp, err := expandComputeRegionTargetTcpProxyProxyBind(d.Get("proxy_bind"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("proxy_bind"); !tpgresource.IsEmptyValue(reflect.ValueOf(proxyBindProp)) && (ok || !reflect.DeepEqual(v, proxyBindProp)) { + obj["proxyBind"] = proxyBindProp + } + regionProp, err := expandComputeRegionTargetTcpProxyRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetTcpProxies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RegionTargetTcpProxy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionTargetTcpProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating RegionTargetTcpProxy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetTcpProxies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, 
"Creating RegionTargetTcpProxy", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create RegionTargetTcpProxy: %s", err) + } + + log.Printf("[DEBUG] Finished creating RegionTargetTcpProxy %q: %#v", d.Id(), res) + + return resourceComputeRegionTargetTcpProxyRead(d, meta) +} + +func resourceComputeRegionTargetTcpProxyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetTcpProxies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionTargetTcpProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionTargetTcpProxy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeRegionTargetTcpProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) + } + if err := d.Set("description", flattenComputeRegionTargetTcpProxyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading 
RegionTargetTcpProxy: %s", err) + } + if err := d.Set("proxy_id", flattenComputeRegionTargetTcpProxyProxyId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) + } + if err := d.Set("name", flattenComputeRegionTargetTcpProxyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) + } + if err := d.Set("proxy_header", flattenComputeRegionTargetTcpProxyProxyHeader(res["proxyHeader"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) + } + if err := d.Set("backend_service", flattenComputeRegionTargetTcpProxyBackendService(res["service"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) + } + if err := d.Set("proxy_bind", flattenComputeRegionTargetTcpProxyProxyBind(res["proxyBind"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) + } + if err := d.Set("region", flattenComputeRegionTargetTcpProxyRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading RegionTargetTcpProxy: %s", err) + } + + return nil +} + +func resourceComputeRegionTargetTcpProxyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RegionTargetTcpProxy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetTcpProxies/{{name}}") + if err != nil { + return err + } + + var obj 
map[string]interface{} + log.Printf("[DEBUG] Deleting RegionTargetTcpProxy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RegionTargetTcpProxy") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting RegionTargetTcpProxy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting RegionTargetTcpProxy %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRegionTargetTcpProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/regions/(?P<region>[^/]+)/targetTcpProxies/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<name>[^/]+)", + "(?P<region>[^/]+)/(?P<name>[^/]+)", + "(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetTcpProxies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeRegionTargetTcpProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionTargetTcpProxyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionTargetTcpProxyProxyId(v interface{}, d *schema.ResourceData, config
*transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRegionTargetTcpProxyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionTargetTcpProxyProxyHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionTargetTcpProxyBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRegionTargetTcpProxyProxyBind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRegionTargetTcpProxyRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandComputeRegionTargetTcpProxyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionTargetTcpProxyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionTargetTcpProxyProxyHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionTargetTcpProxyBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := 
tpgresource.ParseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for backend_service: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeRegionTargetTcpProxyProxyBind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRegionTargetTcpProxyRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_tcp_proxy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_tcp_proxy_sweeper.go new file mode 100644 index 0000000000..fb5674ad21 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_target_tcp_proxy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRegionTargetTcpProxy", testSweepComputeRegionTargetTcpProxy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRegionTargetTcpProxy(region string) error { + resourceName := "ComputeRegionTargetTcpProxy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/targetTcpProxies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + 
}) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["regionTargetTcpProxies"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/targetTcpProxies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_url_map.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_url_map.go similarity index 82% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_url_map.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_url_map.go index 024c815ca4..2237ee3071 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_region_url_map.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_url_map.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "fmt" @@ -22,6 +25,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceComputeRegionUrlMap() *schema.Resource { @@ -245,7 +252,7 @@ Not supported when the URL map is bound to a target gRPC proxy that has the vali "backend_service": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The full or partial URL to the RegionBackendService resource being mirrored to. 
The backend service configured for a mirroring policy must reference backends that are of the same type as the original backend service matched in the URL map. Serverless NEG backends are not currently supported as a mirrored backend service.`, @@ -387,7 +394,7 @@ After a backend service is identified and before forwarding the request to the b "backend_service": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the load balancer applies any relevant headerActions specified as part of this backendServiceWeight.`, }, "header_action": { @@ -491,7 +498,7 @@ The value must be from 0 to 1000.`, "default_service": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the @@ -554,7 +561,7 @@ the redirect. The value must be between 1 and 1024 characters.`, "redirect_response_code": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), Description: `The HTTP Status code to use for this RedirectAction. Supported values are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. 
@@ -601,7 +608,7 @@ you create the resource.`, "default_service": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `A reference to a RegionBackendService resource. This will be used if none of the pathRules defined by this PathMatcher is matched by the URL's path portion.`, @@ -659,7 +666,7 @@ the redirect. The value must be between 1 and 1024 characters.`, "redirect_response_code": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), Description: `The HTTP Status code to use for this RedirectAction. Supported values are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. @@ -884,7 +891,7 @@ service, the host / authority header is suffixed with -shadow.`, "backend_service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The RegionBackendService resource being mirrored to.`, }, }, @@ -1024,7 +1031,7 @@ HttpRouteAction.`, "backend_service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The default RegionBackendService resource. 
Before forwarding the request to backendService, the loadbalancer applies any relevant headerActions specified as part of this backendServiceWeight.`, @@ -1131,7 +1138,7 @@ prior to sending the response back to the client.`, "service": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The region backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending @@ -1196,7 +1203,7 @@ must be between 1 and 1024 characters.`, "redirect_response_code": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), Description: `The HTTP Status code to use for this RedirectAction. Supported values are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. @@ -1490,7 +1497,7 @@ length of 1024 characters.`, "filter_match_criteria": { Type: schema.TypeString, Required: true, - ValidateFunc: validateEnum([]string{"MATCH_ALL", "MATCH_ANY"}), + ValidateFunc: verify.ValidateEnum([]string{"MATCH_ALL", "MATCH_ANY"}), Description: `Specifies how individual filterLabel matches within the list of filterLabels contribute towards the overall metadataFilter match. 
Supported values are: @@ -1741,7 +1748,7 @@ service, the host / authority header is suffixed with -shadow.`, "backend_service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The RegionBackendService resource being mirrored to.`, }, }, @@ -1881,7 +1888,7 @@ HttpRouteAction.`, "backend_service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The default RegionBackendService resource. Before forwarding the request to backendService, the loadbalancer applies any relevant headerActions specified as part of this backendServiceWeight.`, @@ -1988,7 +1995,7 @@ prior to sending the response back to the client.`, "service": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The region backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending @@ -2045,7 +2052,7 @@ must be between 1 and 1024 characters.`, "redirect_response_code": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), Description: `The HTTP Status code to use for this RedirectAction. Supported values are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. @@ -2081,7 +2088,7 @@ portion of the original URL is retained. 
The default value is false.`, Type: schema.TypeString, Computed: true, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The Region in which the url map should reside. If it is not provided, the provider region is used.`, }, @@ -2105,7 +2112,7 @@ succeed only if all of the test cases pass.`, "service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `A reference to expected RegionBackendService resource the given URL should be mapped to.`, }, "description": { @@ -2179,8 +2186,8 @@ when you create the resource.`, } func resourceComputeRegionUrlMapCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -2189,65 +2196,65 @@ func resourceComputeRegionUrlMapCreate(d *schema.ResourceData, meta interface{}) defaultServiceProp, err := expandComputeRegionUrlMapDefaultService(d.Get("default_service"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_service"); !isEmptyValue(reflect.ValueOf(defaultServiceProp)) && (ok || !reflect.DeepEqual(v, defaultServiceProp)) { + } else if v, ok := d.GetOkExists("default_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultServiceProp)) && (ok || !reflect.DeepEqual(v, defaultServiceProp)) { obj["defaultService"] = defaultServiceProp } descriptionProp, err := expandComputeRegionUrlMapDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := 
d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } hostRulesProp, err := expandComputeRegionUrlMapHostRule(d.Get("host_rule"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("host_rule"); !isEmptyValue(reflect.ValueOf(hostRulesProp)) && (ok || !reflect.DeepEqual(v, hostRulesProp)) { + } else if v, ok := d.GetOkExists("host_rule"); !tpgresource.IsEmptyValue(reflect.ValueOf(hostRulesProp)) && (ok || !reflect.DeepEqual(v, hostRulesProp)) { obj["hostRules"] = hostRulesProp } fingerprintProp, err := expandComputeRegionUrlMapFingerprint(d.Get("fingerprint"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { obj["fingerprint"] = fingerprintProp } nameProp, err := expandComputeRegionUrlMapName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } pathMatchersProp, err := expandComputeRegionUrlMapPathMatcher(d.Get("path_matcher"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("path_matcher"); !isEmptyValue(reflect.ValueOf(pathMatchersProp)) && (ok || !reflect.DeepEqual(v, pathMatchersProp)) { + } else if v, ok := d.GetOkExists("path_matcher"); !tpgresource.IsEmptyValue(reflect.ValueOf(pathMatchersProp)) && (ok || !reflect.DeepEqual(v, pathMatchersProp)) { obj["pathMatchers"] = pathMatchersProp } testsProp, 
err := expandComputeRegionUrlMapTest(d.Get("test"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("test"); !isEmptyValue(reflect.ValueOf(testsProp)) && (ok || !reflect.DeepEqual(v, testsProp)) { + } else if v, ok := d.GetOkExists("test"); !tpgresource.IsEmptyValue(reflect.ValueOf(testsProp)) && (ok || !reflect.DeepEqual(v, testsProp)) { obj["tests"] = testsProp } defaultUrlRedirectProp, err := expandComputeRegionUrlMapDefaultUrlRedirect(d.Get("default_url_redirect"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_url_redirect"); !isEmptyValue(reflect.ValueOf(defaultUrlRedirectProp)) && (ok || !reflect.DeepEqual(v, defaultUrlRedirectProp)) { + } else if v, ok := d.GetOkExists("default_url_redirect"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultUrlRedirectProp)) && (ok || !reflect.DeepEqual(v, defaultUrlRedirectProp)) { obj["defaultUrlRedirect"] = defaultUrlRedirectProp } defaultRouteActionProp, err := expandComputeRegionUrlMapDefaultRouteAction(d.Get("default_route_action"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_route_action"); !isEmptyValue(reflect.ValueOf(defaultRouteActionProp)) && (ok || !reflect.DeepEqual(v, defaultRouteActionProp)) { + } else if v, ok := d.GetOkExists("default_route_action"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultRouteActionProp)) && (ok || !reflect.DeepEqual(v, defaultRouteActionProp)) { obj["defaultRouteAction"] = defaultRouteActionProp } regionProp, err := expandComputeRegionUrlMapRegion(d.Get("region"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { obj["region"] = regionProp } - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/urlMaps") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/urlMaps") if err != nil { return err } @@ -2255,24 +2262,32 @@ func resourceComputeRegionUrlMapCreate(d *schema.ResourceData, meta interface{}) log.Printf("[DEBUG] Creating new RegionUrlMap: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RegionUrlMap: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating RegionUrlMap: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -2294,33 +2309,39 @@ func resourceComputeRegionUrlMapCreate(d *schema.ResourceData, meta interface{}) } func resourceComputeRegionUrlMapRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err 
:= replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RegionUrlMap: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionUrlMap %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionUrlMap %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -2363,7 +2384,7 @@ func resourceComputeRegionUrlMapRead(d *schema.ResourceData, meta interface{}) e if err := d.Set("region", flattenComputeRegionUrlMapRegion(res["region"], d, config)); err != nil { return fmt.Errorf("Error reading RegionUrlMap: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { return fmt.Errorf("Error reading RegionUrlMap: %s", err) } @@ -2371,15 +2392,15 @@ func resourceComputeRegionUrlMapRead(d *schema.ResourceData, meta interface{}) e } func resourceComputeRegionUrlMapUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RegionUrlMap: %s", err) } @@ -2389,65 +2410,65 @@ func resourceComputeRegionUrlMapUpdate(d *schema.ResourceData, meta interface{}) defaultServiceProp, err := expandComputeRegionUrlMapDefaultService(d.Get("default_service"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_service"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultServiceProp)) { + } else if v, ok := d.GetOkExists("default_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultServiceProp)) { obj["defaultService"] = defaultServiceProp } descriptionProp, err := expandComputeRegionUrlMapDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } hostRulesProp, err := expandComputeRegionUrlMapHostRule(d.Get("host_rule"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("host_rule"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hostRulesProp)) { + } else if v, ok := d.GetOkExists("host_rule"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hostRulesProp)) { obj["hostRules"] = hostRulesProp } fingerprintProp, err := expandComputeRegionUrlMapFingerprint(d.Get("fingerprint"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("fingerprint"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { obj["fingerprint"] = fingerprintProp } nameProp, err := expandComputeRegionUrlMapName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } pathMatchersProp, err := expandComputeRegionUrlMapPathMatcher(d.Get("path_matcher"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("path_matcher"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pathMatchersProp)) { + } else if v, ok := d.GetOkExists("path_matcher"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pathMatchersProp)) { obj["pathMatchers"] = pathMatchersProp } testsProp, err := expandComputeRegionUrlMapTest(d.Get("test"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("test"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, testsProp)) { + } else if v, ok := d.GetOkExists("test"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, testsProp)) { obj["tests"] = testsProp } defaultUrlRedirectProp, err := expandComputeRegionUrlMapDefaultUrlRedirect(d.Get("default_url_redirect"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_url_redirect"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultUrlRedirectProp)) { + } else if v, ok := d.GetOkExists("default_url_redirect"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultUrlRedirectProp)) { obj["defaultUrlRedirect"] = 
defaultUrlRedirectProp } defaultRouteActionProp, err := expandComputeRegionUrlMapDefaultRouteAction(d.Get("default_route_action"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_route_action"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultRouteActionProp)) { + } else if v, ok := d.GetOkExists("default_route_action"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultRouteActionProp)) { obj["defaultRouteAction"] = defaultRouteActionProp } regionProp, err := expandComputeRegionUrlMapRegion(d.Get("region"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regionProp)) { + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regionProp)) { obj["region"] = regionProp } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") if err != nil { return err } @@ -2455,11 +2476,19 @@ func resourceComputeRegionUrlMapUpdate(d *schema.ResourceData, meta interface{}) log.Printf("[DEBUG] Updating RegionUrlMap %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return 
fmt.Errorf("Error updating RegionUrlMap %q: %s", d.Id(), err) @@ -2479,21 +2508,21 @@ func resourceComputeRegionUrlMapUpdate(d *schema.ResourceData, meta interface{}) } func resourceComputeRegionUrlMapDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RegionUrlMap: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") if err != nil { return err } @@ -2502,13 +2531,21 @@ func resourceComputeRegionUrlMapDelete(d *schema.ResourceData, meta interface{}) log.Printf("[DEBUG] Deleting RegionUrlMap %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "RegionUrlMap") + return transport_tpg.HandleNotFoundError(err, d, "RegionUrlMap") } err = ComputeOperationWaitTime( @@ -2524,8 +2561,8 @@ func resourceComputeRegionUrlMapDelete(d *schema.ResourceData, meta 
interface{}) } func resourceComputeRegionUrlMapImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/regions/(?P[^/]+)/urlMaps/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -2535,7 +2572,7 @@ func resourceComputeRegionUrlMapImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -2544,22 +2581,22 @@ func resourceComputeRegionUrlMapImport(d *schema.ResourceData, meta interface{}) return []*schema.ResourceData{d}, nil } -func flattenComputeRegionUrlMapCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeRegionUrlMapDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapHostRule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapHostRule(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2579,25 +2616,25 @@ func flattenComputeRegionUrlMapHostRule(v interface{}, d *schema.ResourceData, c } return transformed } -func flattenComputeRegionUrlMapHostRuleDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapHostRuleDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapHostRuleHosts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapHostRuleHosts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenComputeRegionUrlMapHostRulePathMatcher(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapHostRulePathMatcher(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapMapId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapMapId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2611,15 +2648,15 @@ func flattenComputeRegionUrlMapMapId(v interface{}, d *schema.ResourceData, conf return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapFingerprint(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapName(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcher(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcher(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2642,22 +2679,22 @@ func flattenComputeRegionUrlMapPathMatcher(v interface{}, d *schema.ResourceData } return transformed } -func flattenComputeRegionUrlMapPathMatcherDefaultService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherDefaultService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeRegionUrlMapPathMatcherDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRules(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2680,10 +2717,10 @@ func flattenComputeRegionUrlMapPathMatcherRouteRules(v interface{}, d *schema.Re } return transformed } -func flattenComputeRegionUrlMapPathMatcherRouteRulesPriority(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesPriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2697,14 +2734,14 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesPriority(v interface{}, d *s return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherRouteRulesService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2723,7 +2760,7 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2743,23 +2780,23 @@ func 
flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersTo } return transformed } -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2779,23 +2816,23 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersT } return transformed } -func 
flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2819,11 +2856,11 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d } return transformed } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2848,27 +2885,27 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v in } return transformed } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2883,10 +2920,10 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRange flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(original["rangeStart"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2900,10 +2937,10 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRange return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2917,19 +2954,19 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRange return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2948,7 +2985,7 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v } return transformed } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2967,23 +3004,23 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFil } return transformed } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3004,27 +3041,27 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatc } return transformed } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeRegionUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3049,7 +3086,7 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(original["weightedBackendServices"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3076,38 +3113,38 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v inte flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(original["maxAge"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3121,7 +3158,7 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge( return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3136,7 +3173,7 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPol flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3151,10 +3188,10 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPol flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == 
nil { return intVal } } @@ -3168,11 +3205,11 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPol return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3187,7 +3224,7 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPol flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3202,10 +3239,10 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPol flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
+func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3219,15 +3256,15 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPol return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3240,14 +3277,14 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPoli flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(original["backendService"], d, config) return []interface{}{transformed} } -func 
flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3264,10 +3301,10 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v int flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(original["retryConditions"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3281,7 +3318,7 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRet return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3296,10 +3333,10 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTry flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3313,15 +3350,15 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTry return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3336,10 +3373,10 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeout(v interfa flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(original["seconds"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3353,11 +3390,11 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v in return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3372,15 +3409,15 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v inte flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(original["pathPrefixRewrite"], d, 
config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3400,14 +3437,14 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendSe } return transformed } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3426,7 +3463,7 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendSe flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3446,23 +3483,23 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendSe } return transformed } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3482,26 +3519,26 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendSe } return transformed } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3515,7 +3552,7 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendSe return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3538,31 +3575,31 @@ func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d 
flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(original["stripQuery"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3583,21 +3620,21 @@ func flattenComputeRegionUrlMapPathMatcherPathRule(v interface{}, d *schema.Reso } return transformed } -func flattenComputeRegionUrlMapPathMatcherPathRuleService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeRegionUrlMapPathMatcherPathRulePaths(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRulePaths(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3622,7 +3659,7 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteAction(v interface{}, d * flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(original["weightedBackendServices"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3649,38 +3686,38 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interf flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(original["maxAge"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3694,7 +3731,7 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3709,7 +3746,7 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolic 
flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3724,10 +3761,10 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolic flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3741,11 +3778,11 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolic return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3760,7 +3797,7 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolic flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3775,10 +3812,10 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolic flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3792,15 +3829,15 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolic return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3813,14 +3850,14 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(original["backendService"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil 
{ return nil } @@ -3837,10 +3874,10 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v inter flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(original["retryConditions"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3854,7 +3891,7 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetri return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3869,10 +3906,10 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTi flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if 
intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3886,15 +3923,15 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTi return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3909,10 +3946,10 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeout(v interface flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(original["seconds"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, 
err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3926,11 +3963,11 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v inte return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3945,15 +3982,15 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interf flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(original["pathPrefixRewrite"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3973,14 +4010,14 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServ } return transformed } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3999,7 +4036,7 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServ flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4019,23 +4056,23 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServ } return 
transformed } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4055,26 +4092,26 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServ } return transformed } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4088,7 +4125,7 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServ return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4111,31 +4148,31 @@ func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d * flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectStripQuery(original["stripQuery"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4158,31 +4195,31 @@ func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d *s flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectStripQuery(original["stripQuery"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapTest(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapTest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4203,26 +4240,26 @@ func flattenComputeRegionUrlMapTest(v interface{}, d *schema.ResourceData, confi } return transformed } -func flattenComputeRegionUrlMapTestDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionUrlMapTestDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapTestHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapTestHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapTestPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapTestPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapTestService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapTestService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeRegionUrlMapDefaultUrlRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultUrlRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4245,31 +4282,31 @@ func flattenComputeRegionUrlMapDefaultUrlRedirect(v interface{}, d *schema.Resou flattenComputeRegionUrlMapDefaultUrlRedirectStripQuery(original["stripQuery"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapDefaultUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultUrlRedirectHttpsRedirect(v interface{}, 
d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4294,7 +4331,7 @@ func flattenComputeRegionUrlMapDefaultRouteAction(v interface{}, d *schema.Resou flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicy(original["faultInjectionPolicy"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServices(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4314,17 +4351,17 @@ func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServices(v inter } return transformed } -func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4338,7 +4375,7 @@ func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesWeight(v return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4357,11 +4394,11 @@ func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderAc flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["responseHeadersToAdd"], d, config) return 
[]interface{}{transformed} } -func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4381,23 +4418,23 @@ func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderAc } return transformed } -func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4417,19 +4454,19 @@ func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderAc } return transformed } -func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4444,15 +4481,15 @@ func flattenComputeRegionUrlMapDefaultRouteActionUrlRewrite(v interface{}, d *sc flattenComputeRegionUrlMapDefaultRouteActionUrlRewriteHostRewrite(original["hostRewrite"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4467,14 +4504,14 @@ func flattenComputeRegionUrlMapDefaultRouteActionTimeout(v interface{}, d *schem 
flattenComputeRegionUrlMapDefaultRouteActionTimeoutNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapDefaultRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4488,7 +4525,7 @@ func flattenComputeRegionUrlMapDefaultRouteActionTimeoutNanos(v interface{}, d * return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4505,14 +4542,14 @@ func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicy(v interface{}, d *s flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(original["perTryTimeout"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4526,7 +4563,7 @@ func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyNumRetries(v interfa return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4541,14 +4578,14 @@ func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v inte flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == 
nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4562,7 +4599,7 @@ func flattenComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapDefaultRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4575,14 +4612,14 @@ func flattenComputeRegionUrlMapDefaultRouteActionRequestMirrorPolicy(v interface flattenComputeRegionUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(original["backendService"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4609,30 +4646,30 @@ func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicy(v interface{}, d *sc flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyDisabled(original["disabled"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4646,15 +4683,15 @@ func 
flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyMaxAge(v interface{}, return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4669,7 +4706,7 @@ func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicy(v interfac flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4684,7 +4721,7 @@ func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v int flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4699,14 +4736,14 @@ func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedD flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4720,11 +4757,11 @@ func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedD return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4739,10 +4776,10 @@ func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v int flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4756,30 +4793,30 @@ func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpSt return v // let terraform core handle it otherwise } -func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeRegionUrlMapRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeRegionUrlMapRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return NameFromSelfLinkStateFunc(v) + return tpgresource.NameFromSelfLinkStateFunc(v) } -func expandComputeRegionUrlMapDefaultService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) +func expandComputeRegionUrlMapDefaultService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for default_service: %s", err) } return f.RelativeLink(), nil } -func expandComputeRegionUrlMapDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapHostRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapHostRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -4793,21 +4830,21 @@ func expandComputeRegionUrlMapHostRule(v interface{}, d TerraformResourceData, c transformedDescription, err := expandComputeRegionUrlMapHostRuleDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedHosts, err := expandComputeRegionUrlMapHostRuleHosts(original["hosts"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHosts); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHosts); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hosts"] = 
transformedHosts } transformedPathMatcher, err := expandComputeRegionUrlMapHostRulePathMatcher(original["path_matcher"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathMatcher); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathMatcher); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathMatcher"] = transformedPathMatcher } @@ -4816,28 +4853,28 @@ func expandComputeRegionUrlMapHostRule(v interface{}, d TerraformResourceData, c return req, nil } -func expandComputeRegionUrlMapHostRuleDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapHostRuleDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapHostRuleHosts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapHostRuleHosts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandComputeRegionUrlMapHostRulePathMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapHostRulePathMatcher(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcher(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4850,42 +4887,42 @@ func expandComputeRegionUrlMapPathMatcher(v interface{}, d TerraformResourceData transformedDefaultService, err := expandComputeRegionUrlMapPathMatcherDefaultService(original["default_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDefaultService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDefaultService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["defaultService"] = transformedDefaultService } transformedDescription, err := expandComputeRegionUrlMapPathMatcherDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedName, err := expandComputeRegionUrlMapPathMatcherName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedRouteRules, err := expandComputeRegionUrlMapPathMatcherRouteRules(original["route_rules"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRouteRules); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRouteRules); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["routeRules"] = transformedRouteRules } transformedPathRule, err := expandComputeRegionUrlMapPathMatcherPathRule(original["path_rule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathRule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathRule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathRules"] = transformedPathRule } transformedDefaultUrlRedirect, err := expandComputeRegionUrlMapPathMatcherDefaultUrlRedirect(original["default_url_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDefaultUrlRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDefaultUrlRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["defaultUrlRedirect"] = transformedDefaultUrlRedirect } @@ -4894,23 +4931,23 @@ func expandComputeRegionUrlMapPathMatcher(v interface{}, d TerraformResourceData return req, nil } -func expandComputeRegionUrlMapPathMatcherDefaultService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) +func expandComputeRegionUrlMapPathMatcherDefaultService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for default_service: %s", err) } return f.RelativeLink(), nil } -func expandComputeRegionUrlMapPathMatcherDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4923,42 +4960,42 @@ func expandComputeRegionUrlMapPathMatcherRouteRules(v interface{}, d TerraformRe transformedPriority, err := expandComputeRegionUrlMapPathMatcherRouteRulesPriority(original["priority"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPriority); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPriority); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["priority"] = transformedPriority } transformedService, err := expandComputeRegionUrlMapPathMatcherRouteRulesService(original["service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["service"] = transformedService } transformedHeaderAction, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderAction(original["header_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["headerAction"] = transformedHeaderAction } transformedMatchRules, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRules(original["match_rules"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMatchRules); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMatchRules); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["matchRules"] = transformedMatchRules } transformedRouteAction, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteAction(original["route_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRouteAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRouteAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["routeAction"] = transformedRouteAction } transformedUrlRedirect, err := expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirect(original["url_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRedirect"] = transformedUrlRedirect } @@ -4967,19 +5004,19 @@ func expandComputeRegionUrlMapPathMatcherRouteRules(v interface{}, d TerraformRe return req, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesPriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, 
true) +func expandComputeRegionUrlMapPathMatcherRouteRulesService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for service: %s", err) } return f.RelativeLink(), nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -4991,35 +5028,35 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d transformedRequestHeadersToAdd, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd } transformedRequestHeadersToRemove, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove } transformedResponseHeadersToAdd, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd } transformedResponseHeadersToRemove, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5032,21 +5069,21 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToA transformedHeaderName, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } 
transformedHeaderValue, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -5055,23 +5092,23 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToA return req, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5084,21 +5121,21 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersTo transformedHeaderName, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = 
transformedHeaderValue } transformedReplace, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -5107,23 +5144,23 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersTo return req, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5136,49 +5173,49 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d T transformedFullPathMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(original["full_path_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFullPathMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFullPathMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fullPathMatch"] = transformedFullPathMatch } transformedHeaderMatches, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(original["header_matches"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderMatches); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderMatches); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerMatches"] = transformedHeaderMatches } transformedIgnoreCase, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(original["ignore_case"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIgnoreCase); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIgnoreCase); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ignoreCase"] = transformedIgnoreCase } transformedMetadataFilters, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(original["metadata_filters"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMetadataFilters); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMetadataFilters); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["metadataFilters"] = transformedMetadataFilters } transformedPrefixMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(original["prefix_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixMatch"] = transformedPrefixMatch } transformedQueryParameterMatches, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(original["query_parameter_matches"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedQueryParameterMatches); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedQueryParameterMatches); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["queryParameterMatches"] = transformedQueryParameterMatches } transformedRegexMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(original["regex_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRegexMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRegexMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["regexMatch"] = transformedRegexMatch } @@ -5187,11 +5224,11 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d T return req, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5204,56 +5241,56 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v int transformedExactMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(original["exact_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExactMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExactMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exactMatch"] = transformedExactMatch } transformedHeaderName, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedInvertMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(original["invert_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInvertMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInvertMatch); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["invertMatch"] = transformedInvertMatch } transformedPrefixMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(original["prefix_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixMatch"] = transformedPrefixMatch } transformedPresentMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(original["present_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPresentMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPresentMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["presentMatch"] = transformedPresentMatch } transformedRangeMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(original["range_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRangeMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRangeMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["rangeMatch"] = transformedRangeMatch } transformedRegexMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(original["regex_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRegexMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRegexMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["regexMatch"] = transformedRegexMatch } transformedSuffixMatch, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(original["suffix_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSuffixMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSuffixMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["suffixMatch"] = transformedSuffixMatch } @@ -5262,27 +5299,27 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v int return req, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5294,41 +5331,41 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeM transformedRangeEnd, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(original["range_end"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRangeEnd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRangeEnd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["rangeEnd"] = transformedRangeEnd } transformedRangeStart, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(original["range_start"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRangeStart); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRangeStart); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["rangeStart"] = transformedRangeStart } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5341,14 +5378,14 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v i transformedFilterLabels, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(original["filter_labels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFilterLabels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFilterLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["filterLabels"] = transformedFilterLabels } transformedFilterMatchCriteria, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(original["filter_match_criteria"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFilterMatchCriteria); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFilterMatchCriteria); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["filterMatchCriteria"] = transformedFilterMatchCriteria } @@ -5357,7 +5394,7 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v i return req, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5370,14 +5407,14 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilt transformedName, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedValue, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(original["value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["value"] = transformedValue } @@ -5386,23 +5423,23 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilt return req, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5415,28 +5452,28 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatch transformedExactMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(original["exact_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExactMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExactMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exactMatch"] = transformedExactMatch } transformedName, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedPresentMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(original["present_match"], d, config) 
if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPresentMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPresentMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["presentMatch"] = transformedPresentMatch } transformedRegexMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(original["regex_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRegexMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRegexMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["regexMatch"] = transformedRegexMatch } @@ -5445,27 +5482,27 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatch return req, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5477,56 +5514,56 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d transformedCorsPolicy, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(original["cors_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["corsPolicy"] = transformedCorsPolicy } transformedFaultInjectionPolicy, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(original["fault_injection_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["faultInjectionPolicy"] = transformedFaultInjectionPolicy } transformedRequestMirrorPolicy, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(original["request_mirror_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestMirrorPolicy"] = transformedRequestMirrorPolicy } transformedRetryPolicy, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(original["retry_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryPolicy"] = transformedRetryPolicy } transformedTimeout, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeout(original["timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeout"] = transformedTimeout } transformedUrlRewrite, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(original["url_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRewrite"] = transformedUrlRewrite } transformedWeightedBackendServices, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(original["weighted_backend_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weightedBackendServices"] = transformedWeightedBackendServices } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5538,95 +5575,95 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v inter transformedAllowCredentials, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowCredentials"] = transformedAllowCredentials } transformedAllowHeaders, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowHeaders"] = transformedAllowHeaders } transformedAllowMethods, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowMethods"] = transformedAllowMethods } transformedAllowOriginRegexes, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(original["allow_origin_regexes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOriginRegexes"] = transformedAllowOriginRegexes } transformedAllowOrigins, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOrigins"] = transformedAllowOrigins } transformedDisabled, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(original["disabled"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disabled"] = transformedDisabled } transformedExposeHeaders, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exposeHeaders"] = transformedExposeHeaders } transformedMaxAge, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(original["max_age"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxAge"] = transformedMaxAge } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5638,21 +5675,21 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPoli transformedAbort, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["abort"] = transformedAbort } transformedDelay, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["delay"] = transformedDelay } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5664,29 +5701,29 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPoli transformedHttpStatus, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(original["http_status"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpStatus); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpStatus); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["httpStatus"] = transformedHttpStatus } transformedPercentage, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5698,21 +5735,21 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPoli transformedFixedDelay, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixed_delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFixedDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFixedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fixedDelay"] = transformedFixedDelay } transformedPercentage, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5724,33 +5761,33 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPoli transformedNanos, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } transformedSeconds, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5762,22 +5799,22 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolic transformedBackendService, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for backend_service: %s", err) } return f.RelativeLink(), nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5789,32 +5826,32 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v inte transformedNumRetries, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(original["num_retries"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["numRetries"] = transformedNumRetries } transformedPerTryTimeout, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(original["per_try_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["perTryTimeout"] = transformedPerTryTimeout } transformedRetryConditions, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(original["retry_conditions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryConditions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryConditions"] = transformedRetryConditions } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5826,33 +5863,33 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryT transformedNanos, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } transformedSeconds, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5864,29 +5901,29 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeout(v interfac transformedNanos, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } transformedSeconds, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5898,29 +5935,29 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v inter transformedHostRewrite, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRewrite"] = transformedHostRewrite } transformedPathPrefixRewrite, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5933,21 +5970,21 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendSer transformedBackendService, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } transformedHeaderAction, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(original["header_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["headerAction"] = transformedHeaderAction } transformedWeight, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(original["weight"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weight"] = transformedWeight } @@ -5956,15 +5993,15 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendSer return req, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for backend_service: %s", err) } return f.RelativeLink(), nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5976,35 +6013,35 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendSer transformedRequestHeadersToAdd, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd } transformedRequestHeadersToRemove, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove } transformedResponseHeadersToAdd, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd } transformedResponseHeadersToRemove, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() 
&& !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -6017,21 +6054,21 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendSer transformedHeaderName, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -6040,23 +6077,23 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendSer return req, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -6069,21 +6106,21 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendSer transformedHeaderName, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if 
val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -6092,27 +6129,27 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendSer return req, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6124,73 +6161,73 @@ func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d transformedHostRedirect, err := expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(original["host_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRedirect"] = transformedHostRedirect } transformedHttpsRedirect, err := expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(original["https_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpsRedirect"] = transformedHttpsRedirect } transformedPathRedirect, err := expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(original["path_redirect"], d, config) 
if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathRedirect"] = transformedPathRedirect } transformedPrefixRedirect, err := expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixRedirect"] = transformedPrefixRedirect } transformedRedirectResponseCode, err := expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["redirectResponseCode"] = transformedRedirectResponseCode } transformedStripQuery, err := expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(original["strip_query"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["stripQuery"] = transformedStripQuery } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeRegionUrlMapPathMatcherPathRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -6203,28 +6240,28 @@ func expandComputeRegionUrlMapPathMatcherPathRule(v interface{}, d TerraformReso transformedService, err := expandComputeRegionUrlMapPathMatcherPathRuleService(original["service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["service"] = transformedService } transformedPaths, err := expandComputeRegionUrlMapPathMatcherPathRulePaths(original["paths"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPaths); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPaths); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["paths"] = transformedPaths } transformedRouteAction, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteAction(original["route_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRouteAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRouteAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["routeAction"] = transformedRouteAction } transformedUrlRedirect, err := expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirect(original["url_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRedirect"] = transformedUrlRedirect } @@ -6233,20 +6270,20 @@ func 
expandComputeRegionUrlMapPathMatcherPathRule(v interface{}, d TerraformReso return req, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) +func expandComputeRegionUrlMapPathMatcherPathRuleService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for service: %s", err) } return f.RelativeLink(), nil } -func expandComputeRegionUrlMapPathMatcherPathRulePaths(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRulePaths(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6258,56 +6295,56 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteAction(v interface{}, d Te transformedCorsPolicy, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicy(original["cors_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["corsPolicy"] = transformedCorsPolicy } 
transformedFaultInjectionPolicy, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(original["fault_injection_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["faultInjectionPolicy"] = transformedFaultInjectionPolicy } transformedRequestMirrorPolicy, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(original["request_mirror_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestMirrorPolicy"] = transformedRequestMirrorPolicy } transformedRetryPolicy, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicy(original["retry_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryPolicy"] = transformedRetryPolicy } transformedTimeout, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeout(original["timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeout"] = transformedTimeout } transformedUrlRewrite, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewrite(original["url_rewrite"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRewrite"] = transformedUrlRewrite } transformedWeightedBackendServices, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(original["weighted_backend_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weightedBackendServices"] = transformedWeightedBackendServices } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6319,95 +6356,95 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interfa transformedAllowCredentials, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowCredentials"] = transformedAllowCredentials } transformedAllowHeaders, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowHeaders); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowHeaders"] = transformedAllowHeaders } transformedAllowMethods, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowMethods"] = transformedAllowMethods } transformedAllowOriginRegexes, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(original["allow_origin_regexes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOriginRegexes"] = transformedAllowOriginRegexes } transformedAllowOrigins, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOrigins"] = transformedAllowOrigins } transformedDisabled, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(original["disabled"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disabled"] = 
transformedDisabled } transformedExposeHeaders, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exposeHeaders"] = transformedExposeHeaders } transformedMaxAge, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(original["max_age"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxAge"] = transformedMaxAge } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) 
{ return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6419,21 +6456,21 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy transformedAbort, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["abort"] = transformedAbort } transformedDelay, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["delay"] = transformedDelay } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6445,29 +6482,29 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy transformedHttpStatus, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(original["http_status"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedHttpStatus); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpStatus); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpStatus"] = transformedHttpStatus } transformedPercentage, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6479,21 +6516,21 @@ func 
expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy transformedFixedDelay, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixed_delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFixedDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFixedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fixedDelay"] = transformedFixedDelay } transformedPercentage, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6505,33 +6542,33 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy transformedNanos, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } transformedSeconds, err := 
expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := 
v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6543,22 +6580,22 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy( transformedBackendService, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for backend_service: %s", err) } return f.RelativeLink(), nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6570,32 +6607,32 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interf transformedNumRetries, err := 
expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(original["num_retries"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["numRetries"] = transformedNumRetries } transformedPerTryTimeout, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(original["per_try_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["perTryTimeout"] = transformedPerTryTimeout } transformedRetryConditions, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(original["retry_conditions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryConditions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryConditions"] = transformedRetryConditions } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6607,33 +6644,33 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTim transformedNanos, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } transformedSeconds, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6645,29 +6682,29 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{ transformedNanos, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } transformedSeconds, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6679,29 +6716,29 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interfa transformedHostRewrite, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRewrite"] = transformedHostRewrite } transformedPathPrefixRewrite, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -6714,21 +6751,21 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServi transformedBackendService, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } transformedHeaderAction, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(original["header_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["headerAction"] = transformedHeaderAction } transformedWeight, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(original["weight"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weight"] = transformedWeight } @@ -6737,15 +6774,15 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServi return req, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for backend_service: %s", err) } return f.RelativeLink(), nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6757,35 +6794,35 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServi transformedRequestHeadersToAdd, err := 
expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd } transformedRequestHeadersToRemove, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove } transformedResponseHeadersToAdd, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd } transformedResponseHeadersToRemove, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -6798,21 +6835,21 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServi transformedHeaderName, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -6821,23 +6858,23 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServi return req, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -6850,21 +6887,21 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServi transformedHeaderName, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -6873,27 +6910,27 @@ func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServi return req, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6905,73 +6942,73 @@ func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d Te transformedHostRedirect, err := expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(original["host_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRedirect"] = transformedHostRedirect } transformedHttpsRedirect, err := expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(original["https_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpsRedirect"] = transformedHttpsRedirect } transformedPathRedirect, err := expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(original["path_redirect"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathRedirect"] = transformedPathRedirect } transformedPrefixRedirect, err := expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixRedirect"] = transformedPrefixRedirect } transformedRedirectResponseCode, err := expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["redirectResponseCode"] = transformedRedirectResponseCode } transformedStripQuery, err := expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectStripQuery(original["strip_query"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["stripQuery"] = transformedStripQuery } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectStripQuery(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6983,73 +7020,73 @@ func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d Ter transformedHostRedirect, err := expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectHostRedirect(original["host_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRedirect"] = transformedHostRedirect } transformedHttpsRedirect, err := expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(original["https_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpsRedirect"] = transformedHttpsRedirect } transformedPathRedirect, err := expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectPathRedirect(original["path_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathRedirect"] = transformedPathRedirect } transformedPrefixRedirect, err := expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixRedirect"] = 
transformedPrefixRedirect } transformedRedirectResponseCode, err := expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["redirectResponseCode"] = transformedRedirectResponseCode } transformedStripQuery, err := expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectStripQuery(original["strip_query"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["stripQuery"] = transformedStripQuery } return transformed, nil } -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectHostRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectPathRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return 
v, nil } -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectStripQuery(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapTest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapTest(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7062,28 +7099,28 @@ func expandComputeRegionUrlMapTest(v interface{}, d TerraformResourceData, confi transformedDescription, err := expandComputeRegionUrlMapTestDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedHost, err := expandComputeRegionUrlMapTestHost(original["host"], d, config) 
if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["host"] = transformedHost } transformedPath, err := expandComputeRegionUrlMapTestPath(original["path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["path"] = transformedPath } transformedService, err := expandComputeRegionUrlMapTestService(original["service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["service"] = transformedService } @@ -7092,27 +7129,27 @@ func expandComputeRegionUrlMapTest(v interface{}, d TerraformResourceData, confi return req, nil } -func expandComputeRegionUrlMapTestDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapTestDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapTestHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapTestHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapTestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapTestPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeRegionUrlMapTestService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) +func expandComputeRegionUrlMapTestService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for service: %s", err) } return f.RelativeLink(), nil } -func expandComputeRegionUrlMapDefaultUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultUrlRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7124,73 +7161,73 @@ func expandComputeRegionUrlMapDefaultUrlRedirect(v interface{}, d TerraformResou transformedHostRedirect, err := expandComputeRegionUrlMapDefaultUrlRedirectHostRedirect(original["host_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRedirect"] = transformedHostRedirect } transformedHttpsRedirect, err := expandComputeRegionUrlMapDefaultUrlRedirectHttpsRedirect(original["https_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpsRedirect"] = transformedHttpsRedirect } transformedPathRedirect, err := 
expandComputeRegionUrlMapDefaultUrlRedirectPathRedirect(original["path_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathRedirect"] = transformedPathRedirect } transformedPrefixRedirect, err := expandComputeRegionUrlMapDefaultUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixRedirect"] = transformedPrefixRedirect } transformedRedirectResponseCode, err := expandComputeRegionUrlMapDefaultUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["redirectResponseCode"] = transformedRedirectResponseCode } transformedStripQuery, err := expandComputeRegionUrlMapDefaultUrlRedirectStripQuery(original["strip_query"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["stripQuery"] = transformedStripQuery } return transformed, nil } -func expandComputeRegionUrlMapDefaultUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultUrlRedirectHostRedirect(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultUrlRedirectHttpsRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultUrlRedirectPathRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultUrlRedirectPrefixRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultUrlRedirectRedirectResponseCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultUrlRedirectStripQuery(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteAction(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7202,56 +7239,56 @@ func expandComputeRegionUrlMapDefaultRouteAction(v interface{}, d TerraformResou transformedWeightedBackendServices, err := expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServices(original["weighted_backend_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weightedBackendServices"] = transformedWeightedBackendServices } transformedUrlRewrite, err := expandComputeRegionUrlMapDefaultRouteActionUrlRewrite(original["url_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRewrite"] = transformedUrlRewrite } transformedTimeout, err := expandComputeRegionUrlMapDefaultRouteActionTimeout(original["timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeout"] = transformedTimeout } transformedRetryPolicy, err := expandComputeRegionUrlMapDefaultRouteActionRetryPolicy(original["retry_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryPolicy"] = transformedRetryPolicy } transformedRequestMirrorPolicy, err := 
expandComputeRegionUrlMapDefaultRouteActionRequestMirrorPolicy(original["request_mirror_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestMirrorPolicy"] = transformedRequestMirrorPolicy } transformedCorsPolicy, err := expandComputeRegionUrlMapDefaultRouteActionCorsPolicy(original["cors_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["corsPolicy"] = transformedCorsPolicy } transformedFaultInjectionPolicy, err := expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicy(original["fault_injection_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["faultInjectionPolicy"] = transformedFaultInjectionPolicy } return transformed, nil } -func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7264,21 +7301,21 @@ func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServices(v interf transformedBackendService, err := 
expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } transformedWeight, err := expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesWeight(original["weight"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weight"] = transformedWeight } transformedHeaderAction, err := expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(original["header_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerAction"] = transformedHeaderAction } @@ -7287,19 +7324,19 @@ func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServices(v interf return req, nil } -func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) if 
err != nil { return nil, fmt.Errorf("Invalid value for backend_service: %s", err) } return f.RelativeLink(), nil } -func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7311,39 +7348,39 @@ func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderAct transformedRequestHeadersToRemove, err := expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove } transformedRequestHeadersToAdd, err := expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd } transformedResponseHeadersToRemove, err := expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove } transformedResponseHeadersToAdd, err := expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd } return transformed, nil } -func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := 
make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7356,21 +7393,21 @@ func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderAct transformedHeaderName, err := expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -7379,23 +7416,23 @@ func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderAct return req, nil } -func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7408,21 +7445,21 @@ func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderAct transformedHeaderName, err := 
expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -7431,19 +7468,19 @@ func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderAct return req, nil } -func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionUrlRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7455,29 +7492,29 @@ func expandComputeRegionUrlMapDefaultRouteActionUrlRewrite(v interface{}, d Terr transformedPathPrefixRewrite, err := expandComputeRegionUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite } transformedHostRewrite, err := expandComputeRegionUrlMapDefaultRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRewrite"] = transformedHostRewrite } return transformed, nil } -func expandComputeRegionUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7489,29 +7526,29 @@ func expandComputeRegionUrlMapDefaultRouteActionTimeout(v interface{}, d Terrafo transformedSeconds, err := expandComputeRegionUrlMapDefaultRouteActionTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandComputeRegionUrlMapDefaultRouteActionTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeRegionUrlMapDefaultRouteActionTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionRetryPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7523,36 +7560,36 @@ func expandComputeRegionUrlMapDefaultRouteActionRetryPolicy(v interface{}, d Ter transformedRetryConditions, err := expandComputeRegionUrlMapDefaultRouteActionRetryPolicyRetryConditions(original["retry_conditions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryConditions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryConditions"] = transformedRetryConditions } transformedNumRetries, err := expandComputeRegionUrlMapDefaultRouteActionRetryPolicyNumRetries(original["num_retries"], d, config) if err != nil { return nil, err - 
} else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["numRetries"] = transformedNumRetries } transformedPerTryTimeout, err := expandComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(original["per_try_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["perTryTimeout"] = transformedPerTryTimeout } return transformed, nil } -func expandComputeRegionUrlMapDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionRetryPolicyNumRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionRetryPolicyNumRetries(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7564,29 +7601,29 @@ func expandComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v inter transformedSeconds, err := 
expandComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionRequestMirrorPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionRequestMirrorPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7598,22 +7635,22 @@ func 
expandComputeRegionUrlMapDefaultRouteActionRequestMirrorPolicy(v interface{ transformedBackendService, err := expandComputeRegionUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } return transformed, nil } -func expandComputeRegionUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) +func expandComputeRegionUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for backend_service: %s", err) } return f.RelativeLink(), nil } -func expandComputeRegionUrlMapDefaultRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionCorsPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7625,95 +7662,95 @@ func expandComputeRegionUrlMapDefaultRouteActionCorsPolicy(v interface{}, d Terr transformedAllowOrigins, err := expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOrigins"] = transformedAllowOrigins } transformedAllowOriginRegexes, err := expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(original["allow_origin_regexes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOriginRegexes"] = transformedAllowOriginRegexes } transformedAllowMethods, err := expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowMethods"] = transformedAllowMethods } transformedAllowHeaders, err := expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowHeaders"] = transformedAllowHeaders } transformedExposeHeaders, err := expandComputeRegionUrlMapDefaultRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exposeHeaders"] = transformedExposeHeaders } transformedMaxAge, err := 
expandComputeRegionUrlMapDefaultRouteActionCorsPolicyMaxAge(original["max_age"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxAge"] = transformedMaxAge } transformedAllowCredentials, err := expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowCredentials"] = transformedAllowCredentials } transformedDisabled, err := expandComputeRegionUrlMapDefaultRouteActionCorsPolicyDisabled(original["disabled"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disabled"] = transformedDisabled } return transformed, nil } -func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyMaxAge(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionCorsPolicyDisabled(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7725,21 +7762,21 @@ func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicy(v interface transformedDelay, err := expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["delay"] = transformedDelay } transformedAbort, err := expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["abort"] = transformedAbort } return transformed, nil } -func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7751,21 +7788,21 @@ func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v inte transformedFixedDelay, err := 
expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixed_delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFixedDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFixedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fixedDelay"] = transformedFixedDelay } transformedPercentage, err := expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } return transformed, nil } -func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7777,33 +7814,33 @@ func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDe transformedSeconds, err := expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) if err != nil { return nil, err - } else 
if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7815,30 +7852,30 @@ func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v inte transformedHttpStatus, err := 
expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(original["http_status"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpStatus); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpStatus); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpStatus"] = transformedHttpStatus } transformedPercentage, err := expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } return transformed, nil } -func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeRegionUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeRegionUrlMapRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) +func expandComputeRegionUrlMapRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := 
tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for region: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_url_map_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_url_map_sweeper.go new file mode 100644 index 0000000000..03b1805788 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_region_url_map_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRegionUrlMap", testSweepComputeRegionUrlMap) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRegionUrlMap(region string) error { + resourceName := "ComputeRegionUrlMap" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/urlMaps", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/urlMaps/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_reservation.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_reservation.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_reservation.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_reservation.go index 0d875a0f49..a3f3f2c556 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_reservation.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_reservation.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "fmt" @@ -25,6 +28,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceComputeReservation() *schema.Resource { @@ -129,7 +136,7 @@ reserves disks of type 'local-ssd'.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"SCSI", "NVME", ""}), + ValidateFunc: verify.ValidateEnum([]string{"SCSI", "NVME", ""}), Description: `The disk interface to use for attaching this disk. 
Default value: "SCSI" Possible values: ["SCSI", "NVME"]`, Default: "SCSI", }, @@ -161,7 +168,7 @@ for information on available CPU platforms.`, Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The zone where the reservation is made.`, }, "description": { @@ -200,7 +207,7 @@ for information on available CPU platforms.`, Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"LOCAL", "SPECIFIC_PROJECTS", ""}), + ValidateFunc: verify.ValidateEnum([]string{"LOCAL", "SPECIFIC_PROJECTS", ""}), Description: `Type of sharing for this shared-reservation Possible values: ["LOCAL", "SPECIFIC_PROJECTS"]`, }, }, @@ -247,8 +254,8 @@ reservations that are tied to a commitment.`, } func resourceComputeReservationCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -257,41 +264,41 @@ func resourceComputeReservationCreate(d *schema.ResourceData, meta interface{}) descriptionProp, err := expandComputeReservationDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } nameProp, err := expandComputeReservationName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok 
:= d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } specificReservationRequiredProp, err := expandComputeReservationSpecificReservationRequired(d.Get("specific_reservation_required"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("specific_reservation_required"); !isEmptyValue(reflect.ValueOf(specificReservationRequiredProp)) && (ok || !reflect.DeepEqual(v, specificReservationRequiredProp)) { + } else if v, ok := d.GetOkExists("specific_reservation_required"); !tpgresource.IsEmptyValue(reflect.ValueOf(specificReservationRequiredProp)) && (ok || !reflect.DeepEqual(v, specificReservationRequiredProp)) { obj["specificReservationRequired"] = specificReservationRequiredProp } shareSettingsProp, err := expandComputeReservationShareSettings(d.Get("share_settings"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("share_settings"); !isEmptyValue(reflect.ValueOf(shareSettingsProp)) && (ok || !reflect.DeepEqual(v, shareSettingsProp)) { + } else if v, ok := d.GetOkExists("share_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(shareSettingsProp)) && (ok || !reflect.DeepEqual(v, shareSettingsProp)) { obj["shareSettings"] = shareSettingsProp } specificReservationProp, err := expandComputeReservationSpecificReservation(d.Get("specific_reservation"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("specific_reservation"); !isEmptyValue(reflect.ValueOf(specificReservationProp)) && (ok || !reflect.DeepEqual(v, specificReservationProp)) { + } else if v, ok := d.GetOkExists("specific_reservation"); !tpgresource.IsEmptyValue(reflect.ValueOf(specificReservationProp)) && (ok || !reflect.DeepEqual(v, specificReservationProp)) { obj["specificReservation"] = specificReservationProp } zoneProp, err := expandComputeReservationZone(d.Get("zone"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + } else if v, ok := d.GetOkExists("zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { obj["zone"] = zoneProp } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations") if err != nil { return err } @@ -299,24 +306,32 @@ func resourceComputeReservationCreate(d *schema.ResourceData, meta interface{}) log.Printf("[DEBUG] Creating new Reservation: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Reservation: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Reservation: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/reservations/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/reservations/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -338,33 +353,39 @@ func resourceComputeReservationCreate(d *schema.ResourceData, meta interface{}) } func 
resourceComputeReservationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Reservation: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeReservation %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeReservation %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -395,7 +416,7 @@ func resourceComputeReservationRead(d *schema.ResourceData, meta interface{}) er if err := d.Set("zone", flattenComputeReservationZone(res["zone"], d, config)); err != nil { return fmt.Errorf("Error reading Reservation: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { return 
fmt.Errorf("Error reading Reservation: %s", err) } @@ -403,15 +424,15 @@ func resourceComputeReservationRead(d *schema.ResourceData, meta interface{}) er } func resourceComputeReservationUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Reservation: %s", err) } @@ -421,7 +442,7 @@ func resourceComputeReservationUpdate(d *schema.ResourceData, meta interface{}) shareSettingsProp, err := expandComputeReservationShareSettings(d.Get("share_settings"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("share_settings"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, shareSettingsProp)) { + } else if v, ok := d.GetOkExists("share_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, shareSettingsProp)) { obj["shareSettings"] = shareSettingsProp } @@ -430,7 +451,7 @@ func resourceComputeReservationUpdate(d *schema.ResourceData, meta interface{}) return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}") if err != nil { return err } @@ -441,14 +462,14 @@ func resourceComputeReservationUpdate(d *schema.ResourceData, meta interface{}) if d.HasChange("share_settings") { updateMask = append(updateMask, "shareSettings") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set 
it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } if d.HasChange("share_settings") { - url, err = replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}") + url, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}") if err != nil { return err } @@ -460,13 +481,21 @@ func resourceComputeReservationUpdate(d *schema.ResourceData, meta interface{}) } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } // if updateMask is empty we are not updating anything so skip the post if len(updateMask) > 0 { - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Reservation %q: %s", d.Id(), err) @@ -490,7 +519,7 @@ func resourceComputeReservationUpdate(d *schema.ResourceData, meta interface{}) specificReservationProp, err := expandComputeReservationSpecificReservation(d.Get("specific_reservation"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("specific_reservation"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, specificReservationProp)) { + } else if v, ok := d.GetOkExists("specific_reservation"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, specificReservationProp)) { 
obj["specificReservation"] = specificReservationProp } @@ -499,13 +528,13 @@ func resourceComputeReservationUpdate(d *schema.ResourceData, meta interface{}) return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}/resize") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}/resize") if err != nil { return err } if d.HasChange("share_settings") { - url, err = replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}") + url, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}") if err != nil { return err } @@ -517,11 +546,19 @@ func resourceComputeReservationUpdate(d *schema.ResourceData, meta interface{}) } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Reservation %q: %s", d.Id(), err) } else { @@ -542,21 +579,21 @@ func resourceComputeReservationUpdate(d *schema.ResourceData, meta interface{}) } func resourceComputeReservationDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - 
project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Reservation: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}") if err != nil { return err } @@ -565,13 +602,21 @@ func resourceComputeReservationDelete(d *schema.ResourceData, meta interface{}) log.Printf("[DEBUG] Deleting Reservation %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "Reservation") + return transport_tpg.HandleNotFoundError(err, d, "Reservation") } err = ComputeOperationWaitTime( @@ -587,8 +632,8 @@ func resourceComputeReservationDelete(d *schema.ResourceData, meta interface{}) } func resourceComputeReservationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/zones/(?P[^/]+)/reservations/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -598,7 +643,7 @@ func resourceComputeReservationImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the 
resource id - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/reservations/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/reservations/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -607,31 +652,31 @@ func resourceComputeReservationImport(d *schema.ResourceData, meta interface{}) return []*schema.ResourceData{d}, nil } -func flattenComputeReservationCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeReservationDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeReservationName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeReservationCommitment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationCommitment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeReservationSpecificReservationRequired(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationSpecificReservationRequired(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeReservationStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeReservationSpecificReservation(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationSpecificReservation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -648,10 +693,10 @@ func flattenComputeReservationSpecificReservation(v interface{}, d *schema.Resou flattenComputeReservationSpecificReservationInstanceProperties(original["instanceProperties"], d, config) return []interface{}{transformed} } -func flattenComputeReservationSpecificReservationCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationSpecificReservationCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -665,10 +710,10 @@ func flattenComputeReservationSpecificReservationCount(v interface{}, d *schema. 
return v // let terraform core handle it otherwise } -func flattenComputeReservationSpecificReservationInUseCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationSpecificReservationInUseCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -682,7 +727,7 @@ func flattenComputeReservationSpecificReservationInUseCount(v interface{}, d *sc return v // let terraform core handle it otherwise } -func flattenComputeReservationSpecificReservationInstanceProperties(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationSpecificReservationInstanceProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -701,15 +746,15 @@ func flattenComputeReservationSpecificReservationInstanceProperties(v interface{ flattenComputeReservationSpecificReservationInstancePropertiesLocalSsds(original["localSsds"], d, config) return []interface{}{transformed} } -func flattenComputeReservationSpecificReservationInstancePropertiesMachineType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationSpecificReservationInstancePropertiesMachineType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeReservationSpecificReservationInstancePropertiesMinCpuPlatform(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationSpecificReservationInstancePropertiesMinCpuPlatform(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeReservationSpecificReservationInstancePropertiesGuestAccelerators(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationSpecificReservationInstancePropertiesGuestAccelerators(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -728,14 +773,14 @@ func flattenComputeReservationSpecificReservationInstancePropertiesGuestAccelera } return transformed } -func flattenComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -749,7 +794,7 @@ func flattenComputeReservationSpecificReservationInstancePropertiesGuestAccelera return v // let terraform core handle it otherwise } -func flattenComputeReservationSpecificReservationInstancePropertiesLocalSsds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationSpecificReservationInstancePropertiesLocalSsds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -768,14 +813,14 @@ func flattenComputeReservationSpecificReservationInstancePropertiesLocalSsds(v i } return transformed } -func 
flattenComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterface(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterface(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeReservationSpecificReservationInstancePropertiesLocalSsdsDiskSizeGb(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationSpecificReservationInstancePropertiesLocalSsdsDiskSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -789,26 +834,26 @@ func flattenComputeReservationSpecificReservationInstancePropertiesLocalSsdsDisk return v // let terraform core handle it otherwise } -func flattenComputeReservationZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeReservationZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func expandComputeReservationDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeReservationName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeReservationSpecificReservationRequired(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationSpecificReservationRequired(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeReservationShareSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationShareSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -820,25 +865,25 @@ func expandComputeReservationShareSettings(v interface{}, d TerraformResourceDat transformedShareType, err := expandComputeReservationShareSettingsShareType(original["share_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedShareType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedShareType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["shareType"] = transformedShareType } transformedProjectMap, err := expandComputeReservationShareSettingsProjectMap(original["project_map"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProjectMap); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProjectMap); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projectMap"] = transformedProjectMap } return transformed, nil } -func expandComputeReservationShareSettingsShareType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationShareSettingsShareType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeReservationShareSettingsProjectMap(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { +func 
expandComputeReservationShareSettingsProjectMap(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { if v == nil { return map[string]interface{}{}, nil } @@ -850,11 +895,11 @@ func expandComputeReservationShareSettingsProjectMap(v interface{}, d TerraformR transformedProjectId, err := expandComputeReservationShareSettingsProjectMapProjectId(original["project_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["projectId"] = transformedProjectId } - transformedId, err := expandString(original["id"], d, config) + transformedId, err := tpgresource.ExpandString(original["id"], d, config) if err != nil { return nil, err } @@ -863,11 +908,11 @@ func expandComputeReservationShareSettingsProjectMap(v interface{}, d TerraformR return m, nil } -func expandComputeReservationShareSettingsProjectMapProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationShareSettingsProjectMapProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeReservationSpecificReservation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationSpecificReservation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -879,36 +924,36 @@ func expandComputeReservationSpecificReservation(v interface{}, d TerraformResou transformedCount, err := expandComputeReservationSpecificReservationCount(original["count"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCount); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["count"] = transformedCount } transformedInUseCount, err := expandComputeReservationSpecificReservationInUseCount(original["in_use_count"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInUseCount); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInUseCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["inUseCount"] = transformedInUseCount } transformedInstanceProperties, err := expandComputeReservationSpecificReservationInstanceProperties(original["instance_properties"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInstanceProperties); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInstanceProperties); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["instanceProperties"] = transformedInstanceProperties } return transformed, nil } -func expandComputeReservationSpecificReservationCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationSpecificReservationCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeReservationSpecificReservationInUseCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationSpecificReservationInUseCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeReservationSpecificReservationInstanceProperties(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationSpecificReservationInstanceProperties(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -920,43 +965,43 @@ func expandComputeReservationSpecificReservationInstanceProperties(v interface{} transformedMachineType, err := expandComputeReservationSpecificReservationInstancePropertiesMachineType(original["machine_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMachineType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMachineType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["machineType"] = transformedMachineType } transformedMinCpuPlatform, err := expandComputeReservationSpecificReservationInstancePropertiesMinCpuPlatform(original["min_cpu_platform"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinCpuPlatform); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinCpuPlatform); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minCpuPlatform"] = transformedMinCpuPlatform } transformedGuestAccelerators, err := expandComputeReservationSpecificReservationInstancePropertiesGuestAccelerators(original["guest_accelerators"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGuestAccelerators); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGuestAccelerators); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["guestAccelerators"] = transformedGuestAccelerators } transformedLocalSsds, err := expandComputeReservationSpecificReservationInstancePropertiesLocalSsds(original["local_ssds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLocalSsds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLocalSsds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["localSsds"] = 
transformedLocalSsds } return transformed, nil } -func expandComputeReservationSpecificReservationInstancePropertiesMachineType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationSpecificReservationInstancePropertiesMachineType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeReservationSpecificReservationInstancePropertiesMinCpuPlatform(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationSpecificReservationInstancePropertiesMinCpuPlatform(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeReservationSpecificReservationInstancePropertiesGuestAccelerators(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationSpecificReservationInstancePropertiesGuestAccelerators(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -969,14 +1014,14 @@ func expandComputeReservationSpecificReservationInstancePropertiesGuestAccelerat transformedAcceleratorType, err := expandComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorType(original["accelerator_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAcceleratorType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAcceleratorType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["acceleratorType"] = transformedAcceleratorType } transformedAcceleratorCount, err := expandComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorCount(original["accelerator_count"], d, config) if err != nil { return nil, err 
- } else if val := reflect.ValueOf(transformedAcceleratorCount); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAcceleratorCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["acceleratorCount"] = transformedAcceleratorCount } @@ -985,15 +1030,15 @@ func expandComputeReservationSpecificReservationInstancePropertiesGuestAccelerat return req, nil } -func expandComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeReservationSpecificReservationInstancePropertiesLocalSsds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationSpecificReservationInstancePropertiesLocalSsds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1006,14 +1051,14 @@ func expandComputeReservationSpecificReservationInstancePropertiesLocalSsds(v in transformedInterface, err := expandComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterface(original["interface"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInterface); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInterface); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["interface"] = transformedInterface } transformedDiskSizeGb, err := expandComputeReservationSpecificReservationInstancePropertiesLocalSsdsDiskSizeGb(original["disk_size_gb"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDiskSizeGb); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDiskSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["diskSizeGb"] = transformedDiskSizeGb } @@ -1022,16 +1067,16 @@ func expandComputeReservationSpecificReservationInstancePropertiesLocalSsds(v in return req, nil } -func expandComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterface(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterface(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeReservationSpecificReservationInstancePropertiesLocalSsdsDiskSizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeReservationSpecificReservationInstancePropertiesLocalSsdsDiskSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeReservationZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) +func expandComputeReservationZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("zones", v.(string), "project", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for zone: %s", err) } @@ 
-1040,7 +1085,7 @@ func expandComputeReservationZone(v interface{}, d TerraformResourceData, config func resourceComputeReservationUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { newObj := make(map[string]interface{}) - config := meta.(*Config) + config := meta.(*transport_tpg.Config) maskId := "" firstProject := true urlUpdateMask := "" @@ -1050,14 +1095,14 @@ func resourceComputeReservationUpdateEncoder(d *schema.ResourceData, meta interf nameProp, err := expandComputeReservationName(d.Get("name"), d, config) if err != nil { return nil, fmt.Errorf("Invalid value for name: %s", err) - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { newObj["name"] = nameProp } // Get zone. zoneProp, err := expandComputeReservationZone(d.Get("zone"), d, config) if err != nil { return nil, fmt.Errorf("Invalid value for zone: %s", err) - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + } else if v, ok := d.GetOkExists("zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { newObj["zone"] = zoneProp } transformed := make(map[string]interface{}) @@ -1076,10 +1121,10 @@ func resourceComputeReservationUpdateEncoder(d *schema.ResourceData, meta interf singleProject := make(map[string]interface{}) // set up project_map. 
transformedProjectId := original["project_id"] - if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { + if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { singleProject["projectId"] = transformedProjectId } - transformedId, err := expandString(original["id"], d, config) + transformedId, err := tpgresource.ExpandString(original["id"], d, config) if err != nil { return nil, fmt.Errorf("Invalid value for id: %s", err) } @@ -1107,7 +1152,7 @@ func resourceComputeReservationUpdateEncoder(d *schema.ResourceData, meta interf _, err := strconv.Atoi(projectId) // convert id to number. if err != nil { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) project, err := config.NewResourceManagerClient(config.UserAgent).Projects.Get(projectId).Do() if err != nil { return nil, fmt.Errorf("Invalid value for projectId: %s", err) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_reservation_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_reservation_sweeper.go new file mode 100644 index 0000000000..8039b9740c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_reservation_sweeper.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeReservation", testSweepComputeReservation) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeReservation(region string) error { + resourceName := "ComputeReservation" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/aggregated/reservations", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + var rl []interface{} + zones := resourceList.(map[string]interface{}) + // Loop through every zone in the list response + for _, zonesValue := range zones { + zone := zonesValue.(map[string]interface{}) + for k, v := range zone { + // Zone map either has resources or a warning stating there were no resources found in the zone + if k != "warning" { + resourcesInZone := v.([]interface{}) + rl = append(rl, resourcesInZone...) + } + } + } + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/reservations/{{name}}" + if obj["zone"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource zone was nil", resourceName) + return nil + } + zone := tpgresource.GetResourceNameFromSelfLink(obj["zone"].(string)) + deleteTemplate = strings.Replace(deleteTemplate, "{{zone}}", zone, -1) + + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: 
"DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_resource_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resource_policy.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_resource_policy.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resource_policy.go index f3d83d7790..d910151fa5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_resource_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resource_policy.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "fmt" @@ -21,6 +24,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceComputeResourcePolicy() *schema.Resource { @@ -57,6 +64,24 @@ which cannot be a dash.`, ForceNew: true, Description: `An optional description of this resource. Provide this property when you create the resource.`, }, + "disk_consistency_group_policy": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Replication consistency group for asynchronous disk replication.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Enable disk consistency on the resource policy.`, + }, + }, + }, + ConflictsWith: []string{"snapshot_schedule_policy", "group_placement_policy", "instance_schedule_policy"}, + }, "group_placement_policy": { Type: schema.TypeList, Optional: true, @@ -76,7 +101,7 @@ availability domain, they will not be put in the same low latency network`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"COLLOCATED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"COLLOCATED", ""}), Description: `Collocation specifies whether to place VMs inside the same availability domain on the same low-latency network. Specify 'COLLOCATED' to enable collocation. Can only be specified with 'vm_count'. 
If compute instances are created with a COLLOCATED policy, then exactly 'vm_count' instances must be created at the same time with the resource policy @@ -92,7 +117,7 @@ exact number of VMs.`, }, }, }, - ConflictsWith: []string{"instance_schedule_policy", "snapshot_schedule_policy"}, + ConflictsWith: []string{"instance_schedule_policy", "snapshot_schedule_policy", "disk_consistency_group_policy"}, }, "instance_schedule_policy": { Type: schema.TypeList, @@ -159,14 +184,14 @@ from the tz database: http://en.wikipedia.org/wiki/Tz_database.`, }, }, }, - ConflictsWith: []string{"snapshot_schedule_policy", "group_placement_policy"}, + ConflictsWith: []string{"snapshot_schedule_policy", "group_placement_policy", "disk_consistency_group_policy"}, }, "region": { Type: schema.TypeString, Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `Region where resource policy resides.`, }, "snapshot_schedule_policy": { @@ -197,13 +222,13 @@ from the tz database: http://en.wikipedia.org/wiki/Tz_database.`, Type: schema.TypeInt, Required: true, ForceNew: true, - Description: `The number of days between snapshots.`, + Description: `Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle. Days in cycle for snapshot schedule policy must be 1.`, }, "start_time": { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateHourlyOnly, + ValidateFunc: verify.ValidateHourlyOnly, Description: `This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.`, @@ -230,7 +255,7 @@ both 13:00-5 and 08:00 are valid.`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateHourlyOnly, + ValidateFunc: verify.ValidateHourlyOnly, Description: `Time within the window to start the operations. 
It must be in an hourly format "HH:MM", where HH : [00-23] and MM : [00] GMT. @@ -283,7 +308,7 @@ eg: 21:00`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"KEEP_AUTO_SNAPSHOTS", "APPLY_RETENTION_POLICY", ""}), + ValidateFunc: verify.ValidateEnum([]string{"KEEP_AUTO_SNAPSHOTS", "APPLY_RETENTION_POLICY", ""}), Description: `Specifies the behavior to apply to scheduled snapshots when the source disk is deleted. Default value: "KEEP_AUTO_SNAPSHOTS" Possible values: ["KEEP_AUTO_SNAPSHOTS", "APPLY_RETENTION_POLICY"]`, Default: "KEEP_AUTO_SNAPSHOTS", @@ -303,8 +328,8 @@ the source disk is deleted. Default value: "KEEP_AUTO_SNAPSHOTS" Possible values Type: schema.TypeString, Optional: true, ForceNew: true, - Description: `Creates the new snapshot in the snapshot chain labeled with the -specified name. The chain name must be 1-63 characters long and comply + Description: `Creates the new snapshot in the snapshot chain labeled with the +specified name. The chain name must be 1-63 characters long and comply with RFC1035.`, }, "guest_flush": { @@ -340,7 +365,7 @@ with RFC1035.`, }, }, }, - ConflictsWith: []string{"group_placement_policy", "instance_schedule_policy"}, + ConflictsWith: []string{"group_placement_policy", "instance_schedule_policy", "disk_consistency_group_policy"}, }, "project": { Type: schema.TypeString, @@ -364,7 +389,7 @@ func computeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeks Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), Description: `The day of the week to create the snapshot. e.g. 
MONDAY Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, }, "start_time": { @@ -379,8 +404,8 @@ It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT.`, } func resourceComputeResourcePolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -389,41 +414,47 @@ func resourceComputeResourcePolicyCreate(d *schema.ResourceData, meta interface{ nameProp, err := expandComputeResourcePolicyName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } descriptionProp, err := expandComputeResourcePolicyDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } snapshotSchedulePolicyProp, err := expandComputeResourcePolicySnapshotSchedulePolicy(d.Get("snapshot_schedule_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("snapshot_schedule_policy"); !isEmptyValue(reflect.ValueOf(snapshotSchedulePolicyProp)) && (ok || !reflect.DeepEqual(v, snapshotSchedulePolicyProp)) { + } else if v, ok := d.GetOkExists("snapshot_schedule_policy"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(snapshotSchedulePolicyProp)) && (ok || !reflect.DeepEqual(v, snapshotSchedulePolicyProp)) { obj["snapshotSchedulePolicy"] = snapshotSchedulePolicyProp } groupPlacementPolicyProp, err := expandComputeResourcePolicyGroupPlacementPolicy(d.Get("group_placement_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("group_placement_policy"); !isEmptyValue(reflect.ValueOf(groupPlacementPolicyProp)) && (ok || !reflect.DeepEqual(v, groupPlacementPolicyProp)) { + } else if v, ok := d.GetOkExists("group_placement_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(groupPlacementPolicyProp)) && (ok || !reflect.DeepEqual(v, groupPlacementPolicyProp)) { obj["groupPlacementPolicy"] = groupPlacementPolicyProp } instanceSchedulePolicyProp, err := expandComputeResourcePolicyInstanceSchedulePolicy(d.Get("instance_schedule_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("instance_schedule_policy"); !isEmptyValue(reflect.ValueOf(instanceSchedulePolicyProp)) && (ok || !reflect.DeepEqual(v, instanceSchedulePolicyProp)) { + } else if v, ok := d.GetOkExists("instance_schedule_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceSchedulePolicyProp)) && (ok || !reflect.DeepEqual(v, instanceSchedulePolicyProp)) { obj["instanceSchedulePolicy"] = instanceSchedulePolicyProp } + diskConsistencyGroupPolicyProp, err := expandComputeResourcePolicyDiskConsistencyGroupPolicy(d.Get("disk_consistency_group_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disk_consistency_group_policy"); ok || !reflect.DeepEqual(v, diskConsistencyGroupPolicyProp) { + obj["diskConsistencyGroupPolicy"] = diskConsistencyGroupPolicyProp + } regionProp, err := expandComputeResourcePolicyRegion(d.Get("region"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, 
regionProp)) { + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { obj["region"] = regionProp } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/resourcePolicies") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/resourcePolicies") if err != nil { return err } @@ -431,24 +462,32 @@ func resourceComputeResourcePolicyCreate(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Creating new ResourcePolicy: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for ResourcePolicy: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating ResourcePolicy: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -470,33 +509,39 @@ func resourceComputeResourcePolicyCreate(d *schema.ResourceData, meta interface{ } func resourceComputeResourcePolicyRead(d *schema.ResourceData, meta 
interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for ResourcePolicy: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeResourcePolicy %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeResourcePolicy %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -518,10 +563,13 @@ func resourceComputeResourcePolicyRead(d *schema.ResourceData, meta interface{}) if err := d.Set("instance_schedule_policy", flattenComputeResourcePolicyInstanceSchedulePolicy(res["instanceSchedulePolicy"], d, config)); err != nil { return fmt.Errorf("Error reading ResourcePolicy: %s", err) } + if err := d.Set("disk_consistency_group_policy", flattenComputeResourcePolicyDiskConsistencyGroupPolicy(res["diskConsistencyGroupPolicy"], d, config)); err != nil { + return 
fmt.Errorf("Error reading ResourcePolicy: %s", err) + } if err := d.Set("region", flattenComputeResourcePolicyRegion(res["region"], d, config)); err != nil { return fmt.Errorf("Error reading ResourcePolicy: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { return fmt.Errorf("Error reading ResourcePolicy: %s", err) } @@ -529,21 +577,21 @@ func resourceComputeResourcePolicyRead(d *schema.ResourceData, meta interface{}) } func resourceComputeResourcePolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for ResourcePolicy: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") if err != nil { return err } @@ -552,13 +600,21 @@ func resourceComputeResourcePolicyDelete(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Deleting ResourcePolicy %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "ResourcePolicy") + return transport_tpg.HandleNotFoundError(err, d, "ResourcePolicy") } err = ComputeOperationWaitTime( @@ -574,8 +630,8 @@ func resourceComputeResourcePolicyDelete(d *schema.ResourceData, meta interface{ } func resourceComputeResourcePolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/regions/(?P[^/]+)/resourcePolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -585,7 +641,7 @@ func resourceComputeResourcePolicyImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -594,15 +650,15 @@ func resourceComputeResourcePolicyImport(d *schema.ResourceData, meta interface{ return []*schema.ResourceData{d}, nil } -func flattenComputeResourcePolicyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeResourcePolicySnapshotSchedulePolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -619,7 +675,7 @@ func flattenComputeResourcePolicySnapshotSchedulePolicy(v interface{}, d *schema flattenComputeResourcePolicySnapshotSchedulePolicySnapshotProperties(original["snapshotProperties"], d, config) return []interface{}{transformed} } -func flattenComputeResourcePolicySnapshotSchedulePolicySchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicySchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -636,7 +692,7 @@ func flattenComputeResourcePolicySnapshotSchedulePolicySchedule(v interface{}, d flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule(original["weeklySchedule"], d, config) return []interface{}{transformed} } -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -651,10 +707,10 @@ func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule(v flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleStartTime(original["startTime"], d, config) return []interface{}{transformed} } -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleHoursInCycle(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleHoursInCycle(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -668,11 +724,11 @@ func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleHou return v // let terraform core handle it otherwise } -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailySchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailySchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -687,10 +743,10 @@ func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailySchedule(v i flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleStartTime(original["startTime"], d, config) return []interface{}{transformed} } -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleDaysInCycle(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleDaysInCycle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -704,11 +760,11 @@ func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleDays return v // 
let terraform core handle it otherwise } -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -721,7 +777,7 @@ func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule(v flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeks(original["dayOfWeeks"], d, config) return []interface{}{transformed} } -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeks(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -740,15 +796,15 @@ func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDay } return transformed } -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -763,10 +819,10 @@ func flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicy(v interfa flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete(original["onSourceDiskDelete"], d, config) return []interface{}{transformed} } -func flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyMaxRetentionDays(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyMaxRetentionDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -780,11 +836,11 @@ func flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyMaxRetenti return v // let terraform core handle it otherwise } -func flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotProperties(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeResourcePolicySnapshotSchedulePolicySnapshotProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -803,26 +859,26 @@ func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotProperties(v inte flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesChainName(original["chainName"], d, config) return []interface{}{transformed} } -func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesStorageLocations(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesStorageLocations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesGuestFlush(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesGuestFlush(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesChainName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesChainName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicyGroupPlacementPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeResourcePolicyGroupPlacementPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -839,10 +895,10 @@ func flattenComputeResourcePolicyGroupPlacementPolicy(v interface{}, d *schema.R flattenComputeResourcePolicyGroupPlacementPolicyCollocation(original["collocation"], d, config) return []interface{}{transformed} } -func flattenComputeResourcePolicyGroupPlacementPolicyVmCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyGroupPlacementPolicyVmCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -856,10 +912,10 @@ func flattenComputeResourcePolicyGroupPlacementPolicyVmCount(v interface{}, d *s return v // let terraform core handle it otherwise } -func flattenComputeResourcePolicyGroupPlacementPolicyAvailabilityDomainCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyGroupPlacementPolicyAvailabilityDomainCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -873,11 +929,11 @@ func flattenComputeResourcePolicyGroupPlacementPolicyAvailabilityDomainCount(v i return v // let terraform core handle it otherwise } -func flattenComputeResourcePolicyGroupPlacementPolicyCollocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyGroupPlacementPolicyCollocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { return v } -func flattenComputeResourcePolicyInstanceSchedulePolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyInstanceSchedulePolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -898,7 +954,7 @@ func flattenComputeResourcePolicyInstanceSchedulePolicy(v interface{}, d *schema flattenComputeResourcePolicyInstanceSchedulePolicyExpirationTime(original["expirationTime"], d, config) return []interface{}{transformed} } -func flattenComputeResourcePolicyInstanceSchedulePolicyVmStartSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyInstanceSchedulePolicyVmStartSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -911,11 +967,11 @@ func flattenComputeResourcePolicyInstanceSchedulePolicyVmStartSchedule(v interfa flattenComputeResourcePolicyInstanceSchedulePolicyVmStartScheduleSchedule(original["schedule"], d, config) return []interface{}{transformed} } -func flattenComputeResourcePolicyInstanceSchedulePolicyVmStartScheduleSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyInstanceSchedulePolicyVmStartScheduleSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicyInstanceSchedulePolicyVmStopSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyInstanceSchedulePolicyVmStopSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -928,38 +984,47 @@ func flattenComputeResourcePolicyInstanceSchedulePolicyVmStopSchedule(v interfac flattenComputeResourcePolicyInstanceSchedulePolicyVmStopScheduleSchedule(original["schedule"], d, config) return 
[]interface{}{transformed} } -func flattenComputeResourcePolicyInstanceSchedulePolicyVmStopScheduleSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyInstanceSchedulePolicyVmStopScheduleSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicyInstanceSchedulePolicyTimeZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyInstanceSchedulePolicyTimeZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicyInstanceSchedulePolicyStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyInstanceSchedulePolicyStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicyInstanceSchedulePolicyExpirationTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyInstanceSchedulePolicyExpirationTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeResourcePolicyRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeResourcePolicyDiskConsistencyGroupPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + transformed["enabled"] = true + return []interface{}{transformed} +} + +func flattenComputeResourcePolicyRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func expandComputeResourcePolicyName(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandComputeResourcePolicyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicySnapshotSchedulePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -971,28 +1036,28 @@ func expandComputeResourcePolicySnapshotSchedulePolicy(v interface{}, d Terrafor transformedSchedule, err := expandComputeResourcePolicySnapshotSchedulePolicySchedule(original["schedule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchedule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schedule"] = transformedSchedule } transformedRetentionPolicy, err := expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicy(original["retention_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetentionPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetentionPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retentionPolicy"] = transformedRetentionPolicy } transformedSnapshotProperties, err := expandComputeResourcePolicySnapshotSchedulePolicySnapshotProperties(original["snapshot_properties"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedSnapshotProperties); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSnapshotProperties); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["snapshotProperties"] = transformedSnapshotProperties } return transformed, nil } -func expandComputeResourcePolicySnapshotSchedulePolicySchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicySchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1004,28 +1069,28 @@ func expandComputeResourcePolicySnapshotSchedulePolicySchedule(v interface{}, d transformedHourlySchedule, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule(original["hourly_schedule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHourlySchedule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHourlySchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hourlySchedule"] = transformedHourlySchedule } transformedDailySchedule, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailySchedule(original["daily_schedule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDailySchedule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDailySchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dailySchedule"] = transformedDailySchedule } transformedWeeklySchedule, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule(original["weekly_schedule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeeklySchedule); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedWeeklySchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weeklySchedule"] = transformedWeeklySchedule } return transformed, nil } -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1037,29 +1102,29 @@ func expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule(v i transformedHoursInCycle, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleHoursInCycle(original["hours_in_cycle"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHoursInCycle); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHoursInCycle); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hoursInCycle"] = transformedHoursInCycle } transformedStartTime, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleStartTime(original["start_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["startTime"] = transformedStartTime } return transformed, nil } -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleHoursInCycle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleHoursInCycle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailySchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailySchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1071,29 +1136,29 @@ func expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailySchedule(v in transformedDaysInCycle, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleDaysInCycle(original["days_in_cycle"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDaysInCycle); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDaysInCycle); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["daysInCycle"] = transformedDaysInCycle } transformedStartTime, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleStartTime(original["start_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["startTime"] = transformedStartTime } return transformed, nil } -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleDaysInCycle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleDaysInCycle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1105,14 +1170,14 @@ func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule(v i transformedDayOfWeeks, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeks(original["day_of_weeks"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDayOfWeeks); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDayOfWeeks); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dayOfWeeks"] = transformedDayOfWeeks } return transformed, nil } -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, 
len(l)) @@ -1126,14 +1191,14 @@ func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayO transformedStartTime, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksStartTime(original["start_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["startTime"] = transformedStartTime } transformedDay, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksDay(original["day"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["day"] = transformedDay } @@ -1142,15 +1207,15 @@ func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayO return req, nil } -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) 
{ +func expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1162,29 +1227,29 @@ func expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicy(v interfac transformedMaxRetentionDays, err := expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyMaxRetentionDays(original["max_retention_days"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxRetentionDays); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxRetentionDays); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxRetentionDays"] = transformedMaxRetentionDays } transformedOnSourceDiskDelete, err := expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete(original["on_source_disk_delete"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOnSourceDiskDelete); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOnSourceDiskDelete); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["onSourceDiskDelete"] = transformedOnSourceDiskDelete } return transformed, nil } -func expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyMaxRetentionDays(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyMaxRetentionDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicySnapshotSchedulePolicySnapshotProperties(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicySnapshotProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1196,14 +1261,14 @@ func expandComputeResourcePolicySnapshotSchedulePolicySnapshotProperties(v inter transformedLabels, err := expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesLabels(original["labels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["labels"] = transformedLabels } transformedStorageLocations, err := expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesStorageLocations(original["storage_locations"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStorageLocations); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStorageLocations); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["storageLocations"] = transformedStorageLocations } @@ -1217,14 +1282,14 @@ func expandComputeResourcePolicySnapshotSchedulePolicySnapshotProperties(v inter transformedChainName, err := expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesChainName(original["chain_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedChainName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedChainName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["chainName"] 
= transformedChainName } return transformed, nil } -func expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1235,20 +1300,20 @@ func expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesLabels(v return m, nil } -func expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesStorageLocations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesStorageLocations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesGuestFlush(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesGuestFlush(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesChainName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesChainName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicyGroupPlacementPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicyGroupPlacementPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1260,40 +1325,40 @@ func expandComputeResourcePolicyGroupPlacementPolicy(v interface{}, d TerraformR transformedVmCount, err := expandComputeResourcePolicyGroupPlacementPolicyVmCount(original["vm_count"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVmCount); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVmCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["vmCount"] = transformedVmCount } transformedAvailabilityDomainCount, err := expandComputeResourcePolicyGroupPlacementPolicyAvailabilityDomainCount(original["availability_domain_count"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAvailabilityDomainCount); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAvailabilityDomainCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["availabilityDomainCount"] = transformedAvailabilityDomainCount } transformedCollocation, err := expandComputeResourcePolicyGroupPlacementPolicyCollocation(original["collocation"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCollocation); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCollocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["collocation"] = transformedCollocation } return transformed, nil } -func expandComputeResourcePolicyGroupPlacementPolicyVmCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicyGroupPlacementPolicyVmCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicyGroupPlacementPolicyAvailabilityDomainCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { +func expandComputeResourcePolicyGroupPlacementPolicyAvailabilityDomainCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicyGroupPlacementPolicyCollocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicyGroupPlacementPolicyCollocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicyInstanceSchedulePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicyInstanceSchedulePolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1305,42 +1370,42 @@ func expandComputeResourcePolicyInstanceSchedulePolicy(v interface{}, d Terrafor transformedVmStartSchedule, err := expandComputeResourcePolicyInstanceSchedulePolicyVmStartSchedule(original["vm_start_schedule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVmStartSchedule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVmStartSchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["vmStartSchedule"] = transformedVmStartSchedule } transformedVmStopSchedule, err := expandComputeResourcePolicyInstanceSchedulePolicyVmStopSchedule(original["vm_stop_schedule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVmStopSchedule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVmStopSchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["vmStopSchedule"] = transformedVmStopSchedule } transformedTimeZone, err := 
expandComputeResourcePolicyInstanceSchedulePolicyTimeZone(original["time_zone"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeZone); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeZone); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeZone"] = transformedTimeZone } transformedStartTime, err := expandComputeResourcePolicyInstanceSchedulePolicyStartTime(original["start_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["startTime"] = transformedStartTime } transformedExpirationTime, err := expandComputeResourcePolicyInstanceSchedulePolicyExpirationTime(original["expiration_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExpirationTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExpirationTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["expirationTime"] = transformedExpirationTime } return transformed, nil } -func expandComputeResourcePolicyInstanceSchedulePolicyVmStartSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicyInstanceSchedulePolicyVmStartSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1352,18 +1417,18 @@ func expandComputeResourcePolicyInstanceSchedulePolicyVmStartSchedule(v interfac transformedSchedule, err := expandComputeResourcePolicyInstanceSchedulePolicyVmStartScheduleSchedule(original["schedule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchedule); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schedule"] = transformedSchedule } return transformed, nil } -func expandComputeResourcePolicyInstanceSchedulePolicyVmStartScheduleSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicyInstanceSchedulePolicyVmStartScheduleSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicyInstanceSchedulePolicyVmStopSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicyInstanceSchedulePolicyVmStopSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1375,31 +1440,48 @@ func expandComputeResourcePolicyInstanceSchedulePolicyVmStopSchedule(v interface transformedSchedule, err := expandComputeResourcePolicyInstanceSchedulePolicyVmStopScheduleSchedule(original["schedule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchedule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schedule"] = transformedSchedule } return transformed, nil } -func expandComputeResourcePolicyInstanceSchedulePolicyVmStopScheduleSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicyInstanceSchedulePolicyVmStopScheduleSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicyInstanceSchedulePolicyTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { 
+func expandComputeResourcePolicyInstanceSchedulePolicyTimeZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicyInstanceSchedulePolicyStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicyInstanceSchedulePolicyStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicyInstanceSchedulePolicyExpirationTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeResourcePolicyInstanceSchedulePolicyExpirationTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeResourcePolicyRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) +func expandComputeResourcePolicyDiskConsistencyGroupPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + if isEnabled, ok := original["enabled"]; ok { + if !isEnabled.(bool) { + return nil, nil + } + } + transformed := make(map[string]interface{}) + return transformed, nil +} + +func expandComputeResourcePolicyRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for region: %s", err) } diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resource_policy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resource_policy_sweeper.go new file mode 100644 index 0000000000..8c2c8151cc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_resource_policy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeResourcePolicy", testSweepComputeResourcePolicy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeResourcePolicy(region string) error { + resourceName := "ComputeResourcePolicy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/resourcePolicies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_route.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_route.go new file mode 100644 index 0000000000..991ce0f112 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_route.go @@ -0,0 +1,672 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeRoute() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouteCreate, + Read: resourceComputeRouteRead, + Delete: resourceComputeRouteDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRouteImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "dest_range": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The destination range of outgoing packets that this route applies to. 
+Only IPv4 is supported.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^[a-z]([-a-z0-9]*[a-z0-9])?$`), + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and +match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means +the first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the +last character, which cannot be a dash.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The network that this route applies to.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource. Provide this property +when you create the resource.`, + }, + "next_hop_gateway": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL to a gateway that should handle matching packets. 
+Currently, you can only specify the internet gateway, using a full or +partial valid URL: +* 'https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway' +* 'projects/project/global/gateways/default-internet-gateway' +* 'global/gateways/default-internet-gateway' +* The string 'default-internet-gateway'.`, + ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, + }, + "next_hop_ilb": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareIpAddressOrSelfLinkOrResourceName, + Description: `The IP address or URL to a forwarding rule of type +loadBalancingScheme=INTERNAL that should handle matching +packets. + +With the GA provider you can only specify the forwarding +rule as a partial or full URL. For example, the following +are all valid values: +* 10.128.0.56 +* https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule +* regions/region/forwardingRules/forwardingRule + +When the beta provider, you can also specify the IP address +of a forwarding rule from the same VPC or any peered VPC. + +Note that this can only be used when the destinationRange is +a public (non-RFC 1918) IP CIDR range.`, + ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, + }, + "next_hop_instance": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL to an instance that should handle matching packets. +You can specify this as a full or partial URL. 
For example: +* 'https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance' +* 'projects/project/zones/zone/instances/instance' +* 'zones/zone/instances/instance' +* Just the instance name, with the zone in 'next_hop_instance_zone'.`, + ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, + }, + "next_hop_ip": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Network IP address of an instance that should handle matching packets.`, + ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, + }, + "next_hop_vpn_tunnel": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL to a VpnTunnel that should handle matching packets.`, + ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, + }, + "priority": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The priority of this route. Priority is used to break ties in cases +where there is more than one matching route of equal prefix length. + +In the case of two routes with equal prefix length, the one with the +lowest-numbered priority value wins. + +Default value is 1000. 
Valid range is 0 through 65535.`, + Default: 1000, + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: `A list of instance tags to which this route applies.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "next_hop_network": { + Type: schema.TypeString, + Computed: true, + Description: `URL to a Network that should handle matching packets.`, + }, + "next_hop_instance_zone": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: "The zone of the instance specified in next_hop_instance. Omit if next_hop_instance is specified as a URL.", + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + destRangeProp, err := expandComputeRouteDestRange(d.Get("dest_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dest_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(destRangeProp)) && (ok || !reflect.DeepEqual(v, destRangeProp)) { + obj["destRange"] = destRangeProp + } + descriptionProp, err := expandComputeRouteDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeRouteName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || 
!reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + networkProp, err := expandComputeRouteNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + priorityProp, err := expandComputeRoutePriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); ok || !reflect.DeepEqual(v, priorityProp) { + obj["priority"] = priorityProp + } + tagsProp, err := expandComputeRouteTags(d.Get("tags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tags"); !tpgresource.IsEmptyValue(reflect.ValueOf(tagsProp)) && (ok || !reflect.DeepEqual(v, tagsProp)) { + obj["tags"] = tagsProp + } + nextHopGatewayProp, err := expandComputeRouteNextHopGateway(d.Get("next_hop_gateway"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("next_hop_gateway"); !tpgresource.IsEmptyValue(reflect.ValueOf(nextHopGatewayProp)) && (ok || !reflect.DeepEqual(v, nextHopGatewayProp)) { + obj["nextHopGateway"] = nextHopGatewayProp + } + nextHopInstanceProp, err := expandComputeRouteNextHopInstance(d.Get("next_hop_instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("next_hop_instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(nextHopInstanceProp)) && (ok || !reflect.DeepEqual(v, nextHopInstanceProp)) { + obj["nextHopInstance"] = nextHopInstanceProp + } + nextHopIpProp, err := expandComputeRouteNextHopIp(d.Get("next_hop_ip"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("next_hop_ip"); !tpgresource.IsEmptyValue(reflect.ValueOf(nextHopIpProp)) && (ok || !reflect.DeepEqual(v, nextHopIpProp)) { + obj["nextHopIp"] = nextHopIpProp + } + nextHopVpnTunnelProp, err := expandComputeRouteNextHopVpnTunnel(d.Get("next_hop_vpn_tunnel"), d, 
config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("next_hop_vpn_tunnel"); !tpgresource.IsEmptyValue(reflect.ValueOf(nextHopVpnTunnelProp)) && (ok || !reflect.DeepEqual(v, nextHopVpnTunnelProp)) { + obj["nextHopVpnTunnel"] = nextHopVpnTunnelProp + } + nextHopIlbProp, err := expandComputeRouteNextHopIlb(d.Get("next_hop_ilb"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("next_hop_ilb"); !tpgresource.IsEmptyValue(reflect.ValueOf(nextHopIlbProp)) && (ok || !reflect.DeepEqual(v, nextHopIlbProp)) { + obj["nextHopIlb"] = nextHopIlbProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/peerings") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/routes") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Route: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Route: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsPeeringOperationInProgress}, + }) + if err != nil { + return fmt.Errorf("Error creating Route: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/routes/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: 
%s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating Route", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Route: %s", err) + } + + log.Printf("[DEBUG] Finished creating Route %q: %#v", d.Id(), res) + + return resourceComputeRouteRead(d, meta) +} + +func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/routes/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Route: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsPeeringOperationInProgress}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRoute %q", d.Id())) + } + + res, err = resourceComputeRouteDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing ComputeRoute because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Route: %s", err) + } + + if err := d.Set("dest_range", flattenComputeRouteDestRange(res["destRange"], d, config)); err != nil { + return fmt.Errorf("Error reading Route: %s", err) + } + if err := d.Set("description", flattenComputeRouteDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Route: %s", err) + } + if err := d.Set("name", flattenComputeRouteName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Route: %s", err) + } + if err := d.Set("network", flattenComputeRouteNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Route: %s", err) + } + if err := d.Set("priority", flattenComputeRoutePriority(res["priority"], d, config)); err != nil { + return fmt.Errorf("Error reading Route: %s", err) + } + if err := d.Set("tags", flattenComputeRouteTags(res["tags"], d, config)); err != nil { + return fmt.Errorf("Error reading Route: %s", err) + } + if err := d.Set("next_hop_gateway", flattenComputeRouteNextHopGateway(res["nextHopGateway"], d, config)); err != nil { + return fmt.Errorf("Error reading Route: %s", err) + } + if err := d.Set("next_hop_instance", flattenComputeRouteNextHopInstance(res["nextHopInstance"], d, config)); err != nil { + return fmt.Errorf("Error reading Route: %s", err) + } + if err := d.Set("next_hop_ip", flattenComputeRouteNextHopIp(res["nextHopIp"], d, config)); err != nil { + return fmt.Errorf("Error reading Route: %s", err) + } + if err := d.Set("next_hop_vpn_tunnel", flattenComputeRouteNextHopVpnTunnel(res["nextHopVpnTunnel"], d, config)); err != nil { + return fmt.Errorf("Error reading Route: %s", err) + } + if err := d.Set("next_hop_network", flattenComputeRouteNextHopNetwork(res["nextHopNetwork"], d, config)); err != nil { + 
return fmt.Errorf("Error reading Route: %s", err) + } + if err := d.Set("next_hop_ilb", flattenComputeRouteNextHopIlb(res["nextHopIlb"], d, config)); err != nil { + return fmt.Errorf("Error reading Route: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading Route: %s", err) + } + + return nil +} + +func resourceComputeRouteDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Route: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/peerings") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/routes/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Route %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsPeeringOperationInProgress}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Route") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting Route", userAgent, + 
d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Route %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRouteImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/routes/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/routes/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeRouteDestRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouteDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouteName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouteNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRoutePriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRouteTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return 
v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenComputeRouteNextHopGateway(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouteNextHopInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRouteNextHopIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouteNextHopVpnTunnel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRouteNextHopNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouteNextHopIlb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeRouteDestRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRouteDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRouteName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRouteNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeRoutePriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandComputeRouteTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v.(*schema.Set).List(), nil +} + +func expandComputeRouteNextHopGateway(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == "default-internet-gateway" { + return tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/gateways/default-internet-gateway") + } else { + return v, nil + } +} + +func expandComputeRouteNextHopInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == "" { + return v, nil + } + val, err := tpgresource.ParseZonalFieldValue("instances", v.(string), "project", "next_hop_instance_zone", d, config, true) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + nextInstance, err := config.NewComputeClient(userAgent).Instances.Get(val.Project, val.Zone, val.Name).Do() + if err != nil { + return nil, err + } + return nextInstance.SelfLink, nil +} + +func expandComputeRouteNextHopIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRouteNextHopVpnTunnel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("vpnTunnels", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for next_hop_vpn_tunnel: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeRouteNextHopIlb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceComputeRouteDecoder(d *schema.ResourceData, meta 
interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if v, ok := res["nextHopInstance"]; ok { + val, err := tpgresource.ParseZonalFieldValue("instances", v.(string), "project", "next_hop_instance_zone", d, meta.(*transport_tpg.Config), true) + if err != nil { + return nil, err + } + if err := d.Set("next_hop_instance_zone", val.Zone); err != nil { + return nil, fmt.Errorf("Error setting next_hop_instance_zone: %s", err) + } + res["nextHopInstance"] = val.RelativeLink() + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_route_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_route_sweeper.go new file mode 100644 index 0000000000..e3fd76d9e1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_route_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRoute", testSweepComputeRoute) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRoute(region string) error { + resourceName := "ComputeRoute" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/routes", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from 
request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/routes/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router.go new file 
mode 100644 index 0000000000..158858592c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router.go @@ -0,0 +1,789 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +// customizeDiff func for additional checks on google_compute_router properties: +func resourceComputeRouterCustomDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + + block := diff.Get("bgp.0").(map[string]interface{}) + advertiseMode := block["advertise_mode"] + advertisedGroups := block["advertised_groups"].([]interface{}) + advertisedIPRanges := block["advertised_ip_ranges"].([]interface{}) + + if advertiseMode == "DEFAULT" && len(advertisedGroups) != 0 { + return fmt.Errorf("Error in bgp: advertised_groups cannot be specified when using advertise_mode DEFAULT") + } + if advertiseMode == "DEFAULT" && len(advertisedIPRanges) != 0 { + return fmt.Errorf("Error in bgp: advertised_ip_ranges cannot be specified when using 
advertise_mode DEFAULT") + } + + return nil +} + +func ResourceComputeRouter() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterCreate, + Read: resourceComputeRouterRead, + Update: resourceComputeRouterUpdate, + Delete: resourceComputeRouterDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + resourceComputeRouterCustomDiff, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the resource. The name must be 1-63 characters long, and +comply with RFC1035. Specifically, the name must be 1-63 characters +long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' +which means the first character must be a lowercase letter, and all +following characters must be a dash, lowercase letter, or digit, +except the last character, which cannot be a dash.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the network to which this router belongs.`, + }, + "bgp": { + Type: schema.TypeList, + Optional: true, + Description: `BGP information specific to this router.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "asn": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: verify.ValidateRFC6996Asn, + Description: `Local BGP Autonomous System Number (ASN). Must be an RFC6996 +private ASN, either 16-bit or 32-bit. The value will be fixed for +this router resource. 
All VPN tunnels that link to this router +will have the same local ASN.`, + }, + "advertise_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"DEFAULT", "CUSTOM", ""}), + Description: `User-specified flag to indicate which mode to use for advertisement. Default value: "DEFAULT" Possible values: ["DEFAULT", "CUSTOM"]`, + Default: "DEFAULT", + }, + "advertised_groups": { + Type: schema.TypeList, + Optional: true, + Description: `User-specified list of prefix groups to advertise in custom mode. +This field can only be populated if advertiseMode is CUSTOM and +is advertised to all peers of the router. These groups will be +advertised in addition to any specified prefixes. Leave this field +blank to advertise no custom groups. + +This enum field has the one valid value: ALL_SUBNETS`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "advertised_ip_ranges": { + Type: schema.TypeList, + Optional: true, + Description: `User-specified list of individual IP ranges to advertise in +custom mode. This field can only be populated if advertiseMode +is CUSTOM and is advertised to all peers of the router. These IP +ranges will be advertised in addition to any specified groups. +Leave this field blank to advertise no custom IP ranges.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "range": { + Type: schema.TypeString, + Required: true, + Description: `The IP range to advertise. The value must be a +CIDR-formatted string.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `User-specified description for the IP range.`, + }, + }, + }, + }, + "keepalive_interval": { + Type: schema.TypeInt, + Optional: true, + Description: `The interval in seconds between BGP keepalive messages that are sent +to the peer. 
Hold time is three times the interval at which keepalive +messages are sent, and the hold time is the maximum number of seconds +allowed to elapse between successive keepalive messages that BGP +receives from a peer. + +BGP will use the smaller of either the local hold time value or the +peer's hold time value as the hold time for the BGP connection +between the two peers. If set, this value must be between 20 and 60. +The default is 20.`, + Default: 20, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An optional description of this resource.`, + }, + "encrypted_interconnect_router": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Indicates if a router is dedicated for use with encrypted VLAN +attachments (interconnectAttachments).`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Region where the router resides.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRouterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandComputeRouterName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := 
expandComputeRouterDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); ok || !reflect.DeepEqual(v, descriptionProp) { + obj["description"] = descriptionProp + } + networkProp, err := expandComputeRouterNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + bgpProp, err := expandComputeRouterBgp(d.Get("bgp"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bgp"); ok || !reflect.DeepEqual(v, bgpProp) { + obj["bgp"] = bgpProp + } + encryptedInterconnectRouterProp, err := expandComputeRouterEncryptedInterconnectRouter(d.Get("encrypted_interconnect_router"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encrypted_interconnect_router"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptedInterconnectRouterProp)) && (ok || !reflect.DeepEqual(v, encryptedInterconnectRouterProp)) { + obj["encryptedInterconnectRouter"] = encryptedInterconnectRouterProp + } + regionProp, err := expandComputeRouterRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{name}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Router: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != 
nil { + return fmt.Errorf("Error fetching project for Router: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Router: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating Router", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Router: %s", err) + } + + log.Printf("[DEBUG] Finished creating Router %q: %#v", d.Id(), res) + + return resourceComputeRouterRead(d, meta) +} + +func resourceComputeRouterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Router: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRouter %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Router: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeRouterCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading Router: %s", err) + } + if err := d.Set("name", flattenComputeRouterName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Router: %s", err) + } + if err := d.Set("description", flattenComputeRouterDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Router: %s", err) + } + if err := d.Set("network", flattenComputeRouterNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Router: %s", err) + } + if err := d.Set("bgp", flattenComputeRouterBgp(res["bgp"], d, config)); err != nil { + return fmt.Errorf("Error reading Router: %s", err) + } + if err := d.Set("encrypted_interconnect_router", flattenComputeRouterEncryptedInterconnectRouter(res["encryptedInterconnectRouter"], d, config)); err != nil { + return fmt.Errorf("Error reading Router: %s", err) + } + if err := d.Set("region", flattenComputeRouterRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading Router: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading Router: %s", err) + } + + return nil +} + +func resourceComputeRouterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + 
billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Router: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeRouterDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); ok || !reflect.DeepEqual(v, descriptionProp) { + obj["description"] = descriptionProp + } + bgpProp, err := expandComputeRouterBgp(d.Get("bgp"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bgp"); ok || !reflect.DeepEqual(v, bgpProp) { + obj["bgp"] = bgpProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{name}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Router %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Router %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Router %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Router", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeRouterRead(d, meta) +} + +func resourceComputeRouterDelete(d *schema.ResourceData, meta 
interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Router: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{name}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Router %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Router") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting Router", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Router %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRouterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/routers/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the 
resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeRouterCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouterDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouterNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeRouterBgp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["asn"] = + flattenComputeRouterBgpAsn(original["asn"], d, config) + transformed["advertise_mode"] = + flattenComputeRouterBgpAdvertiseMode(original["advertiseMode"], d, config) + transformed["advertised_groups"] = + flattenComputeRouterBgpAdvertisedGroups(original["advertisedGroups"], d, config) + transformed["advertised_ip_ranges"] = + flattenComputeRouterBgpAdvertisedIpRanges(original["advertisedIpRanges"], d, config) + transformed["keepalive_interval"] = + flattenComputeRouterBgpKeepaliveInterval(original["keepaliveInterval"], d, config) + return []interface{}{transformed} +} +func flattenComputeRouterBgpAsn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeRouterBgpAdvertiseMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouterBgpAdvertisedGroups(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouterBgpAdvertisedIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "range": flattenComputeRouterBgpAdvertisedIpRangesRange(original["range"], d, config), + "description": flattenComputeRouterBgpAdvertisedIpRangesDescription(original["description"], d, config), + }) + } + return transformed +} +func flattenComputeRouterBgpAdvertisedIpRangesRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouterBgpAdvertisedIpRangesDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouterBgpKeepaliveInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + 
return v // let terraform core handle it otherwise +} + +func flattenComputeRouterEncryptedInterconnectRouter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeRouterRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandComputeRouterName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRouterDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRouterNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeRouterBgp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAsn, err := expandComputeRouterBgpAsn(original["asn"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAsn); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["asn"] = transformedAsn + } + + transformedAdvertiseMode, err := expandComputeRouterBgpAdvertiseMode(original["advertise_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAdvertiseMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["advertiseMode"] = transformedAdvertiseMode + } + + 
transformedAdvertisedGroups, err := expandComputeRouterBgpAdvertisedGroups(original["advertised_groups"], d, config) + if err != nil { + return nil, err + } else { + transformed["advertisedGroups"] = transformedAdvertisedGroups + } + + transformedAdvertisedIpRanges, err := expandComputeRouterBgpAdvertisedIpRanges(original["advertised_ip_ranges"], d, config) + if err != nil { + return nil, err + } else { + transformed["advertisedIpRanges"] = transformedAdvertisedIpRanges + } + + transformedKeepaliveInterval, err := expandComputeRouterBgpKeepaliveInterval(original["keepalive_interval"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKeepaliveInterval); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["keepaliveInterval"] = transformedKeepaliveInterval + } + + return transformed, nil +} + +func expandComputeRouterBgpAsn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRouterBgpAdvertiseMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRouterBgpAdvertisedGroups(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRouterBgpAdvertisedIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRange, err := expandComputeRouterBgpAdvertisedIpRangesRange(original["range"], d, config) + if err != nil { + return nil, err + } else { + transformed["range"] = transformedRange + } + + transformedDescription, err := 
expandComputeRouterBgpAdvertisedIpRangesDescription(original["description"], d, config) + if err != nil { + return nil, err + } else { + transformed["description"] = transformedDescription + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeRouterBgpAdvertisedIpRangesRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRouterBgpAdvertisedIpRangesDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRouterBgpKeepaliveInterval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRouterEncryptedInterconnectRouter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeRouterRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_router_interface.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_interface.go similarity index 80% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_router_interface.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_interface.go index 415907ed60..07bd297727 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_router_interface.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_interface.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" @@ -7,6 +9,9 @@ import ( "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/googleapi" @@ -44,7 +49,7 @@ func ResourceComputeRouterInterface() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, AtLeastOneOf: []string{"ip_range", "interconnect_attachment", "subnetwork", "vpn_tunnel"}, ConflictsWith: []string{"interconnect_attachment", "subnetwork"}, Description: `The name or resource link to the VPN tunnel this interface will be linked to. Changing this forces a new interface to be created. Only one of vpn_tunnel, interconnect_attachment or subnetwork can be specified.`, @@ -53,7 +58,7 @@ func ResourceComputeRouterInterface() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, AtLeastOneOf: []string{"ip_range", "interconnect_attachment", "subnetwork", "vpn_tunnel"}, ConflictsWith: []string{"subnetwork", "vpn_tunnel"}, Description: `The name or resource link to the VLAN interconnect for this interface. Changing this forces a new interface to be created. 
Only one of interconnect_attachment, subnetwork or vpn_tunnel can be specified.`, @@ -76,7 +81,7 @@ func ResourceComputeRouterInterface() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, AtLeastOneOf: []string{"ip_range", "interconnect_attachment", "subnetwork", "vpn_tunnel"}, ConflictsWith: []string{"interconnect_attachment", "vpn_tunnel"}, Description: `The URI of the subnetwork resource that this interface belongs to, which must be in the same region as the Cloud Router. Changing this forces a new interface to be created. Only one of subnetwork, interconnect_attachment or vpn_tunnel can be specified.`, @@ -109,18 +114,18 @@ func ResourceComputeRouterInterface() *schema.Resource { } func resourceComputeRouterInterfaceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -128,9 +133,9 @@ func resourceComputeRouterInterfaceCreate(d *schema.ResourceData, meta interface routerName := d.Get("router").(string) ifaceName := d.Get("name").(string) - routerLock := getRouterLockName(region, routerName) - mutexKV.Lock(routerLock) - defer mutexKV.Unlock(routerLock) + routerLock := tpgresource.GetRouterLockName(region, routerName) + transport_tpg.MutexStore.Lock(routerLock) + defer transport_tpg.MutexStore.Unlock(routerLock) routersService := config.NewComputeClient(userAgent).Routers router, err := routersService.Get(project, region, routerName).Do() @@ -177,7 
+182,7 @@ func resourceComputeRouterInterfaceCreate(d *schema.ResourceData, meta interface } if icVal, ok := d.GetOk("interconnect_attachment"); ok { - interconnectAttachment, err := getInterconnectAttachmentLink(config, project, region, icVal.(string), userAgent) + interconnectAttachment, err := tpgresource.GetInterconnectAttachmentLink(config, project, region, icVal.(string), userAgent) if err != nil { return err } @@ -210,18 +215,18 @@ func resourceComputeRouterInterfaceCreate(d *schema.ResourceData, meta interface } func resourceComputeRouterInterfaceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -280,18 +285,18 @@ func resourceComputeRouterInterfaceRead(d *schema.ResourceData, meta interface{} } func resourceComputeRouterInterfaceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -299,9 +304,9 @@ func resourceComputeRouterInterfaceDelete(d *schema.ResourceData, meta interface routerName := d.Get("router").(string) ifaceName := d.Get("name").(string) - routerLock := getRouterLockName(region, routerName) - 
mutexKV.Lock(routerLock) - defer mutexKV.Unlock(routerLock) + routerLock := tpgresource.GetRouterLockName(region, routerName) + transport_tpg.MutexStore.Lock(routerLock) + defer transport_tpg.MutexStore.Unlock(routerLock) routersService := config.NewComputeClient(userAgent).Routers router, err := routersService.Get(project, region, routerName).Do() @@ -366,19 +371,35 @@ func resourceComputeRouterInterfaceDelete(d *schema.ResourceData, meta interface func resourceComputeRouterInterfaceImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { parts := strings.Split(d.Id(), "/") - if len(parts) != 3 { - return nil, fmt.Errorf("Invalid router interface specifier. Expecting {region}/{router}/{interface}") - } - - if err := d.Set("region", parts[0]); err != nil { - return nil, fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("router", parts[1]); err != nil { - return nil, fmt.Errorf("Error setting router: %s", err) - } - if err := d.Set("name", parts[2]); err != nil { - return nil, fmt.Errorf("Error setting name: %s", err) + switch len(parts) { + case 3: + // {{region}}/{{router}}/{{name}} import id + if err := d.Set("region", parts[0]); err != nil { + return nil, fmt.Errorf("error setting region: %s", err) + } + if err := d.Set("router", parts[1]); err != nil { + return nil, fmt.Errorf("error setting router: %s", err) + } + if err := d.Set("name", parts[2]); err != nil { + return nil, fmt.Errorf("error setting name: %s", err) + } + return []*schema.ResourceData{d}, nil + case 4: + // {{project}}/{{region}}/{{router}}/{{name}} import id + if err := d.Set("project", parts[0]); err != nil { + return nil, fmt.Errorf("error setting project: %s", err) + } + if err := d.Set("region", parts[1]); err != nil { + return nil, fmt.Errorf("error setting region: %s", err) + } + if err := d.Set("router", parts[2]); err != nil { + return nil, fmt.Errorf("error setting router: %s", err) + } + if err := d.Set("name", parts[3]); err != nil { + 
return nil, fmt.Errorf("error setting name: %s", err) + } + return []*schema.ResourceData{d}, nil } - return []*schema.ResourceData{d}, nil + return nil, fmt.Errorf("invalid router interface specifier. Expecting either {region}/{router}/{interface} or {project}/{region}/{router}/{interface} import id format") } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_router_nat.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_router_nat.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat.go index f325a283d2..47b52aa71b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_router_nat.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "context" @@ -22,8 +25,13 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func resourceNameSetFromSelfLinkSet(v interface{}) *schema.Set { @@ -36,7 +44,7 @@ func resourceNameSetFromSelfLinkSet(v interface{}) *schema.Set { if v == nil { continue } - ls = append(ls, GetResourceNameFromSelfLink(v.(string))) + ls = append(ls, tpgresource.GetResourceNameFromSelfLink(v.(string))) } return schema.NewSet(schema.HashString, ls) } @@ -100,7 +108,7 @@ func computeRouterNatSubnetworkHash(v interface{}) int { } } - return schema.HashString(NameFromSelfLinkStateFunc(name)) + sourceIpRangesHash + secondaryIpRangeHash + return schema.HashString(tpgresource.NameFromSelfLinkStateFunc(name)) + sourceIpRangesHash + secondaryIpRangeHash } func computeRouterNatIPsHash(v interface{}) int { @@ -109,7 +117,7 @@ func computeRouterNatIPsHash(v interface{}) int { if len(newParts) == 1 { return schema.HashString(newParts[0]) } - return schema.HashString(GetResourceNameFromSelfLink(val)) + return schema.HashString(tpgresource.GetResourceNameFromSelfLink(val)) } func computeRouterNatRulesHash(v interface{}) int { @@ -171,21 +179,23 @@ func ResourceComputeRouterNat() *schema.Resource { Delete: schema.DefaultTimeout(20 * time.Minute), }, - CustomizeDiff: resourceComputeRouterNatDrainNatIpsCustomDiff, + 
CustomizeDiff: customdiff.All( + resourceComputeRouterNatDrainNatIpsCustomDiff, + ), Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateRFC1035Name(2, 63), + ValidateFunc: verify.ValidateRFC1035Name(2, 63), Description: `Name of the NAT service. The name must be 1-63 characters long and comply with RFC1035.`, }, "nat_ip_allocate_option": { Type: schema.TypeString, Required: true, - ValidateFunc: validateEnum([]string{"MANUAL_ONLY", "AUTO_ONLY"}), + ValidateFunc: verify.ValidateEnum([]string{"MANUAL_ONLY", "AUTO_ONLY"}), Description: `How external IPs should be allocated for this NAT. Valid values are 'AUTO_ONLY' for only allowing NAT IPs allocated by Google Cloud Platform, or 'MANUAL_ONLY' for only user-allocated NAT IP addresses. Possible values: ["MANUAL_ONLY", "AUTO_ONLY"]`, @@ -194,13 +204,13 @@ Platform, or 'MANUAL_ONLY' for only user-allocated NAT IP addresses. Possible va Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name of the Cloud Router in which this NAT will be configured.`, }, "source_subnetwork_ip_ranges_to_nat": { Type: schema.TypeString, Required: true, - ValidateFunc: validateEnum([]string{"ALL_SUBNETWORKS_ALL_IP_RANGES", "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", "LIST_OF_SUBNETWORKS"}), + ValidateFunc: verify.ValidateEnum([]string{"ALL_SUBNETWORKS_ALL_IP_RANGES", "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", "LIST_OF_SUBNETWORKS"}), Description: `How NAT should be configured per Subnetwork. If 'ALL_SUBNETWORKS_ALL_IP_RANGES', all of the IP ranges in every Subnetwork are allowed to Nat. @@ -219,7 +229,7 @@ other RouterNat section in any Router for this network in this region. 
Possible valid static external IPs that have been assigned to the NAT.`, Elem: &schema.Schema{ Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, }, // Default schema.HashSchema is used. }, @@ -263,7 +273,7 @@ see the [official documentation](https://cloud.google.com/nat/docs/overview#spec "filter": { Type: schema.TypeString, Required: true, - ValidateFunc: validateEnum([]string{"ERRORS_ONLY", "TRANSLATIONS_ONLY", "ALL"}), + ValidateFunc: verify.ValidateEnum([]string{"ERRORS_ONLY", "TRANSLATIONS_ONLY", "ALL"}), Description: `Specifies the desired filtering of logs on this NAT. Possible values: ["ERRORS_ONLY", "TRANSLATIONS_ONLY", "ALL"]`, }, }, @@ -287,7 +297,7 @@ This field can only be set when enableDynamicPortAllocation is enabled.`, is set to MANUAL_ONLY.`, Elem: &schema.Schema{ Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, }, Set: computeRouterNatIPsHash, }, @@ -296,7 +306,7 @@ is set to MANUAL_ONLY.`, Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `Region where the router and NAT reside.`, }, "rules": { @@ -358,7 +368,7 @@ func computeRouterNatSubnetworkSchema() *schema.Resource { "name": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `Self-link of subnetwork to NAT`, }, "source_ip_ranges_to_nat": { @@ -432,7 +442,7 @@ These IP addresses must be valid static external IP addresses assigned to the pr This field is used for public NAT.`, Elem: &schema.Schema{ Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, }, Set: computeRouterNatIPsHash, }, @@ 
-445,7 +455,7 @@ These IPs should be used for updating/patching a NAT rule only. This field is used for public NAT.`, Elem: &schema.Schema{ Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, }, Set: computeRouterNatIPsHash, }, @@ -462,8 +472,8 @@ This field is used for public NAT.`, } func resourceComputeRouterNatCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -472,13 +482,13 @@ func resourceComputeRouterNatCreate(d *schema.ResourceData, meta interface{}) er nameProp, err := expandNestedComputeRouterNatName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } natIpAllocateOptionProp, err := expandNestedComputeRouterNatNatIpAllocateOption(d.Get("nat_ip_allocate_option"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("nat_ip_allocate_option"); !isEmptyValue(reflect.ValueOf(natIpAllocateOptionProp)) && (ok || !reflect.DeepEqual(v, natIpAllocateOptionProp)) { + } else if v, ok := d.GetOkExists("nat_ip_allocate_option"); !tpgresource.IsEmptyValue(reflect.ValueOf(natIpAllocateOptionProp)) && (ok || !reflect.DeepEqual(v, natIpAllocateOptionProp)) { obj["natIpAllocateOption"] = natIpAllocateOptionProp } natIpsProp, err := expandNestedComputeRouterNatNatIps(d.Get("nat_ips"), d, config) @@ -496,7 +506,7 @@ func resourceComputeRouterNatCreate(d *schema.ResourceData, meta interface{}) er sourceSubnetworkIpRangesToNatProp, err 
:= expandNestedComputeRouterNatSourceSubnetworkIpRangesToNat(d.Get("source_subnetwork_ip_ranges_to_nat"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("source_subnetwork_ip_ranges_to_nat"); !isEmptyValue(reflect.ValueOf(sourceSubnetworkIpRangesToNatProp)) && (ok || !reflect.DeepEqual(v, sourceSubnetworkIpRangesToNatProp)) { + } else if v, ok := d.GetOkExists("source_subnetwork_ip_ranges_to_nat"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceSubnetworkIpRangesToNatProp)) && (ok || !reflect.DeepEqual(v, sourceSubnetworkIpRangesToNatProp)) { obj["sourceSubnetworkIpRangesToNat"] = sourceSubnetworkIpRangesToNatProp } subnetworksProp, err := expandNestedComputeRouterNatSubnetwork(d.Get("subnetwork"), d, config) @@ -508,13 +518,13 @@ func resourceComputeRouterNatCreate(d *schema.ResourceData, meta interface{}) er minPortsPerVmProp, err := expandNestedComputeRouterNatMinPortsPerVm(d.Get("min_ports_per_vm"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("min_ports_per_vm"); !isEmptyValue(reflect.ValueOf(minPortsPerVmProp)) && (ok || !reflect.DeepEqual(v, minPortsPerVmProp)) { + } else if v, ok := d.GetOkExists("min_ports_per_vm"); !tpgresource.IsEmptyValue(reflect.ValueOf(minPortsPerVmProp)) && (ok || !reflect.DeepEqual(v, minPortsPerVmProp)) { obj["minPortsPerVm"] = minPortsPerVmProp } maxPortsPerVmProp, err := expandNestedComputeRouterNatMaxPortsPerVm(d.Get("max_ports_per_vm"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("max_ports_per_vm"); !isEmptyValue(reflect.ValueOf(maxPortsPerVmProp)) && (ok || !reflect.DeepEqual(v, maxPortsPerVmProp)) { + } else if v, ok := d.GetOkExists("max_ports_per_vm"); !tpgresource.IsEmptyValue(reflect.ValueOf(maxPortsPerVmProp)) && (ok || !reflect.DeepEqual(v, maxPortsPerVmProp)) { obj["maxPortsPerVm"] = maxPortsPerVmProp } enableDynamicPortAllocationProp, err := 
expandNestedComputeRouterNatEnableDynamicPortAllocation(d.Get("enable_dynamic_port_allocation"), d, config) @@ -526,31 +536,31 @@ func resourceComputeRouterNatCreate(d *schema.ResourceData, meta interface{}) er udpIdleTimeoutSecProp, err := expandNestedComputeRouterNatUdpIdleTimeoutSec(d.Get("udp_idle_timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("udp_idle_timeout_sec"); !isEmptyValue(reflect.ValueOf(udpIdleTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, udpIdleTimeoutSecProp)) { + } else if v, ok := d.GetOkExists("udp_idle_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(udpIdleTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, udpIdleTimeoutSecProp)) { obj["udpIdleTimeoutSec"] = udpIdleTimeoutSecProp } icmpIdleTimeoutSecProp, err := expandNestedComputeRouterNatIcmpIdleTimeoutSec(d.Get("icmp_idle_timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("icmp_idle_timeout_sec"); !isEmptyValue(reflect.ValueOf(icmpIdleTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, icmpIdleTimeoutSecProp)) { + } else if v, ok := d.GetOkExists("icmp_idle_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(icmpIdleTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, icmpIdleTimeoutSecProp)) { obj["icmpIdleTimeoutSec"] = icmpIdleTimeoutSecProp } tcpEstablishedIdleTimeoutSecProp, err := expandNestedComputeRouterNatTcpEstablishedIdleTimeoutSec(d.Get("tcp_established_idle_timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("tcp_established_idle_timeout_sec"); !isEmptyValue(reflect.ValueOf(tcpEstablishedIdleTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, tcpEstablishedIdleTimeoutSecProp)) { + } else if v, ok := d.GetOkExists("tcp_established_idle_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(tcpEstablishedIdleTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, tcpEstablishedIdleTimeoutSecProp)) { obj["tcpEstablishedIdleTimeoutSec"] = tcpEstablishedIdleTimeoutSecProp 
} tcpTransitoryIdleTimeoutSecProp, err := expandNestedComputeRouterNatTcpTransitoryIdleTimeoutSec(d.Get("tcp_transitory_idle_timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("tcp_transitory_idle_timeout_sec"); !isEmptyValue(reflect.ValueOf(tcpTransitoryIdleTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, tcpTransitoryIdleTimeoutSecProp)) { + } else if v, ok := d.GetOkExists("tcp_transitory_idle_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(tcpTransitoryIdleTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, tcpTransitoryIdleTimeoutSecProp)) { obj["tcpTransitoryIdleTimeoutSec"] = tcpTransitoryIdleTimeoutSecProp } tcpTimeWaitTimeoutSecProp, err := expandNestedComputeRouterNatTcpTimeWaitTimeoutSec(d.Get("tcp_time_wait_timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("tcp_time_wait_timeout_sec"); !isEmptyValue(reflect.ValueOf(tcpTimeWaitTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, tcpTimeWaitTimeoutSecProp)) { + } else if v, ok := d.GetOkExists("tcp_time_wait_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(tcpTimeWaitTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, tcpTimeWaitTimeoutSecProp)) { obj["tcpTimeWaitTimeoutSec"] = tcpTimeWaitTimeoutSecProp } logConfigProp, err := expandNestedComputeRouterNatLogConfig(d.Get("log_config"), d, config) @@ -572,14 +582,14 @@ func resourceComputeRouterNatCreate(d *schema.ResourceData, meta interface{}) er obj["enableEndpointIndependentMapping"] = enableEndpointIndependentMappingProp } - lockName, err := replaceVars(d, config, "router/{{region}}/{{router}}") + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") 
+ url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") if err != nil { return err } @@ -592,24 +602,32 @@ func resourceComputeRouterNatCreate(d *schema.ResourceData, meta interface{}) er } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RouterNat: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating RouterNat: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{router}}/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{region}}/{{router}}/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -631,33 +649,39 @@ func resourceComputeRouterNatCreate(d *schema.ResourceData, meta interface{}) er } func resourceComputeRouterNatRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + url, err := tpgresource.ReplaceVars(d, 
config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RouterNat: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeRouterNat %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRouterNat %q", d.Id())) } res, err = flattenNestedComputeRouterNat(d, meta, res) @@ -732,15 +756,15 @@ func resourceComputeRouterNatRead(d *schema.ResourceData, meta interface{}) erro } func resourceComputeRouterNatUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RouterNat: %s", err) } @@ -750,7 +774,7 @@ func resourceComputeRouterNatUpdate(d *schema.ResourceData, meta interface{}) er natIpAllocateOptionProp, err := expandNestedComputeRouterNatNatIpAllocateOption(d.Get("nat_ip_allocate_option"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("nat_ip_allocate_option"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, natIpAllocateOptionProp)) { + } else if v, ok := d.GetOkExists("nat_ip_allocate_option"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, natIpAllocateOptionProp)) { obj["natIpAllocateOption"] = natIpAllocateOptionProp } natIpsProp, err := expandNestedComputeRouterNatNatIps(d.Get("nat_ips"), d, config) @@ -768,7 +792,7 @@ func resourceComputeRouterNatUpdate(d *schema.ResourceData, meta interface{}) er sourceSubnetworkIpRangesToNatProp, err := expandNestedComputeRouterNatSourceSubnetworkIpRangesToNat(d.Get("source_subnetwork_ip_ranges_to_nat"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("source_subnetwork_ip_ranges_to_nat"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceSubnetworkIpRangesToNatProp)) { + } else if v, ok := d.GetOkExists("source_subnetwork_ip_ranges_to_nat"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceSubnetworkIpRangesToNatProp)) { obj["sourceSubnetworkIpRangesToNat"] = sourceSubnetworkIpRangesToNatProp } subnetworksProp, err := expandNestedComputeRouterNatSubnetwork(d.Get("subnetwork"), d, config) @@ -780,13 +804,13 @@ func resourceComputeRouterNatUpdate(d *schema.ResourceData, meta interface{}) er minPortsPerVmProp, err := expandNestedComputeRouterNatMinPortsPerVm(d.Get("min_ports_per_vm"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("min_ports_per_vm"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, minPortsPerVmProp)) { + } else if v, ok := d.GetOkExists("min_ports_per_vm"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, minPortsPerVmProp)) { obj["minPortsPerVm"] = minPortsPerVmProp } maxPortsPerVmProp, err := expandNestedComputeRouterNatMaxPortsPerVm(d.Get("max_ports_per_vm"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("max_ports_per_vm"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maxPortsPerVmProp)) { + } else if v, ok := d.GetOkExists("max_ports_per_vm"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maxPortsPerVmProp)) { obj["maxPortsPerVm"] = maxPortsPerVmProp } enableDynamicPortAllocationProp, err := expandNestedComputeRouterNatEnableDynamicPortAllocation(d.Get("enable_dynamic_port_allocation"), d, config) @@ -798,31 +822,31 @@ func resourceComputeRouterNatUpdate(d *schema.ResourceData, meta interface{}) er udpIdleTimeoutSecProp, err := expandNestedComputeRouterNatUdpIdleTimeoutSec(d.Get("udp_idle_timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("udp_idle_timeout_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, udpIdleTimeoutSecProp)) { + } else if v, ok := d.GetOkExists("udp_idle_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, udpIdleTimeoutSecProp)) { obj["udpIdleTimeoutSec"] = udpIdleTimeoutSecProp } icmpIdleTimeoutSecProp, err := expandNestedComputeRouterNatIcmpIdleTimeoutSec(d.Get("icmp_idle_timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("icmp_idle_timeout_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, icmpIdleTimeoutSecProp)) { + } else if v, ok := d.GetOkExists("icmp_idle_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, icmpIdleTimeoutSecProp)) { obj["icmpIdleTimeoutSec"] = icmpIdleTimeoutSecProp } tcpEstablishedIdleTimeoutSecProp, err := expandNestedComputeRouterNatTcpEstablishedIdleTimeoutSec(d.Get("tcp_established_idle_timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("tcp_established_idle_timeout_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tcpEstablishedIdleTimeoutSecProp)) { + } else if v, ok := 
d.GetOkExists("tcp_established_idle_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tcpEstablishedIdleTimeoutSecProp)) { obj["tcpEstablishedIdleTimeoutSec"] = tcpEstablishedIdleTimeoutSecProp } tcpTransitoryIdleTimeoutSecProp, err := expandNestedComputeRouterNatTcpTransitoryIdleTimeoutSec(d.Get("tcp_transitory_idle_timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("tcp_transitory_idle_timeout_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tcpTransitoryIdleTimeoutSecProp)) { + } else if v, ok := d.GetOkExists("tcp_transitory_idle_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tcpTransitoryIdleTimeoutSecProp)) { obj["tcpTransitoryIdleTimeoutSec"] = tcpTransitoryIdleTimeoutSecProp } tcpTimeWaitTimeoutSecProp, err := expandNestedComputeRouterNatTcpTimeWaitTimeoutSec(d.Get("tcp_time_wait_timeout_sec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("tcp_time_wait_timeout_sec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tcpTimeWaitTimeoutSecProp)) { + } else if v, ok := d.GetOkExists("tcp_time_wait_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tcpTimeWaitTimeoutSecProp)) { obj["tcpTimeWaitTimeoutSec"] = tcpTimeWaitTimeoutSecProp } logConfigProp, err := expandNestedComputeRouterNatLogConfig(d.Get("log_config"), d, config) @@ -844,14 +868,14 @@ func resourceComputeRouterNatUpdate(d *schema.ResourceData, meta interface{}) er obj["enableEndpointIndependentMapping"] = enableEndpointIndependentMappingProp } - lockName, err := replaceVars(d, config, "router/{{region}}/{{router}}") + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer 
transport_tpg.MutexStore.Unlock(lockName) - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") if err != nil { return err } @@ -864,11 +888,19 @@ func resourceComputeRouterNatUpdate(d *schema.ResourceData, meta interface{}) er } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating RouterNat %q: %s", d.Id(), err) @@ -888,28 +920,28 @@ func resourceComputeRouterNatUpdate(d *schema.ResourceData, meta interface{}) er } func resourceComputeRouterNatDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for RouterNat: %s", err) } billingProject = project - lockName, err := replaceVars(d, config, "router/{{region}}/{{router}}") + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + 
transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") if err != nil { return err } @@ -918,18 +950,26 @@ func resourceComputeRouterNatDelete(d *schema.ResourceData, meta interface{}) er obj, err = resourceComputeRouterNatPatchDeleteEncoder(d, meta, obj) if err != nil { - return handleNotFoundError(err, d, "RouterNat") + return transport_tpg.HandleNotFoundError(err, d, "RouterNat") } log.Printf("[DEBUG] Deleting RouterNat %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "RouterNat") + return transport_tpg.HandleNotFoundError(err, d, "RouterNat") } err = ComputeOperationWaitTime( @@ -945,8 +985,8 @@ func resourceComputeRouterNatDelete(d *schema.ResourceData, meta interface{}) er } func resourceComputeRouterNatImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/regions/(?P[^/]+)/routers/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", @@ -956,7 +996,7 @@ 
func resourceComputeRouterNatImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{router}}/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{region}}/{{router}}/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -965,33 +1005,33 @@ func resourceComputeRouterNatImport(d *schema.ResourceData, meta interface{}) ([ return []*schema.ResourceData{d}, nil } -func flattenNestedComputeRouterNatName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNestedComputeRouterNatNatIpAllocateOption(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatNatIpAllocateOption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNestedComputeRouterNatNatIps(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatNatIps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) } -func flattenNestedComputeRouterNatDrainNatIps(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatDrainNatIps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) } -func flattenNestedComputeRouterNatSourceSubnetworkIpRangesToNat(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatSourceSubnetworkIpRangesToNat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNestedComputeRouterNatSubnetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatSubnetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1011,31 +1051,31 @@ func flattenNestedComputeRouterNatSubnetwork(v interface{}, d *schema.ResourceDa } return transformed } -func flattenNestedComputeRouterNatSubnetworkName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatSubnetworkName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenNestedComputeRouterNatSubnetworkSourceIpRangesToNat(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatSubnetworkSourceIpRangesToNat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenNestedComputeRouterNatSubnetworkSecondaryIpRangeNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatSubnetworkSecondaryIpRangeNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenNestedComputeRouterNatMinPortsPerVm(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatMinPortsPerVm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string 
fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1049,10 +1089,10 @@ func flattenNestedComputeRouterNatMinPortsPerVm(v interface{}, d *schema.Resourc return v // let terraform core handle it otherwise } -func flattenNestedComputeRouterNatMaxPortsPerVm(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatMaxPortsPerVm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1066,17 +1106,17 @@ func flattenNestedComputeRouterNatMaxPortsPerVm(v interface{}, d *schema.Resourc return v // let terraform core handle it otherwise } -func flattenNestedComputeRouterNatEnableDynamicPortAllocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatEnableDynamicPortAllocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNestedComputeRouterNatUdpIdleTimeoutSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenNestedComputeRouterNatUdpIdleTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return 30 } // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } // let terraform core handle it if we can't convert the string to an int. 
} @@ -1084,13 +1124,13 @@ func flattenNestedComputeRouterNatUdpIdleTimeoutSec(v interface{}, d *schema.Res return v } -func flattenNestedComputeRouterNatIcmpIdleTimeoutSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenNestedComputeRouterNatIcmpIdleTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return 30 } // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } // let terraform core handle it if we can't convert the string to an int. } @@ -1098,13 +1138,13 @@ func flattenNestedComputeRouterNatIcmpIdleTimeoutSec(v interface{}, d *schema.Re return v } -func flattenNestedComputeRouterNatTcpEstablishedIdleTimeoutSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenNestedComputeRouterNatTcpEstablishedIdleTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return 1200 } // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } // let terraform core handle it if we can't convert the string to an int. 
} @@ -1112,13 +1152,13 @@ func flattenNestedComputeRouterNatTcpEstablishedIdleTimeoutSec(v interface{}, d return v } -func flattenNestedComputeRouterNatTcpTransitoryIdleTimeoutSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenNestedComputeRouterNatTcpTransitoryIdleTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return 30 } // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } // let terraform core handle it if we can't convert the string to an int. } @@ -1126,13 +1166,13 @@ func flattenNestedComputeRouterNatTcpTransitoryIdleTimeoutSec(v interface{}, d * return v } -func flattenNestedComputeRouterNatTcpTimeWaitTimeoutSec(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenNestedComputeRouterNatTcpTimeWaitTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return 120 } // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } // let terraform core handle it if we can't convert the string to an int. 
} @@ -1140,7 +1180,7 @@ func flattenNestedComputeRouterNatTcpTimeWaitTimeoutSec(v interface{}, d *schema return v } -func flattenNestedComputeRouterNatLogConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatLogConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1155,15 +1195,15 @@ func flattenNestedComputeRouterNatLogConfig(v interface{}, d *schema.ResourceDat flattenNestedComputeRouterNatLogConfigFilter(original["filter"], d, config) return []interface{}{transformed} } -func flattenNestedComputeRouterNatLogConfigEnable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatLogConfigEnable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNestedComputeRouterNatLogConfigFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatLogConfigFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNestedComputeRouterNatRules(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1184,10 +1224,10 @@ func flattenNestedComputeRouterNatRules(v interface{}, d *schema.ResourceData, c } return transformed } -func flattenNestedComputeRouterNatRulesRuleNumber(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatRulesRuleNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ 
-1201,15 +1241,15 @@ func flattenNestedComputeRouterNatRulesRuleNumber(v interface{}, d *schema.Resou return v // let terraform core handle it otherwise } -func flattenNestedComputeRouterNatRulesDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatRulesDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNestedComputeRouterNatRulesMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatRulesMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNestedComputeRouterNatRulesAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatRulesAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1224,33 +1264,33 @@ func flattenNestedComputeRouterNatRulesAction(v interface{}, d *schema.ResourceD flattenNestedComputeRouterNatRulesActionSourceNatDrainIps(original["sourceNatDrainIps"], d, config) return []interface{}{transformed} } -func flattenNestedComputeRouterNatRulesActionSourceNatActiveIps(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatRulesActionSourceNatActiveIps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return schema.NewSet(computeRouterNatIPsHash, convertStringArrToInterface(convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1))) + return schema.NewSet(computeRouterNatIPsHash, tpgresource.ConvertStringArrToInterface(tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1))) } -func flattenNestedComputeRouterNatRulesActionSourceNatDrainIps(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenNestedComputeRouterNatRulesActionSourceNatDrainIps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return schema.NewSet(computeRouterNatIPsHash, convertStringArrToInterface(convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1))) + return schema.NewSet(computeRouterNatIPsHash, tpgresource.ConvertStringArrToInterface(tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1))) } -func flattenNestedComputeRouterNatEnableEndpointIndependentMapping(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNestedComputeRouterNatEnableEndpointIndependentMapping(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandNestedComputeRouterNatName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatNatIpAllocateOption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatNatIpAllocateOption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatNatIps(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatNatIps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -1258,7 +1298,7 @@ func expandNestedComputeRouterNatNatIps(v interface{}, d TerraformResourceData, if raw == nil { return nil, fmt.Errorf("Invalid value for nat_ips: nil") } - f, err := parseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, 
config, true) + f, err := tpgresource.ParseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for nat_ips: %s", err) } @@ -1267,7 +1307,7 @@ func expandNestedComputeRouterNatNatIps(v interface{}, d TerraformResourceData, return req, nil } -func expandNestedComputeRouterNatDrainNatIps(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatDrainNatIps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -1275,7 +1315,7 @@ func expandNestedComputeRouterNatDrainNatIps(v interface{}, d TerraformResourceD if raw == nil { return nil, fmt.Errorf("Invalid value for drain_nat_ips: nil") } - f, err := parseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) + f, err := tpgresource.ParseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for drain_nat_ips: %s", err) } @@ -1284,11 +1324,11 @@ func expandNestedComputeRouterNatDrainNatIps(v interface{}, d TerraformResourceD return req, nil } -func expandNestedComputeRouterNatSourceSubnetworkIpRangesToNat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatSourceSubnetworkIpRangesToNat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatSubnetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatSubnetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 
0, len(l)) @@ -1302,21 +1342,21 @@ func expandNestedComputeRouterNatSubnetwork(v interface{}, d TerraformResourceDa transformedName, err := expandNestedComputeRouterNatSubnetworkName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedSourceIpRangesToNat, err := expandNestedComputeRouterNatSubnetworkSourceIpRangesToNat(original["source_ip_ranges_to_nat"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSourceIpRangesToNat); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSourceIpRangesToNat); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sourceIpRangesToNat"] = transformedSourceIpRangesToNat } transformedSecondaryIpRangeNames, err := expandNestedComputeRouterNatSubnetworkSecondaryIpRangeNames(original["secondary_ip_range_names"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSecondaryIpRangeNames); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSecondaryIpRangeNames); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["secondaryIpRangeNames"] = transformedSecondaryIpRangeNames } @@ -1325,57 +1365,57 @@ func expandNestedComputeRouterNatSubnetwork(v interface{}, d TerraformResourceDa return req, nil } -func expandNestedComputeRouterNatSubnetworkName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) +func expandNestedComputeRouterNatSubnetworkName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := 
tpgresource.ParseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for name: %s", err) } return f.RelativeLink(), nil } -func expandNestedComputeRouterNatSubnetworkSourceIpRangesToNat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatSubnetworkSourceIpRangesToNat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandNestedComputeRouterNatSubnetworkSecondaryIpRangeNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatSubnetworkSecondaryIpRangeNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandNestedComputeRouterNatMinPortsPerVm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatMinPortsPerVm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatMaxPortsPerVm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatMaxPortsPerVm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatEnableDynamicPortAllocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatEnableDynamicPortAllocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatUdpIdleTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandNestedComputeRouterNatUdpIdleTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatIcmpIdleTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatIcmpIdleTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatTcpEstablishedIdleTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatTcpEstablishedIdleTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatTcpTransitoryIdleTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatTcpTransitoryIdleTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatTcpTimeWaitTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatTcpTimeWaitTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatLogConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1387,29 +1427,29 @@ func expandNestedComputeRouterNatLogConfig(v interface{}, d TerraformResourceDat transformedEnable, err := expandNestedComputeRouterNatLogConfigEnable(original["enable"], d, config) if err != nil { 
return nil, err - } else if val := reflect.ValueOf(transformedEnable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enable"] = transformedEnable } transformedFilter, err := expandNestedComputeRouterNatLogConfigFilter(original["filter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["filter"] = transformedFilter } return transformed, nil } -func expandNestedComputeRouterNatLogConfigEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatLogConfigEnable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatLogConfigFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatLogConfigFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatRules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -1430,21 +1470,21 @@ func expandNestedComputeRouterNatRules(v interface{}, d TerraformResourceData, c transformedDescription, err := expandNestedComputeRouterNatRulesDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedMatch, err := expandNestedComputeRouterNatRulesMatch(original["match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["match"] = transformedMatch } transformedAction, err := expandNestedComputeRouterNatRulesAction(original["action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["action"] = transformedAction } @@ -1453,19 +1493,19 @@ func expandNestedComputeRouterNatRules(v interface{}, d TerraformResourceData, c return req, nil } -func expandNestedComputeRouterNatRulesRuleNumber(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatRulesRuleNumber(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatRulesDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatRulesDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatRulesMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatRulesMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNestedComputeRouterNatRulesAction(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandNestedComputeRouterNatRulesAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1477,21 +1517,21 @@ func expandNestedComputeRouterNatRulesAction(v interface{}, d TerraformResourceD transformedSourceNatActiveIps, err := expandNestedComputeRouterNatRulesActionSourceNatActiveIps(original["source_nat_active_ips"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSourceNatActiveIps); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSourceNatActiveIps); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sourceNatActiveIps"] = transformedSourceNatActiveIps } transformedSourceNatDrainIps, err := expandNestedComputeRouterNatRulesActionSourceNatDrainIps(original["source_nat_drain_ips"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSourceNatDrainIps); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSourceNatDrainIps); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sourceNatDrainIps"] = transformedSourceNatDrainIps } return transformed, nil } -func expandNestedComputeRouterNatRulesActionSourceNatActiveIps(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatRulesActionSourceNatActiveIps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -1499,7 +1539,7 @@ func expandNestedComputeRouterNatRulesActionSourceNatActiveIps(v interface{}, d if raw == nil { return nil, fmt.Errorf("Invalid value for source_nat_active_ips: nil") } - f, err := parseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) + 
f, err := tpgresource.ParseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for source_nat_active_ips: %s", err) } @@ -1508,7 +1548,7 @@ func expandNestedComputeRouterNatRulesActionSourceNatActiveIps(v interface{}, d return req, nil } -func expandNestedComputeRouterNatRulesActionSourceNatDrainIps(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatRulesActionSourceNatDrainIps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -1516,7 +1556,7 @@ func expandNestedComputeRouterNatRulesActionSourceNatDrainIps(v interface{}, d T if raw == nil { return nil, fmt.Errorf("Invalid value for source_nat_drain_ips: nil") } - f, err := parseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) + f, err := tpgresource.ParseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) if err != nil { return nil, fmt.Errorf("Invalid value for source_nat_drain_ips: %s", err) } @@ -1525,7 +1565,7 @@ func expandNestedComputeRouterNatRulesActionSourceNatDrainIps(v interface{}, d T return req, nil } -func expandNestedComputeRouterNatEnableEndpointIndependentMapping(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNestedComputeRouterNatEnableEndpointIndependentMapping(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -1556,11 +1596,11 @@ func flattenNestedComputeRouterNat(d *schema.ResourceData, meta interface{}, res } func resourceComputeRouterNatFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := 
expandNestedComputeRouterNatName(d.Get("name"), d, meta.(*Config)) + expectedName, err := expandNestedComputeRouterNatName(d.Get("name"), d, meta.(*transport_tpg.Config)) if err != nil { return -1, nil, err } - expectedFlattenedName := flattenNestedComputeRouterNatName(expectedName, d, meta.(*Config)) + expectedFlattenedName := flattenNestedComputeRouterNatName(expectedName, d, meta.(*transport_tpg.Config)) // Search list for this resource. for idx, itemRaw := range items { @@ -1569,9 +1609,9 @@ func resourceComputeRouterNatFindNestedObjectInList(d *schema.ResourceData, meta } item := itemRaw.(map[string]interface{}) - itemName := flattenNestedComputeRouterNatName(item["name"], d, meta.(*Config)) - // isEmptyValue check so that if one is nil and the other is "", that's considered a match - if !(isEmptyValue(reflect.ValueOf(itemName)) && isEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + itemName := flattenNestedComputeRouterNatName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) continue } @@ -1653,7 +1693,7 @@ func resourceComputeRouterNatPatchDeleteEncoder(d *schema.ResourceData, meta int } if item == nil { // Spoof 404 error for proper handling by Delete (i.e. no-op) - return nil, fake404("nested", "ComputeRouterNat") + return nil, tpgresource.Fake404("nested", "ComputeRouterNat") } updatedItems := append(currItems[:idx], currItems[idx+1:]...) 
@@ -1667,22 +1707,28 @@ func resourceComputeRouterNatPatchDeleteEncoder(d *schema.ResourceData, meta int // ListForPatch handles making API request to get parent resource and // extracting list of objects. func resourceComputeRouterNatListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { - config := meta.(*Config) - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") if err != nil { return nil, err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } - res, err := SendRequest(config, "GET", project, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat_sweeper.go new file mode 100644 index 0000000000..d1ef077b94 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_nat_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRouterNat", testSweepComputeRouterNat) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRouterNat(region string) error { + resourceName := "ComputeRouterNat" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/routers/{{router}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["nats"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/routers/{{router}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error 
deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_peer.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_peer.go new file mode 100644 index 0000000000..1890b0b605 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_peer.go @@ -0,0 +1,1328 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "net" + "reflect" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ipv6RepresentationDiffSuppress(_, old, new string, d *schema.ResourceData) bool { + //Diff suppress any equal IPV6 address in different representations + //An IPV6 address can have long or short representations + //E.g 2001:0cb0:0000:0000:0fc0:0000:0000:0abc, after compression: + //A) 2001:0cb0::0fc0:0000:0000:0abc (Omit groups of all zeros) + //B) 2001:cb0:0:0:fc0::abc (Omit leading zeros) + //C) 2001:cb0::fc0:0:0:abc (Combining A and B) + //The GCP API follows rule B) for normalzation + + oldIp := net.ParseIP(old) + newIp := net.ParseIP(new) + return oldIp.Equal(newIp) +} + +func ResourceComputeRouterBgpPeer() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterBgpPeerCreate, + Read: resourceComputeRouterBgpPeerRead, + Update: resourceComputeRouterBgpPeerUpdate, + Delete: resourceComputeRouterBgpPeerDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterBgpPeerImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "interface": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the interface the BGP peer is associated with.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRFC1035Name(2, 63), + Description: `Name of this BGP peer. 
The name must be 1-63 characters long, +and comply with RFC1035. Specifically, the name must be 1-63 characters +long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which +means the first character must be a lowercase letter, and all +following characters must be a dash, lowercase letter, or digit, +except the last character, which cannot be a dash.`, + }, + "peer_asn": { + Type: schema.TypeInt, + Required: true, + Description: `Peer BGP Autonomous System Number (ASN). +Each BGP interface may use a different value.`, + }, + "router": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Cloud Router in which this BgpPeer will be configured.`, + }, + "advertise_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"DEFAULT", "CUSTOM", ""}), + Description: `User-specified flag to indicate which mode to use for advertisement. +Valid values of this enum field are: 'DEFAULT', 'CUSTOM' Default value: "DEFAULT" Possible values: ["DEFAULT", "CUSTOM"]`, + Default: "DEFAULT", + }, + "advertised_groups": { + Type: schema.TypeList, + Optional: true, + Description: `User-specified list of prefix groups to advertise in custom +mode, which can take one of the following options: + +* 'ALL_SUBNETS': Advertises all available subnets, including peer VPC subnets. +* 'ALL_VPC_SUBNETS': Advertises the router's own VPC subnets. +* 'ALL_PEER_VPC_SUBNETS': Advertises peer subnets of the router's VPC network. + + +Note that this field can only be populated if advertiseMode is 'CUSTOM' +and overrides the list defined for the router (in the "bgp" message). +These groups are advertised in addition to any specified prefixes. 
+Leave this field blank to advertise no custom groups.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "advertised_ip_ranges": { + Type: schema.TypeList, + Optional: true, + Description: `User-specified list of individual IP ranges to advertise in +custom mode. This field can only be populated if advertiseMode +is 'CUSTOM' and is advertised to all peers of the router. These IP +ranges will be advertised in addition to any specified groups. +Leave this field blank to advertise no custom IP ranges.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "range": { + Type: schema.TypeString, + Required: true, + Description: `The IP range to advertise. The value must be a +CIDR-formatted string.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `User-specified description for the IP range.`, + }, + }, + }, + }, + "advertised_route_priority": { + Type: schema.TypeInt, + Optional: true, + Description: `The priority of routes advertised to this BGP peer. +Where there is more than one matching route of maximum +length, the routes with the lowest priority value win.`, + }, + "bfd": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `BFD configuration for the BGP peering.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "session_initialization_mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"ACTIVE", "DISABLED", "PASSIVE"}), + Description: `The BFD session initialization mode for this BGP peer. +If set to 'ACTIVE', the Cloud Router will initiate the BFD session +for this BGP peer. If set to 'PASSIVE', the Cloud Router will wait +for the peer router to initiate the BFD session for this BGP peer. +If set to 'DISABLED', BFD is disabled for this BGP peer. 
Possible values: ["ACTIVE", "DISABLED", "PASSIVE"]`, + }, + "min_receive_interval": { + Type: schema.TypeInt, + Optional: true, + Description: `The minimum interval, in milliseconds, between BFD control packets +received from the peer router. The actual value is negotiated +between the two routers and is equal to the greater of this value +and the transmit interval of the other router. If set, this value +must be between 1000 and 30000.`, + Default: 1000, + }, + "min_transmit_interval": { + Type: schema.TypeInt, + Optional: true, + Description: `The minimum interval, in milliseconds, between BFD control packets +transmitted to the peer router. The actual value is negotiated +between the two routers and is equal to the greater of this value +and the corresponding receive interval of the other router. If set, +this value must be between 1000 and 30000.`, + Default: 1000, + }, + "multiplier": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of consecutive BFD packets that must be missed before +BFD declares that a peer is unavailable. If set, the value must +be a value between 5 and 16.`, + Default: 5, + }, + }, + }, + }, + "enable": { + Type: schema.TypeBool, + Optional: true, + Description: `The status of the BGP peer connection. If set to false, any active session +with the peer is terminated and all associated routing information is removed. +If set to true, the peer connection can be established with routing information. +The default is true.`, + Default: true, + }, + "enable_ipv6": { + Type: schema.TypeBool, + Optional: true, + Description: `Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default.`, + Default: false, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `IP address of the interface inside Google Cloud Platform. 
+Only IPv4 is supported.`, + }, + "ipv6_nexthop_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateIpAddress, + DiffSuppressFunc: ipv6RepresentationDiffSuppress, + Description: `IPv6 address of the interface inside Google Cloud Platform. +The address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64. +If you do not specify the next hop addresses, Google Cloud automatically +assigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you.`, + }, + "peer_ip_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `IP address of the BGP interface outside Google Cloud Platform. +Only IPv4 is supported. Required if 'ip_address' is set.`, + }, + "peer_ipv6_nexthop_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateIpAddress, + DiffSuppressFunc: ipv6RepresentationDiffSuppress, + Description: `IPv6 address of the BGP interface outside Google Cloud Platform. +The address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64. +If you do not specify the next hop addresses, Google Cloud automatically +assigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Region where the router and BgpPeer reside. +If it is not provided, the provider region is used.`, + }, + "router_appliance_instance": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URI of the VM instance that is used as third-party router appliances +such as Next Gen Firewalls, Virtual Routers, or Router Appliances. +The VM instance must be located in zones contained in the same region as +this Cloud Router. 
The VM instance is the peer side of the BGP session.`, + }, + "management_type": { + Type: schema.TypeString, + Computed: true, + Description: `The resource that configures and manages this BGP peer. + +* 'MANAGED_BY_USER' is the default value and can be managed by +you or other users +* 'MANAGED_BY_ATTACHMENT' is a BGP peer that is configured and +managed by Cloud Interconnect, specifically by an +InterconnectAttachment of type PARTNER. Google automatically +creates, updates, and deletes this type of BGP peer when the +PARTNER InterconnectAttachment is created, updated, +or deleted.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRouterBgpPeerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandNestedComputeRouterBgpPeerName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + interfaceNameProp, err := expandNestedComputeRouterBgpPeerInterface(d.Get("interface"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("interface"); !tpgresource.IsEmptyValue(reflect.ValueOf(interfaceNameProp)) && (ok || !reflect.DeepEqual(v, interfaceNameProp)) { + obj["interfaceName"] = interfaceNameProp + } + ipAddressProp, err := expandNestedComputeRouterBgpPeerIpAddress(d.Get("ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipAddressProp)) && (ok || !reflect.DeepEqual(v, ipAddressProp)) { + obj["ipAddress"] = ipAddressProp + } + peerIpAddressProp, err 
:= expandNestedComputeRouterBgpPeerPeerIpAddress(d.Get("peer_ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(peerIpAddressProp)) && (ok || !reflect.DeepEqual(v, peerIpAddressProp)) { + obj["peerIpAddress"] = peerIpAddressProp + } + peerAsnProp, err := expandNestedComputeRouterBgpPeerPeerAsn(d.Get("peer_asn"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_asn"); !tpgresource.IsEmptyValue(reflect.ValueOf(peerAsnProp)) && (ok || !reflect.DeepEqual(v, peerAsnProp)) { + obj["peerAsn"] = peerAsnProp + } + advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { + obj["advertisedRoutePriority"] = advertisedRoutePriorityProp + } + advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertise_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(advertiseModeProp)) && (ok || !reflect.DeepEqual(v, advertiseModeProp)) { + obj["advertiseMode"] = advertiseModeProp + } + advertisedGroupsProp, err := expandNestedComputeRouterBgpPeerAdvertisedGroups(d.Get("advertised_groups"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_groups"); ok || !reflect.DeepEqual(v, advertisedGroupsProp) { + obj["advertisedGroups"] = advertisedGroupsProp + } + advertisedIpRangesProp, err := expandNestedComputeRouterBgpPeerAdvertisedIpRanges(d.Get("advertised_ip_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_ip_ranges"); ok || !reflect.DeepEqual(v, advertisedIpRangesProp) { + obj["advertisedIpRanges"] = 
advertisedIpRangesProp + } + bfdProp, err := expandNestedComputeRouterBgpPeerBfd(d.Get("bfd"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bfd"); !tpgresource.IsEmptyValue(reflect.ValueOf(bfdProp)) && (ok || !reflect.DeepEqual(v, bfdProp)) { + obj["bfd"] = bfdProp + } + enableProp, err := expandNestedComputeRouterBgpPeerEnable(d.Get("enable"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable"); ok || !reflect.DeepEqual(v, enableProp) { + obj["enable"] = enableProp + } + routerApplianceInstanceProp, err := expandNestedComputeRouterBgpPeerRouterApplianceInstance(d.Get("router_appliance_instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("router_appliance_instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(routerApplianceInstanceProp)) && (ok || !reflect.DeepEqual(v, routerApplianceInstanceProp)) { + obj["routerApplianceInstance"] = routerApplianceInstanceProp + } + enableIpv6Prop, err := expandNestedComputeRouterBgpPeerEnableIpv6(d.Get("enable_ipv6"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_ipv6"); ok || !reflect.DeepEqual(v, enableIpv6Prop) { + obj["enableIpv6"] = enableIpv6Prop + } + ipv6NexthopAddressProp, err := expandNestedComputeRouterBgpPeerIpv6NexthopAddress(d.Get("ipv6_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ipv6_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipv6NexthopAddressProp)) && (ok || !reflect.DeepEqual(v, ipv6NexthopAddressProp)) { + obj["ipv6NexthopAddress"] = ipv6NexthopAddressProp + } + peerIpv6NexthopAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(d.Get("peer_ipv6_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ipv6_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(peerIpv6NexthopAddressProp)) && (ok || !reflect.DeepEqual(v, 
peerIpv6NexthopAddressProp)) { + obj["peerIpv6NexthopAddress"] = peerIpv6NexthopAddressProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RouterBgpPeer: %#v", obj) + + obj, err = resourceComputeRouterBgpPeerPatchCreateEncoder(d, meta, obj) + if err != nil { + return err + } + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating RouterBgpPeer: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{router}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating RouterBgpPeer", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create RouterBgpPeer: %s", err) + } + + log.Printf("[DEBUG] Finished creating RouterBgpPeer %q: %#v", d.Id(), res) + + return resourceComputeRouterBgpPeerRead(d, meta) +} + +func 
resourceComputeRouterBgpPeerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRouterBgpPeer %q", d.Id())) + } + + res, err = flattenNestedComputeRouterBgpPeer(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. 
+ log.Printf("[DEBUG] Removing ComputeRouterBgpPeer because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + + if err := d.Set("name", flattenNestedComputeRouterBgpPeerName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("interface", flattenNestedComputeRouterBgpPeerInterface(res["interfaceName"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("ip_address", flattenNestedComputeRouterBgpPeerIpAddress(res["ipAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("peer_ip_address", flattenNestedComputeRouterBgpPeerPeerIpAddress(res["peerIpAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("peer_asn", flattenNestedComputeRouterBgpPeerPeerAsn(res["peerAsn"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("advertised_route_priority", flattenNestedComputeRouterBgpPeerAdvertisedRoutePriority(res["advertisedRoutePriority"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("advertise_mode", flattenNestedComputeRouterBgpPeerAdvertiseMode(res["advertiseMode"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("advertised_groups", flattenNestedComputeRouterBgpPeerAdvertisedGroups(res["advertisedGroups"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("advertised_ip_ranges", flattenNestedComputeRouterBgpPeerAdvertisedIpRanges(res["advertisedIpRanges"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := 
d.Set("management_type", flattenNestedComputeRouterBgpPeerManagementType(res["managementType"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("bfd", flattenNestedComputeRouterBgpPeerBfd(res["bfd"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("enable", flattenNestedComputeRouterBgpPeerEnable(res["enable"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("router_appliance_instance", flattenNestedComputeRouterBgpPeerRouterApplianceInstance(res["routerApplianceInstance"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("enable_ipv6", flattenNestedComputeRouterBgpPeerEnableIpv6(res["enableIpv6"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("ipv6_nexthop_address", flattenNestedComputeRouterBgpPeerIpv6NexthopAddress(res["ipv6NexthopAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("peer_ipv6_nexthop_address", flattenNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(res["peerIpv6NexthopAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + + return nil +} + +func resourceComputeRouterBgpPeerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + ipAddressProp, err := expandNestedComputeRouterBgpPeerIpAddress(d.Get("ip_address"), d, config) + if err != nil { + return 
err + } else if v, ok := d.GetOkExists("ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ipAddressProp)) { + obj["ipAddress"] = ipAddressProp + } + peerIpAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpAddress(d.Get("peer_ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peerIpAddressProp)) { + obj["peerIpAddress"] = peerIpAddressProp + } + peerAsnProp, err := expandNestedComputeRouterBgpPeerPeerAsn(d.Get("peer_asn"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_asn"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peerAsnProp)) { + obj["peerAsn"] = peerAsnProp + } + advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { + obj["advertisedRoutePriority"] = advertisedRoutePriorityProp + } + advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertise_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, advertiseModeProp)) { + obj["advertiseMode"] = advertiseModeProp + } + advertisedGroupsProp, err := expandNestedComputeRouterBgpPeerAdvertisedGroups(d.Get("advertised_groups"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_groups"); ok || !reflect.DeepEqual(v, advertisedGroupsProp) { + obj["advertisedGroups"] = advertisedGroupsProp + } + advertisedIpRangesProp, err := expandNestedComputeRouterBgpPeerAdvertisedIpRanges(d.Get("advertised_ip_ranges"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("advertised_ip_ranges"); ok || !reflect.DeepEqual(v, advertisedIpRangesProp) { + obj["advertisedIpRanges"] = advertisedIpRangesProp + } + bfdProp, err := expandNestedComputeRouterBgpPeerBfd(d.Get("bfd"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bfd"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bfdProp)) { + obj["bfd"] = bfdProp + } + enableProp, err := expandNestedComputeRouterBgpPeerEnable(d.Get("enable"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable"); ok || !reflect.DeepEqual(v, enableProp) { + obj["enable"] = enableProp + } + routerApplianceInstanceProp, err := expandNestedComputeRouterBgpPeerRouterApplianceInstance(d.Get("router_appliance_instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("router_appliance_instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, routerApplianceInstanceProp)) { + obj["routerApplianceInstance"] = routerApplianceInstanceProp + } + enableIpv6Prop, err := expandNestedComputeRouterBgpPeerEnableIpv6(d.Get("enable_ipv6"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_ipv6"); ok || !reflect.DeepEqual(v, enableIpv6Prop) { + obj["enableIpv6"] = enableIpv6Prop + } + ipv6NexthopAddressProp, err := expandNestedComputeRouterBgpPeerIpv6NexthopAddress(d.Get("ipv6_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ipv6_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ipv6NexthopAddressProp)) { + obj["ipv6NexthopAddress"] = ipv6NexthopAddressProp + } + peerIpv6NexthopAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(d.Get("peer_ipv6_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ipv6_nexthop_address"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peerIpv6NexthopAddressProp)) { + obj["peerIpv6NexthopAddress"] = peerIpv6NexthopAddressProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating RouterBgpPeer %q: %#v", d.Id(), obj) + + obj, err = resourceComputeRouterBgpPeerPatchUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating RouterBgpPeer %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating RouterBgpPeer %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating RouterBgpPeer", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeRouterBgpPeerRead(d, meta) +} + +func resourceComputeRouterBgpPeerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) + } + billingProject = project 
+ + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceComputeRouterBgpPeerPatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RouterBgpPeer") + } + log.Printf("[DEBUG] Deleting RouterBgpPeer %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RouterBgpPeer") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting RouterBgpPeer", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting RouterBgpPeer %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRouterBgpPeerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/routers/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/regions/{{region}}/routers/{{router}}/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedComputeRouterBgpPeerName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerInterface(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerPeerIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerPeerAsn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerAdvertisedRoutePriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerAdvertiseMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || 
tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "DEFAULT" + } + + return v +} + +func flattenNestedComputeRouterBgpPeerAdvertisedGroups(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerAdvertisedIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "range": flattenNestedComputeRouterBgpPeerAdvertisedIpRangesRange(original["range"], d, config), + "description": flattenNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(original["description"], d, config), + }) + } + return transformed +} +func flattenNestedComputeRouterBgpPeerAdvertisedIpRangesRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerManagementType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerBfd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["session_initialization_mode"] = + flattenNestedComputeRouterBgpPeerBfdSessionInitializationMode(original["sessionInitializationMode"], d, config) + transformed["min_transmit_interval"] = + 
flattenNestedComputeRouterBgpPeerBfdMinTransmitInterval(original["minTransmitInterval"], d, config) + transformed["min_receive_interval"] = + flattenNestedComputeRouterBgpPeerBfdMinReceiveInterval(original["minReceiveInterval"], d, config) + transformed["multiplier"] = + flattenNestedComputeRouterBgpPeerBfdMultiplier(original["multiplier"], d, config) + return []interface{}{transformed} +} +func flattenNestedComputeRouterBgpPeerBfdSessionInitializationMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerBfdMinTransmitInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerBfdMinReceiveInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerBfdMultiplier(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return 
intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerEnable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return true + } + b, err := strconv.ParseBool(v.(string)) + if err != nil { + // If we can't convert it into a bool return value as is and let caller handle it + return v + } + return b +} + +func flattenNestedComputeRouterBgpPeerRouterApplianceInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenNestedComputeRouterBgpPeerEnableIpv6(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerIpv6NexthopAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedComputeRouterBgpPeerName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerInterface(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerPeerIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerPeerAsn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerAdvertiseMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerAdvertisedGroups(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerAdvertisedIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRange, err := expandNestedComputeRouterBgpPeerAdvertisedIpRangesRange(original["range"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["range"] = transformedRange + } + + transformedDescription, err := expandNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(original["description"], d, config) + if err != nil { + return nil, err + } else { + transformed["description"] = transformedDescription + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNestedComputeRouterBgpPeerAdvertisedIpRangesRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerBfd(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSessionInitializationMode, err := expandNestedComputeRouterBgpPeerBfdSessionInitializationMode(original["session_initialization_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSessionInitializationMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sessionInitializationMode"] = transformedSessionInitializationMode + } + + transformedMinTransmitInterval, err := expandNestedComputeRouterBgpPeerBfdMinTransmitInterval(original["min_transmit_interval"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinTransmitInterval); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minTransmitInterval"] = transformedMinTransmitInterval + } + + transformedMinReceiveInterval, err := expandNestedComputeRouterBgpPeerBfdMinReceiveInterval(original["min_receive_interval"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinReceiveInterval); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minReceiveInterval"] = transformedMinReceiveInterval + } + + transformedMultiplier, err := expandNestedComputeRouterBgpPeerBfdMultiplier(original["multiplier"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMultiplier); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["multiplier"] = transformedMultiplier + } + + return transformed, nil +} + +func expandNestedComputeRouterBgpPeerBfdSessionInitializationMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerBfdMinTransmitInterval(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerBfdMinReceiveInterval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerBfdMultiplier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerEnable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + return strings.ToUpper(strconv.FormatBool(v.(bool))), nil +} + +func expandNestedComputeRouterBgpPeerRouterApplianceInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseZonalFieldValue("instances", v.(string), "project", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for router_appliance_instance: %s", err) + } + return f.RelativeLink(), nil +} + +func expandNestedComputeRouterBgpPeerEnableIpv6(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerIpv6NexthopAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedComputeRouterBgpPeer(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["bgpPeers"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case 
map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value bgpPeers. Actual value: %v", v) + } + + _, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputeRouterBgpPeerFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName, err := expandNestedComputeRouterBgpPeerName(d.Get("name"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedName := flattenNestedComputeRouterBgpPeerName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemName := flattenNestedComputeRouterBgpPeerName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. 
+func resourceComputeRouterBgpPeerPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceComputeRouterBgpPeerListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. + if found != nil { + return nil, fmt.Errorf("Unable to create RouterBgpPeer, existing object already found: %+v", found) + } + + // Return list with the resource to create appended + res := map[string]interface{}{ + "bgpPeers": append(currItems, obj), + } + + return res, nil +} + +// PatchUpdateEncoder handles creating request data to PATCH parent resource +// with list including updated object. +func resourceComputeRouterBgpPeerPatchUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + items, err := resourceComputeRouterBgpPeerListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, items) + if err != nil { + return nil, err + } + + // Return error if item to update does not exist. + if item == nil { + return nil, fmt.Errorf("Unable to update RouterBgpPeer %q - not found in list", d.Id()) + } + + // Merge new object into old. + for k, v := range obj { + item[k] = v + } + items[idx] = item + + // Return list with new item added + res := map[string]interface{}{ + "bgpPeers": items, + } + + return res, nil +} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. 
+func resourceComputeRouterBgpPeerPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceComputeRouterBgpPeerListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "ComputeRouterBgpPeer") + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) + res := map[string]interface{}{ + "bgpPeers": updatedItems, + } + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. +func resourceComputeRouterBgpPeerListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return nil, err + } + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + var v interface{} + var ok bool + + v, ok = res["bgpPeers"] + if ok && v != nil { + ls, lsOk := v.([]interface{}) + if !lsOk { + return nil, fmt.Errorf(`expected list for nested field "bgpPeers"`) + } + return ls, nil + } + return nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_peer_sweeper.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_peer_sweeper.go new file mode 100644 index 0000000000..80a5aeb49a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_peer_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRouterBgpPeer", testSweepComputeRouterBgpPeer) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRouterBgpPeer(region string) error { + resourceName := "ComputeRouterBgpPeer" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + 
return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/routers/{{router}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["bgpPeers"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/routers/{{router}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_sweeper.go new file mode 100644 index 0000000000..19720c1af1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_router_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRouter", testSweepComputeRouter) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRouter(region string) error { + resourceName := "ComputeRouter" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/routers", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/routers/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for 
url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_security_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_security_policy.go similarity index 94% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_security_policy.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_security_policy.go index fd0e82917b..3dc0a3ce37 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_security_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_security_policy.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "context" @@ -10,6 +12,11 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/compute/v1" ) @@ -25,9 +32,9 @@ func ResourceComputeSecurityPolicy() *schema.Resource { CustomizeDiff: rulesCustomizeDiff, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(4 * time.Minute), - Update: schema.DefaultTimeout(4 * time.Minute), - Delete: schema.DefaultTimeout(4 * time.Minute), + Create: schema.DefaultTimeout(8 * time.Minute), + Update: schema.DefaultTimeout(8 * time.Minute), + Delete: schema.DefaultTimeout(8 * time.Minute), }, Schema: map[string]*schema.Schema{ @@ -35,7 +42,7 @@ func ResourceComputeSecurityPolicy() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateGCEName, + ValidateFunc: verify.ValidateGCEName, Description: `The name of the security policy.`, }, @@ -451,13 +458,13 @@ func rulesCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, _ interfac } func resourceComputeSecurityPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -498,7 +505,7 @@ func resourceComputeSecurityPolicyCreate(d *schema.ResourceData, meta interface{ return errwrap.Wrapf("Error creating SecurityPolicy: {{err}}", err) } - id, err := 
replaceVars(d, config, "projects/{{project}}/global/securityPolicies/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/securityPolicies/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -513,13 +520,13 @@ func resourceComputeSecurityPolicyCreate(d *schema.ResourceData, meta interface{ } func resourceComputeSecurityPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -530,7 +537,7 @@ func resourceComputeSecurityPolicyRead(d *schema.ResourceData, meta interface{}) securityPolicy, err := client.SecurityPolicies.Get(project, sp).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SecurityPolicy %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityPolicy %q", d.Id())) } if err := d.Set("name", securityPolicy.Name); err != nil { @@ -551,7 +558,7 @@ func resourceComputeSecurityPolicyRead(d *schema.ResourceData, meta interface{}) if err := d.Set("project", project); err != nil { return fmt.Errorf("Error setting project: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(securityPolicy.SelfLink)); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(securityPolicy.SelfLink)); err != nil { return fmt.Errorf("Error setting self_link: %s", err) } if err := d.Set("advanced_options_config", flattenSecurityPolicyAdvancedOptionsConfig(securityPolicy.AdvancedOptionsConfig)); err != nil { @@ -570,13 +577,13 @@ func resourceComputeSecurityPolicyRead(d *schema.ResourceData, meta interface{}) } func resourceComputeSecurityPolicyUpdate(d *schema.ResourceData, meta 
interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -696,13 +703,13 @@ func resourceComputeSecurityPolicyUpdate(d *schema.ResourceData, meta interface{ } func resourceComputeSecurityPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -767,7 +774,7 @@ func expandSecurityPolicyMatchConfig(configured []interface{}) *compute.Security data := configured[0].(map[string]interface{}) return &compute.SecurityPolicyRuleMatcherConfig{ - SrcIpRanges: convertStringArr(data["src_ip_ranges"].(*schema.Set).List()), + SrcIpRanges: tpgresource.ConvertStringArr(data["src_ip_ranges"].(*schema.Set).List()), } } @@ -824,7 +831,7 @@ func flattenMatchConfig(conf *compute.SecurityPolicyRuleMatcherConfig) []map[str } data := map[string]interface{}{ - "src_ip_ranges": schema.NewSet(schema.HashString, convertStringArrToInterface(conf.SrcIpRanges)), + "src_ip_ranges": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(conf.SrcIpRanges)), } return []map[string]interface{}{data} @@ -881,7 +888,7 @@ func expandSecurityPolicyAdvancedOptionsConfigJsonCustomConfig(configured []inte data := configured[0].(map[string]interface{}) return &compute.SecurityPolicyAdvancedOptionsConfigJsonCustomConfig{ - ContentTypes: convertStringArr(data["content_types"].(*schema.Set).List()), + 
ContentTypes: tpgresource.ConvertStringArr(data["content_types"].(*schema.Set).List()), } } @@ -891,7 +898,7 @@ func flattenSecurityPolicyAdvancedOptionsConfigJsonCustomConfig(conf *compute.Se } data := map[string]interface{}{ - "content_types": schema.NewSet(schema.HashString, convertStringArrToInterface(conf.ContentTypes)), + "content_types": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(conf.ContentTypes)), } return []map[string]interface{}{data} @@ -1128,13 +1135,13 @@ func flattenSecurityPolicyRequestHeader(conf *compute.SecurityPolicyRuleHttpHead } func resourceSecurityPolicyStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{"projects/(?P[^/]+)/global/securityPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/global/securityPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/global/securityPolicies/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/securityPolicies/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_service_attachment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_service_attachment.go new file mode 100644 index 0000000000..cd94d42f0a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_service_attachment.go @@ -0,0 +1,800 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeServiceAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeServiceAttachmentCreate, + Read: resourceComputeServiceAttachmentRead, + Update: resourceComputeServiceAttachmentUpdate, + Delete: resourceComputeServiceAttachmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeServiceAttachmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "connection_preference": { + Type: schema.TypeString, + Required: true, + Description: `The connection preference to use for this service attachment. 
Valid +values include "ACCEPT_AUTOMATIC", "ACCEPT_MANUAL".`, + }, + "enable_proxy_protocol": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `If true, enable the proxy protocol which is for supplying client TCP/IP +address data in TCP connections that traverse proxies on their way to +destination servers.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. The name must be 1-63 characters long, and +comply with RFC1035. Specifically, the name must be 1-63 characters +long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' +which means the first character must be a lowercase letter, and all +following characters must be a dash, lowercase letter, or digit, +except the last character, which cannot be a dash.`, + }, + "nat_subnets": { + Type: schema.TypeList, + Required: true, + Description: `An array of subnets that is provided for NAT in this service attachment.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + "target_service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URL of a forwarding rule that represents the service identified by +this service attachment.`, + }, + "consumer_accept_lists": { + Type: schema.TypeList, + Optional: true, + Description: `An array of projects that are allowed to connect to this service +attachment.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "connection_limit": { + Type: schema.TypeInt, + Required: true, + Description: `The number of consumer forwarding rules the consumer project can +create.`, + }, + "project_id_or_num": { + Type: schema.TypeString, + Required: true, + Description: `A project that is allowed to connect to this service attachment.`, + }, + }, + }, + }, + "consumer_reject_lists": { + Type: schema.TypeList, + 
Optional: true, + Description: `An array of projects that are not allowed to connect to this service +attachment.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An optional description of this resource.`, + }, + "domain_names": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `If specified, the domain name will be used during the integration between +the PSC connected endpoints and the Cloud DNS. For example, this is a +valid domain name: "p.mycompany.com.". Current max number of domain names +supported is 1.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the region where the resource resides.`, + }, + "connected_endpoints": { + Type: schema.TypeList, + Computed: true, + Description: `An array of the consumer forwarding rules connected to this service +attachment.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "endpoint": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of the consumer forwarding rule.`, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: `The status of the connection from the consumer forwarding rule to +this service attachment.`, + }, + }, + }, + }, + "fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `Fingerprint of this resource. 
This field is used internally during +updates of this resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeServiceAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandComputeServiceAttachmentName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandComputeServiceAttachmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + fingerprintProp, err := expandComputeServiceAttachmentFingerprint(d.Get("fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { + obj["fingerprint"] = fingerprintProp + } + connectionPreferenceProp, err := expandComputeServiceAttachmentConnectionPreference(d.Get("connection_preference"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("connection_preference"); !tpgresource.IsEmptyValue(reflect.ValueOf(connectionPreferenceProp)) && (ok || !reflect.DeepEqual(v, connectionPreferenceProp)) { + obj["connectionPreference"] = connectionPreferenceProp + } + targetServiceProp, err := 
expandComputeServiceAttachmentTargetService(d.Get("target_service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(targetServiceProp)) && (ok || !reflect.DeepEqual(v, targetServiceProp)) { + obj["targetService"] = targetServiceProp + } + natSubnetsProp, err := expandComputeServiceAttachmentNatSubnets(d.Get("nat_subnets"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("nat_subnets"); ok || !reflect.DeepEqual(v, natSubnetsProp) { + obj["natSubnets"] = natSubnetsProp + } + enableProxyProtocolProp, err := expandComputeServiceAttachmentEnableProxyProtocol(d.Get("enable_proxy_protocol"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_proxy_protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableProxyProtocolProp)) && (ok || !reflect.DeepEqual(v, enableProxyProtocolProp)) { + obj["enableProxyProtocol"] = enableProxyProtocolProp + } + domainNamesProp, err := expandComputeServiceAttachmentDomainNames(d.Get("domain_names"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("domain_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(domainNamesProp)) && (ok || !reflect.DeepEqual(v, domainNamesProp)) { + obj["domainNames"] = domainNamesProp + } + consumerRejectListsProp, err := expandComputeServiceAttachmentConsumerRejectLists(d.Get("consumer_reject_lists"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("consumer_reject_lists"); ok || !reflect.DeepEqual(v, consumerRejectListsProp) { + obj["consumerRejectLists"] = consumerRejectListsProp + } + consumerAcceptListsProp, err := expandComputeServiceAttachmentConsumerAcceptLists(d.Get("consumer_accept_lists"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("consumer_accept_lists"); ok || !reflect.DeepEqual(v, consumerAcceptListsProp) { + obj["consumerAcceptLists"] = 
consumerAcceptListsProp + } + regionProp, err := expandComputeServiceAttachmentRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/serviceAttachments") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ServiceAttachment: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ServiceAttachment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ServiceAttachment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating ServiceAttachment", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create ServiceAttachment: %s", err) + } + + log.Printf("[DEBUG] Finished creating ServiceAttachment %q: %#v", d.Id(), res) + + return resourceComputeServiceAttachmentRead(d, meta) +} + +func resourceComputeServiceAttachmentRead(d *schema.ResourceData, meta 
interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ServiceAttachment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeServiceAttachment %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + + if err := d.Set("name", flattenComputeServiceAttachmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + if err := d.Set("description", flattenComputeServiceAttachmentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + if err := d.Set("fingerprint", flattenComputeServiceAttachmentFingerprint(res["fingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + if err := d.Set("connection_preference", flattenComputeServiceAttachmentConnectionPreference(res["connectionPreference"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + if err := d.Set("connected_endpoints", 
flattenComputeServiceAttachmentConnectedEndpoints(res["connectedEndpoints"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + if err := d.Set("target_service", flattenComputeServiceAttachmentTargetService(res["targetService"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + if err := d.Set("nat_subnets", flattenComputeServiceAttachmentNatSubnets(res["natSubnets"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + if err := d.Set("enable_proxy_protocol", flattenComputeServiceAttachmentEnableProxyProtocol(res["enableProxyProtocol"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + if err := d.Set("domain_names", flattenComputeServiceAttachmentDomainNames(res["domainNames"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + if err := d.Set("consumer_reject_lists", flattenComputeServiceAttachmentConsumerRejectLists(res["consumerRejectLists"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + if err := d.Set("consumer_accept_lists", flattenComputeServiceAttachmentConsumerAcceptLists(res["consumerAcceptLists"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + if err := d.Set("region", flattenComputeServiceAttachmentRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading ServiceAttachment: %s", err) + } + + return nil +} + +func resourceComputeServiceAttachmentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + 
return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ServiceAttachment: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeServiceAttachmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + fingerprintProp, err := expandComputeServiceAttachmentFingerprint(d.Get("fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { + obj["fingerprint"] = fingerprintProp + } + connectionPreferenceProp, err := expandComputeServiceAttachmentConnectionPreference(d.Get("connection_preference"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("connection_preference"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, connectionPreferenceProp)) { + obj["connectionPreference"] = connectionPreferenceProp + } + natSubnetsProp, err := expandComputeServiceAttachmentNatSubnets(d.Get("nat_subnets"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("nat_subnets"); ok || !reflect.DeepEqual(v, natSubnetsProp) { + obj["natSubnets"] = natSubnetsProp + } + consumerRejectListsProp, err := expandComputeServiceAttachmentConsumerRejectLists(d.Get("consumer_reject_lists"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("consumer_reject_lists"); ok || !reflect.DeepEqual(v, consumerRejectListsProp) { + obj["consumerRejectLists"] = consumerRejectListsProp + } + consumerAcceptListsProp, err := 
expandComputeServiceAttachmentConsumerAcceptLists(d.Get("consumer_accept_lists"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("consumer_accept_lists"); ok || !reflect.DeepEqual(v, consumerAcceptListsProp) { + obj["consumerAcceptLists"] = consumerAcceptListsProp + } + + obj, err = resourceComputeServiceAttachmentUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ServiceAttachment %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating ServiceAttachment %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ServiceAttachment %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating ServiceAttachment", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeServiceAttachmentRead(d, meta) +} + +func resourceComputeServiceAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ServiceAttachment: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting ServiceAttachment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ServiceAttachment") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting ServiceAttachment", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ServiceAttachment %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeServiceAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/serviceAttachments/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeServiceAttachmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeServiceAttachmentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} 
+ +func flattenComputeServiceAttachmentFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeServiceAttachmentConnectionPreference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeServiceAttachmentConnectedEndpoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "endpoint": flattenComputeServiceAttachmentConnectedEndpointsEndpoint(original["endpoint"], d, config), + "status": flattenComputeServiceAttachmentConnectedEndpointsStatus(original["status"], d, config), + }) + } + return transformed +} +func flattenComputeServiceAttachmentConnectedEndpointsEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeServiceAttachmentConnectedEndpointsStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeServiceAttachmentTargetService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeServiceAttachmentNatSubnets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeServiceAttachmentEnableProxyProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenComputeServiceAttachmentDomainNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeServiceAttachmentConsumerRejectLists(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeServiceAttachmentConsumerAcceptLists(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "project_id_or_num": flattenComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(original["projectIdOrNum"], d, config), + "connection_limit": flattenComputeServiceAttachmentConsumerAcceptListsConnectionLimit(original["connectionLimit"], d, config), + }) + } + return transformed +} +func flattenComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeServiceAttachmentConsumerAcceptListsConnectionLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeServiceAttachmentRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func 
expandComputeServiceAttachmentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeServiceAttachmentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeServiceAttachmentFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeServiceAttachmentConnectionPreference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeServiceAttachmentTargetService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("forwardingRules", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for target_service: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeServiceAttachmentNatSubnets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for nat_subnets: nil") + } + f, err := tpgresource.ParseRegionalFieldValue("subnetworks", raw.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for nat_subnets: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func expandComputeServiceAttachmentEnableProxyProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeServiceAttachmentDomainNames(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeServiceAttachmentConsumerRejectLists(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeServiceAttachmentConsumerAcceptLists(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProjectIdOrNum, err := expandComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(original["project_id_or_num"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectIdOrNum); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectIdOrNum"] = transformedProjectIdOrNum + } + + transformedConnectionLimit, err := expandComputeServiceAttachmentConsumerAcceptListsConnectionLimit(original["connection_limit"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConnectionLimit); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["connectionLimit"] = transformedConnectionLimit + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeServiceAttachmentConsumerAcceptListsConnectionLimit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeServiceAttachmentRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := 
tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} + +func resourceComputeServiceAttachmentUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + + // need to send value in PATCH due to validation bug on api b/198329756 + nameProp := d.Get("name") + if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + // need to send value in PATCH due to validation bug on api b/198308475 + enableProxyProtocolProp := d.Get("enable_proxy_protocol") + if v, ok := d.GetOkExists("enable_proxy_protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableProxyProtocolProp)) { + obj["enableProxyProtocol"] = enableProxyProtocolProp + } + + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_service_attachment_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_service_attachment_sweeper.go new file mode 100644 index 0000000000..f9b200854b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_service_attachment_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeServiceAttachment", testSweepComputeServiceAttachment) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeServiceAttachment(region string) error { + resourceName := "ComputeServiceAttachment" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/serviceAttachments", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + 
Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["serviceAttachments"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_shared_vpc_host_project.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_shared_vpc_host_project.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_shared_vpc_host_project.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_shared_vpc_host_project.go index 590510d3d9..41c23e1b1e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_shared_vpc_host_project.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_shared_vpc_host_project.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" @@ -6,6 +8,8 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceComputeSharedVpcHostProject() *schema.Resource { @@ -35,8 +39,8 @@ func ResourceComputeSharedVpcHostProject() *schema.Resource { } func resourceComputeSharedVpcHostProjectCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -59,8 +63,8 @@ func resourceComputeSharedVpcHostProjectCreate(d *schema.ResourceData, meta inte } func resourceComputeSharedVpcHostProjectRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -69,7 +73,7 @@ func resourceComputeSharedVpcHostProjectRead(d *schema.ResourceData, meta interf project, err := config.NewComputeClient(userAgent).Projects.Get(hostProject).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Project data for project %q", hostProject)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project data for project %q", hostProject)) } if project.XpnProjectStatus != "HOST" { @@ -85,8 +89,8 @@ func resourceComputeSharedVpcHostProjectRead(d *schema.ResourceData, meta interf } func resourceComputeSharedVpcHostProjectDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_shared_vpc_service_project.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_shared_vpc_service_project.go similarity index 87% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_shared_vpc_service_project.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_shared_vpc_service_project.go index ee10cc861e..e9e9a8139e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_shared_vpc_service_project.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_shared_vpc_service_project.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" @@ -6,6 +8,9 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "google.golang.org/api/googleapi" @@ -55,8 +60,8 @@ func ResourceComputeSharedVpcServiceProject() *schema.Resource { } func resourceComputeSharedVpcServiceProjectCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -85,8 +90,8 @@ func resourceComputeSharedVpcServiceProjectCreate(d *schema.ResourceData, meta i } func resourceComputeSharedVpcServiceProjectRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -123,7 +128,7 @@ func resourceComputeSharedVpcServiceProjectRead(d *schema.ResourceData, meta int } func resourceComputeSharedVpcServiceProjectDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) hostProject := d.Get("host_project").(string) serviceProject := d.Get("service_project").(string) @@ -143,8 +148,8 @@ func resourceComputeSharedVpcServiceProjectDelete(d *schema.ResourceData, meta i return nil } -func disableXpnResource(d *schema.ResourceData, config *Config, hostProject, project string) error { - userAgent, err := generateUserAgentString(d, config.UserAgent) +func disableXpnResource(d *schema.ResourceData, config 
*transport_tpg.Config, hostProject, project string) error { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_snapshot.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_snapshot.go new file mode 100644 index 0000000000..bcecc85739 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_snapshot.go @@ -0,0 +1,918 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeSnapshot() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSnapshotCreate, + Read: resourceComputeSnapshotRead, + Update: resourceComputeSnapshotUpdate, + Delete: resourceComputeSnapshotDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeSnapshotImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource; provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "source_disk": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the disk used to create this snapshot.`, + }, + "chain_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Creates the new snapshot in the snapshot chain labeled with the +specified name. The chain name must be 1-63 characters long and +comply with RFC1035. 
This is an uncommon option only for advanced +service owners who needs to create separate snapshot chains, for +example, for chargeback tracking. When you describe your snapshot +resource, this field is visible only if it has a non-empty value.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels to apply to this Snapshot.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "snapshot_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Encrypts the snapshot using a customer-supplied encryption key. + +After you encrypt a snapshot using a customer-supplied key, you must +provide the same key if you use the snapshot later. For example, you +must provide the encryption key when you create a disk from the +encrypted snapshot in a future request. + +Customer-supplied encryption keys do not protect access to metadata of +the snapshot. + +If you do not provide an encryption key when creating the snapshot, +then the snapshot will be encrypted using an automatically generated +key and you do not need to provide a key to use the snapshot later.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_self_link": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the encryption key that is stored in Google Cloud KMS.`, + }, + "kms_key_service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The service account used for the encryption request for the given KMS key. 
+If absent, the Compute Engine Service Agent service account is used.`, + }, + "raw_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies a 256-bit customer-supplied encryption key, encoded in +RFC 4648 base64 to either encrypt or decrypt this resource.`, + Sensitive: true, + }, + "sha256": { + Type: schema.TypeString, + Computed: true, + Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied +encryption key that protects this resource.`, + }, + }, + }, + }, + "source_disk_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The customer-supplied encryption key of the source snapshot. Required +if the source snapshot is protected by a customer-supplied encryption +key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The service account used for the encryption request for the given KMS key. 
+If absent, the Compute Engine Service Agent service account is used.`, + }, + "raw_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies a 256-bit customer-supplied encryption key, encoded in +RFC 4648 base64 to either encrypt or decrypt this resource.`, + Sensitive: true, + }, + }, + }, + }, + "storage_locations": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Cloud Storage bucket storage location of the snapshot (regional or multi-regional).`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the zone where the disk is hosted.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "disk_size_gb": { + Type: schema.TypeInt, + Computed: true, + Description: `Size of the snapshot, specified in GB.`, + }, + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The fingerprint used for optimistic locking of this resource. Used +internally during updates.`, + }, + "licenses": { + Type: schema.TypeList, + Computed: true, + Description: `A list of public visible licenses that apply to this snapshot. This +can be because the original image had licenses attached (such as a +Windows image). snapshotEncryptionKey nested object Encrypts the +snapshot using a customer-supplied encryption key.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + "snapshot_id": { + Type: schema.TypeInt, + Computed: true, + Description: `The unique identifier for the resource.`, + }, + "storage_bytes": { + Type: schema.TypeInt, + Computed: true, + Description: `A size of the storage used by the snapshot. 
As snapshots share +storage, this number is expected to change with snapshot +creation/deletion.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + chainNameProp, err := expandComputeSnapshotChainName(d.Get("chain_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("chain_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(chainNameProp)) && (ok || !reflect.DeepEqual(v, chainNameProp)) { + obj["chainName"] = chainNameProp + } + nameProp, err := expandComputeSnapshotName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandComputeSnapshotDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + storageLocationsProp, err := expandComputeSnapshotStorageLocations(d.Get("storage_locations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("storage_locations"); !tpgresource.IsEmptyValue(reflect.ValueOf(storageLocationsProp)) && (ok || !reflect.DeepEqual(v, storageLocationsProp)) { + obj["storageLocations"] = storageLocationsProp + } + labelsProp, err := expandComputeSnapshotLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, 
ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + labelFingerprintProp, err := expandComputeSnapshotLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + sourceDiskProp, err := expandComputeSnapshotSourceDisk(d.Get("source_disk"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_disk"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceDiskProp)) && (ok || !reflect.DeepEqual(v, sourceDiskProp)) { + obj["sourceDisk"] = sourceDiskProp + } + zoneProp, err := expandComputeSnapshotZone(d.Get("zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + obj["zone"] = zoneProp + } + snapshotEncryptionKeyProp, err := expandComputeSnapshotSnapshotEncryptionKey(d.Get("snapshot_encryption_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("snapshot_encryption_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(snapshotEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, snapshotEncryptionKeyProp)) { + obj["snapshotEncryptionKey"] = snapshotEncryptionKeyProp + } + sourceDiskEncryptionKeyProp, err := expandComputeSnapshotSourceDiskEncryptionKey(d.Get("source_disk_encryption_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_disk_encryption_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceDiskEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, sourceDiskEncryptionKeyProp)) { + obj["sourceDiskEncryptionKey"] = sourceDiskEncryptionKeyProp + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}PRE_CREATE_REPLACE_ME/createSnapshot") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Snapshot: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Snapshot: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + url = regexp.MustCompile("PRE_CREATE_REPLACE_ME").ReplaceAllLiteralString(url, sourceDiskProp.(string)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Snapshot: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/snapshots/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating Snapshot", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Snapshot: %s", err) + } + + log.Printf("[DEBUG] Finished creating Snapshot %q: %#v", d.Id(), res) + + return resourceComputeSnapshotRead(d, meta) +} + +func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/snapshots/{{name}}") + if err != nil { + return err + } + + billingProject := "" + 
+ project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Snapshot: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeSnapshot %q", d.Id())) + } + + res, err = resourceComputeSnapshotDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing ComputeSnapshot because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeSnapshotCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("snapshot_id", flattenComputeSnapshotSnapshotId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("disk_size_gb", flattenComputeSnapshotDiskSizeGb(res["diskSizeGb"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("chain_name", flattenComputeSnapshotChainName(res["chainName"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("name", flattenComputeSnapshotName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("description", flattenComputeSnapshotDescription(res["description"], d, config)); err 
!= nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("storage_bytes", flattenComputeSnapshotStorageBytes(res["storageBytes"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("storage_locations", flattenComputeSnapshotStorageLocations(res["storageLocations"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("licenses", flattenComputeSnapshotLicenses(res["licenses"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("labels", flattenComputeSnapshotLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("label_fingerprint", flattenComputeSnapshotLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("source_disk", flattenComputeSnapshotSourceDisk(res["sourceDisk"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("snapshot_encryption_key", flattenComputeSnapshotSnapshotEncryptionKey(res["snapshotEncryptionKey"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + + return nil +} + +func resourceComputeSnapshotUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Snapshot: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("labels") || 
d.HasChange("label_fingerprint") { + obj := make(map[string]interface{}) + + labelsProp, err := expandComputeSnapshotLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + labelFingerprintProp, err := expandComputeSnapshotLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/snapshots/{{name}}/setLabels") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Snapshot %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Snapshot %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Snapshot", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeSnapshotRead(d, meta) +} + +func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, 
config) + if err != nil { + return fmt.Errorf("Error fetching project for Snapshot: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/snapshots/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Snapshot %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Snapshot") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting Snapshot", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Snapshot %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeSnapshotImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/snapshots/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/snapshots/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeSnapshotCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSnapshotSnapshotId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeSnapshotDiskSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeSnapshotChainName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSnapshotName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSnapshotDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSnapshotStorageBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeSnapshotStorageLocations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSnapshotLicenses(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeSnapshotLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSnapshotLabelFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSnapshotSourceDisk(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeSnapshotSnapshotEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["raw_key"] = + flattenComputeSnapshotSnapshotEncryptionKeyRawKey(original["rawKey"], d, config) + transformed["sha256"] = + flattenComputeSnapshotSnapshotEncryptionKeySha256(original["sha256"], d, config) + transformed["kms_key_self_link"] = + flattenComputeSnapshotSnapshotEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) + transformed["kms_key_service_account"] = + flattenComputeSnapshotSnapshotEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) + return []interface{}{transformed} +} +func flattenComputeSnapshotSnapshotEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("snapshot_encryption_key.0.raw_key") +} + +func flattenComputeSnapshotSnapshotEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSnapshotSnapshotEncryptionKeyKmsKeySelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + 
return v +} + +func flattenComputeSnapshotSnapshotEncryptionKeyKmsKeyServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeSnapshotChainName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSnapshotName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSnapshotDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSnapshotStorageLocations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSnapshotLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandComputeSnapshotLabelFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSnapshotSourceDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseZonalFieldValue("disks", v.(string), "project", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for source_disk: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeSnapshotZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("zones", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value 
for zone: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeSnapshotSnapshotEncryptionKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRawKey, err := expandComputeSnapshotSnapshotEncryptionKeyRawKey(original["raw_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRawKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rawKey"] = transformedRawKey + } + + transformedSha256, err := expandComputeSnapshotSnapshotEncryptionKeySha256(original["sha256"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha256); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha256"] = transformedSha256 + } + + transformedKmsKeySelfLink, err := expandComputeSnapshotSnapshotEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeySelfLink + } + + transformedKmsKeyServiceAccount, err := expandComputeSnapshotSnapshotEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount + } + + return transformed, nil +} + +func expandComputeSnapshotSnapshotEncryptionKeyRawKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSnapshotSnapshotEncryptionKeySha256(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSnapshotSnapshotEncryptionKeyKmsKeySelfLink(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSnapshotSnapshotEncryptionKeyKmsKeyServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSnapshotSourceDiskEncryptionKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRawKey, err := expandComputeSnapshotSourceDiskEncryptionKeyRawKey(original["raw_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRawKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rawKey"] = transformedRawKey + } + + transformedKmsKeyServiceAccount, err := expandComputeSnapshotSourceDiskEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount + } + + return transformed, nil +} + +func expandComputeSnapshotSourceDiskEncryptionKeyRawKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSnapshotSourceDiskEncryptionKeyKmsKeyServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceComputeSnapshotDecoder(d 
*schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if v, ok := res["snapshotEncryptionKey"]; ok { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + // The raw key won't be returned, so we need to use the original. + transformed["rawKey"] = d.Get("snapshot_encryption_key.0.raw_key") + transformed["sha256"] = original["sha256"] + + if kmsKeyName, ok := original["kmsKeyName"]; ok { + // The response for crypto keys often includes the version of the key which needs to be removed + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] + } + + if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { + transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount + } + + res["snapshotEncryptionKey"] = transformed + } + + if v, ok := res["sourceDiskEncryptionKey"]; ok { + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + // The raw key won't be returned, so we need to use the original. 
+ transformed["rawKey"] = d.Get("source_disk_encryption_key.0.raw_key") + transformed["sha256"] = original["sha256"] + + if kmsKeyName, ok := original["kmsKeyName"]; ok { + // The response for crypto keys often includes the version of the key which needs to be removed + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + transformed["kmsKeyName"] = strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] + } + + if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { + transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount + } + + res["sourceDiskEncryptionKey"] = transformed + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_snapshot_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_snapshot_sweeper.go new file mode 100644 index 0000000000..efb994728c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_snapshot_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeSnapshot", testSweepComputeSnapshot) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeSnapshot(region string) error { + resourceName := "ComputeSnapshot" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/snapshots", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in 
response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/snapshots/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_certificate.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_certificate.go new file mode 100644 index 0000000000..1b1af789e5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_certificate.go @@ -0,0 +1,428 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeSslCertificate() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSslCertificateCreate, + Read: resourceComputeSslCertificateRead, + Delete: resourceComputeSslCertificateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeSslCertificateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "certificate": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The certificate 
in PEM format. +The certificate chain must be no greater than 5 certs long. +The chain must include at least one intermediate cert.`, + Sensitive: true, + }, + "private_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.Sha256DiffSuppress, + Description: `The write-only private key in PEM format.`, + Sensitive: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash. + + +These are in the same namespace as the managed SSL certificates.`, + }, + "certificate_id": { + Type: schema.TypeInt, + Computed: true, + Description: `The unique identifier for the resource.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "expire_time": { + Type: schema.TypeString, + Computed: true, + Description: `Expire time of the certificate in RFC3339 text format.`, + }, + "name_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name"}, + Description: "Creates a unique name beginning with the specified prefix. 
Conflicts with name.", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource + // uuid is 26 characters, limit the prefix to 37. + value := v.(string) + if len(value) > 37 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 37 characters, name is limited to 63", k)) + } + return + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + certificateProp, err := expandComputeSslCertificateCertificate(d.Get("certificate"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate"); !tpgresource.IsEmptyValue(reflect.ValueOf(certificateProp)) && (ok || !reflect.DeepEqual(v, certificateProp)) { + obj["certificate"] = certificateProp + } + descriptionProp, err := expandComputeSslCertificateDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeSslCertificateName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + privateKeyProp, err := expandComputeSslCertificatePrivateKey(d.Get("private_key"), d, config) + if err != nil { + return err + } else if v, ok 
:= d.GetOkExists("private_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(privateKeyProp)) && (ok || !reflect.DeepEqual(v, privateKeyProp)) { + obj["privateKey"] = privateKeyProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new SslCertificate: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for SslCertificate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating SslCertificate: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/sslCertificates/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating SslCertificate", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create SslCertificate: %s", err) + } + + log.Printf("[DEBUG] Finished creating SslCertificate %q: %#v", d.Id(), res) + + return resourceComputeSslCertificateRead(d, meta) +} + +func resourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for SslCertificate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeSslCertificate %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading SslCertificate: %s", err) + } + + if err := d.Set("certificate", flattenComputeSslCertificateCertificate(res["certificate"], d, config)); err != nil { + return fmt.Errorf("Error reading SslCertificate: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeSslCertificateCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading SslCertificate: %s", err) + } + if err := d.Set("description", flattenComputeSslCertificateDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading SslCertificate: %s", err) + } + if err := d.Set("expire_time", flattenComputeSslCertificateExpireTime(res["expireTime"], d, config)); err != nil { + return fmt.Errorf("Error reading SslCertificate: %s", err) + } + if err := d.Set("certificate_id", flattenComputeSslCertificateCertificateId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading SslCertificate: %s", err) + } + if err := d.Set("name", flattenComputeSslCertificateName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error 
reading SslCertificate: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading SslCertificate: %s", err) + } + + return nil +} + +func resourceComputeSslCertificateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for SslCertificate: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting SslCertificate %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "SslCertificate") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting SslCertificate", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting SslCertificate %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeSslCertificateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/sslCertificates/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", 
+ }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/sslCertificates/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeSslCertificateCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSslCertificateCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSslCertificateDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSslCertificateExpireTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSslCertificateCertificateId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeSslCertificateName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeSslCertificateCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSslCertificateDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSslCertificateName(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + var certName string + if v, ok := d.GetOk("name"); ok { + certName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + certName = resource.PrefixedUniqueId(v.(string)) + } else { + certName = resource.UniqueId() + } + + // We need to get the {{name}} into schema to set the ID using tpgresource.ReplaceVars + if err := d.Set("name", certName); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return certName, nil +} + +func expandComputeSslCertificatePrivateKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_certificate_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_certificate_sweeper.go new file mode 100644 index 0000000000..f60935ef1f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_certificate_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeSslCertificate", testSweepComputeSslCertificate) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeSslCertificate(region string) error { + resourceName := "ComputeSslCertificate" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/sslCertificates", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/sslCertificates/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_policy.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_policy.go new file mode 100644 index 0000000000..ca49d2bbbd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_policy.go @@ -0,0 +1,557 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func sslPolicyCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + profile := diff.Get("profile") + customFeaturesCount := diff.Get("custom_features.#") + + // Validate that policy configs aren't incompatible during all phases + // CUSTOM profile demands non-zero custom_features, and other profiles (i.e., not CUSTOM) demand zero custom_features + if diff.HasChange("profile") || diff.HasChange("custom_features") { + if profile.(string) == "CUSTOM" { + if customFeaturesCount.(int) == 0 { + return fmt.Errorf("Error in SSL Policy %s: the profile is set to %s but no 
custom_features are set.", diff.Get("name"), profile.(string)) + } + } else { + if customFeaturesCount != 0 { + return fmt.Errorf("Error in SSL Policy %s: the profile is set to %s but using custom_features requires the profile to be CUSTOM.", diff.Get("name"), profile.(string)) + } + } + return nil + } + return nil +} + +func ResourceComputeSslPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSslPolicyCreate, + Read: resourceComputeSslPolicyRead, + Update: resourceComputeSslPolicyUpdate, + Delete: resourceComputeSslPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeSslPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + sslPolicyCustomizeDiff, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "custom_features": { + Type: schema.TypeSet, + Optional: true, + Description: `Profile specifies the set of SSL features that can be used by the +load balancer when negotiating SSL with clients. This can be one of +'COMPATIBLE', 'MODERN', 'RESTRICTED', or 'CUSTOM'. If using 'CUSTOM', +the set of SSL features to enable must be specified in the +'customFeatures' field. 
+ +See the [official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies#profilefeaturesupport) +for which ciphers are available to use. **Note**: this argument +*must* be present when using the 'CUSTOM' profile. This argument +*must not* be present when using any other profile.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "min_tls_version": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"TLS_1_0", "TLS_1_1", "TLS_1_2", ""}), + Description: `The minimum version of SSL protocol that can be used by the clients +to establish a connection with the load balancer. Default value: "TLS_1_0" Possible values: ["TLS_1_0", "TLS_1_1", "TLS_1_2"]`, + Default: "TLS_1_0", + }, + "profile": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"COMPATIBLE", "MODERN", "RESTRICTED", "CUSTOM", ""}), + Description: `Profile specifies the set of SSL features that can be used by the +load balancer when negotiating SSL with clients. If using 'CUSTOM', +the set of SSL features to enable must be specified in the +'customFeatures' field. + +See the [official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies#profilefeaturesupport) +for information on what cipher suites each profile provides. If +'CUSTOM' is used, the 'custom_features' attribute **must be set**. 
Default value: "COMPATIBLE" Possible values: ["COMPATIBLE", "MODERN", "RESTRICTED", "CUSTOM"]`, + Default: "COMPATIBLE", + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "enabled_features": { + Type: schema.TypeSet, + Computed: true, + Description: `The list of features enabled in the SSL policy.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `Fingerprint of this resource. A hash of the contents stored in this +object. This field is used in optimistic locking.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeSslPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeSslPolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeSslPolicyName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + profileProp, err := expandComputeSslPolicyProfile(d.Get("profile"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(profileProp)) && (ok 
|| !reflect.DeepEqual(v, profileProp)) { + obj["profile"] = profileProp + } + minTlsVersionProp, err := expandComputeSslPolicyMinTlsVersion(d.Get("min_tls_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("min_tls_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(minTlsVersionProp)) && (ok || !reflect.DeepEqual(v, minTlsVersionProp)) { + obj["minTlsVersion"] = minTlsVersionProp + } + customFeaturesProp, err := expandComputeSslPolicyCustomFeatures(d.Get("custom_features"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("custom_features"); !tpgresource.IsEmptyValue(reflect.ValueOf(customFeaturesProp)) && (ok || !reflect.DeepEqual(v, customFeaturesProp)) { + obj["customFeatures"] = customFeaturesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslPolicies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new SslPolicy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for SslPolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating SslPolicy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/sslPolicies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating SslPolicy", userAgent, + 
d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create SslPolicy: %s", err) + } + + log.Printf("[DEBUG] Finished creating SslPolicy %q: %#v", d.Id(), res) + + return resourceComputeSslPolicyRead(d, meta) +} + +func resourceComputeSslPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslPolicies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for SslPolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeSslPolicy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading SslPolicy: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeSslPolicyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading SslPolicy: %s", err) + } + if err := d.Set("description", flattenComputeSslPolicyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading SslPolicy: %s", err) + } + if err := d.Set("name", flattenComputeSslPolicyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading SslPolicy: %s", err) + } 
+ if err := d.Set("profile", flattenComputeSslPolicyProfile(res["profile"], d, config)); err != nil { + return fmt.Errorf("Error reading SslPolicy: %s", err) + } + if err := d.Set("min_tls_version", flattenComputeSslPolicyMinTlsVersion(res["minTlsVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading SslPolicy: %s", err) + } + if err := d.Set("enabled_features", flattenComputeSslPolicyEnabledFeatures(res["enabledFeatures"], d, config)); err != nil { + return fmt.Errorf("Error reading SslPolicy: %s", err) + } + if err := d.Set("custom_features", flattenComputeSslPolicyCustomFeatures(res["customFeatures"], d, config)); err != nil { + return fmt.Errorf("Error reading SslPolicy: %s", err) + } + if err := d.Set("fingerprint", flattenComputeSslPolicyFingerprint(res["fingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading SslPolicy: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading SslPolicy: %s", err) + } + + return nil +} + +func resourceComputeSslPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for SslPolicy: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + profileProp, err := expandComputeSslPolicyProfile(d.Get("profile"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, profileProp)) { + obj["profile"] = profileProp + } + minTlsVersionProp, err := expandComputeSslPolicyMinTlsVersion(d.Get("min_tls_version"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("min_tls_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, minTlsVersionProp)) { + obj["minTlsVersion"] = minTlsVersionProp + } + customFeaturesProp, err := expandComputeSslPolicyCustomFeatures(d.Get("custom_features"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("custom_features"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, customFeaturesProp)) { + obj["customFeatures"] = customFeaturesProp + } + + obj, err = resourceComputeSslPolicyUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslPolicies/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating SslPolicy %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating SslPolicy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating SslPolicy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating SslPolicy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeSslPolicyRead(d, meta) +} + +func resourceComputeSslPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return 
fmt.Errorf("Error fetching project for SslPolicy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslPolicies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting SslPolicy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "SslPolicy") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting SslPolicy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting SslPolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeSslPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/sslPolicies/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/sslPolicies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeSslPolicyCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSslPolicyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + 
return v +} + +func flattenComputeSslPolicyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSslPolicyProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSslPolicyMinTlsVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSslPolicyEnabledFeatures(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenComputeSslPolicyCustomFeatures(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenComputeSslPolicyFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeSslPolicyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSslPolicyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSslPolicyProfile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSslPolicyMinTlsVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSslPolicyCustomFeatures(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func resourceComputeSslPolicyUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) 
(map[string]interface{}, error) { + // TODO(https://github.com/GoogleCloudPlatform/magic-modules/issues/184): Handle fingerprint consistently + obj["fingerprint"] = d.Get("fingerprint") + + // TODO(https://github.com/GoogleCloudPlatform/magic-modules/issues/183): Can we generalize this + // Send a null fields if customFeatures is empty. + if v, ok := obj["customFeatures"]; ok && len(v.([]interface{})) == 0 { + obj["customFeatures"] = nil + } + + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_policy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_policy_sweeper.go new file mode 100644 index 0000000000..a0c34f6f78 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_ssl_policy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeSslPolicy", testSweepComputeSslPolicy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeSslPolicy(region string) error { + resourceName := "ComputeSslPolicy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/sslPolicies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error 
in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/sslPolicies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_subnetwork.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_subnetwork.go new file mode 100644 index 0000000000..521ec922d7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_subnetwork.go @@ -0,0 +1,1299 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "fmt" + "log" + "net" + "reflect" + "time" + + "github.com/apparentlymart/go-cidr/cidr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +// Whether the IP CIDR change shrinks the block. +func IsShrinkageIpCidr(_ context.Context, old, new, _ interface{}) bool { + _, oldCidr, oldErr := net.ParseCIDR(old.(string)) + _, newCidr, newErr := net.ParseCIDR(new.(string)) + + if oldErr != nil || newErr != nil { + // This should never happen. The ValidateFunc on the field ensures it. 
+ return false + } + + oldStart, oldEnd := cidr.AddressRange(oldCidr) + + if newCidr.Contains(oldStart) && newCidr.Contains(oldEnd) { + // This is a CIDR range expansion, no need to ForceNew, we have an update method for it. + return false + } + + return true +} + +func ResourceComputeSubnetwork() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSubnetworkCreate, + Read: resourceComputeSubnetworkRead, + Update: resourceComputeSubnetworkUpdate, + Delete: resourceComputeSubnetworkDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeSubnetworkImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + resourceComputeSubnetworkSecondaryIpRangeSetStyleDiff, + customdiff.ForceNewIfChange("ip_cidr_range", IsShrinkageIpCidr), + ), + + Schema: map[string]*schema.Schema{ + "ip_cidr_range": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateIpCidrRange, + Description: `The range of internal addresses that are owned by this subnetwork. +Provide this property when you create the subnetwork. For example, +10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and +non-overlapping within a network. Only IPv4 is supported.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `The name of the resource, provided by the client when initially +creating the resource. The name must be 1-63 characters long, and +comply with RFC1035. Specifically, the name must be 1-63 characters +long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which +means the first character must be a lowercase letter, and all +following characters must be a dash, lowercase letter, or digit, +except the last character, which cannot be a dash.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The network this subnet belongs to. +Only networks that are in the distributed mode can have subnetworks.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource. Provide this property when +you create the resource. This field can be set only at resource +creation time.`, + }, + "ipv6_access_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"EXTERNAL", "INTERNAL", ""}), + Description: `The access type of IPv6 address this subnet holds. It's immutable and can only be specified during creation +or the first time the subnet is updated into IPV4_IPV6 dual stack. If the ipv6_type is EXTERNAL then this subnet +cannot enable direct path. Possible values: ["EXTERNAL", "INTERNAL"]`, + }, + "log_config": { + Type: schema.TypeList, + Optional: true, + Description: `Denotes the logging options for the subnetwork flow logs. If logging is enabled +logs will be exported to Stackdriver. This field cannot be set if the 'purpose' of this +subnetwork is 'INTERNAL_HTTPS_LOAD_BALANCER'`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "aggregation_interval": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"INTERVAL_5_SEC", "INTERVAL_30_SEC", "INTERVAL_1_MIN", "INTERVAL_5_MIN", "INTERVAL_10_MIN", "INTERVAL_15_MIN", ""}), + Description: `Can only be specified if VPC flow logging for this subnetwork is enabled. +Toggles the aggregation interval for collecting flow logs. 
Increasing the +interval time will reduce the amount of generated flow logs for long +lasting connections. Default is an interval of 5 seconds per connection. Default value: "INTERVAL_5_SEC" Possible values: ["INTERVAL_5_SEC", "INTERVAL_30_SEC", "INTERVAL_1_MIN", "INTERVAL_5_MIN", "INTERVAL_10_MIN", "INTERVAL_15_MIN"]`, + Default: "INTERVAL_5_SEC", + AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata", "log_config.0.filter_expr"}, + }, + "filter_expr": { + Type: schema.TypeString, + Optional: true, + Description: `Export filter used to define which VPC flow logs should be logged, as as CEL expression. See +https://cloud.google.com/vpc/docs/flow-logs#filtering for details on how to format this field. +The default value is 'true', which evaluates to include everything.`, + Default: "true", + AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata", "log_config.0.filter_expr"}, + }, + "flow_sampling": { + Type: schema.TypeFloat, + Optional: true, + Description: `Can only be specified if VPC flow logging for this subnetwork is enabled. +The value of the field must be in [0, 1]. Set the sampling rate of VPC +flow logs within the subnetwork where 1.0 means all collected logs are +reported and 0.0 means no logs are reported. Default is 0.5 which means +half of all collected logs are reported.`, + Default: 0.5, + AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata", "log_config.0.filter_expr"}, + }, + "metadata": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA", "CUSTOM_METADATA", ""}), + Description: `Can only be specified if VPC flow logging for this subnetwork is enabled. +Configures whether metadata fields should be added to the reported VPC +flow logs. 
Default value: "INCLUDE_ALL_METADATA" Possible values: ["EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA", "CUSTOM_METADATA"]`, + Default: "INCLUDE_ALL_METADATA", + AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata", "log_config.0.filter_expr"}, + }, + "metadata_fields": { + Type: schema.TypeSet, + Optional: true, + Description: `List of metadata fields that should be added to reported logs. +Can only be specified if VPC flow logs for this subnetwork is enabled and "metadata" is set to CUSTOM_METADATA.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + }, + }, + }, + "private_ip_google_access": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `When enabled, VMs in this subnetwork without external IP addresses can +access Google APIs and services by using Private Google Access.`, + }, + "private_ipv6_google_access": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The private IPv6 google access type for the VMs in this subnet.`, + }, + "purpose": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The purpose of the resource. This field can be either 'PRIVATE_RFC_1918', 'INTERNAL_HTTPS_LOAD_BALANCER', 'REGIONAL_MANAGED_PROXY', or 'PRIVATE_SERVICE_CONNECT'. +A subnetwork with purpose set to 'INTERNAL_HTTPS_LOAD_BALANCER' is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. +A subnetwork in a given region with purpose set to 'REGIONAL_MANAGED_PROXY' is a proxy-only subnet and is shared between all the regional Envoy-based load balancers. +A subnetwork with purpose set to 'PRIVATE_SERVICE_CONNECT' reserves the subnet for hosting a Private Service Connect published service. +If unspecified, the purpose defaults to 'PRIVATE_RFC_1918'. 
+The enableFlowLogs field isn't supported with the purpose field set to 'INTERNAL_HTTPS_LOAD_BALANCER'.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The GCP region for this subnetwork.`, + }, + "role": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ACTIVE", "BACKUP", ""}), + Description: `The role of subnetwork. +The value can be set to 'ACTIVE' or 'BACKUP'. +An 'ACTIVE' subnetwork is one that is currently being used. +A 'BACKUP' subnetwork is one that is ready to be promoted to 'ACTIVE' or is currently draining. + +Subnetwork role must be specified when purpose is set to 'INTERNAL_HTTPS_LOAD_BALANCER' or 'REGIONAL_MANAGED_PROXY'. Possible values: ["ACTIVE", "BACKUP"]`, + }, + "secondary_ip_range": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ConfigMode: schema.SchemaConfigModeAttr, + Description: `An array of configurations for secondary IP ranges for VM instances +contained in this subnetwork. The primary IP of such VM must belong +to the primary ipCidrRange of the subnetwork. The alias IPs may belong +to either primary or secondary ranges. + +**Note**: This field uses [attr-as-block mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html) to avoid +breaking users during the 0.12 upgrade. To explicitly send a list +of zero objects you must use the following syntax: +'example=[]' +For more details about this behavior, see [this section](https://www.terraform.io/docs/configuration/attr-as-blocks.html#defining-a-fixed-object-collection-value).`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_cidr_range": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateIpCidrRange, + Description: `The range of IP addresses belonging to this subnetwork secondary +range. 
Provide this property when you create the subnetwork. +Ranges must be unique and non-overlapping with all primary and +secondary IP ranges within a network. Only IPv4 is supported.`, + }, + "range_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateGCEName, + Description: `The name associated with this subnetwork secondary range, used +when adding an alias IP range to a VM instance. The name must +be 1-63 characters long, and comply with RFC1035. The name +must be unique within the subnetwork.`, + }, + }, + }, + }, + "stack_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"IPV4_ONLY", "IPV4_IPV6", ""}), + Description: `The stack type for this subnet to identify whether the IPv6 feature is enabled or not. +If not specified IPV4_ONLY will be used. Possible values: ["IPV4_ONLY", "IPV4_IPV6"]`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "external_ipv6_prefix": { + Type: schema.TypeString, + Computed: true, + Description: `The range of external IPv6 addresses that are owned by this subnetwork.`, + }, + "gateway_address": { + Type: schema.TypeString, + Computed: true, + Description: `The gateway address for default routes to reach destination addresses +outside this subnetwork.`, + }, + "ipv6_cidr_range": { + Type: schema.TypeString, + Computed: true, + Description: `The range of internal IPv6 addresses that are owned by this subnetwork.`, + }, + "fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: "Fingerprint of this resource. 
This field is used internally during updates of this resource.", + Deprecated: "This field is not useful for users, and has been removed as an output.", + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeSubnetworkSecondaryIpRangeSetStyleDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + keys := diff.GetChangedKeysPrefix("secondary_ip_range") + if len(keys) == 0 { + return nil + } + oldCount, newCount := diff.GetChange("secondary_ip_range.#") + var count int + // There could be duplicates - worth continuing even if the counts are unequal. + if oldCount.(int) < newCount.(int) { + count = newCount.(int) + } else { + count = oldCount.(int) + } + + if count < 1 { + return nil + } + old := make([]interface{}, count) + new := make([]interface{}, count) + for i := 0; i < count; i++ { + o, n := diff.GetChange(fmt.Sprintf("secondary_ip_range.%d", i)) + + if o != nil { + old = append(old, o) + } + if n != nil { + new = append(new, n) + } + } + + oldSet := schema.NewSet(schema.HashResource(ResourceComputeSubnetwork().Schema["secondary_ip_range"].Elem.(*schema.Resource)), old) + newSet := schema.NewSet(schema.HashResource(ResourceComputeSubnetwork().Schema["secondary_ip_range"].Elem.(*schema.Resource)), new) + + if oldSet.Equal(newSet) { + if err := diff.Clear("secondary_ip_range"); err != nil { + return err + } + } + + return nil +} + +func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeSubnetworkDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + ipCidrRangeProp, err := expandComputeSubnetworkIpCidrRange(d.Get("ip_cidr_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_cidr_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipCidrRangeProp)) && (ok || !reflect.DeepEqual(v, ipCidrRangeProp)) { + obj["ipCidrRange"] = ipCidrRangeProp + } + nameProp, err := expandComputeSubnetworkName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + networkProp, err := expandComputeSubnetworkNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + purposeProp, err := expandComputeSubnetworkPurpose(d.Get("purpose"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("purpose"); !tpgresource.IsEmptyValue(reflect.ValueOf(purposeProp)) && (ok || !reflect.DeepEqual(v, purposeProp)) { + obj["purpose"] = purposeProp + } + roleProp, err := expandComputeSubnetworkRole(d.Get("role"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("role"); !tpgresource.IsEmptyValue(reflect.ValueOf(roleProp)) && (ok || !reflect.DeepEqual(v, roleProp)) { + obj["role"] = roleProp + } + secondaryIpRangesProp, err := expandComputeSubnetworkSecondaryIpRange(d.Get("secondary_ip_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("secondary_ip_range"); ok || !reflect.DeepEqual(v, secondaryIpRangesProp) { + obj["secondaryIpRanges"] = secondaryIpRangesProp + } + 
privateIpGoogleAccessProp, err := expandComputeSubnetworkPrivateIpGoogleAccess(d.Get("private_ip_google_access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("private_ip_google_access"); !tpgresource.IsEmptyValue(reflect.ValueOf(privateIpGoogleAccessProp)) && (ok || !reflect.DeepEqual(v, privateIpGoogleAccessProp)) { + obj["privateIpGoogleAccess"] = privateIpGoogleAccessProp + } + privateIpv6GoogleAccessProp, err := expandComputeSubnetworkPrivateIpv6GoogleAccess(d.Get("private_ipv6_google_access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("private_ipv6_google_access"); !tpgresource.IsEmptyValue(reflect.ValueOf(privateIpv6GoogleAccessProp)) && (ok || !reflect.DeepEqual(v, privateIpv6GoogleAccessProp)) { + obj["privateIpv6GoogleAccess"] = privateIpv6GoogleAccessProp + } + regionProp, err := expandComputeSubnetworkRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + logConfigProp, err := expandComputeSubnetworkLogConfig(d.Get("log_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("log_config"); ok || !reflect.DeepEqual(v, logConfigProp) { + obj["logConfig"] = logConfigProp + } + stackTypeProp, err := expandComputeSubnetworkStackType(d.Get("stack_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("stack_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(stackTypeProp)) && (ok || !reflect.DeepEqual(v, stackTypeProp)) { + obj["stackType"] = stackTypeProp + } + ipv6AccessTypeProp, err := expandComputeSubnetworkIpv6AccessType(d.Get("ipv6_access_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ipv6_access_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipv6AccessTypeProp)) && (ok || !reflect.DeepEqual(v, 
ipv6AccessTypeProp)) { + obj["ipv6AccessType"] = ipv6AccessTypeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Subnetwork: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Subnetwork: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Subnetwork: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating Subnetwork", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Subnetwork: %s", err) + } + + log.Printf("[DEBUG] Finished creating Subnetwork %q: %#v", d.Id(), res) + + return resourceComputeSubnetworkRead(d, meta) +} + +func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + 
return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Subnetwork: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeSubnetworkCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("description", flattenComputeSubnetworkDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("gateway_address", flattenComputeSubnetworkGatewayAddress(res["gatewayAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("ip_cidr_range", flattenComputeSubnetworkIpCidrRange(res["ipCidrRange"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("name", flattenComputeSubnetworkName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("network", flattenComputeSubnetworkNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("purpose", flattenComputeSubnetworkPurpose(res["purpose"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: 
%s", err) + } + if err := d.Set("role", flattenComputeSubnetworkRole(res["role"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("secondary_ip_range", flattenComputeSubnetworkSecondaryIpRange(res["secondaryIpRanges"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("private_ip_google_access", flattenComputeSubnetworkPrivateIpGoogleAccess(res["privateIpGoogleAccess"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("private_ipv6_google_access", flattenComputeSubnetworkPrivateIpv6GoogleAccess(res["privateIpv6GoogleAccess"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("region", flattenComputeSubnetworkRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("log_config", flattenComputeSubnetworkLogConfig(res["logConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("stack_type", flattenComputeSubnetworkStackType(res["stackType"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("ipv6_access_type", flattenComputeSubnetworkIpv6AccessType(res["ipv6AccessType"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("ipv6_cidr_range", flattenComputeSubnetworkIpv6CidrRange(res["ipv6CidrRange"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("external_ipv6_prefix", flattenComputeSubnetworkExternalIpv6Prefix(res["externalIpv6Prefix"], d, config)); err != nil { + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading 
Subnetwork: %s", err) + } + + return nil +} + +func resourceComputeSubnetworkUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Subnetwork: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("ip_cidr_range") { + obj := make(map[string]interface{}) + + ipCidrRangeProp, err := expandComputeSubnetworkIpCidrRange(d.Get("ip_cidr_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_cidr_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ipCidrRangeProp)) { + obj["ipCidrRange"] = ipCidrRangeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}/expandIpCidrRange") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Subnetwork", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("private_ip_google_access") { + obj := make(map[string]interface{}) + + privateIpGoogleAccessProp, err := 
expandComputeSubnetworkPrivateIpGoogleAccess(d.Get("private_ip_google_access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("private_ip_google_access"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, privateIpGoogleAccessProp)) { + obj["privateIpGoogleAccess"] = privateIpGoogleAccessProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}/setPrivateIpGoogleAccess") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Subnetwork", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("private_ipv6_google_access") || d.HasChange("stack_type") || d.HasChange("ipv6_access_type") { + obj := make(map[string]interface{}) + + getUrl, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + 
return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) + } + + obj["fingerprint"] = getRes["fingerprint"] + + privateIpv6GoogleAccessProp, err := expandComputeSubnetworkPrivateIpv6GoogleAccess(d.Get("private_ipv6_google_access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("private_ipv6_google_access"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, privateIpv6GoogleAccessProp)) { + obj["privateIpv6GoogleAccess"] = privateIpv6GoogleAccessProp + } + stackTypeProp, err := expandComputeSubnetworkStackType(d.Get("stack_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("stack_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, stackTypeProp)) { + obj["stackType"] = stackTypeProp + } + ipv6AccessTypeProp, err := expandComputeSubnetworkIpv6AccessType(d.Get("ipv6_access_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ipv6_access_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ipv6AccessTypeProp)) { + obj["ipv6AccessType"] = ipv6AccessTypeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, 
res, project, "Updating Subnetwork", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("log_config") { + obj := make(map[string]interface{}) + + getUrl, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) + } + + obj["fingerprint"] = getRes["fingerprint"] + + logConfigProp, err := expandComputeSubnetworkLogConfig(d.Get("log_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("log_config"); ok || !reflect.DeepEqual(v, logConfigProp) { + obj["logConfig"] = logConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Subnetwork", userAgent, + 
d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("role") { + obj := make(map[string]interface{}) + + getUrl, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) + } + + obj["fingerprint"] = getRes["fingerprint"] + + roleProp, err := expandComputeSubnetworkRole(d.Get("role"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("role"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, roleProp)) { + obj["role"] = roleProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Subnetwork", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + 
return err + } + } + if d.HasChange("secondary_ip_range") { + obj := make(map[string]interface{}) + + getUrl, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) + } + + obj["fingerprint"] = getRes["fingerprint"] + + secondaryIpRangesProp, err := expandComputeSubnetworkSecondaryIpRange(d.Get("secondary_ip_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("secondary_ip_range"); ok || !reflect.DeepEqual(v, secondaryIpRangesProp) { + obj["secondaryIpRanges"] = secondaryIpRangesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Subnetwork", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { 
+ return err + } + } + + d.Partial(false) + + return resourceComputeSubnetworkRead(d, meta) +} + +func resourceComputeSubnetworkDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Subnetwork: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Subnetwork %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Subnetwork") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting Subnetwork", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Subnetwork %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeSubnetworkImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/subnetworks/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err 
:= tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeSubnetworkCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkGatewayAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkIpCidrRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeSubnetworkPurpose(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkRole(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkSecondaryIpRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "range_name": flattenComputeSubnetworkSecondaryIpRangeRangeName(original["rangeName"], d, config), 
+ "ip_cidr_range": flattenComputeSubnetworkSecondaryIpRangeIpCidrRange(original["ipCidrRange"], d, config), + }) + } + return transformed +} +func flattenComputeSubnetworkSecondaryIpRangeRangeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkSecondaryIpRangeIpCidrRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkPrivateIpGoogleAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkPrivateIpv6GoogleAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenComputeSubnetworkLogConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + + v, ok := original["enable"] + if ok && !v.(bool) { + return nil + } + + transformed := make(map[string]interface{}) + transformed["flow_sampling"] = original["flowSampling"] + transformed["aggregation_interval"] = original["aggregationInterval"] + transformed["metadata"] = original["metadata"] + if original["metadata"].(string) == "CUSTOM_METADATA" { + transformed["metadata_fields"] = original["metadataFields"] + } else { + // MetadataFields can only be set when metadata is CUSTOM_METADATA. However, when updating + // from custom to include/exclude, the API will return the previous values of the metadata fields, + // despite not actually having any custom fields at the moment. 
The API team has confirmed + // this as WAI (b/162771344), so we work around it by clearing the response if metadata is + // not custom. + transformed["metadata_fields"] = nil + } + transformed["filter_expr"] = original["filterExpr"] + + return []interface{}{transformed} +} + +func flattenComputeSubnetworkStackType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkIpv6AccessType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkIpv6CidrRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeSubnetworkExternalIpv6Prefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeSubnetworkDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSubnetworkIpCidrRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSubnetworkName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSubnetworkNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeSubnetworkPurpose(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSubnetworkRole(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + return v, nil +} + +func expandComputeSubnetworkSecondaryIpRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRangeName, err := expandComputeSubnetworkSecondaryIpRangeRangeName(original["range_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRangeName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rangeName"] = transformedRangeName + } + + transformedIpCidrRange, err := expandComputeSubnetworkSecondaryIpRangeIpCidrRange(original["ip_cidr_range"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpCidrRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipCidrRange"] = transformedIpCidrRange + } + + req = append(req, transformed) + } + return req, nil +} + +func expandComputeSubnetworkSecondaryIpRangeRangeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSubnetworkSecondaryIpRangeIpCidrRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSubnetworkPrivateIpGoogleAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSubnetworkPrivateIpv6GoogleAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSubnetworkRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := 
tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeSubnetworkLogConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + transformed := make(map[string]interface{}) + if len(l) == 0 || l[0] == nil { + purpose, ok := d.GetOkExists("purpose") + + if ok && (purpose.(string) == "REGIONAL_MANAGED_PROXY" || purpose.(string) == "INTERNAL_HTTPS_LOAD_BALANCER") { + // Subnetworks for regional L7 ILB/XLB do not accept any values for logConfig + return nil, nil + } + // send enable = false to ensure logging is disabled if there is no config + transformed["enable"] = false + return transformed, nil + } + + raw := l[0] + original := raw.(map[string]interface{}) + + // The log_config block is specified, so logging should be enabled + transformed["enable"] = true + transformed["aggregationInterval"] = original["aggregation_interval"] + transformed["flowSampling"] = original["flow_sampling"] + transformed["metadata"] = original["metadata"] + transformed["filterExpr"] = original["filter_expr"] + + // make it JSON marshallable + transformed["metadataFields"] = original["metadata_fields"].(*schema.Set).List() + + return transformed, nil +} + +func expandComputeSubnetworkStackType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeSubnetworkIpv6AccessType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_subnetwork_sweeper.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_subnetwork_sweeper.go new file mode 100644 index 0000000000..d13459ef34 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_subnetwork_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeSubnetwork", testSweepComputeSubnetwork) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeSubnetwork(region string) error { + resourceName := "ComputeSubnetwork" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } 
+ + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/subnetworks", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/subnetworks/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_grpc_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_grpc_proxy.go new file mode 100644 index 0000000000..70925ed1c0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_grpc_proxy.go @@ -0,0 +1,475 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeTargetGrpcProxy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetGrpcProxyCreate, + Read: resourceComputeTargetGrpcProxyRead, + Update: resourceComputeTargetGrpcProxyUpdate, + Delete: resourceComputeTargetGrpcProxyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeTargetGrpcProxyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource +is created. The name must be 1-63 characters long, and comply +with RFC1035. Specifically, the name must be 1-63 characters long +and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which +means the first character must be a lowercase letter, and all +following characters must be a dash, lowercase letter, or digit, +except the last character, which cannot be a dash.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An optional description of this resource.`, + }, + "url_map": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL to the UrlMap resource that defines the mapping from URL to +the BackendService. The protocol field in the BackendService +must be set to GRPC.`, + }, + "validate_for_proxyless": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If true, indicates that the BackendServices referenced by +the urlMap may be accessed by gRPC applications without using +a sidecar proxy. This will enable configuration checks on urlMap +and its referenced BackendServices to not allow unsupported features. +A gRPC application must use "xds:///" scheme in the target URI +of the service it is connecting to. If false, indicates that the +BackendServices referenced by the urlMap will be accessed by gRPC +applications via a sidecar proxy. In this case, a gRPC application +must not use "xds:///" scheme in the target URI of the service +it is connecting to`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `Fingerprint of this resource. A hash of the contents stored in +this object. This field is used in optimistic locking. This field +will be ignored when inserting a TargetGrpcProxy. An up-to-date +fingerprint must be provided in order to patch/update the +TargetGrpcProxy; otherwise, the request will fail with error +412 conditionNotMet. To see the latest fingerprint, make a get() +request to retrieve the TargetGrpcProxy. 
A base64-encoded string.`, + }, + "self_link_with_id": { + Type: schema.TypeString, + Computed: true, + Description: `Server-defined URL with id for the resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeTargetGrpcProxyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandComputeTargetGrpcProxyName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandComputeTargetGrpcProxyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + urlMapProp, err := expandComputeTargetGrpcProxyUrlMap(d.Get("url_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("url_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(urlMapProp)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { + obj["urlMap"] = urlMapProp + } + validateForProxylessProp, err := expandComputeTargetGrpcProxyValidateForProxyless(d.Get("validate_for_proxyless"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("validate_for_proxyless"); !tpgresource.IsEmptyValue(reflect.ValueOf(validateForProxylessProp)) && (ok || !reflect.DeepEqual(v, validateForProxylessProp)) { + obj["validateForProxyless"] = validateForProxylessProp + } + 
fingerprintProp, err := expandComputeTargetGrpcProxyFingerprint(d.Get("fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { + obj["fingerprint"] = fingerprintProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetGrpcProxies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TargetGrpcProxy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetGrpcProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating TargetGrpcProxy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/targetGrpcProxies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating TargetGrpcProxy", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create TargetGrpcProxy: %s", err) + } + + log.Printf("[DEBUG] Finished creating TargetGrpcProxy %q: %#v", d.Id(), res) + + return resourceComputeTargetGrpcProxyRead(d, meta) +} + +func resourceComputeTargetGrpcProxyRead(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetGrpcProxies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetGrpcProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeTargetGrpcProxy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeTargetGrpcProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) + } + if err := d.Set("name", flattenComputeTargetGrpcProxyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) + } + if err := d.Set("description", flattenComputeTargetGrpcProxyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) + } + if err := d.Set("self_link_with_id", flattenComputeTargetGrpcProxySelfLinkWithId(res["selfLinkWithId"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) + } + if err := d.Set("url_map", flattenComputeTargetGrpcProxyUrlMap(res["urlMap"], d, config)); err != nil { + return fmt.Errorf("Error reading 
TargetGrpcProxy: %s", err) + } + if err := d.Set("validate_for_proxyless", flattenComputeTargetGrpcProxyValidateForProxyless(res["validateForProxyless"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) + } + if err := d.Set("fingerprint", flattenComputeTargetGrpcProxyFingerprint(res["fingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading TargetGrpcProxy: %s", err) + } + + return nil +} + +func resourceComputeTargetGrpcProxyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetGrpcProxy: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeTargetGrpcProxyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + fingerprintProp, err := expandComputeTargetGrpcProxyFingerprint(d.Get("fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { + obj["fingerprint"] = fingerprintProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetGrpcProxies/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating TargetGrpcProxy %q: %#v", d.Id(), obj) + + // 
err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating TargetGrpcProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetGrpcProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetGrpcProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeTargetGrpcProxyRead(d, meta) +} + +func resourceComputeTargetGrpcProxyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetGrpcProxy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetGrpcProxies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting TargetGrpcProxy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, 
d, "TargetGrpcProxy") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting TargetGrpcProxy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting TargetGrpcProxy %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeTargetGrpcProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/targetGrpcProxies/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/targetGrpcProxies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeTargetGrpcProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetGrpcProxyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetGrpcProxyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetGrpcProxySelfLinkWithId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetGrpcProxyUrlMap(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetGrpcProxyValidateForProxyless(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetGrpcProxyFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
expandComputeTargetGrpcProxyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetGrpcProxyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetGrpcProxyUrlMap(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetGrpcProxyValidateForProxyless(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetGrpcProxyFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_grpc_proxy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_grpc_proxy_sweeper.go new file mode 100644 index 0000000000..c451f1c86a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_grpc_proxy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeTargetGrpcProxy", testSweepComputeTargetGrpcProxy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeTargetGrpcProxy(region string) error { + resourceName := "ComputeTargetGrpcProxy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/targetGrpcProxies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/targetGrpcProxies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_http_proxy.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_http_proxy.go new file mode 100644 index 0000000000..329f2b9376 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_http_proxy.go @@ -0,0 +1,497 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeTargetHttpProxy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetHttpProxyCreate, + Read: resourceComputeTargetHttpProxyRead, + Update: resourceComputeTargetHttpProxyUpdate, + Delete: resourceComputeTargetHttpProxyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeTargetHttpProxyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. 
Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "url_map": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the UrlMap resource that defines the mapping from URL +to the BackendService.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "http_keep_alive_timeout_sec": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Specifies how long to keep a connection open, after completing a response, +while there is no matching traffic (in seconds). If an HTTP keepalive is +not specified, a default value (610 seconds) will be used. For Global +external HTTP(S) load balancer, the minimum allowed value is 5 seconds and +the maximum allowed value is 1200 seconds. 
For Global external HTTP(S) +load balancer (classic), this option is not available publicly.`, + }, + "proxy_bind": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: `This field only applies when the forwarding rule that references +this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "proxy_id": { + Type: schema.TypeInt, + Computed: true, + Description: `The unique identifier for the resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeTargetHttpProxyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeTargetHttpProxyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeTargetHttpProxyName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + urlMapProp, err := expandComputeTargetHttpProxyUrlMap(d.Get("url_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("url_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(urlMapProp)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { + 
obj["urlMap"] = urlMapProp + } + proxyBindProp, err := expandComputeTargetHttpProxyProxyBind(d.Get("proxy_bind"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("proxy_bind"); !tpgresource.IsEmptyValue(reflect.ValueOf(proxyBindProp)) && (ok || !reflect.DeepEqual(v, proxyBindProp)) { + obj["proxyBind"] = proxyBindProp + } + httpKeepAliveTimeoutSecProp, err := expandComputeTargetHttpProxyHttpKeepAliveTimeoutSec(d.Get("http_keep_alive_timeout_sec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("http_keep_alive_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(httpKeepAliveTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, httpKeepAliveTimeoutSecProp)) { + obj["httpKeepAliveTimeoutSec"] = httpKeepAliveTimeoutSecProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpProxies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TargetHttpProxy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetHttpProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating TargetHttpProxy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/targetHttpProxies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating 
TargetHttpProxy", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create TargetHttpProxy: %s", err) + } + + log.Printf("[DEBUG] Finished creating TargetHttpProxy %q: %#v", d.Id(), res) + + return resourceComputeTargetHttpProxyRead(d, meta) +} + +func resourceComputeTargetHttpProxyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpProxies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetHttpProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeTargetHttpProxy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading TargetHttpProxy: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeTargetHttpProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpProxy: %s", err) + } + if err := d.Set("description", flattenComputeTargetHttpProxyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpProxy: %s", err) + } + if err := d.Set("proxy_id", 
flattenComputeTargetHttpProxyProxyId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpProxy: %s", err) + } + if err := d.Set("name", flattenComputeTargetHttpProxyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpProxy: %s", err) + } + if err := d.Set("url_map", flattenComputeTargetHttpProxyUrlMap(res["urlMap"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpProxy: %s", err) + } + if err := d.Set("proxy_bind", flattenComputeTargetHttpProxyProxyBind(res["proxyBind"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpProxy: %s", err) + } + if err := d.Set("http_keep_alive_timeout_sec", flattenComputeTargetHttpProxyHttpKeepAliveTimeoutSec(res["httpKeepAliveTimeoutSec"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpProxy: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading TargetHttpProxy: %s", err) + } + + return nil +} + +func resourceComputeTargetHttpProxyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetHttpProxy: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("url_map") { + obj := make(map[string]interface{}) + + urlMapProp, err := expandComputeTargetHttpProxyUrlMap(d.Get("url_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("url_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { + obj["urlMap"] = urlMapProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/targetHttpProxies/{{name}}/setUrlMap") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating TargetHttpProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetHttpProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetHttpProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeTargetHttpProxyRead(d, meta) +} + +func resourceComputeTargetHttpProxyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetHttpProxy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpProxies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting TargetHttpProxy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + 
UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "TargetHttpProxy") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting TargetHttpProxy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting TargetHttpProxy %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeTargetHttpProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/targetHttpProxies/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/targetHttpProxies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeTargetHttpProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetHttpProxyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetHttpProxyProxyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeTargetHttpProxyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenComputeTargetHttpProxyUrlMap(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeTargetHttpProxyProxyBind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetHttpProxyHttpKeepAliveTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandComputeTargetHttpProxyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetHttpProxyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetHttpProxyUrlMap(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("urlMaps", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for url_map: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeTargetHttpProxyProxyBind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetHttpProxyHttpKeepAliveTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_http_proxy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_http_proxy_sweeper.go new file mode 100644 index 0000000000..01c2032af5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_http_proxy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeTargetHttpProxy", testSweepComputeTargetHttpProxy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeTargetHttpProxy(region string) error { + resourceName := "ComputeTargetHttpProxy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/targetHttpProxies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/targetHttpProxies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_https_proxy.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_https_proxy.go new file mode 100644 index 0000000000..cc2edba891 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_https_proxy.go @@ -0,0 +1,796 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeTargetHttpsProxy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetHttpsProxyCreate, + Read: resourceComputeTargetHttpsProxyRead, + Update: resourceComputeTargetHttpsProxyUpdate, + Delete: resourceComputeTargetHttpsProxyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeTargetHttpsProxyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + 
ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "url_map": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the UrlMap resource that defines the mapping from URL +to the BackendService.`, + }, + "certificate_map": { + Type: schema.TypeString, + Optional: true, + Description: `A reference to the CertificateMap resource uri that identifies a certificate map +associated with the given target proxy. This field can only be set for global target proxies. +Accepted format is '//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificateMaps/{resourceName}'.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "http_keep_alive_timeout_sec": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Specifies how long to keep a connection open, after completing a response, +while there is no matching traffic (in seconds). If an HTTP keepalive is +not specified, a default value (610 seconds) will be used. For Global +external HTTP(S) load balancer, the minimum allowed value is 5 seconds and +the maximum allowed value is 1200 seconds. 
For Global external HTTP(S) +load balancer (classic), this option is not available publicly.`, + }, + "proxy_bind": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: `This field only applies when the forwarding rule that references +this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, + }, + "quic_override": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NONE", "ENABLE", "DISABLE", ""}), + Description: `Specifies the QUIC override policy for this resource. This determines +whether the load balancer will attempt to negotiate QUIC with clients +or not. Can specify one of NONE, ENABLE, or DISABLE. If NONE is +specified, Google manages whether QUIC is used. Default value: "NONE" Possible values: ["NONE", "ENABLE", "DISABLE"]`, + Default: "NONE", + }, + "ssl_certificates": { + Type: schema.TypeList, + Optional: true, + Description: `A list of SslCertificate resources that are used to authenticate +connections between users and the load balancer. At least one SSL +certificate must be specified.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + "ssl_policy": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the SslPolicy resource that will be associated with +the TargetHttpsProxy resource. 
If not set, the TargetHttpsProxy +resource will not have any SSL policy configured.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "proxy_id": { + Type: schema.TypeInt, + Computed: true, + Description: `The unique identifier for the resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeTargetHttpsProxyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeTargetHttpsProxyName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + quicOverrideProp, err := expandComputeTargetHttpsProxyQuicOverride(d.Get("quic_override"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("quic_override"); !tpgresource.IsEmptyValue(reflect.ValueOf(quicOverrideProp)) && (ok || !reflect.DeepEqual(v, quicOverrideProp)) { + obj["quicOverride"] = quicOverrideProp + } + sslCertificatesProp, err := expandComputeTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("ssl_certificates"); !tpgresource.IsEmptyValue(reflect.ValueOf(sslCertificatesProp)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { + obj["sslCertificates"] = sslCertificatesProp + } + certificateMapProp, err := expandComputeTargetHttpsProxyCertificateMap(d.Get("certificate_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(certificateMapProp)) && (ok || !reflect.DeepEqual(v, certificateMapProp)) { + obj["certificateMap"] = certificateMapProp + } + sslPolicyProp, err := expandComputeTargetHttpsProxySslPolicy(d.Get("ssl_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ssl_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(sslPolicyProp)) && (ok || !reflect.DeepEqual(v, sslPolicyProp)) { + obj["sslPolicy"] = sslPolicyProp + } + urlMapProp, err := expandComputeTargetHttpsProxyUrlMap(d.Get("url_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("url_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(urlMapProp)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { + obj["urlMap"] = urlMapProp + } + proxyBindProp, err := expandComputeTargetHttpsProxyProxyBind(d.Get("proxy_bind"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("proxy_bind"); !tpgresource.IsEmptyValue(reflect.ValueOf(proxyBindProp)) && (ok || !reflect.DeepEqual(v, proxyBindProp)) { + obj["proxyBind"] = proxyBindProp + } + httpKeepAliveTimeoutSecProp, err := expandComputeTargetHttpsProxyHttpKeepAliveTimeoutSec(d.Get("http_keep_alive_timeout_sec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("http_keep_alive_timeout_sec"); !tpgresource.IsEmptyValue(reflect.ValueOf(httpKeepAliveTimeoutSecProp)) && (ok || !reflect.DeepEqual(v, httpKeepAliveTimeoutSecProp)) { + obj["httpKeepAliveTimeoutSec"] = httpKeepAliveTimeoutSecProp + } + + url, err := tpgresource.ReplaceVars(d, 
config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TargetHttpsProxy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetHttpsProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating TargetHttpsProxy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/targetHttpsProxies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating TargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create TargetHttpsProxy: %s", err) + } + + log.Printf("[DEBUG] Finished creating TargetHttpsProxy %q: %#v", d.Id(), res) + + return resourceComputeTargetHttpsProxyRead(d, meta) +} + +func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := 
tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetHttpsProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeTargetHttpsProxy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeTargetHttpsProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + if err := d.Set("description", flattenComputeTargetHttpsProxyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + if err := d.Set("proxy_id", flattenComputeTargetHttpsProxyProxyId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + if err := d.Set("name", flattenComputeTargetHttpsProxyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + if err := d.Set("quic_override", flattenComputeTargetHttpsProxyQuicOverride(res["quicOverride"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + if err := d.Set("ssl_certificates", flattenComputeTargetHttpsProxySslCertificates(res["sslCertificates"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + if err := d.Set("certificate_map", flattenComputeTargetHttpsProxyCertificateMap(res["certificateMap"], d, config)); 
err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + if err := d.Set("ssl_policy", flattenComputeTargetHttpsProxySslPolicy(res["sslPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + if err := d.Set("url_map", flattenComputeTargetHttpsProxyUrlMap(res["urlMap"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + if err := d.Set("proxy_bind", flattenComputeTargetHttpsProxyProxyBind(res["proxyBind"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + if err := d.Set("http_keep_alive_timeout_sec", flattenComputeTargetHttpsProxyHttpKeepAliveTimeoutSec(res["httpKeepAliveTimeoutSec"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + + return nil +} + +func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetHttpsProxy: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("quic_override") { + obj := make(map[string]interface{}) + + quicOverrideProp, err := expandComputeTargetHttpsProxyQuicOverride(d.Get("quic_override"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("quic_override"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, quicOverrideProp)) { + obj["quicOverride"] = quicOverrideProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}/setQuicOverride") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("ssl_certificates") { + obj := make(map[string]interface{}) + + sslCertificatesProp, err := expandComputeTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ssl_certificates"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { + obj["sslCertificates"] = sslCertificatesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/targetHttpsProxies/{{name}}/setSslCertificates") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), 
err) + } else { + log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("certificate_map") { + obj := make(map[string]interface{}) + + certificateMapProp, err := expandComputeTargetHttpsProxyCertificateMap(d.Get("certificate_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, certificateMapProp)) { + obj["certificateMap"] = certificateMapProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}/setCertificateMap") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("ssl_policy") { + obj := make(map[string]interface{}) + + sslPolicyProp, err := expandComputeTargetHttpsProxySslPolicy(d.Get("ssl_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ssl_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslPolicyProp)) { + 
obj["sslPolicy"] = sslPolicyProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}/setSslPolicy") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("url_map") { + obj := make(map[string]interface{}) + + urlMapProp, err := expandComputeTargetHttpsProxyUrlMap(d.Get("url_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("url_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, urlMapProp)) { + obj["urlMap"] = urlMapProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/targetHttpsProxies/{{name}}/setUrlMap") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating TargetHttpsProxy %q: %s", 
d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeTargetHttpsProxyRead(d, meta) +} + +func resourceComputeTargetHttpsProxyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetHttpsProxy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting TargetHttpsProxy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "TargetHttpsProxy") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting TargetHttpsProxy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting TargetHttpsProxy %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeTargetHttpsProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := 
meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/global/targetHttpsProxies/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<name>[^/]+)", + "(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/targetHttpsProxies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeTargetHttpsProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetHttpsProxyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetHttpsProxyProxyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeTargetHttpsProxyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetHttpsProxyQuicOverride(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "NONE" + } + + return v +} + +func flattenComputeTargetHttpsProxySslCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func 
flattenComputeTargetHttpsProxyCertificateMap(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetHttpsProxySslPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeTargetHttpsProxyUrlMap(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeTargetHttpsProxyProxyBind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetHttpsProxyHttpKeepAliveTimeoutSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandComputeTargetHttpsProxyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetHttpsProxyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetHttpsProxyQuicOverride(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetHttpsProxySslCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == 
nil { + return nil, fmt.Errorf("Invalid value for ssl_certificates: nil") + } + f, err := tpgresource.ParseGlobalFieldValue("sslCertificates", raw.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for ssl_certificates: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func expandComputeTargetHttpsProxyCertificateMap(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetHttpsProxySslPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("sslPolicies", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for ssl_policy: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeTargetHttpsProxyUrlMap(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("urlMaps", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for url_map: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeTargetHttpsProxyProxyBind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetHttpsProxyHttpKeepAliveTimeoutSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_https_proxy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_https_proxy_sweeper.go new file mode 100644 index 0000000000..35a5f6ab76 --- /dev/null 
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_https_proxy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeTargetHttpsProxy", testSweepComputeTargetHttpsProxy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeTargetHttpsProxy(region string) error { + resourceName := "ComputeTargetHttpsProxy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + 
FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/targetHttpsProxies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/targetHttpsProxies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_instance.go new file mode 100644 index 0000000000..d6be8a5fed --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_instance.go @@ -0,0 +1,430 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeTargetInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetInstanceCreate, + Read: resourceComputeTargetInstanceRead, + Delete: resourceComputeTargetInstanceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeTargetInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The Compute instance VM handling traffic for this target instance. +Accepts the instance self-link, relative path +(e.g. 'projects/project/zones/zone/instances/instance') or name. 
If +name is given, the zone will default to the given zone or +the provider-default zone and the project will default to the +provider-level project.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "nat_policy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"NO_NAT", ""}), + Description: `NAT option controlling how IPs are NAT'ed to the instance. +Currently only NO_NAT (default value) is supported. 
Default value: "NO_NAT" Possible values: ["NO_NAT"]`, + Default: "NO_NAT", + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the zone where the target instance resides.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeTargetInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandComputeTargetInstanceName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandComputeTargetInstanceDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + instanceProp, err := expandComputeTargetInstanceInstance(d.Get("instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceProp)) && (ok || !reflect.DeepEqual(v, instanceProp)) { + obj["instance"] = instanceProp + } + natPolicyProp, err := expandComputeTargetInstanceNatPolicy(d.Get("nat_policy"), d, config) + if err != nil { + return err + } else if 
v, ok := d.GetOkExists("nat_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(natPolicyProp)) && (ok || !reflect.DeepEqual(v, natPolicyProp)) { + obj["natPolicy"] = natPolicyProp + } + zoneProp, err := expandComputeTargetInstanceZone(d.Get("zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + obj["zone"] = zoneProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/targetInstances") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TargetInstance: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetInstance: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating TargetInstance: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating TargetInstance", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create TargetInstance: %s", err) + } + + log.Printf("[DEBUG] Finished creating TargetInstance %q: %#v", d.Id(), res) + + return 
resourceComputeTargetInstanceRead(d, meta) +} + +func resourceComputeTargetInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetInstance: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeTargetInstance %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading TargetInstance: %s", err) + } + + if err := d.Set("name", flattenComputeTargetInstanceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetInstance: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeTargetInstanceCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetInstance: %s", err) + } + if err := d.Set("description", flattenComputeTargetInstanceDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetInstance: %s", err) + } + if err := d.Set("instance", flattenComputeTargetInstanceInstance(res["instance"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetInstance: %s", err) + } + if err := 
d.Set("nat_policy", flattenComputeTargetInstanceNatPolicy(res["natPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetInstance: %s", err) + } + if err := d.Set("zone", flattenComputeTargetInstanceZone(res["zone"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetInstance: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading TargetInstance: %s", err) + } + + return nil +} + +func resourceComputeTargetInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetInstance: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting TargetInstance %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "TargetInstance") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting TargetInstance", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting TargetInstance %q: %#v", d.Id(), res) + return nil +} + 
+func resourceComputeTargetInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/zones/(?P[^/]+)/targetInstances/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeTargetInstanceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetInstanceCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetInstanceDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetInstanceInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeTargetInstanceNatPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetInstanceZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func expandComputeTargetInstanceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetInstanceDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandComputeTargetInstanceInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + // This method returns a full self link from a partial self link. + if v == nil || v.(string) == "" { + // It does not try to construct anything from empty. + return "", nil + } else if strings.HasPrefix(v.(string), "https://") { + // Anything that starts with a URL scheme is assumed to be a self link worth using. + return v, nil + } else if strings.HasPrefix(v.(string), "projects/") { + // If the self link references a project, we'll just stuck the compute prefix on it + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + if err != nil { + return "", err + } + return url, nil + } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { + // For regional or zonal resources which include their region or zone, just put the project in front. + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + if err != nil { + return nil, err + } + return url + v.(string), nil + } + // Anything else is assumed to be a regional resource, with a partial link that begins with the resource name. + // This isn't very likely - it's a last-ditch effort to extract something useful here. We can do a better job + // as soon as MultiResourceRefs are working since we'll know the types that this field is supposed to point to. 
+ url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/") + if err != nil { + return nil, err + } + return url + v.(string), nil +} + +func expandComputeTargetInstanceNatPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetInstanceZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("zones", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for zone: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_instance_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_instance_sweeper.go new file mode 100644 index 0000000000..5af971850e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_instance_sweeper.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeTargetInstance", testSweepComputeTargetInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeTargetInstance(region string) error { + resourceName := "ComputeTargetInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/aggregated/targetInstances", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + var rl []interface{} + zones := resourceList.(map[string]interface{}) + // Loop through every zone in the list response + for _, zonesValue := range zones { + zone := zonesValue.(map[string]interface{}) + for k, v := range zone { + // Zone map either has resources or a warning stating there were no resources found in the zone + if k != "warning" { + resourcesInZone := v.([]interface{}) + rl = append(rl, resourcesInZone...) + } + } + } + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}" + if obj["zone"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource zone was nil", resourceName) + return nil + } + zone := tpgresource.GetResourceNameFromSelfLink(obj["zone"].(string)) + deleteTemplate = strings.Replace(deleteTemplate, "{{zone}}", zone, -1) + + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: 
"DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_pool.go similarity index 86% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_pool.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_pool.go index d8f79f6ffd..98f320765b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_target_pool.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_pool.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" @@ -7,13 +9,16 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/googleapi" "google.golang.org/api/compute/v1" ) -var instancesSelfLinkPattern = regexp.MustCompile(fmt.Sprintf(zonalLinkBasePattern, "instances")) +var instancesSelfLinkPattern = regexp.MustCompile(fmt.Sprintf(tpgresource.ZonalLinkBasePattern, "instances")) func ResourceComputeTargetPool() *schema.Resource { return &schema.Resource{ @@ -67,7 +72,7 @@ func ResourceComputeTargetPool() *schema.Resource { MaxItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, }, Description: `List of zero or one health check name or self_link. Only legacy google_compute_http_health_check is supported.`, }, @@ -140,12 +145,12 @@ func canonicalizeInstanceRef(instanceRef string) string { } // Healthchecks need to exist before being referred to from the target pool. -func convertHealthChecks(healthChecks []interface{}, d *schema.ResourceData, config *Config) ([]string, error) { +func convertHealthChecks(healthChecks []interface{}, d *schema.ResourceData, config *transport_tpg.Config) ([]string, error) { if len(healthChecks) == 0 { return []string{}, nil } - hc, err := ParseHttpHealthCheckFieldValue(healthChecks[0].(string), d, config) + hc, err := tpgresource.ParseHttpHealthCheckFieldValue(healthChecks[0].(string), d, config) if err != nil { return nil, err } @@ -155,7 +160,7 @@ func convertHealthChecks(healthChecks []interface{}, d *schema.ResourceData, con // Instances do not need to exist yet, so we simply generate URLs. 
// Instances can be full URLS or zone/name -func convertInstancesToUrls(d *schema.ResourceData, config *Config, project string, names *schema.Set) ([]string, error) { +func convertInstancesToUrls(d *schema.ResourceData, config *transport_tpg.Config, project string, names *schema.Set) ([]string, error) { urls := make([]string, len(names.List())) for i, nameI := range names.List() { name := nameI.(string) @@ -167,7 +172,7 @@ func convertInstancesToUrls(d *schema.ResourceData, config *Config, project stri if len(splitName) != 2 { return nil, fmt.Errorf("Invalid instance name, require URL or zone/name: %s", name) } else { - url, err := replaceVars(d, config, fmt.Sprintf( + url, err := tpgresource.ReplaceVars(d, config, fmt.Sprintf( "{{ComputeBasePath}}projects/%s/zones/%s/instances/%s", project, splitName[0], splitName[1])) if err != nil { @@ -181,18 +186,18 @@ func convertInstancesToUrls(d *schema.ResourceData, config *Config, project stri } func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -230,7 +235,7 @@ func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) e } // It probably maybe worked, so store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetPools/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetPools/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -244,18 +249,18 @@ func 
resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) e } func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -275,7 +280,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e if err != nil { return err } - add, remove := calcAddRemove(fromUrls, toUrls) + add, remove := tpgresource.CalcAddRemove(fromUrls, toUrls) removeReq := &compute.TargetPoolsRemoveHealthCheckRequest{ HealthChecks: make([]*compute.HealthCheckReference, len(remove)), @@ -392,18 +397,18 @@ func convertInstancesFromUrls(urls []string) []string { } func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -411,7 +416,7 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err tpool, err := config.NewComputeClient(userAgent).TargetPools.Get( project, region, d.Get("name").(string)).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Target Pool %q", d.Get("name").(string))) + return 
transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Target Pool %q", d.Get("name").(string))) } if err := d.Set("self_link", tpool.SelfLink); err != nil { @@ -441,7 +446,7 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("name", tpool.Name); err != nil { return fmt.Errorf("Error setting name: %s", err) } - if err := d.Set("region", GetResourceNameFromSelfLink(tpool.Region)); err != nil { + if err := d.Set("region", tpgresource.GetResourceNameFromSelfLink(tpool.Region)); err != nil { return fmt.Errorf("Error setting region: %s", err) } if err := d.Set("session_affinity", tpool.SessionAffinity); err != nil { @@ -454,18 +459,18 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err } func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -486,8 +491,8 @@ func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) e } func resourceTargetPoolStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/regions/(?P[^/]+)/targetPools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -497,7 +502,7 @@ func resourceTargetPoolStateImporter(d *schema.ResourceData, meta interface{}) ( } // Replace import id for the resource id - id, err := replaceVars(d, 
config, "projects/{{project}}/regions/{{region}}/targetPools/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetPools/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_ssl_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_ssl_proxy.go new file mode 100644 index 0000000000..e4c27e3c13 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_ssl_proxy.go @@ -0,0 +1,725 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeTargetSslProxy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetSslProxyCreate, + Read: resourceComputeTargetSslProxyRead, + Update: resourceComputeTargetSslProxyUpdate, + Delete: resourceComputeTargetSslProxyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeTargetSslProxyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "backend_service": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the BackendService resource.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "certificate_map": { + Type: schema.TypeString, + Optional: true, + Description: `A reference to the CertificateMap resource uri that identifies a certificate map +associated with the given target proxy. 
This field can only be set for global target proxies. +Accepted format is '//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificateMaps/{resourceName}'.`, + ExactlyOneOf: []string{"ssl_certificates", "certificate_map"}, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "proxy_header": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NONE", "PROXY_V1", ""}), + Description: `Specifies the type of proxy header to append before sending data to +the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, + Default: "NONE", + }, + "ssl_certificates": { + Type: schema.TypeList, + Optional: true, + Description: `A list of SslCertificate resources that are used to authenticate +connections between users and the load balancer. At least one +SSL certificate must be specified.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + ExactlyOneOf: []string{"ssl_certificates", "certificate_map"}, + }, + "ssl_policy": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the SslPolicy resource that will be associated with +the TargetSslProxy resource. 
If not set, the TargetSslProxy +resource will not have any SSL policy configured.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "proxy_id": { + Type: schema.TypeInt, + Computed: true, + Description: `The unique identifier for the resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeTargetSslProxyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeTargetSslProxyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeTargetSslProxyName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + proxyHeaderProp, err := expandComputeTargetSslProxyProxyHeader(d.Get("proxy_header"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("proxy_header"); !tpgresource.IsEmptyValue(reflect.ValueOf(proxyHeaderProp)) && (ok || !reflect.DeepEqual(v, proxyHeaderProp)) { + obj["proxyHeader"] = proxyHeaderProp + } + serviceProp, err := expandComputeTargetSslProxyBackendService(d.Get("backend_service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backend_service"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(serviceProp)) && (ok || !reflect.DeepEqual(v, serviceProp)) { + obj["service"] = serviceProp + } + sslCertificatesProp, err := expandComputeTargetSslProxySslCertificates(d.Get("ssl_certificates"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ssl_certificates"); !tpgresource.IsEmptyValue(reflect.ValueOf(sslCertificatesProp)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { + obj["sslCertificates"] = sslCertificatesProp + } + certificateMapProp, err := expandComputeTargetSslProxyCertificateMap(d.Get("certificate_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(certificateMapProp)) && (ok || !reflect.DeepEqual(v, certificateMapProp)) { + obj["certificateMap"] = certificateMapProp + } + sslPolicyProp, err := expandComputeTargetSslProxySslPolicy(d.Get("ssl_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ssl_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(sslPolicyProp)) && (ok || !reflect.DeepEqual(v, sslPolicyProp)) { + obj["sslPolicy"] = sslPolicyProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TargetSslProxy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetSslProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating TargetSslProxy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/targetSslProxies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating TargetSslProxy", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create TargetSslProxy: %s", err) + } + + log.Printf("[DEBUG] Finished creating TargetSslProxy %q: %#v", d.Id(), res) + + return resourceComputeTargetSslProxyRead(d, meta) +} + +func resourceComputeTargetSslProxyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetSslProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeTargetSslProxy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading TargetSslProxy: %s", err) + } + + if err := 
d.Set("creation_timestamp", flattenComputeTargetSslProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSslProxy: %s", err) + } + if err := d.Set("description", flattenComputeTargetSslProxyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSslProxy: %s", err) + } + if err := d.Set("proxy_id", flattenComputeTargetSslProxyProxyId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSslProxy: %s", err) + } + if err := d.Set("name", flattenComputeTargetSslProxyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSslProxy: %s", err) + } + if err := d.Set("proxy_header", flattenComputeTargetSslProxyProxyHeader(res["proxyHeader"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSslProxy: %s", err) + } + if err := d.Set("backend_service", flattenComputeTargetSslProxyBackendService(res["service"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSslProxy: %s", err) + } + if err := d.Set("ssl_certificates", flattenComputeTargetSslProxySslCertificates(res["sslCertificates"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSslProxy: %s", err) + } + if err := d.Set("certificate_map", flattenComputeTargetSslProxyCertificateMap(res["certificateMap"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSslProxy: %s", err) + } + if err := d.Set("ssl_policy", flattenComputeTargetSslProxySslPolicy(res["sslPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetSslProxy: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading TargetSslProxy: %s", err) + } + + return nil +} + +func resourceComputeTargetSslProxyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetSslProxy: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("proxy_header") { + obj := make(map[string]interface{}) + + proxyHeaderProp, err := expandComputeTargetSslProxyProxyHeader(d.Get("proxy_header"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("proxy_header"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, proxyHeaderProp)) { + obj["proxyHeader"] = proxyHeaderProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setProxyHeader") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetSslProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("backend_service") { + obj := make(map[string]interface{}) + + serviceProp, err := expandComputeTargetSslProxyBackendService(d.Get("backend_service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backend_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
serviceProp)) { + obj["service"] = serviceProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setBackendService") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetSslProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("ssl_certificates") { + obj := make(map[string]interface{}) + + sslCertificatesProp, err := expandComputeTargetSslProxySslCertificates(d.Get("ssl_certificates"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ssl_certificates"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslCertificatesProp)) { + obj["sslCertificates"] = sslCertificatesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setSslCertificates") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetSslProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("certificate_map") { + obj := make(map[string]interface{}) + + certificateMapProp, err := expandComputeTargetSslProxyCertificateMap(d.Get("certificate_map"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate_map"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, certificateMapProp)) { + obj["certificateMap"] = certificateMapProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setCertificateMap") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetSslProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("ssl_policy") { + obj := make(map[string]interface{}) + + sslPolicyProp, err := expandComputeTargetSslProxySslPolicy(d.Get("ssl_policy"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("ssl_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslPolicyProp)) { + obj["sslPolicy"] = sslPolicyProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setSslPolicy") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetSslProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeTargetSslProxyRead(d, meta) +} + +func resourceComputeTargetSslProxyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetSslProxy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting TargetSslProxy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "TargetSslProxy") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting TargetSslProxy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting TargetSslProxy %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeTargetSslProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/targetSslProxies/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/targetSslProxies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeTargetSslProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetSslProxyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetSslProxyProxyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := 
v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeTargetSslProxyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetSslProxyProxyHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetSslProxyBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeTargetSslProxySslCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeTargetSslProxyCertificateMap(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetSslProxySslPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func expandComputeTargetSslProxyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetSslProxyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetSslProxyProxyHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetSslProxyBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), 
"project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for backend_service: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeTargetSslProxySslCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for ssl_certificates: nil") + } + f, err := tpgresource.ParseGlobalFieldValue("sslCertificates", raw.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for ssl_certificates: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func expandComputeTargetSslProxyCertificateMap(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetSslProxySslPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("sslPolicies", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for ssl_policy: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_ssl_proxy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_ssl_proxy_sweeper.go new file mode 100644 index 0000000000..387694e2a7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_ssl_proxy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeTargetSslProxy", testSweepComputeTargetSslProxy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeTargetSslProxy(region string) error { + resourceName := "ComputeTargetSslProxy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/targetSslProxies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/targetSslProxies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : 
%s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_tcp_proxy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_tcp_proxy.go new file mode 100644 index 0000000000..e9288dc409 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_tcp_proxy.go @@ -0,0 +1,523 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeTargetTcpProxy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetTcpProxyCreate, + Read: resourceComputeTargetTcpProxyRead, + Update: resourceComputeTargetTcpProxyUpdate, + Delete: resourceComputeTargetTcpProxyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeTargetTcpProxyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "backend_service": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the BackendService resource.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "proxy_bind": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: `This field only applies when the forwarding rule that references +this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, + }, + "proxy_header": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NONE", "PROXY_V1", ""}), + Description: `Specifies the type of proxy header to append before sending data to +the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, + Default: "NONE", + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "proxy_id": { + Type: schema.TypeInt, + Computed: true, + Description: `The unique identifier for the resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeTargetTcpProxyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeTargetTcpProxyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp 
+ } + nameProp, err := expandComputeTargetTcpProxyName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + proxyHeaderProp, err := expandComputeTargetTcpProxyProxyHeader(d.Get("proxy_header"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("proxy_header"); !tpgresource.IsEmptyValue(reflect.ValueOf(proxyHeaderProp)) && (ok || !reflect.DeepEqual(v, proxyHeaderProp)) { + obj["proxyHeader"] = proxyHeaderProp + } + serviceProp, err := expandComputeTargetTcpProxyBackendService(d.Get("backend_service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backend_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceProp)) && (ok || !reflect.DeepEqual(v, serviceProp)) { + obj["service"] = serviceProp + } + proxyBindProp, err := expandComputeTargetTcpProxyProxyBind(d.Get("proxy_bind"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("proxy_bind"); !tpgresource.IsEmptyValue(reflect.ValueOf(proxyBindProp)) && (ok || !reflect.DeepEqual(v, proxyBindProp)) { + obj["proxyBind"] = proxyBindProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TargetTcpProxy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetTcpProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: 
url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating TargetTcpProxy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/targetTcpProxies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating TargetTcpProxy", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create TargetTcpProxy: %s", err) + } + + log.Printf("[DEBUG] Finished creating TargetTcpProxy %q: %#v", d.Id(), res) + + return resourceComputeTargetTcpProxyRead(d, meta) +} + +func resourceComputeTargetTcpProxyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetTcpProxy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeTargetTcpProxy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading TargetTcpProxy: 
%s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeTargetTcpProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetTcpProxy: %s", err) + } + if err := d.Set("description", flattenComputeTargetTcpProxyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetTcpProxy: %s", err) + } + if err := d.Set("proxy_id", flattenComputeTargetTcpProxyProxyId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetTcpProxy: %s", err) + } + if err := d.Set("name", flattenComputeTargetTcpProxyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetTcpProxy: %s", err) + } + if err := d.Set("proxy_header", flattenComputeTargetTcpProxyProxyHeader(res["proxyHeader"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetTcpProxy: %s", err) + } + if err := d.Set("backend_service", flattenComputeTargetTcpProxyBackendService(res["service"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetTcpProxy: %s", err) + } + if err := d.Set("proxy_bind", flattenComputeTargetTcpProxyProxyBind(res["proxyBind"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetTcpProxy: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading TargetTcpProxy: %s", err) + } + + return nil +} + +func resourceComputeTargetTcpProxyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetTcpProxy: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("proxy_header") { + obj := 
make(map[string]interface{}) + + proxyHeaderProp, err := expandComputeTargetTcpProxyProxyHeader(d.Get("proxy_header"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("proxy_header"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, proxyHeaderProp)) { + obj["proxyHeader"] = proxyHeaderProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies/{{name}}/setProxyHeader") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating TargetTcpProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetTcpProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetTcpProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("backend_service") { + obj := make(map[string]interface{}) + + serviceProp, err := expandComputeTargetTcpProxyBackendService(d.Get("backend_service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backend_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceProp)) { + obj["service"] = serviceProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies/{{name}}/setBackendService") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err 
== nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating TargetTcpProxy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TargetTcpProxy %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating TargetTcpProxy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeTargetTcpProxyRead(d, meta) +} + +func resourceComputeTargetTcpProxyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TargetTcpProxy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting TargetTcpProxy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "TargetTcpProxy") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting TargetTcpProxy", 
userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting TargetTcpProxy %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeTargetTcpProxyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/global/targetTcpProxies/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/targetTcpProxies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeTargetTcpProxyCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetTcpProxyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetTcpProxyProxyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeTargetTcpProxyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetTcpProxyProxyHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeTargetTcpProxyBackendService(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeTargetTcpProxyProxyBind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandComputeTargetTcpProxyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetTcpProxyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetTcpProxyProxyHeader(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeTargetTcpProxyBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for backend_service: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeTargetTcpProxyProxyBind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_tcp_proxy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_tcp_proxy_sweeper.go new file mode 100644 index 0000000000..b3d05715f8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_target_tcp_proxy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeTargetTcpProxy", testSweepComputeTargetTcpProxy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeTargetTcpProxy(region string) error { + resourceName := "ComputeTargetTcpProxy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/targetTcpProxies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/targetTcpProxies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : 
%s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_url_map.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_url_map.go similarity index 82% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_url_map.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_url_map.go index 3fd087b985..fb10faa665 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_compute_url_map.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_url_map.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package compute import ( "fmt" @@ -23,6 +26,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceComputeUrlMap() *schema.Resource { @@ -247,7 +254,7 @@ the host / authority header is suffixed with -shadow.`, "backend_service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The full or partial URL to the BackendService resource being mirrored to.`, }, }, @@ -399,7 +406,7 @@ additional settings specified in this HttpRouteAction.`, "backend_service": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The full or partial URL to the default BackendService resource. 
Before forwarding the request to backendService, the loadbalancer applies any relevant headerActions specified as part of this backendServiceWeight.`, @@ -513,7 +520,7 @@ The value must be between 0 and 1000`, "default_service": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The backend service or backend bucket to use when none of the given rules match.`, ExactlyOneOf: []string{"default_service", "default_url_redirect", "default_route_action.0.weighted_backend_services"}, }, @@ -570,7 +577,7 @@ the redirect. The value must be between 1 and 1024 characters.`, "redirect_response_code": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), Description: `The HTTP Status code to use for this RedirectAction. Supported values are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. @@ -876,7 +883,7 @@ the host / authority header is suffixed with -shadow.`, "backend_service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The full or partial URL to the BackendService resource being mirrored to.`, }, }, @@ -1015,7 +1022,7 @@ additional settings specified in this HttpRouteAction.`, "backend_service": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The full or partial URL to the default BackendService resource. 
Before forwarding the request to backendService, the loadbalancer applies any relevant headerActions specified as part of this backendServiceWeight.`, @@ -1126,7 +1133,7 @@ The value must be between 0 and 1000`, "default_service": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The backend service or backend bucket to use when none of the given paths match.`, }, "default_url_redirect": { @@ -1182,7 +1189,7 @@ the redirect. The value must be between 1 and 1024 characters.`, "redirect_response_code": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), Description: `The HTTP Status code to use for this RedirectAction. Supported values are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. @@ -1491,7 +1498,7 @@ service, the host / authority header is suffixed with -shadow.`, "backend_service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The BackendService resource being mirrored to.`, }, }, @@ -1631,7 +1638,7 @@ HttpRouteAction.`, "backend_service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The default BackendService resource. 
Before forwarding the request to backendService, the loadbalancer applies any relevant headerActions specified as part of this backendServiceWeight.`, @@ -1738,7 +1745,7 @@ prior to sending the response back to the client.`, "service": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The backend service or backend bucket to use if any of the given paths match.`, }, "url_redirect": { @@ -1797,7 +1804,7 @@ must be between 1 and 1024 characters.`, "redirect_response_code": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), Description: `The HTTP Status code to use for this RedirectAction. Supported values are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. @@ -2086,7 +2093,7 @@ length of 1024 characters.`, "filter_match_criteria": { Type: schema.TypeString, Required: true, - ValidateFunc: validateEnum([]string{"MATCH_ALL", "MATCH_ANY"}), + ValidateFunc: verify.ValidateEnum([]string{"MATCH_ALL", "MATCH_ANY"}), Description: `Specifies how individual filterLabel matches within the list of filterLabels contribute towards the overall metadataFilter match. Supported values are: - MATCH_ANY: At least one of the filterLabels must have a matching label in the @@ -2097,6 +2104,19 @@ the provided metadata. 
Possible values: ["MATCH_ALL", "MATCH_ANY"]`, }, }, }, + "path_template_match": { + Type: schema.TypeString, + Optional: true, + Description: `For satisfying the matchRule condition, the path of the request +must match the wildcard pattern specified in pathTemplateMatch +after removing any query parameters and anchor that may be part +of the original URL. + +pathTemplateMatch must be between 1 and 255 characters +(inclusive). The pattern specified by pathTemplateMatch may +have at most 5 wildcard operators and at most 5 variable +captures in total.`, + }, "prefix_match": { Type: schema.TypeString, Optional: true, @@ -2336,7 +2356,7 @@ service, the host / authority header is suffixed with -shadow.`, "backend_service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The BackendService resource being mirrored to.`, }, }, @@ -2459,6 +2479,24 @@ header is replaced with contents of hostRewrite. The value must be between 1 and portion of the request's path is replaced by pathPrefixRewrite. The value must be between 1 and 1024 characters.`, }, + "path_template_rewrite": { + Type: schema.TypeString, + Optional: true, + Description: `Prior to forwarding the request to the selected origin, if the +request matched a pathTemplateMatch, the matching portion of the +request's path is replaced re-written using the pattern specified +by pathTemplateRewrite. + +pathTemplateRewrite must be between 1 and 255 characters +(inclusive), must start with a '/', and must only use variables +captured by the route's pathTemplate matchers. + +pathTemplateRewrite may only be used when all of a route's +MatchRules specify pathTemplate. 
+ +Only one of pathPrefixRewrite and pathTemplateRewrite may be +specified.`, + }, }, }, }, @@ -2478,7 +2516,7 @@ HttpRouteAction.`, "backend_service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The default BackendService resource. Before forwarding the request to backendService, the loadbalancer applies any relevant headerActions specified as part of this backendServiceWeight.`, @@ -2585,7 +2623,7 @@ prior to sending the response back to the client.`, "service": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending @@ -2634,7 +2672,7 @@ retaining the remaining portion of the URL before redirecting the request.`, "redirect_response_code": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}), Description: `The HTTP Status code to use for this RedirectAction. Supported values are: * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. 
@@ -2685,7 +2723,7 @@ tests per UrlMap.`, "service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The backend service or backend bucket link that should be matched by this test.`, }, "description": { @@ -2758,8 +2796,8 @@ the resource.`, } func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -2768,65 +2806,65 @@ func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error defaultServiceProp, err := expandComputeUrlMapDefaultService(d.Get("default_service"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_service"); !isEmptyValue(reflect.ValueOf(defaultServiceProp)) && (ok || !reflect.DeepEqual(v, defaultServiceProp)) { + } else if v, ok := d.GetOkExists("default_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultServiceProp)) && (ok || !reflect.DeepEqual(v, defaultServiceProp)) { obj["defaultService"] = defaultServiceProp } descriptionProp, err := expandComputeUrlMapDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } fingerprintProp, err := expandComputeUrlMapFingerprint(d.Get("fingerprint"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || 
!reflect.DeepEqual(v, fingerprintProp)) { + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { obj["fingerprint"] = fingerprintProp } headerActionProp, err := expandComputeUrlMapHeaderAction(d.Get("header_action"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("header_action"); !isEmptyValue(reflect.ValueOf(headerActionProp)) && (ok || !reflect.DeepEqual(v, headerActionProp)) { + } else if v, ok := d.GetOkExists("header_action"); !tpgresource.IsEmptyValue(reflect.ValueOf(headerActionProp)) && (ok || !reflect.DeepEqual(v, headerActionProp)) { obj["headerAction"] = headerActionProp } hostRulesProp, err := expandComputeUrlMapHostRule(d.Get("host_rule"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("host_rule"); !isEmptyValue(reflect.ValueOf(hostRulesProp)) && (ok || !reflect.DeepEqual(v, hostRulesProp)) { + } else if v, ok := d.GetOkExists("host_rule"); !tpgresource.IsEmptyValue(reflect.ValueOf(hostRulesProp)) && (ok || !reflect.DeepEqual(v, hostRulesProp)) { obj["hostRules"] = hostRulesProp } nameProp, err := expandComputeUrlMapName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } pathMatchersProp, err := expandComputeUrlMapPathMatcher(d.Get("path_matcher"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("path_matcher"); !isEmptyValue(reflect.ValueOf(pathMatchersProp)) && (ok || !reflect.DeepEqual(v, pathMatchersProp)) { + } else if v, ok := d.GetOkExists("path_matcher"); !tpgresource.IsEmptyValue(reflect.ValueOf(pathMatchersProp)) && (ok || !reflect.DeepEqual(v, pathMatchersProp)) { 
obj["pathMatchers"] = pathMatchersProp } testsProp, err := expandComputeUrlMapTest(d.Get("test"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("test"); !isEmptyValue(reflect.ValueOf(testsProp)) && (ok || !reflect.DeepEqual(v, testsProp)) { + } else if v, ok := d.GetOkExists("test"); !tpgresource.IsEmptyValue(reflect.ValueOf(testsProp)) && (ok || !reflect.DeepEqual(v, testsProp)) { obj["tests"] = testsProp } defaultUrlRedirectProp, err := expandComputeUrlMapDefaultUrlRedirect(d.Get("default_url_redirect"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_url_redirect"); !isEmptyValue(reflect.ValueOf(defaultUrlRedirectProp)) && (ok || !reflect.DeepEqual(v, defaultUrlRedirectProp)) { + } else if v, ok := d.GetOkExists("default_url_redirect"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultUrlRedirectProp)) && (ok || !reflect.DeepEqual(v, defaultUrlRedirectProp)) { obj["defaultUrlRedirect"] = defaultUrlRedirectProp } defaultRouteActionProp, err := expandComputeUrlMapDefaultRouteAction(d.Get("default_route_action"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_route_action"); !isEmptyValue(reflect.ValueOf(defaultRouteActionProp)) && (ok || !reflect.DeepEqual(v, defaultRouteActionProp)) { + } else if v, ok := d.GetOkExists("default_route_action"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultRouteActionProp)) && (ok || !reflect.DeepEqual(v, defaultRouteActionProp)) { obj["defaultRouteAction"] = defaultRouteActionProp } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/urlMaps") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/urlMaps") if err != nil { return err } @@ -2834,24 +2872,32 @@ func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Creating new UrlMap: %#v", obj) billingProject := "" - project, err := getProject(d, config) + 
project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for UrlMap: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating UrlMap: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/urlMaps/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/urlMaps/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -2873,33 +2919,39 @@ func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error } func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/urlMaps/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/urlMaps/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for UrlMap: %s", err) } billingProject = project // err == nil indicates that 
the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeUrlMap %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeUrlMap %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -2942,7 +2994,7 @@ func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error { if err := d.Set("default_route_action", flattenComputeUrlMapDefaultRouteAction(res["defaultRouteAction"], d, config)); err != nil { return fmt.Errorf("Error reading UrlMap: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { return fmt.Errorf("Error reading UrlMap: %s", err) } @@ -2950,15 +3002,15 @@ func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error { } func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for UrlMap: %s", err) } @@ -2968,65 +3020,65 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error defaultServiceProp, err := 
expandComputeUrlMapDefaultService(d.Get("default_service"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_service"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultServiceProp)) { + } else if v, ok := d.GetOkExists("default_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultServiceProp)) { obj["defaultService"] = defaultServiceProp } descriptionProp, err := expandComputeUrlMapDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } fingerprintProp, err := expandComputeUrlMapFingerprint(d.Get("fingerprint"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { obj["fingerprint"] = fingerprintProp } headerActionProp, err := expandComputeUrlMapHeaderAction(d.Get("header_action"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("header_action"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, headerActionProp)) { + } else if v, ok := d.GetOkExists("header_action"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, headerActionProp)) { obj["headerAction"] = headerActionProp } hostRulesProp, err := expandComputeUrlMapHostRule(d.Get("host_rule"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("host_rule"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
hostRulesProp)) { + } else if v, ok := d.GetOkExists("host_rule"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hostRulesProp)) { obj["hostRules"] = hostRulesProp } nameProp, err := expandComputeUrlMapName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } pathMatchersProp, err := expandComputeUrlMapPathMatcher(d.Get("path_matcher"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("path_matcher"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pathMatchersProp)) { + } else if v, ok := d.GetOkExists("path_matcher"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pathMatchersProp)) { obj["pathMatchers"] = pathMatchersProp } testsProp, err := expandComputeUrlMapTest(d.Get("test"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("test"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, testsProp)) { + } else if v, ok := d.GetOkExists("test"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, testsProp)) { obj["tests"] = testsProp } defaultUrlRedirectProp, err := expandComputeUrlMapDefaultUrlRedirect(d.Get("default_url_redirect"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_url_redirect"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultUrlRedirectProp)) { + } else if v, ok := d.GetOkExists("default_url_redirect"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultUrlRedirectProp)) { obj["defaultUrlRedirect"] = defaultUrlRedirectProp } defaultRouteActionProp, err := 
expandComputeUrlMapDefaultRouteAction(d.Get("default_route_action"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("default_route_action"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultRouteActionProp)) { + } else if v, ok := d.GetOkExists("default_route_action"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultRouteActionProp)) { obj["defaultRouteAction"] = defaultRouteActionProp } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/urlMaps/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/urlMaps/{{name}}") if err != nil { return err } @@ -3034,11 +3086,19 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Updating UrlMap %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating UrlMap %q: %s", d.Id(), err) @@ -3058,21 +3118,21 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error } func resourceComputeUrlMapDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - 
project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for UrlMap: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/urlMaps/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/urlMaps/{{name}}") if err != nil { return err } @@ -3081,13 +3141,21 @@ func resourceComputeUrlMapDelete(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Deleting UrlMap %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "UrlMap") + return transport_tpg.HandleNotFoundError(err, d, "UrlMap") } err = ComputeOperationWaitTime( @@ -3103,8 +3171,8 @@ func resourceComputeUrlMapDelete(d *schema.ResourceData, meta interface{}) error } func resourceComputeUrlMapImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/global/urlMaps/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -3113,7 +3181,7 @@ func resourceComputeUrlMapImport(d *schema.ResourceData, meta interface{}) ([]*s } // Replace import id for the resource id - id, err := replaceVars(d, config, 
"projects/{{project}}/global/urlMaps/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/global/urlMaps/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -3122,25 +3190,25 @@ func resourceComputeUrlMapImport(d *schema.ResourceData, meta interface{}) ([]*s return []*schema.ResourceData{d}, nil } -func flattenComputeUrlMapCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeUrlMapDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapMapId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapMapId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3154,11 +3222,11 @@ func flattenComputeUrlMapMapId(v interface{}, d *schema.ResourceData, config *Co return v // let terraform core handle it otherwise } -func flattenComputeUrlMapFingerprint(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapFingerprint(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapHeaderAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHeaderAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3177,7 +3245,7 @@ func flattenComputeUrlMapHeaderAction(v interface{}, d *schema.ResourceData, con flattenComputeUrlMapHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3197,23 +3265,23 @@ func flattenComputeUrlMapHeaderActionRequestHeadersToAdd(v interface{}, d *schem } return transformed } -func flattenComputeUrlMapHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeUrlMapHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3233,23 +3301,23 @@ func flattenComputeUrlMapHeaderActionResponseHeadersToAdd(v interface{}, d *sche } return transformed } -func flattenComputeUrlMapHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapHostRule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHostRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { 
return v } @@ -3269,26 +3337,26 @@ func flattenComputeUrlMapHostRule(v interface{}, d *schema.ResourceData, config } return transformed } -func flattenComputeUrlMapHostRuleDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHostRuleDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapHostRuleHosts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHostRuleHosts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenComputeUrlMapHostRulePathMatcher(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapHostRulePathMatcher(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcher(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcher(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3313,18 +3381,18 @@ func flattenComputeUrlMapPathMatcher(v interface{}, d *schema.ResourceData, conf } return transformed } -func flattenComputeUrlMapPathMatcherDefaultService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeUrlMapPathMatcherDescription(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherHeaderAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherHeaderAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3343,7 +3411,7 @@ func flattenComputeUrlMapPathMatcherHeaderAction(v interface{}, d *schema.Resour flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3363,23 +3431,23 @@ func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAdd(v interface{ } return transformed } -func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3399,27 +3467,27 @@ func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAdd(v interface } return transformed } -func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { +func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3440,21 +3508,21 @@ func flattenComputeUrlMapPathMatcherPathRule(v interface{}, d *schema.ResourceDa } return transformed } -func flattenComputeUrlMapPathMatcherPathRuleService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeUrlMapPathMatcherPathRulePaths(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRulePaths(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenComputeUrlMapPathMatcherPathRuleRouteAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3479,7 +3547,7 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteAction(v interface{}, d *schema 
flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(original["weightedBackendServices"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3506,38 +3574,38 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(original["maxAge"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3551,7 +3619,7 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v interf return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3566,7 
+3634,7 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v in flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3581,10 +3649,10 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3598,11 +3666,11 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3617,7 +3685,7 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3632,10 +3700,10 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3649,15 +3717,15 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3670,14 +3738,14 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v int flattenComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(original["backendService"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3694,10 +3762,10 @@ func 
flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{} flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(original["retryConditions"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3711,7 +3779,7 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(v i return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3726,10 +3794,10 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout( flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3743,15 +3811,15 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutN return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3766,10 +3834,10 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(original["seconds"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3783,11 +3851,11 @@ func 
flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v interface{ return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3802,15 +3870,15 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(original["pathPrefixRewrite"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ 
-3830,14 +3898,14 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v } return transformed } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3856,7 +3924,7 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHe flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3876,23 +3944,23 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHe } return transformed } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3912,26 +3980,26 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHe } return transformed } -func 
flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { 
- if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3945,7 +4013,7 @@ func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWe return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3968,31 +4036,31 @@ func flattenComputeUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d *schema flattenComputeUrlMapPathMatcherPathRuleUrlRedirectStripQuery(original["stripQuery"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRules(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4015,10 +4083,10 @@ func flattenComputeUrlMapPathMatcherRouteRules(v interface{}, d *schema.Resource } return transformed } -func flattenComputeUrlMapPathMatcherRouteRulesPriority(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesPriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4032,14 +4100,14 @@ func flattenComputeUrlMapPathMatcherRouteRulesPriority(v interface{}, d *schema. 
return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherRouteRulesService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4058,7 +4126,7 @@ func flattenComputeUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d *sch flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4078,23 +4146,23 @@ func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v } return transformed } -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4114,23 +4182,23 @@ func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v } return transformed } -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4150,15 +4218,16 @@ func flattenComputeUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d *schem "prefix_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(original["prefixMatch"], d, config), "query_parameter_matches": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(original["queryParameterMatches"], d, config), "regex_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(original["regexMatch"], d, config), + "path_template_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesPathTemplateMatch(original["pathTemplateMatch"], d, config), }) } return transformed } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4183,27 +4252,27 @@ func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interfac } return transformed } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4218,10 +4287,10 @@ func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch( flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(original["rangeStart"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4235,10 +4304,10 @@ func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchR return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4252,19 +4321,19 @@ func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchR return v // let terraform core handle it 
otherwise } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4283,7 +4352,7 @@ func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interf } return transformed } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4302,23 +4371,23 @@ func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLab } return transformed } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4339,27 +4408,31 @@ func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v } return transformed } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesPathTemplateMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4384,7 +4457,7 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d *sche 
flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(original["weightedBackendServices"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4411,38 +4484,38 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{ flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(original["maxAge"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4456,7 +4529,7 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(v inte return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ if v == nil { return nil } @@ -4471,7 +4544,7 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4486,10 +4559,10 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbo flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4503,11 +4576,11 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbo return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4522,7 +4595,7 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDel flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4537,10 +4610,10 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDel flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4554,15 +4627,15 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDel return v // let terraform core handle it otherwise } -func 
flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4575,14 +4648,14 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v i flattenComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(original["backendService"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4599,10 +4672,10 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(original["retryConditions"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4616,7 +4689,7 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(v return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4631,10 +4704,10 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeou flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 
format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4648,15 +4721,15 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeou return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4671,10 +4744,10 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(original["seconds"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if 
intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4688,11 +4761,11 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v interfac return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4705,17 +4778,23 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{ flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(original["hostRewrite"], d, config) transformed["path_prefix_rewrite"] = flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(original["pathPrefixRewrite"], d, config) + transformed["path_template_rewrite"] = + flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathTemplateRewrite(original["pathTemplateRewrite"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathTemplateRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4735,14 +4814,14 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices } return transformed } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4761,7 +4840,7 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) return []interface{}{transformed} } -func 
flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4781,23 +4860,23 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices } return transformed } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4817,26 +4896,26 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices } return transformed } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -4850,7 +4929,7 @@ func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4873,31 +4952,31 @@ func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d *sche flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(original["stripQuery"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4920,31 +4999,31 @@ func flattenComputeUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d *schema. 
flattenComputeUrlMapPathMatcherDefaultUrlRedirectStripQuery(original["stripQuery"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherDefaultUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeUrlMapPathMatcherDefaultRouteAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -4969,7 +5048,7 @@ func flattenComputeUrlMapPathMatcherDefaultRouteAction(v interface{}, d *schema. flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy(original["faultInjectionPolicy"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -4989,17 +5068,17 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServices(v } return transformed } -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } 
@@ -5013,7 +5092,7 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesWei return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5032,11 +5111,11 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHea flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["responseHeadersToAdd"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -5056,23 +5135,23 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHea } return transformed } -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -5092,19 +5171,19 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHea } return transformed } -func 
flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5119,15 +5198,15 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewrite(v interface{}, flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewriteHostRewrite(original["hostRewrite"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewritePathPrefixRewrite(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5142,14 +5221,14 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionTimeout(v interface{}, d * flattenComputeUrlMapPathMatcherDefaultRouteActionTimeoutNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherDefaultRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -5163,7 +5242,7 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionTimeoutNanos(v interface{} return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicy(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5180,14 +5259,14 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicy(v interface{}, flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout(original["perTryTimeout"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -5201,7 +5280,7 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyNumRetries(v in return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5216,14 +5295,14 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout(v 
flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -5237,7 +5316,7 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutNa return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5250,14 +5329,14 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicy(v inte flattenComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicyBackendService(original["backendService"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5284,30 +5363,30 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicy(v interface{}, flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyDisabled(original["disabled"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -5321,15 +5400,15 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyMaxAge(v interfa return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5344,7 +5423,7 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy(v int flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5359,7 +5438,7 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay( flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5374,14 +5453,14 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayF flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { 
return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -5395,11 +5474,11 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayF return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5414,10 +5493,10 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort( flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -5431,11 +5510,11 @@ func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortH return v // let terraform core handle it otherwise } -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapTest(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapTest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -5456,26 +5535,26 @@ func flattenComputeUrlMapTest(v interface{}, d *schema.ResourceData, config *Con } return transformed } -func flattenComputeUrlMapTestDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapTestDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapTestHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapTestHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapTestPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapTestPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapTestService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapTestService(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeUrlMapDefaultUrlRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultUrlRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5498,31 +5577,31 @@ func flattenComputeUrlMapDefaultUrlRedirect(v interface{}, d *schema.ResourceDat flattenComputeUrlMapDefaultUrlRedirectStripQuery(original["stripQuery"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapDefaultUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultUrlRedirectRedirectResponseCode(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5547,7 +5626,7 @@ func flattenComputeUrlMapDefaultRouteAction(v interface{}, d *schema.ResourceDat flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicy(original["faultInjectionPolicy"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServices(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -5567,17 +5646,17 @@ func flattenComputeUrlMapDefaultRouteActionWeightedBackendServices(v interface{} } return transformed } -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -5591,7 +5670,7 @@ func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesWeight(v inter return v // let terraform core handle it otherwise } -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5610,11 +5689,11 @@ func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(v flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["responseHeadersToAdd"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -5634,23 +5713,23 @@ func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRe } return transformed } -func 
flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -5670,19 +5749,19 @@ func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRe } return 
transformed } -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5697,15 +5776,15 @@ func flattenComputeUrlMapDefaultRouteActionUrlRewrite(v interface{}, d *schema.R flattenComputeUrlMapDefaultRouteActionUrlRewriteHostRewrite(original["hostRewrite"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenComputeUrlMapDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5720,14 +5799,14 @@ func flattenComputeUrlMapDefaultRouteActionTimeout(v interface{}, d *schema.Reso flattenComputeUrlMapDefaultRouteActionTimeoutNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapDefaultRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionTimeoutSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -5741,7 +5820,7 @@ func flattenComputeUrlMapDefaultRouteActionTimeoutNanos(v interface{}, d *schema return v // let terraform core handle it otherwise } -func flattenComputeUrlMapDefaultRouteActionRetryPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionRetryPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return 
nil } @@ -5758,14 +5837,14 @@ func flattenComputeUrlMapDefaultRouteActionRetryPolicy(v interface{}, d *schema. flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(original["perTryTimeout"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionRetryPolicyNumRetries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -5779,7 +5858,7 @@ func flattenComputeUrlMapDefaultRouteActionRetryPolicyNumRetries(v interface{}, return v // let terraform core handle it otherwise } -func flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5794,14 +5873,14 @@ func flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v interface{ flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -5815,7 +5894,7 @@ func flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v inter return v // let terraform core handle it otherwise } -func flattenComputeUrlMapDefaultRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionRequestMirrorPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5828,14 +5907,14 @@ func flattenComputeUrlMapDefaultRouteActionRequestMirrorPolicy(v interface{}, d flattenComputeUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(original["backendService"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenComputeUrlMapDefaultRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { if v == nil { return nil } @@ -5862,30 +5941,30 @@ func flattenComputeUrlMapDefaultRouteActionCorsPolicy(v interface{}, d *schema.R flattenComputeUrlMapDefaultRouteActionCorsPolicyDisabled(original["disabled"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionCorsPolicyMaxAge(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -5899,15 +5978,15 @@ func flattenComputeUrlMapDefaultRouteActionCorsPolicyMaxAge(v interface{}, d *sc return v // let terraform core handle it otherwise } -func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5922,7 +6001,7 @@ func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicy(v interface{}, d flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5937,7 +6016,7 @@ func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v interface 
flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5952,14 +6031,14 @@ func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -5973,11 +6052,11 @@ func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNa return v // let terraform core handle it otherwise } -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -5992,10 +6071,10 @@ func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v interface flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) return []interface{}{transformed} } -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -6009,11 +6088,11 @@ func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v return v // let terraform core handle it otherwise } -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandComputeUrlMapDefaultService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input 
is. if v == nil || v.(string) == "" { // It does not try to construct anything from empty. @@ -6023,21 +6102,21 @@ func expandComputeUrlMapDefaultService(v interface{}, d TerraformResourceData, c return v, nil } else if strings.HasPrefix(v.(string), "projects/") { // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } return url, nil } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") if err != nil { return nil, err } return url + v.(string), nil } // Anything else is assumed to be a reference to a global backend service. 
- f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) if err != nil { return "", err } @@ -6045,15 +6124,15 @@ func expandComputeUrlMapDefaultService(v interface{}, d TerraformResourceData, c return f.RelativeLink(), nil } -func expandComputeUrlMapDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6065,35 +6144,35 @@ func expandComputeUrlMapHeaderAction(v interface{}, d TerraformResourceData, con transformedRequestHeadersToAdd, err := expandComputeUrlMapHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd } transformedRequestHeadersToRemove, err := expandComputeUrlMapHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) if err != nil { return nil, err - } 
else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove } transformedResponseHeadersToAdd, err := expandComputeUrlMapHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd } transformedResponseHeadersToRemove, err := expandComputeUrlMapHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove } return transformed, nil } -func expandComputeUrlMapHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHeaderActionRequestHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -6106,21 +6185,21 @@ func expandComputeUrlMapHeaderActionRequestHeadersToAdd(v interface{}, d Terrafo transformedHeaderName, err := expandComputeUrlMapHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeUrlMapHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -6129,23 +6208,23 @@ func expandComputeUrlMapHeaderActionRequestHeadersToAdd(v interface{}, d Terrafo return req, nil } -func expandComputeUrlMapHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHeaderActionRequestHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeUrlMapHeaderActionRequestHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHeaderActionRequestHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHeaderActionResponseHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -6158,21 +6237,21 @@ func expandComputeUrlMapHeaderActionResponseHeadersToAdd(v interface{}, d Terraf transformedHeaderName, err := expandComputeUrlMapHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeUrlMapHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val 
:= reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -6181,23 +6260,23 @@ func expandComputeUrlMapHeaderActionResponseHeadersToAdd(v interface{}, d Terraf return req, nil } -func expandComputeUrlMapHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHeaderActionResponseHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHeaderActionResponseHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHeaderActionResponseHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapHostRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHostRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := 
make([]interface{}, 0, len(l)) @@ -6211,21 +6290,21 @@ func expandComputeUrlMapHostRule(v interface{}, d TerraformResourceData, config transformedDescription, err := expandComputeUrlMapHostRuleDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedHosts, err := expandComputeUrlMapHostRuleHosts(original["hosts"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHosts); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHosts); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hosts"] = transformedHosts } transformedPathMatcher, err := expandComputeUrlMapHostRulePathMatcher(original["path_matcher"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathMatcher); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathMatcher); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathMatcher"] = transformedPathMatcher } @@ -6234,24 +6313,24 @@ func expandComputeUrlMapHostRule(v interface{}, d TerraformResourceData, config return req, nil } -func expandComputeUrlMapHostRuleDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHostRuleDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapHostRuleHosts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHostRuleHosts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() 
return v, nil } -func expandComputeUrlMapHostRulePathMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapHostRulePathMatcher(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcher(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -6264,56 +6343,56 @@ func expandComputeUrlMapPathMatcher(v interface{}, d TerraformResourceData, conf transformedDefaultService, err := expandComputeUrlMapPathMatcherDefaultService(original["default_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDefaultService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDefaultService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["defaultService"] = transformedDefaultService } transformedDescription, err := expandComputeUrlMapPathMatcherDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedHeaderAction, err := expandComputeUrlMapPathMatcherHeaderAction(original["header_action"], d, config) if err != nil { return nil, err - } else if 
val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerAction"] = transformedHeaderAction } transformedName, err := expandComputeUrlMapPathMatcherName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedPathRule, err := expandComputeUrlMapPathMatcherPathRule(original["path_rule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathRule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathRule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathRules"] = transformedPathRule } transformedRouteRules, err := expandComputeUrlMapPathMatcherRouteRules(original["route_rules"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRouteRules); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRouteRules); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["routeRules"] = transformedRouteRules } transformedDefaultUrlRedirect, err := expandComputeUrlMapPathMatcherDefaultUrlRedirect(original["default_url_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDefaultUrlRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDefaultUrlRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["defaultUrlRedirect"] = transformedDefaultUrlRedirect } transformedDefaultRouteAction, err := expandComputeUrlMapPathMatcherDefaultRouteAction(original["default_route_action"], d, config) if err != nil { return 
nil, err - } else if val := reflect.ValueOf(transformedDefaultRouteAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDefaultRouteAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["defaultRouteAction"] = transformedDefaultRouteAction } @@ -6322,7 +6401,7 @@ func expandComputeUrlMapPathMatcher(v interface{}, d TerraformResourceData, conf return req, nil } -func expandComputeUrlMapPathMatcherDefaultService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input is. if v == nil || v.(string) == "" { // It does not try to construct anything from empty. @@ -6332,21 +6411,21 @@ func expandComputeUrlMapPathMatcherDefaultService(v interface{}, d TerraformReso return v, nil } else if strings.HasPrefix(v.(string), "projects/") { // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } return url, nil } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") if err != nil { return nil, err } return url + v.(string), nil } // Anything else is assumed to be a reference to a global backend service. 
- f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) if err != nil { return "", err } @@ -6354,11 +6433,11 @@ func expandComputeUrlMapPathMatcherDefaultService(v interface{}, d TerraformReso return f.RelativeLink(), nil } -func expandComputeUrlMapPathMatcherDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6370,35 +6449,35 @@ func expandComputeUrlMapPathMatcherHeaderAction(v interface{}, d TerraformResour transformedRequestHeadersToAdd, err := expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd } transformedRequestHeadersToRemove, err := expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove } transformedResponseHeadersToAdd, err := expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd } transformedResponseHeadersToRemove, err := expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove } return transformed, nil } -func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -6411,21 +6490,21 @@ func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAdd(v interface{} transformedHeaderName, err := expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -6434,23 +6513,23 @@ func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAdd(v interface{} return req, nil } -func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddReplace(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -6463,21 +6542,21 @@ func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAdd(v interface{ transformedHeaderName, err := expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err 
- } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -6486,27 +6565,27 @@ func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAdd(v interface{ return req, nil } -func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherName(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -6519,28 +6598,28 @@ func expandComputeUrlMapPathMatcherPathRule(v interface{}, d TerraformResourceDa transformedService, err := expandComputeUrlMapPathMatcherPathRuleService(original["service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["service"] = transformedService } transformedPaths, err := expandComputeUrlMapPathMatcherPathRulePaths(original["paths"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPaths); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPaths); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["paths"] = transformedPaths } transformedRouteAction, err := expandComputeUrlMapPathMatcherPathRuleRouteAction(original["route_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRouteAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRouteAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["routeAction"] = transformedRouteAction } transformedUrlRedirect, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirect(original["url_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRedirect"] = transformedUrlRedirect } @@ -6549,7 +6628,7 @@ func expandComputeUrlMapPathMatcherPathRule(v interface{}, d TerraformResourceDa return req, nil } -func expandComputeUrlMapPathMatcherPathRuleService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input is. if v == nil || v.(string) == "" { // It does not try to construct anything from empty. @@ -6559,21 +6638,21 @@ func expandComputeUrlMapPathMatcherPathRuleService(v interface{}, d TerraformRes return v, nil } else if strings.HasPrefix(v.(string), "projects/") { // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } return url, nil } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") if err != nil { return nil, err } return url + v.(string), nil } // Anything else is assumed to be a reference to a global backend service. 
- f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) if err != nil { return "", err } @@ -6581,12 +6660,12 @@ func expandComputeUrlMapPathMatcherPathRuleService(v interface{}, d TerraformRes return f.RelativeLink(), nil } -func expandComputeUrlMapPathMatcherPathRulePaths(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRulePaths(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6598,56 +6677,56 @@ func expandComputeUrlMapPathMatcherPathRuleRouteAction(v interface{}, d Terrafor transformedCorsPolicy, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicy(original["cors_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["corsPolicy"] = transformedCorsPolicy } transformedFaultInjectionPolicy, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(original["fault_injection_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["faultInjectionPolicy"] = transformedFaultInjectionPolicy } transformedRequestMirrorPolicy, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(original["request_mirror_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestMirrorPolicy"] = transformedRequestMirrorPolicy } transformedRetryPolicy, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicy(original["retry_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryPolicy"] = transformedRetryPolicy } transformedTimeout, err := expandComputeUrlMapPathMatcherPathRuleRouteActionTimeout(original["timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeout"] = transformedTimeout } transformedUrlRewrite, err := expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewrite(original["url_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRewrite"] = transformedUrlRewrite } transformedWeightedBackendServices, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(original["weighted_backend_services"], d, config) if err != nil { return nil, err - } else if val 
:= reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weightedBackendServices"] = transformedWeightedBackendServices } return transformed, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6659,95 +6738,95 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, transformedAllowCredentials, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowCredentials"] = transformedAllowCredentials } transformedAllowHeaders, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowHeaders"] = transformedAllowHeaders } transformedAllowMethods, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { + } 
else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowMethods"] = transformedAllowMethods } transformedAllowOriginRegexes, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(original["allow_origin_regexes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOriginRegexes"] = transformedAllowOriginRegexes } transformedAllowOrigins, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOrigins"] = transformedAllowOrigins } transformedDisabled, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(original["disabled"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disabled"] = transformedDisabled } transformedExposeHeaders, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exposeHeaders"] = transformedExposeHeaders } transformedMaxAge, err := 
expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(original["max_age"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxAge"] = transformedMaxAge } return transformed, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { 
+func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6759,21 +6838,21 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v int transformedAbort, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["abort"] = transformedAbort } transformedDelay, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["delay"] = transformedDelay } return transformed, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6785,29 +6864,29 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort( transformedHttpStatus, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(original["http_status"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpStatus); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpStatus); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpStatus"] = transformedHttpStatus } transformedPercentage, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } 
return transformed, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6819,21 +6898,21 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay( transformedFixedDelay, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixed_delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFixedDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFixedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fixedDelay"] = transformedFixedDelay } transformedPercentage, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } return transformed, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6845,33 +6924,33 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayF transformedNanos, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } transformedSeconds, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } return transformed, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6883,14 +6962,14 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v inte transformedBackendService, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } return transformed, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input is. if v == nil || v.(string) == "" { // It does not try to construct anything from empty. @@ -6900,21 +6979,21 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackend return v, nil } else if strings.HasPrefix(v.(string), "projects/") { // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } return url, nil } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") if err != nil { return nil, err } return url + v.(string), nil } // Anything else is assumed to be a reference to a global backend service. 
- f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) if err != nil { return "", err } @@ -6922,7 +7001,7 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackend return f.RelativeLink(), nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6934,32 +7013,32 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{}, transformedNumRetries, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(original["num_retries"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["numRetries"] = transformedNumRetries } transformedPerTryTimeout, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(original["per_try_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["perTryTimeout"] = transformedPerTryTimeout } transformedRetryConditions, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(original["retry_conditions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryConditions); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryConditions"] = transformedRetryConditions } return transformed, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -6971,33 +7050,33 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(v transformedNanos, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } transformedSeconds, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } return transformed, nil } -func 
expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7009,29 +7088,29 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d T transformedNanos, err := expandComputeUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } transformedSeconds, err 
:= expandComputeUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } return transformed, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7043,29 +7122,29 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, transformedHostRewrite, err := expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRewrite"] = 
transformedHostRewrite } transformedPathPrefixRewrite, err := expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite } return transformed, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7078,21 +7157,21 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v transformedBackendService, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } transformedHeaderAction, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(original["header_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerAction"] = transformedHeaderAction } transformedWeight, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(original["weight"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weight"] = transformedWeight } @@ -7101,7 +7180,7 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v return req, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input is. if v == nil || v.(string) == "" { // It does not try to construct anything from empty. 
@@ -7111,21 +7190,21 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBac return v, nil } else if strings.HasPrefix(v.(string), "projects/") { // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } return url, nil } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") if err != nil { return nil, err } return url + v.(string), nil } // Anything else is assumed to be a reference to a global backend service. - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) if err != nil { return "", err } @@ -7133,7 +7212,7 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBac return f.RelativeLink(), nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7145,35 +7224,35 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHea transformedRequestHeadersToAdd, err := 
expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd } transformedRequestHeadersToRemove, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove } transformedResponseHeadersToAdd, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd } transformedResponseHeadersToRemove, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove } return transformed, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7186,21 +7265,21 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHea transformedHeaderName, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -7209,23 +7288,23 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHea return req, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } 
-func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7238,21 +7317,21 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHea transformedHeaderName, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ 
-7261,27 +7340,27 @@ func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHea return req, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7293,73 +7372,73 @@ func expandComputeUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d Terrafor transformedHostRedirect, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(original["host_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRedirect"] = transformedHostRedirect } transformedHttpsRedirect, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(original["https_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpsRedirect"] = transformedHttpsRedirect } transformedPathRedirect, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(original["path_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathRedirect"] = 
transformedPathRedirect } transformedPrefixRedirect, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixRedirect"] = transformedPrefixRedirect } transformedRedirectResponseCode, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["redirectResponseCode"] = transformedRedirectResponseCode } transformedStripQuery, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirectStripQuery(original["strip_query"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["stripQuery"] = transformedStripQuery } return transformed, nil } -func expandComputeUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherPathRuleUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherPathRuleUrlRedirectStripQuery(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7372,42 +7451,42 @@ func expandComputeUrlMapPathMatcherRouteRules(v interface{}, d TerraformResource transformedPriority, err := 
expandComputeUrlMapPathMatcherRouteRulesPriority(original["priority"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPriority); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPriority); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["priority"] = transformedPriority } transformedService, err := expandComputeUrlMapPathMatcherRouteRulesService(original["service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["service"] = transformedService } transformedHeaderAction, err := expandComputeUrlMapPathMatcherRouteRulesHeaderAction(original["header_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerAction"] = transformedHeaderAction } transformedMatchRules, err := expandComputeUrlMapPathMatcherRouteRulesMatchRules(original["match_rules"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMatchRules); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMatchRules); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["matchRules"] = transformedMatchRules } transformedRouteAction, err := expandComputeUrlMapPathMatcherRouteRulesRouteAction(original["route_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRouteAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRouteAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["routeAction"] = transformedRouteAction } 
transformedUrlRedirect, err := expandComputeUrlMapPathMatcherRouteRulesUrlRedirect(original["url_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRedirect"] = transformedUrlRedirect } @@ -7416,11 +7495,11 @@ func expandComputeUrlMapPathMatcherRouteRules(v interface{}, d TerraformResource return req, nil } -func expandComputeUrlMapPathMatcherRouteRulesPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesPriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input is. if v == nil || v.(string) == "" { // It does not try to construct anything from empty. @@ -7430,21 +7509,21 @@ func expandComputeUrlMapPathMatcherRouteRulesService(v interface{}, d TerraformR return v, nil } else if strings.HasPrefix(v.(string), "projects/") { // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } return url, nil } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { // For regional or zonal resources which include their region or zone, just put the project in front. 
- url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") if err != nil { return nil, err } return url + v.(string), nil } // Anything else is assumed to be a reference to a global backend service. - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) if err != nil { return "", err } @@ -7452,7 +7531,7 @@ func expandComputeUrlMapPathMatcherRouteRulesService(v interface{}, d TerraformR return f.RelativeLink(), nil } -func expandComputeUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7464,35 +7543,35 @@ func expandComputeUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d Terra transformedRequestHeadersToAdd, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd } transformedRequestHeadersToRemove, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove } transformedResponseHeadersToAdd, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd } transformedResponseHeadersToRemove, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove } return transformed, nil } -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7505,21 +7584,21 @@ func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v i transformedHeaderName, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -7528,23 +7607,23 @@ func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v i return req, nil } -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7557,21 +7636,21 @@ func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v transformedHeaderName, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -7580,23 +7659,23 @@ func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v return req, nil } -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7609,62 +7688,69 @@ func expandComputeUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d Terrafo transformedFullPathMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(original["full_path_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFullPathMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFullPathMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fullPathMatch"] = transformedFullPathMatch } transformedHeaderMatches, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(original["header_matches"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderMatches); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderMatches); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerMatches"] = transformedHeaderMatches } transformedIgnoreCase, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(original["ignore_case"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIgnoreCase); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIgnoreCase); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ignoreCase"] = transformedIgnoreCase } 
transformedMetadataFilters, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(original["metadata_filters"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMetadataFilters); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMetadataFilters); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["metadataFilters"] = transformedMetadataFilters } transformedPrefixMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(original["prefix_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixMatch"] = transformedPrefixMatch } transformedQueryParameterMatches, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(original["query_parameter_matches"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedQueryParameterMatches); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedQueryParameterMatches); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["queryParameterMatches"] = transformedQueryParameterMatches } transformedRegexMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(original["regex_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRegexMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRegexMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["regexMatch"] = transformedRegexMatch } + transformedPathTemplateMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesPathTemplateMatch(original["path_template_match"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedPathTemplateMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pathTemplateMatch"] = transformedPathTemplateMatch + } + req = append(req, transformed) } return req, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7677,56 +7763,56 @@ func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface transformedExactMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(original["exact_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExactMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExactMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exactMatch"] = transformedExactMatch } transformedHeaderName, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedInvertMatch, err := 
expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(original["invert_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInvertMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInvertMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["invertMatch"] = transformedInvertMatch } transformedPrefixMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(original["prefix_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixMatch"] = transformedPrefixMatch } transformedPresentMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(original["present_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPresentMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPresentMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["presentMatch"] = transformedPresentMatch } transformedRangeMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(original["range_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRangeMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRangeMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["rangeMatch"] = transformedRangeMatch } transformedRegexMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(original["regex_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRegexMatch); val.IsValid() && !isEmptyValue(val) { + } else if 
val := reflect.ValueOf(transformedRegexMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["regexMatch"] = transformedRegexMatch } transformedSuffixMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(original["suffix_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSuffixMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSuffixMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["suffixMatch"] = transformedSuffixMatch } @@ -7735,27 +7821,27 @@ func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface return req, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7767,41 +7853,41 @@ func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(v transformedRangeEnd, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(original["range_end"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRangeEnd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRangeEnd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["rangeEnd"] = transformedRangeEnd } transformedRangeStart, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(original["range_start"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRangeStart); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRangeStart); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["rangeStart"] = transformedRangeStart } return transformed, nil } -func 
expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7814,14 +7900,14 @@ func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interfa transformedFilterLabels, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(original["filter_labels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFilterLabels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFilterLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["filterLabels"] = transformedFilterLabels } transformedFilterMatchCriteria, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(original["filter_match_criteria"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFilterMatchCriteria); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFilterMatchCriteria); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["filterMatchCriteria"] = transformedFilterMatchCriteria } @@ -7830,7 +7916,7 @@ func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interfa return req, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7843,14 +7929,14 @@ func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabe transformedName, err := 
expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedValue, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(original["value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["value"] = transformedValue } @@ -7859,23 +7945,23 @@ func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabe return req, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -7888,28 +7974,28 @@ func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v i transformedExactMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(original["exact_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExactMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExactMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exactMatch"] = transformedExactMatch } transformedName, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedPresentMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(original["present_match"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedPresentMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPresentMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["presentMatch"] = transformedPresentMatch } transformedRegexMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(original["regex_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRegexMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRegexMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["regexMatch"] = transformedRegexMatch } @@ -7918,27 +8004,31 @@ func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v i return req, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesMatchRulesPathTemplateMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -7950,56 +8040,56 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d Terraf transformedCorsPolicy, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(original["cors_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["corsPolicy"] = transformedCorsPolicy } transformedFaultInjectionPolicy, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(original["fault_injection_policy"], d, config) if err != nil { return nil, err - } else if val 
:= reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["faultInjectionPolicy"] = transformedFaultInjectionPolicy } transformedRequestMirrorPolicy, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(original["request_mirror_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestMirrorPolicy"] = transformedRequestMirrorPolicy } transformedRetryPolicy, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(original["retry_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryPolicy"] = transformedRetryPolicy } transformedTimeout, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeout(original["timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeout"] = transformedTimeout } transformedUrlRewrite, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(original["url_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRewrite"] = 
transformedUrlRewrite } transformedWeightedBackendServices, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(original["weighted_backend_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weightedBackendServices"] = transformedWeightedBackendServices } return transformed, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8011,95 +8101,95 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{} transformedAllowCredentials, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowCredentials"] = transformedAllowCredentials } transformedAllowHeaders, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowHeaders"] = transformedAllowHeaders } transformedAllowMethods, err := 
expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowMethods"] = transformedAllowMethods } transformedAllowOriginRegexes, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(original["allow_origin_regexes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOriginRegexes"] = transformedAllowOriginRegexes } transformedAllowOrigins, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOrigins"] = transformedAllowOrigins } transformedDisabled, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(original["disabled"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disabled"] = transformedDisabled } transformedExposeHeaders, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExposeHeaders); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exposeHeaders"] = transformedExposeHeaders } transformedMaxAge, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(original["max_age"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxAge"] = transformedMaxAge } return transformed, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8111,21 +8201,21 @@ func 
expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v i transformedAbort, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["abort"] = transformedAbort } transformedDelay, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["delay"] = transformedDelay } return transformed, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8137,29 +8227,29 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbor transformedHttpStatus, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(original["http_status"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpStatus); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpStatus); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpStatus"] = transformedHttpStatus } transformedPercentage, err := 
expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } return transformed, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8171,21 +8261,21 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDela transformedFixedDelay, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixed_delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFixedDelay); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFixedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fixedDelay"] = transformedFixedDelay } transformedPercentage, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } return transformed, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8197,33 +8287,33 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDela transformedNanos, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } transformedSeconds, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["seconds"] = transformedSeconds } return transformed, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8235,14 +8325,14 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v in transformedBackendService, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } return transformed, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input is. if v == nil || v.(string) == "" { // It does not try to construct anything from empty. @@ -8252,21 +8342,21 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBacke return v, nil } else if strings.HasPrefix(v.(string), "projects/") { // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } return url, nil } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") if err != nil { return nil, err } return url + v.(string), nil } // Anything else is assumed to be a reference to a global backend service. 
- f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) if err != nil { return "", err } @@ -8274,7 +8364,7 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBacke return f.RelativeLink(), nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8286,32 +8376,32 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface{ transformedNumRetries, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(original["num_retries"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["numRetries"] = transformedNumRetries } transformedPerTryTimeout, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(original["per_try_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["perTryTimeout"] = transformedPerTryTimeout } transformedRetryConditions, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(original["retry_conditions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryConditions); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryConditions"] = transformedRetryConditions } return transformed, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8323,33 +8413,33 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout transformedNanos, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } transformedSeconds, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } return transformed, nil } -func 
expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8361,29 +8451,29 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, d transformedNanos, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } 
transformedSeconds, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } return transformed, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8395,29 +8485,40 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{} transformedHostRewrite, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["hostRewrite"] = transformedHostRewrite } transformedPathPrefixRewrite, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite } + transformedPathTemplateRewrite, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathTemplateRewrite(original["path_template_rewrite"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPathTemplateRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pathTemplateRewrite"] = transformedPathTemplateRewrite + } + return transformed, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathTemplateRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -8430,21 +8531,21 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices( transformedBackendService, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } transformedHeaderAction, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(original["header_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerAction"] = transformedHeaderAction } transformedWeight, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(original["weight"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weight"] = transformedWeight } @@ -8453,7 +8554,7 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices( return req, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input is. if v == nil || v.(string) == "" { // It does not try to construct anything from empty. @@ -8463,21 +8564,21 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesB return v, nil } else if strings.HasPrefix(v.(string), "projects/") { // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } return url, nil } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") if err != nil { return nil, err } return url + v.(string), nil } // Anything else is assumed to be a reference to a global backend service. 
- f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) if err != nil { return "", err } @@ -8485,7 +8586,7 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesB return f.RelativeLink(), nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8497,35 +8598,35 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesH transformedRequestHeadersToAdd, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd } transformedRequestHeadersToRemove, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove } 
transformedResponseHeadersToAdd, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd } transformedResponseHeadersToRemove, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove } return transformed, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -8538,21 +8639,21 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesH transformedHeaderName, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -8561,23 +8662,23 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesH return req, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -8590,21 +8691,21 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesH transformedHeaderName, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -8613,27 +8714,27 @@ func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesH return req, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8645,73 +8746,73 @@ func expandComputeUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d Terraf transformedHostRedirect, err := 
expandComputeUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(original["host_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRedirect"] = transformedHostRedirect } transformedHttpsRedirect, err := expandComputeUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(original["https_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpsRedirect"] = transformedHttpsRedirect } transformedPathRedirect, err := expandComputeUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(original["path_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathRedirect"] = transformedPathRedirect } transformedPrefixRedirect, err := expandComputeUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixRedirect"] = transformedPrefixRedirect } transformedRedirectResponseCode, err := expandComputeUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["redirectResponseCode"] = transformedRedirectResponseCode } transformedStripQuery, err := expandComputeUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(original["strip_query"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["stripQuery"] = transformedStripQuery } return transformed, nil } -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8723,73 +8824,73 @@ func expandComputeUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d Terraform transformedHostRedirect, err := expandComputeUrlMapPathMatcherDefaultUrlRedirectHostRedirect(original["host_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRedirect"] = transformedHostRedirect } transformedHttpsRedirect, err := expandComputeUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(original["https_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["httpsRedirect"] = transformedHttpsRedirect } transformedPathRedirect, err := expandComputeUrlMapPathMatcherDefaultUrlRedirectPathRedirect(original["path_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathRedirect"] = transformedPathRedirect } transformedPrefixRedirect, err := expandComputeUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixRedirect"] = transformedPrefixRedirect } transformedRedirectResponseCode, err := expandComputeUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["redirectResponseCode"] = transformedRedirectResponseCode } transformedStripQuery, err := expandComputeUrlMapPathMatcherDefaultUrlRedirectStripQuery(original["strip_query"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["stripQuery"] = transformedStripQuery } return transformed, nil } -func expandComputeUrlMapPathMatcherDefaultUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { +func expandComputeUrlMapPathMatcherDefaultUrlRedirectHostRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultUrlRedirectPathRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultUrlRedirectStripQuery(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteAction(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8801,56 +8902,56 @@ func expandComputeUrlMapPathMatcherDefaultRouteAction(v interface{}, d Terraform transformedWeightedBackendServices, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServices(original["weighted_backend_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weightedBackendServices"] = transformedWeightedBackendServices } transformedUrlRewrite, err := expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewrite(original["url_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRewrite"] = transformedUrlRewrite } transformedTimeout, err := expandComputeUrlMapPathMatcherDefaultRouteActionTimeout(original["timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeout"] = transformedTimeout } transformedRetryPolicy, err := expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicy(original["retry_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryPolicy); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryPolicy"] = transformedRetryPolicy } transformedRequestMirrorPolicy, err := expandComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicy(original["request_mirror_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestMirrorPolicy"] = transformedRequestMirrorPolicy } transformedCorsPolicy, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicy(original["cors_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["corsPolicy"] = transformedCorsPolicy } transformedFaultInjectionPolicy, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy(original["fault_injection_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["faultInjectionPolicy"] = transformedFaultInjectionPolicy } return transformed, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -8863,21 +8964,21 @@ func 
expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServices(v i transformedBackendService, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } transformedWeight, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesWeight(original["weight"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weight"] = transformedWeight } transformedHeaderAction, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderAction(original["header_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerAction"] = transformedHeaderAction } @@ -8886,7 +8987,7 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServices(v i return req, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input is. 
if v == nil || v.(string) == "" { // It does not try to construct anything from empty. @@ -8896,21 +8997,21 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesBack return v, nil } else if strings.HasPrefix(v.(string), "projects/") { // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } return url, nil } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") if err != nil { return nil, err } return url + v.(string), nil } // Anything else is assumed to be a reference to a global backend service. 
- f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) if err != nil { return "", err } @@ -8918,11 +9019,11 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesBack return f.RelativeLink(), nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -8934,39 +9035,39 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHead transformedRequestHeadersToRemove, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove } transformedRequestHeadersToAdd, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) 
if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd } transformedResponseHeadersToRemove, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove } transformedResponseHeadersToAdd, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd } return transformed, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -8979,21 +9080,21 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHead transformedHeaderName, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -9002,23 +9103,23 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHead return req, nil } -func 
expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -9031,21 +9132,21 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHead transformedHeaderName, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -9054,19 +9155,19 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHead return req, nil } -func 
expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9078,29 +9179,29 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewrite(v interface{}, d transformedPathPrefixRewrite, err := expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) if err != nil { return nil, err - } else 
if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite } transformedHostRewrite, err := expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRewrite"] = transformedHostRewrite } return transformed, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9112,29 +9213,29 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionTimeout(v interface{}, d Te transformedSeconds, err := 
expandComputeUrlMapPathMatcherDefaultRouteActionTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandComputeUrlMapPathMatcherDefaultRouteActionTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9146,36 +9247,36 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicy(v interface{}, transformedRetryConditions, err := 
expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyRetryConditions(original["retry_conditions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryConditions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryConditions"] = transformedRetryConditions } transformedNumRetries, err := expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyNumRetries(original["num_retries"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["numRetries"] = transformedNumRetries } transformedPerTryTimeout, err := expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout(original["per_try_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["perTryTimeout"] = transformedPerTryTimeout } return transformed, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyNumRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyNumRetries(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9187,29 +9288,29 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout(v transformedSeconds, err := expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9221,14 +9322,14 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicy(v inter transformedBackendService, err := expandComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicyBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } return transformed, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input is. if v == nil || v.(string) == "" { // It does not try to construct anything from empty. 
@@ -9238,21 +9339,21 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicyBackendS return v, nil } else if strings.HasPrefix(v.(string), "projects/") { // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } return url, nil } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") if err != nil { return nil, err } return url + v.(string), nil } // Anything else is assumed to be a reference to a global backend service. - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) if err != nil { return "", err } @@ -9260,7 +9361,7 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicyBackendS return f.RelativeLink(), nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9272,95 +9373,95 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicy(v interface{}, d transformedAllowOrigins, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) if err != nil { return nil, 
err - } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOrigins"] = transformedAllowOrigins } transformedAllowOriginRegexes, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOriginRegexes(original["allow_origin_regexes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOriginRegexes"] = transformedAllowOriginRegexes } transformedAllowMethods, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowMethods"] = transformedAllowMethods } transformedAllowHeaders, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowHeaders"] = transformedAllowHeaders } transformedExposeHeaders, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["exposeHeaders"] = transformedExposeHeaders } transformedMaxAge, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyMaxAge(original["max_age"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxAge"] = transformedMaxAge } transformedAllowCredentials, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowCredentials"] = transformedAllowCredentials } transformedDisabled, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyDisabled(original["disabled"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disabled"] = transformedDisabled } return transformed, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOriginRegexes(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyMaxAge(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9372,21 +9473,21 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy(v inte transformedDelay, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["delay"] = transformedDelay } transformedAbort, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["abort"] = transformedAbort } return transformed, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9398,21 
+9499,21 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay(v transformedFixedDelay, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixed_delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFixedDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFixedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fixedDelay"] = transformedFixedDelay } transformedPercentage, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } return transformed, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9424,33 +9525,33 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFi transformedSeconds, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := 
expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ 
-9462,29 +9563,29 @@ func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort(v transformedHttpStatus, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(original["http_status"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpStatus); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpStatus); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpStatus"] = transformedHttpStatus } transformedPercentage, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } return transformed, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapTest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapTest(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -9497,28 +9598,28 @@ func expandComputeUrlMapTest(v interface{}, d TerraformResourceData, config *Con transformedDescription, err := expandComputeUrlMapTestDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedHost, err := expandComputeUrlMapTestHost(original["host"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["host"] = transformedHost } transformedPath, err := expandComputeUrlMapTestPath(original["path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["path"] = transformedPath } transformedService, err := expandComputeUrlMapTestService(original["service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["service"] = transformedService } @@ -9527,19 +9628,19 @@ func expandComputeUrlMapTest(v interface{}, d TerraformResourceData, config *Con return req, nil } -func expandComputeUrlMapTestDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapTestDescription(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapTestHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapTestHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapTestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapTestPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapTestService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapTestService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input is. if v == nil || v.(string) == "" { // It does not try to construct anything from empty. @@ -9549,21 +9650,21 @@ func expandComputeUrlMapTestService(v interface{}, d TerraformResourceData, conf return v, nil } else if strings.HasPrefix(v.(string), "projects/") { // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } return url, nil } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { // For regional or zonal resources which include their region or zone, just put the project in front. 
- url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") if err != nil { return nil, err } return url + v.(string), nil } // Anything else is assumed to be a reference to a global backend service. - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) if err != nil { return "", err } @@ -9571,7 +9672,7 @@ func expandComputeUrlMapTestService(v interface{}, d TerraformResourceData, conf return f.RelativeLink(), nil } -func expandComputeUrlMapDefaultUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultUrlRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9583,73 +9684,73 @@ func expandComputeUrlMapDefaultUrlRedirect(v interface{}, d TerraformResourceDat transformedHostRedirect, err := expandComputeUrlMapDefaultUrlRedirectHostRedirect(original["host_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRedirect"] = transformedHostRedirect } transformedHttpsRedirect, err := expandComputeUrlMapDefaultUrlRedirectHttpsRedirect(original["https_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpsRedirect"] = transformedHttpsRedirect } transformedPathRedirect, err 
:= expandComputeUrlMapDefaultUrlRedirectPathRedirect(original["path_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathRedirect"] = transformedPathRedirect } transformedPrefixRedirect, err := expandComputeUrlMapDefaultUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixRedirect"] = transformedPrefixRedirect } transformedRedirectResponseCode, err := expandComputeUrlMapDefaultUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["redirectResponseCode"] = transformedRedirectResponseCode } transformedStripQuery, err := expandComputeUrlMapDefaultUrlRedirectStripQuery(original["strip_query"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["stripQuery"] = transformedStripQuery } return transformed, nil } -func expandComputeUrlMapDefaultUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultUrlRedirectHostRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultUrlRedirectHttpsRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultUrlRedirectPathRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultUrlRedirectPrefixRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultUrlRedirectRedirectResponseCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultUrlRedirectStripQuery(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9661,56 +9762,56 @@ func 
expandComputeUrlMapDefaultRouteAction(v interface{}, d TerraformResourceDat transformedWeightedBackendServices, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServices(original["weighted_backend_services"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weightedBackendServices"] = transformedWeightedBackendServices } transformedUrlRewrite, err := expandComputeUrlMapDefaultRouteActionUrlRewrite(original["url_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRewrite"] = transformedUrlRewrite } transformedTimeout, err := expandComputeUrlMapDefaultRouteActionTimeout(original["timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeout"] = transformedTimeout } transformedRetryPolicy, err := expandComputeUrlMapDefaultRouteActionRetryPolicy(original["retry_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryPolicy"] = transformedRetryPolicy } transformedRequestMirrorPolicy, err := expandComputeUrlMapDefaultRouteActionRequestMirrorPolicy(original["request_mirror_policy"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestMirrorPolicy"] = transformedRequestMirrorPolicy } transformedCorsPolicy, err := expandComputeUrlMapDefaultRouteActionCorsPolicy(original["cors_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["corsPolicy"] = transformedCorsPolicy } transformedFaultInjectionPolicy, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicy(original["fault_injection_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["faultInjectionPolicy"] = transformedFaultInjectionPolicy } return transformed, nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -9723,21 +9824,21 @@ func expandComputeUrlMapDefaultRouteActionWeightedBackendServices(v interface{}, transformedBackendService, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } transformedWeight, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesWeight(original["weight"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weight"] = transformedWeight } transformedHeaderAction, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(original["header_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerAction"] = transformedHeaderAction } @@ -9746,7 +9847,7 @@ func expandComputeUrlMapDefaultRouteActionWeightedBackendServices(v interface{}, return req, nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input is. if v == nil || v.(string) == "" { // It does not try to construct anything from empty. 
@@ -9756,21 +9857,21 @@ func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesBackendService( return v, nil } else if strings.HasPrefix(v.(string), "projects/") { // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } return url, nil } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") if err != nil { return nil, err } return url + v.(string), nil } // Anything else is assumed to be a reference to a global backend service. - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) if err != nil { return "", err } @@ -9778,11 +9879,11 @@ func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesBackendService( return f.RelativeLink(), nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9794,39 +9895,39 @@ func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(v transformedRequestHeadersToRemove, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove } transformedRequestHeadersToAdd, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd } transformedResponseHeadersToRemove, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove } transformedResponseHeadersToAdd, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) if err != nil { return nil, err - } 
else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd } return transformed, nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -9839,21 +9940,21 @@ func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionReq transformedHeaderName, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -9862,23 +9963,23 @@ func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionReq return req, nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -9891,21 +9992,21 @@ func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRes transformedHeaderName, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := 
expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -9914,19 +10015,19 @@ func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRes return req, nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionUrlRewrite(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9938,29 +10039,29 @@ func expandComputeUrlMapDefaultRouteActionUrlRewrite(v interface{}, d TerraformR transformedPathPrefixRewrite, err := expandComputeUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite } transformedHostRewrite, err := expandComputeUrlMapDefaultRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRewrite"] = transformedHostRewrite } return transformed, nil } -func expandComputeUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandComputeUrlMapDefaultRouteActionTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -9972,29 +10073,29 @@ func expandComputeUrlMapDefaultRouteActionTimeout(v interface{}, d TerraformReso transformedSeconds, err := expandComputeUrlMapDefaultRouteActionTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandComputeUrlMapDefaultRouteActionTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeUrlMapDefaultRouteActionTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionRetryPolicy(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -10006,36 +10107,36 @@ func expandComputeUrlMapDefaultRouteActionRetryPolicy(v interface{}, d Terraform transformedRetryConditions, err := expandComputeUrlMapDefaultRouteActionRetryPolicyRetryConditions(original["retry_conditions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRetryConditions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRetryConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["retryConditions"] = transformedRetryConditions } transformedNumRetries, err := expandComputeUrlMapDefaultRouteActionRetryPolicyNumRetries(original["num_retries"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNumRetries); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["numRetries"] = transformedNumRetries } transformedPerTryTimeout, err := expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(original["per_try_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["perTryTimeout"] = transformedPerTryTimeout } return transformed, nil } -func expandComputeUrlMapDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionRetryPolicyNumRetries(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionRetryPolicyNumRetries(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -10047,29 +10148,29 @@ func expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v interface{} transformedSeconds, err := expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionRequestMirrorPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionRequestMirrorPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -10081,14 +10182,14 @@ func expandComputeUrlMapDefaultRouteActionRequestMirrorPolicy(v interface{}, d T transformedBackendService, err := expandComputeUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(original["backend_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBackendService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["backendService"] = transformedBackendService } return transformed, nil } -func expandComputeUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input is. if v == nil || v.(string) == "" { // It does not try to construct anything from empty. 
@@ -10098,21 +10199,21 @@ func expandComputeUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(v in return v, nil } else if strings.HasPrefix(v.(string), "projects/") { // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } return url, nil } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") if err != nil { return nil, err } return url + v.(string), nil } // Anything else is assumed to be a reference to a global backend service. - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) + f, err := tpgresource.ParseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) if err != nil { return "", err } @@ -10120,7 +10221,7 @@ func expandComputeUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(v in return f.RelativeLink(), nil } -func expandComputeUrlMapDefaultRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionCorsPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -10132,95 +10233,95 @@ func expandComputeUrlMapDefaultRouteActionCorsPolicy(v interface{}, d TerraformR transformedAllowOrigins, err := expandComputeUrlMapDefaultRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOrigins"] = transformedAllowOrigins } transformedAllowOriginRegexes, err := expandComputeUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(original["allow_origin_regexes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOriginRegexes"] = transformedAllowOriginRegexes } transformedAllowMethods, err := expandComputeUrlMapDefaultRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowMethods"] = transformedAllowMethods } transformedAllowHeaders, err := expandComputeUrlMapDefaultRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowHeaders"] = transformedAllowHeaders } transformedExposeHeaders, err := expandComputeUrlMapDefaultRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exposeHeaders"] = 
transformedExposeHeaders } transformedMaxAge, err := expandComputeUrlMapDefaultRouteActionCorsPolicyMaxAge(original["max_age"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxAge"] = transformedMaxAge } transformedAllowCredentials, err := expandComputeUrlMapDefaultRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowCredentials"] = transformedAllowCredentials } transformedDisabled, err := expandComputeUrlMapDefaultRouteActionCorsPolicyDisabled(original["disabled"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disabled"] = transformedDisabled } return transformed, nil } -func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeUrlMapDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionCorsPolicyMaxAge(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionCorsPolicyDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandComputeUrlMapDefaultRouteActionFaultInjectionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -10232,21 +10333,21 @@ func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicy(v interface{}, d transformedDelay, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["delay"] = transformedDelay } transformedAbort, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAbort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["abort"] = transformedAbort } return transformed, nil } -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -10258,21 +10359,21 @@ func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v interface{ transformedFixedDelay, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixed_delay"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedFixedDelay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFixedDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fixedDelay"] = transformedFixedDelay } transformedPercentage, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } return transformed, nil } -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -10284,33 +10385,33 @@ func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v transformedSeconds, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["nanos"] = transformedNanos } return transformed, nil } -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -10322,24 +10423,24 @@ func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v interface{ transformedHttpStatus, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(original["http_status"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpStatus); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpStatus); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpStatus"] = transformedHttpStatus } transformedPercentage, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percentage"] = transformedPercentage } return transformed, nil } -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_url_map_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_url_map_sweeper.go new file mode 100644 index 0000000000..f81b71da87 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_url_map_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeUrlMap", testSweepComputeUrlMap) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeUrlMap(region string) error { + resourceName := "ComputeUrlMap" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/urlMaps", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/urlMaps/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, 
err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_vpn_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_vpn_gateway.go new file mode 100644 index 0000000000..2da48a6b84 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_vpn_gateway.go @@ -0,0 +1,397 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceComputeVpnGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeVpnGatewayCreate, + Read: resourceComputeVpnGatewayRead, + Delete: resourceComputeVpnGatewayDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeVpnGatewayImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is +created. The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and +match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means +the first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The network this VPN gateway is accepting traffic for.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The region this gateway should sit in.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "gateway_id": { + Type: schema.TypeInt, + Computed: true, + Description: `The unique identifier for the resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeVpnGatewayDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + nameProp, err := expandComputeVpnGatewayName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + networkProp, err := expandComputeVpnGatewayNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + regionProp, err := expandComputeVpnGatewayRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetVpnGateways") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new VpnGateway: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for VpnGateway: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating VpnGateway: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating VpnGateway", userAgent, + 
d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create VpnGateway: %s", err) + } + + log.Printf("[DEBUG] Finished creating VpnGateway %q: %#v", d.Id(), res) + + return resourceComputeVpnGatewayRead(d, meta) +} + +func resourceComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for VpnGateway: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeVpnGateway %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading VpnGateway: %s", err) + } + + if err := d.Set("creation_timestamp", flattenComputeVpnGatewayCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnGateway: %s", err) + } + if err := d.Set("description", flattenComputeVpnGatewayDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnGateway: %s", err) + } + if err := d.Set("name", flattenComputeVpnGatewayName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error 
reading VpnGateway: %s", err) + } + if err := d.Set("gateway_id", flattenComputeVpnGatewayGatewayId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnGateway: %s", err) + } + if err := d.Set("network", flattenComputeVpnGatewayNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnGateway: %s", err) + } + if err := d.Set("region", flattenComputeVpnGatewayRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnGateway: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading VpnGateway: %s", err) + } + + return nil +} + +func resourceComputeVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for VpnGateway: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting VpnGateway %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "VpnGateway") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting VpnGateway", 
userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting VpnGateway %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeVpnGatewayImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/targetVpnGateways/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeVpnGatewayCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeVpnGatewayDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeVpnGatewayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeVpnGatewayGatewayId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeVpnGatewayNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + 
+func flattenComputeVpnGatewayRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandComputeVpnGatewayDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeVpnGatewayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeVpnGatewayNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("networks", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for network: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeVpnGatewayRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_vpn_gateway_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_vpn_gateway_sweeper.go new file mode 100644 index 0000000000..b4aa40e60f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_vpn_gateway_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeVpnGateway", testSweepComputeVpnGateway) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeVpnGateway(region string) error { + resourceName := "ComputeVpnGateway" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/targetVpnGateways", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] 
Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_vpn_tunnel.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_vpn_tunnel.go new file mode 100644 index 0000000000..0aa8191fb3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_vpn_tunnel.go @@ -0,0 +1,901 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "bytes" + "fmt" + "log" + "net" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// validatePeerAddr returns false if a tunnel's peer_ip property +// is invalid. Currently, only addresses that collide with RFC +// 5735 (https://tools.ietf.org/html/rfc5735) fail validation. 
+func validatePeerAddr(i interface{}, val string) ([]string, []error) { + ip := net.ParseIP(i.(string)) + if ip == nil { + return nil, []error{fmt.Errorf("could not parse %q to IP address", val)} + } + for _, test := range invalidPeerAddrs { + if bytes.Compare(ip, test.from) >= 0 && bytes.Compare(ip, test.to) <= 0 { + return nil, []error{fmt.Errorf("address is invalid (is between %q and %q, conflicting with RFC5735)", test.from, test.to)} + } + } + return nil, nil +} + +// invalidPeerAddrs is a collection of IP address ranges that represent +// a conflict with RFC 5735 (https://tools.ietf.org/html/rfc5735#page-3). +// CIDR range notations in the RFC were converted to a (from, to) pair +// for easy checking with bytes.Compare. +var invalidPeerAddrs = []struct { + from net.IP + to net.IP +}{ + { + from: net.ParseIP("0.0.0.0"), + to: net.ParseIP("0.255.255.255"), + }, + { + from: net.ParseIP("10.0.0.0"), + to: net.ParseIP("10.255.255.255"), + }, + { + from: net.ParseIP("127.0.0.0"), + to: net.ParseIP("127.255.255.255"), + }, + { + from: net.ParseIP("169.254.0.0"), + to: net.ParseIP("169.254.255.255"), + }, + { + from: net.ParseIP("172.16.0.0"), + to: net.ParseIP("172.31.255.255"), + }, + { + from: net.ParseIP("192.0.0.0"), + to: net.ParseIP("192.0.0.255"), + }, + { + from: net.ParseIP("192.0.2.0"), + to: net.ParseIP("192.0.2.255"), + }, + { + from: net.ParseIP("192.88.99.0"), + to: net.ParseIP("192.88.99.255"), + }, + { + from: net.ParseIP("192.168.0.0"), + to: net.ParseIP("192.168.255.255"), + }, + { + from: net.ParseIP("198.18.0.0"), + to: net.ParseIP("198.19.255.255"), + }, + { + from: net.ParseIP("198.51.100.0"), + to: net.ParseIP("198.51.100.255"), + }, + { + from: net.ParseIP("203.0.113.0"), + to: net.ParseIP("203.0.113.255"), + }, + { + from: net.ParseIP("224.0.0.0"), + to: net.ParseIP("239.255.255.255"), + }, + { + from: net.ParseIP("240.0.0.0"), + to: net.ParseIP("255.255.255.255"), + }, + { + from: net.ParseIP("255.255.255.255"), + to: 
net.ParseIP("255.255.255.255"), + }, +} + +func getVpnTunnelLink(config *transport_tpg.Config, project, region, tunnel, userAgent string) (string, error) { + if !strings.Contains(tunnel, "/") { + // Tunnel value provided is just the name, lookup the tunnel SelfLink + tunnelData, err := config.NewComputeClient(userAgent).VpnTunnels.Get( + project, region, tunnel).Do() + if err != nil { + return "", fmt.Errorf("Error reading tunnel: %s", err) + } + tunnel = tunnelData.SelfLink + } + + return tunnel, nil + +} + +func ResourceComputeVpnTunnel() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeVpnTunnelCreate, + Read: resourceComputeVpnTunnelRead, + Delete: resourceComputeVpnTunnelDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeVpnTunnelImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. The name must be 1-63 characters long, and +comply with RFC1035. Specifically, the name must be 1-63 +characters long and match the regular expression +'[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the first character +must be a lowercase letter, and all following characters must +be a dash, lowercase letter, or digit, +except the last character, which cannot be a dash.`, + }, + "shared_secret": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Shared secret used to set the secure session between the Cloud VPN +gateway and the peer VPN gateway.`, + Sensitive: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource.`, + }, + "ike_version": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `IKE protocol version to use when establishing the VPN tunnel with +peer VPN gateway. +Acceptable IKE versions are 1 or 2. Default version is 2.`, + Default: 2, + }, + "local_traffic_selector": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Local traffic selector to use when establishing the VPN tunnel with +peer VPN gateway. The value should be a CIDR formatted string, +for example '192.168.0.0/16'. The ranges should be disjoint. 
+Only IPv4 is supported.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "peer_external_gateway": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the peer side external VPN gateway to which this VPN tunnel is connected.`, + ConflictsWith: []string{"peer_gcp_gateway"}, + }, + "peer_external_gateway_interface": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The interface ID of the external VPN gateway to which this VPN tunnel is connected.`, + }, + "peer_gcp_gateway": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. +If provided, the VPN tunnel will automatically use the same vpn_gateway_interface +ID in the peer GCP VPN gateway. +This field must reference a 'google_compute_ha_vpn_gateway' resource.`, + ConflictsWith: []string{"peer_external_gateway"}, + }, + "peer_ip": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: validatePeerAddr, + Description: `IP address of the peer VPN gateway. Only IPv4 is supported.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The region where the tunnel is located. If unset, is set to the region of 'target_vpn_gateway'.`, + }, + "remote_traffic_selector": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Remote traffic selector to use when establishing the VPN tunnel with +peer VPN gateway. The value should be a CIDR formatted string, +for example '192.168.0.0/16'. The ranges should be disjoint. 
+Only IPv4 is supported.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "router": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of router resource to be used for dynamic routing.`, + }, + "target_vpn_gateway": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the Target VPN gateway with which this VPN tunnel is +associated.`, + }, + "vpn_gateway": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the VPN gateway with which this VPN tunnel is associated. +This must be used if a High Availability VPN gateway resource is created. +This field must reference a 'google_compute_ha_vpn_gateway' resource.`, + }, + "vpn_gateway_interface": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The interface ID of the VPN gateway with which this VPN tunnel is associated.`, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "detailed_status": { + Type: schema.TypeString, + Computed: true, + Description: `Detailed status message for the VPN tunnel.`, + }, + "shared_secret_hash": { + Type: schema.TypeString, + Computed: true, + Description: `Hash of the shared secret.`, + }, + "tunnel_id": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier for the resource. 
This identifier is defined by the server.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandComputeVpnTunnelName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandComputeVpnTunnelDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + targetVpnGatewayProp, err := expandComputeVpnTunnelTargetVpnGateway(d.Get("target_vpn_gateway"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target_vpn_gateway"); !tpgresource.IsEmptyValue(reflect.ValueOf(targetVpnGatewayProp)) && (ok || !reflect.DeepEqual(v, targetVpnGatewayProp)) { + obj["targetVpnGateway"] = targetVpnGatewayProp + } + vpnGatewayProp, err := expandComputeVpnTunnelVpnGateway(d.Get("vpn_gateway"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("vpn_gateway"); !tpgresource.IsEmptyValue(reflect.ValueOf(vpnGatewayProp)) && (ok || !reflect.DeepEqual(v, vpnGatewayProp)) { + obj["vpnGateway"] = vpnGatewayProp + } + vpnGatewayInterfaceProp, err := expandComputeVpnTunnelVpnGatewayInterface(d.Get("vpn_gateway_interface"), d, config) + if err != nil { + return err + } else if 
v, ok := d.GetOkExists("vpn_gateway_interface"); ok || !reflect.DeepEqual(v, vpnGatewayInterfaceProp) { + obj["vpnGatewayInterface"] = vpnGatewayInterfaceProp + } + peerExternalGatewayProp, err := expandComputeVpnTunnelPeerExternalGateway(d.Get("peer_external_gateway"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_external_gateway"); !tpgresource.IsEmptyValue(reflect.ValueOf(peerExternalGatewayProp)) && (ok || !reflect.DeepEqual(v, peerExternalGatewayProp)) { + obj["peerExternalGateway"] = peerExternalGatewayProp + } + peerExternalGatewayInterfaceProp, err := expandComputeVpnTunnelPeerExternalGatewayInterface(d.Get("peer_external_gateway_interface"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_external_gateway_interface"); ok || !reflect.DeepEqual(v, peerExternalGatewayInterfaceProp) { + obj["peerExternalGatewayInterface"] = peerExternalGatewayInterfaceProp + } + peerGcpGatewayProp, err := expandComputeVpnTunnelPeerGcpGateway(d.Get("peer_gcp_gateway"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_gcp_gateway"); !tpgresource.IsEmptyValue(reflect.ValueOf(peerGcpGatewayProp)) && (ok || !reflect.DeepEqual(v, peerGcpGatewayProp)) { + obj["peerGcpGateway"] = peerGcpGatewayProp + } + routerProp, err := expandComputeVpnTunnelRouter(d.Get("router"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("router"); !tpgresource.IsEmptyValue(reflect.ValueOf(routerProp)) && (ok || !reflect.DeepEqual(v, routerProp)) { + obj["router"] = routerProp + } + peerIpProp, err := expandComputeVpnTunnelPeerIp(d.Get("peer_ip"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ip"); !tpgresource.IsEmptyValue(reflect.ValueOf(peerIpProp)) && (ok || !reflect.DeepEqual(v, peerIpProp)) { + obj["peerIp"] = peerIpProp + } + sharedSecretProp, err := expandComputeVpnTunnelSharedSecret(d.Get("shared_secret"), d, config) + if err != 
nil { + return err + } else if v, ok := d.GetOkExists("shared_secret"); !tpgresource.IsEmptyValue(reflect.ValueOf(sharedSecretProp)) && (ok || !reflect.DeepEqual(v, sharedSecretProp)) { + obj["sharedSecret"] = sharedSecretProp + } + ikeVersionProp, err := expandComputeVpnTunnelIkeVersion(d.Get("ike_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ike_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(ikeVersionProp)) && (ok || !reflect.DeepEqual(v, ikeVersionProp)) { + obj["ikeVersion"] = ikeVersionProp + } + localTrafficSelectorProp, err := expandComputeVpnTunnelLocalTrafficSelector(d.Get("local_traffic_selector"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("local_traffic_selector"); !tpgresource.IsEmptyValue(reflect.ValueOf(localTrafficSelectorProp)) && (ok || !reflect.DeepEqual(v, localTrafficSelectorProp)) { + obj["localTrafficSelector"] = localTrafficSelectorProp + } + remoteTrafficSelectorProp, err := expandComputeVpnTunnelRemoteTrafficSelector(d.Get("remote_traffic_selector"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("remote_traffic_selector"); !tpgresource.IsEmptyValue(reflect.ValueOf(remoteTrafficSelectorProp)) && (ok || !reflect.DeepEqual(v, remoteTrafficSelectorProp)) { + obj["remoteTrafficSelector"] = remoteTrafficSelectorProp + } + regionProp, err := expandComputeVpnTunnelRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + obj, err = resourceComputeVpnTunnelEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnTunnels") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new VpnTunnel: %#v", obj) + billingProject := 
"" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for VpnTunnel: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating VpnTunnel: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating VpnTunnel", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create VpnTunnel: %s", err) + } + + log.Printf("[DEBUG] Finished creating VpnTunnel %q: %#v", d.Id(), res) + + return resourceComputeVpnTunnelRead(d, meta) +} + +func resourceComputeVpnTunnelRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for VpnTunnel: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeVpnTunnel %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + + if err := d.Set("tunnel_id", flattenComputeVpnTunnelTunnelId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeVpnTunnelCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("name", flattenComputeVpnTunnelName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("description", flattenComputeVpnTunnelDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("target_vpn_gateway", flattenComputeVpnTunnelTargetVpnGateway(res["targetVpnGateway"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("vpn_gateway", flattenComputeVpnTunnelVpnGateway(res["vpnGateway"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("vpn_gateway_interface", flattenComputeVpnTunnelVpnGatewayInterface(res["vpnGatewayInterface"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("peer_external_gateway", flattenComputeVpnTunnelPeerExternalGateway(res["peerExternalGateway"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("peer_external_gateway_interface", 
flattenComputeVpnTunnelPeerExternalGatewayInterface(res["peerExternalGatewayInterface"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("peer_gcp_gateway", flattenComputeVpnTunnelPeerGcpGateway(res["peerGcpGateway"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("router", flattenComputeVpnTunnelRouter(res["router"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("peer_ip", flattenComputeVpnTunnelPeerIp(res["peerIp"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("shared_secret_hash", flattenComputeVpnTunnelSharedSecretHash(res["sharedSecretHash"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("ike_version", flattenComputeVpnTunnelIkeVersion(res["ikeVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("local_traffic_selector", flattenComputeVpnTunnelLocalTrafficSelector(res["localTrafficSelector"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("remote_traffic_selector", flattenComputeVpnTunnelRemoteTrafficSelector(res["remoteTrafficSelector"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("detailed_status", flattenComputeVpnTunnelDetailedStatus(res["detailedStatus"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("region", flattenComputeVpnTunnelRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading VpnTunnel: %s", err) + } + + return nil +} + +func 
resourceComputeVpnTunnelDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for VpnTunnel: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting VpnTunnel %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "VpnTunnel") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting VpnTunnel", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting VpnTunnel %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeVpnTunnelImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/vpnTunnels/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") + if err != 
nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeVpnTunnelTunnelId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeVpnTunnelCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeVpnTunnelName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeVpnTunnelDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeVpnTunnelTargetVpnGateway(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeVpnTunnelVpnGateway(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeVpnTunnelVpnGatewayInterface(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeVpnTunnelPeerExternalGateway(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeVpnTunnelPeerExternalGatewayInterface(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string 
fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeVpnTunnelPeerGcpGateway(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeVpnTunnelRouter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenComputeVpnTunnelPeerIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeVpnTunnelSharedSecretHash(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeVpnTunnelIkeVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenComputeVpnTunnelLocalTrafficSelector(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenComputeVpnTunnelRemoteTrafficSelector(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func 
flattenComputeVpnTunnelDetailedStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeVpnTunnelRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandComputeVpnTunnelName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeVpnTunnelDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeVpnTunnelTargetVpnGateway(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("targetVpnGateways", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for target_vpn_gateway: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeVpnTunnelVpnGateway(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("vpnGateways", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for vpn_gateway: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeVpnTunnelVpnGatewayInterface(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeVpnTunnelPeerExternalGateway(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("externalVpnGateways", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for peer_external_gateway: %s", err) + 
} + return f.RelativeLink(), nil +} + +func expandComputeVpnTunnelPeerExternalGatewayInterface(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeVpnTunnelPeerGcpGateway(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("vpnGateways", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for peer_gcp_gateway: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeVpnTunnelRouter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil || v.(string) == "" { + return "", nil + } + f, err := tpgresource.ParseRegionalFieldValue("routers", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for router: %s", err) + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+f.RelativeLink()) + if err != nil { + return nil, err + } + + return url, nil +} + +func expandComputeVpnTunnelPeerIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeVpnTunnelSharedSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeVpnTunnelIkeVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeVpnTunnelLocalTrafficSelector(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandComputeVpnTunnelRemoteTrafficSelector(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandComputeVpnTunnelRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} + +func resourceComputeVpnTunnelEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + f, err := tpgresource.ParseRegionalFieldValue("targetVpnGateways", d.Get("target_vpn_gateway").(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, err + } + if _, ok := d.GetOk("project"); !ok { + if err := d.Set("project", f.Project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + if _, ok := d.GetOk("region"); !ok { + if err := d.Set("region", f.Region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + } + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_vpn_tunnel_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_vpn_tunnel_sweeper.go new file mode 100644 index 0000000000..59b40c0187 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_compute_vpn_tunnel_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeVpnTunnel", testSweepComputeVpnTunnel) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeVpnTunnel(region string) error { + resourceName := "ComputeVpnTunnel" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/vpnTunnels", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting 
for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_usage_export_bucket.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_usage_export_bucket.go similarity index 81% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_usage_export_bucket.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_usage_export_bucket.go index 7dc4a14a01..48436da9fd 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_usage_export_bucket.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/resource_usage_export_bucket.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute import ( "fmt" "log" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/compute/v1" @@ -50,20 +55,20 @@ func ResourceProjectUsageBucket() *schema.Resource { } func resourceProjectUsageBucketRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } p, err := config.NewComputeClient(userAgent).Projects.Get(project).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Project data for project %s", project)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project data for project %s", project)) } if p.UsageExportLocation == nil { @@ -85,13 +90,13 @@ func resourceProjectUsageBucketRead(d *schema.ResourceData, meta interface{}) er } func resourceProjectUsageBucketCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -118,13 +123,13 @@ func resourceProjectUsageBucketCreate(d *schema.ResourceData, meta interface{}) } func resourceProjectUsageBucketDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/security_policy_association_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/security_policy_association_utils.go new file mode 100644 index 0000000000..8db869f065 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/security_policy_association_utils.go @@ -0,0 +1,3 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package compute diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/stateful_mig_polling.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/stateful_mig_polling.go new file mode 100644 index 0000000000..45c4b442cf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/compute/stateful_mig_polling.go @@ -0,0 +1,171 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package compute + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// PerInstanceConfig needs both regular operation polling AND custom polling for deletion which is why this is not generated +func resourceComputePerInstanceConfigPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/listPerInstanceConfigs") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return res, err + } + res, err = flattenNestedComputePerInstanceConfig(d, meta, res) + if err != nil { + return nil, err + } + + // Returns nil res if nested object is not found + return res, nil + } +} + +// RegionPerInstanceConfig needs both regular operation polling AND custom polling for deletion which is why this is not generated +func resourceComputeRegionPerInstanceConfigPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/listPerInstanceConfigs") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return res, err + } + res, err = flattenNestedComputeRegionPerInstanceConfig(d, meta, res) + if err != nil { + return nil, err + } + + // Returns nil res if nested object is not found + return res, nil + } +} + +// Returns an instance name in the form zones/{zone}/instances/{instance} for the managed +// instance matching the name of a PerInstanceConfig +func findInstanceName(d *schema.ResourceData, config *transport_tpg.Config) (string, error) { + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/listManagedInstances") + if err != nil { + return "", err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return "", err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return "", err + } + instanceNameToFind := fmt.Sprintf("/%s", d.Get("name").(string)) + + token := "" + for paginate := true; paginate; { + urlWithToken := "" + if token != "" { + urlWithToken = fmt.Sprintf("%s?maxResults=1&pageToken=%s", url, token) + } else { + urlWithToken = fmt.Sprintf("%s?maxResults=1", url) + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: urlWithToken, + UserAgent: userAgent, + }) + if err != nil { + return "", err + } + + managedInstances, ok := res["managedInstances"] + if !ok { + return "", fmt.Errorf("Failed to parse response for 
listManagedInstances for %s", d.Id()) + } + + managedInstancesArr := managedInstances.([]interface{}) + for _, managedInstanceRaw := range managedInstancesArr { + instance := managedInstanceRaw.(map[string]interface{}) + name, ok := instance["instance"] + if !ok { + return "", fmt.Errorf("Failed to read instance name for managed instance: %#v", instance) + } + if strings.HasSuffix(name.(string), instanceNameToFind) { + return name.(string), nil + } + } + + tokenRaw, paginate := res["nextPageToken"] + if paginate { + token = tokenRaw.(string) + } + } + + return "", fmt.Errorf("Failed to find managed instance with name: %s", instanceNameToFind) +} + +func PollCheckInstanceConfigDeleted(resp map[string]interface{}, respErr error) transport_tpg.PollResult { + if respErr != nil { + return transport_tpg.ErrorPollResult(respErr) + } + + // Nested object 404 appears as nil response + if resp == nil { + // Config no longer exists + return transport_tpg.SuccessPollResult() + } + + // Read status + status := resp["status"].(string) + if status == "DELETING" { + return transport_tpg.PendingStatusPollResult("Still deleting") + } + return transport_tpg.ErrorPollResult(fmt.Errorf("Expected PerInstanceConfig to be deleting but status is: %s", status)) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/container_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/container_operation.go new file mode 100644 index 0000000000..e3827f5fdd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/container_operation.go @@ -0,0 +1,128 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package container + +import ( + "context" + "errors" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/container/v1" +) + +type ContainerOperationWaiter struct { + Service *container.Service + Context context.Context + Op *container.Operation + Project string + Location string + UserProjectOverride bool +} + +func (w *ContainerOperationWaiter) State() string { + if w == nil || w.Op == nil { + return "" + } + return w.Op.Status +} + +func (w *ContainerOperationWaiter) Error() error { + if w == nil || w.Op == nil { + return nil + } + + // Error gets called during operation polling to see if there is an error. + // Since container's operation doesn't have an "error" field, we must wait + // until it's done and check the status message + for _, pending := range w.PendingStates() { + if w.Op.Status == pending { + return nil + } + } + + if w.Op.StatusMessage != "" { + return fmt.Errorf(w.Op.StatusMessage) + } + + return nil +} + +func (w *ContainerOperationWaiter) IsRetryable(error) bool { + return false +} + +func (w *ContainerOperationWaiter) SetOp(op interface{}) error { + var ok bool + w.Op, ok = op.(*container.Operation) + if !ok { + return fmt.Errorf("Unable to set operation. 
Bad type!") + } + return nil +} + +func (w *ContainerOperationWaiter) QueryOp() (interface{}, error) { + if w == nil || w.Op == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + name := fmt.Sprintf("projects/%s/locations/%s/operations/%s", + w.Project, w.Location, w.Op.Name) + + var op *container.Operation + select { + case <-w.Context.Done(): + log.Println("[WARN] request has been cancelled early") + return op, errors.New("unable to finish polling, context has been cancelled") + default: + // default must be here to keep the previous case from blocking + } + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (opErr error) { + opGetCall := w.Service.Projects.Locations.Operations.Get(name) + if w.UserProjectOverride { + opGetCall.Header().Add("X-Goog-User-Project", w.Project) + } + op, opErr = opGetCall.Do() + return opErr + }, + Timeout: transport_tpg.DefaultRequestTimeout, + }) + + return op, err +} + +func (w *ContainerOperationWaiter) OpName() string { + if w == nil || w.Op == nil { + return "" + } + return w.Op.Name +} + +func (w *ContainerOperationWaiter) PendingStates() []string { + return []string{"PENDING", "RUNNING"} +} + +func (w *ContainerOperationWaiter) TargetStates() []string { + return []string{"DONE"} +} + +func ContainerOperationWait(config *transport_tpg.Config, op *container.Operation, project, location, activity, userAgent string, timeout time.Duration) error { + w := &ContainerOperationWaiter{ + Service: config.NewContainerClient(userAgent), + Context: config.Context, + Op: op, + Project: project, + Location: location, + UserProjectOverride: config.UserProjectOverride, + } + + if err := w.SetOp(op); err != nil { + return err + } + + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/data_source_google_container_cluster.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/data_source_google_container_cluster.go new file mode 100644 index 0000000000..93f9758e99 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/data_source_google_container_cluster.go @@ -0,0 +1,57 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package container + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleContainerCluster() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceContainerCluster().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project", "location") + + return &schema.Resource{ + Read: datasourceContainerClusterRead, + Schema: dsSchema, + } +} + +func datasourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + clusterName := d.Get("name").(string) + + location, err := tpgresource.GetLocation(d, config) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + id := containerClusterFullName(project, location, clusterName) + + d.SetId(id) + + if err := resourceContainerClusterRead(d, meta); err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_engine_versions.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/data_source_google_container_engine_versions.go similarity index 89% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_engine_versions.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/data_source_google_container_engine_versions.go index 14d6f12382..bd98b528a0 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_container_engine_versions.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/data_source_google_container_engine_versions.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package container import ( "fmt" @@ -6,6 +8,8 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleContainerEngineVersions() *schema.Resource { @@ -61,18 +65,18 @@ func DataSourceGoogleContainerEngineVersions() *schema.Resource { } func dataSourceGoogleContainerEngineVersionsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - location, err := getLocation(d, config) + location, err := tpgresource.GetLocation(d, config) if err != nil { return err } diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/node_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/node_config.go new file mode 100644 index 0000000000..8be072efa2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/node_config.go @@ -0,0 +1,1137 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package container + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + + "google.golang.org/api/container/v1" +) + +// Matches gke-default scope from https://cloud.google.com/sdk/gcloud/reference/container/clusters/create +var defaultOauthScopes = []string{ + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/service.management.readonly", + "https://www.googleapis.com/auth/servicecontrol", + "https://www.googleapis.com/auth/trace.append", +} + +func schemaLoggingVariant() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: `Type of logging agent that is used as the default value for node pools in the cluster. 
Valid values include DEFAULT and MAX_THROUGHPUT.`, + Default: "DEFAULT", + ValidateFunc: validation.StringInSlice([]string{"DEFAULT", "MAX_THROUGHPUT"}, false), + } +} + +func schemaGcfsConfig(forceNew bool) *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `GCFS configuration for this node.`, + ForceNew: forceNew, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: forceNew, + Description: `Whether or not GCFS is enabled`, + }, + }, + }, + } +} + +func schemaNodeConfig() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The configuration of the nodepool`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(10), + Description: `Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.`, + }, + + "disk_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `Type of the disk attached to each node. 
Such as pd-standard, pd-balanced or pd-ssd`, + }, + + "guest_accelerator": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + // Legacy config mode allows removing GPU's from an existing resource + // See https://www.terraform.io/docs/configuration/attr-as-blocks.html + ConfigMode: schema.SchemaConfigModeAttr, + Description: `List of the type and count of accelerator cards attached to the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The number of the accelerator cards exposed to an instance.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The accelerator type resource name.`, + }, + "gpu_driver_installation_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + ConfigMode: schema.SchemaConfigModeAttr, + Description: `Configuration for auto installation of GPU driver.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gpu_driver_version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Mode for how the GPU driver is installed.`, + ValidateFunc: validation.StringInSlice([]string{"GPU_DRIVER_VERSION_UNSPECIFIED", "INSTALLATION_DISABLED", "DEFAULT", "LATEST"}, false), + }, + }, + }, + }, + "gpu_partition_size": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Size of partitions to create on the GPU. 
Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)`, + }, + "gpu_sharing_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + ConfigMode: schema.SchemaConfigModeAttr, + Description: `Configuration for GPU sharing.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gpu_sharing_strategy": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)`, + }, + "max_shared_clients_per_gpu": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The maximum number of containers that can share a GPU.`, + }, + }, + }, + }, + }, + }, + }, + + "image_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: `The image type to use for this node. Note that for a given image type, the latest version of it will be used.`, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + // Computed=true because GKE Sandbox will automatically add labels to nodes that can/cannot run sandboxed pods. + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The map of Kubernetes labels (key/value pairs) to be applied to each node. 
These will added in addition to any default label(s) that Kubernetes may apply to the node.`, + }, + + "resource_labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The GCE resource labels (a map of key/value pairs) to be applied to the node pool.`, + }, + + "local_ssd_count": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `The number of local SSD disks to be attached to the node.`, + }, + + "logging_variant": schemaLoggingVariant(), + + "ephemeral_storage_local_ssd_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "local_ssd_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.`, + }, + }, + }, + }, + + "local_nvme_ssd_block_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Parameters for raw-block local NVMe SSDs.`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "local_ssd_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Number of raw-block local NVMe SSD disks to be attached to the node. 
Each local SSD is 375 GB in size.`, + }, + }, + }, + }, + + "gcfs_config": schemaGcfsConfig(true), + + "gvnic": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Enable or disable gvnic in the node pool.`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Whether or not gvnic is enabled`, + }, + }, + }, + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The name of a Google Compute Engine machine type.`, + }, + + "metadata": { + Type: schema.TypeMap, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The metadata key/value pairs assigned to instances in the cluster.`, + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `Minimum CPU platform to be used by this instance. 
The instance may be scheduled on the specified or newer CPU platform.`, + }, + + "oauth_scopes": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The set of Google API scopes to be made available on all of the node VMs.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return tpgresource.CanonicalizeServiceScope(v.(string)) + }, + }, + DiffSuppressFunc: containerClusterAddedScopesSuppress, + Set: tpgresource.StringScopeHashcode, + }, + + "preemptible": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: `Whether the nodes are created as preemptible VM instances.`, + }, + "reservation_affinity": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The reservation affinity configuration for the node pool.`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "consume_reservation_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Corresponds to the type of reservation consumption.`, + ValidateFunc: validation.StringInSlice([]string{"UNSPECIFIED", "NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"}, false), + }, + "key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The label key of a reservation resource.`, + }, + "values": { + Type: schema.TypeSet, + Description: "The label values of the reservation resource.", + ForceNew: true, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "spot": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: `Whether the nodes are created as spot VM instances.`, + }, + + "service_account": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The Google Cloud Platform Service Account to be used by the node VMs.`, + }, + + "tags": { + 
Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The list of instance tags applied to all nodes.`, + }, + + "shielded_instance_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Description: `Shielded Instance options.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_secure_boot": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: `Defines whether the instance has Secure Boot enabled.`, + }, + "enable_integrity_monitoring": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + Description: `Defines whether the instance has integrity monitoring enabled.`, + }, + }, + }, + }, + + "taint": { + Type: schema.TypeList, + Optional: true, + // Computed=true because GKE Sandbox will automatically add taints to nodes that can/cannot run sandboxed pods. + Computed: true, + ForceNew: true, + // Legacy config mode allows explicitly defining an empty taint. 
+ // See https://www.terraform.io/docs/configuration/attr-as-blocks.html + ConfigMode: schema.SchemaConfigModeAttr, + Description: `List of Kubernetes taints to be applied to each node.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Key for taint.`, + }, + "value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Value for taint.`, + }, + "effect": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"NO_SCHEDULE", "PREFER_NO_SCHEDULE", "NO_EXECUTE"}, false), + Description: `Effect for taint.`, + }, + }, + }, + }, + + "workload_metadata_config": { + Computed: true, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The workload metadata configuration for this node.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"MODE_UNSPECIFIED", "GCE_METADATA", "GKE_METADATA"}, false), + Description: `Mode is the configuration for how to expose metadata to workloads running on the node.`, + }, + }, + }, + }, + + "boot_disk_kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.`, + }, + // Note that AtLeastOneOf can't be set because this schema is reused by + // two different resources. 
+ "kubelet_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Node kubelet configs.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu_manager_policy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"static", "none", ""}, false), + Description: `Control the CPU management policy on the node.`, + }, + "cpu_cfs_quota": { + Type: schema.TypeBool, + Optional: true, + Description: `Enable CPU CFS quota enforcement for containers that specify CPU limits.`, + }, + "cpu_cfs_quota_period": { + Type: schema.TypeString, + Optional: true, + Description: `Set the CPU CFS quota period value 'cpu.cfs_period_us'.`, + }, + "pod_pids_limit": { + Type: schema.TypeInt, + Optional: true, + Description: `Controls the maximum number of processes allowed to run in a pod.`, + }, + }, + }, + }, + + "linux_node_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Parameters that can be configured on Linux nodes.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sysctls": { + Type: schema.TypeMap, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.`, + }, + }, + }, + }, + "node_group": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.`, + }, + + "advanced_machine_features": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Specifies options for controlling advanced machine features.`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "threads_per_core": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The number of threads per physical core. 
To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.`, + }, + }, + }, + }, + "sole_tenant_config": { + Type: schema.TypeList, + Optional: true, + Description: `Node affinity options for sole tenant node pools.`, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_affinity": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Description: `.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `.`, + }, + "operator": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `.`, + ValidateFunc: validation.StringInSlice([]string{"IN", "NOT_IN"}, false), + }, + "values": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefaults { + configs := configured.([]interface{}) + if len(configs) == 0 || configs[0] == nil { + return nil + } + config := configs[0].(map[string]interface{}) + + nodeConfigDefaults := &container.NodeConfigDefaults{} + if variant, ok := config["logging_variant"]; ok { + nodeConfigDefaults.LoggingConfig = &container.NodePoolLoggingConfig{ + VariantConfig: &container.LoggingVariantConfig{ + Variant: variant.(string), + }, + } + } + return nodeConfigDefaults +} + +func expandNodeConfig(v interface{}) *container.NodeConfig { + nodeConfigs := v.([]interface{}) + nc := &container.NodeConfig{ + // Defaults can't be set on a list/set in the schema, so set the default on create here. 
+ OauthScopes: defaultOauthScopes, + } + if len(nodeConfigs) == 0 { + return nc + } + + nodeConfig := nodeConfigs[0].(map[string]interface{}) + + if v, ok := nodeConfig["machine_type"]; ok { + nc.MachineType = v.(string) + } + + if v, ok := nodeConfig["guest_accelerator"]; ok { + accels := v.([]interface{}) + guestAccelerators := make([]*container.AcceleratorConfig, 0, len(accels)) + for _, raw := range accels { + data := raw.(map[string]interface{}) + if data["count"].(int) == 0 { + continue + } + guestAcceleratorConfig := &container.AcceleratorConfig{ + AcceleratorCount: int64(data["count"].(int)), + AcceleratorType: data["type"].(string), + GpuPartitionSize: data["gpu_partition_size"].(string), + } + + if v, ok := data["gpu_driver_installation_config"]; ok && len(v.([]interface{})) > 0 { + gpuDriverInstallationConfig := data["gpu_driver_installation_config"].([]interface{})[0].(map[string]interface{}) + guestAcceleratorConfig.GpuDriverInstallationConfig = &container.GPUDriverInstallationConfig{ + GpuDriverVersion: gpuDriverInstallationConfig["gpu_driver_version"].(string), + } + } + + if v, ok := data["gpu_sharing_config"]; ok && len(v.([]interface{})) > 0 { + gpuSharingConfig := data["gpu_sharing_config"].([]interface{})[0].(map[string]interface{}) + guestAcceleratorConfig.GpuSharingConfig = &container.GPUSharingConfig{ + GpuSharingStrategy: gpuSharingConfig["gpu_sharing_strategy"].(string), + MaxSharedClientsPerGpu: int64(gpuSharingConfig["max_shared_clients_per_gpu"].(int)), + } + } + + guestAccelerators = append(guestAccelerators, guestAcceleratorConfig) + } + nc.Accelerators = guestAccelerators + } + + if v, ok := nodeConfig["disk_size_gb"]; ok { + nc.DiskSizeGb = int64(v.(int)) + } + + if v, ok := nodeConfig["disk_type"]; ok { + nc.DiskType = v.(string) + } + + if v, ok := nodeConfig["local_ssd_count"]; ok { + nc.LocalSsdCount = int64(v.(int)) + } + + if v, ok := nodeConfig["logging_variant"]; ok { + nc.LoggingConfig = &container.NodePoolLoggingConfig{ + 
VariantConfig: &container.LoggingVariantConfig{ + Variant: v.(string), + }, + } + } + + if v, ok := nodeConfig["local_nvme_ssd_block_config"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + nc.LocalNvmeSsdBlockConfig = &container.LocalNvmeSsdBlockConfig{ + LocalSsdCount: int64(conf["local_ssd_count"].(int)), + } + } + + if v, ok := nodeConfig["ephemeral_storage_local_ssd_config"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + nc.EphemeralStorageLocalSsdConfig = &container.EphemeralStorageLocalSsdConfig{ + LocalSsdCount: int64(conf["local_ssd_count"].(int)), + } + } + + if v, ok := nodeConfig["gcfs_config"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + nc.GcfsConfig = &container.GcfsConfig{ + Enabled: conf["enabled"].(bool), + } + } + + if v, ok := nodeConfig["gvnic"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + nc.Gvnic = &container.VirtualNIC{ + Enabled: conf["enabled"].(bool), + } + } + + if v, ok := nodeConfig["reservation_affinity"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + valuesSet := conf["values"].(*schema.Set) + values := make([]string, valuesSet.Len()) + for i, value := range valuesSet.List() { + values[i] = value.(string) + } + + nc.ReservationAffinity = &container.ReservationAffinity{ + ConsumeReservationType: conf["consume_reservation_type"].(string), + Key: conf["key"].(string), + Values: values, + } + } + + if scopes, ok := nodeConfig["oauth_scopes"]; ok { + scopesSet := scopes.(*schema.Set) + scopes := make([]string, scopesSet.Len()) + for i, scope := range scopesSet.List() { + scopes[i] = tpgresource.CanonicalizeServiceScope(scope.(string)) + } + + nc.OauthScopes = scopes + } + + if v, ok := nodeConfig["service_account"]; ok { + nc.ServiceAccount = v.(string) + } + + if v, ok := nodeConfig["metadata"]; ok { + m 
:= make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + nc.Metadata = m + } + + if v, ok := nodeConfig["image_type"]; ok { + nc.ImageType = v.(string) + } + + if v, ok := nodeConfig["labels"]; ok { + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + nc.Labels = m + } + + if v, ok := nodeConfig["resource_labels"]; ok { + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + nc.ResourceLabels = m + } + + if v, ok := nodeConfig["tags"]; ok { + tagsList := v.([]interface{}) + tags := []string{} + for _, v := range tagsList { + if v != nil { + tags = append(tags, v.(string)) + } + } + nc.Tags = tags + } + + if v, ok := nodeConfig["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + nc.ShieldedInstanceConfig = &container.ShieldedInstanceConfig{ + EnableSecureBoot: conf["enable_secure_boot"].(bool), + EnableIntegrityMonitoring: conf["enable_integrity_monitoring"].(bool), + } + } + + // Preemptible Is Optional+Default, so it always has a value + nc.Preemptible = nodeConfig["preemptible"].(bool) + + // Spot Is Optional+Default, so it always has a value + nc.Spot = nodeConfig["spot"].(bool) + + if v, ok := nodeConfig["min_cpu_platform"]; ok { + nc.MinCpuPlatform = v.(string) + } + + if v, ok := nodeConfig["taint"]; ok && len(v.([]interface{})) > 0 { + taints := v.([]interface{}) + nodeTaints := make([]*container.NodeTaint, 0, len(taints)) + for _, raw := range taints { + data := raw.(map[string]interface{}) + taint := &container.NodeTaint{ + Key: data["key"].(string), + Value: data["value"].(string), + Effect: data["effect"].(string), + } + nodeTaints = append(nodeTaints, taint) + } + nc.Taints = nodeTaints + } + + if v, ok := nodeConfig["workload_metadata_config"]; ok { + nc.WorkloadMetadataConfig = expandWorkloadMetadataConfig(v) + } + + if v, ok 
:= nodeConfig["boot_disk_kms_key"]; ok { + nc.BootDiskKmsKey = v.(string) + } + + if v, ok := nodeConfig["kubelet_config"]; ok { + nc.KubeletConfig = expandKubeletConfig(v) + } + + if v, ok := nodeConfig["linux_node_config"]; ok { + nc.LinuxNodeConfig = expandLinuxNodeConfig(v) + } + + if v, ok := nodeConfig["node_group"]; ok { + nc.NodeGroup = v.(string) + } + + if v, ok := nodeConfig["advanced_machine_features"]; ok && len(v.([]interface{})) > 0 { + advanced_machine_features := v.([]interface{})[0].(map[string]interface{}) + nc.AdvancedMachineFeatures = &container.AdvancedMachineFeatures{ + ThreadsPerCore: int64(advanced_machine_features["threads_per_core"].(int)), + } + } + + if v, ok := nodeConfig["sole_tenant_config"]; ok && len(v.([]interface{})) > 0 { + nc.SoleTenantConfig = expandSoleTenantConfig(v) + } + + return nc +} + +func expandWorkloadMetadataConfig(v interface{}) *container.WorkloadMetadataConfig { + if v == nil { + return nil + } + ls := v.([]interface{}) + if len(ls) == 0 { + return nil + } + wmc := &container.WorkloadMetadataConfig{} + + cfg := ls[0].(map[string]interface{}) + + if v, ok := cfg["mode"]; ok { + wmc.Mode = v.(string) + } + + return wmc +} + +func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig { + if v == nil { + return nil + } + ls := v.([]interface{}) + if len(ls) == 0 { + return nil + } + cfg := ls[0].(map[string]interface{}) + kConfig := &container.NodeKubeletConfig{} + if cpuManagerPolicy, ok := cfg["cpu_manager_policy"]; ok { + kConfig.CpuManagerPolicy = cpuManagerPolicy.(string) + } + if cpuCfsQuota, ok := cfg["cpu_cfs_quota"]; ok { + kConfig.CpuCfsQuota = cpuCfsQuota.(bool) + kConfig.ForceSendFields = append(kConfig.ForceSendFields, "CpuCfsQuota") + } + if cpuCfsQuotaPeriod, ok := cfg["cpu_cfs_quota_period"]; ok { + kConfig.CpuCfsQuotaPeriod = cpuCfsQuotaPeriod.(string) + } + if podPidsLimit, ok := cfg["pod_pids_limit"]; ok { + kConfig.PodPidsLimit = int64(podPidsLimit.(int)) + } + return kConfig +} + +func 
expandLinuxNodeConfig(v interface{}) *container.LinuxNodeConfig { + if v == nil { + return nil + } + ls := v.([]interface{}) + if len(ls) == 0 { + return nil + } + cfg := ls[0].(map[string]interface{}) + sysCfgRaw, ok := cfg["sysctls"] + if !ok { + return nil + } + m := make(map[string]string) + for k, v := range sysCfgRaw.(map[string]interface{}) { + m[k] = v.(string) + } + return &container.LinuxNodeConfig{ + Sysctls: m, + } +} + +func expandSoleTenantConfig(v interface{}) *container.SoleTenantConfig { + if v == nil { + return nil + } + ls := v.([]interface{}) + if len(ls) == 0 { + return nil + } + cfg := ls[0].(map[string]interface{}) + affinitiesRaw, ok := cfg["node_affinity"] + if !ok { + return nil + } + affinities := make([]*container.NodeAffinity, 0) + for _, v := range affinitiesRaw.(*schema.Set).List() { + na := v.(map[string]interface{}) + + affinities = append(affinities, &container.NodeAffinity{ + Key: na["key"].(string), + Operator: na["operator"].(string), + Values: tpgresource.ConvertStringArr(na["values"].([]interface{})), + }) + } + return &container.SoleTenantConfig{ + NodeAffinities: affinities, + } +} + +func flattenNodeConfigDefaults(c *container.NodeConfigDefaults) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + + if c == nil { + return result + } + + result = append(result, map[string]interface{}{}) + + result[0]["logging_variant"] = flattenLoggingVariant(c.LoggingConfig) + + return result +} + +func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} { + config := make([]map[string]interface{}, 0, 1) + + if c == nil { + return config + } + + config = append(config, map[string]interface{}{ + "machine_type": c.MachineType, + "disk_size_gb": c.DiskSizeGb, + "disk_type": c.DiskType, + "guest_accelerator": flattenContainerGuestAccelerators(c.Accelerators), + "local_ssd_count": c.LocalSsdCount, + "logging_variant": flattenLoggingVariant(c.LoggingConfig), + "local_nvme_ssd_block_config": 
flattenLocalNvmeSsdBlockConfig(c.LocalNvmeSsdBlockConfig), + "ephemeral_storage_local_ssd_config": flattenEphemeralStorageLocalSsdConfig(c.EphemeralStorageLocalSsdConfig), + "gcfs_config": flattenGcfsConfig(c.GcfsConfig), + "gvnic": flattenGvnic(c.Gvnic), + "reservation_affinity": flattenGKEReservationAffinity(c.ReservationAffinity), + "service_account": c.ServiceAccount, + "metadata": c.Metadata, + "image_type": c.ImageType, + "labels": c.Labels, + "resource_labels": c.ResourceLabels, + "tags": c.Tags, + "preemptible": c.Preemptible, + "spot": c.Spot, + "min_cpu_platform": c.MinCpuPlatform, + "shielded_instance_config": flattenShieldedInstanceConfig(c.ShieldedInstanceConfig), + "taint": flattenTaints(c.Taints), + "workload_metadata_config": flattenWorkloadMetadataConfig(c.WorkloadMetadataConfig), + "boot_disk_kms_key": c.BootDiskKmsKey, + "kubelet_config": flattenKubeletConfig(c.KubeletConfig), + "linux_node_config": flattenLinuxNodeConfig(c.LinuxNodeConfig), + "node_group": c.NodeGroup, + "advanced_machine_features": flattenAdvancedMachineFeaturesConfig(c.AdvancedMachineFeatures), + "sole_tenant_config": flattenSoleTenantConfig(c.SoleTenantConfig), + }) + + if len(c.OauthScopes) > 0 { + config[0]["oauth_scopes"] = schema.NewSet(tpgresource.StringScopeHashcode, tpgresource.ConvertStringArrToInterface(c.OauthScopes)) + } + + return config +} + +func flattenAdvancedMachineFeaturesConfig(c *container.AdvancedMachineFeatures) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "threads_per_core": c.ThreadsPerCore, + }) + } + return result +} + +func flattenContainerGuestAccelerators(c []*container.AcceleratorConfig) []map[string]interface{} { + result := []map[string]interface{}{} + for _, accel := range c { + accelerator := map[string]interface{}{ + "count": accel.AcceleratorCount, + "type": accel.AcceleratorType, + "gpu_partition_size": accel.GpuPartitionSize, + } + if 
accel.GpuDriverInstallationConfig != nil { + accelerator["gpu_driver_installation_config"] = []map[string]interface{}{ + { + "gpu_driver_version": accel.GpuDriverInstallationConfig.GpuDriverVersion, + }, + } + } + if accel.GpuSharingConfig != nil { + accelerator["gpu_sharing_config"] = []map[string]interface{}{ + { + "gpu_sharing_strategy": accel.GpuSharingConfig.GpuSharingStrategy, + "max_shared_clients_per_gpu": accel.GpuSharingConfig.MaxSharedClientsPerGpu, + }, + } + } + result = append(result, accelerator) + } + return result +} + +func flattenShieldedInstanceConfig(c *container.ShieldedInstanceConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enable_secure_boot": c.EnableSecureBoot, + "enable_integrity_monitoring": c.EnableIntegrityMonitoring, + }) + } + return result +} + +func flattenLocalNvmeSsdBlockConfig(c *container.LocalNvmeSsdBlockConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "local_ssd_count": c.LocalSsdCount, + }) + } + return result +} + +func flattenEphemeralStorageLocalSsdConfig(c *container.EphemeralStorageLocalSsdConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "local_ssd_count": c.LocalSsdCount, + }) + } + return result +} + +func flattenLoggingVariant(c *container.NodePoolLoggingConfig) string { + variant := "DEFAULT" + if c != nil && c.VariantConfig != nil && c.VariantConfig.Variant != "" { + variant = c.VariantConfig.Variant + } + return variant +} + +func flattenGcfsConfig(c *container.GcfsConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enabled": c.Enabled, + }) + } + return result +} + +func flattenGvnic(c *container.VirtualNIC) []map[string]interface{} { + result := 
[]map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enabled": c.Enabled, + }) + } + return result +} + +func flattenGKEReservationAffinity(c *container.ReservationAffinity) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "consume_reservation_type": c.ConsumeReservationType, + "key": c.Key, + "values": c.Values, + }) + } + return result +} + +func flattenTaints(c []*container.NodeTaint) []map[string]interface{} { + result := []map[string]interface{}{} + for _, taint := range c { + result = append(result, map[string]interface{}{ + "key": taint.Key, + "value": taint.Value, + "effect": taint.Effect, + }) + } + return result +} + +func flattenWorkloadMetadataConfig(c *container.WorkloadMetadataConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "mode": c.Mode, + }) + } + return result +} + +func flattenKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "cpu_cfs_quota": c.CpuCfsQuota, + "cpu_cfs_quota_period": c.CpuCfsQuotaPeriod, + "cpu_manager_policy": c.CpuManagerPolicy, + "pod_pids_limit": c.PodPidsLimit, + }) + } + return result +} + +func flattenLinuxNodeConfig(c *container.LinuxNodeConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "sysctls": c.Sysctls, + }) + } + return result +} + +func flattenSoleTenantConfig(c *container.SoleTenantConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c == nil { + return result + } + affinities := []map[string]interface{}{} + for _, affinity := range c.NodeAffinities { + affinities = append(affinities, map[string]interface{}{ + "key": affinity.Key, + "operator": 
affinity.Operator, + "values": affinity.Values, + }) + } + return append(result, map[string]interface{}{ + "node_affinity": affinities, + }) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster.go similarity index 91% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_cluster.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster.go index d3221fda7e..ea0be96779 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package container import ( "context" @@ -16,11 +18,15 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/container/v1" ) var ( - instanceGroupManagerURL = regexp.MustCompile(fmt.Sprintf("projects/(%s)/zones/([a-z0-9-]*)/instanceGroupManagers/([^/]*)", ProjectRegex)) + instanceGroupManagerURL = regexp.MustCompile(fmt.Sprintf("projects/(%s)/zones/([a-z0-9-]*)/instanceGroupManagers/([^/]*)", verify.ProjectRegex)) masterAuthorizedNetworksConfig = &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -100,7 +106,7 @@ func clusterSchemaNodeConfig() *schema.Schema { schemaMap := nodeConfigSch.Elem.(*schema.Resource).Schema for _, k := range forceNewClusterNodeConfigFields { if sch, ok := schemaMap[k]; ok { - changeFieldSchemaToForceNew(sch) + tpgresource.ChangeFieldSchemaToForceNew(sch) } } return nodeConfigSch @@ -497,7 +503,7 @@ func ResourceContainerCluster() *schema.Resource { "min_cpu_platform": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: emptyOrDefaultStringSuppress("automatic"), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("automatic"), Description: `Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. 
Applicable values are the friendly names of CPU platforms, such as Intel Haswell.`, }, "boot_disk_kms_key": { @@ -677,7 +683,7 @@ func ResourceContainerCluster() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ValidateFunc: orEmpty(validateRFC1918Network(8, 32)), + ValidateFunc: verify.OrEmpty(verify.ValidateRFC1918Network(8, 32)), ConflictsWith: []string{"ip_allocation_policy"}, Description: `The IP address range of the Kubernetes pods in this cluster in CIDR notation (e.g. 10.96.0.0/14). Leave blank to have one automatically chosen or specify a /14 block in 10.0.0.0/8. This field will only work for routes-based clusters, where ip_allocation_policy is not defined.`, }, @@ -836,8 +842,8 @@ func ResourceContainerCluster() *schema.Resource { "start_time": { Type: schema.TypeString, Required: true, - ValidateFunc: validateRFC3339Time, - DiffSuppressFunc: rfc3339TimeDiffSuppress, + ValidateFunc: verify.ValidateRFC3339Time, + DiffSuppressFunc: tpgresource.Rfc3339TimeDiffSuppress, }, "duration": { Type: schema.TypeString, @@ -860,12 +866,12 @@ func ResourceContainerCluster() *schema.Resource { "start_time": { Type: schema.TypeString, Required: true, - ValidateFunc: validateRFC3339Date, + ValidateFunc: verify.ValidateRFC3339Date, }, "end_time": { Type: schema.TypeString, Required: true, - ValidateFunc: validateRFC3339Date, + ValidateFunc: verify.ValidateRFC3339Date, }, "recurrence": { Type: schema.TypeString, @@ -889,12 +895,12 @@ func ResourceContainerCluster() *schema.Resource { "start_time": { Type: schema.TypeString, Required: true, - ValidateFunc: validateRFC3339Date, + ValidateFunc: verify.ValidateRFC3339Date, }, "end_time": { Type: schema.TypeString, Required: true, - ValidateFunc: validateRFC3339Date, + ValidateFunc: verify.ValidateRFC3339Date, }, "exclusion_options": { Type: schema.TypeList, @@ -919,6 +925,33 @@ func ResourceContainerCluster() *schema.Resource { }, }, + "security_posture_config": { + Type: schema.TypeList, + Optional: true, + 
MaxItems: 1, + Computed: true, + Description: `Defines the config needed to enable/disable features for the Security Posture API`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"DISABLED", "BASIC", "MODE_UNSPECIFIED"}, false), + Description: `Sets the mode of the Kubernetes security posture API's off-cluster features. Available options include DISABLED and BASIC.`, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("MODE_UNSPECIFIED"), + }, + "vulnerability_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"VULNERABILITY_DISABLED", "VULNERABILITY_BASIC", "VULNERABILITY_MODE_UNSPECIFIED"}, false), + Description: `Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. Available options include VULNERABILITY_DISABLED and VULNERABILITY_BASIC.`, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("VULNERABILITY_MODE_UNSPECIFIED"), + }, + }, + }, + }, "monitoring_config": { Type: schema.TypeList, Optional: true, @@ -929,7 +962,8 @@ func ResourceContainerCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "enable_components": { Type: schema.TypeList, - Required: true, + Optional: true, + Computed: true, Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, and SCHEDULER.`, Elem: &schema.Schema{ Type: schema.TypeString, @@ -1102,7 +1136,7 @@ func ResourceContainerCluster() *schema.Resource { Optional: true, Default: "default", ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name or self_link of the Google Compute Engine network to which the cluster is connected. 
For Shared VPC, set this to the self link of the shared network.`, }, @@ -1125,7 +1159,7 @@ func ResourceContainerCluster() *schema.Resource { Default: "PROVIDER_UNSPECIFIED", Optional: true, ValidateFunc: validation.StringInSlice([]string{"PROVIDER_UNSPECIFIED", "CALICO"}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("PROVIDER_UNSPECIFIED"), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("PROVIDER_UNSPECIFIED"), Description: `The selected network policy provider. Defaults to PROVIDER_UNSPECIFIED.`, }, }, @@ -1168,7 +1202,7 @@ func ResourceContainerCluster() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name or self_link of the Google Compute Engine subnetwork in which the cluster's instances are launched.`, }, @@ -1213,7 +1247,7 @@ func ResourceContainerCluster() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: ipAllocationRangeFields, - DiffSuppressFunc: cidrOrSizeDiffSuppress, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, Description: `The IP address range for the cluster pod IPs. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use.`, }, @@ -1223,7 +1257,7 @@ func ResourceContainerCluster() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: ipAllocationRangeFields, - DiffSuppressFunc: cidrOrSizeDiffSuppress, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, Description: `The IP address range of the services IPs in this cluster. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 
10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use.`, }, @@ -1245,6 +1279,31 @@ func ResourceContainerCluster() *schema.Resource { ConflictsWith: ipAllocationCidrBlockFields, Description: `The name of the existing secondary range in the cluster's subnetwork to use for service ClusterIPs. Alternatively, services_ipv4_cidr_block can be used to automatically create a GKE-managed one.`, }, + + "stack_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "IPV4", + ValidateFunc: validation.StringInSlice([]string{"IPV4", "IPV4_IPV6"}, false), + Description: `The IP Stack type of the cluster. Choose between IPV4 and IPV4_IPV6. Default type is IPV4 Only if not set`, + }, + "pod_cidr_overprovision_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Description: `Configuration for cluster level pod cidr overprovision. Default is disabled=false.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, }, }, }, @@ -1300,7 +1359,7 @@ func ResourceContainerCluster() *schema.Resource { Optional: true, ForceNew: true, AtLeastOneOf: privateClusterConfigKeys, - ValidateFunc: orEmpty(validation.IsCIDRNetwork(28, 28)), + ValidateFunc: verify.OrEmpty(validation.IsCIDRNetwork(28, 28)), Description: `The IP range in CIDR notation to use for the hosted master network. This range will be used for assigning private IP addresses to the cluster master(s) and the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network, and it must be a /28 subnet. See Private Cluster Limitations for more details. 
This field only applies to private clusters, when enable_private_nodes is true.`, }, "peering_name": { @@ -1318,7 +1377,7 @@ func ResourceContainerCluster() *schema.Resource { Optional: true, ForceNew: true, AtLeastOneOf: privateClusterConfigKeys, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `Subnetwork in cluster's network where master's endpoint will be provisioned.`, }, "public_endpoint": { @@ -1515,7 +1574,7 @@ func ResourceContainerCluster() *schema.Resource { Computed: true, Description: `The desired datapath provider for this cluster. By default, uses the IPTables-based kube-proxy implementation.`, ValidateFunc: validation.StringInSlice([]string{"DATAPATH_PROVIDER_UNSPECIFIED", "LEGACY_DATAPATH", "ADVANCED_DATAPATH"}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("DATAPATH_PROVIDER_UNSPECIFIED"), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("DATAPATH_PROVIDER_UNSPECIFIED"), }, "enable_intranode_visibility": { @@ -1625,6 +1684,7 @@ func ResourceContainerCluster() *schema.Resource { "gateway_api_config": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, Description: `Configuration for GKE Gateway API controller.`, Elem: &schema.Resource{ @@ -1632,7 +1692,7 @@ func ResourceContainerCluster() *schema.Resource { "channel": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{"CHANNEL_DISABLED", "CHANNEL_STANDARD"}, false), + ValidateFunc: validation.StringInSlice([]string{"CHANNEL_DISABLED", "CHANNEL_EXPERIMENTAL", "CHANNEL_STANDARD"}, false), Description: `The Gateway API release channel to use for Gateway API.`, }, }, @@ -1700,25 +1760,25 @@ func resourceNodeConfigEmptyGuestAccelerator(_ context.Context, diff *schema.Res } func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - location, err := getLocation(d, config) + location, err := tpgresource.GetLocation(d, config) if err != nil { return err } clusterName := d.Get("name").(string) - ipAllocationBlock, err := expandIPAllocationPolicy(d.Get("ip_allocation_policy"), d.Get("networking_mode").(string)) + ipAllocationBlock, err := expandIPAllocationPolicy(d.Get("ip_allocation_policy"), d.Get("networking_mode").(string), d.Get("enable_autopilot").(bool)) if err != nil { return err } @@ -1761,7 +1821,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er MasterAuth: expandMasterAuth(d.Get("master_auth")), NotificationConfig: expandNotificationConfig(d.Get("notification_config")), ConfidentialNodes: expandConfidentialNodes(d.Get("confidential_nodes")), - ResourceLabels: expandStringMap(d, "resource_labels"), + ResourceLabels: tpgresource.ExpandStringMap(d, "resource_labels"), CostManagementConfig: expandCostManagementConfig(d.Get("cost_management_config")), } @@ -1795,14 +1855,14 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er // GKE requires a full list of node locations // but when using a multi-zonal cluster our schema only asks for the // additional zones, so append the cluster location if it's a zone - if isZone(location) { + if tpgresource.IsZone(location) { locationsSet.Add(location) } - cluster.Locations = convertStringSet(locationsSet) + cluster.Locations = tpgresource.ConvertStringSet(locationsSet) } if v, ok := d.GetOk("network"); ok { - network, err := ParseNetworkFieldValue(v.(string), d, config) + network, err := tpgresource.ParseNetworkFieldValue(v.(string), d, config) if err != nil { return err } @@ -1810,7 +1870,7 @@ func 
resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er } if v, ok := d.GetOk("subnetwork"); ok { - subnetwork, err := parseRegionalFieldValue("subnetworks", v.(string), "project", "location", "location", d, config, true) // variant of ParseSubnetworkFieldValue + subnetwork, err := tpgresource.ParseRegionalFieldValue("subnetworks", v.(string), "project", "location", "location", d, config, true) // variant of ParseSubnetworkFieldValue if err != nil { return err } @@ -1887,22 +1947,28 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er return err } + if v, ok := d.GetOk("security_posture_config"); ok { + cluster.SecurityPostureConfig = expandSecurityPostureConfig(v) + } + req := &container.CreateClusterRequest{ Cluster: cluster, } - mutexKV.Lock(containerClusterMutexKey(project, location, clusterName)) - defer mutexKV.Unlock(containerClusterMutexKey(project, location, clusterName)) + transport_tpg.MutexStore.Lock(containerClusterMutexKey(project, location, clusterName)) + defer transport_tpg.MutexStore.Unlock(containerClusterMutexKey(project, location, clusterName)) parent := fmt.Sprintf("projects/%s/locations/%s", project, location) var op *container.Operation - err = retry(func() error { - clusterCreateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Create(parent, req) - if config.UserProjectOverride { - clusterCreateCall.Header().Add("X-Goog-User-Project", project) - } - op, err = clusterCreateCall.Do() - return err + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + clusterCreateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Create(parent, req) + if config.UserProjectOverride { + clusterCreateCall.Header().Add("X-Goog-User-Project", project) + } + op, err = clusterCreateCall.Do() + return err + }, }) if err != nil { return err @@ -1911,14 +1977,14 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) 
er d.SetId(containerClusterFullName(project, location, clusterName)) // Wait until it's created - waitErr := containerOperationWait(config, op, project, location, "creating GKE cluster", userAgent, d.Timeout(schema.TimeoutCreate)) + waitErr := ContainerOperationWait(config, op, project, location, "creating GKE cluster", userAgent, d.Timeout(schema.TimeoutCreate)) if waitErr != nil { // Check if the create operation failed because Terraform was prematurely terminated. If it was we can persist the // operation id to state so that a subsequent refresh of this resource will wait until the operation has terminated // before attempting to Read the state of the cluster. This allows a graceful resumption of a Create that was killed // by the upstream Terraform process exiting early such as a sigterm. select { - case <-config.context.Done(): + case <-config.Context.Done(): log.Printf("[DEBUG] Persisting %s so this operation can be resumed \n", op.Name) if err := d.Set("operation", op.Name); err != nil { return fmt.Errorf("Error setting operation: %s", err) @@ -1953,18 +2019,20 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er if d.Get("remove_default_node_pool").(bool) { parent := fmt.Sprintf("%s/nodePools/%s", containerClusterFullName(project, location, clusterName), "default-pool") - err = retry(func() error { - clusterNodePoolDeleteCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Delete(parent) - if config.UserProjectOverride { - clusterNodePoolDeleteCall.Header().Add("X-Goog-User-Project", project) - } - op, err = clusterNodePoolDeleteCall.Do() - return err + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + clusterNodePoolDeleteCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Delete(parent) + if config.UserProjectOverride { + clusterNodePoolDeleteCall.Header().Add("X-Goog-User-Project", project) + } + op, err = 
clusterNodePoolDeleteCall.Do() + return err + }, }) if err != nil { return errwrap.Wrapf("Error deleting default node pool: {{err}}", err) } - err = containerOperationWait(config, op, project, location, "removing default node pool", userAgent, d.Timeout(schema.TimeoutCreate)) + err = ContainerOperationWait(config, op, project, location, "removing default node pool", userAgent, d.Timeout(schema.TimeoutCreate)) if err != nil { return errwrap.Wrapf("Error while waiting to delete default node pool: {{err}}", err) } @@ -1987,18 +2055,18 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er } func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - location, err := getLocation(d, config) + location, err := tpgresource.GetLocation(d, config) if err != nil { return err } @@ -2014,7 +2082,7 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro if err := d.Set("operation", ""); err != nil { return fmt.Errorf("Error setting operation: %s", err) } - waitErr := containerOperationWait(config, op, project, location, "resuming GKE cluster", userAgent, d.Timeout(schema.TimeoutRead)) + waitErr := ContainerOperationWait(config, op, project, location, "resuming GKE cluster", userAgent, d.Timeout(schema.TimeoutRead)) if waitErr != nil { // Try a GET on the cluster so we can see the state in debug logs. This will help classify error states. 
clusterGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Get(containerClusterFullName(project, location, clusterName)) @@ -2047,7 +2115,7 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro cluster, err := clusterGetCall.Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string))) } if err := d.Set("name", cluster.Name); err != nil { @@ -2061,7 +2129,7 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error setting location: %s", err) } - locations := schema.NewSet(schema.HashString, convertStringArrToInterface(cluster.Locations)) + locations := schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(cluster.Locations)) locations.Remove(cluster.Zone) // Remove the original zone since we only store additional zones if err := d.Set("node_locations", locations); err != nil { return fmt.Errorf("Error setting node_locations: %s", err) @@ -2261,22 +2329,26 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro return err } + if err := d.Set("security_posture_config", flattenSecurityPostureConfig(cluster.SecurityPostureConfig)); err != nil { + return err + } + return nil } func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - location, err := getLocation(d, config) + location, err := tpgresource.GetLocation(d, config) if err != nil { return err } @@ -2303,7 +2375,7 
@@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er return err } // Wait until it's updated - return containerOperationWait(config, op, project, location, updateDescription, userAgent, d.Timeout(schema.TimeoutUpdate)) + return ContainerOperationWait(config, op, project, location, updateDescription, userAgent, d.Timeout(schema.TimeoutUpdate)) } } @@ -2319,7 +2391,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } updateF := updateFunc(req, "updating GKE cluster master authorized networks") - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } log.Printf("[INFO] GKE cluster %s master authorized networks config has been updated", d.Id()) @@ -2335,7 +2407,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er updateF := updateFunc(req, "updating GKE cluster addons") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2351,7 +2423,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er updateF := updateFunc(req, "updating GKE cluster autoscaling") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2371,7 +2443,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er updateF := updateFunc(req, "updating GKE binary authorization") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2389,7 +2461,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er updateF := updateFunc(req, "updating enable private endpoint") // Call update serially. 
- if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2409,7 +2481,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er updateF := updateFunc(req, "updating master global access config") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2425,7 +2497,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er updateF := updateFunc(req, "updating GKE binary authorization") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2445,7 +2517,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er updateF := updateFunc(req, "updating GKE shielded nodes") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2471,13 +2543,13 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } // Wait until it's updated - err = containerOperationWait(config, op, project, location, "updating Release Channel", userAgent, d.Timeout(schema.TimeoutUpdate)) + err = ContainerOperationWait(config, op, project, location, "updating Release Channel", userAgent, d.Timeout(schema.TimeoutUpdate)) log.Println("[DEBUG] done updating release_channel") return err } // Call update serially. 
- if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2507,13 +2579,13 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } // Wait until it's updated - err = containerOperationWait(config, op, project, location, "updating GKE Intra Node Visibility", userAgent, d.Timeout(schema.TimeoutUpdate)) + err = ContainerOperationWait(config, op, project, location, "updating GKE Intra Node Visibility", userAgent, d.Timeout(schema.TimeoutUpdate)) log.Println("[DEBUG] done updating enable_intranode_visibility") return err } // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2539,13 +2611,13 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } // Wait until it's updated - err = containerOperationWait(config, op, project, location, "updating GKE Private IPv6 Google Access", userAgent, d.Timeout(schema.TimeoutUpdate)) + err = ContainerOperationWait(config, op, project, location, "updating GKE Private IPv6 Google Access", userAgent, d.Timeout(schema.TimeoutUpdate)) log.Println("[DEBUG] done updating private_ipv6_google_access") return err } // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2576,13 +2648,13 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } // Wait until it's updated - err = containerOperationWait(config, op, project, location, "updating L4", userAgent, d.Timeout(schema.TimeoutUpdate)) + err = ContainerOperationWait(config, op, project, location, "updating L4", userAgent, d.Timeout(schema.TimeoutUpdate)) log.Println("[DEBUG] done updating enable_intranode_visibility") return err } // Call update serially. 
- if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2599,7 +2671,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er updateF := updateFunc(req, "updating cost management config") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2614,7 +2686,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } updateF := updateFunc(req, "updating GKE cluster authenticator groups config") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2640,13 +2712,13 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } // Wait until it's updated - err = containerOperationWait(config, op, project, location, "updating GKE Default SNAT status", userAgent, d.Timeout(schema.TimeoutUpdate)) + err = ContainerOperationWait(config, op, project, location, "updating GKE Default SNAT status", userAgent, d.Timeout(schema.TimeoutUpdate)) log.Println("[DEBUG] done updating default_snat_status") return err } // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2671,11 +2743,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } // Wait until it's updated - return containerOperationWait(config, op, project, location, "updating GKE cluster maintenance policy", userAgent, d.Timeout(schema.TimeoutUpdate)) + return ContainerOperationWait(config, op, project, location, "updating GKE cluster maintenance policy", userAgent, d.Timeout(schema.TimeoutUpdate)) } // Call update serially. 
- if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2693,35 +2765,35 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er // zones, then remove the ones we aren't using anymore. azSet := azSetOld.Union(azSetNew) - if isZone(location) { + if tpgresource.IsZone(location) { azSet.Add(location) } req := &container.UpdateClusterRequest{ Update: &container.ClusterUpdate{ - DesiredLocations: convertStringSet(azSet), + DesiredLocations: tpgresource.ConvertStringSet(azSet), }, } updateF := updateFunc(req, "updating GKE cluster node locations") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } - if isZone(location) { + if tpgresource.IsZone(location) { azSetNew.Add(location) } if !azSet.Equal(azSetNew) { req = &container.UpdateClusterRequest{ Update: &container.ClusterUpdate{ - DesiredLocations: convertStringSet(azSetNew), + DesiredLocations: tpgresource.ConvertStringSet(azSetNew), }, } updateF := updateFunc(req, "updating GKE cluster node locations") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } } @@ -2749,13 +2821,13 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } // Wait until it's updated - err = containerOperationWait(config, op, project, location, "updating GKE legacy ABAC", userAgent, d.Timeout(schema.TimeoutUpdate)) + err = ContainerOperationWait(config, op, project, location, "updating GKE legacy ABAC", userAgent, d.Timeout(schema.TimeoutUpdate)) log.Println("[DEBUG] done updating enable_legacy_abac") return err } // Call update serially. 
- if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2784,11 +2856,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } // Wait until it's updated - return containerOperationWait(config, op, project, location, "updating GKE logging+monitoring service", userAgent, d.Timeout(schema.TimeoutUpdate)) + return ContainerOperationWait(config, op, project, location, "updating GKE logging+monitoring service", userAgent, d.Timeout(schema.TimeoutUpdate)) } // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2814,13 +2886,13 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } // Wait until it's updated - err = containerOperationWait(config, op, project, location, "updating GKE cluster network policy", userAgent, d.Timeout(schema.TimeoutUpdate)) + err = ContainerOperationWait(config, op, project, location, "updating GKE cluster network policy", userAgent, d.Timeout(schema.TimeoutUpdate)) log.Println("[DEBUG] done updating network_policy") return err } // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2865,7 +2937,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er updateF := updateFunc(req, "updating GKE master version") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } log.Printf("[INFO] GKE cluster %s: master has been updated to %s", d.Id(), ver) @@ -2889,7 +2961,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } updateF := updateFunc(req, "updating GKE default node pool node version") // Call update serially. 
- if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } log.Printf("[INFO] GKE cluster %s: default node pool has been updated to %s", d.Id(), @@ -2925,11 +2997,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } // Wait until it's updated - return containerOperationWait(config, op, project, location, "updating GKE image type", userAgent, d.Timeout(schema.TimeoutUpdate)) + return ContainerOperationWait(config, op, project, location, "updating GKE image type", userAgent, d.Timeout(schema.TimeoutUpdate)) } // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2956,13 +3028,13 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } // Wait until it's updated - err = containerOperationWait(config, op, project, location, "updating Notification Config", userAgent, d.Timeout(schema.TimeoutUpdate)) + err = ContainerOperationWait(config, op, project, location, "updating Notification Config", userAgent, d.Timeout(schema.TimeoutUpdate)) log.Println("[DEBUG] done updating notification_config") return err } // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -2979,7 +3051,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er updateF := updateFunc(req, "updating GKE cluster vertical pod autoscaling") // Call update serially. 
- if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -3006,9 +3078,9 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er return err } // Wait until it's updated - return containerOperationWait(config, op, project, location, "updating GKE cluster service externalips config", userAgent, d.Timeout(schema.TimeoutUpdate)) + return ContainerOperationWait(config, op, project, location, "updating GKE cluster service externalips config", userAgent, d.Timeout(schema.TimeoutUpdate)) } - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } log.Printf("[INFO] GKE cluster %s service externalips config has been updated", d.Id()) @@ -3033,9 +3105,9 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er return err } // Wait until it's updated - return containerOperationWait(config, op, project, location, "updating GKE cluster mesh certificates config", userAgent, d.Timeout(schema.TimeoutUpdate)) + return ContainerOperationWait(config, op, project, location, "updating GKE cluster mesh certificates config", userAgent, d.Timeout(schema.TimeoutUpdate)) } - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } log.Printf("[INFO] GKE cluster %s mesh certificates config has been updated", d.Id()) @@ -3060,9 +3132,9 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er return err } // Wait until it's updated - return containerOperationWait(config, op, project, location, "updating GKE cluster database encryption config", userAgent, d.Timeout(schema.TimeoutUpdate)) + return ContainerOperationWait(config, op, project, location, "updating GKE cluster database encryption config", userAgent, d.Timeout(schema.TimeoutUpdate)) } - if err := lockedCall(lockKey, 
updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } log.Printf("[INFO] GKE cluster %s database encryption config has been updated", d.Id()) @@ -3088,7 +3160,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er updateF := updateFunc(req, "updating GKE cluster workload identity config") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -3103,7 +3175,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } updateF := updateFunc(req, "updating GKE cluster logging config") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -3118,7 +3190,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } updateF := updateFunc(req, "updating GKE cluster monitoring config") // Call update serially. 
- if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -3129,7 +3201,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er resourceLabels := d.Get("resource_labels").(map[string]interface{}) labelFingerprint := d.Get("label_fingerprint").(string) req := &container.SetLabelsRequest{ - ResourceLabels: convertStringMap(resourceLabels), + ResourceLabels: tpgresource.ConvertStringMap(resourceLabels), LabelFingerprint: labelFingerprint, } updateF := func() error { @@ -3144,11 +3216,11 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } // Wait until it's updated - return containerOperationWait(config, op, project, location, "updating GKE resource labels", userAgent, d.Timeout(schema.TimeoutUpdate)) + return ContainerOperationWait(config, op, project, location, "updating GKE resource labels", userAgent, d.Timeout(schema.TimeoutUpdate)) } // Call update serially. 
- if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } } @@ -3161,12 +3233,12 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } op, err := clusterNodePoolDeleteCall.Do() if err != nil { - if !IsGoogleApiErrorWithCode(err, 404) { + if !transport_tpg.IsGoogleApiErrorWithCode(err, 404) { return errwrap.Wrapf("Error deleting default node pool: {{err}}", err) } log.Printf("[WARN] Container cluster %q default node pool already removed, no change", d.Id()) } else { - err = containerOperationWait(config, op, project, location, "removing default node pool", userAgent, d.Timeout(schema.TimeoutUpdate)) + err = ContainerOperationWait(config, op, project, location, "removing default node pool", userAgent, d.Timeout(schema.TimeoutUpdate)) if err != nil { return errwrap.Wrapf("Error deleting default node pool: {{err}}", err) } @@ -3192,9 +3264,9 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er return err } // Wait until it's updated - return containerOperationWait(config, op, project, location, "updating GKE cluster resource usage export config", userAgent, d.Timeout(schema.TimeoutUpdate)) + return ContainerOperationWait(config, op, project, location, "updating GKE cluster resource usage export config", userAgent, d.Timeout(schema.TimeoutUpdate)) } - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } log.Printf("[INFO] GKE cluster %s resource usage export config has been updated", d.Id()) @@ -3210,7 +3282,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er updateF := updateFunc(req, "updating GKE Gateway API") // Call update serially. 
- if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -3233,7 +3305,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er updateF := updateFunc(req, "updating GKE cluster desired node pool logging configuration defaults.") // Call update serially. - if err := lockedCall(lockKey, updateF); err != nil { + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { return err } @@ -3241,6 +3313,20 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } } + if d.HasChange("security_posture_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredSecurityPostureConfig: expandSecurityPostureConfig(d.Get("security_posture_config")), + }, + } + updateF := updateFunc(req, "updating GKE cluster master Security Posture Config") + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s Security Posture Config has been updated to %#v", d.Id(), req.Update.DesiredSecurityPostureConfig) + } + d.Partial(false) if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(schema.TimeoutUpdate)); err != nil { @@ -3251,18 +3337,18 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - location, err := getLocation(d, config) + location, err := tpgresource.GetLocation(d, config) if err != nil { return err } @@ 
-3270,7 +3356,7 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er clusterName := d.Get("name").(string) if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(schema.TimeoutDelete)); err != nil { - if IsGoogleApiErrorWithCode(err, 404) { + if transport_tpg.IsGoogleApiErrorWithCode(err, 404) { log.Printf("[INFO] GKE cluster %s doesn't exist to delete", d.Id()) return nil } @@ -3278,8 +3364,8 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er } log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string)) - mutexKV.Lock(containerClusterMutexKey(project, location, clusterName)) - defer mutexKV.Unlock(containerClusterMutexKey(project, location, clusterName)) + transport_tpg.MutexStore.Lock(containerClusterMutexKey(project, location, clusterName)) + defer transport_tpg.MutexStore.Unlock(containerClusterMutexKey(project, location, clusterName)) var op *container.Operation var count = 0 @@ -3309,7 +3395,7 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er } // Wait until it's deleted - waitErr := containerOperationWait(config, op, project, location, "deleting GKE cluster", userAgent, d.Timeout(schema.TimeoutDelete)) + waitErr := ContainerOperationWait(config, op, project, location, "deleting GKE cluster", userAgent, d.Timeout(schema.TimeoutDelete)) if waitErr != nil { return waitErr } @@ -3326,19 +3412,19 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er // but implemented in separate function as it doesn't try to lock already // locked cluster state, does different error handling, and doesn't do retries. 
func cleanFailedContainerCluster(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - location, err := getLocation(d, config) + location, err := tpgresource.GetLocation(d, config) if err != nil { return err } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -3353,11 +3439,11 @@ func cleanFailedContainerCluster(d *schema.ResourceData, meta interface{}) error } op, err := clusterDeleteCall.Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string))) } // Wait until it's deleted - waitErr := containerOperationWait(config, op, project, location, "deleting GKE cluster", userAgent, d.Timeout(schema.TimeoutDelete)) + waitErr := ContainerOperationWait(config, op, project, location, "deleting GKE cluster", userAgent, d.Timeout(schema.TimeoutDelete)) if waitErr != nil { return waitErr } @@ -3374,7 +3460,7 @@ var containerClusterRestingStates = RestingStates{ } // returns a state with no error if the state is a resting state, and the last state with an error otherwise -func containerClusterAwaitRestingState(config *Config, project, location, clusterName, userAgent string, timeout time.Duration) (state string, err error) { +func containerClusterAwaitRestingState(config *transport_tpg.Config, project, location, clusterName, userAgent string, timeout time.Duration) (state string, err error) { err = resource.Retry(timeout, func() *resource.RetryError { name := containerClusterFullName(project, location, clusterName) clusterGetCall := 
config.NewContainerClient(userAgent).Projects.Locations.Clusters.Get(name) @@ -3488,42 +3574,61 @@ func expandClusterAddonsConfig(configured interface{}) *container.AddonsConfig { return ac } -func expandIPAllocationPolicy(configured interface{}, networkingMode string) (*container.IPAllocationPolicy, error) { +func expandPodCidrOverprovisionConfig(configured interface{}) *container.PodCIDROverprovisionConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + config := l[0].(map[string]interface{}) + return &container.PodCIDROverprovisionConfig{ + Disable: config["disabled"].(bool), + ForceSendFields: []string{"Disable"}, + } +} + +func expandIPAllocationPolicy(configured interface{}, networkingMode string, autopilot bool) (*container.IPAllocationPolicy, error) { l := configured.([]interface{}) if len(l) == 0 || l[0] == nil { if networkingMode == "VPC_NATIVE" { + if autopilot { + return nil, nil + } return nil, fmt.Errorf("`ip_allocation_policy` block is required for VPC_NATIVE clusters.") } return &container.IPAllocationPolicy{ UseIpAliases: false, UseRoutes: true, + StackType: "IPV4", ForceSendFields: []string{"UseIpAliases"}, }, nil } config := l[0].(map[string]interface{}) - return &container.IPAllocationPolicy{ - UseIpAliases: networkingMode == "VPC_NATIVE" || networkingMode == "", - ClusterIpv4CidrBlock: config["cluster_ipv4_cidr_block"].(string), - ServicesIpv4CidrBlock: config["services_ipv4_cidr_block"].(string), + stackType := config["stack_type"].(string) + return &container.IPAllocationPolicy{ + UseIpAliases: networkingMode == "VPC_NATIVE" || networkingMode == "", + ClusterIpv4CidrBlock: config["cluster_ipv4_cidr_block"].(string), + ServicesIpv4CidrBlock: config["services_ipv4_cidr_block"].(string), ClusterSecondaryRangeName: config["cluster_secondary_range_name"].(string), ServicesSecondaryRangeName: config["services_secondary_range_name"].(string), ForceSendFields: []string{"UseIpAliases"}, UseRoutes: 
networkingMode == "ROUTES", + StackType: stackType, + PodCidrOverprovisionConfig: expandPodCidrOverprovisionConfig(config["pod_cidr_overprovision_config"]), }, nil } func expandMaintenancePolicy(d *schema.ResourceData, meta interface{}) *container.MaintenancePolicy { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) // We have to perform a full Get() as part of this, to get the fingerprint. We can't do this // at any other time, because the fingerprint update might happen between plan and apply. // We can omit error checks, since to have gotten this far, a project is definitely configured. - project, _ := getProject(d, config) - location, _ := getLocation(d, config) + project, _ := tpgresource.GetProject(d, config) + location, _ := tpgresource.GetLocation(d, config) clusterName := d.Get("name").(string) name := containerClusterFullName(project, location, clusterName) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil } @@ -3669,7 +3774,7 @@ func expandAutoProvisioningDefaults(configured interface{}, d *schema.ResourceDa config := l[0].(map[string]interface{}) npd := &container.AutoprovisioningNodePoolDefaults{ - OauthScopes: convertStringArr(config["oauth_scopes"].([]interface{})), + OauthScopes: tpgresource.ConvertStringArr(config["oauth_scopes"].([]interface{})), ServiceAccount: config["service_account"].(string), DiskSizeGb: int64(config["disk_size"].(int)), DiskType: config["disk_type"].(string), @@ -3748,7 +3853,7 @@ func expandStandardRolloutPolicy(configured interface{}) *container.StandardRoll func expandManagement(configured interface{}) *container.NodeManagement { l, ok := configured.([]interface{}) if !ok || l == nil || len(l) == 0 || l[0] == nil { - return &container.NodeManagement{} + return nil } config := l[0].(map[string]interface{}) @@ -3790,6 +3895,36 @@ func expandAuthenticatorGroupsConfig(configured interface{}) 
*container.Authenti return result } +func expandSecurityPostureConfig(configured interface{}) *container.SecurityPostureConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + spc := &container.SecurityPostureConfig{} + spConfig := l[0].(map[string]interface{}) + if v, ok := spConfig["mode"]; ok { + spc.Mode = v.(string) + } + + if v, ok := spConfig["vulnerability_mode"]; ok { + spc.VulnerabilityMode = v.(string) + } + return spc +} + +func flattenSecurityPostureConfig(spc *container.SecurityPostureConfig) []map[string]interface{} { + if spc == nil { + return nil + } + result := make(map[string]interface{}) + + result["mode"] = spc.Mode + result["vulnerability_mode"] = spc.VulnerabilityMode + + return []map[string]interface{}{result} +} + func expandNotificationConfig(configured interface{}) *container.NotificationConfig { l := configured.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -3816,7 +3951,7 @@ func expandNotificationConfig(configured interface{}) *container.NotificationCon filter := vv.([]interface{})[0].(map[string]interface{}) eventType := filter["event_type"].([]interface{}) nc.Pubsub.Filter = &container.Filter{ - EventType: convertStringArr(eventType), + EventType: tpgresource.ConvertStringArr(eventType), } } @@ -4129,7 +4264,7 @@ func expandContainerClusterLoggingConfig(configured interface{}) *container.Logg var components []string if l[0] != nil { config := l[0].(map[string]interface{}) - components = convertStringArr(config["enable_components"].([]interface{})) + components = tpgresource.ConvertStringArr(config["enable_components"].([]interface{})) } return &container.LoggingConfig{ @@ -4150,7 +4285,7 @@ func expandMonitoringConfig(configured interface{}) *container.MonitoringConfig if v, ok := config["enable_components"]; ok { enable_components := v.([]interface{}) mc.ComponentConfig = &container.MonitoringComponentConfig{ - EnableComponents: convertStringArr(enable_components), + EnableComponents: 
tpgresource.ConvertStringArr(enable_components), } } if v, ok := config["managed_prometheus"]; ok && len(v.([]interface{})) > 0 { @@ -4362,7 +4497,7 @@ func flattenClusterAddonsConfig(c *container.AddonsConfig) []map[string]interfac return []map[string]interface{}{result} } -func flattenClusterNodePools(d *schema.ResourceData, config *Config, c []*container.NodePool) ([]map[string]interface{}, error) { +func flattenClusterNodePools(d *schema.ResourceData, config *transport_tpg.Config, c []*container.NodePool) ([]map[string]interface{}, error) { nodePools := make([]map[string]interface{}, 0, len(c)) for i, np := range c { @@ -4452,7 +4587,7 @@ func flattenDefaultSnatStatus(c *container.DefaultSnatStatus) []map[string]inter return result } -func flattenWorkloadIdentityConfig(c *container.WorkloadIdentityConfig, d *schema.ResourceData, config *Config) []map[string]interface{} { +func flattenWorkloadIdentityConfig(c *container.WorkloadIdentityConfig, d *schema.ResourceData, config *transport_tpg.Config) []map[string]interface{} { if c == nil { return nil } @@ -4464,7 +4599,19 @@ func flattenWorkloadIdentityConfig(c *container.WorkloadIdentityConfig, d *schem } } -func flattenIPAllocationPolicy(c *container.Cluster, d *schema.ResourceData, config *Config) ([]map[string]interface{}, error) { +func flattenPodCidrOverprovisionConfig(c *container.PodCIDROverprovisionConfig) []map[string]interface{} { + if c == nil { + return nil + } + + return []map[string]interface{}{ + { + "disabled": c.Disable, + }, + } +} + +func flattenIPAllocationPolicy(c *container.Cluster, d *schema.ResourceData, config *transport_tpg.Config) ([]map[string]interface{}, error) { // If IP aliasing isn't enabled, none of the values in this block can be set. 
if c == nil || c.IpAllocationPolicy == nil || !c.IpAllocationPolicy.UseIpAliases { if err := d.Set("networking_mode", "ROUTES"); err != nil { @@ -4477,12 +4624,22 @@ func flattenIPAllocationPolicy(c *container.Cluster, d *schema.ResourceData, con } p := c.IpAllocationPolicy + + // handle older clusters that return JSON null + // corresponding to "STACK_TYPE_UNSPECIFIED" due to GKE declining to backfill + // equivalent to default_if_empty + if p.StackType == "" { + p.StackType = "IPV4" + } + return []map[string]interface{}{ { "cluster_ipv4_cidr_block": p.ClusterIpv4CidrBlock, "services_ipv4_cidr_block": p.ServicesIpv4CidrBlock, "cluster_secondary_range_name": p.ClusterSecondaryRangeName, "services_secondary_range_name": p.ServicesSecondaryRangeName, + "stack_type": p.StackType, + "pod_cidr_overprovision_config": flattenPodCidrOverprovisionConfig(p.PodCidrOverprovisionConfig), }, }, nil } @@ -4821,22 +4978,22 @@ func flattenManagedPrometheusConfig(c *container.ManagedPrometheusConfig) []map[ } func resourceContainerClusterStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } - if err := parseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config); err != nil { + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config); err != nil { return nil, err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } - location, err := getLocation(d, config) + location, err := tpgresource.GetLocation(d, config) if err != nil { 
return nil, err } @@ -4863,13 +5020,13 @@ func containerClusterFullName(project, location, cluster string) string { return fmt.Sprintf("projects/%s/locations/%s/clusters/%s", project, location, cluster) } -func extractNodePoolInformationFromCluster(d *schema.ResourceData, config *Config, clusterName string) (*NodePoolInformation, error) { - project, err := getProject(d, config) +func extractNodePoolInformationFromCluster(d *schema.ResourceData, config *transport_tpg.Config, clusterName string) (*NodePoolInformation, error) { + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } - location, err := getLocation(d, config) + location, err := tpgresource.GetLocation(d, config) if err != nil { return nil, err } @@ -4881,11 +5038,6 @@ func extractNodePoolInformationFromCluster(d *schema.ResourceData, config *Confi }, nil } -func cidrOrSizeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // If the user specified a size and the API returned a full cidr block, suppress. - return strings.HasPrefix(new, "/") && strings.HasSuffix(old, new) -} - // Suppress unremovable default scope values from GCP. // If the default service account would not otherwise have it, the `monitoring.write` scope // is added to a GKE cluster's scopes regardless of what the user provided. 
@@ -4905,16 +5057,16 @@ func containerClusterAddedScopesSuppress(k, old, new string, d *schema.ResourceD } // combine what the default scopes are with what was passed - m := golangSetFromStringSlice(append(addedScopes, convertStringArr(n.([]interface{}))...)) - combined := stringSliceFromGolangSet(m) + m := tpgresource.GolangSetFromStringSlice(append(addedScopes, tpgresource.ConvertStringArr(n.([]interface{}))...)) + combined := tpgresource.StringSliceFromGolangSet(m) // compare if the combined new scopes and default scopes differ from the old scopes - if len(combined) != len(convertStringArr(o.([]interface{}))) { + if len(combined) != len(tpgresource.ConvertStringArr(o.([]interface{}))) { return false } for _, i := range combined { - if stringInSlice(convertStringArr(o.([]interface{})), i) { + if tpgresource.StringInSlice(tpgresource.ConvertStringArr(o.([]interface{})), i) { continue } @@ -4967,6 +5119,9 @@ func containerClusterAutopilotCustomizeDiff(_ context.Context, d *schema.Resourc if err := d.SetNew("enable_intranode_visibility", true); err != nil { return err } + if err := d.SetNew("networking_mode", "VPC_NATIVE"); err != nil { + return err + } } return nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_cluster_migrate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster_migrate.go similarity index 95% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_cluster_migrate.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster_migrate.go index 5b3d3336e1..eda75ba9a0 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_cluster_migrate.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster_migrate.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package container import ( "fmt" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster_sweeper.go new file mode 100644 index 0000000000..41c91eb9d1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_cluster_sweeper.go @@ -0,0 +1,54 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package container + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepers("gcp_container_cluster", testSweepContainerClusters) +} + +func testSweepContainerClusters(region string) error { + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Fatalf("error getting shared config for region: %s", err) + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Fatalf("error loading: %s", err) + } + + // List clusters for all zones by using "-" as the zone name + found, err := config.NewContainerClient(config.UserAgent).Projects.Zones.Clusters.List(config.Project, "-").Do() + if err != nil { + log.Printf("error listing container clusters: %s", err) + return nil + } + + if len(found.Clusters) == 0 { + log.Printf("No container clusters found.") + return nil + } + + for _, cluster := range found.Clusters { + if sweeper.IsSweepableTestResource(cluster.Name) { + log.Printf("Sweeping Container Cluster: %s", cluster.Name) + clusterURL := 
fmt.Sprintf("projects/%s/locations/%s/clusters/%s", config.Project, cluster.Location, cluster.Name) + _, err := config.NewContainerClient(config.UserAgent).Projects.Locations.Clusters.Delete(clusterURL).Do() + + if err != nil { + log.Printf("Error, failed to delete cluster %s: %s", cluster.Name, err) + return nil + } + } + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_node_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_node_pool.go similarity index 87% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_node_pool.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_node_pool.go index c4a6a6e659..95756bdc50 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_node_pool.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_node_pool.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package container import ( "fmt" @@ -11,6 +13,11 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/container/v1" ) @@ -43,7 +50,7 @@ func ResourceContainerNodePool() *schema.Resource { UseJSONNumber: true, - Schema: mergeSchemas( + Schema: tpgresource.MergeSchemas( schemaNodePool, map[string]*schema.Schema{ "project": { @@ -342,6 +349,7 @@ var schemaNodePool = map[string]*schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, Description: `The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.`, }, "pod_ipv4_cidr_block": { @@ -349,9 +357,25 @@ var schemaNodePool = map[string]*schema.Schema{ Optional: true, ForceNew: true, Computed: true, - ValidateFunc: validateIpCidrRange, + ValidateFunc: verify.ValidateIpCidrRange, Description: `The IP address range for pod IPs in this node pool. Only applicable if create_pod_range is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.`, }, + "pod_cidr_overprovision_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Description: `Configuration for node-pool level pod cidr overprovision. 
If not set, the cluster level setting will be inherited`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, }, }, }, @@ -397,7 +421,7 @@ func (nodePoolInformation *NodePoolInformation) nodePoolLockKey(nodePoolName str ) } -func extractNodePoolInformation(d *schema.ResourceData, config *Config) (*NodePoolInformation, error) { +func extractNodePoolInformation(d *schema.ResourceData, config *transport_tpg.Config) (*NodePoolInformation, error) { cluster := d.Get("cluster").(string) if fieldValues := clusterIdRegex.FindStringSubmatch(cluster); fieldValues != nil { @@ -410,12 +434,12 @@ func extractNodePoolInformation(d *schema.ResourceData, config *Config) (*NodePo } log.Printf("[DEBUG] parent cluster %s does not match regex %s", cluster, clusterIdRegex.String()) - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } - location, err := getLocation(d, config) + location, err := tpgresource.GetLocation(d, config) if err != nil { return nil, err } @@ -428,8 +452,8 @@ func extractNodePoolInformation(d *schema.ResourceData, config *Config) (*NodePo } func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -446,13 +470,13 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e // Acquire read-lock on cluster. clusterLockKey := nodePoolInfo.clusterLockKey() - mutexKV.RLock(clusterLockKey) - defer mutexKV.RUnlock(clusterLockKey) + transport_tpg.MutexStore.RLock(clusterLockKey) + defer transport_tpg.MutexStore.RUnlock(clusterLockKey) // Acquire write-lock on nodepool. 
npLockKey := nodePoolInfo.nodePoolLockKey(nodePool.Name) - mutexKV.Lock(npLockKey) - defer mutexKV.Unlock(npLockKey) + transport_tpg.MutexStore.Lock(npLockKey) + defer transport_tpg.MutexStore.Unlock(npLockKey) req := &container.CreateNodePoolRequest{ NodePool: nodePool, @@ -469,7 +493,7 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e clusterNodePoolsGetCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) } _, err = clusterNodePoolsGetCall.Do() - if err != nil && IsGoogleApiErrorWithCode(err, 404) { + if err != nil && transport_tpg.IsGoogleApiErrorWithCode(err, 404) { // Set the ID before we attempt to create if the resource doesn't exist. That // way, if we receive an error but the resource is created anyway, it will be // refreshed on the next call to apply. @@ -487,7 +511,7 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e operation, err = clusterNodePoolsCreateCall.Do() if err != nil { - if isFailedPreconditionError(err) { + if tpgresource.IsFailedPreconditionError(err) { // We get failed precondition errors if the cluster is updating // while we try to add the node pool. return resource.RetryableError(err) @@ -501,7 +525,7 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e } timeout -= time.Since(startTime) - waitErr := containerOperationWait(config, + waitErr := ContainerOperationWait(config, operation, nodePoolInfo.project, nodePoolInfo.location, "creating GKE NodePool", userAgent, timeout) @@ -511,7 +535,7 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e // before attempting to Read the state of the cluster. This allows a graceful resumption of a Create that was killed // by the upstream Terraform process exiting early such as a sigterm. 
select { - case <-config.context.Done(): + case <-config.Context.Done(): log.Printf("[DEBUG] Persisting %s so this operation can be resumed \n", operation.Name) if err := d.Set("operation", operation.Name); err != nil { return fmt.Errorf("Error setting operation: %s", err) @@ -549,8 +573,8 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e } func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -569,7 +593,7 @@ func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("operation", ""); err != nil { return fmt.Errorf("Error setting operation: %s", err) } - waitErr := containerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "resuming GKE node pool", userAgent, d.Timeout(schema.TimeoutRead)) + waitErr := ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "resuming GKE node pool", userAgent, d.Timeout(schema.TimeoutRead)) if waitErr != nil { return waitErr } @@ -583,7 +607,7 @@ func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) err } nodePool, err := clusterNodePoolsGetCall.Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("NodePool %q from cluster %q", name, nodePoolInfo.cluster)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NodePool %q from cluster %q", name, nodePoolInfo.cluster)) } npMap, err := flattenNodePool(d, config, nodePool, "") @@ -608,8 +632,8 @@ func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) err } func resourceContainerNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) 
+ config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -645,8 +669,8 @@ func resourceContainerNodePoolUpdate(d *schema.ResourceData, meta interface{}) e } func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -662,7 +686,7 @@ func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) e if err != nil { // If the node pool doesn't get created and then we try to delete it, we get an error, // but I don't think we need an error during delete if it doesn't exist - if IsGoogleApiErrorWithCode(err, 404) { + if transport_tpg.IsGoogleApiErrorWithCode(err, 404) { log.Printf("node pool %q not found, doesn't need to be cleaned up", name) return nil } else { @@ -672,13 +696,13 @@ func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) e // Acquire read-lock on cluster. clusterLockKey := nodePoolInfo.clusterLockKey() - mutexKV.RLock(clusterLockKey) - defer mutexKV.RUnlock(clusterLockKey) + transport_tpg.MutexStore.RLock(clusterLockKey) + defer transport_tpg.MutexStore.RUnlock(clusterLockKey) // Acquire write-lock on nodepool. 
npLockKey := nodePoolInfo.nodePoolLockKey(name) - mutexKV.Lock(npLockKey) - defer mutexKV.Unlock(npLockKey) + transport_tpg.MutexStore.Lock(npLockKey) + defer transport_tpg.MutexStore.Unlock(npLockKey) timeout := d.Timeout(schema.TimeoutDelete) startTime := time.Now() @@ -692,7 +716,7 @@ func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) e operation, err = clusterNodePoolsDeleteCall.Do() if err != nil { - if isFailedPreconditionError(err) { + if tpgresource.IsFailedPreconditionError(err) { // We get failed precondition errors if the cluster is updating // while we try to delete the node pool. return resource.RetryableError(err) @@ -710,7 +734,7 @@ func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) e timeout -= time.Since(startTime) // Wait until it's deleted - waitErr := containerOperationWait(config, operation, nodePoolInfo.project, nodePoolInfo.location, "deleting GKE NodePool", userAgent, timeout) + waitErr := ContainerOperationWait(config, operation, nodePoolInfo.project, nodePoolInfo.location, "deleting GKE NodePool", userAgent, timeout) if waitErr != nil { return waitErr } @@ -723,13 +747,13 @@ func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) e } func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) (bool, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) nodePoolInfo, err := extractNodePoolInformation(d, config) if err != nil { return false, err } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return false, err } @@ -742,7 +766,7 @@ func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) ( _, err = clusterNodePoolsGetCall.Do() if err != nil { - if err = handleNotFoundError(err, d, fmt.Sprintf("Container NodePool %s", name)); err == nil { + if err = transport_tpg.HandleNotFoundError(err, d, 
fmt.Sprintf("Container NodePool %s", name)); err == nil { return false, nil } // There was some other error in reading the resource @@ -752,25 +776,25 @@ func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) ( } func resourceContainerNodePoolStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } - if err := parseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)/nodePools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config); err != nil { + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)/nodePools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config); err != nil { return nil, err } - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/nodePools/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/nodePools/{{name}}") if err != nil { return nil, err } d.SetId(id) - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } @@ -819,7 +843,7 @@ func expandNodePool(d *schema.ResourceData, prefix string) (*container.NodePool, var locations []string if v, ok := d.GetOk("node_locations"); ok && v.(*schema.Set).Len() > 0 { - locations = convertStringSet(v.(*schema.Set)) + locations = tpgresource.ConvertStringSet(v.(*schema.Set)) } np := &container.NodePool{ @@ -973,8 +997,8 @@ func flattenNodePoolUpgradeSettings(us *container.UpgradeSettings) []map[string] return []map[string]interface{}{upgradeSettings} } -func 
flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodePool, prefix string) (map[string]interface{}, error) { - userAgent, err := generateUserAgentString(d, config.UserAgent) +func flattenNodePool(d *schema.ResourceData, config *transport_tpg.Config, np *container.NodePool, prefix string) (map[string]interface{}, error) { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } @@ -992,7 +1016,7 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodeP return nil, fmt.Errorf("Error reading instance group manage URL '%q'", url) } igm, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do() - if IsGoogleApiErrorWithCode(err, 404) { + if transport_tpg.IsGoogleApiErrorWithCode(err, 404) { // The IGM URL in is stale; don't include it continue } @@ -1011,7 +1035,7 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodeP "name": np.Name, "name_prefix": d.Get(prefix + "name_prefix"), "initial_node_count": np.InitialNodeCount, - "node_locations": schema.NewSet(schema.HashString, convertStringArrToInterface(np.Locations)), + "node_locations": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(np.Locations)), "node_count": nodeCount, "node_config": flattenNodeConfig(np.Config), "instance_group_urls": igmUrls, @@ -1068,10 +1092,11 @@ func flattenNodeNetworkConfig(c *container.NodeNetworkConfig, d *schema.Resource result := []map[string]interface{}{} if c != nil { result = append(result, map[string]interface{}{ - "create_pod_range": d.Get(prefix + "network_config.0.create_pod_range"), // API doesn't return this value so we set the old one. 
Field is ForceNew + Required - "pod_ipv4_cidr_block": c.PodIpv4CidrBlock, - "pod_range": c.PodRange, - "enable_private_nodes": c.EnablePrivateNodes, + "create_pod_range": d.Get(prefix + "network_config.0.create_pod_range"), // API doesn't return this value so we set the old one. Field is ForceNew + Required + "pod_ipv4_cidr_block": c.PodIpv4CidrBlock, + "pod_range": c.PodRange, + "enable_private_nodes": c.EnablePrivateNodes, + "pod_cidr_overprovision_config": flattenPodCidrOverprovisionConfig(c.PodCidrOverprovisionConfig), }) } return result @@ -1105,22 +1130,24 @@ func expandNodeNetworkConfig(v interface{}) *container.NodeNetworkConfig { nnc.ForceSendFields = []string{"EnablePrivateNodes"} } + nnc.PodCidrOverprovisionConfig = expandPodCidrOverprovisionConfig(networkNodeConfig["pod_cidr_overprovision_config"]) + return nnc } func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *NodePoolInformation, prefix string, timeout time.Duration) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) name := d.Get(prefix + "name").(string) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } // Acquire read-lock on cluster. clusterLockKey := nodePoolInfo.clusterLockKey() - mutexKV.RLock(clusterLockKey) - defer mutexKV.RUnlock(clusterLockKey) + transport_tpg.MutexStore.RLock(clusterLockKey) + defer transport_tpg.MutexStore.RUnlock(clusterLockKey) // Nodepool write-lock will be acquired when update function is called. 
npLockKey := nodePoolInfo.nodePoolLockKey(name) @@ -1161,13 +1188,13 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated autoscaling in Node Pool %s", d.Id()) @@ -1198,14 +1225,14 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool logging_variant", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } @@ -1252,14 +1279,14 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool tags", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated tags for node pool %s", name) @@ -1273,7 +1300,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node if v, ok := d.GetOk(prefix + "node_config.0.resource_labels"); ok { resourceLabels := v.(map[string]interface{}) req.ResourceLabels = &container.ResourceLabels{ - 
Labels: convertStringMap(resourceLabels), + Labels: tpgresource.ConvertStringMap(resourceLabels), } } @@ -1288,7 +1315,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool resource labels", userAgent, @@ -1296,7 +1323,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Call update serially. - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } @@ -1311,7 +1338,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node if v, ok := d.GetOk(prefix + "node_config.0.labels"); ok { labels := v.(map[string]interface{}) req.Labels = &container.NodeLabels{ - Labels: convertStringMap(labels), + Labels: tpgresource.ConvertStringMap(labels), } } @@ -1326,7 +1353,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool labels", userAgent, @@ -1334,7 +1361,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Call update serially. 
- if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } @@ -1360,13 +1387,13 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated image type in Node Pool %s", d.Id()) @@ -1393,14 +1420,14 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool workload_metadata_config", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated workload_metadata_config for node pool %s", name) @@ -1426,14 +1453,14 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool kubelet_config", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } @@ -1459,14 +1486,14 @@ func nodePoolUpdate(d *schema.ResourceData, meta 
interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool linux_node_config", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } @@ -1492,12 +1519,12 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool size", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] GKE node pool %s size has been updated to %d", name, newSize) @@ -1527,12 +1554,12 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool management", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated management in Node Pool %s", name) @@ -1555,11 +1582,11 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool version", userAgent, timeout) } - if err := 
retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated version in Node Pool %s", name) @@ -1567,7 +1594,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node if d.HasChange(prefix + "node_locations") { req := &container.UpdateNodePoolRequest{ - Locations: convertStringSet(d.Get(prefix + "node_locations").(*schema.Set)), + Locations: tpgresource.ConvertStringSet(d.Get(prefix + "node_locations").(*schema.Set)), } updateF := func() error { clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) @@ -1581,10 +1608,10 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool node locations", userAgent, timeout) + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool node locations", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated node locations in Node Pool %s", name) @@ -1633,13 +1660,14 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node if v, ok := blueGreenSettingsConfig["standard_rollout_policy"]; ok && len(v.([]interface{})) > 0 { standardRolloutPolicy := &container.StandardRolloutPolicy{} - standardRolloutPolicyConfig := v.([]interface{})[0].(map[string]interface{}) - standardRolloutPolicy.BatchSoakDuration = standardRolloutPolicyConfig["batch_soak_duration"].(string) - if v, ok := standardRolloutPolicyConfig["batch_node_count"]; 
ok { - standardRolloutPolicy.BatchNodeCount = int64(v.(int)) - } - if v, ok := standardRolloutPolicyConfig["batch_percentage"]; ok { - standardRolloutPolicy.BatchPercentage = v.(float64) + if standardRolloutPolicyConfig, ok := v.([]interface{})[0].(map[string]interface{}); ok { + standardRolloutPolicy.BatchSoakDuration = standardRolloutPolicyConfig["batch_soak_duration"].(string) + if v, ok := standardRolloutPolicyConfig["batch_node_count"]; ok { + standardRolloutPolicy.BatchNodeCount = int64(v.(int)) + } + if v, ok := standardRolloutPolicyConfig["batch_percentage"]; ok { + standardRolloutPolicy.BatchPercentage = v.(float64) + } } blueGreenSettings.StandardRolloutPolicy = standardRolloutPolicy } @@ -1661,9 +1689,9 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool upgrade settings", userAgent, timeout) + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool upgrade settings", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated upgrade settings in Node Pool %s", name) @@ -1687,14 +1715,14 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Wait until it's updated - return containerOperationWait(config, op, + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool workload_metadata_config", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } @@ -1719,7 +1747,7 @@ var 
containerNodePoolRestingStates = RestingStates{ // takes in a config object, full node pool name, project name and the current CRUD action timeout // returns a state with no error if the state is a resting state, and the last state with an error otherwise -func containerNodePoolAwaitRestingState(config *Config, name, project, userAgent string, timeout time.Duration) (state string, err error) { +func containerNodePoolAwaitRestingState(config *transport_tpg.Config, name, project, userAgent string, timeout time.Duration) (state string, err error) { err = resource.Retry(timeout, func() *resource.RetryError { clusterNodePoolsGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Get(name) if config.UserProjectOverride { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_node_pool_migrate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_node_pool_migrate.go similarity index 91% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_node_pool_migrate.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_node_pool_migrate.go index 02593c79dc..ad73437fad 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_node_pool_migrate.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/resource_container_node_pool_migrate.go @@ -1,9 +1,12 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package container import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func resourceContainerNodePoolMigrateState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/state_util.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/state_util.go similarity index 87% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/state_util.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/state_util.go index bb79c126c5..a29a6a1ff4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/state_util.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/container/state_util.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package container // A StateType represents the specific type of resting state that a state value // is. 
diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_container_registry_image.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/data_source_container_registry_image.go similarity index 85% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_container_registry_image.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/data_source_container_registry_image.go index 4a28fccd88..fa75535424 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_container_registry_image.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/data_source_container_registry_image.go @@ -1,10 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package containeranalysis import ( "fmt" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleContainerImage() *schema.Resource { @@ -41,8 +45,8 @@ func DataSourceGoogleContainerImage() *schema.Resource { } func containerRegistryImageRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_container_registry_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/data_source_container_registry_repository.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_container_registry_repository.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/data_source_container_registry_repository.go index fe95c3db5a..fe9afcb2bd 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_container_registry_repository.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/data_source_container_registry_repository.go @@ -1,10 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package containeranalysis import ( "fmt" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleContainerRepo() *schema.Resource { @@ -29,8 +33,8 @@ func DataSourceGoogleContainerRepo() *schema.Resource { } func containerRegistryRepoRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/iam_container_analysis_note.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/iam_container_analysis_note.go new file mode 100644 index 0000000000..1a43833204 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/iam_container_analysis_note.go @@ -0,0 +1,221 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package containeranalysis + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var ContainerAnalysisNoteIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "note": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type ContainerAnalysisNoteIamUpdater struct { + project string + note string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func ContainerAnalysisNoteIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("note"); ok { + values["note"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/notes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("note").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ContainerAnalysisNoteIamUpdater{ + project: values["project"], + note: values["note"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + 
return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("note", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting note: %s", err) + } + + return u, nil +} + +func ContainerAnalysisNoteIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/notes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ContainerAnalysisNoteIamUpdater{ + project: values["project"], + note: values["note"], + d: d, + Config: config, + } + if err := d.Set("note", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting note: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ContainerAnalysisNoteIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyNoteUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u 
*ContainerAnalysisNoteIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyNoteUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ContainerAnalysisNoteIamUpdater) qualifyNoteUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ContainerAnalysisBasePath}}%s:%s", fmt.Sprintf("projects/%s/notes/%s", u.project, u.note), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ContainerAnalysisNoteIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/notes/%s", u.project, u.note) +} + +func (u *ContainerAnalysisNoteIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-containeranalysis-note-%s", u.GetResourceId()) +} + +func (u *ContainerAnalysisNoteIamUpdater) DescribeResource() string { + return fmt.Sprintf("containeranalysis note %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_analysis_note.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_analysis_note.go new file mode 100644 index 0000000000..da3d5afbc8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_analysis_note.go @@ -0,0 +1,774 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package containeranalysis + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceContainerAnalysisNote() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAnalysisNoteCreate, + Read: resourceContainerAnalysisNoteRead, + Update: resourceContainerAnalysisNoteUpdate, + Delete: resourceContainerAnalysisNoteDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAnalysisNoteImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "attestation_authority": { + Type: schema.TypeList, + Required: true, + Description: `Note kind that 
represents a logical attestation "role" or "authority". +For example, an organization might have one AttestationAuthority for +"QA" and one for "build". This Note is intended to act strictly as a +grouping mechanism for the attached Occurrences (Attestations). This +grouping mechanism also provides a security boundary, since IAM ACLs +gate the ability for a principle to attach an Occurrence to a given +Note. It also provides a single point of lookup to find all attached +Attestation Occurrences, even if they don't all live in the same +project.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hint": { + Type: schema.TypeList, + Required: true, + Description: `This submessage provides human-readable hints about the purpose of +the AttestationAuthority. Because the name of a Note acts as its +resource reference, it is important to disambiguate the canonical +name of the Note (which might be a UUID for security purposes) +from "readable" names more suitable for debug output. Note that +these hints should NOT be used to look up AttestationAuthorities +in security sensitive contexts, such as when looking up +Attestations to verify.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "human_readable_name": { + Type: schema.TypeString, + Required: true, + Description: `The human readable name of this Attestation Authority, for +example "qa".`, + }, + }, + }, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the note.`, + }, + "expiration_time": { + Type: schema.TypeString, + Optional: true, + Description: `Time of expiration for this note. 
Leave empty if note does not expire.`, + }, + "long_description": { + Type: schema.TypeString, + Optional: true, + Description: `A detailed description of the note`, + }, + "related_note_names": { + Type: schema.TypeSet, + Optional: true, + Description: `Names of other notes related to this note.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "related_url": { + Type: schema.TypeSet, + Optional: true, + Description: `URLs associated with this note and related metadata.`, + Elem: containeranalysisNoteRelatedUrlSchema(), + // Default schema.HashSchema is used. + }, + "short_description": { + Type: schema.TypeString, + Optional: true, + Description: `A one sentence description of the note.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time this note was created.`, + }, + "kind": { + Type: schema.TypeString, + Computed: true, + Description: `The type of analysis this note describes`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time this note was last updated.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func containeranalysisNoteRelatedUrlSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": { + Type: schema.TypeString, + Required: true, + Description: `Specific URL associated with the resource.`, + }, + "label": { + Type: schema.TypeString, + Optional: true, + Description: `Label to describe usage of the URL`, + }, + }, + } +} + +func resourceContainerAnalysisNoteCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandContainerAnalysisNoteName(d.Get("name"), d, config) + if err 
!= nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + shortDescriptionProp, err := expandContainerAnalysisNoteShortDescription(d.Get("short_description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("short_description"); !tpgresource.IsEmptyValue(reflect.ValueOf(shortDescriptionProp)) && (ok || !reflect.DeepEqual(v, shortDescriptionProp)) { + obj["shortDescription"] = shortDescriptionProp + } + longDescriptionProp, err := expandContainerAnalysisNoteLongDescription(d.Get("long_description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("long_description"); !tpgresource.IsEmptyValue(reflect.ValueOf(longDescriptionProp)) && (ok || !reflect.DeepEqual(v, longDescriptionProp)) { + obj["longDescription"] = longDescriptionProp + } + relatedUrlProp, err := expandContainerAnalysisNoteRelatedUrl(d.Get("related_url"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("related_url"); !tpgresource.IsEmptyValue(reflect.ValueOf(relatedUrlProp)) && (ok || !reflect.DeepEqual(v, relatedUrlProp)) { + obj["relatedUrl"] = relatedUrlProp + } + expirationTimeProp, err := expandContainerAnalysisNoteExpirationTime(d.Get("expiration_time"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("expiration_time"); !tpgresource.IsEmptyValue(reflect.ValueOf(expirationTimeProp)) && (ok || !reflect.DeepEqual(v, expirationTimeProp)) { + obj["expirationTime"] = expirationTimeProp + } + relatedNoteNamesProp, err := expandContainerAnalysisNoteRelatedNoteNames(d.Get("related_note_names"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("related_note_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(relatedNoteNamesProp)) && (ok || !reflect.DeepEqual(v, relatedNoteNamesProp)) { + obj["relatedNoteNames"] = 
relatedNoteNamesProp + } + attestationAuthorityProp, err := expandContainerAnalysisNoteAttestationAuthority(d.Get("attestation_authority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attestation_authority"); !tpgresource.IsEmptyValue(reflect.ValueOf(attestationAuthorityProp)) && (ok || !reflect.DeepEqual(v, attestationAuthorityProp)) { + obj["attestationAuthority"] = attestationAuthorityProp + } + + obj, err = resourceContainerAnalysisNoteEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/notes/{{name}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/notes?noteId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Note: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Note: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Note: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/notes/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Note %q: %#v", d.Id(), res) + + return resourceContainerAnalysisNoteRead(d, meta) +} + +func 
resourceContainerAnalysisNoteRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/notes/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Note: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ContainerAnalysisNote %q", d.Id())) + } + + res, err = resourceContainerAnalysisNoteDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing ContainerAnalysisNote because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Note: %s", err) + } + + if err := d.Set("name", flattenContainerAnalysisNoteName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Note: %s", err) + } + if err := d.Set("short_description", flattenContainerAnalysisNoteShortDescription(res["shortDescription"], d, config)); err != nil { + return fmt.Errorf("Error reading Note: %s", err) + } + if err := d.Set("long_description", flattenContainerAnalysisNoteLongDescription(res["longDescription"], d, config)); err != nil { + return fmt.Errorf("Error reading Note: %s", err) + } + if err := d.Set("kind", flattenContainerAnalysisNoteKind(res["kind"], d, config)); err != nil { + return fmt.Errorf("Error reading Note: %s", err) + } + if err := d.Set("related_url", flattenContainerAnalysisNoteRelatedUrl(res["relatedUrl"], d, config)); err != nil { + return fmt.Errorf("Error reading Note: %s", err) + } + if err := d.Set("expiration_time", flattenContainerAnalysisNoteExpirationTime(res["expirationTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Note: %s", err) + } + if err := d.Set("create_time", flattenContainerAnalysisNoteCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Note: %s", err) + } + if err := d.Set("update_time", flattenContainerAnalysisNoteUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Note: %s", err) + } + if err := d.Set("related_note_names", flattenContainerAnalysisNoteRelatedNoteNames(res["relatedNoteNames"], d, config)); err != nil { + return fmt.Errorf("Error reading Note: %s", err) + } + if err := d.Set("attestation_authority", flattenContainerAnalysisNoteAttestationAuthority(res["attestationAuthority"], d, config)); err != nil { + return fmt.Errorf("Error 
reading Note: %s", err) + } + + return nil +} + +func resourceContainerAnalysisNoteUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Note: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + shortDescriptionProp, err := expandContainerAnalysisNoteShortDescription(d.Get("short_description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("short_description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, shortDescriptionProp)) { + obj["shortDescription"] = shortDescriptionProp + } + longDescriptionProp, err := expandContainerAnalysisNoteLongDescription(d.Get("long_description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("long_description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, longDescriptionProp)) { + obj["longDescription"] = longDescriptionProp + } + relatedUrlProp, err := expandContainerAnalysisNoteRelatedUrl(d.Get("related_url"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("related_url"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, relatedUrlProp)) { + obj["relatedUrl"] = relatedUrlProp + } + expirationTimeProp, err := expandContainerAnalysisNoteExpirationTime(d.Get("expiration_time"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("expiration_time"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, expirationTimeProp)) { + obj["expirationTime"] = expirationTimeProp + } + relatedNoteNamesProp, err := expandContainerAnalysisNoteRelatedNoteNames(d.Get("related_note_names"), 
d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("related_note_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, relatedNoteNamesProp)) { + obj["relatedNoteNames"] = relatedNoteNamesProp + } + attestationAuthorityProp, err := expandContainerAnalysisNoteAttestationAuthority(d.Get("attestation_authority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attestation_authority"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attestationAuthorityProp)) { + obj["attestationAuthority"] = attestationAuthorityProp + } + + obj, err = resourceContainerAnalysisNoteEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/notes/{{name}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/notes/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Note %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("short_description") { + updateMask = append(updateMask, "shortDescription") + } + + if d.HasChange("long_description") { + updateMask = append(updateMask, "longDescription") + } + + if d.HasChange("related_url") { + updateMask = append(updateMask, "relatedUrl") + } + + if d.HasChange("expiration_time") { + updateMask = append(updateMask, "expirationTime") + } + + if d.HasChange("related_note_names") { + updateMask = append(updateMask, "relatedNoteNames") + } + + if d.HasChange("attestation_authority") { + updateMask = append(updateMask, "attestationAuthority") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": 
strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Note %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Note %q: %#v", d.Id(), res) + } + + return resourceContainerAnalysisNoteRead(d, meta) +} + +func resourceContainerAnalysisNoteDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Note: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/notes/{{name}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/notes/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Note %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), 
+ }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Note") + } + + log.Printf("[DEBUG] Finished deleting Note %q: %#v", d.Id(), res) + return nil +} + +func resourceContainerAnalysisNoteImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/notes/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/notes/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenContainerAnalysisNoteName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenContainerAnalysisNoteShortDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisNoteLongDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisNoteKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisNoteRelatedUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(containeranalysisNoteRelatedUrlSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "url": 
flattenContainerAnalysisNoteRelatedUrlUrl(original["url"], d, config), + "label": flattenContainerAnalysisNoteRelatedUrlLabel(original["label"], d, config), + }) + } + return transformed +} +func flattenContainerAnalysisNoteRelatedUrlUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisNoteRelatedUrlLabel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisNoteExpirationTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisNoteCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisNoteUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisNoteRelatedNoteNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenContainerAnalysisNoteAttestationAuthority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hint"] = + flattenContainerAnalysisNoteAttestationAuthorityHint(original["hint"], d, config) + return []interface{}{transformed} +} +func flattenContainerAnalysisNoteAttestationAuthorityHint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["human_readable_name"] = + 
flattenContainerAnalysisNoteAttestationAuthorityHintHumanReadableName(original["humanReadableName"], d, config) + return []interface{}{transformed} +} +func flattenContainerAnalysisNoteAttestationAuthorityHintHumanReadableName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandContainerAnalysisNoteName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandContainerAnalysisNoteShortDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandContainerAnalysisNoteLongDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandContainerAnalysisNoteRelatedUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUrl, err := expandContainerAnalysisNoteRelatedUrlUrl(original["url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["url"] = transformedUrl + } + + transformedLabel, err := expandContainerAnalysisNoteRelatedUrlLabel(original["label"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["label"] = transformedLabel + } + + req = append(req, transformed) + } + return req, nil +} + +func expandContainerAnalysisNoteRelatedUrlUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandContainerAnalysisNoteRelatedUrlLabel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandContainerAnalysisNoteExpirationTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandContainerAnalysisNoteRelatedNoteNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandContainerAnalysisNoteAttestationAuthority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHint, err := expandContainerAnalysisNoteAttestationAuthorityHint(original["hint"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHint); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hint"] = transformedHint + } + + return transformed, nil +} + +func expandContainerAnalysisNoteAttestationAuthorityHint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHumanReadableName, err := expandContainerAnalysisNoteAttestationAuthorityHintHumanReadableName(original["human_readable_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHumanReadableName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["humanReadableName"] = transformedHumanReadableName + } + + return 
transformed, nil +} + +func expandContainerAnalysisNoteAttestationAuthorityHintHumanReadableName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceContainerAnalysisNoteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Field was renamed in GA API + obj["attestation"] = obj["attestationAuthority"] + delete(obj, "attestationAuthority") + + return obj, nil +} + +func resourceContainerAnalysisNoteDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // Field was renamed in GA API + res["attestationAuthority"] = res["attestation"] + delete(res, "attestation") + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_analysis_note_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_analysis_note_sweeper.go new file mode 100644 index 0000000000..d3591f9215 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_analysis_note_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package containeranalysis + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ContainerAnalysisNote", testSweepContainerAnalysisNote) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepContainerAnalysisNote(region string) error { + resourceName := "ContainerAnalysisNote" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://containeranalysis.googleapis.com/v1/projects/{{project}}/notes?noteId={{name}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["notes"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://containeranalysis.googleapis.com/v1/projects/{{project}}/notes/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_analysis_occurrence.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_analysis_occurrence.go new file mode 100644 index 0000000000..33f2469745 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_analysis_occurrence.go @@ -0,0 +1,681 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package containeranalysis + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceContainerAnalysisOccurrence() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAnalysisOccurrenceCreate, + Read: resourceContainerAnalysisOccurrenceRead, + Update: resourceContainerAnalysisOccurrenceUpdate, + Delete: resourceContainerAnalysisOccurrenceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAnalysisOccurrenceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "attestation": { + Type: schema.TypeList, + Required: 
true, + Description: `Occurrence that represents a single "attestation". The authenticity +of an attestation can be verified using the attached signature. +If the verifier trusts the public key of the signer, then verifying +the signature is sufficient to establish trust. In this circumstance, +the authority to which this attestation is attached is primarily +useful for lookup (how to find this attestation if you already +know the authority and artifact to be verified) and intent (for +which authority this attestation was intended to sign.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "serialized_payload": { + Type: schema.TypeString, + Required: true, + Description: `The serialized payload that is verified by one or +more signatures. A base64-encoded string.`, + }, + "signatures": { + Type: schema.TypeSet, + Required: true, + Description: `One or more signatures over serializedPayload. +Verifier implementations should consider this attestation +message verified if at least one signature verifies +serializedPayload. See Signature in common.proto for more +details on signature structure and verification.`, + Elem: containeranalysisOccurrenceAttestationSignaturesSchema(), + // Default schema.HashSchema is used. + }, + }, + }, + }, + "note_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The analysis note associated with this occurrence, in the form of +projects/[PROJECT]/notes/[NOTE_ID]. This field can be used as a +filter in list requests.`, + }, + "resource_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. Immutable. A URI that represents the resource for which +the occurrence applies. 
For example, +https://gcr.io/project/image@sha256:123abc for a Docker image.`, + }, + "remediation": { + Type: schema.TypeString, + Optional: true, + Description: `A description of actions that can be taken to remedy the note.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the repository was created.`, + }, + "kind": { + Type: schema.TypeString, + Computed: true, + Description: `The note kind which explicitly denotes which of the occurrence +details are specified. This field can be used as a filter in list +requests.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the occurrence.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the repository was last updated.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func containeranalysisOccurrenceAttestationSignaturesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "public_key_id": { + Type: schema.TypeString, + Required: true, + Description: `The identifier for the public key that verifies this +signature. MUST be an RFC3986 conformant +URI. * When possible, the key id should be an +immutable reference, such as a cryptographic digest. +Examples of valid values: + +* OpenPGP V4 public key fingerprint. See https://www.iana.org/assignments/uri-schemes/prov/openpgp4fpr + for more details on this scheme. + * 'openpgp4fpr:74FAF3B861BDA0870C7B6DEF607E48D2A663AEEA' +* RFC6920 digest-named SubjectPublicKeyInfo (digest of the DER serialization): + * "ni:///sha-256;cD9o9Cq6LG3jD0iKXqEi_vdjJGecm_iXkbqVoScViaU"`, + }, + "signature": { + Type: schema.TypeString, + Optional: true, + Description: `The content of the signature, an opaque bytestring. 
+The payload that this signature verifies MUST be +unambiguously provided with the Signature during +verification. A wrapper message might provide the +payload explicitly. Alternatively, a message might +have a canonical serialization that can always be +unambiguously computed to derive the payload.`, + }, + }, + } +} + +func resourceContainerAnalysisOccurrenceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + resourceUriProp, err := expandContainerAnalysisOccurrenceResourceUri(d.Get("resource_uri"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("resource_uri"); !tpgresource.IsEmptyValue(reflect.ValueOf(resourceUriProp)) && (ok || !reflect.DeepEqual(v, resourceUriProp)) { + obj["resourceUri"] = resourceUriProp + } + noteNameProp, err := expandContainerAnalysisOccurrenceNoteName(d.Get("note_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("note_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(noteNameProp)) && (ok || !reflect.DeepEqual(v, noteNameProp)) { + obj["noteName"] = noteNameProp + } + remediationProp, err := expandContainerAnalysisOccurrenceRemediation(d.Get("remediation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("remediation"); !tpgresource.IsEmptyValue(reflect.ValueOf(remediationProp)) && (ok || !reflect.DeepEqual(v, remediationProp)) { + obj["remediation"] = remediationProp + } + attestationProp, err := expandContainerAnalysisOccurrenceAttestation(d.Get("attestation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attestation"); !tpgresource.IsEmptyValue(reflect.ValueOf(attestationProp)) && (ok || !reflect.DeepEqual(v, attestationProp)) { + obj["attestation"] = attestationProp + } + + obj, err = 
resourceContainerAnalysisOccurrenceEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "{{note_name}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/occurrences") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Occurrence: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Occurrence: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Occurrence: %s", err) + } + if err := d.Set("name", flattenContainerAnalysisOccurrenceName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/occurrences/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Occurrence %q: %#v", d.Id(), res) + + return resourceContainerAnalysisOccurrenceRead(d, meta) +} + +func resourceContainerAnalysisOccurrenceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/occurrences/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Occurrence: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ContainerAnalysisOccurrence %q", d.Id())) + } + + res, err = resourceContainerAnalysisOccurrenceDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing ContainerAnalysisOccurrence because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Occurrence: %s", err) + } + + if err := d.Set("name", flattenContainerAnalysisOccurrenceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Occurrence: %s", err) + } + if err := d.Set("resource_uri", flattenContainerAnalysisOccurrenceResourceUri(res["resourceUri"], d, config)); err != nil { + return fmt.Errorf("Error reading Occurrence: %s", err) + } + if err := d.Set("note_name", flattenContainerAnalysisOccurrenceNoteName(res["noteName"], d, config)); err != nil { + return fmt.Errorf("Error reading Occurrence: %s", err) + } + if err := d.Set("kind", flattenContainerAnalysisOccurrenceKind(res["kind"], d, config)); err != nil { + return fmt.Errorf("Error reading Occurrence: %s", err) + } + if err := d.Set("remediation", 
flattenContainerAnalysisOccurrenceRemediation(res["remediation"], d, config)); err != nil { + return fmt.Errorf("Error reading Occurrence: %s", err) + } + if err := d.Set("create_time", flattenContainerAnalysisOccurrenceCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Occurrence: %s", err) + } + if err := d.Set("update_time", flattenContainerAnalysisOccurrenceUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Occurrence: %s", err) + } + if err := d.Set("attestation", flattenContainerAnalysisOccurrenceAttestation(res["attestation"], d, config)); err != nil { + return fmt.Errorf("Error reading Occurrence: %s", err) + } + + return nil +} + +func resourceContainerAnalysisOccurrenceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Occurrence: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + remediationProp, err := expandContainerAnalysisOccurrenceRemediation(d.Get("remediation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("remediation"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, remediationProp)) { + obj["remediation"] = remediationProp + } + attestationProp, err := expandContainerAnalysisOccurrenceAttestation(d.Get("attestation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attestation"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attestationProp)) { + obj["attestation"] = attestationProp + } + + obj, err = resourceContainerAnalysisOccurrenceUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, 
err := tpgresource.ReplaceVars(d, config, "{{note_name}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/occurrences/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Occurrence %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("remediation") { + updateMask = append(updateMask, "remediation") + } + + if d.HasChange("attestation") { + updateMask = append(updateMask, "attestation") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Occurrence %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Occurrence %q: %#v", d.Id(), res) + } + + return resourceContainerAnalysisOccurrenceRead(d, meta) +} + +func resourceContainerAnalysisOccurrenceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Occurrence: %s", err) + } + billingProject = project + + lockName, err := 
tpgresource.ReplaceVars(d, config, "{{note_name}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/occurrences/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Occurrence %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Occurrence") + } + + log.Printf("[DEBUG] Finished deleting Occurrence %q: %#v", d.Id(), res) + return nil +} + +func resourceContainerAnalysisOccurrenceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/occurrences/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/occurrences/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenContainerAnalysisOccurrenceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenContainerAnalysisOccurrenceResourceUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + 
return v +} + +func flattenContainerAnalysisOccurrenceNoteName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisOccurrenceKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisOccurrenceRemediation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisOccurrenceCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisOccurrenceUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisOccurrenceAttestation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["serialized_payload"] = + flattenContainerAnalysisOccurrenceAttestationSerializedPayload(original["serializedPayload"], d, config) + transformed["signatures"] = + flattenContainerAnalysisOccurrenceAttestationSignatures(original["signatures"], d, config) + return []interface{}{transformed} +} +func flattenContainerAnalysisOccurrenceAttestationSerializedPayload(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisOccurrenceAttestationSignatures(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(containeranalysisOccurrenceAttestationSignaturesSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming 
back from the api + continue + } + transformed.Add(map[string]interface{}{ + "signature": flattenContainerAnalysisOccurrenceAttestationSignaturesSignature(original["signature"], d, config), + "public_key_id": flattenContainerAnalysisOccurrenceAttestationSignaturesPublicKeyId(original["publicKeyId"], d, config), + }) + } + return transformed +} +func flattenContainerAnalysisOccurrenceAttestationSignaturesSignature(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenContainerAnalysisOccurrenceAttestationSignaturesPublicKeyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandContainerAnalysisOccurrenceResourceUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandContainerAnalysisOccurrenceNoteName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandContainerAnalysisOccurrenceRemediation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandContainerAnalysisOccurrenceAttestation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSerializedPayload, err := expandContainerAnalysisOccurrenceAttestationSerializedPayload(original["serialized_payload"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSerializedPayload); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serializedPayload"] = transformedSerializedPayload + } + + transformedSignatures, err := 
expandContainerAnalysisOccurrenceAttestationSignatures(original["signatures"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSignatures); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["signatures"] = transformedSignatures + } + + return transformed, nil +} + +func expandContainerAnalysisOccurrenceAttestationSerializedPayload(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandContainerAnalysisOccurrenceAttestationSignatures(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSignature, err := expandContainerAnalysisOccurrenceAttestationSignaturesSignature(original["signature"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSignature); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["signature"] = transformedSignature + } + + transformedPublicKeyId, err := expandContainerAnalysisOccurrenceAttestationSignaturesPublicKeyId(original["public_key_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicKeyId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicKeyId"] = transformedPublicKeyId + } + + req = append(req, transformed) + } + return req, nil +} + +func expandContainerAnalysisOccurrenceAttestationSignaturesSignature(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandContainerAnalysisOccurrenceAttestationSignaturesPublicKeyId(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceContainerAnalysisOccurrenceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // encoder logic only in non-GA versions + + return obj, nil +} + +func resourceContainerAnalysisOccurrenceUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Note is required, even for PATCH + noteNameProp, err := expandContainerAnalysisOccurrenceNoteName(d.Get("note_name"), d, meta.(*transport_tpg.Config)) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("note_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(noteNameProp)) && (ok || !reflect.DeepEqual(v, noteNameProp)) { + obj["noteName"] = noteNameProp + } + + return resourceContainerAnalysisOccurrenceEncoder(d, meta, obj) +} + +func resourceContainerAnalysisOccurrenceDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // encoder logic only in non-GA version + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_analysis_occurrence_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_analysis_occurrence_sweeper.go new file mode 100644 index 0000000000..181ccebf4f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_analysis_occurrence_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package containeranalysis + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ContainerAnalysisOccurrence", testSweepContainerAnalysisOccurrence) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepContainerAnalysisOccurrence(region string) error { + resourceName := "ContainerAnalysisOccurrence" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://containeranalysis.googleapis.com/v1/projects/{{project}}/occurrences", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["occurrences"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://containeranalysis.googleapis.com/v1/projects/{{project}}/occurrences/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", 
deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_registry.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_registry.go new file mode 100644 index 0000000000..abb9480b55 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeranalysis/resource_container_registry.go @@ -0,0 +1,128 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package containeranalysis + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceContainerRegistry() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerRegistryCreate, + Read: resourceContainerRegistryRead, + Delete: resourceContainerRegistryDelete, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + StateFunc: func(s interface{}) string { + return strings.ToUpper(s.(string)) + }, + Description: `The location of the registry. One of ASIA, EU, US or not specified. See the official documentation for more information on registry locations.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, + }, + + "bucket_self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the created resource.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceContainerRegistryCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + log.Printf("[DEBUG] Project: %s", project) + + location := d.Get("location").(string) + log.Printf("[DEBUG] location: %s", location) + urlBase := "https://gcr.io/v2/token" + if location != "" { + urlBase = fmt.Sprintf("https://%s.gcr.io/v2/token", strings.ToLower(location)) + } + + // Performing a token handshake with the GCR API causes the backing bucket to create if it hasn't already. + url, err := tpgresource.ReplaceVars(d, config, fmt.Sprintf("%s?service=gcr.io&scope=repository:{{project}}/my-repo:push,pull", urlBase)) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + + if err != nil { + return err + } + return resourceContainerRegistryRead(d, meta) +} + +func resourceContainerRegistryRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + location := d.Get("location").(string) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + name := "" + if location != "" { + name = fmt.Sprintf("%s.artifacts.%s.appspot.com", strings.ToLower(location), project) + } else { + name = fmt.Sprintf("artifacts.%s.appspot.com", project) + } + + res, 
err := config.NewStorageClient(userAgent).Buckets.Get(name).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Container Registry Storage Bucket %q", name)) + } + log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) + + // Update the ID according to the bucket ID + if err := d.Set("bucket_self_link", res.SelfLink); err != nil { + return fmt.Errorf("Error setting bucket_self_link: %s", err) + } + + d.SetId(res.Id) + return nil +} + +func resourceContainerRegistryDelete(d *schema.ResourceData, meta interface{}) error { + // Don't delete the backing bucket as this is not a supported GCR action + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/container_attached_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/container_attached_operation.go new file mode 100644 index 0000000000..f8f5f290f0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/container_attached_operation.go @@ -0,0 +1,75 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package containerattached + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type ContainerAttachedOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *ContainerAttachedOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + + region := tpgresource.GetRegionFromRegionalSelfLink(w.CommonOperationWaiter.Op.Name) + + // Returns the proper get. 
+ url := fmt.Sprintf("https://%s-gkemulticloud.googleapis.com/v1/%s", region, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createContainerAttachedWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*ContainerAttachedOperationWaiter, error) { + w := &ContainerAttachedOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func ContainerAttachedOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createContainerAttachedWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func ContainerAttachedOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createContainerAttachedWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/data_source_google_container_attached_install_manifest.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/data_source_google_container_attached_install_manifest.go new file mode 100644 index 0000000000..fec6ba611d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/data_source_google_container_attached_install_manifest.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package containerattached + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleContainerAttachedInstallManifest() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleContainerAttachedInstallManifestRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Required: true, + }, + "location": { + Type: schema.TypeString, + Required: true, + }, + "cluster_id": { + Type: schema.TypeString, + Required: true, + }, + "platform_version": { + Type: schema.TypeString, + Required: true, + }, + "manifest": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleContainerAttachedInstallManifestRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + clusterId := d.Get("cluster_id").(string) + platformVersion := d.Get("platform_version").(string) + + project, 
err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + location, err := tpgresource.GetLocation(d, config) + if err != nil { + return err + } + if len(location) == 0 { + return fmt.Errorf("Cannot determine location: set location in this data source or at provider-level") + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}:generateAttachedClusterInstallManifest") + if err != nil { + return err + } + params := map[string]string{ + "attached_cluster_id": clusterId, + "platform_version": platformVersion, + } + url, err = transport_tpg.AddQueryParams(url, params) + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return err + } + + if err := d.Set("manifest", res["manifest"]); err != nil { + return fmt.Errorf("Error setting manifest: %s", err) + } + + d.SetId(time.Now().UTC().String()) + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/data_source_google_container_attached_versions.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/data_source_google_container_attached_versions.go new file mode 100644 index 0000000000..374743530e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/data_source_google_container_attached_versions.go @@ -0,0 +1,80 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package containerattached + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleContainerAttachedVersions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleContainerAttachedVersionsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Required: true, + }, + "location": { + Type: schema.TypeString, + Required: true, + }, + "valid_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceGoogleContainerAttachedVersionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + location, err := tpgresource.GetLocation(d, config) + if err != nil { + return err + } + if len(location) == 0 { + return fmt.Errorf("Cannot determine location: set location in this data source or at provider-level") + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedServerConfig") + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return err + } + var validVersions []string + for _, v := range res["validVersions"].([]interface{}) { + vm := v.(map[string]interface{}) + validVersions = append(validVersions, vm["version"].(string)) + } + if err := d.Set("valid_versions", validVersions); err != nil { + return err + } 
+ + d.SetId(time.Now().UTC().String()) + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_attached_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/resource_container_attached_cluster.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_attached_cluster.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/resource_container_attached_cluster.go index d5c05b99b5..bf46f0735d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_attached_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerattached/resource_container_attached_cluster.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package containerattached import ( "fmt" @@ -22,6 +25,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func suppressAttachedClustersLoggingConfigDiff(_, old, new string, d *schema.ResourceData) bool { @@ -29,7 +36,7 @@ func suppressAttachedClustersLoggingConfigDiff(_, old, new string, d *schema.Res return true } _, n := d.GetChange("logging_config.0.component_config.0.enable_components") - if isEmptyValue(reflect.ValueOf(n)) { + if tpgresource.IsEmptyValue(reflect.ValueOf(n)) { return true } return false @@ -71,7 +78,7 @@ func ResourceContainerAttachedCluster() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateRegexp(`^projects/[0-9]+$`), + ValidateFunc: verify.ValidateRegexp(`^projects/[0-9]+$`), Description: `The number of the Fleet host project where this cluster will be registered.`, }, "membership": { @@ -194,7 +201,7 @@ than 255 UTF-8 encoded bytes.`, Description: `The components to be enabled. 
Possible values: ["SYSTEM_COMPONENTS", "WORKLOADS"]`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"SYSTEM_COMPONENTS", "WORKLOADS"}), + ValidateFunc: verify.ValidateEnum([]string{"SYSTEM_COMPONENTS", "WORKLOADS"}), }, }, }, @@ -326,8 +333,8 @@ the Workload Identity Pool.`, } func resourceContainerAttachedClusterCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -336,43 +343,43 @@ func resourceContainerAttachedClusterCreate(d *schema.ResourceData, meta interfa nameProp, err := expandContainerAttachedClusterName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } descriptionProp, err := expandContainerAttachedClusterDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } oidcConfigProp, err := expandContainerAttachedClusterOidcConfig(d.Get("oidc_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("oidc_config"); !isEmptyValue(reflect.ValueOf(oidcConfigProp)) && (ok || !reflect.DeepEqual(v, oidcConfigProp)) { + } else if v, ok := d.GetOkExists("oidc_config"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(oidcConfigProp)) && (ok || !reflect.DeepEqual(v, oidcConfigProp)) { obj["oidcConfig"] = oidcConfigProp } platformVersionProp, err := expandContainerAttachedClusterPlatformVersion(d.Get("platform_version"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("platform_version"); !isEmptyValue(reflect.ValueOf(platformVersionProp)) && (ok || !reflect.DeepEqual(v, platformVersionProp)) { + } else if v, ok := d.GetOkExists("platform_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(platformVersionProp)) && (ok || !reflect.DeepEqual(v, platformVersionProp)) { obj["platformVersion"] = platformVersionProp } distributionProp, err := expandContainerAttachedClusterDistribution(d.Get("distribution"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("distribution"); !isEmptyValue(reflect.ValueOf(distributionProp)) && (ok || !reflect.DeepEqual(v, distributionProp)) { + } else if v, ok := d.GetOkExists("distribution"); !tpgresource.IsEmptyValue(reflect.ValueOf(distributionProp)) && (ok || !reflect.DeepEqual(v, distributionProp)) { obj["distribution"] = distributionProp } fleetProp, err := expandContainerAttachedClusterFleet(d.Get("fleet"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("fleet"); !isEmptyValue(reflect.ValueOf(fleetProp)) && (ok || !reflect.DeepEqual(v, fleetProp)) { + } else if v, ok := d.GetOkExists("fleet"); !tpgresource.IsEmptyValue(reflect.ValueOf(fleetProp)) && (ok || !reflect.DeepEqual(v, fleetProp)) { obj["fleet"] = fleetProp } annotationsProp, err := expandContainerAttachedClusterAnnotations(d.Get("annotations"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("annotations"); !isEmptyValue(reflect.ValueOf(annotationsProp)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + } else if v, ok := d.GetOkExists("annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(annotationsProp)) && (ok || !reflect.DeepEqual(v, 
annotationsProp)) { obj["annotations"] = annotationsProp } loggingConfigProp, err := expandContainerAttachedClusterLoggingConfig(d.Get("logging_config"), d, config) @@ -384,17 +391,17 @@ func resourceContainerAttachedClusterCreate(d *schema.ResourceData, meta interfa authorizationProp, err := expandContainerAttachedClusterAuthorization(d.Get("authorization"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("authorization"); !isEmptyValue(reflect.ValueOf(authorizationProp)) && (ok || !reflect.DeepEqual(v, authorizationProp)) { + } else if v, ok := d.GetOkExists("authorization"); !tpgresource.IsEmptyValue(reflect.ValueOf(authorizationProp)) && (ok || !reflect.DeepEqual(v, authorizationProp)) { obj["authorization"] = authorizationProp } monitoringConfigProp, err := expandContainerAttachedClusterMonitoringConfig(d.Get("monitoring_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("monitoring_config"); !isEmptyValue(reflect.ValueOf(monitoringConfigProp)) && (ok || !reflect.DeepEqual(v, monitoringConfigProp)) { + } else if v, ok := d.GetOkExists("monitoring_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(monitoringConfigProp)) && (ok || !reflect.DeepEqual(v, monitoringConfigProp)) { obj["monitoringConfig"] = monitoringConfigProp } - url, err := replaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedClusters?attached_cluster_id={{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedClusters?attached_cluster_id={{name}}") if err != nil { return err } @@ -402,24 +409,32 @@ func resourceContainerAttachedClusterCreate(d *schema.ResourceData, meta interfa log.Printf("[DEBUG] Creating new Cluster: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for 
Cluster: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Cluster: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -443,7 +458,7 @@ func resourceContainerAttachedClusterCreate(d *schema.ResourceData, meta interfa } // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -455,33 +470,39 @@ func resourceContainerAttachedClusterCreate(d *schema.ResourceData, meta interfa } func resourceContainerAttachedClusterRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Cluster: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ContainerAttachedCluster %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ContainerAttachedCluster %q", d.Id())) } // Explicitly set virtual fields to 
default values if unset @@ -556,15 +577,15 @@ func resourceContainerAttachedClusterRead(d *schema.ResourceData, meta interface } func resourceContainerAttachedClusterUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Cluster: %s", err) } @@ -574,31 +595,31 @@ func resourceContainerAttachedClusterUpdate(d *schema.ResourceData, meta interfa descriptionProp, err := expandContainerAttachedClusterDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } oidcConfigProp, err := expandContainerAttachedClusterOidcConfig(d.Get("oidc_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("oidc_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, oidcConfigProp)) { + } else if v, ok := d.GetOkExists("oidc_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, oidcConfigProp)) { obj["oidcConfig"] = oidcConfigProp } platformVersionProp, err := expandContainerAttachedClusterPlatformVersion(d.Get("platform_version"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("platform_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, platformVersionProp)) { + } else if v, ok := d.GetOkExists("platform_version"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, platformVersionProp)) { obj["platformVersion"] = platformVersionProp } fleetProp, err := expandContainerAttachedClusterFleet(d.Get("fleet"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("fleet"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fleetProp)) { + } else if v, ok := d.GetOkExists("fleet"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fleetProp)) { obj["fleet"] = fleetProp } annotationsProp, err := expandContainerAttachedClusterAnnotations(d.Get("annotations"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("annotations"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + } else if v, ok := d.GetOkExists("annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { obj["annotations"] = annotationsProp } loggingConfigProp, err := expandContainerAttachedClusterLoggingConfig(d.Get("logging_config"), d, config) @@ -610,17 +631,17 @@ func resourceContainerAttachedClusterUpdate(d *schema.ResourceData, meta interfa authorizationProp, err := expandContainerAttachedClusterAuthorization(d.Get("authorization"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("authorization"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, authorizationProp)) { + } else if v, ok := d.GetOkExists("authorization"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, authorizationProp)) { obj["authorization"] = authorizationProp } monitoringConfigProp, err := expandContainerAttachedClusterMonitoringConfig(d.Get("monitoring_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("monitoring_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, monitoringConfigProp)) { + } else if v, ok := 
d.GetOkExists("monitoring_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, monitoringConfigProp)) { obj["monitoringConfig"] = monitoringConfigProp } - url, err := replaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") if err != nil { return err } @@ -659,9 +680,9 @@ func resourceContainerAttachedClusterUpdate(d *schema.ResourceData, meta interfa if d.HasChange("monitoring_config") { updateMask = append(updateMask, "monitoringConfig") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } @@ -684,17 +705,25 @@ func resourceContainerAttachedClusterUpdate(d *schema.ResourceData, meta interfa newUpdateMask = append(newUpdateMask, mask) } // Overwrite the previously set mask. 
- url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Cluster %q: %s", d.Id(), err) @@ -714,21 +743,21 @@ func resourceContainerAttachedClusterUpdate(d *schema.ResourceData, meta interfa } func resourceContainerAttachedClusterDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Cluster: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") if err != nil { return err } @@ -736,7 +765,7 @@ func resourceContainerAttachedClusterDelete(d *schema.ResourceData, meta interfa var obj 
map[string]interface{} if v, ok := d.GetOk("deletion_policy"); ok { if v == "DELETE_IGNORE_ERRORS" { - url, err = addQueryParams(url, map[string]string{"ignore_errors": "true"}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"ignore_errors": "true"}) if err != nil { return err } @@ -745,13 +774,21 @@ func resourceContainerAttachedClusterDelete(d *schema.ResourceData, meta interfa log.Printf("[DEBUG] Deleting Cluster %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "Cluster") + return transport_tpg.HandleNotFoundError(err, d, "Cluster") } err = ContainerAttachedOperationWaitTime( @@ -767,8 +804,8 @@ func resourceContainerAttachedClusterDelete(d *schema.ResourceData, meta interfa } func resourceContainerAttachedClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/attachedClusters/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -777,7 +814,7 @@ func resourceContainerAttachedClusterImport(d *schema.ResourceData, meta interfa } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") + id, err := tpgresource.ReplaceVars(d, 
config, "projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -791,18 +828,18 @@ func resourceContainerAttachedClusterImport(d *schema.ResourceData, meta interfa return []*schema.ResourceData{d}, nil } -func flattenContainerAttachedClusterName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return NameFromSelfLinkStateFunc(v) + return tpgresource.NameFromSelfLinkStateFunc(v) } -func flattenContainerAttachedClusterDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterOidcConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterOidcConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -817,27 +854,27 @@ func flattenContainerAttachedClusterOidcConfig(v interface{}, d *schema.Resource flattenContainerAttachedClusterOidcConfigJwks(original["jwks"], d, config) return []interface{}{transformed} } -func flattenContainerAttachedClusterOidcConfigIssuerUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterOidcConfigIssuerUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterOidcConfigJwks(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterOidcConfigJwks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterPlatformVersion(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterPlatformVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterDistribution(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterDistribution(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterClusterRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterClusterRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterFleet(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterFleet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -852,43 +889,43 @@ func flattenContainerAttachedClusterFleet(v interface{}, d *schema.ResourceData, flattenContainerAttachedClusterFleetProject(original["project"], d, config) return []interface{}{transformed} } -func flattenContainerAttachedClusterFleetMembership(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterFleetMembership(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterFleetProject(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterFleetProject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return 
v } -func flattenContainerAttachedClusterUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterReconciling(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterReconciling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterKubernetesVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterKubernetesVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterAnnotations(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterWorkloadIdentityConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterWorkloadIdentityConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -905,19 +942,19 @@ func flattenContainerAttachedClusterWorkloadIdentityConfig(v interface{}, d *sch 
flattenContainerAttachedClusterWorkloadIdentityConfigWorkloadPool(original["workloadPool"], d, config) return []interface{}{transformed} } -func flattenContainerAttachedClusterWorkloadIdentityConfigIdentityProvider(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterWorkloadIdentityConfigIdentityProvider(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterWorkloadIdentityConfigIssuerUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterWorkloadIdentityConfigIssuerUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterWorkloadIdentityConfigWorkloadPool(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterWorkloadIdentityConfigWorkloadPool(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterLoggingConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterLoggingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -927,7 +964,7 @@ func flattenContainerAttachedClusterLoggingConfig(v interface{}, d *schema.Resou flattenContainerAttachedClusterLoggingConfigComponentConfig(original["componentConfig"], d, config) return []interface{}{transformed} } -func flattenContainerAttachedClusterLoggingConfigComponentConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterLoggingConfigComponentConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -937,11 +974,11 @@ func flattenContainerAttachedClusterLoggingConfigComponentConfig(v interface{}, 
flattenContainerAttachedClusterLoggingConfigComponentConfigEnableComponents(original["enableComponents"], d, config) return []interface{}{transformed} } -func flattenContainerAttachedClusterLoggingConfigComponentConfigEnableComponents(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterLoggingConfigComponentConfigEnableComponents(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenContainerAttachedClusterErrors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterErrors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -959,25 +996,28 @@ func flattenContainerAttachedClusterErrors(v interface{}, d *schema.ResourceData } return transformed } -func flattenContainerAttachedClusterErrorsMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterErrorsMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } // The custom expander transforms input into something like this: -// authorization { -// admin_users [ -// { username = "user1" }, -// { username = "user2" } -// ] -// } +// +// authorization { +// admin_users [ +// { username = "user1" }, +// { username = "user2" } +// ] +// } +// // The custom flattener transforms input back into something like this: -// authorization { -// admin_users = [ -// "user1", -// "user2" -// ] -// } -func flattenContainerAttachedClusterAuthorization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +// +// authorization { +// admin_users = [ +// "user1", +// "user2" +// ] +// } +func flattenContainerAttachedClusterAuthorization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -994,7 +1034,7 @@ func 
flattenContainerAttachedClusterAuthorization(v interface{}, d *schema.Resou return []interface{}{transformed} } -func flattenContainerAttachedClusterMonitoringConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterMonitoringConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1004,7 +1044,7 @@ func flattenContainerAttachedClusterMonitoringConfig(v interface{}, d *schema.Re flattenContainerAttachedClusterMonitoringConfigManagedPrometheusConfig(original["managedPrometheusConfig"], d, config) return []interface{}{transformed} } -func flattenContainerAttachedClusterMonitoringConfigManagedPrometheusConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterMonitoringConfigManagedPrometheusConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1014,19 +1054,19 @@ func flattenContainerAttachedClusterMonitoringConfigManagedPrometheusConfig(v in flattenContainerAttachedClusterMonitoringConfigManagedPrometheusConfigEnabled(original["enabled"], d, config) return []interface{}{transformed} } -func flattenContainerAttachedClusterMonitoringConfigManagedPrometheusConfigEnabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenContainerAttachedClusterMonitoringConfigManagedPrometheusConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandContainerAttachedClusterName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandContainerAttachedClusterDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandContainerAttachedClusterDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandContainerAttachedClusterOidcConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterOidcConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1038,37 +1078,37 @@ func expandContainerAttachedClusterOidcConfig(v interface{}, d TerraformResource transformedIssuerUrl, err := expandContainerAttachedClusterOidcConfigIssuerUrl(original["issuer_url"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIssuerUrl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIssuerUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["issuerUrl"] = transformedIssuerUrl } transformedJwks, err := expandContainerAttachedClusterOidcConfigJwks(original["jwks"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedJwks); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedJwks); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["jwks"] = transformedJwks } return transformed, nil } -func expandContainerAttachedClusterOidcConfigIssuerUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterOidcConfigIssuerUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandContainerAttachedClusterOidcConfigJwks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterOidcConfigJwks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { return v, nil } -func expandContainerAttachedClusterPlatformVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterPlatformVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandContainerAttachedClusterDistribution(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterDistribution(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandContainerAttachedClusterFleet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterFleet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1080,29 +1120,29 @@ func expandContainerAttachedClusterFleet(v interface{}, d TerraformResourceData, transformedMembership, err := expandContainerAttachedClusterFleetMembership(original["membership"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMembership); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMembership); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["membership"] = transformedMembership } transformedProject, err := expandContainerAttachedClusterFleetProject(original["project"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProject); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProject); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["project"] = transformedProject } return transformed, nil } -func expandContainerAttachedClusterFleetMembership(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { +func expandContainerAttachedClusterFleetMembership(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandContainerAttachedClusterFleetProject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterFleetProject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandContainerAttachedClusterAnnotations(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandContainerAttachedClusterAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1113,7 +1153,7 @@ func expandContainerAttachedClusterAnnotations(v interface{}, d TerraformResourc return m, nil } -func expandContainerAttachedClusterLoggingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterLoggingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { transformed := make(map[string]interface{}) @@ -1133,7 +1173,7 @@ func expandContainerAttachedClusterLoggingConfig(v interface{}, d TerraformResou return transformed, nil } -func expandContainerAttachedClusterLoggingConfigComponentConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterLoggingConfigComponentConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1157,7 +1197,7 @@ func expandContainerAttachedClusterLoggingConfigComponentConfig(v interface{}, d return transformed, nil } -func 
expandContainerAttachedClusterLoggingConfigComponentConfigEnableComponents(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterLoggingConfigComponentConfigEnableComponents(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -1166,20 +1206,23 @@ type attachedClusterUser struct { } // The custom expander transforms input into something like this: -// authorization { -// admin_users [ -// { username = "user1" }, -// { username = "user2" } -// ] -// } +// +// authorization { +// admin_users [ +// { username = "user1" }, +// { username = "user2" } +// ] +// } +// // The custom flattener transforms input back into something like this: -// authorization { -// admin_users = [ -// "user1", -// "user2" -// ] -// } -func expandContainerAttachedClusterAuthorization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +// +// authorization { +// admin_users = [ +// "user1", +// "user2" +// ] +// } +func expandContainerAttachedClusterAuthorization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1196,7 +1239,7 @@ func expandContainerAttachedClusterAuthorization(v interface{}, d TerraformResou return transformed, nil } -func expandContainerAttachedClusterMonitoringConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterMonitoringConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1213,14 +1256,14 @@ func expandContainerAttachedClusterMonitoringConfig(v interface{}, d TerraformRe transformedManagedPrometheusConfig, err := 
expandContainerAttachedClusterMonitoringConfigManagedPrometheusConfig(original["managed_prometheus_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedManagedPrometheusConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedManagedPrometheusConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["managedPrometheusConfig"] = transformedManagedPrometheusConfig } return transformed, nil } -func expandContainerAttachedClusterMonitoringConfigManagedPrometheusConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterMonitoringConfigManagedPrometheusConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1237,13 +1280,13 @@ func expandContainerAttachedClusterMonitoringConfigManagedPrometheusConfig(v int transformedEnabled, err := expandContainerAttachedClusterMonitoringConfigManagedPrometheusConfigEnabled(original["enabled"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enabled"] = transformedEnabled } return transformed, nil } -func expandContainerAttachedClusterMonitoringConfigManagedPrometheusConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandContainerAttachedClusterMonitoringConfigManagedPrometheusConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/data_source_google_container_aws_versions.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/data_source_google_container_aws_versions.go new file mode 100644 index 0000000000..bf495bf18a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/data_source_google_container_aws_versions.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package containeraws + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleContainerAwsVersions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleContainerAwsVersionsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + }, + "location": { + Type: schema.TypeString, + Optional: true, + }, + "valid_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "supported_regions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceGoogleContainerAwsVersionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + location, err := tpgresource.GetLocation(d, config) + if err != nil { + return err + } + if len(location) == 0 { + return fmt.Errorf("Cannot determine location: set location in this data source or at provider-level") + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{ContainerAwsBasePath}}projects/{{project}}/locations/{{location}}/awsServerConfig") + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return err + } + if err := d.Set("supported_regions", res["supportedAwsRegions"]); err != nil { + return err + } + var validVersions []string + for _, v := range res["validVersions"].([]interface{}) { + vm := v.(map[string]interface{}) + validVersions = append(validVersions, vm["version"].(string)) + } + if err := d.Set("valid_versions", validVersions); err != nil { + return err + } + + d.SetId(time.Now().UTC().String()) + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_aws_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/resource_container_aws_cluster.go similarity index 89% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_aws_cluster.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/resource_container_aws_cluster.go index 8f547f843d..2c4b1024b3 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_aws_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/resource_container_aws_cluster.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package containeraws import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceContainerAwsCluster() *schema.Resource { @@ -94,7 +101,6 @@ func ResourceContainerAwsCluster() *schema.Resource { "networking": { Type: schema.TypeList, Required: true, - ForceNew: true, Description: "Cluster-wide networking configuration.", MaxItems: 1, Elem: ContainerAwsClusterNetworkingSchema(), @@ -118,7 +124,7 @@ func ResourceContainerAwsCluster() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -376,6 +382,14 @@ func ContainerAwsClusterControlPlaneMainVolumeSchema() *schema.Resource { Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", }, + "throughput": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The throughput to provision for the volume, in MiB/s. 
Only valid if the volume type is GP3.", + }, + "volume_type": { Type: schema.TypeString, Computed: true, @@ -428,6 +442,13 @@ func ContainerAwsClusterControlPlaneRootVolumeSchema() *schema.Resource { Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", }, + "throughput": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The throughput to provision for the volume, in MiB/s. Only valid if the volume type is GP3.", + }, + "volume_type": { Type: schema.TypeString, Computed: true, @@ -458,7 +479,7 @@ func ContainerAwsClusterFleetSchema() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The number of the Fleet host project where this cluster will be registered.", }, @@ -496,6 +517,12 @@ func ContainerAwsClusterNetworkingSchema() *schema.Resource { ForceNew: true, Description: "The VPC associated with the cluster. All component clusters (i.e. control plane and node pools) run on a single VPC. This field cannot be changed after creation.", }, + + "per_node_pool_sg_rules_disabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Disable the per node pool subnet security group rules on the control plane security group. When set to true, you must also provide one or more security groups that ensure node pools are able to send requests to the control plane on TCP/443 and TCP/8132. 
Failure to do so may result in unavailable node pools.", + }, }, } } @@ -525,8 +552,8 @@ func ContainerAwsClusterWorkloadIdentityConfigSchema() *schema.Resource { } func resourceContainerAwsClusterCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -539,7 +566,7 @@ func resourceContainerAwsClusterCreate(d *schema.ResourceData, meta interface{}) Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), Networking: expandContainerAwsClusterNetworking(d.Get("networking")), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), Description: dcl.String(d.Get("description").(string)), Project: dcl.String(project), } @@ -549,18 +576,18 @@ func resourceContainerAwsClusterCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could 
not format %q: %w", client.Config.BasePath, err) } else { @@ -582,8 +609,8 @@ func resourceContainerAwsClusterCreate(d *schema.ResourceData, meta interface{}) } func resourceContainerAwsClusterRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -596,22 +623,22 @@ func resourceContainerAwsClusterRead(d *schema.ResourceData, meta interface{}) e Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), Networking: expandContainerAwsClusterNetworking(d.Get("networking")), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), Description: dcl.String(d.Get("description").(string)), Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -620,7 +647,7 @@ func resourceContainerAwsClusterRead(d *schema.ResourceData, meta interface{}) e res, err := client.GetCluster(context.Background(), obj) if err != nil { resourceName := 
fmt.Sprintf("ContainerAwsCluster %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("authorization", flattenContainerAwsClusterAuthorization(res.Authorization)); err != nil { @@ -681,8 +708,8 @@ func resourceContainerAwsClusterRead(d *schema.ResourceData, meta interface{}) e return nil } func resourceContainerAwsClusterUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -695,23 +722,23 @@ func resourceContainerAwsClusterUpdate(d *schema.ResourceData, meta interface{}) Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), Networking: expandContainerAwsClusterNetworking(d.Get("networking")), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), Description: dcl.String(d.Get("description").(string)), Project: dcl.String(project), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, 
client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -733,8 +760,8 @@ func resourceContainerAwsClusterUpdate(d *schema.ResourceData, meta interface{}) } func resourceContainerAwsClusterDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -747,23 +774,23 @@ func resourceContainerAwsClusterDelete(d *schema.ResourceData, meta interface{}) Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), Networking: expandContainerAwsClusterNetworking(d.Get("networking")), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), Description: dcl.String(d.Get("description").(string)), Project: dcl.String(project), } log.Printf("[DEBUG] Deleting Cluster %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -778,9 +805,9 @@ func resourceContainerAwsClusterDelete(d 
*schema.ResourceData, meta interface{}) } func resourceContainerAwsClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/awsClusters/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -789,7 +816,7 @@ func resourceContainerAwsClusterImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/awsClusters/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/awsClusters/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -893,15 +920,15 @@ func expandContainerAwsClusterControlPlane(o interface{}) *containeraws.ClusterC ConfigEncryption: expandContainerAwsClusterControlPlaneConfigEncryption(obj["config_encryption"]), DatabaseEncryption: expandContainerAwsClusterControlPlaneDatabaseEncryption(obj["database_encryption"]), IamInstanceProfile: dcl.String(obj["iam_instance_profile"].(string)), - SubnetIds: expandStringArray(obj["subnet_ids"]), + SubnetIds: tpgdclresource.ExpandStringArray(obj["subnet_ids"]), Version: dcl.String(obj["version"].(string)), InstanceType: dcl.StringOrNil(obj["instance_type"].(string)), MainVolume: expandContainerAwsClusterControlPlaneMainVolume(obj["main_volume"]), ProxyConfig: expandContainerAwsClusterControlPlaneProxyConfig(obj["proxy_config"]), RootVolume: expandContainerAwsClusterControlPlaneRootVolume(obj["root_volume"]), - SecurityGroupIds: expandStringArray(obj["security_group_ids"]), + SecurityGroupIds: tpgdclresource.ExpandStringArray(obj["security_group_ids"]), SshConfig: expandContainerAwsClusterControlPlaneSshConfig(obj["ssh_config"]), - Tags: checkStringMap(obj["tags"]), + Tags: 
tpgresource.CheckStringMap(obj["tags"]), } } @@ -1022,6 +1049,7 @@ func expandContainerAwsClusterControlPlaneMainVolume(o interface{}) *containeraw Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + Throughput: dcl.Int64OrNil(int64(obj["throughput"].(int))), VolumeType: containeraws.ClusterControlPlaneMainVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), } } @@ -1034,6 +1062,7 @@ func flattenContainerAwsClusterControlPlaneMainVolume(obj *containeraws.ClusterC "iops": obj.Iops, "kms_key_arn": obj.KmsKeyArn, "size_gib": obj.SizeGib, + "throughput": obj.Throughput, "volume_type": obj.VolumeType, } @@ -1082,6 +1111,7 @@ func expandContainerAwsClusterControlPlaneRootVolume(o interface{}) *containeraw Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + Throughput: dcl.Int64OrNil(int64(obj["throughput"].(int))), VolumeType: containeraws.ClusterControlPlaneRootVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), } } @@ -1094,6 +1124,7 @@ func flattenContainerAwsClusterControlPlaneRootVolume(obj *containeraws.ClusterC "iops": obj.Iops, "kms_key_arn": obj.KmsKeyArn, "size_gib": obj.SizeGib, + "throughput": obj.Throughput, "volume_type": obj.VolumeType, } @@ -1164,9 +1195,10 @@ func expandContainerAwsClusterNetworking(o interface{}) *containeraws.ClusterNet } obj := objArr[0].(map[string]interface{}) return &containeraws.ClusterNetworking{ - PodAddressCidrBlocks: expandStringArray(obj["pod_address_cidr_blocks"]), - ServiceAddressCidrBlocks: expandStringArray(obj["service_address_cidr_blocks"]), - VPCId: dcl.String(obj["vpc_id"].(string)), + PodAddressCidrBlocks: tpgdclresource.ExpandStringArray(obj["pod_address_cidr_blocks"]), + ServiceAddressCidrBlocks: tpgdclresource.ExpandStringArray(obj["service_address_cidr_blocks"]), + VPCId: 
dcl.String(obj["vpc_id"].(string)), + PerNodePoolSgRulesDisabled: dcl.Bool(obj["per_node_pool_sg_rules_disabled"].(bool)), } } @@ -1175,9 +1207,10 @@ func flattenContainerAwsClusterNetworking(obj *containeraws.ClusterNetworking) i return nil } transformed := map[string]interface{}{ - "pod_address_cidr_blocks": obj.PodAddressCidrBlocks, - "service_address_cidr_blocks": obj.ServiceAddressCidrBlocks, - "vpc_id": obj.VPCId, + "pod_address_cidr_blocks": obj.PodAddressCidrBlocks, + "service_address_cidr_blocks": obj.ServiceAddressCidrBlocks, + "vpc_id": obj.VPCId, + "per_node_pool_sg_rules_disabled": obj.PerNodePoolSgRulesDisabled, } return []interface{}{transformed} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/resource_container_aws_cluster_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/resource_container_aws_cluster_sweeper.go new file mode 100644 index 0000000000..08e62e4608 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/resource_container_aws_cluster_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package containeraws + +import ( + "context" + "log" + "testing" + + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ContainerAwsCluster", testSweepContainerAwsCluster) +} + +func testSweepContainerAwsCluster(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ContainerAwsCluster") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLContainerAwsClient(config, config.UserAgent, "", 0) + err = client.DeleteAllCluster(context.Background(), d["project"], d["location"], isDeletableContainerAwsCluster) + if err != nil { + return err + } + return nil +} + +func isDeletableContainerAwsCluster(r *containeraws.Cluster) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_aws_node_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/resource_container_aws_node_pool.go similarity index 89% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_aws_node_pool.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/resource_container_aws_node_pool.go index 7abd907df4..0f5e123aa9 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_aws_node_pool.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containeraws/resource_container_aws_node_pool.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package containeraws import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceContainerAwsNodePool() *schema.Resource { @@ -57,7 +64,7 @@ func ResourceContainerAwsNodePool() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The awsCluster for the resource", }, @@ -117,7 +124,7 @@ func ResourceContainerAwsNodePool() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -341,6 +348,13 @@ func ContainerAwsNodePoolConfigRootVolumeSchema() *schema.Resource { Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", }, + "throughput": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: "Optional. The throughput to provision for the volume, in MiB/s. 
Only valid if the volume type is GP3.", + }, + "volume_type": { Type: schema.TypeString, Computed: true, @@ -404,8 +418,8 @@ func ContainerAwsNodePoolMaxPodsConstraintSchema() *schema.Resource { } func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -419,7 +433,7 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{} Name: dcl.String(d.Get("name").(string)), SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), Project: dcl.String(project), } @@ -428,18 +442,18 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{} return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, 
err) } else { @@ -461,8 +475,8 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{} } func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -476,21 +490,21 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) Name: dcl.String(d.Get("name").(string)), SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -499,7 +513,7 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) res, err := client.GetNodePool(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ContainerAwsNodePool %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return 
tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("autoscaling", flattenContainerAwsNodePoolAutoscaling(res.Autoscaling)); err != nil { @@ -554,8 +568,8 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) return nil } func resourceContainerAwsNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -569,22 +583,22 @@ func resourceContainerAwsNodePoolUpdate(d *schema.ResourceData, meta interface{} Name: dcl.String(d.Get("name").(string)), SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), Project: dcl.String(project), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -606,8 +620,8 @@ func resourceContainerAwsNodePoolUpdate(d 
*schema.ResourceData, meta interface{} } func resourceContainerAwsNodePoolDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -621,22 +635,22 @@ func resourceContainerAwsNodePoolDelete(d *schema.ResourceData, meta interface{} Name: dcl.String(d.Get("name").(string)), SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), Project: dcl.String(project), } log.Printf("[DEBUG] Deleting NodePool %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -651,9 +665,9 @@ func resourceContainerAwsNodePoolDelete(d *schema.ResourceData, meta interface{} } func resourceContainerAwsNodePoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := 
tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/awsClusters/(?P[^/]+)/awsNodePools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", @@ -662,7 +676,7 @@ func resourceContainerAwsNodePoolImport(d *schema.ResourceData, meta interface{} } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -713,12 +727,12 @@ func expandContainerAwsNodePoolConfig(o interface{}) *containeraws.NodePoolConfi IamInstanceProfile: dcl.String(obj["iam_instance_profile"].(string)), AutoscalingMetricsCollection: expandContainerAwsNodePoolConfigAutoscalingMetricsCollection(obj["autoscaling_metrics_collection"]), InstanceType: dcl.StringOrNil(obj["instance_type"].(string)), - Labels: checkStringMap(obj["labels"]), + Labels: tpgresource.CheckStringMap(obj["labels"]), ProxyConfig: expandContainerAwsNodePoolConfigProxyConfig(obj["proxy_config"]), RootVolume: expandContainerAwsNodePoolConfigRootVolume(obj["root_volume"]), - SecurityGroupIds: expandStringArray(obj["security_group_ids"]), + SecurityGroupIds: tpgdclresource.ExpandStringArray(obj["security_group_ids"]), SshConfig: expandContainerAwsNodePoolConfigSshConfig(obj["ssh_config"]), - Tags: checkStringMap(obj["tags"]), + Tags: tpgresource.CheckStringMap(obj["tags"]), Taints: expandContainerAwsNodePoolConfigTaintsArray(obj["taints"]), } } @@ -782,7 +796,7 @@ func expandContainerAwsNodePoolConfigAutoscalingMetricsCollection(o interface{}) obj := objArr[0].(map[string]interface{}) return &containeraws.NodePoolConfigAutoscalingMetricsCollection{ Granularity: dcl.String(obj["granularity"].(string)), - Metrics: expandStringArray(obj["metrics"]), 
+ Metrics: tpgdclresource.ExpandStringArray(obj["metrics"]), } } @@ -840,6 +854,7 @@ func expandContainerAwsNodePoolConfigRootVolume(o interface{}) *containeraws.Nod Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + Throughput: dcl.Int64OrNil(int64(obj["throughput"].(int))), VolumeType: containeraws.NodePoolConfigRootVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), } } @@ -852,6 +867,7 @@ func flattenContainerAwsNodePoolConfigRootVolume(obj *containeraws.NodePoolConfi "iops": obj.Iops, "kms_key_arn": obj.KmsKeyArn, "size_gib": obj.SizeGib, + "throughput": obj.Throughput, "volume_type": obj.VolumeType, } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/data_source_google_container_azure_versions.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/data_source_google_container_azure_versions.go new file mode 100644 index 0000000000..91a19101d9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/data_source_google_container_azure_versions.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package containerazure + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleContainerAzureVersions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleContainerAzureVersionsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + }, + "location": { + Type: schema.TypeString, + Optional: true, + }, + "valid_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "supported_regions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceGoogleContainerAzureVersionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + location, err := tpgresource.GetLocation(d, config) + if err != nil { + return err + } + if len(location) == 0 { + return fmt.Errorf("Cannot determine location: set location in this data source or at provider-level") + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAzureBasePath}}projects/{{project}}/locations/{{location}}/azureServerConfig") + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return err + } + if err := d.Set("supported_regions", res["supportedAzureRegions"]); err != nil { + return err + } + var validVersions []string + for _, v := range 
res["validVersions"].([]interface{}) { + vm := v.(map[string]interface{}) + validVersions = append(validVersions, vm["version"].(string)) + } + if err := d.Set("valid_versions", validVersions); err != nil { + return err + } + + d.SetId(time.Now().UTC().String()) + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_azure_client.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_client.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_azure_client.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_client.go index d795f95f28..d63114a4ee 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_azure_client.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package containerazure import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceContainerAzureClient() *schema.Resource { @@ -76,7 +83,7 @@ func ResourceContainerAzureClient() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -102,8 +109,8 @@ func ResourceContainerAzureClient() *schema.Resource { } func resourceContainerAzureClientCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -121,18 +128,18 @@ func resourceContainerAzureClientCreate(d *schema.ResourceData, meta interface{} return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -154,8 +161,8 @@ func resourceContainerAzureClientCreate(d *schema.ResourceData, meta interface{} } func resourceContainerAzureClientRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -168,17 +175,17 @@ func resourceContainerAzureClientRead(d *schema.ResourceData, meta interface{}) Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", 
client.Config.BasePath, err) } else { @@ -187,7 +194,7 @@ func resourceContainerAzureClientRead(d *schema.ResourceData, meta interface{}) res, err := client.GetClient(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ContainerAzureClient %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("application_id", res.ApplicationId); err != nil { @@ -219,8 +226,8 @@ func resourceContainerAzureClientRead(d *schema.ResourceData, meta interface{}) } func resourceContainerAzureClientDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -234,17 +241,17 @@ func resourceContainerAzureClientDelete(d *schema.ResourceData, meta interface{} } log.Printf("[DEBUG] Deleting Client %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -259,9 +266,9 @@ func resourceContainerAzureClientDelete(d *schema.ResourceData, meta 
interface{} } func resourceContainerAzureClientImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/azureClients/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -270,7 +277,7 @@ func resourceContainerAzureClientImport(d *schema.ResourceData, meta interface{} } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClients/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClients/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_client_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_client_sweeper.go new file mode 100644 index 0000000000..ae8f4f0a89 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_client_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. 
+// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package containerazure + +import ( + "context" + "log" + "testing" + + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ContainerAzureClient", testSweepContainerAzureClient) +} + +func testSweepContainerAzureClient(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ContainerAzureClient") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLContainerAzureClient(config, config.UserAgent, "", 0) + err = client.DeleteAllClient(context.Background(), d["project"], d["location"], isDeletableContainerAzureClient) + if err != nil { + return err + } + return nil +} + +func isDeletableContainerAzureClient(r *containerazure.AzureClient) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_azure_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_cluster.go similarity index 92% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_azure_cluster.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_cluster.go index 8cae6faf19..00c30fa446 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_azure_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_cluster.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package containerazure import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceContainerAzureCluster() *schema.Resource { @@ -127,7 +134,7 @@ func ResourceContainerAzureCluster() *schema.Resource { "client": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Name of the AzureClient. The `AzureClient` resource must reside on the same GCP project and region as the `AzureCluster`. `AzureClient` names are formatted as `projects//locations//azureClients/`. 
See Resource Names (https:cloud.google.com/apis/design/resource_names) for more details on Google Cloud resource names.", ConflictsWith: []string{"azure_services_authentication"}, }, @@ -143,7 +150,7 @@ func ResourceContainerAzureCluster() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -413,7 +420,7 @@ func ContainerAzureClusterFleetSchema() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The number of the Fleet host project where this cluster will be registered.", }, @@ -498,8 +505,8 @@ func ContainerAzureClusterWorkloadIdentityConfigSchema() *schema.Resource { } func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -513,7 +520,7 @@ func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{ Name: dcl.String(d.Get("name").(string)), Networking: expandContainerAzureClusterNetworking(d.Get("networking")), ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), AzureServicesAuthentication: expandContainerAzureClusterAzureServicesAuthentication(d.Get("azure_services_authentication")), Client: dcl.String(d.Get("client").(string)), Description: dcl.String(d.Get("description").(string)), @@ -525,18 +532,18 @@ func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := 
CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -558,8 +565,8 @@ func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{ } func resourceContainerAzureClusterRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -573,24 +580,24 @@ func resourceContainerAzureClusterRead(d *schema.ResourceData, meta interface{}) Name: dcl.String(d.Get("name").(string)), Networking: expandContainerAzureClusterNetworking(d.Get("networking")), ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), AzureServicesAuthentication: expandContainerAzureClusterAzureServicesAuthentication(d.Get("azure_services_authentication")), Client: dcl.String(d.Get("client").(string)), Description: dcl.String(d.Get("description").(string)), Project: 
dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -599,7 +606,7 @@ func resourceContainerAzureClusterRead(d *schema.ResourceData, meta interface{}) res, err := client.GetCluster(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ContainerAzureCluster %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("authorization", flattenContainerAzureClusterAuthorization(res.Authorization)); err != nil { @@ -669,8 +676,8 @@ func resourceContainerAzureClusterRead(d *schema.ResourceData, meta interface{}) return nil } func resourceContainerAzureClusterUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -684,25 +691,25 @@ func resourceContainerAzureClusterUpdate(d *schema.ResourceData, meta interface{ Name: dcl.String(d.Get("name").(string)), Networking: 
expandContainerAzureClusterNetworking(d.Get("networking")), ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), AzureServicesAuthentication: expandContainerAzureClusterAzureServicesAuthentication(d.Get("azure_services_authentication")), Client: dcl.String(d.Get("client").(string)), Description: dcl.String(d.Get("description").(string)), Project: dcl.String(project), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -724,8 +731,8 @@ func resourceContainerAzureClusterUpdate(d *schema.ResourceData, meta interface{ } func resourceContainerAzureClusterDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -739,7 +746,7 @@ func resourceContainerAzureClusterDelete(d *schema.ResourceData, meta interface{ Name: 
dcl.String(d.Get("name").(string)), Networking: expandContainerAzureClusterNetworking(d.Get("networking")), ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), AzureServicesAuthentication: expandContainerAzureClusterAzureServicesAuthentication(d.Get("azure_services_authentication")), Client: dcl.String(d.Get("client").(string)), Description: dcl.String(d.Get("description").(string)), @@ -747,17 +754,17 @@ func resourceContainerAzureClusterDelete(d *schema.ResourceData, meta interface{ } log.Printf("[DEBUG] Deleting Cluster %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -772,9 +779,9 @@ func resourceContainerAzureClusterDelete(d *schema.ResourceData, meta interface{ } func resourceContainerAzureClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ 
"projects/(?P[^/]+)/locations/(?P[^/]+)/azureClusters/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -783,7 +790,7 @@ func resourceContainerAzureClusterImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClusters/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClusters/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -891,7 +898,7 @@ func expandContainerAzureClusterControlPlane(o interface{}) *containerazure.Clus ProxyConfig: expandContainerAzureClusterControlPlaneProxyConfig(obj["proxy_config"]), ReplicaPlacements: expandContainerAzureClusterControlPlaneReplicaPlacementsArray(obj["replica_placements"]), RootVolume: expandContainerAzureClusterControlPlaneRootVolume(obj["root_volume"]), - Tags: checkStringMap(obj["tags"]), + Tags: tpgresource.CheckStringMap(obj["tags"]), VmSize: dcl.StringOrNil(obj["vm_size"].(string)), } } @@ -1143,8 +1150,8 @@ func expandContainerAzureClusterNetworking(o interface{}) *containerazure.Cluste } obj := objArr[0].(map[string]interface{}) return &containerazure.ClusterNetworking{ - PodAddressCidrBlocks: expandStringArray(obj["pod_address_cidr_blocks"]), - ServiceAddressCidrBlocks: expandStringArray(obj["service_address_cidr_blocks"]), + PodAddressCidrBlocks: tpgdclresource.ExpandStringArray(obj["pod_address_cidr_blocks"]), + ServiceAddressCidrBlocks: tpgdclresource.ExpandStringArray(obj["service_address_cidr_blocks"]), VirtualNetworkId: dcl.String(obj["virtual_network_id"].(string)), } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_cluster_sweeper.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_cluster_sweeper.go new file mode 100644 index 0000000000..f5ab7f06f6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_cluster_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package containerazure + +import ( + "context" + "log" + "testing" + + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ContainerAzureCluster", testSweepContainerAzureCluster) +} + +func testSweepContainerAzureCluster(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ContainerAzureCluster") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLContainerAzureClient(config, config.UserAgent, "", 0) + err = client.DeleteAllCluster(context.Background(), d["project"], d["location"], isDeletableContainerAzureCluster) + if err != nil { + return err + } + return nil +} + +func isDeletableContainerAzureCluster(r *containerazure.Cluster) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_azure_node_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_node_pool.go similarity index 88% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_azure_node_pool.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_node_pool.go index d2f28111b6..56f790deb2 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_container_azure_node_pool.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/containerazure/resource_container_azure_node_pool.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package containerazure import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceContainerAzureNodePool() *schema.Resource { @@ -57,7 +64,7 @@ func ResourceContainerAzureNodePool() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The azureCluster for the resource", }, @@ -125,7 +132,7 @@ func ResourceContainerAzureNodePool() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -295,8 +302,8 @@ func ContainerAzureNodePoolMaxPodsConstraintSchema() *schema.Resource { } func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -310,7 +317,7 @@ func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface Name: dcl.String(d.Get("name").(string)), SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: 
dcl.String(d.Get("version").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), Project: dcl.String(project), } @@ -320,18 +327,18 @@ func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -353,8 +360,8 @@ func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface } func resourceContainerAzureNodePoolRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -368,22 +375,22 @@ func resourceContainerAzureNodePoolRead(d *schema.ResourceData, meta interface{} Name: dcl.String(d.Get("name").(string)), SubnetId: dcl.String(d.Get("subnet_id").(string)), 
Version: dcl.String(d.Get("version").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -392,7 +399,7 @@ func resourceContainerAzureNodePoolRead(d *schema.ResourceData, meta interface{} res, err := client.GetNodePool(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("ContainerAzureNodePool %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("autoscaling", flattenContainerAzureNodePoolAutoscaling(res.Autoscaling)); err != nil { @@ -450,8 +457,8 @@ func resourceContainerAzureNodePoolRead(d *schema.ResourceData, meta interface{} return nil } func resourceContainerAzureNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := 
tpgresource.GetProject(d, config) if err != nil { return err } @@ -465,23 +472,23 @@ func resourceContainerAzureNodePoolUpdate(d *schema.ResourceData, meta interface Name: dcl.String(d.Get("name").(string)), SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), Project: dcl.String(project), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -503,8 +510,8 @@ func resourceContainerAzureNodePoolUpdate(d *schema.ResourceData, meta interface } func resourceContainerAzureNodePoolDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -518,23 +525,23 @@ func resourceContainerAzureNodePoolDelete(d *schema.ResourceData, meta 
interface Name: dcl.String(d.Get("name").(string)), SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), - Annotations: checkStringMap(d.Get("annotations")), + Annotations: tpgresource.CheckStringMap(d.Get("annotations")), AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), Project: dcl.String(project), } log.Printf("[DEBUG] Deleting NodePool %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -549,9 +556,9 @@ func resourceContainerAzureNodePoolDelete(d *schema.ResourceData, meta interface } func resourceContainerAzureNodePoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/azureClusters/(?P[^/]+)/azureNodePools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", @@ -560,7 +567,7 @@ func resourceContainerAzureNodePoolImport(d *schema.ResourceData, meta interface } // Replace import 
id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -610,7 +617,7 @@ func expandContainerAzureNodePoolConfig(o interface{}) *containerazure.NodePoolC SshConfig: expandContainerAzureNodePoolConfigSshConfig(obj["ssh_config"]), ProxyConfig: expandContainerAzureNodePoolConfigProxyConfig(obj["proxy_config"]), RootVolume: expandContainerAzureNodePoolConfigRootVolume(obj["root_volume"]), - Tags: checkStringMap(obj["tags"]), + Tags: tpgresource.CheckStringMap(obj["tags"]), VmSize: dcl.StringOrNil(obj["vm_size"].(string)), } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/database_migration_service_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/database_migration_service_operation.go new file mode 100644 index 0000000000..dffe7285e6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/database_migration_service_operation.go @@ -0,0 +1,74 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package databasemigrationservice + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type DatabaseMigrationServiceOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *DatabaseMigrationServiceOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.DatabaseMigrationServiceBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createDatabaseMigrationServiceWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*DatabaseMigrationServiceOperationWaiter, error) { + w := &DatabaseMigrationServiceOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +func DatabaseMigrationServiceOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createDatabaseMigrationServiceWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_connection_profile.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_connection_profile.go new file mode 100644 index 0000000000..64a1c9dc9d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_connection_profile.go @@ -0,0 +1,2477 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package databasemigrationservice + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDatabaseMigrationServiceConnectionProfile() *schema.Resource { + return &schema.Resource{ + Create: resourceDatabaseMigrationServiceConnectionProfileCreate, + Read: resourceDatabaseMigrationServiceConnectionProfileRead, + Update: resourceDatabaseMigrationServiceConnectionProfileUpdate, + Delete: resourceDatabaseMigrationServiceConnectionProfileDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDatabaseMigrationServiceConnectionProfileImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "connection_profile_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the connection profile.`, + }, + "alloydb": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies required connection parameters, and the parameters required to create an AlloyDB destination cluster.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + Description: `Required. The AlloyDB cluster ID that this connection profile is associated with.`, + }, + "settings": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Immutable. 
Metadata used to create the destination AlloyDB cluster.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "initial_user": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `Required. Input only. Initial user to setup during cluster creation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "password": { + Type: schema.TypeString, + Required: true, + Description: `The initial password for the user.`, + Sensitive: true, + }, + "user": { + Type: schema.TypeString, + Required: true, + Description: `The database username.`, + }, + "password_set": { + Type: schema.TypeBool, + Computed: true, + Description: `Output only. Indicates if the initialUser.password field has been set.`, + }, + }, + }, + }, + "vpc_network": { + Type: schema.TypeString, + Required: true, + Description: `Required. The resource link for the VPC network in which cluster resources are created and from which they are accessible via Private IP. The network must belong to the same project as the cluster. +It is specified in the form: 'projects/{project_number}/global/networks/{network_id}'. 
This is required to create a cluster.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels for the AlloyDB cluster created by DMS.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "primary_instance_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Settings for the cluster's primary instance`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + Description: `The database username.`, + }, + "machine_config": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `Configuration for the machines that host the underlying database engine.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu_count": { + Type: schema.TypeInt, + Required: true, + Description: `The number of CPU's in the VM instance.`, + }, + }, + }, + }, + "database_flags": { + Type: schema.TypeMap, + Optional: true, + Description: `Database flags to pass to AlloyDB when DMS is creating the AlloyDB cluster and instances. See the AlloyDB documentation for how these can be used.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels for the AlloyDB primary instance created by DMS.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "private_ip": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The private IP address for the Instance. 
This is the connection endpoint for an end-user application.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"mysql", "postgresql", "cloudsql", "alloydb"}, + }, + "cloudsql": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies required connection parameters, and, optionally, the parameters required to create a Cloud SQL destination database instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "settings": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Immutable. Metadata used to create the destination Cloud SQL database.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_id": { + Type: schema.TypeString, + Required: true, + Description: `The Database Migration Service source connection profile ID, in the format: projects/my_project_name/locations/us-central1/connectionProfiles/connection_profile_ID`, + }, + "activation_policy": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ALWAYS", "NEVER", ""}), + Description: `The activation policy specifies when the instance is activated; it is applicable only when the instance state is 'RUNNABLE'. Possible values: ["ALWAYS", "NEVER"]`, + }, + "auto_storage_increase": { + Type: schema.TypeBool, + Optional: true, + Description: `If you enable this setting, Cloud SQL checks your available storage every 30 seconds. If the available storage falls below a threshold size, Cloud SQL automatically adds additional storage capacity. 
+If the available storage repeatedly falls below the threshold size, Cloud SQL continues to add storage until it reaches the maximum of 30 TB.`, + }, + "cmek_key_name": { + Type: schema.TypeString, + Optional: true, + Description: `The KMS key name used for the csql instance.`, + }, + "collation": { + Type: schema.TypeString, + Optional: true, + Description: `The Cloud SQL default instance level collation.`, + }, + "data_disk_size_gb": { + Type: schema.TypeString, + Optional: true, + Description: `The storage capacity available to the database, in GB. The minimum (and default) size is 10GB.`, + }, + "data_disk_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"PD_SSD", "PD_HDD", ""}), + Description: `The type of storage. Possible values: ["PD_SSD", "PD_HDD"]`, + }, + "database_flags": { + Type: schema.TypeMap, + Optional: true, + Description: `The database flags passed to the Cloud SQL instance at startup.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "database_version": { + Type: schema.TypeString, + Optional: true, + Description: `The database engine type and version. +Currently supported values located at https://cloud.google.com/database-migration/docs/reference/rest/v1/projects.locations.connectionProfiles#sqldatabaseversion`, + }, + "ip_config": { + Type: schema.TypeList, + Optional: true, + Description: `The settings for IP Management. This allows to enable or disable the instance IP and manage which external networks can connect to the instance. 
The IPv4 address cannot be disabled.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_networks": { + Type: schema.TypeList, + Optional: true, + Description: `The list of external networks that are allowed to connect to the instance using the IP.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "value": { + Type: schema.TypeString, + Required: true, + Description: `The allowlisted value for the access control list.`, + }, + "expire_time": { + Type: schema.TypeString, + Optional: true, + Description: `The time when this access control entry expires in RFC 3339 format.`, + ExactlyOneOf: []string{}, + }, + "label": { + Type: schema.TypeString, + Optional: true, + Description: `A label to identify this entry.`, + }, + "ttl": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Input only. The time-to-leave of this access control entry.`, + }, + }, + }, + }, + "enable_ipv4": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether the instance should be assigned an IPv4 address or not.`, + }, + "private_network": { + Type: schema.TypeString, + Optional: true, + Description: `The resource link for the VPC network from which the Cloud SQL instance is accessible for private IP. For example, projects/myProject/global/networks/default. +This setting can be updated, but it cannot be removed after it is set.`, + }, + "require_ssl": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether SSL connections over IP should be enforced or not.`, + }, + }, + }, + }, + "root_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Input only. Initial root password.`, + Sensitive: true, + }, + "storage_auto_resize_limit": { + Type: schema.TypeString, + Optional: true, + Description: `The maximum size to which storage capacity can be automatically increased. 
The default value is 0, which specifies that there is no limit.`, + }, + "tier": { + Type: schema.TypeString, + Optional: true, + Description: `The tier (or machine type) for this instance, for example: db-n1-standard-1 (MySQL instances) or db-custom-1-3840 (PostgreSQL instances). +For more information, see https://cloud.google.com/sql/docs/mysql/instance-settings`, + }, + "user_labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The resource labels for a Cloud SQL instance to use to annotate any related underlying resources such as Compute Engine VMs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + Description: `The Google Cloud Platform zone where your Cloud SQL datdabse instance is located.`, + }, + "root_password_set": { + Type: schema.TypeBool, + Computed: true, + Description: `Output only. Indicates If this connection profile root password is stored.`, + }, + }, + }, + }, + "cloud_sql_id": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The Cloud SQL instance ID that this connection profile is associated with.`, + }, + "private_ip": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The Cloud SQL database instance's private IP.`, + }, + "public_ip": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
The Cloud SQL database instance's public IP.`, + }, + }, + }, + ExactlyOneOf: []string{"mysql", "postgresql", "cloudsql", "alloydb"}, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `The connection profile display name.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The resource labels for connection profile to use to annotate any related underlying resources such as Compute Engine VMs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The location where the connection profile should reside.`, + }, + "mysql": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies connection parameters required specifically for MySQL databases.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Required: true, + Description: `Required. The IP or hostname of the source MySQL database.`, + }, + "password": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. +This field is not returned on request, and the value is encrypted when stored in Database Migration Service.`, + Sensitive: true, + }, + "port": { + Type: schema.TypeInt, + Required: true, + Description: `Required. The network port of the source MySQL database.`, + }, + "username": { + Type: schema.TypeString, + Required: true, + Description: `Required. The username that Database Migration Service will use to connect to the database. 
The value is encrypted when stored in Database Migration Service.`, + }, + "cloud_sql_id": { + Type: schema.TypeString, + Optional: true, + Description: `If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source.`, + }, + "ssl": { + Type: schema.TypeList, + Optional: true, + Description: `SSL configuration for the destination to connect to the source database.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ca_certificate": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. +The replica will use this certificate to verify it's connecting to the right host.`, + Sensitive: true, + }, + "client_certificate": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server. +If this field is used then the 'clientKey' field is mandatory`, + Sensitive: true, + }, + "client_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. +If this field is used then the 'clientCertificate' field is mandatory.`, + Sensitive: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `The current connection profile state.`, + }, + }, + }, + }, + "password_set": { + Type: schema.TypeBool, + Computed: true, + Description: `Output only. 
Indicates If this connection profile password is stored.`, + }, + }, + }, + ExactlyOneOf: []string{"mysql", "postgresql", "cloudsql", "alloydb"}, + }, + "postgresql": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies connection parameters required specifically for PostgreSQL databases.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Required: true, + Description: `Required. The IP or hostname of the source MySQL database.`, + }, + "password": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. +This field is not returned on request, and the value is encrypted when stored in Database Migration Service.`, + Sensitive: true, + }, + "port": { + Type: schema.TypeInt, + Required: true, + Description: `Required. The network port of the source MySQL database.`, + }, + "username": { + Type: schema.TypeString, + Required: true, + Description: `Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service.`, + }, + "cloud_sql_id": { + Type: schema.TypeString, + Optional: true, + Description: `If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source.`, + }, + "ssl": { + Type: schema.TypeList, + Optional: true, + Description: `SSL configuration for the destination to connect to the source database.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ca_certificate": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. Input only. The x509 PEM-encoded certificate of the CA that signed the source database server's certificate. 
+The replica will use this certificate to verify it's connecting to the right host.`, + Sensitive: true, + }, + "client_certificate": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Input only. The x509 PEM-encoded certificate that will be used by the replica to authenticate against the source database server. +If this field is used then the 'clientKey' field is mandatory`, + Sensitive: true, + RequiredWith: []string{}, + }, + "client_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Input only. The unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with the Client Certificate. +If this field is used then the 'clientCertificate' field is mandatory.`, + Sensitive: true, + RequiredWith: []string{}, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `The current connection profile state.`, + }, + }, + }, + }, + "network_architecture": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. If the source is a Cloud SQL database, this field indicates the network architecture it's associated with.`, + }, + "password_set": { + Type: schema.TypeBool, + Computed: true, + Description: `Output only. Indicates If this connection profile password is stored.`, + }, + }, + }, + ExactlyOneOf: []string{"mysql", "postgresql", "cloudsql", "alloydb"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The timestamp when the resource was created. A timestamp in RFC3339 UTC 'Zulu' format, accurate to nanoseconds. Example: '2014-10-02T15:01:23.045123456Z'.`, + }, + "dbprovider": { + Type: schema.TypeString, + Computed: true, + Description: `The database provider.`, + }, + "error": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. 
The error details in case of state FAILED.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": { + Type: schema.TypeInt, + Computed: true, + Description: `The status code, which should be an enum value of google.rpc.Code.`, + }, + "details": { + Type: schema.TypeList, + Computed: true, + Description: `A list of messages that carry the error details.`, + Elem: &schema.Schema{ + Type: schema.TypeMap, + }, + }, + "message": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable message indicating details about the current status.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of this connection profile resource in the form of projects/{project}/locations/{location}/connectionProfiles/{connectionProfile}.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current connection profile state.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDatabaseMigrationServiceConnectionProfileCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDatabaseMigrationServiceConnectionProfileDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandDatabaseMigrationServiceConnectionProfileLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || 
!reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + mysqlProp, err := expandDatabaseMigrationServiceConnectionProfileMysql(d.Get("mysql"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("mysql"); !tpgresource.IsEmptyValue(reflect.ValueOf(mysqlProp)) && (ok || !reflect.DeepEqual(v, mysqlProp)) { + obj["mysql"] = mysqlProp + } + postgresqlProp, err := expandDatabaseMigrationServiceConnectionProfilePostgresql(d.Get("postgresql"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("postgresql"); !tpgresource.IsEmptyValue(reflect.ValueOf(postgresqlProp)) && (ok || !reflect.DeepEqual(v, postgresqlProp)) { + obj["postgresql"] = postgresqlProp + } + cloudsqlProp, err := expandDatabaseMigrationServiceConnectionProfileCloudsql(d.Get("cloudsql"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cloudsql"); !tpgresource.IsEmptyValue(reflect.ValueOf(cloudsqlProp)) && (ok || !reflect.DeepEqual(v, cloudsqlProp)) { + obj["cloudsql"] = cloudsqlProp + } + alloydbProp, err := expandDatabaseMigrationServiceConnectionProfileAlloydb(d.Get("alloydb"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("alloydb"); !tpgresource.IsEmptyValue(reflect.ValueOf(alloydbProp)) && (ok || !reflect.DeepEqual(v, alloydbProp)) { + obj["alloydb"] = alloydbProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DatabaseMigrationServiceBasePath}}projects/{{project}}/locations/{{location}}/connectionProfiles?connectionProfileId={{connection_profile_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ConnectionProfile: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ConnectionProfile: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, 
config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ConnectionProfile: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = DatabaseMigrationServiceOperationWaitTime( + config, res, project, "Creating ConnectionProfile", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create ConnectionProfile: %s", err) + } + + log.Printf("[DEBUG] Finished creating ConnectionProfile %q: %#v", d.Id(), res) + + return resourceDatabaseMigrationServiceConnectionProfileRead(d, meta) +} + +func resourceDatabaseMigrationServiceConnectionProfileRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DatabaseMigrationServiceBasePath}}projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ConnectionProfile: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DatabaseMigrationServiceConnectionProfile %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ConnectionProfile: %s", err) + } + + if err := d.Set("name", flattenDatabaseMigrationServiceConnectionProfileName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectionProfile: %s", err) + } + if err := d.Set("display_name", flattenDatabaseMigrationServiceConnectionProfileDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectionProfile: %s", err) + } + if err := d.Set("create_time", flattenDatabaseMigrationServiceConnectionProfileCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectionProfile: %s", err) + } + if err := d.Set("labels", flattenDatabaseMigrationServiceConnectionProfileLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectionProfile: %s", err) + } + if err := d.Set("state", flattenDatabaseMigrationServiceConnectionProfileState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectionProfile: %s", err) + } + if err := d.Set("error", flattenDatabaseMigrationServiceConnectionProfileError(res["error"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectionProfile: %s", err) + } + if err := d.Set("dbprovider", flattenDatabaseMigrationServiceConnectionProfileDbprovider(res["provider"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectionProfile: %s", err) + } + if err := d.Set("mysql", flattenDatabaseMigrationServiceConnectionProfileMysql(res["mysql"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectionProfile: %s", err) + } + if 
err := d.Set("postgresql", flattenDatabaseMigrationServiceConnectionProfilePostgresql(res["postgresql"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectionProfile: %s", err) + } + if err := d.Set("cloudsql", flattenDatabaseMigrationServiceConnectionProfileCloudsql(res["cloudsql"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectionProfile: %s", err) + } + if err := d.Set("alloydb", flattenDatabaseMigrationServiceConnectionProfileAlloydb(res["alloydb"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectionProfile: %s", err) + } + + return nil +} + +func resourceDatabaseMigrationServiceConnectionProfileUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ConnectionProfile: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandDatabaseMigrationServiceConnectionProfileDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandDatabaseMigrationServiceConnectionProfileLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + mysqlProp, err := expandDatabaseMigrationServiceConnectionProfileMysql(d.Get("mysql"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("mysql"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok 
|| !reflect.DeepEqual(v, mysqlProp)) { + obj["mysql"] = mysqlProp + } + postgresqlProp, err := expandDatabaseMigrationServiceConnectionProfilePostgresql(d.Get("postgresql"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("postgresql"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, postgresqlProp)) { + obj["postgresql"] = postgresqlProp + } + cloudsqlProp, err := expandDatabaseMigrationServiceConnectionProfileCloudsql(d.Get("cloudsql"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cloudsql"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cloudsqlProp)) { + obj["cloudsql"] = cloudsqlProp + } + alloydbProp, err := expandDatabaseMigrationServiceConnectionProfileAlloydb(d.Get("alloydb"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("alloydb"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, alloydbProp)) { + obj["alloydb"] = alloydbProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DatabaseMigrationServiceBasePath}}projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ConnectionProfile %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("mysql") { + updateMask = append(updateMask, "mysql") + } + + if d.HasChange("postgresql") { + updateMask = append(updateMask, "postgresql") + } + + if d.HasChange("cloudsql") { + updateMask = append(updateMask, "cloudsql") + } + + if d.HasChange("alloydb") { + updateMask = append(updateMask, "alloydb") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, 
map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating ConnectionProfile %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ConnectionProfile %q: %#v", d.Id(), res) + } + + err = DatabaseMigrationServiceOperationWaitTime( + config, res, project, "Updating ConnectionProfile", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceDatabaseMigrationServiceConnectionProfileRead(d, meta) +} + +func resourceDatabaseMigrationServiceConnectionProfileDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ConnectionProfile: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DatabaseMigrationServiceBasePath}}projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting ConnectionProfile %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: 
config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ConnectionProfile") + } + + err = DatabaseMigrationServiceOperationWaitTime( + config, res, project, "Deleting ConnectionProfile", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ConnectionProfile %q: %#v", d.Id(), res) + return nil +} + +func resourceDatabaseMigrationServiceConnectionProfileImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/connectionProfiles/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDatabaseMigrationServiceConnectionProfileName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileState(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileError(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["code"] = + flattenDatabaseMigrationServiceConnectionProfileErrorCode(original["code"], d, config) + transformed["message"] = + flattenDatabaseMigrationServiceConnectionProfileErrorMessage(original["message"], d, config) + transformed["details"] = + flattenDatabaseMigrationServiceConnectionProfileErrorDetails(original["details"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceConnectionProfileErrorCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatabaseMigrationServiceConnectionProfileErrorMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileErrorDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileDbprovider(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileMysql(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["host"] = + flattenDatabaseMigrationServiceConnectionProfileMysqlHost(original["host"], d, config) + transformed["port"] = + flattenDatabaseMigrationServiceConnectionProfileMysqlPort(original["port"], d, config) + transformed["username"] = + flattenDatabaseMigrationServiceConnectionProfileMysqlUsername(original["username"], d, config) + transformed["password"] = + flattenDatabaseMigrationServiceConnectionProfileMysqlPassword(original["password"], d, config) + transformed["password_set"] = + flattenDatabaseMigrationServiceConnectionProfileMysqlPasswordSet(original["passwordSet"], d, config) + transformed["ssl"] = + flattenDatabaseMigrationServiceConnectionProfileMysqlSsl(original["ssl"], d, config) + transformed["cloud_sql_id"] = + flattenDatabaseMigrationServiceConnectionProfileMysqlCloudSqlId(original["cloudSqlId"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceConnectionProfileMysqlHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileMysqlPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatabaseMigrationServiceConnectionProfileMysqlUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileMysqlPassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { 
+ return d.Get("mysql.0.password") +} + +func flattenDatabaseMigrationServiceConnectionProfileMysqlPasswordSet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileMysqlSsl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = + flattenDatabaseMigrationServiceConnectionProfileMysqlSslType(original["type"], d, config) + transformed["client_key"] = + flattenDatabaseMigrationServiceConnectionProfileMysqlSslClientKey(original["clientKey"], d, config) + transformed["client_certificate"] = + flattenDatabaseMigrationServiceConnectionProfileMysqlSslClientCertificate(original["clientCertificate"], d, config) + transformed["ca_certificate"] = + flattenDatabaseMigrationServiceConnectionProfileMysqlSslCaCertificate(original["caCertificate"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceConnectionProfileMysqlSslType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileMysqlSslClientKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("mysql.0.ssl.0.client_key") +} + +func flattenDatabaseMigrationServiceConnectionProfileMysqlSslClientCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("mysql.0.ssl.0.client_certificate") +} + +func flattenDatabaseMigrationServiceConnectionProfileMysqlSslCaCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("mysql.0.ssl.0.ca_certificate") +} + +func flattenDatabaseMigrationServiceConnectionProfileMysqlCloudSqlId(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfilePostgresql(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["host"] = + flattenDatabaseMigrationServiceConnectionProfilePostgresqlHost(original["host"], d, config) + transformed["port"] = + flattenDatabaseMigrationServiceConnectionProfilePostgresqlPort(original["port"], d, config) + transformed["username"] = + flattenDatabaseMigrationServiceConnectionProfilePostgresqlUsername(original["username"], d, config) + transformed["password"] = + flattenDatabaseMigrationServiceConnectionProfilePostgresqlPassword(original["password"], d, config) + transformed["password_set"] = + flattenDatabaseMigrationServiceConnectionProfilePostgresqlPasswordSet(original["passwordSet"], d, config) + transformed["ssl"] = + flattenDatabaseMigrationServiceConnectionProfilePostgresqlSsl(original["ssl"], d, config) + transformed["cloud_sql_id"] = + flattenDatabaseMigrationServiceConnectionProfilePostgresqlCloudSqlId(original["cloudSqlId"], d, config) + transformed["network_architecture"] = + flattenDatabaseMigrationServiceConnectionProfilePostgresqlNetworkArchitecture(original["networkArchitecture"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceConnectionProfilePostgresqlHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfilePostgresqlPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are 
represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatabaseMigrationServiceConnectionProfilePostgresqlUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfilePostgresqlPassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("postgresql.0.password") +} + +func flattenDatabaseMigrationServiceConnectionProfilePostgresqlPasswordSet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfilePostgresqlSsl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = + flattenDatabaseMigrationServiceConnectionProfilePostgresqlSslType(original["type"], d, config) + transformed["client_key"] = + flattenDatabaseMigrationServiceConnectionProfilePostgresqlSslClientKey(original["clientKey"], d, config) + transformed["client_certificate"] = + flattenDatabaseMigrationServiceConnectionProfilePostgresqlSslClientCertificate(original["clientCertificate"], d, config) + transformed["ca_certificate"] = + flattenDatabaseMigrationServiceConnectionProfilePostgresqlSslCaCertificate(original["caCertificate"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceConnectionProfilePostgresqlSslType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfilePostgresqlSslClientKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return 
d.Get("postgresql.0.ssl.0.client_key") +} + +func flattenDatabaseMigrationServiceConnectionProfilePostgresqlSslClientCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("postgresql.0.ssl.0.client_certificate") +} + +func flattenDatabaseMigrationServiceConnectionProfilePostgresqlSslCaCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("postgresql.0.ssl.0.ca_certificate") +} + +func flattenDatabaseMigrationServiceConnectionProfilePostgresqlCloudSqlId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfilePostgresqlNetworkArchitecture(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsql(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cloud_sql_id"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlCloudSqlId(original["cloudSqlId"], d, config) + transformed["settings"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettings(original["settings"], d, config) + transformed["private_ip"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlPrivateIp(original["privateIp"], d, config) + transformed["public_ip"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlPublicIp(original["publicIp"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlCloudSqlId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettings(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["database_version"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDatabaseVersion(original["databaseVersion"], d, config) + transformed["user_labels"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsUserLabels(original["userLabels"], d, config) + transformed["tier"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsTier(original["tier"], d, config) + transformed["storage_auto_resize_limit"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsStorageAutoResizeLimit(original["storageAutoResizeLimit"], d, config) + transformed["activation_policy"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsActivationPolicy(original["activationPolicy"], d, config) + transformed["ip_config"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfig(original["ipConfig"], d, config) + transformed["auto_storage_increase"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsAutoStorageIncrease(original["autoStorageIncrease"], d, config) + transformed["database_flags"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDatabaseFlags(original["databaseFlags"], d, config) + transformed["data_disk_type"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDataDiskType(original["dataDiskType"], d, config) + transformed["data_disk_size_gb"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDataDiskSizeGb(original["dataDiskSizeGb"], d, config) + transformed["zone"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsZone(original["zone"], d, config) + transformed["source_id"] = + 
flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsSourceId(original["sourceId"], d, config) + transformed["root_password"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsRootPassword(original["rootPassword"], d, config) + transformed["root_password_set"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsRootPasswordSet(original["rootPasswordSet"], d, config) + transformed["collation"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsCollation(original["collation"], d, config) + transformed["cmek_key_name"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsCmekKeyName(original["cmekKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDatabaseVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsUserLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsTier(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsStorageAutoResizeLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsActivationPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + 
transformed["enable_ipv4"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigEnableIpv4(original["enableIpv4"], d, config) + transformed["private_network"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigPrivateNetwork(original["privateNetwork"], d, config) + transformed["require_ssl"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigRequireSsl(original["requireSsl"], d, config) + transformed["authorized_networks"] = + flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworks(original["authorizedNetworks"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigEnableIpv4(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigPrivateNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigRequireSsl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "value": flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksValue(original["value"], d, config), + "label": 
flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksLabel(original["label"], d, config), + "expire_time": flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksExpireTime(original["expireTime"], d, config), + "ttl": flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksTtl(original["ttl"], d, config), + }) + } + return transformed +} +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksLabel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksExpireTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsAutoStorageIncrease(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDatabaseFlags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDataDiskType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDataDiskSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsSourceId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsRootPassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("cloudsql.0.settings.0.root_password") +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsRootPasswordSet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsCollation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlSettingsCmekKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlPrivateIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileCloudsqlPublicIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileAlloydb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cluster_id"] = + flattenDatabaseMigrationServiceConnectionProfileAlloydbClusterId(original["clusterId"], d, config) + transformed["settings"] = + 
flattenDatabaseMigrationServiceConnectionProfileAlloydbSettings(original["settings"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceConnectionProfileAlloydbClusterId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["initial_user"] = + flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUser(original["initialUser"], d, config) + transformed["vpc_network"] = + flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsVpcNetwork(original["vpcNetwork"], d, config) + transformed["labels"] = + flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsLabels(original["labels"], d, config) + transformed["primary_instance_settings"] = + flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettings(original["primaryInstanceSettings"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUser(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["user"] = + flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUserUser(original["user"], d, config) + transformed["password"] = + flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUserPassword(original["password"], d, config) + transformed["password_set"] = + 
flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUserPasswordSet(original["passwordSet"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUserUser(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUserPassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("alloydb.0.settings.0.initial_user.0.password") +} + +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUserPasswordSet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsVpcNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["id"] = + flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsId(original["id"], d, config) + transformed["machine_config"] = + flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfig(original["machineConfig"], d, config) + transformed["database_flags"] = + flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsDatabaseFlags(original["databaseFlags"], d, config) + transformed["labels"] = + 
flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsLabels(original["labels"], d, config) + transformed["private_ip"] = + flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsPrivateIp(original["privateIp"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cpu_count"] = + flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfigCpuCount(original["cpuCount"], d, config) + return []interface{}{transformed} +} +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfigCpuCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsDatabaseFlags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsLabels(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsPrivateIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDatabaseMigrationServiceConnectionProfileDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDatabaseMigrationServiceConnectionProfileMysql(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHost, err := expandDatabaseMigrationServiceConnectionProfileMysqlHost(original["host"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["host"] = transformedHost + } + + transformedPort, err := expandDatabaseMigrationServiceConnectionProfileMysqlPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedUsername, err := expandDatabaseMigrationServiceConnectionProfileMysqlUsername(original["username"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["username"] = transformedUsername + } + + transformedPassword, err := expandDatabaseMigrationServiceConnectionProfileMysqlPassword(original["password"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["password"] = transformedPassword + } + + transformedPasswordSet, err := expandDatabaseMigrationServiceConnectionProfileMysqlPasswordSet(original["password_set"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPasswordSet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["passwordSet"] = transformedPasswordSet + } + + transformedSsl, err := expandDatabaseMigrationServiceConnectionProfileMysqlSsl(original["ssl"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSsl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ssl"] = transformedSsl + } + + transformedCloudSqlId, err := expandDatabaseMigrationServiceConnectionProfileMysqlCloudSqlId(original["cloud_sql_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudSqlId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudSqlId"] = transformedCloudSqlId + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceConnectionProfileMysqlHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileMysqlPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileMysqlUsername(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDatabaseMigrationServiceConnectionProfileMysqlPassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileMysqlPasswordSet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileMysqlSsl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandDatabaseMigrationServiceConnectionProfileMysqlSslType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedClientKey, err := expandDatabaseMigrationServiceConnectionProfileMysqlSslClientKey(original["client_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientKey"] = transformedClientKey + } + + transformedClientCertificate, err := expandDatabaseMigrationServiceConnectionProfileMysqlSslClientCertificate(original["client_certificate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientCertificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientCertificate"] = transformedClientCertificate + } + + transformedCaCertificate, err := expandDatabaseMigrationServiceConnectionProfileMysqlSslCaCertificate(original["ca_certificate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCaCertificate); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["caCertificate"] = transformedCaCertificate + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceConnectionProfileMysqlSslType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileMysqlSslClientKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileMysqlSslClientCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileMysqlSslCaCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileMysqlCloudSqlId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfilePostgresql(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHost, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlHost(original["host"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["host"] = transformedHost + } + + transformedPort, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedUsername, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlUsername(original["username"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["username"] = transformedUsername + } + + transformedPassword, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlPassword(original["password"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["password"] = transformedPassword + } + + transformedPasswordSet, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlPasswordSet(original["password_set"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPasswordSet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["passwordSet"] = transformedPasswordSet + } + + transformedSsl, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlSsl(original["ssl"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSsl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ssl"] = transformedSsl + } + + transformedCloudSqlId, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlCloudSqlId(original["cloud_sql_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudSqlId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudSqlId"] = transformedCloudSqlId + } + + transformedNetworkArchitecture, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlNetworkArchitecture(original["network_architecture"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetworkArchitecture); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["networkArchitecture"] = transformedNetworkArchitecture + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceConnectionProfilePostgresqlHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfilePostgresqlPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfilePostgresqlUsername(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfilePostgresqlPassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfilePostgresqlPasswordSet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfilePostgresqlSsl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlSslType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedClientKey, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlSslClientKey(original["client_key"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedClientKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientKey"] = transformedClientKey + } + + transformedClientCertificate, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlSslClientCertificate(original["client_certificate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientCertificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientCertificate"] = transformedClientCertificate + } + + transformedCaCertificate, err := expandDatabaseMigrationServiceConnectionProfilePostgresqlSslCaCertificate(original["ca_certificate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCaCertificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["caCertificate"] = transformedCaCertificate + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceConnectionProfilePostgresqlSslType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfilePostgresqlSslClientKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfilePostgresqlSslClientCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfilePostgresqlSslCaCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfilePostgresqlCloudSqlId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDatabaseMigrationServiceConnectionProfilePostgresqlNetworkArchitecture(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsql(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCloudSqlId, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlCloudSqlId(original["cloud_sql_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudSqlId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudSqlId"] = transformedCloudSqlId + } + + transformedSettings, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettings(original["settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["settings"] = transformedSettings + } + + transformedPrivateIp, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlPrivateIp(original["private_ip"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrivateIp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["privateIp"] = transformedPrivateIp + } + + transformedPublicIp, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlPublicIp(original["public_ip"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicIp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicIp"] = transformedPublicIp + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlCloudSqlId(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDatabaseVersion, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDatabaseVersion(original["database_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatabaseVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["databaseVersion"] = transformedDatabaseVersion + } + + transformedUserLabels, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsUserLabels(original["user_labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUserLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["userLabels"] = transformedUserLabels + } + + transformedTier, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsTier(original["tier"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTier); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tier"] = transformedTier + } + + transformedStorageAutoResizeLimit, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsStorageAutoResizeLimit(original["storage_auto_resize_limit"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStorageAutoResizeLimit); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["storageAutoResizeLimit"] = transformedStorageAutoResizeLimit + } + + transformedActivationPolicy, err := 
expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsActivationPolicy(original["activation_policy"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedActivationPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["activationPolicy"] = transformedActivationPolicy + } + + transformedIpConfig, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfig(original["ip_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipConfig"] = transformedIpConfig + } + + transformedAutoStorageIncrease, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsAutoStorageIncrease(original["auto_storage_increase"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAutoStorageIncrease); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["autoStorageIncrease"] = transformedAutoStorageIncrease + } + + transformedDatabaseFlags, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDatabaseFlags(original["database_flags"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatabaseFlags); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["databaseFlags"] = transformedDatabaseFlags + } + + transformedDataDiskType, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDataDiskType(original["data_disk_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataDiskType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataDiskType"] = transformedDataDiskType + } + + transformedDataDiskSizeGb, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDataDiskSizeGb(original["data_disk_size_gb"], d, config) + if err != nil { + return nil, err + } else 
if val := reflect.ValueOf(transformedDataDiskSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataDiskSizeGb"] = transformedDataDiskSizeGb + } + + transformedZone, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsZone(original["zone"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedZone); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["zone"] = transformedZone + } + + transformedSourceId, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsSourceId(original["source_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSourceId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sourceId"] = transformedSourceId + } + + transformedRootPassword, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsRootPassword(original["root_password"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRootPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rootPassword"] = transformedRootPassword + } + + transformedRootPasswordSet, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsRootPasswordSet(original["root_password_set"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRootPasswordSet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rootPasswordSet"] = transformedRootPasswordSet + } + + transformedCollation, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsCollation(original["collation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["collation"] = transformedCollation + } + + transformedCmekKeyName, err := 
expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsCmekKeyName(original["cmek_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCmekKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cmekKeyName"] = transformedCmekKeyName + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDatabaseVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsUserLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsTier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsStorageAutoResizeLimit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsActivationPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnableIpv4, err := 
expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigEnableIpv4(original["enable_ipv4"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableIpv4); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableIpv4"] = transformedEnableIpv4 + } + + transformedPrivateNetwork, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigPrivateNetwork(original["private_network"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrivateNetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["privateNetwork"] = transformedPrivateNetwork + } + + transformedRequireSsl, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigRequireSsl(original["require_ssl"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequireSsl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requireSsl"] = transformedRequireSsl + } + + transformedAuthorizedNetworks, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworks(original["authorized_networks"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAuthorizedNetworks); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["authorizedNetworks"] = transformedAuthorizedNetworks + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigEnableIpv4(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigPrivateNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigRequireSsl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedValue, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + transformedLabel, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksLabel(original["label"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["label"] = transformedLabel + } + + transformedExpireTime, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksExpireTime(original["expire_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpireTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expireTime"] = transformedExpireTime + } + + transformedTtl, err := expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksTtl(original["ttl"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ttl"] = transformedTtl + 
} + + req = append(req, transformed) + } + return req, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksLabel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksExpireTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsIpConfigAuthorizedNetworksTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsAutoStorageIncrease(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDatabaseFlags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDataDiskType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsDataDiskSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsSourceId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsRootPassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsRootPasswordSet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsCollation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlSettingsCmekKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlPrivateIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileCloudsqlPublicIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedClusterId, 
err := expandDatabaseMigrationServiceConnectionProfileAlloydbClusterId(original["cluster_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClusterId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clusterId"] = transformedClusterId + } + + transformedSettings, err := expandDatabaseMigrationServiceConnectionProfileAlloydbSettings(original["settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["settings"] = transformedSettings + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbClusterId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInitialUser, err := expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUser(original["initial_user"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInitialUser); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["initialUser"] = transformedInitialUser + } + + transformedVpcNetwork, err := expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsVpcNetwork(original["vpc_network"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVpcNetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vpcNetwork"] = transformedVpcNetwork + } + + transformedLabels, err := 
expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labels"] = transformedLabels + } + + transformedPrimaryInstanceSettings, err := expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettings(original["primary_instance_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimaryInstanceSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["primaryInstanceSettings"] = transformedPrimaryInstanceSettings + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUser(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUser, err := expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUserUser(original["user"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUser); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["user"] = transformedUser + } + + transformedPassword, err := expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUserPassword(original["password"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["password"] = transformedPassword + } + + transformedPasswordSet, err := expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUserPasswordSet(original["password_set"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedPasswordSet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["passwordSet"] = transformedPasswordSet + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUserUser(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUserPassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsInitialUserPasswordSet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsVpcNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedMachineConfig, err := expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfig(original["machine_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMachineConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["machineConfig"] = transformedMachineConfig + } + + transformedDatabaseFlags, err := expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsDatabaseFlags(original["database_flags"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatabaseFlags); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["databaseFlags"] = transformedDatabaseFlags + } + + transformedLabels, err := expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labels"] = transformedLabels + } + + transformedPrivateIp, err := expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsPrivateIp(original["private_ip"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrivateIp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["privateIp"] = transformedPrivateIp + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfig(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCpuCount, err := expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfigCpuCount(original["cpu_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCpuCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cpuCount"] = transformedCpuCount + } + + return transformed, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfigCpuCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsDatabaseFlags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDatabaseMigrationServiceConnectionProfileAlloydbSettingsPrimaryInstanceSettingsPrivateIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_connection_profile_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_connection_profile_sweeper.go new file mode 100644 index 0000000000..5ec85cac9d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice/resource_database_migration_service_connection_profile_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package databasemigrationservice + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DatabaseMigrationServiceConnectionProfile", testSweepDatabaseMigrationServiceConnectionProfile) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDatabaseMigrationServiceConnectionProfile(region string) error { + resourceName := "DatabaseMigrationServiceConnectionProfile" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://datamigration.googleapis.com/v1/projects/{{project}}/locations/{{location}}/connectionProfiles", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + 
Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["connectionProfiles"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://datamigration.googleapis.com/v1/projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + 
log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/iam_data_catalog_entry_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/iam_data_catalog_entry_group.go new file mode 100644 index 0000000000..54b5dce937 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/iam_data_catalog_entry_group.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var DataCatalogEntryGroupIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "entry_group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type DataCatalogEntryGroupIamUpdater struct { + project string + region string + entryGroup string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func DataCatalogEntryGroupIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + } + values["region"] = region + if v, ok := d.GetOk("entry_group"); ok { + values["entry_group"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/entryGroups/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("entry_group").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataCatalogEntryGroupIamUpdater{ + project: values["project"], + region: values["region"], + entryGroup: values["entry_group"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", u.region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("entry_group", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting entry_group: %s", err) + } + + return u, nil +} + +func DataCatalogEntryGroupIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + values["region"] = region + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/entryGroups/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DataCatalogEntryGroupIamUpdater{ + project: values["project"], + region: values["region"], + entryGroup: values["entry_group"], + d: d, + Config: config, + } + if err := d.Set("entry_group", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting entry_group: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataCatalogEntryGroupIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := 
u.qualifyEntryGroupUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataCatalogEntryGroupIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyEntryGroupUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataCatalogEntryGroupIamUpdater) qualifyEntryGroupUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataCatalogBasePath}}%s:%s", 
fmt.Sprintf("projects/%s/locations/%s/entryGroups/%s", u.project, u.region, u.entryGroup), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataCatalogEntryGroupIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/entryGroups/%s", u.project, u.region, u.entryGroup) +} + +func (u *DataCatalogEntryGroupIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-datacatalog-entrygroup-%s", u.GetResourceId()) +} + +func (u *DataCatalogEntryGroupIamUpdater) DescribeResource() string { + return fmt.Sprintf("datacatalog entrygroup %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/iam_data_catalog_policy_tag.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/iam_data_catalog_policy_tag.go new file mode 100644 index 0000000000..1f963027ab --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/iam_data_catalog_policy_tag.go @@ -0,0 +1,187 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var DataCatalogPolicyTagIamSchema = map[string]*schema.Schema{ + "policy_tag": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type DataCatalogPolicyTagIamUpdater struct { + policyTag string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func DataCatalogPolicyTagIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + if v, ok := d.GetOk("policy_tag"); ok { + values["policy_tag"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"(?P.+)"}, d, config, d.Get("policy_tag").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataCatalogPolicyTagIamUpdater{ + policyTag: values["policy_tag"], + d: d, + Config: config, + } + + if err := d.Set("policy_tag", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting policy_tag: %s", err) + } + + return u, nil +} + +func DataCatalogPolicyTagIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + m, err := tpgresource.GetImportIdQualifiers([]string{"(?P.+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } 
+ + u := &DataCatalogPolicyTagIamUpdater{ + policyTag: values["policy_tag"], + d: d, + Config: config, + } + if err := d.Set("policy_tag", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting policy_tag: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataCatalogPolicyTagIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyPolicyTagUrl("getIamPolicy") + if err != nil { + return nil, err + } + + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataCatalogPolicyTagIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyPolicyTagUrl("setIamPolicy") + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + 
+func (u *DataCatalogPolicyTagIamUpdater) qualifyPolicyTagUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataCatalogBasePath}}%s:%s", fmt.Sprintf("%s", u.policyTag), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataCatalogPolicyTagIamUpdater) GetResourceId() string { + return fmt.Sprintf("%s", u.policyTag) +} + +func (u *DataCatalogPolicyTagIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-datacatalog-policytag-%s", u.GetResourceId()) +} + +func (u *DataCatalogPolicyTagIamUpdater) DescribeResource() string { + return fmt.Sprintf("datacatalog policytag %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/iam_data_catalog_tag_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/iam_data_catalog_tag_template.go new file mode 100644 index 0000000000..5873baf768 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/iam_data_catalog_tag_template.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var DataCatalogTagTemplateIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "tag_template": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type DataCatalogTagTemplateIamUpdater struct { + project string + region string + tagTemplate string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func DataCatalogTagTemplateIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + } + values["region"] = region + if v, ok := d.GetOk("tag_template"); ok { + values["tag_template"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/tagTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("tag_template").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataCatalogTagTemplateIamUpdater{ + project: values["project"], + region: values["region"], + tagTemplate: values["tag_template"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", u.region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("tag_template", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting tag_template: %s", err) + } + + return u, nil +} + +func DataCatalogTagTemplateIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + values["region"] = region + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/tagTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DataCatalogTagTemplateIamUpdater{ + project: values["project"], + region: values["region"], + tagTemplate: values["tag_template"], + d: d, + Config: config, + } + if err := d.Set("tag_template", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting tag_template: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataCatalogTagTemplateIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := 
u.qualifyTagTemplateUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataCatalogTagTemplateIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyTagTemplateUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataCatalogTagTemplateIamUpdater) qualifyTagTemplateUrl(methodIdentifier string) (string, error) { + urlTemplate := 
fmt.Sprintf("{{DataCatalogBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/tagTemplates/%s", u.project, u.region, u.tagTemplate), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataCatalogTagTemplateIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/tagTemplates/%s", u.project, u.region, u.tagTemplate) +} + +func (u *DataCatalogTagTemplateIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-datacatalog-tagtemplate-%s", u.GetResourceId()) +} + +func (u *DataCatalogTagTemplateIamUpdater) DescribeResource() string { + return fmt.Sprintf("datacatalog tagtemplate %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/iam_data_catalog_taxonomy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/iam_data_catalog_taxonomy.go new file mode 100644 index 0000000000..184ce9f7f6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/iam_data_catalog_taxonomy.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var DataCatalogTaxonomyIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "taxonomy": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type DataCatalogTaxonomyIamUpdater struct { + project string + region string + taxonomy string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func DataCatalogTaxonomyIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + } + values["region"] = region + if v, ok := d.GetOk("taxonomy"); ok { + values["taxonomy"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/taxonomies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("taxonomy").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataCatalogTaxonomyIamUpdater{ + project: values["project"], + region: values["region"], + taxonomy: values["taxonomy"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", u.region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("taxonomy", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting taxonomy: %s", err) + } + + return u, nil +} + +func DataCatalogTaxonomyIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + values["region"] = region + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/taxonomies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DataCatalogTaxonomyIamUpdater{ + project: values["project"], + region: values["region"], + taxonomy: values["taxonomy"], + d: d, + Config: config, + } + if err := d.Set("taxonomy", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting taxonomy: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataCatalogTaxonomyIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyTaxonomyUrl("getIamPolicy") + if err != nil { + return nil, err 
+ } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataCatalogTaxonomyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyTaxonomyUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataCatalogTaxonomyIamUpdater) qualifyTaxonomyUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataCatalogBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/taxonomies/%s", u.project, u.region, 
u.taxonomy), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataCatalogTaxonomyIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/taxonomies/%s", u.project, u.region, u.taxonomy) +} + +func (u *DataCatalogTaxonomyIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-datacatalog-taxonomy-%s", u.GetResourceId()) +} + +func (u *DataCatalogTaxonomyIamUpdater) DescribeResource() string { + return fmt.Sprintf("datacatalog taxonomy %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_entry.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_entry.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_entry.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_entry.go index fccad883f3..0f71a32a90 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_data_catalog_entry.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_entry.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package datacatalog import ( "encoding/json" @@ -26,6 +29,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceDataCatalogEntry() *schema.Resource { @@ -142,7 +149,7 @@ for what fields this schema can contain.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"FILESET", ""}), + ValidateFunc: verify.ValidateEnum([]string{"FILESET", ""}), Description: `The type of the entry. Only used for Entries with types in the EntryType enum. Currently, only FILESET enum value is allowed. All other entries created through Data Catalog must use userSpecifiedType. Possible values: ["FILESET"]`, ExactlyOneOf: []string{"type", "user_specified_type"}, @@ -150,7 +157,7 @@ Currently, only FILESET enum value is allowed. All other entries created through "user_specified_system": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateRegexp(`^[A-z_][A-z0-9_]{0,63}$`), + ValidateFunc: verify.ValidateRegexp(`^[A-z_][A-z0-9_]{0,63}$`), Description: `This field indicates the entry's source system that Data Catalog does not integrate with. 
userSpecifiedSystem strings must begin with a letter or underscore and can only contain letters, numbers, and underscores; are case insensitive; must be at least 1 character and at most 64 characters long.`, @@ -158,7 +165,7 @@ and underscores; are case insensitive; must be at least 1 character and at most "user_specified_type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateRegexp(`^[A-z_][A-z0-9_]{0,63}$`), + ValidateFunc: verify.ValidateRegexp(`^[A-z_][A-z0-9_]{0,63}$`), Description: `Entry type if it does not fit any of the input-allowed values listed in EntryType enum above. When creating an entry, users should check the enum values first, if nothing matches the entry to be created, then provide a custom value, for example "my_special_type". @@ -256,8 +263,8 @@ Note that this Entry and its child resources may not actually be stored in the l } func resourceDataCatalogEntryCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -266,53 +273,53 @@ func resourceDataCatalogEntryCreate(d *schema.ResourceData, meta interface{}) er linkedResourceProp, err := expandDataCatalogEntryLinkedResource(d.Get("linked_resource"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("linked_resource"); !isEmptyValue(reflect.ValueOf(linkedResourceProp)) && (ok || !reflect.DeepEqual(v, linkedResourceProp)) { + } else if v, ok := d.GetOkExists("linked_resource"); !tpgresource.IsEmptyValue(reflect.ValueOf(linkedResourceProp)) && (ok || !reflect.DeepEqual(v, linkedResourceProp)) { obj["linkedResource"] = linkedResourceProp } displayNameProp, err := expandDataCatalogEntryDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); 
!isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } descriptionProp, err := expandDataCatalogEntryDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } schemaProp, err := expandDataCatalogEntrySchema(d.Get("schema"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("schema"); !isEmptyValue(reflect.ValueOf(schemaProp)) && (ok || !reflect.DeepEqual(v, schemaProp)) { + } else if v, ok := d.GetOkExists("schema"); !tpgresource.IsEmptyValue(reflect.ValueOf(schemaProp)) && (ok || !reflect.DeepEqual(v, schemaProp)) { obj["schema"] = schemaProp } typeProp, err := expandDataCatalogEntryType(d.Get("type"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { obj["type"] = typeProp } userSpecifiedTypeProp, err := expandDataCatalogEntryUserSpecifiedType(d.Get("user_specified_type"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("user_specified_type"); !isEmptyValue(reflect.ValueOf(userSpecifiedTypeProp)) && (ok || !reflect.DeepEqual(v, userSpecifiedTypeProp)) { + } else if v, ok := d.GetOkExists("user_specified_type"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(userSpecifiedTypeProp)) && (ok || !reflect.DeepEqual(v, userSpecifiedTypeProp)) { obj["userSpecifiedType"] = userSpecifiedTypeProp } userSpecifiedSystemProp, err := expandDataCatalogEntryUserSpecifiedSystem(d.Get("user_specified_system"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("user_specified_system"); !isEmptyValue(reflect.ValueOf(userSpecifiedSystemProp)) && (ok || !reflect.DeepEqual(v, userSpecifiedSystemProp)) { + } else if v, ok := d.GetOkExists("user_specified_system"); !tpgresource.IsEmptyValue(reflect.ValueOf(userSpecifiedSystemProp)) && (ok || !reflect.DeepEqual(v, userSpecifiedSystemProp)) { obj["userSpecifiedSystem"] = userSpecifiedSystemProp } gcsFilesetSpecProp, err := expandDataCatalogEntryGcsFilesetSpec(d.Get("gcs_fileset_spec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("gcs_fileset_spec"); !isEmptyValue(reflect.ValueOf(gcsFilesetSpecProp)) && (ok || !reflect.DeepEqual(v, gcsFilesetSpecProp)) { + } else if v, ok := d.GetOkExists("gcs_fileset_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(gcsFilesetSpecProp)) && (ok || !reflect.DeepEqual(v, gcsFilesetSpecProp)) { obj["gcsFilesetSpec"] = gcsFilesetSpecProp } - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{entry_group}}/entries?entryId={{entry_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{entry_group}}/entries?entryId={{entry_id}}") if err != nil { return err } @@ -325,11 +332,19 @@ func resourceDataCatalogEntryCreate(d *schema.ResourceData, meta interface{}) er } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Entry: %s", err) } @@ -338,7 +353,7 @@ func resourceDataCatalogEntryCreate(d *schema.ResourceData, meta interface{}) er } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -350,13 +365,13 @@ func resourceDataCatalogEntryCreate(d *schema.ResourceData, meta interface{}) er } func resourceDataCatalogEntryRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") if err != nil { return err } @@ -368,13 +383,19 @@ func resourceDataCatalogEntryRead(d *schema.ResourceData, meta interface{}) erro } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataCatalogEntry %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataCatalogEntry %q", d.Id())) } if err := 
d.Set("name", flattenDataCatalogEntryName(res["name"], d, config)); err != nil { @@ -418,8 +439,8 @@ func resourceDataCatalogEntryRead(d *schema.ResourceData, meta interface{}) erro } func resourceDataCatalogEntryUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -430,47 +451,47 @@ func resourceDataCatalogEntryUpdate(d *schema.ResourceData, meta interface{}) er linkedResourceProp, err := expandDataCatalogEntryLinkedResource(d.Get("linked_resource"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("linked_resource"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, linkedResourceProp)) { + } else if v, ok := d.GetOkExists("linked_resource"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, linkedResourceProp)) { obj["linkedResource"] = linkedResourceProp } displayNameProp, err := expandDataCatalogEntryDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } descriptionProp, err := expandDataCatalogEntryDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } schemaProp, err := 
expandDataCatalogEntrySchema(d.Get("schema"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("schema"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, schemaProp)) { + } else if v, ok := d.GetOkExists("schema"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, schemaProp)) { obj["schema"] = schemaProp } userSpecifiedTypeProp, err := expandDataCatalogEntryUserSpecifiedType(d.Get("user_specified_type"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("user_specified_type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, userSpecifiedTypeProp)) { + } else if v, ok := d.GetOkExists("user_specified_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, userSpecifiedTypeProp)) { obj["userSpecifiedType"] = userSpecifiedTypeProp } userSpecifiedSystemProp, err := expandDataCatalogEntryUserSpecifiedSystem(d.Get("user_specified_system"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("user_specified_system"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, userSpecifiedSystemProp)) { + } else if v, ok := d.GetOkExists("user_specified_system"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, userSpecifiedSystemProp)) { obj["userSpecifiedSystem"] = userSpecifiedSystemProp } gcsFilesetSpecProp, err := expandDataCatalogEntryGcsFilesetSpec(d.Get("gcs_fileset_spec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("gcs_fileset_spec"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gcsFilesetSpecProp)) { + } else if v, ok := d.GetOkExists("gcs_fileset_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gcsFilesetSpecProp)) { obj["gcsFilesetSpec"] = gcsFilesetSpecProp } - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + url, err := tpgresource.ReplaceVars(d, config, 
"{{DataCatalogBasePath}}{{name}}") if err != nil { return err } @@ -505,9 +526,9 @@ func resourceDataCatalogEntryUpdate(d *schema.ResourceData, meta interface{}) er if d.HasChange("gcs_fileset_spec") { updateMask = append(updateMask, "gcsFilesetSpec") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } @@ -516,11 +537,19 @@ func resourceDataCatalogEntryUpdate(d *schema.ResourceData, meta interface{}) er } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Entry %q: %s", d.Id(), err) @@ -532,15 +561,15 @@ func resourceDataCatalogEntryUpdate(d *schema.ResourceData, meta interface{}) er } func resourceDataCatalogEntryDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + url, err := tpgresource.ReplaceVars(d, 
config, "{{DataCatalogBasePath}}{{name}}") if err != nil { return err } @@ -554,13 +583,21 @@ func resourceDataCatalogEntryDelete(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Deleting Entry %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "Entry") + return transport_tpg.HandleNotFoundError(err, d, "Entry") } log.Printf("[DEBUG] Finished deleting Entry %q: %#v", d.Id(), res) @@ -568,10 +605,10 @@ func resourceDataCatalogEntryDelete(d *schema.ResourceData, meta interface{}) er } func resourceDataCatalogEntryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { return nil, err } @@ -591,23 +628,23 @@ func resourceDataCatalogEntryImport(d *schema.ResourceData, meta interface{}) ([ return []*schema.ResourceData{d}, nil } -func flattenDataCatalogEntryName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryLinkedResource(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { +func flattenDataCatalogEntryLinkedResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntrySchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntrySchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -619,23 +656,23 @@ func flattenDataCatalogEntrySchema(v interface{}, d *schema.ResourceData, config return string(b) } -func flattenDataCatalogEntryType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryUserSpecifiedType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryUserSpecifiedType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryIntegratedSystem(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryIntegratedSystem(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryUserSpecifiedSystem(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryUserSpecifiedSystem(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryGcsFilesetSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryGcsFilesetSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -650,11 +687,11 @@ func flattenDataCatalogEntryGcsFilesetSpec(v interface{}, d *schema.ResourceData flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecs(original["sampleGcsFileSpecs"], d, config) return []interface{}{transformed} } -func flattenDataCatalogEntryGcsFilesetSpecFilePatterns(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryGcsFilesetSpecFilePatterns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecs(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -673,14 +710,14 @@ func flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecs(v interface{}, d *s } return transformed } -func flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsFilePath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsFilePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsSizeBytes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsSizeBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err 
:= tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -694,7 +731,7 @@ func flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsSizeBytes(v interfac return v // let terraform core handle it otherwise } -func flattenDataCatalogEntryBigqueryTableSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryBigqueryTableSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -711,11 +748,11 @@ func flattenDataCatalogEntryBigqueryTableSpec(v interface{}, d *schema.ResourceD flattenDataCatalogEntryBigqueryTableSpecTableSpec(original["tableSpec"], d, config) return []interface{}{transformed} } -func flattenDataCatalogEntryBigqueryTableSpecTableSourceType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryBigqueryTableSpecTableSourceType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryBigqueryTableSpecViewSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryBigqueryTableSpecViewSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -728,11 +765,11 @@ func flattenDataCatalogEntryBigqueryTableSpecViewSpec(v interface{}, d *schema.R flattenDataCatalogEntryBigqueryTableSpecViewSpecViewQuery(original["viewQuery"], d, config) return []interface{}{transformed} } -func flattenDataCatalogEntryBigqueryTableSpecViewSpecViewQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryBigqueryTableSpecViewSpecViewQuery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryBigqueryTableSpecTableSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenDataCatalogEntryBigqueryTableSpecTableSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -745,11 +782,11 @@ func flattenDataCatalogEntryBigqueryTableSpecTableSpec(v interface{}, d *schema. flattenDataCatalogEntryBigqueryTableSpecTableSpecGroupedEntry(original["groupedEntry"], d, config) return []interface{}{transformed} } -func flattenDataCatalogEntryBigqueryTableSpecTableSpecGroupedEntry(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryBigqueryTableSpecTableSpecGroupedEntry(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryBigqueryDateShardedSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryBigqueryDateShardedSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -766,18 +803,18 @@ func flattenDataCatalogEntryBigqueryDateShardedSpec(v interface{}, d *schema.Res flattenDataCatalogEntryBigqueryDateShardedSpecShardCount(original["shardCount"], d, config) return []interface{}{transformed} } -func flattenDataCatalogEntryBigqueryDateShardedSpecDataset(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryBigqueryDateShardedSpecDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryBigqueryDateShardedSpecTablePrefix(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryBigqueryDateShardedSpecTablePrefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataCatalogEntryBigqueryDateShardedSpecShardCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataCatalogEntryBigqueryDateShardedSpecShardCount(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -791,19 +828,19 @@ func flattenDataCatalogEntryBigqueryDateShardedSpecShardCount(v interface{}, d * return v // let terraform core handle it otherwise } -func expandDataCatalogEntryLinkedResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataCatalogEntryLinkedResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataCatalogEntryDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataCatalogEntryDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataCatalogEntryDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataCatalogEntryDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataCatalogEntrySchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataCatalogEntrySchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { b := []byte(v.(string)) if len(b) == 0 { return nil, nil @@ -815,19 +852,19 @@ func expandDataCatalogEntrySchema(v interface{}, d TerraformResourceData, config return m, nil } -func expandDataCatalogEntryType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataCatalogEntryType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandDataCatalogEntryUserSpecifiedType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataCatalogEntryUserSpecifiedType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataCatalogEntryUserSpecifiedSystem(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataCatalogEntryUserSpecifiedSystem(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataCatalogEntryGcsFilesetSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataCatalogEntryGcsFilesetSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -839,25 +876,25 @@ func expandDataCatalogEntryGcsFilesetSpec(v interface{}, d TerraformResourceData transformedFilePatterns, err := expandDataCatalogEntryGcsFilesetSpecFilePatterns(original["file_patterns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFilePatterns); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFilePatterns); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["filePatterns"] = transformedFilePatterns } transformedSampleGcsFileSpecs, err := expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecs(original["sample_gcs_file_specs"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSampleGcsFileSpecs); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSampleGcsFileSpecs); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sampleGcsFileSpecs"] = transformedSampleGcsFileSpecs } return transformed, nil } -func expandDataCatalogEntryGcsFilesetSpecFilePatterns(v interface{}, 
d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataCatalogEntryGcsFilesetSpecFilePatterns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -870,14 +907,14 @@ func expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecs(v interface{}, d Ter transformedFilePath, err := expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsFilePath(original["file_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFilePath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFilePath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["filePath"] = transformedFilePath } transformedSizeBytes, err := expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsSizeBytes(original["size_bytes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSizeBytes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSizeBytes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sizeBytes"] = transformedSizeBytes } @@ -886,10 +923,10 @@ func expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecs(v interface{}, d Ter return req, nil } -func expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsFilePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsFilePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { 
return v, nil } -func expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsSizeBytes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsSizeBytes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_entry_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_entry_group.go new file mode 100644 index 0000000000..9f6721f369 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_entry_group.go @@ -0,0 +1,388 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDataCatalogEntryGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceDataCatalogEntryGroupCreate, + Read: resourceDataCatalogEntryGroupRead, + Update: resourceDataCatalogEntryGroupUpdate, + Delete: resourceDataCatalogEntryGroupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataCatalogEntryGroupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "entry_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^[A-z_][A-z0-9_]{0,63}$`), + Description: `The id of the entry group to create. 
The id must begin with a letter or underscore, +contain only English letters, numbers and underscores, and be at most 64 characters.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Entry group description, which can consist of several sentences or paragraphs that describe entry group contents.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `A short name to identify the entry group, for example, "analytics data - jan 2011".`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `EntryGroup location region.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the entry group in URL format. Example: projects/{project}/locations/{location}/entryGroups/{entryGroupId}`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataCatalogEntryGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDataCatalogEntryGroupDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandDataCatalogEntryGroupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}projects/{{project}}/locations/{{region}}/entryGroups?entryGroupId={{entry_group_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new EntryGroup: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EntryGroup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating EntryGroup: %s", err) + } + if err := d.Set("name", flattenDataCatalogEntryGroupName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating EntryGroup %q: %#v", d.Id(), res) + + return resourceDataCatalogEntryGroupRead(d, meta) +} + +func resourceDataCatalogEntryGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EntryGroup: %s", err) + } + billingProject = project + + // err == nil 
indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataCatalogEntryGroup %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading EntryGroup: %s", err) + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + if err := d.Set("region", region); err != nil { + return fmt.Errorf("Error reading EntryGroup: %s", err) + } + + if err := d.Set("name", flattenDataCatalogEntryGroupName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading EntryGroup: %s", err) + } + if err := d.Set("display_name", flattenDataCatalogEntryGroupDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading EntryGroup: %s", err) + } + if err := d.Set("description", flattenDataCatalogEntryGroupDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading EntryGroup: %s", err) + } + + return nil +} + +func resourceDataCatalogEntryGroupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EntryGroup: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandDataCatalogEntryGroupDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandDataCatalogEntryGroupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating EntryGroup %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating EntryGroup %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating EntryGroup %q: %#v", d.Id(), res) + } + + return resourceDataCatalogEntryGroupRead(d, meta) +} + +func resourceDataCatalogEntryGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + 
billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EntryGroup: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting EntryGroup %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "EntryGroup") + } + + log.Printf("[DEBUG] Finished deleting EntryGroup %q: %#v", d.Id(), res) + return nil +} + +func resourceDataCatalogEntryGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + name := d.Get("name").(string) + egRegex := regexp.MustCompile("projects/(.+)/locations/(.+)/entryGroups/(.+)") + + parts := egRegex.FindStringSubmatch(name) + if len(parts) != 4 { + return nil, fmt.Errorf("entry group name does not fit the format %s", egRegex) + } + if err := d.Set("project", parts[1]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", parts[2]); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("entry_group_id", parts[3]); err != nil { + return nil, fmt.Errorf("Error setting entry_group_id: %s", err) + } + return 
[]*schema.ResourceData{d}, nil +} + +func flattenDataCatalogEntryGroupName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogEntryGroupDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogEntryGroupDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDataCatalogEntryGroupDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataCatalogEntryGroupDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_entry_group_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_entry_group_sweeper.go new file mode 100644 index 0000000000..8bcc0ce795 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_entry_group_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DataCatalogEntryGroup", testSweepDataCatalogEntryGroup) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDataCatalogEntryGroup(region string) error { + resourceName := "DataCatalogEntryGroup" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://datacatalog.googleapis.com/v1/projects/{{project}}/locations/{{region}}/entryGroups", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["entryGroups"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://datacatalog.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_entry_sweeper.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_entry_sweeper.go new file mode 100644 index 0000000000..2d7840b69d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_entry_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DataCatalogEntry", testSweepDataCatalogEntry) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDataCatalogEntry(region string) error { + resourceName := "DataCatalogEntry" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return 
err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://datacatalog.googleapis.com/v1/{{entry_group}}/entries", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["entries"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://datacatalog.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_policy_tag.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_policy_tag.go new file mode 100644 index 0000000000..90bb4d0f74 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_policy_tag.go @@ -0,0 +1,382 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDataCatalogPolicyTag() *schema.Resource { + return &schema.Resource{ + Create: resourceDataCatalogPolicyTagCreate, + Read: resourceDataCatalogPolicyTagRead, + Update: resourceDataCatalogPolicyTagUpdate, + Delete: resourceDataCatalogPolicyTagDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataCatalogPolicyTagImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `User defined name of this policy tag. 
It must: be unique within the parent +taxonomy; contain only unicode letters, numbers, underscores, dashes and spaces; +not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8.`, + }, + "taxonomy": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Taxonomy the policy tag is associated with`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of this policy tag. It must: contain only unicode characters, tabs, +newlines, carriage returns and page breaks; and be at most 2000 bytes long when +encoded in UTF-8. If not set, defaults to an empty description. +If not set, defaults to an empty description.`, + }, + "parent_policy_tag": { + Type: schema.TypeString, + Optional: true, + Description: `Resource name of this policy tag's parent policy tag. +If empty, it means this policy tag is a top level policy tag. +If not set, defaults to an empty string.`, + }, + "child_policy_tags": { + Type: schema.TypeList, + Computed: true, + Description: `Resource names of child policy tags of this policy tag.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Resource name of this policy tag, whose format is: +"projects/{project}/locations/{region}/taxonomies/{taxonomy}/policyTags/{policytag}"`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataCatalogPolicyTagCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDataCatalogPolicyTagDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) 
{ + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandDataCatalogPolicyTagDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + parentPolicyTagProp, err := expandDataCatalogPolicyTagParentPolicyTag(d.Get("parent_policy_tag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent_policy_tag"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentPolicyTagProp)) && (ok || !reflect.DeepEqual(v, parentPolicyTagProp)) { + obj["parentPolicyTag"] = parentPolicyTagProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{taxonomy}}/policyTags") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new PolicyTag: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating PolicyTag: %s", err) + } + if err := d.Set("name", flattenDataCatalogPolicyTagName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating PolicyTag %q: %#v", d.Id(), res) + + return resourceDataCatalogPolicyTagRead(d, meta) +} + +func resourceDataCatalogPolicyTagRead(d *schema.ResourceData, 
meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataCatalogPolicyTag %q", d.Id())) + } + + if err := d.Set("name", flattenDataCatalogPolicyTagName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading PolicyTag: %s", err) + } + if err := d.Set("display_name", flattenDataCatalogPolicyTagDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading PolicyTag: %s", err) + } + if err := d.Set("description", flattenDataCatalogPolicyTagDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading PolicyTag: %s", err) + } + if err := d.Set("parent_policy_tag", flattenDataCatalogPolicyTagParentPolicyTag(res["parentPolicyTag"], d, config)); err != nil { + return fmt.Errorf("Error reading PolicyTag: %s", err) + } + if err := d.Set("child_policy_tags", flattenDataCatalogPolicyTagChildPolicyTags(res["childPolicyTags"], d, config)); err != nil { + return fmt.Errorf("Error reading PolicyTag: %s", err) + } + + return nil +} + +func resourceDataCatalogPolicyTagUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := 
make(map[string]interface{}) + displayNameProp, err := expandDataCatalogPolicyTagDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandDataCatalogPolicyTagDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + parentPolicyTagProp, err := expandDataCatalogPolicyTagParentPolicyTag(d.Get("parent_policy_tag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent_policy_tag"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, parentPolicyTagProp)) { + obj["parentPolicyTag"] = parentPolicyTagProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating PolicyTag %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("parent_policy_tag") { + updateMask = append(updateMask, "parentPolicyTag") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating PolicyTag %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating PolicyTag %q: %#v", d.Id(), res) + } + + return resourceDataCatalogPolicyTagRead(d, meta) +} + +func resourceDataCatalogPolicyTagDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting PolicyTag %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "PolicyTag") + } + + log.Printf("[DEBUG] Finished deleting PolicyTag %q: %#v", d.Id(), res) + return nil +} + +func resourceDataCatalogPolicyTagImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "(?Pprojects/[^/]+/locations/[^/]+/taxonomies/[^/]+)/policyTags/(?P.+)"}, d, config); err != nil { + return nil, err + } + + originalName := d.Get("name").(string) + originalTaxonomy := d.Get("taxonomy").(string) + name := fmt.Sprintf("%s/policyTags/%s", 
originalTaxonomy, originalName) + + if err := d.Set("name", name); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name) + return []*schema.ResourceData{d}, nil +} + +func flattenDataCatalogPolicyTagName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogPolicyTagDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogPolicyTagDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogPolicyTagParentPolicyTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogPolicyTagChildPolicyTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDataCatalogPolicyTagDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataCatalogPolicyTagDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataCatalogPolicyTagParentPolicyTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_policy_tag_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_policy_tag_sweeper.go new file mode 100644 index 0000000000..5332376dc5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_policy_tag_sweeper.go @@ -0,0 +1,139 @@ +// 
Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DataCatalogPolicyTag", testSweepDataCatalogPolicyTag) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDataCatalogPolicyTag(region string) error { + resourceName := "DataCatalogPolicyTag" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://datacatalog.googleapis.com/v1/{{taxonomy}}/policyTags", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["policyTags"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://datacatalog.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] 
Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_tag.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_tag.go new file mode 100644 index 0000000000..4cac55f4fc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_tag.go @@ -0,0 +1,674 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDataCatalogTag() *schema.Resource { + return &schema.Resource{ + Create: resourceDataCatalogTagCreate, + Read: resourceDataCatalogTagRead, + Update: resourceDataCatalogTagUpdate, + Delete: resourceDataCatalogTagDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataCatalogTagImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "fields": { + Type: schema.TypeSet, + Required: true, + Description: `This maps the ID of a tag field to the value of and additional information about that field. +Valid field IDs are defined by the tag's template. 
A tag must have at least 1 field and at most 500 fields.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_name": { + Type: schema.TypeString, + Required: true, + }, + "bool_value": { + Type: schema.TypeBool, + Optional: true, + Description: `Holds the value for a tag field with boolean type.`, + }, + "double_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `Holds the value for a tag field with double type.`, + }, + "enum_value": { + Type: schema.TypeString, + Optional: true, + Description: `The display name of the enum value.`, + }, + + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `Holds the value for a tag field with string type.`, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `Holds the value for a tag field with timestamp type.`, + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + Description: `The display name of this field`, + }, + "order": { + Type: schema.TypeInt, + Computed: true, + Description: `The order of this field with respect to other fields in this tag. For example, a higher value can indicate +a more important field. The value can be negative. Multiple fields can have the same order, and field orders +within a tag do not have to be sequential.`, + }, + }, + }, + }, + "template": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name of the tag template that this tag uses. Example: +projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId} +This field cannot be modified after creation.`, + }, + "column": { + Type: schema.TypeString, + Optional: true, + Description: `Resources like Entry can have schemas associated with them. This scope allows users to attach tags to an +individual column based on that schema. + +For attaching a tag to a nested column, use '.' to separate the column names. 
Example: +'outer_column.inner_column'`, + }, + "parent": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the parent this tag is attached to. This can be the name of an entry or an entry group. If an entry group, the tag will be attached to +all entries in that group.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the tag in URL format. Example: +projects/{project_id}/locations/{location}/entrygroups/{entryGroupId}/entries/{entryId}/tags/{tag_id} or +projects/{project_id}/locations/{location}/entrygroups/{entryGroupId}/tags/{tag_id} +where tag_id is a system-generated identifier. Note that this Tag may not actually be stored in the location in this name.`, + }, + "template_displayname": { + Type: schema.TypeString, + Computed: true, + Description: `The display name of the tag template.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataCatalogTagCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + templateProp, err := expandNestedDataCatalogTagTemplate(d.Get("template"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("template"); !tpgresource.IsEmptyValue(reflect.ValueOf(templateProp)) && (ok || !reflect.DeepEqual(v, templateProp)) { + obj["template"] = templateProp + } + fieldsProp, err := expandNestedDataCatalogTagFields(d.Get("fields"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fields"); !tpgresource.IsEmptyValue(reflect.ValueOf(fieldsProp)) && (ok || !reflect.DeepEqual(v, fieldsProp)) { + obj["fields"] = fieldsProp + } + columnProp, err := expandNestedDataCatalogTagColumn(d.Get("column"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("column"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(columnProp)) && (ok || !reflect.DeepEqual(v, columnProp)) { + obj["column"] = columnProp + } + + obj, err = resourceDataCatalogTagEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{parent}}/tags") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Tag: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Tag: %s", err) + } + if err := d.Set("name", flattenNestedDataCatalogTagName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Tag %q: %#v", d.Id(), res) + + return resourceDataCatalogTagRead(d, meta) +} + +func resourceDataCatalogTagRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{parent}}/tags?pageSize=1000") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataCatalogTag %q", d.Id())) + } + + res, err = flattenNestedDataCatalogTag(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing DataCatalogTag because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenNestedDataCatalogTagName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Tag: %s", err) + } + if err := d.Set("template", flattenNestedDataCatalogTagTemplate(res["template"], d, config)); err != nil { + return fmt.Errorf("Error reading Tag: %s", err) + } + if err := d.Set("template_displayname", flattenNestedDataCatalogTagTemplateDisplayname(res["templateDisplayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Tag: %s", err) + } + if err := d.Set("fields", flattenNestedDataCatalogTagFields(res["fields"], d, config)); err != nil { + return fmt.Errorf("Error reading Tag: %s", err) + } + if err := d.Set("column", flattenNestedDataCatalogTagColumn(res["column"], d, config)); err != nil { + return fmt.Errorf("Error reading Tag: %s", err) + } + + return nil +} + +func resourceDataCatalogTagUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + fieldsProp, err := expandNestedDataCatalogTagFields(d.Get("fields"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fields"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fieldsProp)) { + obj["fields"] = fieldsProp + } 
+ columnProp, err := expandNestedDataCatalogTagColumn(d.Get("column"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("column"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, columnProp)) { + obj["column"] = columnProp + } + + obj, err = resourceDataCatalogTagEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Tag %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("fields") { + updateMask = append(updateMask, "fields") + } + + if d.HasChange("column") { + updateMask = append(updateMask, "column") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Tag %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Tag %q: %#v", d.Id(), res) + } + + return resourceDataCatalogTagRead(d, meta) +} + +func resourceDataCatalogTagDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + if err != nil { + return err + } + 
+ var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Tag %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Tag") + } + + log.Printf("[DEBUG] Finished deleting Tag %q: %#v", d.Id(), res) + return nil +} + +func resourceDataCatalogTagImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + name := d.Get("name").(string) + egRegex := regexp.MustCompile("(.+)/tags") + + parts := egRegex.FindStringSubmatch(name) + if len(parts) != 2 { + return nil, fmt.Errorf("entry name does not fit the format %s", egRegex) + } + + if err := d.Set("parent", parts[1]); err != nil { + return nil, fmt.Errorf("Error setting parent: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenNestedDataCatalogTagName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedDataCatalogTagTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedDataCatalogTagTemplateDisplayname(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedDataCatalogTagFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := 
v.(map[string]interface{}) + transformed := make([]interface{}, 0, len(l)) + for k, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "field_name": k, + "display_name": flattenNestedDataCatalogTagFieldsDisplayName(original["display_name"], d, config), + "order": flattenNestedDataCatalogTagFieldsOrder(original["order"], d, config), + "double_value": flattenNestedDataCatalogTagFieldsDoubleValue(original["doubleValue"], d, config), + "string_value": flattenNestedDataCatalogTagFieldsStringValue(original["stringValue"], d, config), + "bool_value": flattenNestedDataCatalogTagFieldsBoolValue(original["boolValue"], d, config), + "timestamp_value": flattenNestedDataCatalogTagFieldsTimestampValue(original["timestampValue"], d, config), + "enum_value": flattenNestedDataCatalogTagFieldsEnumValue(original["enumValue"], d, config), + }) + } + return transformed +} +func flattenNestedDataCatalogTagFieldsDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedDataCatalogTagFieldsOrder(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedDataCatalogTagFieldsDoubleValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedDataCatalogTagFieldsStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedDataCatalogTagFieldsBoolValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenNestedDataCatalogTagFieldsTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedDataCatalogTagFieldsEnumValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + + return v.(map[string]interface{})["displayName"] +} + +func flattenNestedDataCatalogTagColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedDataCatalogTagTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedDataCatalogTagFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + if v == nil { + return map[string]interface{}{}, nil + } + m := make(map[string]interface{}) + for _, raw := range v.(*schema.Set).List() { + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDisplayName, err := expandNestedDataCatalogTagFieldsDisplayName(original["display_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["display_name"] = transformedDisplayName + } + + transformedOrder, err := expandNestedDataCatalogTagFieldsOrder(original["order"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOrder); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["order"] = transformedOrder + } + + transformedDoubleValue, err := expandNestedDataCatalogTagFieldsDoubleValue(original["double_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDoubleValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["doubleValue"] = 
transformedDoubleValue + } + + transformedStringValue, err := expandNestedDataCatalogTagFieldsStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedBoolValue, err := expandNestedDataCatalogTagFieldsBoolValue(original["bool_value"], d, config) + if err != nil { + return nil, err + } else { + transformed["boolValue"] = transformedBoolValue + } + + transformedTimestampValue, err := expandNestedDataCatalogTagFieldsTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedEnumValue, err := expandNestedDataCatalogTagFieldsEnumValue(original["enum_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnumValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enumValue"] = transformedEnumValue + } + + transformedFieldName, err := tpgresource.ExpandString(original["field_name"], d, config) + if err != nil { + return nil, err + } + m[transformedFieldName] = transformed + } + return m, nil +} + +func expandNestedDataCatalogTagFieldsDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedDataCatalogTagFieldsOrder(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedDataCatalogTagFieldsDoubleValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedDataCatalogTagFieldsStringValue(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedDataCatalogTagFieldsBoolValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedDataCatalogTagFieldsTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedDataCatalogTagFieldsEnumValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + // we flattened the original["enum_value"]["display_name"] object to be just original["enum_value"] so here, + // v is the value we want from the config + transformed := make(map[string]interface{}) + if val := reflect.ValueOf(v); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["displayName"] = v + } + + return transformed, nil +} + +func expandNestedDataCatalogTagColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceDataCatalogTagEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + if obj["fields"] != nil { + // IsEmptyValue() does not work for a boolean as it shows + // false when it is 'empty'. 
Filter boolValue here based on + // the rule api does not take more than 1 'value' + fields := obj["fields"].(map[string]interface{}) + for _, elements := range fields { + values := elements.(map[string]interface{}) + if len(values) > 1 { + for val := range values { + if val == "boolValue" { + delete(values, "boolValue") + } + } + } + } + } + return obj, nil +} + +func flattenNestedDataCatalogTag(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["tags"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value tags. Actual value: %v", v) + } + + _, item, err := resourceDataCatalogTagFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceDataCatalogTagFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName := d.Get("name") + expectedFlattenedName := flattenNestedDataCatalogTagName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. 
+ for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemName := flattenNestedDataCatalogTagName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_tag_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_tag_sweeper.go new file mode 100644 index 0000000000..389aea8fc3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_tag_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DataCatalogTag", testSweepDataCatalogTag) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDataCatalogTag(region string) error { + resourceName := "DataCatalogTag" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://datacatalog.googleapis.com/v1/{{parent}}/tags", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: 
%s", listUrl, err) + return nil + } + + resourceList, ok := res["tags"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://datacatalog.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_tag_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_tag_template.go new file mode 100644 index 
0000000000..89553a89f1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_tag_template.go @@ -0,0 +1,930 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +// Use it to delete TagTemplate Field +func deleteTagTemplateField(d *schema.ResourceData, config *transport_tpg.Config, name, billingProject, userAgent string) error { + + url_delete, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}/fields/"+name+"?force={{force_delete}}") + if err != nil { + return err + } + var obj map[string]interface{} + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url_delete, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return fmt.Errorf("Error deleting TagTemplate Field %v: %s", name, err) + } + + log.Printf("[DEBUG] Finished deleting TagTemplate Field %q: %#v", name, res) + return nil +} + 
+// Use it to create TagTemplate Field +func createTagTemplateField(d *schema.ResourceData, config *transport_tpg.Config, body map[string]interface{}, name, billingProject, userAgent string) error { + + url_create, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}/fields") + if err != nil { + return err + } + + url_create, err = transport_tpg.AddQueryParams(url_create, map[string]string{"tagTemplateFieldId": name}) + if err != nil { + return err + } + + res_create, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url_create, + UserAgent: userAgent, + Body: body, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating TagTemplate Field: %s", err) + } + + if err != nil { + return fmt.Errorf("Error creating TagTemplate Field %v: %s", name, err) + } else { + log.Printf("[DEBUG] Finished creating TagTemplate Field %v: %#v", name, res_create) + } + + return nil +} + +func ResourceDataCatalogTagTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceDataCatalogTagTemplateCreate, + Read: resourceDataCatalogTagTemplateRead, + Update: resourceDataCatalogTagTemplateUpdate, + Delete: resourceDataCatalogTagTemplateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataCatalogTagTemplateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "fields": { + Type: schema.TypeSet, + Required: true, + Description: `Set of tag template field IDs and the settings for the field. This set is an exhaustive list of the allowed fields. This set must contain at least one field and at most 500 fields. The change of field_id will be resulting in re-creating of field. 
The change of primitive_type will be resulting in re-creating of field, however if the field is a required, you cannot update it.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_id": { + Type: schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeList, + Required: true, + Description: `The type of value this tag field can contain.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enum_type": { + Type: schema.TypeList, + Optional: true, + Description: `Represents an enum type. + Exactly one of 'primitive_type' or 'enum_type' must be set`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_values": { + Type: schema.TypeSet, + Required: true, + Description: `The set of allowed values for this enum. The display names of the +values must be case-insensitively unique within this set. Currently, +enum values can only be added to the list of allowed values. Deletion +and renaming of enum values are not supported. +Can have up to 500 allowed values.`, + Elem: datacatalogTagTemplateFieldsFieldsTypeEnumTypeAllowedValuesSchema(), + // Default schema.HashSchema is used. + }, + }, + }, + }, + "primitive_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"DOUBLE", "STRING", "BOOL", "TIMESTAMP", ""}), + Description: `Represents primitive types - string, bool etc. + Exactly one of 'primitive_type' or 'enum_type' must be set Possible values: ["DOUBLE", "STRING", "BOOL", "TIMESTAMP"]`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `A description for this field.`, + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The display name for this field.`, + }, + "is_required": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `Whether this is a required field. 
Defaults to false.`, + }, + "order": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The order of this field with respect to other fields in this tag template. +A higher value indicates a more important field. The value can be negative. +Multiple fields can have the same order, and field orders within a tag do not have to be sequential.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the tag template field in URL format. Example: projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId}/fields/{field}`, + }, + }, + }, + }, + "tag_template_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^[a-z_][a-z0-9_]{0,63}$`), + Description: `The id of the tag template to create.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `The display name for this template.`, + }, + "force_delete": { + Type: schema.TypeBool, + Optional: true, + Description: `This confirms the deletion of any possible tags using this template. Must be set to true in order to delete the tag template.`, + Default: false, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Template location region.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the tag template in URL format. 
Example: projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId}`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func datacatalogTagTemplateFieldsFieldsTypeEnumTypeAllowedValuesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `The display name of the enum value.`, + }, + }, + } +} + +func resourceDataCatalogTagTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDataCatalogTagTemplateDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + fieldsProp, err := expandDataCatalogTagTemplateFields(d.Get("fields"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fields"); !tpgresource.IsEmptyValue(reflect.ValueOf(fieldsProp)) && (ok || !reflect.DeepEqual(v, fieldsProp)) { + obj["fields"] = fieldsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}projects/{{project}}/locations/{{region}}/tagTemplates?tagTemplateId={{tag_template_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TagTemplate: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TagTemplate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating TagTemplate: %s", err) + } + if err := d.Set("name", flattenDataCatalogTagTemplateName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating TagTemplate %q: %#v", d.Id(), res) + + return resourceDataCatalogTagTemplateRead(d, meta) +} + +func resourceDataCatalogTagTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TagTemplate: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataCatalogTagTemplate %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return 
fmt.Errorf("Error reading TagTemplate: %s", err) + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + if err := d.Set("region", region); err != nil { + return fmt.Errorf("Error reading TagTemplate: %s", err) + } + + if err := d.Set("name", flattenDataCatalogTagTemplateName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TagTemplate: %s", err) + } + if err := d.Set("display_name", flattenDataCatalogTagTemplateDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading TagTemplate: %s", err) + } + if err := d.Set("fields", flattenDataCatalogTagTemplateFields(res["fields"], d, config)); err != nil { + return fmt.Errorf("Error reading TagTemplate: %s", err) + } + + return nil +} + +func resourceDataCatalogTagTemplateUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TagTemplate: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandDataCatalogTagTemplateDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + fieldsProp, err := expandDataCatalogTagTemplateFields(d.Get("fields"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fields"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fieldsProp)) { + obj["fields"] = fieldsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + if err != nil { + return err + 
} + + log.Printf("[DEBUG] Updating TagTemplate %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + if len(updateMask) > 0 { + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating TagTemplate %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TagTemplate %q: %#v", d.Id(), res) + } + + } + + // since fields have a separate endpoint, + // we need to handle it manually + + type FieldChange struct { + Old, New map[string]interface{} + } + + o, n := d.GetChange("fields") + vals := make(map[string]*FieldChange) + + // this will create a dictionary with the value + // of field_id as the key that will contain the + // maps of old and new values + for _, raw := range o.(*schema.Set).List() { + obj := raw.(map[string]interface{}) + k := obj["field_id"].(string) + vals[k] = &FieldChange{Old: obj} + } + + for _, raw := range n.(*schema.Set).List() { + obj := raw.(map[string]interface{}) + k := obj["field_id"].(string) + if _, ok := vals[k]; !ok { + // if key is not present in the vals, + // then create an empty object to hold the new value + vals[k] = &FieldChange{} + } + vals[k].New = obj + } + + // fields schema to create schema.set below + dataCatalogTagTemplateFieldsSchema := &schema.Resource{ + Schema: 
ResourceDataCatalogTagTemplate().Schema["fields"].Elem.(*schema.Resource).Schema, + } + + for name, change := range vals { + // A few different situations to deal with in here: + // - change.Old is nil: create a new role + // - change.New is nil: remove an existing role + // - both are set: test if New is different than Old and update if so + + changeOldSet := schema.NewSet(schema.HashResource(dataCatalogTagTemplateFieldsSchema), []interface{}{}) + changeOldSet.Add(change.Old) + var changeOldProp map[string]interface{} + if len(change.Old) != 0 { + changeOldProp, _ = expandDataCatalogTagTemplateFields(changeOldSet, nil, nil) + changeOldProp = changeOldProp[name].(map[string]interface{}) + } + + changeNewSet := schema.NewSet(schema.HashResource(dataCatalogTagTemplateFieldsSchema), []interface{}{}) + changeNewSet.Add(change.New) + var changeNewProp map[string]interface{} + if len(change.New) != 0 { + changeNewProp, _ = expandDataCatalogTagTemplateFields(changeNewSet, nil, nil) + changeNewProp = changeNewProp[name].(map[string]interface{}) + } + + // if old state is empty, then we have a new field to create + if len(change.Old) == 0 { + err := createTagTemplateField(d, config, changeNewProp, name, billingProject, userAgent) + if err != nil { + return err + } + + continue + } + + // if new state is empty, then we need to delete the current field + if len(change.New) == 0 { + err := deleteTagTemplateField(d, config, name, billingProject, userAgent) + if err != nil { + return err + } + + continue + } + + // if we have old and new values, but are not equal, update with the new state + if !reflect.DeepEqual(changeOldProp, changeNewProp) { + url1, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}/fields/"+name) + if err != nil { + return err + } + + oldType := changeOldProp["type"].(map[string]interface{}) + newType := changeNewProp["type"].(map[string]interface{}) + + if oldType["primitiveType"] != newType["primitiveType"] { + // As primitiveType 
can't be changed, it is considered as ForceNew which triggers the deletion of old field and recreation of a new field + // Before that, we need to check that is_required is True for the newType or not, as we don't have support to add new required field in the existing TagTemplate, + // So in such cases, we can simply return the error + + // Reason for checking the isRequired in changeNewProp - + // Because this changeNewProp check should be ignored when the user wants to update the primitive type and make it optional rather than keeping it required. + if changeNewProp["isRequired"] != nil && changeNewProp["isRequired"].(bool) { + return fmt.Errorf("Updating the primitive type for a required field on an existing tag template is not supported as TagTemplateField %q is required", name) + } + + // delete changeOldProp + err_delete := deleteTagTemplateField(d, config, name, billingProject, userAgent) + if err_delete != nil { + return err_delete + } + + // recreate changeNewProp + err_create := createTagTemplateField(d, config, changeNewProp, name, billingProject, userAgent) + if err_create != nil { + return err_create + } + + log.Printf("[DEBUG] Finished updating TagTemplate Field %q", name) + return resourceDataCatalogTagTemplateRead(d, meta) + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url1, + UserAgent: userAgent, + Body: changeNewProp, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return fmt.Errorf("Error updating TagTemplate Field %v: %s", name, err) + } + + log.Printf("[DEBUG] Finished updating TagTemplate Field %q: %#v", name, res) + } + } + return resourceDataCatalogTagTemplateRead(d, meta) +} + +func resourceDataCatalogTagTemplateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err 
+ } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TagTemplate: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}?force={{force_delete}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting TagTemplate %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "TagTemplate") + } + + log.Printf("[DEBUG] Finished deleting TagTemplate %q: %#v", d.Id(), res) + return nil +} + +func resourceDataCatalogTagTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + name := d.Get("name").(string) + egRegex := regexp.MustCompile("projects/(.+)/locations/(.+)/tagTemplates/(.+)") + + parts := egRegex.FindStringSubmatch(name) + if len(parts) != 4 { + return nil, fmt.Errorf("tag template name does not fit the format %s", egRegex) + } + if err := d.Set("project", parts[1]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", parts[2]); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("tag_template_id", parts[3]); err != nil { + return nil, fmt.Errorf("Error setting tag_template_id: 
%s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenDataCatalogTagTemplateName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogTagTemplateDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogTagTemplateFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.(map[string]interface{}) + transformed := make([]interface{}, 0, len(l)) + for k, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "field_id": k, + "name": flattenDataCatalogTagTemplateFieldsName(original["name"], d, config), + "display_name": flattenDataCatalogTagTemplateFieldsDisplayName(original["displayName"], d, config), + "description": flattenDataCatalogTagTemplateFieldsDescription(original["description"], d, config), + "type": flattenDataCatalogTagTemplateFieldsType(original["type"], d, config), + "is_required": flattenDataCatalogTagTemplateFieldsIsRequired(original["isRequired"], d, config), + "order": flattenDataCatalogTagTemplateFieldsOrder(original["order"], d, config), + }) + } + return transformed +} +func flattenDataCatalogTagTemplateFieldsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogTagTemplateFieldsDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogTagTemplateFieldsDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogTagTemplateFieldsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + 
} + transformed := make(map[string]interface{}) + transformed["primitive_type"] = + flattenDataCatalogTagTemplateFieldsTypePrimitiveType(original["primitiveType"], d, config) + transformed["enum_type"] = + flattenDataCatalogTagTemplateFieldsTypeEnumType(original["enumType"], d, config) + return []interface{}{transformed} +} +func flattenDataCatalogTagTemplateFieldsTypePrimitiveType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogTagTemplateFieldsTypeEnumType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["allowed_values"] = + flattenDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValues(original["allowedValues"], d, config) + return []interface{}{transformed} +} +func flattenDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValues(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(datacatalogTagTemplateFieldsFieldsTypeEnumTypeAllowedValuesSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "display_name": flattenDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValuesDisplayName(original["displayName"], d, config), + }) + } + return transformed +} +func flattenDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValuesDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogTagTemplateFieldsIsRequired(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v 
+} + +func flattenDataCatalogTagTemplateFieldsOrder(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandDataCatalogTagTemplateDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataCatalogTagTemplateFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + if v == nil { + return map[string]interface{}{}, nil + } + m := make(map[string]interface{}) + for _, raw := range v.(*schema.Set).List() { + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataCatalogTagTemplateFieldsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedDisplayName, err := expandDataCatalogTagTemplateFieldsDisplayName(original["display_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["displayName"] = transformedDisplayName + } + + transformedDescription, err := expandDataCatalogTagTemplateFieldsDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedType, 
err := expandDataCatalogTagTemplateFieldsType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedIsRequired, err := expandDataCatalogTagTemplateFieldsIsRequired(original["is_required"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIsRequired); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["isRequired"] = transformedIsRequired + } + + transformedOrder, err := expandDataCatalogTagTemplateFieldsOrder(original["order"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOrder); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["order"] = transformedOrder + } + + transformedFieldId, err := tpgresource.ExpandString(original["field_id"], d, config) + if err != nil { + return nil, err + } + m[transformedFieldId] = transformed + } + return m, nil +} + +func expandDataCatalogTagTemplateFieldsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataCatalogTagTemplateFieldsDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataCatalogTagTemplateFieldsDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataCatalogTagTemplateFieldsType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPrimitiveType, err := 
expandDataCatalogTagTemplateFieldsTypePrimitiveType(original["primitive_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimitiveType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["primitiveType"] = transformedPrimitiveType + } + + transformedEnumType, err := expandDataCatalogTagTemplateFieldsTypeEnumType(original["enum_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnumType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enumType"] = transformedEnumType + } + + return transformed, nil +} + +func expandDataCatalogTagTemplateFieldsTypePrimitiveType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataCatalogTagTemplateFieldsTypeEnumType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAllowedValues, err := expandDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValues(original["allowed_values"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowedValues); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowedValues"] = transformedAllowedValues + } + + return transformed, nil +} + +func expandDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValues(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDisplayName, err := 
expandDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValuesDisplayName(original["display_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["displayName"] = transformedDisplayName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValuesDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataCatalogTagTemplateFieldsIsRequired(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataCatalogTagTemplateFieldsOrder(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_taxonomy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_taxonomy.go new file mode 100644 index 0000000000..faf660e2d3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_taxonomy.go @@ -0,0 +1,408 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDataCatalogTaxonomy() *schema.Resource { + return &schema.Resource{ + Create: resourceDataCatalogTaxonomyCreate, + Read: resourceDataCatalogTaxonomyRead, + Update: resourceDataCatalogTaxonomyUpdate, + Delete: resourceDataCatalogTaxonomyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataCatalogTaxonomyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `User defined name of this taxonomy. +It must: contain only unicode letters, numbers, underscores, dashes +and spaces; not start or end with spaces; and be at most 200 bytes +long when encoded in UTF-8.`, + }, + "activated_policy_types": { + Type: schema.TypeList, + Optional: true, + Description: `A list of policy types that are activated for this taxonomy. If not set, +defaults to an empty list. Possible values: ["POLICY_TYPE_UNSPECIFIED", "FINE_GRAINED_ACCESS_CONTROL"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"POLICY_TYPE_UNSPECIFIED", "FINE_GRAINED_ACCESS_CONTROL"}), + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of this taxonomy. 
It must: contain only unicode characters, +tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes +long when encoded in UTF-8. If not set, defaults to an empty description.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Taxonomy location region.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Resource name of this taxonomy, whose format is: +"projects/{project}/locations/{region}/taxonomies/{taxonomy}".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataCatalogTaxonomyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDataCatalogTaxonomyDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandDataCatalogTaxonomyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + activatedPolicyTypesProp, err := expandDataCatalogTaxonomyActivatedPolicyTypes(d.Get("activated_policy_types"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("activated_policy_types"); !tpgresource.IsEmptyValue(reflect.ValueOf(activatedPolicyTypesProp)) && (ok || !reflect.DeepEqual(v, activatedPolicyTypesProp)) { + 
obj["activatedPolicyTypes"] = activatedPolicyTypesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}projects/{{project}}/locations/{{region}}/taxonomies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Taxonomy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Taxonomy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Taxonomy: %s", err) + } + if err := d.Set("name", flattenDataCatalogTaxonomyName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Taxonomy %q: %#v", d.Id(), res) + + return resourceDataCatalogTaxonomyRead(d, meta) +} + +func resourceDataCatalogTaxonomyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Taxonomy: %s", err) + } + billingProject = project 
+ + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataCatalogTaxonomy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Taxonomy: %s", err) + } + + if err := d.Set("name", flattenDataCatalogTaxonomyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Taxonomy: %s", err) + } + if err := d.Set("display_name", flattenDataCatalogTaxonomyDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Taxonomy: %s", err) + } + if err := d.Set("description", flattenDataCatalogTaxonomyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Taxonomy: %s", err) + } + if err := d.Set("activated_policy_types", flattenDataCatalogTaxonomyActivatedPolicyTypes(res["activatedPolicyTypes"], d, config)); err != nil { + return fmt.Errorf("Error reading Taxonomy: %s", err) + } + + return nil +} + +func resourceDataCatalogTaxonomyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Taxonomy: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandDataCatalogTaxonomyDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandDataCatalogTaxonomyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + activatedPolicyTypesProp, err := expandDataCatalogTaxonomyActivatedPolicyTypes(d.Get("activated_policy_types"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("activated_policy_types"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, activatedPolicyTypesProp)) { + obj["activatedPolicyTypes"] = activatedPolicyTypesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Taxonomy %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("activated_policy_types") { + updateMask = append(updateMask, "activatedPolicyTypes") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) 
+ + if err != nil { + return fmt.Errorf("Error updating Taxonomy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Taxonomy %q: %#v", d.Id(), res) + } + + return resourceDataCatalogTaxonomyRead(d, meta) +} + +func resourceDataCatalogTaxonomyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Taxonomy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Taxonomy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Taxonomy") + } + + log.Printf("[DEBUG] Finished deleting Taxonomy %q: %#v", d.Id(), res) + return nil +} + +func resourceDataCatalogTaxonomyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + name := d.Get("name").(string) + d.SetId(name) + + re := regexp.MustCompile("projects/(.+)/(?:locations|regions)/(.+)/taxonomies/(.+)") + if matches := re.FindStringSubmatch(name); 
matches != nil { + d.Set("project", matches[1]) + d.Set("region", matches[2]) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenDataCatalogTaxonomyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogTaxonomyDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogTaxonomyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataCatalogTaxonomyActivatedPolicyTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDataCatalogTaxonomyDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataCatalogTaxonomyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataCatalogTaxonomyActivatedPolicyTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_taxonomy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_taxonomy_sweeper.go new file mode 100644 index 0000000000..b2c41b64af --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datacatalog/resource_data_catalog_taxonomy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package datacatalog + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DataCatalogTaxonomy", testSweepDataCatalogTaxonomy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDataCatalogTaxonomy(region string) error { + resourceName := "DataCatalogTaxonomy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://datacatalog.googleapis.com/v1/projects/{{project}}/locations/{{region}}/taxonomies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["taxonomies"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://datacatalog.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + 
log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataflow/resource_dataflow_flex_template_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataflow/resource_dataflow_flex_template_job.go new file mode 100644 index 0000000000..6ca5ed3366 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataflow/resource_dataflow_flex_template_job.go @@ -0,0 +1,3 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package dataflow diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataflow_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataflow/resource_dataflow_job.go similarity index 84% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataflow_job.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataflow/resource_dataflow_job.go index b667cdb27c..1815fff0ec 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataflow_job.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataflow/resource_dataflow_job.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package dataflow import ( "context" @@ -7,6 +9,9 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -18,12 +23,12 @@ import ( const resourceDataflowJobGoogleProvidedLabelPrefix = "labels.goog-dataflow-provided" -var dataflowTerminatingStatesMap = map[string]struct{}{ +var DataflowTerminatingStatesMap = map[string]struct{}{ "JOB_STATE_CANCELLING": {}, "JOB_STATE_DRAINING": {}, } -var dataflowTerminalStatesMap = map[string]struct{}{ +var DataflowTerminalStatesMap = map[string]struct{}{ "JOB_STATE_DONE": {}, "JOB_STATE_FAILED": {}, "JOB_STATE_CANCELLED": {}, @@ -116,6 +121,7 @@ func ResourceDataflowJob() *schema.Resource { "labels": { Type: schema.TypeMap, Optional: true, + Computed: true, DiffSuppressFunc: resourceDataflowJobLabelDiffSuppress, Description: `User labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. NOTE: Google-provided Dataflow templates often provide default labels that begin with goog-dataflow-provided. Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply.`, }, @@ -162,14 +168,14 @@ func ResourceDataflowJob() *schema.Resource { "network": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The network to which VMs will be assigned. 
If it is not provided, "default" will be used.`, }, "subnetwork": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK".`, }, @@ -252,31 +258,31 @@ func resourceDataflowJobTypeCustomizeDiff(_ context.Context, d *schema.ResourceD // return true if a job is in a terminal state, OR if a job is in a // terminating state and skipWait is true func shouldStopDataflowJobDeleteQuery(state string, skipWait bool) bool { - _, stopQuery := dataflowTerminalStatesMap[state] + _, stopQuery := DataflowTerminalStatesMap[state] if !stopQuery && skipWait { - _, stopQuery = dataflowTerminatingStatesMap[state] + _, stopQuery = DataflowTerminatingStatesMap[state] } return stopQuery } func resourceDataflowJobCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } - params := expandStringMap(d, "parameters") + params := tpgresource.ExpandStringMap(d, "parameters") env, err := resourceDataflowJobSetupEnv(d, config) if err != nil { @@ -300,18 +306,18 @@ func resourceDataflowJobCreate(d *schema.ResourceData, meta interface{}) error { } func resourceDataflowJobRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -320,7 +326,7 @@ func resourceDataflowJobRead(d *schema.ResourceData, meta interface{}) error { job, err := resourceDataflowJobGetJob(config, project, region, userAgent, id) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Dataflow job %s", id)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Dataflow job %s", id)) } if err := d.Set("job_id", job.Id); err != nil { @@ -345,7 +351,7 @@ func resourceDataflowJobRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error setting kms_key_name: %s", err) } - sdkPipelineOptions, err := ConvertToMap(job.Environment.SdkPipelineOptions) + sdkPipelineOptions, err := tpgresource.ConvertToMap(job.Environment.SdkPipelineOptions) if err != nil { return err } @@ -383,24 +389,24 @@ func resourceDataflowJobUpdateByReplacement(d *schema.ResourceData, meta interfa return nil } - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } - params := expandStringMap(d, "parameters") - tnamemapping := expandStringMap(d, "transform_name_mapping") + params := tpgresource.ExpandStringMap(d, "parameters") + tnamemapping := tpgresource.ExpandStringMap(d, "transform_name_mapping") env, err := resourceDataflowJobSetupEnv(d, config) if err != nil { @@ -416,10 +422,14 @@ func 
resourceDataflowJobUpdateByReplacement(d *schema.ResourceData, meta interfa } var response *dataflow.LaunchTemplateResponse - err = RetryTimeDuration(func() (updateErr error) { - response, updateErr = resourceDataflowJobLaunchTemplate(config, project, region, userAgent, d.Get("template_gcs_path").(string), &request) - return updateErr - }, time.Minute*time.Duration(5), isDataflowJobUpdateRetryableError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (updateErr error) { + response, updateErr = resourceDataflowJobLaunchTemplate(config, project, region, userAgent, d.Get("template_gcs_path").(string), &request) + return updateErr + }, + Timeout: time.Minute * time.Duration(5), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsDataflowJobUpdateRetryableError}, + }) if err != nil { return err } @@ -434,18 +444,18 @@ func resourceDataflowJobUpdateByReplacement(d *schema.ResourceData, meta interfa } func resourceDataflowJobDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -526,40 +536,40 @@ func resourceDataflowJobMapRequestedState(policy string) (string, error) { } } -func resourceDataflowJobCreateJob(config *Config, project, region, userAgent string, request *dataflow.CreateJobFromTemplateRequest) (*dataflow.Job, error) { +func resourceDataflowJobCreateJob(config *transport_tpg.Config, project, region, userAgent string, request *dataflow.CreateJobFromTemplateRequest) (*dataflow.Job, error) { if region == "" { return 
config.NewDataflowClient(userAgent).Projects.Templates.Create(project, request).Do() } return config.NewDataflowClient(userAgent).Projects.Locations.Templates.Create(project, region, request).Do() } -func resourceDataflowJobGetJob(config *Config, project, region, userAgent string, id string) (*dataflow.Job, error) { +func resourceDataflowJobGetJob(config *transport_tpg.Config, project, region, userAgent string, id string) (*dataflow.Job, error) { if region == "" { return config.NewDataflowClient(userAgent).Projects.Jobs.Get(project, id).View("JOB_VIEW_ALL").Do() } return config.NewDataflowClient(userAgent).Projects.Locations.Jobs.Get(project, region, id).View("JOB_VIEW_ALL").Do() } -func resourceDataflowJobUpdateJob(config *Config, project, region, userAgent string, id string, job *dataflow.Job) (*dataflow.Job, error) { +func resourceDataflowJobUpdateJob(config *transport_tpg.Config, project, region, userAgent string, id string, job *dataflow.Job) (*dataflow.Job, error) { if region == "" { return config.NewDataflowClient(userAgent).Projects.Jobs.Update(project, id, job).Do() } return config.NewDataflowClient(userAgent).Projects.Locations.Jobs.Update(project, region, id, job).Do() } -func resourceDataflowJobLaunchTemplate(config *Config, project, region, userAgent string, gcsPath string, request *dataflow.LaunchTemplateParameters) (*dataflow.LaunchTemplateResponse, error) { +func resourceDataflowJobLaunchTemplate(config *transport_tpg.Config, project, region, userAgent string, gcsPath string, request *dataflow.LaunchTemplateParameters) (*dataflow.LaunchTemplateResponse, error) { if region == "" { return config.NewDataflowClient(userAgent).Projects.Templates.Launch(project, request).GcsPath(gcsPath).Do() } return config.NewDataflowClient(userAgent).Projects.Locations.Templates.Launch(project, region, request).GcsPath(gcsPath).Do() } -func resourceDataflowJobSetupEnv(d *schema.ResourceData, config *Config) (dataflow.RuntimeEnvironment, error) { - zone, _ := getZone(d, 
config) +func resourceDataflowJobSetupEnv(d *schema.ResourceData, config *transport_tpg.Config) (dataflow.RuntimeEnvironment, error) { + zone, _ := tpgresource.GetZone(d, config) - labels := expandStringMap(d, "labels") + labels := tpgresource.ExpandStringMap(d, "labels") - additionalExperiments := convertStringSet(d.Get("additional_experiments").(*schema.Set)) + additionalExperiments := tpgresource.ConvertStringSet(d.Get("additional_experiments").(*schema.Set)) env := dataflow.RuntimeEnvironment{ MaxWorkers: int64(d.Get("max_workers").(int)), @@ -624,21 +634,21 @@ func resourceDataflowJobIsVirtualUpdate(d *schema.ResourceData, resourceSchema m return false } -func waitForDataflowJobToBeUpdated(d *schema.ResourceData, config *Config, replacementJobID, userAgent string, timeout time.Duration) error { +func waitForDataflowJobToBeUpdated(d *schema.ResourceData, config *transport_tpg.Config, replacementJobID, userAgent string, timeout time.Duration) error { return resource.Retry(timeout, func() *resource.RetryError { - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return resource.NonRetryableError(err) } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return resource.NonRetryableError(err) } replacementJob, err := resourceDataflowJobGetJob(config, project, region, userAgent, replacementJobID) if err != nil { - if isRetryableError(err) { + if transport_tpg.IsRetryableError(err, nil, nil) { return resource.RetryableError(err) } return resource.NonRetryableError(err) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datafusion/data_fusion_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datafusion/data_fusion_operation.go new file mode 100644 index 0000000000..99092fcfc5 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datafusion/data_fusion_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package datafusion + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type DataFusionOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *DataFusionOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.DataFusionBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createDataFusionWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*DataFusionOperationWaiter, error) { + w := &DataFusionOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func DataFusionOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createDataFusionWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func DataFusionOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createDataFusionWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datafusion/iam_data_fusion_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datafusion/iam_data_fusion_instance.go new file mode 100644 index 0000000000..9343367c91 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datafusion/iam_data_fusion_instance.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datafusion + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var DataFusionInstanceIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type DataFusionInstanceIamUpdater struct { + project string + region string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func DataFusionInstanceIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + } + values["region"] = region + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", 
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataFusionInstanceIamUpdater{ + project: values["project"], + region: values["region"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", u.region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func DataFusionInstanceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + region, _ := tpgresource.GetRegion(d, config) + if region != "" { + values["region"] = region + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DataFusionInstanceIamUpdater{ + project: values["project"], + region: values["region"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataFusionInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyInstanceUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + 
userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataFusionInstanceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyInstanceUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataFusionInstanceIamUpdater) qualifyInstanceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataFusionBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/instances/%s", u.project, u.region, u.name), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + 
+func (u *DataFusionInstanceIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/instances/%s", u.project, u.region, u.name) +} + +func (u *DataFusionInstanceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-datafusion-instance-%s", u.GetResourceId()) +} + +func (u *DataFusionInstanceIamUpdater) DescribeResource() string { + return fmt.Sprintf("datafusion instance %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datafusion/resource_data_fusion_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datafusion/resource_data_fusion_instance.go new file mode 100644 index 0000000000..35125a77ce --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datafusion/resource_data_fusion_instance.go @@ -0,0 +1,1198 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datafusion + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +var instanceAcceleratorOptions = []string{ + "delta.default.checkpoint.directory", + "ui.feature.cdc", +} + +func instanceOptionsDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // Suppress diffs for the options generated by adding an accelerator to a data fusion instance + for _, option := range instanceAcceleratorOptions { + if strings.Contains(k, option) && new == "" { + return true + } + } + + // Let diff be determined by options (above) + if strings.Contains(k, "options.%") { + return true + } + + // For other keys, don't suppress diff. 
+ return false +} + +func ResourceDataFusionInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceDataFusionInstanceCreate, + Read: resourceDataFusionInstanceRead, + Update: resourceDataFusionInstanceUpdate, + Delete: resourceDataFusionInstanceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataFusionInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(90 * time.Minute), + Update: schema.DefaultTimeout(25 * time.Minute), + Delete: schema.DefaultTimeout(50 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the instance or a fully qualified identifier for the instance.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"BASIC", "ENTERPRISE", "DEVELOPER"}), + Description: `Represents the type of Data Fusion instance. Each type is configured with +the default settings for processing and memory. +- BASIC: Basic Data Fusion instance. In Basic type, the user will be able to create data pipelines +using point and click UI. However, there are certain limitations, such as fewer number +of concurrent pipelines, no support for streaming pipelines, etc. +- ENTERPRISE: Enterprise Data Fusion instance. In Enterprise type, the user will have more features +available, such as support for streaming pipelines, higher number of concurrent pipelines, etc. +- DEVELOPER: Developer Data Fusion instance. In Developer type, the user will have all features available but +with restrictive capabilities. This is to help enterprises design and develop their data ingestion and integration +pipelines at low cost. Possible values: ["BASIC", "ENTERPRISE", "DEVELOPER"]`, + }, + "accelerators": { + Type: schema.TypeList, + Optional: true, + Description: `List of accelerators enabled for this CDF instance. 
+ +If accelerators are enabled it is possible a permadiff will be created with the Options field. +Users will need to either manually update their state file to include these diffed options, or include the field in a [lifecycle ignore changes block](https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle#ignore_changes).`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"CDC", "HEALTHCARE", "CCAI_INSIGHTS"}), + Description: `The type of an accelator for a CDF instance. Possible values: ["CDC", "HEALTHCARE", "CCAI_INSIGHTS"]`, + }, + "state": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"ENABLED", "DISABLED"}), + Description: `The type of an accelator for a CDF instance. Possible values: ["ENABLED", "DISABLED"]`, + }, + }, + }, + }, + "crypto_key_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The crypto key configuration. This field is used by the Customer-Managed Encryption Keys (CMEK) feature.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key_reference": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the key which is used to encrypt/decrypt customer data. 
For key in Cloud KMS, the key should be in the format of projects/*/locations/*/keyRings/*/cryptoKeys/*.`, + }, + }, + }, + }, + "dataproc_service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of the instance.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Display name for an instance.`, + }, + "enable_rbac": { + Type: schema.TypeBool, + Optional: true, + Description: `Option to enable granular role-based access control.`, + }, + "enable_stackdriver_logging": { + Type: schema.TypeBool, + Optional: true, + Description: `Option to enable Stackdriver Logging.`, + }, + "enable_stackdriver_monitoring": { + Type: schema.TypeBool, + Optional: true, + Description: `Option to enable Stackdriver Monitoring.`, + }, + "event_publish_config": { + Type: schema.TypeList, + Optional: true, + Description: `Option to enable and pass metadata for event publishing.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Option to enable Event Publishing.`, + }, + "topic": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name of the Pub/Sub topic. Format: projects/{projectId}/topics/{topic_id}`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The resource labels for instance to use to annotate any related underlying resources, +such as Compute Engine VMs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "network_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Network configuration options. 
These are required when a private Data Fusion instance is to be created.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_allocation": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The IP range in CIDR notation to use for the managed Data Fusion instance +nodes. This range must not overlap with any other ranges used in the Data Fusion instance network.`, + }, + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the network in the project with which the tenant project +will be peered for executing pipelines. In case of shared VPC where the network resides in another host +project the network should specified in the form of projects/{host-project-id}/global/networks/{network}`, + }, + }, + }, + }, + "options": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: instanceOptionsDiffSuppress, + Description: `Map of additional options used to configure the behavior of Data Fusion instance.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "private_instance": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Specifies whether the Data Fusion instance should be private. If set to +true, all Data Fusion nodes will have private IP addresses and will not be +able to access the public internet.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The region of the Data Fusion instance.`, + }, + "version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Current version of the Data Fusion.`, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Name of the zone in which the Data Fusion instance will be created. 
Only DEVELOPER instances use this field.`, + }, + "api_endpoint": { + Type: schema.TypeString, + Computed: true, + Description: `Endpoint on which the REST APIs is accessible.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time the instance was created in RFC3339 UTC "Zulu" format, accurate to nanoseconds.`, + }, + "gcs_bucket": { + Type: schema.TypeString, + Computed: true, + Description: `Cloud Storage bucket generated by Data Fusion in the customer project.`, + }, + "p4_service_account": { + Type: schema.TypeString, + Computed: true, + Description: `P4 service account for the customer project.`, + }, + "service_endpoint": { + Type: schema.TypeString, + Computed: true, + Description: `Endpoint on which the Data Fusion UI and REST APIs are accessible.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of this Data Fusion instance. +- CREATING: Instance is being created +- RUNNING: Instance is running and ready for requests +- FAILED: Instance creation failed +- DELETING: Instance is being deleted +- UPGRADING: Instance is being upgraded +- RESTARTING: Instance is being restarted`, + }, + "state_message": { + Type: schema.TypeString, + Computed: true, + Description: `Additional information about the current state of this Data Fusion instance if available.`, + }, + "tenant_project_id": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the tenant project.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time the instance was last updated in RFC3339 UTC "Zulu" format, accurate to nanoseconds.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataFusionInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandDataFusionInstanceName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandDataFusionInstanceDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + typeProp, err := expandDataFusionInstanceType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + enableStackdriverLoggingProp, err := expandDataFusionInstanceEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_stackdriver_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableStackdriverLoggingProp)) && (ok || !reflect.DeepEqual(v, enableStackdriverLoggingProp)) { + obj["enableStackdriverLogging"] = enableStackdriverLoggingProp + } + enableStackdriverMonitoringProp, err := expandDataFusionInstanceEnableStackdriverMonitoring(d.Get("enable_stackdriver_monitoring"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_stackdriver_monitoring"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableStackdriverMonitoringProp)) && (ok || !reflect.DeepEqual(v, enableStackdriverMonitoringProp)) { + obj["enableStackdriverMonitoring"] = enableStackdriverMonitoringProp + } + enableRbacProp, err := 
expandDataFusionInstanceEnableRbac(d.Get("enable_rbac"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_rbac"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableRbacProp)) && (ok || !reflect.DeepEqual(v, enableRbacProp)) { + obj["enableRbac"] = enableRbacProp + } + labelsProp, err := expandDataFusionInstanceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + optionsProp, err := expandDataFusionInstanceOptions(d.Get("options"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("options"); !tpgresource.IsEmptyValue(reflect.ValueOf(optionsProp)) && (ok || !reflect.DeepEqual(v, optionsProp)) { + obj["options"] = optionsProp + } + versionProp, err := expandDataFusionInstanceVersion(d.Get("version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version"); !tpgresource.IsEmptyValue(reflect.ValueOf(versionProp)) && (ok || !reflect.DeepEqual(v, versionProp)) { + obj["version"] = versionProp + } + privateInstanceProp, err := expandDataFusionInstancePrivateInstance(d.Get("private_instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("private_instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(privateInstanceProp)) && (ok || !reflect.DeepEqual(v, privateInstanceProp)) { + obj["privateInstance"] = privateInstanceProp + } + dataprocServiceAccountProp, err := expandDataFusionInstanceDataprocServiceAccount(d.Get("dataproc_service_account"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dataproc_service_account"); !tpgresource.IsEmptyValue(reflect.ValueOf(dataprocServiceAccountProp)) && (ok || !reflect.DeepEqual(v, dataprocServiceAccountProp)) { + obj["dataprocServiceAccount"] = dataprocServiceAccountProp + } + 
networkConfigProp, err := expandDataFusionInstanceNetworkConfig(d.Get("network_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkConfigProp)) && (ok || !reflect.DeepEqual(v, networkConfigProp)) { + obj["networkConfig"] = networkConfigProp + } + zoneProp, err := expandDataFusionInstanceZone(d.Get("zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + obj["zone"] = zoneProp + } + displayNameProp, err := expandDataFusionInstanceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + cryptoKeyConfigProp, err := expandDataFusionInstanceCryptoKeyConfig(d.Get("crypto_key_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("crypto_key_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(cryptoKeyConfigProp)) && (ok || !reflect.DeepEqual(v, cryptoKeyConfigProp)) { + obj["cryptoKeyConfig"] = cryptoKeyConfigProp + } + eventPublishConfigProp, err := expandDataFusionInstanceEventPublishConfig(d.Get("event_publish_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("event_publish_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(eventPublishConfigProp)) && (ok || !reflect.DeepEqual(v, eventPublishConfigProp)) { + obj["eventPublishConfig"] = eventPublishConfigProp + } + acceleratorsProp, err := expandDataFusionInstanceAccelerators(d.Get("accelerators"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("accelerators"); !tpgresource.IsEmptyValue(reflect.ValueOf(acceleratorsProp)) && (ok || !reflect.DeepEqual(v, 
acceleratorsProp)) { + obj["accelerators"] = acceleratorsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataFusionBasePath}}projects/{{project}}/locations/{{region}}/instances?instanceId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Instance: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Instance: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = DataFusionOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Instance", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Instance: %s", err) + } + + if err := d.Set("name", flattenDataFusionInstanceName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) + + return resourceDataFusionInstanceRead(d, meta) +} + +func resourceDataFusionInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataFusionBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataFusionInstance %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + if err := d.Set("region", region); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + if err := d.Set("name", flattenDataFusionInstanceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("description", flattenDataFusionInstanceDescription(res["description"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("type", flattenDataFusionInstanceType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("enable_stackdriver_logging", flattenDataFusionInstanceEnableStackdriverLogging(res["enableStackdriverLogging"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("enable_stackdriver_monitoring", flattenDataFusionInstanceEnableStackdriverMonitoring(res["enableStackdriverMonitoring"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("enable_rbac", flattenDataFusionInstanceEnableRbac(res["enableRbac"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("labels", flattenDataFusionInstanceLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("options", flattenDataFusionInstanceOptions(res["options"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("create_time", flattenDataFusionInstanceCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("update_time", flattenDataFusionInstanceUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("state", flattenDataFusionInstanceState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("state_message", flattenDataFusionInstanceStateMessage(res["stateMessage"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("service_endpoint", flattenDataFusionInstanceServiceEndpoint(res["serviceEndpoint"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: 
%s", err) + } + if err := d.Set("version", flattenDataFusionInstanceVersion(res["version"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("private_instance", flattenDataFusionInstancePrivateInstance(res["privateInstance"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("dataproc_service_account", flattenDataFusionInstanceDataprocServiceAccount(res["dataprocServiceAccount"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("tenant_project_id", flattenDataFusionInstanceTenantProjectId(res["tenantProjectId"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("gcs_bucket", flattenDataFusionInstanceGcsBucket(res["gcsBucket"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("network_config", flattenDataFusionInstanceNetworkConfig(res["networkConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("zone", flattenDataFusionInstanceZone(res["zone"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("display_name", flattenDataFusionInstanceDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("api_endpoint", flattenDataFusionInstanceApiEndpoint(res["apiEndpoint"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("p4_service_account", flattenDataFusionInstanceP4ServiceAccount(res["p4ServiceAccount"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("crypto_key_config", flattenDataFusionInstanceCryptoKeyConfig(res["cryptoKeyConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) 
+ } + if err := d.Set("event_publish_config", flattenDataFusionInstanceEventPublishConfig(res["eventPublishConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("accelerators", flattenDataFusionInstanceAccelerators(res["accelerators"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + return nil +} + +func resourceDataFusionInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + enableStackdriverLoggingProp, err := expandDataFusionInstanceEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_stackdriver_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableStackdriverLoggingProp)) { + obj["enableStackdriverLogging"] = enableStackdriverLoggingProp + } + enableStackdriverMonitoringProp, err := expandDataFusionInstanceEnableStackdriverMonitoring(d.Get("enable_stackdriver_monitoring"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_stackdriver_monitoring"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableStackdriverMonitoringProp)) { + obj["enableStackdriverMonitoring"] = enableStackdriverMonitoringProp + } + enableRbacProp, err := expandDataFusionInstanceEnableRbac(d.Get("enable_rbac"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_rbac"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableRbacProp)) { + 
obj["enableRbac"] = enableRbacProp + } + labelsProp, err := expandDataFusionInstanceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + versionProp, err := expandDataFusionInstanceVersion(d.Get("version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, versionProp)) { + obj["version"] = versionProp + } + eventPublishConfigProp, err := expandDataFusionInstanceEventPublishConfig(d.Get("event_publish_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("event_publish_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, eventPublishConfigProp)) { + obj["eventPublishConfig"] = eventPublishConfigProp + } + acceleratorsProp, err := expandDataFusionInstanceAccelerators(d.Get("accelerators"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("accelerators"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, acceleratorsProp)) { + obj["accelerators"] = acceleratorsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataFusionBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("enable_stackdriver_logging") { + updateMask = append(updateMask, "enableStackdriverLogging") + } + + if d.HasChange("enable_stackdriver_monitoring") { + updateMask = append(updateMask, "enableStackdriverMonitoring") + } + + if d.HasChange("enable_rbac") { + updateMask = append(updateMask, "enableRbac") + } + + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set 
it + + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) + } + + err = DataFusionOperationWaitTime( + config, res, project, "Updating Instance", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceDataFusionInstanceRead(d, meta) +} + +func resourceDataFusionInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DataFusionBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Instance %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, 
+ Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Instance") + } + + err = DataFusionOperationWaitTime( + config, res, project, "Deleting Instance", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) + return nil +} + +func resourceDataFusionInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDataFusionInstanceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenDataFusionInstanceDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceEnableStackdriverLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceEnableStackdriverMonitoring(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceEnableRbac(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceStateMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceServiceEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstancePrivateInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceDataprocServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceTenantProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceGcsBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceNetworkConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil 
+ } + transformed := make(map[string]interface{}) + transformed["ip_allocation"] = + flattenDataFusionInstanceNetworkConfigIpAllocation(original["ipAllocation"], d, config) + transformed["network"] = + flattenDataFusionInstanceNetworkConfigNetwork(original["network"], d, config) + return []interface{}{transformed} +} +func flattenDataFusionInstanceNetworkConfigIpAllocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceNetworkConfigNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceApiEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceP4ServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceCryptoKeyConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key_reference"] = + flattenDataFusionInstanceCryptoKeyConfigKeyReference(original["keyReference"], d, config) + return []interface{}{transformed} +} +func flattenDataFusionInstanceCryptoKeyConfigKeyReference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceEventPublishConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenDataFusionInstanceEventPublishConfigEnabled(original["enabled"], d, config) + transformed["topic"] = + flattenDataFusionInstanceEventPublishConfigTopic(original["topic"], d, config) + return []interface{}{transformed} +} +func flattenDataFusionInstanceEventPublishConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceEventPublishConfigTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceAccelerators(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "accelerator_type": flattenDataFusionInstanceAcceleratorsAcceleratorType(original["acceleratorType"], d, config), + "state": flattenDataFusionInstanceAcceleratorsState(original["state"], d, config), + }) + } + return transformed +} +func flattenDataFusionInstanceAcceleratorsAcceleratorType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceAcceleratorsState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDataFusionInstanceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") +} + +func expandDataFusionInstanceDescription(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceEnableStackdriverLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceEnableStackdriverMonitoring(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceEnableRbac(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataFusionInstanceOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataFusionInstanceVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstancePrivateInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceDataprocServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataFusionInstanceNetworkConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIpAllocation, err := expandDataFusionInstanceNetworkConfigIpAllocation(original["ip_allocation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpAllocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipAllocation"] = transformedIpAllocation + } + + transformedNetwork, err := expandDataFusionInstanceNetworkConfigNetwork(original["network"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["network"] = transformedNetwork + } + + return transformed, nil +} + +func expandDataFusionInstanceNetworkConfigIpAllocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceNetworkConfigNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceCryptoKeyConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := 
make(map[string]interface{}) + + transformedKeyReference, err := expandDataFusionInstanceCryptoKeyConfigKeyReference(original["key_reference"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKeyReference); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["keyReference"] = transformedKeyReference + } + + return transformed, nil +} + +func expandDataFusionInstanceCryptoKeyConfigKeyReference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceEventPublishConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnabled, err := expandDataFusionInstanceEventPublishConfigEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enabled"] = transformedEnabled + } + + transformedTopic, err := expandDataFusionInstanceEventPublishConfigTopic(original["topic"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["topic"] = transformedTopic + } + + return transformed, nil +} + +func expandDataFusionInstanceEventPublishConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceEventPublishConfigTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceAccelerators(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAcceleratorType, err := expandDataFusionInstanceAcceleratorsAcceleratorType(original["accelerator_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceleratorType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["acceleratorType"] = transformedAcceleratorType + } + + transformedState, err := expandDataFusionInstanceAcceleratorsState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["state"] = transformedState + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataFusionInstanceAcceleratorsAcceleratorType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceAcceleratorsState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datafusion/resource_data_fusion_instance_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datafusion/resource_data_fusion_instance_sweeper.go new file mode 100644 index 0000000000..a13f189071 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datafusion/resource_data_fusion_instance_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package datafusion + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DataFusionInstance", testSweepDataFusionInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDataFusionInstance(region string) error { + resourceName := "DataFusionInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://datafusion.googleapis.com/v1/projects/{{project}}/locations/{{region}}/instances", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["instances"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://datafusion.googleapis.com/v1/projects/{{project}}/locations/{{region}}/instances/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for 
url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_deidentify_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_deidentify_template.go new file mode 100644 index 0000000000..a9c2a7a2ea --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_deidentify_template.go @@ -0,0 +1,18472 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datalossprevention + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDataLossPreventionDeidentifyTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceDataLossPreventionDeidentifyTemplateCreate, + Read: resourceDataLossPreventionDeidentifyTemplateRead, + Update: resourceDataLossPreventionDeidentifyTemplateUpdate, + Delete: resourceDataLossPreventionDeidentifyTemplateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataLossPreventionDeidentifyTemplateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "deidentify_config": { + Type: schema.TypeList, + Required: true, + Description: `Configuration of the deidentify template`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image_transformations": { + Type: schema.TypeList, + Optional: true, + Description: `Treat the dataset as an image and redact.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "transforms": { + Type: schema.TypeList, + Required: true, + Description: `For determination of how redaction of images should occur.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "all_info_types": { + Type: schema.TypeList, + Optional: true, + Description: `Apply transformation to all findings not specified in other ImageTransformation's 
selectedInfoTypes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "all_text": { + Type: schema.TypeList, + Optional: true, + Description: `Apply transformation to all text that doesn't match an infoType.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "redaction_color": { + Type: schema.TypeList, + Optional: true, + Description: `The color to use when redacting content from an image. If not specified, the default is black.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "blue": { + Type: schema.TypeFloat, + Optional: true, + Description: `The amount of blue in the color as a value in the interval [0, 1].`, + Default: 0.0, + }, + "green": { + Type: schema.TypeFloat, + Optional: true, + Description: `The amount of green in the color as a value in the interval [0, 1].`, + Default: 0.0, + }, + "red": { + Type: schema.TypeFloat, + Optional: true, + Description: `The amount of red in the color as a value in the interval [0, 1].`, + Default: 0.0, + }, + }, + }, + }, + "selected_info_types": { + Type: schema.TypeList, + Optional: true, + Description: `Apply transformation to the selected infoTypes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "info_types": { + Type: schema.TypeList, + Required: true, + Description: `InfoTypes to apply the transformation to. Leaving this empty will apply the transformation to apply to +all findings that correspond to infoTypes that were requested in InspectConfig.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. 
This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version name for this InfoType.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"deidentify_config.0.info_type_transformations", "deidentify_config.0.record_transformations", "deidentify_config.0.image_transformations"}, + }, + "info_type_transformations": { + Type: schema.TypeList, + Optional: true, + Description: `Treat the dataset as free-form text and apply the same free text transformation everywhere`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "transformations": { + Type: schema.TypeList, + Required: true, + Description: `Transformation for each infoType. Cannot specify more than one for a given infoType.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "primitive_transformation": { + Type: schema.TypeList, + Required: true, + Description: `Primitive transformation to apply to the infoType. +The 'primitive_transformation' block must only contain one argument, corresponding to the type of transformation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucketing_config": { + Type: schema.TypeList, + Optional: true, + Description: `Generalization function that buckets values based on ranges. The ranges and replacement values are dynamically provided by the user for custom behavior, such as 1-30 -> LOW 31-65 -> MEDIUM 66-100 -> HIGH +This can be used on data of type: number, long, string, timestamp. 
+If the provided value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. +See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "buckets": { + Type: schema.TypeList, + Optional: true, + Description: `Set of buckets. Ranges must be non-overlapping. +Bucket is represented as a range, along with replacement values.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "replacement_value": { + Type: schema.TypeList, + Required: true, + Description: `Replacement value for this bucket. +The 'replacement_value' block must only contain one argument.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + "max": { + Type: schema.TypeList, + Optional: true, + Description: `Upper bound of the range, exclusive; type must match min. +The 'max' block must only contain one argument. 
See the 'bucketing_config' block description for more information about choosing a data type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + "min": { + Type: schema.TypeList, + Optional: true, + Description: `Lower bound of the range, inclusive. Type should be the same as max if used. +The 'min' block must only contain one argument. See the 'bucketing_config' block description for more information about choosing a data type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + Description: `Year of the date. 
Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "character_mask_config": { + Type: schema.TypeList, + Optional: true, + Description: `Partially mask a string by replacing a given number of characters with a fixed character. +Masking can start from the beginning or end of the string.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "characters_to_ignore": { + Type: schema.TypeList, + Optional: true, + Description: `Characters to skip when doing de-identification of a value. These will be left alone and skipped.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "characters_to_skip": { + Type: schema.TypeString, + Optional: true, + Description: `Characters to not transform when masking.`, + }, + "common_characters_to_ignore": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NUMERIC", "ALPHA_UPPER_CASE", "ALPHA_LOWER_CASE", "PUNCTUATION", "WHITESPACE", ""}), + Description: `Common characters to not transform when masking. Useful to avoid removing punctuation. Possible values: ["NUMERIC", "ALPHA_UPPER_CASE", "ALPHA_LOWER_CASE", "PUNCTUATION", "WHITESPACE"]`, + }, + }, + }, + }, + "masking_character": { + Type: schema.TypeString, + Optional: true, + Description: `Character to use to mask the sensitive values—for example, * for an alphabetic string such as a name, or 0 for a numeric string +such as ZIP code or credit card number. This string must have a length of 1. If not supplied, this value defaults to * for +strings, and 0 for digits.`, + }, + "number_to_mask": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of characters to mask. If not set, all matching chars will be masked. Skipped characters do not count towards this tally.`, + }, + "reverse_order": { + Type: schema.TypeBool, + Optional: true, + Description: `Mask characters in reverse order. 
For example, if masking_character is 0, number_to_mask is 14, and reverse_order is 'false', then the +input string '1234-5678-9012-3456' is masked as '00000000000000-3456'.`, + }, + }, + }, + }, + "crypto_deterministic_config": { + Type: schema.TypeList, + Optional: true, + Description: `Pseudonymization method that generates deterministic encryption for the given input. Outputs a base64 encoded representation of the encrypted output. Uses AES-SIV based on the RFC [https://tools.ietf.org/html/rfc5297](https://tools.ietf.org/html/rfc5297).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "context": { + Type: schema.TypeList, + Optional: true, + Description: `A context may be used for higher security and maintaining referential integrity such that the same identifier in two different contexts will be given a distinct surrogate. The context is appended to plaintext value being encrypted. On decryption the provided context is validated against the value used during encryption. If a context was provided during encryption, same context must be provided during decryption as well. + +If the context is not set, plaintext would be used as is for encryption. If the context is set but: + +1. there is no record present when transforming a given value or +2. the field is not present when transforming a given value, + +plaintext would be used as is for encryption. 
+ +Note that case (1) is expected when an 'InfoTypeTransformation' is applied to both structured and non-structured 'ContentItem's.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + "crypto_key": { + Type: schema.TypeList, + Optional: true, + Description: `The key used by the encryption function.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_wrapped": { + Type: schema.TypeList, + Optional: true, + Description: `KMS wrapped key. +Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt +For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). +Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, + }, + "wrapped_key": { + Type: schema.TypeString, + Required: true, + Description: `The wrapped data crypto key. + +A base64-encoded string.`, + }, + }, + }, + }, + "transient": { + Type: schema.TypeList, + Optional: true, + Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the key. This is an arbitrary string used to differentiate different keys. 
A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, + }, + }, + }, + }, + "unwrapped": { + Type: schema.TypeList, + Optional: true, + Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `A 128/192/256 bit key. + +A base64-encoded string.`, + }, + }, + }, + }, + }, + }, + }, + "surrogate_info_type": { + Type: schema.TypeList, + Optional: true, + Description: `The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} + +For example, if the name of custom info type is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' + +This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. + +Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. 
+ +In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either + +* reverse a surrogate that does not correspond to an actual identifier +* be unable to parse the surrogate and result in an error + +Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern '[A-Za-z0-9$-_]{1,64}'.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. 
Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Optional version name for this InfoType.`, + }, + }, + }, + }, + }, + }, + }, + "crypto_hash_config": { + Type: schema.TypeList, + Optional: true, + Description: `Pseudonymization method that generates surrogates via cryptographic hashing. Uses SHA-256. The key size must be either 32 or 64 bytes. +Outputs a base64 encoded representation of the hashed output (for example, L7k0BHmF1ha5U3NfGykjro4xWi1MPVQPjhMAZbSV9mM=). +Currently, only string and integer values can be hashed. +See https://cloud.google.com/dlp/docs/pseudonymization to learn more.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key": { + Type: schema.TypeList, + Optional: true, + Description: `The key used by the encryption function.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_wrapped": { + Type: schema.TypeList, + Optional: true, + Description: `KMS wrapped key. +Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt +For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). +Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, + }, + "wrapped_key": { + Type: schema.TypeString, + Required: true, + Description: `The wrapped data crypto key. 
+ +A base64-encoded string.`, + }, + }, + }, + }, + "transient": { + Type: schema.TypeList, + Optional: true, + Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, + }, + }, + }, + }, + "unwrapped": { + Type: schema.TypeList, + Optional: true, + Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `A 128/192/256 bit key. + +A base64-encoded string.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "crypto_replace_ffx_fpe_config": { + Type: schema.TypeList, + Optional: true, + Description: `Replaces an identifier with a surrogate using Format Preserving Encryption (FPE) with the FFX mode of operation; however when used in the 'content.reidentify' API method, it serves the opposite function by reversing the surrogate back into the original identifier. The identifier must be encoded as ASCII. For a given crypto key and context, the same identifier will be replaced with the same surrogate. Identifiers must be at least two characters long. In the case that the identifier is the empty string, it will be skipped. 
See [https://cloud.google.com/dlp/docs/pseudonymization](https://cloud.google.com/dlp/docs/pseudonymization) to learn more. + +Note: We recommend using CryptoDeterministicConfig for all use cases which do not require preserving the input alphabet space and size, plus warrant referential integrity.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "common_alphabet": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED", "NUMERIC", "HEXADECIMAL", "UPPER_CASE_ALPHA_NUMERIC", "ALPHA_NUMERIC", ""}), + Description: `Common alphabets. Possible values: ["FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED", "NUMERIC", "HEXADECIMAL", "UPPER_CASE_ALPHA_NUMERIC", "ALPHA_NUMERIC"]`, + }, + "context": { + Type: schema.TypeList, + Optional: true, + Description: `The 'tweak', a context may be used for higher security since the same identifier in two different contexts won't be given the same surrogate. If the context is not set, a default tweak will be used. + +If the context is set but: + +1. there is no record present when transforming a given value or +2. the field is not present when transforming a given value, + +a default tweak will be used. + +Note that case (1) is expected when an 'InfoTypeTransformation' is applied to both structured and non-structured 'ContentItem's. Currently, the referenced field may be of value type integer or string. 
+ +The tweak is constructed as a sequence of bytes in big endian byte order such that: + +* a 64 bit integer is encoded followed by a single byte of value 1 +* a string is encoded in UTF-8 format followed by a single byte of value 2`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + "crypto_key": { + Type: schema.TypeList, + Optional: true, + Description: `The key used by the encryption algorithm.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_wrapped": { + Type: schema.TypeList, + Optional: true, + Description: `KMS wrapped key. +Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt +For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). +Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, + }, + "wrapped_key": { + Type: schema.TypeString, + Required: true, + Description: `The wrapped data crypto key. + +A base64-encoded string.`, + }, + }, + }, + }, + "transient": { + Type: schema.TypeList, + Optional: true, + Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the key. 
This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, + }, + }, + }, + }, + "unwrapped": { + Type: schema.TypeList, + Optional: true, + Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `A 128/192/256 bit key. + +A base64-encoded string.`, + }, + }, + }, + }, + }, + }, + }, + "custom_alphabet": { + Type: schema.TypeString, + Optional: true, + Description: `This is supported by mapping these to the alphanumeric characters that the FFX mode natively supports. This happens before/after encryption/decryption. Each character listed must appear only once. Number of characters must be in the range \[2, 95\]. This must be encoded as ASCII. The order of characters does not matter. The full list of allowed characters is: + +''0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ~'!@#$%^&*()_-+={[}]|:;"'<,>.?/''`, + }, + "radix": { + Type: schema.TypeInt, + Optional: true, + Description: `The native way to select the alphabet. Must be in the range \[2, 95\].`, + }, + "surrogate_info_type": { + Type: schema.TypeList, + Optional: true, + Description: `The custom infoType to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom infoType followed by the number of characters comprising the surrogate. 
The following scheme defines the format: info\_type\_name(surrogate\_character\_count):surrogate + +For example, if the name of custom infoType is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' + +This annotation identifies the surrogate when inspecting content using the custom infoType ['SurrogateType'](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype). This facilitates reversal of the surrogate when it occurs in free text. + +In order for inspection to work properly, the name of this infoType must not occur naturally anywhere in your data; otherwise, inspection may find a surrogate that does not correspond to an actual identifier. Therefore, choose your custom infoType name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern '[A-Za-z0-9$-_]{1,64}'.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. 
This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Optional version name for this InfoType.`, + }, + }, + }, + }, + }, + }, + }, + "date_shift_config": { + Type: schema.TypeList, + Optional: true, + Description: `Shifts dates by random number of days, with option to be consistent for the same context.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "lower_bound_days": { + Type: schema.TypeInt, + Required: true, + Description: `Range of shift in days. Negative means shift to earlier in time.`, + }, + "upper_bound_days": { + Type: schema.TypeInt, + Required: true, + Description: `Range of shift in days. Actual shift will be selected at random within this range (inclusive ends). +Negative means shift to earlier in time. Must not be more than 365250 days (1000 years) each direction.`, + }, + "context": { + Type: schema.TypeList, + Optional: true, + Description: `Points to the field that contains the context, for example, an entity id. +If set, must also set cryptoKey. 
If set, shift will be consistent for the given context.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + "crypto_key": { + Type: schema.TypeList, + Optional: true, + Description: `The key used by the encryption function.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_wrapped": { + Type: schema.TypeList, + Optional: true, + Description: `KMS wrapped key. +Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt +For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). +Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, + }, + "wrapped_key": { + Type: schema.TypeString, + Required: true, + Description: `The wrapped data crypto key. +A base64-encoded string.`, + }, + }, + }, + }, + "transient": { + Type: schema.TypeList, + Optional: true, + Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the key. This is an arbitrary string used to differentiate different keys. 
A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, + }, + }, + }, + }, + "unwrapped": { + Type: schema.TypeList, + Optional: true, + Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `A 128/192/256 bit key. +A base64-encoded string.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "fixed_size_bucketing_config": { + Type: schema.TypeList, + Optional: true, + Description: `Buckets values based on fixed size ranges. The Bucketing transformation can provide all of this functionality, but requires more configuration. This message is provided as a convenience to the user for simple bucketing strategies. + +The transformed value will be a hyphenated string of {lower_bound}-{upper_bound}. For example, if lower_bound = 10 and upper_bound = 20, all values that are within this bucket will be replaced with "10-20". + +This can be used on data of type: double, long. + +If the bound Value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. + +See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket_size": { + Type: schema.TypeFloat, + Required: true, + Description: `Size of each bucket (except for minimum and maximum buckets). 
+So if lower_bound = 10, upper_bound = 89, and bucketSize = 10, then the following buckets would be used: -10, 10-20, 20-30, 30-40, 40-50, 50-60, 60-70, 70-80, 80-89, 89+. +Precision up to 2 decimals works.`, + }, + "lower_bound": { + Type: schema.TypeList, + Required: true, + Description: `Lower bound value of buckets. +All values less than lower_bound are grouped together into a single bucket; for example if lower_bound = 10, then all values less than 10 are replaced with the value "-10". +The 'lower_bound' block must only contain one argument. See the 'fixed_size_bucketing_config' block description for more information about choosing a data type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + }, + }, + }, + "upper_bound": { + Type: schema.TypeList, + Required: true, + Description: `Upper bound value of buckets. +All values greater than upper_bound are grouped together into a single bucket; for example if upper_bound = 89, then all values greater than 89 are replaced with the value "89+". +The 'upper_bound' block must only contain one argument. See the 'fixed_size_bucketing_config' block description for more information about choosing a data type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + }, + }, + }, + }, + }, + }, + "redact_config": { + Type: schema.TypeList, + Optional: true, + Description: `Redact a given value. 
For example, if used with an InfoTypeTransformation transforming PHONE_NUMBER, and input 'My phone number is 206-555-0123', the output would be 'My phone number is '.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "replace_config": { + Type: schema.TypeList, + Optional: true, + Description: `Replace each input value with a given value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "new_value": { + Type: schema.TypeList, + Required: true, + Description: `Replace each input value with a given value. +The 'new_value' block must only contain one argument. For example when replacing the contents of a string-type field, only 'string_value' should be set.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boolean_value": { + Type: schema.TypeBool, + Optional: true, + Description: `A boolean value.`, + }, + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + Description: `Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a +year by itself or a year and month where the day is not significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + Description: `Month of year. Must be from 1 to 12, or 0 if specifying a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + Description: `Year of date. Must be from 1 to 9999, or 0 if specifying a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeInt, + Optional: true, + Description: `An integer value.`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + }, + }, + }, + "replace_dictionary_config": { + Type: schema.TypeList, + Optional: true, + Description: `Replace with a value randomly drawn (with replacement) from a dictionary.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "word_list": { + Type: schema.TypeList, + Required: true, + Description: `A list of words to select from for random replacement. 
The [limits](https://cloud.google.com/dlp/limits) page contains details about the size limits of dictionaries.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "words": { + Type: schema.TypeList, + Required: true, + Description: `Words or phrases defining the dictionary. The dictionary must contain at least one phrase and every phrase must contain at least 2 characters that are letters or digits.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "replace_with_info_type_config": { + Type: schema.TypeBool, + Optional: true, + Description: `Replace each matching finding with the name of the info type.`, + }, + "time_part_config": { + Type: schema.TypeList, + Optional: true, + Description: `For use with Date, Timestamp, and TimeOfDay, extract or preserve a portion of the value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "part_to_extract": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"YEAR", "MONTH", "DAY_OF_MONTH", "DAY_OF_WEEK", "WEEK_OF_YEAR", "HOUR_OF_DAY", ""}), + Description: `The part of the time to keep. Possible values: ["YEAR", "MONTH", "DAY_OF_MONTH", "DAY_OF_WEEK", "WEEK_OF_YEAR", "HOUR_OF_DAY"]`, + }, + }, + }, + }, + }, + }, + }, + "info_types": { + Type: schema.TypeList, + Optional: true, + Description: `InfoTypes to apply the transformation to. Leaving this empty will apply the transformation to apply to +all findings that correspond to infoTypes that were requested in InspectConfig.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. 
This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version name for this InfoType.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"deidentify_config.0.info_type_transformations", "deidentify_config.0.record_transformations", "deidentify_config.0.image_transformations"}, + }, + "record_transformations": { + Type: schema.TypeList, + Optional: true, + Description: `Treat the dataset as structured. Transformations can be applied to specific locations within structured datasets, such as transforming a column within a table.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_transformations": { + Type: schema.TypeList, + Optional: true, + Description: `Transform the record by applying various field transformations.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fields": { + Type: schema.TypeList, + Required: true, + Description: `Input field(s) to apply the transformation to. When you have columns that reference their position within a list, omit the index from the FieldId. +FieldId name matching ignores the index. 
For example, instead of "contact.nums[0].type", use "contact.nums.type".`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + "condition": { + Type: schema.TypeList, + Optional: true, + Description: `Only apply the transformation if the condition evaluates to true for the given RecordCondition. The conditions are allowed to reference fields that are not used in the actual transformation. +Example Use Cases: +- Apply a different bucket transformation to an age column if the zip code column for the same record is within a specific range. +- Redact a field if the date of birth field is greater than 85.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expressions": { + Type: schema.TypeList, + Optional: true, + Description: `An expression.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "conditions": { + Type: schema.TypeList, + Optional: true, + Description: `Conditions to apply to the expression.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "conditions": { + Type: schema.TypeList, + Optional: true, + Description: `A collection of conditions.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field": { + Type: schema.TypeList, + Required: true, + Description: `Field within the record this condition is evaluated against.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + "operator": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"EQUAL_TO", "NOT_EQUAL_TO", "GREATER_THAN", "LESS_THAN", "GREATER_THAN_OR_EQUALS", "LESS_THAN_OR_EQUALS", "EXISTS"}), + Description: `Operator used to compare the field or infoType to the value. 
Possible values: ["EQUAL_TO", "NOT_EQUAL_TO", "GREATER_THAN", "LESS_THAN", "GREATER_THAN_OR_EQUALS", "LESS_THAN_OR_EQUALS", "EXISTS"]`, + }, + "value": { + Type: schema.TypeList, + Optional: true, + Description: `Value to compare against. +The 'value' block must only contain one argument. For example when a condition is evaluated against a string-type field, only 'string_value' should be set. +This argument is mandatory, except for conditions using the 'EXISTS' operator.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boolean_value": { + Type: schema.TypeBool, + Optional: true, + Description: `A boolean value.`, + }, + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 31), + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 12), + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 9999), + Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 24), + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 59), + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 999999999), + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 60), + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateRFC3339Date, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "logical_operator": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"AND", ""}), + Description: `The operator to apply to the result of conditions. Default and currently only supported value is AND Default value: "AND" Possible values: ["AND"]`, + Default: "AND", + }, + }, + }, + }, + }, + }, + }, + "info_type_transformations": { + Type: schema.TypeList, + Optional: true, + Description: `Treat the contents of the field as free text, and selectively transform content that matches an InfoType. +Only one of 'primitive_transformation' or 'info_type_transformations' must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "transformations": { + Type: schema.TypeList, + Required: true, + Description: `Transformation for each infoType. Cannot specify more than one for a given infoType.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "primitive_transformation": { + Type: schema.TypeList, + Required: true, + Description: `Apply the transformation to the entire field. +The 'primitive_transformation' block must only contain one argument, corresponding to the type of transformation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucketing_config": { + Type: schema.TypeList, + Optional: true, + Description: `Generalization function that buckets values based on ranges. The ranges and replacement values are dynamically provided by the user for custom behavior, such as 1-30 -> LOW 31-65 -> MEDIUM 66-100 -> HIGH +This can be used on data of type: number, long, string, timestamp. +If the provided value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. 
+See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "buckets": { + Type: schema.TypeList, + Required: true, + Description: `Set of buckets. Ranges must be non-overlapping. +Bucket is represented as a range, along with replacement values.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "replacement_value": { + Type: schema.TypeList, + Required: true, + Description: `Replacement value for this bucket. +The 'replacement_value' block must only contain one argument.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + "max": { + Type: schema.TypeList, + Optional: true, + Description: `Upper bound of the range, exclusive; type must match min. +The 'max' block must only contain one argument. 
See the 'bucketing_config' block description for more information about choosing a data type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + "min": { + Type: schema.TypeList, + Optional: true, + Description: `Lower bound of the range, inclusive. Type should be the same as max if used. +The 'min' block must only contain one argument. See the 'bucketing_config' block description for more information about choosing a data type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + Description: `Year of the date. 
Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "character_mask_config": { + Type: schema.TypeList, + Optional: true, + Description: `Partially mask a string by replacing a given number of characters with a fixed character. Masking can start from the beginning or end of the string. This can be used on data of any type (numbers, longs, and so on) and when de-identifying structured data we'll attempt to preserve the original data's type. (This allows you to take a long like 123 and modify it to a string like **3).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "characters_to_ignore": { + Type: schema.TypeList, + Optional: true, + Description: `Characters to skip when doing de-identification of a value. These will be left alone and skipped.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "characters_to_skip": { + Type: schema.TypeString, + Optional: true, + Description: `Characters to not transform when masking. Only one of this or 'common_characters_to_ignore' must be specified.`, + }, + "common_characters_to_ignore": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NUMERIC", "ALPHA_UPPER_CASE", "ALPHA_LOWER_CASE", "PUNCTUATION", "WHITESPACE", ""}), + Description: `Common characters to not transform when masking. Useful to avoid removing punctuation. Only one of this or 'characters_to_skip' must be specified. Possible values: ["NUMERIC", "ALPHA_UPPER_CASE", "ALPHA_LOWER_CASE", "PUNCTUATION", "WHITESPACE"]`, + }, + }, + }, + }, + "masking_character": { + Type: schema.TypeString, + Optional: true, + Description: `Character to use to mask the sensitive values—for example, * for an alphabetic string such as a name, or 0 for a numeric string +such as ZIP code or credit card number. This string must have a length of 1. 
If not supplied, this value defaults to * for +strings, and 0 for digits.`, + }, + "number_to_mask": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of characters to mask. If not set, all matching chars will be masked. Skipped characters do not count towards this tally. +If number_to_mask is negative, this denotes inverse masking. Cloud DLP masks all but a number of characters. For example, suppose you have the following values: +- 'masking_character' is * +- 'number_to_mask' is -4 +- 'reverse_order' is false +- 'characters_to_ignore' includes - +- Input string is 1234-5678-9012-3456 + +The resulting de-identified string is ****-****-****-3456. Cloud DLP masks all but the last four characters. If reverseOrder is true, all but the first four characters are masked as 1234-****-****-****.`, + }, + "reverse_order": { + Type: schema.TypeBool, + Optional: true, + Description: `Mask characters in reverse order. For example, if masking_character is 0, number_to_mask is 14, and reverse_order is 'false', then the +input string '1234-5678-9012-3456' is masked as '00000000000000-3456'.`, + }, + }, + }, + }, + "crypto_deterministic_config": { + Type: schema.TypeList, + Optional: true, + Description: `Pseudonymization method that generates deterministic encryption for the given input. Outputs a base64 encoded representation of the encrypted output. Uses AES-SIV based on the RFC [https://tools.ietf.org/html/rfc5297](https://tools.ietf.org/html/rfc5297).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key": { + Type: schema.TypeList, + Required: true, + Description: `The key used by the encryption function. For deterministic encryption using AES-SIV, the provided key is internally expanded to 64 bytes prior to use.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_wrapped": { + Type: schema.TypeList, + Optional: true, + Description: `KMS wrapped key. 
+Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt +For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, 'transient' or 'unwrapped' must be specified. +Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, + }, + "wrapped_key": { + Type: schema.TypeString, + Required: true, + Description: `The wrapped data crypto key. + +A base64-encoded string.`, + }, + }, + }, + }, + "transient": { + Type: schema.TypeList, + Optional: true, + Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, 'unwrapped' or 'kms_wrapped' must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, + }, + }, + }, + }, + "unwrapped": { + Type: schema.TypeList, + Optional: true, + Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. 
Only one of this, 'transient' or 'kms_wrapped' must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `A 128/192/256 bit key. + +A base64-encoded string.`, + Sensitive: true, + }, + }, + }, + }, + }, + }, + }, + "surrogate_info_type": { + Type: schema.TypeList, + Required: true, + Description: `The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} + +For example, if the name of custom info type is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' + +This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. + +Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. + +In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either + +* reverse a surrogate that does not correspond to an actual identifier +* be unable to parse the surrogate and result in an error + +Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. 
For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern '[A-Za-z0-9$-_]{1,64}'.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Optional version name for this InfoType.`, + }, + }, + }, + }, + "context": { + Type: schema.TypeList, + Optional: true, + Description: `A context may be used for higher security and maintaining referential integrity such that the same identifier in two different contexts will be given a distinct surrogate. The context is appended to plaintext value being encrypted. On decryption the provided context is validated against the value used during encryption. If a context was provided during encryption, same context must be provided during decryption as well. + +If the context is not set, plaintext would be used as is for encryption. 
If the context is set but: + +1. there is no record present when transforming a given value or +2. the field is not present when transforming a given value, + +plaintext would be used as is for encryption. + +Note that case (1) is expected when an InfoTypeTransformation is applied to both structured and unstructured ContentItems.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + }, + }, + }, + "crypto_hash_config": { + Type: schema.TypeList, + Optional: true, + Description: `Pseudonymization method that generates surrogates via cryptographic hashing. Uses SHA-256. The key size must be either 32 or 64 bytes. +Outputs a base64 encoded representation of the hashed output (for example, L7k0BHmF1ha5U3NfGykjro4xWi1MPVQPjhMAZbSV9mM=). +Currently, only string and integer values can be hashed. +See https://cloud.google.com/dlp/docs/pseudonymization to learn more.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key": { + Type: schema.TypeList, + Required: true, + Description: `The key used by the encryption function.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_wrapped": { + Type: schema.TypeList, + Optional: true, + Description: `KMS wrapped key. +Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt +For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, 'transient' or 'unwrapped' must be specified. 
+Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, + }, + "wrapped_key": { + Type: schema.TypeString, + Required: true, + Description: `The wrapped data crypto key. + +A base64-encoded string.`, + }, + }, + }, + }, + "transient": { + Type: schema.TypeList, + Optional: true, + Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, 'unwrapped' or 'kms_wrapped' must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, + }, + }, + }, + }, + "unwrapped": { + Type: schema.TypeList, + Optional: true, + Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, 'transient' or 'kms_wrapped' must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `A 128/192/256 bit key. 
+ +A base64-encoded string.`, + Sensitive: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "crypto_replace_ffx_fpe_config": { + Type: schema.TypeList, + Optional: true, + Description: `Replaces an identifier with a surrogate using Format Preserving Encryption (FPE) with the FFX mode of operation; however when used in the 'content.reidentify' API method, it serves the opposite function by reversing the surrogate back into the original identifier. The identifier must be encoded as ASCII. For a given crypto key and context, the same identifier will be replaced with the same surrogate. Identifiers must be at least two characters long. In the case that the identifier is the empty string, it will be skipped. See [https://cloud.google.com/dlp/docs/pseudonymization](https://cloud.google.com/dlp/docs/pseudonymization) to learn more. + +Note: We recommend using CryptoDeterministicConfig for all use cases which do not require preserving the input alphabet space and size, plus warrant referential integrity.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key": { + Type: schema.TypeList, + Required: true, + Description: `The key used by the encryption algorithm.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_wrapped": { + Type: schema.TypeList, + Optional: true, + Description: `KMS wrapped key. +Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt +For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). Only one of this, 'transient' or 'unwrapped' must be specified. 
+Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, + }, + "wrapped_key": { + Type: schema.TypeString, + Required: true, + Description: `The wrapped data crypto key. + +A base64-encoded string.`, + }, + }, + }, + }, + "transient": { + Type: schema.TypeList, + Optional: true, + Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, 'unwrapped' or 'kms_wrapped' must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, + }, + }, + }, + }, + "unwrapped": { + Type: schema.TypeList, + Optional: true, + Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, 'transient' or 'kms_wrapped' must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `A 128/192/256 bit key. 
+ +A base64-encoded string.`, + Sensitive: true, + }, + }, + }, + }, + }, + }, + }, + "common_alphabet": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NUMERIC", "HEXADECIMAL", "UPPER_CASE_ALPHA_NUMERIC", "ALPHA_NUMERIC", ""}), + Description: `Common alphabets. Only one of this, 'custom_alphabet' or 'radix' must be specified. Possible values: ["NUMERIC", "HEXADECIMAL", "UPPER_CASE_ALPHA_NUMERIC", "ALPHA_NUMERIC"]`, + }, + "context": { + Type: schema.TypeList, + Optional: true, + Description: `The 'tweak', a context may be used for higher security since the same identifier in two different contexts won't be given the same surrogate. If the context is not set, a default tweak will be used. + +If the context is set but: + +1. there is no record present when transforming a given value or +2. the field is not present when transforming a given value, + +a default tweak will be used. + +Note that case (1) is expected when an 'InfoTypeTransformation' is applied to both structured and non-structured 'ContentItem's. Currently, the referenced field may be of value type integer or string. + +The tweak is constructed as a sequence of bytes in big endian byte order such that: + +* a 64 bit integer is encoded followed by a single byte of value 1 +* a string is encoded in UTF-8 format followed by a single byte of value 2`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + "custom_alphabet": { + Type: schema.TypeString, + Optional: true, + Description: `This is supported by mapping these to the alphanumeric characters that the FFX mode natively supports. This happens before/after encryption/decryption. Each character listed must appear only once. Number of characters must be in the range \[2, 95\]. This must be encoded as ASCII. The order of characters does not matter. 
The full list of allowed characters is: + +''0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ~'!@#$%^&*()_-+={[}]|:;"'<,>.?/''. Only one of this, 'common_alphabet' or 'radix' must be specified.`, + }, + "radix": { + Type: schema.TypeInt, + Optional: true, + Description: `The native way to select the alphabet. Must be in the range \[2, 95\]. Only one of this, 'custom_alphabet' or 'common_alphabet' must be specified.`, + }, + "surrogate_info_type": { + Type: schema.TypeList, + Optional: true, + Description: `The custom infoType to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom infoType followed by the number of characters comprising the surrogate. The following scheme defines the format: info\_type\_name(surrogate\_character\_count):surrogate + +For example, if the name of custom infoType is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' + +This annotation identifies the surrogate when inspecting content using the custom infoType ['SurrogateType'](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype). This facilitates reversal of the surrogate when it occurs in free text. + +In order for inspection to work properly, the name of this infoType must not occur naturally anywhere in your data; otherwise, inspection may find a surrogate that does not correspond to an actual identifier. Therefore, choose your custom infoType name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. 
For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern '[A-Za-z0-9$-_]{1,64}'.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Optional version name for this InfoType.`, + }, + }, + }, + }, + }, + }, + }, + "date_shift_config": { + Type: schema.TypeList, + Optional: true, + Description: `Shifts dates by random number of days, with option to be consistent for the same context. 
See https://cloud.google.com/dlp/docs/concepts-date-shifting to learn more.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "lower_bound_days": { + Type: schema.TypeInt, + Required: true, + Description: `For example, -5 means shift date to at most 5 days back in the past.`, + }, + "upper_bound_days": { + Type: schema.TypeInt, + Required: true, + Description: `Range of shift in days. Actual shift will be selected at random within this range (inclusive ends). Negative means shift to earlier in time. Must not be more than 365250 days (1000 years) each direction. + +For example, 3 means shift date to at most 3 days into the future.`, + }, + "context": { + Type: schema.TypeList, + Optional: true, + Description: `Points to the field that contains the context, for example, an entity id. +If set, must also set cryptoKey. If set, shift will be consistent for the given context.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + "crypto_key": { + Type: schema.TypeList, + Optional: true, + Description: `Causes the shift to be computed based on this key and the context. This results in the same shift for the same context and cryptoKey. If set, must also set context. Can only be applied to table items.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_wrapped": { + Type: schema.TypeList, + Optional: true, + Description: `KMS wrapped key. +Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt +For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). 
Only one of this, 'transient' or 'unwrapped' must be specified. +Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, + }, + "wrapped_key": { + Type: schema.TypeString, + Required: true, + Description: `The wrapped data crypto key. + +A base64-encoded string.`, + }, + }, + }, + }, + "transient": { + Type: schema.TypeList, + Optional: true, + Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes. Only one of this, 'unwrapped' or 'kms_wrapped' must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, + }, + }, + }, + }, + "unwrapped": { + Type: schema.TypeList, + Optional: true, + Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible. Only one of this, 'transient' or 'kms_wrapped' must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `A 128/192/256 bit key. 
+ +A base64-encoded string.`, + Sensitive: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "fixed_size_bucketing_config": { + Type: schema.TypeList, + Optional: true, + Description: `Buckets values based on fixed size ranges. The Bucketing transformation can provide all of this functionality, but requires more configuration. This message is provided as a convenience to the user for simple bucketing strategies. + +The transformed value will be a hyphenated string of {lower_bound}-{upper_bound}. For example, if lower_bound = 10 and upper_bound = 20, all values that are within this bucket will be replaced with "10-20". + +This can be used on data of type: double, long. + +If the bound Value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. + +See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket_size": { + Type: schema.TypeFloat, + Required: true, + Description: `Size of each bucket (except for minimum and maximum buckets). +So if lower_bound = 10, upper_bound = 89, and bucketSize = 10, then the following buckets would be used: -10, 10-20, 20-30, 30-40, 40-50, 50-60, 60-70, 70-80, 80-89, 89+. +Precision up to 2 decimals works.`, + }, + "lower_bound": { + Type: schema.TypeList, + Required: true, + Description: `Lower bound value of buckets. +All values less than lower_bound are grouped together into a single bucket; for example if lower_bound = 10, then all values less than 10 are replaced with the value "-10". +The 'lower_bound' block must only contain one argument. 
See the 'fixed_size_bucketing_config' block description for more information about choosing a data type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + }, + }, + }, + "upper_bound": { + Type: schema.TypeList, + Required: true, + Description: `Upper bound value of buckets. +All values greater than upper_bound are grouped together into a single bucket; for example if upper_bound = 89, then all values greater than 89 are replaced with the value "89+". +The 'upper_bound' block must only contain one argument. See the 'fixed_size_bucketing_config' block description for more information about choosing a data type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + }, + }, + }, + }, + }, + }, + "redact_config": { + Type: schema.TypeList, + Optional: true, + Description: `Redact a given value. For example, if used with an InfoTypeTransformation transforming PHONE_NUMBER, and input 'My phone number is 206-555-0123', the output would be 'My phone number is '.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "replace_config": { + Type: schema.TypeList, + Optional: true, + Description: `Replace each input value with a given value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "new_value": { + Type: schema.TypeList, + Required: true, + Description: `Replace each input value with a given value. +The 'new_value' block must only contain one argument. 
For example when replacing the contents of a string-type field, only 'string_value' should be set.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boolean_value": { + Type: schema.TypeBool, + Optional: true, + Description: `A boolean value.`, + }, + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 31), + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 12), + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 9999), + Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 24), + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 59), + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 999999999), + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 60), + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateRFC3339Date, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + }, + }, + }, + "replace_dictionary_config": { + Type: schema.TypeList, + Optional: true, + Description: `Replace with a value randomly drawn (with replacement) from a dictionary.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "word_list": { + Type: schema.TypeList, + Required: true, + Description: `A list of words to select from for random replacement. The [limits](https://cloud.google.com/dlp/limits) page contains details about the size limits of dictionaries.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "words": { + Type: schema.TypeList, + Required: true, + Description: `Words or phrases defining the dictionary. The dictionary must contain at least one phrase and every phrase must contain at least 2 characters that are letters or digits.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "replace_with_info_type_config": { + Type: schema.TypeList, + Optional: true, + Description: `Replace each matching finding with the name of the info type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "time_part_config": { + Type: schema.TypeList, + Optional: true, + Description: `For use with Date, Timestamp, and TimeOfDay, extract or preserve a portion of the value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "part_to_extract": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"YEAR", "MONTH", "DAY_OF_MONTH", "DAY_OF_WEEK", "WEEK_OF_YEAR", "HOUR_OF_DAY"}), + Description: `The part of the time to keep. 
Possible values: ["YEAR", "MONTH", "DAY_OF_MONTH", "DAY_OF_WEEK", "WEEK_OF_YEAR", "HOUR_OF_DAY"]`, + }, + }, + }, + }, + }, + }, + }, + "info_types": { + Type: schema.TypeList, + Optional: true, + Description: `InfoTypes to apply the transformation to. Leaving this empty will apply the transformation to apply to +all findings that correspond to infoTypes that were requested in InspectConfig.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version name for this InfoType.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "primitive_transformation": { + Type: schema.TypeList, + Optional: true, + Description: `Apply the transformation to the entire field. +The 'primitive_transformation' block must only contain one argument, corresponding to the type of transformation. +Only one of 'primitive_transformation' or 'info_type_transformations' must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucketing_config": { + Type: schema.TypeList, + Optional: true, + Description: `Generalization function that buckets values based on ranges. 
The ranges and replacement values are dynamically provided by the user for custom behavior, such as 1-30 -> LOW 31-65 -> MEDIUM 66-100 -> HIGH +This can be used on data of type: number, long, string, timestamp. +If the provided value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. +See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "buckets": { + Type: schema.TypeList, + Optional: true, + Description: `Set of buckets. Ranges must be non-overlapping. +Bucket is represented as a range, along with replacement values.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "replacement_value": { + Type: schema.TypeList, + Required: true, + Description: `Replacement value for this bucket. +The 'replacement_value' block must only contain one argument.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boolean_value": { + Type: schema.TypeBool, + Optional: true, + Description: `A boolean value.`, + }, + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + Description: `Year of the date. 
Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + "max": { + Type: schema.TypeList, + Optional: true, + Description: `Upper bound of the range, exclusive; type must match min. +The 'max' block must only contain one argument. See the 'bucketing_config' block description for more information about choosing a data type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boolean_value": { + Type: schema.TypeBool, + Optional: true, + Description: `A boolean value.`, + }, + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + "min": { + Type: schema.TypeList, + Optional: true, + Description: `Lower bound of the range, inclusive. Type should be the same as max if used. +The 'min' block must only contain one argument. 
See the 'bucketing_config' block description for more information about choosing a data type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boolean_value": { + Type: schema.TypeBool, + Optional: true, + Description: `A boolean value.`, + }, + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "character_mask_config": { + Type: schema.TypeList, + Optional: true, + Description: `Partially mask a string by replacing a given number of characters with a fixed character. Masking can start from the beginning or end of the string. 
This can be used on data of any type (numbers, longs, and so on) and when de-identifying structured data we'll attempt to preserve the original data's type. (This allows you to take a long like 123 and modify it to a string like **3).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "characters_to_ignore": { + Type: schema.TypeList, + Optional: true, + Description: `Characters to skip when doing de-identification of a value. These will be left alone and skipped.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "characters_to_skip": { + Type: schema.TypeString, + Optional: true, + Description: `Characters to not transform when masking.`, + }, + "common_characters_to_ignore": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"NUMERIC", "ALPHA_UPPER_CASE", "ALPHA_LOWER_CASE", "PUNCTUATION", "WHITESPACE", ""}), + Description: `Common characters to not transform when masking. Useful to avoid removing punctuation. Possible values: ["NUMERIC", "ALPHA_UPPER_CASE", "ALPHA_LOWER_CASE", "PUNCTUATION", "WHITESPACE"]`, + }, + }, + }, + }, + "masking_character": { + Type: schema.TypeString, + Optional: true, + Description: `Character to use to mask the sensitive values—for example, * for an alphabetic string such as a name, or 0 for a numeric string +such as ZIP code or credit card number. This string must have a length of 1. If not supplied, this value defaults to * for +strings, and 0 for digits.`, + }, + "number_to_mask": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of characters to mask. If not set, all matching chars will be masked. Skipped characters do not count towards this tally. +If number_to_mask is negative, this denotes inverse masking. Cloud DLP masks all but a number of characters. 
For example, suppose you have the following values: +- 'masking_character' is * +- 'number_to_mask' is -4 +- 'reverse_order' is false +- 'characters_to_ignore' includes - +- Input string is 1234-5678-9012-3456 + +The resulting de-identified string is ****-****-****-3456. Cloud DLP masks all but the last four characters. If reverseOrder is true, all but the first four characters are masked as 1234-****-****-****.`, + }, + "reverse_order": { + Type: schema.TypeBool, + Optional: true, + Description: `Mask characters in reverse order. For example, if masking_character is 0, number_to_mask is 14, and reverse_order is 'false', then the +input string '1234-5678-9012-3456' is masked as '00000000000000-3456'.`, + }, + }, + }, + }, + "crypto_deterministic_config": { + Type: schema.TypeList, + Optional: true, + Description: `Pseudonymization method that generates deterministic encryption for the given input. Outputs a base64 encoded representation of the encrypted output. Uses AES-SIV based on the RFC [https://tools.ietf.org/html/rfc5297](https://tools.ietf.org/html/rfc5297).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "context": { + Type: schema.TypeList, + Optional: true, + Description: `A context may be used for higher security and maintaining referential integrity such that the same identifier in two different contexts will be given a distinct surrogate. The context is appended to plaintext value being encrypted. On decryption the provided context is validated against the value used during encryption. If a context was provided during encryption, same context must be provided during decryption as well. + +If the context is not set, plaintext would be used as is for encryption. If the context is set but: + +1. there is no record present when transforming a given value or +2. the field is not present when transforming a given value, + +plaintext would be used as is for encryption. 
+ +Note that case (1) is expected when an InfoTypeTransformation is applied to both structured and unstructured ContentItems.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + "crypto_key": { + Type: schema.TypeList, + Optional: true, + Description: `The key used by the encryption function. For deterministic encryption using AES-SIV, the provided key is internally expanded to 64 bytes prior to use.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_wrapped": { + Type: schema.TypeList, + Optional: true, + Description: `KMS wrapped key. +Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt +For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). +Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, + }, + "wrapped_key": { + Type: schema.TypeString, + Required: true, + Description: `The wrapped data crypto key. + +A base64-encoded string.`, + }, + }, + }, + }, + "transient": { + Type: schema.TypeList, + Optional: true, + Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the key. 
This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, + }, + }, + }, + }, + "unwrapped": { + Type: schema.TypeList, + Optional: true, + Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `A 128/192/256 bit key. + +A base64-encoded string.`, + }, + }, + }, + }, + }, + }, + }, + "surrogate_info_type": { + Type: schema.TypeList, + Optional: true, + Description: `The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} + +For example, if the name of custom info type is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' + +This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. + +Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. 
+ +In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either + +* reverse a surrogate that does not correspond to an actual identifier +* be unable to parse the surrogate and result in an error + +Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern '[A-Za-z0-9$-_]{1,64}'.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. 
Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Optional version name for this InfoType.`, + }, + }, + }, + }, + }, + }, + }, + "crypto_hash_config": { + Type: schema.TypeList, + Optional: true, + Description: `Pseudonymization method that generates surrogates via cryptographic hashing. Uses SHA-256. The key size must be either 32 or 64 bytes. +Outputs a base64 encoded representation of the hashed output (for example, L7k0BHmF1ha5U3NfGykjro4xWi1MPVQPjhMAZbSV9mM=). +Currently, only string and integer values can be hashed. +See https://cloud.google.com/dlp/docs/pseudonymization to learn more.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key": { + Type: schema.TypeList, + Optional: true, + Description: `The key used by the encryption function.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_wrapped": { + Type: schema.TypeList, + Optional: true, + Description: `KMS wrapped key. +Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt +For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). +Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, + }, + "wrapped_key": { + Type: schema.TypeString, + Required: true, + Description: `The wrapped data crypto key. 
+ +A base64-encoded string.`, + }, + }, + }, + }, + "transient": { + Type: schema.TypeList, + Optional: true, + Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, + }, + }, + }, + }, + "unwrapped": { + Type: schema.TypeList, + Optional: true, + Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `A 128/192/256 bit key. + +A base64-encoded string.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "crypto_replace_ffx_fpe_config": { + Type: schema.TypeList, + Optional: true, + Description: `Replaces an identifier with a surrogate using Format Preserving Encryption (FPE) with the FFX mode of operation; however when used in the 'content.reidentify' API method, it serves the opposite function by reversing the surrogate back into the original identifier. The identifier must be encoded as ASCII. For a given crypto key and context, the same identifier will be replaced with the same surrogate. Identifiers must be at least two characters long. In the case that the identifier is the empty string, it will be skipped. 
See [https://cloud.google.com/dlp/docs/pseudonymization](https://cloud.google.com/dlp/docs/pseudonymization) to learn more. + +Note: We recommend using CryptoDeterministicConfig for all use cases which do not require preserving the input alphabet space and size, plus warrant referential integrity.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "common_alphabet": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED", "NUMERIC", "HEXADECIMAL", "UPPER_CASE_ALPHA_NUMERIC", "ALPHA_NUMERIC", ""}), + Description: `Common alphabets. Possible values: ["FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED", "NUMERIC", "HEXADECIMAL", "UPPER_CASE_ALPHA_NUMERIC", "ALPHA_NUMERIC"]`, + }, + "context": { + Type: schema.TypeList, + Optional: true, + Description: `The 'tweak', a context may be used for higher security since the same identifier in two different contexts won't be given the same surrogate. If the context is not set, a default tweak will be used. + +If the context is set but: + +1. there is no record present when transforming a given value or +2. the field is not present when transforming a given value, + +a default tweak will be used. + +Note that case (1) is expected when an 'InfoTypeTransformation' is applied to both structured and non-structured 'ContentItem's. Currently, the referenced field may be of value type integer or string. 
+ +The tweak is constructed as a sequence of bytes in big endian byte order such that: + +* a 64 bit integer is encoded followed by a single byte of value 1 +* a string is encoded in UTF-8 format followed by a single byte of value 2`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + "crypto_key": { + Type: schema.TypeList, + Optional: true, + Description: `The key used by the encryption algorithm.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_wrapped": { + Type: schema.TypeList, + Optional: true, + Description: `KMS wrapped key. +Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt +For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). +Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, + }, + "wrapped_key": { + Type: schema.TypeString, + Required: true, + Description: `The wrapped data crypto key. + +A base64-encoded string.`, + }, + }, + }, + }, + "transient": { + Type: schema.TypeList, + Optional: true, + Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the key. 
This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, + }, + }, + }, + }, + "unwrapped": { + Type: schema.TypeList, + Optional: true, + Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `A 128/192/256 bit key. + +A base64-encoded string.`, + }, + }, + }, + }, + }, + }, + }, + "custom_alphabet": { + Type: schema.TypeString, + Optional: true, + Description: `This is supported by mapping these to the alphanumeric characters that the FFX mode natively supports. This happens before/after encryption/decryption. Each character listed must appear only once. Number of characters must be in the range \[2, 95\]. This must be encoded as ASCII. The order of characters does not matter. The full list of allowed characters is: + +''0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ~'!@#$%^&*()_-+={[}]|:;"'<,>.?/''`, + }, + "radix": { + Type: schema.TypeInt, + Optional: true, + Description: `The native way to select the alphabet. Must be in the range \[2, 95\].`, + }, + "surrogate_info_type": { + Type: schema.TypeList, + Optional: true, + Description: `The custom infoType to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom infoType followed by the number of characters comprising the surrogate. 
The following scheme defines the format: info\_type\_name(surrogate\_character\_count):surrogate + +For example, if the name of custom infoType is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' + +This annotation identifies the surrogate when inspecting content using the custom infoType ['SurrogateType'](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype). This facilitates reversal of the surrogate when it occurs in free text. + +In order for inspection to work properly, the name of this infoType must not occur naturally anywhere in your data; otherwise, inspection may find a surrogate that does not correspond to an actual identifier. Therefore, choose your custom infoType name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern '[A-Za-z0-9$-_]{1,64}'.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. 
This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Optional version name for this InfoType.`, + }, + }, + }, + }, + }, + }, + }, + "date_shift_config": { + Type: schema.TypeList, + Optional: true, + Description: `Shifts dates by random number of days, with option to be consistent for the same context. See https://cloud.google.com/dlp/docs/concepts-date-shifting to learn more.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "lower_bound_days": { + Type: schema.TypeInt, + Required: true, + Description: `For example, -5 means shift date to at most 5 days back in the past.`, + }, + "upper_bound_days": { + Type: schema.TypeInt, + Required: true, + Description: `Range of shift in days. Actual shift will be selected at random within this range (inclusive ends). Negative means shift to earlier in time. Must not be more than 365250 days (1000 years) each direction. + +For example, 3 means shift date to at most 3 days into the future.`, + }, + "context": { + Type: schema.TypeList, + Optional: true, + Description: `Points to the field that contains the context, for example, an entity id. +If set, must also set cryptoKey. 
If set, shift will be consistent for the given context.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + "crypto_key": { + Type: schema.TypeList, + Optional: true, + Description: `Causes the shift to be computed based on this key and the context. This results in the same shift for the same context and cryptoKey. If set, must also set context. Can only be applied to table items.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_wrapped": { + Type: schema.TypeList, + Optional: true, + Description: `KMS wrapped key. +Include to use an existing data crypto key wrapped by KMS. The wrapped key must be a 128-, 192-, or 256-bit key. Authorization requires the following IAM permissions when sending a request to perform a crypto transformation using a KMS-wrapped crypto key: dlp.kms.encrypt +For more information, see [Creating a wrapped key](https://cloud.google.com/dlp/docs/create-wrapped-key). +Note: When you use Cloud KMS for cryptographic operations, [charges apply](https://cloud.google.com/kms/pricing).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crypto_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, + }, + "wrapped_key": { + Type: schema.TypeString, + Required: true, + Description: `The wrapped data crypto key. + +A base64-encoded string.`, + }, + }, + }, + }, + "transient": { + Type: schema.TypeList, + Optional: true, + Description: `Transient crypto key. Use this to have a random data crypto key generated. It will be discarded after the request finishes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the key. 
This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, + }, + }, + }, + }, + "unwrapped": { + Type: schema.TypeList, + Optional: true, + Description: `Unwrapped crypto key. Using raw keys is prone to security risks due to accidentally leaking the key. Choose another type of key if possible.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `A 128/192/256 bit key. + +A base64-encoded string.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "fixed_size_bucketing_config": { + Type: schema.TypeList, + Optional: true, + Description: `Buckets values based on fixed size ranges. The Bucketing transformation can provide all of this functionality, but requires more configuration. This message is provided as a convenience to the user for simple bucketing strategies. + +The transformed value will be a hyphenated string of {lower_bound}-{upper_bound}. For example, if lower_bound = 10 and upper_bound = 20, all values that are within this bucket will be replaced with "10-20". + +This can be used on data of type: double, long. + +If the bound Value type differs from the type of data being transformed, we will first attempt converting the type of the data to be transformed to match the type of the bound before comparing. + +See https://cloud.google.com/dlp/docs/concepts-bucketing to learn more.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket_size": { + Type: schema.TypeFloat, + Required: true, + Description: `Size of each bucket (except for minimum and maximum buckets). 
+So if lower_bound = 10, upper_bound = 89, and bucketSize = 10, then the following buckets would be used: -10, 10-20, 20-30, 30-40, 40-50, 50-60, 60-70, 70-80, 80-89, 89+. +Precision up to 2 decimals works.`, + }, + "lower_bound": { + Type: schema.TypeList, + Required: true, + Description: `Lower bound value of buckets. +All values less than lower_bound are grouped together into a single bucket; for example if lower_bound = 10, then all values less than 10 are replaced with the value "-10". +The 'lower_bound' block must only contain one argument. See the 'fixed_size_bucketing_config' block description for more information about choosing a data type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boolean_value": { + Type: schema.TypeBool, + Optional: true, + Description: `A boolean value.`, + }, + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + "upper_bound": { + Type: schema.TypeList, + Required: true, + Description: `Upper bound value of buckets. +All values greater than upper_bound are grouped together into a single bucket; for example if upper_bound = 89, then all values greater than 89 are replaced with the value "89+". +The 'upper_bound' block must only contain one argument. 
See the 'fixed_size_bucketing_config' block description for more information about choosing a data type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boolean_value": { + Type: schema.TypeBool, + Optional: true, + Description: `A boolean value.`, + }, + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + }, + }, + }, + "redact_config": { + Type: schema.TypeList, + Optional: true, + Description: `Redact a given value. 
For example, if used with an InfoTypeTransformation transforming PHONE_NUMBER, and input 'My phone number is 206-555-0123', the output would be 'My phone number is '.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "replace_config": { + Type: schema.TypeList, + Optional: true, + Description: `Replace with a specified value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "new_value": { + Type: schema.TypeList, + Required: true, + Description: `Replace each input value with a given value. +The 'new_value' block must only contain one argument. For example when replacing the contents of a string-type field, only 'string_value' should be set.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boolean_value": { + Type: schema.TypeBool, + Optional: true, + Description: `A boolean value.`, + }, + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 31), + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 12), + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 9999), + Description: `Year of the date. 
Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 24), + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 59), + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 999999999), + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 60), + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. 
An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateRFC3339Date, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + }, + }, + }, + "replace_dictionary_config": { + Type: schema.TypeList, + Optional: true, + Description: `Replace with a value randomly drawn (with replacement) from a dictionary.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "word_list": { + Type: schema.TypeList, + Optional: true, + Description: `A list of words to select from for random replacement. The [limits](https://cloud.google.com/dlp/limits) page contains details about the size limits of dictionaries.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "words": { + Type: schema.TypeList, + Required: true, + Description: `Words or phrases defining the dictionary. The dictionary must contain at least one phrase and every phrase must contain at least 2 characters that are letters or digits.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "time_part_config": { + Type: schema.TypeList, + Optional: true, + Description: `For use with Date, Timestamp, and TimeOfDay, extract or preserve a portion of the value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "part_to_extract": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"YEAR", "MONTH", "DAY_OF_MONTH", "DAY_OF_WEEK", "WEEK_OF_YEAR", "HOUR_OF_DAY", ""}), + Description: `The part of the time to keep. 
Possible values: ["YEAR", "MONTH", "DAY_OF_MONTH", "DAY_OF_WEEK", "WEEK_OF_YEAR", "HOUR_OF_DAY"]`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + AtLeastOneOf: []string{"deidentify_config.0.record_transformations.0.field_transformations", "deidentify_config.0.record_transformations.0.record_suppressions"}, + }, + "record_suppressions": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration defining which records get suppressed entirely. Records that match any suppression rule are omitted from the output.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "condition": { + Type: schema.TypeList, + Optional: true, + Description: `A condition that when it evaluates to true will result in the record being evaluated to be suppressed from the transformed content.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expressions": { + Type: schema.TypeList, + Optional: true, + Description: `An expression, consisting of an operator and conditions.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "conditions": { + Type: schema.TypeList, + Optional: true, + Description: `Conditions to apply to the expression.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "conditions": { + Type: schema.TypeList, + Optional: true, + Description: `A collection of conditions.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field": { + Type: schema.TypeList, + Required: true, + Description: `Field within the record this condition is evaluated against.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + "operator": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"EQUAL_TO", "NOT_EQUAL_TO", "GREATER_THAN", "LESS_THAN", "GREATER_THAN_OR_EQUALS", 
"LESS_THAN_OR_EQUALS", "EXISTS"}), + Description: `Operator used to compare the field or infoType to the value. Possible values: ["EQUAL_TO", "NOT_EQUAL_TO", "GREATER_THAN", "LESS_THAN", "GREATER_THAN_OR_EQUALS", "LESS_THAN_OR_EQUALS", "EXISTS"]`, + }, + "value": { + Type: schema.TypeList, + Optional: true, + Description: `Value to compare against. [Mandatory, except for EXISTS tests.]`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boolean_value": { + Type: schema.TypeBool, + Optional: true, + Description: `A boolean value.`, + }, + "date_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a whole or partial calendar date.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.`, + }, + }, + }, + }, + "day_of_week_value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}), + Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "float_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A float value.`, + }, + "integer_value": { + Type: schema.TypeString, + Optional: true, + Description: `An integer value (int64 format)`, + }, + "string_value": { + Type: schema.TypeString, + Optional: true, + Description: `A string value.`, + }, + "time_value": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a time of day.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + Description: `Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "timestamp_value": { + Type: schema.TypeString, + Optional: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "logical_operator": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"AND", ""}), + Description: `The operator to apply to the result of conditions. Default and currently only supported value is AND. 
Default value: "AND" Possible values: ["AND"]`, + Default: "AND", + }, + }, + }, + }, + }, + }, + }, + }, + }, + AtLeastOneOf: []string{"deidentify_config.0.record_transformations.0.field_transformations", "deidentify_config.0.record_transformations.0.record_suppressions"}, + }, + }, + }, + ExactlyOneOf: []string{"deidentify_config.0.info_type_transformations", "deidentify_config.0.record_transformations", "deidentify_config.0.image_transformations"}, + }, + }, + }, + }, + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The parent of the template in any of the following formats: + +* 'projects/{{project}}' +* 'projects/{{project}}/locations/{{location}}' +* 'organizations/{{organization_id}}' +* 'organizations/{{organization_id}}/locations/{{location}}'`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of the template.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `User set display name of the template.`, + }, + "template_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The template id can contain uppercase and lowercase letters, numbers, and hyphens; +that is, it must match the regular expression: [a-zA-Z\d-_]+. The maximum length is +100 characters. Can be empty to allow the system to generate one.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The creation timestamp of an deidentifyTemplate. Set by the server.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the template. Set by the server.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The last update timestamp of an deidentifyTemplate. 
Set by the server.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataLossPreventionDeidentifyTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandDataLossPreventionDeidentifyTemplateDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandDataLossPreventionDeidentifyTemplateDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + deidentifyConfigProp, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfig(d.Get("deidentify_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deidentify_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(deidentifyConfigProp)) && (ok || !reflect.DeepEqual(v, deidentifyConfigProp)) { + obj["deidentifyConfig"] = deidentifyConfigProp + } + + obj, err = resourceDataLossPreventionDeidentifyTemplateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/deidentifyTemplates") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new DeidentifyTemplate: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating DeidentifyTemplate: %s", err) + } + if err := d.Set("name", flattenDataLossPreventionDeidentifyTemplateName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/deidentifyTemplates/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating DeidentifyTemplate %q: %#v", d.Id(), res) + + return resourceDataLossPreventionDeidentifyTemplateRead(d, meta) +} + +func resourceDataLossPreventionDeidentifyTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/deidentifyTemplates/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataLossPreventionDeidentifyTemplate %q", d.Id())) + } + + res, err = resourceDataLossPreventionDeidentifyTemplateDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing DataLossPreventionDeidentifyTemplate because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenDataLossPreventionDeidentifyTemplateName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading DeidentifyTemplate: %s", err) + } + if err := d.Set("description", flattenDataLossPreventionDeidentifyTemplateDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading DeidentifyTemplate: %s", err) + } + if err := d.Set("display_name", flattenDataLossPreventionDeidentifyTemplateDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading DeidentifyTemplate: %s", err) + } + if err := d.Set("create_time", flattenDataLossPreventionDeidentifyTemplateCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading DeidentifyTemplate: %s", err) + } + if err := d.Set("update_time", flattenDataLossPreventionDeidentifyTemplateUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading DeidentifyTemplate: %s", err) + } + if err := d.Set("deidentify_config", flattenDataLossPreventionDeidentifyTemplateDeidentifyConfig(res["deidentifyConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading DeidentifyTemplate: %s", err) + } + + return nil +} + +func resourceDataLossPreventionDeidentifyTemplateUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandDataLossPreventionDeidentifyTemplateDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandDataLossPreventionDeidentifyTemplateDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + deidentifyConfigProp, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfig(d.Get("deidentify_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deidentify_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deidentifyConfigProp)) { + obj["deidentifyConfig"] = deidentifyConfigProp + } + + obj, err = resourceDataLossPreventionDeidentifyTemplateUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/deidentifyTemplates/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating DeidentifyTemplate %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("deidentify_config") { + updateMask = append(updateMask, "deidentifyConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: 
url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating DeidentifyTemplate %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating DeidentifyTemplate %q: %#v", d.Id(), res) + } + + return resourceDataLossPreventionDeidentifyTemplateRead(d, meta) +} + +func resourceDataLossPreventionDeidentifyTemplateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/deidentifyTemplates/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting DeidentifyTemplate %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DeidentifyTemplate") + } + + log.Printf("[DEBUG] Finished deleting DeidentifyTemplate %q: %#v", d.Id(), res) + return nil +} + +func resourceDataLossPreventionDeidentifyTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // Custom import to handle parent possibilities + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + parts := strings.Split(d.Get("name").(string), "/") + if len(parts) == 6 { + if err := d.Set("name", parts[5]); err != nil { + return nil, fmt.Errorf("Error 
setting name: %s", err) + } + } else if len(parts) == 4 { + if err := d.Set("name", parts[3]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else { + return nil, fmt.Errorf("Unexpected import id: %s, expected form {{parent}}/deidentifyTemplate/{{name}}", d.Get("name").(string)) + } + // Remove "/deidentifyTemplate/{{name}}" from the id + parts = parts[:len(parts)-2] + if err := d.Set("parent", strings.Join(parts, "/")); err != nil { + return nil, fmt.Errorf("Error setting parent: %s", err) + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/deidentifyTemplates/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDataLossPreventionDeidentifyTemplateName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenDataLossPreventionDeidentifyTemplateDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + 
transformed["image_transformations"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformations(original["imageTransformations"], d, config) + transformed["info_type_transformations"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations(original["infoTypeTransformations"], d, config) + transformed["record_transformations"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformations(original["recordTransformations"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transforms"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransforms(original["transforms"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransforms(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "redaction_color": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColor(original["redactionColor"], d, config), + "selected_info_types": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypes(original["selectedInfoTypes"], d, config), + "all_info_types": 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsAllInfoTypes(original["allInfoTypes"], d, config), + "all_text": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsAllText(original["allText"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["red"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColorRed(original["red"], d, config) + transformed["blue"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColorBlue(original["blue"], d, config) + transformed["green"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColorGreen(original["green"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColorRed(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColorBlue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColorGreen(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypes(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["info_types"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypes(original["infoTypes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesName(original["name"], d, config), + "version": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesVersion(original["version"], d, config), + "sensitivity_score": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesSensitivityScore(original["sensitivityScore"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsAllInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsAllText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transformations"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations(original["transformations"], d, config) + return 
[]interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "info_types": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes(original["infoTypes"], d, config), + "primitive_transformation": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation(original["primitiveTransformation"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesName(original["name"], d, config), + "version": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesVersion(original["version"], d, config), + "sensitivity_score": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesSensitivityScore(original["sensitivityScore"], d, config), + }) + } + return 
transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["replace_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(original["replaceConfig"], d, config) + transformed["replace_with_info_type_config"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(original["replaceWithInfoTypeConfig"], d, config) + transformed["character_mask_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(original["characterMaskConfig"], d, config) + transformed["crypto_deterministic_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(original["cryptoDeterministicConfig"], d, config) + transformed["crypto_replace_ffx_fpe_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(original["cryptoReplaceFfxFpeConfig"], d, config) + transformed["replace_dictionary_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfig(original["replaceDictionaryConfig"], d, config) + transformed["date_shift_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfig(original["dateShiftConfig"], d, config) + transformed["fixed_size_bucketing_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfig(original["fixedSizeBucketingConfig"], d, config) + transformed["bucketing_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfig(original["bucketingConfig"], d, config) + transformed["time_part_config"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfig(original["timePartConfig"], d, config) + transformed["redact_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationRedactConfig(original["redactConfig"], d, config) + transformed["crypto_hash_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfig(original["cryptoHashConfig"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["new_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(original["newValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(original["stringValue"], d, config) + transformed["boolean_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(original["booleanValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // 
number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(original["year"], d, config) + transformed["month"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v != nil +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["masking_character"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(original["maskingCharacter"], d, config) + transformed["number_to_mask"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(original["numberToMask"], d, config) + transformed["reverse_order"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(original["reverseOrder"], d, config) + transformed["characters_to_ignore"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(original["charactersToIgnore"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "characters_to_skip": 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(original["charactersToSkip"], d, config), + "common_characters_to_ignore": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(original["commonCharactersToIgnore"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["crypto_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(original["cryptoKey"], d, config) + transformed["surrogate_info_type"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(original["surrogateInfoType"], d, config) + transformed["context"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(original["context"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transient"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(original["transient"], d, config) + transformed["unwrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + transformed["kms_wrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(original["name"], d, config) 
+ return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(original["key"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["wrapped_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) + transformed["crypto_key_name"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(original["name"], d, config) + transformed["version"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(original["version"], d, config) + transformed["sensitivity_score"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore(original["sensitivityScore"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["crypto_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(original["cryptoKey"], d, config) + transformed["context"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(original["context"], d, config) + transformed["surrogate_info_type"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(original["surrogateInfoType"], d, config) + transformed["common_alphabet"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(original["commonAlphabet"], d, config) + transformed["custom_alphabet"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(original["customAlphabet"], d, config) + transformed["radix"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(original["radix"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transient"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(original["transient"], d, config) + transformed["unwrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + transformed["kms_wrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) 
+ transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(original["key"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["wrapped_key"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) + transformed["crypto_key_name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(original["name"], d, config) + transformed["version"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(original["version"], d, config) + transformed["sensitivity_score"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore(original["sensitivityScore"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return 
nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfig(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["word_list"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(original["wordList"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["words"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(original["words"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["context"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContext(original["context"], 
d, config) + transformed["crypto_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(original["cryptoKey"], d, config) + transformed["upper_bound_days"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(original["upperBoundDays"], d, config) + transformed["lower_bound_days"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(original["lowerBoundDays"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContext(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContextName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContextName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) 
+ transformed["transient"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(original["transient"], d, config) + transformed["unwrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + transformed["kms_wrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := 
make(map[string]interface{}) + transformed["key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(original["key"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["wrapped_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) + transformed["crypto_key_name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v 
+} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["lower_bound"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(original["lowerBound"], d, config) + transformed["upper_bound"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(original["upperBound"], d, config) + transformed["bucket_size"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(original["bucketSize"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(original["floatValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(original["floatValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["buckets"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBuckets(original["buckets"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBuckets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "min": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMin(original["min"], d, config), + "max": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMax(original["max"], d, config), + "replacement_value": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(original["replacementValue"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(original["stringValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return 
intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMax(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(original["stringValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(original["dayOfWeekValue"], d, config) + return 
[]interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { 
+ if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(original["stringValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(original["hours"], d, 
config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + 
+func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(original["year"], d, config) + transformed["month"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string 
fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["part_to_extract"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfigPartToExtract(original["partToExtract"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfigPartToExtract(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationRedactConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["crypto_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(original["cryptoKey"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transient"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(original["transient"], d, config) + transformed["unwrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + transformed["kms_wrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + 
return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(original["key"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + 
transformed["wrapped_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) + transformed["crypto_key_name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["field_transformations"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformations(original["fieldTransformations"], d, config) + transformed["record_suppressions"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressions(original["recordSuppressions"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if 
v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "fields": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFields(original["fields"], d, config), + "condition": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsCondition(original["condition"], d, config), + "primitive_transformation": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformation(original["primitiveTransformation"], d, config), + "info_type_transformations": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformations(original["infoTypeTransformations"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFieldsName(original["name"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFieldsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { 
+ return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsCondition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["expressions"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressions(original["expressions"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["logical_operator"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsLogicalOperator(original["logicalOperator"], d, config) + transformed["conditions"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditions(original["conditions"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsLogicalOperator(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + 
return nil + } + transformed := make(map[string]interface{}) + transformed["conditions"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditions(original["conditions"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "field": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsField(original["field"], d, config), + "operator": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsOperator(original["operator"], d, config), + "value": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValue(original["value"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsField(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsFieldName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsFieldName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsOperator(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueStringValue(original["stringValue"], d, config) + transformed["boolean_value"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueBooleanValue(original["booleanValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueBooleanValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueNanos(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + 
if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := 
make(map[string]interface{}) + transformed["replace_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfig(original["replaceConfig"], d, config) + transformed["redact_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationRedactConfig(original["redactConfig"], d, config) + transformed["character_mask_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfig(original["characterMaskConfig"], d, config) + transformed["crypto_replace_ffx_fpe_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(original["cryptoReplaceFfxFpeConfig"], d, config) + transformed["fixed_size_bucketing_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfig(original["fixedSizeBucketingConfig"], d, config) + transformed["bucketing_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfig(original["bucketingConfig"], d, config) + transformed["time_part_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfig(original["timePartConfig"], d, config) + transformed["crypto_hash_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfig(original["cryptoHashConfig"], d, config) + transformed["date_shift_config"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfig(original["dateShiftConfig"], d, config) + transformed["crypto_deterministic_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfig(original["cryptoDeterministicConfig"], d, config) + transformed["replace_dictionary_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfig(original["replaceDictionaryConfig"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["new_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValue(original["newValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(original["integerValue"], d, config) + transformed["float_value"] 
= + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(original["stringValue"], d, config) + transformed["boolean_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(original["booleanValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if 
intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationRedactConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["masking_character"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(original["maskingCharacter"], d, config) + transformed["number_to_mask"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(original["numberToMask"], d, config) + transformed["reverse_order"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(original["reverseOrder"], d, config) + transformed["characters_to_ignore"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(original["charactersToIgnore"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "characters_to_skip": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(original["charactersToSkip"], d, config), + "common_characters_to_ignore": 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(original["commonCharactersToIgnore"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["crypto_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(original["cryptoKey"], d, config) + transformed["context"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(original["context"], d, config) + transformed["surrogate_info_type"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(original["surrogateInfoType"], d, config) + transformed["common_alphabet"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(original["commonAlphabet"], d, config) + transformed["custom_alphabet"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(original["customAlphabet"], d, config) + transformed["radix"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(original["radix"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transient"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(original["transient"], d, config) + transformed["unwrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + transformed["kms_wrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(original["key"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["wrapped_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) + transformed["crypto_key_name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(original["name"], d, config) + transformed["version"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(original["version"], d, config) + transformed["sensitivity_score"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore(original["sensitivityScore"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["lower_bound"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(original["lowerBound"], d, config) + transformed["upper_bound"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(original["upperBound"], d, config) + transformed["bucket_size"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(original["bucketSize"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundStringValue(original["stringValue"], d, config) + transformed["boolean_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundBooleanValue(original["booleanValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundBooleanValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + 
// number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundStringValue(original["stringValue"], d, config) + transformed["boolean_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundBooleanValue(original["booleanValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + 
+func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundBooleanValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := 
v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["buckets"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBuckets(original["buckets"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBuckets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "min": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMin(original["min"], d, config), + "max": 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMax(original["max"], d, config), + "replacement_value": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(original["replacementValue"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(original["stringValue"], d, config) + transformed["boolean_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinBooleanValue(original["booleanValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(original["timestampValue"], d, 
config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinBooleanValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let 
terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + 
if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMax(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(original["stringValue"], d, config) + transformed["boolean_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxBooleanValue(original["booleanValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxBooleanValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := 
v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(original["day"], d, config) + return 
[]interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(original["stringValue"], d, config) + transformed["boolean_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueBooleanValue(original["booleanValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueBooleanValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, 
ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + 
} + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["part_to_extract"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfigPartToExtract(original["partToExtract"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfigPartToExtract(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["crypto_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(original["cryptoKey"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transient"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(original["transient"], d, config) + transformed["unwrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + 
transformed["kms_wrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(original["key"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["wrapped_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) + transformed["crypto_key_name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return 
nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["upper_bound_days"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(original["upperBoundDays"], d, config) + transformed["lower_bound_days"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(original["lowerBoundDays"], d, config) + transformed["context"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContext(original["context"], d, config) + transformed["crypto_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(original["cryptoKey"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContext(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContextName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContextName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transient"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(original["transient"], d, config) + transformed["unwrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(original["unwrapped"], d, 
config) + transformed["kms_wrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(original["key"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["wrapped_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) + transformed["crypto_key_name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + 
return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["crypto_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(original["cryptoKey"], d, config) + transformed["surrogate_info_type"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(original["surrogateInfoType"], d, config) + transformed["context"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(original["context"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transient"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(original["transient"], d, config) + transformed["unwrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + transformed["kms_wrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) + return 
[]interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(original["key"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["wrapped_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) + transformed["crypto_key_name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(original["name"], d, config) + transformed["version"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(original["version"], d, config) + transformed["sensitivity_score"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore(original["sensitivityScore"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScoreScore(original["score"], d, config) + return 
[]interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["word_list"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(original["wordList"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["words"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(original["words"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transformations"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformations(original["transformations"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include 
empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "info_types": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypes(original["infoTypes"], d, config), + "primitive_transformation": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformation(original["primitiveTransformation"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesName(original["name"], d, config), + "version": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesVersion(original["version"], d, config), + "sensitivity_score": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesSensitivityScore(original["sensitivityScore"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesName(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["replace_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(original["replaceConfig"], d, config) + transformed["redact_config"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationRedactConfig(original["redactConfig"], d, config) + transformed["character_mask_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(original["characterMaskConfig"], d, config) + transformed["crypto_replace_ffx_fpe_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(original["cryptoReplaceFfxFpeConfig"], d, config) + transformed["fixed_size_bucketing_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfig(original["fixedSizeBucketingConfig"], d, config) + transformed["bucketing_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfig(original["bucketingConfig"], d, config) + transformed["replace_with_info_type_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(original["replaceWithInfoTypeConfig"], d, config) + transformed["time_part_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfig(original["timePartConfig"], d, config) + transformed["crypto_hash_config"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfig(original["cryptoHashConfig"], d, config) + transformed["date_shift_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfig(original["dateShiftConfig"], d, config) + transformed["crypto_deterministic_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(original["cryptoDeterministicConfig"], d, config) + transformed["replace_dictionary_config"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfig(original["replaceDictionaryConfig"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["new_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(original["newValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(original["stringValue"], d, config) + transformed["boolean_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(original["booleanValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + 
return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are 
represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + 
return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationRedactConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + 
return []interface{}{transformed} +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["masking_character"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(original["maskingCharacter"], d, config) + transformed["number_to_mask"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(original["numberToMask"], d, config) + transformed["reverse_order"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(original["reverseOrder"], d, config) + transformed["characters_to_ignore"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(original["charactersToIgnore"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "characters_to_skip": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(original["charactersToSkip"], d, config), + "common_characters_to_ignore": 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(original["commonCharactersToIgnore"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["crypto_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(original["cryptoKey"], d, config) + transformed["context"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(original["context"], d, config) + transformed["surrogate_info_type"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(original["surrogateInfoType"], d, config) + transformed["common_alphabet"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(original["commonAlphabet"], d, config) + transformed["custom_alphabet"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(original["customAlphabet"], d, config) + transformed["radix"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(original["radix"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transient"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(original["transient"], d, config) + transformed["unwrapped"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + transformed["kms_wrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if 
len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(original["key"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["wrapped_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) + transformed["crypto_key_name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(original["name"], d, config) + transformed["version"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(original["version"], d, config) + transformed["sensitivity_score"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore(original["sensitivityScore"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["lower_bound"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(original["lowerBound"], d, config) + transformed["upper_bound"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(original["upperBound"], d, config) + transformed["bucket_size"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(original["bucketSize"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(original["integerValue"], d, config) + 
transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(original["floatValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(original["floatValue"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["buckets"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBuckets(original["buckets"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBuckets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 
0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "min": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMin(original["min"], d, config), + "max": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMax(original["max"], d, config), + "replacement_value": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(original["replacementValue"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(original["floatValue"], d, config) + transformed["string_value"] = 
+ flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(original["stringValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); 
err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(original["month"], d, config) + transformed["day"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil 
{ + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMax(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(original["stringValue"], d, config) + transformed["timestamp_value"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(original["stringValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["part_to_extract"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfigPartToExtract(original["partToExtract"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfigPartToExtract(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["crypto_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(original["cryptoKey"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transient"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(original["transient"], d, config) + transformed["unwrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + transformed["kms_wrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(original["key"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["wrapped_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) + transformed["crypto_key_name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["upper_bound_days"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(original["upperBoundDays"], d, config) + transformed["lower_bound_days"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(original["lowerBoundDays"], d, config) + transformed["context"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContext(original["context"], d, config) + transformed["crypto_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(original["cryptoKey"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContext(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContextName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContextName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + 
if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transient"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(original["transient"], d, config) + transformed["unwrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + transformed["kms_wrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(original["key"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["wrapped_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) + transformed["crypto_key_name"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["crypto_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(original["cryptoKey"], d, config) + transformed["surrogate_info_type"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(original["surrogateInfoType"], d, config) + transformed["context"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(original["context"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["transient"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(original["transient"], d, config) + transformed["unwrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + transformed["kms_wrapped"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := 
make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(original["key"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["wrapped_key"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) + transformed["crypto_key_name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(original["name"], d, config) + transformed["version"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(original["version"], d, config) + transformed["sensitivity_score"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore(original["sensitivityScore"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["word_list"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(original["wordList"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["words"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(original["words"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "condition": 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsCondition(original["condition"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsCondition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["expressions"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressions(original["expressions"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["logical_operator"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsLogicalOperator(original["logicalOperator"], d, config) + transformed["conditions"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditions(original["conditions"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsLogicalOperator(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditions(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["conditions"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditions(original["conditions"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "field": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsField(original["field"], d, config), + "operator": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsOperator(original["operator"], d, config), + "value": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValue(original["value"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsField(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := 
make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsFieldName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsFieldName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsOperator(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["integer_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueIntegerValue(original["integerValue"], d, config) + transformed["float_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueFloatValue(original["floatValue"], d, config) + transformed["string_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueStringValue(original["stringValue"], d, config) + transformed["boolean_value"] = + 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueBooleanValue(original["booleanValue"], d, config) + transformed["timestamp_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimestampValue(original["timestampValue"], d, config) + transformed["time_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValue(original["timeValue"], d, config) + transformed["date_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValue(original["dateValue"], d, config) + transformed["day_of_week_value"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDayOfWeekValue(original["dayOfWeekValue"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueIntegerValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueFloatValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueStringValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueBooleanValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimestampValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueHours(original["hours"], d, config) + transformed["minutes"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueHours(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, 
err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueYear(original["year"], d, config) + transformed["month"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueMonth(original["month"], d, config) + transformed["day"] = + flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDayOfWeekValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDataLossPreventionDeidentifyTemplateDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedImageTransformations, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformations(original["image_transformations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImageTransformations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["imageTransformations"] = transformedImageTransformations + } + + transformedInfoTypeTransformations, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations(original["info_type_transformations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoTypeTransformations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoTypeTransformations"] = transformedInfoTypeTransformations + } + + transformedRecordTransformations, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformations(original["record_transformations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRecordTransformations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["recordTransformations"] = transformedRecordTransformations + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransforms, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransforms(original["transforms"], d, config) + 
if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransforms); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transforms"] = transformedTransforms + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransforms(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRedactionColor, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColor(original["redaction_color"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRedactionColor); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["redactionColor"] = transformedRedactionColor + } + + transformedSelectedInfoTypes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypes(original["selected_info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSelectedInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["selectedInfoTypes"] = transformedSelectedInfoTypes + } + + transformedAllInfoTypes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsAllInfoTypes(original["all_info_types"], d, config) + if err != nil { + return nil, err + } else { + transformed["allInfoTypes"] = transformedAllInfoTypes + } + + transformedAllText, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsAllText(original["all_text"], d, config) + if err != nil { + return nil, err + } else { + transformed["allText"] = transformedAllText + } + + req 
= append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColor(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRed, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColorRed(original["red"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRed); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["red"] = transformedRed + } + + transformedBlue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColorBlue(original["blue"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBlue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["blue"] = transformedBlue + } + + transformedGreen, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColorGreen(original["green"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGreen); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["green"] = transformedGreen + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColorRed(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColorBlue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil 
+} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsRedactionColorGreen(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInfoTypes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypes(original["info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoTypes"] = transformedInfoTypes + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, 
err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsSelectedInfoTypesInfoTypesSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsAllInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigImageTransformationsTransformsAllText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransformations, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations(original["transformations"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedTransformations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transformations"] = transformedTransformations + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInfoTypes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes(original["info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoTypes"] = transformedInfoTypes + } + + transformedPrimitiveTransformation, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation(original["primitive_transformation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimitiveTransformation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["primitiveTransformation"] = transformedPrimitiveTransformation + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + 
original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedReplaceConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(original["replace_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReplaceConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["replaceConfig"] = transformedReplaceConfig + } + + transformedReplaceWithInfoTypeConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(original["replace_with_info_type_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReplaceWithInfoTypeConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["replaceWithInfoTypeConfig"] = transformedReplaceWithInfoTypeConfig + } + + transformedCharacterMaskConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(original["character_mask_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCharacterMaskConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["characterMaskConfig"] = transformedCharacterMaskConfig + } + + transformedCryptoDeterministicConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(original["crypto_deterministic_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoDeterministicConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoDeterministicConfig"] = transformedCryptoDeterministicConfig + } + + transformedCryptoReplaceFfxFpeConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(original["crypto_replace_ffx_fpe_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoReplaceFfxFpeConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoReplaceFfxFpeConfig"] = transformedCryptoReplaceFfxFpeConfig + } + + transformedReplaceDictionaryConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfig(original["replace_dictionary_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReplaceDictionaryConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["replaceDictionaryConfig"] = 
transformedReplaceDictionaryConfig + } + + transformedDateShiftConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfig(original["date_shift_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateShiftConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateShiftConfig"] = transformedDateShiftConfig + } + + transformedFixedSizeBucketingConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfig(original["fixed_size_bucketing_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFixedSizeBucketingConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fixedSizeBucketingConfig"] = transformedFixedSizeBucketingConfig + } + + transformedBucketingConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfig(original["bucketing_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucketingConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucketingConfig"] = transformedBucketingConfig + } + + transformedTimePartConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfig(original["time_part_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimePartConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timePartConfig"] = transformedTimePartConfig + } + + transformedRedactConfig, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationRedactConfig(original["redact_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["redactConfig"] = transformedRedactConfig + } + + transformedCryptoHashConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfig(original["crypto_hash_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoHashConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoHashConfig"] = transformedCryptoHashConfig + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNewValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(original["new_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNewValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["newValue"] = transformedNewValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(original["boolean_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["booleanValue"] = transformedBooleanValue + } + + transformedTimestampValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, 
nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + 
transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil || !v.(bool) { + return nil, nil + } + + return struct{}{}, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + 
transformedMaskingCharacter, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(original["masking_character"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaskingCharacter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maskingCharacter"] = transformedMaskingCharacter + } + + transformedNumberToMask, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(original["number_to_mask"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNumberToMask); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["numberToMask"] = transformedNumberToMask + } + + transformedReverseOrder, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(original["reverse_order"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReverseOrder); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["reverseOrder"] = transformedReverseOrder + } + + transformedCharactersToIgnore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(original["characters_to_ignore"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCharactersToIgnore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["charactersToIgnore"] = transformedCharactersToIgnore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCharactersToSkip, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(original["characters_to_skip"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCharactersToSkip); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["charactersToSkip"] = transformedCharactersToSkip + } + + transformedCommonCharactersToIgnore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(original["common_characters_to_ignore"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedCommonCharactersToIgnore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["commonCharactersToIgnore"] = transformedCommonCharactersToIgnore + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(original["crypto_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKey"] = transformedCryptoKey + } + + transformedSurrogateInfoType, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(original["surrogate_info_type"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedSurrogateInfoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["surrogateInfoType"] = transformedSurrogateInfoType + } + + transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(original["context"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContext); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["context"] = transformedContext + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(original["transient"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transient"] = transformedTransient + } + + transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unwrapped"] = transformedUnwrapped + } + + transformedKmsWrapped, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsWrapped"] = transformedKmsWrapped + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] 
+ original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wrappedKey"] = transformedWrappedKey + } + + transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKeyName"] = transformedCryptoKeyName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err 
+ } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } 
+ raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(original["crypto_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKey"] = transformedCryptoKey + } + + transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(original["context"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContext); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["context"] = transformedContext + } + + transformedSurrogateInfoType, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(original["surrogate_info_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSurrogateInfoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["surrogateInfoType"] = transformedSurrogateInfoType + } + + transformedCommonAlphabet, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(original["common_alphabet"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCommonAlphabet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["commonAlphabet"] = transformedCommonAlphabet + } + + transformedCustomAlphabet, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(original["custom_alphabet"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCustomAlphabet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["customAlphabet"] = transformedCustomAlphabet + } + + transformedRadix, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(original["radix"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRadix); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["radix"] = transformedRadix + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(original["transient"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transient"] = transformedTransient + } + + transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + if err != nil { + return nil, err + } else if val 
:= reflect.ValueOf(transformedUnwrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unwrapped"] = transformedUnwrapped + } + + transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsWrapped"] = transformedKmsWrapped + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wrappedKey"] = transformedWrappedKey + } + + transformedCryptoKeyName, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKeyName"] = transformedCryptoKeyName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["sensitivityScore"] = transformedSensitivityScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWordList, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(original["word_list"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWordList); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wordList"] = transformedWordList + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWords, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(original["words"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWords); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["words"] = transformedWords + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContext(original["context"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContext); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["context"] = transformedContext + } + + transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(original["crypto_key"], d, config) + if err != nil { + return nil, err + } else 
if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKey"] = transformedCryptoKey + } + + transformedUpperBoundDays, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(original["upper_bound_days"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUpperBoundDays); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["upperBoundDays"] = transformedUpperBoundDays + } + + transformedLowerBoundDays, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(original["lower_bound_days"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLowerBoundDays); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["lowerBoundDays"] = transformedLowerBoundDays + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContext(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContextName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContextName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(original["transient"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transient"] = transformedTransient + } + + transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unwrapped"] = transformedUnwrapped + } + + transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsWrapped"] = transformedKmsWrapped + } + + return transformed, nil 
+} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = 
transformedKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wrappedKey"] = transformedWrappedKey + } + + transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKeyName"] = transformedCryptoKeyName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, 
nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLowerBound, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(original["lower_bound"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLowerBound); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["lowerBound"] = transformedLowerBound + } + + transformedUpperBound, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(original["upper_bound"], d, config) + if err != nil { + return nil, err + } 
else if val := reflect.ValueOf(transformedUpperBound); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["upperBound"] = transformedUpperBound + } + + transformedBucketSize, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(original["bucket_size"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucketSize); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucketSize"] = transformedBucketSize + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBuckets, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBuckets(original["buckets"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBuckets); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["buckets"] = transformedBuckets + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBuckets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := 
v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMin, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMin(original["min"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMin); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["min"] = transformedMin + } + + transformedMax, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMax(original["max"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMax); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["max"] = transformedMax + } + + transformedReplacementValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(original["replacement_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReplacementValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["replacementValue"] = transformedReplacementValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMin(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := 
make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMax(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := 
v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + 
transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPartToExtract, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfigPartToExtract(original["part_to_extract"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPartToExtract); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["partToExtract"] = transformedPartToExtract + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfigPartToExtract(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationRedactConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(original["crypto_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKey"] = transformedCryptoKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(original["transient"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transient"] = transformedTransient + } + + transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unwrapped"] = transformedUnwrapped + } + + transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsWrapped"] = transformedKmsWrapped + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wrappedKey"] = transformedWrappedKey + } + + transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKeyName"] = transformedCryptoKeyName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFieldTransformations, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformations(original["field_transformations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFieldTransformations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fieldTransformations"] = transformedFieldTransformations + } + + transformedRecordSuppressions, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressions(original["record_suppressions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRecordSuppressions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["recordSuppressions"] = transformedRecordSuppressions + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFields, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFields(original["fields"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFields); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fields"] = 
transformedFields + } + + transformedCondition, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsCondition(original["condition"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCondition); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["condition"] = transformedCondition + } + + transformedPrimitiveTransformation, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformation(original["primitive_transformation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimitiveTransformation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["primitiveTransformation"] = transformedPrimitiveTransformation + } + + transformedInfoTypeTransformations, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformations(original["info_type_transformations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoTypeTransformations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoTypeTransformations"] = transformedInfoTypeTransformations + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFieldsName(original["name"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsFieldsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsCondition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExpressions, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressions(original["expressions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpressions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expressions"] = transformedExpressions + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLogicalOperator, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsLogicalOperator(original["logical_operator"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedLogicalOperator); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["logicalOperator"] = transformedLogicalOperator + } + + transformedConditions, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditions(original["conditions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["conditions"] = transformedConditions + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsLogicalOperator(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedConditions, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditions(original["conditions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["conditions"] = transformedConditions + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := 
v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedField, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsField(original["field"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedField); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["field"] = transformedField + } + + transformedOperator, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsOperator(original["operator"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOperator); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["operator"] = transformedOperator + } + + transformedValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsField(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsFieldName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsFieldName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsOperator(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueBooleanValue(original["boolean_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["booleanValue"] = transformedBooleanValue + } + + transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueBooleanValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsConditionExpressionsConditionsConditionsValueDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedReplaceConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfig(original["replace_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReplaceConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["replaceConfig"] = transformedReplaceConfig + } + + transformedRedactConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationRedactConfig(original["redact_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["redactConfig"] = 
transformedRedactConfig + } + + transformedCharacterMaskConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfig(original["character_mask_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCharacterMaskConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["characterMaskConfig"] = transformedCharacterMaskConfig + } + + transformedCryptoReplaceFfxFpeConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(original["crypto_replace_ffx_fpe_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoReplaceFfxFpeConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoReplaceFfxFpeConfig"] = transformedCryptoReplaceFfxFpeConfig + } + + transformedFixedSizeBucketingConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfig(original["fixed_size_bucketing_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFixedSizeBucketingConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fixedSizeBucketingConfig"] = transformedFixedSizeBucketingConfig + } + + transformedBucketingConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfig(original["bucketing_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucketingConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucketingConfig"] = transformedBucketingConfig + } + + transformedTimePartConfig, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfig(original["time_part_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimePartConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timePartConfig"] = transformedTimePartConfig + } + + transformedCryptoHashConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfig(original["crypto_hash_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoHashConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoHashConfig"] = transformedCryptoHashConfig + } + + transformedDateShiftConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfig(original["date_shift_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateShiftConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateShiftConfig"] = transformedDateShiftConfig + } + + transformedCryptoDeterministicConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfig(original["crypto_deterministic_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoDeterministicConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoDeterministicConfig"] = transformedCryptoDeterministicConfig + } + + transformedReplaceDictionaryConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfig(original["replace_dictionary_config"], d, 
config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReplaceDictionaryConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["replaceDictionaryConfig"] = transformedReplaceDictionaryConfig + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNewValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValue(original["new_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNewValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["newValue"] = transformedNewValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = 
transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(original["boolean_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["booleanValue"] = transformedBooleanValue + } + + transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(original["time_value"], d, config) + if err 
!= nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationRedactConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMaskingCharacter, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(original["masking_character"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaskingCharacter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maskingCharacter"] = transformedMaskingCharacter + } + + transformedNumberToMask, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(original["number_to_mask"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNumberToMask); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["numberToMask"] = transformedNumberToMask + } + + transformedReverseOrder, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(original["reverse_order"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReverseOrder); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["reverseOrder"] = transformedReverseOrder + } + + transformedCharactersToIgnore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(original["characters_to_ignore"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCharactersToIgnore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["charactersToIgnore"] = transformedCharactersToIgnore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCharactersToSkip, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(original["characters_to_skip"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCharactersToSkip); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["charactersToSkip"] = transformedCharactersToSkip + } + + transformedCommonCharactersToIgnore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(original["common_characters_to_ignore"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedCommonCharactersToIgnore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["commonCharactersToIgnore"] = transformedCommonCharactersToIgnore + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(original["crypto_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKey"] = transformedCryptoKey + } + + transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(original["context"], d, config) + if err != nil { + return nil, err + } 
else if val := reflect.ValueOf(transformedContext); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["context"] = transformedContext + } + + transformedSurrogateInfoType, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(original["surrogate_info_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSurrogateInfoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["surrogateInfoType"] = transformedSurrogateInfoType + } + + transformedCommonAlphabet, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(original["common_alphabet"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCommonAlphabet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["commonAlphabet"] = transformedCommonAlphabet + } + + transformedCustomAlphabet, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(original["custom_alphabet"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCustomAlphabet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["customAlphabet"] = transformedCustomAlphabet + } + + transformedRadix, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(original["radix"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRadix); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["radix"] = transformedRadix + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(original["transient"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transient"] = transformedTransient + } + + transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unwrapped"] = transformedUnwrapped + } + + transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsWrapped"] = transformedKmsWrapped + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wrappedKey"] = transformedWrappedKey + } + + transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKeyName"] = transformedCryptoKeyName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + 
return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLowerBound, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(original["lower_bound"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLowerBound); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["lowerBound"] = transformedLowerBound + } + + transformedUpperBound, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(original["upper_bound"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUpperBound); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["upperBound"] = transformedUpperBound + } + + transformedBucketSize, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(original["bucket_size"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucketSize); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucketSize"] = 
transformedBucketSize + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedBooleanValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundBooleanValue(original["boolean_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["booleanValue"] = transformedBooleanValue + } + + transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValue(original["day_of_week_value"], d, config) + if err 
!= nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundBooleanValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundBooleanValue(original["boolean_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["booleanValue"] = transformedBooleanValue + } + + transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundBooleanValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } 
else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBuckets, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBuckets(original["buckets"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBuckets); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["buckets"] = transformedBuckets + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBuckets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMin, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMin(original["min"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMin); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["min"] = transformedMin + } + + transformedMax, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMax(original["max"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMax); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["max"] = transformedMax + } + + transformedReplacementValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(original["replacement_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReplacementValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["replacementValue"] = transformedReplacementValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMin(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinBooleanValue(original["boolean_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["booleanValue"] = transformedBooleanValue + } + + transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if 
val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinBooleanValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMax(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := 
make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxBooleanValue(original["boolean_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["booleanValue"] = transformedBooleanValue + } + + transformedTimestampValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxBooleanValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed 
:= make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueBooleanValue(original["boolean_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["booleanValue"] = transformedBooleanValue + } + + transformedTimestampValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueBooleanValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + 
return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPartToExtract, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfigPartToExtract(original["part_to_extract"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPartToExtract); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["partToExtract"] = transformedPartToExtract + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationTimePartConfigPartToExtract(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(original["crypto_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKey); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKey"] = transformedCryptoKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(original["transient"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transient"] = transformedTransient + } + + transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unwrapped"] = transformedUnwrapped + } + + transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsWrapped"] = transformedKmsWrapped + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] 
= transformedKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wrappedKey"] = transformedWrappedKey + } + + transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKeyName"] = transformedCryptoKeyName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUpperBoundDays, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(original["upper_bound_days"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUpperBoundDays); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["upperBoundDays"] = transformedUpperBoundDays + } + + transformedLowerBoundDays, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(original["lower_bound_days"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLowerBoundDays); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["lowerBoundDays"] = transformedLowerBoundDays + } + + transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContext(original["context"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContext); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["context"] = transformedContext + } + + transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(original["crypto_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKey"] = transformedCryptoKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContext(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContextName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigContextName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(original["transient"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transient"] = transformedTransient + } + + transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unwrapped"] = transformedUnwrapped + } + + transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsWrapped"] = transformedKmsWrapped + } + + return 
transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) 
{ + transformed["key"] = transformedKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wrappedKey"] = transformedWrappedKey + } + + transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKeyName"] = transformedCryptoKeyName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(original["crypto_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKey"] = transformedCryptoKey + } + + transformedSurrogateInfoType, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(original["surrogate_info_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSurrogateInfoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["surrogateInfoType"] = transformedSurrogateInfoType + } + + transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(original["context"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedContext); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["context"] = transformedContext + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(original["transient"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transient"] = transformedTransient + } + + transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unwrapped"] = transformedUnwrapped + } + + transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsWrapped"] = transformedKmsWrapped + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wrappedKey"] = transformedWrappedKey + } + + transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKeyName"] = transformedCryptoKeyName + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return 
transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWordList, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(original["word_list"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWordList); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wordList"] = transformedWordList + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWords, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(original["words"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWords); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["words"] = transformedWords + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := 
make(map[string]interface{}) + + transformedTransformations, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformations(original["transformations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransformations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transformations"] = transformedTransformations + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInfoTypes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypes(original["info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoTypes"] = transformedInfoTypes + } + + transformedPrimitiveTransformation, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformation(original["primitive_transformation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimitiveTransformation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["primitiveTransformation"] = transformedPrimitiveTransformation + } + + req = append(req, transformed) + } + return req, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesName(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsInfoTypesSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := 
make(map[string]interface{}) + + transformedReplaceConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(original["replace_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReplaceConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["replaceConfig"] = transformedReplaceConfig + } + + transformedRedactConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationRedactConfig(original["redact_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["redactConfig"] = transformedRedactConfig + } + + transformedCharacterMaskConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(original["character_mask_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCharacterMaskConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["characterMaskConfig"] = transformedCharacterMaskConfig + } + + transformedCryptoReplaceFfxFpeConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(original["crypto_replace_ffx_fpe_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoReplaceFfxFpeConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoReplaceFfxFpeConfig"] = transformedCryptoReplaceFfxFpeConfig + } + + transformedFixedSizeBucketingConfig, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfig(original["fixed_size_bucketing_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFixedSizeBucketingConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fixedSizeBucketingConfig"] = transformedFixedSizeBucketingConfig + } + + transformedBucketingConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfig(original["bucketing_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucketingConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucketingConfig"] = transformedBucketingConfig + } + + transformedReplaceWithInfoTypeConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(original["replace_with_info_type_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["replaceWithInfoTypeConfig"] = transformedReplaceWithInfoTypeConfig + } + + transformedTimePartConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfig(original["time_part_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimePartConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timePartConfig"] = transformedTimePartConfig + } + + transformedCryptoHashConfig, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfig(original["crypto_hash_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoHashConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoHashConfig"] = transformedCryptoHashConfig + } + + transformedDateShiftConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfig(original["date_shift_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateShiftConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateShiftConfig"] = transformedDateShiftConfig + } + + transformedCryptoDeterministicConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(original["crypto_deterministic_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoDeterministicConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoDeterministicConfig"] = transformedCryptoDeterministicConfig + } + + transformedReplaceDictionaryConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfig(original["replace_dictionary_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReplaceDictionaryConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["replaceDictionaryConfig"] = transformedReplaceDictionaryConfig + } + + return transformed, nil +} + 
+func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNewValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(original["new_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNewValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["newValue"] = transformedNewValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(original["boolean_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["booleanValue"] = transformedBooleanValue + } + + transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, 
err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + 
return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["month"] = transformedMonth + } + + transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationRedactConfig(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMaskingCharacter, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(original["masking_character"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaskingCharacter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maskingCharacter"] = transformedMaskingCharacter + } + + transformedNumberToMask, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(original["number_to_mask"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNumberToMask); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["numberToMask"] = transformedNumberToMask + } + + transformedReverseOrder, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(original["reverse_order"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReverseOrder); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["reverseOrder"] = transformedReverseOrder + } + + transformedCharactersToIgnore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(original["characters_to_ignore"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCharactersToIgnore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["charactersToIgnore"] = transformedCharactersToIgnore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCharactersToSkip, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(original["characters_to_skip"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCharactersToSkip); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["charactersToSkip"] = transformedCharactersToSkip + } + + transformedCommonCharactersToIgnore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(original["common_characters_to_ignore"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCommonCharactersToIgnore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["commonCharactersToIgnore"] = transformedCommonCharactersToIgnore + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharactersToSkip(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil 
+} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(original["crypto_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKey"] = transformedCryptoKey + } + + transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(original["context"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContext); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["context"] = transformedContext + } + + transformedSurrogateInfoType, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(original["surrogate_info_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSurrogateInfoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["surrogateInfoType"] = transformedSurrogateInfoType + } + + transformedCommonAlphabet, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(original["common_alphabet"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCommonAlphabet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["commonAlphabet"] = transformedCommonAlphabet + } + + transformedCustomAlphabet, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(original["custom_alphabet"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCustomAlphabet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["customAlphabet"] = transformedCustomAlphabet + } + + transformedRadix, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(original["radix"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRadix); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["radix"] = transformedRadix + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(original["transient"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transient"] = transformedTransient + } + + transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unwrapped"] = transformedUnwrapped + } + + transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsWrapped"] = transformedKmsWrapped + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wrappedKey"] = transformedWrappedKey + } + + transformedCryptoKeyName, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKeyName"] = transformedCryptoKeyName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScoreScore(original["score"], d, config) + if err 
!= nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + 
transformed := make(map[string]interface{}) + + transformedLowerBound, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(original["lower_bound"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLowerBound); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["lowerBound"] = transformedLowerBound + } + + transformedUpperBound, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(original["upper_bound"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUpperBound); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["upperBound"] = transformedUpperBound + } + + transformedBucketSize, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(original["bucket_size"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucketSize); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucketSize"] = transformedBucketSize + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBound(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err 
:= expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigLowerBoundFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBound(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == 
nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigUpperBoundFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationFixedSizeBucketingConfigBucketSize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBuckets, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBuckets(original["buckets"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBuckets); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["buckets"] = transformedBuckets + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBuckets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMin, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMin(original["min"], 
d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMin); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["min"] = transformedMin + } + + transformedMax, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMax(original["max"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMax); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["max"] = transformedMax + } + + transformedReplacementValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(original["replacement_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReplacementValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["replacementValue"] = transformedReplacementValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMin(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else 
if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMinDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMax(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + 
transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxFloatValue(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsMaxDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValue(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedTimestampValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationBucketingConfigBucketsReplacementValueDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPartToExtract, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfigPartToExtract(original["part_to_extract"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPartToExtract); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["partToExtract"] = transformedPartToExtract + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationTimePartConfigPartToExtract(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(original["crypto_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKey"] = transformedCryptoKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKey(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(original["transient"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transient"] = transformedTransient + } + + transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unwrapped"] = transformedUnwrapped + } + + transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsWrapped"] = transformedKmsWrapped + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransient(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 
|| l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyTransientName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyUnwrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wrappedKey"] = transformedWrappedKey + } + + transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKeyName"] = transformedCryptoKeyName + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoHashConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUpperBoundDays, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(original["upper_bound_days"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUpperBoundDays); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["upperBoundDays"] = transformedUpperBoundDays + } + + transformedLowerBoundDays, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(original["lower_bound_days"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLowerBoundDays); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["lowerBoundDays"] = transformedLowerBoundDays + } + + transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContext(original["context"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContext); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["context"] = transformedContext + } + + transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(original["crypto_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKey"] = transformedCryptoKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigUpperBoundDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigLowerBoundDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContext(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) 
+ if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContextName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigContextName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(original["transient"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transient"] = transformedTransient + } + + transformedUnwrapped, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unwrapped"] = transformedUnwrapped + } + + transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsWrapped"] = transformedKmsWrapped + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransient(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyTransientName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyUnwrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw 
:= l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wrappedKey"] = transformedWrappedKey + } + + transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKeyName"] = transformedCryptoKeyName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationDateShiftConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(original["crypto_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKey"] = transformedCryptoKey + } + + transformedSurrogateInfoType, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(original["surrogate_info_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSurrogateInfoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["surrogateInfoType"] = transformedSurrogateInfoType + } + + transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(original["context"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContext); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["context"] = transformedContext + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(original["transient"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransient); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transient"] = transformedTransient + } + + transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUnwrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unwrapped"] = transformedUnwrapped + } + + transformedKmsWrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsWrapped"] = transformedKmsWrapped + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := 
v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWrappedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wrappedKey"] = transformedWrappedKey + } + + transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cryptoKeyName"] = transformedCryptoKeyName + } + + return transformed, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWordList, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(original["word_list"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWordList); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wordList"] = transformedWordList + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordList(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWords, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(original["words"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWords); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["words"] = 
transformedWords + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsFieldTransformationsInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceDictionaryConfigWordListWords(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCondition, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsCondition(original["condition"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCondition); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["condition"] = transformedCondition + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsCondition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExpressions, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressions(original["expressions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpressions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["expressions"] = transformedExpressions + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLogicalOperator, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsLogicalOperator(original["logical_operator"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLogicalOperator); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["logicalOperator"] = transformedLogicalOperator + } + + transformedConditions, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditions(original["conditions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["conditions"] = transformedConditions + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsLogicalOperator(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedConditions, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditions(original["conditions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["conditions"] = transformedConditions + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedField, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsField(original["field"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedField); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["field"] = transformedField + } + + transformedOperator, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsOperator(original["operator"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOperator); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["operator"] = transformedOperator + } + + transformedValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValue(original["value"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsField(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsFieldName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsFieldName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsOperator(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + 
original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueIntegerValue(original["integer_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIntegerValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["integerValue"] = transformedIntegerValue + } + + transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueFloatValue(original["float_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFloatValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["floatValue"] = transformedFloatValue + } + + transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueStringValue(original["string_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStringValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["stringValue"] = transformedStringValue + } + + transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueBooleanValue(original["boolean_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBooleanValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["booleanValue"] = transformedBooleanValue + } + + transformedTimestampValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimestampValue(original["timestamp_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timestampValue"] = transformedTimestampValue + } + + transformedTimeValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValue(original["time_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timeValue"] = transformedTimeValue + } + + transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValue(original["date_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDateValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dateValue"] = transformedDateValue + } + + transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDayOfWeekValue(original["day_of_week_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeekValue"] = transformedDayOfWeekValue + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueIntegerValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + 
return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueFloatValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueStringValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueBooleanValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimestampValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = 
transformedHours + } + + transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueTimeValueNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDateValueDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigRecordTransformationsRecordSuppressionsConditionExpressionsConditionsConditionsValueDayOfWeekValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceDataLossPreventionDeidentifyTemplateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + newObj := make(map[string]interface{}) + newObj["deidentifyTemplate"] = obj + templateIdProp, ok := d.GetOk("template_id") + if ok && templateIdProp != nil { + newObj["templateId"] = templateIdProp + } + return newObj, nil +} + +func 
resourceDataLossPreventionDeidentifyTemplateUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + newObj := make(map[string]interface{}) + newObj["deidentifyTemplate"] = obj + return newObj, nil +} + +func resourceDataLossPreventionDeidentifyTemplateDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + if err := d.Set("template_id", flattenDataLossPreventionDeidentifyTemplateName(res["name"], d, config)); err != nil { + return nil, fmt.Errorf("Error reading DeidentifyTemplate: %s", err) + } + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_inspect_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_inspect_template.go new file mode 100644 index 0000000000..b242c3d36e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_inspect_template.go @@ -0,0 +1,3133 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datalossprevention + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDataLossPreventionInspectTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceDataLossPreventionInspectTemplateCreate, + Read: resourceDataLossPreventionInspectTemplateRead, + Update: resourceDataLossPreventionInspectTemplateUpdate, + Delete: resourceDataLossPreventionInspectTemplateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataLossPreventionInspectTemplateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The parent of the inspect template in any of the following formats: + +* 'projects/{{project}}' +* 'projects/{{project}}/locations/{{location}}' +* 'organizations/{{organization_id}}' +* 'organizations/{{organization_id}}/locations/{{location}}'`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of the inspect template.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `User set display name of the inspect template.`, + }, + "inspect_config": { + Type: schema.TypeList, + Optional: true, + Description: `The core content of the template.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "content_options": { + Type: schema.TypeList, + Optional: true, 
+ Description: `List of options defining data content to scan. If empty, text, images, and other content will be included. Possible values: ["CONTENT_TEXT", "CONTENT_IMAGE"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"CONTENT_TEXT", "CONTENT_IMAGE"}), + }, + }, + "custom_info_types": { + Type: schema.TypeList, + Optional: true, + Description: `Custom info types to be used. See https://cloud.google.com/dlp/docs/creating-custom-infotypes to learn more.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "info_type": { + Type: schema.TypeList, + Required: true, + Description: `CustomInfoType can either be a new infoType, or an extension of built-in infoType, when the name matches one of existing +infoTypes and that infoType is specified in 'info_types' field. Specifying the latter adds findings to the +one detected by the system. If built-in info type is not specified in 'info_types' list then the name is +treated as a custom info type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names +listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. 
Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version name for this InfoType.`, + }, + }, + }, + }, + "dictionary": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Dictionary which defines the rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_storage_path": { + Type: schema.TypeList, + Optional: true, + Description: `Newline-delimited file of words in Cloud Storage. Only a single file is accepted.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: `A url representing a file or path (no wildcards) in Cloud Storage. Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, + }, + }, + }, + }, + "word_list": { + Type: schema.TypeList, + Optional: true, + Description: `List of words or phrases to search for.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "words": { + Type: schema.TypeList, + Required: true, + Description: `Words or phrases defining the dictionary. The dictionary must contain at least one +phrase and every phrase must contain at least 2 characters that are letters or digits.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "exclusion_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"EXCLUSION_TYPE_EXCLUDE", ""}), + Description: `If set to EXCLUSION_TYPE_EXCLUDE this infoType will not cause a finding to be returned. It still can be used for rules matching. 
Possible values: ["EXCLUSION_TYPE_EXCLUDE"]`, + }, + "likelihood": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY", ""}), + Description: `Likelihood to return for this CustomInfoType. This base value can be altered by a detection rule if the finding meets the criteria +specified by the rule. Default value: "VERY_LIKELY" Possible values: ["VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY"]`, + Default: "VERY_LIKELY", + }, + "regex": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Regular expression which defines the rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pattern": { + Type: schema.TypeString, + Required: true, + Description: `Pattern defining the regular expression. +Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, + }, + "group_indexes": { + Type: schema.TypeList, + Optional: true, + Description: `The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included.`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. 
Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "stored_type": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A reference to a StoredInfoType to use with scanning.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Resource name of the requested StoredInfoType, for example 'organizations/433245324/storedInfoTypes/432452342' +or 'projects/project-id/storedInfoTypes/432452342'.`, + }, + }, + }, + }, + "surrogate_type": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Message for detecting output from deidentification transformations that support reversing.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + }, + }, + }, + "exclude_info_types": { + Type: schema.TypeBool, + Optional: true, + Description: `When true, excludes type information of the findings.`, + }, + "include_quote": { + Type: schema.TypeBool, + Optional: true, + Description: `When true, a contextual quote from the data that triggered a finding is included in the response.`, + }, + "info_types": { + Type: schema.TypeList, + Optional: true, + Description: `Restricts what infoTypes to look for. The values must correspond to InfoType values returned by infoTypes.list +or listed at https://cloud.google.com/dlp/docs/infotypes-reference. + +When no InfoTypes or CustomInfoTypes are specified in a request, the system may automatically choose what detectors to run. +By default this may be all types, but may change over time as detectors are updated.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type. 
Either a name of your choosing when creating a CustomInfoType, or one of the names listed +at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version of the information type to use. By default, the version is set to stable`, + }, + }, + }, + }, + "limits": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration to control the number of findings returned.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_findings_per_item": { + Type: schema.TypeInt, + Required: true, + Description: `Max number of findings that will be returned for each item scanned. The maximum returned is 2000.`, + }, + "max_findings_per_request": { + Type: schema.TypeInt, + Required: true, + Description: `Max number of findings that will be returned per request/job. The maximum returned is 2000.`, + }, + "max_findings_per_info_type": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration of findings limit given for specified infoTypes.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "info_type": { + Type: schema.TypeList, + Required: true, + Description: `Type of information the findings limit applies to. Only one limit per infoType should be provided. 
If InfoTypeLimit does +not have an infoType, the DLP API applies the limit against all infoTypes that are found but not +specified in another InfoTypeLimit.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed +at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version name for this InfoType.`, + }, + }, + }, + }, + "max_findings": { + Type: schema.TypeInt, + Required: true, + Description: `Max findings limit for the given infoType.`, + }, + }, + }, + }, + }, + }, + }, + "min_likelihood": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY", ""}), + Description: `Only returns findings equal or above this threshold. See https://cloud.google.com/dlp/docs/likelihood for more info Default value: "POSSIBLE" Possible values: ["VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY"]`, + Default: "POSSIBLE", + }, + "rule_set": { + Type: schema.TypeList, + Optional: true, + Description: `Set of rules to apply to the findings for this InspectConfig. 
Exclusion rules, contained in the set are executed in the end, +other rules are executed in the order they are specified for each info type.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "info_types": { + Type: schema.TypeList, + Required: true, + Description: `List of infoTypes this rule set is applied to.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed +at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version name for this InfoType.`, + }, + }, + }, + }, + "rules": { + Type: schema.TypeList, + Required: true, + Description: `Set of rules to be applied to infoTypes. 
The rules are applied in order.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "exclusion_rule": { + Type: schema.TypeList, + Optional: true, + Description: `The rule that specifies conditions when findings of infoTypes specified in InspectionRuleSet are removed from results.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "matching_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"MATCHING_TYPE_FULL_MATCH", "MATCHING_TYPE_PARTIAL_MATCH", "MATCHING_TYPE_INVERSE_MATCH"}), + Description: `How the rule is applied. See the documentation for more information: https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#MatchingType Possible values: ["MATCHING_TYPE_FULL_MATCH", "MATCHING_TYPE_PARTIAL_MATCH", "MATCHING_TYPE_INVERSE_MATCH"]`, + }, + "dictionary": { + Type: schema.TypeList, + Optional: true, + Description: `Dictionary which defines the rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_storage_path": { + Type: schema.TypeList, + Optional: true, + Description: `Newline-delimited file of words in Cloud Storage. Only a single file is accepted.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: `A url representing a file or path (no wildcards) in Cloud Storage. Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, + }, + }, + }, + }, + "word_list": { + Type: schema.TypeList, + Optional: true, + Description: `List of words or phrases to search for.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "words": { + Type: schema.TypeList, + Required: true, + Description: `Words or phrases defining the dictionary. 
The dictionary must contain at least one +phrase and every phrase must contain at least 2 characters that are letters or digits.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "exclude_by_hotword": { + Type: schema.TypeList, + Optional: true, + Description: `Drop if the hotword rule is contained in the proximate context. +For tabular data, the context includes the column name.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hotword_regex": { + Type: schema.TypeList, + Required: true, + Description: `Regular expression pattern defining what qualifies as a hotword.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pattern": { + Type: schema.TypeString, + Required: true, + Description: `Pattern defining the regular expression. Its syntax +(https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, + }, + "group_indexes": { + Type: schema.TypeList, + Optional: true, + Description: `The index of the submatch to extract as findings. When not specified, +the entire match is returned. No more than 3 may be included.`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "proximity": { + Type: schema.TypeList, + Required: true, + Description: `Proximity of the finding within which the entire hotword must reside. The total length of the window cannot +exceed 1000 characters. Note that the finding itself will be included in the window, so that hotwords may be +used to match substrings of the finding itself. 
For example, the certainty of a phone number regex +'(\d{3}) \d{3}-\d{4}' could be adjusted upwards if the area code is known to be the local area code of a company +office using the hotword regex '(xxx)', where 'xxx' is the area code in question.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "window_after": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of characters after the finding to consider.`, + }, + "window_before": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of characters before the finding to consider.`, + }, + }, + }, + }, + }, + }, + }, + "exclude_info_types": { + Type: schema.TypeList, + Optional: true, + Description: `Set of infoTypes for which findings would affect this rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "info_types": { + Type: schema.TypeList, + Required: true, + Description: `If a finding is matched by any of the infoType detectors listed here, the finding will be excluded from the scan results.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed +at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. 
Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version name for this InfoType.`, + }, + }, + }, + }, + }, + }, + }, + "regex": { + Type: schema.TypeList, + Optional: true, + Description: `Regular expression which defines the rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pattern": { + Type: schema.TypeString, + Required: true, + Description: `Pattern defining the regular expression. +Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, + }, + "group_indexes": { + Type: schema.TypeList, + Optional: true, + Description: `The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included.`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + }, + }, + }, + "hotword_rule": { + Type: schema.TypeList, + Optional: true, + Description: `Hotword-based detection rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hotword_regex": { + Type: schema.TypeList, + Required: true, + Description: `Regular expression pattern defining what qualifies as a hotword.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pattern": { + Type: schema.TypeString, + Required: true, + Description: `Pattern defining the regular expression. Its syntax +(https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, + }, + "group_indexes": { + Type: schema.TypeList, + Optional: true, + Description: `The index of the submatch to extract as findings. When not specified, +the entire match is returned. 
No more than 3 may be included.`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "likelihood_adjustment": { + Type: schema.TypeList, + Required: true, + Description: `Likelihood adjustment to apply to all matching findings.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fixed_likelihood": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY", ""}), + Description: `Set the likelihood of a finding to a fixed value. Either this or relative_likelihood can be set. Possible values: ["VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY"]`, + }, + "relative_likelihood": { + Type: schema.TypeInt, + Optional: true, + Description: `Increase or decrease the likelihood by the specified number of levels. For example, +if a finding would be POSSIBLE without the detection rule and relativeLikelihood is 1, +then it is upgraded to LIKELY, while a value of -1 would downgrade it to UNLIKELY. +Likelihood may never drop below VERY_UNLIKELY or exceed VERY_LIKELY, so applying an +adjustment of 1 followed by an adjustment of -1 when base likelihood is VERY_LIKELY +will result in a final likelihood of LIKELY. Either this or fixed_likelihood can be set.`, + }, + }, + }, + }, + "proximity": { + Type: schema.TypeList, + Required: true, + Description: `Proximity of the finding within which the entire hotword must reside. The total length of the window cannot +exceed 1000 characters. Note that the finding itself will be included in the window, so that hotwords may be +used to match substrings of the finding itself. 
For example, the certainty of a phone number regex +'(\d{3}) \d{3}-\d{4}' could be adjusted upwards if the area code is known to be the local area code of a company +office using the hotword regex '(xxx)', where 'xxx' is the area code in question.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "window_after": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of characters after the finding to consider. Either this or window_before must be specified`, + }, + "window_before": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of characters before the finding to consider. Either this or window_after must be specified`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "template_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The template id can contain uppercase and lowercase letters, numbers, and hyphens; +that is, it must match the regular expression: [a-zA-Z\d-_]+. The maximum length is +100 characters. Can be empty to allow the system to generate one.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the inspect template. 
Set by the server.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataLossPreventionInspectTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandDataLossPreventionInspectTemplateDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandDataLossPreventionInspectTemplateDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + inspectConfigProp, err := expandDataLossPreventionInspectTemplateInspectConfig(d.Get("inspect_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("inspect_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(inspectConfigProp)) && (ok || !reflect.DeepEqual(v, inspectConfigProp)) { + obj["inspectConfig"] = inspectConfigProp + } + + obj, err = resourceDataLossPreventionInspectTemplateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/inspectTemplates") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new InspectTemplate: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating InspectTemplate: %s", err) + } + if err := d.Set("name", flattenDataLossPreventionInspectTemplateName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/inspectTemplates/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating InspectTemplate %q: %#v", d.Id(), res) + + return resourceDataLossPreventionInspectTemplateRead(d, meta) +} + +func resourceDataLossPreventionInspectTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/inspectTemplates/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataLossPreventionInspectTemplate %q", d.Id())) + } + + res, err = resourceDataLossPreventionInspectTemplateDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing DataLossPreventionInspectTemplate because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenDataLossPreventionInspectTemplateName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading InspectTemplate: %s", err) + } + if err := d.Set("description", flattenDataLossPreventionInspectTemplateDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading InspectTemplate: %s", err) + } + if err := d.Set("display_name", flattenDataLossPreventionInspectTemplateDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading InspectTemplate: %s", err) + } + if err := d.Set("inspect_config", flattenDataLossPreventionInspectTemplateInspectConfig(res["inspectConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading InspectTemplate: %s", err) + } + + return nil +} + +func resourceDataLossPreventionInspectTemplateUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandDataLossPreventionInspectTemplateDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandDataLossPreventionInspectTemplateDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + inspectConfigProp, err := 
expandDataLossPreventionInspectTemplateInspectConfig(d.Get("inspect_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("inspect_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, inspectConfigProp)) { + obj["inspectConfig"] = inspectConfigProp + } + + obj, err = resourceDataLossPreventionInspectTemplateUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/inspectTemplates/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating InspectTemplate %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("inspect_config") { + updateMask = append(updateMask, "inspectConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating InspectTemplate %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating InspectTemplate %q: %#v", d.Id(), res) + } + + return resourceDataLossPreventionInspectTemplateRead(d, meta) +} + +func resourceDataLossPreventionInspectTemplateDelete(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/inspectTemplates/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting InspectTemplate %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "InspectTemplate") + } + + log.Printf("[DEBUG] Finished deleting InspectTemplate %q: %#v", d.Id(), res) + return nil +} + +func resourceDataLossPreventionInspectTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // Custom import to handle parent possibilities + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + parts := strings.Split(d.Get("name").(string), "/") + if len(parts) == 6 { + if err := d.Set("name", parts[5]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else if len(parts) == 4 { + if err := d.Set("name", parts[3]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else { + return nil, fmt.Errorf("Unexpected import id: %s, expected form {{parent}}/inspectTemplate/{{name}}", d.Get("name").(string)) + } + // Remove "/inspectTemplate/{{name}}" from the id + parts = parts[:len(parts)-2] + if err := d.Set("parent", strings.Join(parts, "/")); err != nil { + return nil, fmt.Errorf("Error 
setting parent: %s", err) + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/inspectTemplates/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDataLossPreventionInspectTemplateName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenDataLossPreventionInspectTemplateDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["exclude_info_types"] = + flattenDataLossPreventionInspectTemplateInspectConfigExcludeInfoTypes(original["excludeInfoTypes"], d, config) + transformed["include_quote"] = + flattenDataLossPreventionInspectTemplateInspectConfigIncludeQuote(original["includeQuote"], d, config) + transformed["min_likelihood"] = + flattenDataLossPreventionInspectTemplateInspectConfigMinLikelihood(original["minLikelihood"], d, config) + transformed["limits"] = + flattenDataLossPreventionInspectTemplateInspectConfigLimits(original["limits"], d, config) + transformed["info_types"] = + flattenDataLossPreventionInspectTemplateInspectConfigInfoTypes(original["infoTypes"], d, config) + transformed["content_options"] = + flattenDataLossPreventionInspectTemplateInspectConfigContentOptions(original["contentOptions"], d, config) + transformed["rule_set"] = + 
flattenDataLossPreventionInspectTemplateInspectConfigRuleSet(original["ruleSet"], d, config) + transformed["custom_info_types"] = + flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypes(original["customInfoTypes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigExcludeInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigIncludeQuote(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigMinLikelihood(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigLimits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["max_findings_per_item"] = + flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerItem(original["maxFindingsPerItem"], d, config) + transformed["max_findings_per_request"] = + flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerRequest(original["maxFindingsPerRequest"], d, config) + transformed["max_findings_per_info_type"] = + flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType(original["maxFindingsPerInfoType"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerItem(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } 
+ + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerRequest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "info_type": flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(original["infoType"], d, config), + "max_findings": flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(original["maxFindings"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + 
flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(original["name"], d, config) + transformed["version"] = + flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeVersion(original["version"], d, config) + transformed["sensitivity_score"] = + flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScore(original["sensitivityScore"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionInspectTemplateInspectConfigInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesName(original["name"], d, config), + "version": flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesVersion(original["version"], d, config), + "sensitivity_score": flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesSensitivityScore(original["sensitivityScore"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesSensitivityScoreScore(original["score"], d, config) + return 
[]interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigContentOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "info_types": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypes(original["infoTypes"], d, config), + "rules": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRules(original["rules"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesName(original["name"], d, config), + "version": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesVersion(original["version"], d, config), + "sensitivity_score": 
flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesSensitivityScore(original["sensitivityScore"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "hotword_rule": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRule(original["hotwordRule"], d, config), + "exclusion_rule": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRule(original["exclusionRule"], d, 
config), + }) + } + return transformed +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hotword_regex"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegex(original["hotwordRegex"], d, config) + transformed["proximity"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximity(original["proximity"], d, config) + transformed["likelihood_adjustment"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(original["likelihoodAdjustment"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pattern"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(original["pattern"], d, config) + transformed["group_indexes"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(original["groupIndexes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["window_before"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(original["windowBefore"], d, config) + transformed["window_after"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(original["windowAfter"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["fixed_likelihood"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(original["fixedLikelihood"], d, config) + transformed["relative_likelihood"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(original["relativeLikelihood"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["matching_type"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleMatchingType(original["matchingType"], d, config) + transformed["dictionary"] = + 
flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionary(original["dictionary"], d, config) + transformed["regex"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegex(original["regex"], d, config) + transformed["exclude_info_types"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(original["excludeInfoTypes"], d, config) + transformed["exclude_by_hotword"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotword(original["excludeByHotword"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleMatchingType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionary(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["word_list"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(original["wordList"], d, config) + transformed["cloud_storage_path"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(original["cloudStoragePath"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["words"] = + 
flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(original["words"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["path"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(original["path"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pattern"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexPattern(original["pattern"], d, config) + transformed["group_indexes"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(original["groupIndexes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexPattern(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["info_types"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(original["infoTypes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(original["name"], d, config), + "version": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesVersion(original["version"], d, config), + "sensitivity_score": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScore(original["sensitivityScore"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hotword_regex"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegex(original["hotwordRegex"], d, config) + transformed["proximity"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximity(original["proximity"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegex(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pattern"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexPattern(original["pattern"], d, config) + transformed["group_indexes"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexGroupIndexes(original["groupIndexes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexPattern(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexGroupIndexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["window_before"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowBefore(original["windowBefore"], d, config) + transformed["window_after"] = + flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowAfter(original["windowAfter"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowBefore(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowAfter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "info_type": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoType(original["infoType"], d, config), + "likelihood": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesLikelihood(original["likelihood"], d, config), + "exclusion_type": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesExclusionType(original["exclusionType"], d, config), + "sensitivity_score": 
flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesSensitivityScore(original["sensitivityScore"], d, config), + "regex": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegex(original["regex"], d, config), + "dictionary": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionary(original["dictionary"], d, config), + "surrogate_type": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesSurrogateType(original["surrogateType"], d, config), + "stored_type": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredType(original["storedType"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeName(original["name"], d, config) + transformed["version"] = + flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeVersion(original["version"], d, config) + transformed["sensitivity_score"] = + flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeSensitivityScore(original["sensitivityScore"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeSensitivityScore(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesLikelihood(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesExclusionType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + 
transformed := make(map[string]interface{}) + transformed["pattern"] = + flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexPattern(original["pattern"], d, config) + transformed["group_indexes"] = + flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexGroupIndexes(original["groupIndexes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexPattern(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexGroupIndexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionary(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["word_list"] = + flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordList(original["wordList"], d, config) + transformed["cloud_storage_path"] = + flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath(original["cloudStoragePath"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["words"] = + flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordListWords(original["words"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordListWords(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["path"] = + flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(original["path"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesSurrogateType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredTypeName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDataLossPreventionInspectTemplateDescription(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExcludeInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigExcludeInfoTypes(original["exclude_info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExcludeInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["excludeInfoTypes"] = transformedExcludeInfoTypes + } + + transformedIncludeQuote, err := expandDataLossPreventionInspectTemplateInspectConfigIncludeQuote(original["include_quote"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeQuote); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includeQuote"] = transformedIncludeQuote + } + + transformedMinLikelihood, err := expandDataLossPreventionInspectTemplateInspectConfigMinLikelihood(original["min_likelihood"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinLikelihood); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minLikelihood"] = transformedMinLikelihood + } + + transformedLimits, err := expandDataLossPreventionInspectTemplateInspectConfigLimits(original["limits"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLimits); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["limits"] = 
transformedLimits + } + + transformedInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigInfoTypes(original["info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoTypes"] = transformedInfoTypes + } + + transformedContentOptions, err := expandDataLossPreventionInspectTemplateInspectConfigContentOptions(original["content_options"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContentOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["contentOptions"] = transformedContentOptions + } + + transformedRuleSet, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSet(original["rule_set"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRuleSet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ruleSet"] = transformedRuleSet + } + + transformedCustomInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypes(original["custom_info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCustomInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["customInfoTypes"] = transformedCustomInfoTypes + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigExcludeInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigIncludeQuote(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigMinLikelihood(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + 
return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigLimits(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMaxFindingsPerItem, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerItem(original["max_findings_per_item"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxFindingsPerItem); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxFindingsPerItem"] = transformedMaxFindingsPerItem + } + + transformedMaxFindingsPerRequest, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerRequest(original["max_findings_per_request"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxFindingsPerRequest); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxFindingsPerRequest"] = transformedMaxFindingsPerRequest + } + + transformedMaxFindingsPerInfoType, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType(original["max_findings_per_info_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxFindingsPerInfoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxFindingsPerInfoType"] = transformedMaxFindingsPerInfoType + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerItem(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerRequest(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInfoType, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(original["info_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoType"] = transformedInfoType + } + + transformedMaxFindings, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(original["max_findings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxFindings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxFindings"] = transformedMaxFindings + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := 
expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + 
+func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigInfoTypesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionInspectTemplateInspectConfigInfoTypesVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionInspectTemplateInspectConfigInfoTypesSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigInfoTypesName(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigInfoTypesVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigInfoTypesSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionInspectTemplateInspectConfigInfoTypesSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigInfoTypesSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigContentOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypes(original["info_types"], d, config) + if 
err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoTypes"] = transformedInfoTypes + } + + transformedRules, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRules(original["rules"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRules); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rules"] = transformedRules + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + req = 
append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHotwordRule, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRule(original["hotword_rule"], d, config) + if err != nil { + return 
nil, err + } else if val := reflect.ValueOf(transformedHotwordRule); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hotwordRule"] = transformedHotwordRule + } + + transformedExclusionRule, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRule(original["exclusion_rule"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExclusionRule); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["exclusionRule"] = transformedExclusionRule + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHotwordRegex, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegex(original["hotword_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHotwordRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hotwordRegex"] = transformedHotwordRegex + } + + transformedProximity, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximity(original["proximity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProximity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["proximity"] = transformedProximity + } + + transformedLikelihoodAdjustment, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(original["likelihood_adjustment"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLikelihoodAdjustment); val.IsValid() 
&& !tpgresource.IsEmptyValue(val) { + transformed["likelihoodAdjustment"] = transformedLikelihoodAdjustment + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPattern, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(original["pattern"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPattern); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pattern"] = transformedPattern + } + + transformedGroupIndexes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(original["group_indexes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupIndexes"] = transformedGroupIndexes + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == 
nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWindowBefore, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(original["window_before"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWindowBefore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["windowBefore"] = transformedWindowBefore + } + + transformedWindowAfter, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(original["window_after"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWindowAfter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["windowAfter"] = transformedWindowAfter + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFixedLikelihood, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(original["fixed_likelihood"], d, config) + if err != nil { + return nil, err + } else 
if val := reflect.ValueOf(transformedFixedLikelihood); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fixedLikelihood"] = transformedFixedLikelihood + } + + transformedRelativeLikelihood, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(original["relative_likelihood"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRelativeLikelihood); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["relativeLikelihood"] = transformedRelativeLikelihood + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMatchingType, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleMatchingType(original["matching_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMatchingType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["matchingType"] = transformedMatchingType + } + + transformedDictionary, err := 
expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionary(original["dictionary"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDictionary); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dictionary"] = transformedDictionary + } + + transformedRegex, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegex(original["regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["regex"] = transformedRegex + } + + transformedExcludeInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(original["exclude_info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExcludeInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["excludeInfoTypes"] = transformedExcludeInfoTypes + } + + transformedExcludeByHotword, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotword(original["exclude_by_hotword"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExcludeByHotword); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["excludeByHotword"] = transformedExcludeByHotword + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleMatchingType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionary(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + 
original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWordList, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(original["word_list"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWordList); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wordList"] = transformedWordList + } + + transformedCloudStoragePath, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(original["cloud_storage_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudStoragePath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudStoragePath"] = transformedCloudStoragePath + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWords, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(original["words"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWords); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["words"] = transformedWords + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPattern, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexPattern(original["pattern"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPattern); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pattern"] = transformedPattern + } + + transformedGroupIndexes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(original["group_indexes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupIndexes"] = 
transformedGroupIndexes + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexPattern(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(original["info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoTypes"] = transformedInfoTypes + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScoreScore(original["score"], d, 
config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHotwordRegex, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegex(original["hotword_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHotwordRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hotwordRegex"] = transformedHotwordRegex + } + + transformedProximity, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximity(original["proximity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProximity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["proximity"] = transformedProximity + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPattern, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexPattern(original["pattern"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPattern); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pattern"] = transformedPattern + } + + transformedGroupIndexes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexGroupIndexes(original["group_indexes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupIndexes"] = transformedGroupIndexes + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexPattern(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexGroupIndexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWindowBefore, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowBefore(original["window_before"], d, config) + if err != nil { + return nil, err + } 
else if val := reflect.ValueOf(transformedWindowBefore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["windowBefore"] = transformedWindowBefore + } + + transformedWindowAfter, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowAfter(original["window_after"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWindowAfter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["windowAfter"] = transformedWindowAfter + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowBefore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowAfter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInfoType, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoType(original["info_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoType"] = transformedInfoType + } + + transformedLikelihood, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesLikelihood(original["likelihood"], d, config) + if err != nil { + return nil, err + } 
else if val := reflect.ValueOf(transformedLikelihood); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["likelihood"] = transformedLikelihood + } + + transformedExclusionType, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesExclusionType(original["exclusion_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExclusionType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["exclusionType"] = transformedExclusionType + } + + transformedSensitivityScore, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + transformedRegex, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegex(original["regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["regex"] = transformedRegex + } + + transformedDictionary, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionary(original["dictionary"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDictionary); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dictionary"] = transformedDictionary + } + + transformedSurrogateType, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesSurrogateType(original["surrogate_type"], d, config) + if err != nil { + return nil, err + } else { + transformed["surrogateType"] = transformedSurrogateType + } + + transformedStoredType, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredType(original["stored_type"], d, config) + if 
err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStoredType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["storedType"] = transformedStoredType + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesLikelihood(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesExclusionType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := 
make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPattern, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexPattern(original["pattern"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPattern); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pattern"] = transformedPattern + } + + transformedGroupIndexes, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexGroupIndexes(original["group_indexes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupIndexes"] = transformedGroupIndexes + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexPattern(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexGroupIndexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionary(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWordList, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordList(original["word_list"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWordList); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wordList"] = transformedWordList + } + + transformedCloudStoragePath, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath(original["cloud_storage_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudStoragePath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudStoragePath"] = transformedCloudStoragePath + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordList(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWords, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordListWords(original["words"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWords); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["words"] = transformedWords + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordListWords(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesSurrogateType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] 
== nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredTypeName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredTypeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceDataLossPreventionInspectTemplateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + newObj := make(map[string]interface{}) + newObj["inspectTemplate"] = obj + templateIdProp, ok := d.GetOk("template_id") + if ok && templateIdProp != nil { + newObj["templateId"] = templateIdProp + } + return newObj, nil +} + +func resourceDataLossPreventionInspectTemplateUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + newObj := make(map[string]interface{}) + newObj["inspectTemplate"] = obj + return newObj, nil +} + +func resourceDataLossPreventionInspectTemplateDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + if err := d.Set("template_id", flattenDataLossPreventionInspectTemplateName(res["name"], d, config)); err != nil { + return nil, fmt.Errorf("Error reading InspectTemplate: %s", err) + } + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_job_trigger.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_job_trigger.go new file mode 100644 index 0000000000..51cb8cc90d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_job_trigger.go @@ -0,0 +1,5807 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package datalossprevention + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDataLossPreventionJobTrigger() *schema.Resource { + return &schema.Resource{ + Create: resourceDataLossPreventionJobTriggerCreate, + Read: resourceDataLossPreventionJobTriggerRead, + Update: resourceDataLossPreventionJobTriggerUpdate, + Delete: resourceDataLossPreventionJobTriggerDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataLossPreventionJobTriggerImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: 
map[string]*schema.Schema{ + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The parent of the trigger, either in the format 'projects/{{project}}' +or 'projects/{{project}}/locations/{{location}}'`, + }, + "triggers": { + Type: schema.TypeList, + Required: true, + Description: `What event needs to occur for a new job to be started.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "manual": { + Type: schema.TypeList, + Optional: true, + Description: `For use with hybrid jobs. Jobs must be manually created and finished.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "schedule": { + Type: schema.TypeList, + Optional: true, + Description: `Schedule for triggered jobs`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "recurrence_period_duration": { + Type: schema.TypeString, + Optional: true, + Description: `With this option a job is started a regular periodic basis. For example: every day (86400 seconds). + +A scheduled start time will be skipped if the previous execution has not ended when its scheduled time occurs. + +This value must be set to a time duration greater than or equal to 1 day and can be no longer than 60 days. + +A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, + }, + }, + }, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of the job trigger.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `User set display name of the job trigger.`, + }, + "inspect_job": { + Type: schema.TypeList, + Optional: true, + Description: `Controls what and how to inspect for findings.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_config": { + Type: schema.TypeList, + Required: true, + Description: `Information on where to inspect`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "big_query_options": { + Type: schema.TypeList, + Optional: true, + Description: `Options defining BigQuery table and row identifiers.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table_reference": { + Type: schema.TypeList, + Required: true, + Description: `Set of files to scan.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + Description: `The dataset ID of the table.`, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + Description: `The Google Cloud Platform project ID of the project containing the table.`, + }, + "table_id": { + Type: schema.TypeString, + Required: true, + Description: `The name of the table.`, + }, + }, + }, + }, + "excluded_fields": { + Type: schema.TypeList, + Optional: true, + Description: `References to fields excluded from scanning. 
+This allows you to skip inspection of entire columns which you know have no findings.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name describing the field excluded from scanning.`, + }, + }, + }, + }, + "identifying_fields": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies the BigQuery fields that will be returned with findings. +If not specified, no identifying fields will be returned for findings.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of a BigQuery field to be returned with the findings.`, + }, + }, + }, + }, + "included_fields": { + Type: schema.TypeList, + Optional: true, + Description: `Limit scanning only to these fields.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name describing the field to which scanning is limited.`, + }, + }, + }, + }, + "rows_limit": { + Type: schema.TypeInt, + Optional: true, + Description: `Max number of rows to scan. If the table has more rows than this value, the rest of the rows are omitted. +If not set, or if set to 0, all rows will be scanned. Only one of rowsLimit and rowsLimitPercent can be +specified. Cannot be used in conjunction with TimespanConfig.`, + }, + "rows_limit_percent": { + Type: schema.TypeInt, + Optional: true, + Description: `Max percentage of rows to scan. The rest are omitted. The number of rows scanned is rounded down. +Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of +rowsLimit and rowsLimitPercent can be specified. 
Cannot be used in conjunction with TimespanConfig.`, + }, + "sample_method": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"TOP", "RANDOM_START", ""}), + Description: `How to sample rows if not all rows are scanned. Meaningful only when used in conjunction with either +rowsLimit or rowsLimitPercent. If not specified, rows are scanned in the order BigQuery reads them. Default value: "TOP" Possible values: ["TOP", "RANDOM_START"]`, + Default: "TOP", + }, + }, + }, + }, + "cloud_storage_options": { + Type: schema.TypeList, + Optional: true, + Description: `Options defining a file or a set of files within a Google Cloud Storage bucket.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "file_set": { + Type: schema.TypeList, + Required: true, + Description: `Set of files to scan.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "regex_file_set": { + Type: schema.TypeList, + Optional: true, + Description: `The regex-filtered set of files to scan.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket_name": { + Type: schema.TypeString, + Required: true, + Description: `The name of a Cloud Storage bucket.`, + }, + "exclude_regex": { + Type: schema.TypeList, + Optional: true, + Description: `A list of regular expressions matching file paths to exclude. All files in the bucket that match at +least one of these regular expressions will be excluded from the scan.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "include_regex": { + Type: schema.TypeList, + Optional: true, + Description: `A list of regular expressions matching file paths to include. All files in the bucket +that match at least one of these regular expressions will be included in the set of files, +except for those that also match an item in excludeRegex. 
Leaving this field empty will +match all files by default (this is equivalent to including .* in the list)`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + ExactlyOneOf: []string{"inspect_job.0.storage_config.0.cloud_storage_options.0.file_set.0.url", "inspect_job.0.storage_config.0.cloud_storage_options.0.file_set.0.regex_file_set"}, + }, + "url": { + Type: schema.TypeString, + Optional: true, + Description: `The Cloud Storage url of the file(s) to scan, in the format 'gs:///'. Trailing wildcard +in the path is allowed. + +If the url ends in a trailing slash, the bucket or directory represented by the url will be scanned +non-recursively (content in sub-directories will not be scanned). This means that 'gs://mybucket/' is +equivalent to 'gs://mybucket/*', and 'gs://mybucket/directory/' is equivalent to 'gs://mybucket/directory/*'.`, + ExactlyOneOf: []string{"inspect_job.0.storage_config.0.cloud_storage_options.0.file_set.0.url", "inspect_job.0.storage_config.0.cloud_storage_options.0.file_set.0.regex_file_set"}, + }, + }, + }, + }, + "bytes_limit_per_file": { + Type: schema.TypeInt, + Optional: true, + Description: `Max number of bytes to scan from a file. If a scanned file's size is bigger than this value +then the rest of the bytes are omitted.`, + }, + "bytes_limit_per_file_percent": { + Type: schema.TypeInt, + Optional: true, + Description: `Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. +Must be between 0 and 100, inclusively. Both 0 and 100 means no limit.`, + }, + "file_types": { + Type: schema.TypeList, + Optional: true, + Description: `List of file type groups to include in the scan. If empty, all files are scanned and available data +format processors are applied. In addition, the binary content of the selected files is always scanned as well. 
+Images are scanned only as binary if the specified region does not support image inspection and no fileTypes were specified. Possible values: ["BINARY_FILE", "TEXT_FILE", "IMAGE", "WORD", "PDF", "AVRO", "CSV", "TSV", "POWERPOINT", "EXCEL"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"BINARY_FILE", "TEXT_FILE", "IMAGE", "WORD", "PDF", "AVRO", "CSV", "TSV", "POWERPOINT", "EXCEL"}), + }, + }, + "files_limit_percent": { + Type: schema.TypeInt, + Optional: true, + Description: `Limits the number of files to scan to this percentage of the input FileSet. Number of files scanned is rounded down. +Must be between 0 and 100, inclusively. Both 0 and 100 means no limit.`, + }, + "sample_method": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"TOP", "RANDOM_START", ""}), + Description: `How to sample bytes if not all bytes are scanned. Meaningful only when used in conjunction with bytesLimitPerFile. +If not specified, scanning would start from the top. Possible values: ["TOP", "RANDOM_START"]`, + }, + }, + }, + }, + "datastore_options": { + Type: schema.TypeList, + Optional: true, + Description: `Options defining a data set within Google Cloud Datastore.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kind": { + Type: schema.TypeList, + Required: true, + Description: `A representation of a Datastore kind.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the Datastore kind.`, + }, + }, + }, + }, + "partition_id": { + Type: schema.TypeList, + Required: true, + Description: `Datastore partition ID. A partition ID identifies a grouping of entities. 
The grouping +is always by project and namespace, however the namespace ID may be empty.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the project to which the entities belong.`, + }, + "namespace_id": { + Type: schema.TypeString, + Optional: true, + Description: `If not empty, the ID of the namespace to which the entities belong.`, + }, + }, + }, + }, + }, + }, + }, + "hybrid_options": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration to control jobs where the content being inspected is outside of Google Cloud Platform.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A short description of where the data is coming from. Will be stored once in the job. 256 max length.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `To organize findings, these labels will be added to each finding. + +Label keys must be between 1 and 63 characters long and must conform to the following regular expression: '[a-z]([-a-z0-9]*[a-z0-9])?'. + +Label values must be between 0 and 63 characters long and must conform to the regular expression '([a-z]([-a-z0-9]*[a-z0-9])?)?'. + +No more than 10 labels can be associated with a given finding. + +Examples: +* '"environment" : "production"' +* '"pipeline" : "etl"'`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "required_finding_label_keys": { + Type: schema.TypeList, + Optional: true, + Description: `These are labels that each inspection request must include within their 'finding_labels' map. Request +may contain others, but any missing one of these will be rejected. + +Label keys must be between 1 and 63 characters long and must conform to the following regular expression: '[a-z]([-a-z0-9]*[a-z0-9])?'. 
+ +No more than 10 keys can be required.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "table_options": { + Type: schema.TypeList, + Optional: true, + Description: `If the container is a table, additional information to make findings meaningful such as the columns that are primary keys.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "identifying_fields": { + Type: schema.TypeList, + Optional: true, + Description: `The columns that are the primary keys for table objects included in ContentItem. A copy of this +cell's value will stored alongside alongside each finding so that the finding can be traced to +the specific row it came from. No more than 3 may be provided.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "timespan_config": { + Type: schema.TypeList, + Optional: true, + Description: `Information on where to inspect`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "timestamp_field": { + Type: schema.TypeList, + Required: true, + Description: `Information on where to inspect`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Specification of the field containing the timestamp of scanned items. Used for data sources like Datastore and BigQuery. + +For BigQuery: Required to filter out rows based on the given start and end times. If not specified and the table was +modified between the given start and end times, the entire table will be scanned. The valid data types of the timestamp +field are: INTEGER, DATE, TIMESTAMP, or DATETIME BigQuery column. + +For Datastore. Valid data types of the timestamp field are: TIMESTAMP. 
Datastore entity will be scanned if the +timestamp property does not exist or its value is empty or invalid.`, + }, + }, + }, + }, + "enable_auto_population_of_timespan_config": { + Type: schema.TypeBool, + Optional: true, + Description: `When the job is started by a JobTrigger we will automatically figure out a valid startTime to avoid +scanning files that have not been modified since the last time the JobTrigger executed. This will +be based on the time of the execution of the last run of the JobTrigger.`, + }, + "end_time": { + Type: schema.TypeString, + Optional: true, + Description: `Exclude files or rows newer than this value. If set to zero, no upper time limit is applied.`, + AtLeastOneOf: []string{"inspect_job.0.storage_config.0.timespan_config.0.start_time", "inspect_job.0.storage_config.0.timespan_config.0.end_time"}, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + Description: `Exclude files or rows older than this value.`, + AtLeastOneOf: []string{"inspect_job.0.storage_config.0.timespan_config.0.start_time", "inspect_job.0.storage_config.0.timespan_config.0.end_time"}, + }, + }, + }, + }, + }, + }, + }, + "actions": { + Type: schema.TypeList, + Optional: true, + Description: `A task to execute on the completion of a job.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deidentify": { + Type: schema.TypeList, + Optional: true, + Description: `Create a de-identified copy of the requested table or files.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_storage_output": { + Type: schema.TypeString, + Required: true, + Description: `User settable Cloud Storage bucket and folders to store de-identified files. + +This field must be set for cloud storage deidentification. + +The output Cloud Storage bucket must be different from the input bucket. + +De-identified files will overwrite files in the output path. 
+ +Form of: gs://bucket/folder/ or gs://bucket`, + }, + "file_types_to_transform": { + Type: schema.TypeList, + Optional: true, + Description: `List of user-specified file type groups to transform. If specified, only the files with these filetypes will be transformed. + +If empty, all supported files will be transformed. Supported types may be automatically added over time. + +If a file type is set in this field that isn't supported by the Deidentify action then the job will fail and will not be successfully created/started. Possible values: ["IMAGE", "TEXT_FILE", "CSV", "TSV"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"IMAGE", "TEXT_FILE", "CSV", "TSV"}), + }, + }, + "transformation_config": { + Type: schema.TypeList, + Optional: true, + Description: `User specified deidentify templates and configs for structured, unstructured, and image files.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deidentify_template": { + Type: schema.TypeString, + Optional: true, + Description: `If this template is specified, it will serve as the default de-identify template.`, + }, + "image_redact_template": { + Type: schema.TypeString, + Optional: true, + Description: `If this template is specified, it will serve as the de-identify template for images.`, + }, + "structured_deidentify_template": { + Type: schema.TypeString, + Optional: true, + Description: `If this template is specified, it will serve as the de-identify template for structured content such as delimited files and tables.`, + }, + }, + }, + }, + "transformation_details_storage_config": { + Type: schema.TypeList, + Optional: true, + Description: `Config for storing transformation details.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeList, + Required: true, + Description: `The BigQuery table in which to store the output.`, + MaxItems: 1, + Elem: &schema.Resource{ + 
Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the dataset containing this table.`, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the project containing this table.`, + }, + "table_id": { + Type: schema.TypeString, + Optional: true, + Description: `The ID of the table. The ID must contain only letters (a-z, +A-Z), numbers (0-9), or underscores (_). The maximum length +is 1,024 characters.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "job_notification_emails": { + Type: schema.TypeList, + Optional: true, + Description: `Sends an email when the job completes. The email goes to IAM project owners and technical Essential Contacts.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "pub_sub": { + Type: schema.TypeList, + Optional: true, + Description: `Publish a message into a given Pub/Sub topic when the job completes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic": { + Type: schema.TypeString, + Required: true, + Description: `Cloud Pub/Sub topic to send notifications to.`, + }, + }, + }, + }, + "publish_findings_to_cloud_data_catalog": { + Type: schema.TypeList, + Optional: true, + Description: `Publish findings of a DlpJob to Data Catalog.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "publish_summary_to_cscc": { + Type: schema.TypeList, + Optional: true, + Description: `Publish the result summary of a DlpJob to the Cloud Security Command Center.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "publish_to_stackdriver": { + Type: schema.TypeList, + Optional: true, + Description: `Enable Stackdriver metric dlp.googleapis.com/findingCount.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "save_findings": { + Type: 
schema.TypeList, + Optional: true, + Description: `If set, the detailed findings will be persisted to the specified OutputStorageConfig. Only a single instance of this action can be specified. Compatible with: Inspect, Risk`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "output_config": { + Type: schema.TypeList, + Required: true, + Description: `Information on where to store output`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeList, + Required: true, + Description: `Information on the location of the target BigQuery Table.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + Description: `Dataset ID of the table.`, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + Description: `The Google Cloud Platform project ID of the project containing the table.`, + }, + "table_id": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the table. If is not set a new one will be generated for you with the following format: +'dlp_googleapis_yyyy_mm_dd_[dlp_job_id]'. Pacific timezone will be used for generating the date details.`, + }, + }, + }, + }, + "output_schema": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"BASIC_COLUMNS", "GCS_COLUMNS", "DATASTORE_COLUMNS", "BIG_QUERY_COLUMNS", "ALL_COLUMNS", ""}), + Description: `Schema used for writing the findings for Inspect jobs. This field is only used for +Inspect and must be unspecified for Risk jobs. Columns are derived from the Finding +object. If appending to an existing table, any columns from the predefined schema +that are missing will be added. No columns in the existing table will be deleted. 
+ +If unspecified, then all available columns will be used for a new table or an (existing) +table with no schema, and no changes will be made to an existing table that has a schema. +Only for use with external storage. Possible values: ["BASIC_COLUMNS", "GCS_COLUMNS", "DATASTORE_COLUMNS", "BIG_QUERY_COLUMNS", "ALL_COLUMNS"]`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "inspect_config": { + Type: schema.TypeList, + Optional: true, + Description: `The core content of the template.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "custom_info_types": { + Type: schema.TypeList, + Optional: true, + Description: `Custom info types to be used. See https://cloud.google.com/dlp/docs/creating-custom-infotypes to learn more.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "info_type": { + Type: schema.TypeList, + Required: true, + Description: `CustomInfoType can either be a new infoType, or an extension of built-in infoType, when the name matches one of existing +infoTypes and that infoType is specified in 'info_types' field. Specifying the latter adds findings to the +one detected by the system. If built-in info type is not specified in 'info_types' list then the name is +treated as a custom info type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names +listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. 
This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version of the information type to use. By default, the version is set to stable.`, + }, + }, + }, + }, + "dictionary": { + Type: schema.TypeList, + Optional: true, + Description: `Dictionary which defines the rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_storage_path": { + Type: schema.TypeList, + Optional: true, + Description: `Newline-delimited file of words in Cloud Storage. Only a single file is accepted.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: `A url representing a file or path (no wildcards) in Cloud Storage. Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, + }, + }, + }, + }, + "word_list": { + Type: schema.TypeList, + Optional: true, + Description: `List of words or phrases to search for.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "words": { + Type: schema.TypeList, + Required: true, + Description: `Words or phrases defining the dictionary. 
The dictionary must contain at least one +phrase and every phrase must contain at least 2 characters that are letters or digits.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "exclusion_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"EXCLUSION_TYPE_EXCLUDE", ""}), + Description: `If set to EXCLUSION_TYPE_EXCLUDE this infoType will not cause a finding to be returned. It still can be used for rules matching. Possible values: ["EXCLUSION_TYPE_EXCLUDE"]`, + }, + "likelihood": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY", ""}), + Description: `Likelihood to return for this CustomInfoType. This base value can be altered by a detection rule if the finding meets the criteria +specified by the rule. Default value: "VERY_LIKELY" Possible values: ["VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY"]`, + Default: "VERY_LIKELY", + }, + "regex": { + Type: schema.TypeList, + Optional: true, + Description: `Regular expression which defines the rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pattern": { + Type: schema.TypeString, + Required: true, + Description: `Pattern defining the regular expression. +Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, + }, + "group_indexes": { + Type: schema.TypeList, + Optional: true, + Description: `The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included.`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. 
This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "stored_type": { + Type: schema.TypeList, + Optional: true, + Description: `A reference to a StoredInfoType to use with scanning.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Resource name of the requested StoredInfoType, for example 'organizations/433245324/storedInfoTypes/432452342' +or 'projects/project-id/storedInfoTypes/432452342'.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The creation timestamp of an inspectTemplate. Set by the server.`, + }, + }, + }, + }, + "surrogate_type": { + Type: schema.TypeList, + Optional: true, + Description: `Message for detecting output from deidentification transformations that support reversing.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + }, + }, + }, + "exclude_info_types": { + Type: schema.TypeBool, + Optional: true, + Description: `When true, excludes type information of the findings.`, + }, + "include_quote": { + Type: schema.TypeBool, + Optional: true, + Description: `When true, a contextual quote from the data that triggered a finding is included in the response.`, + }, + "info_types": { + Type: schema.TypeList, + Optional: true, + Description: `Restricts what infoTypes to look for. The values must correspond to InfoType values returned by infoTypes.list +or listed at https://cloud.google.com/dlp/docs/infotypes-reference. 
+ +When no InfoTypes or CustomInfoTypes are specified in a request, the system may automatically choose what detectors to run. +By default this may be all types, but may change over time as detectors are updated.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed +at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version of the information type to use. By default, the version is set to stable`, + }, + }, + }, + }, + "limits": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration to control the number of findings returned.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_findings_per_info_type": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration of findings limit given for specified infoTypes.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "info_type": { + Type: schema.TypeList, + Optional: true, + Description: `Type of information the findings limit applies to. Only one limit per infoType should be provided. 
If InfoTypeLimit does +not have an infoType, the DLP API applies the limit against all infoTypes that are found but not +specified in another InfoTypeLimit.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed +at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version of the information type to use. By default, the version is set to stable`, + }, + }, + }, + }, + "max_findings": { + Type: schema.TypeInt, + Optional: true, + Description: `Max findings limit for the given infoType.`, + }, + }, + }, + AtLeastOneOf: []string{"inspect_job.0.inspect_config.0.limits.0.max_findings_per_item", "inspect_job.0.inspect_config.0.limits.0.max_findings_per_request", "inspect_job.0.inspect_config.0.limits.0.max_findings_per_info_type"}, + }, + "max_findings_per_item": { + Type: schema.TypeInt, + Optional: true, + Description: `Max number of findings that will be returned for each item scanned. 
The maximum returned is 2000.`, + AtLeastOneOf: []string{"inspect_job.0.inspect_config.0.limits.0.max_findings_per_item", "inspect_job.0.inspect_config.0.limits.0.max_findings_per_request", "inspect_job.0.inspect_config.0.limits.0.max_findings_per_info_type"}, + }, + "max_findings_per_request": { + Type: schema.TypeInt, + Optional: true, + Description: `Max number of findings that will be returned per request/job. The maximum returned is 2000.`, + AtLeastOneOf: []string{"inspect_job.0.inspect_config.0.limits.0.max_findings_per_item", "inspect_job.0.inspect_config.0.limits.0.max_findings_per_request", "inspect_job.0.inspect_config.0.limits.0.max_findings_per_info_type"}, + }, + }, + }, + }, + "min_likelihood": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY", ""}), + Description: `Only returns findings equal or above this threshold. See https://cloud.google.com/dlp/docs/likelihood for more info Default value: "POSSIBLE" Possible values: ["VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY"]`, + Default: "POSSIBLE", + }, + "rule_set": { + Type: schema.TypeList, + Optional: true, + Description: `Set of rules to apply to the findings for this InspectConfig. Exclusion rules, contained in the set are executed in the end, +other rules are executed in the order they are specified for each info type.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rules": { + Type: schema.TypeList, + Required: true, + Description: `Set of rules to be applied to infoTypes. 
The rules are applied in order.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "exclusion_rule": { + Type: schema.TypeList, + Optional: true, + Description: `The rule that specifies conditions when findings of infoTypes specified in InspectionRuleSet are removed from results.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "matching_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"MATCHING_TYPE_FULL_MATCH", "MATCHING_TYPE_PARTIAL_MATCH", "MATCHING_TYPE_INVERSE_MATCH"}), + Description: `How the rule is applied. See the documentation for more information: https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#MatchingType Possible values: ["MATCHING_TYPE_FULL_MATCH", "MATCHING_TYPE_PARTIAL_MATCH", "MATCHING_TYPE_INVERSE_MATCH"]`, + }, + "dictionary": { + Type: schema.TypeList, + Optional: true, + Description: `Dictionary which defines the rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_storage_path": { + Type: schema.TypeList, + Optional: true, + Description: `Newline-delimited file of words in Cloud Storage. Only a single file is accepted.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: `A url representing a file or path (no wildcards) in Cloud Storage. Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, + }, + }, + }, + }, + "word_list": { + Type: schema.TypeList, + Optional: true, + Description: `List of words or phrases to search for.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "words": { + Type: schema.TypeList, + Required: true, + Description: `Words or phrases defining the dictionary. 
The dictionary must contain at least one +phrase and every phrase must contain at least 2 characters that are letters or digits.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "exclude_by_hotword": { + Type: schema.TypeList, + Optional: true, + Description: `Drop if the hotword rule is contained in the proximate context.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hotword_regex": { + Type: schema.TypeList, + Optional: true, + Description: `Regular expression pattern defining what qualifies as a hotword.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group_indexes": { + Type: schema.TypeList, + Optional: true, + Description: `The index of the submatch to extract as findings. When not specified, +the entire match is returned. No more than 3 may be included.`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "pattern": { + Type: schema.TypeString, + Optional: true, + Description: `Pattern defining the regular expression. Its syntax +(https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, + }, + }, + }, + }, + "proximity": { + Type: schema.TypeList, + Optional: true, + Description: `Proximity of the finding within which the entire hotword must reside. The total length of the window cannot +exceed 1000 characters. Note that the finding itself will be included in the window, so that hotwords may be +used to match substrings of the finding itself. 
For example, the certainty of a phone number regex +'(\d{3}) \d{3}-\d{4}' could be adjusted upwards if the area code is known to be the local area code of a company +office using the hotword regex '(xxx)', where 'xxx' is the area code in question.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "window_after": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of characters after the finding to consider. Either this or window_before must be specified`, + }, + "window_before": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of characters before the finding to consider. Either this or window_after must be specified`, + }, + }, + }, + }, + }, + }, + }, + "exclude_info_types": { + Type: schema.TypeList, + Optional: true, + Description: `Set of infoTypes for which findings would affect this rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "info_types": { + Type: schema.TypeList, + Required: true, + Description: `If a finding is matched by any of the infoType detectors listed here, the finding will be excluded from the scan results.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed +at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. 
Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version of the information type to use. By default, the version is set to stable.`, + }, + }, + }, + }, + }, + }, + }, + "regex": { + Type: schema.TypeList, + Optional: true, + Description: `Regular expression which defines the rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pattern": { + Type: schema.TypeString, + Required: true, + Description: `Pattern defining the regular expression. +Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, + }, + "group_indexes": { + Type: schema.TypeList, + Optional: true, + Description: `The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included.`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + }, + }, + }, + "hotword_rule": { + Type: schema.TypeList, + Optional: true, + Description: `Hotword-based detection rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hotword_regex": { + Type: schema.TypeList, + Optional: true, + Description: `Regular expression pattern defining what qualifies as a hotword.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group_indexes": { + Type: schema.TypeList, + Optional: true, + Description: `The index of the submatch to extract as findings. When not specified, +the entire match is returned. No more than 3 may be included.`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "pattern": { + Type: schema.TypeString, + Optional: true, + Description: `Pattern defining the regular expression. 
Its syntax +(https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, + }, + }, + }, + }, + "likelihood_adjustment": { + Type: schema.TypeList, + Optional: true, + Description: `Likelihood adjustment to apply to all matching findings.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fixed_likelihood": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY", ""}), + Description: `Set the likelihood of a finding to a fixed value. Either this or relative_likelihood can be set. Possible values: ["VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY"]`, + }, + "relative_likelihood": { + Type: schema.TypeInt, + Optional: true, + Description: `Increase or decrease the likelihood by the specified number of levels. For example, +if a finding would be POSSIBLE without the detection rule and relativeLikelihood is 1, +then it is upgraded to LIKELY, while a value of -1 would downgrade it to UNLIKELY. +Likelihood may never drop below VERY_UNLIKELY or exceed VERY_LIKELY, so applying an +adjustment of 1 followed by an adjustment of -1 when base likelihood is VERY_LIKELY +will result in a final likelihood of LIKELY. Either this or fixed_likelihood can be set.`, + }, + }, + }, + }, + "proximity": { + Type: schema.TypeList, + Optional: true, + Description: `Proximity of the finding within which the entire hotword must reside. The total length of the window cannot +exceed 1000 characters. Note that the finding itself will be included in the window, so that hotwords may be +used to match substrings of the finding itself. 
For example, the certainty of a phone number regex +'(\d{3}) \d{3}-\d{4}' could be adjusted upwards if the area code is known to be the local area code of a company +office using the hotword regex '(xxx)', where 'xxx' is the area code in question.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "window_after": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of characters after the finding to consider. Either this or window_before must be specified`, + }, + "window_before": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of characters before the finding to consider. Either this or window_after must be specified`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "info_types": { + Type: schema.TypeList, + Optional: true, + Description: `List of infoTypes this rule set is applied to.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed +at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, + }, + "sensitivity_score": { + Type: schema.TypeList, + Optional: true, + Description: `Optional custom sensitivity for this InfoType. This only applies to data profiling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "score": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"}), + Description: `The sensitivity score applied to the resource. Possible values: ["SENSITIVITY_LOW", "SENSITIVITY_MODERATE", "SENSITIVITY_HIGH"]`, + }, + }, + }, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Description: `Version of the information type to use. 
By default, the version is set to stable.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "inspect_template_name": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the template to run when this job is triggered.`, + }, + }, + }, + }, + "status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"PAUSED", "HEALTHY", "CANCELLED", ""}), + Description: `Whether the trigger is currently active. Default value: "HEALTHY" Possible values: ["PAUSED", "HEALTHY", "CANCELLED"]`, + Default: "HEALTHY", + }, + "trigger_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The trigger id can contain uppercase and lowercase letters, numbers, and hyphens; +that is, it must match the regular expression: [a-zA-Z\d-_]+. +The maximum length is 100 characters. Can be empty to allow the system to generate one.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The creation timestamp of an inspectTemplate. Set by the server.`, + }, + "last_run_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of the last time this trigger executed.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the job trigger. Set by the server.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The last update timestamp of an inspectTemplate. 
Set by the server.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataLossPreventionJobTriggerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandDataLossPreventionJobTriggerDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandDataLossPreventionJobTriggerDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + statusProp, err := expandDataLossPreventionJobTriggerStatus(d.Get("status"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("status"); !tpgresource.IsEmptyValue(reflect.ValueOf(statusProp)) && (ok || !reflect.DeepEqual(v, statusProp)) { + obj["status"] = statusProp + } + triggersProp, err := expandDataLossPreventionJobTriggerTriggers(d.Get("triggers"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("triggers"); !tpgresource.IsEmptyValue(reflect.ValueOf(triggersProp)) && (ok || !reflect.DeepEqual(v, triggersProp)) { + obj["triggers"] = triggersProp + } + inspectJobProp, err := expandDataLossPreventionJobTriggerInspectJob(d.Get("inspect_job"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("inspect_job"); !tpgresource.IsEmptyValue(reflect.ValueOf(inspectJobProp)) && (ok || !reflect.DeepEqual(v, inspectJobProp)) { + obj["inspectJob"] = inspectJobProp + } + 
+ obj, err = resourceDataLossPreventionJobTriggerEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/jobTriggers") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new JobTrigger: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating JobTrigger: %s", err) + } + if err := d.Set("name", flattenDataLossPreventionJobTriggerName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/jobTriggers/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating JobTrigger %q: %#v", d.Id(), res) + + return resourceDataLossPreventionJobTriggerRead(d, meta) +} + +func resourceDataLossPreventionJobTriggerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/jobTriggers/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataLossPreventionJobTrigger %q", d.Id())) + } + + res, err = resourceDataLossPreventionJobTriggerDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing DataLossPreventionJobTrigger because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenDataLossPreventionJobTriggerName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading JobTrigger: %s", err) + } + if err := d.Set("create_time", flattenDataLossPreventionJobTriggerCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading JobTrigger: %s", err) + } + if err := d.Set("update_time", flattenDataLossPreventionJobTriggerUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading JobTrigger: %s", err) + } + if err := d.Set("description", flattenDataLossPreventionJobTriggerDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading JobTrigger: %s", err) + } + if err := d.Set("display_name", flattenDataLossPreventionJobTriggerDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading JobTrigger: %s", err) + } + if err := d.Set("last_run_time", flattenDataLossPreventionJobTriggerLastRunTime(res["lastRunTime"], d, config)); err != nil { + return fmt.Errorf("Error reading JobTrigger: %s", err) + } + if err := d.Set("status", flattenDataLossPreventionJobTriggerStatus(res["status"], d, config)); err != nil { + return fmt.Errorf("Error reading JobTrigger: %s", err) + } + if err := d.Set("triggers", flattenDataLossPreventionJobTriggerTriggers(res["triggers"], d, 
config)); err != nil { + return fmt.Errorf("Error reading JobTrigger: %s", err) + } + if err := d.Set("inspect_job", flattenDataLossPreventionJobTriggerInspectJob(res["inspectJob"], d, config)); err != nil { + return fmt.Errorf("Error reading JobTrigger: %s", err) + } + + return nil +} + +func resourceDataLossPreventionJobTriggerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandDataLossPreventionJobTriggerDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandDataLossPreventionJobTriggerDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + statusProp, err := expandDataLossPreventionJobTriggerStatus(d.Get("status"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("status"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, statusProp)) { + obj["status"] = statusProp + } + triggersProp, err := expandDataLossPreventionJobTriggerTriggers(d.Get("triggers"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("triggers"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, triggersProp)) { + obj["triggers"] = triggersProp + } + inspectJobProp, err := expandDataLossPreventionJobTriggerInspectJob(d.Get("inspect_job"), d, config) + if err != nil { + return err + 
} else if v, ok := d.GetOkExists("inspect_job"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, inspectJobProp)) { + obj["inspectJob"] = inspectJobProp + } + + obj, err = resourceDataLossPreventionJobTriggerUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/jobTriggers/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating JobTrigger %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("status") { + updateMask = append(updateMask, "status") + } + + if d.HasChange("triggers") { + updateMask = append(updateMask, "triggers") + } + + if d.HasChange("inspect_job") { + updateMask = append(updateMask, "inspectJob") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating JobTrigger %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating JobTrigger %q: %#v", d.Id(), res) + } + + return resourceDataLossPreventionJobTriggerRead(d, meta) +} + +func resourceDataLossPreventionJobTriggerDelete(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/jobTriggers/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting JobTrigger %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "JobTrigger") + } + + log.Printf("[DEBUG] Finished deleting JobTrigger %q: %#v", d.Id(), res) + return nil +} + +func resourceDataLossPreventionJobTriggerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // Custom import to handle parent possibilities + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + parts := strings.Split(d.Get("name").(string), "/") + if len(parts) == 6 { + if err := d.Set("name", parts[5]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else if len(parts) == 4 { + if err := d.Set("name", parts[3]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else { + return nil, fmt.Errorf("Unexpected import id: %s, expected form {{parent}}/jobTrigger/{{name}}", d.Get("name").(string)) + } + // Remove "/jobTrigger/{{name}}" from the id + parts = parts[:len(parts)-2] + if err := d.Set("parent", strings.Join(parts, "/")); err != nil { + return nil, fmt.Errorf("Error setting parent: %s", err) + } + + // 
Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/jobTriggers/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDataLossPreventionJobTriggerName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenDataLossPreventionJobTriggerCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerLastRunTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerTriggers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "schedule": flattenDataLossPreventionJobTriggerTriggersSchedule(original["schedule"], d, config), + "manual": 
flattenDataLossPreventionJobTriggerTriggersManual(original["manual"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionJobTriggerTriggersSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["recurrence_period_duration"] = + flattenDataLossPreventionJobTriggerTriggersScheduleRecurrencePeriodDuration(original["recurrencePeriodDuration"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerTriggersScheduleRecurrencePeriodDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerTriggersManual(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataLossPreventionJobTriggerInspectJob(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["inspect_template_name"] = + flattenDataLossPreventionJobTriggerInspectJobInspectTemplateName(original["inspectTemplateName"], d, config) + transformed["inspect_config"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfig(original["inspectConfig"], d, config) + transformed["storage_config"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfig(original["storageConfig"], d, config) + transformed["actions"] = + flattenDataLossPreventionJobTriggerInspectJobActions(original["actions"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionJobTriggerInspectJobInspectTemplateName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["exclude_info_types"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigExcludeInfoTypes(original["excludeInfoTypes"], d, config) + transformed["include_quote"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigIncludeQuote(original["includeQuote"], d, config) + transformed["min_likelihood"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigMinLikelihood(original["minLikelihood"], d, config) + transformed["limits"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimits(original["limits"], d, config) + transformed["info_types"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypes(original["infoTypes"], d, config) + transformed["rule_set"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSet(original["ruleSet"], d, config) + transformed["custom_info_types"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypes(original["customInfoTypes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigExcludeInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigIncludeQuote(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigMinLikelihood(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["max_findings_per_item"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerItem(original["maxFindingsPerItem"], d, config) + transformed["max_findings_per_request"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerRequest(original["maxFindingsPerRequest"], d, config) + transformed["max_findings_per_info_type"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType(original["maxFindingsPerInfoType"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerItem(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerRequest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it 
otherwise +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "info_type": flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(original["infoType"], d, config), + "max_findings": flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(original["maxFindings"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(original["name"], d, config) + transformed["version"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeVersion(original["version"], d, config) + transformed["sensitivity_score"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScore(original["sensitivityScore"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = 
append(transformed, map[string]interface{}{ + "name": flattenDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesName(original["name"], d, config), + "version": flattenDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesVersion(original["version"], d, config), + "sensitivity_score": flattenDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesSensitivityScore(original["sensitivityScore"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, 
map[string]interface{}{ + "info_types": flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypes(original["infoTypes"], d, config), + "rules": flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRules(original["rules"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesName(original["name"], d, config), + "version": flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesVersion(original["version"], d, config), + "sensitivity_score": flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesSensitivityScore(original["sensitivityScore"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + 
flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "hotword_rule": flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule(original["hotwordRule"], d, config), + "exclusion_rule": flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule(original["exclusionRule"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hotword_regex"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex(original["hotwordRegex"], d, config) + transformed["proximity"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity(original["proximity"], d, config) + transformed["likelihood_adjustment"] = + 
flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(original["likelihoodAdjustment"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pattern"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(original["pattern"], d, config) + transformed["group_indexes"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(original["groupIndexes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["window_before"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(original["windowBefore"], d, config) + transformed["window_after"] = + 
flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(original["windowAfter"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["fixed_likelihood"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(original["fixedLikelihood"], d, config) + transformed["relative_likelihood"] = + 
flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(original["relativeLikelihood"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["matching_type"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingType(original["matchingType"], d, config) + transformed["dictionary"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary(original["dictionary"], d, config) + transformed["regex"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex(original["regex"], d, config) + transformed["exclude_info_types"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(original["excludeInfoTypes"], d, config) + 
transformed["exclude_by_hotword"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotword(original["excludeByHotword"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["word_list"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(original["wordList"], d, config) + transformed["cloud_storage_path"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(original["cloudStoragePath"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["words"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(original["words"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["path"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(original["path"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pattern"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegexPattern(original["pattern"], d, config) + transformed["group_indexes"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(original["groupIndexes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegexPattern(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["info_types"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(original["infoTypes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(original["name"], d, config), + "version": flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesVersion(original["version"], d, config), + "sensitivity_score": flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScore(original["sensitivityScore"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesVersion(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hotword_regex"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegex(original["hotwordRegex"], d, config) + transformed["proximity"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximity(original["proximity"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + 
transformed := make(map[string]interface{}) + transformed["pattern"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexPattern(original["pattern"], d, config) + transformed["group_indexes"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexGroupIndexes(original["groupIndexes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexPattern(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexGroupIndexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["window_before"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowBefore(original["windowBefore"], d, config) + transformed["window_after"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowAfter(original["windowAfter"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowBefore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowAfter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "info_type": flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoType(original["infoType"], d, config), + "likelihood": flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihood(original["likelihood"], d, config), + "exclusion_type": flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionType(original["exclusionType"], d, config), + "sensitivity_score": flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesSensitivityScore(original["sensitivityScore"], d, config), + "regex": 
flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesRegex(original["regex"], d, config), + "dictionary": flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionary(original["dictionary"], d, config), + "stored_type": flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesStoredType(original["storedType"], d, config), + "surrogate_type": flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType(original["surrogateType"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeName(original["name"], d, config) + transformed["version"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeVersion(original["version"], d, config) + transformed["sensitivity_score"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeSensitivityScore(original["sensitivityScore"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + 
return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihood(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesSensitivityScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["score"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesSensitivityScoreScore(original["score"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesSensitivityScoreScore(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + 
transformed["pattern"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesRegexPattern(original["pattern"], d, config) + transformed["group_indexes"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesRegexGroupIndexes(original["groupIndexes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesRegexPattern(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesRegexGroupIndexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionary(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["word_list"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList(original["wordList"], d, config) + transformed["cloud_storage_path"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath(original["cloudStoragePath"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["words"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordListWords(original["words"], d, config) + return []interface{}{transformed} +} +func 
flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordListWords(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["path"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(original["path"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesStoredType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesStoredTypeName(original["name"], d, config) + transformed["create_time"] = + flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesStoredTypeCreateTime(original["createTime"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesStoredTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesStoredTypeCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["timespan_config"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfig(original["timespanConfig"], d, config) + transformed["datastore_options"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptions(original["datastoreOptions"], d, config) + transformed["cloud_storage_options"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptions(original["cloudStorageOptions"], d, config) + transformed["big_query_options"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(original["bigQueryOptions"], d, config) + transformed["hybrid_options"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptions(original["hybridOptions"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["start_time"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigStartTime(original["startTime"], d, config) + transformed["end_time"] = + 
flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEndTime(original["endTime"], d, config) + transformed["enable_auto_population_of_timespan_config"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEnableAutoPopulationOfTimespanConfig(original["enableAutoPopulationOfTimespanConfig"], d, config) + transformed["timestamp_field"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(original["timestampField"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEnableAutoPopulationOfTimespanConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptions(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["partition_id"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(original["partitionId"], d, config) + transformed["kind"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind(original["kind"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["project_id"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdProjectId(original["projectId"], d, config) + transformed["namespace_id"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdNamespaceId(original["namespaceId"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdNamespaceId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + 
flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKindName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKindName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["file_set"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(original["fileSet"], d, config) + transformed["bytes_limit_per_file"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFile(original["bytesLimitPerFile"], d, config) + transformed["bytes_limit_per_file_percent"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFilePercent(original["bytesLimitPerFilePercent"], d, config) + transformed["files_limit_percent"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFilesLimitPercent(original["filesLimitPercent"], d, config) + transformed["file_types"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypes(original["fileTypes"], d, config) + transformed["sample_method"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethod(original["sampleMethod"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 
{ + return nil + } + transformed := make(map[string]interface{}) + transformed["url"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetUrl(original["url"], d, config) + transformed["regex_file_set"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(original["regexFileSet"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["bucket_name"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetBucketName(original["bucketName"], d, config) + transformed["include_regex"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetIncludeRegex(original["includeRegex"], d, config) + transformed["exclude_regex"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetExcludeRegex(original["excludeRegex"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetBucketName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetIncludeRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetExcludeRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFilePercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFilesLimitPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypes(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["table_reference"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(original["tableReference"], d, config) + transformed["rows_limit"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimit(original["rowsLimit"], d, config) + transformed["rows_limit_percent"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimitPercent(original["rowsLimitPercent"], d, config) + transformed["sample_method"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethod(original["sampleMethod"], d, config) + transformed["identifying_fields"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields(original["identifyingFields"], d, config) + transformed["included_fields"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields(original["includedFields"], d, config) + transformed["excluded_fields"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields(original["excludedFields"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["project_id"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceProjectId(original["projectId"], d, config) + transformed["dataset_id"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceDatasetId(original["datasetId"], d, config) + transformed["table_id"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceTableId(original["tableId"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceTableId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimitPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if 
strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFieldsName(original["name"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFieldsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": 
flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFieldsName(original["name"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFieldsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFieldsName(original["name"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFieldsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["description"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsDescription(original["description"], d, config) + transformed["required_finding_label_keys"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsRequiredFindingLabelKeys(original["requiredFindingLabelKeys"], d, config) + transformed["table_options"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptions(original["tableOptions"], 
d, config) + transformed["labels"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsLabels(original["labels"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsRequiredFindingLabelKeys(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["identifying_fields"] = + flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields(original["identifyingFields"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFieldsName(original["name"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFieldsName(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobActions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "save_findings": flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindings(original["saveFindings"], d, config), + "pub_sub": flattenDataLossPreventionJobTriggerInspectJobActionsPubSub(original["pubSub"], d, config), + "publish_summary_to_cscc": flattenDataLossPreventionJobTriggerInspectJobActionsPublishSummaryToCscc(original["publishSummaryToCscc"], d, config), + "publish_findings_to_cloud_data_catalog": flattenDataLossPreventionJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog(original["publishFindingsToCloudDataCatalog"], d, config), + "job_notification_emails": flattenDataLossPreventionJobTriggerInspectJobActionsJobNotificationEmails(original["jobNotificationEmails"], d, config), + "deidentify": flattenDataLossPreventionJobTriggerInspectJobActionsDeidentify(original["deidentify"], d, config), + "publish_to_stackdriver": flattenDataLossPreventionJobTriggerInspectJobActionsPublishToStackdriver(original["publishToStackdriver"], d, config), + }) + } + return transformed +} +func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + 
transformed := make(map[string]interface{}) + transformed["output_config"] = + flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig(original["outputConfig"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["table"] = + flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(original["table"], d, config) + transformed["output_schema"] = + flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(original["outputSchema"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["project_id"] = + flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableProjectId(original["projectId"], d, config) + transformed["dataset_id"] = + flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableDatasetId(original["datasetId"], d, config) + transformed["table_id"] = + flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableTableId(original["tableId"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableTableId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsPubSub(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["topic"] = + flattenDataLossPreventionJobTriggerInspectJobActionsPubSubTopic(original["topic"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobActionsPubSubTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsPublishSummaryToCscc(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsJobNotificationEmails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + 
transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsDeidentify(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cloud_storage_output"] = + flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyCloudStorageOutput(original["cloudStorageOutput"], d, config) + transformed["file_types_to_transform"] = + flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyFileTypesToTransform(original["fileTypesToTransform"], d, config) + transformed["transformation_config"] = + flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfig(original["transformationConfig"], d, config) + transformed["transformation_details_storage_config"] = + flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfig(original["transformationDetailsStorageConfig"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyCloudStorageOutput(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyFileTypesToTransform(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["deidentify_template"] = + 
flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfigDeidentifyTemplate(original["deidentifyTemplate"], d, config) + transformed["structured_deidentify_template"] = + flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfigStructuredDeidentifyTemplate(original["structuredDeidentifyTemplate"], d, config) + transformed["image_redact_template"] = + flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfigImageRedactTemplate(original["imageRedactTemplate"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfigDeidentifyTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfigStructuredDeidentifyTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfigImageRedactTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["table"] = + flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTable(original["table"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original 
:= v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset_id"] = + flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTableDatasetId(original["datasetId"], d, config) + transformed["project_id"] = + flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTableProjectId(original["projectId"], d, config) + transformed["table_id"] = + flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTableTableId(original["tableId"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTableDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTableProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTableTableId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionJobTriggerInspectJobActionsPublishToStackdriver(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func expandDataLossPreventionJobTriggerDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionJobTriggerStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerTriggers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSchedule, err := expandDataLossPreventionJobTriggerTriggersSchedule(original["schedule"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["schedule"] = transformedSchedule + } + + transformedManual, err := expandDataLossPreventionJobTriggerTriggersManual(original["manual"], d, config) + if err != nil { + return nil, err + } else { + transformed["manual"] = transformedManual + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionJobTriggerTriggersSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRecurrencePeriodDuration, err := expandDataLossPreventionJobTriggerTriggersScheduleRecurrencePeriodDuration(original["recurrence_period_duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRecurrencePeriodDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["recurrencePeriodDuration"] = transformedRecurrencePeriodDuration + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerTriggersScheduleRecurrencePeriodDuration(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerTriggersManual(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJob(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInspectTemplateName, err := expandDataLossPreventionJobTriggerInspectJobInspectTemplateName(original["inspect_template_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInspectTemplateName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["inspectTemplateName"] = transformedInspectTemplateName + } + + transformedInspectConfig, err := expandDataLossPreventionJobTriggerInspectJobInspectConfig(original["inspect_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInspectConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["inspectConfig"] = transformedInspectConfig + } + + transformedStorageConfig, err := expandDataLossPreventionJobTriggerInspectJobStorageConfig(original["storage_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStorageConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["storageConfig"] = transformedStorageConfig + } + + transformedActions, err := 
expandDataLossPreventionJobTriggerInspectJobActions(original["actions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedActions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["actions"] = transformedActions + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectTemplateName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExcludeInfoTypes, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigExcludeInfoTypes(original["exclude_info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExcludeInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["excludeInfoTypes"] = transformedExcludeInfoTypes + } + + transformedIncludeQuote, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigIncludeQuote(original["include_quote"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeQuote); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includeQuote"] = transformedIncludeQuote + } + + transformedMinLikelihood, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigMinLikelihood(original["min_likelihood"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinLikelihood); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minLikelihood"] = transformedMinLikelihood + } + + transformedLimits, err := 
expandDataLossPreventionJobTriggerInspectJobInspectConfigLimits(original["limits"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLimits); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["limits"] = transformedLimits + } + + transformedInfoTypes, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypes(original["info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoTypes"] = transformedInfoTypes + } + + transformedRuleSet, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSet(original["rule_set"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRuleSet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ruleSet"] = transformedRuleSet + } + + transformedCustomInfoTypes, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypes(original["custom_info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCustomInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["customInfoTypes"] = transformedCustomInfoTypes + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigExcludeInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigIncludeQuote(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigMinLikelihood(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionJobTriggerInspectJobInspectConfigLimits(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMaxFindingsPerItem, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerItem(original["max_findings_per_item"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxFindingsPerItem); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxFindingsPerItem"] = transformedMaxFindingsPerItem + } + + transformedMaxFindingsPerRequest, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerRequest(original["max_findings_per_request"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxFindingsPerRequest); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxFindingsPerRequest"] = transformedMaxFindingsPerRequest + } + + transformedMaxFindingsPerInfoType, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType(original["max_findings_per_info_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxFindingsPerInfoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxFindingsPerInfoType"] = transformedMaxFindingsPerInfoType + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerItem(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerRequest(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInfoType, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(original["info_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoType"] = transformedInfoType + } + + transformedMaxFindings, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(original["max_findings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxFindings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxFindings"] = transformedMaxFindings + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + 
transformedVersion, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + req = append(req, 
transformed) + } + return req, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigInfoTypesSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInfoTypes, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypes(original["info_types"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoTypes"] = transformedInfoTypes + } + + transformedRules, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRules(original["rules"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRules); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rules"] = transformedRules + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + req = append(req, transformed) + 
} + return req, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetInfoTypesSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHotwordRule, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule(original["hotword_rule"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedHotwordRule); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hotwordRule"] = transformedHotwordRule + } + + transformedExclusionRule, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule(original["exclusion_rule"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExclusionRule); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["exclusionRule"] = transformedExclusionRule + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHotwordRegex, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex(original["hotword_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHotwordRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hotwordRegex"] = transformedHotwordRegex + } + + transformedProximity, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity(original["proximity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProximity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["proximity"] = transformedProximity + } + + transformedLikelihoodAdjustment, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(original["likelihood_adjustment"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedLikelihoodAdjustment); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["likelihoodAdjustment"] = transformedLikelihoodAdjustment + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPattern, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(original["pattern"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPattern); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pattern"] = transformedPattern + } + + transformedGroupIndexes, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(original["group_indexes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupIndexes"] = transformedGroupIndexes + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWindowBefore, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(original["window_before"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWindowBefore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["windowBefore"] = transformedWindowBefore + } + + transformedWindowAfter, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(original["window_after"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWindowAfter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["windowAfter"] = transformedWindowAfter + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFixedLikelihood, err := 
expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(original["fixed_likelihood"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFixedLikelihood); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fixedLikelihood"] = transformedFixedLikelihood + } + + transformedRelativeLikelihood, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(original["relative_likelihood"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRelativeLikelihood); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["relativeLikelihood"] = transformedRelativeLikelihood + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMatchingType, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingType(original["matching_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMatchingType); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["matchingType"] = transformedMatchingType + } + + transformedDictionary, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary(original["dictionary"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDictionary); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dictionary"] = transformedDictionary + } + + transformedRegex, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex(original["regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["regex"] = transformedRegex + } + + transformedExcludeInfoTypes, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(original["exclude_info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExcludeInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["excludeInfoTypes"] = transformedExcludeInfoTypes + } + + transformedExcludeByHotword, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotword(original["exclude_by_hotword"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExcludeByHotword); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["excludeByHotword"] = transformedExcludeByHotword + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWordList, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(original["word_list"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWordList); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wordList"] = transformedWordList + } + + transformedCloudStoragePath, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(original["cloud_storage_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudStoragePath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudStoragePath"] = transformedCloudStoragePath + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWords, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(original["words"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWords); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["words"] = transformedWords + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPattern, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegexPattern(original["pattern"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPattern); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pattern"] = transformedPattern + } + + transformedGroupIndexes, err := 
expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(original["group_indexes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupIndexes"] = transformedGroupIndexes + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegexPattern(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInfoTypes, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(original["info_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoTypes"] = transformedInfoTypes + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHotwordRegex, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegex(original["hotword_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHotwordRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hotwordRegex"] = transformedHotwordRegex + } + + transformedProximity, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximity(original["proximity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProximity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["proximity"] = transformedProximity + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPattern, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexPattern(original["pattern"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPattern); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pattern"] = transformedPattern + } + + transformedGroupIndexes, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexGroupIndexes(original["group_indexes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupIndexes"] = transformedGroupIndexes + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexPattern(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordHotwordRegexGroupIndexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximity(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWindowBefore, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowBefore(original["window_before"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWindowBefore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["windowBefore"] = transformedWindowBefore + } + + transformedWindowAfter, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowAfter(original["window_after"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWindowAfter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["windowAfter"] = transformedWindowAfter + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowBefore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeByHotwordProximityWindowAfter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInfoType, err := 
expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoType(original["info_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInfoType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["infoType"] = transformedInfoType + } + + transformedLikelihood, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihood(original["likelihood"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLikelihood); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["likelihood"] = transformedLikelihood + } + + transformedExclusionType, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionType(original["exclusion_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExclusionType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["exclusionType"] = transformedExclusionType + } + + transformedSensitivityScore, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + transformedRegex, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesRegex(original["regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["regex"] = transformedRegex + } + + transformedDictionary, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionary(original["dictionary"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDictionary); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dictionary"] = transformedDictionary + } + + transformedStoredType, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesStoredType(original["stored_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStoredType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["storedType"] = transformedStoredType + } + + transformedSurrogateType, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType(original["surrogate_type"], d, config) + if err != nil { + return nil, err + } else { + transformed["surrogateType"] = transformedSurrogateType + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedSensitivityScore, err := 
expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeSensitivityScore(original["sensitivity_score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSensitivityScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sensitivityScore"] = transformedSensitivityScore + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihood(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesSensitivityScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScore, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesSensitivityScoreScore(original["score"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScore); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["score"] = transformedScore + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesSensitivityScoreScore(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPattern, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesRegexPattern(original["pattern"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPattern); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pattern"] = transformedPattern + } + + 
transformedGroupIndexes, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesRegexGroupIndexes(original["group_indexes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupIndexes"] = transformedGroupIndexes + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesRegexPattern(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesRegexGroupIndexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionary(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWordList, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList(original["word_list"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWordList); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wordList"] = transformedWordList + } + + transformedCloudStoragePath, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath(original["cloud_storage_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudStoragePath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudStoragePath"] = transformedCloudStoragePath + } + + return transformed, nil 
+} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWords, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordListWords(original["words"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWords); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["words"] = transformedWords + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordListWords(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + 
+func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesStoredType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesStoredTypeName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedCreateTime, err := expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesStoredTypeCreateTime(original["create_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCreateTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["createTime"] = transformedCreateTime + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesStoredTypeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesStoredTypeCreateTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func 
expandDataLossPreventionJobTriggerInspectJobStorageConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTimespanConfig, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfig(original["timespan_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimespanConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["timespanConfig"] = transformedTimespanConfig + } + + transformedDatastoreOptions, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptions(original["datastore_options"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatastoreOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["datastoreOptions"] = transformedDatastoreOptions + } + + transformedCloudStorageOptions, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptions(original["cloud_storage_options"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudStorageOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudStorageOptions"] = transformedCloudStorageOptions + } + + transformedBigQueryOptions, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(original["big_query_options"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBigQueryOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bigQueryOptions"] = transformedBigQueryOptions + } + + transformedHybridOptions, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptions(original["hybrid_options"], d, config) + 
if err != nil { + return nil, err + } else { + transformed["hybridOptions"] = transformedHybridOptions + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedStartTime, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigStartTime(original["start_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startTime"] = transformedStartTime + } + + transformedEndTime, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEndTime(original["end_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["endTime"] = transformedEndTime + } + + transformedEnableAutoPopulationOfTimespanConfig, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEnableAutoPopulationOfTimespanConfig(original["enable_auto_population_of_timespan_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableAutoPopulationOfTimespanConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableAutoPopulationOfTimespanConfig"] = transformedEnableAutoPopulationOfTimespanConfig + } + + transformedTimestampField, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(original["timestamp_field"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimestampField); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["timestampField"] = transformedTimestampField + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEndTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEnableAutoPopulationOfTimespanConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == 
nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPartitionId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(original["partition_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPartitionId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["partitionId"] = transformedPartitionId + } + + transformedKind, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind(original["kind"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKind); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kind"] = transformedKind + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProjectId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + transformedNamespaceId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdNamespaceId(original["namespace_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNamespaceId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["namespaceId"] = transformedNamespaceId + } + + return transformed, nil +} + +func 
expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdNamespaceId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKindName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKindName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFileSet, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(original["file_set"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedFileSet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fileSet"] = transformedFileSet + } + + transformedBytesLimitPerFile, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFile(original["bytes_limit_per_file"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBytesLimitPerFile); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bytesLimitPerFile"] = transformedBytesLimitPerFile + } + + transformedBytesLimitPerFilePercent, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFilePercent(original["bytes_limit_per_file_percent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBytesLimitPerFilePercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bytesLimitPerFilePercent"] = transformedBytesLimitPerFilePercent + } + + transformedFilesLimitPercent, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFilesLimitPercent(original["files_limit_percent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFilesLimitPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filesLimitPercent"] = transformedFilesLimitPercent + } + + transformedFileTypes, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypes(original["file_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFileTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fileTypes"] = transformedFileTypes + } + + transformedSampleMethod, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethod(original["sample_method"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSampleMethod); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sampleMethod"] = transformedSampleMethod + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUrl, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetUrl(original["url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["url"] = transformedUrl + } + + transformedRegexFileSet, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(original["regex_file_set"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRegexFileSet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["regexFileSet"] = transformedRegexFileSet + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBucketName, err := 
expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetBucketName(original["bucket_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucketName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucketName"] = transformedBucketName + } + + transformedIncludeRegex, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetIncludeRegex(original["include_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includeRegex"] = transformedIncludeRegex + } + + transformedExcludeRegex, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetExcludeRegex(original["exclude_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExcludeRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["excludeRegex"] = transformedExcludeRegex + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetBucketName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetIncludeRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetExcludeRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFile(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFilePercent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFilesLimitPercent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTableReference, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(original["table_reference"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTableReference); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tableReference"] = transformedTableReference + } + + transformedRowsLimit, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimit(original["rows_limit"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRowsLimit); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rowsLimit"] = transformedRowsLimit + } + + transformedRowsLimitPercent, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimitPercent(original["rows_limit_percent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRowsLimitPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rowsLimitPercent"] = transformedRowsLimitPercent + } + + transformedSampleMethod, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethod(original["sample_method"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSampleMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sampleMethod"] = transformedSampleMethod + } + + transformedIdentifyingFields, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields(original["identifying_fields"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentifyingFields); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identifyingFields"] = transformedIdentifyingFields + } + + transformedIncludedFields, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields(original["included_fields"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludedFields); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includedFields"] = transformedIncludedFields + } + + transformedExcludedFields, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields(original["excluded_fields"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExcludedFields); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["excludedFields"] = 
transformedExcludedFields + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProjectId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + transformedDatasetId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceDatasetId(original["dataset_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["datasetId"] = transformedDatasetId + } + + transformedTableId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceTableId(original["table_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTableId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tableId"] = transformedTableId + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, 
nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceTableId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimitPercent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFieldsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFieldsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFieldsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFieldsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFieldsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFieldsName(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDescription, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedRequiredFindingLabelKeys, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsRequiredFindingLabelKeys(original["required_finding_label_keys"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequiredFindingLabelKeys); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requiredFindingLabelKeys"] = transformedRequiredFindingLabelKeys + } + + transformedTableOptions, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptions(original["table_options"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTableOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tableOptions"] = transformedTableOptions + } + + transformedLabels, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labels"] = transformedLabels + 
} + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsRequiredFindingLabelKeys(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIdentifyingFields, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields(original["identifying_fields"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdentifyingFields); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["identifyingFields"] = transformedIdentifyingFields + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFieldsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFieldsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobStorageConfigHybridOptionsLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSaveFindings, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindings(original["save_findings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSaveFindings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["saveFindings"] = transformedSaveFindings + } + + transformedPubSub, err := expandDataLossPreventionJobTriggerInspectJobActionsPubSub(original["pub_sub"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPubSub); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pubSub"] = transformedPubSub + } + + transformedPublishSummaryToCscc, err := expandDataLossPreventionJobTriggerInspectJobActionsPublishSummaryToCscc(original["publish_summary_to_cscc"], d, config) + if err != nil { + return nil, err + } else { + 
transformed["publishSummaryToCscc"] = transformedPublishSummaryToCscc + } + + transformedPublishFindingsToCloudDataCatalog, err := expandDataLossPreventionJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog(original["publish_findings_to_cloud_data_catalog"], d, config) + if err != nil { + return nil, err + } else { + transformed["publishFindingsToCloudDataCatalog"] = transformedPublishFindingsToCloudDataCatalog + } + + transformedJobNotificationEmails, err := expandDataLossPreventionJobTriggerInspectJobActionsJobNotificationEmails(original["job_notification_emails"], d, config) + if err != nil { + return nil, err + } else { + transformed["jobNotificationEmails"] = transformedJobNotificationEmails + } + + transformedDeidentify, err := expandDataLossPreventionJobTriggerInspectJobActionsDeidentify(original["deidentify"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDeidentify); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["deidentify"] = transformedDeidentify + } + + transformedPublishToStackdriver, err := expandDataLossPreventionJobTriggerInspectJobActionsPublishToStackdriver(original["publish_to_stackdriver"], d, config) + if err != nil { + return nil, err + } else { + transformed["publishToStackdriver"] = transformedPublishToStackdriver + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedOutputConfig, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig(original["output_config"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedOutputConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["outputConfig"] = transformedOutputConfig + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTable, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["table"] = transformedTable + } + + transformedOutputSchema, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(original["output_schema"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOutputSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["outputSchema"] = transformedOutputSchema + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProjectId, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = 
transformedProjectId + } + + transformedDatasetId, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableDatasetId(original["dataset_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["datasetId"] = transformedDatasetId + } + + transformedTableId, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableTableId(original["table_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTableId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tableId"] = transformedTableId + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableTableId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsPubSub(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + 
transformedTopic, err := expandDataLossPreventionJobTriggerInspectJobActionsPubSubTopic(original["topic"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["topic"] = transformedTopic + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsPubSubTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsPublishSummaryToCscc(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsJobNotificationEmails(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsDeidentify(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l 
:= v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCloudStorageOutput, err := expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyCloudStorageOutput(original["cloud_storage_output"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudStorageOutput); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudStorageOutput"] = transformedCloudStorageOutput + } + + transformedFileTypesToTransform, err := expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyFileTypesToTransform(original["file_types_to_transform"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFileTypesToTransform); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fileTypesToTransform"] = transformedFileTypesToTransform + } + + transformedTransformationConfig, err := expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfig(original["transformation_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransformationConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transformationConfig"] = transformedTransformationConfig + } + + transformedTransformationDetailsStorageConfig, err := expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfig(original["transformation_details_storage_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTransformationDetailsStorageConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["transformationDetailsStorageConfig"] = transformedTransformationDetailsStorageConfig + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyCloudStorageOutput(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyFileTypesToTransform(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDeidentifyTemplate, err := expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfigDeidentifyTemplate(original["deidentify_template"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDeidentifyTemplate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["deidentifyTemplate"] = transformedDeidentifyTemplate + } + + transformedStructuredDeidentifyTemplate, err := expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfigStructuredDeidentifyTemplate(original["structured_deidentify_template"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStructuredDeidentifyTemplate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["structuredDeidentifyTemplate"] = transformedStructuredDeidentifyTemplate + } + + transformedImageRedactTemplate, err := expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfigImageRedactTemplate(original["image_redact_template"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImageRedactTemplate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["imageRedactTemplate"] = 
transformedImageRedactTemplate + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfigDeidentifyTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfigStructuredDeidentifyTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationConfigImageRedactTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTable, err := expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["table"] = transformedTable + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDatasetId, err := 
expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTableDatasetId(original["dataset_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["datasetId"] = transformedDatasetId + } + + transformedProjectId, err := expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTableProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + transformedTableId, err := expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTableTableId(original["table_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTableId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tableId"] = transformedTableId + } + + return transformed, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTableDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTableProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsDeidentifyTransformationDetailsStorageConfigTableTableId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionJobTriggerInspectJobActionsPublishToStackdriver(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func resourceDataLossPreventionJobTriggerEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + + newObj := make(map[string]interface{}) + newObj["jobTrigger"] = obj + triggerIdProp, ok := d.GetOk("trigger_id") + if ok && triggerIdProp != nil { + newObj["triggerId"] = triggerIdProp + } + return newObj, nil +} + +func resourceDataLossPreventionJobTriggerUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + newObj := make(map[string]interface{}) + newObj["jobTrigger"] = obj + return newObj, nil +} + +func resourceDataLossPreventionJobTriggerDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + if err := d.Set("trigger_id", flattenDataLossPreventionJobTriggerName(res["name"], d, config)); err != nil { + return nil, fmt.Errorf("Error reading JobTrigger: %s", err) + } + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_stored_info_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_stored_info_type.go new file mode 100644 index 0000000000..06d886e214 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datalossprevention/resource_data_loss_prevention_stored_info_type.go @@ -0,0 +1,1191 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package datalossprevention + +import ( + "context" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// This customizeDiff allows updating the dictionary, regex, and large_custom_dictionary fields, but +// it recreates the resource if changing between these fields. e.g., updating the regex field should +// be allowed, while changing from regex to dictionary should trigger the recreation of the resource. 
+func storedInfoTypeCustomizeDiffFunc(diff tpgresource.TerraformResourceDiff) error { + oldDict, newDict := diff.GetChange("dictionary") + oldRegex, newRegex := diff.GetChange("regex") + oldLargeCD, newLargeCD := diff.GetChange("large_custom_dictionary") + if !tpgresource.IsEmptyValue(reflect.ValueOf(oldDict)) && tpgresource.IsEmptyValue(reflect.ValueOf(newDict)) { + diff.ForceNew("dictionary") + return nil + } + if !tpgresource.IsEmptyValue(reflect.ValueOf(oldRegex)) && tpgresource.IsEmptyValue(reflect.ValueOf(newRegex)) { + diff.ForceNew("regex") + return nil + } + if !tpgresource.IsEmptyValue(reflect.ValueOf(oldLargeCD)) && tpgresource.IsEmptyValue(reflect.ValueOf(newLargeCD)) { + diff.ForceNew("large_custom_dictionary") + return nil + } + return nil +} + +func storedInfoTypeCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + return storedInfoTypeCustomizeDiffFunc(diff) +} + +func ResourceDataLossPreventionStoredInfoType() *schema.Resource { + return &schema.Resource{ + Create: resourceDataLossPreventionStoredInfoTypeCreate, + Read: resourceDataLossPreventionStoredInfoTypeRead, + Update: resourceDataLossPreventionStoredInfoTypeUpdate, + Delete: resourceDataLossPreventionStoredInfoTypeDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataLossPreventionStoredInfoTypeImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + storedInfoTypeCustomizeDiff, + ), + + Schema: map[string]*schema.Schema{ + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The parent of the info type in any of the following formats: + +* 'projects/{{project}}' +* 'projects/{{project}}/locations/{{location}}' +* 'organizations/{{organization_id}}' +* 'organizations/{{organization_id}}/locations/{{location}}'`, + }, + 
"description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of the info type.`, + }, + "dictionary": { + Type: schema.TypeList, + Optional: true, + Description: `Dictionary which defines the rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_storage_path": { + Type: schema.TypeList, + Optional: true, + Description: `Newline-delimited file of words in Cloud Storage. Only a single file is accepted.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: `A url representing a file or path (no wildcards) in Cloud Storage. Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, + }, + }, + }, + ExactlyOneOf: []string{"dictionary.0.word_list", "dictionary.0.cloud_storage_path"}, + }, + "word_list": { + Type: schema.TypeList, + Optional: true, + Description: `List of words or phrases to search for.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "words": { + Type: schema.TypeList, + Required: true, + Description: `Words or phrases defining the dictionary. 
The dictionary must contain at least one +phrase and every phrase must contain at least 2 characters that are letters or digits.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + ExactlyOneOf: []string{"dictionary.0.word_list", "dictionary.0.cloud_storage_path"}, + }, + }, + }, + ExactlyOneOf: []string{"dictionary", "regex", "large_custom_dictionary"}, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `User set display name of the info type.`, + }, + "large_custom_dictionary": { + Type: schema.TypeList, + Optional: true, + Description: `Dictionary which defines the rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "output_path": { + Type: schema.TypeList, + Required: true, + Description: `Location to store dictionary artifacts in Google Cloud Storage. These files will only be accessible by project owners and the DLP API. +If any of these artifacts are modified, the dictionary is considered invalid and can no longer be used.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: `A url representing a file or path (no wildcards) in Cloud Storage. 
Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, + }, + }, + }, + }, + "big_query_field": { + Type: schema.TypeList, + Optional: true, + Description: `Field in a BigQuery table where each cell represents a dictionary phrase.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field": { + Type: schema.TypeList, + Required: true, + Description: `Designated field in the BigQuery table.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name describing the field.`, + }, + }, + }, + }, + "table": { + Type: schema.TypeList, + Required: true, + Description: `Field in a BigQuery table where each cell represents a dictionary phrase.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + Description: `The dataset ID of the table.`, + }, + "project_id": { + Type: schema.TypeString, + Required: true, + Description: `The Google Cloud Platform project ID of the project containing the table.`, + }, + "table_id": { + Type: schema.TypeString, + Required: true, + Description: `The name of the table.`, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"large_custom_dictionary.0.cloud_storage_file_set", "large_custom_dictionary.0.big_query_field"}, + }, + "cloud_storage_file_set": { + Type: schema.TypeList, + Optional: true, + Description: `Set of files containing newline-delimited lists of dictionary phrases.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": { + Type: schema.TypeString, + Required: true, + Description: `The url, in the format 'gs:///'. 
Trailing wildcard in the path is allowed.`, + }, + }, + }, + ExactlyOneOf: []string{"large_custom_dictionary.0.cloud_storage_file_set", "large_custom_dictionary.0.big_query_field"}, + }, + }, + }, + ExactlyOneOf: []string{"dictionary", "regex", "large_custom_dictionary"}, + }, + "regex": { + Type: schema.TypeList, + Optional: true, + Description: `Regular expression which defines the rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pattern": { + Type: schema.TypeString, + Required: true, + Description: `Pattern defining the regular expression. +Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, + }, + "group_indexes": { + Type: schema.TypeList, + Optional: true, + Description: `The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included.`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + ExactlyOneOf: []string{"dictionary", "regex", "large_custom_dictionary"}, + }, + "stored_info_type_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The storedInfoType ID can contain uppercase and lowercase letters, numbers, and hyphens; +that is, it must match the regular expression: [a-zA-Z\d-_]+. The maximum length is 100 +characters. Can be empty to allow the system to generate one.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the info type. 
Set by the server.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataLossPreventionStoredInfoTypeCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandDataLossPreventionStoredInfoTypeDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandDataLossPreventionStoredInfoTypeDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + regexProp, err := expandDataLossPreventionStoredInfoTypeRegex(d.Get("regex"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("regex"); !tpgresource.IsEmptyValue(reflect.ValueOf(regexProp)) && (ok || !reflect.DeepEqual(v, regexProp)) { + obj["regex"] = regexProp + } + dictionaryProp, err := expandDataLossPreventionStoredInfoTypeDictionary(d.Get("dictionary"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dictionary"); !tpgresource.IsEmptyValue(reflect.ValueOf(dictionaryProp)) && (ok || !reflect.DeepEqual(v, dictionaryProp)) { + obj["dictionary"] = dictionaryProp + } + largeCustomDictionaryProp, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionary(d.Get("large_custom_dictionary"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("large_custom_dictionary"); !tpgresource.IsEmptyValue(reflect.ValueOf(largeCustomDictionaryProp)) && 
(ok || !reflect.DeepEqual(v, largeCustomDictionaryProp)) { + obj["largeCustomDictionary"] = largeCustomDictionaryProp + } + + obj, err = resourceDataLossPreventionStoredInfoTypeEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new StoredInfoType: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating StoredInfoType: %s", err) + } + if err := d.Set("name", flattenDataLossPreventionStoredInfoTypeName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/storedInfoTypes/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = transport_tpg.PollingWaitTime(resourceDataLossPreventionStoredInfoTypePollRead(d, meta), transport_tpg.PollCheckForExistence, "Creating StoredInfoType", d.Timeout(schema.TimeoutCreate), 1) + if err != nil { + return fmt.Errorf("Error waiting to create StoredInfoType: %s", err) + } + + log.Printf("[DEBUG] Finished creating StoredInfoType %q: %#v", d.Id(), res) + + return resourceDataLossPreventionStoredInfoTypeRead(d, meta) +} + +func resourceDataLossPreventionStoredInfoTypePollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := 
meta.(*transport_tpg.Config) + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes/{{name}}") + if err != nil { + return nil, err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return res, err + } + res, err = resourceDataLossPreventionStoredInfoTypeDecoder(d, meta, res) + if err != nil { + return nil, err + } + if res == nil { + return nil, tpgresource.Fake404("decoded", "DataLossPreventionStoredInfoType") + } + + return res, nil + } +} + +func resourceDataLossPreventionStoredInfoTypeRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataLossPreventionStoredInfoType %q", d.Id())) + } + + res, err = resourceDataLossPreventionStoredInfoTypeDecoder(d, meta, res) + if err != nil { + return err + } + + 
if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing DataLossPreventionStoredInfoType because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenDataLossPreventionStoredInfoTypeName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading StoredInfoType: %s", err) + } + if err := d.Set("description", flattenDataLossPreventionStoredInfoTypeDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading StoredInfoType: %s", err) + } + if err := d.Set("display_name", flattenDataLossPreventionStoredInfoTypeDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading StoredInfoType: %s", err) + } + if err := d.Set("regex", flattenDataLossPreventionStoredInfoTypeRegex(res["regex"], d, config)); err != nil { + return fmt.Errorf("Error reading StoredInfoType: %s", err) + } + if err := d.Set("dictionary", flattenDataLossPreventionStoredInfoTypeDictionary(res["dictionary"], d, config)); err != nil { + return fmt.Errorf("Error reading StoredInfoType: %s", err) + } + if err := d.Set("large_custom_dictionary", flattenDataLossPreventionStoredInfoTypeLargeCustomDictionary(res["largeCustomDictionary"], d, config)); err != nil { + return fmt.Errorf("Error reading StoredInfoType: %s", err) + } + + return nil +} + +func resourceDataLossPreventionStoredInfoTypeUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandDataLossPreventionStoredInfoTypeDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandDataLossPreventionStoredInfoTypeDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + regexProp, err := expandDataLossPreventionStoredInfoTypeRegex(d.Get("regex"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("regex"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, regexProp)) { + obj["regex"] = regexProp + } + dictionaryProp, err := expandDataLossPreventionStoredInfoTypeDictionary(d.Get("dictionary"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dictionary"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dictionaryProp)) { + obj["dictionary"] = dictionaryProp + } + largeCustomDictionaryProp, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionary(d.Get("large_custom_dictionary"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("large_custom_dictionary"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, largeCustomDictionaryProp)) { + obj["largeCustomDictionary"] = largeCustomDictionaryProp + } + + obj, err = resourceDataLossPreventionStoredInfoTypeUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating StoredInfoType %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("regex") { + 
updateMask = append(updateMask, "regex") + } + + if d.HasChange("dictionary") { + updateMask = append(updateMask, "dictionary") + } + + if d.HasChange("large_custom_dictionary") { + updateMask = append(updateMask, "largeCustomDictionary") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating StoredInfoType %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating StoredInfoType %q: %#v", d.Id(), res) + } + + return resourceDataLossPreventionStoredInfoTypeRead(d, meta) +} + +func resourceDataLossPreventionStoredInfoTypeDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting StoredInfoType %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: 
url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "StoredInfoType") + } + + log.Printf("[DEBUG] Finished deleting StoredInfoType %q: %#v", d.Id(), res) + return nil +} + +func resourceDataLossPreventionStoredInfoTypeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // Custom import to handle parent possibilities + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + parts := strings.Split(d.Get("name").(string), "/") + if len(parts) == 6 { + if err := d.Set("name", parts[5]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else if len(parts) == 4 { + if err := d.Set("name", parts[3]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else { + return nil, fmt.Errorf("Unexpected import id: %s, expected form {{parent}}/storedInfoType/{{name}}", d.Get("name").(string)) + } + // Remove "/storedInfoType/{{name}}" from the id + parts = parts[:len(parts)-2] + if err := d.Set("parent", strings.Join(parts, "/")); err != nil { + return nil, fmt.Errorf("Error setting parent: %s", err) + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/storedInfoTypes/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDataLossPreventionStoredInfoTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenDataLossPreventionStoredInfoTypeDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionStoredInfoTypeDisplayName(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionStoredInfoTypeRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pattern"] = + flattenDataLossPreventionStoredInfoTypeRegexPattern(original["pattern"], d, config) + transformed["group_indexes"] = + flattenDataLossPreventionStoredInfoTypeRegexGroupIndexes(original["groupIndexes"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionStoredInfoTypeRegexPattern(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionStoredInfoTypeRegexGroupIndexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionStoredInfoTypeDictionary(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["word_list"] = + flattenDataLossPreventionStoredInfoTypeDictionaryWordList(original["wordList"], d, config) + transformed["cloud_storage_path"] = + flattenDataLossPreventionStoredInfoTypeDictionaryCloudStoragePath(original["cloudStoragePath"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionStoredInfoTypeDictionaryWordList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["words"] = + 
flattenDataLossPreventionStoredInfoTypeDictionaryWordListWords(original["words"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionStoredInfoTypeDictionaryWordListWords(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionStoredInfoTypeDictionaryCloudStoragePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["path"] = + flattenDataLossPreventionStoredInfoTypeDictionaryCloudStoragePathPath(original["path"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionStoredInfoTypeDictionaryCloudStoragePathPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionary(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["output_path"] = + flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPath(original["outputPath"], d, config) + transformed["cloud_storage_file_set"] = + flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(original["cloudStorageFileSet"], d, config) + transformed["big_query_field"] = + flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryField(original["bigQueryField"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) 
+ if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["path"] = + flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPathPath(original["path"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPathPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["url"] = + flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetUrl(original["url"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryField(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["table"] = + flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(original["table"], d, config) + transformed["field"] = + flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(original["field"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["project_id"] = + flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableProjectId(original["projectId"], d, config) + transformed["dataset_id"] = + flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableDatasetId(original["datasetId"], d, config) + transformed["table_id"] = + flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableTableId(original["tableId"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableTableId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDataLossPreventionStoredInfoTypeDescription(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionStoredInfoTypeDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionStoredInfoTypeRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPattern, err := expandDataLossPreventionStoredInfoTypeRegexPattern(original["pattern"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPattern); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pattern"] = transformedPattern + } + + transformedGroupIndexes, err := expandDataLossPreventionStoredInfoTypeRegexGroupIndexes(original["group_indexes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupIndexes"] = transformedGroupIndexes + } + + return transformed, nil +} + +func expandDataLossPreventionStoredInfoTypeRegexPattern(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionStoredInfoTypeRegexGroupIndexes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionStoredInfoTypeDictionary(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + 
transformedWordList, err := expandDataLossPreventionStoredInfoTypeDictionaryWordList(original["word_list"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWordList); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["wordList"] = transformedWordList + } + + transformedCloudStoragePath, err := expandDataLossPreventionStoredInfoTypeDictionaryCloudStoragePath(original["cloud_storage_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudStoragePath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudStoragePath"] = transformedCloudStoragePath + } + + return transformed, nil +} + +func expandDataLossPreventionStoredInfoTypeDictionaryWordList(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedWords, err := expandDataLossPreventionStoredInfoTypeDictionaryWordListWords(original["words"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWords); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["words"] = transformedWords + } + + return transformed, nil +} + +func expandDataLossPreventionStoredInfoTypeDictionaryWordListWords(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionStoredInfoTypeDictionaryCloudStoragePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := 
expandDataLossPreventionStoredInfoTypeDictionaryCloudStoragePathPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + return transformed, nil +} + +func expandDataLossPreventionStoredInfoTypeDictionaryCloudStoragePathPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionStoredInfoTypeLargeCustomDictionary(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedOutputPath, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPath(original["output_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOutputPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["outputPath"] = transformedOutputPath + } + + transformedCloudStorageFileSet, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(original["cloud_storage_file_set"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudStorageFileSet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudStorageFileSet"] = transformedCloudStorageFileSet + } + + transformedBigQueryField, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryField(original["big_query_field"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBigQueryField); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bigQueryField"] = transformedBigQueryField + } + + return transformed, nil +} 
+ +func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPathPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + return transformed, nil +} + +func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPathPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUrl, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetUrl(original["url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["url"] = transformedUrl + } + + return transformed, nil +} + +func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryField(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTable, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["table"] = transformedTable + } + + transformedField, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(original["field"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedField); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["field"] = transformedField + } + + return transformed, nil +} + +func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProjectId, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + transformedDatasetId, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableDatasetId(original["dataset_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["datasetId"] = transformedDatasetId + } + + 
transformedTableId, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableTableId(original["table_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTableId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tableId"] = transformedTableId + } + + return transformed, nil +} + +func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableTableId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
resourceDataLossPreventionStoredInfoTypeEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + newObj := make(map[string]interface{}) + newObj["config"] = obj + storedInfoTypeIdProp, ok := d.GetOk("stored_info_type_id") + if ok && storedInfoTypeIdProp != nil { + newObj["storedInfoTypeId"] = storedInfoTypeIdProp + } + return newObj, nil +} + +func resourceDataLossPreventionStoredInfoTypeUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + newObj := make(map[string]interface{}) + newObj["config"] = obj + return newObj, nil +} + +func resourceDataLossPreventionStoredInfoTypeDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // Stored info types come back nested with previous versions. We only want the current + // version in the unwrapped form + name := res["name"].(string) + v, ok := res["currentVersion"] + if !ok || v == nil { + return nil, nil + } + + current := v.(map[string]interface{}) + configRaw, ok := current["config"] + if !ok || configRaw == nil { + return nil, nil + } + + config := configRaw.(map[string]interface{}) + // Name comes back on the top level, so set here + config["name"] = name + + configMeta := meta.(*transport_tpg.Config) + if err := d.Set("stored_info_type_id", flattenDataLossPreventionStoredInfoTypeName(res["name"], d, configMeta)); err != nil { + return nil, fmt.Errorf("Error reading StoredInfoType: %s", err) + } + + return config, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/dataplex_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/dataplex_operation.go new file mode 100644 index 0000000000..5742359ba9 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/dataplex_operation.go @@ -0,0 +1,74 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dataplex + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type DataplexOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *DataplexOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.DataplexBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createDataplexWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*DataplexOperationWaiter, error) { + w := &DataplexOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +func DataplexOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createDataplexWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/iam_dataplex_asset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/iam_dataplex_asset.go new file mode 100644 index 0000000000..859eb9dc57 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/iam_dataplex_asset.go @@ -0,0 +1,275 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dataplex + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var DataplexAssetIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "lake": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "dataplex_zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "asset": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type DataplexAssetIamUpdater struct { + project string + location string + lake string + dataplexZone string + asset string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func DataplexAssetIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + 
if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("lake"); ok { + values["lake"] = v.(string) + } + + if v, ok := d.GetOk("dataplex_zone"); ok { + values["dataplex_zone"] = v.(string) + } + + if v, ok := d.GetOk("asset"); ok { + values["asset"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)/assets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("asset").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataplexAssetIamUpdater{ + project: values["project"], + location: values["location"], + lake: values["lake"], + dataplexZone: values["dataplex_zone"], + asset: values["asset"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("lake", u.lake); err != nil { + return nil, fmt.Errorf("Error setting lake: %s", err) + } + if err := d.Set("dataplex_zone", u.dataplexZone); err != nil { + return nil, fmt.Errorf("Error setting dataplex_zone: %s", err) + } + if err := d.Set("asset", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting asset: %s", err) + } + + return u, nil +} + +func DataplexAssetIdParseFunc(d 
*schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)/assets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DataplexAssetIamUpdater{ + project: values["project"], + location: values["location"], + lake: values["lake"], + dataplexZone: values["dataplex_zone"], + asset: values["asset"], + d: d, + Config: config, + } + if err := d.Set("asset", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting asset: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataplexAssetIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyAssetUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager 
policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataplexAssetIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyAssetUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataplexAssetIamUpdater) qualifyAssetUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataplexBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", u.project, u.location, u.lake, u.dataplexZone, u.asset), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataplexAssetIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s/assets/%s", u.project, u.location, u.lake, u.dataplexZone, u.asset) +} + +func (u *DataplexAssetIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataplex-asset-%s", u.GetResourceId()) +} + +func (u *DataplexAssetIamUpdater) DescribeResource() string { + return fmt.Sprintf("dataplex asset %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/iam_dataplex_datascan.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/iam_dataplex_datascan.go new file mode 100644 index 0000000000..3e57c9914b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/iam_dataplex_datascan.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dataplex + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var DataplexDatascanIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "data_scan_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type DataplexDatascanIamUpdater struct { + project string + location string + dataScanId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func DataplexDatascanIamUpdaterProducer(d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("data_scan_id"); ok { + values["data_scan_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataScans/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("data_scan_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataplexDatascanIamUpdater{ + project: values["project"], + location: values["location"], + dataScanId: values["data_scan_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("data_scan_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting data_scan_id: %s", err) + } + + return u, nil +} + +func DataplexDatascanIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = 
location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/dataScans/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DataplexDatascanIamUpdater{ + project: values["project"], + location: values["location"], + dataScanId: values["data_scan_id"], + d: d, + Config: config, + } + if err := d.Set("data_scan_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting data_scan_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataplexDatascanIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyDatascanUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataplexDatascanIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyDatascanUrl("setIamPolicy") + if err != nil { + return err + } + project, 
err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataplexDatascanIamUpdater) qualifyDatascanUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataplexBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/dataScans/%s", u.project, u.location, u.dataScanId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataplexDatascanIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/dataScans/%s", u.project, u.location, u.dataScanId) +} + +func (u *DataplexDatascanIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataplex-datascan-%s", u.GetResourceId()) +} + +func (u *DataplexDatascanIamUpdater) DescribeResource() string { + return fmt.Sprintf("dataplex datascan %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/iam_dataplex_lake.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/iam_dataplex_lake.go new file mode 100644 index 0000000000..21c92fd18b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/iam_dataplex_lake.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dataplex + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var DataplexLakeIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "lake": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type DataplexLakeIamUpdater struct { + project string + location string + lake string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func DataplexLakeIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, 
config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("lake"); ok { + values["lake"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("lake").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataplexLakeIamUpdater{ + project: values["project"], + location: values["location"], + lake: values["lake"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("lake", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting lake: %s", err) + } + + return u, nil +} + +func DataplexLakeIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DataplexLakeIamUpdater{ + project: values["project"], + location: values["location"], + lake: values["lake"], + d: d, + Config: config, + } + if err := d.Set("lake", 
u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting lake: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataplexLakeIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyLakeUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataplexLakeIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyLakeUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: 
{{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataplexLakeIamUpdater) qualifyLakeUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataplexBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/lakes/%s", u.project, u.location, u.lake), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataplexLakeIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/lakes/%s", u.project, u.location, u.lake) +} + +func (u *DataplexLakeIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataplex-lake-%s", u.GetResourceId()) +} + +func (u *DataplexLakeIamUpdater) DescribeResource() string { + return fmt.Sprintf("dataplex lake %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/iam_dataplex_zone.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/iam_dataplex_zone.go new file mode 100644 index 0000000000..090ce04d5a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/iam_dataplex_zone.go @@ -0,0 +1,260 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dataplex + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var DataplexZoneIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "lake": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "dataplex_zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type DataplexZoneIamUpdater struct { + project string + location string + lake string + dataplexZone string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func DataplexZoneIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("lake"); ok { + values["lake"] = v.(string) + } + + if v, ok := d.GetOk("dataplex_zone"); ok { + values["dataplex_zone"] = v.(string) + } + + // We may have 
gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("dataplex_zone").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataplexZoneIamUpdater{ + project: values["project"], + location: values["location"], + lake: values["lake"], + dataplexZone: values["dataplex_zone"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("lake", u.lake); err != nil { + return nil, fmt.Errorf("Error setting lake: %s", err) + } + if err := d.Set("dataplex_zone", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting dataplex_zone: %s", err) + } + + return u, nil +} + +func DataplexZoneIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DataplexZoneIamUpdater{ + project: values["project"], + location: values["location"], + lake: values["lake"], + dataplexZone: values["dataplex_zone"], + d: d, + Config: config, + } + if err := 
d.Set("dataplex_zone", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting dataplex_zone: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataplexZoneIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyZoneUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataplexZoneIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyZoneUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error 
setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataplexZoneIamUpdater) qualifyZoneUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataplexBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s", u.project, u.location, u.lake, u.dataplexZone), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataplexZoneIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/lakes/%s/zones/%s", u.project, u.location, u.lake, u.dataplexZone) +} + +func (u *DataplexZoneIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataplex-zone-%s", u.GetResourceId()) +} + +func (u *DataplexZoneIamUpdater) DescribeResource() string { + return fmt.Sprintf("dataplex zone %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataplex_asset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_asset.go similarity index 89% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataplex_asset.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_asset.go index f930954bc7..78ef7d6a13 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataplex_asset.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_asset.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package dataplex import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceDataplexAsset() *schema.Resource { @@ -113,7 +120,7 @@ func ResourceDataplexAsset() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -405,8 +412,8 @@ func DataplexAssetSecurityStatusSchema() *schema.Resource { } func resourceDataplexAssetCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -420,7 +427,7 @@ func resourceDataplexAssetCreate(d *schema.ResourceData, meta interface{}) error ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), Description: dcl.String(d.Get("description").(string)), DisplayName: dcl.String(d.Get("display_name").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), } @@ -429,18 +436,18 @@ func resourceDataplexAssetCreate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error constructing id: 
%s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -462,8 +469,8 @@ func resourceDataplexAssetCreate(d *schema.ResourceData, meta interface{}) error } func resourceDataplexAssetRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -477,21 +484,21 @@ func resourceDataplexAssetRead(d *schema.ResourceData, meta interface{}) error { ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), Description: dcl.String(d.Get("description").(string)), DisplayName: dcl.String(d.Get("display_name").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil 
indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -500,7 +507,7 @@ func resourceDataplexAssetRead(d *schema.ResourceData, meta interface{}) error { res, err := client.GetAsset(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("DataplexAsset %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("dataplex_zone", res.DataplexZone); err != nil { @@ -558,8 +565,8 @@ func resourceDataplexAssetRead(d *schema.ResourceData, meta interface{}) error { return nil } func resourceDataplexAssetUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -573,22 +580,22 @@ func resourceDataplexAssetUpdate(d *schema.ResourceData, meta interface{}) error ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), Description: dcl.String(d.Get("description").(string)), DisplayName: dcl.String(d.Get("display_name").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + 
directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -610,8 +617,8 @@ func resourceDataplexAssetUpdate(d *schema.ResourceData, meta interface{}) error } func resourceDataplexAssetDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -625,22 +632,22 @@ func resourceDataplexAssetDelete(d *schema.ResourceData, meta interface{}) error ResourceSpec: expandDataplexAssetResourceSpec(d.Get("resource_spec")), Description: dcl.String(d.Get("description").(string)), DisplayName: dcl.String(d.Get("display_name").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), } log.Printf("[DEBUG] Deleting Asset %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -655,9 +662,9 @@ func resourceDataplexAssetDelete(d *schema.ResourceData, meta interface{}) error } func resourceDataplexAssetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)/assets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", @@ -666,7 +673,7 @@ func resourceDataplexAssetImport(d *schema.ResourceData, meta interface{}) ([]*s } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -687,8 +694,8 @@ func expandDataplexAssetDiscoverySpec(o interface{}) *dataplex.AssetDiscoverySpe return &dataplex.AssetDiscoverySpec{ Enabled: dcl.Bool(obj["enabled"].(bool)), CsvOptions: expandDataplexAssetDiscoverySpecCsvOptions(obj["csv_options"]), - ExcludePatterns: expandStringArray(obj["exclude_patterns"]), 
- IncludePatterns: expandStringArray(obj["include_patterns"]), + ExcludePatterns: tpgdclresource.ExpandStringArray(obj["exclude_patterns"]), + IncludePatterns: tpgdclresource.ExpandStringArray(obj["include_patterns"]), JsonOptions: expandDataplexAssetDiscoverySpecJsonOptions(obj["json_options"]), Schedule: dcl.String(obj["schedule"].(string)), } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_datascan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_datascan.go new file mode 100644 index 0000000000..6586b47be0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_datascan.go @@ -0,0 +1,3061 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dataplex + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDataplexDatascan() *schema.Resource { + return &schema.Resource{ + Create: resourceDataplexDatascanCreate, + Read: resourceDataplexDatascanRead, + Update: resourceDataplexDatascanUpdate, + Delete: resourceDataplexDatascanDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataplexDatascanImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "data": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The data source for DataScan.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "entity": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.`, + ExactlyOneOf: []string{"data.0.entity", "data.0.resource"}, + }, + "resource": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: +(Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.`, + ExactlyOneOf: []string{"data.0.entity", "data.0.resource"}, + }, + }, + }, + }, + "data_scan_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `DataScan identifier. 
Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.`, + }, + "execution_spec": { + Type: schema.TypeList, + Required: true, + Description: `DataScan execution settings.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "trigger": { + Type: schema.TypeList, + Required: true, + Description: `Spec related to how often and when a scan should be triggered.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "on_demand": { + Type: schema.TypeList, + Optional: true, + Description: `The scan runs once via dataScans.run API.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{"execution_spec.0.trigger.0.on_demand", "execution_spec.0.trigger.0.schedule"}, + }, + "schedule": { + Type: schema.TypeList, + Optional: true, + Description: `The scan is scheduled to run periodically.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cron": { + Type: schema.TypeString, + Required: true, + Description: `Cron schedule for running scans periodically. This field is required for Schedule scans.`, + }, + }, + }, + ExactlyOneOf: []string{"execution_spec.0.trigger.0.on_demand", "execution_spec.0.trigger.0.schedule"}, + }, + }, + }, + }, + "field": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. 
If not specified, a data scan will run for all data in the table.`, + }, + }, + }, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location where the data scan should reside.`, + }, + "data_profile_spec": { + Type: schema.TypeList, + Optional: true, + Description: `DataProfileScan related setting.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "row_filter": { + Type: schema.TypeString, + Optional: true, + Description: `A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10`, + }, + "sampling_percent": { + Type: schema.TypeFloat, + Optional: true, + Description: `The percentage of the records to be selected from the dataset for DataScan.`, + }, + }, + }, + ExactlyOneOf: []string{"data_quality_spec", "data_profile_spec"}, + }, + "data_quality_spec": { + Type: schema.TypeList, + Optional: true, + Description: `DataQualityScan related setting.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "row_filter": { + Type: schema.TypeString, + Optional: true, + Description: `A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10`, + }, + "rules": { + Type: schema.TypeList, + Optional: true, + Description: `The list of rules to evaluate against a data source. At least one rule is required.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dimension": { + Type: schema.TypeString, + Required: true, + Description: `The dimension a rule belongs to. Results are also aggregated at the dimension level. 
Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]`, + }, + "column": { + Type: schema.TypeString, + Optional: true, + Description: `The unnested column which this rule is evaluated against.`, + }, + "ignore_null": { + Type: schema.TypeBool, + Optional: true, + Description: `Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.`, + }, + "non_null_expectation": { + Type: schema.TypeList, + Optional: true, + Description: `ColumnMap rule which evaluates whether each column value is null.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "range_expectation": { + Type: schema.TypeList, + Optional: true, + Description: `ColumnMap rule which evaluates whether each column value lies between a specified range.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_value": { + Type: schema.TypeString, + Optional: true, + Description: `The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.`, + }, + "min_value": { + Type: schema.TypeString, + Optional: true, + Description: `The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.`, + }, + "strict_max_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. +Only relevant if a maxValue has been defined. Default = false.`, + Default: false, + }, + "strict_min_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. +Only relevant if a minValue has been defined. 
Default = false.`, + Default: false, + }, + }, + }, + }, + "regex_expectation": { + Type: schema.TypeList, + Optional: true, + Description: `ColumnMap rule which evaluates whether each column value matches a specified regex.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "regex": { + Type: schema.TypeString, + Required: true, + Description: `A regular expression the column value is expected to match.`, + }, + }, + }, + }, + "row_condition_expectation": { + Type: schema.TypeList, + Optional: true, + Description: `Table rule which evaluates whether each row passes the specified condition.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sql_expression": { + Type: schema.TypeString, + Required: true, + Description: `The SQL expression.`, + }, + }, + }, + }, + "set_expectation": { + Type: schema.TypeList, + Optional: true, + Description: `ColumnMap rule which evaluates whether each column value is contained by a specified set.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "values": { + Type: schema.TypeList, + Required: true, + Description: `Expected values for the column value.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "statistic_range_expectation": { + Type: schema.TypeList, + Optional: true, + Description: `ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "statistic": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"STATISTIC_UNDEFINED", "MEAN", "MIN", "MAX"}), + Description: `column statistics. Possible values: ["STATISTIC_UNDEFINED", "MEAN", "MIN", "MAX"]`, + }, + "max_value": { + Type: schema.TypeString, + Optional: true, + Description: `The maximum column statistic value allowed for a row to pass this validation. 
+At least one of minValue and maxValue need to be provided.`, + }, + "min_value": { + Type: schema.TypeString, + Optional: true, + Description: `The minimum column statistic value allowed for a row to pass this validation. +At least one of minValue and maxValue need to be provided.`, + }, + "strict_max_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. +Only relevant if a maxValue has been defined. Default = false.`, + Default: false, + }, + "strict_min_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. +Only relevant if a minValue has been defined. Default = false.`, + Default: false, + }, + }, + }, + }, + "table_condition_expectation": { + Type: schema.TypeList, + Optional: true, + Description: `Table rule which evaluates whether the provided expression is true.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sql_expression": { + Type: schema.TypeString, + Required: true, + Description: `The SQL expression.`, + }, + }, + }, + }, + "threshold": { + Type: schema.TypeFloat, + Optional: true, + Description: `The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 
1.0).`, + }, + "uniqueness_expectation": { + Type: schema.TypeList, + Optional: true, + Description: `ColumnAggregate rule which evaluates whether the column has duplicates.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + }, + }, + }, + "sampling_percent": { + Type: schema.TypeFloat, + Optional: true, + Description: `The percentage of the records to be selected from the dataset for DataScan.`, + }, + }, + }, + ExactlyOneOf: []string{"data_quality_spec", "data_profile_spec"}, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of the scan.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `User friendly display name.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-defined labels for the scan. A list of key->value pairs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the scan was created.`, + }, + "data_profile_result": { + Type: schema.TypeList, + Computed: true, + Description: `The result of the data profile scan.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "row_count": { + Type: schema.TypeString, + Optional: true, + Description: `The count of rows scanned.`, + }, + "profile": { + Type: schema.TypeList, + Computed: true, + Description: `The profile information per field.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fields": { + Type: schema.TypeList, + Optional: true, + Description: `List of fields with structural and profile information for each field.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeString, + Optional: true, + Description: `The mode of the field. Possible values include: +1. REQUIRED, if it is a required field. +2. NULLABLE, if it is an optional field. +3. 
REPEATED, if it is a repeated field.`, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the field.`, + }, + "profile": { + Type: schema.TypeList, + Optional: true, + Description: `Profile information for the corresponding field.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "distinct_ratio": { + Type: schema.TypeInt, + Optional: true, + Description: `Ratio of rows with distinct values against total scanned rows. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode.`, + }, + "top_n_values": { + Type: schema.TypeList, + Optional: true, + Description: `The list of top N non-null values and number of times they occur in the scanned data. N is 10 or equal to the number of distinct values in the field, whichever is smaller. Not available for complex non-groupable field type RECORD and fields with REPEATABLE mode.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeString, + Optional: true, + Description: `Count of the corresponding value in the scanned data.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `String value of a top N non-null value.`, + }, + }, + }, + }, + "double_profile": { + Type: schema.TypeList, + Computed: true, + Description: `Double type field information.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "average": { + Type: schema.TypeInt, + Optional: true, + Description: `Average of non-null values in the scanned data. NaN, if the field has a NaN.`, + }, + "max": { + Type: schema.TypeString, + Optional: true, + Description: `Maximum of non-null values in the scanned data. NaN, if the field has a NaN.`, + }, + "min": { + Type: schema.TypeString, + Optional: true, + Description: `Minimum of non-null values in the scanned data. 
NaN, if the field has a NaN.`, + }, + "quartiles": { + Type: schema.TypeString, + Optional: true, + Description: `A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.`, + }, + "standard_deviation": { + Type: schema.TypeInt, + Optional: true, + Description: `Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.`, + }, + }, + }, + }, + "integer_profile": { + Type: schema.TypeList, + Computed: true, + Description: `Integer type field information.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "average": { + Type: schema.TypeInt, + Optional: true, + Description: `Average of non-null values in the scanned data. NaN, if the field has a NaN.`, + }, + "max": { + Type: schema.TypeString, + Optional: true, + Description: `Maximum of non-null values in the scanned data. NaN, if the field has a NaN.`, + }, + "min": { + Type: schema.TypeString, + Optional: true, + Description: `Minimum of non-null values in the scanned data. NaN, if the field has a NaN.`, + }, + "quartiles": { + Type: schema.TypeString, + Optional: true, + Description: `A quartile divides the number of data points into four parts, or quarters, of more-or-less equal size. Three main quartiles used are: The first quartile (Q1) splits off the lowest 25% of data from the highest 75%. 
It is also known as the lower or 25th empirical quartile, as 25% of the data is below this point. The second quartile (Q2) is the median of a data set. So, 50% of the data lies below this point. The third quartile (Q3) splits off the highest 25% of data from the lowest 75%. It is known as the upper or 75th empirical quartile, as 75% of the data lies below this point. Here, the quartiles is provided as an ordered list of quartile values for the scanned data, occurring in order Q1, median, Q3.`, + }, + "standard_deviation": { + Type: schema.TypeInt, + Optional: true, + Description: `Standard deviation of non-null values in the scanned data. NaN, if the field has a NaN.`, + }, + }, + }, + }, + "null_ratio": { + Type: schema.TypeInt, + Computed: true, + Description: `Ratio of rows with null value against total scanned rows.`, + }, + "string_profile": { + Type: schema.TypeList, + Computed: true, + Description: `String type field information.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "average_length": { + Type: schema.TypeInt, + Optional: true, + Description: `Average length of non-null values in the scanned data.`, + }, + "max_length": { + Type: schema.TypeString, + Optional: true, + Description: `Maximum length of non-null values in the scanned data.`, + }, + "min_length": { + Type: schema.TypeString, + Optional: true, + Description: `Minimum length of non-null values in the scanned data.`, + }, + }, + }, + }, + }, + }, + }, + "type": { + Type: schema.TypeString, + Optional: true, + Description: `The field data type.`, + }, + }, + }, + }, + }, + }, + }, + "scanned_data": { + Type: schema.TypeList, + Computed: true, + Description: `The data scanned for this result.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "incremental_field": { + Type: schema.TypeList, + Optional: true, + Description: `The range denoted by values of an incremental field`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + 
"end": { + Type: schema.TypeString, + Optional: true, + Description: `Value that marks the end of the range.`, + }, + "field": { + Type: schema.TypeString, + Optional: true, + Description: `The field that contains values which monotonically increases over time (e.g. a timestamp column).`, + }, + "start": { + Type: schema.TypeString, + Optional: true, + Description: `Value that marks the start of the range.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "data_quality_result": { + Type: schema.TypeList, + Computed: true, + Description: `The result of the data quality scan.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dimensions": { + Type: schema.TypeList, + Optional: true, + Description: `A list of results at the dimension level.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "passed": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether the dimension passed or failed.`, + }, + }, + }, + }, + "passed": { + Type: schema.TypeBool, + Computed: true, + Description: `Overall data quality result -- true if all rules passed.`, + }, + "row_count": { + Type: schema.TypeString, + Computed: true, + Description: `The count of rows processed.`, + }, + "rules": { + Type: schema.TypeList, + Computed: true, + Description: `A list of all the rules in a job, and their results.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "evaluated_count": { + Type: schema.TypeString, + Computed: true, + Description: `The number of rows a rule was evaluated against. This field is only valid for ColumnMap type rules. +Evaluated count can be configured to either +1. include all rows (default) - with null rows automatically failing rule evaluation, or +2. exclude null rows from the evaluatedCount, by setting ignore_nulls = true.`, + }, + "failing_rows_query": { + Type: schema.TypeString, + Computed: true, + Description: `The query to find rows that did not pass this rule. 
Only applies to ColumnMap and RowCondition rules.`, + }, + "null_count": { + Type: schema.TypeString, + Computed: true, + Description: `The number of rows with null values in the specified column.`, + }, + "pass_ratio": { + Type: schema.TypeInt, + Computed: true, + Description: `The ratio of passedCount / evaluatedCount. This field is only valid for ColumnMap type rules.`, + }, + "passed": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether the rule passed or failed.`, + }, + "passed_count": { + Type: schema.TypeString, + Computed: true, + Description: `The number of rows which passed a rule evaluation. This field is only valid for ColumnMap type rules.`, + }, + "rule": { + Type: schema.TypeList, + Computed: true, + Description: `The rule specified in the DataQualitySpec, as is.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column": { + Type: schema.TypeString, + Optional: true, + Description: `The unnested column which this rule is evaluated against.`, + }, + "dimension": { + Type: schema.TypeString, + Optional: true, + Description: `The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]`, + }, + "ignore_null": { + Type: schema.TypeBool, + Optional: true, + Description: `Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.`, + }, + "threshold": { + Type: schema.TypeInt, + Optional: true, + Description: `The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 
1.0).`, + }, + "non_null_expectation": { + Type: schema.TypeList, + Computed: true, + Description: `ColumnMap rule which evaluates whether each column value is null.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "range_expectation": { + Type: schema.TypeList, + Computed: true, + Description: `ColumnMap rule which evaluates whether each column value lies between a specified range.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_value": { + Type: schema.TypeString, + Optional: true, + Description: `The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.`, + }, + "min_value": { + Type: schema.TypeString, + Optional: true, + Description: `The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.`, + }, + "strict_max_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. +Only relevant if a maxValue has been defined. Default = false.`, + Default: false, + }, + "strict_min_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. +Only relevant if a minValue has been defined. 
Default = false.`, + Default: false, + }, + }, + }, + }, + "regex_expectation": { + Type: schema.TypeList, + Computed: true, + Description: `ColumnMap rule which evaluates whether each column value matches a specified regex.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "regex": { + Type: schema.TypeString, + Optional: true, + Description: `A regular expression the column value is expected to match.`, + }, + }, + }, + }, + "row_condition_expectation": { + Type: schema.TypeList, + Computed: true, + Description: `Table rule which evaluates whether each row passes the specified condition.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sql_expression": { + Type: schema.TypeString, + Optional: true, + Description: `The SQL expression.`, + }, + }, + }, + }, + "set_expectation": { + Type: schema.TypeList, + Computed: true, + Description: `ColumnMap rule which evaluates whether each column value is contained by a specified set.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "values": { + Type: schema.TypeList, + Optional: true, + Description: `Expected values for the column value.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "statistic_range_expectation": { + Type: schema.TypeList, + Computed: true, + Description: `ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_value": { + Type: schema.TypeString, + Optional: true, + Description: `The maximum column statistic value allowed for a row to pass this validation. +At least one of minValue and maxValue need to be provided.`, + }, + "min_value": { + Type: schema.TypeString, + Optional: true, + Description: `The minimum column statistic value allowed for a row to pass this validation. 
+At least one of minValue and maxValue need to be provided.`, + }, + "statistic": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"STATISTIC_UNDEFINED", "MEAN", "MIN", "MAX", ""}), + Description: `column statistics. Possible values: ["STATISTIC_UNDEFINED", "MEAN", "MIN", "MAX"]`, + }, + "strict_max_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. +Only relevant if a maxValue has been defined. Default = false.`, + }, + "strict_min_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. +Only relevant if a minValue has been defined. Default = false.`, + }, + }, + }, + }, + "table_condition_expectation": { + Type: schema.TypeList, + Computed: true, + Description: `Table rule which evaluates whether the provided expression is true.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sql_expression": { + Type: schema.TypeString, + Optional: true, + Description: `The SQL expression.`, + }, + }, + }, + }, + "uniqueness_expectation": { + Type: schema.TypeList, + Computed: true, + Description: `ColumnAggregate rule which evaluates whether the column has duplicates.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + }, + }, + }, + }, + }, + }, + "scanned_data": { + Type: schema.TypeList, + Computed: true, + Description: `The data scanned for this result.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "incremental_field": { + Type: schema.TypeList, + Optional: true, + Description: `The range denoted by values of an incremental field`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "end": { + Type: schema.TypeString, + Optional: true, + Description: `Value that marks the end of the range.`, + }, + 
"field": { + Type: schema.TypeString, + Optional: true, + Description: `The field that contains values which monotonically increases over time (e.g. a timestamp column).`, + }, + "start": { + Type: schema.TypeString, + Optional: true, + Description: `Value that marks the start of the range.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "execution_status": { + Type: schema.TypeList, + Computed: true, + Description: `Status of the data scan execution.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "latest_job_end_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the latest DataScanJob started.`, + }, + "latest_job_start_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the latest DataScanJob ended.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Current state of the DataScan.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `The type of DataScan.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `System generated globally unique ID for the scan. 
This ID will be different if the scan is deleted and re-created with the same name.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the scan was last updated.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataplexDatascanCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandDataplexDatascanDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandDataplexDatascanDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandDataplexDatascanLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + dataProp, err := expandDataplexDatascanData(d.Get("data"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data"); !tpgresource.IsEmptyValue(reflect.ValueOf(dataProp)) && (ok || !reflect.DeepEqual(v, dataProp)) { + obj["data"] = dataProp + } + executionSpecProp, err := expandDataplexDatascanExecutionSpec(d.Get("execution_spec"), d, config) + if err != nil { + return err + } 
else if v, ok := d.GetOkExists("execution_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(executionSpecProp)) && (ok || !reflect.DeepEqual(v, executionSpecProp)) { + obj["executionSpec"] = executionSpecProp + } + dataQualitySpecProp, err := expandDataplexDatascanDataQualitySpec(d.Get("data_quality_spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_quality_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(dataQualitySpecProp)) && (ok || !reflect.DeepEqual(v, dataQualitySpecProp)) { + obj["dataQualitySpec"] = dataQualitySpecProp + } + dataProfileSpecProp, err := expandDataplexDatascanDataProfileSpec(d.Get("data_profile_spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_profile_spec"); ok || !reflect.DeepEqual(v, dataProfileSpecProp) { + obj["dataProfileSpec"] = dataProfileSpecProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataplexBasePath}}projects/{{project}}/locations/{{location}}/dataScans?dataScanId={{data_scan_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Datascan: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Datascan: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Datascan: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/dataScans/{{data_scan_id}}") + if err != nil { + return fmt.Errorf("Error 
constructing id: %s", err) + } + d.SetId(id) + + err = DataplexOperationWaitTime( + config, res, project, "Creating Datascan", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Datascan: %s", err) + } + + log.Printf("[DEBUG] Finished creating Datascan %q: %#v", d.Id(), res) + + return resourceDataplexDatascanRead(d, meta) +} + +func resourceDataplexDatascanRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataplexBasePath}}projects/{{project}}/locations/{{location}}/dataScans/{{data_scan_id}}?view=FULL") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Datascan: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataplexDatascan %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + + if err := d.Set("name", flattenDataplexDatascanName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("uid", flattenDataplexDatascanUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("description", 
flattenDataplexDatascanDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("display_name", flattenDataplexDatascanDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("labels", flattenDataplexDatascanLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("state", flattenDataplexDatascanState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("create_time", flattenDataplexDatascanCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("update_time", flattenDataplexDatascanUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("data", flattenDataplexDatascanData(res["data"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("execution_spec", flattenDataplexDatascanExecutionSpec(res["executionSpec"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("execution_status", flattenDataplexDatascanExecutionStatus(res["executionStatus"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("type", flattenDataplexDatascanType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("data_quality_spec", flattenDataplexDatascanDataQualitySpec(res["dataQualitySpec"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("data_profile_spec", flattenDataplexDatascanDataProfileSpec(res["dataProfileSpec"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: 
%s", err) + } + if err := d.Set("data_quality_result", flattenDataplexDatascanDataQualityResult(res["dataQualityResult"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + if err := d.Set("data_profile_result", flattenDataplexDatascanDataProfileResult(res["dataProfileResult"], d, config)); err != nil { + return fmt.Errorf("Error reading Datascan: %s", err) + } + + return nil +} + +func resourceDataplexDatascanUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Datascan: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandDataplexDatascanDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandDataplexDatascanDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandDataplexDatascanLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + executionSpecProp, err := expandDataplexDatascanExecutionSpec(d.Get("execution_spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("execution_spec"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, executionSpecProp)) { + obj["executionSpec"] = executionSpecProp + } + dataQualitySpecProp, err := expandDataplexDatascanDataQualitySpec(d.Get("data_quality_spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_quality_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dataQualitySpecProp)) { + obj["dataQualitySpec"] = dataQualitySpecProp + } + dataProfileSpecProp, err := expandDataplexDatascanDataProfileSpec(d.Get("data_profile_spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_profile_spec"); ok || !reflect.DeepEqual(v, dataProfileSpecProp) { + obj["dataProfileSpec"] = dataProfileSpecProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataplexBasePath}}projects/{{project}}/locations/{{location}}/dataScans/{{data_scan_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Datascan %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("execution_spec") { + updateMask = append(updateMask, "executionSpec") + } + + if d.HasChange("data_quality_spec") { + updateMask = append(updateMask, "dataQualitySpec") + } + + if d.HasChange("data_profile_spec") { + updateMask = append(updateMask, "dataProfileSpec") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { 
+ billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Datascan %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Datascan %q: %#v", d.Id(), res) + } + + err = DataplexOperationWaitTime( + config, res, project, "Updating Datascan", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceDataplexDatascanRead(d, meta) +} + +func resourceDataplexDatascanDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Datascan: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DataplexBasePath}}projects/{{project}}/locations/{{location}}/dataScans/{{data_scan_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Datascan %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Datascan") + } + + err = DataplexOperationWaitTime( + config, res, project, "Deleting Datascan", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != 
nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Datascan %q: %#v", d.Id(), res) + return nil +} + +func resourceDataplexDatascanImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/dataScans/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/dataScans/{{data_scan_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDataplexDatascanName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanData(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["entity"] = + flattenDataplexDatascanDataEntity(original["entity"], d, config) + transformed["resource"] = + flattenDataplexDatascanDataResource(original["resource"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataEntity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanExecutionSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["trigger"] = + flattenDataplexDatascanExecutionSpecTrigger(original["trigger"], d, config) + transformed["field"] = + flattenDataplexDatascanExecutionSpecField(original["field"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanExecutionSpecTrigger(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["on_demand"] = + flattenDataplexDatascanExecutionSpecTriggerOnDemand(original["onDemand"], d, config) + transformed["schedule"] = + flattenDataplexDatascanExecutionSpecTriggerSchedule(original["schedule"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanExecutionSpecTriggerOnDemand(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := 
make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataplexDatascanExecutionSpecTriggerSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cron"] = + flattenDataplexDatascanExecutionSpecTriggerScheduleCron(original["cron"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanExecutionSpecTriggerScheduleCron(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanExecutionSpecField(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanExecutionStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["latest_job_end_time"] = + flattenDataplexDatascanExecutionStatusLatestJobEndTime(original["latestJobEndTime"], d, config) + transformed["latest_job_start_time"] = + flattenDataplexDatascanExecutionStatusLatestJobStartTime(original["latestJobStartTime"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanExecutionStatusLatestJobEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanExecutionStatusLatestJobStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpec(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["sampling_percent"] = + flattenDataplexDatascanDataQualitySpecSamplingPercent(original["samplingPercent"], d, config) + transformed["row_filter"] = + flattenDataplexDatascanDataQualitySpecRowFilter(original["rowFilter"], d, config) + transformed["rules"] = + flattenDataplexDatascanDataQualitySpecRules(original["rules"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualitySpecSamplingPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRowFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "column": flattenDataplexDatascanDataQualitySpecRulesColumn(original["column"], d, config), + "ignore_null": flattenDataplexDatascanDataQualitySpecRulesIgnoreNull(original["ignoreNull"], d, config), + "dimension": flattenDataplexDatascanDataQualitySpecRulesDimension(original["dimension"], d, config), + "threshold": flattenDataplexDatascanDataQualitySpecRulesThreshold(original["threshold"], d, config), + "range_expectation": flattenDataplexDatascanDataQualitySpecRulesRangeExpectation(original["rangeExpectation"], d, config), + "non_null_expectation": 
flattenDataplexDatascanDataQualitySpecRulesNonNullExpectation(original["nonNullExpectation"], d, config), + "set_expectation": flattenDataplexDatascanDataQualitySpecRulesSetExpectation(original["setExpectation"], d, config), + "regex_expectation": flattenDataplexDatascanDataQualitySpecRulesRegexExpectation(original["regexExpectation"], d, config), + "uniqueness_expectation": flattenDataplexDatascanDataQualitySpecRulesUniquenessExpectation(original["uniquenessExpectation"], d, config), + "statistic_range_expectation": flattenDataplexDatascanDataQualitySpecRulesStatisticRangeExpectation(original["statisticRangeExpectation"], d, config), + "row_condition_expectation": flattenDataplexDatascanDataQualitySpecRulesRowConditionExpectation(original["rowConditionExpectation"], d, config), + "table_condition_expectation": flattenDataplexDatascanDataQualitySpecRulesTableConditionExpectation(original["tableConditionExpectation"], d, config), + }) + } + return transformed +} +func flattenDataplexDatascanDataQualitySpecRulesColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesIgnoreNull(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesDimension(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesRangeExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["min_value"] = + 
flattenDataplexDatascanDataQualitySpecRulesRangeExpectationMinValue(original["minValue"], d, config) + transformed["max_value"] = + flattenDataplexDatascanDataQualitySpecRulesRangeExpectationMaxValue(original["maxValue"], d, config) + transformed["strict_min_enabled"] = + flattenDataplexDatascanDataQualitySpecRulesRangeExpectationStrictMinEnabled(original["strictMinEnabled"], d, config) + transformed["strict_max_enabled"] = + flattenDataplexDatascanDataQualitySpecRulesRangeExpectationStrictMaxEnabled(original["strictMaxEnabled"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualitySpecRulesRangeExpectationMinValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesRangeExpectationMaxValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesRangeExpectationStrictMinEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesRangeExpectationStrictMaxEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesNonNullExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataplexDatascanDataQualitySpecRulesSetExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["values"] = + flattenDataplexDatascanDataQualitySpecRulesSetExpectationValues(original["values"], d, config) + 
return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualitySpecRulesSetExpectationValues(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesRegexExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["regex"] = + flattenDataplexDatascanDataQualitySpecRulesRegexExpectationRegex(original["regex"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualitySpecRulesRegexExpectationRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesUniquenessExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataplexDatascanDataQualitySpecRulesStatisticRangeExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["statistic"] = + flattenDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationStatistic(original["statistic"], d, config) + transformed["min_value"] = + flattenDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationMinValue(original["minValue"], d, config) + transformed["max_value"] = + flattenDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationMaxValue(original["maxValue"], d, config) + transformed["strict_min_enabled"] = + 
flattenDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationStrictMinEnabled(original["strictMinEnabled"], d, config) + transformed["strict_max_enabled"] = + flattenDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationStrictMaxEnabled(original["strictMaxEnabled"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationStatistic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationMinValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationMaxValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationStrictMinEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationStrictMaxEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualitySpecRulesRowConditionExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["sql_expression"] = + flattenDataplexDatascanDataQualitySpecRulesRowConditionExpectationSqlExpression(original["sqlExpression"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualitySpecRulesRowConditionExpectationSqlExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataplexDatascanDataQualitySpecRulesTableConditionExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["sql_expression"] = + flattenDataplexDatascanDataQualitySpecRulesTableConditionExpectationSqlExpression(original["sqlExpression"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualitySpecRulesTableConditionExpectationSqlExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["sampling_percent"] = + flattenDataplexDatascanDataProfileSpecSamplingPercent(original["samplingPercent"], d, config) + transformed["row_filter"] = + flattenDataplexDatascanDataProfileSpecRowFilter(original["rowFilter"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataProfileSpecSamplingPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileSpecRowFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResult(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["passed"] = + flattenDataplexDatascanDataQualityResultPassed(original["passed"], d, config) + transformed["dimensions"] = + 
flattenDataplexDatascanDataQualityResultDimensions(original["dimensions"], d, config) + transformed["rules"] = + flattenDataplexDatascanDataQualityResultRules(original["rules"], d, config) + transformed["row_count"] = + flattenDataplexDatascanDataQualityResultRowCount(original["rowCount"], d, config) + transformed["scanned_data"] = + flattenDataplexDatascanDataQualityResultScannedData(original["scannedData"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualityResultPassed(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultDimensions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "passed": flattenDataplexDatascanDataQualityResultDimensionsPassed(original["passed"], d, config), + }) + } + return transformed +} +func flattenDataplexDatascanDataQualityResultDimensionsPassed(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "rule": flattenDataplexDatascanDataQualityResultRulesRule(original["rule"], d, config), + "passed": 
flattenDataplexDatascanDataQualityResultRulesPassed(original["passed"], d, config), + "evaluated_count": flattenDataplexDatascanDataQualityResultRulesEvaluatedCount(original["evaluatedCount"], d, config), + "passed_count": flattenDataplexDatascanDataQualityResultRulesPassedCount(original["passedCount"], d, config), + "null_count": flattenDataplexDatascanDataQualityResultRulesNullCount(original["nullCount"], d, config), + "pass_ratio": flattenDataplexDatascanDataQualityResultRulesPassRatio(original["passRatio"], d, config), + "failing_rows_query": flattenDataplexDatascanDataQualityResultRulesFailingRowsQuery(original["failingRowsQuery"], d, config), + }) + } + return transformed +} +func flattenDataplexDatascanDataQualityResultRulesRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["column"] = + flattenDataplexDatascanDataQualityResultRulesRuleColumn(original["column"], d, config) + transformed["ignore_null"] = + flattenDataplexDatascanDataQualityResultRulesRuleIgnoreNull(original["ignoreNull"], d, config) + transformed["dimension"] = + flattenDataplexDatascanDataQualityResultRulesRuleDimension(original["dimension"], d, config) + transformed["threshold"] = + flattenDataplexDatascanDataQualityResultRulesRuleThreshold(original["threshold"], d, config) + transformed["range_expectation"] = + flattenDataplexDatascanDataQualityResultRulesRuleRangeExpectation(original["rangeExpectation"], d, config) + transformed["non_null_expectation"] = + flattenDataplexDatascanDataQualityResultRulesRuleNonNullExpectation(original["nonNullExpectation"], d, config) + transformed["set_expectation"] = + flattenDataplexDatascanDataQualityResultRulesRuleSetExpectation(original["setExpectation"], d, config) + transformed["regex_expectation"] = + 
flattenDataplexDatascanDataQualityResultRulesRuleRegexExpectation(original["regexExpectation"], d, config) + transformed["uniqueness_expectation"] = + flattenDataplexDatascanDataQualityResultRulesRuleUniquenessExpectation(original["uniquenessExpectation"], d, config) + transformed["statistic_range_expectation"] = + flattenDataplexDatascanDataQualityResultRulesRuleStatisticRangeExpectation(original["statisticRangeExpectation"], d, config) + transformed["row_condition_expectation"] = + flattenDataplexDatascanDataQualityResultRulesRuleRowConditionExpectation(original["rowConditionExpectation"], d, config) + transformed["table_condition_expectation"] = + flattenDataplexDatascanDataQualityResultRulesRuleTableConditionExpectation(original["tableConditionExpectation"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualityResultRulesRuleColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleIgnoreNull(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleDimension(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataplexDatascanDataQualityResultRulesRuleRangeExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + 
return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["min_value"] = + flattenDataplexDatascanDataQualityResultRulesRuleRangeExpectationMinValue(original["minValue"], d, config) + transformed["max_value"] = + flattenDataplexDatascanDataQualityResultRulesRuleRangeExpectationMaxValue(original["maxValue"], d, config) + transformed["strict_min_enabled"] = + flattenDataplexDatascanDataQualityResultRulesRuleRangeExpectationStrictMinEnabled(original["strictMinEnabled"], d, config) + transformed["strict_max_enabled"] = + flattenDataplexDatascanDataQualityResultRulesRuleRangeExpectationStrictMaxEnabled(original["strictMaxEnabled"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualityResultRulesRuleRangeExpectationMinValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleRangeExpectationMaxValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleRangeExpectationStrictMinEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleRangeExpectationStrictMaxEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleNonNullExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataplexDatascanDataQualityResultRulesRuleSetExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["values"] = + flattenDataplexDatascanDataQualityResultRulesRuleSetExpectationValues(original["values"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualityResultRulesRuleSetExpectationValues(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleRegexExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["regex"] = + flattenDataplexDatascanDataQualityResultRulesRuleRegexExpectationRegex(original["regex"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualityResultRulesRuleRegexExpectationRegex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleUniquenessExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDataplexDatascanDataQualityResultRulesRuleStatisticRangeExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["statistic"] = + flattenDataplexDatascanDataQualityResultRulesRuleStatisticRangeExpectationStatistic(original["statistic"], d, config) + transformed["min_value"] = + 
flattenDataplexDatascanDataQualityResultRulesRuleStatisticRangeExpectationMinValue(original["minValue"], d, config) + transformed["max_value"] = + flattenDataplexDatascanDataQualityResultRulesRuleStatisticRangeExpectationMaxValue(original["maxValue"], d, config) + transformed["strict_min_enabled"] = + flattenDataplexDatascanDataQualityResultRulesRuleStatisticRangeExpectationStrictMinEnabled(original["strictMinEnabled"], d, config) + transformed["strict_max_enabled"] = + flattenDataplexDatascanDataQualityResultRulesRuleStatisticRangeExpectationStrictMaxEnabled(original["strictMaxEnabled"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualityResultRulesRuleStatisticRangeExpectationStatistic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleStatisticRangeExpectationMinValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleStatisticRangeExpectationMaxValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleStatisticRangeExpectationStrictMinEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleStatisticRangeExpectationStrictMaxEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleRowConditionExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["sql_expression"] = + 
flattenDataplexDatascanDataQualityResultRulesRuleRowConditionExpectationSqlExpression(original["sqlExpression"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualityResultRulesRuleRowConditionExpectationSqlExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesRuleTableConditionExpectation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["sql_expression"] = + flattenDataplexDatascanDataQualityResultRulesRuleTableConditionExpectationSqlExpression(original["sqlExpression"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualityResultRulesRuleTableConditionExpectationSqlExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesPassed(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesEvaluatedCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesPassedCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesNullCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRulesPassRatio(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err 
== nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataplexDatascanDataQualityResultRulesFailingRowsQuery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultRowCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultScannedData(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["incremental_field"] = + flattenDataplexDatascanDataQualityResultScannedDataIncrementalField(original["incrementalField"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualityResultScannedDataIncrementalField(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["field"] = + flattenDataplexDatascanDataQualityResultScannedDataIncrementalFieldField(original["field"], d, config) + transformed["start"] = + flattenDataplexDatascanDataQualityResultScannedDataIncrementalFieldStart(original["start"], d, config) + transformed["end"] = + flattenDataplexDatascanDataQualityResultScannedDataIncrementalFieldEnd(original["end"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataQualityResultScannedDataIncrementalFieldField(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataplexDatascanDataQualityResultScannedDataIncrementalFieldStart(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataQualityResultScannedDataIncrementalFieldEnd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResult(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["row_count"] = + flattenDataplexDatascanDataProfileResultRowCount(original["rowCount"], d, config) + transformed["profile"] = + flattenDataplexDatascanDataProfileResultProfile(original["profile"], d, config) + transformed["scanned_data"] = + flattenDataplexDatascanDataProfileResultScannedData(original["scannedData"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataProfileResultRowCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["fields"] = + flattenDataplexDatascanDataProfileResultProfileFields(original["fields"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataProfileResultProfileFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty 
json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDataplexDatascanDataProfileResultProfileFieldsName(original["name"], d, config), + "type": flattenDataplexDatascanDataProfileResultProfileFieldsType(original["type"], d, config), + "mode": flattenDataplexDatascanDataProfileResultProfileFieldsMode(original["mode"], d, config), + "profile": flattenDataplexDatascanDataProfileResultProfileFieldsProfile(original["profile"], d, config), + }) + } + return transformed +} +func flattenDataplexDatascanDataProfileResultProfileFieldsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["null_ratio"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileNullRatio(original["nullRatio"], d, config) + transformed["distinct_ratio"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileDistinctRatio(original["distinctRatio"], d, config) + transformed["top_n_values"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileTopNValues(original["topNValues"], d, config) + transformed["string_profile"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileStringProfile(original["stringProfile"], d, config) + transformed["integer_profile"] = + 
flattenDataplexDatascanDataProfileResultProfileFieldsProfileIntegerProfile(original["integerProfile"], d, config) + transformed["double_profile"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileDoubleProfile(original["doubleProfile"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileNullRatio(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileDistinctRatio(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileTopNValues(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["value"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileTopNValuesValue(original["value"], d, config) + transformed["count"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileTopNValuesCount(original["count"], d, config) + return []interface{}{transformed} +} +func 
flattenDataplexDatascanDataProfileResultProfileFieldsProfileTopNValuesValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileTopNValuesCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileStringProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["min_length"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileStringProfileMinLength(original["minLength"], d, config) + transformed["max_length"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileStringProfileMaxLength(original["maxLength"], d, config) + transformed["average_length"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileStringProfileAverageLength(original["averageLength"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileStringProfileMinLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileStringProfileMaxLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileStringProfileAverageLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := 
int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileIntegerProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["average"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileIntegerProfileAverage(original["average"], d, config) + transformed["standard_deviation"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileIntegerProfileStandardDeviation(original["standardDeviation"], d, config) + transformed["min"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileIntegerProfileMin(original["min"], d, config) + transformed["quartiles"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileIntegerProfileQuartiles(original["quartiles"], d, config) + transformed["max"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileIntegerProfileMax(original["max"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileIntegerProfileAverage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileIntegerProfileStandardDeviation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileIntegerProfileMin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileIntegerProfileQuartiles(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileIntegerProfileMax(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileDoubleProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["average"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileDoubleProfileAverage(original["average"], d, config) + transformed["standard_deviation"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileDoubleProfileStandardDeviation(original["standardDeviation"], d, config) + transformed["min"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileDoubleProfileMin(original["min"], d, config) + transformed["quartiles"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileDoubleProfileQuartiles(original["quartiles"], d, config) + transformed["max"] = + flattenDataplexDatascanDataProfileResultProfileFieldsProfileDoubleProfileMax(original["max"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileDoubleProfileAverage(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileDoubleProfileStandardDeviation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileDoubleProfileMin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileDoubleProfileQuartiles(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultProfileFieldsProfileDoubleProfileMax(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultScannedData(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["incremental_field"] = + flattenDataplexDatascanDataProfileResultScannedDataIncrementalField(original["incrementalField"], d, config) + return 
[]interface{}{transformed} +} +func flattenDataplexDatascanDataProfileResultScannedDataIncrementalField(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["field"] = + flattenDataplexDatascanDataProfileResultScannedDataIncrementalFieldField(original["field"], d, config) + transformed["start"] = + flattenDataplexDatascanDataProfileResultScannedDataIncrementalFieldStart(original["start"], d, config) + transformed["end"] = + flattenDataplexDatascanDataProfileResultScannedDataIncrementalFieldEnd(original["end"], d, config) + return []interface{}{transformed} +} +func flattenDataplexDatascanDataProfileResultScannedDataIncrementalFieldField(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultScannedDataIncrementalFieldStart(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataplexDatascanDataProfileResultScannedDataIncrementalFieldEnd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDataplexDatascanDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func 
expandDataplexDatascanData(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEntity, err := expandDataplexDatascanDataEntity(original["entity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEntity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["entity"] = transformedEntity + } + + transformedResource, err := expandDataplexDatascanDataResource(original["resource"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resource"] = transformedResource + } + + return transformed, nil +} + +func expandDataplexDatascanDataEntity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanExecutionSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTrigger, err := expandDataplexDatascanExecutionSpecTrigger(original["trigger"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTrigger); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["trigger"] = transformedTrigger + } + + transformedField, err := expandDataplexDatascanExecutionSpecField(original["field"], d, config) + if err != 
nil { + return nil, err + } else if val := reflect.ValueOf(transformedField); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["field"] = transformedField + } + + return transformed, nil +} + +func expandDataplexDatascanExecutionSpecTrigger(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedOnDemand, err := expandDataplexDatascanExecutionSpecTriggerOnDemand(original["on_demand"], d, config) + if err != nil { + return nil, err + } else { + transformed["onDemand"] = transformedOnDemand + } + + transformedSchedule, err := expandDataplexDatascanExecutionSpecTriggerSchedule(original["schedule"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["schedule"] = transformedSchedule + } + + return transformed, nil +} + +func expandDataplexDatascanExecutionSpecTriggerOnDemand(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataplexDatascanExecutionSpecTriggerSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCron, err := expandDataplexDatascanExecutionSpecTriggerScheduleCron(original["cron"], d, config) + if err != nil { + return nil, 
err + } else if val := reflect.ValueOf(transformedCron); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cron"] = transformedCron + } + + return transformed, nil +} + +func expandDataplexDatascanExecutionSpecTriggerScheduleCron(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanExecutionSpecField(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSamplingPercent, err := expandDataplexDatascanDataQualitySpecSamplingPercent(original["sampling_percent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSamplingPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["samplingPercent"] = transformedSamplingPercent + } + + transformedRowFilter, err := expandDataplexDatascanDataQualitySpecRowFilter(original["row_filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRowFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rowFilter"] = transformedRowFilter + } + + transformedRules, err := expandDataplexDatascanDataQualitySpecRules(original["rules"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRules); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rules"] = transformedRules + } + + return transformed, nil +} + +func expandDataplexDatascanDataQualitySpecSamplingPercent(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRowFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedColumn, err := expandDataplexDatascanDataQualitySpecRulesColumn(original["column"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["column"] = transformedColumn + } + + transformedIgnoreNull, err := expandDataplexDatascanDataQualitySpecRulesIgnoreNull(original["ignore_null"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIgnoreNull); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ignoreNull"] = transformedIgnoreNull + } + + transformedDimension, err := expandDataplexDatascanDataQualitySpecRulesDimension(original["dimension"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDimension); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dimension"] = transformedDimension + } + + transformedThreshold, err := expandDataplexDatascanDataQualitySpecRulesThreshold(original["threshold"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["threshold"] = transformedThreshold + } + + transformedRangeExpectation, err := expandDataplexDatascanDataQualitySpecRulesRangeExpectation(original["range_expectation"], 
d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRangeExpectation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rangeExpectation"] = transformedRangeExpectation + } + + transformedNonNullExpectation, err := expandDataplexDatascanDataQualitySpecRulesNonNullExpectation(original["non_null_expectation"], d, config) + if err != nil { + return nil, err + } else { + transformed["nonNullExpectation"] = transformedNonNullExpectation + } + + transformedSetExpectation, err := expandDataplexDatascanDataQualitySpecRulesSetExpectation(original["set_expectation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSetExpectation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["setExpectation"] = transformedSetExpectation + } + + transformedRegexExpectation, err := expandDataplexDatascanDataQualitySpecRulesRegexExpectation(original["regex_expectation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRegexExpectation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["regexExpectation"] = transformedRegexExpectation + } + + transformedUniquenessExpectation, err := expandDataplexDatascanDataQualitySpecRulesUniquenessExpectation(original["uniqueness_expectation"], d, config) + if err != nil { + return nil, err + } else { + transformed["uniquenessExpectation"] = transformedUniquenessExpectation + } + + transformedStatisticRangeExpectation, err := expandDataplexDatascanDataQualitySpecRulesStatisticRangeExpectation(original["statistic_range_expectation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStatisticRangeExpectation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["statisticRangeExpectation"] = transformedStatisticRangeExpectation + } + + transformedRowConditionExpectation, err := 
expandDataplexDatascanDataQualitySpecRulesRowConditionExpectation(original["row_condition_expectation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRowConditionExpectation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rowConditionExpectation"] = transformedRowConditionExpectation + } + + transformedTableConditionExpectation, err := expandDataplexDatascanDataQualitySpecRulesTableConditionExpectation(original["table_condition_expectation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTableConditionExpectation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tableConditionExpectation"] = transformedTableConditionExpectation + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataplexDatascanDataQualitySpecRulesColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesIgnoreNull(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesDimension(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesRangeExpectation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMinValue, err := 
expandDataplexDatascanDataQualitySpecRulesRangeExpectationMinValue(original["min_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minValue"] = transformedMinValue + } + + transformedMaxValue, err := expandDataplexDatascanDataQualitySpecRulesRangeExpectationMaxValue(original["max_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxValue"] = transformedMaxValue + } + + transformedStrictMinEnabled, err := expandDataplexDatascanDataQualitySpecRulesRangeExpectationStrictMinEnabled(original["strict_min_enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStrictMinEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["strictMinEnabled"] = transformedStrictMinEnabled + } + + transformedStrictMaxEnabled, err := expandDataplexDatascanDataQualitySpecRulesRangeExpectationStrictMaxEnabled(original["strict_max_enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStrictMaxEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["strictMaxEnabled"] = transformedStrictMaxEnabled + } + + return transformed, nil +} + +func expandDataplexDatascanDataQualitySpecRulesRangeExpectationMinValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesRangeExpectationMaxValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesRangeExpectationStrictMinEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + 
return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesRangeExpectationStrictMaxEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesNonNullExpectation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataplexDatascanDataQualitySpecRulesSetExpectation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedValues, err := expandDataplexDatascanDataQualitySpecRulesSetExpectationValues(original["values"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValues); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["values"] = transformedValues + } + + return transformed, nil +} + +func expandDataplexDatascanDataQualitySpecRulesSetExpectationValues(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesRegexExpectation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRegex, err := expandDataplexDatascanDataQualitySpecRulesRegexExpectationRegex(original["regex"], 
d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["regex"] = transformedRegex + } + + return transformed, nil +} + +func expandDataplexDatascanDataQualitySpecRulesRegexExpectationRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesUniquenessExpectation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandDataplexDatascanDataQualitySpecRulesStatisticRangeExpectation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedStatistic, err := expandDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationStatistic(original["statistic"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStatistic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["statistic"] = transformedStatistic + } + + transformedMinValue, err := expandDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationMinValue(original["min_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minValue"] = transformedMinValue + } + + transformedMaxValue, err := 
expandDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationMaxValue(original["max_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxValue"] = transformedMaxValue + } + + transformedStrictMinEnabled, err := expandDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationStrictMinEnabled(original["strict_min_enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStrictMinEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["strictMinEnabled"] = transformedStrictMinEnabled + } + + transformedStrictMaxEnabled, err := expandDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationStrictMaxEnabled(original["strict_max_enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStrictMaxEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["strictMaxEnabled"] = transformedStrictMaxEnabled + } + + return transformed, nil +} + +func expandDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationStatistic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationMinValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationMaxValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationStrictMinEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataplexDatascanDataQualitySpecRulesStatisticRangeExpectationStrictMaxEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesRowConditionExpectation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSqlExpression, err := expandDataplexDatascanDataQualitySpecRulesRowConditionExpectationSqlExpression(original["sql_expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSqlExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sqlExpression"] = transformedSqlExpression + } + + return transformed, nil +} + +func expandDataplexDatascanDataQualitySpecRulesRowConditionExpectationSqlExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataQualitySpecRulesTableConditionExpectation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSqlExpression, err := expandDataplexDatascanDataQualitySpecRulesTableConditionExpectationSqlExpression(original["sql_expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSqlExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sqlExpression"] = transformedSqlExpression + } + + return transformed, nil +} + +func 
expandDataplexDatascanDataQualitySpecRulesTableConditionExpectationSqlExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataProfileSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSamplingPercent, err := expandDataplexDatascanDataProfileSpecSamplingPercent(original["sampling_percent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSamplingPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["samplingPercent"] = transformedSamplingPercent + } + + transformedRowFilter, err := expandDataplexDatascanDataProfileSpecRowFilter(original["row_filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRowFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rowFilter"] = transformedRowFilter + } + + return transformed, nil +} + +func expandDataplexDatascanDataProfileSpecSamplingPercent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataplexDatascanDataProfileSpecRowFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_datascan_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_datascan_sweeper.go new file mode 
100644 index 0000000000..68c75b54e8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_datascan_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dataplex + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DataplexDatascan", testSweepDataplexDatascan) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDataplexDatascan(region string) error { + resourceName := "DataplexDatascan" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := 
&tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://dataplex.googleapis.com/v1/projects/{{project}}/locations/{{location}}/dataScans", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["datascans"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://dataplex.googleapis.com/v1/projects/{{project}}/locations/{{location}}/dataScans/{{data_scan_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataplex_lake.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_lake.go similarity index 83% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataplex_lake.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_lake.go index 9390a97a4b..66d7e14bae 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataplex_lake.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_lake.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package dataplex import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceDataplexLake() *schema.Resource { @@ -55,7 +62,7 @@ func ResourceDataplexLake() *schema.Resource { "name": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The name of the lake.", }, @@ -91,7 +98,7 @@ func ResourceDataplexLake() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -209,8 +216,8 @@ func DataplexLakeMetastoreStatusSchema() *schema.Resource { } func resourceDataplexLakeCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { 
return err } @@ -220,7 +227,7 @@ func resourceDataplexLakeCreate(d *schema.ResourceData, meta interface{}) error Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), DisplayName: dcl.String(d.Get("display_name").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Metastore: expandDataplexLakeMetastore(d.Get("metastore")), Project: dcl.String(project), } @@ -230,18 +237,18 @@ func resourceDataplexLakeCreate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -263,8 +270,8 @@ func resourceDataplexLakeCreate(d *schema.ResourceData, meta interface{}) error } func resourceDataplexLakeRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -274,22 +281,22 @@ func 
resourceDataplexLakeRead(d *schema.ResourceData, meta interface{}) error { Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), DisplayName: dcl.String(d.Get("display_name").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Metastore: expandDataplexLakeMetastore(d.Get("metastore")), Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -298,7 +305,7 @@ func resourceDataplexLakeRead(d *schema.ResourceData, meta interface{}) error { res, err := client.GetLake(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("DataplexLake %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("location", res.Location); err != nil { @@ -347,8 +354,8 @@ func resourceDataplexLakeRead(d *schema.ResourceData, meta interface{}) error { return nil } func resourceDataplexLakeUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := 
meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -358,23 +365,23 @@ func resourceDataplexLakeUpdate(d *schema.ResourceData, meta interface{}) error Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), DisplayName: dcl.String(d.Get("display_name").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Metastore: expandDataplexLakeMetastore(d.Get("metastore")), Project: dcl.String(project), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -396,8 +403,8 @@ func resourceDataplexLakeUpdate(d *schema.ResourceData, meta interface{}) error } func resourceDataplexLakeDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -407,23 +414,23 @@ func resourceDataplexLakeDelete(d *schema.ResourceData, meta interface{}) error 
Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), DisplayName: dcl.String(d.Get("display_name").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Metastore: expandDataplexLakeMetastore(d.Get("metastore")), Project: dcl.String(project), } log.Printf("[DEBUG] Deleting Lake %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -438,9 +445,9 @@ func resourceDataplexLakeDelete(d *schema.ResourceData, meta interface{}) error } func resourceDataplexLakeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -449,7 +456,7 @@ func resourceDataplexLakeImport(d *schema.ResourceData, meta interface{}) ([]*sc } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, 
"projects/{{project}}/locations/{{location}}/lakes/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_lake_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_lake_sweeper.go new file mode 100644 index 0000000000..8653fb8350 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_lake_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package dataplex + +import ( + "context" + "log" + "testing" + + dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DataplexLake", testSweepDataplexLake) +} + +func testSweepDataplexLake(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for DataplexLake") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLDataplexClient(config, config.UserAgent, "", 0) + err = client.DeleteAllLake(context.Background(), d["project"], d["location"], isDeletableDataplexLake) + if err != nil { + return err + } + return nil +} + +func isDeletableDataplexLake(r *dataplex.Lake) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataplex_zone.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_zone.go similarity index 87% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataplex_zone.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_zone.go index 83bbb02aac..49e2caf924 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataplex_zone.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataplex/resource_dataplex_zone.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package dataplex import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" dataplex "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceDataplexZone() *schema.Resource { @@ -70,7 +77,7 @@ func ResourceDataplexZone() *schema.Resource { "name": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The name of the zone.", }, @@ -114,7 +121,7 @@ func ResourceDataplexZone() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -289,8 +296,8 @@ func DataplexZoneAssetStatusSchema() *schema.Resource { } func resourceDataplexZoneCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -304,7 +311,7 @@ func resourceDataplexZoneCreate(d *schema.ResourceData, meta interface{}) error Type: dataplex.ZoneTypeEnumRef(d.Get("type").(string)), Description: dcl.String(d.Get("description").(string)), DisplayName: dcl.String(d.Get("display_name").(string)), - Labels: 
checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), } @@ -313,18 +320,18 @@ func resourceDataplexZoneCreate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -346,8 +353,8 @@ func resourceDataplexZoneCreate(d *schema.ResourceData, meta interface{}) error } func resourceDataplexZoneRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -361,21 +368,21 @@ func resourceDataplexZoneRead(d *schema.ResourceData, meta interface{}) error { Type: dataplex.ZoneTypeEnumRef(d.Get("type").(string)), Description: dcl.String(d.Get("description").(string)), DisplayName: dcl.String(d.Get("display_name").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), 
Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -384,7 +391,7 @@ func resourceDataplexZoneRead(d *schema.ResourceData, meta interface{}) error { res, err := client.GetZone(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("DataplexZone %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("discovery_spec", flattenDataplexZoneDiscoverySpec(res.DiscoverySpec)); err != nil { @@ -436,8 +443,8 @@ func resourceDataplexZoneRead(d *schema.ResourceData, meta interface{}) error { return nil } func resourceDataplexZoneUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -451,22 +458,22 @@ func resourceDataplexZoneUpdate(d *schema.ResourceData, meta interface{}) error Type: dataplex.ZoneTypeEnumRef(d.Get("type").(string)), Description: dcl.String(d.Get("description").(string)), DisplayName: 
dcl.String(d.Get("display_name").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -488,8 +495,8 @@ func resourceDataplexZoneUpdate(d *schema.ResourceData, meta interface{}) error } func resourceDataplexZoneDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -503,22 +510,22 @@ func resourceDataplexZoneDelete(d *schema.ResourceData, meta interface{}) error Type: dataplex.ZoneTypeEnumRef(d.Get("type").(string)), Description: dcl.String(d.Get("description").(string)), DisplayName: dcl.String(d.Get("display_name").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), } log.Printf("[DEBUG] Deleting Zone %q", d.Id()) - userAgent, err := 
generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataplexClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -533,9 +540,9 @@ func resourceDataplexZoneDelete(d *schema.ResourceData, meta interface{}) error } func resourceDataplexZoneImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/lakes/(?P[^/]+)/zones/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", @@ -544,7 +551,7 @@ func resourceDataplexZoneImport(d *schema.ResourceData, meta interface{}) ([]*sc } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -565,8 +572,8 @@ func expandDataplexZoneDiscoverySpec(o interface{}) *dataplex.ZoneDiscoverySpec return &dataplex.ZoneDiscoverySpec{ Enabled: 
dcl.Bool(obj["enabled"].(bool)), CsvOptions: expandDataplexZoneDiscoverySpecCsvOptions(obj["csv_options"]), - ExcludePatterns: expandStringArray(obj["exclude_patterns"]), - IncludePatterns: expandStringArray(obj["include_patterns"]), + ExcludePatterns: tpgdclresource.ExpandStringArray(obj["exclude_patterns"]), + IncludePatterns: tpgdclresource.ExpandStringArray(obj["include_patterns"]), JsonOptions: expandDataplexZoneDiscoverySpecJsonOptions(obj["json_options"]), Schedule: dcl.StringOrNil(obj["schedule"].(string)), } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/dataproc_cluster_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/dataproc_cluster_operation.go new file mode 100644 index 0000000000..5d98d98c59 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/dataproc_cluster_operation.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package dataproc + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/dataproc/v1" +) + +type DataprocClusterOperationWaiter struct { + Service *dataproc.Service + tpgresource.CommonOperationWaiter +} + +func (w *DataprocClusterOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + return w.Service.Projects.Regions.Operations.Get(w.Op.Name).Do() +} + +func DataprocClusterOperationWait(config *transport_tpg.Config, op *dataproc.Operation, activity, userAgent string, timeout time.Duration) error { + w := &DataprocClusterOperationWaiter{ + Service: config.NewDataprocClient(userAgent), + } + if err := w.SetOp(op); err != nil { + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dataproc_job_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/dataproc_job_operation.go similarity index 77% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dataproc_job_operation.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/dataproc_job_operation.go index 39e344b1d5..924c0f48e1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dataproc_job_operation.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/dataproc_job_operation.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package dataproc import ( "fmt" "net/http" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/dataproc/v1" ) @@ -66,14 +71,14 @@ func (w *DataprocJobOperationWaiter) TargetStates() []string { return []string{"CANCELLED", "DONE", "ATTEMPT_FAILURE", "ERROR", "RUNNING"} } -func dataprocJobOperationWait(config *Config, region, projectId, jobId, activity, userAgent string, timeout time.Duration) error { +func DataprocJobOperationWait(config *transport_tpg.Config, region, projectId, jobId, activity, userAgent string, timeout time.Duration) error { w := &DataprocJobOperationWaiter{ Service: config.NewDataprocClient(userAgent), Region: region, ProjectId: projectId, JobId: jobId, } - return OperationWait(w, activity, timeout, config.PollInterval) + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) } type DataprocDeleteJobOperationWaiter struct { @@ -94,7 +99,7 @@ func (w *DataprocDeleteJobOperationWaiter) QueryOp() (interface{}, error) { } job, err := w.Service.Projects.Regions.Jobs.Get(w.ProjectId, w.Region, w.JobId).Do() if err != nil { - if IsGoogleApiErrorWithCode(err, http.StatusNotFound) { + if transport_tpg.IsGoogleApiErrorWithCode(err, http.StatusNotFound) { w.Status = "DELETED" return job, nil } @@ -104,7 +109,7 @@ func (w *DataprocDeleteJobOperationWaiter) QueryOp() (interface{}, error) { return job, err } -func dataprocDeleteOperationWait(config *Config, region, projectId, jobId, activity, userAgent string, timeout time.Duration) error { +func DataprocDeleteOperationWait(config *transport_tpg.Config, region, projectId, jobId, activity, userAgent string, timeout time.Duration) error { w := &DataprocDeleteJobOperationWaiter{ DataprocJobOperationWaiter{ Service: config.NewDataprocClient(userAgent), @@ -113,5 +118,5 @@ func dataprocDeleteOperationWait(config *Config, 
region, projectId, jobId, activ JobId: jobId, }, } - return OperationWait(w, activity, timeout, config.PollInterval) + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/iam_dataproc_autoscaling_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/iam_dataproc_autoscaling_policy.go new file mode 100644 index 0000000000..3c6998f799 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/iam_dataproc_autoscaling_policy.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dataproc + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var DataprocAutoscalingPolicyIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "policy_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type DataprocAutoscalingPolicyIamUpdater struct { + project string + location string + policyId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func DataprocAutoscalingPolicyIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("policy_id"); ok { + values["policy_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/autoscalingPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("policy_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocAutoscalingPolicyIamUpdater{ + project: values["project"], + location: values["location"], + policyId: values["policy_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("policy_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting policy_id: %s", err) + } + + return u, nil +} + +func DataprocAutoscalingPolicyIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/autoscalingPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocAutoscalingPolicyIamUpdater{ + project: values["project"], + location: values["location"], + policyId: values["policy_id"], + d: d, + Config: config, + } + if err := d.Set("policy_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting policy_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataprocAutoscalingPolicyIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := 
u.qualifyAutoscalingPolicyUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataprocAutoscalingPolicyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyAutoscalingPolicyUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataprocAutoscalingPolicyIamUpdater) qualifyAutoscalingPolicyUrl(methodIdentifier string) (string, error) { + urlTemplate := 
fmt.Sprintf("{{DataprocBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/autoscalingPolicies/%s", u.project, u.location, u.policyId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataprocAutoscalingPolicyIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/autoscalingPolicies/%s", u.project, u.location, u.policyId) +} + +func (u *DataprocAutoscalingPolicyIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataproc-autoscalingpolicy-%s", u.GetResourceId()) +} + +func (u *DataprocAutoscalingPolicyIamUpdater) DescribeResource() string { + return fmt.Sprintf("dataproc autoscalingpolicy %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataproc_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/iam_dataproc_cluster.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataproc_cluster.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/iam_dataproc_cluster.go index 730a40ae4d..33d1f063f8 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataproc_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/iam_dataproc_cluster.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package dataproc import ( "fmt" "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/cloudresourcemanager/v1" "google.golang.org/api/dataproc/v1" ) @@ -33,17 +38,17 @@ type DataprocClusterIamUpdater struct { project string region string cluster string - d TerraformResourceData - Config *Config + d tpgresource.TerraformResourceData + Config *transport_tpg.Config } -func NewDataprocClusterUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) +func NewDataprocClusterUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return nil, err } @@ -64,8 +69,8 @@ func NewDataprocClusterUpdater(d TerraformResourceData, config *Config) (Resourc }, nil } -func DataprocClusterIdParseFunc(d *schema.ResourceData, config *Config) error { - fv, err := parseRegionalFieldValue("clusters", d.Id(), "project", "region", "zone", d, config, true) +func DataprocClusterIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + fv, err := tpgresource.ParseRegionalFieldValue("clusters", d.Id(), "project", "region", "zone", d, config, true) if err != nil { return err } @@ -88,7 +93,7 @@ func DataprocClusterIdParseFunc(d *schema.ResourceData, config *Config) error { func (u *DataprocClusterIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { req := &dataproc.GetIamPolicyRequest{} - userAgent, err := 
generateUserAgentString(u.d, u.Config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) if err != nil { return nil, err } @@ -112,7 +117,7 @@ func (u *DataprocClusterIamUpdater) SetResourceIamPolicy(policy *cloudresourcema return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) } - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataproc_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/iam_dataproc_job.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataproc_job.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/iam_dataproc_job.go index f16c1069a6..1e3ff90ef4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_dataproc_job.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/iam_dataproc_job.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package dataproc import ( "fmt" "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/cloudresourcemanager/v1" "google.golang.org/api/dataproc/v1" ) @@ -33,17 +38,17 @@ type DataprocJobIamUpdater struct { project string region string jobId string - d TerraformResourceData - Config *Config + d tpgresource.TerraformResourceData + Config *transport_tpg.Config } -func NewDataprocJobUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) +func NewDataprocJobUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return nil, err } @@ -64,8 +69,8 @@ func NewDataprocJobUpdater(d TerraformResourceData, config *Config) (ResourceIam }, nil } -func DataprocJobIdParseFunc(d *schema.ResourceData, config *Config) error { - fv, err := parseRegionalFieldValue("jobs", d.Id(), "project", "region", "zone", d, config, true) +func DataprocJobIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + fv, err := tpgresource.ParseRegionalFieldValue("jobs", d.Id(), "project", "region", "zone", d, config, true) if err != nil { return err } @@ -88,7 +93,7 @@ func DataprocJobIdParseFunc(d *schema.ResourceData, config *Config) error { func (u *DataprocJobIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { req := &dataproc.GetIamPolicyRequest{} - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) + userAgent, 
err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) if err != nil { return nil, err } @@ -112,7 +117,7 @@ func (u *DataprocJobIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanage return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) } - userAgent, err := generateUserAgentString(u.d, u.Config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) if err != nil { return err } @@ -140,7 +145,7 @@ func (u *DataprocJobIamUpdater) DescribeResource() string { func resourceManagerToDataprocPolicy(p *cloudresourcemanager.Policy) (*dataproc.Policy, error) { out := &dataproc.Policy{} - err := Convert(p, out) + err := tpgresource.Convert(p, out) if err != nil { return nil, errwrap.Wrapf("Cannot convert a dataproc policy to a cloudresourcemanager policy: {{err}}", err) } @@ -149,7 +154,7 @@ func resourceManagerToDataprocPolicy(p *cloudresourcemanager.Policy) (*dataproc. func dataprocToResourceManagerPolicy(p *dataproc.Policy) (*cloudresourcemanager.Policy, error) { out := &cloudresourcemanager.Policy{} - err := Convert(p, out) + err := tpgresource.Convert(p, out) if err != nil { return nil, errwrap.Wrapf("Cannot convert a cloudresourcemanager policy to a dataproc policy: {{err}}", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_autoscaling_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_autoscaling_policy.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_autoscaling_policy.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_autoscaling_policy.go index 715641b2d6..d69334e8fa 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_autoscaling_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_autoscaling_policy.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package dataproc import ( "fmt" @@ -21,6 +24,9 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceDataprocAutoscalingPolicy() *schema.Resource { @@ -241,8 +247,8 @@ only on primary workers, the cluster will use primary workers only and no second } func resourceDataprocAutoscalingPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -251,29 +257,29 @@ func resourceDataprocAutoscalingPolicyCreate(d *schema.ResourceData, meta interf idProp, err := expandDataprocAutoscalingPolicyPolicyId(d.Get("policy_id"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("policy_id"); !isEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { + } else if v, ok := d.GetOkExists("policy_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { obj["id"] = idProp } workerConfigProp, err := 
expandDataprocAutoscalingPolicyWorkerConfig(d.Get("worker_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("worker_config"); !isEmptyValue(reflect.ValueOf(workerConfigProp)) && (ok || !reflect.DeepEqual(v, workerConfigProp)) { + } else if v, ok := d.GetOkExists("worker_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(workerConfigProp)) && (ok || !reflect.DeepEqual(v, workerConfigProp)) { obj["workerConfig"] = workerConfigProp } secondaryWorkerConfigProp, err := expandDataprocAutoscalingPolicySecondaryWorkerConfig(d.Get("secondary_worker_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("secondary_worker_config"); !isEmptyValue(reflect.ValueOf(secondaryWorkerConfigProp)) && (ok || !reflect.DeepEqual(v, secondaryWorkerConfigProp)) { + } else if v, ok := d.GetOkExists("secondary_worker_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(secondaryWorkerConfigProp)) && (ok || !reflect.DeepEqual(v, secondaryWorkerConfigProp)) { obj["secondaryWorkerConfig"] = secondaryWorkerConfigProp } basicAlgorithmProp, err := expandDataprocAutoscalingPolicyBasicAlgorithm(d.Get("basic_algorithm"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("basic_algorithm"); !isEmptyValue(reflect.ValueOf(basicAlgorithmProp)) && (ok || !reflect.DeepEqual(v, basicAlgorithmProp)) { + } else if v, ok := d.GetOkExists("basic_algorithm"); !tpgresource.IsEmptyValue(reflect.ValueOf(basicAlgorithmProp)) && (ok || !reflect.DeepEqual(v, basicAlgorithmProp)) { obj["basicAlgorithm"] = basicAlgorithmProp } - url, err := replaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies") + url, err := tpgresource.ReplaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies") if err != nil { return err } @@ -281,18 +287,26 @@ func resourceDataprocAutoscalingPolicyCreate(d *schema.ResourceData, meta interf log.Printf("[DEBUG] 
Creating new AutoscalingPolicy: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for AutoscalingPolicy: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating AutoscalingPolicy: %s", err) } @@ -301,7 +315,7 @@ func resourceDataprocAutoscalingPolicyCreate(d *schema.ResourceData, meta interf } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -313,33 +327,39 @@ func resourceDataprocAutoscalingPolicyCreate(d *schema.ResourceData, meta interf } func resourceDataprocAutoscalingPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") + url, err := tpgresource.ReplaceVars(d, config, 
"{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for AutoscalingPolicy: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DataprocAutoscalingPolicy %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataprocAutoscalingPolicy %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -366,15 +386,15 @@ func resourceDataprocAutoscalingPolicyRead(d *schema.ResourceData, meta interfac } func resourceDataprocAutoscalingPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for AutoscalingPolicy: %s", err) } @@ -384,29 +404,29 @@ func resourceDataprocAutoscalingPolicyUpdate(d *schema.ResourceData, meta interf idProp, err := expandDataprocAutoscalingPolicyPolicyId(d.Get("policy_id"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("policy_id"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idProp)) { + } else if v, ok := d.GetOkExists("policy_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idProp)) { obj["id"] = idProp } workerConfigProp, err := expandDataprocAutoscalingPolicyWorkerConfig(d.Get("worker_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("worker_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, workerConfigProp)) { + } else if v, ok := d.GetOkExists("worker_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, workerConfigProp)) { obj["workerConfig"] = workerConfigProp } secondaryWorkerConfigProp, err := expandDataprocAutoscalingPolicySecondaryWorkerConfig(d.Get("secondary_worker_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("secondary_worker_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, secondaryWorkerConfigProp)) { + } else if v, ok := d.GetOkExists("secondary_worker_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, secondaryWorkerConfigProp)) { obj["secondaryWorkerConfig"] = secondaryWorkerConfigProp } basicAlgorithmProp, err := expandDataprocAutoscalingPolicyBasicAlgorithm(d.Get("basic_algorithm"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("basic_algorithm"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, basicAlgorithmProp)) { + } else if v, ok := d.GetOkExists("basic_algorithm"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, basicAlgorithmProp)) { obj["basicAlgorithm"] = basicAlgorithmProp } - url, err := replaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") + url, err := tpgresource.ReplaceVars(d, config, 
"{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") if err != nil { return err } @@ -414,11 +434,19 @@ func resourceDataprocAutoscalingPolicyUpdate(d *schema.ResourceData, meta interf log.Printf("[DEBUG] Updating AutoscalingPolicy %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating AutoscalingPolicy %q: %s", d.Id(), err) @@ -430,21 +458,21 @@ func resourceDataprocAutoscalingPolicyUpdate(d *schema.ResourceData, meta interf } func resourceDataprocAutoscalingPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for AutoscalingPolicy: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") if err != nil { return err } @@ -453,13 +481,21 @@ func 
resourceDataprocAutoscalingPolicyDelete(d *schema.ResourceData, meta interf log.Printf("[DEBUG] Deleting AutoscalingPolicy %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "AutoscalingPolicy") + return transport_tpg.HandleNotFoundError(err, d, "AutoscalingPolicy") } log.Printf("[DEBUG] Finished deleting AutoscalingPolicy %q: %#v", d.Id(), res) @@ -467,8 +503,8 @@ func resourceDataprocAutoscalingPolicyDelete(d *schema.ResourceData, meta interf } func resourceDataprocAutoscalingPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/autoscalingPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -477,7 +513,7 @@ func resourceDataprocAutoscalingPolicyImport(d *schema.ResourceData, meta interf } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -486,15 +522,15 @@ func resourceDataprocAutoscalingPolicyImport(d 
*schema.ResourceData, meta interf return []*schema.ResourceData{d}, nil } -func flattenDataprocAutoscalingPolicyPolicyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyPolicyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataprocAutoscalingPolicyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataprocAutoscalingPolicyWorkerConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyWorkerConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -511,10 +547,10 @@ func flattenDataprocAutoscalingPolicyWorkerConfig(v interface{}, d *schema.Resou flattenDataprocAutoscalingPolicyWorkerConfigWeight(original["weight"], d, config) return []interface{}{transformed} } -func flattenDataprocAutoscalingPolicyWorkerConfigMinInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyWorkerConfigMinInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -528,10 +564,10 @@ func flattenDataprocAutoscalingPolicyWorkerConfigMinInstances(v interface{}, d * return v // let terraform core handle it otherwise } -func flattenDataprocAutoscalingPolicyWorkerConfigMaxInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyWorkerConfigMaxInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -545,10 +581,10 @@ func flattenDataprocAutoscalingPolicyWorkerConfigMaxInstances(v interface{}, d * return v // let terraform core handle it otherwise } -func flattenDataprocAutoscalingPolicyWorkerConfigWeight(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyWorkerConfigWeight(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -562,7 +598,7 @@ func flattenDataprocAutoscalingPolicyWorkerConfigWeight(v interface{}, d *schema return v // let terraform core handle it otherwise } -func flattenDataprocAutoscalingPolicySecondaryWorkerConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicySecondaryWorkerConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -579,10 +615,10 @@ func flattenDataprocAutoscalingPolicySecondaryWorkerConfig(v interface{}, d *sch flattenDataprocAutoscalingPolicySecondaryWorkerConfigWeight(original["weight"], d, config) return []interface{}{transformed} } -func flattenDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -596,10 +632,10 @@ func flattenDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(v interfa return v // let terraform core handle it otherwise } -func flattenDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -613,10 +649,10 @@ func flattenDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(v interfa return v // let terraform core handle it otherwise } -func flattenDataprocAutoscalingPolicySecondaryWorkerConfigWeight(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicySecondaryWorkerConfigWeight(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -630,7 +666,7 @@ func flattenDataprocAutoscalingPolicySecondaryWorkerConfigWeight(v interface{}, return v // let terraform core handle it otherwise } -func flattenDataprocAutoscalingPolicyBasicAlgorithm(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyBasicAlgorithm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -645,11 +681,11 @@ func flattenDataprocAutoscalingPolicyBasicAlgorithm(v interface{}, d *schema.Res 
flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(original["yarnConfig"], d, config) return []interface{}{transformed} } -func flattenDataprocAutoscalingPolicyBasicAlgorithmCooldownPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyBasicAlgorithmCooldownPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -670,31 +706,31 @@ func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(v interface{}, d * flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(original["scaleDownMinWorkerFraction"], d, config) return []interface{}{transformed} } -func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigGracefulDecommissionTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigGracefulDecommissionTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpFactor(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpFactor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownFactor(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownFactor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpMinWorkerFraction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpMinWorkerFraction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandDataprocAutoscalingPolicyPolicyId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicyPolicyId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataprocAutoscalingPolicyWorkerConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicyWorkerConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -706,40 +742,40 @@ func expandDataprocAutoscalingPolicyWorkerConfig(v interface{}, d TerraformResou transformedMinInstances, err := expandDataprocAutoscalingPolicyWorkerConfigMinInstances(original["min_instances"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinInstances); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minInstances"] = transformedMinInstances } transformedMaxInstances, err := expandDataprocAutoscalingPolicyWorkerConfigMaxInstances(original["max_instances"], d, config) if err != nil { return nil, 
err - } else if val := reflect.ValueOf(transformedMaxInstances); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxInstances"] = transformedMaxInstances } transformedWeight, err := expandDataprocAutoscalingPolicyWorkerConfigWeight(original["weight"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weight"] = transformedWeight } return transformed, nil } -func expandDataprocAutoscalingPolicyWorkerConfigMinInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicyWorkerConfigMinInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataprocAutoscalingPolicyWorkerConfigMaxInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicyWorkerConfigMaxInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataprocAutoscalingPolicyWorkerConfigWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicyWorkerConfigWeight(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataprocAutoscalingPolicySecondaryWorkerConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicySecondaryWorkerConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { 
return nil, nil @@ -751,40 +787,40 @@ func expandDataprocAutoscalingPolicySecondaryWorkerConfig(v interface{}, d Terra transformedMinInstances, err := expandDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(original["min_instances"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinInstances); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minInstances"] = transformedMinInstances } transformedMaxInstances, err := expandDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(original["max_instances"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxInstances); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxInstances"] = transformedMaxInstances } transformedWeight, err := expandDataprocAutoscalingPolicySecondaryWorkerConfigWeight(original["weight"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weight"] = transformedWeight } return transformed, nil } -func expandDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataprocAutoscalingPolicySecondaryWorkerConfigWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicySecondaryWorkerConfigWeight(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataprocAutoscalingPolicyBasicAlgorithm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicyBasicAlgorithm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -796,25 +832,25 @@ func expandDataprocAutoscalingPolicyBasicAlgorithm(v interface{}, d TerraformRes transformedCooldownPeriod, err := expandDataprocAutoscalingPolicyBasicAlgorithmCooldownPeriod(original["cooldown_period"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCooldownPeriod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCooldownPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cooldownPeriod"] = transformedCooldownPeriod } transformedYarnConfig, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(original["yarn_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedYarnConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedYarnConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["yarnConfig"] = transformedYarnConfig } return transformed, nil } -func expandDataprocAutoscalingPolicyBasicAlgorithmCooldownPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicyBasicAlgorithmCooldownPeriod(v interface{}, 
d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -826,57 +862,57 @@ func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(v interface{}, d Te transformedGracefulDecommissionTimeout, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigGracefulDecommissionTimeout(original["graceful_decommission_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGracefulDecommissionTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGracefulDecommissionTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["gracefulDecommissionTimeout"] = transformedGracefulDecommissionTimeout } transformedScaleUpFactor, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpFactor(original["scale_up_factor"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScaleUpFactor); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScaleUpFactor); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scaleUpFactor"] = transformedScaleUpFactor } transformedScaleDownFactor, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownFactor(original["scale_down_factor"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScaleDownFactor); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScaleDownFactor); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["scaleDownFactor"] = transformedScaleDownFactor } transformedScaleUpMinWorkerFraction, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpMinWorkerFraction(original["scale_up_min_worker_fraction"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScaleUpMinWorkerFraction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScaleUpMinWorkerFraction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scaleUpMinWorkerFraction"] = transformedScaleUpMinWorkerFraction } transformedScaleDownMinWorkerFraction, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(original["scale_down_min_worker_fraction"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScaleDownMinWorkerFraction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScaleDownMinWorkerFraction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scaleDownMinWorkerFraction"] = transformedScaleDownMinWorkerFraction } return transformed, nil } -func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigGracefulDecommissionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigGracefulDecommissionTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpFactor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpFactor(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownFactor(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { +func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownFactor(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpMinWorkerFraction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpMinWorkerFraction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_autoscaling_policy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_autoscaling_policy_sweeper.go new file mode 100644 index 0000000000..cc0f1e212f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_autoscaling_policy_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dataproc + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DataprocAutoscalingPolicy", testSweepDataprocAutoscalingPolicy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDataprocAutoscalingPolicy(region string) error { + resourceName := "DataprocAutoscalingPolicy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://dataproc.googleapis.com/v1/projects/{{project}}/locations/{{location}}/autoscalingPolicies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", 
+ Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["policies"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://dataproc.googleapis.com/v1/projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + 
log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_cluster.go similarity index 96% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_cluster.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_cluster.go index dbf750310a..2194cd2fd5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_cluster.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_cluster.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package dataproc import ( "errors" @@ -12,6 +14,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/dataproc/v1" ) @@ -544,7 +549,7 @@ func ResourceDataprocCluster() *schema.Resource { AtLeastOneOf: gceClusterConfigKeys, ForceNew: true, ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.subnetwork"}, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name or self_link of the Google Compute Engine network to the cluster will be part of. Conflicts with subnetwork. 
If neither is specified, this defaults to the "default" network.`, }, @@ -554,7 +559,7 @@ func ResourceDataprocCluster() *schema.Resource { AtLeastOneOf: gceClusterConfigKeys, ForceNew: true, ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.network"}, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name or self_link of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with network.`, }, @@ -585,10 +590,10 @@ func ResourceDataprocCluster() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, StateFunc: func(v interface{}) string { - return canonicalizeServiceScope(v.(string)) + return tpgresource.CanonicalizeServiceScope(v.(string)) }, }, - Set: stringScopeHashcode, + Set: tpgresource.StringScopeHashcode, }, "internal_ip_only": { @@ -696,7 +701,7 @@ func ResourceDataprocCluster() *schema.Resource { ForceNew: true, Required: true, Description: `The URI of a sole-tenant that the cluster will be created on.`, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, }, }, }, @@ -950,8 +955,6 @@ by Dataproc`, Description: `The set of optional components to activate on the cluster.`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"COMPONENT_UNSPECIFIED", "ANACONDA", "DOCKER", "DRUID", "HBASE", "FLINK", - "HIVE_WEBHCAT", "JUPYTER", "KERBEROS", "PRESTO", "RANGER", "SOLR", "ZEPPELIN", "ZOOKEEPER"}, false), }, }, }, @@ -1005,14 +1008,14 @@ by Dataproc`, AtLeastOneOf: clusterConfigKeys, MaxItems: 1, Description: `The autoscaling policy config associated with the cluster.`, - DiffSuppressFunc: emptyOrUnsetBlockDiffSuppress, + DiffSuppressFunc: tpgresource.EmptyOrUnsetBlockDiffSuppress, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "policy_uri": { Type: schema.TypeString, Required: true, Description: `The autoscaling policy 
used by the cluster.`, - DiffSuppressFunc: locationDiffSuppress, + DiffSuppressFunc: tpgresource.LocationDiffSuppress, }, }, }, @@ -1064,7 +1067,7 @@ by Dataproc`, Type: schema.TypeString, Optional: true, Description: `The time when cluster will be auto-deleted. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".`, - DiffSuppressFunc: timestampDiffSuppress(time.RFC3339Nano), + DiffSuppressFunc: tpgresource.TimestampDiffSuppress(time.RFC3339Nano), AtLeastOneOf: []string{ "cluster_config.0.lifecycle_config.0.idle_delete_ttl", "cluster_config.0.lifecycle_config.0.auto_delete_time", @@ -1295,13 +1298,13 @@ func acceleratorsSchema() *schema.Resource { } func resourceDataprocClusterCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -1323,7 +1326,7 @@ func resourceDataprocClusterCreate(d *schema.ResourceData, meta interface{}) err } if _, ok := d.GetOk("labels"); ok { - cluster.Labels = expandLabels(d) + cluster.Labels = tpgresource.ExpandLabels(d) } // Checking here caters for the case where the user does not specify cluster_config @@ -1342,7 +1345,7 @@ func resourceDataprocClusterCreate(d *schema.ResourceData, meta interface{}) err d.SetId(fmt.Sprintf("projects/%s/regions/%s/clusters/%s", project, region, cluster.ClusterName)) // Wait until it's created - waitErr := dataprocClusterOperationWait(config, op, "creating Dataproc cluster", userAgent, d.Timeout(schema.TimeoutCreate)) + waitErr := DataprocClusterOperationWait(config, op, "creating Dataproc cluster", userAgent, d.Timeout(schema.TimeoutCreate)) if waitErr != nil { // The resource didn't 
actually create // Note that we do not remove the ID here - this resource tends to leave @@ -1355,7 +1358,7 @@ func resourceDataprocClusterCreate(d *schema.ResourceData, meta interface{}) err return resourceDataprocClusterRead(d, meta) } -func expandVirtualClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.VirtualClusterConfig, error) { +func expandVirtualClusterConfig(d *schema.ResourceData, config *transport_tpg.Config) (*dataproc.VirtualClusterConfig, error) { conf := &dataproc.VirtualClusterConfig{} if v, ok := d.GetOk("virtual_cluster_config"); ok { @@ -1469,7 +1472,7 @@ func expandGkeNodePoolTarget(d *schema.ResourceData, v interface{}, clusterAddre data := v1.(map[string]interface{}) nodePool := dataproc.GkeNodePoolTarget{ NodePool: clusterAddress + "/nodePools/" + data["node_pool"].(string), - Roles: convertStringSet(data["roles"].(*schema.Set)), + Roles: tpgresource.ConvertStringSet(data["roles"].(*schema.Set)), } if v, ok := d.GetOk(fmt.Sprintf("virtual_cluster_config.0.kubernetes_cluster_config.0.gke_cluster_config.0.node_pool_target.%d.node_pool_config", i)); ok { @@ -1490,7 +1493,7 @@ func expandGkeNodePoolConfig(cfg map[string]interface{}) *dataproc.GkeNodePoolCo } if v, ok := cfg["locations"]; ok { - conf.Locations = convertStringSet(v.(*schema.Set)) + conf.Locations = tpgresource.ConvertStringSet(v.(*schema.Set)) } if autoscalingcfg, ok := cfg["autoscaling"]; ok { @@ -1537,7 +1540,7 @@ func expandGkeNodePoolAutoscalingConfig(cfg map[string]interface{}) *dataproc.Gk return conf } -func expandClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.ClusterConfig, error) { +func expandClusterConfig(d *schema.ResourceData, config *transport_tpg.Config) (*dataproc.ClusterConfig, error) { conf := &dataproc.ClusterConfig{ // SDK requires GceClusterConfig to be specified, // even if no explicit values specified @@ -1618,7 +1621,7 @@ func expandClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.Clus return conf, nil } 
-func expandGceClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.GceClusterConfig, error) { +func expandGceClusterConfig(d *schema.ResourceData, config *transport_tpg.Config) (*dataproc.GceClusterConfig, error) { conf := &dataproc.GceClusterConfig{} v, ok := d.GetOk("cluster_config.0.gce_cluster_config") @@ -1631,7 +1634,7 @@ func expandGceClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.G conf.ZoneUri = v.(string) } if v, ok := cfg["network"]; ok { - nf, err := ParseNetworkFieldValue(v.(string), d, config) + nf, err := tpgresource.ParseNetworkFieldValue(v.(string), d, config) if err != nil { return nil, fmt.Errorf("cannot determine self_link for network %q: %s", v, err) } @@ -1639,7 +1642,7 @@ func expandGceClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.G conf.NetworkUri = nf.RelativeLink() } if v, ok := cfg["subnetwork"]; ok { - snf, err := ParseSubnetworkFieldValue(v.(string), d, config) + snf, err := tpgresource.ParseSubnetworkFieldValue(v.(string), d, config) if err != nil { return nil, fmt.Errorf("cannot determine self_link for subnetwork %q: %s", v, err) } @@ -1647,7 +1650,7 @@ func expandGceClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.G conf.SubnetworkUri = snf.RelativeLink() } if v, ok := cfg["tags"]; ok { - conf.Tags = convertStringSet(v.(*schema.Set)) + conf.Tags = tpgresource.ConvertStringSet(v.(*schema.Set)) } if v, ok := cfg["service_account"]; ok { conf.ServiceAccount = v.(string) @@ -1656,7 +1659,7 @@ func expandGceClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.G scopesSet := scopes.(*schema.Set) scopes := make([]string, scopesSet.Len()) for i, scope := range scopesSet.List() { - scopes[i] = canonicalizeServiceScope(scope.(string)) + scopes[i] = tpgresource.CanonicalizeServiceScope(scope.(string)) } conf.ServiceAccountScopes = scopes } @@ -1664,7 +1667,7 @@ func expandGceClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.G conf.InternalIpOnly = 
v.(bool) } if v, ok := cfg["metadata"]; ok { - conf.Metadata = convertStringMap(v.(map[string]interface{})) + conf.Metadata = tpgresource.ConvertStringMap(v.(map[string]interface{})) } if v, ok := d.GetOk("cluster_config.0.gce_cluster_config.0.shielded_instance_config"); ok { cfgSic := v.([]interface{})[0].(map[string]interface{}) @@ -1689,7 +1692,7 @@ func expandGceClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.G conf.ReservationAffinity.Key = v.(string) } if v, ok := cfgRa["values"]; ok { - conf.ReservationAffinity.Values = convertStringSet(v.(*schema.Set)) + conf.ReservationAffinity.Values = tpgresource.ConvertStringSet(v.(*schema.Set)) } } if v, ok := d.GetOk("cluster_config.0.gce_cluster_config.0.node_group_affinity"); ok { @@ -1828,7 +1831,7 @@ func expandDataprocMetricConfig(cfg map[string]interface{}) *dataproc.DataprocMe data := raw.(map[string]interface{}) metric := dataproc.Metric{ MetricSource: data["metric_source"].(string), - MetricOverrides: convertStringSet(data["metric_overrides"].(*schema.Set)), + MetricOverrides: tpgresource.ConvertStringSet(data["metric_overrides"].(*schema.Set)), } metricsSet = append(metricsSet, &metric) } @@ -1898,7 +1901,7 @@ func expandInstanceGroupConfig(cfg map[string]interface{}) *dataproc.InstanceGro icg.NumInstances = int64(v.(int)) } if v, ok := cfg["machine_type"]; ok { - icg.MachineTypeUri = GetResourceNameFromSelfLink(v.(string)) + icg.MachineTypeUri = tpgresource.GetResourceNameFromSelfLink(v.(string)) } if v, ok := cfg["min_cpu_platform"]; ok { icg.MinCpuPlatform = v.(string) @@ -1945,13 +1948,13 @@ func expandAccelerators(configured []interface{}) []*dataproc.AcceleratorConfig } func resourceDataprocClusterUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err 
} - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -2040,7 +2043,7 @@ func resourceDataprocClusterUpdate(d *schema.ResourceData, meta interface{}) err } // Wait until it's updated - waitErr := dataprocClusterOperationWait(config, op, "updating Dataproc cluster ", userAgent, d.Timeout(schema.TimeoutUpdate)) + waitErr := DataprocClusterOperationWait(config, op, "updating Dataproc cluster ", userAgent, d.Timeout(schema.TimeoutUpdate)) if waitErr != nil { return waitErr } @@ -2052,13 +2055,13 @@ func resourceDataprocClusterUpdate(d *schema.ResourceData, meta interface{}) err } func resourceDataprocClusterRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -2069,7 +2072,7 @@ func resourceDataprocClusterRead(d *schema.ResourceData, meta interface{}) error cluster, err := config.NewDataprocClient(userAgent).Projects.Regions.Clusters.Get( project, region, clusterName).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Dataproc Cluster %q", clusterName)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Dataproc Cluster %q", clusterName)) } if err := d.Set("name", cluster.ClusterName); err != nil { @@ -2086,32 +2089,33 @@ func resourceDataprocClusterRead(d *schema.ResourceData, meta interface{}) error } var cfg []map[string]interface{} + cfg, err = flattenClusterConfig(d, cluster.Config) - if cluster.Config != nil { - cfg, err = flattenClusterConfig(d, cluster.Config) - - if err != nil { - return err - } + if err != nil { + return err + } - err = d.Set("cluster_config", cfg) - } else { - cfg, err = 
flattenVirtualClusterConfig(d, cluster.VirtualClusterConfig) + err = d.Set("cluster_config", cfg) + virtualCfg, err := flattenVirtualClusterConfig(d, cluster.VirtualClusterConfig) - if err != nil { - return err - } - - err = d.Set("virtual_cluster_config", cfg) + if err != nil { + return err } + err = d.Set("virtual_cluster_config", virtualCfg) + if err != nil { return err } + return nil } func flattenVirtualClusterConfig(d *schema.ResourceData, cfg *dataproc.VirtualClusterConfig) ([]map[string]interface{}, error) { + if cfg == nil { + return []map[string]interface{}{}, nil + } + data := map[string]interface{}{ "staging_bucket": d.Get("virtual_cluster_config.0.staging_bucket"), "auxiliary_services_config": flattenAuxiliaryServicesConfig(d, cfg.AuxiliaryServicesConfig), @@ -2228,6 +2232,9 @@ func flattenKubernetesSoftwareConfig(d *schema.ResourceData, cfg *dataproc.Kuber } func flattenClusterConfig(d *schema.ResourceData, cfg *dataproc.ClusterConfig) ([]map[string]interface{}, error) { + if cfg == nil { + return []map[string]interface{}{}, nil + } data := map[string]interface{}{ "staging_bucket": d.Get("cluster_config.0.staging_bucket").(string), @@ -2388,7 +2395,7 @@ func flattenAccelerators(accelerators []*dataproc.AcceleratorConfig) interface{} acceleratorsTypeSet := schema.NewSet(schema.HashResource(acceleratorsSchema()), []interface{}{}) for _, accelerator := range accelerators { data := map[string]interface{}{ - "accelerator_type": GetResourceNameFromSelfLink(accelerator.AcceleratorTypeUri), + "accelerator_type": tpgresource.GetResourceNameFromSelfLink(accelerator.AcceleratorTypeUri), "accelerator_count": int(accelerator.AcceleratorCount), } @@ -2420,11 +2427,14 @@ func flattenInitializationActions(nia []*dataproc.NodeInitializationAction) ([]m } func flattenGceClusterConfig(d *schema.ResourceData, gcc *dataproc.GceClusterConfig) []map[string]interface{} { + if gcc == nil { + return []map[string]interface{}{} + } gceConfig := map[string]interface{}{ - "tags": 
schema.NewSet(schema.HashString, convertStringArrToInterface(gcc.Tags)), + "tags": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(gcc.Tags)), "service_account": gcc.ServiceAccount, - "zone": GetResourceNameFromSelfLink(gcc.ZoneUri), + "zone": tpgresource.GetResourceNameFromSelfLink(gcc.ZoneUri), "internal_ip_only": gcc.InternalIpOnly, "metadata": gcc.Metadata, } @@ -2436,7 +2446,7 @@ func flattenGceClusterConfig(d *schema.ResourceData, gcc *dataproc.GceClusterCon gceConfig["subnetwork"] = gcc.SubnetworkUri } if len(gcc.ServiceAccountScopes) > 0 { - gceConfig["service_account_scopes"] = schema.NewSet(stringScopeHashcode, convertStringArrToInterface(gcc.ServiceAccountScopes)) + gceConfig["service_account_scopes"] = schema.NewSet(tpgresource.StringScopeHashcode, tpgresource.ConvertStringArrToInterface(gcc.ServiceAccountScopes)) } if gcc.ShieldedInstanceConfig != nil { gceConfig["shielded_instance_config"] = []map[string]interface{}{ @@ -2508,7 +2518,7 @@ func flattenInstanceGroupConfig(d *schema.ResourceData, icg *dataproc.InstanceGr if icg != nil { data["num_instances"] = icg.NumInstances - data["machine_type"] = GetResourceNameFromSelfLink(icg.MachineTypeUri) + data["machine_type"] = tpgresource.GetResourceNameFromSelfLink(icg.MachineTypeUri) data["min_cpu_platform"] = icg.MinCpuPlatform data["image_uri"] = icg.ImageUri data["instance_names"] = icg.InstanceNames @@ -2534,13 +2544,13 @@ func extractInitTimeout(t string) (int, error) { } func resourceDataprocClusterDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -2556,7 +2566,7 @@ func resourceDataprocClusterDelete(d 
*schema.ResourceData, meta interface{}) err } // Wait until it's deleted - waitErr := dataprocClusterOperationWait(config, op, "deleting Dataproc cluster", userAgent, d.Timeout(schema.TimeoutDelete)) + waitErr := DataprocClusterOperationWait(config, op, "deleting Dataproc cluster", userAgent, d.Timeout(schema.TimeoutDelete)) if waitErr != nil { return waitErr } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_job.go similarity index 90% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_job.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_job.go index f03b21b3bf..095d1c167b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_job.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_job.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package dataproc import ( "fmt" @@ -6,6 +8,10 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "google.golang.org/api/dataproc/v1" @@ -67,7 +73,7 @@ func ResourceDataprocJob() *schema.Resource { Optional: true, ForceNew: true, Computed: true, - ValidateFunc: validateRegexp("^[a-zA-Z0-9_-]{1,100}$"), + ValidateFunc: verify.ValidateRegexp("^[a-zA-Z0-9_-]{1,100}$"), }, }, }, @@ -191,13 +197,13 @@ func resourceDataprocJobUpdate(d *schema.ResourceData, meta interface{}) error { } func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -221,46 +227,46 @@ func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error { } if v, ok := d.GetOk("scheduling"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.Scheduling = expandJobScheduling(config) } if _, ok := d.GetOk("labels"); ok { - submitReq.Job.Labels = expandLabels(d) + submitReq.Job.Labels = tpgresource.ExpandLabels(d) } if v, ok := d.GetOk("pyspark_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.PysparkJob = expandPySparkJob(config) } if v, ok := d.GetOk("spark_config"); ok { - 
config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.SparkJob = expandSparkJob(config) } if v, ok := d.GetOk("hadoop_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.HadoopJob = expandHadoopJob(config) } if v, ok := d.GetOk("hive_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.HiveJob = expandHiveJob(config) } if v, ok := d.GetOk("pig_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.PigJob = expandPigJob(config) } if v, ok := d.GetOk("sparksql_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.SparkSqlJob = expandSparkSqlJob(config) } if v, ok := d.GetOk("presto_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) submitReq.Job.PrestoJob = expandPrestoJob(config) } @@ -272,7 +278,7 @@ func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error { } d.SetId(fmt.Sprintf("projects/%s/regions/%s/jobs/%s", project, region, job.Reference.JobId)) - waitErr := dataprocJobOperationWait(config, region, project, job.Reference.JobId, + waitErr := DataprocJobOperationWait(config, region, project, job.Reference.JobId, "Creating Dataproc job", userAgent, d.Timeout(schema.TimeoutCreate)) if waitErr != nil { return waitErr @@ -283,14 +289,14 @@ func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error { } func resourceDataprocJobRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } region := d.Get("region").(string) - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -300,7 +306,7 @@ func resourceDataprocJobRead(d *schema.ResourceData, meta interface{}) error { job, err := config.NewDataprocClient(userAgent).Projects.Regions.Jobs.Get( project, region, jobId).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Dataproc Job %q", jobId)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Dataproc Job %q", jobId)) } if err := d.Set("force_delete", d.Get("force_delete")); err != nil { @@ -372,13 +378,13 @@ func resourceDataprocJobRead(d *schema.ResourceData, meta interface{}) error { } func resourceDataprocJobDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -396,7 +402,7 @@ func resourceDataprocJobDelete(d *schema.ResourceData, meta interface{}) error { // at least not active _, _ = config.NewDataprocClient(userAgent).Projects.Regions.Jobs.Cancel(project, region, jobId, &dataproc.CancelJobRequest{}).Do() - waitErr := dataprocJobOperationWait(config, region, project, jobId, + waitErr := DataprocJobOperationWait(config, region, project, jobId, "Cancelling Dataproc job", userAgent, d.Timeout(schema.TimeoutDelete)) if waitErr != nil { return waitErr @@ -411,7 +417,7 @@ func resourceDataprocJobDelete(d *schema.ResourceData, meta interface{}) error { return err } - waitErr := dataprocDeleteOperationWait(config, region, project, jobId, + waitErr := 
DataprocDeleteOperationWait(config, region, project, jobId, "Deleting Dataproc job", userAgent, d.Timeout(schema.TimeoutDelete)) if waitErr != nil { return waitErr @@ -534,25 +540,25 @@ func expandPySparkJob(config map[string]interface{}) *dataproc.PySparkJob { job.MainPythonFileUri = v.(string) } if v, ok := config["args"]; ok { - job.Args = convertStringArr(v.([]interface{})) + job.Args = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["python_file_uris"]; ok { - job.PythonFileUris = convertStringArr(v.([]interface{})) + job.PythonFileUris = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["jar_file_uris"]; ok { - job.JarFileUris = convertStringArr(v.([]interface{})) + job.JarFileUris = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["file_uris"]; ok { - job.FileUris = convertStringArr(v.([]interface{})) + job.FileUris = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["archive_uris"]; ok { - job.ArchiveUris = convertStringArr(v.([]interface{})) + job.ArchiveUris = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["properties"]; ok { - job.Properties = convertStringMap(v.(map[string]interface{})) + job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) } if v, ok := config["logging_config"]; ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) job.LoggingConfig = expandLoggingConfig(config) } @@ -669,22 +675,22 @@ func expandSparkJob(config map[string]interface{}) *dataproc.SparkJob { } if v, ok := config["args"]; ok { - job.Args = convertStringArr(v.([]interface{})) + job.Args = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["jar_file_uris"]; ok { - job.JarFileUris = convertStringArr(v.([]interface{})) + job.JarFileUris = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["file_uris"]; ok { - job.FileUris = 
convertStringArr(v.([]interface{})) + job.FileUris = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["archive_uris"]; ok { - job.ArchiveUris = convertStringArr(v.([]interface{})) + job.ArchiveUris = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["properties"]; ok { - job.Properties = convertStringMap(v.(map[string]interface{})) + job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) } if v, ok := config["logging_config"]; ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) job.LoggingConfig = expandLoggingConfig(config) } @@ -790,22 +796,22 @@ func expandHadoopJob(config map[string]interface{}) *dataproc.HadoopJob { } if v, ok := config["args"]; ok { - job.Args = convertStringArr(v.([]interface{})) + job.Args = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["jar_file_uris"]; ok { - job.JarFileUris = convertStringArr(v.([]interface{})) + job.JarFileUris = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["file_uris"]; ok { - job.FileUris = convertStringArr(v.([]interface{})) + job.FileUris = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["archive_uris"]; ok { - job.ArchiveUris = convertStringArr(v.([]interface{})) + job.ArchiveUris = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["properties"]; ok { - job.Properties = convertStringMap(v.(map[string]interface{})) + job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) } if v, ok := config["logging_config"]; ok { - config := extractFirstMapConfig(v.([]interface{})) + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) job.LoggingConfig = expandLoggingConfig(config) } @@ -900,20 +906,20 @@ func expandHiveJob(config map[string]interface{}) *dataproc.HiveJob { } if v, ok := config["query_list"]; ok { job.QueryList = &dataproc.QueryList{ - Queries: 
convertStringArr(v.([]interface{})), + Queries: tpgresource.ConvertStringArr(v.([]interface{})), } } if v, ok := config["continue_on_failure"]; ok { job.ContinueOnFailure = v.(bool) } if v, ok := config["script_variables"]; ok { - job.ScriptVariables = convertStringMap(v.(map[string]interface{})) + job.ScriptVariables = tpgresource.ConvertStringMap(v.(map[string]interface{})) } if v, ok := config["jar_file_uris"]; ok { - job.JarFileUris = convertStringArr(v.([]interface{})) + job.JarFileUris = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["properties"]; ok { - job.Properties = convertStringMap(v.(map[string]interface{})) + job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) } return job @@ -1008,20 +1014,20 @@ func expandPigJob(config map[string]interface{}) *dataproc.PigJob { } if v, ok := config["query_list"]; ok { job.QueryList = &dataproc.QueryList{ - Queries: convertStringArr(v.([]interface{})), + Queries: tpgresource.ConvertStringArr(v.([]interface{})), } } if v, ok := config["continue_on_failure"]; ok { job.ContinueOnFailure = v.(bool) } if v, ok := config["script_variables"]; ok { - job.ScriptVariables = convertStringMap(v.(map[string]interface{})) + job.ScriptVariables = tpgresource.ConvertStringMap(v.(map[string]interface{})) } if v, ok := config["jar_file_uris"]; ok { - job.JarFileUris = convertStringArr(v.([]interface{})) + job.JarFileUris = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["properties"]; ok { - job.Properties = convertStringMap(v.(map[string]interface{})) + job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) } return job @@ -1109,17 +1115,17 @@ func expandSparkSqlJob(config map[string]interface{}) *dataproc.SparkSqlJob { } if v, ok := config["query_list"]; ok { job.QueryList = &dataproc.QueryList{ - Queries: convertStringArr(v.([]interface{})), + Queries: tpgresource.ConvertStringArr(v.([]interface{})), } } if v, ok := config["script_variables"]; ok { 
- job.ScriptVariables = convertStringMap(v.(map[string]interface{})) + job.ScriptVariables = tpgresource.ConvertStringMap(v.(map[string]interface{})) } if v, ok := config["jar_file_uris"]; ok { - job.JarFileUris = convertStringArr(v.([]interface{})) + job.JarFileUris = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["properties"]; ok { - job.Properties = convertStringMap(v.(map[string]interface{})) + job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) } return job @@ -1207,7 +1213,7 @@ func flattenPrestoJob(job *dataproc.PrestoJob) []map[string]interface{} { func expandPrestoJob(config map[string]interface{}) *dataproc.PrestoJob { job := &dataproc.PrestoJob{} if v, ok := config["client_tags"]; ok { - job.ClientTags = convertStringArr(v.([]interface{})) + job.ClientTags = tpgresource.ConvertStringArr(v.([]interface{})) } if v, ok := config["continue_on_failure"]; ok { job.ContinueOnFailure = v.(bool) @@ -1217,11 +1223,11 @@ func expandPrestoJob(config map[string]interface{}) *dataproc.PrestoJob { } if v, ok := config["query_list"]; ok { job.QueryList = &dataproc.QueryList{ - Queries: convertStringArr(v.([]interface{})), + Queries: tpgresource.ConvertStringArr(v.([]interface{})), } } if v, ok := config["properties"]; ok { - job.Properties = convertStringMap(v.(map[string]interface{})) + job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) } if v, ok := config["output_format"]; ok { job.OutputFormat = v.(string) @@ -1236,7 +1242,7 @@ func expandPrestoJob(config map[string]interface{}) *dataproc.PrestoJob { func expandLoggingConfig(config map[string]interface{}) *dataproc.LoggingConfig { conf := &dataproc.LoggingConfig{} if v, ok := config["driver_log_levels"]; ok { - conf.DriverLogLevels = convertStringMap(v.(map[string]interface{})) + conf.DriverLogLevels = tpgresource.ConvertStringMap(v.(map[string]interface{})) } return conf } diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_workflow_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_workflow_template.go similarity index 96% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_workflow_template.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_workflow_template.go index 86bc23f5ee..cd4ee0e929 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dataproc_workflow_template.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_workflow_template.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package dataproc import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" dataproc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceDataprocWorkflowTemplate() *schema.Resource { @@ -102,7 +109,7 @@ func ResourceDataprocWorkflowTemplate() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: 
tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -1081,7 +1088,7 @@ func DataprocWorkflowTemplatePlacementManagedClusterConfigSchema() *schema.Resou Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). **This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", }, @@ -1089,7 +1096,7 @@ func DataprocWorkflowTemplatePlacementManagedClusterConfigSchema() *schema.Resou Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. 
**This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", }, @@ -1113,7 +1120,7 @@ func DataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSchem Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` Note that the policy must be in the same project and Dataproc region.", }, }, @@ -1127,7 +1134,7 @@ func DataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSchema Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.", }, }, @@ -1177,7 +1184,7 @@ func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSchema Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `network_uri` nor `subnetwork_uri` is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` * `projects/[project_id]/regions/global/default` * `default`", }, @@ -1210,7 +1217,7 @@ func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSchema Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", }, @@ -1235,7 +1242,7 @@ func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSchema Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0` * `projects/[project_id]/regions/us-east1/subnetworks/sub0` * `sub0`", }, @@ -1266,7 +1273,7 @@ func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGr Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Required. 
The URI of a sole-tenant [node group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`", }, }, @@ -1407,7 +1414,7 @@ func DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigSchema() * Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", }, @@ -1555,7 +1562,7 @@ func DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigS Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. 
Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", }, @@ -1762,7 +1769,7 @@ func DataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberos Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. The uri of the KMS key used to encrypt various sensitive files.", }, @@ -1859,7 +1866,7 @@ func DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSchema() * Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. 
Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", }, @@ -2071,8 +2078,8 @@ func DataprocWorkflowTemplateParametersValidationValuesSchema() *schema.Resource } func resourceDataprocWorkflowTemplateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -2083,7 +2090,7 @@ func resourceDataprocWorkflowTemplateCreate(d *schema.ResourceData, meta interfa Name: dcl.String(d.Get("name").(string)), Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), DagTimeout: dcl.String(d.Get("dag_timeout").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), Project: dcl.String(project), Version: dcl.Int64OrNil(int64(d.Get("version").(int))), @@ -2094,18 +2101,18 @@ func resourceDataprocWorkflowTemplateCreate(d *schema.ResourceData, meta interfa return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataprocClient(config, userAgent, 
billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -2127,8 +2134,8 @@ func resourceDataprocWorkflowTemplateCreate(d *schema.ResourceData, meta interfa } func resourceDataprocWorkflowTemplateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -2139,23 +2146,23 @@ func resourceDataprocWorkflowTemplateRead(d *schema.ResourceData, meta interface Name: dcl.String(d.Get("name").(string)), Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), DagTimeout: dcl.String(d.Get("dag_timeout").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), Project: dcl.String(project), Version: dcl.Int64OrNil(int64(d.Get("version").(int))), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataprocClient(config, 
userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -2164,7 +2171,7 @@ func resourceDataprocWorkflowTemplateRead(d *schema.ResourceData, meta interface res, err := client.GetWorkflowTemplate(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("DataprocWorkflowTemplate %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("jobs", flattenDataprocWorkflowTemplateJobsArray(res.Jobs)); err != nil { @@ -2205,8 +2212,8 @@ func resourceDataprocWorkflowTemplateRead(d *schema.ResourceData, meta interface } func resourceDataprocWorkflowTemplateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -2217,24 +2224,24 @@ func resourceDataprocWorkflowTemplateDelete(d *schema.ResourceData, meta interfa Name: dcl.String(d.Get("name").(string)), Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), DagTimeout: dcl.String(d.Get("dag_timeout").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), Project: dcl.String(project), Version: dcl.Int64OrNil(int64(d.Get("version").(int))), } log.Printf("[DEBUG] Deleting WorkflowTemplate %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, 
config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -2249,9 +2256,9 @@ func resourceDataprocWorkflowTemplateDelete(d *schema.ResourceData, meta interfa } func resourceDataprocWorkflowTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/workflowTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -2260,7 +2267,7 @@ func resourceDataprocWorkflowTemplateImport(d *schema.ResourceData, meta interfa } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -2298,9 +2305,9 @@ func expandDataprocWorkflowTemplateJobs(o interface{}) *dataproc.WorkflowTemplat StepId: dcl.String(obj["step_id"].(string)), HadoopJob: expandDataprocWorkflowTemplateJobsHadoopJob(obj["hadoop_job"]), HiveJob: expandDataprocWorkflowTemplateJobsHiveJob(obj["hive_job"]), - Labels: checkStringMap(obj["labels"]), + Labels: tpgresource.CheckStringMap(obj["labels"]), PigJob: 
expandDataprocWorkflowTemplateJobsPigJob(obj["pig_job"]), - PrerequisiteStepIds: expandStringArray(obj["prerequisite_step_ids"]), + PrerequisiteStepIds: tpgdclresource.ExpandStringArray(obj["prerequisite_step_ids"]), PrestoJob: expandDataprocWorkflowTemplateJobsPrestoJob(obj["presto_job"]), PysparkJob: expandDataprocWorkflowTemplateJobsPysparkJob(obj["pyspark_job"]), Scheduling: expandDataprocWorkflowTemplateJobsScheduling(obj["scheduling"]), @@ -2357,14 +2364,14 @@ func expandDataprocWorkflowTemplateJobsHadoopJob(o interface{}) *dataproc.Workfl } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsHadoopJob{ - ArchiveUris: expandStringArray(obj["archive_uris"]), - Args: expandStringArray(obj["args"]), - FileUris: expandStringArray(obj["file_uris"]), - JarFileUris: expandStringArray(obj["jar_file_uris"]), + ArchiveUris: tpgdclresource.ExpandStringArray(obj["archive_uris"]), + Args: tpgdclresource.ExpandStringArray(obj["args"]), + FileUris: tpgdclresource.ExpandStringArray(obj["file_uris"]), + JarFileUris: tpgdclresource.ExpandStringArray(obj["jar_file_uris"]), LoggingConfig: expandDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(obj["logging_config"]), MainClass: dcl.String(obj["main_class"].(string)), MainJarFileUri: dcl.String(obj["main_jar_file_uri"].(string)), - Properties: checkStringMap(obj["properties"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), } } @@ -2397,7 +2404,7 @@ func expandDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(o interface{}) *da } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsHadoopJobLoggingConfig{ - DriverLogLevels: checkStringMap(obj["driver_log_levels"]), + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), } } @@ -2424,11 +2431,11 @@ func expandDataprocWorkflowTemplateJobsHiveJob(o interface{}) *dataproc.Workflow obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsHiveJob{ ContinueOnFailure: 
dcl.Bool(obj["continue_on_failure"].(bool)), - JarFileUris: expandStringArray(obj["jar_file_uris"]), - Properties: checkStringMap(obj["properties"]), + JarFileUris: tpgdclresource.ExpandStringArray(obj["jar_file_uris"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), QueryFileUri: dcl.String(obj["query_file_uri"].(string)), QueryList: expandDataprocWorkflowTemplateJobsHiveJobQueryList(obj["query_list"]), - ScriptVariables: checkStringMap(obj["script_variables"]), + ScriptVariables: tpgresource.CheckStringMap(obj["script_variables"]), } } @@ -2459,7 +2466,7 @@ func expandDataprocWorkflowTemplateJobsHiveJobQueryList(o interface{}) *dataproc } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsHiveJobQueryList{ - Queries: expandStringArray(obj["queries"]), + Queries: tpgdclresource.ExpandStringArray(obj["queries"]), } } @@ -2486,12 +2493,12 @@ func expandDataprocWorkflowTemplateJobsPigJob(o interface{}) *dataproc.WorkflowT obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsPigJob{ ContinueOnFailure: dcl.Bool(obj["continue_on_failure"].(bool)), - JarFileUris: expandStringArray(obj["jar_file_uris"]), + JarFileUris: tpgdclresource.ExpandStringArray(obj["jar_file_uris"]), LoggingConfig: expandDataprocWorkflowTemplateJobsPigJobLoggingConfig(obj["logging_config"]), - Properties: checkStringMap(obj["properties"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), QueryFileUri: dcl.String(obj["query_file_uri"].(string)), QueryList: expandDataprocWorkflowTemplateJobsPigJobQueryList(obj["query_list"]), - ScriptVariables: checkStringMap(obj["script_variables"]), + ScriptVariables: tpgresource.CheckStringMap(obj["script_variables"]), } } @@ -2523,7 +2530,7 @@ func expandDataprocWorkflowTemplateJobsPigJobLoggingConfig(o interface{}) *datap } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsPigJobLoggingConfig{ - DriverLogLevels: checkStringMap(obj["driver_log_levels"]), + 
DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), } } @@ -2549,7 +2556,7 @@ func expandDataprocWorkflowTemplateJobsPigJobQueryList(o interface{}) *dataproc. } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsPigJobQueryList{ - Queries: expandStringArray(obj["queries"]), + Queries: tpgdclresource.ExpandStringArray(obj["queries"]), } } @@ -2575,11 +2582,11 @@ func expandDataprocWorkflowTemplateJobsPrestoJob(o interface{}) *dataproc.Workfl } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsPrestoJob{ - ClientTags: expandStringArray(obj["client_tags"]), + ClientTags: tpgdclresource.ExpandStringArray(obj["client_tags"]), ContinueOnFailure: dcl.Bool(obj["continue_on_failure"].(bool)), LoggingConfig: expandDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(obj["logging_config"]), OutputFormat: dcl.String(obj["output_format"].(string)), - Properties: checkStringMap(obj["properties"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), QueryFileUri: dcl.String(obj["query_file_uri"].(string)), QueryList: expandDataprocWorkflowTemplateJobsPrestoJobQueryList(obj["query_list"]), } @@ -2613,7 +2620,7 @@ func expandDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(o interface{}) *da } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsPrestoJobLoggingConfig{ - DriverLogLevels: checkStringMap(obj["driver_log_levels"]), + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), } } @@ -2639,7 +2646,7 @@ func expandDataprocWorkflowTemplateJobsPrestoJobQueryList(o interface{}) *datapr } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsPrestoJobQueryList{ - Queries: expandStringArray(obj["queries"]), + Queries: tpgdclresource.ExpandStringArray(obj["queries"]), } } @@ -2666,13 +2673,13 @@ func expandDataprocWorkflowTemplateJobsPysparkJob(o interface{}) *dataproc.Workf obj := objArr[0].(map[string]interface{}) return 
&dataproc.WorkflowTemplateJobsPysparkJob{ MainPythonFileUri: dcl.String(obj["main_python_file_uri"].(string)), - ArchiveUris: expandStringArray(obj["archive_uris"]), - Args: expandStringArray(obj["args"]), - FileUris: expandStringArray(obj["file_uris"]), - JarFileUris: expandStringArray(obj["jar_file_uris"]), + ArchiveUris: tpgdclresource.ExpandStringArray(obj["archive_uris"]), + Args: tpgdclresource.ExpandStringArray(obj["args"]), + FileUris: tpgdclresource.ExpandStringArray(obj["file_uris"]), + JarFileUris: tpgdclresource.ExpandStringArray(obj["jar_file_uris"]), LoggingConfig: expandDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(obj["logging_config"]), - Properties: checkStringMap(obj["properties"]), - PythonFileUris: expandStringArray(obj["python_file_uris"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), + PythonFileUris: tpgdclresource.ExpandStringArray(obj["python_file_uris"]), } } @@ -2705,7 +2712,7 @@ func expandDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(o interface{}) *d } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsPysparkJobLoggingConfig{ - DriverLogLevels: checkStringMap(obj["driver_log_levels"]), + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), } } @@ -2759,14 +2766,14 @@ func expandDataprocWorkflowTemplateJobsSparkJob(o interface{}) *dataproc.Workflo } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsSparkJob{ - ArchiveUris: expandStringArray(obj["archive_uris"]), - Args: expandStringArray(obj["args"]), - FileUris: expandStringArray(obj["file_uris"]), - JarFileUris: expandStringArray(obj["jar_file_uris"]), + ArchiveUris: tpgdclresource.ExpandStringArray(obj["archive_uris"]), + Args: tpgdclresource.ExpandStringArray(obj["args"]), + FileUris: tpgdclresource.ExpandStringArray(obj["file_uris"]), + JarFileUris: tpgdclresource.ExpandStringArray(obj["jar_file_uris"]), LoggingConfig: 
expandDataprocWorkflowTemplateJobsSparkJobLoggingConfig(obj["logging_config"]), MainClass: dcl.String(obj["main_class"].(string)), MainJarFileUri: dcl.String(obj["main_jar_file_uri"].(string)), - Properties: checkStringMap(obj["properties"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), } } @@ -2799,7 +2806,7 @@ func expandDataprocWorkflowTemplateJobsSparkJobLoggingConfig(o interface{}) *dat } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsSparkJobLoggingConfig{ - DriverLogLevels: checkStringMap(obj["driver_log_levels"]), + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), } } @@ -2826,11 +2833,11 @@ func expandDataprocWorkflowTemplateJobsSparkRJob(o interface{}) *dataproc.Workfl obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsSparkRJob{ MainRFileUri: dcl.String(obj["main_r_file_uri"].(string)), - ArchiveUris: expandStringArray(obj["archive_uris"]), - Args: expandStringArray(obj["args"]), - FileUris: expandStringArray(obj["file_uris"]), + ArchiveUris: tpgdclresource.ExpandStringArray(obj["archive_uris"]), + Args: tpgdclresource.ExpandStringArray(obj["args"]), + FileUris: tpgdclresource.ExpandStringArray(obj["file_uris"]), LoggingConfig: expandDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(obj["logging_config"]), - Properties: checkStringMap(obj["properties"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), } } @@ -2861,7 +2868,7 @@ func expandDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(o interface{}) *da } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsSparkRJobLoggingConfig{ - DriverLogLevels: checkStringMap(obj["driver_log_levels"]), + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), } } @@ -2887,12 +2894,12 @@ func expandDataprocWorkflowTemplateJobsSparkSqlJob(o interface{}) *dataproc.Work } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsSparkSqlJob{ - 
JarFileUris: expandStringArray(obj["jar_file_uris"]), + JarFileUris: tpgdclresource.ExpandStringArray(obj["jar_file_uris"]), LoggingConfig: expandDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(obj["logging_config"]), - Properties: checkStringMap(obj["properties"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), QueryFileUri: dcl.String(obj["query_file_uri"].(string)), QueryList: expandDataprocWorkflowTemplateJobsSparkSqlJobQueryList(obj["query_list"]), - ScriptVariables: checkStringMap(obj["script_variables"]), + ScriptVariables: tpgresource.CheckStringMap(obj["script_variables"]), } } @@ -2923,7 +2930,7 @@ func expandDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(o interface{}) * } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsSparkSqlJobLoggingConfig{ - DriverLogLevels: checkStringMap(obj["driver_log_levels"]), + DriverLogLevels: tpgresource.CheckStringMap(obj["driver_log_levels"]), } } @@ -2949,7 +2956,7 @@ func expandDataprocWorkflowTemplateJobsSparkSqlJobQueryList(o interface{}) *data } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateJobsSparkSqlJobQueryList{ - Queries: expandStringArray(obj["queries"]), + Queries: tpgdclresource.ExpandStringArray(obj["queries"]), } } @@ -3003,7 +3010,7 @@ func expandDataprocWorkflowTemplatePlacementClusterSelector(o interface{}) *data } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplatePlacementClusterSelector{ - ClusterLabels: checkStringMap(obj["cluster_labels"]), + ClusterLabels: tpgresource.CheckStringMap(obj["cluster_labels"]), Zone: dcl.StringOrNil(obj["zone"].(string)), } } @@ -3033,7 +3040,7 @@ func expandDataprocWorkflowTemplatePlacementManagedCluster(o interface{}) *datap return &dataproc.WorkflowTemplatePlacementManagedCluster{ ClusterName: dcl.String(obj["cluster_name"].(string)), Config: expandDataprocWorkflowTemplatePlacementManagedClusterConfig(obj["config"]), - Labels: checkStringMap(obj["labels"]), + 
Labels: tpgresource.CheckStringMap(obj["labels"]), } } @@ -3191,16 +3198,16 @@ func expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig{ InternalIPOnly: dcl.Bool(obj["internal_ip_only"].(bool)), - Metadata: checkStringMap(obj["metadata"]), + Metadata: tpgresource.CheckStringMap(obj["metadata"]), Network: dcl.String(obj["network"].(string)), NodeGroupAffinity: expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinity(obj["node_group_affinity"]), PrivateIPv6GoogleAccess: dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef(obj["private_ipv6_google_access"].(string)), ReservationAffinity: expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity(obj["reservation_affinity"]), ServiceAccount: dcl.String(obj["service_account"].(string)), - ServiceAccountScopes: expandStringArray(obj["service_account_scopes"]), + ServiceAccountScopes: tpgdclresource.ExpandStringArray(obj["service_account_scopes"]), ShieldedInstanceConfig: expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig(obj["shielded_instance_config"]), Subnetwork: dcl.String(obj["subnetwork"].(string)), - Tags: expandStringArray(obj["tags"]), + Tags: tpgdclresource.ExpandStringArray(obj["tags"]), Zone: dcl.StringOrNil(obj["zone"].(string)), } } @@ -3266,7 +3273,7 @@ func expandDataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfig return &dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinity{ ConsumeReservationType: dataproc.WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef(obj["consume_reservation_type"].(string)), Key: dcl.String(obj["key"].(string)), - Values: expandStringArray(obj["values"]), + 
Values: tpgdclresource.ExpandStringArray(obj["values"]), } } @@ -3776,7 +3783,7 @@ func expandDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfig(o return &dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig{ ImageVersion: dcl.String(obj["image_version"].(string)), OptionalComponents: expandDataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigOptionalComponentsArray(obj["optional_components"]), - Properties: checkStringMap(obj["properties"]), + Properties: tpgresource.CheckStringMap(obj["properties"]), } } @@ -3960,7 +3967,7 @@ func expandDataprocWorkflowTemplateParameters(o interface{}) *dataproc.WorkflowT obj := o.(map[string]interface{}) return &dataproc.WorkflowTemplateParameters{ - Fields: expandStringArray(obj["fields"]), + Fields: tpgdclresource.ExpandStringArray(obj["fields"]), Name: dcl.String(obj["name"].(string)), Description: dcl.String(obj["description"].(string)), Validation: expandDataprocWorkflowTemplateParametersValidation(obj["validation"]), @@ -4034,7 +4041,7 @@ func expandDataprocWorkflowTemplateParametersValidationRegex(o interface{}) *dat } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateParametersValidationRegex{ - Regexes: expandStringArray(obj["regexes"]), + Regexes: tpgdclresource.ExpandStringArray(obj["regexes"]), } } @@ -4060,7 +4067,7 @@ func expandDataprocWorkflowTemplateParametersValidationValues(o interface{}) *da } obj := objArr[0].(map[string]interface{}) return &dataproc.WorkflowTemplateParametersValidationValues{ - Values: expandStringArray(obj["values"]), + Values: tpgdclresource.ExpandStringArray(obj["values"]), } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_workflow_template_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_workflow_template_sweeper.go new file mode 100644 
index 0000000000..9fd40ff4c1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataproc/resource_dataproc_workflow_template_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package dataproc + +import ( + "context" + "log" + "testing" + + dataproc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DataprocWorkflowTemplate", testSweepDataprocWorkflowTemplate) +} + +func testSweepDataprocWorkflowTemplate(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for DataprocWorkflowTemplate") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLDataprocClient(config, config.UserAgent, "", 0) + err = client.DeleteAllWorkflowTemplate(context.Background(), d["project"], d["location"], isDeletableDataprocWorkflowTemplate) + if err != nil { + return err + } + return nil +} + +func isDeletableDataprocWorkflowTemplate(r *dataproc.WorkflowTemplate) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/data_source_dataproc_metastore_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/data_source_dataproc_metastore_service.go new file mode 100644 index 0000000000..52099820f8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/data_source_dataproc_metastore_service.go @@ -0,0 +1,33 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package dataprocmetastore + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceDataprocMetastoreService() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceDataprocMetastoreService().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "service_id") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "location") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceDataprocMetastoreServiceRead, + Schema: dsSchema, + } +} + +func dataSourceDataprocMetastoreServiceRead(d *schema.ResourceData, meta interface{}) error { + id, err := tpgresource.ReplaceVars(d, meta.(*transport_tpg.Config), "projects/{{project}}/locations/{{location}}/services/{{service_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return resourceDataprocMetastoreServiceRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/dataproc_metastore_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/dataproc_metastore_operation.go new file mode 100644 index 0000000000..4b084a9e1d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/dataproc_metastore_operation.go @@ -0,0 +1,74 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dataprocmetastore + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type DataprocMetastoreOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *DataprocMetastoreOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.DataprocMetastoreBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createDataprocMetastoreWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*DataprocMetastoreOperationWaiter, error) { + w := &DataprocMetastoreOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +func DataprocMetastoreOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createDataprocMetastoreWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dataproc_metastore_service_diff_supress.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/dataproc_metastore_service_diff_supress.go similarity index 86% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dataproc_metastore_service_diff_supress.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/dataproc_metastore_service_diff_supress.go index 4c887bf43d..899bf423a7 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dataproc_metastore_service_diff_supress.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/dataproc_metastore_service_diff_supress.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package dataprocmetastore import ( "strings" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/iam_dataproc_metastore_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/iam_dataproc_metastore_service.go new file mode 100644 index 0000000000..b03e40a8af --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/iam_dataproc_metastore_service.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dataprocmetastore + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var DataprocMetastoreServiceIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "service_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type DataprocMetastoreServiceIamUpdater struct { + project string + location string + serviceId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func DataprocMetastoreServiceIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + 
values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("service_id"); ok { + values["service_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("service_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DataprocMetastoreServiceIamUpdater{ + project: values["project"], + location: values["location"], + serviceId: values["service_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("service_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting service_id: %s", err) + } + + return u, nil +} + +func DataprocMetastoreServiceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := 
&DataprocMetastoreServiceIamUpdater{ + project: values["project"], + location: values["location"], + serviceId: values["service_id"], + d: d, + Config: config, + } + if err := d.Set("service_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting service_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DataprocMetastoreServiceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyServiceUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DataprocMetastoreServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyServiceUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + 
Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataprocMetastoreServiceIamUpdater) qualifyServiceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DataprocMetastoreBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/services/%s", u.project, u.location, u.serviceId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DataprocMetastoreServiceIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/services/%s", u.project, u.location, u.serviceId) +} + +func (u *DataprocMetastoreServiceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataprocmetastore-service-%s", u.GetResourceId()) +} + +func (u *DataprocMetastoreServiceIamUpdater) DescribeResource() string { + return fmt.Sprintf("dataprocmetastore service %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/resource_dataproc_metastore_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/resource_dataproc_metastore_service.go new file mode 100644 index 0000000000..261748d49f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/resource_dataproc_metastore_service.go @@ -0,0 +1,1253 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dataprocmetastore + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDataprocMetastoreService() *schema.Resource { + return &schema.Resource{ + Create: resourceDataprocMetastoreServiceCreate, + Read: resourceDataprocMetastoreServiceRead, + Update: resourceDataprocMetastoreServiceUpdate, + Delete: resourceDataprocMetastoreServiceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataprocMetastoreServiceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "service_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the metastore service. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), +and hyphens (-). Cannot begin or end with underscore or hyphen. 
Must consist of between +3 and 63 characters.`, + }, + "database_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"MYSQL", "SPANNER", ""}), + Description: `The database type that the Metastore service stores its data. Default value: "MYSQL" Possible values: ["MYSQL", "SPANNER"]`, + Default: "MYSQL", + }, + "encryption_config": { + Type: schema.TypeList, + Optional: true, + Description: `Information used to configure the Dataproc Metastore service to encrypt +customer data at rest.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The fully qualified customer provided Cloud KMS key name to use for customer data encryption. +Use the following format: 'projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)'`, + }, + }, + }, + }, + "hive_metastore_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration information specific to running Hive metastore software as the metastore service.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Hive metastore schema version.`, + }, + "config_overrides": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + DiffSuppressFunc: dataprocMetastoreServiceOverrideSuppress, + Description: `A mapping of Hive metastore configuration key-value pairs to apply to the Hive metastore (configured in hive-site.xml). 
+The mappings override system defaults (some keys cannot be overridden)`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "kerberos_config": { + Type: schema.TypeList, + Optional: true, + Description: `Information used to configure the Hive metastore service as a service principal in a Kerberos realm.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "keytab": { + Type: schema.TypeList, + Required: true, + Description: `A Kerberos keytab file that can be used to authenticate a service principal with a Kerberos Key Distribution Center (KDC).`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_secret": { + Type: schema.TypeString, + Required: true, + Description: `The relative resource name of a Secret Manager secret version, in the following form: + +"projects/{projectNumber}/secrets/{secret_id}/versions/{version_id}".`, + }, + }, + }, + }, + "krb5_config_gcs_uri": { + Type: schema.TypeString, + Required: true, + Description: `A Cloud Storage URI that specifies the path to a krb5.conf file. It is of the form gs://{bucket_name}/path/to/krb5.conf, although the file does not need to be named krb5.conf explicitly.`, + }, + "principal": { + Type: schema.TypeString, + Required: true, + Description: `A Kerberos principal that exists in the both the keytab the KDC to authenticate as. A typical principal is of the form "primary/instance@REALM", but there is no exact format.`, + }, + }, + }, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-defined labels for the metastore service.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The location where the metastore service should reside. 
+The default value is 'global'.`, + Default: "global", + }, + "maintenance_window": { + Type: schema.TypeList, + Optional: true, + Description: `The one hour maintenance window of the metastore service. +This specifies when the service can be restarted for maintenance purposes in UTC time. +Maintenance window is not needed for services with the 'SPANNER' database type.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day_of_week": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), + Description: `The day of week, when the window starts. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "hour_of_day": { + Type: schema.TypeInt, + Required: true, + Description: `The hour of day (0-23) when the window starts.`, + }, + }, + }, + }, + "network": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: + +"projects/{projectNumber}/global/networks/{network_id}".`, + }, + "network_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The configuration specifying the network settings for the Dataproc Metastore service.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "consumers": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The consumer-side network configuration for the Dataproc Metastore instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "subnetwork": { + Type: schema.TypeString, + Required: true, + Description: `The subnetwork of the customer project from which an IP address is reserved and used as the Dataproc Metastore service's endpoint. 
+It is accessible to hosts in the subnet and to all hosts in a subnet in the same region and same network. +There must be at least one IP address available in the subnet's primary range. The subnet is specified in the following form: +'projects/{projectNumber}/regions/{region_id}/subnetworks/{subnetwork_id}`, + }, + "endpoint_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the endpoint used to access the metastore service.`, + }, + }, + }, + }, + }, + }, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The TCP port at which the metastore service is reached. Default: 9083.`, + }, + "release_channel": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"CANARY", "STABLE", ""}), + Description: `The release channel of the service. If unspecified, defaults to 'STABLE'. Default value: "STABLE" Possible values: ["CANARY", "STABLE"]`, + Default: "STABLE", + }, + "telemetry_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `The configuration specifying telemetry settings for the Dataproc Metastore service. If unspecified defaults to JSON.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_format": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"LEGACY", "JSON", ""}), + Description: `The output format of the Dataproc Metastore service's logs. Default value: "JSON" Possible values: ["LEGACY", "JSON"]`, + Default: "JSON", + }, + }, + }, + }, + "tier": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"DEVELOPER", "ENTERPRISE", ""}), + Description: `The tier of the service. 
Possible values: ["DEVELOPER", "ENTERPRISE"]`, + }, + "artifact_gcs_uri": { + Type: schema.TypeString, + Computed: true, + Description: `A Cloud Storage URI (starting with gs://) that specifies where artifacts related to the metastore service are stored.`, + }, + "endpoint_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the endpoint used to access the metastore service.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The relative resource name of the metastore service.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the metastore service.`, + }, + "state_message": { + Type: schema.TypeString, + Computed: true, + Description: `Additional information about the current state of the metastore service, if available.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `The globally unique resource identifier of the metastore service.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataprocMetastoreServiceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandDataprocMetastoreServiceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + networkProp, err := expandDataprocMetastoreServiceNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + 
obj["network"] = networkProp + } + portProp, err := expandDataprocMetastoreServicePort(d.Get("port"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("port"); !tpgresource.IsEmptyValue(reflect.ValueOf(portProp)) && (ok || !reflect.DeepEqual(v, portProp)) { + obj["port"] = portProp + } + tierProp, err := expandDataprocMetastoreServiceTier(d.Get("tier"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tier"); !tpgresource.IsEmptyValue(reflect.ValueOf(tierProp)) && (ok || !reflect.DeepEqual(v, tierProp)) { + obj["tier"] = tierProp + } + maintenanceWindowProp, err := expandDataprocMetastoreServiceMaintenanceWindow(d.Get("maintenance_window"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("maintenance_window"); !tpgresource.IsEmptyValue(reflect.ValueOf(maintenanceWindowProp)) && (ok || !reflect.DeepEqual(v, maintenanceWindowProp)) { + obj["maintenanceWindow"] = maintenanceWindowProp + } + encryptionConfigProp, err := expandDataprocMetastoreServiceEncryptionConfig(d.Get("encryption_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionConfigProp)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { + obj["encryptionConfig"] = encryptionConfigProp + } + hiveMetastoreConfigProp, err := expandDataprocMetastoreServiceHiveMetastoreConfig(d.Get("hive_metastore_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("hive_metastore_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(hiveMetastoreConfigProp)) && (ok || !reflect.DeepEqual(v, hiveMetastoreConfigProp)) { + obj["hiveMetastoreConfig"] = hiveMetastoreConfigProp + } + networkConfigProp, err := expandDataprocMetastoreServiceNetworkConfig(d.Get("network_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network_config"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(networkConfigProp)) && (ok || !reflect.DeepEqual(v, networkConfigProp)) { + obj["networkConfig"] = networkConfigProp + } + databaseTypeProp, err := expandDataprocMetastoreServiceDatabaseType(d.Get("database_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("database_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(databaseTypeProp)) && (ok || !reflect.DeepEqual(v, databaseTypeProp)) { + obj["databaseType"] = databaseTypeProp + } + releaseChannelProp, err := expandDataprocMetastoreServiceReleaseChannel(d.Get("release_channel"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("release_channel"); !tpgresource.IsEmptyValue(reflect.ValueOf(releaseChannelProp)) && (ok || !reflect.DeepEqual(v, releaseChannelProp)) { + obj["releaseChannel"] = releaseChannelProp + } + telemetryConfigProp, err := expandDataprocMetastoreServiceTelemetryConfig(d.Get("telemetry_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("telemetry_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(telemetryConfigProp)) && (ok || !reflect.DeepEqual(v, telemetryConfigProp)) { + obj["telemetryConfig"] = telemetryConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/services?serviceId={{service_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Service: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + 
RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Service: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/services/{{service_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = DataprocMetastoreOperationWaitTime( + config, res, project, "Creating Service", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Service: %s", err) + } + + log.Printf("[DEBUG] Finished creating Service %q: %#v", d.Id(), res) + + return resourceDataprocMetastoreServiceRead(d, meta) +} + +func resourceDataprocMetastoreServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/services/{{service_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DataprocMetastoreService %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return 
fmt.Errorf("Error reading Service: %s", err) + } + + if err := d.Set("name", flattenDataprocMetastoreServiceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("labels", flattenDataprocMetastoreServiceLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("network", flattenDataprocMetastoreServiceNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("endpoint_uri", flattenDataprocMetastoreServiceEndpointUri(res["endpointUri"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("port", flattenDataprocMetastoreServicePort(res["port"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("state", flattenDataprocMetastoreServiceState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("state_message", flattenDataprocMetastoreServiceStateMessage(res["stateMessage"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("artifact_gcs_uri", flattenDataprocMetastoreServiceArtifactGcsUri(res["artifactGcsUri"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("tier", flattenDataprocMetastoreServiceTier(res["tier"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("maintenance_window", flattenDataprocMetastoreServiceMaintenanceWindow(res["maintenanceWindow"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("encryption_config", flattenDataprocMetastoreServiceEncryptionConfig(res["encryptionConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := 
d.Set("hive_metastore_config", flattenDataprocMetastoreServiceHiveMetastoreConfig(res["hiveMetastoreConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("network_config", flattenDataprocMetastoreServiceNetworkConfig(res["networkConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("database_type", flattenDataprocMetastoreServiceDatabaseType(res["databaseType"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("release_channel", flattenDataprocMetastoreServiceReleaseChannel(res["releaseChannel"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("uid", flattenDataprocMetastoreServiceUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("telemetry_config", flattenDataprocMetastoreServiceTelemetryConfig(res["telemetryConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + + return nil +} + +func resourceDataprocMetastoreServiceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandDataprocMetastoreServiceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + portProp, err := expandDataprocMetastoreServicePort(d.Get("port"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("port"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, portProp)) { + obj["port"] = portProp + } + tierProp, err := expandDataprocMetastoreServiceTier(d.Get("tier"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tier"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tierProp)) { + obj["tier"] = tierProp + } + maintenanceWindowProp, err := expandDataprocMetastoreServiceMaintenanceWindow(d.Get("maintenance_window"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("maintenance_window"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maintenanceWindowProp)) { + obj["maintenanceWindow"] = maintenanceWindowProp + } + encryptionConfigProp, err := expandDataprocMetastoreServiceEncryptionConfig(d.Get("encryption_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { + obj["encryptionConfig"] = encryptionConfigProp + } + hiveMetastoreConfigProp, err := expandDataprocMetastoreServiceHiveMetastoreConfig(d.Get("hive_metastore_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("hive_metastore_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hiveMetastoreConfigProp)) { + obj["hiveMetastoreConfig"] = hiveMetastoreConfigProp + } + telemetryConfigProp, err := expandDataprocMetastoreServiceTelemetryConfig(d.Get("telemetry_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("telemetry_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, telemetryConfigProp)) { + obj["telemetryConfig"] = telemetryConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/services/{{service_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Service %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("port") { + updateMask = append(updateMask, "port") + } + + if d.HasChange("tier") { + updateMask = append(updateMask, "tier") + } + + if d.HasChange("maintenance_window") { + updateMask = append(updateMask, "maintenanceWindow") + } + + if d.HasChange("encryption_config") { + updateMask = append(updateMask, "encryptionConfig") + } + + if d.HasChange("hive_metastore_config") { + updateMask = append(updateMask, "hiveMetastoreConfig") + } + + if d.HasChange("telemetry_config") { + updateMask = append(updateMask, "telemetryConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Service %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Service %q: %#v", d.Id(), res) + } + + err = DataprocMetastoreOperationWaitTime( + config, res, project, "Updating Service", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceDataprocMetastoreServiceRead(d, meta) +} + +func resourceDataprocMetastoreServiceDelete(d *schema.ResourceData, meta 
interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DataprocMetastoreBasePath}}projects/{{project}}/locations/{{location}}/services/{{service_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Service %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Service") + } + + err = DataprocMetastoreOperationWaitTime( + config, res, project, "Deleting Service", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) + return nil +} + +func resourceDataprocMetastoreServiceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/services/{{service_id}}") + if err != nil { + return nil, fmt.Errorf("Error 
constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDataprocMetastoreServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceEndpointUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServicePort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataprocMetastoreServiceState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceStateMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceArtifactGcsUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceTier(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceMaintenanceWindow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + 
transformed := make(map[string]interface{}) + transformed["hour_of_day"] = + flattenDataprocMetastoreServiceMaintenanceWindowHourOfDay(original["hourOfDay"], d, config) + transformed["day_of_week"] = + flattenDataprocMetastoreServiceMaintenanceWindowDayOfWeek(original["dayOfWeek"], d, config) + return []interface{}{transformed} +} +func flattenDataprocMetastoreServiceMaintenanceWindowHourOfDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDataprocMetastoreServiceMaintenanceWindowDayOfWeek(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceEncryptionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key"] = + flattenDataprocMetastoreServiceEncryptionConfigKmsKey(original["kmsKey"], d, config) + return []interface{}{transformed} +} +func flattenDataprocMetastoreServiceEncryptionConfigKmsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceHiveMetastoreConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["version"] = + 
flattenDataprocMetastoreServiceHiveMetastoreConfigVersion(original["version"], d, config) + transformed["config_overrides"] = + flattenDataprocMetastoreServiceHiveMetastoreConfigConfigOverrides(original["configOverrides"], d, config) + transformed["kerberos_config"] = + flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfig(original["kerberosConfig"], d, config) + return []interface{}{transformed} +} +func flattenDataprocMetastoreServiceHiveMetastoreConfigVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceHiveMetastoreConfigConfigOverrides(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["keytab"] = + flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytab(original["keytab"], d, config) + transformed["principal"] = + flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigPrincipal(original["principal"], d, config) + transformed["krb5_config_gcs_uri"] = + flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKrb5ConfigGcsUri(original["krb5ConfigGcsUri"], d, config) + return []interface{}{transformed} +} +func flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytab(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cloud_secret"] = + flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabCloudSecret(original["cloudSecret"], 
d, config) + return []interface{}{transformed} +} +func flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabCloudSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigPrincipal(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKrb5ConfigGcsUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceNetworkConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["consumers"] = + flattenDataprocMetastoreServiceNetworkConfigConsumers(original["consumers"], d, config) + return []interface{}{transformed} +} +func flattenDataprocMetastoreServiceNetworkConfigConsumers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "endpoint_uri": flattenDataprocMetastoreServiceNetworkConfigConsumersEndpointUri(original["endpointUri"], d, config), + "subnetwork": flattenDataprocMetastoreServiceNetworkConfigConsumersSubnetwork(original["subnetwork"], d, config), + }) + } + return transformed +} +func flattenDataprocMetastoreServiceNetworkConfigConsumersEndpointUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDataprocMetastoreServiceNetworkConfigConsumersSubnetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceDatabaseType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceReleaseChannel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataprocMetastoreServiceTelemetryConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["log_format"] = + flattenDataprocMetastoreServiceTelemetryConfigLogFormat(original["logFormat"], d, config) + return []interface{}{transformed} +} +func flattenDataprocMetastoreServiceTelemetryConfigLogFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDataprocMetastoreServiceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocMetastoreServiceNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServicePort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceTier(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceMaintenanceWindow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHourOfDay, err := expandDataprocMetastoreServiceMaintenanceWindowHourOfDay(original["hour_of_day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHourOfDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hourOfDay"] = transformedHourOfDay + } + + transformedDayOfWeek, err := expandDataprocMetastoreServiceMaintenanceWindowDayOfWeek(original["day_of_week"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeek); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeek"] = transformedDayOfWeek + } + + return transformed, nil +} + +func expandDataprocMetastoreServiceMaintenanceWindowHourOfDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceMaintenanceWindowDayOfWeek(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceEncryptionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKey, err := expandDataprocMetastoreServiceEncryptionConfigKmsKey(original["kms_key"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedKmsKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKey"] = transformedKmsKey + } + + return transformed, nil +} + +func expandDataprocMetastoreServiceEncryptionConfigKmsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceHiveMetastoreConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVersion, err := expandDataprocMetastoreServiceHiveMetastoreConfigVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedConfigOverrides, err := expandDataprocMetastoreServiceHiveMetastoreConfigConfigOverrides(original["config_overrides"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConfigOverrides); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["configOverrides"] = transformedConfigOverrides + } + + transformedKerberosConfig, err := expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfig(original["kerberos_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKerberosConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kerberosConfig"] = transformedKerberosConfig + } + + return transformed, nil +} + +func expandDataprocMetastoreServiceHiveMetastoreConfigVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataprocMetastoreServiceHiveMetastoreConfigConfigOverrides(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKeytab, err := expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytab(original["keytab"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKeytab); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["keytab"] = transformedKeytab + } + + transformedPrincipal, err := expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigPrincipal(original["principal"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrincipal); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["principal"] = transformedPrincipal + } + + transformedKrb5ConfigGcsUri, err := expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKrb5ConfigGcsUri(original["krb5_config_gcs_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKrb5ConfigGcsUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["krb5ConfigGcsUri"] = transformedKrb5ConfigGcsUri + } + + return transformed, nil +} + +func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytab(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + 
if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCloudSecret, err := expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabCloudSecret(original["cloud_secret"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cloudSecret"] = transformedCloudSecret + } + + return transformed, nil +} + +func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKeytabCloudSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigPrincipal(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceHiveMetastoreConfigKerberosConfigKrb5ConfigGcsUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceNetworkConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedConsumers, err := expandDataprocMetastoreServiceNetworkConfigConsumers(original["consumers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConsumers); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["consumers"] = transformedConsumers + } + + return transformed, nil +} + +func expandDataprocMetastoreServiceNetworkConfigConsumers(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEndpointUri, err := expandDataprocMetastoreServiceNetworkConfigConsumersEndpointUri(original["endpoint_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndpointUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["endpointUri"] = transformedEndpointUri + } + + transformedSubnetwork, err := expandDataprocMetastoreServiceNetworkConfigConsumersSubnetwork(original["subnetwork"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubnetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subnetwork"] = transformedSubnetwork + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDataprocMetastoreServiceNetworkConfigConsumersEndpointUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceNetworkConfigConsumersSubnetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceDatabaseType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceReleaseChannel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocMetastoreServiceTelemetryConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original 
:= raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLogFormat, err := expandDataprocMetastoreServiceTelemetryConfigLogFormat(original["log_format"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLogFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["logFormat"] = transformedLogFormat + } + + return transformed, nil +} + +func expandDataprocMetastoreServiceTelemetryConfigLogFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/resource_dataproc_metastore_service_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/resource_dataproc_metastore_service_sweeper.go new file mode 100644 index 0000000000..3e0e9d699c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore/resource_dataproc_metastore_service_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dataprocmetastore + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DataprocMetastoreService", testSweepDataprocMetastoreService) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDataprocMetastoreService(region string) error { + resourceName := "DataprocMetastoreService" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://metastore.googleapis.com/v1/projects/{{project}}/locations/{{location}}/services", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err 
!= nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["services"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://metastore.googleapis.com/v1/projects/{{project}}/locations/{{location}}/services/{{service_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/datastore_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/datastore_operation.go new file mode 100644 index 0000000000..d619703fda --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/datastore_operation.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package datastore + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type DatastoreOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *DatastoreOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.DatastoreBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.DatastoreIndex409Contention}, + }) +} + +func createDatastoreWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*DatastoreOperationWaiter, error) { + w := &DatastoreOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func DatastoreOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createDatastoreWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func DatastoreOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createDatastoreWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/resource_datastore_index.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/resource_datastore_index.go new file mode 100644 index 0000000000..59a2283dc8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastore/resource_datastore_index.go @@ -0,0 +1,412 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datastore + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDatastoreIndex() *schema.Resource { + return &schema.Resource{ + Create: resourceDatastoreIndexCreate, + Read: resourceDatastoreIndexRead, + Delete: resourceDatastoreIndexDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDatastoreIndexImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "kind": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The entity kind which the index applies to.`, + }, + "ancestor": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"NONE", "ALL_ANCESTORS", ""}), + Description: `Policy for including ancestors in the index. Default value: "NONE" Possible values: ["NONE", "ALL_ANCESTORS"]`, + Default: "NONE", + }, + "properties": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `An ordered list of properties to index on.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "direction": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ASCENDING", "DESCENDING"}), + Description: `The direction the index should optimize for sorting. 
Possible values: ["ASCENDING", "DESCENDING"]`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The property name to index.`, + }, + }, + }, + }, + "index_id": { + Type: schema.TypeString, + Computed: true, + Description: `The index id.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDatastoreIndexCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + kindProp, err := expandDatastoreIndexKind(d.Get("kind"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("kind"); !tpgresource.IsEmptyValue(reflect.ValueOf(kindProp)) && (ok || !reflect.DeepEqual(v, kindProp)) { + obj["kind"] = kindProp + } + ancestorProp, err := expandDatastoreIndexAncestor(d.Get("ancestor"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ancestor"); !tpgresource.IsEmptyValue(reflect.ValueOf(ancestorProp)) && (ok || !reflect.DeepEqual(v, ancestorProp)) { + obj["ancestor"] = ancestorProp + } + propertiesProp, err := expandDatastoreIndexProperties(d.Get("properties"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("properties"); !tpgresource.IsEmptyValue(reflect.ValueOf(propertiesProp)) && (ok || !reflect.DeepEqual(v, propertiesProp)) { + obj["properties"] = propertiesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DatastoreBasePath}}projects/{{project}}/indexes") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Index: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Index: %s", err) + } + billingProject = project + + // err 
== nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.DatastoreIndex409Contention}, + }) + if err != nil { + return fmt.Errorf("Error creating Index: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/indexes/{{index_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = DatastoreOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Index", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Index: %s", err) + } + + if err := d.Set("index_id", flattenDatastoreIndexIndexId(opRes["indexId"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/indexes/{{index_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Index %q: %#v", d.Id(), res) + + return resourceDatastoreIndexRead(d, meta) +} + +func resourceDatastoreIndexRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DatastoreBasePath}}projects/{{project}}/indexes/{{index_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Index: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.DatastoreIndex409Contention}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DatastoreIndex %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + + if err := d.Set("index_id", flattenDatastoreIndexIndexId(res["indexId"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("kind", flattenDatastoreIndexKind(res["kind"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("ancestor", flattenDatastoreIndexAncestor(res["ancestor"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("properties", flattenDatastoreIndexProperties(res["properties"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + + return nil +} + +func resourceDatastoreIndexDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Index: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DatastoreBasePath}}projects/{{project}}/indexes/{{index_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Index %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.DatastoreIndex409Contention}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Index") + } + + err = DatastoreOperationWaitTime( + config, res, project, "Deleting Index", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Index %q: %#v", d.Id(), res) + return nil +} + +func resourceDatastoreIndexImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/indexes/(?P[^/]+)", + 
"(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/indexes/{{index_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDatastoreIndexIndexId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastoreIndexKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastoreIndexAncestor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastoreIndexProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDatastoreIndexPropertiesName(original["name"], d, config), + "direction": flattenDatastoreIndexPropertiesDirection(original["direction"], d, config), + }) + } + return transformed +} +func flattenDatastoreIndexPropertiesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastoreIndexPropertiesDirection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDatastoreIndexKind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastoreIndexAncestor(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandDatastoreIndexProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDatastoreIndexPropertiesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedDirection, err := expandDatastoreIndexPropertiesDirection(original["direction"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDirection); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["direction"] = transformedDirection + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDatastoreIndexPropertiesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastoreIndexPropertiesDirection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/data_source_google_datastream_static_ips.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/data_source_google_datastream_static_ips.go new file mode 100644 index 0000000000..cf84d35212 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/data_source_google_datastream_static_ips.go @@ -0,0 +1,80 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package datastream + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleDatastreamStaticIps() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleDatastreamStaticIpsRead, + + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + }, + "location": { + Type: schema.TypeString, + Required: true, + }, + "static_ips": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func dataSourceGoogleDatastreamStaticIpsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}:fetchStaticIps") + if err != nil { + return err + } + + staticIps, err := tpgresource.PaginatedListRequest(project, url, userAgent, config, flattenStaticIpsList) + if err != nil { + return fmt.Errorf("Error retrieving monitoring uptime check ips: %s", err) + } + + if err := d.Set("static_ips", staticIps); err != nil { + return fmt.Errorf("Error retrieving monitoring uptime check ips: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}:fetchStaticIps") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return nil +} + +func flattenStaticIpsList(resp map[string]interface{}) []interface{} { + ipList := resp["staticIps"].([]interface{}) + staticIps := make([]interface{}, 
len(ipList)) + for i, u := range ipList { + staticIps[i] = u + } + return staticIps +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/datastream_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/datastream_operation.go new file mode 100644 index 0000000000..f3475d0e87 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/datastream_operation.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package datastream + +import ( + "bytes" + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + datastream "google.golang.org/api/datastream/v1" +) + +type DatastreamOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + Op datastream.Operation + tpgresource.CommonOperationWaiter +} + +func (w *DatastreamOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.DatastreamBasePath, w.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func (w *DatastreamOperationWaiter) Error() error { + if w != nil && w.Op.Error != nil { + return &DatastreamOperationError{Op: w.Op} + } + return nil +} + +func (w *DatastreamOperationWaiter) SetOp(op interface{}) error { + w.CommonOperationWaiter.SetOp(op) + if err := tpgresource.Convert(op, &w.Op); err != nil { + return err + } + return nil +} + +func createDatastreamWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*DatastreamOperationWaiter, error) { + w := &DatastreamOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func DatastreamOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createDatastreamWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.Op.Response), response) +} + +func DatastreamOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createDatastreamWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} + +// DatastreamOperationError wraps datastream.Status and implements the +// error interface so it can be returned. +type DatastreamOperationError struct { + Op datastream.Operation +} + +func (e DatastreamOperationError) Error() string { + var buf bytes.Buffer + + for _, err := range e.Op.Error.Details { + buf.Write(err) + buf.WriteString("\n") + } + if validations := e.extractFailedValidationResult(); validations != nil { + buf.Write(validations) + buf.WriteString("\n") + } + + return buf.String() +} + +// extractFailedValidationResult extracts the internal failed validations +// if there are any. +func (e DatastreamOperationError) extractFailedValidationResult() []byte { + var metadata datastream.OperationMetadata + data, err := e.Op.Metadata.MarshalJSON() + if err != nil { + return nil + } + err = json.Unmarshal(data, &metadata) + if err != nil { + return nil + } + if metadata.ValidationResult == nil { + return nil + } + var res []byte + for _, v := range metadata.ValidationResult.Validations { + if v.State == "FAILED" { + data, err := v.MarshalJSON() + if err != nil { + return nil + } + res = append(res, data...) + res = append(res, []byte("\n")...) 
+ } + } + return res +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_datastream_connection_profile.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_connection_profile.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_datastream_connection_profile.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_connection_profile.go index fa29c119bd..ecf1dd8c9e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_datastream_connection_profile.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_connection_profile.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package datastream import ( "fmt" @@ -22,6 +25,9 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceDatastreamConnectionProfile() *schema.Resource { @@ -340,8 +346,8 @@ If this field is used then the 'client_certificate' and the } func resourceDatastreamConnectionProfileCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -350,31 +356,31 @@ func resourceDatastreamConnectionProfileCreate(d *schema.ResourceData, meta inte labelsProp, err := expandDatastreamConnectionProfileLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } displayNameProp, err := expandDatastreamConnectionProfileDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || 
!reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } oracleProfileProp, err := expandDatastreamConnectionProfileOracleProfile(d.Get("oracle_profile"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("oracle_profile"); !isEmptyValue(reflect.ValueOf(oracleProfileProp)) && (ok || !reflect.DeepEqual(v, oracleProfileProp)) { + } else if v, ok := d.GetOkExists("oracle_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(oracleProfileProp)) && (ok || !reflect.DeepEqual(v, oracleProfileProp)) { obj["oracleProfile"] = oracleProfileProp } gcsProfileProp, err := expandDatastreamConnectionProfileGcsProfile(d.Get("gcs_profile"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("gcs_profile"); !isEmptyValue(reflect.ValueOf(gcsProfileProp)) && (ok || !reflect.DeepEqual(v, gcsProfileProp)) { + } else if v, ok := d.GetOkExists("gcs_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(gcsProfileProp)) && (ok || !reflect.DeepEqual(v, gcsProfileProp)) { obj["gcsProfile"] = gcsProfileProp } mysqlProfileProp, err := expandDatastreamConnectionProfileMysqlProfile(d.Get("mysql_profile"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("mysql_profile"); !isEmptyValue(reflect.ValueOf(mysqlProfileProp)) && (ok || !reflect.DeepEqual(v, mysqlProfileProp)) { + } else if v, ok := d.GetOkExists("mysql_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(mysqlProfileProp)) && (ok || !reflect.DeepEqual(v, mysqlProfileProp)) { obj["mysqlProfile"] = mysqlProfileProp } bigqueryProfileProp, err := expandDatastreamConnectionProfileBigqueryProfile(d.Get("bigquery_profile"), d, config) @@ -386,23 +392,23 @@ func resourceDatastreamConnectionProfileCreate(d *schema.ResourceData, meta inte postgresqlProfileProp, err := expandDatastreamConnectionProfilePostgresqlProfile(d.Get("postgresql_profile"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("postgresql_profile"); 
!isEmptyValue(reflect.ValueOf(postgresqlProfileProp)) && (ok || !reflect.DeepEqual(v, postgresqlProfileProp)) { + } else if v, ok := d.GetOkExists("postgresql_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(postgresqlProfileProp)) && (ok || !reflect.DeepEqual(v, postgresqlProfileProp)) { obj["postgresqlProfile"] = postgresqlProfileProp } forwardSshConnectivityProp, err := expandDatastreamConnectionProfileForwardSshConnectivity(d.Get("forward_ssh_connectivity"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("forward_ssh_connectivity"); !isEmptyValue(reflect.ValueOf(forwardSshConnectivityProp)) && (ok || !reflect.DeepEqual(v, forwardSshConnectivityProp)) { + } else if v, ok := d.GetOkExists("forward_ssh_connectivity"); !tpgresource.IsEmptyValue(reflect.ValueOf(forwardSshConnectivityProp)) && (ok || !reflect.DeepEqual(v, forwardSshConnectivityProp)) { obj["forwardSshConnectivity"] = forwardSshConnectivityProp } privateConnectivityProp, err := expandDatastreamConnectionProfilePrivateConnectivity(d.Get("private_connectivity"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("private_connectivity"); !isEmptyValue(reflect.ValueOf(privateConnectivityProp)) && (ok || !reflect.DeepEqual(v, privateConnectivityProp)) { + } else if v, ok := d.GetOkExists("private_connectivity"); !tpgresource.IsEmptyValue(reflect.ValueOf(privateConnectivityProp)) && (ok || !reflect.DeepEqual(v, privateConnectivityProp)) { obj["privateConnectivity"] = privateConnectivityProp } - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/connectionProfiles?connectionProfileId={{connection_profile_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/connectionProfiles?connectionProfileId={{connection_profile_id}}") if err != nil { return err } @@ -410,24 +416,32 @@ func resourceDatastreamConnectionProfileCreate(d 
*schema.ResourceData, meta inte log.Printf("[DEBUG] Creating new ConnectionProfile: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for ConnectionProfile: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating ConnectionProfile: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -451,7 +465,7 @@ func resourceDatastreamConnectionProfileCreate(d *schema.ResourceData, meta inte } // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -463,33 +477,39 @@ func resourceDatastreamConnectionProfileCreate(d *schema.ResourceData, meta inte } func resourceDatastreamConnectionProfileRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for ConnectionProfile: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DatastreamConnectionProfile %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, 
fmt.Sprintf("DatastreamConnectionProfile %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -531,15 +551,15 @@ func resourceDatastreamConnectionProfileRead(d *schema.ResourceData, meta interf } func resourceDatastreamConnectionProfileUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for ConnectionProfile: %s", err) } @@ -549,31 +569,31 @@ func resourceDatastreamConnectionProfileUpdate(d *schema.ResourceData, meta inte labelsProp, err := expandDatastreamConnectionProfileLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } displayNameProp, err := expandDatastreamConnectionProfileDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } oracleProfileProp, err := expandDatastreamConnectionProfileOracleProfile(d.Get("oracle_profile"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("oracle_profile"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, oracleProfileProp)) 
{ + } else if v, ok := d.GetOkExists("oracle_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, oracleProfileProp)) { obj["oracleProfile"] = oracleProfileProp } gcsProfileProp, err := expandDatastreamConnectionProfileGcsProfile(d.Get("gcs_profile"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("gcs_profile"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gcsProfileProp)) { + } else if v, ok := d.GetOkExists("gcs_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gcsProfileProp)) { obj["gcsProfile"] = gcsProfileProp } mysqlProfileProp, err := expandDatastreamConnectionProfileMysqlProfile(d.Get("mysql_profile"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("mysql_profile"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mysqlProfileProp)) { + } else if v, ok := d.GetOkExists("mysql_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mysqlProfileProp)) { obj["mysqlProfile"] = mysqlProfileProp } bigqueryProfileProp, err := expandDatastreamConnectionProfileBigqueryProfile(d.Get("bigquery_profile"), d, config) @@ -585,23 +605,23 @@ func resourceDatastreamConnectionProfileUpdate(d *schema.ResourceData, meta inte postgresqlProfileProp, err := expandDatastreamConnectionProfilePostgresqlProfile(d.Get("postgresql_profile"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("postgresql_profile"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, postgresqlProfileProp)) { + } else if v, ok := d.GetOkExists("postgresql_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, postgresqlProfileProp)) { obj["postgresqlProfile"] = postgresqlProfileProp } forwardSshConnectivityProp, err := expandDatastreamConnectionProfileForwardSshConnectivity(d.Get("forward_ssh_connectivity"), d, config) if err != nil { 
return err - } else if v, ok := d.GetOkExists("forward_ssh_connectivity"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, forwardSshConnectivityProp)) { + } else if v, ok := d.GetOkExists("forward_ssh_connectivity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, forwardSshConnectivityProp)) { obj["forwardSshConnectivity"] = forwardSshConnectivityProp } privateConnectivityProp, err := expandDatastreamConnectionProfilePrivateConnectivity(d.Get("private_connectivity"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("private_connectivity"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, privateConnectivityProp)) { + } else if v, ok := d.GetOkExists("private_connectivity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, privateConnectivityProp)) { obj["privateConnectivity"] = privateConnectivityProp } - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") if err != nil { return err } @@ -644,19 +664,27 @@ func resourceDatastreamConnectionProfileUpdate(d *schema.ResourceData, meta inte if d.HasChange("private_connectivity") { updateMask = append(updateMask, "privateConnectivity") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, 
config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating ConnectionProfile %q: %s", d.Id(), err) @@ -676,21 +704,21 @@ func resourceDatastreamConnectionProfileUpdate(d *schema.ResourceData, meta inte } func resourceDatastreamConnectionProfileDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for ConnectionProfile: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") if err != nil { return err } @@ -699,13 +727,21 @@ func resourceDatastreamConnectionProfileDelete(d *schema.ResourceData, meta inte log.Printf("[DEBUG] Deleting ConnectionProfile %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := 
SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "ConnectionProfile") + return transport_tpg.HandleNotFoundError(err, d, "ConnectionProfile") } err = DatastreamOperationWaitTime( @@ -721,8 +757,8 @@ func resourceDatastreamConnectionProfileDelete(d *schema.ResourceData, meta inte } func resourceDatastreamConnectionProfileImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/connectionProfiles/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -731,7 +767,7 @@ func resourceDatastreamConnectionProfileImport(d *schema.ResourceData, meta inte } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -740,19 +776,19 @@ func resourceDatastreamConnectionProfileImport(d *schema.ResourceData, meta inte return []*schema.ResourceData{d}, nil } -func flattenDatastreamConnectionProfileName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileLabels(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileOracleProfile(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileOracleProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -775,14 +811,14 @@ func flattenDatastreamConnectionProfileOracleProfile(v interface{}, d *schema.Re flattenDatastreamConnectionProfileOracleProfileConnectionAttributes(original["connectionAttributes"], d, config) return []interface{}{transformed} } -func flattenDatastreamConnectionProfileOracleProfileHostname(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileOracleProfileHostname(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileOracleProfilePort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileOracleProfilePort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -796,23 +832,23 @@ func flattenDatastreamConnectionProfileOracleProfilePort(v interface{}, d *schem return v // let terraform core handle it otherwise } -func flattenDatastreamConnectionProfileOracleProfileUsername(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileOracleProfileUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileOracleProfilePassword(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileOracleProfilePassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("oracle_profile.0.password") } -func flattenDatastreamConnectionProfileOracleProfileDatabaseService(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileOracleProfileDatabaseService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileOracleProfileConnectionAttributes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileOracleProfileConnectionAttributes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileGcsProfile(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileGcsProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -827,15 +863,15 @@ func flattenDatastreamConnectionProfileGcsProfile(v interface{}, d *schema.Resou flattenDatastreamConnectionProfileGcsProfileRootPath(original["rootPath"], d, config) return []interface{}{transformed} } -func flattenDatastreamConnectionProfileGcsProfileBucket(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileGcsProfileBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileGcsProfileRootPath(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileGcsProfileRootPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileMysqlProfile(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileMysqlProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -856,14 +892,14 @@ func flattenDatastreamConnectionProfileMysqlProfile(v interface{}, d *schema.Res flattenDatastreamConnectionProfileMysqlProfileSslConfig(original["sslConfig"], d, config) return []interface{}{transformed} } -func flattenDatastreamConnectionProfileMysqlProfileHostname(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileMysqlProfileHostname(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileMysqlProfilePort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileMysqlProfilePort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -877,15 +913,15 @@ func flattenDatastreamConnectionProfileMysqlProfilePort(v interface{}, d *schema return v // let terraform core handle it otherwise } -func flattenDatastreamConnectionProfileMysqlProfileUsername(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileMysqlProfileUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenDatastreamConnectionProfileMysqlProfilePassword(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileMysqlProfilePassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql_profile.0.password") } -func flattenDatastreamConnectionProfileMysqlProfileSslConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileMysqlProfileSslConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -909,31 +945,31 @@ func flattenDatastreamConnectionProfileMysqlProfileSslConfig(v interface{}, d *s return []interface{}{transformed} } -func flattenDatastreamConnectionProfileMysqlProfileSslConfigClientKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileMysqlProfileSslConfigClientKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql_profile.0.ssl_config.0.client_key") } -func flattenDatastreamConnectionProfileMysqlProfileSslConfigClientKeySet(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileMysqlProfileSslConfigClientKeySet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileMysqlProfileSslConfigClientCertificate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileMysqlProfileSslConfigClientCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql_profile.0.ssl_config.0.client_certificate") } -func flattenDatastreamConnectionProfileMysqlProfileSslConfigClientCertificateSet(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenDatastreamConnectionProfileMysqlProfileSslConfigClientCertificateSet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileMysqlProfileSslConfigCaCertificate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileMysqlProfileSslConfigCaCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql_profile.0.ssl_config.0.ca_certificate") } -func flattenDatastreamConnectionProfileMysqlProfileSslConfigCaCertificateSet(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileMysqlProfileSslConfigCaCertificateSet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileBigqueryProfile(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileBigqueryProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -941,7 +977,7 @@ func flattenDatastreamConnectionProfileBigqueryProfile(v interface{}, d *schema. 
return []interface{}{transformed} } -func flattenDatastreamConnectionProfilePostgresqlProfile(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfilePostgresqlProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -962,14 +998,14 @@ func flattenDatastreamConnectionProfilePostgresqlProfile(v interface{}, d *schem flattenDatastreamConnectionProfilePostgresqlProfileDatabase(original["database"], d, config) return []interface{}{transformed} } -func flattenDatastreamConnectionProfilePostgresqlProfileHostname(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfilePostgresqlProfileHostname(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfilePostgresqlProfilePort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfilePostgresqlProfilePort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -983,19 +1019,19 @@ func flattenDatastreamConnectionProfilePostgresqlProfilePort(v interface{}, d *s return v // let terraform core handle it otherwise } -func flattenDatastreamConnectionProfilePostgresqlProfileUsername(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfilePostgresqlProfileUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfilePostgresqlProfilePassword(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenDatastreamConnectionProfilePostgresqlProfilePassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("postgresql_profile.0.password") } -func flattenDatastreamConnectionProfilePostgresqlProfileDatabase(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfilePostgresqlProfileDatabase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileForwardSshConnectivity(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileForwardSshConnectivity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1016,18 +1052,18 @@ func flattenDatastreamConnectionProfileForwardSshConnectivity(v interface{}, d * flattenDatastreamConnectionProfileForwardSshConnectivityPrivateKey(original["privateKey"], d, config) return []interface{}{transformed} } -func flattenDatastreamConnectionProfileForwardSshConnectivityHostname(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileForwardSshConnectivityHostname(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileForwardSshConnectivityUsername(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileForwardSshConnectivityUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamConnectionProfileForwardSshConnectivityPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileForwardSshConnectivityPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok 
{ - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1041,15 +1077,15 @@ func flattenDatastreamConnectionProfileForwardSshConnectivityPort(v interface{}, return v // let terraform core handle it otherwise } -func flattenDatastreamConnectionProfileForwardSshConnectivityPassword(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileForwardSshConnectivityPassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("forward_ssh_connectivity.0.password") } -func flattenDatastreamConnectionProfileForwardSshConnectivityPrivateKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfileForwardSshConnectivityPrivateKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("forward_ssh_connectivity.0.private_key") } -func flattenDatastreamConnectionProfilePrivateConnectivity(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfilePrivateConnectivity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1062,11 +1098,11 @@ func flattenDatastreamConnectionProfilePrivateConnectivity(v interface{}, d *sch flattenDatastreamConnectionProfilePrivateConnectivityPrivateConnection(original["privateConnection"], d, config) return []interface{}{transformed} } -func flattenDatastreamConnectionProfilePrivateConnectivityPrivateConnection(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamConnectionProfilePrivateConnectivityPrivateConnection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandDatastreamConnectionProfileLabels(v interface{}, d TerraformResourceData, config *Config) 
(map[string]string, error) { +func expandDatastreamConnectionProfileLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1077,11 +1113,11 @@ func expandDatastreamConnectionProfileLabels(v interface{}, d TerraformResourceD return m, nil } -func expandDatastreamConnectionProfileDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileOracleProfile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileOracleProfile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1093,69 +1129,69 @@ func expandDatastreamConnectionProfileOracleProfile(v interface{}, d TerraformRe transformedHostname, err := expandDatastreamConnectionProfileOracleProfileHostname(original["hostname"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostname); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostname); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostname"] = transformedHostname } transformedPort, err := expandDatastreamConnectionProfileOracleProfilePort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedUsername, err := expandDatastreamConnectionProfileOracleProfileUsername(original["username"], d, 
config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["username"] = transformedUsername } transformedPassword, err := expandDatastreamConnectionProfileOracleProfilePassword(original["password"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["password"] = transformedPassword } transformedDatabaseService, err := expandDatastreamConnectionProfileOracleProfileDatabaseService(original["database_service"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDatabaseService); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDatabaseService); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["databaseService"] = transformedDatabaseService } transformedConnectionAttributes, err := expandDatastreamConnectionProfileOracleProfileConnectionAttributes(original["connection_attributes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedConnectionAttributes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedConnectionAttributes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["connectionAttributes"] = transformedConnectionAttributes } return transformed, nil } -func expandDatastreamConnectionProfileOracleProfileHostname(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileOracleProfileHostname(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandDatastreamConnectionProfileOracleProfilePort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileOracleProfilePort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileOracleProfileUsername(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileOracleProfileUsername(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileOracleProfilePassword(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileOracleProfilePassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileOracleProfileDatabaseService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileOracleProfileDatabaseService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileOracleProfileConnectionAttributes(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandDatastreamConnectionProfileOracleProfileConnectionAttributes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1166,7 +1202,7 @@ func expandDatastreamConnectionProfileOracleProfileConnectionAttributes(v interf return m, nil } -func expandDatastreamConnectionProfileGcsProfile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileGcsProfile(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1178,29 +1214,29 @@ func expandDatastreamConnectionProfileGcsProfile(v interface{}, d TerraformResou transformedBucket, err := expandDatastreamConnectionProfileGcsProfileBucket(original["bucket"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["bucket"] = transformedBucket } transformedRootPath, err := expandDatastreamConnectionProfileGcsProfileRootPath(original["root_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRootPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRootPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["rootPath"] = transformedRootPath } return transformed, nil } -func expandDatastreamConnectionProfileGcsProfileBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileGcsProfileBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileGcsProfileRootPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileGcsProfileRootPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileMysqlProfile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileMysqlProfile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := 
v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1212,58 +1248,58 @@ func expandDatastreamConnectionProfileMysqlProfile(v interface{}, d TerraformRes transformedHostname, err := expandDatastreamConnectionProfileMysqlProfileHostname(original["hostname"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostname); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostname); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostname"] = transformedHostname } transformedPort, err := expandDatastreamConnectionProfileMysqlProfilePort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedUsername, err := expandDatastreamConnectionProfileMysqlProfileUsername(original["username"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["username"] = transformedUsername } transformedPassword, err := expandDatastreamConnectionProfileMysqlProfilePassword(original["password"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["password"] = transformedPassword } transformedSslConfig, err := expandDatastreamConnectionProfileMysqlProfileSslConfig(original["ssl_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSslConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedSslConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sslConfig"] = transformedSslConfig } return transformed, nil } -func expandDatastreamConnectionProfileMysqlProfileHostname(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileMysqlProfileHostname(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileMysqlProfilePort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileMysqlProfilePort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileMysqlProfileUsername(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileMysqlProfileUsername(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileMysqlProfilePassword(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileMysqlProfilePassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileMysqlProfileSslConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileMysqlProfileSslConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1275,73 +1311,73 @@ func expandDatastreamConnectionProfileMysqlProfileSslConfig(v interface{}, d Ter transformedClientKey, err := 
expandDatastreamConnectionProfileMysqlProfileSslConfigClientKey(original["client_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedClientKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedClientKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["clientKey"] = transformedClientKey } transformedClientKeySet, err := expandDatastreamConnectionProfileMysqlProfileSslConfigClientKeySet(original["client_key_set"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedClientKeySet); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedClientKeySet); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["clientKeySet"] = transformedClientKeySet } transformedClientCertificate, err := expandDatastreamConnectionProfileMysqlProfileSslConfigClientCertificate(original["client_certificate"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedClientCertificate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedClientCertificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["clientCertificate"] = transformedClientCertificate } transformedClientCertificateSet, err := expandDatastreamConnectionProfileMysqlProfileSslConfigClientCertificateSet(original["client_certificate_set"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedClientCertificateSet); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedClientCertificateSet); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["clientCertificateSet"] = transformedClientCertificateSet } transformedCaCertificate, err := expandDatastreamConnectionProfileMysqlProfileSslConfigCaCertificate(original["ca_certificate"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedCaCertificate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCaCertificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["caCertificate"] = transformedCaCertificate } transformedCaCertificateSet, err := expandDatastreamConnectionProfileMysqlProfileSslConfigCaCertificateSet(original["ca_certificate_set"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCaCertificateSet); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCaCertificateSet); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["caCertificateSet"] = transformedCaCertificateSet } return transformed, nil } -func expandDatastreamConnectionProfileMysqlProfileSslConfigClientKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileMysqlProfileSslConfigClientKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileMysqlProfileSslConfigClientKeySet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileMysqlProfileSslConfigClientKeySet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileMysqlProfileSslConfigClientCertificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileMysqlProfileSslConfigClientCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileMysqlProfileSslConfigClientCertificateSet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDatastreamConnectionProfileMysqlProfileSslConfigClientCertificateSet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileMysqlProfileSslConfigCaCertificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileMysqlProfileSslConfigCaCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileMysqlProfileSslConfigCaCertificateSet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileMysqlProfileSslConfigCaCertificateSet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileBigqueryProfile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileBigqueryProfile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1356,7 +1392,7 @@ func expandDatastreamConnectionProfileBigqueryProfile(v interface{}, d Terraform return transformed, nil } -func expandDatastreamConnectionProfilePostgresqlProfile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfilePostgresqlProfile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1368,62 +1404,62 @@ func expandDatastreamConnectionProfilePostgresqlProfile(v interface{}, d Terrafo transformedHostname, err := expandDatastreamConnectionProfilePostgresqlProfileHostname(original["hostname"], d, config) if err != nil { return nil, 
err - } else if val := reflect.ValueOf(transformedHostname); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostname); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostname"] = transformedHostname } transformedPort, err := expandDatastreamConnectionProfilePostgresqlProfilePort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedUsername, err := expandDatastreamConnectionProfilePostgresqlProfileUsername(original["username"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["username"] = transformedUsername } transformedPassword, err := expandDatastreamConnectionProfilePostgresqlProfilePassword(original["password"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["password"] = transformedPassword } transformedDatabase, err := expandDatastreamConnectionProfilePostgresqlProfileDatabase(original["database"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["database"] = transformedDatabase } return transformed, nil } -func expandDatastreamConnectionProfilePostgresqlProfileHostname(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { +func expandDatastreamConnectionProfilePostgresqlProfileHostname(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfilePostgresqlProfilePort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfilePostgresqlProfilePort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfilePostgresqlProfileUsername(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfilePostgresqlProfileUsername(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfilePostgresqlProfilePassword(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfilePostgresqlProfilePassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfilePostgresqlProfileDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfilePostgresqlProfileDatabase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileForwardSshConnectivity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileForwardSshConnectivity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1435,62 +1471,62 @@ func expandDatastreamConnectionProfileForwardSshConnectivity(v interface{}, d Te 
transformedHostname, err := expandDatastreamConnectionProfileForwardSshConnectivityHostname(original["hostname"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostname); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostname); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostname"] = transformedHostname } transformedUsername, err := expandDatastreamConnectionProfileForwardSshConnectivityUsername(original["username"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["username"] = transformedUsername } transformedPort, err := expandDatastreamConnectionProfileForwardSshConnectivityPort(original["port"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["port"] = transformedPort } transformedPassword, err := expandDatastreamConnectionProfileForwardSshConnectivityPassword(original["password"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["password"] = transformedPassword } transformedPrivateKey, err := expandDatastreamConnectionProfileForwardSshConnectivityPrivateKey(original["private_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrivateKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrivateKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["privateKey"] = 
transformedPrivateKey } return transformed, nil } -func expandDatastreamConnectionProfileForwardSshConnectivityHostname(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileForwardSshConnectivityHostname(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileForwardSshConnectivityUsername(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileForwardSshConnectivityUsername(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileForwardSshConnectivityPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileForwardSshConnectivityPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileForwardSshConnectivityPassword(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileForwardSshConnectivityPassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfileForwardSshConnectivityPrivateKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfileForwardSshConnectivityPrivateKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamConnectionProfilePrivateConnectivity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfilePrivateConnectivity(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1502,13 +1538,13 @@ func expandDatastreamConnectionProfilePrivateConnectivity(v interface{}, d Terra transformedPrivateConnection, err := expandDatastreamConnectionProfilePrivateConnectivityPrivateConnection(original["private_connection"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrivateConnection); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrivateConnection); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["privateConnection"] = transformedPrivateConnection } return transformed, nil } -func expandDatastreamConnectionProfilePrivateConnectivityPrivateConnection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamConnectionProfilePrivateConnectivityPrivateConnection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_connection_profile_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_connection_profile_sweeper.go new file mode 100644 index 0000000000..eb3a40f3b4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_connection_profile_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package datastream + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DatastreamConnectionProfile", testSweepDatastreamConnectionProfile) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDatastreamConnectionProfile(region string) error { + resourceName := "DatastreamConnectionProfile" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://datastream.googleapis.com/v1/projects/{{project}}/locations/{{location}}/connectionProfiles", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["connectionProfiles"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://datastream.googleapis.com/v1/projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_private_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_private_connection.go new file mode 100644 index 0000000000..fc1accc015 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_private_connection.go @@ -0,0 +1,520 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package datastream + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func extractError(d *schema.ResourceData) error { + // Casts are not safe since the logic that populate it is type deterministic. + error := d.Get("error").([]interface{})[0].(map[string]interface{}) + message := error["message"].(string) + details := error["details"].(map[string]interface{}) + detailsJSON, _ := json.Marshal(details) + return fmt.Errorf("Failed to create PrivateConnection. %s details = %s", message, string(detailsJSON)) +} + +// waitForPrivateConnectionReady waits for a private connection state to become +// CREATED, if the state is FAILED propegate the error to the user. 
+func waitForPrivateConnectionReady(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { + return resource.Retry(timeout, func() *resource.RetryError { + if err := resourceDatastreamPrivateConnectionRead(d, config); err != nil { + return resource.NonRetryableError(err) + } + + name := d.Get("name").(string) + state := d.Get("state").(string) + if state == "CREATING" { + return resource.RetryableError(fmt.Errorf("PrivateConnection %q has state %q.", name, state)) + } else if state == "CREATED" { + log.Printf("[DEBUG] PrivateConnection %q has state %q.", name, state) + return nil + } else if state == "FAILED" { + return resource.NonRetryableError(extractError(d)) + } else { + return resource.NonRetryableError(fmt.Errorf("PrivateConnection %q has state %q.", name, state)) + } + }) +} + +func ResourceDatastreamPrivateConnection() *schema.Resource { + return &schema.Resource{ + Create: resourceDatastreamPrivateConnectionCreate, + Read: resourceDatastreamPrivateConnectionRead, + Delete: resourceDatastreamPrivateConnectionDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDatastreamPrivateConnectionImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Display name.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the location this private connection is located in.`, + }, + "private_connection_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The private connectivity identifier.`, + }, + "vpc_peering_config": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The VPC Peering configuration is used to create VPC peering +between Datastream and the consumer's VPC.`, 
+ MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "subnet": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A free subnet for peering. (CIDR of /29)`, + }, + "vpc": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Fully qualified name of the VPC that Datastream will peer to. +Format: projects/{project}/global/{networks}/{name}`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Labels.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "error": { + Type: schema.TypeList, + Computed: true, + Description: `The PrivateConnection error in case of failure.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "details": { + Type: schema.TypeMap, + Optional: true, + Description: `A list of messages that carry the error details.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "message": { + Type: schema.TypeString, + Optional: true, + Description: `A message containing more information about the error that occurred.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource's name.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the PrivateConnection.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDatastreamPrivateConnectionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandDatastreamPrivateConnectionLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && 
(ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + displayNameProp, err := expandDatastreamPrivateConnectionDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + vpcPeeringConfigProp, err := expandDatastreamPrivateConnectionVpcPeeringConfig(d.Get("vpc_peering_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("vpc_peering_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(vpcPeeringConfigProp)) && (ok || !reflect.DeepEqual(v, vpcPeeringConfigProp)) { + obj["vpcPeeringConfig"] = vpcPeeringConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/privateConnections?privateConnectionId={{private_connection_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new PrivateConnection: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PrivateConnection: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating PrivateConnection: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}}") + if err != nil { + return fmt.Errorf("Error 
constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = DatastreamOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating PrivateConnection", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create PrivateConnection: %s", err) + } + + if err := d.Set("name", flattenDatastreamPrivateConnectionName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + if err := waitForPrivateConnectionReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { + return fmt.Errorf("Error waiting for PrivateConnection %q to be CREATED. 
%q", d.Get("name").(string), err) + } + + log.Printf("[DEBUG] Finished creating PrivateConnection %q: %#v", d.Id(), res) + + return resourceDatastreamPrivateConnectionRead(d, meta) +} + +func resourceDatastreamPrivateConnectionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PrivateConnection: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DatastreamPrivateConnection %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading PrivateConnection: %s", err) + } + + if err := d.Set("name", flattenDatastreamPrivateConnectionName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading PrivateConnection: %s", err) + } + if err := d.Set("labels", flattenDatastreamPrivateConnectionLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading PrivateConnection: %s", err) + } + if err := d.Set("display_name", flattenDatastreamPrivateConnectionDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading PrivateConnection: %s", err) + } + if err := 
d.Set("state", flattenDatastreamPrivateConnectionState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading PrivateConnection: %s", err) + } + if err := d.Set("error", flattenDatastreamPrivateConnectionError(res["error"], d, config)); err != nil { + return fmt.Errorf("Error reading PrivateConnection: %s", err) + } + if err := d.Set("vpc_peering_config", flattenDatastreamPrivateConnectionVpcPeeringConfig(res["vpcPeeringConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading PrivateConnection: %s", err) + } + + return nil +} + +func resourceDatastreamPrivateConnectionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for PrivateConnection: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting PrivateConnection %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "PrivateConnection") + } + + err = DatastreamOperationWaitTime( + config, res, project, "Deleting PrivateConnection", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } 
+ + log.Printf("[DEBUG] Finished deleting PrivateConnection %q: %#v", d.Id(), res) + return nil +} + +func resourceDatastreamPrivateConnectionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/privateConnections/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + if err := waitForPrivateConnectionReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { + return nil, fmt.Errorf("Error waiting for PrivateConnection %q to be CREATED during importing: %q", d.Get("name").(string), err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenDatastreamPrivateConnectionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastreamPrivateConnectionLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastreamPrivateConnectionDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastreamPrivateConnectionState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastreamPrivateConnectionError(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["message"] = + 
flattenDatastreamPrivateConnectionErrorMessage(original["message"], d, config) + transformed["details"] = + flattenDatastreamPrivateConnectionErrorDetails(original["details"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamPrivateConnectionErrorMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastreamPrivateConnectionErrorDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastreamPrivateConnectionVpcPeeringConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["vpc"] = + flattenDatastreamPrivateConnectionVpcPeeringConfigVpc(original["vpc"], d, config) + transformed["subnet"] = + flattenDatastreamPrivateConnectionVpcPeeringConfigSubnet(original["subnet"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamPrivateConnectionVpcPeeringConfigVpc(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDatastreamPrivateConnectionVpcPeeringConfigSubnet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDatastreamPrivateConnectionLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDatastreamPrivateConnectionDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamPrivateConnectionVpcPeeringConfig(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVpc, err := expandDatastreamPrivateConnectionVpcPeeringConfigVpc(original["vpc"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVpc); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["vpc"] = transformedVpc + } + + transformedSubnet, err := expandDatastreamPrivateConnectionVpcPeeringConfigSubnet(original["subnet"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubnet); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subnet"] = transformedSubnet + } + + return transformed, nil +} + +func expandDatastreamPrivateConnectionVpcPeeringConfigVpc(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamPrivateConnectionVpcPeeringConfigSubnet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_private_connection_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_private_connection_sweeper.go new file mode 100644 index 0000000000..0333c3af63 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_private_connection_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package datastream + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DatastreamPrivateConnection", testSweepDatastreamPrivateConnection) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDatastreamPrivateConnection(region string) error { + resourceName := "DatastreamPrivateConnection" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://datastream.googleapis.com/v1/projects/{{project}}/locations/{{location}}/privateConnections", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["privateConnections"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. 
+ if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://datastream.googleapis.com/v1/projects/{{project}}/locations/{{location}}/privateConnections/{{private_connection_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_datastream_stream.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_stream.go similarity index 82% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_datastream_stream.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_stream.go index 
7558541de4..dfc15204af 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_datastream_stream.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_stream.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package datastream import ( "context" @@ -23,18 +26,23 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) -func resourceDatastreamStreamCustomDiffFunc(diff TerraformResourceDiff) error { +func resourceDatastreamStreamCustomDiffFunc(diff tpgresource.TerraformResourceDiff) error { if diff.HasChange("desired_state") { old, new := diff.GetChange("desired_state") oldState := old.(string) newState := new.(string) - if isNewResource(diff) { + if tpgresource.IsNewResource(diff) { if newState != "NOT_STARTED" && newState != "RUNNING" { return fmt.Errorf("`desired_state` can only be set to `NOT_STARTED` or `RUNNING` when creating a new Stream") } @@ -60,7 +68,7 @@ func resourceDatastreamStreamCustomDiff(_ context.Context, diff *schema.Resource } // waitForDatastreamStreamReady waits for an agent pool to reach a stable state to indicate that it's ready. 
-func waitForDatastreamStreamReady(d *schema.ResourceData, config *Config, timeout time.Duration) error { +func waitForDatastreamStreamReady(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { return resource.Retry(timeout, func() *resource.RetryError { if err := resourceDatastreamStreamRead(d, config); err != nil { return resource.NonRetryableError(err) @@ -110,7 +118,9 @@ func ResourceDatastreamStream() *schema.Resource { Delete: schema.DefaultTimeout(20 * time.Minute), }, - CustomizeDiff: resourceDatastreamStreamCustomDiff, + CustomizeDiff: customdiff.All( + resourceDatastreamStreamCustomDiff, + ), Schema: map[string]*schema.Schema{ "destination_config": { @@ -124,7 +134,7 @@ func ResourceDatastreamStream() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: projectNumberDiffSuppress, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, Description: `Destination connection profile resource. Format: projects/{project}/locations/{location}/connectionProfiles/{name}`, }, "bigquery_destination_config": { @@ -246,13 +256,13 @@ A duration in seconds with up to nine fractional digits, terminated by 's'. Exam "compression": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"NO_COMPRESSION", "GZIP", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NO_COMPRESSION", "GZIP", ""}), Description: `Compression of the loaded JSON file. Possible values: ["NO_COMPRESSION", "GZIP"]`, }, "schema_file_format": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"NO_SCHEMA_FILE", "AVRO_SCHEMA_FILE", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NO_SCHEMA_FILE", "AVRO_SCHEMA_FILE", ""}), Description: `The schema file format along JSON data files. Possible values: ["NO_SCHEMA_FILE", "AVRO_SCHEMA_FILE"]`, }, }, @@ -293,7 +303,7 @@ A duration in seconds with up to nine fractional digits, terminated by 's'. 
Exam Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: projectNumberDiffSuppress, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, Description: `Source connection profile resource. Format: projects/{project}/locations/{location}/connectionProfiles/{name}`, }, "mysql_source_config": { @@ -475,6 +485,14 @@ https://dev.mysql.com/doc/refman/8.0/en/data-types.html`, }, }, }, + "max_concurrent_backfill_tasks": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Maximum number of concurrent backfill tasks. The number should be non negative. +If not set (or set to 0), the system's default value will be used.`, + }, "max_concurrent_cdc_tasks": { Type: schema.TypeInt, Computed: true, @@ -1281,8 +1299,8 @@ will be encrypted using an internal Stream-specific encryption key provisioned t } func resourceDatastreamStreamCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -1291,25 +1309,25 @@ func resourceDatastreamStreamCreate(d *schema.ResourceData, meta interface{}) er labelsProp, err := expandDatastreamStreamLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } displayNameProp, err := expandDatastreamStreamDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, 
displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } sourceConfigProp, err := expandDatastreamStreamSourceConfig(d.Get("source_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("source_config"); !isEmptyValue(reflect.ValueOf(sourceConfigProp)) && (ok || !reflect.DeepEqual(v, sourceConfigProp)) { + } else if v, ok := d.GetOkExists("source_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceConfigProp)) && (ok || !reflect.DeepEqual(v, sourceConfigProp)) { obj["sourceConfig"] = sourceConfigProp } destinationConfigProp, err := expandDatastreamStreamDestinationConfig(d.Get("destination_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("destination_config"); !isEmptyValue(reflect.ValueOf(destinationConfigProp)) && (ok || !reflect.DeepEqual(v, destinationConfigProp)) { + } else if v, ok := d.GetOkExists("destination_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(destinationConfigProp)) && (ok || !reflect.DeepEqual(v, destinationConfigProp)) { obj["destinationConfig"] = destinationConfigProp } backfillAllProp, err := expandDatastreamStreamBackfillAll(d.Get("backfill_all"), d, config) @@ -1327,7 +1345,7 @@ func resourceDatastreamStreamCreate(d *schema.ResourceData, meta interface{}) er customerManagedEncryptionKeyProp, err := expandDatastreamStreamCustomerManagedEncryptionKey(d.Get("customer_managed_encryption_key"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("customer_managed_encryption_key"); !isEmptyValue(reflect.ValueOf(customerManagedEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, customerManagedEncryptionKeyProp)) { + } else if v, ok := d.GetOkExists("customer_managed_encryption_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(customerManagedEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, 
customerManagedEncryptionKeyProp)) { obj["customerManagedEncryptionKey"] = customerManagedEncryptionKeyProp } @@ -1336,7 +1354,7 @@ func resourceDatastreamStreamCreate(d *schema.ResourceData, meta interface{}) er return err } - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams?streamId={{stream_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams?streamId={{stream_id}}") if err != nil { return err } @@ -1344,24 +1362,32 @@ func resourceDatastreamStreamCreate(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Creating new Stream: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Stream: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Stream: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -1385,7 +1411,7 @@ func resourceDatastreamStreamCreate(d *schema.ResourceData, meta interface{}) er } // This may have 
caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -1408,33 +1434,39 @@ func resourceDatastreamStreamCreate(d *schema.ResourceData, meta interface{}) er } func resourceDatastreamStreamRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Stream: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DatastreamStream %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DatastreamStream %q", d.Id())) } // Explicitly set virtual fields to default values 
if unset @@ -1479,15 +1511,15 @@ func resourceDatastreamStreamRead(d *schema.ResourceData, meta interface{}) erro } func resourceDatastreamStreamUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Stream: %s", err) } @@ -1497,25 +1529,25 @@ func resourceDatastreamStreamUpdate(d *schema.ResourceData, meta interface{}) er labelsProp, err := expandDatastreamStreamLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } displayNameProp, err := expandDatastreamStreamDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } sourceConfigProp, err := expandDatastreamStreamSourceConfig(d.Get("source_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("source_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceConfigProp)) { + } else if v, ok := d.GetOkExists("source_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceConfigProp)) { 
obj["sourceConfig"] = sourceConfigProp } destinationConfigProp, err := expandDatastreamStreamDestinationConfig(d.Get("destination_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("destination_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, destinationConfigProp)) { + } else if v, ok := d.GetOkExists("destination_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, destinationConfigProp)) { obj["destinationConfig"] = destinationConfigProp } backfillAllProp, err := expandDatastreamStreamBackfillAll(d.Get("backfill_all"), d, config) @@ -1536,7 +1568,7 @@ func resourceDatastreamStreamUpdate(d *schema.ResourceData, meta interface{}) er return err } - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") if err != nil { return err } @@ -1567,9 +1599,9 @@ func resourceDatastreamStreamUpdate(d *schema.ResourceData, meta interface{}) er if d.HasChange("backfill_none") { updateMask = append(updateMask, "backfillNone") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } @@ -1579,9 +1611,9 @@ func resourceDatastreamStreamUpdate(d *schema.ResourceData, meta interface{}) er } // Override the previous setting of updateMask to include state. 
- // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } @@ -1591,11 +1623,19 @@ func resourceDatastreamStreamUpdate(d *schema.ResourceData, meta interface{}) er } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Stream %q: %s", d.Id(), err) @@ -1618,21 +1658,21 @@ func resourceDatastreamStreamUpdate(d *schema.ResourceData, meta interface{}) er } func resourceDatastreamStreamDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Stream: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + url, err := 
tpgresource.ReplaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") if err != nil { return err } @@ -1641,13 +1681,21 @@ func resourceDatastreamStreamDelete(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Deleting Stream %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "Stream") + return transport_tpg.HandleNotFoundError(err, d, "Stream") } err = DatastreamOperationWaitTime( @@ -1663,8 +1711,8 @@ func resourceDatastreamStreamDelete(d *schema.ResourceData, meta interface{}) er } func resourceDatastreamStreamImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/streams/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -1673,7 +1721,7 @@ func resourceDatastreamStreamImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -1690,19 
+1738,19 @@ func resourceDatastreamStreamImport(d *schema.ResourceData, meta interface{}) ([ return []*schema.ResourceData{d}, nil } -func flattenDatastreamStreamName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1721,11 +1769,11 @@ func flattenDatastreamStreamSourceConfig(v interface{}, d *schema.ResourceData, flattenDatastreamStreamSourceConfigPostgresqlSourceConfig(original["postgresqlSourceConfig"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamSourceConfigSourceConnectionProfile(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigSourceConnectionProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1737,9 +1785,11 @@ func 
flattenDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d *sche flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(original["excludeObjects"], d, config) transformed["max_concurrent_cdc_tasks"] = flattenDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(original["maxConcurrentCdcTasks"], d, config) + transformed["max_concurrent_backfill_tasks"] = + flattenDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentBackfillTasks(original["maxConcurrentBackfillTasks"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1752,7 +1802,7 @@ func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v interf flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(original["mysqlDatabases"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1771,11 +1821,11 @@ func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlData } return transformed } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1794,11 +1844,11 @@ func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlData } return transformed } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1822,18 +1872,18 @@ func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlData } return transformed } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1847,22 +1897,22 @@ func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlData return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1876,7 +1926,7 @@ func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlData return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1889,7 +1939,7 @@ func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interf flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(original["mysqlDatabases"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1908,11 +1958,11 @@ func 
flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlData } return transformed } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1931,11 +1981,11 @@ func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlData } return transformed } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1959,18 +2009,18 @@ func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlData } return transformed } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1984,22 +2034,22 @@ func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlData return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2013,10 +2063,10 @@ func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlData return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2030,7 +2080,24 @@ func 
flattenDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(v return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigOracleSourceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentBackfillTasks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2050,7 +2117,7 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfig(v interface{}, d *sch flattenDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(original["streamLargeObjects"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2063,7 +2130,7 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(v inter flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(original["oracleSchemas"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2082,11 +2149,11 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSc } return transformed } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2105,11 +2172,11 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSc } return transformed } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2135,18 +2202,18 @@ func 
flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSc } return transformed } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2160,10 +2227,10 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSc return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2177,10 +2244,10 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSc return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2194,22 +2261,22 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSc return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2223,7 +2290,7 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSc return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2236,7 +2303,7 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(v inter flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(original["oracleSchemas"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2255,11 +2322,11 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSc } return transformed } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2278,11 +2345,11 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSc } return transformed } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2308,18 +2375,18 @@ func 
flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSc } return transformed } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2333,10 +2400,10 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSc return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2350,10 +2417,10 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSc return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2367,22 +2434,22 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSc return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2396,10 +2463,10 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSc return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentCdcTasks(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentCdcTasks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2413,10 +2480,10 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentCdcTasks( return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentBackfillTasks(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentBackfillTasks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2430,7 +2497,7 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentBackfillT return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2438,7 +2505,7 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(v int return []interface{}{transformed} } -func flattenDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2446,7 +2513,7 @@ func flattenDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(v i return []interface{}{transformed} } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2464,7 +2531,7 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfig(v interface{}, d 
flattenDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackfillTasks(original["maxConcurrentBackfillTasks"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2477,7 +2544,7 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(v i flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(original["postgresqlSchemas"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2496,11 +2563,11 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPost } return transformed } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { if v == nil { return v } @@ -2519,11 +2586,11 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPost } return transformed } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2548,18 +2615,18 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPost } return transformed } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2573,10 +2640,10 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPost return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2590,10 +2657,10 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPost return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2607,18 +2674,18 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPost return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2632,7 +2699,7 @@ func 
flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPost return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2645,7 +2712,7 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(v i flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(original["postgresqlSchemas"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2664,11 +2731,11 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPost } return transformed } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ 
-2687,11 +2754,11 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPost } return transformed } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2716,18 +2783,18 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPost } return transformed } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2741,10 +2808,10 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPost return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2758,10 +2825,10 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPost return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2775,18 +2842,18 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPost return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2800,18 +2867,18 @@ func 
flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPost return v // let terraform core handle it otherwise } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigReplicationSlot(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigReplicationSlot(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigPublication(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigPublication(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackfillTasks(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackfillTasks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2825,7 +2892,7 @@ func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackf return v // let terraform core handle it otherwise } -func flattenDatastreamStreamDestinationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2842,11 +2909,11 @@ func flattenDatastreamStreamDestinationConfig(v interface{}, d *schema.ResourceD flattenDatastreamStreamDestinationConfigBigqueryDestinationConfig(original["bigqueryDestinationConfig"], d, config) return []interface{}{transformed} } 
-func flattenDatastreamStreamDestinationConfigDestinationConnectionProfile(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigDestinationConnectionProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2867,14 +2934,14 @@ func flattenDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(original["jsonFileFormat"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2888,11 +2955,11 @@ func flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb( return v // let terraform core handle it otherwise } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2900,7 +2967,7 @@ func flattenDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat( return []interface{}{transformed} } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2915,15 +2982,15 @@ func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat( flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(original["compression"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { return v } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2940,11 +3007,11 @@ func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interfa flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(original["sourceHierarchyDatasets"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2957,11 +3024,11 @@ func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTarg flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(original["datasetId"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2974,7 +3041,7 @@ func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHier flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(original["datasetTemplate"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2991,23 +3058,23 @@ func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHier flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(original["kmsKeyName"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAll(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAll(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3021,7 +3088,7 @@ func flattenDatastreamStreamBackfillAll(v interface{}, d *schema.ResourceData, c flattenDatastreamStreamBackfillAllOracleExcludedObjects(original["oracleExcludedObjects"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3034,7 +3101,7 @@ func flattenDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d *sc flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(original["mysqlDatabases"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3053,11 +3120,11 @@ func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v inte } return transformed } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3076,11 +3143,11 @@ func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTa } return transformed } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3104,18 +3171,18 @@ func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTa } 
return transformed } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3129,22 +3196,22 @@ func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTa return v // let terraform core handle it otherwise } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3158,7 +3225,7 @@ func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTa return v // let terraform core handle it otherwise } -func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3171,7 +3238,7 @@ func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjects(v interface{}, flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(original["postgresqlSchemas"], d, config) return []interface{}{transformed} } -func 
flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3190,11 +3257,11 @@ func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchema } return transformed } -func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3213,11 +3280,11 @@ func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchema } return transformed } -func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3242,18 +3309,18 @@ func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchema } return transformed } -func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3267,10 +3334,10 @@ func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchema return v // let terraform core handle it otherwise } -func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d *schema.ResourceData, config *Config) interface{} 
{ +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3284,10 +3351,10 @@ func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchema return v // let terraform core handle it otherwise } -func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3301,18 +3368,18 @@ func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchema return v // let terraform core handle it otherwise } -func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3326,7 +3393,7 @@ func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchema return v // let terraform core handle it otherwise } -func flattenDatastreamStreamBackfillAllOracleExcludedObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3339,7 +3406,7 @@ func flattenDatastreamStreamBackfillAllOracleExcludedObjects(v interface{}, d *s flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(original["oracleSchemas"], d, config) return []interface{}{transformed} } -func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3358,11 +3425,11 @@ func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(v inte } return 
transformed } -func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTables(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3381,11 +3448,11 @@ func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleT } return transformed } -func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -3411,18 +3478,18 @@ func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleT } return transformed } -func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3436,10 +3503,10 @@ func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleT return v // let terraform core handle it otherwise } -func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3453,10 +3520,10 @@ func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleT return v // let terraform core handle it otherwise } -func 
flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3470,22 +3537,22 @@ func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleT return v // let terraform core handle it otherwise } -func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -3499,7 +3566,7 @@ func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleT return v // let terraform core handle it otherwise } -func flattenDatastreamStreamBackfillNone(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamBackfillNone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -3507,11 +3574,11 @@ func flattenDatastreamStreamBackfillNone(v interface{}, d *schema.ResourceData, return []interface{}{transformed} } -func flattenDatastreamStreamCustomerManagedEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDatastreamStreamCustomerManagedEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandDatastreamStreamLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandDatastreamStreamLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -3522,11 +3589,11 @@ func expandDatastreamStreamLabels(v interface{}, d TerraformResourceData, config return m, nil } -func expandDatastreamStreamDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3538,7 +3605,7 @@ func expandDatastreamStreamSourceConfig(v interface{}, d TerraformResourceData, transformedSourceConnectionProfile, err := expandDatastreamStreamSourceConfigSourceConnectionProfile(original["source_connection_profile"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSourceConnectionProfile); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSourceConnectionProfile); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sourceConnectionProfile"] = transformedSourceConnectionProfile } @@ -3566,11 +3633,11 @@ func expandDatastreamStreamSourceConfig(v interface{}, d TerraformResourceData, return transformed, nil } -func expandDatastreamStreamSourceConfigSourceConnectionProfile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigSourceConnectionProfile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -3587,14 +3654,14 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d Terraf transformedIncludeObjects, err := 
expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(original["include_objects"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["includeObjects"] = transformedIncludeObjects } transformedExcludeObjects, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(original["exclude_objects"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["excludeObjects"] = transformedExcludeObjects } @@ -3605,10 +3672,17 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d Terraf transformed["maxConcurrentCdcTasks"] = transformedMaxConcurrentCdcTasks } + transformedMaxConcurrentBackfillTasks, err := expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentBackfillTasks(original["max_concurrent_backfill_tasks"], d, config) + if err != nil { + return nil, err + } else { + transformed["maxConcurrentBackfillTasks"] = transformedMaxConcurrentBackfillTasks + } + return transformed, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3620,14 +3694,14 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v interfa transformedMysqlDatabases, err := 
expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(original["mysql_databases"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mysqlDatabases"] = transformedMysqlDatabases } return transformed, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3640,14 +3714,14 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatab transformedDatabase, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(original["database"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["database"] = transformedDatabase } transformedMysqlTables, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(original["mysql_tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mysqlTables"] = transformedMysqlTables } @@ -3656,11 +3730,11 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatab return req, 
nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3673,14 +3747,14 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatab transformedTable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } transformedMysqlColumns, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysql_columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mysqlColumns"] = transformedMysqlColumns } @@ -3689,11 +3763,11 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatab return req, nil } -func 
expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3706,49 +3780,49 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatab transformedColumn, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } transformedDataType, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } transformedLength, err := 
expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } transformedCollation, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["collation"] = transformedCollation } transformedPrimaryKey, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } transformedNullable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinal_position"], d, config) if err 
!= nil { return nil, err - } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ordinalPosition"] = transformedOrdinalPosition } @@ -3757,35 +3831,35 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatab return req, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3797,14 +3871,14 @@ func 
expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interfa transformedMysqlDatabases, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(original["mysql_databases"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mysqlDatabases"] = transformedMysqlDatabases } return transformed, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3817,14 +3891,14 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab transformedDatabase, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(original["database"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["database"] = transformedDatabase } transformedMysqlTables, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(original["mysql_tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mysqlTables"] = transformedMysqlTables } @@ 
-3833,11 +3907,11 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab return req, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3850,14 +3924,14 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab transformedTable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } transformedMysqlColumns, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysql_columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mysqlColumns"] = transformedMysqlColumns } @@ -3866,11 +3940,11 @@ func 
expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab return req, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -3883,49 +3957,49 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab transformedColumn, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } transformedDataType, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } 
transformedLength, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } transformedCollation, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["collation"] = transformedCollation } transformedPrimaryKey, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } transformedNullable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } transformedOrdinalPosition, err := 
expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ordinalPosition"] = transformedOrdinalPosition } @@ -3934,39 +4008,43 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab return req, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentBackfillTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -3983,14 +4061,14 @@ func expandDatastreamStreamSourceConfigOracleSourceConfig(v interface{}, d Terra transformedIncludeObjects, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(original["include_objects"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["includeObjects"] = transformedIncludeObjects } transformedExcludeObjects, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(original["exclude_objects"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["excludeObjects"] = transformedExcludeObjects } @@ -4025,7 +4103,7 @@ func expandDatastreamStreamSourceConfigOracleSourceConfig(v interface{}, d Terra return transformed, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { 
+func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -4037,14 +4115,14 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(v interf transformedOracleSchemas, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(original["oracle_schemas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOracleSchemas); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOracleSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["oracleSchemas"] = transformedOracleSchemas } return transformed, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4057,14 +4135,14 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSch transformedSchema, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasSchema(original["schema"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schema"] = transformedSchema } transformedOracleTables, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTables(original["oracle_tables"], d, config) if err != nil { return nil, err - } else 
if val := reflect.ValueOf(transformedOracleTables); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOracleTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["oracleTables"] = transformedOracleTables } @@ -4073,11 +4151,11 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSch return req, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4090,14 +4168,14 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSch transformedTable, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesTable(original["table"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } transformedOracleColumns, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumns(original["oracle_columns"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedOracleColumns); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOracleColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["oracleColumns"] = transformedOracleColumns } @@ -4106,11 +4184,11 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSch return req, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4123,63 +4201,63 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSch transformedColumn, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } transformedDataType, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["data_type"], d, config) if err 
!= nil { return nil, err - } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } transformedLength, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } transformedPrecision, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["precision"] = transformedPrecision } transformedScale, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scale"] = transformedScale } transformedEncoding, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["encoding"] = transformedEncoding } transformedPrimaryKey, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } transformedNullable, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ordinalPosition"] = transformedOrdinalPosition } @@ -4188,43 +4266,43 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSch return req, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 
0 || l[0] == nil { return nil, nil @@ -4236,14 +4314,14 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(v interf transformedOracleSchemas, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(original["oracle_schemas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOracleSchemas); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOracleSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["oracleSchemas"] = transformedOracleSchemas } return transformed, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4256,14 +4334,14 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSch transformedSchema, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasSchema(original["schema"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schema"] = transformedSchema } transformedOracleTables, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTables(original["oracle_tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOracleTables); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOracleTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["oracleTables"] = transformedOracleTables } @@ -4272,11 +4350,11 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSch return req, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4289,14 +4367,14 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSch transformedTable, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesTable(original["table"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } transformedOracleColumns, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumns(original["oracle_columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOracleColumns); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOracleColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["oracleColumns"] = transformedOracleColumns } @@ -4305,11 +4383,11 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSch return req, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4322,63 +4400,63 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSch transformedColumn, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } transformedDataType, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() 
&& !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } transformedLength, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } transformedPrecision, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["precision"] = transformedPrecision } transformedScale, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scale"] = transformedScale } transformedEncoding, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["encoding"] = transformedEncoding } transformedPrimaryKey, err := 
expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } transformedNullable, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ordinalPosition"] = transformedOrdinalPosition } @@ -4387,51 +4465,51 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSch return req, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentCdcTasks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentCdcTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentBackfillTasks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentBackfillTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -4446,7 +4524,7 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(v inte return transformed, nil } -func expandDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -4461,7 +4539,7 @@ func expandDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(v in return transformed, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -4478,28 +4556,28 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfig(v interface{}, d T transformedIncludeObjects, err := 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(original["include_objects"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["includeObjects"] = transformedIncludeObjects } transformedExcludeObjects, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(original["exclude_objects"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["excludeObjects"] = transformedExcludeObjects } transformedReplicationSlot, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigReplicationSlot(original["replication_slot"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplicationSlot); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplicationSlot); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replicationSlot"] = transformedReplicationSlot } transformedPublication, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigPublication(original["publication"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPublication); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPublication); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["publication"] = transformedPublication } @@ -4513,7 +4591,7 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfig(v interface{}, d T return transformed, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -4525,14 +4603,14 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(v in transformedPostgresqlSchemas, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(original["postgresql_schemas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlSchemas); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostgresqlSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postgresqlSchemas"] = transformedPostgresqlSchemas } return transformed, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4545,14 +4623,14 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostg transformedSchema, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasSchema(original["schema"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schema"] = transformedSchema } transformedPostgresqlTables, err := 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTables(original["postgresql_tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlTables); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostgresqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postgresqlTables"] = transformedPostgresqlTables } @@ -4561,11 +4639,11 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostg return req, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4578,14 +4656,14 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostg transformedTable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = 
transformedTable } transformedPostgresqlColumns, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresql_columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlColumns); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostgresqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postgresqlColumns"] = transformedPostgresqlColumns } @@ -4594,11 +4672,11 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostg return req, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4611,56 +4689,56 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostg transformedColumn, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedColumn); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } transformedDataType, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } transformedLength, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } transformedPrecision, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["precision"] = transformedPrecision } transformedScale, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["scale"] = transformedScale } transformedPrimaryKey, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } transformedNullable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ordinalPosition"] = transformedOrdinalPosition } @@ -4669,39 +4747,39 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostg return req, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -4713,14 +4791,14 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(v in transformedPostgresqlSchemas, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(original["postgresql_schemas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlSchemas); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostgresqlSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postgresqlSchemas"] = transformedPostgresqlSchemas } return transformed, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4733,14 +4811,14 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostg transformedSchema, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasSchema(original["schema"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schema"] = transformedSchema } transformedPostgresqlTables, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTables(original["postgresql_tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlTables); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostgresqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postgresqlTables"] = transformedPostgresqlTables } @@ -4749,11 +4827,11 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostg return req, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4766,14 +4844,14 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostg transformedTable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } transformedPostgresqlColumns, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresql_columns"], d, config) if err != nil { return nil, err - } else if val 
:= reflect.ValueOf(transformedPostgresqlColumns); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostgresqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postgresqlColumns"] = transformedPostgresqlColumns } @@ -4782,11 +4860,11 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostg return req, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -4799,56 +4877,56 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostg transformedColumn, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } transformedDataType, err := 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } transformedLength, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } transformedPrecision, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["precision"] = transformedPrecision } transformedScale, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scale"] = transformedScale } transformedPrimaryKey, err := 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } transformedNullable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ordinalPosition"] = transformedOrdinalPosition } @@ -4857,51 +4935,51 @@ func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostg return req, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigReplicationSlot(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigReplicationSlot(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigPublication(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigPublication(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackfillTasks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackfillTasks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -4913,32 +4991,32 @@ func expandDatastreamStreamDestinationConfig(v interface{}, d TerraformResourceD transformedDestinationConnectionProfile, err := expandDatastreamStreamDestinationConfigDestinationConnectionProfile(original["destination_connection_profile"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDestinationConnectionProfile); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDestinationConnectionProfile); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["destinationConnectionProfile"] = transformedDestinationConnectionProfile } transformedGcsDestinationConfig, err := expandDatastreamStreamDestinationConfigGcsDestinationConfig(original["gcs_destination_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGcsDestinationConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGcsDestinationConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["gcsDestinationConfig"] = 
transformedGcsDestinationConfig } transformedBigqueryDestinationConfig, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfig(original["bigquery_destination_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBigqueryDestinationConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBigqueryDestinationConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["bigqueryDestinationConfig"] = transformedBigqueryDestinationConfig } return transformed, nil } -func expandDatastreamStreamDestinationConfigDestinationConnectionProfile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigDestinationConnectionProfile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -4950,21 +5028,21 @@ func expandDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, transformedPath, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigPath(original["path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["path"] = transformedPath } transformedFileRotationMb, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(original["file_rotation_mb"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedFileRotationMb); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFileRotationMb); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fileRotationMb"] = transformedFileRotationMb } transformedFileRotationInterval, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(original["file_rotation_interval"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFileRotationInterval); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFileRotationInterval); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fileRotationInterval"] = transformedFileRotationInterval } @@ -4978,26 +5056,26 @@ func expandDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, transformedJsonFileFormat, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(original["json_file_format"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedJsonFileFormat); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedJsonFileFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["jsonFileFormat"] = transformedJsonFileFormat } return transformed, nil } -func expandDatastreamStreamDestinationConfigGcsDestinationConfigPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigGcsDestinationConfigPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -5012,7 +5090,7 @@ func expandDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(v return transformed, nil } -func expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5024,29 +5102,29 @@ func expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(v transformedSchemaFileFormat, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(original["schema_file_format"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchemaFileFormat); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchemaFileFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schemaFileFormat"] = transformedSchemaFileFormat } transformedCompression, err := 
expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(original["compression"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCompression); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCompression); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["compression"] = transformedCompression } return transformed, nil } -func expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5058,32 +5136,32 @@ func expandDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interfac transformedDataFreshness, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(original["data_freshness"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDataFreshness); val.IsValid() && !isEmptyValue(val) { 
+ } else if val := reflect.ValueOf(transformedDataFreshness); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataFreshness"] = transformedDataFreshness } transformedSingleTargetDataset, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(original["single_target_dataset"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSingleTargetDataset); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSingleTargetDataset); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["singleTargetDataset"] = transformedSingleTargetDataset } transformedSourceHierarchyDatasets, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(original["source_hierarchy_datasets"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSourceHierarchyDatasets); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSourceHierarchyDatasets); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sourceHierarchyDatasets"] = transformedSourceHierarchyDatasets } return transformed, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil 
{ return nil, nil @@ -5095,14 +5173,14 @@ func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTarge transformedDatasetId, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(original["dataset_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["datasetId"] = transformedDatasetId } return transformed, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { s := v.(string) re := regexp.MustCompile(`projects/(.+)/datasets/([^\.\?\#]+)`) paths := re.FindStringSubmatch(s) @@ -5115,7 +5193,7 @@ func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTarge return s, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5127,14 +5205,14 @@ func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHiera transformedDatasetTemplate, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(original["dataset_template"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedDatasetTemplate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDatasetTemplate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["datasetTemplate"] = transformedDatasetTemplate } return transformed, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5146,40 +5224,40 @@ func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHiera transformedLocation, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(original["location"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["location"] = transformedLocation } transformedDatasetIdPrefix, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(original["dataset_id_prefix"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDatasetIdPrefix); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDatasetIdPrefix); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["datasetIdPrefix"] = transformedDatasetIdPrefix } transformedKmsKeyName, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(original["kms_key_name"], d, 
config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["kmsKeyName"] = transformedKmsKeyName } return transformed, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAll(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAll(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -5196,28 +5274,28 @@ func 
expandDatastreamStreamBackfillAll(v interface{}, d TerraformResourceData, c transformedMysqlExcludedObjects, err := expandDatastreamStreamBackfillAllMysqlExcludedObjects(original["mysql_excluded_objects"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlExcludedObjects); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMysqlExcludedObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mysqlExcludedObjects"] = transformedMysqlExcludedObjects } transformedPostgresqlExcludedObjects, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjects(original["postgresql_excluded_objects"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlExcludedObjects); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostgresqlExcludedObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postgresqlExcludedObjects"] = transformedPostgresqlExcludedObjects } transformedOracleExcludedObjects, err := expandDatastreamStreamBackfillAllOracleExcludedObjects(original["oracle_excluded_objects"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOracleExcludedObjects); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOracleExcludedObjects); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["oracleExcludedObjects"] = transformedOracleExcludedObjects } return transformed, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5229,14 +5307,14 @@ func 
expandDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d Terr transformedMysqlDatabases, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(original["mysql_databases"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mysqlDatabases"] = transformedMysqlDatabases } return transformed, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5249,14 +5327,14 @@ func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v inter transformedDatabase, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(original["database"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["database"] = transformedDatabase } transformedMysqlTables, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(original["mysql_tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mysqlTables"] = transformedMysqlTables } @@ -5265,11 +5343,11 @@ func 
expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v inter return req, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5282,14 +5360,14 @@ func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTab transformedTable, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } transformedMysqlColumns, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysql_columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mysqlColumns"] = transformedMysqlColumns } @@ -5298,11 +5376,11 @@ func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTab return req, nil } -func 
expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5315,49 +5393,49 @@ func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTab transformedColumn, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } transformedDataType, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } transformedLength, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config) if err != nil { return nil, err 
- } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } transformedCollation, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["collation"] = transformedCollation } transformedPrimaryKey, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } transformedNullable, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } transformedOrdinalPosition, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["ordinalPosition"] = transformedOrdinalPosition } @@ -5366,35 +5444,35 @@ func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTab return req, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllPostgresqlExcludedObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5406,14 +5484,14 @@ func expandDatastreamStreamBackfillAllPostgresqlExcludedObjects(v interface{}, d transformedPostgresqlSchemas, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(original["postgresql_schemas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlSchemas); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostgresqlSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["postgresqlSchemas"] = transformedPostgresqlSchemas } return transformed, nil } -func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5426,14 +5504,14 @@ func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas transformedSchema, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasSchema(original["schema"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schema"] = transformedSchema } transformedPostgresqlTables, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTables(original["postgresql_tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlTables); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostgresqlTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postgresqlTables"] = transformedPostgresqlTables } @@ -5442,11 +5520,11 @@ func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas return req, nil } -func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5459,14 +5537,14 @@ func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas transformedTable, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } transformedPostgresqlColumns, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresql_columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostgresqlColumns); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostgresqlColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postgresqlColumns"] = transformedPostgresqlColumns } @@ -5475,11 +5553,11 @@ func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas return req, nil } -func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5492,56 +5570,56 @@ func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas transformedColumn, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } transformedDataType, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } transformedLength, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["length"] = transformedLength } transformedPrecision, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["precision"] = transformedPrecision } transformedScale, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scale"] = transformedScale } transformedPrimaryKey, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } transformedNullable, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } transformedOrdinalPosition, err := 
expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ordinalPosition"] = transformedOrdinalPosition } @@ -5550,39 +5628,39 @@ func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas return req, nil } -func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -5594,14 +5672,14 @@ func expandDatastreamStreamBackfillAllOracleExcludedObjects(v interface{}, d Ter transformedOracleSchemas, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(original["oracle_schemas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOracleSchemas); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOracleSchemas); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["oracleSchemas"] = transformedOracleSchemas } return transformed, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5614,14 +5692,14 @@ func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(v inter transformedSchema, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasSchema(original["schema"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schema"] = transformedSchema } transformedOracleTables, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTables(original["oracle_tables"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOracleTables); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOracleTables); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["oracleTables"] = transformedOracleTables } @@ -5630,11 +5708,11 @@ func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(v inter return req, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5647,14 +5725,14 @@ func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTa transformedTable, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesTable(original["table"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = 
transformedTable } transformedOracleColumns, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumns(original["oracle_columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOracleColumns); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOracleColumns); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["oracleColumns"] = transformedOracleColumns } @@ -5663,11 +5741,11 @@ func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTa return req, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -5680,63 +5758,63 @@ func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTa transformedColumn, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["column"] = transformedColumn } 
transformedDataType, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataType"] = transformedDataType } transformedLength, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["length"] = transformedLength } transformedPrecision, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["precision"] = transformedPrecision } transformedScale, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["scale"] = transformedScale } transformedEncoding, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEncoding); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["encoding"] = transformedEncoding } transformedPrimaryKey, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } transformedNullable, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nullable"] = transformedNullable } transformedOrdinalPosition, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ordinalPosition"] = transformedOrdinalPosition } @@ -5745,43 +5823,43 @@ func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTa return req, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillNone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillNone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -5796,7 +5874,7 @@ func expandDatastreamStreamBackfillNone(v interface{}, d TerraformResourceData, return transformed, nil } 
-func expandDatastreamStreamCustomerManagedEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamCustomerManagedEncryptionKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_stream_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_stream_sweeper.go new file mode 100644 index 0000000000..fe70e3f078 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/datastream/resource_datastream_stream_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package datastream + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DatastreamStream", testSweepDatastreamStream) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDatastreamStream(region string) error { + resourceName := "DatastreamStream" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://datastream.googleapis.com/v1/projects/{{project}}/locations/{{location}}/streams", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["streams"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://datastream.googleapis.com/v1/projects/{{project}}/locations/{{location}}/streams/{{stream_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/deploymentmanager/deployment_manager_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/deploymentmanager/deployment_manager_operation.go new file mode 100644 index 0000000000..9ea54a291f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/deploymentmanager/deployment_manager_operation.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package deploymentmanager + +import ( + "bytes" + "fmt" + "time" + + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/compute/v1" +) + +type DeploymentManagerOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + OperationUrl string + tpgcompute.ComputeOperationWaiter +} + +func (w *DeploymentManagerOperationWaiter) IsRetryable(error) bool { + return false +} + +func (w *DeploymentManagerOperationWaiter) QueryOp() (interface{}, error) { + if w == nil || w.Op == nil || w.Op.SelfLink == "" { + return nil, fmt.Errorf("cannot query unset/nil operation") + } + + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: w.Op.SelfLink, + UserAgent: w.UserAgent, + }) + if err != nil { + return nil, err + } + op := &compute.Operation{} + if err := tpgresource.Convert(resp, op); err != nil { + return nil, fmt.Errorf("could not convert response to operation: %v", err) + } + return op, nil +} + +func DeploymentManagerOperationWaitTime(config *transport_tpg.Config, resp interface{}, project, activity, userAgent string, timeout time.Duration) error { + op 
:= &compute.Operation{} + err := tpgresource.Convert(resp, op) + if err != nil { + return err + } + + w := &DeploymentManagerOperationWaiter{ + Config: config, + UserAgent: userAgent, + OperationUrl: op.SelfLink, + ComputeOperationWaiter: tpgcompute.ComputeOperationWaiter{ + Project: project, + }, + } + if err := w.SetOp(op); err != nil { + return err + } + + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} + +func (w *DeploymentManagerOperationWaiter) Error() error { + if w != nil && w.Op != nil && w.Op.Error != nil { + return DeploymentManagerOperationError{ + HTTPStatusCode: w.Op.HttpErrorStatusCode, + HTTPMessage: w.Op.HttpErrorMessage, + OperationError: *w.Op.Error, + } + } + return nil +} + +// DeploymentManagerOperationError wraps information from the compute.Operation +// in an implementation of Error. +type DeploymentManagerOperationError struct { + HTTPStatusCode int64 + HTTPMessage string + compute.OperationError +} + +func (e DeploymentManagerOperationError) Error() string { + var buf bytes.Buffer + buf.WriteString("Deployment Manager returned errors for this operation, likely due to invalid configuration.") + buf.WriteString(fmt.Sprintf("Operation failed with HTTP error %d: %s.", e.HTTPStatusCode, e.HTTPMessage)) + buf.WriteString("Errors returned: \n") + for _, err := range e.Errors { + buf.WriteString(err.Message + "\n") + } + return buf.String() +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/deploymentmanager/resource_deployment_manager_deployment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/deploymentmanager/resource_deployment_manager_deployment.go new file mode 100644 index 0000000000..f93c964ae9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/deploymentmanager/resource_deployment_manager_deployment.go @@ -0,0 +1,805 @@ +// 
Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package deploymentmanager + +import ( + "context" + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func customDiffDeploymentManagerDeployment(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + if preview := d.Get("preview").(bool); preview { + log.Printf("[WARN] Deployment preview set to true - Terraform will treat Deployment as recreate-only") + + if d.HasChange("preview") { + if err := d.ForceNew("preview"); err != nil { + return err + } + } + + if d.HasChange("target") { + if err := d.ForceNew("target"); err != nil { + return err + } + } + + if d.HasChange("labels") { + if err := d.ForceNew("labels"); err != nil { + return err + } + } + } + return nil +} + +func ResourceDeploymentManagerDeployment() *schema.Resource { + return &schema.Resource{ + Create: resourceDeploymentManagerDeploymentCreate, + Read: resourceDeploymentManagerDeploymentRead, + Update: resourceDeploymentManagerDeploymentUpdate, + Delete: resourceDeploymentManagerDeploymentDelete, + + Importer: &schema.ResourceImporter{ + State: 
resourceDeploymentManagerDeploymentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + customDiffDeploymentManagerDeployment, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Unique name for the deployment`, + }, + "target": { + Type: schema.TypeList, + Required: true, + Description: `Parameters that define your deployment, including the deployment +configuration and relevant templates.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config": { + Type: schema.TypeList, + Required: true, + Description: `The root configuration file to use for this deployment.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "content": { + Type: schema.TypeString, + Required: true, + Description: `The full YAML contents of your configuration file.`, + }, + }, + }, + }, + "imports": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies import files for this configuration. This can be +used to import templates or other files. For example, you might +import a text file in order to use the file in a template.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "content": { + Type: schema.TypeString, + Optional: true, + Description: `The full contents of the template that you want to import.`, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the template to import, as declared in the YAML +configuration.`, + }, + }, + }, + }, + }, + }, + }, + "create_policy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ACQUIRE", "CREATE_OR_ACQUIRE", ""}), + Description: `Set the policy to use for creating new resources. 
Only used on +create and update. Valid values are 'CREATE_OR_ACQUIRE' (default) or +'ACQUIRE'. If set to 'ACQUIRE' and resources do not already exist, +the deployment will fail. Note that updating this field does not +actually affect the deployment, just how it is updated. Default value: "CREATE_OR_ACQUIRE" Possible values: ["ACQUIRE", "CREATE_OR_ACQUIRE"]`, + Default: "CREATE_OR_ACQUIRE", + }, + "delete_policy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ABANDON", "DELETE", ""}), + Description: `Set the policy to use for deleting new resources on update/delete. +Valid values are 'DELETE' (default) or 'ABANDON'. If 'DELETE', +resource is deleted after removal from Deployment Manager. If +'ABANDON', the resource is only removed from Deployment Manager +and is not actually deleted. Note that updating this field does not +actually change the deployment, just how it is updated. Default value: "DELETE" Possible values: ["ABANDON", "DELETE"]`, + Default: "DELETE", + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Optional user-provided description of deployment.`, + }, + "labels": { + Type: schema.TypeSet, + Optional: true, + Description: `Key-value pairs to apply to this labels.`, + Elem: deploymentmanagerDeploymentLabelsSchema(), + // Default schema.HashSchema is used. + }, + "preview": { + Type: schema.TypeBool, + Optional: true, + Description: `If set to true, a deployment is created with "shell" resources +that are not actually instantiated. This allows you to preview a +deployment. It can be updated to false to actually deploy +with real resources. + ~>**NOTE:** Deployment Manager does not allow update +of a deployment in preview (unless updating to preview=false). 
Thus, +Terraform will force-recreate deployments if either preview is updated +to true or if other fields are updated while preview is true.`, + Default: false, + }, + "deployment_id": { + Type: schema.TypeString, + Computed: true, + Description: `Unique identifier for deployment. Output only.`, + }, + "manifest": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. URL of the manifest representing the last manifest that +was successfully deployed.`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Server defined URL for the resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func deploymentmanagerDeploymentLabelsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Optional: true, + Description: `Key for label.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `Value of label.`, + }, + }, + } +} + +func resourceDeploymentManagerDeploymentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandDeploymentManagerDeploymentName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandDeploymentManagerDeploymentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = 
descriptionProp + } + labelsProp, err := expandDeploymentManagerDeploymentLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); ok || !reflect.DeepEqual(v, labelsProp) { + obj["labels"] = labelsProp + } + targetProp, err := expandDeploymentManagerDeploymentTarget(d.Get("target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target"); !tpgresource.IsEmptyValue(reflect.ValueOf(targetProp)) && (ok || !reflect.DeepEqual(v, targetProp)) { + obj["target"] = targetProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments?preview={{preview}}&createPolicy={{create_policy}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Deployment: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Deployment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Deployment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/deployments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = DeploymentManagerOperationWaitTime( + config, res, project, "Creating Deployment", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + resourceDeploymentManagerDeploymentPostCreateFailure(d, meta) + // The resource didn't actually create + d.SetId("") + 
return fmt.Errorf("Error waiting to create Deployment: %s", err) + } + + log.Printf("[DEBUG] Finished creating Deployment %q: %#v", d.Id(), res) + + return resourceDeploymentManagerDeploymentRead(d, meta) +} + +func resourceDeploymentManagerDeploymentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Deployment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DeploymentManagerDeployment %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Deployment: %s", err) + } + + if err := d.Set("name", flattenDeploymentManagerDeploymentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Deployment: %s", err) + } + if err := d.Set("description", flattenDeploymentManagerDeploymentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Deployment: %s", err) + } + if err := d.Set("labels", flattenDeploymentManagerDeploymentLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Deployment: %s", err) + } + if err := d.Set("deployment_id", 
flattenDeploymentManagerDeploymentDeploymentId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading Deployment: %s", err) + } + if err := d.Set("manifest", flattenDeploymentManagerDeploymentManifest(res["manifest"], d, config)); err != nil { + return fmt.Errorf("Error reading Deployment: %s", err) + } + if err := d.Set("self_link", flattenDeploymentManagerDeploymentSelfLink(res["selfLink"], d, config)); err != nil { + return fmt.Errorf("Error reading Deployment: %s", err) + } + + return nil +} + +func resourceDeploymentManagerDeploymentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Deployment: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("preview") { + obj := make(map[string]interface{}) + + getUrl, err := tpgresource.ReplaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DeploymentManagerDeployment %q", d.Id())) + } + + obj["fingerprint"] = getRes["fingerprint"] + + url, err := tpgresource.ReplaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}?preview={{preview}}&createPolicy={{create_policy}}&deletePolicy={{delete_policy}}") + if err != nil { + return 
err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Deployment %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Deployment %q: %#v", d.Id(), res) + } + + err = DeploymentManagerOperationWaitTime( + config, res, project, "Updating Deployment", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("description") || d.HasChange("labels") || d.HasChange("target") { + obj := make(map[string]interface{}) + + getUrl, err := tpgresource.ReplaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DeploymentManagerDeployment %q", d.Id())) + } + + obj["fingerprint"] = getRes["fingerprint"] + + descriptionProp, err := expandDeploymentManagerDeploymentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandDeploymentManagerDeploymentLabels(d.Get("labels"), d, 
config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); ok || !reflect.DeepEqual(v, labelsProp) { + obj["labels"] = labelsProp + } + targetProp, err := expandDeploymentManagerDeploymentTarget(d.Get("target"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("target"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetProp)) { + obj["target"] = targetProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}?preview={{preview}}&createPolicy={{create_policy}}&deletePolicy={{delete_policy}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Deployment %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Deployment %q: %#v", d.Id(), res) + } + + err = DeploymentManagerOperationWaitTime( + config, res, project, "Updating Deployment", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceDeploymentManagerDeploymentRead(d, meta) +} + +func resourceDeploymentManagerDeploymentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Deployment: %s", err) + } + billingProject = project + 
+ url, err := tpgresource.ReplaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}?deletePolicy={{delete_policy}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Deployment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Deployment") + } + + err = DeploymentManagerOperationWaitTime( + config, res, project, "Deleting Deployment", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Deployment %q: %#v", d.Id(), res) + return nil +} + +func resourceDeploymentManagerDeploymentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/deployments/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/deployments/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDeploymentManagerDeploymentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDeploymentManagerDeploymentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDeploymentManagerDeploymentLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(deploymentmanagerDeploymentLabelsSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "key": flattenDeploymentManagerDeploymentLabelsKey(original["key"], d, config), + "value": flattenDeploymentManagerDeploymentLabelsValue(original["value"], d, config), + }) + } + return transformed +} +func flattenDeploymentManagerDeploymentLabelsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDeploymentManagerDeploymentLabelsValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDeploymentManagerDeploymentDeploymentId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDeploymentManagerDeploymentManifest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDeploymentManagerDeploymentSelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDeploymentManagerDeploymentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDeploymentManagerDeploymentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDeploymentManagerDeploymentLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := 
v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandDeploymentManagerDeploymentLabelsKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedValue, err := expandDeploymentManagerDeploymentLabelsValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDeploymentManagerDeploymentLabelsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDeploymentManagerDeploymentLabelsValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDeploymentManagerDeploymentTarget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedConfig, err := expandDeploymentManagerDeploymentTargetConfig(original["config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["config"] = transformedConfig + } + + transformedImports, err := expandDeploymentManagerDeploymentTargetImports(original["imports"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedImports); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["imports"] = transformedImports + } + + return transformed, nil +} + +func expandDeploymentManagerDeploymentTargetConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedContent, err := expandDeploymentManagerDeploymentTargetConfigContent(original["content"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["content"] = transformedContent + } + + return transformed, nil +} + +func expandDeploymentManagerDeploymentTargetConfigContent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDeploymentManagerDeploymentTargetImports(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedContent, err := expandDeploymentManagerDeploymentTargetImportsContent(original["content"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["content"] = transformedContent + } + + transformedName, err := expandDeploymentManagerDeploymentTargetImportsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = 
transformedName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDeploymentManagerDeploymentTargetImportsContent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDeploymentManagerDeploymentTargetImportsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceDeploymentManagerDeploymentPostCreateFailure(d *schema.ResourceData, meta interface{}) { + log.Printf("[WARN] Attempt to clean up Deployment if it still exists") + var cleanErr error + if cleanErr = resourceDeploymentManagerDeploymentRead(d, meta); cleanErr == nil { + if d.Id() != "" { + log.Printf("[WARN] Deployment %q still exists, attempting to delete...", d.Id()) + if cleanErr = resourceDeploymentManagerDeploymentDelete(d, meta); cleanErr == nil { + log.Printf("[WARN] Invalid Deployment was successfully deleted") + d.SetId("") + } + } + } + if cleanErr != nil { + log.Printf("[WARN] Could not confirm cleanup of Deployment if created in error state: %v", cleanErr) + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/deploymentmanager/resource_deployment_manager_deployment_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/deploymentmanager/resource_deployment_manager_deployment_sweeper.go new file mode 100644 index 0000000000..a2348ea1cd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/deploymentmanager/resource_deployment_manager_deployment_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package deploymentmanager + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DeploymentManagerDeployment", testSweepDeploymentManagerDeployment) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDeploymentManagerDeployment(region string) error { + resourceName := "DeploymentManagerDeployment" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://www.googleapis.com/deploymentmanager/v2/projects/{{project}}/global/deployments", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["deployments"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://www.googleapis.com/deploymentmanager/v2/projects/{{project}}/global/deployments/{{name}}?deletePolicy={{delete_policy}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflow/resource_dialogflow_agent.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflow/resource_dialogflow_agent.go new file mode 100644 index 0000000000..4b672e4ccf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflow/resource_dialogflow_agent.go @@ -0,0 +1,608 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dialogflow + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDialogflowAgent() *schema.Resource { + return &schema.Resource{ + Create: resourceDialogflowAgentCreate, + Read: resourceDialogflowAgentRead, + Update: resourceDialogflowAgentUpdate, + Delete: resourceDialogflowAgentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDialogflowAgentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(40 * time.Minute), + Update: schema.DefaultTimeout(40 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "default_language_code": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The default language of the agent as a language tag. [See Language Support](https://cloud.google.com/dialogflow/docs/reference/language) +for a list of the currently supported language codes. 
This field cannot be updated after creation.`, + }, + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `The name of this agent.`, + }, + "time_zone": { + Type: schema.TypeString, + Required: true, + Description: `The time zone of this agent from the [time zone database](https://www.iana.org/time-zones), e.g., America/New_York, +Europe/Paris.`, + }, + "api_version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"API_VERSION_V1", "API_VERSION_V2", "API_VERSION_V2_BETA_1", ""}), + Description: `API version displayed in Dialogflow console. If not specified, V2 API is assumed. Clients are free to query +different service endpoints for different API versions. However, bots connectors and webhook calls will follow +the specified API version. +* API_VERSION_V1: Legacy V1 API. +* API_VERSION_V2: V2 API. +* API_VERSION_V2_BETA_1: V2beta1 API. Possible values: ["API_VERSION_V1", "API_VERSION_V2", "API_VERSION_V2_BETA_1"]`, + }, + "avatar_uri": { + Type: schema.TypeString, + Optional: true, + Description: `The URI of the agent's avatar, which are used throughout the Dialogflow console. When an image URL is entered +into this field, the Dialogflow will save the image in the backend. The address of the backend image returned +from the API will be shown in the [avatarUriBackend] field.`, + }, + "classification_threshold": { + Type: schema.TypeFloat, + Optional: true, + Description: `To filter out false positive results and still get variety in matched natural language inputs for your agent, +you can tune the machine learning classification threshold. If the returned score value is less than the threshold +value, then a fallback intent will be triggered or, if there are no fallback intents defined, no intent will be +triggered. The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). 
If set to 0.0, the +default of 0.3 is used.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 500), + Description: `The description of this agent. The maximum length is 500 characters. If exceeded, the request is rejected.`, + }, + "enable_logging": { + Type: schema.TypeBool, + Optional: true, + Description: `Determines whether this agent should log conversation queries.`, + }, + "match_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MATCH_MODE_HYBRID", "MATCH_MODE_ML_ONLY", ""}), + Description: `Determines how intents are detected from user queries. +* MATCH_MODE_HYBRID: Best for agents with a small number of examples in intents and/or wide use of templates +syntax and composite entities. +* MATCH_MODE_ML_ONLY: Can be used for agents with a large number of examples in intents, especially the ones +using @sys.any or very large developer entities. Possible values: ["MATCH_MODE_HYBRID", "MATCH_MODE_ML_ONLY"]`, + }, + "supported_language_codes": { + Type: schema.TypeList, + Optional: true, + Description: `The list of all languages supported by this agent (except for the defaultLanguageCode).`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "tier": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"TIER_STANDARD", "TIER_ENTERPRISE", "TIER_ENTERPRISE_PLUS", ""}), + Description: `The agent tier. If not specified, TIER_STANDARD is assumed. +* TIER_STANDARD: Standard tier. +* TIER_ENTERPRISE: Enterprise tier (Essentials). +* TIER_ENTERPRISE_PLUS: Enterprise tier (Plus). +NOTE: Due to consistency issues, the provider will not read this field from the API. Drift is possible between +the Terraform state and Dialogflow if the agent tier is changed outside of Terraform. 
Possible values: ["TIER_STANDARD", "TIER_ENTERPRISE", "TIER_ENTERPRISE_PLUS"]`, + }, + "avatar_uri_backend": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the agent's avatar as returned from the API. Output only. To provide an image URL for the agent avatar, +the [avatarUri] field can be used.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDialogflowAgentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowAgentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + defaultLanguageCodeProp, err := expandDialogflowAgentDefaultLanguageCode(d.Get("default_language_code"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_language_code"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultLanguageCodeProp)) && (ok || !reflect.DeepEqual(v, defaultLanguageCodeProp)) { + obj["defaultLanguageCode"] = defaultLanguageCodeProp + } + supportedLanguageCodesProp, err := expandDialogflowAgentSupportedLanguageCodes(d.Get("supported_language_codes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("supported_language_codes"); !tpgresource.IsEmptyValue(reflect.ValueOf(supportedLanguageCodesProp)) && (ok || !reflect.DeepEqual(v, supportedLanguageCodesProp)) { + obj["supportedLanguageCodes"] = supportedLanguageCodesProp + } + timeZoneProp, err := expandDialogflowAgentTimeZone(d.Get("time_zone"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("time_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeZoneProp)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { + obj["timeZone"] = timeZoneProp + } + descriptionProp, err := expandDialogflowAgentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + avatarUriProp, err := expandDialogflowAgentAvatarUri(d.Get("avatar_uri"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("avatar_uri"); !tpgresource.IsEmptyValue(reflect.ValueOf(avatarUriProp)) && (ok || !reflect.DeepEqual(v, avatarUriProp)) { + obj["avatarUri"] = avatarUriProp + } + enableLoggingProp, err := expandDialogflowAgentEnableLogging(d.Get("enable_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableLoggingProp)) && (ok || !reflect.DeepEqual(v, enableLoggingProp)) { + obj["enableLogging"] = enableLoggingProp + } + matchModeProp, err := expandDialogflowAgentMatchMode(d.Get("match_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("match_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(matchModeProp)) && (ok || !reflect.DeepEqual(v, matchModeProp)) { + obj["matchMode"] = matchModeProp + } + classificationThresholdProp, err := expandDialogflowAgentClassificationThreshold(d.Get("classification_threshold"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("classification_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(classificationThresholdProp)) && (ok || !reflect.DeepEqual(v, classificationThresholdProp)) { + obj["classificationThreshold"] = classificationThresholdProp + } + apiVersionProp, err := 
expandDialogflowAgentApiVersion(d.Get("api_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("api_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(apiVersionProp)) && (ok || !reflect.DeepEqual(v, apiVersionProp)) { + obj["apiVersion"] = apiVersionProp + } + tierProp, err := expandDialogflowAgentTier(d.Get("tier"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tier"); !tpgresource.IsEmptyValue(reflect.ValueOf(tierProp)) && (ok || !reflect.DeepEqual(v, tierProp)) { + obj["tier"] = tierProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Agent: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Agent: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Agent: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{project}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Agent %q: %#v", d.Id(), res) + + return resourceDialogflowAgentRead(d, meta) +} + +func resourceDialogflowAgentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Agent: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DialogflowAgent %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + + if err := d.Set("display_name", flattenDialogflowAgentDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("default_language_code", flattenDialogflowAgentDefaultLanguageCode(res["defaultLanguageCode"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("supported_language_codes", flattenDialogflowAgentSupportedLanguageCodes(res["supportedLanguageCodes"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("time_zone", flattenDialogflowAgentTimeZone(res["timeZone"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("description", flattenDialogflowAgentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("avatar_uri_backend", flattenDialogflowAgentAvatarUriBackend(res["avatarUri"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := 
d.Set("enable_logging", flattenDialogflowAgentEnableLogging(res["enableLogging"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("match_mode", flattenDialogflowAgentMatchMode(res["matchMode"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("classification_threshold", flattenDialogflowAgentClassificationThreshold(res["classificationThreshold"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("api_version", flattenDialogflowAgentApiVersion(res["apiVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + + return nil +} + +func resourceDialogflowAgentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Agent: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowAgentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + defaultLanguageCodeProp, err := expandDialogflowAgentDefaultLanguageCode(d.Get("default_language_code"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_language_code"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultLanguageCodeProp)) { + obj["defaultLanguageCode"] = defaultLanguageCodeProp + } + supportedLanguageCodesProp, err := 
expandDialogflowAgentSupportedLanguageCodes(d.Get("supported_language_codes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("supported_language_codes"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, supportedLanguageCodesProp)) { + obj["supportedLanguageCodes"] = supportedLanguageCodesProp + } + timeZoneProp, err := expandDialogflowAgentTimeZone(d.Get("time_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("time_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { + obj["timeZone"] = timeZoneProp + } + descriptionProp, err := expandDialogflowAgentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + avatarUriProp, err := expandDialogflowAgentAvatarUri(d.Get("avatar_uri"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("avatar_uri"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, avatarUriProp)) { + obj["avatarUri"] = avatarUriProp + } + enableLoggingProp, err := expandDialogflowAgentEnableLogging(d.Get("enable_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableLoggingProp)) { + obj["enableLogging"] = enableLoggingProp + } + matchModeProp, err := expandDialogflowAgentMatchMode(d.Get("match_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("match_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, matchModeProp)) { + obj["matchMode"] = matchModeProp + } + classificationThresholdProp, err := 
expandDialogflowAgentClassificationThreshold(d.Get("classification_threshold"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("classification_threshold"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, classificationThresholdProp)) { + obj["classificationThreshold"] = classificationThresholdProp + } + apiVersionProp, err := expandDialogflowAgentApiVersion(d.Get("api_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("api_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, apiVersionProp)) { + obj["apiVersion"] = apiVersionProp + } + tierProp, err := expandDialogflowAgentTier(d.Get("tier"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tier"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tierProp)) { + obj["tier"] = tierProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Agent %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Agent %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Agent %q: %#v", d.Id(), res) + } + + return resourceDialogflowAgentRead(d, meta) +} + +func resourceDialogflowAgentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return 
err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Agent: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Agent %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Agent") + } + + log.Printf("[DEBUG] Finished deleting Agent %q: %#v", d.Id(), res) + return nil +} + +func resourceDialogflowAgentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{project}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDialogflowAgentDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowAgentDefaultLanguageCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowAgentSupportedLanguageCodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowAgentTimeZone(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowAgentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowAgentAvatarUriBackend(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowAgentEnableLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowAgentMatchMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowAgentClassificationThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowAgentApiVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDialogflowAgentDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowAgentDefaultLanguageCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowAgentSupportedLanguageCodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowAgentTimeZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowAgentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowAgentAvatarUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowAgentEnableLogging(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowAgentMatchMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowAgentClassificationThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowAgentApiVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowAgentTier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflow/resource_dialogflow_entity_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflow/resource_dialogflow_entity_type.go new file mode 100644 index 0000000000..4926a45cda --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflow/resource_dialogflow_entity_type.go @@ -0,0 +1,506 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dialogflow + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDialogflowEntityType() *schema.Resource { + return &schema.Resource{ + Create: resourceDialogflowEntityTypeCreate, + Read: resourceDialogflowEntityTypeRead, + Update: resourceDialogflowEntityTypeUpdate, + Delete: resourceDialogflowEntityTypeDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDialogflowEntityTypeImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `The name of this entity type to be displayed on the console.`, + }, + "kind": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"KIND_MAP", "KIND_LIST", "KIND_REGEXP"}), + Description: `Indicates the kind of entity type. +* KIND_MAP: Map entity types allow mapping of a group of synonyms to a reference value. +* KIND_LIST: List entity types contain a set of entries that do not map to reference values. However, list entity +types can contain references to other entity types (with or without aliases). +* KIND_REGEXP: Regexp entity types allow to specify regular expressions in entries values. 
Possible values: ["KIND_MAP", "KIND_LIST", "KIND_REGEXP"]`, + }, + "enable_fuzzy_extraction": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables fuzzy entity extraction during classification.`, + }, + "entities": { + Type: schema.TypeList, + Optional: true, + Description: `The collection of entity entries associated with the entity type.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "synonyms": { + Type: schema.TypeList, + Required: true, + Description: `A collection of value synonyms. For example, if the entity type is vegetable, and value is scallions, a synonym +could be green onions. +For KIND_LIST entity types: +* This collection must contain exactly one synonym equal to value.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: `The primary value associated with this entity entry. For example, if the entity type is vegetable, the value +could be scallions. +For KIND_MAP entity types: +* A reference value to be used in place of synonyms. +For KIND_LIST entity types: +* A string that can contain references to other entity types (with or without aliases).`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier of the entity type. 
+Format: projects//agent/entityTypes/.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDialogflowEntityTypeCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowEntityTypeDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + kindProp, err := expandDialogflowEntityTypeKind(d.Get("kind"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("kind"); !tpgresource.IsEmptyValue(reflect.ValueOf(kindProp)) && (ok || !reflect.DeepEqual(v, kindProp)) { + obj["kind"] = kindProp + } + enableFuzzyExtractionProp, err := expandDialogflowEntityTypeEnableFuzzyExtraction(d.Get("enable_fuzzy_extraction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_fuzzy_extraction"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableFuzzyExtractionProp)) && (ok || !reflect.DeepEqual(v, enableFuzzyExtractionProp)) { + obj["enableFuzzyExtraction"] = enableFuzzyExtractionProp + } + entitiesProp, err := expandDialogflowEntityTypeEntities(d.Get("entities"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("entities"); !tpgresource.IsEmptyValue(reflect.ValueOf(entitiesProp)) && (ok || !reflect.DeepEqual(v, entitiesProp)) { + obj["entities"] = entitiesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/entityTypes/") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new 
EntityType: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EntityType: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating EntityType: %s", err) + } + if err := d.Set("name", flattenDialogflowEntityTypeName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating EntityType %q: %#v", d.Id(), res) + + return resourceDialogflowEntityTypeRead(d, meta) +} + +func resourceDialogflowEntityTypeRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EntityType: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DialogflowEntityType %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + + if err := d.Set("name", flattenDialogflowEntityTypeName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + if err := d.Set("display_name", flattenDialogflowEntityTypeDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + if err := d.Set("kind", flattenDialogflowEntityTypeKind(res["kind"], d, config)); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + if err := d.Set("enable_fuzzy_extraction", 
flattenDialogflowEntityTypeEnableFuzzyExtraction(res["enableFuzzyExtraction"], d, config)); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + if err := d.Set("entities", flattenDialogflowEntityTypeEntities(res["entities"], d, config)); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + + return nil +} + +func resourceDialogflowEntityTypeUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EntityType: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowEntityTypeDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + kindProp, err := expandDialogflowEntityTypeKind(d.Get("kind"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("kind"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, kindProp)) { + obj["kind"] = kindProp + } + enableFuzzyExtractionProp, err := expandDialogflowEntityTypeEnableFuzzyExtraction(d.Get("enable_fuzzy_extraction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_fuzzy_extraction"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableFuzzyExtractionProp)) { + obj["enableFuzzyExtraction"] = enableFuzzyExtractionProp + } + entitiesProp, err := expandDialogflowEntityTypeEntities(d.Get("entities"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("entities"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entitiesProp)) { + obj["entities"] = entitiesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating EntityType %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating EntityType %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating EntityType %q: %#v", d.Id(), res) + } + + return resourceDialogflowEntityTypeRead(d, meta) +} + +func resourceDialogflowEntityTypeDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EntityType: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting EntityType %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "EntityType") + } + + log.Printf("[DEBUG] Finished deleting EntityType %q: %#v", d.Id(), res) + return nil +} + +func resourceDialogflowEntityTypeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + stringParts := strings.Split(d.Get("name").(string), "/") + if len(stringParts) < 2 { + return nil, fmt.Errorf( + "Could not split project from name: %s", + d.Get("name"), + ) + } + + if err := d.Set("project", stringParts[1]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenDialogflowEntityTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowEntityTypeDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowEntityTypeKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowEntityTypeEnableFuzzyExtraction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowEntityTypeEntities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "value": 
flattenDialogflowEntityTypeEntitiesValue(original["value"], d, config), + "synonyms": flattenDialogflowEntityTypeEntitiesSynonyms(original["synonyms"], d, config), + }) + } + return transformed +} +func flattenDialogflowEntityTypeEntitiesValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowEntityTypeEntitiesSynonyms(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDialogflowEntityTypeDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowEntityTypeKind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowEntityTypeEnableFuzzyExtraction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowEntityTypeEntities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedValue, err := expandDialogflowEntityTypeEntitiesValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + transformedSynonyms, err := expandDialogflowEntityTypeEntitiesSynonyms(original["synonyms"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSynonyms); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["synonyms"] = transformedSynonyms + } + + req = append(req, transformed) + } + return 
req, nil +} + +func expandDialogflowEntityTypeEntitiesValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowEntityTypeEntitiesSynonyms(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflow/resource_dialogflow_fulfillment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflow/resource_dialogflow_fulfillment.go new file mode 100644 index 0000000000..6ab02aa508 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflow/resource_dialogflow_fulfillment.go @@ -0,0 +1,610 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dialogflow + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDialogflowFulfillment() *schema.Resource { + return &schema.Resource{ + Create: resourceDialogflowFulfillmentCreate, + Read: resourceDialogflowFulfillmentRead, + Update: resourceDialogflowFulfillmentUpdate, + Delete: resourceDialogflowFulfillmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDialogflowFulfillmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `The human-readable name of the fulfillment, unique within the agent.`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether fulfillment is enabled.`, + }, + "features": { + Type: schema.TypeList, + Optional: true, + Description: `The field defines whether the fulfillment is enabled for certain features.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"SMALLTALK"}), + Description: `The type of the feature that enabled for fulfillment. +* SMALLTALK: Fulfillment is enabled for SmallTalk. Possible values: ["SMALLTALK"]`, + }, + }, + }, + }, + "generic_web_service": { + Type: schema.TypeList, + Optional: true, + Description: `Represents configuration for a generic web service. 
Dialogflow supports two mechanisms for authentications: - Basic authentication with username and password. - Authentication with additional authentication headers.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: `The fulfillment URI for receiving POST requests. It must use https protocol.`, + }, + "password": { + Type: schema.TypeString, + Optional: true, + Description: `The password for HTTP Basic authentication.`, + }, + "request_headers": { + Type: schema.TypeMap, + Optional: true, + Description: `The HTTP request headers to send together with fulfillment requests.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "username": { + Type: schema.TypeString, + Optional: true, + Description: `The user name for HTTP Basic authentication.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier of the fulfillment. +Format: projects//agent/fulfillment - projects//locations//agent/fulfillment`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDialogflowFulfillmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowFulfillmentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enabledProp, err := expandDialogflowFulfillmentEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + featuresProp, err := expandDialogflowFulfillmentFeatures(d.Get("features"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("features"); !tpgresource.IsEmptyValue(reflect.ValueOf(featuresProp)) && (ok || !reflect.DeepEqual(v, featuresProp)) { + obj["features"] = featuresProp + } + genericWebServiceProp, err := expandDialogflowFulfillmentGenericWebService(d.Get("generic_web_service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("generic_web_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(genericWebServiceProp)) && (ok || !reflect.DeepEqual(v, genericWebServiceProp)) { + obj["genericWebService"] = genericWebServiceProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/fulfillment/?updateMask=name,displayName,enabled,genericWebService,features") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Fulfillment: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Fulfillment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Fulfillment: %s", err) + } + if err := d.Set("name", flattenDialogflowFulfillmentName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + 
} + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating Fulfillment %q: %#v", d.Id(), res) + + return resourceDialogflowFulfillmentRead(d, meta) +} + +func resourceDialogflowFulfillmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Fulfillment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DialogflowFulfillment %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return 
fmt.Errorf("Error reading Fulfillment: %s", err) + } + + if err := d.Set("name", flattenDialogflowFulfillmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Fulfillment: %s", err) + } + if err := d.Set("display_name", flattenDialogflowFulfillmentDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Fulfillment: %s", err) + } + if err := d.Set("enabled", flattenDialogflowFulfillmentEnabled(res["enabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Fulfillment: %s", err) + } + if err := d.Set("features", flattenDialogflowFulfillmentFeatures(res["features"], d, config)); err != nil { + return fmt.Errorf("Error reading Fulfillment: %s", err) + } + if err := d.Set("generic_web_service", flattenDialogflowFulfillmentGenericWebService(res["genericWebService"], d, config)); err != nil { + return fmt.Errorf("Error reading Fulfillment: %s", err) + } + + return nil +} + +func resourceDialogflowFulfillmentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Fulfillment: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowFulfillmentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enabledProp, err := expandDialogflowFulfillmentEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + featuresProp, err := expandDialogflowFulfillmentFeatures(d.Get("features"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("features"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, featuresProp)) { + obj["features"] = featuresProp + } + genericWebServiceProp, err := expandDialogflowFulfillmentGenericWebService(d.Get("generic_web_service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("generic_web_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, genericWebServiceProp)) { + obj["genericWebService"] = genericWebServiceProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/fulfillment/") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Fulfillment %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("enabled") { + updateMask = append(updateMask, "enabled") + } + + if d.HasChange("features") { + updateMask = append(updateMask, "features") + } + + if d.HasChange("generic_web_service") { + updateMask = append(updateMask, "genericWebService") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + 
if err != nil { + return fmt.Errorf("Error updating Fulfillment %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Fulfillment %q: %#v", d.Id(), res) + } + + return resourceDialogflowFulfillmentRead(d, meta) +} + +func resourceDialogflowFulfillmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Fulfillment: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/fulfillment/?updateMask=name,displayName,enabled,genericWebService,features") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Fulfillment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Fulfillment") + } + + log.Printf("[DEBUG] Finished deleting Fulfillment %q: %#v", d.Id(), res) + return nil +} + +func resourceDialogflowFulfillmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + stringParts := strings.Split(d.Get("name").(string), "/") + if 
len(stringParts) < 2 { + return nil, fmt.Errorf( + "Could not split project from name: %s", + d.Get("name"), + ) + } + + if err := d.Set("project", stringParts[1]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenDialogflowFulfillmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowFulfillmentDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowFulfillmentEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowFulfillmentFeatures(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "type": flattenDialogflowFulfillmentFeaturesType(original["type"], d, config), + }) + } + return transformed +} +func flattenDialogflowFulfillmentFeaturesType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowFulfillmentGenericWebService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenDialogflowFulfillmentGenericWebServiceUri(original["uri"], d, config) + transformed["username"] = + flattenDialogflowFulfillmentGenericWebServiceUsername(original["username"], d, config) + transformed["password"] = + 
flattenDialogflowFulfillmentGenericWebServicePassword(original["password"], d, config) + transformed["request_headers"] = + flattenDialogflowFulfillmentGenericWebServiceRequestHeaders(original["requestHeaders"], d, config) + return []interface{}{transformed} +} +func flattenDialogflowFulfillmentGenericWebServiceUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowFulfillmentGenericWebServiceUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowFulfillmentGenericWebServicePassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowFulfillmentGenericWebServiceRequestHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDialogflowFulfillmentDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowFulfillmentEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowFulfillmentFeatures(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandDialogflowFulfillmentFeaturesType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDialogflowFulfillmentFeaturesType(v interface{}, 
d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowFulfillmentGenericWebService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandDialogflowFulfillmentGenericWebServiceUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedUsername, err := expandDialogflowFulfillmentGenericWebServiceUsername(original["username"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["username"] = transformedUsername + } + + transformedPassword, err := expandDialogflowFulfillmentGenericWebServicePassword(original["password"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["password"] = transformedPassword + } + + transformedRequestHeaders, err := expandDialogflowFulfillmentGenericWebServiceRequestHeaders(original["request_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequestHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requestHeaders"] = transformedRequestHeaders + } + + return transformed, nil +} + +func expandDialogflowFulfillmentGenericWebServiceUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowFulfillmentGenericWebServiceUsername(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowFulfillmentGenericWebServicePassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowFulfillmentGenericWebServiceRequestHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflow/resource_dialogflow_intent.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflow/resource_dialogflow_intent.go new file mode 100644 index 0000000000..47d2b03244 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflow/resource_dialogflow_intent.go @@ -0,0 +1,722 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dialogflow + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDialogflowIntent() *schema.Resource { + return &schema.Resource{ + Create: resourceDialogflowIntentCreate, + Read: resourceDialogflowIntentRead, + Update: resourceDialogflowIntentUpdate, + Delete: resourceDialogflowIntentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDialogflowIntentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `The name of this intent to be displayed on the console.`, + }, + "action": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The name of the action associated with the intent. +Note: The action name must not contain whitespaces.`, + }, + "default_response_platforms": { + Type: schema.TypeList, + Optional: true, + Description: `The list of platforms for which the first responses will be copied from the messages in PLATFORM_UNSPECIFIED +(i.e. default platform). 
Possible values: ["FACEBOOK", "SLACK", "TELEGRAM", "KIK", "SKYPE", "LINE", "VIBER", "ACTIONS_ON_GOOGLE", "GOOGLE_HANGOUTS"]`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateEnum([]string{"FACEBOOK", "SLACK", "TELEGRAM", "KIK", "SKYPE", "LINE", "VIBER", "ACTIONS_ON_GOOGLE", "GOOGLE_HANGOUTS"}), + }, + }, + "events": { + Type: schema.TypeList, + Optional: true, + Description: `The collection of event names that trigger the intent. If the collection of input contexts is not empty, all of +the contexts must be present in the active user session for an event to trigger this intent. See the +[events reference](https://cloud.google.com/dialogflow/docs/events-overview) for more details.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "input_context_names": { + Type: schema.TypeList, + Optional: true, + Description: `The list of context names required for this intent to be triggered. +Format: projects//agent/sessions/-/contexts/.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "is_fallback": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `Indicates whether this is a fallback intent.`, + }, + "ml_disabled": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `Indicates whether Machine Learning is disabled for the intent. +Note: If mlDisabled setting is set to true, then this intent is not taken into account during inference in ML +ONLY match mode. Also, auto-markup in the UI is turned off.`, + }, + "parent_followup_intent_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The unique identifier of the parent intent in the chain of followup intents. +Format: projects//agent/intents/.`, + }, + "priority": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The priority of this intent. Higher numbers represent higher priorities. 
+ - If the supplied value is unspecified or 0, the service translates the value to 500,000, which corresponds + to the Normal priority in the console. + - If the supplied value is negative, the intent is ignored in runtime detect intent requests.`, + }, + "reset_contexts": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `Indicates whether to delete all contexts in the current session when this intent is matched.`, + }, + "webhook_state": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"WEBHOOK_STATE_ENABLED", "WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING", ""}), + Description: `Indicates whether webhooks are enabled for the intent. +* WEBHOOK_STATE_ENABLED: Webhook is enabled in the agent and in the intent. +* WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING: Webhook is enabled in the agent and in the intent. Also, each slot +filling prompt is forwarded to the webhook. Possible values: ["WEBHOOK_STATE_ENABLED", "WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING"]`, + }, + "followup_intent_info": { + Type: schema.TypeList, + Computed: true, + Description: `Information about all followup intents that have this intent as a direct or indirect parent. We populate this field +only in the output.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "followup_intent_name": { + Type: schema.TypeString, + Optional: true, + Description: `The unique identifier of the followup intent. +Format: projects//agent/intents/.`, + }, + "parent_followup_intent_name": { + Type: schema.TypeString, + Optional: true, + Description: `The unique identifier of the followup intent's parent. +Format: projects//agent/intents/.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier of this intent. 
+Format: projects//agent/intents/.`, + }, + "root_followup_intent_name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier of the root intent in the chain of followup intents. It identifies the correct followup +intents chain for this intent. +Format: projects//agent/intents/.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDialogflowIntentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowIntentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + webhookStateProp, err := expandDialogflowIntentWebhookState(d.Get("webhook_state"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("webhook_state"); !tpgresource.IsEmptyValue(reflect.ValueOf(webhookStateProp)) && (ok || !reflect.DeepEqual(v, webhookStateProp)) { + obj["webhookState"] = webhookStateProp + } + priorityProp, err := expandDialogflowIntentPriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(priorityProp)) && (ok || !reflect.DeepEqual(v, priorityProp)) { + obj["priority"] = priorityProp + } + isFallbackProp, err := expandDialogflowIntentIsFallback(d.Get("is_fallback"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("is_fallback"); !tpgresource.IsEmptyValue(reflect.ValueOf(isFallbackProp)) && (ok || !reflect.DeepEqual(v, isFallbackProp)) { 
+ obj["isFallback"] = isFallbackProp + } + mlDisabledProp, err := expandDialogflowIntentMlDisabled(d.Get("ml_disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ml_disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(mlDisabledProp)) && (ok || !reflect.DeepEqual(v, mlDisabledProp)) { + obj["mlDisabled"] = mlDisabledProp + } + inputContextNamesProp, err := expandDialogflowIntentInputContextNames(d.Get("input_context_names"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("input_context_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(inputContextNamesProp)) && (ok || !reflect.DeepEqual(v, inputContextNamesProp)) { + obj["inputContextNames"] = inputContextNamesProp + } + eventsProp, err := expandDialogflowIntentEvents(d.Get("events"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("events"); !tpgresource.IsEmptyValue(reflect.ValueOf(eventsProp)) && (ok || !reflect.DeepEqual(v, eventsProp)) { + obj["events"] = eventsProp + } + actionProp, err := expandDialogflowIntentAction(d.Get("action"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("action"); !tpgresource.IsEmptyValue(reflect.ValueOf(actionProp)) && (ok || !reflect.DeepEqual(v, actionProp)) { + obj["action"] = actionProp + } + resetContextsProp, err := expandDialogflowIntentResetContexts(d.Get("reset_contexts"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reset_contexts"); !tpgresource.IsEmptyValue(reflect.ValueOf(resetContextsProp)) && (ok || !reflect.DeepEqual(v, resetContextsProp)) { + obj["resetContexts"] = resetContextsProp + } + defaultResponsePlatformsProp, err := expandDialogflowIntentDefaultResponsePlatforms(d.Get("default_response_platforms"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_response_platforms"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultResponsePlatformsProp)) && (ok || 
!reflect.DeepEqual(v, defaultResponsePlatformsProp)) { + obj["defaultResponsePlatforms"] = defaultResponsePlatformsProp + } + parentFollowupIntentNameProp, err := expandDialogflowIntentParentFollowupIntentName(d.Get("parent_followup_intent_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent_followup_intent_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentFollowupIntentNameProp)) && (ok || !reflect.DeepEqual(v, parentFollowupIntentNameProp)) { + obj["parentFollowupIntentName"] = parentFollowupIntentNameProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/intents/") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Intent: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Intent: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Intent: %s", err) + } + if err := d.Set("name", flattenDialogflowIntentName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain 
critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating Intent %q: %#v", d.Id(), res) + + return resourceDialogflowIntentRead(d, meta) +} + +func resourceDialogflowIntentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Intent: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DialogflowIntent %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + + if err := d.Set("name", flattenDialogflowIntentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("display_name", flattenDialogflowIntentDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("webhook_state", 
flattenDialogflowIntentWebhookState(res["webhookState"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("priority", flattenDialogflowIntentPriority(res["priority"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("is_fallback", flattenDialogflowIntentIsFallback(res["isFallback"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("ml_disabled", flattenDialogflowIntentMlDisabled(res["mlDisabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("input_context_names", flattenDialogflowIntentInputContextNames(res["inputContextNames"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("events", flattenDialogflowIntentEvents(res["events"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("action", flattenDialogflowIntentAction(res["action"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("reset_contexts", flattenDialogflowIntentResetContexts(res["resetContexts"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("default_response_platforms", flattenDialogflowIntentDefaultResponsePlatforms(res["defaultResponsePlatforms"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("root_followup_intent_name", flattenDialogflowIntentRootFollowupIntentName(res["rootFollowupIntentName"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("parent_followup_intent_name", flattenDialogflowIntentParentFollowupIntentName(res["parentFollowupIntentName"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("followup_intent_info", 
flattenDialogflowIntentFollowupIntentInfo(res["followupIntentInfo"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + + return nil +} + +func resourceDialogflowIntentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Intent: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowIntentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + webhookStateProp, err := expandDialogflowIntentWebhookState(d.Get("webhook_state"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("webhook_state"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, webhookStateProp)) { + obj["webhookState"] = webhookStateProp + } + priorityProp, err := expandDialogflowIntentPriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, priorityProp)) { + obj["priority"] = priorityProp + } + isFallbackProp, err := expandDialogflowIntentIsFallback(d.Get("is_fallback"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("is_fallback"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, isFallbackProp)) { + obj["isFallback"] = isFallbackProp + } + mlDisabledProp, err := expandDialogflowIntentMlDisabled(d.Get("ml_disabled"), d, config) + if err != 
nil { + return err + } else if v, ok := d.GetOkExists("ml_disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, mlDisabledProp)) { + obj["mlDisabled"] = mlDisabledProp + } + inputContextNamesProp, err := expandDialogflowIntentInputContextNames(d.Get("input_context_names"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("input_context_names"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, inputContextNamesProp)) { + obj["inputContextNames"] = inputContextNamesProp + } + eventsProp, err := expandDialogflowIntentEvents(d.Get("events"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("events"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, eventsProp)) { + obj["events"] = eventsProp + } + actionProp, err := expandDialogflowIntentAction(d.Get("action"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("action"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, actionProp)) { + obj["action"] = actionProp + } + resetContextsProp, err := expandDialogflowIntentResetContexts(d.Get("reset_contexts"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reset_contexts"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, resetContextsProp)) { + obj["resetContexts"] = resetContextsProp + } + defaultResponsePlatformsProp, err := expandDialogflowIntentDefaultResponsePlatforms(d.Get("default_response_platforms"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_response_platforms"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultResponsePlatformsProp)) { + obj["defaultResponsePlatforms"] = defaultResponsePlatformsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}{{name}}") + if err != nil { + return err + } + + 
log.Printf("[DEBUG] Updating Intent %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Intent %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Intent %q: %#v", d.Id(), res) + } + + return resourceDialogflowIntentRead(d, meta) +} + +func resourceDialogflowIntentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Intent: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Intent %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Intent") + } + + log.Printf("[DEBUG] Finished deleting Intent %q: %#v", d.Id(), res) + return nil +} + +func resourceDialogflowIntentImport(d *schema.ResourceData, meta interface{}) 
([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + stringParts := strings.Split(d.Get("name").(string), "/") + if len(stringParts) < 2 { + return nil, fmt.Errorf( + "Could not split project from name: %s", + d.Get("name"), + ) + } + + if err := d.Set("project", stringParts[1]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenDialogflowIntentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowIntentDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowIntentWebhookState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowIntentPriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDialogflowIntentIsFallback(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowIntentMlDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowIntentInputContextNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowIntentEvents(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowIntentAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowIntentResetContexts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowIntentDefaultResponsePlatforms(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowIntentRootFollowupIntentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowIntentParentFollowupIntentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowIntentFollowupIntentInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "followup_intent_name": flattenDialogflowIntentFollowupIntentInfoFollowupIntentName(original["followupIntentName"], d, config), + "parent_followup_intent_name": flattenDialogflowIntentFollowupIntentInfoParentFollowupIntentName(original["parentFollowupIntentName"], d, config), + }) + } + return transformed +} +func flattenDialogflowIntentFollowupIntentInfoFollowupIntentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowIntentFollowupIntentInfoParentFollowupIntentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDialogflowIntentDisplayName(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowIntentWebhookState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowIntentPriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowIntentIsFallback(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowIntentMlDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowIntentInputContextNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowIntentEvents(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowIntentAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowIntentResetContexts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowIntentDefaultResponsePlatforms(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowIntentParentFollowupIntentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/dialogflow_cx_operation.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/dialogflow_cx_operation.go new file mode 100644 index 0000000000..fa59ba69c9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/dialogflow_cx_operation.go @@ -0,0 +1,71 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package dialogflowcx + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type DialogflowCXOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + tpgresource.CommonOperationWaiter + Location string +} + +func (w *DialogflowCXOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("https://%s-dialogflow.googleapis.com/v3/%s", w.Location, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createDialogflowCXWaiter(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent, location string) (*DialogflowCXOperationWaiter, error) { + w := &DialogflowCXOperationWaiter{ + Config: config, + UserAgent: userAgent, + Location: location, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func DialogflowCXOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent, location string, timeout time.Duration) error { + w, err := createDialogflowCXWaiter(config, op, activity, userAgent, location) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func DialogflowCXOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent, location string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createDialogflowCXWaiter(config, op, activity, userAgent, location) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_agent.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_agent.go new file mode 100644 index 0000000000..2adc40e7ea --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_agent.go @@ -0,0 +1,677 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dialogflowcx + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDialogflowCXAgent() *schema.Resource { + return &schema.Resource{ + Create: resourceDialogflowCXAgentCreate, + Read: resourceDialogflowCXAgentRead, + Update: resourceDialogflowCXAgentUpdate, + Delete: resourceDialogflowCXAgentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDialogflowCXAgentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(40 * time.Minute), + Update: schema.DefaultTimeout(40 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "default_language_code": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The default language of the agent as a language tag. [See Language Support](https://cloud.google.com/dialogflow/cx/docs/reference/language) +for a list of the currently supported language codes. This field cannot be updated after creation.`, + }, + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `The human-readable name of the agent, unique within the location.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the location this agent is located in. + +~> **Note:** The first time you are deploying an Agent in your project you must configure location settings. + This is a one time step but at the moment you can only [configure location settings](https://cloud.google.com/dialogflow/cx/docs/concept/region#location-settings) via the Dialogflow CX console. 
+ Another options is to use global location so you don't need to manually configure location settings.`, + }, + "time_zone": { + Type: schema.TypeString, + Required: true, + Description: `The time zone of this agent from the [time zone database](https://www.iana.org/time-zones), e.g., America/New_York, +Europe/Paris.`, + }, + "avatar_uri": { + Type: schema.TypeString, + Optional: true, + Description: `The URI of the agent's avatar. Avatars are used throughout the Dialogflow console and in the self-hosted Web Demo integration.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 500), + Description: `The description of this agent. The maximum length is 500 characters. If exceeded, the request is rejected.`, + }, + "enable_spell_correction": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates if automatic spell correction is enabled in detect intent requests.`, + }, + "enable_stackdriver_logging": { + Type: schema.TypeBool, + Optional: true, + Description: `Determines whether this agent should log conversation queries.`, + }, + "security_settings": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the SecuritySettings reference for the agent. 
Format: projects//locations//securitySettings/.`, + }, + "speech_to_text_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Settings related to speech recognition.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_speech_adaptation": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether to use speech adaptation for speech recognition.`, + }, + }, + }, + }, + "supported_language_codes": { + Type: schema.TypeList, + Optional: true, + Description: `The list of all languages supported by this agent (except for the default_language_code).`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier of the agent.`, + }, + "start_flow": { + Type: schema.TypeString, + Computed: true, + Description: `Name of the start flow in this agent. A start flow will be automatically created when the agent is created, and can only be deleted by deleting the agent. 
Format: projects//locations//agents//flows/.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDialogflowCXAgentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowCXAgentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + defaultLanguageCodeProp, err := expandDialogflowCXAgentDefaultLanguageCode(d.Get("default_language_code"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_language_code"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultLanguageCodeProp)) && (ok || !reflect.DeepEqual(v, defaultLanguageCodeProp)) { + obj["defaultLanguageCode"] = defaultLanguageCodeProp + } + supportedLanguageCodesProp, err := expandDialogflowCXAgentSupportedLanguageCodes(d.Get("supported_language_codes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("supported_language_codes"); !tpgresource.IsEmptyValue(reflect.ValueOf(supportedLanguageCodesProp)) && (ok || !reflect.DeepEqual(v, supportedLanguageCodesProp)) { + obj["supportedLanguageCodes"] = supportedLanguageCodesProp + } + timeZoneProp, err := expandDialogflowCXAgentTimeZone(d.Get("time_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("time_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeZoneProp)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { + obj["timeZone"] = timeZoneProp + } + descriptionProp, err := 
expandDialogflowCXAgentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + avatarUriProp, err := expandDialogflowCXAgentAvatarUri(d.Get("avatar_uri"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("avatar_uri"); !tpgresource.IsEmptyValue(reflect.ValueOf(avatarUriProp)) && (ok || !reflect.DeepEqual(v, avatarUriProp)) { + obj["avatarUri"] = avatarUriProp + } + speechToTextSettingsProp, err := expandDialogflowCXAgentSpeechToTextSettings(d.Get("speech_to_text_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("speech_to_text_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(speechToTextSettingsProp)) && (ok || !reflect.DeepEqual(v, speechToTextSettingsProp)) { + obj["speechToTextSettings"] = speechToTextSettingsProp + } + securitySettingsProp, err := expandDialogflowCXAgentSecuritySettings(d.Get("security_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("security_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(securitySettingsProp)) && (ok || !reflect.DeepEqual(v, securitySettingsProp)) { + obj["securitySettings"] = securitySettingsProp + } + enableStackdriverLoggingProp, err := expandDialogflowCXAgentEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_stackdriver_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableStackdriverLoggingProp)) && (ok || !reflect.DeepEqual(v, enableStackdriverLoggingProp)) { + obj["enableStackdriverLogging"] = enableStackdriverLoggingProp + } + enableSpellCorrectionProp, err := expandDialogflowCXAgentEnableSpellCorrection(d.Get("enable_spell_correction"), d, config) + if err != nil { + return err 
+ } else if v, ok := d.GetOkExists("enable_spell_correction"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableSpellCorrectionProp)) && (ok || !reflect.DeepEqual(v, enableSpellCorrectionProp)) { + obj["enableSpellCorrection"] = enableSpellCorrectionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}projects/{{project}}/locations/{{location}}/agents") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Agent: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Agent: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Agent: %s", err) + } + if err := d.Set("name", flattenDialogflowCXAgentName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/agents/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Agent %q: %#v", d.Id(), res) + + return resourceDialogflowCXAgentRead(d, meta) +} + +func resourceDialogflowCXAgentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{DialogflowCXBasePath}}projects/{{project}}/locations/{{location}}/agents/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Agent: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DialogflowCXAgent %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + + if err := d.Set("name", flattenDialogflowCXAgentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("display_name", flattenDialogflowCXAgentDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("default_language_code", flattenDialogflowCXAgentDefaultLanguageCode(res["defaultLanguageCode"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("supported_language_codes", flattenDialogflowCXAgentSupportedLanguageCodes(res["supportedLanguageCodes"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("time_zone", flattenDialogflowCXAgentTimeZone(res["timeZone"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("description", flattenDialogflowCXAgentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("avatar_uri", 
flattenDialogflowCXAgentAvatarUri(res["avatarUri"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("speech_to_text_settings", flattenDialogflowCXAgentSpeechToTextSettings(res["speechToTextSettings"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("start_flow", flattenDialogflowCXAgentStartFlow(res["startFlow"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("security_settings", flattenDialogflowCXAgentSecuritySettings(res["securitySettings"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("enable_stackdriver_logging", flattenDialogflowCXAgentEnableStackdriverLogging(res["enableStackdriverLogging"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + if err := d.Set("enable_spell_correction", flattenDialogflowCXAgentEnableSpellCorrection(res["enableSpellCorrection"], d, config)); err != nil { + return fmt.Errorf("Error reading Agent: %s", err) + } + + return nil +} + +func resourceDialogflowCXAgentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Agent: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowCXAgentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + supportedLanguageCodesProp, err := 
expandDialogflowCXAgentSupportedLanguageCodes(d.Get("supported_language_codes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("supported_language_codes"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, supportedLanguageCodesProp)) { + obj["supportedLanguageCodes"] = supportedLanguageCodesProp + } + timeZoneProp, err := expandDialogflowCXAgentTimeZone(d.Get("time_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("time_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { + obj["timeZone"] = timeZoneProp + } + descriptionProp, err := expandDialogflowCXAgentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + avatarUriProp, err := expandDialogflowCXAgentAvatarUri(d.Get("avatar_uri"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("avatar_uri"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, avatarUriProp)) { + obj["avatarUri"] = avatarUriProp + } + speechToTextSettingsProp, err := expandDialogflowCXAgentSpeechToTextSettings(d.Get("speech_to_text_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("speech_to_text_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, speechToTextSettingsProp)) { + obj["speechToTextSettings"] = speechToTextSettingsProp + } + securitySettingsProp, err := expandDialogflowCXAgentSecuritySettings(d.Get("security_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("security_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, securitySettingsProp)) { + obj["securitySettings"] = 
securitySettingsProp + } + enableStackdriverLoggingProp, err := expandDialogflowCXAgentEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_stackdriver_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableStackdriverLoggingProp)) { + obj["enableStackdriverLogging"] = enableStackdriverLoggingProp + } + enableSpellCorrectionProp, err := expandDialogflowCXAgentEnableSpellCorrection(d.Get("enable_spell_correction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_spell_correction"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableSpellCorrectionProp)) { + obj["enableSpellCorrection"] = enableSpellCorrectionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}projects/{{project}}/locations/{{location}}/agents/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Agent %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("supported_language_codes") { + updateMask = append(updateMask, "supportedLanguageCodes") + } + + if d.HasChange("time_zone") { + updateMask = append(updateMask, "timeZone") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("avatar_uri") { + updateMask = append(updateMask, "avatarUri") + } + + if d.HasChange("speech_to_text_settings") { + updateMask = append(updateMask, "speechToTextSettings") + } + + if d.HasChange("security_settings") { + updateMask = append(updateMask, "securitySettings") + } + + if d.HasChange("enable_stackdriver_logging") { + updateMask = append(updateMask, "enableStackdriverLogging") + } + + if d.HasChange("enable_spell_correction") { + updateMask = append(updateMask, "enableSpellCorrection") + } + // 
updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Agent %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Agent %q: %#v", d.Id(), res) + } + + return resourceDialogflowCXAgentRead(d, meta) +} + +func resourceDialogflowCXAgentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Agent: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}projects/{{project}}/locations/{{location}}/agents/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Agent %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return 
transport_tpg.HandleNotFoundError(err, d, "Agent") + } + + log.Printf("[DEBUG] Finished deleting Agent %q: %#v", d.Id(), res) + return nil +} + +func resourceDialogflowCXAgentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/agents/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/agents/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDialogflowCXAgentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenDialogflowCXAgentDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentDefaultLanguageCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentSupportedLanguageCodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentTimeZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentAvatarUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentSpeechToTextSettings(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enable_speech_adaptation"] = + flattenDialogflowCXAgentSpeechToTextSettingsEnableSpeechAdaptation(original["enableSpeechAdaptation"], d, config) + return []interface{}{transformed} +} +func flattenDialogflowCXAgentSpeechToTextSettingsEnableSpeechAdaptation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentStartFlow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentSecuritySettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentEnableStackdriverLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXAgentEnableSpellCorrection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDialogflowCXAgentDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXAgentDefaultLanguageCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXAgentSupportedLanguageCodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXAgentTimeZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXAgentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return 
v, nil +} + +func expandDialogflowCXAgentAvatarUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXAgentSpeechToTextSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnableSpeechAdaptation, err := expandDialogflowCXAgentSpeechToTextSettingsEnableSpeechAdaptation(original["enable_speech_adaptation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableSpeechAdaptation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableSpeechAdaptation"] = transformedEnableSpeechAdaptation + } + + return transformed, nil +} + +func expandDialogflowCXAgentSpeechToTextSettingsEnableSpeechAdaptation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXAgentSecuritySettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXAgentEnableStackdriverLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXAgentEnableSpellCorrection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_entity_type.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_entity_type.go new file mode 
100644 index 0000000000..b2e0c8da4a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_entity_type.go @@ -0,0 +1,699 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dialogflowcx + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDialogflowCXEntityType() *schema.Resource { + return &schema.Resource{ + Create: resourceDialogflowCXEntityTypeCreate, + Read: resourceDialogflowCXEntityTypeRead, + Update: resourceDialogflowCXEntityTypeUpdate, + Delete: resourceDialogflowCXEntityTypeDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDialogflowCXEntityTypeImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(40 * time.Minute), + Update: schema.DefaultTimeout(40 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 64), + 
Description: `The human-readable name of the entity type, unique within the agent.`, + }, + "entities": { + Type: schema.TypeList, + Required: true, + Description: `The collection of entity entries associated with the entity type.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "synonyms": { + Type: schema.TypeList, + Optional: true, + Description: `A collection of value synonyms. For example, if the entity type is vegetable, and value is scallions, a synonym could be green onions. +For KIND_LIST entity types: This collection must contain exactly one synonym equal to value.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `The primary value associated with this entity entry. For example, if the entity type is vegetable, the value could be scallions. +For KIND_MAP entity types: A canonical value to be used in place of synonyms. +For KIND_LIST entity types: A string that can contain references to other entity types (with or without aliases).`, + }, + }, + }, + }, + "kind": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"KIND_MAP", "KIND_LIST", "KIND_REGEXP"}), + Description: `Indicates whether the entity type can be automatically expanded. +* KIND_MAP: Map entity types allow mapping of a group of synonyms to a canonical value. +* KIND_LIST: List entity types contain a set of entries that do not map to canonical values. However, list entity types can contain references to other entity types (with or without aliases). +* KIND_REGEXP: Regexp entity types allow to specify regular expressions in entries values. Possible values: ["KIND_MAP", "KIND_LIST", "KIND_REGEXP"]`, + }, + "auto_expansion_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"AUTO_EXPANSION_MODE_DEFAULT", "AUTO_EXPANSION_MODE_UNSPECIFIED", ""}), + Description: `Represents kinds of entities. 
+* AUTO_EXPANSION_MODE_UNSPECIFIED: Auto expansion disabled for the entity. +* AUTO_EXPANSION_MODE_DEFAULT: Allows an agent to recognize values that have not been explicitly listed in the entity. Possible values: ["AUTO_EXPANSION_MODE_DEFAULT", "AUTO_EXPANSION_MODE_UNSPECIFIED"]`, + }, + "enable_fuzzy_extraction": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables fuzzy entity extraction during classification.`, + }, + "excluded_phrases": { + Type: schema.TypeList, + Optional: true, + Description: `Collection of exceptional words and phrases that shouldn't be matched. For example, if you have a size entity type with entry giant(an adjective), you might consider adding giants(a noun) as an exclusion. +If the kind of entity type is KIND_MAP, then the phrases specified by entities and excluded phrases should be mutually exclusive.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "value": { + Type: schema.TypeString, + Optional: true, + Description: `The word or phrase to be excluded.`, + }, + }, + }, + }, + "language_code": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The language of the following fields in entityType: +EntityType.entities.value +EntityType.entities.synonyms +EntityType.excluded_phrases.value +If not specified, the agent's default language is used. Many languages are supported. Note: languages must be enabled in the agent before they can be used.`, + }, + "parent": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The agent to create a entity type for. +Format: projects//locations//agents/.`, + }, + "redact": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether parameters of the entity type should be redacted in log. 
If redaction is enabled, page parameters and intent parameters referring to the entity type will be replaced by parameter name when logging.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier of the entity type. +Format: projects//locations//agents//entityTypes/.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDialogflowCXEntityTypeCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowCXEntityTypeDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + kindProp, err := expandDialogflowCXEntityTypeKind(d.Get("kind"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("kind"); !tpgresource.IsEmptyValue(reflect.ValueOf(kindProp)) && (ok || !reflect.DeepEqual(v, kindProp)) { + obj["kind"] = kindProp + } + autoExpansionModeProp, err := expandDialogflowCXEntityTypeAutoExpansionMode(d.Get("auto_expansion_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("auto_expansion_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(autoExpansionModeProp)) && (ok || !reflect.DeepEqual(v, autoExpansionModeProp)) { + obj["autoExpansionMode"] = autoExpansionModeProp + } + entitiesProp, err := expandDialogflowCXEntityTypeEntities(d.Get("entities"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("entities"); !tpgresource.IsEmptyValue(reflect.ValueOf(entitiesProp)) && (ok || !reflect.DeepEqual(v, entitiesProp)) { + obj["entities"] = entitiesProp + } + excludedPhrasesProp, err := 
expandDialogflowCXEntityTypeExcludedPhrases(d.Get("excluded_phrases"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("excluded_phrases"); !tpgresource.IsEmptyValue(reflect.ValueOf(excludedPhrasesProp)) && (ok || !reflect.DeepEqual(v, excludedPhrasesProp)) { + obj["excludedPhrases"] = excludedPhrasesProp + } + enableFuzzyExtractionProp, err := expandDialogflowCXEntityTypeEnableFuzzyExtraction(d.Get("enable_fuzzy_extraction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_fuzzy_extraction"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableFuzzyExtractionProp)) && (ok || !reflect.DeepEqual(v, enableFuzzyExtractionProp)) { + obj["enableFuzzyExtraction"] = enableFuzzyExtractionProp + } + redactProp, err := expandDialogflowCXEntityTypeRedact(d.Get("redact"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("redact"); !tpgresource.IsEmptyValue(reflect.ValueOf(redactProp)) && (ok || !reflect.DeepEqual(v, redactProp)) { + obj["redact"] = redactProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/entityTypes") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new EntityType: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + 
Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating EntityType: %s", err) + } + if err := d.Set("name", flattenDialogflowCXEntityTypeName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/entityTypes/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating EntityType %q: %#v", d.Id(), res) + + return resourceDialogflowCXEntityTypeRead(d, meta) +} + +func resourceDialogflowCXEntityTypeRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/entityTypes/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return 
transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DialogflowCXEntityType %q", d.Id())) + } + + if err := d.Set("name", flattenDialogflowCXEntityTypeName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + if err := d.Set("display_name", flattenDialogflowCXEntityTypeDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + if err := d.Set("kind", flattenDialogflowCXEntityTypeKind(res["kind"], d, config)); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + if err := d.Set("auto_expansion_mode", flattenDialogflowCXEntityTypeAutoExpansionMode(res["autoExpansionMode"], d, config)); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + if err := d.Set("entities", flattenDialogflowCXEntityTypeEntities(res["entities"], d, config)); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + if err := d.Set("excluded_phrases", flattenDialogflowCXEntityTypeExcludedPhrases(res["excludedPhrases"], d, config)); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + if err := d.Set("enable_fuzzy_extraction", flattenDialogflowCXEntityTypeEnableFuzzyExtraction(res["enableFuzzyExtraction"], d, config)); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + if err := d.Set("redact", flattenDialogflowCXEntityTypeRedact(res["redact"], d, config)); err != nil { + return fmt.Errorf("Error reading EntityType: %s", err) + } + + return nil +} + +func resourceDialogflowCXEntityTypeUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowCXEntityTypeDisplayName(d.Get("display_name"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + kindProp, err := expandDialogflowCXEntityTypeKind(d.Get("kind"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("kind"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, kindProp)) { + obj["kind"] = kindProp + } + autoExpansionModeProp, err := expandDialogflowCXEntityTypeAutoExpansionMode(d.Get("auto_expansion_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("auto_expansion_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autoExpansionModeProp)) { + obj["autoExpansionMode"] = autoExpansionModeProp + } + entitiesProp, err := expandDialogflowCXEntityTypeEntities(d.Get("entities"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("entities"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entitiesProp)) { + obj["entities"] = entitiesProp + } + excludedPhrasesProp, err := expandDialogflowCXEntityTypeExcludedPhrases(d.Get("excluded_phrases"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("excluded_phrases"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, excludedPhrasesProp)) { + obj["excludedPhrases"] = excludedPhrasesProp + } + enableFuzzyExtractionProp, err := expandDialogflowCXEntityTypeEnableFuzzyExtraction(d.Get("enable_fuzzy_extraction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_fuzzy_extraction"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableFuzzyExtractionProp)) { + obj["enableFuzzyExtraction"] = enableFuzzyExtractionProp + } + redactProp, err := expandDialogflowCXEntityTypeRedact(d.Get("redact"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("redact"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, redactProp)) { + obj["redact"] = redactProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/entityTypes/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating EntityType %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("kind") { + updateMask = append(updateMask, "kind") + } + + if d.HasChange("auto_expansion_mode") { + updateMask = append(updateMask, "autoExpansionMode") + } + + if d.HasChange("entities") { + updateMask = append(updateMask, "entities") + } + + if d.HasChange("excluded_phrases") { + updateMask = append(updateMask, "excludedPhrases") + } + + if d.HasChange("enable_fuzzy_extraction") { + updateMask = append(updateMask, "enableFuzzyExtraction") + } + + if d.HasChange("redact") { + updateMask = append(updateMask, "redact") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating EntityType %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating EntityType %q: %#v", d.Id(), res) + } + + return resourceDialogflowCXEntityTypeRead(d, meta) +} + +func resourceDialogflowCXEntityTypeDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/entityTypes/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + log.Printf("[DEBUG] Deleting EntityType %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "EntityType") + } + + log.Printf("[DEBUG] Finished deleting EntityType %q: %#v", d.Id(), 
res) + return nil +} + +func resourceDialogflowCXEntityTypeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value and parent contains slashes + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/entityTypes/(?P[^/]+)", + "(?P.+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/entityTypes/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDialogflowCXEntityTypeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenDialogflowCXEntityTypeDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXEntityTypeKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXEntityTypeAutoExpansionMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXEntityTypeEntities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "value": flattenDialogflowCXEntityTypeEntitiesValue(original["value"], d, config), + "synonyms": flattenDialogflowCXEntityTypeEntitiesSynonyms(original["synonyms"], 
d, config), + }) + } + return transformed +} +func flattenDialogflowCXEntityTypeEntitiesValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXEntityTypeEntitiesSynonyms(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXEntityTypeExcludedPhrases(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "value": flattenDialogflowCXEntityTypeExcludedPhrasesValue(original["value"], d, config), + }) + } + return transformed +} +func flattenDialogflowCXEntityTypeExcludedPhrasesValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXEntityTypeEnableFuzzyExtraction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXEntityTypeRedact(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDialogflowCXEntityTypeDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXEntityTypeKind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXEntityTypeAutoExpansionMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXEntityTypeEntities(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedValue, err := expandDialogflowCXEntityTypeEntitiesValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + transformedSynonyms, err := expandDialogflowCXEntityTypeEntitiesSynonyms(original["synonyms"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSynonyms); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["synonyms"] = transformedSynonyms + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDialogflowCXEntityTypeEntitiesValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXEntityTypeEntitiesSynonyms(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXEntityTypeExcludedPhrases(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedValue, err := expandDialogflowCXEntityTypeExcludedPhrasesValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + req = append(req, 
transformed) + } + return req, nil +} + +func expandDialogflowCXEntityTypeExcludedPhrasesValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXEntityTypeEnableFuzzyExtraction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXEntityTypeRedact(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_environment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_environment.go new file mode 100644 index 0000000000..820df76ab1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_environment.go @@ -0,0 +1,510 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package dialogflowcx + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDialogflowCXEnvironment() *schema.Resource { + return &schema.Resource{ + Create: resourceDialogflowCXEnvironmentCreate, + Read: resourceDialogflowCXEnvironmentRead, + Update: resourceDialogflowCXEnvironmentUpdate, + Delete: resourceDialogflowCXEnvironmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDialogflowCXEnvironmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(40 * time.Minute), + Update: schema.DefaultTimeout(40 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 64), + Description: `The human-readable name of the environment (unique in an agent). Limit of 64 characters.`, + }, + "version_configs": { + Type: schema.TypeList, + Required: true, + Description: `A list of configurations for flow versions. You should include version configs for all flows that are reachable from [Start Flow][Agent.start_flow] in the agent. Otherwise, an error will be returned.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "version": { + Type: schema.TypeString, + Required: true, + Description: `Format: projects/{{project}}/locations/{{location}}/agents/{{agent}}/flows/{{flow}}/versions/{{version}}.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 500), + Description: `The human-readable description of the environment. 
The maximum length is 500 characters. If exceeded, the request is rejected.`, + }, + "parent": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Agent to create an Environment for. +Format: projects//locations//agents/.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the environment.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Update time of this environment. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDialogflowCXEnvironmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowCXEnvironmentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandDialogflowCXEnvironmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + versionConfigsProp, err := expandDialogflowCXEnvironmentVersionConfigs(d.Get("version_configs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(versionConfigsProp)) && (ok || !reflect.DeepEqual(v, versionConfigsProp)) { + 
obj["versionConfigs"] = versionConfigsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/environments") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Environment: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Environment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/environments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = DialogflowCXOperationWaitTimeWithResponse( + config, res, &opRes, "Creating Environment", userAgent, location, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Environment: %s", err) + } + + if err := d.Set("name", flattenDialogflowCXEnvironmentName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have 
caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{parent}}/environments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Environment %q: %#v", d.Id(), res) + + return resourceDialogflowCXEnvironmentRead(d, meta) +} + +func resourceDialogflowCXEnvironmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/environments/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DialogflowCXEnvironment %q", d.Id())) + } + + if err := d.Set("name", flattenDialogflowCXEnvironmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + if err := d.Set("display_name", flattenDialogflowCXEnvironmentDisplayName(res["displayName"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Environment: %s", err) + } + if err := d.Set("description", flattenDialogflowCXEnvironmentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + if err := d.Set("version_configs", flattenDialogflowCXEnvironmentVersionConfigs(res["versionConfigs"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + if err := d.Set("update_time", flattenDialogflowCXEnvironmentUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + + return nil +} + +func resourceDialogflowCXEnvironmentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowCXEnvironmentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandDialogflowCXEnvironmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + versionConfigsProp, err := expandDialogflowCXEnvironmentVersionConfigs(d.Get("version_configs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, versionConfigsProp)) { + obj["versionConfigs"] = versionConfigsProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{DialogflowCXBasePath}}{{parent}}/environments/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Environment %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("version_configs") { + updateMask = append(updateMask, "versionConfigs") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Environment %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Environment %q: %#v", d.Id(), res) + } + + err = DialogflowCXOperationWaitTime( + config, res, "Updating Environment", userAgent, location, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceDialogflowCXEnvironmentRead(d, meta) +} + 
+func resourceDialogflowCXEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/environments/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + log.Printf("[DEBUG] Deleting Environment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Environment") + } + + err = DialogflowCXOperationWaitTime( + config, res, "Deleting Environment", userAgent, location, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Environment %q: %#v", d.Id(), res) + return nil +} + +func resourceDialogflowCXEnvironmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value and parent 
contains slashes + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/environments/(?P[^/]+)", + "(?P.+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/environments/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDialogflowCXEnvironmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenDialogflowCXEnvironmentDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXEnvironmentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXEnvironmentVersionConfigs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "version": flattenDialogflowCXEnvironmentVersionConfigsVersion(original["version"], d, config), + }) + } + return transformed +} +func flattenDialogflowCXEnvironmentVersionConfigsVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXEnvironmentUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDialogflowCXEnvironmentDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + return v, nil +} + +func expandDialogflowCXEnvironmentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXEnvironmentVersionConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedVersion, err := expandDialogflowCXEnvironmentVersionConfigsVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDialogflowCXEnvironmentVersionConfigsVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_flow.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_flow.go similarity index 77% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_flow.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_flow.go index ab6e66ebe0..76281fa673 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_flow.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_flow.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package dialogflowcx import ( "fmt" @@ -24,6 +27,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceDialogflowCXFlow() *schema.Resource { @@ -174,7 +181,7 @@ If the returned score value is less than the threshold value, then a no-match ev "model_training_mode": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"MODEL_TRAINING_MODE_AUTOMATIC", "MODEL_TRAINING_MODE_MANUAL", ""}), + ValidateFunc: verify.ValidateEnum([]string{"MODEL_TRAINING_MODE_AUTOMATIC", "MODEL_TRAINING_MODE_MANUAL", ""}), Description: `Indicates NLU model training mode. * MODEL_TRAINING_MODE_AUTOMATIC: NLU model training is automatically triggered when a flow gets modified. User can also manually trigger model training in this mode. * MODEL_TRAINING_MODE_MANUAL: User needs to manually trigger NLU model training. Best for large flows whose models take long time to train. 
Possible values: ["MODEL_TRAINING_MODE_AUTOMATIC", "MODEL_TRAINING_MODE_MANUAL"]`, @@ -182,7 +189,7 @@ If the returned score value is less than the threshold value, then a no-match ev "model_type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"MODEL_TYPE_STANDARD", "MODEL_TYPE_ADVANCED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"MODEL_TYPE_STANDARD", "MODEL_TYPE_ADVANCED", ""}), Description: `Indicates the type of NLU model. * MODEL_TYPE_STANDARD: Use standard NLU model. * MODEL_TYPE_ADVANCED: Use advanced NLU model. Possible values: ["MODEL_TYPE_STANDARD", "MODEL_TYPE_ADVANCED"]`, @@ -322,8 +329,8 @@ Format: projects//locations//agents//flows/.+)/flows/(?P[^/]+)", "(?P.+)/(?P[^/]+)", }, d, config); err != nil { @@ -659,7 +696,7 @@ func resourceDialogflowCXFlowImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{parent}}/flows/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/flows/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -668,22 +705,22 @@ func resourceDialogflowCXFlowImport(d *schema.ResourceData, meta interface{}) ([ return []*schema.ResourceData{d}, nil } -func flattenDialogflowCXFlowName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return NameFromSelfLinkStateFunc(v) + return tpgresource.NameFromSelfLinkStateFunc(v) } -func flattenDialogflowCXFlowDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenDialogflowCXFlowDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowTransitionRoutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRoutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -706,19 +743,19 @@ func flattenDialogflowCXFlowTransitionRoutes(v interface{}, d *schema.ResourceDa } return transformed } -func flattenDialogflowCXFlowTransitionRoutesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRoutesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowTransitionRoutesIntent(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRoutesIntent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowTransitionRoutesCondition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRoutesCondition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -737,7 +774,7 @@ func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillment(v interface{}, d flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentTag(original["tag"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessages(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
+func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -755,7 +792,7 @@ func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessages(v interfa } return transformed } -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesText(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -770,35 +807,35 @@ func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesText(v int flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allowPlaybackInterruption"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextText(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentWebhook(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentWebhook(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentReturnPartialResponses(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentReturnPartialResponses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowTransitionRoutesTargetPage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRoutesTargetPage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowTransitionRoutesTargetFlow(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRoutesTargetFlow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowEventHandlers(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowEventHandlers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -820,15 +857,15 @@ func flattenDialogflowCXFlowEventHandlers(v interface{}, d *schema.ResourceData, } return transformed } -func flattenDialogflowCXFlowEventHandlersName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowEventHandlersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowEventHandlersEvent(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowEventHandlersEvent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } 
-func flattenDialogflowCXFlowEventHandlersTriggerFulfillment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowEventHandlersTriggerFulfillment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -847,7 +884,7 @@ func flattenDialogflowCXFlowEventHandlersTriggerFulfillment(v interface{}, d *sc flattenDialogflowCXFlowEventHandlersTriggerFulfillmentTag(original["tag"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessages(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -865,7 +902,7 @@ func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessages(v interface{ } return transformed } -func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesText(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -880,39 +917,39 @@ func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesText(v interf flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allowPlaybackInterruption"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextText(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, 
d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentWebhook(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentWebhook(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentReturnPartialResponses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentReturnPartialResponses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowEventHandlersTargetPage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowEventHandlersTargetPage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowEventHandlersTargetFlow(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowEventHandlersTargetFlow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowTransitionRouteGroups(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowTransitionRouteGroups(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenDialogflowCXFlowNluSettings(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowNluSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -929,31 +966,31 @@ func flattenDialogflowCXFlowNluSettings(v interface{}, d *schema.ResourceData, c flattenDialogflowCXFlowNluSettingsModelTrainingMode(original["modelTrainingMode"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXFlowNluSettingsModelType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowNluSettingsModelType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowNluSettingsClassificationThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowNluSettingsClassificationThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowNluSettingsModelTrainingMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowNluSettingsModelTrainingMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXFlowLanguageCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXFlowLanguageCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandDialogflowCXFlowDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDialogflowCXFlowDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowTransitionRoutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -966,42 +1003,42 @@ func expandDialogflowCXFlowTransitionRoutes(v interface{}, d TerraformResourceDa transformedName, err := expandDialogflowCXFlowTransitionRoutesName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedIntent, err := expandDialogflowCXFlowTransitionRoutesIntent(original["intent"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIntent); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIntent); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["intent"] = transformedIntent } transformedCondition, err := expandDialogflowCXFlowTransitionRoutesCondition(original["condition"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCondition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCondition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["condition"] = transformedCondition } transformedTriggerFulfillment, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillment(original["trigger_fulfillment"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTriggerFulfillment); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTriggerFulfillment); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["triggerFulfillment"] = transformedTriggerFulfillment } transformedTargetPage, err := expandDialogflowCXFlowTransitionRoutesTargetPage(original["target_page"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetPage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetPage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetPage"] = transformedTargetPage } transformedTargetFlow, err := expandDialogflowCXFlowTransitionRoutesTargetFlow(original["target_flow"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetFlow); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetFlow); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetFlow"] = transformedTargetFlow } @@ -1010,19 +1047,19 @@ func expandDialogflowCXFlowTransitionRoutes(v interface{}, d TerraformResourceDa return req, nil } -func expandDialogflowCXFlowTransitionRoutesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowTransitionRoutesIntent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutesIntent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowTransitionRoutesCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutesCondition(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutesTriggerFulfillment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1034,35 +1071,35 @@ func expandDialogflowCXFlowTransitionRoutesTriggerFulfillment(v interface{}, d T transformedMessages, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessages(original["messages"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMessages); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMessages); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["messages"] = transformedMessages } transformedWebhook, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentWebhook(original["webhook"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWebhook); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWebhook); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["webhook"] = transformedWebhook } transformedReturnPartialResponses, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentReturnPartialResponses(original["return_partial_responses"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["returnPartialResponses"] = transformedReturnPartialResponses } transformedTag, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentTag(original["tag"], d, config) if err != nil { 
return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tag"] = transformedTag } return transformed, nil } -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1075,7 +1112,7 @@ func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessages(v interfac transformedText, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesText(original["text"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedText); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["text"] = transformedText } @@ -1084,7 +1121,7 @@ func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessages(v interfac return req, nil } -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1096,49 +1133,49 @@ func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesText(v inte transformedText, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextText(original["text"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedText); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["text"] = transformedText } transformedAllowPlaybackInterruption, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allow_playback_interruption"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowPlaybackInterruption"] = transformedAllowPlaybackInterruption } return transformed, nil } -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentWebhook(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentWebhook(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentReturnPartialResponses(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentReturnPartialResponses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowTransitionRoutesTargetPage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutesTargetPage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowTransitionRoutesTargetFlow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRoutesTargetFlow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowEventHandlers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowEventHandlers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1151,35 +1188,35 @@ func expandDialogflowCXFlowEventHandlers(v interface{}, d TerraformResourceData, transformedName, err := expandDialogflowCXFlowEventHandlersName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["name"] = transformedName } transformedEvent, err := expandDialogflowCXFlowEventHandlersEvent(original["event"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEvent); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEvent); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["event"] = transformedEvent } transformedTriggerFulfillment, err := expandDialogflowCXFlowEventHandlersTriggerFulfillment(original["trigger_fulfillment"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTriggerFulfillment); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTriggerFulfillment); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["triggerFulfillment"] = transformedTriggerFulfillment } transformedTargetPage, err := expandDialogflowCXFlowEventHandlersTargetPage(original["target_page"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetPage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetPage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetPage"] = transformedTargetPage } transformedTargetFlow, err := expandDialogflowCXFlowEventHandlersTargetFlow(original["target_flow"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetFlow); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetFlow); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetFlow"] = transformedTargetFlow } @@ -1188,15 +1225,15 @@ func expandDialogflowCXFlowEventHandlers(v interface{}, d TerraformResourceData, return req, nil } -func expandDialogflowCXFlowEventHandlersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowEventHandlersName(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowEventHandlersEvent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowEventHandlersEvent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowEventHandlersTriggerFulfillment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowEventHandlersTriggerFulfillment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1208,35 +1245,35 @@ func expandDialogflowCXFlowEventHandlersTriggerFulfillment(v interface{}, d Terr transformedMessages, err := expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessages(original["messages"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMessages); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMessages); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["messages"] = transformedMessages } transformedWebhook, err := expandDialogflowCXFlowEventHandlersTriggerFulfillmentWebhook(original["webhook"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWebhook); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWebhook); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["webhook"] = transformedWebhook } transformedReturnPartialResponses, err := expandDialogflowCXFlowEventHandlersTriggerFulfillmentReturnPartialResponses(original["return_partial_responses"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !isEmptyValue(val) { + } else 
if val := reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["returnPartialResponses"] = transformedReturnPartialResponses } transformedTag, err := expandDialogflowCXFlowEventHandlersTriggerFulfillmentTag(original["tag"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tag"] = transformedTag } return transformed, nil } -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1249,7 +1286,7 @@ func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessages(v interface{} transformedText, err := expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesText(original["text"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedText); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["text"] = transformedText } @@ -1258,7 +1295,7 @@ func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessages(v interface{} return req, nil } -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ 
-1270,53 +1307,53 @@ func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesText(v interfa transformedText, err := expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextText(original["text"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedText); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["text"] = transformedText } transformedAllowPlaybackInterruption, err := expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allow_playback_interruption"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowPlaybackInterruption"] = transformedAllowPlaybackInterruption } return transformed, nil } -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentWebhook(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDialogflowCXFlowEventHandlersTriggerFulfillmentWebhook(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentReturnPartialResponses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowEventHandlersTriggerFulfillmentReturnPartialResponses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowEventHandlersTriggerFulfillmentTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowEventHandlersTargetPage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowEventHandlersTargetPage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowEventHandlersTargetFlow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowEventHandlersTargetFlow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowTransitionRouteGroups(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowTransitionRouteGroups(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowNluSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowNluSettings(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1328,39 +1365,39 @@ func expandDialogflowCXFlowNluSettings(v interface{}, d TerraformResourceData, c transformedModelType, err := expandDialogflowCXFlowNluSettingsModelType(original["model_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedModelType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedModelType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["modelType"] = transformedModelType } transformedClassificationThreshold, err := expandDialogflowCXFlowNluSettingsClassificationThreshold(original["classification_threshold"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedClassificationThreshold); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedClassificationThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["classificationThreshold"] = transformedClassificationThreshold } transformedModelTrainingMode, err := expandDialogflowCXFlowNluSettingsModelTrainingMode(original["model_training_mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedModelTrainingMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedModelTrainingMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["modelTrainingMode"] = transformedModelTrainingMode } return transformed, nil } -func expandDialogflowCXFlowNluSettingsModelType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowNluSettingsModelType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowNluSettingsClassificationThreshold(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandDialogflowCXFlowNluSettingsClassificationThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowNluSettingsModelTrainingMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowNluSettingsModelTrainingMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXFlowLanguageCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXFlowLanguageCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_intent.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_intent.go new file mode 100644 index 0000000000..fabf3f2682 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_intent.go @@ -0,0 +1,901 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dialogflowcx + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDialogflowCXIntent() *schema.Resource { + return &schema.Resource{ + Create: resourceDialogflowCXIntentCreate, + Read: resourceDialogflowCXIntentRead, + Update: resourceDialogflowCXIntentUpdate, + Delete: resourceDialogflowCXIntentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDialogflowCXIntentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(40 * time.Minute), + Update: schema.DefaultTimeout(40 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 64), + Description: `The human-readable name of the intent, unique within the agent.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 140), + Description: `Human readable description for better understanding an intent like its scope, content, result etc. Maximum character limit: 140 characters.`, + }, + "is_fallback": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether this is a fallback intent. Currently only default fallback intent is allowed in the agent, which is added upon agent creation. 
+Adding training phrases to fallback intent is useful in the case of requests that are mistakenly matched, since training phrases assigned to fallback intents act as negative examples that triggers no-match event.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The key/value metadata to label an intent. Labels can contain lowercase letters, digits and the symbols '-' and '_'. International characters are allowed, including letters from unicase alphabets. Keys must start with a letter. Keys and values can be no longer than 63 characters and no more than 128 bytes. +Prefix "sys-" is reserved for Dialogflow defined labels. Currently allowed Dialogflow defined labels include: * sys-head * sys-contextual The above labels do not require value. "sys-head" means the intent is a head intent. "sys.contextual" means the intent is a contextual intent. +An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "language_code": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The language of the following fields in intent: +Intent.training_phrases.parts.text +If not specified, the agent's default language is used. Many languages are supported. Note: languages must be enabled in the agent before they can be used.`, + }, + "parameters": { + Type: schema.TypeList, + Optional: true, + Description: `The collection of parameters associated with the intent.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "entity_type": { + Type: schema.TypeString, + Required: true, + Description: `The entity type of the parameter. 
+Format: projects/-/locations/-/agents/-/entityTypes/ for system entity types (for example, projects/-/locations/-/agents/-/entityTypes/sys.date), or projects//locations//agents//entityTypes/ for developer entity types.`, + }, + "id": { + Type: schema.TypeString, + Required: true, + Description: `The unique identifier of the parameter. This field is used by training phrases to annotate their parts.`, + }, + "is_list": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether the parameter represents a list of values.`, + }, + "redact": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether the parameter content should be redacted in log. If redaction is enabled, the parameter content will be replaced by parameter name during logging. +Note: the parameter content is subject to redaction if either parameter level redaction or entity type level redaction is enabled.`, + }, + }, + }, + }, + "parent": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The agent to create an intent for. +Format: projects//locations//agents/.`, + }, + "priority": { + Type: schema.TypeInt, + Optional: true, + Description: `The priority of this intent. Higher numbers represent higher priorities. +If the supplied value is unspecified or 0, the service translates the value to 500,000, which corresponds to the Normal priority in the console. +If the supplied value is negative, the intent is ignored in runtime detect intent requests.`, + }, + "training_phrases": { + Type: schema.TypeList, + Optional: true, + Description: `The collection of training phrases the agent is trained on to identify the intent.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "parts": { + Type: schema.TypeList, + Required: true, + Description: `The ordered list of training phrase parts. The parts are concatenated in order to form the training phrase. 
+Note: The API does not automatically annotate training phrases like the Dialogflow Console does. +Note: Do not forget to include whitespace at part boundaries, so the training phrase is well formatted when the parts are concatenated. +If the training phrase does not need to be annotated with parameters, you just need a single part with only the Part.text field set. +If you want to annotate the training phrase, you must create multiple parts, where the fields of each part are populated in one of two ways: +Part.text is set to a part of the phrase that has no parameters. +Part.text is set to a part of the phrase that you want to annotate, and the parameterId field is set.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "text": { + Type: schema.TypeString, + Required: true, + Description: `The text for this part.`, + }, + "parameter_id": { + Type: schema.TypeString, + Optional: true, + Description: `The parameter used to annotate this part of the training phrase. This field is required for annotated parts of the training phrase.`, + }, + }, + }, + }, + "repeat_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Indicates how many times this example was added to the intent.`, + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier of the training phrase.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier of the intent. 
+Format: projects//locations//agents//intents/.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDialogflowCXIntentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowCXIntentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + trainingPhrasesProp, err := expandDialogflowCXIntentTrainingPhrases(d.Get("training_phrases"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("training_phrases"); !tpgresource.IsEmptyValue(reflect.ValueOf(trainingPhrasesProp)) && (ok || !reflect.DeepEqual(v, trainingPhrasesProp)) { + obj["trainingPhrases"] = trainingPhrasesProp + } + parametersProp, err := expandDialogflowCXIntentParameters(d.Get("parameters"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parameters"); !tpgresource.IsEmptyValue(reflect.ValueOf(parametersProp)) && (ok || !reflect.DeepEqual(v, parametersProp)) { + obj["parameters"] = parametersProp + } + priorityProp, err := expandDialogflowCXIntentPriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(priorityProp)) && (ok || !reflect.DeepEqual(v, priorityProp)) { + obj["priority"] = priorityProp + } + isFallbackProp, err := expandDialogflowCXIntentIsFallback(d.Get("is_fallback"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("is_fallback"); !tpgresource.IsEmptyValue(reflect.ValueOf(isFallbackProp)) && (ok || !reflect.DeepEqual(v, isFallbackProp)) { + 
obj["isFallback"] = isFallbackProp + } + labelsProp, err := expandDialogflowCXIntentLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + descriptionProp, err := expandDialogflowCXIntentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + languageCodeProp, err := expandDialogflowCXIntentLanguageCode(d.Get("language_code"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("language_code"); !tpgresource.IsEmptyValue(reflect.ValueOf(languageCodeProp)) && (ok || !reflect.DeepEqual(v, languageCodeProp)) { + obj["languageCode"] = languageCodeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/intents") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Intent: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, 
+ UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Intent: %s", err) + } + if err := d.Set("name", flattenDialogflowCXIntentName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/intents/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Intent %q: %#v", d.Id(), res) + + return resourceDialogflowCXIntentRead(d, meta) +} + +func resourceDialogflowCXIntentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/intents/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DialogflowCXIntent %q", d.Id())) + } + + 
if err := d.Set("name", flattenDialogflowCXIntentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("display_name", flattenDialogflowCXIntentDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("training_phrases", flattenDialogflowCXIntentTrainingPhrases(res["trainingPhrases"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("parameters", flattenDialogflowCXIntentParameters(res["parameters"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("priority", flattenDialogflowCXIntentPriority(res["priority"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("is_fallback", flattenDialogflowCXIntentIsFallback(res["isFallback"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("labels", flattenDialogflowCXIntentLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("description", flattenDialogflowCXIntentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + if err := d.Set("language_code", flattenDialogflowCXIntentLanguageCode(res["languageCode"], d, config)); err != nil { + return fmt.Errorf("Error reading Intent: %s", err) + } + + return nil +} + +func resourceDialogflowCXIntentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowCXIntentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + trainingPhrasesProp, err := expandDialogflowCXIntentTrainingPhrases(d.Get("training_phrases"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("training_phrases"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, trainingPhrasesProp)) { + obj["trainingPhrases"] = trainingPhrasesProp + } + parametersProp, err := expandDialogflowCXIntentParameters(d.Get("parameters"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parameters"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, parametersProp)) { + obj["parameters"] = parametersProp + } + priorityProp, err := expandDialogflowCXIntentPriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, priorityProp)) { + obj["priority"] = priorityProp + } + isFallbackProp, err := expandDialogflowCXIntentIsFallback(d.Get("is_fallback"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("is_fallback"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, isFallbackProp)) { + obj["isFallback"] = isFallbackProp + } + labelsProp, err := expandDialogflowCXIntentLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + descriptionProp, err := expandDialogflowCXIntentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { 
+ obj["description"] = descriptionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/intents/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Intent %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("training_phrases") { + updateMask = append(updateMask, "trainingPhrases") + } + + if d.HasChange("parameters") { + updateMask = append(updateMask, "parameters") + } + + if d.HasChange("priority") { + updateMask = append(updateMask, "priority") + } + + if d.HasChange("is_fallback") { + updateMask = append(updateMask, "isFallback") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), 
+ }) + + if err != nil { + return fmt.Errorf("Error updating Intent %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Intent %q: %#v", d.Id(), res) + } + + return resourceDialogflowCXIntentRead(d, meta) +} + +func resourceDialogflowCXIntentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/intents/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + log.Printf("[DEBUG] Deleting Intent %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Intent") + } + + log.Printf("[DEBUG] Finished deleting Intent %q: %#v", d.Id(), res) + return nil +} + +func resourceDialogflowCXIntentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes 
in their value and parent contains slashes + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/intents/(?P[^/]+)", + "(?P.+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/intents/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDialogflowCXIntentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenDialogflowCXIntentDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXIntentTrainingPhrases(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "id": flattenDialogflowCXIntentTrainingPhrasesId(original["id"], d, config), + "parts": flattenDialogflowCXIntentTrainingPhrasesParts(original["parts"], d, config), + "repeat_count": flattenDialogflowCXIntentTrainingPhrasesRepeatCount(original["repeatCount"], d, config), + }) + } + return transformed +} +func flattenDialogflowCXIntentTrainingPhrasesId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXIntentTrainingPhrasesParts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + 
original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "text": flattenDialogflowCXIntentTrainingPhrasesPartsText(original["text"], d, config), + "parameter_id": flattenDialogflowCXIntentTrainingPhrasesPartsParameterId(original["parameterId"], d, config), + }) + } + return transformed +} +func flattenDialogflowCXIntentTrainingPhrasesPartsText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXIntentTrainingPhrasesPartsParameterId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXIntentTrainingPhrasesRepeatCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDialogflowCXIntentParameters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "id": flattenDialogflowCXIntentParametersId(original["id"], d, config), + "entity_type": flattenDialogflowCXIntentParametersEntityType(original["entityType"], d, config), + "is_list": flattenDialogflowCXIntentParametersIsList(original["isList"], d, config), + "redact": 
flattenDialogflowCXIntentParametersRedact(original["redact"], d, config), + }) + } + return transformed +} +func flattenDialogflowCXIntentParametersId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXIntentParametersEntityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXIntentParametersIsList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXIntentParametersRedact(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXIntentPriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDialogflowCXIntentIsFallback(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXIntentLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXIntentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXIntentLanguageCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDialogflowCXIntentDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXIntentTrainingPhrases(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandDialogflowCXIntentTrainingPhrasesId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedParts, err := expandDialogflowCXIntentTrainingPhrasesParts(original["parts"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedParts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["parts"] = transformedParts + } + + transformedRepeatCount, err := expandDialogflowCXIntentTrainingPhrasesRepeatCount(original["repeat_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepeatCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repeatCount"] = transformedRepeatCount + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDialogflowCXIntentTrainingPhrasesId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXIntentTrainingPhrasesParts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedText, err := expandDialogflowCXIntentTrainingPhrasesPartsText(original["text"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedText); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["text"] = transformedText + } + + transformedParameterId, err := expandDialogflowCXIntentTrainingPhrasesPartsParameterId(original["parameter_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedParameterId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["parameterId"] = transformedParameterId + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDialogflowCXIntentTrainingPhrasesPartsText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXIntentTrainingPhrasesPartsParameterId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXIntentTrainingPhrasesRepeatCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXIntentParameters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandDialogflowCXIntentParametersId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedEntityType, err := expandDialogflowCXIntentParametersEntityType(original["entity_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEntityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["entityType"] = transformedEntityType + } + + 
transformedIsList, err := expandDialogflowCXIntentParametersIsList(original["is_list"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIsList); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["isList"] = transformedIsList + } + + transformedRedact, err := expandDialogflowCXIntentParametersRedact(original["redact"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRedact); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["redact"] = transformedRedact + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDialogflowCXIntentParametersId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXIntentParametersEntityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXIntentParametersIsList(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXIntentParametersRedact(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXIntentPriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXIntentIsFallback(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXIntentLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + 
+func expandDialogflowCXIntentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXIntentLanguageCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_page.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_page.go similarity index 79% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_page.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_page.go index 1a9e9d054c..da23bb1092 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dialogflow_cx_page.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_page.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package dialogflowcx import ( "fmt" @@ -24,6 +27,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceDialogflowCXPage() *schema.Resource { @@ -459,8 +465,8 @@ Format: projects//locations//agents//flows/.+)/pages/(?P[^/]+)", "(?P.+)/(?P[^/]+)", }, d, config); err != nil { @@ -796,7 +832,7 @@ func resourceDialogflowCXPageImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "{{parent}}/pages/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/pages/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -805,18 +841,18 @@ func resourceDialogflowCXPageImport(d *schema.ResourceData, meta interface{}) ([ return []*schema.ResourceData{d}, nil } -func flattenDialogflowCXPageName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return NameFromSelfLinkStateFunc(v) + return tpgresource.NameFromSelfLinkStateFunc(v) } -func flattenDialogflowCXPageDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageEntryFulfillment(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenDialogflowCXPageEntryFulfillment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -835,7 +871,7 @@ func flattenDialogflowCXPageEntryFulfillment(v interface{}, d *schema.ResourceDa flattenDialogflowCXPageEntryFulfillmentTag(original["tag"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXPageEntryFulfillmentMessages(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEntryFulfillmentMessages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -853,7 +889,7 @@ func flattenDialogflowCXPageEntryFulfillmentMessages(v interface{}, d *schema.Re } return transformed } -func flattenDialogflowCXPageEntryFulfillmentMessagesText(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEntryFulfillmentMessagesText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -868,27 +904,27 @@ func flattenDialogflowCXPageEntryFulfillmentMessagesText(v interface{}, d *schem flattenDialogflowCXPageEntryFulfillmentMessagesTextAllowPlaybackInterruption(original["allowPlaybackInterruption"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXPageEntryFulfillmentMessagesTextText(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEntryFulfillmentMessagesTextText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageEntryFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEntryFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenDialogflowCXPageEntryFulfillmentWebhook(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEntryFulfillmentWebhook(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageEntryFulfillmentReturnPartialResponses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEntryFulfillmentReturnPartialResponses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageEntryFulfillmentTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEntryFulfillmentTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageForm(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageForm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -901,7 +937,7 @@ func flattenDialogflowCXPageForm(v interface{}, d *schema.ResourceData, config * flattenDialogflowCXPageFormParameters(original["parameters"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXPageFormParameters(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParameters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -924,23 +960,23 @@ func flattenDialogflowCXPageFormParameters(v interface{}, d *schema.ResourceData } return transformed } -func flattenDialogflowCXPageFormParametersDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageFormParametersRequired(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersRequired(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageFormParametersEntityType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersEntityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageFormParametersIsList(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersIsList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageFormParametersFillBehavior(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersFillBehavior(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -953,7 +989,7 @@ func flattenDialogflowCXPageFormParametersFillBehavior(v interface{}, d *schema. 
flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillment(original["initialPromptFulfillment"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -972,7 +1008,7 @@ func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillment(v flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentTag(original["tag"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessages(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -990,7 +1026,7 @@ func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMe } return transformed } -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesText(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1005,35 +1041,35 @@ func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMe flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextAllowPlaybackInterruption(original["allowPlaybackInterruption"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextText(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentWebhook(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentWebhook(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentReturnPartialResponses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentReturnPartialResponses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageFormParametersRedact(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageFormParametersRedact(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageTransitionRouteGroups(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRouteGroups(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageTransitionRoutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1056,19 +1092,19 @@ func flattenDialogflowCXPageTransitionRoutes(v interface{}, d *schema.ResourceDa } return transformed } -func flattenDialogflowCXPageTransitionRoutesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageTransitionRoutesIntent(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutesIntent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageTransitionRoutesCondition(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutesCondition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutesTriggerFulfillment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1087,7 +1123,7 @@ func flattenDialogflowCXPageTransitionRoutesTriggerFulfillment(v interface{}, d flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentTag(original["tag"], d, config) return []interface{}{transformed} } -func 
flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessages(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1105,7 +1141,7 @@ func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessages(v interfa } return transformed } -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesText(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1120,35 +1156,35 @@ func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesText(v int flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allowPlaybackInterruption"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextText(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentWebhook(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentWebhook(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentReturnPartialResponses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentReturnPartialResponses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageTransitionRoutesTargetPage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutesTargetPage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageTransitionRoutesTargetFlow(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageTransitionRoutesTargetFlow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageEventHandlers(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEventHandlers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1170,15 +1206,15 @@ func flattenDialogflowCXPageEventHandlers(v interface{}, d *schema.ResourceData, } return transformed } -func flattenDialogflowCXPageEventHandlersName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEventHandlersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageEventHandlersEvent(v interface{}, d *schema.ResourceData, config *Config) interface{} 
{ +func flattenDialogflowCXPageEventHandlersEvent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageEventHandlersTriggerFulfillment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEventHandlersTriggerFulfillment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1197,7 +1233,7 @@ func flattenDialogflowCXPageEventHandlersTriggerFulfillment(v interface{}, d *sc flattenDialogflowCXPageEventHandlersTriggerFulfillmentTag(original["tag"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessages(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1215,7 +1251,7 @@ func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessages(v interface{ } return transformed } -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesText(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesText(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1230,43 +1266,43 @@ func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesText(v interf flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allowPlaybackInterruption"], d, config) return []interface{}{transformed} } -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextText(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextText(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentWebhook(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEventHandlersTriggerFulfillmentWebhook(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentReturnPartialResponses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEventHandlersTriggerFulfillmentReturnPartialResponses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEventHandlersTriggerFulfillmentTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageEventHandlersTargetPage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEventHandlersTargetPage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageEventHandlersTargetFlow(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDialogflowCXPageEventHandlersTargetFlow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDialogflowCXPageLanguageCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenDialogflowCXPageLanguageCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandDialogflowCXPageDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEntryFulfillment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEntryFulfillment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1278,35 +1314,35 @@ func expandDialogflowCXPageEntryFulfillment(v interface{}, d TerraformResourceDa transformedMessages, err := expandDialogflowCXPageEntryFulfillmentMessages(original["messages"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMessages); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMessages); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["messages"] = transformedMessages } transformedWebhook, err := expandDialogflowCXPageEntryFulfillmentWebhook(original["webhook"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWebhook); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWebhook); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["webhook"] = transformedWebhook } transformedReturnPartialResponses, err := expandDialogflowCXPageEntryFulfillmentReturnPartialResponses(original["return_partial_responses"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReturnPartialResponses); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["returnPartialResponses"] = transformedReturnPartialResponses } transformedTag, err := expandDialogflowCXPageEntryFulfillmentTag(original["tag"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tag"] = transformedTag } return transformed, nil } -func expandDialogflowCXPageEntryFulfillmentMessages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEntryFulfillmentMessages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1319,7 +1355,7 @@ func expandDialogflowCXPageEntryFulfillmentMessages(v interface{}, d TerraformRe transformedText, err := expandDialogflowCXPageEntryFulfillmentMessagesText(original["text"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedText); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["text"] = transformedText } @@ -1328,7 +1364,7 @@ func expandDialogflowCXPageEntryFulfillmentMessages(v interface{}, d TerraformRe return req, nil } -func expandDialogflowCXPageEntryFulfillmentMessagesText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEntryFulfillmentMessagesText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1340,41 +1376,41 @@ func expandDialogflowCXPageEntryFulfillmentMessagesText(v interface{}, d Terrafo transformedText, err := 
expandDialogflowCXPageEntryFulfillmentMessagesTextText(original["text"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedText); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["text"] = transformedText } transformedAllowPlaybackInterruption, err := expandDialogflowCXPageEntryFulfillmentMessagesTextAllowPlaybackInterruption(original["allow_playback_interruption"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowPlaybackInterruption"] = transformedAllowPlaybackInterruption } return transformed, nil } -func expandDialogflowCXPageEntryFulfillmentMessagesTextText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEntryFulfillmentMessagesTextText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEntryFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEntryFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEntryFulfillmentWebhook(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEntryFulfillmentWebhook(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEntryFulfillmentReturnPartialResponses(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { +func expandDialogflowCXPageEntryFulfillmentReturnPartialResponses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEntryFulfillmentTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEntryFulfillmentTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageForm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageForm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1386,14 +1422,14 @@ func expandDialogflowCXPageForm(v interface{}, d TerraformResourceData, config * transformedParameters, err := expandDialogflowCXPageFormParameters(original["parameters"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedParameters); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedParameters); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["parameters"] = transformedParameters } return transformed, nil } -func expandDialogflowCXPageFormParameters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParameters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1406,42 +1442,42 @@ func expandDialogflowCXPageFormParameters(v interface{}, d TerraformResourceData transformedDisplayName, err := expandDialogflowCXPageFormParametersDisplayName(original["display_name"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedDisplayName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["displayName"] = transformedDisplayName } transformedRequired, err := expandDialogflowCXPageFormParametersRequired(original["required"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequired); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequired); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["required"] = transformedRequired } transformedEntityType, err := expandDialogflowCXPageFormParametersEntityType(original["entity_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEntityType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEntityType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["entityType"] = transformedEntityType } transformedIsList, err := expandDialogflowCXPageFormParametersIsList(original["is_list"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIsList); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIsList); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["isList"] = transformedIsList } transformedFillBehavior, err := expandDialogflowCXPageFormParametersFillBehavior(original["fill_behavior"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFillBehavior); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFillBehavior); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fillBehavior"] = transformedFillBehavior } transformedRedact, err := expandDialogflowCXPageFormParametersRedact(original["redact"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedRedact); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRedact); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["redact"] = transformedRedact } @@ -1450,23 +1486,23 @@ func expandDialogflowCXPageFormParameters(v interface{}, d TerraformResourceData return req, nil } -func expandDialogflowCXPageFormParametersDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageFormParametersRequired(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersRequired(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageFormParametersEntityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersEntityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageFormParametersIsList(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersIsList(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageFormParametersFillBehavior(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersFillBehavior(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1478,14 +1514,14 @@ func 
expandDialogflowCXPageFormParametersFillBehavior(v interface{}, d Terraform transformedInitialPromptFulfillment, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillment(original["initial_prompt_fulfillment"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInitialPromptFulfillment); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInitialPromptFulfillment); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["initialPromptFulfillment"] = transformedInitialPromptFulfillment } return transformed, nil } -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1497,35 +1533,35 @@ func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillment(v transformedMessages, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessages(original["messages"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMessages); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMessages); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["messages"] = transformedMessages } transformedWebhook, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentWebhook(original["webhook"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWebhook); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWebhook); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["webhook"] = transformedWebhook } 
transformedReturnPartialResponses, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentReturnPartialResponses(original["return_partial_responses"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["returnPartialResponses"] = transformedReturnPartialResponses } transformedTag, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentTag(original["tag"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tag"] = transformedTag } return transformed, nil } -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1538,7 +1574,7 @@ func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMes transformedText, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesText(original["text"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedText); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["text"] = transformedText } @@ -1547,7 +1583,7 @@ func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMes return 
req, nil } -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1559,49 +1595,49 @@ func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMes transformedText, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextText(original["text"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedText); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["text"] = transformedText } transformedAllowPlaybackInterruption, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextAllowPlaybackInterruption(original["allow_playback_interruption"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowPlaybackInterruption"] = transformedAllowPlaybackInterruption } return transformed, nil } -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentWebhook(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentWebhook(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentReturnPartialResponses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentReturnPartialResponses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageFormParametersRedact(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageFormParametersRedact(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageTransitionRouteGroups(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDialogflowCXPageTransitionRouteGroups(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageTransitionRoutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1614,42 +1650,42 @@ func expandDialogflowCXPageTransitionRoutes(v interface{}, d TerraformResourceDa transformedName, err := expandDialogflowCXPageTransitionRoutesName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedIntent, err := expandDialogflowCXPageTransitionRoutesIntent(original["intent"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIntent); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIntent); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["intent"] = transformedIntent } transformedCondition, err := expandDialogflowCXPageTransitionRoutesCondition(original["condition"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCondition); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCondition); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["condition"] = transformedCondition } transformedTriggerFulfillment, err := expandDialogflowCXPageTransitionRoutesTriggerFulfillment(original["trigger_fulfillment"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedTriggerFulfillment); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTriggerFulfillment); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["triggerFulfillment"] = transformedTriggerFulfillment } transformedTargetPage, err := expandDialogflowCXPageTransitionRoutesTargetPage(original["target_page"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetPage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetPage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetPage"] = transformedTargetPage } transformedTargetFlow, err := expandDialogflowCXPageTransitionRoutesTargetFlow(original["target_flow"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetFlow); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetFlow); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetFlow"] = transformedTargetFlow } @@ -1658,19 +1694,19 @@ func expandDialogflowCXPageTransitionRoutes(v interface{}, d TerraformResourceDa return req, nil } -func expandDialogflowCXPageTransitionRoutesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageTransitionRoutesIntent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutesIntent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageTransitionRoutesCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutesCondition(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageTransitionRoutesTriggerFulfillment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutesTriggerFulfillment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1682,35 +1718,35 @@ func expandDialogflowCXPageTransitionRoutesTriggerFulfillment(v interface{}, d T transformedMessages, err := expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessages(original["messages"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMessages); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMessages); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["messages"] = transformedMessages } transformedWebhook, err := expandDialogflowCXPageTransitionRoutesTriggerFulfillmentWebhook(original["webhook"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWebhook); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWebhook); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["webhook"] = transformedWebhook } transformedReturnPartialResponses, err := expandDialogflowCXPageTransitionRoutesTriggerFulfillmentReturnPartialResponses(original["return_partial_responses"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["returnPartialResponses"] = transformedReturnPartialResponses } transformedTag, err := 
expandDialogflowCXPageTransitionRoutesTriggerFulfillmentTag(original["tag"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tag"] = transformedTag } return transformed, nil } -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1723,7 +1759,7 @@ func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessages(v interfac transformedText, err := expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesText(original["text"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedText); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["text"] = transformedText } @@ -1732,7 +1768,7 @@ func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessages(v interfac return req, nil } -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1744,49 +1780,49 @@ func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesText(v inte transformedText, err := 
expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextText(original["text"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedText); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["text"] = transformedText } transformedAllowPlaybackInterruption, err := expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allow_playback_interruption"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowPlaybackInterruption"] = transformedAllowPlaybackInterruption } return transformed, nil } -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentWebhook(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentWebhook(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentReturnPartialResponses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentReturnPartialResponses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageTransitionRoutesTargetPage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutesTargetPage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageTransitionRoutesTargetFlow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageTransitionRoutesTargetFlow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEventHandlers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEventHandlers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1799,35 +1835,35 @@ func expandDialogflowCXPageEventHandlers(v interface{}, d TerraformResourceData, transformedName, err := expandDialogflowCXPageEventHandlersName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedEvent, err := expandDialogflowCXPageEventHandlersEvent(original["event"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEvent); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEvent); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["event"] = transformedEvent } transformedTriggerFulfillment, err := expandDialogflowCXPageEventHandlersTriggerFulfillment(original["trigger_fulfillment"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTriggerFulfillment); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTriggerFulfillment); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["triggerFulfillment"] = transformedTriggerFulfillment } transformedTargetPage, err := expandDialogflowCXPageEventHandlersTargetPage(original["target_page"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetPage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetPage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetPage"] = transformedTargetPage } transformedTargetFlow, err := expandDialogflowCXPageEventHandlersTargetFlow(original["target_flow"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetFlow); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetFlow); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetFlow"] = transformedTargetFlow } @@ -1836,15 +1872,15 @@ func expandDialogflowCXPageEventHandlers(v interface{}, d TerraformResourceData, return req, nil } -func expandDialogflowCXPageEventHandlersName(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEventHandlersName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEventHandlersEvent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEventHandlersEvent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEventHandlersTriggerFulfillment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEventHandlersTriggerFulfillment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1856,35 +1892,35 @@ func expandDialogflowCXPageEventHandlersTriggerFulfillment(v interface{}, d Terr transformedMessages, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentMessages(original["messages"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMessages); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMessages); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["messages"] = transformedMessages } transformedWebhook, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentWebhook(original["webhook"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWebhook); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWebhook); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["webhook"] = transformedWebhook } transformedReturnPartialResponses, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentReturnPartialResponses(original["return_partial_responses"], d, config) if err != nil { return 
nil, err - } else if val := reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["returnPartialResponses"] = transformedReturnPartialResponses } transformedTag, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentTag(original["tag"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tag"] = transformedTag } return transformed, nil } -func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1897,7 +1933,7 @@ func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessages(v interface{} transformedText, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesText(original["text"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedText); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["text"] = transformedText } @@ -1906,7 +1942,7 @@ func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessages(v interface{} return req, nil } -func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesText(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1918,48 +1954,48 @@ func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesText(v interfa transformedText, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextText(original["text"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedText); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["text"] = transformedText } transformedAllowPlaybackInterruption, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allow_playback_interruption"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowPlaybackInterruption"] = transformedAllowPlaybackInterruption } return transformed, nil } -func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextText(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEventHandlersTriggerFulfillmentWebhook(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEventHandlersTriggerFulfillmentWebhook(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEventHandlersTriggerFulfillmentReturnPartialResponses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEventHandlersTriggerFulfillmentReturnPartialResponses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEventHandlersTriggerFulfillmentTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEventHandlersTriggerFulfillmentTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEventHandlersTargetPage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEventHandlersTargetPage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageEventHandlersTargetFlow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageEventHandlersTargetFlow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDialogflowCXPageLanguageCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDialogflowCXPageLanguageCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_version.go new file mode 100644 index 0000000000..57fdc6ac7b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_version.go @@ -0,0 +1,508 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package dialogflowcx + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDialogflowCXVersion() *schema.Resource { + return &schema.Resource{ + Create: resourceDialogflowCXVersionCreate, + Read: resourceDialogflowCXVersionRead, + Update: resourceDialogflowCXVersionUpdate, + Delete: resourceDialogflowCXVersionDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDialogflowCXVersionImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(40 * time.Minute), + Update: schema.DefaultTimeout(40 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 64), + Description: `The human-readable name of the version. Limit of 64 characters.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 500), + Description: `The description of the version. 
The maximum length is 500 characters. If exceeded, the request is rejected.`, + }, + "parent": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Flow to create an Version for. +Format: projects//locations//agents//flows/.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Format: projects//locations//agents//flows//versions/. Version ID is a self-increasing number generated by Dialogflow upon version creation.`, + }, + "nlu_settings": { + Type: schema.TypeList, + Computed: true, + Description: `The NLU settings of the flow at version creation.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "classification_threshold": { + Type: schema.TypeFloat, + Optional: true, + Description: `To filter out false positive results and still get variety in matched natural language inputs for your agent, you can tune the machine learning classification threshold. If the returned score value is less than the threshold value, then a no-match event will be triggered. +The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). If set to 0.0, the default of 0.3 is used.`, + }, + "model_training_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MODEL_TRAINING_MODE_AUTOMATIC", "MODEL_TRAINING_MODE_MANUAL", ""}), + Description: `Indicates NLU model training mode. +* MODEL_TRAINING_MODE_AUTOMATIC: NLU model training is automatically triggered when a flow gets modified. User can also manually trigger model training in this mode. +* MODEL_TRAINING_MODE_MANUAL: User needs to manually trigger NLU model training. Best for large flows whose models take long time to train. 
Possible values: ["MODEL_TRAINING_MODE_AUTOMATIC", "MODEL_TRAINING_MODE_MANUAL"]`, + }, + "model_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MODEL_TYPE_STANDARD", "MODEL_TYPE_ADVANCED", ""}), + Description: `Indicates the type of NLU model. +* MODEL_TYPE_STANDARD: Use standard NLU model. +* MODEL_TYPE_ADVANCED: Use advanced NLU model. Possible values: ["MODEL_TYPE_STANDARD", "MODEL_TYPE_ADVANCED"]`, + }, + }, + }, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The state of this version. +* RUNNING: Version is not ready to serve (e.g. training is running). +* SUCCEEDED: Training has succeeded and this version is ready to serve. +* FAILED: Version training failed.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDialogflowCXVersionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowCXVersionDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandDialogflowCXVersionDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/versions") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Version: %#v", obj) + billingProject := "" + + // err == nil indicates that 
the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Version: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/versions/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = DialogflowCXOperationWaitTimeWithResponse( + config, res, &opRes, "Creating Version", userAgent, location, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Version: %s", err) + } + + if err := d.Set("name", flattenDialogflowCXVersionName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "{{parent}}/versions/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Version %q: %#v", d.Id(), res) + + return resourceDialogflowCXVersionRead(d, meta) +} + +func resourceDialogflowCXVersionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/versions/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DialogflowCXVersion %q", d.Id())) + } + + if err := d.Set("name", flattenDialogflowCXVersionName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Version: %s", err) + } + if err := d.Set("display_name", flattenDialogflowCXVersionDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Version: %s", err) + } + if err := 
d.Set("description", flattenDialogflowCXVersionDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Version: %s", err) + } + if err := d.Set("nlu_settings", flattenDialogflowCXVersionNluSettings(res["nluSettings"], d, config)); err != nil { + return fmt.Errorf("Error reading Version: %s", err) + } + if err := d.Set("create_time", flattenDialogflowCXVersionCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Version: %s", err) + } + if err := d.Set("state", flattenDialogflowCXVersionState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Version: %s", err) + } + + return nil +} + +func resourceDialogflowCXVersionUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowCXVersionDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandDialogflowCXVersionDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/versions/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Version %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + 
updateMask = append(updateMask, "description") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Version %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Version %q: %#v", d.Id(), res) + } + + err = DialogflowCXOperationWaitTime( + config, res, "Updating Version", userAgent, location, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceDialogflowCXVersionRead(d, meta) +} + +func resourceDialogflowCXVersionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/versions/{{name}}") + if err != nil { + return err + } + + var obj 
map[string]interface{} + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + log.Printf("[DEBUG] Deleting Version %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Version") + } + + err = DialogflowCXOperationWaitTime( + config, res, "Deleting Version", userAgent, location, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Version %q: %#v", d.Id(), res) + return nil +} + +func resourceDialogflowCXVersionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value and parent contains slashes + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/versions/(?P[^/]+)", + "(?P.+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/versions/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func 
flattenDialogflowCXVersionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenDialogflowCXVersionDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXVersionDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXVersionNluSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["model_type"] = + flattenDialogflowCXVersionNluSettingsModelType(original["modelType"], d, config) + transformed["classification_threshold"] = + flattenDialogflowCXVersionNluSettingsClassificationThreshold(original["classificationThreshold"], d, config) + transformed["model_training_mode"] = + flattenDialogflowCXVersionNluSettingsModelTrainingMode(original["modelTrainingMode"], d, config) + return []interface{}{transformed} +} +func flattenDialogflowCXVersionNluSettingsModelType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXVersionNluSettingsClassificationThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXVersionNluSettingsModelTrainingMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXVersionCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXVersionState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
expandDialogflowCXVersionDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXVersionDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_webhook.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_webhook.go new file mode 100644 index 0000000000..b8d83137c3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx/resource_dialogflow_cx_webhook.go @@ -0,0 +1,857 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dialogflowcx + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDialogflowCXWebhook() *schema.Resource { + return &schema.Resource{ + Create: resourceDialogflowCXWebhookCreate, + Read: resourceDialogflowCXWebhookRead, + Update: resourceDialogflowCXWebhookUpdate, + Delete: resourceDialogflowCXWebhookDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDialogflowCXWebhookImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(40 * time.Minute), + Update: schema.DefaultTimeout(40 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `The human-readable name of the webhook, unique within the agent.`, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether the webhook is disabled.`, + }, + "enable_spell_correction": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates if automatic spell correction is enabled in detect intent requests.`, + }, + "enable_stackdriver_logging": { + Type: schema.TypeBool, + Optional: true, + Description: `Determines whether this agent should log conversation queries.`, + }, + "generic_web_service": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for a generic web service.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: `Whether to use speech adaptation for speech recognition.`, + }, + "allowed_ca_certs": { + Type: schema.TypeList, + 
Optional: true, + Description: `Specifies a list of allowed custom CA certificates (in DER format) for HTTPS verification.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "request_headers": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `The HTTP request headers to send together with webhook requests.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "parent": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The agent to create a webhook for. +Format: projects//locations//agents/.`, + }, + "security_settings": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the SecuritySettings reference for the agent. Format: projects//locations//securitySettings/.`, + }, + "service_directory": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for a Service Directory service.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "generic_web_service": { + Type: schema.TypeList, + Required: true, + Description: `The name of Service Directory service.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: `Whether to use speech adaptation for speech recognition.`, + }, + "allowed_ca_certs": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies a list of allowed custom CA certificates (in DER format) for HTTPS verification.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "request_headers": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `The HTTP request headers to send together with webhook requests.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "service": { + Type: schema.TypeString, + Required: true, + Description: `The name of Service Directory service.`, + }, + }, + }, + }, + "timeout": { + Type: schema.TypeString, + 
Optional: true, + Description: `Webhook execution timeout.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier of the webhook. +Format: projects//locations//agents//webhooks/.`, + }, + "start_flow": { + Type: schema.TypeString, + Computed: true, + Description: `Name of the start flow in this agent. A start flow will be automatically created when the agent is created, and can only be deleted by deleting the agent. Format: projects//locations//agents//flows/.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDialogflowCXWebhookCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowCXWebhookDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + timeoutProp, err := expandDialogflowCXWebhookTimeout(d.Get("timeout"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("timeout"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeoutProp)) && (ok || !reflect.DeepEqual(v, timeoutProp)) { + obj["timeout"] = timeoutProp + } + disabledProp, err := expandDialogflowCXWebhookDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + genericWebServiceProp, err := expandDialogflowCXWebhookGenericWebService(d.Get("generic_web_service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("generic_web_service"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(genericWebServiceProp)) && (ok || !reflect.DeepEqual(v, genericWebServiceProp)) { + obj["genericWebService"] = genericWebServiceProp + } + serviceDirectoryProp, err := expandDialogflowCXWebhookServiceDirectory(d.Get("service_directory"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_directory"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceDirectoryProp)) && (ok || !reflect.DeepEqual(v, serviceDirectoryProp)) { + obj["serviceDirectory"] = serviceDirectoryProp + } + securitySettingsProp, err := expandDialogflowCXWebhookSecuritySettings(d.Get("security_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("security_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(securitySettingsProp)) && (ok || !reflect.DeepEqual(v, securitySettingsProp)) { + obj["securitySettings"] = securitySettingsProp + } + enableStackdriverLoggingProp, err := expandDialogflowCXWebhookEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_stackdriver_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableStackdriverLoggingProp)) && (ok || !reflect.DeepEqual(v, enableStackdriverLoggingProp)) { + obj["enableStackdriverLogging"] = enableStackdriverLoggingProp + } + enableSpellCorrectionProp, err := expandDialogflowCXWebhookEnableSpellCorrection(d.Get("enable_spell_correction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_spell_correction"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableSpellCorrectionProp)) && (ok || !reflect.DeepEqual(v, enableSpellCorrectionProp)) { + obj["enableSpellCorrection"] = enableSpellCorrectionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/webhooks") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Webhook: %#v", obj) + billingProject := "" 
+ + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Webhook: %s", err) + } + if err := d.Set("name", flattenDialogflowCXWebhookName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/webhooks/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Webhook %q: %#v", d.Id(), res) + + return resourceDialogflowCXWebhookRead(d, meta) +} + +func resourceDialogflowCXWebhookRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/webhooks/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + 
billingProject = bp + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DialogflowCXWebhook %q", d.Id())) + } + + if err := d.Set("name", flattenDialogflowCXWebhookName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Webhook: %s", err) + } + if err := d.Set("display_name", flattenDialogflowCXWebhookDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Webhook: %s", err) + } + if err := d.Set("timeout", flattenDialogflowCXWebhookTimeout(res["timeout"], d, config)); err != nil { + return fmt.Errorf("Error reading Webhook: %s", err) + } + if err := d.Set("disabled", flattenDialogflowCXWebhookDisabled(res["disabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Webhook: %s", err) + } + if err := d.Set("generic_web_service", flattenDialogflowCXWebhookGenericWebService(res["genericWebService"], d, config)); err != nil { + return fmt.Errorf("Error reading Webhook: %s", err) + } + if err := d.Set("service_directory", flattenDialogflowCXWebhookServiceDirectory(res["serviceDirectory"], d, config)); err != nil { + return fmt.Errorf("Error reading Webhook: %s", err) + } + if err := d.Set("start_flow", flattenDialogflowCXWebhookStartFlow(res["startFlow"], d, config)); err != nil { + return fmt.Errorf("Error reading Webhook: %s", 
err) + } + if err := d.Set("security_settings", flattenDialogflowCXWebhookSecuritySettings(res["securitySettings"], d, config)); err != nil { + return fmt.Errorf("Error reading Webhook: %s", err) + } + if err := d.Set("enable_stackdriver_logging", flattenDialogflowCXWebhookEnableStackdriverLogging(res["enableStackdriverLogging"], d, config)); err != nil { + return fmt.Errorf("Error reading Webhook: %s", err) + } + if err := d.Set("enable_spell_correction", flattenDialogflowCXWebhookEnableSpellCorrection(res["enableSpellCorrection"], d, config)); err != nil { + return fmt.Errorf("Error reading Webhook: %s", err) + } + + return nil +} + +func resourceDialogflowCXWebhookUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + displayNameProp, err := expandDialogflowCXWebhookDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + timeoutProp, err := expandDialogflowCXWebhookTimeout(d.Get("timeout"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("timeout"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutProp)) { + obj["timeout"] = timeoutProp + } + disabledProp, err := expandDialogflowCXWebhookDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + genericWebServiceProp, err := expandDialogflowCXWebhookGenericWebService(d.Get("generic_web_service"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("generic_web_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, genericWebServiceProp)) { + obj["genericWebService"] = genericWebServiceProp + } + serviceDirectoryProp, err := expandDialogflowCXWebhookServiceDirectory(d.Get("service_directory"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_directory"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceDirectoryProp)) { + obj["serviceDirectory"] = serviceDirectoryProp + } + securitySettingsProp, err := expandDialogflowCXWebhookSecuritySettings(d.Get("security_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("security_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, securitySettingsProp)) { + obj["securitySettings"] = securitySettingsProp + } + enableStackdriverLoggingProp, err := expandDialogflowCXWebhookEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_stackdriver_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableStackdriverLoggingProp)) { + obj["enableStackdriverLogging"] = enableStackdriverLoggingProp + } + enableSpellCorrectionProp, err := expandDialogflowCXWebhookEnableSpellCorrection(d.Get("enable_spell_correction"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_spell_correction"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableSpellCorrectionProp)) { + obj["enableSpellCorrection"] = enableSpellCorrectionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/webhooks/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Webhook %q: %#v", d.Id(), obj) + updateMask := []string{} + + if 
d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("timeout") { + updateMask = append(updateMask, "timeout") + } + + if d.HasChange("disabled") { + updateMask = append(updateMask, "disabled") + } + + if d.HasChange("generic_web_service") { + updateMask = append(updateMask, "genericWebService") + } + + if d.HasChange("service_directory") { + updateMask = append(updateMask, "serviceDirectory") + } + + if d.HasChange("security_settings") { + updateMask = append(updateMask, "securitySettings") + } + + if d.HasChange("enable_stackdriver_logging") { + updateMask = append(updateMask, "enableStackdriverLogging") + } + + if d.HasChange("enable_spell_correction") { + updateMask = append(updateMask, "enableSpellCorrection") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Webhook %q: %s", d.Id(), err) + } else { + 
log.Printf("[DEBUG] Finished updating Webhook %q: %#v", d.Id(), res) + } + + return resourceDialogflowCXWebhookRead(d, meta) +} + +func resourceDialogflowCXWebhookDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/webhooks/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // extract location from the parent + location := "" + + if parts := regexp.MustCompile(`locations\/([^\/]*)\/`).FindStringSubmatch(d.Get("parent").(string)); parts != nil { + location = parts[1] + } else { + return fmt.Errorf( + "Saw %s when the parent is expected to contains location %s", + d.Get("parent"), + "projects/{{project}}/locations/{{location}}/...", + ) + } + + url = strings.Replace(url, "-dialogflow", fmt.Sprintf("%s-dialogflow", location), 1) + log.Printf("[DEBUG] Deleting Webhook %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Webhook") + } + + log.Printf("[DEBUG] Finished deleting Webhook %q: %#v", d.Id(), res) + return nil +} + +func resourceDialogflowCXWebhookImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value and parent contains slashes + if err := tpgresource.ParseImportId([]string{ + 
"(?P.+)/webhooks/(?P[^/]+)", + "(?P.+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/webhooks/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDialogflowCXWebhookName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenDialogflowCXWebhookDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXWebhookTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXWebhookDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXWebhookGenericWebService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenDialogflowCXWebhookGenericWebServiceUri(original["uri"], d, config) + transformed["request_headers"] = + flattenDialogflowCXWebhookGenericWebServiceRequestHeaders(original["requestHeaders"], d, config) + transformed["allowed_ca_certs"] = + flattenDialogflowCXWebhookGenericWebServiceAllowedCaCerts(original["allowedCaCerts"], d, config) + return []interface{}{transformed} +} +func flattenDialogflowCXWebhookGenericWebServiceUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXWebhookGenericWebServiceRequestHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ + return v +} + +func flattenDialogflowCXWebhookGenericWebServiceAllowedCaCerts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXWebhookServiceDirectory(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["service"] = + flattenDialogflowCXWebhookServiceDirectoryService(original["service"], d, config) + transformed["generic_web_service"] = + flattenDialogflowCXWebhookServiceDirectoryGenericWebService(original["genericWebService"], d, config) + return []interface{}{transformed} +} +func flattenDialogflowCXWebhookServiceDirectoryService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXWebhookServiceDirectoryGenericWebService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenDialogflowCXWebhookServiceDirectoryGenericWebServiceUri(original["uri"], d, config) + transformed["request_headers"] = + flattenDialogflowCXWebhookServiceDirectoryGenericWebServiceRequestHeaders(original["requestHeaders"], d, config) + transformed["allowed_ca_certs"] = + flattenDialogflowCXWebhookServiceDirectoryGenericWebServiceAllowedCaCerts(original["allowedCaCerts"], d, config) + return []interface{}{transformed} +} +func flattenDialogflowCXWebhookServiceDirectoryGenericWebServiceUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXWebhookServiceDirectoryGenericWebServiceRequestHeaders(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXWebhookServiceDirectoryGenericWebServiceAllowedCaCerts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXWebhookStartFlow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXWebhookSecuritySettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXWebhookEnableStackdriverLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDialogflowCXWebhookEnableSpellCorrection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDialogflowCXWebhookDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXWebhookTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXWebhookDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXWebhookGenericWebService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandDialogflowCXWebhookGenericWebServiceUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedRequestHeaders, err := 
expandDialogflowCXWebhookGenericWebServiceRequestHeaders(original["request_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequestHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requestHeaders"] = transformedRequestHeaders + } + + transformedAllowedCaCerts, err := expandDialogflowCXWebhookGenericWebServiceAllowedCaCerts(original["allowed_ca_certs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowedCaCerts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowedCaCerts"] = transformedAllowedCaCerts + } + + return transformed, nil +} + +func expandDialogflowCXWebhookGenericWebServiceUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXWebhookGenericWebServiceRequestHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDialogflowCXWebhookGenericWebServiceAllowedCaCerts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXWebhookServiceDirectory(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedService, err := expandDialogflowCXWebhookServiceDirectoryService(original["service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedService); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["service"] = transformedService + } + + transformedGenericWebService, err := expandDialogflowCXWebhookServiceDirectoryGenericWebService(original["generic_web_service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGenericWebService); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["genericWebService"] = transformedGenericWebService + } + + return transformed, nil +} + +func expandDialogflowCXWebhookServiceDirectoryService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXWebhookServiceDirectoryGenericWebService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandDialogflowCXWebhookServiceDirectoryGenericWebServiceUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedRequestHeaders, err := expandDialogflowCXWebhookServiceDirectoryGenericWebServiceRequestHeaders(original["request_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequestHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requestHeaders"] = transformedRequestHeaders + } + + transformedAllowedCaCerts, err := expandDialogflowCXWebhookServiceDirectoryGenericWebServiceAllowedCaCerts(original["allowed_ca_certs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowedCaCerts); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["allowedCaCerts"] = transformedAllowedCaCerts + } + + return transformed, nil +} + +func expandDialogflowCXWebhookServiceDirectoryGenericWebServiceUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXWebhookServiceDirectoryGenericWebServiceRequestHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDialogflowCXWebhookServiceDirectoryGenericWebServiceAllowedCaCerts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXWebhookSecuritySettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXWebhookEnableStackdriverLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDialogflowCXWebhookEnableSpellCorrection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/data_source_dns_keys.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/data_source_dns_keys.go new file mode 100644 index 0000000000..a48b30f7c9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/data_source_dns_keys.go @@ -0,0 +1,396 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package dns + +import ( + "context" + "fmt" + + "google.golang.org/api/dns/v1" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + "github.com/hashicorp/terraform-provider-google/google/fwresource" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Ensure the implementation satisfies the expected interfaces +var ( + _ datasource.DataSource = &GoogleDnsKeysDataSource{} + _ datasource.DataSourceWithConfigure = &GoogleDnsKeysDataSource{} +) + +func NewGoogleDnsKeysDataSource() datasource.DataSource { + return &GoogleDnsKeysDataSource{} +} + +// GoogleDnsKeysDataSource defines the data source implementation +type GoogleDnsKeysDataSource struct { + client *dns.Service + project types.String +} + +type GoogleDnsKeysModel struct { + Id types.String `tfsdk:"id"` + ManagedZone types.String `tfsdk:"managed_zone"` + Project types.String `tfsdk:"project"` + KeySigningKeys types.List `tfsdk:"key_signing_keys"` + ZoneSigningKeys types.List `tfsdk:"zone_signing_keys"` +} + +type GoogleZoneSigningKey struct { + Algorithm types.String `tfsdk:"algorithm"` + CreationTime types.String `tfsdk:"creation_time"` + Description types.String `tfsdk:"description"` + Id types.String `tfsdk:"id"` + IsActive types.Bool `tfsdk:"is_active"` + KeyLength types.Int64 `tfsdk:"key_length"` + KeyTag types.Int64 `tfsdk:"key_tag"` + PublicKey types.String `tfsdk:"public_key"` + Digests types.List `tfsdk:"digests"` +} + +type GoogleKeySigningKey struct { + Algorithm types.String 
`tfsdk:"algorithm"` + CreationTime types.String `tfsdk:"creation_time"` + Description types.String `tfsdk:"description"` + Id types.String `tfsdk:"id"` + IsActive types.Bool `tfsdk:"is_active"` + KeyLength types.Int64 `tfsdk:"key_length"` + KeyTag types.Int64 `tfsdk:"key_tag"` + PublicKey types.String `tfsdk:"public_key"` + Digests types.List `tfsdk:"digests"` + + DSRecord types.String `tfsdk:"ds_record"` +} + +type GoogleZoneSigningKeyDigest struct { + Digest types.String `tfsdk:"digest"` + Type types.String `tfsdk:"type"` +} + +var ( + digestAttrTypes = map[string]attr.Type{ + "digest": types.StringType, + "type": types.StringType, + } +) + +func (d *GoogleDnsKeysDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_dns_keys" +} + +func (d *GoogleDnsKeysDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + // This description is used by the documentation generator and the language server. 
+ MarkdownDescription: "Get the DNSKEY and DS records of DNSSEC-signed managed zones", + + Attributes: map[string]schema.Attribute{ + "managed_zone": schema.StringAttribute{ + Description: "The Name of the zone.", + MarkdownDescription: "The Name of the zone.", + Required: true, + }, + "project": schema.StringAttribute{ + Description: "The ID of the project for the Google Cloud.", + MarkdownDescription: "The ID of the project for the Google Cloud.", + Optional: true, + Computed: true, + }, + "id": schema.StringAttribute{ + Description: "DNS keys identifier", + MarkdownDescription: "DNS keys identifier", + Computed: true, + }, + // Issue with using computed blocks in the plugin framework with protocol 5 + // See: https://developer.hashicorp.com/terraform/plugin/framework/migrating/attributes-blocks/blocks-computed#framework + "zone_signing_keys": schema.ListAttribute{ + Description: "A list of Zone-signing key (ZSK) records.", + MarkdownDescription: "A list of Zone-signing key (ZSK) records.", + ElementType: dnsKeyObject(), + Computed: true, + }, + // Issue with using computed blocks in the plugin framework with protocol 5 + // See: https://developer.hashicorp.com/terraform/plugin/framework/migrating/attributes-blocks/blocks-computed#framework + "key_signing_keys": schema.ListAttribute{ + Description: "A list of Key-signing key (KSK) records.", + MarkdownDescription: "A list of Key-signing key (KSK) records.", + ElementType: kskObject(), + Computed: true, + }, + }, + } +} + +func (d *GoogleDnsKeysDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + p, ok := req.ProviderData.(*fwtransport.FrameworkProviderConfig) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *fwtransport.FrameworkProviderConfig, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + d.client = p.NewDnsClient(p.UserAgent, &resp.Diagnostics) + d.project = p.Project +} + +func (d *GoogleDnsKeysDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data GoogleDnsKeysModel + var metaData *fwmodels.ProviderMetaModel + var diags diag.Diagnostics + + // Read Provider meta into the meta model + resp.Diagnostics.Append(req.ProviderMeta.Get(ctx, &metaData)...) + if resp.Diagnostics.HasError() { + return + } + + d.client.UserAgent = fwtransport.GenerateFrameworkUserAgentString(metaData, d.client.UserAgent) + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + fv := fwresource.ParseProjectFieldValueFramework("managedZones", data.ManagedZone.ValueString(), "project", data.Project, d.project, false, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + + data.Project = types.StringValue(fv.Project) + data.ManagedZone = types.StringValue(fv.Name) + + data.Id = types.StringValue(fmt.Sprintf("projects/%s/managedZones/%s", data.Project.ValueString(), data.ManagedZone.ValueString())) + + tflog.Debug(ctx, fmt.Sprintf("fetching DNS keys from managed zone %s", data.ManagedZone.ValueString())) + + clientResp, err := d.client.DnsKeys.List(data.Project.ValueString(), data.ManagedZone.ValueString()).Do() + if err != nil { + if !transport_tpg.IsGoogleApiErrorWithCode(err, 404) { + resp.Diagnostics.AddError(fmt.Sprintf("Error when reading or editing dataSourceDnsKeys"), err.Error()) + } + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+ return + } + + tflog.Trace(ctx, "read dns keys data source") + + zoneSigningKeys, keySigningKeys := flattenSigningKeys(ctx, clientResp.DnsKeys, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + + zskObjType := types.ObjectType{}.WithAttributeTypes(getDnsKeyAttrs("zoneSigning")) + data.ZoneSigningKeys, diags = types.ListValueFrom(ctx, zskObjType, zoneSigningKeys) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + kskObjType := types.ObjectType{}.WithAttributeTypes(getDnsKeyAttrs("keySigning")) + data.KeySigningKeys, diags = types.ListValueFrom(ctx, kskObjType, keySigningKeys) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +// dnsKeyObject is a helper function for the zone_signing_keys schema and +// is also used by key_signing_keys schema (called in kskObject defined below) +func dnsKeyObject() types.ObjectType { + // See comments in Schema function + // Also: https://github.com/hashicorp/terraform-plugin-framework/issues/214#issuecomment-1194666110 + return types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "algorithm": types.StringType, + "creation_time": types.StringType, + "description": types.StringType, + "id": types.StringType, + "is_active": types.BoolType, + "key_length": types.Int64Type, + "key_tag": types.Int64Type, + "public_key": types.StringType, + "digests": types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "digest": types.StringType, + "type": types.StringType, + }, + }, + }, + }, + } +} + +// kskObject is a helper function for the key_signing_keys schema +func kskObject() types.ObjectType { + nbo := dnsKeyObject() + + nbo.AttrTypes["ds_record"] = types.StringType + + return nbo +} + +func flattenSigningKeys(ctx context.Context, signingKeys []*dns.DnsKey, diags *diag.Diagnostics) ([]types.Object, []types.Object) { + var 
zoneSigningKeys []types.Object + var keySigningKeys []types.Object + var d diag.Diagnostics + + for _, signingKey := range signingKeys { + if signingKey != nil { + var digests []types.Object + for _, dig := range signingKey.Digests { + digest := GoogleZoneSigningKeyDigest{ + Digest: types.StringValue(dig.Digest), + Type: types.StringValue(dig.Type), + } + obj, d := types.ObjectValueFrom(ctx, digestAttrTypes, digest) + diags.Append(d...) + if diags.HasError() { + return zoneSigningKeys, keySigningKeys + } + + digests = append(digests, obj) + } + + if signingKey.Type == "keySigning" && len(signingKey.Digests) > 0 { + ksk := GoogleKeySigningKey{ + Algorithm: types.StringValue(signingKey.Algorithm), + CreationTime: types.StringValue(signingKey.CreationTime), + Description: types.StringValue(signingKey.Description), + Id: types.StringValue(signingKey.Id), + IsActive: types.BoolValue(signingKey.IsActive), + KeyLength: types.Int64Value(signingKey.KeyLength), + KeyTag: types.Int64Value(signingKey.KeyTag), + PublicKey: types.StringValue(signingKey.PublicKey), + } + + objType := types.ObjectType{}.WithAttributeTypes(digestAttrTypes) + ksk.Digests, d = types.ListValueFrom(ctx, objType, digests) + diags.Append(d...) + if diags.HasError() { + return zoneSigningKeys, keySigningKeys + } + + dsRecord, err := generateDSRecord(signingKey) + if err != nil { + diags.AddError("error generating ds record", err.Error()) + return zoneSigningKeys, keySigningKeys + } + + ksk.DSRecord = types.StringValue(dsRecord) + + obj, d := types.ObjectValueFrom(ctx, getDnsKeyAttrs(signingKey.Type), ksk) + diags.Append(d...) 
+ if diags.HasError() { + return zoneSigningKeys, keySigningKeys + } + keySigningKeys = append(keySigningKeys, obj) + } else { + zsk := GoogleZoneSigningKey{ + Algorithm: types.StringValue(signingKey.Algorithm), + CreationTime: types.StringValue(signingKey.CreationTime), + Description: types.StringValue(signingKey.Description), + Id: types.StringValue(signingKey.Id), + IsActive: types.BoolValue(signingKey.IsActive), + KeyLength: types.Int64Value(signingKey.KeyLength), + KeyTag: types.Int64Value(signingKey.KeyTag), + PublicKey: types.StringValue(signingKey.PublicKey), + } + + objType := types.ObjectType{}.WithAttributeTypes(digestAttrTypes) + zsk.Digests, d = types.ListValueFrom(ctx, objType, digests) + diags.Append(d...) + if diags.HasError() { + return zoneSigningKeys, keySigningKeys + } + + obj, d := types.ObjectValueFrom(ctx, getDnsKeyAttrs("zoneSigning"), zsk) + diags.Append(d...) + if diags.HasError() { + return zoneSigningKeys, keySigningKeys + } + zoneSigningKeys = append(zoneSigningKeys, obj) + } + + } + } + + return zoneSigningKeys, keySigningKeys +} + +// DNSSEC Algorithm Numbers: https://www.iana.org/assignments/dns-sec-alg-numbers/dns-sec-alg-numbers.xhtml +// The following are algorithms that are supported by Cloud DNS +var dnssecAlgoNums = map[string]int{ + "rsasha1": 5, + "rsasha256": 8, + "rsasha512": 10, + "ecdsap256sha256": 13, + "ecdsap384sha384": 14, +} + +// DS RR Digest Types: https://www.iana.org/assignments/ds-rr-types/ds-rr-types.xhtml +// The following are digests that are supported by Cloud DNS +var dnssecDigestType = map[string]int{ + "sha1": 1, + "sha256": 2, + "sha384": 4, +} + +// generateDSRecord will generate the ds_record on key signing keys +func generateDSRecord(signingKey *dns.DnsKey) (string, error) { + algoNum, found := dnssecAlgoNums[signingKey.Algorithm] + if !found { + return "", fmt.Errorf("DNSSEC Algorithm number for %s not found", signingKey.Algorithm) + } + + digestType, found := 
dnssecDigestType[signingKey.Digests[0].Type] + if !found { + return "", fmt.Errorf("DNSSEC Digest type for %s not found", signingKey.Digests[0].Type) + } + + return fmt.Sprintf("%d %d %d %s", + signingKey.KeyTag, + algoNum, + digestType, + signingKey.Digests[0].Digest), nil +} + +func getDnsKeyAttrs(keyType string) map[string]attr.Type { + dnsKeyAttrs := map[string]attr.Type{ + "algorithm": types.StringType, + "creation_time": types.StringType, + "description": types.StringType, + "id": types.StringType, + "is_active": types.BoolType, + "key_length": types.Int64Type, + "key_tag": types.Int64Type, + "public_key": types.StringType, + "digests": types.ListType{}.WithElementType(types.ObjectType{}.WithAttributeTypes(digestAttrTypes)), + } + + if keyType == "keySigning" { + dnsKeyAttrs["ds_record"] = types.StringType + } + + return dnsKeyAttrs +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/data_source_dns_managed_zone.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/data_source_dns_managed_zone.go new file mode 100644 index 0000000000..f80c5c2679 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/data_source_dns_managed_zone.go @@ -0,0 +1,184 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package dns + +import ( + "context" + "fmt" + + "google.golang.org/api/dns/v1" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + "github.com/hashicorp/terraform-provider-google/google/fwresource" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" +) + +// Ensure the implementation satisfies the expected interfaces +var ( + _ datasource.DataSource = &GoogleDnsManagedZoneDataSource{} + _ datasource.DataSourceWithConfigure = &GoogleDnsManagedZoneDataSource{} +) + +func NewGoogleDnsManagedZoneDataSource() datasource.DataSource { + return &GoogleDnsManagedZoneDataSource{} +} + +// GoogleDnsManagedZoneDataSource defines the data source implementation +type GoogleDnsManagedZoneDataSource struct { + client *dns.Service + project types.String +} + +type GoogleDnsManagedZoneModel struct { + Id types.String `tfsdk:"id"` + DnsName types.String `tfsdk:"dns_name"` + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + ManagedZoneId types.Int64 `tfsdk:"managed_zone_id"` + NameServers types.List `tfsdk:"name_servers"` + Visibility types.String `tfsdk:"visibility"` + Project types.String `tfsdk:"project"` +} + +func (d *GoogleDnsManagedZoneDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_dns_managed_zone" +} + +func (d *GoogleDnsManagedZoneDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + // This description is used by the documentation generator and the language server. 
+ MarkdownDescription: "Provides access to a zone's attributes within Google Cloud DNS", + + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Description: "A unique name for the resource.", + MarkdownDescription: "A unique name for the resource.", + Required: true, + }, + + // Google Cloud DNS ManagedZone resources do not have a SelfLink attribute. + "project": schema.StringAttribute{ + Description: "The ID of the project for the Google Cloud.", + MarkdownDescription: "The ID of the project for the Google Cloud.", + Optional: true, + }, + + "dns_name": schema.StringAttribute{ + Description: "The fully qualified DNS name of this zone.", + MarkdownDescription: "The fully qualified DNS name of this zone.", + Computed: true, + }, + + "description": schema.StringAttribute{ + Description: "A textual description field.", + MarkdownDescription: "A textual description field.", + Computed: true, + }, + + "managed_zone_id": schema.Int64Attribute{ + Description: "Unique identifier for the resource; defined by the server.", + MarkdownDescription: "Unique identifier for the resource; defined by the server.", + Computed: true, + }, + + "name_servers": schema.ListAttribute{ + Description: "The list of nameservers that will be authoritative for this " + + "domain. Use NS records to redirect from your DNS provider to these names, " + + "thus making Google Cloud DNS authoritative for this zone.", + MarkdownDescription: "The list of nameservers that will be authoritative for this " + + "domain. 
Use NS records to redirect from your DNS provider to these names, " + + "thus making Google Cloud DNS authoritative for this zone.", + Computed: true, + ElementType: types.StringType, + }, + + "visibility": schema.StringAttribute{ + Description: "The zone's visibility: public zones are exposed to the Internet, " + + "while private zones are visible only to Virtual Private Cloud resources.", + MarkdownDescription: "The zone's visibility: public zones are exposed to the Internet, " + + "while private zones are visible only to Virtual Private Cloud resources.", + Computed: true, + }, + + "id": schema.StringAttribute{ + Description: "DNS managed zone identifier", + MarkdownDescription: "DNS managed zone identifier", + Computed: true, + }, + }, + } +} + +func (d *GoogleDnsManagedZoneDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + p, ok := req.ProviderData.(*fwtransport.FrameworkProviderConfig) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *fwtransport.FrameworkProviderConfig, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + d.client = p.NewDnsClient(p.UserAgent, &resp.Diagnostics) + d.project = p.Project +} + +func (d *GoogleDnsManagedZoneDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data GoogleDnsManagedZoneModel + var metaData *fwmodels.ProviderMetaModel + var diags diag.Diagnostics + + // Read Provider meta into the meta model + resp.Diagnostics.Append(req.ProviderMeta.Get(ctx, &metaData)...) 
+ if resp.Diagnostics.HasError() { + return + } + + d.client.UserAgent = fwtransport.GenerateFrameworkUserAgentString(metaData, d.client.UserAgent) + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + data.Project = fwresource.GetProjectFramework(data.Project, d.project, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + + data.Id = types.StringValue(fmt.Sprintf("projects/%s/managedZones/%s", data.Project.ValueString(), data.Name.ValueString())) + clientResp, err := d.client.ManagedZones.Get(data.Project.ValueString(), data.Name.ValueString()).Do() + if err != nil { + fwtransport.HandleDatasourceNotFoundError(ctx, err, &resp.State, fmt.Sprintf("dataSourceDnsManagedZone %q", data.Name.ValueString()), &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + } + + tflog.Trace(ctx, "read dns record set data source") + + data.DnsName = types.StringValue(clientResp.DnsName) + data.Description = types.StringValue(clientResp.Description) + data.ManagedZoneId = types.Int64Value(int64(clientResp.Id)) + data.Visibility = types.StringValue(clientResp.Visibility) + data.NameServers, diags = types.ListValueFrom(ctx, types.StringType, clientResp.NameServers) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/data_source_dns_record_set.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/data_source_dns_record_set.go new file mode 100644 index 0000000000..469c43666c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/data_source_dns_record_set.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package dns + +import ( + "context" + "fmt" + + "google.golang.org/api/dns/v1" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + "github.com/hashicorp/terraform-provider-google/google/fwresource" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" +) + +// Ensure the implementation satisfies the expected interfaces +var ( + _ datasource.DataSource = &GoogleDnsRecordSetDataSource{} + _ datasource.DataSourceWithConfigure = &GoogleDnsRecordSetDataSource{} +) + +func NewGoogleDnsRecordSetDataSource() datasource.DataSource { + return &GoogleDnsRecordSetDataSource{} +} + +// GoogleDnsRecordSetDataSource defines the data source implementation +type GoogleDnsRecordSetDataSource struct { + client *dns.Service + project types.String +} + +type GoogleDnsRecordSetModel struct { + Id types.String `tfsdk:"id"` + ManagedZone types.String `tfsdk:"managed_zone"` + Name types.String `tfsdk:"name"` + Rrdatas types.List `tfsdk:"rrdatas"` + Ttl types.Int64 `tfsdk:"ttl"` + Type types.String `tfsdk:"type"` + Project types.String `tfsdk:"project"` +} + +func (d *GoogleDnsRecordSetDataSource) 
Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_dns_record_set" +} + +func (d *GoogleDnsRecordSetDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + // This description is used by the documentation generator and the language server. + MarkdownDescription: "A DNS record set within Google Cloud DNS", + + Attributes: map[string]schema.Attribute{ + "managed_zone": schema.StringAttribute{ + MarkdownDescription: "The Name of the zone.", + Required: true, + }, + "name": schema.StringAttribute{ + MarkdownDescription: "The DNS name for the resource.", + Required: true, + }, + "type": schema.StringAttribute{ + MarkdownDescription: "The identifier of a supported record type. See the list of Supported DNS record types.", + Required: true, + }, + "project": schema.StringAttribute{ + MarkdownDescription: "The ID of the project for the Google Cloud.", + Optional: true, + }, + "rrdatas": schema.ListAttribute{ + MarkdownDescription: "The string data for the records in this record set.", + Computed: true, + ElementType: types.StringType, + }, + "ttl": schema.Int64Attribute{ + MarkdownDescription: "The time-to-live of this record set (seconds).", + Computed: true, + }, + "id": schema.StringAttribute{ + MarkdownDescription: "DNS record set identifier", + Computed: true, + }, + }, + } +} + +func (d *GoogleDnsRecordSetDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + p, ok := req.ProviderData.(*fwtransport.FrameworkProviderConfig) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *fwtransport.FrameworkProviderConfig, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + d.client = p.NewDnsClient(p.UserAgent, &resp.Diagnostics) + d.project = p.Project +} + +func (d *GoogleDnsRecordSetDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data GoogleDnsRecordSetModel + var metaData *fwmodels.ProviderMetaModel + var diags diag.Diagnostics + + // Read Provider meta into the meta model + resp.Diagnostics.Append(req.ProviderMeta.Get(ctx, &metaData)...) + if resp.Diagnostics.HasError() { + return + } + + d.client.UserAgent = fwtransport.GenerateFrameworkUserAgentString(metaData, d.client.UserAgent) + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + data.Project = fwresource.GetProjectFramework(data.Project, d.project, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + + data.Id = types.StringValue(fmt.Sprintf("projects/%s/managedZones/%s/rrsets/%s/%s", data.Project.ValueString(), data.ManagedZone.ValueString(), data.Name.ValueString(), data.Type.ValueString())) + clientResp, err := d.client.ResourceRecordSets.List(data.Project.ValueString(), data.ManagedZone.ValueString()).Name(data.Name.ValueString()).Type(data.Type.ValueString()).Do() + if err != nil { + fwtransport.HandleDatasourceNotFoundError(ctx, err, &resp.State, fmt.Sprintf("dataSourceDnsRecordSet %q", data.Name.ValueString()), &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + } + if len(clientResp.Rrsets) != 1 { + resp.Diagnostics.AddError("only expected 1 record set", fmt.Sprintf("%d record sets were returned", len(clientResp.Rrsets))) + } + + tflog.Trace(ctx, "read dns record set data source") + + data.Type = types.StringValue(clientResp.Rrsets[0].Type) + data.Ttl = types.Int64Value(clientResp.Rrsets[0].Ttl) + data.Rrdatas, diags = types.ListValueFrom(ctx, types.StringType, 
clientResp.Rrsets[0].Rrdatas) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dns_change.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/dns_change.go similarity index 91% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dns_change.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/dns_change.go index 9afcf21bc4..37c7a3a654 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dns_change.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/dns_change.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package dns import ( "time" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/iam_dns_managed_zone.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/iam_dns_managed_zone.go new file mode 100644 index 0000000000..e93a2d1ee3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/iam_dns_managed_zone.go @@ -0,0 +1,221 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dns + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var DNSManagedZoneIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "managed_zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type DNSManagedZoneIamUpdater struct { + project string + managedZone string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func DNSManagedZoneIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("managed_zone"); ok { + values["managed_zone"] = v.(string) + } + + // We may have gotten either a long or short name, so 
attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/managedZones/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("managed_zone").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &DNSManagedZoneIamUpdater{ + project: values["project"], + managedZone: values["managed_zone"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("managed_zone", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting managed_zone: %s", err) + } + + return u, nil +} + +func DNSManagedZoneIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/managedZones/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &DNSManagedZoneIamUpdater{ + project: values["project"], + managedZone: values["managed_zone"], + d: d, + Config: config, + } + if err := d.Set("managed_zone", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting managed_zone: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *DNSManagedZoneIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyManagedZoneUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *DNSManagedZoneIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyManagedZoneUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DNSManagedZoneIamUpdater) qualifyManagedZoneUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{DNSBasePath}}%s:%s", fmt.Sprintf("projects/%s/managedZones/%s", u.project, u.managedZone), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *DNSManagedZoneIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/managedZones/%s", u.project, u.managedZone) +} + +func (u 
*DNSManagedZoneIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dns-managedzone-%s", u.GetResourceId()) +} + +func (u *DNSManagedZoneIamUpdater) DescribeResource() string { + return fmt.Sprintf("dns managedzone %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dns_managed_zone.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_managed_zone.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dns_managed_zone.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_managed_zone.go index 60d4ebcc5f..73a846014d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dns_managed_zone.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_managed_zone.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package dns import ( "bytes" @@ -25,6 +28,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "google.golang.org/api/dns/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceDNSManagedZone() *schema.Resource { @@ -101,7 +108,7 @@ default_key_specs can only be updated when the state is 'off'.`, "algorithm": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"ecdsap256sha256", "ecdsap384sha384", "rsasha1", "rsasha256", "rsasha512", ""}), + ValidateFunc: verify.ValidateEnum([]string{"ecdsap256sha256", "ecdsap384sha384", "rsasha1", "rsasha256", "rsasha512", ""}), Description: `String mnemonic specifying the DNSSEC algorithm of this key Possible values: ["ecdsap256sha256", "ecdsap384sha384", "rsasha1", "rsasha256", "rsasha512"]`, }, "key_length": { @@ -112,7 +119,7 @@ default_key_specs can only be updated when the state is 'off'.`, "key_type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"keySigning", "zoneSigning", ""}), + ValidateFunc: verify.ValidateEnum([]string{"keySigning", "zoneSigning", ""}), Description: `Specifies whether this is a key signing key (KSK) or a zone signing key (ZSK). Key signing keys have the Secure Entry Point flag set and, when active, will only be used to sign @@ -141,7 +148,7 @@ to sign all other types of resource record sets. 
Possible values: ["keySigning", Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"nsec", "nsec3", ""}), + ValidateFunc: verify.ValidateEnum([]string{"nsec", "nsec3", ""}), Description: `Specifies the mechanism used to provide authenticated denial-of-existence responses. non_existence can only be updated when the state is 'off'. Possible values: ["nsec", "nsec3"]`, AtLeastOneOf: []string{"dnssec_config.0.kind", "dnssec_config.0.non_existence", "dnssec_config.0.state", "dnssec_config.0.default_key_specs"}, @@ -149,7 +156,7 @@ non_existence can only be updated when the state is 'off'. Possible values: ["ns "state": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"off", "on", "transfer", ""}), + ValidateFunc: verify.ValidateEnum([]string{"off", "on", "transfer", ""}), Description: `Specifies whether DNSSEC is enabled, and what mode it is in Possible values: ["off", "on", "transfer"]`, AtLeastOneOf: []string{"dnssec_config.0.kind", "dnssec_config.0.non_existence", "dnssec_config.0.state", "dnssec_config.0.default_key_specs"}, }, @@ -175,11 +182,11 @@ one target is given.`, Set: func(v interface{}) int { raw := v.(map[string]interface{}) if address, ok := raw["ipv4_address"]; ok { - hashcode(address.(string)) + tpgresource.Hashcode(address.(string)) } var buf bytes.Buffer schema.SerializeResourceForHash(&buf, raw, dnsManagedZoneForwardingConfigTargetNameServersSchema()) - return hashcode(buf.String()) + return tpgresource.Hashcode(buf.String()) }, }, }, @@ -209,7 +216,7 @@ zone. The value of this field contains the network to peer with.`, "network_url": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The id or fully qualified URL of the VPC network to forward queries to. 
This should be formatted like 'projects/{project}/global/networks/{network}' or 'https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}'`, @@ -243,11 +250,11 @@ blocks in an update and then apply another update adding all of them back simult } raw := v.(map[string]interface{}) if url, ok := raw["network_url"]; ok { - return selfLinkNameHash(url) + return tpgresource.SelfLinkNameHash(url) } var buf bytes.Buffer schema.SerializeResourceForHash(&buf, raw, dnsManagedZonePrivateVisibilityConfigNetworksSchema()) - return hashcode(buf.String()) + return tpgresource.Hashcode(buf.String()) }, }, "gke_clusters": { @@ -259,8 +266,8 @@ blocks in an update and then apply another update adding all of them back simult "gke_cluster_name": { Type: schema.TypeString, Required: true, - Description: `The resource name of the cluster to bind this ManagedZone to. -This should be specified in the format like + Description: `The resource name of the cluster to bind this ManagedZone to. +This should be specified in the format like 'projects/*/locations/*/clusters/*'`, }, }, @@ -273,8 +280,8 @@ This should be specified in the format like Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"private", "public", ""}), - DiffSuppressFunc: caseDiffSuppress, + ValidateFunc: verify.ValidateEnum([]string{"private", "public", ""}), + DiffSuppressFunc: tpgresource.CaseDiffSuppress, Description: `The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources. 
Default value: "public" Possible values: ["private", "public"]`, Default: "public", @@ -322,7 +329,7 @@ func dnsManagedZonePrivateVisibilityConfigNetworksSchema() *schema.Resource { "network_url": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The id or fully qualified URL of the VPC network to bind to. This should be formatted like 'projects/{project}/global/networks/{network}' or 'https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}'`, @@ -342,7 +349,7 @@ func dnsManagedZoneForwardingConfigTargetNameServersSchema() *schema.Resource { "forwarding_path": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"default", "private", ""}), + ValidateFunc: verify.ValidateEnum([]string{"default", "private", ""}), Description: `Forwarding path for this TargetNameServer. If unset or 'default' Cloud DNS will make forwarding decision based on address ranges, i.e. RFC1918 addresses go to the VPC, Non-RFC1918 addresses go to the Internet. When set to 'private', Cloud DNS will always send queries through VPC for this target Possible values: ["default", "private"]`, @@ -352,8 +359,8 @@ to the Internet. 
When set to 'private', Cloud DNS will always send queries throu } func resourceDNSManagedZoneCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -362,37 +369,37 @@ func resourceDNSManagedZoneCreate(d *schema.ResourceData, meta interface{}) erro descriptionProp, err := expandDNSManagedZoneDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } dnsNameProp, err := expandDNSManagedZoneDnsName(d.Get("dns_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("dns_name"); !isEmptyValue(reflect.ValueOf(dnsNameProp)) && (ok || !reflect.DeepEqual(v, dnsNameProp)) { + } else if v, ok := d.GetOkExists("dns_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(dnsNameProp)) && (ok || !reflect.DeepEqual(v, dnsNameProp)) { obj["dnsName"] = dnsNameProp } dnssecConfigProp, err := expandDNSManagedZoneDnssecConfig(d.Get("dnssec_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("dnssec_config"); !isEmptyValue(reflect.ValueOf(dnssecConfigProp)) && (ok || !reflect.DeepEqual(v, dnssecConfigProp)) { + } else if v, ok := d.GetOkExists("dnssec_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(dnssecConfigProp)) && (ok || !reflect.DeepEqual(v, dnssecConfigProp)) { obj["dnssecConfig"] = dnssecConfigProp } nameProp, err := expandDNSManagedZoneName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } labelsProp, err := expandDNSManagedZoneLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } visibilityProp, err := expandDNSManagedZoneVisibility(d.Get("visibility"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("visibility"); !isEmptyValue(reflect.ValueOf(visibilityProp)) && (ok || !reflect.DeepEqual(v, visibilityProp)) { + } else if v, ok := d.GetOkExists("visibility"); !tpgresource.IsEmptyValue(reflect.ValueOf(visibilityProp)) && (ok || !reflect.DeepEqual(v, visibilityProp)) { obj["visibility"] = visibilityProp } privateVisibilityConfigProp, err := expandDNSManagedZonePrivateVisibilityConfig(d.Get("private_visibility_config"), d, config) @@ -404,23 +411,23 @@ func resourceDNSManagedZoneCreate(d *schema.ResourceData, meta interface{}) erro forwardingConfigProp, err := expandDNSManagedZoneForwardingConfig(d.Get("forwarding_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("forwarding_config"); !isEmptyValue(reflect.ValueOf(forwardingConfigProp)) && (ok || !reflect.DeepEqual(v, forwardingConfigProp)) { + } else if v, ok := d.GetOkExists("forwarding_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(forwardingConfigProp)) && (ok || !reflect.DeepEqual(v, forwardingConfigProp)) { obj["forwardingConfig"] = forwardingConfigProp } peeringConfigProp, err := expandDNSManagedZonePeeringConfig(d.Get("peering_config"), d, 
config) if err != nil { return err - } else if v, ok := d.GetOkExists("peering_config"); !isEmptyValue(reflect.ValueOf(peeringConfigProp)) && (ok || !reflect.DeepEqual(v, peeringConfigProp)) { + } else if v, ok := d.GetOkExists("peering_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(peeringConfigProp)) && (ok || !reflect.DeepEqual(v, peeringConfigProp)) { obj["peeringConfig"] = peeringConfigProp } cloudLoggingConfigProp, err := expandDNSManagedZoneCloudLoggingConfig(d.Get("cloud_logging_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("cloud_logging_config"); !isEmptyValue(reflect.ValueOf(cloudLoggingConfigProp)) && (ok || !reflect.DeepEqual(v, cloudLoggingConfigProp)) { + } else if v, ok := d.GetOkExists("cloud_logging_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(cloudLoggingConfigProp)) && (ok || !reflect.DeepEqual(v, cloudLoggingConfigProp)) { obj["cloudLoggingConfig"] = cloudLoggingConfigProp } - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/managedZones") + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/managedZones") if err != nil { return err } @@ -428,24 +435,32 @@ func resourceDNSManagedZoneCreate(d *schema.ResourceData, meta interface{}) erro log.Printf("[DEBUG] Creating new ManagedZone: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for ManagedZone: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + 
Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating ManagedZone: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/managedZones/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/managedZones/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -457,33 +472,39 @@ func resourceDNSManagedZoneCreate(d *schema.ResourceData, meta interface{}) erro } func resourceDNSManagedZoneRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/managedZones/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/managedZones/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for ManagedZone: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DNSManagedZone %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, 
fmt.Sprintf("DNSManagedZone %q", d.Id())) } // Explicitly set virtual fields to default values if unset @@ -540,15 +561,15 @@ func resourceDNSManagedZoneRead(d *schema.ResourceData, meta interface{}) error } func resourceDNSManagedZoneUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for ManagedZone: %s", err) } @@ -558,37 +579,37 @@ func resourceDNSManagedZoneUpdate(d *schema.ResourceData, meta interface{}) erro descriptionProp, err := expandDNSManagedZoneDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } dnsNameProp, err := expandDNSManagedZoneDnsName(d.Get("dns_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("dns_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dnsNameProp)) { + } else if v, ok := d.GetOkExists("dns_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dnsNameProp)) { obj["dnsName"] = dnsNameProp } dnssecConfigProp, err := expandDNSManagedZoneDnssecConfig(d.Get("dnssec_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("dnssec_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dnssecConfigProp)) { + } else if v, ok := d.GetOkExists("dnssec_config"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dnssecConfigProp)) { obj["dnssecConfig"] = dnssecConfigProp } nameProp, err := expandDNSManagedZoneName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } labelsProp, err := expandDNSManagedZoneLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } visibilityProp, err := expandDNSManagedZoneVisibility(d.Get("visibility"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("visibility"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, visibilityProp)) { + } else if v, ok := d.GetOkExists("visibility"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, visibilityProp)) { obj["visibility"] = visibilityProp } privateVisibilityConfigProp, err := expandDNSManagedZonePrivateVisibilityConfig(d.Get("private_visibility_config"), d, config) @@ -600,19 +621,19 @@ func resourceDNSManagedZoneUpdate(d *schema.ResourceData, meta interface{}) erro forwardingConfigProp, err := expandDNSManagedZoneForwardingConfig(d.Get("forwarding_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("forwarding_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, forwardingConfigProp)) { + } else if v, ok := d.GetOkExists("forwarding_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
forwardingConfigProp)) { obj["forwardingConfig"] = forwardingConfigProp } peeringConfigProp, err := expandDNSManagedZonePeeringConfig(d.Get("peering_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("peering_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peeringConfigProp)) { + } else if v, ok := d.GetOkExists("peering_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peeringConfigProp)) { obj["peeringConfig"] = peeringConfigProp } cloudLoggingConfigProp, err := expandDNSManagedZoneCloudLoggingConfig(d.Get("cloud_logging_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("cloud_logging_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cloudLoggingConfigProp)) { + } else if v, ok := d.GetOkExists("cloud_logging_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cloudLoggingConfigProp)) { obj["cloudLoggingConfig"] = cloudLoggingConfigProp } @@ -621,7 +642,7 @@ func resourceDNSManagedZoneUpdate(d *schema.ResourceData, meta interface{}) erro return err } - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/managedZones/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/managedZones/{{name}}") if err != nil { return err } @@ -629,11 +650,19 @@ func resourceDNSManagedZoneUpdate(d *schema.ResourceData, meta interface{}) erro log.Printf("[DEBUG] Updating ManagedZone %q: %#v", d.Id(), obj) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + 
Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating ManagedZone %q: %s", d.Id(), err) @@ -645,21 +674,21 @@ func resourceDNSManagedZoneUpdate(d *schema.ResourceData, meta interface{}) erro } func resourceDNSManagedZoneDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for ManagedZone: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/managedZones/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/managedZones/{{name}}") if err != nil { return err } @@ -738,13 +767,21 @@ func resourceDNSManagedZoneDelete(d *schema.ResourceData, meta interface{}) erro log.Printf("[DEBUG] Deleting ManagedZone %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "ManagedZone") + return transport_tpg.HandleNotFoundError(err, d, "ManagedZone") } 
log.Printf("[DEBUG] Finished deleting ManagedZone %q: %#v", d.Id(), res) @@ -752,8 +789,8 @@ func resourceDNSManagedZoneDelete(d *schema.ResourceData, meta interface{}) erro } func resourceDNSManagedZoneImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/managedZones/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -762,7 +799,7 @@ func resourceDNSManagedZoneImport(d *schema.ResourceData, meta interface{}) ([]* } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/managedZones/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/managedZones/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -776,15 +813,15 @@ func resourceDNSManagedZoneImport(d *schema.ResourceData, meta interface{}) ([]* return []*schema.ResourceData{d}, nil } -func flattenDNSManagedZoneDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneDnsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneDnsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneDnssecConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneDnssecConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -803,19 +840,19 @@ func flattenDNSManagedZoneDnssecConfig(v interface{}, d *schema.ResourceData, co flattenDNSManagedZoneDnssecConfigDefaultKeySpecs(original["defaultKeySpecs"], d, config) return 
[]interface{}{transformed} } -func flattenDNSManagedZoneDnssecConfigKind(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneDnssecConfigKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneDnssecConfigNonExistence(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneDnssecConfigNonExistence(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneDnssecConfigState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneDnssecConfigState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneDnssecConfigDefaultKeySpecs(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneDnssecConfigDefaultKeySpecs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -836,14 +873,14 @@ func flattenDNSManagedZoneDnssecConfigDefaultKeySpecs(v interface{}, d *schema.R } return transformed } -func flattenDNSManagedZoneDnssecConfigDefaultKeySpecsAlgorithm(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneDnssecConfigDefaultKeySpecsAlgorithm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneDnssecConfigDefaultKeySpecsKeyLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneDnssecConfigDefaultKeySpecsKeyLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } 
} @@ -857,18 +894,18 @@ func flattenDNSManagedZoneDnssecConfigDefaultKeySpecsKeyLength(v interface{}, d return v // let terraform core handle it otherwise } -func flattenDNSManagedZoneDnssecConfigDefaultKeySpecsKeyType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneDnssecConfigDefaultKeySpecsKeyType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneDnssecConfigDefaultKeySpecsKind(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneDnssecConfigDefaultKeySpecsKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneManagedZoneID(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneManagedZoneID(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -882,31 +919,31 @@ func flattenDNSManagedZoneManagedZoneID(v interface{}, d *schema.ResourceData, c return v // let terraform core handle it otherwise } -func flattenDNSManagedZoneName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneNameServers(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneNameServers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneCreationTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneCreationTime(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneVisibility(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenDNSManagedZoneVisibility(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return "public" } return v } -func flattenDNSManagedZonePrivateVisibilityConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZonePrivateVisibilityConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -921,7 +958,7 @@ func flattenDNSManagedZonePrivateVisibilityConfig(v interface{}, d *schema.Resou flattenDNSManagedZonePrivateVisibilityConfigNetworks(original["networks"], d, config) return []interface{}{transformed} } -func flattenDNSManagedZonePrivateVisibilityConfigGkeClusters(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZonePrivateVisibilityConfigGkeClusters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -939,11 +976,11 @@ func flattenDNSManagedZonePrivateVisibilityConfigGkeClusters(v interface{}, d *s } return transformed } -func flattenDNSManagedZonePrivateVisibilityConfigGkeClustersGkeClusterName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZonePrivateVisibilityConfigGkeClustersGkeClusterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZonePrivateVisibilityConfigNetworks(v interface{}, d *schema.ResourceData, 
config *Config) interface{} { +func flattenDNSManagedZonePrivateVisibilityConfigNetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -954,11 +991,11 @@ func flattenDNSManagedZonePrivateVisibilityConfigNetworks(v interface{}, d *sche } raw := v.(map[string]interface{}) if url, ok := raw["network_url"]; ok { - return selfLinkNameHash(url) + return tpgresource.SelfLinkNameHash(url) } var buf bytes.Buffer schema.SerializeResourceForHash(&buf, raw, dnsManagedZonePrivateVisibilityConfigNetworksSchema()) - return hashcode(buf.String()) + return tpgresource.Hashcode(buf.String()) }, []interface{}{}) for _, raw := range l { original := raw.(map[string]interface{}) @@ -972,11 +1009,11 @@ func flattenDNSManagedZonePrivateVisibilityConfigNetworks(v interface{}, d *sche } return transformed } -func flattenDNSManagedZonePrivateVisibilityConfigNetworksNetworkUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZonePrivateVisibilityConfigNetworksNetworkUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneForwardingConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneForwardingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -989,7 +1026,7 @@ func flattenDNSManagedZoneForwardingConfig(v interface{}, d *schema.ResourceData flattenDNSManagedZoneForwardingConfigTargetNameServers(original["targetNameServers"], d, config) return []interface{}{transformed} } -func flattenDNSManagedZoneForwardingConfigTargetNameServers(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneForwardingConfigTargetNameServers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -997,11 +1034,11 @@ func 
flattenDNSManagedZoneForwardingConfigTargetNameServers(v interface{}, d *sc transformed := schema.NewSet(func(v interface{}) int { raw := v.(map[string]interface{}) if address, ok := raw["ipv4_address"]; ok { - hashcode(address.(string)) + tpgresource.Hashcode(address.(string)) } var buf bytes.Buffer schema.SerializeResourceForHash(&buf, raw, dnsManagedZoneForwardingConfigTargetNameServersSchema()) - return hashcode(buf.String()) + return tpgresource.Hashcode(buf.String()) }, []interface{}{}) for _, raw := range l { original := raw.(map[string]interface{}) @@ -1016,15 +1053,15 @@ func flattenDNSManagedZoneForwardingConfigTargetNameServers(v interface{}, d *sc } return transformed } -func flattenDNSManagedZoneForwardingConfigTargetNameServersIpv4Address(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneForwardingConfigTargetNameServersIpv4Address(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneForwardingConfigTargetNameServersForwardingPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneForwardingConfigTargetNameServersForwardingPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZonePeeringConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZonePeeringConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1037,7 +1074,7 @@ func flattenDNSManagedZonePeeringConfig(v interface{}, d *schema.ResourceData, c flattenDNSManagedZonePeeringConfigTargetNetwork(original["targetNetwork"], d, config) return []interface{}{transformed} } -func flattenDNSManagedZonePeeringConfigTargetNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZonePeeringConfigTargetNetwork(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1050,11 +1087,11 @@ func flattenDNSManagedZonePeeringConfigTargetNetwork(v interface{}, d *schema.Re flattenDNSManagedZonePeeringConfigTargetNetworkNetworkUrl(original["networkUrl"], d, config) return []interface{}{transformed} } -func flattenDNSManagedZonePeeringConfigTargetNetworkNetworkUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZonePeeringConfigTargetNetworkNetworkUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenDNSManagedZoneCloudLoggingConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneCloudLoggingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1067,19 +1104,19 @@ func flattenDNSManagedZoneCloudLoggingConfig(v interface{}, d *schema.ResourceDa flattenDNSManagedZoneCloudLoggingConfigEnableLogging(original["enableLogging"], d, config) return []interface{}{transformed} } -func flattenDNSManagedZoneCloudLoggingConfigEnableLogging(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenDNSManagedZoneCloudLoggingConfigEnableLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandDNSManagedZoneDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZoneDnsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneDnsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZoneDnssecConfig(v interface{}, 
d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneDnssecConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1091,47 +1128,47 @@ func expandDNSManagedZoneDnssecConfig(v interface{}, d TerraformResourceData, co transformedKind, err := expandDNSManagedZoneDnssecConfigKind(original["kind"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKind); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKind); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["kind"] = transformedKind } transformedNonExistence, err := expandDNSManagedZoneDnssecConfigNonExistence(original["non_existence"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNonExistence); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNonExistence); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nonExistence"] = transformedNonExistence } transformedState, err := expandDNSManagedZoneDnssecConfigState(original["state"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedState); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["state"] = transformedState } transformedDefaultKeySpecs, err := expandDNSManagedZoneDnssecConfigDefaultKeySpecs(original["default_key_specs"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDefaultKeySpecs); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDefaultKeySpecs); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["defaultKeySpecs"] = transformedDefaultKeySpecs } return transformed, nil } -func 
expandDNSManagedZoneDnssecConfigKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneDnssecConfigKind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZoneDnssecConfigNonExistence(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneDnssecConfigNonExistence(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZoneDnssecConfigState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneDnssecConfigState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZoneDnssecConfigDefaultKeySpecs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneDnssecConfigDefaultKeySpecs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1144,28 +1181,28 @@ func expandDNSManagedZoneDnssecConfigDefaultKeySpecs(v interface{}, d TerraformR transformedAlgorithm, err := expandDNSManagedZoneDnssecConfigDefaultKeySpecsAlgorithm(original["algorithm"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAlgorithm); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAlgorithm); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["algorithm"] = transformedAlgorithm } transformedKeyLength, err := expandDNSManagedZoneDnssecConfigDefaultKeySpecsKeyLength(original["key_length"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKeyLength); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKeyLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["keyLength"] = transformedKeyLength } transformedKeyType, err := expandDNSManagedZoneDnssecConfigDefaultKeySpecsKeyType(original["key_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKeyType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKeyType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["keyType"] = transformedKeyType } transformedKind, err := expandDNSManagedZoneDnssecConfigDefaultKeySpecsKind(original["kind"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKind); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKind); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["kind"] = transformedKind } @@ -1174,27 +1211,27 @@ func expandDNSManagedZoneDnssecConfigDefaultKeySpecs(v interface{}, d TerraformR return req, nil } -func expandDNSManagedZoneDnssecConfigDefaultKeySpecsAlgorithm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneDnssecConfigDefaultKeySpecsAlgorithm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZoneDnssecConfigDefaultKeySpecsKeyLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneDnssecConfigDefaultKeySpecsKeyLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZoneDnssecConfigDefaultKeySpecsKeyType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneDnssecConfigDefaultKeySpecsKeyType(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZoneDnssecConfigDefaultKeySpecsKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneDnssecConfigDefaultKeySpecsKind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZoneName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZoneLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandDNSManagedZoneLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1205,11 +1242,11 @@ func expandDNSManagedZoneLabels(v interface{}, d TerraformResourceData, config * return m, nil } -func expandDNSManagedZoneVisibility(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneVisibility(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZonePrivateVisibilityConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZonePrivateVisibilityConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { // The API won't remove the the field unless an empty network array is sent. 
@@ -1225,21 +1262,21 @@ func expandDNSManagedZonePrivateVisibilityConfig(v interface{}, d TerraformResou transformedGkeClusters, err := expandDNSManagedZonePrivateVisibilityConfigGkeClusters(original["gke_clusters"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGkeClusters); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGkeClusters); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["gkeClusters"] = transformedGkeClusters } transformedNetworks, err := expandDNSManagedZonePrivateVisibilityConfigNetworks(original["networks"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNetworks); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNetworks); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["networks"] = transformedNetworks } return transformed, nil } -func expandDNSManagedZonePrivateVisibilityConfigNetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZonePrivateVisibilityConfigNetworks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -1253,7 +1290,7 @@ func expandDNSManagedZonePrivateVisibilityConfigNetworks(v interface{}, d Terraf transformedNetworkUrl, err := expandDNSManagedZonePrivateVisibilityConfigNetworksNetworkUrl(original["network_url"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNetworkUrl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNetworkUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["networkUrl"] = transformedNetworkUrl } @@ -1262,7 +1299,7 @@ func expandDNSManagedZonePrivateVisibilityConfigNetworks(v interface{}, d Terraf return req, nil } -func 
expandDNSManagedZonePrivateVisibilityConfigGkeClusters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZonePrivateVisibilityConfigGkeClusters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1275,7 +1312,7 @@ func expandDNSManagedZonePrivateVisibilityConfigGkeClusters(v interface{}, d Ter transformedGkeClusterName, err := expandDNSManagedZonePrivateVisibilityConfigGkeClustersGkeClusterName(original["gke_cluster_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGkeClusterName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGkeClusterName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["gkeClusterName"] = transformedGkeClusterName } @@ -1284,24 +1321,24 @@ func expandDNSManagedZonePrivateVisibilityConfigGkeClusters(v interface{}, d Ter return req, nil } -func expandDNSManagedZonePrivateVisibilityConfigNetworksNetworkUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZonePrivateVisibilityConfigNetworksNetworkUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil || v.(string) == "" { return "", nil } else if strings.HasPrefix(v.(string), "https://") { return v, nil } - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } - return ConvertSelfLinkToV1(url), nil + return tpgresource.ConvertSelfLinkToV1(url), nil } -func expandDNSManagedZonePrivateVisibilityConfigGkeClustersGkeClusterName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDNSManagedZonePrivateVisibilityConfigGkeClustersGkeClusterName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZoneForwardingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneForwardingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1313,14 +1350,14 @@ func expandDNSManagedZoneForwardingConfig(v interface{}, d TerraformResourceData transformedTargetNameServers, err := expandDNSManagedZoneForwardingConfigTargetNameServers(original["target_name_servers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetNameServers); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetNameServers); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetNameServers"] = transformedTargetNameServers } return transformed, nil } -func expandDNSManagedZoneForwardingConfigTargetNameServers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneForwardingConfigTargetNameServers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() l := v.([]interface{}) req := make([]interface{}, 0, len(l)) @@ -1334,14 +1371,14 @@ func expandDNSManagedZoneForwardingConfigTargetNameServers(v interface{}, d Terr transformedIpv4Address, err := expandDNSManagedZoneForwardingConfigTargetNameServersIpv4Address(original["ipv4_address"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIpv4Address); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIpv4Address); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["ipv4Address"] = transformedIpv4Address } transformedForwardingPath, err := expandDNSManagedZoneForwardingConfigTargetNameServersForwardingPath(original["forwarding_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedForwardingPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedForwardingPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["forwardingPath"] = transformedForwardingPath } @@ -1350,15 +1387,15 @@ func expandDNSManagedZoneForwardingConfigTargetNameServers(v interface{}, d Terr return req, nil } -func expandDNSManagedZoneForwardingConfigTargetNameServersIpv4Address(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneForwardingConfigTargetNameServersIpv4Address(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZoneForwardingConfigTargetNameServersForwardingPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneForwardingConfigTargetNameServersForwardingPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandDNSManagedZonePeeringConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZonePeeringConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1370,14 +1407,14 @@ func expandDNSManagedZonePeeringConfig(v interface{}, d TerraformResourceData, c transformedTargetNetwork, err := expandDNSManagedZonePeeringConfigTargetNetwork(original["target_network"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTargetNetwork); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTargetNetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["targetNetwork"] = transformedTargetNetwork } return transformed, nil } -func expandDNSManagedZonePeeringConfigTargetNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZonePeeringConfigTargetNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1389,27 +1426,27 @@ func expandDNSManagedZonePeeringConfigTargetNetwork(v interface{}, d TerraformRe transformedNetworkUrl, err := expandDNSManagedZonePeeringConfigTargetNetworkNetworkUrl(original["network_url"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNetworkUrl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNetworkUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["networkUrl"] = transformedNetworkUrl } return transformed, nil } -func expandDNSManagedZonePeeringConfigTargetNetworkNetworkUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZonePeeringConfigTargetNetworkNetworkUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil || v.(string) == "" { return "", nil } else if strings.HasPrefix(v.(string), "https://") { return v, nil } - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } - return ConvertSelfLinkToV1(url), nil + return tpgresource.ConvertSelfLinkToV1(url), nil } -func expandDNSManagedZoneCloudLoggingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDNSManagedZoneCloudLoggingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1421,14 +1458,14 @@ func expandDNSManagedZoneCloudLoggingConfig(v interface{}, d TerraformResourceDa transformedEnableLogging, err := expandDNSManagedZoneCloudLoggingConfigEnableLogging(original["enable_logging"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnableLogging); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnableLogging); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enableLogging"] = transformedEnableLogging } return transformed, nil } -func expandDNSManagedZoneCloudLoggingConfigEnableLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDNSManagedZoneCloudLoggingConfigEnableLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_policy.go new file mode 100644 index 0000000000..7eb3059f4a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_policy.go @@ -0,0 +1,692 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dns + +import ( + "bytes" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDNSPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceDNSPolicyCreate, + Read: resourceDNSPolicyRead, + Update: resourceDNSPolicyUpdate, + Delete: resourceDNSPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDNSPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `User assigned name for this policy.`, + }, + "alternative_name_server_config": { + Type: schema.TypeList, + Optional: true, + Description: `Sets an alternative name server for the associated networks. +When specified, all DNS queries are forwarded to a name server that you choose. +Names such as .internal are not available when an alternative name server is specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_name_servers": { + Type: schema.TypeSet, + Required: true, + Description: `Sets an alternative name server for the associated networks. When specified, +all DNS queries are forwarded to a name server that you choose. 
Names such as .internal +are not available when an alternative name server is specified.`, + Elem: dnsPolicyAlternativeNameServerConfigTargetNameServersSchema(), + Set: func(v interface{}) int { + raw := v.(map[string]interface{}) + if address, ok := raw["ipv4_address"]; ok { + tpgresource.Hashcode(address.(string)) + } + var buf bytes.Buffer + schema.SerializeResourceForHash(&buf, raw, dnsPolicyAlternativeNameServerConfigTargetNameServersSchema()) + return tpgresource.Hashcode(buf.String()) + }, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A textual description field. Defaults to 'Managed by Terraform'.`, + Default: "Managed by Terraform", + }, + "enable_inbound_forwarding": { + Type: schema.TypeBool, + Optional: true, + Description: `Allows networks bound to this policy to receive DNS queries sent +by VMs or applications over VPN connections. When enabled, a +virtual IP address will be allocated from each of the sub-networks +that are bound to this policy.`, + }, + "enable_logging": { + Type: schema.TypeBool, + Optional: true, + Description: `Controls whether logging is enabled for the networks bound to this policy. 
+Defaults to no logging if not set.`, + }, + "networks": { + Type: schema.TypeSet, + Optional: true, + Description: `List of network names specifying networks to which this policy is applied.`, + Elem: dnsPolicyNetworksSchema(), + Set: func(v interface{}) int { + raw := v.(map[string]interface{}) + if url, ok := raw["network_url"]; ok { + return tpgresource.SelfLinkNameHash(url) + } + var buf bytes.Buffer + schema.SerializeResourceForHash(&buf, raw, dnsPolicyNetworksSchema()) + return tpgresource.Hashcode(buf.String()) + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func dnsPolicyAlternativeNameServerConfigTargetNameServersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ipv4_address": { + Type: schema.TypeString, + Required: true, + Description: `IPv4 address to forward to.`, + }, + "forwarding_path": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"default", "private", ""}), + Description: `Forwarding path for this TargetNameServer. If unset or 'default' Cloud DNS will make forwarding +decision based on address ranges, i.e. RFC1918 addresses go to the VPC, Non-RFC1918 addresses go +to the Internet. When set to 'private', Cloud DNS will always send queries through VPC for this target Possible values: ["default", "private"]`, + }, + }, + } +} + +func dnsPolicyNetworksSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network_url": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The id or fully qualified URL of the VPC network to forward queries to. 
+This should be formatted like 'projects/{project}/global/networks/{network}' or +'https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}'`, + }, + }, + } +} + +func resourceDNSPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + alternativeNameServerConfigProp, err := expandDNSPolicyAlternativeNameServerConfig(d.Get("alternative_name_server_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("alternative_name_server_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(alternativeNameServerConfigProp)) && (ok || !reflect.DeepEqual(v, alternativeNameServerConfigProp)) { + obj["alternativeNameServerConfig"] = alternativeNameServerConfigProp + } + descriptionProp, err := expandDNSPolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + enableInboundForwardingProp, err := expandDNSPolicyEnableInboundForwarding(d.Get("enable_inbound_forwarding"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_inbound_forwarding"); ok || !reflect.DeepEqual(v, enableInboundForwardingProp) { + obj["enableInboundForwarding"] = enableInboundForwardingProp + } + enableLoggingProp, err := expandDNSPolicyEnableLogging(d.Get("enable_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_logging"); ok || !reflect.DeepEqual(v, enableLoggingProp) { + obj["enableLogging"] = enableLoggingProp + } + nameProp, err := expandDNSPolicyName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + networksProp, err := expandDNSPolicyNetworks(d.Get("networks"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("networks"); !tpgresource.IsEmptyValue(reflect.ValueOf(networksProp)) && (ok || !reflect.DeepEqual(v, networksProp)) { + obj["networks"] = networksProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Policy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Policy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Policy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/policies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Policy %q: %#v", d.Id(), res) + + return resourceDNSPolicyRead(d, meta) +} + +func resourceDNSPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies/{{name}}") + if err != nil { + 
return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Policy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DNSPolicy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Policy: %s", err) + } + + if err := d.Set("alternative_name_server_config", flattenDNSPolicyAlternativeNameServerConfig(res["alternativeNameServerConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Policy: %s", err) + } + if err := d.Set("description", flattenDNSPolicyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Policy: %s", err) + } + if err := d.Set("enable_inbound_forwarding", flattenDNSPolicyEnableInboundForwarding(res["enableInboundForwarding"], d, config)); err != nil { + return fmt.Errorf("Error reading Policy: %s", err) + } + if err := d.Set("enable_logging", flattenDNSPolicyEnableLogging(res["enableLogging"], d, config)); err != nil { + return fmt.Errorf("Error reading Policy: %s", err) + } + if err := d.Set("name", flattenDNSPolicyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Policy: %s", err) + } + if err := d.Set("networks", flattenDNSPolicyNetworks(res["networks"], d, config)); err != nil { + return fmt.Errorf("Error reading Policy: %s", err) + } + + return nil +} + +func resourceDNSPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Policy: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("alternative_name_server_config") || d.HasChange("description") || d.HasChange("enable_inbound_forwarding") || d.HasChange("enable_logging") || d.HasChange("networks") { + obj := make(map[string]interface{}) + + alternativeNameServerConfigProp, err := expandDNSPolicyAlternativeNameServerConfig(d.Get("alternative_name_server_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("alternative_name_server_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, alternativeNameServerConfigProp)) { + obj["alternativeNameServerConfig"] = alternativeNameServerConfigProp + } + descriptionProp, err := expandDNSPolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + enableInboundForwardingProp, err := expandDNSPolicyEnableInboundForwarding(d.Get("enable_inbound_forwarding"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_inbound_forwarding"); ok || !reflect.DeepEqual(v, enableInboundForwardingProp) { + obj["enableInboundForwarding"] = enableInboundForwardingProp + } + enableLoggingProp, err := expandDNSPolicyEnableLogging(d.Get("enable_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_logging"); ok || !reflect.DeepEqual(v, enableLoggingProp) { + obj["enableLogging"] = enableLoggingProp + } + networksProp, err := expandDNSPolicyNetworks(d.Get("networks"), d, config) + if err != nil { + return err 
+ } else if v, ok := d.GetOkExists("networks"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networksProp)) { + obj["networks"] = networksProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Policy %q: %#v", d.Id(), res) + } + + } + + d.Partial(false) + + return resourceDNSPolicyRead(d, meta) +} + +func resourceDNSPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Policy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + // if networks are attached, they need to be detached before the policy can be deleted + if d.Get("networks.#").(int) > 0 { + patched := make(map[string]interface{}) + patched["networks"] = nil + + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies/{{name}}") + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: 
config, + Method: "PATCH", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: patched, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) + } + } + log.Printf("[DEBUG] Deleting Policy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Policy") + } + + log.Printf("[DEBUG] Finished deleting Policy %q: %#v", d.Id(), res) + return nil +} + +func resourceDNSPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/policies/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/policies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDNSPolicyAlternativeNameServerConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["target_name_servers"] = + flattenDNSPolicyAlternativeNameServerConfigTargetNameServers(original["targetNameServers"], d, config) + return []interface{}{transformed} +} +func 
flattenDNSPolicyAlternativeNameServerConfigTargetNameServers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(func(v interface{}) int { + raw := v.(map[string]interface{}) + if address, ok := raw["ipv4_address"]; ok { + tpgresource.Hashcode(address.(string)) + } + var buf bytes.Buffer + schema.SerializeResourceForHash(&buf, raw, dnsPolicyAlternativeNameServerConfigTargetNameServersSchema()) + return tpgresource.Hashcode(buf.String()) + }, []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "ipv4_address": flattenDNSPolicyAlternativeNameServerConfigTargetNameServersIpv4Address(original["ipv4Address"], d, config), + "forwarding_path": flattenDNSPolicyAlternativeNameServerConfigTargetNameServersForwardingPath(original["forwardingPath"], d, config), + }) + } + return transformed +} +func flattenDNSPolicyAlternativeNameServerConfigTargetNameServersIpv4Address(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDNSPolicyAlternativeNameServerConfigTargetNameServersForwardingPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDNSPolicyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDNSPolicyEnableInboundForwarding(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDNSPolicyEnableLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDNSPolicyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenDNSPolicyNetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(func(v interface{}) int { + raw := v.(map[string]interface{}) + if url, ok := raw["network_url"]; ok { + return tpgresource.SelfLinkNameHash(url) + } + var buf bytes.Buffer + schema.SerializeResourceForHash(&buf, raw, dnsPolicyNetworksSchema()) + return tpgresource.Hashcode(buf.String()) + }, []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "network_url": flattenDNSPolicyNetworksNetworkUrl(original["networkUrl"], d, config), + }) + } + return transformed +} +func flattenDNSPolicyNetworksNetworkUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDNSPolicyAlternativeNameServerConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTargetNameServers, err := expandDNSPolicyAlternativeNameServerConfigTargetNameServers(original["target_name_servers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTargetNameServers); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["targetNameServers"] = transformedTargetNameServers + } + + return transformed, nil +} + +func expandDNSPolicyAlternativeNameServerConfigTargetNameServers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw 
:= range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIpv4Address, err := expandDNSPolicyAlternativeNameServerConfigTargetNameServersIpv4Address(original["ipv4_address"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpv4Address); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipv4Address"] = transformedIpv4Address + } + + transformedForwardingPath, err := expandDNSPolicyAlternativeNameServerConfigTargetNameServersForwardingPath(original["forwarding_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedForwardingPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["forwardingPath"] = transformedForwardingPath + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDNSPolicyAlternativeNameServerConfigTargetNameServersIpv4Address(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDNSPolicyAlternativeNameServerConfigTargetNameServersForwardingPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDNSPolicyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDNSPolicyEnableInboundForwarding(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDNSPolicyEnableLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDNSPolicyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDNSPolicyNetworks(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNetworkUrl, err := expandDNSPolicyNetworksNetworkUrl(original["network_url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetworkUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["networkUrl"] = transformedNetworkUrl + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDNSPolicyNetworksNetworkUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil || v.(string) == "" { + return "", nil + } else if strings.HasPrefix(v.(string), "https://") { + return v, nil + } + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + if err != nil { + return "", err + } + return tpgresource.ConvertSelfLinkToV1(url), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dns_record_set.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_record_set.go similarity index 88% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dns_record_set.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_record_set.go index 7c59241fa3..c5d2827ddd 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_dns_record_set.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_record_set.go @@ 
-1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package dns import ( "fmt" @@ -10,6 +12,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/dns/v1" ) @@ -23,8 +27,8 @@ func rrdatasDnsDiffSuppress(k, old, new string, d *schema.ResourceData) bool { return false } - oList := convertStringArr(o.([]interface{})) - nList := convertStringArr(n.([]interface{})) + oList := tpgresource.ConvertStringArr(o.([]interface{})) + nList := tpgresource.ConvertStringArr(n.([]interface{})) parseFunc := func(record string) string { switch d.Get("type") { @@ -39,13 +43,13 @@ func rrdatasDnsDiffSuppress(k, old, new string, d *schema.ResourceData) bool { return record } } - return rrdatasListDiffSuppress(oList, nList, parseFunc, d) + return RrdatasListDiffSuppress(oList, nList, parseFunc, d) } // suppress on a list when 1) its items have dups that need to be ignored // and 2) string comparison on the items may need a special parse function // example of usage can be found ../../../third_party/terraform/tests/resource_dns_record_set_test.go.erb -func rrdatasListDiffSuppress(oldList, newList []string, fun func(x string) string, _ *schema.ResourceData) bool { +func RrdatasListDiffSuppress(oldList, newList []string, fun func(x string) string, _ *schema.ResourceData) bool { // compare two lists of unordered records diff := make(map[string]bool, len(oldList)) for _, oldRecord := range oldList { @@ -84,7 +88,7 @@ func ResourceDnsRecordSet() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name of the zone in which this record set 
will reside.`, }, @@ -256,8 +260,8 @@ var healthCheckedTargetSchema *schema.Resource = &schema.Resource{ "load_balancer_type": { Type: schema.TypeString, Required: true, - Description: `The type of load balancer. This value is case-sensitive. Possible values: ["regionalL4ilb"]`, - ValidateFunc: validation.StringInSlice([]string{"regionalL4ilb"}, false), + Description: `The type of load balancer. This value is case-sensitive. Possible values: ["regionalL4ilb", "regionalL7ilb"]`, + ValidateFunc: validation.StringInSlice([]string{"regionalL4ilb", "regionalL7ilb"}, false), }, "ip_address": { Type: schema.TypeString, @@ -278,7 +282,7 @@ var healthCheckedTargetSchema *schema.Resource = &schema.Resource{ "network_url": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The fully qualified url of the network in which the load balancer belongs. This should be formatted like `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`.", }, "project": { @@ -298,13 +302,13 @@ var healthCheckedTargetSchema *schema.Resource = &schema.Resource{ } func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -378,13 +382,13 @@ func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error } func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err :=
tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -396,14 +400,16 @@ func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { dnsType := d.Get("type").(string) var resp *dns.ResourceRecordSetsListResponse - err = retry(func() error { - var reqErr error - resp, reqErr = config.NewDnsClient(userAgent).ResourceRecordSets.List( - project, zone).Name(name).Type(dnsType).Do() - return reqErr + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + var reqErr error + resp, reqErr = config.NewDnsClient(userAgent).ResourceRecordSets.List( + project, zone).Name(name).Type(dnsType).Do() + return reqErr + }, }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DNS Record Set %q", d.Get("name").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DNS Record Set %q", d.Get("name").(string))) } if len(resp.Rrsets) == 0 { // The resource doesn't exist anymore @@ -439,13 +445,13 @@ func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { } func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -493,7 +499,7 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] DNS Record delete request: %#v", chg) chg, err = config.NewDnsClient(userAgent).Changes.Create(project, zone, chg).Do() if err != nil { - return handleNotFoundError(err, d, "google_dns_record_set") + return 
transport_tpg.HandleNotFoundError(err, d, "google_dns_record_set") } w := &DnsChangeWaiter{ @@ -512,13 +518,13 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error } func resourceDnsRecordSetUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -593,8 +599,8 @@ func resourceDnsRecordSetUpdate(d *schema.ResourceData, meta interface{}) error } func resourceDnsRecordSetImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P<project>[^/]+)/managedZones/(?P<managed_zone>[^/]+)/rrsets/(?P<name>[^/]+)/(?P<type>[^/]+)", "(?P<project>[^/]+)/(?P<managed_zone>[^/]+)/(?P<name>[^/]+)/(?P<type>[^/]+)", "(?P<managed_zone>[^/]+)/(?P<name>[^/]+)/(?P<type>[^/]+)", @@ -603,7 +609,7 @@ func resourceDnsRecordSetImportState(d *schema.ResourceData, meta interface{}) ( } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/managedZones/{{managed_zone}}/rrsets/{{name}}/{{type}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/managedZones/{{managed_zone}}/rrsets/{{name}}/{{type}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -613,10 +619,10 @@ func resourceDnsRecordSetImportState(d *schema.ResourceData, meta interface{}) ( } func expandDnsRecordSetRrdata(configured []interface{}) []string { - return convertStringArr(configured) + return tpgresource.ConvertStringArr(configured) } -func expandDnsRecordSetRoutingPolicy(configured []interface{}, d TerraformResourceData, config *Config)
(*dns.RRSetRoutingPolicy, error) { +func expandDnsRecordSetRoutingPolicy(configured []interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (*dns.RRSetRoutingPolicy, error) { if len(configured) == 0 || configured[0] == nil { return nil, nil } @@ -664,7 +670,7 @@ func expandDnsRecordSetRoutingPolicy(configured []interface{}, d TerraformResour return nil, nil // unreachable here if ps is valid data } -func expandDnsRecordSetRoutingPolicyWrrItems(configured []interface{}, d TerraformResourceData, config *Config) ([]*dns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem, error) { +func expandDnsRecordSetRoutingPolicyWrrItems(configured []interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]*dns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem, error) { items := make([]*dns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem, 0, len(configured)) for _, raw := range configured { item, err := expandDnsRecordSetRoutingPolicyWrrItem(raw, d, config) @@ -676,20 +682,20 @@ func expandDnsRecordSetRoutingPolicyWrrItems(configured []interface{}, d Terrafo return items, nil } -func expandDnsRecordSetRoutingPolicyWrrItem(configured interface{}, d TerraformResourceData, config *Config) (*dns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem, error) { +func expandDnsRecordSetRoutingPolicyWrrItem(configured interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (*dns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem, error) { data := configured.(map[string]interface{}) healthCheckedTargets, err := expandDnsRecordSetHealthCheckedTargets(data["health_checked_targets"].([]interface{}), d, config) if err != nil { return nil, err } return &dns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{ - Rrdatas: convertStringArr(data["rrdatas"].([]interface{})), + Rrdatas: tpgresource.ConvertStringArr(data["rrdatas"].([]interface{})), Weight: data["weight"].(float64), HealthCheckedTargets: healthCheckedTargets, }, nil } -func 
expandDnsRecordSetRoutingPolicyGeoItems(configured []interface{}, d TerraformResourceData, config *Config) ([]*dns.RRSetRoutingPolicyGeoPolicyGeoPolicyItem, error) { +func expandDnsRecordSetRoutingPolicyGeoItems(configured []interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]*dns.RRSetRoutingPolicyGeoPolicyGeoPolicyItem, error) { items := make([]*dns.RRSetRoutingPolicyGeoPolicyGeoPolicyItem, 0, len(configured)) for _, raw := range configured { item, err := expandDnsRecordSetRoutingPolicyGeoItem(raw, d, config) @@ -701,20 +707,20 @@ func expandDnsRecordSetRoutingPolicyGeoItems(configured []interface{}, d Terrafo return items, nil } -func expandDnsRecordSetRoutingPolicyGeoItem(configured interface{}, d TerraformResourceData, config *Config) (*dns.RRSetRoutingPolicyGeoPolicyGeoPolicyItem, error) { +func expandDnsRecordSetRoutingPolicyGeoItem(configured interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (*dns.RRSetRoutingPolicyGeoPolicyGeoPolicyItem, error) { data := configured.(map[string]interface{}) healthCheckedTargets, err := expandDnsRecordSetHealthCheckedTargets(data["health_checked_targets"].([]interface{}), d, config) if err != nil { return nil, err } return &dns.RRSetRoutingPolicyGeoPolicyGeoPolicyItem{ - Rrdatas: convertStringArr(data["rrdatas"].([]interface{})), + Rrdatas: tpgresource.ConvertStringArr(data["rrdatas"].([]interface{})), Location: data["location"].(string), HealthCheckedTargets: healthCheckedTargets, }, nil } -func expandDnsRecordSetHealthCheckedTargets(configured []interface{}, d TerraformResourceData, config *Config) (*dns.RRSetRoutingPolicyHealthCheckTargets, error) { +func expandDnsRecordSetHealthCheckedTargets(configured []interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (*dns.RRSetRoutingPolicyHealthCheckTargets, error) { if len(configured) == 0 || configured[0] == nil { return nil, nil } @@ -729,7 +735,7 @@ func 
expandDnsRecordSetHealthCheckedTargets(configured []interface{}, d Terrafor }, nil } -func expandDnsRecordSetHealthCheckedTargetsInternalLoadBalancers(configured []interface{}, d TerraformResourceData, config *Config) ([]*dns.RRSetRoutingPolicyLoadBalancerTarget, error) { +func expandDnsRecordSetHealthCheckedTargetsInternalLoadBalancers(configured []interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]*dns.RRSetRoutingPolicyLoadBalancerTarget, error) { ilbs := make([]*dns.RRSetRoutingPolicyLoadBalancerTarget, 0, len(configured)) for _, raw := range configured { ilb, err := expandDnsRecordSetHealthCheckedTargetsInternalLoadBalancer(raw, d, config) @@ -741,7 +747,7 @@ func expandDnsRecordSetHealthCheckedTargetsInternalLoadBalancers(configured []in return ilbs, nil } -func expandDnsRecordSetHealthCheckedTargetsInternalLoadBalancer(configured interface{}, d TerraformResourceData, config *Config) (*dns.RRSetRoutingPolicyLoadBalancerTarget, error) { +func expandDnsRecordSetHealthCheckedTargetsInternalLoadBalancer(configured interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (*dns.RRSetRoutingPolicyLoadBalancerTarget, error) { data := configured.(map[string]interface{}) networkUrl, err := expandDnsRecordSetHealthCheckedTargetsInternalLoadBalancerNetworkUrl(data["network_url"], d, config) if err != nil { @@ -758,20 +764,20 @@ func expandDnsRecordSetHealthCheckedTargetsInternalLoadBalancer(configured inter }, nil } -func expandDnsRecordSetHealthCheckedTargetsInternalLoadBalancerNetworkUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDnsRecordSetHealthCheckedTargetsInternalLoadBalancerNetworkUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil || v.(string) == "" { return "", nil } else if strings.HasPrefix(v.(string), "https://") { return v, nil } - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}"+v.(string)) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) if err != nil { return "", err } - return ConvertSelfLinkToV1(url), nil + return tpgresource.ConvertSelfLinkToV1(url), nil } -func expandDnsRecordSetRoutingPolicyPrimaryBackup(configured []interface{}, d TerraformResourceData, config *Config) (*dns.RRSetRoutingPolicyPrimaryBackupPolicy, error) { +func expandDnsRecordSetRoutingPolicyPrimaryBackup(configured []interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (*dns.RRSetRoutingPolicyPrimaryBackupPolicy, error) { if len(configured) == 0 || configured[0] == nil { return nil, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_response_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_response_policy.go new file mode 100644 index 0000000000..d5b89a8653 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_response_policy.go @@ -0,0 +1,537 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package dns + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDNSResponsePolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceDNSResponsePolicyCreate, + Read: resourceDNSResponsePolicyRead, + Update: resourceDNSResponsePolicyUpdate, + Delete: resourceDNSResponsePolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDNSResponsePolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "response_policy_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The user assigned name for this Response Policy, such as 'myresponsepolicy'.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `The description of the response policy, such as 'My new response policy'.`, + Default: "Managed by Terraform", + }, + "gke_clusters": { + Type: schema.TypeList, + Optional: true, + Description: `The list of Google Kubernetes Engine clusters that can see this zone.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gke_cluster_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the cluster to bind this ManagedZone to. 
+This should be specified in the format like +'projects/*/locations/*/clusters/*'`, + }, + }, + }, + }, + "networks": { + Type: schema.TypeList, + Optional: true, + Description: `The list of network names specifying networks to which this policy is applied.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network_url": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The fully qualified URL of the VPC network to bind to. +This should be formatted like +'https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}'`, + }, + }, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDNSResponsePolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + responsePolicyNameProp, err := expandDNSResponsePolicyResponsePolicyName(d.Get("response_policy_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("response_policy_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(responsePolicyNameProp)) && (ok || !reflect.DeepEqual(v, responsePolicyNameProp)) { + obj["responsePolicyName"] = responsePolicyNameProp + } + descriptionProp, err := expandDNSResponsePolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + networksProp, err := expandDNSResponsePolicyNetworks(d.Get("networks"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("networks"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(networksProp)) && (ok || !reflect.DeepEqual(v, networksProp)) { + obj["networks"] = networksProp + } + gkeClustersProp, err := expandDNSResponsePolicyGkeClusters(d.Get("gke_clusters"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("gke_clusters"); !tpgresource.IsEmptyValue(reflect.ValueOf(gkeClustersProp)) && (ok || !reflect.DeepEqual(v, gkeClustersProp)) { + obj["gkeClusters"] = gkeClustersProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/responsePolicies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ResponsePolicy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ResponsePolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ResponsePolicy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/responsePolicies/{{response_policy_name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ResponsePolicy %q: %#v", d.Id(), res) + + return resourceDNSResponsePolicyRead(d, meta) +} + +func resourceDNSResponsePolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/responsePolicies/{{response_policy_name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ResponsePolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DNSResponsePolicy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ResponsePolicy: %s", err) + } + + if err := d.Set("response_policy_name", flattenDNSResponsePolicyResponsePolicyName(res["responsePolicyName"], d, config)); err != nil { + return fmt.Errorf("Error reading ResponsePolicy: %s", err) + } + if err := d.Set("description", flattenDNSResponsePolicyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ResponsePolicy: %s", err) + } + if err := d.Set("networks", flattenDNSResponsePolicyNetworks(res["networks"], d, config)); err != nil { + return fmt.Errorf("Error reading ResponsePolicy: %s", err) + } + if err := d.Set("gke_clusters", flattenDNSResponsePolicyGkeClusters(res["gkeClusters"], d, config)); err != nil { + return fmt.Errorf("Error reading ResponsePolicy: %s", err) + } + + return nil +} + +func resourceDNSResponsePolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := 
tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ResponsePolicy: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandDNSResponsePolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + networksProp, err := expandDNSResponsePolicyNetworks(d.Get("networks"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("networks"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, networksProp)) { + obj["networks"] = networksProp + } + gkeClustersProp, err := expandDNSResponsePolicyGkeClusters(d.Get("gke_clusters"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("gke_clusters"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gkeClustersProp)) { + obj["gkeClusters"] = gkeClustersProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/responsePolicies/{{response_policy_name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ResponsePolicy %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating ResponsePolicy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ResponsePolicy %q: %#v", d.Id(), res) + } + + return 
resourceDNSResponsePolicyRead(d, meta) +} + +func resourceDNSResponsePolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ResponsePolicy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/responsePolicies/{{response_policy_name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + // if gke clusters are attached, they need to be detached before the response policy can be deleted + if d.Get("gke_clusters.#").(int) > 0 { + patched := make(map[string]interface{}) + patched["gkeClusters"] = nil + + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/responsePolicies/{{response_policy_name}}") + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: patched, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) + } + } + + // if networks are attached, they need to be detached before the response policy can be deleted + if d.Get("networks.#").(int) > 0 { + patched := make(map[string]interface{}) + patched["networks"] = nil + + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/responsePolicies/{{response_policy_name}}") + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: patched, + Timeout: 
d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) + } + } + log.Printf("[DEBUG] Deleting ResponsePolicy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ResponsePolicy") + } + + log.Printf("[DEBUG] Finished deleting ResponsePolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceDNSResponsePolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/responsePolicies/(?P<response_policy_name>[^/]+)", + "(?P<project>[^/]+)/(?P<response_policy_name>[^/]+)", + "(?P<response_policy_name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/responsePolicies/{{response_policy_name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDNSResponsePolicyResponsePolicyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDNSResponsePolicyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDNSResponsePolicyNetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original :=
raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "network_url": flattenDNSResponsePolicyNetworksNetworkUrl(original["networkUrl"], d, config), + }) + } + return transformed +} +func flattenDNSResponsePolicyNetworksNetworkUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDNSResponsePolicyGkeClusters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "gke_cluster_name": flattenDNSResponsePolicyGkeClustersGkeClusterName(original["gkeClusterName"], d, config), + }) + } + return transformed +} +func flattenDNSResponsePolicyGkeClustersGkeClusterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDNSResponsePolicyResponsePolicyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDNSResponsePolicyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDNSResponsePolicyNetworks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNetworkUrl, err := 
expandDNSResponsePolicyNetworksNetworkUrl(original["network_url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetworkUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["networkUrl"] = transformedNetworkUrl + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDNSResponsePolicyNetworksNetworkUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil || v.(string) == "" { + return "", nil + } else if strings.HasPrefix(v.(string), "https://") { + return v, nil + } + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) + if err != nil { + return "", err + } + return tpgresource.ConvertSelfLinkToV1(url), nil +} + +func expandDNSResponsePolicyGkeClusters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedGkeClusterName, err := expandDNSResponsePolicyGkeClustersGkeClusterName(original["gke_cluster_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGkeClusterName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gkeClusterName"] = transformedGkeClusterName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDNSResponsePolicyGkeClustersGkeClusterName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_response_policy_rule.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_response_policy_rule.go new file mode 100644 index 0000000000..b9e1ff4daf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_response_policy_rule.go @@ -0,0 +1,529 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dns + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceDNSResponsePolicyRule() *schema.Resource { + return &schema.Resource{ + Create: resourceDNSResponsePolicyRuleCreate, + Read: resourceDNSResponsePolicyRuleRead, + Update: resourceDNSResponsePolicyRuleUpdate, + Delete: resourceDNSResponsePolicyRuleDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDNSResponsePolicyRuleImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "dns_name": { + Type: schema.TypeString, + Required: true, + Description: `The DNS name 
(wildcard or exact) to apply this rule to. Must be unique within the Response Policy Rule.`, + }, + "response_policy": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Identifies the response policy addressed by this request.`, + }, + "rule_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `An identifier for this rule. Must be unique with the ResponsePolicy.`, + }, + "local_data": { + Type: schema.TypeList, + Optional: true, + Description: `Answer this query directly with DNS data. These ResourceRecordSets override any other DNS behavior for the matched name; +in particular they override private zones, the public internet, and GCP internal DNS. No SOA nor NS types are allowed.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "local_datas": { + Type: schema.TypeList, + Required: true, + Description: `All resource record sets for this selector, one per resource record type. The name must match the dns_name.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `For example, www.example.com.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"A", "AAAA", "CAA", "CNAME", "DNSKEY", "DS", "HTTPS", "IPSECVPNKEY", "MX", "NAPTR", "NS", "PTR", "SOA", "SPF", "SRV", "SSHFP", "SVCB", "TLSA", "TXT"}), + Description: `One of valid DNS resource types. 
Possible values: ["A", "AAAA", "CAA", "CNAME", "DNSKEY", "DS", "HTTPS", "IPSECVPNKEY", "MX", "NAPTR", "NS", "PTR", "SOA", "SPF", "SRV", "SSHFP", "SVCB", "TLSA", "TXT"]`, + }, + "rrdatas": { + Type: schema.TypeList, + Optional: true, + Description: `As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1)`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "ttl": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds that this ResourceRecordSet can be cached by +resolvers.`, + }, + }, + }, + }, + }, + }, + ConflictsWith: []string{}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDNSResponsePolicyRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + ruleNameProp, err := expandDNSResponsePolicyRuleRuleName(d.Get("rule_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rule_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(ruleNameProp)) && (ok || !reflect.DeepEqual(v, ruleNameProp)) { + obj["ruleName"] = ruleNameProp + } + dnsNameProp, err := expandDNSResponsePolicyRuleDnsName(d.Get("dns_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dns_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(dnsNameProp)) && (ok || !reflect.DeepEqual(v, dnsNameProp)) { + obj["dnsName"] = dnsNameProp + } + localDataProp, err := expandDNSResponsePolicyRuleLocalData(d.Get("local_data"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("local_data"); !tpgresource.IsEmptyValue(reflect.ValueOf(localDataProp)) && (ok || !reflect.DeepEqual(v, localDataProp)) { + obj["localData"] = localDataProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{DNSBasePath}}projects/{{project}}/responsePolicies/{{response_policy}}/rules") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ResponsePolicyRule: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ResponsePolicyRule: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ResponsePolicyRule: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/responsePolicies/{{response_policy}}/rules/{{rule_name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ResponsePolicyRule %q: %#v", d.Id(), res) + + return resourceDNSResponsePolicyRuleRead(d, meta) +} + +func resourceDNSResponsePolicyRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/responsePolicies/{{response_policy}}/rules/{{rule_name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ResponsePolicyRule: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DNSResponsePolicyRule %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ResponsePolicyRule: %s", err) + } + + if err := d.Set("rule_name", flattenDNSResponsePolicyRuleRuleName(res["ruleName"], d, config)); err != nil { + return fmt.Errorf("Error reading ResponsePolicyRule: %s", err) + } + if err := d.Set("dns_name", flattenDNSResponsePolicyRuleDnsName(res["dnsName"], d, config)); err != nil { + return fmt.Errorf("Error reading ResponsePolicyRule: %s", err) + } + if err := d.Set("local_data", flattenDNSResponsePolicyRuleLocalData(res["localData"], d, config)); err != nil { + return fmt.Errorf("Error reading ResponsePolicyRule: %s", err) + } + + return nil +} + +func resourceDNSResponsePolicyRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ResponsePolicyRule: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + dnsNameProp, err := expandDNSResponsePolicyRuleDnsName(d.Get("dns_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dns_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dnsNameProp)) { + obj["dnsName"] = dnsNameProp + } + localDataProp, err := expandDNSResponsePolicyRuleLocalData(d.Get("local_data"), d, config) + if err != nil { + return err + } else if v, ok 
:= d.GetOkExists("local_data"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, localDataProp)) { + obj["localData"] = localDataProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/responsePolicies/{{response_policy}}/rules/{{rule_name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ResponsePolicyRule %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating ResponsePolicyRule %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ResponsePolicyRule %q: %#v", d.Id(), res) + } + + return resourceDNSResponsePolicyRuleRead(d, meta) +} + +func resourceDNSResponsePolicyRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ResponsePolicyRule: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/responsePolicies/{{response_policy}}/rules/{{rule_name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting ResponsePolicyRule %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ResponsePolicyRule") + } + + log.Printf("[DEBUG] Finished deleting ResponsePolicyRule %q: %#v", d.Id(), res) + return nil +} + +func resourceDNSResponsePolicyRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/responsePolicies/(?P[^/]+)/rules/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/responsePolicies/{{response_policy}}/rules/{{rule_name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDNSResponsePolicyRuleRuleName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDNSResponsePolicyRuleDnsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDNSResponsePolicyRuleLocalData(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["local_datas"] = + flattenDNSResponsePolicyRuleLocalDataLocalDatas(original["localDatas"], d, config) + return []interface{}{transformed} +} +func flattenDNSResponsePolicyRuleLocalDataLocalDatas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == 
nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenDNSResponsePolicyRuleLocalDataLocalDatasName(original["name"], d, config), + "type": flattenDNSResponsePolicyRuleLocalDataLocalDatasType(original["type"], d, config), + "ttl": flattenDNSResponsePolicyRuleLocalDataLocalDatasTtl(original["ttl"], d, config), + "rrdatas": flattenDNSResponsePolicyRuleLocalDataLocalDatasRrdatas(original["rrdatas"], d, config), + }) + } + return transformed +} +func flattenDNSResponsePolicyRuleLocalDataLocalDatasName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDNSResponsePolicyRuleLocalDataLocalDatasType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDNSResponsePolicyRuleLocalDataLocalDatasTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDNSResponsePolicyRuleLocalDataLocalDatasRrdatas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDNSResponsePolicyRuleRuleName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDNSResponsePolicyRuleDnsName(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDNSResponsePolicyRuleLocalData(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLocalDatas, err := expandDNSResponsePolicyRuleLocalDataLocalDatas(original["local_datas"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocalDatas); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["localDatas"] = transformedLocalDatas + } + + return transformed, nil +} + +func expandDNSResponsePolicyRuleLocalDataLocalDatas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDNSResponsePolicyRuleLocalDataLocalDatasName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedType, err := expandDNSResponsePolicyRuleLocalDataLocalDatasType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedTtl, err := expandDNSResponsePolicyRuleLocalDataLocalDatasTtl(original["ttl"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ttl"] = transformedTtl + } + + 
transformedRrdatas, err := expandDNSResponsePolicyRuleLocalDataLocalDatasRrdatas(original["rrdatas"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRrdatas); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rrdatas"] = transformedRrdatas + } + + req = append(req, transformed) + } + return req, nil +} + +func expandDNSResponsePolicyRuleLocalDataLocalDatasName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDNSResponsePolicyRuleLocalDataLocalDatasType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDNSResponsePolicyRuleLocalDataLocalDatasTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDNSResponsePolicyRuleLocalDataLocalDatasRrdatas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_response_policy_rule_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_response_policy_rule_sweeper.go new file mode 100644 index 0000000000..f71dbdcde4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/dns/resource_dns_response_policy_rule_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package dns + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DNSResponsePolicyRule", testSweepDNSResponsePolicyRule) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDNSResponsePolicyRule(region string) error { + resourceName := "DNSResponsePolicyRule" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://dns.googleapis.com/dns/v1/projects/{{project}}/responsePolicies/{{response_policy}}/rules", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["responsePolicyRules"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://dns.googleapis.com/dns/v1/projects/{{project}}/responsePolicies/{{response_policy}}/rules/{{rule_name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/documentai/resource_document_ai_processor.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/documentai/resource_document_ai_processor.go new file mode 100644 index 0000000000..85a2dd48f9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/documentai/resource_document_ai_processor.go @@ -0,0 +1,312 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package documentai + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDocumentAIProcessor() *schema.Resource { + return &schema.Resource{ + Create: resourceDocumentAIProcessorCreate, + Read: resourceDocumentAIProcessorRead, + Delete: resourceDocumentAIProcessorDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDocumentAIProcessorImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The display name. Must be unique.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location of the resource.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The type of processor. For possible types see the [official list](https://cloud.google.com/document-ai/docs/reference/rest/v1/projects.locations/fetchProcessorTypes#google.cloud.documentai.v1.DocumentProcessorService.FetchProcessorTypes)`, + }, + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The KMS key used for encryption/decryption in CMEK scenarios. 
See https://cloud.google.com/security-key-management.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the processor.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDocumentAIProcessorCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + typeProp, err := expandDocumentAIProcessorType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + displayNameProp, err := expandDocumentAIProcessorDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + kmsKeyNameProp, err := expandDocumentAIProcessorKmsKeyName(d.Get("kms_key_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("kms_key_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(kmsKeyNameProp)) && (ok || !reflect.DeepEqual(v, kmsKeyNameProp)) { + obj["kmsKeyName"] = kmsKeyNameProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DocumentAIBasePath}}projects/{{project}}/locations/{{location}}/processors") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Processor: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Processor: %s", err) + } + billingProject = project + + // err == nil indicates that the 
billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Processor: %s", err) + } + if err := d.Set("name", flattenDocumentAIProcessorName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/processors/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Processor %q: %#v", d.Id(), res) + + return resourceDocumentAIProcessorRead(d, meta) +} + +func resourceDocumentAIProcessorRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DocumentAIBasePath}}projects/{{project}}/locations/{{location}}/processors/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Processor: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return 
transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DocumentAIProcessor %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Processor: %s", err) + } + + if err := d.Set("name", flattenDocumentAIProcessorName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Processor: %s", err) + } + if err := d.Set("type", flattenDocumentAIProcessorType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading Processor: %s", err) + } + if err := d.Set("display_name", flattenDocumentAIProcessorDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Processor: %s", err) + } + if err := d.Set("kms_key_name", flattenDocumentAIProcessorKmsKeyName(res["kmsKeyName"], d, config)); err != nil { + return fmt.Errorf("Error reading Processor: %s", err) + } + + return nil +} + +func resourceDocumentAIProcessorDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Processor: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{DocumentAIBasePath}}projects/{{project}}/locations/{{location}}/processors/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Processor %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Processor") + } + + log.Printf("[DEBUG] Finished deleting Processor %q: %#v", d.Id(), res) + return nil +} + +func resourceDocumentAIProcessorImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/processors/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/processors/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDocumentAIProcessorName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenDocumentAIProcessorType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDocumentAIProcessorDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDocumentAIProcessorKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDocumentAIProcessorType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDocumentAIProcessorDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDocumentAIProcessorKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/documentai/resource_document_ai_processor_default_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/documentai/resource_document_ai_processor_default_version.go new file mode 100644 index 0000000000..8047a1942b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/documentai/resource_document_ai_processor_default_version.go @@ -0,0 +1,211 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package documentai + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceDocumentAIProcessorDefaultVersion() *schema.Resource { + return &schema.Resource{ + Create: resourceDocumentAIProcessorDefaultVersionCreate, + Read: resourceDocumentAIProcessorDefaultVersionRead, + Delete: resourceDocumentAIProcessorDefaultVersionDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDocumentAIProcessorDefaultVersionImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "processor": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The processor to set the version on.`, + }, + "version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `The version to set. Using 'stable' or 'rc' will cause the API to return the latest version in that release channel. 
+Apply 'lifecycle.ignore_changes' to the 'version' field to suppress this diff.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDocumentAIProcessorDefaultVersionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + defaultProcessorVersionProp, err := expandDocumentAIProcessorDefaultVersionVersion(d.Get("version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultProcessorVersionProp)) && (ok || !reflect.DeepEqual(v, defaultProcessorVersionProp)) { + obj["defaultProcessorVersion"] = defaultProcessorVersionProp + } + processorProp, err := expandDocumentAIProcessorDefaultVersionProcessor(d.Get("processor"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("processor"); !tpgresource.IsEmptyValue(reflect.ValueOf(processorProp)) && (ok || !reflect.DeepEqual(v, processorProp)) { + obj["processor"] = processorProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DocumentAIBasePath}}{{processor}}:setDefaultProcessorVersion") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ProcessorDefaultVersion: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + if strings.Contains(url, "https://-") { + location := tpgresource.GetRegionFromRegionalSelfLink(url) + url = strings.TrimPrefix(url, "https://") + url = "https://" + location + url + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != 
nil { + return fmt.Errorf("Error creating ProcessorDefaultVersion: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{processor}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ProcessorDefaultVersion %q: %#v", d.Id(), res) + + return resourceDocumentAIProcessorDefaultVersionRead(d, meta) +} + +func resourceDocumentAIProcessorDefaultVersionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{DocumentAIBasePath}}{{processor}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + if strings.Contains(url, "https://-") { + location := tpgresource.GetRegionFromRegionalSelfLink(url) + url = strings.TrimPrefix(url, "https://") + url = "https://" + location + url + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("DocumentAIProcessorDefaultVersion %q", d.Id())) + } + + if err := d.Set("version", flattenDocumentAIProcessorDefaultVersionVersion(res["defaultProcessorVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading ProcessorDefaultVersion: %s", err) + } + + return nil +} + +func resourceDocumentAIProcessorDefaultVersionDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] DocumentAI ProcessorDefaultVersion resources"+ + " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func resourceDocumentAIProcessorDefaultVersionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P.+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{processor}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDocumentAIProcessorDefaultVersionVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandDocumentAIProcessorDefaultVersionVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDocumentAIProcessorDefaultVersionProcessor(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/documentai/resource_document_ai_processor_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/documentai/resource_document_ai_processor_sweeper.go new file mode 100644 index 0000000000..2ada7c1f98 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/documentai/resource_document_ai_processor_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package documentai + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("DocumentAIProcessor", testSweepDocumentAIProcessor) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepDocumentAIProcessor(region string) error { + resourceName := "DocumentAIProcessor" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://{{location}}-documentai.googleapis.com/v1/projects/{{project}}/locations/{{location}}/processors", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["processors"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://{{location}}-documentai.googleapis.com/v1/projects/{{project}}/locations/{{location}}/processors/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/essentialcontacts/resource_essential_contacts_contact.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/essentialcontacts/resource_essential_contacts_contact.go new file mode 100644 index 0000000000..1c124a0f2c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/essentialcontacts/resource_essential_contacts_contact.go @@ -0,0 +1,352 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package essentialcontacts + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceEssentialContactsContact() *schema.Resource { + return &schema.Resource{ + Create: resourceEssentialContactsContactCreate, + Read: resourceEssentialContactsContactRead, + Update: resourceEssentialContactsContactUpdate, + Delete: resourceEssentialContactsContactDelete, + + Importer: &schema.ResourceImporter{ + State: resourceEssentialContactsContactImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The email address to send notifications to. This does not need to be a Google account.`, + }, + "language_tag": { + Type: schema.TypeString, + Required: true, + Description: `The preferred language for notifications, as a ISO 639-1 language code. See Supported languages for a list of supported languages.`, + }, + "notification_category_subscriptions": { + Type: schema.TypeList, + Required: true, + Description: `The categories of notifications that the contact will receive communications for.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource to save this contact for. Format: organizations/{organization_id}, folders/{folder_id} or projects/{project_id}`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The identifier for the contact. 
Format: {resourceType}/{resource_id}/contacts/{contact_id}`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceEssentialContactsContactCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + emailProp, err := expandEssentialContactsContactEmail(d.Get("email"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("email"); !tpgresource.IsEmptyValue(reflect.ValueOf(emailProp)) && (ok || !reflect.DeepEqual(v, emailProp)) { + obj["email"] = emailProp + } + notificationCategorySubscriptionsProp, err := expandEssentialContactsContactNotificationCategorySubscriptions(d.Get("notification_category_subscriptions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_category_subscriptions"); !tpgresource.IsEmptyValue(reflect.ValueOf(notificationCategorySubscriptionsProp)) && (ok || !reflect.DeepEqual(v, notificationCategorySubscriptionsProp)) { + obj["notificationCategorySubscriptions"] = notificationCategorySubscriptionsProp + } + languageTagProp, err := expandEssentialContactsContactLanguageTag(d.Get("language_tag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("language_tag"); !tpgresource.IsEmptyValue(reflect.ValueOf(languageTagProp)) && (ok || !reflect.DeepEqual(v, languageTagProp)) { + obj["languageTag"] = languageTagProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{EssentialContactsBasePath}}{{parent}}/contacts") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Contact: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ 
+ Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Contact: %s", err) + } + if err := d.Set("name", flattenEssentialContactsContactName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Contact %q: %#v", d.Id(), res) + + return resourceEssentialContactsContactRead(d, meta) +} + +func resourceEssentialContactsContactRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{EssentialContactsBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("EssentialContactsContact %q", d.Id())) + } + + if err := d.Set("name", flattenEssentialContactsContactName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Contact: %s", err) + } + if err := d.Set("email", flattenEssentialContactsContactEmail(res["email"], d, config)); err != nil { + return fmt.Errorf("Error reading Contact: %s", err) + } + if err := d.Set("notification_category_subscriptions", 
flattenEssentialContactsContactNotificationCategorySubscriptions(res["notificationCategorySubscriptions"], d, config)); err != nil { + return fmt.Errorf("Error reading Contact: %s", err) + } + if err := d.Set("language_tag", flattenEssentialContactsContactLanguageTag(res["languageTag"], d, config)); err != nil { + return fmt.Errorf("Error reading Contact: %s", err) + } + + return nil +} + +func resourceEssentialContactsContactUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + notificationCategorySubscriptionsProp, err := expandEssentialContactsContactNotificationCategorySubscriptions(d.Get("notification_category_subscriptions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_category_subscriptions"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationCategorySubscriptionsProp)) { + obj["notificationCategorySubscriptions"] = notificationCategorySubscriptionsProp + } + languageTagProp, err := expandEssentialContactsContactLanguageTag(d.Get("language_tag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("language_tag"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, languageTagProp)) { + obj["languageTag"] = languageTagProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{EssentialContactsBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Contact %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("notification_category_subscriptions") { + updateMask = append(updateMask, "notificationCategorySubscriptions") + } + + if d.HasChange("language_tag") { + updateMask = append(updateMask, "languageTag") + } + // updateMask is a URL parameter but not 
present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Contact %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Contact %q: %#v", d.Id(), res) + } + + return resourceEssentialContactsContactRead(d, meta) +} + +func resourceEssentialContactsContactDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{EssentialContactsBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Contact %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Contact") + } + + log.Printf("[DEBUG] Finished deleting Contact %q: %#v", d.Id(), res) + return nil +} + +func resourceEssentialContactsContactImport(d *schema.ResourceData, meta interface{}) 
([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P.+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenEssentialContactsContactName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenEssentialContactsContactEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenEssentialContactsContactNotificationCategorySubscriptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenEssentialContactsContactLanguageTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandEssentialContactsContactEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandEssentialContactsContactNotificationCategorySubscriptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandEssentialContactsContactLanguageTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/essentialcontacts/resource_essential_contacts_contact_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/essentialcontacts/resource_essential_contacts_contact_sweeper.go new file mode 100644 index 0000000000..0310eae316 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/essentialcontacts/resource_essential_contacts_contact_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package essentialcontacts + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("EssentialContactsContact", testSweepEssentialContactsContact) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepEssentialContactsContact(region string) error { + resourceName := "EssentialContactsContact" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := 
&tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://essentialcontacts.googleapis.com/v1/{{name}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["contacts"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://essentialcontacts.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_eventarc_channel.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_channel.go similarity index 80% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_eventarc_channel.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_channel.go index 58e38018a4..25e0bd58bf 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_eventarc_channel.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_channel.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package eventarc import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceEventarcChannel() *schema.Resource { @@ -62,7 +69,7 @@ func ResourceEventarcChannel() *schema.Resource { "crypto_key_name": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. Resource name of a KMS crypto key (managed by the user) used to encrypt/decrypt their event data. 
It must match the pattern `projects/*/locations/*/keyRings/*/cryptoKeys/*`.", }, @@ -71,7 +78,7 @@ func ResourceEventarcChannel() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -122,8 +129,8 @@ func ResourceEventarcChannel() *schema.Resource { } func resourceEventarcChannelCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -141,18 +148,18 @@ func resourceEventarcChannelCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -174,8 +181,8 @@ func resourceEventarcChannelCreate(d *schema.ResourceData, meta interface{}) err } func resourceEventarcChannelRead(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -188,17 +195,17 @@ func resourceEventarcChannelRead(d *schema.ResourceData, meta interface{}) error ThirdPartyProvider: dcl.String(d.Get("third_party_provider").(string)), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -207,7 +214,7 @@ func resourceEventarcChannelRead(d *schema.ResourceData, meta interface{}) error res, err := client.GetChannel(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("EventarcChannel %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("location", res.Location); err != nil { @@ -247,8 +254,8 @@ func resourceEventarcChannelRead(d *schema.ResourceData, meta interface{}) error return nil } func resourceEventarcChannelUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := 
meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -260,19 +267,19 @@ func resourceEventarcChannelUpdate(d *schema.ResourceData, meta interface{}) err Project: dcl.String(project), ThirdPartyProvider: dcl.String(d.Get("third_party_provider").(string)), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -294,8 +301,8 @@ func resourceEventarcChannelUpdate(d *schema.ResourceData, meta interface{}) err } func resourceEventarcChannelDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -309,17 +316,17 @@ func resourceEventarcChannelDelete(d *schema.ResourceData, meta interface{}) err } log.Printf("[DEBUG] Deleting Channel %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject 
:= project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -334,9 +341,9 @@ func resourceEventarcChannelDelete(d *schema.ResourceData, meta interface{}) err } func resourceEventarcChannelImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/channels/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -345,7 +352,7 @@ func resourceEventarcChannelImport(d *schema.ResourceData, meta interface{}) ([] } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/channels/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/channels/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_channel_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_channel_sweeper.go new file mode 100644 index 0000000000..51158ee0a3 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_channel_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package eventarc + +import ( + "context" + "log" + "testing" + + eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("EventarcChannel", testSweepEventarcChannel) +} + +func testSweepEventarcChannel(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for EventarcChannel") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := 
envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLEventarcClient(config, config.UserAgent, "", 0) + err = client.DeleteAllChannel(context.Background(), d["project"], d["location"], isDeletableEventarcChannel) + if err != nil { + return err + } + return nil +} + +func isDeletableEventarcChannel(r *eventarc.Channel) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_eventarc_google_channel_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_google_channel_config.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_eventarc_google_channel_config.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_google_channel_config.go index 246f25b5e4..95c9869719 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_eventarc_google_channel_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_google_channel_config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package eventarc import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceEventarcGoogleChannelConfig() *schema.Resource { @@ -62,7 +69,7 @@ func ResourceEventarcGoogleChannelConfig() *schema.Resource { "crypto_key_name": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. Resource name of a KMS crypto key (managed by the user) used to encrypt/decrypt their event data. 
It must match the pattern `projects/*/locations/*/keyRings/*/cryptoKeys/*`.", }, @@ -71,7 +78,7 @@ func ResourceEventarcGoogleChannelConfig() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -85,8 +92,8 @@ func ResourceEventarcGoogleChannelConfig() *schema.Resource { } func resourceEventarcGoogleChannelConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -104,18 +111,18 @@ func resourceEventarcGoogleChannelConfigCreate(d *schema.ResourceData, meta inte } d.SetId(id) - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -137,8 +144,8 @@ func resourceEventarcGoogleChannelConfigCreate(d *schema.ResourceData, meta inte } func resourceEventarcGoogleChannelConfigRead(d *schema.ResourceData, 
meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -150,17 +157,17 @@ func resourceEventarcGoogleChannelConfigRead(d *schema.ResourceData, meta interf Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -169,7 +176,7 @@ func resourceEventarcGoogleChannelConfigRead(d *schema.ResourceData, meta interf res, err := client.GetGoogleChannelConfig(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("EventarcGoogleChannelConfig %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("location", res.Location); err != nil { @@ -191,8 +198,8 @@ func resourceEventarcGoogleChannelConfigRead(d *schema.ResourceData, meta interf return nil } func resourceEventarcGoogleChannelConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + 
project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -203,19 +210,19 @@ func resourceEventarcGoogleChannelConfigUpdate(d *schema.ResourceData, meta inte CryptoKeyName: dcl.String(d.Get("crypto_key_name").(string)), Project: dcl.String(project), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -237,8 +244,8 @@ func resourceEventarcGoogleChannelConfigUpdate(d *schema.ResourceData, meta inte } func resourceEventarcGoogleChannelConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -251,17 +258,17 @@ func resourceEventarcGoogleChannelConfigDelete(d *schema.ResourceData, meta inte } log.Printf("[DEBUG] Deleting GoogleChannelConfig %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err 
== nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -276,9 +283,9 @@ func resourceEventarcGoogleChannelConfigDelete(d *schema.ResourceData, meta inte } func resourceEventarcGoogleChannelConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/googleChannelConfig", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -287,7 +294,7 @@ func resourceEventarcGoogleChannelConfigImport(d *schema.ResourceData, meta inte } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/googleChannelConfig") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/googleChannelConfig") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_eventarc_trigger.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_trigger.go similarity index 78% rename from 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_eventarc_trigger.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_trigger.go index 1aa8c72df2..50b9b0fd54 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_eventarc_trigger.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_trigger.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package eventarc import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceEventarcTrigger() *schema.Resource { @@ -79,10 +86,16 @@ func ResourceEventarcTrigger() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. The name of the channel associated with the trigger in `projects/{project}/locations/{location}/channels/{channel}` format. You must provide a channel to receive events from Eventarc SaaS partners.", }, + "event_data_content_type": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. 
EventDataContentType specifies the type of payload in MIME format that is expected from the CloudEvent data field. This is set to `application/json` if the value is not defined.", + }, + "labels": { Type: schema.TypeMap, Optional: true, @@ -95,14 +108,14 @@ func ResourceEventarcTrigger() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, "service_account": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. The IAM service account email associated with the trigger. The service account represents the identity of the trigger. The principal who calls this API must have `iam.serviceAccounts.actAs` permission in the service account. See https://cloud.google.com/iam/docs/understanding-service-accounts#sa_common for more information. For Cloud Run destinations, this service account is used to generate identity tokens when invoking the service. See https://cloud.google.com/run/docs/triggering/pubsub-push#create-service-account for information on how to invoke authenticated Cloud Run services. In order to create Audit Log triggers, the service account should also have `roles/eventarc.eventReceiver` IAM role.", }, @@ -156,7 +169,7 @@ func EventarcTriggerDestinationSchema() *schema.Resource { "cloud_function": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "[WARNING] Configuring a Cloud Function in Trigger is not supported as of today. The Cloud Function resource name. 
Format: projects/{project}/locations/{location}/functions/{function}", }, @@ -179,7 +192,7 @@ func EventarcTriggerDestinationSchema() *schema.Resource { "workflow": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The resource name of the Workflow whose Executions are triggered by the events. The Workflow resource should be deployed in the same project as the trigger. Format: `projects/{project}/locations/{location}/workflows/{workflow}`", }, }, @@ -192,7 +205,7 @@ func EventarcTriggerDestinationCloudRunServiceSchema() *schema.Resource { "service": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Required. The name of the Cloud Run service being addressed. See https://cloud.google.com/run/docs/reference/rest/v1/namespaces.services. Only services located in the same project of the trigger object can be addressed.", }, @@ -218,7 +231,7 @@ func EventarcTriggerDestinationGkeSchema() *schema.Resource { "cluster": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Required. The name of the cluster the GKE service is running in. The cluster must be running in the same project as the trigger being created.", }, @@ -295,7 +308,7 @@ func EventarcTriggerTransportPubsubSchema() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Optional. The name of the Pub/Sub topic created and managed by Eventarc system as a transport for the event delivery. Format: `projects/{PROJECT_ID}/topics/{TOPIC_NAME}. 
You may set an existing topic for triggers of the type google.cloud.pubsub.topic.v1.messagePublished` only. The topic you provide here will not be deleted by Eventarc at trigger deletion.", }, @@ -309,22 +322,23 @@ func EventarcTriggerTransportPubsubSchema() *schema.Resource { } func resourceEventarcTriggerCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } obj := &eventarc.Trigger{ - Destination: expandEventarcTriggerDestination(d.Get("destination")), - Location: dcl.String(d.Get("location").(string)), - MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), - Name: dcl.String(d.Get("name").(string)), - Channel: dcl.String(d.Get("channel").(string)), - Labels: checkStringMap(d.Get("labels")), - Project: dcl.String(project), - ServiceAccount: dcl.String(d.Get("service_account").(string)), - Transport: expandEventarcTriggerTransport(d.Get("transport")), + Destination: expandEventarcTriggerDestination(d.Get("destination")), + Location: dcl.String(d.Get("location").(string)), + MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), + Name: dcl.String(d.Get("name").(string)), + Channel: dcl.String(d.Get("channel").(string)), + EventDataContentType: dcl.String(d.Get("event_data_content_type").(string)), + Labels: tpgresource.CheckStringMap(d.Get("labels")), + Project: dcl.String(project), + ServiceAccount: dcl.String(d.Get("service_account").(string)), + Transport: expandEventarcTriggerTransport(d.Get("transport")), } id, err := obj.ID() @@ -332,18 +346,18 @@ func resourceEventarcTriggerCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive 
:= tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -365,35 +379,36 @@ func resourceEventarcTriggerCreate(d *schema.ResourceData, meta interface{}) err } func resourceEventarcTriggerRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } obj := &eventarc.Trigger{ - Destination: expandEventarcTriggerDestination(d.Get("destination")), - Location: dcl.String(d.Get("location").(string)), - MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), - Name: dcl.String(d.Get("name").(string)), - Channel: dcl.String(d.Get("channel").(string)), - Labels: checkStringMap(d.Get("labels")), - Project: dcl.String(project), - ServiceAccount: dcl.String(d.Get("service_account").(string)), - Transport: expandEventarcTriggerTransport(d.Get("transport")), - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) + Destination: expandEventarcTriggerDestination(d.Get("destination")), + Location: dcl.String(d.Get("location").(string)), + MatchingCriteria: 
expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), + Name: dcl.String(d.Get("name").(string)), + Channel: dcl.String(d.Get("channel").(string)), + EventDataContentType: dcl.String(d.Get("event_data_content_type").(string)), + Labels: tpgresource.CheckStringMap(d.Get("labels")), + Project: dcl.String(project), + ServiceAccount: dcl.String(d.Get("service_account").(string)), + Transport: expandEventarcTriggerTransport(d.Get("transport")), + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -402,7 +417,7 @@ func resourceEventarcTriggerRead(d *schema.ResourceData, meta interface{}) error res, err := client.GetTrigger(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("EventarcTrigger %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("destination", flattenEventarcTriggerDestination(res.Destination)); err != nil { @@ -420,6 +435,9 @@ func resourceEventarcTriggerRead(d *schema.ResourceData, meta interface{}) error if err = d.Set("channel", res.Channel); err != nil { return fmt.Errorf("error setting channel in state: %s", err) } + if err = 
d.Set("event_data_content_type", res.EventDataContentType); err != nil { + return fmt.Errorf("error setting event_data_content_type in state: %s", err) + } if err = d.Set("labels", res.Labels); err != nil { return fmt.Errorf("error setting labels in state: %s", err) } @@ -451,36 +469,37 @@ func resourceEventarcTriggerRead(d *schema.ResourceData, meta interface{}) error return nil } func resourceEventarcTriggerUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } obj := &eventarc.Trigger{ - Destination: expandEventarcTriggerDestination(d.Get("destination")), - Location: dcl.String(d.Get("location").(string)), - MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), - Name: dcl.String(d.Get("name").(string)), - Channel: dcl.String(d.Get("channel").(string)), - Labels: checkStringMap(d.Get("labels")), - Project: dcl.String(project), - ServiceAccount: dcl.String(d.Get("service_account").(string)), - Transport: expandEventarcTriggerTransport(d.Get("transport")), - } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + Destination: expandEventarcTriggerDestination(d.Get("destination")), + Location: dcl.String(d.Get("location").(string)), + MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), + Name: dcl.String(d.Get("name").(string)), + Channel: dcl.String(d.Get("channel").(string)), + EventDataContentType: dcl.String(d.Get("event_data_content_type").(string)), + Labels: tpgresource.CheckStringMap(d.Get("labels")), + Project: dcl.String(project), + ServiceAccount: dcl.String(d.Get("service_account").(string)), + Transport: expandEventarcTriggerTransport(d.Get("transport")), + } + directive := tpgdclresource.UpdateDirective + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -502,36 +521,37 @@ func resourceEventarcTriggerUpdate(d *schema.ResourceData, meta interface{}) err } func resourceEventarcTriggerDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } obj := &eventarc.Trigger{ - Destination: expandEventarcTriggerDestination(d.Get("destination")), - Location: dcl.String(d.Get("location").(string)), - MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), - Name: dcl.String(d.Get("name").(string)), - Channel: dcl.String(d.Get("channel").(string)), - Labels: checkStringMap(d.Get("labels")), - Project: dcl.String(project), - ServiceAccount: dcl.String(d.Get("service_account").(string)), - Transport: expandEventarcTriggerTransport(d.Get("transport")), + Destination: expandEventarcTriggerDestination(d.Get("destination")), + Location: dcl.String(d.Get("location").(string)), + MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), + Name: dcl.String(d.Get("name").(string)), + Channel: 
dcl.String(d.Get("channel").(string)), + EventDataContentType: dcl.String(d.Get("event_data_content_type").(string)), + Labels: tpgresource.CheckStringMap(d.Get("labels")), + Project: dcl.String(project), + ServiceAccount: dcl.String(d.Get("service_account").(string)), + Transport: expandEventarcTriggerTransport(d.Get("transport")), } log.Printf("[DEBUG] Deleting Trigger %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -546,9 +566,9 @@ func resourceEventarcTriggerDelete(d *schema.ResourceData, meta interface{}) err } func resourceEventarcTriggerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/triggers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -557,7 +577,7 @@ func resourceEventarcTriggerImport(d *schema.ResourceData, meta interface{}) ([] } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, 
"projects/{{project}}/locations/{{location}}/triggers/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_trigger_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_trigger_sweeper.go new file mode 100644 index 0000000000..118e459b3f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/eventarc/resource_eventarc_trigger_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package eventarc + +import ( + "context" + "log" + "testing" + + eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("EventarcTrigger", testSweepEventarcTrigger) +} + +func testSweepEventarcTrigger(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for EventarcTrigger") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLEventarcClient(config, config.UserAgent, "", 0) + err = client.DeleteAllTrigger(context.Background(), d["project"], d["location"], isDeletableEventarcTrigger) + if err != nil { + return err + } + return nil +} + +func isDeletableEventarcTrigger(r *eventarc.Trigger) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/filestore_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/filestore_operation.go new file mode 100644 index 0000000000..e6103d2a32 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/filestore_operation.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package filestore + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type FilestoreOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *FilestoreOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.FilestoreBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) +} + +func createFilestoreWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*FilestoreOperationWaiter, error) { + w := &FilestoreOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func FilestoreOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createFilestoreWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func FilestoreOperationWaitTime(config *transport_tpg.Config, op 
map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createFilestoreWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_backup.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_backup.go new file mode 100644 index 0000000000..85185d7331 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_backup.go @@ -0,0 +1,568 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package filestore + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceFilestoreBackup() *schema.Resource { + return &schema.Resource{ + Create: resourceFilestoreBackupCreate, + Read: resourceFilestoreBackupRead, + Update: resourceFilestoreBackupUpdate, + Delete: resourceFilestoreBackupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceFilestoreBackupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the location of the instance. This can be a region for ENTERPRISE tier instances.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name of the backup. The name must be unique within the specified instance. + +The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "source_file_share": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the file share in the source Cloud Filestore instance that the backup is created from.`, + }, + "source_instance": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the source Cloud Filestore instance, in the format projects/{projectId}/locations/{locationId}/instances/{instanceId}, used to create this backup.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of the backup with 2048 characters or less. Requests with longer descriptions will be rejected.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Resource labels to represent user-provided metadata.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "capacity_gb": { + Type: schema.TypeString, + Computed: true, + Description: `The amount of bytes needed to allocate a full copy of the snapshot content.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the snapshot was created in RFC3339 text format.`, + }, + "download_bytes": { + Type: schema.TypeString, + Computed: true, + Description: `Amount of bytes that will be downloaded if the backup is restored.`, + }, + "kms_key_name": { + Type: schema.TypeString, + Computed: true, + Description: `KMS key name used for data encryption.`, + }, + "source_instance_tier": { + Type: schema.TypeString, + Computed: true, + Description: `The service tier of the source Cloud Filestore instance that this backup is created from.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The backup state.`, + }, + "storage_bytes": { + Type: schema.TypeString, + Computed: true, + Description: 
`The size of the storage used by the backup. As backups share storage, this number is expected to change with backup creation/deletion.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceFilestoreBackupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandFilestoreBackupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandFilestoreBackupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + sourceInstanceProp, err := expandFilestoreBackupSourceInstance(d.Get("source_instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceInstanceProp)) && (ok || !reflect.DeepEqual(v, sourceInstanceProp)) { + obj["sourceInstance"] = sourceInstanceProp + } + sourceFileShareProp, err := expandFilestoreBackupSourceFileShare(d.Get("source_file_share"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_file_share"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceFileShareProp)) && (ok || !reflect.DeepEqual(v, sourceFileShareProp)) { + obj["sourceFileShare"] = sourceFileShareProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "filestore/{{project}}") + if err != nil { + 
return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/backups?backupId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Backup: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return fmt.Errorf("Error creating Backup: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = FilestoreOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Backup", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Backup: %s", err) + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Backup %q: %#v", d.Id(), res) + + return resourceFilestoreBackupRead(d, meta) +} + +func resourceFilestoreBackupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/backups/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("FilestoreBackup %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + + if err := d.Set("description", flattenFilestoreBackupDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("state", flattenFilestoreBackupState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("create_time", 
flattenFilestoreBackupCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("labels", flattenFilestoreBackupLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("capacity_gb", flattenFilestoreBackupCapacityGb(res["capacityGb"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("storage_bytes", flattenFilestoreBackupStorageBytes(res["storageBytes"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("source_instance", flattenFilestoreBackupSourceInstance(res["sourceInstance"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("source_file_share", flattenFilestoreBackupSourceFileShare(res["sourceFileShare"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("source_instance_tier", flattenFilestoreBackupSourceInstanceTier(res["sourceInstanceTier"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("download_bytes", flattenFilestoreBackupDownloadBytes(res["downloadBytes"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("kms_key_name", flattenFilestoreBackupKmsKeyName(res["kmsKeyName"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + + return nil +} + +func resourceFilestoreBackupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + obj := 
make(map[string]interface{}) + descriptionProp, err := expandFilestoreBackupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandFilestoreBackupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + sourceInstanceProp, err := expandFilestoreBackupSourceInstance(d.Get("source_instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceInstanceProp)) { + obj["sourceInstance"] = sourceInstanceProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "filestore/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/backups/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Backup %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("source_instance") { + updateMask = append(updateMask, "sourceInstance") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value 
was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + + if err != nil { + return fmt.Errorf("Error updating Backup %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Backup %q: %#v", d.Id(), res) + } + + err = FilestoreOperationWaitTime( + config, res, project, "Updating Backup", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceFilestoreBackupRead(d, meta) +} + +func resourceFilestoreBackupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "filestore/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/backups/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Backup %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: 
"DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Backup") + } + + err = FilestoreOperationWaitTime( + config, res, project, "Deleting Backup", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Backup %q: %#v", d.Id(), res) + return nil +} + +func resourceFilestoreBackupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/backups/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenFilestoreBackupDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreBackupState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreBackupCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreBackupLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreBackupCapacityGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreBackupStorageBytes(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreBackupSourceInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreBackupSourceFileShare(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreBackupSourceInstanceTier(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreBackupDownloadBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreBackupKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandFilestoreBackupDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFilestoreBackupLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandFilestoreBackupSourceInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFilestoreBackupSourceFileShare(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_backup_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_backup_sweeper.go new file mode 100644 index 0000000000..a0f6ce51ab --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_backup_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package filestore + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("FilestoreBackup", testSweepFilestoreBackup) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepFilestoreBackup(region string) error { + resourceName := "FilestoreBackup" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": 
config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://file.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backups", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["backups"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://file.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backups/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_filestore_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_instance.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_filestore_instance.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_instance.go index 204bf81319..19937d4997 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_filestore_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_instance.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package filestore import ( "context" @@ -24,6 +27,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceFilestoreInstance() *schema.Resource { @@ -44,10 +51,11 @@ func ResourceFilestoreInstance() *schema.Resource { }, SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ { Type: resourceFilestoreInstanceResourceV0().CoreConfigSchema().ImpliedType(), - Upgrade: resourceFilestoreInstanceUpgradeV0, + Upgrade: ResourceFilestoreInstanceUpgradeV0, Version: 0, }, }, @@ -83,7 +91,7 @@ for the standard tier, or 2560 GiB for the premium tier.`, "access_mode": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"READ_ONLY", "READ_WRITE", ""}), + ValidateFunc: verify.ValidateEnum([]string{"READ_ONLY", "READ_WRITE", ""}), Description: `Either READ_ONLY, for allowing only read requests on the exported directory, or READ_WRITE, for allowing both read and write requests. The default is READ_WRITE. 
Default value: "READ_WRITE" Possible values: ["READ_ONLY", "READ_WRITE"]`, Default: "READ_WRITE", @@ -115,7 +123,7 @@ The limit is 64 IP ranges/addresses for each FileShareConfig among all NfsExport "squash_mode": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"NO_ROOT_SQUASH", "ROOT_SQUASH", ""}), + ValidateFunc: verify.ValidateEnum([]string{"NO_ROOT_SQUASH", "ROOT_SQUASH", ""}), Description: `Either NO_ROOT_SQUASH, for allowing root access on the exported directory, or ROOT_SQUASH, for not allowing root access. The default is NO_ROOT_SQUASH. Default value: "NO_ROOT_SQUASH" Possible values: ["NO_ROOT_SQUASH", "ROOT_SQUASH"]`, Default: "NO_ROOT_SQUASH", @@ -155,14 +163,14 @@ only a single network is supported.`, IP addresses assigned. Possible values: ["ADDRESS_MODE_UNSPECIFIED", "MODE_IPV4", "MODE_IPV6"]`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"ADDRESS_MODE_UNSPECIFIED", "MODE_IPV4", "MODE_IPV6"}), + ValidateFunc: verify.ValidateEnum([]string{"ADDRESS_MODE_UNSPECIFIED", "MODE_IPV4", "MODE_IPV6"}), }, }, "network": { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name of the GCE VPC network to which the instance is connected.`, }, @@ -170,7 +178,7 @@ instance is connected.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS", ""}), + ValidateFunc: verify.ValidateEnum([]string{"DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS", ""}), Description: `The network connect mode of the Filestore instance. If not provided, the connect mode defaults to DIRECT_PEERING. 
Default value: "DIRECT_PEERING" Possible values: ["DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS"]`, @@ -259,8 +267,8 @@ simultaneous updates from overwriting each other.`, } func resourceFilestoreInstanceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -269,41 +277,41 @@ func resourceFilestoreInstanceCreate(d *schema.ResourceData, meta interface{}) e descriptionProp, err := expandFilestoreInstanceDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } tierProp, err := expandFilestoreInstanceTier(d.Get("tier"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("tier"); !isEmptyValue(reflect.ValueOf(tierProp)) && (ok || !reflect.DeepEqual(v, tierProp)) { + } else if v, ok := d.GetOkExists("tier"); !tpgresource.IsEmptyValue(reflect.ValueOf(tierProp)) && (ok || !reflect.DeepEqual(v, tierProp)) { obj["tier"] = tierProp } labelsProp, err := expandFilestoreInstanceLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } fileSharesProp, err := expandFilestoreInstanceFileShares(d.Get("file_shares"), d, config) if err != nil { return err 
- } else if v, ok := d.GetOkExists("file_shares"); !isEmptyValue(reflect.ValueOf(fileSharesProp)) && (ok || !reflect.DeepEqual(v, fileSharesProp)) { + } else if v, ok := d.GetOkExists("file_shares"); !tpgresource.IsEmptyValue(reflect.ValueOf(fileSharesProp)) && (ok || !reflect.DeepEqual(v, fileSharesProp)) { obj["fileShares"] = fileSharesProp } networksProp, err := expandFilestoreInstanceNetworks(d.Get("networks"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("networks"); !isEmptyValue(reflect.ValueOf(networksProp)) && (ok || !reflect.DeepEqual(v, networksProp)) { + } else if v, ok := d.GetOkExists("networks"); !tpgresource.IsEmptyValue(reflect.ValueOf(networksProp)) && (ok || !reflect.DeepEqual(v, networksProp)) { obj["networks"] = networksProp } kmsKeyNameProp, err := expandFilestoreInstanceKmsKeyName(d.Get("kms_key_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("kms_key_name"); !isEmptyValue(reflect.ValueOf(kmsKeyNameProp)) && (ok || !reflect.DeepEqual(v, kmsKeyNameProp)) { + } else if v, ok := d.GetOkExists("kms_key_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(kmsKeyNameProp)) && (ok || !reflect.DeepEqual(v, kmsKeyNameProp)) { obj["kmsKeyName"] = kmsKeyNameProp } - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances?instanceId={{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances?instanceId={{name}}") if err != nil { return err } @@ -311,19 +319,19 @@ func resourceFilestoreInstanceCreate(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Creating new Instance: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Instance: %s", err) } billingProject = project // err == nil indicates that the billing_project value 
was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } if d.Get("location") == "" { - zone, err := getZone(d, config) + zone, err := tpgresource.GetZone(d, config) if err != nil { return err } @@ -334,18 +342,27 @@ func resourceFilestoreInstanceCreate(d *schema.ResourceData, meta interface{}) e } if strings.Contains(url, "locations//") { // re-compute url now that location must be set - url, err = replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances?instanceId={{name}}") + url, err = tpgresource.ReplaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances?instanceId={{name}}") if err != nil { return err } } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isNotFilestoreQuotaError) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) if err != nil { return fmt.Errorf("Error creating Instance: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -365,7 +382,7 @@ func resourceFilestoreInstanceCreate(d *schema.ResourceData, meta interface{}) e } // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -377,33 +394,40 @@ func resourceFilestoreInstanceCreate(d *schema.ResourceData, meta interface{}) e } func resourceFilestoreInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Instance: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil, isNotFilestoreQuotaError) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("FilestoreInstance %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, 
fmt.Sprintf("FilestoreInstance %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -439,15 +463,15 @@ func resourceFilestoreInstanceRead(d *schema.ResourceData, meta interface{}) err } func resourceFilestoreInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Instance: %s", err) } @@ -457,23 +481,23 @@ func resourceFilestoreInstanceUpdate(d *schema.ResourceData, meta interface{}) e descriptionProp, err := expandFilestoreInstanceDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } labelsProp, err := expandFilestoreInstanceLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } fileSharesProp, err := expandFilestoreInstanceFileShares(d.Get("file_shares"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("file_shares"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fileSharesProp)) { + } else if v, ok := d.GetOkExists("file_shares"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fileSharesProp)) { obj["fileShares"] = fileSharesProp } - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}") if err != nil { return err } @@ -492,19 +516,28 @@ func resourceFilestoreInstanceUpdate(d *schema.ResourceData, meta interface{}) e if d.HasChange("file_shares") { updateMask = append(updateMask, "fileShares") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isNotFilestoreQuotaError) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) if err != nil { return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) @@ -524,21 +557,21 @@ func resourceFilestoreInstanceUpdate(d *schema.ResourceData, meta interface{}) e } func resourceFilestoreInstanceDelete(d *schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Instance: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}") if err != nil { return err } @@ -547,13 +580,22 @@ func resourceFilestoreInstanceDelete(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Deleting Instance %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isNotFilestoreQuotaError) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) if err != nil { - return handleNotFoundError(err, d, "Instance") + return transport_tpg.HandleNotFoundError(err, d, "Instance") } err = FilestoreOperationWaitTime( @@ -569,8 +611,8 @@ func resourceFilestoreInstanceDelete(d *schema.ResourceData, meta interface{}) e } func resourceFilestoreInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := 
meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -579,7 +621,7 @@ func resourceFilestoreInstanceImport(d *schema.ResourceData, meta interface{}) ( } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -588,23 +630,23 @@ func resourceFilestoreInstanceImport(d *schema.ResourceData, meta interface{}) ( return []*schema.ResourceData{d}, nil } -func flattenFilestoreInstanceDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenFilestoreInstanceCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenFilestoreInstanceTier(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceTier(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenFilestoreInstanceLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenFilestoreInstanceFileShares(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceFileShares(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { if v == nil { return v } @@ -625,14 +667,14 @@ func flattenFilestoreInstanceFileShares(v interface{}, d *schema.ResourceData, c } return transformed } -func flattenFilestoreInstanceFileSharesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceFileSharesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenFilestoreInstanceFileSharesCapacityGb(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceFileSharesCapacityGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -646,11 +688,11 @@ func flattenFilestoreInstanceFileSharesCapacityGb(v interface{}, d *schema.Resou return v // let terraform core handle it otherwise } -func flattenFilestoreInstanceFileSharesSourceBackup(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceFileSharesSourceBackup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenFilestoreInstanceFileSharesNfsExportOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceFileSharesNfsExportOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -672,22 +714,22 @@ func flattenFilestoreInstanceFileSharesNfsExportOptions(v interface{}, d *schema } return transformed } -func flattenFilestoreInstanceFileSharesNfsExportOptionsIpRanges(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceFileSharesNfsExportOptionsIpRanges(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenFilestoreInstanceFileSharesNfsExportOptionsAccessMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceFileSharesNfsExportOptionsAccessMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenFilestoreInstanceFileSharesNfsExportOptionsSquashMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceFileSharesNfsExportOptionsSquashMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenFilestoreInstanceFileSharesNfsExportOptionsAnonUid(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceFileSharesNfsExportOptionsAnonUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -701,10 +743,10 @@ func flattenFilestoreInstanceFileSharesNfsExportOptionsAnonUid(v interface{}, d return v // let terraform core handle it otherwise } -func flattenFilestoreInstanceFileSharesNfsExportOptionsAnonGid(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceFileSharesNfsExportOptionsAnonGid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -718,7 +760,7 @@ func flattenFilestoreInstanceFileSharesNfsExportOptionsAnonGid(v interface{}, d return v // let terraform core handle it otherwise } -func flattenFilestoreInstanceNetworks(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceNetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -740,47 +782,47 @@ func flattenFilestoreInstanceNetworks(v interface{}, d *schema.ResourceData, con } return transformed } -func flattenFilestoreInstanceNetworksNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceNetworksNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenFilestoreInstanceNetworksModes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceNetworksModes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenFilestoreInstanceNetworksReservedIpRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceNetworksReservedIpRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenFilestoreInstanceNetworksIpAddresses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceNetworksIpAddresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenFilestoreInstanceNetworksConnectMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(reflect.ValueOf(v)) { +func flattenFilestoreInstanceNetworksConnectMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return "DIRECT_PEERING" } return v } -func flattenFilestoreInstanceEtag(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return 
v } -func flattenFilestoreInstanceKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenFilestoreInstanceKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandFilestoreInstanceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandFilestoreInstanceTier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceTier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandFilestoreInstanceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandFilestoreInstanceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -791,7 +833,7 @@ func expandFilestoreInstanceLabels(v interface{}, d TerraformResourceData, confi return m, nil } -func expandFilestoreInstanceFileShares(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceFileShares(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -804,28 +846,28 @@ func expandFilestoreInstanceFileShares(v interface{}, d TerraformResourceData, c transformedName, err := expandFilestoreInstanceFileSharesName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) 
{ transformed["name"] = transformedName } transformedCapacityGb, err := expandFilestoreInstanceFileSharesCapacityGb(original["capacity_gb"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCapacityGb); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCapacityGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["capacityGb"] = transformedCapacityGb } transformedSourceBackup, err := expandFilestoreInstanceFileSharesSourceBackup(original["source_backup"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSourceBackup); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSourceBackup); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sourceBackup"] = transformedSourceBackup } transformedNfsExportOptions, err := expandFilestoreInstanceFileSharesNfsExportOptions(original["nfs_export_options"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNfsExportOptions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNfsExportOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nfsExportOptions"] = transformedNfsExportOptions } @@ -834,19 +876,19 @@ func expandFilestoreInstanceFileShares(v interface{}, d TerraformResourceData, c return req, nil } -func expandFilestoreInstanceFileSharesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceFileSharesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandFilestoreInstanceFileSharesCapacityGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceFileSharesCapacityGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return 
v, nil } -func expandFilestoreInstanceFileSharesSourceBackup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceFileSharesSourceBackup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandFilestoreInstanceFileSharesNfsExportOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceFileSharesNfsExportOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -859,35 +901,35 @@ func expandFilestoreInstanceFileSharesNfsExportOptions(v interface{}, d Terrafor transformedIpRanges, err := expandFilestoreInstanceFileSharesNfsExportOptionsIpRanges(original["ip_ranges"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIpRanges); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIpRanges); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ipRanges"] = transformedIpRanges } transformedAccessMode, err := expandFilestoreInstanceFileSharesNfsExportOptionsAccessMode(original["access_mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAccessMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAccessMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["accessMode"] = transformedAccessMode } transformedSquashMode, err := expandFilestoreInstanceFileSharesNfsExportOptionsSquashMode(original["squash_mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSquashMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSquashMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["squashMode"] = transformedSquashMode } transformedAnonUid, err := expandFilestoreInstanceFileSharesNfsExportOptionsAnonUid(original["anon_uid"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAnonUid); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAnonUid); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["anonUid"] = transformedAnonUid } transformedAnonGid, err := expandFilestoreInstanceFileSharesNfsExportOptionsAnonGid(original["anon_gid"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAnonGid); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAnonGid); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["anonGid"] = transformedAnonGid } @@ -896,27 +938,27 @@ func expandFilestoreInstanceFileSharesNfsExportOptions(v interface{}, d Terrafor return req, nil } -func expandFilestoreInstanceFileSharesNfsExportOptionsIpRanges(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceFileSharesNfsExportOptionsIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandFilestoreInstanceFileSharesNfsExportOptionsAccessMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceFileSharesNfsExportOptionsAccessMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandFilestoreInstanceFileSharesNfsExportOptionsSquashMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceFileSharesNfsExportOptionsSquashMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandFilestoreInstanceFileSharesNfsExportOptionsAnonUid(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceFileSharesNfsExportOptionsAnonUid(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandFilestoreInstanceFileSharesNfsExportOptionsAnonGid(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceFileSharesNfsExportOptionsAnonGid(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandFilestoreInstanceNetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceNetworks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -929,35 +971,35 @@ func expandFilestoreInstanceNetworks(v interface{}, d TerraformResourceData, con transformedNetwork, err := expandFilestoreInstanceNetworksNetwork(original["network"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNetwork); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["network"] = transformedNetwork } transformedModes, err := expandFilestoreInstanceNetworksModes(original["modes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedModes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedModes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["modes"] = transformedModes } transformedReservedIpRange, err := expandFilestoreInstanceNetworksReservedIpRange(original["reserved_ip_range"], d, config) if err != nil { return nil, err - } else if 
val := reflect.ValueOf(transformedReservedIpRange); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReservedIpRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["reservedIpRange"] = transformedReservedIpRange } transformedIpAddresses, err := expandFilestoreInstanceNetworksIpAddresses(original["ip_addresses"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIpAddresses); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIpAddresses); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ipAddresses"] = transformedIpAddresses } transformedConnectMode, err := expandFilestoreInstanceNetworksConnectMode(original["connect_mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedConnectMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedConnectMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["connectMode"] = transformedConnectMode } @@ -966,27 +1008,27 @@ func expandFilestoreInstanceNetworks(v interface{}, d TerraformResourceData, con return req, nil } -func expandFilestoreInstanceNetworksNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceNetworksNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandFilestoreInstanceNetworksModes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceNetworksModes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandFilestoreInstanceNetworksReservedIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceNetworksReservedIpRange(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandFilestoreInstanceNetworksIpAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceNetworksIpAddresses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandFilestoreInstanceNetworksConnectMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceNetworksConnectMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandFilestoreInstanceKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandFilestoreInstanceKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -1171,7 +1213,7 @@ simultaneous updates from overwriting each other.`, } } -func resourceFilestoreInstanceUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { +func ResourceFilestoreInstanceUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { log.Printf("[DEBUG] Attributes before migration: %#v", rawState) rawState["location"] = rawState["zone"] diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_instance_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_instance_sweeper.go new file mode 100644 index 0000000000..e1accb9b68 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_instance_sweeper.go @@ -0,0 +1,139 @@ +// 
Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package filestore + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("FilestoreInstance", testSweepFilestoreInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepFilestoreInstance(region string) error { + resourceName := "FilestoreInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://file.googleapis.com/v1/projects/{{project}}/locations/{{location}}/instances", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["instances"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://file.googleapis.com/v1/projects/{{project}}/locations/{{location}}/instances/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : 
%s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_snapshot.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_snapshot.go new file mode 100644 index 0000000000..0f1d820ea5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_snapshot.go @@ -0,0 +1,471 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package filestore + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceFilestoreSnapshot() *schema.Resource { + return &schema.Resource{ + Create: resourceFilestoreSnapshotCreate, + Read: resourceFilestoreSnapshotRead, + Update: resourceFilestoreSnapshotUpdate, + Delete: resourceFilestoreSnapshotDelete, + + Importer: &schema.ResourceImporter{ + State: resourceFilestoreSnapshotImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name of the filestore instance.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the location of the instance. This can be a region for ENTERPRISE tier instances.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name of the snapshot. The name must be unique within the specified instance. + +The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of the snapshot with 2048 characters or less. Requests with longer descriptions will be rejected.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Resource labels to represent user-provided metadata.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the snapshot was created in RFC3339 text format.`, + }, + "filesystem_used_bytes": { + Type: schema.TypeString, + Computed: true, + Description: `The amount of bytes needed to allocate a full copy of the snapshot content.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The snapshot state.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceFilestoreSnapshotCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandFilestoreSnapshotDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandFilestoreSnapshotLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, 
labelsProp)) { + obj["labels"] = labelsProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "filestore/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots?snapshotId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Snapshot: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Snapshot: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return fmt.Errorf("Error creating Snapshot: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = FilestoreOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Snapshot", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Snapshot: %s", err) + } + + // This may have 
caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Snapshot %q: %#v", d.Id(), res) + + return resourceFilestoreSnapshotRead(d, meta) +} + +func resourceFilestoreSnapshotRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Snapshot: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("FilestoreSnapshot %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + + if err := d.Set("description", flattenFilestoreSnapshotDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("state", flattenFilestoreSnapshotState(res["state"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("create_time", flattenFilestoreSnapshotCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("labels", flattenFilestoreSnapshotLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("filesystem_used_bytes", flattenFilestoreSnapshotFilesystemUsedBytes(res["filesystemUsedBytes"], d, config)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + + return nil +} + +func resourceFilestoreSnapshotUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Snapshot: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandFilestoreSnapshotDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandFilestoreSnapshotLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "filestore/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, 
"{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Snapshot %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + + if err != nil { + return fmt.Errorf("Error updating Snapshot %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Snapshot %q: %#v", d.Id(), res) + } + + err = FilestoreOperationWaitTime( + config, res, project, "Updating Snapshot", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceFilestoreSnapshotRead(d, meta) +} + +func resourceFilestoreSnapshotDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Snapshot: %s", err) + } + billingProject = project + + lockName, 
err := tpgresource.ReplaceVars(d, config, "filestore/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Snapshot %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Snapshot") + } + + err = FilestoreOperationWaitTime( + config, res, project, "Deleting Snapshot", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Snapshot %q: %#v", d.Id(), res) + return nil +} + +func resourceFilestoreSnapshotImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)/snapshots/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", 
err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenFilestoreSnapshotDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreSnapshotState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreSnapshotCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreSnapshotLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFilestoreSnapshotFilesystemUsedBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandFilestoreSnapshotDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFilestoreSnapshotLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_snapshot_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_snapshot_sweeper.go new file mode 100644 index 0000000000..f616eec8c3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/filestore/resource_filestore_snapshot_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package filestore + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("FilestoreSnapshot", testSweepFilestoreSnapshot) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepFilestoreSnapshot(region string) error { + resourceName := "FilestoreSnapshot" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://file.googleapis.com/v1/projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["snapshots"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://file.googleapis.com/v1/projects/{{project}}/locations/{{location}}/instances/{{instance}}/snapshots/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firebaserules_release.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firebaserules/resource_firebaserules_release.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firebaserules_release.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firebaserules/resource_firebaserules_release.go index a2d769acdf..b31fa37848 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firebaserules_release.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firebaserules/resource_firebaserules_release.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package firebaserules import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceFirebaserulesRelease() *schema.Resource { @@ -55,7 +62,7 @@ func ResourceFirebaserulesRelease() *schema.Resource { "ruleset_name": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Name of the `Ruleset` referred to by this `Release`. 
The `Ruleset` must exist for the `Release` to be created.", }, @@ -64,7 +71,7 @@ func ResourceFirebaserulesRelease() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -90,8 +97,8 @@ func ResourceFirebaserulesRelease() *schema.Resource { } func resourceFirebaserulesReleaseCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -107,18 +114,18 @@ func resourceFirebaserulesReleaseCreate(d *schema.ResourceData, meta interface{} return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -140,8 +147,8 @@ func resourceFirebaserulesReleaseCreate(d *schema.ResourceData, meta interface{} } func resourceFirebaserulesReleaseRead(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -152,17 +159,17 @@ func resourceFirebaserulesReleaseRead(d *schema.ResourceData, meta interface{}) Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -171,7 +178,7 @@ func resourceFirebaserulesReleaseRead(d *schema.ResourceData, meta interface{}) res, err := client.GetRelease(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("FirebaserulesRelease %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("name", res.Name); err != nil { @@ -196,8 +203,8 @@ func resourceFirebaserulesReleaseRead(d *schema.ResourceData, meta interface{}) return nil } func resourceFirebaserulesReleaseUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, 
err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -207,19 +214,19 @@ func resourceFirebaserulesReleaseUpdate(d *schema.ResourceData, meta interface{} RulesetName: dcl.String(d.Get("ruleset_name").(string)), Project: dcl.String(project), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -241,8 +248,8 @@ func resourceFirebaserulesReleaseUpdate(d *schema.ResourceData, meta interface{} } func resourceFirebaserulesReleaseDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -254,17 +261,17 @@ func resourceFirebaserulesReleaseDelete(d *schema.ResourceData, meta interface{} } log.Printf("[DEBUG] Deleting Release %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that 
the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -279,16 +286,16 @@ func resourceFirebaserulesReleaseDelete(d *schema.ResourceData, meta interface{} } func resourceFirebaserulesReleaseImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P.+)/releases/(?P.+)", }, d, config); err != nil { return nil, err } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/releases/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/releases/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firebaserules/resource_firebaserules_release_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firebaserules/resource_firebaserules_release_sweeper.go new file mode 100644 index 0000000000..e70347736f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firebaserules/resource_firebaserules_release_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) 
HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package firebaserules + +import ( + "context" + "log" + "testing" + + firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("FirebaserulesRelease", testSweepFirebaserulesRelease) +} + +func testSweepFirebaserulesRelease(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for FirebaserulesRelease") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLFirebaserulesClient(config, config.UserAgent, "", 0) + err = client.DeleteAllRelease(context.Background(), d["project"], isDeletableFirebaserulesRelease) + if err != nil { + return err + } + return nil +} + +func isDeletableFirebaserulesRelease(r *firebaserules.Release) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firebaserules_ruleset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firebaserules/resource_firebaserules_ruleset.go similarity index 84% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firebaserules_ruleset.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firebaserules/resource_firebaserules_ruleset.go index dc5a027bd6..8bd5b74807 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_firebaserules_ruleset.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firebaserules/resource_firebaserules_ruleset.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package firebaserules import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceFirebaserulesRuleset() *schema.Resource { @@ -57,7 +64,7 @@ func ResourceFirebaserulesRuleset() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -145,8 +152,8 @@ func FirebaserulesRulesetMetadataSchema() *schema.Resource { } func resourceFirebaserulesRulesetCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -161,18 +168,18 @@ func resourceFirebaserulesRulesetCreate(d *schema.ResourceData, meta interface{} return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := 
getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -205,8 +212,8 @@ func resourceFirebaserulesRulesetCreate(d *schema.ResourceData, meta interface{} } func resourceFirebaserulesRulesetRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -217,17 +224,17 @@ func resourceFirebaserulesRulesetRead(d *schema.ResourceData, meta interface{}) Name: dcl.StringOrNil(d.Get("name").(string)), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not 
format %q: %w", client.Config.BasePath, err) } else { @@ -236,7 +243,7 @@ func resourceFirebaserulesRulesetRead(d *schema.ResourceData, meta interface{}) res, err := client.GetRuleset(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("FirebaserulesRuleset %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("source", flattenFirebaserulesRulesetSource(res.Source)); err != nil { @@ -259,8 +266,8 @@ func resourceFirebaserulesRulesetRead(d *schema.ResourceData, meta interface{}) } func resourceFirebaserulesRulesetDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -272,17 +279,17 @@ func resourceFirebaserulesRulesetDelete(d *schema.ResourceData, meta interface{} } log.Printf("[DEBUG] Deleting Ruleset %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLFirebaserulesClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -297,9 +304,9 @@ func resourceFirebaserulesRulesetDelete(d 
*schema.ResourceData, meta interface{} } func resourceFirebaserulesRulesetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/rulesets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -308,7 +315,7 @@ func resourceFirebaserulesRulesetImport(d *schema.ResourceData, meta interface{} } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/rulesets/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/rulesets/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firebaserules/resource_firebaserules_ruleset_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firebaserules/resource_firebaserules_ruleset_sweeper.go new file mode 100644 index 0000000000..1a3bac5cca --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firebaserules/resource_firebaserules_ruleset_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package firebaserules + +import ( + "context" + "log" + "testing" + + firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("FirebaserulesRuleset", testSweepFirebaserulesRuleset) +} + +func testSweepFirebaserulesRuleset(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for FirebaserulesRuleset") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLFirebaserulesClient(config, config.UserAgent, "", 0) + err = client.DeleteAllRuleset(context.Background(), d["project"], isDeletableFirebaserulesRuleset) + if err != nil { + return err + } + return nil +} + +func isDeletableFirebaserulesRuleset(r *firebaserules.Ruleset) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/firestore_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/firestore_operation.go new file mode 100644 index 0000000000..ab348d5b29 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/firestore_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package firestore + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type FirestoreOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *FirestoreOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.FirestoreBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createFirestoreWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*FirestoreOperationWaiter, error) { + w := &FirestoreOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func FirestoreOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createFirestoreWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func FirestoreOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := 
op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createFirestoreWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_database.go new file mode 100644 index 0000000000..84a71f1f30 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_database.go @@ -0,0 +1,493 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package firestore + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceFirestoreDatabase() *schema.Resource { + return &schema.Resource{ + Create: resourceFirestoreDatabaseCreate, + Read: resourceFirestoreDatabaseRead, + Update: resourceFirestoreDatabaseUpdate, + Delete: resourceFirestoreDatabaseDelete, + + Importer: &schema.ResourceImporter{ + State: resourceFirestoreDatabaseImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location of the database. Available databases are listed at +https://cloud.google.com/firestore/docs/locations.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID to use for the database, which will become the final +component of the database's resource name. This value should be 4-63 +characters. Valid characters are /[a-z][0-9]-/ with first character +a letter and the last a letter or a number. Must not be +UUID-like /[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}/. +"(default)" database id is also valid.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"FIRESTORE_NATIVE", "DATASTORE_MODE"}), + Description: `The type of the database. +See https://cloud.google.com/datastore/docs/firestore-or-datastore +for information about how to choose. 
Possible values: ["FIRESTORE_NATIVE", "DATASTORE_MODE"]`, + }, + "app_engine_integration_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ENABLED", "DISABLED", ""}), + Description: `The App Engine integration mode to use for this database. Possible values: ["ENABLED", "DISABLED"]`, + }, + "concurrency_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"OPTIMISTIC", "PESSIMISTIC", "OPTIMISTIC_WITH_ENTITY_GROUPS", ""}), + Description: `The concurrency control mode to use for this database. Possible values: ["OPTIMISTIC", "PESSIMISTIC", "OPTIMISTIC_WITH_ENTITY_GROUPS"]`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp at which this database was created.`, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `This checksum is computed by the server based on the value of other fields, +and may be sent on update and delete requests to ensure the client has an +up-to-date value before proceeding.`, + }, + "key_prefix": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The keyPrefix for this database. +This keyPrefix is used, in combination with the project id ("~") to construct the application id +that is returned from the Cloud Datastore APIs in Google App Engine first generation runtimes. 
+This value may be empty in which case the appid to use for URL-encoded keys is the project_id (eg: foo instead of v~foo).`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceFirestoreDatabaseCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandFirestoreDatabaseName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + locationIdProp, err := expandFirestoreDatabaseLocationId(d.Get("location_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("location_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(locationIdProp)) && (ok || !reflect.DeepEqual(v, locationIdProp)) { + obj["locationId"] = locationIdProp + } + typeProp, err := expandFirestoreDatabaseType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + concurrencyModeProp, err := expandFirestoreDatabaseConcurrencyMode(d.Get("concurrency_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("concurrency_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(concurrencyModeProp)) && (ok || !reflect.DeepEqual(v, concurrencyModeProp)) { + obj["concurrencyMode"] = concurrencyModeProp + } + appEngineIntegrationModeProp, err := expandFirestoreDatabaseAppEngineIntegrationMode(d.Get("app_engine_integration_mode"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("app_engine_integration_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(appEngineIntegrationModeProp)) && (ok || !reflect.DeepEqual(v, appEngineIntegrationModeProp)) { + obj["appEngineIntegrationMode"] = appEngineIntegrationModeProp + } + etagProp, err := expandFirestoreDatabaseEtag(d.Get("etag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(etagProp)) && (ok || !reflect.DeepEqual(v, etagProp)) { + obj["etag"] = etagProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases?databaseId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Database: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Database: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Database: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/databases/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = FirestoreOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Database", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + 
return fmt.Errorf("Error waiting to create Database: %s", err) + } + + if err := d.Set("name", flattenFirestoreDatabaseName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/databases/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Database %q: %#v", d.Id(), res) + + return resourceFirestoreDatabaseRead(d, meta) +} + +func resourceFirestoreDatabaseRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Database: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("FirestoreDatabase %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + + if err := d.Set("name", flattenFirestoreDatabaseName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("location_id", flattenFirestoreDatabaseLocationId(res["locationId"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("type", flattenFirestoreDatabaseType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("concurrency_mode", flattenFirestoreDatabaseConcurrencyMode(res["concurrencyMode"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("app_engine_integration_mode", flattenFirestoreDatabaseAppEngineIntegrationMode(res["appEngineIntegrationMode"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("key_prefix", flattenFirestoreDatabaseKeyPrefix(res["key_prefix"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("etag", flattenFirestoreDatabaseEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("create_time", flattenFirestoreDatabaseCreateTime(res["create_time"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + + return nil +} + +func resourceFirestoreDatabaseUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Database: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + typeProp, err := expandFirestoreDatabaseType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + concurrencyModeProp, err := expandFirestoreDatabaseConcurrencyMode(d.Get("concurrency_mode"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("concurrency_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, concurrencyModeProp)) { + obj["concurrencyMode"] = concurrencyModeProp + } + appEngineIntegrationModeProp, err := expandFirestoreDatabaseAppEngineIntegrationMode(d.Get("app_engine_integration_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("app_engine_integration_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, appEngineIntegrationModeProp)) { + obj["appEngineIntegrationMode"] = appEngineIntegrationModeProp + } + etagProp, err := expandFirestoreDatabaseEtag(d.Get("etag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, etagProp)) { + obj["etag"] = etagProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Database %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("type") { + updateMask = append(updateMask, "type") + } + + if d.HasChange("concurrency_mode") { + updateMask = append(updateMask, "concurrencyMode") + } + + if d.HasChange("app_engine_integration_mode") { + updateMask = append(updateMask, "appEngineIntegrationMode") + } + + if d.HasChange("etag") { + updateMask = append(updateMask, "etag") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + 
Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Database %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Database %q: %#v", d.Id(), res) + } + + err = FirestoreOperationWaitTime( + config, res, project, "Updating Database", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceFirestoreDatabaseRead(d, meta) +} + +func resourceFirestoreDatabaseDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] Firestore Database resources"+ + " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func resourceFirestoreDatabaseImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/databases/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/databases/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenFirestoreDatabaseName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenFirestoreDatabaseLocationId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreDatabaseType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenFirestoreDatabaseConcurrencyMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreDatabaseAppEngineIntegrationMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreDatabaseKeyPrefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreDatabaseEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreDatabaseCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandFirestoreDatabaseName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.ReplaceVars(d, config, "projects/{{project}}/databases/{{name}}") +} + +func expandFirestoreDatabaseLocationId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirestoreDatabaseType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirestoreDatabaseConcurrencyMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirestoreDatabaseAppEngineIntegrationMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirestoreDatabaseEtag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_document.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_document.go new file mode 100644 index 0000000000..00e005621b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_document.go @@ -0,0 +1,422 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package firestore + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceFirestoreDocument() *schema.Resource { + return &schema.Resource{ + Create: resourceFirestoreDocumentCreate, + Read: resourceFirestoreDocumentRead, + Update: resourceFirestoreDocumentUpdate, + Delete: resourceFirestoreDocumentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceFirestoreDocumentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + 
"collection": { + Type: schema.TypeString, + Required: true, + Description: `The collection ID, relative to database. For example: chatrooms or chatrooms/my-document/private-messages.`, + }, + "document_id": { + Type: schema.TypeString, + Required: true, + Description: `The client-assigned document ID to use for this document during creation.`, + }, + "fields": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `The document's [fields](https://cloud.google.com/firestore/docs/reference/rest/v1/projects.databases.documents) formated as a json string.`, + }, + "database": { + Type: schema.TypeString, + Optional: true, + Description: `The Firestore database id. Defaults to '"(default)"'.`, + Default: "(default)", + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 format.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `A server defined name for this index. 
Format: +'projects/{{project_id}}/databases/{{database_id}}/documents/{{path}}/{{document_id}}'`, + }, + "path": { + Type: schema.TypeString, + Computed: true, + Description: `A relative path to the collection this document exists within`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Last update timestamp in RFC3339 format.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceFirestoreDocumentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + fieldsProp, err := expandFirestoreDocumentFields(d.Get("fields"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fields"); !tpgresource.IsEmptyValue(reflect.ValueOf(fieldsProp)) && (ok || !reflect.DeepEqual(v, fieldsProp)) { + obj["fields"] = fieldsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases/{{database}}/documents/{{collection}}?documentId={{document_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Document: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Document: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error 
creating Document: %s", err) + } + if err := d.Set("name", flattenFirestoreDocumentName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Document %q: %#v", d.Id(), res) + + return resourceFirestoreDocumentRead(d, meta) +} + +func resourceFirestoreDocumentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Document: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("FirestoreDocument %q", d.Id())) + } + + res, err = resourceFirestoreDocumentDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing FirestoreDocument because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Document: %s", err) + } + + if err := d.Set("name", flattenFirestoreDocumentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Document: %s", err) + } + if err := d.Set("path", flattenFirestoreDocumentPath(res["path"], d, config)); err != nil { + return fmt.Errorf("Error reading Document: %s", err) + } + if err := d.Set("fields", flattenFirestoreDocumentFields(res["fields"], d, config)); err != nil { + return fmt.Errorf("Error reading Document: %s", err) + } + if err := d.Set("create_time", flattenFirestoreDocumentCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Document: %s", err) + } + if err := d.Set("update_time", flattenFirestoreDocumentUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Document: %s", err) + } + + return nil +} + +func resourceFirestoreDocumentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Document: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + fieldsProp, err := expandFirestoreDocumentFields(d.Get("fields"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fields"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fieldsProp)) { + obj["fields"] = fieldsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Document 
%q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Document %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Document %q: %#v", d.Id(), res) + } + + return resourceFirestoreDocumentRead(d, meta) +} + +func resourceFirestoreDocumentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Document: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Document %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Document") + } + + log.Printf("[DEBUG] Finished deleting Document %q: %#v", d.Id(), res) + return nil +} + +func resourceFirestoreDocumentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) 
{ + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + re := regexp.MustCompile("^projects/([^/]+)/databases/([^/]+)/documents/(.+)/([^/]+)$") + match := re.FindStringSubmatch(d.Get("name").(string)) + if len(match) > 0 { + if err := d.Set("project", match[1]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("database", match[2]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("collection", match[3]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("document_id", match[4]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } else { + return nil, fmt.Errorf("import did not match the regex ^projects/([^/]+)/databases/([^/]+)/documents/(.+)/([^/]+)$") + } + + return []*schema.ResourceData{d}, nil +} + +func flattenFirestoreDocumentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreDocumentPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreDocumentFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + b, err := json.Marshal(v) + if err != nil { + // TODO: return error once https://github.com/GoogleCloudPlatform/magic-modules/issues/3257 is fixed. 
+ log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) + } + return string(b) +} + +func flattenFirestoreDocumentCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreDocumentUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandFirestoreDocumentFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + b := []byte(v.(string)) + if len(b) == 0 { + return nil, nil + } + m := make(map[string]interface{}) + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return m, nil +} + +func resourceFirestoreDocumentDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // We use this decoder to add the path field + if name, ok := res["name"]; ok { + re := regexp.MustCompile("^projects/[^/]+/databases/[^/]+/documents/(.+)$") + match := re.FindStringSubmatch(name.(string)) + if len(match) > 0 { + res["path"] = match[1] + } + } + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_field.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_field.go new file mode 100644 index 0000000000..dc981aeb26 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_field.go @@ -0,0 +1,629 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package firestore + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceFirestoreField() *schema.Resource { + return &schema.Resource{ + Create: resourceFirestoreFieldCreate, + Read: resourceFirestoreFieldRead, + Update: resourceFirestoreFieldUpdate, + Delete: resourceFirestoreFieldDelete, + + Importer: &schema.ResourceImporter{ + State: resourceFirestoreFieldImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "collection": { + Type: schema.TypeString, + Required: true, + Description: `The id of the collection group to configure.`, + }, + "field": { + Type: schema.TypeString, + Required: true, + Description: `The id of the field to configure.`, + }, + "database": { + Type: schema.TypeString, + Optional: true, + Description: `The Firestore database id. 
Defaults to '"(default)"'.`, + Default: "(default)", + }, + "index_config": { + Type: schema.TypeList, + Optional: true, + Description: `The single field index configuration for this field. +Creating an index configuration for this field will override any inherited configuration with the +indexes specified. Configuring the index configuration with an empty block disables all indexes on +the field.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "indexes": { + Type: schema.TypeSet, + Optional: true, + Description: `The indexes to configure on the field. Order or array contains must be specified.`, + Elem: firestoreFieldIndexConfigIndexesSchema(), + // Default schema.HashSchema is used. + }, + }, + }, + }, + "ttl_config": { + Type: schema.TypeList, + Optional: true, + Description: `If set, this field is configured for TTL deletion.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The state of the TTL configuration.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of this field. Format: +'projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/fields/{{field}}'`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func firestoreFieldIndexConfigIndexesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "array_config": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"CONTAINS", ""}), + Description: `Indicates that this field supports operations on arrayValues. Only one of 'order' and 'arrayConfig' can +be specified. 
Possible values: ["CONTAINS"]`, + ExactlyOneOf: []string{}, + }, + "order": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ASCENDING", "DESCENDING", ""}), + Description: `Indicates that this field supports ordering by the specified order or comparing using =, <, <=, >, >=, !=. +Only one of 'order' and 'arrayConfig' can be specified. Possible values: ["ASCENDING", "DESCENDING"]`, + ExactlyOneOf: []string{}, + }, + "query_scope": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"COLLECTION", "COLLECTION_GROUP", ""}), + Description: `The scope at which a query is run. Collection scoped queries require you specify +the collection at query time. Collection group scope allows queries across all +collections with the same id. Default value: "COLLECTION" Possible values: ["COLLECTION", "COLLECTION_GROUP"]`, + Default: "COLLECTION", + }, + }, + } +} + +func resourceFirestoreFieldCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + indexConfigProp, err := expandFirestoreFieldIndexConfig(d.Get("index_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("index_config"); ok || !reflect.DeepEqual(v, indexConfigProp) { + obj["indexConfig"] = indexConfigProp + } + ttlConfigProp, err := expandFirestoreFieldTtlConfig(d.Get("ttl_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ttl_config"); ok || !reflect.DeepEqual(v, ttlConfigProp) { + obj["ttlConfig"] = ttlConfigProp + } + + obj, err = resourceFirestoreFieldEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/fields/{{field}}") 
+ if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Field: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Field: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Field: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = FirestoreOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Field", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Field: %s", err) + } + + if err := d.Set("name", flattenFirestoreFieldName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Field %q: %#v", d.Id(), res) + + return resourceFirestoreFieldRead(d, meta) +} + +func resourceFirestoreFieldRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Field: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("FirestoreField %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Field: %s", err) + } + + if err := d.Set("name", flattenFirestoreFieldName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Field: %s", err) + } + if err := d.Set("index_config", flattenFirestoreFieldIndexConfig(res["indexConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Field: %s", err) + } + if err := d.Set("ttl_config", flattenFirestoreFieldTtlConfig(res["ttlConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Field: %s", err) + } + + return nil +} + +func resourceFirestoreFieldUpdate(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Field: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + indexConfigProp, err := expandFirestoreFieldIndexConfig(d.Get("index_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("index_config"); ok || !reflect.DeepEqual(v, indexConfigProp) { + obj["indexConfig"] = indexConfigProp + } + ttlConfigProp, err := expandFirestoreFieldTtlConfig(d.Get("ttl_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ttl_config"); ok || !reflect.DeepEqual(v, ttlConfigProp) { + obj["ttlConfig"] = ttlConfigProp + } + + obj, err = resourceFirestoreFieldEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Field %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("index_config") { + updateMask = append(updateMask, "indexConfig") + } + + if d.HasChange("ttl_config") { + updateMask = append(updateMask, "ttlConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Field %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Field %q: %#v", d.Id(), res) + } + + err = FirestoreOperationWaitTime( + config, res, project, "Updating Field", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceFirestoreFieldRead(d, meta) +} + +func resourceFirestoreFieldDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + // Firestore fields cannot be deleted, instead we clear the indexConfig and ttlConfig. + + log.Printf("[DEBUG] Deleting Field %q", d.Id()) + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for App: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}{{name}}") + if err != nil { + return err + } + + updateMask := []string{"indexConfig", "ttlConfig"} + + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // Clear fields by sending an empty PATCH request with appropriate update mask. 
+ req := make(map[string]interface{}) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: req, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error deleting Field %q: %s", d.Id(), err) + } + + err = FirestoreOperationWaitTime( + config, res, project, "Deleting Field", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Field %q", d.Id()) + return nil +} + +func resourceFirestoreFieldImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + stringParts := strings.Split(d.Get("name").(string), "/") + if len(stringParts) != 8 { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s", + d.Get("name"), + "projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes/{{server_generated_id}}", + ) + } + + if err := d.Set("project", stringParts[1]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("database", stringParts[3]); err != nil { + return nil, fmt.Errorf("Error setting database: %s", err) + } + if err := d.Set("collection", stringParts[5]); err != nil { + return nil, fmt.Errorf("Error setting collection: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenFirestoreFieldName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreFieldIndexConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + indexConfig := 
v.(map[string]interface{}) + + usesAncestorConfig := false + if indexConfig["usesAncestorConfig"] != nil { + usesAncestorConfig = indexConfig["usesAncestorConfig"].(bool) + } + + if usesAncestorConfig { + // The intent when uses_ancestor_config is no config. + return []interface{}{} + } + + if indexConfig["indexes"] == nil { + // No indexes, return an existing, but empty index config. + return [1]interface{}{nil} + } + + // For Single field indexes, we put the field configuration on the index to avoid forced nesting. + l := indexConfig["indexes"].([]interface{}) + transformed := make(map[string]interface{}) + transformedIndexes := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + fields := original["fields"].([]interface{}) + sfi := fields[0].(map[string]interface{}) + transformedIndexes = append(transformedIndexes, map[string]interface{}{ + "query_scope": original["queryScope"], + "order": sfi["order"], + "array_config": sfi["arrayConfig"], + }) + } + transformed["indexes"] = transformedIndexes + return []interface{}{transformed} +} + +func flattenFirestoreFieldTtlConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["state"] = + flattenFirestoreFieldTtlConfigState(original["state"], d, config) + return []interface{}{transformed} +} +func flattenFirestoreFieldTtlConfigState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandFirestoreFieldIndexConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + // We drop all output only fields as they are unnecessary. 
+ if v == nil { + return nil, nil + } + + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + transformedIndexConfig := make(map[string]interface{}) + + // A configured, but empty, index_config block should be sent. This is how a user would remove all indexes. + if l[0] == nil { + return transformedIndexConfig, nil + } + + indexConfig := l[0].(map[string]interface{}) + + // For Single field indexes, we put the field configuration on the index to avoid forced nesting. + // Push all order/arrayConfig down into a single element fields list. + l = indexConfig["indexes"].(*schema.Set).List() + transformedIndexes := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformedField := make(map[string]interface{}) + + if val := reflect.ValueOf(original["query_scope"]); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["queryScope"] = original["query_scope"] + } + + if val := reflect.ValueOf(original["order"]); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformedField["order"] = original["order"] + } + + if val := reflect.ValueOf(original["array_config"]); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformedField["arrayConfig"] = original["array_config"] + } + transformed["fields"] = [1]interface{}{ + transformedField, + } + + transformedIndexes = append(transformedIndexes, transformed) + } + transformedIndexConfig["indexes"] = transformedIndexes + return transformedIndexConfig, nil +} + +/* + * Expands an empty terraform config into an empty object. + * + * Used to differentate a user specifying an empty block versus a null/unset block. + * + * This is unique from send_empty_value, which will send an explicit null value + * for empty configuration blocks. 
+ */ +func expandFirestoreFieldTtlConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + // A set, but empty object. + return struct{}{}, nil +} + +func resourceFirestoreFieldEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + + // We've added project / database / collection / field as split fields of the name, but + // the API doesn't expect them. Make sure we remove them from any requests. + + delete(obj, "project") + delete(obj, "database") + delete(obj, "collection") + delete(obj, "field") + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_index.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_index.go new file mode 100644 index 0000000000..e2d94a12d0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/firestore/resource_firestore_index.go @@ -0,0 +1,524 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package firestore + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +/* + * FirestoreIndex api apends __name__ as an item to the + * end of the fields list if not present. We are suppressing + * this server generated field. + */ +func FirestoreIFieldsDiffSuppressFunc(k, old, new string, d tpgresource.TerraformResourceDataChange) bool { + kLength := "fields.#" + oldLength, newLength := d.GetChange(kLength) + oldInt, ok := oldLength.(int) + if !ok { + return false + } + newInt, ok := newLength.(int) + if !ok { + return false + } + + if oldInt == newInt+1 { + kold := fmt.Sprintf("fields.%v.field_path", oldInt-1) + knew := fmt.Sprintf("fields.%v.field_path", newInt-1) + + oldLastIndexName, _ := d.GetChange(kold) + _, newLastIndexName := d.GetChange(knew) + if oldLastIndexName == "__name__" && newLastIndexName != "__name__" { + oldBase := fmt.Sprintf("fields.%v", oldInt-1) + if strings.HasPrefix(k, oldBase) || k == kLength { + return true + } + } + } + return false +} + +func firestoreIFieldsDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + return FirestoreIFieldsDiffSuppressFunc(k, old, new, d) +} + +func ResourceFirestoreIndex() *schema.Resource { + return &schema.Resource{ + Create: resourceFirestoreIndexCreate, + Read: resourceFirestoreIndexRead, + Delete: resourceFirestoreIndexDelete, + + Importer: &schema.ResourceImporter{ + State: resourceFirestoreIndexImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "collection": { + 
Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The collection being indexed.`, + }, + "fields": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + DiffSuppressFunc: firestoreIFieldsDiffSuppress, + Description: `The fields supported by this index. The last field entry is always for +the field path '__name__'. If, on creation, '__name__' was not +specified as the last field, it will be added automatically with the +same direction as that of the last field defined. If the final field +in a composite index is not directional, the '__name__' will be +ordered '"ASCENDING"' (unless explicitly specified otherwise).`, + MinItems: 2, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "array_config": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"CONTAINS", ""}), + Description: `Indicates that this field supports operations on arrayValues. Only one of 'order' and 'arrayConfig' can +be specified. Possible values: ["CONTAINS"]`, + }, + "field_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the field.`, + }, + "order": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ASCENDING", "DESCENDING", ""}), + Description: `Indicates that this field supports ordering by the specified order or comparing using =, <, <=, >, >=. +Only one of 'order' and 'arrayConfig' can be specified. Possible values: ["ASCENDING", "DESCENDING"]`, + }, + }, + }, + }, + "database": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Firestore database id. Defaults to '"(default)"'.`, + Default: "(default)", + }, + "query_scope": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"COLLECTION", "COLLECTION_GROUP", ""}), + Description: `The scope at which a query is run. 
Default value: "COLLECTION" Possible values: ["COLLECTION", "COLLECTION_GROUP"]`, + Default: "COLLECTION", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `A server defined name for this index. Format: +'projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes/{{server_generated_id}}'`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceFirestoreIndexCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + databaseProp, err := expandFirestoreIndexDatabase(d.Get("database"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("database"); !tpgresource.IsEmptyValue(reflect.ValueOf(databaseProp)) && (ok || !reflect.DeepEqual(v, databaseProp)) { + obj["database"] = databaseProp + } + collectionProp, err := expandFirestoreIndexCollection(d.Get("collection"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("collection"); !tpgresource.IsEmptyValue(reflect.ValueOf(collectionProp)) && (ok || !reflect.DeepEqual(v, collectionProp)) { + obj["collection"] = collectionProp + } + queryScopeProp, err := expandFirestoreIndexQueryScope(d.Get("query_scope"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("query_scope"); !tpgresource.IsEmptyValue(reflect.ValueOf(queryScopeProp)) && (ok || !reflect.DeepEqual(v, queryScopeProp)) { + obj["queryScope"] = queryScopeProp + } + fieldsProp, err := expandFirestoreIndexFields(d.Get("fields"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fields"); !tpgresource.IsEmptyValue(reflect.ValueOf(fieldsProp)) && (ok || !reflect.DeepEqual(v, fieldsProp)) { + obj["fields"] = fieldsProp + } 
+ + obj, err = resourceFirestoreIndexEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Index: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Index: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Index: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = FirestoreOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Index", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Index: %s", err) + } + + if err := d.Set("name", flattenFirestoreIndexName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // The operation for this resource contains the generated name that we need + // in order to perform a READ. + metadata := res["metadata"].(map[string]interface{}) + name := metadata["index"].(string) + log.Printf("[DEBUG] Setting Index name, id to %s", name) + if err := d.Set("name", name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name) + + log.Printf("[DEBUG] Finished creating Index %q: %#v", d.Id(), res) + + return resourceFirestoreIndexRead(d, meta) +} + +func resourceFirestoreIndexRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Index: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("FirestoreIndex %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + + if err := d.Set("name", flattenFirestoreIndexName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("query_scope", 
flattenFirestoreIndexQueryScope(res["queryScope"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("fields", flattenFirestoreIndexFields(res["fields"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + + return nil +} + +func resourceFirestoreIndexDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Index: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{FirestoreBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Index %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Index") + } + + err = FirestoreOperationWaitTime( + config, res, project, "Deleting Index", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Index %q: %#v", d.Id(), res) + return nil +} + +func resourceFirestoreIndexImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != 
nil { + return nil, err + } + + stringParts := strings.Split(d.Get("name").(string), "/") + if len(stringParts) != 8 { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s", + d.Get("name"), + "projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes/{{server_generated_id}}", + ) + } + + if err := d.Set("project", stringParts[1]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("database", stringParts[3]); err != nil { + return nil, fmt.Errorf("Error setting database: %s", err) + } + if err := d.Set("collection", stringParts[5]); err != nil { + return nil, fmt.Errorf("Error setting collection: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenFirestoreIndexName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreIndexQueryScope(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreIndexFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "field_path": flattenFirestoreIndexFieldsFieldPath(original["fieldPath"], d, config), + "order": flattenFirestoreIndexFieldsOrder(original["order"], d, config), + "array_config": flattenFirestoreIndexFieldsArrayConfig(original["arrayConfig"], d, config), + }) + } + return transformed +} +func flattenFirestoreIndexFieldsFieldPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreIndexFieldsOrder(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenFirestoreIndexFieldsArrayConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandFirestoreIndexDatabase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirestoreIndexCollection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirestoreIndexQueryScope(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirestoreIndexFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFieldPath, err := expandFirestoreIndexFieldsFieldPath(original["field_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFieldPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fieldPath"] = transformedFieldPath + } + + transformedOrder, err := expandFirestoreIndexFieldsOrder(original["order"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOrder); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["order"] = transformedOrder + } + + transformedArrayConfig, err := expandFirestoreIndexFieldsArrayConfig(original["array_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArrayConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["arrayConfig"] = transformedArrayConfig + } + + req = append(req, transformed) + } + return req, nil +} + +func 
expandFirestoreIndexFieldsFieldPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirestoreIndexFieldsOrder(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirestoreIndexFieldsArrayConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceFirestoreIndexEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // We've added project / database / collection as split fields of the name, but + // the API doesn't expect them. Make sure we remove them from any requests. + + delete(obj, "project") + delete(obj, "database") + delete(obj, "collection") + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/data_source_google_game_services_game_server_deployment_rollout.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/data_source_google_game_services_game_server_deployment_rollout.go new file mode 100644 index 0000000000..6d5d7f59d5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/data_source_google_game_services_game_server_deployment_rollout.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package gameservices + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGameServicesGameServerDeploymentRollout() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceGameServicesGameServerDeploymentRollout().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "deployment_id") + + return &schema.Resource{ + Read: dataSourceGameServicesGameServerDeploymentRolloutRead, + Schema: dsSchema, + } +} + +func dataSourceGameServicesGameServerDeploymentRolloutRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + + d.SetId(id) + + return resourceGameServicesGameServerDeploymentRolloutRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/game_services_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/game_services_operation.go new file mode 100644 index 0000000000..58e5daa52c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/game_services_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package gameservices + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type GameServicesOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *GameServicesOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.GameServicesBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createGameServicesWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*GameServicesOperationWaiter, error) { + w := &GameServicesOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func GameServicesOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createGameServicesWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func GameServicesOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createGameServicesWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_cluster.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_cluster.go new file mode 100644 index 0000000000..48d95365c5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_cluster.go @@ -0,0 +1,578 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gameservices + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func suppressSuffixDiff(_, old, new string, _ *schema.ResourceData) bool { + if strings.HasSuffix(old, new) { + log.Printf("[INFO] suppressing diff as %s is the same as the full path of %s", new, old) + return true + } + + return false +} + +func ResourceGameServicesGameServerCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceGameServicesGameServerClusterCreate, + Read: resourceGameServicesGameServerClusterRead, + Update: resourceGameServicesGameServerClusterUpdate, + Delete: resourceGameServicesGameServerClusterDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGameServicesGameServerClusterImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. The resource name of the game server cluster`, + }, + "connection_info": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `Game server cluster connection information. 
This information is used to +manage game server clusters.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gke_cluster_reference": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `Reference of the GKE cluster where the game servers are installed.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppressSuffixDiff, + Description: `The full or partial name of a GKE cluster, using one of the following +forms: + +* 'projects/{project_id}/locations/{location}/clusters/{cluster_id}' +* 'locations/{location}/clusters/{cluster_id}' +* '{cluster_id}' + +If project and location are not specified, the project and location of the +GameServerCluster resource are used to generate the full name of the +GKE cluster.`, + }, + }, + }, + }, + "namespace": { + Type: schema.TypeString, + Required: true, + Description: `Namespace designated on the game server cluster where the game server +instances will be created. The namespace existence will be validated +during creation.`, + }, + }, + }, + }, + "realm_id": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The realm id of the game server realm.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Human readable description of the cluster.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels associated with this game server cluster. 
Each label is a +key-value pair.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `Location of the Cluster.`, + Default: "global", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource id of the game server cluster, eg: + +'projects/{project_id}/locations/{location}/realms/{realm_id}/gameServerClusters/{cluster_id}'. +For example, + +'projects/my-project/locations/{location}/realms/zanzibar/gameServerClusters/my-onprem-cluster'.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceGameServicesGameServerClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandGameServicesGameServerClusterLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + connectionInfoProp, err := expandGameServicesGameServerClusterConnectionInfo(d.Get("connection_info"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("connection_info"); !tpgresource.IsEmptyValue(reflect.ValueOf(connectionInfoProp)) && (ok || !reflect.DeepEqual(v, connectionInfoProp)) { + obj["connectionInfo"] = connectionInfoProp + } + descriptionProp, err := expandGameServicesGameServerClusterDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = 
descriptionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters?gameServerClusterId={{cluster_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new GameServerCluster: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GameServerCluster: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating GameServerCluster: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = GameServicesOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating GameServerCluster", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create GameServerCluster: %s", err) + } + + if err := d.Set("name", flattenGameServicesGameServerClusterName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating GameServerCluster %q: %#v", d.Id(), res) + + return resourceGameServicesGameServerClusterRead(d, meta) +} + +func resourceGameServicesGameServerClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GameServerCluster: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("GameServicesGameServerCluster %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading GameServerCluster: %s", err) + } + + if err := d.Set("name", flattenGameServicesGameServerClusterName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading GameServerCluster: %s", err) + } + if err := d.Set("labels", flattenGameServicesGameServerClusterLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading GameServerCluster: 
%s", err) + } + if err := d.Set("connection_info", flattenGameServicesGameServerClusterConnectionInfo(res["connectionInfo"], d, config)); err != nil { + return fmt.Errorf("Error reading GameServerCluster: %s", err) + } + if err := d.Set("description", flattenGameServicesGameServerClusterDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading GameServerCluster: %s", err) + } + + return nil +} + +func resourceGameServicesGameServerClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GameServerCluster: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandGameServicesGameServerClusterLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + descriptionProp, err := expandGameServicesGameServerClusterDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating GameServerCluster %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("description") { + updateMask = 
append(updateMask, "description") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating GameServerCluster %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating GameServerCluster %q: %#v", d.Id(), res) + } + + err = GameServicesOperationWaitTime( + config, res, project, "Updating GameServerCluster", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceGameServicesGameServerClusterRead(d, meta) +} + +func resourceGameServicesGameServerClusterDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GameServerCluster: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting GameServerCluster %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "GameServerCluster") + } + + err = GameServicesOperationWaitTime( + config, res, project, "Deleting GameServerCluster", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting GameServerCluster %q: %#v", d.Id(), res) + return nil +} + +func resourceGameServicesGameServerClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/realms/(?P[^/]+)/gameServerClusters/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenGameServicesGameServerClusterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGameServicesGameServerClusterLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGameServicesGameServerClusterConnectionInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 
0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["gke_cluster_reference"] = + flattenGameServicesGameServerClusterConnectionInfoGkeClusterReference(original["gkeClusterReference"], d, config) + transformed["namespace"] = + flattenGameServicesGameServerClusterConnectionInfoNamespace(original["namespace"], d, config) + return []interface{}{transformed} +} +func flattenGameServicesGameServerClusterConnectionInfoGkeClusterReference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cluster"] = + flattenGameServicesGameServerClusterConnectionInfoGkeClusterReferenceCluster(original["cluster"], d, config) + return []interface{}{transformed} +} +func flattenGameServicesGameServerClusterConnectionInfoGkeClusterReferenceCluster(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGameServicesGameServerClusterConnectionInfoNamespace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGameServicesGameServerClusterDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandGameServicesGameServerClusterLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandGameServicesGameServerClusterConnectionInfo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedGkeClusterReference, err := expandGameServicesGameServerClusterConnectionInfoGkeClusterReference(original["gke_cluster_reference"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGkeClusterReference); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gkeClusterReference"] = transformedGkeClusterReference + } + + transformedNamespace, err := expandGameServicesGameServerClusterConnectionInfoNamespace(original["namespace"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["namespace"] = transformedNamespace + } + + return transformed, nil +} + +func expandGameServicesGameServerClusterConnectionInfoGkeClusterReference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCluster, err := expandGameServicesGameServerClusterConnectionInfoGkeClusterReferenceCluster(original["cluster"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCluster); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cluster"] = transformedCluster + } + + return transformed, nil +} + +func expandGameServicesGameServerClusterConnectionInfoGkeClusterReferenceCluster(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGameServicesGameServerClusterConnectionInfoNamespace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGameServicesGameServerClusterDescription(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_game_server_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_config.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_game_server_config.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_config.go index c6db9b1b88..598e6729df 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_game_services_game_server_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package gameservices import ( "fmt" @@ -21,6 +24,9 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceGameServicesGameServerConfig() *schema.Resource { @@ -49,7 +55,7 @@ func ResourceGameServicesGameServerConfig() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `A unique id for the deployment.`, }, "fleet_configs": { @@ -210,8 +216,8 @@ any of the selector entries.`, } func resourceGameServicesGameServerConfigCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -220,29 +226,29 @@ func resourceGameServicesGameServerConfigCreate(d *schema.ResourceData, meta int descriptionProp, err := expandGameServicesGameServerConfigDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } labelsProp, err := 
expandGameServicesGameServerConfigLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } fleetConfigsProp, err := expandGameServicesGameServerConfigFleetConfigs(d.Get("fleet_configs"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("fleet_configs"); !isEmptyValue(reflect.ValueOf(fleetConfigsProp)) && (ok || !reflect.DeepEqual(v, fleetConfigsProp)) { + } else if v, ok := d.GetOkExists("fleet_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(fleetConfigsProp)) && (ok || !reflect.DeepEqual(v, fleetConfigsProp)) { obj["fleetConfigs"] = fleetConfigsProp } scalingConfigsProp, err := expandGameServicesGameServerConfigScalingConfigs(d.Get("scaling_configs"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("scaling_configs"); !isEmptyValue(reflect.ValueOf(scalingConfigsProp)) && (ok || !reflect.DeepEqual(v, scalingConfigsProp)) { + } else if v, ok := d.GetOkExists("scaling_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(scalingConfigsProp)) && (ok || !reflect.DeepEqual(v, scalingConfigsProp)) { obj["scalingConfigs"] = scalingConfigsProp } - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs?configId={{config_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs?configId={{config_id}}") if err != nil { return err } @@ -250,24 +256,32 @@ func resourceGameServicesGameServerConfigCreate(d *schema.ResourceData, meta int log.Printf("[DEBUG] Creating new GameServerConfig: %#v", obj) 
billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for GameServerConfig: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating GameServerConfig: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -291,7 +305,7 @@ func resourceGameServicesGameServerConfigCreate(d *schema.ResourceData, meta int } // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -303,33 +317,39 @@ func resourceGameServicesGameServerConfigCreate(d *schema.ResourceData, meta int } func resourceGameServicesGameServerConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for GameServerConfig: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("GameServicesGameServerConfig %q", 
d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("GameServicesGameServerConfig %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -356,21 +376,21 @@ func resourceGameServicesGameServerConfigRead(d *schema.ResourceData, meta inter } func resourceGameServicesGameServerConfigDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for GameServerConfig: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") if err != nil { return err } @@ -379,13 +399,21 @@ func resourceGameServicesGameServerConfigDelete(d *schema.ResourceData, meta int log.Printf("[DEBUG] Deleting GameServerConfig %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return 
handleNotFoundError(err, d, "GameServerConfig") + return transport_tpg.HandleNotFoundError(err, d, "GameServerConfig") } err = GameServicesOperationWaitTime( @@ -401,8 +429,8 @@ func resourceGameServicesGameServerConfigDelete(d *schema.ResourceData, meta int } func resourceGameServicesGameServerConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/gameServerDeployments/(?P[^/]+)/configs/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", @@ -411,7 +439,7 @@ func resourceGameServicesGameServerConfigImport(d *schema.ResourceData, meta int } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -420,19 +448,19 @@ func resourceGameServicesGameServerConfigImport(d *schema.ResourceData, meta int return []*schema.ResourceData{d}, nil } -func flattenGameServicesGameServerConfigName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenGameServicesGameServerConfigDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenGameServicesGameServerConfigLabels(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenGameServicesGameServerConfigLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenGameServicesGameServerConfigFleetConfigs(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigFleetConfigs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -451,15 +479,15 @@ func flattenGameServicesGameServerConfigFleetConfigs(v interface{}, d *schema.Re } return transformed } -func flattenGameServicesGameServerConfigFleetConfigsFleetSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigFleetConfigsFleetSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenGameServicesGameServerConfigFleetConfigsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigFleetConfigsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenGameServicesGameServerConfigScalingConfigs(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigScalingConfigs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -480,15 +508,15 @@ func flattenGameServicesGameServerConfigScalingConfigs(v interface{}, d *schema. 
} return transformed } -func flattenGameServicesGameServerConfigScalingConfigsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigScalingConfigsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenGameServicesGameServerConfigScalingConfigsFleetAutoscalerSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigScalingConfigsFleetAutoscalerSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenGameServicesGameServerConfigScalingConfigsSelectors(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigScalingConfigsSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -506,11 +534,11 @@ func flattenGameServicesGameServerConfigScalingConfigsSelectors(v interface{}, d } return transformed } -func flattenGameServicesGameServerConfigScalingConfigsSelectorsLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigScalingConfigsSelectorsLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenGameServicesGameServerConfigScalingConfigsSchedules(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigScalingConfigsSchedules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -531,27 +559,27 @@ func flattenGameServicesGameServerConfigScalingConfigsSchedules(v interface{}, d } return transformed } -func flattenGameServicesGameServerConfigScalingConfigsSchedulesStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenGameServicesGameServerConfigScalingConfigsSchedulesStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenGameServicesGameServerConfigScalingConfigsSchedulesEndTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigScalingConfigsSchedulesEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenGameServicesGameServerConfigScalingConfigsSchedulesCronJobDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigScalingConfigsSchedulesCronJobDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenGameServicesGameServerConfigScalingConfigsSchedulesCronSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenGameServicesGameServerConfigScalingConfigsSchedulesCronSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandGameServicesGameServerConfigDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandGameServicesGameServerConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandGameServicesGameServerConfigLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandGameServicesGameServerConfigLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -562,7 +590,7 @@ func expandGameServicesGameServerConfigLabels(v interface{}, d TerraformResource return m, nil } -func expandGameServicesGameServerConfigFleetConfigs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandGameServicesGameServerConfigFleetConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -575,14 +603,14 @@ func expandGameServicesGameServerConfigFleetConfigs(v interface{}, d TerraformRe transformedFleetSpec, err := expandGameServicesGameServerConfigFleetConfigsFleetSpec(original["fleet_spec"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFleetSpec); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFleetSpec); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fleetSpec"] = transformedFleetSpec } transformedName, err := expandGameServicesGameServerConfigFleetConfigsName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } @@ -591,15 +619,15 @@ func expandGameServicesGameServerConfigFleetConfigs(v interface{}, d TerraformRe return req, nil } -func expandGameServicesGameServerConfigFleetConfigsFleetSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandGameServicesGameServerConfigFleetConfigsFleetSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandGameServicesGameServerConfigFleetConfigsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandGameServicesGameServerConfigFleetConfigsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandGameServicesGameServerConfigScalingConfigs(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { +func expandGameServicesGameServerConfigScalingConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -612,28 +640,28 @@ func expandGameServicesGameServerConfigScalingConfigs(v interface{}, d Terraform transformedName, err := expandGameServicesGameServerConfigScalingConfigsName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedFleetAutoscalerSpec, err := expandGameServicesGameServerConfigScalingConfigsFleetAutoscalerSpec(original["fleet_autoscaler_spec"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFleetAutoscalerSpec); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFleetAutoscalerSpec); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fleetAutoscalerSpec"] = transformedFleetAutoscalerSpec } transformedSelectors, err := expandGameServicesGameServerConfigScalingConfigsSelectors(original["selectors"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSelectors); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSelectors); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["selectors"] = transformedSelectors } transformedSchedules, err := expandGameServicesGameServerConfigScalingConfigsSchedules(original["schedules"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSchedules); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSchedules); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["schedules"] = 
transformedSchedules } @@ -642,15 +670,15 @@ func expandGameServicesGameServerConfigScalingConfigs(v interface{}, d Terraform return req, nil } -func expandGameServicesGameServerConfigScalingConfigsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandGameServicesGameServerConfigScalingConfigsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandGameServicesGameServerConfigScalingConfigsFleetAutoscalerSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandGameServicesGameServerConfigScalingConfigsFleetAutoscalerSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandGameServicesGameServerConfigScalingConfigsSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandGameServicesGameServerConfigScalingConfigsSelectors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -663,7 +691,7 @@ func expandGameServicesGameServerConfigScalingConfigsSelectors(v interface{}, d transformedLabels, err := expandGameServicesGameServerConfigScalingConfigsSelectorsLabels(original["labels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["labels"] = transformedLabels } @@ -672,7 +700,7 @@ func expandGameServicesGameServerConfigScalingConfigsSelectors(v interface{}, d return req, nil } -func expandGameServicesGameServerConfigScalingConfigsSelectorsLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func 
expandGameServicesGameServerConfigScalingConfigsSelectorsLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -683,7 +711,7 @@ func expandGameServicesGameServerConfigScalingConfigsSelectorsLabels(v interface return m, nil } -func expandGameServicesGameServerConfigScalingConfigsSchedules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandGameServicesGameServerConfigScalingConfigsSchedules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -696,28 +724,28 @@ func expandGameServicesGameServerConfigScalingConfigsSchedules(v interface{}, d transformedStartTime, err := expandGameServicesGameServerConfigScalingConfigsSchedulesStartTime(original["start_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["startTime"] = transformedStartTime } transformedEndTime, err := expandGameServicesGameServerConfigScalingConfigsSchedulesEndTime(original["end_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEndTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEndTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["endTime"] = transformedEndTime } transformedCronJobDuration, err := expandGameServicesGameServerConfigScalingConfigsSchedulesCronJobDuration(original["cron_job_duration"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCronJobDuration); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedCronJobDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cronJobDuration"] = transformedCronJobDuration } transformedCronSpec, err := expandGameServicesGameServerConfigScalingConfigsSchedulesCronSpec(original["cron_spec"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCronSpec); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCronSpec); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cronSpec"] = transformedCronSpec } @@ -726,18 +754,18 @@ func expandGameServicesGameServerConfigScalingConfigsSchedules(v interface{}, d return req, nil } -func expandGameServicesGameServerConfigScalingConfigsSchedulesStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandGameServicesGameServerConfigScalingConfigsSchedulesStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandGameServicesGameServerConfigScalingConfigsSchedulesEndTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandGameServicesGameServerConfigScalingConfigsSchedulesEndTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandGameServicesGameServerConfigScalingConfigsSchedulesCronJobDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandGameServicesGameServerConfigScalingConfigsSchedulesCronJobDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandGameServicesGameServerConfigScalingConfigsSchedulesCronSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandGameServicesGameServerConfigScalingConfigsSchedulesCronSpec(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_config_sweeper.go new file mode 100644 index 0000000000..e9964a4cb0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_config_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gameservices + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("GameServicesGameServerConfig", testSweepGameServicesGameServerConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepGameServicesGameServerConfig(region string) error { + resourceName := "GameServicesGameServerConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://gameservices.googleapis.com/v1/projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: 
listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["gameServerConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://gameservices.googleapis.com/v1/projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + 
log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_deployment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_deployment.go new file mode 100644 index 0000000000..5f328bc2aa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_deployment.go @@ -0,0 +1,419 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gameservices + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceGameServicesGameServerDeployment() *schema.Resource { + return &schema.Resource{ + Create: resourceGameServicesGameServerDeploymentCreate, + Read: resourceGameServicesGameServerDeploymentRead, + Update: resourceGameServicesGameServerDeploymentUpdate, + Delete: resourceGameServicesGameServerDeploymentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGameServicesGameServerDeploymentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "deployment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A unique id for the deployment.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Human readable description of the game server deployment.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels associated with this game server deployment. Each label is a +key-value pair.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `Location of the Deployment.`, + Default: "global", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource id of the game server deployment, eg: + +'projects/{project_id}/locations/{location}/gameServerDeployments/{deployment_id}'. 
+For example, + +'projects/my-project/locations/{location}/gameServerDeployments/my-deployment'.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceGameServicesGameServerDeploymentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandGameServicesGameServerDeploymentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandGameServicesGameServerDeploymentLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments?deploymentId={{deployment_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new GameServerDeployment: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GameServerDeployment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + 
UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating GameServerDeployment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = GameServicesOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating GameServerDeployment", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create GameServerDeployment: %s", err) + } + + if err := d.Set("name", flattenGameServicesGameServerDeploymentName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating GameServerDeployment %q: %#v", d.Id(), res) + + return resourceGameServicesGameServerDeploymentRead(d, meta) +} + +func resourceGameServicesGameServerDeploymentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GameServerDeployment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("GameServicesGameServerDeployment %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading GameServerDeployment: %s", err) + } + + if err := d.Set("name", flattenGameServicesGameServerDeploymentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading GameServerDeployment: %s", err) + } + if err := d.Set("description", flattenGameServicesGameServerDeploymentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading 
GameServerDeployment: %s", err) + } + if err := d.Set("labels", flattenGameServicesGameServerDeploymentLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading GameServerDeployment: %s", err) + } + + return nil +} + +func resourceGameServicesGameServerDeploymentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GameServerDeployment: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandGameServicesGameServerDeploymentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandGameServicesGameServerDeploymentLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating GameServerDeployment %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, 
map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating GameServerDeployment %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating GameServerDeployment %q: %#v", d.Id(), res) + } + + err = GameServicesOperationWaitTime( + config, res, project, "Updating GameServerDeployment", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceGameServicesGameServerDeploymentRead(d, meta) +} + +func resourceGameServicesGameServerDeploymentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GameServerDeployment: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting GameServerDeployment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + 
Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "GameServerDeployment") + } + + err = GameServicesOperationWaitTime( + config, res, project, "Deleting GameServerDeployment", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting GameServerDeployment %q: %#v", d.Id(), res) + return nil +} + +func resourceGameServicesGameServerDeploymentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/gameServerDeployments/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenGameServicesGameServerDeploymentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGameServicesGameServerDeploymentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGameServicesGameServerDeploymentLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandGameServicesGameServerDeploymentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGameServicesGameServerDeploymentLabels(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_deployment_rollout.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_deployment_rollout.go new file mode 100644 index 0000000000..5e5ba27152 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_deployment_rollout.go @@ -0,0 +1,453 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gameservices + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceGameServicesGameServerDeploymentRollout() *schema.Resource { + return &schema.Resource{ + Create: resourceGameServicesGameServerDeploymentRolloutCreate, + Read: resourceGameServicesGameServerDeploymentRolloutRead, + Update: resourceGameServicesGameServerDeploymentRolloutUpdate, + Delete: resourceGameServicesGameServerDeploymentRolloutDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGameServicesGameServerDeploymentRolloutImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "default_game_server_config": { + Type: schema.TypeString, + Required: true, + Description: `This field points to the game server config that is +applied by default to all realms and clusters. For example, + +'projects/my-project/locations/global/gameServerDeployments/my-game/configs/my-config'.`, + }, + "deployment_id": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The deployment to rollout the new config to. Only 1 rollout must be associated with each deployment.`, + }, + "game_server_config_overrides": { + Type: schema.TypeList, + Optional: true, + Description: `The game_server_config_overrides contains the per game server config +overrides. The overrides are processed in the order they are listed. 
As +soon as a match is found for a cluster, the rest of the list is not +processed.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config_version": { + Type: schema.TypeString, + Optional: true, + Description: `Version of the configuration.`, + }, + "realms_selector": { + Type: schema.TypeList, + Optional: true, + Description: `Selection by realms.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "realms": { + Type: schema.TypeList, + Optional: true, + Description: `List of realms to match against.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource id of the game server deployment + +eg: 'projects/my-project/locations/global/gameServerDeployments/my-deployment/rollout'.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceGameServicesGameServerDeploymentRolloutCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Creating GameServerDeploymentRollout %q: ", d.Id()) + + err = resourceGameServicesGameServerDeploymentRolloutUpdate(d, meta) + if err != nil { + d.SetId("") + return fmt.Errorf("Error trying to create GameServerDeploymentRollout: %s", err) + } + + return nil +} + +func resourceGameServicesGameServerDeploymentRolloutRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, 
config, "{{GameServicesBasePath}}projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GameServerDeploymentRollout: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("GameServicesGameServerDeploymentRollout %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading GameServerDeploymentRollout: %s", err) + } + + if err := d.Set("name", flattenGameServicesGameServerDeploymentRolloutName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading GameServerDeploymentRollout: %s", err) + } + if err := d.Set("default_game_server_config", flattenGameServicesGameServerDeploymentRolloutDefaultGameServerConfig(res["defaultGameServerConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading GameServerDeploymentRollout: %s", err) + } + if err := d.Set("game_server_config_overrides", flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverrides(res["gameServerConfigOverrides"], d, config)); err != nil { + return fmt.Errorf("Error reading GameServerDeploymentRollout: %s", err) + } + + return nil +} + +func resourceGameServicesGameServerDeploymentRolloutUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + 
billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GameServerDeploymentRollout: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + defaultGameServerConfigProp, err := expandGameServicesGameServerDeploymentRolloutDefaultGameServerConfig(d.Get("default_game_server_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_game_server_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultGameServerConfigProp)) { + obj["defaultGameServerConfig"] = defaultGameServerConfigProp + } + gameServerConfigOverridesProp, err := expandGameServicesGameServerDeploymentRolloutGameServerConfigOverrides(d.Get("game_server_config_overrides"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("game_server_config_overrides"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, gameServerConfigOverridesProp)) { + obj["gameServerConfigOverrides"] = gameServerConfigOverridesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating GameServerDeploymentRollout %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("default_game_server_config") { + updateMask = append(updateMask, "defaultGameServerConfig") + } + + if d.HasChange("game_server_config_overrides") { + updateMask = append(updateMask, "gameServerConfigOverrides") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating GameServerDeploymentRollout %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating GameServerDeploymentRollout %q: %#v", d.Id(), res) + } + + err = GameServicesOperationWaitTime( + config, res, project, "Updating GameServerDeploymentRollout", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceGameServicesGameServerDeploymentRolloutRead(d, meta) +} + +func resourceGameServicesGameServerDeploymentRolloutDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GameServerDeploymentRollout: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout?updateMask=defaultGameServerConfig") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting GameServerDeploymentRollout %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "GameServerDeploymentRollout") + } + + err = GameServicesOperationWaitTime( + config, res, project, "Deleting GameServerDeploymentRollout", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting GameServerDeploymentRollout %q: %#v", d.Id(), res) + return nil +} + +func resourceGameServicesGameServerDeploymentRolloutImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/global/gameServerDeployments/(?P[^/]+)/rollout", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenGameServicesGameServerDeploymentRolloutName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGameServicesGameServerDeploymentRolloutDefaultGameServerConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverrides(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + 
"realms_selector": flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelector(original["realmsSelector"], d, config), + "config_version": flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesConfigVersion(original["configVersion"], d, config), + }) + } + return transformed +} +func flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelector(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["realms"] = + flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelectorRealms(original["realms"], d, config) + return []interface{}{transformed} +} +func flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelectorRealms(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesConfigVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandGameServicesGameServerDeploymentRolloutDefaultGameServerConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGameServicesGameServerDeploymentRolloutGameServerConfigOverrides(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRealmsSelector, err := expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelector(original["realms_selector"], d, config) + 
if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRealmsSelector); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["realmsSelector"] = transformedRealmsSelector + } + + transformedConfigVersion, err := expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesConfigVersion(original["config_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConfigVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["configVersion"] = transformedConfigVersion + } + + req = append(req, transformed) + } + return req, nil +} + +func expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelector(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRealms, err := expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelectorRealms(original["realms"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRealms); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["realms"] = transformedRealms + } + + return transformed, nil +} + +func expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelectorRealms(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesConfigVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_deployment_rollout_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_deployment_rollout_sweeper.go new file mode 100644 index 0000000000..623219561d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_deployment_rollout_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gameservices + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("GameServicesGameServerDeploymentRollout", testSweepGameServicesGameServerDeploymentRollout) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepGameServicesGameServerDeploymentRollout(region string) error { + resourceName := "GameServicesGameServerDeploymentRollout" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://gameservices.googleapis.com/v1/projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", 
+ Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["gameServerDeploymentRollouts"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://gameservices.googleapis.com/v1/projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout?updateMask=defaultGameServerConfig" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, 
name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_deployment_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_deployment_sweeper.go new file mode 100644 index 0000000000..f311dfc4f9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_game_server_deployment_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gameservices + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("GameServicesGameServerDeployment", testSweepGameServicesGameServerDeployment) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepGameServicesGameServerDeployment(region string) error { + resourceName := "GameServicesGameServerDeployment" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://gameservices.googleapis.com/v1/projects/{{project}}/locations/{{location}}/gameServerDeployments", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + 
UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["gameServerDeployments"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://gameservices.googleapis.com/v1/projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were 
non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_realm.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_realm.go new file mode 100644 index 0000000000..6bdbde246c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_realm.go @@ -0,0 +1,461 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gameservices + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceGameServicesRealm() *schema.Resource { + return &schema.Resource{ + Create: resourceGameServicesRealmCreate, + Read: resourceGameServicesRealmRead, + Update: resourceGameServicesRealmUpdate, + Delete: resourceGameServicesRealmDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGameServicesRealmImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "realm_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `GCP region of the Realm.`, + }, + "time_zone": { + Type: schema.TypeString, + Required: true, + Description: `Required. Time zone where all realm-specific policies are evaluated. The value of +this field must be from the IANA time zone database: +https://www.iana.org/time-zones.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Human readable description of the realm.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels associated with this realm. 
Each label is a key-value pair.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `Location of the Realm.`, + Default: "global", + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `ETag of the resource.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource id of the realm, of the form: +'projects/{project_id}/locations/{location}/realms/{realm_id}'. For +example, 'projects/my-project/locations/{location}/realms/my-realm'.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceGameServicesRealmCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandGameServicesRealmLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + timeZoneProp, err := expandGameServicesRealmTimeZone(d.Get("time_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("time_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeZoneProp)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { + obj["timeZone"] = timeZoneProp + } + descriptionProp, err := expandGameServicesRealmDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms?realmId={{realm_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Realm: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Realm: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Realm: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = GameServicesOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Realm", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Realm: %s", err) + } + + if err := d.Set("name", flattenGameServicesRealmName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Realm %q: %#v", d.Id(), res) + + return resourceGameServicesRealmRead(d, meta) +} + +func resourceGameServicesRealmRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Realm: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("GameServicesRealm %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Realm: %s", err) + } + + if err := d.Set("name", flattenGameServicesRealmName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Realm: %s", err) + } + if err := d.Set("labels", flattenGameServicesRealmLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Realm: %s", err) + } + if err := d.Set("time_zone", flattenGameServicesRealmTimeZone(res["timeZone"], d, config)); err != nil { + return fmt.Errorf("Error reading Realm: %s", err) + } + if err := 
d.Set("etag", flattenGameServicesRealmEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading Realm: %s", err) + } + if err := d.Set("description", flattenGameServicesRealmDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Realm: %s", err) + } + + return nil +} + +func resourceGameServicesRealmUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Realm: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandGameServicesRealmLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + timeZoneProp, err := expandGameServicesRealmTimeZone(d.Get("time_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("time_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { + obj["timeZone"] = timeZoneProp + } + descriptionProp, err := expandGameServicesRealmDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Realm %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + 
updateMask = append(updateMask, "labels") + } + + if d.HasChange("time_zone") { + updateMask = append(updateMask, "timeZone") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Realm %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Realm %q: %#v", d.Id(), res) + } + + err = GameServicesOperationWaitTime( + config, res, project, "Updating Realm", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceGameServicesRealmRead(d, meta) +} + +func resourceGameServicesRealmDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Realm: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Realm %q", d.Id()) + + // err == nil indicates that the 
billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Realm") + } + + err = GameServicesOperationWaitTime( + config, res, project, "Deleting Realm", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Realm %q: %#v", d.Id(), res) + return nil +} + +func resourceGameServicesRealmImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/realms/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenGameServicesRealmName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGameServicesRealmLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGameServicesRealmTimeZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGameServicesRealmEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGameServicesRealmDescription(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandGameServicesRealmLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandGameServicesRealmTimeZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGameServicesRealmDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_realm_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_realm_sweeper.go new file mode 100644 index 0000000000..67470bc17e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gameservices/resource_game_services_realm_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gameservices + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("GameServicesRealm", testSweepGameServicesRealm) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepGameServicesRealm(region string) error { + resourceName := "GameServicesRealm" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://gameservices.googleapis.com/v1/projects/{{project}}/locations/{{location}}/realms", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["realms"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://gameservices.googleapis.com/v1/projects/{{project}}/locations/{{location}}/realms/{{realm_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkebackup/gke_backup_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkebackup/gke_backup_operation.go new file mode 100644 index 0000000000..13f04cf2ba --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkebackup/gke_backup_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package gkebackup + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type GKEBackupOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *GKEBackupOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.GKEBackupBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createGKEBackupWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*GKEBackupOperationWaiter, error) { + w := &GKEBackupOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func GKEBackupOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createGKEBackupWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func GKEBackupOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createGKEBackupWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkebackup/iam_gke_backup_backup_plan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkebackup/iam_gke_backup_backup_plan.go new file mode 100644 index 0000000000..542d489dda --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkebackup/iam_gke_backup_backup_plan.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gkebackup + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var GKEBackupBackupPlanIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type GKEBackupBackupPlanIamUpdater struct { + project string + location string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func GKEBackupBackupPlanIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/backupPlans/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &GKEBackupBackupPlanIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func GKEBackupBackupPlanIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/backupPlans/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &GKEBackupBackupPlanIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *GKEBackupBackupPlanIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyBackupPlanUrl("getIamPolicy") + if err != nil { + return nil, err + } + + 
project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *GKEBackupBackupPlanIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyBackupPlanUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *GKEBackupBackupPlanIamUpdater) qualifyBackupPlanUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{GKEBackupBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/backupPlans/%s", u.project, u.location, u.name), 
methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *GKEBackupBackupPlanIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/backupPlans/%s", u.project, u.location, u.name) +} + +func (u *GKEBackupBackupPlanIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-gkebackup-backupplan-%s", u.GetResourceId()) +} + +func (u *GKEBackupBackupPlanIamUpdater) DescribeResource() string { + return fmt.Sprintf("gkebackup backupplan %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkebackup/resource_gke_backup_backup_plan.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkebackup/resource_gke_backup_backup_plan.go new file mode 100644 index 0000000000..62b4659220 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkebackup/resource_gke_backup_backup_plan.go @@ -0,0 +1,1213 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gkebackup + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceGKEBackupBackupPlan() *schema.Resource { + return &schema.Resource{ + Create: resourceGKEBackupBackupPlanCreate, + Read: resourceGKEBackupBackupPlanRead, + Update: resourceGKEBackupBackupPlanUpdate, + Delete: resourceGKEBackupBackupPlanDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGKEBackupBackupPlanImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The source cluster from which Backups will be created via this BackupPlan.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The region of the Backup Plan.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The full name of the BackupPlan Resource.`, + }, + "backup_config": { + Type: schema.TypeList, + Optional: true, + Description: `Defines the configuration of Backups created via this BackupPlan.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "all_namespaces": { + Type: schema.TypeBool, + Optional: true, + Description: `If True, include all namespaced resources.`, + ExactlyOneOf: []string{"backup_config.0.all_namespaces", "backup_config.0.selected_namespaces", "backup_config.0.selected_applications"}, + }, + "encryption_key": { + Type: schema.TypeList, + Optional: true, + Description: 
`This defines a customer managed encryption key that will be used to encrypt the "config" +portion (the Kubernetes resources) of Backups created via this plan.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gcp_kms_encryption_key": { + Type: schema.TypeString, + Required: true, + Description: `Google Cloud KMS encryption key. Format: projects/*/locations/*/keyRings/*/cryptoKeys/*`, + }, + }, + }, + }, + "include_secrets": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `This flag specifies whether Kubernetes Secret resources should be included +when they fall into the scope of Backups.`, + }, + "include_volume_data": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `This flag specifies whether volume data should be backed up when PVCs are +included in the scope of a Backup.`, + }, + "selected_applications": { + Type: schema.TypeList, + Optional: true, + Description: `A list of namespaced Kubernetes Resources.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "namespaced_names": { + Type: schema.TypeList, + Required: true, + Description: `A list of namespaced Kubernetes resources.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The name of a Kubernetes Resource.`, + }, + "namespace": { + Type: schema.TypeString, + Required: true, + Description: `The namespace of a Kubernetes Resource.`, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"backup_config.0.all_namespaces", "backup_config.0.selected_namespaces", "backup_config.0.selected_applications"}, + }, + "selected_namespaces": { + Type: schema.TypeList, + Optional: true, + Description: `If set, include just the resources in the listed namespaces.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "namespaces": { + Type: schema.TypeList, + Required: true, + 
Description: `A list of Kubernetes Namespaces.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + ExactlyOneOf: []string{"backup_config.0.all_namespaces", "backup_config.0.selected_namespaces", "backup_config.0.selected_applications"}, + }, + }, + }, + }, + "backup_schedule": { + Type: schema.TypeList, + Optional: true, + Description: `Defines a schedule for automatic Backup creation via this BackupPlan.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cron_schedule": { + Type: schema.TypeString, + Optional: true, + Description: `A standard cron string that defines a repeating schedule for +creating Backups via this BackupPlan. +If this is defined, then backupRetainDays must also be defined.`, + }, + "paused": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `This flag denotes whether automatic Backup creation is paused for this BackupPlan.`, + }, + }, + }, + }, + "deactivated": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `This flag indicates whether this BackupPlan has been deactivated. +Setting this field to True locks the BackupPlan such that no further updates will be allowed +(except deletes), including the deactivated field itself. It also prevents any new Backups +from being created via this BackupPlan (including scheduled Backups).`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `User specified descriptive string for this BackupPlan.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Description: A set of custom labels supplied by the user. +A list of key->value pairs. 
+Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "retention_policy": { + Type: schema.TypeList, + Optional: true, + Description: `RetentionPolicy governs lifecycle of Backups created under this plan.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backup_delete_lock_days": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Minimum age for a Backup created via this BackupPlan (in days). +Must be an integer value between 0-90 (inclusive). +A Backup created under this BackupPlan will not be deletable +until it reaches Backup's (create time + backup_delete_lock_days). +Updating this field of a BackupPlan does not affect existing Backups. +Backups created after a successful update will inherit this new value.`, + }, + "backup_retain_days": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The default maximum age of a Backup created via this BackupPlan. +This field MUST be an integer value >= 0 and <= 365. If specified, +a Backup created under this BackupPlan will be automatically deleted +after its age reaches (createTime + backupRetainDays). +If not specified, Backups created under this BackupPlan will NOT be +subject to automatic deletion. Updating this field does NOT affect +existing Backups under it. Backups created AFTER a successful update +will automatically pick up the new value. +NOTE: backupRetainDays must be >= backupDeleteLockDays. +If cronSchedule is defined, then this must be <= 360 * the creation interval.]`, + }, + "locked": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + Description: `This flag denotes whether the retention policy of this BackupPlan is locked. 
+If set to True, no further update is allowed on this policy, including +the locked field itself.`, + }, + }, + }, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `etag is used for optimistic concurrency control as a way to help prevent simultaneous +updates of a backup plan from overwriting each other. It is strongly suggested that +systems make use of the 'etag' in the read-modify-write cycle to perform BackupPlan updates +in order to avoid race conditions: An etag is returned in the response to backupPlans.get, +and systems are expected to put that etag in the request to backupPlans.patch or +backupPlans.delete to ensure that their change will be applied to the same version of the resource.`, + }, + "protected_pod_count": { + Type: schema.TypeInt, + Computed: true, + Description: `The number of Kubernetes Pods backed up in the last successful Backup created via this BackupPlan.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The State of the BackupPlan.`, + }, + "state_reason": { + Type: schema.TypeString, + Computed: true, + Description: `Detailed description of why BackupPlan is in its current state.`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Server generated, unique identifier of UUID format.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceGKEBackupBackupPlanCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandGKEBackupBackupPlanName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + 
obj["name"] = nameProp + } + descriptionProp, err := expandGKEBackupBackupPlanDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + clusterProp, err := expandGKEBackupBackupPlanCluster(d.Get("cluster"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cluster"); !tpgresource.IsEmptyValue(reflect.ValueOf(clusterProp)) && (ok || !reflect.DeepEqual(v, clusterProp)) { + obj["cluster"] = clusterProp + } + retentionPolicyProp, err := expandGKEBackupBackupPlanRetentionPolicy(d.Get("retention_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("retention_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(retentionPolicyProp)) && (ok || !reflect.DeepEqual(v, retentionPolicyProp)) { + obj["retentionPolicy"] = retentionPolicyProp + } + labelsProp, err := expandGKEBackupBackupPlanLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + backupScheduleProp, err := expandGKEBackupBackupPlanBackupSchedule(d.Get("backup_schedule"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backup_schedule"); !tpgresource.IsEmptyValue(reflect.ValueOf(backupScheduleProp)) && (ok || !reflect.DeepEqual(v, backupScheduleProp)) { + obj["backupSchedule"] = backupScheduleProp + } + deactivatedProp, err := expandGKEBackupBackupPlanDeactivated(d.Get("deactivated"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deactivated"); !tpgresource.IsEmptyValue(reflect.ValueOf(deactivatedProp)) && (ok || !reflect.DeepEqual(v, deactivatedProp)) { + obj["deactivated"] = 
deactivatedProp + } + backupConfigProp, err := expandGKEBackupBackupPlanBackupConfig(d.Get("backup_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backup_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(backupConfigProp)) && (ok || !reflect.DeepEqual(v, backupConfigProp)) { + obj["backupConfig"] = backupConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEBackupBasePath}}projects/{{project}}/locations/{{location}}/backupPlans?backupPlanId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new BackupPlan: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackupPlan: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating BackupPlan: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = GKEBackupOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating BackupPlan", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create BackupPlan: %s", err) + } + + if err := d.Set("name", 
flattenGKEBackupBackupPlanName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating BackupPlan %q: %#v", d.Id(), res) + + return resourceGKEBackupBackupPlanRead(d, meta) +} + +func resourceGKEBackupBackupPlanRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEBackupBasePath}}projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackupPlan: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("GKEBackupBackupPlan %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + } + + if err := d.Set("name", flattenGKEBackupBackupPlanName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + } + if err := d.Set("uid", flattenGKEBackupBackupPlanUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + 
} + if err := d.Set("description", flattenGKEBackupBackupPlanDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + } + if err := d.Set("cluster", flattenGKEBackupBackupPlanCluster(res["cluster"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + } + if err := d.Set("retention_policy", flattenGKEBackupBackupPlanRetentionPolicy(res["retentionPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + } + if err := d.Set("labels", flattenGKEBackupBackupPlanLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + } + if err := d.Set("backup_schedule", flattenGKEBackupBackupPlanBackupSchedule(res["backupSchedule"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + } + if err := d.Set("etag", flattenGKEBackupBackupPlanEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + } + if err := d.Set("deactivated", flattenGKEBackupBackupPlanDeactivated(res["deactivated"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + } + if err := d.Set("backup_config", flattenGKEBackupBackupPlanBackupConfig(res["backupConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + } + if err := d.Set("protected_pod_count", flattenGKEBackupBackupPlanProtectedPodCount(res["protectedPodCount"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + } + if err := d.Set("state", flattenGKEBackupBackupPlanState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + } + if err := d.Set("state_reason", flattenGKEBackupBackupPlanStateReason(res["stateReason"], d, config)); err != nil { + return fmt.Errorf("Error reading BackupPlan: %s", err) + } + + return nil +} + +func 
resourceGKEBackupBackupPlanUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackupPlan: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandGKEBackupBackupPlanDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + retentionPolicyProp, err := expandGKEBackupBackupPlanRetentionPolicy(d.Get("retention_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("retention_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retentionPolicyProp)) { + obj["retentionPolicy"] = retentionPolicyProp + } + labelsProp, err := expandGKEBackupBackupPlanLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + backupScheduleProp, err := expandGKEBackupBackupPlanBackupSchedule(d.Get("backup_schedule"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backup_schedule"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, backupScheduleProp)) { + obj["backupSchedule"] = backupScheduleProp + } + deactivatedProp, err := expandGKEBackupBackupPlanDeactivated(d.Get("deactivated"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deactivated"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok 
|| !reflect.DeepEqual(v, deactivatedProp)) { + obj["deactivated"] = deactivatedProp + } + backupConfigProp, err := expandGKEBackupBackupPlanBackupConfig(d.Get("backup_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backup_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, backupConfigProp)) { + obj["backupConfig"] = backupConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEBackupBasePath}}projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating BackupPlan %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("retention_policy") { + updateMask = append(updateMask, "retentionPolicy") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("backup_schedule") { + updateMask = append(updateMask, "backupSchedule") + } + + if d.HasChange("deactivated") { + updateMask = append(updateMask, "deactivated") + } + + if d.HasChange("backup_config") { + updateMask = append(updateMask, "backupConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating BackupPlan %q: %s", d.Id(), err) + } else { + 
log.Printf("[DEBUG] Finished updating BackupPlan %q: %#v", d.Id(), res) + } + + err = GKEBackupOperationWaitTime( + config, res, project, "Updating BackupPlan", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceGKEBackupBackupPlanRead(d, meta) +} + +func resourceGKEBackupBackupPlanDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for BackupPlan: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEBackupBasePath}}projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting BackupPlan %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "BackupPlan") + } + + err = GKEBackupOperationWaitTime( + config, res, project, "Deleting BackupPlan", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting BackupPlan %q: %#v", d.Id(), res) + return nil +} + +func resourceGKEBackupBackupPlanImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + 
"projects/(?P[^/]+)/locations/(?P[^/]+)/backupPlans/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenGKEBackupBackupPlanName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenGKEBackupBackupPlanUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanCluster(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanRetentionPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["backup_delete_lock_days"] = + flattenGKEBackupBackupPlanRetentionPolicyBackupDeleteLockDays(original["backupDeleteLockDays"], d, config) + transformed["backup_retain_days"] = + flattenGKEBackupBackupPlanRetentionPolicyBackupRetainDays(original["backupRetainDays"], d, config) + transformed["locked"] = + flattenGKEBackupBackupPlanRetentionPolicyLocked(original["locked"], d, config) + return []interface{}{transformed} +} +func flattenGKEBackupBackupPlanRetentionPolicyBackupDeleteLockDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles 
the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenGKEBackupBackupPlanRetentionPolicyBackupRetainDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenGKEBackupBackupPlanRetentionPolicyLocked(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanBackupSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cron_schedule"] = + flattenGKEBackupBackupPlanBackupScheduleCronSchedule(original["cronSchedule"], d, config) + transformed["paused"] = + flattenGKEBackupBackupPlanBackupSchedulePaused(original["paused"], d, config) + return []interface{}{transformed} +} +func flattenGKEBackupBackupPlanBackupScheduleCronSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanBackupSchedulePaused(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenGKEBackupBackupPlanEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanDeactivated(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanBackupConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["include_volume_data"] = + flattenGKEBackupBackupPlanBackupConfigIncludeVolumeData(original["includeVolumeData"], d, config) + transformed["include_secrets"] = + flattenGKEBackupBackupPlanBackupConfigIncludeSecrets(original["includeSecrets"], d, config) + transformed["encryption_key"] = + flattenGKEBackupBackupPlanBackupConfigEncryptionKey(original["encryptionKey"], d, config) + transformed["all_namespaces"] = + flattenGKEBackupBackupPlanBackupConfigAllNamespaces(original["allNamespaces"], d, config) + transformed["selected_namespaces"] = + flattenGKEBackupBackupPlanBackupConfigSelectedNamespaces(original["selectedNamespaces"], d, config) + transformed["selected_applications"] = + flattenGKEBackupBackupPlanBackupConfigSelectedApplications(original["selectedApplications"], d, config) + return []interface{}{transformed} +} +func flattenGKEBackupBackupPlanBackupConfigIncludeVolumeData(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanBackupConfigIncludeSecrets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanBackupConfigEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) 
== 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["gcp_kms_encryption_key"] = + flattenGKEBackupBackupPlanBackupConfigEncryptionKeyGcpKmsEncryptionKey(original["gcpKmsEncryptionKey"], d, config) + return []interface{}{transformed} +} +func flattenGKEBackupBackupPlanBackupConfigEncryptionKeyGcpKmsEncryptionKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanBackupConfigAllNamespaces(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanBackupConfigSelectedNamespaces(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["namespaces"] = + flattenGKEBackupBackupPlanBackupConfigSelectedNamespacesNamespaces(original["namespaces"], d, config) + return []interface{}{transformed} +} +func flattenGKEBackupBackupPlanBackupConfigSelectedNamespacesNamespaces(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanBackupConfigSelectedApplications(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["namespaced_names"] = + flattenGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNames(original["namespacedNames"], d, config) + return []interface{}{transformed} +} +func flattenGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := 
make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "namespace": flattenGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesNamespace(original["namespace"], d, config), + "name": flattenGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesName(original["name"], d, config), + }) + } + return transformed +} +func flattenGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesNamespace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanProtectedPodCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenGKEBackupBackupPlanState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEBackupBackupPlanStateReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandGKEBackupBackupPlanName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/backupPlans/{{name}}") +} + +func expandGKEBackupBackupPlanDescription(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEBackupBackupPlanCluster(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEBackupBackupPlanRetentionPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBackupDeleteLockDays, err := expandGKEBackupBackupPlanRetentionPolicyBackupDeleteLockDays(original["backup_delete_lock_days"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBackupDeleteLockDays); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["backupDeleteLockDays"] = transformedBackupDeleteLockDays + } + + transformedBackupRetainDays, err := expandGKEBackupBackupPlanRetentionPolicyBackupRetainDays(original["backup_retain_days"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBackupRetainDays); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["backupRetainDays"] = transformedBackupRetainDays + } + + transformedLocked, err := expandGKEBackupBackupPlanRetentionPolicyLocked(original["locked"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocked); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["locked"] = transformedLocked + } + + return transformed, nil +} + +func expandGKEBackupBackupPlanRetentionPolicyBackupDeleteLockDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEBackupBackupPlanRetentionPolicyBackupRetainDays(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEBackupBackupPlanRetentionPolicyLocked(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEBackupBackupPlanLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandGKEBackupBackupPlanBackupSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCronSchedule, err := expandGKEBackupBackupPlanBackupScheduleCronSchedule(original["cron_schedule"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCronSchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cronSchedule"] = transformedCronSchedule + } + + transformedPaused, err := expandGKEBackupBackupPlanBackupSchedulePaused(original["paused"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPaused); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["paused"] = transformedPaused + } + + return transformed, nil +} + +func expandGKEBackupBackupPlanBackupScheduleCronSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEBackupBackupPlanBackupSchedulePaused(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandGKEBackupBackupPlanDeactivated(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEBackupBackupPlanBackupConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIncludeVolumeData, err := expandGKEBackupBackupPlanBackupConfigIncludeVolumeData(original["include_volume_data"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeVolumeData); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includeVolumeData"] = transformedIncludeVolumeData + } + + transformedIncludeSecrets, err := expandGKEBackupBackupPlanBackupConfigIncludeSecrets(original["include_secrets"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeSecrets); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["includeSecrets"] = transformedIncludeSecrets + } + + transformedEncryptionKey, err := expandGKEBackupBackupPlanBackupConfigEncryptionKey(original["encryption_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncryptionKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["encryptionKey"] = transformedEncryptionKey + } + + transformedAllNamespaces, err := expandGKEBackupBackupPlanBackupConfigAllNamespaces(original["all_namespaces"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllNamespaces); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allNamespaces"] = transformedAllNamespaces + } + + transformedSelectedNamespaces, err := 
expandGKEBackupBackupPlanBackupConfigSelectedNamespaces(original["selected_namespaces"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSelectedNamespaces); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["selectedNamespaces"] = transformedSelectedNamespaces + } + + transformedSelectedApplications, err := expandGKEBackupBackupPlanBackupConfigSelectedApplications(original["selected_applications"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSelectedApplications); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["selectedApplications"] = transformedSelectedApplications + } + + return transformed, nil +} + +func expandGKEBackupBackupPlanBackupConfigIncludeVolumeData(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEBackupBackupPlanBackupConfigIncludeSecrets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEBackupBackupPlanBackupConfigEncryptionKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedGcpKmsEncryptionKey, err := expandGKEBackupBackupPlanBackupConfigEncryptionKeyGcpKmsEncryptionKey(original["gcp_kms_encryption_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGcpKmsEncryptionKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gcpKmsEncryptionKey"] = transformedGcpKmsEncryptionKey + } + + return transformed, nil +} + +func expandGKEBackupBackupPlanBackupConfigEncryptionKeyGcpKmsEncryptionKey(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEBackupBackupPlanBackupConfigAllNamespaces(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEBackupBackupPlanBackupConfigSelectedNamespaces(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNamespaces, err := expandGKEBackupBackupPlanBackupConfigSelectedNamespacesNamespaces(original["namespaces"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNamespaces); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["namespaces"] = transformedNamespaces + } + + return transformed, nil +} + +func expandGKEBackupBackupPlanBackupConfigSelectedNamespacesNamespaces(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEBackupBackupPlanBackupConfigSelectedApplications(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNamespacedNames, err := expandGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNames(original["namespaced_names"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNamespacedNames); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["namespacedNames"] = transformedNamespacedNames + } + + return transformed, nil +} + +func 
expandGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNamespace, err := expandGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesNamespace(original["namespace"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["namespace"] = transformedNamespace + } + + transformedName, err := expandGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesNamespace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEBackupBackupPlanBackupConfigSelectedApplicationsNamespacedNamesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/gke_hub_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/gke_hub_operation.go new file mode 100644 index 0000000000..dceb5a2fbc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/gke_hub_operation.go 
@@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package gkehub + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type GKEHubOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *GKEHubOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.GKEHubBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createGKEHubWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*GKEHubOperationWaiter, error) { + w := &GKEHubOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func GKEHubOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createGKEHubWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func GKEHubOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createGKEHubWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/iam_gke_hub_membership.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/iam_gke_hub_membership.go new file mode 100644 index 0000000000..315d24a5eb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/iam_gke_hub_membership.go @@ -0,0 +1,221 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gkehub + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var GKEHubMembershipIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "membership_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type GKEHubMembershipIamUpdater struct { + project string + membershipId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func GKEHubMembershipIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("membership_id"); ok { + values["membership_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/memberships/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("membership_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &GKEHubMembershipIamUpdater{ + project: values["project"], + membershipId: values["membership_id"], 
+ d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("membership_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting membership_id: %s", err) + } + + return u, nil +} + +func GKEHubMembershipIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/memberships/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &GKEHubMembershipIamUpdater{ + project: values["project"], + membershipId: values["membership_id"], + d: d, + Config: config, + } + if err := d.Set("membership_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting membership_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *GKEHubMembershipIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyMembershipUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = 
tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *GKEHubMembershipIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyMembershipUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *GKEHubMembershipIamUpdater) qualifyMembershipUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{GKEHubBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/global/memberships/%s", u.project, u.membershipId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *GKEHubMembershipIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/global/memberships/%s", u.project, u.membershipId) +} + +func (u *GKEHubMembershipIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-gkehub-membership-%s", u.GetResourceId()) +} + +func (u *GKEHubMembershipIamUpdater) DescribeResource() string { + return fmt.Sprintf("gkehub membership %q", u.GetResourceId()) +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/resource_gke_hub_membership.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/resource_gke_hub_membership.go new file mode 100644 index 0000000000..21dc24805f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub/resource_gke_hub_membership.go @@ -0,0 +1,582 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package gkehub + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func suppressGkeHubEndpointSelfLinkDiff(_, old, new string, _ *schema.ResourceData) bool { + // The custom expander injects //container.googleapis.com/ if a selflink is supplied. 
+ selfLink := strings.TrimPrefix(old, "//container.googleapis.com/") + if selfLink == new { + return true + } + + return false +} + +func ResourceGKEHubMembership() *schema.Resource { + return &schema.Resource{ + Create: resourceGKEHubMembershipCreate, + Read: resourceGKEHubMembershipRead, + Update: resourceGKEHubMembershipUpdate, + Delete: resourceGKEHubMembershipDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGKEHubMembershipImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "membership_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The client-provided identifier of the membership.`, + }, + "authority": { + Type: schema.TypeList, + Optional: true, + Description: `Authority encodes how Google will recognize identities from this Membership. +See the workload identity documentation for more details: +https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "issuer": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A JSON Web Token (JWT) issuer URI. 'issuer' must start with 'https://' and // be a valid +with length <2000 characters. For example: 'https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster' (must be 'locations' rather than 'zones'). 
If the cluster is provisioned with Terraform, this is '"https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}"'.`, + }, + }, + }, + }, + "endpoint": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `If this Membership is a Kubernetes API server hosted on GKE, this is a self link to its GCP resource.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gke_cluster": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `If this Membership is a Kubernetes API server hosted on GKE, this is a self link to its GCP resource.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_link": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppressGkeHubEndpointSelfLinkDiff, + Description: `Self-link of the GCP resource for the GKE cluster. +For example: '//container.googleapis.com/projects/my-project/zones/us-west1-a/clusters/my-cluster'. +It can be at the most 1000 characters in length. 
If the cluster is provisioned with Terraform, +this can be '"//container.googleapis.com/${google_container_cluster.my-cluster.id}"' or +'google_container_cluster.my-cluster.id'.`, + }, + }, + }, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels to apply to this membership.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier of the membership.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceGKEHubMembershipCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandGKEHubMembershipLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + endpointProp, err := expandGKEHubMembershipEndpoint(d.Get("endpoint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("endpoint"); !tpgresource.IsEmptyValue(reflect.ValueOf(endpointProp)) && (ok || !reflect.DeepEqual(v, endpointProp)) { + obj["endpoint"] = endpointProp + } + authorityProp, err := expandGKEHubMembershipAuthority(d.Get("authority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("authority"); !tpgresource.IsEmptyValue(reflect.ValueOf(authorityProp)) && (ok || !reflect.DeepEqual(v, authorityProp)) { + obj["authority"] = authorityProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships?membershipId={{membership_id}}") + if err != nil { 
+ return err + } + + log.Printf("[DEBUG] Creating new Membership: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Membership: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Membership: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/memberships/{{membership_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = GKEHubOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Membership", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Membership: %s", err) + } + + if err := d.Set("name", flattenGKEHubMembershipName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/memberships/{{membership_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Membership %q: %#v", d.Id(), res) + + return resourceGKEHubMembershipRead(d, meta) +} + +func resourceGKEHubMembershipRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships/{{membership_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Membership: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("GKEHubMembership %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Membership: %s", err) + } + + if err := d.Set("name", flattenGKEHubMembershipName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Membership: %s", err) + } + if err := d.Set("labels", flattenGKEHubMembershipLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Membership: %s", err) + } + if err := d.Set("endpoint", flattenGKEHubMembershipEndpoint(res["endpoint"], d, config)); err != nil { + return fmt.Errorf("Error reading Membership: 
%s", err) + } + if err := d.Set("authority", flattenGKEHubMembershipAuthority(res["authority"], d, config)); err != nil { + return fmt.Errorf("Error reading Membership: %s", err) + } + + return nil +} + +func resourceGKEHubMembershipUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Membership: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandGKEHubMembershipLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + authorityProp, err := expandGKEHubMembershipAuthority(d.Get("authority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("authority"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, authorityProp)) { + obj["authority"] = authorityProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships/{{membership_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Membership %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("authority") { + updateMask = append(updateMask, "authority") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project 
value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Membership %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Membership %q: %#v", d.Id(), res) + } + + err = GKEHubOperationWaitTime( + config, res, project, "Updating Membership", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceGKEHubMembershipRead(d, meta) +} + +func resourceGKEHubMembershipDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Membership: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships/{{membership_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Membership %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Membership") + } + + err = GKEHubOperationWaitTime( + config, res, 
project, "Deleting Membership", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Membership %q: %#v", d.Id(), res) + return nil +} + +func resourceGKEHubMembershipImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/global/memberships/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/memberships/{{membership_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenGKEHubMembershipName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHubMembershipLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHubMembershipEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["gke_cluster"] = + flattenGKEHubMembershipEndpointGkeCluster(original["gkeCluster"], d, config) + return []interface{}{transformed} +} +func flattenGKEHubMembershipEndpointGkeCluster(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resource_link"] = + flattenGKEHubMembershipEndpointGkeClusterResourceLink(original["resourceLink"], d, config) + 
return []interface{}{transformed} +} +func flattenGKEHubMembershipEndpointGkeClusterResourceLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHubMembershipAuthority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["issuer"] = + flattenGKEHubMembershipAuthorityIssuer(original["issuer"], d, config) + return []interface{}{transformed} +} +func flattenGKEHubMembershipAuthorityIssuer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandGKEHubMembershipLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandGKEHubMembershipEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedGkeCluster, err := expandGKEHubMembershipEndpointGkeCluster(original["gke_cluster"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGkeCluster); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gkeCluster"] = transformedGkeCluster + } + + return transformed, nil +} + +func expandGKEHubMembershipEndpointGkeCluster(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + 
raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedResourceLink, err := expandGKEHubMembershipEndpointGkeClusterResourceLink(original["resource_link"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResourceLink); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resourceLink"] = transformedResourceLink + } + + return transformed, nil +} + +func expandGKEHubMembershipEndpointGkeClusterResourceLink(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if strings.HasPrefix(v.(string), "//") { + return v, nil + } else { + v = "//container.googleapis.com/" + v.(string) + return v, nil + } +} + +func expandGKEHubMembershipAuthority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIssuer, err := expandGKEHubMembershipAuthorityIssuer(original["issuer"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIssuer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["issuer"] = transformedIssuer + } + + return transformed, nil +} + +func expandGKEHubMembershipAuthorityIssuer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/gke_hub2_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/gke_hub2_operation.go new file mode 100644 index 0000000000..06bfd5138e --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/gke_hub2_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package gkehub2 + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type GKEHub2OperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *GKEHub2OperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.GKEHub2BasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createGKEHub2Waiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*GKEHub2OperationWaiter, error) { + w := &GKEHub2OperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func GKEHub2OperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createGKEHub2Waiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func GKEHub2OperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createGKEHub2Waiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/iam_gke_hub_feature.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/iam_gke_hub_feature.go new file mode 100644 index 0000000000..5a019a3e4e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/iam_gke_hub_feature.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gkehub2 + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var GKEHub2FeatureIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type GKEHub2FeatureIamUpdater struct { + project string + location string + name string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func GKEHub2FeatureIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("name"); ok { + values["name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/features/(?P[^/]+)", 
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &GKEHub2FeatureIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return u, nil +} + +func GKEHub2FeatureIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/features/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &GKEHub2FeatureIamUpdater{ + project: values["project"], + location: values["location"], + name: values["name"], + d: d, + Config: config, + } + if err := d.Set("name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *GKEHub2FeatureIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyFeatureUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + 
userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *GKEHub2FeatureIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyFeatureUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *GKEHub2FeatureIamUpdater) qualifyFeatureUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{GKEHub2BasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/features/%s", u.project, u.location, u.name), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u 
*GKEHub2FeatureIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/features/%s", u.project, u.location, u.name) +} + +func (u *GKEHub2FeatureIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-gkehub2-feature-%s", u.GetResourceId()) +} + +func (u *GKEHub2FeatureIamUpdater) DescribeResource() string { + return fmt.Sprintf("gkehub2 feature %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/resource_gke_hub_feature.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/resource_gke_hub_feature.go new file mode 100644 index 0000000000..92816999fc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/gkehub2/resource_gke_hub_feature.go @@ -0,0 +1,863 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gkehub2 + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceGKEHub2Feature() *schema.Resource { + return &schema.Resource{ + Create: resourceGKEHub2FeatureCreate, + Read: resourceGKEHub2FeatureRead, + Update: resourceGKEHub2FeatureUpdate, + Delete: resourceGKEHub2FeatureDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGKEHub2FeatureImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location for the resource`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `GCP labels for this Feature.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The full, unique name of this Feature resource`, + }, + "spec": { + Type: schema.TypeList, + Optional: true, + Description: `Optional. Hub-wide Feature configuration. 
If this Feature does not support any Hub-wide configuration, this field may be unused.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fleetobservability": { + Type: schema.TypeList, + Optional: true, + Description: `Fleet Observability feature spec.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "logging_config": { + Type: schema.TypeList, + Optional: true, + Description: `Specified if fleet logging feature is enabled for the entire fleet. If UNSPECIFIED, fleet logging feature is disabled for the entire fleet.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_config": { + Type: schema.TypeList, + Optional: true, + Description: `Specified if applying the default routing config to logs not specified in other configs.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MODE_UNSPECIFIED", "COPY", "MOVE", ""}), + Description: `Specified if fleet logging feature is enabled. Possible values: ["MODE_UNSPECIFIED", "COPY", "MOVE"]`, + }, + }, + }, + }, + "fleet_scope_logs_config": { + Type: schema.TypeList, + Optional: true, + Description: `Specified if applying the routing config to all logs for all fleet scopes.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MODE_UNSPECIFIED", "COPY", "MOVE", ""}), + Description: `Specified if fleet logging feature is enabled. 
Possible values: ["MODE_UNSPECIFIED", "COPY", "MOVE"]`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "multiclusteringress": { + Type: schema.TypeList, + Optional: true, + Description: `Multicluster Ingress-specific spec.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config_membership": { + Type: schema.TypeString, + Required: true, + Description: `Fully-qualified Membership name which hosts the MultiClusterIngress CRD. Example: 'projects/foo-proj/locations/global/memberships/bar'`, + }, + }, + }, + }, + }, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. When the Feature resource was created.`, + }, + "delete_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. When the Feature resource was deleted.`, + }, + "resource_state": { + Type: schema.TypeList, + Computed: true, + Description: `State of the Feature resource itself.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "has_resources": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether this Feature has outstanding resources that need to be cleaned up before it can be disabled.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the Feature resource in the Hub API.`, + }, + }, + }, + }, + "state": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. The Hub-wide Feature state`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "state": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. 
The "running state" of the Feature in this Hub.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": { + Type: schema.TypeString, + Computed: true, + Description: `The high-level, machine-readable status of this Feature.`, + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: `A human-readable description of the current status.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time this status and any related Feature-specific details were updated. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z"`, + }, + }, + }, + }, + }, + }, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. When the Feature resource was last updated.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceGKEHub2FeatureCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandGKEHub2FeatureLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + specProp, err := expandGKEHub2FeatureSpec(d.Get("spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(specProp)) && (ok || !reflect.DeepEqual(v, specProp)) { + obj["spec"] = specProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{GKEHub2BasePath}}projects/{{project}}/locations/{{location}}/features?featureId={{name}}") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + log.Printf("[DEBUG] Creating new Feature: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Feature: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Feature: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/features/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + id = strings.ReplaceAll(id, "projects/projects/", "projects/") + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = GKEHub2OperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Feature", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Feature: %s", err) + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/features/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + id = strings.ReplaceAll(id, "projects/projects/", "projects/") + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Feature %q: %#v", d.Id(), res) + + return resourceGKEHub2FeatureRead(d, meta) +} + +func resourceGKEHub2FeatureRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/{{location}}/features/{{name}}") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Feature: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("GKEHub2Feature %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Feature: %s", err) + } + + if err := d.Set("labels", flattenGKEHub2FeatureLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Feature: %s", err) + } + if err := d.Set("resource_state", flattenGKEHub2FeatureResourceState(res["resourceState"], d, config)); err != nil { + return fmt.Errorf("Error reading Feature: %s", err) + 
} + if err := d.Set("spec", flattenGKEHub2FeatureSpec(res["spec"], d, config)); err != nil { + return fmt.Errorf("Error reading Feature: %s", err) + } + if err := d.Set("state", flattenGKEHub2FeatureState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Feature: %s", err) + } + if err := d.Set("create_time", flattenGKEHub2FeatureCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Feature: %s", err) + } + if err := d.Set("update_time", flattenGKEHub2FeatureUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Feature: %s", err) + } + if err := d.Set("delete_time", flattenGKEHub2FeatureDeleteTime(res["deleteTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Feature: %s", err) + } + + return nil +} + +func resourceGKEHub2FeatureUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Feature: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + obj := make(map[string]interface{}) + labelsProp, err := expandGKEHub2FeatureLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + specProp, err := expandGKEHub2FeatureSpec(d.Get("spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, specProp)) { + obj["spec"] = specProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{GKEHub2BasePath}}projects/{{project}}/locations/{{location}}/features/{{name}}") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + log.Printf("[DEBUG] Updating Feature %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("spec") { + updateMask = append(updateMask, "spec") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Feature %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Feature %q: %#v", d.Id(), res) + } + + err = GKEHub2OperationWaitTime( + config, res, project, "Updating Feature", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceGKEHub2FeatureRead(d, meta) +} + +func resourceGKEHub2FeatureDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Feature: %s", err) + } + billingProject = strings.TrimPrefix(project, "projects/") + + url, err := tpgresource.ReplaceVars(d, config, 
"{{GKEHub2BasePath}}projects/{{project}}/locations/{{location}}/features/{{name}}") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Feature %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Feature") + } + + err = GKEHub2OperationWaitTime( + config, res, project, "Deleting Feature", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Feature %q: %#v", d.Id(), res) + return nil +} + +func resourceGKEHub2FeatureImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/features/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/features/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + id = strings.ReplaceAll(id, "projects/projects/", "projects/") + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenGKEHub2FeatureLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2FeatureResourceState(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["state"] = + flattenGKEHub2FeatureResourceStateState(original["state"], d, config) + transformed["has_resources"] = + flattenGKEHub2FeatureResourceStateHasResources(original["hasResources"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2FeatureResourceStateState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2FeatureResourceStateHasResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2FeatureSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["multiclusteringress"] = + flattenGKEHub2FeatureSpecMulticlusteringress(original["multiclusteringress"], d, config) + transformed["fleetobservability"] = + flattenGKEHub2FeatureSpecFleetobservability(original["fleetobservability"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2FeatureSpecMulticlusteringress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["config_membership"] = + flattenGKEHub2FeatureSpecMulticlusteringressConfigMembership(original["configMembership"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2FeatureSpecMulticlusteringressConfigMembership(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenGKEHub2FeatureSpecFleetobservability(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["logging_config"] = + flattenGKEHub2FeatureSpecFleetobservabilityLoggingConfig(original["loggingConfig"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2FeatureSpecFleetobservabilityLoggingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["default_config"] = + flattenGKEHub2FeatureSpecFleetobservabilityLoggingConfigDefaultConfig(original["defaultConfig"], d, config) + transformed["fleet_scope_logs_config"] = + flattenGKEHub2FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(original["fleetScopeLogsConfig"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2FeatureSpecFleetobservabilityLoggingConfigDefaultConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["mode"] = + flattenGKEHub2FeatureSpecFleetobservabilityLoggingConfigDefaultConfigMode(original["mode"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2FeatureSpecFleetobservabilityLoggingConfigDefaultConfigMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original 
:= v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["mode"] = + flattenGKEHub2FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigMode(original["mode"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2FeatureState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["state"] = + flattenGKEHub2FeatureStateState(original["state"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2FeatureStateState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["code"] = + flattenGKEHub2FeatureStateStateCode(original["code"], d, config) + transformed["description"] = + flattenGKEHub2FeatureStateStateDescription(original["description"], d, config) + transformed["update_time"] = + flattenGKEHub2FeatureStateStateUpdateTime(original["updateTime"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2FeatureStateStateCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2FeatureStateStateDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2FeatureStateStateUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2FeatureCreateTime(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2FeatureUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2FeatureDeleteTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandGKEHub2FeatureLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandGKEHub2FeatureSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMulticlusteringress, err := expandGKEHub2FeatureSpecMulticlusteringress(original["multiclusteringress"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMulticlusteringress); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["multiclusteringress"] = transformedMulticlusteringress + } + + transformedFleetobservability, err := expandGKEHub2FeatureSpecFleetobservability(original["fleetobservability"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFleetobservability); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fleetobservability"] = transformedFleetobservability + } + + return transformed, nil +} + +func expandGKEHub2FeatureSpecMulticlusteringress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } 
+ raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedConfigMembership, err := expandGKEHub2FeatureSpecMulticlusteringressConfigMembership(original["config_membership"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConfigMembership); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["configMembership"] = transformedConfigMembership + } + + return transformed, nil +} + +func expandGKEHub2FeatureSpecMulticlusteringressConfigMembership(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEHub2FeatureSpecFleetobservability(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLoggingConfig, err := expandGKEHub2FeatureSpecFleetobservabilityLoggingConfig(original["logging_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLoggingConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["loggingConfig"] = transformedLoggingConfig + } + + return transformed, nil +} + +func expandGKEHub2FeatureSpecFleetobservabilityLoggingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDefaultConfig, err := expandGKEHub2FeatureSpecFleetobservabilityLoggingConfigDefaultConfig(original["default_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDefaultConfig); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["defaultConfig"] = transformedDefaultConfig + } + + transformedFleetScopeLogsConfig, err := expandGKEHub2FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(original["fleet_scope_logs_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFleetScopeLogsConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fleetScopeLogsConfig"] = transformedFleetScopeLogsConfig + } + + return transformed, nil +} + +func expandGKEHub2FeatureSpecFleetobservabilityLoggingConfigDefaultConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMode, err := expandGKEHub2FeatureSpecFleetobservabilityLoggingConfigDefaultConfigMode(original["mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mode"] = transformedMode + } + + return transformed, nil +} + +func expandGKEHub2FeatureSpecFleetobservabilityLoggingConfigDefaultConfigMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEHub2FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMode, err := expandGKEHub2FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigMode(original["mode"], d, config) + if err != nil { + return nil, 
err + } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mode"] = transformedMode + } + + return transformed, nil +} + +func expandGKEHub2FeatureSpecFleetobservabilityLoggingConfigFleetScopeLogsConfigMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/healthcare_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/healthcare_utils.go new file mode 100644 index 0000000000..01503eb321 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/healthcare_utils.go @@ -0,0 +1,237 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package healthcare + +import ( + "fmt" + "regexp" + "strings" + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +type HealthcareDatasetId struct { + Project string + Location string + Name string +} + +func (s *HealthcareDatasetId) DatasetId() string { + return fmt.Sprintf("projects/%s/locations/%s/datasets/%s", s.Project, s.Location, s.Name) +} + +func (s *HealthcareDatasetId) TerraformId() string { + return fmt.Sprintf("%s/%s/%s", s.Project, s.Location, s.Name) +} + +func ParseHealthcareDatasetId(id string, config *transport_tpg.Config) (*HealthcareDatasetId, error) { + parts := strings.Split(id, "/") + + datasetIdRegex := regexp.MustCompile("^(" + verify.ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})$") + datasetIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})$") + datasetRelativeLinkRegex := regexp.MustCompile("^projects/(" + verify.ProjectRegex + 
")/locations/([a-z0-9-]+)/datasets/([a-zA-Z0-9_-]{1,256})$") + + if datasetIdRegex.MatchString(id) { + return &HealthcareDatasetId{ + Project: parts[0], + Location: parts[1], + Name: parts[2], + }, nil + } + + if datasetIdWithoutProjectRegex.MatchString(id) { + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{location}/{datasetName}` id format.") + } + + return &HealthcareDatasetId{ + Project: config.Project, + Location: parts[0], + Name: parts[1], + }, nil + } + + if parts := datasetRelativeLinkRegex.FindStringSubmatch(id); parts != nil { + return &HealthcareDatasetId{ + Project: parts[1], + Location: parts[2], + Name: parts[3], + }, nil + } + return nil, fmt.Errorf("Invalid Dataset id format, expecting `{projectId}/{locationId}/{datasetName}` or `{locationId}/{datasetName}.`") +} + +type healthcareFhirStoreId struct { + DatasetId HealthcareDatasetId + Name string +} + +func (s *healthcareFhirStoreId) FhirStoreId() string { + return fmt.Sprintf("%s/fhirStores/%s", s.DatasetId.DatasetId(), s.Name) +} + +func (s *healthcareFhirStoreId) TerraformId() string { + return fmt.Sprintf("%s/%s", s.DatasetId.TerraformId(), s.Name) +} + +func ParseHealthcareFhirStoreId(id string, config *transport_tpg.Config) (*healthcareFhirStoreId, error) { + parts := strings.Split(id, "/") + fhirStoreIdRegex := regexp.MustCompile("^(" + verify.ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") + fhirStoreIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") + fhirStoreRelativeLinkRegex := regexp.MustCompile("^projects/(" + verify.ProjectRegex + ")/locations/([a-z0-9-]+)/datasets/([a-zA-Z0-9_-]{1,256})/fhirStores/([a-zA-Z0-9_-]{1,256})$") + + if fhirStoreIdRegex.MatchString(id) { + return &healthcareFhirStoreId{ + DatasetId: HealthcareDatasetId{ + Project: parts[0], + Location: parts[1], + Name: parts[2], + }, + Name: parts[3], + }, nil + } 
+ + if fhirStoreIdWithoutProjectRegex.MatchString(id) { + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{location}/{datasetName}/{fhirStoreName}` id format.") + } + + return &healthcareFhirStoreId{ + DatasetId: HealthcareDatasetId{ + Project: config.Project, + Location: parts[0], + Name: parts[1], + }, + Name: parts[2], + }, nil + } + + if parts := fhirStoreRelativeLinkRegex.FindStringSubmatch(id); parts != nil { + return &healthcareFhirStoreId{ + DatasetId: HealthcareDatasetId{ + Project: parts[1], + Location: parts[2], + Name: parts[3], + }, + Name: parts[4], + }, nil + } + return nil, fmt.Errorf("Invalid FhirStore id format, expecting `{projectId}/{locationId}/{datasetName}/{fhirStoreName}` or `{locationId}/{datasetName}/{fhirStoreName}.`") +} + +type healthcareHl7V2StoreId struct { + DatasetId HealthcareDatasetId + Name string +} + +func (s *healthcareHl7V2StoreId) Hl7V2StoreId() string { + return fmt.Sprintf("%s/hl7V2Stores/%s", s.DatasetId.DatasetId(), s.Name) +} + +func (s *healthcareHl7V2StoreId) TerraformId() string { + return fmt.Sprintf("%s/%s", s.DatasetId.TerraformId(), s.Name) +} + +func ParseHealthcareHl7V2StoreId(id string, config *transport_tpg.Config) (*healthcareHl7V2StoreId, error) { + parts := strings.Split(id, "/") + hl7V2StoreIdRegex := regexp.MustCompile("^(" + verify.ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") + hl7V2StoreIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") + hl7V2StoreRelativeLinkRegex := regexp.MustCompile("^projects/(" + verify.ProjectRegex + ")/locations/([a-z0-9-]+)/datasets/([a-zA-Z0-9_-]{1,256})/hl7V2Stores/([a-zA-Z0-9_-]{1,256})$") + + if hl7V2StoreIdRegex.MatchString(id) { + return &healthcareHl7V2StoreId{ + DatasetId: HealthcareDatasetId{ + Project: parts[0], + Location: parts[1], + Name: parts[2], + }, + Name: parts[3], + }, nil + } + + if 
hl7V2StoreIdWithoutProjectRegex.MatchString(id) { + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{location}/{datasetName}/{hl7V2StoreName}` id format.") + } + + return &healthcareHl7V2StoreId{ + DatasetId: HealthcareDatasetId{ + Project: config.Project, + Location: parts[0], + Name: parts[1], + }, + Name: parts[2], + }, nil + } + + if parts := hl7V2StoreRelativeLinkRegex.FindStringSubmatch(id); parts != nil { + return &healthcareHl7V2StoreId{ + DatasetId: HealthcareDatasetId{ + Project: parts[1], + Location: parts[2], + Name: parts[3], + }, + Name: parts[4], + }, nil + } + return nil, fmt.Errorf("Invalid Hl7V2Store id format, expecting `{projectId}/{locationId}/{datasetName}/{hl7V2StoreName}` or `{locationId}/{datasetName}/{hl7V2StoreName}.`") +} + +type healthcareDicomStoreId struct { + DatasetId HealthcareDatasetId + Name string +} + +func (s *healthcareDicomStoreId) DicomStoreId() string { + return fmt.Sprintf("%s/dicomStores/%s", s.DatasetId.DatasetId(), s.Name) +} + +func (s *healthcareDicomStoreId) TerraformId() string { + return fmt.Sprintf("%s/%s", s.DatasetId.TerraformId(), s.Name) +} + +func ParseHealthcareDicomStoreId(id string, config *transport_tpg.Config) (*healthcareDicomStoreId, error) { + parts := strings.Split(id, "/") + dicomStoreIdRegex := regexp.MustCompile("^(" + verify.ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") + dicomStoreIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") + dicomStoreRelativeLinkRegex := regexp.MustCompile("^projects/(" + verify.ProjectRegex + ")/locations/([a-z0-9-]+)/datasets/([a-zA-Z0-9_-]{1,256})/dicomStores/([a-zA-Z0-9_-]{1,256})$") + + if dicomStoreIdRegex.MatchString(id) { + return &healthcareDicomStoreId{ + DatasetId: HealthcareDatasetId{ + Project: parts[0], + Location: parts[1], + Name: parts[2], + }, + Name: parts[3], + }, nil + } + + if 
dicomStoreIdWithoutProjectRegex.MatchString(id) { + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{location}/{datasetName}/{dicomStoreName}` id format.") + } + + return &healthcareDicomStoreId{ + DatasetId: HealthcareDatasetId{ + Project: config.Project, + Location: parts[0], + Name: parts[1], + }, + Name: parts[2], + }, nil + } + + if parts := dicomStoreRelativeLinkRegex.FindStringSubmatch(id); parts != nil { + return &healthcareDicomStoreId{ + DatasetId: HealthcareDatasetId{ + Project: parts[1], + Location: parts[2], + Name: parts[3], + }, + Name: parts[4], + }, nil + } + return nil, fmt.Errorf("Invalid DicomStore id format, expecting `{projectId}/{locationId}/{datasetName}/{dicomStoreName}` or `{locationId}/{datasetName}/{dicomStoreName}.`") +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_consent_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_consent_store.go new file mode 100644 index 0000000000..f10fd11328 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_consent_store.go @@ -0,0 +1,202 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package healthcare + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var HealthcareConsentStoreIamSchema = map[string]*schema.Schema{ + "dataset": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "consent_store_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type HealthcareConsentStoreIamUpdater struct { + dataset string + consentStoreId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func HealthcareConsentStoreIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + if v, ok := d.GetOk("dataset"); ok { + values["dataset"] = v.(string) + } + + if v, ok := d.GetOk("consent_store_id"); ok { + values["consent_store_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"(?P.+)/consentStores/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("consent_store_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &HealthcareConsentStoreIamUpdater{ + dataset: values["dataset"], + consentStoreId: values["consent_store_id"], + d: d, + Config: config, + } + + if err := d.Set("dataset", u.dataset); err != nil { + return nil, fmt.Errorf("Error setting dataset: %s", err) + } + if err := d.Set("consent_store_id", 
u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting consent_store_id: %s", err) + } + + return u, nil +} + +func HealthcareConsentStoreIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + m, err := tpgresource.GetImportIdQualifiers([]string{"(?P.+)/consentStores/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &HealthcareConsentStoreIamUpdater{ + dataset: values["dataset"], + consentStoreId: values["consent_store_id"], + d: d, + Config: config, + } + if err := d.Set("consent_store_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting consent_store_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *HealthcareConsentStoreIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyConsentStoreUrl("getIamPolicy") + if err != nil { + return nil, err + } + + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *HealthcareConsentStoreIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := 
u.qualifyConsentStoreUrl("setIamPolicy") + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *HealthcareConsentStoreIamUpdater) qualifyConsentStoreUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{HealthcareBasePath}}%s:%s", fmt.Sprintf("%s/consentStores/%s", u.dataset, u.consentStoreId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *HealthcareConsentStoreIamUpdater) GetResourceId() string { + return fmt.Sprintf("%s/consentStores/%s", u.dataset, u.consentStoreId) +} + +func (u *HealthcareConsentStoreIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-healthcare-consentstore-%s", u.GetResourceId()) +} + +func (u *HealthcareConsentStoreIamUpdater) DescribeResource() string { + return fmt.Sprintf("healthcare consentstore %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_dataset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_dataset.go new file mode 100644 index 0000000000..f7745c17e8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_dataset.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package healthcare + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + healthcare "google.golang.org/api/healthcare/v1" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamHealthcareDatasetSchema = map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, +} + +type HealthcareDatasetIamUpdater struct { + resourceId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewHealthcareDatasetIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + dataset := d.Get("dataset_id").(string) + datasetId, err := ParseHealthcareDatasetId(dataset, config) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{err}}", dataset), err) + } + + return &HealthcareDatasetIamUpdater{ + resourceId: datasetId.DatasetId(), + d: d, + Config: config, + }, nil +} + +func DatasetIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + datasetId, err := ParseHealthcareDatasetId(d.Id(), config) + if err != nil { + return err + } + + if err := d.Set("dataset_id", datasetId.DatasetId()); err != nil { + return fmt.Errorf("Error setting dataset_id: %s", err) + } + d.SetId(datasetId.DatasetId()) + return nil +} + +func (u *HealthcareDatasetIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.GetIamPolicy(u.resourceId).Do() + + 
if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := healthcareToResourceManagerPolicy(p) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return cloudResourcePolicy, nil +} + +func (u *HealthcareDatasetIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + healthcarePolicy, err := resourceManagerToHealthcarePolicy(policy) + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.SetIamPolicy(u.resourceId, &healthcare.SetIamPolicyRequest{ + Policy: healthcarePolicy, + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *HealthcareDatasetIamUpdater) GetResourceId() string { + return u.resourceId +} + +func (u *HealthcareDatasetIamUpdater) GetMutexKey() string { + return u.resourceId +} + +func (u *HealthcareDatasetIamUpdater) DescribeResource() string { + return fmt.Sprintf("Healthcare Dataset %q", u.resourceId) +} + +func resourceManagerToHealthcarePolicy(p *cloudresourcemanager.Policy) (*healthcare.Policy, error) { + out := &healthcare.Policy{} + err := tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a v1 policy to a healthcare policy: {{err}}", err) + } + return out, nil +} + +func healthcareToResourceManagerPolicy(p *healthcare.Policy) (*cloudresourcemanager.Policy, error) { + out := &cloudresourcemanager.Policy{} + err := tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a healthcare 
policy to a v1 policy: {{err}}", err) + } + return out, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_dicom_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_dicom_store.go new file mode 100644 index 0000000000..779b933741 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_dicom_store.go @@ -0,0 +1,113 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package healthcare + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + healthcare "google.golang.org/api/healthcare/v1" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamHealthcareDicomStoreSchema = map[string]*schema.Schema{ + "dicom_store_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, +} + +type HealthcareDicomStoreIamUpdater struct { + resourceId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewHealthcareDicomStoreIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + dicomStore := d.Get("dicom_store_id").(string) + dicomStoreId, err := ParseHealthcareDicomStoreId(dicomStore, config) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{err}}", dicomStore), err) + } + + return &HealthcareDicomStoreIamUpdater{ + resourceId: dicomStoreId.DicomStoreId(), + d: d, + Config: config, + }, nil +} + +func DicomStoreIdParseFunc(d *schema.ResourceData, 
config *transport_tpg.Config) error { + dicomStoreId, err := ParseHealthcareDicomStoreId(d.Id(), config) + if err != nil { + return err + } + if err := d.Set("dicom_store_id", dicomStoreId.DicomStoreId()); err != nil { + return fmt.Errorf("Error setting dicom_store_id: %s", err) + } + d.SetId(dicomStoreId.DicomStoreId()) + return nil +} + +func (u *HealthcareDicomStoreIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.DicomStores.GetIamPolicy(u.resourceId).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := healthcareToResourceManagerPolicy(p) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return cloudResourcePolicy, nil +} + +func (u *HealthcareDicomStoreIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + healthcarePolicy, err := resourceManagerToHealthcarePolicy(policy) + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.DicomStores.SetIamPolicy(u.resourceId, &healthcare.SetIamPolicyRequest{ + Policy: healthcarePolicy, + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *HealthcareDicomStoreIamUpdater) GetResourceId() string { + return u.resourceId +} + +func (u *HealthcareDicomStoreIamUpdater) GetMutexKey() string { + 
return u.resourceId +} + +func (u *HealthcareDicomStoreIamUpdater) DescribeResource() string { + return fmt.Sprintf("Healthcare DicomStore %q", u.resourceId) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_fhir_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_fhir_store.go new file mode 100644 index 0000000000..059647498e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_fhir_store.go @@ -0,0 +1,113 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package healthcare + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + healthcare "google.golang.org/api/healthcare/v1" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamHealthcareFhirStoreSchema = map[string]*schema.Schema{ + "fhir_store_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, +} + +type HealthcareFhirStoreIamUpdater struct { + resourceId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewHealthcareFhirStoreIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + fhirStore := d.Get("fhir_store_id").(string) + fhirStoreId, err := ParseHealthcareFhirStoreId(fhirStore, config) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{err}}", fhirStore), err) + } + + return &HealthcareFhirStoreIamUpdater{ + resourceId: fhirStoreId.FhirStoreId(), + d: d, + 
Config: config, + }, nil +} + +func FhirStoreIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + fhirStoreId, err := ParseHealthcareFhirStoreId(d.Id(), config) + if err != nil { + return err + } + if err := d.Set("fhir_store_id", fhirStoreId.FhirStoreId()); err != nil { + return fmt.Errorf("Error setting fhir_store_id: %s", err) + } + d.SetId(fhirStoreId.FhirStoreId()) + return nil +} + +func (u *HealthcareFhirStoreIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.FhirStores.GetIamPolicy(u.resourceId).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := healthcareToResourceManagerPolicy(p) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return cloudResourcePolicy, nil +} + +func (u *HealthcareFhirStoreIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + healthcarePolicy, err := resourceManagerToHealthcarePolicy(policy) + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.FhirStores.SetIamPolicy(u.resourceId, &healthcare.SetIamPolicyRequest{ + Policy: healthcarePolicy, + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *HealthcareFhirStoreIamUpdater) GetResourceId() string { + return u.resourceId +} + 
+func (u *HealthcareFhirStoreIamUpdater) GetMutexKey() string { + return u.resourceId +} + +func (u *HealthcareFhirStoreIamUpdater) DescribeResource() string { + return fmt.Sprintf("Healthcare FhirStore %q", u.resourceId) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_hl7_v2_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_hl7_v2_store.go new file mode 100644 index 0000000000..555da53b81 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/iam_healthcare_hl7_v2_store.go @@ -0,0 +1,113 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package healthcare + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + healthcare "google.golang.org/api/healthcare/v1" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamHealthcareHl7V2StoreSchema = map[string]*schema.Schema{ + "hl7_v2_store_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, +} + +type HealthcareHl7V2StoreIamUpdater struct { + resourceId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewHealthcareHl7V2StoreIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + hl7V2Store := d.Get("hl7_v2_store_id").(string) + hl7V2StoreId, err := ParseHealthcareHl7V2StoreId(hl7V2Store, config) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{err}}", hl7V2Store), err) + } + + return 
&HealthcareHl7V2StoreIamUpdater{ + resourceId: hl7V2StoreId.Hl7V2StoreId(), + d: d, + Config: config, + }, nil +} + +func Hl7V2StoreIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + hl7V2StoreId, err := ParseHealthcareHl7V2StoreId(d.Id(), config) + if err != nil { + return err + } + if err := d.Set("hl7_v2_store_id", hl7V2StoreId.Hl7V2StoreId()); err != nil { + return fmt.Errorf("Error setting hl7_v2_store_id: %s", err) + } + d.SetId(hl7V2StoreId.Hl7V2StoreId()) + return nil +} + +func (u *HealthcareHl7V2StoreIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.Hl7V2Stores.GetIamPolicy(u.resourceId).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := healthcareToResourceManagerPolicy(p) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return cloudResourcePolicy, nil +} + +func (u *HealthcareHl7V2StoreIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + healthcarePolicy, err := resourceManagerToHealthcarePolicy(policy) + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.Hl7V2Stores.SetIamPolicy(u.resourceId, &healthcare.SetIamPolicyRequest{ + Policy: healthcarePolicy, + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil 
+} + +func (u *HealthcareHl7V2StoreIamUpdater) GetResourceId() string { + return u.resourceId +} + +func (u *HealthcareHl7V2StoreIamUpdater) GetMutexKey() string { + return u.resourceId +} + +func (u *HealthcareHl7V2StoreIamUpdater) DescribeResource() string { + return fmt.Sprintf("Healthcare Hl7V2Store %q", u.resourceId) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_consent_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_consent_store.go new file mode 100644 index 0000000000..8a97688002 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_consent_store.go @@ -0,0 +1,373 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package healthcare + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceHealthcareConsentStore() *schema.Resource { + return &schema.Resource{ + Create: resourceHealthcareConsentStoreCreate, + Read: resourceHealthcareConsentStoreRead, + Update: resourceHealthcareConsentStoreUpdate, + Delete: resourceHealthcareConsentStoreDelete, + + Importer: &schema.ResourceImporter{ + State: resourceHealthcareConsentStoreImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "dataset": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Identifies the dataset addressed by this request. Must be in the format +'projects/{project}/locations/{location}/datasets/{dataset}'`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of this ConsentStore, for example: +"consent1"`, + }, + "default_consent_ttl": { + Type: schema.TypeString, + Optional: true, + Description: `Default time to live for consents in this store. Must be at least 24 hours. Updating this field will not affect the expiration time of existing consents. + +A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, + }, + "enable_consent_create_on_update": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, [consents.patch] [google.cloud.healthcare.v1.consent.UpdateConsent] creates the consent if it does not already exist.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-supplied key-value pairs used to organize Consent stores. + +Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must +conform to the following PCRE regular expression: '[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}' + +Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 +bytes, and must conform to the following PCRE regular expression: '[\p{Ll}\p{Lo}\p{N}_-]{0,63}' + +No more than 64 labels can be associated with a given store. + +An object containing a list of "key": value pairs. +Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + UseJSONNumber: true, + } +} + +func resourceHealthcareConsentStoreCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + defaultConsentTtlProp, err := expandHealthcareConsentStoreDefaultConsentTtl(d.Get("default_consent_ttl"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_consent_ttl"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultConsentTtlProp)) && (ok || !reflect.DeepEqual(v, defaultConsentTtlProp)) { + obj["defaultConsentTtl"] = defaultConsentTtlProp + } + enableConsentCreateOnUpdateProp, err := expandHealthcareConsentStoreEnableConsentCreateOnUpdate(d.Get("enable_consent_create_on_update"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("enable_consent_create_on_update"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableConsentCreateOnUpdateProp)) && (ok || !reflect.DeepEqual(v, enableConsentCreateOnUpdateProp)) { + obj["enableConsentCreateOnUpdate"] = enableConsentCreateOnUpdateProp + } + labelsProp, err := expandHealthcareConsentStoreLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/consentStores?consentStoreId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ConsentStore: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ConsentStore: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{dataset}}/consentStores/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ConsentStore %q: %#v", d.Id(), res) + + return resourceHealthcareConsentStoreRead(d, meta) +} + +func resourceHealthcareConsentStoreRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/consentStores/{{name}}") + if 
err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("HealthcareConsentStore %q", d.Id())) + } + + if err := d.Set("default_consent_ttl", flattenHealthcareConsentStoreDefaultConsentTtl(res["defaultConsentTtl"], d, config)); err != nil { + return fmt.Errorf("Error reading ConsentStore: %s", err) + } + if err := d.Set("enable_consent_create_on_update", flattenHealthcareConsentStoreEnableConsentCreateOnUpdate(res["enableConsentCreateOnUpdate"], d, config)); err != nil { + return fmt.Errorf("Error reading ConsentStore: %s", err) + } + if err := d.Set("labels", flattenHealthcareConsentStoreLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ConsentStore: %s", err) + } + + return nil +} + +func resourceHealthcareConsentStoreUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + defaultConsentTtlProp, err := expandHealthcareConsentStoreDefaultConsentTtl(d.Get("default_consent_ttl"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_consent_ttl"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultConsentTtlProp)) { + obj["defaultConsentTtl"] = defaultConsentTtlProp + } + enableConsentCreateOnUpdateProp, err := expandHealthcareConsentStoreEnableConsentCreateOnUpdate(d.Get("enable_consent_create_on_update"), d, config) + if err != nil 
{ + return err + } else if v, ok := d.GetOkExists("enable_consent_create_on_update"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableConsentCreateOnUpdateProp)) { + obj["enableConsentCreateOnUpdate"] = enableConsentCreateOnUpdateProp + } + labelsProp, err := expandHealthcareConsentStoreLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/consentStores/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ConsentStore %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("default_consent_ttl") { + updateMask = append(updateMask, "defaultConsentTtl") + } + + if d.HasChange("enable_consent_create_on_update") { + updateMask = append(updateMask, "enableConsentCreateOnUpdate") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating ConsentStore %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ConsentStore %q: %#v", d.Id(), res) + } + + return resourceHealthcareConsentStoreRead(d, meta) 
+} + +func resourceHealthcareConsentStoreDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/consentStores/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting ConsentStore %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ConsentStore") + } + + log.Printf("[DEBUG] Finished deleting ConsentStore %q: %#v", d.Id(), res) + return nil +} + +func resourceHealthcareConsentStoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/consentStores/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{dataset}}/consentStores/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenHealthcareConsentStoreDefaultConsentTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareConsentStoreEnableConsentCreateOnUpdate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenHealthcareConsentStoreLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandHealthcareConsentStoreDefaultConsentTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareConsentStoreEnableConsentCreateOnUpdate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareConsentStoreLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_dataset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_dataset.go new file mode 100644 index 0000000000..59e29853c3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_dataset.go @@ -0,0 +1,375 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package healthcare + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceHealthcareDataset() *schema.Resource { + return &schema.Resource{ + Create: resourceHealthcareDatasetCreate, + Read: resourceHealthcareDatasetRead, + Update: resourceHealthcareDatasetUpdate, + Delete: resourceHealthcareDatasetDelete, + + Importer: &schema.ResourceImporter{ + State: resourceHealthcareDatasetImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location for the Dataset.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name for the Dataset.`, + }, + "time_zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The default timezone used by this dataset. Must be a either a valid IANA time zone name such as +"America/New_York" or empty, which defaults to UTC. 
This is used for parsing times in resources +(e.g., HL7 messages) where no explicit timezone is specified.`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The fully qualified name of this dataset`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceHealthcareDatasetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandHealthcareDatasetName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + timeZoneProp, err := expandHealthcareDatasetTimeZone(d.Get("time_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("time_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeZoneProp)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { + obj["timeZone"] = timeZoneProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}projects/{{project}}/locations/{{location}}/datasets?datasetId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Dataset: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Dataset: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + 
UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.HealthcareDatasetNotInitialized}, + }) + if err != nil { + return fmt.Errorf("Error creating Dataset: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/datasets/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Dataset %q: %#v", d.Id(), res) + + return resourceHealthcareDatasetRead(d, meta) +} + +func resourceHealthcareDatasetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}projects/{{project}}/locations/{{location}}/datasets/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Dataset: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.HealthcareDatasetNotInitialized}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("HealthcareDataset %q", d.Id())) + } + + res, err = resourceHealthcareDatasetDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing HealthcareDataset because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + + if err := d.Set("name", flattenHealthcareDatasetName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("time_zone", flattenHealthcareDatasetTimeZone(res["timeZone"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + + return nil +} + +func resourceHealthcareDatasetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Dataset: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + timeZoneProp, err := expandHealthcareDatasetTimeZone(d.Get("time_zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("time_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { + obj["timeZone"] = timeZoneProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}projects/{{project}}/locations/{{location}}/datasets/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Dataset %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("time_zone") { + updateMask = append(updateMask, "timeZone") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the 
billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.HealthcareDatasetNotInitialized}, + }) + + if err != nil { + return fmt.Errorf("Error updating Dataset %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Dataset %q: %#v", d.Id(), res) + } + + return resourceHealthcareDatasetRead(d, meta) +} + +func resourceHealthcareDatasetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Dataset: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}projects/{{project}}/locations/{{location}}/datasets/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Dataset %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.HealthcareDatasetNotInitialized}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, 
"Dataset") + } + + log.Printf("[DEBUG] Finished deleting Dataset %q: %#v", d.Id(), res) + return nil +} + +func resourceHealthcareDatasetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/datasets/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/datasets/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenHealthcareDatasetName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareDatasetTimeZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandHealthcareDatasetName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareDatasetTimeZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceHealthcareDatasetDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // Take the returned long form of the name and use it as `self_link`. + // Then modify the name to be the user specified form. + // We can't just ignore_read on `name` as the linter will + // complain that the returned `res` is never used afterwards. + // Some field needs to be actually set, and we chose `name`. 
+ if err := d.Set("self_link", res["name"].(string)); err != nil { + return nil, fmt.Errorf("Error setting self_link: %s", err) + } + res["name"] = d.Get("name").(string) + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_dataset_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_dataset_sweeper.go new file mode 100644 index 0000000000..747127762c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_dataset_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package healthcare + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("HealthcareDataset", testSweepHealthcareDataset) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepHealthcareDataset(region string) error { + resourceName := "HealthcareDataset" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://healthcare.googleapis.com/v1/projects/{{project}}/locations/{{location}}/datasets?datasetId={{name}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["datasets"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://healthcare.googleapis.com/v1/projects/{{project}}/locations/{{location}}/datasets/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_dicom_store.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_dicom_store.go new file mode 100644 index 0000000000..74b4349a5d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_dicom_store.go @@ -0,0 +1,435 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package healthcare + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceHealthcareDicomStore() *schema.Resource { + return &schema.Resource{ + Create: resourceHealthcareDicomStoreCreate, + Read: resourceHealthcareDicomStoreRead, + Update: resourceHealthcareDicomStoreUpdate, + Delete: resourceHealthcareDicomStoreDelete, + + Importer: &schema.ResourceImporter{ + State: resourceHealthcareDicomStoreImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "dataset": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: 
tpgresource.CompareSelfLinkOrResourceName, + Description: `Identifies the dataset addressed by this request. Must be in the format +'projects/{project}/locations/{location}/datasets/{dataset}'`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name for the DicomStore. + +** Changing this property may recreate the Dicom store (removing all data) **`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-supplied key-value pairs used to organize DICOM stores. + +Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must +conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + +Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 +bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + +No more than 64 labels can be associated with a given store. + +An object containing a list of "key": value pairs. +Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "notification_config": { + Type: schema.TypeList, + Optional: true, + Description: `A nested object resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pubsub_topic": { + Type: schema.TypeString, + Required: true, + Description: `The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. +PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. +It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message +was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a +project. 
service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given +Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail.`, + }, + }, + }, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The fully qualified name of this dataset`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceHealthcareDicomStoreCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandHealthcareDicomStoreName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + labelsProp, err := expandHealthcareDicomStoreLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + notificationConfigProp, err := expandHealthcareDicomStoreNotificationConfig(d.Get("notification_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(notificationConfigProp)) && (ok || !reflect.DeepEqual(v, notificationConfigProp)) { + obj["notificationConfig"] = notificationConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dicomStores?dicomStoreId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new DicomStore: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating DicomStore: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{dataset}}/dicomStores/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating DicomStore %q: %#v", d.Id(), res) + + return resourceHealthcareDicomStoreRead(d, meta) +} + +func resourceHealthcareDicomStoreRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dicomStores/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("HealthcareDicomStore %q", d.Id())) + } + + res, err = resourceHealthcareDicomStoreDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing HealthcareDicomStore because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenHealthcareDicomStoreName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading DicomStore: %s", err) + } + if err := d.Set("labels", flattenHealthcareDicomStoreLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading DicomStore: %s", err) + } + if err := d.Set("notification_config", flattenHealthcareDicomStoreNotificationConfig(res["notificationConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading DicomStore: %s", err) + } + + return nil +} + +func resourceHealthcareDicomStoreUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + labelsProp, err := expandHealthcareDicomStoreLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + notificationConfigProp, err := expandHealthcareDicomStoreNotificationConfig(d.Get("notification_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationConfigProp)) { + obj["notificationConfig"] = notificationConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dicomStores/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating DicomStore %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if 
d.HasChange("notification_config") { + updateMask = append(updateMask, "notificationConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating DicomStore %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating DicomStore %q: %#v", d.Id(), res) + } + + return resourceHealthcareDicomStoreRead(d, meta) +} + +func resourceHealthcareDicomStoreDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dicomStores/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting DicomStore %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DicomStore") + } + + 
log.Printf("[DEBUG] Finished deleting DicomStore %q: %#v", d.Id(), res) + return nil +} + +func resourceHealthcareDicomStoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + dicomStoreId, err := ParseHealthcareDicomStoreId(d.Id(), config) + if err != nil { + return nil, err + } + + if err := d.Set("dataset", dicomStoreId.DatasetId.DatasetId()); err != nil { + return nil, fmt.Errorf("Error setting dataset: %s", err) + } + if err := d.Set("name", dicomStoreId.Name); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenHealthcareDicomStoreName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareDicomStoreLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareDicomStoreNotificationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pubsub_topic"] = + flattenHealthcareDicomStoreNotificationConfigPubsubTopic(original["pubsubTopic"], d, config) + return []interface{}{transformed} +} +func flattenHealthcareDicomStoreNotificationConfigPubsubTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandHealthcareDicomStoreName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareDicomStoreLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range 
v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandHealthcareDicomStoreNotificationConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPubsubTopic, err := expandHealthcareDicomStoreNotificationConfigPubsubTopic(original["pubsub_topic"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pubsubTopic"] = transformedPubsubTopic + } + + return transformed, nil +} + +func expandHealthcareDicomStoreNotificationConfigPubsubTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceHealthcareDicomStoreDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // Take the returned long form of the name and use it as `self_link`. + // Then modify the name to be the user specified form. + // We can't just ignore_read on `name` as the linter will + // complain that the returned `res` is never used afterwards. + // Some field needs to be actually set, and we chose `name`. 
+ if err := d.Set("self_link", res["name"].(string)); err != nil { + return nil, fmt.Errorf("Error setting self_link: %s", err) + } + res["name"] = d.Get("name").(string) + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_fhir_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_fhir_store.go new file mode 100644 index 0000000000..121aadb1c1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_fhir_store.go @@ -0,0 +1,885 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package healthcare + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceHealthcareFhirStore() *schema.Resource { + return &schema.Resource{ + Create: resourceHealthcareFhirStoreCreate, + Read: resourceHealthcareFhirStoreRead, + Update: resourceHealthcareFhirStoreUpdate, + Delete: resourceHealthcareFhirStoreDelete, + + Importer: &schema.ResourceImporter{ + State: resourceHealthcareFhirStoreImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "dataset": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Identifies the dataset addressed by this request. Must be in the format +'projects/{project}/locations/{location}/datasets/{dataset}'`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name for the FhirStore. + +** Changing this property may recreate the FHIR store (removing all data) **`, + }, + "version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"DSTU2", "STU3", "R4"}), + Description: `The FHIR specification version. 
Possible values: ["DSTU2", "STU3", "R4"]`, + }, + "complex_data_type_reference_parsing": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"COMPLEX_DATA_TYPE_REFERENCE_PARSING_UNSPECIFIED", "DISABLED", "ENABLED", ""}), + Description: `Enable parsing of references within complex FHIR data types such as Extensions. If this value is set to ENABLED, then features like referential integrity and Bundle reference rewriting apply to all references. If this flag has not been specified the behavior of the FHIR store will not change, references in complex data types will not be parsed. New stores will have this value set to ENABLED by default after a notification period. Warning: turning on this flag causes processing existing resources to fail if they contain references to non-existent resources. Possible values: ["COMPLEX_DATA_TYPE_REFERENCE_PARSING_UNSPECIFIED", "DISABLED", "ENABLED"]`, + }, + "disable_referential_integrity": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to disable referential integrity in this FHIR store. This field is immutable after FHIR store +creation. The default value is false, meaning that the API will enforce referential integrity and fail the +requests that will result in inconsistent state in the FHIR store. When this field is set to true, the API +will skip referential integrity check. Consequently, operations that rely on references, such as +Patient.get$everything, will not return all the results if broken references exist. + +** Changing this property may recreate the FHIR store (removing all data) **`, + }, + "disable_resource_versioning": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to disable resource versioning for this FHIR store. This field can not be changed after the creation +of FHIR store. 
If set to false, which is the default behavior, all write operations will cause historical +versions to be recorded automatically. The historical versions can be fetched through the history APIs, but +cannot be updated. If set to true, no historical versions will be kept. The server will send back errors for +attempts to read the historical versions. + +** Changing this property may recreate the FHIR store (removing all data) **`, + }, + "enable_history_import": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to allow the bulk import API to accept history bundles and directly insert historical resource +versions into the FHIR store. Importing resource histories creates resource interactions that appear to have +occurred in the past, which clients may not want to allow. If set to false, history bundles within an import +will fail with an error. + +** Changing this property may recreate the FHIR store (removing all data) ** + +** This property can be changed manually in the Google Cloud Healthcare admin console without recreating the FHIR store **`, + }, + "enable_update_create": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether this FHIR store has the updateCreate capability. This determines if the client can use an Update +operation to create a new resource with a client-specified ID. If false, all IDs are server-assigned through +the Create operation and attempts to Update a non-existent resource will return errors. Please treat the audit +logs with appropriate levels of care if client-specified resource IDs contain sensitive data such as patient +identifiers, those IDs will be part of the FHIR resource path recorded in Cloud audit logs and Cloud Pub/Sub +notifications.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-supplied key-value pairs used to organize FHIR stores. 
+ +Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must +conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + +Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 +bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + +No more than 64 labels can be associated with a given store. + +An object containing a list of "key": value pairs. +Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "notification_config": { + Type: schema.TypeList, + Optional: true, + Description: `A nested object resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pubsub_topic": { + Type: schema.TypeString, + Required: true, + Description: `The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. +PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. +It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message +was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a +project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given +Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail.`, + }, + }, + }, + }, + "stream_configs": { + Type: schema.TypeList, + Optional: true, + Description: `A list of streaming configs that configure the destinations of streaming export for every resource mutation in +this FHIR store. Each store is allowed to have up to 10 streaming configs. After a new config is added, the next +resource mutation is streamed to the new location in addition to the existing ones. 
When a location is removed +from the list, the server stops streaming to that location. Before adding a new config, you must add the required +bigquery.dataEditor role to your project's Cloud Healthcare Service Agent service account. Some lag (typically on +the order of dozens of seconds) is expected before the results show up in the streaming destination.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bigquery_destination": { + Type: schema.TypeList, + Required: true, + Description: `The destination BigQuery structure that contains both the dataset location and corresponding schema config. +The output is organized in one table per resource type. The server reuses the existing tables (if any) that +are named after the resource types, e.g. "Patient", "Observation". When there is no existing table for a given +resource type, the server attempts to create one. +See the [streaming config reference](https://cloud.google.com/healthcare/docs/reference/rest/v1beta1/projects.locations.datasets.fhirStores#streamconfig) for more details.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_uri": { + Type: schema.TypeString, + Required: true, + Description: `BigQuery URI to a dataset, up to 2000 characters long, in the format bq://projectId.bqDatasetId`, + }, + "schema_config": { + Type: schema.TypeList, + Required: true, + Description: `The configuration for the exported BigQuery schema.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "recursive_structure_depth": { + Type: schema.TypeInt, + Required: true, + Description: `The depth for all recursive structures in the output analytics schema. For example, concept in the CodeSystem +resource is a recursive structure; when the depth is 2, the CodeSystem table will have a column called +concept.concept but not concept.concept.concept. If not specified or set to 0, the server will use the default +value 2. 
The maximum depth allowed is 5.`, + }, + "schema_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ANALYTICS", "ANALYTICS_V2", "LOSSLESS", ""}), + Description: `Specifies the output schema type. + * ANALYTICS: Analytics schema defined by the FHIR community. + See https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md. + * ANALYTICS_V2: Analytics V2, similar to schema defined by the FHIR community, with added support for extensions with one or more occurrences and contained resources in stringified JSON. + * LOSSLESS: A data-driven schema generated from the fields present in the FHIR data being exported, with no additional simplification. Default value: "ANALYTICS" Possible values: ["ANALYTICS", "ANALYTICS_V2", "LOSSLESS"]`, + Default: "ANALYTICS", + }, + }, + }, + }, + }, + }, + }, + "resource_types": { + Type: schema.TypeList, + Optional: true, + Description: `Supply a FHIR resource type (such as "Patient" or "Observation"). See +https://www.hl7.org/fhir/valueset-resource-types.html for a list of all FHIR resource types. 
The server treats +an empty list as an intent to stream all the supported resource types in this FHIR store.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The fully qualified name of this dataset`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceHealthcareFhirStoreCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandHealthcareFhirStoreName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + versionProp, err := expandHealthcareFhirStoreVersion(d.Get("version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version"); !tpgresource.IsEmptyValue(reflect.ValueOf(versionProp)) && (ok || !reflect.DeepEqual(v, versionProp)) { + obj["version"] = versionProp + } + complexDataTypeReferenceParsingProp, err := expandHealthcareFhirStoreComplexDataTypeReferenceParsing(d.Get("complex_data_type_reference_parsing"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("complex_data_type_reference_parsing"); !tpgresource.IsEmptyValue(reflect.ValueOf(complexDataTypeReferenceParsingProp)) && (ok || !reflect.DeepEqual(v, complexDataTypeReferenceParsingProp)) { + obj["complexDataTypeReferenceParsing"] = complexDataTypeReferenceParsingProp + } + enableUpdateCreateProp, err := expandHealthcareFhirStoreEnableUpdateCreate(d.Get("enable_update_create"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_update_create"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableUpdateCreateProp)) && (ok || 
!reflect.DeepEqual(v, enableUpdateCreateProp)) { + obj["enableUpdateCreate"] = enableUpdateCreateProp + } + disableReferentialIntegrityProp, err := expandHealthcareFhirStoreDisableReferentialIntegrity(d.Get("disable_referential_integrity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disable_referential_integrity"); !tpgresource.IsEmptyValue(reflect.ValueOf(disableReferentialIntegrityProp)) && (ok || !reflect.DeepEqual(v, disableReferentialIntegrityProp)) { + obj["disableReferentialIntegrity"] = disableReferentialIntegrityProp + } + disableResourceVersioningProp, err := expandHealthcareFhirStoreDisableResourceVersioning(d.Get("disable_resource_versioning"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disable_resource_versioning"); !tpgresource.IsEmptyValue(reflect.ValueOf(disableResourceVersioningProp)) && (ok || !reflect.DeepEqual(v, disableResourceVersioningProp)) { + obj["disableResourceVersioning"] = disableResourceVersioningProp + } + enableHistoryImportProp, err := expandHealthcareFhirStoreEnableHistoryImport(d.Get("enable_history_import"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_history_import"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableHistoryImportProp)) && (ok || !reflect.DeepEqual(v, enableHistoryImportProp)) { + obj["enableHistoryImport"] = enableHistoryImportProp + } + labelsProp, err := expandHealthcareFhirStoreLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + notificationConfigProp, err := expandHealthcareFhirStoreNotificationConfig(d.Get("notification_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(notificationConfigProp)) && (ok || 
!reflect.DeepEqual(v, notificationConfigProp)) { + obj["notificationConfig"] = notificationConfigProp + } + streamConfigsProp, err := expandHealthcareFhirStoreStreamConfigs(d.Get("stream_configs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("stream_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(streamConfigsProp)) && (ok || !reflect.DeepEqual(v, streamConfigsProp)) { + obj["streamConfigs"] = streamConfigsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/fhirStores?fhirStoreId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new FhirStore: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating FhirStore: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{dataset}}/fhirStores/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating FhirStore %q: %#v", d.Id(), res) + + return resourceHealthcareFhirStoreRead(d, meta) +} + +func resourceHealthcareFhirStoreRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/fhirStores/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err 
:= tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("HealthcareFhirStore %q", d.Id())) + } + + res, err = resourceHealthcareFhirStoreDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing HealthcareFhirStore because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenHealthcareFhirStoreName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading FhirStore: %s", err) + } + if err := d.Set("version", flattenHealthcareFhirStoreVersion(res["version"], d, config)); err != nil { + return fmt.Errorf("Error reading FhirStore: %s", err) + } + if err := d.Set("complex_data_type_reference_parsing", flattenHealthcareFhirStoreComplexDataTypeReferenceParsing(res["complexDataTypeReferenceParsing"], d, config)); err != nil { + return fmt.Errorf("Error reading FhirStore: %s", err) + } + if err := d.Set("enable_update_create", flattenHealthcareFhirStoreEnableUpdateCreate(res["enableUpdateCreate"], d, config)); err != nil { + return fmt.Errorf("Error reading FhirStore: %s", err) + } + if err := d.Set("disable_referential_integrity", flattenHealthcareFhirStoreDisableReferentialIntegrity(res["disableReferentialIntegrity"], d, config)); err != nil { + return fmt.Errorf("Error reading FhirStore: %s", err) + } + if err := d.Set("disable_resource_versioning", flattenHealthcareFhirStoreDisableResourceVersioning(res["disableResourceVersioning"], d, config)); err != nil { + return fmt.Errorf("Error reading FhirStore: %s", err) + } + if err := d.Set("enable_history_import", 
flattenHealthcareFhirStoreEnableHistoryImport(res["enableHistoryImport"], d, config)); err != nil { + return fmt.Errorf("Error reading FhirStore: %s", err) + } + if err := d.Set("labels", flattenHealthcareFhirStoreLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading FhirStore: %s", err) + } + if err := d.Set("notification_config", flattenHealthcareFhirStoreNotificationConfig(res["notificationConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading FhirStore: %s", err) + } + if err := d.Set("stream_configs", flattenHealthcareFhirStoreStreamConfigs(res["streamConfigs"], d, config)); err != nil { + return fmt.Errorf("Error reading FhirStore: %s", err) + } + + return nil +} + +func resourceHealthcareFhirStoreUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + complexDataTypeReferenceParsingProp, err := expandHealthcareFhirStoreComplexDataTypeReferenceParsing(d.Get("complex_data_type_reference_parsing"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("complex_data_type_reference_parsing"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, complexDataTypeReferenceParsingProp)) { + obj["complexDataTypeReferenceParsing"] = complexDataTypeReferenceParsingProp + } + enableUpdateCreateProp, err := expandHealthcareFhirStoreEnableUpdateCreate(d.Get("enable_update_create"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_update_create"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableUpdateCreateProp)) { + obj["enableUpdateCreate"] = enableUpdateCreateProp + } + labelsProp, err := expandHealthcareFhirStoreLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, 
ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + notificationConfigProp, err := expandHealthcareFhirStoreNotificationConfig(d.Get("notification_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationConfigProp)) { + obj["notificationConfig"] = notificationConfigProp + } + streamConfigsProp, err := expandHealthcareFhirStoreStreamConfigs(d.Get("stream_configs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("stream_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, streamConfigsProp)) { + obj["streamConfigs"] = streamConfigsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/fhirStores/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating FhirStore %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("complex_data_type_reference_parsing") { + updateMask = append(updateMask, "complexDataTypeReferenceParsing") + } + + if d.HasChange("enable_update_create") { + updateMask = append(updateMask, "enableUpdateCreate") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("notification_config") { + updateMask = append(updateMask, "notificationConfig") + } + + if d.HasChange("stream_configs") { + updateMask = append(updateMask, "streamConfigs") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + 
billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating FhirStore %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating FhirStore %q: %#v", d.Id(), res) + } + + return resourceHealthcareFhirStoreRead(d, meta) +} + +func resourceHealthcareFhirStoreDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/fhirStores/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting FhirStore %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "FhirStore") + } + + log.Printf("[DEBUG] Finished deleting FhirStore %q: %#v", d.Id(), res) + return nil +} + +func resourceHealthcareFhirStoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + fhirStoreId, err := ParseHealthcareFhirStoreId(d.Id(), config) + if err != nil { + return nil, err + } + + if err := d.Set("dataset", fhirStoreId.DatasetId.DatasetId()); err != nil { + return nil, fmt.Errorf("Error setting dataset: %s", err) + } + if 
err := d.Set("name", fhirStoreId.Name); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenHealthcareFhirStoreName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareFhirStoreVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareFhirStoreComplexDataTypeReferenceParsing(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareFhirStoreEnableUpdateCreate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareFhirStoreDisableReferentialIntegrity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareFhirStoreDisableResourceVersioning(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareFhirStoreEnableHistoryImport(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareFhirStoreLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareFhirStoreNotificationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pubsub_topic"] = + flattenHealthcareFhirStoreNotificationConfigPubsubTopic(original["pubsubTopic"], d, config) + return []interface{}{transformed} +} +func flattenHealthcareFhirStoreNotificationConfigPubsubTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenHealthcareFhirStoreStreamConfigs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "resource_types": flattenHealthcareFhirStoreStreamConfigsResourceTypes(original["resourceTypes"], d, config), + "bigquery_destination": flattenHealthcareFhirStoreStreamConfigsBigqueryDestination(original["bigqueryDestination"], d, config), + }) + } + return transformed +} +func flattenHealthcareFhirStoreStreamConfigsResourceTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareFhirStoreStreamConfigsBigqueryDestination(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset_uri"] = + flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationDatasetUri(original["datasetUri"], d, config) + transformed["schema_config"] = + flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(original["schemaConfig"], d, config) + return []interface{}{transformed} +} +func flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationDatasetUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := 
make(map[string]interface{}) + transformed["schema_type"] = + flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaType(original["schemaType"], d, config) + transformed["recursive_structure_depth"] = + flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigRecursiveStructureDepth(original["recursiveStructureDepth"], d, config) + return []interface{}{transformed} +} +func flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigRecursiveStructureDepth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandHealthcareFhirStoreName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareFhirStoreVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareFhirStoreComplexDataTypeReferenceParsing(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareFhirStoreEnableUpdateCreate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareFhirStoreDisableReferentialIntegrity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + 
return v, nil +} + +func expandHealthcareFhirStoreDisableResourceVersioning(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareFhirStoreEnableHistoryImport(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareFhirStoreLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandHealthcareFhirStoreNotificationConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPubsubTopic, err := expandHealthcareFhirStoreNotificationConfigPubsubTopic(original["pubsub_topic"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pubsubTopic"] = transformedPubsubTopic + } + + return transformed, nil +} + +func expandHealthcareFhirStoreNotificationConfigPubsubTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareFhirStoreStreamConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedResourceTypes, err 
:= expandHealthcareFhirStoreStreamConfigsResourceTypes(original["resource_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResourceTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resourceTypes"] = transformedResourceTypes + } + + transformedBigqueryDestination, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestination(original["bigquery_destination"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBigqueryDestination); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bigqueryDestination"] = transformedBigqueryDestination + } + + req = append(req, transformed) + } + return req, nil +} + +func expandHealthcareFhirStoreStreamConfigsResourceTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareFhirStoreStreamConfigsBigqueryDestination(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDatasetUri, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestinationDatasetUri(original["dataset_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["datasetUri"] = transformedDatasetUri + } + + transformedSchemaConfig, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(original["schema_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSchemaConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["schemaConfig"] = transformedSchemaConfig + } + + return transformed, nil +} + 
+func expandHealthcareFhirStoreStreamConfigsBigqueryDestinationDatasetUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSchemaType, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaType(original["schema_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSchemaType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["schemaType"] = transformedSchemaType + } + + transformedRecursiveStructureDepth, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigRecursiveStructureDepth(original["recursive_structure_depth"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRecursiveStructureDepth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["recursiveStructureDepth"] = transformedRecursiveStructureDepth + } + + return transformed, nil +} + +func expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigRecursiveStructureDepth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceHealthcareFhirStoreDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // Take the returned long form of 
the name and use it as `self_link`. + // Then modify the name to be the user specified form. + // We can't just ignore_read on `name` as the linter will + // complain that the returned `res` is never used afterwards. + // Some field needs to be actually set, and we chose `name`. + if err := d.Set("self_link", res["name"].(string)); err != nil { + return nil, fmt.Errorf("Error setting self_link: %s", err) + } + res["name"] = d.Get("name").(string) + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_hl7_v2_store.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_hl7_v2_store.go new file mode 100644 index 0000000000..a416236634 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/healthcare/resource_healthcare_hl7_v2_store.go @@ -0,0 +1,731 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package healthcare + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceHealthcareHl7V2Store() *schema.Resource { + return &schema.Resource{ + Create: resourceHealthcareHl7V2StoreCreate, + Read: resourceHealthcareHl7V2StoreRead, + Update: resourceHealthcareHl7V2StoreUpdate, + Delete: resourceHealthcareHl7V2StoreDelete, + + Importer: &schema.ResourceImporter{ + State: resourceHealthcareHl7V2StoreImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "dataset": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Identifies the dataset addressed by this request. Must be in the format +'projects/{project}/locations/{location}/datasets/{dataset}'`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name for the Hl7V2Store. + +** Changing this property may recreate the Hl7v2 store (removing all data) **`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-supplied key-value pairs used to organize HL7v2 stores. 
+ +Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must +conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + +Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 +bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + +No more than 64 labels can be associated with a given store. + +An object containing a list of "key": value pairs. +Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "notification_config": { + Type: schema.TypeList, + Optional: true, + Description: `A nested object resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pubsub_topic": { + Type: schema.TypeString, + Required: true, + Description: `The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. +PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. +It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message +was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a +project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given +Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail.`, + }, + }, + }, + }, + "notification_configs": { + Type: schema.TypeList, + Optional: true, + Description: `A list of notification configs. Each configuration uses a filter to determine whether to publish a +message (both Ingest & Create) on the corresponding notification destination. Only the message name +is sent as part of the notification. 
Supplied by the client.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pubsub_topic": { + Type: schema.TypeString, + Required: true, + Description: `The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. +PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. +It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message +was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a +project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given +Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail. + +If a notification cannot be published to Cloud Pub/Sub, errors will be logged to Stackdriver`, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: `Restricts notifications sent for messages matching a filter. If this is empty, all messages +are matched. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings + +Fields/functions available for filtering are: + +* messageType, from the MSH-9.1 field. For example, NOT messageType = "ADT". +* send_date or sendDate, the YYYY-MM-DD date the message was sent in the dataset's timeZone, from the MSH-7 segment. For example, send_date < "2017-01-02". +* sendTime, the timestamp when the message was sent, using the RFC3339 time format for comparisons, from the MSH-7 segment. For example, sendTime < "2017-01-02T00:00:00-05:00". +* sendFacility, the care center that the message came from, from the MSH-4 segment. For example, sendFacility = "ABC". +* PatientId(value, type), which matches if the message lists a patient having an ID of the given value and type in the PID-2, PID-3, or PID-4 segments. For example, PatientId("123456", "MRN"). 
+* labels.x, a string value of the label with key x as set using the Message.labels map. For example, labels."priority"="high". The operator :* can be used to assert the existence of a label. For example, labels."priority":*.`, + }, + }, + }, + }, + "parser_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `A nested object resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_null_header": { + Type: schema.TypeBool, + Optional: true, + Description: `Determines whether messages with no header are allowed.`, + AtLeastOneOf: []string{"parser_config.0.allow_null_header", "parser_config.0.segment_terminator", "parser_config.0.schema"}, + }, + "schema": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `JSON encoded string for schemas used to parse messages in this +store if schematized parsing is desired.`, + AtLeastOneOf: []string{"parser_config.0.allow_null_header", "parser_config.0.segment_terminator", "parser_config.0.schema", "parser_config.0.version"}, + }, + "segment_terminator": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateBase64String, + Description: `Byte(s) to be used as the segment terminator. If this is unset, '\r' will be used as segment terminator. + +A base64-encoded string.`, + AtLeastOneOf: []string{"parser_config.0.allow_null_header", "parser_config.0.segment_terminator", "parser_config.0.schema"}, + }, + "version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"V1", "V2", "V3", ""}), + Description: `The version of the unschematized parser to be used when a custom 'schema' is not set. 
Default value: "V1" Possible values: ["V1", "V2", "V3"]`, + Default: "V1", + }, + }, + }, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The fully qualified name of this dataset`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceHealthcareHl7V2StoreCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandHealthcareHl7V2StoreName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + parserConfigProp, err := expandHealthcareHl7V2StoreParserConfig(d.Get("parser_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parser_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(parserConfigProp)) && (ok || !reflect.DeepEqual(v, parserConfigProp)) { + obj["parserConfig"] = parserConfigProp + } + labelsProp, err := expandHealthcareHl7V2StoreLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + notificationConfigsProp, err := expandHealthcareHl7V2StoreNotificationConfigs(d.Get("notification_configs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(notificationConfigsProp)) && (ok || !reflect.DeepEqual(v, notificationConfigsProp)) { + obj["notificationConfigs"] = notificationConfigsProp + } + notificationConfigProp, err := expandHealthcareHl7V2StoreNotificationConfig(d.Get("notification_config"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("notification_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(notificationConfigProp)) && (ok || !reflect.DeepEqual(v, notificationConfigProp)) { + obj["notificationConfig"] = notificationConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/hl7V2Stores?hl7V2StoreId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Hl7V2Store: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Hl7V2Store: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{dataset}}/hl7V2Stores/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Hl7V2Store %q: %#v", d.Id(), res) + + return resourceHealthcareHl7V2StoreRead(d, meta) +} + +func resourceHealthcareHl7V2StoreRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/hl7V2Stores/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", 
+ Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("HealthcareHl7V2Store %q", d.Id())) + } + + res, err = resourceHealthcareHl7V2StoreDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing HealthcareHl7V2Store because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenHealthcareHl7V2StoreName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Hl7V2Store: %s", err) + } + if err := d.Set("parser_config", flattenHealthcareHl7V2StoreParserConfig(res["parserConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Hl7V2Store: %s", err) + } + if err := d.Set("labels", flattenHealthcareHl7V2StoreLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Hl7V2Store: %s", err) + } + if err := d.Set("notification_configs", flattenHealthcareHl7V2StoreNotificationConfigs(res["notificationConfigs"], d, config)); err != nil { + return fmt.Errorf("Error reading Hl7V2Store: %s", err) + } + if err := d.Set("notification_config", flattenHealthcareHl7V2StoreNotificationConfig(res["notificationConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Hl7V2Store: %s", err) + } + + return nil +} + +func resourceHealthcareHl7V2StoreUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + parserConfigProp, err := expandHealthcareHl7V2StoreParserConfig(d.Get("parser_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parser_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, parserConfigProp)) { + obj["parserConfig"] = parserConfigProp + } + labelsProp, err := expandHealthcareHl7V2StoreLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + notificationConfigsProp, err := expandHealthcareHl7V2StoreNotificationConfigs(d.Get("notification_configs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationConfigsProp)) { + obj["notificationConfigs"] = notificationConfigsProp + } + notificationConfigProp, err := expandHealthcareHl7V2StoreNotificationConfig(d.Get("notification_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationConfigProp)) { + obj["notificationConfig"] = notificationConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/hl7V2Stores/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Hl7V2Store %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("parser_config") { + updateMask = append(updateMask, "parser_config.allow_null_header", + "parser_config.segment_terminator", + "parser_config.schema") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("notification_configs") { + updateMask = append(updateMask, "notificationConfigs") + } + + if d.HasChange("notification_config") { + updateMask = append(updateMask, "notificationConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, 
map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Hl7V2Store %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Hl7V2Store %q: %#v", d.Id(), res) + } + + return resourceHealthcareHl7V2StoreRead(d, meta) +} + +func resourceHealthcareHl7V2StoreDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/hl7V2Stores/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Hl7V2Store %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Hl7V2Store") + } + + log.Printf("[DEBUG] Finished deleting Hl7V2Store %q: %#v", d.Id(), res) + return nil +} + +func resourceHealthcareHl7V2StoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + 
hl7v2StoreId, err := ParseHealthcareHl7V2StoreId(d.Id(), config) + if err != nil { + return nil, err + } + + if err := d.Set("dataset", hl7v2StoreId.DatasetId.DatasetId()); err != nil { + return nil, fmt.Errorf("Error setting dataset: %s", err) + } + if err := d.Set("name", hl7v2StoreId.Name); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenHealthcareHl7V2StoreName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareHl7V2StoreParserConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["allow_null_header"] = + flattenHealthcareHl7V2StoreParserConfigAllowNullHeader(original["allowNullHeader"], d, config) + transformed["segment_terminator"] = + flattenHealthcareHl7V2StoreParserConfigSegmentTerminator(original["segmentTerminator"], d, config) + transformed["schema"] = + flattenHealthcareHl7V2StoreParserConfigSchema(original["schema"], d, config) + transformed["version"] = + flattenHealthcareHl7V2StoreParserConfigVersion(original["version"], d, config) + return []interface{}{transformed} +} +func flattenHealthcareHl7V2StoreParserConfigAllowNullHeader(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareHl7V2StoreParserConfigSegmentTerminator(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareHl7V2StoreParserConfigSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + b, err := json.Marshal(v) + if err != nil { + // TODO: return error once https://github.com/GoogleCloudPlatform/magic-modules/issues/3257 
is fixed. + log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) + } + return string(b) +} + +func flattenHealthcareHl7V2StoreParserConfigVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareHl7V2StoreLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareHl7V2StoreNotificationConfigs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "pubsub_topic": flattenHealthcareHl7V2StoreNotificationConfigsPubsubTopic(original["pubsubTopic"], d, config), + "filter": flattenHealthcareHl7V2StoreNotificationConfigsFilter(original["filter"], d, config), + }) + } + return transformed +} +func flattenHealthcareHl7V2StoreNotificationConfigsPubsubTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareHl7V2StoreNotificationConfigsFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenHealthcareHl7V2StoreNotificationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pubsub_topic"] = + flattenHealthcareHl7V2StoreNotificationConfigPubsubTopic(original["pubsubTopic"], d, config) + return []interface{}{transformed} +} +func flattenHealthcareHl7V2StoreNotificationConfigPubsubTopic(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func expandHealthcareHl7V2StoreName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareHl7V2StoreParserConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAllowNullHeader, err := expandHealthcareHl7V2StoreParserConfigAllowNullHeader(original["allow_null_header"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowNullHeader); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowNullHeader"] = transformedAllowNullHeader + } + + transformedSegmentTerminator, err := expandHealthcareHl7V2StoreParserConfigSegmentTerminator(original["segment_terminator"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSegmentTerminator); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["segmentTerminator"] = transformedSegmentTerminator + } + + transformedSchema, err := expandHealthcareHl7V2StoreParserConfigSchema(original["schema"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["schema"] = transformedSchema + } + + transformedVersion, err := expandHealthcareHl7V2StoreParserConfigVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + + return transformed, nil +} + +func expandHealthcareHl7V2StoreParserConfigAllowNullHeader(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareHl7V2StoreParserConfigSegmentTerminator(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareHl7V2StoreParserConfigSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + b := []byte(v.(string)) + if len(b) == 0 { + return nil, nil + } + m := make(map[string]interface{}) + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + return m, nil +} + +func expandHealthcareHl7V2StoreParserConfigVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareHl7V2StoreLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandHealthcareHl7V2StoreNotificationConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPubsubTopic, err := expandHealthcareHl7V2StoreNotificationConfigsPubsubTopic(original["pubsub_topic"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pubsubTopic"] = transformedPubsubTopic + } + + transformedFilter, err := expandHealthcareHl7V2StoreNotificationConfigsFilter(original["filter"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filter"] = transformedFilter + } + + req = append(req, transformed) + } + return req, nil +} + +func expandHealthcareHl7V2StoreNotificationConfigsPubsubTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareHl7V2StoreNotificationConfigsFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandHealthcareHl7V2StoreNotificationConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPubsubTopic, err := expandHealthcareHl7V2StoreNotificationConfigPubsubTopic(original["pubsub_topic"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pubsubTopic"] = transformedPubsubTopic + } + + return transformed, nil +} + +func expandHealthcareHl7V2StoreNotificationConfigPubsubTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceHealthcareHl7V2StoreDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // Take the returned long form of the name and use it as `self_link`. + // Then modify the name to be the user specified form. + // We can't just ignore_read on `name` as the linter will + // complain that the returned `res` is never used afterwards. + // Some field needs to be actually set, and we chose `name`. 
+ if err := d.Set("self_link", res["name"].(string)); err != nil { + return nil, fmt.Errorf("Error setting self_link: %s", err) + } + res["name"] = d.Get("name").(string) + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iam2/iam2_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iam2/iam2_operation.go new file mode 100644 index 0000000000..385b72651c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iam2/iam2_operation.go @@ -0,0 +1,71 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package iam2 + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type IAM2OperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + tpgresource.CommonOperationWaiter +} + +func (w *IAM2OperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.IAM2BasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createIAM2Waiter(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string) (*IAM2OperationWaiter, error) { + w := &IAM2OperationWaiter{ + Config: config, + UserAgent: userAgent, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +func IAM2OperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createIAM2Waiter(config, op, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iam2/resource_iam_access_boundary_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iam2/resource_iam_access_boundary_policy.go new file mode 100644 index 0000000000..8d449a6940 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iam2/resource_iam_access_boundary_policy.go @@ -0,0 +1,625 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package iam2 + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceIAM2AccessBoundaryPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceIAM2AccessBoundaryPolicyCreate, + Read: resourceIAM2AccessBoundaryPolicyRead, + Update: resourceIAM2AccessBoundaryPolicyUpdate, + Delete: resourceIAM2AccessBoundaryPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIAM2AccessBoundaryPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the policy.`, + }, + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The attachment point is identified by its URL-encoded full resource name.`, + }, + "rules": { + Type: schema.TypeList, + Required: true, + Description: `Rules to be applied.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_boundary_rule": { + Type: schema.TypeList, + Optional: true, + 
Description: `An access boundary rule in an IAM policy.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "availability_condition": { + Type: schema.TypeList, + Optional: true, + Description: `The availability condition further constrains the access allowed by the access boundary rule.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Required: true, + Description: `Textual representation of an expression in Common Expression Language syntax.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of the expression. This is a longer text which describes the expression, +e.g. when hovered over it in a UI.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `String indicating the location of the expression for error reporting, +e.g. a file name and a position in the file.`, + }, + "title": { + Type: schema.TypeString, + Optional: true, + Description: `Title for the expression, i.e. a short string describing its purpose. +This can be used e.g. in UIs which allow to enter the expression.`, + }, + }, + }, + }, + "available_permissions": { + Type: schema.TypeList, + Optional: true, + Description: `A list of permissions that may be allowed for use on the specified resource.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "available_resource": { + Type: schema.TypeString, + Optional: true, + Description: `The full resource name of a Google Cloud resource entity.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `The description of the rule.`, + }, + }, + }, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `The display name of the rule.`, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `The hash of the resource. 
Used internally during updates.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIAM2AccessBoundaryPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandIAM2AccessBoundaryPolicyDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + etagProp, err := expandIAM2AccessBoundaryPolicyEtag(d.Get("etag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(etagProp)) && (ok || !reflect.DeepEqual(v, etagProp)) { + obj["etag"] = etagProp + } + rulesProp, err := expandIAM2AccessBoundaryPolicyRules(d.Get("rules"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rules"); !tpgresource.IsEmptyValue(reflect.ValueOf(rulesProp)) && (ok || !reflect.DeepEqual(v, rulesProp)) { + obj["rules"] = rulesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IAM2BasePath}}policies/{{parent}}/accessboundarypolicies?policyId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AccessBoundaryPolicy: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating 
AccessBoundaryPolicy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = IAM2OperationWaitTime( + config, res, "Creating AccessBoundaryPolicy", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create AccessBoundaryPolicy: %s", err) + } + + log.Printf("[DEBUG] Finished creating AccessBoundaryPolicy %q: %#v", d.Id(), res) + + return resourceIAM2AccessBoundaryPolicyRead(d, meta) +} + +func resourceIAM2AccessBoundaryPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IAM2BasePath}}policies/{{parent}}/accessboundarypolicies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IAM2AccessBoundaryPolicy %q", d.Id())) + } + + if err := d.Set("display_name", flattenIAM2AccessBoundaryPolicyDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessBoundaryPolicy: %s", err) + } + if err := d.Set("etag", flattenIAM2AccessBoundaryPolicyEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading AccessBoundaryPolicy: %s", err) + } + if err := d.Set("rules", flattenIAM2AccessBoundaryPolicyRules(res["rules"], 
d, config)); err != nil { + return fmt.Errorf("Error reading AccessBoundaryPolicy: %s", err) + } + + return nil +} + +func resourceIAM2AccessBoundaryPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + displayNameProp, err := expandIAM2AccessBoundaryPolicyDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + etagProp, err := expandIAM2AccessBoundaryPolicyEtag(d.Get("etag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, etagProp)) { + obj["etag"] = etagProp + } + rulesProp, err := expandIAM2AccessBoundaryPolicyRules(d.Get("rules"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rules"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rulesProp)) { + obj["rules"] = rulesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IAM2BasePath}}policies/{{parent}}/accessboundarypolicies/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating AccessBoundaryPolicy %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error 
updating AccessBoundaryPolicy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating AccessBoundaryPolicy %q: %#v", d.Id(), res) + } + + err = IAM2OperationWaitTime( + config, res, "Updating AccessBoundaryPolicy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceIAM2AccessBoundaryPolicyRead(d, meta) +} + +func resourceIAM2AccessBoundaryPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{IAM2BasePath}}policies/{{parent}}/accessboundarypolicies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AccessBoundaryPolicy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AccessBoundaryPolicy") + } + + err = IAM2OperationWaitTime( + config, res, "Deleting AccessBoundaryPolicy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting AccessBoundaryPolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceIAM2AccessBoundaryPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the 
resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIAM2AccessBoundaryPolicyDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAM2AccessBoundaryPolicyEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAM2AccessBoundaryPolicyRules(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "description": flattenIAM2AccessBoundaryPolicyRulesDescription(original["description"], d, config), + "access_boundary_rule": flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRule(original["accessBoundaryRule"], d, config), + }) + } + return transformed +} +func flattenIAM2AccessBoundaryPolicyRulesDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["available_resource"] = + flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailableResource(original["availableResource"], d, config) + transformed["available_permissions"] = + 
flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailablePermissions(original["availablePermissions"], d, config) + transformed["availability_condition"] = + flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityCondition(original["availabilityCondition"], d, config) + return []interface{}{transformed} +} +func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailableResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailablePermissions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityCondition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["expression"] = + flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionExpression(original["expression"], d, config) + transformed["title"] = + flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionTitle(original["title"], d, config) + transformed["description"] = + flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionDescription(original["description"], d, config) + transformed["location"] = + flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionLocation(original["location"], d, config) + return []interface{}{transformed} +} +func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIAM2AccessBoundaryPolicyDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAM2AccessBoundaryPolicyEtag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAM2AccessBoundaryPolicyRules(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDescription, err := expandIAM2AccessBoundaryPolicyRulesDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedAccessBoundaryRule, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRule(original["access_boundary_rule"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAccessBoundaryRule); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accessBoundaryRule"] = transformedAccessBoundaryRule + } + + req = append(req, transformed) + } + return req, nil +} + +func expandIAM2AccessBoundaryPolicyRulesDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAvailableResource, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailableResource(original["available_resource"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAvailableResource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["availableResource"] = transformedAvailableResource + } + + transformedAvailablePermissions, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailablePermissions(original["available_permissions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAvailablePermissions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["availablePermissions"] = transformedAvailablePermissions + } + + transformedAvailabilityCondition, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityCondition(original["availability_condition"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAvailabilityCondition); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["availabilityCondition"] = transformedAvailabilityCondition + } + + return transformed, nil +} + +func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailableResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailablePermissions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, 
nil +} + +func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityCondition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExpression, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionExpression(original["expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expression"] = transformedExpression + } + + transformedTitle, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionTitle(original["title"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["title"] = transformedTitle + } + + transformedDescription, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedLocation, err := expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["location"] = transformedLocation + } + + return transformed, nil +} + +func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + 
return v, nil +} + +func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAM2AccessBoundaryPolicyRulesAccessBoundaryRuleAvailabilityConditionLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/data_source_iam_beta_workload_identity_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/data_source_iam_beta_workload_identity_pool.go new file mode 100644 index 0000000000..51ca8dc367 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/data_source_iam_beta_workload_identity_pool.go @@ -0,0 +1,3 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package iambeta diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/data_source_iam_beta_workload_identity_pool_provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/data_source_iam_beta_workload_identity_pool_provider.go new file mode 100644 index 0000000000..51ca8dc367 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/data_source_iam_beta_workload_identity_pool_provider.go @@ -0,0 +1,3 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package iambeta diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/iam_beta_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/iam_beta_operation.go new file mode 100644 index 0000000000..8932b07347 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/iam_beta_operation.go @@ -0,0 +1,74 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package iambeta + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type IAMBetaOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *IAMBetaOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.IAMBetaBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createIAMBetaWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*IAMBetaOperationWaiter, error) { + w := &IAMBetaOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +func IAMBetaOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createIAMBetaWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool.go new file mode 100644 index 0000000000..49cc038922 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool.go @@ -0,0 +1,492 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package iambeta + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +const workloadIdentityPoolIdRegexp = `^[0-9a-z-]+$` + +func ValidateWorkloadIdentityPoolId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if strings.HasPrefix(value, "gcp-") { + errors = append(errors, fmt.Errorf( + "%q (%q) can not start with \"gcp-\"", k, value)) + } + + if !regexp.MustCompile(workloadIdentityPoolIdRegexp).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must contain only lowercase letters (a-z), numbers (0-9), or dashes (-)", k)) + } + + if len(value) < 4 { + errors = append(errors, fmt.Errorf( + "%q cannot be smaller than 4 characters", k)) + } + + if len(value) > 32 { + errors = append(errors, fmt.Errorf( + "%q cannot be greater than 32 characters", k)) + } + + return +} + +func ResourceIAMBetaWorkloadIdentityPool() *schema.Resource { + return &schema.Resource{ + Create: resourceIAMBetaWorkloadIdentityPoolCreate, + Read: resourceIAMBetaWorkloadIdentityPoolRead, + Update: resourceIAMBetaWorkloadIdentityPoolUpdate, + Delete: resourceIAMBetaWorkloadIdentityPoolDelete, + + Importer: &schema.ResourceImporter{ + State: 
resourceIAMBetaWorkloadIdentityPoolImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "workload_identity_pool_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: ValidateWorkloadIdentityPoolId, + Description: `The ID to use for the pool, which becomes the final component of the resource name. This +value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix +'gcp-' is reserved for use by Google, and may not be specified.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of the pool. Cannot exceed 256 characters.`, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether the pool is disabled. You cannot use a disabled pool to exchange tokens, or use +existing tokens to access resources. If the pool is re-enabled, existing tokens grant +access again.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `A display name for the pool. Cannot exceed 32 characters.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the pool as +'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}'.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The state of the pool. +* STATE_UNSPECIFIED: State unspecified. +* ACTIVE: The pool is active, and may be used in Google Cloud policies. +* DELETED: The pool is soft-deleted. Soft-deleted pools are permanently deleted after + approximately 30 days. You can restore a soft-deleted pool using + UndeleteWorkloadIdentityPool. You cannot reuse the ID of a soft-deleted pool until it is + permanently deleted. 
While a pool is deleted, you cannot use it to exchange tokens, or + use existing tokens to access resources. If the pool is undeleted, existing tokens grant + access again.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIAMBetaWorkloadIdentityPoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandIAMBetaWorkloadIdentityPoolDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandIAMBetaWorkloadIdentityPoolDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + disabledProp, err := expandIAMBetaWorkloadIdentityPoolDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools?workloadIdentityPoolId={{workload_identity_pool_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new WorkloadIdentityPool: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + 
return fmt.Errorf("Error fetching project for WorkloadIdentityPool: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating WorkloadIdentityPool: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = IAMBetaOperationWaitTime( + config, res, project, "Creating WorkloadIdentityPool", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create WorkloadIdentityPool: %s", err) + } + + log.Printf("[DEBUG] Finished creating WorkloadIdentityPool %q: %#v", d.Id(), res) + + return resourceIAMBetaWorkloadIdentityPoolRead(d, meta) +} + +func resourceIAMBetaWorkloadIdentityPoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for WorkloadIdentityPool: %s", err) + } + billingProject = project + + // err == nil indicates that the 
billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IAMBetaWorkloadIdentityPool %q", d.Id())) + } + + res, err = resourceIAMBetaWorkloadIdentityPoolDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing IAMBetaWorkloadIdentityPool because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPool: %s", err) + } + + if err := d.Set("state", flattenIAMBetaWorkloadIdentityPoolState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPool: %s", err) + } + if err := d.Set("display_name", flattenIAMBetaWorkloadIdentityPoolDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPool: %s", err) + } + if err := d.Set("description", flattenIAMBetaWorkloadIdentityPoolDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPool: %s", err) + } + if err := d.Set("name", flattenIAMBetaWorkloadIdentityPoolName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPool: %s", err) + } + if err := d.Set("disabled", flattenIAMBetaWorkloadIdentityPoolDisabled(res["disabled"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPool: %s", err) + } + + return nil +} + +func resourceIAMBetaWorkloadIdentityPoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, 
err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for WorkloadIdentityPool: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandIAMBetaWorkloadIdentityPoolDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandIAMBetaWorkloadIdentityPoolDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + disabledProp, err := expandIAMBetaWorkloadIdentityPoolDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating WorkloadIdentityPool %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("disabled") { + updateMask = append(updateMask, "disabled") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = 
transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating WorkloadIdentityPool %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating WorkloadIdentityPool %q: %#v", d.Id(), res) + } + + err = IAMBetaOperationWaitTime( + config, res, project, "Updating WorkloadIdentityPool", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceIAMBetaWorkloadIdentityPoolRead(d, meta) +} + +func resourceIAMBetaWorkloadIdentityPoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for WorkloadIdentityPool: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting WorkloadIdentityPool %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + 
Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "WorkloadIdentityPool") + } + + err = IAMBetaOperationWaitTime( + config, res, project, "Deleting WorkloadIdentityPool", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting WorkloadIdentityPool %q: %#v", d.Id(), res) + return nil +} + +func resourceIAMBetaWorkloadIdentityPoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/global/workloadIdentityPools/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIAMBetaWorkloadIdentityPoolState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
expandIAMBetaWorkloadIdentityPoolDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMBetaWorkloadIdentityPoolDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMBetaWorkloadIdentityPoolDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceIAMBetaWorkloadIdentityPoolDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if v := res["state"]; v == "DELETED" { + return nil, nil + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool_provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool_provider.go new file mode 100644 index 0000000000..5a2487aef1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool_provider.go @@ -0,0 +1,878 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package iambeta + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +const workloadIdentityPoolProviderIdRegexp = `^[0-9a-z-]+$` + +func ValidateWorkloadIdentityPoolProviderId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if strings.HasPrefix(value, "gcp-") { + errors = append(errors, fmt.Errorf( + "%q (%q) can not start with \"gcp-\"", k, value)) + } + + if !regexp.MustCompile(workloadIdentityPoolProviderIdRegexp).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must contain only lowercase letters (a-z), numbers (0-9), or dashes (-)", k)) + } + + if len(value) < 4 { + errors = append(errors, fmt.Errorf( + "%q cannot be smaller than 4 characters", k)) + } + + if len(value) > 32 { + errors = append(errors, fmt.Errorf( + "%q cannot be greater than 32 characters", k)) + } + + return +} + +func ResourceIAMBetaWorkloadIdentityPoolProvider() *schema.Resource { + return &schema.Resource{ + Create: resourceIAMBetaWorkloadIdentityPoolProviderCreate, + Read: resourceIAMBetaWorkloadIdentityPoolProviderRead, + Update: resourceIAMBetaWorkloadIdentityPoolProviderUpdate, + Delete: resourceIAMBetaWorkloadIdentityPoolProviderDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIAMBetaWorkloadIdentityPoolProviderImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "workload_identity_pool_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID used for the pool, 
which is the final component of the pool resource name. This +value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix +'gcp-' is reserved for use by Google, and may not be specified.`, + }, + "workload_identity_pool_provider_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: ValidateWorkloadIdentityPoolProviderId, + Description: `The ID for the provider, which becomes the final component of the resource name. This +value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix +'gcp-' is reserved for use by Google, and may not be specified.`, + }, + "attribute_condition": { + Type: schema.TypeString, + Optional: true, + Description: `[A Common Expression Language](https://opensource.google/projects/cel) expression, in +plain text, to restrict what otherwise valid authentication credentials issued by the +provider should not be accepted. + +The expression must output a boolean representing whether to allow the federation. + +The following keywords may be referenced in the expressions: + * 'assertion': JSON representing the authentication credential issued by the provider. + * 'google': The Google attributes mapped from the assertion in the 'attribute_mappings'. + * 'attribute': The custom attributes mapped from the assertion in the 'attribute_mappings'. + +The maximum length of the attribute condition expression is 4096 characters. If +unspecified, all valid authentication credential are accepted. + +The following example shows how to only allow credentials with a mapped 'google.groups' +value of 'admins': +''' +"'admins' in google.groups" +'''`, + }, + "attribute_mapping": { + Type: schema.TypeMap, + Optional: true, + Description: `Maps attributes from authentication credentials issued by an external identity provider +to Google Cloud attributes, such as 'subject' and 'segment'. + +Each key must be a string specifying the Google Cloud IAM attribute to map to. 
+ +The following keys are supported: + * 'google.subject': The principal IAM is authenticating. You can reference this value + in IAM bindings. This is also the subject that appears in Cloud Logging logs. + Cannot exceed 127 characters. + * 'google.groups': Groups the external identity belongs to. You can grant groups + access to resources using an IAM 'principalSet' binding; access applies to all + members of the group. + +You can also provide custom attributes by specifying 'attribute.{custom_attribute}', +where '{custom_attribute}' is the name of the custom attribute to be mapped. You can +define a maximum of 50 custom attributes. The maximum length of a mapped attribute key +is 100 characters, and the key may only contain the characters [a-z0-9_]. + +You can reference these attributes in IAM policies to define fine-grained access for a +workload to Google Cloud resources. For example: + * 'google.subject': + 'principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}' + * 'google.groups': + 'principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}' + * 'attribute.{custom_attribute}': + 'principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}' + +Each value must be a [Common Expression Language](https://opensource.google/projects/cel) +function that maps an identity provider credential to the normalized attribute specified +by the corresponding map key. + +You can use the 'assertion' keyword in the expression to access a JSON representation of +the authentication credential issued by the provider. + +The maximum length of an attribute mapping expression is 2048 characters. When evaluated, +the total size of all mapped attributes must not exceed 8KB. 
+ +For AWS providers, the following rules apply: + - If no attribute mapping is defined, the following default mapping applies: + ''' + { + "google.subject":"assertion.arn", + "attribute.aws_role": + "assertion.arn.contains('assumed-role')" + " ? assertion.arn.extract('{account_arn}assumed-role/')" + " + 'assumed-role/'" + " + assertion.arn.extract('assumed-role/{role_name}/')" + " : assertion.arn", + } + ''' + - If any custom attribute mappings are defined, they must include a mapping to the + 'google.subject' attribute. + +For OIDC providers, the following rules apply: + - Custom attribute mappings must be defined, and must include a mapping to the + 'google.subject' attribute. For example, the following maps the 'sub' claim of the + incoming credential to the 'subject' attribute on a Google token. + ''' + {"google.subject": "assertion.sub"} + '''`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "aws": { + Type: schema.TypeList, + Optional: true, + Description: `An Amazon Web Services identity provider. Not compatible with the property oidc.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Required: true, + Description: `The AWS account ID.`, + }, + }, + }, + ExactlyOneOf: []string{"aws", "oidc"}, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description for the provider. Cannot exceed 256 characters.`, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. +However, existing tokens still grant access.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `A display name for the provider. Cannot exceed 32 characters.`, + }, + "oidc": { + Type: schema.TypeList, + Optional: true, + Description: `An OpenId Connect 1.0 identity provider. 
Not compatible with the property aws.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "issuer_uri": { + Type: schema.TypeString, + Required: true, + Description: `The OIDC issuer URL.`, + }, + "allowed_audiences": { + Type: schema.TypeList, + Optional: true, + Description: `Acceptable values for the 'aud' field (audience) in the OIDC token. Token exchange +requests are rejected if the token audience does not match one of the configured +values. Each audience may be at most 256 characters. A maximum of 10 audiences may +be configured. + +If this list is empty, the OIDC token audience must be equal to the full canonical +resource name of the WorkloadIdentityPoolProvider, with or without the HTTPS prefix. +For example: +''' +//iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ +https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ +'''`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "jwks_json": { + Type: schema.TypeString, + Optional: true, + Description: `OIDC JWKs in JSON String format. For details on definition of a +JWK, see https:tools.ietf.org/html/rfc7517. If not set, then we +use the 'jwks_uri' from the discovery document fetched from the +.well-known path for the 'issuer_uri'. Currently, RSA and EC asymmetric +keys are supported. 
The JWK must use following format and include only +the following fields: +''' +{ + "keys": [ + { + "kty": "RSA/EC", + "alg": "", + "use": "sig", + "kid": "", + "n": "", + "e": "", + "x": "", + "y": "", + "crv": "" + } + ] +} +'''`, + }, + }, + }, + ExactlyOneOf: []string{"aws", "oidc"}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the provider as +'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The state of the provider. +* STATE_UNSPECIFIED: State unspecified. +* ACTIVE: The provider is active, and may be used to validate authentication credentials. +* DELETED: The provider is soft-deleted. Soft-deleted providers are permanently deleted + after approximately 30 days. You can restore a soft-deleted provider using + UndeleteWorkloadIdentityPoolProvider. You cannot reuse the ID of a soft-deleted provider + until it is permanently deleted.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIAMBetaWorkloadIdentityPoolProviderCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandIAMBetaWorkloadIdentityPoolProviderDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandIAMBetaWorkloadIdentityPoolProviderDescription(d.Get("description"), d, config) + if err != 
nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + disabledProp, err := expandIAMBetaWorkloadIdentityPoolProviderDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + attributeMappingProp, err := expandIAMBetaWorkloadIdentityPoolProviderAttributeMapping(d.Get("attribute_mapping"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attribute_mapping"); !tpgresource.IsEmptyValue(reflect.ValueOf(attributeMappingProp)) && (ok || !reflect.DeepEqual(v, attributeMappingProp)) { + obj["attributeMapping"] = attributeMappingProp + } + attributeConditionProp, err := expandIAMBetaWorkloadIdentityPoolProviderAttributeCondition(d.Get("attribute_condition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attribute_condition"); !tpgresource.IsEmptyValue(reflect.ValueOf(attributeConditionProp)) && (ok || !reflect.DeepEqual(v, attributeConditionProp)) { + obj["attributeCondition"] = attributeConditionProp + } + awsProp, err := expandIAMBetaWorkloadIdentityPoolProviderAws(d.Get("aws"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("aws"); !tpgresource.IsEmptyValue(reflect.ValueOf(awsProp)) && (ok || !reflect.DeepEqual(v, awsProp)) { + obj["aws"] = awsProp + } + oidcProp, err := expandIAMBetaWorkloadIdentityPoolProviderOidc(d.Get("oidc"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("oidc"); !tpgresource.IsEmptyValue(reflect.ValueOf(oidcProp)) && (ok || !reflect.DeepEqual(v, oidcProp)) { + obj["oidc"] = oidcProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers?workloadIdentityPoolProviderId={{workload_identity_pool_provider_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new WorkloadIdentityPoolProvider: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for WorkloadIdentityPoolProvider: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating WorkloadIdentityPoolProvider: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = IAMBetaOperationWaitTime( + config, res, project, "Creating WorkloadIdentityPoolProvider", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create WorkloadIdentityPoolProvider: %s", err) + } + + log.Printf("[DEBUG] Finished creating WorkloadIdentityPoolProvider %q: %#v", d.Id(), res) + + return resourceIAMBetaWorkloadIdentityPoolProviderRead(d, meta) +} + +func resourceIAMBetaWorkloadIdentityPoolProviderRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for WorkloadIdentityPoolProvider: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IAMBetaWorkloadIdentityPoolProvider %q", d.Id())) + } + + res, err = resourceIAMBetaWorkloadIdentityPoolProviderDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing IAMBetaWorkloadIdentityPoolProvider because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) + } + + if err := d.Set("state", flattenIAMBetaWorkloadIdentityPoolProviderState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) + } + if err := d.Set("display_name", flattenIAMBetaWorkloadIdentityPoolProviderDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) + } + if err := d.Set("description", flattenIAMBetaWorkloadIdentityPoolProviderDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) + } + if err := d.Set("name", flattenIAMBetaWorkloadIdentityPoolProviderName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) + } + if err := d.Set("disabled", flattenIAMBetaWorkloadIdentityPoolProviderDisabled(res["disabled"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) + } + if err := d.Set("attribute_mapping", flattenIAMBetaWorkloadIdentityPoolProviderAttributeMapping(res["attributeMapping"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) + } + if err := d.Set("attribute_condition", flattenIAMBetaWorkloadIdentityPoolProviderAttributeCondition(res["attributeCondition"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) + } + if err := d.Set("aws", flattenIAMBetaWorkloadIdentityPoolProviderAws(res["aws"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) + } + if err := d.Set("oidc", 
flattenIAMBetaWorkloadIdentityPoolProviderOidc(res["oidc"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkloadIdentityPoolProvider: %s", err) + } + + return nil +} + +func resourceIAMBetaWorkloadIdentityPoolProviderUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for WorkloadIdentityPoolProvider: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandIAMBetaWorkloadIdentityPoolProviderDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandIAMBetaWorkloadIdentityPoolProviderDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + disabledProp, err := expandIAMBetaWorkloadIdentityPoolProviderDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + attributeMappingProp, err := expandIAMBetaWorkloadIdentityPoolProviderAttributeMapping(d.Get("attribute_mapping"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attribute_mapping"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
attributeMappingProp)) { + obj["attributeMapping"] = attributeMappingProp + } + attributeConditionProp, err := expandIAMBetaWorkloadIdentityPoolProviderAttributeCondition(d.Get("attribute_condition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attribute_condition"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attributeConditionProp)) { + obj["attributeCondition"] = attributeConditionProp + } + awsProp, err := expandIAMBetaWorkloadIdentityPoolProviderAws(d.Get("aws"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("aws"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, awsProp)) { + obj["aws"] = awsProp + } + oidcProp, err := expandIAMBetaWorkloadIdentityPoolProviderOidc(d.Get("oidc"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("oidc"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, oidcProp)) { + obj["oidc"] = oidcProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating WorkloadIdentityPoolProvider %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("disabled") { + updateMask = append(updateMask, "disabled") + } + + if d.HasChange("attribute_mapping") { + updateMask = append(updateMask, "attributeMapping") + } + + if d.HasChange("attribute_condition") { + updateMask = append(updateMask, "attributeCondition") + } + + if d.HasChange("aws") { + updateMask = append(updateMask, "aws") + } + + if d.HasChange("oidc") { + updateMask = append(updateMask, 
"oidc.allowed_audiences", + "oidc.issuer_uri", + "oidc.jwks_json") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating WorkloadIdentityPoolProvider %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating WorkloadIdentityPoolProvider %q: %#v", d.Id(), res) + } + + err = IAMBetaOperationWaitTime( + config, res, project, "Updating WorkloadIdentityPoolProvider", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceIAMBetaWorkloadIdentityPoolProviderRead(d, meta) +} + +func resourceIAMBetaWorkloadIdentityPoolProviderDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for WorkloadIdentityPoolProvider: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMBetaBasePath}}projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting 
WorkloadIdentityPoolProvider %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "WorkloadIdentityPoolProvider") + } + + err = IAMBetaOperationWaitTime( + config, res, project, "Deleting WorkloadIdentityPoolProvider", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting WorkloadIdentityPoolProvider %q: %#v", d.Id(), res) + return nil +} + +func resourceIAMBetaWorkloadIdentityPoolProviderImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/global/workloadIdentityPools/(?P[^/]+)/providers/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIAMBetaWorkloadIdentityPoolProviderState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolProviderDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenIAMBetaWorkloadIdentityPoolProviderDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolProviderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolProviderDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolProviderAttributeMapping(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolProviderAttributeCondition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolProviderAws(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["account_id"] = + flattenIAMBetaWorkloadIdentityPoolProviderAwsAccountId(original["accountId"], d, config) + return []interface{}{transformed} +} +func flattenIAMBetaWorkloadIdentityPoolProviderAwsAccountId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolProviderOidc(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["allowed_audiences"] = + flattenIAMBetaWorkloadIdentityPoolProviderOidcAllowedAudiences(original["allowedAudiences"], d, config) + transformed["issuer_uri"] = + flattenIAMBetaWorkloadIdentityPoolProviderOidcIssuerUri(original["issuerUri"], d, config) + transformed["jwks_json"] 
= + flattenIAMBetaWorkloadIdentityPoolProviderOidcJwksJson(original["jwksJson"], d, config) + return []interface{}{transformed} +} +func flattenIAMBetaWorkloadIdentityPoolProviderOidcAllowedAudiences(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolProviderOidcIssuerUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMBetaWorkloadIdentityPoolProviderOidcJwksJson(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIAMBetaWorkloadIdentityPoolProviderDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderAttributeMapping(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderAttributeCondition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderAws(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAccountId, err := expandIAMBetaWorkloadIdentityPoolProviderAwsAccountId(original["account_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAccountId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accountId"] = transformedAccountId + } + + return transformed, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderAwsAccountId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderOidc(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAllowedAudiences, err := expandIAMBetaWorkloadIdentityPoolProviderOidcAllowedAudiences(original["allowed_audiences"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowedAudiences); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowedAudiences"] = transformedAllowedAudiences + } + + transformedIssuerUri, err := expandIAMBetaWorkloadIdentityPoolProviderOidcIssuerUri(original["issuer_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIssuerUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["issuerUri"] = transformedIssuerUri + } + + transformedJwksJson, err := expandIAMBetaWorkloadIdentityPoolProviderOidcJwksJson(original["jwks_json"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedJwksJson); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["jwksJson"] = transformedJwksJson + } + + return transformed, nil +} + +func 
expandIAMBetaWorkloadIdentityPoolProviderOidcAllowedAudiences(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderOidcIssuerUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMBetaWorkloadIdentityPoolProviderOidcJwksJson(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceIAMBetaWorkloadIdentityPoolProviderDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if v := res["state"]; v == "DELETED" { + return nil, nil + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool_provider_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool_provider_sweeper.go new file mode 100644 index 0000000000..a39486c2a1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool_provider_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package iambeta + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("IAMBetaWorkloadIdentityPoolProvider", testSweepIAMBetaWorkloadIdentityPoolProvider) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepIAMBetaWorkloadIdentityPoolProvider(region string) error { + resourceName := "IAMBetaWorkloadIdentityPoolProvider" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://iam.googleapis.com/v1/projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: 
config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["workloadIdentityPoolProviders"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://iam.googleapis.com/v1/projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) 
+ } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool_sweeper.go new file mode 100644 index 0000000000..64da21537f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iambeta/resource_iam_workload_identity_pool_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package iambeta + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("IAMBetaWorkloadIdentityPool", testSweepIAMBetaWorkloadIdentityPool) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepIAMBetaWorkloadIdentityPool(region string) error { + resourceName := "IAMBetaWorkloadIdentityPool" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://iam.googleapis.com/v1/projects/{{project}}/locations/global/workloadIdentityPools", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if 
err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["workloadIdentityPools"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://iam.googleapis.com/v1/projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } 
+ + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/iam_workforce_pool_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/iam_workforce_pool_operation.go new file mode 100644 index 0000000000..5df59b82b2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/iam_workforce_pool_operation.go @@ -0,0 +1,71 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package iamworkforcepool + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type IAMWorkforcePoolOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + tpgresource.CommonOperationWaiter +} + +func (w *IAMWorkforcePoolOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.IAMWorkforcePoolBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createIAMWorkforcePoolWaiter(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string) (*IAMWorkforcePoolOperationWaiter, error) { + w := &IAMWorkforcePoolOperationWaiter{ + Config: config, + UserAgent: userAgent, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +func IAMWorkforcePoolOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createIAMWorkforcePoolWaiter(config, op, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/resource_iam_workforce_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/resource_iam_workforce_pool.go new file mode 100644 index 0000000000..9bb3edc80a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/resource_iam_workforce_pool.go @@ -0,0 +1,516 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package iamworkforcepool + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +const workforcePoolIdRegexp = `^[a-z][a-z0-9-]{4,61}[a-z0-9]$` + +func ValidateWorkforcePoolId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if strings.HasPrefix(value, "gcp-") { + errors = append(errors, fmt.Errorf( + "%q (%q) can not start with \"gcp-\". "+ + "The prefix `gcp-` is reserved for use by Google, and may not be specified.", k, value)) + } + + if !regexp.MustCompile(workforcePoolIdRegexp).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) must contain only lowercase letters [a-z], digits [0-9], and hyphens "+ + "[-]. 
The WorkforcePool ID must be between 6 and 63 characters, begin "+ + "with a letter, and cannot have a trailing hyphen.", k, value)) + } + + return +} + +func ResourceIAMWorkforcePoolWorkforcePool() *schema.Resource { + return &schema.Resource{ + Create: resourceIAMWorkforcePoolWorkforcePoolCreate, + Read: resourceIAMWorkforcePoolWorkforcePoolRead, + Update: resourceIAMWorkforcePoolWorkforcePoolUpdate, + Delete: resourceIAMWorkforcePoolWorkforcePoolDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIAMWorkforcePoolWorkforcePoolImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location for the resource.`, + }, + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Immutable. The resource name of the parent. Format: 'organizations/{org-id}'.`, + }, + "workforce_pool_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: ValidateWorkforcePoolId, + Description: `The name of the pool. The ID must be a globally unique string of 6 to 63 lowercase letters, +digits, or hyphens. It must start with a letter, and cannot have a trailing hyphen. +The prefix 'gcp-' is reserved for use by Google, and may not be specified.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A user-specified description of the pool. Cannot exceed 256 characters.`, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether the pool is disabled. You cannot use a disabled pool to exchange tokens, +or use existing tokens to access resources. 
If the pool is re-enabled, existing tokens grant access again.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `A user-specified display name of the pool in Google Cloud Console. Cannot exceed 32 characters.`, + }, + "session_duration": { + Type: schema.TypeString, + Optional: true, + Description: `Duration that the Google Cloud access tokens, console sign-in sessions, +and 'gcloud' sign-in sessions from this pool are valid. +Must be greater than 15 minutes (900s) and less than 12 hours (43200s). +If 'sessionDuration' is not configured, minted credentials have a default duration of one hour (3600s). +A duration in seconds with up to nine fractional digits, ending with ''s''. Example: "'3.5s'".`, + Default: "3600s", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The resource name of the pool. +Format: 'locations/{location}/workforcePools/{workforcePoolId}'`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The state of the pool. + * STATE_UNSPECIFIED: State unspecified. + * ACTIVE: The pool is active, and may be used in Google Cloud policies. + * DELETED: The pool is soft-deleted. Soft-deleted pools are permanently deleted + after approximately 30 days. You can restore a soft-deleted pool using + [workforcePools.undelete](https://cloud.google.com/iam/docs/reference/rest/v1/locations.workforcePools/undelete#google.iam.admin.v1.WorkforcePools.UndeleteWorkforcePool). + You cannot reuse the ID of a soft-deleted pool until it is permanently deleted. + While a pool is deleted, you cannot use it to exchange tokens, or use + existing tokens to access resources. 
If the pool is undeleted, existing + tokens grant access again.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIAMWorkforcePoolWorkforcePoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + parentProp, err := expandIAMWorkforcePoolWorkforcePoolParent(d.Get("parent"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { + obj["parent"] = parentProp + } + displayNameProp, err := expandIAMWorkforcePoolWorkforcePoolDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandIAMWorkforcePoolWorkforcePoolDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + disabledProp, err := expandIAMWorkforcePoolWorkforcePoolDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + sessionDurationProp, err := expandIAMWorkforcePoolWorkforcePoolSessionDuration(d.Get("session_duration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("session_duration"); !tpgresource.IsEmptyValue(reflect.ValueOf(sessionDurationProp)) && (ok || 
!reflect.DeepEqual(v, sessionDurationProp)) { + obj["sessionDuration"] = sessionDurationProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools?workforcePoolId={{workforce_pool_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new WorkforcePool: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating WorkforcePool: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "locations/{{location}}/workforcePools/{{workforce_pool_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = IAMWorkforcePoolOperationWaitTime( + config, res, "Creating WorkforcePool", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create WorkforcePool: %s", err) + } + + log.Printf("[DEBUG] Finished creating WorkforcePool %q: %#v", d.Id(), res) + + return resourceIAMWorkforcePoolWorkforcePoolRead(d, meta) +} + +func resourceIAMWorkforcePoolWorkforcePoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates 
that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IAMWorkforcePoolWorkforcePool %q", d.Id())) + } + + res, err = resourceIAMWorkforcePoolWorkforcePoolDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing IAMWorkforcePoolWorkforcePool because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenIAMWorkforcePoolWorkforcePoolName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePool: %s", err) + } + if err := d.Set("parent", flattenIAMWorkforcePoolWorkforcePoolParent(res["parent"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePool: %s", err) + } + if err := d.Set("display_name", flattenIAMWorkforcePoolWorkforcePoolDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePool: %s", err) + } + if err := d.Set("description", flattenIAMWorkforcePoolWorkforcePoolDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePool: %s", err) + } + if err := d.Set("state", flattenIAMWorkforcePoolWorkforcePoolState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePool: %s", err) + } + if err := d.Set("disabled", flattenIAMWorkforcePoolWorkforcePoolDisabled(res["disabled"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePool: %s", err) + } + if err := d.Set("session_duration", flattenIAMWorkforcePoolWorkforcePoolSessionDuration(res["sessionDuration"], d, config)); err 
!= nil { + return fmt.Errorf("Error reading WorkforcePool: %s", err) + } + + return nil +} + +func resourceIAMWorkforcePoolWorkforcePoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + displayNameProp, err := expandIAMWorkforcePoolWorkforcePoolDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandIAMWorkforcePoolWorkforcePoolDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + disabledProp, err := expandIAMWorkforcePoolWorkforcePoolDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + sessionDurationProp, err := expandIAMWorkforcePoolWorkforcePoolSessionDuration(d.Get("session_duration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("session_duration"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sessionDurationProp)) { + obj["sessionDuration"] = sessionDurationProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating WorkforcePool %q: %#v", d.Id(), obj) + updateMask 
:= []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("disabled") { + updateMask = append(updateMask, "disabled") + } + + if d.HasChange("session_duration") { + updateMask = append(updateMask, "sessionDuration") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating WorkforcePool %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating WorkforcePool %q: %#v", d.Id(), res) + } + + err = IAMWorkforcePoolOperationWaitTime( + config, res, "Updating WorkforcePool", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceIAMWorkforcePoolWorkforcePoolRead(d, meta) +} + +func resourceIAMWorkforcePoolWorkforcePoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting WorkforcePool %q", d.Id()) + + 
// err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "WorkforcePool") + } + + err = IAMWorkforcePoolOperationWaitTime( + config, res, "Deleting WorkforcePool", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting WorkforcePool %q: %#v", d.Id(), res) + return nil +} + +func resourceIAMWorkforcePoolWorkforcePoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "locations/(?P[^/]+)/workforcePools/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "locations/{{location}}/workforcePools/{{workforce_pool_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIAMWorkforcePoolWorkforcePoolName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolParent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + 
+func flattenIAMWorkforcePoolWorkforcePoolState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolSessionDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIAMWorkforcePoolWorkforcePoolParent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMWorkforcePoolWorkforcePoolDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMWorkforcePoolWorkforcePoolDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMWorkforcePoolWorkforcePoolDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMWorkforcePoolWorkforcePoolSessionDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceIAMWorkforcePoolWorkforcePoolDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if v := res["state"]; v == "DELETED" { + return nil, nil + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/resource_iam_workforce_pool_provider.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/resource_iam_workforce_pool_provider.go new file mode 100644 index 0000000000..ae1626ed03 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/resource_iam_workforce_pool_provider.go @@ -0,0 +1,1045 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package iamworkforcepool + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +const workforcePoolProviderIdRegexp = `^[a-z0-9-]{4,32}$` + +func ValidateWorkforcePoolProviderId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if strings.HasPrefix(value, "gcp-") { + errors = append(errors, fmt.Errorf( + "%q (%q) can not start with \"gcp-\". 
"+ + "The prefix `gcp-` is reserved for use by Google, and may not be specified.", k, value)) + } + + if !regexp.MustCompile(workforcePoolProviderIdRegexp).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) must be 4-32 characters, and may contain the characters [a-z0-9-].", k, value)) + } + + return +} + +func ResourceIAMWorkforcePoolWorkforcePoolProvider() *schema.Resource { + return &schema.Resource{ + Create: resourceIAMWorkforcePoolWorkforcePoolProviderCreate, + Read: resourceIAMWorkforcePoolWorkforcePoolProviderRead, + Update: resourceIAMWorkforcePoolWorkforcePoolProviderUpdate, + Delete: resourceIAMWorkforcePoolWorkforcePoolProviderDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIAMWorkforcePoolWorkforcePoolProviderImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location for the resource.`, + }, + "provider_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: ValidateWorkforcePoolProviderId, + Description: `The ID for the provider, which becomes the final component of the resource name. +This value must be 4-32 characters, and may contain the characters [a-z0-9-]. +The prefix 'gcp-' is reserved for use by Google, and may not be specified.`, + }, + "workforce_pool_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID to use for the pool, which becomes the final component of the resource name. +The IDs must be a globally unique string of 6 to 63 lowercase letters, digits, or hyphens. +It must start with a letter, and cannot have a trailing hyphen. 
+The prefix 'gcp-' is reserved for use by Google, and may not be specified.`, + }, + "attribute_condition": { + Type: schema.TypeString, + Optional: true, + Description: `A [Common Expression Language](https://opensource.google/projects/cel) expression, in +plain text, to restrict what otherwise valid authentication credentials issued by the +provider should not be accepted. + +The expression must output a boolean representing whether to allow the federation. + +The following keywords may be referenced in the expressions: + * 'assertion': JSON representing the authentication credential issued by the provider. + * 'google': The Google attributes mapped from the assertion in the 'attribute_mappings'. + 'google.profile_photo' and 'google.display_name' are not supported. + * 'attribute': The custom attributes mapped from the assertion in the 'attribute_mappings'. + +The maximum length of the attribute condition expression is 4096 characters. +If unspecified, all valid authentication credentials will be accepted. + +The following example shows how to only allow credentials with a mapped 'google.groups' value of 'admins': +''' +"'admins' in google.groups" +'''`, + }, + "attribute_mapping": { + Type: schema.TypeMap, + Optional: true, + Description: `Maps attributes from the authentication credentials issued by an external identity provider +to Google Cloud attributes, such as 'subject' and 'segment'. + +Each key must be a string specifying the Google Cloud IAM attribute to map to. + +The following keys are supported: + * 'google.subject': The principal IAM is authenticating. You can reference this value in IAM bindings. + This is also the subject that appears in Cloud Logging logs. This is a required field and + the mapped subject cannot exceed 127 bytes. + * 'google.groups': Groups the authenticating user belongs to. You can grant groups access to + resources using an IAM 'principalSet' binding; access applies to all members of the group. 
+ * 'google.display_name': The name of the authenticated user. This is an optional field and + the mapped display name cannot exceed 100 bytes. If not set, 'google.subject' will be displayed instead. + This attribute cannot be referenced in IAM bindings. + * 'google.profile_photo': The URL that specifies the authenticated user's thumbnail photo. + This is an optional field. When set, the image will be visible as the user's profile picture. + If not set, a generic user icon will be displayed instead. + This attribute cannot be referenced in IAM bindings. + +You can also provide custom attributes by specifying 'attribute.{custom_attribute}', where {custom_attribute} +is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. +The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. + +You can reference these attributes in IAM policies to define fine-grained access for a workforce pool +to Google Cloud resources. For example: + * 'google.subject': + 'principal://iam.googleapis.com/locations/{location}/workforcePools/{pool}/subject/{value}' + * 'google.groups': + 'principalSet://iam.googleapis.com/locations/{location}/workforcePools/{pool}/group/{value}' + * 'attribute.{custom_attribute}': + 'principalSet://iam.googleapis.com/locations/{location}/workforcePools/{pool}/attribute.{custom_attribute}/{value}' + +Each value must be a [Common Expression Language](https://opensource.google/projects/cel) +function that maps an identity provider credential to the normalized attribute specified +by the corresponding map key. + +You can use the 'assertion' keyword in the expression to access a JSON representation of +the authentication credential issued by the provider. + +The maximum length of an attribute mapping expression is 2048 characters. When evaluated, +the total size of all mapped attributes must not exceed 8KB. 
+ +For OIDC providers, you must supply a custom mapping that includes the 'google.subject' attribute. +For example, the following maps the sub claim of the incoming credential to the 'subject' attribute +on a Google token: +''' +{"google.subject": "assertion.sub"} +''' + +An object containing a list of '"key": value' pairs. +Example: '{ "name": "wrench", "mass": "1.3kg", "count": "3" }'.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A user-specified description of the provider. Cannot exceed 256 characters.`, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. +However, existing tokens still grant access.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `A user-specified display name for the provider. Cannot exceed 32 characters.`, + }, + "oidc": { + Type: schema.TypeList, + Optional: true, + Description: `Represents an OpenId Connect 1.0 identity provider.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_id": { + Type: schema.TypeString, + Required: true, + Description: `The client ID. Must match the audience claim of the JWT issued by the identity provider.`, + }, + "issuer_uri": { + Type: schema.TypeString, + Required: true, + Description: `The OIDC issuer URI. Must be a valid URI using the 'https' scheme.`, + }, + "client_secret": { + Type: schema.TypeList, + Optional: true, + Description: `The optional client secret. 
Required to enable Authorization Code flow for web sign-in.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "value": { + Type: schema.TypeList, + Optional: true, + Description: `The value of the client secret.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "plain_text": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + Description: `The plain text of the client secret value.`, + Sensitive: true, + }, + "thumbprint": { + Type: schema.TypeString, + Computed: true, + Description: `A thumbprint to represent the current client secret value.`, + }, + }, + }, + ExactlyOneOf: []string{"oidc.0.client_secret.0.value"}, + }, + }, + }, + }, + "web_sso_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Configuration for web single sign-on for the OIDC provider. Here, web sign-in refers to console sign-in and gcloud sign-in through the browser.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "assertion_claims_behavior": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"MERGE_USER_INFO_OVER_ID_TOKEN_CLAIMS", "ONLY_ID_TOKEN_CLAIMS"}), + Description: `The behavior for how OIDC Claims are included in the 'assertion' object used for attribute mapping and attribute condition. +* MERGE_USER_INFO_OVER_ID_TOKEN_CLAIMS: Merge the UserInfo Endpoint Claims with ID Token Claims, preferring UserInfo Claim Values for the same Claim Name. This option is available only for the Authorization Code Flow. +* ONLY_ID_TOKEN_CLAIMS: Only include ID Token Claims. Possible values: ["MERGE_USER_INFO_OVER_ID_TOKEN_CLAIMS", "ONLY_ID_TOKEN_CLAIMS"]`, + }, + "response_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"CODE", "ID_TOKEN"}), + Description: `The Response Type to request for in the OIDC Authorization Request for web sign-in. 
+ +The 'CODE' Response Type is recommended to avoid the Implicit Flow, for security reasons. +* CODE: The 'response_type=code' selection uses the Authorization Code Flow for web sign-in. Requires a configured client secret. +* ID_TOKEN: The 'response_type=id_token' selection uses the Implicit Flow for web sign-in. Possible values: ["CODE", "ID_TOKEN"]`, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"saml", "oidc"}, + }, + "saml": { + Type: schema.TypeList, + Optional: true, + Description: `Represents a SAML identity provider.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "idp_metadata_xml": { + Type: schema.TypeString, + Required: true, + Description: `SAML Identity provider configuration metadata xml doc. +The xml document should comply with [SAML 2.0 specification](https://docs.oasis-open.org/security/saml/v2.0/saml-metadata-2.0-os.pdf). +The max size of the acceptable xml document will be bounded to 128k characters. + +The metadata xml document should satisfy the following constraints: +1) Must contain an Identity Provider Entity ID. +2) Must contain at least one non-expired signing key certificate. +3) For each signing key: + a) Valid from should be no more than 7 days from now. + b) Valid to should be no more than 10 years in the future. +4) Up to 3 IdP signing keys are allowed in the metadata xml. + +When updating the provider's metadata xml, at least one non-expired signing key +must overlap with the existing metadata. This requirement is skipped if there are +no non-expired signing keys present in the existing metadata.`, + }, + }, + }, + ExactlyOneOf: []string{"saml", "oidc"}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The resource name of the provider. +Format: 'locations/{location}/workforcePools/{workforcePoolId}/providers/{providerId}'`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the provider. 
+* STATE_UNSPECIFIED: State unspecified. +* ACTIVE: The provider is active and may be used to validate authentication credentials. +* DELETED: The provider is soft-deleted. Soft-deleted providers are permanently + deleted after approximately 30 days. You can restore a soft-deleted provider using + [providers.undelete](https://cloud.google.com/iam/docs/reference/rest/v1/locations.workforcePools.providers/undelete#google.iam.admin.v1.WorkforcePools.UndeleteWorkforcePoolProvider).`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIAMWorkforcePoolWorkforcePoolProviderCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandIAMWorkforcePoolWorkforcePoolProviderDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandIAMWorkforcePoolWorkforcePoolProviderDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + disabledProp, err := expandIAMWorkforcePoolWorkforcePoolProviderDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + attributeMappingProp, err := expandIAMWorkforcePoolWorkforcePoolProviderAttributeMapping(d.Get("attribute_mapping"), d, config) + if err 
!= nil { + return err + } else if v, ok := d.GetOkExists("attribute_mapping"); !tpgresource.IsEmptyValue(reflect.ValueOf(attributeMappingProp)) && (ok || !reflect.DeepEqual(v, attributeMappingProp)) { + obj["attributeMapping"] = attributeMappingProp + } + attributeConditionProp, err := expandIAMWorkforcePoolWorkforcePoolProviderAttributeCondition(d.Get("attribute_condition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attribute_condition"); !tpgresource.IsEmptyValue(reflect.ValueOf(attributeConditionProp)) && (ok || !reflect.DeepEqual(v, attributeConditionProp)) { + obj["attributeCondition"] = attributeConditionProp + } + samlProp, err := expandIAMWorkforcePoolWorkforcePoolProviderSaml(d.Get("saml"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("saml"); !tpgresource.IsEmptyValue(reflect.ValueOf(samlProp)) && (ok || !reflect.DeepEqual(v, samlProp)) { + obj["saml"] = samlProp + } + oidcProp, err := expandIAMWorkforcePoolWorkforcePoolProviderOidc(d.Get("oidc"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("oidc"); !tpgresource.IsEmptyValue(reflect.ValueOf(oidcProp)) && (ok || !reflect.DeepEqual(v, oidcProp)) { + obj["oidc"] = oidcProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers?workforcePoolProviderId={{provider_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new WorkforcePoolProvider: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + 
return fmt.Errorf("Error creating WorkforcePoolProvider: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = IAMWorkforcePoolOperationWaitTime( + config, res, "Creating WorkforcePoolProvider", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create WorkforcePoolProvider: %s", err) + } + + createdClientSecret := d.Get("oidc.0.client_secret.0.value.0.plain_text") + if createdClientSecret != nil && createdClientSecret != "" { + // After the create, reading from the API returns a new thumbprint + // for the client secret value, which clears the plain_text. We set the plain_text since + // this case should not warrant a diff. + if err := resourceIAMWorkforcePoolWorkforcePoolProviderRead(d, meta); err != nil { + return err + } + oidc := d.Get("oidc") + clientSecret := oidc.([]interface{})[0].(map[string]interface{})["client_secret"] + clientSecretValue := clientSecret.([]interface{})[0].(map[string]interface{})["value"] + clientSecretValue.([]interface{})[0].(map[string]interface{})["plain_text"] = createdClientSecret + if err := d.Set("oidc", oidc); err != nil { + return err + } + return nil + } + + log.Printf("[DEBUG] Finished creating WorkforcePoolProvider %q: %#v", d.Id(), res) + + return resourceIAMWorkforcePoolWorkforcePoolProviderRead(d, meta) +} + +func resourceIAMWorkforcePoolWorkforcePoolProviderRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IAMWorkforcePoolWorkforcePoolProvider %q", d.Id())) + } + + res, err = resourceIAMWorkforcePoolWorkforcePoolProviderDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing IAMWorkforcePoolWorkforcePoolProvider because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenIAMWorkforcePoolWorkforcePoolProviderName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) + } + if err := d.Set("display_name", flattenIAMWorkforcePoolWorkforcePoolProviderDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) + } + if err := d.Set("description", flattenIAMWorkforcePoolWorkforcePoolProviderDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) + } + if err := d.Set("state", flattenIAMWorkforcePoolWorkforcePoolProviderState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) + } + if err := d.Set("disabled", flattenIAMWorkforcePoolWorkforcePoolProviderDisabled(res["disabled"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) + 
} + if err := d.Set("attribute_mapping", flattenIAMWorkforcePoolWorkforcePoolProviderAttributeMapping(res["attributeMapping"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) + } + if err := d.Set("attribute_condition", flattenIAMWorkforcePoolWorkforcePoolProviderAttributeCondition(res["attributeCondition"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) + } + if err := d.Set("saml", flattenIAMWorkforcePoolWorkforcePoolProviderSaml(res["saml"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) + } + if err := d.Set("oidc", flattenIAMWorkforcePoolWorkforcePoolProviderOidc(res["oidc"], d, config)); err != nil { + return fmt.Errorf("Error reading WorkforcePoolProvider: %s", err) + } + + return nil +} + +func resourceIAMWorkforcePoolWorkforcePoolProviderUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + displayNameProp, err := expandIAMWorkforcePoolWorkforcePoolProviderDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandIAMWorkforcePoolWorkforcePoolProviderDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + disabledProp, err := expandIAMWorkforcePoolWorkforcePoolProviderDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, 
ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + attributeMappingProp, err := expandIAMWorkforcePoolWorkforcePoolProviderAttributeMapping(d.Get("attribute_mapping"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attribute_mapping"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attributeMappingProp)) { + obj["attributeMapping"] = attributeMappingProp + } + attributeConditionProp, err := expandIAMWorkforcePoolWorkforcePoolProviderAttributeCondition(d.Get("attribute_condition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("attribute_condition"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, attributeConditionProp)) { + obj["attributeCondition"] = attributeConditionProp + } + samlProp, err := expandIAMWorkforcePoolWorkforcePoolProviderSaml(d.Get("saml"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("saml"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, samlProp)) { + obj["saml"] = samlProp + } + oidcProp, err := expandIAMWorkforcePoolWorkforcePoolProviderOidc(d.Get("oidc"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("oidc"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, oidcProp)) { + obj["oidc"] = oidcProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating WorkforcePoolProvider %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if 
d.HasChange("disabled") { + updateMask = append(updateMask, "disabled") + } + + if d.HasChange("attribute_mapping") { + updateMask = append(updateMask, "attributeMapping") + } + + if d.HasChange("attribute_condition") { + updateMask = append(updateMask, "attributeCondition") + } + + if d.HasChange("saml") { + updateMask = append(updateMask, "saml") + } + + if d.HasChange("oidc") { + updateMask = append(updateMask, "oidc") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating WorkforcePoolProvider %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating WorkforcePoolProvider %q: %#v", d.Id(), res) + } + + err = IAMWorkforcePoolOperationWaitTime( + config, res, "Updating WorkforcePoolProvider", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + if d.HasChange("oidc") { + updatedClientSecret := d.Get("oidc.0.client_secret.0.value.0.plain_text") + if updatedClientSecret != nil && updatedClientSecret != "" { + // After the update, reading from the API returns a different thumbprint + // for the client secret value, which clears the plain_text. We set the plain_text since + // this case should not warrant a diff. 
+ if err := resourceIAMWorkforcePoolWorkforcePoolProviderRead(d, meta); err != nil { + return err + } + oidc := d.Get("oidc") + clientSecret := oidc.([]interface{})[0].(map[string]interface{})["client_secret"] + clientSecretValue := clientSecret.([]interface{})[0].(map[string]interface{})["value"] + clientSecretValue.([]interface{})[0].(map[string]interface{})["plain_text"] = updatedClientSecret + if err := d.Set("oidc", oidc); err != nil { + return err + } + return nil + } + } + return resourceIAMWorkforcePoolWorkforcePoolProviderRead(d, meta) +} + +func resourceIAMWorkforcePoolWorkforcePoolProviderDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{IAMWorkforcePoolBasePath}}locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting WorkforcePoolProvider %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "WorkforcePoolProvider") + } + + err = IAMWorkforcePoolOperationWaitTime( + config, res, "Deleting WorkforcePoolProvider", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting WorkforcePoolProvider %q: %#v", d.Id(), res) + return nil +} + +func resourceIAMWorkforcePoolWorkforcePoolProviderImport(d 
*schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "locations/(?P[^/]+)/workforcePools/(?P[^/]+)/providers/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIAMWorkforcePoolWorkforcePoolProviderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolProviderDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolProviderDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolProviderState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolProviderDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolProviderAttributeMapping(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolProviderAttributeCondition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolProviderSaml(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) 
== 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["idp_metadata_xml"] = + flattenIAMWorkforcePoolWorkforcePoolProviderSamlIdpMetadataXml(original["idpMetadataXml"], d, config) + return []interface{}{transformed} +} +func flattenIAMWorkforcePoolWorkforcePoolProviderSamlIdpMetadataXml(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolProviderOidc(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["issuer_uri"] = + flattenIAMWorkforcePoolWorkforcePoolProviderOidcIssuerUri(original["issuerUri"], d, config) + transformed["client_id"] = + flattenIAMWorkforcePoolWorkforcePoolProviderOidcClientId(original["clientId"], d, config) + transformed["client_secret"] = + flattenIAMWorkforcePoolWorkforcePoolProviderOidcClientSecret(original["clientSecret"], d, config) + transformed["web_sso_config"] = + flattenIAMWorkforcePoolWorkforcePoolProviderOidcWebSsoConfig(original["webSsoConfig"], d, config) + return []interface{}{transformed} +} +func flattenIAMWorkforcePoolWorkforcePoolProviderOidcIssuerUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolProviderOidcClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolProviderOidcClientSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["value"] = + 
flattenIAMWorkforcePoolWorkforcePoolProviderOidcClientSecretValue(original["value"], d, config) + return []interface{}{transformed} +} +func flattenIAMWorkforcePoolWorkforcePoolProviderOidcClientSecretValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["thumbprint"] = original["thumbprint"] + // Trigger a diff based on the plain_text if there is no change in the thumbprint, + // otherwise leave plain_text empty to always trigger a diff. + if original["thumbprint"].(string) == d.Get("oidc.0.client_secret.0.value.0.thumbprint").(string) { + transformed["plain_text"] = d.Get("oidc.0.client_secret.0.value.0.plain_text") + } + return []interface{}{transformed} +} + +func flattenIAMWorkforcePoolWorkforcePoolProviderOidcWebSsoConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["response_type"] = + flattenIAMWorkforcePoolWorkforcePoolProviderOidcWebSsoConfigResponseType(original["responseType"], d, config) + transformed["assertion_claims_behavior"] = + flattenIAMWorkforcePoolWorkforcePoolProviderOidcWebSsoConfigAssertionClaimsBehavior(original["assertionClaimsBehavior"], d, config) + return []interface{}{transformed} +} +func flattenIAMWorkforcePoolWorkforcePoolProviderOidcWebSsoConfigResponseType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIAMWorkforcePoolWorkforcePoolProviderOidcWebSsoConfigAssertionClaimsBehavior(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIAMWorkforcePoolWorkforcePoolProviderDisplayName(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderAttributeMapping(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderAttributeCondition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderSaml(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIdpMetadataXml, err := expandIAMWorkforcePoolWorkforcePoolProviderSamlIdpMetadataXml(original["idp_metadata_xml"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdpMetadataXml); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["idpMetadataXml"] = transformedIdpMetadataXml + } + + return transformed, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderSamlIdpMetadataXml(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandIAMWorkforcePoolWorkforcePoolProviderOidc(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIssuerUri, err := expandIAMWorkforcePoolWorkforcePoolProviderOidcIssuerUri(original["issuer_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIssuerUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["issuerUri"] = transformedIssuerUri + } + + transformedClientId, err := expandIAMWorkforcePoolWorkforcePoolProviderOidcClientId(original["client_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientId"] = transformedClientId + } + + transformedClientSecret, err := expandIAMWorkforcePoolWorkforcePoolProviderOidcClientSecret(original["client_secret"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientSecret"] = transformedClientSecret + } + + transformedWebSsoConfig, err := expandIAMWorkforcePoolWorkforcePoolProviderOidcWebSsoConfig(original["web_sso_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWebSsoConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["webSsoConfig"] = transformedWebSsoConfig + } + + return transformed, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderOidcIssuerUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderOidcClientId(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderOidcClientSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedValue, err := expandIAMWorkforcePoolWorkforcePoolProviderOidcClientSecretValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + return transformed, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderOidcClientSecretValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPlainText, err := expandIAMWorkforcePoolWorkforcePoolProviderOidcClientSecretValuePlainText(original["plain_text"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPlainText); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["plainText"] = transformedPlainText + } + + transformedThumbprint, err := expandIAMWorkforcePoolWorkforcePoolProviderOidcClientSecretValueThumbprint(original["thumbprint"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedThumbprint); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["thumbprint"] = transformedThumbprint + } + + return transformed, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderOidcClientSecretValuePlainText(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderOidcClientSecretValueThumbprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderOidcWebSsoConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedResponseType, err := expandIAMWorkforcePoolWorkforcePoolProviderOidcWebSsoConfigResponseType(original["response_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResponseType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["responseType"] = transformedResponseType + } + + transformedAssertionClaimsBehavior, err := expandIAMWorkforcePoolWorkforcePoolProviderOidcWebSsoConfigAssertionClaimsBehavior(original["assertion_claims_behavior"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAssertionClaimsBehavior); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["assertionClaimsBehavior"] = transformedAssertionClaimsBehavior + } + + return transformed, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderOidcWebSsoConfigResponseType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIAMWorkforcePoolWorkforcePoolProviderOidcWebSsoConfigAssertionClaimsBehavior(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceIAMWorkforcePoolWorkforcePoolProviderDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) 
(map[string]interface{}, error) { + if v := res["state"]; v == "DELETED" { + return nil, nil + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/resource_iam_workforce_pool_provider_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/resource_iam_workforce_pool_provider_sweeper.go new file mode 100644 index 0000000000..d9a645b874 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/resource_iam_workforce_pool_provider_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package iamworkforcepool + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("IAMWorkforcePoolWorkforcePoolProvider", testSweepIAMWorkforcePoolWorkforcePoolProvider) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepIAMWorkforcePoolWorkforcePoolProvider(region string) error { + resourceName := "IAMWorkforcePoolWorkforcePoolProvider" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://iam.googleapis.com/v1/locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + 
RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["workforcePoolProviders"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://iam.googleapis.com/v1/locations/{{location}}/workforcePools/{{workforce_pool_id}}/providers/{{provider_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d 
items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/resource_iam_workforce_pool_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/resource_iam_workforce_pool_sweeper.go new file mode 100644 index 0000000000..9a5f3ac6eb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool/resource_iam_workforce_pool_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package iamworkforcepool + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("IAMWorkforcePoolWorkforcePool", testSweepIAMWorkforcePoolWorkforcePool) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepIAMWorkforcePoolWorkforcePool(region string) error { + resourceName := "IAMWorkforcePoolWorkforcePool" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://iam.googleapis.com/v1/locations/{{location}}/workforcePools", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != 
nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["workforcePools"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://iam.googleapis.com/v1/locations/{{location}}/workforcePools/{{workforce_pool_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/data_source_iap_client.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/data_source_iap_client.go new file mode 100644 index 0000000000..32ee9a0ced --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/data_source_iap_client.go @@ -0,0 +1,33 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package iap + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleIapClient() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceIapClient().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "brand", "client_id") + + return &schema.Resource{ + Read: dataSourceGoogleIapClientRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleIapClientRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "{{brand}}/identityAwareProxyClients/{{client_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return resourceIapClientRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_app_engine_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_app_engine_service.go new file mode 100644 index 0000000000..a80d358c25 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_app_engine_service.go @@ -0,0 +1,241 @@ +// Copyright (c) HashiCorp, 
Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package iap + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var IapAppEngineServiceIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "app_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type IapAppEngineServiceIamUpdater struct { + project string + appId string + service string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func IapAppEngineServiceIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("app_id"); ok { + 
values["appId"] = v.(string) + } + + if v, ok := d.GetOk("service"); ok { + values["service"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("service").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &IapAppEngineServiceIamUpdater{ + project: values["project"], + appId: values["appId"], + service: values["service"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("app_id", u.appId); err != nil { + return nil, fmt.Errorf("Error setting app_id: %s", err) + } + if err := d.Set("service", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting service: %s", err) + } + + return u, nil +} + +func IapAppEngineServiceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &IapAppEngineServiceIamUpdater{ + project: values["project"], + appId: values["appId"], + service: values["service"], + d: d, + Config: config, + } + if err := d.Set("service", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting service: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *IapAppEngineServiceIamUpdater) GetResourceIamPolicy() 
(*cloudresourcemanager.Policy, error) { + url, err := u.qualifyAppEngineServiceUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + obj = map[string]interface{}{ + "options": map[string]interface{}{ + "requestedPolicyVersion": tpgiamresource.IamPolicyVersion, + }, + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *IapAppEngineServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyAppEngineServiceUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", 
u.DescribeResource()), err) + } + + return nil +} + +func (u *IapAppEngineServiceIamUpdater) qualifyAppEngineServiceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_web/appengine-%s/services/%s", u.project, u.appId, u.service), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *IapAppEngineServiceIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/iap_web/appengine-%s/services/%s", u.project, u.appId, u.service) +} + +func (u *IapAppEngineServiceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-iap-appengineservice-%s", u.GetResourceId()) +} + +func (u *IapAppEngineServiceIamUpdater) DescribeResource() string { + return fmt.Sprintf("iap appengineservice %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_app_engine_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_app_engine_version.go new file mode 100644 index 0000000000..1e9c671b75 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_app_engine_version.go @@ -0,0 +1,256 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package iap + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var IapAppEngineVersionIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "app_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "version_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type IapAppEngineVersionIamUpdater struct { + project string + appId string + service string + versionId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func IapAppEngineVersionIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("app_id"); ok { + values["appId"] = v.(string) + } + + if v, ok := d.GetOk("service"); ok { + values["service"] = v.(string) + } + + if v, ok := d.GetOk("version_id"); ok { + values["versionId"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)/services/(?P[^/]+)/versions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("version_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &IapAppEngineVersionIamUpdater{ + project: values["project"], + appId: values["appId"], + service: values["service"], + versionId: values["versionId"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("app_id", u.appId); err != nil { + return nil, fmt.Errorf("Error setting app_id: %s", err) + } + if err := d.Set("service", u.service); err != nil { + return nil, fmt.Errorf("Error setting service: %s", err) + } + if err := d.Set("version_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting version_id: %s", err) + } + + return u, nil +} + +func IapAppEngineVersionIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)/services/(?P[^/]+)/versions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &IapAppEngineVersionIamUpdater{ + project: values["project"], + appId: values["appId"], + service: values["service"], + versionId: values["versionId"], + d: d, + Config: config, + } + if err := d.Set("version_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting version_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u 
*IapAppEngineVersionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyAppEngineVersionUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + obj = map[string]interface{}{ + "options": map[string]interface{}{ + "requestedPolicyVersion": tpgiamresource.IamPolicyVersion, + }, + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *IapAppEngineVersionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyAppEngineVersionUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error 
setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *IapAppEngineVersionIamUpdater) qualifyAppEngineVersionUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_web/appengine-%s/services/%s/versions/%s", u.project, u.appId, u.service, u.versionId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *IapAppEngineVersionIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/iap_web/appengine-%s/services/%s/versions/%s", u.project, u.appId, u.service, u.versionId) +} + +func (u *IapAppEngineVersionIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-iap-appengineversion-%s", u.GetResourceId()) +} + +func (u *IapAppEngineVersionIamUpdater) DescribeResource() string { + return fmt.Sprintf("iap appengineversion %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_tunnel.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_tunnel.go new file mode 100644 index 0000000000..1f88381dbd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_tunnel.go @@ -0,0 +1,212 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package iap + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var IapTunnelIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type IapTunnelIamUpdater struct { + project string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func IapTunnelIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_tunnel", "(?P[^/]+)"}, d, config, d.Get("project").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &IapTunnelIamUpdater{ + project: values["project"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + + return u, nil +} + +func IapTunnelIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project 
!= "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_tunnel", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &IapTunnelIamUpdater{ + project: values["project"], + d: d, + Config: config, + } + if err := d.Set("project", u.project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *IapTunnelIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyTunnelUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + obj = map[string]interface{}{ + "options": map[string]interface{}{ + "requestedPolicyVersion": tpgiamresource.IamPolicyVersion, + }, + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *IapTunnelIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyTunnelUrl("setIamPolicy") + if err != nil { + return err + } + project, err := 
tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *IapTunnelIamUpdater) qualifyTunnelUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_tunnel", u.project), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *IapTunnelIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/iap_tunnel", u.project) +} + +func (u *IapTunnelIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-iap-tunnel-%s", u.GetResourceId()) +} + +func (u *IapTunnelIamUpdater) DescribeResource() string { + return fmt.Sprintf("iap tunnel %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_tunnel_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_tunnel_instance.go new file mode 100644 index 0000000000..3e160af96e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_tunnel_instance.go @@ -0,0 +1,250 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package iap + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var IapTunnelInstanceIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "instance": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type IapTunnelInstanceIamUpdater struct { + project string + zone string + instance string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func IapTunnelInstanceIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + zone, _ := 
tpgresource.GetZone(d, config) + if zone != "" { + if err := d.Set("zone", zone); err != nil { + return nil, fmt.Errorf("Error setting zone: %s", err) + } + } + values["zone"] = zone + if v, ok := d.GetOk("instance"); ok { + values["instance"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_tunnel/zones/(?P[^/]+)/instances/(?P[^/]+)", "projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("instance").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &IapTunnelInstanceIamUpdater{ + project: values["project"], + zone: values["zone"], + instance: values["instance"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("zone", u.zone); err != nil { + return nil, fmt.Errorf("Error setting zone: %s", err) + } + if err := d.Set("instance", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting instance: %s", err) + } + + return u, nil +} + +func IapTunnelInstanceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + zone, _ := tpgresource.GetZone(d, config) + if zone != "" { + values["zone"] = zone + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_tunnel/zones/(?P[^/]+)/instances/(?P[^/]+)", "projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &IapTunnelInstanceIamUpdater{ + 
project: values["project"], + zone: values["zone"], + instance: values["instance"], + d: d, + Config: config, + } + if err := d.Set("instance", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting instance: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *IapTunnelInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyTunnelInstanceUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + obj = map[string]interface{}{ + "options": map[string]interface{}{ + "requestedPolicyVersion": tpgiamresource.IamPolicyVersion, + }, + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *IapTunnelInstanceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyTunnelInstanceUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err 
= transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *IapTunnelInstanceIamUpdater) qualifyTunnelInstanceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_tunnel/zones/%s/instances/%s", u.project, u.zone, u.instance), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *IapTunnelInstanceIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/iap_tunnel/zones/%s/instances/%s", u.project, u.zone, u.instance) +} + +func (u *IapTunnelInstanceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-iap-tunnelinstance-%s", u.GetResourceId()) +} + +func (u *IapTunnelInstanceIamUpdater) DescribeResource() string { + return fmt.Sprintf("iap tunnelinstance %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_web.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_web.go new file mode 100644 index 0000000000..5e595d5205 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_web.go @@ -0,0 +1,212 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package iap + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var IapWebIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type IapWebIamUpdater struct { + project string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func IapWebIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web", "(?P[^/]+)"}, d, config, d.Get("project").(string)) + if err != nil { + return nil, err + } + + 
for k, v := range m { + values[k] = v + } + + u := &IapWebIamUpdater{ + project: values["project"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + + return u, nil +} + +func IapWebIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &IapWebIamUpdater{ + project: values["project"], + d: d, + Config: config, + } + if err := d.Set("project", u.project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *IapWebIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyWebUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + obj = map[string]interface{}{ + "options": map[string]interface{}{ + "requestedPolicyVersion": tpgiamresource.IamPolicyVersion, + }, + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a 
policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *IapWebIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyWebUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *IapWebIamUpdater) qualifyWebUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_web", u.project), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *IapWebIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/iap_web", u.project) +} + +func (u *IapWebIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-iap-web-%s", u.GetResourceId()) +} + +func (u *IapWebIamUpdater) DescribeResource() string { + return fmt.Sprintf("iap web %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_web_backend_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_web_backend_service.go new file mode 100644 index 0000000000..954497cbd1 --- 
/dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_web_backend_service.go @@ -0,0 +1,226 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package iap + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var IapWebBackendServiceIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "web_backend_service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type IapWebBackendServiceIamUpdater struct { + project string + webBackendService string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func IapWebBackendServiceIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, 
fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("web_backend_service"); ok { + values["web_backend_service"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/compute/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("web_backend_service").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &IapWebBackendServiceIamUpdater{ + project: values["project"], + webBackendService: values["web_backend_service"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("web_backend_service", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting web_backend_service: %s", err) + } + + return u, nil +} + +func IapWebBackendServiceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/compute/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &IapWebBackendServiceIamUpdater{ + project: values["project"], + webBackendService: values["web_backend_service"], + d: d, + Config: config, + } + if err := d.Set("web_backend_service", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting web_backend_service: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *IapWebBackendServiceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := 
u.qualifyWebBackendServiceUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + obj = map[string]interface{}{ + "options": map[string]interface{}{ + "requestedPolicyVersion": tpgiamresource.IamPolicyVersion, + }, + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *IapWebBackendServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyWebBackendServiceUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u 
*IapWebBackendServiceIamUpdater) qualifyWebBackendServiceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_web/compute/services/%s", u.project, u.webBackendService), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *IapWebBackendServiceIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/iap_web/compute/services/%s", u.project, u.webBackendService) +} + +func (u *IapWebBackendServiceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-iap-webbackendservice-%s", u.GetResourceId()) +} + +func (u *IapWebBackendServiceIamUpdater) DescribeResource() string { + return fmt.Sprintf("iap webbackendservice %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_web_type_app_engine.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_web_type_app_engine.go new file mode 100644 index 0000000000..e4f637f2b4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_web_type_app_engine.go @@ -0,0 +1,240 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package iap + +import ( + "fmt" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var IapWebTypeAppEngineIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "app_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: IapWebTypeAppEngineDiffSuppress, + }, +} + +func IapWebTypeAppEngineDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + newParts := strings.Split(new, "appengine-") + + if len(newParts) == 1 { + // `new` is only the app engine id + // `old` is always a long name + if strings.HasSuffix(old, fmt.Sprintf("appengine-%s", new)) { + return true + } + } + return old == new +} + +type IapWebTypeAppEngineIamUpdater struct { + project string + appId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func IapWebTypeAppEngineIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("app_id"); ok { + values["appId"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := 
tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("app_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &IapWebTypeAppEngineIamUpdater{ + project: values["project"], + appId: values["appId"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("app_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting app_id: %s", err) + } + + return u, nil +} + +func IapWebTypeAppEngineIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &IapWebTypeAppEngineIamUpdater{ + project: values["project"], + appId: values["appId"], + d: d, + Config: config, + } + if err := d.Set("app_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting app_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *IapWebTypeAppEngineIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyWebTypeAppEngineUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + obj = map[string]interface{}{ + "options": map[string]interface{}{ + "requestedPolicyVersion": tpgiamresource.IamPolicyVersion, + }, + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + 
return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *IapWebTypeAppEngineIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyWebTypeAppEngineUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *IapWebTypeAppEngineIamUpdater) qualifyWebTypeAppEngineUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_web/appengine-%s", u.project, u.appId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *IapWebTypeAppEngineIamUpdater) GetResourceId() string { + return 
fmt.Sprintf("projects/%s/iap_web/appengine-%s", u.project, u.appId) +} + +func (u *IapWebTypeAppEngineIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-iap-webtypeappengine-%s", u.GetResourceId()) +} + +func (u *IapWebTypeAppEngineIamUpdater) DescribeResource() string { + return fmt.Sprintf("iap webtypeappengine %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_web_type_compute.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_web_type_compute.go new file mode 100644 index 0000000000..cec6c4d9ff --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/iam_iap_web_type_compute.go @@ -0,0 +1,212 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package iap + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var IapWebTypeComputeIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type IapWebTypeComputeIamUpdater struct { + project string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func IapWebTypeComputeIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/compute", "(?P[^/]+)"}, d, config, d.Get("project").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &IapWebTypeComputeIamUpdater{ + project: values["project"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + + return u, nil +} + +func IapWebTypeComputeIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := 
tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/compute", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &IapWebTypeComputeIamUpdater{ + project: values["project"], + d: d, + Config: config, + } + if err := d.Set("project", u.project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *IapWebTypeComputeIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyWebTypeComputeUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + obj = map[string]interface{}{ + "options": map[string]interface{}{ + "requestedPolicyVersion": tpgiamresource.IamPolicyVersion, + }, + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *IapWebTypeComputeIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := 
u.qualifyWebTypeComputeUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *IapWebTypeComputeIamUpdater) qualifyWebTypeComputeUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{IapBasePath}}%s:%s", fmt.Sprintf("projects/%s/iap_web/compute", u.project), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *IapWebTypeComputeIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/iap_web/compute", u.project) +} + +func (u *IapWebTypeComputeIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-iap-webtypecompute-%s", u.GetResourceId()) +} + +func (u *IapWebTypeComputeIamUpdater) DescribeResource() string { + return fmt.Sprintf("iap webtypecompute %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_brand.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_brand.go new file mode 100644 index 0000000000..8d9bb3ccdf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_brand.go @@ -0,0 +1,348 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package iap + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceIapBrand() *schema.Resource { + return &schema.Resource{ + Create: resourceIapBrandCreate, + Read: resourceIapBrandRead, + Delete: resourceIapBrandDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIapBrandImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "application_title": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Application name displayed on OAuth consent screen.`, + }, + "support_email": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Support email displayed on the OAuth consent screen. Can be either a +user or group email. When a user email is specified, the caller must +be the user with the associated email address. When a group email is +specified, the caller can be either a user or a service account which +is an owner of the specified group in Cloud Identity.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
Identifier of the brand, in the format 'projects/{project_number}/brands/{brand_id}' +NOTE: The name can also be expressed as 'projects/{project_id}/brands/{brand_id}', e.g. when importing. +NOTE: The brand identification corresponds to the project number as only one +brand can be created per project.`, + }, + "org_internal_only": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether the brand is only intended for usage inside the GSuite organization only.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIapBrandCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + supportEmailProp, err := expandIapBrandSupportEmail(d.Get("support_email"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("support_email"); !tpgresource.IsEmptyValue(reflect.ValueOf(supportEmailProp)) && (ok || !reflect.DeepEqual(v, supportEmailProp)) { + obj["supportEmail"] = supportEmailProp + } + applicationTitleProp, err := expandIapBrandApplicationTitle(d.Get("application_title"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("application_title"); !tpgresource.IsEmptyValue(reflect.ValueOf(applicationTitleProp)) && (ok || !reflect.DeepEqual(v, applicationTitleProp)) { + obj["applicationTitle"] = applicationTitleProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IapBasePath}}projects/{{project}}/brands") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Brand: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Brand: %s", err) + } + billingProject = project + + // err == nil indicates that 
the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Brand: %s", err) + } + if err := d.Set("name", flattenIapBrandName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + err = transport_tpg.PollingWaitTime(resourceIapBrandPollRead(d, meta), transport_tpg.PollCheckForExistence, "Creating Brand", d.Timeout(schema.TimeoutCreate), 5) + if err != nil { + return fmt.Errorf("Error waiting to create Brand: %s", err) + } + + log.Printf("[DEBUG] Finished creating Brand %q: %#v", d.Id(), res) + + return resourceIapBrandRead(d, meta) +} + +func resourceIapBrandPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + url, err := tpgresource.ReplaceVars(d, config, "{{IapBasePath}}{{name}}") + if err != nil { + return nil, err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, fmt.Errorf("Error fetching project for Brand: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return res, err + } + return res, nil + } +} + +func resourceIapBrandRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IapBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, 
config) + if err != nil { + return fmt.Errorf("Error fetching project for Brand: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IapBrand %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Brand: %s", err) + } + + if err := d.Set("support_email", flattenIapBrandSupportEmail(res["supportEmail"], d, config)); err != nil { + return fmt.Errorf("Error reading Brand: %s", err) + } + if err := d.Set("application_title", flattenIapBrandApplicationTitle(res["applicationTitle"], d, config)); err != nil { + return fmt.Errorf("Error reading Brand: %s", err) + } + if err := d.Set("org_internal_only", flattenIapBrandOrgInternalOnly(res["orgInternalOnly"], d, config)); err != nil { + return fmt.Errorf("Error reading Brand: %s", err) + } + if err := d.Set("name", flattenIapBrandName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Brand: %s", err) + } + + return nil +} + +func resourceIapBrandDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] Iap Brand resources"+ + " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func resourceIapBrandImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + nameParts := strings.Split(d.Get("name").(string), "/") + if len(nameParts) != 4 && len(nameParts) != 2 { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have either shape %s or %s", + d.Get("name"), + "projects/{{project}}/brands/{{name}}", + "{{project}}/{{name}}", + ) + } + + var project string + if len(nameParts) == 4 { + project = nameParts[1] + } + if len(nameParts) == 2 { + project = nameParts[0] // Different index + + // Set `name` (and `id`) as a 4-part format so Read func produces valid URL + brand := nameParts[1] + name := fmt.Sprintf("projects/%s/brands/%s", project, brand) + if err := d.Set("name", name); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name) + } + + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenIapBrandSupportEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapBrandApplicationTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapBrandOrgInternalOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapBrandName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIapBrandSupportEmail(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIapBrandApplicationTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_client.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_client.go new file mode 100644 index 0000000000..01a5a31c29 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/iap/resource_iap_client.go @@ -0,0 +1,268 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package iap + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceIapClient() *schema.Resource { + return &schema.Resource{ + Create: resourceIapClientCreate, + Read: resourceIapClientRead, + Delete: resourceIapClientDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIapClientImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "brand": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Identifier of the brand to which this client +is attached to. The format is +'projects/{project_number}/brands/{brand_id}/identityAwareProxyClients/{client_id}'.`, + }, + "display_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Human-friendly name given to the OAuth client.`, + }, + "client_id": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Unique identifier of the OAuth client.`, + }, + "secret": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
Client secret of the OAuth client.`, + Sensitive: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIapClientCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandIapClientDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IapBasePath}}{{brand}}/identityAwareProxyClients") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Client: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IapClient409Operation}, + }) + if err != nil { + return fmt.Errorf("Error creating Client: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{brand}}/identityAwareProxyClients/{{client_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + brand := d.Get("brand") + clientId := flattenIapClientClientId(res["name"], d, config) + + if err := d.Set("client_id", clientId); err != nil { + return fmt.Errorf("Error setting client_id: %s", err) + } + d.SetId(fmt.Sprintf("%s/identityAwareProxyClients/%s", brand, clientId)) + 
+ log.Printf("[DEBUG] Finished creating Client %q: %#v", d.Id(), res) + + return resourceIapClientRead(d, meta) +} + +func resourceIapClientRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IapBasePath}}{{brand}}/identityAwareProxyClients/{{client_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IapClient409Operation}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IapClient %q", d.Id())) + } + + if err := d.Set("secret", flattenIapClientSecret(res["secret"], d, config)); err != nil { + return fmt.Errorf("Error reading Client: %s", err) + } + if err := d.Set("display_name", flattenIapClientDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Client: %s", err) + } + if err := d.Set("client_id", flattenIapClientClientId(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Client: %s", err) + } + + return nil +} + +func resourceIapClientDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{IapBasePath}}{{brand}}/identityAwareProxyClients/{{client_id}}") + if err != nil { + return err + } + + var obj 
map[string]interface{} + log.Printf("[DEBUG] Deleting Client %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IapClient409Operation}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Client") + } + + log.Printf("[DEBUG] Finished deleting Client %q: %#v", d.Id(), res) + return nil +} + +func resourceIapClientImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + nameParts := strings.Split(d.Get("brand").(string), "/") + if len(nameParts) != 6 { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s", + d.Get("brand").(string), + "projects/{{project_number}}/brands/{{brand_id}}/identityAwareProxyClients/{{client_id}}", + ) + } + + if err := d.Set("brand", fmt.Sprintf("projects/%s/brands/%s", nameParts[1], nameParts[3])); err != nil { + return nil, fmt.Errorf("Error setting brand: %s", err) + } + if err := d.Set("client_id", nameParts[5]); err != nil { + return nil, fmt.Errorf("Error setting client_id: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenIapClientSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIapClientDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenIapClientClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandIapClientDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_config.go new file mode 100644 index 0000000000..2caeabcd2e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_config.go @@ -0,0 +1,283 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceIdentityPlatformConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceIdentityPlatformConfigCreate, + Read: resourceIdentityPlatformConfigRead, + Update: resourceIdentityPlatformConfigUpdate, + Delete: resourceIdentityPlatformConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIdentityPlatformConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "autodelete_anonymous_users": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether anonymous users will be auto-deleted after a period of 30 days`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the Config resource`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIdentityPlatformConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/identityPlatform:initializeAuth") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Config: %s", err) 
+ } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Config: %s", err) + } + if err := d.Set("name", flattenIdentityPlatformConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/config") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Update the resource after initializing auth to set fields. + if err := resourceIdentityPlatformConfigUpdate(d, meta); err != nil { + return err + } + + log.Printf("[DEBUG] Finished creating Config %q: %#v", d.Id(), res) + + return resourceIdentityPlatformConfigRead(d, meta) +} + +func resourceIdentityPlatformConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Config: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: 
config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + + if err := d.Set("name", flattenIdentityPlatformConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + if err := d.Set("autodelete_anonymous_users", flattenIdentityPlatformConfigAutodeleteAnonymousUsers(res["autodeleteAnonymousUsers"], d, config)); err != nil { + return fmt.Errorf("Error reading Config: %s", err) + } + + return nil +} + +func resourceIdentityPlatformConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Config: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + autodeleteAnonymousUsersProp, err := expandIdentityPlatformConfigAutodeleteAnonymousUsers(d.Get("autodelete_anonymous_users"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("autodelete_anonymous_users"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, autodeleteAnonymousUsersProp)) { + obj["autodeleteAnonymousUsers"] = autodeleteAnonymousUsersProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Config %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("autodelete_anonymous_users") { + updateMask = append(updateMask, "autodeleteAnonymousUsers") + } + // updateMask is a URL 
parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Config %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Config %q: %#v", d.Id(), res) + } + + return resourceIdentityPlatformConfigRead(d, meta) +} + +func resourceIdentityPlatformConfigDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] IdentityPlatform Config resources"+ + " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func resourceIdentityPlatformConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/config", + "projects/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/config") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIdentityPlatformConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformConfigAutodeleteAnonymousUsers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIdentityPlatformConfigAutodeleteAnonymousUsers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_default_supported_idp_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_default_supported_idp_config.go new file mode 100644 index 0000000000..8a69e1c8bc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_default_supported_idp_config.go @@ -0,0 +1,414 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceIdentityPlatformDefaultSupportedIdpConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceIdentityPlatformDefaultSupportedIdpConfigCreate, + Read: resourceIdentityPlatformDefaultSupportedIdpConfigRead, + Update: resourceIdentityPlatformDefaultSupportedIdpConfigUpdate, + Delete: resourceIdentityPlatformDefaultSupportedIdpConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIdentityPlatformDefaultSupportedIdpConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "client_id": { + Type: schema.TypeString, + Required: true, + Description: `OAuth client ID`, + }, + "client_secret": { + Type: schema.TypeString, + Required: true, + Description: `OAuth client secret`, + }, + "idp_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `ID of the IDP. 
Possible values include: + +* 'apple.com' + +* 'facebook.com' + +* 'gc.apple.com' + +* 'github.com' + +* 'google.com' + +* 'linkedin.com' + +* 'microsoft.com' + +* 'playgames.google.com' + +* 'twitter.com' + +* 'yahoo.com'`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `If this IDP allows the user to sign in`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the DefaultSupportedIdpConfig resource`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIdentityPlatformDefaultSupportedIdpConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + clientIdProp, err := expandIdentityPlatformDefaultSupportedIdpConfigClientId(d.Get("client_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(clientIdProp)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { + obj["clientId"] = clientIdProp + } + clientSecretProp, err := expandIdentityPlatformDefaultSupportedIdpConfigClientSecret(d.Get("client_secret"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_secret"); !tpgresource.IsEmptyValue(reflect.ValueOf(clientSecretProp)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { + obj["clientSecret"] = clientSecretProp + } + enabledProp, err := expandIdentityPlatformDefaultSupportedIdpConfigEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{IdentityPlatformBasePath}}projects/{{project}}/defaultSupportedIdpConfigs?idpId={{idp_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new DefaultSupportedIdpConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DefaultSupportedIdpConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating DefaultSupportedIdpConfig: %s", err) + } + if err := d.Set("name", flattenIdentityPlatformDefaultSupportedIdpConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating DefaultSupportedIdpConfig %q: %#v", d.Id(), res) + + return resourceIdentityPlatformDefaultSupportedIdpConfigRead(d, meta) +} + +func resourceIdentityPlatformDefaultSupportedIdpConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := 
tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DefaultSupportedIdpConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformDefaultSupportedIdpConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) + } + + if err := d.Set("name", flattenIdentityPlatformDefaultSupportedIdpConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) + } + if err := d.Set("client_id", flattenIdentityPlatformDefaultSupportedIdpConfigClientId(res["clientId"], d, config)); err != nil { + return fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) + } + if err := d.Set("client_secret", flattenIdentityPlatformDefaultSupportedIdpConfigClientSecret(res["clientSecret"], d, config)); err != nil { + return fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) + } + if err := d.Set("enabled", flattenIdentityPlatformDefaultSupportedIdpConfigEnabled(res["enabled"], d, config)); err != nil { + return fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) + } + + return nil +} + +func resourceIdentityPlatformDefaultSupportedIdpConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + 
return fmt.Errorf("Error fetching project for DefaultSupportedIdpConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + clientIdProp, err := expandIdentityPlatformDefaultSupportedIdpConfigClientId(d.Get("client_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { + obj["clientId"] = clientIdProp + } + clientSecretProp, err := expandIdentityPlatformDefaultSupportedIdpConfigClientSecret(d.Get("client_secret"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_secret"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { + obj["clientSecret"] = clientSecretProp + } + enabledProp, err := expandIdentityPlatformDefaultSupportedIdpConfigEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating DefaultSupportedIdpConfig %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("client_id") { + updateMask = append(updateMask, "clientId") + } + + if d.HasChange("client_secret") { + updateMask = append(updateMask, "clientSecret") + } + + if d.HasChange("enabled") { + updateMask = append(updateMask, "enabled") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if 
bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating DefaultSupportedIdpConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating DefaultSupportedIdpConfig %q: %#v", d.Id(), res) + } + + return resourceIdentityPlatformDefaultSupportedIdpConfigRead(d, meta) +} + +func resourceIdentityPlatformDefaultSupportedIdpConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for DefaultSupportedIdpConfig: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting DefaultSupportedIdpConfig %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DefaultSupportedIdpConfig") + } + + log.Printf("[DEBUG] Finished deleting DefaultSupportedIdpConfig %q: %#v", d.Id(), res) + return nil +} + 
+func resourceIdentityPlatformDefaultSupportedIdpConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/defaultSupportedIdpConfigs/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIdentityPlatformDefaultSupportedIdpConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformDefaultSupportedIdpConfigClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformDefaultSupportedIdpConfigClientSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformDefaultSupportedIdpConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIdentityPlatformDefaultSupportedIdpConfigClientId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformDefaultSupportedIdpConfigClientSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformDefaultSupportedIdpConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_default_supported_idp_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_default_supported_idp_config_sweeper.go new file mode 100644 index 0000000000..4dbe617a1b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_default_supported_idp_config_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("IdentityPlatformDefaultSupportedIdpConfig", testSweepIdentityPlatformDefaultSupportedIdpConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepIdentityPlatformDefaultSupportedIdpConfig(region string) error { + resourceName := "IdentityPlatformDefaultSupportedIdpConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://identitytoolkit.googleapis.com/v2/projects/{{project}}/defaultSupportedIdpConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: 
config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["defaultSupportedIdpConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://identitytoolkit.googleapis.com/v2/projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] 
%d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_inbound_saml_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_inbound_saml_config.go new file mode 100644 index 0000000000..77bdc38c96 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_inbound_saml_config.go @@ -0,0 +1,726 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceIdentityPlatformInboundSamlConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceIdentityPlatformInboundSamlConfigCreate, + Read: resourceIdentityPlatformInboundSamlConfigRead, + Update: resourceIdentityPlatformInboundSamlConfigUpdate, + Delete: resourceIdentityPlatformInboundSamlConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIdentityPlatformInboundSamlConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `Human friendly display name.`, + }, + "idp_config": { + Type: schema.TypeList, + Required: true, + Description: `SAML IdP configuration when the project acts as the relying party`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "idp_certificates": { + Type: schema.TypeList, + Required: true, + Description: `The IdP's certificate data to verify the signature in the SAMLResponse issued by the IDP.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "x509_certificate": { + Type: schema.TypeString, + Optional: true, + Description: `The IdP's x509 certificate.`, + }, + }, + }, + }, + "idp_entity_id": { + Type: schema.TypeString, + Required: true, + Description: `Unique identifier for all SAML entities`, + }, + "sso_url": { + Type: schema.TypeString, + Required: true, + Description: `URL to send 
Authentication request to.`, + }, + "sign_request": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates if outbounding SAMLRequest should be signed.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the InboundSamlConfig resource. Must start with 'saml.' and can only have alphanumeric characters, +hyphens, underscores or periods. The part after 'saml.' must also start with a lowercase letter, end with an +alphanumeric character, and have at least 2 characters.`, + }, + "sp_config": { + Type: schema.TypeList, + Required: true, + Description: `SAML SP (Service Provider) configuration when the project acts as the relying party to receive +and accept an authentication assertion issued by a SAML identity provider.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "callback_uri": { + Type: schema.TypeString, + Optional: true, + Description: `Callback URI where responses from IDP are handled. 
Must start with 'https://'.`, + }, + "sp_entity_id": { + Type: schema.TypeString, + Optional: true, + Description: `Unique identifier for all SAML entities.`, + }, + "sp_certificates": { + Type: schema.TypeList, + Computed: true, + Description: `The IDP's certificate data to verify the signature in the SAMLResponse issued by the IDP.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "x509_certificate": { + Type: schema.TypeString, + Computed: true, + Description: `The x509 certificate`, + }, + }, + }, + }, + }, + }, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `If this config allows users to sign in with the provider.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIdentityPlatformInboundSamlConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandIdentityPlatformInboundSamlConfigName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + displayNameProp, err := expandIdentityPlatformInboundSamlConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enabledProp, err := expandIdentityPlatformInboundSamlConfigEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(enabledProp)) && (ok || 
!reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + idpConfigProp, err := expandIdentityPlatformInboundSamlConfigIdpConfig(d.Get("idp_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("idp_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(idpConfigProp)) && (ok || !reflect.DeepEqual(v, idpConfigProp)) { + obj["idpConfig"] = idpConfigProp + } + spConfigProp, err := expandIdentityPlatformInboundSamlConfigSpConfig(d.Get("sp_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("sp_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(spConfigProp)) && (ok || !reflect.DeepEqual(v, spConfigProp)) { + obj["spConfig"] = spConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/inboundSamlConfigs?inboundSamlConfigId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new InboundSamlConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for InboundSamlConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating InboundSamlConfig: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/inboundSamlConfigs/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating InboundSamlConfig %q: %#v", d.Id(), res) + + return 
resourceIdentityPlatformInboundSamlConfigRead(d, meta) +} + +func resourceIdentityPlatformInboundSamlConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/inboundSamlConfigs/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for InboundSamlConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformInboundSamlConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading InboundSamlConfig: %s", err) + } + + if err := d.Set("name", flattenIdentityPlatformInboundSamlConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading InboundSamlConfig: %s", err) + } + if err := d.Set("display_name", flattenIdentityPlatformInboundSamlConfigDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading InboundSamlConfig: %s", err) + } + if err := d.Set("enabled", flattenIdentityPlatformInboundSamlConfigEnabled(res["enabled"], d, config)); err != nil { + return fmt.Errorf("Error reading InboundSamlConfig: %s", err) + } + if err := d.Set("idp_config", flattenIdentityPlatformInboundSamlConfigIdpConfig(res["idpConfig"], d, config)); err != nil { + return 
fmt.Errorf("Error reading InboundSamlConfig: %s", err) + } + if err := d.Set("sp_config", flattenIdentityPlatformInboundSamlConfigSpConfig(res["spConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading InboundSamlConfig: %s", err) + } + + return nil +} + +func resourceIdentityPlatformInboundSamlConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for InboundSamlConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandIdentityPlatformInboundSamlConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enabledProp, err := expandIdentityPlatformInboundSamlConfigEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + idpConfigProp, err := expandIdentityPlatformInboundSamlConfigIdpConfig(d.Get("idp_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("idp_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idpConfigProp)) { + obj["idpConfig"] = idpConfigProp + } + spConfigProp, err := expandIdentityPlatformInboundSamlConfigSpConfig(d.Get("sp_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("sp_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
spConfigProp)) { + obj["spConfig"] = spConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/inboundSamlConfigs/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating InboundSamlConfig %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("enabled") { + updateMask = append(updateMask, "enabled") + } + + if d.HasChange("idp_config") { + updateMask = append(updateMask, "idpConfig") + } + + if d.HasChange("sp_config") { + updateMask = append(updateMask, "spConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating InboundSamlConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating InboundSamlConfig %q: %#v", d.Id(), res) + } + + return resourceIdentityPlatformInboundSamlConfigRead(d, meta) +} + +func resourceIdentityPlatformInboundSamlConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for InboundSamlConfig: 
%s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/inboundSamlConfigs/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting InboundSamlConfig %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "InboundSamlConfig") + } + + log.Printf("[DEBUG] Finished deleting InboundSamlConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceIdentityPlatformInboundSamlConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/inboundSamlConfigs/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/inboundSamlConfigs/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIdentityPlatformInboundSamlConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenIdentityPlatformInboundSamlConfigDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformInboundSamlConfigEnabled(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformInboundSamlConfigIdpConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["idp_entity_id"] = + flattenIdentityPlatformInboundSamlConfigIdpConfigIdpEntityId(original["idpEntityId"], d, config) + transformed["sso_url"] = + flattenIdentityPlatformInboundSamlConfigIdpConfigSsoUrl(original["ssoUrl"], d, config) + transformed["sign_request"] = + flattenIdentityPlatformInboundSamlConfigIdpConfigSignRequest(original["signRequest"], d, config) + transformed["idp_certificates"] = + flattenIdentityPlatformInboundSamlConfigIdpConfigIdpCertificates(original["idpCertificates"], d, config) + return []interface{}{transformed} +} +func flattenIdentityPlatformInboundSamlConfigIdpConfigIdpEntityId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformInboundSamlConfigIdpConfigSsoUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformInboundSamlConfigIdpConfigSignRequest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformInboundSamlConfigIdpConfigIdpCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "x509_certificate": 
flattenIdentityPlatformInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(original["x509Certificate"], d, config), + }) + } + return transformed +} +func flattenIdentityPlatformInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformInboundSamlConfigSpConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["sp_entity_id"] = + flattenIdentityPlatformInboundSamlConfigSpConfigSpEntityId(original["spEntityId"], d, config) + transformed["callback_uri"] = + flattenIdentityPlatformInboundSamlConfigSpConfigCallbackUri(original["callbackUri"], d, config) + transformed["sp_certificates"] = + flattenIdentityPlatformInboundSamlConfigSpConfigSpCertificates(original["spCertificates"], d, config) + return []interface{}{transformed} +} +func flattenIdentityPlatformInboundSamlConfigSpConfigSpEntityId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformInboundSamlConfigSpConfigCallbackUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformInboundSamlConfigSpConfigSpCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "x509_certificate": 
flattenIdentityPlatformInboundSamlConfigSpConfigSpCertificatesX509Certificate(original["x509Certificate"], d, config), + }) + } + return transformed +} +func flattenIdentityPlatformInboundSamlConfigSpConfigSpCertificatesX509Certificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIdentityPlatformInboundSamlConfigName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformInboundSamlConfigDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformInboundSamlConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformInboundSamlConfigIdpConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIdpEntityId, err := expandIdentityPlatformInboundSamlConfigIdpConfigIdpEntityId(original["idp_entity_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdpEntityId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["idpEntityId"] = transformedIdpEntityId + } + + transformedSsoUrl, err := expandIdentityPlatformInboundSamlConfigIdpConfigSsoUrl(original["sso_url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSsoUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ssoUrl"] = transformedSsoUrl + } + + transformedSignRequest, err := expandIdentityPlatformInboundSamlConfigIdpConfigSignRequest(original["sign_request"], d, config) + if 
err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSignRequest); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["signRequest"] = transformedSignRequest + } + + transformedIdpCertificates, err := expandIdentityPlatformInboundSamlConfigIdpConfigIdpCertificates(original["idp_certificates"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdpCertificates); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["idpCertificates"] = transformedIdpCertificates + } + + return transformed, nil +} + +func expandIdentityPlatformInboundSamlConfigIdpConfigIdpEntityId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformInboundSamlConfigIdpConfigSsoUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformInboundSamlConfigIdpConfigSignRequest(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformInboundSamlConfigIdpConfigIdpCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedX509Certificate, err := expandIdentityPlatformInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(original["x509_certificate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedX509Certificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["x509Certificate"] = transformedX509Certificate + } + + req = append(req, transformed) + } + return req, nil +} + +func 
expandIdentityPlatformInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformInboundSamlConfigSpConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSpEntityId, err := expandIdentityPlatformInboundSamlConfigSpConfigSpEntityId(original["sp_entity_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSpEntityId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["spEntityId"] = transformedSpEntityId + } + + transformedCallbackUri, err := expandIdentityPlatformInboundSamlConfigSpConfigCallbackUri(original["callback_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCallbackUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["callbackUri"] = transformedCallbackUri + } + + transformedSpCertificates, err := expandIdentityPlatformInboundSamlConfigSpConfigSpCertificates(original["sp_certificates"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSpCertificates); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["spCertificates"] = transformedSpCertificates + } + + return transformed, nil +} + +func expandIdentityPlatformInboundSamlConfigSpConfigSpEntityId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformInboundSamlConfigSpConfigCallbackUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandIdentityPlatformInboundSamlConfigSpConfigSpCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedX509Certificate, err := expandIdentityPlatformInboundSamlConfigSpConfigSpCertificatesX509Certificate(original["x509_certificate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedX509Certificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["x509Certificate"] = transformedX509Certificate + } + + req = append(req, transformed) + } + return req, nil +} + +func expandIdentityPlatformInboundSamlConfigSpConfigSpCertificatesX509Certificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_inbound_saml_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_inbound_saml_config_sweeper.go new file mode 100644 index 0000000000..b0bd5c0e97 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_inbound_saml_config_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("IdentityPlatformInboundSamlConfig", testSweepIdentityPlatformInboundSamlConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepIdentityPlatformInboundSamlConfig(region string) error { + resourceName := "IdentityPlatformInboundSamlConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + 
listTemplate := strings.Split("https://identitytoolkit.googleapis.com/v2/projects/{{project}}/inboundSamlConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["inboundSamlConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://identitytoolkit.googleapis.com/v2/projects/{{project}}/inboundSamlConfigs/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error 
deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_oauth_idp_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_oauth_idp_config.go new file mode 100644 index 0000000000..4000aacd2d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_oauth_idp_config.go @@ -0,0 +1,463 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceIdentityPlatformOauthIdpConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceIdentityPlatformOauthIdpConfigCreate, + Read: resourceIdentityPlatformOauthIdpConfigRead, + Update: resourceIdentityPlatformOauthIdpConfigUpdate, + Delete: resourceIdentityPlatformOauthIdpConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIdentityPlatformOauthIdpConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "client_id": { + Type: schema.TypeString, + Required: true, + Description: `The client id of an OAuth client.`, + }, + "issuer": { + Type: schema.TypeString, + Required: true, + Description: `For OIDC Idps, the issuer identifier.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the OauthIdpConfig. 
Must start with 'oidc.'.`, + }, + "client_secret": { + Type: schema.TypeString, + Optional: true, + Description: `The client secret of the OAuth client, to enable OIDC code flow.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `Human friendly display name.`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `If this config allows users to sign in with the provider.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIdentityPlatformOauthIdpConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandIdentityPlatformOauthIdpConfigName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + displayNameProp, err := expandIdentityPlatformOauthIdpConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enabledProp, err := expandIdentityPlatformOauthIdpConfigEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + issuerProp, err := expandIdentityPlatformOauthIdpConfigIssuer(d.Get("issuer"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("issuer"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(issuerProp)) && (ok || !reflect.DeepEqual(v, issuerProp)) { + obj["issuer"] = issuerProp + } + clientIdProp, err := expandIdentityPlatformOauthIdpConfigClientId(d.Get("client_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(clientIdProp)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { + obj["clientId"] = clientIdProp + } + clientSecretProp, err := expandIdentityPlatformOauthIdpConfigClientSecret(d.Get("client_secret"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_secret"); !tpgresource.IsEmptyValue(reflect.ValueOf(clientSecretProp)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { + obj["clientSecret"] = clientSecretProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/oauthIdpConfigs?oauthIdpConfigId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new OauthIdpConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for OauthIdpConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating OauthIdpConfig: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/oauthIdpConfigs/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating 
OauthIdpConfig %q: %#v", d.Id(), res) + + return resourceIdentityPlatformOauthIdpConfigRead(d, meta) +} + +func resourceIdentityPlatformOauthIdpConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/oauthIdpConfigs/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for OauthIdpConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformOauthIdpConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading OauthIdpConfig: %s", err) + } + + if err := d.Set("name", flattenIdentityPlatformOauthIdpConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading OauthIdpConfig: %s", err) + } + if err := d.Set("display_name", flattenIdentityPlatformOauthIdpConfigDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading OauthIdpConfig: %s", err) + } + if err := d.Set("enabled", flattenIdentityPlatformOauthIdpConfigEnabled(res["enabled"], d, config)); err != nil { + return fmt.Errorf("Error reading OauthIdpConfig: %s", err) + } + if err := d.Set("issuer", flattenIdentityPlatformOauthIdpConfigIssuer(res["issuer"], d, config)); err != nil { + return 
fmt.Errorf("Error reading OauthIdpConfig: %s", err) + } + if err := d.Set("client_id", flattenIdentityPlatformOauthIdpConfigClientId(res["clientId"], d, config)); err != nil { + return fmt.Errorf("Error reading OauthIdpConfig: %s", err) + } + if err := d.Set("client_secret", flattenIdentityPlatformOauthIdpConfigClientSecret(res["clientSecret"], d, config)); err != nil { + return fmt.Errorf("Error reading OauthIdpConfig: %s", err) + } + + return nil +} + +func resourceIdentityPlatformOauthIdpConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for OauthIdpConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandIdentityPlatformOauthIdpConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enabledProp, err := expandIdentityPlatformOauthIdpConfigEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + issuerProp, err := expandIdentityPlatformOauthIdpConfigIssuer(d.Get("issuer"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("issuer"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, issuerProp)) { + obj["issuer"] = issuerProp + } + clientIdProp, err := expandIdentityPlatformOauthIdpConfigClientId(d.Get("client_id"), d, config) + if err != nil { 
+ return err + } else if v, ok := d.GetOkExists("client_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { + obj["clientId"] = clientIdProp + } + clientSecretProp, err := expandIdentityPlatformOauthIdpConfigClientSecret(d.Get("client_secret"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_secret"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { + obj["clientSecret"] = clientSecretProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/oauthIdpConfigs/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating OauthIdpConfig %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("enabled") { + updateMask = append(updateMask, "enabled") + } + + if d.HasChange("issuer") { + updateMask = append(updateMask, "issuer") + } + + if d.HasChange("client_id") { + updateMask = append(updateMask, "clientId") + } + + if d.HasChange("client_secret") { + updateMask = append(updateMask, "clientSecret") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating OauthIdpConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] 
Finished updating OauthIdpConfig %q: %#v", d.Id(), res) + } + + return resourceIdentityPlatformOauthIdpConfigRead(d, meta) +} + +func resourceIdentityPlatformOauthIdpConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for OauthIdpConfig: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/oauthIdpConfigs/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting OauthIdpConfig %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "OauthIdpConfig") + } + + log.Printf("[DEBUG] Finished deleting OauthIdpConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceIdentityPlatformOauthIdpConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/oauthIdpConfigs/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/oauthIdpConfigs/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error 
constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIdentityPlatformOauthIdpConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenIdentityPlatformOauthIdpConfigDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformOauthIdpConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformOauthIdpConfigIssuer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformOauthIdpConfigClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformOauthIdpConfigClientSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIdentityPlatformOauthIdpConfigName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformOauthIdpConfigDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformOauthIdpConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformOauthIdpConfigIssuer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformOauthIdpConfigClientId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandIdentityPlatformOauthIdpConfigClientSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_oauth_idp_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_oauth_idp_config_sweeper.go new file mode 100644 index 0000000000..9654744c48 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_oauth_idp_config_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("IdentityPlatformOauthIdpConfig", testSweepIdentityPlatformOauthIdpConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepIdentityPlatformOauthIdpConfig(region string) error { + resourceName := "IdentityPlatformOauthIdpConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://identitytoolkit.googleapis.com/v2/projects/{{project}}/oauthIdpConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + 
}) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["oauthIdpConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://identitytoolkit.googleapis.com/v2/projects/{{project}}/oauthIdpConfigs/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_project_default_config.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_project_default_config.go new file mode 100644 index 0000000000..8d48ccdd6e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_project_default_config.go @@ -0,0 +1,789 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceIdentityPlatformProjectDefaultConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceIdentityPlatformProjectDefaultConfigCreate, + Read: resourceIdentityPlatformProjectDefaultConfigRead, + Update: resourceIdentityPlatformProjectDefaultConfigUpdate, + Delete: resourceIdentityPlatformProjectDefaultConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIdentityPlatformProjectDefaultConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: 
map[string]*schema.Schema{ + "sign_in": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration related to local sign in methods.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_duplicate_emails": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether to allow more than one account to have the same email.`, + }, + "anonymous": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration options related to authenticating an anonymous user.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether anonymous user auth is enabled for the project or not.`, + }, + }, + }, + }, + "email": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration options related to authenticating a user by their email address.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether email auth is enabled for the project or not.`, + }, + "password_required": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether a password is required for email auth or not. If true, both an email and +password must be provided to sign in. 
If false, a user may sign in via either +email/password or email link.`, + }, + }, + }, + }, + "phone_number": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration options related to authenticated a user by their phone number.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether phone number auth is enabled for the project or not.`, + }, + "test_phone_numbers": { + Type: schema.TypeMap, + Optional: true, + Description: `A map of that can be used for phone auth testing.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "hash_config": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. Hash config information.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "algorithm": { + Type: schema.TypeString, + Computed: true, + Description: `Different password hash algorithms used in Identity Toolkit.`, + }, + "memory_cost": { + Type: schema.TypeInt, + Computed: true, + Description: `Memory cost for hash calculation. Used by scrypt and other similar password derivation algorithms. See https://tools.ietf.org/html/rfc7914 for explanation of field.`, + }, + "rounds": { + Type: schema.TypeInt, + Computed: true, + Description: `How many rounds for hash calculation. Used by scrypt and other similar password derivation algorithms.`, + }, + "salt_separator": { + Type: schema.TypeString, + Computed: true, + Description: `Non-printable character to be inserted between the salt and plain text password in base64.`, + }, + "signer_key": { + Type: schema.TypeString, + Computed: true, + Description: `Signer key in base64.`, + }, + }, + }, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the Config resource. 
Example: "projects/my-awesome-project/config"`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIdentityPlatformProjectDefaultConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + signInProp, err := expandIdentityPlatformProjectDefaultConfigSignIn(d.Get("sign_in"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("sign_in"); !tpgresource.IsEmptyValue(reflect.ValueOf(signInProp)) && (ok || !reflect.DeepEqual(v, signInProp)) { + obj["signIn"] = signInProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ProjectDefaultConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectDefaultConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ProjectDefaultConfig: %s", err) + } + if err := d.Set("name", flattenIdentityPlatformProjectDefaultConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, 
"{{project}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ProjectDefaultConfig %q: %#v", d.Id(), res) + + return resourceIdentityPlatformProjectDefaultConfigRead(d, meta) +} + +func resourceIdentityPlatformProjectDefaultConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectDefaultConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformProjectDefaultConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ProjectDefaultConfig: %s", err) + } + + if err := d.Set("name", flattenIdentityPlatformProjectDefaultConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectDefaultConfig: %s", err) + } + if err := d.Set("sign_in", flattenIdentityPlatformProjectDefaultConfigSignIn(res["signIn"], d, config)); err != nil { + return fmt.Errorf("Error reading ProjectDefaultConfig: %s", err) + } + + return nil +} + +func resourceIdentityPlatformProjectDefaultConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config 
:= meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectDefaultConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + signInProp, err := expandIdentityPlatformProjectDefaultConfigSignIn(d.Get("sign_in"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("sign_in"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, signInProp)) { + obj["signIn"] = signInProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ProjectDefaultConfig %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("sign_in") { + updateMask = append(updateMask, "signIn") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating ProjectDefaultConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ProjectDefaultConfig %q: %#v", d.Id(), res) + } + + return resourceIdentityPlatformProjectDefaultConfigRead(d, meta) +} + +func 
resourceIdentityPlatformProjectDefaultConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ProjectDefaultConfig: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/config") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting ProjectDefaultConfig %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ProjectDefaultConfig") + } + + log.Printf("[DEBUG] Finished deleting ProjectDefaultConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceIdentityPlatformProjectDefaultConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/config/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{project}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIdentityPlatformProjectDefaultConfigName(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformProjectDefaultConfigSignIn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["email"] = + flattenIdentityPlatformProjectDefaultConfigSignInEmail(original["email"], d, config) + transformed["phone_number"] = + flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumber(original["phoneNumber"], d, config) + transformed["anonymous"] = + flattenIdentityPlatformProjectDefaultConfigSignInAnonymous(original["anonymous"], d, config) + transformed["allow_duplicate_emails"] = + flattenIdentityPlatformProjectDefaultConfigSignInAllowDuplicateEmails(original["allowDuplicateEmails"], d, config) + transformed["hash_config"] = + flattenIdentityPlatformProjectDefaultConfigSignInHashConfig(original["hashConfig"], d, config) + return []interface{}{transformed} +} +func flattenIdentityPlatformProjectDefaultConfigSignInEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenIdentityPlatformProjectDefaultConfigSignInEmailEnabled(original["enabled"], d, config) + transformed["password_required"] = + flattenIdentityPlatformProjectDefaultConfigSignInEmailPasswordRequired(original["passwordRequired"], d, config) + return []interface{}{transformed} +} +func flattenIdentityPlatformProjectDefaultConfigSignInEmailEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformProjectDefaultConfigSignInEmailPasswordRequired(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumberEnabled(original["enabled"], d, config) + transformed["test_phone_numbers"] = + flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumberTestPhoneNumbers(original["testPhoneNumbers"], d, config) + return []interface{}{transformed} +} +func flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumberEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformProjectDefaultConfigSignInPhoneNumberTestPhoneNumbers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformProjectDefaultConfigSignInAnonymous(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enabled"] = + flattenIdentityPlatformProjectDefaultConfigSignInAnonymousEnabled(original["enabled"], d, config) + return []interface{}{transformed} +} +func flattenIdentityPlatformProjectDefaultConfigSignInAnonymousEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformProjectDefaultConfigSignInAllowDuplicateEmails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformProjectDefaultConfigSignInHashConfig(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["algorithm"] = + flattenIdentityPlatformProjectDefaultConfigSignInHashConfigAlgorithm(original["algorithm"], d, config) + transformed["signer_key"] = + flattenIdentityPlatformProjectDefaultConfigSignInHashConfigSignerKey(original["signerKey"], d, config) + transformed["salt_separator"] = + flattenIdentityPlatformProjectDefaultConfigSignInHashConfigSaltSeparator(original["saltSeparator"], d, config) + transformed["rounds"] = + flattenIdentityPlatformProjectDefaultConfigSignInHashConfigRounds(original["rounds"], d, config) + transformed["memory_cost"] = + flattenIdentityPlatformProjectDefaultConfigSignInHashConfigMemoryCost(original["memoryCost"], d, config) + return []interface{}{transformed} +} +func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigAlgorithm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigSignerKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigSaltSeparator(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformProjectDefaultConfigSignInHashConfigRounds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenIdentityPlatformProjectDefaultConfigSignInHashConfigMemoryCost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandIdentityPlatformProjectDefaultConfigSignIn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEmail, err := expandIdentityPlatformProjectDefaultConfigSignInEmail(original["email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["email"] = transformedEmail + } + + transformedPhoneNumber, err := expandIdentityPlatformProjectDefaultConfigSignInPhoneNumber(original["phone_number"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPhoneNumber); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["phoneNumber"] = transformedPhoneNumber + } + + transformedAnonymous, err := expandIdentityPlatformProjectDefaultConfigSignInAnonymous(original["anonymous"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAnonymous); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["anonymous"] = transformedAnonymous + } + + transformedAllowDuplicateEmails, err := expandIdentityPlatformProjectDefaultConfigSignInAllowDuplicateEmails(original["allow_duplicate_emails"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedAllowDuplicateEmails); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowDuplicateEmails"] = transformedAllowDuplicateEmails + } + + transformedHashConfig, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfig(original["hash_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHashConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hashConfig"] = transformedHashConfig + } + + return transformed, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnabled, err := expandIdentityPlatformProjectDefaultConfigSignInEmailEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enabled"] = transformedEnabled + } + + transformedPasswordRequired, err := expandIdentityPlatformProjectDefaultConfigSignInEmailPasswordRequired(original["password_required"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPasswordRequired); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["passwordRequired"] = transformedPasswordRequired + } + + return transformed, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInEmailEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInEmailPasswordRequired(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInPhoneNumber(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnabled, err := expandIdentityPlatformProjectDefaultConfigSignInPhoneNumberEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enabled"] = transformedEnabled + } + + transformedTestPhoneNumbers, err := expandIdentityPlatformProjectDefaultConfigSignInPhoneNumberTestPhoneNumbers(original["test_phone_numbers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTestPhoneNumbers); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["testPhoneNumbers"] = transformedTestPhoneNumbers + } + + return transformed, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInPhoneNumberEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInPhoneNumberTestPhoneNumbers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInAnonymous(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnabled, err := expandIdentityPlatformProjectDefaultConfigSignInAnonymousEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enabled"] = transformedEnabled + } + + return transformed, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInAnonymousEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInAllowDuplicateEmails(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInHashConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAlgorithm, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigAlgorithm(original["algorithm"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAlgorithm); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["algorithm"] = transformedAlgorithm + } + + transformedSignerKey, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigSignerKey(original["signer_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSignerKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["signerKey"] = transformedSignerKey + } + + transformedSaltSeparator, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigSaltSeparator(original["salt_separator"], d, config) + if 
err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSaltSeparator); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["saltSeparator"] = transformedSaltSeparator + } + + transformedRounds, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigRounds(original["rounds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRounds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rounds"] = transformedRounds + } + + transformedMemoryCost, err := expandIdentityPlatformProjectDefaultConfigSignInHashConfigMemoryCost(original["memory_cost"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMemoryCost); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["memoryCost"] = transformedMemoryCost + } + + return transformed, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInHashConfigAlgorithm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInHashConfigSignerKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInHashConfigSaltSeparator(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInHashConfigRounds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformProjectDefaultConfigSignInHashConfigMemoryCost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_project_default_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_project_default_config_sweeper.go new file mode 100644 index 0000000000..e2743acf41 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_project_default_config_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("IdentityPlatformProjectDefaultConfig", testSweepIdentityPlatformProjectDefaultConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepIdentityPlatformProjectDefaultConfig(region string) error { + resourceName := "IdentityPlatformProjectDefaultConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://identitytoolkit.googleapis.com/v2/projects/{{project}}/config", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: 
config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["projectDefaultConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://identitytoolkit.googleapis.com/v2/projects/{{project}}/config" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant.go new file mode 100644 index 0000000000..9dfe2abd1a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant.go @@ -0,0 +1,440 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceIdentityPlatformTenant() *schema.Resource { + return &schema.Resource{ + Create: resourceIdentityPlatformTenantCreate, + Read: resourceIdentityPlatformTenantRead, + Update: resourceIdentityPlatformTenantUpdate, + Delete: resourceIdentityPlatformTenantDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIdentityPlatformTenantImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `Human friendly 
display name of the tenant.`, + }, + "allow_password_signup": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether to allow email/password user authentication.`, + }, + "disable_auth": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether authentication is disabled for the tenant. If true, the users under +the disabled tenant are not allowed to sign-in. Admins of the disabled tenant +are not able to manage its users.`, + }, + "enable_email_link_signin": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether to enable email link user authentication.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the tenant that is generated by the server`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIdentityPlatformTenantCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandIdentityPlatformTenantDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + allowPasswordSignupProp, err := expandIdentityPlatformTenantAllowPasswordSignup(d.Get("allow_password_signup"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("allow_password_signup"); !tpgresource.IsEmptyValue(reflect.ValueOf(allowPasswordSignupProp)) && (ok || !reflect.DeepEqual(v, allowPasswordSignupProp)) { + obj["allowPasswordSignup"] = allowPasswordSignupProp + } + enableEmailLinkSigninProp, err := 
expandIdentityPlatformTenantEnableEmailLinkSignin(d.Get("enable_email_link_signin"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_email_link_signin"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableEmailLinkSigninProp)) && (ok || !reflect.DeepEqual(v, enableEmailLinkSigninProp)) { + obj["enableEmailLinkSignin"] = enableEmailLinkSigninProp + } + disableAuthProp, err := expandIdentityPlatformTenantDisableAuth(d.Get("disable_auth"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disable_auth"); !tpgresource.IsEmptyValue(reflect.ValueOf(disableAuthProp)) && (ok || !reflect.DeepEqual(v, disableAuthProp)) { + obj["disableAuth"] = disableAuthProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Tenant: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Tenant: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Tenant: %s", err) + } + if err := d.Set("name", flattenIdentityPlatformTenantName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/tenants/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is 
autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + if err := d.Set("name", tpgresource.GetResourceNameFromSelfLink(name.(string))); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + // Store the ID now that we have set the computed name + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/tenants/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Tenant %q: %#v", d.Id(), res) + + return resourceIdentityPlatformTenantRead(d, meta) +} + +func resourceIdentityPlatformTenantRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Tenant: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformTenant %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Tenant: %s", err) + } + + if err := d.Set("name", flattenIdentityPlatformTenantName(res["name"], d, config)); err != nil { + 
return fmt.Errorf("Error reading Tenant: %s", err) + } + if err := d.Set("display_name", flattenIdentityPlatformTenantDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Tenant: %s", err) + } + if err := d.Set("allow_password_signup", flattenIdentityPlatformTenantAllowPasswordSignup(res["allowPasswordSignup"], d, config)); err != nil { + return fmt.Errorf("Error reading Tenant: %s", err) + } + if err := d.Set("enable_email_link_signin", flattenIdentityPlatformTenantEnableEmailLinkSignin(res["enableEmailLinkSignin"], d, config)); err != nil { + return fmt.Errorf("Error reading Tenant: %s", err) + } + if err := d.Set("disable_auth", flattenIdentityPlatformTenantDisableAuth(res["disableAuth"], d, config)); err != nil { + return fmt.Errorf("Error reading Tenant: %s", err) + } + + return nil +} + +func resourceIdentityPlatformTenantUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Tenant: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandIdentityPlatformTenantDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + allowPasswordSignupProp, err := expandIdentityPlatformTenantAllowPasswordSignup(d.Get("allow_password_signup"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("allow_password_signup"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, allowPasswordSignupProp)) { + obj["allowPasswordSignup"] 
= allowPasswordSignupProp + } + enableEmailLinkSigninProp, err := expandIdentityPlatformTenantEnableEmailLinkSignin(d.Get("enable_email_link_signin"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_email_link_signin"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableEmailLinkSigninProp)) { + obj["enableEmailLinkSignin"] = enableEmailLinkSigninProp + } + disableAuthProp, err := expandIdentityPlatformTenantDisableAuth(d.Get("disable_auth"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disable_auth"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disableAuthProp)) { + obj["disableAuth"] = disableAuthProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Tenant %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("allow_password_signup") { + updateMask = append(updateMask, "allowPasswordSignup") + } + + if d.HasChange("enable_email_link_signin") { + updateMask = append(updateMask, "enableEmailLinkSignin") + } + + if d.HasChange("disable_auth") { + updateMask = append(updateMask, "disableAuth") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, 
+ Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Tenant %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Tenant %q: %#v", d.Id(), res) + } + + return resourceIdentityPlatformTenantRead(d, meta) +} + +func resourceIdentityPlatformTenantDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Tenant: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Tenant %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Tenant") + } + + log.Printf("[DEBUG] Finished deleting Tenant %q: %#v", d.Id(), res) + return nil +} + +func resourceIdentityPlatformTenantImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/tenants/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/tenants/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIdentityPlatformTenantName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenIdentityPlatformTenantDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantAllowPasswordSignup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantEnableEmailLinkSignin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantDisableAuth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIdentityPlatformTenantDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantAllowPasswordSignup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantEnableEmailLinkSignin(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantDisableAuth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_default_supported_idp_config.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_default_supported_idp_config.go new file mode 100644 index 0000000000..d385b673c4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_default_supported_idp_config.go @@ -0,0 +1,420 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceIdentityPlatformTenantDefaultSupportedIdpConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceIdentityPlatformTenantDefaultSupportedIdpConfigCreate, + Read: resourceIdentityPlatformTenantDefaultSupportedIdpConfigRead, + Update: resourceIdentityPlatformTenantDefaultSupportedIdpConfigUpdate, + Delete: resourceIdentityPlatformTenantDefaultSupportedIdpConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIdentityPlatformTenantDefaultSupportedIdpConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * 
time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "client_id": { + Type: schema.TypeString, + Required: true, + Description: `OAuth client ID`, + }, + "client_secret": { + Type: schema.TypeString, + Required: true, + Description: `OAuth client secret`, + }, + "idp_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `ID of the IDP. Possible values include: + +* 'apple.com' + +* 'facebook.com' + +* 'gc.apple.com' + +* 'github.com' + +* 'google.com' + +* 'linkedin.com' + +* 'microsoft.com' + +* 'playgames.google.com' + +* 'twitter.com' + +* 'yahoo.com'`, + }, + "tenant": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the tenant where this DefaultSupportedIdpConfig resource exists`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `If this IDP allows the user to sign in`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the default supported IDP config resource`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIdentityPlatformTenantDefaultSupportedIdpConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + clientIdProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigClientId(d.Get("client_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(clientIdProp)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { + obj["clientId"] = clientIdProp + } + clientSecretProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(d.Get("client_secret"), d, 
config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_secret"); !tpgresource.IsEmptyValue(reflect.ValueOf(clientSecretProp)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { + obj["clientSecret"] = clientSecretProp + } + enabledProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs?idpId={{idp_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TenantDefaultSupportedIdpConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TenantDefaultSupportedIdpConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating TenantDefaultSupportedIdpConfig: %s", err) + } + if err := d.Set("name", flattenIdentityPlatformTenantDefaultSupportedIdpConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") + if err != nil { + return fmt.Errorf("Error 
constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating TenantDefaultSupportedIdpConfig %q: %#v", d.Id(), res) + + return resourceIdentityPlatformTenantDefaultSupportedIdpConfigRead(d, meta) +} + +func resourceIdentityPlatformTenantDefaultSupportedIdpConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TenantDefaultSupportedIdpConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformTenantDefaultSupportedIdpConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) + } + + if err := d.Set("name", flattenIdentityPlatformTenantDefaultSupportedIdpConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) + } + if err := d.Set("client_id", flattenIdentityPlatformTenantDefaultSupportedIdpConfigClientId(res["clientId"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) + } + if err := 
d.Set("client_secret", flattenIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(res["clientSecret"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) + } + if err := d.Set("enabled", flattenIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(res["enabled"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) + } + + return nil +} + +func resourceIdentityPlatformTenantDefaultSupportedIdpConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TenantDefaultSupportedIdpConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + clientIdProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigClientId(d.Get("client_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { + obj["clientId"] = clientIdProp + } + clientSecretProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(d.Get("client_secret"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_secret"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { + obj["clientSecret"] = clientSecretProp + } + enabledProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + + url, err 
:= tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating TenantDefaultSupportedIdpConfig %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("client_id") { + updateMask = append(updateMask, "clientId") + } + + if d.HasChange("client_secret") { + updateMask = append(updateMask, "clientSecret") + } + + if d.HasChange("enabled") { + updateMask = append(updateMask, "enabled") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating TenantDefaultSupportedIdpConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TenantDefaultSupportedIdpConfig %q: %#v", d.Id(), res) + } + + return resourceIdentityPlatformTenantDefaultSupportedIdpConfigRead(d, meta) +} + +func resourceIdentityPlatformTenantDefaultSupportedIdpConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TenantDefaultSupportedIdpConfig: %s", err) + } + billingProject = 
project + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting TenantDefaultSupportedIdpConfig %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "TenantDefaultSupportedIdpConfig") + } + + log.Printf("[DEBUG] Finished deleting TenantDefaultSupportedIdpConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceIdentityPlatformTenantDefaultSupportedIdpConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/tenants/(?P[^/]+)/defaultSupportedIdpConfigs/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIdentityPlatformTenantDefaultSupportedIdpConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantDefaultSupportedIdpConfigClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ + return v +} + +func flattenIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIdentityPlatformTenantDefaultSupportedIdpConfigClientId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_inbound_saml_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_inbound_saml_config.go new file mode 100644 index 0000000000..f71e1a0bba --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_inbound_saml_config.go @@ -0,0 +1,732 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceIdentityPlatformTenantInboundSamlConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceIdentityPlatformTenantInboundSamlConfigCreate, + Read: resourceIdentityPlatformTenantInboundSamlConfigRead, + Update: resourceIdentityPlatformTenantInboundSamlConfigUpdate, + Delete: resourceIdentityPlatformTenantInboundSamlConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIdentityPlatformTenantInboundSamlConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `Human friendly display name.`, + }, + "idp_config": { + Type: schema.TypeList, + Required: true, + Description: `SAML IdP configuration when the project acts as the relying party`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "idp_certificates": { + Type: schema.TypeList, + Required: true, + Description: `The IDP's certificate data to verify the signature in the SAMLResponse issued by the IDP.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "x509_certificate": { + Type: schema.TypeString, + Optional: true, + Description: `The x509 certificate`, + }, + }, + }, + }, + "idp_entity_id": { + Type: schema.TypeString, + Required: true, + Description: `Unique identifier for 
all SAML entities`, + }, + "sso_url": { + Type: schema.TypeString, + Required: true, + Description: `URL to send Authentication request to.`, + }, + "sign_request": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates if outbounding SAMLRequest should be signed.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the InboundSamlConfig resource. Must start with 'saml.' and can only have alphanumeric characters, +hyphens, underscores or periods. The part after 'saml.' must also start with a lowercase letter, end with an +alphanumeric character, and have at least 2 characters.`, + }, + "sp_config": { + Type: schema.TypeList, + Required: true, + Description: `SAML SP (Service Provider) configuration when the project acts as the relying party to receive +and accept an authentication assertion issued by a SAML identity provider.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "callback_uri": { + Type: schema.TypeString, + Required: true, + Description: `Callback URI where responses from IDP are handled. 
Must start with 'https://'.`, + }, + "sp_entity_id": { + Type: schema.TypeString, + Required: true, + Description: `Unique identifier for all SAML entities.`, + }, + "sp_certificates": { + Type: schema.TypeList, + Computed: true, + Description: `The IDP's certificate data to verify the signature in the SAMLResponse issued by the IDP.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "x509_certificate": { + Type: schema.TypeString, + Computed: true, + Description: `The x509 certificate`, + }, + }, + }, + }, + }, + }, + }, + "tenant": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the tenant where this inbound SAML config resource exists`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `If this config allows users to sign in with the provider.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIdentityPlatformTenantInboundSamlConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandIdentityPlatformTenantInboundSamlConfigName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + displayNameProp, err := expandIdentityPlatformTenantInboundSamlConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enabledProp, err := 
expandIdentityPlatformTenantInboundSamlConfigEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + idpConfigProp, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfig(d.Get("idp_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("idp_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(idpConfigProp)) && (ok || !reflect.DeepEqual(v, idpConfigProp)) { + obj["idpConfig"] = idpConfigProp + } + spConfigProp, err := expandIdentityPlatformTenantInboundSamlConfigSpConfig(d.Get("sp_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("sp_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(spConfigProp)) && (ok || !reflect.DeepEqual(v, spConfigProp)) { + obj["spConfig"] = spConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs?inboundSamlConfigId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TenantInboundSamlConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TenantInboundSamlConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating TenantInboundSamlConfig: %s", err) + } + + // Store the ID now + id, err := 
tpgresource.ReplaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating TenantInboundSamlConfig %q: %#v", d.Id(), res) + + return resourceIdentityPlatformTenantInboundSamlConfigRead(d, meta) +} + +func resourceIdentityPlatformTenantInboundSamlConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TenantInboundSamlConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformTenantInboundSamlConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) + } + + if err := d.Set("name", flattenIdentityPlatformTenantInboundSamlConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) + } + if err := d.Set("display_name", flattenIdentityPlatformTenantInboundSamlConfigDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading 
TenantInboundSamlConfig: %s", err) + } + if err := d.Set("enabled", flattenIdentityPlatformTenantInboundSamlConfigEnabled(res["enabled"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) + } + if err := d.Set("idp_config", flattenIdentityPlatformTenantInboundSamlConfigIdpConfig(res["idpConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) + } + if err := d.Set("sp_config", flattenIdentityPlatformTenantInboundSamlConfigSpConfig(res["spConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) + } + + return nil +} + +func resourceIdentityPlatformTenantInboundSamlConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TenantInboundSamlConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandIdentityPlatformTenantInboundSamlConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enabledProp, err := expandIdentityPlatformTenantInboundSamlConfigEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + idpConfigProp, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfig(d.Get("idp_config"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("idp_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idpConfigProp)) { + obj["idpConfig"] = idpConfigProp + } + spConfigProp, err := expandIdentityPlatformTenantInboundSamlConfigSpConfig(d.Get("sp_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("sp_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, spConfigProp)) { + obj["spConfig"] = spConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating TenantInboundSamlConfig %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("enabled") { + updateMask = append(updateMask, "enabled") + } + + if d.HasChange("idp_config") { + updateMask = append(updateMask, "idpConfig") + } + + if d.HasChange("sp_config") { + updateMask = append(updateMask, "spConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating TenantInboundSamlConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TenantInboundSamlConfig %q: %#v", d.Id(), res) + } + + return 
resourceIdentityPlatformTenantInboundSamlConfigRead(d, meta) +} + +func resourceIdentityPlatformTenantInboundSamlConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TenantInboundSamlConfig: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting TenantInboundSamlConfig %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "TenantInboundSamlConfig") + } + + log.Printf("[DEBUG] Finished deleting TenantInboundSamlConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceIdentityPlatformTenantInboundSamlConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/tenants/(?P[^/]+)/inboundSamlConfigs/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIdentityPlatformTenantInboundSamlConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenIdentityPlatformTenantInboundSamlConfigDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantInboundSamlConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantInboundSamlConfigIdpConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["idp_entity_id"] = + flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpEntityId(original["idpEntityId"], d, config) + transformed["sso_url"] = + flattenIdentityPlatformTenantInboundSamlConfigIdpConfigSsoUrl(original["ssoUrl"], d, config) + transformed["sign_request"] = + flattenIdentityPlatformTenantInboundSamlConfigIdpConfigSignRequest(original["signRequest"], d, config) + transformed["idp_certificates"] = + flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificates(original["idpCertificates"], d, config) + return []interface{}{transformed} +} +func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpEntityId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigSsoUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenIdentityPlatformTenantInboundSamlConfigIdpConfigSignRequest(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "x509_certificate": flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(original["x509Certificate"], d, config), + }) + } + return transformed +} +func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantInboundSamlConfigSpConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["sp_entity_id"] = + flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpEntityId(original["spEntityId"], d, config) + transformed["callback_uri"] = + flattenIdentityPlatformTenantInboundSamlConfigSpConfigCallbackUri(original["callbackUri"], d, config) + transformed["sp_certificates"] = + flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificates(original["spCertificates"], d, config) + return []interface{}{transformed} +} +func flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpEntityId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenIdentityPlatformTenantInboundSamlConfigSpConfigCallbackUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "x509_certificate": flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificatesX509Certificate(original["x509Certificate"], d, config), + }) + } + return transformed +} +func flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificatesX509Certificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIdentityPlatformTenantInboundSamlConfigName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantInboundSamlConfigDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantInboundSamlConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantInboundSamlConfigIdpConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIdpEntityId, err := 
expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpEntityId(original["idp_entity_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdpEntityId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["idpEntityId"] = transformedIdpEntityId + } + + transformedSsoUrl, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigSsoUrl(original["sso_url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSsoUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ssoUrl"] = transformedSsoUrl + } + + transformedSignRequest, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigSignRequest(original["sign_request"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSignRequest); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["signRequest"] = transformedSignRequest + } + + transformedIdpCertificates, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificates(original["idp_certificates"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdpCertificates); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["idpCertificates"] = transformedIdpCertificates + } + + return transformed, nil +} + +func expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpEntityId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantInboundSamlConfigIdpConfigSsoUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantInboundSamlConfigIdpConfigSignRequest(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedX509Certificate, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(original["x509_certificate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedX509Certificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["x509Certificate"] = transformedX509Certificate + } + + req = append(req, transformed) + } + return req, nil +} + +func expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantInboundSamlConfigSpConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSpEntityId, err := expandIdentityPlatformTenantInboundSamlConfigSpConfigSpEntityId(original["sp_entity_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSpEntityId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["spEntityId"] = transformedSpEntityId + } + + transformedCallbackUri, err := expandIdentityPlatformTenantInboundSamlConfigSpConfigCallbackUri(original["callback_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCallbackUri); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["callbackUri"] = transformedCallbackUri + } + + transformedSpCertificates, err := expandIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificates(original["sp_certificates"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSpCertificates); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["spCertificates"] = transformedSpCertificates + } + + return transformed, nil +} + +func expandIdentityPlatformTenantInboundSamlConfigSpConfigSpEntityId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantInboundSamlConfigSpConfigCallbackUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedX509Certificate, err := expandIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificatesX509Certificate(original["x509_certificate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedX509Certificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["x509Certificate"] = transformedX509Certificate + } + + req = append(req, transformed) + } + return req, nil +} + +func expandIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificatesX509Certificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_inbound_saml_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_inbound_saml_config_sweeper.go new file mode 100644 index 0000000000..2f95721a72 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_inbound_saml_config_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("IdentityPlatformTenantInboundSamlConfig", testSweepIdentityPlatformTenantInboundSamlConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepIdentityPlatformTenantInboundSamlConfig(region string) error { + resourceName := "IdentityPlatformTenantInboundSamlConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://identitytoolkit.googleapis.com/v2/projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: 
config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["tenantInboundSamlConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://identitytoolkit.googleapis.com/v2/projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_oauth_idp_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_oauth_idp_config.go new file mode 100644 index 0000000000..f22421143c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_oauth_idp_config.go @@ -0,0 +1,469 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceIdentityPlatformTenantOauthIdpConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceIdentityPlatformTenantOauthIdpConfigCreate, + Read: resourceIdentityPlatformTenantOauthIdpConfigRead, + Update: resourceIdentityPlatformTenantOauthIdpConfigUpdate, + Delete: resourceIdentityPlatformTenantOauthIdpConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceIdentityPlatformTenantOauthIdpConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "client_id": { + Type: schema.TypeString, + Required: true, + Description: `The client id of an OAuth client.`, + }, + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `Human friendly display name.`, + }, + "issuer": { + Type: schema.TypeString, + Required: true, + Description: `For OIDC Idps, the issuer identifier.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the OauthIdpConfig. 
Must start with 'oidc.'.`, + }, + "tenant": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the tenant where this OIDC IDP configuration resource exists`, + }, + "client_secret": { + Type: schema.TypeString, + Optional: true, + Description: `The client secret of the OAuth client, to enable OIDC code flow.`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `If this config allows users to sign in with the provider.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceIdentityPlatformTenantOauthIdpConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandIdentityPlatformTenantOauthIdpConfigName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + displayNameProp, err := expandIdentityPlatformTenantOauthIdpConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enabledProp, err := expandIdentityPlatformTenantOauthIdpConfigEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + issuerProp, err := expandIdentityPlatformTenantOauthIdpConfigIssuer(d.Get("issuer"), d, config) + 
if err != nil { + return err + } else if v, ok := d.GetOkExists("issuer"); !tpgresource.IsEmptyValue(reflect.ValueOf(issuerProp)) && (ok || !reflect.DeepEqual(v, issuerProp)) { + obj["issuer"] = issuerProp + } + clientIdProp, err := expandIdentityPlatformTenantOauthIdpConfigClientId(d.Get("client_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(clientIdProp)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { + obj["clientId"] = clientIdProp + } + clientSecretProp, err := expandIdentityPlatformTenantOauthIdpConfigClientSecret(d.Get("client_secret"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_secret"); !tpgresource.IsEmptyValue(reflect.ValueOf(clientSecretProp)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { + obj["clientSecret"] = clientSecretProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs?oauthIdpConfigId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TenantOauthIdpConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TenantOauthIdpConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating TenantOauthIdpConfig: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating TenantOauthIdpConfig %q: %#v", d.Id(), res) + + return resourceIdentityPlatformTenantOauthIdpConfigRead(d, meta) +} + +func resourceIdentityPlatformTenantOauthIdpConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TenantOauthIdpConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("IdentityPlatformTenantOauthIdpConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) + } + + if err := d.Set("name", flattenIdentityPlatformTenantOauthIdpConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) + } + if err := d.Set("display_name", flattenIdentityPlatformTenantOauthIdpConfigDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) + } + if err := d.Set("enabled", 
flattenIdentityPlatformTenantOauthIdpConfigEnabled(res["enabled"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) + } + if err := d.Set("issuer", flattenIdentityPlatformTenantOauthIdpConfigIssuer(res["issuer"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) + } + if err := d.Set("client_id", flattenIdentityPlatformTenantOauthIdpConfigClientId(res["clientId"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) + } + if err := d.Set("client_secret", flattenIdentityPlatformTenantOauthIdpConfigClientSecret(res["clientSecret"], d, config)); err != nil { + return fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) + } + + return nil +} + +func resourceIdentityPlatformTenantOauthIdpConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TenantOauthIdpConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandIdentityPlatformTenantOauthIdpConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enabledProp, err := expandIdentityPlatformTenantOauthIdpConfigEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + issuerProp, err := 
expandIdentityPlatformTenantOauthIdpConfigIssuer(d.Get("issuer"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("issuer"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, issuerProp)) { + obj["issuer"] = issuerProp + } + clientIdProp, err := expandIdentityPlatformTenantOauthIdpConfigClientId(d.Get("client_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientIdProp)) { + obj["clientId"] = clientIdProp + } + clientSecretProp, err := expandIdentityPlatformTenantOauthIdpConfigClientSecret(d.Get("client_secret"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_secret"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientSecretProp)) { + obj["clientSecret"] = clientSecretProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating TenantOauthIdpConfig %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("enabled") { + updateMask = append(updateMask, "enabled") + } + + if d.HasChange("issuer") { + updateMask = append(updateMask, "issuer") + } + + if d.HasChange("client_id") { + updateMask = append(updateMask, "clientId") + } + + if d.HasChange("client_secret") { + updateMask = append(updateMask, "clientSecret") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating TenantOauthIdpConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TenantOauthIdpConfig %q: %#v", d.Id(), res) + } + + return resourceIdentityPlatformTenantOauthIdpConfigRead(d, meta) +} + +func resourceIdentityPlatformTenantOauthIdpConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for TenantOauthIdpConfig: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting TenantOauthIdpConfig %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "TenantOauthIdpConfig") + } + + log.Printf("[DEBUG] Finished deleting TenantOauthIdpConfig %q: %#v", d.Id(), res) + return nil +} + +func 
resourceIdentityPlatformTenantOauthIdpConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/tenants/(?P[^/]+)/oauthIdpConfigs/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenIdentityPlatformTenantOauthIdpConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenIdentityPlatformTenantOauthIdpConfigDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantOauthIdpConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantOauthIdpConfigIssuer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantOauthIdpConfigClientId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenIdentityPlatformTenantOauthIdpConfigClientSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandIdentityPlatformTenantOauthIdpConfigName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantOauthIdpConfigDisplayName(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantOauthIdpConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantOauthIdpConfigIssuer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantOauthIdpConfigClientId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandIdentityPlatformTenantOauthIdpConfigClientSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_oauth_idp_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_oauth_idp_config_sweeper.go new file mode 100644 index 0000000000..24ed1d655e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_oauth_idp_config_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("IdentityPlatformTenantOauthIdpConfig", testSweepIdentityPlatformTenantOauthIdpConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepIdentityPlatformTenantOauthIdpConfig(region string) error { + resourceName := "IdentityPlatformTenantOauthIdpConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://identitytoolkit.googleapis.com/v2/projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + 
RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["tenantOauthIdpConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://identitytoolkit.googleapis.com/v2/projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_sweeper.go new file mode 100644 index 0000000000..8d5c31ae5c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/identityplatform/resource_identity_platform_tenant_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package identityplatform + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("IdentityPlatformTenant", testSweepIdentityPlatformTenant) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepIdentityPlatformTenant(region string) error { + resourceName := "IdentityPlatformTenant" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://identitytoolkit.googleapis.com/v2/projects/{{project}}/tenants", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["tenants"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://identitytoolkit.googleapis.com/v2/projects/{{project}}/tenants/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key.go new file mode 100644 index 0000000000..45b0df810e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key.go @@ -0,0 +1,39 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package kms + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleKmsCryptoKey() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceKMSCryptoKey().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "key_ring") + + return &schema.Resource{ + Read: dataSourceGoogleKmsCryptoKeyRead, + Schema: dsSchema, + } + +} + +func dataSourceGoogleKmsCryptoKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + keyRingId, err := parseKmsKeyRingId(d.Get("key_ring").(string), config) + if err != nil { + return err + } + + cryptoKeyId := KmsCryptoKeyId{ + KeyRingId: *keyRingId, + Name: d.Get("name").(string), + } + + d.SetId(cryptoKeyId.CryptoKeyId()) + + return resourceKMSCryptoKeyRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key_version.go new file mode 100644 index 0000000000..e72a7067d9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_crypto_key_version.go 
@@ -0,0 +1,206 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package kms + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleKmsCryptoKeyVersion() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleKmsCryptoKeyVersionRead, + Schema: map[string]*schema.Schema{ + "crypto_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "version": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + "algorithm": { + Type: schema.TypeString, + Computed: true, + }, + "protection_level": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "public_key": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "algorithm": { + Type: schema.TypeString, + Computed: true, + }, + "pem": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceGoogleKmsCryptoKeyVersionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}/cryptoKeyVersions/{{version}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Getting attributes for CryptoKeyVersion: %#v", url) + + cryptoKeyId, err := ParseKmsCryptoKeyId(d.Get("crypto_key").(string), config) + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: cryptoKeyId.KeyRingId.Project, + 
RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("KmsCryptoKeyVersion %q", d.Id())) + } + + if err := d.Set("version", flattenKmsCryptoKeyVersionVersion(res["name"], d)); err != nil { + return fmt.Errorf("Error setting CryptoKeyVersion: %s", err) + } + if err := d.Set("name", flattenKmsCryptoKeyVersionName(res["name"], d)); err != nil { + return fmt.Errorf("Error setting CryptoKeyVersion: %s", err) + } + if err := d.Set("state", flattenKmsCryptoKeyVersionState(res["state"], d)); err != nil { + return fmt.Errorf("Error setting CryptoKeyVersion: %s", err) + } + if err := d.Set("protection_level", flattenKmsCryptoKeyVersionProtectionLevel(res["protectionLevel"], d)); err != nil { + return fmt.Errorf("Error setting CryptoKeyVersion: %s", err) + } + if err := d.Set("algorithm", flattenKmsCryptoKeyVersionAlgorithm(res["algorithm"], d)); err != nil { + return fmt.Errorf("Error setting CryptoKeyVersion: %s", err) + } + + url, err = tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Getting purpose of CryptoKey: %#v", url) + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: cryptoKeyId.KeyRingId.Project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("KmsCryptoKey %q", d.Id())) + } + + if res["purpose"] == "ASYMMETRIC_SIGN" || res["purpose"] == "ASYMMETRIC_DECRYPT" { + url, err = tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}/cryptoKeyVersions/{{version}}/publicKey") + if err != nil { + return err + } + log.Printf("[DEBUG] Getting public key of CryptoKeyVersion: %#v", url) + + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: cryptoKeyId.KeyRingId.Project, + RawURL: url, + UserAgent: userAgent, + 
Timeout: d.Timeout(schema.TimeoutRead), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsCryptoKeyVersionsPendingGeneration}, + }) + + if err != nil { + log.Printf("Error generating public key: %s", err) + return err + } + + if err := d.Set("public_key", flattenKmsCryptoKeyVersionPublicKey(res, d)); err != nil { + return fmt.Errorf("Error setting CryptoKeyVersion public key: %s", err) + } + } + d.SetId(fmt.Sprintf("//cloudkms.googleapis.com/v1/%s/cryptoKeyVersions/%d", d.Get("crypto_key"), d.Get("version"))) + + return nil +} + +func flattenKmsCryptoKeyVersionVersion(v interface{}, d *schema.ResourceData) interface{} { + parts := strings.Split(v.(string), "/") + version := parts[len(parts)-1] + // Handles the string fixed64 format + if intVal, err := tpgresource.StringToFixed64(version); err == nil { + return intVal + } // let terraform core handle it if we can't convert the string to an int. + return v +} + +func flattenKmsCryptoKeyVersionName(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenKmsCryptoKeyVersionState(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenKmsCryptoKeyVersionProtectionLevel(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenKmsCryptoKeyVersionAlgorithm(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenKmsCryptoKeyVersionPublicKey(v interface{}, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pem"] = + flattenKmsCryptoKeyVersionPublicKeyPem(original["pem"], d) + transformed["algorithm"] = + flattenKmsCryptoKeyVersionPublicKeyAlgorithm(original["algorithm"], d) + return []interface{}{transformed} +} +func flattenKmsCryptoKeyVersionPublicKeyPem(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func 
flattenKmsCryptoKeyVersionPublicKeyAlgorithm(v interface{}, d *schema.ResourceData) interface{} { + return v +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_key_ring.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_key_ring.go new file mode 100644 index 0000000000..3b1e2337a4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_key_ring.go @@ -0,0 +1,39 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package kms + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleKmsKeyRing() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceKMSKeyRing().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "location") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleKmsKeyRingRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleKmsKeyRingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + keyRingId := KmsKeyRingId{ + Name: d.Get("name").(string), + Location: d.Get("location").(string), + Project: project, + } + d.SetId(keyRingId.KeyRingId()) + + return resourceKMSKeyRingRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_secret.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_secret.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_secret.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_secret.go index d87b19d479..11cd1291c1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_secret.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_secret.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package kms import ( "google.golang.org/api/cloudkms/v1" @@ -7,6 +9,9 @@ import ( "fmt" "log" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -36,13 +41,13 @@ func DataSourceGoogleKmsSecret() *schema.Resource { } func dataSourceGoogleKmsSecretRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - cryptoKeyId, err := parseKmsCryptoKeyId(d.Get("crypto_key").(string), config) + cryptoKeyId, err := ParseKmsCryptoKeyId(d.Get("crypto_key").(string), config) if err != nil { return err @@ -58,7 +63,7 @@ func dataSourceGoogleKmsSecretRead(d *schema.ResourceData, meta interface{}) err kmsDecryptRequest.AdditionalAuthenticatedData = aad.(string) } - decryptResponse, err := 
config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.Decrypt(cryptoKeyId.cryptoKeyId(), kmsDecryptRequest).Do() + decryptResponse, err := config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.Decrypt(cryptoKeyId.CryptoKeyId(), kmsDecryptRequest).Do() if err != nil { return fmt.Errorf("Error decrypting ciphertext: %s", err) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_secret_asymmetric.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_secret_asymmetric.go new file mode 100644 index 0000000000..3d831111e1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_secret_asymmetric.go @@ -0,0 +1,3 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package kms diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_secret_ciphertext.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_secret_ciphertext.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_secret_ciphertext.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_secret_ciphertext.go index a4111ee008..4f75aea5bf 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_kms_secret_ciphertext.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/data_source_google_kms_secret_ciphertext.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package kms import ( "google.golang.org/api/cloudkms/v1" @@ -7,6 +9,9 @@ import ( "fmt" "log" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -33,13 +38,13 @@ func DataSourceGoogleKmsSecretCiphertext() *schema.Resource { } func dataSourceGoogleKmsSecretCiphertextRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - cryptoKeyId, err := parseKmsCryptoKeyId(d.Get("crypto_key").(string), config) + cryptoKeyId, err := ParseKmsCryptoKeyId(d.Get("crypto_key").(string), config) if err != nil { return err @@ -51,7 +56,7 @@ func dataSourceGoogleKmsSecretCiphertextRead(d *schema.ResourceData, meta interf Plaintext: plaintext, } - encryptCall := config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.Encrypt(cryptoKeyId.cryptoKeyId(), kmsEncryptRequest) + encryptCall := config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.Encrypt(cryptoKeyId.CryptoKeyId(), kmsEncryptRequest) if config.UserProjectOverride { encryptCall.Header().Set("X-Goog-User-Project", cryptoKeyId.KeyRingId.Project) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/iam_kms_crypto_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/iam_kms_crypto_key.go new file mode 100644 index 0000000000..6b2f30735d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/iam_kms_crypto_key.go @@ -0,0 +1,112 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package kms + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudkms/v1" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamKmsCryptoKeySchema = map[string]*schema.Schema{ + "crypto_key_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, +} + +type KmsCryptoKeyIamUpdater struct { + resourceId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewKmsCryptoKeyIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + cryptoKey := d.Get("crypto_key_id").(string) + cryptoKeyId, err := ParseKmsCryptoKeyId(cryptoKey, config) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{err}}", cryptoKey), err) + } + + return &KmsCryptoKeyIamUpdater{ + resourceId: cryptoKeyId.CryptoKeyId(), + d: d, + Config: config, + }, nil +} + +func CryptoIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + cryptoKeyId, err := ParseKmsCryptoKeyId(d.Id(), config) + if err != nil { + return err + } + if err := d.Set("crypto_key_id", cryptoKeyId.CryptoKeyId()); err != nil { + return fmt.Errorf("Error setting crypto_key_id: %s", err) + } + d.SetId(cryptoKeyId.CryptoKeyId()) + return nil +} + +func (u *KmsCryptoKeyIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := 
u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.GetIamPolicy(u.resourceId).OptionsRequestedPolicyVersion(tpgiamresource.IamPolicyVersion).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := kmsToResourceManagerPolicy(p) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return cloudResourcePolicy, nil +} + +func (u *KmsCryptoKeyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + kmsPolicy, err := resourceManagerToKmsPolicy(policy) + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + _, err = u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.SetIamPolicy(u.resourceId, &cloudkms.SetIamPolicyRequest{ + Policy: kmsPolicy, + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *KmsCryptoKeyIamUpdater) GetResourceId() string { + return u.resourceId +} + +func (u *KmsCryptoKeyIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-kms-crypto-key-%s", u.resourceId) +} + +func (u *KmsCryptoKeyIamUpdater) DescribeResource() string { + return fmt.Sprintf("KMS CryptoKey %q", u.resourceId) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/iam_kms_key_ring.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/iam_kms_key_ring.go new file mode 100644 index 0000000000..2cd60fab2d --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/iam_kms_key_ring.go @@ -0,0 +1,131 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package kms + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudkms/v1" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamKmsKeyRingSchema = map[string]*schema.Schema{ + "key_ring_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, +} + +type KmsKeyRingIamUpdater struct { + resourceId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewKmsKeyRingIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + keyRing := d.Get("key_ring_id").(string) + keyRingId, err := parseKmsKeyRingId(keyRing, config) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{err}}", keyRing), err) + } + + return &KmsKeyRingIamUpdater{ + resourceId: keyRingId.KeyRingId(), + d: d, + Config: config, + }, nil +} + +func KeyRingIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + keyRingId, err := parseKmsKeyRingId(d.Id(), config) + if err != nil { + return err + } + + if err := d.Set("key_ring_id", keyRingId.KeyRingId()); err != nil { + return fmt.Errorf("Error setting key_ring_id: %s", err) + } + d.SetId(keyRingId.KeyRingId()) + return nil +} + +func (u *KmsKeyRingIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err 
:= u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.GetIamPolicy(u.resourceId).OptionsRequestedPolicyVersion(tpgiamresource.IamPolicyVersion).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := kmsToResourceManagerPolicy(p) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return cloudResourcePolicy, nil +} + +func (u *KmsKeyRingIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + kmsPolicy, err := resourceManagerToKmsPolicy(policy) + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.SetIamPolicy(u.resourceId, &cloudkms.SetIamPolicyRequest{ + Policy: kmsPolicy, + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *KmsKeyRingIamUpdater) GetResourceId() string { + return u.resourceId +} + +func (u *KmsKeyRingIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-kms-key-ring-%s", u.resourceId) +} + +func (u *KmsKeyRingIamUpdater) DescribeResource() string { + return fmt.Sprintf("KMS KeyRing %q", u.resourceId) +} + +func resourceManagerToKmsPolicy(p *cloudresourcemanager.Policy) (*cloudkms.Policy, error) { + out := &cloudkms.Policy{} + err := tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a v1 policy to a kms policy: {{err}}", err) + } + return out, nil +} + +func kmsToResourceManagerPolicy(p *cloudkms.Policy) (*cloudresourcemanager.Policy, error) { + out := &cloudresourcemanager.Policy{} + err := 
tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a kms policy to a v1 policy: {{err}}", err) + } + return out, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/kms_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/kms_utils.go new file mode 100644 index 0000000000..7cf97d49dd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/kms_utils.go @@ -0,0 +1,273 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package kms + +import ( + "fmt" + "regexp" + "strconv" + "strings" + "time" + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudkms/v1" +) + +type KmsKeyRingId struct { + Project string + Location string + Name string +} + +func (s *KmsKeyRingId) KeyRingId() string { + return fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", s.Project, s.Location, s.Name) +} + +func (s *KmsKeyRingId) TerraformId() string { + return fmt.Sprintf("%s/%s/%s", s.Project, s.Location, s.Name) +} + +func parseKmsKeyRingId(id string, config *transport_tpg.Config) (*KmsKeyRingId, error) { + parts := strings.Split(id, "/") + + KeyRingIdRegex := regexp.MustCompile("^(" + verify.ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})$") + KeyRingIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})$") + keyRingRelativeLinkRegex := regexp.MustCompile("^projects/(" + verify.ProjectRegex + ")/locations/([a-z0-9-]+)/keyRings/([a-zA-Z0-9_-]{1,63})$") + + if KeyRingIdRegex.MatchString(id) { + return &KmsKeyRingId{ + Project: parts[0], + Location: parts[1], + Name: parts[2], + }, nil + } + + if 
KeyRingIdWithoutProjectRegex.MatchString(id) { + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{location}/{keyRingName}` id format.") + } + + return &KmsKeyRingId{ + Project: config.Project, + Location: parts[0], + Name: parts[1], + }, nil + } + + if parts := keyRingRelativeLinkRegex.FindStringSubmatch(id); parts != nil { + return &KmsKeyRingId{ + Project: parts[1], + Location: parts[2], + Name: parts[3], + }, nil + } + return nil, fmt.Errorf("Invalid KeyRing id format, expecting `{projectId}/{locationId}/{keyRingName}` or `{locationId}/{keyRingName}.`") +} + +func kmsCryptoKeyRingsEquivalent(k, old, new string, d *schema.ResourceData) bool { + KeyRingIdWithSpecifiersRegex := regexp.MustCompile("^projects/(" + verify.ProjectRegex + ")/locations/([a-z0-9-])+/keyRings/([a-zA-Z0-9_-]{1,63})$") + normalizedKeyRingIdRegex := regexp.MustCompile("^(" + verify.ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})$") + if matches := KeyRingIdWithSpecifiersRegex.FindStringSubmatch(new); matches != nil { + normMatches := normalizedKeyRingIdRegex.FindStringSubmatch(old) + return normMatches != nil && normMatches[1] == matches[1] && normMatches[2] == matches[2] && normMatches[3] == matches[3] + } + return false +} + +type KmsCryptoKeyId struct { + KeyRingId KmsKeyRingId + Name string +} + +func (s *KmsCryptoKeyId) CryptoKeyId() string { + return fmt.Sprintf("%s/cryptoKeys/%s", s.KeyRingId.KeyRingId(), s.Name) +} + +func (s *KmsCryptoKeyId) TerraformId() string { + return fmt.Sprintf("%s/%s", s.KeyRingId.TerraformId(), s.Name) +} + +type kmsCryptoKeyVersionId struct { + CryptoKeyId KmsCryptoKeyId + Name string +} + +func (s *kmsCryptoKeyVersionId) cryptoKeyVersionId() string { + return fmt.Sprintf(s.Name) +} + +func (s *kmsCryptoKeyVersionId) TerraformId() string { + return fmt.Sprintf("%s/%s", s.CryptoKeyId.TerraformId(), s.Name) +} + +func validateKmsCryptoKeyRotationPeriod(value interface{}, _ string) (ws 
[]string, errors []error) { + period := value.(string) + pattern := regexp.MustCompile(`^([0-9.]*\d)s$`) + match := pattern.FindStringSubmatch(period) + + if len(match) == 0 { + errors = append(errors, fmt.Errorf("Invalid rotation period format: %s", period)) + // Cannot continue to validate because we cannot extract a number. + return + } + + number := match[1] + seconds, err := strconv.ParseFloat(number, 64) + + if err != nil { + errors = append(errors, err) + } else { + if seconds < 86400.0 { + errors = append(errors, fmt.Errorf("Rotation period must be greater than one day")) + } + + parts := strings.Split(number, ".") + + if len(parts) > 1 && len(parts[1]) > 9 { + errors = append(errors, fmt.Errorf("Rotation period cannot have more than 9 fractional digits")) + } + } + + return +} + +func kmsCryptoKeyNextRotation(now time.Time, period string) (result string, err error) { + var duration time.Duration + + duration, err = time.ParseDuration(period) + + if err == nil { + result = now.UTC().Add(duration).Format(time.RFC3339Nano) + } + + return +} + +func ParseKmsCryptoKeyId(id string, config *transport_tpg.Config) (*KmsCryptoKeyId, error) { + parts := strings.Split(id, "/") + + cryptoKeyIdRegex := regexp.MustCompile("^(" + verify.ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})/([a-zA-Z0-9_-]{1,63})$") + cryptoKeyIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})/([a-zA-Z0-9_-]{1,63})$") + cryptoKeyRelativeLinkRegex := regexp.MustCompile("^projects/(" + verify.ProjectRegex + ")/locations/([a-z0-9-]+)/keyRings/([a-zA-Z0-9_-]{1,63})/cryptoKeys/([a-zA-Z0-9_-]{1,63})$") + + if cryptoKeyIdRegex.MatchString(id) { + return &KmsCryptoKeyId{ + KeyRingId: KmsKeyRingId{ + Project: parts[0], + Location: parts[1], + Name: parts[2], + }, + Name: parts[3], + }, nil + } + + if cryptoKeyIdWithoutProjectRegex.MatchString(id) { + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the 
`{location}/{keyRingName}/{cryptoKeyName}` id format.") + } + + return &KmsCryptoKeyId{ + KeyRingId: KmsKeyRingId{ + Project: config.Project, + Location: parts[0], + Name: parts[1], + }, + Name: parts[2], + }, nil + } + + if parts := cryptoKeyRelativeLinkRegex.FindStringSubmatch(id); parts != nil { + return &KmsCryptoKeyId{ + KeyRingId: KmsKeyRingId{ + Project: parts[1], + Location: parts[2], + Name: parts[3], + }, + Name: parts[4], + }, nil + } + + return nil, fmt.Errorf("Invalid CryptoKey id format, expecting `{projectId}/{locationId}/{KeyringName}/{cryptoKeyName}` or `{locationId}/{keyRingName}/{cryptoKeyName}, got id: %s`", id) +} +func parseKmsCryptoKeyVersionId(id string, config *transport_tpg.Config) (*kmsCryptoKeyVersionId, error) { + cryptoKeyVersionRelativeLinkRegex := regexp.MustCompile("^projects/(" + verify.ProjectRegex + ")/locations/([a-z0-9-]+)/keyRings/([a-zA-Z0-9_-]{1,63})/cryptoKeys/([a-zA-Z0-9_-]{1,63})/cryptoKeyVersions/([a-zA-Z0-9_-]{1,63})$") + + if parts := cryptoKeyVersionRelativeLinkRegex.FindStringSubmatch(id); parts != nil { + return &kmsCryptoKeyVersionId{ + CryptoKeyId: KmsCryptoKeyId{ + KeyRingId: KmsKeyRingId{ + Project: parts[1], + Location: parts[2], + Name: parts[3], + }, + Name: parts[4], + }, + Name: "projects/" + parts[1] + "/locations/" + parts[2] + "/keyRings/" + parts[3] + "/cryptoKeys/" + parts[4] + "/cryptoKeyVersions/" + parts[5], + }, nil + } + return nil, fmt.Errorf("Invalid CryptoKeyVersion id format, expecting `{projectId}/{locationId}/{KeyringName}/{cryptoKeyName}/{cryptoKeyVersion}` or `{locationId}/{keyRingName}/{cryptoKeyName}/{cryptoKeyVersion}, got id: %s`", id) +} + +func clearCryptoKeyVersions(cryptoKeyId *KmsCryptoKeyId, userAgent string, config *transport_tpg.Config) error { + versionsClient := config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.CryptoKeyVersions + + listCall := versionsClient.List(cryptoKeyId.CryptoKeyId()) + if config.UserProjectOverride { + 
listCall.Header().Set("X-Goog-User-Project", cryptoKeyId.KeyRingId.Project) + } + versionsResponse, err := listCall.Do() + + if err != nil { + return err + } + + for _, version := range versionsResponse.CryptoKeyVersions { + // skip the versions that have been destroyed earlier + if version.State != "DESTROYED" && version.State != "DESTROY_SCHEDULED" { + request := &cloudkms.DestroyCryptoKeyVersionRequest{} + destroyCall := versionsClient.Destroy(version.Name, request) + if config.UserProjectOverride { + destroyCall.Header().Set("X-Goog-User-Project", cryptoKeyId.KeyRingId.Project) + } + _, err = destroyCall.Do() + + if err != nil { + return err + } + } + } + + return nil +} + +func deleteCryptoKeyVersions(cryptoKeyVersionId *kmsCryptoKeyVersionId, d *schema.ResourceData, userAgent string, config *transport_tpg.Config) error { + versionsClient := config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.CryptoKeyVersions + request := &cloudkms.DestroyCryptoKeyVersionRequest{} + destroyCall := versionsClient.Destroy(cryptoKeyVersionId.Name, request) + if config.UserProjectOverride { + destroyCall.Header().Set("X-Goog-User-Project", cryptoKeyVersionId.CryptoKeyId.KeyRingId.Project) + } + _, err := destroyCall.Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ID %s", cryptoKeyVersionId.Name)) + } + + return nil +} + +func disableCryptoKeyRotation(cryptoKeyId *KmsCryptoKeyId, userAgent string, config *transport_tpg.Config) error { + keyClient := config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys + patchCall := keyClient.Patch(cryptoKeyId.CryptoKeyId(), &cloudkms.CryptoKey{ + NullFields: []string{"rotationPeriod", "nextRotationTime"}, + }). 
+ UpdateMask("rotationPeriod,nextRotationTime") + if config.UserProjectOverride { + patchCall.Header().Set("X-Goog-User-Project", cryptoKeyId.KeyRingId.Project) + } + _, err := patchCall.Do() + + return err +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_crypto_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_crypto_key.go new file mode 100644 index 0000000000..bd4c537d4b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_crypto_key.go @@ -0,0 +1,656 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package kms + +import ( + "context" + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceKMSCryptoKey() *schema.Resource { + return &schema.Resource{ + Create: resourceKMSCryptoKeyCreate, + Read: resourceKMSCryptoKeyRead, + Update: resourceKMSCryptoKeyUpdate, + Delete: resourceKMSCryptoKeyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceKMSCryptoKeyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + SchemaVersion: 1, + + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceKMSCryptoKeyResourceV0().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceKMSCryptoKeyUpgradeV0, + Version: 0, + }, + }, + + Schema: map[string]*schema.Schema{ + "key_ring": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: kmsCryptoKeyRingsEquivalent, + Description: `The KeyRing that this key belongs to. +Format: ''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}''.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name for the CryptoKey.`, + }, + "destroy_scheduled_duration": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The period of time that versions of this key spend in the DESTROY_SCHEDULED state before transitioning to DESTROYED. 
+If not specified at creation time, the default duration is 24 hours.`, + }, + "import_only": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Whether this key may contain imported versions only.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels with user-defined metadata to apply to this resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "purpose": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The immutable purpose of this CryptoKey. See the +[purpose reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose) +for possible inputs. +Default value is "ENCRYPT_DECRYPT".`, + Default: "ENCRYPT_DECRYPT", + }, + "rotation_period": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.OrEmpty(validateKmsCryptoKeyRotationPeriod), + Description: `Every time this period passes, generate a new CryptoKeyVersion and set it as the primary. +The first rotation will take place after the specified period. The rotation period has +the format of a decimal number with up to 9 fractional digits, followed by the +letter 's' (seconds). It must be greater than a day (ie, 86400).`, + }, + "skip_initial_version_creation": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If set to true, the request will create a CryptoKey without any CryptoKeyVersions. +You must use the 'google_kms_key_ring_import_job' resource to import the CryptoKeyVersion.`, + }, + "version_template": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `A template describing settings for new crypto key versions.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "algorithm": { + Type: schema.TypeString, + Required: true, + Description: `The algorithm to use when creating a version based on this template. 
+See the [algorithm reference](https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm) for possible inputs.`, + }, + "protection_level": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL", "EXTERNAL_VPC". Defaults to "SOFTWARE".`, + Default: "SOFTWARE", + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceKMSCryptoKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandKMSCryptoKeyLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + purposeProp, err := expandKMSCryptoKeyPurpose(d.Get("purpose"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("purpose"); !tpgresource.IsEmptyValue(reflect.ValueOf(purposeProp)) && (ok || !reflect.DeepEqual(v, purposeProp)) { + obj["purpose"] = purposeProp + } + rotationPeriodProp, err := expandKMSCryptoKeyRotationPeriod(d.Get("rotation_period"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rotation_period"); !tpgresource.IsEmptyValue(reflect.ValueOf(rotationPeriodProp)) && (ok || !reflect.DeepEqual(v, rotationPeriodProp)) { + obj["rotationPeriod"] = rotationPeriodProp + } + versionTemplateProp, err := expandKMSCryptoKeyVersionTemplate(d.Get("version_template"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_template"); !tpgresource.IsEmptyValue(reflect.ValueOf(versionTemplateProp)) && (ok || !reflect.DeepEqual(v, 
versionTemplateProp)) { + obj["versionTemplate"] = versionTemplateProp + } + destroyScheduledDurationProp, err := expandKMSCryptoKeyDestroyScheduledDuration(d.Get("destroy_scheduled_duration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("destroy_scheduled_duration"); !tpgresource.IsEmptyValue(reflect.ValueOf(destroyScheduledDurationProp)) && (ok || !reflect.DeepEqual(v, destroyScheduledDurationProp)) { + obj["destroyScheduledDuration"] = destroyScheduledDurationProp + } + importOnlyProp, err := expandKMSCryptoKeyImportOnly(d.Get("import_only"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("import_only"); !tpgresource.IsEmptyValue(reflect.ValueOf(importOnlyProp)) && (ok || !reflect.DeepEqual(v, importOnlyProp)) { + obj["importOnly"] = importOnlyProp + } + + obj, err = resourceKMSCryptoKeyEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{key_ring}}/cryptoKeys?cryptoKeyId={{name}}&skipInitialVersionCreation={{skip_initial_version_creation}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new CryptoKey: %#v", obj) + billingProject := "" + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating CryptoKey: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{key_ring}}/cryptoKeys/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: 
%s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating CryptoKey %q: %#v", d.Id(), res) + + return resourceKMSCryptoKeyRead(d, meta) +} + +func resourceKMSCryptoKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{key_ring}}/cryptoKeys/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("KMSCryptoKey %q", d.Id())) + } + + res, err = resourceKMSCryptoKeyDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing KMSCryptoKey because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("labels", flattenKMSCryptoKeyLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading CryptoKey: %s", err) + } + if err := d.Set("purpose", flattenKMSCryptoKeyPurpose(res["purpose"], d, config)); err != nil { + return fmt.Errorf("Error reading CryptoKey: %s", err) + } + if err := d.Set("rotation_period", flattenKMSCryptoKeyRotationPeriod(res["rotationPeriod"], d, config)); err != nil { + return fmt.Errorf("Error reading CryptoKey: %s", err) + } + if err := d.Set("version_template", flattenKMSCryptoKeyVersionTemplate(res["versionTemplate"], d, config)); err != nil { + return fmt.Errorf("Error reading CryptoKey: %s", err) + } + if err := d.Set("destroy_scheduled_duration", flattenKMSCryptoKeyDestroyScheduledDuration(res["destroyScheduledDuration"], d, config)); err != nil { + return fmt.Errorf("Error reading CryptoKey: %s", err) + } + if err := d.Set("import_only", flattenKMSCryptoKeyImportOnly(res["importOnly"], d, config)); err != nil { + return fmt.Errorf("Error reading CryptoKey: %s", err) + } + + return nil +} + +func resourceKMSCryptoKeyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + labelsProp, err := expandKMSCryptoKeyLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + rotationPeriodProp, err := expandKMSCryptoKeyRotationPeriod(d.Get("rotation_period"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rotation_period"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rotationPeriodProp)) { + obj["rotationPeriod"] = rotationPeriodProp + } + versionTemplateProp, err := expandKMSCryptoKeyVersionTemplate(d.Get("version_template"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_template"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, versionTemplateProp)) { + obj["versionTemplate"] = versionTemplateProp + } + + obj, err = resourceKMSCryptoKeyUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{key_ring}}/cryptoKeys/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating CryptoKey %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("rotation_period") { + updateMask = append(updateMask, "rotationPeriod", + "nextRotationTime") + } + + if d.HasChange("version_template") { + updateMask = append(updateMask, "versionTemplate.algorithm") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating CryptoKey %q: %s", d.Id(), err) + } 
else { + log.Printf("[DEBUG] Finished updating CryptoKey %q: %#v", d.Id(), res) + } + + return resourceKMSCryptoKeyRead(d, meta) +} + +func resourceKMSCryptoKeyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + cryptoKeyId, err := ParseKmsCryptoKeyId(d.Id(), config) + if err != nil { + return err + } + + log.Printf(` +[WARNING] KMS CryptoKey resources cannot be deleted from GCP. The CryptoKey %s will be removed from Terraform state, +and all its CryptoKeyVersions will be destroyed, but it will still be present in the project.`, cryptoKeyId.CryptoKeyId()) + + // Delete all versions of the key + if err := clearCryptoKeyVersions(cryptoKeyId, userAgent, config); err != nil { + return err + } + + // Make sure automatic key rotation is disabled if set + if d.Get("rotation_period") != "" { + if err := disableCryptoKeyRotation(cryptoKeyId, userAgent, config); err != nil { + return fmt.Errorf( + "While cryptoKeyVersions were cleared, Terraform was unable to disable automatic rotation of key due to an error: %s."+ + "Please retry or manually disable automatic rotation to prevent creation of a new version of this key.", err) + } + } + + d.SetId("") + return nil +} + +func resourceKMSCryptoKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + cryptoKeyId, err := ParseKmsCryptoKeyId(d.Id(), config) + if err != nil { + return nil, err + } + + if err := d.Set("key_ring", cryptoKeyId.KeyRingId.KeyRingId()); err != nil { + return nil, fmt.Errorf("Error setting key_ring: %s", err) + } + if err := d.Set("name", cryptoKeyId.Name); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + if err := d.Set("skip_initial_version_creation", false); err != nil { + return nil, fmt.Errorf("Error setting skip_initial_version_creation: 
%s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "{{key_ring}}/cryptoKeys/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenKMSCryptoKeyLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSCryptoKeyPurpose(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSCryptoKeyRotationPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSCryptoKeyVersionTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["algorithm"] = + flattenKMSCryptoKeyVersionTemplateAlgorithm(original["algorithm"], d, config) + transformed["protection_level"] = + flattenKMSCryptoKeyVersionTemplateProtectionLevel(original["protectionLevel"], d, config) + return []interface{}{transformed} +} +func flattenKMSCryptoKeyVersionTemplateAlgorithm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSCryptoKeyVersionTemplateProtectionLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSCryptoKeyDestroyScheduledDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSCryptoKeyImportOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandKMSCryptoKeyLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := 
make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandKMSCryptoKeyPurpose(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSCryptoKeyRotationPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSCryptoKeyVersionTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAlgorithm, err := expandKMSCryptoKeyVersionTemplateAlgorithm(original["algorithm"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAlgorithm); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["algorithm"] = transformedAlgorithm + } + + transformedProtectionLevel, err := expandKMSCryptoKeyVersionTemplateProtectionLevel(original["protection_level"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProtectionLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["protectionLevel"] = transformedProtectionLevel + } + + return transformed, nil +} + +func expandKMSCryptoKeyVersionTemplateAlgorithm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSCryptoKeyVersionTemplateProtectionLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSCryptoKeyDestroyScheduledDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandKMSCryptoKeyImportOnly(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceKMSCryptoKeyEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // if rotationPeriod is set, nextRotationTime must also be set. + if d.Get("rotation_period") != "" { + rotationPeriod := d.Get("rotation_period").(string) + nextRotation, err := kmsCryptoKeyNextRotation(time.Now(), rotationPeriod) + + if err != nil { + return nil, fmt.Errorf("Error setting CryptoKey rotation period: %s", err.Error()) + } + + obj["nextRotationTime"] = nextRotation + } + + // set to false if it is not true explicitly + if !(d.Get("skip_initial_version_creation").(bool)) { + if err := d.Set("skip_initial_version_creation", false); err != nil { + return nil, fmt.Errorf("Error setting skip_initial_version_creation: %s", err) + } + } + + return obj, nil +} + +func resourceKMSCryptoKeyUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // if rotationPeriod is changed, nextRotationTime must also be set. + if d.HasChange("rotation_period") && d.Get("rotation_period") != "" { + rotationPeriod := d.Get("rotation_period").(string) + nextRotation, err := kmsCryptoKeyNextRotation(time.Now(), rotationPeriod) + + if err != nil { + return nil, fmt.Errorf("Error setting CryptoKey rotation period: %s", err.Error()) + } + + obj["nextRotationTime"] = nextRotation + } + + return obj, nil +} + +func resourceKMSCryptoKeyDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // Modify the name to be the user specified form. + // We can't just ignore_read on `name` as the linter will + // complain that the returned `res` is never used afterwards. + // Some field needs to be actually set, and we chose `name`. 
+ res["name"] = d.Get("name").(string) + return res, nil +} + +func resourceKMSCryptoKeyResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "key_ring": { + Type: schema.TypeString, + Required: true, + }, + "rotation_period": { + Type: schema.TypeString, + Optional: true, + }, + "version_template": { + Type: schema.TypeList, + Optional: true, + }, + "self_link": { + Type: schema.TypeString, + }, + }, + } +} + +func ResourceKMSCryptoKeyUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + + config := meta.(*transport_tpg.Config) + keyRingId := rawState["key_ring"].(string) + parsed, err := parseKmsKeyRingId(keyRingId, config) + if err != nil { + return nil, err + } + rawState["key_ring"] = parsed.KeyRingId() + + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_crypto_key_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_crypto_key_version.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_crypto_key_version.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_crypto_key_version.go index 048e7842b9..2f1ffac7bb 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_kms_crypto_key_version.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_crypto_key_version.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package kms import ( "fmt" @@ -22,6 +25,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceKMSCryptoKeyVersion() *schema.Resource { @@ -53,7 +60,7 @@ Format: ''projects/{{project}}/locations/{{location}}/keyRings/{{keyring}}/crypt Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"PENDING_GENERATION", "ENABLED", "DISABLED", "DESTROYED", "DESTROY_SCHEDULED", "PENDING_IMPORT", "IMPORT_FAILED", ""}), + ValidateFunc: verify.ValidateEnum([]string{"PENDING_GENERATION", "ENABLED", "DISABLED", "DESTROYED", "DESTROY_SCHEDULED", "PENDING_IMPORT", "IMPORT_FAILED", ""}), Description: `The current state of the CryptoKeyVersion. 
Possible values: ["PENDING_GENERATION", "ENABLED", "DISABLED", "DESTROYED", "DESTROY_SCHEDULED", "PENDING_IMPORT", "IMPORT_FAILED"]`, }, "algorithm": { @@ -147,8 +154,8 @@ Only provided for key versions with protectionLevel HSM.`, } func resourceKMSCryptoKeyVersionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -157,11 +164,11 @@ func resourceKMSCryptoKeyVersionCreate(d *schema.ResourceData, meta interface{}) stateProp, err := expandKMSCryptoKeyVersionState(d.Get("state"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("state"); !isEmptyValue(reflect.ValueOf(stateProp)) && (ok || !reflect.DeepEqual(v, stateProp)) { + } else if v, ok := d.GetOkExists("state"); !tpgresource.IsEmptyValue(reflect.ValueOf(stateProp)) && (ok || !reflect.DeepEqual(v, stateProp)) { obj["state"] = stateProp } - url, err := replaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}/cryptoKeyVersions") + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}/cryptoKeyVersions") if err != nil { return err } @@ -170,11 +177,19 @@ func resourceKMSCryptoKeyVersionCreate(d *schema.ResourceData, meta interface{}) billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err 
!= nil { return fmt.Errorf("Error creating CryptoKeyVersion: %s", err) } @@ -183,7 +198,7 @@ func resourceKMSCryptoKeyVersionCreate(d *schema.ResourceData, meta interface{}) } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -195,13 +210,13 @@ func resourceKMSCryptoKeyVersionCreate(d *schema.ResourceData, meta interface{}) } func resourceKMSCryptoKeyVersionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{KMSBasePath}}{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{name}}") if err != nil { return err } @@ -209,13 +224,19 @@ func resourceKMSCryptoKeyVersionRead(d *schema.ResourceData, meta interface{}) e billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("KMSCryptoKeyVersion %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("KMSCryptoKeyVersion %q", d.Id())) } if err := d.Set("name", flattenKMSCryptoKeyVersionName(res["name"], d, config)); err != nil { @@ -241,8 +262,8 @@ func resourceKMSCryptoKeyVersionRead(d *schema.ResourceData, meta interface{}) e } func 
resourceKMSCryptoKeyVersionUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -253,11 +274,11 @@ func resourceKMSCryptoKeyVersionUpdate(d *schema.ResourceData, meta interface{}) stateProp, err := expandKMSCryptoKeyVersionState(d.Get("state"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("state"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, stateProp)) { + } else if v, ok := d.GetOkExists("state"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, stateProp)) { obj["state"] = stateProp } - url, err := replaceVars(d, config, "{{KMSBasePath}}{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{name}}") if err != nil { return err } @@ -268,19 +289,27 @@ func resourceKMSCryptoKeyVersionUpdate(d *schema.ResourceData, meta interface{}) if d.HasChange("state") { updateMask = append(updateMask, "state") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: 
config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating CryptoKeyVersion %q: %s", d.Id(), err) @@ -292,8 +321,8 @@ func resourceKMSCryptoKeyVersionUpdate(d *schema.ResourceData, meta interface{}) } func resourceKMSCryptoKeyVersionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -314,19 +343,19 @@ func resourceKMSCryptoKeyVersionDelete(d *schema.ResourceData, meta interface{}) func resourceKMSCryptoKeyVersionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) cryptoKeyVersionId, err := parseKmsCryptoKeyVersionId(d.Id(), config) if err != nil { return nil, err } - if err := d.Set("crypto_key", cryptoKeyVersionId.CryptoKeyId.cryptoKeyId()); err != nil { + if err := d.Set("crypto_key", cryptoKeyVersionId.CryptoKeyId.CryptoKeyId()); err != nil { return nil, fmt.Errorf("Error setting key_ring: %s", err) } if err := d.Set("name", cryptoKeyVersionId.Name); err != nil { return nil, fmt.Errorf("Error setting name: %s", err) } - id, err := replaceVars(d, config, "{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -335,27 +364,27 @@ func resourceKMSCryptoKeyVersionImport(d *schema.ResourceData, meta interface{}) return []*schema.ResourceData{d}, nil } -func flattenKMSCryptoKeyVersionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenKMSCryptoKeyVersionState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenKMSCryptoKeyVersionProtectionLevel(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionProtectionLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenKMSCryptoKeyVersionGenerateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionGenerateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenKMSCryptoKeyVersionAlgorithm(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionAlgorithm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenKMSCryptoKeyVersionAttestation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionAttestation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -374,15 +403,15 @@ func flattenKMSCryptoKeyVersionAttestation(v interface{}, d *schema.ResourceData flattenKMSCryptoKeyVersionAttestationExternalProtectionLevelOptions(original["externalProtectionLevelOptions"], d, config) return []interface{}{transformed} } -func flattenKMSCryptoKeyVersionAttestationFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionAttestationFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenKMSCryptoKeyVersionAttestationContent(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionAttestationContent(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { return v } -func flattenKMSCryptoKeyVersionAttestationCertChains(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionAttestationCertChains(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -399,19 +428,19 @@ func flattenKMSCryptoKeyVersionAttestationCertChains(v interface{}, d *schema.Re flattenKMSCryptoKeyVersionAttestationCertChainsGooglePartitionCerts(original["googlePartitionCerts"], d, config) return []interface{}{transformed} } -func flattenKMSCryptoKeyVersionAttestationCertChainsCaviumCerts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionAttestationCertChainsCaviumCerts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenKMSCryptoKeyVersionAttestationCertChainsGoogleCardCerts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionAttestationCertChainsGoogleCardCerts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenKMSCryptoKeyVersionAttestationCertChainsGooglePartitionCerts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionAttestationCertChainsGooglePartitionCerts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenKMSCryptoKeyVersionAttestationExternalProtectionLevelOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionAttestationExternalProtectionLevelOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -426,14 +455,14 @@ func flattenKMSCryptoKeyVersionAttestationExternalProtectionLevelOptions(v inter 
flattenKMSCryptoKeyVersionAttestationExternalProtectionLevelOptionsEkmConnectionKeyPath(original["ekmConnectionKeyPath"], d, config) return []interface{}{transformed} } -func flattenKMSCryptoKeyVersionAttestationExternalProtectionLevelOptionsExternalKeyUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionAttestationExternalProtectionLevelOptionsExternalKeyUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenKMSCryptoKeyVersionAttestationExternalProtectionLevelOptionsEkmConnectionKeyPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenKMSCryptoKeyVersionAttestationExternalProtectionLevelOptionsEkmConnectionKeyPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandKMSCryptoKeyVersionState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandKMSCryptoKeyVersionState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_key_ring.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_key_ring.go new file mode 100644 index 0000000000..d2f1c44b9a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_key_ring.go @@ -0,0 +1,253 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package kms + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceKMSKeyRing() *schema.Resource { + return &schema.Resource{ + Create: resourceKMSKeyRingCreate, + Read: resourceKMSKeyRingRead, + Delete: resourceKMSKeyRingDelete, + + Importer: &schema.ResourceImporter{ + State: resourceKMSKeyRingImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location for the KeyRing. 
+A full list of valid locations can be found by running 'gcloud kms locations list'.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name for the KeyRing.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceKMSKeyRingCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandKMSKeyRingName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + locationProp, err := expandKMSKeyRingLocation(d.Get("location"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("location"); !tpgresource.IsEmptyValue(reflect.ValueOf(locationProp)) && (ok || !reflect.DeepEqual(v, locationProp)) { + obj["location"] = locationProp + } + + obj, err = resourceKMSKeyRingEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}projects/{{project}}/locations/{{location}}/keyRings?keyRingId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new KeyRing: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for KeyRing: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", 
+ Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating KeyRing: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/keyRings/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating KeyRing %q: %#v", d.Id(), res) + + return resourceKMSKeyRingRead(d, meta) +} + +func resourceKMSKeyRingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}projects/{{project}}/locations/{{location}}/keyRings/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for KeyRing: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("KMSKeyRing %q", d.Id())) + } + + res, err = resourceKMSKeyRingDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing KMSKeyRing because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading KeyRing: %s", err) + } + + if err := d.Set("name", flattenKMSKeyRingName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading KeyRing: %s", err) + } + + return nil +} + +func resourceKMSKeyRingDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] KMS KeyRing resources"+ + " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func resourceKMSKeyRingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/keyRings/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/keyRings/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenKMSKeyRingName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandKMSKeyRingName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSKeyRingLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceKMSKeyRingEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + return nil, nil +} + +func 
resourceKMSKeyRingDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // Modify the name to be the user specified form. + // We can't just ignore_read on `name` as the linter will + // complain that the returned `res` is never used afterwards. + // Some field needs to be actually set, and we chose `name`. + res["name"] = d.Get("name").(string) + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_key_ring_import_job.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_key_ring_import_job.go new file mode 100644 index 0000000000..49c2e51a23 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_key_ring_import_job.go @@ -0,0 +1,386 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package kms + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceKMSKeyRingImportJob() *schema.Resource { + return &schema.Resource{ + Create: resourceKMSKeyRingImportJobCreate, + Read: resourceKMSKeyRingImportJobRead, + Delete: resourceKMSKeyRingImportJobDelete, + + Importer: &schema.ResourceImporter{ + State: resourceKMSKeyRingImportJobImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "import_job_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `It must be unique within a KeyRing and match the regular expression [a-zA-Z0-9_-]{1,63}`, + }, + "import_method": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"RSA_OAEP_3072_SHA1_AES_256", "RSA_OAEP_4096_SHA1_AES_256"}), + Description: `The wrapping method to be used for incoming key material. Possible values: ["RSA_OAEP_3072_SHA1_AES_256", "RSA_OAEP_4096_SHA1_AES_256"]`, + }, + "key_ring": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: kmsCryptoKeyRingsEquivalent, + Description: `The KeyRing that this import job belongs to. +Format: ''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}''.`, + }, + "protection_level": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"SOFTWARE", "HSM", "EXTERNAL"}), + Description: `The protection level of the ImportJob. 
This must match the protectionLevel of the +versionTemplate on the CryptoKey you attempt to import into. Possible values: ["SOFTWARE", "HSM", "EXTERNAL"]`, + }, + "attestation": { + Type: schema.TypeList, + Computed: true, + Description: `Statement that was generated and signed by the key creator (for example, an HSM) at key creation time. +Use this statement to verify attributes of the key as stored on the HSM, independently of Google. +Only present if the chosen ImportMethod is one with a protection level of HSM.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "content": { + Type: schema.TypeString, + Computed: true, + Description: `The attestation data provided by the HSM when the key operation was performed. +A base64-encoded string.`, + }, + "format": { + Type: schema.TypeString, + Computed: true, + Description: `The format of the attestation data.`, + }, + }, + }, + }, + "expire_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which this resource is scheduled for expiration and can no longer be used. +This is in RFC3339 text format.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name for this ImportJob in the format projects/*/locations/*/keyRings/*/importJobs/*.`, + }, + "public_key": { + Type: schema.TypeList, + Computed: true, + Description: `The public key with which to wrap key material prior to import. Only returned if state is 'ACTIVE'.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pem": { + Type: schema.TypeString, + Computed: true, + Description: `The public key, encoded in PEM format. 
For more information, see the RFC 7468 sections +for General Considerations and Textual Encoding of Subject Public Key Info.`, + }, + }, + }, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the ImportJob, indicating if it can be used.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceKMSKeyRingImportJobCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + importMethodProp, err := expandKMSKeyRingImportJobImportMethod(d.Get("import_method"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("import_method"); !tpgresource.IsEmptyValue(reflect.ValueOf(importMethodProp)) && (ok || !reflect.DeepEqual(v, importMethodProp)) { + obj["importMethod"] = importMethodProp + } + protectionLevelProp, err := expandKMSKeyRingImportJobProtectionLevel(d.Get("protection_level"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("protection_level"); !tpgresource.IsEmptyValue(reflect.ValueOf(protectionLevelProp)) && (ok || !reflect.DeepEqual(v, protectionLevelProp)) { + obj["protectionLevel"] = protectionLevelProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{key_ring}}/importJobs?importJobId={{import_job_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new KeyRingImportJob: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + 
return fmt.Errorf("Error creating KeyRingImportJob: %s", err) + } + if err := d.Set("name", flattenKMSKeyRingImportJobName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating KeyRingImportJob %q: %#v", d.Id(), res) + + return resourceKMSKeyRingImportJobRead(d, meta) +} + +func resourceKMSKeyRingImportJobRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("KMSKeyRingImportJob %q", d.Id())) + } + + if err := d.Set("name", flattenKMSKeyRingImportJobName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading KeyRingImportJob: %s", err) + } + if err := d.Set("import_method", flattenKMSKeyRingImportJobImportMethod(res["importMethod"], d, config)); err != nil { + return fmt.Errorf("Error reading KeyRingImportJob: %s", err) + } + if err := d.Set("protection_level", flattenKMSKeyRingImportJobProtectionLevel(res["protectionLevel"], d, config)); err != nil { + return fmt.Errorf("Error reading KeyRingImportJob: %s", err) + } + if err := d.Set("expire_time", 
flattenKMSKeyRingImportJobExpireTime(res["expireTime"], d, config)); err != nil { + return fmt.Errorf("Error reading KeyRingImportJob: %s", err) + } + if err := d.Set("state", flattenKMSKeyRingImportJobState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading KeyRingImportJob: %s", err) + } + if err := d.Set("public_key", flattenKMSKeyRingImportJobPublicKey(res["publicKey"], d, config)); err != nil { + return fmt.Errorf("Error reading KeyRingImportJob: %s", err) + } + if err := d.Set("attestation", flattenKMSKeyRingImportJobAttestation(res["attestation"], d, config)); err != nil { + return fmt.Errorf("Error reading KeyRingImportJob: %s", err) + } + + return nil +} + +func resourceKMSKeyRingImportJobDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting KeyRingImportJob %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "KeyRingImportJob") + } + + log.Printf("[DEBUG] Finished deleting KeyRingImportJob %q: %#v", d.Id(), res) + return nil +} + +func resourceKMSKeyRingImportJobImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward 
slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + stringParts := strings.Split(d.Get("name").(string), "/") + if len(stringParts) != 8 { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s", + d.Get("name"), + "projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/importJobs/{{importJobId}}", + ) + } + + if err := d.Set("key_ring", stringParts[3]); err != nil { + return nil, fmt.Errorf("Error setting key_ring: %s", err) + } + if err := d.Set("import_job_id", stringParts[5]); err != nil { + return nil, fmt.Errorf("Error setting import_job_id: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenKMSKeyRingImportJobName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSKeyRingImportJobImportMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSKeyRingImportJobProtectionLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSKeyRingImportJobExpireTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSKeyRingImportJobState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSKeyRingImportJobPublicKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["pem"] = + flattenKMSKeyRingImportJobPublicKeyPem(original["pem"], d, config) + return []interface{}{transformed} +} +func flattenKMSKeyRingImportJobPublicKeyPem(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + 
+func flattenKMSKeyRingImportJobAttestation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["format"] = + flattenKMSKeyRingImportJobAttestationFormat(original["format"], d, config) + transformed["content"] = + flattenKMSKeyRingImportJobAttestationContent(original["content"], d, config) + return []interface{}{transformed} +} +func flattenKMSKeyRingImportJobAttestationFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenKMSKeyRingImportJobAttestationContent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandKMSKeyRingImportJobImportMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandKMSKeyRingImportJobProtectionLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_key_ring_import_job_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_key_ring_import_job_sweeper.go new file mode 100644 index 0000000000..70c61e0999 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_key_ring_import_job_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package kms + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("KMSKeyRingImportJob", testSweepKMSKeyRingImportJob) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepKMSKeyRingImportJob(region string) error { + resourceName := "KMSKeyRingImportJob" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://cloudkms.googleapis.com/v1/{{key_ring}}/importJobs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["keyRingImportJobs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudkms.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] 
Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_secret_ciphertext.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_secret_ciphertext.go new file mode 100644 index 0000000000..46bdb185d9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/kms/resource_kms_secret_ciphertext.go @@ -0,0 +1,231 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package kms + +import ( + "encoding/base64" + "fmt" + "log" + "reflect" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceKMSSecretCiphertext() *schema.Resource { + return &schema.Resource{ + Create: resourceKMSSecretCiphertextCreate, + Read: resourceKMSSecretCiphertextRead, + Delete: resourceKMSSecretCiphertextDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "crypto_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The full name of the CryptoKey that will be used to encrypt the provided plaintext. +Format: ''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}''`, + }, + "plaintext": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The plaintext to be encrypted.`, + Sensitive: true, + }, + "additional_authenticated_data": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The additional authenticated data used for integrity checks during encryption and decryption.`, + Sensitive: true, + }, + "ciphertext": { + Type: schema.TypeString, + Computed: true, + Description: `Contains the result of encrypting the provided plaintext, encoded in base64.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceKMSSecretCiphertextCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + plaintextProp, err := 
expandKMSSecretCiphertextPlaintext(d.Get("plaintext"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("plaintext"); !tpgresource.IsEmptyValue(reflect.ValueOf(plaintextProp)) && (ok || !reflect.DeepEqual(v, plaintextProp)) { + obj["plaintext"] = plaintextProp + } + additionalAuthenticatedDataProp, err := expandKMSSecretCiphertextAdditionalAuthenticatedData(d.Get("additional_authenticated_data"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("additional_authenticated_data"); !tpgresource.IsEmptyValue(reflect.ValueOf(additionalAuthenticatedDataProp)) && (ok || !reflect.DeepEqual(v, additionalAuthenticatedDataProp)) { + obj["additionalAuthenticatedData"] = additionalAuthenticatedDataProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}:encrypt") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new SecretCiphertext: %#v", obj) + billingProject := "" + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating SecretCiphertext: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{crypto_key}}/{{ciphertext}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // we don't set anything on read and instead do it all in create + ciphertext, ok := res["ciphertext"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + if err := d.Set("ciphertext", ciphertext.(string)); err != nil { + return fmt.Errorf("Error setting ciphertext: %s", err) + } + + id, err = tpgresource.ReplaceVars(d, config, "{{crypto_key}}/{{ciphertext}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating SecretCiphertext %q: %#v", d.Id(), res) + + return resourceKMSSecretCiphertextRead(d, meta) +} + +func resourceKMSSecretCiphertextRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}") + if err != nil { + return err + } + + billingProject := "" + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("KMSSecretCiphertext %q", d.Id())) + } + + res, err = resourceKMSSecretCiphertextDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing KMSSecretCiphertext because it no longer exists.") + d.SetId("") + return nil + } + + return nil +} + +func resourceKMSSecretCiphertextDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] KMS SecretCiphertext resources"+ + " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func expandKMSSecretCiphertextPlaintext(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + return base64.StdEncoding.EncodeToString([]byte(v.(string))), nil +} + +func expandKMSSecretCiphertextAdditionalAuthenticatedData(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + return base64.StdEncoding.EncodeToString([]byte(v.(string))), nil +} + +func resourceKMSSecretCiphertextDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_logging_project_cmek_settings.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/data_source_google_logging_project_cmek_settings.go similarity index 82% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_logging_project_cmek_settings.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/data_source_google_logging_project_cmek_settings.go index 9a12e99037..e79d4dd1c8 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_logging_project_cmek_settings.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/data_source_google_logging_project_cmek_settings.go @@ -1,9 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package logging import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleLoggingProjectCmekSettings() *schema.Resource { @@ -51,33 +55,39 @@ func DataSourceGoogleLoggingProjectCmekSettings() *schema.Resource { } func dataSourceGoogleLoggingProjectCmekSettingsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/cmekSettings") + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/cmekSettings") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for ProjectCmekSettings: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("LoggingProjectCmekSettings %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("LoggingProjectCmekSettings %q", d.Id())) } d.SetId(fmt.Sprintf("projects/%s/cmekSettings", project)) diff 
--git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/data_source_google_logging_sink.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/data_source_google_logging_sink.go new file mode 100644 index 0000000000..0315bdd6e7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/data_source_google_logging_sink.go @@ -0,0 +1,48 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleLoggingSink() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(resourceLoggingSinkSchema()) + dsSchema["id"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: `Required. 
An identifier for the resource in format: "projects/[PROJECT_ID]/sinks/[SINK_NAME]", "organizations/[ORGANIZATION_ID]/sinks/[SINK_NAME]", "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_NAME]", "folders/[FOLDER_ID]/sinks/[SINK_NAME]"`, + } + + return &schema.Resource{ + Read: dataSourceGoogleLoggingSinkRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleLoggingSinkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + sinkId := d.Get("id").(string) + + sink, err := config.NewLoggingClient(userAgent).Sinks.Get(sinkId).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Logging Sink %s", d.Id())) + } + + if err := flattenResourceLoggingSink(d, sink); err != nil { + return err + } + + d.SetId(sinkId) + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/extract.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/extract.go new file mode 100644 index 0000000000..bf3a65c26d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/extract.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging + +import ( + "fmt" + "regexp" +) + +// ExtractFieldByPattern returns the value of a field extracted from a parent field according to the given regular expression pattern. +// An error is returned if the field already has a value different than the value extracted. +func ExtractFieldByPattern(fieldName, fieldValue, parentFieldValue, pattern string) (string, error) { + var extractedValue string + // Fetch value from container if the container exists. 
+ if parentFieldValue != "" { + r := regexp.MustCompile(pattern) + m := r.FindStringSubmatch(parentFieldValue) + if m != nil && len(m) >= 2 { + extractedValue = m[1] + } else if fieldValue == "" { + // The pattern didn't match and the value doesn't exist. + return "", fmt.Errorf("parent of %q has no matching values from pattern %q in value %q", fieldName, pattern, parentFieldValue) + } + } + + // If both values exist and are different, error + if fieldValue != "" && extractedValue != "" && fieldValue != extractedValue { + return "", fmt.Errorf("%q has conflicting values of %q (from parent) and %q (from self)", fieldName, extractedValue, fieldValue) + } + + // If value does not exist, use the value in container. + if fieldValue == "" { + return extractedValue, nil + } + + return fieldValue, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_exclusion_billing_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_exclusion_billing_account.go similarity index 83% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_exclusion_billing_account.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_exclusion_billing_account.go index d034419019..abe76cee89 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_exclusion_billing_account.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_exclusion_billing_account.go @@ -1,10 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package logging import ( "fmt" "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/logging/v2" ) @@ -20,13 +24,13 @@ type BillingAccountLoggingExclusionUpdater struct { resourceType string resourceId string userAgent string - Config *Config + Config *transport_tpg.Config } -func NewBillingAccountLoggingExclusionUpdater(d *schema.ResourceData, config *Config) (ResourceLoggingExclusionUpdater, error) { +func NewBillingAccountLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) { billingAccount := d.Get("billing_account").(string) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } @@ -39,8 +43,8 @@ func NewBillingAccountLoggingExclusionUpdater(d *schema.ResourceData, config *Co }, nil } -func BillingAccountLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *Config) error { - loggingExclusionId, err := parseLoggingExclusionId(d.Id()) +func BillingAccountLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { + loggingExclusionId, err := ParseLoggingExclusionId(d.Id()) if err != nil { return err } @@ -49,7 +53,7 @@ func BillingAccountLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *Config return fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) } - if err := d.Set("billing_account", loggingExclusionId.resourceId); err != nil { + if err := d.Set("billing_account", loggingExclusionId.ResourceId); err != nil { return fmt.Errorf("Error setting billing_account: %s", err) } return nil diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_exclusion_folder.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_exclusion_folder.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_exclusion_folder.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_exclusion_folder.go index 5dea1122c9..58d9dfcb2f 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_exclusion_folder.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_exclusion_folder.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging import ( "fmt" "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/services/resourcemanager" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/logging/v2" ) @@ -13,7 +18,7 @@ var FolderLoggingExclusionSchema = map[string]*schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: optionalPrefixSuppress("folders/"), + DiffSuppressFunc: tpgresource.OptionalPrefixSuppress("folders/"), }, } @@ -21,13 +26,12 @@ type FolderLoggingExclusionUpdater struct { resourceType string resourceId string userAgent string - Config *Config + Config *transport_tpg.Config } -func NewFolderLoggingExclusionUpdater(d *schema.ResourceData, config *Config) (ResourceLoggingExclusionUpdater, error) { - folder := parseFolderId(d.Get("folder")) - - userAgent, err := generateUserAgentString(d, 
config.UserAgent) +func NewFolderLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) { + folder := resourcemanager.ParseFolderId(d.Get("folder")) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } @@ -40,8 +44,8 @@ func NewFolderLoggingExclusionUpdater(d *schema.ResourceData, config *Config) (R }, nil } -func FolderLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *Config) error { - loggingExclusionId, err := parseLoggingExclusionId(d.Id()) +func FolderLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { + loggingExclusionId, err := ParseLoggingExclusionId(d.Id()) if err != nil { return err } @@ -50,7 +54,7 @@ func FolderLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *Config) error return fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) } - if err := d.Set("folder", loggingExclusionId.resourceId); err != nil { + if err := d.Set("folder", loggingExclusionId.ResourceId); err != nil { return fmt.Errorf("Error setting folder: %s", err) } return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_exclusion_organization.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_exclusion_organization.go similarity index 83% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_exclusion_organization.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_exclusion_organization.go index aff2153cec..938ce6aa7b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_exclusion_organization.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_exclusion_organization.go @@ -1,10 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging import ( "fmt" "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/logging/v2" ) @@ -20,13 +24,13 @@ type OrganizationLoggingExclusionUpdater struct { resourceType string resourceId string userAgent string - Config *Config + Config *transport_tpg.Config } -func NewOrganizationLoggingExclusionUpdater(d *schema.ResourceData, config *Config) (ResourceLoggingExclusionUpdater, error) { +func NewOrganizationLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) { organization := d.Get("org_id").(string) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } @@ -39,8 +43,8 @@ func NewOrganizationLoggingExclusionUpdater(d *schema.ResourceData, config *Conf }, nil } -func OrganizationLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *Config) error { - loggingExclusionId, err := parseLoggingExclusionId(d.Id()) +func OrganizationLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { + loggingExclusionId, err := ParseLoggingExclusionId(d.Id()) if err != nil { return err } @@ -49,7 +53,7 @@ func OrganizationLoggingExclusionIdParseFunc(d *schema.ResourceData, _ *Config) return fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) } - if err := d.Set("org_id", loggingExclusionId.resourceId); err != nil { + if err := d.Set("org_id", 
loggingExclusionId.ResourceId); err != nil { return fmt.Errorf("Error setting org_id: %s", err) } return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_exclusion_project.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_exclusion_project.go similarity index 80% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_exclusion_project.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_exclusion_project.go index 936f50af20..2674e3fef5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/logging_exclusion_project.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_exclusion_project.go @@ -1,10 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package logging import ( "fmt" "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/logging/v2" ) @@ -21,16 +25,16 @@ type ProjectLoggingExclusionUpdater struct { resourceType string resourceId string userAgent string - Config *Config + Config *transport_tpg.Config } -func NewProjectLoggingExclusionUpdater(d *schema.ResourceData, config *Config) (ResourceLoggingExclusionUpdater, error) { - pid, err := getProject(d, config) +func NewProjectLoggingExclusionUpdater(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) { + pid, err := tpgresource.GetProject(d, config) if err != nil { return nil, err } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } @@ -43,8 +47,8 @@ func NewProjectLoggingExclusionUpdater(d *schema.ResourceData, config *Config) ( }, nil } -func ProjectLoggingExclusionIdParseFunc(d *schema.ResourceData, config *Config) error { - loggingExclusionId, err := parseLoggingExclusionId(d.Id()) +func ProjectLoggingExclusionIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + loggingExclusionId, err := ParseLoggingExclusionId(d.Id()) if err != nil { return err } @@ -53,8 +57,8 @@ func ProjectLoggingExclusionIdParseFunc(d *schema.ResourceData, config *Config) return fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) } - if config.Project != loggingExclusionId.resourceId { - if err := d.Set("project", loggingExclusionId.resourceId); err != nil { + if config.Project != loggingExclusionId.ResourceId { + if err := d.Set("project", loggingExclusionId.ResourceId); err != nil { 
return fmt.Errorf("Error setting project: %s", err) } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_operation.go new file mode 100644 index 0000000000..f1fded2bff --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_operation.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package logging + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type LoggingOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + tpgresource.CommonOperationWaiter +} + +func (w *LoggingOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.LoggingBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createLoggingWaiter(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string) (*LoggingOperationWaiter, error) { + w := &LoggingOperationWaiter{ + Config: config, + UserAgent: userAgent, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func LoggingOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + w, err := createLoggingWaiter(config, op, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func LoggingOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createLoggingWaiter(config, op, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_utils.go new file mode 100644 index 0000000000..7220ee8f22 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/logging_utils.go @@ -0,0 +1,62 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging + +import ( + "fmt" + "regexp" +) + +// LoggingSinkResourceTypes contains all the possible Stackdriver Logging resource types. Used to parse ids safely. +var LoggingSinkResourceTypes = []string{ + "billingAccounts", + "folders", + "organizations", + "projects", +} + +// LoggingSinkId represents the parts that make up the canonical id used within terraform for a logging resource. +type LoggingSinkId struct { + resourceType string + resourceId string + name string +} + +// loggingSinkIdRegex matches valid logging sink canonical ids +var loggingSinkIdRegex = regexp.MustCompile("(.+)/(.+)/sinks/(.+)") + +// canonicalId returns the LoggingSinkId as the canonical id used within terraform. +func (l LoggingSinkId) canonicalId() string { + return fmt.Sprintf("%s/%s/sinks/%s", l.resourceType, l.resourceId, l.name) +} + +// parent returns the "parent-level" resource that the sink is in (e.g. `folders/foo` for id `folders/foo/sinks/bar`) +func (l LoggingSinkId) parent() string { + return fmt.Sprintf("%s/%s", l.resourceType, l.resourceId) +} + +// ParseLoggingSinkId parses a canonical id into a LoggingSinkId, or returns an error on failure. 
+func ParseLoggingSinkId(id string) (*LoggingSinkId, error) { + parts := loggingSinkIdRegex.FindStringSubmatch(id) + if parts == nil { + return nil, fmt.Errorf("unable to parse logging sink id %#v", id) + } + // If our resourceType is not a valid logging sink resource type, complain loudly + validLoggingSinkResourceType := false + for _, v := range LoggingSinkResourceTypes { + if v == parts[1] { + validLoggingSinkResourceType = true + break + } + } + + if !validLoggingSinkResourceType { + return nil, fmt.Errorf("Logging resource type %s is not valid. Valid resource types: %#v", parts[1], + LoggingSinkResourceTypes) + } + return &LoggingSinkId{ + resourceType: parts[1], + resourceId: parts[2], + name: parts[3], + }, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_billing_account_bucket_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_billing_account_bucket_config.go similarity index 82% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_billing_account_bucket_config.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_billing_account_bucket_config.go index 3248311773..ea34ad2f3c 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_billing_account_bucket_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_billing_account_bucket_config.go @@ -1,10 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package logging import ( "fmt" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) var loggingBillingAccountBucketConfigSchema = map[string]*schema.Schema{ @@ -16,7 +19,7 @@ var loggingBillingAccountBucketConfigSchema = map[string]*schema.Schema{ }, } -func billingAccountBucketConfigID(d *schema.ResourceData, config *Config) (string, error) { +func billingAccountBucketConfigID(d *schema.ResourceData, config *transport_tpg.Config) (string, error) { billingAccount := d.Get("billing_account").(string) location := d.Get("location").(string) bucketID := d.Get("bucket_id").(string) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_billing_account_sink.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_billing_account_sink.go new file mode 100644 index 0000000000..818a2e2967 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_billing_account_sink.go @@ -0,0 +1,104 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package logging + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceLoggingBillingAccountSink() *schema.Resource { + schm := &schema.Resource{ + Create: resourceLoggingBillingAccountSinkCreate, + Read: resourceLoggingBillingAccountSinkRead, + Delete: resourceLoggingBillingAccountSinkDelete, + Update: resourceLoggingBillingAccountSinkUpdate, + Schema: resourceLoggingSinkSchema(), + Importer: &schema.ResourceImporter{ + State: resourceLoggingSinkImportState("billing_account"), + }, + UseJSONNumber: true, + } + schm.Schema["billing_account"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The billing account exported to the sink.`, + } + return schm +} + +func resourceLoggingBillingAccountSinkCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + id, sink := expandResourceLoggingSink(d, "billingAccounts", d.Get("billing_account").(string)) + + // The API will reject any requests that don't explicitly set 'uniqueWriterIdentity' to true. 
+ _, err = config.NewLoggingClient(userAgent).BillingAccounts.Sinks.Create(id.parent(), sink).UniqueWriterIdentity(true).Do() + if err != nil { + return err + } + + d.SetId(id.canonicalId()) + return resourceLoggingBillingAccountSinkRead(d, meta) +} + +func resourceLoggingBillingAccountSinkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + sink, err := config.NewLoggingClient(userAgent).BillingAccounts.Sinks.Get(d.Id()).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Billing Logging Sink %s", d.Get("name").(string))) + } + + if err := flattenResourceLoggingSink(d, sink); err != nil { + return err + } + + return nil +} + +func resourceLoggingBillingAccountSinkUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + sink, updateMask := expandResourceLoggingSinkForUpdate(d) + + // The API will reject any requests that don't explicitly set 'uniqueWriterIdentity' to true. + _, err = config.NewLoggingClient(userAgent).BillingAccounts.Sinks.Patch(d.Id(), sink). 
+ UpdateMask(updateMask).UniqueWriterIdentity(true).Do() + if err != nil { + return err + } + + return resourceLoggingBillingAccountSinkRead(d, meta) +} + +func resourceLoggingBillingAccountSinkDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + _, err = config.NewLoggingClient(userAgent).Projects.Sinks.Delete(d.Id()).Do() + if err != nil { + return err + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_bucket_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_bucket_config.go similarity index 79% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_bucket_config.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_bucket_config.go index e14465daa9..8cdcfd0220 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_bucket_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_bucket_config.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package logging import ( "fmt" @@ -7,6 +9,8 @@ import ( "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) var loggingBucketConfigSchema = map[string]*schema.Schema{ @@ -88,7 +92,7 @@ See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/ro }, } -type loggingBucketConfigIDFunc func(d *schema.ResourceData, config *Config) (string, error) +type loggingBucketConfigIDFunc func(d *schema.ResourceData, config *transport_tpg.Config) (string, error) // ResourceLoggingBucketConfig creates a resource definition by merging a unique field (eg: folder) to a generic logging bucket // config resource. In practice the only difference between these resources is the url location. @@ -101,7 +105,7 @@ func ResourceLoggingBucketConfig(parentType string, parentSpecificSchema map[str Importer: &schema.ResourceImporter{ State: resourceLoggingBucketConfigImportState(parentType), }, - Schema: mergeSchemas(loggingBucketConfigSchema, parentSpecificSchema), + Schema: tpgresource.MergeSchemas(loggingBucketConfigSchema, parentSpecificSchema), UseJSONNumber: true, } } @@ -116,11 +120,11 @@ func resourceLoggingBucketConfigImportState(parent string) schema.StateFunc { } if len(parts) != 5 { - return nil, fmt.Errorf("Invalid id format. Format should be '{{parent}}/{{parent_id}}/locations/{{location}}/buckets/{{bucket_id}} with parent in %s", loggingSinkResourceTypes) + return nil, fmt.Errorf("Invalid id format. 
Format should be '{{parent}}/{{parent_id}}/locations/{{location}}/buckets/{{bucket_id}} with parent in %s", LoggingSinkResourceTypes) } validLoggingType := false - for _, v := range loggingSinkResourceTypes { + for _, v := range LoggingSinkResourceTypes { if v == parts[1] { validLoggingType = true break @@ -128,7 +132,7 @@ func resourceLoggingBucketConfigImportState(parent string) schema.StateFunc { } if !validLoggingType { return nil, fmt.Errorf("Logging parent type %s is not valid. Valid resource types: %#v", parts[1], - loggingSinkResourceTypes) + LoggingSinkResourceTypes) } if err := d.Set(parent, parts[1]+"/"+parts[2]); err != nil { @@ -149,8 +153,8 @@ func resourceLoggingBucketConfigImportState(parent string) schema.StateFunc { func resourceLoggingBucketConfigAcquireOrCreate(parentType string, iDFunc loggingBucketConfigIDFunc) func(*schema.ResourceData, interface{}) error { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -164,12 +168,17 @@ func resourceLoggingBucketConfigAcquireOrCreate(parentType string, iDFunc loggin //logging bucket can be created only at the project level, in future api may allow for folder, org and other parent resources log.Printf("[DEBUG] Fetching logging bucket config: %#v", id) - url, err := replaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", id)) + url, err := tpgresource.ReplaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", id)) if err != nil { return err } - res, _ := SendRequest(config, "GET", "", url, userAgent, nil) + res, _ := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) if res == nil { log.Printf("[DEGUG] Loggin Bucket not exist %s", id) // we need to pass the id in 
here because we don't want to set it in state @@ -185,8 +194,8 @@ func resourceLoggingBucketConfigAcquireOrCreate(parentType string, iDFunc loggin } func resourceLoggingBucketConfigCreate(d *schema.ResourceData, meta interface{}, id string) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -195,10 +204,9 @@ func resourceLoggingBucketConfigCreate(d *schema.ResourceData, meta interface{}, obj["name"] = d.Get("name") obj["description"] = d.Get("description") obj["retentionDays"] = d.Get("retention_days") - obj["locked"] = d.Get("locked") obj["cmekSettings"] = expandCmekSettings(d.Get("cmek_settings")) - url, err := replaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/locations/{{location}}/buckets?bucketId={{bucket_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/locations/{{location}}/buckets?bucketId={{bucket_id}}") if err != nil { return err } @@ -206,18 +214,26 @@ func resourceLoggingBucketConfigCreate(d *schema.ResourceData, meta interface{}, log.Printf("[DEBUG] Creating new Bucket: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Bucket: %s", err) } @@ -230,20 +246,25 @@ func resourceLoggingBucketConfigCreate(d *schema.ResourceData, meta interface{}, } func resourceLoggingBucketConfigRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } log.Printf("[DEBUG] Fetching logging bucket config: %#v", d.Id()) - url, err := replaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", d.Id())) + url, err := tpgresource.ReplaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", d.Id())) if err != nil { return err } - res, err := SendRequest(config, "GET", "", url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) if err != nil { log.Printf("[WARN] Unable to acquire logging bucket config at %s", d.Id()) @@ -272,15 +293,15 @@ func resourceLoggingBucketConfigRead(d *schema.ResourceData, meta interface{}) e } func resourceLoggingBucketConfigUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } obj := make(map[string]interface{}) - url, err := replaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", d.Id())) + url, err := tpgresource.ReplaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", d.Id())) if err != nil { return err } @@ -299,11 +320,18 @@ func resourceLoggingBucketConfigUpdate(d *schema.ResourceData, meta interface{}) if d.HasChange("cmek_settings") { updateMask = append(updateMask, "cmekSettings") } - url, err = 
addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } - _, err = SendRequestWithTimeout(config, "PATCH", "", url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Logging Bucket Config %q: %s", d.Id(), err) } @@ -321,16 +349,22 @@ func resourceLoggingBucketConfigDelete(d *schema.ResourceData, meta interface{}) } } - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", d.Id())) + url, err := tpgresource.ReplaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", d.Id())) if err != nil { return err } - if _, err := SendRequestWithTimeout(config, "DELETE", "", url, userAgent, nil, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + RawURL: url, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutUpdate), + }); err != nil { return fmt.Errorf("Error deleting Logging Bucket Config %q: %s", d.Id(), err) } return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_exclusion.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_exclusion.go similarity index 83% rename from 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_exclusion.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_exclusion.go index 961893ef3e..bd6a508f83 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_exclusion.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_exclusion.go @@ -1,10 +1,16 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging import ( "fmt" "regexp" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/logging/v2" ) @@ -33,7 +39,7 @@ var LoggingExclusionBaseSchema = map[string]*schema.Schema{ }, } -func ResourceLoggingExclusion(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceLoggingExclusionUpdaterFunc, resourceIdParser resourceIdParserFunc) *schema.Resource { +func ResourceLoggingExclusion(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceLoggingExclusionUpdaterFunc, resourceIdParser tpgiamresource.ResourceIdParserFunc) *schema.Resource { return &schema.Resource{ Create: resourceLoggingExclusionCreate(newUpdaterFunc), Read: resourceLoggingExclusionRead(newUpdaterFunc), @@ -44,14 +50,14 @@ func ResourceLoggingExclusion(parentSpecificSchema map[string]*schema.Schema, ne State: resourceLoggingExclusionImportState(resourceIdParser), }, - Schema: mergeSchemas(LoggingExclusionBaseSchema, parentSpecificSchema), + Schema: tpgresource.MergeSchemas(LoggingExclusionBaseSchema, parentSpecificSchema), 
UseJSONNumber: true, } } func resourceLoggingExclusionCreate(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.CreateFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { return err @@ -61,8 +67,8 @@ func resourceLoggingExclusionCreate(newUpdaterFunc newResourceLoggingExclusionUp // Logging exclusions don't seem to be able to be mutated in parallel, see // https://github.com/hashicorp/terraform-provider-google/issues/4796 - mutexKV.Lock(id.parent()) - defer mutexKV.Unlock(id.parent()) + transport_tpg.MutexStore.Lock(id.parent()) + defer transport_tpg.MutexStore.Unlock(id.parent()) err = updater.CreateLoggingExclusion(id.parent(), exclusion) if err != nil { @@ -77,7 +83,7 @@ func resourceLoggingExclusionCreate(newUpdaterFunc newResourceLoggingExclusionUp func resourceLoggingExclusionRead(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.ReadFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { return err @@ -86,7 +92,7 @@ func resourceLoggingExclusionRead(newUpdaterFunc newResourceLoggingExclusionUpda exclusion, err := updater.ReadLoggingExclusion(d.Id()) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Logging Exclusion %s", d.Get("name").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Logging Exclusion %s", d.Get("name").(string))) } if err := flattenResourceLoggingExclusion(d, exclusion); err != nil { @@ -105,7 +111,7 @@ func resourceLoggingExclusionRead(newUpdaterFunc newResourceLoggingExclusionUpda func resourceLoggingExclusionUpdate(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.UpdateFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := 
meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { return err @@ -116,8 +122,8 @@ func resourceLoggingExclusionUpdate(newUpdaterFunc newResourceLoggingExclusionUp // Logging exclusions don't seem to be able to be mutated in parallel, see // https://github.com/hashicorp/terraform-provider-google/issues/4796 - mutexKV.Lock(id.parent()) - defer mutexKV.Unlock(id.parent()) + transport_tpg.MutexStore.Lock(id.parent()) + defer transport_tpg.MutexStore.Unlock(id.parent()) err = updater.UpdateLoggingExclusion(d.Id(), exclusion, updateMask) if err != nil { @@ -130,7 +136,7 @@ func resourceLoggingExclusionUpdate(newUpdaterFunc newResourceLoggingExclusionUp func resourceLoggingExclusionDelete(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.DeleteFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { return err @@ -139,8 +145,8 @@ func resourceLoggingExclusionDelete(newUpdaterFunc newResourceLoggingExclusionUp id, _ := expandResourceLoggingExclusion(d, updater.GetResourceType(), updater.GetResourceId()) // Logging exclusions don't seem to be able to be mutated in parallel, see // https://github.com/hashicorp/terraform-provider-google/issues/4796 - mutexKV.Lock(id.parent()) - defer mutexKV.Unlock(id.parent()) + transport_tpg.MutexStore.Lock(id.parent()) + defer transport_tpg.MutexStore.Unlock(id.parent()) err = updater.DeleteLoggingExclusion(d.Id()) if err != nil { @@ -152,9 +158,9 @@ func resourceLoggingExclusionDelete(newUpdaterFunc newResourceLoggingExclusionUp } } -func resourceLoggingExclusionImportState(resourceIdParser resourceIdParserFunc) schema.StateFunc { +func resourceLoggingExclusionImportState(resourceIdParser tpgiamresource.ResourceIdParserFunc) schema.StateFunc { return func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) 
+ config := meta.(*transport_tpg.Config) err := resourceIdParser(d, config) if err != nil { return nil, err @@ -163,10 +169,10 @@ func resourceLoggingExclusionImportState(resourceIdParser resourceIdParserFunc) } } -func expandResourceLoggingExclusion(d *schema.ResourceData, resourceType, resourceId string) (LoggingExclusionId, *logging.LogExclusion) { +func expandResourceLoggingExclusion(d *schema.ResourceData, resourceType, ResourceId string) (LoggingExclusionId, *logging.LogExclusion) { id := LoggingExclusionId{ resourceType: resourceType, - resourceId: resourceId, + ResourceId: ResourceId, name: d.Get("name").(string), } @@ -244,7 +250,7 @@ type ResourceLoggingExclusionUpdater interface { DescribeResource() string } -type newResourceLoggingExclusionUpdaterFunc func(d *schema.ResourceData, config *Config) (ResourceLoggingExclusionUpdater, error) +type newResourceLoggingExclusionUpdaterFunc func(d *schema.ResourceData, config *transport_tpg.Config) (ResourceLoggingExclusionUpdater, error) // loggingExclusionResourceTypes contains all the possible Stackdriver Logging resource types. Used to parse ids safely. var loggingExclusionResourceTypes = []string{ @@ -257,7 +263,7 @@ var loggingExclusionResourceTypes = []string{ // LoggingExclusionId represents the parts that make up the canonical id used within terraform for a logging resource. type LoggingExclusionId struct { resourceType string - resourceId string + ResourceId string name string } @@ -266,16 +272,16 @@ var loggingExclusionIdRegex = regexp.MustCompile("(.+)/(.+)/exclusions/(.+)") // canonicalId returns the LoggingExclusionId as the canonical id used within terraform. func (l LoggingExclusionId) canonicalId() string { - return fmt.Sprintf("%s/%s/exclusions/%s", l.resourceType, l.resourceId, l.name) + return fmt.Sprintf("%s/%s/exclusions/%s", l.resourceType, l.ResourceId, l.name) } // parent returns the "parent-level" resource that the exclusion is in (e.g. 
`folders/foo` for id `folders/foo/exclusions/bar`) func (l LoggingExclusionId) parent() string { - return fmt.Sprintf("%s/%s", l.resourceType, l.resourceId) + return fmt.Sprintf("%s/%s", l.resourceType, l.ResourceId) } -// parseLoggingExclusionId parses a canonical id into a LoggingExclusionId, or returns an error on failure. -func parseLoggingExclusionId(id string) (*LoggingExclusionId, error) { +// ParseLoggingExclusionId parses a canonical id into a LoggingExclusionId, or returns an error on failure. +func ParseLoggingExclusionId(id string) (*LoggingExclusionId, error) { parts := loggingExclusionIdRegex.FindStringSubmatch(id) if parts == nil { return nil, fmt.Errorf("unable to parse logging exclusion id %#v", id) @@ -295,7 +301,7 @@ func parseLoggingExclusionId(id string) (*LoggingExclusionId, error) { } return &LoggingExclusionId{ resourceType: parts[1], - resourceId: parts[2], + ResourceId: parts[2], name: parts[3], }, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_folder_bucket_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_folder_bucket_config.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_folder_bucket_config.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_folder_bucket_config.go index 1efc03dc4b..9ab225303b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_folder_bucket_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_folder_bucket_config.go @@ -1,10 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package logging import ( "fmt" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) var loggingFolderBucketConfigSchema = map[string]*schema.Schema{ @@ -16,7 +19,7 @@ var loggingFolderBucketConfigSchema = map[string]*schema.Schema{ }, } -func folderBucketConfigID(d *schema.ResourceData, config *Config) (string, error) { +func folderBucketConfigID(d *schema.ResourceData, config *transport_tpg.Config) (string, error) { folder := d.Get("folder").(string) location := d.Get("location").(string) bucketID := d.Get("bucket_id").(string) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_folder_sink.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_folder_sink.go similarity index 77% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_folder_sink.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_folder_sink.go index a37013046d..8df1066da2 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_folder_sink.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_folder_sink.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package logging import ( "fmt" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/services/resourcemanager" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceLoggingFolderSink() *schema.Resource { @@ -40,13 +45,13 @@ func ResourceLoggingFolderSink() *schema.Resource { } func resourceLoggingFolderSinkCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - folder := parseFolderId(d.Get("folder")) + folder := resourcemanager.ParseFolderId(d.Get("folder")) id, sink := expandResourceLoggingSink(d, "folders", folder) sink.IncludeChildren = d.Get("include_children").(bool) @@ -61,15 +66,15 @@ func resourceLoggingFolderSinkCreate(d *schema.ResourceData, meta interface{}) e } func resourceLoggingFolderSinkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } sink, err := config.NewLoggingClient(userAgent).Folders.Sinks.Get(d.Id()).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Folder Logging Sink %s", d.Get("name").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Folder Logging Sink %s", d.Get("name").(string))) } if err := flattenResourceLoggingSink(d, sink); err != nil { @@ -84,8 +89,8 @@ func resourceLoggingFolderSinkRead(d *schema.ResourceData, meta interface{}) err } func resourceLoggingFolderSinkUpdate(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -107,8 +112,8 @@ func resourceLoggingFolderSinkUpdate(d *schema.ResourceData, meta interface{}) e } func resourceLoggingFolderSinkDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_linked_dataset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_linked_dataset.go new file mode 100644 index 0000000000..ad850aa979 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_linked_dataset.go @@ -0,0 +1,381 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package logging + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceLoggingLinkedDataset() *schema.Resource { + return &schema.Resource{ + Create: resourceLoggingLinkedDatasetCreate, + Read: resourceLoggingLinkedDatasetRead, + Delete: resourceLoggingLinkedDatasetDelete, + + Importer: &schema.ResourceImporter{ + State: resourceLoggingLinkedDatasetImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The bucket to which the linked dataset is attached.`, + }, + "link_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The id of the linked dataset.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Describes this link. The maximum length of the description is 8000 characters.`, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The location of the linked dataset.`, + }, + "parent": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The parent of the linked dataset.`, + }, + "bigquery_dataset": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `The information of a BigQuery Dataset. 
When a link is created, a BigQuery dataset is created along +with it, in the same project as the LogBucket it's linked to. This dataset will also have BigQuery +Views corresponding to the LogViews in the bucket.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The full resource name of the BigQuery dataset. The DATASET_ID will match the ID +of the link, so the link must match the naming restrictions of BigQuery datasets +(alphanumeric characters and underscores only). The dataset will have a resource path of +"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET_ID]"`, + }, + }, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The creation timestamp of the link. A timestamp in RFC3339 UTC "Zulu" format, +with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" +and "2014-10-02T15:01:23.045123456Z".`, + }, + "lifecycle_state": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The linked dataset lifecycle state.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the linked dataset. The name can have up to 100 characters. 
A valid link id +(at the end of the link name) must only have alphanumeric characters and underscores within it.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceLoggingLinkedDatasetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandLoggingLinkedDatasetDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + obj, err = resourceLoggingLinkedDatasetEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}{{parent}}/locations/{{location}}/buckets/{{bucket}}/links?linkId={{link_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new LinkedDataset: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating LinkedDataset: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/locations/{{location}}/buckets/{{bucket}}/links/{{link_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes 
map[string]interface{} + err = LoggingOperationWaitTimeWithResponse( + config, res, &opRes, "Creating LinkedDataset", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create LinkedDataset: %s", err) + } + + if err := d.Set("name", flattenLoggingLinkedDatasetName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{parent}}/locations/{{location}}/buckets/{{bucket}}/links/{{link_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating LinkedDataset %q: %#v", d.Id(), res) + + return resourceLoggingLinkedDatasetRead(d, meta) +} + +func resourceLoggingLinkedDatasetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}{{parent}}/locations/{{location}}/buckets/{{bucket}}/links/{{link_id}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("LoggingLinkedDataset %q", d.Id())) + } + + if err := d.Set("name", flattenLoggingLinkedDatasetName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading LinkedDataset: %s", err) + } + if err := d.Set("description", 
flattenLoggingLinkedDatasetDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading LinkedDataset: %s", err) + } + if err := d.Set("create_time", flattenLoggingLinkedDatasetCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading LinkedDataset: %s", err) + } + if err := d.Set("lifecycle_state", flattenLoggingLinkedDatasetLifecycleState(res["lifecycleState"], d, config)); err != nil { + return fmt.Errorf("Error reading LinkedDataset: %s", err) + } + if err := d.Set("bigquery_dataset", flattenLoggingLinkedDatasetBigqueryDataset(res["bigqueryDataset"], d, config)); err != nil { + return fmt.Errorf("Error reading LinkedDataset: %s", err) + } + + return nil +} + +func resourceLoggingLinkedDatasetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}{{parent}}/locations/{{location}}/buckets/{{bucket}}/links/{{link_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting LinkedDataset %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "LinkedDataset") + } + + err = LoggingOperationWaitTime( + config, res, "Deleting LinkedDataset", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting LinkedDataset %q: 
%#v", d.Id(), res) + return nil +} + +func resourceLoggingLinkedDatasetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/locations/(?P[^/]+)/buckets/(?P[^/]+)/links/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/locations/{{location}}/buckets/{{bucket}}/links/{{link_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenLoggingLinkedDatasetName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingLinkedDatasetDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingLinkedDatasetCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingLinkedDatasetLifecycleState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingLinkedDatasetBigqueryDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset_id"] = + flattenLoggingLinkedDatasetBigqueryDatasetDatasetId(original["datasetId"], d, config) + return []interface{}{transformed} +} +func flattenLoggingLinkedDatasetBigqueryDatasetDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandLoggingLinkedDatasetDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { 
+ return v, nil +} + +func resourceLoggingLinkedDatasetEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Extract any empty fields from the bucket field. + parent := d.Get("parent").(string) + bucket := d.Get("bucket").(string) + parent, err := ExtractFieldByPattern("parent", parent, bucket, "((projects|folders|organizations|billingAccounts)/[a-z0-9A-Z-]*)/locations/.*") + if err != nil { + return nil, fmt.Errorf("error extracting parent field: %s", err) + } + location := d.Get("location").(string) + location, err = ExtractFieldByPattern("location", location, bucket, "[a-zA-Z]*/[a-z0-9A-Z-]*/locations/([a-z0-9-]*)/buckets/.*") + if err != nil { + return nil, fmt.Errorf("error extracting location field: %s", err) + } + // Set parent to the extracted value. + d.Set("parent", parent) + // Set all the other fields to their short forms before forming url and setting ID. + bucket = tpgresource.GetResourceNameFromSelfLink(bucket) + name := d.Get("name").(string) + name = tpgresource.GetResourceNameFromSelfLink(name) + d.Set("location", location) + d.Set("bucket", bucket) + d.Set("name", name) + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_view.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_view.go new file mode 100644 index 0000000000..56cda50294 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_log_view.go @@ -0,0 +1,403 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package logging + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceLoggingLogView() *schema.Resource { + return &schema.Resource{ + Create: resourceLoggingLogViewCreate, + Read: resourceLoggingLogViewRead, + Update: resourceLoggingLogViewUpdate, + Delete: resourceLoggingLogViewDelete, + + Importer: &schema.ResourceImporter{ + State: resourceLoggingLogViewImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The bucket of the resource`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `The resource name of the view. 
For example: \'projects/my-project/locations/global/buckets/my-bucket/views/my-view\'`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Describes this view.`, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: `Filter that restricts which log entries in a bucket are visible in this view. Filters are restricted to be a logical AND of ==/!= of any of the following: - originating project/folder/organization/billing account. - resource type - log id For example: SOURCE("projects/myproject") AND resource.type = "gce_instance" AND LOG_ID("stdout")`, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The location of the resource. The supported locations are: global, us-central1, us-east1, us-west1, asia-east1, europe-west1.`, + }, + "parent": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The parent of the resource.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The creation timestamp of the view.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
The last update timestamp of the view.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceLoggingLogViewCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandLoggingLogViewName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandLoggingLogViewDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + filterProp, err := expandLoggingLogViewFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + obj, err = resourceLoggingLogViewEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}{{parent}}/locations/{{location}}/buckets/{{bucket}}/views?viewId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new LogView: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating LogView: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating LogView %q: %#v", d.Id(), res) + + return resourceLoggingLogViewRead(d, meta) +} + +func resourceLoggingLogViewRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("LoggingLogView %q", d.Id())) + } + + if err := d.Set("description", flattenLoggingLogViewDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading LogView: %s", err) + } + if err := d.Set("create_time", flattenLoggingLogViewCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading LogView: %s", err) + } + if err := d.Set("update_time", flattenLoggingLogViewUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading LogView: %s", err) + } + if err := d.Set("filter", flattenLoggingLogViewFilter(res["filter"], d, config)); err != nil { + return 
fmt.Errorf("Error reading LogView: %s", err) + } + + return nil +} + +func resourceLoggingLogViewUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandLoggingLogViewDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + filterProp, err := expandLoggingLogViewFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + obj, err = resourceLoggingLogViewEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating LogView %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("filter") { + updateMask = append(updateMask, "filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, 
+ Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating LogView %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating LogView %q: %#v", d.Id(), res) + } + + return resourceLoggingLogViewRead(d, meta) +} + +func resourceLoggingLogViewDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting LogView %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "LogView") + } + + log.Printf("[DEBUG] Finished deleting LogView %q: %#v", d.Id(), res) + return nil +} + +func resourceLoggingLogViewImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/locations/(?P[^/]+)/buckets/(?P[^/]+)/views/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}") + if err != nil { + return nil, 
fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenLoggingLogViewDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingLogViewCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingLogViewUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingLogViewFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandLoggingLogViewName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingLogViewDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingLogViewFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceLoggingLogViewEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Extract any empty fields from the bucket field. + parent := d.Get("parent").(string) + bucket := d.Get("bucket").(string) + parent, err := ExtractFieldByPattern("parent", parent, bucket, "((projects|folders|organizations|billingAccounts)/[a-z0-9A-Z-]*)/locations/.*") + if err != nil { + return nil, fmt.Errorf("error extracting parent field: %s", err) + } + location := d.Get("location").(string) + location, err = ExtractFieldByPattern("location", location, bucket, "[a-zA-Z]*/[a-z0-9A-Z-]*/locations/([a-z0-9-]*)/buckets/.*") + if err != nil { + return nil, fmt.Errorf("error extracting location field: %s", err) + } + // Set parent to the extracted value. 
+ d.Set("parent", parent) + // Set all the other fields to their short forms before forming url and setting ID. + bucket = tpgresource.GetResourceNameFromSelfLink(bucket) + name := d.Get("name").(string) + name = tpgresource.GetResourceNameFromSelfLink(name) + d.Set("location", location) + d.Set("bucket", bucket) + d.Set("name", name) + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_metric.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_metric.go new file mode 100644 index 0000000000..379a499060 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_metric.go @@ -0,0 +1,1171 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package logging + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceLoggingMetric() *schema.Resource { + return &schema.Resource{ + Create: resourceLoggingMetricCreate, + Read: resourceLoggingMetricRead, + Update: resourceLoggingMetricUpdate, + Delete: resourceLoggingMetricDelete, + + Importer: &schema.ResourceImporter{ + State: resourceLoggingMetricImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeString, + Required: true, + Description: `An advanced logs filter (https://cloud.google.com/logging/docs/view/advanced-filters) which +is used to match log entries.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `The client-assigned metric identifier. Examples - "error_count", "nginx/requests". +Metric identifiers are limited to 100 characters and can include only the following +characters A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash +character (/) denotes a hierarchy of name pieces, and it cannot be the first character +of the name.`, + }, + "bucket_name": { + Type: schema.TypeString, + Optional: true, + Description: `The resource name of the Log Bucket that owns the Log Metric. Only Log Buckets in projects +are supported. 
The bucket has to be in the same project as the metric.`, + }, + "bucket_options": { + Type: schema.TypeList, + Optional: true, + Description: `The bucketOptions are required when the logs-based metric is using a DISTRIBUTION value type and it +describes the bucket boundaries used to create a histogram of the extracted values.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "explicit_buckets": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies a set of buckets with arbitrary widths.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bounds": { + Type: schema.TypeList, + Required: true, + Description: `The values must be monotonically increasing.`, + Elem: &schema.Schema{ + Type: schema.TypeFloat, + }, + }, + }, + }, + AtLeastOneOf: []string{"bucket_options.0.linear_buckets", "bucket_options.0.exponential_buckets", "bucket_options.0.explicit_buckets"}, + }, + "exponential_buckets": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies an exponential sequence of buckets that have a width that is proportional to the value of +the lower bound. 
Each bucket represents a constant relative uncertainty on a specific value in the bucket.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "growth_factor": { + Type: schema.TypeFloat, + Optional: true, + Description: `Must be greater than 1.`, + AtLeastOneOf: []string{"bucket_options.0.exponential_buckets.0.num_finite_buckets", "bucket_options.0.exponential_buckets.0.growth_factor", "bucket_options.0.exponential_buckets.0.scale"}, + }, + "num_finite_buckets": { + Type: schema.TypeInt, + Optional: true, + Description: `Must be greater than 0.`, + AtLeastOneOf: []string{"bucket_options.0.exponential_buckets.0.num_finite_buckets", "bucket_options.0.exponential_buckets.0.growth_factor", "bucket_options.0.exponential_buckets.0.scale"}, + }, + "scale": { + Type: schema.TypeFloat, + Optional: true, + Description: `Must be greater than 0.`, + AtLeastOneOf: []string{"bucket_options.0.exponential_buckets.0.num_finite_buckets", "bucket_options.0.exponential_buckets.0.growth_factor", "bucket_options.0.exponential_buckets.0.scale"}, + }, + }, + }, + AtLeastOneOf: []string{"bucket_options.0.linear_buckets", "bucket_options.0.exponential_buckets", "bucket_options.0.explicit_buckets"}, + }, + "linear_buckets": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). 
+Each bucket represents a constant absolute uncertainty on the specific value in the bucket.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "num_finite_buckets": { + Type: schema.TypeInt, + Optional: true, + Description: `Must be greater than 0.`, + AtLeastOneOf: []string{"bucket_options.0.linear_buckets.0.num_finite_buckets", "bucket_options.0.linear_buckets.0.width", "bucket_options.0.linear_buckets.0.offset"}, + }, + "offset": { + Type: schema.TypeFloat, + Optional: true, + Description: `Lower bound of the first bucket.`, + AtLeastOneOf: []string{"bucket_options.0.linear_buckets.0.num_finite_buckets", "bucket_options.0.linear_buckets.0.width", "bucket_options.0.linear_buckets.0.offset"}, + }, + "width": { + Type: schema.TypeFloat, + Optional: true, + Description: `Must be greater than 0.`, + AtLeastOneOf: []string{"bucket_options.0.linear_buckets.0.num_finite_buckets", "bucket_options.0.linear_buckets.0.width", "bucket_options.0.linear_buckets.0.offset"}, + }, + }, + }, + AtLeastOneOf: []string{"bucket_options.0.linear_buckets", "bucket_options.0.exponential_buckets", "bucket_options.0.explicit_buckets"}, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of this metric, which is used in documentation. The maximum length of the +description is 8000 characters.`, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: `If set to True, then this metric is disabled and it does not generate any points.`, + }, + "label_extractors": { + Type: schema.TypeMap, + Optional: true, + Description: `A map from a label key string to an extractor expression which is used to extract data from a log +entry field and assign as the label value. Each label key specified in the LabelDescriptor must +have an associated extractor expression in this map. 
The syntax of the extractor expression is +the same as for the valueExtractor field.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "metric_descriptor": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `The optional metric descriptor associated with the logs-based metric. +If unspecified, it uses a default metric descriptor with a DELTA metric kind, +INT64 value type, with no labels and a unit of "1". Such a metric counts the +number of log entries matching the filter expression.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric_kind": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"DELTA", "GAUGE", "CUMULATIVE"}), + Description: `Whether the metric records instantaneous values, changes to a value, etc. +Some combinations of metricKind and valueType might not be supported. +For counter metrics, set this to DELTA. Possible values: ["DELTA", "GAUGE", "CUMULATIVE"]`, + }, + "value_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION", "MONEY"}), + Description: `Whether the measurement is an integer, a floating-point number, etc. +Some combinations of metricKind and valueType might not be supported. +For counter metrics, set this to INT64. Possible values: ["BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION", "MONEY"]`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `A concise name for the metric, which can be displayed in user interfaces. Use sentence case +without an ending period, for example "Request count". This field is optional but it is +recommended to be set for any metrics associated with user-visible concepts, such as Quota.`, + }, + "labels": { + Type: schema.TypeSet, + Optional: true, + Description: `The set of labels that can be used to describe a specific instance of this metric type. 
For +example, the appengine.googleapis.com/http/server/response_latencies metric type has a label +for the HTTP response code, response_code, so you can look at latencies for successful responses +or just for responses that failed.`, + Elem: loggingMetricMetricDescriptorLabelsSchema(), + // Default schema.HashSchema is used. + }, + "unit": { + Type: schema.TypeString, + Optional: true, + Description: `The unit in which the metric value is reported. It is only applicable if the valueType is +'INT64', 'DOUBLE', or 'DISTRIBUTION'. The supported units are a subset of +[The Unified Code for Units of Measure](http://unitsofmeasure.org/ucum.html) standard`, + Default: "1", + }, + }, + }, + }, + "value_extractor": { + Type: schema.TypeString, + Optional: true, + Description: `A valueExtractor is required when using a distribution logs-based metric to extract the values to +record from a log entry. Two functions are supported for value extraction - EXTRACT(field) or +REGEXP_EXTRACT(field, regex). The argument are 1. field - The name of the log entry field from which +the value is to be extracted. 2. regex - A regular expression using the Google RE2 syntax +(https://github.com/google/re2/wiki/Syntax) with a single capture group to extract data from the specified +log entry field. The value of the field is converted to a string before applying the regex. 
It is an +error to specify a regex that does not include exactly one capture group.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func loggingMetricMetricDescriptorLabelsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The label key.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description for the label.`, + }, + "value_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"BOOL", "INT64", "STRING", ""}), + Description: `The type of data that can be assigned to the label. Default value: "STRING" Possible values: ["BOOL", "INT64", "STRING"]`, + Default: "STRING", + }, + }, + } +} + +func resourceLoggingMetricCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandLoggingMetricName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandLoggingMetricDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + bucketNameProp, err := expandLoggingMetricBucketName(d.Get("bucket_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bucket_name"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(bucketNameProp)) && (ok || !reflect.DeepEqual(v, bucketNameProp)) { + obj["bucketName"] = bucketNameProp + } + disabledProp, err := expandLoggingMetricDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(disabledProp)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + filterProp, err := expandLoggingMetricFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + metricDescriptorProp, err := expandLoggingMetricMetricDescriptor(d.Get("metric_descriptor"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metric_descriptor"); !tpgresource.IsEmptyValue(reflect.ValueOf(metricDescriptorProp)) && (ok || !reflect.DeepEqual(v, metricDescriptorProp)) { + obj["metricDescriptor"] = metricDescriptorProp + } + labelExtractorsProp, err := expandLoggingMetricLabelExtractors(d.Get("label_extractors"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_extractors"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelExtractorsProp)) && (ok || !reflect.DeepEqual(v, labelExtractorsProp)) { + obj["labelExtractors"] = labelExtractorsProp + } + valueExtractorProp, err := expandLoggingMetricValueExtractor(d.Get("value_extractor"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("value_extractor"); !tpgresource.IsEmptyValue(reflect.ValueOf(valueExtractorProp)) && (ok || !reflect.DeepEqual(v, valueExtractorProp)) { + obj["valueExtractor"] = valueExtractorProp + } + bucketOptionsProp, err := expandLoggingMetricBucketOptions(d.Get("bucket_options"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("bucket_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(bucketOptionsProp)) && (ok || !reflect.DeepEqual(v, bucketOptionsProp)) { + obj["bucketOptions"] = bucketOptionsProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "customMetric/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/metrics") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Metric: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Metric: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Metric: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating Metric %q: %#v", d.Id(), res) + + return resourceLoggingMetricRead(d, meta) +} + +func resourceLoggingMetricRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/metrics/{{%name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Metric: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("LoggingMetric %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Metric: %s", err) + } + + if err := d.Set("name", flattenLoggingMetricName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Metric: %s", err) + } + if err := d.Set("description", flattenLoggingMetricDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Metric: %s", err) + } + if err := d.Set("bucket_name", flattenLoggingMetricBucketName(res["bucketName"], d, config)); err != nil { + return fmt.Errorf("Error reading Metric: %s", err) + } + if err := d.Set("disabled", 
flattenLoggingMetricDisabled(res["disabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Metric: %s", err) + } + if err := d.Set("filter", flattenLoggingMetricFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading Metric: %s", err) + } + if err := d.Set("metric_descriptor", flattenLoggingMetricMetricDescriptor(res["metricDescriptor"], d, config)); err != nil { + return fmt.Errorf("Error reading Metric: %s", err) + } + if err := d.Set("label_extractors", flattenLoggingMetricLabelExtractors(res["labelExtractors"], d, config)); err != nil { + return fmt.Errorf("Error reading Metric: %s", err) + } + if err := d.Set("value_extractor", flattenLoggingMetricValueExtractor(res["valueExtractor"], d, config)); err != nil { + return fmt.Errorf("Error reading Metric: %s", err) + } + if err := d.Set("bucket_options", flattenLoggingMetricBucketOptions(res["bucketOptions"], d, config)); err != nil { + return fmt.Errorf("Error reading Metric: %s", err) + } + + return nil +} + +func resourceLoggingMetricUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Metric: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + nameProp, err := expandLoggingMetricName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandLoggingMetricDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + bucketNameProp, err := expandLoggingMetricBucketName(d.Get("bucket_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bucket_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bucketNameProp)) { + obj["bucketName"] = bucketNameProp + } + disabledProp, err := expandLoggingMetricDisabled(d.Get("disabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disabledProp)) { + obj["disabled"] = disabledProp + } + filterProp, err := expandLoggingMetricFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + metricDescriptorProp, err := expandLoggingMetricMetricDescriptor(d.Get("metric_descriptor"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metric_descriptor"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metricDescriptorProp)) { + obj["metricDescriptor"] = metricDescriptorProp + } + labelExtractorsProp, err := expandLoggingMetricLabelExtractors(d.Get("label_extractors"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_extractors"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelExtractorsProp)) { + obj["labelExtractors"] = labelExtractorsProp + } + valueExtractorProp, err := expandLoggingMetricValueExtractor(d.Get("value_extractor"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("value_extractor"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, valueExtractorProp)) { + obj["valueExtractor"] = valueExtractorProp + } + 
bucketOptionsProp, err := expandLoggingMetricBucketOptions(d.Get("bucket_options"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bucket_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bucketOptionsProp)) { + obj["bucketOptions"] = bucketOptionsProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "customMetric/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/metrics/{{%name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Metric %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Metric %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Metric %q: %#v", d.Id(), res) + } + + return resourceLoggingMetricRead(d, meta) +} + +func resourceLoggingMetricDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Metric: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "customMetric/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer 
transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/metrics/{{%name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Metric %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Metric") + } + + log.Printf("[DEBUG] Finished deleting Metric %q: %#v", d.Id(), res) + return nil +} + +func resourceLoggingMetricImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { + return nil, err + } + + return []*schema.ResourceData{d}, nil +} + +func flattenLoggingMetricName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricBucketName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricMetricDescriptor(v interface{}, 
d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["unit"] = + flattenLoggingMetricMetricDescriptorUnit(original["unit"], d, config) + transformed["value_type"] = + flattenLoggingMetricMetricDescriptorValueType(original["valueType"], d, config) + transformed["metric_kind"] = + flattenLoggingMetricMetricDescriptorMetricKind(original["metricKind"], d, config) + transformed["labels"] = + flattenLoggingMetricMetricDescriptorLabels(original["labels"], d, config) + transformed["display_name"] = + flattenLoggingMetricMetricDescriptorDisplayName(original["displayName"], d, config) + return []interface{}{transformed} +} +func flattenLoggingMetricMetricDescriptorUnit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricMetricDescriptorValueType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricMetricDescriptorMetricKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricMetricDescriptorLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(loggingMetricMetricDescriptorLabelsSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "key": flattenLoggingMetricMetricDescriptorLabelsKey(original["key"], d, config), + "description": flattenLoggingMetricMetricDescriptorLabelsDescription(original["description"], d, config), + "value_type": 
flattenLoggingMetricMetricDescriptorLabelsValueType(original["valueType"], d, config), + }) + } + return transformed +} +func flattenLoggingMetricMetricDescriptorLabelsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricMetricDescriptorLabelsDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricMetricDescriptorLabelsValueType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "STRING" + } + + return v +} + +func flattenLoggingMetricMetricDescriptorDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricLabelExtractors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricValueExtractor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricBucketOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["linear_buckets"] = + flattenLoggingMetricBucketOptionsLinearBuckets(original["linearBuckets"], d, config) + transformed["exponential_buckets"] = + flattenLoggingMetricBucketOptionsExponentialBuckets(original["exponentialBuckets"], d, config) + transformed["explicit_buckets"] = + flattenLoggingMetricBucketOptionsExplicitBuckets(original["explicitBuckets"], d, config) + return []interface{}{transformed} +} +func flattenLoggingMetricBucketOptionsLinearBuckets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original 
:= v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["num_finite_buckets"] = + flattenLoggingMetricBucketOptionsLinearBucketsNumFiniteBuckets(original["numFiniteBuckets"], d, config) + transformed["width"] = + flattenLoggingMetricBucketOptionsLinearBucketsWidth(original["width"], d, config) + transformed["offset"] = + flattenLoggingMetricBucketOptionsLinearBucketsOffset(original["offset"], d, config) + return []interface{}{transformed} +} +func flattenLoggingMetricBucketOptionsLinearBucketsNumFiniteBuckets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLoggingMetricBucketOptionsLinearBucketsWidth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricBucketOptionsLinearBucketsOffset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricBucketOptionsExponentialBuckets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["num_finite_buckets"] = + flattenLoggingMetricBucketOptionsExponentialBucketsNumFiniteBuckets(original["numFiniteBuckets"], d, config) + transformed["growth_factor"] = + flattenLoggingMetricBucketOptionsExponentialBucketsGrowthFactor(original["growthFactor"], d, config) + transformed["scale"] = + 
flattenLoggingMetricBucketOptionsExponentialBucketsScale(original["scale"], d, config) + return []interface{}{transformed} +} +func flattenLoggingMetricBucketOptionsExponentialBucketsNumFiniteBuckets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLoggingMetricBucketOptionsExponentialBucketsGrowthFactor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricBucketOptionsExponentialBucketsScale(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLoggingMetricBucketOptionsExplicitBuckets(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["bounds"] = + flattenLoggingMetricBucketOptionsExplicitBucketsBounds(original["bounds"], d, config) + return []interface{}{transformed} +} +func flattenLoggingMetricBucketOptionsExplicitBucketsBounds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandLoggingMetricName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricBucketName(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricMetricDescriptor(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUnit, err := expandLoggingMetricMetricDescriptorUnit(original["unit"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUnit); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unit"] = transformedUnit + } + + transformedValueType, err := expandLoggingMetricMetricDescriptorValueType(original["value_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValueType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["valueType"] = transformedValueType + } + + transformedMetricKind, err := expandLoggingMetricMetricDescriptorMetricKind(original["metric_kind"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMetricKind); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["metricKind"] = transformedMetricKind + } + + transformedLabels, err := expandLoggingMetricMetricDescriptorLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labels"] = transformedLabels + } + + transformedDisplayName, err := 
expandLoggingMetricMetricDescriptorDisplayName(original["display_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["displayName"] = transformedDisplayName + } + + return transformed, nil +} + +func expandLoggingMetricMetricDescriptorUnit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricMetricDescriptorValueType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricMetricDescriptorMetricKind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricMetricDescriptorLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandLoggingMetricMetricDescriptorLabelsKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedDescription, err := expandLoggingMetricMetricDescriptorLabelsDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedValueType, err := expandLoggingMetricMetricDescriptorLabelsValueType(original["value_type"], d, config) + if err != nil { + return 
nil, err + } else if val := reflect.ValueOf(transformedValueType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["valueType"] = transformedValueType + } + + req = append(req, transformed) + } + return req, nil +} + +func expandLoggingMetricMetricDescriptorLabelsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricMetricDescriptorLabelsDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricMetricDescriptorLabelsValueType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricMetricDescriptorDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricLabelExtractors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandLoggingMetricValueExtractor(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricBucketOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLinearBuckets, err := expandLoggingMetricBucketOptionsLinearBuckets(original["linear_buckets"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedLinearBuckets); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["linearBuckets"] = transformedLinearBuckets + } + + transformedExponentialBuckets, err := expandLoggingMetricBucketOptionsExponentialBuckets(original["exponential_buckets"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExponentialBuckets); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["exponentialBuckets"] = transformedExponentialBuckets + } + + transformedExplicitBuckets, err := expandLoggingMetricBucketOptionsExplicitBuckets(original["explicit_buckets"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExplicitBuckets); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["explicitBuckets"] = transformedExplicitBuckets + } + + return transformed, nil +} + +func expandLoggingMetricBucketOptionsLinearBuckets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNumFiniteBuckets, err := expandLoggingMetricBucketOptionsLinearBucketsNumFiniteBuckets(original["num_finite_buckets"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNumFiniteBuckets); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["numFiniteBuckets"] = transformedNumFiniteBuckets + } + + transformedWidth, err := expandLoggingMetricBucketOptionsLinearBucketsWidth(original["width"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWidth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["width"] = transformedWidth + } + + transformedOffset, err := expandLoggingMetricBucketOptionsLinearBucketsOffset(original["offset"], d, 
config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOffset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["offset"] = transformedOffset + } + + return transformed, nil +} + +func expandLoggingMetricBucketOptionsLinearBucketsNumFiniteBuckets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricBucketOptionsLinearBucketsWidth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricBucketOptionsLinearBucketsOffset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricBucketOptionsExponentialBuckets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNumFiniteBuckets, err := expandLoggingMetricBucketOptionsExponentialBucketsNumFiniteBuckets(original["num_finite_buckets"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNumFiniteBuckets); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["numFiniteBuckets"] = transformedNumFiniteBuckets + } + + transformedGrowthFactor, err := expandLoggingMetricBucketOptionsExponentialBucketsGrowthFactor(original["growth_factor"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGrowthFactor); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["growthFactor"] = transformedGrowthFactor + } + + transformedScale, err := expandLoggingMetricBucketOptionsExponentialBucketsScale(original["scale"], d, config) + if err != nil { + return nil, 
err + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scale"] = transformedScale + } + + return transformed, nil +} + +func expandLoggingMetricBucketOptionsExponentialBucketsNumFiniteBuckets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricBucketOptionsExponentialBucketsGrowthFactor(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricBucketOptionsExponentialBucketsScale(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLoggingMetricBucketOptionsExplicitBuckets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBounds, err := expandLoggingMetricBucketOptionsExplicitBucketsBounds(original["bounds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBounds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bounds"] = transformedBounds + } + + return transformed, nil +} + +func expandLoggingMetricBucketOptionsExplicitBucketsBounds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_metric_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_metric_sweeper.go new file mode 100644 index 0000000000..0489af2412 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_metric_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package logging + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("LoggingMetric", testSweepLoggingMetric) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepLoggingMetric(region string) error { + resourceName := "LoggingMetric" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": 
config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://logging.googleapis.com/v2/projects/{{project}}/metrics", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["metrics"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://logging.googleapis.com/v2/projects/{{project}}/metrics/{{%name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_organization_bucket_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_organization_bucket_config.go similarity index 82% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_organization_bucket_config.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_organization_bucket_config.go index c6bcbd5a55..a8d338f884 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_organization_bucket_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_organization_bucket_config.go @@ -1,10 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging import ( "fmt" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) var loggingOrganizationBucketConfigSchema = map[string]*schema.Schema{ @@ -16,7 +19,7 @@ var loggingOrganizationBucketConfigSchema = map[string]*schema.Schema{ }, } -func organizationBucketConfigID(d *schema.ResourceData, config *Config) (string, error) { +func organizationBucketConfigID(d *schema.ResourceData, config *transport_tpg.Config) (string, error) { organization := d.Get("organization").(string) location := d.Get("location").(string) bucketID := d.Get("bucket_id").(string) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_organization_sink.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_organization_sink.go similarity index 80% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_organization_sink.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_organization_sink.go index 10d26670c1..3c7129793f 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_organization_sink.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_organization_sink.go @@ -1,10 
+1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging import ( "fmt" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceLoggingOrganizationSink() *schema.Resource { @@ -39,8 +43,8 @@ func ResourceLoggingOrganizationSink() *schema.Resource { } func resourceLoggingOrganizationSinkCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -61,15 +65,15 @@ func resourceLoggingOrganizationSinkCreate(d *schema.ResourceData, meta interfac } func resourceLoggingOrganizationSinkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } sink, err := config.NewLoggingClient(userAgent).Organizations.Sinks.Get(d.Id()).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Organization Logging Sink %s", d.Get("name").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Organization Logging Sink %s", d.Get("name").(string))) } if err := flattenResourceLoggingSink(d, sink); err != nil { @@ -84,8 +88,8 @@ func resourceLoggingOrganizationSinkRead(d *schema.ResourceData, meta interface{ } func resourceLoggingOrganizationSinkUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -107,8 +111,8 @@ func resourceLoggingOrganizationSinkUpdate(d *schema.ResourceData, meta interfac } func resourceLoggingOrganizationSinkDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_project_bucket_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_project_bucket_config.go new file mode 100644 index 0000000000..a6786f8b8c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_project_bucket_config.go @@ -0,0 +1,393 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package logging + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +var loggingProjectBucketConfigSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The parent project that contains the logging bucket.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the bucket`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location of the bucket.`, + }, + "bucket_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the logging bucket. Logging automatically creates two log buckets: _Required and _Default.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `An optional description for this bucket.`, + }, + "locked": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether the bucket is locked. The retention period on a locked bucket cannot be changed. Locked buckets may only be deleted if they are empty.`, + }, + "retention_days": { + Type: schema.TypeInt, + Optional: true, + Default: 30, + Description: `Logs will be retained by default for this amount of time, after which they will automatically be deleted. The minimum retention period is 1 day. If this value is set to zero at bucket creation time, the default time of 30 days will be used.`, + }, + "enable_analytics": { + Type: schema.TypeBool, + Optional: true, + Description: `Enable log analytics for the bucket. 
Cannot be disabled once enabled.`, + DiffSuppressFunc: enableAnalyticsBackwardsChangeDiffSuppress, + }, + "lifecycle_state": { + Type: schema.TypeString, + Computed: true, + Description: `The bucket's lifecycle such as active or deleted.`, + }, + "cmek_settings": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: `The CMEK settings of the log bucket. If present, new log entries written to this log bucket are encrypted using the CMEK key provided in this configuration. If a log bucket has CMEK settings, the CMEK settings cannot be disabled later by updating the log bucket. Changing the KMS key is allowed.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the CMEK settings.`, + }, + "kms_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name for the configured Cloud KMS key. +KMS key name format: +"projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]" +To enable CMEK for the bucket, set this field to a valid kmsKeyName for which the associated service account has the required cloudkms.cryptoKeyEncrypterDecrypter roles assigned for the key. +The Cloud KMS key used by the bucket can be updated by changing the kmsKeyName to a new valid key name. Encryption operations that are in progress will be completed with the key that was in use when they started. Decryption operations will be completed using the key that was used at the time of encryption unless access to that key has been revoked. +See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/routing/managed-encryption-storage) for more information.`, + }, + "kms_key_version_name": { + Type: schema.TypeString, + Computed: true, + Description: `The CryptoKeyVersion resource name for the configured Cloud KMS key. 
+KMS key name format: +"projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]/cryptoKeyVersions/[VERSION]" +For example: +"projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1" +This is a read-only field used to convey the specific configured CryptoKeyVersion of kms_key that has been configured. It will be populated in cases where the CMEK settings are bound to a single key version.`, + }, + "service_account_id": { + Type: schema.TypeString, + Computed: true, + Description: `The service account associated with a project for which CMEK will apply. +Before enabling CMEK for a logging bucket, you must first assign the cloudkms.cryptoKeyEncrypterDecrypter role to the service account associated with the project for which CMEK will apply. Use [v2.getCmekSettings](https://cloud.google.com/logging/docs/reference/v2/rest/v2/TopLevel/getCmekSettings#google.logging.v2.ConfigServiceV2.GetCmekSettings) to obtain the service account ID. 
+See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/routing/managed-encryption-storage) for more information.`, + }, + }, + }, + }, +} + +func projectBucketConfigID(d *schema.ResourceData, config *transport_tpg.Config) (string, error) { + project := d.Get("project").(string) + location := d.Get("location").(string) + bucketID := d.Get("bucket_id").(string) + + if !strings.HasPrefix(project, "project") { + project = "projects/" + project + } + + id := fmt.Sprintf("%s/locations/%s/buckets/%s", project, location, bucketID) + return id, nil +} + +// Create Logging Bucket config +func ResourceLoggingProjectBucketConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceLoggingProjectBucketConfigAcquireOrCreate("project", projectBucketConfigID), + Read: resourceLoggingProjectBucketConfigRead, + Update: resourceLoggingProjectBucketConfigUpdate, + Delete: resourceLoggingBucketConfigDelete, + Importer: &schema.ResourceImporter{ + State: resourceLoggingBucketConfigImportState("project"), + }, + Schema: loggingProjectBucketConfigSchema, + UseJSONNumber: true, + } +} + +func resourceLoggingProjectBucketConfigAcquireOrCreate(parentType string, iDFunc loggingBucketConfigIDFunc) func(*schema.ResourceData, interface{}) error { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + id, err := iDFunc(d, config) + if err != nil { + return err + } + + if parentType == "project" { + //logging bucket can be created only at the project level, in future api may allow for folder, org and other parent resources + + log.Printf("[DEBUG] Fetching logging bucket config: %#v", id) + url, err := tpgresource.ReplaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", id)) + if err != nil { + return err + } + + res, _ := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: 
config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if res == nil { + log.Printf("[DEGUG] Loggin Bucket not exist %s", id) + // we need to pass the id in here because we don't want to set it in state + // until we know there won't be any errors on create + return resourceLoggingProjectBucketConfigCreate(d, meta, id) + } + } + + d.SetId(id) + + return resourceLoggingProjectBucketConfigUpdate(d, meta) + } +} + +func resourceLoggingProjectBucketConfigCreate(d *schema.ResourceData, meta interface{}, id string) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["name"] = d.Get("name") + obj["description"] = d.Get("description") + obj["locked"] = d.Get("locked") + obj["retentionDays"] = d.Get("retention_days") + obj["analyticsEnabled"] = d.Get("enable_analytics") + obj["cmekSettings"] = expandCmekSettings(d.Get("cmek_settings")) + + url, err := tpgresource.ReplaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/locations/{{location}}/buckets?bucketId={{bucket_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Bucket: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Bucket: %s", err) + } + + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Bucket %q: %#v", d.Id(), res) + + return 
resourceLoggingProjectBucketConfigRead(d, meta) +} + +func resourceLoggingProjectBucketConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + log.Printf("[DEBUG] Fetching logging bucket config: %#v", d.Id()) + + url, err := tpgresource.ReplaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", d.Id())) + if err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + log.Printf("[WARN] Unable to acquire logging bucket config at %s", d.Id()) + + d.SetId("") + return err + } + + if err := d.Set("name", res["name"]); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("description", res["description"]); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("locked", res["locked"]); err != nil { + return fmt.Errorf("Error setting locked: %s", err) + } + if err := d.Set("lifecycle_state", res["lifecycleState"]); err != nil { + return fmt.Errorf("Error setting lifecycle_state: %s", err) + } + if err := d.Set("retention_days", res["retentionDays"]); err != nil { + return fmt.Errorf("Error setting retention_days: %s", err) + } + if err := d.Set("enable_analytics", res["analyticsEnabled"]); err != nil { + return fmt.Errorf("Error setting enable_analytics: %s", err) + } + + if err := d.Set("cmek_settings", flattenCmekSettings(res["cmekSettings"])); err != nil { + return fmt.Errorf("Error setting cmek_settings: %s", err) + } + + return nil +} + +func resourceLoggingProjectBucketConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := 
make(map[string]interface{}) + + url, err := tpgresource.ReplaceVars(d, config, fmt.Sprintf("{{LoggingBasePath}}%s", d.Id())) + if err != nil { + return err + } + + updateMaskAnalytics := []string{} + // Check if analytics is being enabled. Analytics enablement is an atomic operation and can not be performed while other fields + // are being updated, so we enable analytics before updating the rest of the fields. + if d.HasChange("enable_analytics") { + obj["analyticsEnabled"] = d.Get("enable_analytics") + updateMaskAnalytics = append(updateMaskAnalytics, "analyticsEnabled") + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMaskAnalytics, ",")}) + if err != nil { + return err + } + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Logging Bucket Config %q: %s", d.Id(), err) + } + } + + obj["retentionDays"] = d.Get("retention_days") + obj["description"] = d.Get("description") + obj["cmekSettings"] = expandCmekSettings(d.Get("cmek_settings")) + updateMask := []string{} + if d.HasChange("retention_days") { + updateMask = append(updateMask, "retentionDays") + } + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + if d.HasChange("cmek_settings") { + updateMask = append(updateMask, "cmekSettings") + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + if len(updateMask) > 0 { + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + } + if err != nil { + return fmt.Errorf("Error updating Logging Bucket Config %q: %s", d.Id(), err) + } + 
+ // Check if locked is being changed (although removal will fail). Locking is + // an atomic operation and can not be performed while other fields. + // update locked last so that we lock *after* setting the right settings + if d.HasChange("locked") { + updateMaskLocked := []string{"locked"} + objLocked := map[string]interface{}{ + "locked": d.Get("locked"), + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMaskLocked, ",")}) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + RawURL: url, + UserAgent: userAgent, + Body: objLocked, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Logging Bucket Config %q: %s", d.Id(), err) + } + } + + return resourceLoggingProjectBucketConfigRead(d, meta) +} + +func enableAnalyticsBackwardsChangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + oldValue, _ := strconv.ParseBool(old) + if oldValue { + return true + } + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_project_sink.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_project_sink.go similarity index 80% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_project_sink.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_project_sink.go index 69f0b9bd2a..1839139836 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_project_sink.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_project_sink.go @@ -1,10 +1,15 @@ 
-package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package logging import ( "context" "errors" "fmt" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -41,13 +46,13 @@ func ResourceLoggingProjectSink() *schema.Resource { } func resourceLoggingProjectSinkCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -71,7 +76,7 @@ func resourceLoggingProjectSinkCustomizeDiff(ctx context.Context, d *schema.Reso return resourceLoggingProjectSinkCustomizeDiffFunc(d) } -func resourceLoggingProjectSinkCustomizeDiffFunc(diff TerraformResourceDiff) error { +func resourceLoggingProjectSinkCustomizeDiffFunc(diff tpgresource.TerraformResourceDiff) error { if !diff.HasChange("bigquery_options.#") { return nil } @@ -87,20 +92,20 @@ func resourceLoggingProjectSinkCustomizeDiffFunc(diff TerraformResourceDiff) err } func resourceLoggingProjectSinkRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } sink, err := config.NewLoggingClient(userAgent).Projects.Sinks.Get(d.Id()).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Project Logging Sink %s", 
d.Get("name").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project Logging Sink %s", d.Get("name").(string))) } if err := d.Set("project", project); err != nil { @@ -124,8 +129,8 @@ func resourceLoggingProjectSinkRead(d *schema.ResourceData, meta interface{}) er } func resourceLoggingProjectSinkUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -143,8 +148,8 @@ func resourceLoggingProjectSinkUpdate(d *schema.ResourceData, meta interface{}) } func resourceLoggingProjectSinkDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_sink.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_sink.go similarity index 97% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_sink.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_sink.go index 24a23e5c5a..39bee93d43 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_logging_sink.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/logging/resource_logging_sink.go @@ -1,10 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package logging import ( "fmt" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" "google.golang.org/api/logging/v2" ) @@ -26,7 +29,7 @@ func resourceLoggingSinkSchema() map[string]*schema.Schema { "filter": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: optionalSurroundingSpacesSuppress, + DiffSuppressFunc: tpgresource.OptionalSurroundingSpacesSuppress, Description: `The filter to apply when exporting logs. Only log entries that match the filter are exported.`, }, @@ -250,7 +253,7 @@ func flattenLoggingSinkExclusion(exclusions []*logging.LogExclusion) []map[strin func resourceLoggingSinkImportState(sinkType string) schema.StateFunc { return func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - loggingSinkId, err := parseLoggingSinkId(d.Id()) + loggingSinkId, err := ParseLoggingSinkId(d.Id()) if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/looker/looker_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/looker/looker_operation.go new file mode 100644 index 0000000000..c8e96c3d6f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/looker/looker_operation.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package looker + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type LookerOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *LookerOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.LookerBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) +} + +func createLookerWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*LookerOperationWaiter, error) { + w := &LookerOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func LookerOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createLookerWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func LookerOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, 
userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createLookerWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/looker/resource_looker_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/looker/resource_looker_instance.go new file mode 100644 index 0000000000..9e52f9fe24 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/looker/resource_looker_instance.go @@ -0,0 +1,1841 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package looker + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceLookerInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceLookerInstanceCreate, + Read: resourceLookerInstanceRead, + Update: resourceLookerInstanceUpdate, + Delete: resourceLookerInstanceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceLookerInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(90 * time.Minute), + Update: schema.DefaultTimeout(90 * time.Minute), + Delete: schema.DefaultTimeout(90 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^[a-z][a-z0-9-]{0,39}[a-z0-9]$`), + Description: `The ID of the instance or a fully qualified identifier for the instance.`, + }, + "admin_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Looker instance Admin settings.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_email_domains": { + Type: schema.TypeList, + Optional: true, + Description: `Email domain allowlist for the instance. + +Define the email domains to which your users can deliver Looker (Google Cloud core) content. +Updating this list will restart the instance. 
Updating the allowed email domains from terraform +means the value provided will be considered as the entire list and not an amendment to the +existing list of allowed email domains.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "consumer_network": { + Type: schema.TypeString, + Optional: true, + Description: `Network name in the consumer project in the format of: projects/{project}/global/networks/{network} +Note that the consumer network may be in a different GCP project than the consumer +project that is hosting the Looker Instance.`, + }, + "deny_maintenance_period": { + Type: schema.TypeList, + Optional: true, + Description: `Maintenance denial period for this instance. + +You must allow at least 14 days of maintenance availability +between any two deny maintenance periods.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "end_date": { + Type: schema.TypeList, + Required: true, + Description: `Required. Start date of the deny maintenance period`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 32), + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 +to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 13), + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a +month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 10000), + Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without +a year.`, + }, + }, + }, + }, + "start_date": { + Type: schema.TypeList, + Required: true, + Description: `Required. 
Start date of the deny maintenance period`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 32), + Description: `Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 +to specify a year by itself or a year and month where the day isn't significant.`, + }, + "month": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 13), + Description: `Month of a year. Must be from 1 to 12, or 0 to specify a year without a +month and day.`, + }, + "year": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 10000), + Description: `Year of the date. Must be from 1 to 9999, or 0 to specify a date without +a year.`, + }, + }, + }, + }, + "time": { + Type: schema.TypeList, + Required: true, + Description: `Required. Start time of the window in UTC time.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 23), + Description: `Hours of day in 24 hour format. Should be from 0 to 23.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 60), + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 999999999), + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 60), + Description: `Seconds of minutes of the time. 
Must normally be from 0 to 59.`, + }, + }, + }, + }, + }, + }, + }, + "encryption_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Looker instance encryption settings.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the customer managed encryption key (CMEK) in KMS.`, + }, + "kms_key_name_version": { + Type: schema.TypeString, + Computed: true, + Description: `Full name and version of the CMEK key currently in use to encrypt Looker data.`, + }, + "kms_key_state": { + Type: schema.TypeString, + Computed: true, + Description: `Status of the customer managed encryption key (CMEK) in KMS.`, + }, + }, + }, + }, + "maintenance_window": { + Type: schema.TypeList, + Optional: true, + Description: `Maintenance window for an instance. + +Maintenance of your instance takes place once a month, and will require +your instance to be restarted during updates, which will temporarily +disrupt service.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day_of_week": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), + Description: `Required. Day of the week for this MaintenanceWindow (in UTC). + +- MONDAY: Monday +- TUESDAY: Tuesday +- WEDNESDAY: Wednesday +- THURSDAY: Thursday +- FRIDAY: Friday +- SATURDAY: Saturday +- SUNDAY: Sunday Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "start_time": { + Type: schema.TypeList, + Required: true, + Description: `Required. 
Start time of the window in UTC time.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 23), + Description: `Hours of day in 24 hour format. Should be from 0 to 23.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 60), + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 999999999), + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 60), + Description: `Seconds of minutes of the time. Must normally be from 0 to 59.`, + }, + }, + }, + }, + }, + }, + }, + "oauth_config": { + Type: schema.TypeList, + Optional: true, + Description: `Looker Instance OAuth login settings.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_id": { + Type: schema.TypeString, + Required: true, + Description: `The client ID for the Oauth config.`, + }, + "client_secret": { + Type: schema.TypeString, + Required: true, + Description: `The client secret for the Oauth config.`, + }, + }, + }, + }, + "platform_edition": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"LOOKER_CORE_TRIAL", "LOOKER_CORE_STANDARD", "LOOKER_CORE_STANDARD_ANNUAL", "LOOKER_CORE_ENTERPRISE_ANNUAL", "LOOKER_CORE_EMBED_ANNUAL", "LOOKER_MODELER", ""}), + Description: `Platform editions for a Looker instance. Each edition maps to a set of instance features, like its size. 
Must be one of these values: +- LOOKER_CORE_TRIAL: trial instance +- LOOKER_CORE_STANDARD: pay as you go standard instance +- LOOKER_CORE_STANDARD_ANNUAL: subscription standard instance +- LOOKER_CORE_ENTERPRISE_ANNUAL: subscription enterprise instance +- LOOKER_CORE_EMBED_ANNUAL: subscription embed instance +- LOOKER_MODELER: standalone modeling service Default value: "LOOKER_CORE_TRIAL" Possible values: ["LOOKER_CORE_TRIAL", "LOOKER_CORE_STANDARD", "LOOKER_CORE_STANDARD_ANNUAL", "LOOKER_CORE_ENTERPRISE_ANNUAL", "LOOKER_CORE_EMBED_ANNUAL", "LOOKER_MODELER"]`, + Default: "LOOKER_CORE_TRIAL", + }, + "private_ip_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether private IP is enabled on the Looker instance.`, + Default: false, + }, + "public_ip_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether public IP is enabled on the Looker instance.`, + Default: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The name of the Looker region of the instance.`, + }, + "reserved_range": { + Type: schema.TypeString, + Optional: true, + Description: `Name of a reserved IP address range within the consumer network, to be used for +private service access connection. User may or may not specify this in a request.`, + }, + "user_metadata": { + Type: schema.TypeList, + Optional: true, + Description: `Metadata about users for a Looker instance. + +These settings are only available when platform edition LOOKER_CORE_STANDARD is set. + +There are ten Standard and two Developer users included in the cost of the product. +You can allocate additional Standard, Viewer, and Developer users for this instance. +It is an optional step and can be modified later. 
+ +With the Standard edition of Looker (Google Cloud core), you can provision up to 50 +total users, distributed across Viewer, Standard, and Developer.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "additional_developer_user_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of additional Developer Users to allocate to the Looker Instance.`, + }, + "additional_standard_user_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of additional Standard Users to allocate to the Looker Instance.`, + }, + "additional_viewer_user_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of additional Viewer Users to allocate to the Looker Instance.`, + }, + }, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time the instance was created in RFC3339 UTC "Zulu" format, +accurate to nanoseconds.`, + }, + "egress_public_ip": { + Type: schema.TypeString, + Computed: true, + Description: `Public Egress IP (IPv4).`, + }, + "ingress_private_ip": { + Type: schema.TypeString, + Computed: true, + Description: `Private Ingress IP (IPv4).`, + }, + "ingress_public_ip": { + Type: schema.TypeString, + Computed: true, + Description: `Public Ingress IP (IPv4).`, + }, + "looker_uri": { + Type: schema.TypeString, + Computed: true, + Description: `Looker instance URI which can be used to access the Looker Instance UI.`, + }, + "looker_version": { + Type: schema.TypeString, + Computed: true, + Description: `The Looker version that the instance is using.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time the instance was updated in RFC3339 UTC "Zulu" format, +accurate to nanoseconds.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceLookerInstanceCreate(d *schema.ResourceData, meta interface{}) error { 
+ config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + adminSettingsProp, err := expandLookerInstanceAdminSettings(d.Get("admin_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("admin_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(adminSettingsProp)) && (ok || !reflect.DeepEqual(v, adminSettingsProp)) { + obj["adminSettings"] = adminSettingsProp + } + consumerNetworkProp, err := expandLookerInstanceConsumerNetwork(d.Get("consumer_network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("consumer_network"); !tpgresource.IsEmptyValue(reflect.ValueOf(consumerNetworkProp)) && (ok || !reflect.DeepEqual(v, consumerNetworkProp)) { + obj["consumerNetwork"] = consumerNetworkProp + } + denyMaintenancePeriodProp, err := expandLookerInstanceDenyMaintenancePeriod(d.Get("deny_maintenance_period"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("deny_maintenance_period"); !tpgresource.IsEmptyValue(reflect.ValueOf(denyMaintenancePeriodProp)) && (ok || !reflect.DeepEqual(v, denyMaintenancePeriodProp)) { + obj["denyMaintenancePeriod"] = denyMaintenancePeriodProp + } + encryptionConfigProp, err := expandLookerInstanceEncryptionConfig(d.Get("encryption_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionConfigProp)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { + obj["encryptionConfig"] = encryptionConfigProp + } + maintenanceWindowProp, err := expandLookerInstanceMaintenanceWindow(d.Get("maintenance_window"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("maintenance_window"); !tpgresource.IsEmptyValue(reflect.ValueOf(maintenanceWindowProp)) && (ok || !reflect.DeepEqual(v, 
maintenanceWindowProp)) { + obj["maintenanceWindow"] = maintenanceWindowProp + } + oauthConfigProp, err := expandLookerInstanceOauthConfig(d.Get("oauth_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("oauth_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(oauthConfigProp)) && (ok || !reflect.DeepEqual(v, oauthConfigProp)) { + obj["oauthConfig"] = oauthConfigProp + } + platformEditionProp, err := expandLookerInstancePlatformEdition(d.Get("platform_edition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("platform_edition"); !tpgresource.IsEmptyValue(reflect.ValueOf(platformEditionProp)) && (ok || !reflect.DeepEqual(v, platformEditionProp)) { + obj["platformEdition"] = platformEditionProp + } + privateIpEnabledProp, err := expandLookerInstancePrivateIpEnabled(d.Get("private_ip_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("private_ip_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(privateIpEnabledProp)) && (ok || !reflect.DeepEqual(v, privateIpEnabledProp)) { + obj["privateIpEnabled"] = privateIpEnabledProp + } + publicIpEnabledProp, err := expandLookerInstancePublicIpEnabled(d.Get("public_ip_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("public_ip_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(publicIpEnabledProp)) && (ok || !reflect.DeepEqual(v, publicIpEnabledProp)) { + obj["publicIpEnabled"] = publicIpEnabledProp + } + reservedRangeProp, err := expandLookerInstanceReservedRange(d.Get("reserved_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reserved_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(reservedRangeProp)) && (ok || !reflect.DeepEqual(v, reservedRangeProp)) { + obj["reservedRange"] = reservedRangeProp + } + userMetadataProp, err := expandLookerInstanceUserMetadata(d.Get("user_metadata"), d, config) + if err != nil { + return err + } else if v, ok 
:= d.GetOkExists("user_metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(userMetadataProp)) && (ok || !reflect.DeepEqual(v, userMetadataProp)) { + obj["userMetadata"] = userMetadataProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{LookerBasePath}}projects/{{project}}/locations/{{region}}/instances?instanceId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Instance: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return fmt.Errorf("Error creating Instance: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = LookerOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Instance", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Instance: %s", err) + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) + + return resourceLookerInstanceRead(d, meta) +} + +func resourceLookerInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{LookerBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("LookerInstance %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + if err := d.Set("admin_settings", flattenLookerInstanceAdminSettings(res["adminSettings"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("consumer_network", flattenLookerInstanceConsumerNetwork(res["consumerNetwork"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("create_time", 
flattenLookerInstanceCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("deny_maintenance_period", flattenLookerInstanceDenyMaintenancePeriod(res["denyMaintenancePeriod"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("egress_public_ip", flattenLookerInstanceEgressPublicIp(res["egressPublicIp"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("encryption_config", flattenLookerInstanceEncryptionConfig(res["encryptionConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("ingress_private_ip", flattenLookerInstanceIngressPrivateIp(res["ingressPrivateIp"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("ingress_public_ip", flattenLookerInstanceIngressPublicIp(res["ingressPublicIp"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("looker_version", flattenLookerInstanceLookerVersion(res["lookerVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("looker_uri", flattenLookerInstanceLookerUri(res["lookerUri"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("maintenance_window", flattenLookerInstanceMaintenanceWindow(res["maintenanceWindow"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("platform_edition", flattenLookerInstancePlatformEdition(res["platformEdition"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("private_ip_enabled", flattenLookerInstancePrivateIpEnabled(res["privateIpEnabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := 
d.Set("public_ip_enabled", flattenLookerInstancePublicIpEnabled(res["publicIpEnabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("reserved_range", flattenLookerInstanceReservedRange(res["reservedRange"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("update_time", flattenLookerInstanceUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("user_metadata", flattenLookerInstanceUserMetadata(res["userMetadata"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + return nil +} + +func resourceLookerInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + adminSettingsProp, err := expandLookerInstanceAdminSettings(d.Get("admin_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("admin_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, adminSettingsProp)) { + obj["adminSettings"] = adminSettingsProp + } + consumerNetworkProp, err := expandLookerInstanceConsumerNetwork(d.Get("consumer_network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("consumer_network"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, consumerNetworkProp)) { + obj["consumerNetwork"] = consumerNetworkProp + } + denyMaintenancePeriodProp, err := expandLookerInstanceDenyMaintenancePeriod(d.Get("deny_maintenance_period"), d, config) + if err != 
nil { + return err + } else if v, ok := d.GetOkExists("deny_maintenance_period"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, denyMaintenancePeriodProp)) { + obj["denyMaintenancePeriod"] = denyMaintenancePeriodProp + } + encryptionConfigProp, err := expandLookerInstanceEncryptionConfig(d.Get("encryption_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { + obj["encryptionConfig"] = encryptionConfigProp + } + maintenanceWindowProp, err := expandLookerInstanceMaintenanceWindow(d.Get("maintenance_window"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("maintenance_window"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maintenanceWindowProp)) { + obj["maintenanceWindow"] = maintenanceWindowProp + } + oauthConfigProp, err := expandLookerInstanceOauthConfig(d.Get("oauth_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("oauth_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, oauthConfigProp)) { + obj["oauthConfig"] = oauthConfigProp + } + privateIpEnabledProp, err := expandLookerInstancePrivateIpEnabled(d.Get("private_ip_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("private_ip_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, privateIpEnabledProp)) { + obj["privateIpEnabled"] = privateIpEnabledProp + } + publicIpEnabledProp, err := expandLookerInstancePublicIpEnabled(d.Get("public_ip_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("public_ip_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, publicIpEnabledProp)) { + obj["publicIpEnabled"] = publicIpEnabledProp + } + reservedRangeProp, 
err := expandLookerInstanceReservedRange(d.Get("reserved_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reserved_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, reservedRangeProp)) { + obj["reservedRange"] = reservedRangeProp + } + userMetadataProp, err := expandLookerInstanceUserMetadata(d.Get("user_metadata"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("user_metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, userMetadataProp)) { + obj["userMetadata"] = userMetadataProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{LookerBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("admin_settings") { + updateMask = append(updateMask, "admin_settings.allowed_email_domains") + } + + if d.HasChange("consumer_network") { + updateMask = append(updateMask, "consumerNetwork") + } + + if d.HasChange("deny_maintenance_period") { + updateMask = append(updateMask, "denyMaintenancePeriod") + } + + if d.HasChange("encryption_config") { + updateMask = append(updateMask, "encryptionConfig") + } + + if d.HasChange("maintenance_window") { + updateMask = append(updateMask, "maintenanceWindow") + } + + if d.HasChange("oauth_config") { + updateMask = append(updateMask, "oauthConfig") + } + + if d.HasChange("private_ip_enabled") { + updateMask = append(updateMask, "privateIpEnabled") + } + + if d.HasChange("public_ip_enabled") { + updateMask = append(updateMask, "publicIpEnabled") + } + + if d.HasChange("reserved_range") { + updateMask = append(updateMask, "reservedRange") + } + + if d.HasChange("user_metadata") { + updateMask = append(updateMask, "userMetadata") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + 
url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + + if err != nil { + return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) + } + + err = LookerOperationWaitTime( + config, res, project, "Updating Instance", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceLookerInstanceRead(d, meta) +} + +func resourceLookerInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{LookerBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Instance %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: 
"DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Instance") + } + + err = LookerOperationWaitTime( + config, res, project, "Deleting Instance", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) + return nil +} + +func resourceLookerInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<region>[^/]+)/instances/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<name>[^/]+)", + "(?P<region>[^/]+)/(?P<name>[^/]+)", + "(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenLookerInstanceAdminSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["allowed_email_domains"] = + flattenLookerInstanceAdminSettingsAllowedEmailDomains(original["allowedEmailDomains"], d, config) + return []interface{}{transformed} +} +func flattenLookerInstanceAdminSettingsAllowedEmailDomains(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceConsumerNetwork(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceDenyMaintenancePeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["start_date"] = + flattenLookerInstanceDenyMaintenancePeriodStartDate(original["startDate"], d, config) + transformed["end_date"] = + flattenLookerInstanceDenyMaintenancePeriodEndDate(original["endDate"], d, config) + transformed["time"] = + flattenLookerInstanceDenyMaintenancePeriodTime(original["time"], d, config) + return []interface{}{transformed} +} +func flattenLookerInstanceDenyMaintenancePeriodStartDate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenLookerInstanceDenyMaintenancePeriodStartDateYear(original["year"], d, config) + transformed["month"] = + flattenLookerInstanceDenyMaintenancePeriodStartDateMonth(original["month"], d, config) + transformed["day"] = + flattenLookerInstanceDenyMaintenancePeriodStartDateDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenLookerInstanceDenyMaintenancePeriodStartDateYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let 
terraform core handle it otherwise +} + +func flattenLookerInstanceDenyMaintenancePeriodStartDateMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLookerInstanceDenyMaintenancePeriodStartDateDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLookerInstanceDenyMaintenancePeriodEndDate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["year"] = + flattenLookerInstanceDenyMaintenancePeriodEndDateYear(original["year"], d, config) + transformed["month"] = + flattenLookerInstanceDenyMaintenancePeriodEndDateMonth(original["month"], d, config) + transformed["day"] = + flattenLookerInstanceDenyMaintenancePeriodEndDateDay(original["day"], d, config) + return []interface{}{transformed} +} +func flattenLookerInstanceDenyMaintenancePeriodEndDateYear(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); 
err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLookerInstanceDenyMaintenancePeriodEndDateMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLookerInstanceDenyMaintenancePeriodEndDateDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLookerInstanceDenyMaintenancePeriodTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenLookerInstanceDenyMaintenancePeriodTimeHours(original["hours"], d, config) + transformed["minutes"] = + flattenLookerInstanceDenyMaintenancePeriodTimeMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenLookerInstanceDenyMaintenancePeriodTimeSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenLookerInstanceDenyMaintenancePeriodTimeNanos(original["nanos"], d, config) + return 
[]interface{}{transformed} +} +func flattenLookerInstanceDenyMaintenancePeriodTimeHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLookerInstanceDenyMaintenancePeriodTimeMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLookerInstanceDenyMaintenancePeriodTimeSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLookerInstanceDenyMaintenancePeriodTimeNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v 
// let terraform core handle it otherwise +} + +func flattenLookerInstanceEgressPublicIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceEncryptionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenLookerInstanceEncryptionConfigKmsKeyName(original["kmsKeyName"], d, config) + transformed["kms_key_state"] = + flattenLookerInstanceEncryptionConfigKmsKeyState(original["kmsKeyState"], d, config) + transformed["kms_key_name_version"] = + flattenLookerInstanceEncryptionConfigKmsKeyNameVersion(original["kmsKeyNameVersion"], d, config) + return []interface{}{transformed} +} +func flattenLookerInstanceEncryptionConfigKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceEncryptionConfigKmsKeyState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceEncryptionConfigKmsKeyNameVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceIngressPrivateIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceIngressPublicIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceLookerVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceLookerUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceMaintenanceWindow(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["day_of_week"] = + flattenLookerInstanceMaintenanceWindowDayOfWeek(original["dayOfWeek"], d, config) + transformed["start_time"] = + flattenLookerInstanceMaintenanceWindowStartTime(original["startTime"], d, config) + return []interface{}{transformed} +} +func flattenLookerInstanceMaintenanceWindowDayOfWeek(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceMaintenanceWindowStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenLookerInstanceMaintenanceWindowStartTimeHours(original["hours"], d, config) + transformed["minutes"] = + flattenLookerInstanceMaintenanceWindowStartTimeMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenLookerInstanceMaintenanceWindowStartTimeSeconds(original["seconds"], d, config) + transformed["nanos"] = + flattenLookerInstanceMaintenanceWindowStartTimeNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenLookerInstanceMaintenanceWindowStartTimeHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenLookerInstanceMaintenanceWindowStartTimeMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLookerInstanceMaintenanceWindowStartTimeSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLookerInstanceMaintenanceWindowStartTimeNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLookerInstancePlatformEdition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstancePrivateIpEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstancePublicIpEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceReservedRange(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenLookerInstanceUserMetadata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["additional_viewer_user_count"] = + flattenLookerInstanceUserMetadataAdditionalViewerUserCount(original["additionalViewerUserCount"], d, config) + transformed["additional_standard_user_count"] = + flattenLookerInstanceUserMetadataAdditionalStandardUserCount(original["additionalStandardUserCount"], d, config) + transformed["additional_developer_user_count"] = + flattenLookerInstanceUserMetadataAdditionalDeveloperUserCount(original["additionalDeveloperUserCount"], d, config) + return []interface{}{transformed} +} +func flattenLookerInstanceUserMetadataAdditionalViewerUserCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenLookerInstanceUserMetadataAdditionalStandardUserCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + 
+ return v // let terraform core handle it otherwise +} + +func flattenLookerInstanceUserMetadataAdditionalDeveloperUserCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandLookerInstanceAdminSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAllowedEmailDomains, err := expandLookerInstanceAdminSettingsAllowedEmailDomains(original["allowed_email_domains"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowedEmailDomains); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowedEmailDomains"] = transformedAllowedEmailDomains + } + + return transformed, nil +} + +func expandLookerInstanceAdminSettingsAllowedEmailDomains(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceConsumerNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceDenyMaintenancePeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + 
transformedStartDate, err := expandLookerInstanceDenyMaintenancePeriodStartDate(original["start_date"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartDate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startDate"] = transformedStartDate + } + + transformedEndDate, err := expandLookerInstanceDenyMaintenancePeriodEndDate(original["end_date"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEndDate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["endDate"] = transformedEndDate + } + + transformedTime, err := expandLookerInstanceDenyMaintenancePeriodTime(original["time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["time"] = transformedTime + } + + return transformed, nil +} + +func expandLookerInstanceDenyMaintenancePeriodStartDate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandLookerInstanceDenyMaintenancePeriodStartDateYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandLookerInstanceDenyMaintenancePeriodStartDateMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandLookerInstanceDenyMaintenancePeriodStartDateDay(original["day"], d, config) + if err != 
nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return transformed, nil +} + +func expandLookerInstanceDenyMaintenancePeriodStartDateYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceDenyMaintenancePeriodStartDateMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceDenyMaintenancePeriodStartDateDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceDenyMaintenancePeriodEndDate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedYear, err := expandLookerInstanceDenyMaintenancePeriodEndDateYear(original["year"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYear); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["year"] = transformedYear + } + + transformedMonth, err := expandLookerInstanceDenyMaintenancePeriodEndDateMonth(original["month"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["month"] = transformedMonth + } + + transformedDay, err := expandLookerInstanceDenyMaintenancePeriodEndDateDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + return 
transformed, nil +} + +func expandLookerInstanceDenyMaintenancePeriodEndDateYear(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceDenyMaintenancePeriodEndDateMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceDenyMaintenancePeriodEndDateDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceDenyMaintenancePeriodTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandLookerInstanceDenyMaintenancePeriodTimeHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandLookerInstanceDenyMaintenancePeriodTimeMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandLookerInstanceDenyMaintenancePeriodTimeSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandLookerInstanceDenyMaintenancePeriodTimeNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandLookerInstanceDenyMaintenancePeriodTimeHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceDenyMaintenancePeriodTimeMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceDenyMaintenancePeriodTimeSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceDenyMaintenancePeriodTimeNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceEncryptionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandLookerInstanceEncryptionConfigKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + transformedKmsKeyState, err := expandLookerInstanceEncryptionConfigKmsKeyState(original["kms_key_state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyState"] = transformedKmsKeyState + } + + transformedKmsKeyNameVersion, err := 
expandLookerInstanceEncryptionConfigKmsKeyNameVersion(original["kms_key_name_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyNameVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyNameVersion"] = transformedKmsKeyNameVersion + } + + return transformed, nil +} + +func expandLookerInstanceEncryptionConfigKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceEncryptionConfigKmsKeyState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceEncryptionConfigKmsKeyNameVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceMaintenanceWindow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDayOfWeek, err := expandLookerInstanceMaintenanceWindowDayOfWeek(original["day_of_week"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOfWeek); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOfWeek"] = transformedDayOfWeek + } + + transformedStartTime, err := expandLookerInstanceMaintenanceWindowStartTime(original["start_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["startTime"] = transformedStartTime + } + + return transformed, nil +} + +func expandLookerInstanceMaintenanceWindowDayOfWeek(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceMaintenanceWindowStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandLookerInstanceMaintenanceWindowStartTimeHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandLookerInstanceMaintenanceWindowStartTimeMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandLookerInstanceMaintenanceWindowStartTimeSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandLookerInstanceMaintenanceWindowStartTimeNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandLookerInstanceMaintenanceWindowStartTimeHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceMaintenanceWindowStartTimeMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandLookerInstanceMaintenanceWindowStartTimeSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceMaintenanceWindowStartTimeNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceOauthConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedClientId, err := expandLookerInstanceOauthConfigClientId(original["client_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientId"] = transformedClientId + } + + transformedClientSecret, err := expandLookerInstanceOauthConfigClientSecret(original["client_secret"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientSecret"] = transformedClientSecret + } + + return transformed, nil +} + +func expandLookerInstanceOauthConfigClientId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceOauthConfigClientSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstancePlatformEdition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstancePrivateIpEnabled(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstancePublicIpEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceReservedRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceUserMetadata(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAdditionalViewerUserCount, err := expandLookerInstanceUserMetadataAdditionalViewerUserCount(original["additional_viewer_user_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAdditionalViewerUserCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["additionalViewerUserCount"] = transformedAdditionalViewerUserCount + } + + transformedAdditionalStandardUserCount, err := expandLookerInstanceUserMetadataAdditionalStandardUserCount(original["additional_standard_user_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAdditionalStandardUserCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["additionalStandardUserCount"] = transformedAdditionalStandardUserCount + } + + transformedAdditionalDeveloperUserCount, err := expandLookerInstanceUserMetadataAdditionalDeveloperUserCount(original["additional_developer_user_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAdditionalDeveloperUserCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["additionalDeveloperUserCount"] = 
transformedAdditionalDeveloperUserCount + } + + return transformed, nil +} + +func expandLookerInstanceUserMetadataAdditionalViewerUserCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceUserMetadataAdditionalStandardUserCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLookerInstanceUserMetadataAdditionalDeveloperUserCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/looker/resource_looker_instance_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/looker/resource_looker_instance_sweeper.go new file mode 100644 index 0000000000..c6576dfbaa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/looker/resource_looker_instance_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package looker + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("LookerInstance", testSweepLookerInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepLookerInstance(region string) error { + resourceName := "LookerInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://looker.googleapis.com/v1/projects/{{project}}/locations/{{region}}/instances", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in 
response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["instances"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://looker.googleapis.com/v1/projects/{{project}}/locations/{{region}}/instances/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/memcache/memcache_operation.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/memcache/memcache_operation.go new file mode 100644 index 0000000000..e4c6a1a4a3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/memcache/memcache_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package memcache + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type MemcacheOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *MemcacheOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.MemcacheBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createMemcacheWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*MemcacheOperationWaiter, error) { + w := &MemcacheOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func MemcacheOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createMemcacheWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func MemcacheOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createMemcacheWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_memcache_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/memcache/resource_memcache_instance.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_memcache_instance.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/memcache/resource_memcache_instance.go index 84dbfc99e3..19918b9ff8 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_memcache_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/memcache/resource_memcache_instance.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package memcache import ( "fmt" @@ -23,6 +26,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceMemcacheInstance() *schema.Resource { @@ -113,7 +120,7 @@ is expected to be one.`, "day": { Type: schema.TypeString, Required: true, - ValidateFunc: validateEnum([]string{"DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), + ValidateFunc: verify.ValidateEnum([]string{"DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), Description: `Required. The day of week that maintenance updates occur. - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. - MONDAY: Monday @@ -220,7 +227,7 @@ resolution and up to nine fractional digits.`, "memcache_version": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"MEMCACHE_1_5", ""}), + ValidateFunc: verify.ValidateEnum([]string{"MEMCACHE_1_5", ""}), Description: `The major version of Memcached software. If not provided, latest supported version will be used. Currently the latest supported major version is MEMCACHE_1_5. The minor version will be automatically determined by our system based on the latest supported minor version. 
Default value: "MEMCACHE_1_5" Possible values: ["MEMCACHE_1_5"]`, @@ -337,8 +344,8 @@ resolution and up to nine fractional digits.`, } func resourceMemcacheInstanceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -347,59 +354,59 @@ func resourceMemcacheInstanceCreate(d *schema.ResourceData, meta interface{}) er displayNameProp, err := expandMemcacheInstanceDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } labelsProp, err := expandMemcacheInstanceLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } zonesProp, err := expandMemcacheInstanceZones(d.Get("zones"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("zones"); !isEmptyValue(reflect.ValueOf(zonesProp)) && (ok || !reflect.DeepEqual(v, zonesProp)) { + } else if v, ok := d.GetOkExists("zones"); !tpgresource.IsEmptyValue(reflect.ValueOf(zonesProp)) && (ok || !reflect.DeepEqual(v, zonesProp)) { obj["zones"] = zonesProp } authorizedNetworkProp, err := expandMemcacheInstanceAuthorizedNetwork(d.Get("authorized_network"), d, config) if err != nil { return err - } 
else if v, ok := d.GetOkExists("authorized_network"); !isEmptyValue(reflect.ValueOf(authorizedNetworkProp)) && (ok || !reflect.DeepEqual(v, authorizedNetworkProp)) { + } else if v, ok := d.GetOkExists("authorized_network"); !tpgresource.IsEmptyValue(reflect.ValueOf(authorizedNetworkProp)) && (ok || !reflect.DeepEqual(v, authorizedNetworkProp)) { obj["authorizedNetwork"] = authorizedNetworkProp } nodeCountProp, err := expandMemcacheInstanceNodeCount(d.Get("node_count"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("node_count"); !isEmptyValue(reflect.ValueOf(nodeCountProp)) && (ok || !reflect.DeepEqual(v, nodeCountProp)) { + } else if v, ok := d.GetOkExists("node_count"); !tpgresource.IsEmptyValue(reflect.ValueOf(nodeCountProp)) && (ok || !reflect.DeepEqual(v, nodeCountProp)) { obj["nodeCount"] = nodeCountProp } memcacheVersionProp, err := expandMemcacheInstanceMemcacheVersion(d.Get("memcache_version"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("memcache_version"); !isEmptyValue(reflect.ValueOf(memcacheVersionProp)) && (ok || !reflect.DeepEqual(v, memcacheVersionProp)) { + } else if v, ok := d.GetOkExists("memcache_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(memcacheVersionProp)) && (ok || !reflect.DeepEqual(v, memcacheVersionProp)) { obj["memcacheVersion"] = memcacheVersionProp } nodeConfigProp, err := expandMemcacheInstanceNodeConfig(d.Get("node_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("node_config"); !isEmptyValue(reflect.ValueOf(nodeConfigProp)) && (ok || !reflect.DeepEqual(v, nodeConfigProp)) { + } else if v, ok := d.GetOkExists("node_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(nodeConfigProp)) && (ok || !reflect.DeepEqual(v, nodeConfigProp)) { obj["nodeConfig"] = nodeConfigProp } parametersProp, err := expandMemcacheInstanceMemcacheParameters(d.Get("memcache_parameters"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("memcache_parameters"); !isEmptyValue(reflect.ValueOf(parametersProp)) && (ok || !reflect.DeepEqual(v, parametersProp)) { + } else if v, ok := d.GetOkExists("memcache_parameters"); !tpgresource.IsEmptyValue(reflect.ValueOf(parametersProp)) && (ok || !reflect.DeepEqual(v, parametersProp)) { obj["parameters"] = parametersProp } maintenancePolicyProp, err := expandMemcacheInstanceMaintenancePolicy(d.Get("maintenance_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("maintenance_policy"); !isEmptyValue(reflect.ValueOf(maintenancePolicyProp)) && (ok || !reflect.DeepEqual(v, maintenancePolicyProp)) { + } else if v, ok := d.GetOkExists("maintenance_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(maintenancePolicyProp)) && (ok || !reflect.DeepEqual(v, maintenancePolicyProp)) { obj["maintenancePolicy"] = maintenancePolicyProp } - url, err := replaceVars(d, config, "{{MemcacheBasePath}}projects/{{project}}/locations/{{region}}/instances?instanceId={{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{MemcacheBasePath}}projects/{{project}}/locations/{{region}}/instances?instanceId={{name}}") if err != nil { return err } @@ -407,24 +414,32 @@ func resourceMemcacheInstanceCreate(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Creating new Instance: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Instance: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + 
Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Instance: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -444,7 +459,7 @@ func resourceMemcacheInstanceCreate(d *schema.ResourceData, meta interface{}) er } // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -456,33 +471,39 @@ func resourceMemcacheInstanceCreate(d *schema.ResourceData, meta interface{}) er } func resourceMemcacheInstanceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{MemcacheBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{MemcacheBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Instance: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, 
config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("MemcacheInstance %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("MemcacheInstance %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -536,15 +557,15 @@ func resourceMemcacheInstanceRead(d *schema.ResourceData, meta interface{}) erro } func resourceMemcacheInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Instance: %s", err) } @@ -554,35 +575,35 @@ func resourceMemcacheInstanceUpdate(d *schema.ResourceData, meta interface{}) er displayNameProp, err := expandMemcacheInstanceDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } labelsProp, err := expandMemcacheInstanceLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } nodeCountProp, err := expandMemcacheInstanceNodeCount(d.Get("node_count"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("node_count"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nodeCountProp)) { + } else if v, ok := d.GetOkExists("node_count"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nodeCountProp)) { obj["nodeCount"] = nodeCountProp } memcacheVersionProp, err := expandMemcacheInstanceMemcacheVersion(d.Get("memcache_version"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("memcache_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, memcacheVersionProp)) { + } else if v, ok := d.GetOkExists("memcache_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, memcacheVersionProp)) { obj["memcacheVersion"] = memcacheVersionProp } maintenancePolicyProp, err := expandMemcacheInstanceMaintenancePolicy(d.Get("maintenance_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("maintenance_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maintenancePolicyProp)) { + } else if v, ok := d.GetOkExists("maintenance_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maintenancePolicyProp)) { obj["maintenancePolicy"] = maintenancePolicyProp } - url, err := replaceVars(d, config, "{{MemcacheBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{MemcacheBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") if err != nil { return err } @@ -609,19 +630,27 @@ func resourceMemcacheInstanceUpdate(d *schema.ResourceData, meta interface{}) 
er if d.HasChange("maintenance_policy") { updateMask = append(updateMask, "maintenancePolicy") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) @@ -641,21 +670,21 @@ func resourceMemcacheInstanceUpdate(d *schema.ResourceData, meta interface{}) er } func resourceMemcacheInstanceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Instance: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{MemcacheBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, 
"{{MemcacheBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") if err != nil { return err } @@ -664,13 +693,21 @@ func resourceMemcacheInstanceDelete(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Deleting Instance %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "Instance") + return transport_tpg.HandleNotFoundError(err, d, "Instance") } err = MemcacheOperationWaitTime( @@ -686,8 +723,8 @@ func resourceMemcacheInstanceDelete(d *schema.ResourceData, meta interface{}) er } func resourceMemcacheInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -697,7 +734,7 @@ func resourceMemcacheInstanceImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -706,11 +743,11 @@ func resourceMemcacheInstanceImport(d 
*schema.ResourceData, meta interface{}) ([ return []*schema.ResourceData{d}, nil } -func flattenMemcacheInstanceDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMemcacheNodes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMemcacheNodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -732,18 +769,18 @@ func flattenMemcacheInstanceMemcacheNodes(v interface{}, d *schema.ResourceData, } return transformed } -func flattenMemcacheInstanceMemcacheNodesNodeId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMemcacheNodesNodeId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMemcacheNodesZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMemcacheNodesZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMemcacheNodesPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMemcacheNodesPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -757,45 +794,45 @@ func flattenMemcacheInstanceMemcacheNodesPort(v interface{}, d *schema.ResourceD return v // let terraform core handle it otherwise } -func flattenMemcacheInstanceMemcacheNodesHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenMemcacheInstanceMemcacheNodesHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMemcacheNodesState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMemcacheNodesState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceDiscoveryEndpoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceDiscoveryEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMemcacheFullVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMemcacheFullVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceZones(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceZones(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenMemcacheInstanceAuthorizedNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceAuthorizedNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceNodeCount(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -809,11 +846,11 @@ func flattenMemcacheInstanceNodeCount(v interface{}, d *schema.ResourceData, con return v // let terraform core handle it otherwise } -func flattenMemcacheInstanceMemcacheVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMemcacheVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceNodeConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceNodeConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -828,10 +865,10 @@ func flattenMemcacheInstanceNodeConfig(v interface{}, d *schema.ResourceData, co flattenMemcacheInstanceNodeConfigMemorySizeMb(original["memorySizeMb"], d, config) return []interface{}{transformed} } -func flattenMemcacheInstanceNodeConfigCpuCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceNodeConfigCpuCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -845,10 +882,10 @@ func flattenMemcacheInstanceNodeConfigCpuCount(v interface{}, d *schema.Resource return v // let terraform core handle it otherwise } -func flattenMemcacheInstanceNodeConfigMemorySizeMb(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceNodeConfigMemorySizeMb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -862,7 +899,7 @@ func flattenMemcacheInstanceNodeConfigMemorySizeMb(v interface{}, d *schema.Reso return v // let terraform core handle it otherwise } -func flattenMemcacheInstanceMemcacheParameters(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMemcacheParameters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -877,15 +914,15 @@ func flattenMemcacheInstanceMemcacheParameters(v interface{}, d *schema.Resource flattenMemcacheInstanceMemcacheParametersParams(original["params"], d, config) return []interface{}{transformed} } -func flattenMemcacheInstanceMemcacheParametersId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMemcacheParametersId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMemcacheParametersParams(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMemcacheParametersParams(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMaintenancePolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenancePolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -904,19 +941,19 @@ func flattenMemcacheInstanceMaintenancePolicy(v interface{}, d *schema.ResourceD 
flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindow(original["weeklyMaintenanceWindow"], d, config) return []interface{}{transformed} } -func flattenMemcacheInstanceMaintenancePolicyCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenancePolicyCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMaintenancePolicyUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenancePolicyUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMaintenancePolicyDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenancePolicyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindow(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -936,15 +973,15 @@ func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindow(v interface } return transformed } -func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowDuration(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -960,10 +997,10 @@ func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(v flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(original["nanos"], d, config) return []interface{}{transformed} } -func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -977,10 +1014,10 @@ func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHou return v // let terraform core handle it otherwise } -func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -994,10 +1031,10 @@ func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMin return v // let 
terraform core handle it otherwise } -func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1011,10 +1048,10 @@ func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSec return v // let terraform core handle it otherwise } -func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1028,7 +1065,7 @@ func flattenMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNan return v // let terraform core handle it otherwise } -func flattenMemcacheInstanceMaintenanceSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenanceSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1045,23 +1082,23 @@ func flattenMemcacheInstanceMaintenanceSchedule(v interface{}, d *schema.Resourc flattenMemcacheInstanceMaintenanceScheduleScheduleDeadlineTime(original["scheduleDeadlineTime"], d, config) return []interface{}{transformed} } -func 
flattenMemcacheInstanceMaintenanceScheduleStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenanceScheduleStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMaintenanceScheduleEndTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenanceScheduleEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMemcacheInstanceMaintenanceScheduleScheduleDeadlineTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMemcacheInstanceMaintenanceScheduleScheduleDeadlineTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandMemcacheInstanceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandMemcacheInstanceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1072,24 +1109,24 @@ func expandMemcacheInstanceLabels(v interface{}, d TerraformResourceData, config return m, nil } -func expandMemcacheInstanceZones(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceZones(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandMemcacheInstanceAuthorizedNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandMemcacheInstanceAuthorizedNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceNodeCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceMemcacheVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMemcacheVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceNodeConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceNodeConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1101,29 +1138,29 @@ func expandMemcacheInstanceNodeConfig(v interface{}, d TerraformResourceData, co transformedCpuCount, err := expandMemcacheInstanceNodeConfigCpuCount(original["cpu_count"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCpuCount); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCpuCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cpuCount"] = transformedCpuCount } transformedMemorySizeMb, err := expandMemcacheInstanceNodeConfigMemorySizeMb(original["memory_size_mb"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMemorySizeMb); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMemorySizeMb); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["memorySizeMb"] = transformedMemorySizeMb } return transformed, nil } -func 
expandMemcacheInstanceNodeConfigCpuCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceNodeConfigCpuCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceNodeConfigMemorySizeMb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceNodeConfigMemorySizeMb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceMemcacheParameters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMemcacheParameters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1135,25 +1172,25 @@ func expandMemcacheInstanceMemcacheParameters(v interface{}, d TerraformResource transformedId, err := expandMemcacheInstanceMemcacheParametersId(original["id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["id"] = transformedId } transformedParams, err := expandMemcacheInstanceMemcacheParametersParams(original["params"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedParams); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedParams); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["params"] = transformedParams } return transformed, nil } -func expandMemcacheInstanceMemcacheParametersId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMemcacheParametersId(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceMemcacheParametersParams(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandMemcacheInstanceMemcacheParametersParams(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1164,7 +1201,7 @@ func expandMemcacheInstanceMemcacheParametersParams(v interface{}, d TerraformRe return m, nil } -func expandMemcacheInstanceMaintenancePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMaintenancePolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1176,47 +1213,47 @@ func expandMemcacheInstanceMaintenancePolicy(v interface{}, d TerraformResourceD transformedCreateTime, err := expandMemcacheInstanceMaintenancePolicyCreateTime(original["create_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCreateTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCreateTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["createTime"] = transformedCreateTime } transformedUpdateTime, err := expandMemcacheInstanceMaintenancePolicyUpdateTime(original["update_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUpdateTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUpdateTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["updateTime"] = transformedUpdateTime } transformedDescription, err := expandMemcacheInstanceMaintenancePolicyDescription(original["description"], d, config) if err != nil { return nil, err - } else if 
val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedWeeklyMaintenanceWindow, err := expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindow(original["weekly_maintenance_window"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeeklyMaintenanceWindow); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeeklyMaintenanceWindow); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weeklyMaintenanceWindow"] = transformedWeeklyMaintenanceWindow } return transformed, nil } -func expandMemcacheInstanceMaintenancePolicyCreateTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMaintenancePolicyCreateTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceMaintenancePolicyUpdateTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMaintenancePolicyUpdateTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceMaintenancePolicyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMaintenancePolicyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindow(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1229,14 +1266,14 @@ func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindow(v interface{ transformedDay, err := expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowDay(original["day"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["day"] = transformedDay } transformedDuration, err := expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowDuration(original["duration"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDuration); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["duration"] = transformedDuration } @@ -1252,15 +1289,15 @@ func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindow(v interface{ return req, nil } -func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { +func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1277,46 +1314,46 @@ func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(v i transformedHours, err := expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(original["hours"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hours"] = transformedHours } transformedMinutes, err := expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(original["minutes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minutes"] = transformedMinutes } transformedSeconds, err := expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return 
transformed, nil } -func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMemcacheInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/memcache/resource_memcache_instance_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/memcache/resource_memcache_instance_sweeper.go new file mode 100644 index 0000000000..3a95522e0b --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/memcache/resource_memcache_instance_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package memcache + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("MemcacheInstance", testSweepMemcacheInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepMemcacheInstance(region string) error { + resourceName := "MemcacheInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + 
"project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://memcache.googleapis.com/v1/projects/{{project}}/locations/{{region}}/instances", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["instances"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://memcache.googleapis.com/v1/projects/{{project}}/locations/{{region}}/instances/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/mlengine/ml_engine_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/mlengine/ml_engine_operation.go new file mode 100644 index 0000000000..5b7f2c12f7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/mlengine/ml_engine_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package mlengine + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type MLEngineOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *MLEngineOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.MLEngineBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createMLEngineWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*MLEngineOperationWaiter, error) { + w := &MLEngineOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func MLEngineOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createMLEngineWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func MLEngineOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createMLEngineWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/mlengine/resource_ml_engine_model.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/mlengine/resource_ml_engine_model.go new file mode 100644 index 0000000000..43eafd1318 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/mlengine/resource_ml_engine_model.go @@ -0,0 +1,448 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package mlengine + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceMLEngineModel() *schema.Resource { + return &schema.Resource{ + Create: resourceMLEngineModelCreate, + Read: resourceMLEngineModelRead, + Delete: resourceMLEngineModelDelete, + + Importer: &schema.ResourceImporter{ + State: resourceMLEngineModelImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name specified for the model.`, + }, + "default_version": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The default version of the model. 
This version will be used to handle +prediction requests that do not specify a version.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name specified for the version when it was created.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The description specified for the model when it was created.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `One or more labels that you can add, to organize your models.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "online_prediction_console_logging": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If true, online prediction nodes send stderr and stdout streams to Stackdriver Logging`, + }, + "online_prediction_logging": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If true, online prediction access logs are sent to StackDriver Logging.`, + }, + "regions": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The list of regions where the model is going to be deployed. 
+Currently only one region per model is supported`, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceMLEngineModelCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandMLEngineModelName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandMLEngineModelDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + defaultVersionProp, err := expandMLEngineModelDefaultVersion(d.Get("default_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("default_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultVersionProp)) && (ok || !reflect.DeepEqual(v, defaultVersionProp)) { + obj["defaultVersion"] = defaultVersionProp + } + regionsProp, err := expandMLEngineModelRegions(d.Get("regions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("regions"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionsProp)) && (ok || !reflect.DeepEqual(v, regionsProp)) { + obj["regions"] = regionsProp + } + onlinePredictionLoggingProp, err := expandMLEngineModelOnlinePredictionLogging(d.Get("online_prediction_logging"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("online_prediction_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(onlinePredictionLoggingProp)) && (ok || !reflect.DeepEqual(v, onlinePredictionLoggingProp)) { + obj["onlinePredictionLogging"] = onlinePredictionLoggingProp + } + onlinePredictionConsoleLoggingProp, err := expandMLEngineModelOnlinePredictionConsoleLogging(d.Get("online_prediction_console_logging"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("online_prediction_console_logging"); !tpgresource.IsEmptyValue(reflect.ValueOf(onlinePredictionConsoleLoggingProp)) && (ok || !reflect.DeepEqual(v, onlinePredictionConsoleLoggingProp)) { + obj["onlinePredictionConsoleLogging"] = onlinePredictionConsoleLoggingProp + } + labelsProp, err := expandMLEngineModelLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MLEngineBasePath}}projects/{{project}}/models") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Model: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Model: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Model: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/models/{{name}}") + if 
err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Model %q: %#v", d.Id(), res) + + return resourceMLEngineModelRead(d, meta) +} + +func resourceMLEngineModelRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MLEngineBasePath}}projects/{{project}}/models/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Model: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("MLEngineModel %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Model: %s", err) + } + + if err := d.Set("name", flattenMLEngineModelName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Model: %s", err) + } + if err := d.Set("description", flattenMLEngineModelDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Model: %s", err) + } + if err := d.Set("default_version", flattenMLEngineModelDefaultVersion(res["defaultVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading Model: %s", err) + } + if err := d.Set("regions", flattenMLEngineModelRegions(res["regions"], d, config)); err != nil { + return fmt.Errorf("Error reading Model: %s", err) + } 
+ if err := d.Set("online_prediction_logging", flattenMLEngineModelOnlinePredictionLogging(res["onlinePredictionLogging"], d, config)); err != nil { + return fmt.Errorf("Error reading Model: %s", err) + } + if err := d.Set("online_prediction_console_logging", flattenMLEngineModelOnlinePredictionConsoleLogging(res["onlinePredictionConsoleLogging"], d, config)); err != nil { + return fmt.Errorf("Error reading Model: %s", err) + } + if err := d.Set("labels", flattenMLEngineModelLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Model: %s", err) + } + + return nil +} + +func resourceMLEngineModelDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Model: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{MLEngineBasePath}}projects/{{project}}/models/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Model %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Model") + } + + err = MLEngineOperationWaitTime( + config, res, project, "Deleting Model", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Model %q: %#v", d.Id(), res) + return 
nil +} + +func resourceMLEngineModelImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/models/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/models/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenMLEngineModelName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenMLEngineModelDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMLEngineModelDefaultVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenMLEngineModelDefaultVersionName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenMLEngineModelDefaultVersionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMLEngineModelRegions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMLEngineModelOnlinePredictionLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMLEngineModelOnlinePredictionConsoleLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMLEngineModelLabels(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandMLEngineModelName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMLEngineModelDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMLEngineModelDefaultVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandMLEngineModelDefaultVersionName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandMLEngineModelDefaultVersionName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMLEngineModelRegions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMLEngineModelOnlinePredictionLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMLEngineModelOnlinePredictionConsoleLogging(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMLEngineModelLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range 
v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/mlengine/resource_ml_engine_model_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/mlengine/resource_ml_engine_model_sweeper.go new file mode 100644 index 0000000000..fbc76ad356 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/mlengine/resource_ml_engine_model_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package mlengine + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("MLEngineModel", testSweepMLEngineModel) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepMLEngineModel(region string) error { + resourceName := "MLEngineModel" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://ml.googleapis.com/v1/projects/{{project}}/models", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", 
listUrl, err) + return nil + } + + resourceList, ok := res["models"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://ml.googleapis.com/v1/projects/{{project}}/models/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_monitoring_uptime_check_ips.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_google_monitoring_uptime_check_ips.go similarity 
index 77% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_monitoring_uptime_check_ips.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_google_monitoring_uptime_check_ips.go index f5db89fc10..ca4808c1f9 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_monitoring_uptime_check_ips.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_google_monitoring_uptime_check_ips.go @@ -1,9 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package monitoring import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleMonitoringUptimeCheckIps() *schema.Resource { @@ -36,15 +40,15 @@ func DataSourceGoogleMonitoringUptimeCheckIps() *schema.Resource { } func dataSourceGoogleMonitoringUptimeCheckIpsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } url := "https://monitoring.googleapis.com/v3/uptimeCheckIps" - uptimeCheckIps, err := paginatedListRequest("", url, userAgent, config, flattenUptimeCheckIpsList) + uptimeCheckIps, err := tpgresource.PaginatedListRequest("", url, userAgent, config, flattenUptimeCheckIpsList) if err != nil { return fmt.Errorf("Error retrieving monitoring uptime check ips: %s", err) } diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_istio_canonical_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_istio_canonical_service.go similarity index 96% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_istio_canonical_service.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_istio_canonical_service.go index 82c22000c3..3f1790d2c0 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_istio_canonical_service.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_istio_canonical_service.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package monitoring import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_notification_channel.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_notification_channel.go new file mode 100644 index 0000000000..2689ef15c0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_notification_channel.go @@ -0,0 +1,121 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package monitoring + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceMonitoringNotificationChannel() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceMonitoringNotificationChannel().Schema) + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "display_name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "type") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "labels") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "user_labels") + + return &schema.Resource{ + Read: dataSourceMonitoringNotificationChannelRead, + Schema: dsSchema, + } +} + +func dataSourceMonitoringNotificationChannelRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/notificationChannels") + if err != nil { + return err + } + + displayName := d.Get("display_name").(string) + channelType := d.Get("type").(string) + + if displayName == "" && channelType == "" { + return fmt.Errorf("At least one of display_name or type must be provided") + } + + labels, err := expandMonitoringNotificationChannelLabels(d.Get("labels"), d, config) + if err != nil { + return err + } + + userLabels, err := expandMonitoringNotificationChannelLabels(d.Get("user_labels"), d, config) + if err != nil { + return err + } + + filters := make([]string, 0, len(labels)+2) + + if displayName != "" { + filters = append(filters, fmt.Sprintf(`display_name="%s"`, displayName)) + } + + if 
channelType != "" { + filters = append(filters, fmt.Sprintf(`type="%s"`, channelType)) + } + + for k, v := range labels { + filters = append(filters, fmt.Sprintf(`labels.%s="%s"`, k, v)) + } + + for k, v := range userLabels { + filters = append(filters, fmt.Sprintf(`user_labels.%s="%s"`, k, v)) + } + + filter := strings.Join(filters, " AND ") + params := map[string]string{ + "filter": filter, + } + url, err = transport_tpg.AddQueryParams(url, params) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + response, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error retrieving NotificationChannels: %s", err) + } + + var channels []interface{} + if v, ok := response["notificationChannels"]; ok { + channels = v.([]interface{}) + } + if len(channels) == 0 { + return fmt.Errorf("No NotificationChannel found using filter: %s", filter) + } + if len(channels) > 1 { + return fmt.Errorf("Found more than one 1 NotificationChannel matching specified filter: %s", filter) + } + res := channels[0].(map[string]interface{}) + + name := flattenMonitoringNotificationChannelName(res["name"], d, config).(string) + if err := d.Set("name", name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name) + + return resourceMonitoringNotificationChannelRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_service.go new file mode 100644 index 0000000000..48b88de669 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_service.go @@ -0,0 +1,117 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package monitoring + +import ( + "fmt" + neturl "net/url" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type monitoringServiceTypeStateSetter func(map[string]interface{}, *schema.ResourceData, interface{}) error + +// dataSourceMonitoringServiceType creates a Datasource resource for a type of service. It takes +// - schema for identifying the service, specific to the type (AppEngine moduleId) +// - list query filter to filter a specific service (type, ID) from the list of services for a parent +// - typeFlattenF for reading the service-specific schema (typeSchema) +func dataSourceMonitoringServiceType( + typeSchema map[string]*schema.Schema, + listFilter string, + typeStateSetter monitoringServiceTypeStateSetter) *schema.Resource { + + // Convert monitoring schema to ds schema + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceMonitoringService().Schema) + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + // Add schema specific to the service type + dsSchema = tpgresource.MergeSchemas(typeSchema, dsSchema) + + return &schema.Resource{ + Read: dataSourceMonitoringServiceTypeReadFromList(listFilter, typeStateSetter), + Schema: dsSchema, + } +} + +// dataSourceMonitoringServiceRead returns a ReadFunc that calls service.list with proper filters +// to identify both the type of service and underlying service resource. +// It takes the list query filter (i.e. ?filter=$listFilter) and a ReadFunc to handle reading any type-specific schema. 
+func dataSourceMonitoringServiceTypeReadFromList(listFilter string, typeStateSetter monitoringServiceTypeStateSetter) schema.ReadFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + filters, err := tpgresource.ReplaceVars(d, config, listFilter) + if err != nil { + return err + } + + listUrlTmpl := "{{MonitoringBasePath}}v3/projects/{{project}}/services?filter=" + neturl.QueryEscape(filters) + url, err := tpgresource.ReplaceVars(d, config, listUrlTmpl) + if err != nil { + return err + } + + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return fmt.Errorf("unable to list Monitoring Service for data source: %v", err) + } + + v, ok := resp["services"] + if !ok || v == nil { + return fmt.Errorf("no Monitoring Services found for data source") + } + ls, ok := v.([]interface{}) + if !ok { + return fmt.Errorf("no Monitoring Services found for data source") + } + if len(ls) == 0 { + return fmt.Errorf("no Monitoring Services found for data source") + } + if len(ls) > 1 { + return fmt.Errorf("more than one Monitoring Services with given identifier found") + } + res := ls[0].(map[string]interface{}) + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting Service: %s", err) + } + if err := d.Set("display_name", flattenMonitoringServiceDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error setting Service: %s", err) + } + if err := d.Set("telemetry", flattenMonitoringServiceTelemetry(res["telemetry"], d, 
config)); err != nil { + return fmt.Errorf("Error setting Service: %s", err) + } + if err := d.Set("service_id", flattenMonitoringServiceServiceId(res["name"], d, config)); err != nil { + return fmt.Errorf("Error setting Service: %s", err) + } + if err := typeStateSetter(res, d, config); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + + name := flattenMonitoringServiceName(res["name"], d, config).(string) + if err := d.Set("name", name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name) + + return nil + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_service_app_engine.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_service_app_engine.go similarity index 92% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_service_app_engine.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_service_app_engine.go index 09db702b6f..5da7588955 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_service_app_engine.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_service_app_engine.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package monitoring import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_service_cluster_istio.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_service_cluster_istio.go similarity index 96% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_service_cluster_istio.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_service_cluster_istio.go index b7f0186272..f1086e761a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_service_cluster_istio.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_service_cluster_istio.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package monitoring import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_service_mesh_istio.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_service_mesh_istio.go similarity index 95% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_service_mesh_istio.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_service_mesh_istio.go index cdbe5542f9..f8e046e1a2 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_monitoring_service_mesh_istio.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/data_source_monitoring_service_mesh_istio.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package monitoring import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_alert_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_alert_policy.go new file mode 100644 index 0000000000..433485b0f3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_alert_policy.go @@ -0,0 +1,2645 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +// API does not return a value for REDUCE_NONE +func crossSeriesReducerDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + return (new == "" && old == "REDUCE_NONE") || (new == "REDUCE_NONE" && old == "") +} + +func ResourceMonitoringAlertPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceMonitoringAlertPolicyCreate, + Read: resourceMonitoringAlertPolicyRead, + Update: resourceMonitoringAlertPolicyUpdate, + Delete: resourceMonitoringAlertPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceMonitoringAlertPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "combiner": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"AND", "OR", "AND_WITH_MATCHING_RESOURCE"}), + Description: `How to combine the results of multiple conditions to +determine if an incident 
should be opened. Possible values: ["AND", "OR", "AND_WITH_MATCHING_RESOURCE"]`, + }, + "conditions": { + Type: schema.TypeList, + Required: true, + Description: `A list of conditions for the policy. The conditions are combined by +AND or OR according to the combiner field. If the combined conditions +evaluate to true, then an incident is created. A policy can have from +one to six conditions.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `A short name or phrase used to identify the +condition in dashboards, notifications, and +incidents. To avoid confusion, don't use the same +display name for multiple conditions in the same +policy.`, + }, + "condition_absent": { + Type: schema.TypeList, + Optional: true, + Description: `A condition that checks that a time series +continues to receive new data points.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "duration": { + Type: schema.TypeString, + Required: true, + Description: `The amount of time that a time series must +fail to report new data to be considered +failing. Currently, only values that are a +multiple of a minute--e.g. 60s, 120s, or 300s +--are supported.`, + }, + "aggregations": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies the alignment of data points in +individual time series as well as how to +combine the retrieved time series together +(such as when aggregating multiple streams +on each resource to a single stream for each +resource or when aggregating streams across +all members of a group of resources). +Multiple aggregations are applied in the +order specified.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alignment_period": { + Type: schema.TypeString, + Optional: true, + Description: `The alignment period for per-time +series alignment. If present, +alignmentPeriod must be at least +60 seconds. 
After per-time series +alignment, each time series will +contain data points only on the +period boundaries. If +perSeriesAligner is not specified +or equals ALIGN_NONE, then this +field is ignored. If +perSeriesAligner is specified and +does not equal ALIGN_NONE, then +this field must be defined; +otherwise an error is returned.`, + }, + "cross_series_reducer": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", ""}), + DiffSuppressFunc: crossSeriesReducerDiffSuppress, + Description: `The approach to be used to combine +time series. Not all reducer +functions may be applied to all +time series, depending on the +metric type and the value type of +the original time series. +Reduction may change the metric +type of value type of the time +series.Time series data must be +aligned in order to perform cross- +time series reduction. If +crossSeriesReducer is specified, +then perSeriesAligner must be +specified and not equal ALIGN_NONE +and alignmentPeriod must be +specified; otherwise, an error is +returned. Possible values: ["REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05"]`, + }, + "group_by_fields": { + Type: schema.TypeList, + Optional: true, + Description: `The set of fields to preserve when +crossSeriesReducer is specified. +The groupByFields determine how +the time series are partitioned +into subsets prior to applying the +aggregation function. Each subset +contains time series that have the +same value for each of the +grouping fields. 
Each individual +time series is a member of exactly +one subset. The crossSeriesReducer +is applied to each subset of time +series. It is not possible to +reduce across different resource +types, so this field implicitly +contains resource.type. Fields not +specified in groupByFields are +aggregated away. If groupByFields +is not specified and all the time +series have the same resource +type, then the time series are +aggregated into a single output +time series. If crossSeriesReducer +is not defined, this field is +ignored.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "per_series_aligner": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE", ""}), + Description: `The approach to be used to align +individual time series. Not all +alignment functions may be applied +to all time series, depending on +the metric type and value type of +the original time series. +Alignment may change the metric +type or the value type of the time +series.Time series data must be +aligned in order to perform cross- +time series reduction. If +crossSeriesReducer is specified, +then perSeriesAligner must be +specified and not equal ALIGN_NONE +and alignmentPeriod must be +specified; otherwise, an error is +returned. 
Possible values: ["ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE"]`, + }, + }, + }, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: `A filter that identifies which time series +should be compared with the threshold.The +filter is similar to the one that is +specified in the +MetricService.ListTimeSeries request (that +call is useful to verify the time series +that will be retrieved / processed) and must +specify the metric type and optionally may +contain restrictions on resource type, +resource labels, and metric labels. This +field may not exceed 2048 Unicode characters +in length.`, + }, + "trigger": { + Type: schema.TypeList, + Optional: true, + Description: `The number/percent of time series for which +the comparison must hold in order for the +condition to trigger. If unspecified, then +the condition will trigger if the comparison +is true for any of the time series that have +been identified by filter and aggregations.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Optional: true, + Description: `The absolute number of time series +that must fail the predicate for the +condition to be triggered.`, + }, + "percent": { + Type: schema.TypeFloat, + Optional: true, + Description: `The percentage of time series that +must fail the predicate for the +condition to be triggered.`, + }, + }, + }, + }, + }, + }, + }, + "condition_matched_log": { + Type: schema.TypeList, + Optional: true, + Description: `A condition that checks for log messages matching given constraints. 
+If set, no other conditions can be present.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeString, + Required: true, + Description: `A logs-based filter.`, + }, + "label_extractors": { + Type: schema.TypeMap, + Optional: true, + Description: `A map from a label key to an extractor expression, which is used to +extract the value for this label key. Each entry in this map is +a specification for how data should be extracted from log entries that +match filter. Each combination of extracted values is treated as +a separate rule for the purposes of triggering notifications. +Label keys and corresponding values can be used in notifications +generated by this condition.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "condition_monitoring_query_language": { + Type: schema.TypeList, + Optional: true, + Description: `A Monitoring Query Language query that outputs a boolean stream`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "duration": { + Type: schema.TypeString, + Required: true, + Description: `The amount of time that a time series must +violate the threshold to be considered +failing. Currently, only values that are a +multiple of a minute--e.g., 0, 60, 120, or +300 seconds--are supported. If an invalid +value is given, an error will be returned. 
+When choosing a duration, it is useful to +keep in mind the frequency of the underlying +time series data (which may also be affected +by any alignments specified in the +aggregations field); a good duration is long +enough so that a single outlier does not +generate spurious alerts, but short enough +that unhealthy states are detected and +alerted on quickly.`, + }, + "query": { + Type: schema.TypeString, + Required: true, + Description: `Monitoring Query Language query that outputs a boolean stream.`, + }, + "evaluation_missing_data": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP", ""}), + Description: `A condition control that determines how +metric-threshold conditions are evaluated when +data stops arriving. Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"]`, + }, + "trigger": { + Type: schema.TypeList, + Optional: true, + Description: `The number/percent of time series for which +the comparison must hold in order for the +condition to trigger. 
If unspecified, then +the condition will trigger if the comparison +is true for any of the time series that have +been identified by filter and aggregations, +or by the ratio, if denominator_filter and +denominator_aggregations are specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Optional: true, + Description: `The absolute number of time series +that must fail the predicate for the +condition to be triggered.`, + }, + "percent": { + Type: schema.TypeFloat, + Optional: true, + Description: `The percentage of time series that +must fail the predicate for the +condition to be triggered.`, + }, + }, + }, + }, + }, + }, + }, + "condition_threshold": { + Type: schema.TypeList, + Optional: true, + Description: `A condition that compares a time series against a +threshold.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "comparison": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"COMPARISON_GT", "COMPARISON_GE", "COMPARISON_LT", "COMPARISON_LE", "COMPARISON_EQ", "COMPARISON_NE"}), + Description: `The comparison to apply between the time +series (indicated by filter and aggregation) +and the threshold (indicated by +threshold_value). The comparison is applied +on each time series, with the time series on +the left-hand side and the threshold on the +right-hand side. Only COMPARISON_LT and +COMPARISON_GT are supported currently. Possible values: ["COMPARISON_GT", "COMPARISON_GE", "COMPARISON_LT", "COMPARISON_LE", "COMPARISON_EQ", "COMPARISON_NE"]`, + }, + "duration": { + Type: schema.TypeString, + Required: true, + Description: `The amount of time that a time series must +violate the threshold to be considered +failing. Currently, only values that are a +multiple of a minute--e.g., 0, 60, 120, or +300 seconds--are supported. If an invalid +value is given, an error will be returned. 
+When choosing a duration, it is useful to +keep in mind the frequency of the underlying +time series data (which may also be affected +by any alignments specified in the +aggregations field); a good duration is long +enough so that a single outlier does not +generate spurious alerts, but short enough +that unhealthy states are detected and +alerted on quickly.`, + }, + "aggregations": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies the alignment of data points in +individual time series as well as how to +combine the retrieved time series together +(such as when aggregating multiple streams +on each resource to a single stream for each +resource or when aggregating streams across +all members of a group of resources). +Multiple aggregations are applied in the +order specified.This field is similar to the +one in the MetricService.ListTimeSeries +request. It is advisable to use the +ListTimeSeries method when debugging this +field.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alignment_period": { + Type: schema.TypeString, + Optional: true, + Description: `The alignment period for per-time +series alignment. If present, +alignmentPeriod must be at least +60 seconds. After per-time series +alignment, each time series will +contain data points only on the +period boundaries. If +perSeriesAligner is not specified +or equals ALIGN_NONE, then this +field is ignored. 
If +perSeriesAligner is specified and +does not equal ALIGN_NONE, then +this field must be defined; +otherwise an error is returned.`, + }, + "cross_series_reducer": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", ""}), + DiffSuppressFunc: crossSeriesReducerDiffSuppress, + Description: `The approach to be used to combine +time series. Not all reducer +functions may be applied to all +time series, depending on the +metric type and the value type of +the original time series. +Reduction may change the metric +type of value type of the time +series.Time series data must be +aligned in order to perform cross- +time series reduction. If +crossSeriesReducer is specified, +then perSeriesAligner must be +specified and not equal ALIGN_NONE +and alignmentPeriod must be +specified; otherwise, an error is +returned. Possible values: ["REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05"]`, + }, + "group_by_fields": { + Type: schema.TypeList, + Optional: true, + Description: `The set of fields to preserve when +crossSeriesReducer is specified. +The groupByFields determine how +the time series are partitioned +into subsets prior to applying the +aggregation function. Each subset +contains time series that have the +same value for each of the +grouping fields. Each individual +time series is a member of exactly +one subset. The crossSeriesReducer +is applied to each subset of time +series. 
It is not possible to +reduce across different resource +types, so this field implicitly +contains resource.type. Fields not +specified in groupByFields are +aggregated away. If groupByFields +is not specified and all the time +series have the same resource +type, then the time series are +aggregated into a single output +time series. If crossSeriesReducer +is not defined, this field is +ignored.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "per_series_aligner": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE", ""}), + Description: `The approach to be used to align +individual time series. Not all +alignment functions may be applied +to all time series, depending on +the metric type and value type of +the original time series. +Alignment may change the metric +type or the value type of the time +series.Time series data must be +aligned in order to perform cross- +time series reduction. If +crossSeriesReducer is specified, +then perSeriesAligner must be +specified and not equal ALIGN_NONE +and alignmentPeriod must be +specified; otherwise, an error is +returned. 
Possible values: ["ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE"]`, + }, + }, + }, + }, + "denominator_aggregations": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies the alignment of data points in +individual time series selected by +denominatorFilter as well as how to combine +the retrieved time series together (such as +when aggregating multiple streams on each +resource to a single stream for each +resource or when aggregating streams across +all members of a group of resources).When +computing ratios, the aggregations and +denominator_aggregations fields must use the +same alignment period and produce time +series that have the same periodicity and +labels.This field is similar to the one in +the MetricService.ListTimeSeries request. It +is advisable to use the ListTimeSeries +method when debugging this field.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alignment_period": { + Type: schema.TypeString, + Optional: true, + Description: `The alignment period for per-time +series alignment. If present, +alignmentPeriod must be at least +60 seconds. After per-time series +alignment, each time series will +contain data points only on the +period boundaries. If +perSeriesAligner is not specified +or equals ALIGN_NONE, then this +field is ignored. 
If +perSeriesAligner is specified and +does not equal ALIGN_NONE, then +this field must be defined; +otherwise an error is returned.`, + }, + "cross_series_reducer": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", ""}), + DiffSuppressFunc: crossSeriesReducerDiffSuppress, + Description: `The approach to be used to combine +time series. Not all reducer +functions may be applied to all +time series, depending on the +metric type and the value type of +the original time series. +Reduction may change the metric +type of value type of the time +series.Time series data must be +aligned in order to perform cross- +time series reduction. If +crossSeriesReducer is specified, +then perSeriesAligner must be +specified and not equal ALIGN_NONE +and alignmentPeriod must be +specified; otherwise, an error is +returned. Possible values: ["REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05"]`, + }, + "group_by_fields": { + Type: schema.TypeList, + Optional: true, + Description: `The set of fields to preserve when +crossSeriesReducer is specified. +The groupByFields determine how +the time series are partitioned +into subsets prior to applying the +aggregation function. Each subset +contains time series that have the +same value for each of the +grouping fields. Each individual +time series is a member of exactly +one subset. The crossSeriesReducer +is applied to each subset of time +series. 
It is not possible to +reduce across different resource +types, so this field implicitly +contains resource.type. Fields not +specified in groupByFields are +aggregated away. If groupByFields +is not specified and all the time +series have the same resource +type, then the time series are +aggregated into a single output +time series. If crossSeriesReducer +is not defined, this field is +ignored.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "per_series_aligner": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE", ""}), + Description: `The approach to be used to align +individual time series. Not all +alignment functions may be applied +to all time series, depending on +the metric type and value type of +the original time series. +Alignment may change the metric +type or the value type of the time +series.Time series data must be +aligned in order to perform cross- +time series reduction. If +crossSeriesReducer is specified, +then perSeriesAligner must be +specified and not equal ALIGN_NONE +and alignmentPeriod must be +specified; otherwise, an error is +returned. 
Possible values: ["ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE"]`, + }, + }, + }, + }, + "denominator_filter": { + Type: schema.TypeString, + Optional: true, + Description: `A filter that identifies a time series that +should be used as the denominator of a ratio +that will be compared with the threshold. If +a denominator_filter is specified, the time +series specified by the filter field will be +used as the numerator.The filter is similar +to the one that is specified in the +MetricService.ListTimeSeries request (that +call is useful to verify the time series +that will be retrieved / processed) and must +specify the metric type and optionally may +contain restrictions on resource type, +resource labels, and metric labels. This +field may not exceed 2048 Unicode characters +in length.`, + }, + "evaluation_missing_data": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP", ""}), + Description: `A condition control that determines how +metric-threshold conditions are evaluated when +data stops arriving. 
Possible values: ["EVALUATION_MISSING_DATA_INACTIVE", "EVALUATION_MISSING_DATA_ACTIVE", "EVALUATION_MISSING_DATA_NO_OP"]`, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: `A filter that identifies which time series +should be compared with the threshold.The +filter is similar to the one that is +specified in the +MetricService.ListTimeSeries request (that +call is useful to verify the time series +that will be retrieved / processed) and must +specify the metric type and optionally may +contain restrictions on resource type, +resource labels, and metric labels. This +field may not exceed 2048 Unicode characters +in length.`, + }, + "forecast_options": { + Type: schema.TypeList, + Optional: true, + Description: `When this field is present, the 'MetricThreshold' +condition forecasts whether the time series is +predicted to violate the threshold within the +'forecastHorizon'. When this field is not set, the +'MetricThreshold' tests the current value of the +timeseries against the threshold.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "forecast_horizon": { + Type: schema.TypeString, + Required: true, + Description: `The length of time into the future to forecast +whether a timeseries will violate the threshold. +If the predicted value is found to violate the +threshold, and the violation is observed in all +forecasts made for the Configured 'duration', +then the timeseries is considered to be failing.`, + }, + }, + }, + }, + "threshold_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `A value against which to compare the time +series.`, + }, + "trigger": { + Type: schema.TypeList, + Optional: true, + Description: `The number/percent of time series for which +the comparison must hold in order for the +condition to trigger. 
If unspecified, then +the condition will trigger if the comparison +is true for any of the time series that have +been identified by filter and aggregations, +or by the ratio, if denominator_filter and +denominator_aggregations are specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Optional: true, + Description: `The absolute number of time series +that must fail the predicate for the +condition to be triggered.`, + }, + "percent": { + Type: schema.TypeFloat, + Optional: true, + Description: `The percentage of time series that +must fail the predicate for the +condition to be triggered.`, + }, + }, + }, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique resource name for this condition. +Its syntax is: +projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] +[CONDITION_ID] is assigned by Stackdriver Monitoring when +the condition is created as part of a new or updated alerting +policy.`, + }, + }, + }, + }, + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `A short name or phrase used to identify the policy in +dashboards, notifications, and incidents. To avoid confusion, don't use +the same display name for multiple policies in the same project. 
The +name is limited to 512 Unicode characters.`, + }, + "alert_strategy": { + Type: schema.TypeList, + Optional: true, + Description: `Control over how this alert policy's notification channels are notified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_close": { + Type: schema.TypeString, + Optional: true, + Description: `If an alert policy that was active has no data for this long, any open incidents will close.`, + }, + "notification_channel_strategy": { + Type: schema.TypeList, + Optional: true, + Description: `Control over how the notification channels in 'notification_channels' +are notified when this alert fires, on a per-channel basis.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "notification_channel_names": { + Type: schema.TypeList, + Optional: true, + Description: `The notification channels that these settings apply to. Each of these +correspond to the name field in one of the NotificationChannel objects +referenced in the notification_channels field of this AlertPolicy. The format is +'projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]'`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "renotify_interval": { + Type: schema.TypeString, + Optional: true, + Description: `The frequency at which to send reminder notifications for open incidents.`, + }, + }, + }, + }, + "notification_rate_limit": { + Type: schema.TypeList, + Optional: true, + Description: `Required for alert policies with a LogMatch condition. 
+This limit is not implemented for alert policies that are not log-based.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "period": { + Type: schema.TypeString, + Optional: true, + Description: `Not more than one notification per period.`, + }, + }, + }, + }, + }, + }, + }, + "documentation": { + Type: schema.TypeList, + Optional: true, + Description: `Documentation that is included with notifications and incidents related +to this policy. Best practice is for the documentation to include information +to help responders understand, mitigate, escalate, and correct the underlying +problems detected by the alerting policy. Notification channels that have +limited capacity might not show this documentation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "content": { + Type: schema.TypeString, + Optional: true, + Description: `The text of the documentation, interpreted according to mimeType. +The content may not exceed 8,192 Unicode characters and may not +exceed more than 10,240 bytes when encoded in UTF-8 format, +whichever is smaller.`, + AtLeastOneOf: []string{"documentation.0.content", "documentation.0.mime_type"}, + }, + "mime_type": { + Type: schema.TypeString, + Optional: true, + Description: `The format of the content field. Presently, only the value +"text/markdown" is supported.`, + Default: "text/markdown", + AtLeastOneOf: []string{"documentation.0.content", "documentation.0.mime_type"}, + }, + }, + }, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not the policy is enabled. The default is true.`, + Default: true, + }, + "notification_channels": { + Type: schema.TypeList, + Optional: true, + Description: `Identifies the notification channels to which notifications should be +sent when incidents are opened or closed or when new violations occur +on an already opened incident. 
Each element of this array corresponds +to the name field in each of the NotificationChannel objects that are +returned from the notificationChannels.list method. The syntax of the +entries in this field is +'projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]'`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "user_labels": { + Type: schema.TypeMap, + Optional: true, + Description: `This field is intended to be used for organizing and identifying the AlertPolicy +objects.The field can contain up to 64 entries. Each key and value is limited +to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values +can contain only lowercase letters, numerals, underscores, and dashes. Keys +must begin with a letter.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "creation_record": { + Type: schema.TypeList, + Computed: true, + Description: `A read-only record of the creation of the alerting policy. +If provided in a call to create or update, this field will +be ignored.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mutate_time": { + Type: schema.TypeString, + Computed: true, + Description: `When the change occurred.`, + }, + "mutated_by": { + Type: schema.TypeString, + Computed: true, + Description: `The email address of the user making the change.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique resource name for this policy. 
+Its syntax is: projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceMonitoringAlertPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandMonitoringAlertPolicyDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + combinerProp, err := expandMonitoringAlertPolicyCombiner(d.Get("combiner"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("combiner"); !tpgresource.IsEmptyValue(reflect.ValueOf(combinerProp)) && (ok || !reflect.DeepEqual(v, combinerProp)) { + obj["combiner"] = combinerProp + } + enabledProp, err := expandMonitoringAlertPolicyEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); ok || !reflect.DeepEqual(v, enabledProp) { + obj["enabled"] = enabledProp + } + conditionsProp, err := expandMonitoringAlertPolicyConditions(d.Get("conditions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("conditions"); !tpgresource.IsEmptyValue(reflect.ValueOf(conditionsProp)) && (ok || !reflect.DeepEqual(v, conditionsProp)) { + obj["conditions"] = conditionsProp + } + notificationChannelsProp, err := expandMonitoringAlertPolicyNotificationChannels(d.Get("notification_channels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_channels"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(notificationChannelsProp)) && (ok || !reflect.DeepEqual(v, notificationChannelsProp)) { + obj["notificationChannels"] = notificationChannelsProp + } + alertStrategyProp, err := expandMonitoringAlertPolicyAlertStrategy(d.Get("alert_strategy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("alert_strategy"); !tpgresource.IsEmptyValue(reflect.ValueOf(alertStrategyProp)) && (ok || !reflect.DeepEqual(v, alertStrategyProp)) { + obj["alertStrategy"] = alertStrategyProp + } + userLabelsProp, err := expandMonitoringAlertPolicyUserLabels(d.Get("user_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("user_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(userLabelsProp)) && (ok || !reflect.DeepEqual(v, userLabelsProp)) { + obj["userLabels"] = userLabelsProp + } + documentationProp, err := expandMonitoringAlertPolicyDocumentation(d.Get("documentation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("documentation"); !tpgresource.IsEmptyValue(reflect.ValueOf(documentationProp)) && (ok || !reflect.DeepEqual(v, documentationProp)) { + obj["documentation"] = documentationProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "alertPolicy/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/alertPolicies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AlertPolicy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AlertPolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = 
bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return fmt.Errorf("Error creating AlertPolicy: %s", err) + } + if err := d.Set("name", flattenMonitoringAlertPolicyName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating AlertPolicy %q: %#v", d.Id(), res) + + return resourceMonitoringAlertPolicyRead(d, meta) +} + +func resourceMonitoringAlertPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AlertPolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("MonitoringAlertPolicy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + + if err := d.Set("name", flattenMonitoringAlertPolicyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("display_name", flattenMonitoringAlertPolicyDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading 
AlertPolicy: %s", err) + } + if err := d.Set("combiner", flattenMonitoringAlertPolicyCombiner(res["combiner"], d, config)); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("creation_record", flattenMonitoringAlertPolicyCreationRecord(res["creationRecord"], d, config)); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("enabled", flattenMonitoringAlertPolicyEnabled(res["enabled"], d, config)); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("conditions", flattenMonitoringAlertPolicyConditions(res["conditions"], d, config)); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("notification_channels", flattenMonitoringAlertPolicyNotificationChannels(res["notificationChannels"], d, config)); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("alert_strategy", flattenMonitoringAlertPolicyAlertStrategy(res["alertStrategy"], d, config)); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("user_labels", flattenMonitoringAlertPolicyUserLabels(res["userLabels"], d, config)); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + if err := d.Set("documentation", flattenMonitoringAlertPolicyDocumentation(res["documentation"], d, config)); err != nil { + return fmt.Errorf("Error reading AlertPolicy: %s", err) + } + + return nil +} + +func resourceMonitoringAlertPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AlertPolicy: %s", err) + } + billingProject = project + + obj := 
make(map[string]interface{}) + displayNameProp, err := expandMonitoringAlertPolicyDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + combinerProp, err := expandMonitoringAlertPolicyCombiner(d.Get("combiner"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("combiner"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, combinerProp)) { + obj["combiner"] = combinerProp + } + enabledProp, err := expandMonitoringAlertPolicyEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); ok || !reflect.DeepEqual(v, enabledProp) { + obj["enabled"] = enabledProp + } + conditionsProp, err := expandMonitoringAlertPolicyConditions(d.Get("conditions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("conditions"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, conditionsProp)) { + obj["conditions"] = conditionsProp + } + notificationChannelsProp, err := expandMonitoringAlertPolicyNotificationChannels(d.Get("notification_channels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("notification_channels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, notificationChannelsProp)) { + obj["notificationChannels"] = notificationChannelsProp + } + alertStrategyProp, err := expandMonitoringAlertPolicyAlertStrategy(d.Get("alert_strategy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("alert_strategy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, alertStrategyProp)) { + obj["alertStrategy"] = alertStrategyProp + } + userLabelsProp, err := 
expandMonitoringAlertPolicyUserLabels(d.Get("user_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("user_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, userLabelsProp)) { + obj["userLabels"] = userLabelsProp + } + documentationProp, err := expandMonitoringAlertPolicyDocumentation(d.Get("documentation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("documentation"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, documentationProp)) { + obj["documentation"] = documentationProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "alertPolicy/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating AlertPolicy %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("combiner") { + updateMask = append(updateMask, "combiner") + } + + if d.HasChange("enabled") { + updateMask = append(updateMask, "enabled") + } + + if d.HasChange("conditions") { + updateMask = append(updateMask, "conditions") + } + + if d.HasChange("notification_channels") { + updateMask = append(updateMask, "notificationChannels") + } + + if d.HasChange("alert_strategy") { + updateMask = append(updateMask, "alertStrategy") + } + + if d.HasChange("user_labels") { + updateMask = append(updateMask, "userLabels") + } + + if d.HasChange("documentation") { + updateMask = append(updateMask, "documentation") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != 
nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + + if err != nil { + return fmt.Errorf("Error updating AlertPolicy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating AlertPolicy %q: %#v", d.Id(), res) + } + + return resourceMonitoringAlertPolicyRead(d, meta) +} + +func resourceMonitoringAlertPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AlertPolicy: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "alertPolicy/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AlertPolicy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: 
config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AlertPolicy") + } + + log.Printf("[DEBUG] Finished deleting AlertPolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceMonitoringAlertPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { + return nil, err + } + + return []*schema.ResourceData{d}, nil +} + +func flattenMonitoringAlertPolicyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyCombiner(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyCreationRecord(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["mutate_time"] = + flattenMonitoringAlertPolicyCreationRecordMutateTime(original["mutateTime"], d, config) + transformed["mutated_by"] = + flattenMonitoringAlertPolicyCreationRecordMutatedBy(original["mutatedBy"], d, config) + return []interface{}{transformed} +} +func 
flattenMonitoringAlertPolicyCreationRecordMutateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyCreationRecordMutatedBy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "condition_absent": flattenMonitoringAlertPolicyConditionsConditionAbsent(original["conditionAbsent"], d, config), + "name": flattenMonitoringAlertPolicyConditionsName(original["name"], d, config), + "condition_monitoring_query_language": flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(original["conditionMonitoringQueryLanguage"], d, config), + "condition_threshold": flattenMonitoringAlertPolicyConditionsConditionThreshold(original["conditionThreshold"], d, config), + "display_name": flattenMonitoringAlertPolicyConditionsDisplayName(original["displayName"], d, config), + "condition_matched_log": flattenMonitoringAlertPolicyConditionsConditionMatchedLog(original["conditionMatchedLog"], d, config), + }) + } + return transformed +} +func flattenMonitoringAlertPolicyConditionsConditionAbsent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + 
transformed["aggregations"] = + flattenMonitoringAlertPolicyConditionsConditionAbsentAggregations(original["aggregations"], d, config) + transformed["trigger"] = + flattenMonitoringAlertPolicyConditionsConditionAbsentTrigger(original["trigger"], d, config) + transformed["duration"] = + flattenMonitoringAlertPolicyConditionsConditionAbsentDuration(original["duration"], d, config) + transformed["filter"] = + flattenMonitoringAlertPolicyConditionsConditionAbsentFilter(original["filter"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "per_series_aligner": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(original["perSeriesAligner"], d, config), + "group_by_fields": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(original["groupByFields"], d, config), + "alignment_period": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(original["alignmentPeriod"], d, config), + "cross_series_reducer": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(original["crossSeriesReducer"], d, config), + }) + } + return transformed +} +func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionAbsentTrigger(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["percent"] = + flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(original["percent"], d, config) + transformed["count"] = + flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(original["count"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenMonitoringAlertPolicyConditionsConditionAbsentDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionAbsentFilter(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["query"] = + flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(original["query"], d, config) + transformed["duration"] = + flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(original["duration"], d, config) + transformed["trigger"] = + flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(original["trigger"], d, config) + transformed["evaluation_missing_data"] = + flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(original["evaluationMissingData"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["percent"] = + flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerPercent(original["percent"], d, 
config) + transformed["count"] = + flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerCount(original["count"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["threshold_value"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(original["thresholdValue"], d, config) + transformed["denominator_filter"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(original["denominatorFilter"], d, config) + transformed["denominator_aggregations"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(original["denominatorAggregations"], d, config) + transformed["duration"] = + 
flattenMonitoringAlertPolicyConditionsConditionThresholdDuration(original["duration"], d, config) + transformed["forecast_options"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdForecastOptions(original["forecastOptions"], d, config) + transformed["comparison"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdComparison(original["comparison"], d, config) + transformed["trigger"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdTrigger(original["trigger"], d, config) + transformed["aggregations"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdAggregations(original["aggregations"], d, config) + transformed["filter"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdFilter(original["filter"], d, config) + transformed["evaluation_missing_data"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(original["evaluationMissingData"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "per_series_aligner": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(original["perSeriesAligner"], d, config), 
+ "group_by_fields": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(original["groupByFields"], d, config), + "alignment_period": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(original["alignmentPeriod"], d, config), + "cross_series_reducer": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(original["crossSeriesReducer"], d, config), + }) + } + return transformed +} +func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdForecastOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["forecast_horizon"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdForecastOptionsForecastHorizon(original["forecastHorizon"], d, config) + return 
[]interface{}{transformed} +} +func flattenMonitoringAlertPolicyConditionsConditionThresholdForecastOptionsForecastHorizon(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdComparison(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdTrigger(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["percent"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(original["percent"], d, config) + transformed["count"] = + flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(original["count"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + 
original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "per_series_aligner": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(original["perSeriesAligner"], d, config), + "group_by_fields": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(original["groupByFields"], d, config), + "alignment_period": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(original["alignmentPeriod"], d, config), + "cross_series_reducer": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(original["crossSeriesReducer"], d, config), + }) + } + return transformed +} +func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenMonitoringAlertPolicyConditionsDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionMatchedLog(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["filter"] = + flattenMonitoringAlertPolicyConditionsConditionMatchedLogFilter(original["filter"], d, config) + transformed["label_extractors"] = + flattenMonitoringAlertPolicyConditionsConditionMatchedLogLabelExtractors(original["labelExtractors"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyConditionsConditionMatchedLogFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyConditionsConditionMatchedLogLabelExtractors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyNotificationChannels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyAlertStrategy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["notification_rate_limit"] = + flattenMonitoringAlertPolicyAlertStrategyNotificationRateLimit(original["notificationRateLimit"], d, config) + transformed["auto_close"] = + flattenMonitoringAlertPolicyAlertStrategyAutoClose(original["autoClose"], d, config) + transformed["notification_channel_strategy"] = + flattenMonitoringAlertPolicyAlertStrategyNotificationChannelStrategy(original["notificationChannelStrategy"], 
d, config) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyAlertStrategyNotificationRateLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["period"] = + flattenMonitoringAlertPolicyAlertStrategyNotificationRateLimitPeriod(original["period"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyAlertStrategyNotificationRateLimitPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyAlertStrategyAutoClose(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyAlertStrategyNotificationChannelStrategy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "notification_channel_names": flattenMonitoringAlertPolicyAlertStrategyNotificationChannelStrategyNotificationChannelNames(original["notificationChannelNames"], d, config), + "renotify_interval": flattenMonitoringAlertPolicyAlertStrategyNotificationChannelStrategyRenotifyInterval(original["renotifyInterval"], d, config), + }) + } + return transformed +} +func flattenMonitoringAlertPolicyAlertStrategyNotificationChannelStrategyNotificationChannelNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenMonitoringAlertPolicyAlertStrategyNotificationChannelStrategyRenotifyInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyUserLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyDocumentation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["content"] = + flattenMonitoringAlertPolicyDocumentationContent(original["content"], d, config) + transformed["mime_type"] = + flattenMonitoringAlertPolicyDocumentationMimeType(original["mimeType"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringAlertPolicyDocumentationContent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringAlertPolicyDocumentationMimeType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandMonitoringAlertPolicyDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyCombiner(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedConditionAbsent, err := expandMonitoringAlertPolicyConditionsConditionAbsent(original["condition_absent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConditionAbsent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["conditionAbsent"] = transformedConditionAbsent + } + + transformedName, err := expandMonitoringAlertPolicyConditionsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedConditionMonitoringQueryLanguage, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(original["condition_monitoring_query_language"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConditionMonitoringQueryLanguage); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["conditionMonitoringQueryLanguage"] = transformedConditionMonitoringQueryLanguage + } + + transformedConditionThreshold, err := expandMonitoringAlertPolicyConditionsConditionThreshold(original["condition_threshold"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConditionThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["conditionThreshold"] = transformedConditionThreshold + } + + transformedDisplayName, err := expandMonitoringAlertPolicyConditionsDisplayName(original["display_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["displayName"] = transformedDisplayName + } + + transformedConditionMatchedLog, err := expandMonitoringAlertPolicyConditionsConditionMatchedLog(original["condition_matched_log"], d, config) + 
if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConditionMatchedLog); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["conditionMatchedLog"] = transformedConditionMatchedLog + } + + req = append(req, transformed) + } + return req, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAggregations, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregations(original["aggregations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAggregations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["aggregations"] = transformedAggregations + } + + transformedTrigger, err := expandMonitoringAlertPolicyConditionsConditionAbsentTrigger(original["trigger"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTrigger); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["trigger"] = transformedTrigger + } + + transformedDuration, err := expandMonitoringAlertPolicyConditionsConditionAbsentDuration(original["duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["duration"] = transformedDuration + } + + transformedFilter, err := expandMonitoringAlertPolicyConditionsConditionAbsentFilter(original["filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filter"] = transformedFilter + } + + return transformed, nil +} + +func 
expandMonitoringAlertPolicyConditionsConditionAbsentAggregations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPerSeriesAligner, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(original["per_series_aligner"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPerSeriesAligner); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["perSeriesAligner"] = transformedPerSeriesAligner + } + + transformedGroupByFields, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(original["group_by_fields"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupByFields); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupByFields"] = transformedGroupByFields + } + + transformedAlignmentPeriod, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(original["alignment_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAlignmentPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["alignmentPeriod"] = transformedAlignmentPeriod + } + + transformedCrossSeriesReducer, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(original["cross_series_reducer"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCrossSeriesReducer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["crossSeriesReducer"] = transformedCrossSeriesReducer + } + + req = append(req, transformed) + } + return req, nil +} + +func 
expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentTrigger(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPercent, err := expandMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(original["percent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["percent"] = transformedPercent + } + + transformedCount, err := expandMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(original["count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["count"] = transformedCount + } + + return transformed, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionAbsentFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedQuery, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(original["query"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedQuery); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["query"] = transformedQuery + } + + transformedDuration, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(original["duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["duration"] = transformedDuration + } + + transformedTrigger, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(original["trigger"], d, config) + if err != 
nil { + return nil, err + } else if val := reflect.ValueOf(transformedTrigger); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["trigger"] = transformedTrigger + } + + transformedEvaluationMissingData, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(original["evaluation_missing_data"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEvaluationMissingData); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["evaluationMissingData"] = transformedEvaluationMissingData + } + + return transformed, nil +} + +func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPercent, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerPercent(original["percent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["percent"] = transformedPercent + } + + transformedCount, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerCount(original["count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCount); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["count"] = transformedCount + } + + return transformed, nil +} + +func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerPercent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageEvaluationMissingData(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedThresholdValue, err := expandMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(original["threshold_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedThresholdValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["thresholdValue"] = transformedThresholdValue + } + + transformedDenominatorFilter, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(original["denominator_filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDenominatorFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["denominatorFilter"] = transformedDenominatorFilter + } + + transformedDenominatorAggregations, err := 
expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(original["denominator_aggregations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDenominatorAggregations); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["denominatorAggregations"] = transformedDenominatorAggregations + } + + transformedDuration, err := expandMonitoringAlertPolicyConditionsConditionThresholdDuration(original["duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["duration"] = transformedDuration + } + + transformedForecastOptions, err := expandMonitoringAlertPolicyConditionsConditionThresholdForecastOptions(original["forecast_options"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedForecastOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["forecastOptions"] = transformedForecastOptions + } + + transformedComparison, err := expandMonitoringAlertPolicyConditionsConditionThresholdComparison(original["comparison"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedComparison); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["comparison"] = transformedComparison + } + + transformedTrigger, err := expandMonitoringAlertPolicyConditionsConditionThresholdTrigger(original["trigger"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTrigger); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["trigger"] = transformedTrigger + } + + transformedAggregations, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregations(original["aggregations"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAggregations); val.IsValid() && !tpgresource.IsEmptyValue(val) 
{ + transformed["aggregations"] = transformedAggregations + } + + transformedFilter, err := expandMonitoringAlertPolicyConditionsConditionThresholdFilter(original["filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filter"] = transformedFilter + } + + transformedEvaluationMissingData, err := expandMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(original["evaluation_missing_data"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEvaluationMissingData); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["evaluationMissingData"] = transformedEvaluationMissingData + } + + return transformed, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPerSeriesAligner, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(original["per_series_aligner"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPerSeriesAligner); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["perSeriesAligner"] = 
transformedPerSeriesAligner + } + + transformedGroupByFields, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(original["group_by_fields"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupByFields); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupByFields"] = transformedGroupByFields + } + + transformedAlignmentPeriod, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(original["alignment_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAlignmentPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["alignmentPeriod"] = transformedAlignmentPeriod + } + + transformedCrossSeriesReducer, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(original["cross_series_reducer"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCrossSeriesReducer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["crossSeriesReducer"] = transformedCrossSeriesReducer + } + + req = append(req, transformed) + } + return req, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdForecastOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedForecastHorizon, err := expandMonitoringAlertPolicyConditionsConditionThresholdForecastOptionsForecastHorizon(original["forecast_horizon"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedForecastHorizon); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["forecastHorizon"] = transformedForecastHorizon + } + + return transformed, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdForecastOptionsForecastHorizon(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdComparison(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdTrigger(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPercent, 
err := expandMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(original["percent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["percent"] = transformedPercent + } + + transformedCount, err := expandMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(original["count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["count"] = transformedCount + } + + return transformed, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdAggregations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPerSeriesAligner, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(original["per_series_aligner"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPerSeriesAligner); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["perSeriesAligner"] = transformedPerSeriesAligner + } + + transformedGroupByFields, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(original["group_by_fields"], d, config) + if err != 
nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupByFields); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupByFields"] = transformedGroupByFields + } + + transformedAlignmentPeriod, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(original["alignment_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAlignmentPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["alignmentPeriod"] = transformedAlignmentPeriod + } + + transformedCrossSeriesReducer, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(original["cross_series_reducer"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCrossSeriesReducer); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["crossSeriesReducer"] = transformedCrossSeriesReducer + } + + req = append(req, transformed) + } + return req, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdFilter(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionThresholdEvaluationMissingData(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionMatchedLog(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFilter, err := expandMonitoringAlertPolicyConditionsConditionMatchedLogFilter(original["filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filter"] = transformedFilter + } + + transformedLabelExtractors, err := expandMonitoringAlertPolicyConditionsConditionMatchedLogLabelExtractors(original["label_extractors"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabelExtractors); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labelExtractors"] = transformedLabelExtractors + } + + return transformed, nil +} + +func expandMonitoringAlertPolicyConditionsConditionMatchedLogFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyConditionsConditionMatchedLogLabelExtractors(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil 
+ } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandMonitoringAlertPolicyNotificationChannels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyAlertStrategy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNotificationRateLimit, err := expandMonitoringAlertPolicyAlertStrategyNotificationRateLimit(original["notification_rate_limit"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNotificationRateLimit); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["notificationRateLimit"] = transformedNotificationRateLimit + } + + transformedAutoClose, err := expandMonitoringAlertPolicyAlertStrategyAutoClose(original["auto_close"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAutoClose); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["autoClose"] = transformedAutoClose + } + + transformedNotificationChannelStrategy, err := expandMonitoringAlertPolicyAlertStrategyNotificationChannelStrategy(original["notification_channel_strategy"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNotificationChannelStrategy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["notificationChannelStrategy"] = transformedNotificationChannelStrategy + } + + return transformed, nil +} + +func expandMonitoringAlertPolicyAlertStrategyNotificationRateLimit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := 
v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPeriod, err := expandMonitoringAlertPolicyAlertStrategyNotificationRateLimitPeriod(original["period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["period"] = transformedPeriod + } + + return transformed, nil +} + +func expandMonitoringAlertPolicyAlertStrategyNotificationRateLimitPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyAlertStrategyAutoClose(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyAlertStrategyNotificationChannelStrategy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNotificationChannelNames, err := expandMonitoringAlertPolicyAlertStrategyNotificationChannelStrategyNotificationChannelNames(original["notification_channel_names"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNotificationChannelNames); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["notificationChannelNames"] = transformedNotificationChannelNames + } + + transformedRenotifyInterval, err := expandMonitoringAlertPolicyAlertStrategyNotificationChannelStrategyRenotifyInterval(original["renotify_interval"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedRenotifyInterval); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["renotifyInterval"] = transformedRenotifyInterval + } + + req = append(req, transformed) + } + return req, nil +} + +func expandMonitoringAlertPolicyAlertStrategyNotificationChannelStrategyNotificationChannelNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyAlertStrategyNotificationChannelStrategyRenotifyInterval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyUserLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandMonitoringAlertPolicyDocumentation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedContent, err := expandMonitoringAlertPolicyDocumentationContent(original["content"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["content"] = transformedContent + } + + transformedMimeType, err := expandMonitoringAlertPolicyDocumentationMimeType(original["mime_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMimeType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mimeType"] = transformedMimeType + } + + return transformed, nil +} + +func 
expandMonitoringAlertPolicyDocumentationContent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringAlertPolicyDocumentationMimeType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_alert_policy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_alert_policy_sweeper.go new file mode 100644 index 0000000000..b2c16a5c10 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_alert_policy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("MonitoringAlertPolicy", testSweepMonitoringAlertPolicy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepMonitoringAlertPolicy(region string) error { + resourceName := "MonitoringAlertPolicy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://monitoring.googleapis.com/v3/projects/{{project}}/alertPolicies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["alertPolicies"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://monitoring.googleapis.com/v3/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_custom_service.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_custom_service.go new file mode 100644 index 0000000000..39c70133ee --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_custom_service.go @@ -0,0 +1,495 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceMonitoringService() *schema.Resource { + return &schema.Resource{ + Create: resourceMonitoringServiceCreate, + Read: resourceMonitoringServiceRead, + Update: resourceMonitoringServiceUpdate, + Delete: resourceMonitoringServiceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceMonitoringServiceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: 
`Name used for UI elements listing this Service.`, + }, + "service_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^[a-z0-9\-]+$`), + Description: `An optional service ID to use. If not given, the server will generate a +service ID.`, + }, + "telemetry": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for how to query telemetry on a Service.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_name": { + Type: schema.TypeString, + Optional: true, + Description: `The full name of the resource that defines this service. +Formatted as described in +https://cloud.google.com/apis/design/resource_names.`, + }, + }, + }, + }, + "user_labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels which have been used to annotate the service. Label keys must start +with a letter. Label keys and values may contain lowercase letters, +numbers, underscores, and dashes. Label keys and values have a maximum +length of 63 characters, and must be less than 128 bytes in size. Up to 64 +label entries may be stored. For labels which do not have a semantic value, +the empty string may be supplied for the label value.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The full resource name for this service. 
The syntax is: +projects/[PROJECT_ID]/services/[SERVICE_ID].`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceMonitoringServiceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandMonitoringServiceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + userLabelsProp, err := expandMonitoringServiceUserLabels(d.Get("user_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("user_labels"); ok || !reflect.DeepEqual(v, userLabelsProp) { + obj["userLabels"] = userLabelsProp + } + telemetryProp, err := expandMonitoringServiceTelemetry(d.Get("telemetry"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("telemetry"); !tpgresource.IsEmptyValue(reflect.ValueOf(telemetryProp)) && (ok || !reflect.DeepEqual(v, telemetryProp)) { + obj["telemetry"] = telemetryProp + } + nameProp, err := expandMonitoringServiceServiceId(d.Get("service_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + obj, err = resourceMonitoringServiceEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/services?serviceId={{service_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Service: 
%#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return fmt.Errorf("Error creating Service: %s", err) + } + if err := d.Set("name", flattenMonitoringServiceName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Service %q: %#v", d.Id(), res) + + return resourceMonitoringServiceRead(d, meta) +} + +func resourceMonitoringServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } 
+ + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("MonitoringService %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + + if err := d.Set("name", flattenMonitoringServiceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("display_name", flattenMonitoringServiceDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("user_labels", flattenMonitoringServiceUserLabels(res["userLabels"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("telemetry", flattenMonitoringServiceTelemetry(res["telemetry"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + if err := d.Set("service_id", flattenMonitoringServiceServiceId(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Service: %s", err) + } + + return nil +} + +func resourceMonitoringServiceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandMonitoringServiceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + userLabelsProp, err := expandMonitoringServiceUserLabels(d.Get("user_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("user_labels"); ok || !reflect.DeepEqual(v, userLabelsProp) { + obj["userLabels"] = userLabelsProp + } + telemetryProp, err := expandMonitoringServiceTelemetry(d.Get("telemetry"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("telemetry"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, telemetryProp)) { + obj["telemetry"] = telemetryProp + } + + obj, err = resourceMonitoringServiceEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Service %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("user_labels") { + updateMask = append(updateMask, "userLabels") + } + + if d.HasChange("telemetry") { + updateMask = append(updateMask, "telemetry") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: 
[]transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + + if err != nil { + return fmt.Errorf("Error updating Service %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Service %q: %#v", d.Id(), res) + } + + return resourceMonitoringServiceRead(d, meta) +} + +func resourceMonitoringServiceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Service: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Service %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Service") + } + + log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) + return nil +} + +func resourceMonitoringServiceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); 
err != nil { + return nil, err + } + + return []*schema.ResourceData{d}, nil +} + +func flattenMonitoringServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringServiceDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringServiceUserLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringServiceTelemetry(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resource_name"] = + flattenMonitoringServiceTelemetryResourceName(original["resourceName"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringServiceTelemetryResourceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringServiceServiceId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandMonitoringServiceDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringServiceUserLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandMonitoringServiceTelemetry(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == 
nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedResourceName, err := expandMonitoringServiceTelemetryResourceName(original["resource_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResourceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resourceName"] = transformedResourceName + } + + return transformed, nil +} + +func expandMonitoringServiceTelemetryResourceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringServiceServiceId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceMonitoringServiceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Currently only CUSTOM service types can be created, but the + // custom identifier block does not actually have fields right now. + // Set to empty to indicate manually-created service type is CUSTOM. + if _, ok := obj["custom"]; !ok { + obj["custom"] = map[string]interface{}{} + } + // Name/Service ID is a query parameter only + delete(obj, "name") + + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_custom_service_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_custom_service_sweeper.go new file mode 100644 index 0000000000..1d9c9b226f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_custom_service_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("MonitoringService", testSweepMonitoringService) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepMonitoringService(region string) error { + resourceName := "MonitoringService" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://monitoring.googleapis.com/v3/projects/{{project}}/services", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["services"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://monitoring.googleapis.com/v3/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] 
Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_dashboard.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_dashboard.go new file mode 100644 index 0000000000..1f0eca31c9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_dashboard.go @@ -0,0 +1,252 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package monitoring + +import ( + "fmt" + "reflect" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func monitoringDashboardDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + computedFields := []string{"etag", "name"} + + oldMap, err := structure.ExpandJsonFromString(old) + if err != nil { + return false + } + + newMap, err := structure.ExpandJsonFromString(new) + if err != nil { + return false + } + + for _, f := range computedFields { + delete(oldMap, f) + delete(newMap, f) + } + + return reflect.DeepEqual(oldMap, newMap) +} + +func ResourceMonitoringDashboard() *schema.Resource { + return &schema.Resource{ + Create: resourceMonitoringDashboardCreate, + Read: resourceMonitoringDashboardRead, + Update: resourceMonitoringDashboardUpdate, + Delete: resourceMonitoringDashboardDelete, + + Importer: &schema.ResourceImporter{ + State: 
resourceMonitoringDashboardImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Update: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "dashboard_json": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: monitoringDashboardDiffSuppress, + StateFunc: func(v interface{}) string { + json, _ := structure.NormalizeJsonString(v) + return json + }, + Description: `The JSON representation of a dashboard, following the format at https://cloud.google.com/monitoring/api/ref_v3/rest/v1/projects.dashboards.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceMonitoringDashboardCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj, err := structure.ExpandJsonFromString(d.Get("dashboard_json").(string)) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v1/projects/{{project}}/dashboards") + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return fmt.Errorf("Error creating Dashboard: %s", err) + } + + name, ok := 
res["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + d.SetId(name.(string)) + + return resourceMonitoringDashboardRead(d, config) +} + +func resourceMonitoringDashboardRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url := config.MonitoringBasePath + "v1/" + d.Id() + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("MonitoringDashboard %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting Dashboard: %s", err) + } + + str, err := structure.FlattenJsonToString(res) + if err != nil { + return fmt.Errorf("Error reading Dashboard: %s", err) + } + if err = d.Set("dashboard_json", str); err != nil { + return fmt.Errorf("Error reading Dashboard: %s", err) + } + + return nil +} + +func resourceMonitoringDashboardUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + o, n := d.GetChange("dashboard_json") + oObj, err := structure.ExpandJsonFromString(o.(string)) + if err != nil { + return err + } + nObj, err := structure.ExpandJsonFromString(n.(string)) + if err != nil { + return err + } + + nObj["etag"] = oObj["etag"] + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + url := 
config.MonitoringBasePath + "v1/" + d.Id() + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: nObj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return fmt.Errorf("Error updating Dashboard %q: %s", d.Id(), err) + } + + return resourceMonitoringDashboardRead(d, config) +} + +func resourceMonitoringDashboardDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url := config.MonitoringBasePath + "v1/" + d.Id() + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: project, + RawURL: url, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("MonitoringDashboard %q", d.Id())) + } + + return nil +} + +func resourceMonitoringDashboardImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + parts, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/dashboards/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return nil, err + } + + if err := d.Set("project", parts["project"]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/dashboards/%s", 
parts["project"], parts["id"])) + + return []*schema.ResourceData{d}, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_group.go new file mode 100644 index 0000000000..36bbca39e7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_group.go @@ -0,0 +1,439 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceMonitoringGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceMonitoringGroupCreate, + Read: resourceMonitoringGroupRead, + Update: resourceMonitoringGroupUpdate, + Delete: resourceMonitoringGroupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceMonitoringGroupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `A user-assigned name for this group, used only for display +purposes.`, + }, + "filter": { + Type: schema.TypeString, + Required: true, + Description: `The filter used to determine which monitored resources +belong to this group.`, + }, + "is_cluster": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, the members of this group are considered to be a +cluster. The system can perform additional analysis on +groups that are clusters.`, + }, + "parent_name": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The name of the group's parent, if it has one. The format is +"projects/{project_id_or_number}/groups/{group_id}". For +groups with no parent, parentName is the empty string, "".`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `A unique identifier for this group. 
The format is +"projects/{project_id_or_number}/groups/{group_id}".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceMonitoringGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + parentNameProp, err := expandMonitoringGroupParentName(d.Get("parent_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentNameProp)) && (ok || !reflect.DeepEqual(v, parentNameProp)) { + obj["parentName"] = parentNameProp + } + isClusterProp, err := expandMonitoringGroupIsCluster(d.Get("is_cluster"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("is_cluster"); !tpgresource.IsEmptyValue(reflect.ValueOf(isClusterProp)) && (ok || !reflect.DeepEqual(v, isClusterProp)) { + obj["isCluster"] = isClusterProp + } + displayNameProp, err := expandMonitoringGroupDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + filterProp, err := expandMonitoringGroupFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "stackdriver/groups/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := 
tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/groups") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Group: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Group: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return fmt.Errorf("Error creating Group: %s", err) + } + if err := d.Set("name", flattenMonitoringGroupName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating Group %q: %#v", d.Id(), res) + + return resourceMonitoringGroupRead(d, meta) +} + +func resourceMonitoringGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Group: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("MonitoringGroup %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + + if err := d.Set("parent_name", flattenMonitoringGroupParentName(res["parentName"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("name", flattenMonitoringGroupName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("is_cluster", flattenMonitoringGroupIsCluster(res["isCluster"], d, config)); err != nil { + return fmt.Errorf("Error reading 
Group: %s", err) + } + if err := d.Set("display_name", flattenMonitoringGroupDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + if err := d.Set("filter", flattenMonitoringGroupFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading Group: %s", err) + } + + return nil +} + +func resourceMonitoringGroupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Group: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + parentNameProp, err := expandMonitoringGroupParentName(d.Get("parent_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, parentNameProp)) { + obj["parentName"] = parentNameProp + } + isClusterProp, err := expandMonitoringGroupIsCluster(d.Get("is_cluster"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("is_cluster"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, isClusterProp)) { + obj["isCluster"] = isClusterProp + } + displayNameProp, err := expandMonitoringGroupDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + filterProp, err := expandMonitoringGroupFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
filterProp)) { + obj["filter"] = filterProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "stackdriver/groups/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Group %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + + if err != nil { + return fmt.Errorf("Error updating Group %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Group %q: %#v", d.Id(), res) + } + + return resourceMonitoringGroupRead(d, meta) +} + +func resourceMonitoringGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Group: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "stackdriver/groups/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + 
log.Printf("[DEBUG] Deleting Group %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Group") + } + + log.Printf("[DEBUG] Finished deleting Group %q: %#v", d.Id(), res) + return nil +} + +func resourceMonitoringGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { + return nil, err + } + + return []*schema.ResourceData{d}, nil +} + +func flattenMonitoringGroupParentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringGroupName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringGroupIsCluster(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringGroupDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringGroupFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandMonitoringGroupParentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} 
+ +func expandMonitoringGroupIsCluster(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringGroupDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringGroupFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_group_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_group_sweeper.go new file mode 100644 index 0000000000..869c2b2c27 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_group_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("MonitoringGroup", testSweepMonitoringGroup) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepMonitoringGroup(region string) error { + resourceName := "MonitoringGroup" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://monitoring.googleapis.com/v3/projects/{{project}}/groups", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from 
request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["groups"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://monitoring.googleapis.com/v3/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_metric_descriptor.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_metric_descriptor.go new file mode 
100644 index 0000000000..17003f9f1c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_metric_descriptor.go @@ -0,0 +1,775 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceMonitoringMetricDescriptor() *schema.Resource { + return &schema.Resource{ + Create: resourceMonitoringMetricDescriptorCreate, + Read: resourceMonitoringMetricDescriptorRead, + Update: resourceMonitoringMetricDescriptorUpdate, + Delete: resourceMonitoringMetricDescriptorDelete, + + Importer: &schema.ResourceImporter{ + State: resourceMonitoringMetricDescriptorImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A detailed description of the metric, which can be used in documentation.`, + }, + "display_name": 
{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example "Request count".`, + }, + "metric_kind": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"METRIC_KIND_UNSPECIFIED", "GAUGE", "DELTA", "CUMULATIVE"}), + Description: `Whether the metric records instantaneous values, changes to a value, etc. Some combinations of metricKind and valueType might not be supported. Possible values: ["METRIC_KIND_UNSPECIFIED", "GAUGE", "DELTA", "CUMULATIVE"]`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The metric type, including its DNS name prefix. The type is not URL-encoded. All service defined metrics must be prefixed with the service name, in the format of {service name}/{relative metric name}, such as cloudsql.googleapis.com/database/cpu/utilization. The relative metric name must have only upper and lower-case letters, digits, '/' and underscores '_' are allowed. Additionally, the maximum number of characters allowed for the relative_metric_name is 100. All user-defined metric types have the DNS name custom.googleapis.com, external.googleapis.com, or logging.googleapis.com/user/.`, + }, + "value_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION"}), + Description: `Whether the measurement is an integer, a floating-point number, etc. Some combinations of metricKind and valueType might not be supported. Possible values: ["BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION"]`, + }, + "labels": { + Type: schema.TypeSet, + Optional: true, + Description: `The set of labels that can be used to describe a specific instance of this metric type. 
In order to delete a label, the entire resource must be deleted, then created with the desired labels.`, + Elem: monitoringMetricDescriptorLabelsSchema(), + // Default schema.HashSchema is used. + }, + "launch_stage": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED", ""}), + Description: `The launch stage of the metric definition. Possible values: ["LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED"]`, + }, + "metadata": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Metadata which can be used to guide usage of the metric.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ingest_delay": { + Type: schema.TypeString, + Optional: true, + Description: `The delay of data points caused by ingestion. Data points older than this age are guaranteed to be ingested and available to be read, excluding data loss due to errors. In '[duration format](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?&_ga=2.264881487.1507873253.1593446723-935052455.1591817775#google.protobuf.Duration)'.`, + AtLeastOneOf: []string{"metadata.0.sample_period", "metadata.0.ingest_delay"}, + }, + "sample_period": { + Type: schema.TypeString, + Optional: true, + Description: `The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period. 
In '[duration format](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?&_ga=2.264881487.1507873253.1593446723-935052455.1591817775#google.protobuf.Duration)'.`, + AtLeastOneOf: []string{"metadata.0.sample_period", "metadata.0.ingest_delay"}, + }, + }, + }, + }, + "unit": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The units in which the metric value is reported. It is only applicable if the +valueType is INT64, DOUBLE, or DISTRIBUTION. The unit defines the representation of +the stored metric values. + +Different systems may scale the values to be more easily displayed (so a value of +0.02KBy might be displayed as 20By, and a value of 3523KBy might be displayed as +3.5MBy). However, if the unit is KBy, then the value of the metric is always in +thousands of bytes, no matter how it may be displayed. + +If you want a custom metric to record the exact number of CPU-seconds used by a job, +you can create an INT64 CUMULATIVE metric whose unit is s{CPU} (or equivalently +1s{CPU} or just s). If the job uses 12,005 CPU-seconds, then the value is written as +12005. + +Alternatively, if you want a custom metric to record data in a more granular way, you +can create a DOUBLE CUMULATIVE metric whose unit is ks{CPU}, and then write the value +12.005 (which is 12005/1000), or use Kis{CPU} and write 11.723 (which is 12005/1024). +The supported units are a subset of The Unified Code for Units of Measure standard. +More info can be found in the API documentation +(https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors).`, + }, + "monitored_resource_types": { + Type: schema.TypeSet, + Computed: true, + Description: `If present, then a time series, which is identified partially by a metric type and a MonitoredResourceDescriptor, that is associated with this metric type can only be associated with one of the monitored resource types listed here. 
This field allows time series to be associated with the intersection of this metric type and the monitored resource types in this list.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the metric descriptor.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func monitoringMetricDescriptorLabelsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `The key for this label. The key must not exceed 100 characters. The first character of the key must be an upper- or lower-case letter, the remaining characters must be letters, digits or underscores, and the key must match the regular expression [a-zA-Z][a-zA-Z0-9_]*`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description for the label.`, + }, + "value_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"STRING", "BOOL", "INT64", ""}), + Description: `The type of data that can be assigned to the label. 
Default value: "STRING" Possible values: ["STRING", "BOOL", "INT64"]`, + Default: "STRING", + }, + }, + } +} + +func resourceMonitoringMetricDescriptorCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + typeProp, err := expandMonitoringMetricDescriptorType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + labelsProp, err := expandMonitoringMetricDescriptorLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + metricKindProp, err := expandMonitoringMetricDescriptorMetricKind(d.Get("metric_kind"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metric_kind"); !tpgresource.IsEmptyValue(reflect.ValueOf(metricKindProp)) && (ok || !reflect.DeepEqual(v, metricKindProp)) { + obj["metricKind"] = metricKindProp + } + valueTypeProp, err := expandMonitoringMetricDescriptorValueType(d.Get("value_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("value_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(valueTypeProp)) && (ok || !reflect.DeepEqual(v, valueTypeProp)) { + obj["valueType"] = valueTypeProp + } + unitProp, err := expandMonitoringMetricDescriptorUnit(d.Get("unit"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("unit"); !tpgresource.IsEmptyValue(reflect.ValueOf(unitProp)) && (ok || !reflect.DeepEqual(v, unitProp)) { + obj["unit"] = unitProp + } + descriptionProp, err := 
expandMonitoringMetricDescriptorDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandMonitoringMetricDescriptorDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + metadataProp, err := expandMonitoringMetricDescriptorMetadata(d.Get("metadata"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(metadataProp)) && (ok || !reflect.DeepEqual(v, metadataProp)) { + obj["metadata"] = metadataProp + } + launchStageProp, err := expandMonitoringMetricDescriptorLaunchStage(d.Get("launch_stage"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("launch_stage"); !tpgresource.IsEmptyValue(reflect.ValueOf(launchStageProp)) && (ok || !reflect.DeepEqual(v, launchStageProp)) { + obj["launchStage"] = launchStageProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/metricDescriptors") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new MetricDescriptor: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + 
Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return fmt.Errorf("Error creating MetricDescriptor: %s", err) + } + if err := d.Set("name", flattenMonitoringMetricDescriptorName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = transport_tpg.PollingWaitTime(resourceMonitoringMetricDescriptorPollRead(d, meta), transport_tpg.PollCheckForExistence, "Creating MetricDescriptor", d.Timeout(schema.TimeoutCreate), 20) + if err != nil { + return fmt.Errorf("Error waiting to create MetricDescriptor: %s", err) + } + + log.Printf("[DEBUG] Finished creating MetricDescriptor %q: %#v", d.Id(), res) + + return resourceMonitoringMetricDescriptorRead(d, meta) +} + +func resourceMonitoringMetricDescriptorPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return nil, err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return res, err + } + return res, nil + } +} + +func resourceMonitoringMetricDescriptorRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("MonitoringMetricDescriptor %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading MetricDescriptor: %s", err) + } + + if err := d.Set("name", flattenMonitoringMetricDescriptorName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading MetricDescriptor: %s", err) + } + if err := d.Set("type", flattenMonitoringMetricDescriptorType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading MetricDescriptor: %s", err) + } + if err := d.Set("labels", 
flattenMonitoringMetricDescriptorLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading MetricDescriptor: %s", err) + } + if err := d.Set("metric_kind", flattenMonitoringMetricDescriptorMetricKind(res["metricKind"], d, config)); err != nil { + return fmt.Errorf("Error reading MetricDescriptor: %s", err) + } + if err := d.Set("value_type", flattenMonitoringMetricDescriptorValueType(res["valueType"], d, config)); err != nil { + return fmt.Errorf("Error reading MetricDescriptor: %s", err) + } + if err := d.Set("unit", flattenMonitoringMetricDescriptorUnit(res["unit"], d, config)); err != nil { + return fmt.Errorf("Error reading MetricDescriptor: %s", err) + } + if err := d.Set("description", flattenMonitoringMetricDescriptorDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading MetricDescriptor: %s", err) + } + if err := d.Set("display_name", flattenMonitoringMetricDescriptorDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading MetricDescriptor: %s", err) + } + if err := d.Set("monitored_resource_types", flattenMonitoringMetricDescriptorMonitoredResourceTypes(res["monitoredResourceTypes"], d, config)); err != nil { + return fmt.Errorf("Error reading MetricDescriptor: %s", err) + } + + return nil +} + +func resourceMonitoringMetricDescriptorUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + typeProp, err := expandMonitoringMetricDescriptorType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + labelsProp, err := expandMonitoringMetricDescriptorLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + metricKindProp, err := expandMonitoringMetricDescriptorMetricKind(d.Get("metric_kind"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metric_kind"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metricKindProp)) { + obj["metricKind"] = metricKindProp + } + valueTypeProp, err := expandMonitoringMetricDescriptorValueType(d.Get("value_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("value_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, valueTypeProp)) { + obj["valueType"] = valueTypeProp + } + unitProp, err := expandMonitoringMetricDescriptorUnit(d.Get("unit"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("unit"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, unitProp)) { + obj["unit"] = unitProp + } + descriptionProp, err := expandMonitoringMetricDescriptorDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandMonitoringMetricDescriptorDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + metadataProp, err := 
expandMonitoringMetricDescriptorMetadata(d.Get("metadata"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metadataProp)) { + obj["metadata"] = metadataProp + } + launchStageProp, err := expandMonitoringMetricDescriptorLaunchStage(d.Get("launch_stage"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("launch_stage"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, launchStageProp)) { + obj["launchStage"] = launchStageProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/metricDescriptors") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating MetricDescriptor %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + + if err != nil { + return fmt.Errorf("Error updating MetricDescriptor %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating MetricDescriptor %q: %#v", d.Id(), res) + } + + err = transport_tpg.PollingWaitTime(resourceMonitoringMetricDescriptorPollRead(d, meta), transport_tpg.PollCheckForExistence, "Updating MetricDescriptor", d.Timeout(schema.TimeoutUpdate), 20) + if err != nil { + return err + } + + return resourceMonitoringMetricDescriptorRead(d, meta) +} + +func resourceMonitoringMetricDescriptorDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting MetricDescriptor %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "MetricDescriptor") + } + + err = transport_tpg.PollingWaitTime(resourceMonitoringMetricDescriptorPollRead(d, meta), transport_tpg.PollCheckForAbsence, "Deleting MetricDescriptor", d.Timeout(schema.TimeoutCreate), 20) + if err != nil { + return fmt.Errorf("Error waiting to delete MetricDescriptor: %s", err) + } + + log.Printf("[DEBUG] Finished deleting MetricDescriptor %q: %#v", d.Id(), res) + return nil +} + +func resourceMonitoringMetricDescriptorImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { + return nil, err + } + + return []*schema.ResourceData{d}, nil +} + +func flattenMonitoringMetricDescriptorName(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringMetricDescriptorType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringMetricDescriptorLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(monitoringMetricDescriptorLabelsSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "key": flattenMonitoringMetricDescriptorLabelsKey(original["key"], d, config), + "value_type": flattenMonitoringMetricDescriptorLabelsValueType(original["valueType"], d, config), + "description": flattenMonitoringMetricDescriptorLabelsDescription(original["description"], d, config), + }) + } + return transformed +} +func flattenMonitoringMetricDescriptorLabelsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringMetricDescriptorLabelsValueType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "STRING" + } + + return v +} + +func flattenMonitoringMetricDescriptorLabelsDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringMetricDescriptorMetricKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringMetricDescriptorValueType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringMetricDescriptorUnit(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringMetricDescriptorDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringMetricDescriptorDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringMetricDescriptorMonitoredResourceTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func expandMonitoringMetricDescriptorType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringMetricDescriptorLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKey, err := expandMonitoringMetricDescriptorLabelsKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedValueType, err := expandMonitoringMetricDescriptorLabelsValueType(original["value_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValueType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["valueType"] = transformedValueType + } + + transformedDescription, err := expandMonitoringMetricDescriptorLabelsDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + req = append(req, transformed) + } + return req, nil +} + +func expandMonitoringMetricDescriptorLabelsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringMetricDescriptorLabelsValueType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringMetricDescriptorLabelsDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringMetricDescriptorMetricKind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringMetricDescriptorValueType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringMetricDescriptorUnit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringMetricDescriptorDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringMetricDescriptorDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringMetricDescriptorMetadata(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSamplePeriod, err := 
expandMonitoringMetricDescriptorMetadataSamplePeriod(original["sample_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSamplePeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["samplePeriod"] = transformedSamplePeriod + } + + transformedIngestDelay, err := expandMonitoringMetricDescriptorMetadataIngestDelay(original["ingest_delay"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIngestDelay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ingestDelay"] = transformedIngestDelay + } + + return transformed, nil +} + +func expandMonitoringMetricDescriptorMetadataSamplePeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringMetricDescriptorMetadataIngestDelay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringMetricDescriptorLaunchStage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_metric_descriptor_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_metric_descriptor_sweeper.go new file mode 100644 index 0000000000..0e08540680 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_metric_descriptor_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("MonitoringMetricDescriptor", testSweepMonitoringMetricDescriptor) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepMonitoringMetricDescriptor(region string) error { + resourceName := "MonitoringMetricDescriptor" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://monitoring.googleapis.com/v3/projects/{{project}}/metricDescriptors", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["metricDescriptors"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://monitoring.googleapis.com/v3/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + 
log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_monitored_project.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_monitored_project.go new file mode 100644 index 0000000000..cc958efe37 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_monitored_project.go @@ -0,0 +1,412 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "context" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceMonitoringMonitoredProject() *schema.Resource { + return &schema.Resource{ + Create: resourceMonitoringMonitoredProjectCreate, + Read: resourceMonitoringMonitoredProjectRead, + Delete: resourceMonitoringMonitoredProjectDelete, + + Importer: &schema.ResourceImporter{ + State: resourceMonitoringMonitoredProjectImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + SchemaVersion: 1, + + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceMonitoringMonitoredProjectResourceV0().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceMonitoringMonitoredProjectUpgradeV0, + Version: 0, + }, + }, + + Schema: map[string]*schema.Schema{ + "metrics_scope": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `Required. The resource name of the existing Metrics Scope that will monitor this project. Example: locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `Immutable. The resource name of the 'MonitoredProject'. On input, the resource name includes the scoping project ID and monitored project ID. On output, it contains the equivalent project numbers. 
Example: 'locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}'`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The time when this 'MonitoredProject' was created.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceMonitoringMonitoredProjectCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandNestedMonitoringMonitoredProjectName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + obj, err = resourceMonitoringMonitoredProjectEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v1/locations/global/metricsScopes/{{metrics_scope}}/projects") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new MonitoredProject: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringPermissionError}, + }) + if err != nil { + return fmt.Errorf("Error creating MonitoredProject: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "locations/global/metricsScopes/{{metrics_scope}}/projects/{{name}}") 
+ if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating MonitoredProject %q: %#v", d.Id(), res) + + return resourceMonitoringMonitoredProjectRead(d, meta) +} + +func resourceMonitoringMonitoredProjectRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v1/locations/global/metricsScopes/{{metrics_scope}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + name := d.Get("name").(string) + name = tpgresource.GetResourceNameFromSelfLink(name) + d.Set("name", name) + metricsScope := d.Get("metrics_scope").(string) + metricsScope = tpgresource.GetResourceNameFromSelfLink(metricsScope) + d.Set("metrics_scope", metricsScope) + url, err = tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v1/locations/global/metricsScopes/{{metrics_scope}}") + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringPermissionError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("MonitoringMonitoredProject %q", d.Id())) + } + + res, err = flattenNestedMonitoringMonitoredProject(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. 
+ log.Printf("[DEBUG] Removing MonitoringMonitoredProject because it couldn't be matched.") + d.SetId("") + return nil + } + + res, err = resourceMonitoringMonitoredProjectDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing MonitoringMonitoredProject because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenNestedMonitoringMonitoredProjectName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading MonitoredProject: %s", err) + } + if err := d.Set("create_time", flattenNestedMonitoringMonitoredProjectCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading MonitoredProject: %s", err) + } + + return nil +} + +func resourceMonitoringMonitoredProjectDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v1/locations/global/metricsScopes/{{metrics_scope}}/projects/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting MonitoredProject %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringPermissionError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "MonitoredProject") 
+ } + + log.Printf("[DEBUG] Finished deleting MonitoredProject %q: %#v", d.Id(), res) + return nil +} + +func resourceMonitoringMonitoredProjectImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + name := d.Get("name").(string) + name = tpgresource.GetResourceNameFromSelfLink(name) + d.Set("name", name) + metricsScope := d.Get("metrics_scope").(string) + metricsScope = tpgresource.GetResourceNameFromSelfLink(metricsScope) + d.Set("metrics_scope", metricsScope) + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "locations/global/metricsScopes/(?P[^/]+)/projects/(?P[^/]+)", + "v1/locations/global/metricsScopes/(?P[^/]+)/projects/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "locations/global/metricsScopes/{{metrics_scope}}/projects/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedMonitoringMonitoredProjectName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedMonitoringMonitoredProjectCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedMonitoringMonitoredProjectName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceMonitoringMonitoredProjectEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + name := d.Get("name").(string) + name = tpgresource.GetResourceNameFromSelfLink(name) + d.Set("name", name) + metricsScope := d.Get("metrics_scope").(string) + metricsScope = tpgresource.GetResourceNameFromSelfLink(metricsScope) + d.Set("metrics_scope", 
metricsScope) + obj["name"] = fmt.Sprintf("locations/global/metricsScopes/%s/projects/%s", metricsScope, name) + return obj, nil +} + +func flattenNestedMonitoringMonitoredProject(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["monitoredProjects"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value monitoredProjects. Actual value: %v", v) + } + + _, item, err := resourceMonitoringMonitoredProjectFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceMonitoringMonitoredProjectFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName, err := expandNestedMonitoringMonitoredProjectName(d.Get("name"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedName := flattenNestedMonitoringMonitoredProjectName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + // Decode list item before comparing. 
+ item, err := resourceMonitoringMonitoredProjectDecoder(d, meta, item) + if err != nil { + return -1, nil, err + } + + itemName := flattenNestedMonitoringMonitoredProjectName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} +func resourceMonitoringMonitoredProjectDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + name := res["name"].(string) + name = tpgresource.GetResourceNameFromSelfLink(name) + if name != "" { + project, err := config.NewResourceManagerClient(config.UserAgent).Projects.Get(name).Do() + if err != nil { + return nil, err + } + res["name"] = project.ProjectId + } + return res, nil +} + +func resourceMonitoringMonitoredProjectResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metrics_scope": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `Required. The resource name of the existing Metrics Scope that will monitor this project. Example: locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `Immutable. The resource name of the 'MonitoredProject'. On input, the resource name includes the scoping project ID and monitored project ID. 
On output, it contains the equivalent project numbers. Example: 'locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}'`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The time when this 'MonitoredProject' was created.`, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceMonitoringMonitoredProjectUpgradeV0(_ context.Context, rawState map[string]any, meta any) (map[string]any, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + + rawState["id"] = strings.TrimPrefix(rawState["id"].(string), "v1/") + + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_monitored_project_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_monitored_project_sweeper.go new file mode 100644 index 0000000000..6fe88e60c5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_monitored_project_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("MonitoringMonitoredProject", testSweepMonitoringMonitoredProject) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepMonitoringMonitoredProject(region string) error { + resourceName := "MonitoringMonitoredProject" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://monitoring.googleapis.com/v1/locations/global/metricsScopes", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["monitoredProjects"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://monitoring.googleapis.com/v1/locations/global/metricsScopes/{{metrics_scope}}/projects/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_notification_channel.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_notification_channel.go new file mode 100644 index 0000000000..1051eda08c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_notification_channel.go @@ -0,0 +1,655 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "context" + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var sensitiveLabels = []string{"auth_token", "service_key", "password"} + +func sensitiveLabelCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + for _, sl := range sensitiveLabels { + mapLabel := diff.Get("labels." + sl).(string) + authLabel := diff.Get("sensitive_labels.0." 
+ sl).(string) + if mapLabel != "" && authLabel != "" { + return fmt.Errorf("Sensitive label [%s] cannot be set in both `labels` and the `sensitive_labels` block.", sl) + } + } + return nil +} + +func ResourceMonitoringNotificationChannel() *schema.Resource { + return &schema.Resource{ + Create: resourceMonitoringNotificationChannelCreate, + Read: resourceMonitoringNotificationChannelRead, + Update: resourceMonitoringNotificationChannelUpdate, + Delete: resourceMonitoringNotificationChannelDelete, + + Importer: &schema.ResourceImporter{ + State: resourceMonitoringNotificationChannelImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + sensitiveLabelCustomizeDiff, + ), + + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + Description: `The type of the notification channel. This field matches the value of the NotificationChannelDescriptor.type field. See https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.notificationChannelDescriptors/list to get the list of valid values such as "email", "slack", etc...`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An optional human-readable description of this notification channel. This description may provide additional details, beyond the display name, for the channel. This may not exceed 1024 Unicode characters.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `An optional human-readable name for this notification channel. It is recommended that you specify a non-empty and unique name in order to make it easier to identify the channels in your project, though this is not enforced. 
The display name is limited to 512 Unicode characters.`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether notifications are forwarded to the described channel. This makes it possible to disable delivery of notifications to a particular channel without removing the channel from all alerting policies that reference the channel. This is a more convenient approach when the change is temporary and you want to receive notifications from the same set of alerting policies on the channel at some point in the future.`, + Default: true, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Configuration fields that define the channel and its behavior. The +permissible and required labels are specified in the +NotificationChannelDescriptor corresponding to the type field. + +Labels with sensitive data are obfuscated by the API and therefore Terraform cannot +determine if there are upstream changes to these fields. They can also be configured via +the sensitive_labels block, but cannot be configured in both places.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "sensitive_labels": { + Type: schema.TypeList, + Optional: true, + Description: `Different notification type behaviors are configured primarily using the the 'labels' field on this +resource. This block contains the labels which contain secrets or passwords so that they can be marked +sensitive and hidden from plan output. The name of the field, eg: password, will be the key +in the 'labels' map in the api request. + +Credentials may not be specified in both locations and will cause an error. Changing from one location +to a different credential configuration in the config will require an apply to update state.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auth_token": { + Type: schema.TypeString, + Optional: true, + Description: `An authorization token for a notification channel. 
Channel types that support this field include: slack`, + Sensitive: true, + ExactlyOneOf: []string{"sensitive_labels.0.auth_token", "sensitive_labels.0.password", "sensitive_labels.0.service_key"}, + }, + "password": { + Type: schema.TypeString, + Optional: true, + Description: `An password for a notification channel. Channel types that support this field include: webhook_basicauth`, + Sensitive: true, + ExactlyOneOf: []string{"sensitive_labels.0.auth_token", "sensitive_labels.0.password", "sensitive_labels.0.service_key"}, + }, + "service_key": { + Type: schema.TypeString, + Optional: true, + Description: `An servicekey token for a notification channel. Channel types that support this field include: pagerduty`, + Sensitive: true, + ExactlyOneOf: []string{"sensitive_labels.0.auth_token", "sensitive_labels.0.password", "sensitive_labels.0.service_key"}, + }, + }, + }, + }, + "user_labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User-supplied key/value data that does not need to conform to the corresponding NotificationChannelDescriptor's schema, unlike the labels field. This field is intended to be used for organizing and identifying the NotificationChannel objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The full REST resource name for this channel. The syntax is: +projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID] +The [CHANNEL_ID] is automatically assigned by the server on creation.`, + }, + "verification_status": { + Type: schema.TypeString, + Computed: true, + Description: `Indicates whether this channel has been verified or not. 
On a ListNotificationChannels or GetNotificationChannel operation, this field is expected to be populated.If the value is UNVERIFIED, then it indicates that the channel is non-functioning (it both requires verification and lacks verification); otherwise, it is assumed that the channel works.If the channel is neither VERIFIED nor UNVERIFIED, it implies that the channel is of a type that does not require verification or that this specific channel has been exempted from verification because it was created prior to verification being required for channels of this type.This field cannot be modified using a standard UpdateNotificationChannel operation. To change the value of this field, you must call VerifyNotificationChannel.`, + }, + "force_delete": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If true, the notification channel will be deleted regardless +of its use in alert policies (the policies will be updated +to remove the channel). If false, channels that are still +referenced by an existing alerting policy will fail to be +deleted in a delete operation.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceMonitoringNotificationChannelCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandMonitoringNotificationChannelLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + typeProp, err := expandMonitoringNotificationChannelType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + userLabelsProp, err := expandMonitoringNotificationChannelUserLabels(d.Get("user_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("user_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(userLabelsProp)) && (ok || !reflect.DeepEqual(v, userLabelsProp)) { + obj["userLabels"] = userLabelsProp + } + descriptionProp, err := expandMonitoringNotificationChannelDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandMonitoringNotificationChannelDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enabledProp, err := expandMonitoringNotificationChannelEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); ok || !reflect.DeepEqual(v, enabledProp) { + obj["enabled"] = enabledProp + } + + obj, err = resourceMonitoringNotificationChannelEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "stackdriver/notifications/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/notificationChannels") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new NotificationChannel: %#v", obj) + billingProject 
:= "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NotificationChannel: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return fmt.Errorf("Error creating NotificationChannel: %s", err) + } + if err := d.Set("name", flattenMonitoringNotificationChannelName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating NotificationChannel %q: %#v", d.Id(), res) + + return resourceMonitoringNotificationChannelRead(d, meta) +} + +func resourceMonitoringNotificationChannelRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NotificationChannel: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("MonitoringNotificationChannel %q", d.Id())) + } + + res, err = resourceMonitoringNotificationChannelDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing MonitoringNotificationChannel because it no longer exists.") + d.SetId("") + return nil + } + + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("force_delete"); !ok { + if err := d.Set("force_delete", false); err != nil { + return fmt.Errorf("Error setting force_delete: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading NotificationChannel: %s", err) + } + + if err := d.Set("labels", flattenMonitoringNotificationChannelLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading NotificationChannel: %s", err) + } + if err := d.Set("name", flattenMonitoringNotificationChannelName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading NotificationChannel: %s", err) + } + if err := d.Set("verification_status", flattenMonitoringNotificationChannelVerificationStatus(res["verificationStatus"], d, config)); err != nil { + return fmt.Errorf("Error reading NotificationChannel: %s", err) + } + if err := d.Set("type", flattenMonitoringNotificationChannelType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading NotificationChannel: %s", err) + } + if err := d.Set("user_labels", flattenMonitoringNotificationChannelUserLabels(res["userLabels"], d, config)); err != nil { + return fmt.Errorf("Error reading NotificationChannel: %s", err) + } + if err := d.Set("description", flattenMonitoringNotificationChannelDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading NotificationChannel: %s", err) + } + if err := d.Set("display_name", flattenMonitoringNotificationChannelDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading NotificationChannel: %s", err) + } + if err := d.Set("enabled", flattenMonitoringNotificationChannelEnabled(res["enabled"], d, config)); err != nil { + return fmt.Errorf("Error 
reading NotificationChannel: %s", err) + } + + return nil +} + +func resourceMonitoringNotificationChannelUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NotificationChannel: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandMonitoringNotificationChannelLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + typeProp, err := expandMonitoringNotificationChannelType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + userLabelsProp, err := expandMonitoringNotificationChannelUserLabels(d.Get("user_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("user_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, userLabelsProp)) { + obj["userLabels"] = userLabelsProp + } + descriptionProp, err := expandMonitoringNotificationChannelDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandMonitoringNotificationChannelDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enabledProp, err := expandMonitoringNotificationChannelEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); ok || !reflect.DeepEqual(v, enabledProp) { + obj["enabled"] = enabledProp + } + + obj, err = resourceMonitoringNotificationChannelEncoder(d, meta, obj) + if err != nil { + return err + } + + lockName, err := tpgresource.ReplaceVars(d, config, "stackdriver/notifications/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating NotificationChannel %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + + if err != nil { + return fmt.Errorf("Error updating NotificationChannel %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating NotificationChannel %q: %#v", d.Id(), res) + } + + return resourceMonitoringNotificationChannelRead(d, meta) +} + +func resourceMonitoringNotificationChannelDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err 
:= tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NotificationChannel: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "stackdriver/notifications/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}?force={{force_delete}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting NotificationChannel %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "NotificationChannel") + } + + log.Printf("[DEBUG] Finished deleting NotificationChannel %q: %#v", d.Id(), res) + return nil +} + +func resourceMonitoringNotificationChannelImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { + return nil, err + } + + return []*schema.ResourceData{d}, nil +} + +func flattenMonitoringNotificationChannelLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringNotificationChannelName(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringNotificationChannelVerificationStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringNotificationChannelType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringNotificationChannelUserLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringNotificationChannelDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringNotificationChannelDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringNotificationChannelEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandMonitoringNotificationChannelLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandMonitoringNotificationChannelType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringNotificationChannelUserLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandMonitoringNotificationChannelDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandMonitoringNotificationChannelDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringNotificationChannelEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceMonitoringNotificationChannelEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + labelmap, ok := obj["labels"] + if !ok { + labelmap = make(map[string]string) + } + + var labels map[string]string + labels = labelmap.(map[string]string) + + for _, sl := range sensitiveLabels { + if auth, _ := d.GetOkExists("sensitive_labels.0." + sl); auth != "" { + labels[sl] = auth.(string) + } + } + + obj["labels"] = labels + + return obj, nil +} + +func resourceMonitoringNotificationChannelDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if labelmap, ok := res["labels"]; ok { + labels := labelmap.(map[string]interface{}) + for _, sl := range sensitiveLabels { + if _, apiOk := labels[sl]; apiOk { + if _, exists := d.GetOkExists("sensitive_labels.0." + sl); exists { + delete(labels, sl) + } else { + labels[sl] = d.Get("labels." 
+ sl) + } + } + } + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_notification_channel_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_notification_channel_sweeper.go new file mode 100644 index 0000000000..1e388fbd1d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_notification_channel_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("MonitoringNotificationChannel", testSweepMonitoringNotificationChannel) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepMonitoringNotificationChannel(region string) error { + resourceName := "MonitoringNotificationChannel" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://monitoring.googleapis.com/v3/projects/{{project}}/notificationChannels", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if 
err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["notificationChannels"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://monitoring.googleapis.com/v3/{{name}}?force={{force_delete}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_service.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_service.go new file mode 100644 index 0000000000..0d3b1ee6d5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_service.go @@ -0,0 +1,517 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceMonitoringGenericService() *schema.Resource { + return &schema.Resource{ + Create: resourceMonitoringGenericServiceCreate, + Read: resourceMonitoringGenericServiceRead, + Update: resourceMonitoringGenericServiceUpdate, + Delete: resourceMonitoringGenericServiceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceMonitoringGenericServiceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "service_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `An optional service 
ID to use. If not given, the server will generate a +service ID.`, + }, + "basic_service": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A well-known service type, defined by its service type and service labels. +Valid values of service types and services labels are described at +https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service_labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Labels that specify the resource that emits the monitoring data +which is used for SLO reporting of this 'Service'.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "service_type": { + Type: schema.TypeString, + Optional: true, + Description: `The type of service that this basic service defines, e.g. +APP_ENGINE service type`, + }, + }, + }, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `Name used for UI elements listing this Service.`, + }, + "user_labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels which have been used to annotate the service. Label keys must start +with a letter. Label keys and values may contain lowercase letters, +numbers, underscores, and dashes. Label keys and values have a maximum +length of 63 characters, and must be less than 128 bytes in size. Up to 64 +label entries may be stored. For labels which do not have a semantic value, +the empty string may be supplied for the label value.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The full resource name for this service. 
The syntax is: +projects/[PROJECT_ID]/services/[SERVICE_ID].`, + }, + "telemetry": { + Type: schema.TypeList, + Computed: true, + Description: `Configuration for how to query telemetry on a Service.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_name": { + Type: schema.TypeString, + Optional: true, + Description: `The full name of the resource that defines this service. +Formatted as described in +https://cloud.google.com/apis/design/resource_names.`, + }, + }, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceMonitoringGenericServiceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandMonitoringGenericServiceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + userLabelsProp, err := expandMonitoringGenericServiceUserLabels(d.Get("user_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("user_labels"); ok || !reflect.DeepEqual(v, userLabelsProp) { + obj["userLabels"] = userLabelsProp + } + basicServiceProp, err := expandMonitoringGenericServiceBasicService(d.Get("basic_service"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("basic_service"); !tpgresource.IsEmptyValue(reflect.ValueOf(basicServiceProp)) && (ok || !reflect.DeepEqual(v, basicServiceProp)) { + obj["basicService"] = basicServiceProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{MonitoringBasePath}}v3/projects/{{project}}/services?serviceId={{service_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new GenericService: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GenericService: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return fmt.Errorf("Error creating GenericService: %s", err) + } + if err := d.Set("name", flattenMonitoringGenericServiceName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/services/{{service_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating GenericService %q: %#v", d.Id(), res) + + return resourceMonitoringGenericServiceRead(d, meta) +} + +func resourceMonitoringGenericServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/services/{{service_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, 
config) + if err != nil { + return fmt.Errorf("Error fetching project for GenericService: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("MonitoringGenericService %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading GenericService: %s", err) + } + + if err := d.Set("name", flattenMonitoringGenericServiceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading GenericService: %s", err) + } + if err := d.Set("display_name", flattenMonitoringGenericServiceDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading GenericService: %s", err) + } + if err := d.Set("user_labels", flattenMonitoringGenericServiceUserLabels(res["userLabels"], d, config)); err != nil { + return fmt.Errorf("Error reading GenericService: %s", err) + } + if err := d.Set("telemetry", flattenMonitoringGenericServiceTelemetry(res["telemetry"], d, config)); err != nil { + return fmt.Errorf("Error reading GenericService: %s", err) + } + if err := d.Set("basic_service", flattenMonitoringGenericServiceBasicService(res["basicService"], d, config)); err != nil { + return fmt.Errorf("Error reading GenericService: %s", err) + } + + return nil +} + +func resourceMonitoringGenericServiceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { 
+ return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GenericService: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandMonitoringGenericServiceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + userLabelsProp, err := expandMonitoringGenericServiceUserLabels(d.Get("user_labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("user_labels"); ok || !reflect.DeepEqual(v, userLabelsProp) { + obj["userLabels"] = userLabelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/services/{{service_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating GenericService %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("user_labels") { + updateMask = append(updateMask, "userLabels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: 
[]transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + + if err != nil { + return fmt.Errorf("Error updating GenericService %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating GenericService %q: %#v", d.Id(), res) + } + + return resourceMonitoringGenericServiceRead(d, meta) +} + +func resourceMonitoringGenericServiceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GenericService: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/services/{{service_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting GenericService %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "GenericService") + } + + log.Printf("[DEBUG] Finished deleting GenericService %q: %#v", d.Id(), res) + return nil +} + +func resourceMonitoringGenericServiceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + 
"projects/(?P[^/]+)/services/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/services/{{service_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenMonitoringGenericServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringGenericServiceDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringGenericServiceUserLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringGenericServiceTelemetry(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resource_name"] = + flattenMonitoringGenericServiceTelemetryResourceName(original["resourceName"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringGenericServiceTelemetryResourceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringGenericServiceBasicService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["service_type"] = + flattenMonitoringGenericServiceBasicServiceServiceType(original["serviceType"], d, config) + transformed["service_labels"] = + 
flattenMonitoringGenericServiceBasicServiceServiceLabels(original["serviceLabels"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringGenericServiceBasicServiceServiceType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringGenericServiceBasicServiceServiceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandMonitoringGenericServiceDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringGenericServiceUserLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandMonitoringGenericServiceBasicService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedServiceType, err := expandMonitoringGenericServiceBasicServiceServiceType(original["service_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceType"] = transformedServiceType + } + + transformedServiceLabels, err := expandMonitoringGenericServiceBasicServiceServiceLabels(original["service_labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceLabels"] = transformedServiceLabels + } + + return 
transformed, nil +} + +func expandMonitoringGenericServiceBasicServiceServiceType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringGenericServiceBasicServiceServiceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_service_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_service_sweeper.go new file mode 100644 index 0000000000..05af0e7f1b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_service_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("MonitoringGenericService", testSweepMonitoringGenericService) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepMonitoringGenericService(region string) error { + resourceName := "MonitoringGenericService" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://monitoring.googleapis.com/v3/projects/{{project}}/services", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["genericServices"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://monitoring.googleapis.com/v3/projects/{{project}}/services/{{service_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_slo.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_slo.go similarity index 82% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_slo.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_slo.go index 904efe6fbd..5345c364f4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_monitoring_slo.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_slo.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package monitoring import ( "fmt" @@ -23,6 +26,11 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/googleapi" ) @@ -75,7 +83,7 @@ to be met. 0 < goal <= 0.999`, "calendar_period": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"DAY", "WEEK", "FORTNIGHT", "MONTH", ""}), + ValidateFunc: verify.ValidateEnum([]string{"DAY", "WEEK", "FORTNIGHT", "MONTH", ""}), Description: `A calendar period, semantically "since the start of the current ". 
Possible values: ["DAY", "WEEK", "FORTNIGHT", "MONTH"]`, ExactlyOneOf: []string{"rolling_period_days", "calendar_period"}, @@ -240,16 +248,14 @@ just one of min or max.`, Type: schema.TypeFloat, Optional: true, Description: `max value for the range (inclusive). If not given, -will be set to "infinity", defining an open range -">= range.min"`, +will be set to 0`, AtLeastOneOf: []string{"request_based_sli.0.distribution_cut.0.range.0.min", "request_based_sli.0.distribution_cut.0.range.0.max"}, }, "min": { Type: schema.TypeFloat, Optional: true, Description: `Min value for the range (inclusive). If not given, -will be set to "-infinity", defining an open range -"< range.max"`, +will be set to 0`, AtLeastOneOf: []string{"request_based_sli.0.distribution_cut.0.range.0.min", "request_based_sli.0.distribution_cut.0.range.0.max"}, }, }, @@ -487,16 +493,14 @@ just one of min or max.`, Type: schema.TypeFloat, Optional: true, Description: `max value for the range (inclusive). If not given, -will be set to "infinity", defining an open range -">= range.min"`, +will be set to 0`, AtLeastOneOf: []string{"windows_based_sli.0.good_total_ratio_threshold.0.performance.0.distribution_cut.0.range.0.min", "windows_based_sli.0.good_total_ratio_threshold.0.performance.0.distribution_cut.0.range.0.max"}, }, "min": { Type: schema.TypeFloat, Optional: true, Description: `Min value for the range (inclusive). 
If not given, -will be set to "-infinity", defining an open range -"< range.max"`, +will be set to 0`, AtLeastOneOf: []string{"windows_based_sli.0.good_total_ratio_threshold.0.performance.0.distribution_cut.0.range.0.min", "windows_based_sli.0.good_total_ratio_threshold.0.performance.0.distribution_cut.0.range.0.max"}, }, }, @@ -711,7 +715,7 @@ integer fraction of a day and at least 60s.`, Computed: true, Optional: true, ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z0-9\-]+$`), + ValidateFunc: verify.ValidateRegexp(`^[a-z0-9\-]+$`), Description: `The id to use for this ServiceLevelObjective. If omitted, an id will be generated instead.`, }, "user_labels": { @@ -742,8 +746,8 @@ projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SL } func resourceMonitoringSloCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -752,25 +756,25 @@ func resourceMonitoringSloCreate(d *schema.ResourceData, meta interface{}) error displayNameProp, err := expandMonitoringSloDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } goalProp, err := expandMonitoringSloGoal(d.Get("goal"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("goal"); !isEmptyValue(reflect.ValueOf(goalProp)) && (ok || !reflect.DeepEqual(v, goalProp)) { + } else if v, ok := d.GetOkExists("goal"); !tpgresource.IsEmptyValue(reflect.ValueOf(goalProp)) && (ok 
|| !reflect.DeepEqual(v, goalProp)) { obj["goal"] = goalProp } rollingPeriodProp, err := expandMonitoringSloRollingPeriodDays(d.Get("rolling_period_days"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("rolling_period_days"); !isEmptyValue(reflect.ValueOf(rollingPeriodProp)) && (ok || !reflect.DeepEqual(v, rollingPeriodProp)) { + } else if v, ok := d.GetOkExists("rolling_period_days"); !tpgresource.IsEmptyValue(reflect.ValueOf(rollingPeriodProp)) && (ok || !reflect.DeepEqual(v, rollingPeriodProp)) { obj["rollingPeriod"] = rollingPeriodProp } calendarPeriodProp, err := expandMonitoringSloCalendarPeriod(d.Get("calendar_period"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("calendar_period"); !isEmptyValue(reflect.ValueOf(calendarPeriodProp)) && (ok || !reflect.DeepEqual(v, calendarPeriodProp)) { + } else if v, ok := d.GetOkExists("calendar_period"); !tpgresource.IsEmptyValue(reflect.ValueOf(calendarPeriodProp)) && (ok || !reflect.DeepEqual(v, calendarPeriodProp)) { obj["calendarPeriod"] = calendarPeriodProp } userLabelsProp, err := expandMonitoringSloUserLabels(d.Get("user_labels"), d, config) @@ -782,13 +786,13 @@ func resourceMonitoringSloCreate(d *schema.ResourceData, meta interface{}) error serviceLevelIndicatorProp, err := expandMonitoringSloServiceLevelIndicator(nil, d, config) if err != nil { return err - } else if !isEmptyValue(reflect.ValueOf(serviceLevelIndicatorProp)) { + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(serviceLevelIndicatorProp)) { obj["serviceLevelIndicator"] = serviceLevelIndicatorProp } nameProp, err := expandMonitoringSloSloId(d.Get("slo_id"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("slo_id"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("slo_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } 
@@ -797,14 +801,14 @@ func resourceMonitoringSloCreate(d *schema.ResourceData, meta interface{}) error return err } - lockName, err := replaceVars(d, config, "monitoring/project/{{project}}/service/{{service}}") + lockName, err := tpgresource.ReplaceVars(d, config, "monitoring/project/{{project}}/service/{{service}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/services/{{service}}/serviceLevelObjectives?serviceLevelObjectiveId={{slo_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/services/{{service}}/serviceLevelObjectives?serviceLevelObjectiveId={{slo_id}}") if err != nil { return err } @@ -812,18 +816,26 @@ func resourceMonitoringSloCreate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Creating new Slo: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Slo: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Slo: %s", err) } @@ -832,7 +844,7 @@ func resourceMonitoringSloCreate(d *schema.ResourceData, meta interface{}) error } // Store the 
ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -844,33 +856,39 @@ func resourceMonitoringSloCreate(d *schema.ResourceData, meta interface{}) error } func resourceMonitoringSloRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Slo: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("MonitoringSlo %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("MonitoringSlo %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -918,15 +936,15 @@ func resourceMonitoringSloRead(d *schema.ResourceData, meta interface{}) error { } func resourceMonitoringSloUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Slo: %s", err) } @@ -936,25 +954,25 @@ func resourceMonitoringSloUpdate(d *schema.ResourceData, meta interface{}) error displayNameProp, err := expandMonitoringSloDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } goalProp, err := expandMonitoringSloGoal(d.Get("goal"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("goal"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, goalProp)) { + } else if v, ok := d.GetOkExists("goal"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, goalProp)) { obj["goal"] = goalProp } rollingPeriodProp, err := expandMonitoringSloRollingPeriodDays(d.Get("rolling_period_days"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("rolling_period_days"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rollingPeriodProp)) { + } else if v, ok := d.GetOkExists("rolling_period_days"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rollingPeriodProp)) { obj["rollingPeriod"] = rollingPeriodProp } calendarPeriodProp, err := expandMonitoringSloCalendarPeriod(d.Get("calendar_period"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("calendar_period"); !isEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, calendarPeriodProp)) { + } else if v, ok := d.GetOkExists("calendar_period"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, calendarPeriodProp)) { obj["calendarPeriod"] = calendarPeriodProp } userLabelsProp, err := expandMonitoringSloUserLabels(d.Get("user_labels"), d, config) @@ -966,7 +984,7 @@ func resourceMonitoringSloUpdate(d *schema.ResourceData, meta interface{}) error serviceLevelIndicatorProp, err := expandMonitoringSloServiceLevelIndicator(nil, d, config) if err != nil { return err - } else if !isEmptyValue(reflect.ValueOf(serviceLevelIndicatorProp)) { + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(serviceLevelIndicatorProp)) { obj["serviceLevelIndicator"] = serviceLevelIndicatorProp } @@ -975,14 +993,14 @@ func resourceMonitoringSloUpdate(d *schema.ResourceData, meta interface{}) error return err } - lockName, err := replaceVars(d, config, "monitoring/project/{{project}}/service/{{service}}") + lockName, err := tpgresource.ReplaceVars(d, config, "monitoring/project/{{project}}/service/{{service}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") if err != nil { return err } @@ -1037,19 +1055,27 @@ func resourceMonitoringSloUpdate(d *schema.ResourceData, meta interface{}) error "serviceLevelIndicator.windowsBased.metricSumInRange.timeSeries", "serviceLevelIndicator.windowsBased.metricSumInRange.range") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = 
transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Slo %q: %s", d.Id(), err) @@ -1061,28 +1087,28 @@ func resourceMonitoringSloUpdate(d *schema.ResourceData, meta interface{}) error } func resourceMonitoringSloDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Slo: %s", err) } billingProject = project - lockName, err := replaceVars(d, config, "monitoring/project/{{project}}/service/{{service}}") + lockName, err := tpgresource.ReplaceVars(d, config, "monitoring/project/{{project}}/service/{{service}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") if err != nil { return err 
} @@ -1091,13 +1117,21 @@ func resourceMonitoringSloDelete(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Deleting Slo %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "Slo") + return transport_tpg.HandleNotFoundError(err, d, "Slo") } log.Printf("[DEBUG] Finished deleting Slo %q: %#v", d.Id(), res) @@ -1106,29 +1140,29 @@ func resourceMonitoringSloDelete(d *schema.ResourceData, meta interface{}) error func resourceMonitoringSloImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) // current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { + if err := tpgresource.ParseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { return nil, err } return []*schema.ResourceData{d}, nil } -func flattenMonitoringSloName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ return v } -func flattenMonitoringSloGoal(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloGoal(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandMonitoringSloRollingPeriodDays(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloRollingPeriodDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return nil, nil } @@ -1143,15 +1177,15 @@ func expandMonitoringSloRollingPeriodDays(v interface{}, d TerraformResourceData return fmt.Sprintf("%ds", i*86400), nil } -func flattenMonitoringSloCalendarPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloCalendarPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloUserLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloUserLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicator(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicator(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1168,7 +1202,7 @@ func flattenMonitoringSloServiceLevelIndicator(v interface{}, d *schema.Resource flattenMonitoringSloServiceLevelIndicatorWindowsBasedSli(original["windowsBased"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorBasicSli(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorBasicSli(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1189,28 +1223,28 @@ func 
flattenMonitoringSloServiceLevelIndicatorBasicSli(v interface{}, d *schema. flattenMonitoringSloServiceLevelIndicatorBasicSliAvailability(original["availability"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorBasicSliMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorBasicSliMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenMonitoringSloServiceLevelIndicatorBasicSliLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorBasicSliLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenMonitoringSloServiceLevelIndicatorBasicSliVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorBasicSliVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenMonitoringSloServiceLevelIndicatorBasicSliLatency(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorBasicSliLatency(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1223,11 +1257,11 @@ func flattenMonitoringSloServiceLevelIndicatorBasicSliLatency(v interface{}, d * flattenMonitoringSloServiceLevelIndicatorBasicSliLatencyThreshold(original["threshold"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorBasicSliLatencyThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenMonitoringSloServiceLevelIndicatorBasicSliLatencyThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorBasicSliAvailability(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorBasicSliAvailability(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1236,7 +1270,7 @@ func flattenMonitoringSloServiceLevelIndicatorBasicSliAvailability(v interface{} return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSli(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorRequestBasedSli(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1251,7 +1285,7 @@ func flattenMonitoringSloServiceLevelIndicatorRequestBasedSli(v interface{}, d * flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCut(original["distributionCut"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatio(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatio(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1268,19 +1302,19 @@ func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatio(v in flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioTotalServiceFilter(original["totalServiceFilter"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioGoodServiceFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioGoodServiceFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioBadServiceFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioBadServiceFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioTotalServiceFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioTotalServiceFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCut(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCut(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1295,11 +1329,11 @@ func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCut(v i flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRange(original["range"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutDistributionFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutDistributionFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1314,15 +1348,15 @@ func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRang flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMax(original["max"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMin(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMax(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMax(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSli(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSli(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1343,15 +1377,15 @@ func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSli(v interface{}, d * flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRange(original["metricSumInRange"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliWindowPeriod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliWindowPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodBadMetricFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodBadMetricFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1368,11 +1402,11 @@ func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThres flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformance(original["basicSliPerformance"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformance(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1387,7 +1421,7 @@ func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThres flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCut(original["distributionCut"], d, config) return []interface{}{transformed} } -func 
flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatio(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatio(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1404,19 +1438,19 @@ func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThres flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioTotalServiceFilter(original["totalServiceFilter"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioGoodServiceFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioGoodServiceFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioBadServiceFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioBadServiceFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioTotalServiceFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioTotalServiceFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCut(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCut(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1431,11 +1465,11 @@ func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThres flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRange(original["range"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutDistributionFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutDistributionFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1450,15 +1484,15 @@ func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThres flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMax(original["max"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMin(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMax(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMax(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformance(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1479,28 +1513,28 @@ func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThres flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailability(original["availability"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } return schema.NewSet(schema.HashString, v.([]interface{})) } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatency(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatency(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1513,11 +1547,11 @@ func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThres flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatencyThreshold(original["threshold"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatencyThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatencyThreshold(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailability(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailability(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1526,7 +1560,7 @@ func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThres return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1541,11 +1575,11 @@ func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRange(v flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRange(original["range"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeTimeSeries(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeTimeSeries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1560,15 +1594,15 @@ func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRa flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMax(original["max"], d, config) return []interface{}{transformed} } -func 
flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMin(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMax(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMax(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1583,11 +1617,11 @@ func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRange(v flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRange(original["range"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeTimeSeries(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeTimeSeries(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1602,30 +1636,30 @@ func 
flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRan flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMax(original["max"], d, config) return []interface{}{transformed} } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMin(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMax(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMax(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenMonitoringSloSloId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloSloId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return NameFromSelfLinkStateFunc(v) + return tpgresource.NameFromSelfLinkStateFunc(v) } -func expandMonitoringSloDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloGoal(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloGoal(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func flattenMonitoringSloRollingPeriodDays(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenMonitoringSloRollingPeriodDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { 
if v == nil { return nil } @@ -1639,11 +1673,11 @@ func flattenMonitoringSloRollingPeriodDays(v interface{}, d *schema.ResourceData return int(dur / (time.Hour * 24)) } -func expandMonitoringSloCalendarPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloCalendarPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloUserLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandMonitoringSloUserLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1654,33 +1688,33 @@ func expandMonitoringSloUserLabels(v interface{}, d TerraformResourceData, confi return m, nil } -func expandMonitoringSloServiceLevelIndicator(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicator(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { transformed := make(map[string]interface{}) transformedBasicSli, err := expandMonitoringSloServiceLevelIndicatorBasicSli(d.Get("basic_sli"), d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBasicSli); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBasicSli); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["basicSli"] = transformedBasicSli } transformedRequestBasedSli, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSli(d.Get("request_based_sli"), d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestBasedSli); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestBasedSli); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["requestBased"] = transformedRequestBasedSli } transformedWindowsBasedSli, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSli(d.Get("windows_based_sli"), d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWindowsBasedSli); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWindowsBasedSli); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["windowsBased"] = transformedWindowsBasedSli } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorBasicSli(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorBasicSli(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1692,57 +1726,57 @@ func expandMonitoringSloServiceLevelIndicatorBasicSli(v interface{}, d Terraform transformedMethod, err := expandMonitoringSloServiceLevelIndicatorBasicSliMethod(original["method"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["method"] = transformedMethod } transformedLocation, err := expandMonitoringSloServiceLevelIndicatorBasicSliLocation(original["location"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["location"] = transformedLocation } transformedVersion, err := expandMonitoringSloServiceLevelIndicatorBasicSliVersion(original["version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["version"] = transformedVersion } transformedLatency, err := expandMonitoringSloServiceLevelIndicatorBasicSliLatency(original["latency"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLatency); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLatency); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["latency"] = transformedLatency } transformedAvailability, err := expandMonitoringSloServiceLevelIndicatorBasicSliAvailability(original["availability"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAvailability); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAvailability); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["availability"] = transformedAvailability } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorBasicSliMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorBasicSliMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandMonitoringSloServiceLevelIndicatorBasicSliLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorBasicSliLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandMonitoringSloServiceLevelIndicatorBasicSliVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorBasicSliVersion(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandMonitoringSloServiceLevelIndicatorBasicSliLatency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorBasicSliLatency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1754,18 +1788,18 @@ func expandMonitoringSloServiceLevelIndicatorBasicSliLatency(v interface{}, d Te transformedThreshold, err := expandMonitoringSloServiceLevelIndicatorBasicSliLatencyThreshold(original["threshold"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedThreshold); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["threshold"] = transformedThreshold } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorBasicSliLatencyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorBasicSliLatencyThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorBasicSliAvailability(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorBasicSliAvailability(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1777,18 +1811,18 @@ func expandMonitoringSloServiceLevelIndicatorBasicSliAvailability(v interface{}, transformedEnabled, err := expandMonitoringSloServiceLevelIndicatorBasicSliAvailabilityEnabled(original["enabled"], d, config) if err != nil { return 
nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enabled"] = transformedEnabled } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorBasicSliAvailabilityEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorBasicSliAvailabilityEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorRequestBasedSli(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorRequestBasedSli(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1800,21 +1834,21 @@ func expandMonitoringSloServiceLevelIndicatorRequestBasedSli(v interface{}, d Te transformedGoodTotalRatio, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatio(original["good_total_ratio"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGoodTotalRatio); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGoodTotalRatio); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["goodTotalRatio"] = transformedGoodTotalRatio } transformedDistributionCut, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCut(original["distribution_cut"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDistributionCut); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDistributionCut); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["distributionCut"] = 
transformedDistributionCut } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatio(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatio(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1826,40 +1860,40 @@ func expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatio(v int transformedGoodServiceFilter, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioGoodServiceFilter(original["good_service_filter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGoodServiceFilter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGoodServiceFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["goodServiceFilter"] = transformedGoodServiceFilter } transformedBadServiceFilter, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioBadServiceFilter(original["bad_service_filter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBadServiceFilter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBadServiceFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["badServiceFilter"] = transformedBadServiceFilter } transformedTotalServiceFilter, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioTotalServiceFilter(original["total_service_filter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTotalServiceFilter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTotalServiceFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["totalServiceFilter"] = 
transformedTotalServiceFilter } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioGoodServiceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioGoodServiceFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioBadServiceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioBadServiceFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioTotalServiceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioTotalServiceFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCut(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCut(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1871,25 +1905,25 @@ func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCut(v in transformedDistributionFilter, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutDistributionFilter(original["distribution_filter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDistributionFilter); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDistributionFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["distributionFilter"] = transformedDistributionFilter } transformedRange, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRange(original["range"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRange); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["range"] = transformedRange } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutDistributionFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutDistributionFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1901,29 +1935,29 @@ func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRange transformedMin, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMin(original["min"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMin); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMin); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["min"] = transformedMin } transformedMax, err := 
expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMax(original["max"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMax); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMax); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["max"] = transformedMax } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMin(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMax(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMax(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSli(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSli(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1935,50 +1969,50 @@ func expandMonitoringSloServiceLevelIndicatorWindowsBasedSli(v interface{}, d Te transformedWindowPeriod, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliWindowPeriod(original["window_period"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWindowPeriod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWindowPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["windowPeriod"] = transformedWindowPeriod } transformedGoodBadMetricFilter, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodBadMetricFilter(original["good_bad_metric_filter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGoodBadMetricFilter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGoodBadMetricFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["goodBadMetricFilter"] = transformedGoodBadMetricFilter } transformedGoodTotalRatioThreshold, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThreshold(original["good_total_ratio_threshold"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGoodTotalRatioThreshold); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGoodTotalRatioThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["goodTotalRatioThreshold"] = transformedGoodTotalRatioThreshold } transformedMetricMeanInRange, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRange(original["metric_mean_in_range"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMetricMeanInRange); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMetricMeanInRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["metricMeanInRange"] = transformedMetricMeanInRange } transformedMetricSumInRange, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRange(original["metric_sum_in_range"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMetricSumInRange); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMetricSumInRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["metricSumInRange"] = transformedMetricSumInRange } 
return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliWindowPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliWindowPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodBadMetricFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodBadMetricFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1990,32 +2024,32 @@ func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresh transformedThreshold, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdThreshold(original["threshold"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedThreshold); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["threshold"] = transformedThreshold } transformedPerformance, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformance(original["performance"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPerformance); val.IsValid() && !isEmptyValue(val) { + } else 
if val := reflect.ValueOf(transformedPerformance); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["performance"] = transformedPerformance } transformedBasicSliPerformance, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformance(original["basic_sli_performance"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBasicSliPerformance); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBasicSliPerformance); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["basicSliPerformance"] = transformedBasicSliPerformance } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2027,21 +2061,21 @@ func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresh transformedGoodTotalRatio, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatio(original["good_total_ratio"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGoodTotalRatio); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedGoodTotalRatio); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["goodTotalRatio"] = transformedGoodTotalRatio } transformedDistributionCut, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCut(original["distribution_cut"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDistributionCut); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDistributionCut); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["distributionCut"] = transformedDistributionCut } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatio(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatio(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2053,40 +2087,40 @@ func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresh transformedGoodServiceFilter, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioGoodServiceFilter(original["good_service_filter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGoodServiceFilter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGoodServiceFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["goodServiceFilter"] = transformedGoodServiceFilter } transformedBadServiceFilter, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioBadServiceFilter(original["bad_service_filter"], d, config) if err != nil { return nil, 
err - } else if val := reflect.ValueOf(transformedBadServiceFilter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBadServiceFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["badServiceFilter"] = transformedBadServiceFilter } transformedTotalServiceFilter, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioTotalServiceFilter(original["total_service_filter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTotalServiceFilter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTotalServiceFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["totalServiceFilter"] = transformedTotalServiceFilter } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioGoodServiceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioGoodServiceFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioBadServiceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioBadServiceFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioTotalServiceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioTotalServiceFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCut(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCut(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2098,25 +2132,25 @@ func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresh transformedDistributionFilter, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutDistributionFilter(original["distribution_filter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDistributionFilter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDistributionFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["distributionFilter"] = transformedDistributionFilter } transformedRange, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRange(original["range"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRange); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["range"] = transformedRange } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutDistributionFilter(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutDistributionFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2128,29 +2162,29 @@ func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresh transformedMin, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMin(original["min"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMin); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMin); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["min"] = transformedMin } transformedMax, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMax(original["max"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMax); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMax); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["max"] = transformedMax } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMin(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMax(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMax(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2162,57 +2196,57 @@ func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresh transformedMethod, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceMethod(original["method"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["method"] = transformedMethod } transformedLocation, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLocation(original["location"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["location"] = transformedLocation } transformedVersion, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceVersion(original["version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["version"] = transformedVersion } transformedLatency, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatency(original["latency"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLatency); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLatency); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["latency"] = transformedLatency } transformedAvailability, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailability(original["availability"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAvailability); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAvailability); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["availability"] = transformedAvailability } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func 
expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { v = v.(*schema.Set).List() return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2224,18 +2258,18 @@ func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresh transformedThreshold, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatencyThreshold(original["threshold"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedThreshold); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedThreshold); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["threshold"] = transformedThreshold } return transformed, nil } -func 
expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatencyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatencyThreshold(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailability(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailability(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2247,18 +2281,18 @@ func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresh transformedEnabled, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailabilityEnabled(original["enabled"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enabled"] = transformedEnabled } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailabilityEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailabilityEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2270,25 +2304,25 @@ func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRange(v transformedTimeSeries, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeTimeSeries(original["time_series"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeSeries); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeSeries); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeSeries"] = transformedTimeSeries } transformedRange, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRange(original["range"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRange); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["range"] = transformedRange } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeTimeSeries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeTimeSeries(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2300,29 +2334,29 @@ func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRan transformedMin, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMin(original["min"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMin); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMin); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["min"] = transformedMin } transformedMax, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMax(original["max"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMax); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMax); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["max"] = transformedMax } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMin(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMax(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMax(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2334,25 +2368,25 @@ func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRange(v i transformedTimeSeries, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeTimeSeries(original["time_series"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeSeries); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeSeries); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeSeries"] = transformedTimeSeries } transformedRange, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRange(original["range"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRange); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["range"] = transformedRange } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeTimeSeries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeTimeSeries(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRange(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2364,29 +2398,29 @@ func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRang transformedMin, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMin(original["min"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMin); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMin); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["min"] = transformedMin } transformedMax, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMax(original["max"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMax); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMax); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["max"] = transformedMax } return transformed, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMin(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMax(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMax(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandMonitoringSloSloId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandMonitoringSloSloId(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_slo_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_slo_sweeper.go new file mode 100644 index 0000000000..3bcc9383a4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_slo_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("MonitoringSlo", testSweepMonitoringSlo) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepMonitoringSlo(region string) error { + resourceName := "MonitoringSlo" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://monitoring.googleapis.com/v3/projects/{{project}}/services/{{service}}/serviceLevelObjectives", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["slos"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://monitoring.googleapis.com/v3/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_uptime_check_config.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_uptime_check_config.go new file mode 100644 index 0000000000..2827df9d91 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_uptime_check_config.go @@ -0,0 +1,1444 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func resourceMonitoringUptimeCheckConfigHttpCheckPathDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + return old == "/"+new +} + +func ResourceMonitoringUptimeCheckConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceMonitoringUptimeCheckConfigCreate, + Read: resourceMonitoringUptimeCheckConfigRead, + Update: resourceMonitoringUptimeCheckConfigUpdate, + Delete: resourceMonitoringUptimeCheckConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceMonitoringUptimeCheckConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + 
Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `A human-friendly name for the uptime check configuration. The display name should be unique within a Stackdriver Workspace in order to make it easier to identify; however, uniqueness is not enforced.`, + }, + "timeout": { + Type: schema.TypeString, + Required: true, + Description: `The maximum amount of time to wait for the request to complete (must be between 1 and 60 seconds). Accepted formats https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration`, + }, + "checker_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"STATIC_IP_CHECKERS", "VPC_CHECKERS", ""}), + Description: `The checker type to use for the check. If the monitored resource type is servicedirectory_service, checkerType must be set to VPC_CHECKERS. Possible values: ["STATIC_IP_CHECKERS", "VPC_CHECKERS"]`, + }, + "content_matchers": { + Type: schema.TypeList, + Optional: true, + Description: `The expected content on the page the check is run against. Currently, only the first entry in the list is supported, and other entries will be ignored. The server will look for an exact match of the string in the page response's content. This field is optional and should only be specified if a content match is required.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "content": { + Type: schema.TypeString, + Required: true, + Description: `String or regex content to match (max 1024 bytes)`, + }, + "json_path_matcher": { + Type: schema.TypeList, + Optional: true, + Description: `Information needed to perform a JSONPath content match. 
Used for 'ContentMatcherOption::MATCHES_JSON_PATH' and 'ContentMatcherOption::NOT_MATCHES_JSON_PATH'.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "json_path": { + Type: schema.TypeString, + Required: true, + Description: `JSONPath within the response output pointing to the expected 'ContentMatcher::content' to match against.`, + }, + "json_matcher": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"EXACT_MATCH", "REGEX_MATCH", ""}), + Description: `Options to perform JSONPath content matching. Default value: "EXACT_MATCH" Possible values: ["EXACT_MATCH", "REGEX_MATCH"]`, + Default: "EXACT_MATCH", + }, + }, + }, + }, + "matcher": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"CONTAINS_STRING", "NOT_CONTAINS_STRING", "MATCHES_REGEX", "NOT_MATCHES_REGEX", "MATCHES_JSON_PATH", "NOT_MATCHES_JSON_PATH", ""}), + Description: `The type of content matcher that will be applied to the server output, compared to the content string when the check is run. Default value: "CONTAINS_STRING" Possible values: ["CONTAINS_STRING", "NOT_CONTAINS_STRING", "MATCHES_REGEX", "NOT_MATCHES_REGEX", "MATCHES_JSON_PATH", "NOT_MATCHES_JSON_PATH"]`, + Default: "CONTAINS_STRING", + }, + }, + }, + }, + "http_check": { + Type: schema.TypeList, + Optional: true, + Description: `Contains information needed to make an HTTP or HTTPS check.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accepted_response_status_codes": { + Type: schema.TypeList, + Optional: true, + Description: `If present, the check will only pass if the HTTP response status code is in this set of status codes. 
If empty, the HTTP status code will only pass if the HTTP status code is 200-299.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "status_class": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"STATUS_CLASS_1XX", "STATUS_CLASS_2XX", "STATUS_CLASS_3XX", "STATUS_CLASS_4XX", "STATUS_CLASS_5XX", "STATUS_CLASS_ANY", ""}), + Description: `A class of status codes to accept. Possible values: ["STATUS_CLASS_1XX", "STATUS_CLASS_2XX", "STATUS_CLASS_3XX", "STATUS_CLASS_4XX", "STATUS_CLASS_5XX", "STATUS_CLASS_ANY"]`, + }, + "status_value": { + Type: schema.TypeInt, + Optional: true, + Description: `A status code to accept.`, + }, + }, + }, + }, + "auth_info": { + Type: schema.TypeList, + Optional: true, + Description: `The authentication information. Optional when creating an HTTP check; defaults to empty.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "password": { + Type: schema.TypeString, + Required: true, + Description: `The password to authenticate.`, + Sensitive: true, + }, + "username": { + Type: schema.TypeString, + Required: true, + Description: `The username to authenticate.`, + }, + }, + }, + AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, + }, + "body": { + Type: schema.TypeString, + Optional: true, + Description: `The request body associated with the HTTP POST request. If contentType is URL_ENCODED, the body passed in must be URL-encoded. Users can provide a Content-Length header via the headers field or the API will do so. If the requestMethod is GET and body is not empty, the API will return an error. The maximum byte size is 1 megabyte. Note - As with all bytes fields JSON representations are base64 encoded. e.g. 
"foo=bar" in URL-encoded form is "foo%3Dbar" and in base64 encoding is "Zm9vJTI1M0RiYXI=".`, + }, + "content_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"TYPE_UNSPECIFIED", "URL_ENCODED", ""}), + Description: `The content type to use for the check. Possible values: ["TYPE_UNSPECIFIED", "URL_ENCODED"]`, + }, + "headers": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `The list of headers to send as part of the uptime check request. If two headers have the same key and different values, they should be entered as a single header, with the value being a comma-separated list of all the desired values as described at https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). Entering two separate headers with the same key in a Create call will cause the first to be overwritten by the second. The maximum number of headers allowed is 100.`, + Elem: &schema.Schema{Type: schema.TypeString}, + AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, + }, + "mask_headers": { + Type: schema.TypeBool, + Optional: true, + Description: `Boolean specifying whether to encrypt the header information. Encryption should be specified for any headers related to authentication that you do not wish to be seen when retrieving the configuration. The server will be responsible for encrypting the headers. On Get/List calls, if mask_headers is set to True then the headers will be obscured with ******.`, + AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, + }, + "path": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: resourceMonitoringUptimeCheckConfigHttpCheckPathDiffSuppress, + Description: `The path to the page to run the check against. 
Will be combined with the host (specified within the MonitoredResource) and port to construct the full URL. If the provided path does not begin with "/", a "/" will be prepended automatically. Optional (defaults to "/").`, + Default: "/", + AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The port to the page to run the check against. Will be combined with host (specified within the MonitoredResource) and path to construct the full URL. Optional (defaults to 80 without SSL, or 443 with SSL).`, + AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, + }, + "request_method": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"METHOD_UNSPECIFIED", "GET", "POST", ""}), + Description: `The HTTP request method to use for the check. If set to METHOD_UNSPECIFIED then requestMethod defaults to GET. Default value: "GET" Possible values: ["METHOD_UNSPECIFIED", "GET", "POST"]`, + Default: "GET", + }, + "use_ssl": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, use HTTPS instead of HTTP to run the check.`, + AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, + }, + "validate_ssl": { + Type: schema.TypeBool, + Optional: true, + Description: `Boolean specifying whether to include SSL certificate validation as a part of the Uptime check. Only applies to checks where monitoredResource is set to uptime_url. 
If useSsl is false, setting validateSsl to true has no effect.`, + }, + }, + }, + ExactlyOneOf: []string{"http_check", "tcp_check"}, + }, + "monitored_resource": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The monitored resource (https://cloud.google.com/monitoring/api/resources) associated with the configuration. The following monitored resource types are supported for uptime checks: uptime_url gce_instance gae_app aws_ec2_instance aws_elb_load_balancer k8s_service servicedirectory_service`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "labels": { + Type: schema.TypeMap, + Required: true, + ForceNew: true, + Description: `Values for all of the labels listed in the associated monitored resource descriptor. For example, Compute Engine VM instances use the labels "project_id", "instance_id", and "zone".`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.monitoredResourceDescriptors#MonitoredResourceDescriptor) object. For example, the type of a Compute Engine VM instance is gce_instance. For a list of types, see Monitoring resource types (https://cloud.google.com/monitoring/api/resources) and Logging resource types (https://cloud.google.com/logging/docs/api/v2/resource-list).`, + }, + }, + }, + ExactlyOneOf: []string{"monitored_resource", "resource_group"}, + }, + "period": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `How often, in seconds, the uptime check is performed. Currently, the only supported values are 60s (1 minute), 300s (5 minutes), 600s (10 minutes), and 900s (15 minutes). 
Optional, defaults to 300s.`, + Default: "300s", + }, + "resource_group": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The group resource associated with the configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The group of resources being monitored. Should be the 'name' of a group`, + AtLeastOneOf: []string{"resource_group.0.resource_type", "resource_group.0.group_id"}, + }, + "resource_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"RESOURCE_TYPE_UNSPECIFIED", "INSTANCE", "AWS_ELB_LOAD_BALANCER", ""}), + Description: `The resource type of the group members. Possible values: ["RESOURCE_TYPE_UNSPECIFIED", "INSTANCE", "AWS_ELB_LOAD_BALANCER"]`, + AtLeastOneOf: []string{"resource_group.0.resource_type", "resource_group.0.group_id"}, + }, + }, + }, + ExactlyOneOf: []string{"monitored_resource", "resource_group"}, + }, + "selected_regions": { + Type: schema.TypeList, + Optional: true, + Description: `The list of regions from which the check will be run. Some regions contain one location, and others contain more than one. If this field is specified, enough regions to include a minimum of 3 locations must be provided, or an error message is returned. Not specifying this field will result in uptime checks running from all regions.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "tcp_check": { + Type: schema.TypeList, + Optional: true, + Description: `Contains information needed to make a TCP check.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": { + Type: schema.TypeInt, + Required: true, + Description: `The port to the page to run the check against. 
Will be combined with host (specified within the MonitoredResource) to construct the full URL.`, + }, + }, + }, + ExactlyOneOf: []string{"http_check", "tcp_check"}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `A unique resource name for this UptimeCheckConfig. The format is projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].`, + }, + "uptime_check_id": { + Type: schema.TypeString, + Computed: true, + Description: `The id of the uptime check`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceMonitoringUptimeCheckConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandMonitoringUptimeCheckConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + periodProp, err := expandMonitoringUptimeCheckConfigPeriod(d.Get("period"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("period"); !tpgresource.IsEmptyValue(reflect.ValueOf(periodProp)) && (ok || !reflect.DeepEqual(v, periodProp)) { + obj["period"] = periodProp + } + timeoutProp, err := expandMonitoringUptimeCheckConfigTimeout(d.Get("timeout"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("timeout"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeoutProp)) && (ok || !reflect.DeepEqual(v, timeoutProp)) { + obj["timeout"] = timeoutProp + } + contentMatchersProp, err := expandMonitoringUptimeCheckConfigContentMatchers(d.Get("content_matchers"), d, config) + if 
err != nil { + return err + } else if v, ok := d.GetOkExists("content_matchers"); !tpgresource.IsEmptyValue(reflect.ValueOf(contentMatchersProp)) && (ok || !reflect.DeepEqual(v, contentMatchersProp)) { + obj["contentMatchers"] = contentMatchersProp + } + selectedRegionsProp, err := expandMonitoringUptimeCheckConfigSelectedRegions(d.Get("selected_regions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("selected_regions"); !tpgresource.IsEmptyValue(reflect.ValueOf(selectedRegionsProp)) && (ok || !reflect.DeepEqual(v, selectedRegionsProp)) { + obj["selectedRegions"] = selectedRegionsProp + } + checkerTypeProp, err := expandMonitoringUptimeCheckConfigCheckerType(d.Get("checker_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("checker_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(checkerTypeProp)) && (ok || !reflect.DeepEqual(v, checkerTypeProp)) { + obj["checkerType"] = checkerTypeProp + } + httpCheckProp, err := expandMonitoringUptimeCheckConfigHttpCheck(d.Get("http_check"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("http_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(httpCheckProp)) && (ok || !reflect.DeepEqual(v, httpCheckProp)) { + obj["httpCheck"] = httpCheckProp + } + tcpCheckProp, err := expandMonitoringUptimeCheckConfigTcpCheck(d.Get("tcp_check"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tcp_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(tcpCheckProp)) && (ok || !reflect.DeepEqual(v, tcpCheckProp)) { + obj["tcpCheck"] = tcpCheckProp + } + resourceGroupProp, err := expandMonitoringUptimeCheckConfigResourceGroup(d.Get("resource_group"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("resource_group"); !tpgresource.IsEmptyValue(reflect.ValueOf(resourceGroupProp)) && (ok || !reflect.DeepEqual(v, resourceGroupProp)) { + obj["resourceGroup"] = resourceGroupProp + } + 
monitoredResourceProp, err := expandMonitoringUptimeCheckConfigMonitoredResource(d.Get("monitored_resource"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("monitored_resource"); !tpgresource.IsEmptyValue(reflect.ValueOf(monitoredResourceProp)) && (ok || !reflect.DeepEqual(v, monitoredResourceProp)) { + obj["monitoredResource"] = monitoredResourceProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "stackdriver/groups/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/uptimeCheckConfigs") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new UptimeCheckConfig: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for UptimeCheckConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return fmt.Errorf("Error creating UptimeCheckConfig: %s", err) + } + if err := d.Set("name", flattenMonitoringUptimeCheckConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + 
// `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating UptimeCheckConfig %q: %#v", d.Id(), res) + + return resourceMonitoringUptimeCheckConfigRead(d, meta) +} + +func resourceMonitoringUptimeCheckConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for UptimeCheckConfig: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("MonitoringUptimeCheckConfig %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading UptimeCheckConfig: %s", 
err) + } + + if err := d.Set("name", flattenMonitoringUptimeCheckConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) + } + if err := d.Set("uptime_check_id", flattenMonitoringUptimeCheckConfigUptimeCheckId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) + } + if err := d.Set("display_name", flattenMonitoringUptimeCheckConfigDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) + } + if err := d.Set("period", flattenMonitoringUptimeCheckConfigPeriod(res["period"], d, config)); err != nil { + return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) + } + if err := d.Set("timeout", flattenMonitoringUptimeCheckConfigTimeout(res["timeout"], d, config)); err != nil { + return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) + } + if err := d.Set("content_matchers", flattenMonitoringUptimeCheckConfigContentMatchers(res["contentMatchers"], d, config)); err != nil { + return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) + } + if err := d.Set("selected_regions", flattenMonitoringUptimeCheckConfigSelectedRegions(res["selectedRegions"], d, config)); err != nil { + return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) + } + if err := d.Set("checker_type", flattenMonitoringUptimeCheckConfigCheckerType(res["checkerType"], d, config)); err != nil { + return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) + } + if err := d.Set("http_check", flattenMonitoringUptimeCheckConfigHttpCheck(res["httpCheck"], d, config)); err != nil { + return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) + } + if err := d.Set("tcp_check", flattenMonitoringUptimeCheckConfigTcpCheck(res["tcpCheck"], d, config)); err != nil { + return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) + } + if err := d.Set("resource_group", 
flattenMonitoringUptimeCheckConfigResourceGroup(res["resourceGroup"], d, config)); err != nil { + return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) + } + if err := d.Set("monitored_resource", flattenMonitoringUptimeCheckConfigMonitoredResource(res["monitoredResource"], d, config)); err != nil { + return fmt.Errorf("Error reading UptimeCheckConfig: %s", err) + } + + return nil +} + +func resourceMonitoringUptimeCheckConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for UptimeCheckConfig: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandMonitoringUptimeCheckConfigDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + timeoutProp, err := expandMonitoringUptimeCheckConfigTimeout(d.Get("timeout"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("timeout"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutProp)) { + obj["timeout"] = timeoutProp + } + contentMatchersProp, err := expandMonitoringUptimeCheckConfigContentMatchers(d.Get("content_matchers"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("content_matchers"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, contentMatchersProp)) { + obj["contentMatchers"] = contentMatchersProp + } + selectedRegionsProp, err := expandMonitoringUptimeCheckConfigSelectedRegions(d.Get("selected_regions"), d, config) + if 
err != nil { + return err + } else if v, ok := d.GetOkExists("selected_regions"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, selectedRegionsProp)) { + obj["selectedRegions"] = selectedRegionsProp + } + httpCheckProp, err := expandMonitoringUptimeCheckConfigHttpCheck(d.Get("http_check"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("http_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, httpCheckProp)) { + obj["httpCheck"] = httpCheckProp + } + tcpCheckProp, err := expandMonitoringUptimeCheckConfigTcpCheck(d.Get("tcp_check"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tcp_check"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tcpCheckProp)) { + obj["tcpCheck"] = tcpCheckProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "stackdriver/groups/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating UptimeCheckConfig %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("timeout") { + updateMask = append(updateMask, "timeout") + } + + if d.HasChange("content_matchers") { + updateMask = append(updateMask, "contentMatchers") + } + + if d.HasChange("selected_regions") { + updateMask = append(updateMask, "selectedRegions") + } + + if d.HasChange("http_check") { + updateMask = append(updateMask, "httpCheck") + } + + if d.HasChange("tcp_check") { + updateMask = append(updateMask, "tcpCheck") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, 
map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + + if err != nil { + return fmt.Errorf("Error updating UptimeCheckConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating UptimeCheckConfig %q: %#v", d.Id(), res) + } + + return resourceMonitoringUptimeCheckConfigRead(d, meta) +} + +func resourceMonitoringUptimeCheckConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for UptimeCheckConfig: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "stackdriver/groups/{{project}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting UptimeCheckConfig %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsMonitoringConcurrentEditError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "UptimeCheckConfig") + } + + log.Printf("[DEBUG] Finished deleting UptimeCheckConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceMonitoringUptimeCheckConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { + return nil, err + } + + return []*schema.ResourceData{d}, nil +} + +func flattenMonitoringUptimeCheckConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigUptimeCheckId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + parts := strings.Split(d.Get("name").(string), "/") + return parts[len(parts)-1] +} + +func flattenMonitoringUptimeCheckConfigDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigContentMatchers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := 
make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "content": flattenMonitoringUptimeCheckConfigContentMatchersContent(original["content"], d, config), + "matcher": flattenMonitoringUptimeCheckConfigContentMatchersMatcher(original["matcher"], d, config), + "json_path_matcher": flattenMonitoringUptimeCheckConfigContentMatchersJsonPathMatcher(original["jsonPathMatcher"], d, config), + }) + } + return transformed +} +func flattenMonitoringUptimeCheckConfigContentMatchersContent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigContentMatchersMatcher(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigContentMatchersJsonPathMatcher(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["json_path"] = + flattenMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonPath(original["jsonPath"], d, config) + transformed["json_matcher"] = + flattenMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonMatcher(original["jsonMatcher"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonMatcher(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenMonitoringUptimeCheckConfigSelectedRegions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigCheckerType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigHttpCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["request_method"] = + flattenMonitoringUptimeCheckConfigHttpCheckRequestMethod(original["requestMethod"], d, config) + transformed["content_type"] = + flattenMonitoringUptimeCheckConfigHttpCheckContentType(original["contentType"], d, config) + transformed["auth_info"] = + flattenMonitoringUptimeCheckConfigHttpCheckAuthInfo(original["authInfo"], d, config) + transformed["port"] = + flattenMonitoringUptimeCheckConfigHttpCheckPort(original["port"], d, config) + transformed["headers"] = + flattenMonitoringUptimeCheckConfigHttpCheckHeaders(original["headers"], d, config) + transformed["path"] = + flattenMonitoringUptimeCheckConfigHttpCheckPath(original["path"], d, config) + transformed["use_ssl"] = + flattenMonitoringUptimeCheckConfigHttpCheckUseSsl(original["useSsl"], d, config) + transformed["validate_ssl"] = + flattenMonitoringUptimeCheckConfigHttpCheckValidateSsl(original["validateSsl"], d, config) + transformed["mask_headers"] = + flattenMonitoringUptimeCheckConfigHttpCheckMaskHeaders(original["maskHeaders"], d, config) + transformed["body"] = + flattenMonitoringUptimeCheckConfigHttpCheckBody(original["body"], d, config) + transformed["accepted_response_status_codes"] = + flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(original["acceptedResponseStatusCodes"], d, config) + return []interface{}{transformed} +} +func 
flattenMonitoringUptimeCheckConfigHttpCheckRequestMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigHttpCheckContentType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigHttpCheckAuthInfo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["password"] = + flattenMonitoringUptimeCheckConfigHttpCheckAuthInfoPassword(original["password"], d, config) + transformed["username"] = + flattenMonitoringUptimeCheckConfigHttpCheckAuthInfoUsername(original["username"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringUptimeCheckConfigHttpCheckAuthInfoPassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("http_check.0.auth_info.0.password") +} + +func flattenMonitoringUptimeCheckConfigHttpCheckAuthInfoUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigHttpCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenMonitoringUptimeCheckConfigHttpCheckHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigHttpCheckPath(v interface{}, 
d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigHttpCheckUseSsl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigHttpCheckValidateSsl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigHttpCheckMaskHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigHttpCheckBody(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "status_value": flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(original["statusValue"], d, config), + "status_class": flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(original["statusClass"], d, config), + }) + } + return transformed +} +func flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return 
v // let terraform core handle it otherwise +} + +func flattenMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigTcpCheck(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["port"] = + flattenMonitoringUptimeCheckConfigTcpCheckPort(original["port"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringUptimeCheckConfigTcpCheckPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenMonitoringUptimeCheckConfigResourceGroup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resource_type"] = + flattenMonitoringUptimeCheckConfigResourceGroupResourceType(original["resourceType"], d, config) + transformed["group_id"] = + flattenMonitoringUptimeCheckConfigResourceGroupGroupId(original["groupId"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringUptimeCheckConfigResourceGroupResourceType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigResourceGroupGroupId(v interface{}, 
d *schema.ResourceData, config *transport_tpg.Config) interface{} { + project := d.Get("project").(string) + return fmt.Sprintf("projects/%s/groups/%s", project, v) +} + +func flattenMonitoringUptimeCheckConfigMonitoredResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = + flattenMonitoringUptimeCheckConfigMonitoredResourceType(original["type"], d, config) + transformed["labels"] = + flattenMonitoringUptimeCheckConfigMonitoredResourceLabels(original["labels"], d, config) + return []interface{}{transformed} +} +func flattenMonitoringUptimeCheckConfigMonitoredResourceType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenMonitoringUptimeCheckConfigMonitoredResourceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandMonitoringUptimeCheckConfigDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigContentMatchers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedContent, err := 
expandMonitoringUptimeCheckConfigContentMatchersContent(original["content"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["content"] = transformedContent + } + + transformedMatcher, err := expandMonitoringUptimeCheckConfigContentMatchersMatcher(original["matcher"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMatcher); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["matcher"] = transformedMatcher + } + + transformedJsonPathMatcher, err := expandMonitoringUptimeCheckConfigContentMatchersJsonPathMatcher(original["json_path_matcher"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedJsonPathMatcher); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["jsonPathMatcher"] = transformedJsonPathMatcher + } + + req = append(req, transformed) + } + return req, nil +} + +func expandMonitoringUptimeCheckConfigContentMatchersContent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigContentMatchersMatcher(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigContentMatchersJsonPathMatcher(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedJsonPath, err := expandMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonPath(original["json_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedJsonPath); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["jsonPath"] = transformedJsonPath + } + + transformedJsonMatcher, err := expandMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonMatcher(original["json_matcher"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedJsonMatcher); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["jsonMatcher"] = transformedJsonMatcher + } + + return transformed, nil +} + +func expandMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigContentMatchersJsonPathMatcherJsonMatcher(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigSelectedRegions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigCheckerType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRequestMethod, err := expandMonitoringUptimeCheckConfigHttpCheckRequestMethod(original["request_method"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequestMethod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requestMethod"] = transformedRequestMethod + } + + transformedContentType, err := 
expandMonitoringUptimeCheckConfigHttpCheckContentType(original["content_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContentType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["contentType"] = transformedContentType + } + + transformedAuthInfo, err := expandMonitoringUptimeCheckConfigHttpCheckAuthInfo(original["auth_info"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAuthInfo); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["authInfo"] = transformedAuthInfo + } + + transformedPort, err := expandMonitoringUptimeCheckConfigHttpCheckPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedHeaders, err := expandMonitoringUptimeCheckConfigHttpCheckHeaders(original["headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["headers"] = transformedHeaders + } + + transformedPath, err := expandMonitoringUptimeCheckConfigHttpCheckPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedUseSsl, err := expandMonitoringUptimeCheckConfigHttpCheckUseSsl(original["use_ssl"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUseSsl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["useSsl"] = transformedUseSsl + } + + transformedValidateSsl, err := expandMonitoringUptimeCheckConfigHttpCheckValidateSsl(original["validate_ssl"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedValidateSsl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["validateSsl"] = transformedValidateSsl + } + + transformedMaskHeaders, err := expandMonitoringUptimeCheckConfigHttpCheckMaskHeaders(original["mask_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaskHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maskHeaders"] = transformedMaskHeaders + } + + transformedBody, err := expandMonitoringUptimeCheckConfigHttpCheckBody(original["body"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBody); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["body"] = transformedBody + } + + transformedAcceptedResponseStatusCodes, err := expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(original["accepted_response_status_codes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceptedResponseStatusCodes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["acceptedResponseStatusCodes"] = transformedAcceptedResponseStatusCodes + } + + return transformed, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckRequestMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckContentType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckAuthInfo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPassword, err := 
expandMonitoringUptimeCheckConfigHttpCheckAuthInfoPassword(original["password"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["password"] = transformedPassword + } + + transformedUsername, err := expandMonitoringUptimeCheckConfigHttpCheckAuthInfoUsername(original["username"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUsername); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["username"] = transformedUsername + } + + return transformed, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckAuthInfoPassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckAuthInfoUsername(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckUseSsl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckValidateSsl(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckMaskHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckBody(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedStatusValue, err := expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(original["status_value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStatusValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["statusValue"] = transformedStatusValue + } + + transformedStatusClass, err := expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(original["status_class"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStatusClass); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["statusClass"] = transformedStatusClass + } + + req = append(req, transformed) + } + return req, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigHttpCheckAcceptedResponseStatusCodesStatusClass(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigTcpCheck(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPort, err := expandMonitoringUptimeCheckConfigTcpCheckPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + return transformed, nil +} + +func expandMonitoringUptimeCheckConfigTcpCheckPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigResourceGroup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedResourceType, err := expandMonitoringUptimeCheckConfigResourceGroupResourceType(original["resource_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResourceType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resourceType"] = transformedResourceType + } + + transformedGroupId, err := expandMonitoringUptimeCheckConfigResourceGroupGroupId(original["group_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGroupId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["groupId"] = transformedGroupId + } + + return transformed, nil +} + +func expandMonitoringUptimeCheckConfigResourceGroupResourceType(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigResourceGroupGroupId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.GetResourceNameFromSelfLink(v.(string)), nil +} + +func expandMonitoringUptimeCheckConfigMonitoredResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandMonitoringUptimeCheckConfigMonitoredResourceType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedLabels, err := expandMonitoringUptimeCheckConfigMonitoredResourceLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labels"] = transformedLabels + } + + return transformed, nil +} + +func expandMonitoringUptimeCheckConfigMonitoredResourceType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandMonitoringUptimeCheckConfigMonitoredResourceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_uptime_check_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_uptime_check_config_sweeper.go new file mode 100644 index 0000000000..1c32a0c184 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/monitoring/resource_monitoring_uptime_check_config_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package monitoring + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("MonitoringUptimeCheckConfig", testSweepMonitoringUptimeCheckConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepMonitoringUptimeCheckConfig(region string) error { + resourceName := "MonitoringUptimeCheckConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://monitoring.googleapis.com/v3/projects/{{project}}/uptimeCheckConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil 
{ + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["uptimeCheckConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://monitoring.googleapis.com/v3/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_connectivity_hub.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_connectivity_hub.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub.go index 54cce5e70f..a422a7cf70 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_connectivity_hub.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package networkconnectivity import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceNetworkConnectivityHub() *schema.Resource { @@ -70,7 +77,7 @@ func ResourceNetworkConnectivityHub() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -121,8 
+128,8 @@ func NetworkConnectivityHubRoutingVpcsSchema() *schema.Resource { } func resourceNetworkConnectivityHubCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -130,7 +137,7 @@ func resourceNetworkConnectivityHubCreate(d *schema.ResourceData, meta interface obj := &networkconnectivity.Hub{ Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), } @@ -139,18 +146,18 @@ func resourceNetworkConnectivityHubCreate(d *schema.ResourceData, meta interface return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -172,8 +179,8 @@ func resourceNetworkConnectivityHubCreate(d *schema.ResourceData, meta interface } func 
resourceNetworkConnectivityHubRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -181,21 +188,21 @@ func resourceNetworkConnectivityHubRead(d *schema.ResourceData, meta interface{} obj := &networkconnectivity.Hub{ Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -204,7 +211,7 @@ func resourceNetworkConnectivityHubRead(d *schema.ResourceData, meta interface{} res, err := client.GetHub(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("NetworkConnectivityHub %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("name", res.Name); err != nil { @@ -238,8 +245,8 @@ func 
resourceNetworkConnectivityHubRead(d *schema.ResourceData, meta interface{} return nil } func resourceNetworkConnectivityHubUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -247,22 +254,22 @@ func resourceNetworkConnectivityHubUpdate(d *schema.ResourceData, meta interface obj := &networkconnectivity.Hub{ Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -284,8 +291,8 @@ func resourceNetworkConnectivityHubUpdate(d *schema.ResourceData, meta interface } func resourceNetworkConnectivityHubDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := 
meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -293,22 +300,22 @@ func resourceNetworkConnectivityHubDelete(d *schema.ResourceData, meta interface obj := &networkconnectivity.Hub{ Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), } log.Printf("[DEBUG] Deleting Hub %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -323,9 +330,9 @@ func resourceNetworkConnectivityHubDelete(d *schema.ResourceData, meta interface } func resourceNetworkConnectivityHubImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/global/hubs/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -334,7 +341,7 @@ func resourceNetworkConnectivityHubImport(d *schema.ResourceData, meta interface } 
// Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/global/hubs/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/global/hubs/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub_sweeper.go new file mode 100644 index 0000000000..23bc57337b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_hub_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package networkconnectivity + +import ( + "context" + "log" + "testing" + + networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NetworkConnectivityHub", testSweepNetworkConnectivityHub) +} + +func testSweepNetworkConnectivityHub(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for NetworkConnectivityHub") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLNetworkConnectivityClient(config, config.UserAgent, "", 0) + err = client.DeleteAllHub(context.Background(), d["project"], isDeletableNetworkConnectivityHub) + if err != nil { + return err + } + return nil +} + +func isDeletableNetworkConnectivityHub(r *networkconnectivity.Hub) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_connectivity_spoke.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke.go similarity index 87% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_connectivity_spoke.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke.go index f5f42550d1..dee3cf5da6 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_connectivity_spoke.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package networkconnectivity import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceNetworkConnectivitySpoke() *schema.Resource { @@ -49,7 +56,7 @@ func ResourceNetworkConnectivitySpoke() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "Immutable. 
The URI of the hub that this spoke is attached to.", }, @@ -115,7 +122,7 @@ func ResourceNetworkConnectivitySpoke() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -202,7 +209,7 @@ func NetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesSchema() *sc Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The URI of the virtual machine resource", }, }, @@ -231,8 +238,8 @@ func NetworkConnectivitySpokeLinkedVpnTunnelsSchema() *schema.Resource { } func resourceNetworkConnectivitySpokeCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -242,7 +249,7 @@ func resourceNetworkConnectivitySpokeCreate(d *schema.ResourceData, meta interfa Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), LinkedInterconnectAttachments: expandNetworkConnectivitySpokeLinkedInterconnectAttachments(d.Get("linked_interconnect_attachments")), LinkedRouterApplianceInstances: expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(d.Get("linked_router_appliance_instances")), LinkedVpnTunnels: expandNetworkConnectivitySpokeLinkedVpnTunnels(d.Get("linked_vpn_tunnels")), @@ -254,18 +261,18 @@ func resourceNetworkConnectivitySpokeCreate(d *schema.ResourceData, meta interfa return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := 
generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -287,8 +294,8 @@ func resourceNetworkConnectivitySpokeCreate(d *schema.ResourceData, meta interfa } func resourceNetworkConnectivitySpokeRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -298,24 +305,24 @@ func resourceNetworkConnectivitySpokeRead(d *schema.ResourceData, meta interface Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), LinkedInterconnectAttachments: expandNetworkConnectivitySpokeLinkedInterconnectAttachments(d.Get("linked_interconnect_attachments")), LinkedRouterApplianceInstances: expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(d.Get("linked_router_appliance_instances")), LinkedVpnTunnels: 
expandNetworkConnectivitySpokeLinkedVpnTunnels(d.Get("linked_vpn_tunnels")), Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -324,7 +331,7 @@ func resourceNetworkConnectivitySpokeRead(d *schema.ResourceData, meta interface res, err := client.GetSpoke(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("NetworkConnectivitySpoke %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("hub", res.Hub); err != nil { @@ -370,8 +377,8 @@ func resourceNetworkConnectivitySpokeRead(d *schema.ResourceData, meta interface return nil } func resourceNetworkConnectivitySpokeUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -381,25 +388,25 @@ func resourceNetworkConnectivitySpokeUpdate(d *schema.ResourceData, meta interfa Location: dcl.String(d.Get("location").(string)), Name: 
dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), LinkedInterconnectAttachments: expandNetworkConnectivitySpokeLinkedInterconnectAttachments(d.Get("linked_interconnect_attachments")), LinkedRouterApplianceInstances: expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(d.Get("linked_router_appliance_instances")), LinkedVpnTunnels: expandNetworkConnectivitySpokeLinkedVpnTunnels(d.Get("linked_vpn_tunnels")), Project: dcl.String(project), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -421,8 +428,8 @@ func resourceNetworkConnectivitySpokeUpdate(d *schema.ResourceData, meta interfa } func resourceNetworkConnectivitySpokeDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -432,7 +439,7 @@ func 
resourceNetworkConnectivitySpokeDelete(d *schema.ResourceData, meta interfa Location: dcl.String(d.Get("location").(string)), Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), LinkedInterconnectAttachments: expandNetworkConnectivitySpokeLinkedInterconnectAttachments(d.Get("linked_interconnect_attachments")), LinkedRouterApplianceInstances: expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(d.Get("linked_router_appliance_instances")), LinkedVpnTunnels: expandNetworkConnectivitySpokeLinkedVpnTunnels(d.Get("linked_vpn_tunnels")), @@ -440,17 +447,17 @@ func resourceNetworkConnectivitySpokeDelete(d *schema.ResourceData, meta interfa } log.Printf("[DEBUG] Deleting Spoke %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -465,9 +472,9 @@ func resourceNetworkConnectivitySpokeDelete(d *schema.ResourceData, meta interfa } func resourceNetworkConnectivitySpokeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := 
meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/spokes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -476,7 +483,7 @@ func resourceNetworkConnectivitySpokeImport(d *schema.ResourceData, meta interfa } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/spokes/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/spokes/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -496,7 +503,7 @@ func expandNetworkConnectivitySpokeLinkedInterconnectAttachments(o interface{}) obj := objArr[0].(map[string]interface{}) return &networkconnectivity.SpokeLinkedInterconnectAttachments{ SiteToSiteDataTransfer: dcl.Bool(obj["site_to_site_data_transfer"].(bool)), - Uris: expandStringArray(obj["uris"]), + Uris: tpgdclresource.ExpandStringArray(obj["uris"]), } } @@ -609,7 +616,7 @@ func expandNetworkConnectivitySpokeLinkedVpnTunnels(o interface{}) *networkconne obj := objArr[0].(map[string]interface{}) return &networkconnectivity.SpokeLinkedVpnTunnels{ SiteToSiteDataTransfer: dcl.Bool(obj["site_to_site_data_transfer"].(bool)), - Uris: expandStringArray(obj["uris"]), + Uris: tpgdclresource.ExpandStringArray(obj["uris"]), } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke_sweeper.go new file mode 100644 index 0000000000..e1c264b868 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity/resource_network_connectivity_spoke_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package networkconnectivity + +import ( + "context" + "log" + "testing" + + networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NetworkConnectivitySpoke", testSweepNetworkConnectivitySpoke) +} + +func testSweepNetworkConnectivitySpoke(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for NetworkConnectivitySpoke") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] 
error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLNetworkConnectivityClient(config, config.UserAgent, "", 0) + err = client.DeleteAllSpoke(context.Background(), d["project"], d["location"], isDeletableNetworkConnectivitySpoke) + if err != nil { + return err + } + return nil +} + +func isDeletableNetworkConnectivitySpoke(r *networkconnectivity.Spoke) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkmanagement/network_management_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkmanagement/network_management_operation.go new file mode 100644 index 0000000000..fd92cf4748 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkmanagement/network_management_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package networkmanagement + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type NetworkManagementOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *NetworkManagementOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.NetworkManagementBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createNetworkManagementWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*NetworkManagementOperationWaiter, error) { + w := &NetworkManagementOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func NetworkManagementOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createNetworkManagementWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func NetworkManagementOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, 
activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createNetworkManagementWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkmanagement/resource_network_management_connectivity_test_resource.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkmanagement/resource_network_management_connectivity_test_resource.go new file mode 100644 index 0000000000..7002d0f48c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkmanagement/resource_network_management_connectivity_test_resource.go @@ -0,0 +1,935 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package networkmanagement + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceNetworkManagementConnectivityTest() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkManagementConnectivityTestCreate, + Read: resourceNetworkManagementConnectivityTestRead, + Update: resourceNetworkManagementConnectivityTestUpdate, + Delete: resourceNetworkManagementConnectivityTestDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNetworkManagementConnectivityTestImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "destination": { + Type: schema.TypeList, + Required: true, + Description: `Required. Destination specification of the Connectivity Test. + +You can use a combination of destination IP address, Compute +Engine VM instance, or VPC network to uniquely identify the +destination location. + +Even if the destination IP address is not unique, the source IP +location is unique. Usually, the analysis can infer the destination +endpoint from route information. + +If the destination you specify is a VM instance and the instance has +multiple network interfaces, then you must also specify either a +destination IP address or VPC network to identify the destination +interface. + +A reachability analysis proceeds even if the destination location +is ambiguous. 
However, the result can include endpoints that you +don't intend to test.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Optional: true, + Description: `A Compute Engine instance URI.`, + }, + "ip_address": { + Type: schema.TypeString, + Optional: true, + Description: `The IP address of the endpoint, which can be an external or +internal IP. An IPv6 address is only allowed when the test's +destination is a global load balancer VIP.`, + }, + "network": { + Type: schema.TypeString, + Optional: true, + Description: `A Compute Engine network URI.`, + }, + "port": { + Type: schema.TypeInt, + Optional: true, + Description: `The IP protocol port of the endpoint. Only applicable when +protocol is TCP or UDP.`, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, + Description: `Project ID where the endpoint is located. The Project ID can be +derived from the URI if you provide a VM instance or network URI. +The following are two cases where you must provide the project ID: +1. Only the IP address is specified, and the IP address is within +a GCP project. 2. When you are using Shared VPC and the IP address +that you provide is from the service project. In this case, the +network that the IP address resides in is defined in the host +project.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Unique name for the connectivity test.`, + }, + "source": { + Type: schema.TypeList, + Required: true, + Description: `Required. Source specification of the Connectivity Test. + +You can use a combination of source IP address, virtual machine +(VM) instance, or Compute Engine network to uniquely identify the +source location. + +Examples: If the source IP address is an internal IP address within +a Google Cloud Virtual Private Cloud (VPC) network, then you must +also specify the VPC network. 
Otherwise, specify the VM instance, +which already contains its internal IP address and VPC network +information. + +If the source of the test is within an on-premises network, then +you must provide the destination VPC network. + +If the source endpoint is a Compute Engine VM instance with multiple +network interfaces, the instance itself is not sufficient to +identify the endpoint. So, you must also specify the source IP +address or VPC network. + +A reachability analysis proceeds even if the source location is +ambiguous. However, the test result may include endpoints that +you don't intend to test.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Optional: true, + Description: `A Compute Engine instance URI.`, + }, + "ip_address": { + Type: schema.TypeString, + Optional: true, + Description: `The IP address of the endpoint, which can be an external or +internal IP. An IPv6 address is only allowed when the test's +destination is a global load balancer VIP.`, + }, + "network": { + Type: schema.TypeString, + Optional: true, + Description: `A Compute Engine network URI.`, + }, + "network_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"GCP_NETWORK", "NON_GCP_NETWORK", ""}), + Description: `Type of the network where the endpoint is located. Possible values: ["GCP_NETWORK", "NON_GCP_NETWORK"]`, + }, + "port": { + Type: schema.TypeInt, + Optional: true, + Description: `The IP protocol port of the endpoint. Only applicable when +protocol is TCP or UDP.`, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, + Description: `Project ID where the endpoint is located. The Project ID can be +derived from the URI if you provide a VM instance or network URI. +The following are two cases where you must provide the project ID: + +1. Only the IP address is specified, and the IP address is + within a GCP project. +2. 
When you are using Shared VPC and the IP address + that you provide is from the service project. In this case, + the network that the IP address resides in is defined in the + host project.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `The user-supplied description of the Connectivity Test. +Maximum of 512 characters.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Resource labels to represent user-provided metadata.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "protocol": { + Type: schema.TypeString, + Optional: true, + Description: `IP Protocol of the test. When not provided, "TCP" is assumed.`, + Default: "TCP", + }, + "related_projects": { + Type: schema.TypeList, + Optional: true, + Description: `Other projects that may be relevant for reachability analysis. +This is applicable to scenarios where a test can cross project +boundaries.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceNetworkManagementConnectivityTestCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandNetworkManagementConnectivityTestName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandNetworkManagementConnectivityTestDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok 
|| !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + sourceProp, err := expandNetworkManagementConnectivityTestSource(d.Get("source"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceProp)) && (ok || !reflect.DeepEqual(v, sourceProp)) { + obj["source"] = sourceProp + } + destinationProp, err := expandNetworkManagementConnectivityTestDestination(d.Get("destination"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("destination"); !tpgresource.IsEmptyValue(reflect.ValueOf(destinationProp)) && (ok || !reflect.DeepEqual(v, destinationProp)) { + obj["destination"] = destinationProp + } + protocolProp, err := expandNetworkManagementConnectivityTestProtocol(d.Get("protocol"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(protocolProp)) && (ok || !reflect.DeepEqual(v, protocolProp)) { + obj["protocol"] = protocolProp + } + relatedProjectsProp, err := expandNetworkManagementConnectivityTestRelatedProjects(d.Get("related_projects"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("related_projects"); !tpgresource.IsEmptyValue(reflect.ValueOf(relatedProjectsProp)) && (ok || !reflect.DeepEqual(v, relatedProjectsProp)) { + obj["relatedProjects"] = relatedProjectsProp + } + labelsProp, err := expandNetworkManagementConnectivityTestLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkManagementBasePath}}projects/{{project}}/locations/global/connectivityTests?testId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ConnectivityTest: 
%#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ConnectivityTest: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ConnectivityTest: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/connectivityTests/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = NetworkManagementOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating ConnectivityTest", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create ConnectivityTest: %s", err) + } + + if err := d.Set("name", flattenNetworkManagementConnectivityTestName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/connectivityTests/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ConnectivityTest %q: %#v", d.Id(), res) + + return resourceNetworkManagementConnectivityTestRead(d, meta) +} + +func resourceNetworkManagementConnectivityTestRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkManagementBasePath}}projects/{{project}}/locations/global/connectivityTests/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ConnectivityTest: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetworkManagementConnectivityTest %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ConnectivityTest: %s", err) + } + + if err := d.Set("name", flattenNetworkManagementConnectivityTestName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectivityTest: %s", err) + } + if err := d.Set("description", flattenNetworkManagementConnectivityTestDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectivityTest: %s", err) + } + if err := 
d.Set("source", flattenNetworkManagementConnectivityTestSource(res["source"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectivityTest: %s", err) + } + if err := d.Set("destination", flattenNetworkManagementConnectivityTestDestination(res["destination"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectivityTest: %s", err) + } + if err := d.Set("protocol", flattenNetworkManagementConnectivityTestProtocol(res["protocol"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectivityTest: %s", err) + } + if err := d.Set("related_projects", flattenNetworkManagementConnectivityTestRelatedProjects(res["relatedProjects"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectivityTest: %s", err) + } + if err := d.Set("labels", flattenNetworkManagementConnectivityTestLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ConnectivityTest: %s", err) + } + + return nil +} + +func resourceNetworkManagementConnectivityTestUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ConnectivityTest: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkManagementConnectivityTestDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + sourceProp, err := expandNetworkManagementConnectivityTestSource(d.Get("source"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceProp)) { + obj["source"] = sourceProp + } + destinationProp, err := expandNetworkManagementConnectivityTestDestination(d.Get("destination"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("destination"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, destinationProp)) { + obj["destination"] = destinationProp + } + protocolProp, err := expandNetworkManagementConnectivityTestProtocol(d.Get("protocol"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, protocolProp)) { + obj["protocol"] = protocolProp + } + relatedProjectsProp, err := expandNetworkManagementConnectivityTestRelatedProjects(d.Get("related_projects"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("related_projects"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, relatedProjectsProp)) { + obj["relatedProjects"] = relatedProjectsProp + } + labelsProp, err := expandNetworkManagementConnectivityTestLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkManagementBasePath}}projects/{{project}}/locations/global/connectivityTests/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ConnectivityTest %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("source") { + updateMask = append(updateMask, "source.ipAddress", + "source.port", + "source.instance", + "source.network", + "source.networkType", + "source.projectId") + } + 
+ if d.HasChange("destination") { + updateMask = append(updateMask, "destination.ipAddress", + "destination.port", + "destination.instance", + "destination.network", + "destination.projectId") + } + + if d.HasChange("protocol") { + updateMask = append(updateMask, "protocol") + } + + if d.HasChange("related_projects") { + updateMask = append(updateMask, "relatedProjects") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating ConnectivityTest %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ConnectivityTest %q: %#v", d.Id(), res) + } + + err = NetworkManagementOperationWaitTime( + config, res, project, "Updating ConnectivityTest", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceNetworkManagementConnectivityTestRead(d, meta) +} + +func resourceNetworkManagementConnectivityTestDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ConnectivityTest: %s", err) + } + 
billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkManagementBasePath}}projects/{{project}}/locations/global/connectivityTests/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting ConnectivityTest %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ConnectivityTest") + } + + err = NetworkManagementOperationWaitTime( + config, res, project, "Deleting ConnectivityTest", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ConnectivityTest %q: %#v", d.Id(), res) + return nil +} + +func resourceNetworkManagementConnectivityTestImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/global/connectivityTests/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/connectivityTests/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNetworkManagementConnectivityTestName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func 
flattenNetworkManagementConnectivityTestDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkManagementConnectivityTestSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["ip_address"] = + flattenNetworkManagementConnectivityTestSourceIpAddress(original["ipAddress"], d, config) + transformed["port"] = + flattenNetworkManagementConnectivityTestSourcePort(original["port"], d, config) + transformed["instance"] = + flattenNetworkManagementConnectivityTestSourceInstance(original["instance"], d, config) + transformed["network"] = + flattenNetworkManagementConnectivityTestSourceNetwork(original["network"], d, config) + transformed["network_type"] = + flattenNetworkManagementConnectivityTestSourceNetworkType(original["networkType"], d, config) + transformed["project_id"] = + flattenNetworkManagementConnectivityTestSourceProjectId(original["projectId"], d, config) + return []interface{}{transformed} +} +func flattenNetworkManagementConnectivityTestSourceIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkManagementConnectivityTestSourcePort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNetworkManagementConnectivityTestSourceInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenNetworkManagementConnectivityTestSourceNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkManagementConnectivityTestSourceNetworkType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkManagementConnectivityTestSourceProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkManagementConnectivityTestDestination(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["ip_address"] = + flattenNetworkManagementConnectivityTestDestinationIpAddress(original["ipAddress"], d, config) + transformed["port"] = + flattenNetworkManagementConnectivityTestDestinationPort(original["port"], d, config) + transformed["instance"] = + flattenNetworkManagementConnectivityTestDestinationInstance(original["instance"], d, config) + transformed["network"] = + flattenNetworkManagementConnectivityTestDestinationNetwork(original["network"], d, config) + transformed["project_id"] = + flattenNetworkManagementConnectivityTestDestinationProjectId(original["projectId"], d, config) + return []interface{}{transformed} +} +func flattenNetworkManagementConnectivityTestDestinationIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkManagementConnectivityTestDestinationPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if 
floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNetworkManagementConnectivityTestDestinationInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkManagementConnectivityTestDestinationNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkManagementConnectivityTestDestinationProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkManagementConnectivityTestProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkManagementConnectivityTestRelatedProjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkManagementConnectivityTestLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNetworkManagementConnectivityTestName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + // projects/X/tests/Y - note not "connectivityTests" + f, err := tpgresource.ParseGlobalFieldValue("tests", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for zone: %s", err) + } + return f.RelativeLink(), nil +} + +func expandNetworkManagementConnectivityTestDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIpAddress, err := expandNetworkManagementConnectivityTestSourceIpAddress(original["ip_address"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpAddress); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipAddress"] = transformedIpAddress + } + + transformedPort, err := expandNetworkManagementConnectivityTestSourcePort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedInstance, err := expandNetworkManagementConnectivityTestSourceInstance(original["instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["instance"] = transformedInstance + } + + transformedNetwork, err := expandNetworkManagementConnectivityTestSourceNetwork(original["network"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["network"] = transformedNetwork + } + + transformedNetworkType, err := expandNetworkManagementConnectivityTestSourceNetworkType(original["network_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetworkType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["networkType"] = transformedNetworkType + } + + transformedProjectId, err := expandNetworkManagementConnectivityTestSourceProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + return transformed, nil +} + 
+func expandNetworkManagementConnectivityTestSourceIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestSourcePort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestSourceInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestSourceNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestSourceNetworkType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestSourceProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestDestination(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIpAddress, err := expandNetworkManagementConnectivityTestDestinationIpAddress(original["ip_address"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpAddress); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipAddress"] = transformedIpAddress + } + + transformedPort, err := expandNetworkManagementConnectivityTestDestinationPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedInstance, err := expandNetworkManagementConnectivityTestDestinationInstance(original["instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInstance); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["instance"] = transformedInstance + } + + transformedNetwork, err := expandNetworkManagementConnectivityTestDestinationNetwork(original["network"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["network"] = transformedNetwork + } + + transformedProjectId, err := expandNetworkManagementConnectivityTestDestinationProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + return transformed, nil +} + +func expandNetworkManagementConnectivityTestDestinationIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestDestinationPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestDestinationInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestDestinationNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestDestinationProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestRelatedProjects(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkManagementConnectivityTestLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkmanagement/resource_network_management_connectivity_test_resource_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkmanagement/resource_network_management_connectivity_test_resource_sweeper.go new file mode 100644 index 0000000000..a2c6c037bd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkmanagement/resource_network_management_connectivity_test_resource_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package networkmanagement + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NetworkManagementConnectivityTest", testSweepNetworkManagementConnectivityTest) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNetworkManagementConnectivityTest(region string) error { + resourceName := "NetworkManagementConnectivityTest" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://networkmanagement.googleapis.com/v1/projects/{{project}}/locations/global/connectivityTests", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: 
listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["connectivityTests"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://networkmanagement.googleapis.com/v1/projects/{{project}}/locations/global/connectivityTests/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/network_security_address_group_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/network_security_address_group_operation.go new file mode 100644 index 0000000000..37d3ad28a7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/network_security_address_group_operation.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package networksecurity + +import ( + "time" + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// NetworkSecurityAddressGroupOperationWaitTime is specific for address group resource because the only difference is that it does not need project param. +func NetworkSecurityAddressGroupOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + // project is not necessary for this operation. + return NetworkSecurityOperationWaitTime(config, op, "", activity, userAgent, timeout) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/network_security_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/network_security_operation.go new file mode 100644 index 0000000000..f5eb44ac50 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/network_security_operation.go @@ -0,0 +1,74 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package networksecurity + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type NetworkSecurityOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *NetworkSecurityOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.NetworkSecurityBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createNetworkSecurityWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*NetworkSecurityOperationWaiter, error) { + w := &NetworkSecurityOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +func NetworkSecurityOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createNetworkSecurityWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_address_group.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_address_group.go new file mode 100644 index 0000000000..b9fc69af88 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_address_group.go @@ -0,0 +1,498 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package networksecurity + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceNetworkSecurityAddressGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkSecurityAddressGroupCreate, + Read: resourceNetworkSecurityAddressGroupRead, + Update: resourceNetworkSecurityAddressGroupUpdate, + Delete: resourceNetworkSecurityAddressGroupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNetworkSecurityAddressGroupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "capacity": { + Type: schema.TypeInt, + Required: true, + Description: `Capacity of the Address Group.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + Description: `The location of the gateway security policy. 
+The default value is 'global'.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the AddressGroup resource.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"IPV4", "IPV6"}), + Description: `The type of the Address Group. Possible values are "IPV4" or "IPV6". Possible values: ["IPV4", "IPV6"]`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Free-text description of the resource.`, + }, + "items": { + Type: schema.TypeList, + Optional: true, + Description: `List of items.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Set of label tags associated with the AddressGroup resource. +An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "parent": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the parent this address group belongs to. Format: organizations/{organization_id} or projects/{project_id}.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp when the resource was created. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z"`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp when the resource was updated. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceNetworkSecurityAddressGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkSecurityAddressGroupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandNetworkSecurityAddressGroupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + typeProp, err := expandNetworkSecurityAddressGroupType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + itemsProp, err := expandNetworkSecurityAddressGroupItems(d.Get("items"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("items"); !tpgresource.IsEmptyValue(reflect.ValueOf(itemsProp)) && (ok || !reflect.DeepEqual(v, itemsProp)) { + obj["items"] = itemsProp + } + capacityProp, err := expandNetworkSecurityAddressGroupCapacity(d.Get("capacity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("capacity"); !tpgresource.IsEmptyValue(reflect.ValueOf(capacityProp)) && (ok || !reflect.DeepEqual(v, capacityProp)) { + obj["capacity"] = capacityProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{NetworkSecurityBasePath}}{{parent}}/locations/{{location}}/addressGroups?addressGroupId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AddressGroup: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating AddressGroup: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/locations/{{location}}/addressGroups/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = NetworkSecurityAddressGroupOperationWaitTime( + config, res, "Creating AddressGroup", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create AddressGroup: %s", err) + } + + log.Printf("[DEBUG] Finished creating AddressGroup %q: %#v", d.Id(), res) + + return resourceNetworkSecurityAddressGroupRead(d, meta) +} + +func resourceNetworkSecurityAddressGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}{{parent}}/locations/{{location}}/addressGroups/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetworkSecurityAddressGroup %q", d.Id())) + } + + if err := d.Set("description", flattenNetworkSecurityAddressGroupDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading AddressGroup: %s", err) + } + if err := d.Set("create_time", flattenNetworkSecurityAddressGroupCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading AddressGroup: %s", err) + } + if err := d.Set("update_time", flattenNetworkSecurityAddressGroupUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading AddressGroup: %s", err) + } + if err := d.Set("labels", flattenNetworkSecurityAddressGroupLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading AddressGroup: %s", err) + } + if err := d.Set("type", flattenNetworkSecurityAddressGroupType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading AddressGroup: %s", err) + } + if err := d.Set("items", flattenNetworkSecurityAddressGroupItems(res["items"], d, config)); err != nil { + return fmt.Errorf("Error reading AddressGroup: %s", err) + } + if err := d.Set("capacity", flattenNetworkSecurityAddressGroupCapacity(res["capacity"], d, config)); err != nil { + return fmt.Errorf("Error reading AddressGroup: %s", err) + } + + return nil +} + +func resourceNetworkSecurityAddressGroupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkSecurityAddressGroupDescription(d.Get("description"), d, config) + if err != 
nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandNetworkSecurityAddressGroupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + typeProp, err := expandNetworkSecurityAddressGroupType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + itemsProp, err := expandNetworkSecurityAddressGroupItems(d.Get("items"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("items"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, itemsProp)) { + obj["items"] = itemsProp + } + capacityProp, err := expandNetworkSecurityAddressGroupCapacity(d.Get("capacity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("capacity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, capacityProp)) { + obj["capacity"] = capacityProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}{{parent}}/locations/{{location}}/addressGroups/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating AddressGroup %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("type") { + updateMask = append(updateMask, "type") + } + + if d.HasChange("items") { + updateMask = append(updateMask, "items") + } + + if d.HasChange("capacity") { + 
updateMask = append(updateMask, "capacity") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating AddressGroup %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating AddressGroup %q: %#v", d.Id(), res) + } + + err = NetworkSecurityAddressGroupOperationWaitTime( + config, res, "Updating AddressGroup", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + return resourceNetworkSecurityAddressGroupRead(d, meta) +} + +func resourceNetworkSecurityAddressGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}{{parent}}/locations/{{location}}/addressGroups/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AddressGroup %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: 
userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AddressGroup") + } + + err = NetworkSecurityAddressGroupOperationWaitTime( + config, res, "Deleting AddressGroup", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting AddressGroup %q: %#v", d.Id(), res) + return nil +} + +func resourceNetworkSecurityAddressGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/locations/(?P[^/]+)/addressGroups/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/locations/{{location}}/addressGroups/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNetworkSecurityAddressGroupDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityAddressGroupCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityAddressGroupUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityAddressGroupLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityAddressGroupType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityAddressGroupItems(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityAddressGroupCapacity(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandNetworkSecurityAddressGroupDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityAddressGroupLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandNetworkSecurityAddressGroupType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityAddressGroupItems(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityAddressGroupCapacity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy.go new file mode 100644 index 0000000000..6e4166f0ed --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy.go @@ -0,0 +1,388 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package networksecurity + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceNetworkSecurityGatewaySecurityPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkSecurityGatewaySecurityPolicyCreate, + Read: resourceNetworkSecurityGatewaySecurityPolicyRead, + Update: resourceNetworkSecurityGatewaySecurityPolicyUpdate, + Delete: resourceNetworkSecurityGatewaySecurityPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNetworkSecurityGatewaySecurityPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the resource. 
Name is of the form projects/{project}/locations/{location}/gatewaySecurityPolicies/{gatewaySecurityPolicy} +gatewaySecurityPolicy should match the pattern:(^a-z?$).`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A free-text description of the resource. Max length 1024 characters.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `The location of the gateway security policy. +The default value is 'global'.`, + Default: "global", + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp when the resource was created. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z"`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `Server-defined URL of this resource.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp when the resource was updated. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceNetworkSecurityGatewaySecurityPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkSecurityGatewaySecurityPolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies?gatewaySecurityPolicyId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new GatewaySecurityPolicy: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GatewaySecurityPolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating GatewaySecurityPolicy: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Creating GatewaySecurityPolicy", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create GatewaySecurityPolicy: %s", err) + } + + log.Printf("[DEBUG] Finished creating GatewaySecurityPolicy %q: %#v", d.Id(), res) + + return resourceNetworkSecurityGatewaySecurityPolicyRead(d, meta) +} + +func resourceNetworkSecurityGatewaySecurityPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GatewaySecurityPolicy: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetworkSecurityGatewaySecurityPolicy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicy: %s", err) + } + + if err := d.Set("self_link", 
flattenNetworkSecurityGatewaySecurityPolicySelfLink(res["selfLink"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicy: %s", err) + } + if err := d.Set("create_time", flattenNetworkSecurityGatewaySecurityPolicyCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicy: %s", err) + } + if err := d.Set("update_time", flattenNetworkSecurityGatewaySecurityPolicyUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicy: %s", err) + } + if err := d.Set("description", flattenNetworkSecurityGatewaySecurityPolicyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicy: %s", err) + } + + return nil +} + +func resourceNetworkSecurityGatewaySecurityPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GatewaySecurityPolicy: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkSecurityGatewaySecurityPolicyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating GatewaySecurityPolicy %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = 
append(updateMask, "description") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating GatewaySecurityPolicy %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating GatewaySecurityPolicy %q: %#v", d.Id(), res) + } + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Updating GatewaySecurityPolicy", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceNetworkSecurityGatewaySecurityPolicyRead(d, meta) +} + +func resourceNetworkSecurityGatewaySecurityPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GatewaySecurityPolicy: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting GatewaySecurityPolicy %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "GatewaySecurityPolicy") + } + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Deleting GatewaySecurityPolicy", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting GatewaySecurityPolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceNetworkSecurityGatewaySecurityPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/gatewaySecurityPolicies/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNetworkSecurityGatewaySecurityPolicySelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityGatewaySecurityPolicyCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityGatewaySecurityPolicyUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityGatewaySecurityPolicyDescription(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNetworkSecurityGatewaySecurityPolicyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy_rule.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy_rule.go new file mode 100644 index 0000000000..ebc9e024d5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy_rule.go @@ -0,0 +1,602 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package networksecurity + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceNetworkSecurityGatewaySecurityPolicyRule() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkSecurityGatewaySecurityPolicyRuleCreate, + Read: resourceNetworkSecurityGatewaySecurityPolicyRuleRead, + Update: resourceNetworkSecurityGatewaySecurityPolicyRuleUpdate, + Delete: resourceNetworkSecurityGatewaySecurityPolicyRuleDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNetworkSecurityGatewaySecurityPolicyRuleImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "basic_profile": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"BASIC_PROFILE_UNSPECIFIED", "ALLOW", "DENY"}), + Description: `Profile which tells what the primitive action should be. Possible values are: * ALLOW * DENY. 
Possible values: ["BASIC_PROFILE_UNSPECIFIED", "ALLOW", "DENY"]`, + }, + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether the rule is enforced.`, + }, + "gateway_security_policy": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the gatewat security policy this rule belongs to.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location of the gateway security policy.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. ame is the full resource name so projects/{project}/locations/{location}/gatewaySecurityPolicies/{gateway_security_policy}/rules/{rule} +rule should match the pattern: (^a-z?$).`, + }, + "priority": { + Type: schema.TypeInt, + Required: true, + Description: `Priority of the rule. Lower number corresponds to higher precedence.`, + }, + "session_matcher": { + Type: schema.TypeString, + Required: true, + Description: `CEL expression for matching on session criteria.`, + }, + "application_matcher": { + Type: schema.TypeString, + Optional: true, + Description: `CEL expression for matching on L7/application level criteria.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Free-text description of the resource.`, + }, + "tls_inspection_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Flag to enable TLS inspection of traffic matching on. Can only be true if the +parent GatewaySecurityPolicy references a TLSInspectionConfig.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp when the resource was created. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
+Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z"`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `Server-defined URL of this resource.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp when the resource was updated. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceNetworkSecurityGatewaySecurityPolicyRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + enabledProp, err := expandNetworkSecurityGatewaySecurityPolicyRuleEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + priorityProp, err := expandNetworkSecurityGatewaySecurityPolicyRulePriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(priorityProp)) && (ok || !reflect.DeepEqual(v, priorityProp)) { + obj["priority"] = priorityProp + } + descriptionProp, err := expandNetworkSecurityGatewaySecurityPolicyRuleDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + sessionMatcherProp, err := 
expandNetworkSecurityGatewaySecurityPolicyRuleSessionMatcher(d.Get("session_matcher"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("session_matcher"); !tpgresource.IsEmptyValue(reflect.ValueOf(sessionMatcherProp)) && (ok || !reflect.DeepEqual(v, sessionMatcherProp)) { + obj["sessionMatcher"] = sessionMatcherProp + } + applicationMatcherProp, err := expandNetworkSecurityGatewaySecurityPolicyRuleApplicationMatcher(d.Get("application_matcher"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("application_matcher"); !tpgresource.IsEmptyValue(reflect.ValueOf(applicationMatcherProp)) && (ok || !reflect.DeepEqual(v, applicationMatcherProp)) { + obj["applicationMatcher"] = applicationMatcherProp + } + tlsInspectionEnabledProp, err := expandNetworkSecurityGatewaySecurityPolicyRuleTlsInspectionEnabled(d.Get("tls_inspection_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tls_inspection_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(tlsInspectionEnabledProp)) && (ok || !reflect.DeepEqual(v, tlsInspectionEnabledProp)) { + obj["tlsInspectionEnabled"] = tlsInspectionEnabledProp + } + basicProfileProp, err := expandNetworkSecurityGatewaySecurityPolicyRuleBasicProfile(d.Get("basic_profile"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("basic_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(basicProfileProp)) && (ok || !reflect.DeepEqual(v, basicProfileProp)) { + obj["basicProfile"] = basicProfileProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{gateway_security_policy}}/rules?gatewaySecurityPolicyRuleId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new GatewaySecurityPolicyRule: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return 
fmt.Errorf("Error fetching project for GatewaySecurityPolicyRule: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating GatewaySecurityPolicyRule: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{gateway_security_policy}}/rules/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Creating GatewaySecurityPolicyRule", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create GatewaySecurityPolicyRule: %s", err) + } + + log.Printf("[DEBUG] Finished creating GatewaySecurityPolicyRule %q: %#v", d.Id(), res) + + return resourceNetworkSecurityGatewaySecurityPolicyRuleRead(d, meta) +} + +func resourceNetworkSecurityGatewaySecurityPolicyRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{gateway_security_policy}}/rules/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for 
GatewaySecurityPolicyRule: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetworkSecurityGatewaySecurityPolicyRule %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicyRule: %s", err) + } + + if err := d.Set("self_link", flattenNetworkSecurityGatewaySecurityPolicyRuleSelfLink(res["selfLink"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicyRule: %s", err) + } + if err := d.Set("create_time", flattenNetworkSecurityGatewaySecurityPolicyRuleCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicyRule: %s", err) + } + if err := d.Set("update_time", flattenNetworkSecurityGatewaySecurityPolicyRuleUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicyRule: %s", err) + } + if err := d.Set("enabled", flattenNetworkSecurityGatewaySecurityPolicyRuleEnabled(res["enabled"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicyRule: %s", err) + } + if err := d.Set("priority", flattenNetworkSecurityGatewaySecurityPolicyRulePriority(res["priority"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicyRule: %s", err) + } + if err := d.Set("description", flattenNetworkSecurityGatewaySecurityPolicyRuleDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicyRule: %s", err) + } + if err := d.Set("session_matcher", 
flattenNetworkSecurityGatewaySecurityPolicyRuleSessionMatcher(res["sessionMatcher"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicyRule: %s", err) + } + if err := d.Set("application_matcher", flattenNetworkSecurityGatewaySecurityPolicyRuleApplicationMatcher(res["applicationMatcher"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicyRule: %s", err) + } + if err := d.Set("tls_inspection_enabled", flattenNetworkSecurityGatewaySecurityPolicyRuleTlsInspectionEnabled(res["tlsInspectionEnabled"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicyRule: %s", err) + } + if err := d.Set("basic_profile", flattenNetworkSecurityGatewaySecurityPolicyRuleBasicProfile(res["basicProfile"], d, config)); err != nil { + return fmt.Errorf("Error reading GatewaySecurityPolicyRule: %s", err) + } + + return nil +} + +func resourceNetworkSecurityGatewaySecurityPolicyRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GatewaySecurityPolicyRule: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + enabledProp, err := expandNetworkSecurityGatewaySecurityPolicyRuleEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) { + obj["enabled"] = enabledProp + } + priorityProp, err := expandNetworkSecurityGatewaySecurityPolicyRulePriority(d.Get("priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("priority"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
priorityProp)) { + obj["priority"] = priorityProp + } + descriptionProp, err := expandNetworkSecurityGatewaySecurityPolicyRuleDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + sessionMatcherProp, err := expandNetworkSecurityGatewaySecurityPolicyRuleSessionMatcher(d.Get("session_matcher"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("session_matcher"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sessionMatcherProp)) { + obj["sessionMatcher"] = sessionMatcherProp + } + applicationMatcherProp, err := expandNetworkSecurityGatewaySecurityPolicyRuleApplicationMatcher(d.Get("application_matcher"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("application_matcher"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, applicationMatcherProp)) { + obj["applicationMatcher"] = applicationMatcherProp + } + tlsInspectionEnabledProp, err := expandNetworkSecurityGatewaySecurityPolicyRuleTlsInspectionEnabled(d.Get("tls_inspection_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tls_inspection_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tlsInspectionEnabledProp)) { + obj["tlsInspectionEnabled"] = tlsInspectionEnabledProp + } + basicProfileProp, err := expandNetworkSecurityGatewaySecurityPolicyRuleBasicProfile(d.Get("basic_profile"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("basic_profile"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, basicProfileProp)) { + obj["basicProfile"] = basicProfileProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{gateway_security_policy}}/rules/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating GatewaySecurityPolicyRule %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("enabled") { + updateMask = append(updateMask, "enabled") + } + + if d.HasChange("priority") { + updateMask = append(updateMask, "priority") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("session_matcher") { + updateMask = append(updateMask, "sessionMatcher") + } + + if d.HasChange("application_matcher") { + updateMask = append(updateMask, "applicationMatcher") + } + + if d.HasChange("tls_inspection_enabled") { + updateMask = append(updateMask, "tlsInspectionEnabled") + } + + if d.HasChange("basic_profile") { + updateMask = append(updateMask, "basicProfile") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating GatewaySecurityPolicyRule %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating GatewaySecurityPolicyRule %q: %#v", d.Id(), res) + } + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Updating GatewaySecurityPolicyRule", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return 
resourceNetworkSecurityGatewaySecurityPolicyRuleRead(d, meta) +} + +func resourceNetworkSecurityGatewaySecurityPolicyRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for GatewaySecurityPolicyRule: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{gateway_security_policy}}/rules/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting GatewaySecurityPolicyRule %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "GatewaySecurityPolicyRule") + } + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Deleting GatewaySecurityPolicyRule", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting GatewaySecurityPolicyRule %q: %#v", d.Id(), res) + return nil +} + +func resourceNetworkSecurityGatewaySecurityPolicyRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + 
"projects/(?P[^/]+)/locations/(?P[^/]+)/gatewaySecurityPolicies/(?P[^/]+)/rules/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{gateway_security_policy}}/rules/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNetworkSecurityGatewaySecurityPolicyRuleSelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityGatewaySecurityPolicyRuleCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityGatewaySecurityPolicyRuleUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityGatewaySecurityPolicyRuleEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityGatewaySecurityPolicyRulePriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNetworkSecurityGatewaySecurityPolicyRuleDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityGatewaySecurityPolicyRuleSessionMatcher(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityGatewaySecurityPolicyRuleApplicationMatcher(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityGatewaySecurityPolicyRuleTlsInspectionEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityGatewaySecurityPolicyRuleBasicProfile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNetworkSecurityGatewaySecurityPolicyRuleEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityGatewaySecurityPolicyRulePriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityGatewaySecurityPolicyRuleDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityGatewaySecurityPolicyRuleSessionMatcher(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityGatewaySecurityPolicyRuleApplicationMatcher(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityGatewaySecurityPolicyRuleTlsInspectionEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityGatewaySecurityPolicyRuleBasicProfile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy_rule_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy_rule_sweeper.go new file mode 100644 index 0000000000..893a2844d6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy_rule_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package networksecurity + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NetworkSecurityGatewaySecurityPolicyRule", testSweepNetworkSecurityGatewaySecurityPolicyRule) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNetworkSecurityGatewaySecurityPolicyRule(region string) error { + resourceName := "NetworkSecurityGatewaySecurityPolicyRule" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://networksecurity.googleapis.com/v1/projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{gateway_security_policy}}/rules", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: 
config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["gatewaySecurityPolicyRules"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://networksecurity.googleapis.com/v1/projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{gateway_security_policy}}/rules/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy_sweeper.go new file mode 100644 index 0000000000..be44ad4c0a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_gateway_security_policy_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package networksecurity + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NetworkSecurityGatewaySecurityPolicy", testSweepNetworkSecurityGatewaySecurityPolicy) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNetworkSecurityGatewaySecurityPolicy(region string) error { + resourceName := "NetworkSecurityGatewaySecurityPolicy" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://networksecurity.googleapis.com/v1/projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: 
config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["gatewaySecurityPolicies"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://networksecurity.googleapis.com/v1/projects/{{project}}/locations/{{location}}/gatewaySecurityPolicies/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_url_lists.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_url_lists.go new file mode 100644 index 0000000000..7b6e998884 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_url_lists.go @@ -0,0 +1,409 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package networksecurity + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceNetworkSecurityUrlLists() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkSecurityUrlListsCreate, + Read: resourceNetworkSecurityUrlListsRead, + Update: resourceNetworkSecurityUrlListsUpdate, + Delete: resourceNetworkSecurityUrlListsDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNetworkSecurityUrlListsImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + Description: `The location of the url lists.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `Short name of the UrlList resource to be created. +This value should be 1-63 characters long, containing only letters, numbers, hyphens, and underscores, and should not start with a number. E.g. 'urlList'.`, + }, + "values": { + Type: schema.TypeList, + Required: true, + Description: `FQDNs and URLs.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Free-text description of the resource.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Time when the security policy was created. +A timestamp in RFC3339 UTC 'Zulu' format, with nanosecond resolution and up to nine fractional digits. 
+Examples: '2014-10-02T15:01:23Z' and '2014-10-02T15:01:23.045123456Z'`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Time when the security policy was updated. +A timestamp in RFC3339 UTC 'Zulu' format, with nanosecond resolution and up to nine fractional digits. +Examples: '2014-10-02T15:01:23Z' and '2014-10-02T15:01:23.045123456Z'.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceNetworkSecurityUrlListsCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkSecurityUrlListsDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + valuesProp, err := expandNetworkSecurityUrlListsValues(d.Get("values"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("values"); !tpgresource.IsEmptyValue(reflect.ValueOf(valuesProp)) && (ok || !reflect.DeepEqual(v, valuesProp)) { + obj["values"] = valuesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/urlLists?urlListId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new UrlLists: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for UrlLists: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating UrlLists: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/urlLists/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Creating UrlLists", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create UrlLists: %s", err) + } + + log.Printf("[DEBUG] Finished creating UrlLists %q: %#v", d.Id(), res) + + return resourceNetworkSecurityUrlListsRead(d, meta) +} + +func resourceNetworkSecurityUrlListsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/urlLists/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for UrlLists: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: 
userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetworkSecurityUrlLists %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading UrlLists: %s", err) + } + + if err := d.Set("create_time", flattenNetworkSecurityUrlListsCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading UrlLists: %s", err) + } + if err := d.Set("update_time", flattenNetworkSecurityUrlListsUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading UrlLists: %s", err) + } + if err := d.Set("description", flattenNetworkSecurityUrlListsDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading UrlLists: %s", err) + } + if err := d.Set("values", flattenNetworkSecurityUrlListsValues(res["values"], d, config)); err != nil { + return fmt.Errorf("Error reading UrlLists: %s", err) + } + + return nil +} + +func resourceNetworkSecurityUrlListsUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for UrlLists: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkSecurityUrlListsDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + valuesProp, err := expandNetworkSecurityUrlListsValues(d.Get("values"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("values"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, valuesProp)) { + obj["values"] = valuesProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/urlLists/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating UrlLists %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("values") { + updateMask = append(updateMask, "values") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating UrlLists %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating UrlLists %q: %#v", d.Id(), res) + } + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Updating UrlLists", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceNetworkSecurityUrlListsRead(d, meta) +} + +func resourceNetworkSecurityUrlListsDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for UrlLists: %s", err) + } + billingProject 
= project + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkSecurityBasePath}}projects/{{project}}/locations/{{location}}/urlLists/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting UrlLists %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "UrlLists") + } + + err = NetworkSecurityOperationWaitTime( + config, res, project, "Deleting UrlLists", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting UrlLists %q: %#v", d.Id(), res) + return nil +} + +func resourceNetworkSecurityUrlListsImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/urlLists/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/urlLists/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNetworkSecurityUrlListsCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityUrlListsUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + 
return v +} + +func flattenNetworkSecurityUrlListsDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkSecurityUrlListsValues(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNetworkSecurityUrlListsDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkSecurityUrlListsValues(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_url_lists_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_url_lists_sweeper.go new file mode 100644 index 0000000000..82dd99cf07 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networksecurity/resource_network_security_url_lists_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package networksecurity + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NetworkSecurityUrlLists", testSweepNetworkSecurityUrlLists) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNetworkSecurityUrlLists(region string) error { + resourceName := "NetworkSecurityUrlLists" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://networksecurity.googleapis.com/v1/projects/{{project}}/locations/{{location}}/urlLists", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err 
!= nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["urlListss"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://networksecurity.googleapis.com/v1/projects/{{project}}/locations/{{location}}/urlLists/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/network_services_operation.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/network_services_operation.go new file mode 100644 index 0000000000..4f8925048b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/network_services_operation.go @@ -0,0 +1,74 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package networkservices + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type NetworkServicesOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *NetworkServicesOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.NetworkServicesBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createNetworkServicesWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*NetworkServicesOperationWaiter, error) { + w := &NetworkServicesOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +func NetworkServicesOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createNetworkServicesWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_keyset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_keyset.go new file mode 100644 index 0000000000..3dfc6ded7f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_keyset.go @@ -0,0 +1,616 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package networkservices + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceNetworkServicesEdgeCacheKeyset() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkServicesEdgeCacheKeysetCreate, + Read: resourceNetworkServicesEdgeCacheKeysetRead, + Update: resourceNetworkServicesEdgeCacheKeysetUpdate, + Delete: resourceNetworkServicesEdgeCacheKeysetDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNetworkServicesEdgeCacheKeysetImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(90 * time.Minute), + Update: schema.DefaultTimeout(90 * time.Minute), + Delete: schema.DefaultTimeout(90 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource; provided by the client when the resource is created. 
+The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, +and all following characters must be a dash, underscore, letter or digit.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A human-readable description of the resource.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Set of label tags associated with the EdgeCache resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "public_key": { + Type: schema.TypeList, + Optional: true, + Description: `An ordered list of Ed25519 public keys to use for validating signed requests. +You must specify 'public_keys' or 'validation_shared_keys' (or both). The keys in 'public_keys' are checked first. +You may specify no more than one Google-managed public key. +If you specify 'public_keys', you must specify at least one (1) key and may specify up to three (3) keys. + +Ed25519 public keys are not secret, and only allow Google to validate a request was signed by your corresponding private key. +Ensure that the private key is kept secret, and that only authorized users can add public keys to a keyset.`, + MinItems: 1, + MaxItems: 3, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the public key. The ID must be 1-63 characters long, and comply with RFC1035. +The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* +which means the first character must be a letter, and all following characters must be a dash, underscore, letter or digit.`, + }, + "managed": { + Type: schema.TypeBool, + Optional: true, + Description: `Set to true to have the CDN automatically manage this public key value.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `The base64-encoded value of the Ed25519 public key. 
The base64 encoding can be padded (44 bytes) or unpadded (43 bytes). +Representations or encodings of the public key other than this will be rejected with an error.`, + Sensitive: true, + }, + }, + }, + AtLeastOneOf: []string{"public_key", "validation_shared_keys"}, + }, + "validation_shared_keys": { + Type: schema.TypeList, + Optional: true, + Description: `An ordered list of shared keys to use for validating signed requests. +Shared keys are secret. Ensure that only authorized users can add 'validation_shared_keys' to a keyset. +You can rotate keys by appending (pushing) a new key to the list of 'validation_shared_keys' and removing any superseded keys. +You must specify 'public_keys' or 'validation_shared_keys' (or both). The keys in 'public_keys' are checked first.`, + MinItems: 1, + MaxItems: 3, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_version": { + Type: schema.TypeString, + Required: true, + Description: `The name of the secret version in Secret Manager. + +The resource name of the secret version must be in the format 'projects/*/secrets/*/versions/*' where the '*' values are replaced by the secrets themselves. +The secrets must be at least 16 bytes large. The recommended secret size depends on the signature algorithm you are using. +* If you are using HMAC-SHA1, we suggest 20-byte secrets. +* If you are using HMAC-SHA256, we suggest 32-byte secrets. 
+See RFC 2104, Section 3 for more details on these recommendations.`, + }, + }, + }, + AtLeastOneOf: []string{"public_key", "validation_shared_keys"}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceNetworkServicesEdgeCacheKeysetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkServicesEdgeCacheKeysetDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandNetworkServicesEdgeCacheKeysetLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + publicKeysProp, err := expandNetworkServicesEdgeCacheKeysetPublicKey(d.Get("public_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("public_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(publicKeysProp)) && (ok || !reflect.DeepEqual(v, publicKeysProp)) { + obj["publicKeys"] = publicKeysProp + } + validationSharedKeysProp, err := expandNetworkServicesEdgeCacheKeysetValidationSharedKeys(d.Get("validation_shared_keys"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("validation_shared_keys"); !tpgresource.IsEmptyValue(reflect.ValueOf(validationSharedKeysProp)) && (ok || !reflect.DeepEqual(v, validationSharedKeysProp)) { + obj["validationSharedKeys"] = validationSharedKeysProp + } + + url, 
err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheKeysets?edgeCacheKeysetId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new EdgeCacheKeyset: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EdgeCacheKeyset: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return fmt.Errorf("Error creating EdgeCacheKeyset: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = NetworkServicesOperationWaitTime( + config, res, project, "Creating EdgeCacheKeyset", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create EdgeCacheKeyset: %s", err) + } + + log.Printf("[DEBUG] Finished creating EdgeCacheKeyset %q: %#v", d.Id(), res) + + return resourceNetworkServicesEdgeCacheKeysetRead(d, meta) +} + +func resourceNetworkServicesEdgeCacheKeysetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EdgeCacheKeyset: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetworkServicesEdgeCacheKeyset %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) + } + + if err := d.Set("description", flattenNetworkServicesEdgeCacheKeysetDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) + } + if err := d.Set("labels", flattenNetworkServicesEdgeCacheKeysetLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) + } + if err := d.Set("public_key", flattenNetworkServicesEdgeCacheKeysetPublicKey(res["publicKeys"], d, config)); err != nil { + return fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) + } + if err := d.Set("validation_shared_keys", flattenNetworkServicesEdgeCacheKeysetValidationSharedKeys(res["validationSharedKeys"], d, config)); err != nil { + return fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) + } + + return nil +} + +func resourceNetworkServicesEdgeCacheKeysetUpdate(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EdgeCacheKeyset: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandNetworkServicesEdgeCacheKeysetDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandNetworkServicesEdgeCacheKeysetLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + publicKeysProp, err := expandNetworkServicesEdgeCacheKeysetPublicKey(d.Get("public_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("public_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, publicKeysProp)) { + obj["publicKeys"] = publicKeysProp + } + validationSharedKeysProp, err := expandNetworkServicesEdgeCacheKeysetValidationSharedKeys(d.Get("validation_shared_keys"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("validation_shared_keys"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, validationSharedKeysProp)) { + obj["validationSharedKeys"] = validationSharedKeysProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating EdgeCacheKeyset %q: %#v", d.Id(), obj) + updateMask := 
[]string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("public_key") { + updateMask = append(updateMask, "publicKeys") + } + + if d.HasChange("validation_shared_keys") { + updateMask = append(updateMask, "validationSharedKeys") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + + if err != nil { + return fmt.Errorf("Error updating EdgeCacheKeyset %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating EdgeCacheKeyset %q: %#v", d.Id(), res) + } + + err = NetworkServicesOperationWaitTime( + config, res, project, "Updating EdgeCacheKeyset", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceNetworkServicesEdgeCacheKeysetRead(d, meta) +} + +func resourceNetworkServicesEdgeCacheKeysetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for EdgeCacheKeyset: %s", err) + } + 
billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting EdgeCacheKeyset %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "EdgeCacheKeyset") + } + + err = NetworkServicesOperationWaitTime( + config, res, project, "Deleting EdgeCacheKeyset", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting EdgeCacheKeyset %q: %#v", d.Id(), res) + return nil +} + +func resourceNetworkServicesEdgeCacheKeysetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/global/edgeCacheKeysets/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNetworkServicesEdgeCacheKeysetDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return 
v +} + +func flattenNetworkServicesEdgeCacheKeysetLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesEdgeCacheKeysetPublicKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "id": flattenNetworkServicesEdgeCacheKeysetPublicKeyId(original["id"], d, config), + "value": flattenNetworkServicesEdgeCacheKeysetPublicKeyValue(original["value"], d, config), + "managed": flattenNetworkServicesEdgeCacheKeysetPublicKeyManaged(original["managed"], d, config), + }) + } + return transformed +} +func flattenNetworkServicesEdgeCacheKeysetPublicKeyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesEdgeCacheKeysetPublicKeyValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesEdgeCacheKeysetPublicKeyManaged(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesEdgeCacheKeysetValidationSharedKeys(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "secret_version": 
flattenNetworkServicesEdgeCacheKeysetValidationSharedKeysSecretVersion(original["secretVersion"], d, config), + }) + } + return transformed +} +func flattenNetworkServicesEdgeCacheKeysetValidationSharedKeysSecretVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNetworkServicesEdgeCacheKeysetDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkServicesEdgeCacheKeysetLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandNetworkServicesEdgeCacheKeysetPublicKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandNetworkServicesEdgeCacheKeysetPublicKeyId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedValue, err := expandNetworkServicesEdgeCacheKeysetPublicKeyValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["value"] = transformedValue + } + + transformedManaged, err := expandNetworkServicesEdgeCacheKeysetPublicKeyManaged(original["managed"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedManaged); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["managed"] = transformedManaged + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNetworkServicesEdgeCacheKeysetPublicKeyId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkServicesEdgeCacheKeysetPublicKeyValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkServicesEdgeCacheKeysetPublicKeyManaged(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkServicesEdgeCacheKeysetValidationSharedKeys(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSecretVersion, err := expandNetworkServicesEdgeCacheKeysetValidationSharedKeysSecretVersion(original["secret_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["secretVersion"] = transformedSecretVersion + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNetworkServicesEdgeCacheKeysetValidationSharedKeysSecretVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_keyset_sweeper.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_keyset_sweeper.go new file mode 100644 index 0000000000..40e1486dc6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_keyset_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package networkservices + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NetworkServicesEdgeCacheKeyset", testSweepNetworkServicesEdgeCacheKeyset) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNetworkServicesEdgeCacheKeyset(region string) error { + resourceName := "NetworkServicesEdgeCacheKeyset" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = 
config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://networkservices.googleapis.com/v1/projects/{{project}}/locations/global/edgeCacheKeysets", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["edgeCacheKeysets"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://networkservices.googleapis.com/v1/projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_services_edge_cache_origin.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_origin.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_services_edge_cache_origin.go rename to 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_origin.go index 983f61bc53..8dc93f4f3b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_services_edge_cache_origin.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_origin.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package networkservices import ( "fmt" @@ -23,6 +26,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceNetworkServicesEdgeCacheOrigin() *schema.Resource { @@ -96,7 +103,7 @@ This is the resource name of the secret version in the format 'projects/*/secret "failover_origin": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareResourceNames, + DiffSuppressFunc: tpgresource.CompareResourceNames, Description: `The Origin resource to try when the current origin cannot be reached. After maxAttempts is reached, the configured failoverOrigin will be used to fulfil the request. 
@@ -236,7 +243,7 @@ Defaults to port 443 for HTTP2 and HTTPS protocols, and port 80 for HTTP.`, Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"HTTP2", "HTTPS", "HTTP", ""}), + ValidateFunc: verify.ValidateEnum([]string{"HTTP2", "HTTPS", "HTTP", ""}), Description: `The protocol to use to connect to the configured origin. Defaults to HTTP2, and it is strongly recommended that users use HTTP2 for both security & performance. When using HTTP2 or HTTPS as the protocol, a valid, publicly-signed, unexpired TLS (SSL) certificate must be presented by the origin server. Possible values: ["HTTP2", "HTTPS", "HTTP"]`, @@ -265,7 +272,7 @@ Valid values are: - FORBIDDEN: Retry if the origin returns a HTTP 403 (Forbidden). Possible values: ["CONNECT_FAILURE", "HTTP_5XX", "GATEWAY_ERROR", "RETRIABLE_4XX", "NOT_FOUND", "FORBIDDEN"]`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"CONNECT_FAILURE", "HTTP_5XX", "GATEWAY_ERROR", "RETRIABLE_4XX", "NOT_FOUND", "FORBIDDEN"}), + ValidateFunc: verify.ValidateEnum([]string{"CONNECT_FAILURE", "HTTP_5XX", "GATEWAY_ERROR", "RETRIABLE_4XX", "NOT_FOUND", "FORBIDDEN"}), }, }, "timeout": { @@ -336,8 +343,8 @@ If the response headers have already been written to the connection, the respons } func resourceNetworkServicesEdgeCacheOriginCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -346,77 +353,77 @@ func resourceNetworkServicesEdgeCacheOriginCreate(d *schema.ResourceData, meta i descriptionProp, err := expandNetworkServicesEdgeCacheOriginDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || 
!reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } labelsProp, err := expandNetworkServicesEdgeCacheOriginLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } originAddressProp, err := expandNetworkServicesEdgeCacheOriginOriginAddress(d.Get("origin_address"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("origin_address"); !isEmptyValue(reflect.ValueOf(originAddressProp)) && (ok || !reflect.DeepEqual(v, originAddressProp)) { + } else if v, ok := d.GetOkExists("origin_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(originAddressProp)) && (ok || !reflect.DeepEqual(v, originAddressProp)) { obj["originAddress"] = originAddressProp } protocolProp, err := expandNetworkServicesEdgeCacheOriginProtocol(d.Get("protocol"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(reflect.ValueOf(protocolProp)) && (ok || !reflect.DeepEqual(v, protocolProp)) { + } else if v, ok := d.GetOkExists("protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(protocolProp)) && (ok || !reflect.DeepEqual(v, protocolProp)) { obj["protocol"] = protocolProp } portProp, err := expandNetworkServicesEdgeCacheOriginPort(d.Get("port"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(reflect.ValueOf(portProp)) && (ok || !reflect.DeepEqual(v, portProp)) { + } else if v, ok := d.GetOkExists("port"); !tpgresource.IsEmptyValue(reflect.ValueOf(portProp)) && (ok || 
!reflect.DeepEqual(v, portProp)) { obj["port"] = portProp } maxAttemptsProp, err := expandNetworkServicesEdgeCacheOriginMaxAttempts(d.Get("max_attempts"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("max_attempts"); !isEmptyValue(reflect.ValueOf(maxAttemptsProp)) && (ok || !reflect.DeepEqual(v, maxAttemptsProp)) { + } else if v, ok := d.GetOkExists("max_attempts"); !tpgresource.IsEmptyValue(reflect.ValueOf(maxAttemptsProp)) && (ok || !reflect.DeepEqual(v, maxAttemptsProp)) { obj["maxAttempts"] = maxAttemptsProp } failoverOriginProp, err := expandNetworkServicesEdgeCacheOriginFailoverOrigin(d.Get("failover_origin"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("failover_origin"); !isEmptyValue(reflect.ValueOf(failoverOriginProp)) && (ok || !reflect.DeepEqual(v, failoverOriginProp)) { + } else if v, ok := d.GetOkExists("failover_origin"); !tpgresource.IsEmptyValue(reflect.ValueOf(failoverOriginProp)) && (ok || !reflect.DeepEqual(v, failoverOriginProp)) { obj["failoverOrigin"] = failoverOriginProp } retryConditionsProp, err := expandNetworkServicesEdgeCacheOriginRetryConditions(d.Get("retry_conditions"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("retry_conditions"); !isEmptyValue(reflect.ValueOf(retryConditionsProp)) && (ok || !reflect.DeepEqual(v, retryConditionsProp)) { + } else if v, ok := d.GetOkExists("retry_conditions"); !tpgresource.IsEmptyValue(reflect.ValueOf(retryConditionsProp)) && (ok || !reflect.DeepEqual(v, retryConditionsProp)) { obj["retryConditions"] = retryConditionsProp } timeoutProp, err := expandNetworkServicesEdgeCacheOriginTimeout(d.Get("timeout"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("timeout"); !isEmptyValue(reflect.ValueOf(timeoutProp)) && (ok || !reflect.DeepEqual(v, timeoutProp)) { + } else if v, ok := d.GetOkExists("timeout"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeoutProp)) && (ok || !reflect.DeepEqual(v, 
timeoutProp)) { obj["timeout"] = timeoutProp } awsV4AuthenticationProp, err := expandNetworkServicesEdgeCacheOriginAwsV4Authentication(d.Get("aws_v4_authentication"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("aws_v4_authentication"); !isEmptyValue(reflect.ValueOf(awsV4AuthenticationProp)) && (ok || !reflect.DeepEqual(v, awsV4AuthenticationProp)) { + } else if v, ok := d.GetOkExists("aws_v4_authentication"); !tpgresource.IsEmptyValue(reflect.ValueOf(awsV4AuthenticationProp)) && (ok || !reflect.DeepEqual(v, awsV4AuthenticationProp)) { obj["awsV4Authentication"] = awsV4AuthenticationProp } originOverrideActionProp, err := expandNetworkServicesEdgeCacheOriginOriginOverrideAction(d.Get("origin_override_action"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("origin_override_action"); !isEmptyValue(reflect.ValueOf(originOverrideActionProp)) && (ok || !reflect.DeepEqual(v, originOverrideActionProp)) { + } else if v, ok := d.GetOkExists("origin_override_action"); !tpgresource.IsEmptyValue(reflect.ValueOf(originOverrideActionProp)) && (ok || !reflect.DeepEqual(v, originOverrideActionProp)) { obj["originOverrideAction"] = originOverrideActionProp } originRedirectProp, err := expandNetworkServicesEdgeCacheOriginOriginRedirect(d.Get("origin_redirect"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("origin_redirect"); !isEmptyValue(reflect.ValueOf(originRedirectProp)) && (ok || !reflect.DeepEqual(v, originRedirectProp)) { + } else if v, ok := d.GetOkExists("origin_redirect"); !tpgresource.IsEmptyValue(reflect.ValueOf(originRedirectProp)) && (ok || !reflect.DeepEqual(v, originRedirectProp)) { obj["originRedirect"] = originRedirectProp } - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheOrigins?edgeCacheOriginId={{name}}") + url, err := tpgresource.ReplaceVars(d, config, 
"{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheOrigins?edgeCacheOriginId={{name}}") if err != nil { return err } @@ -424,24 +431,32 @@ func resourceNetworkServicesEdgeCacheOriginCreate(d *schema.ResourceData, meta i log.Printf("[DEBUG] Creating new EdgeCacheOrigin: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for EdgeCacheOrigin: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating EdgeCacheOrigin: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -463,33 +478,39 @@ func resourceNetworkServicesEdgeCacheOriginCreate(d *schema.ResourceData, meta i } func resourceNetworkServicesEdgeCacheOriginRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, 
"{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for EdgeCacheOrigin: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("NetworkServicesEdgeCacheOrigin %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetworkServicesEdgeCacheOrigin %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -537,15 +558,15 @@ func resourceNetworkServicesEdgeCacheOriginRead(d *schema.ResourceData, meta int } func resourceNetworkServicesEdgeCacheOriginUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for EdgeCacheOrigin: %s", err) } @@ -555,77 +576,77 @@ func resourceNetworkServicesEdgeCacheOriginUpdate(d *schema.ResourceData, meta i 
descriptionProp, err := expandNetworkServicesEdgeCacheOriginDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } labelsProp, err := expandNetworkServicesEdgeCacheOriginLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } originAddressProp, err := expandNetworkServicesEdgeCacheOriginOriginAddress(d.Get("origin_address"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("origin_address"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, originAddressProp)) { + } else if v, ok := d.GetOkExists("origin_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, originAddressProp)) { obj["originAddress"] = originAddressProp } protocolProp, err := expandNetworkServicesEdgeCacheOriginProtocol(d.Get("protocol"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, protocolProp)) { + } else if v, ok := d.GetOkExists("protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, protocolProp)) { obj["protocol"] = protocolProp } portProp, err := expandNetworkServicesEdgeCacheOriginPort(d.Get("port"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
portProp)) { + } else if v, ok := d.GetOkExists("port"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, portProp)) { obj["port"] = portProp } maxAttemptsProp, err := expandNetworkServicesEdgeCacheOriginMaxAttempts(d.Get("max_attempts"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("max_attempts"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maxAttemptsProp)) { + } else if v, ok := d.GetOkExists("max_attempts"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maxAttemptsProp)) { obj["maxAttempts"] = maxAttemptsProp } failoverOriginProp, err := expandNetworkServicesEdgeCacheOriginFailoverOrigin(d.Get("failover_origin"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("failover_origin"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, failoverOriginProp)) { + } else if v, ok := d.GetOkExists("failover_origin"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, failoverOriginProp)) { obj["failoverOrigin"] = failoverOriginProp } retryConditionsProp, err := expandNetworkServicesEdgeCacheOriginRetryConditions(d.Get("retry_conditions"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("retry_conditions"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retryConditionsProp)) { + } else if v, ok := d.GetOkExists("retry_conditions"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retryConditionsProp)) { obj["retryConditions"] = retryConditionsProp } timeoutProp, err := expandNetworkServicesEdgeCacheOriginTimeout(d.Get("timeout"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("timeout"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, timeoutProp)) { + } else if v, ok := d.GetOkExists("timeout"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
timeoutProp)) { obj["timeout"] = timeoutProp } awsV4AuthenticationProp, err := expandNetworkServicesEdgeCacheOriginAwsV4Authentication(d.Get("aws_v4_authentication"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("aws_v4_authentication"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, awsV4AuthenticationProp)) { + } else if v, ok := d.GetOkExists("aws_v4_authentication"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, awsV4AuthenticationProp)) { obj["awsV4Authentication"] = awsV4AuthenticationProp } originOverrideActionProp, err := expandNetworkServicesEdgeCacheOriginOriginOverrideAction(d.Get("origin_override_action"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("origin_override_action"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, originOverrideActionProp)) { + } else if v, ok := d.GetOkExists("origin_override_action"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, originOverrideActionProp)) { obj["originOverrideAction"] = originOverrideActionProp } originRedirectProp, err := expandNetworkServicesEdgeCacheOriginOriginRedirect(d.Get("origin_redirect"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("origin_redirect"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, originRedirectProp)) { + } else if v, ok := d.GetOkExists("origin_redirect"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, originRedirectProp)) { obj["originRedirect"] = originRedirectProp } - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") if err != nil { return err } @@ -680,19 +701,27 @@ func resourceNetworkServicesEdgeCacheOriginUpdate(d 
*schema.ResourceData, meta i if d.HasChange("origin_redirect") { updateMask = append(updateMask, "originRedirect") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating EdgeCacheOrigin %q: %s", d.Id(), err) @@ -712,21 +741,21 @@ func resourceNetworkServicesEdgeCacheOriginUpdate(d *schema.ResourceData, meta i } func resourceNetworkServicesEdgeCacheOriginDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for EdgeCacheOrigin: %s", err) } billingProject = project - url, err := replaceVars(d, config, 
"{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") if err != nil { return err } @@ -735,13 +764,21 @@ func resourceNetworkServicesEdgeCacheOriginDelete(d *schema.ResourceData, meta i log.Printf("[DEBUG] Deleting EdgeCacheOrigin %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "EdgeCacheOrigin") + return transport_tpg.HandleNotFoundError(err, d, "EdgeCacheOrigin") } err = NetworkServicesOperationWaitTime( @@ -757,8 +794,8 @@ func resourceNetworkServicesEdgeCacheOriginDelete(d *schema.ResourceData, meta i } func resourceNetworkServicesEdgeCacheOriginImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P<project>[^/]+)/locations/global/edgeCacheOrigins/(?P<name>[^/]+)", "(?P<project>[^/]+)/(?P<name>[^/]+)", "(?P<name>[^/]+)", @@ -767,7 +804,7 @@ func resourceNetworkServicesEdgeCacheOriginImport(d *schema.ResourceData, meta i } // Replace import id for the resource id - id, err := replaceVars(d, config, 
"projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -776,26 +813,26 @@ func resourceNetworkServicesEdgeCacheOriginImport(d *schema.ResourceData, meta i return []*schema.ResourceData{d}, nil } -func flattenNetworkServicesEdgeCacheOriginDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheOriginLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheOriginOriginAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginOriginAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheOriginProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheOriginPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -809,10 +846,10 @@ func flattenNetworkServicesEdgeCacheOriginPort(v interface{}, d *schema.Resource return v // let terraform core handle it otherwise } -func 
flattenNetworkServicesEdgeCacheOriginMaxAttempts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginMaxAttempts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -826,15 +863,15 @@ func flattenNetworkServicesEdgeCacheOriginMaxAttempts(v interface{}, d *schema.R return v // let terraform core handle it otherwise } -func flattenNetworkServicesEdgeCacheOriginFailoverOrigin(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginFailoverOrigin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheOriginRetryConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginRetryConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheOriginTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { out := make(map[string]string) if v == nil { @@ -858,7 +895,7 @@ func flattenNetworkServicesEdgeCacheOriginTimeout(v interface{}, d *schema.Resou return []interface{}{out} } -func flattenNetworkServicesEdgeCacheOriginAwsV4Authentication(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginAwsV4Authentication(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -875,19 +912,19 @@ func 
flattenNetworkServicesEdgeCacheOriginAwsV4Authentication(v interface{}, d * flattenNetworkServicesEdgeCacheOriginAwsV4AuthenticationOriginRegion(original["originRegion"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheOriginAwsV4AuthenticationAccessKeyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginAwsV4AuthenticationAccessKeyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheOriginAwsV4AuthenticationSecretAccessKeyVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginAwsV4AuthenticationSecretAccessKeyVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheOriginAwsV4AuthenticationOriginRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginAwsV4AuthenticationOriginRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheOriginOriginOverrideAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginOriginOverrideAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -902,7 +939,7 @@ func flattenNetworkServicesEdgeCacheOriginOriginOverrideAction(v interface{}, d flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderAction(original["headerAction"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionUrlRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionUrlRewrite(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { if v == nil { return nil } @@ -915,11 +952,11 @@ func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionUrlRewrite(v inter flattenNetworkServicesEdgeCacheOriginOriginOverrideActionUrlRewriteHostRewrite(original["hostRewrite"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -932,7 +969,7 @@ func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderAction(v int flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAdd(original["requestHeadersToAdd"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -952,19 +989,19 @@ func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionReques } return transformed } -func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheOriginOriginRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginOriginRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -977,15 +1014,15 @@ func flattenNetworkServicesEdgeCacheOriginOriginRedirect(v interface{}, d *schem flattenNetworkServicesEdgeCacheOriginOriginRedirectRedirectConditions(original["redirectConditions"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheOriginOriginRedirectRedirectConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheOriginOriginRedirectRedirectConditions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandNetworkServicesEdgeCacheOriginDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginDescription(v interface{}, 
d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandNetworkServicesEdgeCacheOriginLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -996,31 +1033,31 @@ func expandNetworkServicesEdgeCacheOriginLabels(v interface{}, d TerraformResour return m, nil } -func expandNetworkServicesEdgeCacheOriginOriginAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginOriginAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginMaxAttempts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginMaxAttempts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginFailoverOrigin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginFailoverOrigin(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginRetryConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginRetryConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1032,51 +1069,51 @@ func expandNetworkServicesEdgeCacheOriginTimeout(v interface{}, d TerraformResou transformedConnectTimeout, err := expandNetworkServicesEdgeCacheOriginTimeoutConnectTimeout(original["connect_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedConnectTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedConnectTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["connectTimeout"] = transformedConnectTimeout } transformedMaxAttemptsTimeout, err := expandNetworkServicesEdgeCacheOriginTimeoutMaxAttemptsTimeout(original["max_attempts_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxAttemptsTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxAttemptsTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxAttemptsTimeout"] = transformedMaxAttemptsTimeout } transformedResponseTimeout, err := expandNetworkServicesEdgeCacheOriginTimeoutResponseTimeout(original["response_timeout"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedResponseTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseTimeout"] = transformedResponseTimeout } transformedReadTimeout, err := expandNetworkServicesEdgeCacheOriginTimeoutReadTimeout(original["read_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReadTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReadTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["readTimeout"] = transformedReadTimeout } return transformed, nil } -func expandNetworkServicesEdgeCacheOriginTimeoutConnectTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginTimeoutConnectTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginTimeoutMaxAttemptsTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginTimeoutMaxAttemptsTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginTimeoutResponseTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginTimeoutResponseTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginTimeoutReadTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginTimeoutReadTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) 
{ return v, nil } -func expandNetworkServicesEdgeCacheOriginAwsV4Authentication(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginAwsV4Authentication(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1088,40 +1125,40 @@ func expandNetworkServicesEdgeCacheOriginAwsV4Authentication(v interface{}, d Te transformedAccessKeyId, err := expandNetworkServicesEdgeCacheOriginAwsV4AuthenticationAccessKeyId(original["access_key_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAccessKeyId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAccessKeyId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["accessKeyId"] = transformedAccessKeyId } transformedSecretAccessKeyVersion, err := expandNetworkServicesEdgeCacheOriginAwsV4AuthenticationSecretAccessKeyVersion(original["secret_access_key_version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSecretAccessKeyVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSecretAccessKeyVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["secretAccessKeyVersion"] = transformedSecretAccessKeyVersion } transformedOriginRegion, err := expandNetworkServicesEdgeCacheOriginAwsV4AuthenticationOriginRegion(original["origin_region"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOriginRegion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOriginRegion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["originRegion"] = transformedOriginRegion } return transformed, nil } -func expandNetworkServicesEdgeCacheOriginAwsV4AuthenticationAccessKeyId(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginAwsV4AuthenticationAccessKeyId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginAwsV4AuthenticationSecretAccessKeyVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginAwsV4AuthenticationSecretAccessKeyVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginAwsV4AuthenticationOriginRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginAwsV4AuthenticationOriginRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginOriginOverrideAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginOriginOverrideAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1133,21 +1170,21 @@ func expandNetworkServicesEdgeCacheOriginOriginOverrideAction(v interface{}, d T transformedUrlRewrite, err := expandNetworkServicesEdgeCacheOriginOriginOverrideActionUrlRewrite(original["url_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRewrite"] = transformedUrlRewrite } transformedHeaderAction, err := 
expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderAction(original["header_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerAction"] = transformedHeaderAction } return transformed, nil } -func expandNetworkServicesEdgeCacheOriginOriginOverrideActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginOriginOverrideActionUrlRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1159,18 +1196,18 @@ func expandNetworkServicesEdgeCacheOriginOriginOverrideActionUrlRewrite(v interf transformedHostRewrite, err := expandNetworkServicesEdgeCacheOriginOriginOverrideActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRewrite"] = transformedHostRewrite } return transformed, nil } -func expandNetworkServicesEdgeCacheOriginOriginOverrideActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginOriginOverrideActionUrlRewriteHostRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1182,14 +1219,14 @@ func expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderAction(v inte transformedRequestHeadersToAdd, err := expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd } return transformed, nil } -func expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1202,21 +1239,21 @@ func expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequest transformedHeaderName, err := expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := 
expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -1225,19 +1262,19 @@ func expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequest return req, nil } -func expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandNetworkServicesEdgeCacheOriginOriginOverrideActionHeaderActionRequestHeadersToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheOriginOriginRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginOriginRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1249,13 +1286,13 @@ func expandNetworkServicesEdgeCacheOriginOriginRedirect(v interface{}, d Terrafo transformedRedirectConditions, err := expandNetworkServicesEdgeCacheOriginOriginRedirectRedirectConditions(original["redirect_conditions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRedirectConditions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRedirectConditions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["redirectConditions"] = transformedRedirectConditions } return transformed, nil } -func expandNetworkServicesEdgeCacheOriginOriginRedirectRedirectConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheOriginOriginRedirectRedirectConditions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_origin_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_origin_sweeper.go new file mode 100644 index 0000000000..852ea85309 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_origin_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package networkservices + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NetworkServicesEdgeCacheOrigin", testSweepNetworkServicesEdgeCacheOrigin) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNetworkServicesEdgeCacheOrigin(region string) error { + resourceName := "NetworkServicesEdgeCacheOrigin" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d 
:= &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://networkservices.googleapis.com/v1/projects/{{project}}/locations/global/edgeCacheOrigins", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["edgeCacheOrigins"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://networkservices.googleapis.com/v1/projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_services_edge_cache_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_service.go similarity index 81% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_services_edge_cache_service.go rename to 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_service.go index fcb7f40233..93a55918db 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_network_services_edge_cache_service.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_service.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package networkservices import ( "fmt" @@ -22,6 +25,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceNetworkServicesEdgeCacheService() *schema.Resource { @@ -62,7 +69,7 @@ and all following characters must be a dash, underscore, letter or digit.`, Required: true, Description: `The list of hostRules to match against. These rules define which hostnames the EdgeCacheService will match against, and which route configurations apply.`, MinItems: 1, - MaxItems: 5, + MaxItems: 10, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "hosts": { @@ -123,7 +130,7 @@ When multiple hosts are specified, hosts are matched in the following priority: Required: true, Description: `The routeRules to match against. 
routeRules support advanced routing behaviour, and can match on paths, headers and query parameters, as well as status codes and HTTP methods.`, MinItems: 1, - MaxItems: 64, + MaxItems: 200, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "match_rule": { @@ -354,7 +361,7 @@ Response headers are only sent to the client, and do not have an effect on the c "origin": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The Origin resource that requests to this route should fetch from when a matching response is not in cache. Origins can be defined as short names ("my-origin") or fully-qualified resource URLs - e.g. "networkservices.googleapis.com/projects/my-project/global/edgecacheorigins/my-origin" Only one of origin or urlRedirect can be set.`, @@ -389,7 +396,7 @@ This field may only be specified when signedRequestMode is set to REQUIRE_TOKENS MaxItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"GENERATE_COOKIE", "GENERATE_TOKEN_HLS_COOKIELESS", "PROPAGATE_TOKEN_HLS_COOKIELESS"}), + ValidateFunc: verify.ValidateEnum([]string{"GENERATE_COOKIE", "GENERATE_TOKEN_HLS_COOKIELESS", "PROPAGATE_TOKEN_HLS_COOKIELESS"}), }, }, "copied_parameters": { @@ -489,7 +496,7 @@ included.`, Description: `Names of query string parameters to exclude from cache keys. All other parameters will be included. Either specify includedQueryParameters or excludedQueryParameters, not both. '&' and '=' will be percent encoded and not treated as delimiters.`, - MaxItems: 10, + MaxItems: 20, Elem: &schema.Schema{ Type: schema.TypeString, }, @@ -539,7 +546,7 @@ Note that specifying several headers, and/or headers that have a large range of Description: `Names of query string parameters to include in cache keys. All other parameters will be excluded. Either specify includedQueryParameters or excludedQueryParameters, not both. 
'&' and '=' will be percent encoded and not treated as delimiters.`, - MaxItems: 10, + MaxItems: 20, Elem: &schema.Schema{ Type: schema.TypeString, }, @@ -551,7 +558,7 @@ Either specify includedQueryParameters or excludedQueryParameters, not both. '&' Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"CACHE_ALL_STATIC", "USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "BYPASS_CACHE", ""}), + ValidateFunc: verify.ValidateEnum([]string{"CACHE_ALL_STATIC", "USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "BYPASS_CACHE", ""}), Description: `Cache modes allow users to control the behaviour of the cache, what content it should cache automatically, whether to respect origin headers, or whether to unconditionally cache all responses. For all cache modes, Cache-Control headers will be passed to the client. Use clientTtl to override what is sent to the client. Possible values: ["CACHE_ALL_STATIC", "USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "BYPASS_CACHE"]`, @@ -655,7 +662,7 @@ By default, signedRequestMaximumExpirationTtl is not set and the expiration time Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"DISABLED", "REQUIRE_SIGNATURES", "REQUIRE_TOKENS", ""}), + ValidateFunc: verify.ValidateEnum([]string{"DISABLED", "REQUIRE_SIGNATURES", "REQUIRE_TOKENS", ""}), Description: `Whether to enforce signed requests. The default value is DISABLED, which means all content is public, and does not authorize access. You must also set a signedRequestKeyset to enable signed requests. @@ -682,7 +689,7 @@ You may specify up to 3 signature algorithms to use. 
Possible values: ["ED25519" MaxItems: 3, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"ED25519", "HMAC_SHA_256", "HMAC_SHA1"}), + ValidateFunc: verify.ValidateEnum([]string{"ED25519", "HMAC_SHA_256", "HMAC_SHA1"}), }, }, "token_query_parameter": { @@ -852,7 +859,7 @@ prefixRedirect cannot be supplied together with pathRedirect. Supply one alone o Type: schema.TypeString, Computed: true, Optional: true, - ValidateFunc: validateEnum([]string{"MOVED_PERMANENTLY_DEFAULT", "FOUND", "SEE_OTHER", "TEMPORARY_REDIRECT", "PERMANENT_REDIRECT", ""}), + ValidateFunc: verify.ValidateEnum([]string{"MOVED_PERMANENTLY_DEFAULT", "FOUND", "SEE_OTHER", "TEMPORARY_REDIRECT", "PERMANENT_REDIRECT", ""}), Description: `The HTTP Status code to use for this RedirectAction. The supported values are: @@ -995,8 +1002,8 @@ If not set, the EdgeCacheService has no SSL policy configured, and will default } func resourceNetworkServicesEdgeCacheServiceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -1005,65 +1012,65 @@ func resourceNetworkServicesEdgeCacheServiceCreate(d *schema.ResourceData, meta descriptionProp, err := expandNetworkServicesEdgeCacheServiceDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } labelsProp, err := expandNetworkServicesEdgeCacheServiceLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := 
d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } disableQuicProp, err := expandNetworkServicesEdgeCacheServiceDisableQuic(d.Get("disable_quic"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("disable_quic"); !isEmptyValue(reflect.ValueOf(disableQuicProp)) && (ok || !reflect.DeepEqual(v, disableQuicProp)) { + } else if v, ok := d.GetOkExists("disable_quic"); !tpgresource.IsEmptyValue(reflect.ValueOf(disableQuicProp)) && (ok || !reflect.DeepEqual(v, disableQuicProp)) { obj["disableQuic"] = disableQuicProp } disableHttp2Prop, err := expandNetworkServicesEdgeCacheServiceDisableHttp2(d.Get("disable_http2"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("disable_http2"); !isEmptyValue(reflect.ValueOf(disableHttp2Prop)) && (ok || !reflect.DeepEqual(v, disableHttp2Prop)) { + } else if v, ok := d.GetOkExists("disable_http2"); !tpgresource.IsEmptyValue(reflect.ValueOf(disableHttp2Prop)) && (ok || !reflect.DeepEqual(v, disableHttp2Prop)) { obj["disableHttp2"] = disableHttp2Prop } requireTlsProp, err := expandNetworkServicesEdgeCacheServiceRequireTls(d.Get("require_tls"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("require_tls"); !isEmptyValue(reflect.ValueOf(requireTlsProp)) && (ok || !reflect.DeepEqual(v, requireTlsProp)) { + } else if v, ok := d.GetOkExists("require_tls"); !tpgresource.IsEmptyValue(reflect.ValueOf(requireTlsProp)) && (ok || !reflect.DeepEqual(v, requireTlsProp)) { obj["requireTls"] = requireTlsProp } edgeSslCertificatesProp, err := expandNetworkServicesEdgeCacheServiceEdgeSslCertificates(d.Get("edge_ssl_certificates"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("edge_ssl_certificates"); 
!isEmptyValue(reflect.ValueOf(edgeSslCertificatesProp)) && (ok || !reflect.DeepEqual(v, edgeSslCertificatesProp)) { + } else if v, ok := d.GetOkExists("edge_ssl_certificates"); !tpgresource.IsEmptyValue(reflect.ValueOf(edgeSslCertificatesProp)) && (ok || !reflect.DeepEqual(v, edgeSslCertificatesProp)) { obj["edgeSslCertificates"] = edgeSslCertificatesProp } sslPolicyProp, err := expandNetworkServicesEdgeCacheServiceSslPolicy(d.Get("ssl_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("ssl_policy"); !isEmptyValue(reflect.ValueOf(sslPolicyProp)) && (ok || !reflect.DeepEqual(v, sslPolicyProp)) { + } else if v, ok := d.GetOkExists("ssl_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(sslPolicyProp)) && (ok || !reflect.DeepEqual(v, sslPolicyProp)) { obj["sslPolicy"] = sslPolicyProp } routingProp, err := expandNetworkServicesEdgeCacheServiceRouting(d.Get("routing"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("routing"); !isEmptyValue(reflect.ValueOf(routingProp)) && (ok || !reflect.DeepEqual(v, routingProp)) { + } else if v, ok := d.GetOkExists("routing"); !tpgresource.IsEmptyValue(reflect.ValueOf(routingProp)) && (ok || !reflect.DeepEqual(v, routingProp)) { obj["routing"] = routingProp } logConfigProp, err := expandNetworkServicesEdgeCacheServiceLogConfig(d.Get("log_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(reflect.ValueOf(logConfigProp)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { + } else if v, ok := d.GetOkExists("log_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(logConfigProp)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { obj["logConfig"] = logConfigProp } edgeSecurityPolicyProp, err := expandNetworkServicesEdgeCacheServiceEdgeSecurityPolicy(d.Get("edge_security_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("edge_security_policy"); 
!isEmptyValue(reflect.ValueOf(edgeSecurityPolicyProp)) && (ok || !reflect.DeepEqual(v, edgeSecurityPolicyProp)) { + } else if v, ok := d.GetOkExists("edge_security_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(edgeSecurityPolicyProp)) && (ok || !reflect.DeepEqual(v, edgeSecurityPolicyProp)) { obj["edgeSecurityPolicy"] = edgeSecurityPolicyProp } - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheServices?edgeCacheServiceId={{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheServices?edgeCacheServiceId={{name}}") if err != nil { return err } @@ -1071,24 +1078,32 @@ func resourceNetworkServicesEdgeCacheServiceCreate(d *schema.ResourceData, meta log.Printf("[DEBUG] Creating new EdgeCacheService: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for EdgeCacheService: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating EdgeCacheService: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheServices/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheServices/{{name}}") if err != 
nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -1110,33 +1125,39 @@ func resourceNetworkServicesEdgeCacheServiceCreate(d *schema.ResourceData, meta } func resourceNetworkServicesEdgeCacheServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheServices/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheServices/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for EdgeCacheService: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("NetworkServicesEdgeCacheService %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetworkServicesEdgeCacheService %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -1184,15 +1205,15 @@ func resourceNetworkServicesEdgeCacheServiceRead(d *schema.ResourceData, meta in } func resourceNetworkServicesEdgeCacheServiceUpdate(d *schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for EdgeCacheService: %s", err) } @@ -1202,65 +1223,65 @@ func resourceNetworkServicesEdgeCacheServiceUpdate(d *schema.ResourceData, meta descriptionProp, err := expandNetworkServicesEdgeCacheServiceDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } labelsProp, err := expandNetworkServicesEdgeCacheServiceLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } disableQuicProp, err := expandNetworkServicesEdgeCacheServiceDisableQuic(d.Get("disable_quic"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("disable_quic"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disableQuicProp)) { + } else if v, ok := d.GetOkExists("disable_quic"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disableQuicProp)) { obj["disableQuic"] = disableQuicProp } disableHttp2Prop, err := expandNetworkServicesEdgeCacheServiceDisableHttp2(d.Get("disable_http2"), d, config) if err != 
nil { return err - } else if v, ok := d.GetOkExists("disable_http2"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disableHttp2Prop)) { + } else if v, ok := d.GetOkExists("disable_http2"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, disableHttp2Prop)) { obj["disableHttp2"] = disableHttp2Prop } requireTlsProp, err := expandNetworkServicesEdgeCacheServiceRequireTls(d.Get("require_tls"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("require_tls"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, requireTlsProp)) { + } else if v, ok := d.GetOkExists("require_tls"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, requireTlsProp)) { obj["requireTls"] = requireTlsProp } edgeSslCertificatesProp, err := expandNetworkServicesEdgeCacheServiceEdgeSslCertificates(d.Get("edge_ssl_certificates"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("edge_ssl_certificates"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, edgeSslCertificatesProp)) { + } else if v, ok := d.GetOkExists("edge_ssl_certificates"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, edgeSslCertificatesProp)) { obj["edgeSslCertificates"] = edgeSslCertificatesProp } sslPolicyProp, err := expandNetworkServicesEdgeCacheServiceSslPolicy(d.Get("ssl_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("ssl_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslPolicyProp)) { + } else if v, ok := d.GetOkExists("ssl_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sslPolicyProp)) { obj["sslPolicy"] = sslPolicyProp } routingProp, err := expandNetworkServicesEdgeCacheServiceRouting(d.Get("routing"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("routing"); !isEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, routingProp)) { + } else if v, ok := d.GetOkExists("routing"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, routingProp)) { obj["routing"] = routingProp } logConfigProp, err := expandNetworkServicesEdgeCacheServiceLogConfig(d.Get("log_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { + } else if v, ok := d.GetOkExists("log_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, logConfigProp)) { obj["logConfig"] = logConfigProp } edgeSecurityPolicyProp, err := expandNetworkServicesEdgeCacheServiceEdgeSecurityPolicy(d.Get("edge_security_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("edge_security_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, edgeSecurityPolicyProp)) { + } else if v, ok := d.GetOkExists("edge_security_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, edgeSecurityPolicyProp)) { obj["edgeSecurityPolicy"] = edgeSecurityPolicyProp } - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheServices/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheServices/{{name}}") if err != nil { return err } @@ -1307,19 +1328,27 @@ func resourceNetworkServicesEdgeCacheServiceUpdate(d *schema.ResourceData, meta if d.HasChange("edge_security_policy") { updateMask = append(updateMask, "edgeSecurityPolicy") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = 
transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating EdgeCacheService %q: %s", d.Id(), err) @@ -1339,21 +1368,21 @@ func resourceNetworkServicesEdgeCacheServiceUpdate(d *schema.ResourceData, meta } func resourceNetworkServicesEdgeCacheServiceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for EdgeCacheService: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheServices/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheServices/{{name}}") if err != nil { return err } @@ -1362,13 +1391,21 @@ func resourceNetworkServicesEdgeCacheServiceDelete(d *schema.ResourceData, meta log.Printf("[DEBUG] Deleting EdgeCacheService %q", d.Id()) // err == nil indicates that the billing_project value was 
found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "EdgeCacheService") + return transport_tpg.HandleNotFoundError(err, d, "EdgeCacheService") } err = NetworkServicesOperationWaitTime( @@ -1384,8 +1421,8 @@ func resourceNetworkServicesEdgeCacheServiceDelete(d *schema.ResourceData, meta } func resourceNetworkServicesEdgeCacheServiceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/global/edgeCacheServices/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -1394,7 +1431,7 @@ func resourceNetworkServicesEdgeCacheServiceImport(d *schema.ResourceData, meta } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheServices/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheServices/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -1403,43 +1440,43 @@ func resourceNetworkServicesEdgeCacheServiceImport(d *schema.ResourceData, meta return []*schema.ResourceData{d}, nil } -func flattenNetworkServicesEdgeCacheServiceDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceDescription(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceDisableQuic(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceDisableQuic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceDisableHttp2(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceDisableHttp2(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRequireTls(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRequireTls(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceEdgeSslCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceEdgeSslCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceSslPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceSslPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceIpv4Addresses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceIpv4Addresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenNetworkServicesEdgeCacheServiceIpv6Addresses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceIpv6Addresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRouting(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRouting(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1454,7 +1491,7 @@ func flattenNetworkServicesEdgeCacheServiceRouting(v interface{}, d *schema.Reso flattenNetworkServicesEdgeCacheServiceRoutingPathMatcher(original["pathMatchers"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheServiceRoutingHostRule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingHostRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1474,19 +1511,19 @@ func flattenNetworkServicesEdgeCacheServiceRoutingHostRule(v interface{}, d *sch } return transformed } -func flattenNetworkServicesEdgeCacheServiceRoutingHostRuleDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingHostRuleDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingHostRuleHosts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingHostRuleHosts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingHostRulePathMatcher(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenNetworkServicesEdgeCacheServiceRoutingHostRulePathMatcher(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcher(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcher(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1506,15 +1543,15 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcher(v interface{}, d * } return transformed } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1538,15 +1575,15 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRule(v interfa } return transformed } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRulePriority(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRulePriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1569,11 +1606,11 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule( } return transformed } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleIgnoreCase(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleIgnoreCase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1596,31 +1633,31 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleH } return transformed } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPresentMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPresentMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchExactMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchExactMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPrefixMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPrefixMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchSuffixMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchSuffixMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchInvertMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchInvertMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatch(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1640,31 +1677,31 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQ } return transformed } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchPresentMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchPresentMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchExactMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchExactMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePrefixMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePrefixMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePathTemplateMatch(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePathTemplateMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleFullPathMatch(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleFullPathMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1683,7 +1720,7 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActi flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemove(original["responseHeadersToRemove"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1703,19 +1740,19 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActi } return transformed } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderName(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAdd(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAdd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1735,19 +1772,19 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActi } return transformed } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddReplace(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddReplace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1765,11 +1802,11 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActi } return transformed } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemoveHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemoveHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemove(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemove(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1787,11 +1824,11 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActi } return 
transformed } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemoveHeaderName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemoveHeaderName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1808,7 +1845,7 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActio flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicy(original["corsPolicy"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1843,23 +1880,23 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActio flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMaximumExpirationTtl(original["signedRequestMaximumExpirationTtl"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheMode(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyClientTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyClientTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyDefaultTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyDefaultTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyMaxTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyMaxTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1884,51 +1921,51 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActio flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedCookieNames(original["includedCookieNames"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, 
d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeQueryString(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeQueryString(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeHost(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedQueryParameters(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedQueryParameters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludedQueryParameters(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludedQueryParameters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedHeaderNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedHeaderNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedCookieNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedCookieNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCaching(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCaching(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCachingPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCachingPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestKeyset(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestKeyset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1943,15 +1980,15 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActio flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptionsAllowedSignatureAlgorithms(original["allowedSignatureAlgorithms"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptionsTokenQueryParameter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptionsTokenQueryParameter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptionsAllowedSignatureAlgorithms(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptionsAllowedSignatureAlgorithms(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignatures(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignatures(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1972,31 +2009,31 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActio flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesCopiedParameters(original["copiedParameters"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesActions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesActions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesKeyset(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesKeyset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesTokenTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesTokenTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesTokenQueryParameter(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesTokenQueryParameter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesCopiedParameters(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesCopiedParameters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMaximumExpirationTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMaximumExpirationTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2013,19 +2050,19 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActio flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathTemplateRewrite(original["pathTemplateRewrite"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewriteHostRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathTemplateRewrite(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathTemplateRewrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2050,39 +2087,39 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActio flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyDisabled(original["disabled"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyMaxAge(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowMethods(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyDisabled(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleOrigin(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleOrigin(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2105,31 +2142,31 @@ func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirec flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectStripQuery(original["stripQuery"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHostRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPathRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPrefixRedirect(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPrefixRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectRedirectResponseCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHttpsRedirect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectStripQuery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceLogConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceLogConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2144,23 +2181,23 @@ func flattenNetworkServicesEdgeCacheServiceLogConfig(v interface{}, d *schema.Re flattenNetworkServicesEdgeCacheServiceLogConfigSampleRate(original["sampleRate"], d, config) return []interface{}{transformed} } -func flattenNetworkServicesEdgeCacheServiceLogConfigEnable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenNetworkServicesEdgeCacheServiceLogConfigEnable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceLogConfigSampleRate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceLogConfigSampleRate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNetworkServicesEdgeCacheServiceEdgeSecurityPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNetworkServicesEdgeCacheServiceEdgeSecurityPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandNetworkServicesEdgeCacheServiceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandNetworkServicesEdgeCacheServiceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -2171,27 +2208,27 @@ func expandNetworkServicesEdgeCacheServiceLabels(v interface{}, d TerraformResou return m, nil } -func expandNetworkServicesEdgeCacheServiceDisableQuic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceDisableQuic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceDisableHttp2(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandNetworkServicesEdgeCacheServiceDisableHttp2(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRequireTls(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRequireTls(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceEdgeSslCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceEdgeSslCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceSslPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceSslPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRouting(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRouting(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2203,21 +2240,21 @@ func expandNetworkServicesEdgeCacheServiceRouting(v interface{}, d TerraformReso transformedHostRule, err := expandNetworkServicesEdgeCacheServiceRoutingHostRule(original["host_rule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRules"] = transformedHostRule } transformedPathMatcher, err 
:= expandNetworkServicesEdgeCacheServiceRoutingPathMatcher(original["path_matcher"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathMatcher); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathMatcher); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathMatchers"] = transformedPathMatcher } return transformed, nil } -func expandNetworkServicesEdgeCacheServiceRoutingHostRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingHostRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2230,21 +2267,21 @@ func expandNetworkServicesEdgeCacheServiceRoutingHostRule(v interface{}, d Terra transformedDescription, err := expandNetworkServicesEdgeCacheServiceRoutingHostRuleDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedHosts, err := expandNetworkServicesEdgeCacheServiceRoutingHostRuleHosts(original["hosts"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHosts); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHosts); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hosts"] = transformedHosts } transformedPathMatcher, err := expandNetworkServicesEdgeCacheServiceRoutingHostRulePathMatcher(original["path_matcher"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathMatcher); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedPathMatcher); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathMatcher"] = transformedPathMatcher } @@ -2253,19 +2290,19 @@ func expandNetworkServicesEdgeCacheServiceRoutingHostRule(v interface{}, d Terra return req, nil } -func expandNetworkServicesEdgeCacheServiceRoutingHostRuleDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingHostRuleDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingHostRuleHosts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingHostRuleHosts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingHostRulePathMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingHostRulePathMatcher(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcher(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2278,21 +2315,21 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcher(v interface{}, d Te transformedName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && 
!isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedDescription, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedRouteRule, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRule(original["route_rule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRouteRule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRouteRule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["routeRules"] = transformedRouteRule } @@ -2301,15 +2338,15 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcher(v interface{}, d Te return req, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2322,49 +2359,49 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRule(v interfac transformedPriority, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRulePriority(original["priority"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPriority); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPriority); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["priority"] = transformedPriority } transformedDescription, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedMatchRule, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule(original["match_rule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMatchRule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMatchRule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["matchRules"] = transformedMatchRule } transformedHeaderAction, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderAction(original["header_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderAction); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["headerAction"] = transformedHeaderAction } transformedRouteAction, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction(original["route_action"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRouteAction); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRouteAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["routeAction"] = transformedRouteAction } transformedOrigin, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleOrigin(original["origin"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOrigin); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrigin); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["origin"] = transformedOrigin } transformedUrlRedirect, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirect(original["url_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRedirect"] = transformedUrlRedirect } @@ -2373,15 +2410,15 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRule(v interfac return req, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRulePriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRulePriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2394,42 +2431,42 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule(v transformedIgnoreCase, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleIgnoreCase(original["ignore_case"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIgnoreCase); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIgnoreCase); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ignoreCase"] = transformedIgnoreCase } transformedHeaderMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatch(original["header_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerMatches"] = transformedHeaderMatch } transformedQueryParameterMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatch(original["query_parameter_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedQueryParameterMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedQueryParameterMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["queryParameterMatches"] = transformedQueryParameterMatch } transformedPrefixMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePrefixMatch(original["prefix_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixMatch"] = transformedPrefixMatch } transformedPathTemplateMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePathTemplateMatch(original["path_template_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathTemplateMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathTemplateMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathTemplateMatch"] = transformedPathTemplateMatch } transformedFullPathMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleFullPathMatch(original["full_path_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFullPathMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFullPathMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fullPathMatch"] = transformedFullPathMatch } @@ -2438,11 +2475,11 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule(v return req, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleIgnoreCase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleIgnoreCase(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2455,42 +2492,42 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHe transformedHeaderName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedPresentMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPresentMatch(original["present_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPresentMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPresentMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["presentMatch"] = transformedPresentMatch } transformedExactMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchExactMatch(original["exact_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExactMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExactMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["exactMatch"] = transformedExactMatch } transformedPrefixMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPrefixMatch(original["prefix_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixMatch"] = transformedPrefixMatch } transformedSuffixMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchSuffixMatch(original["suffix_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSuffixMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSuffixMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["suffixMatch"] = transformedSuffixMatch } transformedInvertMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchInvertMatch(original["invert_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInvertMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInvertMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["invertMatch"] = transformedInvertMatch } @@ -2499,31 +2536,31 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHe return req, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPresentMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPresentMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchExactMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchExactMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPrefixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPrefixMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchSuffixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchSuffixMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchInvertMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchInvertMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { 
return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2536,21 +2573,21 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQu transformedName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchName(original["name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["name"] = transformedName } transformedPresentMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchPresentMatch(original["present_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPresentMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPresentMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["presentMatch"] = transformedPresentMatch } transformedExactMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchExactMatch(original["exact_match"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExactMatch); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExactMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exactMatch"] = transformedExactMatch } @@ -2559,31 +2596,31 @@ func 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQu return req, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchPresentMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchPresentMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchExactMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchExactMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePrefixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePrefixMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePathTemplateMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePathTemplateMatch(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleFullPathMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleFullPathMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2595,35 +2632,35 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActio transformedRequestHeaderToAdd, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAdd(original["request_header_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeaderToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeaderToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToAdd"] = transformedRequestHeaderToAdd } transformedResponseHeaderToAdd, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAdd(original["response_header_to_add"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeaderToAdd); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeaderToAdd); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["responseHeadersToAdd"] = transformedResponseHeaderToAdd } transformedRequestHeaderToRemove, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemove(original["request_header_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRequestHeaderToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRequestHeaderToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["requestHeadersToRemove"] = transformedRequestHeaderToRemove } transformedResponseHeaderToRemove, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemove(original["response_header_to_remove"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedResponseHeaderToRemove); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedResponseHeaderToRemove); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["responseHeadersToRemove"] = transformedResponseHeaderToRemove } return transformed, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2636,21 +2673,21 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActio transformedHeaderName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -2659,19 +2696,19 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActio return req, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderValue(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAdd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2684,21 +2721,21 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActio transformedHeaderName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } transformedHeaderValue, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderValue(original["header_value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["headerValue"] = transformedHeaderValue } transformedReplace, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddReplace(original["replace"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReplace); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["replace"] = transformedReplace } @@ -2707,19 +2744,19 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActio return req, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddReplace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemove(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2732,7 +2769,7 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActio transformedHeaderName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemoveHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } @@ -2741,11 +2778,11 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActio return req, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemoveHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemoveHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemove(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2758,7 +2795,7 @@ func 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActio transformedHeaderName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemoveHeaderName(original["header_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHeaderName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["headerName"] = transformedHeaderName } @@ -2767,11 +2804,11 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActio return req, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemoveHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemoveHeaderName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2783,28 +2820,28 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction transformedCdnPolicy, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicy(original["cdn_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCdnPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCdnPolicy); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["cdnPolicy"] = transformedCdnPolicy } transformedUrlRewrite, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewrite(original["url_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["urlRewrite"] = transformedUrlRewrite } transformedCorsPolicy, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicy(original["cors_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["corsPolicy"] = transformedCorsPolicy } return transformed, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2816,107 +2853,107 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction transformedCacheMode, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheMode(original["cache_mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCacheMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCacheMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cacheMode"] = transformedCacheMode } 
transformedClientTtl, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyClientTtl(original["client_ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedClientTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedClientTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["clientTtl"] = transformedClientTtl } transformedDefaultTtl, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyDefaultTtl(original["default_ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDefaultTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDefaultTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["defaultTtl"] = transformedDefaultTtl } transformedMaxTtl, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyMaxTtl(original["max_ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxTtl"] = transformedMaxTtl } transformedCacheKeyPolicy, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicy(original["cache_key_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCacheKeyPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCacheKeyPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cacheKeyPolicy"] = transformedCacheKeyPolicy } transformedNegativeCaching, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCaching(original["negative_caching"], d, config) if err != nil { return 
nil, err - } else if val := reflect.ValueOf(transformedNegativeCaching); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNegativeCaching); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["negativeCaching"] = transformedNegativeCaching } transformedNegativeCachingPolicy, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCachingPolicy(original["negative_caching_policy"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNegativeCachingPolicy); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNegativeCachingPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["negativeCachingPolicy"] = transformedNegativeCachingPolicy } transformedSignedRequestMode, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMode(original["signed_request_mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSignedRequestMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSignedRequestMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["signedRequestMode"] = transformedSignedRequestMode } transformedSignedRequestKeyset, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestKeyset(original["signed_request_keyset"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSignedRequestKeyset); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSignedRequestKeyset); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["signedRequestKeyset"] = transformedSignedRequestKeyset } transformedSignedTokenOptions, err := 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptions(original["signed_token_options"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSignedTokenOptions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSignedTokenOptions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["signedTokenOptions"] = transformedSignedTokenOptions } transformedAddSignatures, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignatures(original["add_signatures"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAddSignatures); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAddSignatures); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["addSignatures"] = transformedAddSignatures } transformedSignedRequestMaximumExpirationTtl, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMaximumExpirationTtl(original["signed_request_maximum_expiration_ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSignedRequestMaximumExpirationTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSignedRequestMaximumExpirationTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["signedRequestMaximumExpirationTtl"] = transformedSignedRequestMaximumExpirationTtl } return transformed, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyClientTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyClientTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyDefaultTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyDefaultTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyMaxTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyMaxTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2928,88 +2965,88 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction transformedIncludeProtocol, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludeProtocol(original["include_protocol"], d, config) if err != nil { return nil, err - } else if val 
:= reflect.ValueOf(transformedIncludeProtocol); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIncludeProtocol); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["includeProtocol"] = transformedIncludeProtocol } transformedExcludeQueryString, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeQueryString(original["exclude_query_string"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExcludeQueryString); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExcludeQueryString); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["excludeQueryString"] = transformedExcludeQueryString } transformedExcludeHost, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeHost(original["exclude_host"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExcludeHost); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExcludeHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["excludeHost"] = transformedExcludeHost } transformedIncludedQueryParameters, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedQueryParameters(original["included_query_parameters"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIncludedQueryParameters); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIncludedQueryParameters); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["includedQueryParameters"] = transformedIncludedQueryParameters } transformedExcludedQueryParameters, err := 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludedQueryParameters(original["excluded_query_parameters"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExcludedQueryParameters); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExcludedQueryParameters); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["excludedQueryParameters"] = transformedExcludedQueryParameters } transformedIncludedHeaderNames, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedHeaderNames(original["included_header_names"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIncludedHeaderNames); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIncludedHeaderNames); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["includedHeaderNames"] = transformedIncludedHeaderNames } transformedIncludedCookieNames, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedCookieNames(original["included_cookie_names"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIncludedCookieNames); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIncludedCookieNames); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["includedCookieNames"] = transformedIncludedCookieNames } return transformed, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeQueryString(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeQueryString(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeHost(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedQueryParameters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedQueryParameters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludedQueryParameters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludedQueryParameters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedHeaderNames(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedHeaderNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedCookieNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedCookieNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCaching(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCaching(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCachingPolicy(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCachingPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -3020,15 +3057,15 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction return m, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestKeyset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestKeyset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3040,29 +3077,29 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction transformedTokenQueryParameter, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptionsTokenQueryParameter(original["token_query_parameter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTokenQueryParameter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTokenQueryParameter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tokenQueryParameter"] = transformedTokenQueryParameter } transformedAllowedSignatureAlgorithms, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptionsAllowedSignatureAlgorithms(original["allowed_signature_algorithms"], d, config) if err != nil { 
return nil, err - } else if val := reflect.ValueOf(transformedAllowedSignatureAlgorithms); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedSignatureAlgorithms); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowedSignatureAlgorithms"] = transformedAllowedSignatureAlgorithms } return transformed, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptionsTokenQueryParameter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptionsTokenQueryParameter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptionsAllowedSignatureAlgorithms(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedTokenOptionsAllowedSignatureAlgorithms(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignatures(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignatures(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3074,66 +3111,66 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction transformedActions, err := 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesActions(original["actions"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedActions); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedActions); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["actions"] = transformedActions } transformedKeyset, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesKeyset(original["keyset"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKeyset); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKeyset); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["keyset"] = transformedKeyset } transformedTokenTtl, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesTokenTtl(original["token_ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTokenTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTokenTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tokenTtl"] = transformedTokenTtl } transformedTokenQueryParameter, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesTokenQueryParameter(original["token_query_parameter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTokenQueryParameter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTokenQueryParameter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tokenQueryParameter"] = transformedTokenQueryParameter } transformedCopiedParameters, err := 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesCopiedParameters(original["copied_parameters"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCopiedParameters); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCopiedParameters); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["copiedParameters"] = transformedCopiedParameters } return transformed, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesActions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesActions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesKeyset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesKeyset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesTokenTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesTokenTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesTokenQueryParameter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesTokenQueryParameter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesCopiedParameters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyAddSignaturesCopiedParameters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMaximumExpirationTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMaximumExpirationTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3145,40 +3182,40 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction transformedPathPrefixRewrite, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { + } else 
if val := reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite } transformedHostRewrite, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRewrite"] = transformedHostRewrite } transformedPathTemplateRewrite, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathTemplateRewrite(original["path_template_rewrite"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathTemplateRewrite); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathTemplateRewrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathTemplateRewrite"] = transformedPathTemplateRewrite } return transformed, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewriteHostRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } 
-func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathTemplateRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathTemplateRewrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3190,88 +3227,88 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction transformedMaxAge, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyMaxAge(original["max_age"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxAge); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxAge"] = transformedMaxAge } transformedAllowCredentials, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowCredentials"] = transformedAllowCredentials } transformedAllowOrigins, err := 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowOrigins"] = transformedAllowOrigins } transformedAllowMethods, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowMethods); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowMethods"] = transformedAllowMethods } transformedAllowHeaders, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowHeaders"] = transformedAllowHeaders } transformedExposeHeaders, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exposeHeaders"] = transformedExposeHeaders } transformedDisabled, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyDisabled(original["disabled"], d, config) if 
err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disabled"] = transformedDisabled } return transformed, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyMaxAge(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowMethods(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleOrigin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleOrigin(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3283,73 
+3320,73 @@ func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirect transformedHostRedirect, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHostRedirect(original["host_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHostRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hostRedirect"] = transformedHostRedirect } transformedPathRedirect, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPathRedirect(original["path_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPathRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pathRedirect"] = transformedPathRedirect } transformedPrefixRedirect, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["prefixRedirect"] = transformedPrefixRedirect } transformedRedirectResponseCode, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["redirectResponseCode"] = transformedRedirectResponseCode } 
transformedHttpsRedirect, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHttpsRedirect(original["https_redirect"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["httpsRedirect"] = transformedHttpsRedirect } transformedStripQuery, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectStripQuery(original["strip_query"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStripQuery); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["stripQuery"] = transformedStripQuery } return transformed, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHostRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPathRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPrefixRedirect(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectRedirectResponseCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHttpsRedirect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectStripQuery(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceLogConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3361,28 +3398,28 @@ func expandNetworkServicesEdgeCacheServiceLogConfig(v interface{}, d TerraformRe transformedEnable, err := expandNetworkServicesEdgeCacheServiceLogConfigEnable(original["enable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnable); val.IsValid() && !isEmptyValue(val) { + } else if val 
:= reflect.ValueOf(transformedEnable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enable"] = transformedEnable } transformedSampleRate, err := expandNetworkServicesEdgeCacheServiceLogConfigSampleRate(original["sample_rate"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSampleRate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSampleRate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["sampleRate"] = transformedSampleRate } return transformed, nil } -func expandNetworkServicesEdgeCacheServiceLogConfigEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceLogConfigEnable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceLogConfigSampleRate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceLogConfigSampleRate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNetworkServicesEdgeCacheServiceEdgeSecurityPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNetworkServicesEdgeCacheServiceEdgeSecurityPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_service_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_service_sweeper.go new file mode 100644 index 0000000000..8ac4be7ffe --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_edge_cache_service_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package networkservices + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NetworkServicesEdgeCacheService", testSweepNetworkServicesEdgeCacheService) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNetworkServicesEdgeCacheService(region string) error { + resourceName := "NetworkServicesEdgeCacheService" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list 
template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://networkservices.googleapis.com/v1/projects/{{project}}/locations/global/edgeCacheServices", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["edgeCacheServices"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://networkservices.googleapis.com/v1/projects/{{project}}/locations/global/edgeCacheServices/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_gateway.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_gateway.go new file mode 100644 index 0000000000..79aad1c6d6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/networkservices/resource_network_services_gateway.go @@ -0,0 +1,813 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package networkservices + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +// Checks if there is another gateway under the same location. 
+func gatewaysSameLocation(d *schema.ResourceData, config *transport_tpg.Config, billingProject, userAgent string) ([]interface{}, error) { + log.Print("[DEBUG] Looking for gateways under the same location.") + var gateways []interface{} + + gatewaysUrl, err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/{{location}}/gateways") + if err != nil { + return gateways, err + } + + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: gatewaysUrl, + UserAgent: userAgent, + }) + if err != nil { + return gateways, err + } + + data, ok := resp["gateways"] + if !ok || data == nil { + log.Print("[DEBUG] No gateways under the same location found.") + return gateways, nil + } + + gateways = data.([]interface{}) + + log.Printf("[DEBUG] There are still gateways under the same location: %#v", gateways) + + return gateways, nil +} + +// Checks if the given list of gateways contains a gateway of type SECURE_WEB_GATEWAY. +func isLastSWGGateway(gateways []interface{}, network string) bool { + log.Print("[DEBUG] Checking if this is the last gateway of type SECURE_WEB_GATEWAY.") + for _, itemRaw := range gateways { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + gType, ok := item["type"] + if !ok || gType == nil { + continue + } + + gNetwork, ok := item["network"] + if !ok || gNetwork == nil { + continue + } + + if gType.(string) == "SECURE_WEB_GATEWAY" && gNetwork.(string) == network { + return false + } + } + + log.Print("[DEBUG] There is no other gateway of type SECURE_WEB_GATEWAY.") + // no gateways of type SWG found. + return true +} + +// Deletes the swg-autogen-router if the current gateway being deleted is the type of swg so there is no other gateway using it. 
+func deleteSWGAutoGenRouter(d *schema.ResourceData, config *transport_tpg.Config, billingProject, userAgent string) error { + log.Printf("[DEBUG] Searching the network id by name %q.", d.Get("network")) + + networkPath := fmt.Sprintf("{{ComputeBasePath}}%s", d.Get("network")) + networkUrl, err := tpgresource.ReplaceVars(d, config, networkPath) + if err != nil { + return err + } + + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: networkUrl, + UserAgent: userAgent, + }) + if err != nil { + return err + } + + // The name of swg auto generated router is in the following format: swg-autogen-router-{NETWORK-ID} + routerId := fmt.Sprintf("swg-autogen-router-%s", resp["id"]) + log.Printf("[DEBUG] Deleting the auto generated router %q.", routerId) + + routerPath := fmt.Sprintf("{{ComputeBasePath}}projects/{{project}}/regions/{{location}}/routers/%s", routerId) + routerUrl, err := tpgresource.ReplaceVars(d, config, routerPath) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: routerUrl, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSwgAutogenRouterRetryable}, + }) + if err != nil { + if transport_tpg.IsGoogleApiErrorWithCode(err, 404) { + // The swg auto gen router may have already been deleted. + // No further action needed. 
+ return nil + } + + return err + } + + return nil +} + +func ResourceNetworkServicesGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkServicesGatewayCreate, + Read: resourceNetworkServicesGatewayRead, + Update: resourceNetworkServicesGatewayUpdate, + Delete: resourceNetworkServicesGatewayDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNetworkServicesGatewayImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Short name of the Gateway resource to be created.`, + }, + "ports": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `One or more port numbers (1-65535), on which the Gateway will receive traffic. +The proxy binds to the specified ports. Gateways of type 'SECURE_WEB_GATEWAY' are +limited to 1 port. Gateways of type 'OPEN_MESH' listen on 0.0.0.0 and support multiple ports.`, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "scope": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Immutable. Scope determines how configuration across multiple Gateway instances are merged. +The configuration for multiple Gateway instances with the same scope will be merged as presented as +a single coniguration to the proxy/load balancer. +Max length 64 characters. Scope should start with a letter and can only have letters, numbers, hyphens.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"TYPE_UNSPECIFIED", "OPEN_MESH", "SECURE_WEB_GATEWAY"}), + Description: `Immutable. The type of the customer-managed gateway. Possible values are: * OPEN_MESH * SECURE_WEB_GATEWAY. 
Possible values: ["TYPE_UNSPECIFIED", "OPEN_MESH", "SECURE_WEB_GATEWAY"]`, + }, + "addresses": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Zero or one IPv4-address on which the Gateway will receive the traffic. When no address is provided, +an IP from the subnetwork is allocated This field only applies to gateways of type 'SECURE_WEB_GATEWAY'. +Gateways of type 'OPEN_MESH' listen on 0.0.0.0.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "certificate_urls": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `A fully-qualified Certificates URL reference. The proxy presents a Certificate (selected based on SNI) when establishing a TLS connection. +This feature only applies to gateways of type 'SECURE_WEB_GATEWAY'.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A free-text description of the resource. Max length 1024 characters.`, + }, + "gateway_security_policy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A fully-qualified GatewaySecurityPolicy URL reference. Defines how a server should apply security policy to inbound (VM to Proxy) initiated connections. +For example: 'projects/*/locations/*/gatewaySecurityPolicies/swg-policy'. +This policy is specific to gateways of type 'SECURE_WEB_GATEWAY'.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Set of label tags associated with the Gateway resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `The location of the gateway. +The default value is 'global'.`, + Default: "global", + }, + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The relative resource name identifying the VPC network that is using this configuration. 
+For example: 'projects/*/global/networks/network-1'. +Currently, this field is specific to gateways of type 'SECURE_WEB_GATEWAY'.`, + }, + "server_tls_policy": { + Type: schema.TypeString, + Optional: true, + Description: `A fully-qualified ServerTLSPolicy URL reference. Specifies how TLS traffic is terminated. +If empty, TLS termination is disabled.`, + }, + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The relative resource name identifying the subnetwork in which this SWG is allocated. +For example: 'projects/*/regions/us-central1/subnetworks/network-1'. +Currently, this field is specific to gateways of type 'SECURE_WEB_GATEWAY.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the AccessPolicy was created in UTC.`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `Server-defined URL of this resource.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the AccessPolicy was updated in UTC.`, + }, + "delete_swg_autogen_router_on_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `When deleting a gateway of type 'SECURE_WEB_GATEWAY', this boolean option will also delete auto generated router by the gateway creation. 
+If there is no other gateway of type 'SECURE_WEB_GATEWAY' remaining for that region and network it will be deleted.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceNetworkServicesGatewayCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandNetworkServicesGatewayLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + descriptionProp, err := expandNetworkServicesGatewayDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + typeProp, err := expandNetworkServicesGatewayType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + portsProp, err := expandNetworkServicesGatewayPorts(d.Get("ports"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ports"); !tpgresource.IsEmptyValue(reflect.ValueOf(portsProp)) && (ok || !reflect.DeepEqual(v, portsProp)) { + obj["ports"] = portsProp + } + scopeProp, err := expandNetworkServicesGatewayScope(d.Get("scope"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("scope"); !tpgresource.IsEmptyValue(reflect.ValueOf(scopeProp)) && (ok || !reflect.DeepEqual(v, 
scopeProp)) { + obj["scope"] = scopeProp + } + serverTlsPolicyProp, err := expandNetworkServicesGatewayServerTlsPolicy(d.Get("server_tls_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("server_tls_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(serverTlsPolicyProp)) && (ok || !reflect.DeepEqual(v, serverTlsPolicyProp)) { + obj["serverTlsPolicy"] = serverTlsPolicyProp + } + addressesProp, err := expandNetworkServicesGatewayAddresses(d.Get("addresses"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("addresses"); !tpgresource.IsEmptyValue(reflect.ValueOf(addressesProp)) && (ok || !reflect.DeepEqual(v, addressesProp)) { + obj["addresses"] = addressesProp + } + subnetworkProp, err := expandNetworkServicesGatewaySubnetwork(d.Get("subnetwork"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subnetwork"); !tpgresource.IsEmptyValue(reflect.ValueOf(subnetworkProp)) && (ok || !reflect.DeepEqual(v, subnetworkProp)) { + obj["subnetwork"] = subnetworkProp + } + networkProp, err := expandNetworkServicesGatewayNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + gatewaySecurityPolicyProp, err := expandNetworkServicesGatewayGatewaySecurityPolicy(d.Get("gateway_security_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("gateway_security_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(gatewaySecurityPolicyProp)) && (ok || !reflect.DeepEqual(v, gatewaySecurityPolicyProp)) { + obj["gatewaySecurityPolicy"] = gatewaySecurityPolicyProp + } + certificateUrlsProp, err := expandNetworkServicesGatewayCertificateUrls(d.Get("certificate_urls"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("certificate_urls"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(certificateUrlsProp)) && (ok || !reflect.DeepEqual(v, certificateUrlsProp)) { + obj["certificateUrls"] = certificateUrlsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/{{location}}/gateways?gatewayId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Gateway: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Gateway: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Gateway: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/gateways/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = NetworkServicesOperationWaitTime( + config, res, project, "Creating Gateway", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Gateway: %s", err) + } + + log.Printf("[DEBUG] Finished creating Gateway %q: %#v", d.Id(), res) + + return resourceNetworkServicesGatewayRead(d, meta) +} + +func resourceNetworkServicesGatewayRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/{{location}}/gateways/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Gateway: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetworkServicesGateway %q", d.Id())) + } + + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("delete_swg_autogen_router_on_destroy"); !ok { + if err := d.Set("delete_swg_autogen_router_on_destroy", false); err != nil { + return fmt.Errorf("Error setting delete_swg_autogen_router_on_destroy: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + + if err := d.Set("self_link", flattenNetworkServicesGatewaySelfLink(res["selfLink"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + if err := d.Set("create_time", flattenNetworkServicesGatewayCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + if err := d.Set("update_time", flattenNetworkServicesGatewayUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + if err := d.Set("labels", flattenNetworkServicesGatewayLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + if err := d.Set("description", 
flattenNetworkServicesGatewayDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + if err := d.Set("type", flattenNetworkServicesGatewayType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + if err := d.Set("ports", flattenNetworkServicesGatewayPorts(res["ports"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + if err := d.Set("scope", flattenNetworkServicesGatewayScope(res["scope"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + if err := d.Set("server_tls_policy", flattenNetworkServicesGatewayServerTlsPolicy(res["serverTlsPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + if err := d.Set("addresses", flattenNetworkServicesGatewayAddresses(res["addresses"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + if err := d.Set("subnetwork", flattenNetworkServicesGatewaySubnetwork(res["subnetwork"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + if err := d.Set("network", flattenNetworkServicesGatewayNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + if err := d.Set("gateway_security_policy", flattenNetworkServicesGatewayGatewaySecurityPolicy(res["gatewaySecurityPolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + if err := d.Set("certificate_urls", flattenNetworkServicesGatewayCertificateUrls(res["certificateUrls"], d, config)); err != nil { + return fmt.Errorf("Error reading Gateway: %s", err) + } + + return nil +} + +func resourceNetworkServicesGatewayUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + 
billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Gateway: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandNetworkServicesGatewayLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + descriptionProp, err := expandNetworkServicesGatewayDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + serverTlsPolicyProp, err := expandNetworkServicesGatewayServerTlsPolicy(d.Get("server_tls_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("server_tls_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serverTlsPolicyProp)) { + obj["serverTlsPolicy"] = serverTlsPolicyProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/{{location}}/gateways/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Gateway %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("server_tls_policy") { + updateMask = append(updateMask, "serverTlsPolicy") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil 
indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Gateway %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Gateway %q: %#v", d.Id(), res) + } + + err = NetworkServicesOperationWaitTime( + config, res, project, "Updating Gateway", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceNetworkServicesGatewayRead(d, meta) +} + +func resourceNetworkServicesGatewayDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Gateway: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/{{location}}/gateways/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Gateway %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Gateway") + } + + err = 
NetworkServicesOperationWaitTime( + config, res, project, "Deleting Gateway", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + if d.Get("delete_swg_autogen_router_on_destroy").(bool) { + log.Print("[DEBUG] The field delete_swg_autogen_router_on_destroy is true. Deleting swg_autogen_router.") + gateways, err := gatewaysSameLocation(d, config, billingProject, userAgent) + if err != nil { + return err + } + + network := d.Get("network").(string) + if isLastSWGGateway(gateways, network) { + err := deleteSWGAutoGenRouter(d, config, billingProject, userAgent) + if err != nil { + return err + } + } + } + + log.Printf("[DEBUG] Finished deleting Gateway %q: %#v", d.Id(), res) + return nil +} + +func resourceNetworkServicesGatewayImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/gateways/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/gateways/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Explicitly set virtual fields to default values on import + if err := d.Set("delete_swg_autogen_router_on_destroy", false); err != nil { + return nil, fmt.Errorf("Error setting delete_swg_autogen_router_on_destroy: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenNetworkServicesGatewaySelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesGatewayCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesGatewayUpdateTime(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesGatewayLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesGatewayDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesGatewayType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesGatewayPorts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesGatewayScope(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesGatewayServerTlsPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesGatewayAddresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesGatewaySubnetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesGatewayNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesGatewayGatewaySecurityPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNetworkServicesGatewayCertificateUrls(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNetworkServicesGatewayLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return 
m, nil +} + +func expandNetworkServicesGatewayDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkServicesGatewayType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkServicesGatewayPorts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkServicesGatewayScope(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkServicesGatewayServerTlsPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkServicesGatewayAddresses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkServicesGatewaySubnetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkServicesGatewayNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkServicesGatewayGatewaySecurityPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNetworkServicesGatewayCertificateUrls(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/iam_notebooks_instance.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/iam_notebooks_instance.go new file mode 100644 index 0000000000..a0a32d1997 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/iam_notebooks_instance.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package notebooks + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var NotebooksInstanceIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "instance_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type NotebooksInstanceIamUpdater struct { + project string + location string + instanceName string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func 
NotebooksInstanceIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("instance_name"); ok { + values["instance_name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("instance_name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &NotebooksInstanceIamUpdater{ + project: values["project"], + location: values["location"], + instanceName: values["instance_name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("instance_name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting instance_name: %s", err) + } + + return u, nil +} + +func NotebooksInstanceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, 
config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &NotebooksInstanceIamUpdater{ + project: values["project"], + location: values["location"], + instanceName: values["instance_name"], + d: d, + Config: config, + } + if err := d.Set("instance_name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting instance_name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *NotebooksInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyInstanceUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *NotebooksInstanceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := 
u.qualifyInstanceUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *NotebooksInstanceIamUpdater) qualifyInstanceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{NotebooksBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/instances/%s", u.project, u.location, u.instanceName), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *NotebooksInstanceIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/instances/%s", u.project, u.location, u.instanceName) +} + +func (u *NotebooksInstanceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-notebooks-instance-%s", u.GetResourceId()) +} + +func (u *NotebooksInstanceIamUpdater) DescribeResource() string { + return fmt.Sprintf("notebooks instance %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/iam_notebooks_runtime.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/iam_notebooks_runtime.go new file mode 100644 index 0000000000..38004e86df --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/iam_notebooks_runtime.go @@ -0,0 +1,245 
@@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package notebooks + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var NotebooksRuntimeIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "runtime_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type NotebooksRuntimeIamUpdater struct { + project string + location string + runtimeName string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NotebooksRuntimeIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + 
values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("runtime_name"); ok { + values["runtime_name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/runtimes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("runtime_name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &NotebooksRuntimeIamUpdater{ + project: values["project"], + location: values["location"], + runtimeName: values["runtime_name"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("runtime_name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting runtime_name: %s", err) + } + + return u, nil +} + +func NotebooksRuntimeIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/runtimes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &NotebooksRuntimeIamUpdater{ + 
project: values["project"], + location: values["location"], + runtimeName: values["runtime_name"], + d: d, + Config: config, + } + if err := d.Set("runtime_name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting runtime_name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *NotebooksRuntimeIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyRuntimeUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *NotebooksRuntimeIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyRuntimeUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: 
url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *NotebooksRuntimeIamUpdater) qualifyRuntimeUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{NotebooksBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/runtimes/%s", u.project, u.location, u.runtimeName), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *NotebooksRuntimeIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/runtimes/%s", u.project, u.location, u.runtimeName) +} + +func (u *NotebooksRuntimeIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-notebooks-runtime-%s", u.GetResourceId()) +} + +func (u *NotebooksRuntimeIamUpdater) DescribeResource() string { + return fmt.Sprintf("notebooks runtime %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/notebooks_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/notebooks_operation.go new file mode 100644 index 0000000000..ff6e0b5c62 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/notebooks_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package notebooks + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type NotebooksOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *NotebooksOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.NotebooksBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createNotebooksWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*NotebooksOperationWaiter, error) { + w := &NotebooksOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func NotebooksOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createNotebooksWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func NotebooksOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, 
project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createNotebooksWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_environment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_environment.go new file mode 100644 index 0000000000..5543cfe340 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_environment.go @@ -0,0 +1,625 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package notebooks + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceNotebooksEnvironment() *schema.Resource { + return &schema.Resource{ + Create: resourceNotebooksEnvironmentCreate, + Read: resourceNotebooksEnvironmentRead, + Update: resourceNotebooksEnvironmentUpdate, + Delete: resourceNotebooksEnvironmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNotebooksEnvironmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the zone where the machine resides.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name specified for the Environment instance. +Format: projects/{project_id}/locations/{location}/environments/{environmentId}`, + }, + "container_image": { + Type: schema.TypeList, + Optional: true, + Description: `Use a container image to start the notebook instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "repository": { + Type: schema.TypeString, + Required: true, + Description: `The path to the container image repository. +For example: gcr.io/{project_id}/{imageName}`, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + Description: `The tag of the container image. 
If not specified, this defaults to the latest tag.`, + }, + }, + }, + ExactlyOneOf: []string{"vm_image", "container_image"}, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A brief description of this environment.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `Display name of this environment for the UI.`, + }, + "post_startup_script": { + Type: schema.TypeString, + Optional: true, + Description: `Path to a Bash script that automatically runs after a notebook instance fully boots up. +The path must be a URL or Cloud Storage path. Example: "gs://path-to-file/file-name"`, + }, + "vm_image": { + Type: schema.TypeList, + Optional: true, + Description: `Use a Compute Engine VM image to start the notebook instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Required: true, + Description: `The name of the Google Cloud project that this VM image belongs to. 
+Format: projects/{project_id}`, + }, + "image_family": { + Type: schema.TypeString, + Optional: true, + Description: `Use this VM image family to find the image; the newest image in this family will be used.`, + }, + "image_name": { + Type: schema.TypeString, + Optional: true, + Description: `Use VM image name to find the image.`, + }, + }, + }, + ExactlyOneOf: []string{"vm_image", "container_image"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Instance creation time`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceNotebooksEnvironmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandNotebooksEnvironmentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandNotebooksEnvironmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + postStartupScriptProp, err := expandNotebooksEnvironmentPostStartupScript(d.Get("post_startup_script"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("post_startup_script"); !tpgresource.IsEmptyValue(reflect.ValueOf(postStartupScriptProp)) && (ok || !reflect.DeepEqual(v, postStartupScriptProp)) { + obj["postStartupScript"] = postStartupScriptProp + } 
+ vmImageProp, err := expandNotebooksEnvironmentVmImage(d.Get("vm_image"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("vm_image"); !tpgresource.IsEmptyValue(reflect.ValueOf(vmImageProp)) && (ok || !reflect.DeepEqual(v, vmImageProp)) { + obj["vmImage"] = vmImageProp + } + containerImageProp, err := expandNotebooksEnvironmentContainerImage(d.Get("container_image"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("container_image"); !tpgresource.IsEmptyValue(reflect.ValueOf(containerImageProp)) && (ok || !reflect.DeepEqual(v, containerImageProp)) { + obj["containerImage"] = containerImageProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/environments?environmentId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Environment: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Environment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Environment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/environments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = 
NotebooksOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Environment", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Environment: %s", err) + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/environments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Environment %q: %#v", d.Id(), res) + + return resourceNotebooksEnvironmentRead(d, meta) +} + +func resourceNotebooksEnvironmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/environments/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Environment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NotebooksEnvironment %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + + if err := d.Set("display_name", flattenNotebooksEnvironmentDisplayName(res["displayName"], d, 
config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + if err := d.Set("description", flattenNotebooksEnvironmentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + if err := d.Set("post_startup_script", flattenNotebooksEnvironmentPostStartupScript(res["postStartupScript"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + if err := d.Set("create_time", flattenNotebooksEnvironmentCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + if err := d.Set("vm_image", flattenNotebooksEnvironmentVmImage(res["vmImage"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + if err := d.Set("container_image", flattenNotebooksEnvironmentContainerImage(res["containerImage"], d, config)); err != nil { + return fmt.Errorf("Error reading Environment: %s", err) + } + + return nil +} + +func resourceNotebooksEnvironmentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Environment: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandNotebooksEnvironmentDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandNotebooksEnvironmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + postStartupScriptProp, err := expandNotebooksEnvironmentPostStartupScript(d.Get("post_startup_script"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("post_startup_script"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, postStartupScriptProp)) { + obj["postStartupScript"] = postStartupScriptProp + } + vmImageProp, err := expandNotebooksEnvironmentVmImage(d.Get("vm_image"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("vm_image"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, vmImageProp)) { + obj["vmImage"] = vmImageProp + } + containerImageProp, err := expandNotebooksEnvironmentContainerImage(d.Get("container_image"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("container_image"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, containerImageProp)) { + obj["containerImage"] = containerImageProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/environments/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Environment %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Environment %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Environment %q: %#v", d.Id(), res) 
+ } + + err = NotebooksOperationWaitTime( + config, res, project, "Updating Environment", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceNotebooksEnvironmentRead(d, meta) +} + +func resourceNotebooksEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Environment: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/environments/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Environment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Environment") + } + + err = NotebooksOperationWaitTime( + config, res, project, "Deleting Environment", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Environment %q: %#v", d.Id(), res) + return nil +} + +func resourceNotebooksEnvironmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/environments/(?P[^/]+)", + 
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/environments/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNotebooksEnvironmentDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksEnvironmentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksEnvironmentPostStartupScript(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksEnvironmentCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksEnvironmentVmImage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["project"] = + flattenNotebooksEnvironmentVmImageProject(original["project"], d, config) + transformed["image_name"] = + flattenNotebooksEnvironmentVmImageImageName(original["imageName"], d, config) + transformed["image_family"] = + flattenNotebooksEnvironmentVmImageImageFamily(original["imageFamily"], d, config) + return []interface{}{transformed} +} +func flattenNotebooksEnvironmentVmImageProject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksEnvironmentVmImageImageName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksEnvironmentVmImageImageFamily(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksEnvironmentContainerImage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["repository"] = + flattenNotebooksEnvironmentContainerImageRepository(original["repository"], d, config) + transformed["tag"] = + flattenNotebooksEnvironmentContainerImageTag(original["tag"], d, config) + return []interface{}{transformed} +} +func flattenNotebooksEnvironmentContainerImageRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksEnvironmentContainerImageTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNotebooksEnvironmentDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksEnvironmentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksEnvironmentPostStartupScript(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksEnvironmentVmImage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProject, err := expandNotebooksEnvironmentVmImageProject(original["project"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProject); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["project"] = transformedProject + } + + transformedImageName, err := expandNotebooksEnvironmentVmImageImageName(original["image_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImageName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["imageName"] = transformedImageName + } + + transformedImageFamily, err := expandNotebooksEnvironmentVmImageImageFamily(original["image_family"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImageFamily); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["imageFamily"] = transformedImageFamily + } + + return transformed, nil +} + +func expandNotebooksEnvironmentVmImageProject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksEnvironmentVmImageImageName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksEnvironmentVmImageImageFamily(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksEnvironmentContainerImage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRepository, err := expandNotebooksEnvironmentContainerImageRepository(original["repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repository"] = transformedRepository + } + + transformedTag, err := 
expandNotebooksEnvironmentContainerImageTag(original["tag"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tag"] = transformedTag + } + + return transformed, nil +} + +func expandNotebooksEnvironmentContainerImageRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksEnvironmentContainerImageTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_environment_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_environment_sweeper.go new file mode 100644 index 0000000000..f19898c453 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_environment_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package notebooks + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NotebooksEnvironment", testSweepNotebooksEnvironment) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNotebooksEnvironment(region string) error { + resourceName := "NotebooksEnvironment" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://notebooks.googleapis.com/v1/projects/{{project}}/locations/{{location}}/environments", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["environments"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://notebooks.googleapis.com/v1/projects/{{project}}/locations/{{location}}/environments/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_instance.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_instance.go new file mode 100644 index 0000000000..0c0f9e4ade --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_instance.go @@ -0,0 +1,1474 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package notebooks + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +const notebooksInstanceGoogleProvidedLabel = "goog-caip-notebook" + +func NotebooksInstanceLabelDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // Suppress diffs for the label provided by Google + if strings.Contains(k, notebooksInstanceGoogleProvidedLabel) && new == "" { + return true + } + + // Let diff be determined by labels (above) + if strings.Contains(k, "labels.%") { + return true + } + + // For other keys, don't suppress diff. 
+ return false +} + +func ResourceNotebooksInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceNotebooksInstanceCreate, + Read: resourceNotebooksInstanceRead, + Update: resourceNotebooksInstanceUpdate, + Delete: resourceNotebooksInstanceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNotebooksInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to the zone where the machine resides.`, + }, + "machine_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to a machine type which defines VM kind.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name specified for the Notebook instance.`, + }, + "accelerator_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The hardware accelerator used on this instance. 
If you use accelerators, +make sure that your configuration has enough vCPUs and memory to support the +machineType you have selected.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "core_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Count of cores of this accelerator.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ACCELERATOR_TYPE_UNSPECIFIED", "NVIDIA_TESLA_K80", "NVIDIA_TESLA_P100", "NVIDIA_TESLA_V100", "NVIDIA_TESLA_P4", "NVIDIA_TESLA_T4", "NVIDIA_TESLA_T4_VWS", "NVIDIA_TESLA_P100_VWS", "NVIDIA_TESLA_P4_VWS", "NVIDIA_TESLA_A100", "TPU_V2", "TPU_V3"}), + Description: `Type of this accelerator. Possible values: ["ACCELERATOR_TYPE_UNSPECIFIED", "NVIDIA_TESLA_K80", "NVIDIA_TESLA_P100", "NVIDIA_TESLA_V100", "NVIDIA_TESLA_P4", "NVIDIA_TESLA_T4", "NVIDIA_TESLA_T4_VWS", "NVIDIA_TESLA_P100_VWS", "NVIDIA_TESLA_P4_VWS", "NVIDIA_TESLA_A100", "TPU_V2", "TPU_V3"]`, + }, + }, + }, + }, + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The size of the boot disk in GB attached to this instance, +up to a maximum of 64000 GB (64 TB). The minimum recommended value is 100 GB. +If not specified, this defaults to 100.`, + }, + "boot_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME", ""}), + Description: `Possible disk types for notebook instances. 
Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME"]`, + }, + "container_image": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Use a container image to start the notebook instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "repository": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The path to the container image repository. +For example: gcr.io/{project_id}/{imageName}`, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The tag of the container image. If not specified, this defaults to the latest tag.`, + }, + }, + }, + ExactlyOneOf: []string{"vm_image", "container_image"}, + }, + "custom_gpu_driver_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specify a custom Cloud Storage path where the GPU driver is stored. +If not specified, we'll automatically choose from official GPU drivers.`, + }, + "data_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The size of the data disk in GB attached to this instance, +up to a maximum of 64000 GB (64 TB). +You can choose the size of the data disk based on how big your notebooks and data are. +If not specified, this defaults to 100.`, + }, + "data_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME", ""}), + Description: `Possible disk types for notebook instances. 
Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", "PD_EXTREME"]`, + }, + "disk_encryption": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"DISK_ENCRYPTION_UNSPECIFIED", "GMEK", "CMEK", ""}), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("DISK_ENCRYPTION_UNSPECIFIED"), + Description: `Disk encryption method used on the boot and data disks, defaults to GMEK. Possible values: ["DISK_ENCRYPTION_UNSPECIFIED", "GMEK", "CMEK"]`, + }, + "install_gpu_driver": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether the end user authorizes Google Cloud to install GPU driver +on this instance. If this field is empty or set to false, the GPU driver +won't be installed. Only applicable to instances with GPUs.`, + }, + "instance_owners": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The list of owners of this instance after creation. +Format: alias@example.com. +Currently supports one owner only. +If not specified, all of the service account users of +your VM instance's service account can use the instance.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The KMS key used to encrypt the disks, only applicable if diskEncryption is CMEK. +Format: projects/{project_id}/locations/{location}/keyRings/{key_ring_id}/cryptoKeys/{key_id}`, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + DiffSuppressFunc: NotebooksInstanceLabelDiffSuppress, + Description: `Labels to apply to this instance. These can be later modified by the setLabels method. +An object containing a list of "key": value pairs. 
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "metadata": { + Type: schema.TypeMap, + Optional: true, + Description: `Custom metadata to apply to this instance. +An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "network": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the VPC that this instance is in. +Format: projects/{project_id}/global/networks/{network_id}`, + }, + "nic_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"UNSPECIFIED_NIC_TYPE", "VIRTIO_NET", "GVNIC", ""}), + Description: `The type of vNIC driver. Possible values: ["UNSPECIFIED_NIC_TYPE", "VIRTIO_NET", "GVNIC"]`, + }, + "no_proxy_access": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `The notebook instance will not register with the proxy..`, + }, + "no_public_ip": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `No public IP will be assigned to this instance.`, + }, + "no_remove_data_disk": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If true, the data disk will not be auto deleted when deleting the instance.`, + }, + "post_startup_script": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Path to a Bash script that automatically runs after a +notebook instance fully boots up. 
The path must be a URL +or Cloud Storage path (gs://path-to-file/file-name).`, + }, + "reservation_affinity": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Reservation Affinity for consuming Zonal reservation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "consume_reservation_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"}), + Description: `The type of Compute Reservation. Possible values: ["NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"]`, + }, + "key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Corresponds to the label key of reservation resource.`, + }, + "values": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Corresponds to the label values of reservation resource.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The service account on this instance, giving access to other +Google Cloud services. You can use any service account within +the same project, but you must have the service account user +permission to use the instance. If not specified, +the Compute Engine default service account is used.`, + }, + "service_account_scopes": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Optional. The URIs of service account scopes to be included in Compute Engine instances. 
+If not specified, the following scopes are defined: +- https://www.googleapis.com/auth/cloud-platform +- https://www.googleapis.com/auth/userinfo.email`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "shielded_instance_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A set of Shielded Instance options. Check [Images using supported Shielded VM features] +Not all combinations are valid`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_integrity_monitoring": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Defines whether the instance has integrity monitoring enabled. Enables monitoring and attestation of the +boot integrity of the instance. The attestation is performed against the integrity policy baseline. +This baseline is initially derived from the implicitly trusted boot image when the instance is created. +Enabled by default.`, + Default: true, + }, + "enable_secure_boot": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Defines whether the instance has Secure Boot enabled. Secure Boot helps ensure that the system only runs +authentic software by verifying the digital signature of all boot components, and halting the boot process +if signature verification fails. +Disabled by default.`, + }, + "enable_vtpm": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Defines whether the instance has the vTPM enabled. +Enabled by default.`, + Default: true, + }, + }, + }, + }, + "subnet": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the subnet that this instance is in. 
+Format: projects/{project_id}/regions/{region}/subnetworks/{subnetwork_id}`, + }, + "tags": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The Compute Engine tags to add to instance.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "vm_image": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Use a Compute Engine VM image to start the notebook instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the Google Cloud project that this VM image belongs to. +Format: projects/{project_id}`, + }, + "image_family": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Use this VM image family to find the image; the newest image in this family will be used.`, + }, + "image_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Use VM image name to find the image.`, + }, + }, + }, + ExactlyOneOf: []string{"vm_image", "container_image"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Instance creation time`, + }, + "proxy_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The proxy endpoint that is used to access the Jupyter notebook. +Only returned when the resource is in a 'PROVISIONED' state. 
If +needed you can utilize 'terraform apply -refresh-only' to await +the population of this value.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The state of this instance.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Instance update time.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceNotebooksInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + machineTypeProp, err := expandNotebooksInstanceMachineType(d.Get("machine_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("machine_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(machineTypeProp)) && (ok || !reflect.DeepEqual(v, machineTypeProp)) { + obj["machineType"] = machineTypeProp + } + postStartupScriptProp, err := expandNotebooksInstancePostStartupScript(d.Get("post_startup_script"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("post_startup_script"); !tpgresource.IsEmptyValue(reflect.ValueOf(postStartupScriptProp)) && (ok || !reflect.DeepEqual(v, postStartupScriptProp)) { + obj["postStartupScript"] = postStartupScriptProp + } + instanceOwnersProp, err := expandNotebooksInstanceInstanceOwners(d.Get("instance_owners"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("instance_owners"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceOwnersProp)) && (ok || !reflect.DeepEqual(v, instanceOwnersProp)) { + obj["instanceOwners"] = instanceOwnersProp + } + serviceAccountProp, err := expandNotebooksInstanceServiceAccount(d.Get("service_account"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("service_account"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceAccountProp)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { + obj["serviceAccount"] = serviceAccountProp + } + serviceAccountScopesProp, err := expandNotebooksInstanceServiceAccountScopes(d.Get("service_account_scopes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_account_scopes"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceAccountScopesProp)) && (ok || !reflect.DeepEqual(v, serviceAccountScopesProp)) { + obj["serviceAccountScopes"] = serviceAccountScopesProp + } + acceleratorConfigProp, err := expandNotebooksInstanceAcceleratorConfig(d.Get("accelerator_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("accelerator_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(acceleratorConfigProp)) && (ok || !reflect.DeepEqual(v, acceleratorConfigProp)) { + obj["acceleratorConfig"] = acceleratorConfigProp + } + shieldedInstanceConfigProp, err := expandNotebooksInstanceShieldedInstanceConfig(d.Get("shielded_instance_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("shielded_instance_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(shieldedInstanceConfigProp)) && (ok || !reflect.DeepEqual(v, shieldedInstanceConfigProp)) { + obj["shieldedInstanceConfig"] = shieldedInstanceConfigProp + } + nicTypeProp, err := expandNotebooksInstanceNicType(d.Get("nic_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("nic_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(nicTypeProp)) && (ok || !reflect.DeepEqual(v, nicTypeProp)) { + obj["nicType"] = nicTypeProp + } + reservationAffinityProp, err := expandNotebooksInstanceReservationAffinity(d.Get("reservation_affinity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reservation_affinity"); !tpgresource.IsEmptyValue(reflect.ValueOf(reservationAffinityProp)) && (ok 
|| !reflect.DeepEqual(v, reservationAffinityProp)) { + obj["reservationAffinity"] = reservationAffinityProp + } + installGpuDriverProp, err := expandNotebooksInstanceInstallGpuDriver(d.Get("install_gpu_driver"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("install_gpu_driver"); !tpgresource.IsEmptyValue(reflect.ValueOf(installGpuDriverProp)) && (ok || !reflect.DeepEqual(v, installGpuDriverProp)) { + obj["installGpuDriver"] = installGpuDriverProp + } + customGpuDriverPathProp, err := expandNotebooksInstanceCustomGpuDriverPath(d.Get("custom_gpu_driver_path"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("custom_gpu_driver_path"); !tpgresource.IsEmptyValue(reflect.ValueOf(customGpuDriverPathProp)) && (ok || !reflect.DeepEqual(v, customGpuDriverPathProp)) { + obj["customGpuDriverPath"] = customGpuDriverPathProp + } + bootDiskTypeProp, err := expandNotebooksInstanceBootDiskType(d.Get("boot_disk_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("boot_disk_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(bootDiskTypeProp)) && (ok || !reflect.DeepEqual(v, bootDiskTypeProp)) { + obj["bootDiskType"] = bootDiskTypeProp + } + bootDiskSizeGbProp, err := expandNotebooksInstanceBootDiskSizeGb(d.Get("boot_disk_size_gb"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("boot_disk_size_gb"); !tpgresource.IsEmptyValue(reflect.ValueOf(bootDiskSizeGbProp)) && (ok || !reflect.DeepEqual(v, bootDiskSizeGbProp)) { + obj["bootDiskSizeGb"] = bootDiskSizeGbProp + } + dataDiskTypeProp, err := expandNotebooksInstanceDataDiskType(d.Get("data_disk_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_disk_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(dataDiskTypeProp)) && (ok || !reflect.DeepEqual(v, dataDiskTypeProp)) { + obj["dataDiskType"] = dataDiskTypeProp + } + dataDiskSizeGbProp, err := 
expandNotebooksInstanceDataDiskSizeGb(d.Get("data_disk_size_gb"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("data_disk_size_gb"); !tpgresource.IsEmptyValue(reflect.ValueOf(dataDiskSizeGbProp)) && (ok || !reflect.DeepEqual(v, dataDiskSizeGbProp)) { + obj["dataDiskSizeGb"] = dataDiskSizeGbProp + } + noRemoveDataDiskProp, err := expandNotebooksInstanceNoRemoveDataDisk(d.Get("no_remove_data_disk"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("no_remove_data_disk"); !tpgresource.IsEmptyValue(reflect.ValueOf(noRemoveDataDiskProp)) && (ok || !reflect.DeepEqual(v, noRemoveDataDiskProp)) { + obj["noRemoveDataDisk"] = noRemoveDataDiskProp + } + diskEncryptionProp, err := expandNotebooksInstanceDiskEncryption(d.Get("disk_encryption"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("disk_encryption"); !tpgresource.IsEmptyValue(reflect.ValueOf(diskEncryptionProp)) && (ok || !reflect.DeepEqual(v, diskEncryptionProp)) { + obj["diskEncryption"] = diskEncryptionProp + } + kmsKeyProp, err := expandNotebooksInstanceKmsKey(d.Get("kms_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("kms_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(kmsKeyProp)) && (ok || !reflect.DeepEqual(v, kmsKeyProp)) { + obj["kmsKey"] = kmsKeyProp + } + noPublicIpProp, err := expandNotebooksInstanceNoPublicIp(d.Get("no_public_ip"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("no_public_ip"); !tpgresource.IsEmptyValue(reflect.ValueOf(noPublicIpProp)) && (ok || !reflect.DeepEqual(v, noPublicIpProp)) { + obj["noPublicIp"] = noPublicIpProp + } + noProxyAccessProp, err := expandNotebooksInstanceNoProxyAccess(d.Get("no_proxy_access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("no_proxy_access"); !tpgresource.IsEmptyValue(reflect.ValueOf(noProxyAccessProp)) && (ok || !reflect.DeepEqual(v, noProxyAccessProp)) { + 
obj["noProxyAccess"] = noProxyAccessProp + } + networkProp, err := expandNotebooksInstanceNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + subnetProp, err := expandNotebooksInstanceSubnet(d.Get("subnet"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subnet"); !tpgresource.IsEmptyValue(reflect.ValueOf(subnetProp)) && (ok || !reflect.DeepEqual(v, subnetProp)) { + obj["subnet"] = subnetProp + } + labelsProp, err := expandNotebooksInstanceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + tagsProp, err := expandNotebooksInstanceTags(d.Get("tags"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tags"); !tpgresource.IsEmptyValue(reflect.ValueOf(tagsProp)) && (ok || !reflect.DeepEqual(v, tagsProp)) { + obj["tags"] = tagsProp + } + metadataProp, err := expandNotebooksInstanceMetadata(d.Get("metadata"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(metadataProp)) && (ok || !reflect.DeepEqual(v, metadataProp)) { + obj["metadata"] = metadataProp + } + vmImageProp, err := expandNotebooksInstanceVmImage(d.Get("vm_image"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("vm_image"); !tpgresource.IsEmptyValue(reflect.ValueOf(vmImageProp)) && (ok || !reflect.DeepEqual(v, vmImageProp)) { + obj["vmImage"] = vmImageProp + } + containerImageProp, err := expandNotebooksInstanceContainerImage(d.Get("container_image"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("container_image"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(containerImageProp)) && (ok || !reflect.DeepEqual(v, containerImageProp)) { + obj["containerImage"] = containerImageProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances?instanceId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Instance: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Instance: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = NotebooksOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Instance", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Instance: %s", err) + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) + + return resourceNotebooksInstanceRead(d, meta) +} + +func resourceNotebooksInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NotebooksInstance %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + if err := d.Set("machine_type", flattenNotebooksInstanceMachineType(res["machineType"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("post_startup_script", flattenNotebooksInstancePostStartupScript(res["postStartupScript"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("proxy_uri", flattenNotebooksInstanceProxyUri(res["proxyUri"], d, config)); err != nil { + 
return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("service_account", flattenNotebooksInstanceServiceAccount(res["serviceAccount"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("service_account_scopes", flattenNotebooksInstanceServiceAccountScopes(res["serviceAccountScopes"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("accelerator_config", flattenNotebooksInstanceAcceleratorConfig(res["acceleratorConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("shielded_instance_config", flattenNotebooksInstanceShieldedInstanceConfig(res["shieldedInstanceConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("nic_type", flattenNotebooksInstanceNicType(res["nicType"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("reservation_affinity", flattenNotebooksInstanceReservationAffinity(res["reservationAffinity"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("state", flattenNotebooksInstanceState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("install_gpu_driver", flattenNotebooksInstanceInstallGpuDriver(res["installGpuDriver"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("custom_gpu_driver_path", flattenNotebooksInstanceCustomGpuDriverPath(res["customGpuDriverPath"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("disk_encryption", flattenNotebooksInstanceDiskEncryption(res["diskEncryption"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("kms_key", 
flattenNotebooksInstanceKmsKey(res["kmsKey"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("no_public_ip", flattenNotebooksInstanceNoPublicIp(res["noPublicIp"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("no_proxy_access", flattenNotebooksInstanceNoProxyAccess(res["noProxyAccess"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("network", flattenNotebooksInstanceNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("subnet", flattenNotebooksInstanceSubnet(res["subnet"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("labels", flattenNotebooksInstanceLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("tags", flattenNotebooksInstanceTags(res["tags"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("create_time", flattenNotebooksInstanceCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("update_time", flattenNotebooksInstanceUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + return nil +} + +func resourceNotebooksInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("labels") { + obj := 
make(map[string]interface{}) + + labelsProp, err := expandNotebooksInstanceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + obj, err = resourceNotebooksInstanceUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}:setLabels") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) + } + + err = NotebooksOperationWaitTime( + config, res, project, "Updating Instance", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + if d.HasChange("metadata") { + obj := make(map[string]interface{}) + + metadataProp, err := expandNotebooksInstanceMetadata(d.Get("metadata"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metadataProp)) { + obj["metadata"] = metadataProp + } + + obj, err = resourceNotebooksInstanceUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}:updateMetadataItems") + if err != nil { + 
return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) + } + + err = NotebooksOperationWaitTime( + config, res, project, "Updating Instance", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceNotebooksInstanceRead(d, meta) +} + +func resourceNotebooksInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Instance %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return 
transport_tpg.HandleNotFoundError(err, d, "Instance") + } + + err = NotebooksOperationWaitTime( + config, res, project, "Deleting Instance", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) + return nil +} + +func resourceNotebooksInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNotebooksInstanceMachineType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenNotebooksInstancePostStartupScript(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceProxyUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceServiceAccountScopes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceAcceleratorConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if 
len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = + flattenNotebooksInstanceAcceleratorConfigType(original["type"], d, config) + transformed["core_count"] = + flattenNotebooksInstanceAcceleratorConfigCoreCount(original["coreCount"], d, config) + return []interface{}{transformed} +} +func flattenNotebooksInstanceAcceleratorConfigType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceAcceleratorConfigCoreCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNotebooksInstanceShieldedInstanceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enable_integrity_monitoring"] = + flattenNotebooksInstanceShieldedInstanceConfigEnableIntegrityMonitoring(original["enableIntegrityMonitoring"], d, config) + transformed["enable_secure_boot"] = + flattenNotebooksInstanceShieldedInstanceConfigEnableSecureBoot(original["enableSecureBoot"], d, config) + transformed["enable_vtpm"] = + flattenNotebooksInstanceShieldedInstanceConfigEnableVtpm(original["enableVtpm"], d, config) + return []interface{}{transformed} +} +func flattenNotebooksInstanceShieldedInstanceConfigEnableIntegrityMonitoring(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenNotebooksInstanceShieldedInstanceConfigEnableSecureBoot(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceShieldedInstanceConfigEnableVtpm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceNicType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceReservationAffinity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["consume_reservation_type"] = + flattenNotebooksInstanceReservationAffinityConsumeReservationType(original["consumeReservationType"], d, config) + transformed["key"] = + flattenNotebooksInstanceReservationAffinityKey(original["key"], d, config) + transformed["values"] = + flattenNotebooksInstanceReservationAffinityValues(original["values"], d, config) + return []interface{}{transformed} +} +func flattenNotebooksInstanceReservationAffinityConsumeReservationType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceReservationAffinityKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceReservationAffinityValues(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceInstallGpuDriver(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceCustomGpuDriverPath(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceDiskEncryption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceKmsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceNoPublicIp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceNoProxyAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceSubnet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNotebooksInstanceUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNotebooksInstanceMachineType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstancePostStartupScript(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceInstanceOwners(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandNotebooksInstanceServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceServiceAccountScopes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceAcceleratorConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandNotebooksInstanceAcceleratorConfigType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + transformedCoreCount, err := expandNotebooksInstanceAcceleratorConfigCoreCount(original["core_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCoreCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["coreCount"] = transformedCoreCount + } + + return transformed, nil +} + +func expandNotebooksInstanceAcceleratorConfigType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceAcceleratorConfigCoreCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceShieldedInstanceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := 
make(map[string]interface{}) + + transformedEnableIntegrityMonitoring, err := expandNotebooksInstanceShieldedInstanceConfigEnableIntegrityMonitoring(original["enable_integrity_monitoring"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableIntegrityMonitoring); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableIntegrityMonitoring"] = transformedEnableIntegrityMonitoring + } + + transformedEnableSecureBoot, err := expandNotebooksInstanceShieldedInstanceConfigEnableSecureBoot(original["enable_secure_boot"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableSecureBoot); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableSecureBoot"] = transformedEnableSecureBoot + } + + transformedEnableVtpm, err := expandNotebooksInstanceShieldedInstanceConfigEnableVtpm(original["enable_vtpm"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableVtpm); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableVtpm"] = transformedEnableVtpm + } + + return transformed, nil +} + +func expandNotebooksInstanceShieldedInstanceConfigEnableIntegrityMonitoring(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceShieldedInstanceConfigEnableSecureBoot(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceShieldedInstanceConfigEnableVtpm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceNicType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceReservationAffinity(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedConsumeReservationType, err := expandNotebooksInstanceReservationAffinityConsumeReservationType(original["consume_reservation_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConsumeReservationType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["consumeReservationType"] = transformedConsumeReservationType + } + + transformedKey, err := expandNotebooksInstanceReservationAffinityKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedKey + } + + transformedValues, err := expandNotebooksInstanceReservationAffinityValues(original["values"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValues); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["values"] = transformedValues + } + + return transformed, nil +} + +func expandNotebooksInstanceReservationAffinityConsumeReservationType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceReservationAffinityKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceReservationAffinityValues(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceInstallGpuDriver(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return 
v, nil +} + +func expandNotebooksInstanceCustomGpuDriverPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceBootDiskType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceBootDiskSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceDataDiskType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceDataDiskSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceNoRemoveDataDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceDiskEncryption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceKmsKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceNoPublicIp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceNoProxyAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceSubnet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + return v, nil +} + +func expandNotebooksInstanceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandNotebooksInstanceTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceMetadata(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandNotebooksInstanceVmImage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProject, err := expandNotebooksInstanceVmImageProject(original["project"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProject); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["project"] = transformedProject + } + + transformedImageFamily, err := expandNotebooksInstanceVmImageImageFamily(original["image_family"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImageFamily); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["imageFamily"] = transformedImageFamily + } + + transformedImageName, err := expandNotebooksInstanceVmImageImageName(original["image_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImageName); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["imageName"] = transformedImageName + } + + return transformed, nil +} + +func expandNotebooksInstanceVmImageProject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceVmImageImageFamily(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceVmImageImageName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceContainerImage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRepository, err := expandNotebooksInstanceContainerImageRepository(original["repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repository"] = transformedRepository + } + + transformedTag, err := expandNotebooksInstanceContainerImageTag(original["tag"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tag"] = transformedTag + } + + return transformed, nil +} + +func expandNotebooksInstanceContainerImageRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNotebooksInstanceContainerImageTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
resourceNotebooksInstanceUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Update requests use "items" as the api name instead of "metadata" + // https://cloud.google.com/vertex-ai/docs/workbench/reference/rest/v1/projects.locations.instances/updateMetadataItems + if metadata, ok := obj["metadata"]; ok { + obj["items"] = metadata + delete(obj, "metadata") + } + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_instance_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_instance_sweeper.go new file mode 100644 index 0000000000..31d31c72ff --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_instance_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package notebooks + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NotebooksInstance", testSweepNotebooksInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNotebooksInstance(region string) error { + resourceName := "NotebooksInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://notebooks.googleapis.com/v1/projects/{{project}}/locations/{{location}}/instances", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["instances"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://notebooks.googleapis.com/v1/projects/{{project}}/locations/{{location}}/instances/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_location.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_location.go new file mode 100644 index 0000000000..86ab37ad40 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_location.go @@ -0,0 +1,347 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package notebooks + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceNotebooksLocation() *schema.Resource { + return &schema.Resource{ + Create: resourceNotebooksLocationCreate, + Read: resourceNotebooksLocationRead, + Update: resourceNotebooksLocationUpdate, + Delete: resourceNotebooksLocationDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNotebooksLocationImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the Location resource.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + 
Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceNotebooksLocationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandNotebooksLocationName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Location: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Location: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Location: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = NotebooksOperationWaitTimeWithResponse( + config, res, &opRes, 
project, "Creating Location", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Location: %s", err) + } + + if err := d.Set("name", flattenNotebooksLocationName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Location %q: %#v", d.Id(), res) + + return resourceNotebooksLocationRead(d, meta) +} + +func resourceNotebooksLocationRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Location: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NotebooksLocation %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Location: %s", err) + } + + if err := d.Set("name", flattenNotebooksLocationName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error 
reading Location: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading Location: %s", err) + } + + return nil +} + +func resourceNotebooksLocationUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Location: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + nameProp, err := expandNotebooksLocationName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Location %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Location %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Location %q: %#v", d.Id(), res) + } + + err = NotebooksOperationWaitTime( + config, res, project, "Updating Location", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceNotebooksLocationRead(d, meta) +} + +func 
resourceNotebooksLocationDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Location: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Location %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Location") + } + + err = NotebooksOperationWaitTime( + config, res, project, "Deleting Location", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Location %q: %#v", d.Id(), res) + return nil +} + +func resourceNotebooksLocationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + 
return []*schema.ResourceData{d}, nil +} + +func flattenNotebooksLocationName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandNotebooksLocationName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_location_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_location_sweeper.go new file mode 100644 index 0000000000..b85bfe9cbd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_location_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package notebooks + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NotebooksLocation", testSweepNotebooksLocation) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNotebooksLocation(region string) error { + resourceName := "NotebooksLocation" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://notebooks.googleapis.com/v1/projects/{{project}}/locations", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in 
response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://notebooks.googleapis.com/v1/projects/{{project}}/locations/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_notebooks_runtime.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_runtime.go similarity index 
79% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_notebooks_runtime.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_runtime.go index bfb71d8ad2..578f946652 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_notebooks_runtime.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_runtime.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package notebooks import ( "fmt" @@ -22,6 +25,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) const notebooksRuntimeGoogleProvidedLabel = "goog-caip-managed-notebook" @@ -68,7 +75,7 @@ func ResourceNotebooksRuntime() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `A reference to the zone where the machine resides.`, }, "name": { @@ -179,7 +186,7 @@ Cloud Storage path (gs://path-to-file/file-name).`, "post_startup_script_behavior": { Type: schema.TypeString, Optional: true, - ValidateFunc: validateEnum([]string{"POST_STARTUP_SCRIPT_BEHAVIOR_UNSPECIFIED", "RUN_EVERY_START", "DOWNLOAD_AND_RUN_EVERY_START", ""}), + ValidateFunc: 
verify.ValidateEnum([]string{"POST_STARTUP_SCRIPT_BEHAVIOR_UNSPECIFIED", "RUN_EVERY_START", "DOWNLOAD_AND_RUN_EVERY_START", ""}), Description: `Behavior for the post startup script. Possible values: ["POST_STARTUP_SCRIPT_BEHAVIOR_UNSPECIFIED", "RUN_EVERY_START", "DOWNLOAD_AND_RUN_EVERY_START"]`, }, "upgradeable": { @@ -483,7 +490,7 @@ Runtimes support the following network configurations: Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"UNSPECIFIED_NIC_TYPE", "VIRTIO_NET", "GVNIC", ""}), + ValidateFunc: verify.ValidateEnum([]string{"UNSPECIFIED_NIC_TYPE", "VIRTIO_NET", "GVNIC", ""}), Description: `The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet. Possible values: ["UNSPECIFIED_NIC_TYPE", "VIRTIO_NET", "GVNIC"]`, }, @@ -623,8 +630,8 @@ sessions stats.`, } func resourceNotebooksRuntimeCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -633,23 +640,23 @@ func resourceNotebooksRuntimeCreate(d *schema.ResourceData, meta interface{}) er virtualMachineProp, err := expandNotebooksRuntimeVirtualMachine(d.Get("virtual_machine"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("virtual_machine"); !isEmptyValue(reflect.ValueOf(virtualMachineProp)) && (ok || !reflect.DeepEqual(v, virtualMachineProp)) { + } else if v, ok := d.GetOkExists("virtual_machine"); !tpgresource.IsEmptyValue(reflect.ValueOf(virtualMachineProp)) && (ok || !reflect.DeepEqual(v, virtualMachineProp)) { obj["virtualMachine"] = virtualMachineProp } accessConfigProp, err := expandNotebooksRuntimeAccessConfig(d.Get("access_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("access_config"); !isEmptyValue(reflect.ValueOf(accessConfigProp)) 
&& (ok || !reflect.DeepEqual(v, accessConfigProp)) { + } else if v, ok := d.GetOkExists("access_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(accessConfigProp)) && (ok || !reflect.DeepEqual(v, accessConfigProp)) { obj["accessConfig"] = accessConfigProp } softwareConfigProp, err := expandNotebooksRuntimeSoftwareConfig(d.Get("software_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("software_config"); !isEmptyValue(reflect.ValueOf(softwareConfigProp)) && (ok || !reflect.DeepEqual(v, softwareConfigProp)) { + } else if v, ok := d.GetOkExists("software_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(softwareConfigProp)) && (ok || !reflect.DeepEqual(v, softwareConfigProp)) { obj["softwareConfig"] = softwareConfigProp } - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/runtimes?runtimeId={{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/runtimes?runtimeId={{name}}") if err != nil { return err } @@ -657,24 +664,32 @@ func resourceNotebooksRuntimeCreate(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Creating new Runtime: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Runtime: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Runtime: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/runtimes/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/runtimes/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -694,7 +709,7 @@ func resourceNotebooksRuntimeCreate(d *schema.ResourceData, meta interface{}) er } // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/runtimes/{{name}}") + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/runtimes/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -706,33 +721,39 @@ func resourceNotebooksRuntimeCreate(d *schema.ResourceData, meta interface{}) er } func resourceNotebooksRuntimeRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/runtimes/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/runtimes/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Runtime: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { 
billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("NotebooksRuntime %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NotebooksRuntime %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -762,15 +783,15 @@ func resourceNotebooksRuntimeRead(d *schema.ResourceData, meta interface{}) erro } func resourceNotebooksRuntimeUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Runtime: %s", err) } @@ -780,23 +801,23 @@ func resourceNotebooksRuntimeUpdate(d *schema.ResourceData, meta interface{}) er virtualMachineProp, err := expandNotebooksRuntimeVirtualMachine(d.Get("virtual_machine"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("virtual_machine"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, virtualMachineProp)) { + } else if v, ok := d.GetOkExists("virtual_machine"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, virtualMachineProp)) { obj["virtualMachine"] = virtualMachineProp } accessConfigProp, err := expandNotebooksRuntimeAccessConfig(d.Get("access_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("access_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, accessConfigProp)) { + } else if 
v, ok := d.GetOkExists("access_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, accessConfigProp)) { obj["accessConfig"] = accessConfigProp } softwareConfigProp, err := expandNotebooksRuntimeSoftwareConfig(d.Get("software_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("software_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, softwareConfigProp)) { + } else if v, ok := d.GetOkExists("software_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, softwareConfigProp)) { obj["softwareConfig"] = softwareConfigProp } - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/runtimes/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/runtimes/{{name}}") if err != nil { return err } @@ -818,19 +839,27 @@ func resourceNotebooksRuntimeUpdate(d *schema.ResourceData, meta interface{}) er "softwareConfig.customGpuDriverPath", "softwareConfig.postStartupScript") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: 
billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Runtime %q: %s", d.Id(), err) @@ -850,21 +879,21 @@ func resourceNotebooksRuntimeUpdate(d *schema.ResourceData, meta interface{}) er } func resourceNotebooksRuntimeDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Runtime: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/runtimes/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/runtimes/{{name}}") if err != nil { return err } @@ -873,13 +902,21 @@ func resourceNotebooksRuntimeDelete(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Deleting Runtime %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "Runtime") + return transport_tpg.HandleNotFoundError(err, d, "Runtime") } 
err = NotebooksOperationWaitTime( @@ -895,8 +932,8 @@ func resourceNotebooksRuntimeDelete(d *schema.ResourceData, meta interface{}) er } func resourceNotebooksRuntimeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/runtimes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -905,7 +942,7 @@ func resourceNotebooksRuntimeImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/runtimes/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/runtimes/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -914,7 +951,7 @@ func resourceNotebooksRuntimeImport(d *schema.ResourceData, meta interface{}) ([ return []*schema.ResourceData{d}, nil } -func flattenNotebooksRuntimeVirtualMachine(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachine(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -931,15 +968,15 @@ func flattenNotebooksRuntimeVirtualMachine(v interface{}, d *schema.ResourceData flattenNotebooksRuntimeVirtualMachineVirtualMachineConfig(original["virtualMachineConfig"], d, config) return []interface{}{transformed} } -func flattenNotebooksRuntimeVirtualMachineInstanceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineInstanceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineInstanceId(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
+func flattenNotebooksRuntimeVirtualMachineInstanceId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -982,15 +1019,15 @@ func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfig(v interface{}, d flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(original["reservedIpRange"], d, config) return []interface{}{transformed} } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigMachineType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigMachineType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDisk(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDisk(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1025,26 +1062,26 @@ func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDisk(v interfa flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskType(original["type"], d, config) return []interface{}{transformed} } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskAutoDelete(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskAutoDelete(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskBoot(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskBoot(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskDeviceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskDeviceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskGuestOsFeatures(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskGuestOsFeatures(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskIndex(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskIndex(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1058,7 +1095,7 @@ func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskIndex(v in return v // let terraform core handle it otherwise } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParams(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParams(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1079,18 +1116,18 @@ func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitialize flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsLabels(original["labels"], d, config) return []interface{}{transformed} } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskSizeGb(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskSizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1104,39 +1141,39 @@ func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitialize return v // let terraform core handle it otherwise } -func 
flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInterface(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInterface(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskKind(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskLicenses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskLicenses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImages(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1155,15 +1192,15 @@ func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImages(v } return transformed } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImagesRepository(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImagesRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImagesTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImagesTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigEncryptionConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigEncryptionConfig(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1176,11 +1213,11 @@ func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigEncryptionConfig(v flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigEncryptionConfigKmsKey(original["kmsKey"], d, config) return []interface{}{transformed} } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigEncryptionConfigKmsKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigEncryptionConfigKmsKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1197,19 +1234,19 @@ func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceCo flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableIntegrityMonitoring(original["enableIntegrityMonitoring"], d, config) return []interface{}{transformed} } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableSecureBoot(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableSecureBoot(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableVtpm(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableVtpm(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableIntegrityMonitoring(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableIntegrityMonitoring(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1224,14 +1261,14 @@ func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig( flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigCoreCount(original["coreCount"], d, config) return []interface{}{transformed} } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigCoreCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigCoreCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1245,51 +1282,51 @@ func 
flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigC return v // let terraform core handle it otherwise } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigSubnet(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigSubnet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigInternalIpOnly(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigInternalIpOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigTags(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigGuestAttributes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigGuestAttributes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigMetadata(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigMetadata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigNicType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigNicType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeHealthState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeHealthState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeAccessConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeAccessConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1306,19 +1343,19 @@ func flattenNotebooksRuntimeAccessConfig(v interface{}, d *schema.ResourceData, flattenNotebooksRuntimeAccessConfigProxyUri(original["proxyUri"], d, config) return []interface{}{transformed} } -func flattenNotebooksRuntimeAccessConfigAccessType(v interface{}, d *schema.ResourceData, config *Config) interface{} 
{ +func flattenNotebooksRuntimeAccessConfigAccessType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeAccessConfigRuntimeOwner(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeAccessConfigRuntimeOwner(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeAccessConfigProxyUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeAccessConfigProxyUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeSoftwareConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeSoftwareConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1349,22 +1386,22 @@ func flattenNotebooksRuntimeSoftwareConfig(v interface{}, d *schema.ResourceData flattenNotebooksRuntimeSoftwareConfigKernels(original["kernels"], d, config) return []interface{}{transformed} } -func flattenNotebooksRuntimeSoftwareConfigNotebookUpgradeSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeSoftwareConfigNotebookUpgradeSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeSoftwareConfigEnableHealthMonitoring(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeSoftwareConfigEnableHealthMonitoring(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeSoftwareConfigIdleShutdown(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeSoftwareConfigIdleShutdown(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeSoftwareConfigIdleShutdownTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeSoftwareConfigIdleShutdownTimeout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1378,27 +1415,27 @@ func flattenNotebooksRuntimeSoftwareConfigIdleShutdownTimeout(v interface{}, d * return v // let terraform core handle it otherwise } -func flattenNotebooksRuntimeSoftwareConfigInstallGpuDriver(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeSoftwareConfigInstallGpuDriver(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeSoftwareConfigUpgradeable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeSoftwareConfigUpgradeable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeSoftwareConfigPostStartupScript(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeSoftwareConfigPostStartupScript(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeSoftwareConfigPostStartupScriptBehavior(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenNotebooksRuntimeSoftwareConfigPostStartupScriptBehavior(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeSoftwareConfigKernels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeSoftwareConfigKernels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1417,15 +1454,15 @@ func flattenNotebooksRuntimeSoftwareConfigKernels(v interface{}, d *schema.Resou } return transformed } -func flattenNotebooksRuntimeSoftwareConfigKernelsRepository(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeSoftwareConfigKernelsRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeSoftwareConfigKernelsTag(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeSoftwareConfigKernelsTag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenNotebooksRuntimeMetrics(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeMetrics(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1438,11 +1475,11 @@ func flattenNotebooksRuntimeMetrics(v interface{}, d *schema.ResourceData, confi flattenNotebooksRuntimeMetricsSystemMetrics(original["systemMetrics"], d, config) return []interface{}{transformed} } -func flattenNotebooksRuntimeMetricsSystemMetrics(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenNotebooksRuntimeMetricsSystemMetrics(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandNotebooksRuntimeVirtualMachine(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandNotebooksRuntimeVirtualMachine(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1454,36 +1491,36 @@ func expandNotebooksRuntimeVirtualMachine(v interface{}, d TerraformResourceData transformedInstanceName, err := expandNotebooksRuntimeVirtualMachineInstanceName(original["instance_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInstanceName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInstanceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["instanceName"] = transformedInstanceName } transformedInstanceId, err := expandNotebooksRuntimeVirtualMachineInstanceId(original["instance_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInstanceId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInstanceId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["instanceId"] = transformedInstanceId } transformedVirtualMachineConfig, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfig(original["virtual_machine_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVirtualMachineConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVirtualMachineConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["virtualMachineConfig"] = transformedVirtualMachineConfig } return transformed, nil } -func expandNotebooksRuntimeVirtualMachineInstanceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineInstanceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandNotebooksRuntimeVirtualMachineInstanceId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineInstanceId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1495,63 +1532,63 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfig(v interface{}, d T transformedZone, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigZone(original["zone"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedZone); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedZone); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["zone"] = transformedZone } transformedMachineType, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigMachineType(original["machine_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMachineType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMachineType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["machineType"] = transformedMachineType } transformedDataDisk, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDisk(original["data_disk"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDataDisk); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDataDisk); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dataDisk"] = 
transformedDataDisk } transformedContainerImages, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImages(original["container_images"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedContainerImages); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedContainerImages); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["containerImages"] = transformedContainerImages } transformedEncryptionConfig, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigEncryptionConfig(original["encryption_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEncryptionConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEncryptionConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["encryptionConfig"] = transformedEncryptionConfig } transformedShieldedInstanceConfig, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfig(original["shielded_instance_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedShieldedInstanceConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedShieldedInstanceConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["shieldedInstanceConfig"] = transformedShieldedInstanceConfig } transformedAcceleratorConfig, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig(original["accelerator_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAcceleratorConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAcceleratorConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["acceleratorConfig"] = transformedAcceleratorConfig } transformedNetwork, err := 
expandNotebooksRuntimeVirtualMachineVirtualMachineConfigNetwork(original["network"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNetwork); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["network"] = transformedNetwork } transformedSubnet, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigSubnet(original["subnet"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSubnet); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSubnet); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["subnet"] = transformedSubnet } @@ -1565,57 +1602,57 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfig(v interface{}, d T transformedTags, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigTags(original["tags"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTags); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTags); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tags"] = transformedTags } transformedGuestAttributes, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigGuestAttributes(original["guest_attributes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGuestAttributes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGuestAttributes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["guestAttributes"] = transformedGuestAttributes } transformedMetadata, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigMetadata(original["metadata"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMetadata); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedMetadata); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["metadata"] = transformedMetadata } transformedLabels, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigLabels(original["labels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["labels"] = transformedLabels } transformedNicType, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigNicType(original["nic_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNicType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNicType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nicType"] = transformedNicType } transformedReservedIpRange, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(original["reserved_ip_range"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedReservedIpRange); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedReservedIpRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["reservedIpRange"] = transformedReservedIpRange } return transformed, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigMachineType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigMachineType(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDisk(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1627,111 +1664,111 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDisk(v interfac transformedAutoDelete, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskAutoDelete(original["auto_delete"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAutoDelete); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAutoDelete); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["autoDelete"] = transformedAutoDelete } transformedBoot, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskBoot(original["boot"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBoot); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBoot); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["boot"] = transformedBoot } transformedDeviceName, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskDeviceName(original["device_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDeviceName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDeviceName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["deviceName"] = transformedDeviceName } transformedGuestOsFeatures, err := 
expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskGuestOsFeatures(original["guest_os_features"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGuestOsFeatures); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGuestOsFeatures); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["guestOsFeatures"] = transformedGuestOsFeatures } transformedIndex, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskIndex(original["index"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIndex); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIndex); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["index"] = transformedIndex } transformedInitializeParams, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParams(original["initialize_params"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInitializeParams); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInitializeParams); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["initializeParams"] = transformedInitializeParams } transformedInterface, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInterface(original["interface"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInterface); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInterface); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["interface"] = transformedInterface } transformedKind, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskKind(original["kind"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKind); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedKind); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["kind"] = transformedKind } transformedLicenses, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskLicenses(original["licenses"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLicenses); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLicenses); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["licenses"] = transformedLicenses } transformedMode, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskMode(original["mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mode"] = transformedMode } transformedSource, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskSource(original["source"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSource); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["source"] = transformedSource } transformedType, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskType(original["type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["type"] = transformedType } return transformed, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskAutoDelete(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskAutoDelete(v interface{}, 
d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskBoot(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskBoot(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskDeviceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskDeviceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskGuestOsFeatures(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskGuestOsFeatures(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskIndex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskIndex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParams(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParams(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1743,58 +1780,58 @@ func 
expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeP transformedDescription, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedDiskName, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskName(original["disk_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDiskName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDiskName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["diskName"] = transformedDiskName } transformedDiskSizeGb, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskSizeGb(original["disk_size_gb"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDiskSizeGb); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDiskSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["diskSizeGb"] = transformedDiskSizeGb } transformedDiskType, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskType(original["disk_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDiskType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDiskType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["diskType"] = transformedDiskType } transformedLabels, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsLabels(original["labels"], d, config) if err != 
nil { return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["labels"] = transformedLabels } return transformed, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskSizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsDiskType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsLabels(v interface{}, d TerraformResourceData, config 
*Config) (map[string]string, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeParamsLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1805,31 +1842,31 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInitializeP return m, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInterface(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskInterface(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskKind(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskLicenses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskLicenses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigDataDiskType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1842,14 +1879,14 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImages(v i transformedRepository, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImagesRepository(original["repository"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["repository"] = transformedRepository } transformedTag, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImagesTag(original["tag"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tag"] = transformedTag } @@ -1858,15 +1895,15 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImages(v 
i return req, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImagesRepository(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImagesRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImagesTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigContainerImagesTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigEncryptionConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigEncryptionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1878,18 +1915,18 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigEncryptionConfig(v transformedKmsKey, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigEncryptionConfigKmsKey(original["kms_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKmsKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKmsKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["kmsKey"] = transformedKmsKey } return transformed, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigEncryptionConfigKmsKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigEncryptionConfigKmsKey(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1901,40 +1938,40 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceCon transformedEnableSecureBoot, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableSecureBoot(original["enable_secure_boot"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnableSecureBoot); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnableSecureBoot); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enableSecureBoot"] = transformedEnableSecureBoot } transformedEnableVtpm, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableVtpm(original["enable_vtpm"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnableVtpm); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnableVtpm); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enableVtpm"] = transformedEnableVtpm } transformedEnableIntegrityMonitoring, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableIntegrityMonitoring(original["enable_integrity_monitoring"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnableIntegrityMonitoring); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnableIntegrityMonitoring); val.IsValid() 
&& !tpgresource.IsEmptyValue(val) { transformed["enableIntegrityMonitoring"] = transformedEnableIntegrityMonitoring } return transformed, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableSecureBoot(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableSecureBoot(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableVtpm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableVtpm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableIntegrityMonitoring(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigShieldedInstanceConfigEnableIntegrityMonitoring(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1946,45 +1983,45 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfig(v transformedType, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigType(original["type"], d, 
config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["type"] = transformedType } transformedCoreCount, err := expandNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigCoreCount(original["core_count"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCoreCount); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCoreCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["coreCount"] = transformedCoreCount } return transformed, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigCoreCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigAcceleratorConfigCoreCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigSubnet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandNotebooksRuntimeVirtualMachineVirtualMachineConfigSubnet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigInternalIpOnly(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigInternalIpOnly(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigGuestAttributes(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigGuestAttributes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1995,7 +2032,7 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigGuestAttributes(v i return m, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigMetadata(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigMetadata(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -2006,7 +2043,7 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigMetadata(v interfac return m, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigLabels(v interface{}, d 
TerraformResourceData, config *Config) (map[string]string, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -2017,15 +2054,15 @@ func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigLabels(v interface{ return m, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigNicType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigNicType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeVirtualMachineVirtualMachineConfigReservedIpRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeAccessConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeAccessConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2037,40 +2074,40 @@ func expandNotebooksRuntimeAccessConfig(v interface{}, d TerraformResourceData, transformedAccessType, err := expandNotebooksRuntimeAccessConfigAccessType(original["access_type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAccessType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAccessType); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["accessType"] = transformedAccessType } transformedRuntimeOwner, err := 
expandNotebooksRuntimeAccessConfigRuntimeOwner(original["runtime_owner"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRuntimeOwner); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRuntimeOwner); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["runtimeOwner"] = transformedRuntimeOwner } transformedProxyUri, err := expandNotebooksRuntimeAccessConfigProxyUri(original["proxy_uri"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProxyUri); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProxyUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["proxyUri"] = transformedProxyUri } return transformed, nil } -func expandNotebooksRuntimeAccessConfigAccessType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeAccessConfigAccessType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeAccessConfigRuntimeOwner(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeAccessConfigRuntimeOwner(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeAccessConfigProxyUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeAccessConfigProxyUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeSoftwareConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeSoftwareConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if 
len(l) == 0 || l[0] == nil { return nil, nil @@ -2082,113 +2119,113 @@ func expandNotebooksRuntimeSoftwareConfig(v interface{}, d TerraformResourceData transformedNotebookUpgradeSchedule, err := expandNotebooksRuntimeSoftwareConfigNotebookUpgradeSchedule(original["notebook_upgrade_schedule"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNotebookUpgradeSchedule); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNotebookUpgradeSchedule); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["notebookUpgradeSchedule"] = transformedNotebookUpgradeSchedule } transformedEnableHealthMonitoring, err := expandNotebooksRuntimeSoftwareConfigEnableHealthMonitoring(original["enable_health_monitoring"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnableHealthMonitoring); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnableHealthMonitoring); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enableHealthMonitoring"] = transformedEnableHealthMonitoring } transformedIdleShutdown, err := expandNotebooksRuntimeSoftwareConfigIdleShutdown(original["idle_shutdown"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdleShutdown); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdleShutdown); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["idleShutdown"] = transformedIdleShutdown } transformedIdleShutdownTimeout, err := expandNotebooksRuntimeSoftwareConfigIdleShutdownTimeout(original["idle_shutdown_timeout"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdleShutdownTimeout); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdleShutdownTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["idleShutdownTimeout"] = 
transformedIdleShutdownTimeout } transformedInstallGpuDriver, err := expandNotebooksRuntimeSoftwareConfigInstallGpuDriver(original["install_gpu_driver"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInstallGpuDriver); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInstallGpuDriver); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["installGpuDriver"] = transformedInstallGpuDriver } transformedUpgradeable, err := expandNotebooksRuntimeSoftwareConfigUpgradeable(original["upgradeable"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUpgradeable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUpgradeable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["upgradeable"] = transformedUpgradeable } transformedCustomGpuDriverPath, err := expandNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(original["custom_gpu_driver_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCustomGpuDriverPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCustomGpuDriverPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["customGpuDriverPath"] = transformedCustomGpuDriverPath } transformedPostStartupScript, err := expandNotebooksRuntimeSoftwareConfigPostStartupScript(original["post_startup_script"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostStartupScript); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostStartupScript); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postStartupScript"] = transformedPostStartupScript } transformedPostStartupScriptBehavior, err := expandNotebooksRuntimeSoftwareConfigPostStartupScriptBehavior(original["post_startup_script_behavior"], d, config) if err != nil { return nil, 
err - } else if val := reflect.ValueOf(transformedPostStartupScriptBehavior); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostStartupScriptBehavior); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postStartupScriptBehavior"] = transformedPostStartupScriptBehavior } transformedKernels, err := expandNotebooksRuntimeSoftwareConfigKernels(original["kernels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKernels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKernels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["kernels"] = transformedKernels } return transformed, nil } -func expandNotebooksRuntimeSoftwareConfigNotebookUpgradeSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeSoftwareConfigNotebookUpgradeSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeSoftwareConfigEnableHealthMonitoring(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeSoftwareConfigEnableHealthMonitoring(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeSoftwareConfigIdleShutdown(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeSoftwareConfigIdleShutdown(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeSoftwareConfigIdleShutdownTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeSoftwareConfigIdleShutdownTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { return v, nil } -func expandNotebooksRuntimeSoftwareConfigInstallGpuDriver(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeSoftwareConfigInstallGpuDriver(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeSoftwareConfigUpgradeable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeSoftwareConfigUpgradeable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeSoftwareConfigCustomGpuDriverPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeSoftwareConfigPostStartupScript(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeSoftwareConfigPostStartupScript(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeSoftwareConfigPostStartupScriptBehavior(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeSoftwareConfigPostStartupScriptBehavior(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeSoftwareConfigKernels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeSoftwareConfigKernels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { 
@@ -2201,14 +2238,14 @@ func expandNotebooksRuntimeSoftwareConfigKernels(v interface{}, d TerraformResou transformedRepository, err := expandNotebooksRuntimeSoftwareConfigKernelsRepository(original["repository"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["repository"] = transformedRepository } transformedTag, err := expandNotebooksRuntimeSoftwareConfigKernelsTag(original["tag"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTag); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["tag"] = transformedTag } @@ -2217,10 +2254,10 @@ func expandNotebooksRuntimeSoftwareConfigKernels(v interface{}, d TerraformResou return req, nil } -func expandNotebooksRuntimeSoftwareConfigKernelsRepository(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeSoftwareConfigKernelsRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandNotebooksRuntimeSoftwareConfigKernelsTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandNotebooksRuntimeSoftwareConfigKernelsTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_runtime_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_runtime_sweeper.go new file mode 100644 index 0000000000..d7c6db1bbc --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/notebooks/resource_notebooks_runtime_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package notebooks + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NotebooksRuntime", testSweepNotebooksRuntime) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNotebooksRuntime(region string) error { + resourceName := "NotebooksRuntime" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + 
"project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://notebooks.googleapis.com/v1/projects/{{project}}/locations/{{location}}/runtimes", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["runtimes"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://notebooks.googleapis.com/v1/projects/{{project}}/locations/{{location}}/runtimes/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_org_policy_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/orgpolicy/resource_org_policy_policy.go similarity index 85% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_org_policy_policy.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/orgpolicy/resource_org_policy_policy.go index b2f7d99015..fd245968b1 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_org_policy_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/orgpolicy/resource_org_policy_policy.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package orgpolicy import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" orgpolicy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/orgpolicy" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceOrgPolicyPolicy() *schema.Resource { @@ -56,7 +63,7 @@ func ResourceOrgPolicyPolicy() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The parent of the resource.", }, @@ -199,7 +206,7 @@ func OrgPolicyPolicySpecRulesValuesSchema() *schema.Resource { } func resourceOrgPolicyPolicyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &orgpolicy.Policy{ Name: dcl.String(d.Get("name").(string)), @@ -212,18 +219,18 @@ func resourceOrgPolicyPolicyCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := 
tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -245,7 +252,7 @@ func resourceOrgPolicyPolicyCreate(d *schema.ResourceData, meta interface{}) err } func resourceOrgPolicyPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &orgpolicy.Policy{ Name: dcl.String(d.Get("name").(string)), @@ -253,17 +260,17 @@ func resourceOrgPolicyPolicyRead(d *schema.ResourceData, meta interface{}) error Spec: expandOrgPolicyPolicySpec(d.Get("spec")), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLOrgPolicyClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -272,7 +279,7 @@ func resourceOrgPolicyPolicyRead(d *schema.ResourceData, meta interface{}) error res, err := client.GetPolicy(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("OrgPolicyPolicy %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("name", res.Name); err != nil { @@ -288,26 +295,26 @@ func resourceOrgPolicyPolicyRead(d *schema.ResourceData, meta interface{}) error return nil } func resourceOrgPolicyPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &orgpolicy.Policy{ Name: dcl.String(d.Get("name").(string)), Parent: dcl.String(d.Get("parent").(string)), Spec: expandOrgPolicyPolicySpec(d.Get("spec")), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", 
client.Config.BasePath, err) } else { @@ -329,7 +336,7 @@ func resourceOrgPolicyPolicyUpdate(d *schema.ResourceData, meta interface{}) err } func resourceOrgPolicyPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) obj := &orgpolicy.Policy{ Name: dcl.String(d.Get("name").(string)), @@ -338,17 +345,17 @@ func resourceOrgPolicyPolicyDelete(d *schema.ResourceData, meta interface{}) err } log.Printf("[DEBUG] Deleting Policy %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -363,9 +370,9 @@ func resourceOrgPolicyPolicyDelete(d *schema.ResourceData, meta interface{}) err } func resourceOrgPolicyPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := resourceOrgPolicyPolicyCustomImport(d, config); err != nil { + if err := tpgdclresource.ResourceOrgPolicyPolicyCustomImport(d, config); err != nil { return nil, fmt.Errorf("error encountered in import: %v", err) } @@ -429,10 +436,10 @@ func expandOrgPolicyPolicySpecRules(o interface{}) 
*orgpolicy.PolicySpecRules { obj := o.(map[string]interface{}) return &orgpolicy.PolicySpecRules{ - AllowAll: expandEnumBool(obj["allow_all"].(string)), + AllowAll: tpgdclresource.ExpandEnumBool(obj["allow_all"].(string)), Condition: expandOrgPolicyPolicySpecRulesCondition(obj["condition"]), - DenyAll: expandEnumBool(obj["deny_all"].(string)), - Enforce: expandEnumBool(obj["enforce"].(string)), + DenyAll: tpgdclresource.ExpandEnumBool(obj["deny_all"].(string)), + Enforce: tpgdclresource.ExpandEnumBool(obj["enforce"].(string)), Values: expandOrgPolicyPolicySpecRulesValues(obj["values"]), } } @@ -456,10 +463,10 @@ func flattenOrgPolicyPolicySpecRules(obj *orgpolicy.PolicySpecRules) interface{} return nil } transformed := map[string]interface{}{ - "allow_all": flattenEnumBool(obj.AllowAll), + "allow_all": tpgdclresource.FlattenEnumBool(obj.AllowAll), "condition": flattenOrgPolicyPolicySpecRulesCondition(obj.Condition), - "deny_all": flattenEnumBool(obj.DenyAll), - "enforce": flattenEnumBool(obj.Enforce), + "deny_all": tpgdclresource.FlattenEnumBool(obj.DenyAll), + "enforce": tpgdclresource.FlattenEnumBool(obj.Enforce), "values": flattenOrgPolicyPolicySpecRulesValues(obj.Values), } @@ -509,8 +516,8 @@ func expandOrgPolicyPolicySpecRulesValues(o interface{}) *orgpolicy.PolicySpecRu } obj := objArr[0].(map[string]interface{}) return &orgpolicy.PolicySpecRulesValues{ - AllowedValues: expandStringArray(obj["allowed_values"]), - DeniedValues: expandStringArray(obj["denied_values"]), + AllowedValues: tpgdclresource.ExpandStringArray(obj["allowed_values"]), + DeniedValues: tpgdclresource.ExpandStringArray(obj["denied_values"]), } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/osconfig/os_config_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/osconfig/os_config_operation.go new file mode 100644 index 0000000000..5349e9a334 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/osconfig/os_config_operation.go @@ -0,0 +1,74 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package osconfig + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type OSConfigOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *OSConfigOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.OSConfigBasePath, w.CommonOperationWaiter.Op.Name) + url = strings.ReplaceAll(url, "https://osconfig.googleapis.com/v1beta", "https://osconfig.googleapis.com/v1") + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createOSConfigWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*OSConfigOperationWaiter, error) { + w := &OSConfigOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func OSConfigOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createOSConfigWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return 
json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func OSConfigOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createOSConfigWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/osconfig/resource_os_config_os_policy_assignment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/osconfig/resource_os_config_os_policy_assignment.go new file mode 100644 index 0000000000..bf1e3fc048 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/osconfig/resource_os_config_os_policy_assignment.go @@ -0,0 +1,4670 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package osconfig + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceOSConfigOSPolicyAssignment() *schema.Resource { + return &schema.Resource{ + Create: resourceOSConfigOSPolicyAssignmentCreate, + Read: resourceOSConfigOSPolicyAssignmentRead, + Update: resourceOSConfigOSPolicyAssignmentUpdate, + Delete: resourceOSConfigOSPolicyAssignmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceOSConfigOSPolicyAssignmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "instance_filter": { + Type: schema.TypeList, + Required: true, + Description: `Filter to select VMs.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "all": { + Type: schema.TypeBool, + Optional: true, + Description: `Target all VMs in the project. If true, no other criteria is permitted.`, + }, + "exclusion_labels": { + Type: schema.TypeList, + Optional: true, + Description: `List of label sets used for VM exclusion. +If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels are identified by key/value pairs in this map. 
A VM should contain all the key/value pairs specified in this map to be selected.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "inclusion_labels": { + Type: schema.TypeList, + Optional: true, + Description: `List of label sets used for VM inclusion. +If the list has more than one 'LabelSet', the VM is included if any of the label sets are applicable for the VM.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "inventories": { + Type: schema.TypeList, + Optional: true, + Description: `List of inventories to select VMs. +A VM is selected if its inventory data matches at least one of the following inventories.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "os_short_name": { + Type: schema.TypeString, + Required: true, + Description: `The OS short name`, + }, + "os_version": { + Type: schema.TypeString, + Optional: true, + Description: `The OS version Prefix matches are supported if asterisk(*) is provided as the last character. 
For example, to match all versions with a major version of '7', specify the following value for this field '7.*' An empty string matches all OS versions.`, + }, + }, + }, + }, + }, + }, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location for the resource`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Resource name.`, + }, + "os_policies": { + Type: schema.TypeList, + Required: true, + Description: `List of OS policies to be applied to the VMs.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + Description: `The id of the OS policy with the following restrictions: +* Must contain only lowercase letters, numbers, and hyphens. +* Must start with a letter. +* Must be between 1-63 characters. +* Must end with a number or a letter. +* Must be unique within the assignment.`, + }, + "mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"MODE_UNSPECIFIED", "VALIDATION", "ENFORCEMENT"}), + Description: `Policy mode Possible values: ["MODE_UNSPECIFIED", "VALIDATION", "ENFORCEMENT"]`, + }, + "resource_groups": { + Type: schema.TypeList, + Required: true, + Description: `List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. +If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag 'allow_no_resource_group_match'`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resources": { + Type: schema.TypeList, + Required: true, + Description: `List of resources configured for this resource group. 
The resources are executed in the exact order specified here.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + Description: `The id of the resource with the following restrictions: +* Must contain only lowercase letters, numbers, and hyphens. +* Must start with a letter. +* Must be between 1-63 characters. +* Must end with a number or a letter. +* Must be unique within the OS policy.`, + }, + "exec": { + Type: schema.TypeList, + Optional: true, + Description: `Exec resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "validate": { + Type: schema.TypeList, + Required: true, + Description: `What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "interpreter": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"INTERPRETER_UNSPECIFIED", "NONE", "SHELL", "POWERSHELL"}), + Description: `The script interpreter to use. Possible values: ["INTERPRETER_UNSPECIFIED", "NONE", "SHELL", "POWERSHELL"]`, + }, + "args": { + Type: schema.TypeList, + Optional: true, + Description: `Optional arguments to pass to the source during execution.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "file": { + Type: schema.TypeList, + Optional: true, + Description: `A remote or local file.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_insecure": { + Type: schema.TypeBool, + Optional: true, + Description: `Defaults to false. When false, files are subject to validations based on the file type: +Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.`, + }, + "gcs": { + Type: schema.TypeList, + Optional: true, + Description: `A Cloud Storage object.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: `Bucket of the Cloud Storage object.`, + }, + "object": { + Type: schema.TypeString, + Required: true, + Description: `Name of the Cloud Storage object.`, + }, + "generation": { + Type: schema.TypeInt, + Optional: true, + Description: `Generation number of the Cloud Storage object.`, + }, + }, + }, + }, + "local_path": { + Type: schema.TypeString, + Optional: true, + Description: `A local path within the VM to use.`, + }, + "remote": { + Type: schema.TypeList, + Optional: true, + Description: `A generic remote file.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: `URI from which to fetch the object. It should contain both the protocol and path following the format '{protocol}://{location}'.`, + }, + "sha256_checksum": { + Type: schema.TypeString, + Optional: true, + Description: `SHA256 checksum of the remote file.`, + }, + }, + }, + }, + }, + }, + }, + "output_file_path": { + Type: schema.TypeString, + Optional: true, + Description: `Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 100K bytes.`, + }, + "script": { + Type: schema.TypeString, + Optional: true, + Description: `An inline script. The size of the script is limited to 1024 characters.`, + }, + }, + }, + }, + "enforce": { + Type: schema.TypeList, + Optional: true, + Description: `What to run to bring this resource into the desired state. 
An exit code of 100 indicates "success", any other exit code indicates a failure running enforce.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "interpreter": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"INTERPRETER_UNSPECIFIED", "NONE", "SHELL", "POWERSHELL"}), + Description: `The script interpreter to use. Possible values: ["INTERPRETER_UNSPECIFIED", "NONE", "SHELL", "POWERSHELL"]`, + }, + "args": { + Type: schema.TypeList, + Optional: true, + Description: `Optional arguments to pass to the source during execution.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "file": { + Type: schema.TypeList, + Optional: true, + Description: `A remote or local file.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_insecure": { + Type: schema.TypeBool, + Optional: true, + Description: `Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.`, + }, + "gcs": { + Type: schema.TypeList, + Optional: true, + Description: `A Cloud Storage object.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: `Bucket of the Cloud Storage object.`, + }, + "object": { + Type: schema.TypeString, + Required: true, + Description: `Name of the Cloud Storage object.`, + }, + "generation": { + Type: schema.TypeInt, + Optional: true, + Description: `Generation number of the Cloud Storage object.`, + }, + }, + }, + }, + "local_path": { + Type: schema.TypeString, + Optional: true, + Description: `A local path within the VM to use.`, + }, + "remote": { + Type: schema.TypeList, + Optional: true, + Description: `A generic remote file.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: `URI from which to fetch the object. It should contain both the protocol and path following the format '{protocol}://{location}'.`, + }, + "sha256_checksum": { + Type: schema.TypeString, + Optional: true, + Description: `SHA256 checksum of the remote file.`, + }, + }, + }, + }, + }, + }, + }, + "output_file_path": { + Type: schema.TypeString, + Optional: true, + Description: `Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 100K bytes.`, + }, + "script": { + Type: schema.TypeString, + Optional: true, + Description: `An inline script. 
The size of the script is limited to 1024 characters.`, + }, + }, + }, + }, + }, + }, + }, + "file": { + Type: schema.TypeList, + Optional: true, + Description: `File resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: `The absolute path of the file within the VM.`, + }, + "state": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"DESIRED_STATE_UNSPECIFIED", "PRESENT", "ABSENT", "CONTENTS_MATCH"}), + Description: `Desired state of the file. Possible values: ["DESIRED_STATE_UNSPECIFIED", "PRESENT", "ABSENT", "CONTENTS_MATCH"]`, + }, + "content": { + Type: schema.TypeString, + Optional: true, + Description: `A a file with this content. The size of the content is limited to 1024 characters.`, + }, + "file": { + Type: schema.TypeList, + Optional: true, + Description: `A remote or local source.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_insecure": { + Type: schema.TypeBool, + Optional: true, + Description: `Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.`, + }, + "gcs": { + Type: schema.TypeList, + Optional: true, + Description: `A Cloud Storage object.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: `Bucket of the Cloud Storage object.`, + }, + "object": { + Type: schema.TypeString, + Required: true, + Description: `Name of the Cloud Storage object.`, + }, + "generation": { + Type: schema.TypeInt, + Optional: true, + Description: `Generation number of the Cloud Storage object.`, + }, + }, + }, + }, + "local_path": { + Type: schema.TypeString, + Optional: true, + Description: `A local path within the VM to use.`, + }, + "remote": { + Type: schema.TypeList, + Optional: true, + Description: `A generic remote file.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: `URI from which to fetch the object. It should contain both the protocol and path following the format '{protocol}://{location}'.`, + }, + "sha256_checksum": { + Type: schema.TypeString, + Optional: true, + Description: `SHA256 checksum of the remote file.`, + }, + }, + }, + }, + }, + }, + }, + "permissions": { + Type: schema.TypeString, + Computed: true, + Description: `Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one bit corresponds to the execute permission. Default behavior is 755. 
+Below are some examples of permissions and their associated values: read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4`, + }, + }, + }, + }, + "pkg": { + Type: schema.TypeList, + Optional: true, + Description: `Package resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "desired_state": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"DESIRED_STATE_UNSPECIFIED", "INSTALLED", "REMOVED"}), + Description: `The desired state the agent should maintain for this package. Possible values: ["DESIRED_STATE_UNSPECIFIED", "INSTALLED", "REMOVED"]`, + }, + "apt": { + Type: schema.TypeList, + Optional: true, + Description: `A package managed by Apt.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Package name.`, + }, + }, + }, + }, + "deb": { + Type: schema.TypeList, + Optional: true, + Description: `A deb package file.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": { + Type: schema.TypeList, + Required: true, + Description: `A deb package.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_insecure": { + Type: schema.TypeBool, + Optional: true, + Description: `Defaults to false. When false, files are subject to validations based on the file type: +Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.`, + }, + "gcs": { + Type: schema.TypeList, + Optional: true, + Description: `A Cloud Storage object.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: `Bucket of the Cloud Storage object.`, + }, + "object": { + Type: schema.TypeString, + Required: true, + Description: `Name of the Cloud Storage object.`, + }, + "generation": { + Type: schema.TypeInt, + Optional: true, + Description: `Generation number of the Cloud Storage object.`, + }, + }, + }, + }, + "local_path": { + Type: schema.TypeString, + Optional: true, + Description: `A local path within the VM to use.`, + }, + "remote": { + Type: schema.TypeList, + Optional: true, + Description: `A generic remote file.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: `URI from which to fetch the object. It should contain both the protocol and path following the format '{protocol}://{location}'.`, + }, + "sha256_checksum": { + Type: schema.TypeString, + Optional: true, + Description: `SHA256 checksum of the remote file.`, + }, + }, + }, + }, + }, + }, + }, + "pull_deps": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether dependencies should also be installed. 
- install when false: 'dpkg -i package' - install when true: 'apt-get update && apt-get -y install package.deb'`, + }, + }, + }, + }, + "googet": { + Type: schema.TypeList, + Optional: true, + Description: `A package managed by GooGet.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Package name.`, + }, + }, + }, + }, + "msi": { + Type: schema.TypeList, + Optional: true, + Description: `An MSI package.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": { + Type: schema.TypeList, + Required: true, + Description: `The MSI package.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_insecure": { + Type: schema.TypeBool, + Optional: true, + Description: `Defaults to false. When false, files are subject to validations based on the file type: +Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.`, + }, + "gcs": { + Type: schema.TypeList, + Optional: true, + Description: `A Cloud Storage object.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: `Bucket of the Cloud Storage object.`, + }, + "object": { + Type: schema.TypeString, + Required: true, + Description: `Name of the Cloud Storage object.`, + }, + "generation": { + Type: schema.TypeInt, + Optional: true, + Description: `Generation number of the Cloud Storage object.`, + }, + }, + }, + }, + "local_path": { + Type: schema.TypeString, + Optional: true, + Description: `A local path within the VM to use.`, + }, + "remote": { + Type: schema.TypeList, + Optional: true, + Description: `A generic remote file.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: `URI from which to fetch the object. 
It should contain both the protocol and path following the format '{protocol}://{location}'.`, + }, + "sha256_checksum": { + Type: schema.TypeString, + Optional: true, + Description: `SHA256 checksum of the remote file.`, + }, + }, + }, + }, + }, + }, + }, + "properties": { + Type: schema.TypeList, + Optional: true, + Description: `Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of 'ACTION=INSTALL REBOOT=ReallySuppress'.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "rpm": { + Type: schema.TypeList, + Optional: true, + Description: `An rpm package file.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": { + Type: schema.TypeList, + Required: true, + Description: `An rpm package.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_insecure": { + Type: schema.TypeBool, + Optional: true, + Description: `Defaults to false. When false, files are subject to validations based on the file type: +Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.`, + }, + "gcs": { + Type: schema.TypeList, + Optional: true, + Description: `A Cloud Storage object.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: `Bucket of the Cloud Storage object.`, + }, + "object": { + Type: schema.TypeString, + Required: true, + Description: `Name of the Cloud Storage object.`, + }, + "generation": { + Type: schema.TypeInt, + Optional: true, + Description: `Generation number of the Cloud Storage object.`, + }, + }, + }, + }, + "local_path": { + Type: schema.TypeString, + Optional: true, + Description: `A local path within the VM to use.`, + }, + "remote": { + Type: schema.TypeList, + Optional: true, + Description: `A generic remote file.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: `URI from which to fetch the object. It should contain both the protocol and path following the format '{protocol}://{location}'.`, + }, + "sha256_checksum": { + Type: schema.TypeString, + Optional: true, + Description: `SHA256 checksum of the remote file.`, + }, + }, + }, + }, + }, + }, + }, + "pull_deps": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether dependencies should also be installed. 
- install when false: 'rpm --upgrade --replacepkgs package.rpm' - install when true: 'yum -y install package.rpm' or 'zypper -y install package.rpm'`, + }, + }, + }, + }, + "yum": { + Type: schema.TypeList, + Optional: true, + Description: `A package managed by YUM.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Package name.`, + }, + }, + }, + }, + "zypper": { + Type: schema.TypeList, + Optional: true, + Description: `A package managed by Zypper.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Package name.`, + }, + }, + }, + }, + }, + }, + }, + "repository": { + Type: schema.TypeList, + Optional: true, + Description: `Package repository resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "apt": { + Type: schema.TypeList, + Optional: true, + Description: `An Apt Repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"ARCHIVE_TYPE_UNSPECIFIED", "DEB", "DEB_SRC"}), + Description: `Type of archive files in this repository. Possible values: ["ARCHIVE_TYPE_UNSPECIFIED", "DEB", "DEB_SRC"]`, + }, + "components": { + Type: schema.TypeList, + Required: true, + Description: `List of components for this repository. Must contain at least one item.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "distribution": { + Type: schema.TypeString, + Required: true, + Description: `Distribution of this repository.`, + }, + "uri": { + Type: schema.TypeString, + Required: true, + Description: `URI for this repository.`, + }, + "gpg_key": { + Type: schema.TypeString, + Optional: true, + Description: `URI of the key file for this repository. 
The agent maintains a keyring at '/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg'.`, + }, + }, + }, + }, + "goo": { + Type: schema.TypeList, + Optional: true, + Description: `A Goo Repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the repository.`, + }, + "url": { + Type: schema.TypeString, + Required: true, + Description: `The url of the repository.`, + }, + }, + }, + }, + "yum": { + Type: schema.TypeList, + Optional: true, + Description: `A Yum Repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "base_url": { + Type: schema.TypeString, + Required: true, + Description: `The location of the repository directory.`, + }, + "id": { + Type: schema.TypeString, + Required: true, + Description: `A one word, unique name for this repository. This is the 'repo id' in the yum config file and also the 'display_name' if 'display_name' is omitted. This id is also used as the unique identifier when checking for resource conflicts.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `The display name of the repository.`, + }, + "gpg_keys": { + Type: schema.TypeList, + Optional: true, + Description: `URIs of GPG keys.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "zypper": { + Type: schema.TypeList, + Optional: true, + Description: `A Zypper Repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "base_url": { + Type: schema.TypeString, + Required: true, + Description: `The location of the repository directory.`, + }, + "id": { + Type: schema.TypeString, + Required: true, + Description: `A one word, unique name for this repository. This is the 'repo id' in the zypper config file and also the 'display_name' if 'display_name' is omitted. 
This id is also used as the unique identifier when checking for GuestPolicy conflicts.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `The display name of the repository.`, + }, + "gpg_keys": { + Type: schema.TypeList, + Optional: true, + Description: `URIs of GPG keys.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "inventory_filters": { + Type: schema.TypeList, + Optional: true, + Description: `List of inventory filters for the resource group. +The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. +For example, to apply this resource group to VMs running either 'RHEL' or 'CentOS' operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' +If the list is empty, this resource group will be applied to the target VM unconditionally.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "os_short_name": { + Type: schema.TypeString, + Required: true, + Description: `The OS short name`, + }, + "os_version": { + Type: schema.TypeString, + Optional: true, + Description: `The OS version +Prefix matches are supported if asterisk(*) is provided as the last character. For example, to match all versions with a major version of '7', specify the following value for this field '7.*' +An empty string matches all OS versions.`, + }, + }, + }, + }, + }, + }, + }, + "allow_no_resource_group_match": { + Type: schema.TypeBool, + Optional: true, + Description: `This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. 
Set this value to 'true' if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Policy description. Length of the description is limited to 1024 characters.`, + }, + }, + }, + }, + "rollout": { + Type: schema.TypeList, + Required: true, + Description: `Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disruption_budget": { + Type: schema.TypeList, + Required: true, + Description: `The maximum number (or percentage) of VMs per zone to disrupt at any given moment.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fixed": { + Type: schema.TypeInt, + Optional: true, + Description: `Specifies a fixed value.`, + }, + "percent": { + Type: schema.TypeInt, + Optional: true, + Description: `Specifies the relative value defined as a percentage, which will be multiplied by a reference value.`, + }, + }, + }, + }, + "min_wait_duration": { + Type: schema.TypeString, + Required: true, + Description: `This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the 'disruption_budget' at least until this duration of time has passed after configuration changes are applied.`, + DiffSuppressFunc: compareDuration, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `OS policy assignment description. Length of the description is limited to 1024 characters.`, + }, + "baseline": { + Type: schema.TypeBool, + Computed: true, + Description: `Output only. 
Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. +For a given OS policy assignment, there is only one revision with a value of 'true' for this field.`, + }, + "deleted": { + Type: schema.TypeBool, + Computed: true, + Description: `Output only. Indicates that this revision deletes the OS policy assignment.`, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.`, + }, + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: `Output only. Indicates that reconciliation is in progress for the revision. This value is 'true' when the 'rollout_state' is one of: +* IN_PROGRESS +* CANCELLING`, + }, + "revision_create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The timestamp that the revision was created.`, + }, + "revision_id": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The assignment revision ID A new revision is committed whenever a rollout is triggered for a OS policy assignment`, + }, + "rollout_state": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. OS policy assignment rollout state`, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
Server generated unique id for the OS policy assignment resource.`, + }, + "skip_await_rollout": { + Type: schema.TypeBool, + Optional: true, + Description: `Set to true to skip awaiting rollout during resource creation and update.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + }, + UseJSONNumber: true, + } +} + +func resourceOSConfigOSPolicyAssignmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandOSConfigOSPolicyAssignmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + osPoliciesProp, err := expandOSConfigOSPolicyAssignmentOsPolicies(d.Get("os_policies"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("os_policies"); !tpgresource.IsEmptyValue(reflect.ValueOf(osPoliciesProp)) && (ok || !reflect.DeepEqual(v, osPoliciesProp)) { + obj["osPolicies"] = osPoliciesProp + } + instanceFilterProp, err := expandOSConfigOSPolicyAssignmentInstanceFilter(d.Get("instance_filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("instance_filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceFilterProp)) && (ok || !reflect.DeepEqual(v, instanceFilterProp)) { + obj["instanceFilter"] = instanceFilterProp + } + rolloutProp, err := expandOSConfigOSPolicyAssignmentRollout(d.Get("rollout"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rollout"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(rolloutProp)) && (ok || !reflect.DeepEqual(v, rolloutProp)) { + obj["rollout"] = rolloutProp + } + + log.Printf("[DEBUG] Creating new OSPolicyAssignment: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for OSPolicyAssignment: %s", err) + } + // Shorten long form project id to short form. + billingProject = tpgresource.GetResourceNameFromSelfLink(project) + + url, err := tpgresource.ReplaceVars(d, config, "{{OSConfigBasePath}}projects/{{project}}/locations/{{location}}/osPolicyAssignments?osPolicyAssignmentId={{name}}") + if err != nil { + return err + } + // Always use GA endpoints for this resource. + url = strings.ReplaceAll(url, "https://osconfig.googleapis.com/v1beta", "https://osconfig.googleapis.com/v1") + // Remove redundant projects/ from url. + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating OSPolicyAssignment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + // Remove redundant projects/ from id. 
+ id = strings.ReplaceAll(id, "projects/projects/", "projects/") + d.SetId(id) + + if skipAwaitRollout := d.Get("skip_await_rollout").(bool); !skipAwaitRollout { + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = OSConfigOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating OSPolicyAssignment", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create OSPolicyAssignment: %s", err) + } + + if err := d.Set("name", flattenOSConfigOSPolicyAssignmentName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + // Remove redundant projects/ from id. 
+ id = strings.ReplaceAll(id, "projects/projects/", "projects/") + d.SetId(id) + } + + log.Printf("[DEBUG] Finished creating OSPolicyAssignment %q: %#v", d.Id(), res) + + return resourceOSConfigOSPolicyAssignmentRead(d, meta) +} + +func resourceOSConfigOSPolicyAssignmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for OSPolicyAssignment: %s", err) + } + // Shorten long form project id to short form + billingProject = tpgresource.GetResourceNameFromSelfLink(project) + + url, err := tpgresource.ReplaceVars(d, config, "{{OSConfigBasePath}}projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}") + if err != nil { + return err + } + // Always use GA endpoints for this resource. + url = strings.ReplaceAll(url, "https://osconfig.googleapis.com/v1beta", "https://osconfig.googleapis.com/v1") + // Remove redundant projects/ from url. 
+ url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("OSConfigOSPolicyAssignment %q", d.Id())) + } + + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("skip_await_rollout"); !ok { + if err := d.Set("skip_await_rollout", false); err != nil { + return fmt.Errorf("Error setting skip_await_rollout: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + + if err := d.Set("name", flattenOSConfigOSPolicyAssignmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + if err := d.Set("description", flattenOSConfigOSPolicyAssignmentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + if err := d.Set("os_policies", flattenOSConfigOSPolicyAssignmentOsPolicies(res["osPolicies"], d, config)); err != nil { + return fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + if err := d.Set("instance_filter", flattenOSConfigOSPolicyAssignmentInstanceFilter(res["instanceFilter"], d, config)); err != nil { + return fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + if err := d.Set("rollout", flattenOSConfigOSPolicyAssignmentRollout(res["rollout"], d, config)); err != nil { + return fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + if err := d.Set("revision_id", flattenOSConfigOSPolicyAssignmentRevisionId(res["revisionId"], d, config)); err != nil { + return 
fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + if err := d.Set("revision_create_time", flattenOSConfigOSPolicyAssignmentRevisionCreateTime(res["revisionCreateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + if err := d.Set("etag", flattenOSConfigOSPolicyAssignmentEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + if err := d.Set("rollout_state", flattenOSConfigOSPolicyAssignmentRolloutState(res["rolloutState"], d, config)); err != nil { + return fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + if err := d.Set("baseline", flattenOSConfigOSPolicyAssignmentBaseline(res["baseline"], d, config)); err != nil { + return fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + if err := d.Set("deleted", flattenOSConfigOSPolicyAssignmentDeleted(res["deleted"], d, config)); err != nil { + return fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + if err := d.Set("reconciling", flattenOSConfigOSPolicyAssignmentReconciling(res["reconciling"], d, config)); err != nil { + return fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + if err := d.Set("uid", flattenOSConfigOSPolicyAssignmentUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading OSPolicyAssignment: %s", err) + } + + return nil +} + +func resourceOSConfigOSPolicyAssignmentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for OSPolicyAssignment: %s", err) + } + // Shorten long form project id to short form + billingProject = tpgresource.GetResourceNameFromSelfLink(project) + + obj := make(map[string]interface{}) + descriptionProp, err 
:= expandOSConfigOSPolicyAssignmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + osPoliciesProp, err := expandOSConfigOSPolicyAssignmentOsPolicies(d.Get("os_policies"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("os_policies"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, osPoliciesProp)) { + obj["osPolicies"] = osPoliciesProp + } + instanceFilterProp, err := expandOSConfigOSPolicyAssignmentInstanceFilter(d.Get("instance_filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("instance_filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, instanceFilterProp)) { + obj["instanceFilter"] = instanceFilterProp + } + rolloutProp, err := expandOSConfigOSPolicyAssignmentRollout(d.Get("rollout"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rollout"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rolloutProp)) { + obj["rollout"] = rolloutProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{OSConfigBasePath}}projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}") + if err != nil { + return err + } + // Always use GA endpoints for this resource. + url = strings.ReplaceAll(url, "https://osconfig.googleapis.com/v1beta", "https://osconfig.googleapis.com/v1") + // Remove redundant projects/ from url. 
+ url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + log.Printf("[DEBUG] Updating OSPolicyAssignment %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("os_policies") { + updateMask = append(updateMask, "osPolicies") + } + + if d.HasChange("instance_filter") { + updateMask = append(updateMask, "instanceFilter") + } + + if d.HasChange("rollout") { + updateMask = append(updateMask, "rollout") + } + // updateMask is a URL parameter but not present in the schema, so tpgresource.ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating OSPolicyAssignment %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating OSPolicyAssignment %q: %#v", d.Id(), res) + } + + if skipAwaitRollout := d.Get("skip_await_rollout").(bool); !skipAwaitRollout { + err = OSConfigOperationWaitTime( + config, res, project, "Updating OSPolicyAssignment", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + } + + return resourceOSConfigOSPolicyAssignmentRead(d, meta) +} + +func resourceOSConfigOSPolicyAssignmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := 
tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for OSPolicyAssignment: %s", err) + } + // Shorten long form project id to short form + billingProject = tpgresource.GetResourceNameFromSelfLink(project) + + url, err := tpgresource.ReplaceVars(d, config, "{{OSConfigBasePath}}projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}") + if err != nil { + return err + } + // Always use GA endpoints for this resource. + url = strings.ReplaceAll(url, "https://osconfig.googleapis.com/v1beta", "https://osconfig.googleapis.com/v1") + // Remove redundant projects/ from url. + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + log.Printf("[DEBUG] Deleting OSPolicyAssignment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "OSPolicyAssignment") + } + + if skipAwaitRollout := d.Get("skip_await_rollout").(bool); !skipAwaitRollout { + err = OSConfigOperationWaitTime( + config, res, project, "Deleting OSPolicyAssignment", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + } + + log.Printf("[DEBUG] Finished deleting OSPolicyAssignment %q: %#v", d.Id(), res) + return nil +} + +func resourceOSConfigOSPolicyAssignmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/osPolicyAssignments/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", + "(?P<location>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + 
return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + // Remove redundant projects/ from id. + id = strings.ReplaceAll(id, "projects/projects/", "projects/") + d.SetId(id) + + // Explicitly set virtual fields to default values on import + if err := d.Set("skip_await_rollout", false); err != nil { + return nil, fmt.Errorf("Error setting skip_await_rollout: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func compareDuration(_, old, new string, _ *schema.ResourceData) bool { + oldDuration, err := time.ParseDuration(old) + if err != nil { + return false + } + newDuration, err := time.ParseDuration(new) + if err != nil { + return false + } + return oldDuration == newDuration +} + +func flattenOSConfigOSPolicyAssignmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenOSConfigOSPolicyAssignmentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "id": flattenOSConfigOSPolicyAssignmentOsPoliciesId(original["id"], d, config), + "description": flattenOSConfigOSPolicyAssignmentOsPoliciesDescription(original["description"], d, config), + "mode": 
flattenOSConfigOSPolicyAssignmentOsPoliciesMode(original["mode"], d, config), + "resource_groups": flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroups(original["resourceGroups"], d, config), + "allow_no_resource_group_match": flattenOSConfigOSPolicyAssignmentOsPoliciesAllowNoResourceGroupMatch(original["allowNoResourceGroupMatch"], d, config), + }) + } + return transformed +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroups(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "inventory_filters": flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsInventoryFilters(original["inventoryFilters"], d, config), + "resources": flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResources(original["resources"], d, config), + }) + } + return transformed +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsInventoryFilters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not 
include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "os_short_name": flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsInventoryFiltersOsShortName(original["osShortName"], d, config), + "os_version": flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsInventoryFiltersOsVersion(original["osVersion"], d, config), + }) + } + return transformed +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsInventoryFiltersOsShortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsInventoryFiltersOsVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "id": flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesId(original["id"], d, config), + "pkg": flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkg(original["pkg"], d, config), + "repository": flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepository(original["repository"], d, config), + "exec": flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExec(original["exec"], d, config), + "file": flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFile(original["file"], d, config), + }) + } + return transformed +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesId(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkg(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["desired_state"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDesiredState(original["desiredState"], d, config) + transformed["apt"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgApt(original["apt"], d, config) + transformed["deb"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDeb(original["deb"], d, config) + transformed["yum"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgYum(original["yum"], d, config) + transformed["zypper"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgZypper(original["zypper"], d, config) + transformed["rpm"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpm(original["rpm"], d, config) + transformed["googet"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgGooget(original["googet"], d, config) + transformed["msi"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsi(original["msi"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDesiredState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgApt(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := 
make(map[string]interface{}) + transformed["name"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgAptName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgAptName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDeb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["source"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSource(original["source"], d, config) + transformed["pull_deps"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebPullDeps(original["pullDeps"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["remote"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceRemote(original["remote"], d, config) + transformed["gcs"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcs(original["gcs"], d, config) + transformed["local_path"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceLocalPath(original["localPath"], d, config) + transformed["allow_insecure"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceAllowInsecure(original["allowInsecure"], d, config) + return []interface{}{transformed} +} 
+func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceRemote(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceRemoteUri(original["uri"], d, config) + transformed["sha256_checksum"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceRemoteSha256Checksum(original["sha256Checksum"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceRemoteUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceRemoteSha256Checksum(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["bucket"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcsBucket(original["bucket"], d, config) + transformed["object"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcsObject(original["object"], d, config) + transformed["generation"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcsGeneration(original["generation"], d, config) + return []interface{}{transformed} +} +func 
flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcsBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcsObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcsGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceLocalPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceAllowInsecure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebPullDeps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgYum(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgYumName(original["name"], d, config) + return []interface{}{transformed} +} 
+func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgYumName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgZypper(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgZypperName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgZypperName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["source"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSource(original["source"], d, config) + transformed["pull_deps"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmPullDeps(original["pullDeps"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["remote"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceRemote(original["remote"], d, config) + 
transformed["gcs"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcs(original["gcs"], d, config) + transformed["local_path"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceLocalPath(original["localPath"], d, config) + transformed["allow_insecure"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceAllowInsecure(original["allowInsecure"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceRemote(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceRemoteUri(original["uri"], d, config) + transformed["sha256_checksum"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSha256Checksum(original["sha256Checksum"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceRemoteUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSha256Checksum(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["bucket"] = + 
flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcsBucket(original["bucket"], d, config) + transformed["object"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcsObject(original["object"], d, config) + transformed["generation"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcsGeneration(original["generation"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcsBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcsObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcsGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceLocalPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceAllowInsecure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmPullDeps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgGooget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgGoogetName(original["name"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgGoogetName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsi(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["source"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSource(original["source"], d, config) + transformed["properties"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiProperties(original["properties"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["remote"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceRemote(original["remote"], d, config) + transformed["gcs"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcs(original["gcs"], d, config) + transformed["local_path"] = + 
flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceLocalPath(original["localPath"], d, config) + transformed["allow_insecure"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceAllowInsecure(original["allowInsecure"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceRemote(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceRemoteUri(original["uri"], d, config) + transformed["sha256_checksum"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSha256Checksum(original["sha256Checksum"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceRemoteUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSha256Checksum(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["bucket"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcsBucket(original["bucket"], d, config) + transformed["object"] = + 
flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcsObject(original["object"], d, config) + transformed["generation"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcsGeneration(original["generation"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcsBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcsObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcsGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceLocalPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceAllowInsecure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == 
nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["apt"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryApt(original["apt"], d, config) + transformed["yum"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYum(original["yum"], d, config) + transformed["zypper"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypper(original["zypper"], d, config) + transformed["goo"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryGoo(original["goo"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryApt(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["archive_type"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptArchiveType(original["archiveType"], d, config) + transformed["uri"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptUri(original["uri"], d, config) + transformed["distribution"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptDistribution(original["distribution"], d, config) + transformed["components"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptComponents(original["components"], d, config) + transformed["gpg_key"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptGpgKey(original["gpgKey"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptArchiveType(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptDistribution(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptComponents(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptGpgKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYum(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["id"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumId(original["id"], d, config) + transformed["display_name"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumDisplayName(original["displayName"], d, config) + transformed["base_url"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumBaseUrl(original["baseUrl"], d, config) + transformed["gpg_keys"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumGpgKeys(original["gpgKeys"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v 
+} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumBaseUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumGpgKeys(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypper(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["id"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperId(original["id"], d, config) + transformed["display_name"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperDisplayName(original["displayName"], d, config) + transformed["base_url"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperBaseUrl(original["baseUrl"], d, config) + transformed["gpg_keys"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperGpgKeys(original["gpgKeys"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperBaseUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperGpgKeys(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryGoo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryGooName(original["name"], d, config) + transformed["url"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryGooUrl(original["url"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryGooName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryGooUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["validate"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidate(original["validate"], d, config) + transformed["enforce"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforce(original["enforce"], 
d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["file"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFile(original["file"], d, config) + transformed["script"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateScript(original["script"], d, config) + transformed["args"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateArgs(original["args"], d, config) + transformed["interpreter"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateInterpreter(original["interpreter"], d, config) + transformed["output_file_path"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateOutputFilePath(original["outputFilePath"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["remote"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileRemote(original["remote"], d, config) + transformed["gcs"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcs(original["gcs"], d, config) + transformed["local_path"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileLocalPath(original["localPath"], d, config) + transformed["allow_insecure"] = + 
flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileAllowInsecure(original["allowInsecure"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileRemote(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileRemoteUri(original["uri"], d, config) + transformed["sha256_checksum"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileRemoteSha256Checksum(original["sha256Checksum"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileRemoteUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileRemoteSha256Checksum(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["bucket"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcsBucket(original["bucket"], d, config) + transformed["object"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcsObject(original["object"], d, config) + transformed["generation"] = + 
flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcsGeneration(original["generation"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcsBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcsObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcsGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileLocalPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileAllowInsecure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateScript(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateArgs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateInterpreter(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateOutputFilePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforce(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["file"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFile(original["file"], d, config) + transformed["script"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceScript(original["script"], d, config) + transformed["args"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceArgs(original["args"], d, config) + transformed["interpreter"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceInterpreter(original["interpreter"], d, config) + transformed["output_file_path"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceOutputFilePath(original["outputFilePath"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["remote"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileRemote(original["remote"], d, config) + transformed["gcs"] = + 
flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcs(original["gcs"], d, config) + transformed["local_path"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileLocalPath(original["localPath"], d, config) + transformed["allow_insecure"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileAllowInsecure(original["allowInsecure"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileRemote(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileRemoteUri(original["uri"], d, config) + transformed["sha256_checksum"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileRemoteSha256Checksum(original["sha256Checksum"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileRemoteUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileRemoteSha256Checksum(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["bucket"] = + 
flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcsBucket(original["bucket"], d, config) + transformed["object"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcsObject(original["object"], d, config) + transformed["generation"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcsGeneration(original["generation"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcsBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcsObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcsGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileLocalPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileAllowInsecure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceScript(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + 
+func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceArgs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceInterpreter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceOutputFilePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["file"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFile(original["file"], d, config) + transformed["content"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileContent(original["content"], d, config) + transformed["path"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFilePath(original["path"], d, config) + transformed["state"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileState(original["state"], d, config) + transformed["permissions"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFilePermissions(original["permissions"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFile(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["remote"] = + 
flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileRemote(original["remote"], d, config) + transformed["gcs"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcs(original["gcs"], d, config) + transformed["local_path"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileLocalPath(original["localPath"], d, config) + transformed["allow_insecure"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileAllowInsecure(original["allowInsecure"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileRemote(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["uri"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileRemoteUri(original["uri"], d, config) + transformed["sha256_checksum"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileRemoteSha256Checksum(original["sha256Checksum"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileRemoteUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileRemoteSha256Checksum(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + 
transformed["bucket"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcsBucket(original["bucket"], d, config) + transformed["object"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcsObject(original["object"], d, config) + transformed["generation"] = + flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcsGeneration(original["generation"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcsBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcsObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcsGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileLocalPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileAllowInsecure(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileContent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFilePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFilePermissions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentOsPoliciesAllowNoResourceGroupMatch(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentInstanceFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["all"] = + flattenOSConfigOSPolicyAssignmentInstanceFilterAll(original["all"], d, config) + transformed["inclusion_labels"] = + flattenOSConfigOSPolicyAssignmentInstanceFilterInclusionLabels(original["inclusionLabels"], d, config) + transformed["exclusion_labels"] = + flattenOSConfigOSPolicyAssignmentInstanceFilterExclusionLabels(original["exclusionLabels"], d, config) + transformed["inventories"] = + flattenOSConfigOSPolicyAssignmentInstanceFilterInventories(original["inventories"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentInstanceFilterAll(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentInstanceFilterInclusionLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := 
range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "labels": flattenOSConfigOSPolicyAssignmentInstanceFilterInclusionLabelsLabels(original["labels"], d, config), + }) + } + return transformed +} +func flattenOSConfigOSPolicyAssignmentInstanceFilterInclusionLabelsLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentInstanceFilterExclusionLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "labels": flattenOSConfigOSPolicyAssignmentInstanceFilterExclusionLabelsLabels(original["labels"], d, config), + }) + } + return transformed +} +func flattenOSConfigOSPolicyAssignmentInstanceFilterExclusionLabelsLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentInstanceFilterInventories(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "os_short_name": flattenOSConfigOSPolicyAssignmentInstanceFilterInventoriesOsShortName(original["osShortName"], d, config), + "os_version": 
flattenOSConfigOSPolicyAssignmentInstanceFilterInventoriesOsVersion(original["osVersion"], d, config), + }) + } + return transformed +} +func flattenOSConfigOSPolicyAssignmentInstanceFilterInventoriesOsShortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentInstanceFilterInventoriesOsVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentRollout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["disruption_budget"] = + flattenOSConfigOSPolicyAssignmentRolloutDisruptionBudget(original["disruptionBudget"], d, config) + transformed["min_wait_duration"] = + flattenOSConfigOSPolicyAssignmentRolloutMinWaitDuration(original["minWaitDuration"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentRolloutDisruptionBudget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["fixed"] = + flattenOSConfigOSPolicyAssignmentRolloutDisruptionBudgetFixed(original["fixed"], d, config) + transformed["percent"] = + flattenOSConfigOSPolicyAssignmentRolloutDisruptionBudgetPercent(original["percent"], d, config) + return []interface{}{transformed} +} +func flattenOSConfigOSPolicyAssignmentRolloutDisruptionBudgetFixed(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } 
+ + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOSConfigOSPolicyAssignmentRolloutDisruptionBudgetPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenOSConfigOSPolicyAssignmentRolloutMinWaitDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentRevisionId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentRevisionCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentRolloutState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentBaseline(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentDeleted(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentReconciling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSConfigOSPolicyAssignmentUid(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func expandOSConfigOSPolicyAssignmentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPolicies(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandOSConfigOSPolicyAssignmentOsPoliciesId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedDescription, err := expandOSConfigOSPolicyAssignmentOsPoliciesDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedMode, err := expandOSConfigOSPolicyAssignmentOsPoliciesMode(original["mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mode"] = transformedMode + } + + transformedResourceGroups, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroups(original["resource_groups"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResourceGroups); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resourceGroups"] = 
transformedResourceGroups + } + + transformedAllowNoResourceGroupMatch, err := expandOSConfigOSPolicyAssignmentOsPoliciesAllowNoResourceGroupMatch(original["allow_no_resource_group_match"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowNoResourceGroupMatch); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowNoResourceGroupMatch"] = transformedAllowNoResourceGroupMatch + } + + req = append(req, transformed) + } + return req, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroups(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInventoryFilters, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsInventoryFilters(original["inventory_filters"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInventoryFilters); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["inventoryFilters"] = transformedInventoryFilters + } + + transformedResources, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResources(original["resources"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedResources); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resources"] = transformedResources + } + + req = append(req, transformed) + } + return req, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsInventoryFilters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedOsShortName, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsInventoryFiltersOsShortName(original["os_short_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOsShortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["osShortName"] = transformedOsShortName + } + + transformedOsVersion, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsInventoryFiltersOsVersion(original["os_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOsVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["osVersion"] = transformedOsVersion + } + + req = append(req, transformed) + } + return req, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsInventoryFiltersOsShortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsInventoryFiltersOsVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := 
v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedPkg, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkg(original["pkg"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPkg); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pkg"] = transformedPkg + } + + transformedRepository, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepository(original["repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repository"] = transformedRepository + } + + transformedExec, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExec(original["exec"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExec); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["exec"] = transformedExec + } + + transformedFile, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFile(original["file"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFile); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["file"] = transformedFile + } + + req = append(req, transformed) + } + return req, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkg(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDesiredState, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDesiredState(original["desired_state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDesiredState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["desiredState"] = transformedDesiredState + } + + transformedApt, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgApt(original["apt"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedApt); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["apt"] = transformedApt + } + + transformedDeb, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDeb(original["deb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDeb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["deb"] = transformedDeb + } + + transformedYum, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgYum(original["yum"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYum); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["yum"] = transformedYum + } + + transformedZypper, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgZypper(original["zypper"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedZypper); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["zypper"] = 
transformedZypper + } + + transformedRpm, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpm(original["rpm"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRpm); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rpm"] = transformedRpm + } + + transformedGooget, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgGooget(original["googet"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGooget); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["googet"] = transformedGooget + } + + transformedMsi, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsi(original["msi"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMsi); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["msi"] = transformedMsi + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDesiredState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgApt(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgAptName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func 
expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgAptName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDeb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSource, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSource(original["source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["source"] = transformedSource + } + + transformedPullDeps, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebPullDeps(original["pull_deps"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPullDeps); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pullDeps"] = transformedPullDeps + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRemote, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceRemote(original["remote"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRemote); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["remote"] = transformedRemote + } + + transformedGcs, 
err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcs(original["gcs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGcs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gcs"] = transformedGcs + } + + transformedLocalPath, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceLocalPath(original["local_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["localPath"] = transformedLocalPath + } + + transformedAllowInsecure, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceAllowInsecure(original["allow_insecure"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowInsecure); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowInsecure"] = transformedAllowInsecure + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceRemote(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceRemoteUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedSha256Checksum, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceRemoteSha256Checksum(original["sha256_checksum"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedSha256Checksum); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha256Checksum"] = transformedSha256Checksum + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceRemoteUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceRemoteSha256Checksum(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBucket, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcsBucket(original["bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucket"] = transformedBucket + } + + transformedObject, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcsObject(original["object"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["object"] = transformedObject + } + + transformedGeneration, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcsGeneration(original["generation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["generation"] = transformedGeneration + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcsBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcsObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceGcsGeneration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceLocalPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebSourceAllowInsecure(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgDebPullDeps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgYum(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgYumName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgYumName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgZypper(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgZypperName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgZypperName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSource, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSource(original["source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["source"] = transformedSource + } + + transformedPullDeps, err := 
expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmPullDeps(original["pull_deps"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPullDeps); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["pullDeps"] = transformedPullDeps + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRemote, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceRemote(original["remote"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRemote); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["remote"] = transformedRemote + } + + transformedGcs, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcs(original["gcs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGcs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gcs"] = transformedGcs + } + + transformedLocalPath, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceLocalPath(original["local_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["localPath"] = transformedLocalPath + } + + transformedAllowInsecure, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceAllowInsecure(original["allow_insecure"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowInsecure); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["allowInsecure"] = transformedAllowInsecure + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceRemote(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceRemoteUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedSha256Checksum, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSha256Checksum(original["sha256_checksum"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha256Checksum); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha256Checksum"] = transformedSha256Checksum + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceRemoteUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSha256Checksum(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original 
:= raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBucket, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcsBucket(original["bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucket"] = transformedBucket + } + + transformedObject, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcsObject(original["object"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["object"] = transformedObject + } + + transformedGeneration, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcsGeneration(original["generation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["generation"] = transformedGeneration + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcsBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcsObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceGcsGeneration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceLocalPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmSourceAllowInsecure(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgRpmPullDeps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgGooget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgGoogetName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgGoogetName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsi(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSource, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSource(original["source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSource); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["source"] = transformedSource + } + + transformedProperties, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiProperties(original["properties"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProperties); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["properties"] = transformedProperties + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRemote, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceRemote(original["remote"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRemote); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["remote"] = transformedRemote + } + + transformedGcs, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcs(original["gcs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGcs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gcs"] = transformedGcs + } + + transformedLocalPath, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceLocalPath(original["local_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["localPath"] = transformedLocalPath + } + + transformedAllowInsecure, err := 
expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceAllowInsecure(original["allow_insecure"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowInsecure); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowInsecure"] = transformedAllowInsecure + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceRemote(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceRemoteUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedSha256Checksum, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSha256Checksum(original["sha256_checksum"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha256Checksum); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha256Checksum"] = transformedSha256Checksum + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceRemoteUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSha256Checksum(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBucket, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcsBucket(original["bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucket"] = transformedBucket + } + + transformedObject, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcsObject(original["object"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["object"] = transformedObject + } + + transformedGeneration, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcsGeneration(original["generation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["generation"] = transformedGeneration + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcsBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcsObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceGcsGeneration(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceLocalPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiSourceAllowInsecure(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesPkgMsiProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedApt, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryApt(original["apt"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedApt); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["apt"] = transformedApt + } + + transformedYum, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYum(original["yum"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYum); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["yum"] = transformedYum + } + + transformedZypper, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypper(original["zypper"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedZypper); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["zypper"] = transformedZypper + } + + transformedGoo, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryGoo(original["goo"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGoo); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["goo"] = transformedGoo + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryApt(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedArchiveType, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptArchiveType(original["archive_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArchiveType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["archiveType"] = transformedArchiveType + } + + transformedUri, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedDistribution, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptDistribution(original["distribution"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDistribution); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["distribution"] = transformedDistribution + } + + transformedComponents, err := 
expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptComponents(original["components"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedComponents); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["components"] = transformedComponents + } + + transformedGpgKey, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptGpgKey(original["gpg_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGpgKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gpgKey"] = transformedGpgKey + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptArchiveType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptDistribution(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptComponents(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryAptGpgKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYum(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := 
v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedDisplayName, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumDisplayName(original["display_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["displayName"] = transformedDisplayName + } + + transformedBaseUrl, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumBaseUrl(original["base_url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBaseUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["baseUrl"] = transformedBaseUrl + } + + transformedGpgKeys, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumGpgKeys(original["gpg_keys"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGpgKeys); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gpgKeys"] = transformedGpgKeys + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, 
nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumBaseUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryYumGpgKeys(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypper(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedId, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperId(original["id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["id"] = transformedId + } + + transformedDisplayName, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperDisplayName(original["display_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisplayName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["displayName"] = transformedDisplayName + } + + transformedBaseUrl, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperBaseUrl(original["base_url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBaseUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["baseUrl"] = transformedBaseUrl + } + + transformedGpgKeys, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperGpgKeys(original["gpg_keys"], d, 
config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGpgKeys); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gpgKeys"] = transformedGpgKeys + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperBaseUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryZypperGpgKeys(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryGoo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryGooName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedUrl, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryGooUrl(original["url"], d, config) + if err != nil { + return nil, err + } else if 
val := reflect.ValueOf(transformedUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["url"] = transformedUrl + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryGooName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesRepositoryGooUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedValidate, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidate(original["validate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValidate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["validate"] = transformedValidate + } + + transformedEnforce, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforce(original["enforce"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnforce); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enforce"] = transformedEnforce + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + 
transformed := make(map[string]interface{}) + + transformedFile, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFile(original["file"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFile); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["file"] = transformedFile + } + + transformedScript, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateScript(original["script"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScript); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["script"] = transformedScript + } + + transformedArgs, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateArgs(original["args"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["args"] = transformedArgs + } + + transformedInterpreter, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateInterpreter(original["interpreter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInterpreter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["interpreter"] = transformedInterpreter + } + + transformedOutputFilePath, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateOutputFilePath(original["output_file_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOutputFilePath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["outputFilePath"] = transformedOutputFilePath + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l 
:= v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRemote, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileRemote(original["remote"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRemote); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["remote"] = transformedRemote + } + + transformedGcs, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcs(original["gcs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGcs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gcs"] = transformedGcs + } + + transformedLocalPath, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileLocalPath(original["local_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["localPath"] = transformedLocalPath + } + + transformedAllowInsecure, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileAllowInsecure(original["allow_insecure"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowInsecure); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowInsecure"] = transformedAllowInsecure + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileRemote(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + 
transformedUri, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileRemoteUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedSha256Checksum, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileRemoteSha256Checksum(original["sha256_checksum"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha256Checksum); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha256Checksum"] = transformedSha256Checksum + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileRemoteUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileRemoteSha256Checksum(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBucket, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcsBucket(original["bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucket"] = transformedBucket + } + + transformedObject, err := 
expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcsObject(original["object"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["object"] = transformedObject + } + + transformedGeneration, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcsGeneration(original["generation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["generation"] = transformedGeneration + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcsBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcsObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileGcsGeneration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileLocalPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateFileAllowInsecure(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateScript(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateArgs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateInterpreter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecValidateOutputFilePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforce(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFile, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFile(original["file"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFile); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["file"] = transformedFile + } + + transformedScript, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceScript(original["script"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScript); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["script"] = transformedScript + } + + transformedArgs, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceArgs(original["args"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["args"] = transformedArgs + } + + transformedInterpreter, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceInterpreter(original["interpreter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInterpreter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["interpreter"] = transformedInterpreter + } + + transformedOutputFilePath, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceOutputFilePath(original["output_file_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOutputFilePath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["outputFilePath"] = transformedOutputFilePath + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRemote, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileRemote(original["remote"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRemote); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["remote"] = transformedRemote + } + + transformedGcs, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcs(original["gcs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGcs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gcs"] = transformedGcs + } + + transformedLocalPath, err := 
expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileLocalPath(original["local_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["localPath"] = transformedLocalPath + } + + transformedAllowInsecure, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileAllowInsecure(original["allow_insecure"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowInsecure); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowInsecure"] = transformedAllowInsecure + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileRemote(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileRemoteUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedSha256Checksum, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileRemoteSha256Checksum(original["sha256_checksum"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha256Checksum); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha256Checksum"] = transformedSha256Checksum + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileRemoteUri(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileRemoteSha256Checksum(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBucket, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcsBucket(original["bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucket"] = transformedBucket + } + + transformedObject, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcsObject(original["object"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["object"] = transformedObject + } + + transformedGeneration, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcsGeneration(original["generation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["generation"] = transformedGeneration + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcsBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { 
+ return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcsObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileGcsGeneration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileLocalPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceFileAllowInsecure(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceScript(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceArgs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceInterpreter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesExecEnforceOutputFilePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if 
len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFile, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFile(original["file"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFile); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["file"] = transformedFile + } + + transformedContent, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileContent(original["content"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["content"] = transformedContent + } + + transformedPath, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFilePath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedState, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileState(original["state"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["state"] = transformedState + } + + transformedPermissions, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFilePermissions(original["permissions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPermissions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["permissions"] = transformedPermissions + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRemote, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileRemote(original["remote"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRemote); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["remote"] = transformedRemote + } + + transformedGcs, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcs(original["gcs"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGcs); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["gcs"] = transformedGcs + } + + transformedLocalPath, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileLocalPath(original["local_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["localPath"] = transformedLocalPath + } + + transformedAllowInsecure, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileAllowInsecure(original["allow_insecure"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowInsecure); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowInsecure"] = transformedAllowInsecure + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileRemote(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUri, err := 
expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileRemoteUri(original["uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uri"] = transformedUri + } + + transformedSha256Checksum, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileRemoteSha256Checksum(original["sha256_checksum"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha256Checksum); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha256Checksum"] = transformedSha256Checksum + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileRemoteUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileRemoteSha256Checksum(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBucket, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcsBucket(original["bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bucket"] = transformedBucket + } + + transformedObject, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcsObject(original["object"], d, config) 
+ if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["object"] = transformedObject + } + + transformedGeneration, err := expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcsGeneration(original["generation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGeneration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["generation"] = transformedGeneration + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcsBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcsObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileGcsGeneration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileLocalPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileFileAllowInsecure(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileContent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFilePath(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFileState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesResourceGroupsResourcesFilePermissions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentOsPoliciesAllowNoResourceGroupMatch(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentInstanceFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAll, err := expandOSConfigOSPolicyAssignmentInstanceFilterAll(original["all"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAll); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["all"] = transformedAll + } + + transformedInclusionLabels, err := expandOSConfigOSPolicyAssignmentInstanceFilterInclusionLabels(original["inclusion_labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInclusionLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["inclusionLabels"] = transformedInclusionLabels + } + + transformedExclusionLabels, err := expandOSConfigOSPolicyAssignmentInstanceFilterExclusionLabels(original["exclusion_labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExclusionLabels); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["exclusionLabels"] = transformedExclusionLabels + } + + transformedInventories, err := expandOSConfigOSPolicyAssignmentInstanceFilterInventories(original["inventories"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInventories); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["inventories"] = transformedInventories + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentInstanceFilterAll(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentInstanceFilterInclusionLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLabels, err := expandOSConfigOSPolicyAssignmentInstanceFilterInclusionLabelsLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labels"] = transformedLabels + } + + req = append(req, transformed) + } + return req, nil +} + +func expandOSConfigOSPolicyAssignmentInstanceFilterInclusionLabelsLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandOSConfigOSPolicyAssignmentInstanceFilterExclusionLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := 
make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLabels, err := expandOSConfigOSPolicyAssignmentInstanceFilterExclusionLabelsLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["labels"] = transformedLabels + } + + req = append(req, transformed) + } + return req, nil +} + +func expandOSConfigOSPolicyAssignmentInstanceFilterExclusionLabelsLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandOSConfigOSPolicyAssignmentInstanceFilterInventories(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedOsShortName, err := expandOSConfigOSPolicyAssignmentInstanceFilterInventoriesOsShortName(original["os_short_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOsShortName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["osShortName"] = transformedOsShortName + } + + transformedOsVersion, err := expandOSConfigOSPolicyAssignmentInstanceFilterInventoriesOsVersion(original["os_version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOsVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["osVersion"] = transformedOsVersion + } + + req = 
append(req, transformed) + } + return req, nil +} + +func expandOSConfigOSPolicyAssignmentInstanceFilterInventoriesOsShortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentInstanceFilterInventoriesOsVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentRollout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDisruptionBudget, err := expandOSConfigOSPolicyAssignmentRolloutDisruptionBudget(original["disruption_budget"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDisruptionBudget); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["disruptionBudget"] = transformedDisruptionBudget + } + + transformedMinWaitDuration, err := expandOSConfigOSPolicyAssignmentRolloutMinWaitDuration(original["min_wait_duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinWaitDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minWaitDuration"] = transformedMinWaitDuration + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentRolloutDisruptionBudget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFixed, err := expandOSConfigOSPolicyAssignmentRolloutDisruptionBudgetFixed(original["fixed"], d, config) 
+ if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFixed); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fixed"] = transformedFixed + } + + transformedPercent, err := expandOSConfigOSPolicyAssignmentRolloutDisruptionBudgetPercent(original["percent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["percent"] = transformedPercent + } + + return transformed, nil +} + +func expandOSConfigOSPolicyAssignmentRolloutDisruptionBudgetFixed(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentRolloutDisruptionBudgetPercent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSConfigOSPolicyAssignmentRolloutMinWaitDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_os_config_patch_deployment.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/osconfig/resource_os_config_patch_deployment.go similarity index 81% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_os_config_patch_deployment.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/osconfig/resource_os_config_patch_deployment.go index c4742508a8..9c520ae232 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_os_config_patch_deployment.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/osconfig/resource_os_config_patch_deployment.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package osconfig import ( "fmt" @@ -22,6 +25,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceOSConfigPatchDeployment() *schema.Resource { @@ -113,7 +120,7 @@ VMs when targeting configs, for example prefix="prod-".`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateRegexp(`(?:(?:[-a-z0-9]{1,63}\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))`), + ValidateFunc: verify.ValidateRegexp(`(?:(?:[-a-z0-9]{1,63}\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))`), Description: `A name for the patch deployment in the project. When creating a name the following rules apply: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. @@ -195,7 +202,7 @@ any other patch configuration fields.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"DIST", "UPGRADE", ""}), + ValidateFunc: verify.ValidateEnum([]string{"DIST", "UPGRADE", ""}), Description: `By changing the type to DIST, the patching is performed using apt-get dist-upgrade instead. 
Possible values: ["DIST", "UPGRADE"]`, AtLeastOneOf: []string{"patch_config.0.apt.0.type", "patch_config.0.apt.0.excludes", "patch_config.0.apt.0.exclusive_packages"}, }, @@ -286,7 +293,7 @@ any other patch configuration fields.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"SHELL", "POWERSHELL", ""}), + ValidateFunc: verify.ValidateEnum([]string{"SHELL", "POWERSHELL", ""}), Description: `The script interpreter to use to run the script. If no interpreter is specified the script will be executed directly, which will likely only succeed for scripts with shebang lines. Possible values: ["SHELL", "POWERSHELL"]`, }, @@ -352,7 +359,7 @@ be executed directly, which will likely only succeed for scripts with shebang li Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"SHELL", "POWERSHELL", ""}), + ValidateFunc: verify.ValidateEnum([]string{"SHELL", "POWERSHELL", ""}), Description: `The script interpreter to use to run the script. If no interpreter is specified the script will be executed directly, which will likely only succeed for scripts with shebang lines. Possible values: ["SHELL", "POWERSHELL"]`, }, @@ -430,7 +437,7 @@ be executed directly, which will likely only succeed for scripts with shebang li Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"SHELL", "POWERSHELL", ""}), + ValidateFunc: verify.ValidateEnum([]string{"SHELL", "POWERSHELL", ""}), Description: `The script interpreter to use to run the script. If no interpreter is specified the script will be executed directly, which will likely only succeed for scripts with shebang lines. 
Possible values: ["SHELL", "POWERSHELL"]`, }, @@ -496,7 +503,7 @@ be executed directly, which will likely only succeed for scripts with shebang li Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"SHELL", "POWERSHELL", ""}), + ValidateFunc: verify.ValidateEnum([]string{"SHELL", "POWERSHELL", ""}), Description: `The script interpreter to use to run the script. If no interpreter is specified the script will be executed directly, which will likely only succeed for scripts with shebang lines. Possible values: ["SHELL", "POWERSHELL"]`, }, @@ -519,7 +526,7 @@ be executed directly, which will likely only succeed for scripts with shebang li Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"DEFAULT", "ALWAYS", "NEVER", ""}), + ValidateFunc: verify.ValidateEnum([]string{"DEFAULT", "ALWAYS", "NEVER", ""}), Description: `Post-patch reboot settings. Possible values: ["DEFAULT", "ALWAYS", "NEVER"]`, AtLeastOneOf: []string{"patch_config.0.reboot_config", "patch_config.0.apt", "patch_config.0.yum", "patch_config.0.goo", "patch_config.0.zypper", "patch_config.0.windows_update", "patch_config.0.pre_step", "patch_config.0.post_step"}, }, @@ -538,7 +545,7 @@ be executed directly, which will likely only succeed for scripts with shebang li Description: `Only apply updates of these windows update classifications. If empty, all updates are applied. 
Possible values: ["CRITICAL", "SECURITY", "DEFINITION", "DRIVER", "FEATURE_PACK", "SERVICE_PACK", "TOOL", "UPDATE_ROLLUP", "UPDATE"]`, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validateEnum([]string{"CRITICAL", "SECURITY", "DEFINITION", "DRIVER", "FEATURE_PACK", "SERVICE_PACK", "TOOL", "UPDATE_ROLLUP", "UPDATE"}), + ValidateFunc: verify.ValidateEnum([]string{"CRITICAL", "SECURITY", "DEFINITION", "DRIVER", "FEATURE_PACK", "SERVICE_PACK", "TOOL", "UPDATE_ROLLUP", "UPDATE"}), }, ExactlyOneOf: []string{"patch_config.0.windows_update.0.classifications", "patch_config.0.windows_update.0.excludes", "patch_config.0.windows_update.0.exclusive_patches"}, }, @@ -798,7 +805,7 @@ will not run in February, April, June, etc.`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), Description: `A day of the week. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, }, "week_ordinal": { @@ -834,7 +841,7 @@ A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "201 Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), + ValidateFunc: verify.ValidateEnum([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), Description: `IANA Time Zone Database time zone, e.g. "America/New_York". 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, }, }, @@ -899,7 +906,7 @@ For example, if the disruption budget has a fixed value of 10, and 8 VMs fail to Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"ZONE_BY_ZONE", "CONCURRENT_ZONES"}), + ValidateFunc: verify.ValidateEnum([]string{"ZONE_BY_ZONE", "CONCURRENT_ZONES"}), Description: `Mode of the patch rollout. Possible values: ["ZONE_BY_ZONE", "CONCURRENT_ZONES"]`, }, }, @@ -941,8 +948,8 @@ A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "201 } func resourceOSConfigPatchDeploymentCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -951,43 +958,43 @@ func resourceOSConfigPatchDeploymentCreate(d *schema.ResourceData, meta interfac descriptionProp, err := expandOSConfigPatchDeploymentDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } instanceFilterProp, err := expandOSConfigPatchDeploymentInstanceFilter(d.Get("instance_filter"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("instance_filter"); !isEmptyValue(reflect.ValueOf(instanceFilterProp)) && (ok || !reflect.DeepEqual(v, instanceFilterProp)) { + } else if v, ok := d.GetOkExists("instance_filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceFilterProp)) && (ok || !reflect.DeepEqual(v, 
instanceFilterProp)) { obj["instanceFilter"] = instanceFilterProp } patchConfigProp, err := expandOSConfigPatchDeploymentPatchConfig(d.Get("patch_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("patch_config"); !isEmptyValue(reflect.ValueOf(patchConfigProp)) && (ok || !reflect.DeepEqual(v, patchConfigProp)) { + } else if v, ok := d.GetOkExists("patch_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(patchConfigProp)) && (ok || !reflect.DeepEqual(v, patchConfigProp)) { obj["patchConfig"] = patchConfigProp } durationProp, err := expandOSConfigPatchDeploymentDuration(d.Get("duration"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("duration"); !isEmptyValue(reflect.ValueOf(durationProp)) && (ok || !reflect.DeepEqual(v, durationProp)) { + } else if v, ok := d.GetOkExists("duration"); !tpgresource.IsEmptyValue(reflect.ValueOf(durationProp)) && (ok || !reflect.DeepEqual(v, durationProp)) { obj["duration"] = durationProp } oneTimeScheduleProp, err := expandOSConfigPatchDeploymentOneTimeSchedule(d.Get("one_time_schedule"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("one_time_schedule"); !isEmptyValue(reflect.ValueOf(oneTimeScheduleProp)) && (ok || !reflect.DeepEqual(v, oneTimeScheduleProp)) { + } else if v, ok := d.GetOkExists("one_time_schedule"); !tpgresource.IsEmptyValue(reflect.ValueOf(oneTimeScheduleProp)) && (ok || !reflect.DeepEqual(v, oneTimeScheduleProp)) { obj["oneTimeSchedule"] = oneTimeScheduleProp } recurringScheduleProp, err := expandOSConfigPatchDeploymentRecurringSchedule(d.Get("recurring_schedule"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("recurring_schedule"); !isEmptyValue(reflect.ValueOf(recurringScheduleProp)) && (ok || !reflect.DeepEqual(v, recurringScheduleProp)) { + } else if v, ok := d.GetOkExists("recurring_schedule"); !tpgresource.IsEmptyValue(reflect.ValueOf(recurringScheduleProp)) && (ok || !reflect.DeepEqual(v, 
recurringScheduleProp)) { obj["recurringSchedule"] = recurringScheduleProp } rolloutProp, err := expandOSConfigPatchDeploymentRollout(d.Get("rollout"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("rollout"); !isEmptyValue(reflect.ValueOf(rolloutProp)) && (ok || !reflect.DeepEqual(v, rolloutProp)) { + } else if v, ok := d.GetOkExists("rollout"); !tpgresource.IsEmptyValue(reflect.ValueOf(rolloutProp)) && (ok || !reflect.DeepEqual(v, rolloutProp)) { obj["rollout"] = rolloutProp } @@ -996,7 +1003,7 @@ func resourceOSConfigPatchDeploymentCreate(d *schema.ResourceData, meta interfac return err } - url, err := replaceVars(d, config, "{{OSConfigBasePath}}projects/{{project}}/patchDeployments?patchDeploymentId={{patch_deployment_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{OSConfigBasePath}}projects/{{project}}/patchDeployments?patchDeploymentId={{patch_deployment_id}}") if err != nil { return err } @@ -1004,18 +1011,26 @@ func resourceOSConfigPatchDeploymentCreate(d *schema.ResourceData, meta interfac log.Printf("[DEBUG] Creating new PatchDeployment: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for PatchDeployment: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating PatchDeployment: %s", err) } 
@@ -1024,7 +1039,7 @@ func resourceOSConfigPatchDeploymentCreate(d *schema.ResourceData, meta interfac } // Store the ID now - id, err := replaceVars(d, config, "{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -1054,33 +1069,39 @@ func resourceOSConfigPatchDeploymentCreate(d *schema.ResourceData, meta interfac } func resourceOSConfigPatchDeploymentRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{OSConfigBasePath}}{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{OSConfigBasePath}}{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for PatchDeployment: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("OSConfigPatchDeployment %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("OSConfigPatchDeployment %q", d.Id())) } res, err = resourceOSConfigPatchDeploymentDecoder(d, meta, res) @@ -1137,21 +1158,21 @@ func resourceOSConfigPatchDeploymentRead(d *schema.ResourceData, meta interface{ 
} func resourceOSConfigPatchDeploymentDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for PatchDeployment: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{OSConfigBasePath}}{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{OSConfigBasePath}}{{name}}") if err != nil { return err } @@ -1160,13 +1181,21 @@ func resourceOSConfigPatchDeploymentDelete(d *schema.ResourceData, meta interfac log.Printf("[DEBUG] Deleting PatchDeployment %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "PatchDeployment") + return transport_tpg.HandleNotFoundError(err, d, "PatchDeployment") } log.Printf("[DEBUG] Finished deleting PatchDeployment %q: %#v", d.Id(), res) @@ -1175,25 +1204,25 @@ func resourceOSConfigPatchDeploymentDelete(d *schema.ResourceData, meta interfac func resourceOSConfigPatchDeploymentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) 
// current import_formats can't import fields with forward slashes in their value - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { + if err := tpgresource.ParseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { return nil, err } return []*schema.ResourceData{d}, nil } -func flattenOSConfigPatchDeploymentName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentInstanceFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentInstanceFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1214,11 +1243,11 @@ func flattenOSConfigPatchDeploymentInstanceFilter(v interface{}, d *schema.Resou flattenOSConfigPatchDeploymentInstanceFilterInstanceNamePrefixes(original["instanceNamePrefixes"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentInstanceFilterAll(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentInstanceFilterAll(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentInstanceFilterGroupLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentInstanceFilterGroupLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1236,23 +1265,23 @@ func 
flattenOSConfigPatchDeploymentInstanceFilterGroupLabels(v interface{}, d *s } return transformed } -func flattenOSConfigPatchDeploymentInstanceFilterGroupLabelsLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentInstanceFilterGroupLabelsLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentInstanceFilterZones(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentInstanceFilterZones(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentInstanceFilterInstances(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentInstanceFilterInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentInstanceFilterInstanceNamePrefixes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentInstanceFilterInstanceNamePrefixes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1281,15 +1310,15 @@ func flattenOSConfigPatchDeploymentPatchConfig(v interface{}, d *schema.Resource flattenOSConfigPatchDeploymentPatchConfigPostStep(original["postStep"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigMigInstancesAllowed(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigMigInstancesAllowed(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigRebootConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigRebootConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigApt(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigApt(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1306,19 +1335,19 @@ func flattenOSConfigPatchDeploymentPatchConfigApt(v interface{}, d *schema.Resou flattenOSConfigPatchDeploymentPatchConfigAptExclusivePackages(original["exclusivePackages"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigAptType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigAptType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigAptExcludes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigAptExcludes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigAptExclusivePackages(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigAptExclusivePackages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigYum(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigYum(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v 
== nil { return nil } @@ -1337,23 +1366,23 @@ func flattenOSConfigPatchDeploymentPatchConfigYum(v interface{}, d *schema.Resou flattenOSConfigPatchDeploymentPatchConfigYumExclusivePackages(original["exclusivePackages"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigYumSecurity(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigYumSecurity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigYumMinimal(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigYumMinimal(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigYumExcludes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigYumExcludes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigYumExclusivePackages(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigYumExclusivePackages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigGoo(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigGoo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1366,11 +1395,11 @@ func flattenOSConfigPatchDeploymentPatchConfigGoo(v interface{}, d *schema.Resou flattenOSConfigPatchDeploymentPatchConfigGooEnabled(original["enabled"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigGooEnabled(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigGooEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigZypper(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigZypper(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1393,31 +1422,31 @@ func flattenOSConfigPatchDeploymentPatchConfigZypper(v interface{}, d *schema.Re flattenOSConfigPatchDeploymentPatchConfigZypperExclusivePatches(original["exclusivePatches"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigZypperWithOptional(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigZypperWithOptional(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigZypperWithUpdate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigZypperWithUpdate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigZypperCategories(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigZypperCategories(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigZypperSeverities(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigZypperSeverities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigZypperExcludes(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigZypperExcludes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigZypperExclusivePatches(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigZypperExclusivePatches(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigWindowsUpdate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigWindowsUpdate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1434,19 +1463,19 @@ func flattenOSConfigPatchDeploymentPatchConfigWindowsUpdate(v interface{}, d *sc flattenOSConfigPatchDeploymentPatchConfigWindowsUpdateExclusivePatches(original["exclusivePatches"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigWindowsUpdateClassifications(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigWindowsUpdateClassifications(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigWindowsUpdateExcludes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigWindowsUpdateExcludes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigWindowsUpdateExclusivePatches(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigWindowsUpdateExclusivePatches(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } 
-func flattenOSConfigPatchDeploymentPatchConfigPreStep(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStep(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1461,7 +1490,7 @@ func flattenOSConfigPatchDeploymentPatchConfigPreStep(v interface{}, d *schema.R flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(original["windowsExecStepConfig"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1480,19 +1509,19 @@ func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig(v inter flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject(original["gcsObject"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigAllowedSuccessCodes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigAllowedSuccessCodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigLocalPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigLocalPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1509,19 +1538,19 @@ func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjec flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectGenerationNumber(original["generationNumber"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectBucket(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectObject(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectGenerationNumber(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectGenerationNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1540,19 +1569,19 @@ func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(v int flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject(original["gcsObject"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigAllowedSuccessCodes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigAllowedSuccessCodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigLocalPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigLocalPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1569,19 +1598,19 @@ func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObj 
flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectGenerationNumber(original["generationNumber"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectBucket(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectObject(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectGenerationNumber(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectGenerationNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPostStep(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStep(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1596,7 +1625,7 @@ func flattenOSConfigPatchDeploymentPatchConfigPostStep(v interface{}, d *schema. 
flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(original["windowsExecStepConfig"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1615,19 +1644,19 @@ func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig(v inte flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject(original["gcsObject"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigAllowedSuccessCodes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigAllowedSuccessCodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigLocalPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigLocalPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1644,19 +1673,19 @@ func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObje flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectGenerationNumber(original["generationNumber"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectBucket(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectObject(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectGenerationNumber(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectGenerationNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1675,19 +1704,19 @@ func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(v in flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject(original["gcsObject"], d, config) return 
[]interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigAllowedSuccessCodes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigAllowedSuccessCodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigLocalPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigLocalPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1704,35 +1733,35 @@ func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsOb flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectGenerationNumber(original["generationNumber"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectBucket(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectBucket(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectObject(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectGenerationNumber(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectGenerationNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentLastExecuteTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentLastExecuteTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentOneTimeSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentOneTimeSchedule(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1745,11 +1774,11 @@ func flattenOSConfigPatchDeploymentOneTimeSchedule(v interface{}, d *schema.Reso flattenOSConfigPatchDeploymentOneTimeScheduleExecuteTime(original["executeTime"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentOneTimeScheduleExecuteTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentOneTimeScheduleExecuteTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentRecurringSchedule(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1776,7 +1805,7 @@ func flattenOSConfigPatchDeploymentRecurringSchedule(v interface{}, d *schema.Re flattenOSConfigPatchDeploymentRecurringScheduleMonthly(original["monthly"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentRecurringScheduleTimeZone(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleTimeZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1791,23 +1820,23 @@ func flattenOSConfigPatchDeploymentRecurringScheduleTimeZone(v interface{}, d *s flattenOSConfigPatchDeploymentRecurringScheduleTimeZoneVersion(original["version"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentRecurringScheduleTimeZoneId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleTimeZoneId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenOSConfigPatchDeploymentRecurringScheduleTimeZoneVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleTimeZoneVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentRecurringScheduleStartTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentRecurringScheduleEndTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1824,10 +1853,10 @@ func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDay(v interface{}, d * return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayHours(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1841,10 +1870,10 @@ func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayHours(v interface{} return v // let terraform core handle it otherwise } -func 
flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayMinutes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1858,10 +1887,10 @@ func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayMinutes(v interface return v // let terraform core handle it otherwise } -func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDaySeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDaySeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1875,10 +1904,10 @@ func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDaySeconds(v interface return v // let terraform core handle it otherwise } -func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayNanos(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1892,15 +1921,15 @@ func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayNanos(v interface{} return v // let terraform core handle it otherwise } -func 
flattenOSConfigPatchDeploymentRecurringScheduleLastExecuteTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleLastExecuteTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentRecurringScheduleNextExecuteTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleNextExecuteTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentRecurringScheduleWeekly(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleWeekly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1913,11 +1942,11 @@ func flattenOSConfigPatchDeploymentRecurringScheduleWeekly(v interface{}, d *sch flattenOSConfigPatchDeploymentRecurringScheduleWeeklyDayOfWeek(original["dayOfWeek"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentRecurringScheduleWeeklyDayOfWeek(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleWeeklyDayOfWeek(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentRecurringScheduleMonthly(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleMonthly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1932,7 +1961,7 @@ func flattenOSConfigPatchDeploymentRecurringScheduleMonthly(v interface{}, d *sc flattenOSConfigPatchDeploymentRecurringScheduleMonthlyMonthDay(original["monthDay"], d, config) return []interface{}{transformed} } -func 
flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1947,10 +1976,10 @@ func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(v inte flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeek(original["dayOfWeek"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthWeekOrdinal(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthWeekOrdinal(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1964,14 +1993,14 @@ func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthWeekOrd return v // let terraform core handle it otherwise } -func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeek(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeek(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyMonthDay(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyMonthDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := 
StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -1985,7 +2014,7 @@ func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyMonthDay(v interface{ return v // let terraform core handle it otherwise } -func flattenOSConfigPatchDeploymentRollout(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRollout(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2000,11 +2029,11 @@ func flattenOSConfigPatchDeploymentRollout(v interface{}, d *schema.ResourceData flattenOSConfigPatchDeploymentRolloutDisruptionBudget(original["disruptionBudget"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentRolloutMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRolloutMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenOSConfigPatchDeploymentRolloutDisruptionBudget(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRolloutDisruptionBudget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2019,10 +2048,10 @@ func flattenOSConfigPatchDeploymentRolloutDisruptionBudget(v interface{}, d *sch flattenOSConfigPatchDeploymentRolloutDisruptionBudgetPercentage(original["percent"], d, config) return []interface{}{transformed} } -func flattenOSConfigPatchDeploymentRolloutDisruptionBudgetFixed(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRolloutDisruptionBudgetFixed(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if 
intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2036,10 +2065,10 @@ func flattenOSConfigPatchDeploymentRolloutDisruptionBudgetFixed(v interface{}, d return v // let terraform core handle it otherwise } -func flattenOSConfigPatchDeploymentRolloutDisruptionBudgetPercentage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenOSConfigPatchDeploymentRolloutDisruptionBudgetPercentage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2053,11 +2082,11 @@ func flattenOSConfigPatchDeploymentRolloutDisruptionBudgetPercentage(v interface return v // let terraform core handle it otherwise } -func expandOSConfigPatchDeploymentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentInstanceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentInstanceFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2069,46 +2098,46 @@ func expandOSConfigPatchDeploymentInstanceFilter(v interface{}, d TerraformResou transformedAll, err := expandOSConfigPatchDeploymentInstanceFilterAll(original["all"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAll); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAll); val.IsValid() && !tpgresource.IsEmptyValue(val) { 
transformed["all"] = transformedAll } transformedGroupLabels, err := expandOSConfigPatchDeploymentInstanceFilterGroupLabels(original["group_labels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGroupLabels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGroupLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["groupLabels"] = transformedGroupLabels } transformedZones, err := expandOSConfigPatchDeploymentInstanceFilterZones(original["zones"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedZones); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedZones); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["zones"] = transformedZones } transformedInstances, err := expandOSConfigPatchDeploymentInstanceFilterInstances(original["instances"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["instances"] = transformedInstances } transformedInstanceNamePrefixes, err := expandOSConfigPatchDeploymentInstanceFilterInstanceNamePrefixes(original["instance_name_prefixes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInstanceNamePrefixes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInstanceNamePrefixes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["instanceNamePrefixes"] = transformedInstanceNamePrefixes } return transformed, nil } -func expandOSConfigPatchDeploymentInstanceFilterAll(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentInstanceFilterAll(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentInstanceFilterGroupLabels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentInstanceFilterGroupLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2121,7 +2150,7 @@ func expandOSConfigPatchDeploymentInstanceFilterGroupLabels(v interface{}, d Ter transformedLabels, err := expandOSConfigPatchDeploymentInstanceFilterGroupLabelsLabels(original["labels"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["labels"] = transformedLabels } @@ -2130,7 +2159,7 @@ func expandOSConfigPatchDeploymentInstanceFilterGroupLabels(v interface{}, d Ter return req, nil } -func expandOSConfigPatchDeploymentInstanceFilterGroupLabelsLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandOSConfigPatchDeploymentInstanceFilterGroupLabelsLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -2141,19 +2170,19 @@ func expandOSConfigPatchDeploymentInstanceFilterGroupLabelsLabels(v interface{}, return m, nil } -func expandOSConfigPatchDeploymentInstanceFilterZones(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentInstanceFilterZones(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentInstanceFilterInstances(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandOSConfigPatchDeploymentInstanceFilterInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentInstanceFilterInstanceNamePrefixes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentInstanceFilterInstanceNamePrefixes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2165,78 +2194,78 @@ func expandOSConfigPatchDeploymentPatchConfig(v interface{}, d TerraformResource transformedMigInstancesAllowed, err := expandOSConfigPatchDeploymentPatchConfigMigInstancesAllowed(original["mig_instances_allowed"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMigInstancesAllowed); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMigInstancesAllowed); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["migInstancesAllowed"] = transformedMigInstancesAllowed } transformedRebootConfig, err := expandOSConfigPatchDeploymentPatchConfigRebootConfig(original["reboot_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRebootConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRebootConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["rebootConfig"] = transformedRebootConfig } transformedApt, err := expandOSConfigPatchDeploymentPatchConfigApt(original["apt"], d, config) if err != nil { 
return nil, err - } else if val := reflect.ValueOf(transformedApt); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedApt); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["apt"] = transformedApt } transformedYum, err := expandOSConfigPatchDeploymentPatchConfigYum(original["yum"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedYum); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedYum); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["yum"] = transformedYum } transformedGoo, err := expandOSConfigPatchDeploymentPatchConfigGoo(original["goo"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGoo); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGoo); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["goo"] = transformedGoo } transformedZypper, err := expandOSConfigPatchDeploymentPatchConfigZypper(original["zypper"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedZypper); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedZypper); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["zypper"] = transformedZypper } transformedWindowsUpdate, err := expandOSConfigPatchDeploymentPatchConfigWindowsUpdate(original["windows_update"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWindowsUpdate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWindowsUpdate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["windowsUpdate"] = transformedWindowsUpdate } transformedPreStep, err := expandOSConfigPatchDeploymentPatchConfigPreStep(original["pre_step"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPreStep); val.IsValid() && !isEmptyValue(val) { 
+ } else if val := reflect.ValueOf(transformedPreStep); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["preStep"] = transformedPreStep } transformedPostStep, err := expandOSConfigPatchDeploymentPatchConfigPostStep(original["post_step"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostStep); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostStep); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postStep"] = transformedPostStep } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigMigInstancesAllowed(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigMigInstancesAllowed(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigRebootConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigRebootConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigApt(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigApt(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2248,40 +2277,40 @@ func expandOSConfigPatchDeploymentPatchConfigApt(v interface{}, d TerraformResou transformedType, err := expandOSConfigPatchDeploymentPatchConfigAptType(original["type"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedType); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["type"] = transformedType } transformedExcludes, err := expandOSConfigPatchDeploymentPatchConfigAptExcludes(original["excludes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExcludes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExcludes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["excludes"] = transformedExcludes } transformedExclusivePackages, err := expandOSConfigPatchDeploymentPatchConfigAptExclusivePackages(original["exclusive_packages"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExclusivePackages); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExclusivePackages); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exclusivePackages"] = transformedExclusivePackages } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigAptType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigAptType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigAptExcludes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigAptExcludes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigAptExclusivePackages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigAptExclusivePackages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigYum(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigYum(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2293,51 +2322,51 @@ func expandOSConfigPatchDeploymentPatchConfigYum(v interface{}, d TerraformResou transformedSecurity, err := expandOSConfigPatchDeploymentPatchConfigYumSecurity(original["security"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSecurity); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSecurity); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["security"] = transformedSecurity } transformedMinimal, err := expandOSConfigPatchDeploymentPatchConfigYumMinimal(original["minimal"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinimal); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinimal); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minimal"] = transformedMinimal } transformedExcludes, err := expandOSConfigPatchDeploymentPatchConfigYumExcludes(original["excludes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExcludes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExcludes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["excludes"] = transformedExcludes } transformedExclusivePackages, err := expandOSConfigPatchDeploymentPatchConfigYumExclusivePackages(original["exclusive_packages"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExclusivePackages); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExclusivePackages); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exclusivePackages"] = 
transformedExclusivePackages } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigYumSecurity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigYumSecurity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigYumMinimal(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigYumMinimal(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigYumExcludes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigYumExcludes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigYumExclusivePackages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigYumExclusivePackages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigGoo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigGoo(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2349,18 +2378,18 @@ func expandOSConfigPatchDeploymentPatchConfigGoo(v interface{}, d TerraformResou transformedEnabled, err := expandOSConfigPatchDeploymentPatchConfigGooEnabled(original["enabled"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEnabled); 
val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["enabled"] = transformedEnabled } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigGooEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigGooEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigZypper(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigZypper(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2372,73 +2401,73 @@ func expandOSConfigPatchDeploymentPatchConfigZypper(v interface{}, d TerraformRe transformedWithOptional, err := expandOSConfigPatchDeploymentPatchConfigZypperWithOptional(original["with_optional"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWithOptional); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWithOptional); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["withOptional"] = transformedWithOptional } transformedWithUpdate, err := expandOSConfigPatchDeploymentPatchConfigZypperWithUpdate(original["with_update"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWithUpdate); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWithUpdate); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["withUpdate"] = transformedWithUpdate } transformedCategories, err := expandOSConfigPatchDeploymentPatchConfigZypperCategories(original["categories"], d, config) if err != nil { return nil, err - } else 
if val := reflect.ValueOf(transformedCategories); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCategories); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["categories"] = transformedCategories } transformedSeverities, err := expandOSConfigPatchDeploymentPatchConfigZypperSeverities(original["severities"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeverities); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeverities); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["severities"] = transformedSeverities } transformedExcludes, err := expandOSConfigPatchDeploymentPatchConfigZypperExcludes(original["excludes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExcludes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExcludes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["excludes"] = transformedExcludes } transformedExclusivePatches, err := expandOSConfigPatchDeploymentPatchConfigZypperExclusivePatches(original["exclusive_patches"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExclusivePatches); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExclusivePatches); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exclusivePatches"] = transformedExclusivePatches } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigZypperWithOptional(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigZypperWithOptional(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigZypperWithUpdate(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigZypperWithUpdate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigZypperCategories(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigZypperCategories(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigZypperSeverities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigZypperSeverities(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigZypperExcludes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigZypperExcludes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigZypperExclusivePatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigZypperExclusivePatches(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigWindowsUpdate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigWindowsUpdate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2450,40 +2479,40 @@ func expandOSConfigPatchDeploymentPatchConfigWindowsUpdate(v interface{}, d Terr 
transformedClassifications, err := expandOSConfigPatchDeploymentPatchConfigWindowsUpdateClassifications(original["classifications"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedClassifications); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedClassifications); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["classifications"] = transformedClassifications } transformedExcludes, err := expandOSConfigPatchDeploymentPatchConfigWindowsUpdateExcludes(original["excludes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExcludes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExcludes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["excludes"] = transformedExcludes } transformedExclusivePatches, err := expandOSConfigPatchDeploymentPatchConfigWindowsUpdateExclusivePatches(original["exclusive_patches"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExclusivePatches); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExclusivePatches); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["exclusivePatches"] = transformedExclusivePatches } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigWindowsUpdateClassifications(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigWindowsUpdateClassifications(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigWindowsUpdateExcludes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigWindowsUpdateExcludes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigWindowsUpdateExclusivePatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigWindowsUpdateExclusivePatches(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStep(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStep(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2495,21 +2524,21 @@ func expandOSConfigPatchDeploymentPatchConfigPreStep(v interface{}, d TerraformR transformedLinuxExecStepConfig, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig(original["linux_exec_step_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLinuxExecStepConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLinuxExecStepConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["linuxExecStepConfig"] = transformedLinuxExecStepConfig } transformedWindowsExecStepConfig, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(original["windows_exec_step_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWindowsExecStepConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWindowsExecStepConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["windowsExecStepConfig"] = transformedWindowsExecStepConfig } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2521,47 +2550,47 @@ func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig(v interf transformedAllowedSuccessCodes, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigAllowedSuccessCodes(original["allowed_success_codes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedSuccessCodes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedSuccessCodes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowedSuccessCodes"] = transformedAllowedSuccessCodes } transformedInterpreter, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreter(original["interpreter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInterpreter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInterpreter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["interpreter"] = transformedInterpreter } transformedLocalPath, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigLocalPath(original["local_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["localPath"] = transformedLocalPath } transformedGcsObject, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject(original["gcs_object"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGcsObject); val.IsValid() && !isEmptyValue(val) { + } 
else if val := reflect.ValueOf(transformedGcsObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["gcsObject"] = transformedGcsObject } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigAllowedSuccessCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigAllowedSuccessCodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigLocalPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigLocalPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2573,40 +2602,40 @@ func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject transformedBucket, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectBucket(original["bucket"], d, config) if err != nil { return nil, err - } else 
if val := reflect.ValueOf(transformedBucket); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["bucket"] = transformedBucket } transformedObject, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectObject(original["object"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["object"] = transformedObject } transformedGenerationNumber, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectGenerationNumber(original["generation_number"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGenerationNumber); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGenerationNumber); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["generationNumber"] = transformedGenerationNumber } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectGenerationNumber(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectGenerationNumber(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2618,47 +2647,47 @@ func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(v inte transformedAllowedSuccessCodes, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigAllowedSuccessCodes(original["allowed_success_codes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedSuccessCodes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedSuccessCodes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowedSuccessCodes"] = transformedAllowedSuccessCodes } transformedInterpreter, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreter(original["interpreter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInterpreter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInterpreter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["interpreter"] = transformedInterpreter } transformedLocalPath, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigLocalPath(original["local_path"], d, config) if err != nil { 
return nil, err - } else if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["localPath"] = transformedLocalPath } transformedGcsObject, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject(original["gcs_object"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGcsObject); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGcsObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["gcsObject"] = transformedGcsObject } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigAllowedSuccessCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigAllowedSuccessCodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigLocalPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigLocalPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2670,40 +2699,40 @@ func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObje transformedBucket, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectBucket(original["bucket"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["bucket"] = transformedBucket } transformedObject, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectObject(original["object"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["object"] = transformedObject } transformedGenerationNumber, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectGenerationNumber(original["generation_number"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGenerationNumber); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGenerationNumber); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["generationNumber"] = transformedGenerationNumber } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectBucket(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectGenerationNumber(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectGenerationNumber(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStep(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStep(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2715,21 +2744,21 @@ func expandOSConfigPatchDeploymentPatchConfigPostStep(v interface{}, d Terraform transformedLinuxExecStepConfig, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig(original["linux_exec_step_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLinuxExecStepConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLinuxExecStepConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["linuxExecStepConfig"] = transformedLinuxExecStepConfig } transformedWindowsExecStepConfig, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(original["windows_exec_step_config"], 
d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWindowsExecStepConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWindowsExecStepConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["windowsExecStepConfig"] = transformedWindowsExecStepConfig } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2741,47 +2770,47 @@ func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig(v inter transformedAllowedSuccessCodes, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigAllowedSuccessCodes(original["allowed_success_codes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedSuccessCodes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedSuccessCodes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowedSuccessCodes"] = transformedAllowedSuccessCodes } transformedInterpreter, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreter(original["interpreter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInterpreter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInterpreter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["interpreter"] = transformedInterpreter } transformedLocalPath, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigLocalPath(original["local_path"], d, config) if err != nil { return nil, err - } else 
if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["localPath"] = transformedLocalPath } transformedGcsObject, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject(original["gcs_object"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGcsObject); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGcsObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["gcsObject"] = transformedGcsObject } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigAllowedSuccessCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigAllowedSuccessCodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigLocalPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigLocalPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2793,40 +2822,40 @@ func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjec transformedBucket, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectBucket(original["bucket"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["bucket"] = transformedBucket } transformedObject, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectObject(original["object"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["object"] = transformedObject } transformedGenerationNumber, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectGenerationNumber(original["generation_number"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGenerationNumber); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGenerationNumber); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["generationNumber"] = transformedGenerationNumber } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectBucket(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectGenerationNumber(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectGenerationNumber(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2838,47 +2867,47 @@ func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(v int transformedAllowedSuccessCodes, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigAllowedSuccessCodes(original["allowed_success_codes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedSuccessCodes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedSuccessCodes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowedSuccessCodes"] = transformedAllowedSuccessCodes } transformedInterpreter, err := 
expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreter(original["interpreter"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedInterpreter); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedInterpreter); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["interpreter"] = transformedInterpreter } transformedLocalPath, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigLocalPath(original["local_path"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLocalPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["localPath"] = transformedLocalPath } transformedGcsObject, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject(original["gcs_object"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedGcsObject); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGcsObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["gcsObject"] = transformedGcsObject } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigAllowedSuccessCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigAllowedSuccessCodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreter(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigLocalPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigLocalPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2890,44 +2919,44 @@ func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObj transformedBucket, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectBucket(original["bucket"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["bucket"] = transformedBucket } transformedObject, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectObject(original["object"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedObject); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["object"] = transformedObject } transformedGenerationNumber, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectGenerationNumber(original["generation_number"], d, config) if err != nil { return nil, err 
- } else if val := reflect.ValueOf(transformedGenerationNumber); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedGenerationNumber); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["generationNumber"] = transformedGenerationNumber } return transformed, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectGenerationNumber(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectGenerationNumber(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentOneTimeSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentOneTimeSchedule(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2939,18 +2968,18 @@ func expandOSConfigPatchDeploymentOneTimeSchedule(v interface{}, d TerraformReso transformedExecuteTime, err := expandOSConfigPatchDeploymentOneTimeScheduleExecuteTime(original["execute_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExecuteTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedExecuteTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["executeTime"] = transformedExecuteTime } return transformed, nil } -func expandOSConfigPatchDeploymentOneTimeScheduleExecuteTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentOneTimeScheduleExecuteTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRecurringSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2962,21 +2991,21 @@ func expandOSConfigPatchDeploymentRecurringSchedule(v interface{}, d TerraformRe transformedTimeZone, err := expandOSConfigPatchDeploymentRecurringScheduleTimeZone(original["time_zone"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTimeZone); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTimeZone); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["timeZone"] = transformedTimeZone } transformedStartTime, err := expandOSConfigPatchDeploymentRecurringScheduleStartTime(original["start_time"], d, config) if err != nil { 
return nil, err - } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStartTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["startTime"] = transformedStartTime } transformedEndTime, err := expandOSConfigPatchDeploymentRecurringScheduleEndTime(original["end_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEndTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEndTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["endTime"] = transformedEndTime } @@ -2990,35 +3019,35 @@ func expandOSConfigPatchDeploymentRecurringSchedule(v interface{}, d TerraformRe transformedLastExecuteTime, err := expandOSConfigPatchDeploymentRecurringScheduleLastExecuteTime(original["last_execute_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLastExecuteTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLastExecuteTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["lastExecuteTime"] = transformedLastExecuteTime } transformedNextExecuteTime, err := expandOSConfigPatchDeploymentRecurringScheduleNextExecuteTime(original["next_execute_time"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNextExecuteTime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNextExecuteTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nextExecuteTime"] = transformedNextExecuteTime } transformedWeekly, err := expandOSConfigPatchDeploymentRecurringScheduleWeekly(original["weekly"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeekly); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeekly); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["weekly"] = transformedWeekly } transformedMonthly, err := expandOSConfigPatchDeploymentRecurringScheduleMonthly(original["monthly"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMonthly); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMonthly); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["monthly"] = transformedMonthly } return transformed, nil } -func expandOSConfigPatchDeploymentRecurringScheduleTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleTimeZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3030,37 +3059,37 @@ func expandOSConfigPatchDeploymentRecurringScheduleTimeZone(v interface{}, d Ter transformedId, err := expandOSConfigPatchDeploymentRecurringScheduleTimeZoneId(original["id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedId); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["id"] = transformedId } transformedVersion, err := expandOSConfigPatchDeploymentRecurringScheduleTimeZoneVersion(original["version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["version"] = transformedVersion } return transformed, nil } -func expandOSConfigPatchDeploymentRecurringScheduleTimeZoneId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleTimeZoneId(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRecurringScheduleTimeZoneVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleTimeZoneVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRecurringScheduleStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRecurringScheduleEndTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleEndTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3072,59 +3101,59 @@ func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDay(v interface{}, d Te transformedHours, err := expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayHours(original["hours"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["hours"] = transformedHours } transformedMinutes, err := 
expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayMinutes(original["minutes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minutes"] = transformedMinutes } transformedSeconds, err := expandOSConfigPatchDeploymentRecurringScheduleTimeOfDaySeconds(original["seconds"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["seconds"] = transformedSeconds } transformedNanos, err := expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayNanos(original["nanos"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["nanos"] = transformedNanos } return transformed, nil } -func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { +func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDaySeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRecurringScheduleLastExecuteTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleLastExecuteTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRecurringScheduleNextExecuteTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleNextExecuteTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRecurringScheduleWeekly(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleWeekly(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3136,18 +3165,18 @@ func expandOSConfigPatchDeploymentRecurringScheduleWeekly(v interface{}, d Terra transformedDayOfWeek, err := expandOSConfigPatchDeploymentRecurringScheduleWeeklyDayOfWeek(original["day_of_week"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDayOfWeek); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDayOfWeek); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dayOfWeek"] = transformedDayOfWeek } return transformed, nil } -func expandOSConfigPatchDeploymentRecurringScheduleWeeklyDayOfWeek(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleWeeklyDayOfWeek(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRecurringScheduleMonthly(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleMonthly(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3159,21 +3188,21 @@ func expandOSConfigPatchDeploymentRecurringScheduleMonthly(v interface{}, d Terr transformedWeekDayOfMonth, err := expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(original["week_day_of_month"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeekDayOfMonth); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeekDayOfMonth); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weekDayOfMonth"] = transformedWeekDayOfMonth } transformedMonthDay, err := expandOSConfigPatchDeploymentRecurringScheduleMonthlyMonthDay(original["month_day"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMonthDay); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMonthDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["monthDay"] = transformedMonthDay } return transformed, nil } -func expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3185,33 +3214,33 @@ func expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(v inter transformedWeekOrdinal, err := expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthWeekOrdinal(original["week_ordinal"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWeekOrdinal); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWeekOrdinal); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["weekOrdinal"] = transformedWeekOrdinal } transformedDayOfWeek, err := expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeek(original["day_of_week"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDayOfWeek); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDayOfWeek); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dayOfWeek"] = transformedDayOfWeek } return transformed, nil } -func expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthWeekOrdinal(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthWeekOrdinal(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeek(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeek(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandOSConfigPatchDeploymentRecurringScheduleMonthlyMonthDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRecurringScheduleMonthlyMonthDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRollout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRollout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3223,25 +3252,25 @@ func expandOSConfigPatchDeploymentRollout(v interface{}, d TerraformResourceData transformedMode, err := expandOSConfigPatchDeploymentRolloutMode(original["mode"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["mode"] = transformedMode } transformedDisruptionBudget, err := expandOSConfigPatchDeploymentRolloutDisruptionBudget(original["disruption_budget"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisruptionBudget); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisruptionBudget); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disruptionBudget"] = transformedDisruptionBudget } return transformed, nil } -func expandOSConfigPatchDeploymentRolloutMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRolloutMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRolloutDisruptionBudget(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRolloutDisruptionBudget(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3253,25 +3282,25 @@ func expandOSConfigPatchDeploymentRolloutDisruptionBudget(v interface{}, d Terra transformedFixed, err := expandOSConfigPatchDeploymentRolloutDisruptionBudgetFixed(original["fixed"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFixed); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFixed); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["fixed"] = transformedFixed } transformedPercentage, err := expandOSConfigPatchDeploymentRolloutDisruptionBudgetPercentage(original["percentage"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPercentage); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["percent"] = transformedPercentage } return transformed, nil } -func expandOSConfigPatchDeploymentRolloutDisruptionBudgetFixed(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRolloutDisruptionBudgetFixed(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandOSConfigPatchDeploymentRolloutDisruptionBudgetPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandOSConfigPatchDeploymentRolloutDisruptionBudgetPercentage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/osconfig/resource_os_config_patch_deployment_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/osconfig/resource_os_config_patch_deployment_sweeper.go new file mode 100644 index 0000000000..cd58506d4c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/osconfig/resource_os_config_patch_deployment_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package osconfig + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("OSConfigPatchDeployment", testSweepOSConfigPatchDeployment) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepOSConfigPatchDeployment(region string) error { + resourceName := "OSConfigPatchDeployment" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://osconfig.googleapis.com/v1/projects/{{project}}/patchDeployments", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["patchDeployments"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://osconfig.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oslogin/resource_os_login_ssh_public_key.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oslogin/resource_os_login_ssh_public_key.go new file mode 100644 index 0000000000..3f4fa642ff --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/oslogin/resource_os_login_ssh_public_key.go @@ -0,0 +1,352 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package oslogin + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceOSLoginSSHPublicKey() *schema.Resource { + return &schema.Resource{ + Create: resourceOSLoginSSHPublicKeyCreate, + Read: resourceOSLoginSSHPublicKeyRead, + Update: resourceOSLoginSSHPublicKeyUpdate, + Delete: resourceOSLoginSSHPublicKeyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceOSLoginSSHPublicKeyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Public key text in SSH format, defined by RFC4253 
section 6.6.`, + }, + "user": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The user email.`, + }, + "expiration_time_usec": { + Type: schema.TypeString, + Optional: true, + Description: `An expiration time in microseconds since epoch.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The project ID of the Google Cloud Platform project.`, + }, + "fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The SHA-256 fingerprint of the SSH public key.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceOSLoginSSHPublicKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + keyProp, err := expandOSLoginSSHPublicKeyKey(d.Get("key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("key"); !tpgresource.IsEmptyValue(reflect.ValueOf(keyProp)) && (ok || !reflect.DeepEqual(v, keyProp)) { + obj["key"] = keyProp + } + expirationTimeUsecProp, err := expandOSLoginSSHPublicKeyExpirationTimeUsec(d.Get("expiration_time_usec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("expiration_time_usec"); !tpgresource.IsEmptyValue(reflect.ValueOf(expirationTimeUsecProp)) && (ok || !reflect.DeepEqual(v, expirationTimeUsecProp)) { + obj["expirationTimeUsec"] = expirationTimeUsecProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{OSLoginBasePath}}users/{{user}}:importSshPublicKey") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new SSHPublicKey: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // Don't use `GetProject()` because we only want to 
set the project in the URL + // if the user set it explicitly on the resource. + if p, ok := d.GetOk("project"); ok { + url, err = transport_tpg.AddQueryParams(url, map[string]string{"projectId": p.(string)}) + if err != nil { + return err + } + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating SSHPublicKey: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "users/{{user}}/sshPublicKeys/{{fingerprint}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + loginProfile, ok := res["loginProfile"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + // `fingerprint` is autogenerated from the api so needs to be set post-create + sshPublicKeys := loginProfile.(map[string]interface{})["sshPublicKeys"] + for _, sshPublicKey := range sshPublicKeys.(map[string]interface{}) { + if sshPublicKey.(map[string]interface{})["key"].(string) == d.Get("key") { + if err := d.Set("fingerprint", sshPublicKey.(map[string]interface{})["fingerprint"].(string)); err != nil { + return fmt.Errorf("Error setting fingerprint: %s", err) + } + break + } + } + + // Store the ID now + id, err = tpgresource.ReplaceVars(d, config, "users/{{user}}/sshPublicKeys/{{fingerprint}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating SSHPublicKey %q: %#v", d.Id(), res) + + return resourceOSLoginSSHPublicKeyRead(d, meta) +} + +func resourceOSLoginSSHPublicKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { 
+ return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{OSLoginBasePath}}users/{{user}}/sshPublicKeys/{{fingerprint}}/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("OSLoginSSHPublicKey %q", d.Id())) + } + + if err := d.Set("key", flattenOSLoginSSHPublicKeyKey(res["key"], d, config)); err != nil { + return fmt.Errorf("Error reading SSHPublicKey: %s", err) + } + if err := d.Set("expiration_time_usec", flattenOSLoginSSHPublicKeyExpirationTimeUsec(res["expirationTimeUsec"], d, config)); err != nil { + return fmt.Errorf("Error reading SSHPublicKey: %s", err) + } + if err := d.Set("fingerprint", flattenOSLoginSSHPublicKeyFingerprint(res["fingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading SSHPublicKey: %s", err) + } + + return nil +} + +func resourceOSLoginSSHPublicKeyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + expirationTimeUsecProp, err := expandOSLoginSSHPublicKeyExpirationTimeUsec(d.Get("expiration_time_usec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("expiration_time_usec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, expirationTimeUsecProp)) { + obj["expirationTimeUsec"] = expirationTimeUsecProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{OSLoginBasePath}}users/{{user}}/sshPublicKeys/{{fingerprint}}/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating SSHPublicKey %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("expiration_time_usec") { + updateMask = append(updateMask, "expirationTimeUsec") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating SSHPublicKey %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating SSHPublicKey %q: %#v", d.Id(), res) + } + + return resourceOSLoginSSHPublicKeyRead(d, meta) +} + +func resourceOSLoginSSHPublicKeyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{OSLoginBasePath}}users/{{user}}/sshPublicKeys/{{fingerprint}}/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting SSHPublicKey %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: 
"DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "SSHPublicKey") + } + + log.Printf("[DEBUG] Finished deleting SSHPublicKey %q: %#v", d.Id(), res) + return nil +} + +func resourceOSLoginSSHPublicKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "users/(?P[^/]+)/sshPublicKeys/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "users/{{user}}/sshPublicKeys/{{fingerprint}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenOSLoginSSHPublicKeyKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSLoginSSHPublicKeyExpirationTimeUsec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenOSLoginSSHPublicKeyFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandOSLoginSSHPublicKeyKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandOSLoginSSHPublicKeyExpirationTimeUsec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/data_source_certificate_authority.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/data_source_certificate_authority.go 
new file mode 100644 index 0000000000..5976618a7c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/data_source_certificate_authority.go @@ -0,0 +1,86 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package privateca + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourcePrivatecaCertificateAuthority() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourcePrivatecaCertificateAuthority().Schema) + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "location") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "pool") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "certificate_authority_id") + + dsSchema["pem_csr"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + + return &schema.Resource{ + Read: dataSourcePrivatecaCertificateAuthorityRead, + Schema: dsSchema, + } +} + +func dataSourcePrivatecaCertificateAuthorityRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return fmt.Errorf("Error generating user agent: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + + d.SetId(id) + + err = resourcePrivatecaCertificateAuthorityRead(d, meta) + if err != nil { + return err + } + + // pem_csr is only applicable for SUBORDINATE CertificateAuthorities when their state is AWAITING_USER_ACTIVATION + if d.Get("type") == 
"SUBORDINATE" && d.Get("state") == "AWAITING_USER_ACTIVATION" { + url, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:fetch") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for CertificateAuthority: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("PrivatecaCertificateAuthority %q", d.Id())) + } + if err := d.Set("pem_csr", res["pemCsr"]); err != nil { + return fmt.Errorf("Error fetching CertificateAuthority: %s", err) + } + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/iam_privateca_ca_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/iam_privateca_ca_pool.go new file mode 100644 index 0000000000..52b0b37385 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/iam_privateca_ca_pool.go @@ -0,0 +1,249 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package privateca + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var PrivatecaCaPoolIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "ca_pool": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type PrivatecaCaPoolIamUpdater struct { + project string + location string + caPool string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func PrivatecaCaPoolIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + location, _ := 
tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("ca_pool"); ok { + values["ca_pool"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/caPools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config, d.Get("ca_pool").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &PrivatecaCaPoolIamUpdater{ + project: values["project"], + location: values["location"], + caPool: values["ca_pool"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("ca_pool", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting ca_pool: %s", err) + } + + return u, nil +} + +func PrivatecaCaPoolIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/caPools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &PrivatecaCaPoolIamUpdater{ + project: values["project"], + location: values["location"], + caPool: values["ca_pool"], + d: d, + Config: 
config, + } + if err := d.Set("ca_pool", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting ca_pool: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *PrivatecaCaPoolIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyCaPoolUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + url, err = transport_tpg.AddQueryParams(url, map[string]string{"options.requestedPolicyVersion": fmt.Sprintf("%d", tpgiamresource.IamPolicyVersion)}) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *PrivatecaCaPoolIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyCaPoolUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: 
u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *PrivatecaCaPoolIamUpdater) qualifyCaPoolUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{PrivatecaBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/caPools/%s", u.project, u.location, u.caPool), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *PrivatecaCaPoolIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/caPools/%s", u.project, u.location, u.caPool) +} + +func (u *PrivatecaCaPoolIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-privateca-capool-%s", u.GetResourceId()) +} + +func (u *PrivatecaCaPoolIamUpdater) DescribeResource() string { + return fmt.Sprintf("privateca capool %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/iam_privateca_certificate_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/iam_privateca_certificate_template.go new file mode 100644 index 0000000000..69b972d5b1 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/iam_privateca_certificate_template.go @@ -0,0 +1,249 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package privateca + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var PrivatecaCertificateTemplateIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "certificate_template": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type PrivatecaCertificateTemplateIamUpdater struct { + project string + location string + certificateTemplate string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func PrivatecaCertificateTemplateIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + 
} + } + values["project"] = project + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + } + values["location"] = location + if v, ok := d.GetOk("certificate_template"); ok { + values["certificate_template"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/certificateTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config, d.Get("certificate_template").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &PrivatecaCertificateTemplateIamUpdater{ + project: values["project"], + location: values["location"], + certificateTemplate: values["certificate_template"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("location", u.location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("certificate_template", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting certificate_template: %s", err) + } + + return u, nil +} + +func PrivatecaCertificateTemplateIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + location, _ := tpgresource.GetLocation(d, config) + if location != "" { + values["location"] = location + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/certificateTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } 
+ + for k, v := range m { + values[k] = v + } + + u := &PrivatecaCertificateTemplateIamUpdater{ + project: values["project"], + location: values["location"], + certificateTemplate: values["certificate_template"], + d: d, + Config: config, + } + if err := d.Set("certificate_template", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting certificate_template: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *PrivatecaCertificateTemplateIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyCertificateTemplateUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + url, err = transport_tpg.AddQueryParams(url, map[string]string{"options.requestedPolicyVersion": fmt.Sprintf("%d", tpgiamresource.IamPolicyVersion)}) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *PrivatecaCertificateTemplateIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyCertificateTemplateUrl("setIamPolicy") + if err != nil { 
+ return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *PrivatecaCertificateTemplateIamUpdater) qualifyCertificateTemplateUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{PrivatecaBasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/%s/certificateTemplates/%s", u.project, u.location, u.certificateTemplate), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *PrivatecaCertificateTemplateIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/%s/certificateTemplates/%s", u.project, u.location, u.certificateTemplate) +} + +func (u *PrivatecaCertificateTemplateIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-privateca-certificatetemplate-%s", u.GetResourceId()) +} + +func (u *PrivatecaCertificateTemplateIamUpdater) DescribeResource() string { + return fmt.Sprintf("privateca certificatetemplate %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/privateca_ca_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/privateca_ca_utils.go new file mode 100644 index 0000000000..2c15662b6a --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/privateca_ca_utils.go @@ -0,0 +1,283 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package privateca + +import ( + "fmt" + "log" + "math/rand" + "regexp" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// CA related utilities. + +func enableCA(config *transport_tpg.Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { + enableUrl, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:enable") + if err != nil { + return err + } + + log.Printf("[DEBUG] Enabling CertificateAuthority") + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: enableUrl, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %s", err) + } + + var opRes map[string]interface{} + err = PrivatecaOperationWaitTimeWithResponse( + config, res, &opRes, project, "Enabling CertificateAuthority", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error waiting to enable CertificateAuthority: %s", err) + } + return nil +} + +func disableCA(config *transport_tpg.Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { + disableUrl, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:disable") + if err != nil { + return err + } + + log.Printf("[DEBUG] Disabling CA") + + dRes, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: disableUrl, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error disabling CA: %s", err) + } + + var opRes map[string]interface{} + err = PrivatecaOperationWaitTimeWithResponse( + config, dRes, &opRes, project, "Disabling CA", userAgent, + d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf("Error waiting to disable CA: %s", err) + } + return nil +} + +func activateSubCAWithThirdPartyIssuer(config *transport_tpg.Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { + // 1. prepare parameters + signedCACert := d.Get("pem_ca_certificate").(string) + + sc, ok := d.GetOk("subordinate_config") + if !ok { + return fmt.Errorf("subordinate_config is required to activate subordinate CA") + } + c := sc.([]interface{}) + if len(c) == 0 || c[0] == nil { + return fmt.Errorf("subordinate_config is required to activate subordinate CA") + } + chain, ok := c[0].(map[string]interface{})["pem_issuer_chain"] + if !ok { + return fmt.Errorf("subordinate_config.pem_issuer_chain is required to activate subordinate CA with third party issuer") + } + issuerChain := chain.([]interface{}) + if len(issuerChain) == 0 || issuerChain[0] == nil { + return fmt.Errorf("subordinate_config.pem_issuer_chain is required to activate subordinate CA with third party issuer") + } + pc := issuerChain[0].(map[string]interface{})["pem_certificates"].([]interface{}) + pemIssuerChain := make([]string, 0, len(pc)) + for _, pem := range pc { + pemIssuerChain = append(pemIssuerChain, pem.(string)) + } + + // 2. 
activate CA + activateObj := make(map[string]interface{}) + activateObj["pemCaCertificate"] = signedCACert + activateObj["subordinateConfig"] = make(map[string]interface{}) + activateObj["subordinateConfig"].(map[string]interface{})["pemIssuerChain"] = make(map[string]interface{}) + activateObj["subordinateConfig"].(map[string]interface{})["pemIssuerChain"].(map[string]interface{})["pemCertificates"] = pemIssuerChain + + activateUrl, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:activate") + if err != nil { + return err + } + + log.Printf("[DEBUG] Activating CertificateAuthority: %#v", activateObj) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: activateUrl, + UserAgent: userAgent, + Body: activateObj, + }) + if err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %s", err) + } + + var opRes map[string]interface{} + err = PrivatecaOperationWaitTimeWithResponse( + config, res, &opRes, project, "Activating CertificateAuthority", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error waiting to actiavte CertificateAuthority: %s", err) + } + return nil +} + +func activateSubCAWithFirstPartyIssuer(config *transport_tpg.Config, d *schema.ResourceData, project string, billingProject string, userAgent string) error { + // 1. 
get issuer + sc, ok := d.GetOk("subordinate_config") + if !ok { + return fmt.Errorf("subordinate_config is required to activate subordinate CA") + } + c := sc.([]interface{}) + if len(c) == 0 || c[0] == nil { + return fmt.Errorf("subordinate_config is required to activate subordinate CA") + } + ca, ok := c[0].(map[string]interface{})["certificate_authority"] + if !ok { + return fmt.Errorf("subordinate_config.certificate_authority is required to activate subordinate CA with first party issuer") + } + issuer := ca.(string) + + // 2. fetch CSR + fetchCSRUrl, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:fetch") + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: fetchCSRUrl, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("failed to fetch CSR: %v", err) + } + csr := res["pemCsr"] + + // 3. sign the CSR with first party issuer + genCertId := func() string { + currentTime := time.Now() + dateStr := currentTime.Format("20060102") + + rand.Seed(time.Now().UnixNano()) + const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + rand1 := make([]byte, 3) + for i := range rand1 { + rand1[i] = letters[rand.Intn(len(letters))] + } + rand2 := make([]byte, 3) + for i := range rand2 { + rand2[i] = letters[rand.Intn(len(letters))] + } + return fmt.Sprintf("subordinate-%v-%v-%v", dateStr, string(rand1), string(rand2)) + } + + // parseCAName parses a CA name and return the CaPool name and CaId. 
+ parseCAName := func(n string) (string, string, error) { + parts := regexp.MustCompile(`(projects/[a-z0-9-]+/locations/[a-z0-9-]+/caPools/[a-zA-Z0-9-]+)/certificateAuthorities/([a-zA-Z0-9-]+)`).FindStringSubmatch(n) + if len(parts) != 3 { + return "", "", fmt.Errorf("failed to parse CA name: %v, parts: %v", n, parts) + } + return parts[1], parts[2], err + } + + obj := make(map[string]interface{}) + obj["pemCsr"] = csr + obj["lifetime"] = d.Get("lifetime") + + certId := genCertId() + poolName, issuerId, err := parseCAName(issuer) + if err != nil { + return err + } + + PrivatecaBasePath, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}") + if err != nil { + return err + } + signUrl := fmt.Sprintf("%v%v/certificates?certificateId=%v", PrivatecaBasePath, poolName, certId) + signUrl, err = transport_tpg.AddQueryParams(signUrl, map[string]string{"issuingCertificateAuthorityId": issuerId}) + if err != nil { + return err + } + + log.Printf("[DEBUG] Signing CA Certificate: %#v", obj) + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: signUrl, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Certificate: %s", err) + } + signedCACert := res["pemCertificate"] + + // 4. activate sub CA with the signed CA cert. 
+ activateObj := make(map[string]interface{}) + activateObj["pemCaCertificate"] = signedCACert + activateObj["subordinateConfig"] = make(map[string]interface{}) + activateObj["subordinateConfig"].(map[string]interface{})["certificateAuthority"] = issuer + + activateUrl, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:activate") + if err != nil { + return err + } + + log.Printf("[DEBUG] Activating CertificateAuthority: %#v", activateObj) + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: activateUrl, + UserAgent: userAgent, + Body: activateObj, + }) + if err != nil { + return fmt.Errorf("Error enabling CertificateAuthority: %s", err) + } + + var opRes map[string]interface{} + err = PrivatecaOperationWaitTimeWithResponse( + config, res, &opRes, project, "Enabling CertificateAuthority", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error waiting to actiavte CertificateAuthority: %s", err) + } + return nil +} + +// These setters are used for tests +func (u *PrivatecaCaPoolIamUpdater) SetProject(project string) { + u.project = project +} + +func (u *PrivatecaCaPoolIamUpdater) SetLocation(location string) { + u.location = location +} + +func (u *PrivatecaCaPoolIamUpdater) SetCaPool(caPool string) { + u.caPool = caPool +} + +func (u *PrivatecaCaPoolIamUpdater) SetResourceData(d tpgresource.TerraformResourceData) { + u.d = d +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/privateca_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/privateca_operation.go new file mode 100644 index 0000000000..528efc3d79 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/privateca_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package privateca + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type PrivatecaOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *PrivatecaOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.PrivatecaBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createPrivatecaWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*PrivatecaOperationWaiter, error) { + w := &PrivatecaOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func PrivatecaOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createPrivatecaWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func PrivatecaOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createPrivatecaWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/privateca_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/privateca_utils.go similarity index 84% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/privateca_utils.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/privateca_utils.go index f8c84aac36..7192d150e2 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/privateca_utils.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/privateca_utils.go @@ -1,9 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package privateca import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) // This file contains shared flatteners between PrivateCA Certificate, CaPool and CertificateAuthority. @@ -20,7 +24,7 @@ import ( // Expander utilities -func expandPrivatecaCertificateConfigX509ConfigCaOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigX509ConfigCaOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // Fields non_ca, zero_max_issuer_path_length are used to distinguish between // unset booleans and booleans set with a default value. 
// Unset is_ca or unset max_issuer_path_length either allow any values for these fields when @@ -62,7 +66,7 @@ func expandPrivatecaCertificateConfigX509ConfigCaOptions(v interface{}, d Terraf return transformed, nil } -func expandPrivatecaCertificateConfigX509ConfigKeyUsage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigX509ConfigKeyUsage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return v, nil } @@ -88,7 +92,7 @@ func expandPrivatecaCertificateConfigX509ConfigKeyUsage(v interface{}, d Terrafo return transformed, nil } -func expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsage(v interface{}, d TerraformResourceData, config *Config) interface{} { +func expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -117,7 +121,7 @@ func expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsage(v interface{ return transformed } -func expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsage(v interface{}, d TerraformResourceData, config *Config) interface{} { +func expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -143,7 +147,7 @@ func expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsage(v interf return transformed } -func expandPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsages(v interface{}, d TerraformResourceData, config *Config) interface{} { +func expandPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -163,7 +167,7 @@ func 
expandPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsages( return transformed } -func expandPrivatecaCertificateConfigX509ConfigPolicyIds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigX509ConfigPolicyIds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return v, nil } @@ -183,7 +187,7 @@ func expandPrivatecaCertificateConfigX509ConfigPolicyIds(v interface{}, d Terraf return transformed, nil } -func expandPrivatecaCertificateConfigX509ConfigAdditionalExtensions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigX509ConfigAdditionalExtensions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return v, nil } @@ -204,7 +208,7 @@ func expandPrivatecaCertificateConfigX509ConfigAdditionalExtensions(v interface{ return transformed, nil } -func expandPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectId(v interface{}, d TerraformResourceData, config *Config) interface{} { +func expandPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -225,12 +229,12 @@ func expandPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectId(v in return transformed } -func expandPrivatecaCertificateConfigX509ConfigAiaOcspServers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigX509ConfigAiaOcspServers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // List of strings, no processing necessary. 
return v, nil } -func expandPrivatecaCertificateConfigX509ConfigNameConstraints(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigX509ConfigNameConstraints(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return nil, nil } @@ -263,7 +267,7 @@ func expandPrivatecaCertificateConfigX509ConfigNameConstraints(v interface{}, d // Flattener utilities -func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -283,15 +287,15 @@ func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensions(v interface } return transformed } -func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsCritical(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsCritical(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -304,11 +308,11 @@ func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectId(v i 
flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectIdObjectIdPath(original["objectIdPath"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectIdObjectIdPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectIdObjectIdPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigPolicyIds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigPolicyIds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -326,15 +330,15 @@ func flattenPrivatecaCertificateConfigX509ConfigPolicyIds(v interface{}, d *sche } return transformed } -func flattenPrivatecaCertificateConfigX509ConfigPolicyIdsObjectIdPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigPolicyIdsObjectIdPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigAiaOcspServers(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigAiaOcspServers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigCaOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigCaOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Special case here as the CaPool API returns an empty object rather than nil unlike the Certificate // and CertificateAuthority APIs. 
if v == nil || len(v.(map[string]interface{})) == 0 { @@ -359,14 +363,14 @@ func flattenPrivatecaCertificateConfigX509ConfigCaOptions(v interface{}, d *sche return []interface{}{transformed} } -func flattenPrivatecaCertificateConfigX509ConfigCaOptionsIsCa(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigCaOptionsIsCa(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigCaOptionsMaxIssuerPathLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigCaOptionsMaxIssuerPathLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -380,7 +384,7 @@ func flattenPrivatecaCertificateConfigX509ConfigCaOptionsMaxIssuerPathLength(v i return v // let terraform core handle it otherwise } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { v = make(map[string]interface{}) } @@ -394,7 +398,7 @@ func flattenPrivatecaCertificateConfigX509ConfigKeyUsage(v interface{}, d *schem flattenPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsages(original["unknownExtendedKeyUsages"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsage(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { if v == nil { v = make(map[string]interface{}) } @@ -420,43 +424,41 @@ func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsage(v interface flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDecipherOnly(original["decipherOnly"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDigitalSignature(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDigitalSignature(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageContentCommitment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageContentCommitment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageKeyEncipherment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageKeyEncipherment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDataEncipherment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDataEncipherment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageKeyAgreement(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageKeyAgreement(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { 
return v } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageCertSign(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageCertSign(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageCrlSign(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageCrlSign(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageEncipherOnly(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageEncipherOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDecipherOnly(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDecipherOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { v = make(map[string]interface{}) } @@ -476,31 +478,31 @@ func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsage(v inter flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageOcspSigning(original["ocspSigning"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageServerAuth(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageServerAuth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageClientAuth(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageClientAuth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageCodeSigning(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageCodeSigning(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageEmailProtection(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageEmailProtection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageTimeStamping(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageTimeStamping(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageOcspSigning(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageOcspSigning(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsages(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -518,11 +520,11 @@ func flattenPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsages } return transformed } -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsagesObjectIdPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsagesObjectIdPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigX509ConfigNameConstraints(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509ConfigNameConstraints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_privateca_ca_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_ca_pool.go similarity index 81% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_privateca_ca_pool.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_ca_pool.go index 62a16249dd..ad2301e5cf 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_privateca_ca_pool.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_ca_pool.go @@ -1,3 
+1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package privateca import ( "fmt" @@ -22,6 +25,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourcePrivatecaCaPool() *schema.Resource { @@ -59,7 +66,7 @@ running 'gcloud privateca locations list'.`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"ENTERPRISE", "DEVOPS"}), + ValidateFunc: verify.ValidateEnum([]string{"ENTERPRISE", "DEVOPS"}), Description: `The Tier of this CaPool. Possible values: ["ENTERPRISE", "DEVOPS"]`, }, "issuance_policy": { @@ -106,7 +113,7 @@ Otherwise, any key may be used.`, "signature_algorithm": { Type: schema.TypeString, Required: true, - ValidateFunc: validateEnum([]string{"ECDSA_P256", "ECDSA_P384", "EDDSA_25519"}), + ValidateFunc: verify.ValidateEnum([]string{"ECDSA_P256", "ECDSA_P384", "EDDSA_25519"}), Description: `The algorithm used. Possible values: ["ECDSA_P256", "ECDSA_P384", "EDDSA_25519"]`, }, }, @@ -168,7 +175,7 @@ subordinate CA certificates that are allowed. If this value is less than 0, the "non_ca": { Type: schema.TypeBool, Optional: true, - Description: `When true, the "CA" in Basic Constraints extension will be set to false. + Description: `When true, the "CA" in Basic Constraints extension will be set to false. 
If both 'is_ca' and 'non_ca' are unset, the extension will be omitted from the CA certificate.`, }, "zero_max_issuer_path_length": { @@ -549,7 +556,7 @@ An object containing a list of "key": value pairs. Example: { "name": "wrench", "publishing_options": { Type: schema.TypeList, Optional: true, - DiffSuppressFunc: emptyOrUnsetBlockDiffSuppress, + DiffSuppressFunc: tpgresource.EmptyOrUnsetBlockDiffSuppress, Description: `The PublishingOptions to follow when issuing Certificates from any CertificateAuthority in this CaPool.`, MaxItems: 1, Elem: &schema.Resource{ @@ -569,6 +576,14 @@ in all issued Certificates. If this is false, CRLs will not be published and the be written in issued certificates. CRLs will expire 7 days from their creation. However, we will rebuild daily. CRLs are also rebuilt shortly after a certificate is revoked.`, }, + "encoding_format": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"PEM", "DER", ""}), + Description: `Specifies the encoding format of each CertificateAuthority's CA +certificate and CRLs. If this is omitted, CA certificates and CRLs +will be published in PEM. 
Possible values: ["PEM", "DER"]`, + }, }, }, }, @@ -584,8 +599,8 @@ also rebuilt shortly after a certificate is revoked.`, } func resourcePrivatecaCaPoolCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -594,29 +609,29 @@ func resourcePrivatecaCaPoolCreate(d *schema.ResourceData, meta interface{}) err tierProp, err := expandPrivatecaCaPoolTier(d.Get("tier"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("tier"); !isEmptyValue(reflect.ValueOf(tierProp)) && (ok || !reflect.DeepEqual(v, tierProp)) { + } else if v, ok := d.GetOkExists("tier"); !tpgresource.IsEmptyValue(reflect.ValueOf(tierProp)) && (ok || !reflect.DeepEqual(v, tierProp)) { obj["tier"] = tierProp } issuancePolicyProp, err := expandPrivatecaCaPoolIssuancePolicy(d.Get("issuance_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("issuance_policy"); !isEmptyValue(reflect.ValueOf(issuancePolicyProp)) && (ok || !reflect.DeepEqual(v, issuancePolicyProp)) { + } else if v, ok := d.GetOkExists("issuance_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(issuancePolicyProp)) && (ok || !reflect.DeepEqual(v, issuancePolicyProp)) { obj["issuancePolicy"] = issuancePolicyProp } publishingOptionsProp, err := expandPrivatecaCaPoolPublishingOptions(d.Get("publishing_options"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("publishing_options"); !isEmptyValue(reflect.ValueOf(publishingOptionsProp)) && (ok || !reflect.DeepEqual(v, publishingOptionsProp)) { + } else if v, ok := d.GetOkExists("publishing_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(publishingOptionsProp)) && (ok || !reflect.DeepEqual(v, publishingOptionsProp)) { obj["publishingOptions"] = publishingOptionsProp } 
labelsProp, err := expandPrivatecaCaPoolLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools?caPoolId={{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools?caPoolId={{name}}") if err != nil { return err } @@ -624,24 +639,32 @@ func resourcePrivatecaCaPoolCreate(d *schema.ResourceData, meta interface{}) err log.Printf("[DEBUG] Creating new CaPool: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for CaPool: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating CaPool: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{name}}") if err != nil { return 
fmt.Errorf("Error constructing id: %s", err) } @@ -661,7 +684,7 @@ func resourcePrivatecaCaPoolCreate(d *schema.ResourceData, meta interface{}) err } // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{name}}") + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -673,33 +696,39 @@ func resourcePrivatecaCaPoolCreate(d *schema.ResourceData, meta interface{}) err } func resourcePrivatecaCaPoolRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for CaPool: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("PrivatecaCaPool %q", d.Id())) + 
return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("PrivatecaCaPool %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -723,15 +752,15 @@ func resourcePrivatecaCaPoolRead(d *schema.ResourceData, meta interface{}) error } func resourcePrivatecaCaPoolUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for CaPool: %s", err) } @@ -741,23 +770,23 @@ func resourcePrivatecaCaPoolUpdate(d *schema.ResourceData, meta interface{}) err issuancePolicyProp, err := expandPrivatecaCaPoolIssuancePolicy(d.Get("issuance_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("issuance_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, issuancePolicyProp)) { + } else if v, ok := d.GetOkExists("issuance_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, issuancePolicyProp)) { obj["issuancePolicy"] = issuancePolicyProp } publishingOptionsProp, err := expandPrivatecaCaPoolPublishingOptions(d.Get("publishing_options"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("publishing_options"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, publishingOptionsProp)) { + } else if v, ok := d.GetOkExists("publishing_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, publishingOptionsProp)) { obj["publishingOptions"] = publishingOptionsProp } labelsProp, err := expandPrivatecaCaPoolLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); 
!isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{name}}") if err != nil { return err } @@ -776,19 +805,27 @@ func resourcePrivatecaCaPoolUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("labels") { updateMask = append(updateMask, "labels") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating CaPool %q: %s", d.Id(), err) @@ -808,21 +845,21 @@ func resourcePrivatecaCaPoolUpdate(d *schema.ResourceData, meta interface{}) err } func resourcePrivatecaCaPoolDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) 
- userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for CaPool: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{name}}") if err != nil { return err } @@ -831,13 +868,21 @@ func resourcePrivatecaCaPoolDelete(d *schema.ResourceData, meta interface{}) err log.Printf("[DEBUG] Deleting CaPool %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "CaPool") + return transport_tpg.HandleNotFoundError(err, d, "CaPool") } err = PrivatecaOperationWaitTime( @@ -853,8 +898,8 @@ func resourcePrivatecaCaPoolDelete(d *schema.ResourceData, meta interface{}) err } func resourcePrivatecaCaPoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ 
"projects/(?P[^/]+)/locations/(?P[^/]+)/caPools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -863,7 +908,7 @@ func resourcePrivatecaCaPoolImport(d *schema.ResourceData, meta interface{}) ([] } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -872,11 +917,11 @@ func resourcePrivatecaCaPoolImport(d *schema.ResourceData, meta interface{}) ([] return []*schema.ResourceData{d}, nil } -func flattenPrivatecaCaPoolTier(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolTier(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolIssuancePolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -897,7 +942,7 @@ func flattenPrivatecaCaPoolIssuancePolicy(v interface{}, d *schema.ResourceData, flattenPrivatecaCaPoolIssuancePolicyBaselineValues(original["baselineValues"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -916,7 +961,7 @@ func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypes(v interface{}, d *schem } return transformed } -func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsa(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsa(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -931,15 +976,15 @@ func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsa(v interface{}, d *sc flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMaxModulusSize(original["maxModulusSize"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMinModulusSize(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMinModulusSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMaxModulusSize(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMaxModulusSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -952,15 +997,15 @@ func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve(v interfac flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithm(original["signatureAlgorithm"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithm(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenPrivatecaCaPoolIssuancePolicyMaximumLifetime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyMaximumLifetime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -975,15 +1020,15 @@ func flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModes(v interface{}, d * flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowConfigBasedIssuance(original["allowConfigBasedIssuance"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowCsrBasedIssuance(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowCsrBasedIssuance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowConfigBasedIssuance(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowConfigBasedIssuance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraints(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1000,15 +1045,15 @@ func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraints(v interface{}, d *s 
flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpression(original["celExpression"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectPassthrough(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectPassthrough(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectAltNamesPassthrough(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectAltNamesPassthrough(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpression(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1027,23 +1072,23 @@ func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpression(v inte flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionLocation(original["location"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionExpression(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionTitle(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionTitle(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolIssuancePolicyBaselineValues(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolIssuancePolicyBaselineValues(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { v = make(map[string]interface{}) } @@ -1063,7 +1108,7 @@ func flattenPrivatecaCaPoolIssuancePolicyBaselineValues(v interface{}, d *schema return []interface{}{transformed} } -func flattenPrivatecaCaPoolPublishingOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolPublishingOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1076,25 +1121,31 @@ func flattenPrivatecaCaPoolPublishingOptions(v interface{}, d *schema.ResourceDa flattenPrivatecaCaPoolPublishingOptionsPublishCaCert(original["publishCaCert"], d, config) transformed["publish_crl"] = flattenPrivatecaCaPoolPublishingOptionsPublishCrl(original["publishCrl"], d, config) + transformed["encoding_format"] = + flattenPrivatecaCaPoolPublishingOptionsEncodingFormat(original["encodingFormat"], d, config) return []interface{}{transformed} } -func 
flattenPrivatecaCaPoolPublishingOptionsPublishCaCert(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolPublishingOptionsPublishCaCert(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolPublishingOptionsPublishCrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolPublishingOptionsPublishCrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCaPoolLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCaPoolPublishingOptionsEncodingFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandPrivatecaCaPoolTier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func flattenPrivatecaCaPoolLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandPrivatecaCaPoolTier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolIssuancePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1106,42 +1157,42 @@ func expandPrivatecaCaPoolIssuancePolicy(v interface{}, d TerraformResourceData, transformedAllowedKeyTypes, err := expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypes(original["allowed_key_types"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedKeyTypes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedKeyTypes); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["allowedKeyTypes"] = transformedAllowedKeyTypes } transformedMaximumLifetime, err := expandPrivatecaCaPoolIssuancePolicyMaximumLifetime(original["maximum_lifetime"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaximumLifetime); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaximumLifetime); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maximumLifetime"] = transformedMaximumLifetime } transformedAllowedIssuanceModes, err := expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModes(original["allowed_issuance_modes"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowedIssuanceModes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowedIssuanceModes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowedIssuanceModes"] = transformedAllowedIssuanceModes } transformedIdentityConstraints, err := expandPrivatecaCaPoolIssuancePolicyIdentityConstraints(original["identity_constraints"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIdentityConstraints); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIdentityConstraints); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["identityConstraints"] = transformedIdentityConstraints } transformedBaselineValues, err := expandPrivatecaCaPoolIssuancePolicyBaselineValues(original["baseline_values"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedBaselineValues); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedBaselineValues); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["baselineValues"] = transformedBaselineValues } return transformed, nil } -func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypes(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1154,14 +1205,14 @@ func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypes(v interface{}, d Terrafo transformedRsa, err := expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsa(original["rsa"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedRsa); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedRsa); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["rsa"] = transformedRsa } transformedEllipticCurve, err := expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve(original["elliptic_curve"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEllipticCurve); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEllipticCurve); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ellipticCurve"] = transformedEllipticCurve } @@ -1170,7 +1221,7 @@ func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypes(v interface{}, d Terrafo return req, nil } -func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsa(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsa(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1182,29 +1233,29 @@ func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsa(v interface{}, d Terr transformedMinModulusSize, err := expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMinModulusSize(original["min_modulus_size"], d, config) if err != nil { return nil, err - } 
else if val := reflect.ValueOf(transformedMinModulusSize); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinModulusSize); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minModulusSize"] = transformedMinModulusSize } transformedMaxModulusSize, err := expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMaxModulusSize(original["max_modulus_size"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxModulusSize); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxModulusSize); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxModulusSize"] = transformedMaxModulusSize } return transformed, nil } -func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMinModulusSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMinModulusSize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMaxModulusSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMaxModulusSize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1216,22 +1267,22 @@ func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve(v interface transformedSignatureAlgorithm, err := 
expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithm(original["signature_algorithm"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSignatureAlgorithm); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSignatureAlgorithm); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["signatureAlgorithm"] = transformedSignatureAlgorithm } return transformed, nil } -func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolIssuancePolicyMaximumLifetime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyMaximumLifetime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1243,29 +1294,29 @@ func expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModes(v interface{}, d Te transformedAllowCsrBasedIssuance, err := expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowCsrBasedIssuance(original["allow_csr_based_issuance"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowCsrBasedIssuance); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedAllowCsrBasedIssuance); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowCsrBasedIssuance"] = transformedAllowCsrBasedIssuance } transformedAllowConfigBasedIssuance, err := expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowConfigBasedIssuance(original["allow_config_based_issuance"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAllowConfigBasedIssuance); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAllowConfigBasedIssuance); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["allowConfigBasedIssuance"] = transformedAllowConfigBasedIssuance } return transformed, nil } -func expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowCsrBasedIssuance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowCsrBasedIssuance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowConfigBasedIssuance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowConfigBasedIssuance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraints(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyIdentityConstraints(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1291,22 +1342,22 @@ func expandPrivatecaCaPoolIssuancePolicyIdentityConstraints(v interface{}, d Ter transformedCelExpression, err := 
expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpression(original["cel_expression"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCelExpression); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCelExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["celExpression"] = transformedCelExpression } return transformed, nil } -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectPassthrough(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectPassthrough(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectAltNamesPassthrough(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectAltNamesPassthrough(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1318,51 +1369,51 @@ func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpression(v inter transformedExpression, err := expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionExpression(original["expression"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["expression"] = transformedExpression } transformedTitle, err := expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionTitle(original["title"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["title"] = transformedTitle } transformedDescription, err := expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionDescription(original["description"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["description"] = transformedDescription } transformedLocation, err := expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionLocation(original["location"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["location"] = transformedLocation } return transformed, nil } -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolIssuancePolicyBaselineValues(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolIssuancePolicyBaselineValues(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return v, nil } @@ -1415,7 +1466,7 @@ func expandPrivatecaCaPoolIssuancePolicyBaselineValues(v interface{}, d Terrafor return transformed, nil } -func expandPrivatecaCaPoolPublishingOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolPublishingOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1427,29 +1478,40 @@ func expandPrivatecaCaPoolPublishingOptions(v interface{}, d TerraformResourceDa transformedPublishCaCert, err := expandPrivatecaCaPoolPublishingOptionsPublishCaCert(original["publish_ca_cert"], d, config) if err != nil { return nil, err - } else 
if val := reflect.ValueOf(transformedPublishCaCert); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPublishCaCert); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["publishCaCert"] = transformedPublishCaCert } transformedPublishCrl, err := expandPrivatecaCaPoolPublishingOptionsPublishCrl(original["publish_crl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPublishCrl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPublishCrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["publishCrl"] = transformedPublishCrl } + transformedEncodingFormat, err := expandPrivatecaCaPoolPublishingOptionsEncodingFormat(original["encoding_format"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncodingFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["encodingFormat"] = transformedEncodingFormat + } + return transformed, nil } -func expandPrivatecaCaPoolPublishingOptionsPublishCaCert(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolPublishingOptionsPublishCaCert(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPrivatecaCaPoolPublishingOptionsPublishCrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolPublishingOptionsPublishCrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCaPoolPublishingOptionsEncodingFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCaPoolLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func 
expandPrivatecaCaPoolLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_ca_pool_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_ca_pool_sweeper.go new file mode 100644 index 0000000000..06d8e96709 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_ca_pool_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package privateca + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("PrivatecaCaPool", testSweepPrivatecaCaPool) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepPrivatecaCaPool(region string) error { + resourceName := "PrivatecaCaPool" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://privateca.googleapis.com/v1/projects/{{project}}/locations/{{location}}/caPools", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] 
Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["caPools"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://privateca.googleapis.com/v1/projects/{{project}}/locations/{{location}}/caPools/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_privateca_certificate.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate.go similarity index 87% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_privateca_certificate.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate.go index 0f3e2119d3..b07cb81d12 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_privateca_certificate.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package privateca import ( "fmt" @@ -22,6 +25,10 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourcePrivatecaCertificate() *schema.Resource { @@ -65,7 +72,7 @@ running 'gcloud privateca locations list'.`, Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: compareResourceNames, + DiffSuppressFunc: tpgresource.CompareResourceNames, Description: `The resource name for a CertificateTemplate used to issue this certificate, in the format 'projects/*/locations/*/certificateTemplates/*'. If this is specified, the caller must have the necessary permission to use this template. 
If this is @@ -101,7 +108,7 @@ should be set to 'my-ca'.`, Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"KEY_TYPE_UNSPECIFIED", "PEM"}), + ValidateFunc: verify.ValidateEnum([]string{"KEY_TYPE_UNSPECIFIED", "PEM"}), Description: `The format of the public key. Currently, only PEM format is supported. Possible values: ["KEY_TYPE_UNSPECIFIED", "PEM"]`, }, "key": { @@ -462,7 +469,7 @@ subordinate CA certificates that are allowed. If this value is less than 0, the Type: schema.TypeBool, Optional: true, ForceNew: true, - Description: `When true, the "CA" in Basic Constraints extension will be set to false. + Description: `When true, the "CA" in Basic Constraints extension will be set to false. If both 'is_ca' and 'non_ca' are unset, the extension will be omitted from the CA certificate.`, }, "zero_max_issuer_path_length": { @@ -1374,7 +1381,7 @@ This is in RFC3339 text format.`, "revocation_details": { Type: schema.TypeList, Computed: true, - Description: `Output only. Details regarding the revocation of this Certificate. This Certificate is + Description: `Output only. Details regarding the revocation of this Certificate. 
This Certificate is considered revoked if and only if this field is present.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -1409,8 +1416,8 @@ This is in RFC3339 text format.`, } func resourcePrivatecaCertificateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -1419,35 +1426,35 @@ func resourcePrivatecaCertificateCreate(d *schema.ResourceData, meta interface{} lifetimeProp, err := expandPrivatecaCertificateLifetime(d.Get("lifetime"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("lifetime"); !isEmptyValue(reflect.ValueOf(lifetimeProp)) && (ok || !reflect.DeepEqual(v, lifetimeProp)) { + } else if v, ok := d.GetOkExists("lifetime"); !tpgresource.IsEmptyValue(reflect.ValueOf(lifetimeProp)) && (ok || !reflect.DeepEqual(v, lifetimeProp)) { obj["lifetime"] = lifetimeProp } certificateTemplateProp, err := expandPrivatecaCertificateCertificateTemplate(d.Get("certificate_template"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("certificate_template"); !isEmptyValue(reflect.ValueOf(certificateTemplateProp)) && (ok || !reflect.DeepEqual(v, certificateTemplateProp)) { + } else if v, ok := d.GetOkExists("certificate_template"); !tpgresource.IsEmptyValue(reflect.ValueOf(certificateTemplateProp)) && (ok || !reflect.DeepEqual(v, certificateTemplateProp)) { obj["certificateTemplate"] = certificateTemplateProp } labelsProp, err := expandPrivatecaCertificateLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || 
!reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } pemCsrProp, err := expandPrivatecaCertificatePemCsr(d.Get("pem_csr"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("pem_csr"); !isEmptyValue(reflect.ValueOf(pemCsrProp)) && (ok || !reflect.DeepEqual(v, pemCsrProp)) { + } else if v, ok := d.GetOkExists("pem_csr"); !tpgresource.IsEmptyValue(reflect.ValueOf(pemCsrProp)) && (ok || !reflect.DeepEqual(v, pemCsrProp)) { obj["pemCsr"] = pemCsrProp } configProp, err := expandPrivatecaCertificateConfig(d.Get("config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("config"); !isEmptyValue(reflect.ValueOf(configProp)) && (ok || !reflect.DeepEqual(v, configProp)) { + } else if v, ok := d.GetOkExists("config"); !tpgresource.IsEmptyValue(reflect.ValueOf(configProp)) && (ok || !reflect.DeepEqual(v, configProp)) { obj["config"] = configProp } - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates?certificateId={{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates?certificateId={{name}}") if err != nil { return err } @@ -1455,31 +1462,39 @@ func resourcePrivatecaCertificateCreate(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] Creating new Certificate: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Certificate: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } // Only include linked certificate authority if the user specified it if p, ok := d.GetOk("certificate_authority"); ok { - url, 
err = addQueryParams(url, map[string]string{"issuingCertificateAuthorityId": p.(string)}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"issuingCertificateAuthorityId": p.(string)}) if err != nil { return err } } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Certificate: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -1491,33 +1506,39 @@ func resourcePrivatecaCertificateCreate(d *schema.ResourceData, meta interface{} } func resourcePrivatecaCertificateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Certificate: %s", err) } billingProject = 
project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("PrivatecaCertificate %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("PrivatecaCertificate %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -1568,15 +1589,15 @@ func resourcePrivatecaCertificateRead(d *schema.ResourceData, meta interface{}) } func resourcePrivatecaCertificateUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Certificate: %s", err) } @@ -1586,11 +1607,11 @@ func resourcePrivatecaCertificateUpdate(d *schema.ResourceData, meta interface{} labelsProp, err := expandPrivatecaCertificateLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } - url, err := replaceVars(d, config, 
"{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}") if err != nil { return err } @@ -1601,19 +1622,27 @@ func resourcePrivatecaCertificateUpdate(d *schema.ResourceData, meta interface{} if d.HasChange("labels") { updateMask = append(updateMask, "labels") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Certificate %q: %s", d.Id(), err) @@ -1625,21 +1654,21 @@ func resourcePrivatecaCertificateUpdate(d *schema.ResourceData, meta interface{} } func resourcePrivatecaCertificateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject 
:= "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Certificate: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}:revoke") + url, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}:revoke") if err != nil { return err } @@ -1648,13 +1677,21 @@ func resourcePrivatecaCertificateDelete(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] Deleting Certificate %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "Certificate") + return transport_tpg.HandleNotFoundError(err, d, "Certificate") } log.Printf("[DEBUG] Finished deleting Certificate %q: %#v", d.Id(), res) @@ -1662,8 +1699,8 @@ func resourcePrivatecaCertificateDelete(d *schema.ResourceData, meta interface{} } func resourcePrivatecaCertificateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/caPools/(?P[^/]+)/certificates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", 
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", @@ -1672,7 +1709,7 @@ func resourcePrivatecaCertificateImport(d *schema.ResourceData, meta interface{} } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -1681,15 +1718,15 @@ func resourcePrivatecaCertificateImport(d *schema.ResourceData, meta interface{} return []*schema.ResourceData{d}, nil } -func flattenPrivatecaCertificateIssuerCertificateAuthority(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateIssuerCertificateAuthority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateLifetime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateLifetime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateRevocationDetails(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateRevocationDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1704,19 +1741,19 @@ func flattenPrivatecaCertificateRevocationDetails(v interface{}, d *schema.Resou flattenPrivatecaCertificateRevocationDetailsRevocationTime(original["revocationTime"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateRevocationDetailsRevocationState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateRevocationDetailsRevocationState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } 
-func flattenPrivatecaCertificateRevocationDetailsRevocationTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateRevocationDetailsRevocationTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificatePemCertificate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificatePemCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1745,7 +1782,7 @@ func flattenPrivatecaCertificateCertificateDescription(v interface{}, d *schema. flattenPrivatecaCertificateCertificateDescriptionCertFingerprint(original["certFingerprint"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1768,7 +1805,7 @@ func flattenPrivatecaCertificateCertificateDescriptionSubjectDescription(v inter flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionNotAfterTime(original["notAfterTime"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubject(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { 
return nil } @@ -1795,39 +1832,39 @@ func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubject( flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectCommonName(original["commonName"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectCountryCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectCountryCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectOrganization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectOrganization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectOrganizationalUnit(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectOrganizationalUnit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectLocality(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectLocality(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectProvince(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectProvince(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectStreetAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectStreetAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectPostalCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectPostalCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectCommonName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectCommonName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1848,23 +1885,23 @@ func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectA flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSans(original["customSans"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameDnsNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameDnsNames(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameUris(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameEmailAddresses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameEmailAddresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameIpAddresses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameIpAddresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSans(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSans(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1884,7 +1921,7 @@ func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectA } return transformed } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansObectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansObectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { 
return nil } @@ -1897,35 +1934,35 @@ func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectA flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansObectIdObjectIdPath(original["objectIdPath"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansObectIdObjectIdPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansObectIdObjectIdPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansCritical(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansCritical(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionHexSerialNumber(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionHexSerialNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionLifetime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionLifetime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionNotBeforeTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionNotBeforeTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionNotAfterTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionNotAfterTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509Description(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509Description(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1948,7 +1985,7 @@ func flattenPrivatecaCertificateCertificateDescriptionX509Description(v interfac flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraints(original["nameConstraints"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAdditionalExtensions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAdditionalExtensions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -1968,15 +2005,15 @@ func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAdditionalE } return transformed } -func 
flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAdditionalExtensionsCritical(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAdditionalExtensionsCritical(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAdditionalExtensionsValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAdditionalExtensionsValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAdditionalExtensionsObjectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAdditionalExtensionsObjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1989,11 +2026,11 @@ func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAdditionalE flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAdditionalExtensionsObjectIdObjectIdPath(original["objectIdPath"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAdditionalExtensionsObjectIdObjectIdPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAdditionalExtensionsObjectIdObjectIdPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionPolicyIds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionPolicyIds(v interface{}, 
d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2011,15 +2048,15 @@ func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionPolicyIds(v } return transformed } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionPolicyIdsObjectIdPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionPolicyIdsObjectIdPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAiaOcspServers(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionAiaOcspServers(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionCaOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionCaOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2034,14 +2071,14 @@ func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionCaOptions(v flattenPrivatecaCertificateCertificateDescriptionX509DescriptionCaOptionsMaxIssuerPathLength(original["maxIssuerPathLength"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionCaOptionsIsCa(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionCaOptionsIsCa(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionCaOptionsMaxIssuerPathLength(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionCaOptionsMaxIssuerPathLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -2055,7 +2092,7 @@ func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionCaOptionsMa return v // let terraform core handle it otherwise } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2072,7 +2109,7 @@ func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsage(v flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageUnknownExtendedKeyUsages(original["unknownExtendedKeyUsages"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2101,43 +2138,43 @@ func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBas flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageDecipherOnly(original["decipherOnly"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageDigitalSignature(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageDigitalSignature(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageContentCommitment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageContentCommitment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageKeyEncipherment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageKeyEncipherment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageDataEncipherment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageDataEncipherment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageKeyAgreement(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageKeyAgreement(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageCertSign(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageCertSign(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageCrlSign(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageCrlSign(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageEncipherOnly(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageEncipherOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageDecipherOnly(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageBaseKeyUsageDecipherOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2160,31 +2197,31 @@ func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExt flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsageOcspSigning(original["ocspSigning"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsageServerAuth(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsageServerAuth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsageClientAuth(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsageClientAuth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsageCodeSigning(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsageCodeSigning(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsageEmailProtection(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsageEmailProtection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsageTimeStamping(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsageTimeStamping(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsageOcspSigning(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageExtendedKeyUsageOcspSigning(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageUnknownExtendedKeyUsages(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageUnknownExtendedKeyUsages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2202,11 +2239,11 @@ func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageUnk } return transformed } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageUnknownExtendedKeyUsagesObjectIdPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionKeyUsageUnknownExtendedKeyUsagesObjectIdPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraints(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2235,43 +2272,43 @@ func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstra flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsExcludedUris(original["excludedUris"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsCritical(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsCritical(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsPermittedDnsNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsPermittedDnsNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsExcludedDnsNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsExcludedDnsNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsPermittedIpRanges(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsPermittedIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsExcludedIpRanges(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsExcludedIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsPermittedEmailAddresses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsPermittedEmailAddresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsExcludedEmailAddresses(v interface{}, d *schema.ResourceData, 
config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsExcludedEmailAddresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsPermittedUris(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsPermittedUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsExcludedUris(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionX509DescriptionNameConstraintsExcludedUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValues(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValues(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2284,7 +2321,7 @@ func flattenPrivatecaCertificateCertificateDescriptionConfigValues(v interface{} flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsage(original["keyUsage"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2301,7 +2338,7 @@ func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsage(v int 
flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsages(original["unknownExtendedKeyUsages"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2314,7 +2351,7 @@ func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKe flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptions(original["keyUsageOptions"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2343,43 +2380,43 @@ func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKe flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsDecipherOnly(original["decipherOnly"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsDigitalSignature(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsDigitalSignature(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsContentCommitment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsContentCommitment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsKeyEncipherment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsKeyEncipherment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsDataEncipherment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsDataEncipherment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsKeyAgreement(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsKeyAgreement(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsCertSign(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsCertSign(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsCrlSign(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsCrlSign(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsEncipherOnly(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsEncipherOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsDecipherOnly(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsDecipherOnly(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsage(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2402,31 +2439,31 @@ func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtend flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageOcspSigning(original["ocspSigning"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageServerAuth(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageServerAuth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageClientAuth(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageClientAuth(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageCodeSigning(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageCodeSigning(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageEmailProtection(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageEmailProtection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageTimeStamping(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageTimeStamping(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageOcspSigning(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageOcspSigning(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsages(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -2444,7 +2481,7 @@ func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknow } return transformed } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsagesObectId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsagesObectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2457,11 +2494,11 @@ func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknow flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsagesObectIdObjectIdPath(original["objectIdPath"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsagesObectIdObjectIdPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsagesObectIdObjectIdPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionPublicKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionPublicKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2476,15 +2513,15 @@ func 
flattenPrivatecaCertificateCertificateDescriptionPublicKey(v interface{}, d flattenPrivatecaCertificateCertificateDescriptionPublicKeyFormat(original["format"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionPublicKeyKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionPublicKeyKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionPublicKeyFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionPublicKeyFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionSubjectKeyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectKeyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2497,11 +2534,11 @@ func flattenPrivatecaCertificateCertificateDescriptionSubjectKeyId(v interface{} flattenPrivatecaCertificateCertificateDescriptionSubjectKeyIdKeyId(original["keyId"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionSubjectKeyIdKeyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionSubjectKeyIdKeyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionAuthorityKeyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionAuthorityKeyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ 
-2514,19 +2551,19 @@ func flattenPrivatecaCertificateCertificateDescriptionAuthorityKeyId(v interface flattenPrivatecaCertificateCertificateDescriptionAuthorityKeyIdKeyId(original["keyId"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionAuthorityKeyIdKeyId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionAuthorityKeyIdKeyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionCrlDistributionPoints(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionCrlDistributionPoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionAiaIssuingCertificateUrls(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionAiaIssuingCertificateUrls(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateDescriptionCertFingerprint(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionCertFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2539,39 +2576,39 @@ func flattenPrivatecaCertificateCertificateDescriptionCertFingerprint(v interfac flattenPrivatecaCertificateCertificateDescriptionCertFingerprintSha256Hash(original["sha256Hash"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateCertificateDescriptionCertFingerprintSha256Hash(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateDescriptionCertFingerprintSha256Hash(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificatePemCertificateChain(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificatePemCertificateChain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificatePemCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificatePemCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateCertificateTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateCertificateTemplate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificatePemCsr(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificatePemCsr(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenPrivatecaCertificateConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2589,7 +2626,7 @@ func flattenPrivatecaCertificateConfig(v interface{}, d *schema.ResourceData, co return []interface{}{transformed} } -func flattenPrivatecaCertificateConfigX509Config(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigX509Config(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { v = make(map[string]interface{}) } @@ -2609,7 +2646,7 @@ func flattenPrivatecaCertificateConfigX509Config(v interface{}, d *schema.Resour return []interface{}{transformed} } -func flattenPrivatecaCertificateConfigSubjectConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2624,7 +2661,7 @@ func flattenPrivatecaCertificateConfigSubjectConfig(v interface{}, d *schema.Res flattenPrivatecaCertificateConfigSubjectConfigSubjectAltName(original["subjectAltName"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateConfigSubjectConfigSubject(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2651,39 +2688,39 @@ func flattenPrivatecaCertificateConfigSubjectConfigSubject(v interface{}, d *sch flattenPrivatecaCertificateConfigSubjectConfigSubjectCommonName(original["commonName"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateConfigSubjectConfigSubjectCountryCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubjectCountryCode(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigSubjectConfigSubjectOrganization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubjectOrganization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigSubjectConfigSubjectOrganizationalUnit(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubjectOrganizationalUnit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigSubjectConfigSubjectLocality(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubjectLocality(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigSubjectConfigSubjectProvince(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubjectProvince(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigSubjectConfigSubjectStreetAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubjectStreetAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigSubjectConfigSubjectPostalCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubjectPostalCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigSubjectConfigSubjectCommonName(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubjectCommonName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2702,23 +2739,23 @@ func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltName(v interface{}, flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameIpAddresses(original["ipAddresses"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameDnsNames(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameDnsNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameUris(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameEmailAddresses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameEmailAddresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameIpAddresses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameIpAddresses(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigPublicKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigPublicKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -2733,23 +2770,23 @@ func flattenPrivatecaCertificateConfigPublicKey(v interface{}, d *schema.Resourc flattenPrivatecaCertificateConfigPublicKeyFormat(original["format"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateConfigPublicKeyKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigPublicKeyKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateConfigPublicKeyFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateConfigPublicKeyFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandPrivatecaCertificateLifetime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateLifetime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateCertificateTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateCertificateTemplate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandPrivatecaCertificateLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil 
} @@ -2760,11 +2797,11 @@ func expandPrivatecaCertificateLabels(v interface{}, d TerraformResourceData, co return m, nil } -func expandPrivatecaCertificatePemCsr(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificatePemCsr(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2783,21 +2820,21 @@ func expandPrivatecaCertificateConfig(v interface{}, d TerraformResourceData, co transformedSubjectConfig, err := expandPrivatecaCertificateConfigSubjectConfig(original["subject_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSubjectConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSubjectConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["subjectConfig"] = transformedSubjectConfig } transformedPublicKey, err := expandPrivatecaCertificateConfigPublicKey(original["public_key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPublicKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPublicKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["publicKey"] = transformedPublicKey } return transformed, nil } -func expandPrivatecaCertificateConfigX509Config(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigX509Config(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return v, nil } @@ -2850,7 
+2887,7 @@ func expandPrivatecaCertificateConfigX509Config(v interface{}, d TerraformResour return transformed, nil } -func expandPrivatecaCertificateConfigSubjectConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2862,21 +2899,21 @@ func expandPrivatecaCertificateConfigSubjectConfig(v interface{}, d TerraformRes transformedSubject, err := expandPrivatecaCertificateConfigSubjectConfigSubject(original["subject"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSubject); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSubject); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["subject"] = transformedSubject } transformedSubjectAltName, err := expandPrivatecaCertificateConfigSubjectConfigSubjectAltName(original["subject_alt_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSubjectAltName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSubjectAltName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["subjectAltName"] = transformedSubjectAltName } return transformed, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfigSubject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2888,95 +2925,95 @@ func expandPrivatecaCertificateConfigSubjectConfigSubject(v interface{}, d Terra transformedCountryCode, err := 
expandPrivatecaCertificateConfigSubjectConfigSubjectCountryCode(original["country_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCountryCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCountryCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["countryCode"] = transformedCountryCode } transformedOrganization, err := expandPrivatecaCertificateConfigSubjectConfigSubjectOrganization(original["organization"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOrganization); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrganization); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["organization"] = transformedOrganization } transformedOrganizationalUnit, err := expandPrivatecaCertificateConfigSubjectConfigSubjectOrganizationalUnit(original["organizational_unit"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOrganizationalUnit); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrganizationalUnit); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["organizationalUnit"] = transformedOrganizationalUnit } transformedLocality, err := expandPrivatecaCertificateConfigSubjectConfigSubjectLocality(original["locality"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLocality); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLocality); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["locality"] = transformedLocality } transformedProvince, err := expandPrivatecaCertificateConfigSubjectConfigSubjectProvince(original["province"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProvince); val.IsValid() && !isEmptyValue(val) { + } else if val := 
reflect.ValueOf(transformedProvince); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["province"] = transformedProvince } transformedStreetAddress, err := expandPrivatecaCertificateConfigSubjectConfigSubjectStreetAddress(original["street_address"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStreetAddress); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStreetAddress); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["streetAddress"] = transformedStreetAddress } transformedPostalCode, err := expandPrivatecaCertificateConfigSubjectConfigSubjectPostalCode(original["postal_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPostalCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostalCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postalCode"] = transformedPostalCode } transformedCommonName, err := expandPrivatecaCertificateConfigSubjectConfigSubjectCommonName(original["common_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCommonName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCommonName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["commonName"] = transformedCommonName } return transformed, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubjectCountryCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfigSubjectCountryCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubjectOrganization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandPrivatecaCertificateConfigSubjectConfigSubjectOrganization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubjectOrganizationalUnit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfigSubjectOrganizationalUnit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubjectLocality(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfigSubjectLocality(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubjectProvince(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfigSubjectProvince(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubjectStreetAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfigSubjectStreetAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubjectPostalCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfigSubjectPostalCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubjectCommonName(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfigSubjectCommonName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubjectAltName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfigSubjectAltName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2988,51 +3025,51 @@ func expandPrivatecaCertificateConfigSubjectConfigSubjectAltName(v interface{}, transformedDnsNames, err := expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameDnsNames(original["dns_names"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDnsNames); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDnsNames); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dnsNames"] = transformedDnsNames } transformedUris, err := expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameUris(original["uris"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUris); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["uris"] = transformedUris } transformedEmailAddresses, err := expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameEmailAddresses(original["email_addresses"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEmailAddresses); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEmailAddresses); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["emailAddresses"] = transformedEmailAddresses } transformedIpAddresses, err := 
expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameIpAddresses(original["ip_addresses"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIpAddresses); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIpAddresses); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ipAddresses"] = transformedIpAddresses } return transformed, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameDnsNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameDnsNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameUris(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameEmailAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameEmailAddresses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameIpAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameIpAddresses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfigPublicKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandPrivatecaCertificateConfigPublicKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -3044,24 +3081,24 @@ func expandPrivatecaCertificateConfigPublicKey(v interface{}, d TerraformResourc transformedKey, err := expandPrivatecaCertificateConfigPublicKeyKey(original["key"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["key"] = transformedKey } transformedFormat, err := expandPrivatecaCertificateConfigPublicKeyFormat(original["format"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedFormat); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["format"] = transformedFormat } return transformed, nil } -func expandPrivatecaCertificateConfigPublicKeyKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigPublicKeyKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateConfigPublicKeyFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateConfigPublicKeyFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_privateca_certificate_authority.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_authority.go 
similarity index 83% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_privateca_certificate_authority.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_authority.go index b04bc4a826..451e7b2e52 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_privateca_certificate_authority.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_authority.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package privateca import ( "context" @@ -22,14 +25,19 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func resourcePrivateCaCACustomDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { if diff.HasChange("desired_state") { _, new := diff.GetChange("desired_state") - if isNewResource(diff) { + if tpgresource.IsNewResource(diff) { if diff.Get("type").(string) == "SUBORDINATE" { return fmt.Errorf("`desired_state` can not be specified when creating a SUBORDINATE CA") } @@ -45,11 +53,6 @@ func resourcePrivateCaCACustomDiff(_ context.Context, diff *schema.ResourceDiff, return nil } -func isNewResource(diff TerraformResourceDiff) bool { - name := 
diff.Get("name") - return name.(string) == "" -} - func ResourcePrivatecaCertificateAuthority() *schema.Resource { return &schema.Resource{ Create: resourcePrivatecaCertificateAuthorityCreate, @@ -67,7 +70,9 @@ func ResourcePrivatecaCertificateAuthority() *schema.Resource { Delete: schema.DefaultTimeout(20 * time.Minute), }, - CustomizeDiff: resourcePrivateCaCACustomDiff, + CustomizeDiff: customdiff.All( + resourcePrivateCaCACustomDiff, + ), Schema: map[string]*schema.Schema{ "certificate_authority_id": { @@ -239,7 +244,7 @@ requires setting 'zero_max_issuer_path_length = true'.`, Type: schema.TypeBool, Optional: true, ForceNew: true, - Description: `When true, the "CA" in Basic Constraints extension will be set to false. + Description: `When true, the "CA" in Basic Constraints extension will be set to false. If both 'is_ca' and 'non_ca' are unset, the extension will be omitted from the CA certificate.`, }, "zero_max_issuer_path_length": { @@ -600,7 +605,7 @@ certificate. Otherwise, it is used to sign a CSR.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"SIGN_HASH_ALGORITHM_UNSPECIFIED", "RSA_PSS_2048_SHA256", "RSA_PSS_3072_SHA256", "RSA_PSS_4096_SHA256", "RSA_PKCS1_2048_SHA256", "RSA_PKCS1_3072_SHA256", "RSA_PKCS1_4096_SHA256", "EC_P256_SHA256", "EC_P384_SHA384", ""}), + ValidateFunc: verify.ValidateEnum([]string{"SIGN_HASH_ALGORITHM_UNSPECIFIED", "RSA_PSS_2048_SHA256", "RSA_PSS_3072_SHA256", "RSA_PSS_4096_SHA256", "RSA_PKCS1_2048_SHA256", "RSA_PKCS1_3072_SHA256", "RSA_PKCS1_4096_SHA256", "EC_P256_SHA256", "EC_P384_SHA384", ""}), Description: `The algorithm to use for creating a managed Cloud KMS key for a for a simplified experience. All managed keys will be have their ProtectionLevel as HSM. 
Possible values: ["SIGN_HASH_ALGORITHM_UNSPECIFIED", "RSA_PSS_2048_SHA256", "RSA_PSS_3072_SHA256", "RSA_PSS_4096_SHA256", "RSA_PKCS1_2048_SHA256", "RSA_PKCS1_3072_SHA256", "RSA_PKCS1_4096_SHA256", "EC_P256_SHA256", "EC_P384_SHA384"]`, ExactlyOneOf: []string{"key_spec.0.cloud_kms_key_version", "key_spec.0.algorithm"}, @@ -689,7 +694,7 @@ with the subordinate configuration, which describes its issuers.`, "certificate_authority": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: compareResourceNames, + DiffSuppressFunc: tpgresource.CompareResourceNames, Description: `This can refer to a CertificateAuthority that was used to create a subordinate CertificateAuthority. This field is used for information and usability purposes only. The resource name is in the format @@ -699,7 +704,7 @@ and usability purposes only. The resource name is in the format "pem_issuer_chain": { Type: schema.TypeList, Optional: true, - Description: `Contains the PEM certificate chain for the issuers of this CertificateAuthority, + Description: `Contains the PEM certificate chain for the issuers of this CertificateAuthority, but not pem certificate for this CA itself.`, MaxItems: 1, Elem: &schema.Resource{ @@ -723,7 +728,7 @@ but not pem certificate for this CA itself.`, Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validateEnum([]string{"SELF_SIGNED", "SUBORDINATE", ""}), + ValidateFunc: verify.ValidateEnum([]string{"SELF_SIGNED", "SUBORDINATE", ""}), Description: `The Type of this CertificateAuthority. 
~> **Note:** For 'SUBORDINATE' Certificate Authorities, they need to @@ -816,8 +821,8 @@ in Terraform state, a 'terraform destroy' or 'terraform apply' that would delete } func resourcePrivatecaCertificateAuthorityCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -826,47 +831,47 @@ func resourcePrivatecaCertificateAuthorityCreate(d *schema.ResourceData, meta in typeProp, err := expandPrivatecaCertificateAuthorityType(d.Get("type"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { obj["type"] = typeProp } configProp, err := expandPrivatecaCertificateAuthorityConfig(d.Get("config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("config"); !isEmptyValue(reflect.ValueOf(configProp)) && (ok || !reflect.DeepEqual(v, configProp)) { + } else if v, ok := d.GetOkExists("config"); !tpgresource.IsEmptyValue(reflect.ValueOf(configProp)) && (ok || !reflect.DeepEqual(v, configProp)) { obj["config"] = configProp } lifetimeProp, err := expandPrivatecaCertificateAuthorityLifetime(d.Get("lifetime"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("lifetime"); !isEmptyValue(reflect.ValueOf(lifetimeProp)) && (ok || !reflect.DeepEqual(v, lifetimeProp)) { + } else if v, ok := d.GetOkExists("lifetime"); !tpgresource.IsEmptyValue(reflect.ValueOf(lifetimeProp)) && (ok || !reflect.DeepEqual(v, lifetimeProp)) { obj["lifetime"] = lifetimeProp } keySpecProp, err := expandPrivatecaCertificateAuthorityKeySpec(d.Get("key_spec"), d, config) if err 
!= nil { return err - } else if v, ok := d.GetOkExists("key_spec"); !isEmptyValue(reflect.ValueOf(keySpecProp)) && (ok || !reflect.DeepEqual(v, keySpecProp)) { + } else if v, ok := d.GetOkExists("key_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(keySpecProp)) && (ok || !reflect.DeepEqual(v, keySpecProp)) { obj["keySpec"] = keySpecProp } subordinateConfigProp, err := expandPrivatecaCertificateAuthoritySubordinateConfig(d.Get("subordinate_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("subordinate_config"); !isEmptyValue(reflect.ValueOf(subordinateConfigProp)) && (ok || !reflect.DeepEqual(v, subordinateConfigProp)) { + } else if v, ok := d.GetOkExists("subordinate_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(subordinateConfigProp)) && (ok || !reflect.DeepEqual(v, subordinateConfigProp)) { obj["subordinateConfig"] = subordinateConfigProp } gcsBucketProp, err := expandPrivatecaCertificateAuthorityGcsBucket(d.Get("gcs_bucket"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("gcs_bucket"); !isEmptyValue(reflect.ValueOf(gcsBucketProp)) && (ok || !reflect.DeepEqual(v, gcsBucketProp)) { + } else if v, ok := d.GetOkExists("gcs_bucket"); !tpgresource.IsEmptyValue(reflect.ValueOf(gcsBucketProp)) && (ok || !reflect.DeepEqual(v, gcsBucketProp)) { obj["gcsBucket"] = gcsBucketProp } labelsProp, err := expandPrivatecaCertificateAuthorityLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities?certificateAuthorityId={{certificate_authority_id}}") + url, err 
:= tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities?certificateAuthorityId={{certificate_authority_id}}") if err != nil { return err } @@ -874,27 +879,35 @@ func resourcePrivatecaCertificateAuthorityCreate(d *schema.ResourceData, meta in log.Printf("[DEBUG] Creating new CertificateAuthority: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for CertificateAuthority: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } // Drop `subordinateConfig` as it can not be set during CA creation. // It can be used to activate CA during post_create or pre_update. delete(obj, "subordinateConfig") - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating CertificateAuthority: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -926,7 +939,7 @@ func resourcePrivatecaCertificateAuthorityCreate(d *schema.ResourceData, meta in } // This may have caused the 
ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}") + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -962,33 +975,39 @@ func resourcePrivatecaCertificateAuthorityCreate(d *schema.ResourceData, meta in } func resourcePrivatecaCertificateAuthorityRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for CertificateAuthority: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return 
handleNotFoundError(err, d, fmt.Sprintf("PrivatecaCertificateAuthority %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("PrivatecaCertificateAuthority %q", d.Id())) } res, err = resourcePrivatecaCertificateAuthorityDecoder(d, meta, res) @@ -1057,15 +1076,15 @@ func resourcePrivatecaCertificateAuthorityRead(d *schema.ResourceData, meta inte } func resourcePrivatecaCertificateAuthorityUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for CertificateAuthority: %s", err) } @@ -1075,17 +1094,17 @@ func resourcePrivatecaCertificateAuthorityUpdate(d *schema.ResourceData, meta in subordinateConfigProp, err := expandPrivatecaCertificateAuthoritySubordinateConfig(d.Get("subordinate_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("subordinate_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, subordinateConfigProp)) { + } else if v, ok := d.GetOkExists("subordinate_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, subordinateConfigProp)) { obj["subordinateConfig"] = subordinateConfigProp } labelsProp, err := expandPrivatecaCertificateAuthorityLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } - url, err := replaceVars(d, config, 
"{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}") if err != nil { return err } @@ -1100,9 +1119,9 @@ func resourcePrivatecaCertificateAuthorityUpdate(d *schema.ResourceData, meta in if d.HasChange("labels") { updateMask = append(updateMask, "labels") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } @@ -1151,11 +1170,19 @@ func resourcePrivatecaCertificateAuthorityUpdate(d *schema.ResourceData, meta in } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating CertificateAuthority %q: %s", d.Id(), err) @@ -1175,21 +1202,21 @@ func resourcePrivatecaCertificateAuthorityUpdate(d *schema.ResourceData, meta in } func resourcePrivatecaCertificateAuthorityDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for CertificateAuthority: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}?ignoreActiveCertificates={{ignore_active_certificates_on_deletion}}&skipGracePeriod={{skip_grace_period}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}?ignoreActiveCertificates={{ignore_active_certificates_on_deletion}}&skipGracePeriod={{skip_grace_period}}") if err != nil { return err } @@ -1200,14 +1227,20 @@ func resourcePrivatecaCertificateAuthorityDelete(d *schema.ResourceData, meta in } if d.Get("state").(string) == "ENABLED" { - disableUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:disable") + disableUrl, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:disable") if err != nil { return err } log.Printf("[DEBUG] Disabling CertificateAuthority: %#v", obj) - dRes, err := SendRequest(config, "POST", billingProject, disableUrl, userAgent, nil) + dRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: disableUrl, + UserAgent: userAgent, + }) if err != nil { return fmt.Errorf("Error disabling 
CertificateAuthority: %s", err) } @@ -1223,13 +1256,21 @@ func resourcePrivatecaCertificateAuthorityDelete(d *schema.ResourceData, meta in log.Printf("[DEBUG] Deleting CertificateAuthority %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "CertificateAuthority") + return transport_tpg.HandleNotFoundError(err, d, "CertificateAuthority") } err = PrivatecaOperationWaitTime( @@ -1245,8 +1286,8 @@ func resourcePrivatecaCertificateAuthorityDelete(d *schema.ResourceData, meta in } func resourcePrivatecaCertificateAuthorityImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/caPools/(?P[^/]+)/certificateAuthorities/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", @@ -1255,7 +1296,7 @@ func resourcePrivatecaCertificateAuthorityImport(d *schema.ResourceData, meta in } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}") if err != nil 
{ return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -1272,15 +1313,15 @@ func resourcePrivatecaCertificateAuthorityImport(d *schema.ResourceData, meta in return []*schema.ResourceData{d}, nil } -func flattenPrivatecaCertificateAuthorityName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1296,7 +1337,7 @@ func flattenPrivatecaCertificateAuthorityConfig(v interface{}, d *schema.Resourc return []interface{}{transformed} } -func flattenPrivatecaCertificateAuthorityConfigX509Config(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigX509Config(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { v = make(map[string]interface{}) } @@ -1316,7 +1357,7 @@ func flattenPrivatecaCertificateAuthorityConfigX509Config(v interface{}, d *sche return []interface{}{transformed} } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1331,7 +1372,7 @@ func flattenPrivatecaCertificateAuthorityConfigSubjectConfig(v interface{}, d *s 
flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltName(original["subjectAltName"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubject(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1358,39 +1399,39 @@ func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubject(v interface{ flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCommonName(original["commonName"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCountryCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCountryCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganization(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganizationalUnit(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganizationalUnit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectLocality(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectLocality(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ return v } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectProvince(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectProvince(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectStreetAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectStreetAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectPostalCode(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectPostalCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCommonName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCommonName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1409,27 +1450,27 @@ func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltName(v int flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameIpAddresses(original["ipAddresses"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameDnsNames(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameDnsNames(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameUris(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameUris(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameEmailAddresses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameEmailAddresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameIpAddresses(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameIpAddresses(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityLifetime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityLifetime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityKeySpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityKeySpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1444,15 +1485,15 @@ func flattenPrivatecaCertificateAuthorityKeySpec(v interface{}, d *schema.Resour flattenPrivatecaCertificateAuthorityKeySpecAlgorithm(original["algorithm"], d, config) return 
[]interface{}{transformed} } -func flattenPrivatecaCertificateAuthorityKeySpecCloudKmsKeyVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityKeySpecCloudKmsKeyVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityKeySpecAlgorithm(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityKeySpecAlgorithm(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthoritySubordinateConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthoritySubordinateConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1467,11 +1508,11 @@ func flattenPrivatecaCertificateAuthoritySubordinateConfig(v interface{}, d *sch flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(original["pemIssuerChain"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateAuthoritySubordinateConfigCertificateAuthority(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthoritySubordinateConfigCertificateAuthority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1484,23 +1525,23 @@ func flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(v inter 
flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(original["pemCertificates"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityPemCaCertificates(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityPemCaCertificates(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityGcsBucket(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityGcsBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityAccessUrls(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityAccessUrls(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1515,31 +1556,31 @@ func flattenPrivatecaCertificateAuthorityAccessUrls(v interface{}, d *schema.Res flattenPrivatecaCertificateAuthorityAccessUrlsCrlAccessUrls(original["crlAccessUrls"], d, config) return []interface{}{transformed} } -func flattenPrivatecaCertificateAuthorityAccessUrlsCaCertificateAccessUrl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenPrivatecaCertificateAuthorityAccessUrlsCaCertificateAccessUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityAccessUrlsCrlAccessUrls(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityAccessUrlsCrlAccessUrls(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPrivatecaCertificateAuthorityLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPrivatecaCertificateAuthorityLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandPrivatecaCertificateAuthorityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1551,21 +1592,21 @@ func expandPrivatecaCertificateAuthorityConfig(v interface{}, d 
TerraformResourc transformedX509Config, err := expandPrivatecaCertificateAuthorityConfigX509Config(original["x509_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedX509Config); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedX509Config); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["x509Config"] = transformedX509Config } transformedSubjectConfig, err := expandPrivatecaCertificateAuthorityConfigSubjectConfig(original["subject_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSubjectConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSubjectConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["subjectConfig"] = transformedSubjectConfig } return transformed, nil } -func expandPrivatecaCertificateAuthorityConfigX509Config(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigX509Config(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return v, nil } @@ -1618,7 +1659,7 @@ func expandPrivatecaCertificateAuthorityConfigX509Config(v interface{}, d Terraf return transformed, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1630,21 +1671,21 @@ func expandPrivatecaCertificateAuthorityConfigSubjectConfig(v interface{}, d Ter transformedSubject, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubject(original["subject"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedSubject); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSubject); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["subject"] = transformedSubject } transformedSubjectAltName, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltName(original["subject_alt_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSubjectAltName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSubjectAltName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["subjectAltName"] = transformedSubjectAltName } return transformed, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1656,95 +1697,95 @@ func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubject(v interface{} transformedCountryCode, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCountryCode(original["country_code"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCountryCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCountryCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["countryCode"] = transformedCountryCode } transformedOrganization, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganization(original["organization"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOrganization); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrganization); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { transformed["organization"] = transformedOrganization } transformedOrganizationalUnit, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganizationalUnit(original["organizational_unit"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOrganizationalUnit); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOrganizationalUnit); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["organizationalUnit"] = transformedOrganizationalUnit } transformedLocality, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectLocality(original["locality"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedLocality); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedLocality); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["locality"] = transformedLocality } transformedProvince, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectProvince(original["province"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedProvince); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedProvince); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["province"] = transformedProvince } transformedStreetAddress, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectStreetAddress(original["street_address"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStreetAddress); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStreetAddress); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["streetAddress"] = transformedStreetAddress } transformedPostalCode, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectPostalCode(original["postal_code"], d, config) if err != nil 
{ return nil, err - } else if val := reflect.ValueOf(transformedPostalCode); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPostalCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["postalCode"] = transformedPostalCode } transformedCommonName, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCommonName(original["common_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCommonName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCommonName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["commonName"] = transformedCommonName } return transformed, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCountryCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCountryCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganizationalUnit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganizationalUnit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectLocality(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectLocality(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectProvince(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectProvince(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectStreetAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectStreetAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectPostalCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectPostalCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCommonName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCommonName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == 
nil { return nil, nil @@ -1756,55 +1797,55 @@ func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltName(v inte transformedDnsNames, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameDnsNames(original["dns_names"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDnsNames); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDnsNames); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dnsNames"] = transformedDnsNames } transformedUris, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameUris(original["uris"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUris); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUris); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["uris"] = transformedUris } transformedEmailAddresses, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameEmailAddresses(original["email_addresses"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedEmailAddresses); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedEmailAddresses); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["emailAddresses"] = transformedEmailAddresses } transformedIpAddresses, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameIpAddresses(original["ip_addresses"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIpAddresses); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedIpAddresses); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ipAddresses"] = transformedIpAddresses } return transformed, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameDnsNames(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameDnsNames(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameUris(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameUris(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameEmailAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameEmailAddresses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameIpAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameIpAddresses(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityLifetime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityLifetime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityKeySpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityKeySpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, 
nil @@ -1816,29 +1857,29 @@ func expandPrivatecaCertificateAuthorityKeySpec(v interface{}, d TerraformResour transformedCloudKmsKeyVersion, err := expandPrivatecaCertificateAuthorityKeySpecCloudKmsKeyVersion(original["cloud_kms_key_version"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCloudKmsKeyVersion); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCloudKmsKeyVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["cloudKmsKeyVersion"] = transformedCloudKmsKeyVersion } transformedAlgorithm, err := expandPrivatecaCertificateAuthorityKeySpecAlgorithm(original["algorithm"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAlgorithm); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAlgorithm); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["algorithm"] = transformedAlgorithm } return transformed, nil } -func expandPrivatecaCertificateAuthorityKeySpecCloudKmsKeyVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityKeySpecCloudKmsKeyVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityKeySpecAlgorithm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityKeySpecAlgorithm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthoritySubordinateConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthoritySubordinateConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || 
l[0] == nil { return nil, nil @@ -1850,25 +1891,25 @@ func expandPrivatecaCertificateAuthoritySubordinateConfig(v interface{}, d Terra transformedCertificateAuthority, err := expandPrivatecaCertificateAuthoritySubordinateConfigCertificateAuthority(original["certificate_authority"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCertificateAuthority); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCertificateAuthority); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["certificateAuthority"] = transformedCertificateAuthority } transformedPemIssuerChain, err := expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(original["pem_issuer_chain"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPemIssuerChain); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPemIssuerChain); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pemIssuerChain"] = transformedPemIssuerChain } return transformed, nil } -func expandPrivatecaCertificateAuthoritySubordinateConfigCertificateAuthority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthoritySubordinateConfigCertificateAuthority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1880,22 +1921,22 @@ func expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChain(v interf transformedPemCertificates, err := 
expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(original["pem_certificates"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPemCertificates); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPemCertificates); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pemCertificates"] = transformedPemCertificates } return transformed, nil } -func expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthoritySubordinateConfigPemIssuerChainPemCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityGcsBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPrivatecaCertificateAuthorityGcsBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPrivatecaCertificateAuthorityLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandPrivatecaCertificateAuthorityLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_authority_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_authority_sweeper.go new file mode 100644 index 0000000000..3e64387df6 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_authority_sweeper.go @@ -0,0 +1,152 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package privateca + +import ( + "context" + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CertificateAuthority", testSweepCertificateAuthority) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCertificateAuthority(region string) error { + resourceName := "CertificateAuthority" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "location": region, + }, + } + + caPoolsUrl, err := tpgresource.ReplaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools") + if err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: caPoolsUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", caPoolsUrl, err) + return nil + } + + resourceList, ok := res["caPools"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found 
in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Count items that weren't sweeped. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + + poolName := obj["name"].(string) + + caListUrl := config.PrivatecaBasePath + poolName + "/certificateAuthorities" + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: caListUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", caPoolsUrl, err) + return nil + } + + caResourceList, ok := res["certificateAuthorities"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in certificate authority list response.") + continue + } + + carl := caResourceList.([]interface{}) + for _, cai := range carl { + obj := cai.(map[string]interface{}) + caName := obj["name"].(string) + + // Increment count and skip if resource is not sweepable. 
+ nameParts := strings.Split(caName, "/") + id := nameParts[len(nameParts)-1] + if !sweeper.IsSweepableTestResource(id) { + nonPrefixCount++ + continue + } + + if obj["state"] == "DELETED" { + continue + } + + if obj["state"] == "ENABLED" { + disableUrl := fmt.Sprintf("%s%s:disable", config.PrivatecaBasePath, caName) + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: config.Project, + RawURL: disableUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error disabling for url %s : %s", disableUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Disabling %s resource: %s", resourceName, caName) + } + } + + deleteUrl := config.PrivatecaBasePath + caName + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Deleted a %s resource: %s", resourceName, caName) + } + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items without tf-test prefix remain.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_privateca_certificate_template.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_template.go similarity index 93% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_privateca_certificate_template.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_template.go index 7fb2fb6c1c..72e7f71435 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_privateca_certificate_template.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_template.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package privateca import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" privateca "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourcePrivatecaCertificateTemplate() *schema.Resource { @@ -101,7 +108,7 @@ func ResourcePrivatecaCertificateTemplate() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -467,8 +474,8 @@ func PrivatecaCertificateTemplatePredefinedValuesPolicyIdsSchema() *schema.Resou } func resourcePrivatecaCertificateTemplateCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -478,7 +485,7 @@ func resourcePrivatecaCertificateTemplateCreate(d *schema.ResourceData, meta int Name: dcl.String(d.Get("name").(string)), 
Description: dcl.String(d.Get("description").(string)), IdentityConstraints: expandPrivatecaCertificateTemplateIdentityConstraints(d.Get("identity_constraints")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), PassthroughExtensions: expandPrivatecaCertificateTemplatePassthroughExtensions(d.Get("passthrough_extensions")), PredefinedValues: expandPrivatecaCertificateTemplatePredefinedValues(d.Get("predefined_values")), Project: dcl.String(project), @@ -489,18 +496,18 @@ func resourcePrivatecaCertificateTemplateCreate(d *schema.ResourceData, meta int return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -522,8 +529,8 @@ func resourcePrivatecaCertificateTemplateCreate(d *schema.ResourceData, meta int } func resourcePrivatecaCertificateTemplateRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) 
if err != nil { return err } @@ -533,23 +540,23 @@ func resourcePrivatecaCertificateTemplateRead(d *schema.ResourceData, meta inter Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), IdentityConstraints: expandPrivatecaCertificateTemplateIdentityConstraints(d.Get("identity_constraints")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), PassthroughExtensions: expandPrivatecaCertificateTemplatePassthroughExtensions(d.Get("passthrough_extensions")), PredefinedValues: expandPrivatecaCertificateTemplatePredefinedValues(d.Get("predefined_values")), Project: dcl.String(project), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -558,7 +565,7 @@ func resourcePrivatecaCertificateTemplateRead(d *schema.ResourceData, meta inter res, err := client.GetCertificateTemplate(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("PrivatecaCertificateTemplate %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("location", res.Location); err != 
nil { @@ -595,8 +602,8 @@ func resourcePrivatecaCertificateTemplateRead(d *schema.ResourceData, meta inter return nil } func resourcePrivatecaCertificateTemplateUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -606,24 +613,24 @@ func resourcePrivatecaCertificateTemplateUpdate(d *schema.ResourceData, meta int Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), IdentityConstraints: expandPrivatecaCertificateTemplateIdentityConstraints(d.Get("identity_constraints")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), PassthroughExtensions: expandPrivatecaCertificateTemplatePassthroughExtensions(d.Get("passthrough_extensions")), PredefinedValues: expandPrivatecaCertificateTemplatePredefinedValues(d.Get("predefined_values")), Project: dcl.String(project), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", 
client.Config.BasePath, err) } else { @@ -645,8 +652,8 @@ func resourcePrivatecaCertificateTemplateUpdate(d *schema.ResourceData, meta int } func resourcePrivatecaCertificateTemplateDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -656,24 +663,24 @@ func resourcePrivatecaCertificateTemplateDelete(d *schema.ResourceData, meta int Name: dcl.String(d.Get("name").(string)), Description: dcl.String(d.Get("description").(string)), IdentityConstraints: expandPrivatecaCertificateTemplateIdentityConstraints(d.Get("identity_constraints")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), PassthroughExtensions: expandPrivatecaCertificateTemplatePassthroughExtensions(d.Get("passthrough_extensions")), PredefinedValues: expandPrivatecaCertificateTemplatePredefinedValues(d.Get("predefined_values")), Project: dcl.String(project), } log.Printf("[DEBUG] Deleting CertificateTemplate %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", 
client.Config.BasePath, err) } else { @@ -688,9 +695,9 @@ func resourcePrivatecaCertificateTemplateDelete(d *schema.ResourceData, meta int } func resourcePrivatecaCertificateTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/certificateTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -699,7 +706,7 @@ func resourcePrivatecaCertificateTemplateImport(d *schema.ResourceData, meta int } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/certificateTemplates/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/certificateTemplates/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -823,7 +830,7 @@ func expandPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensions obj := o.(map[string]interface{}) return &privateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions{ - ObjectIdPath: expandIntegerArray(obj["object_id_path"]), + ObjectIdPath: tpgdclresource.ExpandIntegerArray(obj["object_id_path"]), } } @@ -864,7 +871,7 @@ func expandPrivatecaCertificateTemplatePredefinedValues(o interface{}) *privatec obj := objArr[0].(map[string]interface{}) return &privateca.CertificateTemplatePredefinedValues{ AdditionalExtensions: expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsArray(obj["additional_extensions"]), - AiaOcspServers: expandStringArray(obj["aia_ocsp_servers"]), + AiaOcspServers: tpgdclresource.ExpandStringArray(obj["aia_ocsp_servers"]), CaOptions: expandPrivatecaCertificateTemplatePredefinedValuesCaOptions(obj["ca_options"]), KeyUsage: 
expandPrivatecaCertificateTemplatePredefinedValuesKeyUsage(obj["key_usage"]), PolicyIds: expandPrivatecaCertificateTemplatePredefinedValuesPolicyIdsArray(obj["policy_ids"]), @@ -956,7 +963,7 @@ func expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsObjec } obj := objArr[0].(map[string]interface{}) return &privateca.CertificateTemplatePredefinedValuesAdditionalExtensionsObjectId{ - ObjectIdPath: expandIntegerArray(obj["object_id_path"]), + ObjectIdPath: tpgdclresource.ExpandIntegerArray(obj["object_id_path"]), } } @@ -1133,7 +1140,7 @@ func expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKe obj := o.(map[string]interface{}) return &privateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages{ - ObjectIdPath: expandIntegerArray(obj["object_id_path"]), + ObjectIdPath: tpgdclresource.ExpandIntegerArray(obj["object_id_path"]), } } @@ -1188,7 +1195,7 @@ func expandPrivatecaCertificateTemplatePredefinedValuesPolicyIds(o interface{}) obj := o.(map[string]interface{}) return &privateca.CertificateTemplatePredefinedValuesPolicyIds{ - ObjectIdPath: expandIntegerArray(obj["object_id_path"]), + ObjectIdPath: tpgdclresource.ExpandIntegerArray(obj["object_id_path"]), } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_template_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_template_sweeper.go new file mode 100644 index 0000000000..7760e82f69 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/privateca/resource_privateca_certificate_template_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package privateca + +import ( + "context" + "log" + "testing" + + privateca "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("PrivatecaCertificateTemplate", testSweepPrivatecaCertificateTemplate) +} + +func testSweepPrivatecaCertificateTemplate(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for PrivatecaCertificateTemplate") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLPrivatecaClient(config, config.UserAgent, "", 0) + err = client.DeleteAllCertificateTemplate(context.Background(), d["project"], d["location"], isDeletablePrivatecaCertificateTemplate) + if err != nil { + return err + } + return nil +} + +func isDeletablePrivatecaCertificateTemplate(r *privateca.CertificateTemplate) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/publicca/resource_public_ca_external_account_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/publicca/resource_public_ca_external_account_key.go new file mode 100644 index 0000000000..e67897fa13 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/publicca/resource_public_ca_external_account_key.go @@ -0,0 +1,165 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package publicca + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourcePublicCAExternalAccountKey() *schema.Resource { + return &schema.Resource{ + Create: resourcePublicCAExternalAccountKeyCreate, + Read: resourcePublicCAExternalAccountKeyRead, + Delete: resourcePublicCAExternalAccountKeyDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Location for the externalAccountKey. Currently only 'global' is supported.`, + Default: "global", + }, + "b64_mac_key": { + Type: schema.TypeString, + Computed: true, + Description: `Base64-URL-encoded HS256 key. It is generated by the PublicCertificateAuthorityService +when the ExternalAccountKey is created.`, + Sensitive: true, + }, + "key_id": { + Type: schema.TypeString, + Computed: true, + Description: `It is generated by the PublicCertificateAuthorityService when the ExternalAccountKey is created.`, + Sensitive: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Resource name. 
projects/{project}/locations/{location}/externalAccountKeys/{keyId}.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourcePublicCAExternalAccountKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + + url, err := tpgresource.ReplaceVars(d, config, "{{PublicCABasePath}}projects/{{project}}/locations/{{location}}/externalAccountKeys") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ExternalAccountKey: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ExternalAccountKey: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ExternalAccountKey: %s", err) + } + if err := d.Set("name", flattenPublicCAExternalAccountKeyName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + if err := d.Set("key_id", flattenPublicCAExternalAccountKeyKeyId(res["keyId"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "key_id": %s`, err) + } + if err := d.Set("b64_mac_key", flattenPublicCAExternalAccountKeyB64MacKey(res["b64MacKey"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field 
"b64_mac_key": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ExternalAccountKey %q: %#v", d.Id(), res) + + return resourcePublicCAExternalAccountKeyRead(d, meta) +} + +func resourcePublicCAExternalAccountKeyRead(d *schema.ResourceData, meta interface{}) error { + // This resource could not be read from the API. + return nil +} + +func resourcePublicCAExternalAccountKeyDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] PublicCA ExternalAccountKey resources"+ + " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func flattenPublicCAExternalAccountKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPublicCAExternalAccountKeyKeyId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPublicCAExternalAccountKeyB64MacKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/data_source_pubsub_subscription.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/data_source_pubsub_subscription.go new file mode 100644 index 0000000000..6896baa224 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/data_source_pubsub_subscription.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package pubsub + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGooglePubsubSubscription() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourcePubsubSubscription().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGooglePubsubSubscriptionRead, + Schema: dsSchema, + } +} + +func dataSourceGooglePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/subscriptions/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return resourcePubsubSubscriptionRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/data_source_pubsub_topic.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/data_source_pubsub_topic.go new file mode 100644 index 0000000000..7cbd0de400 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/data_source_pubsub_topic.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package pubsub + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGooglePubsubTopic() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourcePubsubTopic().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGooglePubsubTopicRead, + Schema: dsSchema, + } +} + +func dataSourceGooglePubsubTopicRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/topics/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return resourcePubsubTopicRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/iam_pubsub_subscription.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/iam_pubsub_subscription.go new file mode 100644 index 0000000000..cbc96b7b67 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/iam_pubsub_subscription.go @@ -0,0 +1,131 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package pubsub + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/pubsub/v1" +) + +var IamPubsubSubscriptionSchema = map[string]*schema.Schema{ + "subscription": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, +} + +type PubsubSubscriptionIamUpdater struct { + subscription string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewPubsubSubscriptionIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + subscription := GetComputedSubscriptionName(project, d.Get("subscription").(string)) + + return &PubsubSubscriptionIamUpdater{ + subscription: subscription, + d: d, + Config: config, + }, nil +} + +func PubsubSubscriptionIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { + if err := d.Set("subscription", d.Id()); err != nil { + return fmt.Errorf("Error setting subscription: %s", err) + } + return nil +} + +func (u *PubsubSubscriptionIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewPubsubClient(userAgent).Projects.Subscriptions.GetIamPolicy(u.subscription).Do() + + if err != nil { + return nil, 
errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + v1Policy, err := pubsubToResourceManagerPolicy(p) + if err != nil { + return nil, err + } + + return v1Policy, nil +} + +func (u *PubsubSubscriptionIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + pubsubPolicy, err := resourceManagerToPubsubPolicy(policy) + if err != nil { + return err + } + + _, err = u.Config.NewPubsubClient(userAgent).Projects.Subscriptions.SetIamPolicy(u.subscription, &pubsub.SetIamPolicyRequest{ + Policy: pubsubPolicy, + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *PubsubSubscriptionIamUpdater) GetResourceId() string { + return u.subscription +} + +func (u *PubsubSubscriptionIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-pubsub-subscription-%s", u.subscription) +} + +func (u *PubsubSubscriptionIamUpdater) DescribeResource() string { + return fmt.Sprintf("pubsub subscription %q", u.subscription) +} + +// v1 and v2 policy are identical +func resourceManagerToPubsubPolicy(in *cloudresourcemanager.Policy) (*pubsub.Policy, error) { + out := &pubsub.Policy{} + err := tpgresource.Convert(in, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a v1 policy to a pubsub policy: {{err}}", err) + } + return out, nil +} + +func pubsubToResourceManagerPolicy(in *pubsub.Policy) (*cloudresourcemanager.Policy, error) { + out := &cloudresourcemanager.Policy{} + err := tpgresource.Convert(in, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a pubsub policy to a v1 policy: {{err}}", err) + } + return out, nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/iam_pubsub_topic.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/iam_pubsub_topic.go new file mode 100644 index 0000000000..2e50863c46 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/iam_pubsub_topic.go @@ -0,0 +1,223 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package pubsub + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var PubsubTopicIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "topic": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type PubsubTopicIamUpdater struct { + project string + topic string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func PubsubTopicIamUpdaterProducer(d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("topic"); ok { + values["topic"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/topics/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("topic").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &PubsubTopicIamUpdater{ + project: values["project"], + topic: values["topic"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("topic", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting topic: %s", err) + } + + return u, nil +} + +func PubsubTopicIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/topics/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &PubsubTopicIamUpdater{ + project: values["project"], + topic: values["topic"], + d: d, + Config: config, + } + if err := d.Set("topic", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting topic: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *PubsubTopicIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := 
u.qualifyTopicUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.PubsubTopicProjectNotReady}, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *PubsubTopicIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyTopicUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.PubsubTopicProjectNotReady}, + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + 
} + + return nil +} + +func (u *PubsubTopicIamUpdater) qualifyTopicUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{PubsubBasePath}}%s:%s", fmt.Sprintf("projects/%s/topics/%s", u.project, u.topic), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *PubsubTopicIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/topics/%s", u.project, u.topic) +} + +func (u *PubsubTopicIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-pubsub-topic-%s", u.GetResourceId()) +} + +func (u *PubsubTopicIamUpdater) DescribeResource() string { + return fmt.Sprintf("pubsub topic %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/pubsub_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/pubsub_utils.go new file mode 100644 index 0000000000..53088790e4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/pubsub_utils.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package pubsub + +import ( + "fmt" + "regexp" +) + +const PubsubTopicRegex = "projects\\/.*\\/topics\\/.*" + +func GetComputedSubscriptionName(project, subscription string) string { + match, _ := regexp.MatchString("projects\\/.*\\/subscriptions\\/.*", subscription) + if match { + return subscription + } + return fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription) +} + +func GetComputedTopicName(project, topic string) string { + match, _ := regexp.MatchString(PubsubTopicRegex, topic) + if match { + return topic + } + return fmt.Sprintf("projects/%s/topics/%s", project, topic) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_schema.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_schema.go new file mode 100644 index 0000000000..49340a895c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_schema.go @@ -0,0 +1,422 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package pubsub + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourcePubsubSchema() *schema.Resource { + return &schema.Resource{ + Create: resourcePubsubSchemaCreate, + Read: resourcePubsubSchemaRead, + Update: resourcePubsubSchemaUpdate, + Delete: resourcePubsubSchemaDelete, + + Importer: &schema.ResourceImporter{ + State: resourcePubsubSchemaImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The ID to use for the schema, which will become the final component of the schema's resource name.`, + }, + "definition": { + Type: schema.TypeString, + Optional: true, + Description: `The definition of the schema. 
+This should contain a string representing the full definition of the schema +that is a valid schema definition of the type specified in type.`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"TYPE_UNSPECIFIED", "PROTOCOL_BUFFER", "AVRO", ""}), + Description: `The type of the schema definition Default value: "TYPE_UNSPECIFIED" Possible values: ["TYPE_UNSPECIFIED", "PROTOCOL_BUFFER", "AVRO"]`, + Default: "TYPE_UNSPECIFIED", + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourcePubsubSchemaCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + typeProp, err := expandPubsubSchemaType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + definitionProp, err := expandPubsubSchemaDefinition(d.Get("definition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("definition"); !tpgresource.IsEmptyValue(reflect.ValueOf(definitionProp)) && (ok || !reflect.DeepEqual(v, definitionProp)) { + obj["definition"] = definitionProp + } + nameProp, err := expandPubsubSchemaName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/schemas?schemaId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Schema: %#v", obj) + billingProject := "" + 
+ project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Schema: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Schema: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/schemas/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Schema %q: %#v", d.Id(), res) + + return resourcePubsubSchemaRead(d, meta) +} + +func resourcePubsubSchemaPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/schemas/{{name}}") + if err != nil { + return nil, err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, fmt.Errorf("Error fetching project for Schema: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != 
nil { + return res, err + } + return res, nil + } +} + +func resourcePubsubSchemaRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/schemas/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Schema: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("PubsubSchema %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Schema: %s", err) + } + + if err := d.Set("type", flattenPubsubSchemaType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading Schema: %s", err) + } + if err := d.Set("definition", flattenPubsubSchemaDefinition(res["definition"], d, config)); err != nil { + return fmt.Errorf("Error reading Schema: %s", err) + } + if err := d.Set("name", flattenPubsubSchemaName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Schema: %s", err) + } + + return nil +} + +func resourcePubsubSchemaUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if 
err != nil { + return fmt.Errorf("Error fetching project for Schema: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + typeProp, err := expandPubsubSchemaType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + definitionProp, err := expandPubsubSchemaDefinition(d.Get("definition"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("definition"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, definitionProp)) { + obj["definition"] = definitionProp + } + nameProp, err := expandPubsubSchemaName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + obj, err = resourcePubsubSchemaUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/schemas/{{name}}:commit") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Schema %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Schema %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Schema %q: %#v", d.Id(), res) + } + + return resourcePubsubSchemaRead(d, meta) +} + +func resourcePubsubSchemaDelete(d *schema.ResourceData, meta interface{}) 
error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Schema: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/schemas/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Schema %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Schema") + } + + err = transport_tpg.PollingWaitTime(resourcePubsubSchemaPollRead(d, meta), transport_tpg.PollCheckForAbsence, "Deleting Schema", d.Timeout(schema.TimeoutCreate), 10) + if err != nil { + return fmt.Errorf("Error waiting to delete Schema: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Schema %q: %#v", d.Id(), res) + return nil +} + +func resourcePubsubSchemaImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/schemas/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/schemas/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return 
[]*schema.ResourceData{d}, nil +} + +func flattenPubsubSchemaType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPubsubSchemaDefinition(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPubsubSchemaName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func expandPubsubSchemaType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubSchemaDefinition(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubSchemaName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.GetResourceNameFromSelfLink(v.(string)), nil +} + +func resourcePubsubSchemaUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + newObj := make(map[string]interface{}) + newObj["name"] = d.Id() + obj["name"] = d.Id() + newObj["schema"] = obj + return newObj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_schema_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_schema_sweeper.go new file mode 100644 index 0000000000..9b59797a4d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_schema_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package pubsub + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("PubsubSchema", testSweepPubsubSchema) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepPubsubSchema(region string) error { + resourceName := "PubsubSchema" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://pubsub.googleapis.com/v1/projects/{{project}}/schemas", 
"?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["schemas"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://pubsub.googleapis.com/v1/projects/{{project}}/schemas/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, 
name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_subscription.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_subscription.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_subscription.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_subscription.go index 5bc79a9f4a..138eaedbd9 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_pubsub_subscription.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_subscription.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package pubsub import ( "fmt" @@ -23,6 +26,9 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func comparePubsubSubscriptionExpirationPolicy(_, old, new string, _ *schema.ResourceData) bool { @@ -65,7 +71,7 @@ func ResourcePubsubSubscription() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `A reference to a Topic resource.`, }, "ack_deadline_seconds": { @@ -147,7 +153,7 @@ permission to Acknowledge() messages on this subscription.`, Format is 'projects/{project}/topics/{topic}'. The Cloud Pub/Sub service account associated with the enclosing subscription's -parent project (i.e., +parent project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have permission to Publish() to this topic. @@ -161,7 +167,7 @@ since messages published to a topic with no subscriptions are lost.`, Description: `The maximum number of delivery attempts for any message. The value must be between 5 and 100. -The number of delivery attempts is defined as 1 + (the sum of number of +The number of delivery attempts is defined as 1 + (the sum of number of NACKs and number of times the acknowledgement deadline has been exceeded for the message). A NACK is any call to ModifyAckDeadline with a 0 deadline. 
Note that @@ -215,7 +221,7 @@ is 1 day.`, DiffSuppressFunc: comparePubsubSubscriptionExpirationPolicy, Description: `Specifies the "time-to-live" duration for an associated resource. The resource expires if it is not active for a period of ttl. -If ttl is not set, the associated resource never expires. +If ttl is set to "", the associated resource never expires. A duration in seconds with up to nine fractional digits, terminated by 's'. Example - "3.5s".`, }, @@ -226,9 +232,9 @@ Example - "3.5s".`, Type: schema.TypeString, Optional: true, ForceNew: true, - Description: `The subscription only delivers the messages that match the filter. + Description: `The subscription only delivers the messages that match the filter. Pub/Sub automatically acknowledges the messages that don't match the filter. You can filter messages -by their attributes. The maximum length of a filter is 256 bytes. After creating the subscription, +by their attributes. The maximum length of a filter is 256 bytes. After creating the subscription, you can't modify the filter.`, }, "labels": { @@ -270,7 +276,7 @@ For example, a Webhook endpoint might use "attributes": { Type: schema.TypeMap, Optional: true, - DiffSuppressFunc: ignoreMissingKeyInMap("x-goog-version"), + DiffSuppressFunc: tpgresource.IgnoreMissingKeyInMap("x-goog-version"), Description: `Endpoint configuration attributes. Every endpoint has a set of API supported attributes that can @@ -342,7 +348,7 @@ messageRetentionDuration window.`, Optional: true, Description: `A policy that specifies how Pub/Sub retries message delivery for this subscription. -If not set, the default retry policy is applied. This generally implies that messages will be retried as soon as possible for healthy subscribers. +If not set, the default retry policy is applied. This generally implies that messages will be retried as soon as possible for healthy subscribers. 
RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded events for a given message`, MaxItems: 1, Elem: &schema.Resource{ @@ -351,15 +357,15 @@ RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded even Type: schema.TypeString, Computed: true, Optional: true, - DiffSuppressFunc: durationDiffSuppress, - Description: `The maximum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 600 seconds. + DiffSuppressFunc: tpgresource.DurationDiffSuppress, + Description: `The maximum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 600 seconds. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, }, "minimum_backoff": { Type: schema.TypeString, Computed: true, Optional: true, - DiffSuppressFunc: durationDiffSuppress, + DiffSuppressFunc: tpgresource.DurationDiffSuppress, Description: `The minimum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 10 seconds. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, }, @@ -378,8 +384,8 @@ A duration in seconds with up to nine fractional digits, terminated by 's'. 
Exam } func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -388,49 +394,49 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) nameProp, err := expandPubsubSubscriptionName(d.Get("name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { obj["name"] = nameProp } topicProp, err := expandPubsubSubscriptionTopic(d.Get("topic"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("topic"); !isEmptyValue(reflect.ValueOf(topicProp)) && (ok || !reflect.DeepEqual(v, topicProp)) { + } else if v, ok := d.GetOkExists("topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(topicProp)) && (ok || !reflect.DeepEqual(v, topicProp)) { obj["topic"] = topicProp } labelsProp, err := expandPubsubSubscriptionLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } bigqueryConfigProp, err := expandPubsubSubscriptionBigqueryConfig(d.Get("bigquery_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("bigquery_config"); !isEmptyValue(reflect.ValueOf(bigqueryConfigProp)) && (ok || !reflect.DeepEqual(v, bigqueryConfigProp)) { + } else if v, ok := 
d.GetOkExists("bigquery_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(bigqueryConfigProp)) && (ok || !reflect.DeepEqual(v, bigqueryConfigProp)) { obj["bigqueryConfig"] = bigqueryConfigProp } pushConfigProp, err := expandPubsubSubscriptionPushConfig(d.Get("push_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("push_config"); !isEmptyValue(reflect.ValueOf(pushConfigProp)) && (ok || !reflect.DeepEqual(v, pushConfigProp)) { + } else if v, ok := d.GetOkExists("push_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(pushConfigProp)) && (ok || !reflect.DeepEqual(v, pushConfigProp)) { obj["pushConfig"] = pushConfigProp } ackDeadlineSecondsProp, err := expandPubsubSubscriptionAckDeadlineSeconds(d.Get("ack_deadline_seconds"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("ack_deadline_seconds"); !isEmptyValue(reflect.ValueOf(ackDeadlineSecondsProp)) && (ok || !reflect.DeepEqual(v, ackDeadlineSecondsProp)) { + } else if v, ok := d.GetOkExists("ack_deadline_seconds"); !tpgresource.IsEmptyValue(reflect.ValueOf(ackDeadlineSecondsProp)) && (ok || !reflect.DeepEqual(v, ackDeadlineSecondsProp)) { obj["ackDeadlineSeconds"] = ackDeadlineSecondsProp } messageRetentionDurationProp, err := expandPubsubSubscriptionMessageRetentionDuration(d.Get("message_retention_duration"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("message_retention_duration"); !isEmptyValue(reflect.ValueOf(messageRetentionDurationProp)) && (ok || !reflect.DeepEqual(v, messageRetentionDurationProp)) { + } else if v, ok := d.GetOkExists("message_retention_duration"); !tpgresource.IsEmptyValue(reflect.ValueOf(messageRetentionDurationProp)) && (ok || !reflect.DeepEqual(v, messageRetentionDurationProp)) { obj["messageRetentionDuration"] = messageRetentionDurationProp } retainAckedMessagesProp, err := expandPubsubSubscriptionRetainAckedMessages(d.Get("retain_acked_messages"), d, config) if err != nil { return err - } else if 
v, ok := d.GetOkExists("retain_acked_messages"); !isEmptyValue(reflect.ValueOf(retainAckedMessagesProp)) && (ok || !reflect.DeepEqual(v, retainAckedMessagesProp)) { + } else if v, ok := d.GetOkExists("retain_acked_messages"); !tpgresource.IsEmptyValue(reflect.ValueOf(retainAckedMessagesProp)) && (ok || !reflect.DeepEqual(v, retainAckedMessagesProp)) { obj["retainAckedMessages"] = retainAckedMessagesProp } expirationPolicyProp, err := expandPubsubSubscriptionExpirationPolicy(d.Get("expiration_policy"), d, config) @@ -442,7 +448,7 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) filterProp, err := expandPubsubSubscriptionFilter(d.Get("filter"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { obj["filter"] = filterProp } deadLetterPolicyProp, err := expandPubsubSubscriptionDeadLetterPolicy(d.Get("dead_letter_policy"), d, config) @@ -454,19 +460,19 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) retryPolicyProp, err := expandPubsubSubscriptionRetryPolicy(d.Get("retry_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("retry_policy"); !isEmptyValue(reflect.ValueOf(retryPolicyProp)) && (ok || !reflect.DeepEqual(v, retryPolicyProp)) { + } else if v, ok := d.GetOkExists("retry_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(retryPolicyProp)) && (ok || !reflect.DeepEqual(v, retryPolicyProp)) { obj["retryPolicy"] = retryPolicyProp } enableMessageOrderingProp, err := expandPubsubSubscriptionEnableMessageOrdering(d.Get("enable_message_ordering"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("enable_message_ordering"); !isEmptyValue(reflect.ValueOf(enableMessageOrderingProp)) && 
(ok || !reflect.DeepEqual(v, enableMessageOrderingProp)) { + } else if v, ok := d.GetOkExists("enable_message_ordering"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableMessageOrderingProp)) && (ok || !reflect.DeepEqual(v, enableMessageOrderingProp)) { obj["enableMessageOrdering"] = enableMessageOrderingProp } enableExactlyOnceDeliveryProp, err := expandPubsubSubscriptionEnableExactlyOnceDelivery(d.Get("enable_exactly_once_delivery"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("enable_exactly_once_delivery"); !isEmptyValue(reflect.ValueOf(enableExactlyOnceDeliveryProp)) && (ok || !reflect.DeepEqual(v, enableExactlyOnceDeliveryProp)) { + } else if v, ok := d.GetOkExists("enable_exactly_once_delivery"); !tpgresource.IsEmptyValue(reflect.ValueOf(enableExactlyOnceDeliveryProp)) && (ok || !reflect.DeepEqual(v, enableExactlyOnceDeliveryProp)) { obj["enableExactlyOnceDelivery"] = enableExactlyOnceDeliveryProp } @@ -475,7 +481,7 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) return err } - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}") if err != nil { return err } @@ -483,30 +489,38 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) log.Printf("[DEBUG] Creating new Subscription: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Subscription: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, 
d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Subscription: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/subscriptions/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/subscriptions/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } d.SetId(id) - err = PollingWaitTime(resourcePubsubSubscriptionPollRead(d, meta), PollCheckForExistence, "Creating Subscription", d.Timeout(schema.TimeoutCreate), 1) + err = transport_tpg.PollingWaitTime(resourcePubsubSubscriptionPollRead(d, meta), transport_tpg.PollCheckForExistence, "Creating Subscription", d.Timeout(schema.TimeoutCreate), 1) if err != nil { log.Printf("[ERROR] Unable to confirm eventually consistent Subscription %q finished updating: %q", d.Id(), err) } @@ -516,34 +530,40 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) return resourcePubsubSubscriptionRead(d, meta) } -func resourcePubsubSubscriptionPollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { +func resourcePubsubSubscriptionPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { return func() (map[string]interface{}, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}") if err != nil { return nil, err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return nil, fmt.Errorf("Error fetching project for 
Subscription: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { return res, err } @@ -552,33 +572,39 @@ func resourcePubsubSubscriptionPollRead(d *schema.ResourceData, meta interface{} } func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Subscription: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("PubsubSubscription %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("PubsubSubscription %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -632,15 +658,15 @@ func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) er } func resourcePubsubSubscriptionUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Subscription: %s", err) } @@ -650,37 +676,37 @@ func resourcePubsubSubscriptionUpdate(d *schema.ResourceData, meta interface{}) labelsProp, err := expandPubsubSubscriptionLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } bigqueryConfigProp, err := expandPubsubSubscriptionBigqueryConfig(d.Get("bigquery_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("bigquery_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bigqueryConfigProp)) { + } else if v, ok := d.GetOkExists("bigquery_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bigqueryConfigProp)) { 
obj["bigqueryConfig"] = bigqueryConfigProp } pushConfigProp, err := expandPubsubSubscriptionPushConfig(d.Get("push_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("push_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pushConfigProp)) { + } else if v, ok := d.GetOkExists("push_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pushConfigProp)) { obj["pushConfig"] = pushConfigProp } ackDeadlineSecondsProp, err := expandPubsubSubscriptionAckDeadlineSeconds(d.Get("ack_deadline_seconds"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("ack_deadline_seconds"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ackDeadlineSecondsProp)) { + } else if v, ok := d.GetOkExists("ack_deadline_seconds"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ackDeadlineSecondsProp)) { obj["ackDeadlineSeconds"] = ackDeadlineSecondsProp } messageRetentionDurationProp, err := expandPubsubSubscriptionMessageRetentionDuration(d.Get("message_retention_duration"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("message_retention_duration"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, messageRetentionDurationProp)) { + } else if v, ok := d.GetOkExists("message_retention_duration"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, messageRetentionDurationProp)) { obj["messageRetentionDuration"] = messageRetentionDurationProp } retainAckedMessagesProp, err := expandPubsubSubscriptionRetainAckedMessages(d.Get("retain_acked_messages"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("retain_acked_messages"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retainAckedMessagesProp)) { + } else if v, ok := d.GetOkExists("retain_acked_messages"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, retainAckedMessagesProp)) { obj["retainAckedMessages"] = retainAckedMessagesProp } expirationPolicyProp, err := expandPubsubSubscriptionExpirationPolicy(d.Get("expiration_policy"), d, config) @@ -698,13 +724,13 @@ func resourcePubsubSubscriptionUpdate(d *schema.ResourceData, meta interface{}) retryPolicyProp, err := expandPubsubSubscriptionRetryPolicy(d.Get("retry_policy"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("retry_policy"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retryPolicyProp)) { + } else if v, ok := d.GetOkExists("retry_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retryPolicyProp)) { obj["retryPolicy"] = retryPolicyProp } enableExactlyOnceDeliveryProp, err := expandPubsubSubscriptionEnableExactlyOnceDelivery(d.Get("enable_exactly_once_delivery"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("enable_exactly_once_delivery"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableExactlyOnceDeliveryProp)) { + } else if v, ok := d.GetOkExists("enable_exactly_once_delivery"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableExactlyOnceDeliveryProp)) { obj["enableExactlyOnceDelivery"] = enableExactlyOnceDeliveryProp } @@ -713,7 +739,7 @@ func resourcePubsubSubscriptionUpdate(d *schema.ResourceData, meta interface{}) return err } - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}") if err != nil { return err } @@ -760,19 +786,27 @@ func resourcePubsubSubscriptionUpdate(d *schema.ResourceData, meta interface{}) if d.HasChange("enable_exactly_once_delivery") { updateMask = append(updateMask, "enableExactlyOnceDelivery") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + 
// updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Subscription %q: %s", d.Id(), err) @@ -784,21 +818,21 @@ func resourcePubsubSubscriptionUpdate(d *schema.ResourceData, meta interface{}) } func resourcePubsubSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Subscription: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}") if err != nil { return err } @@ -807,13 +841,21 @@ func resourcePubsubSubscriptionDelete(d *schema.ResourceData, meta 
interface{}) log.Printf("[DEBUG] Deleting Subscription %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "Subscription") + return transport_tpg.HandleNotFoundError(err, d, "Subscription") } log.Printf("[DEBUG] Finished deleting Subscription %q: %#v", d.Id(), res) @@ -821,8 +863,8 @@ func resourcePubsubSubscriptionDelete(d *schema.ResourceData, meta interface{}) } func resourcePubsubSubscriptionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/subscriptions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -831,7 +873,7 @@ func resourcePubsubSubscriptionImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/subscriptions/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/subscriptions/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -840,25 +882,25 @@ func resourcePubsubSubscriptionImport(d *schema.ResourceData, meta interface{}) return []*schema.ResourceData{d}, nil } -func flattenPubsubSubscriptionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionName(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return NameFromSelfLinkStateFunc(v) + return tpgresource.NameFromSelfLinkStateFunc(v) } -func flattenPubsubSubscriptionTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } - return ConvertSelfLinkToV1(v.(string)) + return tpgresource.ConvertSelfLinkToV1(v.(string)) } -func flattenPubsubSubscriptionLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionBigqueryConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionBigqueryConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -877,23 +919,23 @@ func flattenPubsubSubscriptionBigqueryConfig(v interface{}, d *schema.ResourceDa flattenPubsubSubscriptionBigqueryConfigDropUnknownFields(original["dropUnknownFields"], d, config) return []interface{}{transformed} } -func flattenPubsubSubscriptionBigqueryConfigTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionBigqueryConfigTable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionBigqueryConfigUseTopicSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionBigqueryConfigUseTopicSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionBigqueryConfigWriteMetadata(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenPubsubSubscriptionBigqueryConfigWriteMetadata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionBigqueryConfigDropUnknownFields(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionBigqueryConfigDropUnknownFields(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionPushConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionPushConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -910,7 +952,7 @@ func flattenPubsubSubscriptionPushConfig(v interface{}, d *schema.ResourceData, flattenPubsubSubscriptionPushConfigAttributes(original["attributes"], d, config) return []interface{}{transformed} } -func flattenPubsubSubscriptionPushConfigOidcToken(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionPushConfigOidcToken(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -925,26 +967,26 @@ func flattenPubsubSubscriptionPushConfigOidcToken(v interface{}, d *schema.Resou flattenPubsubSubscriptionPushConfigOidcTokenAudience(original["audience"], d, config) return []interface{}{transformed} } -func flattenPubsubSubscriptionPushConfigOidcTokenServiceAccountEmail(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionPushConfigOidcTokenServiceAccountEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionPushConfigOidcTokenAudience(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionPushConfigOidcTokenAudience(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { 
return v } -func flattenPubsubSubscriptionPushConfigPushEndpoint(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionPushConfigPushEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionPushConfigAttributes(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionPushConfigAttributes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionAckDeadlineSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionAckDeadlineSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -958,15 +1000,15 @@ func flattenPubsubSubscriptionAckDeadlineSeconds(v interface{}, d *schema.Resour return v // let terraform core handle it otherwise } -func flattenPubsubSubscriptionMessageRetentionDuration(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionMessageRetentionDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionRetainAckedMessages(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionRetainAckedMessages(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionExpirationPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionExpirationPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ 
-976,15 +1018,15 @@ func flattenPubsubSubscriptionExpirationPolicy(v interface{}, d *schema.Resource flattenPubsubSubscriptionExpirationPolicyTtl(original["ttl"], d, config) return []interface{}{transformed} } -func flattenPubsubSubscriptionExpirationPolicyTtl(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionExpirationPolicyTtl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionFilter(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionDeadLetterPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionDeadLetterPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -999,14 +1041,14 @@ func flattenPubsubSubscriptionDeadLetterPolicy(v interface{}, d *schema.Resource flattenPubsubSubscriptionDeadLetterPolicyMaxDeliveryAttempts(original["maxDeliveryAttempts"], d, config) return []interface{}{transformed} } -func flattenPubsubSubscriptionDeadLetterPolicyDeadLetterTopic(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionDeadLetterPolicyDeadLetterTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionDeadLetterPolicyMaxDeliveryAttempts(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionDeadLetterPolicyMaxDeliveryAttempts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err 
== nil { return intVal } } @@ -1020,7 +1062,7 @@ func flattenPubsubSubscriptionDeadLetterPolicyMaxDeliveryAttempts(v interface{}, return v // let terraform core handle it otherwise } -func flattenPubsubSubscriptionRetryPolicy(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionRetryPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -1035,28 +1077,28 @@ func flattenPubsubSubscriptionRetryPolicy(v interface{}, d *schema.ResourceData, flattenPubsubSubscriptionRetryPolicyMaximumBackoff(original["maximumBackoff"], d, config) return []interface{}{transformed} } -func flattenPubsubSubscriptionRetryPolicyMinimumBackoff(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionRetryPolicyMinimumBackoff(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionRetryPolicyMaximumBackoff(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionRetryPolicyMaximumBackoff(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionEnableMessageOrdering(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionEnableMessageOrdering(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenPubsubSubscriptionEnableExactlyOnceDelivery(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenPubsubSubscriptionEnableExactlyOnceDelivery(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandPubsubSubscriptionName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/subscriptions/{{name}}") +func 
expandPubsubSubscriptionName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.ReplaceVars(d, config, "projects/{{project}}/subscriptions/{{name}}") } -func expandPubsubSubscriptionTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - project, err := getProject(d, config) +func expandPubsubSubscriptionTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + project, err := tpgresource.GetProject(d, config) if err != nil { return "", err } @@ -1077,7 +1119,7 @@ func expandPubsubSubscriptionTopic(v interface{}, d TerraformResourceData, confi } } -func expandPubsubSubscriptionLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandPubsubSubscriptionLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1088,7 +1130,7 @@ func expandPubsubSubscriptionLabels(v interface{}, d TerraformResourceData, conf return m, nil } -func expandPubsubSubscriptionBigqueryConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionBigqueryConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1100,51 +1142,51 @@ func expandPubsubSubscriptionBigqueryConfig(v interface{}, d TerraformResourceDa transformedTable, err := expandPubsubSubscriptionBigqueryConfigTable(original["table"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["table"] = transformedTable } transformedUseTopicSchema, 
err := expandPubsubSubscriptionBigqueryConfigUseTopicSchema(original["use_topic_schema"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedUseTopicSchema); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedUseTopicSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["useTopicSchema"] = transformedUseTopicSchema } transformedWriteMetadata, err := expandPubsubSubscriptionBigqueryConfigWriteMetadata(original["write_metadata"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedWriteMetadata); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedWriteMetadata); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["writeMetadata"] = transformedWriteMetadata } transformedDropUnknownFields, err := expandPubsubSubscriptionBigqueryConfigDropUnknownFields(original["drop_unknown_fields"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDropUnknownFields); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDropUnknownFields); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["dropUnknownFields"] = transformedDropUnknownFields } return transformed, nil } -func expandPubsubSubscriptionBigqueryConfigTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionBigqueryConfigTable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionBigqueryConfigUseTopicSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionBigqueryConfigUseTopicSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionBigqueryConfigWriteMetadata(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionBigqueryConfigWriteMetadata(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionBigqueryConfigDropUnknownFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionBigqueryConfigDropUnknownFields(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionPushConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionPushConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1156,28 +1198,28 @@ func expandPubsubSubscriptionPushConfig(v interface{}, d TerraformResourceData, transformedOidcToken, err := expandPubsubSubscriptionPushConfigOidcToken(original["oidc_token"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedOidcToken); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedOidcToken); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["oidcToken"] = transformedOidcToken } transformedPushEndpoint, err := expandPubsubSubscriptionPushConfigPushEndpoint(original["push_endpoint"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedPushEndpoint); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedPushEndpoint); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["pushEndpoint"] = transformedPushEndpoint } transformedAttributes, err := expandPubsubSubscriptionPushConfigAttributes(original["attributes"], d, config) if err != nil { return nil, err - } else if val := 
reflect.ValueOf(transformedAttributes); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAttributes); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["attributes"] = transformedAttributes } return transformed, nil } -func expandPubsubSubscriptionPushConfigOidcToken(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionPushConfigOidcToken(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1189,33 +1231,33 @@ func expandPubsubSubscriptionPushConfigOidcToken(v interface{}, d TerraformResou transformedServiceAccountEmail, err := expandPubsubSubscriptionPushConfigOidcTokenServiceAccountEmail(original["service_account_email"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["serviceAccountEmail"] = transformedServiceAccountEmail } transformedAudience, err := expandPubsubSubscriptionPushConfigOidcTokenAudience(original["audience"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAudience); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAudience); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["audience"] = transformedAudience } return transformed, nil } -func expandPubsubSubscriptionPushConfigOidcTokenServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionPushConfigOidcTokenServiceAccountEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandPubsubSubscriptionPushConfigOidcTokenAudience(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionPushConfigOidcTokenAudience(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionPushConfigPushEndpoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionPushConfigPushEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionPushConfigAttributes(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandPubsubSubscriptionPushConfigAttributes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -1226,19 +1268,19 @@ func expandPubsubSubscriptionPushConfigAttributes(v interface{}, d TerraformReso return m, nil } -func expandPubsubSubscriptionAckDeadlineSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionAckDeadlineSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionMessageRetentionDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionMessageRetentionDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionRetainAckedMessages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionRetainAckedMessages(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandPubsubSubscriptionExpirationPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionExpirationPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 { return nil, nil @@ -1255,22 +1297,22 @@ func expandPubsubSubscriptionExpirationPolicy(v interface{}, d TerraformResource transformedTtl, err := expandPubsubSubscriptionExpirationPolicyTtl(original["ttl"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedTtl); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedTtl); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["ttl"] = transformedTtl } return transformed, nil } -func expandPubsubSubscriptionExpirationPolicyTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionExpirationPolicyTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionDeadLetterPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionDeadLetterPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1282,29 +1324,29 @@ func expandPubsubSubscriptionDeadLetterPolicy(v interface{}, d TerraformResource transformedDeadLetterTopic, err := expandPubsubSubscriptionDeadLetterPolicyDeadLetterTopic(original["dead_letter_topic"], d, config) if err != nil { return 
nil, err - } else if val := reflect.ValueOf(transformedDeadLetterTopic); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDeadLetterTopic); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["deadLetterTopic"] = transformedDeadLetterTopic } transformedMaxDeliveryAttempts, err := expandPubsubSubscriptionDeadLetterPolicyMaxDeliveryAttempts(original["max_delivery_attempts"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaxDeliveryAttempts); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaxDeliveryAttempts); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maxDeliveryAttempts"] = transformedMaxDeliveryAttempts } return transformed, nil } -func expandPubsubSubscriptionDeadLetterPolicyDeadLetterTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionDeadLetterPolicyDeadLetterTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionDeadLetterPolicyMaxDeliveryAttempts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionDeadLetterPolicyMaxDeliveryAttempts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionRetryPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1316,33 +1358,33 @@ func expandPubsubSubscriptionRetryPolicy(v interface{}, d TerraformResourceData, transformedMinimumBackoff, err := expandPubsubSubscriptionRetryPolicyMinimumBackoff(original["minimum_backoff"], d, 
config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMinimumBackoff); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMinimumBackoff); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["minimumBackoff"] = transformedMinimumBackoff } transformedMaximumBackoff, err := expandPubsubSubscriptionRetryPolicyMaximumBackoff(original["maximum_backoff"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMaximumBackoff); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMaximumBackoff); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["maximumBackoff"] = transformedMaximumBackoff } return transformed, nil } -func expandPubsubSubscriptionRetryPolicyMinimumBackoff(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionRetryPolicyMinimumBackoff(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionRetryPolicyMaximumBackoff(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionRetryPolicyMaximumBackoff(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionEnableMessageOrdering(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionEnableMessageOrdering(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandPubsubSubscriptionEnableExactlyOnceDelivery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandPubsubSubscriptionEnableExactlyOnceDelivery(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { 
return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_subscription_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_subscription_sweeper.go new file mode 100644 index 0000000000..dae1213f59 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_subscription_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package pubsub + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("PubsubSubscription", testSweepPubsubSubscription) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepPubsubSubscription(region string) error { + resourceName := "PubsubSubscription" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://pubsub.googleapis.com/v1/projects/{{project}}/subscriptions", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in 
response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["subscriptions"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://pubsub.googleapis.com/v1/projects/{{project}}/subscriptions/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_topic.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_topic.go new 
file mode 100644 index 0000000000..c3796fcfbb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_topic.go @@ -0,0 +1,678 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package pubsub + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourcePubsubTopic() *schema.Resource { + return &schema.Resource{ + Create: resourcePubsubTopicCreate, + Read: resourcePubsubTopicRead, + Update: resourcePubsubTopicUpdate, + Delete: resourcePubsubTopicDelete, + + Importer: &schema.ResourceImporter{ + State: resourcePubsubTopicImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of the topic.`, + }, + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + Description: `The resource name of the 
Cloud KMS CryptoKey to be used to protect access +to messages published on this topic. Your project's PubSub service account +('service-{{PROJECT_NUMBER}}@gcp-sa-pubsub.iam.gserviceaccount.com') must have +'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. +The expected format is 'projects/*/locations/*/keyRings/*/cryptoKeys/*'`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `A set of key/value label pairs to assign to this Topic.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "message_retention_duration": { + Type: schema.TypeString, + Optional: true, + Description: `Indicates the minimum duration to retain a message after it is published +to the topic. If this field is set, messages published to the topic in +the last messageRetentionDuration are always available to subscribers. +For instance, it allows any attached subscription to seek to a timestamp +that is up to messageRetentionDuration in the past. If this field is not +set, message retention is controlled by settings on individual subscriptions. +Cannot be more than 31 days or less than 10 minutes.`, + }, + "message_storage_policy": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Policy constraining the set of Google Cloud Platform regions where +messages published to the topic may be stored. If not present, then no +constraints are in effect.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_persistence_regions": { + Type: schema.TypeList, + Required: true, + Description: `A list of IDs of GCP regions where messages that are published to +the topic may be persisted in storage. Messages published by +publishers running in non-allowed GCP regions (or running outside +of GCP altogether) will be routed for storage in one of the +allowed regions. 
An empty list means that no regions are allowed, +and is not a valid configuration.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "schema_settings": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Settings for validating messages published against a schema.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schema": { + Type: schema.TypeString, + Required: true, + Description: `The name of the schema that messages published should be +validated against. Format is projects/{project}/schemas/{schema}. +The value of this field will be _deleted-schema_ +if the schema has been deleted.`, + }, + "encoding": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ENCODING_UNSPECIFIED", "JSON", "BINARY", ""}), + Description: `The encoding of messages validated against schema. Default value: "ENCODING_UNSPECIFIED" Possible values: ["ENCODING_UNSPECIFIED", "JSON", "BINARY"]`, + Default: "ENCODING_UNSPECIFIED", + }, + }, + }, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourcePubsubTopicCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandPubsubTopicName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + kmsKeyNameProp, err := expandPubsubTopicKmsKeyName(d.Get("kms_key_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("kms_key_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(kmsKeyNameProp)) && (ok || 
!reflect.DeepEqual(v, kmsKeyNameProp)) { + obj["kmsKeyName"] = kmsKeyNameProp + } + labelsProp, err := expandPubsubTopicLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + messageStoragePolicyProp, err := expandPubsubTopicMessageStoragePolicy(d.Get("message_storage_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("message_storage_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(messageStoragePolicyProp)) && (ok || !reflect.DeepEqual(v, messageStoragePolicyProp)) { + obj["messageStoragePolicy"] = messageStoragePolicyProp + } + schemaSettingsProp, err := expandPubsubTopicSchemaSettings(d.Get("schema_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("schema_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(schemaSettingsProp)) && (ok || !reflect.DeepEqual(v, schemaSettingsProp)) { + obj["schemaSettings"] = schemaSettingsProp + } + messageRetentionDurationProp, err := expandPubsubTopicMessageRetentionDuration(d.Get("message_retention_duration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("message_retention_duration"); !tpgresource.IsEmptyValue(reflect.ValueOf(messageRetentionDurationProp)) && (ok || !reflect.DeepEqual(v, messageRetentionDurationProp)) { + obj["messageRetentionDuration"] = messageRetentionDurationProp + } + + obj, err = resourcePubsubTopicEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Topic: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Topic: %s", 
err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.PubsubTopicProjectNotReady}, + }) + if err != nil { + return fmt.Errorf("Error creating Topic: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/topics/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = transport_tpg.PollingWaitTime(resourcePubsubTopicPollRead(d, meta), transport_tpg.PollCheckForExistence, "Creating Topic", d.Timeout(schema.TimeoutCreate), 1) + if err != nil { + log.Printf("[ERROR] Unable to confirm eventually consistent Topic %q finished updating: %q", d.Id(), err) + } + + log.Printf("[DEBUG] Finished creating Topic %q: %#v", d.Id(), res) + + return resourcePubsubTopicRead(d, meta) +} + +func resourcePubsubTopicPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") + if err != nil { + return nil, err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, fmt.Errorf("Error fetching project for Topic: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.PubsubTopicProjectNotReady}, + }) + if err != nil { + return res, err + } + return res, nil + } +} + +func resourcePubsubTopicRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Topic: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.PubsubTopicProjectNotReady}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("PubsubTopic %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Topic: %s", err) + } + + if err := d.Set("name", flattenPubsubTopicName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Topic: %s", err) + } + if err := d.Set("kms_key_name", flattenPubsubTopicKmsKeyName(res["kmsKeyName"], d, config)); err != nil { + return fmt.Errorf("Error reading Topic: 
%s", err) + } + if err := d.Set("labels", flattenPubsubTopicLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Topic: %s", err) + } + if err := d.Set("message_storage_policy", flattenPubsubTopicMessageStoragePolicy(res["messageStoragePolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading Topic: %s", err) + } + if err := d.Set("schema_settings", flattenPubsubTopicSchemaSettings(res["schemaSettings"], d, config)); err != nil { + return fmt.Errorf("Error reading Topic: %s", err) + } + if err := d.Set("message_retention_duration", flattenPubsubTopicMessageRetentionDuration(res["messageRetentionDuration"], d, config)); err != nil { + return fmt.Errorf("Error reading Topic: %s", err) + } + + return nil +} + +func resourcePubsubTopicUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Topic: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + kmsKeyNameProp, err := expandPubsubTopicKmsKeyName(d.Get("kms_key_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("kms_key_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, kmsKeyNameProp)) { + obj["kmsKeyName"] = kmsKeyNameProp + } + labelsProp, err := expandPubsubTopicLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + messageStoragePolicyProp, err := expandPubsubTopicMessageStoragePolicy(d.Get("message_storage_policy"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("message_storage_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, messageStoragePolicyProp)) { + obj["messageStoragePolicy"] = messageStoragePolicyProp + } + schemaSettingsProp, err := expandPubsubTopicSchemaSettings(d.Get("schema_settings"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("schema_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, schemaSettingsProp)) { + obj["schemaSettings"] = schemaSettingsProp + } + messageRetentionDurationProp, err := expandPubsubTopicMessageRetentionDuration(d.Get("message_retention_duration"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("message_retention_duration"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, messageRetentionDurationProp)) { + obj["messageRetentionDuration"] = messageRetentionDurationProp + } + + obj, err = resourcePubsubTopicUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Topic %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("kms_key_name") { + updateMask = append(updateMask, "kmsKeyName") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("message_storage_policy") { + updateMask = append(updateMask, "messageStoragePolicy") + } + + if d.HasChange("schema_settings") { + updateMask = append(updateMask, "schemaSettings") + } + + if d.HasChange("message_retention_duration") { + updateMask = append(updateMask, "messageRetentionDuration") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + 
return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.PubsubTopicProjectNotReady}, + }) + + if err != nil { + return fmt.Errorf("Error updating Topic %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Topic %q: %#v", d.Id(), res) + } + + return resourcePubsubTopicRead(d, meta) +} + +func resourcePubsubTopicDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Topic: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Topic %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.PubsubTopicProjectNotReady}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Topic") + } 
+ + log.Printf("[DEBUG] Finished deleting Topic %q: %#v", d.Id(), res) + return nil +} + +func resourcePubsubTopicImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/topics/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/topics/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenPubsubTopicName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenPubsubTopicKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPubsubTopicLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPubsubTopicMessageStoragePolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["allowed_persistence_regions"] = + flattenPubsubTopicMessageStoragePolicyAllowedPersistenceRegions(original["allowedPersistenceRegions"], d, config) + return []interface{}{transformed} +} +func flattenPubsubTopicMessageStoragePolicyAllowedPersistenceRegions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPubsubTopicSchemaSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["schema"] = + flattenPubsubTopicSchemaSettingsSchema(original["schema"], d, config) + transformed["encoding"] = + flattenPubsubTopicSchemaSettingsEncoding(original["encoding"], d, config) + return []interface{}{transformed} +} +func flattenPubsubTopicSchemaSettingsSchema(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPubsubTopicSchemaSettingsEncoding(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPubsubTopicMessageRetentionDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandPubsubTopicName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.GetResourceNameFromSelfLink(v.(string)), nil +} + +func expandPubsubTopicKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubTopicLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandPubsubTopicMessageStoragePolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAllowedPersistenceRegions, err := expandPubsubTopicMessageStoragePolicyAllowedPersistenceRegions(original["allowed_persistence_regions"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedAllowedPersistenceRegions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowedPersistenceRegions"] = transformedAllowedPersistenceRegions + } + + return transformed, nil +} + +func expandPubsubTopicMessageStoragePolicyAllowedPersistenceRegions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubTopicSchemaSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSchema, err := expandPubsubTopicSchemaSettingsSchema(original["schema"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["schema"] = transformedSchema + } + + transformedEncoding, err := expandPubsubTopicSchemaSettingsEncoding(original["encoding"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["encoding"] = transformedEncoding + } + + return transformed, nil +} + +func expandPubsubTopicSchemaSettingsSchema(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubTopicSchemaSettingsEncoding(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubTopicMessageRetentionDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourcePubsubTopicEncoder(d *schema.ResourceData, meta interface{}, obj 
map[string]interface{}) (map[string]interface{}, error) { + delete(obj, "name") + return obj, nil +} + +func resourcePubsubTopicUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + newObj := make(map[string]interface{}) + newObj["topic"] = obj + return newObj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_topic_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_topic_sweeper.go new file mode 100644 index 0000000000..8c8b7a2a4c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsub/resource_pubsub_topic_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package pubsub + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("PubsubTopic", testSweepPubsubTopic) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepPubsubTopic(region string) error { + resourceName := "PubsubTopic" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://pubsub.googleapis.com/v1/projects/{{project}}/topics", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, 
err) + return nil + } + + resourceList, ok := res["topics"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://pubsub.googleapis.com/v1/projects/{{project}}/topics/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_reservation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_reservation.go new file mode 100644 index 
0000000000..88a7fe953b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_reservation.go @@ -0,0 +1,337 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package pubsublite + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourcePubsubLiteReservation() *schema.Resource { + return &schema.Resource{ + Create: resourcePubsubLiteReservationCreate, + Read: resourcePubsubLiteReservationRead, + Update: resourcePubsubLiteReservationUpdate, + Delete: resourcePubsubLiteReservationDelete, + + Importer: &schema.ResourceImporter{ + State: resourcePubsubLiteReservationImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of the reservation.`, + }, + "throughput_capacity": { + Type: schema.TypeInt, + Required: true, + Description: `The reserved throughput 
capacity. Every unit of throughput capacity is +equivalent to 1 MiB/s of published messages or 2 MiB/s of subscribed +messages.`, + }, + "region": { + Type: schema.TypeString, + Optional: true, + Description: `The region of the pubsub lite reservation.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourcePubsubLiteReservationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + throughputCapacityProp, err := expandPubsubLiteReservationThroughputCapacity(d.Get("throughput_capacity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("throughput_capacity"); !tpgresource.IsEmptyValue(reflect.ValueOf(throughputCapacityProp)) && (ok || !reflect.DeepEqual(v, throughputCapacityProp)) { + obj["throughputCapacity"] = throughputCapacityProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{region}}/reservations?reservationId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Reservation: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Reservation: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Reservation: 
%s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/reservations/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Reservation %q: %#v", d.Id(), res) + + return resourcePubsubLiteReservationRead(d, meta) +} + +func resourcePubsubLiteReservationRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{region}}/reservations/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Reservation: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("PubsubLiteReservation %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Reservation: %s", err) + } + + if err := d.Set("throughput_capacity", flattenPubsubLiteReservationThroughputCapacity(res["throughputCapacity"], d, config)); err != nil { + return fmt.Errorf("Error reading Reservation: %s", err) + } + + return nil +} + +func resourcePubsubLiteReservationUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Reservation: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + throughputCapacityProp, err := expandPubsubLiteReservationThroughputCapacity(d.Get("throughput_capacity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("throughput_capacity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, throughputCapacityProp)) { + obj["throughputCapacity"] = throughputCapacityProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{region}}/reservations/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Reservation %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("throughput_capacity") { + updateMask = append(updateMask, "throughputCapacity") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Reservation %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Reservation %q: %#v", d.Id(), res) + } + + return resourcePubsubLiteReservationRead(d, meta) +} + +func resourcePubsubLiteReservationDelete(d *schema.ResourceData, meta 
interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Reservation: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{region}}/reservations/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Reservation %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Reservation") + } + + log.Printf("[DEBUG] Finished deleting Reservation %q: %#v", d.Id(), res) + return nil +} + +func resourcePubsubLiteReservationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/reservations/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/reservations/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenPubsubLiteReservationThroughputCapacity(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func expandPubsubLiteReservationThroughputCapacity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_reservation_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_reservation_sweeper.go new file mode 100644 index 0000000000..fb02f80fdc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_reservation_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package pubsublite + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("PubsubLiteReservation", testSweepPubsubLiteReservation) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepPubsubLiteReservation(region string) error { + resourceName := "PubsubLiteReservation" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://{{region}}-pubsublite.googleapis.com/v1/admin/projects/{{project}}/locations/{{region}}/reservations", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err 
!= nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["reservations"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://{{region}}-pubsublite.googleapis.com/v1/admin/projects/{{project}}/locations/{{region}}/reservations/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_subscription.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_subscription.go new file mode 100644 index 0000000000..1ca022bf4f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_subscription.go @@ -0,0 +1,459 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package pubsublite + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourcePubsubLiteSubscription() *schema.Resource { + return &schema.Resource{ + Create: resourcePubsubLiteSubscriptionCreate, + Read: resourcePubsubLiteSubscriptionRead, + Update: resourcePubsubLiteSubscriptionUpdate, + Delete: resourcePubsubLiteSubscriptionDelete, + + Importer: &schema.ResourceImporter{ + State: resourcePubsubLiteSubscriptionImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + 
Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of the subscription.`, + }, + "topic": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `A reference to a Topic resource.`, + }, + "delivery_config": { + Type: schema.TypeList, + Optional: true, + Description: `The settings for this subscription's message delivery.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delivery_requirement": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"DELIVER_IMMEDIATELY", "DELIVER_AFTER_STORED", "DELIVERY_REQUIREMENT_UNSPECIFIED"}), + Description: `When this subscription should send messages to subscribers relative to messages persistence in storage. Possible values: ["DELIVER_IMMEDIATELY", "DELIVER_AFTER_STORED", "DELIVERY_REQUIREMENT_UNSPECIFIED"]`, + }, + }, + }, + }, + "region": { + Type: schema.TypeString, + Optional: true, + Description: `The region of the pubsub lite topic.`, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + Description: `The zone of the pubsub lite topic.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourcePubsubLiteSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + topicProp, err := expandPubsubLiteSubscriptionTopic(d.Get("topic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(topicProp)) && (ok || !reflect.DeepEqual(v, topicProp)) { + obj["topic"] = topicProp + } + deliveryConfigProp, err := 
expandPubsubLiteSubscriptionDeliveryConfig(d.Get("delivery_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("delivery_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(deliveryConfigProp)) && (ok || !reflect.DeepEqual(v, deliveryConfigProp)) { + obj["deliveryConfig"] = deliveryConfigProp + } + + obj, err = resourcePubsubLiteSubscriptionEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/subscriptions?subscriptionId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Subscription: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Subscription: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Subscription: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Subscription %q: %#v", d.Id(), res) + + return resourcePubsubLiteSubscriptionRead(d, meta) +} + +func resourcePubsubLiteSubscriptionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := 
tpgresource.ReplaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Subscription: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("PubsubLiteSubscription %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Subscription: %s", err) + } + + if err := d.Set("topic", flattenPubsubLiteSubscriptionTopic(res["topic"], d, config)); err != nil { + return fmt.Errorf("Error reading Subscription: %s", err) + } + if err := d.Set("delivery_config", flattenPubsubLiteSubscriptionDeliveryConfig(res["deliveryConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Subscription: %s", err) + } + + return nil +} + +func resourcePubsubLiteSubscriptionUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Subscription: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + deliveryConfigProp, err := expandPubsubLiteSubscriptionDeliveryConfig(d.Get("delivery_config"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("delivery_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, deliveryConfigProp)) { + obj["deliveryConfig"] = deliveryConfigProp + } + + obj, err = resourcePubsubLiteSubscriptionEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Subscription %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("delivery_config") { + updateMask = append(updateMask, "deliveryConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Subscription %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Subscription %q: %#v", d.Id(), res) + } + + return resourcePubsubLiteSubscriptionRead(d, meta) +} + +func resourcePubsubLiteSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Subscription: %s", err) + } + billingProject = project + + 
url, err := tpgresource.ReplaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Subscription %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Subscription") + } + + log.Printf("[DEBUG] Finished deleting Subscription %q: %#v", d.Id(), res) + return nil +} + +func resourcePubsubLiteSubscriptionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/subscriptions/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenPubsubLiteSubscriptionTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenPubsubLiteSubscriptionDeliveryConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) 
== 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["delivery_requirement"] = + flattenPubsubLiteSubscriptionDeliveryConfigDeliveryRequirement(original["deliveryRequirement"], d, config) + return []interface{}{transformed} +} +func flattenPubsubLiteSubscriptionDeliveryConfigDeliveryRequirement(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandPubsubLiteSubscriptionTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return "", err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return nil, err + } + + if zone == "" { + return nil, fmt.Errorf("zone must be non-empty - set in resource or at provider-level") + } + + topic := d.Get("topic").(string) + + re := regexp.MustCompile(`projects\/(.*)\/locations\/(.*)\/topics\/(.*)`) + match := re.FindStringSubmatch(topic) + if len(match) == 4 { + return topic, nil + } else { + // If no full topic given, we expand it to a full topic on the same project + fullTopic := fmt.Sprintf("projects/%s/locations/%s/topics/%s", project, zone, topic) + if err := d.Set("topic", fullTopic); err != nil { + return nil, fmt.Errorf("Error setting topic: %s", err) + } + return fullTopic, nil + } +} + +func expandPubsubLiteSubscriptionDeliveryConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDeliveryRequirement, err := expandPubsubLiteSubscriptionDeliveryConfigDeliveryRequirement(original["delivery_requirement"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDeliveryRequirement); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["deliveryRequirement"] = transformedDeliveryRequirement + } + + return transformed, nil +} + +func expandPubsubLiteSubscriptionDeliveryConfigDeliveryRequirement(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourcePubsubLiteSubscriptionEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return nil, err + } + + if zone == "" { + return nil, fmt.Errorf("zone must be non-empty - set in resource or at provider-level") + } + + // API Endpoint requires region in the URL. We infer it from the zone. + + region := tpgresource.GetRegionFromZone(zone) + + if region == "" { + return nil, fmt.Errorf("invalid zone %q, unable to infer region from zone", zone) + } + + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_subscription_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_subscription_sweeper.go new file mode 100644 index 0000000000..178052d964 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_subscription_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package pubsublite + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("PubsubLiteSubscription", testSweepPubsubLiteSubscription) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepPubsubLiteSubscription(region string) error { + resourceName := "PubsubLiteSubscription" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://{{region}}-pubsublite.googleapis.com/v1/admin/projects/{{project}}/locations/{{zone}}/subscriptions", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", 
+ Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["subscriptions"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://{{region}}-pubsublite.googleapis.com/v1/admin/projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_topic.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_topic.go new file mode 100644 index 0000000000..9f458166e4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_topic.go @@ -0,0 +1,711 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package pubsublite + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourcePubsubLiteTopic() *schema.Resource { + return &schema.Resource{ + Create: resourcePubsubLiteTopicCreate, + Read: resourcePubsubLiteTopicRead, + Update: resourcePubsubLiteTopicUpdate, + Delete: resourcePubsubLiteTopicDelete, + + Importer: &schema.ResourceImporter{ + State: resourcePubsubLiteTopicImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of the topic.`, + }, + "partition_config": { + Type: schema.TypeList, + Optional: true, + Description: `The settings for this topic's partitions.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Required: true, + Description: `The number of partitions in the topic. Must be at least 1.`, + }, + "capacity": { + Type: schema.TypeList, + Optional: true, + Description: `The capacity configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "publish_mib_per_sec": { + Type: schema.TypeInt, + Required: true, + Description: `Subscribe throughput capacity per partition in MiB/s. Must be >= 4 and <= 16.`, + }, + "subscribe_mib_per_sec": { + Type: schema.TypeInt, + Required: true, + Description: `Publish throughput capacity per partition in MiB/s. 
Must be >= 4 and <= 16.`, + }, + }, + }, + }, + }, + }, + }, + "region": { + Type: schema.TypeString, + Optional: true, + Description: `The region of the pubsub lite topic.`, + }, + "reservation_config": { + Type: schema.TypeList, + Optional: true, + Description: `The settings for this topic's Reservation usage.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "throughput_reservation": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The Reservation to use for this topic's throughput capacity.`, + }, + }, + }, + }, + "retention_config": { + Type: schema.TypeList, + Optional: true, + Description: `The settings for a topic's message retention.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "per_partition_bytes": { + Type: schema.TypeString, + Required: true, + Description: `The provisioned storage, in bytes, per partition. If the number of bytes stored +in any of the topic's partitions grows beyond this value, older messages will be +dropped to make room for newer ones, regardless of the value of period.`, + }, + "period": { + Type: schema.TypeString, + Optional: true, + Description: `How long a published message is retained. If unset, messages will be retained as +long as the bytes retained for each partition is below perPartitionBytes. A +duration in seconds with up to nine fractional digits, terminated by 's'. 
+Example: "3.5s".`, + }, + }, + }, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + Description: `The zone of the pubsub lite topic.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourcePubsubLiteTopicCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + partitionConfigProp, err := expandPubsubLiteTopicPartitionConfig(d.Get("partition_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("partition_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(partitionConfigProp)) && (ok || !reflect.DeepEqual(v, partitionConfigProp)) { + obj["partitionConfig"] = partitionConfigProp + } + retentionConfigProp, err := expandPubsubLiteTopicRetentionConfig(d.Get("retention_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("retention_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(retentionConfigProp)) && (ok || !reflect.DeepEqual(v, retentionConfigProp)) { + obj["retentionConfig"] = retentionConfigProp + } + reservationConfigProp, err := expandPubsubLiteTopicReservationConfig(d.Get("reservation_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reservation_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(reservationConfigProp)) && (ok || !reflect.DeepEqual(v, reservationConfigProp)) { + obj["reservationConfig"] = reservationConfigProp + } + + obj, err = resourcePubsubLiteTopicEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/topics?topicId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Topic: %#v", 
obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Topic: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Topic: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{zone}}/topics/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Topic %q: %#v", d.Id(), res) + + return resourcePubsubLiteTopicRead(d, meta) +} + +func resourcePubsubLiteTopicRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/topics/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Topic: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return 
transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("PubsubLiteTopic %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Topic: %s", err) + } + + if err := d.Set("partition_config", flattenPubsubLiteTopicPartitionConfig(res["partitionConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Topic: %s", err) + } + if err := d.Set("retention_config", flattenPubsubLiteTopicRetentionConfig(res["retentionConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Topic: %s", err) + } + if err := d.Set("reservation_config", flattenPubsubLiteTopicReservationConfig(res["reservationConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Topic: %s", err) + } + + return nil +} + +func resourcePubsubLiteTopicUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Topic: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + partitionConfigProp, err := expandPubsubLiteTopicPartitionConfig(d.Get("partition_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("partition_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, partitionConfigProp)) { + obj["partitionConfig"] = partitionConfigProp + } + retentionConfigProp, err := expandPubsubLiteTopicRetentionConfig(d.Get("retention_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("retention_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, retentionConfigProp)) { + obj["retentionConfig"] = retentionConfigProp + } + reservationConfigProp, err := 
expandPubsubLiteTopicReservationConfig(d.Get("reservation_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reservation_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, reservationConfigProp)) { + obj["reservationConfig"] = reservationConfigProp + } + + obj, err = resourcePubsubLiteTopicEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/topics/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Topic %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("partition_config") { + updateMask = append(updateMask, "partitionConfig") + } + + if d.HasChange("retention_config") { + updateMask = append(updateMask, "retentionConfig") + } + + if d.HasChange("reservation_config") { + updateMask = append(updateMask, "reservationConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Topic %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Topic %q: %#v", d.Id(), res) + } + + return resourcePubsubLiteTopicRead(d, meta) +} + +func resourcePubsubLiteTopicDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Topic: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/topics/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Topic %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Topic") + } + + log.Printf("[DEBUG] Finished deleting Topic %q: %#v", d.Id(), res) + return nil +} + +func resourcePubsubLiteTopicImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/topics/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{zone}}/topics/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenPubsubLiteTopicPartitionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) 
+ if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["count"] = + flattenPubsubLiteTopicPartitionConfigCount(original["count"], d, config) + transformed["capacity"] = + flattenPubsubLiteTopicPartitionConfigCapacity(original["capacity"], d, config) + return []interface{}{transformed} +} +func flattenPubsubLiteTopicPartitionConfigCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenPubsubLiteTopicPartitionConfigCapacity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["publish_mib_per_sec"] = + flattenPubsubLiteTopicPartitionConfigCapacityPublishMibPerSec(original["publishMibPerSec"], d, config) + transformed["subscribe_mib_per_sec"] = + flattenPubsubLiteTopicPartitionConfigCapacitySubscribeMibPerSec(original["subscribeMibPerSec"], d, config) + return []interface{}{transformed} +} +func flattenPubsubLiteTopicPartitionConfigCapacityPublishMibPerSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenPubsubLiteTopicPartitionConfigCapacitySubscribeMibPerSec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenPubsubLiteTopicRetentionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["per_partition_bytes"] = + flattenPubsubLiteTopicRetentionConfigPerPartitionBytes(original["perPartitionBytes"], d, config) + transformed["period"] = + flattenPubsubLiteTopicRetentionConfigPeriod(original["period"], d, config) + return []interface{}{transformed} +} +func flattenPubsubLiteTopicRetentionConfigPerPartitionBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPubsubLiteTopicRetentionConfigPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenPubsubLiteTopicReservationConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["throughput_reservation"] = + flattenPubsubLiteTopicReservationConfigThroughputReservation(original["throughputReservation"], d, config) + return []interface{}{transformed} +} +func flattenPubsubLiteTopicReservationConfigThroughputReservation(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func expandPubsubLiteTopicPartitionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCount, err := expandPubsubLiteTopicPartitionConfigCount(original["count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["count"] = transformedCount + } + + transformedCapacity, err := expandPubsubLiteTopicPartitionConfigCapacity(original["capacity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCapacity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["capacity"] = transformedCapacity + } + + return transformed, nil +} + +func expandPubsubLiteTopicPartitionConfigCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubLiteTopicPartitionConfigCapacity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPublishMibPerSec, err := expandPubsubLiteTopicPartitionConfigCapacityPublishMibPerSec(original["publish_mib_per_sec"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublishMibPerSec); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publishMibPerSec"] = transformedPublishMibPerSec + } + + transformedSubscribeMibPerSec, err 
:= expandPubsubLiteTopicPartitionConfigCapacitySubscribeMibPerSec(original["subscribe_mib_per_sec"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubscribeMibPerSec); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subscribeMibPerSec"] = transformedSubscribeMibPerSec + } + + return transformed, nil +} + +func expandPubsubLiteTopicPartitionConfigCapacityPublishMibPerSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubLiteTopicPartitionConfigCapacitySubscribeMibPerSec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubLiteTopicRetentionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPerPartitionBytes, err := expandPubsubLiteTopicRetentionConfigPerPartitionBytes(original["per_partition_bytes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPerPartitionBytes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["perPartitionBytes"] = transformedPerPartitionBytes + } + + transformedPeriod, err := expandPubsubLiteTopicRetentionConfigPeriod(original["period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["period"] = transformedPeriod + } + + return transformed, nil +} + +func expandPubsubLiteTopicRetentionConfigPerPartitionBytes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandPubsubLiteTopicRetentionConfigPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandPubsubLiteTopicReservationConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedThroughputReservation, err := expandPubsubLiteTopicReservationConfigThroughputReservation(original["throughput_reservation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedThroughputReservation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["throughputReservation"] = transformedThroughputReservation + } + + return transformed, nil +} + +func expandPubsubLiteTopicReservationConfigThroughputReservation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseRegionalFieldValue("reservations", v.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for throughput_reservation: %s", err) + } + // Custom due to "locations" rather than "regions". + return fmt.Sprintf("projects/%s/locations/%s/reservations/%s", f.Project, f.Region, f.Name), nil +} + +func resourcePubsubLiteTopicEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return nil, err + } + + if zone == "" { + return nil, fmt.Errorf("zone must be non-empty - set in resource or at provider-level") + } + + // API Endpoint requires region in the URL. We infer it from the zone. 
+ + region := tpgresource.GetRegionFromZone(zone) + + if region == "" { + return nil, fmt.Errorf("invalid zone %q, unable to infer region from zone", zone) + } + + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_topic_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_topic_sweeper.go new file mode 100644 index 0000000000..a8eb858fc0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/pubsublite/resource_pubsub_lite_topic_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package pubsublite + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("PubsubLiteTopic", testSweepPubsubLiteTopic) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepPubsubLiteTopic(region string) error { + resourceName := "PubsubLiteTopic" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://{{region}}-pubsublite.googleapis.com/v1/admin/projects/{{project}}/locations/{{zone}}/topics", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["topics"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://{{region}}-pubsublite.googleapis.com/v1/admin/projects/{{project}}/locations/{{zone}}/topics/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_recaptcha_enterprise_key.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise/resource_recaptcha_enterprise_key.go similarity index 85% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_recaptcha_enterprise_key.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise/resource_recaptcha_enterprise_key.go index 2995cae144..89d7e84471 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_recaptcha_enterprise_key.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise/resource_recaptcha_enterprise_key.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,7 +16,7 @@ // // ---------------------------------------------------------------------------- -package google +package recaptchaenterprise import ( "context" @@ -25,6 +28,10 @@ import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" recaptchaenterprise "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise" + + "github.com/hashicorp/terraform-provider-google/google/tpgdclresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceRecaptchaEnterpriseKey() *schema.Resource { @@ -81,7 +88,7 @@ func ResourceRecaptchaEnterpriseKey() *schema.Resource { Computed: true, Optional: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: "The project for the resource", }, @@ -217,8 
+224,8 @@ func RecaptchaEnterpriseKeyWebSettingsSchema() *schema.Resource { } func resourceRecaptchaEnterpriseKeyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -227,7 +234,7 @@ func resourceRecaptchaEnterpriseKeyCreate(d *schema.ResourceData, meta interface DisplayName: dcl.String(d.Get("display_name").(string)), AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), @@ -238,18 +245,18 @@ func resourceRecaptchaEnterpriseKeyCreate(d *schema.ResourceData, meta interface return fmt.Errorf("error constructing id: %s", err) } d.SetId(id) - directive := CreateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.CreateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := tpgresource.ReplaceVars(d, config, 
client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -282,8 +289,8 @@ func resourceRecaptchaEnterpriseKeyCreate(d *schema.ResourceData, meta interface } func resourceRecaptchaEnterpriseKeyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -292,24 +299,24 @@ func resourceRecaptchaEnterpriseKeyRead(d *schema.ResourceData, meta interface{} DisplayName: dcl.String(d.Get("display_name").(string)), AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), Name: dcl.StringOrNil(d.Get("name").(string)), } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { 
d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -318,7 +325,7 @@ func resourceRecaptchaEnterpriseKeyRead(d *schema.ResourceData, meta interface{} res, err := client.GetKey(context.Background(), obj) if err != nil { resourceName := fmt.Sprintf("RecaptchaEnterpriseKey %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) + return tpgdclresource.HandleNotFoundDCLError(err, d, resourceName) } if err = d.Set("display_name", res.DisplayName); err != nil { @@ -352,8 +359,8 @@ func resourceRecaptchaEnterpriseKeyRead(d *schema.ResourceData, meta interface{} return nil } func resourceRecaptchaEnterpriseKeyUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -362,25 +369,25 @@ func resourceRecaptchaEnterpriseKeyUpdate(d *schema.ResourceData, meta interface DisplayName: dcl.String(d.Get("display_name").(string)), AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), Name: dcl.StringOrNil(d.Get("name").(string)), } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.UserAgent) + directive := tpgdclresource.UpdateDirective + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -402,8 +409,8 @@ func resourceRecaptchaEnterpriseKeyUpdate(d *schema.ResourceData, meta interface } func resourceRecaptchaEnterpriseKeyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -412,7 +419,7 @@ func resourceRecaptchaEnterpriseKeyDelete(d *schema.ResourceData, meta interface DisplayName: dcl.String(d.Get("display_name").(string)), AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), - Labels: checkStringMap(d.Get("labels")), + Labels: tpgresource.CheckStringMap(d.Get("labels")), Project: dcl.String(project), TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), @@ -420,17 +427,17 @@ func resourceRecaptchaEnterpriseKeyDelete(d *schema.ResourceData, meta interface } log.Printf("[DEBUG] Deleting Key %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, 
err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) - if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + client := transport_tpg.NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := tpgresource.ReplaceVars(d, config, client.Config.BasePath); err != nil { d.SetId("") return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) } else { @@ -445,9 +452,9 @@ func resourceRecaptchaEnterpriseKeyDelete(d *schema.ResourceData, meta interface } func resourceRecaptchaEnterpriseKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) - if err := parseImportId([]string{ + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/keys/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -456,7 +463,7 @@ func resourceRecaptchaEnterpriseKeyImport(d *schema.ResourceData, meta interface } // Replace import id for the resource id - id, err := replaceVarsForId(d, config, "projects/{{project}}/keys/{{name}}") + id, err := tpgresource.ReplaceVarsForId(d, config, "projects/{{project}}/keys/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -476,7 +483,7 @@ func expandRecaptchaEnterpriseKeyAndroidSettings(o interface{}) *recaptchaenterp obj := objArr[0].(map[string]interface{}) return &recaptchaenterprise.KeyAndroidSettings{ AllowAllPackageNames: dcl.Bool(obj["allow_all_package_names"].(bool)), - AllowedPackageNames: expandStringArray(obj["allowed_package_names"]), + AllowedPackageNames: tpgdclresource.ExpandStringArray(obj["allowed_package_names"]), } } @@ -504,7 +511,7 @@ func expandRecaptchaEnterpriseKeyIosSettings(o interface{}) 
*recaptchaenterprise obj := objArr[0].(map[string]interface{}) return &recaptchaenterprise.KeyIosSettings{ AllowAllBundleIds: dcl.Bool(obj["allow_all_bundle_ids"].(bool)), - AllowedBundleIds: expandStringArray(obj["allowed_bundle_ids"]), + AllowedBundleIds: tpgdclresource.ExpandStringArray(obj["allowed_bundle_ids"]), } } @@ -562,7 +569,7 @@ func expandRecaptchaEnterpriseKeyWebSettings(o interface{}) *recaptchaenterprise IntegrationType: recaptchaenterprise.KeyWebSettingsIntegrationTypeEnumRef(obj["integration_type"].(string)), AllowAllDomains: dcl.Bool(obj["allow_all_domains"].(bool)), AllowAmpTraffic: dcl.Bool(obj["allow_amp_traffic"].(bool)), - AllowedDomains: expandStringArray(obj["allowed_domains"]), + AllowedDomains: tpgdclresource.ExpandStringArray(obj["allowed_domains"]), ChallengeSecurityPreference: recaptchaenterprise.KeyWebSettingsChallengeSecurityPreferenceEnumRef(obj["challenge_security_preference"].(string)), } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise/resource_recaptcha_enterprise_key_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise/resource_recaptcha_enterprise_key_sweeper.go new file mode 100644 index 0000000000..3a9b867295 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise/resource_recaptcha_enterprise_key_sweeper.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package recaptchaenterprise + +import ( + "context" + "log" + "testing" + + recaptchaenterprise "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("RecaptchaEnterpriseKey", testSweepRecaptchaEnterpriseKey) +} + +func testSweepRecaptchaEnterpriseKey(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for RecaptchaEnterpriseKey") + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := transport_tpg.NewDCLRecaptchaEnterpriseClient(config, config.UserAgent, "", 0) + err = client.DeleteAllKey(context.Background(), d["project"], isDeletableRecaptchaEnterpriseKey) + if err != nil { + return err + } + return nil +} + +func isDeletableRecaptchaEnterpriseKey(r *recaptchaenterprise.Key) bool { + return sweeper.IsSweepableTestResource(*r.Name) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/data_source_redis_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/data_source_redis_instance.go new file mode 100644 index 0000000000..00d5758135 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/data_source_redis_instance.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package redis + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleRedisInstance() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceRedisInstance().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project", "region") + + return &schema.Resource{ + Read: dataSourceGoogleRedisInstanceRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleRedisInstanceRead(d *schema.ResourceData, meta interface{}) error { + id, err := tpgresource.ReplaceVars(d, meta.(*transport_tpg.Config), "projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return err + } + d.SetId(id) + + return resourceRedisInstanceRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/redis_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/redis_operation.go new file mode 100644 index 0000000000..10509f9db2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/redis_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package redis + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type RedisOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *RedisOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.RedisBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createRedisWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*RedisOperationWaiter, error) { + w := &RedisOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func RedisOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createRedisWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func RedisOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, 
timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createRedisWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/resource_redis_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/resource_redis_instance.go new file mode 100644 index 0000000000..b95ca8583d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/resource_redis_instance.go @@ -0,0 +1,2007 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package redis + +import ( + "context" + "fmt" + "log" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +// Is the new redis version less than the old one? +func isRedisVersionDecreasing(_ context.Context, old, new, _ interface{}) bool { + return isRedisVersionDecreasingFunc(old, new) +} + +// separate function for unit testing +func isRedisVersionDecreasingFunc(old, new interface{}) bool { + if old == nil || new == nil { + return false + } + re := regexp.MustCompile(`REDIS_(\d+)_(\d+)`) + oldParsed := re.FindSubmatch([]byte(old.(string))) + newParsed := re.FindSubmatch([]byte(new.(string))) + + if oldParsed == nil || newParsed == nil { + return false + } + + oldVersion, err := strconv.ParseFloat(fmt.Sprintf("%s.%s", oldParsed[1], oldParsed[2]), 32) + if err != nil { + return false + } + newVersion, err := strconv.ParseFloat(fmt.Sprintf("%s.%s", newParsed[1], newParsed[2]), 32) + if err != nil { + return false + } + + return newVersion < oldVersion +} + +// returns true if old=new or old='auto' +func secondaryIpDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + if (strings.ToLower(new) == "auto" && old != "") || old == new { + return true + } + return false +} + +func ResourceRedisInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceRedisInstanceCreate, + Read: resourceRedisInstanceRead, + Update: resourceRedisInstanceUpdate, + Delete: resourceRedisInstanceDelete, + + Importer: &schema.ResourceImporter{ + State: 
resourceRedisInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + customdiff.ForceNewIfChange("redis_version", isRedisVersionDecreasing), + ), + + Schema: map[string]*schema.Schema{ + "memory_size_gb": { + Type: schema.TypeInt, + Required: true, + Description: `Redis memory size in GiB.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^[a-z][a-z0-9-]{0,39}[a-z0-9]$`), + Description: `The ID of the instance or a fully qualified identifier for the instance.`, + }, + "alternative_location_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Only applicable to STANDARD_HA tier which protects the instance +against zonal failures by provisioning it across two zones. +If provided, it must be a different zone from the one provided in +[locationId].`, + }, + "auth_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Optional. Indicates whether OSS Redis AUTH is enabled for the +instance. If set to "true" AUTH is enabled on the instance. +Default value is "false" meaning AUTH is disabled.`, + Default: false, + }, + "authorized_network": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The full name of the Google Compute Engine network to which the +instance is connected. If left unspecified, the default network +will be used.`, + }, + "connect_mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS", ""}), + Description: `The connection mode of the Redis instance. 
Default value: "DIRECT_PEERING" Possible values: ["DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS"]`, + Default: "DIRECT_PEERING", + }, + "customer_managed_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Optional. The KMS key reference that you want to use to encrypt the data at rest for this Redis +instance. If this is provided, CMEK is enabled.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `An arbitrary and optional user-provided name for the instance.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Resource labels to represent user provided metadata.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "location_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The zone where the instance will be provisioned. If not provided, +the service will choose a zone for the instance. For STANDARD_HA tier, +instances will be created across two zones for protection against +zonal failures. If [alternativeLocationId] is also provided, it must +be different from [locationId].`, + }, + "maintenance_policy": { + Type: schema.TypeList, + Optional: true, + Description: `Maintenance policy for an instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. Description of what this policy is for. +Create/Update methods return INVALID_ARGUMENT if the +length is greater than 512.`, + }, + "weekly_maintenance_window": { + Type: schema.TypeList, + Optional: true, + Description: `Optional. Maintenance window that is applied to resources covered by this policy. +Minimum 1. 
For the current version, the maximum number +of weekly_window is expected to be one.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}), + Description: `Required. The day of week that maintenance updates occur. + +- DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. +- MONDAY: Monday +- TUESDAY: Tuesday +- WEDNESDAY: Wednesday +- THURSDAY: Thursday +- FRIDAY: Friday +- SATURDAY: Saturday +- SUNDAY: Sunday Possible values: ["DAY_OF_WEEK_UNSPECIFIED", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, + }, + "start_time": { + Type: schema.TypeList, + Required: true, + Description: `Required. Start time of the window in UTC time.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 23), + Description: `Hours of day in 24 hour format. Should be from 0 to 23. +An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 59), + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 999999999), + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 60), + Description: `Seconds of minutes of the time. Must normally be from 0 to 59. +An API may allow the value 60 if it allows leap-seconds.`, + }, + }, + }, + }, + "duration": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
Duration of the maintenance window. +The current window is fixed at 1 hour. +A duration in seconds with up to nine fractional digits, +terminated by 's'. Example: "3.5s".`, + }, + }, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The time when the policy was created. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +resolution and up to nine fractional digits.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The time when the policy was last updated. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +resolution and up to nine fractional digits.`, + }, + }, + }, + }, + "persistence_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Persistence configuration for an instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "persistence_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"DISABLED", "RDB"}), + Description: `Optional. Controls whether Persistence features are enabled. If not provided, the existing value will be used. + +- DISABLED: Persistence is disabled for the instance, and any existing snapshots are deleted. +- RDB: RDB based Persistence is enabled. Possible values: ["DISABLED", "RDB"]`, + }, + "rdb_snapshot_period": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ONE_HOUR", "SIX_HOURS", "TWELVE_HOURS", "TWENTY_FOUR_HOURS", ""}), + Description: `Optional. Available snapshot periods for scheduling. + +- ONE_HOUR: Snapshot every 1 hour. +- SIX_HOURS: Snapshot every 6 hours. +- TWELVE_HOURS: Snapshot every 12 hours. +- TWENTY_FOUR_HOURS: Snapshot every 24 hours. 
Possible values: ["ONE_HOUR", "SIX_HOURS", "TWELVE_HOURS", "TWENTY_FOUR_HOURS"]`, + }, + "rdb_snapshot_start_time": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Optional. Date and time that the first snapshot was/will be attempted, +and to which future snapshots will be aligned. If not provided, +the current time will be used. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution +and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "rdb_next_snapshot_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The next time that a snapshot attempt is scheduled to occur. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up +to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + }, + "read_replicas_mode": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"READ_REPLICAS_DISABLED", "READ_REPLICAS_ENABLED", ""}), + Description: `Optional. Read replica mode. Can only be specified when trying to create the instance. +If not set, Memorystore Redis backend will default to READ_REPLICAS_DISABLED. +- READ_REPLICAS_DISABLED: If disabled, read endpoint will not be provided and the +instance cannot scale up or down the number of replicas. +- READ_REPLICAS_ENABLED: If enabled, read endpoint will be provided and the instance +can scale up and down the number of replicas. Possible values: ["READ_REPLICAS_DISABLED", "READ_REPLICAS_ENABLED"]`, + }, + "redis_configs": { + Type: schema.TypeMap, + Optional: true, + Description: `Redis configuration parameters, according to http://redis.io/topics/config. 
+Please check Memorystore documentation for the list of supported parameters: +https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "redis_version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The version of Redis software. If not provided, latest supported +version will be used. Please check the API documentation linked +at the top for the latest valid values.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The name of the Redis region of the instance.`, + }, + "replica_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Optional. The number of replica nodes. The valid range for the Standard Tier with +read replicas enabled is [1-5] and defaults to 2. If read replicas are not enabled +for a Standard Tier instance, the only valid value is 1 and the default is 1. +The valid value for basic tier is 0 and the default is also 0.`, + }, + "reserved_ip_range": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The CIDR range of internal addresses that are reserved for this +instance. If not provided, the service will choose an unused /29 +block, for example, 10.0.0.0/29 or 192.168.0.0/29. Ranges must be +unique and non-overlapping with existing subnets in an authorized +network.`, + }, + "secondary_ip_range": { + Type: schema.TypeString, + Computed: true, + Optional: true, + DiffSuppressFunc: secondaryIpDiffSuppress, + Description: `Optional. Additional IP range for node placement. Required when enabling read replicas on +an existing instance. For DIRECT_PEERING mode value must be a CIDR range of size /28, or +"auto". 
For PRIVATE_SERVICE_ACCESS mode value must be the name of an allocated address +range associated with the private service access connection, or "auto".`, + }, + "tier": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"BASIC", "STANDARD_HA", ""}), + Description: `The service tier of the instance. Must be one of these values: + +- BASIC: standalone instance +- STANDARD_HA: highly available primary/replica instances Default value: "BASIC" Possible values: ["BASIC", "STANDARD_HA"]`, + Default: "BASIC", + }, + "transit_encryption_mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"SERVER_AUTHENTICATION", "DISABLED", ""}), + Description: `The TLS mode of the Redis instance, If not provided, TLS is disabled for the instance. + +- SERVER_AUTHENTICATION: Client to Server traffic encryption enabled with server authentication Default value: "DISABLED" Possible values: ["SERVER_AUTHENTICATION", "DISABLED"]`, + Default: "DISABLED", + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time the instance was created in RFC3339 UTC "Zulu" format, +accurate to nanoseconds.`, + }, + "current_location_id": { + Type: schema.TypeString, + Computed: true, + Description: `The current zone where the Redis endpoint is placed. +For Basic Tier instances, this will always be the same as the +[locationId] provided by the user at creation time. 
For Standard Tier +instances, this can be either [locationId] or [alternativeLocationId] +and can change after a failover event.`, + }, + "host": { + Type: schema.TypeString, + Computed: true, + Description: `Hostname or IP address of the exposed Redis endpoint used by clients +to connect to the service.`, + }, + "maintenance_schedule": { + Type: schema.TypeList, + Computed: true, + Description: `Upcoming maintenance schedule.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "end_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The end time of any upcoming scheduled maintenance for this instance. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +resolution and up to nine fractional digits.`, + }, + "schedule_deadline_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The deadline that the maintenance schedule start time +can not go beyond, including reschedule. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +resolution and up to nine fractional digits.`, + }, + "start_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The start time of any upcoming scheduled maintenance for this instance. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +resolution and up to nine fractional digits.`, + }, + }, + }, + }, + "nodes": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. Info per node.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + Description: `Node identifying string. e.g. 'node-0', 'node-1'`, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Description: `Location of the node.`, + }, + }, + }, + }, + "persistence_iam_identity": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Cloud IAM identity used by import / export operations +to transfer data to/from Cloud Storage. 
Format is "serviceAccount:". +The value may change over time for a given instance so should be +checked before each import/export operation.`, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + Description: `The port number of the exposed Redis endpoint.`, + }, + "read_endpoint": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Hostname or IP address of the exposed readonly Redis endpoint. Standard tier only. +Targets all healthy replica nodes in instance. Replication is asynchronous and replica nodes +will exhibit some lag behind the primary. Write requests must target 'host'.`, + }, + "read_endpoint_port": { + Type: schema.TypeInt, + Computed: true, + Description: `Output only. The port number of the exposed readonly redis endpoint. Standard tier only. +Write requests should target 'port'.`, + }, + "server_ca_certs": { + Type: schema.TypeList, + Computed: true, + Description: `List of server CA certificates for the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cert": { + Type: schema.TypeString, + Computed: true, + Description: `The certificate data in PEM format.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the certificate was created.`, + }, + "expire_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the certificate expires.`, + }, + "serial_number": { + Type: schema.TypeString, + Computed: true, + Description: `Serial number, as extracted from the certificate.`, + }, + "sha1_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `Sha1 Fingerprint of the certificate.`, + }, + }, + }, + }, + "auth_string": { + Type: schema.TypeString, + Description: "AUTH String set on the instance. 
This field will only be populated if auth_enabled is true.", + Computed: true, + Sensitive: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceRedisInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + alternativeLocationIdProp, err := expandRedisInstanceAlternativeLocationId(d.Get("alternative_location_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("alternative_location_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(alternativeLocationIdProp)) && (ok || !reflect.DeepEqual(v, alternativeLocationIdProp)) { + obj["alternativeLocationId"] = alternativeLocationIdProp + } + authEnabledProp, err := expandRedisInstanceAuthEnabled(d.Get("auth_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("auth_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(authEnabledProp)) && (ok || !reflect.DeepEqual(v, authEnabledProp)) { + obj["authEnabled"] = authEnabledProp + } + authorizedNetworkProp, err := expandRedisInstanceAuthorizedNetwork(d.Get("authorized_network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("authorized_network"); !tpgresource.IsEmptyValue(reflect.ValueOf(authorizedNetworkProp)) && (ok || !reflect.DeepEqual(v, authorizedNetworkProp)) { + obj["authorizedNetwork"] = authorizedNetworkProp + } + connectModeProp, err := expandRedisInstanceConnectMode(d.Get("connect_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("connect_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(connectModeProp)) && (ok || !reflect.DeepEqual(v, connectModeProp)) { + obj["connectMode"] = connectModeProp + } + displayNameProp, err := 
expandRedisInstanceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandRedisInstanceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + redisConfigsProp, err := expandRedisInstanceRedisConfigs(d.Get("redis_configs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("redis_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(redisConfigsProp)) && (ok || !reflect.DeepEqual(v, redisConfigsProp)) { + obj["redisConfigs"] = redisConfigsProp + } + locationIdProp, err := expandRedisInstanceLocationId(d.Get("location_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("location_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(locationIdProp)) && (ok || !reflect.DeepEqual(v, locationIdProp)) { + obj["locationId"] = locationIdProp + } + nameProp, err := expandRedisInstanceName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + persistenceConfigProp, err := expandRedisInstancePersistenceConfig(d.Get("persistence_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("persistence_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(persistenceConfigProp)) && (ok || !reflect.DeepEqual(v, persistenceConfigProp)) { + obj["persistenceConfig"] = persistenceConfigProp + } + maintenancePolicyProp, err := expandRedisInstanceMaintenancePolicy(d.Get("maintenance_policy"), d, 
config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("maintenance_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(maintenancePolicyProp)) && (ok || !reflect.DeepEqual(v, maintenancePolicyProp)) { + obj["maintenancePolicy"] = maintenancePolicyProp + } + memorySizeGbProp, err := expandRedisInstanceMemorySizeGb(d.Get("memory_size_gb"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("memory_size_gb"); !tpgresource.IsEmptyValue(reflect.ValueOf(memorySizeGbProp)) && (ok || !reflect.DeepEqual(v, memorySizeGbProp)) { + obj["memorySizeGb"] = memorySizeGbProp + } + redisVersionProp, err := expandRedisInstanceRedisVersion(d.Get("redis_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("redis_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(redisVersionProp)) && (ok || !reflect.DeepEqual(v, redisVersionProp)) { + obj["redisVersion"] = redisVersionProp + } + reservedIpRangeProp, err := expandRedisInstanceReservedIpRange(d.Get("reserved_ip_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reserved_ip_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(reservedIpRangeProp)) && (ok || !reflect.DeepEqual(v, reservedIpRangeProp)) { + obj["reservedIpRange"] = reservedIpRangeProp + } + tierProp, err := expandRedisInstanceTier(d.Get("tier"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tier"); !tpgresource.IsEmptyValue(reflect.ValueOf(tierProp)) && (ok || !reflect.DeepEqual(v, tierProp)) { + obj["tier"] = tierProp + } + transitEncryptionModeProp, err := expandRedisInstanceTransitEncryptionMode(d.Get("transit_encryption_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("transit_encryption_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(transitEncryptionModeProp)) && (ok || !reflect.DeepEqual(v, transitEncryptionModeProp)) { + obj["transitEncryptionMode"] = transitEncryptionModeProp + } + 
replicaCountProp, err := expandRedisInstanceReplicaCount(d.Get("replica_count"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("replica_count"); !tpgresource.IsEmptyValue(reflect.ValueOf(replicaCountProp)) && (ok || !reflect.DeepEqual(v, replicaCountProp)) { + obj["replicaCount"] = replicaCountProp + } + readReplicasModeProp, err := expandRedisInstanceReadReplicasMode(d.Get("read_replicas_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("read_replicas_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(readReplicasModeProp)) && (ok || !reflect.DeepEqual(v, readReplicasModeProp)) { + obj["readReplicasMode"] = readReplicasModeProp + } + secondaryIpRangeProp, err := expandRedisInstanceSecondaryIpRange(d.Get("secondary_ip_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("secondary_ip_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(secondaryIpRangeProp)) && (ok || !reflect.DeepEqual(v, secondaryIpRangeProp)) { + obj["secondaryIpRange"] = secondaryIpRangeProp + } + customerManagedKeyProp, err := expandRedisInstanceCustomerManagedKey(d.Get("customer_managed_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("customer_managed_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(customerManagedKeyProp)) && (ok || !reflect.DeepEqual(v, customerManagedKeyProp)) { + obj["customerManagedKey"] = customerManagedKeyProp + } + + obj, err = resourceRedisInstanceEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances?instanceId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Instance: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + // err 
== nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Instance: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = RedisOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Instance", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Instance: %s", err) + } + + opRes, err = resourceRedisInstanceDecoder(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error decoding response from operation: %s", err) + } + if opRes == nil { + return fmt.Errorf("Error decoding response from operation, could not find object") + } + + if err := d.Set("name", flattenRedisInstanceName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) + + return resourceRedisInstanceRead(d, meta) +} + +func resourceRedisInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("RedisInstance %q", d.Id())) + } + + res, err = resourceRedisInstanceDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing RedisInstance because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + if err := d.Set("region", region); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + if err := d.Set("alternative_location_id", flattenRedisInstanceAlternativeLocationId(res["alternativeLocationId"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("auth_enabled", flattenRedisInstanceAuthEnabled(res["authEnabled"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("authorized_network", flattenRedisInstanceAuthorizedNetwork(res["authorizedNetwork"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("connect_mode", flattenRedisInstanceConnectMode(res["connectMode"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("create_time", flattenRedisInstanceCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("current_location_id", flattenRedisInstanceCurrentLocationId(res["currentLocationId"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("display_name", flattenRedisInstanceDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("host", flattenRedisInstanceHost(res["host"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("labels", flattenRedisInstanceLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", 
err) + } + if err := d.Set("redis_configs", flattenRedisInstanceRedisConfigs(res["redisConfigs"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("location_id", flattenRedisInstanceLocationId(res["locationId"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("name", flattenRedisInstanceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("persistence_config", flattenRedisInstancePersistenceConfig(res["persistenceConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("maintenance_policy", flattenRedisInstanceMaintenancePolicy(res["maintenancePolicy"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("maintenance_schedule", flattenRedisInstanceMaintenanceSchedule(res["maintenanceSchedule"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("memory_size_gb", flattenRedisInstanceMemorySizeGb(res["memorySizeGb"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("port", flattenRedisInstancePort(res["port"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("persistence_iam_identity", flattenRedisInstancePersistenceIamIdentity(res["persistenceIamIdentity"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("redis_version", flattenRedisInstanceRedisVersion(res["redisVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("tier", flattenRedisInstanceTier(res["tier"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("transit_encryption_mode", 
flattenRedisInstanceTransitEncryptionMode(res["transitEncryptionMode"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("server_ca_certs", flattenRedisInstanceServerCaCerts(res["serverCaCerts"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("replica_count", flattenRedisInstanceReplicaCount(res["replicaCount"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("nodes", flattenRedisInstanceNodes(res["nodes"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("read_endpoint", flattenRedisInstanceReadEndpoint(res["readEndpoint"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("read_endpoint_port", flattenRedisInstanceReadEndpointPort(res["readEndpointPort"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("read_replicas_mode", flattenRedisInstanceReadReplicasMode(res["readReplicasMode"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("secondary_ip_range", flattenRedisInstanceSecondaryIpRange(res["secondaryIpRange"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("customer_managed_key", flattenRedisInstanceCustomerManagedKey(res["customerManagedKey"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + return nil +} + +func resourceRedisInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for 
Instance: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + authEnabledProp, err := expandRedisInstanceAuthEnabled(d.Get("auth_enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("auth_enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, authEnabledProp)) { + obj["authEnabled"] = authEnabledProp + } + displayNameProp, err := expandRedisInstanceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandRedisInstanceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + redisConfigsProp, err := expandRedisInstanceRedisConfigs(d.Get("redis_configs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("redis_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, redisConfigsProp)) { + obj["redisConfigs"] = redisConfigsProp + } + persistenceConfigProp, err := expandRedisInstancePersistenceConfig(d.Get("persistence_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("persistence_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, persistenceConfigProp)) { + obj["persistenceConfig"] = persistenceConfigProp + } + maintenancePolicyProp, err := expandRedisInstanceMaintenancePolicy(d.Get("maintenance_policy"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("maintenance_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maintenancePolicyProp)) { + 
obj["maintenancePolicy"] = maintenancePolicyProp + } + memorySizeGbProp, err := expandRedisInstanceMemorySizeGb(d.Get("memory_size_gb"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("memory_size_gb"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, memorySizeGbProp)) { + obj["memorySizeGb"] = memorySizeGbProp + } + replicaCountProp, err := expandRedisInstanceReplicaCount(d.Get("replica_count"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("replica_count"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, replicaCountProp)) { + obj["replicaCount"] = replicaCountProp + } + readReplicasModeProp, err := expandRedisInstanceReadReplicasMode(d.Get("read_replicas_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("read_replicas_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, readReplicasModeProp)) { + obj["readReplicasMode"] = readReplicasModeProp + } + secondaryIpRangeProp, err := expandRedisInstanceSecondaryIpRange(d.Get("secondary_ip_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("secondary_ip_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, secondaryIpRangeProp)) { + obj["secondaryIpRange"] = secondaryIpRangeProp + } + + obj, err = resourceRedisInstanceEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("auth_enabled") { + updateMask = append(updateMask, "authEnabled") + } + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, 
"labels") + } + + if d.HasChange("redis_configs") { + updateMask = append(updateMask, "redisConfigs") + } + + if d.HasChange("persistence_config") { + updateMask = append(updateMask, "persistenceConfig") + } + + if d.HasChange("maintenance_policy") { + updateMask = append(updateMask, "maintenancePolicy") + } + + if d.HasChange("memory_size_gb") { + updateMask = append(updateMask, "memorySizeGb") + } + + if d.HasChange("replica_count") { + updateMask = append(updateMask, "replicaCount") + } + + if d.HasChange("read_replicas_mode") { + updateMask = append(updateMask, "readReplicasMode") + } + + if d.HasChange("secondary_ip_range") { + updateMask = append(updateMask, "secondaryIpRange") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // if updateMask is empty we are not updating anything so skip the post + if len(updateMask) > 0 { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) + } + + err = RedisOperationWaitTime( + config, res, project, "Updating Instance", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + } + d.Partial(true) + + if d.HasChange("redis_version") { + obj := make(map[string]interface{}) + + redisVersionProp, err := expandRedisInstanceRedisVersion(d.Get("redis_version"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("redis_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, redisVersionProp)) { + obj["redisVersion"] = redisVersionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}:upgrade") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) + } + + err = RedisOperationWaitTime( + config, res, project, "Updating Instance", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceRedisInstanceRead(d, meta) +} + +func resourceRedisInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Instance %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := 
tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Instance") + } + + err = RedisOperationWaitTime( + config, res, project, "Deleting Instance", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) + return nil +} + +func resourceRedisInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenRedisInstanceAlternativeLocationId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceAuthEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceAuthorizedNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceConnectMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceCreateTime(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceCurrentLocationId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceRedisConfigs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceLocationId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenRedisInstancePersistenceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["persistence_mode"] = + flattenRedisInstancePersistenceConfigPersistenceMode(original["persistenceMode"], d, config) + transformed["rdb_snapshot_period"] = + flattenRedisInstancePersistenceConfigRdbSnapshotPeriod(original["rdbSnapshotPeriod"], d, config) + transformed["rdb_next_snapshot_time"] = + flattenRedisInstancePersistenceConfigRdbNextSnapshotTime(original["rdbNextSnapshotTime"], d, config) + transformed["rdb_snapshot_start_time"] = + flattenRedisInstancePersistenceConfigRdbSnapshotStartTime(original["rdbSnapshotStartTime"], d, config) + return []interface{}{transformed} +} +func 
flattenRedisInstancePersistenceConfigPersistenceMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstancePersistenceConfigRdbSnapshotPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstancePersistenceConfigRdbNextSnapshotTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstancePersistenceConfigRdbSnapshotStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceMaintenancePolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["create_time"] = + flattenRedisInstanceMaintenancePolicyCreateTime(original["createTime"], d, config) + transformed["update_time"] = + flattenRedisInstanceMaintenancePolicyUpdateTime(original["updateTime"], d, config) + transformed["description"] = + flattenRedisInstanceMaintenancePolicyDescription(original["description"], d, config) + transformed["weekly_maintenance_window"] = + flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindow(original["weeklyMaintenanceWindow"], d, config) + return []interface{}{transformed} +} +func flattenRedisInstanceMaintenancePolicyCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceMaintenancePolicyUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceMaintenancePolicyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "day": flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDay(original["day"], d, config), + "duration": flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDuration(original["duration"], d, config), + "start_time": flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(original["startTime"], d, config), + }) + } + return transformed +} +func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["hours"] = + flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(original["hours"], d, config) + transformed["minutes"] = + flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(original["minutes"], d, config) + transformed["seconds"] = + flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(original["seconds"], d, config) + transformed["nanos"] = + 
flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(original["nanos"], d, config) + return []interface{}{transformed} +} +func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenRedisInstanceMaintenanceSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["start_time"] = + flattenRedisInstanceMaintenanceScheduleStartTime(original["startTime"], d, config) + transformed["end_time"] = + flattenRedisInstanceMaintenanceScheduleEndTime(original["endTime"], d, config) + transformed["schedule_deadline_time"] = + flattenRedisInstanceMaintenanceScheduleScheduleDeadlineTime(original["scheduleDeadlineTime"], d, config) + return []interface{}{transformed} +} +func flattenRedisInstanceMaintenanceScheduleStartTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceMaintenanceScheduleEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceMaintenanceScheduleScheduleDeadlineTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceMemorySizeGb(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenRedisInstancePort(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenRedisInstancePersistenceIamIdentity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceRedisVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceTier(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceTransitEncryptionMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceServerCaCerts(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "serial_number": flattenRedisInstanceServerCaCertsSerialNumber(original["serialNumber"], d, config), + "cert": flattenRedisInstanceServerCaCertsCert(original["cert"], d, config), + "create_time": flattenRedisInstanceServerCaCertsCreateTime(original["createTime"], d, config), + "expire_time": flattenRedisInstanceServerCaCertsExpireTime(original["expireTime"], d, config), + "sha1_fingerprint": flattenRedisInstanceServerCaCertsSha1Fingerprint(original["sha1Fingerprint"], d, config), + }) + } + return transformed +} +func 
flattenRedisInstanceServerCaCertsSerialNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceServerCaCertsCert(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceServerCaCertsCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceServerCaCertsExpireTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceServerCaCertsSha1Fingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceReplicaCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenRedisInstanceNodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "id": flattenRedisInstanceNodesId(original["id"], d, config), + "zone": flattenRedisInstanceNodesZone(original["zone"], d, config), + }) + } + return transformed +} +func flattenRedisInstanceNodesId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenRedisInstanceNodesZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceReadEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceReadEndpointPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenRedisInstanceReadReplicasMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceSecondaryIpRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenRedisInstanceCustomerManagedKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandRedisInstanceAlternativeLocationId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceAuthEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceAuthorizedNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + fv, err := tpgresource.ParseNetworkFieldValue(v.(string), d, config) + if err != nil { + return nil, err + } + return fv.RelativeLink(), nil +} + +func expandRedisInstanceConnectMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandRedisInstanceDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandRedisInstanceRedisConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandRedisInstanceLocationId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") +} + +func expandRedisInstancePersistenceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPersistenceMode, err := expandRedisInstancePersistenceConfigPersistenceMode(original["persistence_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPersistenceMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["persistenceMode"] = transformedPersistenceMode + } + + transformedRdbSnapshotPeriod, err := 
expandRedisInstancePersistenceConfigRdbSnapshotPeriod(original["rdb_snapshot_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRdbSnapshotPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rdbSnapshotPeriod"] = transformedRdbSnapshotPeriod + } + + transformedRdbNextSnapshotTime, err := expandRedisInstancePersistenceConfigRdbNextSnapshotTime(original["rdb_next_snapshot_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRdbNextSnapshotTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rdbNextSnapshotTime"] = transformedRdbNextSnapshotTime + } + + transformedRdbSnapshotStartTime, err := expandRedisInstancePersistenceConfigRdbSnapshotStartTime(original["rdb_snapshot_start_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRdbSnapshotStartTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rdbSnapshotStartTime"] = transformedRdbSnapshotStartTime + } + + return transformed, nil +} + +func expandRedisInstancePersistenceConfigPersistenceMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstancePersistenceConfigRdbSnapshotPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstancePersistenceConfigRdbNextSnapshotTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstancePersistenceConfigRdbSnapshotStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceMaintenancePolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) 
{ + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCreateTime, err := expandRedisInstanceMaintenancePolicyCreateTime(original["create_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCreateTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["createTime"] = transformedCreateTime + } + + transformedUpdateTime, err := expandRedisInstanceMaintenancePolicyUpdateTime(original["update_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUpdateTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["updateTime"] = transformedUpdateTime + } + + transformedDescription, err := expandRedisInstanceMaintenancePolicyDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedWeeklyMaintenanceWindow, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindow(original["weekly_maintenance_window"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWeeklyMaintenanceWindow); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["weeklyMaintenanceWindow"] = transformedWeeklyMaintenanceWindow + } + + return transformed, nil +} + +func expandRedisInstanceMaintenancePolicyCreateTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceMaintenancePolicyUpdateTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceMaintenancePolicyDescription(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDay, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDay(original["day"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDay); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["day"] = transformedDay + } + + transformedDuration, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDuration(original["duration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDuration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["duration"] = transformedDuration + } + + transformedStartTime, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(original["start_time"], d, config) + if err != nil { + return nil, err + } else { + transformed["startTime"] = transformedStartTime + } + + req = append(req, transformed) + } + return req, nil +} + +func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowDuration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedHours, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(original["hours"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHours); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["hours"] = transformedHours + } + + transformedMinutes, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(original["minutes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinutes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minutes"] = transformedMinutes + } + + transformedSeconds, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(original["seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["seconds"] = transformedSeconds + } + + transformedNanos, err := expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(original["nanos"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nanos"] = transformedNanos + } + + return transformed, nil +} + +func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeMinutes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) 
{ + return v, nil +} + +func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeSeconds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeNanos(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceMemorySizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceRedisVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceReservedIpRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceTier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceTransitEncryptionMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceReplicaCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceReadReplicasMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceSecondaryIpRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandRedisInstanceCustomerManagedKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceRedisInstanceEncoder(d *schema.ResourceData, meta 
interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + region, err := tpgresource.GetRegionFromSchema("region", "location_id", d, config) + if err != nil { + return nil, err + } + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + return obj, nil +} + +func resourceRedisInstanceDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + if v, ok := res["authEnabled"].(bool); ok { + if v { + url, err := tpgresource.ReplaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}/authString") + if err != nil { + return nil, err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, fmt.Errorf("Error fetching project for Instance: %s", err) + } + + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, fmt.Errorf("Error reading AuthString: %s", err) + } + + if err := d.Set("auth_string", res["authString"]); err != nil { + return nil, fmt.Errorf("Error reading Instance: %s", err) + } + } + } else { + if err := d.Set("auth_string", ""); err != nil { + return nil, fmt.Errorf("Error reading Instance: %s", err) + } + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/resource_redis_instance_sweeper.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/resource_redis_instance_sweeper.go new file mode 100644 index 0000000000..ddc27d03d5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/redis/resource_redis_instance_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package redis + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("RedisInstance", testSweepRedisInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepRedisInstance(region string) error { + resourceName := "RedisInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + 
billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://redis.googleapis.com/v1/projects/{{project}}/locations/{{region}}/instances", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["instances"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://redis.googleapis.com/v1/projects/{{project}}/locations/{{region}}/instances/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_active_folder.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_active_folder.go similarity index 81% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_active_folder.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_active_folder.go index d48631ada5..c8b7e642e5 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_active_folder.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_active_folder.go @@ -1,9 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" resourceManagerV3 "google.golang.org/api/cloudresourcemanager/v3" ) @@ -29,8 +33,8 @@ func DataSourceGoogleActiveFolder() *schema.Resource { } func dataSourceGoogleActiveFolderRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_client_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_client_config.go new file mode 100644 index 0000000000..14fb1c54c6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_client_config.go @@ -0,0 +1,149 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + "github.com/hashicorp/terraform-provider-google/google/fwresource" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" +) + +// Ensure the data source satisfies the expected interfaces. +var ( + _ datasource.DataSource = &GoogleClientConfigDataSource{} + _ datasource.DataSourceWithConfigure = &GoogleClientConfigDataSource{} + _ fwresource.LocationDescriber = &GoogleClientConfigModel{} +) + +func NewGoogleClientConfigDataSource() datasource.DataSource { + return &GoogleClientConfigDataSource{} +} + +type GoogleClientConfigDataSource struct { + providerConfig *fwtransport.FrameworkProviderConfig +} + +type GoogleClientConfigModel struct { + // Id could/should be removed in future as it's not necessary in the plugin framework + // https://github.com/hashicorp/terraform-plugin-testing/issues/84 + Id types.String `tfsdk:"id"` + Project types.String `tfsdk:"project"` + Region types.String `tfsdk:"region"` + Zone types.String `tfsdk:"zone"` + AccessToken types.String `tfsdk:"access_token"` +} + +func (m *GoogleClientConfigModel) GetLocationDescription(providerConfig *fwtransport.FrameworkProviderConfig) fwresource.LocationDescription { + return fwresource.LocationDescription{ + RegionSchemaField: types.StringValue("region"), + ZoneSchemaField: types.StringValue("zone"), + ResourceRegion: m.Region, + ResourceZone: m.Zone, + ProviderRegion: providerConfig.Region, + ProviderZone: providerConfig.Zone, + } +} + +func (d *GoogleClientConfigDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + 
resp.TypeName = req.ProviderTypeName + "_client_config" +} + +func (d *GoogleClientConfigDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + + resp.Schema = schema.Schema{ + + Description: "Use this data source to access the configuration of the Google Cloud provider.", + MarkdownDescription: "Use this data source to access the configuration of the Google Cloud provider.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + Description: "The ID of this data source in Terraform state. It is created in a projects/{{project}}/regions/{{region}}/zones/{{zone}} format and is NOT used by the data source in requests to Google APIs.", + MarkdownDescription: "The ID of this data source in Terraform state. It is created in a projects/{{project}}/regions/{{region}}/zones/{{zone}} format and is NOT used by the data source in requests to Google APIs.", + }, + "project": schema.StringAttribute{ + Description: "The ID of the project to apply any resources to.", + MarkdownDescription: "The ID of the project to apply any resources to.", + Computed: true, + }, + "region": schema.StringAttribute{ + Description: "The region to operate under.", + MarkdownDescription: "The region to operate under.", + Computed: true, + }, + "zone": schema.StringAttribute{ + Description: "The zone to operate under.", + MarkdownDescription: "The zone to operate under.", + Computed: true, + }, + "access_token": schema.StringAttribute{ + Description: "The OAuth2 access token used by the client to authenticate against the Google Cloud API.", + MarkdownDescription: "The OAuth2 access token used by the client to authenticate against the Google Cloud API.", + Computed: true, + Sensitive: true, + }, + }, + } +} + +func (d *GoogleClientConfigDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
+ if req.ProviderData == nil { + return + } + + p, ok := req.ProviderData.(*fwtransport.FrameworkProviderConfig) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *fwtransport.FrameworkProviderConfig, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + // Required for accessing project, region, zone and tokenSource + d.providerConfig = p +} + +func (d *GoogleClientConfigDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data GoogleClientConfigModel + var metaData *fwmodels.ProviderMetaModel + var diags diag.Diagnostics + + // Read Provider meta into the meta model + resp.Diagnostics.Append(req.ProviderMeta.Get(ctx, &metaData)...) + if resp.Diagnostics.HasError() { + return + } + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + locationInfo := data.GetLocationDescription(d.providerConfig) + region, _ := locationInfo.GetRegion() + zone, _ := locationInfo.GetZone() + + data.Id = types.StringValue(fmt.Sprintf("projects/%s/regions/%s/zones/%s", d.providerConfig.Project.String(), region.String(), zone.String())) + data.Project = d.providerConfig.Project + data.Region = region + data.Zone = zone + + token, err := d.providerConfig.TokenSource.Token() + if err != nil { + diags.AddError("Error setting access_token", err.Error()) + return + } + data.AccessToken = types.StringValue(token.AccessToken) + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_client_openid_userinfo.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_client_openid_userinfo.go new file mode 100644 index 0000000000..9ab4a5b7d7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_client_openid_userinfo.go @@ -0,0 +1,111 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" +) + +// Ensure the data source satisfies the expected interfaces. 
+var ( + _ datasource.DataSource = &GoogleClientOpenIDUserinfoDataSource{} + _ datasource.DataSourceWithConfigure = &GoogleClientOpenIDUserinfoDataSource{} +) + +func NewGoogleClientOpenIDUserinfoDataSource() datasource.DataSource { + return &GoogleClientOpenIDUserinfoDataSource{} +} + +type GoogleClientOpenIDUserinfoDataSource struct { + providerConfig *fwtransport.FrameworkProviderConfig +} + +type GoogleClientOpenIDUserinfoModel struct { + // Id could/should be removed in future as it's not necessary in the plugin framework + // https://github.com/hashicorp/terraform-plugin-testing/issues/84 + Id types.String `tfsdk:"id"` + Email types.String `tfsdk:"email"` +} + +func (d *GoogleClientOpenIDUserinfoDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_client_openid_userinfo" +} + +func (d *GoogleClientOpenIDUserinfoDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: `Get OpenID userinfo about the credentials used with the Google provider, specifically the email. +This datasource enables you to export the email of the account you've authenticated the provider with; this can be used alongside data.google_client_config's access_token to perform OpenID Connect authentication with GKE and configure an RBAC role for the email used. + +Note: This resource will only work as expected if the provider is configured to use the https://www.googleapis.com/auth/userinfo.email scope! You will receive an error otherwise. The provider uses this scope by default.`, + MarkdownDescription: `Get OpenID userinfo about the credentials used with the Google provider, specifically the email. 
+This datasource enables you to export the email of the account you've authenticated the provider with; this can be used alongside data.google_client_config's access_token to perform OpenID Connect authentication with GKE and configure an RBAC role for the email used. + +~> This resource will only work as expected if the provider is configured to use the https://www.googleapis.com/auth/userinfo.email scope! You will receive an error otherwise. The provider uses this scope by default.`, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "The ID of this data source in Terraform state. Its value is the same as the email attribute. Do not use this field, use the email attribute instead.", + MarkdownDescription: "The ID of this data source in Terraform state. Its value is the same as the `email` attribute. Do not use this field, use the `email` attribute instead.", + Computed: true, + }, + "email": schema.StringAttribute{ + Description: "The email of the account used by the provider to authenticate with GCP.", + MarkdownDescription: "The email of the account used by the provider to authenticate with GCP.", + Computed: true, + }, + }, + } +} + +func (d *GoogleClientOpenIDUserinfoDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + p, ok := req.ProviderData.(*fwtransport.FrameworkProviderConfig) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *fwtransport.FrameworkProviderConfig, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + // Required for accessing userAgent and passing as an argument into a util function + d.providerConfig = p +} + +func (d *GoogleClientOpenIDUserinfoDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data GoogleClientOpenIDUserinfoModel + var metaData *fwmodels.ProviderMetaModel + var diags diag.Diagnostics + + // Read Provider meta into the meta model + resp.Diagnostics.Append(req.ProviderMeta.Get(ctx, &metaData)...) + if resp.Diagnostics.HasError() { + return + } + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + userAgent := fwtransport.GenerateFrameworkUserAgentString(metaData, d.providerConfig.UserAgent) + email := fwtransport.GetCurrentUserEmailFramework(d.providerConfig, userAgent, &diags) + + data.Email = types.StringValue(email) + data.Id = types.StringValue(email) + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_folder.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_folder.go similarity index 84% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_folder.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_folder.go index e6edb3a203..cf0472992d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_folder.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_folder.go @@ -1,10 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleFolder() *schema.Resource { @@ -53,8 +57,8 @@ func DataSourceGoogleFolder() *schema.Resource { } func dataSourceFolderRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -90,7 +94,7 @@ func canonicalFolderName(ba string) string { return "folders/" + ba } -func lookupOrganizationName(parent, userAgent string, d *schema.ResourceData, config *Config) (string, error) { +func lookupOrganizationName(parent, userAgent string, d *schema.ResourceData, config *transport_tpg.Config) 
(string, error) { if parent == "" || strings.HasPrefix(parent, "organizations/") { return parent, nil } else if strings.HasPrefix(parent, "folders/") { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_folder_organization_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_folder_organization_policy.go new file mode 100644 index 0000000000..33d4afc684 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_folder_organization_policy.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func DataSourceGoogleFolderOrganizationPolicy() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceGoogleFolderOrganizationPolicy().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "folder") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "constraint") + + return &schema.Resource{ + Read: datasourceGoogleFolderOrganizationPolicyRead, + Schema: dsSchema, + } +} + +func datasourceGoogleFolderOrganizationPolicyRead(d *schema.ResourceData, meta interface{}) error { + + d.SetId(fmt.Sprintf("%s/%s", d.Get("folder"), d.Get("constraint"))) + + return resourceGoogleFolderOrganizationPolicyRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_folders.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_folders.go similarity index 85% 
rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_folders.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_folders.go index 9ccb82746d..7691e131ff 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_folders.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_folders.go @@ -1,9 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleFolders() *schema.Resource { @@ -59,8 +63,8 @@ func DataSourceGoogleFolders() *schema.Resource { } func dataSourceGoogleFoldersRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -72,12 +76,17 @@ func dataSourceGoogleFoldersRead(d *schema.ResourceData, meta interface{}) error params["parent"] = d.Get("parent_id").(string) url := "https://cloudresourcemanager.googleapis.com/v3/folders" - url, err := addQueryParams(url, params) + url, err := transport_tpg.AddQueryParams(url, params) if err != nil { return err } - res, err := SendRequest(config, "GET", "", url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) if err != nil { return fmt.Errorf("Error 
retrieving folders: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_iam_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_iam_policy.go similarity index 91% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_iam_policy.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_iam_policy.go index 78147642fb..66532bc972 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_iam_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_iam_policy.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "encoding/json" @@ -9,6 +11,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" "google.golang.org/api/cloudresourcemanager/v1" ) @@ -16,14 +20,14 @@ import ( // to express a Google Cloud IAM policy in a data resource. 
This is an example // of how the schema would be used in a config: // -// data "google_iam_policy" "admin" { -// binding { -// role = "roles/storage.objectViewer" -// members = [ -// "user:evanbrown@google.com", -// ] -// } -// } +// data "google_iam_policy" "admin" { +// binding { +// role = "roles/storage.objectViewer" +// members = [ +// "user:evanbrown@google.com", +// ] +// } +// } func DataSourceGoogleIamPolicy() *schema.Resource { return &schema.Resource{ Read: dataSourceGoogleIamPolicyRead, @@ -124,8 +128,8 @@ func dataSourceGoogleIamPolicyRead(d *schema.ResourceData, meta interface{}) err bindingMap := map[string]*cloudresourcemanager.Binding{} for _, v := range bset.List() { binding := v.(map[string]interface{}) - members := convertStringSet(binding["members"].(*schema.Set)) - condition := expandIamCondition(binding["condition"]) + members := tpgresource.ConvertStringSet(binding["members"].(*schema.Set)) + condition := tpgiamresource.ExpandIamCondition(binding["condition"]) // Map keys are used to identify binding{} blocks that are identical except for the member lists key := binding["role"].(string) @@ -180,7 +184,7 @@ func dataSourceGoogleIamPolicyRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("policy_data", pstring); err != nil { return fmt.Errorf("Error setting policy_data: %s", err) } - d.SetId(strconv.Itoa(hashcode(pstring))) + d.SetId(strconv.Itoa(tpgresource.Hashcode(pstring))) return nil } @@ -197,7 +201,7 @@ func expandAuditConfig(set *schema.Set) []*cloudresourcemanager.AuditConfig { logConfig := y.(map[string]interface{}) auditLogConfigs = append(auditLogConfigs, &cloudresourcemanager.AuditLogConfig{ LogType: logConfig["log_type"].(string), - ExemptedMembers: convertStringArr(logConfig["exempted_members"].(*schema.Set).List()), + ExemptedMembers: tpgresource.ConvertStringArr(logConfig["exempted_members"].(*schema.Set).List()), }) } auditConfigs = append(auditConfigs, &cloudresourcemanager.AuditConfig{ diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_iam_role.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_iam_role.go new file mode 100644 index 0000000000..79f08123b8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_iam_role.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleIamRole() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleIamRoleRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "title": { + Type: schema.TypeString, + Computed: true, + }, + "included_permissions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "stage": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleIamRoleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + roleName := d.Get("name").(string) + role, err := config.NewIamClient(userAgent).Roles.Get(roleName).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Error reading IAM Role %s: %s", roleName, err)) + } + + d.SetId(role.Name) + if err := d.Set("title", role.Title); err != nil { + return fmt.Errorf("Error setting title: %s", err) + } + if err := d.Set("stage", role.Stage); err != 
nil { + return fmt.Errorf("Error setting stage: %s", err) + } + if err := d.Set("included_permissions", role.IncludedPermissions); err != nil { + return fmt.Errorf("Error setting included_permissions: %s", err) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_iam_testable_permissions.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_iam_testable_permissions.go similarity index 85% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_iam_testable_permissions.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_iam_testable_permissions.go index 74033cfd4e..eda091e5b6 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_iam_testable_permissions.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_iam_testable_permissions.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" @@ -6,6 +8,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleIamTestablePermissions() *schema.Resource { @@ -63,8 +67,8 @@ func DataSourceGoogleIamTestablePermissions() *schema.Resource { } func dataSourceGoogleIamTestablePermissionsRead(d *schema.ResourceData, meta interface{}) (err error) { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -85,7 +89,13 @@ func dataSourceGoogleIamTestablePermissionsRead(d *schema.ResourceData, meta int for { url := "https://iam.googleapis.com/v1/permissions:queryTestablePermissions" body["fullResourceName"] = d.Get("full_resource_name").(string) - res, err := SendRequest(config, "POST", "", url, userAgent, body) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: body, + }) if err != nil { return fmt.Errorf("Error retrieving permissions: %s", err) } @@ -125,7 +135,7 @@ func flattenTestablePermissionsList(v interface{}, custom_support_level string, } else { csl = p["customRolesSupportLevel"] == custom_support_level } - if csl && p["stage"] != nil && stringInSlice(stages, p["stage"].(string)) { + if csl && p["stage"] != nil && tpgresource.StringInSlice(stages, p["stage"].(string)) { permissions = append(permissions, map[string]interface{}{ "name": p["name"], "title": p["title"], diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_netblock_ip_ranges.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_netblock_ip_ranges.go similarity index 98% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_netblock_ip_ranges.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_netblock_ip_ranges.go index f51055ac64..941d7690c5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_netblock_ip_ranges.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_netblock_ip_ranges.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "encoding/json" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_organization.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_organization.go new file mode 100644 index 0000000000..53d9ebdc71 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_organization.go @@ -0,0 +1,147 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/cloudresourcemanager/v1" +) + +func DataSourceGoogleOrganization() *schema.Resource { + return &schema.Resource{ + Read: dataSourceOrganizationRead, + Schema: map[string]*schema.Schema{ + "domain": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"organization"}, + }, + "organization": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"domain"}, + }, + "org_id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "directory_customer_id": { + Type: schema.TypeString, + Computed: true, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + }, + "lifecycle_state": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceOrganizationRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + var organization *cloudresourcemanager.Organization + if v, ok := d.GetOk("domain"); ok { + filter := fmt.Sprintf("domain=%s", v.(string)) + var resp *cloudresourcemanager.SearchOrganizationsResponse + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (err error) { + resp, err = config.NewResourceManagerClient(userAgent).Organizations.Search(&cloudresourcemanager.SearchOrganizationsRequest{ + Filter: filter, + }).Do() + return err + }, + Timeout: d.Timeout(schema.TimeoutRead), + }) + if err != nil { + return fmt.Errorf("Error reading organization: %s", err) + } + + if len(resp.Organizations) == 0 { + return 
fmt.Errorf("Organization not found: %s", v) + } + + if len(resp.Organizations) > 1 { + // Attempt to find an exact domain match + for _, org := range resp.Organizations { + if org.DisplayName == v.(string) { + organization = org + break + } + } + if organization == nil { + return fmt.Errorf("Received multiple organizations in the response, but could not find an exact domain match.") + } + } else { + organization = resp.Organizations[0] + } + + } else if v, ok := d.GetOk("organization"); ok { + var resp *cloudresourcemanager.Organization + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (err error) { + resp, err = config.NewResourceManagerClient(userAgent).Organizations.Get(canonicalOrganizationName(v.(string))).Do() + return err + }, + Timeout: d.Timeout(schema.TimeoutRead), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Organization Not Found : %s", v)) + } + + organization = resp + } else { + return fmt.Errorf("one of domain or organization must be set") + } + + d.SetId(organization.Name) + if err := d.Set("name", organization.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("org_id", tpgresource.GetResourceNameFromSelfLink(organization.Name)); err != nil { + return fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("domain", organization.DisplayName); err != nil { + return fmt.Errorf("Error setting domain: %s", err) + } + if err := d.Set("create_time", organization.CreationTime); err != nil { + return fmt.Errorf("Error setting create_time: %s", err) + } + if err := d.Set("lifecycle_state", organization.LifecycleState); err != nil { + return fmt.Errorf("Error setting lifecycle_state: %s", err) + } + if organization.Owner != nil { + if err := d.Set("directory_customer_id", organization.Owner.DirectoryCustomerId); err != nil { + return fmt.Errorf("Error setting directory_customer_id: %s", err) + } + } + + return nil +} + +func 
canonicalOrganizationName(ba string) string { + if strings.HasPrefix(ba, "organizations/") { + return ba + } + + return "organizations/" + ba +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_project.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_project.go new file mode 100644 index 0000000000..2be4d00773 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_project.go @@ -0,0 +1,52 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func DataSourceGoogleProject() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceGoogleProject().Schema) + + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project_id") + + dsSchema["project_id"].ValidateFunc = verify.ValidateDSProjectID() + return &schema.Resource{ + Read: datasourceGoogleProjectRead, + Schema: dsSchema, + } +} + +func datasourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + if v, ok := d.GetOk("project_id"); ok { + project := v.(string) + d.SetId(fmt.Sprintf("projects/%s", project)) + } else { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("no project value set. 
`project_id` must be set at the resource level, or a default `project` value must be specified on the provider") + } + d.SetId(fmt.Sprintf("projects/%s", project)) + } + + id := d.Id() + + if err := resourceGoogleProjectRead(d, meta); err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found or not in ACTIVE state", id) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_project_organization_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_project_organization_policy.go new file mode 100644 index 0000000000..e38a6230bf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_project_organization_policy.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func DataSourceGoogleProjectOrganizationPolicy() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceGoogleProjectOrganizationPolicy().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "project") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "constraint") + + return &schema.Resource{ + Read: datasourceGoogleProjectOrganizationPolicyRead, + Schema: dsSchema, + } +} + +func datasourceGoogleProjectOrganizationPolicyRead(d *schema.ResourceData, meta interface{}) error { + + d.SetId(fmt.Sprintf("%s:%s", d.Get("project"), d.Get("constraint"))) + + return resourceGoogleProjectOrganizationPolicyRead(d, meta) +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_project_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_project_service.go new file mode 100644 index 0000000000..f053aac54d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_project_service.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleProjectService() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceGoogleProjectService().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "service") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleProjectServiceRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleProjectServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{service}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return resourceGoogleProjectServiceRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_projects.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_projects.go similarity index 86% rename from 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_projects.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_projects.go index aa2cc315c2..bbb5d603ef 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_projects.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_projects.go @@ -1,9 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleProjects() *schema.Resource { @@ -62,8 +66,8 @@ func DataSourceGoogleProjects() *schema.Resource { } func datasourceGoogleProjectsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -75,12 +79,17 @@ func datasourceGoogleProjectsRead(d *schema.ResourceData, meta interface{}) erro params["filter"] = d.Get("filter").(string) url := "https://cloudresourcemanager.googleapis.com/v1/projects" - url, err := addQueryParams(url, params) + url, err := transport_tpg.AddQueryParams(url, params) if err != nil { return err } - res, err := SendRequest(config, "GET", "", url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) if err != nil { return fmt.Errorf("Error retrieving 
projects: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account.go similarity index 77% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account.go index 2190403100..5d771e0a83 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_account.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account.go @@ -1,10 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleServiceAccount() *schema.Resource { @@ -44,20 +48,20 @@ func DataSourceGoogleServiceAccount() *schema.Resource { } func dataSourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - serviceAccountName, err := serviceAccountFQN(d.Get("account_id").(string), d, config) + serviceAccountName, err := tpgresource.ServiceAccountFQN(d.Get("account_id").(string), d, config) if err != nil { return err } sa, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(serviceAccountName).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Service Account %q", serviceAccountName)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Service Account %q", serviceAccountName)) } d.SetId(sa.Name) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account_access_token.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account_access_token.go new file mode 100644 index 0000000000..5ea291d764 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account_access_token.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + "log" + + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + iamcredentials "google.golang.org/api/iamcredentials/v1" +) + +func DataSourceGoogleServiceAccountAccessToken() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceGoogleServiceAccountAccessTokenRead, + Schema: map[string]*schema.Schema{ + "target_service_account": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRegexp("(" + strings.Join(verify.PossibleServiceAccountNames, "|") + ")"), + }, + "access_token": { + Type: schema.TypeString, + Sensitive: true, + Computed: true, + }, + "scopes": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return tpgresource.CanonicalizeServiceScope(v.(string)) + }, + }, + // ValidateFunc is not yet supported on lists or sets. 
+ }, + "delegates": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateRegexp(verify.ServiceAccountLinkRegex), + }, + }, + "lifetime": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateDuration(), // duration <=3600s; TODO: support validateDuration(min,max) + Default: "3600s", + }, + }, + } +} + +func dataSourceGoogleServiceAccountAccessTokenRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + log.Printf("[INFO] Acquire Service Account AccessToken for %s", d.Get("target_service_account").(string)) + + service := config.NewIamCredentialsClient(userAgent) + + name := fmt.Sprintf("projects/-/serviceAccounts/%s", d.Get("target_service_account").(string)) + tokenRequest := &iamcredentials.GenerateAccessTokenRequest{ + Lifetime: d.Get("lifetime").(string), + Delegates: tpgresource.ConvertStringSet(d.Get("delegates").(*schema.Set)), + Scope: tpgresource.CanonicalizeServiceScopes(tpgresource.ConvertStringSet(d.Get("scopes").(*schema.Set))), + } + at, err := service.Projects.ServiceAccounts.GenerateAccessToken(name, tokenRequest).Do() + if err != nil { + return err + } + + d.SetId(name) + if err := d.Set("access_token", at.AccessToken); err != nil { + return fmt.Errorf("Error setting access_token: %s", err) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account_id_token.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account_id_token.go new file mode 100644 index 0000000000..9dd4169f81 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account_id_token.go @@ -0,0 +1,128 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + iamcredentials "google.golang.org/api/iamcredentials/v1" + "google.golang.org/api/idtoken" + "google.golang.org/api/option" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "golang.org/x/net/context" +) + +const ( + userInfoScope = "https://www.googleapis.com/auth/userinfo.email" +) + +func DataSourceGoogleServiceAccountIdToken() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceGoogleServiceAccountIdTokenRead, + Schema: map[string]*schema.Schema{ + "target_audience": { + Type: schema.TypeString, + Required: true, + }, + "target_service_account": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateRegexp("(" + strings.Join(verify.PossibleServiceAccountNames, "|") + ")"), + }, + "delegates": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateRegexp(verify.ServiceAccountLinkRegex), + }, + }, + "include_email": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + // Not used currently + // https://github.com/googleapis/google-api-go-client/issues/542 + // "format": { + // Type: schema.TypeString, + // Optional: true, + // ValidateFunc: validation.StringInSlice([]string{ + // "FULL", "STANDARD"}, true), + // Default: "STANDARD", + // }, + "id_token": { + Type: schema.TypeString, + Sensitive: true, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleServiceAccountIdTokenRead(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + targetAudience := d.Get("target_audience").(string) + creds, err := config.GetCredentials([]string{userInfoScope}, false) + if err != nil { + return fmt.Errorf("error calling getCredentials(): %v", err) + } + + targetServiceAccount := d.Get("target_service_account").(string) + // If a target service account is provided, use the API to generate the idToken + if targetServiceAccount != "" { + // Use + // https://cloud.google.com/iam/docs/reference/credentials/rest/v1/projects.serviceAccounts/generateIdToken + service := config.NewIamCredentialsClient(userAgent) + name := fmt.Sprintf("projects/-/serviceAccounts/%s", targetServiceAccount) + tokenRequest := &iamcredentials.GenerateIdTokenRequest{ + Audience: targetAudience, + IncludeEmail: d.Get("include_email").(bool), + Delegates: tpgresource.ConvertStringSet(d.Get("delegates").(*schema.Set)), + } + at, err := service.Projects.ServiceAccounts.GenerateIdToken(name, tokenRequest).Do() + if err != nil { + return fmt.Errorf("error calling iamcredentials.GenerateIdToken: %v", err) + } + + d.SetId(targetServiceAccount) + if err := d.Set("id_token", at.Token); err != nil { + return fmt.Errorf("Error setting id_token: %s", err) + } + + return nil + } + + ctx := context.Background() + co := []option.ClientOption{} + if creds.JSON != nil { + co = append(co, idtoken.WithCredentialsJSON(creds.JSON)) + } + + idTokenSource, err := idtoken.NewTokenSource(ctx, targetAudience, co...) 
+ if err != nil { + return fmt.Errorf("unable to retrieve TokenSource: %v", err) + } + idToken, err := idTokenSource.Token() + if err != nil { + return fmt.Errorf("unable to retrieve Token: %v", err) + } + + d.SetId(targetAudience) + if err := d.Set("id_token", idToken.AccessToken); err != nil { + return fmt.Errorf("Error setting id_token: %s", err) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account_jwt.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account_jwt.go new file mode 100644 index 0000000000..03463893a7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account_jwt.go @@ -0,0 +1,111 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + iamcredentials "google.golang.org/api/iamcredentials/v1" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleServiceAccountJwt() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleServiceAccountJwtRead, + Schema: map[string]*schema.Schema{ + "payload": { + Type: schema.TypeString, + Required: true, + Description: `A JSON-encoded JWT claims set that will be included in the signed JWT.`, + }, + "expires_in": { + Type: schema.TypeInt, + Optional: true, + Description: "Number of seconds until the JWT expires. 
If set and non-zero an `exp` claim will be added to the payload derived from the current timestamp plus expires_in seconds.", + }, + "target_service_account": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRegexp("(" + strings.Join(verify.PossibleServiceAccountNames, "|") + ")"), + }, + "delegates": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidateRegexp(verify.ServiceAccountLinkRegex), + }, + }, + "jwt": { + Type: schema.TypeString, + Sensitive: true, + Computed: true, + }, + }, + } +} + +var ( + DataSourceGoogleServiceAccountJwtNow = time.Now +) + +func dataSourceGoogleServiceAccountJwtRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + + if err != nil { + return err + } + + payload := d.Get("payload").(string) + + if expiresIn := d.Get("expires_in").(int); expiresIn != 0 { + var decoded map[string]interface{} + + if err := json.Unmarshal([]byte(payload), &decoded); err != nil { + return fmt.Errorf("error decoding `payload` while adding `exp` field: %w", err) + } + + decoded["exp"] = DataSourceGoogleServiceAccountJwtNow().Add(time.Duration(expiresIn) * time.Second).Unix() + + payloadBytesWithExp, err := json.Marshal(decoded) + + if err != nil { + return fmt.Errorf("error re-encoding `payload` while adding `exp` field: %w", err) + } + + payload = string(payloadBytesWithExp) + } + + name := fmt.Sprintf("projects/-/serviceAccounts/%s", d.Get("target_service_account").(string)) + + jwtRequest := &iamcredentials.SignJwtRequest{ + Payload: payload, + Delegates: tpgresource.ConvertStringSet(d.Get("delegates").(*schema.Set)), + } + + service := config.NewIamCredentialsClient(userAgent) + + jwtResponse, err := service.Projects.ServiceAccounts.SignJwt(name, jwtRequest).Do() + + if err != nil { + return fmt.Errorf("error calling 
iamcredentials.SignJwt: %w", err) + } + + d.SetId(name) + + if err := d.Set("jwt", jwtResponse.SignedJwt); err != nil { + return fmt.Errorf("error setting jwt attribute: %w", err) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account_key.go new file mode 100644 index 0000000000..0f7d9d46b3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/data_source_google_service_account_key.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + + "regexp" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func DataSourceGoogleServiceAccountKey() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleServiceAccountKeyRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRegexp(verify.ServiceAccountKeyNameRegex), + }, + "public_key_type": { + Type: schema.TypeString, + Default: "TYPE_X509_PEM_FILE", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"TYPE_NONE", "TYPE_X509_PEM_FILE", "TYPE_RAW_PUBLIC_KEY"}, false), + }, + "project": { + Type: schema.TypeString, + Optional: true, + }, + "key_algorithm": { + Type: schema.TypeString, + Computed: true, + }, + "public_key": { + Type: schema.TypeString, + Computed: true, + }, + }, + } 
+} + +func dataSourceGoogleServiceAccountKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + keyName := d.Get("name").(string) + + // Validate name since interpolated values (i.e from a key or service + // account resource) will not get validated at plan time. + r := regexp.MustCompile(verify.ServiceAccountKeyNameRegex) + if !r.MatchString(keyName) { + return fmt.Errorf("invalid key name %q does not match regexp %q", keyName, verify.ServiceAccountKeyNameRegex) + } + + publicKeyType := d.Get("public_key_type").(string) + + // Confirm the service account key exists + sak, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Keys.Get(keyName).PublicKeyType(publicKeyType).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Service Account Key %q", keyName)) + } + + d.SetId(sak.Name) + + if err := d.Set("name", sak.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("key_algorithm", sak.KeyAlgorithm); err != nil { + return fmt.Errorf("Error setting key_algorithm: %s", err) + } + if err := d.Set("public_key", sak.PublicKeyData); err != nil { + return fmt.Errorf("Error setting public_key: %s", err) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/iam_folder.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/iam_folder.go new file mode 100644 index 0000000000..661a32469c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/iam_folder.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudresourcemanager/v1" + resourceManagerV3 "google.golang.org/api/cloudresourcemanager/v3" +) + +var IamFolderSchema = map[string]*schema.Schema{ + "folder": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, +} + +type FolderIamUpdater struct { + folderId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewFolderIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + return &FolderIamUpdater{ + folderId: CanonicalFolderId(d.Get("folder").(string)), + d: d, + Config: config, + }, nil +} + +func FolderIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { + if !strings.HasPrefix(d.Id(), "folders/") { + d.SetId(fmt.Sprintf("folders/%s", d.Id())) + } + if err := d.Set("folder", d.Id()); err != nil { + return fmt.Errorf("Error setting folder: %s", err) + } + return nil +} + +func (u *FolderIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + return GetFolderIamPolicyByFolderName(u.folderId, userAgent, u.Config) +} + +func (u *FolderIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + v2Policy, err := v1PolicyToV2(policy) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = 
u.Config.NewResourceManagerV3Client(userAgent).Folders.SetIamPolicy(u.folderId, &resourceManagerV3.SetIamPolicyRequest{ + Policy: v2Policy, + UpdateMask: "bindings,etag,auditConfigs", + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *FolderIamUpdater) GetResourceId() string { + return u.folderId +} + +func (u *FolderIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-folder-%s", u.folderId) +} + +func (u *FolderIamUpdater) DescribeResource() string { + return fmt.Sprintf("folder %q", u.folderId) +} + +func CanonicalFolderId(folder string) string { + if strings.HasPrefix(folder, "folders/") { + return folder + } + + return "folders/" + folder +} + +// v1 and v2 policy are identical +func v1PolicyToV2(in *cloudresourcemanager.Policy) (*resourceManagerV3.Policy, error) { + out := &resourceManagerV3.Policy{} + err := tpgresource.Convert(in, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a v1 policy to a v2 policy: {{err}}", err) + } + return out, nil +} + +func v2PolicyToV1(in *resourceManagerV3.Policy) (*cloudresourcemanager.Policy, error) { + out := &cloudresourcemanager.Policy{} + err := tpgresource.Convert(in, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a v2 policy to a v1 policy: {{err}}", err) + } + return out, nil +} + +// Retrieve the existing IAM Policy for a folder +func GetFolderIamPolicyByFolderName(folderName, userAgent string, config *transport_tpg.Config) (*cloudresourcemanager.Policy, error) { + p, err := config.NewResourceManagerV3Client(userAgent).Folders.GetIamPolicy(folderName, + &resourceManagerV3.GetIamPolicyRequest{ + Options: &resourceManagerV3.GetPolicyOptions{ + RequestedPolicyVersion: tpgiamresource.IamPolicyVersion, + }, + }).Do() + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for folder %q: {{err}}", folderName), err) + } + + 
v1Policy, err := v2PolicyToV1(p) + if err != nil { + return nil, err + } + + return v1Policy, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/iam_organization.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/iam_organization.go new file mode 100644 index 0000000000..8917da155f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/iam_organization.go @@ -0,0 +1,95 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamOrganizationSchema = map[string]*schema.Schema{ + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The numeric ID of the organization in which you want to manage the audit logging config.`, + }, +} + +type OrganizationIamUpdater struct { + resourceId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewOrganizationIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + return &OrganizationIamUpdater{ + resourceId: d.Get("org_id").(string), + d: d, + Config: config, + }, nil +} + +func OrgIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { + if err := d.Set("org_id", d.Id()); err != nil { + return fmt.Errorf("Error setting org_id: %s", err) + } + return nil +} + +func (u *OrganizationIamUpdater) GetResourceIamPolicy() 
(*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewResourceManagerClient(userAgent).Organizations.GetIamPolicy( + "organizations/"+u.resourceId, + &cloudresourcemanager.GetIamPolicyRequest{ + Options: &cloudresourcemanager.GetPolicyOptions{ + RequestedPolicyVersion: tpgiamresource.IamPolicyVersion, + }, + }, + ).Do() + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return p, nil +} + +func (u *OrganizationIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewResourceManagerClient(userAgent).Organizations.SetIamPolicy("organizations/"+u.resourceId, &cloudresourcemanager.SetIamPolicyRequest{ + Policy: policy, + UpdateMask: "bindings,etag,auditConfigs", + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *OrganizationIamUpdater) GetResourceId() string { + return u.resourceId +} + +func (u *OrganizationIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-organization-%s", u.resourceId) +} + +func (u *OrganizationIamUpdater) DescribeResource() string { + return fmt.Sprintf("organization %q", u.resourceId) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/iam_project.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/iam_project.go new file mode 100644 index 0000000000..08b2c5ba57 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/iam_project.go @@ -0,0 +1,108 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamProjectSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: CompareProjectName, + }, +} + +type ProjectIamUpdater struct { + resourceId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewProjectIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + return &ProjectIamUpdater{ + resourceId: d.Get("project").(string), + d: d, + Config: config, + }, nil +} + +func ProjectIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { + if err := d.Set("project", d.Id()); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + return nil +} + +func (u *ProjectIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + projectId := tpgresource.GetResourceNameFromSelfLink(u.resourceId) + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewResourceManagerClient(userAgent).Projects.GetIamPolicy(projectId, + &cloudresourcemanager.GetIamPolicyRequest{ + Options: &cloudresourcemanager.GetPolicyOptions{ + RequestedPolicyVersion: tpgiamresource.IamPolicyVersion, + }, + }).Do() + + if err != nil { + return nil, 
errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return p, nil +} + +func (u *ProjectIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + projectId := tpgresource.GetResourceNameFromSelfLink(u.resourceId) + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewResourceManagerClient(userAgent).Projects.SetIamPolicy(projectId, + &cloudresourcemanager.SetIamPolicyRequest{ + Policy: policy, + UpdateMask: "bindings,etag,auditConfigs", + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ProjectIamUpdater) GetResourceId() string { + return u.resourceId +} + +func (u *ProjectIamUpdater) GetMutexKey() string { + return getProjectIamPolicyMutexKey(u.resourceId) +} + +func (u *ProjectIamUpdater) DescribeResource() string { + return fmt.Sprintf("project %q", u.resourceId) +} + +func CompareProjectName(_, old, new string, _ *schema.ResourceData) bool { + // We can either get "projects/project-id" or "project-id", so strip any prefixes + return tpgresource.GetResourceNameFromSelfLink(old) == tpgresource.GetResourceNameFromSelfLink(new) +} + +func getProjectIamPolicyMutexKey(pid string) string { + return fmt.Sprintf("iam-project-%s", pid) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/iam_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/iam_service_account.go new file mode 100644 index 0000000000..6e6369fa34 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/iam_service_account.go @@ -0,0 +1,119 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/iam/v1" +) + +var IamServiceAccountSchema = map[string]*schema.Schema{ + "service_account_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(verify.ServiceAccountLinkRegex), + }, +} + +type ServiceAccountIamUpdater struct { + serviceAccountId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewServiceAccountIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + return &ServiceAccountIamUpdater{ + serviceAccountId: d.Get("service_account_id").(string), + d: d, + Config: config, + }, nil +} + +func ServiceAccountIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { + if err := d.Set("service_account_id", d.Id()); err != nil { + return fmt.Errorf("Error setting service_account_id: %s", err) + } + return nil +} + +func (u *ServiceAccountIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewIamClient(userAgent).Projects.ServiceAccounts.GetIamPolicy(u.serviceAccountId).OptionsRequestedPolicyVersion(tpgiamresource.IamPolicyVersion).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + 
cloudResourcePolicy, err := iamToResourceManagerPolicy(p) + if err != nil { + return nil, err + } + + return cloudResourcePolicy, nil +} + +func (u *ServiceAccountIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + iamPolicy, err := resourceManagerToIamPolicy(policy) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewIamClient(userAgent).Projects.ServiceAccounts.SetIamPolicy(u.GetResourceId(), &iam.SetIamPolicyRequest{ + Policy: iamPolicy, + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ServiceAccountIamUpdater) GetResourceId() string { + return u.serviceAccountId +} + +func (u *ServiceAccountIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-service-account-%s", u.serviceAccountId) +} + +func (u *ServiceAccountIamUpdater) DescribeResource() string { + return fmt.Sprintf("service account '%s'", u.serviceAccountId) +} + +func resourceManagerToIamPolicy(p *cloudresourcemanager.Policy) (*iam.Policy, error) { + out := &iam.Policy{} + err := tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a v1 policy to a iam policy: {{err}}", err) + } + return out, nil +} + +func iamToResourceManagerPolicy(p *iam.Policy) (*cloudresourcemanager.Policy, error) { + out := &cloudresourcemanager.Policy{} + err := tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a iam policy to a v1 policy: {{err}}", err) + } + return out, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_billing_subaccount.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_billing_subaccount.go similarity 
index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_billing_subaccount.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_billing_subaccount.go index 990f88a7fb..991d6530a8 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_billing_subaccount.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_billing_subaccount.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" @@ -7,6 +9,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/services/billing" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/cloudbilling/v1" ) @@ -30,7 +36,7 @@ func ResourceBillingSubaccount() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, }, "deletion_policy": { Type: schema.TypeString, @@ -56,8 +62,8 @@ func ResourceBillingSubaccount() *schema.Resource { } func resourceBillingSubaccountCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -67,7 +73,7 @@ func resourceBillingSubaccountCreate(d *schema.ResourceData, meta interface{}) e 
billingAccount := &cloudbilling.BillingAccount{ DisplayName: displayName, - MasterBillingAccount: canonicalBillingAccountName(masterBillingAccount), + MasterBillingAccount: billing.CanonicalBillingAccountName(masterBillingAccount), } res, err := config.NewBillingClient(userAgent).BillingAccounts.Create(billingAccount).Do() @@ -81,8 +87,8 @@ func resourceBillingSubaccountCreate(d *schema.ResourceData, meta interface{}) e } func resourceBillingSubaccountRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -91,7 +97,7 @@ func resourceBillingSubaccountRead(d *schema.ResourceData, meta interface{}) err billingAccount, err := config.NewBillingClient(userAgent).BillingAccounts.Get(d.Id()).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Billing Subaccount Not Found : %s", id)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Billing Subaccount Not Found : %s", id)) } if err := d.Set("name", billingAccount.Name); err != nil { @@ -114,8 +120,8 @@ func resourceBillingSubaccountRead(d *schema.ResourceData, meta interface{}) err } func resourceBillingSubaccountUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -126,15 +132,15 @@ func resourceBillingSubaccountUpdate(d *schema.ResourceData, meta interface{}) e } _, err := config.NewBillingClient(userAgent).BillingAccounts.Patch(d.Id(), billingAccount).UpdateMask("display_name").Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Error updating billing account : %s", d.Id())) + return 
transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Error updating billing account : %s", d.Id())) } } return resourceBillingSubaccountRead(d, meta) } func resourceBillingSubaccountDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -148,7 +154,7 @@ func resourceBillingSubaccountDelete(d *schema.ResourceData, meta interface{}) e } _, err := config.NewBillingClient(userAgent).BillingAccounts.Patch(d.Id(), billingAccount).UpdateMask("display_name").Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Error updating billing account : %s", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Error updating billing account : %s", d.Id())) } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_folder.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_folder.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_folder.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_folder.go index d62727da7b..074017ba0d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_folder.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_folder.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "encoding/json" @@ -7,6 +9,8 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" resourceManagerV3 "google.golang.org/api/cloudresourcemanager/v3" ) @@ -69,8 +73,8 @@ func ResourceGoogleFolder() *schema.Resource { } func resourceGoogleFolderCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -79,19 +83,22 @@ func resourceGoogleFolderCreate(d *schema.ResourceData, meta interface{}) error parent := d.Get("parent").(string) var op *resourceManagerV3.Operation - err = RetryTimeDuration(func() error { - var reqErr error - op, reqErr = config.NewResourceManagerV3Client(userAgent).Folders.Create(&resourceManagerV3.Folder{ - DisplayName: displayName, - Parent: parent, - }).Do() - return reqErr - }, d.Timeout(schema.TimeoutCreate)) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + var reqErr error + op, reqErr = config.NewResourceManagerV3Client(userAgent).Folders.Create(&resourceManagerV3.Folder{ + DisplayName: displayName, + Parent: parent, + }).Do() + return reqErr + }, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating folder '%s' in '%s': %s", displayName, parent, err) } - opAsMap, err := ConvertToMap(op) + opAsMap, err := tpgresource.ConvertToMap(op) if err != nil { return err } @@ -121,15 +128,15 @@ func resourceGoogleFolderCreate(d *schema.ResourceData, meta interface{}) error } func resourceGoogleFolderRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - 
userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } folder, err := getGoogleFolder(d.Id(), userAgent, d, config) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Folder Not Found : %s", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Folder Not Found : %s", d.Id())) } if err := d.Set("name", folder.Name); err != nil { @@ -156,8 +163,8 @@ func resourceGoogleFolderRead(d *schema.ResourceData, meta interface{}) error { } func resourceGoogleFolderUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -165,11 +172,13 @@ func resourceGoogleFolderUpdate(d *schema.ResourceData, meta interface{}) error d.Partial(true) if d.HasChange("display_name") { - err := retry(func() error { - _, reqErr := config.NewResourceManagerV3Client(userAgent).Folders.Patch(d.Id(), &resourceManagerV3.Folder{ - DisplayName: displayName, - }).Do() - return reqErr + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + _, reqErr := config.NewResourceManagerV3Client(userAgent).Folders.Patch(d.Id(), &resourceManagerV3.Folder{ + DisplayName: displayName, + }).Do() + return reqErr + }, }) if err != nil { return fmt.Errorf("Error updating display_name to '%s': %s", displayName, err) @@ -180,18 +189,20 @@ func resourceGoogleFolderUpdate(d *schema.ResourceData, meta interface{}) error newParent := d.Get("parent").(string) var op *resourceManagerV3.Operation - err := retry(func() error { - var reqErr error - op, reqErr = config.NewResourceManagerV3Client(userAgent).Folders.Move(d.Id(), &resourceManagerV3.MoveFolderRequest{ - 
DestinationParent: newParent, - }).Do() - return reqErr + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + var reqErr error + op, reqErr = config.NewResourceManagerV3Client(userAgent).Folders.Move(d.Id(), &resourceManagerV3.MoveFolderRequest{ + DestinationParent: newParent, + }).Do() + return reqErr + }, }) if err != nil { return fmt.Errorf("Error moving folder '%s' to '%s': %s", displayName, newParent, err) } - opAsMap, err := ConvertToMap(op) + opAsMap, err := tpgresource.ConvertToMap(op) if err != nil { return err } @@ -208,24 +219,27 @@ func resourceGoogleFolderUpdate(d *schema.ResourceData, meta interface{}) error } func resourceGoogleFolderDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } displayName := d.Get("display_name").(string) var op *resourceManagerV3.Operation - err = RetryTimeDuration(func() error { - var reqErr error - op, reqErr = config.NewResourceManagerV3Client(userAgent).Folders.Delete(d.Id()).Do() - return reqErr - }, d.Timeout(schema.TimeoutDelete)) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + var reqErr error + op, reqErr = config.NewResourceManagerV3Client(userAgent).Folders.Delete(d.Id()).Do() + return reqErr + }, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { return fmt.Errorf("Error deleting folder '%s': %s", displayName, err) } - opAsMap, err := ConvertToMap(op) + opAsMap, err := tpgresource.ConvertToMap(op) if err != nil { return err } @@ -252,13 +266,16 @@ func resourceGoogleFolderImportState(d *schema.ResourceData, m interface{}) ([]* // Util to get a Folder resource from API. Note that folder described by name is not necessarily the // ResourceData resource. 
-func getGoogleFolder(folderName, userAgent string, d *schema.ResourceData, config *Config) (*resourceManagerV3.Folder, error) { +func getGoogleFolder(folderName, userAgent string, d *schema.ResourceData, config *transport_tpg.Config) (*resourceManagerV3.Folder, error) { var folder *resourceManagerV3.Folder - err := RetryTimeDuration(func() error { - var reqErr error - folder, reqErr = config.NewResourceManagerV3Client(userAgent).Folders.Get(folderName).Do() - return reqErr - }, d.Timeout(schema.TimeoutRead)) + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + var reqErr error + folder, reqErr = config.NewResourceManagerV3Client(userAgent).Folders.Get(folderName).Do() + return reqErr + }, + Timeout: d.Timeout(schema.TimeoutRead), + }) if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_folder_organization_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_folder_organization_policy.go new file mode 100644 index 0000000000..71a6d6fd07 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_folder_organization_policy.go @@ -0,0 +1,197 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudresourcemanager/v1" +) + +func ResourceGoogleFolderOrganizationPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleFolderOrganizationPolicyCreate, + Read: resourceGoogleFolderOrganizationPolicyRead, + Update: resourceGoogleFolderOrganizationPolicyUpdate, + Delete: resourceGoogleFolderOrganizationPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceFolderOrgPolicyImporter, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Update: schema.DefaultTimeout(4 * time.Minute), + Read: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + Schema: tpgresource.MergeSchemas( + schemaOrganizationPolicy, + map[string]*schema.Schema{ + "folder": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name of the folder to set the policy for. Its format is folders/{folder_id}.`, + }, + }, + ), + UseJSONNumber: true, + } +} + +func resourceFolderOrgPolicyImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "folders/(?P[^/]+)/constraints/(?P[^/]+)", + "folders/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)"}, + d, config); err != nil { + return nil, err + } + + if d.Get("folder") == "" || d.Get("constraint") == "" { + return nil, fmt.Errorf("unable to parse folder or constraint. 
Check import formats") + } + + if err := d.Set("folder", "folders/"+d.Get("folder").(string)); err != nil { + return nil, fmt.Errorf("Error setting folder: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func resourceGoogleFolderOrganizationPolicyCreate(d *schema.ResourceData, meta interface{}) error { + d.SetId(fmt.Sprintf("%s/%s", d.Get("folder"), d.Get("constraint"))) + + if isOrganizationPolicyUnset(d) { + return resourceGoogleFolderOrganizationPolicyDelete(d, meta) + } + + if err := setFolderOrganizationPolicy(d, meta); err != nil { + return err + } + + return resourceGoogleFolderOrganizationPolicyRead(d, meta) +} + +func resourceGoogleFolderOrganizationPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + folder := CanonicalFolderId(d.Get("folder").(string)) + + var policy *cloudresourcemanager.OrgPolicy + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (getErr error) { + policy, getErr = config.NewResourceManagerClient(userAgent).Folders.GetOrgPolicy(folder, &cloudresourcemanager.GetOrgPolicyRequest{ + Constraint: CanonicalOrgPolicyConstraint(d.Get("constraint").(string)), + }).Do() + return getErr + }, + Timeout: d.Timeout(schema.TimeoutRead), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Organization policy for %s", folder)) + } + + if err := d.Set("constraint", policy.Constraint); err != nil { + return fmt.Errorf("Error setting constraint: %s", err) + } + if err := d.Set("boolean_policy", flattenBooleanOrganizationPolicy(policy.BooleanPolicy)); err != nil { + return fmt.Errorf("Error setting boolean_policy: %s", err) + } + if err := d.Set("list_policy", flattenListOrganizationPolicy(policy.ListPolicy)); err != nil { + return fmt.Errorf("Error setting list_policy: %s", err) + } + if err := d.Set("restore_policy", 
flattenRestoreOrganizationPolicy(policy.RestoreDefault)); err != nil { + return fmt.Errorf("Error setting restore_policy: %s", err) + } + if err := d.Set("version", policy.Version); err != nil { + return fmt.Errorf("Error setting version: %s", err) + } + if err := d.Set("etag", policy.Etag); err != nil { + return fmt.Errorf("Error setting etag: %s", err) + } + if err := d.Set("update_time", policy.UpdateTime); err != nil { + return fmt.Errorf("Error setting update_time: %s", err) + } + + return nil +} + +func resourceGoogleFolderOrganizationPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + if isOrganizationPolicyUnset(d) { + return resourceGoogleFolderOrganizationPolicyDelete(d, meta) + } + + if err := setFolderOrganizationPolicy(d, meta); err != nil { + return err + } + + return resourceGoogleFolderOrganizationPolicyRead(d, meta) +} + +func resourceGoogleFolderOrganizationPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + folder := CanonicalFolderId(d.Get("folder").(string)) + + return transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (delErr error) { + _, delErr = config.NewResourceManagerClient(userAgent).Folders.ClearOrgPolicy(folder, &cloudresourcemanager.ClearOrgPolicyRequest{ + Constraint: CanonicalOrgPolicyConstraint(d.Get("constraint").(string)), + }).Do() + return delErr + }, + Timeout: d.Timeout(schema.TimeoutDelete), + }) +} + +func setFolderOrganizationPolicy(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + folder := CanonicalFolderId(d.Get("folder").(string)) + + listPolicy, err := expandListOrganizationPolicy(d.Get("list_policy").([]interface{})) + if err != nil { + return err + } + + 
restoreDefault, err := expandRestoreOrganizationPolicy(d.Get("restore_policy").([]interface{})) + if err != nil { + return err + } + + return transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (setErr error) { + _, setErr = config.NewResourceManagerClient(userAgent).Folders.SetOrgPolicy(folder, &cloudresourcemanager.SetOrgPolicyRequest{ + Policy: &cloudresourcemanager.OrgPolicy{ + Constraint: CanonicalOrgPolicyConstraint(d.Get("constraint").(string)), + BooleanPolicy: expandBooleanOrganizationPolicy(d.Get("boolean_policy").([]interface{})), + ListPolicy: listPolicy, + RestoreDefault: restoreDefault, + Version: int64(d.Get("version").(int)), + Etag: d.Get("etag").(string), + }, + }).Do() + return setErr + }, + Timeout: d.Timeout(schema.TimeoutCreate), + }) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_organization_iam_custom_role.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_organization_iam_custom_role.go similarity index 82% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_organization_iam_custom_role.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_organization_iam_custom_role.go index c27ba1937a..5992f06ccf 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_organization_iam_custom_role.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_organization_iam_custom_role.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" "google.golang.org/api/iam/v1" ) @@ -21,10 +26,11 @@ func ResourceGoogleOrganizationIamCustomRole() *schema.Resource { Schema: map[string]*schema.Schema{ "role_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The role id to use for this role.`, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The role id to use for this role.`, + ValidateFunc: verify.ValidateIAMCustomRoleID, }, "org_id": { Type: schema.TypeString, @@ -50,7 +56,7 @@ func ResourceGoogleOrganizationIamCustomRole() *schema.Resource { Default: "GA", Description: `The current launch stage of the role. 
Defaults to GA.`, ValidateFunc: validation.StringInSlice([]string{"ALPHA", "BETA", "GA", "DEPRECATED", "DISABLED", "EAP"}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("ALPHA"), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("ALPHA"), }, "description": { Type: schema.TypeString, @@ -73,8 +79,8 @@ func ResourceGoogleOrganizationIamCustomRole() *schema.Resource { } func resourceGoogleOrganizationIamCustomRoleCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -100,7 +106,7 @@ func resourceGoogleOrganizationIamCustomRoleCreate(d *schema.ResourceData, meta // If a role with same name exists and is enabled, just return error return fmt.Errorf("Custom project role %s already exists and must be imported", roleId) } - } else if err := handleNotFoundError(err, d, fmt.Sprintf("Custom Organization Role %q", roleId)); err == nil { + } else if err := transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Custom Organization Role %q", roleId)); err == nil { // If no role was found, actually create a new role. 
role, err := config.NewIamClient(userAgent).Organizations.Roles.Create(orgId, &iam.CreateRoleRequest{ RoleId: d.Get("role_id").(string), @@ -108,7 +114,7 @@ func resourceGoogleOrganizationIamCustomRoleCreate(d *schema.ResourceData, meta Title: d.Get("title").(string), Description: d.Get("description").(string), Stage: d.Get("stage").(string), - IncludedPermissions: convertStringSet(d.Get("permissions").(*schema.Set)), + IncludedPermissions: tpgresource.ConvertStringSet(d.Get("permissions").(*schema.Set)), }, }).Do() if err != nil { @@ -124,18 +130,18 @@ func resourceGoogleOrganizationIamCustomRoleCreate(d *schema.ResourceData, meta } func resourceGoogleOrganizationIamCustomRoleRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } role, err := config.NewIamClient(userAgent).Organizations.Roles.Get(d.Id()).Do() if err != nil { - return handleNotFoundError(err, d, d.Id()) + return transport_tpg.HandleNotFoundError(err, d, d.Id()) } - parsedRoleName, err := ParseOrganizationCustomRoleName(role.Name) + parsedRoleName, err := tpgresource.ParseOrganizationCustomRoleName(role.Name) if err != nil { return err } @@ -169,8 +175,8 @@ func resourceGoogleOrganizationIamCustomRoleRead(d *schema.ResourceData, meta in } func resourceGoogleOrganizationIamCustomRoleUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -196,7 +202,7 @@ func resourceGoogleOrganizationIamCustomRoleUpdate(d *schema.ResourceData, meta Title: d.Get("title").(string), Description: d.Get("description").(string), Stage: 
d.Get("stage").(string), - IncludedPermissions: convertStringSet(d.Get("permissions").(*schema.Set)), + IncludedPermissions: tpgresource.ConvertStringSet(d.Get("permissions").(*schema.Set)), }).Do() if err != nil { @@ -209,8 +215,8 @@ func resourceGoogleOrganizationIamCustomRoleUpdate(d *schema.ResourceData, meta } func resourceGoogleOrganizationIamCustomRoleDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_organization_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_organization_policy.go similarity index 83% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_organization_policy.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_organization_policy.go index aa96904584..fd7fcf1d3d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_organization_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_organization_policy.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" @@ -6,6 +8,8 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/cloudresourcemanager/v1" ) @@ -18,7 +22,7 @@ var schemaOrganizationPolicy = map[string]*schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The name of the Constraint the Policy is configuring, for example, serviceuser.services.`, }, "boolean_policy": { @@ -160,7 +164,7 @@ func ResourceGoogleOrganizationPolicy() *schema.Resource { Delete: schema.DefaultTimeout(4 * time.Minute), }, - Schema: mergeSchemas( + Schema: tpgresource.MergeSchemas( schemaOrganizationPolicy, map[string]*schema.Schema{ "org_id": { @@ -187,22 +191,25 @@ func resourceGoogleOrganizationPolicyCreate(d *schema.ResourceData, meta interfa } func resourceGoogleOrganizationPolicyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } org := "organizations/" + d.Get("org_id").(string) var policy *cloudresourcemanager.OrgPolicy - err = RetryTimeDuration(func() (readErr error) { - policy, readErr = config.NewResourceManagerClient(userAgent).Organizations.GetOrgPolicy(org, &cloudresourcemanager.GetOrgPolicyRequest{ - Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), - }).Do() - return readErr - }, d.Timeout(schema.TimeoutRead)) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (readErr error) { + policy, readErr = 
config.NewResourceManagerClient(userAgent).Organizations.GetOrgPolicy(org, &cloudresourcemanager.GetOrgPolicyRequest{ + Constraint: CanonicalOrgPolicyConstraint(d.Get("constraint").(string)), + }).Do() + return readErr + }, + Timeout: d.Timeout(schema.TimeoutRead), + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Organization policy for %s", org)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Organization policy for %s", org)) } if err := d.Set("constraint", policy.Constraint); err != nil { @@ -243,19 +250,22 @@ func resourceGoogleOrganizationPolicyUpdate(d *schema.ResourceData, meta interfa } func resourceGoogleOrganizationPolicyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } org := "organizations/" + d.Get("org_id").(string) - err = RetryTimeDuration(func() error { - _, dErr := config.NewResourceManagerClient(userAgent).Organizations.ClearOrgPolicy(org, &cloudresourcemanager.ClearOrgPolicyRequest{ - Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), - }).Do() - return dErr - }, d.Timeout(schema.TimeoutDelete)) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + _, dErr := config.NewResourceManagerClient(userAgent).Organizations.ClearOrgPolicy(org, &cloudresourcemanager.ClearOrgPolicyRequest{ + Constraint: CanonicalOrgPolicyConstraint(d.Get("constraint").(string)), + }).Do() + return dErr + }, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { return err } @@ -293,8 +303,8 @@ func isOrganizationPolicyUnset(d *schema.ResourceData) bool { } func setOrganizationPolicy(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config 
:= meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -311,19 +321,22 @@ func setOrganizationPolicy(d *schema.ResourceData, meta interface{}) error { return err } - err = RetryTimeDuration(func() (setErr error) { - _, setErr = config.NewResourceManagerClient(userAgent).Organizations.SetOrgPolicy(org, &cloudresourcemanager.SetOrgPolicyRequest{ - Policy: &cloudresourcemanager.OrgPolicy{ - Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), - BooleanPolicy: expandBooleanOrganizationPolicy(d.Get("boolean_policy").([]interface{})), - ListPolicy: listPolicy, - RestoreDefault: restoreDefault, - Version: int64(d.Get("version").(int)), - Etag: d.Get("etag").(string), - }, - }).Do() - return setErr - }, d.Timeout(schema.TimeoutCreate)) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (setErr error) { + _, setErr = config.NewResourceManagerClient(userAgent).Organizations.SetOrgPolicy(org, &cloudresourcemanager.SetOrgPolicyRequest{ + Policy: &cloudresourcemanager.OrgPolicy{ + Constraint: CanonicalOrgPolicyConstraint(d.Get("constraint").(string)), + BooleanPolicy: expandBooleanOrganizationPolicy(d.Get("boolean_policy").([]interface{})), + ListPolicy: listPolicy, + RestoreDefault: restoreDefault, + Version: int64(d.Get("version").(int)), + Etag: d.Get("etag").(string), + }, + }).Do() + return setErr + }, + Timeout: d.Timeout(schema.TimeoutCreate), + }) return err } @@ -403,11 +416,11 @@ func flattenListOrganizationPolicy(policy *cloudresourcemanager.ListPolicy) []ma }} case len(policy.AllowedValues) > 0: listPolicy["allow"] = []interface{}{map[string]interface{}{ - "values": schema.NewSet(schema.HashString, convertStringArrToInterface(policy.AllowedValues)), + "values": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(policy.AllowedValues)), }} case len(policy.DeniedValues) > 0: listPolicy["deny"] = 
[]interface{}{map[string]interface{}{ - "values": schema.NewSet(schema.HashString, convertStringArrToInterface(policy.DeniedValues)), + "values": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(policy.DeniedValues)), }} } @@ -437,7 +450,7 @@ func expandListOrganizationPolicy(configured []interface{}) (*cloudresourcemanag if all { allValues = "ALLOW" } else { - allowedValues = convertStringArr(values.List()) + allowedValues = tpgresource.ConvertStringArr(values.List()) } } @@ -449,7 +462,7 @@ func expandListOrganizationPolicy(configured []interface{}) (*cloudresourcemanag if all { allValues = "DENY" } else { - deniedValues = convertStringArr(values.List()) + deniedValues = tpgresource.ConvertStringArr(values.List()) } } @@ -464,7 +477,7 @@ func expandListOrganizationPolicy(configured []interface{}) (*cloudresourcemanag }, nil } -func canonicalOrgPolicyConstraint(constraint string) string { +func CanonicalOrgPolicyConstraint(constraint string) string { if strings.HasPrefix(constraint, "constraints/") { return constraint } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project.go new file mode 100644 index 0000000000..38f93c59fd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project.go @@ -0,0 +1,817 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "context" + "fmt" + "log" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" + tpgserviceusage "github.com/hashicorp/terraform-provider-google/google/services/serviceusage" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/cloudbilling/v1" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/googleapi" + "google.golang.org/api/serviceusage/v1" +) + +type ServicesCall interface { + Header() http.Header + Do(opts ...googleapi.CallOption) (*serviceusage.Operation, error) +} + +// ResourceGoogleProject returns a *schema.Resource that allows a customer +// to declare a Google Cloud Project resource. +func ResourceGoogleProject() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + Create: resourceGoogleProjectCreate, + Read: resourceGoogleProjectRead, + Update: resourceGoogleProjectUpdate, + Delete: resourceGoogleProjectDelete, + + Importer: &schema.ResourceImporter{ + State: resourceProjectImportState, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Read: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + MigrateState: resourceGoogleProjectMigrateState, + + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateProjectID(), + Description: `The project ID. 
Changing this forces a new project to be created.`, + }, + "skip_delete": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `If true, the Terraform resource can be deleted without deleting the Project via the Google API.`, + }, + "auto_create_network": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Create the 'default' network automatically. Default true. If set to false, the default network will be deleted. Note that, for quota purposes, you will still need to have 1 network slot available to create the project successfully, even if you set auto_create_network to false, since the network will exist momentarily.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateProjectName(), + Description: `The display name of the project.`, + }, + "org_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"folder_id"}, + Description: `The numeric ID of the organization this project belongs to. Changing this forces a new project to be created. Only one of org_id or folder_id may be specified. If the org_id is specified then the project is created at the top level. Changing this forces the project to be migrated to the newly specified organization.`, + }, + "folder_id": { + Type: schema.TypeString, + Optional: true, + StateFunc: ParseFolderId, + ConflictsWith: []string{"org_id"}, + Description: `The numeric ID of the folder this project should be created under. Only one of org_id or folder_id may be specified. If the folder_id is specified, then the project is created under the specified folder. Changing this forces the project to be migrated to the newly specified folder.`, + }, + "number": { + Type: schema.TypeString, + Computed: true, + Description: `The numeric identifier of the project.`, + }, + "billing_account": { + Type: schema.TypeString, + Optional: true, + Description: `The alphanumeric ID of the billing account this project belongs to. 
The user or service account performing this operation with Terraform must have Billing Account Administrator privileges (roles/billing.admin) in the organization. See Google Cloud Billing API Access Control for more details.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A set of key/value label pairs to assign to the project.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + if err = resourceGoogleProjectCheckPreRequisites(config, d, userAgent); err != nil { + return fmt.Errorf("failed pre-requisites: %v", err) + } + + var pid string + pid = d.Get("project_id").(string) + + log.Printf("[DEBUG]: Creating new project %q", pid) + project := &cloudresourcemanager.Project{ + ProjectId: pid, + Name: d.Get("name").(string), + } + + if err = getParentResourceId(d, project); err != nil { + return err + } + + if _, ok := d.GetOk("labels"); ok { + project.Labels = tpgresource.ExpandLabels(d) + } + + var op *cloudresourcemanager.Operation + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (reqErr error) { + op, reqErr = config.NewResourceManagerClient(userAgent).Projects.Create(project).Do() + return reqErr + }, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("error creating project %s (%s): %s. 
"+ + "If you received a 403 error, make sure you have the"+ + " `roles/resourcemanager.projectCreator` permission", + project.ProjectId, project.Name, err) + } + + d.SetId(fmt.Sprintf("projects/%s", pid)) + + // Wait for the operation to complete + opAsMap, err := tpgresource.ConvertToMap(op) + if err != nil { + return err + } + + waitErr := ResourceManagerOperationWaitTime(config, opAsMap, "creating folder", userAgent, d.Timeout(schema.TimeoutCreate)) + if waitErr != nil { + // The resource wasn't actually created + d.SetId("") + return waitErr + } + + // Set the billing account + if _, ok := d.GetOk("billing_account"); ok { + err = updateProjectBillingAccount(d, config, userAgent) + if err != nil { + return err + } + } + + // Sleep for 10s, letting the billing account settle before other resources + // try to use this project. + time.Sleep(10 * time.Second) + + err = resourceGoogleProjectRead(d, meta) + if err != nil { + return err + } + + // There's no such thing as "don't auto-create network", only "delete the network + // post-creation" - but that's what it's called in the UI and let's not confuse + // people if we don't have to. The GCP Console is doing the same thing - creating + // a network and deleting it in the background. + if !d.Get("auto_create_network").(bool) { + // The compute API has to be enabled before we can delete a network. 
+ + billingProject := project.ProjectId + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + if err = EnableServiceUsageProjectServices([]string{"compute.googleapis.com"}, project.ProjectId, billingProject, userAgent, config, d.Timeout(schema.TimeoutCreate)); err != nil { + return errwrap.Wrapf("Error enabling the Compute Engine API required to delete the default network: {{err}} ", err) + } + + if err = forceDeleteComputeNetwork(d, config, project.ProjectId, "default"); err != nil { + if transport_tpg.IsGoogleApiErrorWithCode(err, 404) { + log.Printf("[DEBUG] Default network not found for project %q, no need to delete it", project.ProjectId) + } else { + return errwrap.Wrapf(fmt.Sprintf("Error deleting default network in project %s: {{err}}", project.ProjectId), err) + } + } + } + return nil +} + +func resourceGoogleProjectCheckPreRequisites(config *transport_tpg.Config, d *schema.ResourceData, userAgent string) error { + ib, ok := d.GetOk("billing_account") + if !ok { + return nil + } + ba := "billingAccounts/" + ib.(string) + const perm = "billing.resourceAssociations.create" + req := &cloudbilling.TestIamPermissionsRequest{ + Permissions: []string{perm}, + } + resp, err := config.NewBillingClient(userAgent).BillingAccounts.TestIamPermissions(ba, req).Do() + if err != nil { + return fmt.Errorf("failed to check permissions on billing account %q: %v", ba, err) + } + if !tpgresource.StringInSlice(resp.Permissions, perm) { + return fmt.Errorf("missing permission on %q: %v", ba, perm) + } + if !d.Get("auto_create_network").(bool) { + call := config.NewServiceUsageClient(userAgent).Services.Get("projects/00000000000/services/serviceusage.googleapis.com") + if config.UserProjectOverride { + if billingProject, err := tpgresource.GetBillingProject(d, config); err == nil { + call.Header().Add("X-Goog-User-Project", billingProject) + } + } + _, err := 
call.Do() + switch { + // We are querying a dummy project since the call is already coming from the quota project. + // If the API is enabled we get a not found message or accessNotConfigured if API is not enabled. + case err.Error() == "googleapi: Error 403: Project '00000000000' not found or permission denied., forbidden": + return nil + case strings.Contains(err.Error(), "accessNotConfigured"): + return fmt.Errorf("API serviceusage not enabled.\nFound error: %v", err) + } + } + return nil +} + +func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] + + p, err := readGoogleProject(d, config, userAgent) + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 403 && strings.Contains(gerr.Message, "caller does not have permission") { + return fmt.Errorf("the user does not have permission to access Project %q or it may not exist", pid) + } + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project %q", pid)) + } + + // If the project has been deleted from outside Terraform, remove it from state file. 
+ if p.LifecycleState != "ACTIVE" { + log.Printf("[WARN] Removing project '%s' because its state is '%s' (requires 'ACTIVE').", pid, p.LifecycleState) + d.SetId("") + return nil + } + + if err := d.Set("project_id", pid); err != nil { + return fmt.Errorf("Error setting project_id: %s", err) + } + if err := d.Set("number", strconv.FormatInt(p.ProjectNumber, 10)); err != nil { + return fmt.Errorf("Error setting number: %s", err) + } + if err := d.Set("name", p.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("labels", p.Labels); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + + if p.Parent != nil { + switch p.Parent.Type { + case "organization": + if err := d.Set("org_id", p.Parent.Id); err != nil { + return fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("folder_id", ""); err != nil { + return fmt.Errorf("Error setting folder_id: %s", err) + } + case "folder": + if err := d.Set("folder_id", p.Parent.Id); err != nil { + return fmt.Errorf("Error setting folder_id: %s", err) + } + if err := d.Set("org_id", ""); err != nil { + return fmt.Errorf("Error setting org_id: %s", err) + } + } + } + + var ba *cloudbilling.ProjectBillingInfo + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (reqErr error) { + ba, reqErr = config.NewBillingClient(userAgent).Projects.GetBillingInfo(PrefixedProject(pid)).Do() + return reqErr + }, + Timeout: d.Timeout(schema.TimeoutRead), + }) + // Read the billing account + if err != nil && !transport_tpg.IsApiNotEnabledError(err) { + return fmt.Errorf("Error reading billing account for project %q: %v", PrefixedProject(pid), err) + } else if transport_tpg.IsApiNotEnabledError(err) { + log.Printf("[WARN] Billing info API not enabled, please enable it to read billing info about project %q: %s", pid, err.Error()) + } else if ba.BillingAccountName != "" { + // BillingAccountName is contains the resource name of the billing account + // 
associated with the project, if any. For example, + // `billingAccounts/012345-567890-ABCDEF`. We care about the ID and not + // the `billingAccounts/` prefix, so we need to remove that. If the + // prefix ever changes, we'll validate to make sure it's something we + // recognize. + _ba := strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") + if ba.BillingAccountName == _ba { + return fmt.Errorf("Error parsing billing account for project %q. Expected value to begin with 'billingAccounts/' but got %s", PrefixedProject(pid), ba.BillingAccountName) + } + if err := d.Set("billing_account", _ba); err != nil { + return fmt.Errorf("Error setting billing_account: %s", err) + } + } + + return nil +} + +func PrefixedProject(pid string) string { + return "projects/" + pid +} + +func getParentResourceId(d *schema.ResourceData, p *cloudresourcemanager.Project) error { + orgId := d.Get("org_id").(string) + folderId := d.Get("folder_id").(string) + + if orgId != "" && folderId != "" { + return fmt.Errorf("'org_id' and 'folder_id' cannot be both set.") + } + + if orgId != "" { + p.Parent = &cloudresourcemanager.ResourceId{ + Id: orgId, + Type: "organization", + } + } + + if folderId != "" { + p.Parent = &cloudresourcemanager.ResourceId{ + Id: ParseFolderId(folderId), + Type: "folder", + } + } + + return nil +} + +func ParseFolderId(v interface{}) string { + folderId := v.(string) + if strings.HasPrefix(folderId, "folders/") { + return folderId[8:] + } + return folderId +} + +func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] + project_name := d.Get("name").(string) + + // Read the project + // we need the project even though refresh has already been called + // because the API doesn't support patch, so we need the actual 
object + p, err := readGoogleProject(d, config, userAgent) + if err != nil { + if transport_tpg.IsGoogleApiErrorWithCode(err, 404) { + return fmt.Errorf("Project %q does not exist.", pid) + } + return fmt.Errorf("Error checking project %q: %s", pid, err) + } + + d.Partial(true) + + // Project display name has changed + if ok := d.HasChange("name"); ok { + p.Name = project_name + // Do update on project + if p, err = updateProject(config, d, project_name, userAgent, p); err != nil { + return err + } + } + + // Project parent has changed + if d.HasChange("org_id") || d.HasChange("folder_id") { + if err := getParentResourceId(d, p); err != nil { + return err + } + + // Do update on project + if p, err = updateProject(config, d, project_name, userAgent, p); err != nil { + return err + } + } + + // Billing account has changed + if ok := d.HasChange("billing_account"); ok { + err = updateProjectBillingAccount(d, config, userAgent) + if err != nil { + return err + } + } + + // Project Labels have changed + if ok := d.HasChange("labels"); ok { + p.Labels = tpgresource.ExpandLabels(d) + + // Do Update on project + if p, err = updateProject(config, d, project_name, userAgent, p); err != nil { + return err + } + } + + d.Partial(false) + return resourceGoogleProjectRead(d, meta) +} + +func updateProject(config *transport_tpg.Config, d *schema.ResourceData, projectName, userAgent string, desiredProject *cloudresourcemanager.Project) (*cloudresourcemanager.Project, error) { + var newProj *cloudresourcemanager.Project + if err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (updateErr error) { + newProj, updateErr = config.NewResourceManagerClient(userAgent).Projects.Update(desiredProject.ProjectId, desiredProject).Do() + return updateErr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + }); err != nil { + return nil, fmt.Errorf("Error updating project %q: %s", projectName, err) + } + return newProj, nil +} + +func resourceGoogleProjectDelete(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + // Only delete projects if skip_delete isn't set + if !d.Get("skip_delete").(bool) { + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] + if err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + _, delErr := config.NewResourceManagerClient(userAgent).Projects.Delete(pid).Do() + return delErr + }, + Timeout: d.Timeout(schema.TimeoutDelete), + }); err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project %s", pid)) + } + } + d.SetId("") + return nil +} + +func resourceProjectImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] + // Prevent importing via project number, this will cause issues later + matched, err := regexp.MatchString("^\\d+$", pid) + if err != nil { + return nil, fmt.Errorf("Error matching project %q: %s", pid, err) + } + + if matched { + return nil, fmt.Errorf("Error importing project %q, please use project_id", pid) + } + + // Ensure the id format includes projects/ + d.SetId(fmt.Sprintf("projects/%s", pid)) + + // Explicitly set to default as a workaround for `ImportStateVerify` tests, and so that users + // don't see a diff immediately after import. + if err := d.Set("auto_create_network", true); err != nil { + return nil, fmt.Errorf("Error setting auto_create_network: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +// Delete a compute network along with the firewall rules inside it. 
+func forceDeleteComputeNetwork(d *schema.ResourceData, config *transport_tpg.Config, projectId, networkName string) error { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + // Read the network from the API so we can get the correct self link format. We can't construct it from the + // base path because it might not line up exactly (compute.googleapis.com vs www.googleapis.com) + net, err := config.NewComputeClient(userAgent).Networks.Get(projectId, networkName).Do() + if err != nil { + return err + } + + token := "" + for paginate := true; paginate; { + filter := fmt.Sprintf("network eq %s", net.SelfLink) + resp, err := config.NewComputeClient(userAgent).Firewalls.List(projectId).Filter(filter).Do() + if err != nil { + return errwrap.Wrapf("Error listing firewall rules in proj: {{err}}", err) + } + + log.Printf("[DEBUG] Found %d firewall rules in %q network", len(resp.Items), networkName) + + for _, firewall := range resp.Items { + op, err := config.NewComputeClient(userAgent).Firewalls.Delete(projectId, firewall.Name).Do() + if err != nil { + return errwrap.Wrapf("Error deleting firewall: {{err}}", err) + } + err = tpgcompute.ComputeOperationWaitTime(config, op, projectId, "Deleting Firewall", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + } + + token = resp.NextPageToken + paginate = token != "" + } + + return deleteComputeNetwork(projectId, networkName, userAgent, config) +} + +func updateProjectBillingAccount(d *schema.ResourceData, config *transport_tpg.Config, userAgent string) error { + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] + name := d.Get("billing_account").(string) + ba := &cloudbilling.ProjectBillingInfo{} + // If we're unlinking an existing billing account, an empty request does that, not an empty-string billing account. 
+ if name != "" { + ba.BillingAccountName = "billingAccounts/" + name + } + updateBillingInfoFunc := func() error { + _, err := config.NewBillingClient(userAgent).Projects.UpdateBillingInfo(PrefixedProject(pid), ba).Do() + return err + } + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: updateBillingInfoFunc, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + if err := d.Set("billing_account", ""); err != nil { + return fmt.Errorf("Error setting billing_account: %s", err) + } + if _err, ok := err.(*googleapi.Error); ok { + return fmt.Errorf("Error setting billing account %q for project %q: %v", name, PrefixedProject(pid), _err) + } + return fmt.Errorf("Error setting billing account %q for project %q: %v", name, PrefixedProject(pid), err) + } + for retries := 0; retries < 3; retries++ { + var ba *cloudbilling.ProjectBillingInfo + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (reqErr error) { + ba, reqErr = config.NewBillingClient(userAgent).Projects.GetBillingInfo(PrefixedProject(pid)).Do() + return reqErr + }, + Timeout: d.Timeout(schema.TimeoutRead), + }) + if err != nil { + return fmt.Errorf("Error getting billing info for project %q: %v", PrefixedProject(pid), err) + } + baName := strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") + if baName == name { + return nil + } + time.Sleep(3 * time.Second) + } + return fmt.Errorf("Timed out waiting for billing account to return correct value. 
Waiting for %s, got %s.", + name, strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/")) +} + +func deleteComputeNetwork(project, network, userAgent string, config *transport_tpg.Config) error { + op, err := config.NewComputeClient(userAgent).Networks.Delete( + project, network).Do() + if err != nil { + return errwrap.Wrapf("Error deleting network: {{err}}", err) + } + + err = tpgcompute.ComputeOperationWaitTime(config, op, project, "Deleting Network", userAgent, 10*time.Minute) + if err != nil { + return err + } + return nil +} + +func readGoogleProject(d *schema.ResourceData, config *transport_tpg.Config, userAgent string) (*cloudresourcemanager.Project, error) { + var p *cloudresourcemanager.Project + // Read the project + parts := strings.Split(d.Id(), "/") + pid := parts[len(parts)-1] + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (reqErr error) { + p, reqErr = config.NewResourceManagerClient(userAgent).Projects.Get(pid).Do() + return reqErr + }, + Timeout: d.Timeout(schema.TimeoutRead), + }) + return p, err +} + +// Enables services. WARNING: Use BatchRequestEnableServices for better batching if possible. +func EnableServiceUsageProjectServices(services []string, project, billingProject, userAgent string, config *transport_tpg.Config, timeout time.Duration) error { + // ServiceUsage does not allow more than 20 services to be enabled per + // batchEnable API call. See + // https://cloud.google.com/service-usage/docs/reference/rest/v1/services/batchEnable + for i := 0; i < len(services); i += maxServiceUsageBatchSize { + j := i + maxServiceUsageBatchSize + if j > len(services) { + j = len(services) + } + nextBatch := services[i:j] + if len(nextBatch) == 0 { + // All batches finished, return. 
+ return nil + } + + if err := doEnableServicesRequest(nextBatch, project, billingProject, userAgent, config, timeout); err != nil { + return err + } + log.Printf("[DEBUG] Finished enabling next batch of %d project services: %+v", len(nextBatch), nextBatch) + } + + log.Printf("[DEBUG] Verifying that all services are enabled") + return waitForServiceUsageEnabledServices(services, project, billingProject, userAgent, config, timeout) +} + +func doEnableServicesRequest(services []string, project, billingProject, userAgent string, config *transport_tpg.Config, timeout time.Duration) error { + var op *serviceusage.Operation + + // errors can come up at multiple points, so there are a few levels of + // retrying here. + // logicalErr / waitErr: overall error on the logical operation (enabling services) + // but possibly also errors when retrieving the LRO (these are rare) + // err / reqErr: precondition errors when sending the request received instead of an LRO + logicalErr := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + var reqErr error + var call ServicesCall + if len(services) == 1 { + // BatchEnable returns an error for a single item, so enable with single endpoint + name := fmt.Sprintf("projects/%s/services/%s", project, services[0]) + req := &serviceusage.EnableServiceRequest{} + call = config.NewServiceUsageClient(userAgent).Services.Enable(name, req) + } else { + // Batch enable for multiple services. 
+ name := fmt.Sprintf("projects/%s", project) + req := &serviceusage.BatchEnableServicesRequest{ServiceIds: services} + call = config.NewServiceUsageClient(userAgent).Services.BatchEnable(name, req) + } + + if config.UserProjectOverride && billingProject != "" { + call.Header().Add("X-Goog-User-Project", billingProject) + } + + op, reqErr = call.Do() + return handleServiceUsageRetryablePreconditionError(reqErr) + }, + Timeout: timeout, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.ServiceUsageServiceBeingActivated}, + }) + if err != nil { + return errwrap.Wrapf("failed on request preconditions: {{err}}", err) + } + + waitErr := tpgserviceusage.ServiceUsageOperationWait(config, op, billingProject, fmt.Sprintf("Enable Project %q Services: %+v", project, services), userAgent, timeout) + if waitErr != nil { + return waitErr + } + + return nil + }, + Timeout: timeout, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.ServiceUsageInternalError160009}, + }) + + if logicalErr != nil { + return errwrap.Wrapf("failed to enable services: {{err}}", logicalErr) + } + + return nil +} + +// Handle errors that are retryable at call time for serviceusage +// Specifically, errors in https://cloud.google.com/service-usage/docs/reference/rest/v1/services/batchEnable#response-body +// Errors in operations are handled separately. +// NOTE(rileykarson): This should probably be turned into a retry predicate +func handleServiceUsageRetryablePreconditionError(err error) error { + if err == nil { + return nil + } + if gerr, ok := err.(*googleapi.Error); ok { + if (gerr.Code == 400 || gerr.Code == 412) && gerr.Message == "Precondition check failed." 
{ + return &googleapi.Error{ + Code: 503, + Message: "api returned \"precondition failed\" while enabling service", + } + } + } + return err +} + +// Retrieve a project's services from the API +// if a service has been renamed, this function will list both the old and new +// forms of the service. LIST responses are expected to return only the old or +// new form, but we'll always return both. +func ListCurrentlyEnabledServices(project, billingProject, userAgent string, config *transport_tpg.Config, timeout time.Duration) (map[string]struct{}, error) { + log.Printf("[DEBUG] Listing enabled services for project %s", project) + apiServices := make(map[string]struct{}) + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + ctx := context.Background() + call := config.NewServiceUsageClient(userAgent).Services.List(fmt.Sprintf("projects/%s", project)) + if config.UserProjectOverride && billingProject != "" { + call.Header().Add("X-Goog-User-Project", billingProject) + } + return call.Fields("services/name,nextPageToken").Filter("state:ENABLED"). + Pages(ctx, func(r *serviceusage.ListServicesResponse) error { + for _, v := range r.Services { + // services are returned as "projects/{{project}}/services/{{name}}" + name := tpgresource.GetResourceNameFromSelfLink(v.Name) + + // if name not in ignoredProjectServicesSet + if _, ok := ignoredProjectServicesSet[name]; !ok { + apiServices[name] = struct{}{} + + // if a service has been renamed, set both. We'll deal + // with setting the right values later. 
+ if v, ok := renamedServicesByOldAndNewServiceNames[name]; ok { + log.Printf("[DEBUG] Adding service alias for %s to enabled services: %s", name, v) + apiServices[v] = struct{}{} + } + } + } + return nil + }) + }, + Timeout: timeout, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Failed to list enabled services for project %s: {{err}}", project), err) + } + return apiServices, nil +} + +// waitForServiceUsageEnabledServices doesn't resend enable requests - it just +// waits for service enablement status to propagate. Essentially, it waits until +// all services show up as enabled when listing services on the project. +func waitForServiceUsageEnabledServices(services []string, project, billingProject, userAgent string, config *transport_tpg.Config, timeout time.Duration) error { + missing := make([]string, 0, len(services)) + delay := time.Duration(0) + interval := time.Second + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + // Get the list of services that are enabled on the project + enabledServices, err := ListCurrentlyEnabledServices(project, billingProject, userAgent, config, timeout) + if err != nil { + return err + } + + missing := make([]string, 0, len(services)) + for _, s := range services { + if _, ok := enabledServices[s]; !ok { + missing = append(missing, s) + } + } + if len(missing) > 0 { + log.Printf("[DEBUG] waiting %v before reading project %s services...", delay, project) + time.Sleep(delay) + delay += interval + interval += delay + + // Spoof a googleapi Error so retryTime will try again + return &googleapi.Error{ + Code: 503, + Message: fmt.Sprintf("The service(s) %q are still being enabled for project %s. 
This isn't a real API error, this is just eventual consistency.", missing, project), + } + } + return nil + }, + Timeout: timeout, + }) + if err != nil { + return errwrap.Wrap(err, fmt.Errorf("failed to enable some service(s) %q for project %s", missing, project)) + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_default_service_accounts.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_default_service_accounts.go similarity index 91% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_default_service_accounts.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_default_service_accounts.go index cd3dab25fa..e0459feb21 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_default_service_accounts.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_default_service_accounts.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" @@ -8,6 +10,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" "google.golang.org/api/iam/v1" ) @@ -34,7 +39,7 @@ func ResourceGoogleProjectDefaultServiceAccounts() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateProjectID(), + ValidateFunc: verify.ValidateProjectID(), Description: `The project ID where service accounts are created.`, }, "action": { @@ -64,8 +69,8 @@ func ResourceGoogleProjectDefaultServiceAccounts() *schema.Resource { } func resourceGoogleProjectDefaultServiceAccountsDoAction(d *schema.ResourceData, meta interface{}, action, uniqueID, email, project string) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -136,8 +141,8 @@ func resourceGoogleProjectDefaultServiceAccountsDoAction(d *schema.ResourceData, } func resourceGoogleProjectDefaultServiceAccountsCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -169,7 +174,7 @@ func resourceGoogleProjectDefaultServiceAccountsCreate(d *schema.ResourceData, m return nil } -func listServiceAccounts(config *Config, d *schema.ResourceData, userAgent string) 
([]*iam.ServiceAccount, error) { +func listServiceAccounts(config *transport_tpg.Config, d *schema.ResourceData, userAgent string) ([]*iam.ServiceAccount, error) { pid := d.Get("project").(string) response, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.List(PrefixedProject(pid)).Do() if err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_iam_custom_role.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_iam_custom_role.go similarity index 82% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_iam_custom_role.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_iam_custom_role.go index 969f2cbf2c..f91705479d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_project_iam_custom_role.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_iam_custom_role.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" @@ -6,6 +8,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" "google.golang.org/api/iam/v1" ) @@ -26,7 +31,7 @@ func ResourceGoogleProjectIamCustomRole() *schema.Resource { Required: true, ForceNew: true, Description: `The camel case role id to use for this role. 
Cannot contain - characters.`, - ValidateFunc: validateIAMCustomRoleID, + ValidateFunc: verify.ValidateIAMCustomRoleID, }, "title": { Type: schema.TypeString, @@ -53,7 +58,7 @@ func ResourceGoogleProjectIamCustomRole() *schema.Resource { Default: "GA", Description: `The current launch stage of the role. Defaults to GA.`, ValidateFunc: validation.StringInSlice([]string{"ALPHA", "BETA", "GA", "DEPRECATED", "DISABLED", "EAP"}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("ALPHA"), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("ALPHA"), }, "description": { Type: schema.TypeString, @@ -76,13 +81,13 @@ func ResourceGoogleProjectIamCustomRole() *schema.Resource { } func resourceGoogleProjectIamCustomRoleCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -102,7 +107,7 @@ func resourceGoogleProjectIamCustomRoleCreate(d *schema.ResourceData, meta inter // If a role with same name exists and is enabled, just return error return fmt.Errorf("Custom project role %s already exists and must be imported", roleId) } - } else if err := handleNotFoundError(err, d, fmt.Sprintf("Custom Project Role %q", roleId)); err == nil { + } else if err := transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Custom Project Role %q", roleId)); err == nil { // If no role is found, actually create a new role. 
role, err := config.NewIamClient(userAgent).Projects.Roles.Create("projects/"+project, &iam.CreateRoleRequest{ RoleId: d.Get("role_id").(string), @@ -110,7 +115,7 @@ func resourceGoogleProjectIamCustomRoleCreate(d *schema.ResourceData, meta inter Title: d.Get("title").(string), Description: d.Get("description").(string), Stage: d.Get("stage").(string), - IncludedPermissions: convertStringSet(d.Get("permissions").(*schema.Set)), + IncludedPermissions: tpgresource.ConvertStringSet(d.Get("permissions").(*schema.Set)), }, }).Do() if err != nil { @@ -132,8 +137,8 @@ func extractProjectFromProjectIamCustomRoleID(id string) string { } func resourceGoogleProjectIamCustomRoleRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -142,10 +147,10 @@ func resourceGoogleProjectIamCustomRoleRead(d *schema.ResourceData, meta interfa role, err := config.NewIamClient(userAgent).Projects.Roles.Get(d.Id()).Do() if err != nil { - return handleNotFoundError(err, d, d.Id()) + return transport_tpg.HandleNotFoundError(err, d, d.Id()) } - if err := d.Set("role_id", GetResourceNameFromSelfLink(role.Name)); err != nil { + if err := d.Set("role_id", tpgresource.GetResourceNameFromSelfLink(role.Name)); err != nil { return fmt.Errorf("Error setting role_id: %s", err) } if err := d.Set("title", role.Title); err != nil { @@ -174,8 +179,8 @@ func resourceGoogleProjectIamCustomRoleRead(d *schema.ResourceData, meta interfa } func resourceGoogleProjectIamCustomRoleUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ 
-200,7 +205,7 @@ func resourceGoogleProjectIamCustomRoleUpdate(d *schema.ResourceData, meta inter Title: d.Get("title").(string), Description: d.Get("description").(string), Stage: d.Get("stage").(string), - IncludedPermissions: convertStringSet(d.Get("permissions").(*schema.Set)), + IncludedPermissions: tpgresource.ConvertStringSet(d.Get("permissions").(*schema.Set)), }).Do() if err != nil { @@ -213,8 +218,8 @@ func resourceGoogleProjectIamCustomRoleUpdate(d *schema.ResourceData, meta inter } func resourceGoogleProjectIamCustomRoleDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -228,8 +233,8 @@ func resourceGoogleProjectIamCustomRoleDelete(d *schema.ResourceData, meta inter } func resourceGoogleProjectIamCustomRoleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/roles/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)", @@ -238,7 +243,7 @@ func resourceGoogleProjectIamCustomRoleImport(d *schema.ResourceData, meta inter } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/roles/{{role_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/roles/{{role_id}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_migrate.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_migrate.go new file mode 
100644 index 0000000000..14406fe4e8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_migrate.go @@ -0,0 +1,67 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudresourcemanager/v1" +) + +func resourceGoogleProjectMigrateState(v int, s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if s.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return s, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Google Project State v0; migrating to v1") + s, err := migrateGoogleProjectStateV0toV1(s, meta.(*transport_tpg.Config)) + if err != nil { + return s, err + } + return s, nil + default: + return s, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +// This migration adjusts google_project resources to include several additional attributes +// required to support project creation/deletion that was added in V1. 
+func migrateGoogleProjectStateV0toV1(s *terraform.InstanceState, config *transport_tpg.Config) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", s.Attributes) + + s.Attributes["skip_delete"] = "true" + s.Attributes["project_id"] = s.ID + + if s.Attributes["policy_data"] != "" { + p, err := GetProjectIamPolicy(s.ID, config) + if err != nil { + return s, fmt.Errorf("Could not retrieve project's IAM policy while attempting to migrate state from V0 to V1: %v", err) + } + s.Attributes["policy_etag"] = p.Etag + } + + log.Printf("[DEBUG] Attributes after migration: %#v", s.Attributes) + return s, nil +} + +// Retrieve the existing IAM Policy for a Project +func GetProjectIamPolicy(project string, config *transport_tpg.Config) (*cloudresourcemanager.Policy, error) { + p, err := config.NewResourceManagerClient(config.UserAgent).Projects.GetIamPolicy(project, + &cloudresourcemanager.GetIamPolicyRequest{ + Options: &cloudresourcemanager.GetPolicyOptions{ + RequestedPolicyVersion: tpgiamresource.IamPolicyVersion, + }, + }).Do() + + if err != nil { + return nil, fmt.Errorf("Error retrieving IAM policy for project %q: %s", project, err) + } + return p, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_organization_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_organization_policy.go new file mode 100644 index 0000000000..b326587c12 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_organization_policy.go @@ -0,0 +1,194 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudresourcemanager/v1" +) + +func ResourceGoogleProjectOrganizationPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleProjectOrganizationPolicyCreate, + Read: resourceGoogleProjectOrganizationPolicyRead, + Update: resourceGoogleProjectOrganizationPolicyUpdate, + Delete: resourceGoogleProjectOrganizationPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceProjectOrgPolicyImporter, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Update: schema.DefaultTimeout(4 * time.Minute), + Read: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + Schema: tpgresource.MergeSchemas( + schemaOrganizationPolicy, + map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The project ID.`, + }, + }, + ), + UseJSONNumber: true, + } +} + +func resourceProjectOrgPolicyImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+):constraints/(?P[^/]+)", + "(?P[^/]+):constraints/(?P[^/]+)", + "(?P[^/]+):(?P[^/]+)"}, + d, config); err != nil { + return nil, err + } + + if d.Get("project") == "" || d.Get("constraint") == "" { + return nil, fmt.Errorf("unable to parse project or constraint. 
Check import formats") + } + + return []*schema.ResourceData{d}, nil +} + +func resourceGoogleProjectOrganizationPolicyCreate(d *schema.ResourceData, meta interface{}) error { + if isOrganizationPolicyUnset(d) { + d.SetId(fmt.Sprintf("%s:%s", d.Get("project"), d.Get("constraint"))) + return resourceGoogleProjectOrganizationPolicyDelete(d, meta) + } + + if err := setProjectOrganizationPolicy(d, meta); err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s:%s", d.Get("project"), d.Get("constraint"))) + + return resourceGoogleProjectOrganizationPolicyRead(d, meta) +} + +func resourceGoogleProjectOrganizationPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + project := PrefixedProject(d.Get("project").(string)) + + var policy *cloudresourcemanager.OrgPolicy + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (readErr error) { + policy, readErr = config.NewResourceManagerClient(userAgent).Projects.GetOrgPolicy(project, &cloudresourcemanager.GetOrgPolicyRequest{ + Constraint: CanonicalOrgPolicyConstraint(d.Get("constraint").(string)), + }).Do() + return readErr + }, + Timeout: d.Timeout(schema.TimeoutRead), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Organization policy for %s", project)) + } + + if err := d.Set("constraint", policy.Constraint); err != nil { + return fmt.Errorf("Error setting constraint: %s", err) + } + if err := d.Set("boolean_policy", flattenBooleanOrganizationPolicy(policy.BooleanPolicy)); err != nil { + return fmt.Errorf("Error setting boolean_policy: %s", err) + } + if err := d.Set("list_policy", flattenListOrganizationPolicy(policy.ListPolicy)); err != nil { + return fmt.Errorf("Error setting list_policy: %s", err) + } + if err := d.Set("restore_policy", flattenRestoreOrganizationPolicy(policy.RestoreDefault)); err != 
nil { + return fmt.Errorf("Error setting restore_policy: %s", err) + } + if err := d.Set("version", policy.Version); err != nil { + return fmt.Errorf("Error setting version: %s", err) + } + if err := d.Set("etag", policy.Etag); err != nil { + return fmt.Errorf("Error setting etag: %s", err) + } + if err := d.Set("update_time", policy.UpdateTime); err != nil { + return fmt.Errorf("Error setting update_time: %s", err) + } + + return nil +} + +func resourceGoogleProjectOrganizationPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + if isOrganizationPolicyUnset(d) { + return resourceGoogleProjectOrganizationPolicyDelete(d, meta) + } + + if err := setProjectOrganizationPolicy(d, meta); err != nil { + return err + } + + return resourceGoogleProjectOrganizationPolicyRead(d, meta) +} + +func resourceGoogleProjectOrganizationPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + project := PrefixedProject(d.Get("project").(string)) + + return transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + _, err := config.NewResourceManagerClient(userAgent).Projects.ClearOrgPolicy(project, &cloudresourcemanager.ClearOrgPolicyRequest{ + Constraint: CanonicalOrgPolicyConstraint(d.Get("constraint").(string)), + }).Do() + return err + }, + Timeout: d.Timeout(schema.TimeoutDelete), + }) +} + +func setProjectOrganizationPolicy(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project := PrefixedProject(d.Get("project").(string)) + + listPolicy, err := expandListOrganizationPolicy(d.Get("list_policy").([]interface{})) + if err != nil { + return err + } + + restore_default, err := 
expandRestoreOrganizationPolicy(d.Get("restore_policy").([]interface{})) + if err != nil { + return err + } + + return transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + _, err := config.NewResourceManagerClient(userAgent).Projects.SetOrgPolicy(project, &cloudresourcemanager.SetOrgPolicyRequest{ + Policy: &cloudresourcemanager.OrgPolicy{ + Constraint: CanonicalOrgPolicyConstraint(d.Get("constraint").(string)), + BooleanPolicy: expandBooleanOrganizationPolicy(d.Get("boolean_policy").([]interface{})), + ListPolicy: listPolicy, + RestoreDefault: restore_default, + Version: int64(d.Get("version").(int)), + Etag: d.Get("etag").(string), + }, + }).Do() + return err + }, + Timeout: d.Timeout(schema.TimeoutCreate), + }) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_service.go new file mode 100644 index 0000000000..68de973111 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_service.go @@ -0,0 +1,308 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + tpgserviceusage "github.com/hashicorp/terraform-provider-google/google/services/serviceusage" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "google.golang.org/api/googleapi" + "google.golang.org/api/serviceusage/v1" +) + +// These services can only be enabled as a side-effect of enabling other services, +// so don't bother storing them in the config or using them for diffing. +var ignoredProjectServices = []string{"dataproc-control.googleapis.com", "source.googleapis.com", "stackdriverprovisioning.googleapis.com"} +var ignoredProjectServicesSet = tpgresource.GolangSetFromStringSlice(ignoredProjectServices) + +// Services that can't be user-specified but are otherwise valid. Renamed +// services should be added to this set during major releases. +var bannedProjectServices = []string{"bigquery-json.googleapis.com"} + +// Service Renames +// we expect when a service is renamed: +// - both service names will continue to be able to be set +// - setting one will effectively enable the other as a dependent +// - GET will return whichever service name is requested +// - LIST responses will not contain the old service name +// renames may be reverted, though, so we should canonicalise both ways until +// the old service is fully removed from the provider +// +// We handle service renames in the provider by pretending that we've read both +// the old and new service names from the API if we see either, and only setting +// the one(s) that existed in prior state in config (if any). If neither exists, +// we'll set the old service name in state. 
+// Additionally, in case of service rename rollbacks or unexpected early +// removals of services, if we fail to create or delete a service that's been +// renamed we'll retry using an alternate name. +// We try creation by the user-specified value followed by the other value. +// We try deletion by the old value followed by the new value. + +// map from old -> new names of services that have been renamed +// these should be removed during major provider versions. comment here with +// "DEPRECATED FOR {{version}} next to entries slated for removal in {{version}} +// upon removal, we should disallow the old name from being used even if it's +// not gone from the underlying API yet +var RenamedServices = map[string]string{} + +// RenamedServices in reverse (new -> old) +var renamedServicesByNewServiceNames = tpgresource.ReverseStringMap(RenamedServices) + +// RenamedServices expressed as both old -> new and new -> old +var renamedServicesByOldAndNewServiceNames = tpgresource.MergeStringMaps(RenamedServices, renamedServicesByNewServiceNames) + +const maxServiceUsageBatchSize = 20 + +func validateProjectServiceService(val interface{}, key string) (warns []string, errs []error) { + bannedServicesFunc := verify.StringNotInSlice(append(ignoredProjectServices, bannedProjectServices...), false) + warns, errs = bannedServicesFunc(val, key) + if len(errs) > 0 { + return + } + + // StringNotInSlice already validates that this is a string + v, _ := val.(string) + if !strings.Contains(v, ".") { + errs = append(errs, fmt.Errorf("expected %s to be a domain like serviceusage.googleapis.com", v)) + } + return +} + +func ResourceGoogleProjectService() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleProjectServiceCreate, + Read: resourceGoogleProjectServiceRead, + Delete: resourceGoogleProjectServiceDelete, + Update: resourceGoogleProjectServiceUpdate, + + Importer: &schema.ResourceImporter{ + State: resourceGoogleProjectServiceImport, + }, + + Timeouts: 
&schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Read: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateProjectServiceService, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + }, + + "disable_dependent_services": { + Type: schema.TypeBool, + Optional: true, + }, + + "disable_on_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceGoogleProjectServiceImport(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + if len(parts) != 2 { + return nil, fmt.Errorf("Invalid google_project_service id format for import, expecting `{project}/{service}`, found %s", d.Id()) + } + if err := d.Set("project", parts[0]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("service", parts[1]); err != nil { + return nil, fmt.Errorf("Error setting service: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func resourceGoogleProjectServiceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + project = tpgresource.GetResourceNameFromSelfLink(project) + + srv := d.Get("service").(string) + id := project + "/" + srv + + // Check if the service has already been enabled + servicesRaw, err := BatchRequestReadServices(project, d, config) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id())) + } + servicesList := servicesRaw.(map[string]struct{}) + if _, 
ok := servicesList[srv]; ok { + log.Printf("[DEBUG] service %s was already found to be enabled in project %s", srv, project) + d.SetId(id) + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("service", srv); err != nil { + return fmt.Errorf("Error setting service: %s", err) + } + return nil + } + + err = BatchRequestEnableService(srv, project, d, config) + if err != nil { + return err + } + d.SetId(id) + return resourceGoogleProjectServiceRead(d, meta) +} + +func resourceGoogleProjectServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + project = tpgresource.GetResourceNameFromSelfLink(project) + + // Verify project for services still exists + projectGetCall := config.NewResourceManagerClient(userAgent).Projects.Get(project) + if config.UserProjectOverride { + billingProject := project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + projectGetCall.Header().Add("X-Goog-User-Project", billingProject) + } + p, err := projectGetCall.Do() + + if err == nil && p.LifecycleState == "DELETE_REQUESTED" { + // Construct a 404 error for transport_tpg.HandleNotFoundError + err = &googleapi.Error{ + Code: 404, + Message: "Project deletion was requested", + } + } + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id())) + } + + servicesRaw, err := BatchRequestReadServices(project, d, config) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id())) + } + servicesList := servicesRaw.(map[string]struct{}) + + srv := 
d.Get("service").(string) + if _, ok := servicesList[srv]; ok { + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("service", srv); err != nil { + return fmt.Errorf("Error setting service: %s", err) + } + return nil + } + + log.Printf("[DEBUG] service %s not in enabled services for project %s, removing from state", srv, project) + d.SetId("") + return nil +} + +func resourceGoogleProjectServiceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + if disable := d.Get("disable_on_destroy"); !(disable.(bool)) { + log.Printf("[WARN] Project service %q disable_on_destroy is false, skip disabling service", d.Id()) + d.SetId("") + return nil + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + project = tpgresource.GetResourceNameFromSelfLink(project) + + service := d.Get("service").(string) + disableDependencies := d.Get("disable_dependent_services").(bool) + if err = disableServiceUsageProjectService(service, project, d, config, disableDependencies); err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project Service %s", d.Id())) + } + + d.SetId("") + return nil +} + +func resourceGoogleProjectServiceUpdate(d *schema.ResourceData, meta interface{}) error { + // This update method is no-op because the only updatable fields + // are state/config-only, i.e. they aren't sent in requests to the API. + return nil +} + +// Disables a project service. 
+func disableServiceUsageProjectService(service, project string, d *schema.ResourceData, config *transport_tpg.Config, disableDependentServices bool) error { + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + billingProject := project + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + name := fmt.Sprintf("projects/%s/services/%s", project, service) + servicesDisableCall := config.NewServiceUsageClient(userAgent).Services.Disable(name, &serviceusage.DisableServiceRequest{ + DisableDependentServices: disableDependentServices, + }) + if config.UserProjectOverride { + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + servicesDisableCall.Header().Add("X-Goog-User-Project", billingProject) + } + sop, err := servicesDisableCall.Do() + if err != nil { + return err + } + // Wait for the operation to complete + waitErr := tpgserviceusage.ServiceUsageOperationWait(config, sop, billingProject, "api to disable", userAgent, d.Timeout(schema.TimeoutDelete)) + if waitErr != nil { + return waitErr + } + return nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.ServiceUsageServiceBeingActivated}, + }) + if err != nil { + return fmt.Errorf("Error disabling service %q for project %q: %v", service, project, err) + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_sweeper.go new file mode 100644 index 0000000000..6aa5aa0e34 --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_project_sweeper.go @@ -0,0 +1,74 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "context" + "fmt" + "log" + "os" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +var ( + TestPrefix = "tf-test" +) + +func init() { + // SKIP_PROJECT_SWEEPER can be set for a sweeper run to prevent it from + // sweeping projects. This can be useful when running sweepers in + // organizations where acceptance tests intiated by another project may + // already be in-progress. + // Example: SKIP_PROJECT_SWEEPER=1 go test ./google -v -sweep=us-central1 -sweep-run= + if os.Getenv("SKIP_PROJECT_SWEEPER") != "" { + return + } + + sweeper.AddTestSweepers("GoogleProject", testSweepProject) +} + +func testSweepProject(region string) error { + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + org := envvar.UnsafeGetTestOrgFromEnv() + if org == "" { + log.Printf("[INFO][SWEEPER_LOG] no organization set, failing project sweeper") + return fmt.Errorf("no organization set") + } + + token := "" + for paginate := true; paginate; { + // Filter for projects with test prefix + filter := fmt.Sprintf("id:\"%s*\" -lifecycleState:DELETE_REQUESTED parent.id:%v", TestPrefix, org) + found, err := config.NewResourceManagerClient(config.UserAgent).Projects.List().Filter(filter).PageToken(token).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error listing projects: %s", err) + return nil + } + + for _, project := range found.Projects { + 
log.Printf("[INFO][SWEEPER_LOG] Sweeping Project id: %s", project.ProjectId) + _, err := config.NewResourceManagerClient(config.UserAgent).Projects.Delete(project.ProjectId).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error, failed to delete project %s: %s", project.Name, err) + continue + } + } + token = found.NextPageToken + paginate = token != "" + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_service_account.go similarity index 81% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_service_account.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_service_account.go index fd74c20ba9..73f5b6a4b4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_service_account.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_service_account.go @@ -1,10 +1,16 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" "strings" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "google.golang.org/api/iam/v1" @@ -42,7 +48,7 @@ func ResourceGoogleServiceAccount() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateRFC1035Name(6, 30), + ValidateFunc: verify.ValidateRFC1035Name(6, 30), Description: `The account id that is used to generate the service account email address and a stable unique id. It is unique within a project, must be 6-30 characters long, and match the regular expression [a-z]([-a-z0-9]*[a-z0-9]) to comply with RFC1035. Changing this forces a new service account to be created.`, }, "display_name": { @@ -80,13 +86,13 @@ func ResourceGoogleServiceAccount() *schema.Resource { } func resourceGoogleServiceAccountCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -111,10 +117,14 @@ func resourceGoogleServiceAccountCreate(d *schema.ResourceData, meta interface{} d.SetId(sa.Name) - err = RetryTimeDuration(func() (operr error) { - _, saerr := config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(d.Id()).Do() - return saerr - }, d.Timeout(schema.TimeoutCreate), isNotFoundRetryableError("service account creation")) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() 
(operr error) { + _, saerr := config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(d.Id()).Do() + return saerr + }, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsNotFoundRetryableError("service account creation")}, + }) if err != nil { return fmt.Errorf("Error reading service account after creation: %s", err) @@ -122,7 +132,7 @@ func resourceGoogleServiceAccountCreate(d *schema.ResourceData, meta interface{} // We poll until the resource is found due to eventual consistency issue // on part of the api https://cloud.google.com/iam/docs/overview#consistency - err = PollingWaitTime(resourceServiceAccountPollRead(d, meta), PollCheckForExistence, "Creating Service Account", d.Timeout(schema.TimeoutCreate), 1) + err = transport_tpg.PollingWaitTime(resourceServiceAccountPollRead(d, meta), transport_tpg.PollCheckForExistence, "Creating Service Account", d.Timeout(schema.TimeoutCreate), 1) if err != nil { return err @@ -131,10 +141,10 @@ func resourceGoogleServiceAccountCreate(d *schema.ResourceData, meta interface{} return resourceGoogleServiceAccountRead(d, meta) } -func resourceServiceAccountPollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { +func resourceServiceAccountPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { return func() (map[string]interface{}, error) { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } @@ -150,8 +160,8 @@ func resourceServiceAccountPollRead(d *schema.ResourceData, meta interface{}) Po } func resourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -159,7 +169,7 @@ func resourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) // Confirm the service account exists sa, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(d.Id()).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Service Account %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Service Account %q", d.Id())) } if err := d.Set("email", sa.Email); err != nil { @@ -193,8 +203,8 @@ func resourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) } func resourceGoogleServiceAccountDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -208,8 +218,8 @@ func resourceGoogleServiceAccountDelete(d *schema.ResourceData, meta interface{} } func resourceGoogleServiceAccountUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -270,8 +280,8 @@ func resourceGoogleServiceAccountUpdate(d *schema.ResourceData, meta interface{} } func resourceGoogleServiceAccountImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/serviceAccounts/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { @@ -279,7 +289,7 @@ func resourceGoogleServiceAccountImport(d *schema.ResourceData, meta interface{} } // 
Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/serviceAccounts/{{email}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/serviceAccounts/{{email}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_service_account_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_service_account_key.go similarity index 86% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_service_account_key.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_service_account_key.go index 382a1e6839..6a08d9d330 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_service_account_key.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_service_account_key.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" "log" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "google.golang.org/api/iam/v1" @@ -94,13 +99,13 @@ func ResourceGoogleServiceAccountKey() *schema.Resource { } func resourceGoogleServiceAccountKeyCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - serviceAccountName, err := serviceAccountFQN(d.Get("service_account_id").(string), d, config) + serviceAccountName, err := tpgresource.ServiceAccountFQN(d.Get("service_account_id").(string), d, config) if err != nil { return err } @@ -138,7 +143,7 @@ func resourceGoogleServiceAccountKeyCreate(d *schema.ResourceData, meta interfac return fmt.Errorf("Error setting private_key: %s", err) } - err = serviceAccountKeyWaitTime(config.NewIamClient(userAgent).Projects.ServiceAccounts.Keys, d.Id(), d.Get("public_key_type").(string), "Creating Service account key", 4*time.Minute) + err = ServiceAccountKeyWaitTime(config.NewIamClient(userAgent).Projects.ServiceAccounts.Keys, d.Id(), d.Get("public_key_type").(string), "Creating Service account key", 4*time.Minute) if err != nil { return err } @@ -146,8 +151,8 @@ func resourceGoogleServiceAccountKeyCreate(d *schema.ResourceData, meta interfac } func resourceGoogleServiceAccountKeyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) if err != nil { return err } @@ -157,11 +162,11 @@ func resourceGoogleServiceAccountKeyRead(d *schema.ResourceData, meta interface{ // Confirm the service account key exists sak, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Keys.Get(d.Id()).PublicKeyType(publicKeyType).Do() if err != nil { - if err = handleNotFoundError(err, d, fmt.Sprintf("Service Account Key %q", d.Id())); err == nil { + if err = transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Service Account Key %q", d.Id())); err == nil { return nil } else { // This resource also returns 403 when it's not found. - if IsGoogleApiErrorWithCode(err, 403) { + if transport_tpg.IsGoogleApiErrorWithCode(err, 403) { log.Printf("[DEBUG] Got a 403 error trying to read service account key %s, assuming it's gone.", d.Id()) d.SetId("") return nil @@ -184,8 +189,8 @@ func resourceGoogleServiceAccountKeyRead(d *schema.ResourceData, meta interface{ } func resourceGoogleServiceAccountKeyDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -193,11 +198,11 @@ func resourceGoogleServiceAccountKeyDelete(d *schema.ResourceData, meta interfac _, err = config.NewIamClient(userAgent).Projects.ServiceAccounts.Keys.Delete(d.Id()).Do() if err != nil { - if err = handleNotFoundError(err, d, fmt.Sprintf("Service Account Key %q", d.Id())); err == nil { + if err = transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Service Account Key %q", d.Id())); err == nil { return nil } else { // This resource also returns 403 when it's not found. 
- if IsGoogleApiErrorWithCode(err, 403) { + if transport_tpg.IsGoogleApiErrorWithCode(err, 403) { log.Printf("[DEBUG] Got a 403 error trying to read service account key %s, assuming it's gone.", d.Id()) d.SetId("") return nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_service_account_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_service_account_sweeper.go new file mode 100644 index 0000000000..ab897ecea6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_google_service_account_sweeper.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// This will sweep Service Account resources +func init() { + sweeper.AddTestSweepers("ServiceAccount", testSweepServiceAccount) +} + +// At the time of writing, the CI only passes us-central1 as the region +// We don't have a way to filter the list by zone, and it's not clear it's worth the +// effort as we only create within us-central1. 
+func testSweepServiceAccount(region string) error { + resourceName := "ServiceAccount" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + servicesUrl := "https://iam.googleapis.com/v1/projects/" + config.Project + "/serviceAccounts" + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: servicesUrl + "?pageSize=100", + UserAgent: config.UserAgent, + }) + + resourceList, ok := res["accounts"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Count items that weren't sweeped. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource id was nil", resourceName) + return nil + } + + id := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Increment count and skip if resource is not sweepable. 
+ if !sweeper.IsSweepableTestResource(id) { + nonPrefixCount++ + continue + } + + deleteUrl := servicesUrl + "/" + id + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, id) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items without sweepable prefix remain.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_manager_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_manager_operation.go new file mode 100644 index 0000000000..a62f6c0a74 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_manager_operation.go @@ -0,0 +1,69 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type ResourceManagerOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + tpgresource.CommonOperationWaiter +} + +func (w *ResourceManagerOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.ResourceManagerBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createResourceManagerWaiter(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string) (*ResourceManagerOperationWaiter, error) { + w := &ResourceManagerOperationWaiter{ + Config: config, + UserAgent: userAgent, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func ResourceManagerOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + w, err := createResourceManagerWaiter(config, op, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func ResourceManagerOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createResourceManagerWaiter(config, op, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_project_service_identity.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_project_service_identity.go new file mode 100644 index 0000000000..1760a9fa9e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_project_service_identity.go @@ -0,0 +1,3 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_resource_manager_lien.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_resource_manager_lien.go new file mode 100644 index 0000000000..9679cc2104 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/resource_resource_manager_lien.go @@ -0,0 +1,494 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package resourcemanager + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceResourceManagerLien() *schema.Resource { + return &schema.Resource{ + Create: resourceResourceManagerLienCreate, + Read: resourceResourceManagerLienRead, + Delete: resourceResourceManagerLienDelete, + + Importer: &schema.ResourceImporter{ + State: resourceResourceManagerLienImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "origin": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A stable, user-visible/meaningful string identifying the origin +of the Lien, intended to be inspected programmatically. Maximum length of +200 characters.`, + }, + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A reference to the resource this Lien is attached to. +The server will validate the parent against those for which Liens are supported. +Since a variety of objects can have Liens against them, you must provide the type +prefix (e.g. "projects/my-project-name").`, + }, + "reason": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Concise user-visible strings indicating why an action cannot be performed +on a resource. Maximum length of 200 characters.`, + }, + "restrictions": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The types of operations which should be blocked as a result of this Lien. +Each value should correspond to an IAM permission. 
The server will validate +the permissions against those for which Liens are supported. An empty +list is meaningless and will be rejected. +e.g. ['resourcemanager.projects.delete']`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time of creation`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `A system-generated unique identifier for this Lien.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceResourceManagerLienCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + reasonProp, err := expandNestedResourceManagerLienReason(d.Get("reason"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reason"); !tpgresource.IsEmptyValue(reflect.ValueOf(reasonProp)) && (ok || !reflect.DeepEqual(v, reasonProp)) { + obj["reason"] = reasonProp + } + originProp, err := expandNestedResourceManagerLienOrigin(d.Get("origin"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("origin"); !tpgresource.IsEmptyValue(reflect.ValueOf(originProp)) && (ok || !reflect.DeepEqual(v, originProp)) { + obj["origin"] = originProp + } + parentProp, err := expandNestedResourceManagerLienParent(d.Get("parent"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { + obj["parent"] = parentProp + } + restrictionsProp, err := expandNestedResourceManagerLienRestrictions(d.Get("restrictions"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("restrictions"); !tpgresource.IsEmptyValue(reflect.ValueOf(restrictionsProp)) && (ok || !reflect.DeepEqual(v, restrictionsProp)) { 
+ obj["restrictions"] = restrictionsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ResourceManagerBasePath}}liens") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Lien: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Lien: %s", err) + } + if err := d.Set("name", flattenNestedResourceManagerLienName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // This resource is unusual - instead of returning an Operation from + // Create, it returns the created object itself. We don't parse + // any of the values there, preferring to centralize that logic in + // Read(). In this resource, Read is also unusual - it requires + // us to know the server-side generated name of the object we're + // trying to fetch, and the only way to know that is to capture + // it here. The following two lines do that. 
+ d.SetId(flattenNestedResourceManagerLienName(res["name"], d, config).(string)) + if err := d.Set("name", flattenNestedResourceManagerLienName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + + log.Printf("[DEBUG] Finished creating Lien %q: %#v", d.Id(), res) + + return resourceResourceManagerLienRead(d, meta) +} + +func resourceResourceManagerLienRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ResourceManagerBasePath}}liens?parent={{parent}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ResourceManagerLien %q", d.Id())) + } + + res, err = flattenNestedResourceManagerLien(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. + log.Printf("[DEBUG] Removing ResourceManagerLien because it couldn't be matched.") + d.SetId("") + return nil + } + + res, err = resourceResourceManagerLienDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing ResourceManagerLien because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenNestedResourceManagerLienName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Lien: %s", err) + } + if err := d.Set("reason", flattenNestedResourceManagerLienReason(res["reason"], d, config)); err != nil { + return fmt.Errorf("Error reading Lien: %s", err) + } + if err := d.Set("origin", flattenNestedResourceManagerLienOrigin(res["origin"], d, config)); err != nil { + return fmt.Errorf("Error reading Lien: %s", err) + } + if err := d.Set("create_time", flattenNestedResourceManagerLienCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Lien: %s", err) + } + if err := d.Set("parent", flattenNestedResourceManagerLienParent(res["parent"], d, config)); err != nil { + return fmt.Errorf("Error reading Lien: %s", err) + } + if err := d.Set("restrictions", flattenNestedResourceManagerLienRestrictions(res["restrictions"], d, config)); err != nil { + return fmt.Errorf("Error reading Lien: %s", err) + } + + return nil +} + +func resourceResourceManagerLienDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{ResourceManagerBasePath}}liens?parent={{parent}}") + if err != nil { + return err + } + + var obj map[string]interface{} + // log the old URL to make the ineffassign linter happy + // in theory, we should find a way to disable the default URL and not construct + // both, but that's a problem for another day. Today, we cheat. 
+ log.Printf("[DEBUG] replacing URL %q with a custom delete URL", url) + url, err = tpgresource.ReplaceVars(d, config, "{{ResourceManagerBasePath}}liens/{{name}}") + if err != nil { + return err + } + log.Printf("[DEBUG] Deleting Lien %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Lien") + } + + log.Printf("[DEBUG] Finished deleting Lien %q: %#v", d.Id(), res) + return nil +} + +func resourceResourceManagerLienImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + parent, err := tpgresource.ReplaceVars(d, config, "projects/{{parent}}") + if err != nil { + return nil, err + } + if err := d.Set("parent", parent); err != nil { + return nil, fmt.Errorf("Error setting parent: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedResourceManagerLienName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenNestedResourceManagerLienReason(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedResourceManagerLienOrigin(v interface{}, 
d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedResourceManagerLienCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedResourceManagerLienParent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedResourceManagerLienRestrictions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedResourceManagerLienReason(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedResourceManagerLienOrigin(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedResourceManagerLienParent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedResourceManagerLienRestrictions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedResourceManagerLien(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["liens"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value liens. 
Actual value: %v", v) + } + + _, item, err := resourceResourceManagerLienFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceResourceManagerLienFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName := d.Get("name") + expectedFlattenedName := flattenNestedResourceManagerLienName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + // Decode list item before comparing. + item, err := resourceResourceManagerLienDecoder(d, meta, item) + if err != nil { + return -1, nil, err + } + + itemName := flattenNestedResourceManagerLienName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} +func resourceResourceManagerLienDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // The problem we're trying to solve here is that this property is a Project, + // and there are a lot of ways to specify a Project, including the ID vs + // Number, which is something that we can't address in a diffsuppress. 
+ // Since we can't enforce a particular method of entering the project, + // we're just going to have to use whatever the user entered, whether + // it's project/projectName, project/12345, projectName, or 12345. + // The normal behavior of this method would be 'return res' - and that's + // what we'll fall back to if any of our conditions aren't met. Those + // conditions are: + // 1) if the new or old values contain '/', the prefix of that is 'projects'. + // 2) if either is non-numeric, a project with that ID exists. + // 3) the project IDs represented by both the new and old values are the same. + config := meta.(*transport_tpg.Config) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + new := res["parent"].(string) + old := d.Get("parent").(string) + if strings.HasPrefix(new, "projects/") { + new = strings.Split(new, "/")[1] + } + if strings.HasPrefix(old, "projects/") { + old = strings.Split(old, "/")[1] + } + log.Printf("[DEBUG] Trying to figure out whether to use %s or %s", old, new) + // If there's still a '/' in there, the value must not be a project ID. + if strings.Contains(old, "/") || strings.Contains(new, "/") { + return res, nil + } + // If 'old' isn't entirely numeric, let's assume it's a project ID. 
+ // If it's a project ID + var oldProjId int64 + var newProjId int64 + if oldVal, err := tpgresource.StringToFixed64(old); err == nil { + log.Printf("[DEBUG] The old value was a real number: %d", oldVal) + oldProjId = oldVal + } else { + pOld, err := config.NewResourceManagerClient(userAgent).Projects.Get(old).Do() + if err != nil { + return res, nil + } + oldProjId = pOld.ProjectNumber + } + if newVal, err := tpgresource.StringToFixed64(new); err == nil { + log.Printf("[DEBUG] The new value was a real number: %d", newVal) + newProjId = newVal + } else { + pNew, err := config.NewResourceManagerClient(userAgent).Projects.Get(new).Do() + if err != nil { + return res, nil + } + newProjId = pNew.ProjectNumber + } + if newProjId == oldProjId { + res["parent"] = d.Get("parent") + } + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/service_account_waiter.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/service_account_waiter.go similarity index 88% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/service_account_waiter.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/service_account_waiter.go index 31f826c449..cc17f6f6c9 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/service_account_waiter.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/service_account_waiter.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" @@ -33,7 +35,7 @@ func (w *ServiceAccountKeyWaiter) RefreshFunc() resource.StateRefreshFunc { } } -func serviceAccountKeyWaitTime(client *iam.ProjectsServiceAccountsKeysService, keyName, publicKeyType, activity string, timeout time.Duration) error { +func ServiceAccountKeyWaitTime(client *iam.ProjectsServiceAccountsKeysService, keyName, publicKeyType, activity string, timeout time.Duration) error { w := &ServiceAccountKeyWaiter{ Service: client, PublicKeyType: publicKeyType, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/serviceusage_batching.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/serviceusage_batching.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/serviceusage_batching.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/serviceusage_batching.go index db1e6ed5ab..92e24ccb1f 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/serviceusage_batching.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/resourcemanager/serviceusage_batching.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package resourcemanager import ( "fmt" "log" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -16,24 +21,24 @@ const ( // BatchRequestEnableServices can be used to batch requests to enable services // across resource nodes, i.e. to batch creation of several // google_project_service(s) resources. 
-func BatchRequestEnableService(service string, project string, d *schema.ResourceData, config *Config) error { +func BatchRequestEnableService(service string, project string, d *schema.ResourceData, config *transport_tpg.Config) error { // Renamed service create calls are relatively likely to fail, so don't try to batch the call. if altName, ok := renamedServicesByOldAndNewServiceNames[service]; ok { return tryEnableRenamedService(service, altName, project, d, config) } - userAgent, err := generateUserAgentString(d, config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - req := &BatchRequest{ + req := &transport_tpg.BatchRequest{ ResourceName: project, Body: []string{service}, CombineF: combineServiceUsageServicesBatches, @@ -48,8 +53,8 @@ func BatchRequestEnableService(service string, project string, d *schema.Resourc return err } -func tryEnableRenamedService(service, altName string, project string, d *schema.ResourceData, config *Config) error { - userAgent, err := generateUserAgentString(d, config.UserAgent) +func tryEnableRenamedService(service, altName string, project string, d *schema.ResourceData, config *transport_tpg.Config) error { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -59,7 +64,7 @@ func tryEnableRenamedService(service, altName string, project string, d *schema. 
billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } @@ -75,19 +80,19 @@ func tryEnableRenamedService(service, altName string, project string, d *schema. return nil } -func BatchRequestReadServices(project string, d *schema.ResourceData, config *Config) (interface{}, error) { - userAgent, err := generateUserAgentString(d, config.UserAgent) +func BatchRequestReadServices(project string, d *schema.ResourceData, config *transport_tpg.Config) (interface{}, error) { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return nil, err } billingProject := project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - req := &BatchRequest{ + req := &transport_tpg.BatchRequest{ ResourceName: project, Body: nil, // Use empty CombineF since the request is exactly the same no matter how many services we read. 
@@ -115,7 +120,7 @@ func combineServiceUsageServicesBatches(srvsRaw interface{}, toAddRaw interface{ return append(srvs, toAdd...), nil } -func sendBatchFuncEnableServices(config *Config, userAgent, billingProject string, timeout time.Duration) BatcherSendFunc { +func sendBatchFuncEnableServices(config *transport_tpg.Config, userAgent, billingProject string, timeout time.Duration) transport_tpg.BatcherSendFunc { return func(project string, toEnableRaw interface{}) (interface{}, error) { toEnable, ok := toEnableRaw.([]string) if !ok { @@ -125,7 +130,7 @@ func sendBatchFuncEnableServices(config *Config, userAgent, billingProject strin } } -func sendListServices(config *Config, billingProject, userAgent string, timeout time.Duration) BatcherSendFunc { +func sendListServices(config *transport_tpg.Config, billingProject, userAgent string, timeout time.Duration) transport_tpg.BatcherSendFunc { return func(project string, _ interface{}) (interface{}, error) { return ListCurrentlyEnabledServices(project, billingProject, userAgent, config, timeout) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret.go new file mode 100644 index 0000000000..e6cd825327 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret.go @@ -0,0 +1,32 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package secretmanager + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceSecretManagerSecret() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceSecretManagerSecret().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "secret_id") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceSecretManagerSecretRead, + Schema: dsSchema, + } +} + +func dataSourceSecretManagerSecretRead(d *schema.ResourceData, meta interface{}) error { + id, err := tpgresource.ReplaceVars(d, meta.(*transport_tpg.Config), "projects/{{project}}/secrets/{{secret_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return resourceSecretManagerSecretRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_secret_manager_secret_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version.go similarity index 75% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_secret_manager_secret_version.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version.go index 3fbf4f9f54..d099bcb74b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_secret_manager_secret_version.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package secretmanager import ( "encoding/base64" @@ -6,6 +8,9 @@ import ( "log" "regexp" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -21,7 +26,7 @@ func DataSourceSecretManagerSecretVersion() *schema.Resource { "secret": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, }, "version": { Type: schema.TypeString, @@ -54,13 +59,13 @@ func DataSourceSecretManagerSecretVersion() *schema.Resource { } func dataSourceSecretManagerSecretVersionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - fv, err := parseProjectFieldValue("secrets", d.Get("secret").(string), "project", d, config, false) + fv, err := tpgresource.ParseProjectFieldValue("secrets", d.Get("secret").(string), "project", d, config, false) if err != nil { return err } @@ -79,19 +84,25 @@ func dataSourceSecretManagerSecretVersionRead(d *schema.ResourceData, meta inter versionNum := d.Get("version") if versionNum != "" { - url, err = replaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret}}/versions/{{version}}") + url, err = tpgresource.ReplaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret}}/versions/{{version}}") if err != nil { return err } } else { - url, err = 
replaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret}}/versions/latest") + url, err = tpgresource.ReplaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret}}/versions/latest") if err != nil { return err } } var version map[string]interface{} - version, err = SendRequest(config, "GET", project, url, userAgent, nil) + version, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { return fmt.Errorf("Error retrieving available secret manager secret versions: %s", err.Error()) } @@ -111,7 +122,13 @@ func dataSourceSecretManagerSecretVersionRead(d *schema.ResourceData, meta inter } url = fmt.Sprintf("%s:access", url) - resp, err := SendRequest(config, "GET", project, url, userAgent, nil) + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { return fmt.Errorf("Error retrieving available secret manager secret version access: %s", err.Error()) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version_access.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version_access.go new file mode 100644 index 0000000000..230d295608 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/data_source_secret_manager_secret_version_access.go @@ -0,0 +1,127 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package secretmanager + +import ( + "encoding/base64" + "fmt" + "log" + "regexp" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceSecretManagerSecretVersionAccess() *schema.Resource { + return &schema.Resource{ + Read: dataSourceSecretManagerSecretVersionAccessRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "secret": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + "version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "secret_data": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + }, + } +} + +func dataSourceSecretManagerSecretVersionAccessRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + fv, err := tpgresource.ParseProjectFieldValue("secrets", d.Get("secret").(string), "project", d, config, false) + if err != nil { + return err + } + if d.Get("project").(string) != "" && d.Get("project").(string) != fv.Project { + return fmt.Errorf("The project set on this secret version (%s) is not equal to the project where this secret exists (%s).", d.Get("project").(string), fv.Project) + } + project := fv.Project + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("secret", fv.Name); err != nil { + return fmt.Errorf("Error setting secret: %s", err) + } + + var url string + versionNum := d.Get("version") + + if versionNum != "" { + url, err = 
tpgresource.ReplaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret}}/versions/{{version}}") + if err != nil { + return err + } + } else { + url, err = tpgresource.ReplaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret}}/versions/latest") + if err != nil { + return err + } + } + + url = fmt.Sprintf("%s:access", url) + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error retrieving available secret manager secret version access: %s", err.Error()) + } + + if err := d.Set("name", resp["name"].(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + + secretVersionRegex := regexp.MustCompile("projects/(.+)/secrets/(.+)/versions/(.+)$") + + parts := secretVersionRegex.FindStringSubmatch(resp["name"].(string)) + // should return [full string, project number, secret name, version number] + if len(parts) != 4 { + panic(fmt.Sprintf("secret name, %s, does not match format, projects/{{project}}/secrets/{{secret}}/versions/{{version}}", resp["name"].(string))) + } + + log.Printf("[DEBUG] Received Google SecretManager Version: %q", parts[3]) + + if err := d.Set("version", parts[3]); err != nil { + return fmt.Errorf("Error setting version: %s", err) + } + + data := resp["payload"].(map[string]interface{}) + secretData, err := base64.StdEncoding.DecodeString(data["data"].(string)) + if err != nil { + return fmt.Errorf("Error decoding secret manager secret version data: %s", err.Error()) + } + if err := d.Set("secret_data", string(secretData)); err != nil { + return fmt.Errorf("Error setting secret_data: %s", err) + } + + d.SetId(resp["name"].(string)) + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/iam_secret_manager_secret.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/iam_secret_manager_secret.go new file mode 100644 index 0000000000..2277405846 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/iam_secret_manager_secret.go @@ -0,0 +1,221 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package secretmanager + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var SecretManagerSecretIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "secret_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type SecretManagerSecretIamUpdater struct { + project string + secretId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func SecretManagerSecretIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("secret_id"); ok { + values["secret_id"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/secrets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("secret_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &SecretManagerSecretIamUpdater{ + project: values["project"], + secretId: values["secret_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("secret_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting secret_id: %s", err) + } + + return u, nil +} + +func SecretManagerSecretIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/secrets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &SecretManagerSecretIamUpdater{ + project: values["project"], + secretId: values["secret_id"], + d: d, + Config: config, + } + if err := d.Set("secret_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting secret_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *SecretManagerSecretIamUpdater) GetResourceIamPolicy() 
(*cloudresourcemanager.Policy, error) { + url, err := u.qualifySecretUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *SecretManagerSecretIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifySecretUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *SecretManagerSecretIamUpdater) qualifySecretUrl(methodIdentifier string) (string, error) { + urlTemplate := 
fmt.Sprintf("{{SecretManagerBasePath}}%s:%s", fmt.Sprintf("projects/%s/secrets/%s", u.project, u.secretId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *SecretManagerSecretIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/secrets/%s", u.project, u.secretId) +} + +func (u *SecretManagerSecretIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-secretmanager-secret-%s", u.GetResourceId()) +} + +func (u *SecretManagerSecretIamUpdater) DescribeResource() string { + return fmt.Sprintf("secretmanager secret %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/resource_secret_manager_secret.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/resource_secret_manager_secret.go new file mode 100644 index 0000000000..39c02cac88 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/resource_secret_manager_secret.go @@ -0,0 +1,851 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package secretmanager + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSecretManagerSecret() *schema.Resource { + return &schema.Resource{ + Create: resourceSecretManagerSecretCreate, + Read: resourceSecretManagerSecretRead, + Update: resourceSecretManagerSecretUpdate, + Delete: resourceSecretManagerSecretDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecretManagerSecretImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "replication": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The replication policy of the secret data attached to the Secret. It cannot be changed +after the Secret has been created.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "automatic": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `The Secret will automatically be replicated without any restrictions.`, + ExactlyOneOf: []string{"replication.0.automatic", "replication.0.user_managed"}, + }, + "user_managed": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The Secret will be replicated to the regions specified by the user.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "replicas": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The list of Replicas for this Secret. 
Cannot be empty.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The canonical IDs of the location to replicate data. For example: "us-east1".`, + }, + "customer_managed_encryption": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Customer Managed Encryption for the secret.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Describes the Cloud KMS encryption key that will be used to protect destination secret.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"replication.0.automatic", "replication.0.user_managed"}, + }, + }, + }, + }, + "secret_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `This must be unique within the project.`, + }, + "expire_time": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Timestamp in UTC when the Secret is scheduled to expire. This is always provided on output, regardless of what was sent on input. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels assigned to this Secret. + +Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, +and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + +Label values must be between 0 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, +and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + +No more than 64 labels can be assigned to a given resource. 
+ +An object containing a list of "key": value pairs. Example: +{ "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "rotation": { + Type: schema.TypeList, + Optional: true, + Description: `The rotation time and period for a Secret. At 'next_rotation_time', Secret Manager will send a Pub/Sub notification to the topics configured on the Secret. 'topics' must be set to configure rotation.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "next_rotation_time": { + Type: schema.TypeString, + Optional: true, + Description: `Timestamp in UTC at which the Secret is scheduled to rotate. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + RequiredWith: []string{"rotation.0.rotation_period"}, + }, + "rotation_period": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) and at most 3153600000s (100 years). +If rotationPeriod is set, 'next_rotation_time' must be set. 'next_rotation_time' will be advanced by this period when the service automatically sends rotation notifications.`, + }, + }, + }, + RequiredWith: []string{"topics"}, + }, + "topics": { + Type: schema.TypeList, + Optional: true, + Description: `A list of up to 10 Pub/Sub topics to which messages are published when control plane operations are called on the secret or its versions.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the Pub/Sub topic that will be published to, in the following format: projects/*/topics/*. 
+For publication to succeed, the Secret Manager Service Agent service account must have pubsub.publisher permissions on the topic.`, + }, + }, + }, + }, + "ttl": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The TTL for the Secret. +A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the Secret was created.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the Secret. Format: +'projects/{{project}}/secrets/{{secret_id}}'`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecretManagerSecretCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandSecretManagerSecretLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + replicationProp, err := expandSecretManagerSecretReplication(d.Get("replication"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("replication"); !tpgresource.IsEmptyValue(reflect.ValueOf(replicationProp)) && (ok || !reflect.DeepEqual(v, replicationProp)) { + obj["replication"] = replicationProp + } + topicsProp, err := expandSecretManagerSecretTopics(d.Get("topics"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("topics"); !tpgresource.IsEmptyValue(reflect.ValueOf(topicsProp)) && (ok || !reflect.DeepEqual(v, topicsProp)) { + obj["topics"] = topicsProp 
+ } + expireTimeProp, err := expandSecretManagerSecretExpireTime(d.Get("expire_time"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("expire_time"); !tpgresource.IsEmptyValue(reflect.ValueOf(expireTimeProp)) && (ok || !reflect.DeepEqual(v, expireTimeProp)) { + obj["expireTime"] = expireTimeProp + } + ttlProp, err := expandSecretManagerSecretTtl(d.Get("ttl"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ttl"); !tpgresource.IsEmptyValue(reflect.ValueOf(ttlProp)) && (ok || !reflect.DeepEqual(v, ttlProp)) { + obj["ttl"] = ttlProp + } + rotationProp, err := expandSecretManagerSecretRotation(d.Get("rotation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rotation"); !tpgresource.IsEmptyValue(reflect.ValueOf(rotationProp)) && (ok || !reflect.DeepEqual(v, rotationProp)) { + obj["rotation"] = rotationProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets?secretId={{secret_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Secret: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Secret: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Secret: %s", err) + } + if err := d.Set("name", flattenSecretManagerSecretName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the 
ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/secrets/{{secret_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Secret %q: %#v", d.Id(), res) + + return resourceSecretManagerSecretRead(d, meta) +} + +func resourceSecretManagerSecretRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Secret: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecretManagerSecret %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Secret: %s", err) + } + + if err := d.Set("name", flattenSecretManagerSecretName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Secret: %s", err) + } + if err := d.Set("create_time", flattenSecretManagerSecretCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Secret: %s", err) + } + if err := d.Set("labels", flattenSecretManagerSecretLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Secret: %s", err) + } + if err := 
d.Set("replication", flattenSecretManagerSecretReplication(res["replication"], d, config)); err != nil { + return fmt.Errorf("Error reading Secret: %s", err) + } + if err := d.Set("topics", flattenSecretManagerSecretTopics(res["topics"], d, config)); err != nil { + return fmt.Errorf("Error reading Secret: %s", err) + } + if err := d.Set("expire_time", flattenSecretManagerSecretExpireTime(res["expireTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Secret: %s", err) + } + if err := d.Set("rotation", flattenSecretManagerSecretRotation(res["rotation"], d, config)); err != nil { + return fmt.Errorf("Error reading Secret: %s", err) + } + + return nil +} + +func resourceSecretManagerSecretUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Secret: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandSecretManagerSecretLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + topicsProp, err := expandSecretManagerSecretTopics(d.Get("topics"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("topics"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, topicsProp)) { + obj["topics"] = topicsProp + } + expireTimeProp, err := expandSecretManagerSecretExpireTime(d.Get("expire_time"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("expire_time"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, expireTimeProp)) { + 
obj["expireTime"] = expireTimeProp + } + rotationProp, err := expandSecretManagerSecretRotation(d.Get("rotation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("rotation"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, rotationProp)) { + obj["rotation"] = rotationProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Secret %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("topics") { + updateMask = append(updateMask, "topics") + } + + if d.HasChange("expire_time") { + updateMask = append(updateMask, "expireTime") + } + + if d.HasChange("rotation") { + updateMask = append(updateMask, "rotation") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Secret %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Secret %q: %#v", d.Id(), res) + } + + return resourceSecretManagerSecretRead(d, meta) +} + +func resourceSecretManagerSecretDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) 
+ if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Secret: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Secret %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Secret") + } + + log.Printf("[DEBUG] Finished deleting Secret %q: %#v", d.Id(), res) + return nil +} + +func resourceSecretManagerSecretImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/secrets/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/secrets/{{secret_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSecretManagerSecretName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerSecretCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerSecretLabels(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerSecretReplication(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["automatic"] = + flattenSecretManagerSecretReplicationAutomatic(original["automatic"], d, config) + transformed["user_managed"] = + flattenSecretManagerSecretReplicationUserManaged(original["userManaged"], d, config) + return []interface{}{transformed} +} +func flattenSecretManagerSecretReplicationAutomatic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v != nil +} + +func flattenSecretManagerSecretReplicationUserManaged(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["replicas"] = + flattenSecretManagerSecretReplicationUserManagedReplicas(original["replicas"], d, config) + return []interface{}{transformed} +} +func flattenSecretManagerSecretReplicationUserManagedReplicas(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "location": flattenSecretManagerSecretReplicationUserManagedReplicasLocation(original["location"], d, config), + "customer_managed_encryption": 
flattenSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryption(original["customerManagedEncryption"], d, config), + }) + } + return transformed +} +func flattenSecretManagerSecretReplicationUserManagedReplicasLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryption(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryptionKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryptionKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerSecretTopics(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenSecretManagerSecretTopicsName(original["name"], d, config), + }) + } + return transformed +} +func flattenSecretManagerSecretTopicsName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerSecretExpireTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerSecretRotation(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["next_rotation_time"] = + flattenSecretManagerSecretRotationNextRotationTime(original["nextRotationTime"], d, config) + transformed["rotation_period"] = + flattenSecretManagerSecretRotationRotationPeriod(original["rotationPeriod"], d, config) + return []interface{}{transformed} +} +func flattenSecretManagerSecretRotationNextRotationTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerSecretRotationRotationPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecretManagerSecretLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandSecretManagerSecretReplication(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAutomatic, err := expandSecretManagerSecretReplicationAutomatic(original["automatic"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAutomatic); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["automatic"] = transformedAutomatic + } + + transformedUserManaged, err := expandSecretManagerSecretReplicationUserManaged(original["user_managed"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedUserManaged); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["userManaged"] = transformedUserManaged + } + + return transformed, nil +} + +func expandSecretManagerSecretReplicationAutomatic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil || !v.(bool) { + return nil, nil + } + + return struct{}{}, nil +} + +func expandSecretManagerSecretReplicationUserManaged(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedReplicas, err := expandSecretManagerSecretReplicationUserManagedReplicas(original["replicas"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReplicas); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["replicas"] = transformedReplicas + } + + return transformed, nil +} + +func expandSecretManagerSecretReplicationUserManagedReplicas(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLocation, err := expandSecretManagerSecretReplicationUserManagedReplicasLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["location"] = transformedLocation + } + + transformedCustomerManagedEncryption, err := expandSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryption(original["customer_managed_encryption"], d, config) 
+ if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCustomerManagedEncryption); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["customerManagedEncryption"] = transformedCustomerManagedEncryption + } + + req = append(req, transformed) + } + return req, nil +} + +func expandSecretManagerSecretReplicationUserManagedReplicasLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryptionKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryptionKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecretManagerSecretTopics(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandSecretManagerSecretTopicsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val 
:= reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + req = append(req, transformed) + } + return req, nil +} + +func expandSecretManagerSecretTopicsName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecretManagerSecretExpireTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecretManagerSecretTtl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecretManagerSecretRotation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNextRotationTime, err := expandSecretManagerSecretRotationNextRotationTime(original["next_rotation_time"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNextRotationTime); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nextRotationTime"] = transformedNextRotationTime + } + + transformedRotationPeriod, err := expandSecretManagerSecretRotationRotationPeriod(original["rotation_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRotationPeriod); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["rotationPeriod"] = transformedRotationPeriod + } + + return transformed, nil +} + +func expandSecretManagerSecretRotationNextRotationTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecretManagerSecretRotationRotationPeriod(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/resource_secret_manager_secret_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/resource_secret_manager_secret_sweeper.go new file mode 100644 index 0000000000..a1e9915457 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/resource_secret_manager_secret_sweeper.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package secretmanager + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecretManagerSecret", testSweepSecretManagerSecret) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecretManagerSecret(region string) error { + resourceName := "SecretManagerSecret" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://secretmanager.googleapis.com/v1/projects/{{project}}/secrets", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] 
Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["secrets"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://secretmanager.googleapis.com/v1/projects/{{project}}/secrets/{{secret_id}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/resource_secret_manager_secret_version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/resource_secret_manager_secret_version.go new file mode 100644 index 0000000000..7b61dd225e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/secretmanager/resource_secret_manager_secret_version.go @@ -0,0 +1,472 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package secretmanager + +import ( + "encoding/base64" + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/googleapi" +) + +func resourceSecretManagerSecretVersionUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + _, err := expandSecretManagerSecretVersionEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } + + return resourceSecretManagerSecretVersionRead(d, meta) +} + +func ResourceSecretManagerSecretVersion() *schema.Resource { + return &schema.Resource{ + Create: resourceSecretManagerSecretVersionCreate, + Read: resourceSecretManagerSecretVersionRead, + Delete: resourceSecretManagerSecretVersionDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecretManagerSecretVersionImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Update: resourceSecretManagerSecretVersionUpdate, + + Schema: map[string]*schema.Schema{ + "secret_data": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The secret data. 
Must be no larger than 64KiB.`, + Sensitive: true, + }, + + "secret": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Secret Manager secret resource`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `The current state of the SecretVersion.`, + Default: true, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the Secret was created.`, + }, + "destroy_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the Secret was destroyed. Only present if state is DESTROYED.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the SecretVersion. Format: +'projects/{{project}}/secrets/{{secret_id}}/versions/{{version}}'`, + }, + "version": { + Type: schema.TypeString, + Computed: true, + Description: `The version of the Secret.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecretManagerSecretVersionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + stateProp, err := expandSecretManagerSecretVersionEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enabled"); !tpgresource.IsEmptyValue(reflect.ValueOf(stateProp)) && (ok || !reflect.DeepEqual(v, stateProp)) { + obj["state"] = stateProp + } + payloadProp, err := expandSecretManagerSecretVersionPayload(nil, d, config) + if err != nil { + return err + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(payloadProp)) { + obj["payload"] = payloadProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerBasePath}}{{secret}}:addVersion") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating 
new SecretVersion: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating SecretVersion: %s", err) + } + if err := d.Set("name", flattenSecretManagerSecretVersionName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + _, err = expandSecretManagerSecretVersionEnabled(d.Get("enabled"), d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished creating SecretVersion %q: %#v", d.Id(), res) + + return resourceSecretManagerSecretVersionRead(d, meta) +} + +func resourceSecretManagerSecretVersionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecretManagerSecretVersion %q", d.Id())) + } + + res, err = resourceSecretManagerSecretVersionDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing SecretManagerSecretVersion because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("enabled", flattenSecretManagerSecretVersionEnabled(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading SecretVersion: %s", err) + } + if err := d.Set("name", flattenSecretManagerSecretVersionName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading SecretVersion: %s", err) + } + if err := d.Set("version", flattenSecretManagerSecretVersionVersion(res["version"], d, config)); err != nil { + return fmt.Errorf("Error reading SecretVersion: %s", err) + } + if err := d.Set("create_time", flattenSecretManagerSecretVersionCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading SecretVersion: %s", err) + } + if err := d.Set("destroy_time", flattenSecretManagerSecretVersionDestroyTime(res["destroyTime"], d, config)); err != nil { + return fmt.Errorf("Error reading SecretVersion: %s", err) + } + // Terraform must set the top level schema field, but since this object contains collapsed properties + // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. 
+ if flattenedProp := flattenSecretManagerSecretVersionPayload(res["payload"], d, config); flattenedProp != nil { + if gerr, ok := flattenedProp.(*googleapi.Error); ok { + return fmt.Errorf("Error reading SecretVersion: %s", gerr) + } + casted := flattenedProp.([]interface{})[0] + if casted != nil { + for k, v := range casted.(map[string]interface{}) { + if err := d.Set(k, v); err != nil { + return fmt.Errorf("Error setting %s: %s", k, err) + } + } + } + } + + return nil +} + +func resourceSecretManagerSecretVersionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerBasePath}}{{name}}:destroy") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting SecretVersion %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "SecretVersion") + } + + log.Printf("[DEBUG] Finished deleting SecretVersion %q: %#v", d.Id(), res) + return nil +} + +func resourceSecretManagerSecretVersionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + name := d.Get("name").(string) + secretRegex := 
regexp.MustCompile("(projects/.+/secrets/.+)/versions/.+$") + versionRegex := regexp.MustCompile("projects/(.+)/secrets/(.+)/versions/(.+)$") + + parts := secretRegex.FindStringSubmatch(name) + if len(parts) != 2 { + panic(fmt.Sprintf("Version name does not fit the format `projects/{{project}}/secrets/{{secret}}/versions/{{version}}`")) + } + if err := d.Set("secret", parts[1]); err != nil { + return nil, fmt.Errorf("Error setting secret: %s", err) + } + + parts = versionRegex.FindStringSubmatch(name) + + if err := d.Set("version", parts[3]); err != nil { + return nil, fmt.Errorf("Error setting version: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenSecretManagerSecretVersionEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v.(string) == "ENABLED" { + return true + } + + return false +} + +func flattenSecretManagerSecretVersionName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerSecretVersionVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + name := d.Get("name").(string) + secretRegex := regexp.MustCompile("projects/(.+)/secrets/(.+)/versions/(.+)$") + + parts := secretRegex.FindStringSubmatch(name) + if len(parts) != 4 { + panic(fmt.Sprintf("Version name does not fit the format `projects/{{project}}/secrets/{{secret}}/versions/{{version}}`")) + } + + return parts[3] +} + +func flattenSecretManagerSecretVersionCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerSecretVersionDestroyTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecretManagerSecretVersionPayload(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + transformed := make(map[string]interface{}) + + // if this secret version is 
disabled, the api will return an error, as the value cannot be accessed, return what we have + if d.Get("enabled").(bool) == false { + transformed["secret_data"] = d.Get("secret_data") + return []interface{}{transformed} + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerBasePath}}{{name}}:access") + if err != nil { + return err + } + + parts := strings.Split(d.Get("name").(string), "/") + project := parts[1] + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + accessRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return err + } + + data, err := base64.StdEncoding.DecodeString(accessRes["payload"].(map[string]interface{})["data"].(string)) + if err != nil { + return err + } + transformed["secret_data"] = string(data) + return []interface{}{transformed} +} + +func expandSecretManagerSecretVersionEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + name := d.Get("name").(string) + if name == "" { + return "", nil + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecretManagerBasePath}}{{name}}") + if err != nil { + return nil, err + } + + if v == true { + url = fmt.Sprintf("%s:enable", url) + } else { + url = fmt.Sprintf("%s:disable", url) + } + + parts := strings.Split(name, "/") + project := parts[1] + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + return nil, nil +} + +func expandSecretManagerSecretVersionPayload(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + transformed := make(map[string]interface{}) + transformedSecretData, err := expandSecretManagerSecretVersionPayloadSecretData(d.Get("secret_data"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretData); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["data"] = transformedSecretData + } + + return transformed, nil +} + +func expandSecretManagerSecretVersionPayloadSecretData(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + return base64.StdEncoding.EncodeToString([]byte(v.(string))), nil +} + +func resourceSecretManagerSecretVersionDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if v := res["state"]; v == "DESTROYED" { + return nil, nil + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/iam_scc_source.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/iam_scc_source.go new file mode 100644 index 0000000000..8fad7023f8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/iam_scc_source.go @@ -0,0 +1,202 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var SecurityCenterSourceIamSchema = map[string]*schema.Schema{ + "organization": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "source": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type SecurityCenterSourceIamUpdater struct { + organization string + source string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func SecurityCenterSourceIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + if v, ok := d.GetOk("organization"); ok { + values["organization"] = v.(string) + } + + if v, ok := d.GetOk("source"); ok { + values["source"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"organizations/(?P[^/]+)/sources/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("source").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &SecurityCenterSourceIamUpdater{ + organization: values["organization"], + source: values["source"], + d: d, + Config: config, + } + + if err := d.Set("organization", u.organization); err != nil { + return nil, fmt.Errorf("Error setting organization: %s", err) + } + if err := d.Set("source", 
u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting source: %s", err) + } + + return u, nil +} + +func SecurityCenterSourceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + m, err := tpgresource.GetImportIdQualifiers([]string{"organizations/(?P[^/]+)/sources/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &SecurityCenterSourceIamUpdater{ + organization: values["organization"], + source: values["source"], + d: d, + Config: config, + } + if err := d.Set("source", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting source: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *SecurityCenterSourceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifySourceUrl("getIamPolicy") + if err != nil { + return nil, err + } + + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *SecurityCenterSourceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifySourceUrl("setIamPolicy") + if 
err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *SecurityCenterSourceIamUpdater) qualifySourceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{SecurityCenterBasePath}}%s:%s", fmt.Sprintf("organizations/%s/sources/%s", u.organization, u.source), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *SecurityCenterSourceIamUpdater) GetResourceId() string { + return fmt.Sprintf("organizations/%s/sources/%s", u.organization, u.source) +} + +func (u *SecurityCenterSourceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-securitycenter-source-%s", u.GetResourceId()) +} + +func (u *SecurityCenterSourceIamUpdater) DescribeResource() string { + return fmt.Sprintf("securitycenter source %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_mute_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_mute_config.go new file mode 100644 index 0000000000..7569f2ffe0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_mute_config.go @@ -0,0 +1,402 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSecurityCenterMuteConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityCenterMuteConfigCreate, + Read: resourceSecurityCenterMuteConfigRead, + Update: resourceSecurityCenterMuteConfigUpdate, + Delete: resourceSecurityCenterMuteConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecurityCenterMuteConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeString, + Required: true, + Description: `An expression that defines the filter to apply across create/update +events of findings. While creating a filter string, be mindful of +the scope in which the mute configuration is being created. 
E.g., +If a filter contains project = X but is created under the +project = Y scope, it might not match any findings.`, + }, + "mute_config_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Unique identifier provided by the client within the parent scope.`, + }, + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Resource name of the new mute configs's parent. Its format is +"organizations/[organization_id]", "folders/[folder_id]", or +"projects/[project_id]".`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of the mute config.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the mute config was created. This field is set by +the server and will be ignored if provided on config creation.`, + }, + "most_recent_editor": { + Type: schema.TypeString, + Computed: true, + Description: `Email address of the user who last edited the mute config. This +field is set by the server and will be ignored if provided on +config creation or update.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Name of the mute config. Its format is +organizations/{organization}/muteConfigs/{configId}, +folders/{folder}/muteConfigs/{configId}, +or projects/{project}/muteConfigs/{configId}`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The most recent time at which the mute config was +updated. 
This field is set by the server and will be ignored if +provided on config creation or update.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterMuteConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterMuteConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + filterProp, err := expandSecurityCenterMuteConfigFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}{{parent}}/muteConfigs?muteConfigId={{mute_config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new MuteConfig: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating MuteConfig: %s", err) + } + if err := d.Set("name", flattenSecurityCenterMuteConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store 
the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating MuteConfig %q: %#v", d.Id(), res) + + return resourceSecurityCenterMuteConfigRead(d, meta) +} + +func resourceSecurityCenterMuteConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterMuteConfig %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterMuteConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading MuteConfig: %s", err) + } + if err := d.Set("description", flattenSecurityCenterMuteConfigDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading MuteConfig: %s", err) + } + if err := d.Set("filter", flattenSecurityCenterMuteConfigFilter(res["filter"], d, config)); err != nil { + return fmt.Errorf("Error reading MuteConfig: %s", err) + } + if err := d.Set("create_time", flattenSecurityCenterMuteConfigCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading MuteConfig: %s", err) + } + if err := d.Set("update_time", flattenSecurityCenterMuteConfigUpdateTime(res["updateTime"], d, config)); err != nil { + return 
fmt.Errorf("Error reading MuteConfig: %s", err) + } + if err := d.Set("most_recent_editor", flattenSecurityCenterMuteConfigMostRecentEditor(res["mostRecentEditor"], d, config)); err != nil { + return fmt.Errorf("Error reading MuteConfig: %s", err) + } + + return nil +} + +func resourceSecurityCenterMuteConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterMuteConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + filterProp, err := expandSecurityCenterMuteConfigFilter(d.Get("filter"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("filter"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) { + obj["filter"] = filterProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating MuteConfig %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("filter") { + updateMask = append(updateMask, "filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + 
res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating MuteConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating MuteConfig %q: %#v", d.Id(), res) + } + + return resourceSecurityCenterMuteConfigRead(d, meta) +} + +func resourceSecurityCenterMuteConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting MuteConfig %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "MuteConfig") + } + + log.Printf("[DEBUG] Finished deleting MuteConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceSecurityCenterMuteConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + // current import_formats can't import fields with forward slashes in their value + name := d.Get("name").(string) + + matched, err := 
regexp.MatchString("(organizations|folders|projects)/.+/muteConfigs/.+", name) + if err != nil { + return nil, fmt.Errorf("error validating import name: %s", err) + } + + if !matched { + return nil, fmt.Errorf("error validating import name: %s does not fit naming for muteConfigs. Expected %s", + name, "organizations/{organization}/muteConfigs/{configId}, folders/{folder}/muteConfigs/{configId} or projects/{project}/muteConfigs/{configId}") + } + + if err := d.Set("name", name); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + // mute_config_id and parent are not returned by the API and therefore need to be set manually + stringParts := strings.Split(d.Get("name").(string), "/") + if err := d.Set("mute_config_id", stringParts[3]); err != nil { + return nil, fmt.Errorf("Error setting mute_config_id: %s", err) + } + + if err := d.Set("parent", fmt.Sprintf("%s/%s", stringParts[0], stringParts[1])); err != nil { + return nil, fmt.Errorf("Error setting mute_config_id: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenSecurityCenterMuteConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterMuteConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterMuteConfigFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterMuteConfigCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterMuteConfigUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterMuteConfigMostRecentEditor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterMuteConfigDescription(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterMuteConfigFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_notification_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_notification_config.go new file mode 100644 index 0000000000..bd2b3b23e0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_notification_config.go @@ -0,0 +1,471 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSecurityCenterNotificationConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityCenterNotificationConfigCreate, + Read: resourceSecurityCenterNotificationConfigRead, + Update: resourceSecurityCenterNotificationConfigUpdate, + Delete: resourceSecurityCenterNotificationConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecurityCenterNotificationConfigImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "config_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `This must be unique within the organization.`, + }, + "organization": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The organization whose Cloud Security Command Center the Notification +Config lives in.`, + }, + "pubsub_topic": { + Type: schema.TypeString, + Required: true, + Description: `The Pub/Sub topic to send notifications to. 
Its format is +"projects/[project_id]/topics/[topic]".`, + }, + "streaming_config": { + Type: schema.TypeList, + Required: true, + Description: `The config for triggering streaming-based notifications.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeString, + Required: true, + Description: `Expression that defines the filter to apply across create/update +events of assets or findings as specified by the event type. The +expression is a list of zero or more restrictions combined via +logical operators AND and OR. Parentheses are supported, and OR +has higher precedence than AND. + +Restrictions have the form and may have +a - character in front of them to indicate negation. The fields +map to those defined in the corresponding resource. + +The supported operators are: + +* = for all value types. +* >, <, >=, <= for integer values. +* :, meaning substring matching, for strings. + +The supported value types are: + +* string literals in quotes. +* integer literals without quotes. +* boolean literals true and false without quotes. 
+ +See +[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) +for information on how to write a filter.`, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + Description: `The description of the notification config (max of 1024 characters).`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of this notification config, in the format +'organizations/{{organization}}/notificationConfigs/{{config_id}}'.`, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Description: `The service account that needs "pubsub.topics.publish" permission to +publish to the Pub/Sub topic.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterNotificationConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterNotificationConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + pubsubTopicProp, err := expandSecurityCenterNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(pubsubTopicProp)) && (ok || !reflect.DeepEqual(v, pubsubTopicProp)) { + obj["pubsubTopic"] = pubsubTopicProp + } + streamingConfigProp, err := expandSecurityCenterNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("streaming_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(streamingConfigProp)) && (ok || !reflect.DeepEqual(v, streamingConfigProp)) { + obj["streamingConfig"] = streamingConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/notificationConfigs?configId={{config_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new NotificationConfig: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating NotificationConfig: %s", err) + } + if err := d.Set("name", flattenSecurityCenterNotificationConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating NotificationConfig %q: %#v", d.Id(), res) + + return resourceSecurityCenterNotificationConfigRead(d, meta) +} + +func resourceSecurityCenterNotificationConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterNotificationConfig %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterNotificationConfigName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading NotificationConfig: %s", err) + } + if err := d.Set("description", flattenSecurityCenterNotificationConfigDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading NotificationConfig: %s", err) + } + if err := d.Set("pubsub_topic", flattenSecurityCenterNotificationConfigPubsubTopic(res["pubsubTopic"], d, config)); err != nil { + return fmt.Errorf("Error reading NotificationConfig: %s", err) + } + if err := d.Set("service_account", flattenSecurityCenterNotificationConfigServiceAccount(res["serviceAccount"], d, config)); err != nil { + return fmt.Errorf("Error reading NotificationConfig: %s", err) + 
} + if err := d.Set("streaming_config", flattenSecurityCenterNotificationConfigStreamingConfig(res["streamingConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading NotificationConfig: %s", err) + } + + return nil +} + +func resourceSecurityCenterNotificationConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterNotificationConfigDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + pubsubTopicProp, err := expandSecurityCenterNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_topic"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pubsubTopicProp)) { + obj["pubsubTopic"] = pubsubTopicProp + } + streamingConfigProp, err := expandSecurityCenterNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("streaming_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, streamingConfigProp)) { + obj["streamingConfig"] = streamingConfigProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating NotificationConfig %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("pubsub_topic") { + updateMask = append(updateMask, "pubsubTopic") + } + + if 
d.HasChange("streaming_config") { + updateMask = append(updateMask, "streamingConfig.filter") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating NotificationConfig %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating NotificationConfig %q: %#v", d.Id(), res) + } + + return resourceSecurityCenterNotificationConfigRead(d, meta) +} + +func resourceSecurityCenterNotificationConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting NotificationConfig %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, 
"NotificationConfig") + } + + log.Printf("[DEBUG] Finished deleting NotificationConfig %q: %#v", d.Id(), res) + return nil +} + +func resourceSecurityCenterNotificationConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err + } + + stringParts := strings.Split(d.Get("name").(string), "/") + if len(stringParts) != 4 { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s", + d.Get("name"), + "organizations/{{organization}}/sources/{{source}}", + ) + } + + if err := d.Set("organization", stringParts[1]); err != nil { + return nil, fmt.Errorf("Error setting organization: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenSecurityCenterNotificationConfigName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterNotificationConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterNotificationConfigPubsubTopic(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterNotificationConfigServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterNotificationConfigStreamingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["filter"] = + flattenSecurityCenterNotificationConfigStreamingConfigFilter(original["filter"], d, config) + return 
[]interface{}{transformed} +} +func flattenSecurityCenterNotificationConfigStreamingConfigFilter(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterNotificationConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterNotificationConfigPubsubTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterNotificationConfigStreamingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFilter, err := expandSecurityCenterNotificationConfigStreamingConfigFilter(original["filter"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFilter); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filter"] = transformedFilter + } + + return transformed, nil +} + +func expandSecurityCenterNotificationConfigStreamingConfigFilter(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_notification_config_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_notification_config_sweeper.go new file mode 100644 index 0000000000..26a0d015e5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_notification_config_sweeper.go @@ 
-0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterNotificationConfig", testSweepSecurityCenterNotificationConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterNotificationConfig(region string) error { + resourceName := "SecurityCenterNotificationConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + 
"billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v1/organizations/{{organization}}/notificationConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["notificationConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v1/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_source.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_source.go new file mode 100644 index 0000000000..f8b999f84e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/securitycenter/resource_scc_source.go @@ -0,0 +1,329 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceSecurityCenterSource() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityCenterSourceCreate, + Read: resourceSecurityCenterSourceRead, + Update: resourceSecurityCenterSourceUpdate, + Delete: resourceSecurityCenterSourceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecurityCenterSourceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRegexp(`[\p{L}\p{N}]({\p{L}\p{N}_- ]{0,30}[\p{L}\p{N}])?`), + Description: `The source’s display name. A source’s display name must be unique +amongst its siblings, for example, two sources with the same parent +can't share the same display name. 
The display name must start and end +with a letter or digit, may contain letters, digits, spaces, hyphens, +and underscores, and can be no longer than 32 characters.`, + }, + "organization": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The organization whose Cloud Security Command Center the Source +lives in.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + Description: `The description of the source (max of 1024 characters).`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of this source, in the format +'organizations/{{organization}}/sources/{{source}}'.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterSourceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterSourceDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandSecurityCenterSourceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/sources") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Source: %#v", obj) + billingProject := "" + + // err == nil indicates that the 
billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Source: %s", err) + } + if err := d.Set("name", flattenSecurityCenterSourceName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `name` is autogenerated from the api so needs to be set post-create + name, ok := res["name"] + if !ok { + respBody, ok := res["response"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + name, ok = respBody.(map[string]interface{})["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") + } + } + if err := d.Set("name", name.(string)); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(name.(string)) + + log.Printf("[DEBUG] Finished creating Source %q: %#v", d.Id(), res) + + return resourceSecurityCenterSourceRead(d, meta) +} + +func resourceSecurityCenterSourceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterSource %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterSourceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Source: %s", err) + } + if err := d.Set("description", flattenSecurityCenterSourceDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Source: %s", err) + } + if err := d.Set("display_name", flattenSecurityCenterSourceDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Source: %s", err) + } + + return nil +} + +func resourceSecurityCenterSourceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := 
make(map[string]interface{}) + descriptionProp, err := expandSecurityCenterSourceDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + displayNameProp, err := expandSecurityCenterSourceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Source %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Source %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Source %q: %#v", d.Id(), res) + } + + return resourceSecurityCenterSourceRead(d, meta) +} + +func resourceSecurityCenterSourceDelete(d 
*schema.ResourceData, meta interface{}) error { + log.Printf("[WARNING] SecurityCenter Source resources"+ + " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ + " state, but will still be present on Google Cloud.", d.Id()) + d.SetId("") + + return nil +} + +func resourceSecurityCenterSourceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{"(?P<name>.+)"}, d, config); err != nil { + return nil, err + } + + stringParts := strings.Split(d.Get("name").(string), "/") + if len(stringParts) != 4 { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s", + d.Get("name"), + "organizations/{{organization}}/sources/{{source}}", + ) + } + + if err := d.Set("organization", stringParts[1]); err != nil { + return nil, fmt.Errorf("Error setting organization: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func flattenSecurityCenterSourceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterSourceDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterSourceDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterSourceDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterSourceDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/iam_endpoints_service.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/iam_endpoints_service.go new file mode 100644 index 0000000000..66bf48d6bd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/iam_endpoints_service.go @@ -0,0 +1,187 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package servicemanagement + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var ServiceManagementServiceIamSchema = map[string]*schema.Schema{ + "service_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type ServiceManagementServiceIamUpdater struct { + serviceName string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func ServiceManagementServiceIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + if v, ok := 
d.GetOk("service_name"); ok { + values["service_name"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"services/(?P<service_name>[^/]+)", "(?P<service_name>[^/]+)"}, d, config, d.Get("service_name").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ServiceManagementServiceIamUpdater{ + serviceName: values["service_name"], + d: d, + Config: config, + } + + if err := d.Set("service_name", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting service_name: %s", err) + } + + return u, nil +} + +func ServiceManagementServiceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + m, err := tpgresource.GetImportIdQualifiers([]string{"services/(?P<service_name>[^/]+)", "(?P<service_name>[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ServiceManagementServiceIamUpdater{ + serviceName: values["service_name"], + d: d, + Config: config, + } + if err := d.Set("service_name", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting service_name: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ServiceManagementServiceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyServiceUrl("getIamPolicy") + if err != nil { + return nil, err + } + + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} 
+ err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ServiceManagementServiceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyServiceUrl("setIamPolicy") + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ServiceManagementServiceIamUpdater) qualifyServiceUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ServiceManagementBasePath}}%s:%s", fmt.Sprintf("services/%s", u.serviceName), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ServiceManagementServiceIamUpdater) GetResourceId() string { + return fmt.Sprintf("services/%s", u.serviceName) +} + +func (u *ServiceManagementServiceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-servicemanagement-service-%s", u.GetResourceId()) +} + +func (u *ServiceManagementServiceIamUpdater) DescribeResource() string { + return fmt.Sprintf("servicemanagement service %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/iam_endpoints_service_consumers.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/iam_endpoints_service_consumers.go new file mode 100644 index 0000000000..ec855c1c5f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/iam_endpoints_service_consumers.go @@ -0,0 +1,202 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package servicemanagement + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var ServiceManagementServiceConsumersIamSchema = map[string]*schema.Schema{ + "service_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "consumer_project": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type ServiceManagementServiceConsumersIamUpdater struct { + serviceName string + consumerProject string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func ServiceManagementServiceConsumersIamUpdaterProducer(d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + if v, ok := d.GetOk("service_name"); ok { + values["service_name"] = v.(string) + } + + if v, ok := d.GetOk("consumer_project"); ok { + values["consumer_project"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"services/(?P<service_name>[^/]+)/consumers/(?P<consumer_project>[^/]+)", "(?P<service_name>[^/]+)/(?P<consumer_project>[^/]+)", "(?P<consumer_project>[^/]+)"}, d, config, d.Get("consumer_project").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &ServiceManagementServiceConsumersIamUpdater{ + serviceName: values["service_name"], + consumerProject: values["consumer_project"], + d: d, + Config: config, + } + + if err := d.Set("service_name", u.serviceName); err != nil { + return nil, fmt.Errorf("Error setting service_name: %s", err) + } + if err := d.Set("consumer_project", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting consumer_project: %s", err) + } + + return u, nil +} + +func ServiceManagementServiceConsumersIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + m, err := tpgresource.GetImportIdQualifiers([]string{"services/(?P<service_name>[^/]+)/consumers/(?P<consumer_project>[^/]+)", "(?P<service_name>[^/]+)/(?P<consumer_project>[^/]+)", "(?P<consumer_project>[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &ServiceManagementServiceConsumersIamUpdater{ + serviceName: values["service_name"], + consumerProject: values["consumer_project"], + d: d, + Config: config, + } + if err := d.Set("consumer_project", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting consumer_project: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *ServiceManagementServiceConsumersIamUpdater) GetResourceIamPolicy() 
(*cloudresourcemanager.Policy, error) { + url, err := u.qualifyServiceConsumersUrl("getIamPolicy") + if err != nil { + return nil, err + } + + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *ServiceManagementServiceConsumersIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyServiceConsumersUrl("setIamPolicy") + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ServiceManagementServiceConsumersIamUpdater) qualifyServiceConsumersUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{ServiceManagementBasePath}}%s:%s", fmt.Sprintf("services/%s/consumers/%s", u.serviceName, u.consumerProject), methodIdentifier) + url, err := 
tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *ServiceManagementServiceConsumersIamUpdater) GetResourceId() string { + return fmt.Sprintf("services/%s/consumers/%s", u.serviceName, u.consumerProject) +} + +func (u *ServiceManagementServiceConsumersIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-servicemanagement-serviceconsumers-%s", u.GetResourceId()) +} + +func (u *ServiceManagementServiceConsumersIamUpdater) DescribeResource() string { + return fmt.Sprintf("servicemanagement serviceconsumers %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_endpoints_service.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/resource_endpoints_service.go similarity index 95% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_endpoints_service.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/resource_endpoints_service.go index a70fdc80d2..6c75318c2a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_endpoints_service.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/resource_endpoints_service.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package servicemanagement import ( "context" @@ -13,6 +15,8 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/servicemanagement/v1" ) @@ -211,13 +215,13 @@ func getEndpointServiceGRPCConfigSource(serviceConfig, protoConfig string) *serv } func resourceEndpointsServiceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -286,8 +290,8 @@ func resourceEndpointsServiceUpdate(d *schema.ResourceData, meta interface{}) er // be tweaked if the user is using gcloud. In the interest of simplicity, // we currently only support full rollouts - anyone trying to do incremental // rollouts or A/B testing is going to need a more precise tool than this resource. 
- config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -347,8 +351,8 @@ func resourceEndpointsServiceUpdate(d *schema.ResourceData, meta interface{}) er } func resourceEndpointsServiceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -365,8 +369,8 @@ func resourceEndpointsServiceDelete(d *schema.ResourceData, meta interface{}) er } func resourceEndpointsServiceRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_endpoints_service_migration.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/resource_endpoints_service_migration.go similarity index 86% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_endpoints_service_migration.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/resource_endpoints_service_migration.go index c793f41734..876da6a590 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_endpoints_service_migration.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/resource_endpoints_service_migration.go @@ -1,10 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package servicemanagement import ( "encoding/base64" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func migrateEndpointsService(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/serviceman_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/serviceman_operation.go new file mode 100644 index 0000000000..ab3fdb4f8a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicemanagement/serviceman_operation.go @@ -0,0 +1,40 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package servicemanagement + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/googleapi" + "google.golang.org/api/servicemanagement/v1" +) + +type ServiceManagementOperationWaiter struct { + Service *servicemanagement.APIService + tpgresource.CommonOperationWaiter +} + +func (w *ServiceManagementOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + return w.Service.Operations.Get(w.Op.Name).Do() +} + +func ServiceManagementOperationWaitTime(config *transport_tpg.Config, op *servicemanagement.Operation, activity, userAgent string, timeout time.Duration) (googleapi.RawMessage, error) { + w := &ServiceManagementOperationWaiter{ + Service: config.NewServiceManClient(userAgent), + } + + if err := w.SetOp(op); err != nil { + return nil, err + } + + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return nil, err + } + return w.Op.Response, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_networking_peered_dns_domain.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/data_source_google_service_networking_peered_dns_domain.go similarity index 88% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_networking_peered_dns_domain.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/data_source_google_service_networking_peered_dns_domain.go index f08e5e50a4..233f39f783 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_service_networking_peered_dns_domain.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/data_source_google_service_networking_peered_dns_domain.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package servicenetworking import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_service_networking_peered_dns_domain.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/resource_google_service_networking_peered_dns_domain.go similarity index 86% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_service_networking_peered_dns_domain.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/resource_google_service_networking_peered_dns_domain.go index 5e6309aa08..b27f8dbb61 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_google_service_networking_peered_dns_domain.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/resource_google_service_networking_peered_dns_domain.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package servicenetworking import ( "fmt" @@ -7,6 +9,9 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/servicenetworking/v1" ) @@ -90,13 +95,13 @@ func resourceGoogleServiceNetworkingPeeredDNSDomainImport(d *schema.ResourceData } func resourceGoogleServiceNetworkingPeeredDNSDomainCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -140,13 +145,13 @@ func resourceGoogleServiceNetworkingPeeredDNSDomainCreate(d *schema.ResourceData } func resourceGoogleServiceNetworkingPeeredDNSDomainRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -208,8 +213,8 @@ func resourceGoogleServiceNetworkingPeeredDNSDomainRead(d *schema.ResourceData, } func resourceGoogleServiceNetworkingPeeredDNSDomainDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -218,11 +223,14 @@ 
func resourceGoogleServiceNetworkingPeeredDNSDomainDelete(d *schema.ResourceData apiService := config.NewServiceNetworkingClient(userAgent) peeredDnsDomainsService := servicenetworking.NewServicesProjectsGlobalNetworksPeeredDnsDomainsService(apiService) - if err := RetryTimeDuration(func() error { - _, delErr := peeredDnsDomainsService.Delete(d.Id()).Do() - return delErr - }, d.Timeout(schema.TimeoutDelete)); err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Peered DNS domain %s", name)) + if err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + _, delErr := peeredDnsDomainsService.Delete(d.Id()).Do() + return delErr + }, + Timeout: d.Timeout(schema.TimeoutDelete), + }); err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Peered DNS domain %s", name)) } d.SetId("") @@ -232,11 +240,11 @@ func resourceGoogleServiceNetworkingPeeredDNSDomainDelete(d *schema.ResourceData // NOTE(deviavir): An out of band aspect of this API is that it uses a unique formatting of network // different from the standard self_link URI. It requires a call to the resource manager to get the project // number for the current project. 
-func getProjectNumber(d *schema.ResourceData, config *Config, project, userAgent string) (string, error) { +func getProjectNumber(d *schema.ResourceData, config *transport_tpg.Config, project, userAgent string) (string, error) { log.Printf("[DEBUG] Retrieving project number by doing a GET with the project id, as required by service networking") // err == nil indicates that the billing_project value was found billingProject := project - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_service_networking_connection.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/resource_service_networking_connection.go similarity index 82% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_service_networking_connection.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/resource_service_networking_connection.go index 642860df65..117d2217ae 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_service_networking_connection.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/resource_service_networking_connection.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package servicenetworking import ( "fmt" @@ -8,6 +10,10 @@ import ( "strings" "time" + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/compute/v1" @@ -35,7 +41,7 @@ func ResourceServiceNetworkingConnection() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `Name of VPC network connected with service producers using VPC peering.`, }, // NOTE(craigatgoogle): This field is weird, it's required to make the Insert/List calls as a parameter @@ -66,24 +72,24 @@ func ResourceServiceNetworkingConnection() *schema.Resource { } func resourceServiceNetworkingConnectionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } network := d.Get("network").(string) - serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, network, userAgent) + serviceNetworkingNetworkName, err := RetrieveServiceNetworkingNetworkName(d, config, network, userAgent) if err != nil { return errwrap.Wrapf("Failed to find Service Networking Connection, err: {{err}}", err) } connection := &servicenetworking.Connection{ Network: serviceNetworkingNetworkName, - ReservedPeeringRanges: convertStringArr(d.Get("reserved_peering_ranges").([]interface{})), + ReservedPeeringRanges: tpgresource.ConvertStringArr(d.Get("reserved_peering_ranges").([]interface{})), } - 
networkFieldValue, err := ParseNetworkFieldValue(network, d, config) + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) if err != nil { return errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) } @@ -106,7 +112,7 @@ func resourceServiceNetworkingConnectionCreate(d *schema.ResourceData, meta inte // the connection name. // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { project = bp } @@ -133,8 +139,8 @@ func resourceServiceNetworkingConnectionCreate(d *schema.ResourceData, meta inte } func resourceServiceNetworkingConnectionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -144,20 +150,20 @@ func resourceServiceNetworkingConnectionRead(d *schema.ResourceData, meta interf return errwrap.Wrapf("Unable to parse Service Networking Connection id, err: {{err}}", err) } - serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, connectionId.Network, userAgent) + serviceNetworkingNetworkName, err := RetrieveServiceNetworkingNetworkName(d, config, connectionId.Network, userAgent) if err != nil { return errwrap.Wrapf("Failed to find Service Networking Connection, err: {{err}}", err) } network := d.Get("network").(string) - networkFieldValue, err := ParseNetworkFieldValue(network, d, config) + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) if err != nil { return errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) } project := networkFieldValue.Project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, 
config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { project = bp } @@ -201,8 +207,8 @@ func resourceServiceNetworkingConnectionRead(d *schema.ResourceData, meta interf } func resourceServiceNetworkingConnectionUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -216,17 +222,17 @@ func resourceServiceNetworkingConnectionUpdate(d *schema.ResourceData, meta inte if d.HasChange("reserved_peering_ranges") { network := d.Get("network").(string) - serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, network, userAgent) + serviceNetworkingNetworkName, err := RetrieveServiceNetworkingNetworkName(d, config, network, userAgent) if err != nil { return errwrap.Wrapf("Failed to find Service Networking Connection, err: {{err}}", err) } connection := &servicenetworking.Connection{ Network: serviceNetworkingNetworkName, - ReservedPeeringRanges: convertStringArr(d.Get("reserved_peering_ranges").([]interface{})), + ReservedPeeringRanges: tpgresource.ConvertStringArr(d.Get("reserved_peering_ranges").([]interface{})), } - networkFieldValue, err := ParseNetworkFieldValue(network, d, config) + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) if err != nil { return errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) } @@ -236,7 +242,7 @@ func resourceServiceNetworkingConnectionUpdate(d *schema.ResourceData, meta inte // and it's easier than grabbing the connection name. 
// err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { project = bp } @@ -256,14 +262,14 @@ func resourceServiceNetworkingConnectionUpdate(d *schema.ResourceData, meta inte } func resourceServiceNetworkingConnectionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } network := d.Get("network").(string) - serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, network, userAgent) + serviceNetworkingNetworkName, err := RetrieveServiceNetworkingNetworkName(d, config, network, userAgent) if err != nil { return err } @@ -273,24 +279,32 @@ func resourceServiceNetworkingConnectionDelete(d *schema.ResourceData, meta inte obj["name"] = peering url := fmt.Sprintf("%s%s/removePeering", config.ComputeBasePath, serviceNetworkingNetworkName) - networkFieldValue, err := ParseNetworkFieldValue(network, d, config) + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) if err != nil { return errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) } project := networkFieldValue.Project - res, err := SendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ServiceNetworkingConnection %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ServiceNetworkingConnection %q", 
d.Id())) } op := &compute.Operation{} - err = Convert(res, op) + err = tpgresource.Convert(res, op) if err != nil { return err } - err = ComputeOperationWaitTime( + err = tpgcompute.ComputeOperationWaitTime( config, op, project, "Updating Network", userAgent, d.Timeout(schema.TimeoutDelete)) if err != nil { return err @@ -358,8 +372,8 @@ func parseConnectionId(id string) (*connectionId, error) { // NOTE(craigatgoogle): An out of band aspect of this API is that it uses a unique formatting of network // different from the standard self_link URI. It requires a call to the resource manager to get the project // number for the current project. -func retrieveServiceNetworkingNetworkName(d *schema.ResourceData, config *Config, network, userAgent string) (string, error) { - networkFieldValue, err := ParseNetworkFieldValue(network, d, config) +func RetrieveServiceNetworkingNetworkName(d *schema.ResourceData, config *transport_tpg.Config, network, userAgent string) (string, error) { + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) if err != nil { return "", errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) } @@ -371,7 +385,7 @@ func retrieveServiceNetworkingNetworkName(d *schema.ResourceData, config *Config log.Printf("[DEBUG] Retrieving project number by doing a GET with the project id, as required by service networking") // err == nil indicates that the billing_project value was found billingProject := pid - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/service_networking_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/service_networking_operation.go new file mode 100644 index 0000000000..c750b32ae7 --- 
/dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/servicenetworking/service_networking_operation.go @@ -0,0 +1,39 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package servicenetworking + +import ( + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/servicenetworking/v1" +) + +type ServiceNetworkingOperationWaiter struct { + Service *servicenetworking.APIService + Project string + UserProjectOverride bool + tpgresource.CommonOperationWaiter +} + +func (w *ServiceNetworkingOperationWaiter) QueryOp() (interface{}, error) { + opGetCall := w.Service.Operations.Get(w.Op.Name) + if w.UserProjectOverride { + opGetCall.Header().Add("X-Goog-User-Project", w.Project) + } + return opGetCall.Do() +} + +func ServiceNetworkingOperationWaitTime(config *transport_tpg.Config, op *servicenetworking.Operation, activity, userAgent, project string, timeout time.Duration) error { + w := &ServiceNetworkingOperationWaiter{ + Service: config.NewServiceNetworkingClient(userAgent), + Project: project, + UserProjectOverride: config.UserProjectOverride, + } + + if err := w.SetOp(op); err != nil { + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/serviceusage/service_usage_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/serviceusage/service_usage_operation.go new file mode 100644 index 0000000000..fd4f751143 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/serviceusage/service_usage_operation.go @@ -0,0 +1,105 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package serviceusage + +import ( + "encoding/json" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/googleapi" +) + +type ServiceUsageOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + retryCount int + tpgresource.CommonOperationWaiter +} + +func (w *ServiceUsageOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.ServiceUsageBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func (w *ServiceUsageOperationWaiter) IsRetryable(err error) bool { + // Retries errors on 403 3 times if the error message + // returned contains `has not been used in project` + maxRetries := 3 + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 403 { + if w.retryCount < maxRetries && strings.Contains(gerr.Body, "has not been used in project") { + w.retryCount += 1 + log.Printf("[DEBUG] retrying on 403 %v more times", w.retryCount-maxRetries-1) + return true + } + } + return false +} + +func createServiceUsageWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*ServiceUsageOperationWaiter, error) { + w := &ServiceUsageOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func ServiceUsageOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createServiceUsageWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func ServiceUsageOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. 
+ return nil + } + w, err := createServiceUsageWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/serviceusage/serviceusage_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/serviceusage/serviceusage_operation.go new file mode 100644 index 0000000000..c88566d6f7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/serviceusage/serviceusage_operation.go @@ -0,0 +1,25 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package serviceusage + +import ( + "encoding/json" + "time" + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/serviceusage/v1" +) + +func ServiceUsageOperationWait(config *transport_tpg.Config, op *serviceusage.Operation, project, activity, userAgent string, timeout time.Duration) error { + // maintained for compatibility with old code that was written before the + // autogenerated waiters. 
+ b, err := op.MarshalJSON() + if err != nil { + return err + } + var m map[string]interface{} + if err := json.Unmarshal(b, &m); err != nil { + return err + } + return ServiceUsageOperationWaitTime(config, m, project, activity, userAgent, timeout) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/data_source_sourcerepo_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/data_source_sourcerepo_repository.go new file mode 100644 index 0000000000..5c27099ccb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/data_source_sourcerepo_repository.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package sourcerepo + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleSourceRepoRepository() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceSourceRepoRepository().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleSourceRepoRepositoryRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleSourceRepoRepositoryRead(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/repos/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return resourceSourceRepoRepositoryRead(d, meta) +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/iam_sourcerepo_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/iam_sourcerepo_repository.go new file mode 100644 index 0000000000..cb1ad20849 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/iam_sourcerepo_repository.go @@ -0,0 +1,230 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package sourcerepo + +import ( + "fmt" + "regexp" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var SourceRepoRepositoryIamSchema = map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "repository": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: SourceRepoRepositoryDiffSuppress, + }, +} + +func SourceRepoRepositoryDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + oldParts := regexp.MustCompile("projects/[^/]+/repos/").Split(old, -1) + if len(oldParts) == 2 { + return oldParts[1] == new + } + return new == old +} + +type SourceRepoRepositoryIamUpdater struct { + project string + repository string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func SourceRepoRepositoryIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + } + values["project"] = project + if v, ok := d.GetOk("repository"); ok { + values["repository"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/repos/(?P.+)", "(?P.+)"}, d, config, d.Get("repository").(string)) + if err != nil { + return nil, 
err + } + + for k, v := range m { + values[k] = v + } + + u := &SourceRepoRepositoryIamUpdater{ + project: values["project"], + repository: values["repository"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("repository", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting repository: %s", err) + } + + return u, nil +} + +func SourceRepoRepositoryIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/repos/(?P.+)", "(?P.+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &SourceRepoRepositoryIamUpdater{ + project: values["project"], + repository: values["repository"], + d: d, + Config: config, + } + if err := d.Set("repository", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting repository: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *SourceRepoRepositoryIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyRepositoryUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) 
+ } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *SourceRepoRepositoryIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyRepositoryUrl("setIamPolicy") + if err != nil { + return err + } + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *SourceRepoRepositoryIamUpdater) qualifyRepositoryUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{SourceRepoBasePath}}%s:%s", fmt.Sprintf("projects/%s/repos/%s", u.project, u.repository), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *SourceRepoRepositoryIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/repos/%s", u.project, u.repository) +} + +func (u *SourceRepoRepositoryIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-sourcerepo-repository-%s", u.GetResourceId()) +} + +func (u *SourceRepoRepositoryIamUpdater) DescribeResource() string { + return fmt.Sprintf("sourcerepo repository %q", u.GetResourceId()) +} diff --git 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/resource_sourcerepo_repository.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/resource_sourcerepo_repository.go new file mode 100644 index 0000000000..40f1d7b09a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/resource_sourcerepo_repository.go @@ -0,0 +1,505 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package sourcerepo + +import ( + "bytes" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func resourceSourceRepoRepositoryPubSubConfigsHash(v interface{}) int { + if v == nil { + return 0 + } + + var buf bytes.Buffer + m := v.(map[string]interface{}) + + buf.WriteString(fmt.Sprintf("%s-", tpgresource.GetResourceNameFromSelfLink(m["topic"].(string)))) + buf.WriteString(fmt.Sprintf("%s-", m["message_format"].(string))) + if v, ok := m["service_account_email"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return tpgresource.Hashcode(buf.String()) +} + +func ResourceSourceRepoRepository() *schema.Resource { + return &schema.Resource{ + Create: resourceSourceRepoRepositoryCreate, + Read: resourceSourceRepoRepositoryRead, + Update: resourceSourceRepoRepositoryUpdate, + Delete: resourceSourceRepoRepositoryDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSourceRepoRepositoryImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Resource name of the repository, of the form '{{repo}}'. +The repo name may contain slashes. eg, 'name/with/slash'`, + }, + "pubsub_configs": { + Type: schema.TypeSet, + Optional: true, + Description: `How this repository publishes a change in the repository through Cloud Pub/Sub. 
+Keyed by the topic names.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic": { + Type: schema.TypeString, + Required: true, + }, + "message_format": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"PROTOBUF", "JSON"}), + Description: `The format of the Cloud Pub/Sub messages. +- PROTOBUF: The message payload is a serialized protocol buffer of SourceRepoEvent. +- JSON: The message payload is a JSON string of SourceRepoEvent. Possible values: ["PROTOBUF", "JSON"]`, + }, + "service_account_email": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Email address of the service account used for publishing Cloud Pub/Sub messages. +This service account needs to be in the same project as the PubsubConfig. When added, +the caller needs to have iam.serviceAccounts.actAs permission on this service account. +If unspecified, it defaults to the compute engine default service account.`, + }, + }, + }, + Set: resourceSourceRepoRepositoryPubSubConfigsHash, + }, + "size": { + Type: schema.TypeInt, + Computed: true, + Description: `The disk usage of the repo, in bytes.`, + }, + "url": { + Type: schema.TypeString, + Computed: true, + Description: `URL to clone the repository from Google Cloud Source Repositories.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSourceRepoRepositoryCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandSourceRepoRepositoryName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + 
obj["name"] = nameProp + } + pubsubConfigsProp, err := expandSourceRepoRepositoryPubsubConfigs(d.Get("pubsub_configs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(pubsubConfigsProp)) && (ok || !reflect.DeepEqual(v, pubsubConfigsProp)) { + obj["pubsubConfigs"] = pubsubConfigsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SourceRepoBasePath}}projects/{{project}}/repos") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Repository: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Repository: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Repository: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/repos/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + if v, ok := d.GetOkExists("pubsub_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(pubsubConfigsProp)) && (ok || !reflect.DeepEqual(v, pubsubConfigsProp)) { + log.Printf("[DEBUG] Calling update after create to patch in pubsub_configs") + // pubsub_configs cannot be added on create + return resourceSourceRepoRepositoryUpdate(d, meta) + } + + log.Printf("[DEBUG] Finished creating Repository %q: %#v", d.Id(), res) + + return resourceSourceRepoRepositoryRead(d, meta) +} + +func resourceSourceRepoRepositoryRead(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SourceRepoBasePath}}projects/{{project}}/repos/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Repository: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SourceRepoRepository %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + + if err := d.Set("name", flattenSourceRepoRepositoryName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("url", flattenSourceRepoRepositoryUrl(res["url"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("size", flattenSourceRepoRepositorySize(res["size"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + if err := d.Set("pubsub_configs", flattenSourceRepoRepositoryPubsubConfigs(res["pubsubConfigs"], d, config)); err != nil { + return fmt.Errorf("Error reading Repository: %s", err) + } + + return nil +} + +func resourceSourceRepoRepositoryUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Repository: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + pubsubConfigsProp, err := expandSourceRepoRepositoryPubsubConfigs(d.Get("pubsub_configs"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("pubsub_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, pubsubConfigsProp)) { + obj["pubsubConfigs"] = pubsubConfigsProp + } + + obj, err = resourceSourceRepoRepositoryUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SourceRepoBasePath}}projects/{{project}}/repos/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Repository %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("pubsub_configs") { + updateMask = append(updateMask, "pubsubConfigs") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Repository %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Repository %q: %#v", d.Id(), res) + } + + return resourceSourceRepoRepositoryRead(d, meta) +} + +func resourceSourceRepoRepositoryDelete(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Repository: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{SourceRepoBasePath}}projects/{{project}}/repos/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Repository %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Repository") + } + + log.Printf("[DEBUG] Finished deleting Repository %q: %#v", d.Id(), res) + return nil +} + +func resourceSourceRepoRepositoryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/repos/(?P.+)", + "(?P.+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/repos/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSourceRepoRepositoryName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + + // We can't use a standard 
name_from_self_link because the name can include /'s + parts := strings.SplitAfterN(v.(string), "/", 4) + return parts[3] +} + +func flattenSourceRepoRepositoryUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSourceRepoRepositorySize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenSourceRepoRepositoryPubsubConfigs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.(map[string]interface{}) + transformed := make([]interface{}, 0, len(l)) + for k, raw := range l { + original := raw.(map[string]interface{}) + transformed = append(transformed, map[string]interface{}{ + "topic": k, + "message_format": flattenSourceRepoRepositoryPubsubConfigsMessageFormat(original["messageFormat"], d, config), + "service_account_email": flattenSourceRepoRepositoryPubsubConfigsServiceAccountEmail(original["serviceAccountEmail"], d, config), + }) + } + return transformed +} +func flattenSourceRepoRepositoryPubsubConfigsMessageFormat(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSourceRepoRepositoryPubsubConfigsServiceAccountEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSourceRepoRepositoryName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.ReplaceVars(d, config, "projects/{{project}}/repos/{{name}}") +} + +func 
expandSourceRepoRepositoryPubsubConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + if v == nil { + return map[string]interface{}{}, nil + } + m := make(map[string]interface{}) + for _, raw := range v.(*schema.Set).List() { + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMessageFormat, err := expandSourceRepoRepositoryPubsubConfigsMessageFormat(original["message_format"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMessageFormat); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["messageFormat"] = transformedMessageFormat + } + + transformedServiceAccountEmail, err := expandSourceRepoRepositoryPubsubConfigsServiceAccountEmail(original["service_account_email"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAccountEmail"] = transformedServiceAccountEmail + } + + transformedTopic, err := expandSourceRepoRepositoryPubsubConfigsTopic(original["topic"], d, config) + if err != nil { + return nil, err + } + m[transformedTopic] = transformed + } + return m, nil +} + +func expandSourceRepoRepositoryPubsubConfigsMessageFormat(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSourceRepoRepositoryPubsubConfigsServiceAccountEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceSourceRepoRepositoryUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Add "topic" field using pubsubConfig map key + pubsubConfigsVal := obj["pubsubConfigs"] + if pubsubConfigsVal != nil { + pubsubConfigs := 
pubsubConfigsVal.(map[string]interface{}) + for key := range pubsubConfigs { + config := pubsubConfigs[key].(map[string]interface{}) + config["topic"] = key + } + } + + // Nest request body in "repo" field + newObj := make(map[string]interface{}) + newObj["repo"] = obj + return newObj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/resource_sourcerepo_repository_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/resource_sourcerepo_repository_sweeper.go new file mode 100644 index 0000000000..cbf4de9652 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/resource_sourcerepo_repository_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package sourcerepo + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SourceRepoRepository", testSweepSourceRepoRepository) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSourceRepoRepository(region string) error { + resourceName := "SourceRepoRepository" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://sourcerepo.googleapis.com/v1/projects/{{project}}/repos", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error 
in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["repos"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://sourcerepo.googleapis.com/v1/projects/{{project}}/repos/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/source_repo_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/source_repo_utils.go new file mode 
100644 index 0000000000..d03ad979bb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sourcerepo/source_repo_utils.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package sourcerepo + +import ( + "regexp" + + "github.com/hashicorp/terraform-provider-google/google/services/pubsub" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func expandSourceRepoRepositoryPubsubConfigsTopic(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (string, error) { + // short-circuit if the topic is a full uri so we don't need to GetProject + ok, err := regexp.MatchString(pubsub.PubsubTopicRegex, v.(string)) + if err != nil { + return "", err + } + + if ok { + return v.(string), nil + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return "", err + } + + return pubsub.GetComputedTopicName(project, v.(string)), err +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/data_source_spanner_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/data_source_spanner_instance.go new file mode 100644 index 0000000000..5b77e82f54 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/data_source_spanner_instance.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package spanner + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceSpannerInstance() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceSpannerInstance().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "config") // not sure why this is configurable + tpgresource.AddOptionalFieldsToSchema(dsSchema, "display_name") // not sure why this is configurable + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceSpannerInstanceRead, + Schema: dsSchema, + } +} + +func dataSourceSpannerInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return resourceSpannerInstanceRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/iam_spanner_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/iam_spanner_database.go new file mode 100644 index 0000000000..69b8d4cd62 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/iam_spanner_database.go @@ -0,0 +1,176 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package spanner + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/spanner/v1" +) + +var IamSpannerDatabaseSchema = map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "database": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, +} + +type SpannerDatabaseIamUpdater struct { + project string + instance string + database string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewSpannerDatabaseIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + return &SpannerDatabaseIamUpdater{ + project: project, + instance: d.Get("instance").(string), + database: d.Get("database").(string), + d: d, + Config: config, + }, nil +} + +func SpannerDatabaseIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + return tpgresource.ParseImportId([]string{"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config) +} + +func (u *SpannerDatabaseIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewSpannerClient(userAgent).Projects.Instances.Databases.GetIamPolicy(SpannerDatabaseId{ + Project: u.project, + Database: u.database, + Instance: 
u.instance, + }.databaseUri(), &spanner.GetIamPolicyRequest{ + Options: &spanner.GetPolicyOptions{RequestedPolicyVersion: tpgiamresource.IamPolicyVersion}, + }).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := spannerToResourceManagerPolicy(p) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + cloudResourcePolicy.Version = tpgiamresource.IamPolicyVersion + + return cloudResourcePolicy, nil +} + +func (u *SpannerDatabaseIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + spannerPolicy, err := resourceManagerToSpannerPolicy(policy) + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + spannerPolicy.Version = tpgiamresource.IamPolicyVersion + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewSpannerClient(userAgent).Projects.Instances.Databases.SetIamPolicy(SpannerDatabaseId{ + Project: u.project, + Database: u.database, + Instance: u.instance, + }.databaseUri(), &spanner.SetIamPolicyRequest{ + Policy: spannerPolicy, + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *SpannerDatabaseIamUpdater) GetResourceId() string { + return SpannerDatabaseId{ + Project: u.project, + Instance: u.instance, + Database: u.database, + }.TerraformId() +} + +func (u *SpannerDatabaseIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-spanner-database-%s-%s-%s", u.project, u.instance, u.database) +} + +func (u *SpannerDatabaseIamUpdater) DescribeResource() string { + return fmt.Sprintf("Spanner Database: %s/%s/%s", u.project, u.instance, u.database) +} + +func 
resourceManagerToSpannerPolicy(p *cloudresourcemanager.Policy) (*spanner.Policy, error) { + out := &spanner.Policy{} + err := tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a resourcemanager policy to a spanner policy: {{err}}", err) + } + return out, nil +} + +func spannerToResourceManagerPolicy(p *spanner.Policy) (*cloudresourcemanager.Policy, error) { + out := &cloudresourcemanager.Policy{} + err := tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a spanner policy to a resourcemanager policy: {{err}}", err) + } + return out, nil +} + +type SpannerDatabaseId struct { + Project string + Instance string + Database string +} + +func (s SpannerDatabaseId) TerraformId() string { + return fmt.Sprintf("%s/%s/%s", s.Project, s.Instance, s.Database) +} + +func (s SpannerDatabaseId) parentProjectUri() string { + return fmt.Sprintf("projects/%s", s.Project) +} + +func (s SpannerDatabaseId) parentInstanceUri() string { + return fmt.Sprintf("%s/instances/%s", s.parentProjectUri(), s.Instance) +} + +func (s SpannerDatabaseId) databaseUri() string { + return fmt.Sprintf("%s/databases/%s", s.parentInstanceUri(), s.Database) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/iam_spanner_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/iam_spanner_instance.go new file mode 100644 index 0000000000..309bde14de --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/iam_spanner_instance.go @@ -0,0 +1,175 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package spanner + +import ( + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + spanner "google.golang.org/api/spanner/v1" +) + +var IamSpannerInstanceSchema = map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, +} + +type SpannerInstanceIamUpdater struct { + project string + instance string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewSpannerInstanceIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + return &SpannerInstanceIamUpdater{ + project: project, + instance: d.Get("instance").(string), + d: d, + Config: config, + }, nil +} + +func SpannerInstanceIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + id, err := extractSpannerInstanceId(d.Id()) + if err != nil { + return err + } + if err := d.Set("instance", id.Instance); err != nil { + return fmt.Errorf("Error setting instance: %s", err) + } + if err := d.Set("project", id.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + + // Explicitly set the id so imported resources have the same ID format as non-imported ones. 
+ d.SetId(id.TerraformId()) + return nil +} + +func (u *SpannerInstanceIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewSpannerClient(userAgent).Projects.Instances.GetIamPolicy(SpannerInstanceId{ + Project: u.project, + Instance: u.instance, + }.instanceUri(), &spanner.GetIamPolicyRequest{ + Options: &spanner.GetPolicyOptions{RequestedPolicyVersion: tpgiamresource.IamPolicyVersion}, + }).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := spannerToResourceManagerPolicy(p) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + cloudResourcePolicy.Version = tpgiamresource.IamPolicyVersion + + return cloudResourcePolicy, nil +} + +func (u *SpannerInstanceIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + spannerPolicy, err := resourceManagerToSpannerPolicy(policy) + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + spannerPolicy.Version = tpgiamresource.IamPolicyVersion + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewSpannerClient(userAgent).Projects.Instances.SetIamPolicy(SpannerInstanceId{ + Project: u.project, + Instance: u.instance, + }.instanceUri(), &spanner.SetIamPolicyRequest{ + Policy: spannerPolicy, + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *SpannerInstanceIamUpdater) GetResourceId() string { + return SpannerInstanceId{ + Project: u.project, + Instance: u.instance, + 
}.TerraformId() +} + +func (u *SpannerInstanceIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-spanner-instance-%s-%s", u.project, u.instance) +} + +func (u *SpannerInstanceIamUpdater) DescribeResource() string { + return fmt.Sprintf("Spanner Instance: %s/%s", u.project, u.instance) +} + +type SpannerInstanceId struct { + Project string + Instance string +} + +func (s SpannerInstanceId) TerraformId() string { + return fmt.Sprintf("%s/%s", s.Project, s.Instance) +} + +func (s SpannerInstanceId) parentProjectUri() string { + return fmt.Sprintf("projects/%s", s.Project) +} + +func (s SpannerInstanceId) instanceUri() string { + return fmt.Sprintf("%s/instances/%s", s.parentProjectUri(), s.Instance) +} + +func (s SpannerInstanceId) instanceConfigUri(c string) string { + return fmt.Sprintf("%s/instanceConfigs/%s", s.parentProjectUri(), c) +} + +func extractSpannerInstanceId(id string) (*SpannerInstanceId, error) { + if !regexp.MustCompile("^" + verify.ProjectRegex + "/[a-z0-9-]+$").Match([]byte(id)) { + return nil, fmt.Errorf("Invalid spanner id format, expecting {projectId}/{instanceId}") + } + parts := strings.Split(id, "/") + return &SpannerInstanceId{ + Project: parts[0], + Instance: parts[1], + }, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_database.go new file mode 100644 index 0000000000..6192ab2764 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_database.go @@ -0,0 +1,842 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package spanner + +import ( + "context" + "fmt" + "log" + "reflect" + "regexp" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +// customizeDiff func for additional checks on google_spanner_database properties: +func resourceSpannerDBDdlCustomDiffFunc(diff tpgresource.TerraformResourceDiff) error { + old, new := diff.GetChange("ddl") + oldDdls := old.([]interface{}) + newDdls := new.([]interface{}) + var err error + + if len(newDdls) < len(oldDdls) { + err = diff.ForceNew("ddl") + if err != nil { + return fmt.Errorf("ForceNew failed for ddl, old - %v and new - %v", oldDdls, newDdls) + } + return nil + } + + for i := range oldDdls { + if newDdls[i].(string) != oldDdls[i].(string) { + err = diff.ForceNew("ddl") + if err != nil { + return fmt.Errorf("ForceNew failed for ddl, old - %v and new - %v", oldDdls, newDdls) + } + return nil + } + } + return nil +} + +func resourceSpannerDBDdlCustomDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + // separate func to allow unit testing + return resourceSpannerDBDdlCustomDiffFunc(diff) +} + +func 
ValidateDatabaseRetentionPeriod(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + valueError := fmt.Errorf("version_retention_period should be in range [1h, 7d], in a format resembling 1d, 24h, 1440m, or 86400s") + + r := regexp.MustCompile("^(\\d{1}d|\\d{1,3}h|\\d{2,5}m|\\d{4,6}s)$") + if !r.MatchString(value) { + errors = append(errors, valueError) + return + } + + unit := value[len(value)-1:] + multiple := value[:len(value)-1] + num, err := strconv.Atoi(multiple) + if err != nil { + errors = append(errors, valueError) + return + } + + if unit == "d" && (num < 1 || num > 7) { + errors = append(errors, valueError) + return + } + if unit == "h" && (num < 1 || num > 7*24) { + errors = append(errors, valueError) + return + } + if unit == "m" && (num < 1*60 || num > 7*24*60) { + errors = append(errors, valueError) + return + } + if unit == "s" && (num < 1*60*60 || num > 7*24*60*60) { + errors = append(errors, valueError) + return + } + + return +} + +func resourceSpannerDBVirtualUpdate(d *schema.ResourceData, resourceSchema map[string]*schema.Schema) bool { + // deletion_protection is the only virtual field + if d.HasChange("deletion_protection") { + for field := range resourceSchema { + if field == "deletion_protection" { + continue + } + if d.HasChange(field) { + return false + } + } + return true + } + return false +} + +func ResourceSpannerDatabase() *schema.Resource { + return &schema.Resource{ + Create: resourceSpannerDatabaseCreate, + Read: resourceSpannerDatabaseRead, + Update: resourceSpannerDatabaseUpdate, + Delete: resourceSpannerDatabaseDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSpannerDatabaseImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + resourceSpannerDBDdlCustomDiff, + ), + + Schema: 
map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The instance to create the database on.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^[a-z][a-z0-9_\-]*[a-z0-9]$`), + Description: `A unique identifier for the database, which cannot be changed after +the instance is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].`, + }, + "database_dialect": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"GOOGLE_STANDARD_SQL", "POSTGRESQL", ""}), + Description: `The dialect of the Cloud Spanner Database. +If it is not provided, "GOOGLE_STANDARD_SQL" will be used. Possible values: ["GOOGLE_STANDARD_SQL", "POSTGRESQL"]`, + }, + "ddl": { + Type: schema.TypeList, + Optional: true, + Description: `An optional list of DDL statements to run inside the newly created +database. Statements can create tables, indexes, etc. These statements +execute atomically with the creation of the database: if there is an +error in any statement, the database is not created.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "encryption_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Encryption configuration for the database`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Fully qualified name of the KMS key to use to encrypt this database. This key must exist +in the same location as the Spanner Database.`, + }, + }, + }, + }, + "version_retention_period": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: ValidateDatabaseRetentionPeriod, + Description: `The retention period for the database. 
The retention period must be between 1 hour +and 7 days, and can be specified in days, hours, minutes, or seconds. For example, +the values 1d, 24h, 1440m, and 86400s are equivalent. Default value is 1h. +If this property is used, you must avoid adding new DDL statements to 'ddl' that +update the database's version_retention_period.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `An explanation of the status of the database.`, + }, + "deletion_protection": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Whether or not to allow Terraform to destroy the instance. Unless this field is set to false +in Terraform state, a 'terraform destroy' or 'terraform apply' that would delete the instance will fail.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSpannerDatabaseCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandSpannerDatabaseName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + versionRetentionPeriodProp, err := expandSpannerDatabaseVersionRetentionPeriod(d.Get("version_retention_period"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_retention_period"); !tpgresource.IsEmptyValue(reflect.ValueOf(versionRetentionPeriodProp)) && (ok || !reflect.DeepEqual(v, versionRetentionPeriodProp)) { + obj["versionRetentionPeriod"] = versionRetentionPeriodProp + } + extraStatementsProp, err := expandSpannerDatabaseDdl(d.Get("ddl"), d, config) + if err != nil { + return err 
+ } else if v, ok := d.GetOkExists("ddl"); !tpgresource.IsEmptyValue(reflect.ValueOf(extraStatementsProp)) && (ok || !reflect.DeepEqual(v, extraStatementsProp)) { + obj["extraStatements"] = extraStatementsProp + } + encryptionConfigProp, err := expandSpannerDatabaseEncryptionConfig(d.Get("encryption_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionConfigProp)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { + obj["encryptionConfig"] = encryptionConfigProp + } + databaseDialectProp, err := expandSpannerDatabaseDatabaseDialect(d.Get("database_dialect"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("database_dialect"); !tpgresource.IsEmptyValue(reflect.ValueOf(databaseDialectProp)) && (ok || !reflect.DeepEqual(v, databaseDialectProp)) { + obj["databaseDialect"] = databaseDialectProp + } + instanceProp, err := expandSpannerDatabaseInstance(d.Get("instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceProp)) && (ok || !reflect.DeepEqual(v, instanceProp)) { + obj["instance"] = instanceProp + } + + obj, err = resourceSpannerDatabaseEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Database: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Database: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Database: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{instance}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = SpannerOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Database", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Database: %s", err) + } + + opRes, err = resourceSpannerDatabaseDecoder(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error decoding response from operation: %s", err) + } + if opRes == nil { + return fmt.Errorf("Error decoding response from operation, could not find object") + } + + if err := d.Set("name", flattenSpannerDatabaseName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{instance}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Note: Databases that are created with POSTGRESQL dialect do not support extra DDL + // statements at the time of database creation. 
To avoid users needing to run + // `terraform apply` twice to get their desired outcome, the provider does not set + // `extraStatements` in the call to the `create` endpoint and all DDL (other than + // ) is run post-create, by calling the `updateDdl` endpoint + + _, ok := opRes["name"] + if !ok { + return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") + } + + retention, retentionPeriodOk := d.GetOk("version_retention_period") + retentionPeriod := retention.(string) + ddl, ddlOk := d.GetOk("ddl") + ddlStatements := ddl.([]interface{}) + + if retentionPeriodOk || ddlOk { + + obj := make(map[string]interface{}) + updateDdls := []string{} + + if ddlOk { + for i := 0; i < len(ddlStatements); i++ { + if ddlStatements[i] != nil { + updateDdls = append(updateDdls, ddlStatements[i].(string)) + } + } + } + + if retentionPeriodOk { + dbName := d.Get("name") + retentionDdl := fmt.Sprintf("ALTER DATABASE `%s` SET OPTIONS (version_retention_period=\"%s\")", dbName, retentionPeriod) + if dialect, ok := d.GetOk("database_dialect"); ok && dialect == "POSTGRESQL" { + retentionDdl = fmt.Sprintf("ALTER DATABASE \"%s\" SET spanner.version_retention_period TO \"%s\"", dbName, retentionPeriod) + } + updateDdls = append(updateDdls, retentionDdl) + } + + // Skip API call if there are no new ddl entries (due to ignoring nil values) + if len(updateDdls) > 0 { + log.Printf("[DEBUG] Applying extra DDL statements to the new Database: %#v", updateDdls) + + obj["statements"] = updateDdls + + url, err = tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}/ddl") + if err != nil { + return err + } + + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error executing 
DDL statements on Database: %s", err) + } + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = SpannerOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Database", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to run DDL against newly-created Database: %s", err) + } + } + } + + log.Printf("[DEBUG] Finished creating Database %q: %#v", d.Id(), res) + + return resourceSpannerDatabaseRead(d, meta) +} + +func resourceSpannerDatabaseRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Database: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SpannerDatabase %q", d.Id())) + } + + res, err = resourceSpannerDatabaseDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing SpannerDatabase because it no longer exists.") + d.SetId("") + return nil + } + + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("deletion_protection"); !ok { + if err := d.Set("deletion_protection", true); err != nil { + return fmt.Errorf("Error setting deletion_protection: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + + if err := d.Set("name", flattenSpannerDatabaseName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("version_retention_period", flattenSpannerDatabaseVersionRetentionPeriod(res["versionRetentionPeriod"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("state", flattenSpannerDatabaseState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("encryption_config", flattenSpannerDatabaseEncryptionConfig(res["encryptionConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("database_dialect", flattenSpannerDatabaseDatabaseDialect(res["databaseDialect"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("instance", flattenSpannerDatabaseInstance(res["instance"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + + return nil +} + +func resourceSpannerDatabaseUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Database: %s", err) + } + billingProject = 
project + + d.Partial(true) + + if d.HasChange("version_retention_period") || d.HasChange("ddl") { + obj := make(map[string]interface{}) + + versionRetentionPeriodProp, err := expandSpannerDatabaseVersionRetentionPeriod(d.Get("version_retention_period"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("version_retention_period"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, versionRetentionPeriodProp)) { + obj["versionRetentionPeriod"] = versionRetentionPeriodProp + } + extraStatementsProp, err := expandSpannerDatabaseDdl(d.Get("ddl"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ddl"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, extraStatementsProp)) { + obj["extraStatements"] = extraStatementsProp + } + + obj, err = resourceSpannerDatabaseUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}/ddl") + if err != nil { + return err + } + + if len(obj["statements"].([]string)) == 0 { + // Return early to avoid making an API call that errors, + // due to containing no DDL SQL statements + return resourceSpannerDatabaseRead(d, meta) + } + + if resourceSpannerDBVirtualUpdate(d, ResourceSpannerDatabase().Schema) { + if d.Get("deletion_protection") != nil { + if err := d.Set("deletion_protection", d.Get("deletion_protection")); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + } + return nil + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Database %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Database %q: %#v", d.Id(), res) + } + + err = SpannerOperationWaitTime( + config, res, project, "Updating Database", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceSpannerDatabaseRead(d, meta) +} + +func resourceSpannerDatabaseDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Database: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + if d.Get("deletion_protection").(bool) { + return fmt.Errorf("cannot destroy instance without setting deletion_protection=false and running `terraform apply`") + } + log.Printf("[DEBUG] Deleting Database %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Database") + } + + err = SpannerOperationWaitTime( + config, res, project, "Deleting Database", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished 
deleting Database %q: %#v", d.Id(), res) + return nil +} + +func resourceSpannerDatabaseImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/instances/(?P[^/]+)/databases/(?P[^/]+)", + "instances/(?P[^/]+)/databases/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{instance}}/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Explicitly set virtual fields to default values on import + if err := d.Set("deletion_protection", true); err != nil { + return nil, fmt.Errorf("Error setting deletion_protection: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenSpannerDatabaseName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenSpannerDatabaseVersionRetentionPeriod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSpannerDatabaseState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSpannerDatabaseEncryptionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenSpannerDatabaseEncryptionConfigKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenSpannerDatabaseEncryptionConfigKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func flattenSpannerDatabaseDatabaseDialect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSpannerDatabaseInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func expandSpannerDatabaseName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerDatabaseVersionRetentionPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerDatabaseDdl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerDatabaseEncryptionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandSpannerDatabaseEncryptionConfigKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandSpannerDatabaseEncryptionConfigKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerDatabaseDatabaseDialect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerDatabaseInstance(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("instances", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for instance: %s", err) + } + return f.RelativeLink(), nil +} + +func resourceSpannerDatabaseEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + obj["createStatement"] = fmt.Sprintf("CREATE DATABASE `%s`", obj["name"]) + if dialect, ok := obj["databaseDialect"]; ok && dialect == "POSTGRESQL" { + obj["createStatement"] = fmt.Sprintf("CREATE DATABASE \"%s\"", obj["name"]) + } + + // Extra DDL statements are removed from the create request and instead applied to the database in + // a post-create action, to accommodate retrictions when creating PostgreSQL-enabled databases. + // https://cloud.google.com/spanner/docs/create-manage-databases#create_a_database + log.Printf("[DEBUG] Preparing to create new Database. Any extra DDL statements will be applied to the Database in a separate API call") + + delete(obj, "name") + delete(obj, "instance") + + delete(obj, "versionRetentionPeriod") + delete(obj, "extraStatements") + return obj, nil +} + +func resourceSpannerDatabaseUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + old, new := d.GetChange("ddl") + oldDdls := old.([]interface{}) + newDdls := new.([]interface{}) + updateDdls := []string{} + + // Only new ddl statments to be add to update call + for i := len(oldDdls); i < len(newDdls); i++ { + if newDdls[i] != nil { + updateDdls = append(updateDdls, newDdls[i].(string)) + } + } + + // Add statement to update version_retention_period property, if needed + if d.HasChange("version_retention_period") { + dbName := d.Get("name") + retentionDdl := fmt.Sprintf("ALTER DATABASE `%s` SET OPTIONS (version_retention_period=\"%s\")", dbName, obj["versionRetentionPeriod"]) + if dialect, ok := 
d.GetOk("database_dialect"); ok && dialect == "POSTGRESQL" { + retentionDdl = fmt.Sprintf("ALTER DATABASE \"%s\" SET spanner.version_retention_period TO \"%s\"", dbName, obj["versionRetentionPeriod"]) + } + updateDdls = append(updateDdls, retentionDdl) + } + + obj["statements"] = updateDdls + delete(obj, "name") + delete(obj, "versionRetentionPeriod") + delete(obj, "instance") + delete(obj, "extraStatements") + return obj, nil +} + +func resourceSpannerDatabaseDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + d.SetId(res["name"].(string)) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/instances/(?P[^/]+)/databases/(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + res["project"] = d.Get("project").(string) + res["instance"] = d.Get("instance").(string) + res["name"] = d.Get("name").(string) + id, err := tpgresource.ReplaceVars(d, config, "{{instance}}/{{name}}") + if err != nil { + return nil, err + } + d.SetId(id) + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_instance.go new file mode 100644 index 0000000000..b3fe2d0d38 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_instance.go @@ -0,0 +1,748 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package spanner + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func deleteSpannerBackups(d *schema.ResourceData, config *transport_tpg.Config, res map[string]interface{}, userAgent string, billingProject string) error { + var v interface{} + var ok bool + + v, ok = res["backups"] + if !ok || v == nil { + return nil + } + + // Iterate over the list and delete each backup. 
+ for _, itemRaw := range v.([]interface{}) { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + backupName := item["name"].(string) + + log.Printf("[DEBUG] Found backups for resource %q: %#v)", d.Id(), item) + + path := "{{SpannerBasePath}}" + backupName + + url, err := tpgresource.ReplaceVars(d, config, path) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return err + } + } + return nil +} + +func resourceSpannerInstanceVirtualUpdate(d *schema.ResourceData, resourceSchema map[string]*schema.Schema) bool { + // force_destroy is the only virtual field + if d.HasChange("force_destroy") { + for field := range resourceSchema { + if field == "force_destroy" { + continue + } + if d.HasChange(field) { + return false + } + } + return true + } + return false +} + +func ResourceSpannerInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceSpannerInstanceCreate, + Read: resourceSpannerInstanceRead, + Update: resourceSpannerInstanceUpdate, + Delete: resourceSpannerInstanceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSpannerInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "config": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the instance's configuration (similar but not +quite the same as a region) which defines the geographic placement and +replication of your databases in this instance. It determines where your data +is stored. 
Values are typically of the form 'regional-europe-west1' , 'us-central' etc. +In order to obtain a valid list please consult the +[Configuration section of the docs](https://cloud.google.com/spanner/docs/instances).`, + }, + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `The descriptive name for this instance as it appears in UIs. Must be +unique per project and between 4 and 30 characters in length.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^[a-z][-a-z0-9]*[a-z0-9]$`), + Description: `A unique identifier for the instance, which cannot be changed after +the instance is created. The name must be between 6 and 30 characters +in length. + + +If not provided, a random string starting with 'tf-' will be selected.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `An object containing a list of "key": value pairs. +Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "num_nodes": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The number of nodes allocated to this instance. Exactly one of either node_count or processing_units +must be present in terraform.`, + ExactlyOneOf: []string{"num_nodes", "processing_units"}, + }, + "processing_units": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The number of processing units allocated to this instance. 
Exactly one of processing_units +or node_count must be present in terraform.`, + ExactlyOneOf: []string{"num_nodes", "processing_units"}, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Instance status: 'CREATING' or 'READY'.`, + }, + "force_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `When deleting a spanner instance, this boolean option will delete all backups of this instance. +This must be set to true if you created a backup manually in the console.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSpannerInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandSpannerInstanceName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + configProp, err := expandSpannerInstanceConfig(d.Get("config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("config"); !tpgresource.IsEmptyValue(reflect.ValueOf(configProp)) && (ok || !reflect.DeepEqual(v, configProp)) { + obj["config"] = configProp + } + displayNameProp, err := expandSpannerInstanceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + nodeCountProp, err := expandSpannerInstanceNumNodes(d.Get("num_nodes"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("num_nodes"); !tpgresource.IsEmptyValue(reflect.ValueOf(nodeCountProp)) && (ok || !reflect.DeepEqual(v, nodeCountProp)) { + obj["nodeCount"] = nodeCountProp + } + processingUnitsProp, err := expandSpannerInstanceProcessingUnits(d.Get("processing_units"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("processing_units"); !tpgresource.IsEmptyValue(reflect.ValueOf(processingUnitsProp)) && (ok || !reflect.DeepEqual(v, processingUnitsProp)) { + obj["processingUnits"] = processingUnitsProp + } + labelsProp, err := expandSpannerInstanceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + obj, err = resourceSpannerInstanceEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Instance: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Instance: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the 
resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = SpannerOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Instance", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Instance: %s", err) + } + + opRes, err = resourceSpannerInstanceDecoder(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error decoding response from operation: %s", err) + } + if opRes == nil { + return fmt.Errorf("Error decoding response from operation, could not find object") + } + + if err := d.Set("name", flattenSpannerInstanceName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{project}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // This is useful if the resource in question doesn't have a perfectly consistent API + // That is, the Operation for Create might return before the Get operation shows the + // completed state of the resource. 
+ time.Sleep(5 * time.Second) + + log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) + + return resourceSpannerInstanceRead(d, meta) +} + +func resourceSpannerInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SpannerInstance %q", d.Id())) + } + + res, err = resourceSpannerInstanceDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing SpannerInstance because it no longer exists.") + d.SetId("") + return nil + } + + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("force_destroy"); !ok { + if err := d.Set("force_destroy", false); err != nil { + return fmt.Errorf("Error setting force_destroy: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + if err := d.Set("name", flattenSpannerInstanceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("config", flattenSpannerInstanceConfig(res["config"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("display_name", flattenSpannerInstanceDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("num_nodes", flattenSpannerInstanceNumNodes(res["nodeCount"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("processing_units", flattenSpannerInstanceProcessingUnits(res["processingUnits"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("labels", flattenSpannerInstanceLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + if err := d.Set("state", flattenSpannerInstanceState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + + return nil +} + +func resourceSpannerInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return 
fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandSpannerInstanceDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + nodeCountProp, err := expandSpannerInstanceNumNodes(d.Get("num_nodes"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("num_nodes"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nodeCountProp)) { + obj["nodeCount"] = nodeCountProp + } + processingUnitsProp, err := expandSpannerInstanceProcessingUnits(d.Get("processing_units"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("processing_units"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, processingUnitsProp)) { + obj["processingUnits"] = processingUnitsProp + } + labelsProp, err := expandSpannerInstanceLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + obj, err = resourceSpannerInstanceUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) + if resourceSpannerInstanceVirtualUpdate(d, ResourceSpannerInstance().Schema) { + if d.Get("force_destroy") != nil { + if err := d.Set("force_destroy", d.Get("force_destroy")); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } + } + return nil + } + + // err == nil indicates that the 
billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) + } + + err = SpannerOperationWaitTime( + config, res, project, "Updating Instance", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceSpannerInstanceRead(d, meta) +} + +func resourceSpannerInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Instance: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + if d.Get("force_destroy").(bool) { + backupsUrl, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{name}}/backups") + if err != nil { + return err + } + + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: backupsUrl, + UserAgent: userAgent, + }) + if err != nil { + // API returns 200 if no backups exist but the instance still exists, hence the error check. 
+ return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SpannerInstance %q", d.Id())) + } + + err = deleteSpannerBackups(d, config, resp, billingProject, userAgent) + if err != nil { + return err + } + } + log.Printf("[DEBUG] Deleting Instance %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Instance") + } + + log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) + return nil +} + +func resourceSpannerInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/instances/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Explicitly set virtual fields to default values on import + if err := d.Set("force_destroy", false); err != nil { + return nil, fmt.Errorf("Error setting force_destroy: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenSpannerInstanceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenSpannerInstanceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return 
tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenSpannerInstanceDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSpannerInstanceNumNodes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenSpannerInstanceProcessingUnits(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenSpannerInstanceLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSpannerInstanceState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSpannerInstanceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerInstanceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + r := regexp.MustCompile("projects/(.+)/instanceConfigs/(.+)") + if r.MatchString(v.(string)) { + return v.(string), nil + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + return fmt.Sprintf("projects/%s/instanceConfigs/%s", 
project, v.(string)), nil +} + +func expandSpannerInstanceDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerInstanceNumNodes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerInstanceProcessingUnits(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSpannerInstanceLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func resourceSpannerInstanceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + // Temp Logic to accommodate processing_units and num_nodes + if obj["processingUnits"] == nil && obj["nodeCount"] == nil { + obj["nodeCount"] = 1 + } + newObj := make(map[string]interface{}) + newObj["instance"] = obj + if obj["name"] == nil { + if err := d.Set("name", resource.PrefixedUniqueId("tfgen-spanid-")[:30]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + newObj["instanceId"] = d.Get("name").(string) + } else { + newObj["instanceId"] = obj["name"] + } + delete(obj, "name") + return newObj, nil +} + +func resourceSpannerInstanceUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + project, err := tpgresource.GetProject(d, meta.(*transport_tpg.Config)) + if err != nil { + return nil, err + } + obj["name"] = fmt.Sprintf("projects/%s/instances/%s", project, obj["name"]) + newObj := make(map[string]interface{}) + newObj["instance"] = obj + updateMask := make([]string, 0) + if 
d.HasChange("num_nodes") { + updateMask = append(updateMask, "nodeCount") + } + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + if d.HasChange("processing_units") { + updateMask = append(updateMask, "processingUnits") + } + newObj["fieldMask"] = strings.Join(updateMask, ",") + return newObj, nil +} + +func resourceSpannerInstanceDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + d.SetId(res["name"].(string)) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/instances/(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + res["project"] = d.Get("project").(string) + res["name"] = d.Get("name").(string) + id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{name}}") + if err != nil { + return nil, err + } + d.SetId(id) + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_instance_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_instance_sweeper.go new file mode 100644 index 0000000000..7d3be511a2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/resource_spanner_instance_sweeper.go @@ -0,0 +1,97 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package spanner + +import ( + "context" + "log" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SpannerInstance", testSweepSpannerInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSpannerInstance(region string) error { + resourceName := "SpannerInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + spannerUrl := "https://spanner.googleapis.com/v1" + listUrl := spannerUrl + "/projects/" + config.Project + "/instances" + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["instances"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Count items that weren't sweeped. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := obj["name"].(string) + shortName := name[strings.LastIndex(name, "/")+1:] + + // Increment count and skip if resource is not sweepable. + if !sweeper.IsSweepableTestResource(shortName) { + nonPrefixCount++ + continue + } + + deleteUrl := spannerUrl + "/" + name + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, shortName) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items without tf_test prefix remain.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/spanner_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/spanner_operation.go new file mode 100644 index 0000000000..5856cf4b26 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/spanner/spanner_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package spanner + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type SpannerOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *SpannerOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.SpannerBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createSpannerWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*SpannerOperationWaiter, error) { + w := &SpannerOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func SpannerOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createSpannerWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func SpannerOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, 
userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createSpannerWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_sql_ca_certs.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_google_sql_ca_certs.go similarity index 80% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_sql_ca_certs.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_google_sql_ca_certs.go index 56e4d20279..52be56bd7a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_sql_ca_certs.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_google_sql_ca_certs.go @@ -1,10 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package sql import ( "fmt" "log" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleSQLCaCerts() *schema.Resource { @@ -15,7 +19,7 @@ func DataSourceGoogleSQLCaCerts() *schema.Resource { "instance": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, }, "project": { Type: schema.TypeString, @@ -60,13 +64,13 @@ func DataSourceGoogleSQLCaCerts() *schema.Resource { } func dataSourceGoogleSQLCaCertsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - fv, err := parseProjectFieldValue("instances", d.Get("instance").(string), "project", d, config, false) + fv, err := tpgresource.ParseProjectFieldValue("instances", d.Get("instance").(string), "project", d, config, false) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_google_sql_tiers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_google_sql_tiers.go new file mode 100644 index 0000000000..6a324202f6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_google_sql_tiers.go @@ -0,0 +1,112 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package sql + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + sqladmin "google.golang.org/api/sqladmin/v1beta4" +) + +func DataSourceGoogleSQLTiers() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleSQLTiersRead, + + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Project ID of the project for which to list tiers.`, + }, + "tiers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tier": { + Type: schema.TypeString, + Computed: true, + Description: `An identifier for the machine type, for example, db-custom-1-3840.`, + }, + "ram": { + Type: schema.TypeInt, + Computed: true, + Description: `The maximum ram usage of this tier in bytes.`, + }, + "disk_quota": { + Type: schema.TypeInt, + Computed: true, + Description: `The maximum disk size of this tier in bytes.`, + }, + "region": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `The applicable regions for this tier.`, + }, + }, + }, + }, + }, + } +} + +func dataSourceGoogleSQLTiersRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Fetching tiers for project %s", project) + + response, err := config.NewSqlAdminClient(userAgent).Tiers.List(project).Do() + if err != nil { + return fmt.Errorf("error retrieving tiers: %s", err) + } + + log.Printf("[DEBUG] Fetched available tiers for 
project %s", project) + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("error setting project: %s", err) + } + if err := d.Set("tiers", flattenTiers(response.Items)); err != nil { + return fmt.Errorf("error setting tiers: %s", err) + } + + d.SetId(fmt.Sprintf("projects/%s", project)) + + return nil +} + +func flattenTiers(items []*sqladmin.Tier) []map[string]interface{} { + var tiers []map[string]interface{} + + for _, item := range items { + if item != nil { + data := map[string]interface{}{ + "tier": item.Tier, + "ram": item.RAM, + "disk_quota": item.DiskQuota, + "region": item.Region, + } + + tiers = append(tiers, data) + } + } + + return tiers +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_backup_run.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_backup_run.go similarity index 85% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_backup_run.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_backup_run.go index 37ef919f21..df9aed4c1d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_backup_run.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_backup_run.go @@ -1,9 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package sql import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" sqladmin "google.golang.org/api/sqladmin/v1beta4" ) @@ -55,12 +59,12 @@ func DataSourceSqlBackupRun() *schema.Resource { } func dataSourceSqlBackupRunRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -103,7 +107,7 @@ func dataSourceSqlBackupRunRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error setting status: %s", err) } - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/backupRuns/{{backup_id}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance}}/backupRuns/{{backup_id}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_database.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_database.go new file mode 100644 index 0000000000..037739c98c --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_database.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package sql + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceSqlDatabase() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceSQLDatabase().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "instance") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceSqlDatabaseRead, + Schema: dsSchema, + } +} + +func dataSourceSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Database: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/instances/%s/databases/%s", project, d.Get("instance").(string), d.Get("name").(string))) + err = resourceSQLDatabaseRead(d, meta) + if err != nil { + return err + } + if err := d.Set("deletion_policy", nil); err != nil { + return fmt.Errorf("Error setting deletion_policy: %s", err) + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_database_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_database_instance.go new file mode 100644 index 0000000000..81e2d15fdd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_database_instance.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package sql + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func DataSourceSqlDatabaseInstance() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceSqlDatabaseInstance().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceSqlDatabaseInstanceRead, + Schema: dsSchema, + } +} + +func dataSourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { + + return resourceSqlDatabaseInstanceRead(d, meta) + +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_database_instances.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_database_instances.go similarity index 86% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_database_instances.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_database_instances.go index f4060bb86e..55c0461999 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_sql_database_instances.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_database_instances.go @@ -1,10 +1,14 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package sql import ( "fmt" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" sqladmin "google.golang.org/api/sqladmin/v1beta4" ) @@ -48,7 +52,7 @@ func DataSourceSqlDatabaseInstances() *schema.Resource { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ - Schema: datasourceSchemaFromResourceSchema(ResourceSqlDatabaseInstance().Schema), + Schema: tpgresource.DatasourceSchemaFromResourceSchema(ResourceSqlDatabaseInstance().Schema), }, }, }, @@ -56,12 +60,12 @@ func DataSourceSqlDatabaseInstances() *schema.Resource { } func dataSourceSqlDatabaseInstancesRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -98,10 +102,14 @@ func dataSourceSqlDatabaseInstancesRead(d *schema.ResourceData, meta interface{} databaseInstances := make([]map[string]interface{}, 0) for { var instances *sqladmin.InstancesListResponse - err = RetryTimeDuration(func() (rerr error) { - instances, rerr = config.NewSqlAdminClient(userAgent).Instances.List(project).Filter(filter).PageToken(pageToken).Do() - return rerr - }, d.Timeout(schema.TimeoutRead), IsSqlOperationInProgressError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + instances, rerr = config.NewSqlAdminClient(userAgent).Instances.List(project).Filter(filter).PageToken(pageToken).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutRead), + ErrorRetryPredicates: 
[]transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_databases.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_databases.go new file mode 100644 index 0000000000..09482be1b5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/data_source_sql_databases.go @@ -0,0 +1,98 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package sql + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + sqladmin "google.golang.org/api/sqladmin/v1beta4" +) + +func DataSourceSqlDatabases() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceSqlDatabasesRead, + + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Description: `Project ID of the project that contains the instance.`, + }, + "instance": { + Type: schema.TypeString, + Required: true, + Description: `The name of the Cloud SQL database instance in which the database belongs.`, + }, + "databases": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: tpgresource.DatasourceSchemaFromResourceSchema(ResourceSQLDatabase().Schema), + }, + }, + }, + } +} + +func dataSourceSqlDatabasesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + var databases 
*sqladmin.DatabasesListResponse + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + databases, rerr = config.NewSqlAdminClient(userAgent).Databases.List(project, d.Get("instance").(string)).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutRead), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Databases in %q instance", d.Get("instance").(string))) + } + flattenedDatabases := flattenDatabases(databases.Items) + + //client-side sorting to provide consistent ordering of the databases + sort.SliceStable(flattenedDatabases, func(i, j int) bool { + return strings.Compare(flattenedDatabases[i]["name"].(string), flattenedDatabases[j]["name"].(string)) < 1 + }) + if err := d.Set("databases", flattenedDatabases); err != nil { + return fmt.Errorf("Error setting databases: %s", err) + } + d.SetId(fmt.Sprintf("project/%s/instance/%s/databases", project, d.Get("instance").(string))) + return nil +} + +func flattenDatabases(fetchedDatabases []*sqladmin.Database) []map[string]interface{} { + if fetchedDatabases == nil { + return make([]map[string]interface{}, 0) + } + + databases := make([]map[string]interface{}, 0, len(fetchedDatabases)) + for _, rawDatabase := range fetchedDatabases { + database := make(map[string]interface{}) + database["name"] = rawDatabase.Name + database["instance"] = rawDatabase.Instance + database["project"] = rawDatabase.Project + database["charset"] = rawDatabase.Charset + database["collation"] = rawDatabase.Collation + database["self_link"] = rawDatabase.SelfLink + + databases = append(databases, database) + } + return databases +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database.go new file mode 100644 index 0000000000..ea46ef8cb9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database.go @@ -0,0 +1,477 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package sql + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceSQLDatabase() *schema.Resource { + return &schema.Resource{ + Create: resourceSQLDatabaseCreate, + Read: resourceSQLDatabaseRead, + Update: resourceSQLDatabaseUpdate, + Delete: resourceSQLDatabaseDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSQLDatabaseImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the Cloud SQL instance. 
This does not include the project +ID.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the database in the Cloud SQL instance. +This does not include the project ID or instance name.`, + }, + "charset": { + Type: schema.TypeString, + Computed: true, + Optional: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: `The charset value. See MySQL's +[Supported Character Sets and Collations](https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html) +and Postgres' [Character Set Support](https://www.postgresql.org/docs/9.6/static/multibyte.html) +for more details and supported values. Postgres databases only support +a value of 'UTF8' at creation time.`, + }, + "collation": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The collation value. See MySQL's +[Supported Character Sets and Collations](https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html) +and Postgres' [Collation Support](https://www.postgresql.org/docs/9.6/static/collation.html) +for more details and supported values. Postgres databases only support +a value of 'en_US.UTF8' at creation time.`, + }, + "deletion_policy": { + Type: schema.TypeString, + Optional: true, + Default: "DELETE", + Description: `The deletion policy for the database. Setting ABANDON allows the resource +to be abandoned rather than deleted. This is useful for Postgres, where databases cannot be +deleted from the API if there are users other than cloudsqlsuperuser with access. Possible +values are: "ABANDON", "DELETE". 
Defaults to "DELETE".`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSQLDatabaseCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + charsetProp, err := expandSQLDatabaseCharset(d.Get("charset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("charset"); !tpgresource.IsEmptyValue(reflect.ValueOf(charsetProp)) && (ok || !reflect.DeepEqual(v, charsetProp)) { + obj["charset"] = charsetProp + } + collationProp, err := expandSQLDatabaseCollation(d.Get("collation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("collation"); !tpgresource.IsEmptyValue(reflect.ValueOf(collationProp)) && (ok || !reflect.DeepEqual(v, collationProp)) { + obj["collation"] = collationProp + } + nameProp, err := expandSQLDatabaseName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + instanceProp, err := expandSQLDatabaseInstance(d.Get("instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceProp)) && (ok || !reflect.DeepEqual(v, instanceProp)) { + obj["instance"] = instanceProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "google-sql-database-instance-{{project}}-{{instance}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, 
"{{SQLBasePath}}projects/{{project}}/instances/{{instance}}/databases") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Database: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Database: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Database: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance}}/databases/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = SqlAdminOperationWaitTime( + config, res, project, "Creating Database", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Database: %s", err) + } + + log.Printf("[DEBUG] Finished creating Database %q: %#v", d.Id(), res) + + return resourceSQLDatabaseRead(d, meta) +} + +func resourceSQLDatabaseRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching 
project for Database: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(transformSQLDatabaseReadError(err), d, fmt.Sprintf("SQLDatabase %q", d.Id())) + } + + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("deletion_policy"); !ok { + if err := d.Set("deletion_policy", "DELETE"); err != nil { + return fmt.Errorf("Error setting deletion_policy: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + + if err := d.Set("charset", flattenSQLDatabaseCharset(res["charset"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("collation", flattenSQLDatabaseCollation(res["collation"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("name", flattenSQLDatabaseName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("instance", flattenSQLDatabaseInstance(res["instance"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + + return nil +} + +func resourceSQLDatabaseUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err 
:= tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Database: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + charsetProp, err := expandSQLDatabaseCharset(d.Get("charset"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("charset"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, charsetProp)) { + obj["charset"] = charsetProp + } + collationProp, err := expandSQLDatabaseCollation(d.Get("collation"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("collation"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, collationProp)) { + obj["collation"] = collationProp + } + nameProp, err := expandSQLDatabaseName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + instanceProp, err := expandSQLDatabaseInstance(d.Get("instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, instanceProp)) { + obj["instance"] = instanceProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "google-sql-database-instance-{{project}}-{{instance}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Database %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Database %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Database %q: %#v", d.Id(), res) + } + + err = SqlAdminOperationWaitTime( + config, res, project, "Updating Database", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceSQLDatabaseRead(d, meta) +} + +func resourceSQLDatabaseDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Database: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "google-sql-database-instance-{{project}}-{{instance}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + if deletionPolicy := d.Get("deletion_policy"); deletionPolicy == "ABANDON" { + // Allows for database to be abandoned without deletion to avoid deletion failing + // for Postgres databases in some circumstances due to existing SQL users + return nil + } + log.Printf("[DEBUG] Deleting Database %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Database") + } + + err = SqlAdminOperationWaitTime( + config, res, project, "Deleting Database", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Database %q: %#v", d.Id(), res) + return nil +} + +func resourceSQLDatabaseImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/instances/(?P[^/]+)/databases/(?P[^/]+)", + "instances/(?P[^/]+)/databases/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{instance}}/databases/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Explicitly set virtual fields to default values on import + if err := d.Set("deletion_policy", "DELETE"); err != nil { + return nil, fmt.Errorf("Error setting deletion_policy: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenSQLDatabaseCharset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSQLDatabaseCollation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSQLDatabaseName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSQLDatabaseInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) 
interface{} { + return v +} + +func expandSQLDatabaseCharset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSQLDatabaseCollation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSQLDatabaseName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSQLDatabaseInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_database_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database_instance.go similarity index 85% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_database_instance.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database_instance.go index d47f546865..55932bb395 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_database_instance.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database_instance.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package sql import ( "context" @@ -14,12 +16,18 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/services/servicenetworking" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/googleapi" sqladmin "google.golang.org/api/sqladmin/v1beta4" ) // Match fully-qualified or relative URLs -const privateNetworkLinkRegex = "^(?:http(?:s)?://.+/)?projects/(" + ProjectRegex + ")/global/networks/((?:[a-z](?:[-a-z0-9]*[a-z0-9])?))$" +const privateNetworkLinkRegex = "^(?:http(?:s)?://.+/)?projects/(" + verify.ProjectRegex + ")/global/networks/((?:[a-z](?:[-a-z0-9]*[a-z0-9])?))$" var sqlDatabaseAuthorizedNetWorkSchemaElem *schema.Resource = &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -110,7 +118,7 @@ func ResourceSqlDatabaseInstance() *schema.Resource { }, CustomizeDiff: customdiff.All( - customdiff.ForceNewIfChange("settings.0.disk_size", isDiskShrinkage), + customdiff.ForceNewIfChange("settings.0.disk_size", compute.IsDiskShrinkage), customdiff.ForceNewIfChange("master_instance_name", isMasterInstanceNameSet), customdiff.IfValueChange("instance_type", isReplicaPromoteRequested, checkPromoteConfigurationsAndUpdateDiff), privateNetworkCustomizeDiff, @@ -149,6 +157,42 @@ func ResourceSqlDatabaseInstance() *schema.Resource { Required: true, Description: `The machine type to use. See tiers for more details and supported versions. Postgres supports only shared-core machine types, and custom machine types such as db-custom-2-13312. 
See the Custom Machine Type Documentation to learn about specifying custom machine types.`, }, + "edition": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"ENTERPRISE", "ENTERPRISE_PLUS"}, false), + Description: `The edition of the instance, can be ENTERPRISE or ENTERPRISE_PLUS.`, + }, + "advanced_machine_features": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "threads_per_core": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of threads per physical core. Can be 1 or 2.`, + }, + }, + }, + }, + "data_cache_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Data cache configurations.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_cache_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether data cache is enabled for the instance.`, + }, + }, + }, + }, "activation_policy": { Type: schema.TypeString, Optional: true, @@ -389,8 +433,8 @@ is set to true. Defaults to ZONAL.`, "private_network": { Type: schema.TypeString, Optional: true, - ValidateFunc: orEmpty(validateRegexp(privateNetworkLinkRegex)), - DiffSuppressFunc: compareSelfLinkRelativePaths, + ValidateFunc: verify.OrEmpty(verify.ValidateRegexp(privateNetworkLinkRegex)), + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, AtLeastOneOf: ipConfigurationKeys, Description: `The VPC network from which the Cloud SQL instance is accessible for private IP. For example, projects/myProject/global/networks/default. Specifying a network enables private IP. At least ipv4_enabled must be enabled or a private_network must be configured. This setting can be updated, but it cannot be removed after it is set.`, }, @@ -605,7 +649,7 @@ is set to true. 
Defaults to ZONAL.`, "database_version": { Type: schema.TypeString, Required: true, - Description: `The MySQL, PostgreSQL or SQL Server (beta) version to use. Supported values include MYSQL_5_6, MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6, POSTGRES_10, POSTGRES_11, POSTGRES_12, POSTGRES_13, POSTGRES_14, SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. Database Version Policies includes an up-to-date reference of supported versions.`, + Description: `The MySQL, PostgreSQL or SQL Server (beta) version to use. Supported values include MYSQL_5_6, MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6, POSTGRES_10, POSTGRES_11, POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. Database Version Policies includes an up-to-date reference of supported versions.`, }, "encryption_key_name": { @@ -864,9 +908,17 @@ is set to true. Defaults to ZONAL.`, "point_in_time": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: timestampDiffSuppress(time.RFC3339Nano), + DiffSuppressFunc: tpgresource.TimestampDiffSuppress(time.RFC3339Nano), Description: `The timestamp of the point in time that should be restored.`, }, + "database_names": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `(SQL Server only, use with point_in_time) clone only the specified databases from the source instance. 
Clone all databases if empty.`, + }, "allocated_ip_range": { Type: schema.TypeString, Optional: true, @@ -919,18 +971,18 @@ func pitrSupportDbCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v } func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - region, err := getRegion(d, config) + region, err := tpgresource.GetRegion(d, config) if err != nil { return err } @@ -956,10 +1008,12 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) } } + databaseVersion := d.Get("database_version").(string) + instance := &sqladmin.DatabaseInstance{ Name: name, Region: region, - DatabaseVersion: d.Get("database_version").(string), + DatabaseVersion: databaseVersion, MasterInstanceName: d.Get("master_instance_name").(string), ReplicaConfiguration: expandReplicaConfiguration(d.Get("replica_configuration").([]interface{})), } @@ -967,7 +1021,7 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) cloneContext, cloneSource := expandCloneContext(d.Get("clone").([]interface{})) s, ok := d.GetOk("settings") - desiredSettings := expandSqlDatabaseInstanceSettings(s.([]interface{})) + desiredSettings := expandSqlDatabaseInstanceSettings(s.([]interface{}), databaseVersion) if ok { instance.Settings = desiredSettings } @@ -986,8 +1040,8 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) // modified at the same time. Lock the master until we're done in order // to prevent that. 
if !sqlDatabaseIsMaster(d) { - mutexKV.Lock(instanceMutexKey(project, instance.MasterInstanceName)) - defer mutexKV.Unlock(instanceMutexKey(project, instance.MasterInstanceName)) + transport_tpg.MutexStore.Lock(instanceMutexKey(project, instance.MasterInstanceName)) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, instance.MasterInstanceName)) } if k, ok := d.GetOk("encryption_key_name"); ok { @@ -1000,7 +1054,7 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) // BinaryLogging can be enabled on replica instances but only after creation. if instance.MasterInstanceName != "" && instance.Settings != nil && instance.Settings.BackupConfiguration != nil && instance.Settings.BackupConfiguration.BinaryLogEnabled { - settingsCopy := expandSqlDatabaseInstanceSettings(s.([]interface{})) + settingsCopy := expandSqlDatabaseInstanceSettings(s.([]interface{}), databaseVersion) bc := settingsCopy.BackupConfiguration patchData = &sqladmin.DatabaseInstance{Settings: &sqladmin.Settings{BackupConfiguration: bc}} @@ -1008,21 +1062,25 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) } var op *sqladmin.Operation - err = RetryTimeDuration(func() (operr error) { - if cloneContext != nil { - cloneContext.DestinationInstanceName = name - clodeReq := sqladmin.InstancesCloneRequest{CloneContext: cloneContext} - op, operr = config.NewSqlAdminClient(userAgent).Instances.Clone(project, cloneSource, &clodeReq).Do() - } else { - op, operr = config.NewSqlAdminClient(userAgent).Instances.Insert(project, instance).Do() - } - return operr - }, d.Timeout(schema.TimeoutCreate), IsSqlOperationInProgressError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (operr error) { + if cloneContext != nil { + cloneContext.DestinationInstanceName = name + clodeReq := sqladmin.InstancesCloneRequest{CloneContext: cloneContext} + op, operr = config.NewSqlAdminClient(userAgent).Instances.Clone(project, 
cloneSource, &clodeReq).Do() + } else { + op, operr = config.NewSqlAdminClient(userAgent).Instances.Insert(project, instance).Do() + } + return operr + }, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) if err != nil { return fmt.Errorf("Error, failed to create instance %s: %s", instance.Name, err) } - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -1041,24 +1099,30 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) // risk of it being left on the instance, which would present a security concern. if sqlDatabaseIsMaster(d) { var users *sqladmin.UsersListResponse - err = RetryTimeDuration(func() error { - users, err = config.NewSqlAdminClient(userAgent).Users.List(project, instance.Name).Do() - return err - }, d.Timeout(schema.TimeoutRead), IsSqlOperationInProgressError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + users, err = config.NewSqlAdminClient(userAgent).Users.List(project, instance.Name).Do() + return err + }, + Timeout: d.Timeout(schema.TimeoutRead), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) if err != nil { return fmt.Errorf("Error, attempting to list users associated with instance %s: %s", instance.Name, err) } for _, u := range users.Items { if u.Name == "root" && u.Host == "%" { - err = retry(func() error { - op, err = config.NewSqlAdminClient(userAgent).Users.Delete(project, instance.Name).Host(u.Host).Name(u.Name).Do() - if err == nil { - err = SqlAdminOperationWaitTime(config, op, project, "Delete default root User", userAgent, d.Timeout(schema.TimeoutCreate)) - } - return err + err = 
transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + op, err = config.NewSqlAdminClient(userAgent).Users.Delete(project, instance.Name).Host(u.Host).Name(u.Name).Do() + if err == nil { + err = SqlAdminOperationWaitTime(config, op, project, "Delete default root User", userAgent, d.Timeout(schema.TimeoutCreate)) + } + return err + }, }) if err != nil { - return fmt.Errorf("Error, failed to delete default 'root'@'*' user, but the database was created successfully: %s", err) + return fmt.Errorf("Error, failed to delete default 'root'@'*' u, but the database was created successfully: %s", err) } } } @@ -1066,10 +1130,14 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) // patch any fields that need to be sent postcreation if patchData != nil { - err = RetryTimeDuration(func() (rerr error) { - op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, instance.Name, patchData).Do() - return rerr - }, d.Timeout(schema.TimeoutUpdate), IsSqlOperationInProgressError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, instance.Name, patchData).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) if err != nil { return fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err) } @@ -1094,10 +1162,14 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) _settings := s.([]interface{})[0].(map[string]interface{}) instanceUpdate.Settings.SettingsVersion = int64(_settings["version"].(int)) var op *sqladmin.Operation - err = RetryTimeDuration(func() (rerr error) { - op, rerr = config.NewSqlAdminClient(userAgent).Instances.Update(project, name, instanceUpdate).Do() - return rerr - }, d.Timeout(schema.TimeoutUpdate), 
IsSqlOperationInProgressError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Update(project, name, instanceUpdate).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) if err != nil { return fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err) } @@ -1125,7 +1197,8 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) return nil } -func expandSqlDatabaseInstanceSettings(configured []interface{}) *sqladmin.Settings { +// Available fields for settings vary between database versions. +func expandSqlDatabaseInstanceSettings(configured []interface{}, databaseVersion string) *sqladmin.Settings { if len(configured) == 0 || configured[0] == nil { return nil } @@ -1134,7 +1207,10 @@ func expandSqlDatabaseInstanceSettings(configured []interface{}) *sqladmin.Setti settings := &sqladmin.Settings{ // Version is unset in Create but is set during update SettingsVersion: int64(_settings["version"].(int)), + DataCacheConfig: expandDataCacheConfig(_settings["data_cache_config"].([]interface{})), Tier: _settings["tier"].(string), + Edition: _settings["edition"].(string), + AdvancedMachineFeatures: expandSqlServerAdvancedMachineFeatures(_settings["advanced_machine_features"].([]interface{})), ForceSendFields: []string{"StorageAutoResize"}, ActivationPolicy: _settings["activation_policy"].(string), ActiveDirectoryConfig: expandActiveDirectoryConfig(_settings["active_directory_config"].([]interface{})), @@ -1148,10 +1224,10 @@ func expandSqlDatabaseInstanceSettings(configured []interface{}) *sqladmin.Setti DataDiskType: _settings["disk_type"].(string), PricingPlan: _settings["pricing_plan"].(string), DeletionProtectionEnabled: _settings["deletion_protection_enabled"].(bool), - UserLabels: 
convertStringMap(_settings["user_labels"].(map[string]interface{})), + UserLabels: tpgresource.ConvertStringMap(_settings["user_labels"].(map[string]interface{})), BackupConfiguration: expandBackupConfiguration(_settings["backup_configuration"].([]interface{})), DatabaseFlags: expandDatabaseFlags(_settings["database_flags"].([]interface{})), - IpConfiguration: expandIpConfiguration(_settings["ip_configuration"].([]interface{})), + IpConfiguration: expandIpConfiguration(_settings["ip_configuration"].([]interface{}), databaseVersion), LocationPreference: expandLocationPreference(_settings["location_preference"].([]interface{})), MaintenanceWindow: expandMaintenanceWindow(_settings["maintenance_window"].([]interface{})), InsightsConfig: expandInsightsConfig(_settings["insights_config"].([]interface{})), @@ -1198,8 +1274,15 @@ func expandCloneContext(configured []interface{}) (*sqladmin.CloneContext, strin _cloneConfiguration := configured[0].(map[string]interface{}) + databaseNames := []string{} + rawDatabaseNames := _cloneConfiguration["database_names"].([]interface{}) + for _, db := range rawDatabaseNames { + databaseNames = append(databaseNames, db.(string)) + } + return &sqladmin.CloneContext{ PointInTime: _cloneConfiguration["point_in_time"].(string), + DatabaseNames: databaseNames, AllocatedIpRange: _cloneConfiguration["allocated_ip_range"].(string), }, _cloneConfiguration["source_instance_name"].(string) } @@ -1231,13 +1314,19 @@ func expandLocationPreference(configured []interface{}) *sqladmin.LocationPrefer } } -func expandIpConfiguration(configured []interface{}) *sqladmin.IpConfiguration { +func expandIpConfiguration(configured []interface{}, databaseVersion string) *sqladmin.IpConfiguration { if len(configured) == 0 || configured[0] == nil { return nil } _ipConfiguration := configured[0].(map[string]interface{}) + forceSendFields := []string{"Ipv4Enabled", "RequireSsl"} + + if !strings.HasPrefix(databaseVersion, "SQLSERVER") { + forceSendFields = 
append(forceSendFields, "EnablePrivatePathForGoogleCloudServices") + } + return &sqladmin.IpConfiguration{ Ipv4Enabled: _ipConfiguration["ipv4_enabled"].(bool), RequireSsl: _ipConfiguration["require_ssl"].(bool), @@ -1245,7 +1334,7 @@ func expandIpConfiguration(configured []interface{}) *sqladmin.IpConfiguration { AllocatedIpRange: _ipConfiguration["allocated_ip_range"].(string), AuthorizedNetworks: expandAuthorizedNetworks(_ipConfiguration["authorized_networks"].(*schema.Set).List()), EnablePrivatePathForGoogleCloudServices: _ipConfiguration["enable_private_path_for_google_cloud_services"].(bool), - ForceSendFields: []string{"Ipv4Enabled", "RequireSsl"}, + ForceSendFields: forceSendFields, } } @@ -1279,6 +1368,17 @@ func expandDatabaseFlags(configured []interface{}) []*sqladmin.DatabaseFlags { return databaseFlags } +func expandDataCacheConfig(configured interface{}) *sqladmin.DataCacheConfig { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + config := l[0].(map[string]interface{}) + return &sqladmin.DataCacheConfig{ + DataCacheEnabled: config["data_cache_enabled"].(bool), + } +} + func expandBackupConfiguration(configured []interface{}) *sqladmin.BackupConfiguration { if len(configured) == 0 || configured[0] == nil { return nil @@ -1340,6 +1440,18 @@ func expandDenyMaintenancePeriod(configured []interface{}) []*sqladmin.DenyMaint } +func expandSqlServerAdvancedMachineFeatures(configured interface{}) *sqladmin.AdvancedMachineFeatures { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + + config := l[0].(map[string]interface{}) + return &sqladmin.AdvancedMachineFeatures{ + ThreadsPerCore: int64(config["threads_per_core"].(int)), + } +} + func expandSqlServerAuditConfig(configured interface{}) *sqladmin.SqlServerAuditConfig { l := configured.([]interface{}) if len(l) == 0 { @@ -1386,24 +1498,28 @@ func expandPasswordValidationPolicy(configured []interface{}) *sqladmin.Password } func resourceSqlDatabaseInstanceRead(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } var instance *sqladmin.DatabaseInstance - err = RetryTimeDuration(func() (rerr error) { - instance, rerr = config.NewSqlAdminClient(userAgent).Instances.Get(project, d.Get("name").(string)).Do() - return rerr - }, d.Timeout(schema.TimeoutRead), IsSqlOperationInProgressError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + instance, rerr = config.NewSqlAdminClient(userAgent).Instances.Get(project, d.Get("name").(string)).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutRead), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SQL Database Instance %q", d.Get("name").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SQL Database Instance %q", d.Get("name").(string))) } if err := d.Set("name", instance.Name); err != nil { @@ -1492,13 +1608,13 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e } func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -1525,15 +1641,19 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta 
interface{}) var op *sqladmin.Operation var instance *sqladmin.DatabaseInstance - desiredDatabaseVersion := d.Get("database_version") + databaseVersion := d.Get("database_version").(string) // Check if the activation policy is being updated. If it is being changed to ALWAYS this should be done first. if d.HasChange("settings.0.activation_policy") && d.Get("settings.0.activation_policy").(string) == "ALWAYS" { instance = &sqladmin.DatabaseInstance{Settings: &sqladmin.Settings{ActivationPolicy: "ALWAYS"}} - err = RetryTimeDuration(func() (rerr error) { - op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() - return rerr - }, d.Timeout(schema.TimeoutUpdate), IsSqlOperationInProgressError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) if err != nil { return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err) } @@ -1550,11 +1670,15 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) // Check if the database version is being updated, because patching database version is an atomic operation and can not be // performed with other fields, we first patch database version before updating the rest of the fields. 
if d.HasChange("database_version") { - instance = &sqladmin.DatabaseInstance{DatabaseVersion: desiredDatabaseVersion.(string)} - err = RetryTimeDuration(func() (rerr error) { - op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() - return rerr - }, d.Timeout(schema.TimeoutUpdate), IsSqlOperationInProgressError) + instance = &sqladmin.DatabaseInstance{DatabaseVersion: databaseVersion} + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) if err != nil { return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err) } @@ -1603,14 +1727,17 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) Password: password, } - mutexKV.Lock(instanceMutexKey(project, instance)) - defer mutexKV.Unlock(instanceMutexKey(project, instance)) + transport_tpg.MutexStore.Lock(instanceMutexKey(project, instance)) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, instance)) var op *sqladmin.Operation updateFunc := func() error { op, err = config.NewSqlAdminClient(userAgent).Users.Update(project, instance, user).Host(host).Name(name).Do() return err } - err = RetryTimeDuration(updateFunc, d.Timeout(schema.TimeoutUpdate)) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: updateFunc, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { if err := d.Set("root_password", oldPwd.(string)); err != nil { @@ -1633,10 +1760,14 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) // performed with other fields, we first patch maintenance version before updating the rest of the fields. 
if d.HasChange("maintenance_version") { instance = &sqladmin.DatabaseInstance{MaintenanceVersion: maintenance_version} - err = RetryTimeDuration(func() (rerr error) { - op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() - return rerr - }, d.Timeout(schema.TimeoutUpdate), IsSqlOperationInProgressError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) if err != nil { return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err) } @@ -1651,10 +1782,14 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) } if promoteReadReplicaRequired { - err = RetryTimeDuration(func() (rerr error) { - op, rerr = config.NewSqlAdminClient(userAgent).Instances.PromoteReplica(project, d.Get("name").(string)).Do() - return rerr - }, d.Timeout(schema.TimeoutUpdate), IsSqlOperationInProgressError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.PromoteReplica(project, d.Get("name").(string)).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) if err != nil { return fmt.Errorf("Error, failed to promote read replica instance as primary stand-alone %s: %s", instance.Name, err) } @@ -1670,7 +1805,7 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) s := d.Get("settings") instance = &sqladmin.DatabaseInstance{ - Settings: expandSqlDatabaseInstanceSettings(desiredSetting.([]interface{})), 
+ Settings: expandSqlDatabaseInstanceSettings(desiredSetting.([]interface{}), databaseVersion), } _settings := s.([]interface{})[0].(map[string]interface{}) // Instance.Patch operation on completion updates the settings proto version by +8. As terraform does not know this it tries @@ -1683,18 +1818,22 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) // Lock on the master_instance_name just in case updating any replica // settings causes operations on the master. if v, ok := d.GetOk("master_instance_name"); ok { - mutexKV.Lock(instanceMutexKey(project, v.(string))) - defer mutexKV.Unlock(instanceMutexKey(project, v.(string))) + transport_tpg.MutexStore.Lock(instanceMutexKey(project, v.(string))) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, v.(string))) } if _, ok := d.GetOk("instance_type"); ok { instance.InstanceType = d.Get("instance_type").(string) } - err = RetryTimeDuration(func() (rerr error) { - op, rerr = config.NewSqlAdminClient(userAgent).Instances.Update(project, d.Get("name").(string), instance).Do() - return rerr - }, d.Timeout(schema.TimeoutUpdate), IsSqlOperationInProgressError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Update(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) if err != nil { return fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err) } @@ -1728,13 +1867,13 @@ func maintenanceVersionDiffSuppress(_, old, new string, _ *schema.ResourceData) } func resourceSqlDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + 
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -1748,22 +1887,26 @@ func resourceSqlDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) // Lock on the master_instance_name just in case deleting a replica causes // operations on the master. if v, ok := d.GetOk("master_instance_name"); ok { - mutexKV.Lock(instanceMutexKey(project, v.(string))) - defer mutexKV.Unlock(instanceMutexKey(project, v.(string))) + transport_tpg.MutexStore.Lock(instanceMutexKey(project, v.(string))) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, v.(string))) } var op *sqladmin.Operation - err = RetryTimeDuration(func() (rerr error) { - op, rerr = config.NewSqlAdminClient(userAgent).Instances.Delete(project, d.Get("name").(string)).Do() - if rerr != nil { - return rerr - } - err = SqlAdminOperationWaitTime(config, op, project, "Delete Instance", userAgent, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return err - } - return nil - }, d.Timeout(schema.TimeoutDelete), IsSqlOperationInProgressError, isSqlInternalError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Delete(project, d.Get("name").(string)).Do() + if rerr != nil { + return rerr + } + err = SqlAdminOperationWaitTime(config, op, project, "Delete Instance", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + return nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError, IsSqlInternalError}, + }) if err != nil { return fmt.Errorf("Error, failed to delete instance %s: %s", d.Get("name").(string), err) } @@ -1771,8 +1914,8 @@ func resourceSqlDatabaseInstanceDelete(d *schema.ResourceData, 
meta interface{}) } func resourceSqlDatabaseInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { @@ -1784,7 +1927,7 @@ func resourceSqlDatabaseInstanceImport(d *schema.ResourceData, meta interface{}) } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -1797,6 +1940,7 @@ func flattenSettings(settings *sqladmin.Settings) []map[string]interface{} { data := map[string]interface{}{ "version": settings.SettingsVersion, "tier": settings.Tier, + "edition": settings.Edition, "activation_policy": settings.ActivationPolicy, "availability_type": settings.AvailabilityType, "collation": settings.Collation, @@ -1857,9 +2001,28 @@ func flattenSettings(settings *sqladmin.Settings) []map[string]interface{} { data["password_validation_policy"] = flattenPasswordValidationPolicy(settings.PasswordValidationPolicy) } + if settings.DataCacheConfig != nil { + data["data_cache_config"] = flattenDataCacheConfig(settings.DataCacheConfig) + } + + if settings.AdvancedMachineFeatures != nil { + data["advanced_machine_features"] = flattenSqlServerAdvancedMachineFeatures(settings.AdvancedMachineFeatures) + } + return []map[string]interface{}{data} } +func flattenDataCacheConfig(d *sqladmin.DataCacheConfig) []map[string]interface{} { + if d == nil { + return nil + } + return []map[string]interface{}{ + { + "data_cache_enabled": d.DataCacheEnabled, + }, + } +} + func flattenBackupConfiguration(backupConfiguration *sqladmin.BackupConfiguration) []map[string]interface{} { data 
:= map[string]interface{}{ "binary_log_enabled": backupConfiguration.BinaryLogEnabled, @@ -1913,6 +2076,17 @@ func flattenDenyMaintenancePeriod(denyMaintenancePeriod []*sqladmin.DenyMaintena return flags } +func flattenSqlServerAdvancedMachineFeatures(advancedMachineFeatures *sqladmin.AdvancedMachineFeatures) []map[string]interface{} { + if advancedMachineFeatures == nil { + return nil + } + return []map[string]interface{}{ + { + "threads_per_core": advancedMachineFeatures.ThreadsPerCore, + }, + } +} + func flattenSqlServerAuditConfig(sqlServerAuditConfig *sqladmin.SqlServerAuditConfig) []map[string]interface{} { if sqlServerAuditConfig == nil { return nil @@ -2092,11 +2266,11 @@ func sqlDatabaseIsMaster(d *schema.ResourceData) bool { return !ok } -func sqlDatabaseInstanceServiceNetworkPrecheck(d *schema.ResourceData, config *Config, userAgent, network string) error { +func sqlDatabaseInstanceServiceNetworkPrecheck(d *schema.ResourceData, config *transport_tpg.Config, userAgent, network string) error { log.Printf("[DEBUG] checking network %q for at least one service networking connection", network) // This call requires projects.get permissions, which may not have been granted to the Terraform actor, // particularly in shared VPC setups. Most will! But it's not strictly required. 
- serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, network, userAgent) + serviceNetworkingNetworkName, err := servicenetworking.RetrieveServiceNetworkingNetworkName(d, config, network, userAgent) if err != nil { var gerr *googleapi.Error if errors.As(err, &gerr) { @@ -2134,7 +2308,7 @@ func expandRestoreBackupContext(configured []interface{}) *sqladmin.RestoreBacku } } -func sqlDatabaseInstanceRestoreFromBackup(d *schema.ResourceData, config *Config, userAgent, project, instanceId string, r interface{}) error { +func sqlDatabaseInstanceRestoreFromBackup(d *schema.ResourceData, config *transport_tpg.Config, userAgent, project, instanceId string, r interface{}) error { log.Printf("[DEBUG] Initiating SQL database instance backup restore") restoreContext := r.([]interface{}) @@ -2143,10 +2317,14 @@ func sqlDatabaseInstanceRestoreFromBackup(d *schema.ResourceData, config *Config } var op *sqladmin.Operation - err := RetryTimeDuration(func() (operr error) { - op, operr = config.NewSqlAdminClient(userAgent).Instances.RestoreBackup(project, instanceId, backupRequest).Do() - return operr - }, d.Timeout(schema.TimeoutUpdate), IsSqlOperationInProgressError) + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (operr error) { + op, operr = config.NewSqlAdminClient(userAgent).Instances.RestoreBackup(project, instanceId, backupRequest).Do() + return operr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) if err != nil { return fmt.Errorf("Error, failed to restore instance from backup %s: %s", instanceId, err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database_instance_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database_instance_sweeper.go 
new file mode 100644 index 0000000000..e85197a2d9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_database_instance_sweeper.go @@ -0,0 +1,130 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package sql + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" +) + +func init() { + sweeper.AddTestSweepers("SQLDatabaseInstance", testSweepSQLDatabaseInstance) +} + +func testSweepSQLDatabaseInstance(region string) error { + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + return fmt.Errorf("error getting shared config for region: %s", err) + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Fatalf("error loading: %s", err) + } + + found, err := config.NewSqlAdminClient(config.UserAgent).Instances.List(config.Project).Do() + if err != nil { + log.Printf("error listing databases: %s", err) + return nil + } + + if len(found.Items) == 0 { + log.Printf("No databases found") + return nil + } + + running := map[string]struct{}{} + + for _, d := range found.Items { + if !sweeper.IsSweepableTestResource(d.Name) { + continue + } + + if d.State != "RUNNABLE" { + continue + } + running[d.Name] = struct{}{} + } + + for _, d := range found.Items { + if !sweeper.IsSweepableTestResource(d.Name) { + continue + } + + // don't delete replicas, we'll take care of that + // when deleting the database they replicate + if d.ReplicaConfiguration != nil { + continue + } + log.Printf("Destroying SQL Instance (%s)", d.Name) + + // replicas need to be stopped and destroyed before destroying a master + // instance. 
The ordering slice tracks replica databases for a given master + // and we call destroy on them before destroying the master + var ordering []string + for _, replicaName := range d.ReplicaNames { + // don't try to stop replicas that aren't running + if _, ok := running[replicaName]; !ok { + ordering = append(ordering, replicaName) + continue + } + + // need to stop replication before being able to destroy a database + op, err := config.NewSqlAdminClient(config.UserAgent).Instances.StopReplica(config.Project, replicaName).Do() + + if err != nil { + log.Printf("error, failed to stop replica instance (%s) for instance (%s): %s", replicaName, d.Name, err) + return nil + } + + err = SqlAdminOperationWaitTime(config, op, config.Project, "Stop Replica", config.UserAgent, 10*time.Minute) + if err != nil { + if strings.Contains(err.Error(), "does not exist") { + log.Printf("Replication operation not found") + } else { + log.Printf("Error waiting for sqlAdmin operation: %s", err) + return nil + } + } + + ordering = append(ordering, replicaName) + } + + // ordering has a list of replicas (or none), now add the primary to the end + ordering = append(ordering, d.Name) + + for _, db := range ordering { + // destroy instances, replicas first + op, err := config.NewSqlAdminClient(config.UserAgent).Instances.Delete(config.Project, db).Do() + + if err != nil { + if strings.Contains(err.Error(), "409") { + // the GCP api can return a 409 error after the delete operation + // reaches a successful end + log.Printf("Operation not found, got 409 response") + continue + } + + log.Printf("Error, failed to delete instance %s: %s", db, err) + return nil + } + + err = SqlAdminOperationWaitTime(config, op, config.Project, "Delete Instance", config.UserAgent, 10*time.Minute) + if err != nil { + if strings.Contains(err.Error(), "does not exist") { + log.Printf("SQL instance not found") + continue + } + log.Printf("Error, failed to delete instance %s: %s", db, err) + return nil + } + } + } + + 
return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_source_representation_instance.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_source_representation_instance.go new file mode 100644 index 0000000000..9a66b6eafb --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_source_representation_instance.go @@ -0,0 +1,599 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package sql + +import ( + "fmt" + "log" + "reflect" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "google.golang.org/api/googleapi" +) + +func ResourceSQLSourceRepresentationInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceSQLSourceRepresentationInstanceCreate, + Read: resourceSQLSourceRepresentationInstanceRead, + Delete: resourceSQLSourceRepresentationInstanceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSQLSourceRepresentationInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "database_version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"MYSQL_5_6", "MYSQL_5_7", "MYSQL_8_0", "POSTGRES_9_6", "POSTGRES_10", "POSTGRES_11", "POSTGRES_12", "POSTGRES_13", "POSTGRES_14"}), + Description: `The MySQL version running on your source database server. Possible values: ["MYSQL_5_6", "MYSQL_5_7", "MYSQL_8_0", "POSTGRES_9_6", "POSTGRES_10", "POSTGRES_11", "POSTGRES_12", "POSTGRES_13", "POSTGRES_14"]`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the source representation instance. 
Use any valid Cloud SQL instance name.`, + }, + "host": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateIpAddress, + Description: `The externally accessible IPv4 address for the source database server.`, + }, + "ca_certificate": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The CA certificate on the external server. Include only if SSL/TLS is used on the external server.`, + }, + "client_certificate": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The client certificate on the external server. Required only for server-client authentication. Include only if SSL/TLS is used on the external server.`, + }, + "client_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The private key file for the client certificate on the external server. Required only for server-client authentication. Include only if SSL/TLS is used on the external server.`, + }, + "dump_file_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A file in the bucket that contains the data from the external server.`, + }, + "password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The password for the replication user account.`, + Sensitive: true, + }, + "port": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(0, 65535), + Description: `The externally accessible port for the source database server. +Defaults to 3306.`, + Default: 3306, + }, + "username": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The replication user account on the external server.`, + }, + + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The Region in which the created instance should reside. 
+If it is not provided, the provider region is used.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSQLSourceRepresentationInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandSQLSourceRepresentationInstanceName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + regionProp, err := expandSQLSourceRepresentationInstanceRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + databaseVersionProp, err := expandSQLSourceRepresentationInstanceDatabaseVersion(d.Get("database_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("database_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(databaseVersionProp)) && (ok || !reflect.DeepEqual(v, databaseVersionProp)) { + obj["databaseVersion"] = databaseVersionProp + } + onPremisesConfigurationProp, err := expandSQLSourceRepresentationInstanceOnPremisesConfiguration(nil, d, config) + if err != nil { + return err + } else if !tpgresource.IsEmptyValue(reflect.ValueOf(onPremisesConfigurationProp)) { + obj["onPremisesConfiguration"] = onPremisesConfigurationProp + } + + obj, err = resourceSQLSourceRepresentationInstanceEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances") + if err != nil { + 
return err + } + + log.Printf("[DEBUG] Creating new SourceRepresentationInstance: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for SourceRepresentationInstance: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating SourceRepresentationInstance: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = SqlAdminOperationWaitTime( + config, res, project, "Creating SourceRepresentationInstance", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create SourceRepresentationInstance: %s", err) + } + + log.Printf("[DEBUG] Finished creating SourceRepresentationInstance %q: %#v", d.Id(), res) + + return resourceSQLSourceRepresentationInstanceRead(d, meta) +} + +func resourceSQLSourceRepresentationInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return 
fmt.Errorf("Error fetching project for SourceRepresentationInstance: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SQLSourceRepresentationInstance %q", d.Id())) + } + + res, err = resourceSQLSourceRepresentationInstanceDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. It may be marked deleted + log.Printf("[DEBUG] Removing SQLSourceRepresentationInstance because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading SourceRepresentationInstance: %s", err) + } + + if err := d.Set("name", flattenSQLSourceRepresentationInstanceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading SourceRepresentationInstance: %s", err) + } + if err := d.Set("region", flattenSQLSourceRepresentationInstanceRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading SourceRepresentationInstance: %s", err) + } + if err := d.Set("database_version", flattenSQLSourceRepresentationInstanceDatabaseVersion(res["databaseVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading SourceRepresentationInstance: %s", err) + } + // Terraform must set the top level schema field, but since this object contains collapsed properties + // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. 
+ if flattenedProp := flattenSQLSourceRepresentationInstanceOnPremisesConfiguration(res["onPremisesConfiguration"], d, config); flattenedProp != nil { + if gerr, ok := flattenedProp.(*googleapi.Error); ok { + return fmt.Errorf("Error reading SourceRepresentationInstance: %s", gerr) + } + casted := flattenedProp.([]interface{})[0] + if casted != nil { + for k, v := range casted.(map[string]interface{}) { + if err := d.Set(k, v); err != nil { + return fmt.Errorf("Error setting %s: %s", k, err) + } + } + } + } + + return nil +} + +func resourceSQLSourceRepresentationInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for SourceRepresentationInstance: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting SourceRepresentationInstance %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "SourceRepresentationInstance") + } + + err = SqlAdminOperationWaitTime( + config, res, project, "Deleting SourceRepresentationInstance", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting 
SourceRepresentationInstance %q: %#v", d.Id(), res) + return nil +} + +func resourceSQLSourceRepresentationInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/instances/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/instances/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSQLSourceRepresentationInstanceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSQLSourceRepresentationInstanceRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSQLSourceRepresentationInstanceDatabaseVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSQLSourceRepresentationInstanceOnPremisesConfiguration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["host"] = + flattenSQLSourceRepresentationInstanceOnPremisesConfigurationHost(original["host"], d, config) + transformed["port"] = + flattenSQLSourceRepresentationInstanceOnPremisesConfigurationPort(original["port"], d, config) + transformed["username"] = + flattenSQLSourceRepresentationInstanceOnPremisesConfigurationUsername(original["username"], d, config) + transformed["password"] = + flattenSQLSourceRepresentationInstanceOnPremisesConfigurationPassword(original["password"], d, config) + 
transformed["dump_file_path"] = + flattenSQLSourceRepresentationInstanceOnPremisesConfigurationDumpFilePath(original["dumpFilePath"], d, config) + transformed["ca_certificate"] = + flattenSQLSourceRepresentationInstanceOnPremisesConfigurationCaCertificate(original["caCertificate"], d, config) + transformed["client_certificate"] = + flattenSQLSourceRepresentationInstanceOnPremisesConfigurationClientCertificate(original["clientCertificate"], d, config) + transformed["client_key"] = + flattenSQLSourceRepresentationInstanceOnPremisesConfigurationClientKey(original["clientKey"], d, config) + return []interface{}{transformed} +} +func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationHost(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationPassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("password") +} + +func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationDumpFilePath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationCaCertificate(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationClientCertificate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationClientKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSQLSourceRepresentationInstanceName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSQLSourceRepresentationInstanceRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSQLSourceRepresentationInstanceDatabaseVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSQLSourceRepresentationInstanceOnPremisesConfiguration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + transformed := make(map[string]interface{}) + transformedHost, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationHost(d.Get("host"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHost); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["host"] = transformedHost + } + + transformedPort, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationPort(d.Get("port"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["port"] = transformedPort + } + + transformedUsername, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationUsername(d.Get("username"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUsername); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["username"] = transformedUsername + } + + transformedPassword, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationPassword(d.Get("password"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPassword); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["password"] = transformedPassword + } + + transformedDumpFilePath, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationDumpFilePath(d.Get("dump_file_path"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDumpFilePath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dumpFilePath"] = transformedDumpFilePath + } + + transformedCaCertificate, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationCaCertificate(d.Get("ca_certificate"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCaCertificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["caCertificate"] = transformedCaCertificate + } + + transformedClientCertificate, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationClientCertificate(d.Get("client_certificate"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientCertificate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientCertificate"] = transformedClientCertificate + } + + transformedClientKey, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationClientKey(d.Get("client_key"), d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedClientKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["clientKey"] = transformedClientKey + } + + return transformed, nil +} + +func expandSQLSourceRepresentationInstanceOnPremisesConfigurationHost(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSQLSourceRepresentationInstanceOnPremisesConfigurationPort(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSQLSourceRepresentationInstanceOnPremisesConfigurationUsername(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSQLSourceRepresentationInstanceOnPremisesConfigurationPassword(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSQLSourceRepresentationInstanceOnPremisesConfigurationDumpFilePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSQLSourceRepresentationInstanceOnPremisesConfigurationCaCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSQLSourceRepresentationInstanceOnPremisesConfigurationClientCertificate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSQLSourceRepresentationInstanceOnPremisesConfigurationClientKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceSQLSourceRepresentationInstanceEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + opc := obj["onPremisesConfiguration"].(map[string]interface{}) + opc["hostPort"] = fmt.Sprintf("%v:%v", opc["host"], opc["port"]) + delete(opc, "host") + delete(opc, "port") + return obj, nil +} + +func resourceSQLSourceRepresentationInstanceDecoder(d *schema.ResourceData, meta interface{}, res 
map[string]interface{}) (map[string]interface{}, error) { + if v, ok := res["onPremisesConfiguration"]; ok { + opc := v.(map[string]interface{}) + hostPort := opc["hostPort"] + spl := strings.Split(hostPort.(string), ":") + if len(spl) != 2 { + return nil, fmt.Errorf("unexpected value for hostPort, expected [host]:[port], got %q", hostPort) + } + opc["host"] = spl[0] + p, err := strconv.Atoi(spl[1]) + if err != nil { + return nil, fmt.Errorf("error converting port %q to int: %v", spl[1], err) + } + opc["port"] = p + delete(opc, "hostPort") + } + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_source_representation_instance_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_source_representation_instance_sweeper.go new file mode 100644 index 0000000000..10ded9b6c4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_source_representation_instance_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package sql + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SQLSourceRepresentationInstance", testSweepSQLSourceRepresentationInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSQLSourceRepresentationInstance(region string) error { + resourceName := "SQLSourceRepresentationInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://sqladmin.googleapis.com/sql/v1beta4/projects/{{project}}/instances", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err 
!= nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["sourceRepresentationInstances"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://sqladmin.googleapis.com/sql/v1beta4/projects/{{project}}/instances/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_ssl_cert.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_ssl_cert.go similarity index 85% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_ssl_cert.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_ssl_cert.go index 8746449e8a..91abc16d8b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_ssl_cert.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_ssl_cert.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package sql import ( "fmt" "log" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" sqladmin "google.golang.org/api/sqladmin/v1beta4" ) @@ -93,13 +98,13 @@ func ResourceSqlSslCert() *schema.Resource { } func resourceSqlSslCertCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -111,8 +116,8 @@ func resourceSqlSslCertCreate(d *schema.ResourceData, meta interface{}) error { CommonName: commonName, } - mutexKV.Lock(instanceMutexKey(project, instance)) - defer mutexKV.Unlock(instanceMutexKey(project, instance)) + transport_tpg.MutexStore.Lock(instanceMutexKey(project, instance)) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, 
instance)) resp, err := config.NewSqlAdminClient(userAgent).SslCerts.Insert(project, instance, sslCertsInsertRequest).Do() if err != nil { return fmt.Errorf("Error, failed to insert "+ @@ -143,13 +148,13 @@ func resourceSqlSslCertCreate(d *schema.ResourceData, meta interface{}) error { } func resourceSqlSslCertRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -160,7 +165,7 @@ func resourceSqlSslCertRead(d *schema.ResourceData, meta interface{}) error { sslCerts, err := config.NewSqlAdminClient(userAgent).SslCerts.Get(project, instance, fingerprint).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SQL Ssl Cert %q in instance %q", commonName, instance)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SQL Ssl Cert %q in instance %q", commonName, instance)) } if sslCerts == nil { @@ -200,13 +205,13 @@ func resourceSqlSslCertRead(d *schema.ResourceData, meta interface{}) error { } func resourceSqlSslCertDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -215,8 +220,8 @@ func resourceSqlSslCertDelete(d *schema.ResourceData, meta interface{}) error { commonName := d.Get("common_name").(string) fingerprint := d.Get("sha1_fingerprint").(string) - mutexKV.Lock(instanceMutexKey(project, instance)) - defer 
mutexKV.Unlock(instanceMutexKey(project, instance)) + transport_tpg.MutexStore.Lock(instanceMutexKey(project, instance)) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, instance)) op, err := config.NewSqlAdminClient(userAgent).SslCerts.Delete(project, instance, fingerprint).Do() if err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_user.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_user.go similarity index 80% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_user.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_user.go index fc835c7bd3..6ca23f91e6 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_user.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_user.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package sql import ( "fmt" @@ -6,6 +8,10 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" sqladmin "google.golang.org/api/sqladmin/v1beta4" @@ -23,6 +29,19 @@ func diffSuppressIamUserName(_, old, new string, d *schema.ResourceData) bool { return false } +func handleUserNotFoundError(err error, d *schema.ResourceData, resource string) error { + if transport_tpg.IsGoogleApiErrorWithCode(err, 404) || transport_tpg.IsGoogleApiErrorWithCode(err, 403) { + log.Printf("[WARN] Removing %s because it's gone", resource) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return errwrap.Wrapf( + fmt.Sprintf("Error when reading or editing %s: {{err}}", resource), err) +} + func ResourceSqlUser() *schema.Resource { return &schema.Resource{ Create: resourceSqlUserCreate, @@ -78,7 +97,7 @@ func ResourceSqlUser() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: emptyOrDefaultStringSuppress("BUILT_IN"), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("BUILT_IN"), Description: `The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type. 
Flags include "BUILT_IN", "CLOUD_IAM_USER", or "CLOUD_IAM_SERVICE_ACCOUNT".`, ValidateFunc: validation.StringInSlice([]string{"BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", ""}, false), @@ -207,13 +226,13 @@ func expandPasswordPolicy(cfg interface{}) *sqladmin.UserPasswordValidationPolic } func resourceSqlUserCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -237,18 +256,22 @@ func resourceSqlUserCreate(d *schema.ResourceData, meta interface{}) error { user.PasswordPolicy = pp } - mutexKV.Lock(instanceMutexKey(project, instance)) - defer mutexKV.Unlock(instanceMutexKey(project, instance)) + transport_tpg.MutexStore.Lock(instanceMutexKey(project, instance)) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, instance)) if v, ok := d.GetOk("host"); ok { if v.(string) != "" { var fetchedInstance *sqladmin.DatabaseInstance - err = RetryTimeDuration(func() (rerr error) { - fetchedInstance, rerr = config.NewSqlAdminClient(userAgent).Instances.Get(project, instance).Do() - return rerr - }, d.Timeout(schema.TimeoutRead), IsSqlOperationInProgressError) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + fetchedInstance, rerr = config.NewSqlAdminClient(userAgent).Instances.Get(project, instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutRead), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SQL Database Instance %q", d.Get("instance").(string))) + return transport_tpg.HandleNotFoundError(err, d, 
fmt.Sprintf("SQL Database Instance %q", d.Get("instance").(string))) } if !strings.Contains(fetchedInstance.DatabaseVersion, "MYSQL") { return fmt.Errorf("Error: Host field is only supported for MySQL instances: %s", fetchedInstance.DatabaseVersion) @@ -262,7 +285,10 @@ func resourceSqlUserCreate(d *schema.ResourceData, meta interface{}) error { user).Do() return err } - err = RetryTimeDuration(insertFunc, d.Timeout(schema.TimeoutCreate)) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: insertFunc, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error, failed to insert "+ @@ -284,13 +310,13 @@ func resourceSqlUserCreate(d *schema.ResourceData, meta interface{}) error { } func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -301,12 +327,16 @@ func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error { var users *sqladmin.UsersListResponse err = nil - err = retryTime(func() error { - users, err = config.NewSqlAdminClient(userAgent).Users.List(project, instance).Do() - return err - }, 5) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + users, err = config.NewSqlAdminClient(userAgent).Users.List(project, instance).Do() + return err + }, + Timeout: 5 * time.Minute, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("SQL User %q in instance %q", name, instance)) + // move away from transport_tpg.HandleNotFoundError() as we need to handle both 404 and 403 + return handleUserNotFoundError(err, d, fmt.Sprintf("SQL User %q in instance %q", name, instance)) } var user 
*sqladmin.User @@ -408,14 +438,14 @@ func flattenPasswordStatus(status *sqladmin.PasswordStatus) interface{} { } func resourceSqlUserUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } if d.HasChange("password") || d.HasChange("password_policy") { - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -431,14 +461,17 @@ func resourceSqlUserUpdate(d *schema.ResourceData, meta interface{}) error { Password: password, } - mutexKV.Lock(instanceMutexKey(project, instance)) - defer mutexKV.Unlock(instanceMutexKey(project, instance)) + transport_tpg.MutexStore.Lock(instanceMutexKey(project, instance)) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, instance)) var op *sqladmin.Operation updateFunc := func() error { op, err = config.NewSqlAdminClient(userAgent).Users.Update(project, instance, user).Host(host).Name(name).Do() return err } - err = RetryTimeDuration(updateFunc, d.Timeout(schema.TimeoutUpdate)) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: updateFunc, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error, failed to update"+ @@ -459,7 +492,7 @@ func resourceSqlUserUpdate(d *schema.ResourceData, meta interface{}) error { } func resourceSqlUserDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) if deletionPolicy := d.Get("deletion_policy"); deletionPolicy == "ABANDON" { // Allows for user to be abandoned without deletion to avoid deletion failing @@ -467,12 +500,12 @@ func resourceSqlUserDelete(d *schema.ResourceData, meta interface{}) error { return nil } - userAgent, err := generateUserAgentString(d, 
config.UserAgent) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -481,21 +514,25 @@ func resourceSqlUserDelete(d *schema.ResourceData, meta interface{}) error { host := d.Get("host").(string) instance := d.Get("instance").(string) - mutexKV.Lock(instanceMutexKey(project, instance)) - defer mutexKV.Unlock(instanceMutexKey(project, instance)) + transport_tpg.MutexStore.Lock(instanceMutexKey(project, instance)) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, instance)) var op *sqladmin.Operation - err = RetryTimeDuration(func() error { - op, err = config.NewSqlAdminClient(userAgent).Users.Delete(project, instance).Host(host).Name(name).Do() - if err != nil { - return err - } + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + op, err = config.NewSqlAdminClient(userAgent).Users.Delete(project, instance).Host(host).Name(name).Do() + if err != nil { + return err + } - if err := SqlAdminOperationWaitTime(config, op, project, "Delete User", userAgent, d.Timeout(schema.TimeoutDelete)); err != nil { - return err - } - return nil - }, d.Timeout(schema.TimeoutDelete), IsSqlOperationInProgressError, isSqlInternalError) + if err := SqlAdminOperationWaitTime(config, op, project, "Delete User", userAgent, d.Timeout(schema.TimeoutDelete)); err != nil { + return err + } + return nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError, IsSqlInternalError}, + }) if err != nil { return fmt.Errorf("Error, failed to delete"+ diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_user_migrate.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_user_migrate.go similarity index 92% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_user_migrate.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_user_migrate.go index b82ef8adda..814c9f406b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_sql_user_migrate.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/resource_sql_user_migrate.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package sql import ( "fmt" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/sql_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/sql_utils.go new file mode 100644 index 0000000000..e8a2703baa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/sql_utils.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package sql + +import ( + "log" + "strings" + + "github.com/hashicorp/errwrap" + "google.golang.org/api/googleapi" +) + +func transformSQLDatabaseReadError(err error) error { + if gErr, ok := errwrap.GetType(err, &googleapi.Error{}).(*googleapi.Error); ok { + if gErr.Code == 400 && strings.Contains(gErr.Message, "Invalid request since instance is not running") { + // This error occurs when attempting a GET after deleting the sql database and sql instance. It leads to to + // inconsistent behavior as HandleNotFoundError(...) expects an error code of 404 when a resource does not + // exist. 
To get the desired behavior from HandleNotFoundError, modify the return code to 404 so that + // HandleNotFoundError(...) will treat this as a NotFound error + gErr.Code = 404 + } + + log.Printf("[DEBUG] Transformed SQLDatabase error") + return gErr + } + + return err +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/sqladmin_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/sqladmin_operation.go new file mode 100644 index 0000000000..6a3e493156 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/sql/sqladmin_operation.go @@ -0,0 +1,153 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package sql + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + sqladmin "google.golang.org/api/sqladmin/v1beta4" +) + +type SqlAdminOperationWaiter struct { + Service *sqladmin.Service + Op *sqladmin.Operation + Project string +} + +func (w *SqlAdminOperationWaiter) State() string { + if w == nil { + return "Operation Waiter is nil!" + } + + if w.Op == nil { + return "Operation is nil!" + } + + return w.Op.Status +} + +func (w *SqlAdminOperationWaiter) Error() error { + if w != nil && w.Op != nil && w.Op.Error != nil { + return SqlAdminOperationError(*w.Op.Error) + } + return nil +} + +func (w *SqlAdminOperationWaiter) IsRetryable(error) bool { + return false +} + +func (w *SqlAdminOperationWaiter) SetOp(op interface{}) error { + if op == nil { + // Starting as a log statement, this may be a useful error in the future + log.Printf("[DEBUG] attempted to set nil op") + } + + sqlOp, ok := op.(*sqladmin.Operation) + w.Op = sqlOp + if !ok { + return fmt.Errorf("Unable to set operation. 
Bad type!") + } + + return nil +} + +func (w *SqlAdminOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, waiter is unset or nil.") + } + + if w.Op == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + + if w.Service == nil { + return nil, fmt.Errorf("Cannot query operation, service is nil.") + } + + var op interface{} + var err error + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + op, err = w.Service.Operations.Get(w.Project, w.Op.Name).Do() + return err + }, + Timeout: transport_tpg.DefaultRequestTimeout, + }) + + return op, err +} + +func (w *SqlAdminOperationWaiter) OpName() string { + if w == nil { + return "" + } + + if w.Op == nil { + return "" + } + + return w.Op.Name +} + +func (w *SqlAdminOperationWaiter) PendingStates() []string { + return []string{"PENDING", "RUNNING"} +} + +func (w *SqlAdminOperationWaiter) TargetStates() []string { + return []string{"DONE"} +} + +func SqlAdminOperationWaitTime(config *transport_tpg.Config, res interface{}, project, activity, userAgent string, timeout time.Duration) error { + op := &sqladmin.Operation{} + err := tpgresource.Convert(res, op) + if err != nil { + return err + } + + w := &SqlAdminOperationWaiter{ + Service: config.NewSqlAdminClient(userAgent), + Op: op, + Project: project, + } + if err := w.SetOp(op); err != nil { + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} + +// SqlAdminOperationError wraps sqladmin.OperationError and implements the +// error interface so it can be returned. +type SqlAdminOperationError sqladmin.OperationErrors + +func (e SqlAdminOperationError) Error() string { + var buf bytes.Buffer + + for _, err := range e.Errors { + buf.WriteString(err.Message + "\n") + } + + return buf.String() +} + +// Retry if Cloud SQL operation returns a 429 with a specific message for +// concurrent operations. 
+func IsSqlInternalError(err error) (bool, string) { + if gerr, ok := err.(*SqlAdminOperationError); ok { + // SqlAdminOperationError is a non-interface type so we need to cast it through + // a layer of interface{}. :) + var ierr interface{} + ierr = gerr + if serr, ok := ierr.(*sqladmin.OperationErrors); ok && serr.Errors[0].Code == "INTERNAL_ERROR" { + return true, "Received an internal error, which is sometimes retryable for some SQL resources. Optimistically retrying." + } + + } + return false, "" +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_google_storage_bucket.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_google_storage_bucket.go new file mode 100644 index 0000000000..33a46a3555 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_google_storage_bucket.go @@ -0,0 +1,42 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package storage + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleStorageBucket() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceStorageBucket().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + + return &schema.Resource{ + Read: dataSourceGoogleStorageBucketRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleStorageBucketRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + // Get the bucket and acl + bucket := d.Get("name").(string) + + res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() + if err != nil { + return err + } + log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) + + return setStorageBucket(d, config, res, bucket, userAgent) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_storage_bucket_object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_google_storage_bucket_object.go similarity index 77% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_storage_bucket_object.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_google_storage_bucket_object.go index 6a8f444115..3858ec91d2 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_storage_bucket_object.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_google_storage_bucket_object.go @@ -1,19 +1,24 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package storage import ( "fmt" "net/url" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func DataSourceGoogleStorageBucketObject() *schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(ResourceStorageBucketObject().Schema) + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceStorageBucketObject().Schema) - addOptionalFieldsToSchema(dsSchema, "bucket") - addOptionalFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "bucket") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name") return &schema.Resource{ Read: dataSourceGoogleStorageBucketObjectRead, @@ -22,8 +27,8 @@ func DataSourceGoogleStorageBucketObject() *schema.Resource { } func dataSourceGoogleStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -40,7 +45,12 @@ func dataSourceGoogleStorageBucketObjectRead(d *schema.ResourceData, meta interf // Using REST apis because the storage go client doesn't support folders url := fmt.Sprintf("https://www.googleapis.com/storage/v1/b/%s/o/%s", bucket, name) - res, err := SendRequest(config, "GET", "", url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) if err != nil { return fmt.Errorf("Error 
retrieving storage bucket object: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_storage_project_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_google_storage_project_service_account.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_storage_project_service_account.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_google_storage_project_service_account.go index 54e620cb23..eac2ae55f0 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_google_storage_project_service_account.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_google_storage_project_service_account.go @@ -1,9 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package storage import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func DataSourceGoogleStorageProjectServiceAccount() *schema.Resource { @@ -34,13 +38,13 @@ func DataSourceGoogleStorageProjectServiceAccount() *schema.Resource { } func dataSourceGoogleStorageProjectServiceAccountRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -53,7 +57,7 @@ func dataSourceGoogleStorageProjectServiceAccountRead(d *schema.ResourceData, me serviceAccount, err := serviceAccountGetRequest.Do() if err != nil { - return handleNotFoundError(err, d, "GCS service account not found") + return transport_tpg.HandleNotFoundError(err, d, "GCS service account not found") } if err := d.Set("project", project); err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_storage_bucket_object_content.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_storage_bucket_object_content.go new file mode 100644 index 0000000000..c562d32f23 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_storage_bucket_object_content.go @@ -0,0 +1,66 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package storage + +import ( + "fmt" + "io/ioutil" + "net/http" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/storage/v1" +) + +func DataSourceGoogleStorageBucketObjectContent() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceStorageBucketObject().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "bucket") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "content") + + return &schema.Resource{ + Read: dataSourceGoogleStorageBucketObjectContentRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleStorageBucketObjectContentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + bucket := d.Get("bucket").(string) + name := d.Get("name").(string) + + objectsService := storage.NewObjectsService(config.NewStorageClient(userAgent)) + getCall := objectsService.Get(bucket, name) + + res, err := getCall.Download() + if err != nil { + return fmt.Errorf("Error downloading storage bucket object: %s", err) + } + + defer res.Body.Close() + var bodyString string + + if res.StatusCode == http.StatusOK { + bodyBytes, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("Error reading all from res.Body: %s", err) + } + bodyString = string(bodyBytes) + } + + if err := d.Set("content", bodyString); err != nil { + return fmt.Errorf("Error setting content: %s", err) + } + + d.SetId(bucket + "-" + name) + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_storage_object_signed_url.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_storage_object_signed_url.go similarity index 96% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_storage_object_signed_url.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_storage_object_signed_url.go index d28eba72d9..b7a3ca87d4 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_storage_object_signed_url.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/data_source_storage_object_signed_url.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package storage import ( "bytes" @@ -19,6 +21,9 @@ import ( "strings" "time" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -94,7 +99,7 @@ func validateExtensionHeaders(v interface{}, k string) (ws []string, errors []er } func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) // Build UrlData object from data source attributes urlData := &UrlData{} @@ -173,8 +178,7 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err // 2. `credentials` attribute in the provider definition. // 3. A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS environment variable. 
func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error) { - config := meta.(*Config) - + config := meta.(*transport_tpg.Config) credentials := "" if v, ok := d.GetOk("credentials"); ok { log.Println("[DEBUG] using data source credentials to sign URL") @@ -191,7 +195,7 @@ func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error } if strings.TrimSpace(credentials) != "" { - contents, _, err := pathOrContents(credentials) + contents, _, err := verify.PathOrContents(credentials) if err != nil { return nil, errwrap.Wrapf("Error loading credentials: {{err}}", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/iam_storage_bucket.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/iam_storage_bucket.go new file mode 100644 index 0000000000..4e62012a7a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/iam_storage_bucket.go @@ -0,0 +1,194 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package storage + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var StorageBucketIamSchema = map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: StorageBucketDiffSuppress, + }, +} + +func StorageBucketDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + return tpgresource.CompareResourceNames("", old, new, nil) +} + +type StorageBucketIamUpdater struct { + bucket string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func StorageBucketIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + if v, ok := d.GetOk("bucket"); ok { + values["bucket"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"b/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("bucket").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &StorageBucketIamUpdater{ + bucket: values["bucket"], + d: d, + Config: config, + } + + if err := d.Set("bucket", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting bucket: %s", err) + } + + return u, nil +} + +func StorageBucketIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + m, err := tpgresource.GetImportIdQualifiers([]string{"b/(?P[^/]+)", "(?P[^/]+)"}, d, 
config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &StorageBucketIamUpdater{ + bucket: values["bucket"], + d: d, + Config: config, + } + if err := d.Set("bucket", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting bucket: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *StorageBucketIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyBucketUrl("iam") + if err != nil { + return nil, err + } + + var obj map[string]interface{} + url, err = transport_tpg.AddQueryParams(url, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", tpgiamresource.IamPolicyVersion)}) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *StorageBucketIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := json + + url, err := u.qualifyBucketUrl("iam") + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "PUT", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *StorageBucketIamUpdater) qualifyBucketUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{StorageBasePath}}%s/%s", fmt.Sprintf("b/%s", u.bucket), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *StorageBucketIamUpdater) GetResourceId() string { + return fmt.Sprintf("b/%s", u.bucket) +} + +func (u *StorageBucketIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-storage-bucket-%s", u.GetResourceId()) +} + +func (u *StorageBucketIamUpdater) DescribeResource() string { + return fmt.Sprintf("storage bucket %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_bucket.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket.go similarity index 93% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_bucket.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket.go index cdde1ca772..247bc765d8 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_bucket.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package storage import ( "bytes" @@ -12,6 +14,9 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/gammazero/workerpool" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -81,6 +86,7 @@ func ResourceStorageBucket() *schema.Resource { "labels": { Type: schema.TypeMap, Optional: true, + Computed: true, // GCP (Dataplex) automatically adds labels DiffSuppressFunc: resourceDataplexLabelDiffSuppress, Elem: &schema.Schema{Type: schema.TypeString}, @@ -263,8 +269,25 @@ func ResourceStorageBucket() *schema.Resource { }, }, Description: `The bucket's autoclass configuration.`, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + _, n := d.GetChange(strings.TrimSuffix(k, ".#")) + if !strings.HasSuffix(k, ".#") { + return false + } + var l []interface{} + if new == "1" && old == "0" { + l = n.([]interface{}) + contents, ok := l[0].(map[string]interface{}) + if !ok { + return false + } + if contents["enabled"] == false { + return true + } + } + return false + }, }, - "website": { Type: schema.TypeList, Optional: true, @@ -453,20 +476,20 @@ func isPolicyLocked(_ context.Context, old, new, _ interface{}) bool { } func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } // Get the bucket and location bucket := d.Get("name").(string) - if err := checkGCSName(bucket); err != nil { + if err := 
tpgresource.CheckGCSName(bucket); err != nil { return err } location := d.Get("location").(string) @@ -474,7 +497,7 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error // Create a bucket, setting the labels, location and name. sb := &storage.Bucket{ Name: bucket, - Labels: expandLabels(d), + Labels: tpgresource.ExpandLabels(d), Location: location, IamConfiguration: expandIamConfiguration(d), } @@ -544,9 +567,11 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error var res *storage.Bucket - err = retry(func() error { - res, err = config.NewStorageClient(userAgent).Buckets.Insert(project, sb).Do() - return err + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + res, err = config.NewStorageClient(userAgent).Buckets.Insert(project, sb).Do() + return err + }, }) if err != nil { @@ -559,10 +584,14 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error // There seems to be some eventual consistency errors in some cases, so we want to check a few times // to make sure it exists before moving on - err = RetryTimeDuration(func() (operr error) { - _, retryErr := config.NewStorageClient(userAgent).Buckets.Get(res.Name).Do() - return retryErr - }, d.Timeout(schema.TimeoutCreate), isNotFoundRetryableError("bucket creation")) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (operr error) { + _, retryErr := config.NewStorageClient(userAgent).Buckets.Get(res.Name).Do() + return retryErr + }, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsNotFoundRetryableError("bucket creation")}, + }) if err != nil { return fmt.Errorf("Error reading bucket after creation: %s", err) @@ -591,8 +620,8 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error } func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error { - config 
:= meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -670,7 +699,7 @@ func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error } if d.HasChange("labels") { - sb.Labels = expandLabels(d) + sb.Labels = tpgresource.ExpandLabels(d) if len(sb.Labels) == 0 { sb.NullFields = append(sb.NullFields, "Labels") } @@ -707,10 +736,14 @@ func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error // There seems to be some eventual consistency errors in some cases, so we want to check a few times // to make sure it exists before moving on - err = RetryTimeDuration(func() (operr error) { - _, retryErr := config.NewStorageClient(userAgent).Buckets.Get(res.Name).Do() - return retryErr - }, d.Timeout(schema.TimeoutUpdate), isNotFoundRetryableError("bucket update")) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (operr error) { + _, retryErr := config.NewStorageClient(userAgent).Buckets.Get(res.Name).Do() + return retryErr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsNotFoundRetryableError("bucket update")}, + }) if err != nil { return fmt.Errorf("Error reading bucket after update: %s", err) @@ -741,8 +774,8 @@ func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error } func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -753,14 +786,18 @@ func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { var res *storage.Bucket // There seems to be 
some eventual consistency errors in some cases, so we want to check a few times // to make sure it exists before moving on - err = RetryTimeDuration(func() (operr error) { - var retryErr error - res, retryErr = config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - return retryErr - }, d.Timeout(schema.TimeoutRead), isNotFoundRetryableError("bucket read")) + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (operr error) { + var retryErr error + res, retryErr = config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() + return retryErr + }, + Timeout: d.Timeout(schema.TimeoutRead), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsNotFoundRetryableError("bucket read")}, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket %q", d.Get("name").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Storage Bucket %q", d.Get("name").(string))) } log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) @@ -768,8 +805,8 @@ func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { } func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -904,9 +941,9 @@ func expandCors(configured []interface{}) []*storage.BucketCors { for _, raw := range configured { data := raw.(map[string]interface{}) corsRule := storage.BucketCors{ - Origin: convertStringArr(data["origin"].([]interface{})), - Method: convertStringArr(data["method"].([]interface{})), - ResponseHeader: convertStringArr(data["response_header"].([]interface{})), + Origin: tpgresource.ConvertStringArr(data["origin"].([]interface{})), + Method: 
tpgresource.ConvertStringArr(data["method"].([]interface{})), + ResponseHeader: tpgresource.ConvertStringArr(data["response_header"].([]interface{})), MaxAgeSeconds: int64(data["max_age_seconds"].(int)), } @@ -1146,14 +1183,14 @@ func flattenBucketLifecycleRuleAction(action *storage.BucketLifecycleRuleAction) func flattenBucketLifecycleRuleCondition(condition *storage.BucketLifecycleRuleCondition) map[string]interface{} { ruleCondition := map[string]interface{}{ "created_before": condition.CreatedBefore, - "matches_storage_class": convertStringArrToInterface(condition.MatchesStorageClass), + "matches_storage_class": tpgresource.ConvertStringArrToInterface(condition.MatchesStorageClass), "num_newer_versions": int(condition.NumNewerVersions), "custom_time_before": condition.CustomTimeBefore, "days_since_custom_time": int(condition.DaysSinceCustomTime), "days_since_noncurrent_time": int(condition.DaysSinceNoncurrentTime), "noncurrent_time_before": condition.NoncurrentTimeBefore, - "matches_prefix": convertStringArrToInterface(condition.MatchesPrefix), - "matches_suffix": convertStringArrToInterface(condition.MatchesSuffix), + "matches_prefix": tpgresource.ConvertStringArrToInterface(condition.MatchesPrefix), + "matches_suffix": tpgresource.ConvertStringArrToInterface(condition.MatchesSuffix), } if condition.Age != nil { ruleCondition["age"] = int(*condition.Age) @@ -1408,7 +1445,7 @@ func resourceGCSBucketLifecycleRuleActionHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } - return hashcode(buf.String()) + return tpgresource.Hashcode(buf.String()) } func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { @@ -1469,7 +1506,7 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { } } - return hashcode(buf.String()) + return tpgresource.Hashcode(buf.String()) } func lockRetentionPolicy(bucketsService *storage.BucketsService, bucketName string, metageneration int64) error { @@ -1502,7 +1539,7 @@ func 
detectLifecycleChange(d *schema.ResourceData) bool { // Resource Read and DataSource Read both need to set attributes, but Data Sources don't support Timeouts // so we pulled this portion out separately (https://github.com/hashicorp/terraform-provider-google/issues/11264) -func setStorageBucket(d *schema.ResourceData, config *Config, res *storage.Bucket, bucket, userAgent string) error { +func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res *storage.Bucket, bucket, userAgent string) error { // We are trying to support several different use cases for bucket. Buckets are globally // unique but they are associated with projects internally, but some users want to use // buckets in a project agnostic way. Thus we will check to see if the project ID has been @@ -1513,7 +1550,7 @@ func setStorageBucket(d *schema.ResourceData, config *Config, res *storage.Bucke // block, or the resource or an environment variable, we use the compute API to lookup the projectID // from the projectNumber which is included in the bucket API response if d.Get("project") == "" { - project, _ := getProject(d, config) + project, _ := tpgresource.GetProject(d, config) if err := d.Set("project", project); err != nil { return fmt.Errorf("Error setting project: %s", err) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_access_control.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_access_control.go new file mode 100644 index 0000000000..ee300a0bcd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_access_control.go @@ -0,0 +1,384 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package storage + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceStorageBucketAccessControl() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageBucketAccessControlCreate, + Read: resourceStorageBucketAccessControlRead, + Update: resourceStorageBucketAccessControlUpdate, + Delete: resourceStorageBucketAccessControlDelete, + + Importer: &schema.ResourceImporter{ + State: resourceStorageBucketAccessControlImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the bucket.`, + }, + "entity": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The entity holding the permission, in one of the following forms: + user-userId + user-email + group-groupId + group-email + domain-domain + project-team-projectId + allUsers + 
allAuthenticatedUsers +Examples: + The user liz@example.com would be user-liz@example.com. + The group example@googlegroups.com would be + group-example@googlegroups.com. + To refer to all members of the Google Apps for Business domain + example.com, the entity would be domain-example.com.`, + }, + "role": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"OWNER", "READER", "WRITER", ""}), + Description: `The access permission for the entity. Possible values: ["OWNER", "READER", "WRITER"]`, + }, + "domain": { + Type: schema.TypeString, + Computed: true, + Description: `The domain associated with the entity.`, + }, + "email": { + Type: schema.TypeString, + Computed: true, + Description: `The email address associated with the entity.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceStorageBucketAccessControlCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + bucketProp, err := expandStorageBucketAccessControlBucket(d.Get("bucket"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bucket"); !tpgresource.IsEmptyValue(reflect.ValueOf(bucketProp)) && (ok || !reflect.DeepEqual(v, bucketProp)) { + obj["bucket"] = bucketProp + } + entityProp, err := expandStorageBucketAccessControlEntity(d.Get("entity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("entity"); !tpgresource.IsEmptyValue(reflect.ValueOf(entityProp)) && (ok || !reflect.DeepEqual(v, entityProp)) { + obj["entity"] = entityProp + } + roleProp, err := expandStorageBucketAccessControlRole(d.Get("role"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("role"); !tpgresource.IsEmptyValue(reflect.ValueOf(roleProp)) && (ok || !reflect.DeepEqual(v, roleProp)) { + obj["role"] = roleProp + 
} + + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/acl") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new BucketAccessControl: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating BucketAccessControl: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{bucket}}/{{entity}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating BucketAccessControl %q: %#v", d.Id(), res) + + return resourceStorageBucketAccessControlRead(d, meta) +} + +func resourceStorageBucketAccessControlRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/acl/{{entity}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + 
UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("StorageBucketAccessControl %q", d.Id())) + } + + if err := d.Set("bucket", flattenStorageBucketAccessControlBucket(res["bucket"], d, config)); err != nil { + return fmt.Errorf("Error reading BucketAccessControl: %s", err) + } + if err := d.Set("domain", flattenStorageBucketAccessControlDomain(res["domain"], d, config)); err != nil { + return fmt.Errorf("Error reading BucketAccessControl: %s", err) + } + if err := d.Set("email", flattenStorageBucketAccessControlEmail(res["email"], d, config)); err != nil { + return fmt.Errorf("Error reading BucketAccessControl: %s", err) + } + if err := d.Set("entity", flattenStorageBucketAccessControlEntity(res["entity"], d, config)); err != nil { + return fmt.Errorf("Error reading BucketAccessControl: %s", err) + } + if err := d.Set("role", flattenStorageBucketAccessControlRole(res["role"], d, config)); err != nil { + return fmt.Errorf("Error reading BucketAccessControl: %s", err) + } + + return nil +} + +func resourceStorageBucketAccessControlUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + bucketProp, err := expandStorageBucketAccessControlBucket(d.Get("bucket"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bucket"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bucketProp)) { + obj["bucket"] = bucketProp + } + entityProp, err := expandStorageBucketAccessControlEntity(d.Get("entity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("entity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entityProp)) { + obj["entity"] = entityProp + } + roleProp, err := 
expandStorageBucketAccessControlRole(d.Get("role"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("role"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, roleProp)) { + obj["role"] = roleProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/acl/{{entity}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating BucketAccessControl %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating BucketAccessControl %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating BucketAccessControl %q: %#v", d.Id(), res) + } + + return resourceStorageBucketAccessControlRead(d, meta) +} + +func resourceStorageBucketAccessControlDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/acl/{{entity}}") + if err != nil { + return err + } + + var obj 
map[string]interface{} + log.Printf("[DEBUG] Deleting BucketAccessControl %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "BucketAccessControl") + } + + log.Printf("[DEBUG] Finished deleting BucketAccessControl %q: %#v", d.Id(), res) + return nil +} + +func resourceStorageBucketAccessControlImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{bucket}}/{{entity}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenStorageBucketAccessControlBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenStorageBucketAccessControlDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageBucketAccessControlEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageBucketAccessControlEntity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageBucketAccessControlRole(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func expandStorageBucketAccessControlBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandStorageBucketAccessControlEntity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandStorageBucketAccessControlRole(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_bucket_acl.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_acl.go similarity index 85% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_bucket_acl.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_acl.go index 4e1d349d12..7d14651a63 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_bucket_acl.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_acl.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package storage import ( "context" @@ -7,6 +9,9 @@ import ( "strconv" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/storage/v1" @@ -96,7 +101,7 @@ func getBucketAclId(bucket string) string { return bucket + "-acl" } -func getRoleEntityPair(role_entity string) (*RoleEntity, error) { +func GetRoleEntityPair(role_entity string) (*RoleEntity, error) { split := strings.Split(role_entity, ":") if len(split) != 2 { return nil, fmt.Errorf("Error, each role entity pair must be " + @@ -107,8 +112,8 @@ func getRoleEntityPair(role_entity string) (*RoleEntity, error) { } func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -130,12 +135,12 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er default_acl = v.(string) } - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) if len(predefined_acl) > 0 { res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() @@ -159,7 +164,7 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error retrieving current ACLs: %s", err) } for _, v := range role_entity { - pair, err := getRoleEntityPair(v.(string)) + pair, err := GetRoleEntityPair(v.(string)) if err != nil { 
return err } @@ -211,8 +216,8 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er } func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -229,7 +234,7 @@ func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) erro res, err := config.NewStorageClient(userAgent).BucketAccessControls.List(bucket).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket ACL for bucket %q", d.Get("bucket").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Storage Bucket ACL for bucket %q", d.Get("bucket").(string))) } entities := make([]string, 0, len(res.Items)) for _, item := range res.Items { @@ -254,20 +259,20 @@ func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) erro } func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } bucket := d.Get("bucket").(string) - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) if d.HasChange("role_entity") { bkt, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() @@ -281,7 +286,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er old_re_map := 
make(map[string]string) for _, v := range old_re { - res, err := getRoleEntityPair(v.(string)) + res, err := GetRoleEntityPair(v.(string)) if err != nil { return fmt.Errorf( @@ -292,7 +297,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er } for _, v := range new_re { - pair, err := getRoleEntityPair(v.(string)) + pair, err := GetRoleEntityPair(v.(string)) bucketAccessControl := &storage.BucketAccessControl{ Role: pair.Role, @@ -352,20 +357,20 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er } func resourceStorageBucketAclDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } bucket := d.Get("bucket").(string) - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) bkt, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() if err != nil { @@ -375,7 +380,7 @@ func resourceStorageBucketAclDelete(d *schema.ResourceData, meta interface{}) er re_local := d.Get("role_entity").([]interface{}) for _, v := range re_local { - res, err := getRoleEntityPair(v.(string)) + res, err := GetRoleEntityPair(v.(string)) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_bucket_object.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_object.go similarity index 90% rename from 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_bucket_object.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_object.go index 74e6cc7dc8..8c815b79e1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_bucket_object.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_object.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package storage import ( "bytes" @@ -6,15 +8,15 @@ import ( "io" "log" "os" - "strings" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "crypto/md5" "crypto/sha256" "encoding/base64" - "io/ioutil" "net/http" "google.golang.org/api/googleapi" @@ -131,11 +133,11 @@ func ResourceStorageBucketObject() *schema.Resource { DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { localMd5Hash := "" if source, ok := d.GetOkExists("source"); ok { - localMd5Hash = getFileMd5Hash(source.(string)) + localMd5Hash = tpgresource.GetFileMd5Hash(source.(string)) } if content, ok := d.GetOkExists("content"); ok { - localMd5Hash = getContentMd5Hash([]byte(content.(string))) + localMd5Hash = tpgresource.GetContentMd5Hash([]byte(content.(string))) } // If `source` or `content` is dynamically set, both field will be empty. 
@@ -168,7 +170,7 @@ func ResourceStorageBucketObject() *schema.Resource { ForceNew: true, Computed: true, ConflictsWith: []string{"customer_encryption"}, - DiffSuppressFunc: compareCryptoKeyVersions, + DiffSuppressFunc: tpgresource.CompareCryptoKeyVersions, Description: `Resource name of the Cloud KMS key that will be used to encrypt the object. Overrides the object metadata's kmsKeyName value, if any.`, }, @@ -253,21 +255,9 @@ func objectGetID(object *storage.Object) string { return object.Bucket + "-" + object.Name } -func compareCryptoKeyVersions(_, old, new string, _ *schema.ResourceData) bool { - // The API can return cryptoKeyVersions even though it wasn't specified. - // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 - - kmsKeyWithoutVersions := strings.Split(old, "/cryptoKeyVersions")[0] - if kmsKeyWithoutVersions == new { - return true - } - - return false -} - func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -312,7 +302,7 @@ func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) } if v, ok := d.GetOk("metadata"); ok { - object.Metadata = convertStringMap(v.(map[string]interface{})) + object.Metadata = tpgresource.ConvertStringMap(v.(map[string]interface{})) } if v, ok := d.GetOk("storage_class"); ok { @@ -351,8 +341,8 @@ func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) } func resourceStorageBucketObjectUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ 
-389,8 +379,8 @@ func resourceStorageBucketObjectUpdate(d *schema.ResourceData, meta interface{}) } func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -409,7 +399,7 @@ func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) e res, err := getCall.Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket Object %q", d.Get("name").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Storage Bucket Object %q", d.Get("name").(string))) } if err := d.Set("md5hash", res.Md5Hash); err != nil { @@ -467,8 +457,8 @@ func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) e } func resourceStorageBucketObjectDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -505,20 +495,11 @@ func setEncryptionHeaders(customerEncryption map[string]string, headers http.Hea } func getFileMd5Hash(filename string) string { - data, err := ioutil.ReadFile(filename) - if err != nil { - log.Printf("[WARN] Failed to read source file %q. 
Cannot compute md5 hash for it.", filename) - return "" - } - return getContentMd5Hash(data) + return tpgresource.GetFileMd5Hash(filename) } func getContentMd5Hash(content []byte) string { - h := md5.New() - if _, err := h.Write(content); err != nil { - log.Printf("[WARN] Failed to compute md5 hash for content: %v", err) - } - return base64.StdEncoding.EncodeToString(h.Sum(nil)) + return tpgresource.GetContentMd5Hash(content) } func expandCustomerEncryption(input []interface{}) map[string]string { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_sweeper.go new file mode 100644 index 0000000000..978bdc42fd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_bucket_sweeper.go @@ -0,0 +1,98 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package storage + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("StorageBucket", testSweepStorageBucket) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepStorageBucket(region string) error { + resourceName := "StorageBucket" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + params := map[string]string{ + "project": config.Project, + "projection": "noAcl", // returns 1000 items instead of 200 + } + + servicesUrl, err := transport_tpg.AddQueryParams("https://storage.googleapis.com/storage/v1/b", params) + if err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: servicesUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", servicesUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Count items that weren't sweeped. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + + id := obj["name"].(string) + // Increment count and skip if resource is not sweepable. 
+ if !sweeper.IsSweepableTestResource(id) { + nonPrefixCount++ + continue + } + + deleteUrl := fmt.Sprintf("https://storage.googleapis.com/storage/v1/b/%s", id) + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Deleted a %s resource: %s", resourceName, id) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items without tf-test prefix remain.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_default_object_access_control.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_default_object_access_control.go new file mode 100644 index 0000000000..390bd24265 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_default_object_access_control.go @@ -0,0 +1,477 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package storage + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceStorageDefaultObjectAccessControl() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageDefaultObjectAccessControlCreate, + Read: resourceStorageDefaultObjectAccessControlRead, + Update: resourceStorageDefaultObjectAccessControlUpdate, + Delete: resourceStorageDefaultObjectAccessControlDelete, + + Importer: &schema.ResourceImporter{ + State: resourceStorageDefaultObjectAccessControlImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the bucket.`, + }, + "entity": { + Type: schema.TypeString, + Required: true, + Description: `The entity holding the permission, in one of the following forms: + * user-{{userId}} + * user-{{email}} (such as "user-liz@example.com") + * group-{{groupId}} + * group-{{email}} (such as "group-example@googlegroups.com") + * domain-{{domain}} (such as "domain-example.com") + * project-team-{{projectId}} + * allUsers + * allAuthenticatedUsers`, + }, + "role": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"OWNER", "READER"}), + Description: `The access permission for the entity. 
Possible values: ["OWNER", "READER"]`, + }, + "object": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the object, if applied to an object.`, + }, + "domain": { + Type: schema.TypeString, + Computed: true, + Description: `The domain associated with the entity.`, + }, + "email": { + Type: schema.TypeString, + Computed: true, + Description: `The email address associated with the entity.`, + }, + "entity_id": { + Type: schema.TypeString, + Computed: true, + Description: `The ID for the entity`, + }, + "generation": { + Type: schema.TypeInt, + Computed: true, + Description: `The content generation of the object, if applied to an object.`, + }, + "project_team": { + Type: schema.TypeList, + Computed: true, + Description: `The project team associated with the entity`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project_number": { + Type: schema.TypeString, + Optional: true, + Description: `The project team associated with the entity`, + }, + "team": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"editors", "owners", "viewers", ""}), + Description: `The team. 
Possible values: ["editors", "owners", "viewers"]`, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceStorageDefaultObjectAccessControlCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + bucketProp, err := expandStorageDefaultObjectAccessControlBucket(d.Get("bucket"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bucket"); !tpgresource.IsEmptyValue(reflect.ValueOf(bucketProp)) && (ok || !reflect.DeepEqual(v, bucketProp)) { + obj["bucket"] = bucketProp + } + entityProp, err := expandStorageDefaultObjectAccessControlEntity(d.Get("entity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("entity"); !tpgresource.IsEmptyValue(reflect.ValueOf(entityProp)) && (ok || !reflect.DeepEqual(v, entityProp)) { + obj["entity"] = entityProp + } + objectProp, err := expandStorageDefaultObjectAccessControlObject(d.Get("object"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("object"); !tpgresource.IsEmptyValue(reflect.ValueOf(objectProp)) && (ok || !reflect.DeepEqual(v, objectProp)) { + obj["object"] = objectProp + } + roleProp, err := expandStorageDefaultObjectAccessControlRole(d.Get("role"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("role"); !tpgresource.IsEmptyValue(reflect.ValueOf(roleProp)) && (ok || !reflect.DeepEqual(v, roleProp)) { + obj["role"] = roleProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/defaultObjectAcl") + if err != nil { + return err + } + + log.Printf("[DEBUG] 
Creating new DefaultObjectAccessControl: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating DefaultObjectAccessControl: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{bucket}}/{{entity}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating DefaultObjectAccessControl %q: %#v", d.Id(), res) + + return resourceStorageDefaultObjectAccessControlRead(d, meta) +} + +func resourceStorageDefaultObjectAccessControlRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/defaultObjectAcl/{{entity}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("StorageDefaultObjectAccessControl %q", d.Id())) + } + + if err := d.Set("domain", flattenStorageDefaultObjectAccessControlDomain(res["domain"], d, config)); err != nil { + return fmt.Errorf("Error reading 
DefaultObjectAccessControl: %s", err) + } + if err := d.Set("email", flattenStorageDefaultObjectAccessControlEmail(res["email"], d, config)); err != nil { + return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) + } + if err := d.Set("entity", flattenStorageDefaultObjectAccessControlEntity(res["entity"], d, config)); err != nil { + return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) + } + if err := d.Set("entity_id", flattenStorageDefaultObjectAccessControlEntityId(res["entityId"], d, config)); err != nil { + return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) + } + if err := d.Set("generation", flattenStorageDefaultObjectAccessControlGeneration(res["generation"], d, config)); err != nil { + return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) + } + if err := d.Set("object", flattenStorageDefaultObjectAccessControlObject(res["object"], d, config)); err != nil { + return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) + } + if err := d.Set("project_team", flattenStorageDefaultObjectAccessControlProjectTeam(res["projectTeam"], d, config)); err != nil { + return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) + } + if err := d.Set("role", flattenStorageDefaultObjectAccessControlRole(res["role"], d, config)); err != nil { + return fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) + } + + return nil +} + +func resourceStorageDefaultObjectAccessControlUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + bucketProp, err := expandStorageDefaultObjectAccessControlBucket(d.Get("bucket"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bucket"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, bucketProp)) { + obj["bucket"] = bucketProp + } + entityProp, err := expandStorageDefaultObjectAccessControlEntity(d.Get("entity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("entity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entityProp)) { + obj["entity"] = entityProp + } + objectProp, err := expandStorageDefaultObjectAccessControlObject(d.Get("object"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("object"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, objectProp)) { + obj["object"] = objectProp + } + roleProp, err := expandStorageDefaultObjectAccessControlRole(d.Get("role"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("role"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, roleProp)) { + obj["role"] = roleProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/defaultObjectAcl/{{entity}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating DefaultObjectAccessControl %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating DefaultObjectAccessControl %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating DefaultObjectAccessControl %q: %#v", 
d.Id(), res) + } + + return resourceStorageDefaultObjectAccessControlRead(d, meta) +} + +func resourceStorageDefaultObjectAccessControlDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/defaultObjectAcl/{{entity}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting DefaultObjectAccessControl %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "DefaultObjectAccessControl") + } + + log.Printf("[DEBUG] Finished deleting DefaultObjectAccessControl %q: %#v", d.Id(), res) + return nil +} + +func resourceStorageDefaultObjectAccessControlImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{bucket}}/{{entity}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return 
[]*schema.ResourceData{d}, nil +} + +func flattenStorageDefaultObjectAccessControlDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageDefaultObjectAccessControlEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageDefaultObjectAccessControlEntity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageDefaultObjectAccessControlEntityId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageDefaultObjectAccessControlGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenStorageDefaultObjectAccessControlObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageDefaultObjectAccessControlProjectTeam(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["project_number"] = + flattenStorageDefaultObjectAccessControlProjectTeamProjectNumber(original["projectNumber"], d, config) + transformed["team"] = + flattenStorageDefaultObjectAccessControlProjectTeamTeam(original["team"], d, config) + return []interface{}{transformed} +} +func flattenStorageDefaultObjectAccessControlProjectTeamProjectNumber(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageDefaultObjectAccessControlProjectTeamTeam(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageDefaultObjectAccessControlRole(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandStorageDefaultObjectAccessControlBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandStorageDefaultObjectAccessControlEntity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandStorageDefaultObjectAccessControlObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandStorageDefaultObjectAccessControlRole(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_default_object_acl.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_default_object_acl.go similarity index 76% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_default_object_acl.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_default_object_acl.go index 849227cdcb..b47237427a 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_default_object_acl.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_default_object_acl.go @@ -1,9 +1,13 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package storage import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/storage/v1" ) @@ -36,8 +40,8 @@ func ResourceStorageDefaultObjectAcl() *schema.Resource { } func resourceStorageDefaultObjectAclCreateUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -52,12 +56,12 @@ func resourceStorageDefaultObjectAclCreateUpdate(d *schema.ResourceData, meta in }) } - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() if err != nil { @@ -82,8 +86,8 @@ func resourceStorageDefaultObjectAclCreateUpdate(d *schema.ResourceData, meta in } func resourceStorageDefaultObjectAclRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -91,7 +95,7 @@ func resourceStorageDefaultObjectAclRead(d *schema.ResourceData, meta 
interface{ bucket := d.Get("bucket").(string) res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Projection("full").Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Default Storage Object ACL for Bucket %q", d.Get("bucket").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Default Storage Object ACL for Bucket %q", d.Get("bucket").(string))) } var roleEntities []string @@ -111,18 +115,18 @@ func resourceStorageDefaultObjectAclRead(d *schema.ResourceData, meta interface{ } func resourceStorageDefaultObjectAclDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) bucket := d.Get("bucket").(string) res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_hmac_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_hmac_key.go new file mode 100644 index 0000000000..4e5cd98706 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_hmac_key.go @@ -0,0 +1,534 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package storage + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceStorageHmacKey() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageHmacKeyCreate, + Read: resourceStorageHmacKeyRead, + Update: resourceStorageHmacKeyUpdate, + Delete: resourceStorageHmacKeyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceStorageHmacKeyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "service_account_email": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The email address of the key's associated service account.`, + }, + "state": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"ACTIVE", "INACTIVE", ""}), + Description: `The state of the key. Can be set to one of ACTIVE, INACTIVE. 
Default value: "ACTIVE" Possible values: ["ACTIVE", "INACTIVE"]`, + Default: "ACTIVE", + }, + "access_id": { + Type: schema.TypeString, + Computed: true, + Description: `The access ID of the HMAC Key.`, + }, + "secret": { + Type: schema.TypeString, + Computed: true, + Description: `HMAC secret key material.`, + Sensitive: true, + }, + "time_created": { + Type: schema.TypeString, + Computed: true, + Description: `'The creation time of the HMAC key in RFC 3339 format. '`, + }, + "updated": { + Type: schema.TypeString, + Computed: true, + Description: `'The last modification time of the HMAC key metadata in RFC 3339 format.'`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceStorageHmacKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + serviceAccountEmailProp, err := expandStorageHmacKeyServiceAccountEmail(d.Get("service_account_email"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_account_email"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceAccountEmailProp)) && (ok || !reflect.DeepEqual(v, serviceAccountEmailProp)) { + obj["serviceAccountEmail"] = serviceAccountEmailProp + } + stateProp, err := expandStorageHmacKeyState(d.Get("state"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("state"); !tpgresource.IsEmptyValue(reflect.ValueOf(stateProp)) && (ok || !reflect.DeepEqual(v, stateProp)) { + obj["state"] = stateProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys?serviceAccountEmail={{service_account_email}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new HmacKey: %#v", obj) + billingProject := "" + + 
project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HmacKey: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating HmacKey: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/hmacKeys/{{access_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // `secret` and `access_id` are generated by the API upon successful CREATE. The following + // ensures terraform has the correct values based on the Projects.hmacKeys response object. + secret, ok := res["secret"].(string) + if !ok { + return fmt.Errorf("The response to CREATE was missing an expected field. Your create did not work.") + } + + if err := d.Set("secret", secret); err != nil { + return fmt.Errorf("Error setting secret: %s", err) + } + + metadata := res["metadata"].(map[string]interface{}) + accessId, ok := metadata["accessId"].(string) + if !ok { + return fmt.Errorf("The response to CREATE was missing an expected field. 
Your create did not work.") + } + + if err := d.Set("access_id", accessId); err != nil { + return fmt.Errorf("Error setting access_id: %s", err) + } + + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/hmacKeys/{{access_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + + d.SetId(id) + + err = transport_tpg.PollingWaitTime(resourceStorageHmacKeyPollRead(d, meta), transport_tpg.PollCheckForExistence, "Creating HmacKey", d.Timeout(schema.TimeoutCreate), 1) + if err != nil { + return fmt.Errorf("Error waiting to create HmacKey: %s", err) + } + + log.Printf("[DEBUG] Finished creating HmacKey %q: %#v", d.Id(), res) + + return resourceStorageHmacKeyRead(d, meta) +} + +func resourceStorageHmacKeyPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") + if err != nil { + return nil, err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, fmt.Errorf("Error fetching project for HmacKey: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return res, err + } + res, err = resourceStorageHmacKeyDecoder(d, meta, res) + if err != nil { + return nil, err + } + if res == nil { + return nil, tpgresource.Fake404("decoded", "StorageHmacKey") + } + + return res, nil + } +} + 
+func resourceStorageHmacKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HmacKey: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("StorageHmacKey %q", d.Id())) + } + + res, err = resourceStorageHmacKeyDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing StorageHmacKey because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading HmacKey: %s", err) + } + + if err := d.Set("service_account_email", flattenStorageHmacKeyServiceAccountEmail(res["serviceAccountEmail"], d, config)); err != nil { + return fmt.Errorf("Error reading HmacKey: %s", err) + } + if err := d.Set("state", flattenStorageHmacKeyState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading HmacKey: %s", err) + } + if err := d.Set("access_id", flattenStorageHmacKeyAccessId(res["accessId"], d, config)); err != nil { + return fmt.Errorf("Error reading HmacKey: %s", err) + } + if err := d.Set("time_created", flattenStorageHmacKeyTimeCreated(res["timeCreated"], d, config)); err != nil { + return fmt.Errorf("Error reading HmacKey: %s", err) + } + if err := d.Set("updated", flattenStorageHmacKeyUpdated(res["updated"], d, config)); err != nil { + return fmt.Errorf("Error reading HmacKey: %s", err) + } + + return nil +} + +func resourceStorageHmacKeyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HmacKey: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("state") { + obj := make(map[string]interface{}) + + getUrl, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getRes, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("StorageHmacKey %q", d.Id())) + } + + obj["etag"] = getRes["etag"] + + stateProp, err := expandStorageHmacKeyState(d.Get("state"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("state"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, stateProp)) { + obj["state"] = stateProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating HmacKey %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating HmacKey %q: %#v", d.Id(), res) + } + + } + + d.Partial(false) + + return resourceStorageHmacKeyRead(d, meta) +} + +func resourceStorageHmacKeyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for HmacKey: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") + if err != nil { + return err + } + + var obj 
map[string]interface{} + getUrl, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") + if err != nil { + return err + } + + getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("StorageHmacKey %q", d.Id())) + } + + // HmacKeys need to be INACTIVE to be deleted and the API doesn't accept noop + // updates + if v := getRes["state"]; v == "ACTIVE" { + getRes["state"] = "INACTIVE" + updateUrl, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Deactivating HmacKey %q: %#v", d.Id(), getRes) + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: project, + RawURL: updateUrl, + UserAgent: userAgent, + Body: getRes, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error deactivating HmacKey %q: %s", d.Id(), err) + } + } + log.Printf("[DEBUG] Deleting HmacKey %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "HmacKey") + } + + log.Printf("[DEBUG] Finished deleting HmacKey %q: %#v", d.Id(), res) + return nil +} + +func resourceStorageHmacKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := 
tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/hmacKeys/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/hmacKeys/{{access_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenStorageHmacKeyServiceAccountEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageHmacKeyState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageHmacKeyAccessId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageHmacKeyTimeCreated(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageHmacKeyUpdated(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandStorageHmacKeyServiceAccountEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandStorageHmacKeyState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceStorageHmacKeyDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + if v := res["state"]; v == "DELETED" { + return nil, nil + } + + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_notification.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_notification.go similarity index 79% rename 
from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_notification.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_notification.go index 764bb4f4e9..93c869956c 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_notification.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_notification.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package storage import ( "fmt" @@ -6,6 +8,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/services/pubsub" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/storage/v1" ) @@ -38,7 +44,7 @@ func ResourceStorageNotification() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The Cloud Pub/Sub topic to which this subscription publishes. Expects either the topic name, assumed to belong to the default GCP provider project, or the project-level name, i.e. projects/my-gcp-project/topics/my-topic or my-topic. 
If the project is not set in the provider, you will need to use the project-level name.`, }, @@ -89,8 +95,8 @@ func ResourceStorageNotification() *schema.Resource { } func resourceStorageNotificationCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -98,18 +104,18 @@ func resourceStorageNotificationCreate(d *schema.ResourceData, meta interface{}) bucket := d.Get("bucket").(string) topicName := d.Get("topic").(string) - computedTopicName := getComputedTopicName("", topicName) + computedTopicName := pubsub.GetComputedTopicName("", topicName) if computedTopicName != topicName { - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } - computedTopicName = getComputedTopicName(project, topicName) + computedTopicName = pubsub.GetComputedTopicName(project, topicName) } storageNotification := &storage.Notification{ - CustomAttributes: expandStringMap(d, "custom_attributes"), - EventTypes: convertStringSet(d.Get("event_types").(*schema.Set)), + CustomAttributes: tpgresource.ExpandStringMap(d, "custom_attributes"), + EventTypes: tpgresource.ConvertStringSet(d.Get("event_types").(*schema.Set)), ObjectNamePrefix: d.Get("object_name_prefix").(string), PayloadFormat: d.Get("payload_format").(string), Topic: computedTopicName, @@ -126,17 +132,17 @@ func resourceStorageNotificationCreate(d *schema.ResourceData, meta interface{}) } func resourceStorageNotificationRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - bucket, 
notificationID := resourceStorageNotificationParseID(d.Id()) + bucket, notificationID := ResourceStorageNotificationParseID(d.Id()) res, err := config.NewStorageClient(userAgent).Notifications.Get(bucket, notificationID).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Notification configuration %s for bucket %s", notificationID, bucket)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Notification configuration %s for bucket %s", notificationID, bucket)) } if err := d.Set("bucket", bucket); err != nil { @@ -168,13 +174,13 @@ func resourceStorageNotificationRead(d *schema.ResourceData, meta interface{}) e } func resourceStorageNotificationDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - bucket, notificationID := resourceStorageNotificationParseID(d.Id()) + bucket, notificationID := ResourceStorageNotificationParseID(d.Id()) err = config.NewStorageClient(userAgent).Notifications.Delete(bucket, notificationID).Do() if err != nil { @@ -184,7 +190,7 @@ func resourceStorageNotificationDelete(d *schema.ResourceData, meta interface{}) return nil } -func resourceStorageNotificationParseID(id string) (string, string) { +func ResourceStorageNotificationParseID(id string) (string, string) { //bucket, NotificationID parts := strings.Split(id, "/") diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_object_access_control.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_object_access_control.go new file mode 100644 index 0000000000..241e340cba --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_object_access_control.go @@ -0,0 +1,487 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package storage + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceStorageObjectAccessControl() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageObjectAccessControlCreate, + Read: resourceStorageObjectAccessControlRead, + Update: resourceStorageObjectAccessControlUpdate, + Delete: resourceStorageObjectAccessControlDelete, + + Importer: &schema.ResourceImporter{ + State: resourceStorageObjectAccessControlImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the bucket.`, + }, + "entity": { + Type: schema.TypeString, + Required: true, + Description: `The entity 
holding the permission, in one of the following forms: + * user-{{userId}} + * user-{{email}} (such as "user-liz@example.com") + * group-{{groupId}} + * group-{{email}} (such as "group-example@googlegroups.com") + * domain-{{domain}} (such as "domain-example.com") + * project-team-{{projectId}} + * allUsers + * allAuthenticatedUsers`, + }, + "object": { + Type: schema.TypeString, + Required: true, + Description: `The name of the object to apply the access control to.`, + }, + "role": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"OWNER", "READER"}), + Description: `The access permission for the entity. Possible values: ["OWNER", "READER"]`, + }, + "domain": { + Type: schema.TypeString, + Computed: true, + Description: `The domain associated with the entity.`, + }, + "email": { + Type: schema.TypeString, + Computed: true, + Description: `The email address associated with the entity.`, + }, + "entity_id": { + Type: schema.TypeString, + Computed: true, + Description: `The ID for the entity`, + }, + "generation": { + Type: schema.TypeInt, + Computed: true, + Description: `The content generation of the object, if applied to an object.`, + }, + "project_team": { + Type: schema.TypeList, + Computed: true, + Description: `The project team associated with the entity`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project_number": { + Type: schema.TypeString, + Optional: true, + Description: `The project team associated with the entity`, + }, + "team": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"editors", "owners", "viewers", ""}), + Description: `The team. 
Possible values: ["editors", "owners", "viewers"]`, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceStorageObjectAccessControlCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + bucketProp, err := expandStorageObjectAccessControlBucket(d.Get("bucket"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bucket"); !tpgresource.IsEmptyValue(reflect.ValueOf(bucketProp)) && (ok || !reflect.DeepEqual(v, bucketProp)) { + obj["bucket"] = bucketProp + } + entityProp, err := expandStorageObjectAccessControlEntity(d.Get("entity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("entity"); !tpgresource.IsEmptyValue(reflect.ValueOf(entityProp)) && (ok || !reflect.DeepEqual(v, entityProp)) { + obj["entity"] = entityProp + } + objectProp, err := expandStorageObjectAccessControlObject(d.Get("object"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("object"); !tpgresource.IsEmptyValue(reflect.ValueOf(objectProp)) && (ok || !reflect.DeepEqual(v, objectProp)) { + obj["object"] = objectProp + } + roleProp, err := expandStorageObjectAccessControlRole(d.Get("role"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("role"); !tpgresource.IsEmptyValue(reflect.ValueOf(roleProp)) && (ok || !reflect.DeepEqual(v, roleProp)) { + obj["role"] = roleProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/o/{{%object}}/acl") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new 
ObjectAccessControl: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ObjectAccessControl: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{bucket}}/{{object}}/{{entity}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ObjectAccessControl %q: %#v", d.Id(), res) + + return resourceStorageObjectAccessControlRead(d, meta) +} + +func resourceStorageObjectAccessControlRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/o/{{%object}}/acl/{{entity}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("StorageObjectAccessControl %q", d.Id())) + } + + if err := d.Set("bucket", flattenStorageObjectAccessControlBucket(res["bucket"], d, config)); err != nil { + return fmt.Errorf("Error reading ObjectAccessControl: %s", err) + } + if err := 
d.Set("domain", flattenStorageObjectAccessControlDomain(res["domain"], d, config)); err != nil { + return fmt.Errorf("Error reading ObjectAccessControl: %s", err) + } + if err := d.Set("email", flattenStorageObjectAccessControlEmail(res["email"], d, config)); err != nil { + return fmt.Errorf("Error reading ObjectAccessControl: %s", err) + } + if err := d.Set("entity", flattenStorageObjectAccessControlEntity(res["entity"], d, config)); err != nil { + return fmt.Errorf("Error reading ObjectAccessControl: %s", err) + } + if err := d.Set("entity_id", flattenStorageObjectAccessControlEntityId(res["entityId"], d, config)); err != nil { + return fmt.Errorf("Error reading ObjectAccessControl: %s", err) + } + if err := d.Set("generation", flattenStorageObjectAccessControlGeneration(res["generation"], d, config)); err != nil { + return fmt.Errorf("Error reading ObjectAccessControl: %s", err) + } + if err := d.Set("object", flattenStorageObjectAccessControlObject(res["object"], d, config)); err != nil { + return fmt.Errorf("Error reading ObjectAccessControl: %s", err) + } + if err := d.Set("project_team", flattenStorageObjectAccessControlProjectTeam(res["projectTeam"], d, config)); err != nil { + return fmt.Errorf("Error reading ObjectAccessControl: %s", err) + } + if err := d.Set("role", flattenStorageObjectAccessControlRole(res["role"], d, config)); err != nil { + return fmt.Errorf("Error reading ObjectAccessControl: %s", err) + } + + return nil +} + +func resourceStorageObjectAccessControlUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + bucketProp, err := expandStorageObjectAccessControlBucket(d.Get("bucket"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bucket"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok 
|| !reflect.DeepEqual(v, bucketProp)) { + obj["bucket"] = bucketProp + } + entityProp, err := expandStorageObjectAccessControlEntity(d.Get("entity"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("entity"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, entityProp)) { + obj["entity"] = entityProp + } + objectProp, err := expandStorageObjectAccessControlObject(d.Get("object"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("object"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, objectProp)) { + obj["object"] = objectProp + } + roleProp, err := expandStorageObjectAccessControlRole(d.Get("role"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("role"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, roleProp)) { + obj["role"] = roleProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/o/{{%object}}/acl/{{entity}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ObjectAccessControl %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PUT", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating ObjectAccessControl %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ObjectAccessControl %q: %#v", d.Id(), res) + } + + 
return resourceStorageObjectAccessControlRead(d, meta) +} + +func resourceStorageObjectAccessControlDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/o/{{%object}}/acl/{{entity}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting ObjectAccessControl %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ObjectAccessControl") + } + + log.Printf("[DEBUG] Finished deleting ObjectAccessControl %q: %#v", d.Id(), res) + return nil +} + +func resourceStorageObjectAccessControlImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P[^/]+)/(?P.+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{bucket}}/{{object}}/{{entity}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func 
flattenStorageObjectAccessControlBucket(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenStorageObjectAccessControlDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageObjectAccessControlEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageObjectAccessControlEntity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageObjectAccessControlEntityId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageObjectAccessControlGeneration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenStorageObjectAccessControlObject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageObjectAccessControlProjectTeam(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["project_number"] = + flattenStorageObjectAccessControlProjectTeamProjectNumber(original["projectNumber"], d, config) + transformed["team"] = + flattenStorageObjectAccessControlProjectTeamTeam(original["team"], d, config) + return 
[]interface{}{transformed} +} +func flattenStorageObjectAccessControlProjectTeamProjectNumber(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageObjectAccessControlProjectTeamTeam(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageObjectAccessControlRole(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandStorageObjectAccessControlBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandStorageObjectAccessControlEntity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandStorageObjectAccessControlObject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandStorageObjectAccessControlRole(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_object_acl.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_object_acl.go similarity index 86% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_object_acl.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_object_acl.go index b90613493b..d0a8a543f2 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_object_acl.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storage/resource_storage_object_acl.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package storage import ( "context" "fmt" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "google.golang.org/api/storage/v1" @@ -59,7 +64,7 @@ func ResourceStorageObjectAcl() *schema.Resource { // owner having OWNER. It's impossible to remove that permission though, so this custom diff // makes configs with or without that line indistinguishable. func resourceStorageObjectAclDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) bucket, ok := diff.GetOk("bucket") if !ok { // During `plan` when this is interpolated from a resource that hasn't been created yet @@ -114,8 +119,8 @@ func getObjectAclId(object string) string { } func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -123,12 +128,12 @@ func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) er bucket := d.Get("bucket").(string) object := d.Get("object").(string) - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + 
transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) // If we're using a predefined acl we just use the canned api. if predefinedAcl, ok := d.GetOk("predefined_acl"); ok { @@ -158,7 +163,7 @@ func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error reading object %s in %s: %v", object, bucket, err) } - create, update, remove, err := getRoleEntityChange(roleEntitiesUpstream, convertStringSet(reMap), objectOwner) + create, update, remove, err := getRoleEntityChange(roleEntitiesUpstream, tpgresource.ConvertStringSet(reMap), objectOwner) if err != nil { return fmt.Errorf("Error reading object %s in %s. Invalid schema: %v", object, bucket, err) } @@ -175,8 +180,8 @@ func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) er } func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -186,7 +191,7 @@ func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) erro roleEntities, err := getRoleEntitiesAsStringsFromApi(config, bucket, object, userAgent) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Storage Object ACL for Bucket %q", d.Get("bucket").(string))) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Storage Object ACL for Bucket %q", d.Get("bucket").(string))) } err = d.Set("role_entity", roleEntities) @@ -199,8 +204,8 @@ func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) erro } func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -208,12 +213,12 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er bucket := d.Get("bucket").(string) object := d.Get("object").(string) - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) if _, ok := d.GetOk("predefined_acl"); d.HasChange("predefined_acl") && ok { res, err := config.NewStorageClient(userAgent).Objects.Get(bucket, object).Do() @@ -240,8 +245,8 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er o, n := d.GetChange("role_entity") create, update, remove, err := getRoleEntityChange( - convertStringSet(o.(*schema.Set)), - convertStringSet(n.(*schema.Set)), + tpgresource.ConvertStringSet(o.(*schema.Set)), + tpgresource.ConvertStringSet(n.(*schema.Set)), objectOwner) if err != nil { return fmt.Errorf("Error reading object %s in %s. 
Invalid schema: %v", object, bucket, err) @@ -257,8 +262,8 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er } func resourceStorageObjectAclDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -266,12 +271,12 @@ func resourceStorageObjectAclDelete(d *schema.ResourceData, meta interface{}) er bucket := d.Get("bucket").(string) object := d.Get("object").(string) - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") + lockName, err := tpgresource.ReplaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") if err != nil { return err } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) res, err := config.NewStorageClient(userAgent).Objects.Get(bucket, object).Do() if err != nil { @@ -286,7 +291,7 @@ func resourceStorageObjectAclDelete(d *schema.ResourceData, meta interface{}) er return nil } -func getRoleEntitiesAsStringsFromApi(config *Config, bucket, object, userAgent string) ([]string, error) { +func getRoleEntitiesAsStringsFromApi(config *transport_tpg.Config, bucket, object, userAgent string) ([]string, error) { var roleEntities []string res, err := config.NewStorageClient(userAgent).ObjectAccessControls.List(bucket, object).Do() if err != nil { @@ -358,7 +363,7 @@ func getRoleEntityChange(old []string, new []string, owner string) (create, upda } // Takes in lists of changes to make to a Storage Object's ACL and makes those changes -func performStorageObjectRoleEntityOperations(create []*RoleEntity, update []*RoleEntity, remove []*RoleEntity, config *Config, bucket, object, userAgent string) error { +func 
performStorageObjectRoleEntityOperations(create []*RoleEntity, update []*RoleEntity, remove []*RoleEntity, config *transport_tpg.Config, bucket, object, userAgent string) error { for _, v := range create { objectAccessControl := &storage.ObjectAccessControl{ Role: v.Role, diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storagetransfer/data_source_google_storage_transfer_project_service_account.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storagetransfer/data_source_google_storage_transfer_project_service_account.go new file mode 100644 index 0000000000..2961665fb2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storagetransfer/data_source_google_storage_transfer_project_service_account.go @@ -0,0 +1,69 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package storagetransfer + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleStorageTransferProjectServiceAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleStorageTransferProjectServiceAccountRead, + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Computed: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "subject_id": { + Type: schema.TypeString, + Computed: true, + }, + "member": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleStorageTransferProjectServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + 
return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + serviceAccount, err := config.NewStorageTransferClient(userAgent).GoogleServiceAccounts.Get(project).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Google Cloud Storage Transfer service account not found") + } + + d.SetId(serviceAccount.AccountEmail) + if err := d.Set("email", serviceAccount.AccountEmail); err != nil { + return fmt.Errorf("Error setting email: %s", err) + } + if err := d.Set("subject_id", serviceAccount.SubjectId); err != nil { + return fmt.Errorf("Error setting subject_id: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("member", "serviceAccount:"+serviceAccount.AccountEmail); err != nil { + return fmt.Errorf("Error setting member: %s", err) + } + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storagetransfer/resource_storage_transfer_agent_pool.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storagetransfer/resource_storage_transfer_agent_pool.go new file mode 100644 index 0000000000..69fe6c3e1f --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storagetransfer/resource_storage_transfer_agent_pool.go @@ -0,0 +1,443 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package storagetransfer + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// waitForAgentPoolReady waits for an agent pool to leave the +// "CREATING" state and become "CREATED", to indicate that it's ready. +func waitForAgentPoolReady(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { + return resource.Retry(timeout, func() *resource.RetryError { + if err := resourceStorageTransferAgentPoolRead(d, config); err != nil { + return resource.NonRetryableError(err) + } + + name := d.Get("name").(string) + state := d.Get("state").(string) + if state == "CREATING" { + return resource.RetryableError(fmt.Errorf("AgentPool %q has state %q.", name, state)) + } else if state == "CREATED" { + log.Printf("[DEBUG] AgentPool %q has state %q.", name, state) + return nil + } else { + return resource.NonRetryableError(fmt.Errorf("AgentPool %q has state %q.", name, state)) + } + }) +} + +func ResourceStorageTransferAgentPool() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageTransferAgentPoolCreate, + Read: resourceStorageTransferAgentPoolRead, + Update: resourceStorageTransferAgentPoolUpdate, + Delete: resourceStorageTransferAgentPoolDelete, + + Importer: &schema.ResourceImporter{ + State: resourceStorageTransferAgentPoolImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + 
Description: `The ID of the agent pool to create. + +The agentPoolId must meet the following requirements: +* Length of 128 characters or less. +* Not start with the string goog. +* Start with a lowercase ASCII character, followed by: + * Zero or more: lowercase Latin alphabet characters, numerals, hyphens (-), periods (.), underscores (_), or tildes (~). + * One or more numerals or lowercase ASCII characters. + +As expressed by the regular expression: ^(?!goog)[a-z]([a-z0-9-._~]*[a-z0-9])?$.`, + }, + "bandwidth_limit": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies the bandwidth limit details. If this field is unspecified, the default value is set as 'No Limit'.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "limit_mbps": { + Type: schema.TypeString, + Required: true, + Description: `Bandwidth rate in megabytes per second, distributed across all the agents in the pool.`, + }, + }, + }, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `Specifies the client-specified AgentPool description.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Specifies the state of the AgentPool.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceStorageTransferAgentPoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandStorageTransferAgentPoolDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + 
bandwidthLimitProp, err := expandStorageTransferAgentPoolBandwidthLimit(d.Get("bandwidth_limit"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bandwidth_limit"); !tpgresource.IsEmptyValue(reflect.ValueOf(bandwidthLimitProp)) && (ok || !reflect.DeepEqual(v, bandwidthLimitProp)) { + obj["bandwidthLimit"] = bandwidthLimitProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageTransferBasePath}}projects/{{project}}/agentPools?agentPoolId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AgentPool: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AgentPool: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating AgentPool: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/agentPools/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + if err := waitForAgentPoolReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { + return fmt.Errorf("Error waiting for AgentPool %q to be CREATED during creation: %q", d.Get("name").(string), err) + } + + log.Printf("[DEBUG] Finished creating AgentPool %q: %#v", d.Id(), res) + + return resourceStorageTransferAgentPoolRead(d, meta) +} + +func resourceStorageTransferAgentPoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageTransferBasePath}}projects/{{project}}/agentPools/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AgentPool: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("StorageTransferAgentPool %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading AgentPool: %s", err) + } + + if err := d.Set("display_name", flattenStorageTransferAgentPoolDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading AgentPool: %s", err) + } + if err := d.Set("state", flattenStorageTransferAgentPoolState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading AgentPool: %s", err) + } + if err := d.Set("bandwidth_limit", flattenStorageTransferAgentPoolBandwidthLimit(res["bandwidthLimit"], d, config)); err != nil { + return fmt.Errorf("Error reading AgentPool: %s", err) + } + + return nil +} + +func resourceStorageTransferAgentPoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AgentPool: 
%s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandStorageTransferAgentPoolDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + bandwidthLimitProp, err := expandStorageTransferAgentPoolBandwidthLimit(d.Get("bandwidth_limit"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bandwidth_limit"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bandwidthLimitProp)) { + obj["bandwidthLimit"] = bandwidthLimitProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageTransferBasePath}}projects/{{project}}/agentPools/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating AgentPool %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("bandwidth_limit") { + updateMask = append(updateMask, "bandwidthLimit") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + if err := waitForAgentPoolReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { + return fmt.Errorf("Error waiting for AgentPool %q to be CREATED before updating: %q", d.Get("name").(string), err) + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: 
userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating AgentPool %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating AgentPool %q: %#v", d.Id(), res) + } + + return resourceStorageTransferAgentPoolRead(d, meta) +} + +func resourceStorageTransferAgentPoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for AgentPool: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{StorageTransferBasePath}}projects/{{project}}/agentPools/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AgentPool %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "AgentPool") + } + + log.Printf("[DEBUG] Finished deleting AgentPool %q: %#v", d.Id(), res) + return nil +} + +func resourceStorageTransferAgentPoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/agentPools/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := 
tpgresource.ReplaceVars(d, config, "projects/{{project}}/agentPools/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + if err := waitForAgentPoolReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { + return nil, fmt.Errorf("Error waiting for AgentPool %q to be CREATED during importing: %q", d.Get("name").(string), err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenStorageTransferAgentPoolDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageTransferAgentPoolState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenStorageTransferAgentPoolBandwidthLimit(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["limit_mbps"] = + flattenStorageTransferAgentPoolBandwidthLimitLimitMbps(original["limitMbps"], d, config) + return []interface{}{transformed} +} +func flattenStorageTransferAgentPoolBandwidthLimitLimitMbps(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandStorageTransferAgentPoolDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandStorageTransferAgentPoolBandwidthLimit(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLimitMbps, err := expandStorageTransferAgentPoolBandwidthLimitLimitMbps(original["limit_mbps"], d, config) + 
if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLimitMbps); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["limitMbps"] = transformedLimitMbps + } + + return transformed, nil +} + +func expandStorageTransferAgentPoolBandwidthLimitLimitMbps(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storagetransfer/resource_storage_transfer_agent_pool_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storagetransfer/resource_storage_transfer_agent_pool_sweeper.go new file mode 100644 index 0000000000..1e19b124cc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storagetransfer/resource_storage_transfer_agent_pool_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package storagetransfer + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("StorageTransferAgentPool", testSweepStorageTransferAgentPool) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepStorageTransferAgentPool(region string) error { + resourceName := "StorageTransferAgentPool" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://storagetransfer.googleapis.com/v1/projects/{{project}}/agentPools", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["agentPools"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://storagetransfer.googleapis.com/v1/projects/{{project}}/agentPools/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_transfer_job.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storagetransfer/resource_storage_transfer_job.go similarity index 91% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_transfer_job.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storagetransfer/resource_storage_transfer_job.go index cb720ba38b..088f73be8b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_storage_transfer_job.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/storagetransfer/resource_storage_transfer_job.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package storagetransfer import ( "fmt" @@ -7,6 +9,10 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -19,6 +25,8 @@ var ( "transfer_spec.0.object_conditions.0.max_time_elapsed_since_last_modification", "transfer_spec.0.object_conditions.0.include_prefixes", "transfer_spec.0.object_conditions.0.exclude_prefixes", + "transfer_spec.0.object_conditions.0.last_modified_since", + "transfer_spec.0.object_conditions.0.last_modified_before", } transferOptionsKeys = []string{ @@ -216,7 +224,7 @@ func ResourceStorageTransferJob() *schema.Resource { }, "repeat_interval": { Type: schema.TypeString, - ValidateFunc: validateDuration(), + ValidateFunc: verify.ValidateDuration(), Optional: true, Description: `Interval between the start 
of each scheduled transfer. If unspecified, the default value is 24 hours. This value may not be less than 1 hour. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, Default: "86400s", @@ -261,14 +269,14 @@ func objectConditionsSchema() *schema.Schema { Schema: map[string]*schema.Schema{ "min_time_elapsed_since_last_modification": { Type: schema.TypeString, - ValidateFunc: validateDuration(), + ValidateFunc: verify.ValidateDuration(), Optional: true, AtLeastOneOf: objectConditionsKeys, Description: `A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, }, "max_time_elapsed_since_last_modification": { Type: schema.TypeString, - ValidateFunc: validateDuration(), + ValidateFunc: verify.ValidateDuration(), Optional: true, AtLeastOneOf: objectConditionsKeys, Description: `A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, @@ -293,6 +301,20 @@ func objectConditionsSchema() *schema.Schema { }, Description: `exclude_prefixes must follow the requirements described for include_prefixes.`, }, + "last_modified_since": { + Type: schema.TypeString, + ValidateFunc: verify.ValidateRFC3339Date, + Optional: true, + AtLeastOneOf: objectConditionsKeys, + Description: `If specified, only objects with a "last modification time" on or after this timestamp and objects that don't have a "last modification time" are transferred. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "last_modified_before": { + Type: schema.TypeString, + ValidateFunc: verify.ValidateRFC3339Date, + Optional: true, + AtLeastOneOf: objectConditionsKeys, + Description: `If specified, only objects with a "last modification time" before this timestamp and objects that don't have a "last modification time" are transferred. 
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, }, }, Description: `Only objects that satisfy these object conditions are included in the set of data source and data sink objects. Object conditions based on objects' last_modification_time do not exclude objects in a data sink.`, @@ -423,6 +445,11 @@ func awsS3DataSchema() *schema.Resource { Type: schema.TypeString, Description: `S3 Bucket name.`, }, + "path": { + Optional: true, + Type: schema.TypeString, + Description: `S3 Bucket path in bucket to transfer.`, + }, "aws_access_key": { Type: schema.TypeList, Optional: true, @@ -524,13 +551,13 @@ func diffSuppressEmptyStartTimeOfDay(k, old, new string, d *schema.ResourceData) } func resourceStorageTransferJobCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -546,9 +573,11 @@ func resourceStorageTransferJobCreate(d *schema.ResourceData, meta interface{}) var res *storagetransfer.TransferJob - err = retry(func() error { - res, err = config.NewStorageTransferClient(userAgent).TransferJobs.Create(transferJob).Do() - return err + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + res, err = config.NewStorageTransferClient(userAgent).TransferJobs.Create(transferJob).Do() + return err + }, }) if err != nil { @@ -560,20 +589,20 @@ func resourceStorageTransferJobCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error setting name: %s", err) } - name := GetResourceNameFromSelfLink(res.Name) + name := 
tpgresource.GetResourceNameFromSelfLink(res.Name) d.SetId(fmt.Sprintf("%s/%s", project, name)) return resourceStorageTransferJobRead(d, meta) } func resourceStorageTransferJobRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -581,7 +610,7 @@ func resourceStorageTransferJobRead(d *schema.ResourceData, meta interface{}) er name := d.Get("name").(string) res, err := config.NewStorageTransferClient(userAgent).TransferJobs.Get(name, project).Do() if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Transfer Job %q", name)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Transfer Job %q", name)) } if res.Status == "DELETED" { @@ -627,13 +656,13 @@ func resourceStorageTransferJobRead(d *schema.ResourceData, meta interface{}) er } func resourceStorageTransferJobUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -699,13 +728,13 @@ func resourceStorageTransferJobUpdate(d *schema.ResourceData, meta interface{}) } func resourceStorageTransferJobDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { 
return err } - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return err } @@ -924,12 +953,14 @@ func expandAwsS3Data(awsS3Datas []interface{}) *storagetransfer.AwsS3Data { BucketName: awsS3Data["bucket_name"].(string), AwsAccessKey: expandAwsAccessKeys(awsS3Data["aws_access_key"].([]interface{})), RoleArn: awsS3Data["role_arn"].(string), + Path: awsS3Data["path"].(string), } } func flattenAwsS3Data(awsS3Data *storagetransfer.AwsS3Data, d *schema.ResourceData) []map[string]interface{} { data := map[string]interface{}{ "bucket_name": awsS3Data.BucketName, + "path": awsS3Data.Path, "role_arn": awsS3Data.RoleArn, } if awsS3Data.AwsAccessKey != nil { @@ -1029,10 +1060,12 @@ func expandObjectConditions(conditions []interface{}) *storagetransfer.ObjectCon condition := conditions[0].(map[string]interface{}) return &storagetransfer.ObjectConditions{ - ExcludePrefixes: convertStringArr(condition["exclude_prefixes"].([]interface{})), - IncludePrefixes: convertStringArr(condition["include_prefixes"].([]interface{})), + ExcludePrefixes: tpgresource.ConvertStringArr(condition["exclude_prefixes"].([]interface{})), + IncludePrefixes: tpgresource.ConvertStringArr(condition["include_prefixes"].([]interface{})), MaxTimeElapsedSinceLastModification: condition["max_time_elapsed_since_last_modification"].(string), MinTimeElapsedSinceLastModification: condition["min_time_elapsed_since_last_modification"].(string), + LastModifiedSince: condition["last_modified_since"].(string), + LastModifiedBefore: condition["last_modified_before"].(string), } } @@ -1042,6 +1075,8 @@ func flattenObjectCondition(condition *storagetransfer.ObjectConditions) []map[s "include_prefixes": condition.IncludePrefixes, "max_time_elapsed_since_last_modification": condition.MaxTimeElapsedSinceLastModification, "min_time_elapsed_since_last_modification": condition.MinTimeElapsedSinceLastModification, + "last_modified_since": condition.LastModifiedSince, + 
"last_modified_before": condition.LastModifiedBefore, } return []map[string]interface{}{data} } @@ -1145,7 +1180,7 @@ func expandTransferJobNotificationConfig(notificationConfigs []interface{}) *sto } if notificationConfig["event_types"] != nil { - apiData.EventTypes = convertStringArr(notificationConfig["event_types"].(*schema.Set).List()) + apiData.EventTypes = tpgresource.ConvertStringArr(notificationConfig["event_types"].(*schema.Set).List()) } log.Printf("[DEBUG] apiData: %v\n\n", apiData) @@ -1163,7 +1198,7 @@ func flattenTransferJobNotificationConfig(notificationConfig *storagetransfer.No } if notificationConfig.EventTypes != nil { - data["event_types"] = convertStringArrToInterface(notificationConfig.EventTypes) + data["event_types"] = tpgresource.ConvertStringArrToInterface(notificationConfig.EventTypes) } return []map[string]interface{}{data} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_tags_tag_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/data_source_tags_tag_key.go similarity index 88% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_tags_tag_key.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/data_source_tags_tag_key.go index 1d06335bb3..61498e2193 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_tags_tag_key.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/data_source_tags_tag_key.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package tags import ( "errors" "fmt" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" resourceManagerV3 "google.golang.org/api/cloudresourcemanager/v3" ) @@ -51,8 +56,8 @@ func DataSourceGoogleTagsTagKey() *schema.Resource { } func dataSourceGoogleTagsTagKeyRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_tags_tag_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/data_source_tags_tag_value.go similarity index 88% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_tags_tag_value.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/data_source_tags_tag_value.go index 234775ed98..134ffa4bc1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/data_source_tags_tag_value.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/data_source_tags_tag_value.go @@ -1,10 +1,15 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package tags import ( "errors" "fmt" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" resourceManagerV3 "google.golang.org/api/cloudresourcemanager/v3" ) @@ -51,8 +56,8 @@ func DataSourceGoogleTagsTagValue() *schema.Resource { } func dataSourceGoogleTagsTagValueRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/iam_tags_tag_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/iam_tags_tag_key.go new file mode 100644 index 0000000000..6239eb1bd4 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/iam_tags_tag_key.go @@ -0,0 +1,187 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package tags + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var TagsTagKeyIamSchema = map[string]*schema.Schema{ + "tag_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type TagsTagKeyIamUpdater struct { + tagKey string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func TagsTagKeyIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + if v, ok := d.GetOk("tag_key"); ok { + values["tag_key"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"tagKeys/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("tag_key").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &TagsTagKeyIamUpdater{ + tagKey: values["tag_key"], + d: d, + Config: config, + } + + if err := d.Set("tag_key", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting tag_key: %s", err) + } + + return u, nil +} + +func TagsTagKeyIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + m, err := tpgresource.GetImportIdQualifiers([]string{"tagKeys/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &TagsTagKeyIamUpdater{ + 
tagKey: values["tag_key"], + d: d, + Config: config, + } + if err := d.Set("tag_key", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting tag_key: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *TagsTagKeyIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyTagKeyUrl("getIamPolicy") + if err != nil { + return nil, err + } + + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *TagsTagKeyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyTagKeyUrl("setIamPolicy") + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *TagsTagKeyIamUpdater) qualifyTagKeyUrl(methodIdentifier string) (string, 
error) { + urlTemplate := fmt.Sprintf("{{TagsBasePath}}%s:%s", fmt.Sprintf("tagKeys/%s", u.tagKey), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *TagsTagKeyIamUpdater) GetResourceId() string { + return fmt.Sprintf("tagKeys/%s", u.tagKey) +} + +func (u *TagsTagKeyIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-tags-tagkey-%s", u.GetResourceId()) +} + +func (u *TagsTagKeyIamUpdater) DescribeResource() string { + return fmt.Sprintf("tags tagkey %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/iam_tags_tag_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/iam_tags_tag_value.go new file mode 100644 index 0000000000..52e3cb2a9e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/iam_tags_tag_value.go @@ -0,0 +1,187 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package tags + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var TagsTagValueIamSchema = map[string]*schema.Schema{ + "tag_value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, +} + +type TagsTagValueIamUpdater struct { + tagValue string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func TagsTagValueIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + if v, ok := d.GetOk("tag_value"); ok { + values["tag_value"] = v.(string) + } + + // We may have gotten either a long or short name, so attempt to parse long name if possible + m, err := tpgresource.GetImportIdQualifiers([]string{"tagValues/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("tag_value").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &TagsTagValueIamUpdater{ + tagValue: values["tag_value"], + d: d, + Config: config, + } + + if err := d.Set("tag_value", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting tag_value: %s", err) + } + + return u, nil +} + +func TagsTagValueIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + m, err := tpgresource.GetImportIdQualifiers([]string{"tagValues/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + 
u := &TagsTagValueIamUpdater{ + tagValue: values["tag_value"], + d: d, + Config: config, + } + if err := d.Set("tag_value", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting tag_value: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *TagsTagValueIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyTagValueUrl("getIamPolicy") + if err != nil { + return nil, err + } + + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *TagsTagValueIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyTagValueUrl("setIamPolicy") + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *TagsTagValueIamUpdater) 
qualifyTagValueUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{TagsBasePath}}%s:%s", fmt.Sprintf("tagValues/%s", u.tagValue), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *TagsTagValueIamUpdater) GetResourceId() string { + return fmt.Sprintf("tagValues/%s", u.tagValue) +} + +func (u *TagsTagValueIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-tags-tagvalue-%s", u.GetResourceId()) +} + +func (u *TagsTagValueIamUpdater) DescribeResource() string { + return fmt.Sprintf("tags tagvalue %q", u.GetResourceId()) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_location_tag_bindings.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_location_tag_bindings.go new file mode 100644 index 0000000000..66f18576ad --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_location_tag_bindings.go @@ -0,0 +1,390 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package tags + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceTagsLocationTagBinding() *schema.Resource { + return &schema.Resource{ + Create: resourceTagsLocationTagBindingCreate, + Read: resourceTagsLocationTagBindingRead, + Delete: resourceTagsLocationTagBindingDelete, + + Importer: &schema.ResourceImporter{ + State: resourceTagsLocationTagBindingImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The full resource name of the resource the TagValue is bound to. E.g. //cloudresourcemanager.googleapis.com/projects/123`, + }, + "tag_value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The TagValue of the TagBinding. Must be of the form tagValues/456.`, + }, + "location": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: `The geographic location where the transfer config should reside. +Examples: US, EU, asia-northeast1. The default value is US.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The generated id for the TagBinding. 
This is a string of the form: 'tagBindings/{full-resource-name}/{tag-value-name}'`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceTagsLocationTagBindingCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + parentProp, err := expandNestedTagsLocationTagBindingParent(d.Get("parent"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { + obj["parent"] = parentProp + } + tagValueProp, err := expandNestedTagsLocationTagBindingTagValue(d.Get("tag_value"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tag_value"); !tpgresource.IsEmptyValue(reflect.ValueOf(tagValueProp)) && (ok || !reflect.DeepEqual(v, tagValueProp)) { + obj["tagValue"] = tagValueProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "tagBindings/{{parent}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsLocationBasePath}}tagBindings") + log.Printf("url for TagsLocation: %s", url) + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new LocationTagBinding: %#v", obj) + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating LocationTagBinding: %s", err) + 
} + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + + var opRes map[string]interface{} + err = TagsLocationOperationWaitTimeWithResponse( + config, res, &opRes, "Creating LocationTagBinding", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + d.SetId("") + return fmt.Errorf("Error waiting to create LocationTagBinding: %s", err) + } + + if _, ok := opRes["tagBindings"]; ok { + opRes, err = flattenNestedTagsLocationTagBinding(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error getting nested object from operation response: %s", err) + } + if opRes == nil { + // Object isn't there any more - remove it from the state. + d.SetId("") + return fmt.Errorf("Error decoding response from operation, could not find nested object") + } + } + if err := d.Set("name", flattenNestedTagsLocationTagBindingName(opRes["name"], d, config)); err != nil { + return err + } + + id, err := tpgresource.ReplaceVars(d, config, "{{location}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating LocationTagBinding %q: %#v", d.Id(), res) + + return resourceTagsLocationTagBindingRead(d, meta) +} + +func resourceTagsLocationTagBindingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsLocationBasePath}}tagBindings/?parent={{parent}}&pageSize=300") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + 
UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TagsLocationTagBinding %q", d.Id())) + } + log.Printf("[DEBUG] Skipping res with name for import = %#v,)", res) + + p, ok := res["tagBindings"] + if !ok || p == nil { + return nil + } + pView := p.([]interface{}) + + //if there are more than 300 bindings - handling pagination over here + if pageToken, ok := res["nextPageToken"].(string); ok { + for pageToken != "" { + url, err = transport_tpg.AddQueryParams(url, map[string]string{"pageToken": fmt.Sprintf("%s", res["nextPageToken"])}) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TagsLocationTagBinding %q", d.Id())) + } + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TagsLocationTagBinding %q", d.Id())) + } + if resp == nil { + d.SetId("") + return nil + } + v, ok := resp["tagBindings"] + if !ok || v == nil { + return nil + } + pView = append(pView, v.([]interface{})...) 
+ if token, ok := res["nextPageToken"]; ok { + pageToken = token.(string) + } else { + pageToken = "" + } + } + } + + newMap := make(map[string]interface{}, 1) + newMap["tagBindings"] = pView + + res, err = flattenNestedTagsLocationTagBinding(d, meta, newMap) + if err != nil { + return err + } + + if err := d.Set("name", flattenNestedTagsLocationTagBindingName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading LocationTagBinding: %s", err) + } + if err := d.Set("parent", flattenNestedTagsLocationTagBindingParent(res["parent"], d, config)); err != nil { + return fmt.Errorf("Error reading LocationTagBinding: %s", err) + } + if err := d.Set("tag_value", flattenNestedTagsLocationTagBindingTagValue(res["tagValue"], d, config)); err != nil { + return fmt.Errorf("Error reading LocationTagBinding: %s", err) + } + + return nil +} + +func resourceTagsLocationTagBindingDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "tagBindings/{{parent}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsLocationBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting LocationTagBinding %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return 
transport_tpg.HandleNotFoundError(err, d, "LocationTagBinding") + } + + err = TagsLocationOperationWaitTime( + config, res, "Deleting LocationTagBinding", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting LocationTagBinding %q: %#v", d.Id(), res) + return nil +} + +func resourceTagsLocationTagBindingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"(?P[^/]+)/tagBindings/(?P[^/]+)/tagValues/(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + parent := d.Get("parent").(string) + parentProper := strings.ReplaceAll(parent, "%2F", "/") + d.Set("parent", parentProper) + d.Set("name", fmt.Sprintf("tagBindings/%s/tagValues/%s", parent, d.Get("tag_value").(string))) + id, err := tpgresource.ReplaceVars(d, config, "{{location}}/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedTagsLocationTagBindingName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedTagsLocationTagBindingParent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedTagsLocationTagBindingTagValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedTagsLocationTagBindingParent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedTagsLocationTagBindingTagValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedTagsLocationTagBinding(d *schema.ResourceData, meta interface{}, res map[string]interface{}) 
(map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["tagBindings"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + log.Printf("[DEBUG] Hey it's in break = %#v,)", v) + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value tagBindings. Actual value: %v", v) + } + + _, item, err := resourceTagsLocationTagBindingFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceTagsLocationTagBindingFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName := d.Get("name") + expectedFlattenedName := flattenNestedTagsLocationTagBindingName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + + item := itemRaw.(map[string]interface{}) + itemName := flattenNestedTagsLocationTagBindingName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + return idx, item, nil + } + return -1, nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_binding.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_binding.go new file mode 100644 index 0000000000..5af248857a --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_binding.go @@ -0,0 +1,368 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package tags + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceTagsTagBinding() *schema.Resource { + return &schema.Resource{ + Create: resourceTagsTagBindingCreate, + Read: resourceTagsTagBindingRead, + Delete: resourceTagsTagBindingDelete, + + Importer: &schema.ResourceImporter{ + State: resourceTagsTagBindingImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The full resource name of the resource the TagValue is bound to. E.g. //cloudresourcemanager.googleapis.com/projects/123`, + }, + "tag_value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The TagValue of the TagBinding. 
Must be of the form tagValues/456.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The generated id for the TagBinding. This is a string of the form: 'tagBindings/{full-resource-name}/{tag-value-name}'`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceTagsTagBindingCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + parentProp, err := expandNestedTagsTagBindingParent(d.Get("parent"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { + obj["parent"] = parentProp + } + tagValueProp, err := expandNestedTagsTagBindingTagValue(d.Get("tag_value"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tag_value"); !tpgresource.IsEmptyValue(reflect.ValueOf(tagValueProp)) && (ok || !reflect.DeepEqual(v, tagValueProp)) { + obj["tagValue"] = tagValueProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "tagBindings/{{parent}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsBasePath}}tagBindings") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TagBinding: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + 
return fmt.Errorf("Error creating TagBinding: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "tagBindings/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = TagsOperationWaitTimeWithResponse( + config, res, &opRes, "Creating TagBinding", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create TagBinding: %s", err) + } + + if _, ok := opRes["tagBindings"]; ok { + opRes, err = flattenNestedTagsTagBinding(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error getting nested object from operation response: %s", err) + } + if opRes == nil { + // Object isn't there any more - remove it from the state. + return fmt.Errorf("Error decoding response from operation, could not find nested object") + } + } + if err := d.Set("name", flattenNestedTagsTagBindingName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "tagBindings/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating TagBinding %q: %#v", d.Id(), res) + + return resourceTagsTagBindingRead(d, meta) +} + +func resourceTagsTagBindingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsBasePath}}tagBindings/?parent={{parent}}&pageSize=300") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TagsTagBinding %q", d.Id())) + } + + res, err = flattenNestedTagsTagBinding(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. 
+ log.Printf("[DEBUG] Removing TagsTagBinding because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("name", flattenNestedTagsTagBindingName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TagBinding: %s", err) + } + if err := d.Set("parent", flattenNestedTagsTagBindingParent(res["parent"], d, config)); err != nil { + return fmt.Errorf("Error reading TagBinding: %s", err) + } + if err := d.Set("tag_value", flattenNestedTagsTagBindingTagValue(res["tagValue"], d, config)); err != nil { + return fmt.Errorf("Error reading TagBinding: %s", err) + } + + return nil +} + +func resourceTagsTagBindingDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "tagBindings/{{parent}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsBasePath}}tagBindings/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting TagBinding %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "TagBinding") + } + + err = TagsOperationWaitTime( + config, res, "Deleting TagBinding", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished 
deleting TagBinding %q: %#v", d.Id(), res) + return nil +} + +func resourceTagsTagBindingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + // current import_formats can't import fields with forward slashes in their value + if err := tpgresource.ParseImportId([]string{ + "tagBindings/(?P.+)", + "(?P.+)", + }, d, config); err != nil { + return nil, err + } + + name := d.Get("name").(string) + d.SetId(name) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedTagsTagBindingName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + parts := strings.Split(v.(string), "/") + return strings.Join(parts[len(parts)-3:], "/") +} + +func flattenNestedTagsTagBindingParent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedTagsTagBindingTagValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedTagsTagBindingParent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedTagsTagBindingTagValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedTagsTagBinding(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["tagBindings"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value tagBindings. 
Actual value: %v", v) + } + + _, item, err := resourceTagsTagBindingFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceTagsTagBindingFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName := d.Get("name") + expectedFlattenedName := flattenNestedTagsTagBindingName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemName := flattenNestedTagsTagBindingName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_binding_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_binding_sweeper.go new file mode 100644 index 0000000000..437b90d4d8 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_binding_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package tags + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("TagsTagBinding", testSweepTagsTagBinding) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepTagsTagBinding(region string) error { + resourceName := "TagsTagBinding" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://cloudresourcemanager.googleapis.com/v3/tagBindings", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["tagBindings"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudresourcemanager.googleapis.com/v3/tagBindings/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + 
log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_key.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_key.go new file mode 100644 index 0000000000..667394d9b9 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_key.go @@ -0,0 +1,495 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package tags + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceTagsTagKey() *schema.Resource { + return &schema.Resource{ + Create: resourceTagsTagKeyCreate, + Read: resourceTagsTagKeyRead, + Update: resourceTagsTagKeyUpdate, + Delete: resourceTagsTagKeyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceTagsTagKeyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `Input only. The resource name of the new TagKey's parent. Must be of the form organizations/{org_id} or projects/{project_id_or_number}.`, + }, + "short_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 63), + Description: `Input only. The user friendly name for a TagKey. The short name should be unique for TagKeys within the same tag namespace. + +The short name must be 1-63 characters, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 256), + Description: `User-assigned description of the TagKey. 
Must not exceed 256 characters.`, + }, + "purpose": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"GCE_FIREWALL", ""}), + Description: `Optional. A purpose cannot be changed once set. + +A purpose denotes that this Tag is intended for use in policies of a specific policy engine, and will involve that policy engine in management operations involving this Tag. Possible values: ["GCE_FIREWALL"]`, + }, + "purpose_data": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Optional. Purpose data cannot be changed once set. + +Purpose data corresponds to the policy system that the tag is intended for. For example, the GCE_FIREWALL purpose expects data in the following format: 'network = "<project-name>/<vpc-name>"'.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Creation time. + +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The generated numeric id for the TagKey.`, + }, + "namespaced_name": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Namespaced name of the TagKey.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Update time. + +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceTagsTagKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + parentProp, err := expandTagsTagKeyParent(d.Get("parent"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { + obj["parent"] = parentProp + } + shortNameProp, err := expandTagsTagKeyShortName(d.Get("short_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("short_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(shortNameProp)) && (ok || !reflect.DeepEqual(v, shortNameProp)) { + obj["shortName"] = shortNameProp + } + descriptionProp, err := expandTagsTagKeyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + purposeProp, err := expandTagsTagKeyPurpose(d.Get("purpose"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("purpose"); !tpgresource.IsEmptyValue(reflect.ValueOf(purposeProp)) && (ok || !reflect.DeepEqual(v, purposeProp)) { + obj["purpose"] = purposeProp + } + purposeDataProp, err := expandTagsTagKeyPurposeData(d.Get("purpose_data"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("purpose_data"); !tpgresource.IsEmptyValue(reflect.ValueOf(purposeDataProp)) && (ok || !reflect.DeepEqual(v, purposeDataProp)) { + obj["purposeData"] = purposeDataProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, 
"tagKeys/{{parent}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsBasePath}}tagKeys") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TagKey: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating TagKey: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "tagKeys/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = TagsOperationWaitTimeWithResponse( + config, res, &opRes, "Creating TagKey", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create TagKey: %s", err) + } + + if err := d.Set("name", flattenTagsTagKeyName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "tagKeys/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating TagKey %q: %#v", d.Id(), res) + + return resourceTagsTagKeyRead(d, meta) +} + +func resourceTagsTagKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsBasePath}}tagKeys/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TagsTagKey %q", d.Id())) + } + + if err := d.Set("name", flattenTagsTagKeyName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TagKey: %s", err) + } + if err := d.Set("parent", flattenTagsTagKeyParent(res["parent"], d, config)); err != nil { + return fmt.Errorf("Error reading TagKey: %s", err) + } + if err := d.Set("short_name", flattenTagsTagKeyShortName(res["shortName"], d, config)); err != nil { + return fmt.Errorf("Error reading TagKey: %s", err) + } + if err := d.Set("namespaced_name", flattenTagsTagKeyNamespacedName(res["namespacedName"], d, config)); err != nil { + return fmt.Errorf("Error reading TagKey: %s", err) + } + if err := d.Set("description", flattenTagsTagKeyDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading TagKey: %s", err) + } + if err := d.Set("create_time", flattenTagsTagKeyCreateTime(res["createTime"], d, 
config)); err != nil { + return fmt.Errorf("Error reading TagKey: %s", err) + } + if err := d.Set("update_time", flattenTagsTagKeyUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading TagKey: %s", err) + } + if err := d.Set("purpose", flattenTagsTagKeyPurpose(res["purpose"], d, config)); err != nil { + return fmt.Errorf("Error reading TagKey: %s", err) + } + + return nil +} + +func resourceTagsTagKeyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandTagsTagKeyDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "tagKeys/{{parent}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsBasePath}}tagKeys/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating TagKey %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating TagKey %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TagKey %q: %#v", d.Id(), res) + } + + err = TagsOperationWaitTime( + config, res, "Updating TagKey", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceTagsTagKeyRead(d, meta) +} + +func resourceTagsTagKeyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "tagKeys/{{parent}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsBasePath}}tagKeys/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting TagKey %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "TagKey") + } + + err = TagsOperationWaitTime( + config, res, "Deleting TagKey", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting TagKey %q: %#v", d.Id(), res) + return nil +} + 
+func resourceTagsTagKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "tagKeys/(?P<name>[^/]+)", + "(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "tagKeys/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenTagsTagKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenTagsTagKeyParent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTagsTagKeyShortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTagsTagKeyNamespacedName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTagsTagKeyDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTagsTagKeyCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTagsTagKeyUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTagsTagKeyPurpose(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandTagsTagKeyParent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTagsTagKeyShortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandTagsTagKeyDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTagsTagKeyPurpose(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTagsTagKeyPurposeData(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_key_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_key_sweeper.go new file mode 100644 index 0000000000..828ad2a733 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_key_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package tags + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("TagsTagKey", testSweepTagsTagKey) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepTagsTagKey(region string) error { + resourceName := "TagsTagKey" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://cloudresourcemanager.googleapis.com/v3/tagKeys", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + 
return nil + } + + resourceList, ok := res["tagKeys"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudresourcemanager.googleapis.com/v3/tagKeys/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_value.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_value.go new file mode 100644 index 0000000000..f4e2d8ce6b --- /dev/null +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_value.go @@ -0,0 +1,440 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package tags + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceTagsTagValue() *schema.Resource { + return &schema.Resource{ + Create: resourceTagsTagValueCreate, + Read: resourceTagsTagValueRead, + Update: resourceTagsTagValueUpdate, + Delete: resourceTagsTagValueDelete, + + Importer: &schema.ResourceImporter{ + State: resourceTagsTagValueImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Input only. The resource name of the new TagValue's parent. 
Must be of the form tagKeys/{tag_key_id}.`, + }, + "short_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 63), + Description: `Input only. User-assigned short name for TagValue. The short name should be unique for TagValues within the same parent TagKey. + +The short name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 256), + Description: `User-assigned description of the TagValue. Must not exceed 256 characters.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Creation time. + +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The generated numeric id for the TagValue.`, + }, + "namespaced_name": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Namespaced name of the TagValue. Will be in the format {parentNamespace}/{tagKeyShortName}/{shortName}.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Update time. +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceTagsTagValueCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + parentProp, err := expandTagsTagValueParent(d.Get("parent"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { + obj["parent"] = parentProp + } + shortNameProp, err := expandTagsTagValueShortName(d.Get("short_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("short_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(shortNameProp)) && (ok || !reflect.DeepEqual(v, shortNameProp)) { + obj["shortName"] = shortNameProp + } + descriptionProp, err := expandTagsTagValueDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "tagValues/{{parent}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsBasePath}}tagValues") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new TagValue: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + 
Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating TagValue: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "tagValues/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = TagsOperationWaitTimeWithResponse( + config, res, &opRes, "Creating TagValue", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create TagValue: %s", err) + } + + if err := d.Set("name", flattenTagsTagValueName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "tagValues/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating TagValue %q: %#v", d.Id(), res) + + return resourceTagsTagValueRead(d, meta) +} + +func resourceTagsTagValueRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsBasePath}}tagValues/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if 
err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TagsTagValue %q", d.Id())) + } + + if err := d.Set("name", flattenTagsTagValueName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading TagValue: %s", err) + } + if err := d.Set("parent", flattenTagsTagValueParent(res["parent"], d, config)); err != nil { + return fmt.Errorf("Error reading TagValue: %s", err) + } + if err := d.Set("short_name", flattenTagsTagValueShortName(res["shortName"], d, config)); err != nil { + return fmt.Errorf("Error reading TagValue: %s", err) + } + if err := d.Set("namespaced_name", flattenTagsTagValueNamespacedName(res["namespacedName"], d, config)); err != nil { + return fmt.Errorf("Error reading TagValue: %s", err) + } + if err := d.Set("description", flattenTagsTagValueDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading TagValue: %s", err) + } + if err := d.Set("create_time", flattenTagsTagValueCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading TagValue: %s", err) + } + if err := d.Set("update_time", flattenTagsTagValueUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading TagValue: %s", err) + } + + return nil +} + +func resourceTagsTagValueUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + descriptionProp, err := expandTagsTagValueDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "tagValues/{{parent}}") + if err != nil { + return 
err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsBasePath}}tagValues/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating TagValue %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating TagValue %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating TagValue %q: %#v", d.Id(), res) + } + + err = TagsOperationWaitTime( + config, res, "Updating TagValue", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceTagsTagValueRead(d, meta) +} + +func resourceTagsTagValueDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "tagValues/{{parent}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{TagsBasePath}}tagValues/{{name}}") + if err 
!= nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting TagValue %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "TagValue") + } + + err = TagsOperationWaitTime( + config, res, "Deleting TagValue", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting TagValue %q: %#v", d.Id(), res) + return nil +} + +func resourceTagsTagValueImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "tagValues/(?P<name>[^/]+)", + "(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "tagValues/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenTagsTagValueName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenTagsTagValueParent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTagsTagValueShortName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTagsTagValueNamespacedName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} 
+ +func flattenTagsTagValueDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTagsTagValueCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTagsTagValueUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandTagsTagValueParent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTagsTagValueShortName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTagsTagValueDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_value_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_value_sweeper.go new file mode 100644 index 0000000000..005d451988 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/resource_tags_tag_value_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package tags + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("TagsTagValue", testSweepTagsTagValue) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepTagsTagValue(region string) error { + resourceName := "TagsTagValue" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://cloudresourcemanager.googleapis.com/v3/tagValues", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, 
err) + return nil + } + + resourceList, ok := res["tagValues"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudresourcemanager.googleapis.com/v3/tagValues/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/tags_location_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/tags_location_operation.go new file mode 100644 index 0000000000..7a17e721ef --- 
/dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/tags_location_operation.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tags + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type TagsLocationOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + tpgresource.CommonOperationWaiter +} + +func (w *TagsLocationOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + location := GetLocationFromOpName(w.CommonOperationWaiter.Op.Name) + if location != w.CommonOperationWaiter.Op.Name { + // Found location in Op.Name, fill it in TagsLocationBasePath and rewrite URL + url := fmt.Sprintf("%s%s", strings.Replace(w.Config.TagsLocationBasePath, "{{location}}", location, 1), w.CommonOperationWaiter.Op.Name) + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + RawURL: url, + UserAgent: w.UserAgent, + }) + } else { + url := fmt.Sprintf("%s%s", w.Config.TagsBasePath, w.CommonOperationWaiter.Op.Name) + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + RawURL: url, + UserAgent: w.UserAgent, + }) + } +} + +func createTagsLocationWaiter(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string) (*TagsLocationOperationWaiter, error) { + w := &TagsLocationOperationWaiter{ + Config: config, + UserAgent: userAgent, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +func TagsLocationOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response 
*map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + w, err := createTagsLocationWaiter(config, op, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func TagsLocationOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createTagsLocationWaiter(config, op, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} + +func GetLocationFromOpName(opName string) string { + re := regexp.MustCompile("operations/(?:rctb|rdtb)\\.([a-zA-Z0-9-]*)\\.([0-9]*)") + switch { + case re.MatchString(opName): + if res := re.FindStringSubmatch(opName); len(res) == 3 && res[1] != "" { + return res[1] + } + } + return opName +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/tags_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/tags_operation.go new file mode 100644 index 0000000000..4e0e99b7aa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tags/tags_operation.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package tags + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type TagsOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + tpgresource.CommonOperationWaiter +} + +func (w *TagsOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.TagsBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createTagsWaiter(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string) (*TagsOperationWaiter, error) { + w := &TagsOperationWaiter{ + Config: config, + UserAgent: userAgent, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func TagsOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + w, err := createTagsWaiter(config, op, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func TagsOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createTagsWaiter(config, op, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tpu/data_source_tpu_tensorflow_versions.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tpu/data_source_tpu_tensorflow_versions.go new file mode 100644 index 0000000000..25a23b064b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tpu/data_source_tpu_tensorflow_versions.go @@ -0,0 +1,95 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpu + +import ( + "fmt" + "log" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceTpuTensorflowVersions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceTpuTensorFlowVersionsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceTpuTensorFlowVersionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/tensorflowVersions") + if err != nil { + return err + 
} + + versionsRaw, err := tpgresource.PaginatedListRequest(project, url, userAgent, config, flattenTpuTensorflowVersions) + if err != nil { + return fmt.Errorf("Error listing TPU Tensorflow versions: %s", err) + } + + versions := make([]string, len(versionsRaw)) + for i, ver := range versionsRaw { + versions[i] = ver.(string) + } + sort.Strings(versions) + + log.Printf("[DEBUG] Received Google TPU Tensorflow Versions: %q", versions) + + if err := d.Set("versions", versions); err != nil { + return fmt.Errorf("Error setting versions: %s", err) + } + if err := d.Set("zone", zone); err != nil { + return fmt.Errorf("Error setting zone: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/zones/%s", project, zone)) + + return nil +} + +func flattenTpuTensorflowVersions(resp map[string]interface{}) []interface{} { + verObjList := resp["tensorflowVersions"].([]interface{}) + versions := make([]interface{}, len(verObjList)) + for i, v := range verObjList { + verObj := v.(map[string]interface{}) + versions[i] = verObj["version"] + } + return versions +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tpu/resource_tpu_node.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tpu/resource_tpu_node.go new file mode 100644 index 0000000000..ced68cde97 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tpu/resource_tpu_node.go @@ -0,0 +1,745 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package tpu + +import ( + "context" + "fmt" + "log" + "reflect" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// compareTpuNodeSchedulingConfig diff suppresses for the default +// scheduling, i.e. if preemptible is false, the API may either return no +// schedulingConfig or an empty schedulingConfig. 
+func compareTpuNodeSchedulingConfig(k, old, new string, d *schema.ResourceData) bool { + if k == "scheduling_config.0.preemptible" { + return old == "" && new == "false" + } + if k == "scheduling_config.#" { + o, n := d.GetChange("scheduling_config.0.preemptible") + return o.(bool) == n.(bool) + } + return false +} + +func tpuNodeCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + old, new := diff.GetChange("network") + config := meta.(*transport_tpg.Config) + + networkLinkRegex := regexp.MustCompile("projects/(.+)/global/networks/(.+)") + + var pid string + + if networkLinkRegex.MatchString(new.(string)) { + parts := networkLinkRegex.FindStringSubmatch(new.(string)) + pid = parts[1] + } + + if pid == "" { + return nil + } + + project, err := config.NewResourceManagerClient(config.UserAgent).Projects.Get(pid).Do() + if err != nil { + return fmt.Errorf("Failed to retrieve project, pid: %s, err: %s", pid, err) + } + + if networkLinkRegex.MatchString(old.(string)) { + parts := networkLinkRegex.FindStringSubmatch(old.(string)) + i, err := tpgresource.StringToFixed64(parts[1]) + if err == nil { + if project.ProjectNumber == i { + if err := diff.SetNew("network", old); err != nil { + return err + } + return nil + } + } + } + return nil +} + +func ResourceTPUNode() *schema.Resource { + return &schema.Resource{ + Create: resourceTPUNodeCreate, + Read: resourceTPUNodeRead, + Update: resourceTPUNodeUpdate, + Delete: resourceTPUNodeDelete, + + Importer: &schema.ResourceImporter{ + State: resourceTPUNodeImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpuNodeCustomizeDiff, + ), + + Schema: map[string]*schema.Schema{ + "accelerator_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The type of hardware 
accelerators associated with this node.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The immutable name of the TPU.`, + }, + "tensorflow_version": { + Type: schema.TypeString, + Required: true, + Description: `The version of Tensorflow running in the Node.`, + }, + "cidr_block": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The CIDR block that the TPU node will use when selecting an IP +address. This CIDR block must be a /29 block; the Compute Engine +networks API forbids a smaller block, and using a larger block would +be wasteful (a node can only consume one IP address). + +Errors will occur if the CIDR block has already been used for a +currently existing TPU node, the CIDR block conflicts with any +subnetworks in the user's provided network, or the provided network +is peered with another network that is using that CIDR block.`, + ConflictsWith: []string{"use_service_networking"}, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The user-supplied description of the TPU. Maximum of 512 characters.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Resource labels to represent user provided metadata.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "network": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of a network to peer the TPU node to. It must be a +preexisting Compute Engine network inside of the project on which +this API has been activated. 
If none is provided, "default" will be +used.`, + }, + "scheduling_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareTpuNodeSchedulingConfig, + Description: `Sets the scheduling options for this TPU instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "preemptible": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareTpuNodeSchedulingConfig, + Description: `Defines whether the TPU instance is preemptible.`, + }, + }, + }, + }, + "use_service_networking": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether the VPC peering for the node is set up through Service Networking API. +The VPC Peering should be set up before provisioning the node. If this field is set, +cidr_block field should not be specified. If the network that you want to peer the +TPU Node to is a Shared VPC network, the node must be created with this this field enabled.`, + Default: false, + ConflictsWith: []string{"cidr_block"}, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The GCP location for the TPU. If it is not provided, the provider zone is used.`, + }, + "network_endpoints": { + Type: schema.TypeList, + Computed: true, + Description: `The network endpoints where TPU workers can be accessed and sent work. 
+It is recommended that Tensorflow clients of the node first reach out +to the first (index 0) entry.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `The IP address of this network endpoint.`, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + Description: `The port of this network endpoint.`, + }, + }, + }, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Description: `The service account used to run the tensor flow services within the +node. To share resources, including Google Cloud Storage data, with +the Tensorflow job running in the Node, this account must have +permissions to that data.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceTPUNodeCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandTPUNodeName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandTPUNodeDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + acceleratorTypeProp, err := expandTPUNodeAcceleratorType(d.Get("accelerator_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("accelerator_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(acceleratorTypeProp)) && (ok || 
!reflect.DeepEqual(v, acceleratorTypeProp)) { + obj["acceleratorType"] = acceleratorTypeProp + } + tensorflowVersionProp, err := expandTPUNodeTensorflowVersion(d.Get("tensorflow_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tensorflow_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(tensorflowVersionProp)) && (ok || !reflect.DeepEqual(v, tensorflowVersionProp)) { + obj["tensorflowVersion"] = tensorflowVersionProp + } + networkProp, err := expandTPUNodeNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + cidrBlockProp, err := expandTPUNodeCidrBlock(d.Get("cidr_block"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cidr_block"); !tpgresource.IsEmptyValue(reflect.ValueOf(cidrBlockProp)) && (ok || !reflect.DeepEqual(v, cidrBlockProp)) { + obj["cidrBlock"] = cidrBlockProp + } + useServiceNetworkingProp, err := expandTPUNodeUseServiceNetworking(d.Get("use_service_networking"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("use_service_networking"); !tpgresource.IsEmptyValue(reflect.ValueOf(useServiceNetworkingProp)) && (ok || !reflect.DeepEqual(v, useServiceNetworkingProp)) { + obj["useServiceNetworking"] = useServiceNetworkingProp + } + schedulingConfigProp, err := expandTPUNodeSchedulingConfig(d.Get("scheduling_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("scheduling_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(schedulingConfigProp)) && (ok || !reflect.DeepEqual(v, schedulingConfigProp)) { + obj["schedulingConfig"] = schedulingConfigProp + } + labelsProp, err := expandTPUNodeLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/nodes?nodeId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Node: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Node: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Node: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{zone}}/nodes/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = TPUOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Node", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Node: %s", err) + } + + if err := d.Set("name", flattenTPUNodeName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{zone}}/nodes/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Node %q: %#v", d.Id(), res) + + return resourceTPUNodeRead(d, meta) +} + +func resourceTPUNodeRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/nodes/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Node: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TPUNode %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Node: %s", err) + } + + if err := d.Set("name", flattenTPUNodeName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Node: %s", err) + } + if err := d.Set("description", flattenTPUNodeDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Node: %s", err) + } + if err := d.Set("accelerator_type", flattenTPUNodeAcceleratorType(res["acceleratorType"], d, config)); err != nil { + return fmt.Errorf("Error reading Node: %s", err) + } + if err := d.Set("tensorflow_version", 
flattenTPUNodeTensorflowVersion(res["tensorflowVersion"], d, config)); err != nil { + return fmt.Errorf("Error reading Node: %s", err) + } + if err := d.Set("network", flattenTPUNodeNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Node: %s", err) + } + if err := d.Set("cidr_block", flattenTPUNodeCidrBlock(res["cidrBlock"], d, config)); err != nil { + return fmt.Errorf("Error reading Node: %s", err) + } + if err := d.Set("service_account", flattenTPUNodeServiceAccount(res["serviceAccount"], d, config)); err != nil { + return fmt.Errorf("Error reading Node: %s", err) + } + if err := d.Set("use_service_networking", flattenTPUNodeUseServiceNetworking(res["useServiceNetworking"], d, config)); err != nil { + return fmt.Errorf("Error reading Node: %s", err) + } + if err := d.Set("scheduling_config", flattenTPUNodeSchedulingConfig(res["schedulingConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Node: %s", err) + } + if err := d.Set("network_endpoints", flattenTPUNodeNetworkEndpoints(res["networkEndpoints"], d, config)); err != nil { + return fmt.Errorf("Error reading Node: %s", err) + } + if err := d.Set("labels", flattenTPUNodeLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Node: %s", err) + } + + return nil +} + +func resourceTPUNodeUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Node: %s", err) + } + billingProject = project + + d.Partial(true) + + if d.HasChange("tensorflow_version") { + obj := make(map[string]interface{}) + + tensorflowVersionProp, err := expandTPUNodeTensorflowVersion(d.Get("tensorflow_version"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("tensorflow_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, tensorflowVersionProp)) { + obj["tensorflowVersion"] = tensorflowVersionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/nodes/{{name}}:reimage") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating Node %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Node %q: %#v", d.Id(), res) + } + + err = TPUOperationWaitTime( + config, res, project, "Updating Node", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceTPUNodeRead(d, meta) +} + +func resourceTPUNodeDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Node: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/nodes/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Node %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, 
err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Node") + } + + err = TPUOperationWaitTime( + config, res, project, "Deleting Node", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Node %q: %#v", d.Id(), res) + return nil +} + +func resourceTPUNodeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/nodes/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{zone}}/nodes/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenTPUNodeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenTPUNodeDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTPUNodeAcceleratorType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTPUNodeTensorflowVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTPUNodeNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTPUNodeCidrBlock(v interface{}, 
d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTPUNodeServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTPUNodeUseServiceNetworking(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTPUNodeSchedulingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["preemptible"] = + flattenTPUNodeSchedulingConfigPreemptible(original["preemptible"], d, config) + return []interface{}{transformed} +} +func flattenTPUNodeSchedulingConfigPreemptible(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTPUNodeNetworkEndpoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "ip_address": flattenTPUNodeNetworkEndpointsIpAddress(original["ipAddress"], d, config), + "port": flattenTPUNodeNetworkEndpointsPort(original["port"], d, config), + }) + } + return transformed +} +func flattenTPUNodeNetworkEndpointsIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenTPUNodeNetworkEndpointsPort(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err 
== nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenTPUNodeLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandTPUNodeName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTPUNodeDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTPUNodeAcceleratorType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTPUNodeTensorflowVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTPUNodeNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTPUNodeCidrBlock(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTPUNodeUseServiceNetworking(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTPUNodeSchedulingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPreemptible, err := expandTPUNodeSchedulingConfigPreemptible(original["preemptible"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPreemptible); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["preemptible"] = transformedPreemptible + } + + return transformed, nil +} + +func expandTPUNodeSchedulingConfigPreemptible(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandTPUNodeLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tpu/resource_tpu_node_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tpu/resource_tpu_node_sweeper.go new file mode 100644 index 0000000000..413a1554c7 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tpu/resource_tpu_node_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package tpu + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("TPUNode", testSweepTPUNode) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepTPUNode(region string) error { + resourceName := "TPUNode" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://tpu.googleapis.com/v1/projects/{{project}}/locations/{{zone}}/nodes", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, 
err) + return nil + } + + resourceList, ok := res["nodes"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://tpu.googleapis.com/v1/projects/{{project}}/locations/{{zone}}/nodes/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tpu/tpu_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tpu/tpu_operation.go new file mode 100644 index 0000000000..43660bedf2 --- /dev/null 
+++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/tpu/tpu_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package tpu + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type TPUOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *TPUOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.TPUBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createTPUWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*TPUOperationWaiter, error) { + w := &TPUOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func TPUOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createTPUWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func TPUOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createTPUWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/data_source_vertex_ai_index.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/data_source_vertex_ai_index.go new file mode 100644 index 0000000000..2ce6cbd147 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/data_source_vertex_ai_index.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package vertexai + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceVertexAIIndex() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceVertexAIIndex().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name", "region") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceVertexAIIndexRead, + Schema: dsSchema, + } +} + +func dataSourceVertexAIIndexRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/indexes/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return resourceVertexAIIndexRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_dataset.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_dataset.go new file mode 100644 
index 0000000000..163554f3aa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_dataset.go @@ -0,0 +1,491 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package vertexai + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceVertexAIDataset() *schema.Resource { + return &schema.Resource{ + Create: resourceVertexAIDatasetCreate, + Read: resourceVertexAIDatasetRead, + Update: resourceVertexAIDatasetUpdate, + Delete: resourceVertexAIDatasetDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `The user-defined name of the Dataset. 
The name can be up to 128 characters long and can be consist of any UTF-8 characters.`, + }, + "metadata_schema_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Points to a YAML file stored on Google Cloud Storage describing additional information about the Dataset. The schema is defined as an OpenAPI 3.0.2 Schema Object. The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/metadata/.`, + }, + "encryption_spec": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Customer-managed encryption key spec for a Dataset. If set, this Dataset and all sub-resources of this Dataset will be secured by this key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. +Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created.`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `A set of key/value label pairs to assign to this Workflow.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The region of the dataset. eg us-central1`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the dataset was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the Dataset. 
This value is set by Google.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the dataset was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceVertexAIDatasetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandVertexAIDatasetDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandVertexAIDatasetLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + encryptionSpecProp, err := expandVertexAIDatasetEncryptionSpec(d.Get("encryption_spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionSpecProp)) && (ok || !reflect.DeepEqual(v, encryptionSpecProp)) { + obj["encryptionSpec"] = encryptionSpecProp + } + metadataSchemaUriProp, err := expandVertexAIDatasetMetadataSchemaUri(d.Get("metadata_schema_uri"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata_schema_uri"); !tpgresource.IsEmptyValue(reflect.ValueOf(metadataSchemaUriProp)) && (ok || !reflect.DeepEqual(v, metadataSchemaUriProp)) { + 
obj["metadataSchemaUri"] = metadataSchemaUriProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/datasets") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Dataset: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Dataset: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Dataset: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = VertexAIOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Dataset", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Dataset: %s", err) + } + + if err := d.Set("name", flattenVertexAIDatasetName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Dataset %q: %#v", d.Id(), res) + + return resourceVertexAIDatasetRead(d, meta) +} + +func resourceVertexAIDatasetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Dataset: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("VertexAIDataset %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + + if err := d.Set("name", flattenVertexAIDatasetName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("display_name", flattenVertexAIDatasetDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("create_time", flattenVertexAIDatasetCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("update_time", flattenVertexAIDatasetUpdateTime(res["updateTime"], d, config)); err != 
nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("labels", flattenVertexAIDatasetLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("encryption_spec", flattenVertexAIDatasetEncryptionSpec(res["encryptionSpec"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + if err := d.Set("metadata_schema_uri", flattenVertexAIDatasetMetadataSchemaUri(res["metadataSchemaUri"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } + + return nil +} + +func resourceVertexAIDatasetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Dataset: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandVertexAIDatasetDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandVertexAIDatasetLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Dataset %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if 
d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Dataset %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Dataset %q: %#v", d.Id(), res) + } + + return resourceVertexAIDatasetRead(d, meta) +} + +func resourceVertexAIDatasetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Dataset: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Dataset %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + 
if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Dataset") + } + + err = VertexAIOperationWaitTime( + config, res, project, "Deleting Dataset", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Dataset %q: %#v", d.Id(), res) + return nil +} + +func flattenVertexAIDatasetName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIDatasetDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIDatasetCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIDatasetUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIDatasetLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIDatasetEncryptionSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenVertexAIDatasetEncryptionSpecKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIDatasetEncryptionSpecKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIDatasetMetadataSchemaUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandVertexAIDatasetDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIDatasetLabels(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandVertexAIDatasetEncryptionSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandVertexAIDatasetEncryptionSpecKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandVertexAIDatasetEncryptionSpecKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIDatasetMetadataSchemaUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_endpoint.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_endpoint.go similarity index 79% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_endpoint.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_endpoint.go index 41097ec19b..4af6a85da0 100644 --- 
a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_endpoint.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_endpoint.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package vertexai import ( "fmt" @@ -22,6 +25,9 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceVertexAIEndpoint() *schema.Resource { @@ -93,6 +99,12 @@ func ResourceVertexAIEndpoint() *schema.Resource { ForceNew: true, Description: `The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): 'projects/{project}/global/networks/{network}'. 
Where '{project}' is a project number, as in '12345', and '{network}' is network name.`, }, + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The region for the resource`, + }, "create_time": { Type: schema.TypeString, Computed: true, @@ -289,8 +301,8 @@ func ResourceVertexAIEndpoint() *schema.Resource { } func resourceVertexAIEndpointCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -299,35 +311,35 @@ func resourceVertexAIEndpointCreate(d *schema.ResourceData, meta interface{}) er displayNameProp, err := expandVertexAIEndpointDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } descriptionProp, err := expandVertexAIEndpointDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } labelsProp, err := expandVertexAIEndpointLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := 
d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } encryptionSpecProp, err := expandVertexAIEndpointEncryptionSpec(d.Get("encryption_spec"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("encryption_spec"); !isEmptyValue(reflect.ValueOf(encryptionSpecProp)) && (ok || !reflect.DeepEqual(v, encryptionSpecProp)) { + } else if v, ok := d.GetOkExists("encryption_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionSpecProp)) && (ok || !reflect.DeepEqual(v, encryptionSpecProp)) { obj["encryptionSpec"] = encryptionSpecProp } networkProp, err := expandVertexAIEndpointNetwork(d.Get("network"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { obj["network"] = networkProp } - url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{location}}/endpoints?endpointId={{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{location}}/endpoints?endpointId={{name}}") if err != nil { return err } @@ -335,24 +347,32 @@ func resourceVertexAIEndpointCreate(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Creating new Endpoint: %#v", obj) billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Endpoint: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - 
res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating Endpoint: %s", err) } // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -372,7 +392,7 @@ func resourceVertexAIEndpointCreate(d *schema.ResourceData, meta interface{}) er } // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -384,33 +404,39 @@ func resourceVertexAIEndpointCreate(d *schema.ResourceData, meta interface{}) er } func resourceVertexAIEndpointRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := 
tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Endpoint: %s", err) } billingProject = project // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("VertexAIEndpoint %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("VertexAIEndpoint %q", d.Id())) } if err := d.Set("project", project); err != nil { @@ -449,15 +475,15 @@ func resourceVertexAIEndpointRead(d *schema.ResourceData, meta interface{}) erro } func resourceVertexAIEndpointUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Endpoint: %s", err) } @@ -467,23 +493,23 @@ func resourceVertexAIEndpointUpdate(d *schema.ResourceData, meta interface{}) er displayNameProp, err := expandVertexAIEndpointDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { 
obj["displayName"] = displayNameProp } descriptionProp, err := expandVertexAIEndpointDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } labelsProp, err := expandVertexAIEndpointLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } - url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") if err != nil { return err } @@ -502,19 +528,27 @@ func resourceVertexAIEndpointUpdate(d *schema.ResourceData, meta interface{}) er if d.HasChange("labels") { updateMask = append(updateMask, "labels") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := 
SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating Endpoint %q: %s", d.Id(), err) @@ -526,21 +560,21 @@ func resourceVertexAIEndpointUpdate(d *schema.ResourceData, meta interface{}) er } func resourceVertexAIEndpointDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - project, err := getProject(d, config) + project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for Endpoint: %s", err) } billingProject = project - url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") if err != nil { return err } @@ -549,13 +583,21 @@ func resourceVertexAIEndpointDelete(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Deleting Endpoint %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: 
billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "Endpoint") + return transport_tpg.HandleNotFoundError(err, d, "Endpoint") } err = VertexAIOperationWaitTime( @@ -571,8 +613,8 @@ func resourceVertexAIEndpointDelete(d *schema.ResourceData, meta interface{}) er } func resourceVertexAIEndpointImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "projects/(?P[^/]+)/locations/(?P[^/]+)/endpoints/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", @@ -581,7 +623,7 @@ func resourceVertexAIEndpointImport(d *schema.ResourceData, meta interface{}) ([ } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -590,15 +632,15 @@ func resourceVertexAIEndpointImport(d *schema.ResourceData, meta interface{}) ([ return []*schema.ResourceData{d}, nil } -func flattenVertexAIEndpointDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModels(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -627,7 +669,7 @@ func flattenVertexAIEndpointDeployedModels(v interface{}, d *schema.ResourceData } return transformed } -func flattenVertexAIEndpointDeployedModelsDedicatedResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsDedicatedResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -646,7 +688,7 @@ func flattenVertexAIEndpointDeployedModelsDedicatedResources(v interface{}, d *s flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs(original["autoscalingMetricSpecs"], d, config) return []interface{}{transformed} } -func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -663,18 +705,18 @@ func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpec(v interf flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorCount(original["acceleratorCount"], d, config) return []interface{}{transformed} } -func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecMachineType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecMachineType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorType(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorType(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -688,10 +730,10 @@ func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecAccelerat return v // let terraform core handle it otherwise } -func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMinReplicaCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMinReplicaCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -705,10 +747,10 @@ func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMinReplicaCount(v in return v // let terraform core handle it otherwise } -func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMaxReplicaCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMaxReplicaCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -722,7 +764,7 @@ func 
flattenVertexAIEndpointDeployedModelsDedicatedResourcesMaxReplicaCount(v in return v // let terraform core handle it otherwise } -func flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v } @@ -741,14 +783,14 @@ func flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpe } return transformed } -func flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecsMetricName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecsMetricName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecsTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecsTarget(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -762,7 +804,7 @@ func flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpe return v // let terraform core handle it otherwise } -func flattenVertexAIEndpointDeployedModelsAutomaticResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsAutomaticResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -777,10 +819,10 @@ func 
flattenVertexAIEndpointDeployedModelsAutomaticResources(v interface{}, d *s flattenVertexAIEndpointDeployedModelsAutomaticResourcesMaxReplicaCount(original["maxReplicaCount"], d, config) return []interface{}{transformed} } -func flattenVertexAIEndpointDeployedModelsAutomaticResourcesMinReplicaCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsAutomaticResourcesMinReplicaCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -794,10 +836,10 @@ func flattenVertexAIEndpointDeployedModelsAutomaticResourcesMinReplicaCount(v in return v // let terraform core handle it otherwise } -func flattenVertexAIEndpointDeployedModelsAutomaticResourcesMaxReplicaCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsAutomaticResourcesMaxReplicaCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -811,35 +853,35 @@ func flattenVertexAIEndpointDeployedModelsAutomaticResourcesMaxReplicaCount(v in return v // let terraform core handle it otherwise } -func flattenVertexAIEndpointDeployedModelsId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsModel(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func 
flattenVertexAIEndpointDeployedModelsModel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsModelVersionId(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsModelVersionId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsEnableAccessLogging(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsEnableAccessLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsPrivateEndpoints(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsPrivateEndpoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -858,43 +900,43 @@ func flattenVertexAIEndpointDeployedModelsPrivateEndpoints(v interface{}, d *sch 
flattenVertexAIEndpointDeployedModelsPrivateEndpointsServiceAttachment(original["serviceAttachment"], d, config) return []interface{}{transformed} } -func flattenVertexAIEndpointDeployedModelsPrivateEndpointsPredictHttpUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsPrivateEndpointsPredictHttpUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsPrivateEndpointsExplainHttpUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsPrivateEndpointsExplainHttpUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsPrivateEndpointsHealthHttpUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsPrivateEndpointsHealthHttpUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsPrivateEndpointsServiceAttachment(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsPrivateEndpointsServiceAttachment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsSharedResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsSharedResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointDeployedModelsEnableContainerLogging(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointDeployedModelsEnableContainerLogging(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
flattenVertexAIEndpointLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointEncryptionSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointEncryptionSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -907,27 +949,27 @@ func flattenVertexAIEndpointEncryptionSpec(v interface{}, d *schema.ResourceData flattenVertexAIEndpointEncryptionSpecKmsKeyName(original["kmsKeyName"], d, config) return []interface{}{transformed} } -func flattenVertexAIEndpointEncryptionSpecKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointEncryptionSpecKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIEndpointModelDeploymentMonitoringJob(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIEndpointModelDeploymentMonitoringJob(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func 
expandVertexAIEndpointDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIEndpointDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandVertexAIEndpointDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIEndpointDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandVertexAIEndpointLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandVertexAIEndpointLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -938,7 +980,7 @@ func expandVertexAIEndpointLabels(v interface{}, d TerraformResourceData, config return m, nil } -func expandVertexAIEndpointEncryptionSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIEndpointEncryptionSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -950,17 +992,17 @@ func expandVertexAIEndpointEncryptionSpec(v interface{}, d TerraformResourceData transformedKmsKeyName, err := expandVertexAIEndpointEncryptionSpecKmsKeyName(original["kms_key_name"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["kmsKeyName"] = transformedKmsKeyName } return transformed, nil } -func expandVertexAIEndpointEncryptionSpecKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandVertexAIEndpointEncryptionSpecKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandVertexAIEndpointNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIEndpointNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_endpoint_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_endpoint_sweeper.go new file mode 100644 index 0000000000..3c72505dca --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_endpoint_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package vertexai + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("VertexAIEndpoint", testSweepVertexAIEndpoint) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepVertexAIEndpoint(region string) error { + resourceName := "VertexAIEndpoint" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://{{region}}-aiplatform.googleapis.com/v1/projects/{{project}}/locations/{{location}}/endpoints", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["endpoints"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://{{region}}-aiplatform.googleapis.com/v1/projects/{{project}}/locations/{{location}}/endpoints/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_featurestore.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_featurestore.go new file mode 100644 index 0000000000..05687e4f88 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_featurestore.go @@ -0,0 +1,695 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package vertexai + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceVertexAIFeaturestore() *schema.Resource { + return &schema.Resource{ + Create: resourceVertexAIFeaturestoreCreate, + Read: resourceVertexAIFeaturestoreRead, + Update: resourceVertexAIFeaturestoreUpdate, + Delete: resourceVertexAIFeaturestoreDelete, + + Importer: &schema.ResourceImporter{ + State: resourceVertexAIFeaturestoreImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "encryption_spec": { + Type: schema.TypeList, + Optional: true, + Description: `If set, both of the online and offline data storage will 
be secured by this key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the compute resource is created.`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `A set of key/value label pairs to assign to this Featurestore.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the Featurestore. This value may be up to 60 characters, and valid characters are [a-z0-9_]. The first character cannot be a number.`, + }, + "online_serving_config": { + Type: schema.TypeList, + Optional: true, + Description: `Config for online serving resources.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fixed_node_count": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of nodes for each cluster. The number of nodes will not scale automatically but can be scaled manually by providing different values when updating.`, + ExactlyOneOf: []string{"online_serving_config.0.fixed_node_count", "online_serving_config.0.scaling"}, + }, + "scaling": { + Type: schema.TypeList, + Optional: true, + Description: `Online serving scaling configuration. Only one of fixedNodeCount and scaling can be set. Setting one will reset the other.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_node_count": { + Type: schema.TypeInt, + Required: true, + Description: `The maximum number of nodes to scale up to. 
Must be greater than minNodeCount, and less than or equal to 10 times of 'minNodeCount'.`, + }, + "min_node_count": { + Type: schema.TypeInt, + Required: true, + Description: `The minimum number of nodes to scale down to. Must be greater than or equal to 1.`, + }, + }, + }, + ExactlyOneOf: []string{"online_serving_config.0.fixed_node_count", "online_serving_config.0.scaling"}, + }, + }, + }, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The region of the dataset. eg us-central1`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the featurestore was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `Used to perform consistent read-modify-write updates.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the featurestore was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "force_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If set to true, any EntityTypes and Features for this Featurestore will also be deleted`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceVertexAIFeaturestoreCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandVertexAIFeaturestoreLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || 
!reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + onlineServingConfigProp, err := expandVertexAIFeaturestoreOnlineServingConfig(d.Get("online_serving_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("online_serving_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(onlineServingConfigProp)) && (ok || !reflect.DeepEqual(v, onlineServingConfigProp)) { + obj["onlineServingConfig"] = onlineServingConfigProp + } + encryptionSpecProp, err := expandVertexAIFeaturestoreEncryptionSpec(d.Get("encryption_spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionSpecProp)) && (ok || !reflect.DeepEqual(v, encryptionSpecProp)) { + obj["encryptionSpec"] = encryptionSpecProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/featurestores?featurestoreId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Featurestore: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Featurestore: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Featurestore: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/featurestores/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) 
+ + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = VertexAIOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Featurestore", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Featurestore: %s", err) + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/featurestores/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Featurestore %q: %#v", d.Id(), res) + + return resourceVertexAIFeaturestoreRead(d, meta) +} + +func resourceVertexAIFeaturestoreRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/featurestores/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Featurestore: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("VertexAIFeaturestore %q", d.Id())) + } + + // Explicitly set virtual fields to default values if unset + if 
_, ok := d.GetOkExists("force_destroy"); !ok { + if err := d.Set("force_destroy", false); err != nil { + return fmt.Errorf("Error setting force_destroy: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Featurestore: %s", err) + } + + if err := d.Set("create_time", flattenVertexAIFeaturestoreCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Featurestore: %s", err) + } + if err := d.Set("update_time", flattenVertexAIFeaturestoreUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Featurestore: %s", err) + } + if err := d.Set("labels", flattenVertexAIFeaturestoreLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Featurestore: %s", err) + } + if err := d.Set("online_serving_config", flattenVertexAIFeaturestoreOnlineServingConfig(res["onlineServingConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Featurestore: %s", err) + } + if err := d.Set("encryption_spec", flattenVertexAIFeaturestoreEncryptionSpec(res["encryptionSpec"], d, config)); err != nil { + return fmt.Errorf("Error reading Featurestore: %s", err) + } + + return nil +} + +func resourceVertexAIFeaturestoreUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Featurestore: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandVertexAIFeaturestoreLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } 
+ onlineServingConfigProp, err := expandVertexAIFeaturestoreOnlineServingConfig(d.Get("online_serving_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("online_serving_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, onlineServingConfigProp)) { + obj["onlineServingConfig"] = onlineServingConfigProp + } + encryptionSpecProp, err := expandVertexAIFeaturestoreEncryptionSpec(d.Get("encryption_spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, encryptionSpecProp)) { + obj["encryptionSpec"] = encryptionSpecProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/featurestores/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Featurestore %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("online_serving_config") { + updateMask = append(updateMask, "onlineServingConfig") + } + + if d.HasChange("encryption_spec") { + updateMask = append(updateMask, "encryptionSpec") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Featurestore %q: 
%s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Featurestore %q: %#v", d.Id(), res) + } + + err = VertexAIOperationWaitTime( + config, res, project, "Updating Featurestore", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceVertexAIFeaturestoreRead(d, meta) +} + +func resourceVertexAIFeaturestoreDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Featurestore: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/featurestores/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + if v, ok := d.GetOk("force_destroy"); ok { + url, err = transport_tpg.AddQueryParams(url, map[string]string{"force": fmt.Sprintf("%v", v)}) + if err != nil { + return err + } + } + log.Printf("[DEBUG] Deleting Featurestore %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Featurestore") + } + + err = VertexAIOperationWaitTime( + config, res, project, "Deleting Featurestore", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Featurestore %q: %#v", d.Id(), res) + return nil +} + +func 
resourceVertexAIFeaturestoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/featurestores/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/featurestores/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Explicitly set virtual fields to default values on import + if err := d.Set("force_destroy", false); err != nil { + return nil, fmt.Errorf("Error setting force_destroy: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenVertexAIFeaturestoreCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIFeaturestoreUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIFeaturestoreLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIFeaturestoreOnlineServingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["fixed_node_count"] = + flattenVertexAIFeaturestoreOnlineServingConfigFixedNodeCount(original["fixedNodeCount"], d, config) + transformed["scaling"] = + flattenVertexAIFeaturestoreOnlineServingConfigScaling(original["scaling"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIFeaturestoreOnlineServingConfigFixedNodeCount(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIFeaturestoreOnlineServingConfigScaling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["min_node_count"] = + flattenVertexAIFeaturestoreOnlineServingConfigScalingMinNodeCount(original["minNodeCount"], d, config) + transformed["max_node_count"] = + flattenVertexAIFeaturestoreOnlineServingConfigScalingMaxNodeCount(original["maxNodeCount"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIFeaturestoreOnlineServingConfigScalingMinNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIFeaturestoreOnlineServingConfigScalingMaxNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := 
int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIFeaturestoreEncryptionSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenVertexAIFeaturestoreEncryptionSpecKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIFeaturestoreEncryptionSpecKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandVertexAIFeaturestoreLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandVertexAIFeaturestoreOnlineServingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFixedNodeCount, err := expandVertexAIFeaturestoreOnlineServingConfigFixedNodeCount(original["fixed_node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFixedNodeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fixedNodeCount"] = transformedFixedNodeCount + } + + transformedScaling, err := expandVertexAIFeaturestoreOnlineServingConfigScaling(original["scaling"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaling); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["scaling"] = transformedScaling + } + + return transformed, nil +} + +func expandVertexAIFeaturestoreOnlineServingConfigFixedNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIFeaturestoreOnlineServingConfigScaling(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMinNodeCount, err := expandVertexAIFeaturestoreOnlineServingConfigScalingMinNodeCount(original["min_node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinNodeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minNodeCount"] = transformedMinNodeCount + } + + transformedMaxNodeCount, err := expandVertexAIFeaturestoreOnlineServingConfigScalingMaxNodeCount(original["max_node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxNodeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxNodeCount"] = transformedMaxNodeCount + } + + return transformed, nil +} + +func expandVertexAIFeaturestoreOnlineServingConfigScalingMinNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIFeaturestoreOnlineServingConfigScalingMaxNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIFeaturestoreEncryptionSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original 
:= raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandVertexAIFeaturestoreEncryptionSpecKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandVertexAIFeaturestoreEncryptionSpecKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_featurestore_entitytype.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_featurestore_entitytype.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_featurestore_entitytype.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_featurestore_entitytype.go index 8ba38f288e..601508aaee 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_vertex_ai_featurestore_entitytype.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_featurestore_entitytype.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: MMv1 *** @@ -12,7 +15,7 @@ // // ---------------------------------------------------------------------------- -package google +package vertexai import ( "fmt" @@ -23,6 +26,9 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func ResourceVertexAIFeaturestoreEntitytype() *schema.Resource { @@ -190,8 +196,8 @@ If both FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days a func resourceVertexAIFeaturestoreEntitytypeCreate(d *schema.ResourceData, meta interface{}) error { var project string - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -200,19 +206,19 @@ func resourceVertexAIFeaturestoreEntitytypeCreate(d *schema.ResourceData, meta i descriptionProp, err := expandVertexAIFeaturestoreEntitytypeDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } labelsProp, err := expandVertexAIFeaturestoreEntitytypeLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } monitoringConfigProp, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfig(d.Get("monitoring_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("monitoring_config"); !isEmptyValue(reflect.ValueOf(monitoringConfigProp)) && (ok || !reflect.DeepEqual(v, monitoringConfigProp)) { + } else if v, ok := d.GetOkExists("monitoring_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(monitoringConfigProp)) && (ok || !reflect.DeepEqual(v, monitoringConfigProp)) { obj["monitoringConfig"] = monitoringConfigProp } @@ -221,7 +227,7 @@ func resourceVertexAIFeaturestoreEntitytypeCreate(d *schema.ResourceData, meta i return err } - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{featurestore}}/entityTypes?entityTypeId={{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{featurestore}}/entityTypes?entityTypeId={{name}}") if err != nil { return err } @@ -230,7 +236,7 @@ func resourceVertexAIFeaturestoreEntitytypeCreate(d *schema.ResourceData, meta i billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } @@ -243,13 +249,21 @@ func resourceVertexAIFeaturestoreEntitytypeCreate(d *schema.ResourceData, meta i } } } - res, err := SendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) if err != nil { return fmt.Errorf("Error creating FeaturestoreEntitytype: %s", err) } // Store the ID now - id, err := replaceVars(d, 
config, "{{featurestore}}/entityTypes/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "{{featurestore}}/entityTypes/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -269,7 +283,7 @@ func resourceVertexAIFeaturestoreEntitytypeCreate(d *schema.ResourceData, meta i } // This may have caused the ID to update - update it if so. - id, err = replaceVars(d, config, "{{featurestore}}/entityTypes/{{name}}") + id, err = tpgresource.ReplaceVars(d, config, "{{featurestore}}/entityTypes/{{name}}") if err != nil { return fmt.Errorf("Error constructing id: %s", err) } @@ -281,13 +295,13 @@ func resourceVertexAIFeaturestoreEntitytypeCreate(d *schema.ResourceData, meta i } func resourceVertexAIFeaturestoreEntitytypeRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{featurestore}}/entityTypes/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{featurestore}}/entityTypes/{{name}}") if err != nil { return err } @@ -295,13 +309,19 @@ func resourceVertexAIFeaturestoreEntitytypeRead(d *schema.ResourceData, meta int billingProject := "" // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequest(config, "GET", billingProject, url, userAgent, nil) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("VertexAIFeaturestoreEntitytype %q", d.Id())) + 
return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("VertexAIFeaturestoreEntitytype %q", d.Id())) } if err := d.Set("description", flattenVertexAIFeaturestoreEntitytypeDescription(res["description"], d, config)); err != nil { @@ -324,8 +344,8 @@ func resourceVertexAIFeaturestoreEntitytypeRead(d *schema.ResourceData, meta int } func resourceVertexAIFeaturestoreEntitytypeUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } @@ -336,19 +356,19 @@ func resourceVertexAIFeaturestoreEntitytypeUpdate(d *schema.ResourceData, meta i descriptionProp, err := expandVertexAIFeaturestoreEntitytypeDescription(d.Get("description"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } labelsProp, err := expandVertexAIFeaturestoreEntitytypeLabels(d.Get("labels"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { obj["labels"] = labelsProp } monitoringConfigProp, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfig(d.Get("monitoring_config"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("monitoring_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, monitoringConfigProp)) { + } else if v, ok := 
d.GetOkExists("monitoring_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, monitoringConfigProp)) { obj["monitoringConfig"] = monitoringConfigProp } @@ -357,7 +377,7 @@ func resourceVertexAIFeaturestoreEntitytypeUpdate(d *schema.ResourceData, meta i return err } - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{featurestore}}/entityTypes/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{featurestore}}/entityTypes/{{name}}") if err != nil { return err } @@ -376,19 +396,27 @@ func resourceVertexAIFeaturestoreEntitytypeUpdate(d *schema.ResourceData, meta i if d.HasChange("monitoring_config") { updateMask = append(updateMask, "monitoringConfig") } - // updateMask is a URL parameter but not present in the schema, so replaceVars + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) if err != nil { return err } // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) if err != nil { return fmt.Errorf("Error updating FeaturestoreEntitytype %q: %s", d.Id(), err) @@ -401,15 +429,15 @@ func resourceVertexAIFeaturestoreEntitytypeUpdate(d *schema.ResourceData, meta i func resourceVertexAIFeaturestoreEntitytypeDelete(d 
*schema.ResourceData, meta interface{}) error { var project string - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err } billingProject := "" - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{featurestore}}/entityTypes/{{name}}") + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{featurestore}}/entityTypes/{{name}}") if err != nil { return err } @@ -427,13 +455,21 @@ func resourceVertexAIFeaturestoreEntitytypeDelete(d *schema.ResourceData, meta i log.Printf("[DEBUG] Deleting FeaturestoreEntitytype %q", d.Id()) // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } - res, err := SendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) if err != nil { - return handleNotFoundError(err, d, "FeaturestoreEntitytype") + return transport_tpg.HandleNotFoundError(err, d, "FeaturestoreEntitytype") } err = VertexAIOperationWaitTime( @@ -449,15 +485,15 @@ func resourceVertexAIFeaturestoreEntitytypeDelete(d *schema.ResourceData, meta i } func resourceVertexAIFeaturestoreEntitytypeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ "(?P.+)/entityTypes/(?P[^/]+)", }, d, config); err != nil { return nil, err } // Replace import id for the 
resource id - id, err := replaceVars(d, config, "{{featurestore}}/entityTypes/{{name}}") + id, err := tpgresource.ReplaceVars(d, config, "{{featurestore}}/entityTypes/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -473,23 +509,23 @@ func resourceVertexAIFeaturestoreEntitytypeImport(d *schema.ResourceData, meta i return []*schema.ResourceData{d}, nil } -func flattenVertexAIFeaturestoreEntitytypeDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIFeaturestoreEntitytypeCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIFeaturestoreEntitytypeUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIFeaturestoreEntitytypeLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIFeaturestoreEntitytypeMonitoringConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeMonitoringConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -508,7 +544,7 @@ func flattenVertexAIFeaturestoreEntitytypeMonitoringConfig(v interface{}, d *sch flattenVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdConfig(original["categoricalThresholdConfig"], d, 
config) return []interface{}{transformed} } -func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysis(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysis(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -525,14 +561,14 @@ func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysis(v int flattenVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisStalenessDays(original["stalenessDays"], d, config) return []interface{}{transformed} } -func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisDisabled(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisDisabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisMonitoringIntervalDays(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisMonitoringIntervalDays(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -546,10 +582,10 @@ func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisMonito return v // let terraform core handle it otherwise } -func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisStalenessDays(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisStalenessDays(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := v.(string); ok { - if intVal, err := StringToFixed64(strVal); err == nil { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } } @@ -563,7 +599,7 @@ func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisStalen return v // let terraform core handle it otherwise } -func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysis(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysis(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -578,15 +614,15 @@ func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysis flattenVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysisAnomalyDetectionBaseline(original["anomalyDetectionBaseline"], d, config) return []interface{}{transformed} } -func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysisState(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysisState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysisAnomalyDetectionBaseline(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysisAnomalyDetectionBaseline(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigNumericalThresholdConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigNumericalThresholdConfig(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -599,11 +635,11 @@ func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigNumericalThresholdConf flattenVertexAIFeaturestoreEntitytypeMonitoringConfigNumericalThresholdConfigValue(original["value"], d, config) return []interface{}{transformed} } -func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigNumericalThresholdConfigValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigNumericalThresholdConfigValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil } @@ -616,15 +652,15 @@ func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdCo flattenVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdConfigValue(original["value"], d, config) return []interface{}{transformed} } -func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdConfigValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { +func flattenVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdConfigValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func expandVertexAIFeaturestoreEntitytypeDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIFeaturestoreEntitytypeDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandVertexAIFeaturestoreEntitytypeLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { +func expandVertexAIFeaturestoreEntitytypeLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } @@ -635,7 +671,7 @@ func expandVertexAIFeaturestoreEntitytypeLabels(v interface{}, d TerraformResour return m, nil } -func expandVertexAIFeaturestoreEntitytypeMonitoringConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIFeaturestoreEntitytypeMonitoringConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -647,35 +683,35 @@ func expandVertexAIFeaturestoreEntitytypeMonitoringConfig(v interface{}, d Terra transformedSnapshotAnalysis, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysis(original["snapshot_analysis"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSnapshotAnalysis); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedSnapshotAnalysis); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["snapshotAnalysis"] = transformedSnapshotAnalysis } transformedImportFeaturesAnalysis, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysis(original["import_features_analysis"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedImportFeaturesAnalysis); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedImportFeaturesAnalysis); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["importFeaturesAnalysis"] = transformedImportFeaturesAnalysis } transformedNumericalThresholdConfig, err := 
expandVertexAIFeaturestoreEntitytypeMonitoringConfigNumericalThresholdConfig(original["numerical_threshold_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedNumericalThresholdConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedNumericalThresholdConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["numericalThresholdConfig"] = transformedNumericalThresholdConfig } transformedCategoricalThresholdConfig, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdConfig(original["categorical_threshold_config"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCategoricalThresholdConfig); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedCategoricalThresholdConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["categoricalThresholdConfig"] = transformedCategoricalThresholdConfig } return transformed, nil } -func expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysis(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysis(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -687,40 +723,40 @@ func expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysis(v inte transformedDisabled, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisDisabled(original["disabled"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["disabled"] = transformedDisabled } transformedMonitoringIntervalDays, err 
:= expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisMonitoringIntervalDays(original["monitoring_interval_days"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMonitoringIntervalDays); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedMonitoringIntervalDays); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["monitoringIntervalDays"] = transformedMonitoringIntervalDays } transformedStalenessDays, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisStalenessDays(original["staleness_days"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedStalenessDays); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedStalenessDays); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["stalenessDays"] = transformedStalenessDays } return transformed, nil } -func expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisDisabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisMonitoringIntervalDays(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisMonitoringIntervalDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisStalenessDays(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisStalenessDays(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysis(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysis(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -732,29 +768,29 @@ func expandVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysis( transformedState, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysisState(original["state"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedState); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedState); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["state"] = transformedState } transformedAnomalyDetectionBaseline, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysisAnomalyDetectionBaseline(original["anomaly_detection_baseline"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedAnomalyDetectionBaseline); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedAnomalyDetectionBaseline); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["anomalyDetectionBaseline"] = transformedAnomalyDetectionBaseline } return transformed, nil } -func expandVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysisState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysisState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func 
expandVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysisAnomalyDetectionBaseline(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIFeaturestoreEntitytypeMonitoringConfigImportFeaturesAnalysisAnomalyDetectionBaseline(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandVertexAIFeaturestoreEntitytypeMonitoringConfigNumericalThresholdConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIFeaturestoreEntitytypeMonitoringConfigNumericalThresholdConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -766,18 +802,18 @@ func expandVertexAIFeaturestoreEntitytypeMonitoringConfigNumericalThresholdConfi transformedValue, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfigNumericalThresholdConfigValue(original["value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["value"] = transformedValue } return transformed, nil } -func expandVertexAIFeaturestoreEntitytypeMonitoringConfigNumericalThresholdConfigValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIFeaturestoreEntitytypeMonitoringConfigNumericalThresholdConfigValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } -func expandVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdConfig(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -789,14 +825,14 @@ func expandVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdCon transformedValue, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdConfigValue(original["value"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { transformed["value"] = transformedValue } return transformed, nil } -func expandVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdConfigValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandVertexAIFeaturestoreEntitytypeMonitoringConfigCategoricalThresholdConfigValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_featurestore_entitytype_feature.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_featurestore_entitytype_feature.go new file mode 100644 index 0000000000..05e7f6c490 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_featurestore_entitytype_feature.go @@ -0,0 +1,459 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package vertexai + +import ( + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceVertexAIFeaturestoreEntitytypeFeature() *schema.Resource { + return &schema.Resource{ + Create: resourceVertexAIFeaturestoreEntitytypeFeatureCreate, + Read: resourceVertexAIFeaturestoreEntitytypeFeatureRead, + Update: resourceVertexAIFeaturestoreEntitytypeFeatureUpdate, + Delete: resourceVertexAIFeaturestoreEntitytypeFeatureDelete, + + Importer: &schema.ResourceImporter{ + State: resourceVertexAIFeaturestoreEntitytypeFeatureImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "entitytype": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the Featurestore to use, in the format projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entitytype}.`, + }, + "value_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Type of Feature value. Immutable. 
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.featurestores.entityTypes.features#ValueType`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of the feature.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `A set of key/value label pairs to assign to the feature.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the feature. The feature can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given an entity type.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the entity type was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `Used to perform consistent read-modify-write updates.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp when the entity type was most recently updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Description: "The region of the feature", + }, + }, + UseJSONNumber: true, + } +} + +func resourceVertexAIFeaturestoreEntitytypeFeatureCreate(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandVertexAIFeaturestoreEntitytypeFeatureLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + descriptionProp, err := expandVertexAIFeaturestoreEntitytypeFeatureDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + valueTypeProp, err := expandVertexAIFeaturestoreEntitytypeFeatureValueType(d.Get("value_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("value_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(valueTypeProp)) && (ok || !reflect.DeepEqual(v, valueTypeProp)) { + obj["valueType"] = valueTypeProp + } + + obj, err = resourceVertexAIFeaturestoreEntitytypeFeatureEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{entitytype}}/features?featureId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new FeaturestoreEntitytypeFeature: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + if v, ok := d.GetOk("entitytype"); ok { + re := regexp.MustCompile("projects/([a-zA-Z0-9-]*)/(?:locations|regions)/([a-zA-Z0-9-]*)") + switch { + case re.MatchString(v.(string)): + if res := re.FindStringSubmatch(v.(string)); len(res) == 3 && res[1] != "" { + project = res[1] + } + } + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating 
FeaturestoreEntitytypeFeature: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{entitytype}}/features/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = VertexAIOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating FeaturestoreEntitytypeFeature", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create FeaturestoreEntitytypeFeature: %s", err) + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{entitytype}}/features/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating FeaturestoreEntitytypeFeature %q: %#v", d.Id(), res) + + return resourceVertexAIFeaturestoreEntitytypeFeatureRead(d, meta) +} + +func resourceVertexAIFeaturestoreEntitytypeFeatureRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{entitytype}}/features/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, 
fmt.Sprintf("VertexAIFeaturestoreEntitytypeFeature %q", d.Id())) + } + + if err := d.Set("create_time", flattenVertexAIFeaturestoreEntitytypeFeatureCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading FeaturestoreEntitytypeFeature: %s", err) + } + if err := d.Set("update_time", flattenVertexAIFeaturestoreEntitytypeFeatureUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading FeaturestoreEntitytypeFeature: %s", err) + } + if err := d.Set("labels", flattenVertexAIFeaturestoreEntitytypeFeatureLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading FeaturestoreEntitytypeFeature: %s", err) + } + if err := d.Set("description", flattenVertexAIFeaturestoreEntitytypeFeatureDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading FeaturestoreEntitytypeFeature: %s", err) + } + if err := d.Set("value_type", flattenVertexAIFeaturestoreEntitytypeFeatureValueType(res["valueType"], d, config)); err != nil { + return fmt.Errorf("Error reading FeaturestoreEntitytypeFeature: %s", err) + } + + return nil +} + +func resourceVertexAIFeaturestoreEntitytypeFeatureUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + labelsProp, err := expandVertexAIFeaturestoreEntitytypeFeatureLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + descriptionProp, err := expandVertexAIFeaturestoreEntitytypeFeatureDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + obj, err = resourceVertexAIFeaturestoreEntitytypeFeatureEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{entitytype}}/features/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating FeaturestoreEntitytypeFeature %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating FeaturestoreEntitytypeFeature %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating FeaturestoreEntitytypeFeature %q: %#v", d.Id(), res) + } + + return resourceVertexAIFeaturestoreEntitytypeFeatureRead(d, meta) +} + +func resourceVertexAIFeaturestoreEntitytypeFeatureDelete(d *schema.ResourceData, meta interface{}) error { + var project string + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, 
"{{VertexAIBasePath}}{{entitytype}}/features/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + if v, ok := d.GetOk("entitytype"); ok { + re := regexp.MustCompile("projects/([a-zA-Z0-9-]*)/(?:locations|regions)/([a-zA-Z0-9-]*)") + switch { + case re.MatchString(v.(string)): + if res := re.FindStringSubmatch(v.(string)); len(res) == 3 && res[1] != "" { + project = res[1] + } + } + } + log.Printf("[DEBUG] Deleting FeaturestoreEntitytypeFeature %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "FeaturestoreEntitytypeFeature") + } + + err = VertexAIOperationWaitTime( + config, res, project, "Deleting FeaturestoreEntitytypeFeature", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting FeaturestoreEntitytypeFeature %q: %#v", d.Id(), res) + return nil +} + +func resourceVertexAIFeaturestoreEntitytypeFeatureImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/features/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{entitytype}}/features/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + entitytype := d.Get("entitytype").(string) + + re := regexp.MustCompile("^projects/(.+)/locations/(.+)/featurestores/(.+)/entityTypes/(.+)$") + if parts := 
re.FindStringSubmatch(entitytype); parts != nil { + d.Set("region", parts[2]) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenVertexAIFeaturestoreEntitytypeFeatureCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIFeaturestoreEntitytypeFeatureUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIFeaturestoreEntitytypeFeatureLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIFeaturestoreEntitytypeFeatureDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIFeaturestoreEntitytypeFeatureValueType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandVertexAIFeaturestoreEntitytypeFeatureLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandVertexAIFeaturestoreEntitytypeFeatureDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIFeaturestoreEntitytypeFeatureValueType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceVertexAIFeaturestoreEntitytypeFeatureEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + if v, ok := d.GetOk("entitytype"); ok { + re := regexp.MustCompile("^projects/(.+)/locations/(.+)/featurestores/(.+)/entityTypes/(.+)$") + if parts := re.FindStringSubmatch(v.(string)); 
parts != nil { + d.Set("region", parts[2]) + } + } + + return obj, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index.go new file mode 100644 index 0000000000..ef67ada265 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index.go @@ -0,0 +1,1142 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package vertexai + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceVertexAIIndex() *schema.Resource { + return &schema.Resource{ + Create: resourceVertexAIIndexCreate, + Read: resourceVertexAIIndexRead, + Update: resourceVertexAIIndexUpdate, + Delete: resourceVertexAIIndexDelete, + + Importer: &schema.ResourceImporter{ + State: resourceVertexAIIndexImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(180 * time.Minute), + Update: schema.DefaultTimeout(180 * time.Minute), + Delete: schema.DefaultTimeout(180 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `The display name of the Index. The name can be up to 128 characters long and can consist of any UTF-8 characters.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `The description of the Index.`, + }, + "index_update_method": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The update method to use with this Index. The value must be the followings. If not set, BATCH_UPDATE will be used by default. +* BATCH_UPDATE: user can call indexes.patch with files on Cloud Storage of datapoints to update. 
+* STREAM_UPDATE: user can call indexes.upsertDatapoints/DeleteDatapoints to update the Index and the updates will be applied in corresponding DeployedIndexes in nearly real-time.`, + Default: "BATCH_UPDATE", + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels with user-defined metadata to organize your Indexes.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "metadata": { + Type: schema.TypeList, + Optional: true, + Description: `An additional information about the Index`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The configuration of the Matching Engine Index.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dimensions": { + Type: schema.TypeInt, + Required: true, + Description: `The number of dimensions of the input vectors.`, + }, + "algorithm_config": { + Type: schema.TypeList, + Optional: true, + Description: `The configuration with regard to the algorithms used for efficient search.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "brute_force_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration options for using brute force search, which simply implements the +standard linear search in the database for each query.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{}, + }, + "tree_ah_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration options for using the tree-AH algorithm (Shallow tree + Asymmetric Hashing). +Please refer to this paper for more details: https://arxiv.org/abs/1908.10396`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "leaf_node_embedding_count": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of embeddings on each leaf node. 
The default value is 1000 if not set.`, + Default: 1000, + }, + "leaf_nodes_to_search_percent": { + Type: schema.TypeInt, + Optional: true, + Description: `The default percentage of leaf nodes that any query may be searched. Must be in +range 1-100, inclusive. The default value is 10 (means 10%) if not set.`, + Default: 10, + }, + }, + }, + ExactlyOneOf: []string{}, + }, + }, + }, + }, + "approximate_neighbors_count": { + Type: schema.TypeInt, + Optional: true, + Description: `The default number of neighbors to find via approximate search before exact reordering is +performed. Exact reordering is a procedure where results returned by an +approximate search algorithm are reordered via a more expensive distance computation. +Required if tree-AH algorithm is used.`, + }, + "distance_measure_type": { + Type: schema.TypeString, + Optional: true, + Description: `The distance measure used in nearest neighbor search. The value must be one of the followings: +* SQUARED_L2_DISTANCE: Euclidean (L_2) Distance +* L1_DISTANCE: Manhattan (L_1) Distance +* COSINE_DISTANCE: Cosine Distance. Defined as 1 - cosine similarity. +* DOT_PRODUCT_DISTANCE: Dot Product Distance. Defined as a negative of the dot product`, + Default: "DOT_PRODUCT_DISTANCE", + }, + "feature_norm_type": { + Type: schema.TypeString, + Optional: true, + Description: `Type of normalization to be carried out on each vector. The value must be one of the followings: +* UNIT_L2_NORM: Unit L2 normalization type +* NONE: No normalization type is specified.`, + Default: "NONE", + }, + "shard_size": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Index data is split into equal parts to be processed. These are called "shards". +The shard size must be specified when creating an index. 
The value must be one of the followings: +* SHARD_SIZE_SMALL: Small (2GB) +* SHARD_SIZE_MEDIUM: Medium (20GB) +* SHARD_SIZE_LARGE: Large (50GB)`, + }, + }, + }, + }, + "contents_delta_uri": { + Type: schema.TypeString, + Optional: true, + Description: `Allows inserting, updating or deleting the contents of the Matching Engine Index. +The string must be a valid Cloud Storage directory path. If this +field is set when calling IndexService.UpdateIndex, then no other +Index field can be also updated as part of the same call. +The expected structure and format of the files this URI points to is +described at https://cloud.google.com/vertex-ai/docs/matching-engine/using-matching-engine#input-data-format`, + }, + "is_complete_overwrite": { + Type: schema.TypeBool, + Optional: true, + Description: `If this field is set together with contentsDeltaUri when calling IndexService.UpdateIndex, +then existing content of the Index will be replaced by the data from the contentsDeltaUri.`, + Default: false, + }, + }, + }, + }, + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The region of the index. eg us-central1`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the Index was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "deployed_indexes": { + Type: schema.TypeList, + Computed: true, + Description: `The pointers to DeployedIndexes created from this Index. 
An Index can be only deleted if all its DeployedIndexes had been undeployed first.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "deployed_index_id": { + Type: schema.TypeString, + Computed: true, + Description: `The ID of the DeployedIndex in the above IndexEndpoint.`, + }, + "index_endpoint": { + Type: schema.TypeString, + Computed: true, + Description: `A resource name of the IndexEndpoint.`, + }, + }, + }, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `Used to perform consistent read-modify-write updates.`, + }, + "index_stats": { + Type: schema.TypeList, + Computed: true, + Description: `Stats of the index resource.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "shards_count": { + Type: schema.TypeInt, + Computed: true, + Description: `The number of shards in the Index.`, + }, + "vectors_count": { + Type: schema.TypeString, + Computed: true, + Description: `The number of vectors in the Index.`, + }, + }, + }, + }, + "metadata_schema_uri": { + Type: schema.TypeString, + Computed: true, + Description: `Points to a YAML file stored on Google Cloud Storage describing additional information about the Index, that is specific to it. 
Unset if the Index does not have any additional information.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the Index.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the Index was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceVertexAIIndexCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandVertexAIIndexDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandVertexAIIndexDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + metadataProp, err := expandVertexAIIndexMetadata(d.Get("metadata"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(metadataProp)) && (ok || !reflect.DeepEqual(v, metadataProp)) { + obj["metadata"] = metadataProp + } + labelsProp, err := expandVertexAIIndexLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || 
!reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + indexUpdateMethodProp, err := expandVertexAIIndexIndexUpdateMethod(d.Get("index_update_method"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("index_update_method"); !tpgresource.IsEmptyValue(reflect.ValueOf(indexUpdateMethodProp)) && (ok || !reflect.DeepEqual(v, indexUpdateMethodProp)) { + obj["indexUpdateMethod"] = indexUpdateMethodProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/indexes") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Index: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Index: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Index: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/indexes/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = VertexAIOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Index", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Index: %s", err) + } + + if err := 
d.Set("name", flattenVertexAIIndexName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/indexes/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Index %q: %#v", d.Id(), res) + + return resourceVertexAIIndexRead(d, meta) +} + +func resourceVertexAIIndexRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/indexes/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Index: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("VertexAIIndex %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + + if err := d.Set("name", flattenVertexAIIndexName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("display_name", flattenVertexAIIndexDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("description", 
flattenVertexAIIndexDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("metadata", flattenVertexAIIndexMetadata(res["metadata"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("metadata_schema_uri", flattenVertexAIIndexMetadataSchemaUri(res["metadataSchemaUri"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("deployed_indexes", flattenVertexAIIndexDeployedIndexes(res["deployedIndexes"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("labels", flattenVertexAIIndexLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("create_time", flattenVertexAIIndexCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("update_time", flattenVertexAIIndexUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("index_stats", flattenVertexAIIndexIndexStats(res["indexStats"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + if err := d.Set("index_update_method", flattenVertexAIIndexIndexUpdateMethod(res["indexUpdateMethod"], d, config)); err != nil { + return fmt.Errorf("Error reading Index: %s", err) + } + + return nil +} + +func resourceVertexAIIndexUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Index: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + 
displayNameProp, err := expandVertexAIIndexDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandVertexAIIndexDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + metadataProp, err := expandVertexAIIndexMetadata(d.Get("metadata"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metadataProp)) { + obj["metadata"] = metadataProp + } + labelsProp, err := expandVertexAIIndexLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/indexes/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Index %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("metadata") { + updateMask = append(updateMask, "metadata") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, 
",")}) + if err != nil { + return err + } + newUpdateMask := []string{} + + if d.HasChange("metadata.0.contents_delta_uri") { + // Use the current value of isCompleteOverwrite when updating contentsDeltaUri + newUpdateMask = append(newUpdateMask, "metadata.contentsDeltaUri") + newUpdateMask = append(newUpdateMask, "metadata.isCompleteOverwrite") + } + + for _, mask := range updateMask { + // Use granular update masks instead of 'metadata' to avoid the following error: + // 'If `contents_delta_gcs_uri` is set as part of `index.metadata`, then no other Index fields can be also updated as part of the same update call.' + if mask == "metadata" { + continue + } + newUpdateMask = append(newUpdateMask, mask) + } + + // Refreshing updateMask after adding extra schema entries + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Index %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Index %q: %#v", d.Id(), res) + } + + err = VertexAIOperationWaitTime( + config, res, project, "Updating Index", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceVertexAIIndexRead(d, meta) +} + +func resourceVertexAIIndexDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := 
tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Index: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/indexes/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Index %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Index") + } + + err = VertexAIOperationWaitTime( + config, res, project, "Deleting Index", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Index %q: %#v", d.Id(), res) + return nil +} + +func resourceVertexAIIndexImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/indexes/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/indexes/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenVertexAIIndexName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return 
tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenVertexAIIndexDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexMetadata(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["contents_delta_uri"] = + flattenVertexAIIndexMetadataContentsDeltaUri(original["contentsDeltaUri"], d, config) + transformed["is_complete_overwrite"] = + flattenVertexAIIndexMetadataIsCompleteOverwrite(original["isCompleteOverwrite"], d, config) + transformed["config"] = + flattenVertexAIIndexMetadataConfig(original["config"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexMetadataContentsDeltaUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // We want to ignore read on this field, but cannot because it is nested + return d.Get("metadata.0.contents_delta_uri") +} + +func flattenVertexAIIndexMetadataIsCompleteOverwrite(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // We want to ignore read on this field, but cannot because it is nested + return d.Get("metadata.0.is_complete_overwrite") +} + +func flattenVertexAIIndexMetadataConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dimensions"] = + flattenVertexAIIndexMetadataConfigDimensions(original["dimensions"], d, config) + transformed["approximate_neighbors_count"] = + 
flattenVertexAIIndexMetadataConfigApproximateNeighborsCount(original["approximateNeighborsCount"], d, config) + transformed["shard_size"] = + flattenVertexAIIndexMetadataConfigShardSize(original["shardSize"], d, config) + transformed["distance_measure_type"] = + flattenVertexAIIndexMetadataConfigDistanceMeasureType(original["distanceMeasureType"], d, config) + transformed["feature_norm_type"] = + flattenVertexAIIndexMetadataConfigFeatureNormType(original["featureNormType"], d, config) + transformed["algorithm_config"] = + flattenVertexAIIndexMetadataConfigAlgorithmConfig(original["algorithmConfig"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexMetadataConfigDimensions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIIndexMetadataConfigApproximateNeighborsCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIIndexMetadataConfigShardSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexMetadataConfigDistanceMeasureType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenVertexAIIndexMetadataConfigFeatureNormType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexMetadataConfigAlgorithmConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["tree_ah_config"] = + flattenVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfig(original["treeAhConfig"], d, config) + transformed["brute_force_config"] = + flattenVertexAIIndexMetadataConfigAlgorithmConfigBruteForceConfig(original["bruteForceConfig"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["leaf_node_embedding_count"] = + flattenVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodeEmbeddingCount(original["leafNodeEmbeddingCount"], d, config) + transformed["leaf_nodes_to_search_percent"] = + flattenVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodesToSearchPercent(original["leafNodesToSearchPercent"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodeEmbeddingCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core 
handle it otherwise +} + +func flattenVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodesToSearchPercent(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIIndexMetadataConfigAlgorithmConfigBruteForceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenVertexAIIndexMetadataSchemaUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexDeployedIndexes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "index_endpoint": flattenVertexAIIndexDeployedIndexesIndexEndpoint(original["indexEndpoint"], d, config), + "deployed_index_id": flattenVertexAIIndexDeployedIndexesDeployedIndexId(original["deployedIndexId"], d, config), + }) + } + return transformed +} +func flattenVertexAIIndexDeployedIndexesIndexEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexDeployedIndexesDeployedIndexId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + 
return v +} + +func flattenVertexAIIndexLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexIndexStats(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["vectors_count"] = + flattenVertexAIIndexIndexStatsVectorsCount(original["vectorsCount"], d, config) + transformed["shards_count"] = + flattenVertexAIIndexIndexStatsShardsCount(original["shardsCount"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIIndexIndexStatsVectorsCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexIndexStatsShardsCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIIndexIndexUpdateMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandVertexAIIndexDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexDescription(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexMetadata(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedContentsDeltaUri, err := expandVertexAIIndexMetadataContentsDeltaUri(original["contents_delta_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContentsDeltaUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["contentsDeltaUri"] = transformedContentsDeltaUri + } + + transformedIsCompleteOverwrite, err := expandVertexAIIndexMetadataIsCompleteOverwrite(original["is_complete_overwrite"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIsCompleteOverwrite); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["isCompleteOverwrite"] = transformedIsCompleteOverwrite + } + + transformedConfig, err := expandVertexAIIndexMetadataConfig(original["config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["config"] = transformedConfig + } + + return transformed, nil +} + +func expandVertexAIIndexMetadataContentsDeltaUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexMetadataIsCompleteOverwrite(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexMetadataConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 
0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDimensions, err := expandVertexAIIndexMetadataConfigDimensions(original["dimensions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDimensions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dimensions"] = transformedDimensions + } + + transformedApproximateNeighborsCount, err := expandVertexAIIndexMetadataConfigApproximateNeighborsCount(original["approximate_neighbors_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedApproximateNeighborsCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["approximateNeighborsCount"] = transformedApproximateNeighborsCount + } + + transformedShardSize, err := expandVertexAIIndexMetadataConfigShardSize(original["shard_size"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedShardSize); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["shardSize"] = transformedShardSize + } + + transformedDistanceMeasureType, err := expandVertexAIIndexMetadataConfigDistanceMeasureType(original["distance_measure_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDistanceMeasureType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["distanceMeasureType"] = transformedDistanceMeasureType + } + + transformedFeatureNormType, err := expandVertexAIIndexMetadataConfigFeatureNormType(original["feature_norm_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFeatureNormType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["featureNormType"] = transformedFeatureNormType + } + + transformedAlgorithmConfig, err := 
expandVertexAIIndexMetadataConfigAlgorithmConfig(original["algorithm_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAlgorithmConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["algorithmConfig"] = transformedAlgorithmConfig + } + + return transformed, nil +} + +func expandVertexAIIndexMetadataConfigDimensions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexMetadataConfigApproximateNeighborsCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexMetadataConfigShardSize(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexMetadataConfigDistanceMeasureType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexMetadataConfigFeatureNormType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexMetadataConfigAlgorithmConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTreeAhConfig, err := expandVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfig(original["tree_ah_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTreeAhConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["treeAhConfig"] = transformedTreeAhConfig + } + + transformedBruteForceConfig, err := 
expandVertexAIIndexMetadataConfigAlgorithmConfigBruteForceConfig(original["brute_force_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["bruteForceConfig"] = transformedBruteForceConfig + } + + return transformed, nil +} + +func expandVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLeafNodeEmbeddingCount, err := expandVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodeEmbeddingCount(original["leaf_node_embedding_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLeafNodeEmbeddingCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["leafNodeEmbeddingCount"] = transformedLeafNodeEmbeddingCount + } + + transformedLeafNodesToSearchPercent, err := expandVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodesToSearchPercent(original["leaf_nodes_to_search_percent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLeafNodesToSearchPercent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["leafNodesToSearchPercent"] = transformedLeafNodesToSearchPercent + } + + return transformed, nil +} + +func expandVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodeEmbeddingCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexMetadataConfigAlgorithmConfigTreeAhConfigLeafNodesToSearchPercent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexMetadataConfigAlgorithmConfigBruteForceConfig(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + transformed := make(map[string]interface{}) + + return transformed, nil +} + +func expandVertexAIIndexLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandVertexAIIndexIndexUpdateMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index_endpoint.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index_endpoint.go new file mode 100644 index 0000000000..7dce17b2fa --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index_endpoint.go @@ -0,0 +1,490 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package vertexai + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceVertexAIIndexEndpoint() *schema.Resource { + return &schema.Resource{ + Create: resourceVertexAIIndexEndpointCreate, + Read: resourceVertexAIIndexEndpointRead, + Update: resourceVertexAIIndexEndpointUpdate, + Delete: resourceVertexAIIndexEndpointDelete, + + Importer: &schema.ResourceImporter{ + State: resourceVertexAIIndexEndpointImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `The display name of the Index. The name can be up to 128 characters long and can consist of any UTF-8 characters.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `The description of the Index.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels with user-defined metadata to organize your Indexes.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the index endpoint should be peered. +Private services access must already be configured for the network. If left unspecified, the index endpoint is not peered with any network. 
+[Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): 'projects/{project}/global/networks/{network}'. +Where '{project}' is a project number, as in '12345', and '{network}' is network name.`, + }, + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The region of the index endpoint. eg us-central1`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the Index was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `Used to perform consistent read-modify-write updates.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the Index.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the Index was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceVertexAIIndexEndpointCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandVertexAIIndexEndpointDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandVertexAIIndexEndpointDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandVertexAIIndexEndpointLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + networkProp, err := expandVertexAIIndexEndpointNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/indexEndpoints") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new IndexEndpoint: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for IndexEndpoint: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating IndexEndpoint: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/indexEndpoints/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // 
identity fields and d.Id() before read + var opRes map[string]interface{} + err = VertexAIOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating IndexEndpoint", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create IndexEndpoint: %s", err) + } + + if err := d.Set("name", flattenVertexAIIndexEndpointName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/indexEndpoints/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating IndexEndpoint %q: %#v", d.Id(), res) + + return resourceVertexAIIndexEndpointRead(d, meta) +} + +func resourceVertexAIIndexEndpointRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/indexEndpoints/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for IndexEndpoint: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("VertexAIIndexEndpoint %q", d.Id())) + } + + if 
err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading IndexEndpoint: %s", err) + } + + if err := d.Set("name", flattenVertexAIIndexEndpointName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpoint: %s", err) + } + if err := d.Set("display_name", flattenVertexAIIndexEndpointDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpoint: %s", err) + } + if err := d.Set("description", flattenVertexAIIndexEndpointDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpoint: %s", err) + } + if err := d.Set("labels", flattenVertexAIIndexEndpointLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpoint: %s", err) + } + if err := d.Set("create_time", flattenVertexAIIndexEndpointCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpoint: %s", err) + } + if err := d.Set("update_time", flattenVertexAIIndexEndpointUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpoint: %s", err) + } + if err := d.Set("network", flattenVertexAIIndexEndpointNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading IndexEndpoint: %s", err) + } + + return nil +} + +func resourceVertexAIIndexEndpointUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for IndexEndpoint: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandVertexAIIndexEndpointDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandVertexAIIndexEndpointDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandVertexAIIndexEndpointLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/indexEndpoints/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating IndexEndpoint %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return 
fmt.Errorf("Error updating IndexEndpoint %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating IndexEndpoint %q: %#v", d.Id(), res) + } + + return resourceVertexAIIndexEndpointRead(d, meta) +} + +func resourceVertexAIIndexEndpointDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for IndexEndpoint: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/indexEndpoints/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting IndexEndpoint %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "IndexEndpoint") + } + + err = VertexAIOperationWaitTime( + config, res, project, "Deleting IndexEndpoint", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting IndexEndpoint %q: %#v", d.Id(), res) + return nil +} + +func resourceVertexAIIndexEndpointImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/indexEndpoints/(?P[^/]+)", + 
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/indexEndpoints/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenVertexAIIndexEndpointName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenVertexAIIndexEndpointDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIIndexEndpointNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandVertexAIIndexEndpointDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIIndexEndpointLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == 
nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandVertexAIIndexEndpointNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index_endpoint_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index_endpoint_sweeper.go new file mode 100644 index 0000000000..7fe72d71a5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index_endpoint_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package vertexai + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("VertexAIIndexEndpoint", testSweepVertexAIIndexEndpoint) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepVertexAIIndexEndpoint(region string) error { + resourceName := "VertexAIIndexEndpoint" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://{{region}}-aiplatform.googleapis.com/v1/projects/{{project}}/locations/{{region}}/indexEndpoints", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != 
nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["indexEndpoints"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://{{region}}-aiplatform.googleapis.com/v1/projects/{{project}}/locations/{{region}}/indexEndpoints/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index_sweeper.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index_sweeper.go new file mode 100644 index 0000000000..8c241f1744 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_index_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package vertexai + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("VertexAIIndex", testSweepVertexAIIndex) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepVertexAIIndex(region string) error { + resourceName := "VertexAIIndex" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := 
&testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://{{region}}-aiplatform.googleapis.com/v1/projects/{{project}}/locations/{{region}}/indexes", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["indices"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://{{region}}-aiplatform.googleapis.com/v1/projects/{{project}}/locations/{{region}}/indexes/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_tensorboard.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_tensorboard.go new file mode 100644 index 0000000000..dba3bc9609 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/resource_vertex_ai_tensorboard.go @@ -0,0 +1,560 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package vertexai + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceVertexAITensorboard() *schema.Resource { + return &schema.Resource{ + Create: resourceVertexAITensorboardCreate, + Read: resourceVertexAITensorboardRead, + Update: resourceVertexAITensorboardUpdate, + Delete: resourceVertexAITensorboardDelete, + + Importer: &schema.ResourceImporter{ + State: resourceVertexAITensorboardImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `User provided name of this Tensorboard.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of this Tensorboard.`, + }, + "encryption_spec": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Customer-managed encryption key spec for a Tensorboard. 
If set, this Tensorboard and all sub-resources of this Tensorboard will be secured by this key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. +Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. The key needs to be in the same region as where the resource is created.`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels with user-defined metadata to organize your Tensorboards.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The region of the tensorboard. eg us-central1`, + }, + "blob_storage_path_prefix": { + Type: schema.TypeString, + Computed: true, + Description: `Consumer project Cloud Storage path prefix used to store blob data, which can either be a bucket or directory. 
Does not end with a '/'.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the Tensorboard was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Name of the Tensorboard.`, + }, + "run_count": { + Type: schema.TypeString, + Computed: true, + Description: `The number of Runs stored in this Tensorboard.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the Tensorboard was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceVertexAITensorboardCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandVertexAITensorboardDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandVertexAITensorboardDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + encryptionSpecProp, err := expandVertexAITensorboardEncryptionSpec(d.Get("encryption_spec"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("encryption_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionSpecProp)) && (ok || !reflect.DeepEqual(v, encryptionSpecProp)) { + obj["encryptionSpec"] = encryptionSpecProp + } + labelsProp, err := expandVertexAITensorboardLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/tensorboards") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Tensorboard: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Tensorboard: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Tensorboard: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = VertexAIOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Tensorboard", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Tensorboard: 
%s", err) + } + + if err := d.Set("name", flattenVertexAITensorboardName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Tensorboard %q: %#v", d.Id(), res) + + return resourceVertexAITensorboardRead(d, meta) +} + +func resourceVertexAITensorboardRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Tensorboard: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("VertexAITensorboard %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Tensorboard: %s", err) + } + + if err := d.Set("name", flattenVertexAITensorboardName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Tensorboard: %s", err) + } + if err := d.Set("display_name", flattenVertexAITensorboardDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Tensorboard: %s", err) + } + if err := d.Set("description", 
flattenVertexAITensorboardDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Tensorboard: %s", err) + } + if err := d.Set("encryption_spec", flattenVertexAITensorboardEncryptionSpec(res["encryptionSpec"], d, config)); err != nil { + return fmt.Errorf("Error reading Tensorboard: %s", err) + } + if err := d.Set("blob_storage_path_prefix", flattenVertexAITensorboardBlobStoragePathPrefix(res["blobStoragePathPrefix"], d, config)); err != nil { + return fmt.Errorf("Error reading Tensorboard: %s", err) + } + if err := d.Set("run_count", flattenVertexAITensorboardRunCount(res["runCount"], d, config)); err != nil { + return fmt.Errorf("Error reading Tensorboard: %s", err) + } + if err := d.Set("create_time", flattenVertexAITensorboardCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Tensorboard: %s", err) + } + if err := d.Set("update_time", flattenVertexAITensorboardUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Tensorboard: %s", err) + } + if err := d.Set("labels", flattenVertexAITensorboardLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Tensorboard: %s", err) + } + + return nil +} + +func resourceVertexAITensorboardUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Tensorboard: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandVertexAITensorboardDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandVertexAITensorboardDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandVertexAITensorboardLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Tensorboard %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Tensorboard %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Tensorboard %q: %#v", d.Id(), res) + } + + err = 
VertexAIOperationWaitTime( + config, res, project, "Updating Tensorboard", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceVertexAITensorboardRead(d, meta) +} + +func resourceVertexAITensorboardDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Tensorboard: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Tensorboard %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Tensorboard") + } + + err = VertexAIOperationWaitTime( + config, res, project, "Deleting Tensorboard", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Tensorboard %q: %#v", d.Id(), res) + return nil +} + +func resourceVertexAITensorboardImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/tensorboards/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil 
{ + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/tensorboards/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + if err := d.Set("name", id); err != nil { + return nil, fmt.Errorf("Error setting name for import: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenVertexAITensorboardName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAITensorboardDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAITensorboardDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAITensorboardEncryptionSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenVertexAITensorboardEncryptionSpecKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenVertexAITensorboardEncryptionSpecKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAITensorboardBlobStoragePathPrefix(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAITensorboardRunCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAITensorboardCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAITensorboardUpdateTime(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAITensorboardLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandVertexAITensorboardDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAITensorboardDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAITensorboardEncryptionSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandVertexAITensorboardEncryptionSpecKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandVertexAITensorboardEncryptionSpecKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAITensorboardLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/vertex_ai_operation.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/vertex_ai_operation.go new file mode 100644 index 0000000000..cea840bacc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vertexai/vertex_ai_operation.go @@ -0,0 +1,75 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package vertexai + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type VertexAIOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *VertexAIOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + + region := tpgresource.GetRegionFromRegionalSelfLink(w.CommonOperationWaiter.Op.Name) + + // Returns the proper get. 
+ url := fmt.Sprintf("https://%s-aiplatform.googleapis.com/v1/%s", region, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createVertexAIWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*VertexAIOperationWaiter, error) { + w := &VertexAIOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func VertexAIOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createVertexAIWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func VertexAIOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createVertexAIWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/data_source_vpc_access_connector.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/data_source_vpc_access_connector.go new file mode 100644 index 0000000000..4898cf21cd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/data_source_vpc_access_connector.go @@ -0,0 +1,36 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package vpcaccess + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceVPCAccessConnector() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceVPCAccessConnector().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project", "region") + + return &schema.Resource{ + Read: dataSourceVPCAccessConnectorRead, + Schema: dsSchema, + } +} + +func dataSourceVPCAccessConnectorRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/connectors/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + + d.SetId(id) + + return resourceVPCAccessConnectorRead(d, meta) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/resource_vpc_access_connector.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/resource_vpc_access_connector.go new file mode 100644 index 0000000000..bd5115f48a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/resource_vpc_access_connector.go @@ -0,0 +1,794 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package vpcaccess + +import ( + "context" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Are the number of min/max instances reduced? 
+func AreInstancesReduced(_ context.Context, old, new, _ interface{}) bool { + return new.(int) < old.(int) +} + +func ResourceVPCAccessConnector() *schema.Resource { + return &schema.Resource{ + Create: resourceVPCAccessConnectorCreate, + Read: resourceVPCAccessConnectorRead, + Update: resourceVPCAccessConnectorUpdate, + Delete: resourceVPCAccessConnectorDelete, + + Importer: &schema.ResourceImporter{ + State: resourceVPCAccessConnectorImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + customdiff.ForceNewIfChange("min_instances", AreInstancesReduced), + customdiff.ForceNewIfChange("max_instances", AreInstancesReduced)), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the resource (Max 25 characters).`, + }, + "ip_cidr_range": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The range of internal addresses that follows RFC 4632 notation. Example: '10.132.0.0/28'.`, + RequiredWith: []string{"network"}, + }, + "machine_type": { + Type: schema.TypeString, + Optional: true, + Description: `Machine type of VM Instance underlying connector. Default is e2-micro`, + Default: "e2-micro", + }, + "max_instances": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Maximum value of instances in autoscaling group underlying the connector.`, + }, + "max_throughput": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(300, 1000), + Description: `Maximum throughput of the connector in Mbps, must be greater than 'min_throughput'. 
Default is 1000.`, + }, + "min_instances": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Minimum value of instances in autoscaling group underlying the connector.`, + }, + "min_throughput": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(200, 900), + Description: `Minimum throughput of the connector in Mbps. Default and min is 200.`, + }, + "network": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareResourceNames, + Description: `Name or self_link of the VPC network. Required if 'ip_cidr_range' is set.`, + ExactlyOneOf: []string{"network", "subnet.0.name"}, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Region where the VPC Access connector resides. If it is not provided, the provider region is used.`, + }, + "subnet": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The subnet in which to house the connector`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Subnet name (relative, not fully qualified). E.g. if the full subnet selfLink is +https://compute.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetName} the correct input for this field would be {subnetName}"`, + ExactlyOneOf: []string{"network", "subnet.0.name"}, + }, + "project_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Project in which the subnet exists. 
If not set, this project is assumed to be the project for which the connector create request was issued.`, + }, + }, + }, + }, + "connected_projects": { + Type: schema.TypeList, + Computed: true, + Description: `List of projects using the connector.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The fully qualified name of this VPC connector`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the VPC access connector.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceVPCAccessConnectorCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandVPCAccessConnectorName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + networkProp, err := expandVPCAccessConnectorNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + ipCidrRangeProp, err := expandVPCAccessConnectorIpCidrRange(d.Get("ip_cidr_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_cidr_range"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipCidrRangeProp)) && (ok || !reflect.DeepEqual(v, ipCidrRangeProp)) { + obj["ipCidrRange"] = ipCidrRangeProp + } + machineTypeProp, err := expandVPCAccessConnectorMachineType(d.Get("machine_type"), d, config) + if err 
!= nil { + return err + } else if v, ok := d.GetOkExists("machine_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(machineTypeProp)) && (ok || !reflect.DeepEqual(v, machineTypeProp)) { + obj["machineType"] = machineTypeProp + } + minThroughputProp, err := expandVPCAccessConnectorMinThroughput(d.Get("min_throughput"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("min_throughput"); !tpgresource.IsEmptyValue(reflect.ValueOf(minThroughputProp)) && (ok || !reflect.DeepEqual(v, minThroughputProp)) { + obj["minThroughput"] = minThroughputProp + } + minInstancesProp, err := expandVPCAccessConnectorMinInstances(d.Get("min_instances"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("min_instances"); !tpgresource.IsEmptyValue(reflect.ValueOf(minInstancesProp)) && (ok || !reflect.DeepEqual(v, minInstancesProp)) { + obj["minInstances"] = minInstancesProp + } + maxInstancesProp, err := expandVPCAccessConnectorMaxInstances(d.Get("max_instances"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("max_instances"); !tpgresource.IsEmptyValue(reflect.ValueOf(maxInstancesProp)) && (ok || !reflect.DeepEqual(v, maxInstancesProp)) { + obj["maxInstances"] = maxInstancesProp + } + maxThroughputProp, err := expandVPCAccessConnectorMaxThroughput(d.Get("max_throughput"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("max_throughput"); !tpgresource.IsEmptyValue(reflect.ValueOf(maxThroughputProp)) && (ok || !reflect.DeepEqual(v, maxThroughputProp)) { + obj["maxThroughput"] = maxThroughputProp + } + subnetProp, err := expandVPCAccessConnectorSubnet(d.Get("subnet"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subnet"); !tpgresource.IsEmptyValue(reflect.ValueOf(subnetProp)) && (ok || !reflect.DeepEqual(v, subnetProp)) { + obj["subnet"] = subnetProp + } + + obj, err = resourceVPCAccessConnectorEncoder(d, meta, obj) + if err != nil { + return err 
+ } + + url, err := tpgresource.ReplaceVars(d, config, "{{VPCAccessBasePath}}projects/{{project}}/locations/{{region}}/connectors?connectorId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Connector: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Connector: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Connector: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/connectors/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = VPCAccessOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Connector", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Connector: %s", err) + } + + opRes, err = resourceVPCAccessConnectorDecoder(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error decoding response from operation: %s", err) + } + if opRes == nil { + return fmt.Errorf("Error decoding response from operation, could not find object") + } + + if err := d.Set("name", flattenVPCAccessConnectorName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have 
caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/connectors/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // This is useful if the resource in question doesn't have a perfectly consistent API + // That is, the Operation for Create might return before the Get operation shows the + // completed state of the resource. + time.Sleep(5 * time.Second) + + log.Printf("[DEBUG] Finished creating Connector %q: %#v", d.Id(), res) + + return resourceVPCAccessConnectorRead(d, meta) +} + +func resourceVPCAccessConnectorRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VPCAccessBasePath}}projects/{{project}}/locations/{{region}}/connectors/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Connector: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("VPCAccessConnector %q", d.Id())) + } + + res, err = resourceVPCAccessConnectorDecoder(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing VPCAccessConnector because it no longer exists.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Connector: %s", err) + } + + if err := d.Set("name", flattenVPCAccessConnectorName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Connector: %s", err) + } + if err := d.Set("network", flattenVPCAccessConnectorNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Connector: %s", err) + } + if err := d.Set("ip_cidr_range", flattenVPCAccessConnectorIpCidrRange(res["ipCidrRange"], d, config)); err != nil { + return fmt.Errorf("Error reading Connector: %s", err) + } + if err := d.Set("state", flattenVPCAccessConnectorState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Connector: %s", err) + } + if err := d.Set("machine_type", flattenVPCAccessConnectorMachineType(res["machineType"], d, config)); err != nil { + return fmt.Errorf("Error reading Connector: %s", err) + } + if err := d.Set("min_throughput", flattenVPCAccessConnectorMinThroughput(res["minThroughput"], d, config)); err != nil { + return fmt.Errorf("Error reading Connector: %s", err) + } + if err := d.Set("min_instances", flattenVPCAccessConnectorMinInstances(res["minInstances"], d, config)); err != nil { + return fmt.Errorf("Error reading Connector: %s", err) + } + if err := d.Set("max_instances", flattenVPCAccessConnectorMaxInstances(res["maxInstances"], d, config)); err != nil { + return fmt.Errorf("Error reading Connector: %s", err) + } + if err := d.Set("max_throughput", flattenVPCAccessConnectorMaxThroughput(res["maxThroughput"], d, config)); err != nil { + return fmt.Errorf("Error reading Connector: %s", err) + } + if err := d.Set("connected_projects", flattenVPCAccessConnectorConnectedProjects(res["connectedProjects"], d, config)); err != nil { + return fmt.Errorf("Error reading Connector: %s", 
err) + } + if err := d.Set("subnet", flattenVPCAccessConnectorSubnet(res["subnet"], d, config)); err != nil { + return fmt.Errorf("Error reading Connector: %s", err) + } + + return nil +} + +func resourceVPCAccessConnectorUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Connector: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + machineTypeProp, err := expandVPCAccessConnectorMachineType(d.Get("machine_type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("machine_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, machineTypeProp)) { + obj["machineType"] = machineTypeProp + } + minInstancesProp, err := expandVPCAccessConnectorMinInstances(d.Get("min_instances"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("min_instances"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, minInstancesProp)) { + obj["minInstances"] = minInstancesProp + } + maxInstancesProp, err := expandVPCAccessConnectorMaxInstances(d.Get("max_instances"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("max_instances"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maxInstancesProp)) { + obj["maxInstances"] = maxInstancesProp + } + + obj, err = resourceVPCAccessConnectorEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VPCAccessBasePath}}projects/{{project}}/locations/{{region}}/connectors/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Connector %q: %#v", d.Id(), obj) + updateMask := 
[]string{} + + if d.HasChange("machine_type") { + updateMask = append(updateMask, "machineType") + } + + if d.HasChange("min_instances") { + updateMask = append(updateMask, "minInstances") + } + + if d.HasChange("max_instances") { + updateMask = append(updateMask, "maxInstances") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Connector %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Connector %q: %#v", d.Id(), res) + } + + err = VPCAccessOperationWaitTime( + config, res, project, "Updating Connector", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceVPCAccessConnectorRead(d, meta) +} + +func resourceVPCAccessConnectorDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Connector: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{VPCAccessBasePath}}projects/{{project}}/locations/{{region}}/connectors/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + 
log.Printf("[DEBUG] Deleting Connector %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Connector") + } + + err = VPCAccessOperationWaitTime( + config, res, project, "Deleting Connector", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Connector %q: %#v", d.Id(), res) + return nil +} + +func resourceVPCAccessConnectorImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<region>[^/]+)/connectors/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<name>[^/]+)", + "(?P<region>[^/]+)/(?P<name>[^/]+)", + "(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/connectors/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenVPCAccessConnectorName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenVPCAccessConnectorNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenVPCAccessConnectorIpCidrRange(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + return v +} + +func flattenVPCAccessConnectorState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVPCAccessConnectorMachineType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVPCAccessConnectorMinThroughput(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVPCAccessConnectorMinInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVPCAccessConnectorMaxInstances(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVPCAccessConnectorMaxThroughput(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := 
v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVPCAccessConnectorConnectedProjects(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVPCAccessConnectorSubnet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenVPCAccessConnectorSubnetName(original["name"], d, config) + transformed["project_id"] = + flattenVPCAccessConnectorSubnetProjectId(original["projectId"], d, config) + return []interface{}{transformed} +} +func flattenVPCAccessConnectorSubnetName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVPCAccessConnectorSubnetProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandVPCAccessConnectorName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVPCAccessConnectorNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return tpgresource.GetResourceNameFromSelfLink(v.(string)), nil +} + +func expandVPCAccessConnectorIpCidrRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVPCAccessConnectorMachineType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandVPCAccessConnectorMinThroughput(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVPCAccessConnectorMinInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVPCAccessConnectorMaxInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVPCAccessConnectorMaxThroughput(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVPCAccessConnectorSubnet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandVPCAccessConnectorSubnetName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedProjectId, err := expandVPCAccessConnectorSubnetProjectId(original["project_id"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectId"] = transformedProjectId + } + + return transformed, nil +} + +func expandVPCAccessConnectorSubnetName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVPCAccessConnectorSubnetProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceVPCAccessConnectorEncoder(d 
*schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + delete(obj, "name") + return obj, nil +} + +func resourceVPCAccessConnectorDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + // Take the returned long form of the name and use it as `self_link`. + // Then modify the name to be the user specified form. + // We can't just ignore_read on `name` as the linter will + // complain that the returned `res` is never used afterwards. + // Some field needs to be actually set, and we chose `name`. + if err := d.Set("self_link", res["name"].(string)); err != nil { + return nil, fmt.Errorf("Error setting self_link: %s", err) + } + res["name"] = d.Get("name").(string) + return res, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/resource_vpc_access_connector_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/resource_vpc_access_connector_sweeper.go new file mode 100644 index 0000000000..fe9727e2ec --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/resource_vpc_access_connector_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package vpcaccess + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("VPCAccessConnector", testSweepVPCAccessConnector) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepVPCAccessConnector(region string) error { + resourceName := "VPCAccessConnector" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://vpcaccess.googleapis.com/v1/projects/{{project}}/locations/{{region}}/connectors", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["connectors"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://vpcaccess.googleapis.com/v1/projects/{{project}}/locations/{{region}}/connectors/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/vpc_access_operation.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/vpc_access_operation.go new file mode 100644 index 0000000000..20eee3887a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/vpcaccess/vpc_access_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package vpcaccess + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type VPCAccessOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *VPCAccessOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.VPCAccessBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createVPCAccessWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*VPCAccessOperationWaiter, error) { + w := &VPCAccessOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func VPCAccessOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createVPCAccessWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func VPCAccessOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createVPCAccessWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workflows/resource_workflows_workflow.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workflows/resource_workflows_workflow.go new file mode 100644 index 0000000000..f6da1e0273 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workflows/resource_workflows_workflow.go @@ -0,0 +1,690 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package workflows + +import ( + "context" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func ResourceWorkflowsWorkflow() *schema.Resource { + return &schema.Resource{ + Create: resourceWorkflowsWorkflowCreate, + Read: resourceWorkflowsWorkflowRead, + Update: resourceWorkflowsWorkflowUpdate, + Delete: resourceWorkflowsWorkflowDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + SchemaVersion: 1, + + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceWorkflowsWorkflowResourceV0().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceWorkflowsWorkflowUpgradeV0, + Version: 0, + }, + }, + + Schema: map[string]*schema.Schema{ + "crypto_key_name": { + Type: schema.TypeString, + Optional: true, + Description: `The KMS key used to encrypt workflow and execution data. + +Format: projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey}`, + }, + "description": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Description of the workflow provided by the user. 
Must be at most 1000 unicode characters long.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `A set of key/value label pairs to assign to this Workflow.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Name of the Workflow.`, + }, + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The region of the workflow.`, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of the service account associated with the latest workflow version. This service +account represents the identity of the workflow and determines what permissions the workflow has. +Format: projects/{project}/serviceAccounts/{account} or {account}. +Using - as a wildcard for the {project} or not providing one at all will infer the project from the account. +The {account} value can be the email address or the unique_id of the service account. +If not provided, workflow will use the project's default service account. +Modifying this field for an existing workflow results in a new workflow revision.`, + }, + "source_contents": { + Type: schema.TypeString, + Optional: true, + Description: `Workflow code to be executed. The size limit is 32KB.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the workflow was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "revision_id": { + Type: schema.TypeString, + Computed: true, + Description: `The revision of the workflow. 
A new one is generated if the service account or source contents is changed.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the workflow deployment.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the workflow was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "name_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name"}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceWorkflowsWorkflowCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandWorkflowsWorkflowName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandWorkflowsWorkflowDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandWorkflowsWorkflowLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + serviceAccountProp, err := expandWorkflowsWorkflowServiceAccount(d.Get("service_account"), d, config) + if err 
!= nil { + return err + } else if v, ok := d.GetOkExists("service_account"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceAccountProp)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { + obj["serviceAccount"] = serviceAccountProp + } + sourceContentsProp, err := expandWorkflowsWorkflowSourceContents(d.Get("source_contents"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_contents"); !tpgresource.IsEmptyValue(reflect.ValueOf(sourceContentsProp)) && (ok || !reflect.DeepEqual(v, sourceContentsProp)) { + obj["sourceContents"] = sourceContentsProp + } + cryptoKeyNameProp, err := expandWorkflowsWorkflowCryptoKeyName(d.Get("crypto_key_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("crypto_key_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(cryptoKeyNameProp)) && (ok || !reflect.DeepEqual(v, cryptoKeyNameProp)) { + obj["cryptoKeyName"] = cryptoKeyNameProp + } + + obj, err = resourceWorkflowsWorkflowEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{WorkflowsBasePath}}projects/{{project}}/locations/{{region}}/workflows?workflowId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Workflow: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Workflow: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Workflow: %s", err) + } + + // Store the ID now + id, 
err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/workflows/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = WorkflowsOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Workflow", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Workflow: %s", err) + } + + if err := d.Set("name", flattenWorkflowsWorkflowName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/workflows/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Workflow %q: %#v", d.Id(), res) + + return resourceWorkflowsWorkflowRead(d, meta) +} + +func resourceWorkflowsWorkflowRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{WorkflowsBasePath}}projects/{{project}}/locations/{{region}}/workflows/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Workflow: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: 
config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("WorkflowsWorkflow %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Workflow: %s", err) + } + + if err := d.Set("name", flattenWorkflowsWorkflowName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Workflow: %s", err) + } + if err := d.Set("description", flattenWorkflowsWorkflowDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Workflow: %s", err) + } + if err := d.Set("create_time", flattenWorkflowsWorkflowCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Workflow: %s", err) + } + if err := d.Set("update_time", flattenWorkflowsWorkflowUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Workflow: %s", err) + } + if err := d.Set("state", flattenWorkflowsWorkflowState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Workflow: %s", err) + } + if err := d.Set("labels", flattenWorkflowsWorkflowLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Workflow: %s", err) + } + if err := d.Set("service_account", flattenWorkflowsWorkflowServiceAccount(res["serviceAccount"], d, config)); err != nil { + return fmt.Errorf("Error reading Workflow: %s", err) + } + if err := d.Set("source_contents", flattenWorkflowsWorkflowSourceContents(res["sourceContents"], d, config)); err != nil { + return fmt.Errorf("Error reading Workflow: %s", err) + } + if err := d.Set("revision_id", flattenWorkflowsWorkflowRevisionId(res["revisionId"], d, config)); err != nil { + return fmt.Errorf("Error reading Workflow: %s", err) + } + if err := d.Set("crypto_key_name", flattenWorkflowsWorkflowCryptoKeyName(res["cryptoKeyName"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Workflow: %s", err) + } + + return nil +} + +func resourceWorkflowsWorkflowUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Workflow: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandWorkflowsWorkflowDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandWorkflowsWorkflowLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + serviceAccountProp, err := expandWorkflowsWorkflowServiceAccount(d.Get("service_account"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("service_account"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { + obj["serviceAccount"] = serviceAccountProp + } + sourceContentsProp, err := expandWorkflowsWorkflowSourceContents(d.Get("source_contents"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_contents"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceContentsProp)) { + obj["sourceContents"] = sourceContentsProp + } + cryptoKeyNameProp, err := expandWorkflowsWorkflowCryptoKeyName(d.Get("crypto_key_name"), d, config) + if err != nil { + return err + } else if v, ok := 
d.GetOkExists("crypto_key_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, cryptoKeyNameProp)) { + obj["cryptoKeyName"] = cryptoKeyNameProp + } + + obj, err = resourceWorkflowsWorkflowEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{WorkflowsBasePath}}projects/{{project}}/locations/{{region}}/workflows/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Workflow %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("service_account") { + updateMask = append(updateMask, "serviceAccount") + } + + if d.HasChange("source_contents") { + updateMask = append(updateMask, "sourceContents") + } + + if d.HasChange("crypto_key_name") { + updateMask = append(updateMask, "cryptoKeyName") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Workflow %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Workflow %q: %#v", d.Id(), res) + } + + err = WorkflowsOperationWaitTime( + config, res, project, "Updating Workflow", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return 
resourceWorkflowsWorkflowRead(d, meta) +} + +func resourceWorkflowsWorkflowDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Workflow: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{WorkflowsBasePath}}projects/{{project}}/locations/{{region}}/workflows/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Workflow %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Workflow") + } + + err = WorkflowsOperationWaitTime( + config, res, project, "Deleting Workflow", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Workflow %q: %#v", d.Id(), res) + return nil +} + +func flattenWorkflowsWorkflowName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenWorkflowsWorkflowDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenWorkflowsWorkflowCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenWorkflowsWorkflowUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenWorkflowsWorkflowState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenWorkflowsWorkflowLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenWorkflowsWorkflowServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenWorkflowsWorkflowSourceContents(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenWorkflowsWorkflowRevisionId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenWorkflowsWorkflowCryptoKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandWorkflowsWorkflowName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandWorkflowsWorkflowDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandWorkflowsWorkflowLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandWorkflowsWorkflowServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandWorkflowsWorkflowSourceContents(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandWorkflowsWorkflowCryptoKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func resourceWorkflowsWorkflowEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + var ResName string + if v, ok := d.GetOk("name"); ok { + ResName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + ResName = resource.PrefixedUniqueId(v.(string)) + } else { + ResName = resource.UniqueId() + } + + if err := d.Set("name", ResName); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + return obj, nil +} + +func resourceWorkflowsWorkflowResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Description of the workflow provided by the user. Must be at most 1000 unicode characters long.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `A set of key/value label pairs to assign to this Workflow.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Name of the Workflow.`, + }, + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The region of the workflow.`, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of the service account associated with the latest workflow version. This service +account represents the identity of the workflow and determines what permissions the workflow has. 
+ +Format: projects/{project}/serviceAccounts/{account}.`, + }, + "source_contents": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Workflow code to be executed. The size limit is 32KB.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the workflow was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "revision_id": { + Type: schema.TypeString, + Computed: true, + Description: `The revision of the workflow. A new one is generated if the service account or source contents is changed.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the workflow deployment.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp of when the workflow was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, + }, + "name_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name"}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func ResourceWorkflowsWorkflowUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", rawState) + + rawState["name"] = tpgresource.GetResourceNameFromSelfLink(rawState["name"].(string)) + + log.Printf("[DEBUG] Attributes after migration: %#v", rawState) + return rawState, nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workflows/resource_workflows_workflow_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workflows/resource_workflows_workflow_sweeper.go new file mode 100644 index 0000000000..8d860d2742 
--- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workflows/resource_workflows_workflow_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package workflows + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("WorkflowsWorkflow", testSweepWorkflowsWorkflow) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepWorkflowsWorkflow(region string) error { + resourceName := "WorkflowsWorkflow" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: 
map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://workflows.googleapis.com/v1/projects/{{project}}/locations/{{region}}/workflows", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["workflows"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://workflows.googleapis.com/v1/projects/{{project}}/locations/{{region}}/workflows/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workflows/workflows_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workflows/workflows_operation.go new file mode 100644 index 0000000000..9c7f16ea45 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/services/workflows/workflows_operation.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package workflows + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type WorkflowsOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *WorkflowsOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.WorkflowsBasePath, w.CommonOperationWaiter.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createWorkflowsWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*WorkflowsOperationWaiter, error) { + w := &WorkflowsOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func WorkflowsOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createWorkflowsWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func WorkflowsOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createWorkflowsWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/serviceusage_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/serviceusage_operation.go deleted file mode 100644 index dca1576490..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/serviceusage_operation.go +++ /dev/null @@ -1,38 +0,0 @@ -package google - -import ( - "encoding/json" - "time" - - "google.golang.org/api/googleapi" - "google.golang.org/api/serviceusage/v1" -) - -func serviceUsageOperationWait(config *Config, op *serviceusage.Operation, project, activity, userAgent string, timeout time.Duration) error { - // maintained for compatibility with old code that was written before the - // autogenerated waiters. - b, err := op.MarshalJSON() - if err != nil { - return err - } - var m map[string]interface{} - if err := json.Unmarshal(b, &m); err != nil { - return err - } - return ServiceUsageOperationWaitTime(config, m, project, activity, userAgent, timeout) -} - -func handleServiceUsageRetryableError(err error) error { - if err == nil { - return nil - } - if gerr, ok := err.(*googleapi.Error); ok { - if (gerr.Code == 400 || gerr.Code == 412) && gerr.Message == "Precondition check failed." 
{ - return &googleapi.Error{ - Code: 503, - Message: "api returned \"precondition failed\" while enabling service", - } - } - } - return err -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/source_repo_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/source_repo_utils.go deleted file mode 100644 index 48229bb247..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/source_repo_utils.go +++ /dev/null @@ -1,22 +0,0 @@ -package google - -import "regexp" - -func expandSourceRepoRepositoryPubsubConfigsTopic(v interface{}, d TerraformResourceData, config *Config) (string, error) { - // short-circuit if the topic is a full uri so we don't need to getProject - ok, err := regexp.MatchString(PubsubTopicRegex, v.(string)) - if err != nil { - return "", err - } - - if ok { - return v.(string), nil - } - - project, err := getProject(d, config) - if err != nil { - return "", err - } - - return getComputedTopicName(project, v.(string)), err -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/spanner_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/spanner_operation.go deleted file mode 100644 index 6ff6d20cd9..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/spanner_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. 
-// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type SpannerOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *SpannerOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. - url := fmt.Sprintf("%s%s", w.Config.SpannerBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createSpannerWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*SpannerOperationWaiter, error) { - w := &SpannerOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func SpannerOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createSpannerWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func SpannerOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createSpannerWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/sql_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/sql_utils.go deleted file mode 100644 index 1924b4952f..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/sql_utils.go +++ /dev/null @@ -1,26 +0,0 @@ -package google - -import ( - "log" - "strings" - - "github.com/hashicorp/errwrap" - "google.golang.org/api/googleapi" -) - -func transformSQLDatabaseReadError(err error) error { - if gErr, ok := errwrap.GetType(err, &googleapi.Error{}).(*googleapi.Error); ok { - if gErr.Code == 400 && strings.Contains(gErr.Message, "Invalid request since instance is not running") { - // This error occurs when attempting a GET after deleting the sql database and sql instance. It leads to to - // inconsistent behavior as handleNotFoundError(...) expects an error code of 404 when a resource does not - // exist. To get the desired behavior from handleNotFoundError, modify the return code to 404 so that - // handleNotFoundError(...) 
will treat this as a NotFound error - gErr.Code = 404 - } - - log.Printf("[DEBUG] Transformed SQLDatabase error") - return gErr - } - - return err -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/sqladmin_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/sqladmin_operation.go deleted file mode 100644 index c79901e6c8..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/sqladmin_operation.go +++ /dev/null @@ -1,133 +0,0 @@ -package google - -import ( - "bytes" - "fmt" - "log" - "time" - - sqladmin "google.golang.org/api/sqladmin/v1beta4" -) - -type SqlAdminOperationWaiter struct { - Service *sqladmin.Service - Op *sqladmin.Operation - Project string -} - -func (w *SqlAdminOperationWaiter) State() string { - if w == nil { - return "Operation Waiter is nil!" - } - - if w.Op == nil { - return "Operation is nil!" - } - - return w.Op.Status -} - -func (w *SqlAdminOperationWaiter) Error() error { - if w != nil && w.Op != nil && w.Op.Error != nil { - return SqlAdminOperationError(*w.Op.Error) - } - return nil -} - -func (w *SqlAdminOperationWaiter) IsRetryable(error) bool { - return false -} - -func (w *SqlAdminOperationWaiter) SetOp(op interface{}) error { - if op == nil { - // Starting as a log statement, this may be a useful error in the future - log.Printf("[DEBUG] attempted to set nil op") - } - - sqlOp, ok := op.(*sqladmin.Operation) - w.Op = sqlOp - if !ok { - return fmt.Errorf("Unable to set operation. 
Bad type!") - } - - return nil -} - -func (w *SqlAdminOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, waiter is unset or nil.") - } - - if w.Op == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - if w.Service == nil { - return nil, fmt.Errorf("Cannot query operation, service is nil.") - } - - var op interface{} - var err error - err = RetryTimeDuration( - func() error { - op, err = w.Service.Operations.Get(w.Project, w.Op.Name).Do() - return err - }, - - DefaultRequestTimeout, - ) - - return op, err -} - -func (w *SqlAdminOperationWaiter) OpName() string { - if w == nil { - return "" - } - - if w.Op == nil { - return "" - } - - return w.Op.Name -} - -func (w *SqlAdminOperationWaiter) PendingStates() []string { - return []string{"PENDING", "RUNNING"} -} - -func (w *SqlAdminOperationWaiter) TargetStates() []string { - return []string{"DONE"} -} - -func SqlAdminOperationWaitTime(config *Config, res interface{}, project, activity, userAgent string, timeout time.Duration) error { - op := &sqladmin.Operation{} - err := Convert(res, op) - if err != nil { - return err - } - - w := &SqlAdminOperationWaiter{ - Service: config.NewSqlAdminClient(userAgent), - Op: op, - Project: project, - } - if err := w.SetOp(op); err != nil { - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -// SqlAdminOperationError wraps sqladmin.OperationError and implements the -// error interface so it can be returned. 
-type SqlAdminOperationError sqladmin.OperationErrors - -func (e SqlAdminOperationError) Error() string { - var buf bytes.Buffer - - for _, err := range e.Errors { - buf.WriteString(err.Message + "\n") - } - - return buf.String() -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/stateful_mig_polling.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/stateful_mig_polling.go deleted file mode 100644 index d247a2dcaf..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/stateful_mig_polling.go +++ /dev/null @@ -1,149 +0,0 @@ -package google - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// PerInstanceConfig needs both regular operation polling AND custom polling for deletion which is why this is not generated -func resourceComputePerInstanceConfigPollRead(d *schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/listPerInstanceConfigs") - if err != nil { - return nil, err - } - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - res, err := SendRequest(config, "POST", project, url, userAgent, nil) - if err != nil { - return res, err - } - res, err = flattenNestedComputePerInstanceConfig(d, meta, res) - if err != nil { - return nil, err - } - - // Returns nil res if nested object is not found - return res, nil - } -} - -// RegionPerInstanceConfig needs both regular operation polling AND custom polling for deletion which is why this is not generated -func resourceComputeRegionPerInstanceConfigPollRead(d 
*schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return nil, err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/listPerInstanceConfigs") - if err != nil { - return nil, err - } - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - res, err := SendRequest(config, "POST", project, url, userAgent, nil) - if err != nil { - return res, err - } - res, err = flattenNestedComputeRegionPerInstanceConfig(d, meta, res) - if err != nil { - return nil, err - } - - // Returns nil res if nested object is not found - return res, nil - } -} - -// Returns an instance name in the form zones/{zone}/instances/{instance} for the managed -// instance matching the name of a PerInstanceConfig -func findInstanceName(d *schema.ResourceData, config *Config) (string, error) { - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/listManagedInstances") - if err != nil { - return "", err - } - - userAgent, err := generateUserAgentString(d, config.UserAgent) - if err != nil { - return "", err - } - - project, err := getProject(d, config) - if err != nil { - return "", err - } - instanceNameToFind := fmt.Sprintf("/%s", d.Get("name").(string)) - - token := "" - for paginate := true; paginate; { - urlWithToken := "" - if token != "" { - urlWithToken = fmt.Sprintf("%s?maxResults=1&pageToken=%s", url, token) - } else { - urlWithToken = fmt.Sprintf("%s?maxResults=1", url) - } - res, err := SendRequest(config, "POST", project, urlWithToken, userAgent, nil) - if err != nil { - return "", err - } - - managedInstances, ok := res["managedInstances"] - if !ok { - return "", fmt.Errorf("Failed to parse 
response for listManagedInstances for %s", d.Id()) - } - - managedInstancesArr := managedInstances.([]interface{}) - for _, managedInstanceRaw := range managedInstancesArr { - instance := managedInstanceRaw.(map[string]interface{}) - name, ok := instance["instance"] - if !ok { - return "", fmt.Errorf("Failed to read instance name for managed instance: %#v", instance) - } - if strings.HasSuffix(name.(string), instanceNameToFind) { - return name.(string), nil - } - } - - tokenRaw, paginate := res["nextPageToken"] - if paginate { - token = tokenRaw.(string) - } - } - - return "", fmt.Errorf("Failed to find managed instance with name: %s", instanceNameToFind) -} - -func PollCheckInstanceConfigDeleted(resp map[string]interface{}, respErr error) PollResult { - if respErr != nil { - return ErrorPollResult(respErr) - } - - // Nested object 404 appears as nil response - if resp == nil { - // Config no longer exists - return SuccessPollResult() - } - - // Read status - status := resp["status"].(string) - if status == "DELETING" { - return PendingStatusPollResult("Still deleting") - } - return ErrorPollResult(fmt.Errorf("Expected PerInstanceConfig to be deleting but status is: %s", status)) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/sweeper/gcp_sweeper.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/sweeper/gcp_sweeper.go new file mode 100644 index 0000000000..d4252497c0 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/sweeper/gcp_sweeper.go @@ -0,0 +1,76 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package sweeper + +import ( + "encoding/hex" + "fmt" + "hash/crc32" + "runtime" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// List of prefixes used for test resource names +var testResourcePrefixes = []string{ + // tf-test and tf_test are automatically prepended to resource ids in examples that + // include a "-" or "_" respectively, and they are the preferred prefix for our test resources to use + "tf-test", + "tf_test", + "tfgen", + "gke-us-central1-tf", // composer-created disks which are abandoned by design (https://cloud.google.com/composer/pricing) + "gcs-bucket-tf-test-", // https://github.com/hashicorp/terraform-provider-google/issues/8909 + "df-", // https://github.com/hashicorp/terraform-provider-google/issues/8909 + "resourcegroup-", // https://github.com/hashicorp/terraform-provider-google/issues/8924 + "cluster-", // https://github.com/hashicorp/terraform-provider-google/issues/8924 + "k8s-fw-", // firewall rules are getting created and not cleaned up by k8 resources using this prefix +} + +// SharedConfigForRegion returns a common config setup needed for the sweeper +// functions for a given region +func SharedConfigForRegion(region string) (*transport_tpg.Config, error) { + project := envvar.GetTestProjectFromEnv() + if project == "" { + return nil, fmt.Errorf("set project using any of these env variables %v", envvar.ProjectEnvVars) + } + + if v := transport_tpg.MultiEnvSearch(envvar.CredsEnvVars); v == "" { + return nil, fmt.Errorf("set credentials using any of these env variables %v", envvar.CredsEnvVars) + } + + conf := &transport_tpg.Config{ + Credentials: envvar.GetTestCredsFromEnv(), + Region: region, + Project: project, + } + + transport_tpg.ConfigureBasePaths(conf) + + return conf, nil +} + +func 
IsSweepableTestResource(resourceName string) bool { + for _, p := range testResourcePrefixes { + if strings.HasPrefix(resourceName, p) { + return true + } + } + return false +} + +func AddTestSweepers(name string, sweeper func(region string) error) { + _, filename, _, _ := runtime.Caller(0) + hash := crc32.NewIEEE() + hash.Write([]byte(filename)) + hashedFilename := hex.EncodeToString(hash.Sum(nil)) + uniqueName := name + "_" + hashedFilename + + resource.AddTestSweepers(uniqueName, &resource.Sweeper{ + Name: name, + F: sweeper, + }) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tags_location_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tags_location_operation.go deleted file mode 100644 index c10f336dfe..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tags_location_operation.go +++ /dev/null @@ -1,76 +0,0 @@ -package google - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" - "time" -) - -type TagsLocationOperationWaiter struct { - Config *Config - UserAgent string - CommonOperationWaiter -} - -func (w *TagsLocationOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - location := GetLocationFromOpName(w.CommonOperationWaiter.Op.Name) - if location != w.CommonOperationWaiter.Op.Name { - // Found location in Op.Name, fill it in TagsLocationBasePath and rewrite URL - url := fmt.Sprintf("%s%s", strings.Replace(w.Config.TagsLocationBasePath, "{{location}}", location, 1), w.CommonOperationWaiter.Op.Name) - return SendRequest(w.Config, "GET", "", url, w.UserAgent, nil) - } else { - url := fmt.Sprintf("%s%s", w.Config.TagsBasePath, w.CommonOperationWaiter.Op.Name) - return SendRequest(w.Config, "GET", "", url, w.UserAgent, nil) - } -} - -func createTagsLocationWaiter(config *Config, op 
map[string]interface{}, activity, userAgent string) (*TagsLocationOperationWaiter, error) { - w := &TagsLocationOperationWaiter{ - Config: config, - UserAgent: userAgent, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func TagsLocationOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout time.Duration) error { - w, err := createTagsLocationWaiter(config, op, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func TagsLocationOperationWaitTime(config *Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createTagsLocationWaiter(config, op, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func GetLocationFromOpName(opName string) string { - re := regexp.MustCompile("operations/(?:rctb|rdtb)\\.([a-zA-Z0-9-]*)\\.([0-9]*)") - switch { - case re.MatchString(opName): - if res := re.FindStringSubmatch(opName); len(res) == 3 && res[1] != "" { - return res[1] - } - } - return opName -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tags_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tags_operation.go deleted file mode 100644 index 226cc30510..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tags_operation.go +++ /dev/null @@ -1,73 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type TagsOperationWaiter struct { - Config *Config - UserAgent string - CommonOperationWaiter -} - -func (w *TagsOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.TagsBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", "", url, w.UserAgent, nil) -} - -func createTagsWaiter(config *Config, op map[string]interface{}, activity, userAgent string) (*TagsOperationWaiter, error) { - w := &TagsOperationWaiter{ - Config: config, - UserAgent: userAgent, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func TagsOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout time.Duration) error { - w, err := createTagsWaiter(config, op, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func TagsOperationWaitTime(config *Config, op map[string]interface{}, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createTagsWaiter(config, op, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/test_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/test_utils.go deleted file mode 100644 index 1c9de17f2a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/test_utils.go +++ /dev/null @@ -1,238 +0,0 @@ -package google - -import ( - "context" - "errors" - "fmt" - "math/rand" - "os" - "reflect" - "testing" - "time" - - "github.com/hashicorp/terraform-plugin-go/tfprotov5" - "github.com/hashicorp/terraform-plugin-mux/tf5muxserver" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" -) - -type ResourceDataMock struct { - FieldsInSchema map[string]interface{} - FieldsWithHasChange []string - id string -} - -func (d *ResourceDataMock) HasChange(key string) bool { - exists := false - for _, val := range d.FieldsWithHasChange { - if key == val { - exists = true - } - } - - return exists -} - -func (d *ResourceDataMock) Get(key string) interface{} { - v, _ := d.GetOk(key) - return v -} - -func (d *ResourceDataMock) GetOk(key string) (interface{}, bool) { - v, ok := d.GetOkExists(key) - if ok && !isEmptyValue(reflect.ValueOf(v)) { - return v, true - } else { - return v, false - } -} - -func (d *ResourceDataMock) GetOkExists(key string) (interface{}, bool) { - for k, v := range d.FieldsInSchema { - if key == k { - return v, true - } - } - - return nil, false -} - -func (d *ResourceDataMock) Set(key string, value interface{}) error { - d.FieldsInSchema[key] = value - return nil -} - -func (d *ResourceDataMock) SetId(v string) { - d.id = v -} - -func (d *ResourceDataMock) Id() string { - return d.id -} - -func (d *ResourceDataMock) GetProviderMeta(dst interface{}) error { - return nil -} - -func (d *ResourceDataMock) 
Timeout(key string) time.Duration { - return time.Duration(1) -} - -type ResourceDiffMock struct { - Before map[string]interface{} - After map[string]interface{} - Cleared map[string]interface{} - IsForceNew bool -} - -func (d *ResourceDiffMock) GetChange(key string) (interface{}, interface{}) { - return d.Before[key], d.After[key] -} - -func (d *ResourceDiffMock) HasChange(key string) bool { - old, new := d.GetChange(key) - return old != new -} - -func (d *ResourceDiffMock) Get(key string) interface{} { - return d.After[key] -} - -func (d *ResourceDiffMock) GetOk(key string) (interface{}, bool) { - v, ok := d.After[key] - return v, ok -} - -func (d *ResourceDiffMock) Clear(key string) error { - if d.Cleared == nil { - d.Cleared = map[string]interface{}{} - } - d.Cleared[key] = true - return nil -} - -func (d *ResourceDiffMock) ForceNew(key string) error { - d.IsForceNew = true - return nil -} - -func checkDataSourceStateMatchesResourceState(dataSourceName, resourceName string) func(*terraform.State) error { - return checkDataSourceStateMatchesResourceStateWithIgnores(dataSourceName, resourceName, map[string]struct{}{}) -} - -func checkDataSourceStateMatchesResourceStateWithIgnores(dataSourceName, resourceName string, ignoreFields map[string]struct{}) func(*terraform.State) error { - return func(s *terraform.State) error { - ds, ok := s.RootModule().Resources[dataSourceName] - if !ok { - return fmt.Errorf("can't find %s in state", dataSourceName) - } - - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return fmt.Errorf("can't find %s in state", resourceName) - } - - dsAttr := ds.Primary.Attributes - rsAttr := rs.Primary.Attributes - - errMsg := "" - // Data sources are often derived from resources, so iterate over the resource fields to - // make sure all fields are accounted for in the data source. - // If a field exists in the data source but not in the resource, its expected value should - // be checked separately. 
- for k := range rsAttr { - if _, ok := ignoreFields[k]; ok { - continue - } - if k == "%" { - continue - } - if dsAttr[k] != rsAttr[k] { - // ignore data sources where an empty list is being compared against a null list. - if k[len(k)-1:] == "#" && (dsAttr[k] == "" || dsAttr[k] == "0") && (rsAttr[k] == "" || rsAttr[k] == "0") { - continue - } - errMsg += fmt.Sprintf("%s is %s; want %s\n", k, dsAttr[k], rsAttr[k]) - } - } - - if errMsg != "" { - return errors.New(errMsg) - } - - return nil - } -} - -// General test utils - -// MuxedProviders returns the correct test provider (between the sdk version or the framework version) -func MuxedProviders(testName string) (func() tfprotov5.ProviderServer, error) { - ctx := context.Background() - - providers := []func() tfprotov5.ProviderServer{ - GetSDKProvider(testName).GRPCProvider, // sdk provider - } - - muxServer, err := tf5muxserver.NewMuxServer(ctx, providers...) - - if err != nil { - return nil, err - } - - return muxServer.ProviderServer, nil -} - -func RandString(t *testing.T, length int) string { - if !isVcrEnabled() { - return acctest.RandString(length) - } - envPath := os.Getenv("VCR_PATH") - vcrMode := os.Getenv("VCR_MODE") - s, err := vcrSource(t, envPath, vcrMode) - if err != nil { - // At this point we haven't created any resources, so fail fast - t.Fatal(err) - } - - r := rand.New(s.source) - result := make([]byte, length) - set := "abcdefghijklmnopqrstuvwxyz012346789" - for i := 0; i < length; i++ { - result[i] = set[r.Intn(len(set))] - } - return string(result) -} - -func RandInt(t *testing.T) int { - if !isVcrEnabled() { - return acctest.RandInt() - } - envPath := os.Getenv("VCR_PATH") - vcrMode := os.Getenv("VCR_MODE") - s, err := vcrSource(t, envPath, vcrMode) - if err != nil { - // At this point we haven't created any resources, so fail fast - t.Fatal(err) - } - - return rand.New(s.source).Int() -} - -// ProtoV5ProviderFactories returns a muxed ProviderServer that uses the provider code from this repo 
(SDK and plugin-framework). -// Used to set ProtoV5ProviderFactories in a resource.TestStep within an acceptance test. -func ProtoV5ProviderFactories(t *testing.T) map[string]func() (tfprotov5.ProviderServer, error) { - return map[string]func() (tfprotov5.ProviderServer, error){ - "google": func() (tfprotov5.ProviderServer, error) { - provider, err := MuxedProviders(t.Name()) - return provider(), err - }, - } -} - -// ProtoV5ProviderBetaFactories returns the same as ProtoV5ProviderFactories only the provider is mapped with -// "google-beta" to ensure that registry examples use `google-beta` if the example is versioned as beta; -// normal beta tests should continue to use ProtoV5ProviderFactories -func ProtoV5ProviderBetaFactories(t *testing.T) map[string]func() (tfprotov5.ProviderServer, error) { - return map[string]func() (tfprotov5.ProviderServer, error){} -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dcl.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/dcl.go similarity index 85% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dcl.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/dcl.go index 4ec72490b7..63ea09947b 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/dcl.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/dcl.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package tpgdclresource import ( dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/expanders.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/expanders.go new file mode 100644 index 0000000000..d235b7c088 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/expanders.go @@ -0,0 +1,77 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpgdclresource + +import ( + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func ExpandStringArray(v interface{}) []string { + arr, ok := v.([]string) + + if ok { + return arr + } + + if arr, ok := v.(*schema.Set); ok { + return tpgresource.ConvertStringSet(arr) + } + + arr = tpgresource.ConvertStringArr(v.([]interface{})) + if arr == nil { + // Send empty array specifically instead of nil + return make([]string, 0) + } + return arr +} + +func ExpandIntegerArray(v interface{}) []int64 { + arr, ok := v.([]int64) + + if ok { + return arr + } + + if arr, ok := v.(*schema.Set); ok { + return convertIntegerSet(arr) + } + + return convertIntegerArr(v.([]interface{})) +} + +func convertIntegerSet(v *schema.Set) []int64 { + return convertIntegerArr(v.List()) +} + +func convertIntegerArr(v []interface{}) []int64 { + var vi []int64 + for _, vs := range v { + vi = append(vi, int64(vs.(int))) + } + return vi +} + +// Returns the DCL representation of a three-state boolean value represented by a string in terraform. 
+func ExpandEnumBool(v interface{}) *bool { + s, ok := v.(string) + if !ok { + return nil + } + + switch { + case strings.EqualFold(s, "true"): + return boolPtr(true) + case strings.EqualFold(s, "false"): + return boolPtr(false) + default: + return nil + } +} + +// boolPtr returns a pointer to the given boolean. +func boolPtr(b bool) *bool { + return &b +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/flatteners.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/flatteners.go new file mode 100644 index 0000000000..fb7b302b40 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/flatteners.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpgdclresource + +// Returns the terraform representation of a three-state boolean value represented by a pointer to bool in DCL. +func FlattenEnumBool(v interface{}) string { + b, ok := v.(*bool) + if !ok || b == nil { + return "" + } + if *b { + return "TRUE" + } + return "FALSE" +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/orgpolicy_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/orgpolicy_utils.go new file mode 100644 index 0000000000..6329a588ab --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/orgpolicy_utils.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package tpgdclresource + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// OrgPolicyPolicy has a custom import method because the parent field needs to allow an additional forward slash +// to represent the type of parent (e.g. projects/{project_id}). +func ResourceOrgPolicyPolicyCustomImport(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^(?P[^/]+/?[^/]*)/policies/(?P[^/]+)", + "^(?P[^/]+/?[^/]*)/(?P[^/]+)", + }, d, config); err != nil { + return err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVarsRecursive(d, config, "{{parent}}/policies/{{name}}", false, 0) + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + + // reset name to match the one from resourceOrgPolicyPolicyRead + if err := d.Set("name", id); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(id) + + return nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/tpgtools_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/tpgtools_utils.go new file mode 100644 index 0000000000..331044df02 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgdclresource/tpgtools_utils.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package tpgdclresource + +import ( + "fmt" + "log" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func OldValue(old, new interface{}) interface{} { + return old +} + +func HandleNotFoundDCLError(err error, d *schema.ResourceData, resourceName string) error { + if dcl.IsNotFound(err) { + log.Printf("[WARN] Removing %s because it's gone", resourceName) + // The resource doesn't exist anymore + d.SetId("") + return nil + } + + return errwrap.Wrapf( + fmt.Sprintf("Error when reading or editing %s: {{err}}", resourceName), err) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/datasource_iam_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/datasource_iam_policy.go new file mode 100644 index 0000000000..98a5d2010d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/datasource_iam_policy.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package tpgiamresource + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +var IamPolicyBaseDataSourceSchema = map[string]*schema.Schema{ + "policy_data": { + Type: schema.TypeString, + Computed: true, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + }, +} + +func DataSourceIamPolicy(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc NewResourceIamUpdaterFunc, options ...func(*IamSettings)) *schema.Resource { + settings := &IamSettings{} + for _, o := range options { + o(settings) + } + + return &schema.Resource{ + Read: DatasourceIamPolicyRead(newUpdaterFunc), + // if non-empty, this will be used to send a deprecation message when the + // datasource is used. + DeprecationMessage: settings.DeprecationMessage, + Schema: tpgresource.MergeSchemas(IamPolicyBaseDataSourceSchema, parentSpecificSchema), + UseJSONNumber: true, + } +} + +func DatasourceIamPolicyRead(newUpdaterFunc NewResourceIamUpdaterFunc) schema.ReadFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + policy, err := iamPolicyReadWithRetry(updater) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Resource %q with IAM Policy", updater.DescribeResource())) + } + + if err := d.Set("etag", policy.Etag); err != nil { + return fmt.Errorf("Error setting etag: %s", err) + } + if err := d.Set("policy_data", marshalIamPolicy(policy)); err != nil { + return fmt.Errorf("Error setting policy_data: %s", err) + } + d.SetId(updater.GetResourceId()) + + return nil + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/iam.go similarity index 79% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/iam.go index 7b00b11680..31ab798a61 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/iam.go @@ -1,5 +1,7 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 // Utils for modifying IAM policies for resources across GCP -package google +package tpgiamresource import ( "encoding/json" @@ -10,6 +12,9 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/davecgh/go-spew/spew" "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -46,27 +51,30 @@ type ( } // Factory for generating ResourceIamUpdater for given ResourceData resource - newResourceIamUpdaterFunc func(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) + NewResourceIamUpdaterFunc func(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (ResourceIamUpdater, error) // Describes how to modify a policy for a given Terraform IAM (_policy/_member/_binding/_audit_config) resource iamPolicyModifyFunc func(p *cloudresourcemanager.Policy) error // Parser for Terraform resource identifier (d.Id) for resource whose IAM policy is being changed - resourceIdParserFunc func(d *schema.ResourceData, config *Config) error + ResourceIdParserFunc func(d *schema.ResourceData, config *transport_tpg.Config) error ) // Locking wrapper around read-only operation with retries. 
func iamPolicyReadWithRetry(updater ResourceIamUpdater) (*cloudresourcemanager.Policy, error) { mutexKey := updater.GetMutexKey() - mutexKV.Lock(mutexKey) - defer mutexKV.Unlock(mutexKey) + transport_tpg.MutexStore.Lock(mutexKey) + defer transport_tpg.MutexStore.Unlock(mutexKey) log.Printf("[DEBUG] Retrieving policy for %s\n", updater.DescribeResource()) var policy *cloudresourcemanager.Policy - err := retryTime(func() (perr error) { - policy, perr = updater.GetResourceIamPolicy() - return perr - }, 10) + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (perr error) { + policy, perr = updater.GetResourceIamPolicy() + return perr + }, + Timeout: 10 * time.Minute, + }) if err != nil { return nil, err } @@ -77,14 +85,14 @@ func iamPolicyReadWithRetry(updater ResourceIamUpdater) (*cloudresourcemanager.P // Locking wrapper around read-modify-write cycle for IAM policy. func iamPolicyReadModifyWrite(updater ResourceIamUpdater, modify iamPolicyModifyFunc) error { mutexKey := updater.GetMutexKey() - mutexKV.Lock(mutexKey) - defer mutexKV.Unlock(mutexKey) + transport_tpg.MutexStore.Lock(mutexKey) + defer transport_tpg.MutexStore.Unlock(mutexKey) backoff := time.Second for { log.Printf("[DEBUG]: Retrieving policy for %s\n", updater.DescribeResource()) p, err := updater.GetResourceIamPolicy() - if IsGoogleApiErrorWithCode(err, 429) { + if transport_tpg.IsGoogleApiErrorWithCode(err, 429) { log.Printf("[DEBUG] 429 while attempting to read policy for %s, waiting %v before attempting again", updater.DescribeResource(), backoff) time.Sleep(backoff) continue @@ -111,7 +119,7 @@ func iamPolicyReadModifyWrite(updater ResourceIamUpdater, modify iamPolicyModify new_p, err := updater.GetResourceIamPolicy() if err != nil { // Quota for Read is pretty limited, so watch out for running out of quota. 
- if IsGoogleApiErrorWithCode(err, 429) { + if transport_tpg.IsGoogleApiErrorWithCode(err, 429) { fetchBackoff = fetchBackoff * 2 } else { return err @@ -140,7 +148,7 @@ func iamPolicyReadModifyWrite(updater ResourceIamUpdater, modify iamPolicyModify } break } - if isConflictError(err) { + if tpgresource.IsConflictError(err) { log.Printf("[DEBUG]: Concurrent policy changes, restarting read-modify-write after %s\n", backoff) time.Sleep(backoff) backoff = backoff * 2 @@ -152,7 +160,7 @@ func iamPolicyReadModifyWrite(updater ResourceIamUpdater, modify iamPolicyModify // retry in the case that a service account is not found. This can happen when a service account is deleted // out of band. - if isServiceAccountNotFoundError, _ := iamServiceAccountNotFound(err); isServiceAccountNotFoundError { + if isServiceAccountNotFoundError, _ := transport_tpg.IamServiceAccountNotFound(err); isServiceAccountNotFoundError { // calling a retryable function within a retry loop is not // strictly the _best_ idea, but this error only happens in // high-traffic projects anyways @@ -325,7 +333,7 @@ func listFromIamBindingMap(bm map[iamBindingKey]map[string]struct{}) []*cloudres } b := &cloudresourcemanager.Binding{ Role: key.Role, - Members: stringSliceFromGolangSet(members), + Members: tpgresource.StringSliceFromGolangSet(members), } if !key.Condition.Empty() { b.Condition = &cloudresourcemanager.Expr{ @@ -386,7 +394,7 @@ func listFromIamAuditConfigMap(acMap map[string]map[string]map[string]struct{}) ForceSendFields: []string{"exemptedMembers"}, } if len(memberSet) > 0 { - alc.ExemptedMembers = stringSliceFromGolangSet(memberSet) + alc.ExemptedMembers = tpgresource.StringSliceFromGolangSet(memberSet) } logConfigs = append(logConfigs, alc) } @@ -430,24 +438,24 @@ func compareIamPolicies(a, b *cloudresourcemanager.Policy) bool { log.Printf("[DEBUG] policies version differ: %q vs %q", a.Version, b.Version) return false } - if !compareBindings(a.Bindings, b.Bindings) { + if 
!CompareBindings(a.Bindings, b.Bindings) { log.Printf("[DEBUG] policies bindings differ: %#v vs %#v", a.Bindings, b.Bindings) return false } - if !compareAuditConfigs(a.AuditConfigs, b.AuditConfigs) { + if !CompareAuditConfigs(a.AuditConfigs, b.AuditConfigs) { log.Printf("[DEBUG] policies audit configs differ: %#v vs %#v", a.AuditConfigs, b.AuditConfigs) return false } return true } -func compareBindings(a, b []*cloudresourcemanager.Binding) bool { +func CompareBindings(a, b []*cloudresourcemanager.Binding) bool { aMap := createIamBindingsMap(a) bMap := createIamBindingsMap(b) return reflect.DeepEqual(aMap, bMap) } -func compareAuditConfigs(a, b []*cloudresourcemanager.AuditConfig) bool { +func CompareAuditConfigs(a, b []*cloudresourcemanager.AuditConfig) bool { aMap := createIamAuditConfigsMap(a) bMap := createIamAuditConfigsMap(b) return reflect.DeepEqual(aMap, bMap) @@ -455,6 +463,15 @@ func compareAuditConfigs(a, b []*cloudresourcemanager.AuditConfig) bool { type IamSettings struct { DeprecationMessage string + EnableBatching bool +} + +func NewIamSettings(options ...func(*IamSettings)) *IamSettings { + settings := &IamSettings{} + for _, o := range options { + o(settings) + } + return settings } func IamWithDeprecationMessage(message string) func(s *IamSettings) { @@ -466,3 +483,78 @@ func IamWithDeprecationMessage(message string) func(s *IamSettings) { func IamWithGAResourceDeprecation() func(s *IamSettings) { return IamWithDeprecationMessage("This resource has been deprecated in the google (GA) provider, and will only be available in the google-beta provider in a future release.") } + +func IamWithBatching(s *IamSettings) { + s.EnableBatching = true +} + +// Util to deref and print auditConfigs +func DebugPrintAuditConfigs(bs []*cloudresourcemanager.AuditConfig) string { + v, _ := json.MarshalIndent(bs, "", "\t") + return string(v) +} + +// Util to deref and print bindings +func DebugPrintBindings(bs []*cloudresourcemanager.Binding) string { + v, _ := 
json.MarshalIndent(bs, "", "\t") + return string(v) +} + +// Returns a map representing iam bindings that are in the first map but not the second. +func missingBindingsMap(aMap, bMap map[iamBindingKey]map[string]struct{}) map[iamBindingKey]map[string]struct{} { + results := make(map[iamBindingKey]map[string]struct{}) + for key, aMembers := range aMap { + if bMembers, ok := bMap[key]; ok { + // The key is in both maps. + resultMembers := make(map[string]struct{}) + + for aMember := range aMembers { + if _, ok := bMembers[aMember]; !ok { + // The member is in a but not in b. + resultMembers[aMember] = struct{}{} + } + } + for bMember := range bMembers { + if _, ok := aMembers[bMember]; !ok { + // The member is in b but not in a. + resultMembers[bMember] = struct{}{} + } + } + + if len(resultMembers) > 0 { + results[key] = resultMembers + } + } else { + // The key is in map a but not map b. + results[key] = aMembers + } + } + + for key, bMembers := range bMap { + if _, ok := aMap[key]; !ok { + // The key is in map b but not map a. + results[key] = bMembers + } + } + + return results +} + +// Returns the bindings that are in the first set of bindings but not the second. 
+func MissingBindings(a, b []*cloudresourcemanager.Binding) []*cloudresourcemanager.Binding { + aMap := createIamBindingsMap(a) + bMap := createIamBindingsMap(b) + + var results []*cloudresourcemanager.Binding + for key, membersSet := range missingBindingsMap(aMap, bMap) { + members := make([]string, 0, len(membersSet)) + for member := range membersSet { + members = append(members, member) + } + results = append(results, &cloudresourcemanager.Binding{ + Role: key.Role, + Members: members, + }) + } + return results +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_batching.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/iam_batching.go similarity index 78% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_batching.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/iam_batching.go index e5ecbfe552..bb9d8c6340 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/iam_batching.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/iam_batching.go @@ -1,22 +1,23 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package tpgiamresource import ( "fmt" - "google.golang.org/api/cloudresourcemanager/v1" "time" + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudresourcemanager/v1" ) const ( batchKeyTmplModifyIamPolicy = "%s modifyIamPolicy" - - IamBatchingEnabled = true - IamBatchingDisabled = false ) -func BatchRequestModifyIamPolicy(updater ResourceIamUpdater, modify iamPolicyModifyFunc, config *Config, reqDesc string) error { +func BatchRequestModifyIamPolicy(updater ResourceIamUpdater, modify iamPolicyModifyFunc, config *transport_tpg.Config, reqDesc string) error { batchKey := fmt.Sprintf(batchKeyTmplModifyIamPolicy, updater.GetMutexKey()) - request := &BatchRequest{ + request := &transport_tpg.BatchRequest{ ResourceName: updater.GetResourceId(), Body: []iamPolicyModifyFunc{modify}, CombineF: combineBatchIamPolicyModifiers, @@ -24,7 +25,7 @@ func BatchRequestModifyIamPolicy(updater ResourceIamUpdater, modify iamPolicyMod DebugId: reqDesc, } - _, err := config.requestBatcherIam.SendRequestWithTimeout(batchKey, request, time.Minute*30) + _, err := config.RequestBatcherIam.SendRequestWithTimeout(batchKey, request, time.Minute*30) return err } @@ -42,7 +43,7 @@ func combineBatchIamPolicyModifiers(currV interface{}, toAddV interface{}) (inte return append(currModifiers, newModifiers...), nil } -func sendBatchModifyIamPolicy(updater ResourceIamUpdater) BatcherSendFunc { +func sendBatchModifyIamPolicy(updater ResourceIamUpdater) transport_tpg.BatcherSendFunc { return func(resourceName string, body interface{}) (interface{}, error) { modifiers, ok := body.([]iamPolicyModifyFunc) if !ok { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_audit_config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/resource_iam_audit_config.go similarity index 79% rename from 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_audit_config.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/resource_iam_audit_config.go index b02d594248..7fd399c417 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_audit_config.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/resource_iam_audit_config.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpgiamresource import ( "errors" @@ -6,6 +8,9 @@ import ( "log" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/cloudresourcemanager/v1" ) @@ -43,17 +48,15 @@ var iamAuditConfigSchema = map[string]*schema.Schema{ }, } -func ResourceIamAuditConfig(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc) *schema.Resource { - return ResourceIamAuditConfigWithBatching(parentSpecificSchema, newUpdaterFunc, resourceIdParser, IamBatchingDisabled) -} +func ResourceIamAuditConfig(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc NewResourceIamUpdaterFunc, resourceIdParser ResourceIdParserFunc, options ...func(*IamSettings)) *schema.Resource { + settings := NewIamSettings(options...) 
-func ResourceIamAuditConfigWithBatching(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, enableBatching bool) *schema.Resource { return &schema.Resource{ - Create: resourceIamAuditConfigCreateUpdate(newUpdaterFunc, enableBatching), + Create: resourceIamAuditConfigCreateUpdate(newUpdaterFunc, settings.EnableBatching), Read: resourceIamAuditConfigRead(newUpdaterFunc), - Update: resourceIamAuditConfigCreateUpdate(newUpdaterFunc, enableBatching), - Delete: resourceIamAuditConfigDelete(newUpdaterFunc, enableBatching), - Schema: mergeSchemas(iamAuditConfigSchema, parentSpecificSchema), + Update: resourceIamAuditConfigCreateUpdate(newUpdaterFunc, settings.EnableBatching), + Delete: resourceIamAuditConfigDelete(newUpdaterFunc, settings.EnableBatching), + Schema: tpgresource.MergeSchemas(iamAuditConfigSchema, parentSpecificSchema), Importer: &schema.ResourceImporter{ State: iamAuditConfigImport(resourceIdParser), }, @@ -61,9 +64,9 @@ func ResourceIamAuditConfigWithBatching(parentSpecificSchema map[string]*schema. 
} } -func resourceIamAuditConfigRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.ReadFunc { +func resourceIamAuditConfigRead(newUpdaterFunc NewResourceIamUpdaterFunc) schema.ReadFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { return err @@ -72,7 +75,7 @@ func resourceIamAuditConfigRead(newUpdaterFunc newResourceIamUpdaterFunc) schema eAuditConfig := getResourceIamAuditConfig(d) p, err := iamPolicyReadWithRetry(updater) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("AuditConfig for %s on %q", eAuditConfig.Service, updater.DescribeResource())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AuditConfig for %s on %q", eAuditConfig.Service, updater.DescribeResource())) } log.Printf("[DEBUG]: Retrieved policy for %s: %+v", updater.DescribeResource(), p) @@ -104,12 +107,12 @@ func resourceIamAuditConfigRead(newUpdaterFunc newResourceIamUpdaterFunc) schema } } -func iamAuditConfigImport(resourceIdParser resourceIdParserFunc) schema.StateFunc { +func iamAuditConfigImport(resourceIdParser ResourceIdParserFunc) schema.StateFunc { return func(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { if resourceIdParser == nil { return nil, errors.New("Import not supported for this IAM resource.") } - config := m.(*Config) + config := m.(*transport_tpg.Config) s := strings.Fields(d.Id()) if len(s) != 2 { d.SetId("") @@ -117,7 +120,7 @@ func iamAuditConfigImport(resourceIdParser resourceIdParserFunc) schema.StateFun } id, service := s[0], s[1] - // Set the ID only to the first part so all IAM types can share the same resourceIdParserFunc. + // Set the ID only to the first part so all IAM types can share the same ResourceIdParserFunc. 
d.SetId(id) if err := d.Set("service", service); err != nil { return nil, fmt.Errorf("Error setting service: %s", err) @@ -128,15 +131,15 @@ func iamAuditConfigImport(resourceIdParser resourceIdParserFunc) schema.StateFun } // Set the ID again so that the ID matches the ID it would have if it had been created via TF. - // Use the current ID in case it changed in the resourceIdParserFunc. + // Use the current ID in case it changed in the ResourceIdParserFunc. d.SetId(d.Id() + "/audit_config/" + service) return []*schema.ResourceData{d}, nil } } -func resourceIamAuditConfigCreateUpdate(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) func(*schema.ResourceData, interface{}) error { +func resourceIamAuditConfigCreateUpdate(newUpdaterFunc NewResourceIamUpdaterFunc, enableBatching bool) func(*schema.ResourceData, interface{}) error { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { @@ -163,9 +166,9 @@ func resourceIamAuditConfigCreateUpdate(newUpdaterFunc newResourceIamUpdaterFunc } } -func resourceIamAuditConfigDelete(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) schema.DeleteFunc { +func resourceIamAuditConfigDelete(newUpdaterFunc NewResourceIamUpdaterFunc, enableBatching bool) schema.DeleteFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { @@ -184,7 +187,7 @@ func resourceIamAuditConfigDelete(newUpdaterFunc newResourceIamUpdaterFunc, enab err = iamPolicyReadModifyWrite(updater, modifyF) } if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Resource %s with IAM audit config %q", updater.DescribeResource(), d.Id())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Resource %s with IAM audit config %q", updater.DescribeResource(), 
d.Id())) } return resourceIamAuditConfigRead(newUpdaterFunc)(d, meta) @@ -198,7 +201,7 @@ func getResourceIamAuditConfig(d *schema.ResourceData) *cloudresourcemanager.Aud logConfig := y.(map[string]interface{}) auditLogConfigs[x] = &cloudresourcemanager.AuditLogConfig{ LogType: logConfig["log_type"].(string), - ExemptedMembers: convertStringArr(logConfig["exempted_members"].(*schema.Set).List()), + ExemptedMembers: tpgresource.ConvertStringArr(logConfig["exempted_members"].(*schema.Set).List()), } } return &cloudresourcemanager.AuditConfig{ @@ -214,7 +217,7 @@ func flattenAuditLogConfigs(configs []*cloudresourcemanager.AuditLogConfig) *sch for _, conf := range configs { res.Add(map[string]interface{}{ "log_type": conf.LogType, - "exempted_members": schema.NewSet(schema.HashSchema(exemptedMemberSchema), convertStringArrToInterface(conf.ExemptedMembers)), + "exempted_members": schema.NewSet(schema.HashSchema(exemptedMemberSchema), tpgresource.ConvertStringArrToInterface(conf.ExemptedMembers)), }) } return res diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_binding.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/resource_iam_binding.go similarity index 81% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_binding.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/resource_iam_binding.go index 6aef6580e3..28b26ade58 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_binding.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/resource_iam_binding.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package tpgiamresource import ( "errors" @@ -6,6 +8,9 @@ import ( "log" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/davecgh/go-spew/spew" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/cloudresourcemanager/v1" @@ -22,7 +27,7 @@ var iamBindingSchema = map[string]*schema.Schema{ Required: true, Elem: &schema.Schema{ Type: schema.TypeString, - DiffSuppressFunc: caseDiffSuppress, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, ValidateFunc: validateIAMMember, }, Set: func(v interface{}) int { @@ -60,28 +65,20 @@ var iamBindingSchema = map[string]*schema.Schema{ }, } -func ResourceIamBinding(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, options ...func(*IamSettings)) *schema.Resource { - return ResourceIamBindingWithBatching(parentSpecificSchema, newUpdaterFunc, resourceIdParser, IamBatchingDisabled, options...) -} - -// Resource that batches requests to the same IAM policy across multiple IAM fine-grained resources -func ResourceIamBindingWithBatching(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, enableBatching bool, options ...func(*IamSettings)) *schema.Resource { - settings := &IamSettings{} - for _, o := range options { - o(settings) - } +func ResourceIamBinding(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc NewResourceIamUpdaterFunc, resourceIdParser ResourceIdParserFunc, options ...func(*IamSettings)) *schema.Resource { + settings := NewIamSettings(options...) 
return &schema.Resource{ - Create: resourceIamBindingCreateUpdate(newUpdaterFunc, enableBatching), + Create: resourceIamBindingCreateUpdate(newUpdaterFunc, settings.EnableBatching), Read: resourceIamBindingRead(newUpdaterFunc), - Update: resourceIamBindingCreateUpdate(newUpdaterFunc, enableBatching), - Delete: resourceIamBindingDelete(newUpdaterFunc, enableBatching), + Update: resourceIamBindingCreateUpdate(newUpdaterFunc, settings.EnableBatching), + Delete: resourceIamBindingDelete(newUpdaterFunc, settings.EnableBatching), // if non-empty, this will be used to send a deprecation message when the // resource is used. DeprecationMessage: settings.DeprecationMessage, - Schema: mergeSchemas(iamBindingSchema, parentSpecificSchema), + Schema: tpgresource.MergeSchemas(iamBindingSchema, parentSpecificSchema), Importer: &schema.ResourceImporter{ State: iamBindingImport(newUpdaterFunc, resourceIdParser), }, @@ -89,9 +86,9 @@ func ResourceIamBindingWithBatching(parentSpecificSchema map[string]*schema.Sche } } -func resourceIamBindingCreateUpdate(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) func(*schema.ResourceData, interface{}) error { +func resourceIamBindingCreateUpdate(newUpdaterFunc NewResourceIamUpdaterFunc, enableBatching bool) func(*schema.ResourceData, interface{}) error { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { return err @@ -123,9 +120,9 @@ func resourceIamBindingCreateUpdate(newUpdaterFunc newResourceIamUpdaterFunc, en } } -func resourceIamBindingRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.ReadFunc { +func resourceIamBindingRead(newUpdaterFunc NewResourceIamUpdaterFunc) schema.ReadFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { @@ -136,7 +133,7 
@@ func resourceIamBindingRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.Rea eCondition := conditionKeyFromCondition(eBinding.Condition) p, err := iamPolicyReadWithRetry(updater) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Resource %q with IAM Binding (Role %q)", updater.DescribeResource(), eBinding.Role)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Resource %q with IAM Binding (Role %q)", updater.DescribeResource(), eBinding.Role)) } log.Print(spew.Sprintf("[DEBUG] Retrieved policy for %s: %#v", updater.DescribeResource(), p)) log.Printf("[DEBUG] Looking for binding with role %q and condition %#v", eBinding.Role, eCondition) @@ -166,7 +163,7 @@ func resourceIamBindingRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.Rea if err := d.Set("members", binding.Members); err != nil { return fmt.Errorf("Error setting members: %s", err) } - if err := d.Set("condition", flattenIamCondition(binding.Condition)); err != nil { + if err := d.Set("condition", FlattenIamCondition(binding.Condition)); err != nil { return fmt.Errorf("Error setting condition: %s", err) } } @@ -177,12 +174,12 @@ func resourceIamBindingRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.Rea } } -func iamBindingImport(newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc) schema.StateFunc { +func iamBindingImport(newUpdaterFunc NewResourceIamUpdaterFunc, resourceIdParser ResourceIdParserFunc) schema.StateFunc { return func(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { if resourceIdParser == nil { return nil, errors.New("Import not supported for this IAM resource.") } - config := m.(*Config) + config := m.(*transport_tpg.Config) s := strings.Fields(d.Id()) var id, role string if len(s) < 2 { @@ -198,7 +195,7 @@ func iamBindingImport(newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser id, role, conditionTitle = s[0], s[1], strings.Join(s[2:], " ") } - // Set the ID only to the first part so 
all IAM types can share the same resourceIdParserFunc. + // Set the ID only to the first part so all IAM types can share the same ResourceIdParserFunc. d.SetId(id) if err := d.Set("role", role); err != nil { return nil, fmt.Errorf("Error setting role: %s", err) @@ -209,7 +206,7 @@ func iamBindingImport(newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser } // Set the ID again so that the ID matches the ID it would have if it had been created via TF. - // Use the current ID in case it changed in the resourceIdParserFunc. + // Use the current ID in case it changed in the ResourceIdParserFunc. d.SetId(d.Id() + "/" + role) // Since condition titles can have any character in them, we can't separate them from any other @@ -235,7 +232,7 @@ func iamBindingImport(newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser } } if binding != nil { - if err := d.Set("condition", flattenIamCondition(binding.Condition)); err != nil { + if err := d.Set("condition", FlattenIamCondition(binding.Condition)); err != nil { return nil, fmt.Errorf("Error setting condition: %s", err) } if k := conditionKeyFromCondition(binding.Condition); !k.Empty() { @@ -257,9 +254,9 @@ func iamBindingImport(newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser } } -func resourceIamBindingDelete(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) schema.DeleteFunc { +func resourceIamBindingDelete(newUpdaterFunc NewResourceIamUpdaterFunc, enableBatching bool) schema.DeleteFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { @@ -279,7 +276,7 @@ func resourceIamBindingDelete(newUpdaterFunc newResourceIamUpdaterFunc, enableBa err = iamPolicyReadModifyWrite(updater, modifyF) } if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Resource %q for IAM binding with role %q", updater.DescribeResource(), binding.Role)) + return 
transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Resource %q for IAM binding with role %q", updater.DescribeResource(), binding.Role)) } return resourceIamBindingRead(newUpdaterFunc)(d, meta) @@ -289,16 +286,16 @@ func resourceIamBindingDelete(newUpdaterFunc newResourceIamUpdaterFunc, enableBa func getResourceIamBinding(d *schema.ResourceData) *cloudresourcemanager.Binding { members := d.Get("members").(*schema.Set).List() b := &cloudresourcemanager.Binding{ - Members: convertStringArr(members), + Members: tpgresource.ConvertStringArr(members), Role: d.Get("role").(string), } - if c := expandIamCondition(d.Get("condition")); c != nil { + if c := ExpandIamCondition(d.Get("condition")); c != nil { b.Condition = c } return b } -func expandIamCondition(v interface{}) *cloudresourcemanager.Expr { +func ExpandIamCondition(v interface{}) *cloudresourcemanager.Expr { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil @@ -312,7 +309,7 @@ func expandIamCondition(v interface{}) *cloudresourcemanager.Expr { } } -func flattenIamCondition(condition *cloudresourcemanager.Expr) []map[string]interface{} { +func FlattenIamCondition(condition *cloudresourcemanager.Expr) []map[string]interface{} { if conditionKeyFromCondition(condition).Empty() { return nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_member.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/resource_iam_member.go similarity index 83% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_member.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/resource_iam_member.go index d722c6b16e..ecfe91c6d2 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_member.go +++ 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/resource_iam_member.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpgiamresource import ( "errors" @@ -7,6 +9,9 @@ import ( "regexp" "strings" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/davecgh/go-spew/spew" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/api/cloudresourcemanager/v1" @@ -17,7 +22,7 @@ func iamMemberCaseDiffSuppress(k, old, new string, d *schema.ResourceData) bool if isCaseSensitive { return old == new } - return caseDiffSuppress(k, old, new, d) + return tpgresource.CaseDiffSuppress(k, old, new, d) } func validateIAMMember(i interface{}, k string) ([]string, []error) { @@ -84,12 +89,12 @@ var IamMemberBaseSchema = map[string]*schema.Schema{ }, } -func iamMemberImport(newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc) schema.StateFunc { +func iamMemberImport(newUpdaterFunc NewResourceIamUpdaterFunc, resourceIdParser ResourceIdParserFunc) schema.StateFunc { return func(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { if resourceIdParser == nil { return nil, errors.New("Import not supported for this IAM resource.") } - config := m.(*Config) + config := m.(*transport_tpg.Config) s := strings.Fields(d.Id()) var id, role, member string if len(s) < 3 { @@ -105,7 +110,7 @@ func iamMemberImport(newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser id, role, member, conditionTitle = s[0], s[1], s[2], strings.Join(s[3:], " ") } - // Set the ID only to the first part so all IAM types can share the same resourceIdParserFunc. + // Set the ID only to the first part so all IAM types can share the same ResourceIdParserFunc. 
d.SetId(id) if err := d.Set("role", role); err != nil { return nil, fmt.Errorf("Error setting role: %s", err) @@ -120,7 +125,7 @@ func iamMemberImport(newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser } // Set the ID again so that the ID matches the ID it would have if it had been created via TF. - // Use the current ID in case it changed in the resourceIdParserFunc. + // Use the current ID in case it changed in the ResourceIdParserFunc. d.SetId(d.Id() + "/" + role + "/" + normalizeIamMemberCasing(member)) // Read the upstream policy so we can set the full condition. @@ -155,7 +160,7 @@ func iamMemberImport(newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser return nil, fmt.Errorf("Cannot find binding for %q with role %q, member %q, and condition title %q", updater.DescribeResource(), role, member, conditionTitle) } - if err := d.Set("condition", flattenIamCondition(binding.Condition)); err != nil { + if err := d.Set("condition", FlattenIamCondition(binding.Condition)); err != nil { return nil, fmt.Errorf("Error setting condition: %s", err) } if k := conditionKeyFromCondition(binding.Condition); !k.Empty() { @@ -166,26 +171,19 @@ func iamMemberImport(newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser } } -func ResourceIamMember(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, options ...func(*IamSettings)) *schema.Resource { - return ResourceIamMemberWithBatching(parentSpecificSchema, newUpdaterFunc, resourceIdParser, IamBatchingDisabled, options...) 
-} - -func ResourceIamMemberWithBatching(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, enableBatching bool, options ...func(*IamSettings)) *schema.Resource { - settings := &IamSettings{} - for _, o := range options { - o(settings) - } +func ResourceIamMember(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc NewResourceIamUpdaterFunc, resourceIdParser ResourceIdParserFunc, options ...func(*IamSettings)) *schema.Resource { + settings := NewIamSettings(options...) return &schema.Resource{ - Create: resourceIamMemberCreate(newUpdaterFunc, enableBatching), + Create: resourceIamMemberCreate(newUpdaterFunc, settings.EnableBatching), Read: resourceIamMemberRead(newUpdaterFunc), - Delete: resourceIamMemberDelete(newUpdaterFunc, enableBatching), + Delete: resourceIamMemberDelete(newUpdaterFunc, settings.EnableBatching), // if non-empty, this will be used to send a deprecation message when the // resource is used. 
DeprecationMessage: settings.DeprecationMessage, - Schema: mergeSchemas(IamMemberBaseSchema, parentSpecificSchema), + Schema: tpgresource.MergeSchemas(IamMemberBaseSchema, parentSpecificSchema), Importer: &schema.ResourceImporter{ State: iamMemberImport(newUpdaterFunc, resourceIdParser), }, @@ -198,15 +196,15 @@ func getResourceIamMember(d *schema.ResourceData) *cloudresourcemanager.Binding Members: []string{d.Get("member").(string)}, Role: d.Get("role").(string), } - if c := expandIamCondition(d.Get("condition")); c != nil { + if c := ExpandIamCondition(d.Get("condition")); c != nil { b.Condition = c } return b } -func resourceIamMemberCreate(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) schema.CreateFunc { +func resourceIamMemberCreate(newUpdaterFunc NewResourceIamUpdaterFunc, enableBatching bool) schema.CreateFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { @@ -237,9 +235,9 @@ func resourceIamMemberCreate(newUpdaterFunc newResourceIamUpdaterFunc, enableBat } } -func resourceIamMemberRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.ReadFunc { +func resourceIamMemberRead(newUpdaterFunc NewResourceIamUpdaterFunc) schema.ReadFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { @@ -250,7 +248,7 @@ func resourceIamMemberRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.Read eCondition := conditionKeyFromCondition(eMember.Condition) p, err := iamPolicyReadWithRetry(updater) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Resource %q with IAM Member: Role %q Member %q", updater.DescribeResource(), eMember.Role, eMember.Members[0])) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Resource %q with IAM Member: Role %q Member %q", 
updater.DescribeResource(), eMember.Role, eMember.Members[0])) } log.Print(spew.Sprintf("[DEBUG]: Retrieved policy for %s: %#v\n", updater.DescribeResource(), p)) log.Printf("[DEBUG]: Looking for binding with role %q and condition %#v", eMember.Role, eCondition) @@ -292,16 +290,16 @@ func resourceIamMemberRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.Read if err := d.Set("role", binding.Role); err != nil { return fmt.Errorf("Error setting role: %s", err) } - if err := d.Set("condition", flattenIamCondition(binding.Condition)); err != nil { + if err := d.Set("condition", FlattenIamCondition(binding.Condition)); err != nil { return fmt.Errorf("Error setting condition: %s", err) } return nil } } -func resourceIamMemberDelete(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) schema.DeleteFunc { +func resourceIamMemberDelete(newUpdaterFunc NewResourceIamUpdaterFunc, enableBatching bool) schema.DeleteFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { @@ -321,7 +319,7 @@ func resourceIamMemberDelete(newUpdaterFunc newResourceIamUpdaterFunc, enableBat err = iamPolicyReadModifyWrite(updater, modifyF) } if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Resource %s for IAM Member (role %q, %q)", updater.GetResourceId(), memberBind.Members[0], memberBind.Role)) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Resource %s for IAM Member (role %q, %q)", updater.GetResourceId(), memberBind.Members[0], memberBind.Role)) } return resourceIamMemberRead(newUpdaterFunc)(d, meta) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_policy.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/resource_iam_policy.go similarity index 79% rename from 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_policy.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/resource_iam_policy.go index 5fb385e788..71d5756908 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/resource_iam_policy.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgiamresource/resource_iam_policy.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpgiamresource import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -7,6 +9,9 @@ import ( "errors" "fmt" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/cloudresourcemanager/v1" ) @@ -23,12 +28,12 @@ var IamPolicyBaseSchema = map[string]*schema.Schema{ }, } -func iamPolicyImport(resourceIdParser resourceIdParserFunc) schema.StateFunc { +func iamPolicyImport(resourceIdParser ResourceIdParserFunc) schema.StateFunc { return func(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { if resourceIdParser == nil { return nil, errors.New("Import not supported for this IAM resource.") } - config := m.(*Config) + config := m.(*transport_tpg.Config) err := resourceIdParser(d, config) if err != nil { return nil, err @@ -37,11 +42,8 @@ func iamPolicyImport(resourceIdParser resourceIdParserFunc) schema.StateFunc { } } -func ResourceIamPolicy(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, options ...func(*IamSettings)) *schema.Resource { - settings := &IamSettings{} - for _, o := range options { - o(settings) - } +func ResourceIamPolicy(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc 
NewResourceIamUpdaterFunc, resourceIdParser ResourceIdParserFunc, options ...func(*IamSettings)) *schema.Resource { + settings := NewIamSettings(options...) return &schema.Resource{ Create: ResourceIamPolicyCreate(newUpdaterFunc), @@ -53,7 +55,7 @@ func ResourceIamPolicy(parentSpecificSchema map[string]*schema.Schema, newUpdate // resource is used. DeprecationMessage: settings.DeprecationMessage, - Schema: mergeSchemas(IamPolicyBaseSchema, parentSpecificSchema), + Schema: tpgresource.MergeSchemas(IamPolicyBaseSchema, parentSpecificSchema), Importer: &schema.ResourceImporter{ State: iamPolicyImport(resourceIdParser), }, @@ -61,9 +63,9 @@ func ResourceIamPolicy(parentSpecificSchema map[string]*schema.Schema, newUpdate } } -func ResourceIamPolicyCreate(newUpdaterFunc newResourceIamUpdaterFunc) schema.CreateFunc { +func ResourceIamPolicyCreate(newUpdaterFunc NewResourceIamUpdaterFunc) schema.CreateFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { @@ -79,9 +81,9 @@ func ResourceIamPolicyCreate(newUpdaterFunc newResourceIamUpdaterFunc) schema.Cr } } -func ResourceIamPolicyRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.ReadFunc { +func ResourceIamPolicyRead(newUpdaterFunc NewResourceIamUpdaterFunc) schema.ReadFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { @@ -90,7 +92,7 @@ func ResourceIamPolicyRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.Read policy, err := iamPolicyReadWithRetry(updater) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Resource %q with IAM Policy", updater.DescribeResource())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Resource %q with IAM Policy", updater.DescribeResource())) } if err := d.Set("etag", 
policy.Etag); err != nil { @@ -104,9 +106,9 @@ func ResourceIamPolicyRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.Read } } -func ResourceIamPolicyUpdate(newUpdaterFunc newResourceIamUpdaterFunc) schema.UpdateFunc { +func ResourceIamPolicyUpdate(newUpdaterFunc NewResourceIamUpdaterFunc) schema.UpdateFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { @@ -123,9 +125,9 @@ func ResourceIamPolicyUpdate(newUpdaterFunc newResourceIamUpdaterFunc) schema.Up } } -func ResourceIamPolicyDelete(newUpdaterFunc newResourceIamUpdaterFunc) schema.DeleteFunc { +func ResourceIamPolicyDelete(newUpdaterFunc NewResourceIamUpdaterFunc) schema.DeleteFunc { return func(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) + config := meta.(*transport_tpg.Config) updater, err := newUpdaterFunc(d, config) if err != nil { diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/common_diff_suppress.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/common_diff_suppress.go new file mode 100644 index 0000000000..8f3bf07a9a --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/common_diff_suppress.go @@ -0,0 +1,270 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +// Contains common diff suppress functions. 
+ +package tpgresource + +import ( + "crypto/sha256" + "encoding/hex" + "log" + "net" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func OptionalPrefixSuppress(prefix string) schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + return prefix+old == new || prefix+new == old + } +} + +func IgnoreMissingKeyInMap(key string) schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + log.Printf("[DEBUG] - suppressing diff %q with old %q, new %q", k, old, new) + if strings.HasSuffix(k, ".%") { + oldNum, err := strconv.Atoi(old) + if err != nil { + log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", old) + return false + } + newNum, err := strconv.Atoi(new) + if err != nil { + log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", new) + return false + } + return oldNum+1 == newNum + } else if strings.HasSuffix(k, "."+key) { + return old == "" + } + return false + } +} + +func OptionalSurroundingSpacesSuppress(k, old, new string, d *schema.ResourceData) bool { + return strings.TrimSpace(old) == strings.TrimSpace(new) +} + +func EmptyOrDefaultStringSuppress(defaultVal string) schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + return (old == "" && new == defaultVal) || (new == "" && old == defaultVal) + } +} + +func IpCidrRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // The range may be a: + // A) single IP address (e.g. 10.2.3.4) + // B) CIDR format string (e.g. 10.1.2.0/24) + // C) netmask (e.g. /24) + // + // For A) and B), no diff to suppress, they have to match completely. 
+ // For C), The API picks a network IP address and this creates a diff of the form: + // network_interface.0.alias_ip_range.0.ip_cidr_range: "10.128.1.0/24" => "/24" + // We should only compare the mask portion for this case. + if len(new) > 0 && new[0] == '/' { + oldNetmaskStartPos := strings.LastIndex(old, "/") + + if oldNetmaskStartPos != -1 { + oldNetmask := old[strings.LastIndex(old, "/"):] + if oldNetmask == new { + return true + } + } + } + + return false +} + +// Sha256DiffSuppress +// if old is the hex-encoded sha256 sum of new, treat them as equal +func Sha256DiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + return hex.EncodeToString(sha256.New().Sum([]byte(old))) == new +} + +func CaseDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + return strings.ToUpper(old) == strings.ToUpper(new) +} + +// Port range '80' and '80-80' is equivalent. +// `old` is read from the server and always has the full range format (e.g. '80-80', '1024-2048'). +// `new` can be either a single port or a port range. +func PortRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + return old == new+"-"+new +} + +// Single-digit hour is equivalent to hour with leading zero e.g. suppress diff 1:00 => 01:00. +// Assume either value could be in either format. +func Rfc3339TimeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + if (len(old) == 4 && "0"+old == new) || (len(new) == 4 && "0"+new == old) { + return true + } + return false +} + +func EmptyOrUnsetBlockDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + o, n := d.GetChange(strings.TrimSuffix(k, ".#")) + return EmptyOrUnsetBlockDiffSuppressLogic(k, old, new, o, n) +} + +// The core logic for EmptyOrUnsetBlockDiffSuppress, in a format that is more conducive +// to unit testing. 
+func EmptyOrUnsetBlockDiffSuppressLogic(k, old, new string, o, n interface{}) bool { + if !strings.HasSuffix(k, ".#") { + return false + } + var l []interface{} + if old == "0" && new == "1" { + l = n.([]interface{}) + } else if new == "0" && old == "1" { + l = o.([]interface{}) + } else { + // we don't have one set and one unset, so don't suppress the diff + return false + } + + contents, ok := l[0].(map[string]interface{}) + if !ok { + return false + } + for _, v := range contents { + if !IsEmptyValue(reflect.ValueOf(v)) { + return false + } + } + return true +} + +// Suppress diffs for values that are equivalent except for their use of the words "location" +// compared to "region" or "zone" +func LocationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + return LocationDiffSuppressHelper(old, new) || LocationDiffSuppressHelper(new, old) +} + +func LocationDiffSuppressHelper(a, b string) bool { + return strings.Replace(a, "/locations/", "/regions/", 1) == b || + strings.Replace(a, "/locations/", "/zones/", 1) == b +} + +// For managed SSL certs, if new is an absolute FQDN (trailing '.') but old isn't, treat them as equals. 
+func AbsoluteDomainSuppress(k, old, new string, _ *schema.ResourceData) bool { + if strings.HasPrefix(k, "managed.0.domains.") { + return old == strings.TrimRight(new, ".") || new == strings.TrimRight(old, ".") + } + return false +} + +func TimestampDiffSuppress(format string) schema.SchemaDiffSuppressFunc { + return func(_, old, new string, _ *schema.ResourceData) bool { + oldT, err := time.Parse(format, old) + if err != nil { + return false + } + + newT, err := time.Parse(format, new) + if err != nil { + return false + } + + return oldT == newT + } +} + +// suppress diff when saved is Ipv4 format while new is required a reference +// this happens for an internal ip for Private Services Connect +func InternalIpDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + return (net.ParseIP(old) != nil) && (net.ParseIP(new) == nil) +} + +// Suppress diffs for duration format. ex "60.0s" and "60s" same +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#duration +func DurationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + oDuration, err := time.ParseDuration(old) + if err != nil { + return false + } + nDuration, err := time.ParseDuration(new) + if err != nil { + return false + } + return oDuration == nDuration +} + +// Use this method when the field accepts either an IP address or a +// self_link referencing a resource (such as google_compute_route's +// next_hop_ilb) +func CompareIpAddressOrSelfLinkOrResourceName(_, old, new string, _ *schema.ResourceData) bool { + // if we can parse `new` as an IP address, then compare as strings + if net.ParseIP(new) != nil { + return new == old + } + + // otherwise compare as self links + return CompareSelfLinkOrResourceName("", old, new, nil) +} + +// Use this method when subnet is optioanl and auto_create_subnetworks = true +// API sometimes choose a subnet so the diff needs to be ignored +func CompareOptionalSubnet(_, old, new string, _ *schema.ResourceData) bool { + if 
IsEmptyValue(reflect.ValueOf(new)) { + return true + } + // otherwise compare as self links + return CompareSelfLinkOrResourceName("", old, new, nil) +} + +// Suppress diffs in below cases +// "https://hello-rehvs75zla-uc.a.run.app/" -> "https://hello-rehvs75zla-uc.a.run.app" +// "https://hello-rehvs75zla-uc.a.run.app" -> "https://hello-rehvs75zla-uc.a.run.app/" +func LastSlashDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + if last := len(new) - 1; last >= 0 && new[last] == '/' { + new = new[:last] + } + + if last := len(old) - 1; last >= 0 && old[last] == '/' { + old = old[:last] + } + return new == old +} + +// Suppress diffs when the value read from api +// has the project number instead of the project name +func ProjectNumberDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + var a2, b2 string + reN := regexp.MustCompile("projects/\\d+") + re := regexp.MustCompile("projects/[^/]+") + replacement := []byte("projects/equal") + a2 = string(reN.ReplaceAll([]byte(old), replacement)) + b2 = string(re.ReplaceAll([]byte(new), replacement)) + return a2 == b2 +} + +func CompareCaseInsensitive(k, old, new string, d *schema.ResourceData) bool { + return strings.ToLower(old) == strings.ToLower(new) +} + +func IsNewResource(diff TerraformResourceDiff) bool { + name := diff.Get("name") + return name.(string) == "" +} + +func CompareCryptoKeyVersions(_, old, new string, _ *schema.ResourceData) bool { + // The API can return cryptoKeyVersions even though it wasn't specified. + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + + kmsKeyWithoutVersions := strings.Split(old, "/cryptoKeyVersions")[0] + if kmsKeyWithoutVersions == new { + return true + } + + return false +} + +func CidrOrSizeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // If the user specified a size and the API returned a full cidr block, suppress. 
+ return strings.HasPrefix(new, "/") && strings.HasSuffix(old, new) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/common_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/common_operation.go similarity index 93% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/common_operation.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/common_operation.go index bbd724b8c2..cb96a10d0d 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/common_operation.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/common_operation.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpgresource import ( "fmt" @@ -6,6 +8,7 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" ) @@ -111,7 +114,7 @@ func CommonRefreshFunc(w Waiter) resource.StateRefreshFunc { op, err := w.QueryOp() if err != nil { // Retry 404 when getting operation (not resource state) - if isRetryableError(err, isNotFoundRetryableError("GET operation")) { + if transport_tpg.IsRetryableError(err, []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsNotFoundRetryableError("GET operation")}, nil) { log.Printf("[DEBUG] Dismissed retryable error on GET operation %q: %s", w.OpName(), err) return nil, "done: false", nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/convert.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/convert.go new file mode 100644 index 0000000000..bbdc7a784e --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/convert.go @@ -0,0 +1,99 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpgresource + +import ( + "encoding/json" + "reflect" +) + +// Convert between two types by converting to/from JSON. Intended to switch +// between multiple API versions, as they are strict supersets of one another. +// item and out are pointers to structs +func Convert(item, out interface{}) error { + bytes, err := json.Marshal(item) + if err != nil { + return err + } + + err = json.Unmarshal(bytes, out) + if err != nil { + return err + } + + // Converting between maps and structs only occurs when autogenerated resources convert the result + // of an HTTP request. Those results do not contain omitted fields, so no need to set them. + if _, ok := item.(map[string]interface{}); !ok { + setOmittedFields(item, out) + } + + return nil +} + +// When converting to a map, we can't use setOmittedFields because FieldByName +// fails. Luckily, we don't use the omitted fields anymore with generated +// resources, and this function is used to bridge from handwritten -> generated. +// Since this is a known type, we can create it inline instead of needing to +// pass an object in. +func ConvertToMap(item interface{}) (map[string]interface{}, error) { + out := make(map[string]interface{}) + bytes, err := json.Marshal(item) + if err != nil { + return nil, err + } + + err = json.Unmarshal(bytes, &out) + if err != nil { + return nil, err + } + + return out, nil +} + +func setOmittedFields(item, out interface{}) { + // Both inputs must be pointers, see https://blog.golang.org/laws-of-reflection: + // "To modify a reflection object, the value must be settable." 
+ iVal := reflect.ValueOf(item).Elem() + oVal := reflect.ValueOf(out).Elem() + + // Loop through all the fields of the struct to look for omitted fields and nested fields + for i := 0; i < iVal.NumField(); i++ { + iField := iVal.Field(i) + if IsEmptyValue(iField) { + continue + } + + fieldInfo := iVal.Type().Field(i) + oField := oVal.FieldByName(fieldInfo.Name) + + // Only look at fields that exist in the output struct + if !oField.IsValid() { + continue + } + + // If the field contains a 'json:"="' tag, then it was omitted from the Marshal/Unmarshal + // call and needs to be added back in. + if fieldInfo.Tag.Get("json") == "-" { + oField.Set(iField) + } + + // If this field is a struct, *struct, []struct, or []*struct, recurse. + if iField.Kind() == reflect.Struct { + setOmittedFields(iField.Addr().Interface(), oField.Addr().Interface()) + } + if iField.Kind() == reflect.Ptr && iField.Type().Elem().Kind() == reflect.Struct { + setOmittedFields(iField.Interface(), oField.Interface()) + } + if iField.Kind() == reflect.Slice && iField.Type().Elem().Kind() == reflect.Struct { + for j := 0; j < iField.Len(); j++ { + setOmittedFields(iField.Index(j).Addr().Interface(), oField.Index(j).Addr().Interface()) + } + } + if iField.Kind() == reflect.Slice && iField.Type().Elem().Kind() == reflect.Ptr && + iField.Type().Elem().Elem().Kind() == reflect.Struct { + for j := 0; j < iField.Len(); j++ { + setOmittedFields(iField.Index(j).Interface(), oField.Index(j).Interface()) + } + } + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/datasource_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/datasource_helpers.go new file mode 100644 index 0000000000..71309d7f3d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/datasource_helpers.go @@ -0,0 +1,75 @@ +// Copyright (c) HashiCorp, 
Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpgresource + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// DatasourceSchemaFromResourceSchema is a recursive func that +// converts an existing Resource schema to a Datasource schema. +// All schema elements are copied, but certain attributes are ignored or changed: +// - all attributes have Computed = true +// - all attributes have ForceNew, Required = false +// - Validation funcs and attributes (e.g. MaxItems) are not copied +func DatasourceSchemaFromResourceSchema(rs map[string]*schema.Schema) map[string]*schema.Schema { + ds := make(map[string]*schema.Schema, len(rs)) + for k, v := range rs { + dv := &schema.Schema{ + Computed: true, + ForceNew: false, + Required: false, + Description: v.Description, + Type: v.Type, + } + + switch v.Type { + case schema.TypeSet: + dv.Set = v.Set + fallthrough + case schema.TypeList: + // List & Set types are generally used for 2 cases: + // - a list/set of simple primitive values (e.g. list of strings) + // - a sub resource + if elem, ok := v.Elem.(*schema.Resource); ok { + // handle the case where the Element is a sub-resource + dv.Elem = &schema.Resource{ + Schema: DatasourceSchemaFromResourceSchema(elem.Schema), + } + } else { + // handle simple primitive case + dv.Elem = v.Elem + } + + default: + // Elem of all other types are copied as-is + dv.Elem = v.Elem + + } + ds[k] = dv + + } + return ds +} + +// fixDatasourceSchemaFlags is a convenience func that toggles the Computed, +// Optional + Required flags on a schema element. This is useful when the schema +// has been generated (using `DatasourceSchemaFromResourceSchema` above for +// example) and therefore the attribute flags were not set appropriately when +// first added to the schema definition. Currently only supports top-level +// schema elements. 
+func FixDatasourceSchemaFlags(schema map[string]*schema.Schema, required bool, keys ...string) { + for _, v := range keys { + schema[v].Computed = false + schema[v].Optional = !required + schema[v].Required = required + } +} + +func AddRequiredFieldsToSchema(schema map[string]*schema.Schema, keys ...string) { + FixDatasourceSchemaFlags(schema, true, keys...) +} + +func AddOptionalFieldsToSchema(schema map[string]*schema.Schema, keys ...string) { + FixDatasourceSchemaFlags(schema, false, keys...) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/field_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/field_helpers.go new file mode 100644 index 0000000000..cc32f57a46 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/field_helpers.go @@ -0,0 +1,446 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpgresource + +import ( + "fmt" + "regexp" + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +const ( + GlobalLinkTemplate = "projects/%s/global/%s/%s" + GlobalLinkBasePattern = "projects/(.+)/global/%s/(.+)" + ZonalLinkTemplate = "projects/%s/zones/%s/%s/%s" + ZonalLinkBasePattern = "projects/(.+)/zones/(.+)/%s/(.+)" + ZonalPartialLinkBasePattern = "zones/(.+)/%s/(.+)" + RegionalLinkTemplate = "projects/%s/regions/%s/%s/%s" + RegionalLinkBasePattern = "projects/(.+)/regions/(.+)/%s/(.+)" + RegionalPartialLinkBasePattern = "regions/(.+)/%s/(.+)" + ProjectLinkTemplate = "projects/%s/%s/%s" + ProjectBasePattern = "projects/(.+)/%s/(.+)" + OrganizationLinkTemplate = "organizations/%s/%s/%s" + OrganizationBasePattern = "organizations/(.+)/%s/(.+)" +) + +// ------------------------------------------------------------ +// Field helpers +// ------------------------------------------------------------ + +func 
ParseNetworkFieldValue(network string, d TerraformResourceData, config *transport_tpg.Config) (*GlobalFieldValue, error) { + return ParseGlobalFieldValue("networks", network, "project", d, config, true) +} + +func ParseSubnetworkFieldValue(subnetwork string, d TerraformResourceData, config *transport_tpg.Config) (*RegionalFieldValue, error) { + return ParseRegionalFieldValue("subnetworks", subnetwork, "project", "region", "zone", d, config, true) +} + +func ParseSubnetworkFieldValueWithProjectField(subnetwork, projectField string, d TerraformResourceData, config *transport_tpg.Config) (*RegionalFieldValue, error) { + return ParseRegionalFieldValue("subnetworks", subnetwork, projectField, "region", "zone", d, config, true) +} + +func ParseSslCertificateFieldValue(sslCertificate string, d TerraformResourceData, config *transport_tpg.Config) (*GlobalFieldValue, error) { + return ParseGlobalFieldValue("sslCertificates", sslCertificate, "project", d, config, false) +} + +func ParseHttpHealthCheckFieldValue(healthCheck string, d TerraformResourceData, config *transport_tpg.Config) (*GlobalFieldValue, error) { + return ParseGlobalFieldValue("httpHealthChecks", healthCheck, "project", d, config, false) +} + +func ParseDiskFieldValue(disk string, d TerraformResourceData, config *transport_tpg.Config) (*ZonalFieldValue, error) { + return ParseZonalFieldValue("disks", disk, "project", "zone", d, config, false) +} + +func ParseRegionDiskFieldValue(disk string, d TerraformResourceData, config *transport_tpg.Config) (*RegionalFieldValue, error) { + return ParseRegionalFieldValue("disks", disk, "project", "region", "zone", d, config, false) +} + +func ParseOrganizationCustomRoleName(role string) (*OrganizationFieldValue, error) { + return ParseOrganizationFieldValue("roles", role, false) +} + +func ParseAcceleratorFieldValue(accelerator string, d TerraformResourceData, config *transport_tpg.Config) (*ZonalFieldValue, error) { + return ParseZonalFieldValue("acceleratorTypes", 
accelerator, "project", "zone", d, config, false) +} + +func ParseMachineTypesFieldValue(machineType string, d TerraformResourceData, config *transport_tpg.Config) (*ZonalFieldValue, error) { + return ParseZonalFieldValue("machineTypes", machineType, "project", "zone", d, config, false) +} + +func ParseInstanceFieldValue(instance string, d TerraformResourceData, config *transport_tpg.Config) (*ZonalFieldValue, error) { + return ParseZonalFieldValue("instances", instance, "project", "zone", d, config, false) +} + +func ParseInstanceGroupFieldValue(instanceGroup string, d TerraformResourceData, config *transport_tpg.Config) (*ZonalFieldValue, error) { + return ParseZonalFieldValue("instanceGroups", instanceGroup, "project", "zone", d, config, false) +} + +func ParseInstanceTemplateFieldValue(instanceTemplate string, d TerraformResourceData, config *transport_tpg.Config) (*GlobalFieldValue, error) { + return ParseGlobalFieldValue("instanceTemplates", instanceTemplate, "project", d, config, false) +} + +func ParseMachineImageFieldValue(machineImage string, d TerraformResourceData, config *transport_tpg.Config) (*GlobalFieldValue, error) { + return ParseGlobalFieldValue("machineImages", machineImage, "project", d, config, false) +} + +func ParseSecurityPolicyFieldValue(securityPolicy string, d TerraformResourceData, config *transport_tpg.Config) (*GlobalFieldValue, error) { + return ParseGlobalFieldValue("securityPolicies", securityPolicy, "project", d, config, true) +} + +func ParseNetworkEndpointGroupFieldValue(networkEndpointGroup string, d TerraformResourceData, config *transport_tpg.Config) (*ZonalFieldValue, error) { + return ParseZonalFieldValue("networkEndpointGroups", networkEndpointGroup, "project", "zone", d, config, false) +} + +func ParseNetworkEndpointGroupRegionalFieldValue(networkEndpointGroup string, d TerraformResourceData, config *transport_tpg.Config) (*RegionalFieldValue, error) { + return ParseRegionalFieldValue("networkEndpointGroups", 
networkEndpointGroup, "project", "region", "zone", d, config, false) +} + +// ------------------------------------------------------------ +// Base helpers used to create helpers for specific fields. +// ------------------------------------------------------------ + +type GlobalFieldValue struct { + Project string + Name string + + resourceType string +} + +func (f GlobalFieldValue) RelativeLink() string { + if len(f.Name) == 0 { + return "" + } + + return fmt.Sprintf(GlobalLinkTemplate, f.Project, f.resourceType, f.Name) +} + +// Parses a global field supporting 5 different formats: +// - https://www.googleapis.com/compute/ANY_VERSION/projects/{my_project}/global/{resource_type}/{resource_name} +// - projects/{my_project}/global/{resource_type}/{resource_name} +// - global/{resource_type}/{resource_name} +// - resource_name +// - "" (empty string). RelativeLink() returns empty if isEmptyValid is true. +// +// If the project is not specified, it first tries to get the project from the `projectSchemaField` and then fallback on the default project. 
+func ParseGlobalFieldValue(resourceType, fieldValue, projectSchemaField string, d TerraformResourceData, config *transport_tpg.Config, isEmptyValid bool) (*GlobalFieldValue, error) { + if len(fieldValue) == 0 { + if isEmptyValid { + return &GlobalFieldValue{resourceType: resourceType}, nil + } + return nil, fmt.Errorf("The global field for resource %s cannot be empty", resourceType) + } + + r := regexp.MustCompile(fmt.Sprintf(GlobalLinkBasePattern, resourceType)) + if parts := r.FindStringSubmatch(fieldValue); parts != nil { + return &GlobalFieldValue{ + Project: parts[1], + Name: parts[2], + + resourceType: resourceType, + }, nil + } + + project, err := GetProjectFromSchema(projectSchemaField, d, config) + if err != nil { + return nil, err + } + + return &GlobalFieldValue{ + Project: project, + Name: GetResourceNameFromSelfLink(fieldValue), + + resourceType: resourceType, + }, nil +} + +type ZonalFieldValue struct { + Project string + Zone string + Name string + + ResourceType string +} + +func (f ZonalFieldValue) RelativeLink() string { + if len(f.Name) == 0 { + return "" + } + + return fmt.Sprintf(ZonalLinkTemplate, f.Project, f.Zone, f.ResourceType, f.Name) +} + +// Parses a zonal field supporting 5 different formats: +// - https://www.googleapis.com/compute/ANY_VERSION/projects/{my_project}/zones/{zone}/{resource_type}/{resource_name} +// - projects/{my_project}/zones/{zone}/{resource_type}/{resource_name} +// - zones/{zone}/{resource_type}/{resource_name} +// - resource_name +// - "" (empty string). RelativeLink() returns empty if isEmptyValid is true. +// +// If the project is not specified, it first tries to get the project from the `projectSchemaField` and then fallback on the default project. +// If the zone is not specified, it takes the value of `zoneSchemaField`. 
+func ParseZonalFieldValue(resourceType, fieldValue, projectSchemaField, zoneSchemaField string, d TerraformResourceData, config *transport_tpg.Config, isEmptyValid bool) (*ZonalFieldValue, error) { + if len(fieldValue) == 0 { + if isEmptyValid { + return &ZonalFieldValue{ResourceType: resourceType}, nil + } + return nil, fmt.Errorf("The zonal field for resource %s cannot be empty.", resourceType) + } + + r := regexp.MustCompile(fmt.Sprintf(ZonalLinkBasePattern, resourceType)) + if parts := r.FindStringSubmatch(fieldValue); parts != nil { + return &ZonalFieldValue{ + Project: parts[1], + Zone: parts[2], + Name: parts[3], + ResourceType: resourceType, + }, nil + } + + project, err := GetProjectFromSchema(projectSchemaField, d, config) + if err != nil { + return nil, err + } + + r = regexp.MustCompile(fmt.Sprintf(ZonalPartialLinkBasePattern, resourceType)) + if parts := r.FindStringSubmatch(fieldValue); parts != nil { + return &ZonalFieldValue{ + Project: project, + Zone: parts[1], + Name: parts[2], + ResourceType: resourceType, + }, nil + } + + if len(zoneSchemaField) == 0 { + return nil, fmt.Errorf("Invalid field format. 
Got '%s', expected format '%s'", fieldValue, fmt.Sprintf(GlobalLinkTemplate, "{project}", resourceType, "{name}")) + } + + zone, ok := d.GetOk(zoneSchemaField) + if !ok { + zone = config.Zone + if zone == "" { + return nil, fmt.Errorf("A zone must be specified") + } + } + + return &ZonalFieldValue{ + Project: project, + Zone: zone.(string), + Name: GetResourceNameFromSelfLink(fieldValue), + ResourceType: resourceType, + }, nil +} + +func GetProjectFromSchema(projectSchemaField string, d TerraformResourceData, config *transport_tpg.Config) (string, error) { + res, ok := d.GetOk(projectSchemaField) + if ok && projectSchemaField != "" { + return res.(string), nil + } + if config.Project != "" { + return config.Project, nil + } + return "", fmt.Errorf("%s: required field is not set", projectSchemaField) +} + +func GetBillingProjectFromSchema(billingProjectSchemaField string, d TerraformResourceData, config *transport_tpg.Config) (string, error) { + res, ok := d.GetOk(billingProjectSchemaField) + if ok && billingProjectSchemaField != "" { + return res.(string), nil + } + if config.BillingProject != "" { + return config.BillingProject, nil + } + return "", fmt.Errorf("%s: required field is not set", billingProjectSchemaField) +} + +type OrganizationFieldValue struct { + OrgId string + Name string + + resourceType string +} + +func (f OrganizationFieldValue) RelativeLink() string { + if len(f.Name) == 0 { + return "" + } + + return fmt.Sprintf(OrganizationLinkTemplate, f.OrgId, f.resourceType, f.Name) +} + +// Parses an organization field with the following formats: +// - organizations/{my_organizations}/{resource_type}/{resource_name} +func ParseOrganizationFieldValue(resourceType, fieldValue string, isEmptyValid bool) (*OrganizationFieldValue, error) { + if len(fieldValue) == 0 { + if isEmptyValid { + return &OrganizationFieldValue{resourceType: resourceType}, nil + } + return nil, fmt.Errorf("The organization field for resource %s cannot be empty", resourceType) + } + 
+ r := regexp.MustCompile(fmt.Sprintf(OrganizationBasePattern, resourceType)) + if parts := r.FindStringSubmatch(fieldValue); parts != nil { + return &OrganizationFieldValue{ + OrgId: parts[1], + Name: parts[2], + + resourceType: resourceType, + }, nil + } + + return nil, fmt.Errorf("Invalid field format. Got '%s', expected format '%s'", fieldValue, fmt.Sprintf(OrganizationLinkTemplate, "{org_id}", resourceType, "{name}")) +} + +type RegionalFieldValue struct { + Project string + Region string + Name string + + resourceType string +} + +func (f RegionalFieldValue) RelativeLink() string { + if len(f.Name) == 0 { + return "" + } + + return fmt.Sprintf(RegionalLinkTemplate, f.Project, f.Region, f.resourceType, f.Name) +} + +// Parses a regional field supporting 5 different formats: +// - https://www.googleapis.com/compute/ANY_VERSION/projects/{my_project}/regions/{region}/{resource_type}/{resource_name} +// - projects/{my_project}/regions/{region}/{resource_type}/{resource_name} +// - regions/{region}/{resource_type}/{resource_name} +// - resource_name +// - "" (empty string). RelativeLink() returns empty if isEmptyValid is true. +// +// If the project is not specified, it first tries to get the project from the `projectSchemaField` and then fallback on the default project. +// If the region is not specified, see function documentation for `GetRegionFromSchema`. 
+func ParseRegionalFieldValue(resourceType, fieldValue, projectSchemaField, regionSchemaField, zoneSchemaField string, d TerraformResourceData, config *transport_tpg.Config, isEmptyValid bool) (*RegionalFieldValue, error) { + if len(fieldValue) == 0 { + if isEmptyValid { + return &RegionalFieldValue{resourceType: resourceType}, nil + } + return nil, fmt.Errorf("The regional field for resource %s cannot be empty.", resourceType) + } + + r := regexp.MustCompile(fmt.Sprintf(RegionalLinkBasePattern, resourceType)) + if parts := r.FindStringSubmatch(fieldValue); parts != nil { + return &RegionalFieldValue{ + Project: parts[1], + Region: parts[2], + Name: parts[3], + resourceType: resourceType, + }, nil + } + + project, err := GetProjectFromSchema(projectSchemaField, d, config) + if err != nil { + return nil, err + } + + r = regexp.MustCompile(fmt.Sprintf(RegionalPartialLinkBasePattern, resourceType)) + if parts := r.FindStringSubmatch(fieldValue); parts != nil { + return &RegionalFieldValue{ + Project: project, + Region: parts[1], + Name: parts[2], + resourceType: resourceType, + }, nil + } + + region, err := GetRegionFromSchema(regionSchemaField, zoneSchemaField, d, config) + if err != nil { + return nil, err + } + + return &RegionalFieldValue{ + Project: project, + Region: region, + Name: GetResourceNameFromSelfLink(fieldValue), + resourceType: resourceType, + }, nil +} + +// Infers the region based on the following (in order of priority): +// - `regionSchemaField` in resource schema +// - region extracted from the `zoneSchemaField` in resource schema +// - provider-level region +// - region extracted from the provider-level zone +func GetRegionFromSchema(regionSchemaField, zoneSchemaField string, d TerraformResourceData, config *transport_tpg.Config) (string, error) { + // if identical such as GKE location, check if it's a zone first and find + // the region if so. Otherwise, return as it's a region. 
+ if regionSchemaField == zoneSchemaField { + if v, ok := d.GetOk(regionSchemaField); ok { + if IsZone(v.(string)) { + return GetRegionFromZone(v.(string)), nil + } + + return v.(string), nil + } + } + + if v, ok := d.GetOk(regionSchemaField); ok && regionSchemaField != "" { + return GetResourceNameFromSelfLink(v.(string)), nil + } + if v, ok := d.GetOk(zoneSchemaField); ok && zoneSchemaField != "" { + return GetRegionFromZone(v.(string)), nil + } + if config.Region != "" { + return config.Region, nil + } + if config.Zone != "" { + return GetRegionFromZone(config.Zone), nil + } + + return "", fmt.Errorf("Cannot determine region: set in this resource, or set provider-level 'region' or 'zone'.") +} + +type ProjectFieldValue struct { + Project string + Name string + + ResourceType string +} + +func (f ProjectFieldValue) RelativeLink() string { + if len(f.Name) == 0 { + return "" + } + + return fmt.Sprintf(ProjectLinkTemplate, f.Project, f.ResourceType, f.Name) +} + +// Parses a project field with the following formats: +// - projects/{my_projects}/{resource_type}/{resource_name} +func ParseProjectFieldValue(resourceType, fieldValue, projectSchemaField string, d TerraformResourceData, config *transport_tpg.Config, isEmptyValid bool) (*ProjectFieldValue, error) { + if len(fieldValue) == 0 { + if isEmptyValid { + return &ProjectFieldValue{ResourceType: resourceType}, nil + } + return nil, fmt.Errorf("The project field for resource %s cannot be empty", resourceType) + } + + r := regexp.MustCompile(fmt.Sprintf(ProjectBasePattern, resourceType)) + if parts := r.FindStringSubmatch(fieldValue); parts != nil { + return &ProjectFieldValue{ + Project: parts[1], + Name: parts[2], + + ResourceType: resourceType, + }, nil + } + + project, err := GetProjectFromSchema(projectSchemaField, d, config) + if err != nil { + return nil, err + } + + return &ProjectFieldValue{ + Project: project, + Name: GetResourceNameFromSelfLink(fieldValue), + + ResourceType: resourceType, + }, nil +} diff 
--git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/hashcode.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/hashcode.go new file mode 100644 index 0000000000..abb3f00bda --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/hashcode.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpgresource + +import ( + "hash/crc32" +) + +// Hashcode hashes a string to a unique hashcode. +// +// crc32 returns a uint32, but for our use we need +// and non negative integer. Here we cast to an integer +// and invert it if the result is negative. +func Hashcode(s string) int { + v := int(crc32.ChecksumIEEE([]byte(s))) + if v >= 0 { + return v + } + if -v >= 0 { + return -v + } + // v == MinInt + return 0 +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/import.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/import.go similarity index 87% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/import.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/import.go index 21ad1cc2d4..3dc9abbb22 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/import.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/import.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package tpgresource import ( "fmt" @@ -6,6 +8,8 @@ import ( "regexp" "strconv" "strings" + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) // Parse an import id extracting field values using the given list of regexes. @@ -15,7 +19,7 @@ import ( // - projects/(?P[^/]+)/regions/(?P[^/]+)/subnetworks/(?P[^/]+) (applied first) // - (?P[^/]+)/(?P[^/]+)/(?P[^/]+), // - (?P[^/]+) (applied last) -func parseImportId(idRegexes []string, d TerraformResourceData, config *Config) error { +func ParseImportId(idRegexes []string, d TerraformResourceData, config *transport_tpg.Config) error { for _, idFormat := range idRegexes { re, err := regexp.Compile(idFormat) @@ -73,9 +77,9 @@ func parseImportId(idRegexes []string, d TerraformResourceData, config *Config) return fmt.Errorf("Import id %q doesn't match any of the accepted formats: %v", d.Id(), idRegexes) } -func setDefaultValues(idRegex string, d TerraformResourceData, config *Config) error { +func setDefaultValues(idRegex string, d TerraformResourceData, config *transport_tpg.Config) error { if _, ok := d.GetOk("project"); !ok && strings.Contains(idRegex, "?P") { - project, err := getProject(d, config) + project, err := GetProject(d, config) if err != nil { return err } @@ -84,7 +88,7 @@ func setDefaultValues(idRegex string, d TerraformResourceData, config *Config) e } } if _, ok := d.GetOk("region"); !ok && strings.Contains(idRegex, "?P") { - region, err := getRegion(d, config) + region, err := GetRegion(d, config) if err != nil { return err } @@ -93,7 +97,7 @@ func setDefaultValues(idRegex string, d TerraformResourceData, config *Config) e } } if _, ok := d.GetOk("zone"); !ok && strings.Contains(idRegex, "?P") { - zone, err := getZone(d, config) + zone, err := GetZone(d, config) if err != nil { return err } @@ -107,13 +111,13 @@ func setDefaultValues(idRegex string, d TerraformResourceData, config *Config) e // Parse an import id extracting field values 
using the given list of regexes. // They are applied in order. The first in the list is tried first. // This does not mutate any of the parameters, returning a map of matches -// Similar to parseImportId in import.go, but less import specific +// Similar to ParseImportId in import.go, but less import specific // // e.g: // - projects/(?P[^/]+)/regions/(?P[^/]+)/subnetworks/(?P[^/]+) (applied first) // - (?P[^/]+)/(?P[^/]+)/(?P[^/]+), // - (?P[^/]+) (applied last) -func getImportIdQualifiers(idRegexes []string, d TerraformResourceData, config *Config, id string) (map[string]string, error) { +func GetImportIdQualifiers(idRegexes []string, d TerraformResourceData, config *transport_tpg.Config, id string) (map[string]string, error) { for _, idFormat := range idRegexes { re, err := regexp.Compile(idFormat) @@ -156,18 +160,18 @@ func getImportIdQualifiers(idRegexes []string, d TerraformResourceData, config * // Returns a set of default values that are contained in a regular expression // This does not mutate any parameters, instead returning a map of defaults -func getDefaultValues(idRegex string, d TerraformResourceData, config *Config) (map[string]string, error) { +func getDefaultValues(idRegex string, d TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { result := make(map[string]string) if _, ok := d.GetOk("project"); !ok && strings.Contains(idRegex, "?P") { - project, _ := getProject(d, config) + project, _ := GetProject(d, config) result["project"] = project } if _, ok := d.GetOk("region"); !ok && strings.Contains(idRegex, "?P") { - region, _ := getRegion(d, config) + region, _ := GetRegion(d, config) result["region"] = region } if _, ok := d.GetOk("zone"); !ok && strings.Contains(idRegex, "?P") { - zone, _ := getZone(d, config) + zone, _ := GetZone(d, config) result["zone"] = zone } return result, nil diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/regional_utils.go 
b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/regional_utils.go new file mode 100644 index 0000000000..8328380f0b --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/regional_utils.go @@ -0,0 +1,47 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpgresource + +import ( + "fmt" + "strings" + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +//These functions are used by both the `resource_container_node_pool` and `resource_container_cluster` for handling regional clusters + +func IsZone(location string) bool { + return len(strings.Split(location, "-")) == 3 +} + +// GetLocation attempts to get values in this order (if they exist): +// - location argument in the resource config +// - region argument in the resource config +// - zone argument in the resource config +// - zone argument set in the provider config +func GetLocation(d TerraformResourceData, config *transport_tpg.Config) (string, error) { + if v, ok := d.GetOk("location"); ok { + return v.(string), nil + } else if v, isRegionalCluster := d.GetOk("region"); isRegionalCluster { + return v.(string), nil + } else { + // If region is not explicitly set, use "zone" (or fall back to the provider-level zone). + // For now, to avoid confusion, we require region to be set in the config to create a regional + // cluster rather than falling back to the provider-level region. + return GetZone(d, config) + } +} + +// GetZone reads the "zone" value from the given resource data and falls back +// to provider's value if not given. If neither is provided, returns an error. 
+func GetZone(d TerraformResourceData, config *transport_tpg.Config) (string, error) { + res, ok := d.GetOk("zone") + if !ok { + if config.Zone != "" { + return config.Zone, nil + } + return "", fmt.Errorf("Cannot determine zone: set in this resource, or set provider-level zone.") + } + return GetResourceNameFromSelfLink(res.(string)), nil +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/resource_test_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/resource_test_utils.go new file mode 100644 index 0000000000..8fa6b95afd --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/resource_test_utils.go @@ -0,0 +1,182 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpgresource + +import ( + "reflect" + "regexp" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type ResourceDataMock struct { + FieldsInSchema map[string]interface{} + FieldsWithHasChange []string + id string +} + +func (d *ResourceDataMock) HasChange(key string) bool { + exists := false + for _, val := range d.FieldsWithHasChange { + if key == val { + exists = true + } + } + + return exists +} + +func (d *ResourceDataMock) Get(key string) interface{} { + v, _ := d.GetOk(key) + return v +} + +func (d *ResourceDataMock) GetOk(key string) (interface{}, bool) { + v, ok := d.GetOkExists(key) + if ok && !IsEmptyValue(reflect.ValueOf(v)) { + return v, true + } else { + return v, false + } +} + +func (d *ResourceDataMock) GetOkExists(key string) (interface{}, bool) { + for k, v := range d.FieldsInSchema { + if key == k { + return v, true + } + } + + return nil, false +} + +func (d *ResourceDataMock) 
Set(key string, value interface{}) error { + d.FieldsInSchema[key] = value + return nil +} + +func (d *ResourceDataMock) SetId(v string) { + d.id = v +} + +func (d *ResourceDataMock) Id() string { + return d.id +} + +func (d *ResourceDataMock) GetProviderMeta(dst interface{}) error { + return nil +} + +func (d *ResourceDataMock) Timeout(key string) time.Duration { + return time.Duration(1) +} + +type ResourceDiffMock struct { + Before map[string]interface{} + After map[string]interface{} + Cleared map[string]interface{} + IsForceNew bool +} + +func (d *ResourceDiffMock) GetChange(key string) (interface{}, interface{}) { + return d.Before[key], d.After[key] +} + +func (d *ResourceDiffMock) HasChange(key string) bool { + old, new := d.GetChange(key) + return old != new +} + +func (d *ResourceDiffMock) Get(key string) interface{} { + return d.After[key] +} + +func (d *ResourceDiffMock) GetOk(key string) (interface{}, bool) { + v, ok := d.After[key] + return v, ok +} + +func (d *ResourceDiffMock) Clear(key string) error { + if d.Cleared == nil { + d.Cleared = map[string]interface{}{} + } + d.Cleared[key] = true + return nil +} + +func (d *ResourceDiffMock) ForceNew(key string) error { + d.IsForceNew = true + return nil +} + +// This function isn't a test of transport.go; instead, it is used as an alternative +// to ReplaceVars inside tests. 
+func ReplaceVarsForTest(config *transport_tpg.Config, rs *terraform.ResourceState, linkTmpl string) (string, error) { + re := regexp.MustCompile("{{([[:word:]]+)}}") + var project, region, zone string + + if strings.Contains(linkTmpl, "{{project}}") { + project = rs.Primary.Attributes["project"] + } + + if strings.Contains(linkTmpl, "{{region}}") { + region = GetResourceNameFromSelfLink(rs.Primary.Attributes["region"]) + } + + if strings.Contains(linkTmpl, "{{zone}}") { + zone = GetResourceNameFromSelfLink(rs.Primary.Attributes["zone"]) + } + + replaceFunc := func(s string) string { + m := re.FindStringSubmatch(s)[1] + if m == "project" { + return project + } + if m == "region" { + return region + } + if m == "zone" { + return zone + } + + if v, ok := rs.Primary.Attributes[m]; ok { + return v + } + + // Attempt to draw values from the provider config + if f := reflect.Indirect(reflect.ValueOf(config)).FieldByName(m); f.IsValid() { + return f.String() + } + + return "" + } + + return re.ReplaceAllStringFunc(linkTmpl, replaceFunc), nil +} + +// Used to create populated schema.ResourceData structs in tests. +// Pass in a schema and a config map containing the fields and values you wish to set +// The returned schema.ResourceData can represent a configured resource, data source or provider. 
+func SetupTestResourceDataFromConfigMap(t *testing.T, s map[string]*schema.Schema, configValues map[string]interface{}) *schema.ResourceData { + + // Create empty schema.ResourceData using the SDK Provider schema + emptyConfigMap := map[string]interface{}{} + d := schema.TestResourceDataRaw(t, s, emptyConfigMap) + + // Load Terraform config data + if len(configValues) > 0 { + for k, v := range configValues { + err := d.Set(k, v) + if err != nil { + t.Fatalf("error during test setup: %v", err) + } + } + } + + return d +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/self_link_helpers.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/self_link_helpers.go new file mode 100644 index 0000000000..3efc9e8254 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/self_link_helpers.go @@ -0,0 +1,174 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package tpgresource + +import ( + "errors" + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Compare only the resource name of two self links/paths. +func CompareResourceNames(_, old, new string, _ *schema.ResourceData) bool { + return GetResourceNameFromSelfLink(old) == GetResourceNameFromSelfLink(new) +} + +// Compare only the relative path of two self links. 
+func CompareSelfLinkRelativePaths(_, old, new string, _ *schema.ResourceData) bool { + oldStripped, err := GetRelativePath(old) + if err != nil { + return false + } + + newStripped, err := GetRelativePath(new) + if err != nil { + return false + } + + if oldStripped == newStripped { + return true + } + + return false +} + +// CompareSelfLinkOrResourceName checks if two resources are the same resource +// +// Use this method when the field accepts either a name or a self_link referencing a resource. +// The value we store (i.e. `old` in this method), must be a self_link. +func CompareSelfLinkOrResourceName(_, old, new string, _ *schema.ResourceData) bool { + newParts := strings.Split(new, "/") + + if len(newParts) == 1 { + // `new` is a name + // `old` is always a self_link + if GetResourceNameFromSelfLink(old) == newParts[0] { + return true + } + } + + // The `new` string is a self_link + return CompareSelfLinkRelativePaths("", old, new, nil) +} + +// Hash the relative path of a self link. +func SelfLinkRelativePathHash(selfLink interface{}) int { + path, _ := GetRelativePath(selfLink.(string)) + return Hashcode(path) +} + +func GetRelativePath(selfLink string) (string, error) { + stringParts := strings.SplitAfterN(selfLink, "projects/", 2) + if len(stringParts) != 2 { + return "", fmt.Errorf("String was not a self link: %s", selfLink) + } + + return "projects/" + stringParts[1], nil +} + +// Hash the name path of a self link. 
+func SelfLinkNameHash(selfLink interface{}) int { + name := GetResourceNameFromSelfLink(selfLink.(string)) + return Hashcode(name) +} + +func ConvertSelfLinkToV1(link string) string { + reg := regexp.MustCompile("/compute/[a-zA-Z0-9]*/projects/") + return reg.ReplaceAllString(link, "/compute/v1/projects/") +} + +func GetResourceNameFromSelfLink(link string) string { + parts := strings.Split(link, "/") + return parts[len(parts)-1] +} + +func NameFromSelfLinkStateFunc(v interface{}) string { + return GetResourceNameFromSelfLink(v.(string)) +} + +func StoreResourceName(resourceLink interface{}) string { + return GetResourceNameFromSelfLink(resourceLink.(string)) +} + +type LocationType int + +const ( + Zonal LocationType = iota + Regional + Global +) + +func GetZonalResourcePropertiesFromSelfLinkOrSchema(d *schema.ResourceData, config *transport_tpg.Config) (string, string, string, error) { + return getResourcePropertiesFromSelfLinkOrSchema(d, config, Zonal) +} + +func GetRegionalResourcePropertiesFromSelfLinkOrSchema(d *schema.ResourceData, config *transport_tpg.Config) (string, string, string, error) { + return getResourcePropertiesFromSelfLinkOrSchema(d, config, Regional) +} + +func getResourcePropertiesFromSelfLinkOrSchema(d *schema.ResourceData, config *transport_tpg.Config, locationType LocationType) (string, string, string, error) { + if selfLink, ok := d.GetOk("self_link"); ok { + return GetLocationalResourcePropertiesFromSelfLinkString(selfLink.(string)) + } else { + project, err := GetProject(d, config) + if err != nil { + return "", "", "", err + } + + location := "" + if locationType == Regional { + location, err = GetRegion(d, config) + if err != nil { + return "", "", "", err + } + } else if locationType == Zonal { + location, err = GetZone(d, config) + if err != nil { + return "", "", "", err + } + } + + n, ok := d.GetOk("name") + name := n.(string) + if !ok { + return "", "", "", errors.New("must provide either `self_link` or `name`") + } + return 
project, location, name, nil + } +} + +// given a full locational (non-global) self link, returns the project + region/zone + name or an error +func GetLocationalResourcePropertiesFromSelfLinkString(selfLink string) (string, string, string, error) { + parsed, err := url.Parse(selfLink) + if err != nil { + return "", "", "", err + } + + s := strings.Split(parsed.Path, "/") + + // This is a pretty bad way to tell if this is a self link, but stops us + // from accessing an index out of bounds and causing a panic. generally, we + // expect bad values to be partial URIs and names, so this will catch them + if len(s) < 9 { + return "", "", "", fmt.Errorf("value %s was not a self link", selfLink) + } + + return s[4], s[6], s[8], nil +} + +// This function supports selflinks that have regions and locations in their paths +func GetRegionFromRegionalSelfLink(selfLink string) string { + re := regexp.MustCompile("projects/[a-zA-Z0-9-]*/(?:locations|regions)/([a-zA-Z0-9-]*)") + switch { + case re.MatchString(selfLink): + if res := re.FindStringSubmatch(selfLink); len(res) == 2 && res[1] != "" { + return res[1] + } + } + return selfLink +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/service_scope.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/service_scope.go similarity index 87% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/service_scope.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/service_scope.go index 3bd4a997ae..6de9ca0060 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/service_scope.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/service_scope.go @@ -1,8 +1,10 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package tpgresource import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -func canonicalizeServiceScope(scope string) string { +func CanonicalizeServiceScope(scope string) string { // This is a convenience map of short names used by the gcloud tool // to the GCE auth endpoints they alias to. scopeMap := map[string]string{ @@ -39,15 +41,15 @@ func canonicalizeServiceScope(scope string) string { return scope } -func canonicalizeServiceScopes(scopes []string) []string { +func CanonicalizeServiceScopes(scopes []string) []string { cs := make([]string, len(scopes)) for i, scope := range scopes { - cs[i] = canonicalizeServiceScope(scope) + cs[i] = CanonicalizeServiceScope(scope) } return cs } -func stringScopeHashcode(v interface{}) int { - v = canonicalizeServiceScope(v.(string)) +func StringScopeHashcode(v interface{}) int { + v = CanonicalizeServiceScope(v.(string)) return schema.HashString(v) } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/utils.go new file mode 100644 index 0000000000..e474ae4ff6 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgresource/utils.go @@ -0,0 +1,706 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package tpgresource + +import ( + "crypto/md5" + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "log" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/errwrap" + fwDiags "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "google.golang.org/api/googleapi" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type TerraformResourceDataChange interface { + GetChange(string) (interface{}, interface{}) +} + +type TerraformResourceData interface { + HasChange(string) bool + GetOkExists(string) (interface{}, bool) + GetOk(string) (interface{}, bool) + Get(string) interface{} + Set(string, interface{}) error + SetId(string) + Id() string + GetProviderMeta(interface{}) error + Timeout(key string) time.Duration +} + +type TerraformResourceDiff interface { + HasChange(string) bool + GetChange(string) (interface{}, interface{}) + Get(string) interface{} + GetOk(string) (interface{}, bool) + Clear(string) error + ForceNew(string) error +} + +// Contains functions that don't really belong anywhere else. + +// GetRegionFromZone returns the region from a zone for Google cloud. +// This is by removing the last two chars from the zone name to leave the region +// If there aren't enough characters in the input string, an empty string is returned +// e.g. 
southamerica-west1-a => southamerica-west1 +func GetRegionFromZone(zone string) string { + if zone != "" && len(zone) > 2 { + region := zone[:len(zone)-2] + return region + } + return "" +} + +// Infers the region based on the following (in order of priority): +// - `region` field in resource schema +// - region extracted from the `zone` field in resource schema +// - provider-level region +// - region extracted from the provider-level zone +func GetRegion(d TerraformResourceData, config *transport_tpg.Config) (string, error) { + return GetRegionFromSchema("region", "zone", d, config) +} + +// GetProject reads the "project" field from the given resource data and falls +// back to the provider's value if not given. If the provider's value is not +// given, an error is returned. +func GetProject(d TerraformResourceData, config *transport_tpg.Config) (string, error) { + return GetProjectFromSchema("project", d, config) +} + +// GetBillingProject reads the "billing_project" field from the given resource data and falls +// back to the provider's value if not given. If no value is found, an error is returned. +func GetBillingProject(d TerraformResourceData, config *transport_tpg.Config) (string, error) { + return GetBillingProjectFromSchema("billing_project", d, config) +} + +// GetProjectFromDiff reads the "project" field from the given diff and falls +// back to the provider's value if not given. If the provider's value is not +// given, an error is returned. 
+func GetProjectFromDiff(d *schema.ResourceDiff, config *transport_tpg.Config) (string, error) { + res, ok := d.GetOk("project") + if ok { + return res.(string), nil + } + if config.Project != "" { + return config.Project, nil + } + return "", fmt.Errorf("%s: required field is not set", "project") +} + +func GetRouterLockName(region string, router string) string { + return fmt.Sprintf("router/%s/%s", region, router) +} + +func IsFailedPreconditionError(err error) bool { + gerr, ok := errwrap.GetType(err, &googleapi.Error{}).(*googleapi.Error) + if !ok { + return false + } + if gerr == nil { + return false + } + if gerr.Code != 400 { + return false + } + for _, e := range gerr.Errors { + if e.Reason == "failedPrecondition" { + return true + } + } + return false +} + +func IsConflictError(err error) bool { + if e, ok := err.(*googleapi.Error); ok && (e.Code == 409 || e.Code == 412) { + return true + } else if !ok && errwrap.ContainsType(err, &googleapi.Error{}) { + e := errwrap.GetType(err, &googleapi.Error{}).(*googleapi.Error) + if e.Code == 409 || e.Code == 412 { + return true + } + } + return false +} + +// gRPC does not return errors of type *googleapi.Error. Instead the errors returned are *status.Error. +// See the types of codes returned here (https://pkg.go.dev/google.golang.org/grpc/codes#Code). +func IsNotFoundGrpcError(err error) bool { + if errorStatus, ok := status.FromError(err); ok && errorStatus.Code() == codes.NotFound { + return true + } + return false +} + +// ExpandLabels pulls the value of "labels" out of a TerraformResourceData as a map[string]string. +func ExpandLabels(d TerraformResourceData) map[string]string { + return ExpandStringMap(d, "labels") +} + +// ExpandEnvironmentVariables pulls the value of "environment_variables" out of a schema.ResourceData as a map[string]string. 
+func ExpandEnvironmentVariables(d *schema.ResourceData) map[string]string { + return ExpandStringMap(d, "environment_variables") +} + +// ExpandBuildEnvironmentVariables pulls the value of "build_environment_variables" out of a schema.ResourceData as a map[string]string. +func ExpandBuildEnvironmentVariables(d *schema.ResourceData) map[string]string { + return ExpandStringMap(d, "build_environment_variables") +} + +// ExpandStringMap pulls the value of key out of a TerraformResourceData as a map[string]string. +func ExpandStringMap(d TerraformResourceData, key string) map[string]string { + v, ok := d.GetOk(key) + + if !ok { + return map[string]string{} + } + + return ConvertStringMap(v.(map[string]interface{})) +} + +func ConvertStringMap(v map[string]interface{}) map[string]string { + m := make(map[string]string) + for k, val := range v { + m[k] = val.(string) + } + return m +} + +func ConvertStringArr(ifaceArr []interface{}) []string { + return ConvertAndMapStringArr(ifaceArr, func(s string) string { return s }) +} + +func ConvertAndMapStringArr(ifaceArr []interface{}, f func(string) string) []string { + var arr []string + for _, v := range ifaceArr { + if v == nil { + continue + } + arr = append(arr, f(v.(string))) + } + return arr +} + +func MapStringArr(original []string, f func(string) string) []string { + var arr []string + for _, v := range original { + arr = append(arr, f(v)) + } + return arr +} + +func ConvertStringArrToInterface(strs []string) []interface{} { + arr := make([]interface{}, len(strs)) + for i, str := range strs { + arr[i] = str + } + return arr +} + +func ConvertStringSet(set *schema.Set) []string { + s := make([]string, 0, set.Len()) + for _, v := range set.List() { + s = append(s, v.(string)) + } + sort.Strings(s) + + return s +} + +func GolangSetFromStringSlice(strings []string) map[string]struct{} { + set := map[string]struct{}{} + for _, v := range strings { + set[v] = struct{}{} + } + + return set +} + +func 
StringSliceFromGolangSet(sset map[string]struct{}) []string { + ls := make([]string, 0, len(sset)) + for s := range sset { + ls = append(ls, s) + } + sort.Strings(ls) + + return ls +} + +func ReverseStringMap(m map[string]string) map[string]string { + o := map[string]string{} + for k, v := range m { + o[v] = k + } + return o +} + +func MergeStringMaps(a, b map[string]string) map[string]string { + merged := make(map[string]string) + + for k, v := range a { + merged[k] = v + } + + for k, v := range b { + merged[k] = v + } + + return merged +} + +func MergeSchemas(a, b map[string]*schema.Schema) map[string]*schema.Schema { + merged := make(map[string]*schema.Schema) + + for k, v := range a { + merged[k] = v + } + + for k, v := range b { + merged[k] = v + } + + return merged +} + +func StringToFixed64(v string) (int64, error) { + return strconv.ParseInt(v, 10, 64) +} + +func ExtractFirstMapConfig(m []interface{}) map[string]interface{} { + if len(m) == 0 || m[0] == nil { + return map[string]interface{}{} + } + + return m[0].(map[string]interface{}) +} + +// ServiceAccountFQN will attempt to generate the fully qualified name in the format of: +// +// "projects/(-|)/serviceAccounts/@.iam.gserviceaccount.com" +// A project is required if we are trying to build the FQN from a service account id and +// and error will be returned in this case if no project is set in the resource or the +// provider-level config +func ServiceAccountFQN(serviceAccount string, d TerraformResourceData, config *transport_tpg.Config) (string, error) { + // If the service account id is already the fully qualified name + if strings.HasPrefix(serviceAccount, "projects/") { + return serviceAccount, nil + } + + // If the service account id is an email + if strings.Contains(serviceAccount, "@") { + return "projects/-/serviceAccounts/" + serviceAccount, nil + } + + // Get the project from the resource or fallback to the project + // in the provider configuration + project, err := GetProject(d, config) + 
if err != nil { + return "", err + } + + return fmt.Sprintf("projects/-/serviceAccounts/%s@%s.iam.gserviceaccount.com", serviceAccount, project), nil +} + +func PaginatedListRequest(project, baseUrl, userAgent string, config *transport_tpg.Config, flattener func(map[string]interface{}) []interface{}) ([]interface{}, error) { + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: baseUrl, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + ls := flattener(res) + pageToken, ok := res["pageToken"] + for ok { + if pageToken.(string) == "" { + break + } + url := fmt.Sprintf("%s?pageToken=%s", baseUrl, pageToken.(string)) + res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + ls = append(ls, flattener(res)) + pageToken, ok = res["pageToken"] + } + + return ls, nil +} + +func GetInterconnectAttachmentLink(config *transport_tpg.Config, project, region, ic, userAgent string) (string, error) { + if !strings.Contains(ic, "/") { + icData, err := config.NewComputeClient(userAgent).InterconnectAttachments.Get( + project, region, ic).Do() + if err != nil { + return "", fmt.Errorf("Error reading interconnect attachment: %s", err) + } + ic = icData.SelfLink + } + + return ic, nil +} + +// Given two sets of references (with "from" values in self link form), +// determine which need to be added or removed // during an update using +// addX/removeX APIs. 
+func CalcAddRemove(from []string, to []string) (add, remove []string) { + add = make([]string, 0) + remove = make([]string, 0) + for _, u := range to { + found := false + for _, v := range from { + if CompareSelfLinkOrResourceName("", v, u, nil) { + found = true + break + } + } + if !found { + add = append(add, u) + } + } + for _, u := range from { + found := false + for _, v := range to { + if CompareSelfLinkOrResourceName("", u, v, nil) { + found = true + break + } + } + if !found { + remove = append(remove, u) + } + } + return add, remove +} + +func StringInSlice(arr []string, str string) bool { + for _, i := range arr { + if i == str { + return true + } + } + + return false +} + +func MigrateStateNoop(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + return is, nil +} + +func ExpandString(v interface{}, d TerraformResourceData, config *transport_tpg.Config) (string, error) { + return v.(string), nil +} + +func ChangeFieldSchemaToForceNew(sch *schema.Schema) { + sch.ForceNew = true + switch sch.Type { + case schema.TypeList: + case schema.TypeSet: + if nestedR, ok := sch.Elem.(*schema.Resource); ok { + for _, nestedSch := range nestedR.Schema { + ChangeFieldSchemaToForceNew(nestedSch) + } + } + } +} + +func GenerateUserAgentString(d TerraformResourceData, currentUserAgent string) (string, error) { + var m transport_tpg.ProviderMeta + + err := d.GetProviderMeta(&m) + if err != nil { + return currentUserAgent, err + } + + if m.ModuleName != "" { + return strings.Join([]string{currentUserAgent, m.ModuleName}, " "), nil + } + + return currentUserAgent, nil +} + +func SnakeToPascalCase(s string) string { + split := strings.Split(s, "_") + for i := range split { + split[i] = strings.Title(split[i]) + } + return strings.Join(split, "") +} + +func CheckStringMap(v interface{}) map[string]string { + m, ok := v.(map[string]string) + if ok { + return m + } + return ConvertStringMap(v.(map[string]interface{})) +} + +// return a 
fake 404 so requests get retried or nested objects are considered deleted +func Fake404(reasonResourceType, resourceName string) *googleapi.Error { + return &googleapi.Error{ + Code: 404, + Message: fmt.Sprintf("%v object %v not found", reasonResourceType, resourceName), + } +} + +// validate name of the gcs bucket. Guidelines are located at https://cloud.google.com/storage/docs/naming-buckets +// this does not attempt to check for IP addresses or close misspellings of "google" +func CheckGCSName(name string) error { + if strings.HasPrefix(name, "goog") { + return fmt.Errorf("error: bucket name %s cannot start with %q", name, "goog") + } + + if strings.Contains(name, "google") { + return fmt.Errorf("error: bucket name %s cannot contain %q", name, "google") + } + + valid, _ := regexp.MatchString("^[a-z0-9][a-z0-9_.-]{1,220}[a-z0-9]$", name) + if !valid { + return fmt.Errorf("error: bucket name validation failed %v. See https://cloud.google.com/storage/docs/naming-buckets", name) + } + + for _, str := range strings.Split(name, ".") { + valid, _ := regexp.MatchString("^[a-z0-9_-]{1,63}$", str) + if !valid { + return fmt.Errorf("error: bucket name validation failed %v", str) + } + } + return nil +} + +// CheckGoogleIamPolicy makes assertions about the contents of a google_iam_policy data source's policy_data attribute +func CheckGoogleIamPolicy(value string) error { + if strings.Contains(value, "\"description\":\"\"") { + return fmt.Errorf("found an empty description field (should be omitted) in google_iam_policy data source: %s", value) + } + return nil +} + +// Retries an operation while the canonical error code is FAILED_PRECONDTION +// which indicates there is an incompatible operation already running on the +// cluster. This error can be safely retried until the incompatible operation +// completes, and the newly requested operation can begin. 
+func RetryWhileIncompatibleOperation(timeout time.Duration, lockKey string, f func() error) error { + return resource.Retry(timeout, func() *resource.RetryError { + if err := transport_tpg.LockedCall(lockKey, f); err != nil { + if IsFailedPreconditionError(err) { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) +} + +func FrameworkDiagsToSdkDiags(fwD fwDiags.Diagnostics) *diag.Diagnostics { + var diags diag.Diagnostics + for _, e := range fwD.Errors() { + diags = append(diags, diag.Diagnostic{ + Detail: e.Detail(), + Severity: diag.Error, + Summary: e.Summary(), + }) + } + for _, w := range fwD.Warnings() { + diags = append(diags, diag.Diagnostic{ + Detail: w.Detail(), + Severity: diag.Warning, + Summary: w.Summary(), + }) + } + + return &diags +} + +func IsEmptyValue(v reflect.Value) bool { + if !v.IsValid() { + return true + } + + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func ReplaceVars(d TerraformResourceData, config *transport_tpg.Config, linkTmpl string) (string, error) { + return ReplaceVarsRecursive(d, config, linkTmpl, false, 0) +} + +// relaceVarsForId shortens variables by running them through GetResourceNameFromSelfLink +// this allows us to use long forms of variables from configs without needing +// custom id formats. 
For instance: +// accessPolicies/{{access_policy}}/accessLevels/{{access_level}} +// with values: +// access_policy: accessPolicies/foo +// access_level: accessPolicies/foo/accessLevels/bar +// becomes accessPolicies/foo/accessLevels/bar +func ReplaceVarsForId(d TerraformResourceData, config *transport_tpg.Config, linkTmpl string) (string, error) { + return ReplaceVarsRecursive(d, config, linkTmpl, true, 0) +} + +// ReplaceVars must be done recursively because there are baseUrls that can contain references to regions +// (eg cloudrun service) there aren't any cases known for 2+ recursion but we will track a run away +// substitution as 10+ calls to allow for future use cases. +func ReplaceVarsRecursive(d TerraformResourceData, config *transport_tpg.Config, linkTmpl string, shorten bool, depth int) (string, error) { + if depth > 10 { + return "", errors.New("Recursive substitution detcted") + } + + // https://github.com/google/re2/wiki/Syntax + re := regexp.MustCompile("{{([%[:word:]]+)}}") + f, err := BuildReplacementFunc(re, d, config, linkTmpl, shorten) + if err != nil { + return "", err + } + final := re.ReplaceAllStringFunc(linkTmpl, f) + + if re.Match([]byte(final)) { + return ReplaceVarsRecursive(d, config, final, shorten, depth+1) + } + + return final, nil +} + +// This function replaces references to Terraform properties (in the form of {{var}}) with their value in Terraform +// It also replaces {{project}}, {{project_id_or_project}}, {{region}}, and {{zone}} with their appropriate values +// This function supports URL-encoding the result by prepending '%' to the field name e.g. 
{{%var}} +func BuildReplacementFunc(re *regexp.Regexp, d TerraformResourceData, config *transport_tpg.Config, linkTmpl string, shorten bool) (func(string) string, error) { + var project, projectID, region, zone string + var err error + + if strings.Contains(linkTmpl, "{{project}}") { + project, err = GetProject(d, config) + if err != nil { + return nil, err + } + } + + if strings.Contains(linkTmpl, "{{project_id_or_project}}") { + v, ok := d.GetOkExists("project_id") + if ok { + projectID, _ = v.(string) + } + if projectID == "" { + project, err = GetProject(d, config) + } + if err != nil { + return nil, err + } + } + + if strings.Contains(linkTmpl, "{{region}}") { + region, err = GetRegion(d, config) + if err != nil { + return nil, err + } + } + + if strings.Contains(linkTmpl, "{{zone}}") { + zone, err = GetZone(d, config) + if err != nil { + return nil, err + } + } + + f := func(s string) string { + + m := re.FindStringSubmatch(s)[1] + if m == "project" { + return project + } + if m == "project_id_or_project" { + if projectID != "" { + return projectID + } + return project + } + if m == "region" { + return region + } + if m == "zone" { + return zone + } + if string(m[0]) == "%" { + v, ok := d.GetOkExists(m[1:]) + if ok { + return url.PathEscape(fmt.Sprintf("%v", v)) + } + } else { + v, ok := d.GetOkExists(m) + if ok { + if shorten { + return GetResourceNameFromSelfLink(fmt.Sprintf("%v", v)) + } else { + return fmt.Sprintf("%v", v) + } + } + } + + // terraform-google-conversion doesn't provide a provider config in tests. + if config != nil { + // Attempt to draw values from the provider config if it's present. + if f := reflect.Indirect(reflect.ValueOf(config)).FieldByName(m); f.IsValid() { + return f.String() + } + } + return "" + } + + return f, nil +} + +func GetFileMd5Hash(filename string) string { + data, err := ioutil.ReadFile(filename) + if err != nil { + log.Printf("[WARN] Failed to read source file %q. 
Cannot compute md5 hash for it.", filename) + return "" + } + return GetContentMd5Hash(data) +} + +func GetContentMd5Hash(content []byte) string { + h := md5.New() + if _, err := h.Write(content); err != nil { + log.Printf("[WARN] Failed to compute md5 hash for content: %v", err) + } + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgtools_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgtools_utils.go deleted file mode 100644 index cdcb287d27..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpgtools_utils.go +++ /dev/null @@ -1,26 +0,0 @@ -package google - -import ( - "fmt" - "log" - - dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func oldValue(old, new interface{}) interface{} { - return old -} - -func handleNotFoundDCLError(err error, d *schema.ResourceData, resourceName string) error { - if dcl.IsNotFound(err) { - log.Printf("[WARN] Removing %s because it's gone", resourceName) - // The resource doesn't exist anymore - d.SetId("") - return nil - } - - return errwrap.Wrapf( - fmt.Sprintf("Error when reading or editing %s: {{err}}", resourceName), err) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpu_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpu_operation.go deleted file mode 100644 index be886f2612..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/tpu_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// 
---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type TPUOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *TPUOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. - url := fmt.Sprintf("%s%s", w.Config.TPUBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createTPUWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*TPUOperationWaiter, error) { - w := &TPUOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func TPUOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createTPUWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func TPUOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. 
- return nil - } - w, err := createTPUWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport.go deleted file mode 100644 index 6fbaab4bcb..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport.go +++ /dev/null @@ -1,265 +0,0 @@ -package google - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "reflect" - "regexp" - "strings" - "time" - - "google.golang.org/api/googleapi" -) - -var DefaultRequestTimeout = 5 * time.Minute - -func isEmptyValue(v reflect.Value) bool { - if !v.IsValid() { - return true - } - - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func SendRequest(config *Config, method, project, rawurl, userAgent string, body map[string]interface{}, errorRetryPredicates ...RetryErrorPredicateFunc) (map[string]interface{}, error) { - return SendRequestWithTimeout(config, method, project, rawurl, userAgent, body, DefaultRequestTimeout, errorRetryPredicates...) 
-} - -func SendRequestWithTimeout(config *Config, method, project, rawurl, userAgent string, body map[string]interface{}, timeout time.Duration, errorRetryPredicates ...RetryErrorPredicateFunc) (map[string]interface{}, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("User-Agent", userAgent) - reqHeaders.Set("Content-Type", "application/json") - - if config.UserProjectOverride && project != "" { - // When project is "NO_BILLING_PROJECT_OVERRIDE" in the function GetCurrentUserEmail, - // set the header X-Goog-User-Project to be empty string. - if project == "NO_BILLING_PROJECT_OVERRIDE" { - reqHeaders.Set("X-Goog-User-Project", "") - } else { - // Pass the project into this fn instead of parsing it from the URL because - // both project names and URLs can have colons in them. - reqHeaders.Set("X-Goog-User-Project", project) - } - } - - if timeout == 0 { - timeout = time.Duration(1) * time.Hour - } - - var res *http.Response - err := RetryTimeDuration( - func() error { - var buf bytes.Buffer - if body != nil { - err := json.NewEncoder(&buf).Encode(body) - if err != nil { - return err - } - } - - u, err := addQueryParams(rawurl, map[string]string{"alt": "json"}) - if err != nil { - return err - } - req, err := http.NewRequest(method, u, &buf) - if err != nil { - return err - } - - req.Header = reqHeaders - res, err = config.Client.Do(req) - if err != nil { - return err - } - - if err := googleapi.CheckResponse(res); err != nil { - googleapi.CloseBody(res) - return err - } - - return nil - }, - timeout, - errorRetryPredicates..., - ) - if err != nil { - return nil, err - } - - if res == nil { - return nil, fmt.Errorf("Unable to parse server response. This is most likely a terraform problem, please file a bug at https://github.com/hashicorp/terraform-provider-google/issues.") - } - - // The defer call must be made outside of the retryFunc otherwise it's closed too soon. 
- defer googleapi.CloseBody(res) - - // 204 responses will have no body, so we're going to error with "EOF" if we - // try to parse it. Instead, we can just return nil. - if res.StatusCode == 204 { - return nil, nil - } - result := make(map[string]interface{}) - if err := json.NewDecoder(res.Body).Decode(&result); err != nil { - return nil, err - } - - return result, nil -} - -func addQueryParams(rawurl string, params map[string]string) (string, error) { - u, err := url.Parse(rawurl) - if err != nil { - return "", err - } - q := u.Query() - for k, v := range params { - q.Set(k, v) - } - u.RawQuery = q.Encode() - return u.String(), nil -} - -func replaceVars(d TerraformResourceData, config *Config, linkTmpl string) (string, error) { - return replaceVarsRecursive(d, config, linkTmpl, false, 0) -} - -// relaceVarsForId shortens variables by running them through GetResourceNameFromSelfLink -// this allows us to use long forms of variables from configs without needing -// custom id formats. For instance: -// accessPolicies/{{access_policy}}/accessLevels/{{access_level}} -// with values: -// access_policy: accessPolicies/foo -// access_level: accessPolicies/foo/accessLevels/bar -// becomes accessPolicies/foo/accessLevels/bar -func replaceVarsForId(d TerraformResourceData, config *Config, linkTmpl string) (string, error) { - return replaceVarsRecursive(d, config, linkTmpl, true, 0) -} - -// replaceVars must be done recursively because there are baseUrls that can contain references to regions -// (eg cloudrun service) there aren't any cases known for 2+ recursion but we will track a run away -// substitution as 10+ calls to allow for future use cases. 
-func replaceVarsRecursive(d TerraformResourceData, config *Config, linkTmpl string, shorten bool, depth int) (string, error) { - if depth > 10 { - return "", errors.New("Recursive substitution detcted") - } - - // https://github.com/google/re2/wiki/Syntax - re := regexp.MustCompile("{{([%[:word:]]+)}}") - f, err := buildReplacementFunc(re, d, config, linkTmpl, shorten) - if err != nil { - return "", err - } - final := re.ReplaceAllStringFunc(linkTmpl, f) - - if re.Match([]byte(final)) { - return replaceVarsRecursive(d, config, final, shorten, depth+1) - } - - return final, nil -} - -// This function replaces references to Terraform properties (in the form of {{var}}) with their value in Terraform -// It also replaces {{project}}, {{project_id_or_project}}, {{region}}, and {{zone}} with their appropriate values -// This function supports URL-encoding the result by prepending '%' to the field name e.g. {{%var}} -func buildReplacementFunc(re *regexp.Regexp, d TerraformResourceData, config *Config, linkTmpl string, shorten bool) (func(string) string, error) { - var project, projectID, region, zone string - var err error - - if strings.Contains(linkTmpl, "{{project}}") { - project, err = getProject(d, config) - if err != nil { - return nil, err - } - } - - if strings.Contains(linkTmpl, "{{project_id_or_project}}") { - v, ok := d.GetOkExists("project_id") - if ok { - projectID, _ = v.(string) - } - if projectID == "" { - project, err = getProject(d, config) - } - if err != nil { - return nil, err - } - } - - if strings.Contains(linkTmpl, "{{region}}") { - region, err = getRegion(d, config) - if err != nil { - return nil, err - } - } - - if strings.Contains(linkTmpl, "{{zone}}") { - zone, err = getZone(d, config) - if err != nil { - return nil, err - } - } - - f := func(s string) string { - - m := re.FindStringSubmatch(s)[1] - if m == "project" { - return project - } - if m == "project_id_or_project" { - if projectID != "" { - return projectID - } - return project - } - 
if m == "region" { - return region - } - if m == "zone" { - return zone - } - if string(m[0]) == "%" { - v, ok := d.GetOkExists(m[1:]) - if ok { - return url.PathEscape(fmt.Sprintf("%v", v)) - } - } else { - v, ok := d.GetOkExists(m) - if ok { - if shorten { - return GetResourceNameFromSelfLink(fmt.Sprintf("%v", v)) - } else { - return fmt.Sprintf("%v", v) - } - } - } - - // terraform-google-conversion doesn't provide a provider config in tests. - if config != nil { - // Attempt to draw values from the provider config if it's present. - if f := reflect.Indirect(reflect.ValueOf(config)).FieldByName(m); f.IsValid() { - return f.String() - } - } - return "" - } - - return f, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/batcher.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/batcher.go similarity index 97% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/batcher.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/batcher.go index 9ab90cec08..0d1c569f7e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/batcher.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/batcher.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package transport import ( "context" @@ -20,7 +22,7 @@ const DefaultBatchSendIntervalSec = 3 type RequestBatcher struct { sync.Mutex - *batchingConfig + *BatchingConfig parentCtx context.Context batches map[string]*startedBatch debugId string @@ -96,18 +98,18 @@ type batchSubscriber struct { respCh chan batchResponse } -// batchingConfig contains user configuration for controlling batch requests. 
-type batchingConfig struct { +// BatchingConfig contains user configuration for controlling batch requests. +type BatchingConfig struct { SendAfter time.Duration EnableBatching bool } // Initializes a new batcher. -func NewRequestBatcher(debugId string, ctx context.Context, config *batchingConfig) *RequestBatcher { +func NewRequestBatcher(debugId string, ctx context.Context, config *BatchingConfig) *RequestBatcher { batcher := &RequestBatcher{ debugId: debugId, parentCtx: ctx, - batchingConfig: config, + BatchingConfig: config, batches: make(map[string]*startedBatch), } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/bigtable_client_factory.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/bigtable_client_factory.go similarity index 96% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/bigtable_client_factory.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/bigtable_client_factory.go index 19c7047efd..48e320e51e 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/bigtable_client_factory.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/bigtable_client_factory.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package transport import ( "context" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/common_polling.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/common_polling.go similarity index 98% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/common_polling.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/common_polling.go index fbc81b15f7..45d159f665 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/common_polling.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/common_polling.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package transport import ( "fmt" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/config.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/config.go new file mode 100644 index 0000000000..200b132182 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/config.go @@ -0,0 +1,2044 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package transport + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log" + "net/http" + "os" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + + grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/sirupsen/logrus" + "google.golang.org/api/option" + + "github.com/hashicorp/terraform-provider-google/google/verify" + + "golang.org/x/oauth2" + googleoauth "golang.org/x/oauth2/google" + appengine "google.golang.org/api/appengine/v1" + "google.golang.org/api/bigquery/v2" + "google.golang.org/api/bigtableadmin/v2" + "google.golang.org/api/cloudbilling/v1" + "google.golang.org/api/cloudbuild/v1" + "google.golang.org/api/cloudfunctions/v1" + "google.golang.org/api/cloudidentity/v1" + "google.golang.org/api/cloudiot/v1" + "google.golang.org/api/cloudkms/v1" + "google.golang.org/api/cloudresourcemanager/v1" + resourceManagerV3 "google.golang.org/api/cloudresourcemanager/v3" + "google.golang.org/api/composer/v1" + "google.golang.org/api/compute/v1" + "google.golang.org/api/container/v1" + dataflow "google.golang.org/api/dataflow/v1b3" + "google.golang.org/api/dataproc/v1" + "google.golang.org/api/dns/v1" + healthcare "google.golang.org/api/healthcare/v1" + "google.golang.org/api/iam/v1" + iamcredentials "google.golang.org/api/iamcredentials/v1" + cloudlogging "google.golang.org/api/logging/v2" + "google.golang.org/api/pubsub/v1" + runadminv2 "google.golang.org/api/run/v2" + "google.golang.org/api/servicemanagement/v1" + "google.golang.org/api/servicenetworking/v1" + "google.golang.org/api/serviceusage/v1" + "google.golang.org/api/sourcerepo/v1" + "google.golang.org/api/spanner/v1" + 
sqladmin "google.golang.org/api/sqladmin/v1beta4" + "google.golang.org/api/storage/v1" + "google.golang.org/api/storagetransfer/v1" + "google.golang.org/api/transport" + "google.golang.org/grpc" +) + +type ProviderMeta struct { + ModuleName string `cty:"module_name"` +} + +type Formatter struct { + TimestampFormat string + LogFormat string +} + +// Borrowed logic from https://github.com/sirupsen/logrus/blob/master/json_formatter.go and https://github.com/t-tomalak/logrus-easy-formatter/blob/master/formatter.go +func (f *Formatter) Format(entry *logrus.Entry) ([]byte, error) { + // Suppress logs if TF_LOG is not DEBUG or TRACE + if !logging.IsDebugOrHigher() { + return nil, nil + } + + // Also suppress based on log content + // - frequent transport spam + // - ListenSocket logs from gRPC + isTransportSpam := strings.Contains(entry.Message, "transport is closing") + listenSocketRegex := regexp.MustCompile(`\[Server #\d+( ListenSocket #\d+)*\]`) // Match patterns like `[Server #00]` or `[Server #00 ListenSocket #00]` + isListenSocketLog := listenSocketRegex.MatchString(entry.Message) + if isTransportSpam || isListenSocketLog { + return nil, nil + } + + output := f.LogFormat + entry.Level = logrus.DebugLevel // Force Entries to be Debug + + timestampFormat := f.TimestampFormat + + output = strings.Replace(output, "%time%", entry.Time.Format(timestampFormat), 1) + + output = strings.Replace(output, "%msg%", entry.Message, 1) + + level := strings.ToUpper(entry.Level.String()) + output = strings.Replace(output, "%lvl%", level, 1) + + var gRPCMessageFlag bool + for k, val := range entry.Data { + switch v := val.(type) { + case string: + output = strings.Replace(output, "%"+k+"%", v, 1) + case int: + s := strconv.Itoa(v) + output = strings.Replace(output, "%"+k+"%", s, 1) + case bool: + s := strconv.FormatBool(v) + output = strings.Replace(output, "%"+k+"%", s, 1) + } + + if k != "system" { + gRPCMessageFlag = true + } + } + + if gRPCMessageFlag { + data := 
make(logrus.Fields, len(entry.Data)+4) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + encoder.SetIndent("", " ") + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) + } + + finalOutput := append([]byte(output), b.Bytes()...) + return finalOutput, nil + } + + return []byte(output), nil +} + +// Config is the configuration structure used to instantiate the Google +// provider. +type Config struct { + DCLConfig + AccessToken string + Credentials string + ImpersonateServiceAccount string + ImpersonateServiceAccountDelegates []string + Project string + Region string + BillingProject string + Zone string + Scopes []string + BatchingConfig *BatchingConfig + UserProjectOverride bool + RequestReason string + RequestTimeout time.Duration + // PollInterval is passed to resource.StateChangeConf in common_operation.go + // It controls the interval at which we poll for successful operations + PollInterval time.Duration + + Client *http.Client + Context context.Context + UserAgent string + gRPCLoggingOptions []option.ClientOption + + tokenSource oauth2.TokenSource + + AccessApprovalBasePath string + AccessContextManagerBasePath string + ActiveDirectoryBasePath string + AlloydbBasePath string + ApigeeBasePath string + AppEngineBasePath string + ArtifactRegistryBasePath string + BeyondcorpBasePath string + BigQueryBasePath string + BigqueryAnalyticsHubBasePath string + BigqueryConnectionBasePath string + BigqueryDatapolicyBasePath string + BigqueryDataTransferBasePath string + BigqueryReservationBasePath string + BigtableBasePath string + BillingBasePath string + BinaryAuthorizationBasePath string 
+ CertificateManagerBasePath string + CloudAssetBasePath string + CloudBuildBasePath string + Cloudbuildv2BasePath string + CloudFunctionsBasePath string + Cloudfunctions2BasePath string + CloudIdentityBasePath string + CloudIdsBasePath string + CloudIotBasePath string + CloudRunBasePath string + CloudRunV2BasePath string + CloudSchedulerBasePath string + CloudTasksBasePath string + ComputeBasePath string + ContainerAnalysisBasePath string + ContainerAttachedBasePath string + DatabaseMigrationServiceBasePath string + DataCatalogBasePath string + DataFusionBasePath string + DataLossPreventionBasePath string + DataplexBasePath string + DataprocBasePath string + DataprocMetastoreBasePath string + DatastoreBasePath string + DatastreamBasePath string + DeploymentManagerBasePath string + DialogflowBasePath string + DialogflowCXBasePath string + DNSBasePath string + DocumentAIBasePath string + EssentialContactsBasePath string + FilestoreBasePath string + FirestoreBasePath string + GameServicesBasePath string + GKEBackupBasePath string + GKEHubBasePath string + GKEHub2BasePath string + HealthcareBasePath string + IAM2BasePath string + IAMBetaBasePath string + IAMWorkforcePoolBasePath string + IapBasePath string + IdentityPlatformBasePath string + KMSBasePath string + LoggingBasePath string + LookerBasePath string + MemcacheBasePath string + MLEngineBasePath string + MonitoringBasePath string + NetworkManagementBasePath string + NetworkSecurityBasePath string + NetworkServicesBasePath string + NotebooksBasePath string + OSConfigBasePath string + OSLoginBasePath string + PrivatecaBasePath string + PublicCABasePath string + PubsubBasePath string + PubsubLiteBasePath string + RedisBasePath string + ResourceManagerBasePath string + SecretManagerBasePath string + SecurityCenterBasePath string + ServiceManagementBasePath string + ServiceUsageBasePath string + SourceRepoBasePath string + SpannerBasePath string + SQLBasePath string + StorageBasePath string + StorageTransferBasePath 
string + TagsBasePath string + TPUBasePath string + VertexAIBasePath string + VPCAccessBasePath string + WorkflowsBasePath string + + CloudBillingBasePath string + ComposerBasePath string + ContainerBasePath string + DataflowBasePath string + IamCredentialsBasePath string + ResourceManagerV3BasePath string + IAMBasePath string + CloudIoTBasePath string + ServiceNetworkingBasePath string + BigtableAdminBasePath string + TagsLocationBasePath string + + // dcl + ContainerAwsBasePath string + ContainerAzureBasePath string + + RequestBatcherServiceUsage *RequestBatcher + RequestBatcherIam *RequestBatcher +} + +const AccessApprovalBasePathKey = "AccessApproval" +const AccessContextManagerBasePathKey = "AccessContextManager" +const ActiveDirectoryBasePathKey = "ActiveDirectory" +const AlloydbBasePathKey = "Alloydb" +const ApigeeBasePathKey = "Apigee" +const AppEngineBasePathKey = "AppEngine" +const ArtifactRegistryBasePathKey = "ArtifactRegistry" +const BeyondcorpBasePathKey = "Beyondcorp" +const BigQueryBasePathKey = "BigQuery" +const BigqueryAnalyticsHubBasePathKey = "BigqueryAnalyticsHub" +const BigqueryConnectionBasePathKey = "BigqueryConnection" +const BigqueryDatapolicyBasePathKey = "BigqueryDatapolicy" +const BigqueryDataTransferBasePathKey = "BigqueryDataTransfer" +const BigqueryReservationBasePathKey = "BigqueryReservation" +const BigtableBasePathKey = "Bigtable" +const BillingBasePathKey = "Billing" +const BinaryAuthorizationBasePathKey = "BinaryAuthorization" +const CertificateManagerBasePathKey = "CertificateManager" +const CloudAssetBasePathKey = "CloudAsset" +const CloudBuildBasePathKey = "CloudBuild" +const Cloudbuildv2BasePathKey = "Cloudbuildv2" +const CloudFunctionsBasePathKey = "CloudFunctions" +const Cloudfunctions2BasePathKey = "Cloudfunctions2" +const CloudIdentityBasePathKey = "CloudIdentity" +const CloudIdsBasePathKey = "CloudIds" +const CloudIotBasePathKey = "CloudIot" +const CloudRunBasePathKey = "CloudRun" +const CloudRunV2BasePathKey = 
"CloudRunV2" +const CloudSchedulerBasePathKey = "CloudScheduler" +const CloudTasksBasePathKey = "CloudTasks" +const ComputeBasePathKey = "Compute" +const ContainerAnalysisBasePathKey = "ContainerAnalysis" +const ContainerAttachedBasePathKey = "ContainerAttached" +const DatabaseMigrationServiceBasePathKey = "DatabaseMigrationService" +const DataCatalogBasePathKey = "DataCatalog" +const DataFusionBasePathKey = "DataFusion" +const DataLossPreventionBasePathKey = "DataLossPrevention" +const DataplexBasePathKey = "Dataplex" +const DataprocBasePathKey = "Dataproc" +const DataprocMetastoreBasePathKey = "DataprocMetastore" +const DatastoreBasePathKey = "Datastore" +const DatastreamBasePathKey = "Datastream" +const DeploymentManagerBasePathKey = "DeploymentManager" +const DialogflowBasePathKey = "Dialogflow" +const DialogflowCXBasePathKey = "DialogflowCX" +const DNSBasePathKey = "DNS" +const DocumentAIBasePathKey = "DocumentAI" +const EssentialContactsBasePathKey = "EssentialContacts" +const FilestoreBasePathKey = "Filestore" +const FirestoreBasePathKey = "Firestore" +const GameServicesBasePathKey = "GameServices" +const GKEBackupBasePathKey = "GKEBackup" +const GKEHubBasePathKey = "GKEHub" +const GKEHub2BasePathKey = "GKEHub2" +const HealthcareBasePathKey = "Healthcare" +const IAM2BasePathKey = "IAM2" +const IAMBetaBasePathKey = "IAMBeta" +const IAMWorkforcePoolBasePathKey = "IAMWorkforcePool" +const IapBasePathKey = "Iap" +const IdentityPlatformBasePathKey = "IdentityPlatform" +const KMSBasePathKey = "KMS" +const LoggingBasePathKey = "Logging" +const LookerBasePathKey = "Looker" +const MemcacheBasePathKey = "Memcache" +const MLEngineBasePathKey = "MLEngine" +const MonitoringBasePathKey = "Monitoring" +const NetworkManagementBasePathKey = "NetworkManagement" +const NetworkSecurityBasePathKey = "NetworkSecurity" +const NetworkServicesBasePathKey = "NetworkServices" +const NotebooksBasePathKey = "Notebooks" +const OSConfigBasePathKey = "OSConfig" +const OSLoginBasePathKey = 
"OSLogin" +const PrivatecaBasePathKey = "Privateca" +const PublicCABasePathKey = "PublicCA" +const PubsubBasePathKey = "Pubsub" +const PubsubLiteBasePathKey = "PubsubLite" +const RedisBasePathKey = "Redis" +const ResourceManagerBasePathKey = "ResourceManager" +const SecretManagerBasePathKey = "SecretManager" +const SecurityCenterBasePathKey = "SecurityCenter" +const ServiceManagementBasePathKey = "ServiceManagement" +const ServiceUsageBasePathKey = "ServiceUsage" +const SourceRepoBasePathKey = "SourceRepo" +const SpannerBasePathKey = "Spanner" +const SQLBasePathKey = "SQL" +const StorageBasePathKey = "Storage" +const StorageTransferBasePathKey = "StorageTransfer" +const TagsBasePathKey = "Tags" +const TPUBasePathKey = "TPU" +const VertexAIBasePathKey = "VertexAI" +const VPCAccessBasePathKey = "VPCAccess" +const WorkflowsBasePathKey = "Workflows" +const CloudBillingBasePathKey = "CloudBilling" +const ComposerBasePathKey = "Composer" +const ContainerBasePathKey = "Container" +const DataflowBasePathKey = "Dataflow" +const IAMBasePathKey = "IAM" +const IamCredentialsBasePathKey = "IamCredentials" +const ResourceManagerV3BasePathKey = "ResourceManagerV3" +const ServiceNetworkingBasePathKey = "ServiceNetworking" +const BigtableAdminBasePathKey = "BigtableAdmin" +const ContainerAwsBasePathKey = "ContainerAws" +const ContainerAzureBasePathKey = "ContainerAzure" +const TagsLocationBasePathKey = "TagsLocation" + +// Generated product base paths +var DefaultBasePaths = map[string]string{ + AccessApprovalBasePathKey: "https://accessapproval.googleapis.com/v1/", + AccessContextManagerBasePathKey: "https://accesscontextmanager.googleapis.com/v1/", + ActiveDirectoryBasePathKey: "https://managedidentities.googleapis.com/v1/", + AlloydbBasePathKey: "https://alloydb.googleapis.com/v1/", + ApigeeBasePathKey: "https://apigee.googleapis.com/v1/", + AppEngineBasePathKey: "https://appengine.googleapis.com/v1/", + ArtifactRegistryBasePathKey: "https://artifactregistry.googleapis.com/v1/", 
+ BeyondcorpBasePathKey: "https://beyondcorp.googleapis.com/v1/", + BigQueryBasePathKey: "https://bigquery.googleapis.com/bigquery/v2/", + BigqueryAnalyticsHubBasePathKey: "https://analyticshub.googleapis.com/v1/", + BigqueryConnectionBasePathKey: "https://bigqueryconnection.googleapis.com/v1/", + BigqueryDatapolicyBasePathKey: "https://bigquerydatapolicy.googleapis.com/v1/", + BigqueryDataTransferBasePathKey: "https://bigquerydatatransfer.googleapis.com/v1/", + BigqueryReservationBasePathKey: "https://bigqueryreservation.googleapis.com/v1/", + BigtableBasePathKey: "https://bigtableadmin.googleapis.com/v2/", + BillingBasePathKey: "https://billingbudgets.googleapis.com/v1/", + BinaryAuthorizationBasePathKey: "https://binaryauthorization.googleapis.com/v1/", + CertificateManagerBasePathKey: "https://certificatemanager.googleapis.com/v1/", + CloudAssetBasePathKey: "https://cloudasset.googleapis.com/v1/", + CloudBuildBasePathKey: "https://cloudbuild.googleapis.com/v1/", + Cloudbuildv2BasePathKey: "https://cloudbuild.googleapis.com/v2/", + CloudFunctionsBasePathKey: "https://cloudfunctions.googleapis.com/v1/", + Cloudfunctions2BasePathKey: "https://cloudfunctions.googleapis.com/v2/", + CloudIdentityBasePathKey: "https://cloudidentity.googleapis.com/v1/", + CloudIdsBasePathKey: "https://ids.googleapis.com/v1/", + CloudIotBasePathKey: "https://cloudiot.googleapis.com/v1/", + CloudRunBasePathKey: "https://{{location}}-run.googleapis.com/", + CloudRunV2BasePathKey: "https://run.googleapis.com/v2/", + CloudSchedulerBasePathKey: "https://cloudscheduler.googleapis.com/v1/", + CloudTasksBasePathKey: "https://cloudtasks.googleapis.com/v2/", + ComputeBasePathKey: "https://compute.googleapis.com/compute/v1/", + ContainerAnalysisBasePathKey: "https://containeranalysis.googleapis.com/v1/", + ContainerAttachedBasePathKey: "https://{{location}}-gkemulticloud.googleapis.com/v1/", + DatabaseMigrationServiceBasePathKey: "https://datamigration.googleapis.com/v1/", + 
DataCatalogBasePathKey: "https://datacatalog.googleapis.com/v1/", + DataFusionBasePathKey: "https://datafusion.googleapis.com/v1/", + DataLossPreventionBasePathKey: "https://dlp.googleapis.com/v2/", + DataplexBasePathKey: "https://dataplex.googleapis.com/v1/", + DataprocBasePathKey: "https://dataproc.googleapis.com/v1/", + DataprocMetastoreBasePathKey: "https://metastore.googleapis.com/v1/", + DatastoreBasePathKey: "https://datastore.googleapis.com/v1/", + DatastreamBasePathKey: "https://datastream.googleapis.com/v1/", + DeploymentManagerBasePathKey: "https://www.googleapis.com/deploymentmanager/v2/", + DialogflowBasePathKey: "https://dialogflow.googleapis.com/v2/", + DialogflowCXBasePathKey: "https://{{location}}-dialogflow.googleapis.com/v3/", + DNSBasePathKey: "https://dns.googleapis.com/dns/v1/", + DocumentAIBasePathKey: "https://{{location}}-documentai.googleapis.com/v1/", + EssentialContactsBasePathKey: "https://essentialcontacts.googleapis.com/v1/", + FilestoreBasePathKey: "https://file.googleapis.com/v1/", + FirestoreBasePathKey: "https://firestore.googleapis.com/v1/", + GameServicesBasePathKey: "https://gameservices.googleapis.com/v1/", + GKEBackupBasePathKey: "https://gkebackup.googleapis.com/v1/", + GKEHubBasePathKey: "https://gkehub.googleapis.com/v1/", + GKEHub2BasePathKey: "https://gkehub.googleapis.com/v1/", + HealthcareBasePathKey: "https://healthcare.googleapis.com/v1/", + IAM2BasePathKey: "https://iam.googleapis.com/v2/", + IAMBetaBasePathKey: "https://iam.googleapis.com/v1/", + IAMWorkforcePoolBasePathKey: "https://iam.googleapis.com/v1/", + IapBasePathKey: "https://iap.googleapis.com/v1/", + IdentityPlatformBasePathKey: "https://identitytoolkit.googleapis.com/v2/", + KMSBasePathKey: "https://cloudkms.googleapis.com/v1/", + LoggingBasePathKey: "https://logging.googleapis.com/v2/", + LookerBasePathKey: "https://looker.googleapis.com/v1/", + MemcacheBasePathKey: "https://memcache.googleapis.com/v1/", + MLEngineBasePathKey: 
"https://ml.googleapis.com/v1/", + MonitoringBasePathKey: "https://monitoring.googleapis.com/", + NetworkManagementBasePathKey: "https://networkmanagement.googleapis.com/v1/", + NetworkSecurityBasePathKey: "https://networksecurity.googleapis.com/v1/", + NetworkServicesBasePathKey: "https://networkservices.googleapis.com/v1/", + NotebooksBasePathKey: "https://notebooks.googleapis.com/v1/", + OSConfigBasePathKey: "https://osconfig.googleapis.com/v1/", + OSLoginBasePathKey: "https://oslogin.googleapis.com/v1/", + PrivatecaBasePathKey: "https://privateca.googleapis.com/v1/", + PublicCABasePathKey: "https://publicca.googleapis.com/v1/", + PubsubBasePathKey: "https://pubsub.googleapis.com/v1/", + PubsubLiteBasePathKey: "https://{{region}}-pubsublite.googleapis.com/v1/admin/", + RedisBasePathKey: "https://redis.googleapis.com/v1/", + ResourceManagerBasePathKey: "https://cloudresourcemanager.googleapis.com/v1/", + SecretManagerBasePathKey: "https://secretmanager.googleapis.com/v1/", + SecurityCenterBasePathKey: "https://securitycenter.googleapis.com/v1/", + ServiceManagementBasePathKey: "https://servicemanagement.googleapis.com/v1/", + ServiceUsageBasePathKey: "https://serviceusage.googleapis.com/v1/", + SourceRepoBasePathKey: "https://sourcerepo.googleapis.com/v1/", + SpannerBasePathKey: "https://spanner.googleapis.com/v1/", + SQLBasePathKey: "https://sqladmin.googleapis.com/sql/v1beta4/", + StorageBasePathKey: "https://storage.googleapis.com/storage/v1/", + StorageTransferBasePathKey: "https://storagetransfer.googleapis.com/v1/", + TagsBasePathKey: "https://cloudresourcemanager.googleapis.com/v3/", + TPUBasePathKey: "https://tpu.googleapis.com/v1/", + VertexAIBasePathKey: "https://{{region}}-aiplatform.googleapis.com/v1/", + VPCAccessBasePathKey: "https://vpcaccess.googleapis.com/v1/", + WorkflowsBasePathKey: "https://workflows.googleapis.com/v1/", + CloudBillingBasePathKey: "https://cloudbilling.googleapis.com/v1/", + ComposerBasePathKey: 
"https://composer.googleapis.com/v1/", + ContainerBasePathKey: "https://container.googleapis.com/v1/", + DataflowBasePathKey: "https://dataflow.googleapis.com/v1b3/", + IAMBasePathKey: "https://iam.googleapis.com/v1/", + IamCredentialsBasePathKey: "https://iamcredentials.googleapis.com/v1/", + ResourceManagerV3BasePathKey: "https://cloudresourcemanager.googleapis.com/v3/", + ServiceNetworkingBasePathKey: "https://servicenetworking.googleapis.com/v1/", + BigtableAdminBasePathKey: "https://bigtableadmin.googleapis.com/v2/", + ContainerAwsBasePathKey: "https://{{location}}-gkemulticloud.googleapis.com/v1/", + ContainerAzureBasePathKey: "https://{{location}}-gkemulticloud.googleapis.com/v1/", + TagsLocationBasePathKey: "https://{{location}}-cloudresourcemanager.googleapis.com/v3/", +} + +var DefaultClientScopes = []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email", +} + +func HandleSDKDefaults(d *schema.ResourceData) error { + if d.Get("impersonate_service_account") == "" { + d.Set("impersonate_service_account", MultiEnvDefault([]string{ + "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT", + }, nil)) + } + + if d.Get("project") == "" { + d.Set("project", MultiEnvDefault([]string{ + "GOOGLE_PROJECT", + "GOOGLE_CLOUD_PROJECT", + "GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT", + }, nil)) + } + + if d.Get("billing_project") == "" { + d.Set("billing_project", MultiEnvDefault([]string{ + "GOOGLE_BILLING_PROJECT", + }, nil)) + } + + if d.Get("region") == "" { + d.Set("region", MultiEnvDefault([]string{ + "GOOGLE_REGION", + "GCLOUD_REGION", + "CLOUDSDK_COMPUTE_REGION", + }, nil)) + } + + if d.Get("zone") == "" { + d.Set("zone", MultiEnvDefault([]string{ + "GOOGLE_ZONE", + "GCLOUD_ZONE", + "CLOUDSDK_COMPUTE_ZONE", + }, nil)) + } + + if _, ok := d.GetOkExists("user_project_override"); !ok { + override := MultiEnvDefault([]string{ + "USER_PROJECT_OVERRIDE", + }, nil) + + if override != nil { + b, err := 
strconv.ParseBool(override.(string)) + if err != nil { + return err + } + d.Set("user_project_override", b) + } + } + + if d.Get("request_reason") == "" { + d.Set("request_reason", MultiEnvDefault([]string{ + "CLOUDSDK_CORE_REQUEST_REASON", + }, nil)) + } + + // Generated Products + if d.Get("access_approval_custom_endpoint") == "" { + d.Set("access_approval_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_ACCESS_APPROVAL_CUSTOM_ENDPOINT", + }, DefaultBasePaths[AccessApprovalBasePathKey])) + } + if d.Get("access_context_manager_custom_endpoint") == "" { + d.Set("access_context_manager_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_ACCESS_CONTEXT_MANAGER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[AccessContextManagerBasePathKey])) + } + if d.Get("active_directory_custom_endpoint") == "" { + d.Set("active_directory_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_ACTIVE_DIRECTORY_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ActiveDirectoryBasePathKey])) + } + if d.Get("alloydb_custom_endpoint") == "" { + d.Set("alloydb_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_ALLOYDB_CUSTOM_ENDPOINT", + }, DefaultBasePaths[AlloydbBasePathKey])) + } + if d.Get("apigee_custom_endpoint") == "" { + d.Set("apigee_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_APIGEE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ApigeeBasePathKey])) + } + if d.Get("app_engine_custom_endpoint") == "" { + d.Set("app_engine_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_APP_ENGINE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[AppEngineBasePathKey])) + } + if d.Get("artifact_registry_custom_endpoint") == "" { + d.Set("artifact_registry_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_ARTIFACT_REGISTRY_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ArtifactRegistryBasePathKey])) + } + if d.Get("beyondcorp_custom_endpoint") == "" { + d.Set("beyondcorp_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_BEYONDCORP_CUSTOM_ENDPOINT", + }, DefaultBasePaths[BeyondcorpBasePathKey])) + } + if 
d.Get("big_query_custom_endpoint") == "" { + d.Set("big_query_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_BIG_QUERY_CUSTOM_ENDPOINT", + }, DefaultBasePaths[BigQueryBasePathKey])) + } + if d.Get("bigquery_analytics_hub_custom_endpoint") == "" { + d.Set("bigquery_analytics_hub_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_BIGQUERY_ANALYTICS_HUB_CUSTOM_ENDPOINT", + }, DefaultBasePaths[BigqueryAnalyticsHubBasePathKey])) + } + if d.Get("bigquery_connection_custom_endpoint") == "" { + d.Set("bigquery_connection_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_BIGQUERY_CONNECTION_CUSTOM_ENDPOINT", + }, DefaultBasePaths[BigqueryConnectionBasePathKey])) + } + if d.Get("bigquery_datapolicy_custom_endpoint") == "" { + d.Set("bigquery_datapolicy_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_BIGQUERY_DATAPOLICY_CUSTOM_ENDPOINT", + }, DefaultBasePaths[BigqueryDatapolicyBasePathKey])) + } + if d.Get("bigquery_data_transfer_custom_endpoint") == "" { + d.Set("bigquery_data_transfer_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_BIGQUERY_DATA_TRANSFER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[BigqueryDataTransferBasePathKey])) + } + if d.Get("bigquery_reservation_custom_endpoint") == "" { + d.Set("bigquery_reservation_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_BIGQUERY_RESERVATION_CUSTOM_ENDPOINT", + }, DefaultBasePaths[BigqueryReservationBasePathKey])) + } + if d.Get("bigtable_custom_endpoint") == "" { + d.Set("bigtable_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_BIGTABLE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[BigtableBasePathKey])) + } + if d.Get("billing_custom_endpoint") == "" { + d.Set("billing_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_BILLING_CUSTOM_ENDPOINT", + }, DefaultBasePaths[BillingBasePathKey])) + } + if d.Get("binary_authorization_custom_endpoint") == "" { + d.Set("binary_authorization_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_BINARY_AUTHORIZATION_CUSTOM_ENDPOINT", + }, 
DefaultBasePaths[BinaryAuthorizationBasePathKey])) + } + if d.Get("certificate_manager_custom_endpoint") == "" { + d.Set("certificate_manager_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CERTIFICATE_MANAGER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CertificateManagerBasePathKey])) + } + if d.Get("cloud_asset_custom_endpoint") == "" { + d.Set("cloud_asset_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CLOUD_ASSET_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CloudAssetBasePathKey])) + } + if d.Get("cloud_build_custom_endpoint") == "" { + d.Set("cloud_build_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CLOUD_BUILD_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CloudBuildBasePathKey])) + } + if d.Get("cloudbuildv2_custom_endpoint") == "" { + d.Set("cloudbuildv2_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CLOUDBUILDV2_CUSTOM_ENDPOINT", + }, DefaultBasePaths[Cloudbuildv2BasePathKey])) + } + if d.Get("cloud_functions_custom_endpoint") == "" { + d.Set("cloud_functions_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CLOUD_FUNCTIONS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CloudFunctionsBasePathKey])) + } + if d.Get("cloudfunctions2_custom_endpoint") == "" { + d.Set("cloudfunctions2_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CLOUDFUNCTIONS2_CUSTOM_ENDPOINT", + }, DefaultBasePaths[Cloudfunctions2BasePathKey])) + } + if d.Get("cloud_identity_custom_endpoint") == "" { + d.Set("cloud_identity_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CLOUD_IDENTITY_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CloudIdentityBasePathKey])) + } + if d.Get("cloud_ids_custom_endpoint") == "" { + d.Set("cloud_ids_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CLOUD_IDS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CloudIdsBasePathKey])) + } + if d.Get("cloud_iot_custom_endpoint") == "" { + d.Set("cloud_iot_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CLOUD_IOT_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CloudIotBasePathKey])) + } + if 
d.Get("cloud_run_custom_endpoint") == "" { + d.Set("cloud_run_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CLOUD_RUN_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CloudRunBasePathKey])) + } + if d.Get("cloud_run_v2_custom_endpoint") == "" { + d.Set("cloud_run_v2_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CLOUD_RUN_V2_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CloudRunV2BasePathKey])) + } + if d.Get("cloud_scheduler_custom_endpoint") == "" { + d.Set("cloud_scheduler_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CLOUD_SCHEDULER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CloudSchedulerBasePathKey])) + } + if d.Get("cloud_tasks_custom_endpoint") == "" { + d.Set("cloud_tasks_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CLOUD_TASKS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CloudTasksBasePathKey])) + } + if d.Get("compute_custom_endpoint") == "" { + d.Set("compute_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_COMPUTE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ComputeBasePathKey])) + } + if d.Get("container_analysis_custom_endpoint") == "" { + d.Set("container_analysis_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CONTAINER_ANALYSIS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ContainerAnalysisBasePathKey])) + } + if d.Get("container_attached_custom_endpoint") == "" { + d.Set("container_attached_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_CONTAINER_ATTACHED_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ContainerAttachedBasePathKey])) + } + if d.Get("database_migration_service_custom_endpoint") == "" { + d.Set("database_migration_service_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_DATABASE_MIGRATION_SERVICE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DatabaseMigrationServiceBasePathKey])) + } + if d.Get("data_catalog_custom_endpoint") == "" { + d.Set("data_catalog_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_DATA_CATALOG_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DataCatalogBasePathKey])) + } + if 
d.Get("data_fusion_custom_endpoint") == "" { + d.Set("data_fusion_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_DATA_FUSION_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DataFusionBasePathKey])) + } + if d.Get("data_loss_prevention_custom_endpoint") == "" { + d.Set("data_loss_prevention_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_DATA_LOSS_PREVENTION_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DataLossPreventionBasePathKey])) + } + if d.Get("dataplex_custom_endpoint") == "" { + d.Set("dataplex_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_DATAPLEX_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DataplexBasePathKey])) + } + if d.Get("dataproc_custom_endpoint") == "" { + d.Set("dataproc_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_DATAPROC_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DataprocBasePathKey])) + } + if d.Get("dataproc_metastore_custom_endpoint") == "" { + d.Set("dataproc_metastore_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_DATAPROC_METASTORE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DataprocMetastoreBasePathKey])) + } + if d.Get("datastore_custom_endpoint") == "" { + d.Set("datastore_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_DATASTORE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DatastoreBasePathKey])) + } + if d.Get("datastream_custom_endpoint") == "" { + d.Set("datastream_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_DATASTREAM_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DatastreamBasePathKey])) + } + if d.Get("deployment_manager_custom_endpoint") == "" { + d.Set("deployment_manager_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_DEPLOYMENT_MANAGER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DeploymentManagerBasePathKey])) + } + if d.Get("dialogflow_custom_endpoint") == "" { + d.Set("dialogflow_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_DIALOGFLOW_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DialogflowBasePathKey])) + } + if d.Get("dialogflow_cx_custom_endpoint") == "" { + d.Set("dialogflow_cx_custom_endpoint", 
MultiEnvDefault([]string{ + "GOOGLE_DIALOGFLOW_CX_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DialogflowCXBasePathKey])) + } + if d.Get("dns_custom_endpoint") == "" { + d.Set("dns_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_DNS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DNSBasePathKey])) + } + if d.Get("document_ai_custom_endpoint") == "" { + d.Set("document_ai_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_DOCUMENT_AI_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DocumentAIBasePathKey])) + } + if d.Get("essential_contacts_custom_endpoint") == "" { + d.Set("essential_contacts_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_ESSENTIAL_CONTACTS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[EssentialContactsBasePathKey])) + } + if d.Get("filestore_custom_endpoint") == "" { + d.Set("filestore_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_FILESTORE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[FilestoreBasePathKey])) + } + if d.Get("firestore_custom_endpoint") == "" { + d.Set("firestore_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_FIRESTORE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[FirestoreBasePathKey])) + } + if d.Get("game_services_custom_endpoint") == "" { + d.Set("game_services_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_GAME_SERVICES_CUSTOM_ENDPOINT", + }, DefaultBasePaths[GameServicesBasePathKey])) + } + if d.Get("gke_backup_custom_endpoint") == "" { + d.Set("gke_backup_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_GKE_BACKUP_CUSTOM_ENDPOINT", + }, DefaultBasePaths[GKEBackupBasePathKey])) + } + if d.Get("gke_hub_custom_endpoint") == "" { + d.Set("gke_hub_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_GKE_HUB_CUSTOM_ENDPOINT", + }, DefaultBasePaths[GKEHubBasePathKey])) + } + if d.Get("gke_hub2_custom_endpoint") == "" { + d.Set("gke_hub2_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_GKE_HUB2_CUSTOM_ENDPOINT", + }, DefaultBasePaths[GKEHub2BasePathKey])) + } + if d.Get("healthcare_custom_endpoint") == "" { + 
d.Set("healthcare_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_HEALTHCARE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[HealthcareBasePathKey])) + } + if d.Get("iam2_custom_endpoint") == "" { + d.Set("iam2_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_IAM2_CUSTOM_ENDPOINT", + }, DefaultBasePaths[IAM2BasePathKey])) + } + if d.Get("iam_beta_custom_endpoint") == "" { + d.Set("iam_beta_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_IAM_BETA_CUSTOM_ENDPOINT", + }, DefaultBasePaths[IAMBetaBasePathKey])) + } + if d.Get("iam_workforce_pool_custom_endpoint") == "" { + d.Set("iam_workforce_pool_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_IAM_WORKFORCE_POOL_CUSTOM_ENDPOINT", + }, DefaultBasePaths[IAMWorkforcePoolBasePathKey])) + } + if d.Get("iap_custom_endpoint") == "" { + d.Set("iap_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_IAP_CUSTOM_ENDPOINT", + }, DefaultBasePaths[IapBasePathKey])) + } + if d.Get("identity_platform_custom_endpoint") == "" { + d.Set("identity_platform_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_IDENTITY_PLATFORM_CUSTOM_ENDPOINT", + }, DefaultBasePaths[IdentityPlatformBasePathKey])) + } + if d.Get("kms_custom_endpoint") == "" { + d.Set("kms_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_KMS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[KMSBasePathKey])) + } + if d.Get("logging_custom_endpoint") == "" { + d.Set("logging_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_LOGGING_CUSTOM_ENDPOINT", + }, DefaultBasePaths[LoggingBasePathKey])) + } + if d.Get("looker_custom_endpoint") == "" { + d.Set("looker_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_LOOKER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[LookerBasePathKey])) + } + if d.Get("memcache_custom_endpoint") == "" { + d.Set("memcache_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_MEMCACHE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[MemcacheBasePathKey])) + } + if d.Get("ml_engine_custom_endpoint") == "" { + d.Set("ml_engine_custom_endpoint", 
MultiEnvDefault([]string{ + "GOOGLE_ML_ENGINE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[MLEngineBasePathKey])) + } + if d.Get("monitoring_custom_endpoint") == "" { + d.Set("monitoring_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_MONITORING_CUSTOM_ENDPOINT", + }, DefaultBasePaths[MonitoringBasePathKey])) + } + if d.Get("network_management_custom_endpoint") == "" { + d.Set("network_management_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_NETWORK_MANAGEMENT_CUSTOM_ENDPOINT", + }, DefaultBasePaths[NetworkManagementBasePathKey])) + } + if d.Get("network_security_custom_endpoint") == "" { + d.Set("network_security_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_NETWORK_SECURITY_CUSTOM_ENDPOINT", + }, DefaultBasePaths[NetworkSecurityBasePathKey])) + } + if d.Get("network_services_custom_endpoint") == "" { + d.Set("network_services_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_NETWORK_SERVICES_CUSTOM_ENDPOINT", + }, DefaultBasePaths[NetworkServicesBasePathKey])) + } + if d.Get("notebooks_custom_endpoint") == "" { + d.Set("notebooks_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_NOTEBOOKS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[NotebooksBasePathKey])) + } + if d.Get("os_config_custom_endpoint") == "" { + d.Set("os_config_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_OS_CONFIG_CUSTOM_ENDPOINT", + }, DefaultBasePaths[OSConfigBasePathKey])) + } + if d.Get("os_login_custom_endpoint") == "" { + d.Set("os_login_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_OS_LOGIN_CUSTOM_ENDPOINT", + }, DefaultBasePaths[OSLoginBasePathKey])) + } + if d.Get("privateca_custom_endpoint") == "" { + d.Set("privateca_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_PRIVATECA_CUSTOM_ENDPOINT", + }, DefaultBasePaths[PrivatecaBasePathKey])) + } + if d.Get("public_ca_custom_endpoint") == "" { + d.Set("public_ca_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_PUBLIC_CA_CUSTOM_ENDPOINT", + }, DefaultBasePaths[PublicCABasePathKey])) + } + if 
d.Get("pubsub_custom_endpoint") == "" { + d.Set("pubsub_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_PUBSUB_CUSTOM_ENDPOINT", + }, DefaultBasePaths[PubsubBasePathKey])) + } + if d.Get("pubsub_lite_custom_endpoint") == "" { + d.Set("pubsub_lite_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_PUBSUB_LITE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[PubsubLiteBasePathKey])) + } + if d.Get("redis_custom_endpoint") == "" { + d.Set("redis_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_REDIS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[RedisBasePathKey])) + } + if d.Get("resource_manager_custom_endpoint") == "" { + d.Set("resource_manager_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_RESOURCE_MANAGER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ResourceManagerBasePathKey])) + } + if d.Get("secret_manager_custom_endpoint") == "" { + d.Set("secret_manager_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_SECRET_MANAGER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[SecretManagerBasePathKey])) + } + if d.Get("security_center_custom_endpoint") == "" { + d.Set("security_center_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_SECURITY_CENTER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[SecurityCenterBasePathKey])) + } + if d.Get("service_management_custom_endpoint") == "" { + d.Set("service_management_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_SERVICE_MANAGEMENT_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ServiceManagementBasePathKey])) + } + if d.Get("service_usage_custom_endpoint") == "" { + d.Set("service_usage_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_SERVICE_USAGE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ServiceUsageBasePathKey])) + } + if d.Get("source_repo_custom_endpoint") == "" { + d.Set("source_repo_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_SOURCE_REPO_CUSTOM_ENDPOINT", + }, DefaultBasePaths[SourceRepoBasePathKey])) + } + if d.Get("spanner_custom_endpoint") == "" { + d.Set("spanner_custom_endpoint", MultiEnvDefault([]string{ + 
"GOOGLE_SPANNER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[SpannerBasePathKey])) + } + if d.Get("sql_custom_endpoint") == "" { + d.Set("sql_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_SQL_CUSTOM_ENDPOINT", + }, DefaultBasePaths[SQLBasePathKey])) + } + if d.Get("storage_custom_endpoint") == "" { + d.Set("storage_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_STORAGE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[StorageBasePathKey])) + } + if d.Get("storage_transfer_custom_endpoint") == "" { + d.Set("storage_transfer_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_STORAGE_TRANSFER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[StorageTransferBasePathKey])) + } + if d.Get("tags_custom_endpoint") == "" { + d.Set("tags_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_TAGS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[TagsBasePathKey])) + } + if d.Get("tpu_custom_endpoint") == "" { + d.Set("tpu_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_TPU_CUSTOM_ENDPOINT", + }, DefaultBasePaths[TPUBasePathKey])) + } + if d.Get("vertex_ai_custom_endpoint") == "" { + d.Set("vertex_ai_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_VERTEX_AI_CUSTOM_ENDPOINT", + }, DefaultBasePaths[VertexAIBasePathKey])) + } + if d.Get("vpc_access_custom_endpoint") == "" { + d.Set("vpc_access_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_VPC_ACCESS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[VPCAccessBasePathKey])) + } + if d.Get("workflows_custom_endpoint") == "" { + d.Set("workflows_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_WORKFLOWS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[WorkflowsBasePathKey])) + } + + if d.Get(CloudBillingCustomEndpointEntryKey) == "" { + d.Set(CloudBillingCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_CLOUD_BILLING_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CloudBillingBasePathKey])) + } + + if d.Get(ComposerCustomEndpointEntryKey) == "" { + d.Set(ComposerCustomEndpointEntryKey, MultiEnvDefault([]string{ + 
"GOOGLE_COMPOSER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ComposerBasePathKey])) + } + + if d.Get(ContainerCustomEndpointEntryKey) == "" { + d.Set(ContainerCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_CONTAINER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ContainerBasePathKey])) + } + + if d.Get(DataflowCustomEndpointEntryKey) == "" { + d.Set(DataflowCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_DATAFLOW_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DataflowBasePathKey])) + } + + if d.Get(IamCredentialsCustomEndpointEntryKey) == "" { + d.Set(IamCredentialsCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_IAM_CREDENTIALS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[IamCredentialsBasePathKey])) + } + + if d.Get(ResourceManagerV3CustomEndpointEntryKey) == "" { + d.Set(ResourceManagerV3CustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_RESOURCE_MANAGER_V3_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ResourceManagerV3BasePathKey])) + } + + if d.Get(IAMCustomEndpointEntryKey) == "" { + d.Set(IAMCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_IAM_CUSTOM_ENDPOINT", + }, DefaultBasePaths[IAMBasePathKey])) + } + + if d.Get(ServiceNetworkingCustomEndpointEntryKey) == "" { + d.Set(ServiceNetworkingCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_SERVICE_NETWORKING_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ServiceNetworkingBasePathKey])) + } + + if d.Get(TagsLocationCustomEndpointEntryKey) == "" { + d.Set(TagsLocationCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_TAGS_LOCATION_CUSTOM_ENDPOINT", + }, DefaultBasePaths[TagsLocationBasePathKey])) + } + + if d.Get(ContainerAwsCustomEndpointEntryKey) == "" { + d.Set(ContainerAwsCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_CONTAINERAWS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ContainerAwsBasePathKey])) + } + + if d.Get(ContainerAzureCustomEndpointEntryKey) == "" { + d.Set(ContainerAzureCustomEndpointEntryKey, MultiEnvDefault([]string{ + 
"GOOGLE_CONTAINERAZURE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ContainerAzureBasePathKey])) + } + + return nil +} + +func (c *Config) LoadAndValidate(ctx context.Context) error { + if len(c.Scopes) == 0 { + c.Scopes = DefaultClientScopes + } + + c.Context = ctx + + tokenSource, err := c.getTokenSource(c.Scopes, false) + if err != nil { + return err + } + + c.tokenSource = tokenSource + + cleanCtx := context.WithValue(ctx, oauth2.HTTPClient, cleanhttp.DefaultClient()) + + // 1. MTLS TRANSPORT/CLIENT - sets up proper auth headers + client, _, err := transport.NewHTTPClient(cleanCtx, option.WithTokenSource(tokenSource)) + if err != nil { + return err + } + + // Userinfo is fetched before request logging is enabled to reduce additional noise. + err = c.logGoogleIdentities() + if err != nil { + return err + } + + // 2. Logging Transport - ensure we log HTTP requests to GCP APIs. + loggingTransport := logging.NewTransport("Google", client.Transport) + + // 3. Retry Transport - retries common temporary errors + // Keep order for wrapping logging so we log each retried request as well. + // This value should be used if needed to create shallow copies with additional retry predicates. + // See ClientWithAdditionalRetries + retryTransport := NewTransportWithDefaultRetries(loggingTransport) + + // 4. Header Transport - outer wrapper to inject additional headers we want to apply + // before making requests + headerTransport := NewTransportWithHeaders(retryTransport) + if c.RequestReason != "" { + headerTransport.Set("X-Goog-Request-Reason", c.RequestReason) + } + + // Ensure $userProject is set for all HTTP requests using the client if specified by the provider config + // See https://cloud.google.com/apis/docs/system-parameters + if c.UserProjectOverride && c.BillingProject != "" { + headerTransport.Set("X-Goog-User-Project", c.BillingProject) + } + + // Set final transport value. 
+ client.Transport = headerTransport + + // This timeout is a timeout per HTTP request, not per logical operation. + client.Timeout = c.synchronousTimeout() + + c.Client = client + c.Context = ctx + c.Region = GetRegionFromRegionSelfLink(c.Region) + c.RequestBatcherServiceUsage = NewRequestBatcher("Service Usage", ctx, c.BatchingConfig) + c.RequestBatcherIam = NewRequestBatcher("IAM", ctx, c.BatchingConfig) + c.PollInterval = 10 * time.Second + + // gRPC Logging setup + logger := logrus.StandardLogger() + + logrus.SetLevel(logrus.DebugLevel) + logrus.SetFormatter(&Formatter{ + TimestampFormat: "2006/01/02 15:04:05", + LogFormat: "%time% [%lvl%] %msg% \n", + }) + + alwaysLoggingDeciderClient := func(ctx context.Context, fullMethodName string) bool { return true } + grpc_logrus.ReplaceGrpcLogger(logrus.NewEntry(logger)) + + c.gRPCLoggingOptions = append( + c.gRPCLoggingOptions, option.WithGRPCDialOption(grpc.WithUnaryInterceptor( + grpc_logrus.PayloadUnaryClientInterceptor(logrus.NewEntry(logger), alwaysLoggingDeciderClient))), + option.WithGRPCDialOption(grpc.WithStreamInterceptor( + grpc_logrus.PayloadStreamClientInterceptor(logrus.NewEntry(logger), alwaysLoggingDeciderClient))), + ) + + return nil +} + +func ExpandProviderBatchingConfig(v interface{}) (*BatchingConfig, error) { + config := &BatchingConfig{ + SendAfter: time.Second * DefaultBatchSendIntervalSec, + EnableBatching: true, + } + + if v == nil { + return config, nil + } + ls := v.([]interface{}) + if len(ls) == 0 || ls[0] == nil { + return config, nil + } + + cfgV := ls[0].(map[string]interface{}) + if sendAfterV, ok := cfgV["send_after"]; ok && sendAfterV != "" { + SendAfter, err := time.ParseDuration(sendAfterV.(string)) + if err != nil { + return nil, fmt.Errorf("unable to parse duration from 'send_after' value %q", sendAfterV) + } + config.SendAfter = SendAfter + } + + if enable, ok := cfgV["enable_batching"]; ok { + config.EnableBatching = enable.(bool) + } + + return config, nil +} + +func (c 
*Config) synchronousTimeout() time.Duration { + if c.RequestTimeout == 0 { + return 120 * time.Second + } + return c.RequestTimeout +} + +// Print Identities executing terraform API Calls. +func (c *Config) logGoogleIdentities() error { + if c.ImpersonateServiceAccount == "" { + + tokenSource, err := c.getTokenSource(c.Scopes, true) + if err != nil { + return err + } + c.Client = oauth2.NewClient(c.Context, tokenSource) // c.Client isn't initialised fully when this code is called. + + email, err := GetCurrentUserEmail(c, c.UserAgent) + if err != nil { + log.Printf("[INFO] error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) + } + + log.Printf("[INFO] Terraform is using this identity: %s", email) + + return nil + + } + + // Drop Impersonated ClientOption from OAuth2 TokenSource to infer original identity + + tokenSource, err := c.getTokenSource(c.Scopes, true) + if err != nil { + return err + } + c.Client = oauth2.NewClient(c.Context, tokenSource) // c.Client isn't initialised fully when this code is called. + + email, err := GetCurrentUserEmail(c, c.UserAgent) + if err != nil { + log.Printf("[INFO] error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) + } + + log.Printf("[INFO] Terraform is configured with service account impersonation, original identity: %s, impersonated identity: %s", email, c.ImpersonateServiceAccount) + + // Add the Impersonated ClientOption back in to the OAuth2 TokenSource + + tokenSource, err = c.getTokenSource(c.Scopes, false) + if err != nil { + return err + } + c.Client = oauth2.NewClient(c.Context, tokenSource) // c.Client isn't initialised fully when this code is called. + + return nil +} + +// Get a TokenSource based on the Google Credentials configured. 
+// If initialCredentialsOnly is true, don't follow the impersonation settings and return the initial set of creds. +func (c *Config) getTokenSource(clientScopes []string, initialCredentialsOnly bool) (oauth2.TokenSource, error) { + creds, err := c.GetCredentials(clientScopes, initialCredentialsOnly) + if err != nil { + return nil, fmt.Errorf("%s", err) + } + return creds.TokenSource, nil +} + +// Methods to create new services from config +// Some base paths below need the version and possibly more of the path +// set on them. The client libraries are inconsistent about which values they need; +// while most only want the host URL, some older ones also want the version and some +// of those "projects" as well. You can find out if this is required by looking at +// the basePath value in the client library file. +func (c *Config) NewComputeClient(userAgent string) *compute.Service { + log.Printf("[INFO] Instantiating GCE client for path %s", c.ComputeBasePath) + clientCompute, err := compute.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client compute: %s", err) + return nil + } + clientCompute.UserAgent = userAgent + clientCompute.BasePath = c.ComputeBasePath + + return clientCompute +} + +func (c *Config) NewContainerClient(userAgent string) *container.Service { + containerClientBasePath := RemoveBasePathVersion(c.ContainerBasePath) + log.Printf("[INFO] Instantiating GKE client for path %s", containerClientBasePath) + clientContainer, err := container.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client container: %s", err) + return nil + } + clientContainer.UserAgent = userAgent + clientContainer.BasePath = containerClientBasePath + + return clientContainer +} + +func (c *Config) NewDnsClient(userAgent string) *dns.Service { + dnsClientBasePath := RemoveBasePathVersion(c.DNSBasePath) + dnsClientBasePath = 
strings.ReplaceAll(dnsClientBasePath, "/dns/", "") + log.Printf("[INFO] Instantiating Google Cloud DNS client for path %s", dnsClientBasePath) + clientDns, err := dns.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client dns: %s", err) + return nil + } + clientDns.UserAgent = userAgent + clientDns.BasePath = dnsClientBasePath + + return clientDns +} + +func (c *Config) NewKmsClientWithCtx(ctx context.Context, userAgent string) *cloudkms.Service { + kmsClientBasePath := RemoveBasePathVersion(c.KMSBasePath) + log.Printf("[INFO] Instantiating Google Cloud KMS client for path %s", kmsClientBasePath) + clientKms, err := cloudkms.NewService(ctx, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client kms: %s", err) + return nil + } + clientKms.UserAgent = userAgent + clientKms.BasePath = kmsClientBasePath + + return clientKms +} + +func (c *Config) NewKmsClient(userAgent string) *cloudkms.Service { + return c.NewKmsClientWithCtx(c.Context, userAgent) +} + +func (c *Config) NewLoggingClient(userAgent string) *cloudlogging.Service { + loggingClientBasePath := RemoveBasePathVersion(c.LoggingBasePath) + log.Printf("[INFO] Instantiating Google Stackdriver Logging client for path %s", loggingClientBasePath) + clientLogging, err := cloudlogging.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client logging: %s", err) + return nil + } + clientLogging.UserAgent = userAgent + clientLogging.BasePath = loggingClientBasePath + + return clientLogging +} + +func (c *Config) NewStorageClient(userAgent string) *storage.Service { + storageClientBasePath := c.StorageBasePath + log.Printf("[INFO] Instantiating Google Storage client for path %s", storageClientBasePath) + clientStorage, err := storage.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client storage: %s", err) 
+ return nil + } + clientStorage.UserAgent = userAgent + clientStorage.BasePath = storageClientBasePath + + return clientStorage +} + +// For object uploads, we need to override the specific timeout because they are long, synchronous operations. +func (c *Config) NewStorageClientWithTimeoutOverride(userAgent string, timeout time.Duration) *storage.Service { + storageClientBasePath := c.StorageBasePath + log.Printf("[INFO] Instantiating Google Storage client for path %s", storageClientBasePath) + // Copy the existing HTTP client (which has no unexported fields [as of Oct 2021 at least], so this is safe). + // We have to do this because otherwise we will accidentally change the timeout for all other + // synchronous operations, which would not be desirable. + httpClient := &http.Client{ + Transport: c.Client.Transport, + CheckRedirect: c.Client.CheckRedirect, + Jar: c.Client.Jar, + Timeout: timeout, + } + clientStorage, err := storage.NewService(c.Context, option.WithHTTPClient(httpClient)) + if err != nil { + log.Printf("[WARN] Error creating client storage: %s", err) + return nil + } + clientStorage.UserAgent = userAgent + clientStorage.BasePath = storageClientBasePath + + return clientStorage +} + +func (c *Config) NewSqlAdminClient(userAgent string) *sqladmin.Service { + sqlClientBasePath := RemoveBasePathVersion(RemoveBasePathVersion(c.SQLBasePath)) + log.Printf("[INFO] Instantiating Google SqlAdmin client for path %s", sqlClientBasePath) + clientSqlAdmin, err := sqladmin.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client storage: %s", err) + return nil + } + clientSqlAdmin.UserAgent = userAgent + clientSqlAdmin.BasePath = sqlClientBasePath + + return clientSqlAdmin +} + +func (c *Config) NewPubsubClient(userAgent string) *pubsub.Service { + pubsubClientBasePath := RemoveBasePathVersion(c.PubsubBasePath) + log.Printf("[INFO] Instantiating Google Pubsub client for path %s", pubsubClientBasePath) + 
wrappedPubsubClient := ClientWithAdditionalRetries(c.Client, PubsubTopicProjectNotReady) + clientPubsub, err := pubsub.NewService(c.Context, option.WithHTTPClient(wrappedPubsubClient)) + if err != nil { + log.Printf("[WARN] Error creating client pubsub: %s", err) + return nil + } + clientPubsub.UserAgent = userAgent + clientPubsub.BasePath = pubsubClientBasePath + + return clientPubsub +} + +func (c *Config) NewDataflowClient(userAgent string) *dataflow.Service { + dataflowClientBasePath := RemoveBasePathVersion(c.DataflowBasePath) + log.Printf("[INFO] Instantiating Google Dataflow client for path %s", dataflowClientBasePath) + clientDataflow, err := dataflow.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client dataflow: %s", err) + return nil + } + clientDataflow.UserAgent = userAgent + clientDataflow.BasePath = dataflowClientBasePath + + return clientDataflow +} + +func (c *Config) NewResourceManagerClient(userAgent string) *cloudresourcemanager.Service { + resourceManagerBasePath := RemoveBasePathVersion(c.ResourceManagerBasePath) + log.Printf("[INFO] Instantiating Google Cloud ResourceManager client for path %s", resourceManagerBasePath) + clientResourceManager, err := cloudresourcemanager.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client resource manager: %s", err) + return nil + } + clientResourceManager.UserAgent = userAgent + clientResourceManager.BasePath = resourceManagerBasePath + + return clientResourceManager +} + +func (c *Config) NewResourceManagerV3Client(userAgent string) *resourceManagerV3.Service { + resourceManagerV3BasePath := RemoveBasePathVersion(c.ResourceManagerV3BasePath) + log.Printf("[INFO] Instantiating Google Cloud ResourceManager V3 client for path %s", resourceManagerV3BasePath) + clientResourceManagerV3, err := resourceManagerV3.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + 
log.Printf("[WARN] Error creating client resource manager v3: %s", err) + return nil + } + clientResourceManagerV3.UserAgent = userAgent + clientResourceManagerV3.BasePath = resourceManagerV3BasePath + + return clientResourceManagerV3 +} + +func (c *Config) NewIamClient(userAgent string) *iam.Service { + iamClientBasePath := RemoveBasePathVersion(c.IAMBasePath) + log.Printf("[INFO] Instantiating Google Cloud IAM client for path %s", iamClientBasePath) + clientIAM, err := iam.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client iam: %s", err) + return nil + } + clientIAM.UserAgent = userAgent + clientIAM.BasePath = iamClientBasePath + + return clientIAM +} + +func (c *Config) NewIamCredentialsClient(userAgent string) *iamcredentials.Service { + iamCredentialsClientBasePath := RemoveBasePathVersion(c.IamCredentialsBasePath) + log.Printf("[INFO] Instantiating Google Cloud IAMCredentials client for path %s", iamCredentialsClientBasePath) + clientIamCredentials, err := iamcredentials.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client iam credentials: %s", err) + return nil + } + clientIamCredentials.UserAgent = userAgent + clientIamCredentials.BasePath = iamCredentialsClientBasePath + + return clientIamCredentials +} + +func (c *Config) NewServiceManClient(userAgent string) *servicemanagement.APIService { + serviceManagementClientBasePath := RemoveBasePathVersion(c.ServiceManagementBasePath) + log.Printf("[INFO] Instantiating Google Cloud Service Management client for path %s", serviceManagementClientBasePath) + clientServiceMan, err := servicemanagement.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client service management: %s", err) + return nil + } + clientServiceMan.UserAgent = userAgent + clientServiceMan.BasePath = serviceManagementClientBasePath + + return clientServiceMan 
+} + +func (c *Config) NewServiceUsageClient(userAgent string) *serviceusage.Service { + serviceUsageClientBasePath := RemoveBasePathVersion(c.ServiceUsageBasePath) + log.Printf("[INFO] Instantiating Google Cloud Service Usage client for path %s", serviceUsageClientBasePath) + clientServiceUsage, err := serviceusage.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client service usage: %s", err) + return nil + } + clientServiceUsage.UserAgent = userAgent + clientServiceUsage.BasePath = serviceUsageClientBasePath + + return clientServiceUsage +} + +func (c *Config) NewBillingClient(userAgent string) *cloudbilling.APIService { + cloudBillingClientBasePath := RemoveBasePathVersion(c.CloudBillingBasePath) + log.Printf("[INFO] Instantiating Google Cloud Billing client for path %s", cloudBillingClientBasePath) + clientBilling, err := cloudbilling.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client billing: %s", err) + return nil + } + clientBilling.UserAgent = userAgent + clientBilling.BasePath = cloudBillingClientBasePath + + return clientBilling +} + +func (c *Config) NewBuildClient(userAgent string) *cloudbuild.Service { + cloudBuildClientBasePath := RemoveBasePathVersion(c.CloudBuildBasePath) + log.Printf("[INFO] Instantiating Google Cloud Build client for path %s", cloudBuildClientBasePath) + clientBuild, err := cloudbuild.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client build: %s", err) + return nil + } + clientBuild.UserAgent = userAgent + clientBuild.BasePath = cloudBuildClientBasePath + + return clientBuild +} + +func (c *Config) NewCloudFunctionsClient(userAgent string) *cloudfunctions.Service { + cloudFunctionsClientBasePath := RemoveBasePathVersion(c.CloudFunctionsBasePath) + log.Printf("[INFO] Instantiating Google Cloud CloudFunctions Client for path %s", 
cloudFunctionsClientBasePath) + clientCloudFunctions, err := cloudfunctions.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client cloud functions: %s", err) + return nil + } + clientCloudFunctions.UserAgent = userAgent + clientCloudFunctions.BasePath = cloudFunctionsClientBasePath + + return clientCloudFunctions +} + +func (c *Config) NewSourceRepoClient(userAgent string) *sourcerepo.Service { + sourceRepoClientBasePath := RemoveBasePathVersion(c.SourceRepoBasePath) + log.Printf("[INFO] Instantiating Google Cloud Source Repo client for path %s", sourceRepoClientBasePath) + clientSourceRepo, err := sourcerepo.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client source repo: %s", err) + return nil + } + clientSourceRepo.UserAgent = userAgent + clientSourceRepo.BasePath = sourceRepoClientBasePath + + return clientSourceRepo +} + +func (c *Config) NewBigQueryClient(userAgent string) *bigquery.Service { + bigQueryClientBasePath := c.BigQueryBasePath + log.Printf("[INFO] Instantiating Google Cloud BigQuery client for path %s", bigQueryClientBasePath) + wrappedBigQueryClient := ClientWithAdditionalRetries(c.Client, IamMemberMissing) + clientBigQuery, err := bigquery.NewService(c.Context, option.WithHTTPClient(wrappedBigQueryClient)) + if err != nil { + log.Printf("[WARN] Error creating client big query: %s", err) + return nil + } + clientBigQuery.UserAgent = userAgent + clientBigQuery.BasePath = bigQueryClientBasePath + + return clientBigQuery +} + +func (c *Config) NewSpannerClient(userAgent string) *spanner.Service { + spannerClientBasePath := RemoveBasePathVersion(c.SpannerBasePath) + log.Printf("[INFO] Instantiating Google Cloud Spanner client for path %s", spannerClientBasePath) + clientSpanner, err := spanner.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client source repo: %s", 
err) + return nil + } + clientSpanner.UserAgent = userAgent + clientSpanner.BasePath = spannerClientBasePath + + return clientSpanner +} + +func (c *Config) NewDataprocClient(userAgent string) *dataproc.Service { + dataprocClientBasePath := RemoveBasePathVersion(c.DataprocBasePath) + log.Printf("[INFO] Instantiating Google Cloud Dataproc client for path %s", dataprocClientBasePath) + clientDataproc, err := dataproc.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client dataproc: %s", err) + return nil + } + clientDataproc.UserAgent = userAgent + clientDataproc.BasePath = dataprocClientBasePath + + return clientDataproc +} + +func (c *Config) NewCloudIoTClient(userAgent string) *cloudiot.Service { + cloudIoTClientBasePath := RemoveBasePathVersion(c.CloudIoTBasePath) + log.Printf("[INFO] Instantiating Google Cloud IoT Core client for path %s", cloudIoTClientBasePath) + clientCloudIoT, err := cloudiot.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client cloud iot: %s", err) + return nil + } + clientCloudIoT.UserAgent = userAgent + clientCloudIoT.BasePath = cloudIoTClientBasePath + + return clientCloudIoT +} + +func (c *Config) NewAppEngineClient(userAgent string) *appengine.APIService { + appEngineClientBasePath := RemoveBasePathVersion(c.AppEngineBasePath) + log.Printf("[INFO] Instantiating App Engine client for path %s", appEngineClientBasePath) + clientAppEngine, err := appengine.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client appengine: %s", err) + return nil + } + clientAppEngine.UserAgent = userAgent + clientAppEngine.BasePath = appEngineClientBasePath + + return clientAppEngine +} + +func (c *Config) NewComposerClient(userAgent string) *composer.Service { + composerClientBasePath := RemoveBasePathVersion(c.ComposerBasePath) + log.Printf("[INFO] Instantiating Cloud Composer 
client for path %s", composerClientBasePath) + clientComposer, err := composer.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client composer: %s", err) + return nil + } + clientComposer.UserAgent = userAgent + clientComposer.BasePath = composerClientBasePath + + return clientComposer +} + +func (c *Config) NewServiceNetworkingClient(userAgent string) *servicenetworking.APIService { + serviceNetworkingClientBasePath := RemoveBasePathVersion(c.ServiceNetworkingBasePath) + log.Printf("[INFO] Instantiating Service Networking client for path %s", serviceNetworkingClientBasePath) + clientServiceNetworking, err := servicenetworking.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client service networking: %s", err) + return nil + } + clientServiceNetworking.UserAgent = userAgent + clientServiceNetworking.BasePath = serviceNetworkingClientBasePath + + return clientServiceNetworking +} + +func (c *Config) NewStorageTransferClient(userAgent string) *storagetransfer.Service { + storageTransferClientBasePath := RemoveBasePathVersion(c.StorageTransferBasePath) + log.Printf("[INFO] Instantiating Google Cloud Storage Transfer client for path %s", storageTransferClientBasePath) + clientStorageTransfer, err := storagetransfer.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client storage transfer: %s", err) + return nil + } + clientStorageTransfer.UserAgent = userAgent + clientStorageTransfer.BasePath = storageTransferClientBasePath + + return clientStorageTransfer +} + +func (c *Config) NewHealthcareClient(userAgent string) *healthcare.Service { + healthcareClientBasePath := RemoveBasePathVersion(c.HealthcareBasePath) + log.Printf("[INFO] Instantiating Google Cloud Healthcare client for path %s", healthcareClientBasePath) + clientHealthcare, err := healthcare.NewService(c.Context, 
option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client healthcare: %s", err) + return nil + } + clientHealthcare.UserAgent = userAgent + clientHealthcare.BasePath = healthcareClientBasePath + + return clientHealthcare +} + +func (c *Config) NewCloudIdentityClient(userAgent string) *cloudidentity.Service { + cloudidentityClientBasePath := RemoveBasePathVersion(c.CloudIdentityBasePath) + log.Printf("[INFO] Instantiating Google Cloud CloudIdentity client for path %s", cloudidentityClientBasePath) + clientCloudIdentity, err := cloudidentity.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client cloud identity: %s", err) + return nil + } + clientCloudIdentity.UserAgent = userAgent + clientCloudIdentity.BasePath = cloudidentityClientBasePath + + return clientCloudIdentity +} + +func (c *Config) BigTableClientFactory(userAgent string) *BigtableClientFactory { + bigtableClientFactory := &BigtableClientFactory{ + UserAgent: userAgent, + TokenSource: c.tokenSource, + gRPCLoggingOptions: c.gRPCLoggingOptions, + BillingProject: c.BillingProject, + UserProjectOverride: c.UserProjectOverride, + } + + return bigtableClientFactory +} + +// Unlike other clients, the Bigtable Admin client doesn't use a single +// service. Instead, there are several distinct services created off +// the base service object. To imitate most other handwritten clients, +// we expose those directly instead of providing the `Service` object +// as a factory. 
+func (c *Config) NewBigTableProjectsInstancesClient(userAgent string) *bigtableadmin.ProjectsInstancesService { + bigtableAdminBasePath := RemoveBasePathVersion(c.BigtableAdminBasePath) + log.Printf("[INFO] Instantiating Google Cloud BigtableAdmin for path %s", bigtableAdminBasePath) + clientBigtable, err := bigtableadmin.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client big table projects instances: %s", err) + return nil + } + clientBigtable.UserAgent = userAgent + clientBigtable.BasePath = bigtableAdminBasePath + clientBigtableProjectsInstances := bigtableadmin.NewProjectsInstancesService(clientBigtable) + + return clientBigtableProjectsInstances +} + +func (c *Config) NewBigTableProjectsInstancesTablesClient(userAgent string) *bigtableadmin.ProjectsInstancesTablesService { + bigtableAdminBasePath := RemoveBasePathVersion(c.BigtableAdminBasePath) + log.Printf("[INFO] Instantiating Google Cloud BigtableAdmin for path %s", bigtableAdminBasePath) + clientBigtable, err := bigtableadmin.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client projects instances tables: %s", err) + return nil + } + clientBigtable.UserAgent = userAgent + clientBigtable.BasePath = bigtableAdminBasePath + clientBigtableProjectsInstancesTables := bigtableadmin.NewProjectsInstancesTablesService(clientBigtable) + + return clientBigtableProjectsInstancesTables +} + +func (c *Config) NewCloudRunV2Client(userAgent string) *runadminv2.Service { + runAdminV2ClientBasePath := RemoveBasePathVersion(RemoveBasePathVersion(c.CloudRunV2BasePath)) + log.Printf("[INFO] Instantiating Google Cloud Run Admin v2 client for path %s", runAdminV2ClientBasePath) + clientRunAdminV2, err := runadminv2.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client run admin: %s", err) + return nil + } + clientRunAdminV2.UserAgent = 
userAgent + clientRunAdminV2.BasePath = runAdminV2ClientBasePath + + return clientRunAdminV2 +} + +// StaticTokenSource is used to be able to identify static token sources without reflection. +type StaticTokenSource struct { + oauth2.TokenSource +} + +// Get a set of credentials with a given scope (clientScopes) based on the Config object. +// If initialCredentialsOnly is true, don't follow the impersonation settings and return the initial set of creds +// instead. +func (c *Config) GetCredentials(clientScopes []string, initialCredentialsOnly bool) (googleoauth.Credentials, error) { + if c.AccessToken != "" { + contents, _, err := verify.PathOrContents(c.AccessToken) + if err != nil { + return googleoauth.Credentials{}, fmt.Errorf("Error loading access token: %s", err) + } + + token := &oauth2.Token{AccessToken: contents} + if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { + opts := []option.ClientOption{option.WithTokenSource(oauth2.StaticTokenSource(token)), option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...), option.WithScopes(clientScopes...)} + creds, err := transport.Creds(context.TODO(), opts...) 
+ if err != nil { + return googleoauth.Credentials{}, err + } + return *creds, nil + } + + log.Printf("[INFO] Authenticating using configured Google JSON 'access_token'...") + log.Printf("[INFO] -- Scopes: %s", clientScopes) + return googleoauth.Credentials{ + TokenSource: StaticTokenSource{oauth2.StaticTokenSource(token)}, + }, nil + } + + if c.Credentials != "" { + contents, _, err := verify.PathOrContents(c.Credentials) + if err != nil { + return googleoauth.Credentials{}, fmt.Errorf("error loading credentials: %s", err) + } + + if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { + opts := []option.ClientOption{option.WithCredentialsJSON([]byte(contents)), option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...), option.WithScopes(clientScopes...)} + creds, err := transport.Creds(context.TODO(), opts...) + if err != nil { + return googleoauth.Credentials{}, err + } + return *creds, nil + } + + creds, err := transport.Creds(c.Context, option.WithCredentialsJSON([]byte(contents)), option.WithScopes(clientScopes...)) + if err != nil { + return googleoauth.Credentials{}, fmt.Errorf("unable to parse credentials from '%s': %s", contents, err) + } + + log.Printf("[INFO] Authenticating using configured Google JSON 'credentials'...") + log.Printf("[INFO] -- Scopes: %s", clientScopes) + return *creds, nil + } + + if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { + opts := option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...) 
+ creds, err := transport.Creds(context.TODO(), opts, option.WithScopes(clientScopes...)) + if err != nil { + return googleoauth.Credentials{}, err + } + + return *creds, nil + } + + log.Printf("[INFO] Authenticating using DefaultClient...") + log.Printf("[INFO] -- Scopes: %s", clientScopes) + creds, err := transport.Creds(context.Background(), option.WithScopes(clientScopes...)) + if err != nil { + return googleoauth.Credentials{}, fmt.Errorf("Attempted to load application default credentials since neither `credentials` nor `access_token` was set in the provider block. No credentials loaded. To use your gcloud credentials, run 'gcloud auth application-default login'. Original error: %w", err) + } + + return *creds, nil +} + +// Remove the `/{{version}}/` from a base path if present. +func RemoveBasePathVersion(url string) string { + re := regexp.MustCompile(`(?P<base>http[s]?://.*)(?P<version>/[^/]+?/$)`) + return re.ReplaceAllString(url, "$1/") +} + +// For a consumer of config.go that isn't a full fledged provider and doesn't +// have its own endpoint mechanism such as sweepers, init {{service}}BasePath +// values to a default. After using this, you should call config.LoadAndValidate. 
+func ConfigureBasePaths(c *Config) { + // Generated Products + c.AccessApprovalBasePath = DefaultBasePaths[AccessApprovalBasePathKey] + c.AccessContextManagerBasePath = DefaultBasePaths[AccessContextManagerBasePathKey] + c.ActiveDirectoryBasePath = DefaultBasePaths[ActiveDirectoryBasePathKey] + c.AlloydbBasePath = DefaultBasePaths[AlloydbBasePathKey] + c.ApigeeBasePath = DefaultBasePaths[ApigeeBasePathKey] + c.AppEngineBasePath = DefaultBasePaths[AppEngineBasePathKey] + c.ArtifactRegistryBasePath = DefaultBasePaths[ArtifactRegistryBasePathKey] + c.BeyondcorpBasePath = DefaultBasePaths[BeyondcorpBasePathKey] + c.BigQueryBasePath = DefaultBasePaths[BigQueryBasePathKey] + c.BigqueryAnalyticsHubBasePath = DefaultBasePaths[BigqueryAnalyticsHubBasePathKey] + c.BigqueryConnectionBasePath = DefaultBasePaths[BigqueryConnectionBasePathKey] + c.BigqueryDatapolicyBasePath = DefaultBasePaths[BigqueryDatapolicyBasePathKey] + c.BigqueryDataTransferBasePath = DefaultBasePaths[BigqueryDataTransferBasePathKey] + c.BigqueryReservationBasePath = DefaultBasePaths[BigqueryReservationBasePathKey] + c.BigtableBasePath = DefaultBasePaths[BigtableBasePathKey] + c.BillingBasePath = DefaultBasePaths[BillingBasePathKey] + c.BinaryAuthorizationBasePath = DefaultBasePaths[BinaryAuthorizationBasePathKey] + c.CertificateManagerBasePath = DefaultBasePaths[CertificateManagerBasePathKey] + c.CloudAssetBasePath = DefaultBasePaths[CloudAssetBasePathKey] + c.CloudBuildBasePath = DefaultBasePaths[CloudBuildBasePathKey] + c.Cloudbuildv2BasePath = DefaultBasePaths[Cloudbuildv2BasePathKey] + c.CloudFunctionsBasePath = DefaultBasePaths[CloudFunctionsBasePathKey] + c.Cloudfunctions2BasePath = DefaultBasePaths[Cloudfunctions2BasePathKey] + c.CloudIdentityBasePath = DefaultBasePaths[CloudIdentityBasePathKey] + c.CloudIdsBasePath = DefaultBasePaths[CloudIdsBasePathKey] + c.CloudIotBasePath = DefaultBasePaths[CloudIotBasePathKey] + c.CloudRunBasePath = DefaultBasePaths[CloudRunBasePathKey] + c.CloudRunV2BasePath 
= DefaultBasePaths[CloudRunV2BasePathKey] + c.CloudSchedulerBasePath = DefaultBasePaths[CloudSchedulerBasePathKey] + c.CloudTasksBasePath = DefaultBasePaths[CloudTasksBasePathKey] + c.ComputeBasePath = DefaultBasePaths[ComputeBasePathKey] + c.ContainerAnalysisBasePath = DefaultBasePaths[ContainerAnalysisBasePathKey] + c.ContainerAttachedBasePath = DefaultBasePaths[ContainerAttachedBasePathKey] + c.DatabaseMigrationServiceBasePath = DefaultBasePaths[DatabaseMigrationServiceBasePathKey] + c.DataCatalogBasePath = DefaultBasePaths[DataCatalogBasePathKey] + c.DataFusionBasePath = DefaultBasePaths[DataFusionBasePathKey] + c.DataLossPreventionBasePath = DefaultBasePaths[DataLossPreventionBasePathKey] + c.DataplexBasePath = DefaultBasePaths[DataplexBasePathKey] + c.DataprocBasePath = DefaultBasePaths[DataprocBasePathKey] + c.DataprocMetastoreBasePath = DefaultBasePaths[DataprocMetastoreBasePathKey] + c.DatastoreBasePath = DefaultBasePaths[DatastoreBasePathKey] + c.DatastreamBasePath = DefaultBasePaths[DatastreamBasePathKey] + c.DeploymentManagerBasePath = DefaultBasePaths[DeploymentManagerBasePathKey] + c.DialogflowBasePath = DefaultBasePaths[DialogflowBasePathKey] + c.DialogflowCXBasePath = DefaultBasePaths[DialogflowCXBasePathKey] + c.DNSBasePath = DefaultBasePaths[DNSBasePathKey] + c.DocumentAIBasePath = DefaultBasePaths[DocumentAIBasePathKey] + c.EssentialContactsBasePath = DefaultBasePaths[EssentialContactsBasePathKey] + c.FilestoreBasePath = DefaultBasePaths[FilestoreBasePathKey] + c.FirestoreBasePath = DefaultBasePaths[FirestoreBasePathKey] + c.GameServicesBasePath = DefaultBasePaths[GameServicesBasePathKey] + c.GKEBackupBasePath = DefaultBasePaths[GKEBackupBasePathKey] + c.GKEHubBasePath = DefaultBasePaths[GKEHubBasePathKey] + c.GKEHub2BasePath = DefaultBasePaths[GKEHub2BasePathKey] + c.HealthcareBasePath = DefaultBasePaths[HealthcareBasePathKey] + c.IAM2BasePath = DefaultBasePaths[IAM2BasePathKey] + c.IAMBetaBasePath = DefaultBasePaths[IAMBetaBasePathKey] + 
c.IAMWorkforcePoolBasePath = DefaultBasePaths[IAMWorkforcePoolBasePathKey] + c.IapBasePath = DefaultBasePaths[IapBasePathKey] + c.IdentityPlatformBasePath = DefaultBasePaths[IdentityPlatformBasePathKey] + c.KMSBasePath = DefaultBasePaths[KMSBasePathKey] + c.LoggingBasePath = DefaultBasePaths[LoggingBasePathKey] + c.LookerBasePath = DefaultBasePaths[LookerBasePathKey] + c.MemcacheBasePath = DefaultBasePaths[MemcacheBasePathKey] + c.MLEngineBasePath = DefaultBasePaths[MLEngineBasePathKey] + c.MonitoringBasePath = DefaultBasePaths[MonitoringBasePathKey] + c.NetworkManagementBasePath = DefaultBasePaths[NetworkManagementBasePathKey] + c.NetworkSecurityBasePath = DefaultBasePaths[NetworkSecurityBasePathKey] + c.NetworkServicesBasePath = DefaultBasePaths[NetworkServicesBasePathKey] + c.NotebooksBasePath = DefaultBasePaths[NotebooksBasePathKey] + c.OSConfigBasePath = DefaultBasePaths[OSConfigBasePathKey] + c.OSLoginBasePath = DefaultBasePaths[OSLoginBasePathKey] + c.PrivatecaBasePath = DefaultBasePaths[PrivatecaBasePathKey] + c.PublicCABasePath = DefaultBasePaths[PublicCABasePathKey] + c.PubsubBasePath = DefaultBasePaths[PubsubBasePathKey] + c.PubsubLiteBasePath = DefaultBasePaths[PubsubLiteBasePathKey] + c.RedisBasePath = DefaultBasePaths[RedisBasePathKey] + c.ResourceManagerBasePath = DefaultBasePaths[ResourceManagerBasePathKey] + c.SecretManagerBasePath = DefaultBasePaths[SecretManagerBasePathKey] + c.SecurityCenterBasePath = DefaultBasePaths[SecurityCenterBasePathKey] + c.ServiceManagementBasePath = DefaultBasePaths[ServiceManagementBasePathKey] + c.ServiceUsageBasePath = DefaultBasePaths[ServiceUsageBasePathKey] + c.SourceRepoBasePath = DefaultBasePaths[SourceRepoBasePathKey] + c.SpannerBasePath = DefaultBasePaths[SpannerBasePathKey] + c.SQLBasePath = DefaultBasePaths[SQLBasePathKey] + c.StorageBasePath = DefaultBasePaths[StorageBasePathKey] + c.StorageTransferBasePath = DefaultBasePaths[StorageTransferBasePathKey] + c.TagsBasePath = DefaultBasePaths[TagsBasePathKey] 
+ c.TPUBasePath = DefaultBasePaths[TPUBasePathKey] + c.VertexAIBasePath = DefaultBasePaths[VertexAIBasePathKey] + c.VPCAccessBasePath = DefaultBasePaths[VPCAccessBasePathKey] + c.WorkflowsBasePath = DefaultBasePaths[WorkflowsBasePathKey] + + // Handwritten Products / Versioned / Atypical Entries + c.CloudBillingBasePath = DefaultBasePaths[CloudBillingBasePathKey] + c.ComposerBasePath = DefaultBasePaths[ComposerBasePathKey] + c.ContainerBasePath = DefaultBasePaths[ContainerBasePathKey] + c.DataprocBasePath = DefaultBasePaths[DataprocBasePathKey] + c.DataflowBasePath = DefaultBasePaths[DataflowBasePathKey] + c.IamCredentialsBasePath = DefaultBasePaths[IamCredentialsBasePathKey] + c.ResourceManagerV3BasePath = DefaultBasePaths[ResourceManagerV3BasePathKey] + c.IAMBasePath = DefaultBasePaths[IAMBasePathKey] + c.ServiceNetworkingBasePath = DefaultBasePaths[ServiceNetworkingBasePathKey] + c.BigQueryBasePath = DefaultBasePaths[BigQueryBasePathKey] + c.BigtableAdminBasePath = DefaultBasePaths[BigtableAdminBasePathKey] + c.TagsLocationBasePath = DefaultBasePaths[TagsLocationBasePathKey] +} + +func GetCurrentUserEmail(config *Config, userAgent string) (string, error) { + // When environment variables UserProjectOverride and BillingProject are set for the provider, + // the header X-Goog-User-Project is set for the API requests. + // But it causes an error when calling GetCurrentUserEmail. Set the project to be "NO_BILLING_PROJECT_OVERRIDE". + // And then it triggers the header X-Goog-User-Project to be set to empty string. 
+ + // See https://github.com/golang/oauth2/issues/306 for a recommendation to do this from a Go maintainer + // URL retrieved from https://accounts.google.com/.well-known/openid-configuration + res, err := SendRequest(SendRequestOptions{ + Config: config, + Method: "GET", + Project: "NO_BILLING_PROJECT_OVERRIDE", + RawURL: "https://openidconnect.googleapis.com/v1/userinfo", + UserAgent: userAgent, + }) + + if err != nil { + return "", fmt.Errorf("error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) + } + if res["email"] == nil { + return "", fmt.Errorf("error retrieving email from userinfo. email was nil in the response.") + } + return res["email"].(string), nil +} + +func MultiEnvSearch(ks []string) string { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v + } + } + return "" +} + +// MultiEnvDefault is a helper function that returns the value of the first +// environment variable in the given list that returns a non-empty value. If +// none of the environment variables return a value, the default value is +// returned. 
+func MultiEnvDefault(ks []string, dv interface{}) interface{} { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v + } + } + return dv +} + +func CustomEndpointValidator() validator.String { + return stringvalidator.RegexMatches(regexp.MustCompile(`.*/[^/]+/$`), "") +} + +// return the region a selfLink is referring to +func GetRegionFromRegionSelfLink(selfLink string) string { + re := regexp.MustCompile("/compute/[a-zA-Z0-9]*/projects/[a-zA-Z0-9-]*/regions/([a-zA-Z0-9-]*)") + switch { + case re.MatchString(selfLink): + if res := re.FindStringSubmatch(selfLink); len(res) == 2 && res[1] != "" { + return res[1] + } + } + return selfLink +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/config_test_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/config_test_utils.go new file mode 100644 index 0000000000..47d210280d --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/config_test_utils.go @@ -0,0 +1,5 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package transport + +const TestFakeCredentialsPath = "../test-fixtures/fake_account.json" diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/error_retry_predicates.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/error_retry_predicates.go similarity index 79% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/error_retry_predicates.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/error_retry_predicates.go index f3c5c313e9..fb40ed6175 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/error_retry_predicates.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/error_retry_predicates.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package transport import ( "fmt" @@ -11,7 +13,6 @@ import ( "time" "google.golang.org/api/googleapi" - sqladmin "google.golang.org/api/sqladmin/v1beta4" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -147,28 +148,12 @@ func is403QuotaExceededPerMinuteError(err error) (bool, string) { return false, "" } -// Retry on comon googleapi error codes for retryable errors. -// TODO(#5609): This may not need to be applied globally - figure out -// what retryable error codes apply to which API. 
-func isCommonRetryableErrorCode(err error) (bool, string) { - gerr, ok := err.(*googleapi.Error) - if !ok { - return false, "" - } - - if gerr.Code == 429 || gerr.Code == 500 || gerr.Code == 502 || gerr.Code == 503 { - log.Printf("[DEBUG] Dismissed an error as retryable based on error code: %s", err) - return true, fmt.Sprintf("Retryable error code %d", gerr.Code) - } - return false, "" -} - // We've encountered a few common fingerprint-related strings; if this is one of // them, we're confident this is an error due to fingerprints. var FINGERPRINT_FAIL_ERRORS = []string{"Invalid fingerprint.", "Supplied fingerprint does not match current metadata fingerprint."} // Retry the operation if it looks like a fingerprint mismatch. -func isFingerprintError(err error) (bool, string) { +func IsFingerprintError(err error) (bool, string) { gerr, ok := err.(*googleapi.Error) if !ok { return false, "" @@ -187,43 +172,38 @@ func isFingerprintError(err error) (bool, string) { return false, "" } -// If a permission necessary to provision a resource is created in the same config -// as the resource itself, the permission may not have propagated by the time terraform -// attempts to create the resource. This allows those errors to be retried until the timeout expires -func iamMemberMissing(err error) (bool, string) { - if gerr, ok := err.(*googleapi.Error); ok { - if gerr.Code == 400 && strings.Contains(gerr.Body, "permission") { - return true, "Waiting for IAM member permissions to propagate." +const METADATA_FINGERPRINT_RETRIES = 10 + +// Since the google compute API uses optimistic locking, there is a chance +// we need to resubmit our updated metadata. 
To do this, you need to provide +// an update function that attempts to submit your metadata +func MetadataRetryWrapper(update func() error) error { + attempt := 0 + for attempt < METADATA_FINGERPRINT_RETRIES { + err := update() + if err == nil { + return nil } - } - return false, "" -} -// Cloud PubSub returns a 400 error if a topic's parent project was recently created and an -// organization policy has not propagated. -// See https://github.com/hashicorp/terraform-provider-google/issues/4349 -func pubsubTopicProjectNotReady(err error) (bool, string) { - if gerr, ok := err.(*googleapi.Error); ok { - if gerr.Code == 400 && strings.Contains(gerr.Body, "retry this operation") { - log.Printf("[DEBUG] Dismissed error as a retryable operation: %s", err) - return true, "Waiting for Pubsub topic's project to properly initialize with organiation policy" + if ok, _ := IsFingerprintError(err); !ok { + // Something else went wrong, don't retry + return err } + + log.Printf("[DEBUG] Dismissed an error as retryable as a fingerprint mismatch: %s", err) + attempt++ } - return false, "" + return fmt.Errorf("Failed to update metadata after %d retries", attempt) } -// Retry if Cloud SQL operation returns a 429 with a specific message for -// concurrent operations. -func isSqlInternalError(err error) (bool, string) { - if gerr, ok := err.(*SqlAdminOperationError); ok { - // SqlAdminOperationError is a non-interface type so we need to cast it through - // a layer of interface{}. :) - var ierr interface{} - ierr = gerr - if serr, ok := ierr.(*sqladmin.OperationErrors); ok && serr.Errors[0].Code == "INTERNAL_ERROR" { - return true, "Received an internal error, which is sometimes retryable for some SQL resources. Optimistically retrying." +// If a permission necessary to provision a resource is created in the same config +// as the resource itself, the permission may not have propagated by the time terraform +// attempts to create the resource. 
This allows those errors to be retried until the timeout expires +func IamMemberMissing(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 400 && strings.Contains(gerr.Body, "permission") { + return true, "Waiting for IAM member permissions to propagate." } - } return false, "" } @@ -245,7 +225,7 @@ func IsSqlOperationInProgressError(err error) (bool, string) { // times. This can happen if a service and a dependent service aren't batched // together- eg container.googleapis.com in one request followed by compute.g.c // in the next (container relies on compute and implicitly activates it) -func serviceUsageServiceBeingActivated(err error) (bool, string) { +func ServiceUsageServiceBeingActivated(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 400 { if strings.Contains(gerr.Body, "Another activation or deactivation is in progress") { return true, "Waiting for same service activation/deactivation to finish" @@ -256,9 +236,23 @@ func serviceUsageServiceBeingActivated(err error) (bool, string) { return false, "" } +// See https://github.com/hashicorp/terraform-provider-google/issues/14691 for +// details on the error message this handles +// This is a post-operation error so it uses tpgresource.CommonOpError instead of googleapi.Error +func ServiceUsageInternalError160009(err error) (bool, string) { + // a cyclical dependency between transport/tpgresource blocks using tpgresource.CommonOpError + // so just work off the error string. Ideally, we'd use that type instead. + s := err.Error() + if strings.Contains(s, "encountered internal error") && strings.Contains(s, "160009") && strings.Contains(s, "with failed services") { + return true, "retrying internal error 160009." + } + + return false, "" +} + // Retry if Bigquery operation returns a 403 with a specific message for // concurrent operations (which are implemented in terms of 'edit quota'). 
-func isBigqueryIAMQuotaError(err error) (bool, string) { +func IsBigqueryIAMQuotaError(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok { if gerr.Code == 403 && strings.Contains(strings.ToLower(gerr.Body), "exceeded rate limits") { return true, "Waiting for Bigquery edit quota to refresh" @@ -269,7 +263,7 @@ func isBigqueryIAMQuotaError(err error) (bool, string) { // Retry if Monitoring operation returns a 409 with a specific message for // concurrent operations. -func isMonitoringConcurrentEditError(err error) (bool, string) { +func IsMonitoringConcurrentEditError(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok { if gerr.Code == 409 && (strings.Contains(strings.ToLower(gerr.Body), "too many concurrent edits") || strings.Contains(strings.ToLower(gerr.Body), "could not fulfill the request")) { @@ -279,33 +273,18 @@ func isMonitoringConcurrentEditError(err error) (bool, string) { return false, "" } -// Retry if filestore operation returns a 429 with a specific message for -// concurrent operations. -func isNotFilestoreQuotaError(err error) (bool, string) { - if gerr, ok := err.(*googleapi.Error); ok { - if gerr.Code == 429 { - return false, "" - } - } - return isCommonRetryableErrorCode(err) -} - -// Retry if App Engine operation returns a 409 with a specific message for -// concurrent operations, or a 404 indicating p4sa has not yet propagated. 
-func isAppEngineRetryableError(err error) (bool, string) { +// Retry if Monitoring operation returns a 403 +func IsMonitoringPermissionError(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok { - if gerr.Code == 409 && strings.Contains(strings.ToLower(gerr.Body), "operation is already in progress") { - return true, "Waiting for other concurrent App Engine changes to finish" - } - if gerr.Code == 404 && strings.Contains(strings.ToLower(gerr.Body), "unable to retrieve p4sa") { - return true, "Waiting for P4SA propagation to GAIA" + if gerr.Code == 403 { + return true, "Waiting for project to be ready for metrics scope" } } return false, "" } // Retry if KMS CryptoKeyVersions returns a 400 for PENDING_GENERATION -func isCryptoKeyVersionsPendingGeneration(err error) (bool, string) { +func IsCryptoKeyVersionsPendingGeneration(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 400 { if strings.Contains(gerr.Body, "PENDING_GENERATION") { return true, "Waiting for pending key generation" @@ -316,7 +295,7 @@ func isCryptoKeyVersionsPendingGeneration(err error) (bool, string) { // Retry if getting a resource/operation returns a 404 for specific operations. // opType should describe the operation for which 404 can be retryable. 
-func isNotFoundRetryableError(opType string) RetryErrorPredicateFunc { +func IsNotFoundRetryableError(opType string) RetryErrorPredicateFunc { return func(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { return true, fmt.Sprintf("Retry 404s for %s", opType) @@ -325,16 +304,7 @@ func isNotFoundRetryableError(opType string) RetryErrorPredicateFunc { } } -func isDataflowJobUpdateRetryableError(err error) (bool, string) { - if gerr, ok := err.(*googleapi.Error); ok { - if gerr.Code == 404 && strings.Contains(gerr.Body, "in RUNNING OR DRAINING state") { - return true, "Waiting for job to be in a valid state" - } - } - return false, "" -} - -func isPeeringOperationInProgress(err error) (bool, string) { +func IsPeeringOperationInProgress(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok { if gerr.Code == 400 && strings.Contains(gerr.Body, "There is a peering operation in progress") { return true, "Waiting peering operation to complete" @@ -343,16 +313,7 @@ func isPeeringOperationInProgress(err error) (bool, string) { return false, "" } -func isCloudFunctionsSourceCodeError(err error) (bool, string) { - if operr, ok := err.(*CommonOpError); ok { - if operr.Code == 3 && operr.Message == "Failed to retrieve function source code" { - return true, fmt.Sprintf("Retry on Function failing to pull code from GCS") - } - } - return false, "" -} - -func datastoreIndex409Contention(err error) (bool, string) { +func DatastoreIndex409Contention(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok { if gerr.Code == 409 && strings.Contains(gerr.Body, "too much contention") { return true, "too much contention - waiting for less activity" @@ -361,7 +322,7 @@ func datastoreIndex409Contention(err error) (bool, string) { return false, "" } -func iapClient409Operation(err error) (bool, string) { +func IapClient409Operation(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok { if gerr.Code == 409 
&& strings.Contains(strings.ToLower(gerr.Body), "operation was aborted") { return true, "operation was aborted possibly due to concurrency issue - retrying" @@ -370,7 +331,7 @@ func iapClient409Operation(err error) (bool, string) { return false, "" } -func healthcareDatasetNotInitialized(err error) (bool, string) { +func HealthcareDatasetNotInitialized(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok { if gerr.Code == 404 && strings.Contains(strings.ToLower(gerr.Body), "dataset not initialized") { return true, "dataset not initialized - retrying" @@ -383,7 +344,7 @@ func healthcareDatasetNotInitialized(err error) (bool, string) { // (eg GET and LIST) but not the backing apiserver. When we encounter a 409, we can retry it. // Note that due to limitations in MMv1's error_retry_predicates this is currently applied to all requests. // We only expect to receive it on create, though. -func isCloudRunCreationConflict(err error) (bool, string) { +func IsCloudRunCreationConflict(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok { if gerr.Code == 409 { return true, "saw a 409 - waiting until background deletion completes" @@ -401,7 +362,7 @@ func isCloudRunCreationConflict(err error) (bool, string) { // user-provided SA could trigger this too. At the callsite, we should check // if the current etag matches the old etag and short-circuit if they do as // that indicates the new config is the likely problem. 
-func iamServiceAccountNotFound(err error) (bool, string) { +func IamServiceAccountNotFound(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok { if gerr.Code == 400 && strings.Contains(gerr.Body, "Service account") && strings.Contains(gerr.Body, "does not exist") { return true, "service account not found in IAM" @@ -411,10 +372,83 @@ func iamServiceAccountNotFound(err error) (bool, string) { return false, "" } +// Concurrent Apigee operations can fail with a 400 error +func IsApigeeRetryableError(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 400 && strings.Contains(strings.ToLower(gerr.Body), "the resource is locked by another operation") { + return true, "Waiting for other concurrent operations to finish" + } + } + + return false, "" +} + +func IsDataflowJobUpdateRetryableError(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 404 && strings.Contains(gerr.Body, "in RUNNING OR DRAINING state") { + return true, "Waiting for job to be in a valid state" + } + } + return false, "" +} + +// Cloud PubSub returns a 400 error if a topic's parent project was recently created and an +// organization policy has not propagated. +// See https://github.com/hashicorp/terraform-provider-google/issues/4349 +func PubsubTopicProjectNotReady(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 400 && strings.Contains(gerr.Body, "retry this operation") { + log.Printf("[DEBUG] Dismissed error as a retryable operation: %s", err) + return true, "Waiting for Pubsub topic's project to properly initialize with organiation policy" + } + } + return false, "" +} + +// Retry on comon googleapi error codes for retryable errors. +// TODO(#5609): This may not need to be applied globally - figure out +// what retryable error codes apply to which API. 
+func isCommonRetryableErrorCode(err error) (bool, string) { + gerr, ok := err.(*googleapi.Error) + if !ok { + return false, "" + } + + if gerr.Code == 429 || gerr.Code == 500 || gerr.Code == 502 || gerr.Code == 503 { + log.Printf("[DEBUG] Dismissed an error as retryable based on error code: %s", err) + return true, fmt.Sprintf("Retryable error code %d", gerr.Code) + } + return false, "" +} + +// Do not retry if operation returns a 429 +func Is429QuotaError(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 429 { + return true, "429s are not retryable for this resource" + } + } + return false, "" +} + +// Retry if App Engine operation returns a 409 with a specific message for +// concurrent operations, or a 404 indicating p4sa has not yet propagated. +func IsAppEngineRetryableError(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 409 && strings.Contains(strings.ToLower(gerr.Body), "operation is already in progress") { + return true, "Waiting for other concurrent App Engine changes to finish" + } + if gerr.Code == 404 && strings.Contains(strings.ToLower(gerr.Body), "unable to retrieve p4sa") { + return true, "Waiting for P4SA propagation to GAIA" + } + } + return false, "" +} + // Bigtable uses gRPC and thus does not return errors of type *googleapi.Error. // Instead the errors returned are *status.Error. See the types of codes returned // here (https://pkg.go.dev/google.golang.org/grpc/codes#Code). -func isBigTableRetryableError(err error) (bool, string) { +func IsBigTableRetryableError(err error) (bool, string) { // The error is retryable if the error code is not OK and has a retry delay. // The retry delay is currently not used. 
if errorStatus, ok := status.FromError(err); ok && errorStatus.Code() != codes.OK { @@ -437,11 +471,14 @@ func isBigTableRetryableError(err error) (bool, string) { return false, "" } -// Concurrent Apigee operations can fail with a 400 error -func isApigeeRetryableError(err error) (bool, string) { +// Gateway of type 'SECURE_WEB_GATEWAY' automatically creates a router but does not delete it. +// This router might be re-used by other gateways located in the same network. +// When multiple gateways are being deleted at the same time, multiple attempts to delete the +// same router will be triggered and the api throws an error saying the "The resource is not ready". +func IsSwgAutogenRouterRetryable(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok { - if gerr.Code == 400 && strings.Contains(strings.ToLower(gerr.Body), "the resource is locked by another operation") { - return true, "Waiting for other concurrent operations to finish" + if gerr.Code == 400 && strings.Contains(strings.ToLower(gerr.Body), "not ready") { + return true, "Waiting swg autogen router to be ready" } } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/header_transport.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/header_transport.go similarity index 81% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/header_transport.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/header_transport.go index 607262cc2f..45ec19d3c1 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/header_transport.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/header_transport.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package transport import ( "net/http" @@ -10,7 +12,7 @@ type headerTransportLayer struct { baseTransit http.RoundTripper } -func newTransportWithHeaders(baseTransit http.RoundTripper) headerTransportLayer { +func NewTransportWithHeaders(baseTransit http.RoundTripper) headerTransportLayer { if baseTransit == nil { baseTransit = http.DefaultTransport } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/mutexkv.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/mutexkv.go similarity index 87% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/mutexkv.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/mutexkv.go index cd0c53a4c4..e82c4df7f5 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/mutexkv.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/mutexkv.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package transport import ( "log" @@ -64,3 +66,13 @@ func NewMutexKV() *MutexKV { store: make(map[string]*sync.RWMutex), } } + +// Global MutexKV +var MutexStore = NewMutexKV() + +func LockedCall(lockKey string, f func() error) error { + MutexStore.Lock(lockKey) + defer MutexStore.Unlock(lockKey) + + return f() +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider_dcl_client_creation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/provider_dcl_client_creation.go similarity index 89% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider_dcl_client_creation.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/provider_dcl_client_creation.go index 14dc78b9f8..96001024e3 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/provider_dcl_client_creation.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/provider_dcl_client_creation.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + // ---------------------------------------------------------------------------- // // *** AUTO GENERATED CODE *** Type: DCL *** @@ -13,16 +16,19 @@ // // ---------------------------------------------------------------------------- -package google +package transport import ( + "fmt" dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + "log" "time" apikeys "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apikeys" assuredworkloads "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads" bigqueryreservation "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation" cloudbuild "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuild" + cloudbuildv2 "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2" clouddeploy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy" cloudresourcemanager "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager" compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" @@ -32,11 +38,8 @@ import ( dataproc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc" eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc" firebaserules "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules" - logging "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging" - monitoring "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring" networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" 
orgpolicy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/orgpolicy" - osconfig "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig" privateca "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca" recaptchaenterprise "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise" ) @@ -133,6 +136,29 @@ func NewDCLCloudbuildClient(config *Config, userAgent, billingProject string, ti return cloudbuild.NewClient(dclConfig) } +func NewDCLCloudbuildv2Client(config *Config, userAgent, billingProject string, timeout time.Duration) *cloudbuildv2.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.Client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.Cloudbuildv2BasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return cloudbuildv2.NewClient(dclConfig) +} + func NewDCLClouddeployClient(config *Config, userAgent, billingProject string, timeout time.Duration) *clouddeploy.Client { configOptions := []dcl.ConfigOption{ dcl.WithHTTPClient(config.Client), @@ -340,52 +366,6 @@ func NewDCLFirebaserulesClient(config *Config, userAgent, billingProject string, return firebaserules.NewClient(dclConfig) } -func NewDCLLoggingClient(config *Config, userAgent, billingProject string, timeout time.Duration) *logging.Client { - configOptions := []dcl.ConfigOption{ - dcl.WithHTTPClient(config.Client), - dcl.WithUserAgent(userAgent), - dcl.WithLogger(dclLogger{}), - dcl.WithBasePath(config.LoggingBasePath), - } - - if timeout != 0 { - configOptions = append(configOptions, dcl.WithTimeout(timeout)) - } - - if config.UserProjectOverride { - configOptions = append(configOptions, dcl.WithUserProjectOverride()) - if billingProject != "" { - configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) - } - } - - dclConfig := dcl.NewConfig(configOptions...) - return logging.NewClient(dclConfig) -} - -func NewDCLMonitoringClient(config *Config, userAgent, billingProject string, timeout time.Duration) *monitoring.Client { - configOptions := []dcl.ConfigOption{ - dcl.WithHTTPClient(config.Client), - dcl.WithUserAgent(userAgent), - dcl.WithLogger(dclLogger{}), - dcl.WithBasePath(config.MonitoringBasePath), - } - - if timeout != 0 { - configOptions = append(configOptions, dcl.WithTimeout(timeout)) - } - - if config.UserProjectOverride { - configOptions = append(configOptions, dcl.WithUserProjectOverride()) - if billingProject != "" { - configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) - } - } - - dclConfig := dcl.NewConfig(configOptions...) 
- return monitoring.NewClient(dclConfig) -} - func NewDCLNetworkConnectivityClient(config *Config, userAgent, billingProject string, timeout time.Duration) *networkconnectivity.Client { configOptions := []dcl.ConfigOption{ dcl.WithHTTPClient(config.Client), @@ -432,29 +412,6 @@ func NewDCLOrgPolicyClient(config *Config, userAgent, billingProject string, tim return orgpolicy.NewClient(dclConfig) } -func NewDCLOsConfigClient(config *Config, userAgent, billingProject string, timeout time.Duration) *osconfig.Client { - configOptions := []dcl.ConfigOption{ - dcl.WithHTTPClient(config.Client), - dcl.WithUserAgent(userAgent), - dcl.WithLogger(dclLogger{}), - dcl.WithBasePath(config.OSConfigBasePath), - } - - if timeout != 0 { - configOptions = append(configOptions, dcl.WithTimeout(timeout)) - } - - if config.UserProjectOverride { - configOptions = append(configOptions, dcl.WithUserProjectOverride()) - if billingProject != "" { - configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) - } - } - - dclConfig := dcl.NewConfig(configOptions...) - return osconfig.NewClient(dclConfig) -} - func NewDCLPrivatecaClient(config *Config, userAgent, billingProject string, timeout time.Duration) *privateca.Client { configOptions := []dcl.ConfigOption{ dcl.WithHTTPClient(config.Client), @@ -500,3 +457,35 @@ func NewDCLRecaptchaEnterpriseClient(config *Config, userAgent, billingProject s dclConfig := dcl.NewConfig(configOptions...) return recaptchaenterprise.NewClient(dclConfig) } + +type dclLogger struct{} + +// Fatal records Fatal errors. +func (l dclLogger) Fatal(args ...interface{}) { + log.Fatal(args...) +} + +// Fatalf records Fatal errors with added arguments. +func (l dclLogger) Fatalf(format string, args ...interface{}) { + log.Fatalf(fmt.Sprintf("[DEBUG][DCL FATAL] %s", format), args...) +} + +// Info records Info errors. +func (l dclLogger) Info(args ...interface{}) { + log.Print(args...) +} + +// Infof records Info errors with added arguments. 
+func (l dclLogger) Infof(format string, args ...interface{}) { + log.Printf(fmt.Sprintf("[DEBUG][DCL INFO] %s", format), args...) +} + +// Warningf records Warning errors with added arguments. +func (l dclLogger) Warningf(format string, args ...interface{}) { + log.Printf(fmt.Sprintf("[DEBUG][DCL WARNING] %s", format), args...) +} + +// Warning records Warning errors. +func (l dclLogger) Warning(args ...interface{}) { + log.Print(args...) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/provider_dcl_endpoints.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/provider_dcl_endpoints.go new file mode 100644 index 0000000000..19785b85c3 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/provider_dcl_endpoints.go @@ -0,0 +1,246 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package transport + +import ( + framework_schema "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// empty string is passed for dcl default since dcl +// [hardcodes the values](https://github.com/GoogleCloudPlatform/declarative-resource-client-library/blob/main/services/google/eventarc/beta/trigger_internal.go#L96-L103) + +var ApikeysEndpointEntryKey = "apikeys_custom_endpoint" +var ApikeysEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, +} + +var AssuredWorkloadsEndpointEntryKey = "assured_workloads_custom_endpoint" +var AssuredWorkloadsEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, +} + +var CloudBuildWorkerPoolEndpointEntryKey = "cloud_build_worker_pool_custom_endpoint" +var CloudBuildWorkerPoolEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, +} + +var ClouddeployEndpointEntryKey = "clouddeploy_custom_endpoint" +var ClouddeployEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, +} + +var CloudResourceManagerEndpointEntryKey = "cloud_resource_manager_custom_endpoint" +var CloudResourceManagerEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, +} + +var EventarcEndpointEntryKey = "eventarc_custom_endpoint" +var EventarcEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, +} + +var FirebaserulesEndpointEntryKey = "firebaserules_custom_endpoint" +var FirebaserulesEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, +} + +var NetworkConnectivityEndpointEntryKey = "network_connectivity_custom_endpoint" +var 
NetworkConnectivityEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, +} + +var OrgPolicyEndpointEntryKey = "org_policy_custom_endpoint" +var OrgPolicyEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, +} + +var RecaptchaEnterpriseEndpointEntryKey = "recaptcha_enterprise_custom_endpoint" +var RecaptchaEnterpriseEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, +} + +type DCLConfig struct { + ApikeysBasePath string + AssuredWorkloadsBasePath string + CloudBuildWorkerPoolBasePath string + ClouddeployBasePath string + CloudResourceManagerBasePath string + EventarcBasePath string + FirebaserulesBasePath string + NetworkConnectivityBasePath string + OrgPolicyBasePath string + RecaptchaEnterpriseBasePath string +} + +func ConfigureDCLProvider(provider *schema.Provider) { + provider.Schema[ApikeysEndpointEntryKey] = ApikeysEndpointEntry + provider.Schema[AssuredWorkloadsEndpointEntryKey] = AssuredWorkloadsEndpointEntry + provider.Schema[CloudBuildWorkerPoolEndpointEntryKey] = CloudBuildWorkerPoolEndpointEntry + provider.Schema[ClouddeployEndpointEntryKey] = ClouddeployEndpointEntry + provider.Schema[CloudResourceManagerEndpointEntryKey] = CloudResourceManagerEndpointEntry + provider.Schema[EventarcEndpointEntryKey] = EventarcEndpointEntry + provider.Schema[FirebaserulesEndpointEntryKey] = FirebaserulesEndpointEntry + provider.Schema[NetworkConnectivityEndpointEntryKey] = NetworkConnectivityEndpointEntry + provider.Schema[OrgPolicyEndpointEntryKey] = OrgPolicyEndpointEntry + provider.Schema[RecaptchaEnterpriseEndpointEntryKey] = RecaptchaEnterpriseEndpointEntry +} + +func HandleDCLCustomEndpointDefaults(d *schema.ResourceData) { + if d.Get(ApikeysEndpointEntryKey) == "" { + d.Set(ApikeysEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_APIKEYS_CUSTOM_ENDPOINT", + }, "")) + } + if d.Get(AssuredWorkloadsEndpointEntryKey) == "" { + d.Set(AssuredWorkloadsEndpointEntryKey, 
MultiEnvDefault([]string{ + "GOOGLE_ASSURED_WORKLOADS_CUSTOM_ENDPOINT", + }, "")) + } + if d.Get(CloudBuildWorkerPoolEndpointEntryKey) == "" { + d.Set(CloudBuildWorkerPoolEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_CLOUD_BUILD_WORKER_POOL_CUSTOM_ENDPOINT", + }, "")) + } + if d.Get(ClouddeployEndpointEntryKey) == "" { + d.Set(ClouddeployEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_CLOUDDEPLOY_CUSTOM_ENDPOINT", + }, "")) + } + if d.Get(CloudResourceManagerEndpointEntryKey) == "" { + d.Set(CloudResourceManagerEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_CLOUD_RESOURCE_MANAGER_CUSTOM_ENDPOINT", + }, "")) + } + if d.Get(EventarcEndpointEntryKey) == "" { + d.Set(EventarcEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_EVENTARC_CUSTOM_ENDPOINT", + }, "")) + } + if d.Get(FirebaserulesEndpointEntryKey) == "" { + d.Set(FirebaserulesEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_FIREBASERULES_CUSTOM_ENDPOINT", + }, "")) + } + if d.Get(NetworkConnectivityEndpointEntryKey) == "" { + d.Set(NetworkConnectivityEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_NETWORK_CONNECTIVITY_CUSTOM_ENDPOINT", + }, "")) + } + if d.Get(OrgPolicyEndpointEntryKey) == "" { + d.Set(OrgPolicyEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_ORG_POLICY_CUSTOM_ENDPOINT", + }, "")) + } + if d.Get(RecaptchaEnterpriseEndpointEntryKey) == "" { + d.Set(RecaptchaEnterpriseEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_RECAPTCHA_ENTERPRISE_CUSTOM_ENDPOINT", + }, "")) + } +} + +// plugin-framework provider set-up +func ConfigureDCLCustomEndpointAttributesFramework(frameworkSchema *framework_schema.Schema) { + frameworkSchema.Attributes["apikeys_custom_endpoint"] = framework_schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + CustomEndpointValidator(), + }, + } + frameworkSchema.Attributes["assured_workloads_custom_endpoint"] = framework_schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + 
CustomEndpointValidator(), + }, + } + frameworkSchema.Attributes["cloud_build_worker_pool_custom_endpoint"] = framework_schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + CustomEndpointValidator(), + }, + } + frameworkSchema.Attributes["clouddeploy_custom_endpoint"] = framework_schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + CustomEndpointValidator(), + }, + } + frameworkSchema.Attributes["cloud_resource_manager_custom_endpoint"] = framework_schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + CustomEndpointValidator(), + }, + } + frameworkSchema.Attributes["eventarc_custom_endpoint"] = framework_schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + CustomEndpointValidator(), + }, + } + frameworkSchema.Attributes["firebaserules_custom_endpoint"] = framework_schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + CustomEndpointValidator(), + }, + } + frameworkSchema.Attributes["network_connectivity_custom_endpoint"] = framework_schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + CustomEndpointValidator(), + }, + } + frameworkSchema.Attributes["org_policy_custom_endpoint"] = framework_schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + CustomEndpointValidator(), + }, + } + frameworkSchema.Attributes["recaptcha_enterprise_custom_endpoint"] = framework_schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + CustomEndpointValidator(), + }, + } +} + +func ProviderDCLConfigure(d *schema.ResourceData, config *Config) interface{} { + config.ApikeysBasePath = d.Get(ApikeysEndpointEntryKey).(string) + config.AssuredWorkloadsBasePath = d.Get(AssuredWorkloadsEndpointEntryKey).(string) + config.CloudBuildWorkerPoolBasePath = d.Get(CloudBuildWorkerPoolEndpointEntryKey).(string) + config.ClouddeployBasePath = d.Get(ClouddeployEndpointEntryKey).(string) + 
config.CloudResourceManagerBasePath = d.Get(CloudResourceManagerEndpointEntryKey).(string) + config.EventarcBasePath = d.Get(EventarcEndpointEntryKey).(string) + config.FirebaserulesBasePath = d.Get(FirebaserulesEndpointEntryKey).(string) + config.NetworkConnectivityBasePath = d.Get(NetworkConnectivityEndpointEntryKey).(string) + config.OrgPolicyBasePath = d.Get(OrgPolicyEndpointEntryKey).(string) + config.RecaptchaEnterpriseBasePath = d.Get(RecaptchaEnterpriseEndpointEntryKey).(string) + config.CloudBuildWorkerPoolBasePath = d.Get(CloudBuildWorkerPoolEndpointEntryKey).(string) + return config +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/provider_handwritten_endpoint.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/provider_handwritten_endpoint.go new file mode 100644 index 0000000000..690e88f6b5 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/provider_handwritten_endpoint.go @@ -0,0 +1,124 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package transport + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +// For generated resources, endpoint entries live in product-specific provider +// files. Collect handwritten ones here. If any of these are modified, be sure +// to update the provider_reference docs page. 
+ +var CloudBillingCustomEndpointEntryKey = "cloud_billing_custom_endpoint" +var CloudBillingCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var ComposerCustomEndpointEntryKey = "composer_custom_endpoint" +var ComposerCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var ContainerCustomEndpointEntryKey = "container_custom_endpoint" +var ContainerCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var DataflowCustomEndpointEntryKey = "dataflow_custom_endpoint" +var DataflowCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var IAMCustomEndpointEntryKey = "iam_custom_endpoint" +var IAMCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var IamCredentialsCustomEndpointEntryKey = "iam_credentials_custom_endpoint" +var IamCredentialsCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var ResourceManagerV3CustomEndpointEntryKey = "resource_manager_v3_custom_endpoint" +var ResourceManagerV3CustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var ServiceNetworkingCustomEndpointEntryKey = "service_networking_custom_endpoint" +var ServiceNetworkingCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var ServiceUsageCustomEndpointEntryKey = "service_usage_custom_endpoint" +var ServiceUsageCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + 
"GOOGLE_SERVICE_USAGE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ServiceUsageBasePathKey]), +} + +var BigtableAdminCustomEndpointEntryKey = "bigtable_custom_endpoint" +var BigtableAdminCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_BIGTABLE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[BigtableAdminBasePathKey]), +} + +var PrivatecaCertificateTemplateEndpointEntryKey = "privateca_custom_endpoint" +var PrivatecaCertificateTemplateCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_PRIVATECA_CUSTOM_ENDPOINT", + }, DefaultBasePaths[PrivatecaBasePathKey]), +} + +var ContainerAwsCustomEndpointEntryKey = "container_aws_custom_endpoint" +var ContainerAwsCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var ContainerAzureCustomEndpointEntryKey = "container_azure_custom_endpoint" +var ContainerAzureCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var TagsLocationCustomEndpointEntryKey = "tags_location_custom_endpoint" +var TagsLocationCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +func ValidateCustomEndpoint(v interface{}, k string) (ws []string, errors []error) { + re := `.*/[^/]+/$` + return verify.ValidateRegexp(re)(v, k) +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/retry_transport.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/retry_transport.go similarity index 97% rename from 
terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/retry_transport.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/retry_transport.go index 5996057277..e706defc61 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/retry_transport.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/retry_transport.go @@ -1,10 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 // A http.RoundTripper that retries common errors, with convenience constructors. // // NOTE: This meant for TEMPORARY, TRANSIENT ERRORS. // Do not use for waiting on operations or polling of resource state, // especially if the expected state (operation done, resource ready, etc) // takes longer to reach than the default client Timeout. -// In those cases, RetryTimeDuration(...)/resource.Retry with appropriate timeout +// In those cases, Retry(...)/resource.Retry with appropriate timeout // and error predicates/handling should be used as a wrapper around the request // instead. // @@ -27,7 +29,7 @@ // c.clientSqlAdmin, err = compute.NewService(ctx, option.WithHTTPClient(sqlAdminHttpClient)) // ... -package google +package transport import ( "bytes" @@ -204,7 +206,7 @@ func (t *retryTransport) checkForRetryableError(resp *http.Response, respErr err if errToCheck == nil { return nil } - if isRetryableError(errToCheck, t.retryPredicates...) 
{ + if IsRetryableError(errToCheck, t.retryPredicates, nil) { return resource.RetryableError(errToCheck) } return resource.NonRetryableError(errToCheck) diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/retry_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/retry_utils.go new file mode 100644 index 0000000000..fb32cf94d2 --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/retry_utils.go @@ -0,0 +1,106 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package transport + +import ( + "log" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +type RetryOptions struct { + RetryFunc func() error + Timeout time.Duration + PollInterval time.Duration + ErrorRetryPredicates []RetryErrorPredicateFunc + ErrorAbortPredicates []RetryErrorPredicateFunc +} + +func Retry(opt RetryOptions) error { + if opt.Timeout == 0 { + opt.Timeout = 1 * time.Minute + } + + if opt.PollInterval != 0 { + refreshFunc := func() (interface{}, string, error) { + err := opt.RetryFunc() + if err == nil { + return "", "done", nil + } + + // Check if it is a retryable error. + if IsRetryableError(err, opt.ErrorRetryPredicates, opt.ErrorAbortPredicates) { + return "", "retrying", nil + } + + // The error is not retryable. 
+ return "", "done", err + } + stateChange := &resource.StateChangeConf{ + Pending: []string{ + "retrying", + }, + Target: []string{ + "done", + }, + Refresh: refreshFunc, + Timeout: opt.Timeout, + PollInterval: opt.PollInterval, + } + + _, err := stateChange.WaitForState() + return err + } + + return resource.Retry(opt.Timeout, func() *resource.RetryError { + err := opt.RetryFunc() + if err == nil { + return nil + } + if IsRetryableError(err, opt.ErrorRetryPredicates, opt.ErrorAbortPredicates) { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + }) +} + +func IsRetryableError(topErr error, retryPredicates, abortPredicates []RetryErrorPredicateFunc) bool { + if topErr == nil { + return false + } + + retryPredicates = append( + // Global error retry predicates are registered in this default list. + defaultErrorRetryPredicates, + retryPredicates...) + + // Check all wrapped errors for an abortable error status. + isAbortable := false + errwrap.Walk(topErr, func(werr error) { + for _, pred := range abortPredicates { + if predAbort, predReason := pred(werr); predAbort { + log.Printf("[DEBUG] Dismissed an error as abortable. %s - %s", predReason, werr) + isAbortable = true + return + } + } + }) + if isAbortable { + return false + } + + // Check all wrapped errors for a retryable error status. + isRetryable := false + errwrap.Walk(topErr, func(werr error) { + for _, pred := range retryPredicates { + if predRetry, predReason := pred(werr); predRetry { + log.Printf("[DEBUG] Dismissed an error as retryable. 
%s - %s", predReason, werr) + isRetryable = true + return + } + } + }) + return isRetryable +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/transport.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/transport.go new file mode 100644 index 0000000000..300a756abf --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/transport.go @@ -0,0 +1,163 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package transport + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "net/http" + "net/url" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/googleapi" +) + +var DefaultRequestTimeout = 5 * time.Minute + +type SendRequestOptions struct { + Config *Config + Method string + Project string + RawURL string + UserAgent string + Body map[string]any + Timeout time.Duration + ErrorRetryPredicates []RetryErrorPredicateFunc + ErrorAbortPredicates []RetryErrorPredicateFunc +} + +func SendRequest(opt SendRequestOptions) (map[string]interface{}, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", opt.UserAgent) + reqHeaders.Set("Content-Type", "application/json") + + if opt.Config.UserProjectOverride && opt.Project != "" { + // When opt.Project is "NO_BILLING_PROJECT_OVERRIDE" in the function GetCurrentUserEmail, + // set the header X-Goog-User-Project to be empty string. + if opt.Project == "NO_BILLING_PROJECT_OVERRIDE" { + reqHeaders.Set("X-Goog-User-Project", "") + } else { + // Pass the project into this fn instead of parsing it from the URL because + // both project names and URLs can have colons in them. 
+ reqHeaders.Set("X-Goog-User-Project", opt.Project) + } + } + + if opt.Timeout == 0 { + opt.Timeout = DefaultRequestTimeout + } + + var res *http.Response + err := Retry(RetryOptions{ + RetryFunc: func() error { + var buf bytes.Buffer + if opt.Body != nil { + err := json.NewEncoder(&buf).Encode(opt.Body) + if err != nil { + return err + } + } + + u, err := AddQueryParams(opt.RawURL, map[string]string{"alt": "json"}) + if err != nil { + return err + } + req, err := http.NewRequest(opt.Method, u, &buf) + if err != nil { + return err + } + + req.Header = reqHeaders + res, err = opt.Config.Client.Do(req) + if err != nil { + return err + } + + if err := googleapi.CheckResponse(res); err != nil { + googleapi.CloseBody(res) + return err + } + + return nil + }, + Timeout: opt.Timeout, + ErrorRetryPredicates: opt.ErrorRetryPredicates, + ErrorAbortPredicates: opt.ErrorAbortPredicates, + }) + if err != nil { + return nil, err + } + + if res == nil { + return nil, fmt.Errorf("Unable to parse server response. This is most likely a terraform problem, please file a bug at https://github.com/hashicorp/terraform-provider-google/issues.") + } + + // The defer call must be made outside of the retryFunc otherwise it's closed too soon. + defer googleapi.CloseBody(res) + + // 204 responses will have no body, so we're going to error with "EOF" if we + // try to parse it. Instead, we can just return nil. 
+ if res.StatusCode == 204 { + return nil, nil + } + result := make(map[string]interface{}) + if err := json.NewDecoder(res.Body).Decode(&result); err != nil { + return nil, err + } + + return result, nil +} + +func AddQueryParams(rawurl string, params map[string]string) (string, error) { + u, err := url.Parse(rawurl) + if err != nil { + return "", err + } + q := u.Query() + for k, v := range params { + q.Set(k, v) + } + u.RawQuery = q.Encode() + return u.String(), nil +} + +func HandleNotFoundError(err error, d *schema.ResourceData, resource string) error { + if IsGoogleApiErrorWithCode(err, 404) { + log.Printf("[WARN] Removing %s because it's gone", resource) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return errwrap.Wrapf( + fmt.Sprintf("Error when reading or editing %s: {{err}}", resource), err) +} + +func IsGoogleApiErrorWithCode(err error, errCode int) bool { + gerr, ok := errwrap.GetType(err, &googleapi.Error{}).(*googleapi.Error) + return ok && gerr != nil && gerr.Code == errCode +} + +func IsApiNotEnabledError(err error) bool { + gerr, ok := errwrap.GetType(err, &googleapi.Error{}).(*googleapi.Error) + if !ok { + return false + } + if gerr == nil { + return false + } + if gerr.Code != 403 { + return false + } + for _, e := range gerr.Errors { + if e.Reason == "accessNotConfigured" { + return true + } + } + return false +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/transport_test_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/transport_test_utils.go new file mode 100644 index 0000000000..0caefa9fbc --- /dev/null +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/transport/transport_test_utils.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package transport + +type TimeoutError struct { + timeout bool +} + +func (e *TimeoutError) Timeout() bool { + return e.timeout +} + +func (e *TimeoutError) Error() string { + return "timeout error" +} + +var TimeoutErr = &TimeoutError{timeout: true} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/utils.go deleted file mode 100644 index 92c89a698b..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/utils.go +++ /dev/null @@ -1,620 +0,0 @@ -// Contains functions that don't really belong anywhere else. - -package google - -import ( - "fmt" - "log" - "os" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "github.com/hashicorp/errwrap" - fwDiags "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "google.golang.org/api/googleapi" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type TerraformResourceDataChange interface { - GetChange(string) (interface{}, interface{}) -} - -type TerraformResourceData interface { - HasChange(string) bool - GetOkExists(string) (interface{}, bool) - GetOk(string) (interface{}, bool) - Get(string) interface{} - Set(string, interface{}) error - SetId(string) - Id() string - GetProviderMeta(interface{}) error - Timeout(key string) time.Duration -} - -type TerraformResourceDiff interface { - HasChange(string) bool - GetChange(string) (interface{}, interface{}) - Get(string) interface{} - GetOk(string) (interface{}, bool) - Clear(string) error - ForceNew(string) error -} - -// getRegionFromZone returns the region from a zone for 
Google cloud. -func getRegionFromZone(zone string) string { - if zone != "" && len(zone) > 2 { - region := zone[:len(zone)-2] - return region - } - return "" -} - -// Infers the region based on the following (in order of priority): -// - `region` field in resource schema -// - region extracted from the `zone` field in resource schema -// - provider-level region -// - region extracted from the provider-level zone -func getRegion(d TerraformResourceData, config *Config) (string, error) { - return getRegionFromSchema("region", "zone", d, config) -} - -// getProject reads the "project" field from the given resource data and falls -// back to the provider's value if not given. If the provider's value is not -// given, an error is returned. -func getProject(d TerraformResourceData, config *Config) (string, error) { - return getProjectFromSchema("project", d, config) -} - -// getBillingProject reads the "billing_project" field from the given resource data and falls -// back to the provider's value if not given. If no value is found, an error is returned. -func getBillingProject(d TerraformResourceData, config *Config) (string, error) { - return getBillingProjectFromSchema("billing_project", d, config) -} - -// getProjectFromDiff reads the "project" field from the given diff and falls -// back to the provider's value if not given. If the provider's value is not -// given, an error is returned. 
-func getProjectFromDiff(d *schema.ResourceDiff, config *Config) (string, error) { - res, ok := d.GetOk("project") - if ok { - return res.(string), nil - } - if config.Project != "" { - return config.Project, nil - } - return "", fmt.Errorf("%s: required field is not set", "project") -} - -func getRouterLockName(region string, router string) string { - return fmt.Sprintf("router/%s/%s", region, router) -} - -func handleNotFoundError(err error, d *schema.ResourceData, resource string) error { - if IsGoogleApiErrorWithCode(err, 404) { - log.Printf("[WARN] Removing %s because it's gone", resource) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return errwrap.Wrapf( - fmt.Sprintf("Error when reading or editing %s: {{err}}", resource), err) -} - -func IsGoogleApiErrorWithCode(err error, errCode int) bool { - gerr, ok := errwrap.GetType(err, &googleapi.Error{}).(*googleapi.Error) - return ok && gerr != nil && gerr.Code == errCode -} - -func isApiNotEnabledError(err error) bool { - gerr, ok := errwrap.GetType(err, &googleapi.Error{}).(*googleapi.Error) - if !ok { - return false - } - if gerr == nil { - return false - } - if gerr.Code != 403 { - return false - } - for _, e := range gerr.Errors { - if e.Reason == "accessNotConfigured" { - return true - } - } - return false -} - -func isFailedPreconditionError(err error) bool { - gerr, ok := errwrap.GetType(err, &googleapi.Error{}).(*googleapi.Error) - if !ok { - return false - } - if gerr == nil { - return false - } - if gerr.Code != 400 { - return false - } - for _, e := range gerr.Errors { - if e.Reason == "failedPrecondition" { - return true - } - } - return false -} - -func isConflictError(err error) bool { - if e, ok := err.(*googleapi.Error); ok && (e.Code == 409 || e.Code == 412) { - return true - } else if !ok && errwrap.ContainsType(err, &googleapi.Error{}) { - e := errwrap.GetType(err, &googleapi.Error{}).(*googleapi.Error) - if e.Code == 409 || e.Code == 412 { - return true - } - } - 
return false -} - -// gRPC does not return errors of type *googleapi.Error. Instead the errors returned are *status.Error. -// See the types of codes returned here (https://pkg.go.dev/google.golang.org/grpc/codes#Code). -func isNotFoundGrpcError(err error) bool { - if errorStatus, ok := status.FromError(err); ok && errorStatus.Code() == codes.NotFound { - return true - } - return false -} - -// expandLabels pulls the value of "labels" out of a TerraformResourceData as a map[string]string. -func expandLabels(d TerraformResourceData) map[string]string { - return expandStringMap(d, "labels") -} - -// expandEnvironmentVariables pulls the value of "environment_variables" out of a schema.ResourceData as a map[string]string. -func expandEnvironmentVariables(d *schema.ResourceData) map[string]string { - return expandStringMap(d, "environment_variables") -} - -// expandBuildEnvironmentVariables pulls the value of "build_environment_variables" out of a schema.ResourceData as a map[string]string. -func expandBuildEnvironmentVariables(d *schema.ResourceData) map[string]string { - return expandStringMap(d, "build_environment_variables") -} - -// expandStringMap pulls the value of key out of a TerraformResourceData as a map[string]string. 
-func expandStringMap(d TerraformResourceData, key string) map[string]string { - v, ok := d.GetOk(key) - - if !ok { - return map[string]string{} - } - - return convertStringMap(v.(map[string]interface{})) -} - -func convertStringMap(v map[string]interface{}) map[string]string { - m := make(map[string]string) - for k, val := range v { - m[k] = val.(string) - } - return m -} - -func convertStringArr(ifaceArr []interface{}) []string { - return convertAndMapStringArr(ifaceArr, func(s string) string { return s }) -} - -func convertAndMapStringArr(ifaceArr []interface{}, f func(string) string) []string { - var arr []string - for _, v := range ifaceArr { - if v == nil { - continue - } - arr = append(arr, f(v.(string))) - } - return arr -} - -func mapStringArr(original []string, f func(string) string) []string { - var arr []string - for _, v := range original { - arr = append(arr, f(v)) - } - return arr -} - -func convertStringArrToInterface(strs []string) []interface{} { - arr := make([]interface{}, len(strs)) - for i, str := range strs { - arr[i] = str - } - return arr -} - -func convertStringSet(set *schema.Set) []string { - s := make([]string, 0, set.Len()) - for _, v := range set.List() { - s = append(s, v.(string)) - } - sort.Strings(s) - - return s -} - -func golangSetFromStringSlice(strings []string) map[string]struct{} { - set := map[string]struct{}{} - for _, v := range strings { - set[v] = struct{}{} - } - - return set -} - -func stringSliceFromGolangSet(sset map[string]struct{}) []string { - ls := make([]string, 0, len(sset)) - for s := range sset { - ls = append(ls, s) - } - sort.Strings(ls) - - return ls -} - -func reverseStringMap(m map[string]string) map[string]string { - o := map[string]string{} - for k, v := range m { - o[v] = k - } - return o -} - -func mergeStringMaps(a, b map[string]string) map[string]string { - merged := make(map[string]string) - - for k, v := range a { - merged[k] = v - } - - for k, v := range b { - merged[k] = v - } - - return 
merged -} - -func mergeSchemas(a, b map[string]*schema.Schema) map[string]*schema.Schema { - merged := make(map[string]*schema.Schema) - - for k, v := range a { - merged[k] = v - } - - for k, v := range b { - merged[k] = v - } - - return merged -} - -func mergeResourceMaps(ms ...map[string]*schema.Resource) (map[string]*schema.Resource, error) { - merged := make(map[string]*schema.Resource) - duplicates := []string{} - - for _, m := range ms { - for k, v := range m { - if _, ok := merged[k]; ok { - duplicates = append(duplicates, k) - } - - merged[k] = v - } - } - - var err error - if len(duplicates) > 0 { - err = fmt.Errorf("saw duplicates in mergeResourceMaps: %v", duplicates) - } - - return merged, err -} - -func StringToFixed64(v string) (int64, error) { - return strconv.ParseInt(v, 10, 64) -} - -func extractFirstMapConfig(m []interface{}) map[string]interface{} { - if len(m) == 0 || m[0] == nil { - return map[string]interface{}{} - } - - return m[0].(map[string]interface{}) -} - -func lockedCall(lockKey string, f func() error) error { - mutexKV.Lock(lockKey) - defer mutexKV.Unlock(lockKey) - - return f() -} - -// This is a Printf sibling (Nprintf; Named Printf), which handles strings like -// Nprintf("Hello %{target}!", map[string]interface{}{"target":"world"}) == "Hello world!". -// This is particularly useful for generated tests, where we don't want to use Printf, -// since that would require us to generate a very particular ordering of arguments. 
-func Nprintf(format string, params map[string]interface{}) string { - for key, val := range params { - format = strings.Replace(format, "%{"+key+"}", fmt.Sprintf("%v", val), -1) - } - return format -} - -// serviceAccountFQN will attempt to generate the fully qualified name in the format of: -// "projects/(-|)/serviceAccounts/@.iam.gserviceaccount.com" -// A project is required if we are trying to build the FQN from a service account id and -// and error will be returned in this case if no project is set in the resource or the -// provider-level config -func serviceAccountFQN(serviceAccount string, d TerraformResourceData, config *Config) (string, error) { - // If the service account id is already the fully qualified name - if strings.HasPrefix(serviceAccount, "projects/") { - return serviceAccount, nil - } - - // If the service account id is an email - if strings.Contains(serviceAccount, "@") { - return "projects/-/serviceAccounts/" + serviceAccount, nil - } - - // Get the project from the resource or fallback to the project - // in the provider configuration - project, err := getProject(d, config) - if err != nil { - return "", err - } - - return fmt.Sprintf("projects/-/serviceAccounts/%s@%s.iam.gserviceaccount.com", serviceAccount, project), nil -} - -func paginatedListRequest(project, baseUrl, userAgent string, config *Config, flattener func(map[string]interface{}) []interface{}) ([]interface{}, error) { - res, err := SendRequest(config, "GET", project, baseUrl, userAgent, nil) - if err != nil { - return nil, err - } - - ls := flattener(res) - pageToken, ok := res["pageToken"] - for ok { - if pageToken.(string) == "" { - break - } - url := fmt.Sprintf("%s?pageToken=%s", baseUrl, pageToken.(string)) - res, err = SendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return nil, err - } - ls = append(ls, flattener(res)) - pageToken, ok = res["pageToken"] - } - - return ls, nil -} - -func getInterconnectAttachmentLink(config *Config, 
project, region, ic, userAgent string) (string, error) { - if !strings.Contains(ic, "/") { - icData, err := config.NewComputeClient(userAgent).InterconnectAttachments.Get( - project, region, ic).Do() - if err != nil { - return "", fmt.Errorf("Error reading interconnect attachment: %s", err) - } - ic = icData.SelfLink - } - - return ic, nil -} - -// Given two sets of references (with "from" values in self link form), -// determine which need to be added or removed // during an update using -// addX/removeX APIs. -func calcAddRemove(from []string, to []string) (add, remove []string) { - add = make([]string, 0) - remove = make([]string, 0) - for _, u := range to { - found := false - for _, v := range from { - if compareSelfLinkOrResourceName("", v, u, nil) { - found = true - break - } - } - if !found { - add = append(add, u) - } - } - for _, u := range from { - found := false - for _, v := range to { - if compareSelfLinkOrResourceName("", u, v, nil) { - found = true - break - } - } - if !found { - remove = append(remove, u) - } - } - return add, remove -} - -func stringInSlice(arr []string, str string) bool { - for _, i := range arr { - if i == str { - return true - } - } - - return false -} - -func migrateStateNoop(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { - return is, nil -} - -func expandString(v interface{}, d TerraformResourceData, config *Config) (string, error) { - return v.(string), nil -} - -func changeFieldSchemaToForceNew(sch *schema.Schema) { - sch.ForceNew = true - switch sch.Type { - case schema.TypeList: - case schema.TypeSet: - if nestedR, ok := sch.Elem.(*schema.Resource); ok { - for _, nestedSch := range nestedR.Schema { - changeFieldSchemaToForceNew(nestedSch) - } - } - } -} - -func generateUserAgentString(d TerraformResourceData, currentUserAgent string) (string, error) { - var m providerMeta - - err := d.GetProviderMeta(&m) - if err != nil { - return currentUserAgent, err - } - - if m.ModuleName != 
"" { - return strings.Join([]string{currentUserAgent, m.ModuleName}, " "), nil - } - - return currentUserAgent, nil -} - -func SnakeToPascalCase(s string) string { - split := strings.Split(s, "_") - for i := range split { - split[i] = strings.Title(split[i]) - } - return strings.Join(split, "") -} - -func MultiEnvSearch(ks []string) string { - for _, k := range ks { - if v := os.Getenv(k); v != "" { - return v - } - } - return "" -} - -func GetCurrentUserEmail(config *Config, userAgent string) (string, error) { - // When environment variables UserProjectOverride and BillingProject are set for the provider, - // the header X-Goog-User-Project is set for the API requests. - // But it causes an error when calling GetCurrentUserEmail. Set the project to be "NO_BILLING_PROJECT_OVERRIDE". - // And then it triggers the header X-Goog-User-Project to be set to empty string. - - // See https://github.com/golang/oauth2/issues/306 for a recommendation to do this from a Go maintainer - // URL retrieved from https://accounts.google.com/.well-known/openid-configuration - res, err := SendRequest(config, "GET", "NO_BILLING_PROJECT_OVERRIDE", "https://openidconnect.googleapis.com/v1/userinfo", userAgent, nil) - - if err != nil { - return "", fmt.Errorf("error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) - } - if res["email"] == nil { - return "", fmt.Errorf("error retrieving email from userinfo. 
email was nil in the response.") - } - return res["email"].(string), nil -} - -func checkStringMap(v interface{}) map[string]string { - m, ok := v.(map[string]string) - if ok { - return m - } - return convertStringMap(v.(map[string]interface{})) -} - -// return a fake 404 so requests get retried or nested objects are considered deleted -func fake404(reasonResourceType, resourceName string) *googleapi.Error { - return &googleapi.Error{ - Code: 404, - Message: fmt.Sprintf("%v object %v not found", reasonResourceType, resourceName), - } -} - -// validate name of the gcs bucket. Guidelines are located at https://cloud.google.com/storage/docs/naming-buckets -// this does not attempt to check for IP addresses or close misspellings of "google" -func checkGCSName(name string) error { - if strings.HasPrefix(name, "goog") { - return fmt.Errorf("error: bucket name %s cannot start with %q", name, "goog") - } - - if strings.Contains(name, "google") { - return fmt.Errorf("error: bucket name %s cannot contain %q", name, "google") - } - - valid, _ := regexp.MatchString("^[a-z0-9][a-z0-9_.-]{1,220}[a-z0-9]$", name) - if !valid { - return fmt.Errorf("error: bucket name validation failed %v. 
See https://cloud.google.com/storage/docs/naming-buckets", name) - } - - for _, str := range strings.Split(name, ".") { - valid, _ := regexp.MatchString("^[a-z0-9_-]{1,63}$", str) - if !valid { - return fmt.Errorf("error: bucket name validation failed %v", str) - } - } - return nil -} - -// checkGoogleIamPolicy makes assertions about the contents of a google_iam_policy data source's policy_data attribute -func checkGoogleIamPolicy(value string) error { - if strings.Contains(value, "\"description\":\"\"") { - return fmt.Errorf("found an empty description field (should be omitted) in google_iam_policy data source: %s", value) - } - return nil -} - -// Retries an operation while the canonical error code is FAILED_PRECONDTION -// which indicates there is an incompatible operation already running on the -// cluster. This error can be safely retried until the incompatible operation -// completes, and the newly requested operation can begin. -func retryWhileIncompatibleOperation(timeout time.Duration, lockKey string, f func() error) error { - return resource.Retry(timeout, func() *resource.RetryError { - if err := lockedCall(lockKey, f); err != nil { - if isFailedPreconditionError(err) { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - return nil - }) -} - -func frameworkDiagsToSdkDiags(fwD fwDiags.Diagnostics) diag.Diagnostics { - var diags diag.Diagnostics - for _, e := range fwD.Errors() { - diags = append(diags, diag.Diagnostic{ - Detail: e.Detail(), - Severity: diag.Error, - Summary: e.Summary(), - }) - } - for _, w := range fwD.Warnings() { - diags = append(diags, diag.Diagnostic{ - Detail: w.Detail(), - Severity: diag.Warning, - Summary: w.Summary(), - }) - } - - return diags -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/vcr_utils.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/vcr_utils.go deleted file mode 100644 index 
c7c257e444..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/vcr_utils.go +++ /dev/null @@ -1,380 +0,0 @@ -package google - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "math/rand" - "net/http" - "os" - "path/filepath" - "reflect" - "regexp" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/dnaeon/go-vcr/cassette" - "github.com/dnaeon/go-vcr/recorder" - - fwDiags "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-go/tfprotov5" - "github.com/hashicorp/terraform-plugin-log/tflog" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -var configsLock = sync.RWMutex{} -var sourcesLock = sync.RWMutex{} - -var configs map[string]*Config - -var sources map[string]VcrSource - -// VcrSource is a source for a given VCR test with the value that seeded it -type VcrSource struct { - seed int64 - source rand.Source -} - -func isVcrEnabled() bool { - envPath := os.Getenv("VCR_PATH") - vcrMode := os.Getenv("VCR_MODE") - return envPath != "" && vcrMode != "" -} - -// Produces a rand.Source for VCR testing based on the given mode. 
-// In RECORDING mode, generates a new seed and saves it to a file, using the seed for the source -// In REPLAYING mode, reads a seed from a file and creates a source from it -func vcrSource(t *testing.T, path, mode string) (*VcrSource, error) { - sourcesLock.RLock() - s, ok := sources[t.Name()] - sourcesLock.RUnlock() - if ok { - return &s, nil - } - tflog.Debug(context.Background(), fmt.Sprintf("VCR_MODE: %s", mode)) - switch mode { - case "RECORDING": - seed := rand.Int63() - s := rand.NewSource(seed) - vcrSource := VcrSource{seed: seed, source: s} - sourcesLock.Lock() - sources[t.Name()] = vcrSource - sourcesLock.Unlock() - return &vcrSource, nil - case "REPLAYING": - seed, err := readSeedFromFile(vcrSeedFile(path, t.Name())) - if err != nil { - return nil, fmt.Errorf("no cassette found on disk for %s, please replay this testcase in recording mode - %w", t.Name(), err) - } - s := rand.NewSource(seed) - vcrSource := VcrSource{seed: seed, source: s} - sourcesLock.Lock() - sources[t.Name()] = vcrSource - sourcesLock.Unlock() - return &vcrSource, nil - default: - log.Printf("[DEBUG] No valid environment var set for VCR_MODE, expected RECORDING or REPLAYING, skipping VCR. 
VCR_MODE: %s", mode) - return nil, errors.New("No valid VCR_MODE set") - } -} - -func readSeedFromFile(fileName string) (int64, error) { - // Max number of digits for int64 is 19 - data := make([]byte, 19) - f, err := os.Open(fileName) - if err != nil { - return 0, err - } - defer f.Close() - _, err = f.Read(data) - if err != nil { - return 0, err - } - // Remove NULL characters from seed - data = bytes.Trim(data, "\x00") - seed := string(data) - return StringToFixed64(seed) -} - -func writeSeedToFile(seed int64, fileName string) error { - f, err := os.Create(fileName) - if err != nil { - return err - } - defer f.Close() - _, err = f.WriteString(strconv.FormatInt(seed, 10)) - if err != nil { - return err - } - return nil -} - -// Retrieves a unique test name used for writing files -// replaces all `/` characters that would cause filepath issues -// This matters during tests that dispatch multiple tests, for example TestAccLoggingFolderExclusion -func vcrSeedFile(path, name string) string { - return filepath.Join(path, fmt.Sprintf("%s.seed", vcrFileName(name))) -} - -func vcrFileName(name string) string { - return strings.ReplaceAll(name, "/", "_") -} - -// VcrTest is a wrapper for resource.Test to swap out providers for VCR providers and handle VCR specific things -// Can be called when VCR is not enabled, and it will behave as normal -func VcrTest(t *testing.T, c resource.TestCase) { - if isVcrEnabled() { - defer closeRecorder(t) - } else if isReleaseDiffEnabled() { - c = initializeReleaseDiffTest(c, t.Name()) - } - resource.Test(t, c) -} - -// We need to explicitly close the VCR recorder to save the cassette -func closeRecorder(t *testing.T) { - configsLock.RLock() - config, ok := configs[t.Name()] - configsLock.RUnlock() - if ok { - // We did not cache the config if it does not use VCR - if !t.Failed() && isVcrEnabled() { - // If a test succeeds, write new seed/yaml to files - err := config.Client.Transport.(*recorder.Recorder).Stop() - if err != nil { - 
t.Error(err) - } - envPath := os.Getenv("VCR_PATH") - - sourcesLock.RLock() - vcrSource, ok := sources[t.Name()] - sourcesLock.RUnlock() - if ok { - err = writeSeedToFile(vcrSource.seed, vcrSeedFile(envPath, t.Name())) - if err != nil { - t.Error(err) - } - } - } - // Clean up test config - configsLock.Lock() - delete(configs, t.Name()) - configsLock.Unlock() - - sourcesLock.Lock() - delete(sources, t.Name()) - sourcesLock.Unlock() - } -} - -func isReleaseDiffEnabled() bool { - releaseDiff := os.Getenv("RELEASE_DIFF") - return releaseDiff != "" -} - -func initializeReleaseDiffTest(c resource.TestCase, testName string) resource.TestCase { - var releaseProvider string - packagePath := fmt.Sprint(reflect.TypeOf(Config{}).PkgPath()) - if strings.Contains(packagePath, "google-beta") { - releaseProvider = "google-beta" - } else { - releaseProvider = "google" - } - - if c.ExternalProviders != nil { - c.ExternalProviders[releaseProvider] = resource.ExternalProvider{} - } else { - c.ExternalProviders = map[string]resource.ExternalProvider{ - releaseProvider: {}, - } - } - - localProviderName := "google-local" - if c.Providers != nil { - c.Providers = map[string]*schema.Provider{ - localProviderName: GetSDKProvider(testName), - } - c.ProtoV5ProviderFactories = map[string]func() (tfprotov5.ProviderServer, error){ - localProviderName: func() (tfprotov5.ProviderServer, error) { - return nil, nil - }, - } - } else { - c.Providers = map[string]*schema.Provider{ - localProviderName: {}, - } - c.ProtoV5ProviderFactories = map[string]func() (tfprotov5.ProviderServer, error){ - localProviderName: func() (tfprotov5.ProviderServer, error) { - provider, err := MuxedProviders(testName) - return provider(), err - }, - } - } - - var replacementSteps []resource.TestStep - for _, testStep := range c.Steps { - if testStep.Config != "" { - ogConfig := testStep.Config - testStep.Config = reformConfigWithProvider(ogConfig, localProviderName) - if testStep.ExpectError == nil && testStep.PlanOnly 
== false { - newStep := resource.TestStep{ - Config: reformConfigWithProvider(ogConfig, releaseProvider), - } - testStep.PlanOnly = true - testStep.ExpectNonEmptyPlan = false - replacementSteps = append(replacementSteps, newStep) - } - replacementSteps = append(replacementSteps, testStep) - } else { - replacementSteps = append(replacementSteps, testStep) - } - } - - c.Steps = replacementSteps - - return c -} - -func reformConfigWithProvider(config, provider string) string { - configBytes := []byte(config) - providerReplacement := fmt.Sprintf("provider = %s", provider) - providerReplacementBytes := []byte(providerReplacement) - providerBlock := regexp.MustCompile(`provider *=.*google-beta.*`) - - if providerBlock.Match(configBytes) { - return string(providerBlock.ReplaceAll(configBytes, providerReplacementBytes)) - } - - providerReplacement = fmt.Sprintf("${1}\n\t%s", providerReplacement) - providerReplacementBytes = []byte(providerReplacement) - resourceHeader := regexp.MustCompile(`(resource .*google_.* .*\w+.*\{.*)`) - return string(resourceHeader.ReplaceAll(configBytes, providerReplacementBytes)) -} - -func HandleVCRConfiguration(ctx context.Context, testName string, rndTripper http.RoundTripper, pollInterval time.Duration) (time.Duration, http.RoundTripper, fwDiags.Diagnostics) { - var diags fwDiags.Diagnostics - var vcrMode recorder.Mode - switch vcrEnv := os.Getenv("VCR_MODE"); vcrEnv { - case "RECORDING": - vcrMode = recorder.ModeRecording - case "REPLAYING": - vcrMode = recorder.ModeReplaying - // When replaying, set the poll interval low to speed up tests - pollInterval = 10 * time.Millisecond - default: - tflog.Debug(ctx, fmt.Sprintf("No valid environment var set for VCR_MODE, expected RECORDING or REPLAYING, skipping VCR. 
VCR_MODE: %s", vcrEnv)) - return pollInterval, rndTripper, diags - } - - envPath := os.Getenv("VCR_PATH") - if envPath == "" { - tflog.Debug(ctx, "No environment var set for VCR_PATH, skipping VCR") - return pollInterval, rndTripper, diags - } - path := filepath.Join(envPath, vcrFileName(testName)) - - rec, err := recorder.NewAsMode(path, vcrMode, rndTripper) - if err != nil { - diags.AddError("error creating record as new mode", err.Error()) - return pollInterval, rndTripper, diags - } - // Defines how VCR will match requests to responses. - rec.SetMatcher(func(r *http.Request, i cassette.Request) bool { - // Default matcher compares method and URL only - if !cassette.DefaultMatcher(r, i) { - return false - } - if r.Body == nil { - return true - } - contentType := r.Header.Get("Content-Type") - // If body contains media, don't try to compare - if strings.Contains(contentType, "multipart/related") { - return true - } - - var b bytes.Buffer - if _, err := b.ReadFrom(r.Body); err != nil { - tflog.Debug(ctx, fmt.Sprintf("Failed to read request body from cassette: %v", err)) - return false - } - r.Body = ioutil.NopCloser(&b) - reqBody := b.String() - // If body matches identically, we are done - if reqBody == i.Body { - return true - } - - // JSON might be the same, but reordered. 
Try parsing json and comparing - if strings.Contains(contentType, "application/json") { - var reqJson, cassetteJson interface{} - if err := json.Unmarshal([]byte(reqBody), &reqJson); err != nil { - tflog.Debug(ctx, fmt.Sprintf("Failed to unmarshall request json: %v", err)) - return false - } - if err := json.Unmarshal([]byte(i.Body), &cassetteJson); err != nil { - tflog.Debug(ctx, fmt.Sprintf("Failed to unmarshall cassette json: %v", err)) - return false - } - return reflect.DeepEqual(reqJson, cassetteJson) - } - return false - }) - - return pollInterval, rec, diags -} - -// GetSDKProvider gets the SDK provider with an overwritten configure function to be called by MuxedProviders -func GetSDKProvider(testName string) *schema.Provider { - prov := Provider() - if isVcrEnabled() { - old := prov.ConfigureContextFunc - prov.ConfigureContextFunc = func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { - return getCachedConfig(ctx, d, old, testName) - } - } else { - log.Print("[DEBUG] VCR_PATH or VCR_MODE not set, skipping VCR") - } - return prov -} - -// Returns a cached config if VCR testing is enabled. This enables us to use a single HTTP transport -// for a given test, allowing for recording of HTTP interactions. 
-// Why this exists: schema.Provider.ConfigureFunc is called multiple times for a given test -// ConfigureFunc on our provider creates a new HTTP client and sets base paths (config.go LoadAndValidate) -// VCR requires a single HTTP client to handle all interactions so it can record and replay responses so -// this caches HTTP clients per test by replacing ConfigureFunc -func getCachedConfig(ctx context.Context, d *schema.ResourceData, configureFunc schema.ConfigureContextFunc, testName string) (*Config, diag.Diagnostics) { - configsLock.RLock() - v, ok := configs[testName] - configsLock.RUnlock() - if ok { - return v, nil - } - c, diags := configureFunc(ctx, d) - if diags.HasError() { - return nil, diags - } - - var fwD fwDiags.Diagnostics - config := c.(*Config) - config.PollInterval, config.Client.Transport, fwD = HandleVCRConfiguration(ctx, testName, config.Client.Transport, config.PollInterval) - if fwD.HasError() { - diags = append(diags, frameworkDiagsToSdkDiags(fwD)...) - return nil, diags - } - - configsLock.Lock() - configs[testName] = config - configsLock.Unlock() - return config, nil -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/path_or_contents.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/verify/path_or_contents.go similarity index 84% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/path_or_contents.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/verify/path_or_contents.go index 45bf0ebda2..c277db03ad 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/path_or_contents.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/verify/path_or_contents.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package verify import ( "io/ioutil" @@ -13,7 +15,7 @@ import ( // // The boolean second return value can be called `wasPath` - it indicates if a // path was detected and a file loaded. -func pathOrContents(poc string) (string, bool, error) { +func PathOrContents(poc string) (string, bool, error) { if len(poc) == 0 { return poc, false, nil } diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/validation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/verify/validation.go similarity index 83% rename from terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/validation.go rename to terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/verify/validation.go index 8929335264..2d3768197f 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/validation.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/verify/validation.go @@ -1,4 +1,6 @@ -package google +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package verify import ( "encoding/base64" @@ -76,7 +78,7 @@ var ( GcpRouterPartnerAsn = int64(16550) ) -var rfc1918Networks = []string{ +var Rfc1918Networks = []string{ "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", @@ -84,13 +86,13 @@ var rfc1918Networks = []string{ // validateGCEName ensures that a field matches the requirements for Compute Engine resource names // https://cloud.google.com/compute/docs/naming-resources#resource-name-format -func validateGCEName(v interface{}, k string) (ws []string, errors []error) { +func ValidateGCEName(v interface{}, k string) (ws []string, errors []error) { re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$` - return validateRegexp(re)(v, k) + return ValidateRegexp(re)(v, k) } // Ensure that the BGP ASN value of Cloud Router is a valid value as per RFC6996 or a value of 16550 -func validateRFC6996Asn(v interface{}, k string) (ws []string, errors []error) { +func ValidateRFC6996Asn(v interface{}, k string) (ws []string, errors []error) { value := int64(v.(int)) if !(value >= Rfc6996Asn16BitMin && value <= Rfc6996Asn16BitMax) && !(value >= Rfc6996Asn32BitMin && value <= Rfc6996Asn32BitMax) && @@ -102,23 +104,11 @@ or be the value of [%d], got %d`, k, GcpRouterPartnerAsn, value)) return } -func validateRegexp(re string) schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(re).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q (%q) doesn't match regexp %q", k, value, re)) - } - - return - } -} - -func validateEnum(values []string) schema.SchemaValidateFunc { +func ValidateEnum(values []string) schema.SchemaValidateFunc { return validation.StringInSlice(values, false) } -func validateRFC1918Network(min, max int) schema.SchemaValidateFunc { +func ValidateRFC1918Network(min, max int) schema.SchemaValidateFunc { return func(i interface{}, k string) (s []string, es []error) { s, es = 
validation.IsCIDRNetwork(min, max)(i, k) @@ -128,7 +118,7 @@ func validateRFC1918Network(min, max int) schema.SchemaValidateFunc { v, _ := i.(string) ip, _, _ := net.ParseCIDR(v) - for _, c := range rfc1918Networks { + for _, c := range Rfc1918Networks { if _, ipnet, _ := net.ParseCIDR(c); ipnet.Contains(ip) { return } @@ -140,7 +130,7 @@ func validateRFC1918Network(min, max int) schema.SchemaValidateFunc { } } -func validateRFC3339Time(v interface{}, k string) (warnings []string, errors []error) { +func ValidateRFC3339Time(v interface{}, k string) (warnings []string, errors []error) { time := v.(string) if len(time) != 5 || time[2] != ':' { errors = append(errors, fmt.Errorf("%q (%q) must be in the format HH:mm (RFC3339)", k, time)) @@ -157,7 +147,7 @@ func validateRFC3339Time(v interface{}, k string) (warnings []string, errors []e return } -func validateRFC1035Name(min, max int) schema.SchemaValidateFunc { +func ValidateRFC1035Name(min, max int) schema.SchemaValidateFunc { if min < 2 || max < min { return func(i interface{}, k string) (s []string, errors []error) { if min < 2 { @@ -170,10 +160,10 @@ func validateRFC1035Name(min, max int) schema.SchemaValidateFunc { } } - return validateRegexp(fmt.Sprintf("^"+RFC1035NameTemplate+"$", min-2, max-2)) + return ValidateRegexp(fmt.Sprintf("^"+RFC1035NameTemplate+"$", min-2, max-2)) } -func validateIpCidrRange(v interface{}, k string) (warnings []string, errors []error) { +func ValidateIpCidrRange(v interface{}, k string) (warnings []string, errors []error) { _, _, err := net.ParseCIDR(v.(string)) if err != nil { errors = append(errors, fmt.Errorf("%q is not a valid IP CIDR range: %s", k, err)) @@ -181,7 +171,7 @@ func validateIpCidrRange(v interface{}, k string) (warnings []string, errors []e return } -func validateIAMCustomRoleID(v interface{}, k string) (warnings []string, errors []error) { +func ValidateIAMCustomRoleID(v interface{}, k string) (warnings []string, errors []error) { value := v.(string) if 
!regexp.MustCompile(IAMCustomRoleIDRegex).MatchString(value) { errors = append(errors, fmt.Errorf( @@ -190,7 +180,7 @@ func validateIAMCustomRoleID(v interface{}, k string) (warnings []string, errors return } -func orEmpty(f schema.SchemaValidateFunc) schema.SchemaValidateFunc { +func OrEmpty(f schema.SchemaValidateFunc) schema.SchemaValidateFunc { return func(i interface{}, k string) ([]string, []error) { v, ok := i.(string) if ok && v == "" { @@ -200,7 +190,7 @@ func orEmpty(f schema.SchemaValidateFunc) schema.SchemaValidateFunc { } } -func validateProjectID() schema.SchemaValidateFunc { +func ValidateProjectID() schema.SchemaValidateFunc { return func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) @@ -212,7 +202,7 @@ func validateProjectID() schema.SchemaValidateFunc { } } -func validateDSProjectID() schema.SchemaValidateFunc { +func ValidateDSProjectID() schema.SchemaValidateFunc { return func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) ids := strings.Split(value, "/") @@ -226,7 +216,7 @@ func validateDSProjectID() schema.SchemaValidateFunc { } } -func validateProjectName() schema.SchemaValidateFunc { +func ValidateProjectName() schema.SchemaValidateFunc { return func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) @@ -238,7 +228,7 @@ func validateProjectName() schema.SchemaValidateFunc { } } -func validateDuration() schema.SchemaValidateFunc { +func ValidateDuration() schema.SchemaValidateFunc { return func(i interface{}, k string) (s []string, es []error) { v, ok := i.(string) if !ok { @@ -255,7 +245,7 @@ func validateDuration() schema.SchemaValidateFunc { } } -func validateNonNegativeDuration() schema.SchemaValidateFunc { +func ValidateNonNegativeDuration() schema.SchemaValidateFunc { return func(i interface{}, k string) (s []string, es []error) { v, ok := i.(string) if !ok { @@ -278,7 +268,7 @@ func validateNonNegativeDuration() schema.SchemaValidateFunc { } } -func 
validateIpAddress(i interface{}, val string) ([]string, []error) { +func ValidateIpAddress(i interface{}, val string) ([]string, []error) { ip := net.ParseIP(i.(string)) if ip == nil { return nil, []error{fmt.Errorf("could not parse %q to IP address", val)} @@ -286,7 +276,7 @@ func validateIpAddress(i interface{}, val string) ([]string, []error) { return nil, nil } -func validateBase64String(i interface{}, val string) ([]string, []error) { +func ValidateBase64String(i interface{}, val string) ([]string, []error) { _, err := base64.StdEncoding.DecodeString(i.(string)) if err != nil { return nil, []error{fmt.Errorf("could not decode %q as a valid base64 value. Please use the terraform base64 functions such as base64encode() or filebase64() to supply a valid base64 string", val)} @@ -317,7 +307,7 @@ func StringNotInSlice(invalid []string, ignoreCase bool) schema.SchemaValidateFu } // Ensure that hourly timestamp strings "HH:MM" have the minutes zeroed out for hourly only inputs -func validateHourlyOnly(val interface{}, key string) (warns []string, errs []error) { +func ValidateHourlyOnly(val interface{}, key string) (warns []string, errs []error) { v := val.(string) parts := strings.Split(v, ":") if len(parts) != 2 { @@ -336,7 +326,7 @@ func validateHourlyOnly(val interface{}, key string) (warns []string, errs []err return } -func validateRFC3339Date(v interface{}, k string) (warnings []string, errors []error) { +func ValidateRFC3339Date(v interface{}, k string) (warnings []string, errors []error) { _, err := time.Parse(time.RFC3339, v.(string)) if err != nil { errors = append(errors, err) @@ -344,7 +334,7 @@ func validateRFC3339Date(v interface{}, k string) (warnings []string, errors []e return } -func validateADDomainName() schema.SchemaValidateFunc { +func ValidateADDomainName() schema.SchemaValidateFunc { return func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) @@ -355,3 +345,43 @@ func validateADDomainName() 
schema.SchemaValidateFunc { return } } + +func TestStringValidationCases(cases []StringValidationTestCase, validationFunc schema.SchemaValidateFunc) []error { + es := make([]error, 0) + for _, c := range cases { + es = append(es, TestStringValidation(c, validationFunc)...) + } + + return es +} + +func TestStringValidation(testCase StringValidationTestCase, validationFunc schema.SchemaValidateFunc) []error { + _, es := validationFunc(testCase.Value, testCase.TestName) + if testCase.ExpectError { + if len(es) > 0 { + return nil + } else { + return []error{fmt.Errorf("Didn't see expected error in case \"%s\" with string \"%s\"", testCase.TestName, testCase.Value)} + } + } + + return es +} + +type StringValidationTestCase struct { + TestName string + Value string + ExpectError bool +} + +func ValidateRegexp(re string) schema.SchemaValidateFunc { + return func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(re).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) doesn't match regexp %q", k, value, re)) + } + + return + } +} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/vertex_ai_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/vertex_ai_operation.go deleted file mode 100644 index 8e81bfdd2a..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/vertex_ai_operation.go +++ /dev/null @@ -1,64 +0,0 @@ -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type VertexAIOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *VertexAIOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - region := GetRegionFromRegionalSelfLink(w.CommonOperationWaiter.Op.Name) - - // Returns the proper 
get. - url := fmt.Sprintf("https://%s-aiplatform.googleapis.com/v1/%s", region, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createVertexAIWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*VertexAIOperationWaiter, error) { - w := &VertexAIOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func VertexAIOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createVertexAIWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func VertexAIOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createVertexAIWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/vpc_access_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/vpc_access_operation.go deleted file mode 100644 index 6c9f8f4ef2..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/vpc_access_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type VPCAccessOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *VPCAccessOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.VPCAccessBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createVPCAccessWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*VPCAccessOperationWaiter, error) { - w := &VPCAccessOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func VPCAccessOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createVPCAccessWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func VPCAccessOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createVPCAccessWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. 
- return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/workflows_operation.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/workflows_operation.go deleted file mode 100644 index 999519ac71..0000000000 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/google/workflows_operation.go +++ /dev/null @@ -1,75 +0,0 @@ -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package google - -import ( - "encoding/json" - "fmt" - "time" -) - -type WorkflowsOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *WorkflowsOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") - } - // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.WorkflowsBasePath, w.CommonOperationWaiter.Op.Name) - - return SendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createWorkflowsWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*WorkflowsOperationWaiter, error) { - w := &WorkflowsOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -// nolint: deadcode,unused -func WorkflowsOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - w, err := createWorkflowsWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func WorkflowsOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - // This was a synchronous call - there is no operation to wait for. - return nil - } - w, err := createWorkflowsWaiter(config, op, project, activity, userAgent) - if err != nil { - // If w is nil, the op was synchronous. - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/main.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/main.go index 6d8bc701c3..eae0f9a400 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/main.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/main.go @@ -1,3 +1,5 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 package main import ( @@ -5,10 +7,23 @@ import ( "flag" "log" + "github.com/hashicorp/terraform-plugin-framework/providerserver" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server" "github.com/hashicorp/terraform-plugin-mux/tf5muxserver" - "github.com/hashicorp/terraform-provider-google/google" + + "github.com/hashicorp/terraform-provider-google/google/fwprovider" + "github.com/hashicorp/terraform-provider-google/google/provider" + ver "github.com/hashicorp/terraform-provider-google/version" +) + +var ( + // these will be set by the goreleaser configuration + // to appropriate values for the compiled binary + version string = ver.ProviderVersion + + // goreleaser can also pass the specific commit if you want + // commit string = "" ) func main() { @@ -19,7 +34,8 @@ func main() { // concat with sdkv2 provider providers := []func() tfprotov5.ProviderServer{ - google.Provider().GRPCProvider, // sdk provider + providerserver.NewProtocol5(fwprovider.New(version)), // framework provider + provider.Provider().GRPCProvider, // sdk provider } // use the muxer diff --git a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/version/version.go b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/version/version.go index b2d946a5d2..e47ba87d55 100644 --- a/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/version/version.go +++ b/terraform/providers/google/vendor/github.com/hashicorp/terraform-provider-google/version/version.go @@ -1,3 +1,5 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 package version var ( diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/AUTHORS b/terraform/providers/google/vendor/golang.org/x/crypto/AUTHORS deleted file mode 100644 index 2b00ddba0d..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/CONTRIBUTORS b/terraform/providers/google/vendor/golang.org/x/crypto/CONTRIBUTORS deleted file mode 100644 index 1fbd3e976f..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/blowfish/block.go b/terraform/providers/google/vendor/golang.org/x/crypto/blowfish/block.go deleted file mode 100644 index 9d80f19521..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/blowfish/block.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blowfish - -// getNextWord returns the next big-endian uint32 value from the byte slice -// at the given position in a circular manner, updating the position. -func getNextWord(b []byte, pos *int) uint32 { - var w uint32 - j := *pos - for i := 0; i < 4; i++ { - w = w<<8 | uint32(b[j]) - j++ - if j >= len(b) { - j = 0 - } - } - *pos = j - return w -} - -// ExpandKey performs a key expansion on the given *Cipher. 
Specifically, it -// performs the Blowfish algorithm's key schedule which sets up the *Cipher's -// pi and substitution tables for calls to Encrypt. This is used, primarily, -// by the bcrypt package to reuse the Blowfish key schedule during its -// set up. It's unlikely that you need to use this directly. -func ExpandKey(key []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - // Using inlined getNextWord for performance. - var d uint32 - for k := 0; k < 4; k++ { - d = d<<8 | uint32(key[j]) - j++ - if j >= len(key) { - j = 0 - } - } - c.p[i] ^= d - } - - var l, r uint32 - for i := 0; i < 18; i += 2 { - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -// This is similar to ExpandKey, but folds the salt during the key -// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero -// salt passed in, reusing ExpandKey turns out to be a place of inefficiency -// and specializing it here is useful. 
-func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - c.p[i] ^= getNextWord(key, &j) - } - - j = 0 - var l, r uint32 - for i := 0; i < 18; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[0] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + 
c.s3[byte(xr)] ^ c.p[10] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] - xr ^= c.p[17] - return xr, xl -} - -func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[17] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ 
c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] - xr ^= c.p[0] - return xr, xl -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/blowfish/cipher.go b/terraform/providers/google/vendor/golang.org/x/crypto/blowfish/cipher.go deleted file mode 100644 index 213bf204af..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/blowfish/cipher.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. -// -// Blowfish is a legacy cipher and its short block size makes it vulnerable to -// birthday bound attacks (see https://sweet32.info). It should only be used -// where compatibility with legacy systems, not security, is the goal. -// -// Deprecated: any new system should use AES (from crypto/aes, if necessary in -// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from -// golang.org/x/crypto/chacha20poly1305). -package blowfish // import "golang.org/x/crypto/blowfish" - -// The code is a port of Bruce Schneier's C implementation. -// See https://www.schneier.com/blowfish.html. - -import "strconv" - -// The Blowfish block size in bytes. -const BlockSize = 8 - -// A Cipher is an instance of Blowfish encryption using a particular key. -type Cipher struct { - p [18]uint32 - s0, s1, s2, s3 [256]uint32 -} - -type KeySizeError int - -func (k KeySizeError) Error() string { - return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) -} - -// NewCipher creates and returns a Cipher. 
-// The key argument should be the Blowfish key, from 1 to 56 bytes. -func NewCipher(key []byte) (*Cipher, error) { - var result Cipher - if k := len(key); k < 1 || k > 56 { - return nil, KeySizeError(k) - } - initCipher(&result) - ExpandKey(key, &result) - return &result, nil -} - -// NewSaltedCipher creates a returns a Cipher that folds a salt into its key -// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is -// sufficient and desirable. For bcrypt compatibility, the key can be over 56 -// bytes. -func NewSaltedCipher(key, salt []byte) (*Cipher, error) { - if len(salt) == 0 { - return NewCipher(key) - } - var result Cipher - if k := len(key); k < 1 { - return nil, KeySizeError(k) - } - initCipher(&result) - expandKeyWithSalt(key, salt, &result) - return &result, nil -} - -// BlockSize returns the Blowfish block size, 8 bytes. -// It is necessary to satisfy the Block interface in the -// package "crypto/cipher". -func (c *Cipher) BlockSize() int { return BlockSize } - -// Encrypt encrypts the 8-byte buffer src using the key k -// and stores the result in dst. -// Note that for amounts of data larger than a block, -// it is not safe to just call Encrypt on successive blocks; -// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = encryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -// Decrypt decrypts the 8-byte buffer src using the key k -// and stores the result in dst. 
-func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = decryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -func initCipher(c *Cipher) { - copy(c.p[0:], p[0:]) - copy(c.s0[0:], s0[0:]) - copy(c.s1[0:], s1[0:]) - copy(c.s2[0:], s2[0:]) - copy(c.s3[0:], s3[0:]) -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/blowfish/const.go b/terraform/providers/google/vendor/golang.org/x/crypto/blowfish/const.go deleted file mode 100644 index d04077595a..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/blowfish/const.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The startup permutation array and substitution boxes. -// They are the hexadecimal digits of PI; see: -// https://www.schneier.com/code/constants.txt. 
- -package blowfish - -var s0 = [256]uint32{ - 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, - 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, - 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, - 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, - 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, - 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, - 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, - 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, - 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, - 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, - 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, - 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, - 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, - 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, - 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, - 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, - 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, - 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, - 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, - 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, - 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, - 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, - 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, - 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, - 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, - 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, - 0xd542a8f6, 0x287effc3, 
0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, - 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, - 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, - 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, - 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, - 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, - 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, - 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, - 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, - 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, - 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, - 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, - 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, - 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, - 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, - 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, - 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, -} - -var s1 = [256]uint32{ - 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, - 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, - 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, - 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, - 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, - 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, - 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, - 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, - 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, - 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, - 0xe238cd99, 
0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, - 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, - 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, - 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, - 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, - 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, - 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, - 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, - 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, - 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, - 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, - 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, - 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, - 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, - 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, - 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, - 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, - 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, - 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, - 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, - 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, - 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, - 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, - 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, - 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, - 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, - 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, - 0x1dadf43e, 
0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, - 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, - 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, - 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, - 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, - 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, -} - -var s2 = [256]uint32{ - 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, - 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, - 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, - 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, - 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, - 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, - 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, - 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, - 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, - 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, - 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, - 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, - 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, - 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, - 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, - 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, - 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, - 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, - 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, - 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, - 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, - 
0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, - 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, - 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, - 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, - 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, - 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, - 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, - 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, - 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, - 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, - 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, - 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, - 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, - 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, - 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, - 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, - 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, - 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, - 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, - 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, - 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, - 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, -} - -var s3 = [256]uint32{ - 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, - 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, - 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, - 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, - 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, 
- 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, - 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, - 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, - 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, - 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, - 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, - 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, - 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, - 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, - 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, - 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, - 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, - 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, - 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, - 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, - 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, - 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, - 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, - 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, - 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, - 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, - 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, - 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, - 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, - 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, - 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, - 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, - 
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, - 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, - 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, - 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, - 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, - 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, - 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, - 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, - 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, - 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, - 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, -} - -var p = [18]uint32{ - 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, - 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, - 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/cast5/cast5.go b/terraform/providers/google/vendor/golang.org/x/crypto/cast5/cast5.go index ddcbeb6f2a..425e8eecb0 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/cast5/cast5.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/cast5/cast5.go @@ -13,7 +13,10 @@ // golang.org/x/crypto/chacha20poly1305). package cast5 // import "golang.org/x/crypto/cast5" -import "errors" +import ( + "errors" + "math/bits" +) const BlockSize = 8 const KeySize = 16 @@ -241,19 +244,19 @@ func (c *Cipher) keySchedule(in []byte) { // These are the three 'f' functions. See RFC 2144, section 2.2. 
func f1(d, m uint32, r uint8) uint32 { t := m + d - I := (t << r) | (t >> (32 - r)) + I := bits.RotateLeft32(t, int(r)) return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] } func f2(d, m uint32, r uint8) uint32 { t := m ^ d - I := (t << r) | (t >> (32 - r)) + I := bits.RotateLeft32(t, int(r)) return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] } func f3(d, m uint32, r uint8) uint32 { t := m - d - I := (t << r) | (t >> (32 - r)) + I := bits.RotateLeft32(t, int(r)) return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] } diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20/chacha_generic.go index a2ecf5c325..93eb5ae6de 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/chacha20/chacha_generic.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20/chacha_generic.go @@ -12,7 +12,7 @@ import ( "errors" "math/bits" - "golang.org/x/crypto/internal/subtle" + "golang.org/x/crypto/internal/alias" ) const ( @@ -189,7 +189,7 @@ func (s *Cipher) XORKeyStream(dst, src []byte) { panic("chacha20: output smaller than input") } dst = dst[:len(src)] - if subtle.InexactOverlap(dst, src) { + if alias.InexactOverlap(dst, src) { panic("chacha20: invalid buffer overlap") } diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go new file mode 100644 index 0000000000..93da7322bc --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go @@ -0,0 +1,98 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its +// extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and +// draft-irtf-cfrg-xchacha-01. +package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" + +import ( + "crypto/cipher" + "errors" +) + +const ( + // KeySize is the size of the key used by this AEAD, in bytes. + KeySize = 32 + + // NonceSize is the size of the nonce used with the standard variant of this + // AEAD, in bytes. + // + // Note that this is too short to be safely generated at random if the same + // key is reused more than 2³² times. + NonceSize = 12 + + // NonceSizeX is the size of the nonce used with the XChaCha20-Poly1305 + // variant of this AEAD, in bytes. + NonceSizeX = 24 + + // Overhead is the size of the Poly1305 authentication tag, and the + // difference between a ciphertext length and its plaintext. + Overhead = 16 +) + +type chacha20poly1305 struct { + key [KeySize]byte +} + +// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key. 
+func New(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(chacha20poly1305) + copy(ret.key[:], key) + return ret, nil +} + +func (c *chacha20poly1305) NonceSize() int { + return NonceSize +} + +func (c *chacha20poly1305) Overhead() int { + return Overhead +} + +func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Seal") + } + + if uint64(len(plaintext)) > (1<<38)-64 { + panic("chacha20poly1305: plaintext too large") + } + + return c.seal(dst, nonce, plaintext, additionalData) +} + +var errOpen = errors.New("chacha20poly1305: message authentication failed") + +func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Open") + } + if len(ciphertext) < 16 { + return nil, errOpen + } + if uint64(len(ciphertext)) > (1<<38)-48 { + panic("chacha20poly1305: ciphertext too large") + } + + return c.open(dst, nonce, ciphertext, additionalData) +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. 
+func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go new file mode 100644 index 0000000000..0c408c5709 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +package chacha20poly1305 + +import ( + "encoding/binary" + + "golang.org/x/crypto/internal/alias" + "golang.org/x/sys/cpu" +) + +//go:noescape +func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool + +//go:noescape +func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte) + +var ( + useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2 +) + +// setupState writes a ChaCha20 input matrix to state. See +// https://tools.ietf.org/html/rfc7539#section-2.3. 
+func setupState(state *[16]uint32, key *[32]byte, nonce []byte) { + state[0] = 0x61707865 + state[1] = 0x3320646e + state[2] = 0x79622d32 + state[3] = 0x6b206574 + + state[4] = binary.LittleEndian.Uint32(key[0:4]) + state[5] = binary.LittleEndian.Uint32(key[4:8]) + state[6] = binary.LittleEndian.Uint32(key[8:12]) + state[7] = binary.LittleEndian.Uint32(key[12:16]) + state[8] = binary.LittleEndian.Uint32(key[16:20]) + state[9] = binary.LittleEndian.Uint32(key[20:24]) + state[10] = binary.LittleEndian.Uint32(key[24:28]) + state[11] = binary.LittleEndian.Uint32(key[28:32]) + + state[12] = 0 + state[13] = binary.LittleEndian.Uint32(nonce[0:4]) + state[14] = binary.LittleEndian.Uint32(nonce[4:8]) + state[15] = binary.LittleEndian.Uint32(nonce[8:12]) +} + +func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { + if !cpu.X86.HasSSSE3 { + return c.sealGeneric(dst, nonce, plaintext, additionalData) + } + + var state [16]uint32 + setupState(&state, &c.key, nonce) + + ret, out := sliceForAppend(dst, len(plaintext)+16) + if alias.InexactOverlap(out, plaintext) { + panic("chacha20poly1305: invalid buffer overlap") + } + chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData) + return ret +} + +func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if !cpu.X86.HasSSSE3 { + return c.openGeneric(dst, nonce, ciphertext, additionalData) + } + + var state [16]uint32 + setupState(&state, &c.key, nonce) + + ciphertext = ciphertext[:len(ciphertext)-16] + ret, out := sliceForAppend(dst, len(ciphertext)) + if alias.InexactOverlap(out, ciphertext) { + panic("chacha20poly1305: invalid buffer overlap") + } + if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) { + for i := range out { + out[i] = 0 + } + return nil, errOpen + } + + return ret, nil +} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s 
b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s new file mode 100644 index 0000000000..867c181a14 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s @@ -0,0 +1,2696 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. + +//go:build gc && !purego +// +build gc,!purego + +#include "textflag.h" +// General register allocation +#define oup DI +#define inp SI +#define inl BX +#define adp CX // free to reuse, after we hash the additional data +#define keyp R8 // free to reuse, when we copy the key to stack +#define itr2 R9 // general iterator +#define itr1 CX // general iterator +#define acc0 R10 +#define acc1 R11 +#define acc2 R12 +#define t0 R13 +#define t1 R14 +#define t2 R15 +#define t3 R8 +// Register and stack allocation for the SSE code +#define rStore (0*16)(BP) +#define sStore (1*16)(BP) +#define state1Store (2*16)(BP) +#define state2Store (3*16)(BP) +#define tmpStore (4*16)(BP) +#define ctr0Store (5*16)(BP) +#define ctr1Store (6*16)(BP) +#define ctr2Store (7*16)(BP) +#define ctr3Store (8*16)(BP) +#define A0 X0 +#define A1 X1 +#define A2 X2 +#define B0 X3 +#define B1 X4 +#define B2 X5 +#define C0 X6 +#define C1 X7 +#define C2 X8 +#define D0 X9 +#define D1 X10 +#define D2 X11 +#define T0 X12 +#define T1 X13 +#define T2 X14 +#define T3 X15 +#define A3 T0 +#define B3 T1 +#define C3 T2 +#define D3 T3 +// Register and stack allocation for the AVX2 code +#define rsStoreAVX2 (0*32)(BP) +#define state1StoreAVX2 (1*32)(BP) +#define state2StoreAVX2 (2*32)(BP) +#define ctr0StoreAVX2 (3*32)(BP) +#define ctr1StoreAVX2 (4*32)(BP) +#define ctr2StoreAVX2 (5*32)(BP) +#define ctr3StoreAVX2 (6*32)(BP) +#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack +#define 
AA0 Y0 +#define AA1 Y5 +#define AA2 Y6 +#define AA3 Y7 +#define BB0 Y14 +#define BB1 Y9 +#define BB2 Y10 +#define BB3 Y11 +#define CC0 Y12 +#define CC1 Y13 +#define CC2 Y8 +#define CC3 Y15 +#define DD0 Y4 +#define DD1 Y1 +#define DD2 Y2 +#define DD3 Y3 +#define TT0 DD3 +#define TT1 AA3 +#define TT2 BB3 +#define TT3 CC3 +// ChaCha20 constants +DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 +DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 +// <<< 16 with PSHUFB +DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 +DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A +DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 +DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A +// <<< 8 with PSHUFB +DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 +DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B +DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 +DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B + +DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 +DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 +DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 +DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 + +DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 +DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 +DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 +DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 +// Poly1305 key clamp +DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF +DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF + +DATA ·sseIncMask<>+0x00(SB)/8, $0x1 +DATA ·sseIncMask<>+0x08(SB)/8, $0x0 +// To load/store the last < 16 bytes in a buffer +DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff +DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff 
+DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+0x28(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff +DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff +DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff +DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff + +GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 +GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 +GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 +GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 +GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 +// No PALIGNR in Go ASM yet (but VPALIGNR is present). 
+#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 +#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 +#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 +#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 +#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 +#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 +#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 +#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 +#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 +#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 +#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 +#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 +#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 +#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 +#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5 +#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X13, X13 +#define shiftC0Right shiftC0Left +#define shiftC1Right 
shiftC1Left +#define shiftC2Right shiftC2Left +#define shiftC3Right shiftC3Left +#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 +#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 +#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 +#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 +// Some macros +#define chachaQR(A, B, C, D, T) \ + PADDD B, A; PXOR A, D; PSHUFB ·rol16<>(SB), D \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ + PADDD B, A; PXOR A, D; PSHUFB ·rol8<>(SB), D \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B + +#define chachaQR_AVX2(A, B, C, D, T) \ + VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ + VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ + VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ + VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B + +#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 +#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 +#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX +#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 +#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 + +#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; 
MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 +#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 +#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 + +#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage +#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage +// ---------------------------------------------------------------------------- +TEXT polyHashADInternal<>(SB), NOSPLIT, $0 + // adp points to beginning of additional data + // itr2 holds ad length + XORQ acc0, acc0 + XORQ acc1, acc1 + XORQ acc2, acc2 + CMPQ itr2, $13 + JNE hashADLoop + +openFastTLSAD: + // Special treatment for the TLS case of 13 bytes + MOVQ (adp), acc0 + MOVQ 5(adp), acc1 + SHRQ $24, acc1 + MOVQ $1, acc2 + polyMul + RET + +hashADLoop: + // Hash in 16 byte chunks + CMPQ itr2, $16 + JB hashADTail + polyAdd(0(adp)) + LEAQ (1*16)(adp), adp + SUBQ $16, itr2 + polyMul + JMP hashADLoop + +hashADTail: + CMPQ itr2, $0 + JE hashADDone + + // Hash last < 16 byte tail + XORQ t0, t0 + XORQ t1, t1 + XORQ t2, t2 + ADDQ itr2, adp + +hashADTailLoop: + SHLQ $8, t0, t1 + SHLQ $8, t0 + MOVB -1(adp), t2 + XORQ t2, t0 + DECQ adp + DECQ itr2 + JNE hashADTailLoop + +hashADTailFinish: + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + // Finished AD +hashADDone: + RET + +// ---------------------------------------------------------------------------- +// func chacha20Poly1305Open(dst, key, src, ad []byte) bool +TEXT ·chacha20Poly1305Open(SB), 0, $288-97 + // For aligned stack access + MOVQ SP, BP + ADDQ $32, BP + ANDQ $-32, BP + MOVQ dst+0(FP), oup + MOVQ key+24(FP), keyp + MOVQ src+48(FP), inp + MOVQ src_len+56(FP), inl + MOVQ ad+72(FP), adp + + // Check for AVX2 support + CMPB ·useAVX2(SB), $1 + JE chacha20Poly1305Open_AVX2 + + // Special optimization, for very short buffers + CMPQ inl, $128 + JBE 
openSSE128 // About 16% faster + + // For long buffers, prepare the poly key first + MOVOU ·chacha20Constants<>(SB), A0 + MOVOU (1*16)(keyp), B0 + MOVOU (2*16)(keyp), C0 + MOVOU (3*16)(keyp), D0 + MOVO D0, T1 + + // Store state on stack for future use + MOVO B0, state1Store + MOVO C0, state2Store + MOVO D0, ctr3Store + MOVQ $10, itr2 + +openSSEPreparePolyKey: + chachaQR(A0, B0, C0, D0, T0) + shiftB0Left; shiftC0Left; shiftD0Left + chachaQR(A0, B0, C0, D0, T0) + shiftB0Right; shiftC0Right; shiftD0Right + DECQ itr2 + JNE openSSEPreparePolyKey + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVO A0, rStore; MOVO B0, sStore + + // Hash AAD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openSSEMainLoop: + CMPQ inl, $256 + JB openSSEMainLoopDone + + // Load state, increment counter blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + + // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 + MOVQ $4, itr1 + MOVQ inp, itr2 + +openSSEInternalLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyAdd(0(itr2)) + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; 
shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + LEAQ (2*8)(itr2), itr2 + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + polyMulStage3 + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr1 + JGE openSSEInternalLoop + + polyAdd(0(itr2)) + polyMul + LEAQ (2*8)(itr2), itr2 + + CMPQ itr1, $-6 + JG openSSEInternalLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + + // Load - xor - store + MOVO D3, tmpStore + MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) + MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) + MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) + MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) + MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup) + MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) + MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) + MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) + MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) + MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) + MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) + MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) + MOVOU (14*16)(inp), D0; PXOR D0, 
C3; MOVOU C3, (14*16)(oup) + MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) + LEAQ 256(inp), inp + LEAQ 256(oup), oup + SUBQ $256, inl + JMP openSSEMainLoop + +openSSEMainLoopDone: + // Handle the various tail sizes efficiently + TESTQ inl, inl + JE openSSEFinalize + CMPQ inl, $64 + JBE openSSETail64 + CMPQ inl, $128 + JBE openSSETail128 + CMPQ inl, $192 + JBE openSSETail192 + JMP openSSETail256 + +openSSEFinalize: + // Hash in the PT, AAD lengths + ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 + polyMul + + // Final reduce + MOVQ acc0, t0 + MOVQ acc1, t1 + MOVQ acc2, t2 + SUBQ $-5, acc0 + SBBQ $-1, acc1 + SBBQ $3, acc2 + CMOVQCS t0, acc0 + CMOVQCS t1, acc1 + CMOVQCS t2, acc2 + + // Add in the "s" part of the key + ADDQ 0+sStore, acc0 + ADCQ 8+sStore, acc1 + + // Finally, constant time compare to the tag at the end of the message + XORQ AX, AX + MOVQ $1, DX + XORQ (0*8)(inp), acc0 + XORQ (1*8)(inp), acc1 + ORQ acc1, acc0 + CMOVQEQ DX, AX + + // Return true iff tags are equal + MOVB AX, ret+96(FP) + RET + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 129 bytes +openSSE128: + // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks + MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 + MOVQ $10, itr2 + +openSSE128InnerCipherLoop: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftB1Left; shiftB2Left + shiftC0Left; shiftC1Left; shiftC2Left + shiftD0Left; shiftD1Left; shiftD2Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; 
shiftB1Right; shiftB2Right + shiftC0Right; shiftC1Right; shiftC2Right + shiftD0Right; shiftD1Right; shiftD2Right + DECQ itr2 + JNE openSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 + PADDL T2, C1; PADDL T2, C2 + PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVOU A0, rStore; MOVOU B0, sStore + + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openSSE128Open: + CMPQ inl, $16 + JB openSSETail16 + SUBQ $16, inl + + // Load for hashing + polyAdd(0(inp)) + + // Load for decryption + MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + polyMul + + // Shift the stream "left" + MOVO B1, A1 + MOVO C1, B1 + MOVO D1, C1 + MOVO A2, D1 + MOVO B2, A2 + MOVO C2, B2 + MOVO D2, C2 + JMP openSSE128Open + +openSSETail16: + TESTQ inl, inl + JE openSSEFinalize + + // We can safely load the CT from the end, because it is padded with the MAC + MOVQ inl, itr2 + SHLQ $4, itr2 + LEAQ ·andMask<>(SB), t0 + MOVOU (inp), T0 + ADDQ inl, inp + PAND -16(t0)(itr2*1), T0 + MOVO T0, 0+tmpStore + MOVQ T0, t0 + MOVQ 8+tmpStore, t1 + PXOR A1, T0 + + // We can only store one byte at a time, since plaintext can be shorter than 16 bytes +openSSETail16Store: + MOVQ T0, t3 + MOVB t3, (oup) + PSRLDQ $1, T0 + INCQ oup + DECQ inl + JNE openSSETail16Store + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + JMP openSSEFinalize + +// ---------------------------------------------------------------------------- +// Special optimization for the last 64 bytes of ciphertext +openSSETail64: + // Need to decrypt up to 64 bytes - prepare single block + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, 
ctr0Store + XORQ itr2, itr2 + MOVQ inl, itr1 + CMPQ itr1, $16 + JB openSSETail64LoopB + +openSSETail64LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + SUBQ $16, itr1 + +openSSETail64LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0) + shiftB0Left; shiftC0Left; shiftD0Left + chachaQR(A0, B0, C0, D0, T0) + shiftB0Right; shiftC0Right; shiftD0Right + + CMPQ itr1, $16 + JAE openSSETail64LoopA + + CMPQ itr2, $160 + JNE openSSETail64LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 + +openSSETail64DecLoop: + CMPQ inl, $16 + JB openSSETail64DecLoopDone + SUBQ $16, inl + MOVOU (inp), T0 + PXOR T0, A0 + MOVOU A0, (oup) + LEAQ 16(inp), inp + LEAQ 16(oup), oup + MOVO B0, A0 + MOVO C0, B0 + MOVO D0, C0 + JMP openSSETail64DecLoop + +openSSETail64DecLoopDone: + MOVO A0, A1 + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +openSSETail128: + // Need to decrypt up to 128 bytes - prepare two blocks + MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr0Store + MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store + XORQ itr2, itr2 + MOVQ inl, itr1 + ANDQ $-16, itr1 + +openSSETail128LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + +openSSETail128LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + + CMPQ itr2, itr1 + JB openSSETail128LoopA + + CMPQ itr2, $160 + JNE openSSETail128LoopB + + PADDL 
·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B0; PADDL state1Store, B1 + PADDL state2Store, C0; PADDL state2Store, C1 + PADDL ctr1Store, D0; PADDL ctr0Store, D1 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) + + SUBQ $64, inl + LEAQ 64(inp), inp + LEAQ 64(oup), oup + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 192 bytes of ciphertext +openSSETail192: + // Need to decrypt up to 192 bytes - prepare three blocks + MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store + MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store + + MOVQ inl, itr1 + MOVQ $160, itr2 + CMPQ itr1, $160 + CMOVQGT itr2, itr1 + ANDQ $-16, itr1 + XORQ itr2, itr2 + +openSSLTail192LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + +openSSLTail192LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + shiftB2Left; shiftC2Left; shiftD2Left + + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + shiftB2Right; shiftC2Right; shiftD2Right + + CMPQ itr2, itr1 + JB openSSLTail192LoopA + + CMPQ itr2, $160 + JNE openSSLTail192LoopB + + CMPQ inl, $176 + JB openSSLTail192Store + + polyAdd(160(inp)) + polyMul + + CMPQ inl, $192 + 
JB openSSLTail192Store + + polyAdd(176(inp)) + polyMul + +openSSLTail192Store: + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 + PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 + PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 + MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) + + MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + + SUBQ $128, inl + LEAQ 128(inp), inp + LEAQ 128(oup), oup + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +openSSETail256: + // Need to decrypt up to 256 bytes - prepare four blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + XORQ itr2, itr2 + +openSSETail256Loop: + // This loop inteleaves 8 ChaCha quarter rounds with 1 poly multiplication + polyAdd(0(inp)(itr2*1)) + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO 
tmpStore, C1 + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulStage3 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + ADDQ $2*8, itr2 + CMPQ itr2, $160 + JB openSSETail256Loop + MOVQ inl, itr1 + ANDQ $-16, itr1 + +openSSETail256HashLoop: + polyAdd(0(inp)(itr2*1)) + polyMul + ADDQ $2*8, itr2 + CMPQ itr2, itr1 + JB openSSETail256HashLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + MOVO D3, tmpStore + + // Load - xor - store + MOVOU (0*16)(inp), D3; PXOR D3, A0 + MOVOU (1*16)(inp), D3; PXOR D3, B0 + MOVOU (2*16)(inp), D3; PXOR D3, C0 + MOVOU (3*16)(inp), D3; PXOR D3, D0 + MOVOU A0, (0*16)(oup) + MOVOU B0, (1*16)(oup) + MOVOU C0, (2*16)(oup) + MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (8*16)(oup); MOVOU B2, 
(9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) + LEAQ 192(inp), inp + LEAQ 192(oup), oup + SUBQ $192, inl + MOVO A3, A0 + MOVO B3, B0 + MOVO C3, C0 + MOVO tmpStore, D0 + + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// ------------------------- AVX2 Code ---------------------------------------- +chacha20Poly1305Open_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>(SB), AA0 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 + BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 + VPADDD ·avx2InitMask<>(SB), DD0, DD0 + + // Special optimization, for very short buffers + CMPQ inl, $192 + JBE openAVX2192 + CMPQ inl, $320 + JBE openAVX2320 + + // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA BB0, state1StoreAVX2 + VMOVDQA CC0, state2StoreAVX2 + VMOVDQA DD0, ctr3StoreAVX2 + MOVQ $10, itr2 + +openAVX2PreparePolyKey: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + DECQ itr2 + JNE openAVX2PreparePolyKey + + VPADDD ·chacha20Constants<>(SB), AA0, AA0 + VPADDD state1StoreAVX2, BB0, BB0 + VPADDD state2StoreAVX2, CC0, CC0 + VPADDD ctr3StoreAVX2, DD0, DD0 + + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for the first 64 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + + // Hash AD + first 64 bytes + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +openAVX2InitialHash64: + 
polyAdd(0(inp)(itr1*1)) + polyMulAVX2 + ADDQ $16, itr1 + CMPQ itr1, $64 + JNE openAVX2InitialHash64 + + // Decrypt the first 64 bytes + VPXOR (0*32)(inp), AA0, AA0 + VPXOR (1*32)(inp), BB0, BB0 + VMOVDQU AA0, (0*32)(oup) + VMOVDQU BB0, (1*32)(oup) + LEAQ (2*32)(inp), inp + LEAQ (2*32)(oup), oup + SUBQ $64, inl + +openAVX2MainLoop: + CMPQ inl, $512 + JB openAVX2MainLoopDone + + // Load state, increment counter blocks, store the incremented counters + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + XORQ itr1, itr1 + +openAVX2InternalLoop: + // Lets just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications + // Effectively per 512 bytes of stream we hash 480 bytes of ciphertext + polyAdd(0*8(inp)(itr1*1)) + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage1_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulStage2_AVX2 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyMulStage3_AVX2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR 
CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + polyAdd(2*8(inp)(itr1*1)) + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage1_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage2_AVX2 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage3_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulReduceStage + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(4*8(inp)(itr1*1)) + LEAQ (6*8)(itr1), itr1 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + 
VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage1_AVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + polyMulStage2_AVX2 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage3_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + CMPQ itr1, $480 + JNE openAVX2InternalLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; 
VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + polyAdd(480(inp)) + polyMulAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + // and here + polyAdd(496(inp)) + polyMulAVX2 + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 + VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) + LEAQ (32*16)(inp), inp + LEAQ (32*16)(oup), oup + SUBQ $(32*16), inl + JMP openAVX2MainLoop + +openAVX2MainLoopDone: + // Handle the various tail sizes efficiently + TESTQ inl, inl + JE openSSEFinalize + CMPQ inl, $128 + JBE openAVX2Tail128 + CMPQ inl, $256 + JBE openAVX2Tail256 + CMPQ 
inl, $384 + JBE openAVX2Tail384 + JMP openAVX2Tail512 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 193 bytes +openAVX2192: + // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks + VMOVDQA AA0, AA1 + VMOVDQA BB0, BB1 + VMOVDQA CC0, CC1 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2 + VMOVDQA BB0, BB2 + VMOVDQA CC0, CC2 + VMOVDQA DD0, DD2 + VMOVDQA DD1, TT3 + MOVQ $10, itr2 + +openAVX2192InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr2 + JNE openAVX2192InnerCipherLoop + VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 + VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 + VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 + VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 192 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + +openAVX2ShortOpen: + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openAVX2ShortOpenLoop: + CMPQ inl, $32 + JB openAVX2ShortTail32 + SUBQ $32, inl + + // Load for hashing + polyAdd(0*8(inp)) + polyMulAVX2 + polyAdd(2*8(inp)) + polyMulAVX2 + + // Load for decryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ 
(1*32)(inp), inp + LEAQ (1*32)(oup), oup + + // Shift stream left + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + VMOVDQA AA1, DD0 + VMOVDQA BB1, AA1 + VMOVDQA CC1, BB1 + VMOVDQA DD1, CC1 + VMOVDQA AA2, DD1 + VMOVDQA BB2, AA2 + JMP openAVX2ShortOpenLoop + +openAVX2ShortTail32: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB openAVX2ShortDone + + SUBQ $16, inl + + // Load for hashing + polyAdd(0*8(inp)) + polyMulAVX2 + + // Load for decryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +openAVX2ShortDone: + VZEROUPPER + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 321 bytes +openAVX2320: + // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks + VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 + MOVQ $10, itr2 + +openAVX2320InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr2 + JNE openAVX2320InnerCipherLoop + + VMOVDQA 
·chacha20Constants<>(SB), TT0 + VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 + VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 + VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 + VMOVDQA ·avx2IncMask<>(SB), TT0 + VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD2, DD2 + + // Clamp and store poly key + VPERM2I128 $0x02, AA0, BB0, TT0 + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 320 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x02, AA2, BB2, CC1 + VPERM2I128 $0x02, CC2, DD2, DD1 + VPERM2I128 $0x13, AA2, BB2, AA2 + VPERM2I128 $0x13, CC2, DD2, BB2 + JMP openAVX2ShortOpen + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +openAVX2Tail128: + // Need to decrypt up to 128 bytes - prepare two blocks + VMOVDQA ·chacha20Constants<>(SB), AA1 + VMOVDQA state1StoreAVX2, BB1 + VMOVDQA state2StoreAVX2, CC1 + VMOVDQA ctr3StoreAVX2, DD1 + VPADDD ·avx2IncMask<>(SB), DD1, DD1 + VMOVDQA DD1, DD0 + + XORQ itr2, itr2 + MOVQ inl, itr1 + ANDQ $-16, itr1 + TESTQ itr1, itr1 + JE openAVX2Tail128LoopB + +openAVX2Tail128LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMulAVX2 + +openAVX2Tail128LoopB: + ADDQ $16, itr2 + chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD1, DD1, DD1 + CMPQ itr2, itr1 + JB openAVX2Tail128LoopA + CMPQ itr2, $160 + JNE openAVX2Tail128LoopB + + VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD 
state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC1, CC1 + VPADDD DD0, DD1, DD1 + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + +openAVX2TailLoop: + CMPQ inl, $32 + JB openAVX2Tail + SUBQ $32, inl + + // Load for decryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + LEAQ (1*32)(oup), oup + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + JMP openAVX2TailLoop + +openAVX2Tail: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB openAVX2TailDone + SUBQ $16, inl + + // Load for decryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +openAVX2TailDone: + VZEROUPPER + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +openAVX2Tail256: + // Need to decrypt up to 256 bytes - prepare four blocks + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA DD0, TT1 + VMOVDQA DD1, TT2 + + // Compute the number of iterations that will hash data + MOVQ inl, tmpStoreAVX2 + MOVQ inl, itr1 + SUBQ $128, itr1 + SHRQ $4, itr1 + MOVQ $10, itr2 + CMPQ itr1, $10 + CMOVQGT itr2, itr1 + MOVQ inp, inl + XORQ itr2, itr2 + +openAVX2Tail256LoopA: + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + + // Perform ChaCha rounds, while hashing the remaining input +openAVX2Tail256LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + INCQ itr2 + chachaQR_AVX2(AA0, BB0, 
CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + CMPQ itr2, itr1 + JB openAVX2Tail256LoopA + + CMPQ itr2, $10 + JNE openAVX2Tail256LoopB + + MOVQ inl, itr2 + SUBQ inp, inl + MOVQ inl, itr1 + MOVQ tmpStoreAVX2, inl + + // Hash the remainder of data (if any) +openAVX2Tail256Hash: + ADDQ $16, itr1 + CMPQ itr1, inl + JGT openAVX2Tail256HashEnd + polyAdd (0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + JMP openAVX2Tail256Hash + +// Store 128 bytes safely, then go to store loop +openAVX2Tail256HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2 + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + + VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2 + VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup) + LEAQ (4*32)(inp), inp + LEAQ (4*32)(oup), oup + SUBQ $4*32, inl + + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 384 bytes of ciphertext +openAVX2Tail384: + // Need to decrypt up to 384 bytes - prepare six blocks + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 + VMOVDQA ctr3StoreAVX2, DD0 + 
VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA DD0, ctr0StoreAVX2 + VMOVDQA DD1, ctr1StoreAVX2 + VMOVDQA DD2, ctr2StoreAVX2 + + // Compute the number of iterations that will hash two blocks of data + MOVQ inl, tmpStoreAVX2 + MOVQ inl, itr1 + SUBQ $256, itr1 + SHRQ $4, itr1 + ADDQ $6, itr1 + MOVQ $10, itr2 + CMPQ itr1, $10 + CMOVQGT itr2, itr1 + MOVQ inp, inl + XORQ itr2, itr2 + + // Perform ChaCha rounds, while hashing the remaining input +openAVX2Tail384LoopB: + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + +openAVX2Tail384LoopA: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + INCQ itr2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + + CMPQ itr2, itr1 + JB openAVX2Tail384LoopB + + CMPQ itr2, $10 + JNE openAVX2Tail384LoopA + + MOVQ inl, itr2 + SUBQ inp, inl + MOVQ inl, itr1 + MOVQ tmpStoreAVX2, inl + +openAVX2Tail384Hash: + ADDQ $16, itr1 + CMPQ itr1, inl + JGT openAVX2Tail384HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + JMP openAVX2Tail384Hash + +// Store 256 bytes safely, then go to store loop +openAVX2Tail384HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 + VPADDD 
state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2 + VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3 + VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 + VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + LEAQ (8*32)(inp), inp + LEAQ (8*32)(oup), oup + SUBQ $8*32, inl + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 512 bytes of ciphertext +openAVX2Tail512: + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + XORQ itr1, itr1 + MOVQ inp, itr2 + 
+openAVX2Tail512LoopB: + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ (2*8)(itr2), itr2 + +openAVX2Tail512LoopA: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyAdd(0*8(itr2)) + polyMulAVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, 
DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(2*8(itr2)) + polyMulAVX2 + LEAQ (4*8)(itr2), itr2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, 
DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + INCQ itr1 + CMPQ itr1, $4 + JLT openAVX2Tail512LoopB + + CMPQ itr1, $10 + JNE openAVX2Tail512LoopA + + MOVQ inl, itr1 + SUBQ $384, itr1 + ANDQ $-16, itr1 + +openAVX2Tail512HashLoop: + TESTQ itr1, itr1 + JE openAVX2Tail512HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + SUBQ $16, itr1 + JMP openAVX2Tail512HashLoop + +openAVX2Tail512HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + 
VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + + LEAQ (12*32)(inp), inp + LEAQ (12*32)(oup), oup + SUBQ $12*32, inl + + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// ---------------------------------------------------------------------------- +// func chacha20Poly1305Seal(dst, key, src, ad []byte) +TEXT ·chacha20Poly1305Seal(SB), 0, $288-96 + // For aligned stack access + MOVQ SP, BP + ADDQ $32, BP + ANDQ $-32, BP + MOVQ dst+0(FP), oup + MOVQ key+24(FP), keyp + MOVQ src+48(FP), inp + MOVQ src_len+56(FP), inl + MOVQ ad+72(FP), adp + + CMPB ·useAVX2(SB), $1 + JE chacha20Poly1305Seal_AVX2 + + // Special optimization, for very short buffers + CMPQ inl, $128 + JBE sealSSE128 // About 15% faster + + // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration + MOVOU ·chacha20Constants<>(SB), A0 + MOVOU (1*16)(keyp), B0 + MOVOU (2*16)(keyp), C0 + MOVOU (3*16)(keyp), D0 + + // Store state on stack for future use + MOVO B0, state1Store + MOVO C0, state2Store + + // Load state, increment counter blocks + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVQ $10, itr2 + +sealSSEIntroLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; 
shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr2 + JNE sealSSEIntroLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVO A0, rStore + MOVO B0, sStore + + // Hash AAD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup) + + MOVQ $128, itr1 + SUBQ $128, inl + LEAQ 128(inp), inp + + MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1 + + CMPQ inl, $64 + JBE sealSSE128SealHash + + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 + MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup) + + ADDQ $64, itr1 + SUBQ $64, inl + LEAQ 64(inp), inp + + MOVQ $2, itr1 + 
MOVQ $8, itr2 + + CMPQ inl, $64 + JBE sealSSETail64 + CMPQ inl, $128 + JBE sealSSETail128 + CMPQ inl, $192 + JBE sealSSETail192 + +sealSSEMainLoop: + // Load state, increment counter blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + +sealSSEInnerLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyAdd(0(oup)) + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + LEAQ (2*8)(oup), oup + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + polyMulStage3 + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr2 + JGE sealSSEInnerLoop + polyAdd(0(oup)) + polyMul + LEAQ (2*8)(oup), oup + DECQ itr1 + JG sealSSEInnerLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD 
state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + MOVO D3, tmpStore + + // Load - xor - store + MOVOU (0*16)(inp), D3; PXOR D3, A0 + MOVOU (1*16)(inp), D3; PXOR D3, B0 + MOVOU (2*16)(inp), D3; PXOR D3, C0 + MOVOU (3*16)(inp), D3; PXOR D3, D0 + MOVOU A0, (0*16)(oup) + MOVOU B0, (1*16)(oup) + MOVOU C0, (2*16)(oup) + MOVOU D0, (3*16)(oup) + MOVO tmpStore, D3 + + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) + ADDQ $192, inp + MOVQ $192, itr1 + SUBQ $192, inl + MOVO A3, A1 + MOVO B3, B1 + MOVO C3, C1 + MOVO D3, D1 + CMPQ inl, $64 + JBE sealSSE128SealHash + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 + MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup) + LEAQ 64(inp), inp + SUBQ $64, inl + MOVQ $6, itr1 + MOVQ $4, itr2 + CMPQ inl, $192 + JG sealSSEMainLoop + + MOVQ inl, itr1 + TESTQ inl, inl + JE sealSSE128SealHash + MOVQ $6, itr1 + CMPQ inl, $64 + JBE sealSSETail64 + CMPQ inl, $128 + JBE sealSSETail128 + JMP sealSSETail192 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 64 bytes of plaintext +sealSSETail64: + // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A1 + MOVO state1Store, B1 + MOVO state2Store, C1 + MOVO ctr3Store, D1 + PADDL ·sseIncMask<>(SB), D1 + MOVO D1, ctr0Store + +sealSSETail64LoopA: + // Perform 
ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail64LoopB: + chachaQR(A1, B1, C1, D1, T1) + shiftB1Left; shiftC1Left; shiftD1Left + chachaQR(A1, B1, C1, D1, T1) + shiftB1Right; shiftC1Right; shiftD1Right + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + + DECQ itr1 + JG sealSSETail64LoopA + + DECQ itr2 + JGE sealSSETail64LoopB + PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B1 + PADDL state2Store, C1 + PADDL ctr0Store, D1 + + JMP sealSSE128Seal + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of plaintext +sealSSETail128: + // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + +sealSSETail128LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail128LoopB: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + + DECQ itr1 + JG sealSSETail128LoopA + + DECQ itr2 + JGE sealSSETail128LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B0; PADDL state1Store, B1 + PADDL state2Store, C0; PADDL state2Store, C1 + PADDL ctr0Store, D0; PADDL ctr1Store, D1 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 + MOVOU A0, 
(0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) + + MOVQ $64, itr1 + LEAQ 64(inp), inp + SUBQ $64, inl + + JMP sealSSE128SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 192 bytes of plaintext +sealSSETail192: + // Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store + +sealSSETail192LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail192LoopB: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + shiftB2Left; shiftC2Left; shiftD2Left + + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + shiftB2Right; shiftC2Right; shiftD2Right + + DECQ itr1 + JG sealSSETail192LoopA + + DECQ itr2 + JGE sealSSETail192LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 + PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 + PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 + MOVOU A0, (0*16)(oup); MOVOU B0, 
(1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + + MOVO A2, A1 + MOVO B2, B1 + MOVO C2, C1 + MOVO D2, D1 + MOVQ $128, itr1 + LEAQ 128(inp), inp + SUBQ $128, inl + + JMP sealSSE128SealHash + +// ---------------------------------------------------------------------------- +// Special seal optimization for buffers smaller than 129 bytes +sealSSE128: + // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks + MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 + MOVQ $10, itr2 + +sealSSE128InnerCipherLoop: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftB1Left; shiftB2Left + shiftC0Left; shiftC1Left; shiftC2Left + shiftD0Left; shiftD1Left; shiftD2Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftB1Right; shiftB2Right + shiftC0Right; shiftC1Right; shiftC2Right + shiftD0Right; shiftD1Right; shiftD2Right + DECQ itr2 + JNE sealSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 + PADDL T2, C1; PADDL T2, C2 + PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + PAND ·polyClampMask<>(SB), A0 + MOVOU A0, rStore + MOVOU B0, sStore + + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, 
itr1 + +sealSSE128SealHash: + // itr1 holds the number of bytes encrypted but not yet hashed + CMPQ itr1, $16 + JB sealSSE128Seal + polyAdd(0(oup)) + polyMul + + SUBQ $16, itr1 + ADDQ $16, oup + + JMP sealSSE128SealHash + +sealSSE128Seal: + CMPQ inl, $16 + JB sealSSETail + SUBQ $16, inl + + // Load for decryption + MOVOU (inp), T0 + PXOR T0, A1 + MOVOU A1, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + + // Extract for hashing + MOVQ A1, t0 + PSRLDQ $8, A1 + MOVQ A1, t1 + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + // Shift the stream "left" + MOVO B1, A1 + MOVO C1, B1 + MOVO D1, C1 + MOVO A2, D1 + MOVO B2, A2 + MOVO C2, B2 + MOVO D2, C2 + JMP sealSSE128Seal + +sealSSETail: + TESTQ inl, inl + JE sealSSEFinalize + + // We can only load the PT one byte at a time to avoid read after end of buffer + MOVQ inl, itr2 + SHLQ $4, itr2 + LEAQ ·andMask<>(SB), t0 + MOVQ inl, itr1 + LEAQ -1(inp)(inl*1), inp + XORQ t2, t2 + XORQ t3, t3 + XORQ AX, AX + +sealSSETailLoadLoop: + SHLQ $8, t2, t3 + SHLQ $8, t2 + MOVB (inp), AX + XORQ AX, t2 + LEAQ -1(inp), inp + DECQ itr1 + JNE sealSSETailLoadLoop + MOVQ t2, 0+tmpStore + MOVQ t3, 8+tmpStore + PXOR 0+tmpStore, A1 + MOVOU A1, (oup) + MOVOU -16(t0)(itr2*1), T0 + PAND T0, A1 + MOVQ A1, t0 + PSRLDQ $8, A1 + MOVQ A1, t1 + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + ADDQ inl, oup + +sealSSEFinalize: + // Hash in the buffer lengths + ADDQ ad_len+80(FP), acc0 + ADCQ src_len+56(FP), acc1 + ADCQ $1, acc2 + polyMul + + // Final reduce + MOVQ acc0, t0 + MOVQ acc1, t1 + MOVQ acc2, t2 + SUBQ $-5, acc0 + SBBQ $-1, acc1 + SBBQ $3, acc2 + CMOVQCS t0, acc0 + CMOVQCS t1, acc1 + CMOVQCS t2, acc2 + + // Add in the "s" part of the key + ADDQ 0+sStore, acc0 + ADCQ 8+sStore, acc1 + + // Finally store the tag at the end of the message + MOVQ acc0, (0*8)(oup) + MOVQ acc1, (1*8)(oup) + RET + +// ---------------------------------------------------------------------------- +// ------------------------- AVX2 Code 
---------------------------------------- +chacha20Poly1305Seal_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>(SB), AA0 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 + BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 + VPADDD ·avx2InitMask<>(SB), DD0, DD0 + + // Special optimizations, for very short buffers + CMPQ inl, $192 + JBE seal192AVX2 // 33% faster + CMPQ inl, $320 + JBE seal320AVX2 // 17% faster + + // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2 + VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2 + VMOVDQA DD3, ctr3StoreAVX2 + MOVQ $10, itr2 + +sealAVX2IntroLoop: + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 + VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 + VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA 
tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 + VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 + VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 + DECQ itr2 + JNE sealAVX2IntroLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + + VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127 + VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key + VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), DD0, DD0 + VMOVDQA DD0, rsStoreAVX2 + + // Hash AD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + + // Can store at least 320 bytes + VPXOR (0*32)(inp), AA0, AA0 + VPXOR (1*32)(inp), CC0, CC0 + VMOVDQU AA0, (0*32)(oup) + VMOVDQU CC0, (1*32)(oup) + + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0 + VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; 
VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0 + VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup) + + MOVQ $320, itr1 + SUBQ $320, inl + LEAQ 320(inp), inp + + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0 + CMPQ inl, $128 + JBE sealAVX2SealHash + + VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0 + VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup) + SUBQ $128, inl + LEAQ 128(inp), inp + + MOVQ $8, itr1 + MOVQ $2, itr2 + + CMPQ inl, $128 + JBE sealAVX2Tail128 + CMPQ inl, $256 + JBE sealAVX2Tail256 + CMPQ inl, $384 + JBE sealAVX2Tail384 + CMPQ inl, $512 + JBE sealAVX2Tail512 + + // We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR 
$12, DD0, DD0, DD0 + VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 + VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 + VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 + VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 + VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + + SUBQ $16, oup // Adjust the pointer + MOVQ $9, itr1 + JMP sealAVX2InternalLoopStart + +sealAVX2MainLoop: + // Load state, increment counter blocks, store the incremented counters + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, 
BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + MOVQ $10, itr1 + +sealAVX2InternalLoop: + polyAdd(0*8(oup)) + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage1_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulStage2_AVX2 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyMulStage3_AVX2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + +sealAVX2InternalLoopStart: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + polyAdd(2*8(oup)) + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage1_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR 
CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage2_AVX2 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage3_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulReduceStage + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(4*8(oup)) + LEAQ (6*8)(oup), oup + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage1_AVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + polyMulStage2_AVX2 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage3_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; 
VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + DECQ itr1 + JNE sealAVX2InternalLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + polyAdd(0*8(oup)) + polyMulAVX2 + LEAQ (4*8)(oup), oup + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, 
AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + // and here + polyAdd(-2*8(oup)) + polyMulAVX2 + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 + VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) + LEAQ (32*16)(inp), inp + SUBQ $(32*16), inl + CMPQ inl, $512 + JG sealAVX2MainLoop + + // Tail can only hash 480 bytes + polyAdd(0*8(oup)) + polyMulAVX2 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ 32(oup), oup + + MOVQ $10, itr1 + MOVQ $0, itr2 + CMPQ inl, $128 + JBE sealAVX2Tail128 + CMPQ inl, $256 + JBE sealAVX2Tail256 + CMPQ inl, $384 + JBE sealAVX2Tail384 + JMP sealAVX2Tail512 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 193 bytes +seal192AVX2: + // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks + VMOVDQA AA0, AA1 + VMOVDQA BB0, BB1 + VMOVDQA CC0, CC1 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2 + VMOVDQA BB0, BB2 + VMOVDQA CC0, CC2 + VMOVDQA DD0, DD2 + VMOVDQA DD1, TT3 + MOVQ $10, itr2 + +sealAVX2192InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + 
VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr2 + JNE sealAVX2192InnerCipherLoop + VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 + VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 + VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 + VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 192 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + +sealAVX2ShortSeal: + // Hash aad + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +sealAVX2SealHash: + // itr1 holds the number of bytes encrypted but not yet hashed + CMPQ itr1, $16 + JB sealAVX2ShortSealLoop + polyAdd(0(oup)) + polyMul + SUBQ $16, itr1 + ADDQ $16, oup + JMP sealAVX2SealHash + +sealAVX2ShortSealLoop: + CMPQ inl, $32 + JB sealAVX2ShortTail32 + SUBQ $32, inl + + // Load for encryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + + // Now can hash + polyAdd(0*8(oup)) + polyMulAVX2 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ (1*32)(oup), oup + + // Shift stream left + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + VMOVDQA AA1, DD0 + VMOVDQA BB1, AA1 + VMOVDQA CC1, BB1 + VMOVDQA DD1, CC1 + VMOVDQA AA2, DD1 + VMOVDQA BB2, AA2 + JMP sealAVX2ShortSealLoop + +sealAVX2ShortTail32: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB sealAVX2ShortDone + + SUBQ $16, inl + + // Load for encryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + + 
// Hash + polyAdd(0*8(oup)) + polyMulAVX2 + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +sealAVX2ShortDone: + VZEROUPPER + JMP sealSSETail + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 321 bytes +seal320AVX2: + // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks + VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 + MOVQ $10, itr2 + +sealAVX2320InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr2 + JNE sealAVX2320InnerCipherLoop + + VMOVDQA ·chacha20Constants<>(SB), TT0 + VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 + VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 + VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 + VMOVDQA ·avx2IncMask<>(SB), TT0 + VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD2, DD2 + + // Clamp and store poly key + VPERM2I128 $0x02, AA0, BB0, TT0 + VPAND ·polyClampMask<>(SB), TT0, 
TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 320 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x02, AA2, BB2, CC1 + VPERM2I128 $0x02, CC2, DD2, DD1 + VPERM2I128 $0x13, AA2, BB2, AA2 + VPERM2I128 $0x13, CC2, DD2, BB2 + JMP sealAVX2ShortSeal + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +sealAVX2Tail128: + // Need to decrypt up to 128 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0 + VMOVDQA state1StoreAVX2, BB0 + VMOVDQA state2StoreAVX2, CC0 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VMOVDQA DD0, DD1 + +sealAVX2Tail128LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail128LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0 + VPALIGNR $8, CC0, CC0, CC0 + VPALIGNR $12, DD0, DD0, DD0 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0 + VPALIGNR $8, CC0, CC0, CC0 + VPALIGNR $4, DD0, DD0, DD0 + DECQ itr1 + JG sealAVX2Tail128LoopA + DECQ itr2 + JGE sealAVX2Tail128LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA1 + VPADDD state1StoreAVX2, BB0, BB1 + VPADDD state2StoreAVX2, CC0, CC1 + VPADDD DD1, DD0, DD1 + + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + JMP sealAVX2ShortSealLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext 
+sealAVX2Tail256: + // Need to decrypt up to 256 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA DD0, TT1 + VMOVDQA DD1, TT2 + +sealAVX2Tail256LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail256LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr1 + JG sealAVX2Tail256LoopA + DECQ itr2 + JGE sealAVX2Tail256LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + VPERM2I128 $0x02, CC0, DD0, TT1 + VPERM2I128 $0x13, AA0, BB0, TT2 + VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + 
MOVQ $128, itr1 + LEAQ 128(inp), inp + SUBQ $128, inl + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + + JMP sealAVX2SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 384 bytes of ciphertext +sealAVX2Tail384: + // Need to decrypt up to 384 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 + +sealAVX2Tail384LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail384LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr1 + JG sealAVX2Tail384LoopA 
+ DECQ itr2 + JGE sealAVX2Tail384LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 + VPERM2I128 $0x02, AA0, BB0, TT0 + VPERM2I128 $0x02, CC0, DD0, TT1 + VPERM2I128 $0x13, AA0, BB0, TT2 + VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, TT0 + VPERM2I128 $0x02, CC1, DD1, TT1 + VPERM2I128 $0x13, AA1, BB1, TT2 + VPERM2I128 $0x13, CC1, DD1, TT3 + VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 + VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) + MOVQ $256, itr1 + LEAQ 256(inp), inp + SUBQ $256, inl + VPERM2I128 $0x02, AA2, BB2, AA0 + VPERM2I128 $0x02, CC2, DD2, BB0 + VPERM2I128 $0x13, AA2, BB2, CC0 + VPERM2I128 $0x13, CC2, DD2, DD0 + + JMP sealAVX2SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 512 bytes of ciphertext +sealAVX2Tail512: + // Need to decrypt up to 512 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; 
VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + +sealAVX2Tail512LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail512LoopB: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyAdd(0*8(oup)) + polyMulAVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD 
$25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ (4*8)(oup), oup + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, 
BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + + DECQ itr1 + JG sealAVX2Tail512LoopA + DECQ itr2 + JGE sealAVX2Tail512LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3 + VPXOR (0*32)(inp), CC3, CC3 + VMOVDQU CC3, (0*32)(oup) + VPERM2I128 $0x02, CC0, DD0, CC3 + VPXOR (1*32)(inp), CC3, CC3 + VMOVDQU CC3, (1*32)(oup) + VPERM2I128 $0x13, AA0, BB0, CC3 + VPXOR (2*32)(inp), CC3, CC3 + VMOVDQU CC3, (2*32)(oup) + VPERM2I128 $0x13, CC0, DD0, CC3 + VPXOR (3*32)(inp), CC3, CC3 + VMOVDQU CC3, (3*32)(oup) + + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + VPERM2I128 $0x02, AA2, BB2, AA0 + VPERM2I128 $0x02, CC2, DD2, BB0 + VPERM2I128 $0x13, AA2, BB2, CC0 + VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, 
BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + + MOVQ $384, itr1 + LEAQ 384(inp), inp + SUBQ $384, inl + VPERM2I128 $0x02, AA3, BB3, AA0 + VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 + VPERM2I128 $0x13, AA3, BB3, CC0 + VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + + JMP sealAVX2SealHash diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go new file mode 100644 index 0000000000..6313898f0a --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go @@ -0,0 +1,81 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20poly1305 + +import ( + "encoding/binary" + + "golang.org/x/crypto/chacha20" + "golang.org/x/crypto/internal/alias" + "golang.org/x/crypto/internal/poly1305" +) + +func writeWithPadding(p *poly1305.MAC, b []byte) { + p.Write(b) + if rem := len(b) % 16; rem != 0 { + var buf [16]byte + padLen := 16 - rem + p.Write(buf[:padLen]) + } +} + +func writeUint64(p *poly1305.MAC, n int) { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], uint64(n)) + p.Write(buf[:]) +} + +func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte { + ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize) + ciphertext, tag := out[:len(plaintext)], out[len(plaintext):] + if alias.InexactOverlap(out, plaintext) { + panic("chacha20poly1305: invalid buffer overlap") + } + + var polyKey [32]byte + s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.SetCounter(1) // set the counter to 1, skipping 32 bytes + 
s.XORKeyStream(ciphertext, plaintext) + + p := poly1305.New(&polyKey) + writeWithPadding(p, additionalData) + writeWithPadding(p, ciphertext) + writeUint64(p, len(additionalData)) + writeUint64(p, len(plaintext)) + p.Sum(tag[:0]) + + return ret +} + +func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + tag := ciphertext[len(ciphertext)-16:] + ciphertext = ciphertext[:len(ciphertext)-16] + + var polyKey [32]byte + s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.SetCounter(1) // set the counter to 1, skipping 32 bytes + + p := poly1305.New(&polyKey) + writeWithPadding(p, additionalData) + writeWithPadding(p, ciphertext) + writeUint64(p, len(additionalData)) + writeUint64(p, len(ciphertext)) + + ret, out := sliceForAppend(dst, len(ciphertext)) + if alias.InexactOverlap(out, ciphertext) { + panic("chacha20poly1305: invalid buffer overlap") + } + if !p.Verify(tag) { + for i := range out { + out[i] = 0 + } + return nil, errOpen + } + + s.XORKeyStream(out, ciphertext) + return ret, nil +} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go new file mode 100644 index 0000000000..f832b33d45 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !amd64 || !gc || purego +// +build !amd64 !gc purego + +package chacha20poly1305 + +func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { + return c.sealGeneric(dst, nonce, plaintext, additionalData) +} + +func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + return c.openGeneric(dst, nonce, ciphertext, additionalData) +} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go new file mode 100644 index 0000000000..1cebfe946f --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go @@ -0,0 +1,86 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20poly1305 + +import ( + "crypto/cipher" + "errors" + + "golang.org/x/crypto/chacha20" +) + +type xchacha20poly1305 struct { + key [KeySize]byte +} + +// NewX returns a XChaCha20-Poly1305 AEAD that uses the given 256-bit key. +// +// XChaCha20-Poly1305 is a ChaCha20-Poly1305 variant that takes a longer nonce, +// suitable to be generated randomly without risk of collisions. It should be +// preferred when nonce uniqueness cannot be trivially ensured, or whenever +// nonces are randomly generated. 
+func NewX(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(xchacha20poly1305) + copy(ret.key[:], key) + return ret, nil +} + +func (*xchacha20poly1305) NonceSize() int { + return NonceSizeX +} + +func (*xchacha20poly1305) Overhead() int { + return Overhead +} + +func (x *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSizeX { + panic("chacha20poly1305: bad nonce length passed to Seal") + } + + // XChaCha20-Poly1305 technically supports a 64-bit counter, so there is no + // size limit. However, since we reuse the ChaCha20-Poly1305 implementation, + // the second half of the counter is not available. This is unlikely to be + // an issue because the cipher.AEAD API requires the entire message to be in + // memory, and the counter overflows at 256 GB. + if uint64(len(plaintext)) > (1<<38)-64 { + panic("chacha20poly1305: plaintext too large") + } + + c := new(chacha20poly1305) + hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) + copy(c.key[:], hKey) + + // The first 4 bytes of the final nonce are unused counter space. + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + + return c.seal(dst, cNonce[:], plaintext, additionalData) +} + +func (x *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSizeX { + panic("chacha20poly1305: bad nonce length passed to Open") + } + if len(ciphertext) < 16 { + return nil, errOpen + } + if uint64(len(ciphertext)) > (1<<38)-48 { + panic("chacha20poly1305: ciphertext too large") + } + + c := new(chacha20poly1305) + hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) + copy(c.key[:], hKey) + + // The first 4 bytes of the final nonce are unused counter space. 
+ cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + + return c.open(dst, cNonce[:], ciphertext, additionalData) +} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 0000000000..6fc2838a3f --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,824 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. 
+func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. +func (b *Builder) AddASN1UTCTime(t time.Time) { + b.AddASN1(asn1.UTCTime, func(c *Builder) { + // As utilized by the X.509 profile, UTCTime can only + // represent the years 1950 through 2049. + if t.Year() < 1950 || t.Year() >= 2050 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) + return + } + c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. 
+func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). 
The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. +func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean +// representation into out and advances. It reports whether the read +// was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer, to a big.Int, or to a []byte it panics. Only +// positive and zero values can be decoded into []byte, and they are returned as +// big-endian binary values that share memory with s. Positive values will have +// no leading zeroes, and zero will be returned as a single zero byte. +// ReadASN1Integer reports whether the read was successful. 
+func (s *String) ReadASN1Integer(out interface{}) bool { + switch out := out.(type) { + case *int, *int8, *int16, *int32, *int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case *uint, *uint8, *uint16, *uint32, *uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case *big.Int: + return s.readASN1BigInt(out) + case *[]byte: + return s.readASN1Bytes(out) + default: + panic("out does not point to an integer type") + } +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. 
+ neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Bytes(out *[]byte) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + return false + } + for len(bytes) > 1 && bytes[0] == 0 { + bytes = bytes[1:] + } + *out = bytes + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. + *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. 
It reports +// whether the read was successful. +func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 5 { + return false + } + // Avoid overflowing int on a 32-bit platform. + // We don't want different behavior based on the architecture. + if ret >= 1<<(31-7) { + return false + } + ret <<= 7 + b := s.read(1)[0] + + // ITU-T X.690, section 8.19.2: + // The subidentifier shall be encoded in the fewest possible octets, + // that is, the leading octet of the subidentifier shall not have the value 0x80. + if i == 0 && b == 0x80 { + return false + } + + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. 
+ var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +const defaultUTCTimeFormatStr = "060102150405Z0700" + +// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1UTCTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.UTCTime) { + return false + } + t := string(bytes) + + formatStr := defaultUTCTimeFormatStr + var err error + res, err := time.Parse(formatStr, t) + if err != nil { + // Fallback to minute precision if we can't parse second + // precision. If we are following X.509 or X.690 we shouldn't + // support this, but we do. + formatStr = "0601021504Z0700" + res, err = time.Parse(formatStr, t) + } + if err != nil { + return false + } + + if serialized := res.Format(formatStr); serialized != t { + return false + } + + if res.Year() >= 2050 { + // UTCTime interprets the low order digits 50-99 as 1950-99. + // This only applies to its use in the X.509 profile. 
+ // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + res = res.AddDate(-100, 0, 0) + } + *out = res + return true +} + +// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 || + len(bytes)*8/8 != len(bytes) { + return false + } + + paddingBits := bytes[0] + bytes = bytes[1:] + if paddingBits > 7 || + len(bytes) == 0 && paddingBits != 0 || + len(bytes) > 0 && bytes[len(bytes)-1]&(1< 4 || len(*s) < int(2+lenLen) { + return false + } + + lenBytes := String((*s)[2 : 2+lenLen]) + if !lenBytes.readUnsigned(&len32, int(lenLen)) { + return false + } + + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. + if len32 < 128 { + // Length should have used short-form encoding. + return false + } + if len32>>((lenLen-1)*8) == 0 { + // Leading octet is 0. Length should have been at least one byte shorter. + return false + } + + headerLen = 2 + uint32(lenLen) + if headerLen+len32 < len32 { + // Overflow. + return false + } + length = headerLen + len32 + } + + if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) { + return false + } + if skipHeader && !out.Skip(int(headerLen)) { + panic("cryptobyte: internal error") + } + + return true +} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go new file mode 100644 index 0000000000..cda8e3edfd --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package asn1 contains supporting types for parsing and building ASN.1 +// messages with the cryptobyte package. +package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" + +// Tag represents an ASN.1 identifier octet, consisting of a tag number +// (indicating a type) and class (such as context-specific or constructed). +// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. +func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. +const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/builder.go b/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 0000000000..c05ac7d16d --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,345 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. 
+// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. 
+func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. +func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint64 appends a big-endian, 64-bit value to the byte string. +func (b *Builder) AddUint64(v uint64) { + b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. 
+// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. +func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) 
+ + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) 
+ childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back non-negative n bytes written directly to the Builder. +// An attempt by a child builder passed to a continuation to unwrite bytes +// from its parent will panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n < 0 { + panic("cryptobyte: attempted to unwrite negative number of bytes") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. 
It may return an error that occurred during + // marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/string.go b/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/string.go new file mode 100644 index 0000000000..0531a3d6f1 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -0,0 +1,172 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cryptobyte contains types that help with parsing and constructing +// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage +// contains useful ASN.1 constants.) +// +// The String type is for parsing. It wraps a []byte slice and provides helper +// functions for consuming structures, value by value. +// +// The Builder type is for constructing messages. It providers helper functions +// for appending values and also for appending length-prefixed submessages – +// without having to worry about calculating the length prefix ahead of time. +// +// See the documentation and examples for the Builder and String types to get +// started. +package cryptobyte // import "golang.org/x/crypto/cryptobyte" + +// String represents a string of bytes. It provides methods for parsing +// fixed-length and length-prefixed values from it. +type String []byte + +// read advances a String by n bytes and returns them. If less than n bytes +// remain, it returns nil. 
+func (s *String) read(n int) []byte { + if len(*s) < n || n < 0 { + return nil + } + v := (*s)[:n] + *s = (*s)[n:] + return v +} + +// Skip advances the String by n byte and reports whether it was successful. +func (s *String) Skip(n int) bool { + return s.read(n) != nil +} + +// ReadUint8 decodes an 8-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint8(out *uint8) bool { + v := s.read(1) + if v == nil { + return false + } + *out = uint8(v[0]) + return true +} + +// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint16(out *uint16) bool { + v := s.read(2) + if v == nil { + return false + } + *out = uint16(v[0])<<8 | uint16(v[1]) + return true +} + +// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it. +// It reports whether the read was successful. 
+func (s *String) ReadUint64(out *uint64) bool { + v := s.read(8) + if v == nil { + return false + } + *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. 
+func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/curve25519.go b/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/curve25519.go deleted file mode 100644 index cda3fdd354..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package curve25519 provides an implementation of the X25519 function, which -// performs scalar multiplication on the elliptic curve known as Curve25519. -// See RFC 7748. -package curve25519 // import "golang.org/x/crypto/curve25519" - -import ( - "crypto/subtle" - "fmt" - - "golang.org/x/crypto/curve25519/internal/field" -) - -// ScalarMult sets dst to the product scalar * point. -// -// Deprecated: when provided a low-order point, ScalarMult will set dst to all -// zeroes, irrespective of the scalar. Instead, use the X25519 function, which -// will return an error. 
-func ScalarMult(dst, scalar, point *[32]byte) { - var e [32]byte - - copy(e[:], scalar[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element - x1.SetBytes(point[:]) - x2.One() - x3.Set(&x1) - z3.One() - - swap := 0 - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int(b) - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - swap = int(b) - - tmp0.Subtract(&x3, &z3) - tmp1.Subtract(&x2, &z2) - x2.Add(&x2, &z2) - z2.Add(&x3, &z3) - z3.Multiply(&tmp0, &x2) - z2.Multiply(&z2, &tmp1) - tmp0.Square(&tmp1) - tmp1.Square(&x2) - x3.Add(&z3, &z2) - z2.Subtract(&z3, &z2) - x2.Multiply(&tmp1, &tmp0) - tmp1.Subtract(&tmp1, &tmp0) - z2.Square(&z2) - - z3.Mult32(&tmp1, 121666) - x3.Square(&x3) - tmp0.Add(&tmp0, &z3) - z3.Multiply(&x1, &z2) - z2.Multiply(&tmp1, &tmp0) - } - - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - - z2.Invert(&z2) - x2.Multiply(&x2, &z2) - copy(dst[:], x2.Bytes()) -} - -// ScalarBaseMult sets dst to the product scalar * base where base is the -// standard generator. -// -// It is recommended to use the X25519 function with Basepoint instead, as -// copying into fixed size arrays can lead to unexpected bugs. -func ScalarBaseMult(dst, scalar *[32]byte) { - ScalarMult(dst, scalar, &basePoint) -} - -const ( - // ScalarSize is the size of the scalar input to X25519. - ScalarSize = 32 - // PointSize is the size of the point input to X25519. - PointSize = 32 -) - -// Basepoint is the canonical Curve25519 generator. 
-var Basepoint []byte - -var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} - -func init() { Basepoint = basePoint[:] } - -func checkBasepoint() { - if subtle.ConstantTimeCompare(Basepoint, []byte{ - 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }) != 1 { - panic("curve25519: global Basepoint value was modified") - } -} - -// X25519 returns the result of the scalar multiplication (scalar * point), -// according to RFC 7748, Section 5. scalar, point and the return value are -// slices of 32 bytes. -// -// scalar can be generated at random, for example with crypto/rand. point should -// be either Basepoint or the output of another X25519 call. -// -// If point is Basepoint (but not if it's a different slice with the same -// contents) a precomputed implementation might be used for performance. -func X25519(scalar, point []byte) ([]byte, error) { - // Outline the body of function, to let the allocation be inlined in the - // caller, and possibly avoid escaping to the heap. 
- var dst [32]byte - return x25519(&dst, scalar, point) -} - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - var in [32]byte - if l := len(scalar); l != 32 { - return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32) - } - if l := len(point); l != 32 { - return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32) - } - copy(in[:], scalar) - if &point[0] == &Basepoint[0] { - checkBasepoint() - ScalarBaseMult(dst, &in) - } else { - var base, zero [32]byte - copy(base[:], point) - ScalarMult(dst, &in, &base) - if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { - return nil, fmt.Errorf("bad input point: low order point") - } - } - return dst[:], nil -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/README b/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/README deleted file mode 100644 index e25bca7dc8..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/README +++ /dev/null @@ -1,7 +0,0 @@ -This package is kept in sync with crypto/ed25519/internal/edwards25519/field in -the standard library. - -If there are any changes in the standard library that need to be synced to this -package, run sync.sh. It will not overwrite any local changes made since the -previous sync, so it's ok to land changes in this package first, and then sync -to the standard library later. diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go b/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go deleted file mode 100644 index ca841ad99e..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package field implements fast arithmetic modulo 2^255-19. -package field - -import ( - "crypto/subtle" - "encoding/binary" - "math/bits" -) - -// Element represents an element of the field GF(2^255-19). Note that this -// is not a cryptographically secure group, and should only be used to interact -// with edwards25519.Point coordinates. -// -// This type works similarly to math/big.Int, and all arguments and receivers -// are allowed to alias. -// -// The zero value is a valid zero element. -type Element struct { - // An element t represents the integer - // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 - // - // Between operations, all limbs are expected to be lower than 2^52. - l0 uint64 - l1 uint64 - l2 uint64 - l3 uint64 - l4 uint64 -} - -const maskLow51Bits uint64 = (1 << 51) - 1 - -var feZero = &Element{0, 0, 0, 0, 0} - -// Zero sets v = 0, and returns v. -func (v *Element) Zero() *Element { - *v = *feZero - return v -} - -var feOne = &Element{1, 0, 0, 0, 0} - -// One sets v = 1, and returns v. -func (v *Element) One() *Element { - *v = *feOne - return v -} - -// reduce reduces v modulo 2^255 - 19 and returns it. -func (v *Element) reduce() *Element { - v.carryPropagate() - - // After the light reduction we now have a field element representation - // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. - - // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, - // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. - c := (v.l0 + 19) >> 51 - c = (v.l1 + c) >> 51 - c = (v.l2 + c) >> 51 - c = (v.l3 + c) >> 51 - c = (v.l4 + c) >> 51 - - // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's - // effectively applying the reduction identity to the carry. 
- v.l0 += 19 * c - - v.l1 += v.l0 >> 51 - v.l0 = v.l0 & maskLow51Bits - v.l2 += v.l1 >> 51 - v.l1 = v.l1 & maskLow51Bits - v.l3 += v.l2 >> 51 - v.l2 = v.l2 & maskLow51Bits - v.l4 += v.l3 >> 51 - v.l3 = v.l3 & maskLow51Bits - // no additional carry - v.l4 = v.l4 & maskLow51Bits - - return v -} - -// Add sets v = a + b, and returns v. -func (v *Element) Add(a, b *Element) *Element { - v.l0 = a.l0 + b.l0 - v.l1 = a.l1 + b.l1 - v.l2 = a.l2 + b.l2 - v.l3 = a.l3 + b.l3 - v.l4 = a.l4 + b.l4 - // Using the generic implementation here is actually faster than the - // assembly. Probably because the body of this function is so simple that - // the compiler can figure out better optimizations by inlining the carry - // propagation. TODO - return v.carryPropagateGeneric() -} - -// Subtract sets v = a - b, and returns v. -func (v *Element) Subtract(a, b *Element) *Element { - // We first add 2 * p, to guarantee the subtraction won't underflow, and - // then subtract b (which can be up to 2^255 + 2^13 * 19). - v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 - v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 - v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 - v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 - v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 - return v.carryPropagate() -} - -// Negate sets v = -a, and returns v. -func (v *Element) Negate(a *Element) *Element { - return v.Subtract(feZero, a) -} - -// Invert sets v = 1/z mod p, and returns v. -// -// If z == 0, Invert returns v = 0. -func (v *Element) Invert(z *Element) *Element { - // Inversion is implemented as exponentiation with exponent p − 2. It uses the - // same sequence of 255 squarings and 11 multiplications as [Curve25519]. 
- var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element - - z2.Square(z) // 2 - t.Square(&z2) // 4 - t.Square(&t) // 8 - z9.Multiply(&t, z) // 9 - z11.Multiply(&z9, &z2) // 11 - t.Square(&z11) // 22 - z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 - - t.Square(&z2_5_0) // 2^6 - 2^1 - for i := 0; i < 4; i++ { - t.Square(&t) // 2^10 - 2^5 - } - z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 - - t.Square(&z2_10_0) // 2^11 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^20 - 2^10 - } - z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 - - t.Square(&z2_20_0) // 2^21 - 2^1 - for i := 0; i < 19; i++ { - t.Square(&t) // 2^40 - 2^20 - } - t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 - - t.Square(&t) // 2^41 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^50 - 2^10 - } - z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 - - t.Square(&z2_50_0) // 2^51 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^100 - 2^50 - } - z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 - - t.Square(&z2_100_0) // 2^101 - 2^1 - for i := 0; i < 99; i++ { - t.Square(&t) // 2^200 - 2^100 - } - t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 - - t.Square(&t) // 2^201 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^250 - 2^50 - } - t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 - - t.Square(&t) // 2^251 - 2^1 - t.Square(&t) // 2^252 - 2^2 - t.Square(&t) // 2^253 - 2^3 - t.Square(&t) // 2^254 - 2^4 - t.Square(&t) // 2^255 - 2^5 - - return v.Multiply(&t, &z11) // 2^255 - 21 -} - -// Set sets v = a, and returns v. -func (v *Element) Set(a *Element) *Element { - *v = *a - return v -} - -// SetBytes sets v to x, which must be a 32-byte little-endian encoding. -// -// Consistent with RFC 7748, the most significant bit (the high bit of the -// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1) -// are accepted. Note that this is laxer than specified by RFC 8032. 
-func (v *Element) SetBytes(x []byte) *Element { - if len(x) != 32 { - panic("edwards25519: invalid field element input size") - } - - // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51). - v.l0 = binary.LittleEndian.Uint64(x[0:8]) - v.l0 &= maskLow51Bits - // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51). - v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3 - v.l1 &= maskLow51Bits - // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51). - v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6 - v.l2 &= maskLow51Bits - // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51). - v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1 - v.l3 &= maskLow51Bits - // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51). - // Note: not bytes 25:33, shift 4, to avoid overread. - v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12 - v.l4 &= maskLow51Bits - - return v -} - -// Bytes returns the canonical 32-byte little-endian encoding of v. -func (v *Element) Bytes() []byte { - // This function is outlined to make the allocations inline in the caller - // rather than happen on the heap. - var out [32]byte - return v.bytes(&out) -} - -func (v *Element) bytes(out *[32]byte) []byte { - t := *v - t.reduce() - - var buf [8]byte - for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} { - bitsOffset := i * 51 - binary.LittleEndian.PutUint64(buf[:], l<= len(out) { - break - } - out[off] |= bb - } - } - - return out[:] -} - -// Equal returns 1 if v and u are equal, and 0 otherwise. -func (v *Element) Equal(u *Element) int { - sa, sv := u.Bytes(), v.Bytes() - return subtle.ConstantTimeCompare(sa, sv) -} - -// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise. -func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) } - -// Select sets v to a if cond == 1, and to b if cond == 0. 
-func (v *Element) Select(a, b *Element, cond int) *Element { - m := mask64Bits(cond) - v.l0 = (m & a.l0) | (^m & b.l0) - v.l1 = (m & a.l1) | (^m & b.l1) - v.l2 = (m & a.l2) | (^m & b.l2) - v.l3 = (m & a.l3) | (^m & b.l3) - v.l4 = (m & a.l4) | (^m & b.l4) - return v -} - -// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v. -func (v *Element) Swap(u *Element, cond int) { - m := mask64Bits(cond) - t := m & (v.l0 ^ u.l0) - v.l0 ^= t - u.l0 ^= t - t = m & (v.l1 ^ u.l1) - v.l1 ^= t - u.l1 ^= t - t = m & (v.l2 ^ u.l2) - v.l2 ^= t - u.l2 ^= t - t = m & (v.l3 ^ u.l3) - v.l3 ^= t - u.l3 ^= t - t = m & (v.l4 ^ u.l4) - v.l4 ^= t - u.l4 ^= t -} - -// IsNegative returns 1 if v is negative, and 0 otherwise. -func (v *Element) IsNegative() int { - return int(v.Bytes()[0] & 1) -} - -// Absolute sets v to |u|, and returns v. -func (v *Element) Absolute(u *Element) *Element { - return v.Select(new(Element).Negate(u), u, u.IsNegative()) -} - -// Multiply sets v = x * y, and returns v. -func (v *Element) Multiply(x, y *Element) *Element { - feMul(v, x, y) - return v -} - -// Square sets v = x * x, and returns v. -func (v *Element) Square(x *Element) *Element { - feSquare(v, x) - return v -} - -// Mult32 sets v = x * y, and returns v. -func (v *Element) Mult32(x *Element, y uint32) *Element { - x0lo, x0hi := mul51(x.l0, y) - x1lo, x1hi := mul51(x.l1, y) - x2lo, x2hi := mul51(x.l2, y) - x3lo, x3hi := mul51(x.l3, y) - x4lo, x4hi := mul51(x.l4, y) - v.l0 = x0lo + 19*x4hi // carried over per the reduction identity - v.l1 = x1lo + x0hi - v.l2 = x2lo + x1hi - v.l3 = x3lo + x2hi - v.l4 = x4lo + x3hi - // The hi portions are going to be only 32 bits, plus any previous excess, - // so we can skip the carry propagation. - return v -} - -// mul51 returns lo + hi * 2⁵¹ = a * b. 
-func mul51(a uint64, b uint32) (lo uint64, hi uint64) { - mh, ml := bits.Mul64(a, uint64(b)) - lo = ml & maskLow51Bits - hi = (mh << 13) | (ml >> 51) - return -} - -// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3. -func (v *Element) Pow22523(x *Element) *Element { - var t0, t1, t2 Element - - t0.Square(x) // x^2 - t1.Square(&t0) // x^4 - t1.Square(&t1) // x^8 - t1.Multiply(x, &t1) // x^9 - t0.Multiply(&t0, &t1) // x^11 - t0.Square(&t0) // x^22 - t0.Multiply(&t1, &t0) // x^31 - t1.Square(&t0) // x^62 - for i := 1; i < 5; i++ { // x^992 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1 - t1.Square(&t0) // 2^11 - 2 - for i := 1; i < 10; i++ { // 2^20 - 2^10 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^20 - 1 - t2.Square(&t1) // 2^21 - 2 - for i := 1; i < 20; i++ { // 2^40 - 2^20 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^40 - 1 - t1.Square(&t1) // 2^41 - 2 - for i := 1; i < 10; i++ { // 2^50 - 2^10 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^50 - 1 - t1.Square(&t0) // 2^51 - 2 - for i := 1; i < 50; i++ { // 2^100 - 2^50 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^100 - 1 - t2.Square(&t1) // 2^101 - 2 - for i := 1; i < 100; i++ { // 2^200 - 2^100 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^200 - 1 - t1.Square(&t1) // 2^201 - 2 - for i := 1; i < 50; i++ { // 2^250 - 2^50 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^250 - 1 - t0.Square(&t0) // 2^251 - 2 - t0.Square(&t0) // 2^252 - 4 - return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3) -} - -// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion. -var sqrtM1 = &Element{1718705420411056, 234908883556509, - 2233514472574048, 2117202627021982, 765476049583133} - -// SqrtRatio sets r to the non-negative square root of the ratio of u and v. -// -// If u/v is square, SqrtRatio returns r and 1. 
If u/v is not square, SqrtRatio -// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00, -// and returns r and 0. -func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) { - var a, b Element - - // r = (u * v3) * (u * v7)^((p-5)/8) - v2 := a.Square(v) - uv3 := b.Multiply(u, b.Multiply(v2, v)) - uv7 := a.Multiply(uv3, a.Square(v2)) - r.Multiply(uv3, r.Pow22523(uv7)) - - check := a.Multiply(v, a.Square(r)) // check = v * r^2 - - uNeg := b.Negate(u) - correctSignSqrt := check.Equal(u) - flippedSignSqrt := check.Equal(uNeg) - flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1)) - - rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r - // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) - r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI) - - r.Absolute(r) // Choose the nonnegative square root. - return r, correctSignSqrt | flippedSignSqrt -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go deleted file mode 100644 index edcf163c4e..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -package field - -// feMul sets out = a * b. It works like feMulGeneric. -// -//go:noescape -func feMul(out *Element, a *Element, b *Element) - -// feSquare sets out = a * a. It works like feSquareGeneric. 
-// -//go:noescape -func feSquare(out *Element, a *Element) diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s deleted file mode 100644 index 293f013c94..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s +++ /dev/null @@ -1,379 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -#include "textflag.h" - -// func feMul(out *Element, a *Element, b *Element) -TEXT ·feMul(SB), NOSPLIT, $0-24 - MOVQ a+8(FP), CX - MOVQ b+16(FP), BX - - // r0 = a0×b0 - MOVQ (CX), AX - MULQ (BX) - MOVQ AX, DI - MOVQ DX, SI - - // r0 += 19×a1×b4 - MOVQ 8(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a2×b3 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a3×b2 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a4×b1 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 8(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r1 = a0×b1 - MOVQ (CX), AX - MULQ 8(BX) - MOVQ AX, R9 - MOVQ DX, R8 - - // r1 += a1×b0 - MOVQ 8(CX), AX - MULQ (BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a2×b4 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a3×b3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a4×b2 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r2 = a0×b2 - MOVQ (CX), AX - MULQ 16(BX) - MOVQ AX, R11 - MOVQ DX, R10 - - // r2 += a1×b1 - MOVQ 8(CX), AX - MULQ 8(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += a2×b0 - MOVQ 16(CX), AX - MULQ (BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += 19×a3×b4 - MOVQ 
24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += 19×a4×b3 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r3 = a0×b3 - MOVQ (CX), AX - MULQ 24(BX) - MOVQ AX, R13 - MOVQ DX, R12 - - // r3 += a1×b2 - MOVQ 8(CX), AX - MULQ 16(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a2×b1 - MOVQ 16(CX), AX - MULQ 8(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a3×b0 - MOVQ 24(CX), AX - MULQ (BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += 19×a4×b4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r4 = a0×b4 - MOVQ (CX), AX - MULQ 32(BX) - MOVQ AX, R15 - MOVQ DX, R14 - - // r4 += a1×b3 - MOVQ 8(CX), AX - MULQ 24(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a2×b2 - MOVQ 16(CX), AX - MULQ 16(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a3×b1 - MOVQ 24(CX), AX - MULQ 8(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a4×b0 - MOVQ 32(CX), AX - MULQ (BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, DI, SI - SHLQ $0x0d, R9, R8 - SHLQ $0x0d, R11, R10 - SHLQ $0x0d, R13, R12 - SHLQ $0x0d, R15, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ SI, R9 - ANDQ AX, R11 - ADDQ R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Second reduction chain (carryPropagate) - MOVQ DI, SI - SHRQ $0x33, SI - MOVQ R9, R8 - SHRQ $0x33, R8 - MOVQ R11, R10 - SHRQ $0x33, R10 - MOVQ R13, R12 - SHRQ $0x33, R12 - MOVQ R15, R14 - SHRQ $0x33, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ SI, R9 - ANDQ AX, R11 - ADDQ R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Store output - MOVQ out+0(FP), AX - MOVQ DI, (AX) - MOVQ R9, 8(AX) - MOVQ R11, 16(AX) - MOVQ R13, 24(AX) - MOVQ R15, 32(AX) - RET - -// func feSquare(out *Element, a *Element) -TEXT ·feSquare(SB), NOSPLIT, $0-16 - MOVQ a+8(FP), CX - - // r0 = l0×l0 - MOVQ 
(CX), AX - MULQ (CX) - MOVQ AX, SI - MOVQ DX, BX - - // r0 += 38×l1×l4 - MOVQ 8(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r0 += 38×l2×l3 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 24(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r1 = 2×l0×l1 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 8(CX) - MOVQ AX, R8 - MOVQ DX, DI - - // r1 += 38×l2×l4 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r1 += 19×l3×l3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r2 = 2×l0×l2 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 16(CX) - MOVQ AX, R10 - MOVQ DX, R9 - - // r2 += l1×l1 - MOVQ 8(CX), AX - MULQ 8(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r2 += 38×l3×l4 - MOVQ 24(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r3 = 2×l0×l3 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 24(CX) - MOVQ AX, R12 - MOVQ DX, R11 - - // r3 += 2×l1×l2 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 16(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r3 += 19×l4×l4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r4 = 2×l0×l4 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 32(CX) - MOVQ AX, R14 - MOVQ DX, R13 - - // r4 += 2×l1×l3 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 24(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // r4 += l2×l2 - MOVQ 16(CX), AX - MULQ 16(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, SI, BX - SHLQ $0x0d, R8, DI - SHLQ $0x0d, R10, R9 - SHLQ $0x0d, R12, R11 - SHLQ $0x0d, R14, R13 - ANDQ AX, SI - IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Second reduction chain (carryPropagate) - MOVQ SI, BX - SHRQ $0x33, BX - MOVQ R8, DI - SHRQ $0x33, DI - MOVQ R10, R9 - SHRQ $0x33, R9 - MOVQ R12, R11 - SHRQ $0x33, R11 - MOVQ R14, R13 - SHRQ $0x33, R13 - ANDQ AX, SI 
- IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Store output - MOVQ out+0(FP), AX - MOVQ SI, (AX) - MOVQ R8, 8(AX) - MOVQ R10, 16(AX) - MOVQ R12, 24(AX) - MOVQ R14, 32(AX) - RET diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go deleted file mode 100644 index ddb6c9b8f7..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || !gc || purego -// +build !amd64 !gc purego - -package field - -func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } - -func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go deleted file mode 100644 index af459ef515..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build arm64 && gc && !purego -// +build arm64,gc,!purego - -package field - -//go:noescape -func carryPropagate(v *Element) - -func (v *Element) carryPropagate() *Element { - carryPropagate(v) - return v -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s b/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s deleted file mode 100644 index 5c91e45892..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego -// +build arm64,gc,!purego - -#include "textflag.h" - -// carryPropagate works exactly like carryPropagateGeneric and uses the -// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but -// avoids loading R0-R4 twice and uses LDP and STP. -// -// See https://golang.org/issues/43145 for the main compiler issue. 
-// -// func carryPropagate(v *Element) -TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 - MOVD v+0(FP), R20 - - LDP 0(R20), (R0, R1) - LDP 16(R20), (R2, R3) - MOVD 32(R20), R4 - - AND $0x7ffffffffffff, R0, R10 - AND $0x7ffffffffffff, R1, R11 - AND $0x7ffffffffffff, R2, R12 - AND $0x7ffffffffffff, R3, R13 - AND $0x7ffffffffffff, R4, R14 - - ADD R0>>51, R11, R11 - ADD R1>>51, R12, R12 - ADD R2>>51, R13, R13 - ADD R3>>51, R14, R14 - // R4>>51 * 19 + R10 -> R10 - LSR $51, R4, R21 - MOVD $19, R22 - MADD R22, R10, R21, R10 - - STP (R10, R11), 0(R20) - STP (R12, R13), 16(R20) - MOVD R14, 32(R20) - - RET diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go deleted file mode 100644 index 234a5b2e5d..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !arm64 || !gc || purego -// +build !arm64 !gc purego - -package field - -func (v *Element) carryPropagate() *Element { - return v.carryPropagateGeneric() -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go deleted file mode 100644 index 7b5b78cbd6..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package field - -import "math/bits" - -// uint128 holds a 128-bit number as two 64-bit limbs, for use with the -// bits.Mul64 and bits.Add64 intrinsics. -type uint128 struct { - lo, hi uint64 -} - -// mul64 returns a * b. -func mul64(a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - return uint128{lo, hi} -} - -// addMul64 returns v + a * b. -func addMul64(v uint128, a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - lo, c := bits.Add64(lo, v.lo, 0) - hi, _ = bits.Add64(hi, v.hi, c) - return uint128{lo, hi} -} - -// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits. -func shiftRightBy51(a uint128) uint64 { - return (a.hi << (64 - 51)) | (a.lo >> 51) -} - -func feMulGeneric(v, a, b *Element) { - a0 := a.l0 - a1 := a.l1 - a2 := a.l2 - a3 := a.l3 - a4 := a.l4 - - b0 := b.l0 - b1 := b.l1 - b2 := b.l2 - b3 := b.l3 - b4 := b.l4 - - // Limb multiplication works like pen-and-paper columnar multiplication, but - // with 51-bit limbs instead of digits. - // - // a4 a3 a2 a1 a0 x - // b4 b3 b2 b1 b0 = - // ------------------------ - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a4b1 a3b1 a2b1 a1b1 a0b1 + - // a4b2 a3b2 a2b2 a1b2 a0b2 + - // a4b3 a3b3 a2b3 a1b3 a0b3 + - // a4b4 a3b4 a2b4 a1b4 a0b4 = - // ---------------------------------------------- - // r8 r7 r6 r5 r4 r3 r2 r1 r0 - // - // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to - // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5, - // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc. - // - // Reduction can be carried out simultaneously to multiplication. For - // example, we do not compute r5: whenever the result of a multiplication - // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0. 
- // - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a3b1 a2b1 a1b1 a0b1 19×a4b1 + - // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 + - // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 + - // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 = - // -------------------------------------- - // r4 r3 r2 r1 r0 - // - // Finally we add up the columns into wide, overlapping limbs. - - a1_19 := a1 * 19 - a2_19 := a2 * 19 - a3_19 := a3 * 19 - a4_19 := a4 * 19 - - // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) - r0 := mul64(a0, b0) - r0 = addMul64(r0, a1_19, b4) - r0 = addMul64(r0, a2_19, b3) - r0 = addMul64(r0, a3_19, b2) - r0 = addMul64(r0, a4_19, b1) - - // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2) - r1 := mul64(a0, b1) - r1 = addMul64(r1, a1, b0) - r1 = addMul64(r1, a2_19, b4) - r1 = addMul64(r1, a3_19, b3) - r1 = addMul64(r1, a4_19, b2) - - // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3) - r2 := mul64(a0, b2) - r2 = addMul64(r2, a1, b1) - r2 = addMul64(r2, a2, b0) - r2 = addMul64(r2, a3_19, b4) - r2 = addMul64(r2, a4_19, b3) - - // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4 - r3 := mul64(a0, b3) - r3 = addMul64(r3, a1, b2) - r3 = addMul64(r3, a2, b1) - r3 = addMul64(r3, a3, b0) - r3 = addMul64(r3, a4_19, b4) - - // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 - r4 := mul64(a0, b4) - r4 = addMul64(r4, a1, b3) - r4 = addMul64(r4, a2, b2) - r4 = addMul64(r4, a3, b1) - r4 = addMul64(r4, a4, b0) - - // After the multiplication, we need to reduce (carry) the five coefficients - // to obtain a result with limbs that are at most slightly larger than 2⁵¹, - // to respect the Element invariant. - // - // Overall, the reduction works the same as carryPropagate, except with - // wider inputs: we take the carry for each coefficient by shifting it right - // by 51, and add it to the limb above it. The top carry is multiplied by 19 - // according to the reduction identity and added to the lowest limb. 
- // - // The largest coefficient (r0) will be at most 111 bits, which guarantees - // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64. - // - // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) - // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²) - // r0 < (1 + 19 × 4) × 2⁵² × 2⁵² - // r0 < 2⁷ × 2⁵² × 2⁵² - // r0 < 2¹¹¹ - // - // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most - // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and - // allows us to easily apply the reduction identity. - // - // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 - // r4 < 5 × 2⁵² × 2⁵² - // r4 < 2¹⁰⁷ - // - - c0 := shiftRightBy51(r0) - c1 := shiftRightBy51(r1) - c2 := shiftRightBy51(r2) - c3 := shiftRightBy51(r3) - c4 := shiftRightBy51(r4) - - rr0 := r0.lo&maskLow51Bits + c4*19 - rr1 := r1.lo&maskLow51Bits + c0 - rr2 := r2.lo&maskLow51Bits + c1 - rr3 := r3.lo&maskLow51Bits + c2 - rr4 := r4.lo&maskLow51Bits + c3 - - // Now all coefficients fit into 64-bit registers but are still too large to - // be passed around as a Element. We therefore do one last carry chain, - // where the carries will be small enough to fit in the wiggle room above 2⁵¹. - *v = Element{rr0, rr1, rr2, rr3, rr4} - v.carryPropagate() -} - -func feSquareGeneric(v, a *Element) { - l0 := a.l0 - l1 := a.l1 - l2 := a.l2 - l3 := a.l3 - l4 := a.l4 - - // Squaring works precisely like multiplication above, but thanks to its - // symmetry we get to group a few terms together. 
- // - // l4 l3 l2 l1 l0 x - // l4 l3 l2 l1 l0 = - // ------------------------ - // l4l0 l3l0 l2l0 l1l0 l0l0 + - // l4l1 l3l1 l2l1 l1l1 l0l1 + - // l4l2 l3l2 l2l2 l1l2 l0l2 + - // l4l3 l3l3 l2l3 l1l3 l0l3 + - // l4l4 l3l4 l2l4 l1l4 l0l4 = - // ---------------------------------------------- - // r8 r7 r6 r5 r4 r3 r2 r1 r0 - // - // l4l0 l3l0 l2l0 l1l0 l0l0 + - // l3l1 l2l1 l1l1 l0l1 19×l4l1 + - // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 + - // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 + - // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 = - // -------------------------------------- - // r4 r3 r2 r1 r0 - // - // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with - // only three Mul64 and four Add64, instead of five and eight. - - l0_2 := l0 * 2 - l1_2 := l1 * 2 - - l1_38 := l1 * 38 - l2_38 := l2 * 38 - l3_38 := l3 * 38 - - l3_19 := l3 * 19 - l4_19 := l4 * 19 - - // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3) - r0 := mul64(l0, l0) - r0 = addMul64(r0, l1_38, l4) - r0 = addMul64(r0, l2_38, l3) - - // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3 - r1 := mul64(l0_2, l1) - r1 = addMul64(r1, l2_38, l4) - r1 = addMul64(r1, l3_19, l3) - - // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4 - r2 := mul64(l0_2, l2) - r2 = addMul64(r2, l1, l1) - r2 = addMul64(r2, l3_38, l4) - - // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4 - r3 := mul64(l0_2, l3) - r3 = addMul64(r3, l1_2, l2) - r3 = addMul64(r3, l4_19, l4) - - // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2 - r4 := mul64(l0_2, l4) - r4 = addMul64(r4, l1_2, l3) - r4 = addMul64(r4, l2, l2) - - c0 := shiftRightBy51(r0) - c1 := shiftRightBy51(r1) - c2 := shiftRightBy51(r2) - c3 := shiftRightBy51(r3) - c4 := shiftRightBy51(r4) - - rr0 := r0.lo&maskLow51Bits + c4*19 - rr1 := r1.lo&maskLow51Bits + c0 - rr2 := r2.lo&maskLow51Bits + c1 - rr3 := r3.lo&maskLow51Bits + c2 - rr4 := 
r4.lo&maskLow51Bits + c3 - - *v = Element{rr0, rr1, rr2, rr3, rr4} - v.carryPropagate() -} - -// carryPropagate brings the limbs below 52 bits by applying the reduction -// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline -func (v *Element) carryPropagateGeneric() *Element { - c0 := v.l0 >> 51 - c1 := v.l1 >> 51 - c2 := v.l2 >> 51 - c3 := v.l3 >> 51 - c4 := v.l4 >> 51 - - v.l0 = v.l0&maskLow51Bits + c4*19 - v.l1 = v.l1&maskLow51Bits + c0 - v.l2 = v.l2&maskLow51Bits + c1 - v.l3 = v.l3&maskLow51Bits + c2 - v.l4 = v.l4&maskLow51Bits + c3 - - return v -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint b/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint deleted file mode 100644 index e3685f95ca..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint +++ /dev/null @@ -1 +0,0 @@ -b0c49ae9f59d233526f8934262c5bbbe14d4358d diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh b/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh deleted file mode 100644 index 1ba22a8b4c..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh +++ /dev/null @@ -1,19 +0,0 @@ -#! /bin/bash -set -euo pipefail - -cd "$(git rev-parse --show-toplevel)" - -STD_PATH=src/crypto/ed25519/internal/edwards25519/field -LOCAL_PATH=curve25519/internal/field -LAST_SYNC_REF=$(cat $LOCAL_PATH/sync.checkpoint) - -git fetch https://go.googlesource.com/go master - -if git diff --quiet $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH; then - echo "No changes." -else - NEW_REF=$(git rev-parse FETCH_HEAD | tee $LOCAL_PATH/sync.checkpoint) - echo "Applying changes from $LAST_SYNC_REF to $NEW_REF..." 
- git diff $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH | \ - git apply -3 --directory=$LOCAL_PATH -fi diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ed25519/ed25519.go b/terraform/providers/google/vendor/golang.org/x/crypto/ed25519/ed25519.go deleted file mode 100644 index a7828345fc..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. -// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. -// -// Beginning with Go 1.13, the functionality of this package was moved to the -// standard library as crypto/ed25519. This package only acts as a compatibility -// wrapper. -package ed25519 - -import ( - "crypto/ed25519" - "io" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -// -// This type is an alias for crypto/ed25519's PublicKey type. -// See the crypto/ed25519 package for the methods on this type. 
-type PublicKey = ed25519.PublicKey - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -// -// This type is an alias for crypto/ed25519's PrivateKey type. -// See the crypto/ed25519 package for the methods on this type. -type PrivateKey = ed25519.PrivateKey - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - return ed25519.GenerateKey(rand) -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - return ed25519.NewKeyFromSeed(seed) -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - return ed25519.Sign(privateKey, message) -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - return ed25519.Verify(publicKey, message, sig) -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/hkdf/hkdf.go b/terraform/providers/google/vendor/golang.org/x/crypto/hkdf/hkdf.go new file mode 100644 index 0000000000..dda3f143be --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation +// Function (HKDF) as defined in RFC 5869. 
+// +// HKDF is a cryptographic key derivation function (KDF) with the goal of +// expanding limited input keying material into one or more cryptographically +// strong secret keys. +package hkdf // import "golang.org/x/crypto/hkdf" + +import ( + "crypto/hmac" + "errors" + "hash" + "io" +) + +// Extract generates a pseudorandom key for use with Expand from an input secret +// and an optional independent salt. +// +// Only use this function if you need to reuse the extracted key with multiple +// Expand invocations and different context values. Most common scenarios, +// including the generation of multiple keys, should use New instead. +func Extract(hash func() hash.Hash, secret, salt []byte) []byte { + if salt == nil { + salt = make([]byte, hash().Size()) + } + extractor := hmac.New(hash, salt) + extractor.Write(secret) + return extractor.Sum(nil) +} + +type hkdf struct { + expander hash.Hash + size int + + info []byte + counter byte + + prev []byte + buf []byte +} + +func (f *hkdf) Read(p []byte) (int, error) { + // Check whether enough data can be generated + need := len(p) + remains := len(f.buf) + int(255-f.counter+1)*f.size + if remains < need { + return 0, errors.New("hkdf: entropy limit reached") + } + // Read any leftover from the buffer + n := copy(p, f.buf) + p = p[n:] + + // Fill the rest of the buffer + for len(p) > 0 { + f.expander.Reset() + f.expander.Write(f.prev) + f.expander.Write(f.info) + f.expander.Write([]byte{f.counter}) + f.prev = f.expander.Sum(f.prev[:0]) + f.counter++ + + // Copy the new batch into p + f.buf = f.prev + n = copy(p, f.buf) + p = p[n:] + } + // Save leftovers for next run + f.buf = f.buf[n:] + + return need, nil +} + +// Expand returns a Reader, from which keys can be read, using the given +// pseudorandom key and optional context info, skipping the extraction step. +// +// The pseudorandomKey should have been generated by Extract, or be a uniformly +// random or pseudorandom cryptographically strong key. 
See RFC 5869, Section +// 3.3. Most common scenarios will want to use New instead. +func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { + expander := hmac.New(hash, pseudorandomKey) + return &hkdf{expander, expander.Size(), info, 1, nil, nil} +} + +// New returns a Reader, from which keys can be read, using the given hash, +// secret, salt and context info. Salt and info can be nil. +func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { + prk := Extract(hash, secret, salt) + return Expand(hash, prk, info) +} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/internal/alias/alias.go b/terraform/providers/google/vendor/golang.org/x/crypto/internal/alias/alias.go new file mode 100644 index 0000000000..69c17f822b --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/crypto/internal/alias/alias.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego +// +build !purego + +// Package alias implements memory aliasing tests. +package alias + +import "unsafe" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && + uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. 
+func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/terraform/providers/google/vendor/golang.org/x/crypto/internal/alias/alias_purego.go new file mode 100644 index 0000000000..4775b0a438 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/crypto/internal/alias/alias_purego.go @@ -0,0 +1,35 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego +// +build purego + +// Package alias implements memory aliasing tests. +package alias + +// This is the Google App Engine standard variant based on reflect +// because the unsafe package and cgo are disallowed. + +import "reflect" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && + reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. 
+func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/terraform/providers/google/vendor/golang.org/x/crypto/internal/subtle/aliasing.go deleted file mode 100644 index 4fad24f8dc..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/internal/subtle/aliasing.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !purego -// +build !purego - -// Package subtle implements functions that are often useful in cryptographic -// code but require careful thought to use correctly. -package subtle // import "golang.org/x/crypto/internal/subtle" - -import "unsafe" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. -func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && - uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. 
-func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go b/terraform/providers/google/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go deleted file mode 100644 index 80ccbed2c0..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego -// +build purego - -// Package subtle implements functions that are often useful in cryptographic -// code but require careful thought to use correctly. -package subtle // import "golang.org/x/crypto/internal/subtle" - -// This is the Google App Engine standard variant based on reflect -// because the unsafe package and cgo are disallowed. - -import "reflect" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. -func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && - reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. 
-func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/armor/armor.go index be342ad473..8907183ec0 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/armor/armor.go @@ -156,7 +156,7 @@ func (r *openpgpReader) Read(p []byte) (n int, err error) { n, err = r.b64Reader.Read(p) r.currentCRC = crc24(r.currentCRC, p[:n]) - if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + if err == io.EOF && r.lReader.crcSet && r.lReader.crc != r.currentCRC&crc24Mask { return 0, ArmorCorrupt } diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/keys.go b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/keys.go index faa2fb3693..d62f787e9d 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/keys.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/keys.go @@ -61,7 +61,7 @@ type Key struct { type KeyRing interface { // KeysById returns the set of keys that have the given key id. KeysById(id uint64) []Key - // KeysByIdAndUsage returns the set of keys with the given id + // KeysByIdUsage returns the set of keys with the given id // that also meet the key usage given by requiredUsage. // The requiredUsage is expressed as the bitwise-OR of // packet.KeyFlag* values. @@ -183,7 +183,7 @@ func (el EntityList) KeysById(id uint64) (keys []Key) { return } -// KeysByIdAndUsage returns the set of keys with the given id that also meet +// KeysByIdUsage returns the set of keys with the given id that also meet // the key usage given by requiredUsage. The requiredUsage is expressed as // the bitwise-OR of packet.KeyFlag* values. 
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/compressed.go index e8f0b5caa7..353f945247 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/compressed.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/compressed.go @@ -60,7 +60,7 @@ func (c *Compressed) parse(r io.Reader) error { return err } -// compressedWriterCloser represents the serialized compression stream +// compressedWriteCloser represents the serialized compression stream // header and the compressor. Its Close() method ensures that both the // compressor and serialized stream header are closed. Its Write() // method writes to the compressor. diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/opaque.go index 456d807f25..3984477310 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/opaque.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/opaque.go @@ -7,7 +7,6 @@ package packet import ( "bytes" "io" - "io/ioutil" "golang.org/x/crypto/openpgp/errors" ) @@ -26,7 +25,7 @@ type OpaquePacket struct { } func (op *OpaquePacket) parse(r io.Reader) (err error) { - op.Contents, err = ioutil.ReadAll(r) + op.Contents, err = io.ReadAll(r) return } diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/private_key.go index 81abb7cef9..192aac376d 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/private_key.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/private_key.go @@ -13,7 +13,6 @@ import ( "crypto/rsa" "crypto/sha1" 
"io" - "io/ioutil" "math/big" "strconv" "time" @@ -133,7 +132,7 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) { } } - pk.encryptedData, err = ioutil.ReadAll(r) + pk.encryptedData, err = io.ReadAll(r) if err != nil { return } diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go index 6126030eb9..1a1a62964f 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go @@ -236,7 +236,7 @@ func (w *seMDCWriter) Close() (err error) { return w.w.Close() } -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +// noOpCloser is like an io.NopCloser, but for an io.Writer. type noOpCloser struct { w io.Writer } diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go index d19ffbc786..ff7ef53075 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go @@ -9,7 +9,6 @@ import ( "image" "image/jpeg" "io" - "io/ioutil" ) const UserAttrImageSubpacket = 1 @@ -56,7 +55,7 @@ func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { func (uat *UserAttribute) parse(r io.Reader) (err error) { // RFC 4880, section 5.13 - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return } diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/userid.go index d6bea7d4ac..359a462eb8 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/userid.go +++ 
b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/packet/userid.go @@ -6,7 +6,6 @@ package packet import ( "io" - "io/ioutil" "strings" ) @@ -66,7 +65,7 @@ func NewUserId(name, comment, email string) *UserId { func (uid *UserId) parse(r io.Reader) (err error) { // RFC 4880, section 5.11 - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return } diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go index 9de04958ea..f53244a1c7 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go @@ -268,7 +268,7 @@ func HashIdToString(id byte) (name string, ok bool) { return "", false } -// HashIdToHash returns an OpenPGP hash id which corresponds the given Hash. +// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. func HashToHashId(h crypto.Hash) (id byte, ok bool) { for _, m := range hashToHashIdMapping { if m.hash == h { diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/write.go b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/write.go index 4ee71784eb..b89d48b81d 100644 --- a/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/write.go +++ b/terraform/providers/google/vendor/golang.org/x/crypto/openpgp/write.go @@ -402,7 +402,7 @@ func (s signatureWriter) Close() error { return s.encryptedData.Close() } -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +// noOpCloser is like an io.NopCloser, but for an io.Writer. // TODO: we have two of these in OpenPGP packages alone. This probably needs // to be promoted somewhere more common. 
type noOpCloser struct { diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/buffer.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/buffer.go deleted file mode 100644 index 1ab07d078d..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/buffer.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "io" - "sync" -) - -// buffer provides a linked list buffer for data exchange -// between producer and consumer. Theoretically the buffer is -// of unlimited capacity as it does no allocation of its own. -type buffer struct { - // protects concurrent access to head, tail and closed - *sync.Cond - - head *element // the buffer that will be read first - tail *element // the buffer that will be read last - - closed bool -} - -// An element represents a single link in a linked list. -type element struct { - buf []byte - next *element -} - -// newBuffer returns an empty buffer that is not closed. -func newBuffer() *buffer { - e := new(element) - b := &buffer{ - Cond: newCond(), - head: e, - tail: e, - } - return b -} - -// write makes buf available for Read to receive. -// buf must not be modified after the call to write. -func (b *buffer) write(buf []byte) { - b.Cond.L.Lock() - e := &element{buf: buf} - b.tail.next = e - b.tail = e - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// eof closes the buffer. Reads from the buffer once all -// the data has been consumed will receive io.EOF. -func (b *buffer) eof() { - b.Cond.L.Lock() - b.closed = true - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// Read reads data from the internal buffer in buf. Reads will block -// if no data is available, or until the buffer is closed. 
-func (b *buffer) Read(buf []byte) (n int, err error) { - b.Cond.L.Lock() - defer b.Cond.L.Unlock() - - for len(buf) > 0 { - // if there is data in b.head, copy it - if len(b.head.buf) > 0 { - r := copy(buf, b.head.buf) - buf, b.head.buf = buf[r:], b.head.buf[r:] - n += r - continue - } - // if there is a next buffer, make it the head - if len(b.head.buf) == 0 && b.head != b.tail { - b.head = b.head.next - continue - } - - // if at least one byte has been copied, return - if n > 0 { - break - } - - // if nothing was read, and there is nothing outstanding - // check to see if the buffer is closed. - if b.closed { - err = io.EOF - break - } - // out of buffers, wait for producer - b.Cond.Wait() - } - return -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/certs.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/certs.go deleted file mode 100644 index 4600c20772..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/certs.go +++ /dev/null @@ -1,589 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "sort" - "time" -) - -// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear -// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms. -// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't -// appear in the Signature.Format field. 
-const ( - CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" - CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" - CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" - CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" - CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" - - // CertAlgoRSASHA256v01 and CertAlgoRSASHA512v01 can't appear as a - // Certificate.Type (or PublicKey.Type), but only in - // ClientConfig.HostKeyAlgorithms. - CertAlgoRSASHA256v01 = "rsa-sha2-256-cert-v01@openssh.com" - CertAlgoRSASHA512v01 = "rsa-sha2-512-cert-v01@openssh.com" -) - -const ( - // Deprecated: use CertAlgoRSAv01. - CertSigAlgoRSAv01 = CertAlgoRSAv01 - // Deprecated: use CertAlgoRSASHA256v01. - CertSigAlgoRSASHA2256v01 = CertAlgoRSASHA256v01 - // Deprecated: use CertAlgoRSASHA512v01. - CertSigAlgoRSASHA2512v01 = CertAlgoRSASHA512v01 -) - -// Certificate types distinguish between host and user -// certificates. The values can be set in the CertType field of -// Certificate. -const ( - UserCert = 1 - HostCert = 2 -) - -// Signature represents a cryptographic signature. -type Signature struct { - Format string - Blob []byte - Rest []byte `ssh:"rest"` -} - -// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that -// a certificate does not expire. -const CertTimeInfinity = 1<<64 - 1 - -// An Certificate represents an OpenSSH certificate as defined in -// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the -// PublicKey interface, so it can be unmarshaled using -// ParsePublicKey. 
-type Certificate struct { - Nonce []byte - Key PublicKey - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []string - ValidAfter uint64 - ValidBefore uint64 - Permissions - Reserved []byte - SignatureKey PublicKey - Signature *Signature -} - -// genericCertData holds the key-independent part of the certificate data. -// Overall, certificates contain an nonce, public key fields and -// key-independent fields. -type genericCertData struct { - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []byte - ValidAfter uint64 - ValidBefore uint64 - CriticalOptions []byte - Extensions []byte - Reserved []byte - SignatureKey []byte - Signature []byte -} - -func marshalStringList(namelist []string) []byte { - var to []byte - for _, name := range namelist { - s := struct{ N string }{name} - to = append(to, Marshal(&s)...) - } - return to -} - -type optionsTuple struct { - Key string - Value []byte -} - -type optionsTupleValue struct { - Value string -} - -// serialize a map of critical options or extensions -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty string value -func marshalTuples(tups map[string]string) []byte { - keys := make([]string, 0, len(tups)) - for key := range tups { - keys = append(keys, key) - } - sort.Strings(keys) - - var ret []byte - for _, key := range keys { - s := optionsTuple{Key: key} - if value := tups[key]; len(value) > 0 { - s.Value = Marshal(&optionsTupleValue{value}) - } - ret = append(ret, Marshal(&s)...) 
- } - return ret -} - -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty option value -func parseTuples(in []byte) (map[string]string, error) { - tups := map[string]string{} - var lastKey string - var haveLastKey bool - - for len(in) > 0 { - var key, val, extra []byte - var ok bool - - if key, in, ok = parseString(in); !ok { - return nil, errShortRead - } - keyStr := string(key) - // according to [PROTOCOL.certkeys], the names must be in - // lexical order. - if haveLastKey && keyStr <= lastKey { - return nil, fmt.Errorf("ssh: certificate options are not in lexical order") - } - lastKey, haveLastKey = keyStr, true - // the next field is a data field, which if non-empty has a string embedded - if val, in, ok = parseString(in); !ok { - return nil, errShortRead - } - if len(val) > 0 { - val, extra, ok = parseString(val) - if !ok { - return nil, errShortRead - } - if len(extra) > 0 { - return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") - } - tups[keyStr] = string(val) - } else { - tups[keyStr] = "" - } - } - return tups, nil -} - -func parseCert(in []byte, privAlgo string) (*Certificate, error) { - nonce, rest, ok := parseString(in) - if !ok { - return nil, errShortRead - } - - key, rest, err := parsePubKey(rest, privAlgo) - if err != nil { - return nil, err - } - - var g genericCertData - if err := Unmarshal(rest, &g); err != nil { - return nil, err - } - - c := &Certificate{ - Nonce: nonce, - Key: key, - Serial: g.Serial, - CertType: g.CertType, - KeyId: g.KeyId, - ValidAfter: g.ValidAfter, - ValidBefore: g.ValidBefore, - } - - for principals := g.ValidPrincipals; len(principals) > 0; { - principal, rest, ok := parseString(principals) - if !ok { - return nil, errShortRead - } - c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) - principals = rest - } - - c.CriticalOptions, err = parseTuples(g.CriticalOptions) - if err != nil { - return nil, err - } - 
c.Extensions, err = parseTuples(g.Extensions) - if err != nil { - return nil, err - } - c.Reserved = g.Reserved - k, err := ParsePublicKey(g.SignatureKey) - if err != nil { - return nil, err - } - - c.SignatureKey = k - c.Signature, rest, ok = parseSignatureBody(g.Signature) - if !ok || len(rest) > 0 { - return nil, errors.New("ssh: signature parse error") - } - - return c, nil -} - -type openSSHCertSigner struct { - pub *Certificate - signer Signer -} - -type algorithmOpenSSHCertSigner struct { - *openSSHCertSigner - algorithmSigner AlgorithmSigner -} - -// NewCertSigner returns a Signer that signs with the given Certificate, whose -// private key is held by signer. It returns an error if the public key in cert -// doesn't match the key used by signer. -func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { - return nil, errors.New("ssh: signer and cert have different public key") - } - - if algorithmSigner, ok := signer.(AlgorithmSigner); ok { - return &algorithmOpenSSHCertSigner{ - &openSSHCertSigner{cert, signer}, algorithmSigner}, nil - } else { - return &openSSHCertSigner{cert, signer}, nil - } -} - -func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.signer.Sign(rand, data) -} - -func (s *openSSHCertSigner) PublicKey() PublicKey { - return s.pub -} - -func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm) -} - -const sourceAddressCriticalOption = "source-address" - -// CertChecker does the work of verifying a certificate. Its methods -// can be plugged into ClientConfig.HostKeyCallback and -// ServerConfig.PublicKeyCallback. For the CertChecker to work, -// minimally, the IsAuthority callback should be set. 
-type CertChecker struct { - // SupportedCriticalOptions lists the CriticalOptions that the - // server application layer understands. These are only used - // for user certificates. - SupportedCriticalOptions []string - - // IsUserAuthority should return true if the key is recognized as an - // authority for the given user certificate. This allows for - // certificates to be signed by other certificates. This must be set - // if this CertChecker will be checking user certificates. - IsUserAuthority func(auth PublicKey) bool - - // IsHostAuthority should report whether the key is recognized as - // an authority for this host. This allows for certificates to be - // signed by other keys, and for those other keys to only be valid - // signers for particular hostnames. This must be set if this - // CertChecker will be checking host certificates. - IsHostAuthority func(auth PublicKey, address string) bool - - // Clock is used for verifying time stamps. If nil, time.Now - // is used. - Clock func() time.Time - - // UserKeyFallback is called when CertChecker.Authenticate encounters a - // public key that is not a certificate. It must implement validation - // of user keys or else, if nil, all such keys are rejected. - UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // HostKeyFallback is called when CertChecker.CheckHostKey encounters a - // public key that is not a certificate. It must implement host key - // validation or else, if nil, all such keys are rejected. - HostKeyFallback HostKeyCallback - - // IsRevoked is called for each certificate so that revocation checking - // can be implemented. It should return true if the given certificate - // is revoked and false otherwise. If nil, no certificates are - // considered to have been revoked. - IsRevoked func(cert *Certificate) bool -} - -// CheckHostKey checks a host key certificate. This method can be -// plugged into ClientConfig.HostKeyCallback. 
-func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { - cert, ok := key.(*Certificate) - if !ok { - if c.HostKeyFallback != nil { - return c.HostKeyFallback(addr, remote, key) - } - return errors.New("ssh: non-certificate host key") - } - if cert.CertType != HostCert { - return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) - } - if !c.IsHostAuthority(cert.SignatureKey, addr) { - return fmt.Errorf("ssh: no authorities for hostname: %v", addr) - } - - hostname, _, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - // Pass hostname only as principal for host certificates (consistent with OpenSSH) - return c.CheckCert(hostname, cert) -} - -// Authenticate checks a user certificate. Authenticate can be used as -// a value for ServerConfig.PublicKeyCallback. -func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { - cert, ok := pubKey.(*Certificate) - if !ok { - if c.UserKeyFallback != nil { - return c.UserKeyFallback(conn, pubKey) - } - return nil, errors.New("ssh: normal key pairs not accepted") - } - - if cert.CertType != UserCert { - return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) - } - if !c.IsUserAuthority(cert.SignatureKey) { - return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") - } - - if err := c.CheckCert(conn.User(), cert); err != nil { - return nil, err - } - - return &cert.Permissions, nil -} - -// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and -// the signature of the certificate. 
-func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { - if c.IsRevoked != nil && c.IsRevoked(cert) { - return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) - } - - for opt := range cert.CriticalOptions { - // sourceAddressCriticalOption will be enforced by - // serverAuthenticate - if opt == sourceAddressCriticalOption { - continue - } - - found := false - for _, supp := range c.SupportedCriticalOptions { - if supp == opt { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) - } - } - - if len(cert.ValidPrincipals) > 0 { - // By default, certs are valid for all users/hosts. - found := false - for _, p := range cert.ValidPrincipals { - if p == principal { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) - } - } - - clock := c.Clock - if clock == nil { - clock = time.Now - } - - unixNow := clock().Unix() - if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { - return fmt.Errorf("ssh: cert is not yet valid") - } - if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { - return fmt.Errorf("ssh: cert has expired") - } - if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { - return fmt.Errorf("ssh: certificate signature does not verify") - } - - return nil -} - -// SignCert signs the certificate with an authority, setting the Nonce, -// SignatureKey, and Signature fields. -func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { - c.Nonce = make([]byte, 32) - if _, err := io.ReadFull(rand, c.Nonce); err != nil { - return err - } - c.SignatureKey = authority.PublicKey() - - // Default to KeyAlgoRSASHA512 for ssh-rsa signers. 
- if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { - sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512) - if err != nil { - return err - } - c.Signature = sig - return nil - } - - sig, err := authority.Sign(rand, c.bytesForSigning()) - if err != nil { - return err - } - c.Signature = sig - return nil -} - -// certKeyAlgoNames is a mapping from known certificate algorithm names to the -// corresponding public key signature algorithm. -// -// This map must be kept in sync with the one in agent/client.go. -var certKeyAlgoNames = map[string]string{ - CertAlgoRSAv01: KeyAlgoRSA, - CertAlgoRSASHA256v01: KeyAlgoRSASHA256, - CertAlgoRSASHA512v01: KeyAlgoRSASHA512, - CertAlgoDSAv01: KeyAlgoDSA, - CertAlgoECDSA256v01: KeyAlgoECDSA256, - CertAlgoECDSA384v01: KeyAlgoECDSA384, - CertAlgoECDSA521v01: KeyAlgoECDSA521, - CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256, - CertAlgoED25519v01: KeyAlgoED25519, - CertAlgoSKED25519v01: KeyAlgoSKED25519, -} - -// underlyingAlgo returns the signature algorithm associated with algo (which is -// an advertised or negotiated public key or host key algorithm). These are -// usually the same, except for certificate algorithms. -func underlyingAlgo(algo string) string { - if a, ok := certKeyAlgoNames[algo]; ok { - return a - } - return algo -} - -// certificateAlgo returns the certificate algorithms that uses the provided -// underlying signature algorithm. -func certificateAlgo(algo string) (certAlgo string, ok bool) { - for certName, algoName := range certKeyAlgoNames { - if algoName == algo { - return certName, true - } - } - return "", false -} - -func (cert *Certificate) bytesForSigning() []byte { - c2 := *cert - c2.Signature = nil - out := c2.Marshal() - // Drop trailing signature length. - return out[:len(out)-4] -} - -// Marshal serializes c into OpenSSH's wire format. It is part of the -// PublicKey interface. 
-func (c *Certificate) Marshal() []byte { - generic := genericCertData{ - Serial: c.Serial, - CertType: c.CertType, - KeyId: c.KeyId, - ValidPrincipals: marshalStringList(c.ValidPrincipals), - ValidAfter: uint64(c.ValidAfter), - ValidBefore: uint64(c.ValidBefore), - CriticalOptions: marshalTuples(c.CriticalOptions), - Extensions: marshalTuples(c.Extensions), - Reserved: c.Reserved, - SignatureKey: c.SignatureKey.Marshal(), - } - if c.Signature != nil { - generic.Signature = Marshal(c.Signature) - } - genericBytes := Marshal(&generic) - keyBytes := c.Key.Marshal() - _, keyBytes, _ = parseString(keyBytes) - prefix := Marshal(&struct { - Name string - Nonce []byte - Key []byte `ssh:"rest"` - }{c.Type(), c.Nonce, keyBytes}) - - result := make([]byte, 0, len(prefix)+len(genericBytes)) - result = append(result, prefix...) - result = append(result, genericBytes...) - return result -} - -// Type returns the certificate algorithm name. It is part of the PublicKey interface. -func (c *Certificate) Type() string { - certName, ok := certificateAlgo(c.Key.Type()) - if !ok { - panic("unknown certificate type for key type " + c.Key.Type()) - } - return certName -} - -// Verify verifies a signature against the certificate's public -// key. It is part of the PublicKey interface. 
-func (c *Certificate) Verify(data []byte, sig *Signature) error { - return c.Key.Verify(data, sig) -} - -func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { - format, in, ok := parseString(in) - if !ok { - return - } - - out = &Signature{ - Format: string(format), - } - - if out.Blob, in, ok = parseString(in); !ok { - return - } - - switch out.Format { - case KeyAlgoSKECDSA256, CertAlgoSKECDSA256v01, KeyAlgoSKED25519, CertAlgoSKED25519v01: - out.Rest = in - return out, nil, ok - } - - return out, in, ok -} - -func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { - sigBytes, rest, ok := parseString(in) - if !ok { - return - } - - out, trailing, ok := parseSignatureBody(sigBytes) - if !ok || len(trailing) > 0 { - return nil, nil, false - } - return -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/channel.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/channel.go deleted file mode 100644 index c0834c00df..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/channel.go +++ /dev/null @@ -1,633 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "sync" -) - -const ( - minPacketLength = 9 - // channelMaxPacket contains the maximum number of bytes that will be - // sent in a single packet. As per RFC 4253, section 6.1, 32k is also - // the minimum. - channelMaxPacket = 1 << 15 - // We follow OpenSSH here. - channelWindowSize = 64 * channelMaxPacket -) - -// NewChannel represents an incoming request to a channel. It must either be -// accepted for use by calling Accept, or rejected by calling Reject. -type NewChannel interface { - // Accept accepts the channel creation request. It returns the Channel - // and a Go channel containing SSH requests. 
The Go channel must be - // serviced otherwise the Channel will hang. - Accept() (Channel, <-chan *Request, error) - - // Reject rejects the channel creation request. After calling - // this, no other methods on the Channel may be called. - Reject(reason RejectionReason, message string) error - - // ChannelType returns the type of the channel, as supplied by the - // client. - ChannelType() string - - // ExtraData returns the arbitrary payload for this channel, as supplied - // by the client. This data is specific to the channel type. - ExtraData() []byte -} - -// A Channel is an ordered, reliable, flow-controlled, duplex stream -// that is multiplexed over an SSH connection. -type Channel interface { - // Read reads up to len(data) bytes from the channel. - Read(data []byte) (int, error) - - // Write writes len(data) bytes to the channel. - Write(data []byte) (int, error) - - // Close signals end of channel use. No data may be sent after this - // call. - Close() error - - // CloseWrite signals the end of sending in-band - // data. Requests may still be sent, and the other side may - // still send data - CloseWrite() error - - // SendRequest sends a channel request. If wantReply is true, - // it will wait for a reply and return the result as a - // boolean, otherwise the return value will be false. Channel - // requests are out-of-band messages so they may be sent even - // if the data stream is closed or blocked by flow control. - // If the channel is closed before a reply is returned, io.EOF - // is returned. - SendRequest(name string, wantReply bool, payload []byte) (bool, error) - - // Stderr returns an io.ReadWriter that writes to this channel - // with the extended data type set to stderr. Stderr may - // safely be read and written from a different goroutine than - // Read and Write respectively. - Stderr() io.ReadWriter -} - -// Request is a request sent outside of the normal stream of -// data. 
Requests can either be specific to an SSH channel, or they -// can be global. -type Request struct { - Type string - WantReply bool - Payload []byte - - ch *channel - mux *mux -} - -// Reply sends a response to a request. It must be called for all requests -// where WantReply is true and is a no-op otherwise. The payload argument is -// ignored for replies to channel-specific requests. -func (r *Request) Reply(ok bool, payload []byte) error { - if !r.WantReply { - return nil - } - - if r.ch == nil { - return r.mux.ackRequest(ok, payload) - } - - return r.ch.ackRequest(ok) -} - -// RejectionReason is an enumeration used when rejecting channel creation -// requests. See RFC 4254, section 5.1. -type RejectionReason uint32 - -const ( - Prohibited RejectionReason = iota + 1 - ConnectionFailed - UnknownChannelType - ResourceShortage -) - -// String converts the rejection reason to human readable form. -func (r RejectionReason) String() string { - switch r { - case Prohibited: - return "administratively prohibited" - case ConnectionFailed: - return "connect failed" - case UnknownChannelType: - return "unknown channel type" - case ResourceShortage: - return "resource shortage" - } - return fmt.Sprintf("unknown reason %d", int(r)) -} - -func min(a uint32, b int) uint32 { - if a < uint32(b) { - return a - } - return uint32(b) -} - -type channelDirection uint8 - -const ( - channelInbound channelDirection = iota - channelOutbound -) - -// channel is an implementation of the Channel interface that works -// with the mux class. -type channel struct { - // R/O after creation - chanType string - extraData []byte - localId, remoteId uint32 - - // maxIncomingPayload and maxRemotePayload are the maximum - // payload sizes of normal and extended data packets for - // receiving and sending, respectively. The wire packet will - // be 9 or 13 bytes larger (excluding encryption overhead). 
- maxIncomingPayload uint32 - maxRemotePayload uint32 - - mux *mux - - // decided is set to true if an accept or reject message has been sent - // (for outbound channels) or received (for inbound channels). - decided bool - - // direction contains either channelOutbound, for channels created - // locally, or channelInbound, for channels created by the peer. - direction channelDirection - - // Pending internal channel messages. - msg chan interface{} - - // Since requests have no ID, there can be only one request - // with WantReply=true outstanding. This lock is held by a - // goroutine that has such an outgoing request pending. - sentRequestMu sync.Mutex - - incomingRequests chan *Request - - sentEOF bool - - // thread-safe data - remoteWin window - pending *buffer - extPending *buffer - - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 - - // writeMu serializes calls to mux.conn.writePacket() and - // protects sentClose and packetPool. This mutex must be - // different from windowMu, as writePacket can block if there - // is a key exchange pending. - writeMu sync.Mutex - sentClose bool - - // packetPool has a buffer for each extended channel ID to - // save allocations during writes. - packetPool map[uint32][]byte -} - -// writePacket sends a packet. If the packet is a channel close, it updates -// sentClose. This method takes the lock c.writeMu. -func (ch *channel) writePacket(packet []byte) error { - ch.writeMu.Lock() - if ch.sentClose { - ch.writeMu.Unlock() - return io.EOF - } - ch.sentClose = (packet[0] == msgChannelClose) - err := ch.mux.conn.writePacket(packet) - ch.writeMu.Unlock() - return err -} - -func (ch *channel) sendMessage(msg interface{}) error { - if debugMux { - log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) - } - - p := Marshal(msg) - binary.BigEndian.PutUint32(p[1:], ch.remoteId) - return ch.writePacket(p) -} - -// WriteExtended writes data to a specific extended stream. 
These streams are -// used, for example, for stderr. -func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { - if ch.sentEOF { - return 0, io.EOF - } - // 1 byte message type, 4 bytes remoteId, 4 bytes data length - opCode := byte(msgChannelData) - headerLength := uint32(9) - if extendedCode > 0 { - headerLength += 4 - opCode = msgChannelExtendedData - } - - ch.writeMu.Lock() - packet := ch.packetPool[extendedCode] - // We don't remove the buffer from packetPool, so - // WriteExtended calls from different goroutines will be - // flagged as errors by the race detector. - ch.writeMu.Unlock() - - for len(data) > 0 { - space := min(ch.maxRemotePayload, len(data)) - if space, err = ch.remoteWin.reserve(space); err != nil { - return n, err - } - if want := headerLength + space; uint32(cap(packet)) < want { - packet = make([]byte, want) - } else { - packet = packet[:want] - } - - todo := data[:space] - - packet[0] = opCode - binary.BigEndian.PutUint32(packet[1:], ch.remoteId) - if extendedCode > 0 { - binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) - } - binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) - copy(packet[headerLength:], todo) - if err = ch.writePacket(packet); err != nil { - return n, err - } - - n += len(todo) - data = data[len(todo):] - } - - ch.writeMu.Lock() - ch.packetPool[extendedCode] = packet - ch.writeMu.Unlock() - - return n, err -} - -func (ch *channel) handleData(packet []byte) error { - headerLen := 9 - isExtendedData := packet[0] == msgChannelExtendedData - if isExtendedData { - headerLen = 13 - } - if len(packet) < headerLen { - // malformed data packet - return parseError(packet[0]) - } - - var extended uint32 - if isExtendedData { - extended = binary.BigEndian.Uint32(packet[5:]) - } - - length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) - if length == 0 { - return nil - } - if length > ch.maxIncomingPayload { - // TODO(hanwen): should send Disconnect? 
- return errors.New("ssh: incoming packet exceeds maximum payload size") - } - - data := packet[headerLen:] - if length != uint32(len(data)) { - return errors.New("ssh: wrong packet length") - } - - ch.windowMu.Lock() - if ch.myWindow < length { - ch.windowMu.Unlock() - // TODO(hanwen): should send Disconnect with reason? - return errors.New("ssh: remote side wrote too much") - } - ch.myWindow -= length - ch.windowMu.Unlock() - - if extended == 1 { - ch.extPending.write(data) - } else if extended > 0 { - // discard other extended data. - } else { - ch.pending.write(data) - } - return nil -} - -func (c *channel) adjustWindow(n uint32) error { - c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. - c.myWindow += uint32(n) - c.windowMu.Unlock() - return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), - }) -} - -func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { - switch extended { - case 1: - n, err = c.extPending.Read(data) - case 0: - n, err = c.pending.Read(data) - default: - return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) - } - - if n > 0 { - err = c.adjustWindow(uint32(n)) - // sendWindowAdjust can return io.EOF if the remote - // peer has closed the connection, however we want to - // defer forwarding io.EOF to the caller of Read until - // the buffer has been drained. - if n > 0 && err == io.EOF { - err = nil - } - } - - return n, err -} - -func (c *channel) close() { - c.pending.eof() - c.extPending.eof() - close(c.msg) - close(c.incomingRequests) - c.writeMu.Lock() - // This is not necessary for a normal channel teardown, but if - // there was another error, it is. - c.sentClose = true - c.writeMu.Unlock() - // Unblock writers. 
- c.remoteWin.close() -} - -// responseMessageReceived is called when a success or failure message is -// received on a channel to check that such a message is reasonable for the -// given channel. -func (ch *channel) responseMessageReceived() error { - if ch.direction == channelInbound { - return errors.New("ssh: channel response message received on inbound channel") - } - if ch.decided { - return errors.New("ssh: duplicate response received for channel") - } - ch.decided = true - return nil -} - -func (ch *channel) handlePacket(packet []byte) error { - switch packet[0] { - case msgChannelData, msgChannelExtendedData: - return ch.handleData(packet) - case msgChannelClose: - ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) - ch.mux.chanList.remove(ch.localId) - ch.close() - return nil - case msgChannelEOF: - // RFC 4254 is mute on how EOF affects dataExt messages but - // it is logical to signal EOF at the same time. - ch.extPending.eof() - ch.pending.eof() - return nil - } - - decoded, err := decode(packet) - if err != nil { - return err - } - - switch msg := decoded.(type) { - case *channelOpenFailureMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - ch.mux.chanList.remove(msg.PeersID) - ch.msg <- msg - case *channelOpenConfirmMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) - } - ch.remoteId = msg.MyID - ch.maxRemotePayload = msg.MaxPacketSize - ch.remoteWin.add(msg.MyWindow) - ch.msg <- msg - case *windowAdjustMsg: - if !ch.remoteWin.add(msg.AdditionalBytes) { - return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) - } - case *channelRequestMsg: - req := Request{ - Type: msg.Request, - WantReply: msg.WantReply, - Payload: msg.RequestSpecificData, - ch: ch, - } - - ch.incomingRequests <- &req - default: - ch.msg <- msg - } 
- return nil -} - -func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { - ch := &channel{ - remoteWin: window{Cond: newCond()}, - myWindow: channelWindowSize, - pending: newBuffer(), - extPending: newBuffer(), - direction: direction, - incomingRequests: make(chan *Request, chanSize), - msg: make(chan interface{}, chanSize), - chanType: chanType, - extraData: extraData, - mux: m, - packetPool: make(map[uint32][]byte), - } - ch.localId = m.chanList.add(ch) - return ch -} - -var errUndecided = errors.New("ssh: must Accept or Reject channel") -var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") - -type extChannel struct { - code uint32 - ch *channel -} - -func (e *extChannel) Write(data []byte) (n int, err error) { - return e.ch.WriteExtended(data, e.code) -} - -func (e *extChannel) Read(data []byte) (n int, err error) { - return e.ch.ReadExtended(data, e.code) -} - -func (ch *channel) Accept() (Channel, <-chan *Request, error) { - if ch.decided { - return nil, nil, errDecidedAlready - } - ch.maxIncomingPayload = channelMaxPacket - confirm := channelOpenConfirmMsg{ - PeersID: ch.remoteId, - MyID: ch.localId, - MyWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - } - ch.decided = true - if err := ch.sendMessage(confirm); err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (ch *channel) Reject(reason RejectionReason, message string) error { - if ch.decided { - return errDecidedAlready - } - reject := channelOpenFailureMsg{ - PeersID: ch.remoteId, - Reason: reason, - Message: message, - Language: "en", - } - ch.decided = true - return ch.sendMessage(reject) -} - -func (ch *channel) Read(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.ReadExtended(data, 0) -} - -func (ch *channel) Write(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.WriteExtended(data, 0) -} - -func (ch 
*channel) CloseWrite() error { - if !ch.decided { - return errUndecided - } - ch.sentEOF = true - return ch.sendMessage(channelEOFMsg{ - PeersID: ch.remoteId}) -} - -func (ch *channel) Close() error { - if !ch.decided { - return errUndecided - } - - return ch.sendMessage(channelCloseMsg{ - PeersID: ch.remoteId}) -} - -// Extended returns an io.ReadWriter that sends and receives data on the given, -// SSH extended stream. Such streams are used, for example, for stderr. -func (ch *channel) Extended(code uint32) io.ReadWriter { - if !ch.decided { - return nil - } - return &extChannel{code, ch} -} - -func (ch *channel) Stderr() io.ReadWriter { - return ch.Extended(1) -} - -func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - if !ch.decided { - return false, errUndecided - } - - if wantReply { - ch.sentRequestMu.Lock() - defer ch.sentRequestMu.Unlock() - } - - msg := channelRequestMsg{ - PeersID: ch.remoteId, - Request: name, - WantReply: wantReply, - RequestSpecificData: payload, - } - - if err := ch.sendMessage(msg); err != nil { - return false, err - } - - if wantReply { - m, ok := (<-ch.msg) - if !ok { - return false, io.EOF - } - switch m.(type) { - case *channelRequestFailureMsg: - return false, nil - case *channelRequestSuccessMsg: - return true, nil - default: - return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) - } - } - - return false, nil -} - -// ackRequest either sends an ack or nack to the channel request. 
-func (ch *channel) ackRequest(ok bool) error { - if !ch.decided { - return errUndecided - } - - var msg interface{} - if !ok { - msg = channelRequestFailureMsg{ - PeersID: ch.remoteId, - } - } else { - msg = channelRequestSuccessMsg{ - PeersID: ch.remoteId, - } - } - return ch.sendMessage(msg) -} - -func (ch *channel) ChannelType() string { - return ch.chanType -} - -func (ch *channel) ExtraData() []byte { - return ch.extraData -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/cipher.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/cipher.go deleted file mode 100644 index 770e8a6639..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/cipher.go +++ /dev/null @@ -1,789 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rc4" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" - - "golang.org/x/crypto/chacha20" - "golang.org/x/crypto/internal/poly1305" -) - -const ( - packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. - - // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations - // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC - // indicates implementations SHOULD be able to handle larger packet sizes, but then - // waffles on about reasonable limits. - // - // OpenSSH caps their maxPacket at 256kB so we choose to do - // the same. maxPacket is also used to ensure that uint32 - // length fields do not overflow, so it should remain well - // below 4G. - maxPacket = 256 * 1024 -) - -// noneCipher implements cipher.Stream and provides no encryption. It is used -// by the transport before the first key-exchange. 
-type noneCipher struct{} - -func (c noneCipher) XORKeyStream(dst, src []byte) { - copy(dst, src) -} - -func newAESCTR(key, iv []byte) (cipher.Stream, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - return cipher.NewCTR(c, iv), nil -} - -func newRC4(key, iv []byte) (cipher.Stream, error) { - return rc4.NewCipher(key) -} - -type cipherMode struct { - keySize int - ivSize int - create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) -} - -func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - stream, err := createFunc(key, iv) - if err != nil { - return nil, err - } - - var streamDump []byte - if skip > 0 { - streamDump = make([]byte, 512) - } - - for remainingToDump := skip; remainingToDump > 0; { - dumpThisTime := remainingToDump - if dumpThisTime > len(streamDump) { - dumpThisTime = len(streamDump) - } - stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) - remainingToDump -= dumpThisTime - } - - mac := macModes[algs.MAC].new(macKey) - return &streamPacketCipher{ - mac: mac, - etm: macModes[algs.MAC].etm, - macResult: make([]byte, mac.Size()), - cipher: stream, - }, nil - } -} - -// cipherModes documents properties of supported ciphers. Ciphers not included -// are not supported and will not be negotiated, even if explicitly requested in -// ClientConfig.Crypto.Ciphers. -var cipherModes = map[string]*cipherMode{ - // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms - // are defined in the order specified in the RFC. 
- "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - - // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. - // They are defined in the order specified in the RFC. - "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, - "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, - - // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. - // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and - // RC4) has problems with weak keys, and should be used with caution." - // RFC4345 introduces improved versions of Arcfour. - "arcfour": {16, 0, streamCipherMode(0, newRC4)}, - - // AEAD ciphers - gcmCipherID: {16, 12, newGCMCipher}, - chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, - - // CBC mode is insecure and so is not included in the default config. - // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely - // needed, it's possible to specify a custom Config to enable it. - // You should expect that an active attacker can recover plaintext if - // you do. - aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, - - // 3des-cbc is insecure and is not included in the default - // config. - tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, -} - -// prefixLen is the length of the packet prefix that contains the packet length -// and number of padding bytes. -const prefixLen = 5 - -// streamPacketCipher is a packetCipher using a stream cipher. -type streamPacketCipher struct { - mac hash.Hash - cipher cipher.Stream - etm bool - - // The following members are to avoid per-packet allocations. - prefix [prefixLen]byte - seqNumBytes [4]byte - padding [2 * packetSizeMultiple]byte - packetData []byte - macResult []byte -} - -// readCipherPacket reads and decrypt a single packet from the reader argument. 
-func (s *streamPacketCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, s.prefix[:]); err != nil { - return nil, err - } - - var encryptedPaddingLength [1]byte - if s.mac != nil && s.etm { - copy(encryptedPaddingLength[:], s.prefix[4:5]) - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } else { - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - length := binary.BigEndian.Uint32(s.prefix[0:4]) - paddingLength := uint32(s.prefix[4]) - - var macSize uint32 - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - if s.etm { - s.mac.Write(s.prefix[:4]) - s.mac.Write(encryptedPaddingLength[:]) - } else { - s.mac.Write(s.prefix[:]) - } - macSize = uint32(s.mac.Size()) - } - - if length <= paddingLength+1 { - return nil, errors.New("ssh: invalid packet length, packet too small") - } - - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - // the maxPacket check above ensures that length-1+macSize - // does not overflow. 
- if uint32(cap(s.packetData)) < length-1+macSize { - s.packetData = make([]byte, length-1+macSize) - } else { - s.packetData = s.packetData[:length-1+macSize] - } - - if _, err := io.ReadFull(r, s.packetData); err != nil { - return nil, err - } - mac := s.packetData[length-1:] - data := s.packetData[:length-1] - - if s.mac != nil && s.etm { - s.mac.Write(data) - } - - s.cipher.XORKeyStream(data, data) - - if s.mac != nil { - if !s.etm { - s.mac.Write(data) - } - s.macResult = s.mac.Sum(s.macResult[:0]) - if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { - return nil, errors.New("ssh: MAC failure") - } - } - - return s.packetData[:length-paddingLength-1], nil -} - -// writeCipherPacket encrypts and sends a packet of data to the writer argument -func (s *streamPacketCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - if len(packet) > maxPacket { - return errors.New("ssh: packet too large") - } - - aadlen := 0 - if s.mac != nil && s.etm { - // packet length is not encrypted for EtM modes - aadlen = 4 - } - - paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple - if paddingLength < 4 { - paddingLength += packetSizeMultiple - } - - length := len(packet) + 1 + paddingLength - binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) - s.prefix[4] = byte(paddingLength) - padding := s.padding[:paddingLength] - if _, err := io.ReadFull(rand, padding); err != nil { - return err - } - - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - - if s.etm { - // For EtM algorithms, the packet length must stay unencrypted, - // but the following data (padding length) must be encrypted - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } - - s.mac.Write(s.prefix[:]) - - if !s.etm { - // For non-EtM algorithms, the algorithm is applied on unencrypted data - s.mac.Write(packet) - s.mac.Write(padding) - } - } - - if !(s.mac != nil 
&& s.etm) { - // For EtM algorithms, the padding length has already been encrypted - // and the packet length must remain unencrypted - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - s.cipher.XORKeyStream(packet, packet) - s.cipher.XORKeyStream(padding, padding) - - if s.mac != nil && s.etm { - // For EtM algorithms, packet and padding must be encrypted - s.mac.Write(packet) - s.mac.Write(padding) - } - - if _, err := w.Write(s.prefix[:]); err != nil { - return err - } - if _, err := w.Write(packet); err != nil { - return err - } - if _, err := w.Write(padding); err != nil { - return err - } - - if s.mac != nil { - s.macResult = s.mac.Sum(s.macResult[:0]) - if _, err := w.Write(s.macResult); err != nil { - return err - } - } - - return nil -} - -type gcmCipher struct { - aead cipher.AEAD - prefix [4]byte - iv []byte - buf []byte -} - -func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - aead, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - - return &gcmCipher{ - aead: aead, - iv: iv, - }, nil -} - -const gcmTagSize = 16 - -func (c *gcmCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - // Pad out to multiple of 16 bytes. This is different from the - // stream cipher because that encrypts the length too. 
- padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) - if padding < 4 { - padding += packetSizeMultiple - } - - length := uint32(len(packet) + int(padding) + 1) - binary.BigEndian.PutUint32(c.prefix[:], length) - if _, err := w.Write(c.prefix[:]); err != nil { - return err - } - - if cap(c.buf) < int(length) { - c.buf = make([]byte, length) - } else { - c.buf = c.buf[:length] - } - - c.buf[0] = padding - copy(c.buf[1:], packet) - if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { - return err - } - c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if _, err := w.Write(c.buf); err != nil { - return err - } - c.incIV() - - return nil -} - -func (c *gcmCipher) incIV() { - for i := 4 + 7; i >= 4; i-- { - c.iv[i]++ - if c.iv[i] != 0 { - break - } - } -} - -func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, c.prefix[:]); err != nil { - return nil, err - } - length := binary.BigEndian.Uint32(c.prefix[:]) - if length > maxPacket { - return nil, errors.New("ssh: max packet length exceeded") - } - - if cap(c.buf) < int(length+gcmTagSize) { - c.buf = make([]byte, length+gcmTagSize) - } else { - c.buf = c.buf[:length+gcmTagSize] - } - - if _, err := io.ReadFull(r, c.buf); err != nil { - return nil, err - } - - plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if err != nil { - return nil, err - } - c.incIV() - - if len(plain) == 0 { - return nil, errors.New("ssh: empty packet") - } - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. 
- return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding+1) >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - plain = plain[1 : length-uint32(padding)] - return plain, nil -} - -// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 -type cbcCipher struct { - mac hash.Hash - macSize uint32 - decrypter cipher.BlockMode - encrypter cipher.BlockMode - - // The following members are to avoid per-packet allocations. - seqNumBytes [4]byte - packetData []byte - macResult []byte - - // Amount of data we should still read to hide which - // verification error triggered. - oracleCamouflage uint32 -} - -func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - cbc := &cbcCipher{ - mac: macModes[algs.MAC].new(macKey), - decrypter: cipher.NewCBCDecrypter(c, iv), - encrypter: cipher.NewCBCEncrypter(c, iv), - packetData: make([]byte, 1024), - } - if cbc.mac != nil { - cbc.macSize = uint32(cbc.mac.Size()) - } - - return cbc, nil -} - -func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := des.NewTripleDESCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func maxUInt32(a, b int) uint32 { - if a > b { - return uint32(a) - } - return uint32(b) -} - -const ( - cbcMinPacketSizeMultiple = 8 - cbcMinPacketSize = 16 - cbcMinPaddingSize = 4 -) - -// cbcError represents a verification error that may leak information. 
-type cbcError string - -func (e cbcError) Error() string { return string(e) } - -func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - p, err := c.readCipherPacketLeaky(seqNum, r) - if err != nil { - if _, ok := err.(cbcError); ok { - // Verification error: read a fixed amount of - // data, to make distinguishing between - // failing MAC and failing length check more - // difficult. - io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) - } - } - return p, err -} - -func (c *cbcCipher) readCipherPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { - blockSize := c.decrypter.BlockSize() - - // Read the header, which will include some of the subsequent data in the - // case of block ciphers - this is copied back to the payload later. - // How many bytes of payload/padding will be read with this first read. - firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) - firstBlock := c.packetData[:firstBlockLength] - if _, err := io.ReadFull(r, firstBlock); err != nil { - return nil, err - } - - c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength - - c.decrypter.CryptBlocks(firstBlock, firstBlock) - length := binary.BigEndian.Uint32(firstBlock[:4]) - if length > maxPacket { - return nil, cbcError("ssh: packet too large") - } - if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { - // The minimum size of a packet is 16 (or the cipher block size, whichever - // is larger) bytes. - return nil, cbcError("ssh: packet too small") - } - // The length of the packet (including the length field but not the MAC) must - // be a multiple of the block size or 8, whichever is larger. 
- if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { - return nil, cbcError("ssh: invalid packet length multiple") - } - - paddingLength := uint32(firstBlock[4]) - if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { - return nil, cbcError("ssh: invalid packet length") - } - - // Positions within the c.packetData buffer: - macStart := 4 + length - paddingStart := macStart - paddingLength - - // Entire packet size, starting before length, ending at end of mac. - entirePacketSize := macStart + c.macSize - - // Ensure c.packetData is large enough for the entire packet data. - if uint32(cap(c.packetData)) < entirePacketSize { - // Still need to upsize and copy, but this should be rare at runtime, only - // on upsizing the packetData buffer. - c.packetData = make([]byte, entirePacketSize) - copy(c.packetData, firstBlock) - } else { - c.packetData = c.packetData[:entirePacketSize] - } - - n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) - if err != nil { - return nil, err - } - c.oracleCamouflage -= uint32(n) - - remainingCrypted := c.packetData[firstBlockLength:macStart] - c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) - - mac := c.packetData[macStart:] - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData[:macStart]) - c.macResult = c.mac.Sum(c.macResult[:0]) - if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { - return nil, cbcError("ssh: MAC failure") - } - } - - return c.packetData[prefixLen:paddingStart], nil -} - -func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) - - // Length of encrypted portion of the packet (header, payload, padding). - // Enforce minimum padding and packet size. 
- encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) - // Enforce block size. - encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize - - length := encLength - 4 - paddingLength := int(length) - (1 + len(packet)) - - // Overall buffer contains: header, payload, padding, mac. - // Space for the MAC is reserved in the capacity but not the slice length. - bufferSize := encLength + c.macSize - if uint32(cap(c.packetData)) < bufferSize { - c.packetData = make([]byte, encLength, bufferSize) - } else { - c.packetData = c.packetData[:encLength] - } - - p := c.packetData - - // Packet header. - binary.BigEndian.PutUint32(p, length) - p = p[4:] - p[0] = byte(paddingLength) - - // Payload. - p = p[1:] - copy(p, packet) - - // Padding. - p = p[len(packet):] - if _, err := io.ReadFull(rand, p); err != nil { - return err - } - - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData) - // The MAC is now appended into the capacity reserved for it earlier. - c.packetData = c.mac.Sum(c.packetData) - } - - c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) - - if _, err := w.Write(c.packetData); err != nil { - return err - } - - return nil -} - -const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" - -// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com -// AEAD, which is described here: -// -// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 -// -// the methods here also implement padding, which RFC4253 Section 6 -// also requires of stream ciphers. 
-type chacha20Poly1305Cipher struct { - lengthKey [32]byte - contentKey [32]byte - buf []byte -} - -func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - if len(key) != 64 { - panic(len(key)) - } - - c := &chacha20Poly1305Cipher{ - buf: make([]byte, 256), - } - - copy(c.contentKey[:], key[:32]) - copy(c.lengthKey[:], key[32:]) - return c, nil -} - -func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - nonce := make([]byte, 12) - binary.BigEndian.PutUint32(nonce[8:], seqNum) - s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) - if err != nil { - return nil, err - } - var polyKey, discardBuf [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes - - encryptedLength := c.buf[:4] - if _, err := io.ReadFull(r, encryptedLength); err != nil { - return nil, err - } - - var lenBytes [4]byte - ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) - if err != nil { - return nil, err - } - ls.XORKeyStream(lenBytes[:], encryptedLength) - - length := binary.BigEndian.Uint32(lenBytes[:]) - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - contentEnd := 4 + length - packetEnd := contentEnd + poly1305.TagSize - if uint32(cap(c.buf)) < packetEnd { - c.buf = make([]byte, packetEnd) - copy(c.buf[:], encryptedLength) - } else { - c.buf = c.buf[:packetEnd] - } - - if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { - return nil, err - } - - var mac [poly1305.TagSize]byte - copy(mac[:], c.buf[contentEnd:packetEnd]) - if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { - return nil, errors.New("ssh: MAC failure") - } - - plain := c.buf[4:contentEnd] - s.XORKeyStream(plain, plain) - - if len(plain) == 0 { - return nil, errors.New("ssh: empty packet") - } - - padding := plain[0] - if padding < 4 { - // padding is a 
byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding)+1 >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - - plain = plain[1 : len(plain)-int(padding)] - - return plain, nil -} - -func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { - nonce := make([]byte, 12) - binary.BigEndian.PutUint32(nonce[8:], seqNum) - s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) - if err != nil { - return err - } - var polyKey, discardBuf [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes - - // There is no blocksize, so fall back to multiple of 8 byte - // padding, as described in RFC 4253, Sec 6. - const packetSizeMultiple = 8 - - padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple - if padding < 4 { - padding += packetSizeMultiple - } - - // size (4 bytes), padding (1), payload, padding, tag. 
- totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize - if cap(c.buf) < totalLength { - c.buf = make([]byte, totalLength) - } else { - c.buf = c.buf[:totalLength] - } - - binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) - ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) - if err != nil { - return err - } - ls.XORKeyStream(c.buf, c.buf[:4]) - c.buf[4] = byte(padding) - copy(c.buf[5:], payload) - packetEnd := 5 + len(payload) + padding - if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { - return err - } - - s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) - - var mac [poly1305.TagSize]byte - poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) - - copy(c.buf[packetEnd:], mac[:]) - - if _, err := w.Write(c.buf); err != nil { - return err - } - return nil -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/client.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/client.go deleted file mode 100644 index bdc356cbdf..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/client.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "net" - "os" - "sync" - "time" -) - -// Client implements a traditional SSH client that supports shells, -// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. -type Client struct { - Conn - - handleForwardsOnce sync.Once // guards calling (*Client).handleForwards - - forwards forwardList // forwarded tcpip connections from the remote side - mu sync.Mutex - channelHandlers map[string]chan NewChannel -} - -// HandleChannelOpen returns a channel on which NewChannel requests -// for the given type are sent. If the type already is being handled, -// nil is returned. 
The channel is closed when the connection is closed. -func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { - c.mu.Lock() - defer c.mu.Unlock() - if c.channelHandlers == nil { - // The SSH channel has been closed. - c := make(chan NewChannel) - close(c) - return c - } - - ch := c.channelHandlers[channelType] - if ch != nil { - return nil - } - - ch = make(chan NewChannel, chanSize) - c.channelHandlers[channelType] = ch - return ch -} - -// NewClient creates a Client on top of the given connection. -func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { - conn := &Client{ - Conn: c, - channelHandlers: make(map[string]chan NewChannel, 1), - } - - go conn.handleGlobalRequests(reqs) - go conn.handleChannelOpens(chans) - go func() { - conn.Wait() - conn.forwards.closeAll() - }() - return conn -} - -// NewClientConn establishes an authenticated SSH connection using c -// as the underlying transport. The Request and NewChannel channels -// must be serviced or the connection will hang. -func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.HostKeyCallback == nil { - c.Close() - return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") - } - - conn := &connection{ - sshConn: sshConn{conn: c, user: fullConf.User}, - } - - if err := conn.clientHandshake(addr, &fullConf); err != nil { - c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) - } - conn.mux = newMux(conn.transport) - return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil -} - -// clientHandshake performs the client side key exchange. See RFC 4253 Section -// 7. 
-func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { - if config.ClientVersion != "" { - c.clientVersion = []byte(config.ClientVersion) - } else { - c.clientVersion = []byte(packageVersion) - } - var err error - c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) - if err != nil { - return err - } - - c.transport = newClientTransport( - newTransport(c.sshConn.conn, config.Rand, true /* is client */), - c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) - if err := c.transport.waitSession(); err != nil { - return err - } - - c.sessionID = c.transport.getSessionID() - return c.clientAuthenticate(config) -} - -// verifyHostKeySignature verifies the host key obtained in the key exchange. -// algo is the negotiated algorithm, and may be a certificate type. -func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error { - sig, rest, ok := parseSignatureBody(result.Signature) - if len(rest) > 0 || !ok { - return errors.New("ssh: signature parse error") - } - - if a := underlyingAlgo(algo); sig.Format != a { - return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, a) - } - - return hostKey.Verify(result.H, sig) -} - -// NewSession opens a new Session for this client. (A session is a remote -// execution of a program.) -func (c *Client) NewSession() (*Session, error) { - ch, in, err := c.OpenChannel("session", nil) - if err != nil { - return nil, err - } - return newSession(ch, in) -} - -func (c *Client) handleGlobalRequests(incoming <-chan *Request) { - for r := range incoming { - // This handles keepalive messages and matches - // the behaviour of OpenSSH. - r.Reply(false, nil) - } -} - -// handleChannelOpens channel open messages from the remote side. 
-func (c *Client) handleChannelOpens(in <-chan NewChannel) { - for ch := range in { - c.mu.Lock() - handler := c.channelHandlers[ch.ChannelType()] - c.mu.Unlock() - - if handler != nil { - handler <- ch - } else { - ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) - } - } - - c.mu.Lock() - for _, ch := range c.channelHandlers { - close(ch) - } - c.channelHandlers = nil - c.mu.Unlock() -} - -// Dial starts a client connection to the given SSH server. It is a -// convenience function that connects to the given network address, -// initiates the SSH handshake, and then sets up a Client. For access -// to incoming channels and requests, use net.Dial with NewClientConn -// instead. -func Dial(network, addr string, config *ClientConfig) (*Client, error) { - conn, err := net.DialTimeout(network, addr, config.Timeout) - if err != nil { - return nil, err - } - c, chans, reqs, err := NewClientConn(conn, addr, config) - if err != nil { - return nil, err - } - return NewClient(c, chans, reqs), nil -} - -// HostKeyCallback is the function type used for verifying server -// keys. A HostKeyCallback must return nil if the host key is OK, or -// an error to reject it. It receives the hostname as passed to Dial -// or NewClientConn. The remote address is the RemoteAddr of the -// net.Conn underlying the SSH connection. -type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error - -// BannerCallback is the function type used for treat the banner sent by -// the server. A BannerCallback receives the message sent by the remote server. -type BannerCallback func(message string) error - -// A ClientConfig structure is used to configure a Client. It must not be -// modified after having been passed to an SSH function. -type ClientConfig struct { - // Config contains configuration that is shared between clients and - // servers. - Config - - // User contains the username to authenticate as. 
- User string - - // Auth contains possible authentication methods to use with the - // server. Only the first instance of a particular RFC 4252 method will - // be used during authentication. - Auth []AuthMethod - - // HostKeyCallback is called during the cryptographic - // handshake to validate the server's host key. The client - // configuration must supply this callback for the connection - // to succeed. The functions InsecureIgnoreHostKey or - // FixedHostKey can be used for simplistic host key checks. - HostKeyCallback HostKeyCallback - - // BannerCallback is called during the SSH dance to display a custom - // server's message. The client configuration can supply this callback to - // handle it as wished. The function BannerDisplayStderr can be used for - // simplistic display on Stderr. - BannerCallback BannerCallback - - // ClientVersion contains the version identification string that will - // be used for the connection. If empty, a reasonable default is used. - ClientVersion string - - // HostKeyAlgorithms lists the public key algorithms that the client will - // accept from the server for host key authentication, in order of - // preference. If empty, a reasonable default is used. Any - // string returned from a PublicKey.Type method may be used, or - // any of the CertAlgo and KeyAlgo constants. - HostKeyAlgorithms []string - - // Timeout is the maximum amount of time for the TCP connection to establish. - // - // A Timeout of zero means no timeout. - Timeout time.Duration -} - -// InsecureIgnoreHostKey returns a function that can be used for -// ClientConfig.HostKeyCallback to accept any host key. It should -// not be used for production code. 
-func InsecureIgnoreHostKey() HostKeyCallback { - return func(hostname string, remote net.Addr, key PublicKey) error { - return nil - } -} - -type fixedHostKey struct { - key PublicKey -} - -func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { - if f.key == nil { - return fmt.Errorf("ssh: required host key was nil") - } - if !bytes.Equal(key.Marshal(), f.key.Marshal()) { - return fmt.Errorf("ssh: host key mismatch") - } - return nil -} - -// FixedHostKey returns a function for use in -// ClientConfig.HostKeyCallback to accept only a specific host key. -func FixedHostKey(key PublicKey) HostKeyCallback { - hk := &fixedHostKey{key} - return hk.check -} - -// BannerDisplayStderr returns a function that can be used for -// ClientConfig.BannerCallback to display banners on os.Stderr. -func BannerDisplayStderr() BannerCallback { - return func(banner string) error { - _, err := os.Stderr.WriteString(banner) - - return err - } -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/client_auth.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/client_auth.go deleted file mode 100644 index 409b5ea1d4..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/client_auth.go +++ /dev/null @@ -1,725 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "strings" -) - -type authResult int - -const ( - authFailure authResult = iota - authPartialSuccess - authSuccess -) - -// clientAuthenticate authenticates with the remote server. See RFC 4252. 
-func (c *connection) clientAuthenticate(config *ClientConfig) error { - // initiate user auth session - if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { - return err - } - packet, err := c.transport.readPacket() - if err != nil { - return err - } - // The server may choose to send a SSH_MSG_EXT_INFO at this point (if we - // advertised willingness to receive one, which we always do) or not. See - // RFC 8308, Section 2.4. - extensions := make(map[string][]byte) - if len(packet) > 0 && packet[0] == msgExtInfo { - var extInfo extInfoMsg - if err := Unmarshal(packet, &extInfo); err != nil { - return err - } - payload := extInfo.Payload - for i := uint32(0); i < extInfo.NumExtensions; i++ { - name, rest, ok := parseString(payload) - if !ok { - return parseError(msgExtInfo) - } - value, rest, ok := parseString(rest) - if !ok { - return parseError(msgExtInfo) - } - extensions[string(name)] = value - payload = rest - } - packet, err = c.transport.readPacket() - if err != nil { - return err - } - } - var serviceAccept serviceAcceptMsg - if err := Unmarshal(packet, &serviceAccept); err != nil { - return err - } - - // during the authentication phase the client first attempts the "none" method - // then any untried methods suggested by the server. 
- var tried []string - var lastMethods []string - - sessionID := c.transport.getSessionID() - for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) - if err != nil { - return err - } - if ok == authSuccess { - // success - return nil - } else if ok == authFailure { - if m := auth.method(); !contains(tried, m) { - tried = append(tried, m) - } - } - if methods == nil { - methods = lastMethods - } - lastMethods = methods - - auth = nil - - findNext: - for _, a := range config.Auth { - candidateMethod := a.method() - if contains(tried, candidateMethod) { - continue - } - for _, meth := range methods { - if meth == candidateMethod { - auth = a - break findNext - } - } - } - } - return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) -} - -func contains(list []string, e string) bool { - for _, s := range list { - if s == e { - return true - } - } - return false -} - -// An AuthMethod represents an instance of an RFC 4252 authentication method. -type AuthMethod interface { - // auth authenticates user over transport t. - // Returns true if authentication is successful. - // If authentication is not successful, a []string of alternative - // method names is returned. If the slice is nil, it will be ignored - // and the previous set of possible methods will be reused. - auth(session []byte, user string, p packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) - - // method returns the RFC 4252 method name. - method() string -} - -// "none" authentication, RFC 4252 section 5.2. 
-type noneAuth int - -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { - if err := c.writePacket(Marshal(&userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: "none", - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (n *noneAuth) method() string { - return "none" -} - -// passwordCallback is an AuthMethod that fetches the password through -// a function call, e.g. by prompting the user. -type passwordCallback func() (password string, err error) - -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { - type passwordAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - Reply bool - Password string - } - - pw, err := cb() - // REVIEW NOTE: is there a need to support skipping a password attempt? - // The program may only find out that the user doesn't have a password - // when prompting. - if err != nil { - return authFailure, nil, err - } - - if err := c.writePacket(Marshal(&passwordAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - Reply: false, - Password: pw, - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (cb passwordCallback) method() string { - return "password" -} - -// Password returns an AuthMethod using the given password. -func Password(secret string) AuthMethod { - return passwordCallback(func() (string, error) { return secret, nil }) -} - -// PasswordCallback returns an AuthMethod that uses a callback for -// fetching a password. 
-func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { - return passwordCallback(prompt) -} - -type publickeyAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - // HasSig indicates to the receiver packet that the auth request is signed and - // should be used for authentication of the request. - HasSig bool - Algoname string - PubKey []byte - // Sig is tagged with "rest" so Marshal will exclude it during - // validateKey - Sig []byte `ssh:"rest"` -} - -// publicKeyCallback is an AuthMethod that uses a set of key -// pairs for authentication. -type publicKeyCallback func() ([]Signer, error) - -func (cb publicKeyCallback) method() string { - return "publickey" -} - -func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) { - keyFormat := signer.PublicKey().Type() - - // Like in sendKexInit, if the public key implements AlgorithmSigner we - // assume it supports all algorithms, otherwise only the key format one. - as, ok := signer.(AlgorithmSigner) - if !ok { - return algorithmSignerWrapper{signer}, keyFormat - } - - extPayload, ok := extensions["server-sig-algs"] - if !ok { - // If there is no "server-sig-algs" extension, fall back to the key - // format algorithm. - return as, keyFormat - } - - // The server-sig-algs extension only carries underlying signature - // algorithm, but we are trying to select a protocol-level public key - // algorithm, which might be a certificate type. Extend the list of server - // supported algorithms to include the corresponding certificate algorithms. 
- serverAlgos := strings.Split(string(extPayload), ",") - for _, algo := range serverAlgos { - if certAlgo, ok := certificateAlgo(algo); ok { - serverAlgos = append(serverAlgos, certAlgo) - } - } - - keyAlgos := algorithmsForKeyFormat(keyFormat) - algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) - if err != nil { - // If there is no overlap, try the key anyway with the key format - // algorithm, to support servers that fail to list all supported - // algorithms. - return as, keyFormat - } - return as, algo -} - -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) { - // Authentication is performed by sending an enquiry to test if a key is - // acceptable to the remote. If the key is acceptable, the client will - // attempt to authenticate with the valid key. If not the client will repeat - // the process with the remaining keys. - - signers, err := cb() - if err != nil { - return authFailure, nil, err - } - var methods []string - for _, signer := range signers { - pub := signer.PublicKey() - as, algo := pickSignatureAlgorithm(signer, extensions) - - ok, err := validateKey(pub, algo, user, c) - if err != nil { - return authFailure, nil, err - } - if !ok { - continue - } - - pubKey := pub.Marshal() - data := buildDataSignedForAuth(session, userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - }, algo, pubKey) - sign, err := as.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) - if err != nil { - return authFailure, nil, err - } - - // manually wrap the serialized signature in a string - s := Marshal(sign) - sig := make([]byte, stringLength(len(s))) - marshalString(sig, s) - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - HasSig: true, - Algoname: algo, - PubKey: pubKey, - Sig: sig, - } - p := Marshal(&msg) - if err := c.writePacket(p); err != nil { - return authFailure, 
nil, err - } - var success authResult - success, methods, err = handleAuthResponse(c) - if err != nil { - return authFailure, nil, err - } - - // If authentication succeeds or the list of available methods does not - // contain the "publickey" method, do not attempt to authenticate with any - // other keys. According to RFC 4252 Section 7, the latter can occur when - // additional authentication methods are required. - if success == authSuccess || !containsMethod(methods, cb.method()) { - return success, methods, err - } - } - - return authFailure, methods, nil -} - -func containsMethod(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - - return false -} - -// validateKey validates the key provided is acceptable to the server. -func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, error) { - pubKey := key.Marshal() - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: "publickey", - HasSig: false, - Algoname: algo, - PubKey: pubKey, - } - if err := c.writePacket(Marshal(&msg)); err != nil { - return false, err - } - - return confirmKeyAck(key, algo, c) -} - -func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) { - pubKey := key.Marshal() - - for { - packet, err := c.readPacket() - if err != nil { - return false, err - } - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return false, err - } - case msgUserAuthPubKeyOk: - var msg userAuthPubKeyOkMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, err - } - if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) { - return false, nil - } - return true, nil - case msgUserAuthFailure: - return false, nil - default: - return false, unexpectedMessageError(msgUserAuthPubKeyOk, packet[0]) - } - } -} - -// PublicKeys returns an AuthMethod that uses the given key -// pairs. 
-func PublicKeys(signers ...Signer) AuthMethod { - return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) -} - -// PublicKeysCallback returns an AuthMethod that runs the given -// function to obtain a list of key pairs. -func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { - return publicKeyCallback(getSigners) -} - -// handleAuthResponse returns whether the preceding authentication request succeeded -// along with a list of remaining authentication methods to try next and -// an error if an unexpected response was received. -func handleAuthResponse(c packetConn) (authResult, []string, error) { - gotMsgExtInfo := false - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - case msgExtInfo: - // Ignore post-authentication RFC 8308 extensions, once. - if gotMsgExtInfo { - return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - gotMsgExtInfo = true - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -func handleBannerResponse(c packetConn, packet []byte) error { - var msg userAuthBannerMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - transport, ok := c.(*handshakeTransport) - if !ok { - return nil - } - - if transport.bannerCallback != nil { - return transport.bannerCallback(msg.Message) - } - - return nil -} - -// KeyboardInteractiveChallenge should print questions, optionally -// disabling echoing (e.g. 
for passwords), and return all the answers. -// Challenge may be called multiple times in a single session. After -// successful authentication, the server may send a challenge with no -// questions, for which the name and instruction messages should be -// printed. RFC 4256 section 3.3 details how the UI should behave for -// both CLI and GUI environments. -type KeyboardInteractiveChallenge func(name, instruction string, questions []string, echos []bool) (answers []string, err error) - -// KeyboardInteractive returns an AuthMethod using a prompt/response -// sequence controlled by the server. -func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { - return challenge -} - -func (cb KeyboardInteractiveChallenge) method() string { - return "keyboard-interactive" -} - -func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { - type initiateMsg struct { - User string `sshtype:"50"` - Service string - Method string - Language string - Submethods string - } - - if err := c.writePacket(Marshal(&initiateMsg{ - User: user, - Service: serviceSSH, - Method: "keyboard-interactive", - })); err != nil { - return authFailure, nil, err - } - - gotMsgExtInfo := false - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - // like handleAuthResponse, but with less options. - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - continue - case msgExtInfo: - // Ignore post-authentication RFC 8308 extensions, once. 
- if gotMsgExtInfo { - return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) - } - gotMsgExtInfo = true - continue - case msgUserAuthInfoRequest: - // OK - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) - } - - var msg userAuthInfoRequestMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - - // Manually unpack the prompt/echo pairs. - rest := msg.Prompts - var prompts []string - var echos []bool - for i := 0; i < int(msg.NumPrompts); i++ { - prompt, r, ok := parseString(rest) - if !ok || len(r) == 0 { - return authFailure, nil, errors.New("ssh: prompt format error") - } - prompts = append(prompts, string(prompt)) - echos = append(echos, r[0] != 0) - rest = r[1:] - } - - if len(rest) != 0 { - return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") - } - - answers, err := cb(msg.Name, msg.Instruction, prompts, echos) - if err != nil { - return authFailure, nil, err - } - - if len(answers) != len(prompts) { - return authFailure, nil, fmt.Errorf("ssh: incorrect number of answers from keyboard-interactive callback %d (expected %d)", len(answers), len(prompts)) - } - responseLength := 1 + 4 - for _, a := range answers { - responseLength += stringLength(len(a)) - } - serialized := make([]byte, responseLength) - p := serialized - p[0] = msgUserAuthInfoResponse - p = p[1:] - p = marshalUint32(p, uint32(len(answers))) - for _, a := range answers { - p = marshalString(p, []byte(a)) - } - - if err := c.writePacket(serialized); err != nil { - return authFailure, nil, err - } - } -} - -type retryableAuthMethod struct { - 
authMethod AuthMethod - maxTries int -} - -func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (ok authResult, methods []string, err error) { - for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { - ok, methods, err = r.authMethod.auth(session, user, c, rand, extensions) - if ok != authFailure || err != nil { // either success, partial success or error terminate - return ok, methods, err - } - } - return ok, methods, err -} - -func (r *retryableAuthMethod) method() string { - return r.authMethod.method() -} - -// RetryableAuthMethod is a decorator for other auth methods enabling them to -// be retried up to maxTries before considering that AuthMethod itself failed. -// If maxTries is <= 0, will retry indefinitely -// -// This is useful for interactive clients using challenge/response type -// authentication (e.g. Keyboard-Interactive, Password, etc) where the user -// could mistype their response resulting in the server issuing a -// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 -// [keyboard-interactive]); Without this decorator, the non-retryable -// AuthMethod would be removed from future consideration, and never tried again -// (and so the user would never be able to retry their entry). -func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { - return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} -} - -// GSSAPIWithMICAuthMethod is an AuthMethod with "gssapi-with-mic" authentication. -// See RFC 4462 section 3 -// gssAPIClient is implementation of the GSSAPIClient interface, see the definition of the interface for details. -// target is the server host you want to log in to. 
-func GSSAPIWithMICAuthMethod(gssAPIClient GSSAPIClient, target string) AuthMethod { - if gssAPIClient == nil { - panic("gss-api client must be not nil with enable gssapi-with-mic") - } - return &gssAPIWithMICCallback{gssAPIClient: gssAPIClient, target: target} -} - -type gssAPIWithMICCallback struct { - gssAPIClient GSSAPIClient - target string -} - -func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { - m := &userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: g.method(), - } - // The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST. - // See RFC 4462 section 3.2. - m.Payload = appendU32(m.Payload, 1) - m.Payload = appendString(m.Payload, string(krb5OID)) - if err := c.writePacket(Marshal(m)); err != nil { - return authFailure, nil, err - } - // The server responds to the SSH_MSG_USERAUTH_REQUEST with either an - // SSH_MSG_USERAUTH_FAILURE if none of the mechanisms are supported or - // with an SSH_MSG_USERAUTH_GSSAPI_RESPONSE. - // See RFC 4462 section 3.3. - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication,so I don't want to check - // selected mech if it is valid. - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - userAuthGSSAPIResp := &userAuthGSSAPIResponse{} - if err := Unmarshal(packet, userAuthGSSAPIResp); err != nil { - return authFailure, nil, err - } - // Start the loop into the exchange token. - // See RFC 4462 section 3.4. - var token []byte - defer g.gssAPIClient.DeleteSecContext() - for { - // Initiates the establishment of a security context between the application and a remote peer. 
- nextToken, needContinue, err := g.gssAPIClient.InitSecContext("host@"+g.target, token, false) - if err != nil { - return authFailure, nil, err - } - if len(nextToken) > 0 { - if err := c.writePacket(Marshal(&userAuthGSSAPIToken{ - Token: nextToken, - })); err != nil { - return authFailure, nil, err - } - } - if !needContinue { - break - } - packet, err = c.readPacket() - if err != nil { - return authFailure, nil, err - } - switch packet[0] { - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthGSSAPIError: - userAuthGSSAPIErrorResp := &userAuthGSSAPIError{} - if err := Unmarshal(packet, userAuthGSSAPIErrorResp); err != nil { - return authFailure, nil, err - } - return authFailure, nil, fmt.Errorf("GSS-API Error:\n"+ - "Major Status: %d\n"+ - "Minor Status: %d\n"+ - "Error Message: %s\n", userAuthGSSAPIErrorResp.MajorStatus, userAuthGSSAPIErrorResp.MinorStatus, - userAuthGSSAPIErrorResp.Message) - case msgUserAuthGSSAPIToken: - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return authFailure, nil, err - } - token = userAuthGSSAPITokenReq.Token - } - } - // Binding Encryption Keys. - // See RFC 4462 section 3.5. 
- micField := buildMIC(string(session), user, "ssh-connection", "gssapi-with-mic") - micToken, err := g.gssAPIClient.GetMIC(micField) - if err != nil { - return authFailure, nil, err - } - if err := c.writePacket(Marshal(&userAuthGSSAPIMIC{ - MIC: micToken, - })); err != nil { - return authFailure, nil, err - } - return handleAuthResponse(c) -} - -func (g *gssAPIWithMICCallback) method() string { - return "gssapi-with-mic" -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/common.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/common.go deleted file mode 100644 index 2a47a61ded..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/common.go +++ /dev/null @@ -1,430 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/rand" - "fmt" - "io" - "math" - "sync" - - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" -) - -// These are string constants in the SSH protocol. -const ( - compressionNone = "none" - serviceUserAuth = "ssh-userauth" - serviceSSH = "ssh-connection" -) - -// supportedCiphers lists ciphers we support but might not recommend. -var supportedCiphers = []string{ - "aes128-ctr", "aes192-ctr", "aes256-ctr", - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "arcfour256", "arcfour128", "arcfour", - aes128cbcID, - tripledescbcID, -} - -// preferredCiphers specifies the default preference for ciphers. -var preferredCiphers = []string{ - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "aes128-ctr", "aes192-ctr", "aes256-ctr", -} - -// supportedKexAlgos specifies the supported key-exchange algorithms in -// preference order. 
-var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, - // P384 and P521 are not constant-time yet, but since we don't - // reuse ephemeral keys, using them for ECDH should be OK. - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA256, kexAlgoDH14SHA1, kexAlgoDH1SHA1, -} - -// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden -// for the server half. -var serverForbiddenKexAlgos = map[string]struct{}{ - kexAlgoDHGEXSHA1: {}, // server half implementation is only minimal to satisfy the automated tests - kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests -} - -// preferredKexAlgos specifies the default preference for key-exchange algorithms -// in preference order. -var preferredKexAlgos = []string{ - kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA256, kexAlgoDH14SHA1, -} - -// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods -// of authenticating servers) in preference order. -var supportedHostKeyAlgos = []string{ - CertAlgoRSASHA512v01, CertAlgoRSASHA256v01, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, - CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, - - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSASHA512, KeyAlgoRSASHA256, - KeyAlgoRSA, KeyAlgoDSA, - - KeyAlgoED25519, -} - -// supportedMACs specifies a default set of MAC algorithms in preference order. -// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed -// because they have reached the end of their useful life. 
-var supportedMACs = []string{ - "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", -} - -var supportedCompressions = []string{compressionNone} - -// hashFuncs keeps the mapping of supported signature algorithms to their -// respective hashes needed for signing and verification. -var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoRSASHA256: crypto.SHA256, - KeyAlgoRSASHA512: crypto.SHA512, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - // KeyAlgoED25519 doesn't pre-hash. - KeyAlgoSKECDSA256: crypto.SHA256, - KeyAlgoSKED25519: crypto.SHA256, -} - -// algorithmsForKeyFormat returns the supported signature algorithms for a given -// public key format (PublicKey.Type), in order of preference. See RFC 8332, -// Section 2. See also the note in sendKexInit on backwards compatibility. -func algorithmsForKeyFormat(keyFormat string) []string { - switch keyFormat { - case KeyAlgoRSA: - return []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA} - case CertAlgoRSAv01: - return []string{CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01} - default: - return []string{keyFormat} - } -} - -// unexpectedMessageError results when the SSH message that we received didn't -// match what we wanted. -func unexpectedMessageError(expected, got uint8) error { - return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) -} - -// parseError results from a malformed SSH message. 
-func parseError(tag uint8) error { - return fmt.Errorf("ssh: parse error in message type %d", tag) -} - -func findCommon(what string, client []string, server []string) (common string, err error) { - for _, c := range client { - for _, s := range server { - if c == s { - return c, nil - } - } - } - return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) -} - -// directionAlgorithms records algorithm choices in one direction (either read or write) -type directionAlgorithms struct { - Cipher string - MAC string - Compression string -} - -// rekeyBytes returns a rekeying intervals in bytes. -func (a *directionAlgorithms) rekeyBytes() int64 { - // According to RFC4344 block ciphers should rekey after - // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is - // 128. - switch a.Cipher { - case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID: - return 16 * (1 << 32) - - } - - // For others, stick with RFC4253 recommendation to rekey after 1 Gb of data. 
- return 1 << 30 -} - -var aeadCiphers = map[string]bool{ - gcmCipherID: true, - chacha20Poly1305ID: true, -} - -type algorithms struct { - kex string - hostKey string - w directionAlgorithms - r directionAlgorithms -} - -func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { - result := &algorithms{} - - result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) - if err != nil { - return - } - - result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) - if err != nil { - return - } - - stoc, ctos := &result.w, &result.r - if isClient { - ctos, stoc = stoc, ctos - } - - ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) - if err != nil { - return - } - - stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) - if err != nil { - return - } - - if !aeadCiphers[ctos.Cipher] { - ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) - if err != nil { - return - } - } - - if !aeadCiphers[stoc.Cipher] { - stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) - if err != nil { - return - } - } - - ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) - if err != nil { - return - } - - stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) - if err != nil { - return - } - - return result, nil -} - -// If rekeythreshold is too small, we can't make any progress sending -// stuff. 
-const minRekeyThreshold uint64 = 256 - -// Config contains configuration data common to both ServerConfig and -// ClientConfig. -type Config struct { - // Rand provides the source of entropy for cryptographic - // primitives. If Rand is nil, the cryptographic random reader - // in package crypto/rand will be used. - Rand io.Reader - - // The maximum number of bytes sent or received after which a - // new key is negotiated. It must be at least 256. If - // unspecified, a size suitable for the chosen cipher is used. - RekeyThreshold uint64 - - // The allowed key exchanges algorithms. If unspecified then a - // default set of algorithms is used. - KeyExchanges []string - - // The allowed cipher algorithms. If unspecified then a sensible - // default is used. - Ciphers []string - - // The allowed MAC algorithms. If unspecified then a sensible default - // is used. - MACs []string -} - -// SetDefaults sets sensible values for unset fields in config. This is -// exported for testing: Configs passed to SSH functions are copied and have -// default values set automatically. -func (c *Config) SetDefaults() { - if c.Rand == nil { - c.Rand = rand.Reader - } - if c.Ciphers == nil { - c.Ciphers = preferredCiphers - } - var ciphers []string - for _, c := range c.Ciphers { - if cipherModes[c] != nil { - // reject the cipher if we have no cipherModes definition - ciphers = append(ciphers, c) - } - } - c.Ciphers = ciphers - - if c.KeyExchanges == nil { - c.KeyExchanges = preferredKexAlgos - } - - if c.MACs == nil { - c.MACs = supportedMACs - } - - if c.RekeyThreshold == 0 { - // cipher specific default - } else if c.RekeyThreshold < minRekeyThreshold { - c.RekeyThreshold = minRekeyThreshold - } else if c.RekeyThreshold >= math.MaxInt64 { - // Avoid weirdness if somebody uses -1 as a threshold. - c.RekeyThreshold = math.MaxInt64 - } -} - -// buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. 
algo is the advertised -// algorithm, and may be a certificate type. -func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo string, pubKey []byte) []byte { - data := struct { - Session []byte - Type byte - User string - Service string - Method string - Sign bool - Algo string - PubKey []byte - }{ - sessionID, - msgUserAuthRequest, - req.User, - req.Service, - req.Method, - true, - algo, - pubKey, - } - return Marshal(data) -} - -func appendU16(buf []byte, n uint16) []byte { - return append(buf, byte(n>>8), byte(n)) -} - -func appendU32(buf []byte, n uint32) []byte { - return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendU64(buf []byte, n uint64) []byte { - return append(buf, - byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), - byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendInt(buf []byte, n int) []byte { - return appendU32(buf, uint32(n)) -} - -func appendString(buf []byte, s string) []byte { - buf = appendU32(buf, uint32(len(s))) - buf = append(buf, s...) - return buf -} - -func appendBool(buf []byte, b bool) []byte { - if b { - return append(buf, 1) - } - return append(buf, 0) -} - -// newCond is a helper to hide the fact that there is no usable zero -// value for sync.Cond. -func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } - -// window represents the buffer available to clients -// wishing to write to a channel. -type window struct { - *sync.Cond - win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 - writeWaiters int - closed bool -} - -// add adds win to the amount of window available -// for consumers. -func (w *window) add(win uint32) bool { - // a zero sized window adjust is a noop. - if win == 0 { - return true - } - w.L.Lock() - if w.win+win < win { - w.L.Unlock() - return false - } - w.win += win - // It is unusual that multiple goroutines would be attempting to reserve - // window space, but not guaranteed. 
Use broadcast to notify all waiters - // that additional window is available. - w.Broadcast() - w.L.Unlock() - return true -} - -// close sets the window to closed, so all reservations fail -// immediately. -func (w *window) close() { - w.L.Lock() - w.closed = true - w.Broadcast() - w.L.Unlock() -} - -// reserve reserves win from the available window capacity. -// If no capacity remains, reserve will block. reserve may -// return less than requested. -func (w *window) reserve(win uint32) (uint32, error) { - var err error - w.L.Lock() - w.writeWaiters++ - w.Broadcast() - for w.win == 0 && !w.closed { - w.Wait() - } - w.writeWaiters-- - if w.win < win { - win = w.win - } - w.win -= win - if w.closed { - err = io.EOF - } - w.L.Unlock() - return win, err -} - -// waitWriterBlocked waits until some goroutine is blocked for further -// writes. It is used in tests only. -func (w *window) waitWriterBlocked() { - w.Cond.L.Lock() - for w.writeWaiters == 0 { - w.Cond.Wait() - } - w.Cond.L.Unlock() -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/connection.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/connection.go deleted file mode 100644 index fd6b0681b5..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/connection.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "fmt" - "net" -) - -// OpenChannelError is returned if the other side rejects an -// OpenChannel request. -type OpenChannelError struct { - Reason RejectionReason - Message string -} - -func (e *OpenChannelError) Error() string { - return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) -} - -// ConnMetadata holds metadata for the connection. -type ConnMetadata interface { - // User returns the user ID for this connection. 
- User() string - - // SessionID returns the session hash, also denoted by H. - SessionID() []byte - - // ClientVersion returns the client's version string as hashed - // into the session ID. - ClientVersion() []byte - - // ServerVersion returns the server's version string as hashed - // into the session ID. - ServerVersion() []byte - - // RemoteAddr returns the remote address for this connection. - RemoteAddr() net.Addr - - // LocalAddr returns the local address for this connection. - LocalAddr() net.Addr -} - -// Conn represents an SSH connection for both server and client roles. -// Conn is the basis for implementing an application layer, such -// as ClientConn, which implements the traditional shell access for -// clients. -type Conn interface { - ConnMetadata - - // SendRequest sends a global request, and returns the - // reply. If wantReply is true, it returns the response status - // and payload. See also RFC4254, section 4. - SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) - - // OpenChannel tries to open an channel. If the request is - // rejected, it returns *OpenChannelError. On success it returns - // the SSH Channel and a Go channel for incoming, out-of-band - // requests. The Go channel must be serviced, or the - // connection will hang. - OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) - - // Close closes the underlying network connection - Close() error - - // Wait blocks until the connection has shut down, and returns the - // error causing the shutdown. - Wait() error - - // TODO(hanwen): consider exposing: - // RequestKeyChange - // Disconnect -} - -// DiscardRequests consumes and rejects all requests from the -// passed-in channel. -func DiscardRequests(in <-chan *Request) { - for req := range in { - if req.WantReply { - req.Reply(false, nil) - } - } -} - -// A connection represents an incoming connection. 
-type connection struct { - transport *handshakeTransport - sshConn - - // The connection protocol. - *mux -} - -func (c *connection) Close() error { - return c.sshConn.conn.Close() -} - -// sshconn provides net.Conn metadata, but disallows direct reads and -// writes. -type sshConn struct { - conn net.Conn - - user string - sessionID []byte - clientVersion []byte - serverVersion []byte -} - -func dup(src []byte) []byte { - dst := make([]byte, len(src)) - copy(dst, src) - return dst -} - -func (c *sshConn) User() string { - return c.user -} - -func (c *sshConn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -func (c *sshConn) Close() error { - return c.conn.Close() -} - -func (c *sshConn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -func (c *sshConn) SessionID() []byte { - return dup(c.sessionID) -} - -func (c *sshConn) ClientVersion() []byte { - return dup(c.clientVersion) -} - -func (c *sshConn) ServerVersion() []byte { - return dup(c.serverVersion) -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/doc.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/doc.go deleted file mode 100644 index f6bff60dc7..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package ssh implements an SSH client and server. - -SSH is a transport security protocol, an authentication protocol and a -family of application protocols. The most typical application level -protocol is a remote shell and this is specifically implemented. However, -the multiplexed nature of SSH is exposed to users that wish to support -others. 
- -References: - - [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD - [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 - -This package does not fall under the stability promise of the Go language itself, -so its API may be changed when pressing needs arise. -*/ -package ssh // import "golang.org/x/crypto/ssh" diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/handshake.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/handshake.go deleted file mode 100644 index 653dc4d2cf..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/handshake.go +++ /dev/null @@ -1,704 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/rand" - "errors" - "fmt" - "io" - "log" - "net" - "sync" -) - -// debugHandshake, if set, prints messages sent and received. Key -// exchange messages are printed as if DH were used, so the debug -// messages are wrong when using ECDH. -const debugHandshake = false - -// chanSize sets the amount of buffering SSH connections. This is -// primarily for testing: setting chanSize=0 uncovers deadlocks more -// quickly. -const chanSize = 16 - -// keyingTransport is a packet based transport that supports key -// changes. It need not be thread-safe. It should pass through -// msgNewKeys in both directions. -type keyingTransport interface { - packetConn - - // prepareKeyChange sets up a key change. The key change for a - // direction will be effected if a msgNewKeys message is sent - // or received. - prepareKeyChange(*algorithms, *kexResult) error -} - -// handshakeTransport implements rekeying on top of a keyingTransport -// and offers a thread-safe writePacket() interface. 
-type handshakeTransport struct { - conn keyingTransport - config *Config - - serverVersion []byte - clientVersion []byte - - // hostKeys is non-empty if we are the server. In that case, - // it contains all host keys that can be used to sign the - // connection. - hostKeys []Signer - - // hostKeyAlgorithms is non-empty if we are the client. In that case, - // we accept these key types from the server as host key. - hostKeyAlgorithms []string - - // On read error, incoming is closed, and readError is set. - incoming chan []byte - readError error - - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. - - // If the read loop wants to schedule a kex, it pings this - // channel, and the write loop will send out a kex - // message. - requestKex chan struct{} - - // If the other side requests or confirms a kex, its kexInit - // packet is sent here for the write loop to find it. - startKex chan *pendingKex - - // data for host key checking - hostKeyCallback HostKeyCallback - dialAddress string - remoteAddr net.Addr - - // bannerCallback is non-empty if we are the client and it has been set in - // ClientConfig. In that case it is called during the user authentication - // dance to handle a custom server's message. - bannerCallback BannerCallback - - // Algorithms agreed in the last key exchange. - algorithms *algorithms - - readPacketsLeft uint32 - readBytesLeft int64 - - writePacketsLeft uint32 - writeBytesLeft int64 - - // The session ID or nil if first kex did not complete yet. 
- sessionID []byte -} - -type pendingKex struct { - otherInit []byte - done chan error -} - -func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { - t := &handshakeTransport{ - conn: conn, - serverVersion: serverVersion, - clientVersion: clientVersion, - incoming: make(chan []byte, chanSize), - requestKex: make(chan struct{}, 1), - startKex: make(chan *pendingKex, 1), - - config: config, - } - t.resetReadThresholds() - t.resetWriteThresholds() - - // We always start with a mandatory key exchange. - t.requestKex <- struct{}{} - return t -} - -func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.dialAddress = dialAddr - t.remoteAddr = addr - t.hostKeyCallback = config.HostKeyCallback - t.bannerCallback = config.BannerCallback - if config.HostKeyAlgorithms != nil { - t.hostKeyAlgorithms = config.HostKeyAlgorithms - } else { - t.hostKeyAlgorithms = supportedHostKeyAlgos - } - go t.readLoop() - go t.kexLoop() - return t -} - -func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.hostKeys = config.hostKeys - go t.readLoop() - go t.kexLoop() - return t -} - -func (t *handshakeTransport) getSessionID() []byte { - return t.sessionID -} - -// waitSession waits for the session to be established. This should be -// the first thing to call after instantiating handshakeTransport. 
-func (t *handshakeTransport) waitSession() error { - p, err := t.readPacket() - if err != nil { - return err - } - if p[0] != msgNewKeys { - return fmt.Errorf("ssh: first packet should be msgNewKeys") - } - - return nil -} - -func (t *handshakeTransport) id() string { - if len(t.hostKeys) > 0 { - return "server" - } - return "client" -} - -func (t *handshakeTransport) printPacket(p []byte, write bool) { - action := "got" - if write { - action = "sent" - } - - if p[0] == msgChannelData || p[0] == msgChannelExtendedData { - log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) - } else { - msg, err := decode(p) - log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) - } -} - -func (t *handshakeTransport) readPacket() ([]byte, error) { - p, ok := <-t.incoming - if !ok { - return nil, t.readError - } - return p, nil -} - -func (t *handshakeTransport) readLoop() { - first := true - for { - p, err := t.readOnePacket(first) - first = false - if err != nil { - t.readError = err - close(t.incoming) - break - } - if p[0] == msgIgnore || p[0] == msgDebug { - continue - } - t.incoming <- p - } - - // Stop writers too. - t.recordWriteError(t.readError) - - // Unblock the writer should it wait for this. - close(t.startKex) - - // Don't close t.requestKex; it's also written to from writePacket. -} - -func (t *handshakeTransport) pushPacket(p []byte) error { - if debugHandshake { - t.printPacket(p, true) - } - return t.conn.writePacket(p) -} - -func (t *handshakeTransport) getWriteError() error { - t.mu.Lock() - defer t.mu.Unlock() - return t.writeError -} - -func (t *handshakeTransport) recordWriteError(err error) { - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError == nil && err != nil { - t.writeError = err - } -} - -func (t *handshakeTransport) requestKeyExchange() { - select { - case t.requestKex <- struct{}{}: - default: - // something already requested a kex, so do nothing. 
- } -} - -func (t *handshakeTransport) resetWriteThresholds() { - t.writePacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.writeBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.writeBytesLeft = t.algorithms.w.rekeyBytes() - } else { - t.writeBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) kexLoop() { - -write: - for t.getWriteError() == nil { - var request *pendingKex - var sent bool - - for request == nil || !sent { - var ok bool - select { - case request, ok = <-t.startKex: - if !ok { - break write - } - case <-t.requestKex: - break - } - - if !sent { - if err := t.sendKexInit(); err != nil { - t.recordWriteError(err) - break - } - sent = true - } - } - - if err := t.getWriteError(); err != nil { - if request != nil { - request.done <- err - } - break - } - - // We're not servicing t.requestKex, but that is OK: - // we never block on sending to t.requestKex. - - // We're not servicing t.startKex, but the remote end - // has just sent us a kexInitMsg, so it can't send - // another key change request, until we close the done - // channel on the pendingKex request. - - err := t.enterKeyExchange(request.otherInit) - - t.mu.Lock() - t.writeError = err - t.sentInitPacket = nil - t.sentInitMsg = nil - - t.resetWriteThresholds() - - // we have completed the key exchange. Since the - // reader is still blocked, it is safe to clear out - // the requestKex channel. This avoids the situation - // where: 1) we consumed our own request for the - // initial kex, and 2) the kex from the remote side - // caused another send on the requestKex channel, - clear: - for { - select { - case <-t.requestKex: - // - default: - break clear - } - } - - request.done <- t.writeError - - // kex finished. Push packets that we received while - // the kex was in progress. 
Don't look at t.startKex - // and don't increment writtenSinceKex: if we trigger - // another kex while we are still busy with the last - // one, things will become very confusing. - for _, p := range t.pendingPackets { - t.writeError = t.pushPacket(p) - if t.writeError != nil { - break - } - } - t.pendingPackets = t.pendingPackets[:0] - t.mu.Unlock() - } - - // drain startKex channel. We don't service t.requestKex - // because nobody does blocking sends there. - go func() { - for init := range t.startKex { - init.done <- t.writeError - } - }() - - // Unblock reader. - t.conn.Close() -} - -// The protocol uses uint32 for packet counters, so we can't let them -// reach 1<<32. We will actually read and write more packets than -// this, though: the other side may send more packets, and after we -// hit this limit on writing we will send a few more packets for the -// key exchange itself. -const packetRekeyThreshold = (1 << 31) - -func (t *handshakeTransport) resetReadThresholds() { - t.readPacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.readBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.readBytesLeft = t.algorithms.r.rekeyBytes() - } else { - t.readBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { - p, err := t.conn.readPacket() - if err != nil { - return nil, err - } - - if t.readPacketsLeft > 0 { - t.readPacketsLeft-- - } else { - t.requestKeyExchange() - } - - if t.readBytesLeft > 0 { - t.readBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if debugHandshake { - t.printPacket(p, false) - } - - if first && p[0] != msgKexInit { - return nil, fmt.Errorf("ssh: first packet should be msgKexInit") - } - - if p[0] != msgKexInit { - return p, nil - } - - firstKex := t.sessionID == nil - - kex := pendingKex{ - done: make(chan error, 1), - otherInit: p, - } - t.startKex <- &kex - err = <-kex.done - - if debugHandshake { - log.Printf("%s 
exited key exchange (first %v), err %v", t.id(), firstKex, err) - } - - if err != nil { - return nil, err - } - - t.resetReadThresholds() - - // By default, a key exchange is hidden from higher layers by - // translating it into msgIgnore. - successPacket := []byte{msgIgnore} - if firstKex { - // sendKexInit() for the first kex waits for - // msgNewKeys so the authentication process is - // guaranteed to happen over an encrypted transport. - successPacket = []byte{msgNewKeys} - } - - return successPacket, nil -} - -// sendKexInit sends a key change message. -func (t *handshakeTransport) sendKexInit() error { - t.mu.Lock() - defer t.mu.Unlock() - if t.sentInitMsg != nil { - // kexInits may be sent either in response to the other side, - // or because our side wants to initiate a key change, so we - // may have already sent a kexInit. In that case, don't send a - // second kexInit. - return nil - } - - msg := &kexInitMsg{ - KexAlgos: t.config.KeyExchanges, - CiphersClientServer: t.config.Ciphers, - CiphersServerClient: t.config.Ciphers, - MACsClientServer: t.config.MACs, - MACsServerClient: t.config.MACs, - CompressionClientServer: supportedCompressions, - CompressionServerClient: supportedCompressions, - } - io.ReadFull(rand.Reader, msg.Cookie[:]) - - isServer := len(t.hostKeys) > 0 - if isServer { - for _, k := range t.hostKeys { - // If k is an AlgorithmSigner, presume it supports all signature algorithms - // associated with the key format. (Ideally AlgorithmSigner would have a - // method to advertise supported algorithms, but it doesn't. This means that - // adding support for a new algorithm is a breaking change, as we will - // immediately negotiate it even if existing implementations don't support - // it. If that ever happens, we'll have to figure something out.) - // If k is not an AlgorithmSigner, we can only assume it only supports the - // algorithms that matches the key format. (This means that Sign can't pick - // a different default.) 
- keyFormat := k.PublicKey().Type() - if _, ok := k.(AlgorithmSigner); ok { - msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...) - } else { - msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) - } - } - } else { - msg.ServerHostKeyAlgos = t.hostKeyAlgorithms - - // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what - // algorithms the server supports for public key authentication. See RFC - // 8308, Section 2.1. - if firstKeyExchange := t.sessionID == nil; firstKeyExchange { - msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1) - msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) - msg.KexAlgos = append(msg.KexAlgos, "ext-info-c") - } - } - - packet := Marshal(msg) - - // writePacket destroys the contents, so save a copy. - packetCopy := make([]byte, len(packet)) - copy(packetCopy, packet) - - if err := t.pushPacket(packetCopy); err != nil { - return err - } - - t.sentInitMsg = msg - t.sentInitPacket = packet - - return nil -} - -func (t *handshakeTransport) writePacket(p []byte) error { - switch p[0] { - case msgKexInit: - return errors.New("ssh: only handshakeTransport can send kexInit") - case msgNewKeys: - return errors.New("ssh: only handshakeTransport can send newKeys") - } - - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError != nil { - return t.writeError - } - - if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. 
- cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil - } - - if t.writeBytesLeft > 0 { - t.writeBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if t.writePacketsLeft > 0 { - t.writePacketsLeft-- - } else { - t.requestKeyExchange() - } - - if err := t.pushPacket(p); err != nil { - t.writeError = err - } - - return nil -} - -func (t *handshakeTransport) Close() error { - return t.conn.Close() -} - -func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { - if debugHandshake { - log.Printf("%s entered key exchange", t.id()) - } - - otherInit := &kexInitMsg{} - if err := Unmarshal(otherInitPacket, otherInit); err != nil { - return err - } - - magics := handshakeMagics{ - clientVersion: t.clientVersion, - serverVersion: t.serverVersion, - clientKexInit: otherInitPacket, - serverKexInit: t.sentInitPacket, - } - - clientInit := otherInit - serverInit := t.sentInitMsg - isClient := len(t.hostKeys) == 0 - if isClient { - clientInit, serverInit = serverInit, clientInit - - magics.clientKexInit = t.sentInitPacket - magics.serverKexInit = otherInitPacket - } - - var err error - t.algorithms, err = findAgreedAlgorithms(isClient, clientInit, serverInit) - if err != nil { - return err - } - - // We don't send FirstKexFollows, but we handle receiving it. - // - // RFC 4253 section 7 defines the kex and the agreement method for - // first_kex_packet_follows. It states that the guessed packet - // should be ignored if the "kex algorithm and/or the host - // key algorithm is guessed wrong (server and client have - // different preferred algorithm), or if any of the other - // algorithms cannot be agreed upon". The other algorithms have - // already been checked above so the kex algorithm and host key - // algorithm are checked here. 
- if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { - // other side sent a kex message for the wrong algorithm, - // which we have to ignore. - if _, err := t.conn.readPacket(); err != nil { - return err - } - } - - kex, ok := kexAlgoMap[t.algorithms.kex] - if !ok { - return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) - } - - var result *kexResult - if len(t.hostKeys) > 0 { - result, err = t.server(kex, &magics) - } else { - result, err = t.client(kex, &magics) - } - - if err != nil { - return err - } - - if t.sessionID == nil { - t.sessionID = result.H - } - result.SessionID = t.sessionID - - if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { - return err - } - if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { - return err - } - if packet, err := t.conn.readPacket(); err != nil { - return err - } else if packet[0] != msgNewKeys { - return unexpectedMessageError(msgNewKeys, packet[0]) - } - - return nil -} - -// algorithmSignerWrapper is an AlgorithmSigner that only supports the default -// key format algorithm. -// -// This is technically a violation of the AlgorithmSigner interface, but it -// should be unreachable given where we use this. Anyway, at least it returns an -// error instead of panicing or producing an incorrect signature. 
-type algorithmSignerWrapper struct { - Signer -} - -func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - if algorithm != underlyingAlgo(a.PublicKey().Type()) { - return nil, errors.New("ssh: internal error: algorithmSignerWrapper invoked with non-default algorithm") - } - return a.Sign(rand, data) -} - -func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { - for _, k := range hostKeys { - if algo == k.PublicKey().Type() { - return algorithmSignerWrapper{k} - } - k, ok := k.(AlgorithmSigner) - if !ok { - continue - } - for _, a := range algorithmsForKeyFormat(k.PublicKey().Type()) { - if algo == a { - return k - } - } - } - return nil -} - -func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { - hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey) - if hostKey == nil { - return nil, errors.New("ssh: internal error: negotiated unsupported signature type") - } - - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey) - return r, err -} - -func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { - result, err := kex.Client(t.conn, t.config.Rand, magics) - if err != nil { - return nil, err - } - - hostKey, err := ParsePublicKey(result.HostKey) - if err != nil { - return nil, err - } - - if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil { - return nil, err - } - - err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) - if err != nil { - return nil, err - } - - return result, nil -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go deleted file mode 100644 index af81d26654..0000000000 --- 
a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt_pbkdf implements bcrypt_pbkdf(3) from OpenBSD. -// -// See https://flak.tedunangst.com/post/bcrypt-pbkdf and -// https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c. -package bcrypt_pbkdf - -import ( - "crypto/sha512" - "errors" - "golang.org/x/crypto/blowfish" -) - -const blockSize = 32 - -// Key derives a key from the password, salt and rounds count, returning a -// []byte of length keyLen that can be used as cryptographic key. -func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) { - if rounds < 1 { - return nil, errors.New("bcrypt_pbkdf: number of rounds is too small") - } - if len(password) == 0 { - return nil, errors.New("bcrypt_pbkdf: empty password") - } - if len(salt) == 0 || len(salt) > 1<<20 { - return nil, errors.New("bcrypt_pbkdf: bad salt length") - } - if keyLen > 1024 { - return nil, errors.New("bcrypt_pbkdf: keyLen is too large") - } - - numBlocks := (keyLen + blockSize - 1) / blockSize - key := make([]byte, numBlocks*blockSize) - - h := sha512.New() - h.Write(password) - shapass := h.Sum(nil) - - shasalt := make([]byte, 0, sha512.Size) - cnt, tmp := make([]byte, 4), make([]byte, blockSize) - for block := 1; block <= numBlocks; block++ { - h.Reset() - h.Write(salt) - cnt[0] = byte(block >> 24) - cnt[1] = byte(block >> 16) - cnt[2] = byte(block >> 8) - cnt[3] = byte(block) - h.Write(cnt) - bcryptHash(tmp, shapass, h.Sum(shasalt)) - - out := make([]byte, blockSize) - copy(out, tmp) - for i := 2; i <= rounds; i++ { - h.Reset() - h.Write(tmp) - bcryptHash(tmp, shapass, h.Sum(shasalt)) - for j := 0; j < len(out); j++ { - out[j] ^= tmp[j] - } - } - - for i, v := range out { - key[i*numBlocks+(block-1)] = v - } - 
} - return key[:keyLen], nil -} - -var magic = []byte("OxychromaticBlowfishSwatDynamite") - -func bcryptHash(out, shapass, shasalt []byte) { - c, err := blowfish.NewSaltedCipher(shapass, shasalt) - if err != nil { - panic(err) - } - for i := 0; i < 64; i++ { - blowfish.ExpandKey(shasalt, c) - blowfish.ExpandKey(shapass, c) - } - copy(out, magic) - for i := 0; i < 32; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(out[i:i+8], out[i:i+8]) - } - } - // Swap bytes due to different endianness. - for i := 0; i < 32; i += 4 { - out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3] - } -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/kex.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/kex.go deleted file mode 100644 index 927a90cd46..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/kex.go +++ /dev/null @@ -1,774 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - - "golang.org/x/crypto/curve25519" -) - -const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org" - kexAlgoCurve25519SHA256 = "curve25519-sha256" - - // For the following kex only the client half contains a production - // ready implementation. The server half only consists of a minimal - // implementation to satisfy the automated tests. 
- kexAlgoDHGEXSHA1 = "diffie-hellman-group-exchange-sha1" - kexAlgoDHGEXSHA256 = "diffie-hellman-group-exchange-sha256" -) - -// kexResult captures the outcome of a key exchange. -type kexResult struct { - // Session hash. See also RFC 4253, section 8. - H []byte - - // Shared secret. See also RFC 4253, section 8. - K []byte - - // Host key as hashed into H. - HostKey []byte - - // Signature of H. - Signature []byte - - // A cryptographic hash function that matches the security - // level of the key exchange algorithm. It is used for - // calculating H, and for deriving keys from H and K. - Hash crypto.Hash - - // The session ID, which is the first H computed. This is used - // to derive key material inside the transport. - SessionID []byte -} - -// handshakeMagics contains data that is always included in the -// session hash. -type handshakeMagics struct { - clientVersion, serverVersion []byte - clientKexInit, serverKexInit []byte -} - -func (m *handshakeMagics) write(w io.Writer) { - writeString(w, m.clientVersion) - writeString(w, m.serverVersion) - writeString(w, m.clientKexInit) - writeString(w, m.serverKexInit) -} - -// kexAlgorithm abstracts different key exchange algorithms. -type kexAlgorithm interface { - // Server runs server-side key agreement, signing the result - // with a hostkey. algo is the negotiated algorithm, and may - // be a certificate type. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s AlgorithmSigner, algo string) (*kexResult, error) - - // Client runs the client-side key agreement. Caller is - // responsible for verifying the host key signature. - Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) -} - -// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. 
-type dhGroup struct { - g, p, pMinus1 *big.Int - hashFunc crypto.Hash -} - -func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil -} - -func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - var x *big.Int - for { - var err error - if x, err = rand.Int(randSource, group.pMinus1); err != nil { - return nil, err - } - if x.Sign() > 0 { - break - } - } - - X := new(big.Int).Exp(group.g, x, group.p) - kexDHInit := kexDHInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHInit)); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexDHReply kexDHReplyMsg - if err = Unmarshal(packet, &kexDHReply); err != nil { - return nil, err - } - - ki, err := group.diffieHellman(kexDHReply.Y, x) - if err != nil { - return nil, err - } - - h := group.hashFunc.New() - magics.write(h) - writeString(h, kexDHReply.HostKey) - writeInt(h, X) - writeInt(h, kexDHReply.Y) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHReply.HostKey, - Signature: kexDHReply.Signature, - Hash: group.hashFunc, - }, nil -} - -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHInit kexDHInitMsg - if err = Unmarshal(packet, &kexDHInit); err != nil { - return - } - - var y *big.Int - for { - if y, err = rand.Int(randSource, group.pMinus1); err != nil { - return - } - if y.Sign() > 0 { - break - } - } - - Y := new(big.Int).Exp(group.g, y, group.p) - ki, err := group.diffieHellman(kexDHInit.X, 
y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := group.hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeInt(h, kexDHInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, randSource, H, algo) - if err != nil { - return nil, err - } - - kexDHReply := kexDHReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHReply) - - err = c.writePacket(packet) - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: group.hashFunc, - }, err -} - -// ecdh performs Elliptic Curve Diffie-Hellman key exchange as -// described in RFC 5656, section 4. -type ecdh struct { - curve elliptic.Curve -} - -func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - kexInit := kexECDHInitMsg{ - ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), - } - - serialized := Marshal(&kexInit) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - - x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) - if err != nil { - return nil, err - } - - // generate shared secret - secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kexInit.ClientPubKey) - writeString(h, reply.EphemeralPubKey) - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), 
- K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: ecHash(kex.curve), - }, nil -} - -// unmarshalECKey parses and checks an EC key. -func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { - x, y = elliptic.Unmarshal(curve, pubkey) - if x == nil { - return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") - } - if !validateECPublicKey(curve, x, y) { - return nil, nil, errors.New("ssh: public key not on curve") - } - return x, y, nil -} - -// validateECPublicKey checks that the point is a valid public key for -// the given curve. See [SEC1], 3.2.2 -func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { - if x.Sign() == 0 && y.Sign() == 0 { - return false - } - - if x.Cmp(curve.Params().P) >= 0 { - return false - } - - if y.Cmp(curve.Params().P) >= 0 { - return false - } - - if !curve.IsOnCurve(x, y) { - return false - } - - // We don't check if N * PubKey == 0, since - // - // - the NIST curves have cofactor = 1, so this is implicit. - // (We don't foresee an implementation that supports non NIST - // curves) - // - // - for ephemeral keys, we don't need to worry about small - // subgroup attacks. - return true -} - -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexECDHInit kexECDHInitMsg - if err = Unmarshal(packet, &kexECDHInit); err != nil { - return nil, err - } - - clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) - if err != nil { - return nil, err - } - - // We could cache this key across multiple users/multiple - // connection attempts, but the benefit is small. OpenSSH - // generates a new key for each incoming connection. 
- ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) - - // generate shared secret - secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexECDHInit.ClientPubKey) - writeString(h, serializedEphKey) - - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H, algo) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: serializedEphKey, - HostKey: hostKeyBytes, - Signature: sig, - } - - serialized := Marshal(&reply) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - return &kexResult{ - H: H, - K: K, - HostKey: reply.HostKey, - Signature: sig, - Hash: ecHash(kex.curve), - }, nil -} - -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - -var kexAlgoMap = map[string]kexAlgorithm{} - -func init() { - // This is the group called diffie-hellman-group1-sha1 in - // RFC 4253 and Oakley Group 2 in RFC 2409. 
- p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) - kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - hashFunc: crypto.SHA1, - } - - // This are the groups called diffie-hellman-group14-sha1 and - // diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268, - // and Oakley Group 14 in RFC 3526. - p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - group14 := &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ - g: group14.g, p: group14.p, pMinus1: group14.pMinus1, - hashFunc: crypto.SHA1, - } - kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{ - g: group14.g, p: group14.p, pMinus1: group14.pMinus1, - hashFunc: crypto.SHA256, - } - - kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} - kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} - kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} - kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} - kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] = &curve25519sha256{} - kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} - kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} -} - -// curve25519sha256 implements the 
curve25519-sha256 (formerly known as -// curve25519-sha256@libssh.org) key exchange method, as described in RFC 8731. -type curve25519sha256 struct{} - -type curve25519KeyPair struct { - priv [32]byte - pub [32]byte -} - -func (kp *curve25519KeyPair) generate(rand io.Reader) error { - if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { - return err - } - curve25519.ScalarBaseMult(&kp.pub, &kp.priv) - return nil -} - -// curve25519Zeros is just an array of 32 zero bytes so that we have something -// convenient to compare against in order to reject curve25519 points with the -// wrong order. -var curve25519Zeros [32]byte - -func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - if len(reply.EphemeralPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var servPub, secret [32]byte - copy(servPub[:], reply.EphemeralPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &servPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kp.pub[:]) - writeString(h, reply.EphemeralPubKey) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: crypto.SHA256, - }, nil -} - -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics 
*handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return - } - var kexInit kexECDHInitMsg - if err = Unmarshal(packet, &kexInit); err != nil { - return - } - - if len(kexInit.ClientPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - - var clientPub, secret [32]byte - copy(clientPub[:], kexInit.ClientPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &clientPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexInit.ClientPubKey) - writeString(h, kp.pub[:]) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - sig, err := signAndMarshal(priv, rand, H, algo) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: kp.pub[:], - HostKey: hostKeyBytes, - Signature: sig, - } - if err := c.writePacket(Marshal(&reply)); err != nil { - return nil, err - } - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA256, - }, nil -} - -// dhGEXSHA implements the diffie-hellman-group-exchange-sha1 and -// diffie-hellman-group-exchange-sha256 key agreement protocols, -// as described in RFC 4419 -type dhGEXSHA struct { - hashFunc crypto.Hash -} - -const ( - dhGroupExchangeMinimumBits = 2048 - dhGroupExchangePreferredBits = 2048 - dhGroupExchangeMaximumBits = 8192 -) - -func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - // Send GexRequest - kexDHGexRequest := kexDHGexRequestMsg{ - MinBits: 
dhGroupExchangeMinimumBits, - PreferedBits: dhGroupExchangePreferredBits, - MaxBits: dhGroupExchangeMaximumBits, - } - if err := c.writePacket(Marshal(&kexDHGexRequest)); err != nil { - return nil, err - } - - // Receive GexGroup - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var msg kexDHGexGroupMsg - if err = Unmarshal(packet, &msg); err != nil { - return nil, err - } - - // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits - if msg.P.BitLen() < dhGroupExchangeMinimumBits || msg.P.BitLen() > dhGroupExchangeMaximumBits { - return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", msg.P.BitLen()) - } - - // Check if g is safe by verifying that 1 < g < p-1 - pMinusOne := new(big.Int).Sub(msg.P, bigOne) - if msg.G.Cmp(bigOne) <= 0 || msg.G.Cmp(pMinusOne) >= 0 { - return nil, fmt.Errorf("ssh: server provided gex g is not safe") - } - - // Send GexInit - pHalf := new(big.Int).Rsh(msg.P, 1) - x, err := rand.Int(randSource, pHalf) - if err != nil { - return nil, err - } - X := new(big.Int).Exp(msg.G, x, msg.P) - kexDHGexInit := kexDHGexInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHGexInit)); err != nil { - return nil, err - } - - // Receive GexReply - packet, err = c.readPacket() - if err != nil { - return nil, err - } - - var kexDHGexReply kexDHGexReplyMsg - if err = Unmarshal(packet, &kexDHGexReply); err != nil { - return nil, err - } - - if kexDHGexReply.Y.Cmp(bigOne) <= 0 || kexDHGexReply.Y.Cmp(pMinusOne) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - kInt := new(big.Int).Exp(kexDHGexReply.Y, x, msg.P) - - // Check if k is safe by verifying that k > 1 and k < p - 1 - if kInt.Cmp(bigOne) <= 0 || kInt.Cmp(pMinusOne) >= 0 { - return nil, fmt.Errorf("ssh: derived k is not safe") - } - - h := gex.hashFunc.New() - magics.write(h) - writeString(h, kexDHGexReply.HostKey) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) - 
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, msg.P) - writeInt(h, msg.G) - writeInt(h, X) - writeInt(h, kexDHGexReply.Y) - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHGexReply.HostKey, - Signature: kexDHGexReply.Signature, - Hash: gex.hashFunc, - }, nil -} - -// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. -// -// This is a minimal implementation to satisfy the automated tests. -func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { - // Receive GexRequest - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHGexRequest kexDHGexRequestMsg - if err = Unmarshal(packet, &kexDHGexRequest); err != nil { - return - } - - // Send GexGroup - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. 
- p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - g := big.NewInt(2) - - msg := &kexDHGexGroupMsg{ - P: p, - G: g, - } - if err := c.writePacket(Marshal(msg)); err != nil { - return nil, err - } - - // Receive GexInit - packet, err = c.readPacket() - if err != nil { - return - } - var kexDHGexInit kexDHGexInitMsg - if err = Unmarshal(packet, &kexDHGexInit); err != nil { - return - } - - pHalf := new(big.Int).Rsh(p, 1) - - y, err := rand.Int(randSource, pHalf) - if err != nil { - return - } - Y := new(big.Int).Exp(g, y, p) - - pMinusOne := new(big.Int).Sub(p, bigOne) - if kexDHGexInit.X.Cmp(bigOne) <= 0 || kexDHGexInit.X.Cmp(pMinusOne) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - kInt := new(big.Int).Exp(kexDHGexInit.X, y, p) - - hostKeyBytes := priv.PublicKey().Marshal() - - h := gex.hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, p) - writeInt(h, g) - writeInt(h, kexDHGexInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. 
- sig, err := signAndMarshal(priv, randSource, H, algo) - if err != nil { - return nil, err - } - - kexDHGexReply := kexDHGexReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHGexReply) - - err = c.writePacket(packet) - - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: gex.hashFunc, - }, err -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/keys.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/keys.go deleted file mode 100644 index 1c7de1a6dd..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/keys.go +++ /dev/null @@ -1,1447 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "crypto" - "crypto/aes" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/md5" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" - "strings" - - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" -) - -// Public key algorithms names. These values can appear in PublicKey.Type, -// ClientConfig.HostKeyAlgorithms, Signature.Format, or as AlgorithmSigner -// arguments. -const ( - KeyAlgoRSA = "ssh-rsa" - KeyAlgoDSA = "ssh-dss" - KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" - KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" - KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" - KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" - KeyAlgoED25519 = "ssh-ed25519" - KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" - - // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, not - // public key formats, so they can't appear as a PublicKey.Type. The - // corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2. 
- KeyAlgoRSASHA256 = "rsa-sha2-256" - KeyAlgoRSASHA512 = "rsa-sha2-512" -) - -const ( - // Deprecated: use KeyAlgoRSA. - SigAlgoRSA = KeyAlgoRSA - // Deprecated: use KeyAlgoRSASHA256. - SigAlgoRSASHA2256 = KeyAlgoRSASHA256 - // Deprecated: use KeyAlgoRSASHA512. - SigAlgoRSASHA2512 = KeyAlgoRSASHA512 -) - -// parsePubKey parses a public key of the given algorithm. -// Use ParsePublicKey for keys with prepended algorithm. -func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { - switch algo { - case KeyAlgoRSA: - return parseRSA(in) - case KeyAlgoDSA: - return parseDSA(in) - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - return parseECDSA(in) - case KeyAlgoSKECDSA256: - return parseSKECDSA(in) - case KeyAlgoED25519: - return parseED25519(in) - case KeyAlgoSKED25519: - return parseSKEd25519(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - cert, err := parseCert(in, certKeyAlgoNames[algo]) - if err != nil { - return nil, nil, err - } - return cert, nil, nil - } - return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) -} - -// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format -// (see sshd(8) manual page) once the options and key type fields have been -// removed. -func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { - in = bytes.TrimSpace(in) - - i := bytes.IndexAny(in, " \t") - if i == -1 { - i = len(in) - } - base64Key := in[:i] - - key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) - n, err := base64.StdEncoding.Decode(key, base64Key) - if err != nil { - return nil, "", err - } - key = key[:n] - out, err = ParsePublicKey(key) - if err != nil { - return nil, "", err - } - comment = string(bytes.TrimSpace(in[i:])) - return out, comment, nil -} - -// ParseKnownHosts parses an entry in the format of the known_hosts file. 
-// -// The known_hosts format is documented in the sshd(8) manual page. This -// function will parse a single entry from in. On successful return, marker -// will contain the optional marker value (i.e. "cert-authority" or "revoked") -// or else be empty, hosts will contain the hosts that this entry matches, -// pubKey will contain the public key and comment will contain any trailing -// comment at the end of the line. See the sshd(8) manual page for the various -// forms that a host string can take. -// -// The unparsed remainder of the input will be returned in rest. This function -// can be called repeatedly to parse multiple entries. -// -// If no entries were found in the input then err will be io.EOF. Otherwise a -// non-nil err value indicates a parse error. -func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - // Strip out the beginning of the known_host key. - // This is either an optional marker or a (set of) hostname(s). - keyFields := bytes.Fields(in) - if len(keyFields) < 3 || len(keyFields) > 5 { - return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") - } - - // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated - // list of hosts - marker := "" - if keyFields[0][0] == '@' { - marker = string(keyFields[0][1:]) - keyFields = keyFields[1:] - } - - hosts := string(keyFields[0]) - // keyFields[1] contains the key type (e.g. “ssh-rsa”). - // However, that information is duplicated inside the - // base64-encoded key and so is ignored here. 
- - key := bytes.Join(keyFields[2:], []byte(" ")) - if pubKey, comment, err = parseAuthorizedKey(key); err != nil { - return "", nil, nil, "", nil, err - } - - return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil - } - - return "", nil, nil, "", nil, io.EOF -} - -// ParseAuthorizedKeys parses a public key from an authorized_keys -// file used in OpenSSH according to the sshd(8) manual page. -func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - return out, comment, options, rest, nil - } - - // No key type recognised. Maybe there's an options field at - // the beginning. 
- var b byte - inQuote := false - var candidateOptions []string - optionStart := 0 - for i, b = range in { - isEnd := !inQuote && (b == ' ' || b == '\t') - if (b == ',' && !inQuote) || isEnd { - if i-optionStart > 0 { - candidateOptions = append(candidateOptions, string(in[optionStart:i])) - } - optionStart = i + 1 - } - if isEnd { - break - } - if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { - inQuote = !inQuote - } - } - for i < len(in) && (in[i] == ' ' || in[i] == '\t') { - i++ - } - if i == len(in) { - // Invalid line: unmatched quote - in = rest - continue - } - - in = in[i:] - i = bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - options = candidateOptions - return out, comment, options, rest, nil - } - - in = rest - continue - } - - return nil, "", nil, nil, errors.New("ssh: no key found") -} - -// ParsePublicKey parses an SSH public key formatted for use in -// the SSH wire protocol according to RFC 4253, section 6.6. -func ParsePublicKey(in []byte) (out PublicKey, err error) { - algo, in, ok := parseString(in) - if !ok { - return nil, errShortRead - } - var rest []byte - out, rest, err = parsePubKey(in, string(algo)) - if len(rest) > 0 { - return nil, errors.New("ssh: trailing junk in public key") - } - - return out, err -} - -// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH -// authorized_keys file. The return value ends with newline. -func MarshalAuthorizedKey(key PublicKey) []byte { - b := &bytes.Buffer{} - b.WriteString(key.Type()) - b.WriteByte(' ') - e := base64.NewEncoder(base64.StdEncoding, b) - e.Write(key.Marshal()) - e.Close() - b.WriteByte('\n') - return b.Bytes() -} - -// PublicKey represents a public key using an unspecified algorithm. -// -// Some PublicKeys provided by this package also implement CryptoPublicKey. -type PublicKey interface { - // Type returns the key format name, e.g. "ssh-rsa". 
- Type() string - - // Marshal returns the serialized key data in SSH wire format, with the name - // prefix. To unmarshal the returned data, use the ParsePublicKey function. - Marshal() []byte - - // Verify that sig is a signature on the given data using this key. This - // method will hash the data appropriately first. sig.Format is allowed to - // be any signature algorithm compatible with the key type, the caller - // should check if it has more stringent requirements. - Verify(data []byte, sig *Signature) error -} - -// CryptoPublicKey, if implemented by a PublicKey, -// returns the underlying crypto.PublicKey form of the key. -type CryptoPublicKey interface { - CryptoPublicKey() crypto.PublicKey -} - -// A Signer can create signatures that verify against a public key. -// -// Some Signers provided by this package also implement AlgorithmSigner. -type Signer interface { - // PublicKey returns the associated PublicKey. - PublicKey() PublicKey - - // Sign returns a signature for the given data. This method will hash the - // data appropriately first. The signature algorithm is expected to match - // the key format returned by the PublicKey.Type method (and not to be any - // alternative algorithm supported by the key format). - Sign(rand io.Reader, data []byte) (*Signature, error) -} - -// An AlgorithmSigner is a Signer that also supports specifying an algorithm to -// use for signing. -// -// An AlgorithmSigner can't advertise the algorithms it supports, so it should -// be prepared to be invoked with every algorithm supported by the public key -// format. -type AlgorithmSigner interface { - Signer - - // SignWithAlgorithm is like Signer.Sign, but allows specifying a desired - // signing algorithm. Callers may pass an empty string for the algorithm in - // which case the AlgorithmSigner will use a default algorithm. This default - // doesn't currently control any behavior in this package. 
- SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) -} - -type rsaPublicKey rsa.PublicKey - -func (r *rsaPublicKey) Type() string { - return "ssh-rsa" -} - -// parseRSA parses an RSA key according to RFC 4253, section 6.6. -func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - E *big.Int - N *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if w.E.BitLen() > 24 { - return nil, nil, errors.New("ssh: exponent too large") - } - e := w.E.Int64() - if e < 3 || e&1 == 0 { - return nil, nil, errors.New("ssh: incorrect exponent") - } - - var key rsa.PublicKey - key.E = int(e) - key.N = w.N - return (*rsaPublicKey)(&key), w.Rest, nil -} - -func (r *rsaPublicKey) Marshal() []byte { - e := new(big.Int).SetInt64(int64(r.E)) - // RSA publickey struct layout should match the struct used by - // parseRSACert in the x/crypto/ssh/agent package. - wirekey := struct { - Name string - E *big.Int - N *big.Int - }{ - KeyAlgoRSA, - e, - r.N, - } - return Marshal(&wirekey) -} - -func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - supportedAlgos := algorithmsForKeyFormat(r.Type()) - if !contains(supportedAlgos, sig.Format) { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) - } - hash := hashFuncs[sig.Format] - h := hash.New() - h.Write(data) - digest := h.Sum(nil) - return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob) -} - -func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*rsa.PublicKey)(r) -} - -type dsaPublicKey dsa.PublicKey - -func (k *dsaPublicKey) Type() string { - return "ssh-dss" -} - -func checkDSAParams(param *dsa.Parameters) error { - // SSH specifies FIPS 186-2, which only provided a single size - // (1024 bits) DSA key. FIPS 186-3 allows for larger key - // sizes, which would confuse SSH. 
- if l := param.P.BitLen(); l != 1024 { - return fmt.Errorf("ssh: unsupported DSA key size %d", l) - } - - return nil -} - -// parseDSA parses an DSA key according to RFC 4253, section 6.6. -func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - P, Q, G, Y *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - param := dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - } - if err := checkDSAParams(¶m); err != nil { - return nil, nil, err - } - - key := &dsaPublicKey{ - Parameters: param, - Y: w.Y, - } - return key, w.Rest, nil -} - -func (k *dsaPublicKey) Marshal() []byte { - // DSA publickey struct layout should match the struct used by - // parseDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - P, Q, G, Y *big.Int - }{ - k.Type(), - k.P, - k.Q, - k.G, - k.Y, - } - - return Marshal(&w) -} - -func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - h := hashFuncs[sig.Format].New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 4253, section 6.6, - // The value for 'dss_signature_blob' is encoded as a string containing - // r, followed by s (which are 160-bit integers, without lengths or - // padding, unsigned, and in network byte order). - // For DSS purposes, sig.Blob should be exactly 40 bytes in length. 
- if len(sig.Blob) != 40 { - return errors.New("ssh: DSA signature parse error") - } - r := new(big.Int).SetBytes(sig.Blob[:20]) - s := new(big.Int).SetBytes(sig.Blob[20:]) - if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*dsa.PublicKey)(k) -} - -type dsaPrivateKey struct { - *dsa.PrivateKey -} - -func (k *dsaPrivateKey) PublicKey() PublicKey { - return (*dsaPublicKey)(&k.PrivateKey.PublicKey) -} - -func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - return k.SignWithAlgorithm(rand, data, k.PublicKey().Type()) -} - -func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - if algorithm != "" && algorithm != k.PublicKey().Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - - h := hashFuncs[k.PublicKey().Type()].New() - h.Write(data) - digest := h.Sum(nil) - r, s, err := dsa.Sign(rand, k.PrivateKey, digest) - if err != nil { - return nil, err - } - - sig := make([]byte, 40) - rb := r.Bytes() - sb := s.Bytes() - - copy(sig[20-len(rb):20], rb) - copy(sig[40-len(sb):], sb) - - return &Signature{ - Format: k.PublicKey().Type(), - Blob: sig, - }, nil -} - -type ecdsaPublicKey ecdsa.PublicKey - -func (k *ecdsaPublicKey) Type() string { - return "ecdsa-sha2-" + k.nistID() -} - -func (k *ecdsaPublicKey) nistID() string { - switch k.Params().BitSize { - case 256: - return "nistp256" - case 384: - return "nistp384" - case 521: - return "nistp521" - } - panic("ssh: unsupported ecdsa key size") -} - -type ed25519PublicKey ed25519.PublicKey - -func (k ed25519PublicKey) Type() string { - return KeyAlgoED25519 -} - -func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, 
nil, err - } - - if l := len(w.KeyBytes); l != ed25519.PublicKeySize { - return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - return ed25519PublicKey(w.KeyBytes), w.Rest, nil -} - -func (k ed25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - }{ - KeyAlgoED25519, - []byte(k), - } - return Marshal(&w) -} - -func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - if l := len(k); l != ed25519.PublicKeySize { - return fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) - } - - if ok := ed25519.Verify(ed25519.PublicKey(k), b, sig.Blob); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { - return ed25519.PublicKey(k) -} - -func supportedEllipticCurve(curve elliptic.Curve) bool { - return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() -} - -// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. -func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(ecdsa.PublicKey) - - switch w.Curve { - case "nistp256": - key.Curve = elliptic.P256() - case "nistp384": - key.Curve = elliptic.P384() - case "nistp521": - key.Curve = elliptic.P521() - default: - return nil, nil, errors.New("ssh: unsupported curve") - } - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - return (*ecdsaPublicKey)(key), w.Rest, nil -} - -func (k *ecdsaPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. 
- keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - // ECDSA publickey struct layout should match the struct used by - // parseECDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - ID string - Key []byte - }{ - k.Type(), - k.nistID(), - keyBytes, - } - - return Marshal(&w) -} - -func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := hashFuncs[sig.Format].New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 5656, section 3.1.2, - // The ecdsa_signature_blob value has the following specific encoding: - // mpint r - // mpint s - var ecSig struct { - R *big.Int - S *big.Int - } - - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*ecdsa.PublicKey)(k) -} - -// skFields holds the additional fields present in U2F/FIDO2 signatures. -// See openssh/PROTOCOL.u2f 'SSH U2F Signatures' for details. -type skFields struct { - // Flags contains U2F/FIDO2 flags such as 'user present' - Flags byte - // Counter is a monotonic signature counter which can be - // used to detect concurrent use of a private key, should - // it be extracted from hardware. - Counter uint32 -} - -type skECDSAPublicKey struct { - // application is a URL-like string, typically "ssh:" for SSH. - // see openssh/PROTOCOL.u2f for details. 
- application string - ecdsa.PublicKey -} - -func (k *skECDSAPublicKey) Type() string { - return KeyAlgoSKECDSA256 -} - -func (k *skECDSAPublicKey) nistID() string { - return "nistp256" -} - -func parseSKECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Application string - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(skECDSAPublicKey) - key.application = w.Application - - if w.Curve != "nistp256" { - return nil, nil, errors.New("ssh: unsupported curve") - } - key.Curve = elliptic.P256() - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - - return key, w.Rest, nil -} - -func (k *skECDSAPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. - keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - w := struct { - Name string - ID string - Key []byte - Application string - }{ - k.Type(), - k.nistID(), - keyBytes, - k.application, - } - - return Marshal(&w) -} - -func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := hashFuncs[sig.Format].New() - h.Write([]byte(k.application)) - appDigest := h.Sum(nil) - - h.Reset() - h.Write(data) - dataDigest := h.Sum(nil) - - var ecSig struct { - R *big.Int - S *big.Int - } - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - var skf skFields - if err := Unmarshal(sig.Rest, &skf); err != nil { - return err - } - - blob := struct { - ApplicationDigest []byte `ssh:"rest"` - Flags byte - Counter uint32 - MessageDigest []byte `ssh:"rest"` - }{ - appDigest, - skf.Flags, - skf.Counter, - dataDigest, - } - - original := Marshal(blob) - - h.Reset() - h.Write(original) - digest := h.Sum(nil) - - if 
ecdsa.Verify((*ecdsa.PublicKey)(&k.PublicKey), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -type skEd25519PublicKey struct { - // application is a URL-like string, typically "ssh:" for SSH. - // see openssh/PROTOCOL.u2f for details. - application string - ed25519.PublicKey -} - -func (k *skEd25519PublicKey) Type() string { - return KeyAlgoSKED25519 -} - -func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Application string - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if l := len(w.KeyBytes); l != ed25519.PublicKeySize { - return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - key := new(skEd25519PublicKey) - key.application = w.Application - key.PublicKey = ed25519.PublicKey(w.KeyBytes) - - return key, w.Rest, nil -} - -func (k *skEd25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - Application string - }{ - KeyAlgoSKED25519, - []byte(k.PublicKey), - k.application, - } - return Marshal(&w) -} - -func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - if l := len(k.PublicKey); l != ed25519.PublicKeySize { - return fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - h := hashFuncs[sig.Format].New() - h.Write([]byte(k.application)) - appDigest := h.Sum(nil) - - h.Reset() - h.Write(data) - dataDigest := h.Sum(nil) - - var edSig struct { - Signature []byte `ssh:"rest"` - } - - if err := Unmarshal(sig.Blob, &edSig); err != nil { - return err - } - - var skf skFields - if err := Unmarshal(sig.Rest, &skf); err != nil { - return err - } - - blob := struct { - ApplicationDigest []byte `ssh:"rest"` - Flags byte - Counter uint32 - MessageDigest []byte `ssh:"rest"` - }{ - appDigest, - skf.Flags, 
- skf.Counter, - dataDigest, - } - - original := Marshal(blob) - - if ok := ed25519.Verify(k.PublicKey, original, edSig.Signature); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, -// *ecdsa.PrivateKey or any other crypto.Signer and returns a -// corresponding Signer instance. ECDSA keys must use P-256, P-384 or -// P-521. DSA keys must use parameter size L1024N160. -func NewSignerFromKey(key interface{}) (Signer, error) { - switch key := key.(type) { - case crypto.Signer: - return NewSignerFromSigner(key) - case *dsa.PrivateKey: - return newDSAPrivateKey(key) - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { - if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { - return nil, err - } - - return &dsaPrivateKey{key}, nil -} - -type wrappedSigner struct { - signer crypto.Signer - pubKey PublicKey -} - -// NewSignerFromSigner takes any crypto.Signer implementation and -// returns a corresponding Signer interface. This can be used, for -// example, with keys kept in hardware modules. 
-func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { - pubKey, err := NewPublicKey(signer.Public()) - if err != nil { - return nil, err - } - - return &wrappedSigner{signer, pubKey}, nil -} - -func (s *wrappedSigner) PublicKey() PublicKey { - return s.pubKey -} - -func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.SignWithAlgorithm(rand, data, s.pubKey.Type()) -} - -func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - if algorithm == "" { - algorithm = s.pubKey.Type() - } - - supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type()) - if !contains(supportedAlgos, algorithm) { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) - } - - hashFunc := hashFuncs[algorithm] - var digest []byte - if hashFunc != 0 { - h := hashFunc.New() - h.Write(data) - digest = h.Sum(nil) - } else { - digest = data - } - - signature, err := s.signer.Sign(rand, digest, hashFunc) - if err != nil { - return nil, err - } - - // crypto.Signer.Sign is expected to return an ASN.1-encoded signature - // for ECDSA and DSA, but that's not the encoding expected by SSH, so - // re-encode. - switch s.pubKey.(type) { - case *ecdsaPublicKey, *dsaPublicKey: - type asn1Signature struct { - R, S *big.Int - } - asn1Sig := new(asn1Signature) - _, err := asn1.Unmarshal(signature, asn1Sig) - if err != nil { - return nil, err - } - - switch s.pubKey.(type) { - case *ecdsaPublicKey: - signature = Marshal(asn1Sig) - - case *dsaPublicKey: - signature = make([]byte, 40) - r := asn1Sig.R.Bytes() - s := asn1Sig.S.Bytes() - copy(signature[20-len(r):20], r) - copy(signature[40-len(s):40], s) - } - } - - return &Signature{ - Format: algorithm, - Blob: signature, - }, nil -} - -// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, -// or ed25519.PublicKey returns a corresponding PublicKey instance. 
-// ECDSA keys must use P-256, P-384 or P-521. -func NewPublicKey(key interface{}) (PublicKey, error) { - switch key := key.(type) { - case *rsa.PublicKey: - return (*rsaPublicKey)(key), nil - case *ecdsa.PublicKey: - if !supportedEllipticCurve(key.Curve) { - return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") - } - return (*ecdsaPublicKey)(key), nil - case *dsa.PublicKey: - return (*dsaPublicKey)(key), nil - case ed25519.PublicKey: - if l := len(key); l != ed25519.PublicKeySize { - return nil, fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) - } - return ed25519PublicKey(key), nil - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports -// the same keys as ParseRawPrivateKey. If the private key is encrypted, it -// will return a PassphraseMissingError. -func ParsePrivateKey(pemBytes []byte) (Signer, error) { - key, err := ParseRawPrivateKey(pemBytes) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private -// key and passphrase. It supports the same keys as -// ParseRawPrivateKeyWithPassphrase. -func ParsePrivateKeyWithPassphrase(pemBytes, passphrase []byte) (Signer, error) { - key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// encryptedBlock tells whether a private key is -// encrypted by examining its Proc-Type header -// for a mention of ENCRYPTED -// according to RFC 1421 Section 4.6.1.1. -func encryptedBlock(block *pem.Block) bool { - return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") -} - -// A PassphraseMissingError indicates that parsing this private key requires a -// passphrase. Use ParsePrivateKeyWithPassphrase. 
-type PassphraseMissingError struct { - // PublicKey will be set if the private key format includes an unencrypted - // public key along with the encrypted private key. - PublicKey PublicKey -} - -func (*PassphraseMissingError) Error() string { - return "ssh: this private key is passphrase protected" -} - -// ParseRawPrivateKey returns a private key from a PEM encoded private key. It -// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. If the -// private key is encrypted, it will return a PassphraseMissingError. -func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if encryptedBlock(block) { - return nil, &PassphraseMissingError{} - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(block.Bytes) - // RFC5208 - https://tools.ietf.org/html/rfc5208 - case "PRIVATE KEY": - return x509.ParsePKCS8PrivateKey(block.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(block.Bytes) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(block.Bytes) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(block.Bytes, unencryptedOpenSSHKey) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with -// passphrase from a PEM encoded private key. If the passphrase is wrong, it -// will return x509.IncorrectPasswordError. 
-func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if block.Type == "OPENSSH PRIVATE KEY" { - return parseOpenSSHPrivateKey(block.Bytes, passphraseProtectedOpenSSHKey(passphrase)) - } - - if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) { - return nil, errors.New("ssh: not an encrypted key") - } - - buf, err := x509.DecryptPEMBlock(block, passphrase) - if err != nil { - if err == x509.IncorrectPasswordError { - return nil, err - } - return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(buf) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(buf) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(buf) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as -// specified by the OpenSSL DSA man page. 
-func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { - var k struct { - Version int - P *big.Int - Q *big.Int - G *big.Int - Pub *big.Int - Priv *big.Int - } - rest, err := asn1.Unmarshal(der, &k) - if err != nil { - return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) - } - if len(rest) > 0 { - return nil, errors.New("ssh: garbage after DSA key") - } - - return &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Pub, - }, - X: k.Priv, - }, nil -} - -func unencryptedOpenSSHKey(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { - if kdfName != "none" || cipherName != "none" { - return nil, &PassphraseMissingError{} - } - if kdfOpts != "" { - return nil, errors.New("ssh: invalid openssh private key") - } - return privKeyBlock, nil -} - -func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { - return func(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { - if kdfName == "none" || cipherName == "none" { - return nil, errors.New("ssh: key is not password protected") - } - if kdfName != "bcrypt" { - return nil, fmt.Errorf("ssh: unknown KDF %q, only supports %q", kdfName, "bcrypt") - } - - var opts struct { - Salt string - Rounds uint32 - } - if err := Unmarshal([]byte(kdfOpts), &opts); err != nil { - return nil, err - } - - k, err := bcrypt_pbkdf.Key(passphrase, []byte(opts.Salt), int(opts.Rounds), 32+16) - if err != nil { - return nil, err - } - key, iv := k[:32], k[32:] - - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - switch cipherName { - case "aes256-ctr": - ctr := cipher.NewCTR(c, iv) - ctr.XORKeyStream(privKeyBlock, privKeyBlock) - case "aes256-cbc": - if len(privKeyBlock)%c.BlockSize() != 0 { - return nil, fmt.Errorf("ssh: invalid encrypted private key length, not a multiple of the block size") - } - cbc := cipher.NewCBCDecrypter(c, iv) - cbc.CryptBlocks(privKeyBlock, 
privKeyBlock) - default: - return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q or %q", cipherName, "aes256-ctr", "aes256-cbc") - } - - return privKeyBlock, nil - } -} - -type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) - -// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt -// function to unwrap the encrypted portion. unencryptedOpenSSHKey can be used -// as the decrypt function to parse an unencrypted private key. See -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key. -func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) { - const magic = "openssh-key-v1\x00" - if len(key) < len(magic) || string(key[:len(magic)]) != magic { - return nil, errors.New("ssh: invalid openssh private key format") - } - remaining := key[len(magic):] - - var w struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte - } - - if err := Unmarshal(remaining, &w); err != nil { - return nil, err - } - if w.NumKeys != 1 { - // We only support single key files, and so does OpenSSH. 
- // https://github.com/openssh/openssh-portable/blob/4103a3ec7/sshkey.c#L4171 - return nil, errors.New("ssh: multi-key files are not supported") - } - - privKeyBlock, err := decrypt(w.CipherName, w.KdfName, w.KdfOpts, w.PrivKeyBlock) - if err != nil { - if err, ok := err.(*PassphraseMissingError); ok { - pub, errPub := ParsePublicKey(w.PubKey) - if errPub != nil { - return nil, fmt.Errorf("ssh: failed to parse embedded public key: %v", errPub) - } - err.PublicKey = pub - } - return nil, err - } - - pk1 := struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` - }{} - - if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { - if w.CipherName != "none" { - return nil, x509.IncorrectPasswordError - } - return nil, errors.New("ssh: malformed OpenSSH key") - } - - switch pk1.Keytype { - case KeyAlgoRSA: - // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 - key := struct { - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int - P *big.Int - Q *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - pk := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: key.N, - E: int(key.E.Int64()), - }, - D: key.D, - Primes: []*big.Int{key.P, key.Q}, - } - - if err := pk.Validate(); err != nil { - return nil, err - } - - pk.Precompute() - - return pk, nil - case KeyAlgoED25519: - key := struct { - Pub []byte - Priv []byte - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if len(key.Priv) != ed25519.PrivateKeySize { - return nil, errors.New("ssh: private key unexpected length") - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) - copy(pk, key.Priv) - return 
&pk, nil - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - key := struct { - Curve string - Pub []byte - D *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - var curve elliptic.Curve - switch key.Curve { - case "nistp256": - curve = elliptic.P256() - case "nistp384": - curve = elliptic.P384() - case "nistp521": - curve = elliptic.P521() - default: - return nil, errors.New("ssh: unhandled elliptic curve: " + key.Curve) - } - - X, Y := elliptic.Unmarshal(curve, key.Pub) - if X == nil || Y == nil { - return nil, errors.New("ssh: failed to unmarshal public key") - } - - if key.D.Cmp(curve.Params().N) >= 0 { - return nil, errors.New("ssh: scalar is out of range") - } - - x, y := curve.ScalarBaseMult(key.D.Bytes()) - if x.Cmp(X) != 0 || y.Cmp(Y) != 0 { - return nil, errors.New("ssh: public key does not match private key") - } - - return &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: curve, - X: X, - Y: Y, - }, - D: key.D, - }, nil - default: - return nil, errors.New("ssh: unhandled key type") - } -} - -func checkOpenSSHKeyPadding(pad []byte) error { - for i, b := range pad { - if int(b) != i+1 { - return errors.New("ssh: padding not as expected") - } - } - return nil -} - -// FingerprintLegacyMD5 returns the user presentation of the key's -// fingerprint as described by RFC 4716 section 4. -func FingerprintLegacyMD5(pubKey PublicKey) string { - md5sum := md5.Sum(pubKey.Marshal()) - hexarray := make([]string, len(md5sum)) - for i, c := range md5sum { - hexarray[i] = hex.EncodeToString([]byte{c}) - } - return strings.Join(hexarray, ":") -} - -// FingerprintSHA256 returns the user presentation of the key's -// fingerprint as unpadded base64 encoded sha256 hash. -// This format was introduced from OpenSSH 6.8. 
-// https://www.openssh.com/txt/release-6.8 -// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) -func FingerprintSHA256(pubKey PublicKey) string { - sha256sum := sha256.Sum256(pubKey.Marshal()) - hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) - return "SHA256:" + hash -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/mac.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/mac.go deleted file mode 100644 index c07a06285e..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/mac.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Message authentication support - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "hash" -) - -type macMode struct { - keySize int - etm bool - new func(key []byte) hash.Hash -} - -// truncatingMAC wraps around a hash.Hash and truncates the output digest to -// a given size. 
-type truncatingMAC struct { - length int - hmac hash.Hash -} - -func (t truncatingMAC) Write(data []byte) (int, error) { - return t.hmac.Write(data) -} - -func (t truncatingMAC) Sum(in []byte) []byte { - out := t.hmac.Sum(in) - return out[:len(in)+t.length] -} - -func (t truncatingMAC) Reset() { - t.hmac.Reset() -} - -func (t truncatingMAC) Size() int { - return t.length -} - -func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } - -var macModes = map[string]*macMode{ - "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha1": {20, false, func(key []byte) hash.Hash { - return hmac.New(sha1.New, key) - }}, - "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { - return truncatingMAC{12, hmac.New(sha1.New, key)} - }}, -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/messages.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/messages.go deleted file mode 100644 index 19bc67c464..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/messages.go +++ /dev/null @@ -1,877 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "reflect" - "strconv" - "strings" -) - -// These are SSH message type numbers. They are scattered around several -// documents but many were taken from [SSH-PARAMETERS]. -const ( - msgIgnore = 2 - msgUnimplemented = 3 - msgDebug = 4 - msgNewKeys = 21 -) - -// SSH messages: -// -// These structures mirror the wire format of the corresponding SSH messages. -// They are marshaled using reflection with the marshal and unmarshal functions -// in this file. 
The only wrinkle is that a final member of type []byte with a -// ssh tag of "rest" receives the remainder of a packet when unmarshaling. - -// See RFC 4253, section 11.1. -const msgDisconnect = 1 - -// disconnectMsg is the message that signals a disconnect. It is also -// the error type returned from mux.Wait() -type disconnectMsg struct { - Reason uint32 `sshtype:"1"` - Message string - Language string -} - -func (d *disconnectMsg) Error() string { - return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) -} - -// See RFC 4253, section 7.1. -const msgKexInit = 20 - -type kexInitMsg struct { - Cookie [16]byte `sshtype:"20"` - KexAlgos []string - ServerHostKeyAlgos []string - CiphersClientServer []string - CiphersServerClient []string - MACsClientServer []string - MACsServerClient []string - CompressionClientServer []string - CompressionServerClient []string - LanguagesClientServer []string - LanguagesServerClient []string - FirstKexFollows bool - Reserved uint32 -} - -// See RFC 4253, section 8. - -// Diffie-Helman -const msgKexDHInit = 30 - -type kexDHInitMsg struct { - X *big.Int `sshtype:"30"` -} - -const msgKexECDHInit = 30 - -type kexECDHInitMsg struct { - ClientPubKey []byte `sshtype:"30"` -} - -const msgKexECDHReply = 31 - -type kexECDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - EphemeralPubKey []byte - Signature []byte -} - -const msgKexDHReply = 31 - -type kexDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - Y *big.Int - Signature []byte -} - -// See RFC 4419, section 5. 
-const msgKexDHGexGroup = 31 - -type kexDHGexGroupMsg struct { - P *big.Int `sshtype:"31"` - G *big.Int -} - -const msgKexDHGexInit = 32 - -type kexDHGexInitMsg struct { - X *big.Int `sshtype:"32"` -} - -const msgKexDHGexReply = 33 - -type kexDHGexReplyMsg struct { - HostKey []byte `sshtype:"33"` - Y *big.Int - Signature []byte -} - -const msgKexDHGexRequest = 34 - -type kexDHGexRequestMsg struct { - MinBits uint32 `sshtype:"34"` - PreferedBits uint32 - MaxBits uint32 -} - -// See RFC 4253, section 10. -const msgServiceRequest = 5 - -type serviceRequestMsg struct { - Service string `sshtype:"5"` -} - -// See RFC 4253, section 10. -const msgServiceAccept = 6 - -type serviceAcceptMsg struct { - Service string `sshtype:"6"` -} - -// See RFC 8308, section 2.3 -const msgExtInfo = 7 - -type extInfoMsg struct { - NumExtensions uint32 `sshtype:"7"` - Payload []byte `ssh:"rest"` -} - -// See RFC 4252, section 5. -const msgUserAuthRequest = 50 - -type userAuthRequestMsg struct { - User string `sshtype:"50"` - Service string - Method string - Payload []byte `ssh:"rest"` -} - -// Used for debug printouts of packets. -type userAuthSuccessMsg struct { -} - -// See RFC 4252, section 5.1 -const msgUserAuthFailure = 51 - -type userAuthFailureMsg struct { - Methods []string `sshtype:"51"` - PartialSuccess bool -} - -// See RFC 4252, section 5.1 -const msgUserAuthSuccess = 52 - -// See RFC 4252, section 5.4 -const msgUserAuthBanner = 53 - -type userAuthBannerMsg struct { - Message string `sshtype:"53"` - // unused, but required to allow message parsing - Language string -} - -// See RFC 4256, section 3.2 -const msgUserAuthInfoRequest = 60 -const msgUserAuthInfoResponse = 61 - -type userAuthInfoRequestMsg struct { - Name string `sshtype:"60"` - Instruction string - Language string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. 
-const msgChannelOpen = 90 - -type channelOpenMsg struct { - ChanType string `sshtype:"90"` - PeersID uint32 - PeersWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -const msgChannelExtendedData = 95 -const msgChannelData = 94 - -// Used for debug print outs of packets. -type channelDataMsg struct { - PeersID uint32 `sshtype:"94"` - Length uint32 - Rest []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenConfirm = 91 - -type channelOpenConfirmMsg struct { - PeersID uint32 `sshtype:"91"` - MyID uint32 - MyWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenFailure = 92 - -type channelOpenFailureMsg struct { - PeersID uint32 `sshtype:"92"` - Reason RejectionReason - Message string - Language string -} - -const msgChannelRequest = 98 - -type channelRequestMsg struct { - PeersID uint32 `sshtype:"98"` - Request string - WantReply bool - RequestSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.4. -const msgChannelSuccess = 99 - -type channelRequestSuccessMsg struct { - PeersID uint32 `sshtype:"99"` -} - -// See RFC 4254, section 5.4. 
-const msgChannelFailure = 100 - -type channelRequestFailureMsg struct { - PeersID uint32 `sshtype:"100"` -} - -// See RFC 4254, section 5.3 -const msgChannelClose = 97 - -type channelCloseMsg struct { - PeersID uint32 `sshtype:"97"` -} - -// See RFC 4254, section 5.3 -const msgChannelEOF = 96 - -type channelEOFMsg struct { - PeersID uint32 `sshtype:"96"` -} - -// See RFC 4254, section 4 -const msgGlobalRequest = 80 - -type globalRequestMsg struct { - Type string `sshtype:"80"` - WantReply bool - Data []byte `ssh:"rest"` -} - -// See RFC 4254, section 4 -const msgRequestSuccess = 81 - -type globalRequestSuccessMsg struct { - Data []byte `ssh:"rest" sshtype:"81"` -} - -// See RFC 4254, section 4 -const msgRequestFailure = 82 - -type globalRequestFailureMsg struct { - Data []byte `ssh:"rest" sshtype:"82"` -} - -// See RFC 4254, section 5.2 -const msgChannelWindowAdjust = 93 - -type windowAdjustMsg struct { - PeersID uint32 `sshtype:"93"` - AdditionalBytes uint32 -} - -// See RFC 4252, section 7 -const msgUserAuthPubKeyOk = 60 - -type userAuthPubKeyOkMsg struct { - Algo string `sshtype:"60"` - PubKey []byte -} - -// See RFC 4462, section 3 -const msgUserAuthGSSAPIResponse = 60 - -type userAuthGSSAPIResponse struct { - SupportMech []byte `sshtype:"60"` -} - -const msgUserAuthGSSAPIToken = 61 - -type userAuthGSSAPIToken struct { - Token []byte `sshtype:"61"` -} - -const msgUserAuthGSSAPIMIC = 66 - -type userAuthGSSAPIMIC struct { - MIC []byte `sshtype:"66"` -} - -// See RFC 4462, section 3.9 -const msgUserAuthGSSAPIErrTok = 64 - -type userAuthGSSAPIErrTok struct { - ErrorToken []byte `sshtype:"64"` -} - -// See RFC 4462, section 3.8 -const msgUserAuthGSSAPIError = 65 - -type userAuthGSSAPIError struct { - MajorStatus uint32 `sshtype:"65"` - MinorStatus uint32 - Message string - LanguageTag string -} - -// typeTags returns the possible type bytes for the given reflect.Type, which -// should be a struct. The possible values are separated by a '|' character. 
-func typeTags(structType reflect.Type) (tags []byte) { - tagStr := structType.Field(0).Tag.Get("sshtype") - - for _, tag := range strings.Split(tagStr, "|") { - i, err := strconv.Atoi(tag) - if err == nil { - tags = append(tags, byte(i)) - } - } - - return tags -} - -func fieldError(t reflect.Type, field int, problem string) error { - if problem != "" { - problem = ": " + problem - } - return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) -} - -var errShortRead = errors.New("ssh: short read") - -// Unmarshal parses data in SSH wire format into a structure. The out -// argument should be a pointer to struct. If the first member of the -// struct has the "sshtype" tag set to a '|'-separated set of numbers -// in decimal, the packet must start with one of those numbers. In -// case of error, Unmarshal returns a ParseError or -// UnexpectedMessageError. -func Unmarshal(data []byte, out interface{}) error { - v := reflect.ValueOf(out).Elem() - structType := v.Type() - expectedTypes := typeTags(structType) - - var expectedType byte - if len(expectedTypes) > 0 { - expectedType = expectedTypes[0] - } - - if len(data) == 0 { - return parseError(expectedType) - } - - if len(expectedTypes) > 0 { - goodType := false - for _, e := range expectedTypes { - if e > 0 && data[0] == e { - goodType = true - break - } - } - if !goodType { - return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) - } - data = data[1:] - } - - var ok bool - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - t := field.Type() - switch t.Kind() { - case reflect.Bool: - if len(data) < 1 { - return errShortRead - } - field.SetBool(data[0] != 0) - data = data[1:] - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - return fieldError(structType, i, "array of unsupported type") - } - if len(data) < t.Len() { - return errShortRead - } - for j, n := 0, t.Len(); j < n; j++ { - 
field.Index(j).Set(reflect.ValueOf(data[j])) - } - data = data[t.Len():] - case reflect.Uint64: - var u64 uint64 - if u64, data, ok = parseUint64(data); !ok { - return errShortRead - } - field.SetUint(u64) - case reflect.Uint32: - var u32 uint32 - if u32, data, ok = parseUint32(data); !ok { - return errShortRead - } - field.SetUint(uint64(u32)) - case reflect.Uint8: - if len(data) < 1 { - return errShortRead - } - field.SetUint(uint64(data[0])) - data = data[1:] - case reflect.String: - var s []byte - if s, data, ok = parseString(data); !ok { - return fieldError(structType, i, "") - } - field.SetString(string(s)) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if structType.Field(i).Tag.Get("ssh") == "rest" { - field.Set(reflect.ValueOf(data)) - data = nil - } else { - var s []byte - if s, data, ok = parseString(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(s)) - } - case reflect.String: - var nl []string - if nl, data, ok = parseNameList(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(nl)) - default: - return fieldError(structType, i, "slice of unsupported type") - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - if n, data, ok = parseInt(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(n)) - } else { - return fieldError(structType, i, "pointer to unsupported type") - } - default: - return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) - } - } - - if len(data) != 0 { - return parseError(expectedType) - } - - return nil -} - -// Marshal serializes the message in msg to SSH wire format. The msg -// argument should be a struct or pointer to struct. If the first -// member has the "sshtype" tag set to a number in decimal, that -// number is prepended to the result. If the last of member has the -// "ssh" tag set to "rest", its contents are appended to the output. 
-func Marshal(msg interface{}) []byte { - out := make([]byte, 0, 64) - return marshalStruct(out, msg) -} - -func marshalStruct(out []byte, msg interface{}) []byte { - v := reflect.Indirect(reflect.ValueOf(msg)) - msgTypes := typeTags(v.Type()) - if len(msgTypes) > 0 { - out = append(out, msgTypes[0]) - } - - for i, n := 0, v.NumField(); i < n; i++ { - field := v.Field(i) - switch t := field.Type(); t.Kind() { - case reflect.Bool: - var v uint8 - if field.Bool() { - v = 1 - } - out = append(out, v) - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) - } - for j, l := 0, t.Len(); j < l; j++ { - out = append(out, uint8(field.Index(j).Uint())) - } - case reflect.Uint32: - out = appendU32(out, uint32(field.Uint())) - case reflect.Uint64: - out = appendU64(out, uint64(field.Uint())) - case reflect.Uint8: - out = append(out, uint8(field.Uint())) - case reflect.String: - s := field.String() - out = appendInt(out, len(s)) - out = append(out, s...) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if v.Type().Field(i).Tag.Get("ssh") != "rest" { - out = appendInt(out, field.Len()) - } - out = append(out, field.Bytes()...) - case reflect.String: - offset := len(out) - out = appendU32(out, 0) - if n := field.Len(); n > 0 { - for j := 0; j < n; j++ { - f := field.Index(j) - if j != 0 { - out = append(out, ',') - } - out = append(out, f.String()...) 
- } - // overwrite length value - binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) - } - default: - panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - nValue := reflect.ValueOf(&n) - nValue.Elem().Set(field) - needed := intLength(n) - oldLength := len(out) - - if cap(out)-len(out) < needed { - newOut := make([]byte, len(out), 2*(len(out)+needed)) - copy(newOut, out) - out = newOut - } - out = out[:oldLength+needed] - marshalInt(out[oldLength:], n) - } else { - panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) - } - } - } - - return out -} - -var bigOne = big.NewInt(1) - -func parseString(in []byte) (out, rest []byte, ok bool) { - if len(in) < 4 { - return - } - length := binary.BigEndian.Uint32(in) - in = in[4:] - if uint32(len(in)) < length { - return - } - out = in[:length] - rest = in[length:] - ok = true - return -} - -var ( - comma = []byte{','} - emptyNameList = []string{} -) - -func parseNameList(in []byte) (out []string, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - if len(contents) == 0 { - out = emptyNameList - return - } - parts := bytes.Split(contents, comma) - out = make([]string, len(parts)) - for i, part := range parts { - out[i] = string(part) - } - return -} - -func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - out = new(big.Int) - - if len(contents) > 0 && contents[0]&0x80 == 0x80 { - // This is a negative number - notBytes := make([]byte, len(contents)) - for i := range notBytes { - notBytes[i] = ^contents[i] - } - out.SetBytes(notBytes) - out.Add(out, bigOne) - out.Neg(out) - } else { - // Positive number - out.SetBytes(contents) - } - ok = true - return -} - -func parseUint32(in []byte) (uint32, []byte, bool) { - if len(in) < 4 { - return 0, nil, false - } - return 
binary.BigEndian.Uint32(in), in[4:], true -} - -func parseUint64(in []byte) (uint64, []byte, bool) { - if len(in) < 8 { - return 0, nil, false - } - return binary.BigEndian.Uint64(in), in[8:], true -} - -func intLength(n *big.Int) int { - length := 4 /* length bytes */ - if n.Sign() < 0 { - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bitLen := nMinus1.BitLen() - if bitLen%8 == 0 { - // The number will need 0xff padding - length++ - } - length += (bitLen + 7) / 8 - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bitLen := n.BitLen() - if bitLen%8 == 0 { - // The number will need 0x00 padding - length++ - } - length += (bitLen + 7) / 8 - } - - return length -} - -func marshalUint32(to []byte, n uint32) []byte { - binary.BigEndian.PutUint32(to, n) - return to[4:] -} - -func marshalUint64(to []byte, n uint64) []byte { - binary.BigEndian.PutUint64(to, n) - return to[8:] -} - -func marshalInt(to []byte, n *big.Int) []byte { - lengthBytes := to - to = to[4:] - length := 0 - - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement - // form. So we'll subtract 1 and invert. If the - // most-significant-bit isn't set then we'll need to pad the - // beginning with 0xff in order to keep the number negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - to[0] = 0xff - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bytes := n.Bytes() - if len(bytes) > 0 && bytes[0]&0x80 != 0 { - // We'll have to pad this with a 0x00 in order to - // stop it looking like a negative number. 
- to[0] = 0 - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } - - lengthBytes[0] = byte(length >> 24) - lengthBytes[1] = byte(length >> 16) - lengthBytes[2] = byte(length >> 8) - lengthBytes[3] = byte(length) - return to -} - -func writeInt(w io.Writer, n *big.Int) { - length := intLength(n) - buf := make([]byte, length) - marshalInt(buf, n) - w.Write(buf) -} - -func writeString(w io.Writer, s []byte) { - var lengthBytes [4]byte - lengthBytes[0] = byte(len(s) >> 24) - lengthBytes[1] = byte(len(s) >> 16) - lengthBytes[2] = byte(len(s) >> 8) - lengthBytes[3] = byte(len(s)) - w.Write(lengthBytes[:]) - w.Write(s) -} - -func stringLength(n int) int { - return 4 + n -} - -func marshalString(to []byte, s []byte) []byte { - to[0] = byte(len(s) >> 24) - to[1] = byte(len(s) >> 16) - to[2] = byte(len(s) >> 8) - to[3] = byte(len(s)) - to = to[4:] - copy(to, s) - return to[len(s):] -} - -var bigIntType = reflect.TypeOf((*big.Int)(nil)) - -// Decode a packet into its corresponding message. 
-func decode(packet []byte) (interface{}, error) { - var msg interface{} - switch packet[0] { - case msgDisconnect: - msg = new(disconnectMsg) - case msgServiceRequest: - msg = new(serviceRequestMsg) - case msgServiceAccept: - msg = new(serviceAcceptMsg) - case msgExtInfo: - msg = new(extInfoMsg) - case msgKexInit: - msg = new(kexInitMsg) - case msgKexDHInit: - msg = new(kexDHInitMsg) - case msgKexDHReply: - msg = new(kexDHReplyMsg) - case msgUserAuthRequest: - msg = new(userAuthRequestMsg) - case msgUserAuthSuccess: - return new(userAuthSuccessMsg), nil - case msgUserAuthFailure: - msg = new(userAuthFailureMsg) - case msgUserAuthPubKeyOk: - msg = new(userAuthPubKeyOkMsg) - case msgGlobalRequest: - msg = new(globalRequestMsg) - case msgRequestSuccess: - msg = new(globalRequestSuccessMsg) - case msgRequestFailure: - msg = new(globalRequestFailureMsg) - case msgChannelOpen: - msg = new(channelOpenMsg) - case msgChannelData: - msg = new(channelDataMsg) - case msgChannelOpenConfirm: - msg = new(channelOpenConfirmMsg) - case msgChannelOpenFailure: - msg = new(channelOpenFailureMsg) - case msgChannelWindowAdjust: - msg = new(windowAdjustMsg) - case msgChannelEOF: - msg = new(channelEOFMsg) - case msgChannelClose: - msg = new(channelCloseMsg) - case msgChannelRequest: - msg = new(channelRequestMsg) - case msgChannelSuccess: - msg = new(channelRequestSuccessMsg) - case msgChannelFailure: - msg = new(channelRequestFailureMsg) - case msgUserAuthGSSAPIToken: - msg = new(userAuthGSSAPIToken) - case msgUserAuthGSSAPIMIC: - msg = new(userAuthGSSAPIMIC) - case msgUserAuthGSSAPIErrTok: - msg = new(userAuthGSSAPIErrTok) - case msgUserAuthGSSAPIError: - msg = new(userAuthGSSAPIError) - default: - return nil, unexpectedMessageError(0, packet[0]) - } - if err := Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} - -var packetTypeNames = map[byte]string{ - msgDisconnect: "disconnectMsg", - msgServiceRequest: "serviceRequestMsg", - msgServiceAccept: 
"serviceAcceptMsg", - msgExtInfo: "extInfoMsg", - msgKexInit: "kexInitMsg", - msgKexDHInit: "kexDHInitMsg", - msgKexDHReply: "kexDHReplyMsg", - msgUserAuthRequest: "userAuthRequestMsg", - msgUserAuthSuccess: "userAuthSuccessMsg", - msgUserAuthFailure: "userAuthFailureMsg", - msgUserAuthPubKeyOk: "userAuthPubKeyOkMsg", - msgGlobalRequest: "globalRequestMsg", - msgRequestSuccess: "globalRequestSuccessMsg", - msgRequestFailure: "globalRequestFailureMsg", - msgChannelOpen: "channelOpenMsg", - msgChannelData: "channelDataMsg", - msgChannelOpenConfirm: "channelOpenConfirmMsg", - msgChannelOpenFailure: "channelOpenFailureMsg", - msgChannelWindowAdjust: "windowAdjustMsg", - msgChannelEOF: "channelEOFMsg", - msgChannelClose: "channelCloseMsg", - msgChannelRequest: "channelRequestMsg", - msgChannelSuccess: "channelRequestSuccessMsg", - msgChannelFailure: "channelRequestFailureMsg", -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/mux.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/mux.go deleted file mode 100644 index 9654c01869..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/mux.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "fmt" - "io" - "log" - "sync" - "sync/atomic" -) - -// debugMux, if set, causes messages in the connection protocol to be -// logged. -const debugMux = false - -// chanList is a thread safe channel list. -type chanList struct { - // protects concurrent access to chans - sync.Mutex - - // chans are indexed by the local id of the channel, which the - // other side should send in the PeersId field. - chans []*channel - - // This is a debugging aid: it offsets all IDs by this - // amount. 
This helps distinguish otherwise identical - // server/client muxes - offset uint32 -} - -// Assigns a channel ID to the given channel. -func (c *chanList) add(ch *channel) uint32 { - c.Lock() - defer c.Unlock() - for i := range c.chans { - if c.chans[i] == nil { - c.chans[i] = ch - return uint32(i) + c.offset - } - } - c.chans = append(c.chans, ch) - return uint32(len(c.chans)-1) + c.offset -} - -// getChan returns the channel for the given ID. -func (c *chanList) getChan(id uint32) *channel { - id -= c.offset - - c.Lock() - defer c.Unlock() - if id < uint32(len(c.chans)) { - return c.chans[id] - } - return nil -} - -func (c *chanList) remove(id uint32) { - id -= c.offset - c.Lock() - if id < uint32(len(c.chans)) { - c.chans[id] = nil - } - c.Unlock() -} - -// dropAll forgets all channels it knows, returning them in a slice. -func (c *chanList) dropAll() []*channel { - c.Lock() - defer c.Unlock() - var r []*channel - - for _, ch := range c.chans { - if ch == nil { - continue - } - r = append(r, ch) - } - c.chans = nil - return r -} - -// mux represents the state for the SSH connection protocol, which -// multiplexes many channels onto a single packet transport. -type mux struct { - conn packetConn - chanList chanList - - incomingChannels chan NewChannel - - globalSentMu sync.Mutex - globalResponses chan interface{} - incomingRequests chan *Request - - errCond *sync.Cond - err error -} - -// When debugging, each new chanList instantiation has a different -// offset. -var globalOff uint32 - -func (m *mux) Wait() error { - m.errCond.L.Lock() - defer m.errCond.L.Unlock() - for m.err == nil { - m.errCond.Wait() - } - return m.err -} - -// newMux returns a mux that runs over the given connection. 
-func newMux(p packetConn) *mux { - m := &mux{ - conn: p, - incomingChannels: make(chan NewChannel, chanSize), - globalResponses: make(chan interface{}, 1), - incomingRequests: make(chan *Request, chanSize), - errCond: newCond(), - } - if debugMux { - m.chanList.offset = atomic.AddUint32(&globalOff, 1) - } - - go m.loop() - return m -} - -func (m *mux) sendMessage(msg interface{}) error { - p := Marshal(msg) - if debugMux { - log.Printf("send global(%d): %#v", m.chanList.offset, msg) - } - return m.conn.writePacket(p) -} - -func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { - if wantReply { - m.globalSentMu.Lock() - defer m.globalSentMu.Unlock() - } - - if err := m.sendMessage(globalRequestMsg{ - Type: name, - WantReply: wantReply, - Data: payload, - }); err != nil { - return false, nil, err - } - - if !wantReply { - return false, nil, nil - } - - msg, ok := <-m.globalResponses - if !ok { - return false, nil, io.EOF - } - switch msg := msg.(type) { - case *globalRequestFailureMsg: - return false, msg.Data, nil - case *globalRequestSuccessMsg: - return true, msg.Data, nil - default: - return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) - } -} - -// ackRequest must be called after processing a global request that -// has WantReply set. -func (m *mux) ackRequest(ok bool, data []byte) error { - if ok { - return m.sendMessage(globalRequestSuccessMsg{Data: data}) - } - return m.sendMessage(globalRequestFailureMsg{Data: data}) -} - -func (m *mux) Close() error { - return m.conn.Close() -} - -// loop runs the connection machine. It will process packets until an -// error is encountered. To synchronize on loop exit, use mux.Wait. 
-func (m *mux) loop() { - var err error - for err == nil { - err = m.onePacket() - } - - for _, ch := range m.chanList.dropAll() { - ch.close() - } - - close(m.incomingChannels) - close(m.incomingRequests) - close(m.globalResponses) - - m.conn.Close() - - m.errCond.L.Lock() - m.err = err - m.errCond.Broadcast() - m.errCond.L.Unlock() - - if debugMux { - log.Println("loop exit", err) - } -} - -// onePacket reads and processes one packet. -func (m *mux) onePacket() error { - packet, err := m.conn.readPacket() - if err != nil { - return err - } - - if debugMux { - if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { - log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) - } else { - p, _ := decode(packet) - log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) - } - } - - switch packet[0] { - case msgChannelOpen: - return m.handleChannelOpen(packet) - case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: - return m.handleGlobalPacket(packet) - } - - // assume a channel packet. - if len(packet) < 5 { - return parseError(packet[0]) - } - id := binary.BigEndian.Uint32(packet[1:]) - ch := m.chanList.getChan(id) - if ch == nil { - return m.handleUnknownChannelPacket(id, packet) - } - - return ch.handlePacket(packet) -} - -func (m *mux) handleGlobalPacket(packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - case *globalRequestMsg: - m.incomingRequests <- &Request{ - Type: msg.Type, - WantReply: msg.WantReply, - Payload: msg.Data, - mux: m, - } - case *globalRequestSuccessMsg, *globalRequestFailureMsg: - m.globalResponses <- msg - default: - panic(fmt.Sprintf("not a global message %#v", msg)) - } - - return nil -} - -// handleChannelOpen schedules a channel to be Accept()ed. 
-func (m *mux) handleChannelOpen(packet []byte) error { - var msg channelOpenMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - failMsg := channelOpenFailureMsg{ - PeersID: msg.PeersID, - Reason: ConnectionFailed, - Message: "invalid request", - Language: "en_US.UTF-8", - } - return m.sendMessage(failMsg) - } - - c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) - c.remoteId = msg.PeersID - c.maxRemotePayload = msg.MaxPacketSize - c.remoteWin.add(msg.PeersWindow) - m.incomingChannels <- c - return nil -} - -func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { - ch, err := m.openChannel(chanType, extra) - if err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { - ch := m.newChannel(chanType, channelOutbound, extra) - - ch.maxIncomingPayload = channelMaxPacket - - open := channelOpenMsg{ - ChanType: chanType, - PeersWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - TypeSpecificData: extra, - PeersID: ch.localId, - } - if err := m.sendMessage(open); err != nil { - return nil, err - } - - switch msg := (<-ch.msg).(type) { - case *channelOpenConfirmMsg: - return ch, nil - case *channelOpenFailureMsg: - return nil, &OpenChannelError{msg.Reason, msg.Message} - default: - return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) - } -} - -func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - // RFC 4254 section 5.4 says unrecognized channel requests should - // receive a failure response. 
- case *channelRequestMsg: - if msg.WantReply { - return m.sendMessage(channelRequestFailureMsg{ - PeersID: msg.PeersID, - }) - } - return nil - default: - return fmt.Errorf("ssh: invalid channel %d", id) - } -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/server.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/server.go deleted file mode 100644 index 70045bdfd8..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/server.go +++ /dev/null @@ -1,752 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "strings" -) - -// The Permissions type holds fine-grained permissions that are -// specific to a user or a specific authentication method for a user. -// The Permissions value for a successful authentication attempt is -// available in ServerConn, so it can be used to pass information from -// the user-authentication phase to the application layer. -type Permissions struct { - // CriticalOptions indicate restrictions to the default - // permissions, and are typically used in conjunction with - // user certificates. The standard for SSH certificates - // defines "force-command" (only allow the given command to - // execute) and "source-address" (only allow connections from - // the given address). The SSH package currently only enforces - // the "source-address" critical option. It is up to server - // implementations to enforce other critical options, such as - // "force-command", by checking them after the SSH handshake - // is successful. In general, SSH servers should reject - // connections that specify critical options that are unknown - // or not supported. - CriticalOptions map[string]string - - // Extensions are extra functionality that the server may - // offer on authenticated connections. 
Lack of support for an - // extension does not preclude authenticating a user. Common - // extensions are "permit-agent-forwarding", - // "permit-X11-forwarding". The Go SSH library currently does - // not act on any extension, and it is up to server - // implementations to honor them. Extensions can be used to - // pass data from the authentication callbacks to the server - // application layer. - Extensions map[string]string -} - -type GSSAPIWithMICConfig struct { - // AllowLogin, must be set, is called when gssapi-with-mic - // authentication is selected (RFC 4462 section 3). The srcName is from the - // results of the GSS-API authentication. The format is username@DOMAIN. - // GSSAPI just guarantees to the server who the user is, but not if they can log in, and with what permissions. - // This callback is called after the user identity is established with GSSAPI to decide if the user can login with - // which permissions. If the user is allowed to login, it should return a nil error. - AllowLogin func(conn ConnMetadata, srcName string) (*Permissions, error) - - // Server must be set. It's the implementation - // of the GSSAPIServer interface. See GSSAPIServer interface for details. - Server GSSAPIServer -} - -// ServerConfig holds server specific configuration data. -type ServerConfig struct { - // Config contains configuration shared between client and server. - Config - - hostKeys []Signer - - // NoClientAuth is true if clients are allowed to connect without - // authenticating. - NoClientAuth bool - - // MaxAuthTries specifies the maximum number of authentication attempts - // permitted per connection. If set to a negative number, the number of - // attempts are unlimited. If set to zero, the number of attempts are limited - // to 6. - MaxAuthTries int - - // PasswordCallback, if non-nil, is called when a user - // attempts to authenticate using a password. 
- PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) - - // PublicKeyCallback, if non-nil, is called when a client - // offers a public key for authentication. It must return a nil error - // if the given public key can be used to authenticate the - // given user. For example, see CertChecker.Authenticate. A - // call to this function does not guarantee that the key - // offered is in fact used to authenticate. To record any data - // depending on the public key, store it inside a - // Permissions.Extensions entry. - PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // KeyboardInteractiveCallback, if non-nil, is called when - // keyboard-interactive authentication is selected (RFC - // 4256). The client object's Challenge function should be - // used to query the user. The callback may offer multiple - // Challenge rounds. To avoid information leaks, the client - // should be presented a challenge even if the user is - // unknown. - KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) - - // AuthLogCallback, if non-nil, is called to log all authentication - // attempts. - AuthLogCallback func(conn ConnMetadata, method string, err error) - - // ServerVersion is the version identification string to announce in - // the public handshake. - // If empty, a reasonable default is used. - // Note that RFC 4253 section 4.2 requires that this string start with - // "SSH-2.0-". - ServerVersion string - - // BannerCallback, if present, is called and the return string is sent to - // the client after key exchange completed but before authentication. - BannerCallback func(conn ConnMetadata) string - - // GSSAPIWithMICConfig includes gssapi server and callback, which if both non-nil, is used - // when gssapi-with-mic authentication is selected (RFC 4462 section 3). - GSSAPIWithMICConfig *GSSAPIWithMICConfig -} - -// AddHostKey adds a private key as a host key. 
If an existing host -// key exists with the same public key format, it is replaced. Each server -// config must have at least one host key. -func (s *ServerConfig) AddHostKey(key Signer) { - for i, k := range s.hostKeys { - if k.PublicKey().Type() == key.PublicKey().Type() { - s.hostKeys[i] = key - return - } - } - - s.hostKeys = append(s.hostKeys, key) -} - -// cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. -type cachedPubKey struct { - user string - pubKeyData []byte - result error - perms *Permissions -} - -const maxCachedPubKeys = 16 - -// pubKeyCache caches tests for public keys. Since SSH clients -// will query whether a public key is acceptable before attempting to -// authenticate with it, we end up with duplicate queries for public -// key validity. The cache only applies to a single ServerConn. -type pubKeyCache struct { - keys []cachedPubKey -} - -// get returns the result for a given user/algo/key tuple. -func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { - for _, k := range c.keys { - if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { - return k, true - } - } - return cachedPubKey{}, false -} - -// add adds the given tuple to the cache. -func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) - } -} - -// ServerConn is an authenticated SSH connection, as seen from the -// server -type ServerConn struct { - Conn - - // If the succeeding authentication callback returned a - // non-nil Permissions pointer, it is stored here. - Permissions *Permissions -} - -// NewServerConn starts a new SSH server with c as the underlying -// transport. It starts with a handshake and, if the handshake is -// unsuccessful, it closes the connection and returns an error. The -// Request and NewChannel channels must be serviced, or the connection -// will hang. 
-// -// The returned error may be of type *ServerAuthError for -// authentication errors. -func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.MaxAuthTries == 0 { - fullConf.MaxAuthTries = 6 - } - // Check if the config contains any unsupported key exchanges - for _, kex := range fullConf.KeyExchanges { - if _, ok := serverForbiddenKexAlgos[kex]; ok { - return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) - } - } - - s := &connection{ - sshConn: sshConn{conn: c}, - } - perms, err := s.serverHandshake(&fullConf) - if err != nil { - c.Close() - return nil, nil, nil, err - } - return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil -} - -// signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. algo is the negotiate -// algorithm and may be a certificate type. -func signAndMarshal(k AlgorithmSigner, rand io.Reader, data []byte, algo string) ([]byte, error) { - sig, err := k.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) - if err != nil { - return nil, err - } - - return Marshal(sig), nil -} - -// handshake performs key exchange and user authentication. 
-func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { - if len(config.hostKeys) == 0 { - return nil, errors.New("ssh: server has no host keys") - } - - if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && - config.KeyboardInteractiveCallback == nil && (config.GSSAPIWithMICConfig == nil || - config.GSSAPIWithMICConfig.AllowLogin == nil || config.GSSAPIWithMICConfig.Server == nil) { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if config.ServerVersion != "" { - s.serverVersion = []byte(config.ServerVersion) - } else { - s.serverVersion = []byte(packageVersion) - } - var err error - s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) - if err != nil { - return nil, err - } - - tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) - s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) - - if err := s.transport.waitSession(); err != nil { - return nil, err - } - - // We just did the key change, so the session ID is established. 
- s.sessionID = s.transport.getSessionID() - - var packet []byte - if packet, err = s.transport.readPacket(); err != nil { - return nil, err - } - - var serviceRequest serviceRequestMsg - if err = Unmarshal(packet, &serviceRequest); err != nil { - return nil, err - } - if serviceRequest.Service != serviceUserAuth { - return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") - } - serviceAccept := serviceAcceptMsg{ - Service: serviceUserAuth, - } - if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { - return nil, err - } - - perms, err := s.serverAuthenticate(config) - if err != nil { - return nil, err - } - s.mux = newMux(s.transport) - return perms, err -} - -func isAcceptableAlgo(algo string) bool { - switch algo { - case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - return true - } - return false -} - -func checkSourceAddress(addr net.Addr, sourceAddrs string) error { - if addr == nil { - return errors.New("ssh: no address known for client, but source-address match required") - } - - tcpAddr, ok := addr.(*net.TCPAddr) - if !ok { - return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) - } - - for _, sourceAddr := range strings.Split(sourceAddrs, ",") { - if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { - if allowedIP.Equal(tcpAddr.IP) { - return nil - } - } else { - _, ipNet, err := net.ParseCIDR(sourceAddr) - if err != nil { - return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) - } - - if ipNet.Contains(tcpAddr.IP) { - return nil - } - } - } - - return fmt.Errorf("ssh: remote address %v is not allowed because of 
source-address restriction", addr) -} - -func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *connection, - sessionID []byte, userAuthReq userAuthRequestMsg) (authErr error, perms *Permissions, err error) { - gssAPIServer := gssapiConfig.Server - defer gssAPIServer.DeleteSecContext() - var srcName string - for { - var ( - outToken []byte - needContinue bool - ) - outToken, srcName, needContinue, err = gssAPIServer.AcceptSecContext(firstToken) - if err != nil { - return err, nil, nil - } - if len(outToken) != 0 { - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIToken{ - Token: outToken, - })); err != nil { - return nil, nil, err - } - } - if !needContinue { - break - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, nil, err - } - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPIMICReq := &userAuthGSSAPIMIC{} - if err := Unmarshal(packet, userAuthGSSAPIMICReq); err != nil { - return nil, nil, err - } - mic := buildMIC(string(sessionID), userAuthReq.User, userAuthReq.Service, userAuthReq.Method) - if err := gssAPIServer.VerifyMIC(mic, userAuthGSSAPIMICReq.MIC); err != nil { - return err, nil, nil - } - perms, authErr = gssapiConfig.AllowLogin(s, srcName) - return authErr, perms, nil -} - -// ServerAuthError represents server authentication errors and is -// sometimes returned by NewServerConn. It appends any authentication -// errors that may occur, and is returned if all of the authentication -// methods provided by the user failed to authenticate. -type ServerAuthError struct { - // Errors contains authentication errors returned by the authentication - // callback methods. The first entry is typically ErrNoAuth. 
- Errors []error -} - -func (l ServerAuthError) Error() string { - var errs []string - for _, err := range l.Errors { - errs = append(errs, err.Error()) - } - return "[" + strings.Join(errs, ", ") + "]" -} - -// ErrNoAuth is the error value returned if no -// authentication method has been passed yet. This happens as a normal -// part of the authentication loop, since the client first tries -// 'none' authentication to discover available methods. -// It is returned in ServerAuthError.Errors from NewServerConn. -var ErrNoAuth = errors.New("ssh: no auth passed yet") - -func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { - sessionID := s.transport.getSessionID() - var cache pubKeyCache - var perms *Permissions - - authFailures := 0 - var authErrs []error - var displayedBanner bool - -userAuthLoop: - for { - if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { - discMsg := &disconnectMsg{ - Reason: 2, - Message: "too many authentication failures", - } - - if err := s.transport.writePacket(Marshal(discMsg)); err != nil { - return nil, err - } - - return nil, discMsg - } - - var userAuthReq userAuthRequestMsg - if packet, err := s.transport.readPacket(); err != nil { - if err == io.EOF { - return nil, &ServerAuthError{Errors: authErrs} - } - return nil, err - } else if err = Unmarshal(packet, &userAuthReq); err != nil { - return nil, err - } - - if userAuthReq.Service != serviceSSH { - return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) - } - - s.user = userAuthReq.User - - if !displayedBanner && config.BannerCallback != nil { - displayedBanner = true - msg := config.BannerCallback(s) - if msg != "" { - bannerMsg := &userAuthBannerMsg{ - Message: msg, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { - return nil, err - } - } - } - - perms = nil - authErr := ErrNoAuth - - switch userAuthReq.Method { - case "none": - if config.NoClientAuth { - 
authErr = nil - } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } - case "password": - if config.PasswordCallback == nil { - authErr = errors.New("ssh: password auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 || payload[0] != 0 { - return nil, parseError(msgUserAuthRequest) - } - payload = payload[1:] - password, payload, ok := parseString(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - perms, authErr = config.PasswordCallback(s, password) - case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { - authErr = errors.New("ssh: keyboard-interactive auth not configured") - break - } - - prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) - case "publickey": - if config.PublicKeyCallback == nil { - authErr = errors.New("ssh: publickey auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 { - return nil, parseError(msgUserAuthRequest) - } - isQuery := payload[0] == 0 - payload = payload[1:] - algoBytes, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - algo := string(algoBytes) - if !isAcceptableAlgo(algo) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) - break - } - - pubKeyData, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - - pubKey, err := ParsePublicKey(pubKeyData) - if err != nil { - return nil, err - } - - candidate, ok := cache.get(s.user, pubKeyData) - if !ok { - candidate.user = s.user - candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = 
checkSourceAddress( - s.RemoteAddr(), - candidate.perms.CriticalOptions[sourceAddressCriticalOption]) - } - cache.add(candidate) - } - - if isQuery { - // The client can query if the given public key - // would be okay. - - if len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - if candidate.result == nil { - okMsg := userAuthPubKeyOkMsg{ - Algo: algo, - PubKey: pubKeyData, - } - if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { - return nil, err - } - continue userAuthLoop - } - authErr = candidate.result - } else { - sig, payload, ok := parseSignature(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - // Ensure the public key algo and signature algo - // are supported. Compare the private key - // algorithm name that corresponds to algo with - // sig.Format. This is usually the same, but - // for certs, the names differ. - if !isAcceptableAlgo(sig.Format) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) - break - } - if underlyingAlgo(algo) != sig.Format { - authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo) - break - } - - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algo, pubKeyData) - - if err := pubKey.Verify(signedData, sig); err != nil { - return nil, err - } - - authErr = candidate.result - perms = candidate.perms - } - case "gssapi-with-mic": - if config.GSSAPIWithMICConfig == nil { - authErr = errors.New("ssh: gssapi-with-mic auth not configured") - break - } - gssapiConfig := config.GSSAPIWithMICConfig - userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) - if err != nil { - return nil, parseError(msgUserAuthRequest) - } - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication. 
- if userAuthRequestGSSAPI.N == 0 { - authErr = fmt.Errorf("ssh: Mechanism negotiation is not supported") - break - } - var i uint32 - present := false - for i = 0; i < userAuthRequestGSSAPI.N; i++ { - if userAuthRequestGSSAPI.OIDS[i].Equal(krb5Mesh) { - present = true - break - } - } - if !present { - authErr = fmt.Errorf("ssh: GSSAPI authentication must use the Kerberos V5 mechanism") - break - } - // Initial server response, see RFC 4462 section 3.3. - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIResponse{ - SupportMech: krb5OID, - })); err != nil { - return nil, err - } - // Exchange token, see RFC 4462 section 3.4. - packet, err := s.transport.readPacket() - if err != nil { - return nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, err - } - authErr, perms, err = gssExchangeToken(gssapiConfig, userAuthGSSAPITokenReq.Token, s, sessionID, - userAuthReq) - if err != nil { - return nil, err - } - default: - authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) - } - - authErrs = append(authErrs, authErr) - - if config.AuthLogCallback != nil { - config.AuthLogCallback(s, userAuthReq.Method, authErr) - } - - if authErr == nil { - break userAuthLoop - } - - authFailures++ - if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { - // If we have hit the max attempts, don't bother sending the - // final SSH_MSG_USERAUTH_FAILURE message, since there are - // no more authentication methods which can be attempted, - // and this message may cause the client to re-attempt - // authentication while we send the disconnect message. - // Continue, and trigger the disconnect at the start of - // the loop. 
- // - // The SSH specification is somewhat confusing about this, - // RFC 4252 Section 5.1 requires each authentication failure - // be responded to with a respective SSH_MSG_USERAUTH_FAILURE - // message, but Section 4 says the server should disconnect - // after some number of attempts, but it isn't explicit which - // message should take precedence (i.e. should there be a failure - // message than a disconnect message, or if we are going to - // disconnect, should we only send that message.) - // - // Either way, OpenSSH disconnects immediately after the last - // failed authnetication attempt, and given they are typically - // considered the golden implementation it seems reasonable - // to match that behavior. - continue - } - - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "password") - } - if config.PublicKeyCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "publickey") - } - if config.KeyboardInteractiveCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") - } - if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && - config.GSSAPIWithMICConfig.AllowLogin != nil { - failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") - } - - if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { - return nil, err - } - } - - if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { - return nil, err - } - return perms, nil -} - -// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by -// asking the client on the other side of a ServerConn. 
-type sshClientKeyboardInteractive struct { - *connection -} - -func (c *sshClientKeyboardInteractive) Challenge(name, instruction string, questions []string, echos []bool) (answers []string, err error) { - if len(questions) != len(echos) { - return nil, errors.New("ssh: echos and questions must have equal length") - } - - var prompts []byte - for i := range questions { - prompts = appendString(prompts, questions[i]) - prompts = appendBool(prompts, echos[i]) - } - - if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ - Name: name, - Instruction: instruction, - NumPrompts: uint32(len(questions)), - Prompts: prompts, - })); err != nil { - return nil, err - } - - packet, err := c.transport.readPacket() - if err != nil { - return nil, err - } - if packet[0] != msgUserAuthInfoResponse { - return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) - } - packet = packet[1:] - - n, packet, ok := parseUint32(packet) - if !ok || int(n) != len(questions) { - return nil, parseError(msgUserAuthInfoResponse) - } - - for i := uint32(0); i < n; i++ { - ans, rest, ok := parseString(packet) - if !ok { - return nil, parseError(msgUserAuthInfoResponse) - } - - answers = append(answers, string(ans)) - packet = rest - } - if len(packet) != 0 { - return nil, errors.New("ssh: junk at end of message") - } - - return answers, nil -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/session.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/session.go deleted file mode 100644 index eca31a22d5..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/session.go +++ /dev/null @@ -1,648 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Session implements an interactive session described in -// "RFC 4254, section 6". 
- -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "sync" -) - -type Signal string - -// POSIX signals as listed in RFC 4254 Section 6.10. -const ( - SIGABRT Signal = "ABRT" - SIGALRM Signal = "ALRM" - SIGFPE Signal = "FPE" - SIGHUP Signal = "HUP" - SIGILL Signal = "ILL" - SIGINT Signal = "INT" - SIGKILL Signal = "KILL" - SIGPIPE Signal = "PIPE" - SIGQUIT Signal = "QUIT" - SIGSEGV Signal = "SEGV" - SIGTERM Signal = "TERM" - SIGUSR1 Signal = "USR1" - SIGUSR2 Signal = "USR2" -) - -var signals = map[Signal]int{ - SIGABRT: 6, - SIGALRM: 14, - SIGFPE: 8, - SIGHUP: 1, - SIGILL: 4, - SIGINT: 2, - SIGKILL: 9, - SIGPIPE: 13, - SIGQUIT: 3, - SIGSEGV: 11, - SIGTERM: 15, -} - -type TerminalModes map[uint8]uint32 - -// POSIX terminal mode flags as listed in RFC 4254 Section 8. -const ( - tty_OP_END = 0 - VINTR = 1 - VQUIT = 2 - VERASE = 3 - VKILL = 4 - VEOF = 5 - VEOL = 6 - VEOL2 = 7 - VSTART = 8 - VSTOP = 9 - VSUSP = 10 - VDSUSP = 11 - VREPRINT = 12 - VWERASE = 13 - VLNEXT = 14 - VFLUSH = 15 - VSWTCH = 16 - VSTATUS = 17 - VDISCARD = 18 - IGNPAR = 30 - PARMRK = 31 - INPCK = 32 - ISTRIP = 33 - INLCR = 34 - IGNCR = 35 - ICRNL = 36 - IUCLC = 37 - IXON = 38 - IXANY = 39 - IXOFF = 40 - IMAXBEL = 41 - IUTF8 = 42 // RFC 8160 - ISIG = 50 - ICANON = 51 - XCASE = 52 - ECHO = 53 - ECHOE = 54 - ECHOK = 55 - ECHONL = 56 - NOFLSH = 57 - TOSTOP = 58 - IEXTEN = 59 - ECHOCTL = 60 - ECHOKE = 61 - PENDIN = 62 - OPOST = 70 - OLCUC = 71 - ONLCR = 72 - OCRNL = 73 - ONOCR = 74 - ONLRET = 75 - CS7 = 90 - CS8 = 91 - PARENB = 92 - PARODD = 93 - TTY_OP_ISPEED = 128 - TTY_OP_OSPEED = 129 -) - -// A Session represents a connection to a remote command or shell. -type Session struct { - // Stdin specifies the remote process's standard input. - // If Stdin is nil, the remote process reads from an empty - // bytes.Buffer. - Stdin io.Reader - - // Stdout and Stderr specify the remote process's standard - // output and error. 
- // - // If either is nil, Run connects the corresponding file - // descriptor to an instance of ioutil.Discard. There is a - // fixed amount of buffering that is shared for the two streams. - // If either blocks it may eventually cause the remote - // command to block. - Stdout io.Writer - Stderr io.Writer - - ch Channel // the channel backing this session - started bool // true once Start, Run or Shell is invoked. - copyFuncs []func() error - errors chan error // one send per copyFunc - - // true if pipe method is active - stdinpipe, stdoutpipe, stderrpipe bool - - // stdinPipeWriter is non-nil if StdinPipe has not been called - // and Stdin was specified by the user; it is the write end of - // a pipe connecting Session.Stdin to the stdin channel. - stdinPipeWriter io.WriteCloser - - exitStatus chan error -} - -// SendRequest sends an out-of-band channel request on the SSH channel -// underlying the session. -func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - return s.ch.SendRequest(name, wantReply, payload) -} - -func (s *Session) Close() error { - return s.ch.Close() -} - -// RFC 4254 Section 6.4. -type setenvRequest struct { - Name string - Value string -} - -// Setenv sets an environment variable that will be applied to any -// command executed by Shell or Run. -func (s *Session) Setenv(name, value string) error { - msg := setenvRequest{ - Name: name, - Value: value, - } - ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: setenv failed") - } - return err -} - -// RFC 4254 Section 6.2. -type ptyRequestMsg struct { - Term string - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 - Modelist string -} - -// RequestPty requests the association of a pty with the session on the remote host. 
-func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { - var tm []byte - for k, v := range termmodes { - kv := struct { - Key byte - Val uint32 - }{k, v} - - tm = append(tm, Marshal(&kv)...) - } - tm = append(tm, tty_OP_END) - req := ptyRequestMsg{ - Term: term, - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - Modelist: string(tm), - } - ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) - if err == nil && !ok { - err = errors.New("ssh: pty-req failed") - } - return err -} - -// RFC 4254 Section 6.5. -type subsystemRequestMsg struct { - Subsystem string -} - -// RequestSubsystem requests the association of a subsystem with the session on the remote host. -// A subsystem is a predefined command that runs in the background when the ssh session is initiated -func (s *Session) RequestSubsystem(subsystem string) error { - msg := subsystemRequestMsg{ - Subsystem: subsystem, - } - ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: subsystem request failed") - } - return err -} - -// RFC 4254 Section 6.7. -type ptyWindowChangeMsg struct { - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 -} - -// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. -func (s *Session) WindowChange(h, w int) error { - req := ptyWindowChangeMsg{ - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - } - _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) - return err -} - -// RFC 4254 Section 6.9. -type signalMsg struct { - Signal string -} - -// Signal sends the given signal to the remote process. -// sig is one of the SIG* constants. -func (s *Session) Signal(sig Signal) error { - msg := signalMsg{ - Signal: string(sig), - } - - _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) - return err -} - -// RFC 4254 Section 6.5. 
-type execMsg struct { - Command string -} - -// Start runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start or Shell. -func (s *Session) Start(cmd string) error { - if s.started { - return errors.New("ssh: session already started") - } - req := execMsg{ - Command: cmd, - } - - ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) - if err == nil && !ok { - err = fmt.Errorf("ssh: command %v failed", cmd) - } - if err != nil { - return err - } - return s.start() -} - -// Run runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start, Shell, Output, -// or CombinedOutput. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Run(cmd string) error { - err := s.Start(cmd) - if err != nil { - return err - } - return s.Wait() -} - -// Output runs cmd on the remote host and returns its standard output. -func (s *Session) Output(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - var b bytes.Buffer - s.Stdout = &b - err := s.Run(cmd) - return b.Bytes(), err -} - -type singleWriter struct { - b bytes.Buffer - mu sync.Mutex -} - -func (w *singleWriter) Write(p []byte) (int, error) { - w.mu.Lock() - defer w.mu.Unlock() - return w.b.Write(p) -} - -// CombinedOutput runs cmd on the remote host and returns its combined -// standard output and standard error. 
-func (s *Session) CombinedOutput(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - var b singleWriter - s.Stdout = &b - s.Stderr = &b - err := s.Run(cmd) - return b.b.Bytes(), err -} - -// Shell starts a login shell on the remote host. A Session only -// accepts one call to Run, Start, Shell, Output, or CombinedOutput. -func (s *Session) Shell() error { - if s.started { - return errors.New("ssh: session already started") - } - - ok, err := s.ch.SendRequest("shell", true, nil) - if err == nil && !ok { - return errors.New("ssh: could not start shell") - } - if err != nil { - return err - } - return s.start() -} - -func (s *Session) start() error { - s.started = true - - type F func(*Session) - for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { - setupFd(s) - } - - s.errors = make(chan error, len(s.copyFuncs)) - for _, fn := range s.copyFuncs { - go func(fn func() error) { - s.errors <- fn() - }(fn) - } - return nil -} - -// Wait waits for the remote command to exit. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. 
-func (s *Session) Wait() error { - if !s.started { - return errors.New("ssh: session not started") - } - waitErr := <-s.exitStatus - - if s.stdinPipeWriter != nil { - s.stdinPipeWriter.Close() - } - var copyError error - for range s.copyFuncs { - if err := <-s.errors; err != nil && copyError == nil { - copyError = err - } - } - if waitErr != nil { - return waitErr - } - return copyError -} - -func (s *Session) wait(reqs <-chan *Request) error { - wm := Waitmsg{status: -1} - // Wait for msg channel to be closed before returning. - for msg := range reqs { - switch msg.Type { - case "exit-status": - wm.status = int(binary.BigEndian.Uint32(msg.Payload)) - case "exit-signal": - var sigval struct { - Signal string - CoreDumped bool - Error string - Lang string - } - if err := Unmarshal(msg.Payload, &sigval); err != nil { - return err - } - - // Must sanitize strings? - wm.signal = sigval.Signal - wm.msg = sigval.Error - wm.lang = sigval.Lang - default: - // This handles keepalives and matches - // OpenSSH's behaviour. - if msg.WantReply { - msg.Reply(false, nil) - } - } - } - if wm.status == 0 { - return nil - } - if wm.status == -1 { - // exit-status was never sent from server - if wm.signal == "" { - // signal was not sent either. RFC 4254 - // section 6.10 recommends against this - // behavior, but it is allowed, so we let - // clients handle it. - return &ExitMissingError{} - } - wm.status = 128 - if _, ok := signals[Signal(wm.signal)]; ok { - wm.status += signals[Signal(wm.signal)] - } - } - - return &ExitError{wm} -} - -// ExitMissingError is returned if a session is torn down cleanly, but -// the server sends no confirmation of the exit status. 
-type ExitMissingError struct{} - -func (e *ExitMissingError) Error() string { - return "wait: remote command exited without exit status or exit signal" -} - -func (s *Session) stdin() { - if s.stdinpipe { - return - } - var stdin io.Reader - if s.Stdin == nil { - stdin = new(bytes.Buffer) - } else { - r, w := io.Pipe() - go func() { - _, err := io.Copy(w, s.Stdin) - w.CloseWithError(err) - }() - stdin, s.stdinPipeWriter = r, w - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.ch, stdin) - if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { - err = err1 - } - return err - }) -} - -func (s *Session) stdout() { - if s.stdoutpipe { - return - } - if s.Stdout == nil { - s.Stdout = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stdout, s.ch) - return err - }) -} - -func (s *Session) stderr() { - if s.stderrpipe { - return - } - if s.Stderr == nil { - s.Stderr = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stderr, s.ch.Stderr()) - return err - }) -} - -// sessionStdin reroutes Close to CloseWrite. -type sessionStdin struct { - io.Writer - ch Channel -} - -func (s *sessionStdin) Close() error { - return s.ch.CloseWrite() -} - -// StdinPipe returns a pipe that will be connected to the -// remote command's standard input when the command starts. -func (s *Session) StdinPipe() (io.WriteCloser, error) { - if s.Stdin != nil { - return nil, errors.New("ssh: Stdin already set") - } - if s.started { - return nil, errors.New("ssh: StdinPipe after process started") - } - s.stdinpipe = true - return &sessionStdin{s.ch, s.ch}, nil -} - -// StdoutPipe returns a pipe that will be connected to the -// remote command's standard output when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. 
If the StdoutPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StdoutPipe() (io.Reader, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.started { - return nil, errors.New("ssh: StdoutPipe after process started") - } - s.stdoutpipe = true - return s.ch, nil -} - -// StderrPipe returns a pipe that will be connected to the -// remote command's standard error when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StderrPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StderrPipe() (io.Reader, error) { - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - if s.started { - return nil, errors.New("ssh: StderrPipe after process started") - } - s.stderrpipe = true - return s.ch.Stderr(), nil -} - -// newSession returns a new interactive session on the remote host. -func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { - s := &Session{ - ch: ch, - } - s.exitStatus = make(chan error, 1) - go func() { - s.exitStatus <- s.wait(reqs) - }() - - return s, nil -} - -// An ExitError reports unsuccessful completion of a remote command. -type ExitError struct { - Waitmsg -} - -func (e *ExitError) Error() string { - return e.Waitmsg.String() -} - -// Waitmsg stores the information about an exited remote command -// as reported by Wait. -type Waitmsg struct { - status int - signal string - msg string - lang string -} - -// ExitStatus returns the exit status of the remote command. -func (w Waitmsg) ExitStatus() int { - return w.status -} - -// Signal returns the exit signal of the remote command if -// it was terminated violently. 
-func (w Waitmsg) Signal() string { - return w.signal -} - -// Msg returns the exit message given by the remote command -func (w Waitmsg) Msg() string { - return w.msg -} - -// Lang returns the language tag. See RFC 3066 -func (w Waitmsg) Lang() string { - return w.lang -} - -func (w Waitmsg) String() string { - str := fmt.Sprintf("Process exited with status %v", w.status) - if w.signal != "" { - str += fmt.Sprintf(" from signal %v", w.signal) - } - if w.msg != "" { - str += fmt.Sprintf(". Reason was: %v", w.msg) - } - return str -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/ssh_gss.go deleted file mode 100644 index 24bd7c8e83..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/ssh_gss.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/asn1" - "errors" -) - -var krb5OID []byte - -func init() { - krb5OID, _ = asn1.Marshal(krb5Mesh) -} - -// GSSAPIClient provides the API to plug-in GSSAPI authentication for client logins. -type GSSAPIClient interface { - // InitSecContext initiates the establishment of a security context for GSS-API between the - // ssh client and ssh server. Initially the token parameter should be specified as nil. - // The routine may return a outputToken which should be transferred to - // the ssh server, where the ssh server will present it to - // AcceptSecContext. If no token need be sent, InitSecContext will indicate this by setting - // needContinue to false. To complete the context - // establishment, one or more reply tokens may be required from the ssh - // server;if so, InitSecContext will return a needContinue which is true. 
- // In this case, InitSecContext should be called again when the - // reply token is received from the ssh server, passing the reply - // token to InitSecContext via the token parameters. - // See RFC 2743 section 2.2.1 and RFC 4462 section 3.4. - InitSecContext(target string, token []byte, isGSSDelegCreds bool) (outputToken []byte, needContinue bool, err error) - // GetMIC generates a cryptographic MIC for the SSH2 message, and places - // the MIC in a token for transfer to the ssh server. - // The contents of the MIC field are obtained by calling GSS_GetMIC() - // over the following, using the GSS-API context that was just - // established: - // string session identifier - // byte SSH_MSG_USERAUTH_REQUEST - // string user name - // string service - // string "gssapi-with-mic" - // See RFC 2743 section 2.3.1 and RFC 4462 3.5. - GetMIC(micFiled []byte) ([]byte, error) - // Whenever possible, it should be possible for - // DeleteSecContext() calls to be successfully processed even - // if other calls cannot succeed, thereby enabling context-related - // resources to be released. - // In addition to deleting established security contexts, - // gss_delete_sec_context must also be able to delete "half-built" - // security contexts resulting from an incomplete sequence of - // InitSecContext()/AcceptSecContext() calls. - // See RFC 2743 section 2.2.3. - DeleteSecContext() error -} - -// GSSAPIServer provides the API to plug in GSSAPI authentication for server logins. -type GSSAPIServer interface { - // AcceptSecContext allows a remotely initiated security context between the application - // and a remote peer to be established by the ssh client. The routine may return a - // outputToken which should be transferred to the ssh client, - // where the ssh client will present it to InitSecContext. - // If no token need be sent, AcceptSecContext will indicate this - // by setting the needContinue to false. 
To - // complete the context establishment, one or more reply tokens may be - // required from the ssh client. if so, AcceptSecContext - // will return a needContinue which is true, in which case it - // should be called again when the reply token is received from the ssh - // client, passing the token to AcceptSecContext via the - // token parameters. - // The srcName return value is the authenticated username. - // See RFC 2743 section 2.2.2 and RFC 4462 section 3.4. - AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error) - // VerifyMIC verifies that a cryptographic MIC, contained in the token parameter, - // fits the supplied message is received from the ssh client. - // See RFC 2743 section 2.3.2. - VerifyMIC(micField []byte, micToken []byte) error - // Whenever possible, it should be possible for - // DeleteSecContext() calls to be successfully processed even - // if other calls cannot succeed, thereby enabling context-related - // resources to be released. - // In addition to deleting established security contexts, - // gss_delete_sec_context must also be able to delete "half-built" - // security contexts resulting from an incomplete sequence of - // InitSecContext()/AcceptSecContext() calls. - // See RFC 2743 section 2.2.3. - DeleteSecContext() error -} - -var ( - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication, - // so we also support the krb5 mechanism only. - // See RFC 1964 section 1. - krb5Mesh = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2} -) - -// The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST -// See RFC 4462 section 3.2. 
-type userAuthRequestGSSAPI struct { - N uint32 - OIDS []asn1.ObjectIdentifier -} - -func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { - n, rest, ok := parseUint32(payload) - if !ok { - return nil, errors.New("parse uint32 failed") - } - s := &userAuthRequestGSSAPI{ - N: n, - OIDS: make([]asn1.ObjectIdentifier, n), - } - for i := 0; i < int(n); i++ { - var ( - desiredMech []byte - err error - ) - desiredMech, rest, ok = parseString(rest) - if !ok { - return nil, errors.New("parse string failed") - } - if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { - return nil, err - } - - } - return s, nil -} - -// See RFC 4462 section 3.6. -func buildMIC(sessionID string, username string, service string, authMethod string) []byte { - out := make([]byte, 0, 0) - out = appendString(out, sessionID) - out = append(out, msgUserAuthRequest) - out = appendString(out, username) - out = appendString(out, service) - out = appendString(out, authMethod) - return out -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/streamlocal.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/streamlocal.go deleted file mode 100644 index b171b330bc..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/streamlocal.go +++ /dev/null @@ -1,116 +0,0 @@ -package ssh - -import ( - "errors" - "io" - "net" -) - -// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "direct-streamlocal@openssh.com" string. -// -// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 -type streamLocalChannelOpenDirectMsg struct { - socketPath string - reserved0 string - reserved1 uint32 -} - -// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "forwarded-streamlocal@openssh.com" string. 
-type forwardedStreamLocalPayload struct { - SocketPath string - Reserved0 string -} - -// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message -// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. -type streamLocalChannelForwardMsg struct { - socketPath string -} - -// ListenUnix is similar to ListenTCP but uses a Unix domain socket. -func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - m := streamLocalChannelForwardMsg{ - socketPath, - } - // send message - ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") - } - ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) - - return &unixListener{socketPath, c, ch}, nil -} - -func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { - msg := streamLocalChannelOpenDirectMsg{ - socketPath: socketPath, - } - ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type unixListener struct { - socketPath string - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *unixListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - }, nil -} - -// Close closes the listener. -func (l *unixListener) Close() error { - // this also closes the listener. 
- l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) - m := streamLocalChannelForwardMsg{ - l.socketPath, - } - ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *unixListener) Addr() net.Addr { - return &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - } -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/tcpip.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/tcpip.go deleted file mode 100644 index 80d35f5ec1..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/tcpip.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "errors" - "fmt" - "io" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" -) - -// Listen requests the remote peer open a listening socket on -// addr. Incoming connections will be available by calling Accept on -// the returned net.Listener. The listener must be serviced, or the -// SSH connection may hang. -// N must be "tcp", "tcp4", "tcp6", or "unix". -func (c *Client) Listen(n, addr string) (net.Listener, error) { - switch n { - case "tcp", "tcp4", "tcp6": - laddr, err := net.ResolveTCPAddr(n, addr) - if err != nil { - return nil, err - } - return c.ListenTCP(laddr) - case "unix": - return c.ListenUnix(addr) - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// Automatic port allocation is broken with OpenSSH before 6.0. See -// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In -// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, -// rather than the actual port number. 
This means you can never open -// two different listeners with auto allocated ports. We work around -// this by trying explicit ports until we succeed. - -const openSSHPrefix = "OpenSSH_" - -var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) - -// isBrokenOpenSSHVersion returns true if the given version string -// specifies a version of OpenSSH that is known to have a bug in port -// forwarding. -func isBrokenOpenSSHVersion(versionStr string) bool { - i := strings.Index(versionStr, openSSHPrefix) - if i < 0 { - return false - } - i += len(openSSHPrefix) - j := i - for ; j < len(versionStr); j++ { - if versionStr[j] < '0' || versionStr[j] > '9' { - break - } - } - version, _ := strconv.Atoi(versionStr[i:j]) - return version < 6 -} - -// autoPortListenWorkaround simulates automatic port allocation by -// trying random ports repeatedly. -func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { - var sshListener net.Listener - var err error - const tries = 10 - for i := 0; i < tries; i++ { - addr := *laddr - addr.Port = 1024 + portRandomizer.Intn(60000) - sshListener, err = c.ListenTCP(&addr) - if err == nil { - laddr.Port = addr.Port - return sshListener, err - } - } - return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) -} - -// RFC 4254 7.1 -type channelForwardMsg struct { - addr string - rport uint32 -} - -// handleForwards starts goroutines handling forwarded connections. -// It's called on first use by (*Client).ListenTCP to not launch -// goroutines until needed. -func (c *Client) handleForwards() { - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) -} - -// ListenTCP requests the remote peer open a listening socket -// on laddr. Incoming connections will be available by calling -// Accept on the returned net.Listener. 
-func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { - return c.autoPortListenWorkaround(laddr) - } - - m := channelForwardMsg{ - laddr.IP.String(), - uint32(laddr.Port), - } - // send message - ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: tcpip-forward request denied by peer") - } - - // If the original port was 0, then the remote side will - // supply a real port number in the response. - if laddr.Port == 0 { - var p struct { - Port uint32 - } - if err := Unmarshal(resp, &p); err != nil { - return nil, err - } - laddr.Port = int(p.Port) - } - - // Register this forward, using the port number we obtained. - ch := c.forwards.add(laddr) - - return &tcpListener{laddr, c, ch}, nil -} - -// forwardList stores a mapping between remote -// forward requests and the tcpListeners. -type forwardList struct { - sync.Mutex - entries []forwardEntry -} - -// forwardEntry represents an established mapping of a laddr on a -// remote ssh server to a channel connected to a tcpListener. -type forwardEntry struct { - laddr net.Addr - c chan forward -} - -// forward represents an incoming forwarded tcpip connection. The -// arguments to add/remove/lookup should be address as specified in -// the original forward-request. 
-type forward struct { - newCh NewChannel // the ssh client channel underlying this forward - raddr net.Addr // the raddr of the incoming connection -} - -func (l *forwardList) add(addr net.Addr) chan forward { - l.Lock() - defer l.Unlock() - f := forwardEntry{ - laddr: addr, - c: make(chan forward, 1), - } - l.entries = append(l.entries, f) - return f.c -} - -// See RFC 4254, section 7.2 -type forwardedTCPPayload struct { - Addr string - Port uint32 - OriginAddr string - OriginPort uint32 -} - -// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. -func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { - if port == 0 || port > 65535 { - return nil, fmt.Errorf("ssh: port number out of range: %d", port) - } - ip := net.ParseIP(string(addr)) - if ip == nil { - return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) - } - return &net.TCPAddr{IP: ip, Port: int(port)}, nil -} - -func (l *forwardList) handleChannels(in <-chan NewChannel) { - for ch := range in { - var ( - laddr net.Addr - raddr net.Addr - err error - ) - switch channelType := ch.ChannelType(); channelType { - case "forwarded-tcpip": - var payload forwardedTCPPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) - continue - } - - // RFC 4254 section 7.2 specifies that incoming - // addresses should list the address, in string - // format. It is implied that this should be an IP - // address, as it would be impossible to connect to it - // otherwise. 
- laddr, err = parseTCPAddr(payload.Addr, payload.Port) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - - case "forwarded-streamlocal@openssh.com": - var payload forwardedStreamLocalPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) - continue - } - laddr = &net.UnixAddr{ - Name: payload.SocketPath, - Net: "unix", - } - raddr = &net.UnixAddr{ - Name: "@", - Net: "unix", - } - default: - panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) - } - if ok := l.forward(laddr, raddr, ch); !ok { - // Section 7.2, implementations MUST reject spurious incoming - // connections. - ch.Reject(Prohibited, "no forward for address") - continue - } - - } -} - -// remove removes the forward entry, and the channel feeding its -// listener. -func (l *forwardList) remove(addr net.Addr) { - l.Lock() - defer l.Unlock() - for i, f := range l.entries { - if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { - l.entries = append(l.entries[:i], l.entries[i+1:]...) - close(f.c) - return - } - } -} - -// closeAll closes and clears all forwards. -func (l *forwardList) closeAll() { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - close(f.c) - } - l.entries = nil -} - -func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { - f.c <- forward{newCh: ch, raddr: raddr} - return true - } - } - return false -} - -type tcpListener struct { - laddr *net.TCPAddr - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. 
-func (l *tcpListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: l.laddr, - raddr: s.raddr, - }, nil -} - -// Close closes the listener. -func (l *tcpListener) Close() error { - m := channelForwardMsg{ - l.laddr.IP.String(), - uint32(l.laddr.Port), - } - - // this also closes the listener. - l.conn.forwards.remove(l.laddr) - ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-tcpip-forward failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *tcpListener) Addr() net.Addr { - return l.laddr -} - -// Dial initiates a connection to the addr from the remote host. -// The resulting connection has a zero LocalAddr() and RemoteAddr(). -func (c *Client) Dial(n, addr string) (net.Conn, error) { - var ch Channel - switch n { - case "tcp", "tcp4", "tcp6": - // Parse the address into host and numeric port. - host, portString, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.ParseUint(portString, 10, 16) - if err != nil { - return nil, err - } - ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) - if err != nil { - return nil, err - } - // Use a zero address for local and remote address. 
- zeroAddr := &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - return &chanConn{ - Channel: ch, - laddr: zeroAddr, - raddr: zeroAddr, - }, nil - case "unix": - var err error - ch, err = c.dialStreamLocal(addr) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: addr, - Net: "unix", - }, - }, nil - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// DialTCP connects to the remote address raddr on the network net, -// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used -// as the local address for the connection. -func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { - if laddr == nil { - laddr = &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - } - ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: laddr, - raddr: raddr, - }, nil -} - -// RFC 4254 7.2 -type channelOpenDirectMsg struct { - raddr string - rport uint32 - laddr string - lport uint32 -} - -func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { - msg := channelOpenDirectMsg{ - raddr: raddr, - rport: uint32(rport), - laddr: laddr, - lport: uint32(lport), - } - ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type tcpChan struct { - Channel // the backing channel -} - -// chanConn fulfills the net.Conn interface without -// the tcpChan having to hold laddr or raddr directly. -type chanConn struct { - Channel - laddr, raddr net.Addr -} - -// LocalAddr returns the local network address. -func (t *chanConn) LocalAddr() net.Addr { - return t.laddr -} - -// RemoteAddr returns the remote network address. 
-func (t *chanConn) RemoteAddr() net.Addr { - return t.raddr -} - -// SetDeadline sets the read and write deadlines associated -// with the connection. -func (t *chanConn) SetDeadline(deadline time.Time) error { - if err := t.SetReadDeadline(deadline); err != nil { - return err - } - return t.SetWriteDeadline(deadline) -} - -// SetReadDeadline sets the read deadline. -// A zero value for t means Read will not time out. -// After the deadline, the error from Read will implement net.Error -// with Timeout() == true. -func (t *chanConn) SetReadDeadline(deadline time.Time) error { - // for compatibility with previous version, - // the error message contains "tcpChan" - return errors.New("ssh: tcpChan: deadline not supported") -} - -// SetWriteDeadline exists to satisfy the net.Conn interface -// but is not implemented by this type. It always returns an error. -func (t *chanConn) SetWriteDeadline(deadline time.Time) error { - return errors.New("ssh: tcpChan: deadline not supported") -} diff --git a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/transport.go b/terraform/providers/google/vendor/golang.org/x/crypto/ssh/transport.go deleted file mode 100644 index acf5a21bbb..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/crypto/ssh/transport.go +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bufio" - "bytes" - "errors" - "io" - "log" -) - -// debugTransport if set, will print packet types as they go over the -// wire. No message decoding is done, to minimize the impact on timing. -const debugTransport = false - -const ( - gcmCipherID = "aes128-gcm@openssh.com" - aes128cbcID = "aes128-cbc" - tripledescbcID = "3des-cbc" -) - -// packetConn represents a transport that implements packet based -// operations. 
-type packetConn interface { - // Encrypt and send a packet of data to the remote peer. - writePacket(packet []byte) error - - // Read a packet from the connection. The read is blocking, - // i.e. if error is nil, then the returned byte slice is - // always non-empty. - readPacket() ([]byte, error) - - // Close closes the write-side of the connection. - Close() error -} - -// transport is the keyingTransport that implements the SSH packet -// protocol. -type transport struct { - reader connectionState - writer connectionState - - bufReader *bufio.Reader - bufWriter *bufio.Writer - rand io.Reader - isClient bool - io.Closer -} - -// packetCipher represents a combination of SSH encryption/MAC -// protocol. A single instance should be used for one direction only. -type packetCipher interface { - // writeCipherPacket encrypts the packet and writes it to w. The - // contents of the packet are generally scrambled. - writeCipherPacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error - - // readCipherPacket reads and decrypts a packet of data. The - // returned packet may be overwritten by future calls of - // readPacket. - readCipherPacket(seqnum uint32, r io.Reader) ([]byte, error) -} - -// connectionState represents one side (read or write) of the -// connection. This is necessary because each direction has its own -// keys, and can even have its own algorithms -type connectionState struct { - packetCipher - seqNum uint32 - dir direction - pendingKeyChange chan packetCipher -} - -// prepareKeyChange sets up key material for a keychange. The key changes in -// both directions are triggered by reading and writing a msgNewKey packet -// respectively. 
-func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { - ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) - if err != nil { - return err - } - t.reader.pendingKeyChange <- ciph - - ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) - if err != nil { - return err - } - t.writer.pendingKeyChange <- ciph - - return nil -} - -func (t *transport) printPacket(p []byte, write bool) { - if len(p) == 0 { - return - } - who := "server" - if t.isClient { - who = "client" - } - what := "read" - if write { - what = "write" - } - - log.Println(what, who, p[0]) -} - -// Read and decrypt next packet. -func (t *transport) readPacket() (p []byte, err error) { - for { - p, err = t.reader.readPacket(t.bufReader) - if err != nil { - break - } - if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { - break - } - } - if debugTransport { - t.printPacket(p, false) - } - - return p, err -} - -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { - packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) - s.seqNum++ - if err == nil && len(packet) == 0 { - err = errors.New("ssh: zero length packet") - } - - if len(packet) > 0 { - switch packet[0] { - case msgNewKeys: - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - return nil, errors.New("ssh: got bogus newkeys message") - } - - case msgDisconnect: - // Transform a disconnect message into an - // error. Since this is lowest level at which - // we interpret message types, doing it here - // ensures that we don't have to handle it - // elsewhere. - var msg disconnectMsg - if err := Unmarshal(packet, &msg); err != nil { - return nil, err - } - return nil, &msg - } - } - - // The packet may point to an internal buffer, so copy the - // packet out here. 
- fresh := make([]byte, len(packet)) - copy(fresh, packet) - - return fresh, err -} - -func (t *transport) writePacket(packet []byte) error { - if debugTransport { - t.printPacket(packet, true) - } - return t.writer.writePacket(t.bufWriter, t.rand, packet) -} - -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { - changeKeys := len(packet) > 0 && packet[0] == msgNewKeys - - err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) - if err != nil { - return err - } - if err = w.Flush(); err != nil { - return err - } - s.seqNum++ - if changeKeys { - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - panic("ssh: no key material for msgNewKeys") - } - } - return err -} - -func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { - t := &transport{ - bufReader: bufio.NewReader(rwc), - bufWriter: bufio.NewWriter(rwc), - rand: rand, - reader: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - writer: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - Closer: rwc, - } - t.isClient = isClient - - if isClient { - t.reader.dir = serverKeys - t.writer.dir = clientKeys - } else { - t.reader.dir = clientKeys - t.writer.dir = serverKeys - } - - return t -} - -type direction struct { - ivTag []byte - keyTag []byte - macKeyTag []byte -} - -var ( - serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} - clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} -) - -// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as -// described in RFC 4253, section 6.4. direction should either be serverKeys -// (to setup server->client keys) or clientKeys (for client->server keys). 
-func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { - cipherMode := cipherModes[algs.Cipher] - - iv := make([]byte, cipherMode.ivSize) - key := make([]byte, cipherMode.keySize) - - generateKeyMaterial(iv, d.ivTag, kex) - generateKeyMaterial(key, d.keyTag, kex) - - var macKey []byte - if !aeadCiphers[algs.Cipher] { - macMode := macModes[algs.MAC] - macKey = make([]byte, macMode.keySize) - generateKeyMaterial(macKey, d.macKeyTag, kex) - } - - return cipherModes[algs.Cipher].create(key, iv, macKey, algs) -} - -// generateKeyMaterial fills out with key material generated from tag, K, H -// and sessionId, as specified in RFC 4253, section 7.2. -func generateKeyMaterial(out, tag []byte, r *kexResult) { - var digestsSoFar []byte - - h := r.Hash.New() - for len(out) > 0 { - h.Reset() - h.Write(r.K) - h.Write(r.H) - - if len(digestsSoFar) == 0 { - h.Write(tag) - h.Write(r.SessionID) - } else { - h.Write(digestsSoFar) - } - - digest := h.Sum(nil) - n := copy(out, digest) - out = out[n:] - if len(out) > 0 { - digestsSoFar = append(digestsSoFar, digest...) - } - } -} - -const packageVersion = "SSH-2.0-Go" - -// Sends and receives a version line. The versionLine string should -// be US ASCII, start with "SSH-2.0-", and should not include a -// newline. exchangeVersions returns the other side's version line. -func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { - // Contrary to the RFC, we do not ignore lines that don't - // start with "SSH-2.0-" to make the library usable with - // nonconforming servers. - for _, c := range versionLine { - // The spec disallows non US-ASCII chars, and - // specifically forbids null chars. 
- if c < 32 { - return nil, errors.New("ssh: junk character in version line") - } - } - if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { - return - } - - them, err = readVersion(rw) - return them, err -} - -// maxVersionStringBytes is the maximum number of bytes that we'll -// accept as a version string. RFC 4253 section 4.2 limits this at 255 -// chars -const maxVersionStringBytes = 255 - -// Read version string as specified by RFC 4253, section 4.2. -func readVersion(r io.Reader) ([]byte, error) { - versionString := make([]byte, 0, 64) - var ok bool - var buf [1]byte - - for length := 0; length < maxVersionStringBytes; length++ { - _, err := io.ReadFull(r, buf[:]) - if err != nil { - return nil, err - } - // The RFC says that the version should be terminated with \r\n - // but several SSH servers actually only send a \n. - if buf[0] == '\n' { - if !bytes.HasPrefix(versionString, []byte("SSH-")) { - // RFC 4253 says we need to ignore all version string lines - // except the one containing the SSH version (provided that - // all the lines do not exceed 255 bytes in total). - versionString = versionString[:0] - continue - } - ok = true - break - } - - // non ASCII chars are disallowed, but we are lenient, - // since Go doesn't use null-terminated strings. - - // The RFC allows a comment after a space, however, - // all of it (version and comments) goes into the - // session hash. - versionString = append(versionString, buf[0]) - } - - if !ok { - return nil, errors.New("ssh: overflow reading version string") - } - - // There might be a '\r' on the end which we should remove. 
- if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { - versionString = versionString[:len(versionString)-1] - } - return versionString, nil -} diff --git a/terraform/providers/google/vendor/golang.org/x/net/http2/pipe.go b/terraform/providers/google/vendor/golang.org/x/net/http2/pipe.go index c15b8a7719..684d984fd9 100644 --- a/terraform/providers/google/vendor/golang.org/x/net/http2/pipe.go +++ b/terraform/providers/google/vendor/golang.org/x/net/http2/pipe.go @@ -88,13 +88,9 @@ func (p *pipe) Write(d []byte) (n int, err error) { p.c.L = &p.mu } defer p.c.Signal() - if p.err != nil { + if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } - if p.breakErr != nil { - p.unread += len(d) - return len(d), nil // discard when there is no reader - } return p.b.Write(d) } diff --git a/terraform/providers/google/vendor/golang.org/x/net/http2/server.go b/terraform/providers/google/vendor/golang.org/x/net/http2/server.go index 8cb14f3c97..033b6e6db6 100644 --- a/terraform/providers/google/vendor/golang.org/x/net/http2/server.go +++ b/terraform/providers/google/vendor/golang.org/x/net/http2/server.go @@ -441,7 +441,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { if s.NewWriteScheduler != nil { sc.writeSched = s.NewWriteScheduler() } else { - sc.writeSched = NewPriorityWriteScheduler(nil) + sc.writeSched = newRoundRobinWriteScheduler() } // These start at the RFC-specified defaults. If there is a higher @@ -1822,15 +1822,18 @@ func (sc *serverConn) processData(f *DataFrame) error { } if len(data) > 0 { + st.bodyBytes += int64(len(data)) wrote, err := st.body.Write(data) if err != nil { + // The handler has closed the request body. + // Return the connection-level flow control for the discarded data, + // but not the stream-level flow control. 
sc.sendWindowUpdate(nil, int(f.Length)-wrote) - return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed)) + return nil } if wrote != len(data) { panic("internal error: bad Writer") } - st.bodyBytes += int64(len(data)) } // Return any padded flow control now, since we won't @@ -2426,7 +2429,7 @@ type requestBody struct { conn *serverConn closeOnce sync.Once // for use by Close only sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body + pipe *pipe // non-nil if we have an HTTP entity message body needsContinue bool // need to send a 100-continue } @@ -2566,7 +2569,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { clen = "" } } - if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + _, hasContentLength := rws.snapHeader["Content-Length"] + if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { clen = strconv.Itoa(len(p)) } _, hasContentType := rws.snapHeader["Content-Type"] @@ -2771,7 +2775,7 @@ func (w *responseWriter) FlushError() error { err = rws.bw.Flush() } else { // The bufio.Writer won't call chunkWriter.Write - // (writeChunk with zero bytes, so we have to do it + // (writeChunk with zero bytes), so we have to do it // ourselves to force the HTTP response header and/or // final DATA frame (with END_STREAM) to be sent. 
_, err = chunkWriter{rws}.Write(nil) diff --git a/terraform/providers/google/vendor/golang.org/x/net/http2/transport.go b/terraform/providers/google/vendor/golang.org/x/net/http2/transport.go index 05ba23d3d9..b9632380e7 100644 --- a/terraform/providers/google/vendor/golang.org/x/net/http2/transport.go +++ b/terraform/providers/google/vendor/golang.org/x/net/http2/transport.go @@ -560,10 +560,11 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { + roundTripErr := err if req, err = shouldRetryRequest(req, err); err == nil { // After the first retry, do exponential backoff with 10% jitter. if retry == 0 { - t.vlogf("RoundTrip retrying after failure: %v", err) + t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue } backoff := float64(uint(1) << (uint(retry) - 1)) @@ -572,7 +573,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res timer := backoffNewTimer(d) select { case <-timer.C: - t.vlogf("RoundTrip retrying after failure: %v", err) + t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): timer.Stop() @@ -1265,6 +1266,29 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return res, nil } + cancelRequest := func(cs *clientStream, err error) error { + cs.cc.mu.Lock() + bodyClosed := cs.reqBodyClosed + cs.cc.mu.Unlock() + // Wait for the request body to be closed. + // + // If nothing closed the body before now, abortStreamLocked + // will have started a goroutine to close it. + // + // Closing the body before returning avoids a race condition + // with net/http checking its readTrackingBody to see if the + // body was read from or closed. See golang/go#60041. 
+ // + // The body is closed in a separate goroutine without the + // connection mutex held, but dropping the mutex before waiting + // will keep us from holding it indefinitely if the body + // close is slow for some reason. + if bodyClosed != nil { + <-bodyClosed + } + return err + } + for { select { case <-cs.respHeaderRecv: @@ -1284,10 +1308,10 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { case <-ctx.Done(): err := ctx.Err() cs.abortStream(err) - return nil, err + return nil, cancelRequest(cs, err) case <-cs.reqCancel: cs.abortStream(errRequestCanceled) - return nil, errRequestCanceled + return nil, cancelRequest(cs, errRequestCanceled) } } } @@ -1844,6 +1868,9 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail if err != nil { return nil, err } + if !httpguts.ValidHostHeader(host) { + return nil, errors.New("http2: invalid Host header") + } var path string if req.Method != "CONNECT" { @@ -1880,7 +1907,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // 8.1.2.3 Request Pseudo-Header Fields // The :path pseudo-header field includes the path and query parts of the // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of + // followed by the query production, see Sections 3.3 and 3.4 of // [RFC3986]). 
f(":authority", host) m := req.Method @@ -2555,6 +2582,9 @@ func (b transportResponseBody) Close() error { cs := b.cs cc := cs.cc + cs.bufPipe.BreakWithError(errClosedResponseBody) + cs.abortStream(errClosedResponseBody) + unread := cs.bufPipe.Len() if unread > 0 { cc.mu.Lock() @@ -2573,9 +2603,6 @@ func (b transportResponseBody) Close() error { cc.wmu.Unlock() } - cs.bufPipe.BreakWithError(errClosedResponseBody) - cs.abortStream(errClosedResponseBody) - select { case <-cs.donec: case <-cs.ctx.Done(): diff --git a/terraform/providers/google/vendor/golang.org/x/net/http2/writesched.go b/terraform/providers/google/vendor/golang.org/x/net/http2/writesched.go index c7cd001739..cc893adc29 100644 --- a/terraform/providers/google/vendor/golang.org/x/net/http2/writesched.go +++ b/terraform/providers/google/vendor/golang.org/x/net/http2/writesched.go @@ -184,7 +184,8 @@ func (wr *FrameWriteRequest) replyToWriter(err error) { // writeQueue is used by implementations of WriteScheduler. type writeQueue struct { - s []FrameWriteRequest + s []FrameWriteRequest + prev, next *writeQueue } func (q *writeQueue) empty() bool { return len(q.s) == 0 } diff --git a/terraform/providers/google/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/terraform/providers/google/vendor/golang.org/x/net/http2/writesched_roundrobin.go new file mode 100644 index 0000000000..54fe86322d --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/net/http2/writesched_roundrobin.go @@ -0,0 +1,119 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" +) + +type roundRobinWriteScheduler struct { + // control contains control frames (SETTINGS, PING, etc.). + control writeQueue + + // streams maps stream ID to a queue. + streams map[uint32]*writeQueue + + // stream queues are stored in a circular linked list. 
+ // head is the next stream to write, or nil if there are no streams open. + head *writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +// newRoundRobinWriteScheduler constructs a new write scheduler. +// The round robin scheduler priorizes control frames +// like SETTINGS and PING over DATA frames. +// When there are no control frames to send, it performs a round-robin +// selection from the ready streams. +func newRoundRobinWriteScheduler() WriteScheduler { + ws := &roundRobinWriteScheduler{ + streams: make(map[uint32]*writeQueue), + } + return ws +} + +func (ws *roundRobinWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + if ws.streams[streamID] != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + q := ws.queuePool.get() + ws.streams[streamID] = q + if ws.head == nil { + ws.head = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.head.prev + q.next = ws.head + q.prev.next = q + q.next.prev = q + } +} + +func (ws *roundRobinWriteScheduler) CloseStream(streamID uint32) { + q := ws.streams[streamID] + if q == nil { + return + } + if q.next == q { + // This was the only open stream. + ws.head = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.head == q { + ws.head = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *roundRobinWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {} + +func (ws *roundRobinWriteScheduler) Push(wr FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()] + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. 
+ if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *roundRobinWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. + if !ws.control.empty() { + return ws.control.shift(), true + } + if ws.head == nil { + return FrameWriteRequest{}, false + } + q := ws.head + for { + if wr, ok := q.consume(math.MaxInt32); ok { + ws.head = q.next + return wr, true + } + q = q.next + if q == ws.head { + break + } + } + return FrameWriteRequest{}, false +} diff --git a/terraform/providers/google/vendor/golang.org/x/net/idna/idna9.0.0.go b/terraform/providers/google/vendor/golang.org/x/net/idna/idna9.0.0.go index aae6aac872..ee1698cefb 100644 --- a/terraform/providers/google/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/terraform/providers/google/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -121,7 +121,7 @@ func CheckJoiners(enable bool) Option { } } -// StrictDomainName limits the set of permissable ASCII characters to those +// StrictDomainName limits the set of permissible ASCII characters to those // allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the // hyphen). This is set by default for MapForLookup and ValidateForRegistration, // but is only useful if ValidateLabels is set. diff --git a/terraform/providers/google/vendor/golang.org/x/net/idna/tables13.0.0.go b/terraform/providers/google/vendor/golang.org/x/net/idna/tables13.0.0.go index 390c5e56d2..66701eadfb 100644 --- a/terraform/providers/google/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/terraform/providers/google/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,151 +1,294 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -//go:build go1.16 -// +build go1.16 +//go:build go1.16 && !go1.21 +// +build go1.16,!go1.21 package idna // UnicodeVersion is the Unicode version from which the tables in this package are derived. 
const UnicodeVersion = "13.0.0" -var mappings string = "" + // Size: 8188 bytes - "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + - "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + - "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + - "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + - "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + - "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + - "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + - "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + - "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + - "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + - "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + - "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + - "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + - "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + - "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + - "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + - "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" 
+ - "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + - "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + - "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + - "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + - "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + - "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + - "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + - "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + - "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + - ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + - "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + - "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + - "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + - "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + - "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + - "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + - "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + - "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + - "月\x0511月\x0512月\x02hg\x02ev\x06令和\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニ" + - "ング\x09インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー" + - "\x09ガロン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0f" + - "キロワット\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル" + - "\x0fサンチーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット" + - "\x09ハイツ\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0c" + - "フィート\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ" + - 
"\x0cポイント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク" + - "\x0fマンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09" + - "ユアン\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x04" + - "2点\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + - "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + - "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + - "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + - "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + - "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + - "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + - "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + - "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + - "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + - "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + - "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x02ʍ\x04𤋮\x04𢡊\x04𢡄\x04𣏕" + - "\x04𥉉\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ" + - "\x04יִ\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּ" + - "ׂ\x04אַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04" + - "ךּ\x04כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ" + - "\x04תּ\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ" + - "\x02ڤ\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ" + - "\x02ڳ\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ" + - "\x02ۅ\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02" + - "ی\x04ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04" + - "تح\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج" + - 
"\x04حم\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح" + - "\x04ضخ\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ" + - "\x04فم\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل" + - "\x04كم\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ" + - "\x04مم\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى" + - "\x04هي\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 " + - "ٍّ\x05 َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04ت" + - "ر\x04تز\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04" + - "ين\x04ئخ\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه" + - "\x04شم\x04شه\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي" + - "\x04سى\x04سي\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي" + - "\x04ضى\x04ضي\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06ت" + - "حج\x06تحم\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سج" + - "ح\x06سجى\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم" + - "\x06ضحى\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي" + - "\x06غمى\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح" + - "\x06محج\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم" + - "\x06نحم\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى" + - "\x06تخي\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي" + - "\x06ضحي\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي" + - "\x06كمي\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي" + - "\x06سخي\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08" + - "عليه\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:" + - "\x01!\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\" + - "\x01$\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ" + - 
"\x02إ\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز" + - "\x02س\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن" + - "\x02ه\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~" + - "\x02¢\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲" + - "\x08𝆹𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η" + - "\x02κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ" + - "\x02ڡ\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029," + - "\x03(a)\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)" + - "\x03(k)\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)" + - "\x03(u)\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03p" + - "pv\x02wc\x02mc\x02md\x02mr\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ" + - "\x03二\x03多\x03解\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終" + - "\x03生\x03販\x03声\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指" + - "\x03走\x03打\x03禁\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔" + - "三〕\x09〔二〕\x09〔安〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03" + - "丸\x03乁\x03你\x03侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03" + - "具\x03㒹\x03內\x03冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03" + - "㔕\x03勇\x03勉\x03勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03" + - "灰\x03及\x03叟\x03叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03" + - "啣\x03善\x03喙\x03喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03" + - "埴\x03堍\x03型\x03堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03" + - "姘\x03婦\x03㛮\x03嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03" + - "屮\x03峀\x03岍\x03嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03" + - "㡢\x03㡼\x03庰\x03庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03" + - "忍\x03志\x03忹\x03悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03" + - 
"憤\x03憯\x03懞\x03懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03" + - "掃\x03揤\x03搢\x03揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03" + - "書\x03晉\x03㬙\x03暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03" + - "朡\x03杞\x03杓\x03㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03" + - "槪\x03檨\x03櫛\x03㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03" + - "汧\x03洖\x03派\x03海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03" + - "淹\x03潮\x03濆\x03瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03" + - "爵\x03牐\x03犀\x03犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03" + - "㼛\x03甤\x03甾\x03異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03" + - "䂖\x03硎\x03碌\x03磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03" + - "築\x03䈧\x03糒\x03䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03" + - "罺\x03羕\x03翺\x03者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03" + - "䑫\x03芑\x03芋\x03芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03" + - "莽\x03菧\x03著\x03荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03" + - "䕫\x03虐\x03虜\x03虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03" + - "蠁\x03䗹\x03衠\x03衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03" + - "豕\x03貫\x03賁\x03贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03" + - "鈸\x03鋗\x03鋘\x03鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03" + - "䩶\x03韠\x03䪲\x03頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03" + - "鳽\x03䳎\x03䳭\x03鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" +var mappings string = "" + // Size: 6539 bytes + " ̈a ̄23 ́ ̧1o1⁄41⁄23⁄4i̇l·ʼnsdžⱥⱦhjrwy ̆ ̇ ̊ ̨ ̃ ̋lẍ́ ι; ̈́եւاٴوٴۇٴيٴक" + + "़ख़ग़ज़ड़ढ़फ़य़ড়ঢ়য়ਲ਼ਸ਼ਖ਼ਗ਼ਜ਼ਫ਼ଡ଼ଢ଼ําໍາຫນຫມགྷཌྷདྷབྷཛྷཀྵཱཱིུྲྀྲཱྀླྀླཱ" + + "ཱྀྀྒྷྜྷྡྷྦྷྫྷྐྵвдостъѣæbdeǝgikmnȣptuɐɑəɛɜŋɔɯvβγδφχρнɒcɕðfɟɡɥɨɩɪʝɭʟɱɰɲɳ" + + "ɴɵɸʂʃƫʉʊʋʌzʐʑʒθssάέήίόύώἀιἁιἂιἃιἄιἅιἆιἇιἠιἡιἢιἣιἤιἥιἦιἧιὠιὡιὢιὣιὤιὥιὦιὧ" + + "ιὰιαιάιᾶιι ̈͂ὴιηιήιῆι ̓̀ ̓́ ̓͂ΐ ̔̀ ̔́ ̔͂ΰ ̈̀`ὼιωιώιῶι′′′′′‵‵‵‵‵!!???!!?" 
+ + "′′′′0456789+=()rsħnoqsmtmωåאבגדπ1⁄71⁄91⁄101⁄32⁄31⁄52⁄53⁄54⁄51⁄65⁄61⁄83" + + "⁄85⁄87⁄81⁄iiivviviiiixxi0⁄3∫∫∫∫∫∮∮∮∮∮1011121314151617181920(10)(11)(12" + + ")(13)(14)(15)(16)(17)(18)(19)(20)∫∫∫∫==⫝̸ɫɽȿɀ. ゙ ゚よりコト(ᄀ)(ᄂ)(ᄃ)(ᄅ)(ᄆ)(ᄇ)" + + "(ᄉ)(ᄋ)(ᄌ)(ᄎ)(ᄏ)(ᄐ)(ᄑ)(ᄒ)(가)(나)(다)(라)(마)(바)(사)(아)(자)(차)(카)(타)(파)(하)(주)(오전" + + ")(오후)(一)(二)(三)(四)(五)(六)(七)(八)(九)(十)(月)(火)(水)(木)(金)(土)(日)(株)(有)(社)(名)(特)(" + + "財)(祝)(労)(代)(呼)(学)(監)(企)(資)(協)(祭)(休)(自)(至)21222324252627282930313233343" + + "5참고주의3637383940414243444546474849501月2月3月4月5月6月7月8月9月10月11月12月hgev令和アパート" + + "アルファアンペアアールイニングインチウォンエスクードエーカーオンスオームカイリカラットカロリーガロンガンマギガギニーキュリーギルダーキロキロ" + + "グラムキロメートルキロワットグラムグラムトンクルゼイロクローネケースコルナコーポサイクルサンチームシリングセンチセントダースデシドルトンナノ" + + "ノットハイツパーセントパーツバーレルピアストルピクルピコビルファラッドフィートブッシェルフランヘクタールペソペニヒヘルツペンスページベータポ" + + "イントボルトホンポンドホールホーンマイクロマイルマッハマルクマンションミクロンミリミリバールメガメガトンメートルヤードヤールユアンリットルリ" + + "ラルピールーブルレムレントゲンワット0点1点2点3点4点5点6点7点8点9点10点11点12点13点14点15点16点17点18点19点20" + + "点21点22点23点24点daauovpcdmiu平成昭和大正明治株式会社panamakakbmbgbkcalpfnfmgkghzmldlk" + + "lfmnmmmcmkmm2m3m∕sm∕s2rad∕srad∕s2psnsmspvnvmvkvpwnwmwkwbqcccdc∕kgdbgyhah" + + "pinkkktlmlnlxphprsrsvwbv∕ma∕m1日2日3日4日5日6日7日8日9日10日11日12日13日14日15日16日17日1" + + "8日19日20日21日22日23日24日25日26日27日28日29日30日31日ьɦɬʞʇœʍ𤋮𢡊𢡄𣏕𥉉𥳐𧻓fffiflstմնմեմիվնմ" + + "խיִײַעהכלםרתשׁשׂשּׁשּׂאַאָאּבּגּדּהּוּזּטּיּךּכּלּמּנּסּףּפּצּקּרּשּתּו" + + "ֹבֿכֿפֿאלٱٻپڀٺٿٹڤڦڄڃچڇڍڌڎڈژڑکگڳڱںڻۀہھےۓڭۇۆۈۋۅۉېىئائەئوئۇئۆئۈئېئىیئجئحئم" + + "ئيبجبحبخبمبىبيتجتحتختمتىتيثجثمثىثيجحجمحجحمخجخحخمسجسحسخسمصحصمضجضحضخضمطحط" + + "مظمعجعمغجغمفجفحفخفمفىفيقحقمقىقيكاكجكحكخكلكمكىكيلجلحلخلملىليمجمحمخمممىمي" + + "نجنحنخنمنىنيهجهمهىهييجيحيخيميىييذٰرٰىٰ ٌّ ٍّ َّ ُّ ِّ ّٰئرئزئنبربزبنترت" + + "زتنثرثزثنمانرنزننيريزينئخئهبهتهصخلهنههٰيهثهسهشمشهـَّـُّـِّطىطيعىعيغىغيس" + + "ىسيشىشيحىحيجىجيخىخيصىصيضىضيشجشحشخشرسرصرضراًتجمتحجتحمتخمتمجتمحتمخجمححميح" + + "مىسحجسجحسجىسمحسمجسممصححصممشحمشجيشمخشممضحىضخمطمحطممطميعجمعممعمىغممغميغمى" + + "فخمقمحقمملحملحيلحىلججلخملمحمحجمحممحيمجحمجممخجمخممجخهمجهممنحمنحىنجمنجىنم" + + 
"ينمىيممبخيتجيتجىتخيتخىتميتمىجميجحىجمىسخىصحيشحيضحيلجيلمييحييجييميمميقمين" + + "حيعميكمينجحمخيلجمكممجحيحجيمجيفميبحيسخينجيصلےقلےاللهاكبرمحمدصلعمرسولعليه" + + "وسلمصلىصلى الله عليه وسلمجل جلالهریال,:!?_{}[]#&*-<>\\$%@ـًـَـُـِـّـْءآ" + + "أؤإئابةتثجحخدذرزسشصضطظعغفقكلمنهويلآلألإلا\x22'/^|~¢£¬¦¥𝅗𝅥𝅘𝅥𝅘𝅥𝅮𝅘𝅥𝅯𝅘𝅥𝅰𝅘𝅥𝅱" + + "𝅘𝅥𝅲𝆹𝅥𝆺𝅥𝆹𝅥𝅮𝆺𝅥𝅮𝆹𝅥𝅯𝆺𝅥𝅯ıȷαεζηκλμνξοστυψ∇∂ϝٮڡٯ0,1,2,3,4,5,6,7,8,9,(a)(b)(c" + + ")(d)(e)(f)(g)(h)(i)(j)(k)(l)(m)(n)(o)(p)(q)(r)(s)(t)(u)(v)(w)(x)(y)(z)〔s" + + "〕wzhvsdppvwcmcmdmrdjほかココサ手字双デ二多解天交映無料前後再新初終生販声吹演投捕一三遊左中右指走打禁空合満有月申割営配〔" + + "本〕〔三〕〔二〕〔安〕〔点〕〔打〕〔盗〕〔勝〕〔敗〕得可丽丸乁你侮侻倂偺備僧像㒞免兔兤具㒹內冗冤仌冬况凵刃㓟刻剆剷㔕勇勉勤勺包匆北卉卑博即卽" + + "卿灰及叟叫叱吆咞吸呈周咢哶唐啓啣善喙喫喳嗂圖嘆圗噑噴切壮城埴堍型堲報墬売壷夆夢奢姬娛娧姘婦㛮嬈嬾寃寘寧寳寿将尢㞁屠屮峀岍嵃嵮嵫嵼巡巢㠯巽帨帽" + + "幩㡢㡼庰庳庶廊廾舁弢㣇形彫㣣徚忍志忹悁㤺㤜悔惇慈慌慎慺憎憲憤憯懞懲懶成戛扝抱拔捐挽拼捨掃揤搢揅掩㨮摩摾撝摷㩬敏敬旣書晉㬙暑㬈㫤冒冕最暜肭䏙朗" + + "望朡杞杓㭉柺枅桒梅梎栟椔㮝楂榣槪檨櫛㰘次歔㱎歲殟殺殻汎沿泍汧洖派海流浩浸涅洴港湮㴳滋滇淹潮濆瀹瀞瀛㶖灊災灷炭煅熜爨爵牐犀犕獺王㺬玥㺸瑇瑜瑱璅" + + "瓊㼛甤甾異瘐㿼䀈直眞真睊䀹瞋䁆䂖硎碌磌䃣祖福秫䄯穀穊穏䈂篆築䈧糒䊠糨糣紀絣䌁緇縂繅䌴䍙罺羕翺者聠聰䏕育脃䐋脾媵舄辞䑫芑芋芝劳花芳芽苦若茝荣莭" + + "茣莽菧著荓菊菌菜䔫蓱蓳蔖蕤䕝䕡䕫虐虜虧虩蚩蚈蜎蛢蝹蜨蝫螆蟡蠁䗹衠衣裗裞䘵裺㒻䚾䛇誠諭變豕貫賁贛起跋趼跰軔輸邔郱鄑鄛鈸鋗鋘鉼鏹鐕開䦕閷䧦雃嶲霣" + + "䩮䩶韠䪲頋頩飢䬳餩馧駂駾䯎鬒鱀鳽䳎䳭鵧䳸麻䵖黹黾鼅鼏鼖鼻" + +var mappingIndex = []uint16{ // 1650 elements + // Entry 0 - 3F + 0x0000, 0x0000, 0x0001, 0x0004, 0x0005, 0x0008, 0x0009, 0x000a, + 0x000d, 0x0010, 0x0011, 0x0012, 0x0017, 0x001c, 0x0021, 0x0024, + 0x0027, 0x002a, 0x002b, 0x002e, 0x0031, 0x0034, 0x0035, 0x0036, + 0x0037, 0x0038, 0x0039, 0x003c, 0x003f, 0x0042, 0x0045, 0x0048, + 0x004b, 0x004c, 0x004d, 0x0051, 0x0054, 0x0055, 0x005a, 0x005e, + 0x0062, 0x0066, 0x006a, 0x006e, 0x0074, 0x007a, 0x0080, 0x0086, + 0x008c, 0x0092, 0x0098, 0x009e, 0x00a4, 0x00aa, 0x00b0, 0x00b6, + 0x00bc, 0x00c2, 0x00c8, 0x00ce, 0x00d4, 0x00da, 0x00e0, 0x00e6, + // Entry 40 - 7F + 0x00ec, 0x00f2, 0x00f8, 0x00fe, 0x0104, 0x010a, 0x0110, 0x0116, + 0x011c, 0x0122, 0x0128, 0x012e, 0x0137, 0x013d, 0x0146, 0x014c, + 0x0152, 0x0158, 0x015e, 0x0164, 0x016a, 0x0170, 0x0172, 0x0174, + 0x0176, 0x0178, 0x017a, 0x017c, 0x017e, 0x0180, 0x0181, 0x0182, + 0x0183, 0x0185, 0x0186, 0x0187, 0x0188, 0x0189, 0x018a, 0x018c, + 0x018d, 0x018e, 
0x018f, 0x0191, 0x0193, 0x0195, 0x0197, 0x0199, + 0x019b, 0x019d, 0x019f, 0x01a0, 0x01a2, 0x01a4, 0x01a6, 0x01a8, + 0x01aa, 0x01ac, 0x01ae, 0x01b0, 0x01b1, 0x01b3, 0x01b5, 0x01b6, + // Entry 80 - BF + 0x01b8, 0x01ba, 0x01bc, 0x01be, 0x01c0, 0x01c2, 0x01c4, 0x01c6, + 0x01c8, 0x01ca, 0x01cc, 0x01ce, 0x01d0, 0x01d2, 0x01d4, 0x01d6, + 0x01d8, 0x01da, 0x01dc, 0x01de, 0x01e0, 0x01e2, 0x01e4, 0x01e5, + 0x01e7, 0x01e9, 0x01eb, 0x01ed, 0x01ef, 0x01f1, 0x01f3, 0x01f5, + 0x01f7, 0x01f9, 0x01fb, 0x01fd, 0x0202, 0x0207, 0x020c, 0x0211, + 0x0216, 0x021b, 0x0220, 0x0225, 0x022a, 0x022f, 0x0234, 0x0239, + 0x023e, 0x0243, 0x0248, 0x024d, 0x0252, 0x0257, 0x025c, 0x0261, + 0x0266, 0x026b, 0x0270, 0x0275, 0x027a, 0x027e, 0x0282, 0x0287, + // Entry C0 - FF + 0x0289, 0x028e, 0x0293, 0x0297, 0x029b, 0x02a0, 0x02a5, 0x02aa, + 0x02af, 0x02b1, 0x02b6, 0x02bb, 0x02c0, 0x02c2, 0x02c7, 0x02c8, + 0x02cd, 0x02d1, 0x02d5, 0x02da, 0x02e0, 0x02e9, 0x02ef, 0x02f8, + 0x02fa, 0x02fc, 0x02fe, 0x0300, 0x030c, 0x030d, 0x030e, 0x030f, + 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317, + 0x0319, 0x031b, 0x031d, 0x031e, 0x0320, 0x0322, 0x0324, 0x0326, + 0x0328, 0x032a, 0x032c, 0x032e, 0x0330, 0x0335, 0x033a, 0x0340, + 0x0345, 0x034a, 0x034f, 0x0354, 0x0359, 0x035e, 0x0363, 0x0368, + // Entry 100 - 13F + 0x036d, 0x0372, 0x0377, 0x037c, 0x0380, 0x0382, 0x0384, 0x0386, + 0x038a, 0x038c, 0x038e, 0x0393, 0x0399, 0x03a2, 0x03a8, 0x03b1, + 0x03b3, 0x03b5, 0x03b7, 0x03b9, 0x03bb, 0x03bd, 0x03bf, 0x03c1, + 0x03c3, 0x03c5, 0x03c7, 0x03cb, 0x03cf, 0x03d3, 0x03d7, 0x03db, + 0x03df, 0x03e3, 0x03e7, 0x03eb, 0x03ef, 0x03f3, 0x03ff, 0x0401, + 0x0406, 0x0408, 0x040a, 0x040c, 0x040e, 0x040f, 0x0413, 0x0417, + 0x041d, 0x0423, 0x0428, 0x042d, 0x0432, 0x0437, 0x043c, 0x0441, + 0x0446, 0x044b, 0x0450, 0x0455, 0x045a, 0x045f, 0x0464, 0x0469, + // Entry 140 - 17F + 0x046e, 0x0473, 0x0478, 0x047d, 0x0482, 0x0487, 0x048c, 0x0491, + 0x0496, 0x049b, 0x04a0, 0x04a5, 0x04aa, 0x04af, 0x04b4, 0x04bc, + 0x04c4, 0x04c9, 
0x04ce, 0x04d3, 0x04d8, 0x04dd, 0x04e2, 0x04e7, + 0x04ec, 0x04f1, 0x04f6, 0x04fb, 0x0500, 0x0505, 0x050a, 0x050f, + 0x0514, 0x0519, 0x051e, 0x0523, 0x0528, 0x052d, 0x0532, 0x0537, + 0x053c, 0x0541, 0x0546, 0x054b, 0x0550, 0x0555, 0x055a, 0x055f, + 0x0564, 0x0569, 0x056e, 0x0573, 0x0578, 0x057a, 0x057c, 0x057e, + 0x0580, 0x0582, 0x0584, 0x0586, 0x0588, 0x058a, 0x058c, 0x058e, + // Entry 180 - 1BF + 0x0590, 0x0592, 0x0594, 0x0596, 0x059c, 0x05a2, 0x05a4, 0x05a6, + 0x05a8, 0x05aa, 0x05ac, 0x05ae, 0x05b0, 0x05b2, 0x05b4, 0x05b6, + 0x05b8, 0x05ba, 0x05bc, 0x05be, 0x05c0, 0x05c4, 0x05c8, 0x05cc, + 0x05d0, 0x05d4, 0x05d8, 0x05dc, 0x05e0, 0x05e4, 0x05e9, 0x05ee, + 0x05f3, 0x05f5, 0x05f7, 0x05fd, 0x0609, 0x0615, 0x0621, 0x062a, + 0x0636, 0x063f, 0x0648, 0x0657, 0x0663, 0x066c, 0x0675, 0x067e, + 0x068a, 0x0696, 0x069f, 0x06a8, 0x06ae, 0x06b7, 0x06c3, 0x06cf, + 0x06d5, 0x06e4, 0x06f6, 0x0705, 0x070e, 0x071d, 0x072c, 0x0738, + // Entry 1C0 - 1FF + 0x0741, 0x074a, 0x0753, 0x075f, 0x076e, 0x077a, 0x0783, 0x078c, + 0x0795, 0x079b, 0x07a1, 0x07a7, 0x07ad, 0x07b6, 0x07bf, 0x07ce, + 0x07d7, 0x07e3, 0x07f2, 0x07fb, 0x0801, 0x0807, 0x0816, 0x0822, + 0x0831, 0x083a, 0x0849, 0x084f, 0x0858, 0x0861, 0x086a, 0x0873, + 0x087c, 0x0888, 0x0891, 0x0897, 0x08a0, 0x08a9, 0x08b2, 0x08be, + 0x08c7, 0x08d0, 0x08d9, 0x08e8, 0x08f4, 0x08fa, 0x0909, 0x090f, + 0x091b, 0x0927, 0x0930, 0x0939, 0x0942, 0x094e, 0x0954, 0x095d, + 0x0969, 0x096f, 0x097e, 0x0987, 0x098b, 0x098f, 0x0993, 0x0997, + // Entry 200 - 23F + 0x099b, 0x099f, 0x09a3, 0x09a7, 0x09ab, 0x09af, 0x09b4, 0x09b9, + 0x09be, 0x09c3, 0x09c8, 0x09cd, 0x09d2, 0x09d7, 0x09dc, 0x09e1, + 0x09e6, 0x09eb, 0x09f0, 0x09f5, 0x09fa, 0x09fc, 0x09fe, 0x0a00, + 0x0a02, 0x0a04, 0x0a06, 0x0a0c, 0x0a12, 0x0a18, 0x0a1e, 0x0a2a, + 0x0a2c, 0x0a2e, 0x0a30, 0x0a32, 0x0a34, 0x0a36, 0x0a38, 0x0a3c, + 0x0a3e, 0x0a40, 0x0a42, 0x0a44, 0x0a46, 0x0a48, 0x0a4a, 0x0a4c, + 0x0a4e, 0x0a50, 0x0a52, 0x0a54, 0x0a56, 0x0a58, 0x0a5a, 0x0a5f, + 0x0a65, 0x0a6c, 0x0a74, 0x0a76, 
0x0a78, 0x0a7a, 0x0a7c, 0x0a7e, + // Entry 240 - 27F + 0x0a80, 0x0a82, 0x0a84, 0x0a86, 0x0a88, 0x0a8a, 0x0a8c, 0x0a8e, + 0x0a90, 0x0a96, 0x0a98, 0x0a9a, 0x0a9c, 0x0a9e, 0x0aa0, 0x0aa2, + 0x0aa4, 0x0aa6, 0x0aa8, 0x0aaa, 0x0aac, 0x0aae, 0x0ab0, 0x0ab2, + 0x0ab4, 0x0ab9, 0x0abe, 0x0ac2, 0x0ac6, 0x0aca, 0x0ace, 0x0ad2, + 0x0ad6, 0x0ada, 0x0ade, 0x0ae2, 0x0ae7, 0x0aec, 0x0af1, 0x0af6, + 0x0afb, 0x0b00, 0x0b05, 0x0b0a, 0x0b0f, 0x0b14, 0x0b19, 0x0b1e, + 0x0b23, 0x0b28, 0x0b2d, 0x0b32, 0x0b37, 0x0b3c, 0x0b41, 0x0b46, + 0x0b4b, 0x0b50, 0x0b52, 0x0b54, 0x0b56, 0x0b58, 0x0b5a, 0x0b5c, + // Entry 280 - 2BF + 0x0b5e, 0x0b62, 0x0b66, 0x0b6a, 0x0b6e, 0x0b72, 0x0b76, 0x0b7a, + 0x0b7c, 0x0b7e, 0x0b80, 0x0b82, 0x0b86, 0x0b8a, 0x0b8e, 0x0b92, + 0x0b96, 0x0b9a, 0x0b9e, 0x0ba0, 0x0ba2, 0x0ba4, 0x0ba6, 0x0ba8, + 0x0baa, 0x0bac, 0x0bb0, 0x0bb4, 0x0bba, 0x0bc0, 0x0bc4, 0x0bc8, + 0x0bcc, 0x0bd0, 0x0bd4, 0x0bd8, 0x0bdc, 0x0be0, 0x0be4, 0x0be8, + 0x0bec, 0x0bf0, 0x0bf4, 0x0bf8, 0x0bfc, 0x0c00, 0x0c04, 0x0c08, + 0x0c0c, 0x0c10, 0x0c14, 0x0c18, 0x0c1c, 0x0c20, 0x0c24, 0x0c28, + 0x0c2c, 0x0c30, 0x0c34, 0x0c36, 0x0c38, 0x0c3a, 0x0c3c, 0x0c3e, + // Entry 2C0 - 2FF + 0x0c40, 0x0c42, 0x0c44, 0x0c46, 0x0c48, 0x0c4a, 0x0c4c, 0x0c4e, + 0x0c50, 0x0c52, 0x0c54, 0x0c56, 0x0c58, 0x0c5a, 0x0c5c, 0x0c5e, + 0x0c60, 0x0c62, 0x0c64, 0x0c66, 0x0c68, 0x0c6a, 0x0c6c, 0x0c6e, + 0x0c70, 0x0c72, 0x0c74, 0x0c76, 0x0c78, 0x0c7a, 0x0c7c, 0x0c7e, + 0x0c80, 0x0c82, 0x0c86, 0x0c8a, 0x0c8e, 0x0c92, 0x0c96, 0x0c9a, + 0x0c9e, 0x0ca2, 0x0ca4, 0x0ca8, 0x0cac, 0x0cb0, 0x0cb4, 0x0cb8, + 0x0cbc, 0x0cc0, 0x0cc4, 0x0cc8, 0x0ccc, 0x0cd0, 0x0cd4, 0x0cd8, + 0x0cdc, 0x0ce0, 0x0ce4, 0x0ce8, 0x0cec, 0x0cf0, 0x0cf4, 0x0cf8, + // Entry 300 - 33F + 0x0cfc, 0x0d00, 0x0d04, 0x0d08, 0x0d0c, 0x0d10, 0x0d14, 0x0d18, + 0x0d1c, 0x0d20, 0x0d24, 0x0d28, 0x0d2c, 0x0d30, 0x0d34, 0x0d38, + 0x0d3c, 0x0d40, 0x0d44, 0x0d48, 0x0d4c, 0x0d50, 0x0d54, 0x0d58, + 0x0d5c, 0x0d60, 0x0d64, 0x0d68, 0x0d6c, 0x0d70, 0x0d74, 0x0d78, + 0x0d7c, 0x0d80, 0x0d84, 0x0d88, 
0x0d8c, 0x0d90, 0x0d94, 0x0d98, + 0x0d9c, 0x0da0, 0x0da4, 0x0da8, 0x0dac, 0x0db0, 0x0db4, 0x0db8, + 0x0dbc, 0x0dc0, 0x0dc4, 0x0dc8, 0x0dcc, 0x0dd0, 0x0dd4, 0x0dd8, + 0x0ddc, 0x0de0, 0x0de4, 0x0de8, 0x0dec, 0x0df0, 0x0df4, 0x0df8, + // Entry 340 - 37F + 0x0dfc, 0x0e00, 0x0e04, 0x0e08, 0x0e0c, 0x0e10, 0x0e14, 0x0e18, + 0x0e1d, 0x0e22, 0x0e27, 0x0e2c, 0x0e31, 0x0e36, 0x0e3a, 0x0e3e, + 0x0e42, 0x0e46, 0x0e4a, 0x0e4e, 0x0e52, 0x0e56, 0x0e5a, 0x0e5e, + 0x0e62, 0x0e66, 0x0e6a, 0x0e6e, 0x0e72, 0x0e76, 0x0e7a, 0x0e7e, + 0x0e82, 0x0e86, 0x0e8a, 0x0e8e, 0x0e92, 0x0e96, 0x0e9a, 0x0e9e, + 0x0ea2, 0x0ea6, 0x0eaa, 0x0eae, 0x0eb2, 0x0eb6, 0x0ebc, 0x0ec2, + 0x0ec8, 0x0ecc, 0x0ed0, 0x0ed4, 0x0ed8, 0x0edc, 0x0ee0, 0x0ee4, + 0x0ee8, 0x0eec, 0x0ef0, 0x0ef4, 0x0ef8, 0x0efc, 0x0f00, 0x0f04, + // Entry 380 - 3BF + 0x0f08, 0x0f0c, 0x0f10, 0x0f14, 0x0f18, 0x0f1c, 0x0f20, 0x0f24, + 0x0f28, 0x0f2c, 0x0f30, 0x0f34, 0x0f38, 0x0f3e, 0x0f44, 0x0f4a, + 0x0f50, 0x0f56, 0x0f5c, 0x0f62, 0x0f68, 0x0f6e, 0x0f74, 0x0f7a, + 0x0f80, 0x0f86, 0x0f8c, 0x0f92, 0x0f98, 0x0f9e, 0x0fa4, 0x0faa, + 0x0fb0, 0x0fb6, 0x0fbc, 0x0fc2, 0x0fc8, 0x0fce, 0x0fd4, 0x0fda, + 0x0fe0, 0x0fe6, 0x0fec, 0x0ff2, 0x0ff8, 0x0ffe, 0x1004, 0x100a, + 0x1010, 0x1016, 0x101c, 0x1022, 0x1028, 0x102e, 0x1034, 0x103a, + 0x1040, 0x1046, 0x104c, 0x1052, 0x1058, 0x105e, 0x1064, 0x106a, + // Entry 3C0 - 3FF + 0x1070, 0x1076, 0x107c, 0x1082, 0x1088, 0x108e, 0x1094, 0x109a, + 0x10a0, 0x10a6, 0x10ac, 0x10b2, 0x10b8, 0x10be, 0x10c4, 0x10ca, + 0x10d0, 0x10d6, 0x10dc, 0x10e2, 0x10e8, 0x10ee, 0x10f4, 0x10fa, + 0x1100, 0x1106, 0x110c, 0x1112, 0x1118, 0x111e, 0x1124, 0x112a, + 0x1130, 0x1136, 0x113c, 0x1142, 0x1148, 0x114e, 0x1154, 0x115a, + 0x1160, 0x1166, 0x116c, 0x1172, 0x1178, 0x1180, 0x1188, 0x1190, + 0x1198, 0x11a0, 0x11a8, 0x11b0, 0x11b6, 0x11d7, 0x11e6, 0x11ee, + 0x11ef, 0x11f0, 0x11f1, 0x11f2, 0x11f3, 0x11f4, 0x11f5, 0x11f6, + // Entry 400 - 43F + 0x11f7, 0x11f8, 0x11f9, 0x11fa, 0x11fb, 0x11fc, 0x11fd, 0x11fe, + 0x11ff, 0x1200, 0x1201, 0x1205, 
0x1209, 0x120d, 0x1211, 0x1215, + 0x1219, 0x121b, 0x121d, 0x121f, 0x1221, 0x1223, 0x1225, 0x1227, + 0x1229, 0x122b, 0x122d, 0x122f, 0x1231, 0x1233, 0x1235, 0x1237, + 0x1239, 0x123b, 0x123d, 0x123f, 0x1241, 0x1243, 0x1245, 0x1247, + 0x1249, 0x124b, 0x124d, 0x124f, 0x1251, 0x1253, 0x1255, 0x1257, + 0x1259, 0x125b, 0x125d, 0x125f, 0x1263, 0x1267, 0x126b, 0x126f, + 0x1270, 0x1271, 0x1272, 0x1273, 0x1274, 0x1275, 0x1277, 0x1279, + // Entry 440 - 47F + 0x127b, 0x127d, 0x127f, 0x1287, 0x128f, 0x129b, 0x12a7, 0x12b3, + 0x12bf, 0x12cb, 0x12d3, 0x12db, 0x12e7, 0x12f3, 0x12ff, 0x130b, + 0x130d, 0x130f, 0x1311, 0x1313, 0x1315, 0x1317, 0x1319, 0x131b, + 0x131d, 0x131f, 0x1321, 0x1323, 0x1325, 0x1327, 0x1329, 0x132b, + 0x132e, 0x1331, 0x1333, 0x1335, 0x1337, 0x1339, 0x133b, 0x133d, + 0x133f, 0x1341, 0x1343, 0x1345, 0x1347, 0x1349, 0x134b, 0x134d, + 0x1350, 0x1353, 0x1356, 0x1359, 0x135c, 0x135f, 0x1362, 0x1365, + 0x1368, 0x136b, 0x136e, 0x1371, 0x1374, 0x1377, 0x137a, 0x137d, + // Entry 480 - 4BF + 0x1380, 0x1383, 0x1386, 0x1389, 0x138c, 0x138f, 0x1392, 0x1395, + 0x1398, 0x139b, 0x13a2, 0x13a4, 0x13a6, 0x13a8, 0x13ab, 0x13ad, + 0x13af, 0x13b1, 0x13b3, 0x13b5, 0x13bb, 0x13c1, 0x13c4, 0x13c7, + 0x13ca, 0x13cd, 0x13d0, 0x13d3, 0x13d6, 0x13d9, 0x13dc, 0x13df, + 0x13e2, 0x13e5, 0x13e8, 0x13eb, 0x13ee, 0x13f1, 0x13f4, 0x13f7, + 0x13fa, 0x13fd, 0x1400, 0x1403, 0x1406, 0x1409, 0x140c, 0x140f, + 0x1412, 0x1415, 0x1418, 0x141b, 0x141e, 0x1421, 0x1424, 0x1427, + 0x142a, 0x142d, 0x1430, 0x1433, 0x1436, 0x1439, 0x143c, 0x143f, + // Entry 4C0 - 4FF + 0x1442, 0x1445, 0x1448, 0x1451, 0x145a, 0x1463, 0x146c, 0x1475, + 0x147e, 0x1487, 0x1490, 0x1499, 0x149c, 0x149f, 0x14a2, 0x14a5, + 0x14a8, 0x14ab, 0x14ae, 0x14b1, 0x14b4, 0x14b7, 0x14ba, 0x14bd, + 0x14c0, 0x14c3, 0x14c6, 0x14c9, 0x14cc, 0x14cf, 0x14d2, 0x14d5, + 0x14d8, 0x14db, 0x14de, 0x14e1, 0x14e4, 0x14e7, 0x14ea, 0x14ed, + 0x14f0, 0x14f3, 0x14f6, 0x14f9, 0x14fc, 0x14ff, 0x1502, 0x1505, + 0x1508, 0x150b, 0x150e, 0x1511, 0x1514, 0x1517, 
0x151a, 0x151d, + 0x1520, 0x1523, 0x1526, 0x1529, 0x152c, 0x152f, 0x1532, 0x1535, + // Entry 500 - 53F + 0x1538, 0x153b, 0x153e, 0x1541, 0x1544, 0x1547, 0x154a, 0x154d, + 0x1550, 0x1553, 0x1556, 0x1559, 0x155c, 0x155f, 0x1562, 0x1565, + 0x1568, 0x156b, 0x156e, 0x1571, 0x1574, 0x1577, 0x157a, 0x157d, + 0x1580, 0x1583, 0x1586, 0x1589, 0x158c, 0x158f, 0x1592, 0x1595, + 0x1598, 0x159b, 0x159e, 0x15a1, 0x15a4, 0x15a7, 0x15aa, 0x15ad, + 0x15b0, 0x15b3, 0x15b6, 0x15b9, 0x15bc, 0x15bf, 0x15c2, 0x15c5, + 0x15c8, 0x15cb, 0x15ce, 0x15d1, 0x15d4, 0x15d7, 0x15da, 0x15dd, + 0x15e0, 0x15e3, 0x15e6, 0x15e9, 0x15ec, 0x15ef, 0x15f2, 0x15f5, + // Entry 540 - 57F + 0x15f8, 0x15fb, 0x15fe, 0x1601, 0x1604, 0x1607, 0x160a, 0x160d, + 0x1610, 0x1613, 0x1616, 0x1619, 0x161c, 0x161f, 0x1622, 0x1625, + 0x1628, 0x162b, 0x162e, 0x1631, 0x1634, 0x1637, 0x163a, 0x163d, + 0x1640, 0x1643, 0x1646, 0x1649, 0x164c, 0x164f, 0x1652, 0x1655, + 0x1658, 0x165b, 0x165e, 0x1661, 0x1664, 0x1667, 0x166a, 0x166d, + 0x1670, 0x1673, 0x1676, 0x1679, 0x167c, 0x167f, 0x1682, 0x1685, + 0x1688, 0x168b, 0x168e, 0x1691, 0x1694, 0x1697, 0x169a, 0x169d, + 0x16a0, 0x16a3, 0x16a6, 0x16a9, 0x16ac, 0x16af, 0x16b2, 0x16b5, + // Entry 580 - 5BF + 0x16b8, 0x16bb, 0x16be, 0x16c1, 0x16c4, 0x16c7, 0x16ca, 0x16cd, + 0x16d0, 0x16d3, 0x16d6, 0x16d9, 0x16dc, 0x16df, 0x16e2, 0x16e5, + 0x16e8, 0x16eb, 0x16ee, 0x16f1, 0x16f4, 0x16f7, 0x16fa, 0x16fd, + 0x1700, 0x1703, 0x1706, 0x1709, 0x170c, 0x170f, 0x1712, 0x1715, + 0x1718, 0x171b, 0x171e, 0x1721, 0x1724, 0x1727, 0x172a, 0x172d, + 0x1730, 0x1733, 0x1736, 0x1739, 0x173c, 0x173f, 0x1742, 0x1745, + 0x1748, 0x174b, 0x174e, 0x1751, 0x1754, 0x1757, 0x175a, 0x175d, + 0x1760, 0x1763, 0x1766, 0x1769, 0x176c, 0x176f, 0x1772, 0x1775, + // Entry 5C0 - 5FF + 0x1778, 0x177b, 0x177e, 0x1781, 0x1784, 0x1787, 0x178a, 0x178d, + 0x1790, 0x1793, 0x1796, 0x1799, 0x179c, 0x179f, 0x17a2, 0x17a5, + 0x17a8, 0x17ab, 0x17ae, 0x17b1, 0x17b4, 0x17b7, 0x17ba, 0x17bd, + 0x17c0, 0x17c3, 0x17c6, 0x17c9, 0x17cc, 0x17cf, 
0x17d2, 0x17d5, + 0x17d8, 0x17db, 0x17de, 0x17e1, 0x17e4, 0x17e7, 0x17ea, 0x17ed, + 0x17f0, 0x17f3, 0x17f6, 0x17f9, 0x17fc, 0x17ff, 0x1802, 0x1805, + 0x1808, 0x180b, 0x180e, 0x1811, 0x1814, 0x1817, 0x181a, 0x181d, + 0x1820, 0x1823, 0x1826, 0x1829, 0x182c, 0x182f, 0x1832, 0x1835, + // Entry 600 - 63F + 0x1838, 0x183b, 0x183e, 0x1841, 0x1844, 0x1847, 0x184a, 0x184d, + 0x1850, 0x1853, 0x1856, 0x1859, 0x185c, 0x185f, 0x1862, 0x1865, + 0x1868, 0x186b, 0x186e, 0x1871, 0x1874, 0x1877, 0x187a, 0x187d, + 0x1880, 0x1883, 0x1886, 0x1889, 0x188c, 0x188f, 0x1892, 0x1895, + 0x1898, 0x189b, 0x189e, 0x18a1, 0x18a4, 0x18a7, 0x18aa, 0x18ad, + 0x18b0, 0x18b3, 0x18b6, 0x18b9, 0x18bc, 0x18bf, 0x18c2, 0x18c5, + 0x18c8, 0x18cb, 0x18ce, 0x18d1, 0x18d4, 0x18d7, 0x18da, 0x18dd, + 0x18e0, 0x18e3, 0x18e6, 0x18e9, 0x18ec, 0x18ef, 0x18f2, 0x18f5, + // Entry 640 - 67F + 0x18f8, 0x18fb, 0x18fe, 0x1901, 0x1904, 0x1907, 0x190a, 0x190d, + 0x1910, 0x1913, 0x1916, 0x1919, 0x191c, 0x191f, 0x1922, 0x1925, + 0x1928, 0x192b, 0x192e, 0x1931, 0x1934, 0x1937, 0x193a, 0x193d, + 0x1940, 0x1943, 0x1946, 0x1949, 0x194c, 0x194f, 0x1952, 0x1955, + 0x1958, 0x195b, 0x195e, 0x1961, 0x1964, 0x1967, 0x196a, 0x196d, + 0x1970, 0x1973, 0x1976, 0x1979, 0x197c, 0x197f, 0x1982, 0x1985, + 0x1988, 0x198b, +} // Size: 3324 bytes var xorData string = "" + // Size: 4862 bytes "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + @@ -547,7 +690,7 @@ func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { return 0 } -// idnaTrie. Total size: 30288 bytes (29.58 KiB). Checksum: c0cd84404a2f6f19. +// idnaTrie. Total size: 30196 bytes (29.49 KiB). Checksum: e2ae95a945f04016. 
type idnaTrie struct{} func newIdnaTrie(i int) *idnaTrie { @@ -600,11 +743,11 @@ var idnaValues = [8192]uint16{ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, - 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, - 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, - 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, - 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, - 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x0012, 0xe9: 0x0018, + 0xea: 0x0019, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x0022, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0029, 0xf3: 0x0031, 0xf4: 0x003a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x0042, 0xf9: 0x0049, 0xfa: 0x0051, 0xfb: 0x0018, + 0xfc: 0x0059, 0xfd: 0x0061, 0xfe: 0x0069, 0xff: 0x0018, // Block 0x4, offset 0x100 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, @@ -614,12 +757,12 @@ var idnaValues = [8192]uint16{ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008, 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, - 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x130: 0x0071, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, - 0x13c: 0x0008, 0x13d: 
0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0079, // Block 0x5, offset 0x140 - 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, - 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x140: 0x0079, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x0081, 0x14a: 0xe00d, 0x14b: 0x0008, 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, @@ -628,7 +771,7 @@ var idnaValues = [8192]uint16{ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, - 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x0089, // Block 0x6, offset 0x180 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, @@ -642,8 +785,8 @@ var idnaValues = [8192]uint16{ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, // Block 0x7, offset 0x1c0 - 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, - 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x0091, 0x1c5: 0x0091, + 0x1c6: 0x0091, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 
0x1cb: 0x025d, 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, @@ -663,22 +806,22 @@ var idnaValues = [8192]uint16{ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, - 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, - 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0099, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x00a1, 0x23f: 0x0008, // Block 0x9, offset 0x240 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, - 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, - 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x258: 0x00d2, 0x259: 0x00da, 0x25a: 0x00e2, 0x25b: 0x00ea, 0x25c: 0x00f2, 0x25d: 0x00fa, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0101, 0x262: 0x0089, 0x263: 0x0109, 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 
0x0018, 0x27a: 0x0018, 0x27b: 0x0018, 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, // Block 0xa, offset 0x280 - 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0111, 0x285: 0x040d, 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, @@ -687,10 +830,10 @@ var idnaValues = [8192]uint16{ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, - 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, - 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x011a, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x0122, 0x2bf: 0x043d, // Block 0xb, offset 0x2c0 - 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x003a, 0x2c5: 0x012a, 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, @@ -782,8 +925,8 @@ var idnaValues = [8192]uint16{ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 
0x4af: 0x0a08, - 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, - 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0139, + 0x4b6: 0x0141, 0x4b7: 0x0149, 0x4b8: 0x0151, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, // Block 0x13, offset 0x4c0 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, @@ -826,8 +969,8 @@ var idnaValues = [8192]uint16{ 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, - 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, - 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x598: 0x0159, 0x599: 0x0161, 0x59a: 0x0169, 0x59b: 0x0171, 0x59c: 0x0179, 0x59d: 0x0181, + 0x59e: 0x0189, 0x59f: 0x0191, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, @@ -850,8 +993,8 @@ var idnaValues = [8192]uint16{ 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, - 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, - 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 
0x622: 0x3308, 0x623: 0x3308, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0199, 0x61d: 0x01a1, + 0x61e: 0x0040, 0x61f: 0x01a9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, @@ -866,16 +1009,16 @@ var idnaValues = [8192]uint16{ 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, - 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, - 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x01b1, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x01b9, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, // Block 0x1a, offset 0x680 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, - 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, - 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x698: 0x0040, 0x699: 0x01c1, 0x69a: 0x01c9, 0x69b: 0x01d1, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x01d9, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, 0x6a4: 
0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, @@ -922,7 +1065,7 @@ var idnaValues = [8192]uint16{ 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x3308, 0x796: 0x3308, 0x797: 0x3008, - 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x01e1, 0x79d: 0x01e9, 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, @@ -998,32 +1141,32 @@ var idnaValues = [8192]uint16{ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0008, 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, - 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x01f9, 0x934: 0x3308, 0x935: 0x3308, 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x3b08, 0x93b: 0x3308, 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, // Block 0x25, offset 0x940 - 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x0211, 0x944: 0x0008, 0x945: 0x0008, 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, 
- 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, - 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, - 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0219, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0221, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0229, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0231, 0x95d: 0x0008, 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, - 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0239, 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, - 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, - 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0241, 0x974: 0x3308, 0x975: 0x0249, + 0x976: 0x0251, 0x977: 0x0259, 0x978: 0x0261, 0x979: 0x0269, 0x97a: 0x3308, 0x97b: 0x3308, 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, // Block 0x26, offset 0x980 - 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x980: 0x3308, 0x981: 0x0271, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, - 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, - 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, - 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 
0x9a3: 0x3308, - 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, - 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x992: 0x3308, 0x993: 0x0279, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0281, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0289, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0291, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0299, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, - 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x02a1, 0x9ba: 0x3308, 0x9bb: 0x3308, 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, // Block 0x27, offset 0x9c0 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, @@ -1033,34 +1176,34 @@ var idnaValues = [8192]uint16{ 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, - 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008, - 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, - 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, - 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0019, 0x9ed: 0x02e1, 0x9ee: 0x02e9, 0x9ef: 0x0008, + 0x9f0: 0x02f1, 0x9f1: 0x02f9, 0x9f2: 0x0301, 0x9f3: 0x0309, 0x9f4: 0x00a9, 0x9f5: 0x0311, + 0x9f6: 0x00b1, 0x9f7: 0x0319, 0x9f8: 
0x0101, 0x9f9: 0x0321, 0x9fa: 0x0329, 0x9fb: 0x0008, + 0x9fc: 0x0051, 0x9fd: 0x0331, 0x9fe: 0x0339, 0x9ff: 0x00b9, // Block 0x28, offset 0xa00 - 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, - 0xa06: 0x05b5, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, - 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, - 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05cd, 0xa15: 0x05cd, 0xa16: 0x0f99, 0xa17: 0x0fa9, - 0xa18: 0x0fb9, 0xa19: 0x05b5, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05e5, 0xa1d: 0x1099, - 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, - 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, - 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa00: 0x0341, 0xa01: 0x0349, 0xa02: 0x00c1, 0xa03: 0x0019, 0xa04: 0x0351, 0xa05: 0x0359, + 0xa06: 0x05b5, 0xa07: 0x02e9, 0xa08: 0x02f1, 0xa09: 0x02f9, 0xa0a: 0x0361, 0xa0b: 0x0369, + 0xa0c: 0x0371, 0xa0d: 0x0309, 0xa0e: 0x0008, 0xa0f: 0x0319, 0xa10: 0x0321, 0xa11: 0x0379, + 0xa12: 0x0051, 0xa13: 0x0381, 0xa14: 0x05cd, 0xa15: 0x05cd, 0xa16: 0x0339, 0xa17: 0x0341, + 0xa18: 0x0349, 0xa19: 0x05b5, 0xa1a: 0x0389, 0xa1b: 0x0391, 0xa1c: 0x05e5, 0xa1d: 0x0399, + 0xa1e: 0x03a1, 0xa1f: 0x03a9, 0xa20: 0x03b1, 0xa21: 0x03b9, 0xa22: 0x0311, 0xa23: 0x00b9, + 0xa24: 0x0349, 0xa25: 0x0391, 0xa26: 0x0399, 0xa27: 0x03a1, 0xa28: 0x03c1, 0xa29: 0x03b1, + 0xa2a: 0x03b9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, - 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x03c9, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, // Block 0x29, offset 0xa40 0xa40: 0x0008, 0xa41: 
0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, - 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, - 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, - 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05fd, 0xa68: 0x1239, 0xa69: 0x1251, - 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, - 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, - 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x0615, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, - 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x03d1, 0xa5c: 0x03d9, 0xa5d: 0x03e1, + 0xa5e: 0x03e9, 0xa5f: 0x0371, 0xa60: 0x03f1, 0xa61: 0x03f9, 0xa62: 0x0401, 0xa63: 0x0409, + 0xa64: 0x0411, 0xa65: 0x0419, 0xa66: 0x0421, 0xa67: 0x05fd, 0xa68: 0x0429, 0xa69: 0x0431, + 0xa6a: 0xe17d, 0xa6b: 0x0439, 0xa6c: 0x0441, 0xa6d: 0x0449, 0xa6e: 0x0451, 0xa6f: 0x0459, + 0xa70: 0x0461, 0xa71: 0x0469, 0xa72: 0x0471, 0xa73: 0x0479, 0xa74: 0x0481, 0xa75: 0x0489, + 0xa76: 0x0491, 0xa77: 0x0499, 0xa78: 0x0615, 0xa79: 0x04a1, 0xa7a: 0x04a9, 0xa7b: 0x04b1, + 0xa7c: 0x04b9, 0xa7d: 0x04c1, 0xa7e: 0x04c9, 0xa7f: 0x04d1, // Block 0x2a, offset 0xa80 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, @@ -1079,7 +1222,7 @@ var idnaValues = [8192]uint16{ 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 
0x0008, 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x062d, 0xadb: 0x064d, 0xadc: 0x0008, 0xadd: 0x0008, - 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xade: 0x04d9, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, @@ -1094,33 +1237,33 @@ var idnaValues = [8192]uint16{ 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, - 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, - 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, - 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + 0xb30: 0x0008, 0xb31: 0x04e1, 0xb32: 0x0008, 0xb33: 0x04e9, 0xb34: 0x0008, 0xb35: 0x04f1, + 0xb36: 0x0008, 0xb37: 0x04f9, 0xb38: 0x0008, 0xb39: 0x0501, 0xb3a: 0x0008, 0xb3b: 0x0509, + 0xb3c: 0x0008, 0xb3d: 0x0511, 0xb3e: 0x0040, 0xb3f: 0x0040, // Block 0x2d, offset 0xb40 - 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, - 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, - 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, - 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, - 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, - 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, - 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 
0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, - 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, - 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, - 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x0665, 0xb7b: 0x1459, - 0xb7c: 0x19b1, 0xb7d: 0x067e, 0xb7e: 0x1a31, 0xb7f: 0x069e, + 0xb40: 0x0519, 0xb41: 0x0521, 0xb42: 0x0529, 0xb43: 0x0531, 0xb44: 0x0539, 0xb45: 0x0541, + 0xb46: 0x0549, 0xb47: 0x0551, 0xb48: 0x0519, 0xb49: 0x0521, 0xb4a: 0x0529, 0xb4b: 0x0531, + 0xb4c: 0x0539, 0xb4d: 0x0541, 0xb4e: 0x0549, 0xb4f: 0x0551, 0xb50: 0x0559, 0xb51: 0x0561, + 0xb52: 0x0569, 0xb53: 0x0571, 0xb54: 0x0579, 0xb55: 0x0581, 0xb56: 0x0589, 0xb57: 0x0591, + 0xb58: 0x0559, 0xb59: 0x0561, 0xb5a: 0x0569, 0xb5b: 0x0571, 0xb5c: 0x0579, 0xb5d: 0x0581, + 0xb5e: 0x0589, 0xb5f: 0x0591, 0xb60: 0x0599, 0xb61: 0x05a1, 0xb62: 0x05a9, 0xb63: 0x05b1, + 0xb64: 0x05b9, 0xb65: 0x05c1, 0xb66: 0x05c9, 0xb67: 0x05d1, 0xb68: 0x0599, 0xb69: 0x05a1, + 0xb6a: 0x05a9, 0xb6b: 0x05b1, 0xb6c: 0x05b9, 0xb6d: 0x05c1, 0xb6e: 0x05c9, 0xb6f: 0x05d1, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x05d9, 0xb73: 0x05e1, 0xb74: 0x05e9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x05f1, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x0665, 0xb7b: 0x04e1, + 0xb7c: 0x05e1, 0xb7d: 0x067e, 0xb7e: 0x05f9, 0xb7f: 0x069e, // Block 0x2e, offset 0xb80 - 0xb80: 0x06be, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, - 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06dd, 0xb89: 0x1471, 0xb8a: 0x06f5, 0xb8b: 0x1489, - 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, - 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, - 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x070d, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, - 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xb80: 0x06be, 0xb81: 0x0602, 0xb82: 0x0609, 0xb83: 
0x0611, 0xb84: 0x0619, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x0621, 0xb88: 0x06dd, 0xb89: 0x04e9, 0xb8a: 0x06f5, 0xb8b: 0x04f1, + 0xb8c: 0x0611, 0xb8d: 0x062a, 0xb8e: 0x0632, 0xb8f: 0x063a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x0641, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x070d, 0xb9b: 0x04f9, 0xb9c: 0x0040, 0xb9d: 0x064a, + 0xb9e: 0x0652, 0xb9f: 0x065a, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x0661, 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, - 0xbaa: 0x0725, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, - 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, - 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x073d, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, - 0xbbc: 0x1ce9, 0xbbd: 0x0756, 0xbbe: 0x0776, 0xbbf: 0x0040, + 0xbaa: 0x0725, 0xbab: 0x0509, 0xbac: 0xe04d, 0xbad: 0x066a, 0xbae: 0x012a, 0xbaf: 0x0672, + 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x0679, 0xbb3: 0x0681, 0xbb4: 0x0689, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x0691, 0xbb8: 0x073d, 0xbb9: 0x0501, 0xbba: 0x0515, 0xbbb: 0x0511, + 0xbbc: 0x0681, 0xbbd: 0x0756, 0xbbe: 0x0776, 0xbbf: 0x0040, // Block 0x2f, offset 0xbc0 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, @@ -1130,72 +1273,72 @@ var idnaValues = [8192]uint16{ 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, - 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, - 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, - 0xbfc: 0x1e7a, 
0xbfd: 0x0018, 0xbfe: 0x07b6, 0xbff: 0x0018, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x0699, 0xbf4: 0x06a1, 0xbf5: 0x0018, + 0xbf6: 0x06a9, 0xbf7: 0x06b1, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x06ba, 0xbfd: 0x0018, 0xbfe: 0x07b6, 0xbff: 0x0018, // Block 0x30, offset 0xc00 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, - 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc06: 0x0018, 0xc07: 0x06c2, 0xc08: 0x06ca, 0xc09: 0x06d2, 0xc0a: 0x0018, 0xc0b: 0x0018, 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, - 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x06d9, 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, - 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, - 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07d5, - 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + 0xc30: 0x06e1, 0xc31: 0x0311, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x06e9, 0xc35: 0x06f1, + 0xc36: 0x06f9, 0xc37: 0x0701, 0xc38: 0x0709, 0xc39: 0x0711, 0xc3a: 0x071a, 0xc3b: 0x07d5, + 0xc3c: 0x0722, 0xc3d: 0x072a, 0xc3e: 0x0732, 0xc3f: 0x0329, // Block 0x31, offset 0xc40 - 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, - 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07ed, - 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 
0x0039, 0xc51: 0x0f09, - 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, - 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc40: 0x06e1, 0xc41: 0x0049, 0xc42: 0x0029, 0xc43: 0x0031, 0xc44: 0x06e9, 0xc45: 0x06f1, + 0xc46: 0x06f9, 0xc47: 0x0701, 0xc48: 0x0709, 0xc49: 0x0711, 0xc4a: 0x071a, 0xc4b: 0x07ed, + 0xc4c: 0x0722, 0xc4d: 0x072a, 0xc4e: 0x0732, 0xc4f: 0x0040, 0xc50: 0x0019, 0xc51: 0x02f9, + 0xc52: 0x0051, 0xc53: 0x0109, 0xc54: 0x0361, 0xc55: 0x00a9, 0xc56: 0x0319, 0xc57: 0x0101, + 0xc58: 0x0321, 0xc59: 0x0329, 0xc5a: 0x0339, 0xc5b: 0x0089, 0xc5c: 0x0341, 0xc5d: 0x0040, 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, - 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x0739, 0xc69: 0x0018, 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, // Block 0x32, offset 0xc80 - 0xc80: 0x0806, 0xc81: 0x0826, 0xc82: 0x1159, 0xc83: 0x0845, 0xc84: 0x0018, 0xc85: 0x0866, - 0xc86: 0x0886, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x08a5, 0xc8a: 0x0f31, 0xc8b: 0x0249, - 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41, - 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, - 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, - 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08c5, 0xca2: 0x2061, 0xca3: 0x0018, - 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, - 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 
0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, - 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, - 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08e5, - 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + 0xc80: 0x0806, 0xc81: 0x0826, 0xc82: 0x03d9, 0xc83: 0x0845, 0xc84: 0x0018, 0xc85: 0x0866, + 0xc86: 0x0886, 0xc87: 0x0369, 0xc88: 0x0018, 0xc89: 0x08a5, 0xc8a: 0x0309, 0xc8b: 0x00a9, + 0xc8c: 0x00a9, 0xc8d: 0x00a9, 0xc8e: 0x00a9, 0xc8f: 0x0741, 0xc90: 0x0311, 0xc91: 0x0311, + 0xc92: 0x0101, 0xc93: 0x0101, 0xc94: 0x0018, 0xc95: 0x0329, 0xc96: 0x0749, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0339, 0xc9a: 0x0751, 0xc9b: 0x00b9, 0xc9c: 0x00b9, 0xc9d: 0x00b9, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x0759, 0xca1: 0x08c5, 0xca2: 0x0761, 0xca3: 0x0018, + 0xca4: 0x04b1, 0xca5: 0x0018, 0xca6: 0x0769, 0xca7: 0x0018, 0xca8: 0x04b1, 0xca9: 0x0018, + 0xcaa: 0x0319, 0xcab: 0x0771, 0xcac: 0x02e9, 0xcad: 0x03d9, 0xcae: 0x0018, 0xcaf: 0x02f9, + 0xcb0: 0x02f9, 0xcb1: 0x03f1, 0xcb2: 0x0040, 0xcb3: 0x0321, 0xcb4: 0x0051, 0xcb5: 0x0779, + 0xcb6: 0x0781, 0xcb7: 0x0789, 0xcb8: 0x0791, 0xcb9: 0x0311, 0xcba: 0x0018, 0xcbb: 0x08e5, + 0xcbc: 0x0799, 0xcbd: 0x03a1, 0xcbe: 0x03a1, 0xcbf: 0x0799, // Block 0x33, offset 0xcc0 - 0xcc0: 0x0905, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, - 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, - 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, - 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, - 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, - 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x091d, 0xce3: 0x2439, - 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x093d, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, - 0xcea: 0x24a9, 0xceb: 0x095d, 0xcec: 0x0359, 0xced: 0x1159, 
0xcee: 0x0ef9, 0xcef: 0x0f61, - 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x097d, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, - 0xcf6: 0x099d, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09bd, - 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + 0xcc0: 0x0905, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x02f1, + 0xcc6: 0x02f1, 0xcc7: 0x02f9, 0xcc8: 0x0311, 0xcc9: 0x00b1, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x07a1, 0xcd1: 0x07a9, + 0xcd2: 0x07b1, 0xcd3: 0x07b9, 0xcd4: 0x07c1, 0xcd5: 0x07c9, 0xcd6: 0x07d1, 0xcd7: 0x07d9, + 0xcd8: 0x07e1, 0xcd9: 0x07e9, 0xcda: 0x07f1, 0xcdb: 0x07f9, 0xcdc: 0x0801, 0xcdd: 0x0809, + 0xcde: 0x0811, 0xcdf: 0x0819, 0xce0: 0x0311, 0xce1: 0x0821, 0xce2: 0x091d, 0xce3: 0x0829, + 0xce4: 0x0391, 0xce5: 0x0831, 0xce6: 0x093d, 0xce7: 0x0839, 0xce8: 0x0841, 0xce9: 0x0109, + 0xcea: 0x0849, 0xceb: 0x095d, 0xcec: 0x0101, 0xced: 0x03d9, 0xcee: 0x02f1, 0xcef: 0x0321, + 0xcf0: 0x0311, 0xcf1: 0x0821, 0xcf2: 0x097d, 0xcf3: 0x0829, 0xcf4: 0x0391, 0xcf5: 0x0831, + 0xcf6: 0x099d, 0xcf7: 0x0839, 0xcf8: 0x0841, 0xcf9: 0x0109, 0xcfa: 0x0849, 0xcfb: 0x09bd, + 0xcfc: 0x0101, 0xcfd: 0x03d9, 0xcfe: 0x02f1, 0xcff: 0x0321, // Block 0x34, offset 0xd00 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, - 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, - 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, - 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 
0xd2f: 0x2691, - 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a1e, 0xd35: 0x0a3e, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x0049, 0xd21: 0x0029, 0xd22: 0x0031, 0xd23: 0x06e9, + 0xd24: 0x06f1, 0xd25: 0x06f9, 0xd26: 0x0701, 0xd27: 0x0709, 0xd28: 0x0711, 0xd29: 0x0879, + 0xd2a: 0x0881, 0xd2b: 0x0889, 0xd2c: 0x0891, 0xd2d: 0x0899, 0xd2e: 0x08a1, 0xd2f: 0x08a9, + 0xd30: 0x08b1, 0xd31: 0x08b9, 0xd32: 0x08c1, 0xd33: 0x08c9, 0xd34: 0x0a1e, 0xd35: 0x0a3e, 0xd36: 0x0a5e, 0xd37: 0x0a7e, 0xd38: 0x0a9e, 0xd39: 0x0abe, 0xd3a: 0x0ade, 0xd3b: 0x0afe, - 0xd3c: 0x0b1e, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + 0xd3c: 0x0b1e, 0xd3d: 0x08d2, 0xd3e: 0x08da, 0xd3f: 0x08e2, // Block 0x35, offset 0xd40 - 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, - 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd40: 0x08ea, 0xd41: 0x08f2, 0xd42: 0x08fa, 0xd43: 0x0902, 0xd44: 0x090a, 0xd45: 0x0912, + 0xd46: 0x091a, 0xd47: 0x0922, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b3e, 0xd5d: 0x0b5e, @@ -1203,17 +1346,17 @@ var idnaValues = [8192]uint16{ 0xd64: 0x0c3e, 0xd65: 0x0c5e, 0xd66: 0x0c7e, 0xd67: 0x0c9e, 0xd68: 0x0cbe, 0xd69: 0x0cde, 0xd6a: 0x0cfe, 0xd6b: 0x0d1e, 0xd6c: 0x0d3e, 0xd6d: 0x0d5e, 0xd6e: 0x0d7e, 0xd6f: 0x0d9e, 0xd70: 0x0dbe, 0xd71: 0x0dde, 0xd72: 0x0dfe, 0xd73: 0x0e1e, 0xd74: 0x0e3e, 0xd75: 0x0e5e, - 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, - 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + 0xd76: 0x0019, 0xd77: 0x02e9, 0xd78: 0x03d9, 0xd79: 0x02f1, 0xd7a: 0x02f9, 0xd7b: 0x03f1, + 0xd7c: 0x0309, 0xd7d: 0x00a9, 0xd7e: 0x0311, 0xd7f: 0x00b1, // Block 
0x36, offset 0xd80 - 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, - 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, - 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, - 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, - 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, - 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, - 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, - 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xd80: 0x0319, 0xd81: 0x0101, 0xd82: 0x0321, 0xd83: 0x0329, 0xd84: 0x0051, 0xd85: 0x0339, + 0xd86: 0x0751, 0xd87: 0x00b9, 0xd88: 0x0089, 0xd89: 0x0341, 0xd8a: 0x0349, 0xd8b: 0x0391, + 0xd8c: 0x00c1, 0xd8d: 0x0109, 0xd8e: 0x00c9, 0xd8f: 0x04b1, 0xd90: 0x0019, 0xd91: 0x02e9, + 0xd92: 0x03d9, 0xd93: 0x02f1, 0xd94: 0x02f9, 0xd95: 0x03f1, 0xd96: 0x0309, 0xd97: 0x00a9, + 0xd98: 0x0311, 0xd99: 0x00b1, 0xd9a: 0x0319, 0xd9b: 0x0101, 0xd9c: 0x0321, 0xd9d: 0x0329, + 0xd9e: 0x0051, 0xd9f: 0x0339, 0xda0: 0x0751, 0xda1: 0x00b9, 0xda2: 0x0089, 0xda3: 0x0341, + 0xda4: 0x0349, 0xda5: 0x0391, 0xda6: 0x00c1, 0xda7: 0x0109, 0xda8: 0x00c9, 0xda9: 0x04b1, + 0xdaa: 0x06e1, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, @@ -1223,12 +1366,12 @@ var idnaValues = [8192]uint16{ 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, 0xdd8: 0x0008, 0xdd9: 0x0008, 
0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, - 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ed5, - 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, - 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, - 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x0941, 0xde3: 0x0ed5, + 0xde4: 0x0949, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0359, 0xdee: 0x0441, 0xdef: 0x0351, + 0xdf0: 0x03d1, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, - 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + 0xdfc: 0x00b1, 0xdfd: 0x0391, 0xdfe: 0x0951, 0xdff: 0x0959, // Block 0x38, offset 0xe00 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, @@ -1254,7 +1397,7 @@ var idnaValues = [8192]uint16{ 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, // Block 0x3a, offset 0xe80 - 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x0961, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, @@ -1290,17 +1433,17 @@ var idnaValues = [8192]uint16{ 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 
0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0008, 0xf3c: 0x0008, 0xf3d: 0x0008, 0xf3e: 0x0008, 0xf3f: 0x0008, // Block 0x3d, offset 0xf40 - 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32d5, 0xf45: 0x32f5, + 0xf40: 0x0b82, 0xf41: 0x0b8a, 0xf42: 0x0b92, 0xf43: 0x0b9a, 0xf44: 0x32d5, 0xf45: 0x32f5, 0xf46: 0x3315, 0xf47: 0x3335, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, - 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x3355, 0xf51: 0x3761, - 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, - 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, - 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x3375, 0xf61: 0x3395, 0xf62: 0x33b5, 0xf63: 0x33d5, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x3355, 0xf51: 0x0ba1, + 0xf52: 0x0ba9, 0xf53: 0x0bb1, 0xf54: 0x0bb9, 0xf55: 0x0bc1, 0xf56: 0x0bc9, 0xf57: 0x0bd1, + 0xf58: 0x0bd9, 0xf59: 0x0be1, 0xf5a: 0x0be9, 0xf5b: 0x0bf1, 0xf5c: 0x0bf9, 0xf5d: 0x0c01, + 0xf5e: 0x0c09, 0xf5f: 0x0c11, 0xf60: 0x3375, 0xf61: 0x3395, 0xf62: 0x33b5, 0xf63: 0x33d5, 0xf64: 0x33f5, 0xf65: 0x33f5, 0xf66: 0x3415, 0xf67: 0x3435, 0xf68: 0x3455, 0xf69: 0x3475, 0xf6a: 0x3495, 0xf6b: 0x34b5, 0xf6c: 0x34d5, 0xf6d: 0x34f5, 0xf6e: 0x3515, 0xf6f: 0x3535, 0xf70: 0x3555, 0xf71: 0x3575, 0xf72: 0x3595, 0xf73: 0x35b5, 0xf74: 0x35d5, 0xf75: 0x35f5, 0xf76: 0x3615, 0xf77: 0x3635, 0xf78: 0x3655, 0xf79: 0x3675, 0xf7a: 0x3695, 0xf7b: 0x36b5, - 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36d5, 0xf7f: 0x0018, + 0xf7c: 0x0c19, 0xf7d: 0x0c21, 0xf7e: 0x36d5, 0xf7f: 0x0018, // Block 0x3e, offset 0xf80 0xf80: 0x36f5, 0xf81: 0x3715, 0xf82: 0x3735, 0xf83: 0x3755, 0xf84: 0x3775, 0xf85: 0x3795, 0xf86: 0x37b5, 0xf87: 0x37d5, 0xf88: 0x37f5, 0xf89: 0x3815, 0xf8a: 0x3835, 0xf8b: 0x3855, @@ -1310,13 +1453,13 @@ var idnaValues = [8192]uint16{ 0xf9e: 0x3ab5, 0xf9f: 0x3ad5, 0xfa0: 0x3af5, 0xfa1: 0x3b15, 0xfa2: 0x3b35, 0xfa3: 0x3b55, 0xfa4: 
0x3b75, 0xfa5: 0x3b95, 0xfa6: 0x1295, 0xfa7: 0x3bb5, 0xfa8: 0x3bd5, 0xfa9: 0x3bf5, 0xfaa: 0x3c15, 0xfab: 0x3c35, 0xfac: 0x3c55, 0xfad: 0x3c75, 0xfae: 0x23b5, 0xfaf: 0x3c95, - 0xfb0: 0x3cb5, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, - 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, - 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + 0xfb0: 0x3cb5, 0xfb1: 0x0c29, 0xfb2: 0x0c31, 0xfb3: 0x0c39, 0xfb4: 0x0c41, 0xfb5: 0x0c49, + 0xfb6: 0x0c51, 0xfb7: 0x0c59, 0xfb8: 0x0c61, 0xfb9: 0x0c69, 0xfba: 0x0c71, 0xfbb: 0x0c79, + 0xfbc: 0x0c81, 0xfbd: 0x0c89, 0xfbe: 0x0c91, 0xfbf: 0x0c99, // Block 0x3f, offset 0xfc0 - 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, - 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, - 0xfcc: 0x3c99, 0xfcd: 0x3cd5, 0xfce: 0x3cb1, 0xfcf: 0x3cf5, 0xfd0: 0x3d15, 0xfd1: 0x3d2d, + 0xfc0: 0x0ca1, 0xfc1: 0x0ca9, 0xfc2: 0x0cb1, 0xfc3: 0x0cb9, 0xfc4: 0x0cc1, 0xfc5: 0x0cc9, + 0xfc6: 0x0cd1, 0xfc7: 0x0cd9, 0xfc8: 0x0ce1, 0xfc9: 0x0ce9, 0xfca: 0x0cf1, 0xfcb: 0x0cf9, + 0xfcc: 0x0d01, 0xfcd: 0x3cd5, 0xfce: 0x0d09, 0xfcf: 0x3cf5, 0xfd0: 0x3d15, 0xfd1: 0x3d2d, 0xfd2: 0x3d45, 0xfd3: 0x3d5d, 0xfd4: 0x3d75, 0xfd5: 0x3d75, 0xfd6: 0x3d5d, 0xfd7: 0x3d8d, 0xfd8: 0x07d5, 0xfd9: 0x3da5, 0xfda: 0x3dbd, 0xfdb: 0x3dd5, 0xfdc: 0x3ded, 0xfdd: 0x3e05, 0xfde: 0x3e1d, 0xfdf: 0x3e35, 0xfe0: 0x3e4d, 0xfe1: 0x3e65, 0xfe2: 0x3e7d, 0xfe3: 0x3e95, @@ -1324,769 +1467,769 @@ var idnaValues = [8192]uint16{ 0xfea: 0x3ef5, 0xfeb: 0x3f0d, 0xfec: 0x3f25, 0xfed: 0x3f3d, 0xfee: 0x3f55, 0xfef: 0x3f55, 0xff0: 0x3f6d, 0xff1: 0x3f6d, 0xff2: 0x3f6d, 0xff3: 0x3f85, 0xff4: 0x3f9d, 0xff5: 0x3fb5, 0xff6: 0x3fcd, 0xff7: 0x3fb5, 0xff8: 0x3fe5, 0xff9: 0x3ffd, 0xffa: 0x3f85, 0xffb: 0x4015, - 0xffc: 0x402d, 0xffd: 0x402d, 0xffe: 0x402d, 0xfff: 0x3cc9, + 0xffc: 0x402d, 0xffd: 0x402d, 0xffe: 0x402d, 0xfff: 0x0d11, // Block 0x40, offset 0x1000 - 
0x1000: 0x3d01, 0x1001: 0x3d69, 0x1002: 0x3dd1, 0x1003: 0x3e39, 0x1004: 0x3e89, 0x1005: 0x3ef1, - 0x1006: 0x3f41, 0x1007: 0x3f91, 0x1008: 0x4011, 0x1009: 0x4079, 0x100a: 0x40c9, 0x100b: 0x4119, - 0x100c: 0x4169, 0x100d: 0x41d1, 0x100e: 0x4239, 0x100f: 0x4289, 0x1010: 0x42d9, 0x1011: 0x4311, - 0x1012: 0x4361, 0x1013: 0x43c9, 0x1014: 0x4431, 0x1015: 0x4469, 0x1016: 0x44e9, 0x1017: 0x4581, - 0x1018: 0x4601, 0x1019: 0x4651, 0x101a: 0x46d1, 0x101b: 0x4751, 0x101c: 0x47b9, 0x101d: 0x4809, - 0x101e: 0x4859, 0x101f: 0x48a9, 0x1020: 0x4911, 0x1021: 0x4991, 0x1022: 0x49f9, 0x1023: 0x4a49, - 0x1024: 0x4a99, 0x1025: 0x4ae9, 0x1026: 0x4b21, 0x1027: 0x4b59, 0x1028: 0x4b91, 0x1029: 0x4bc9, - 0x102a: 0x4c19, 0x102b: 0x4c69, 0x102c: 0x4ce9, 0x102d: 0x4d39, 0x102e: 0x4da1, 0x102f: 0x4e21, - 0x1030: 0x4e71, 0x1031: 0x4ea9, 0x1032: 0x4ee1, 0x1033: 0x4f61, 0x1034: 0x4fc9, 0x1035: 0x5049, - 0x1036: 0x5099, 0x1037: 0x5119, 0x1038: 0x5151, 0x1039: 0x51a1, 0x103a: 0x51f1, 0x103b: 0x5241, - 0x103c: 0x5291, 0x103d: 0x52e1, 0x103e: 0x5349, 0x103f: 0x5399, + 0x1000: 0x10f9, 0x1001: 0x1101, 0x1002: 0x40a5, 0x1003: 0x1109, 0x1004: 0x1111, 0x1005: 0x1119, + 0x1006: 0x1121, 0x1007: 0x1129, 0x1008: 0x40c5, 0x1009: 0x1131, 0x100a: 0x1139, 0x100b: 0x1141, + 0x100c: 0x40e5, 0x100d: 0x40e5, 0x100e: 0x1149, 0x100f: 0x1151, 0x1010: 0x1159, 0x1011: 0x4105, + 0x1012: 0x4125, 0x1013: 0x4145, 0x1014: 0x4165, 0x1015: 0x4185, 0x1016: 0x1161, 0x1017: 0x1169, + 0x1018: 0x1171, 0x1019: 0x1179, 0x101a: 0x1181, 0x101b: 0x41a5, 0x101c: 0x1189, 0x101d: 0x1191, + 0x101e: 0x1199, 0x101f: 0x41c5, 0x1020: 0x41e5, 0x1021: 0x11a1, 0x1022: 0x4205, 0x1023: 0x4225, + 0x1024: 0x4245, 0x1025: 0x11a9, 0x1026: 0x4265, 0x1027: 0x11b1, 0x1028: 0x11b9, 0x1029: 0x10f9, + 0x102a: 0x4285, 0x102b: 0x42a5, 0x102c: 0x42c5, 0x102d: 0x42e5, 0x102e: 0x11c1, 0x102f: 0x11c9, + 0x1030: 0x11d1, 0x1031: 0x11d9, 0x1032: 0x4305, 0x1033: 0x11e1, 0x1034: 0x11e9, 0x1035: 0x11f1, + 0x1036: 0x4325, 0x1037: 0x11f9, 0x1038: 0x1201, 0x1039: 0x11f9, 0x103a: 
0x1209, 0x103b: 0x1211, + 0x103c: 0x4345, 0x103d: 0x1219, 0x103e: 0x1221, 0x103f: 0x1219, // Block 0x41, offset 0x1040 - 0x1040: 0x53d1, 0x1041: 0x5421, 0x1042: 0x5471, 0x1043: 0x54c1, 0x1044: 0x5529, 0x1045: 0x5579, - 0x1046: 0x55c9, 0x1047: 0x5619, 0x1048: 0x5699, 0x1049: 0x5701, 0x104a: 0x5739, 0x104b: 0x57b9, - 0x104c: 0x57f1, 0x104d: 0x5859, 0x104e: 0x58c1, 0x104f: 0x5911, 0x1050: 0x5961, 0x1051: 0x59b1, - 0x1052: 0x5a19, 0x1053: 0x5a51, 0x1054: 0x5aa1, 0x1055: 0x5b09, 0x1056: 0x5b41, 0x1057: 0x5bc1, - 0x1058: 0x5c11, 0x1059: 0x5c39, 0x105a: 0x5c61, 0x105b: 0x5c89, 0x105c: 0x5cb1, 0x105d: 0x5cd9, - 0x105e: 0x5d01, 0x105f: 0x5d29, 0x1060: 0x5d51, 0x1061: 0x5d79, 0x1062: 0x5da1, 0x1063: 0x5dd1, - 0x1064: 0x5e01, 0x1065: 0x5e31, 0x1066: 0x5e61, 0x1067: 0x5e91, 0x1068: 0x5ec1, 0x1069: 0x5ef1, - 0x106a: 0x5f21, 0x106b: 0x5f51, 0x106c: 0x5f81, 0x106d: 0x5fb1, 0x106e: 0x5fe1, 0x106f: 0x6011, - 0x1070: 0x6041, 0x1071: 0x4045, 0x1072: 0x6071, 0x1073: 0x6089, 0x1074: 0x4065, 0x1075: 0x60a1, - 0x1076: 0x60b9, 0x1077: 0x60d1, 0x1078: 0x4085, 0x1079: 0x4085, 0x107a: 0x60e9, 0x107b: 0x6101, - 0x107c: 0x6139, 0x107d: 0x6171, 0x107e: 0x61a9, 0x107f: 0x61e1, + 0x1040: 0x4365, 0x1041: 0x4385, 0x1042: 0x0040, 0x1043: 0x1229, 0x1044: 0x1231, 0x1045: 0x1239, + 0x1046: 0x1241, 0x1047: 0x0040, 0x1048: 0x1249, 0x1049: 0x1251, 0x104a: 0x1259, 0x104b: 0x1261, + 0x104c: 0x1269, 0x104d: 0x1271, 0x104e: 0x1199, 0x104f: 0x1279, 0x1050: 0x1281, 0x1051: 0x1289, + 0x1052: 0x43a5, 0x1053: 0x1291, 0x1054: 0x1121, 0x1055: 0x43c5, 0x1056: 0x43e5, 0x1057: 0x1299, + 0x1058: 0x0040, 0x1059: 0x4405, 0x105a: 0x12a1, 0x105b: 0x12a9, 0x105c: 0x12b1, 0x105d: 0x12b9, + 0x105e: 0x12c1, 0x105f: 0x12c9, 0x1060: 0x12d1, 0x1061: 0x12d9, 0x1062: 0x12e1, 0x1063: 0x12e9, + 0x1064: 0x12f1, 0x1065: 0x12f9, 0x1066: 0x1301, 0x1067: 0x1309, 0x1068: 0x1311, 0x1069: 0x1319, + 0x106a: 0x1321, 0x106b: 0x1329, 0x106c: 0x1331, 0x106d: 0x1339, 0x106e: 0x1341, 0x106f: 0x1349, + 0x1070: 0x1351, 0x1071: 0x1359, 0x1072: 0x1361, 
0x1073: 0x1369, 0x1074: 0x1371, 0x1075: 0x1379, + 0x1076: 0x1381, 0x1077: 0x1389, 0x1078: 0x1391, 0x1079: 0x1399, 0x107a: 0x13a1, 0x107b: 0x13a9, + 0x107c: 0x13b1, 0x107d: 0x13b9, 0x107e: 0x13c1, 0x107f: 0x4425, // Block 0x42, offset 0x1080 - 0x1080: 0x6249, 0x1081: 0x6261, 0x1082: 0x40a5, 0x1083: 0x6279, 0x1084: 0x6291, 0x1085: 0x62a9, - 0x1086: 0x62c1, 0x1087: 0x62d9, 0x1088: 0x40c5, 0x1089: 0x62f1, 0x108a: 0x6319, 0x108b: 0x6331, - 0x108c: 0x40e5, 0x108d: 0x40e5, 0x108e: 0x6349, 0x108f: 0x6361, 0x1090: 0x6379, 0x1091: 0x4105, - 0x1092: 0x4125, 0x1093: 0x4145, 0x1094: 0x4165, 0x1095: 0x4185, 0x1096: 0x6391, 0x1097: 0x63a9, - 0x1098: 0x63c1, 0x1099: 0x63d9, 0x109a: 0x63f1, 0x109b: 0x41a5, 0x109c: 0x6409, 0x109d: 0x6421, - 0x109e: 0x6439, 0x109f: 0x41c5, 0x10a0: 0x41e5, 0x10a1: 0x6451, 0x10a2: 0x4205, 0x10a3: 0x4225, - 0x10a4: 0x4245, 0x10a5: 0x6469, 0x10a6: 0x4265, 0x10a7: 0x6481, 0x10a8: 0x64b1, 0x10a9: 0x6249, - 0x10aa: 0x4285, 0x10ab: 0x42a5, 0x10ac: 0x42c5, 0x10ad: 0x42e5, 0x10ae: 0x64e9, 0x10af: 0x6529, - 0x10b0: 0x6571, 0x10b1: 0x6589, 0x10b2: 0x4305, 0x10b3: 0x65a1, 0x10b4: 0x65b9, 0x10b5: 0x65d1, - 0x10b6: 0x4325, 0x10b7: 0x65e9, 0x10b8: 0x6601, 0x10b9: 0x65e9, 0x10ba: 0x6619, 0x10bb: 0x6631, - 0x10bc: 0x4345, 0x10bd: 0x6649, 0x10be: 0x6661, 0x10bf: 0x6649, + 0x1080: 0xe00d, 0x1081: 0x0008, 0x1082: 0xe00d, 0x1083: 0x0008, 0x1084: 0xe00d, 0x1085: 0x0008, + 0x1086: 0xe00d, 0x1087: 0x0008, 0x1088: 0xe00d, 0x1089: 0x0008, 0x108a: 0xe00d, 0x108b: 0x0008, + 0x108c: 0xe00d, 0x108d: 0x0008, 0x108e: 0xe00d, 0x108f: 0x0008, 0x1090: 0xe00d, 0x1091: 0x0008, + 0x1092: 0xe00d, 0x1093: 0x0008, 0x1094: 0xe00d, 0x1095: 0x0008, 0x1096: 0xe00d, 0x1097: 0x0008, + 0x1098: 0xe00d, 0x1099: 0x0008, 0x109a: 0xe00d, 0x109b: 0x0008, 0x109c: 0xe00d, 0x109d: 0x0008, + 0x109e: 0xe00d, 0x109f: 0x0008, 0x10a0: 0xe00d, 0x10a1: 0x0008, 0x10a2: 0xe00d, 0x10a3: 0x0008, + 0x10a4: 0xe00d, 0x10a5: 0x0008, 0x10a6: 0xe00d, 0x10a7: 0x0008, 0x10a8: 0xe00d, 0x10a9: 0x0008, + 0x10aa: 0xe00d, 0x10ab: 
0x0008, 0x10ac: 0xe00d, 0x10ad: 0x0008, 0x10ae: 0x0008, 0x10af: 0x3308, + 0x10b0: 0x3318, 0x10b1: 0x3318, 0x10b2: 0x3318, 0x10b3: 0x0018, 0x10b4: 0x3308, 0x10b5: 0x3308, + 0x10b6: 0x3308, 0x10b7: 0x3308, 0x10b8: 0x3308, 0x10b9: 0x3308, 0x10ba: 0x3308, 0x10bb: 0x3308, + 0x10bc: 0x3308, 0x10bd: 0x3308, 0x10be: 0x0018, 0x10bf: 0x0008, // Block 0x43, offset 0x10c0 - 0x10c0: 0x4365, 0x10c1: 0x4385, 0x10c2: 0x0040, 0x10c3: 0x6679, 0x10c4: 0x6691, 0x10c5: 0x66a9, - 0x10c6: 0x66c1, 0x10c7: 0x0040, 0x10c8: 0x66f9, 0x10c9: 0x6711, 0x10ca: 0x6729, 0x10cb: 0x6741, - 0x10cc: 0x6759, 0x10cd: 0x6771, 0x10ce: 0x6439, 0x10cf: 0x6789, 0x10d0: 0x67a1, 0x10d1: 0x67b9, - 0x10d2: 0x43a5, 0x10d3: 0x67d1, 0x10d4: 0x62c1, 0x10d5: 0x43c5, 0x10d6: 0x43e5, 0x10d7: 0x67e9, - 0x10d8: 0x0040, 0x10d9: 0x4405, 0x10da: 0x6801, 0x10db: 0x6819, 0x10dc: 0x6831, 0x10dd: 0x6849, - 0x10de: 0x6861, 0x10df: 0x6891, 0x10e0: 0x68c1, 0x10e1: 0x68e9, 0x10e2: 0x6911, 0x10e3: 0x6939, - 0x10e4: 0x6961, 0x10e5: 0x6989, 0x10e6: 0x69b1, 0x10e7: 0x69d9, 0x10e8: 0x6a01, 0x10e9: 0x6a29, - 0x10ea: 0x6a59, 0x10eb: 0x6a89, 0x10ec: 0x6ab9, 0x10ed: 0x6ae9, 0x10ee: 0x6b19, 0x10ef: 0x6b49, - 0x10f0: 0x6b79, 0x10f1: 0x6ba9, 0x10f2: 0x6bd9, 0x10f3: 0x6c09, 0x10f4: 0x6c39, 0x10f5: 0x6c69, - 0x10f6: 0x6c99, 0x10f7: 0x6cc9, 0x10f8: 0x6cf9, 0x10f9: 0x6d29, 0x10fa: 0x6d59, 0x10fb: 0x6d89, - 0x10fc: 0x6db9, 0x10fd: 0x6de9, 0x10fe: 0x6e19, 0x10ff: 0x4425, + 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, + 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, + 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, + 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, + 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0x02d1, 0x10dd: 0x13c9, + 0x10de: 0x3308, 0x10df: 0x3308, 0x10e0: 0x0008, 0x10e1: 0x0008, 0x10e2: 0x0008, 0x10e3: 0x0008, + 
0x10e4: 0x0008, 0x10e5: 0x0008, 0x10e6: 0x0008, 0x10e7: 0x0008, 0x10e8: 0x0008, 0x10e9: 0x0008, + 0x10ea: 0x0008, 0x10eb: 0x0008, 0x10ec: 0x0008, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x0008, + 0x10f0: 0x0008, 0x10f1: 0x0008, 0x10f2: 0x0008, 0x10f3: 0x0008, 0x10f4: 0x0008, 0x10f5: 0x0008, + 0x10f6: 0x0008, 0x10f7: 0x0008, 0x10f8: 0x0008, 0x10f9: 0x0008, 0x10fa: 0x0008, 0x10fb: 0x0008, + 0x10fc: 0x0008, 0x10fd: 0x0008, 0x10fe: 0x0008, 0x10ff: 0x0008, // Block 0x44, offset 0x1100 - 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, - 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, - 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, - 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, - 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, - 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1100: 0x0018, 0x1101: 0x0018, 0x1102: 0x0018, 0x1103: 0x0018, 0x1104: 0x0018, 0x1105: 0x0018, + 0x1106: 0x0018, 0x1107: 0x0018, 0x1108: 0x0018, 0x1109: 0x0018, 0x110a: 0x0018, 0x110b: 0x0018, + 0x110c: 0x0018, 0x110d: 0x0018, 0x110e: 0x0018, 0x110f: 0x0018, 0x1110: 0x0018, 0x1111: 0x0018, + 0x1112: 0x0018, 0x1113: 0x0018, 0x1114: 0x0018, 0x1115: 0x0018, 0x1116: 0x0018, 0x1117: 0x0008, + 0x1118: 0x0008, 0x1119: 0x0008, 0x111a: 0x0008, 0x111b: 0x0008, 0x111c: 0x0008, 0x111d: 0x0008, + 0x111e: 0x0008, 0x111f: 0x0008, 0x1120: 0x0018, 0x1121: 0x0018, 0x1122: 0xe00d, 0x1123: 0x0008, 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, - 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, - 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, - 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 
0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, - 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0xe00d, 0x112f: 0x0008, + 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0xe00d, 0x1133: 0x0008, 0x1134: 0xe00d, 0x1135: 0x0008, + 0x1136: 0xe00d, 0x1137: 0x0008, 0x1138: 0xe00d, 0x1139: 0x0008, 0x113a: 0xe00d, 0x113b: 0x0008, + 0x113c: 0xe00d, 0x113d: 0x0008, 0x113e: 0xe00d, 0x113f: 0x0008, // Block 0x45, offset 0x1140 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, - 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e49, - 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, - 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, - 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, - 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, - 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, - 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0xe00d, 0x115d: 0x0008, + 0x115e: 0xe00d, 0x115f: 0x0008, 0x1160: 0xe00d, 0x1161: 0x0008, 0x1162: 0xe00d, 0x1163: 0x0008, + 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, + 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, + 0x1170: 0xe0fd, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 
0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0xe01d, 0x117a: 0x0008, 0x117b: 0xe03d, + 0x117c: 0x0008, 0x117d: 0x4445, 0x117e: 0xe00d, 0x117f: 0x0008, // Block 0x46, offset 0x1180 - 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, - 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, - 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, - 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, - 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, - 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, + 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0x0008, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0xe03d, + 0x118c: 0x0008, 0x118d: 0x0409, 0x118e: 0x0008, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, + 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0x0008, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, + 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, + 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, - 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, - 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11aa: 0x13d1, 0x11ab: 0x0371, 0x11ac: 0x0401, 0x11ad: 0x13d9, 0x11ae: 0x0421, 0x11af: 0x0008, + 0x11b0: 0x13e1, 0x11b1: 0x13e9, 0x11b2: 0x0429, 0x11b3: 0x4465, 0x11b4: 0xe00d, 0x11b5: 0x0008, 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, 
0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, // Block 0x47, offset 0x11c0 - 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, - 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, - 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, - 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, - 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, - 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, - 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, - 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, - 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, - 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, - 0x11fc: 0x0008, 0x11fd: 0x4445, 0x11fe: 0xe00d, 0x11ff: 0x0008, + 0x11c0: 0x650d, 0x11c1: 0x652d, 0x11c2: 0x654d, 0x11c3: 0x656d, 0x11c4: 0x658d, 0x11c5: 0x65ad, + 0x11c6: 0x65cd, 0x11c7: 0x65ed, 0x11c8: 0x660d, 0x11c9: 0x662d, 0x11ca: 0x664d, 0x11cb: 0x666d, + 0x11cc: 0x668d, 0x11cd: 0x66ad, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0x66cd, 0x11d1: 0x0008, + 0x11d2: 0x66ed, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x670d, 0x11d6: 0x672d, 0x11d7: 0x674d, + 0x11d8: 0x676d, 0x11d9: 0x678d, 0x11da: 0x67ad, 0x11db: 0x67cd, 0x11dc: 0x67ed, 0x11dd: 0x680d, + 0x11de: 0x682d, 0x11df: 0x0008, 0x11e0: 0x684d, 0x11e1: 0x0008, 0x11e2: 0x686d, 0x11e3: 0x0008, + 0x11e4: 0x0008, 0x11e5: 0x688d, 0x11e6: 0x68ad, 0x11e7: 0x0008, 0x11e8: 0x0008, 0x11e9: 0x0008, + 0x11ea: 0x68cd, 0x11eb: 0x68ed, 0x11ec: 0x690d, 0x11ed: 0x692d, 0x11ee: 0x694d, 0x11ef: 0x696d, + 0x11f0: 0x698d, 0x11f1: 0x69ad, 0x11f2: 0x69cd, 0x11f3: 0x69ed, 0x11f4: 
0x6a0d, 0x11f5: 0x6a2d, + 0x11f6: 0x6a4d, 0x11f7: 0x6a6d, 0x11f8: 0x6a8d, 0x11f9: 0x6aad, 0x11fa: 0x6acd, 0x11fb: 0x6aed, + 0x11fc: 0x6b0d, 0x11fd: 0x6b2d, 0x11fe: 0x6b4d, 0x11ff: 0x6b6d, // Block 0x48, offset 0x1200 - 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, - 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, - 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, - 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, - 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, - 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, - 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, - 0x122a: 0x6e61, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e79, 0x122e: 0x1221, 0x122f: 0x0008, - 0x1230: 0x6e91, 0x1231: 0x6ea9, 0x1232: 0x1239, 0x1233: 0x4465, 0x1234: 0xe00d, 0x1235: 0x0008, - 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0xe00d, 0x1239: 0x0008, 0x123a: 0xe00d, 0x123b: 0x0008, - 0x123c: 0xe00d, 0x123d: 0x0008, 0x123e: 0xe00d, 0x123f: 0x0008, + 0x1200: 0x7acd, 0x1201: 0x7aed, 0x1202: 0x7b0d, 0x1203: 0x7b2d, 0x1204: 0x7b4d, 0x1205: 0x7b6d, + 0x1206: 0x7b8d, 0x1207: 0x7bad, 0x1208: 0x7bcd, 0x1209: 0x7bed, 0x120a: 0x7c0d, 0x120b: 0x7c2d, + 0x120c: 0x7c4d, 0x120d: 0x7c6d, 0x120e: 0x7c8d, 0x120f: 0x1409, 0x1210: 0x1411, 0x1211: 0x1419, + 0x1212: 0x7cad, 0x1213: 0x7ccd, 0x1214: 0x7ced, 0x1215: 0x1421, 0x1216: 0x1429, 0x1217: 0x1431, + 0x1218: 0x7d0d, 0x1219: 0x7d2d, 0x121a: 0x0040, 0x121b: 0x0040, 0x121c: 0x0040, 0x121d: 0x0040, + 0x121e: 0x0040, 0x121f: 0x0040, 0x1220: 0x0040, 0x1221: 0x0040, 0x1222: 0x0040, 0x1223: 0x0040, + 0x1224: 0x0040, 0x1225: 0x0040, 0x1226: 0x0040, 0x1227: 0x0040, 0x1228: 0x0040, 0x1229: 0x0040, + 0x122a: 0x0040, 0x122b: 0x0040, 0x122c: 0x0040, 
0x122d: 0x0040, 0x122e: 0x0040, 0x122f: 0x0040, + 0x1230: 0x0040, 0x1231: 0x0040, 0x1232: 0x0040, 0x1233: 0x0040, 0x1234: 0x0040, 0x1235: 0x0040, + 0x1236: 0x0040, 0x1237: 0x0040, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, // Block 0x49, offset 0x1240 - 0x1240: 0x650d, 0x1241: 0x652d, 0x1242: 0x654d, 0x1243: 0x656d, 0x1244: 0x658d, 0x1245: 0x65ad, - 0x1246: 0x65cd, 0x1247: 0x65ed, 0x1248: 0x660d, 0x1249: 0x662d, 0x124a: 0x664d, 0x124b: 0x666d, - 0x124c: 0x668d, 0x124d: 0x66ad, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x66cd, 0x1251: 0x0008, - 0x1252: 0x66ed, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x670d, 0x1256: 0x672d, 0x1257: 0x674d, - 0x1258: 0x676d, 0x1259: 0x678d, 0x125a: 0x67ad, 0x125b: 0x67cd, 0x125c: 0x67ed, 0x125d: 0x680d, - 0x125e: 0x682d, 0x125f: 0x0008, 0x1260: 0x684d, 0x1261: 0x0008, 0x1262: 0x686d, 0x1263: 0x0008, - 0x1264: 0x0008, 0x1265: 0x688d, 0x1266: 0x68ad, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, - 0x126a: 0x68cd, 0x126b: 0x68ed, 0x126c: 0x690d, 0x126d: 0x692d, 0x126e: 0x694d, 0x126f: 0x696d, - 0x1270: 0x698d, 0x1271: 0x69ad, 0x1272: 0x69cd, 0x1273: 0x69ed, 0x1274: 0x6a0d, 0x1275: 0x6a2d, - 0x1276: 0x6a4d, 0x1277: 0x6a6d, 0x1278: 0x6a8d, 0x1279: 0x6aad, 0x127a: 0x6acd, 0x127b: 0x6aed, - 0x127c: 0x6b0d, 0x127d: 0x6b2d, 0x127e: 0x6b4d, 0x127f: 0x6b6d, + 0x1240: 0x1439, 0x1241: 0x1441, 0x1242: 0x1449, 0x1243: 0x7d4d, 0x1244: 0x7d6d, 0x1245: 0x1451, + 0x1246: 0x1451, 0x1247: 0x0040, 0x1248: 0x0040, 0x1249: 0x0040, 0x124a: 0x0040, 0x124b: 0x0040, + 0x124c: 0x0040, 0x124d: 0x0040, 0x124e: 0x0040, 0x124f: 0x0040, 0x1250: 0x0040, 0x1251: 0x0040, + 0x1252: 0x0040, 0x1253: 0x1459, 0x1254: 0x1461, 0x1255: 0x1469, 0x1256: 0x1471, 0x1257: 0x1479, + 0x1258: 0x0040, 0x1259: 0x0040, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x1481, + 0x125e: 0x3308, 0x125f: 0x1489, 0x1260: 0x1491, 0x1261: 0x0779, 0x1262: 0x0791, 0x1263: 0x1499, + 0x1264: 0x14a1, 0x1265: 
0x14a9, 0x1266: 0x14b1, 0x1267: 0x14b9, 0x1268: 0x14c1, 0x1269: 0x071a, + 0x126a: 0x14c9, 0x126b: 0x14d1, 0x126c: 0x14d9, 0x126d: 0x14e1, 0x126e: 0x14e9, 0x126f: 0x14f1, + 0x1270: 0x14f9, 0x1271: 0x1501, 0x1272: 0x1509, 0x1273: 0x1511, 0x1274: 0x1519, 0x1275: 0x1521, + 0x1276: 0x1529, 0x1277: 0x0040, 0x1278: 0x1531, 0x1279: 0x1539, 0x127a: 0x1541, 0x127b: 0x1549, + 0x127c: 0x1551, 0x127d: 0x0040, 0x127e: 0x1559, 0x127f: 0x0040, // Block 0x4a, offset 0x1280 - 0x1280: 0x7acd, 0x1281: 0x7aed, 0x1282: 0x7b0d, 0x1283: 0x7b2d, 0x1284: 0x7b4d, 0x1285: 0x7b6d, - 0x1286: 0x7b8d, 0x1287: 0x7bad, 0x1288: 0x7bcd, 0x1289: 0x7bed, 0x128a: 0x7c0d, 0x128b: 0x7c2d, - 0x128c: 0x7c4d, 0x128d: 0x7c6d, 0x128e: 0x7c8d, 0x128f: 0x6f19, 0x1290: 0x6f41, 0x1291: 0x6f69, - 0x1292: 0x7cad, 0x1293: 0x7ccd, 0x1294: 0x7ced, 0x1295: 0x6f91, 0x1296: 0x6fb9, 0x1297: 0x6fe1, - 0x1298: 0x7d0d, 0x1299: 0x7d2d, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, - 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, - 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, - 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, - 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, - 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, - 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + 0x1280: 0x1561, 0x1281: 0x1569, 0x1282: 0x0040, 0x1283: 0x1571, 0x1284: 0x1579, 0x1285: 0x0040, + 0x1286: 0x1581, 0x1287: 0x1589, 0x1288: 0x1591, 0x1289: 0x1599, 0x128a: 0x15a1, 0x128b: 0x15a9, + 0x128c: 0x15b1, 0x128d: 0x15b9, 0x128e: 0x15c1, 0x128f: 0x15c9, 0x1290: 0x15d1, 0x1291: 0x15d1, + 0x1292: 0x15d9, 0x1293: 0x15d9, 0x1294: 0x15d9, 0x1295: 0x15d9, 0x1296: 0x15e1, 0x1297: 0x15e1, + 0x1298: 0x15e1, 0x1299: 0x15e1, 0x129a: 0x15e9, 0x129b: 0x15e9, 0x129c: 0x15e9, 0x129d: 0x15e9, + 
0x129e: 0x15f1, 0x129f: 0x15f1, 0x12a0: 0x15f1, 0x12a1: 0x15f1, 0x12a2: 0x15f9, 0x12a3: 0x15f9, + 0x12a4: 0x15f9, 0x12a5: 0x15f9, 0x12a6: 0x1601, 0x12a7: 0x1601, 0x12a8: 0x1601, 0x12a9: 0x1601, + 0x12aa: 0x1609, 0x12ab: 0x1609, 0x12ac: 0x1609, 0x12ad: 0x1609, 0x12ae: 0x1611, 0x12af: 0x1611, + 0x12b0: 0x1611, 0x12b1: 0x1611, 0x12b2: 0x1619, 0x12b3: 0x1619, 0x12b4: 0x1619, 0x12b5: 0x1619, + 0x12b6: 0x1621, 0x12b7: 0x1621, 0x12b8: 0x1621, 0x12b9: 0x1621, 0x12ba: 0x1629, 0x12bb: 0x1629, + 0x12bc: 0x1629, 0x12bd: 0x1629, 0x12be: 0x1631, 0x12bf: 0x1631, // Block 0x4b, offset 0x12c0 - 0x12c0: 0x7009, 0x12c1: 0x7021, 0x12c2: 0x7039, 0x12c3: 0x7d4d, 0x12c4: 0x7d6d, 0x12c5: 0x7051, - 0x12c6: 0x7051, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, - 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, - 0x12d2: 0x0040, 0x12d3: 0x7069, 0x12d4: 0x7091, 0x12d5: 0x70b9, 0x12d6: 0x70e1, 0x12d7: 0x7109, - 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x7131, - 0x12de: 0x3308, 0x12df: 0x7159, 0x12e0: 0x7181, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7199, - 0x12e4: 0x71b1, 0x12e5: 0x71c9, 0x12e6: 0x71e1, 0x12e7: 0x71f9, 0x12e8: 0x7211, 0x12e9: 0x1fb2, - 0x12ea: 0x7229, 0x12eb: 0x7251, 0x12ec: 0x7279, 0x12ed: 0x72b1, 0x12ee: 0x72e9, 0x12ef: 0x7311, - 0x12f0: 0x7339, 0x12f1: 0x7361, 0x12f2: 0x7389, 0x12f3: 0x73b1, 0x12f4: 0x73d9, 0x12f5: 0x7401, - 0x12f6: 0x7429, 0x12f7: 0x0040, 0x12f8: 0x7451, 0x12f9: 0x7479, 0x12fa: 0x74a1, 0x12fb: 0x74c9, - 0x12fc: 0x74f1, 0x12fd: 0x0040, 0x12fe: 0x7519, 0x12ff: 0x0040, + 0x12c0: 0x1631, 0x12c1: 0x1631, 0x12c2: 0x1639, 0x12c3: 0x1639, 0x12c4: 0x1641, 0x12c5: 0x1641, + 0x12c6: 0x1649, 0x12c7: 0x1649, 0x12c8: 0x1651, 0x12c9: 0x1651, 0x12ca: 0x1659, 0x12cb: 0x1659, + 0x12cc: 0x1661, 0x12cd: 0x1661, 0x12ce: 0x1669, 0x12cf: 0x1669, 0x12d0: 0x1669, 0x12d1: 0x1669, + 0x12d2: 0x1671, 0x12d3: 0x1671, 0x12d4: 0x1671, 0x12d5: 0x1671, 0x12d6: 
0x1679, 0x12d7: 0x1679, + 0x12d8: 0x1679, 0x12d9: 0x1679, 0x12da: 0x1681, 0x12db: 0x1681, 0x12dc: 0x1681, 0x12dd: 0x1681, + 0x12de: 0x1689, 0x12df: 0x1689, 0x12e0: 0x1691, 0x12e1: 0x1691, 0x12e2: 0x1691, 0x12e3: 0x1691, + 0x12e4: 0x1699, 0x12e5: 0x1699, 0x12e6: 0x16a1, 0x12e7: 0x16a1, 0x12e8: 0x16a1, 0x12e9: 0x16a1, + 0x12ea: 0x16a9, 0x12eb: 0x16a9, 0x12ec: 0x16a9, 0x12ed: 0x16a9, 0x12ee: 0x16b1, 0x12ef: 0x16b1, + 0x12f0: 0x16b9, 0x12f1: 0x16b9, 0x12f2: 0x0818, 0x12f3: 0x0818, 0x12f4: 0x0818, 0x12f5: 0x0818, + 0x12f6: 0x0818, 0x12f7: 0x0818, 0x12f8: 0x0818, 0x12f9: 0x0818, 0x12fa: 0x0818, 0x12fb: 0x0818, + 0x12fc: 0x0818, 0x12fd: 0x0818, 0x12fe: 0x0818, 0x12ff: 0x0818, // Block 0x4c, offset 0x1300 - 0x1300: 0x7541, 0x1301: 0x7569, 0x1302: 0x0040, 0x1303: 0x7591, 0x1304: 0x75b9, 0x1305: 0x0040, - 0x1306: 0x75e1, 0x1307: 0x7609, 0x1308: 0x7631, 0x1309: 0x7659, 0x130a: 0x7681, 0x130b: 0x76a9, - 0x130c: 0x76d1, 0x130d: 0x76f9, 0x130e: 0x7721, 0x130f: 0x7749, 0x1310: 0x7771, 0x1311: 0x7771, - 0x1312: 0x7789, 0x1313: 0x7789, 0x1314: 0x7789, 0x1315: 0x7789, 0x1316: 0x77a1, 0x1317: 0x77a1, - 0x1318: 0x77a1, 0x1319: 0x77a1, 0x131a: 0x77b9, 0x131b: 0x77b9, 0x131c: 0x77b9, 0x131d: 0x77b9, - 0x131e: 0x77d1, 0x131f: 0x77d1, 0x1320: 0x77d1, 0x1321: 0x77d1, 0x1322: 0x77e9, 0x1323: 0x77e9, - 0x1324: 0x77e9, 0x1325: 0x77e9, 0x1326: 0x7801, 0x1327: 0x7801, 0x1328: 0x7801, 0x1329: 0x7801, - 0x132a: 0x7819, 0x132b: 0x7819, 0x132c: 0x7819, 0x132d: 0x7819, 0x132e: 0x7831, 0x132f: 0x7831, - 0x1330: 0x7831, 0x1331: 0x7831, 0x1332: 0x7849, 0x1333: 0x7849, 0x1334: 0x7849, 0x1335: 0x7849, - 0x1336: 0x7861, 0x1337: 0x7861, 0x1338: 0x7861, 0x1339: 0x7861, 0x133a: 0x7879, 0x133b: 0x7879, - 0x133c: 0x7879, 0x133d: 0x7879, 0x133e: 0x7891, 0x133f: 0x7891, + 0x1300: 0x0818, 0x1301: 0x0818, 0x1302: 0x0040, 0x1303: 0x0040, 0x1304: 0x0040, 0x1305: 0x0040, + 0x1306: 0x0040, 0x1307: 0x0040, 0x1308: 0x0040, 0x1309: 0x0040, 0x130a: 0x0040, 0x130b: 0x0040, + 0x130c: 0x0040, 0x130d: 0x0040, 0x130e: 0x0040, 
0x130f: 0x0040, 0x1310: 0x0040, 0x1311: 0x0040, + 0x1312: 0x0040, 0x1313: 0x16c1, 0x1314: 0x16c1, 0x1315: 0x16c1, 0x1316: 0x16c1, 0x1317: 0x16c9, + 0x1318: 0x16c9, 0x1319: 0x16d1, 0x131a: 0x16d1, 0x131b: 0x16d9, 0x131c: 0x16d9, 0x131d: 0x0149, + 0x131e: 0x16e1, 0x131f: 0x16e1, 0x1320: 0x16e9, 0x1321: 0x16e9, 0x1322: 0x16f1, 0x1323: 0x16f1, + 0x1324: 0x16f9, 0x1325: 0x16f9, 0x1326: 0x16f9, 0x1327: 0x16f9, 0x1328: 0x1701, 0x1329: 0x1701, + 0x132a: 0x1709, 0x132b: 0x1709, 0x132c: 0x1711, 0x132d: 0x1711, 0x132e: 0x1719, 0x132f: 0x1719, + 0x1330: 0x1721, 0x1331: 0x1721, 0x1332: 0x1729, 0x1333: 0x1729, 0x1334: 0x1731, 0x1335: 0x1731, + 0x1336: 0x1739, 0x1337: 0x1739, 0x1338: 0x1739, 0x1339: 0x1741, 0x133a: 0x1741, 0x133b: 0x1741, + 0x133c: 0x1749, 0x133d: 0x1749, 0x133e: 0x1749, 0x133f: 0x1749, // Block 0x4d, offset 0x1340 - 0x1340: 0x7891, 0x1341: 0x7891, 0x1342: 0x78a9, 0x1343: 0x78a9, 0x1344: 0x78c1, 0x1345: 0x78c1, - 0x1346: 0x78d9, 0x1347: 0x78d9, 0x1348: 0x78f1, 0x1349: 0x78f1, 0x134a: 0x7909, 0x134b: 0x7909, - 0x134c: 0x7921, 0x134d: 0x7921, 0x134e: 0x7939, 0x134f: 0x7939, 0x1350: 0x7939, 0x1351: 0x7939, - 0x1352: 0x7951, 0x1353: 0x7951, 0x1354: 0x7951, 0x1355: 0x7951, 0x1356: 0x7969, 0x1357: 0x7969, - 0x1358: 0x7969, 0x1359: 0x7969, 0x135a: 0x7981, 0x135b: 0x7981, 0x135c: 0x7981, 0x135d: 0x7981, - 0x135e: 0x7999, 0x135f: 0x7999, 0x1360: 0x79b1, 0x1361: 0x79b1, 0x1362: 0x79b1, 0x1363: 0x79b1, - 0x1364: 0x79c9, 0x1365: 0x79c9, 0x1366: 0x79e1, 0x1367: 0x79e1, 0x1368: 0x79e1, 0x1369: 0x79e1, - 0x136a: 0x79f9, 0x136b: 0x79f9, 0x136c: 0x79f9, 0x136d: 0x79f9, 0x136e: 0x7a11, 0x136f: 0x7a11, - 0x1370: 0x7a29, 0x1371: 0x7a29, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, - 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, - 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + 0x1340: 0x1949, 0x1341: 0x1951, 0x1342: 0x1959, 0x1343: 0x1961, 0x1344: 0x1969, 0x1345: 0x1971, + 0x1346: 0x1979, 0x1347: 
0x1981, 0x1348: 0x1989, 0x1349: 0x1991, 0x134a: 0x1999, 0x134b: 0x19a1, + 0x134c: 0x19a9, 0x134d: 0x19b1, 0x134e: 0x19b9, 0x134f: 0x19c1, 0x1350: 0x19c9, 0x1351: 0x19d1, + 0x1352: 0x19d9, 0x1353: 0x19e1, 0x1354: 0x19e9, 0x1355: 0x19f1, 0x1356: 0x19f9, 0x1357: 0x1a01, + 0x1358: 0x1a09, 0x1359: 0x1a11, 0x135a: 0x1a19, 0x135b: 0x1a21, 0x135c: 0x1a29, 0x135d: 0x1a31, + 0x135e: 0x1a3a, 0x135f: 0x1a42, 0x1360: 0x1a4a, 0x1361: 0x1a52, 0x1362: 0x1a5a, 0x1363: 0x1a62, + 0x1364: 0x1a69, 0x1365: 0x1a71, 0x1366: 0x1761, 0x1367: 0x1a79, 0x1368: 0x1741, 0x1369: 0x1769, + 0x136a: 0x1a81, 0x136b: 0x1a89, 0x136c: 0x1789, 0x136d: 0x1a91, 0x136e: 0x1791, 0x136f: 0x1799, + 0x1370: 0x1a99, 0x1371: 0x1aa1, 0x1372: 0x17b9, 0x1373: 0x1aa9, 0x1374: 0x17c1, 0x1375: 0x17c9, + 0x1376: 0x1ab1, 0x1377: 0x1ab9, 0x1378: 0x17d9, 0x1379: 0x1ac1, 0x137a: 0x17e1, 0x137b: 0x17e9, + 0x137c: 0x18d1, 0x137d: 0x18d9, 0x137e: 0x18f1, 0x137f: 0x18f9, // Block 0x4e, offset 0x1380 - 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, - 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, - 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, - 0x1392: 0x0040, 0x1393: 0x7a41, 0x1394: 0x7a41, 0x1395: 0x7a41, 0x1396: 0x7a41, 0x1397: 0x7a59, - 0x1398: 0x7a59, 0x1399: 0x7a71, 0x139a: 0x7a71, 0x139b: 0x7a89, 0x139c: 0x7a89, 0x139d: 0x0479, - 0x139e: 0x7aa1, 0x139f: 0x7aa1, 0x13a0: 0x7ab9, 0x13a1: 0x7ab9, 0x13a2: 0x7ad1, 0x13a3: 0x7ad1, - 0x13a4: 0x7ae9, 0x13a5: 0x7ae9, 0x13a6: 0x7ae9, 0x13a7: 0x7ae9, 0x13a8: 0x7b01, 0x13a9: 0x7b01, - 0x13aa: 0x7b19, 0x13ab: 0x7b19, 0x13ac: 0x7b41, 0x13ad: 0x7b41, 0x13ae: 0x7b69, 0x13af: 0x7b69, - 0x13b0: 0x7b91, 0x13b1: 0x7b91, 0x13b2: 0x7bb9, 0x13b3: 0x7bb9, 0x13b4: 0x7be1, 0x13b5: 0x7be1, - 0x13b6: 0x7c09, 0x13b7: 0x7c09, 0x13b8: 0x7c09, 0x13b9: 0x7c31, 0x13ba: 0x7c31, 0x13bb: 0x7c31, - 0x13bc: 0x7c59, 0x13bd: 0x7c59, 0x13be: 0x7c59, 0x13bf: 0x7c59, + 
0x1380: 0x1901, 0x1381: 0x1921, 0x1382: 0x1929, 0x1383: 0x1931, 0x1384: 0x1939, 0x1385: 0x1959, + 0x1386: 0x1961, 0x1387: 0x1969, 0x1388: 0x1ac9, 0x1389: 0x1989, 0x138a: 0x1ad1, 0x138b: 0x1ad9, + 0x138c: 0x19b9, 0x138d: 0x1ae1, 0x138e: 0x19c1, 0x138f: 0x19c9, 0x1390: 0x1a31, 0x1391: 0x1ae9, + 0x1392: 0x1af1, 0x1393: 0x1a09, 0x1394: 0x1af9, 0x1395: 0x1a11, 0x1396: 0x1a19, 0x1397: 0x1751, + 0x1398: 0x1759, 0x1399: 0x1b01, 0x139a: 0x1761, 0x139b: 0x1b09, 0x139c: 0x1771, 0x139d: 0x1779, + 0x139e: 0x1781, 0x139f: 0x1789, 0x13a0: 0x1b11, 0x13a1: 0x17a1, 0x13a2: 0x17a9, 0x13a3: 0x17b1, + 0x13a4: 0x17b9, 0x13a5: 0x1b19, 0x13a6: 0x17d9, 0x13a7: 0x17f1, 0x13a8: 0x17f9, 0x13a9: 0x1801, + 0x13aa: 0x1809, 0x13ab: 0x1811, 0x13ac: 0x1821, 0x13ad: 0x1829, 0x13ae: 0x1831, 0x13af: 0x1839, + 0x13b0: 0x1841, 0x13b1: 0x1849, 0x13b2: 0x1b21, 0x13b3: 0x1851, 0x13b4: 0x1859, 0x13b5: 0x1861, + 0x13b6: 0x1869, 0x13b7: 0x1871, 0x13b8: 0x1879, 0x13b9: 0x1889, 0x13ba: 0x1891, 0x13bb: 0x1899, + 0x13bc: 0x18a1, 0x13bd: 0x18a9, 0x13be: 0x18b1, 0x13bf: 0x18b9, // Block 0x4f, offset 0x13c0 - 0x13c0: 0x8649, 0x13c1: 0x8671, 0x13c2: 0x8699, 0x13c3: 0x86c1, 0x13c4: 0x86e9, 0x13c5: 0x8711, - 0x13c6: 0x8739, 0x13c7: 0x8761, 0x13c8: 0x8789, 0x13c9: 0x87b1, 0x13ca: 0x87d9, 0x13cb: 0x8801, - 0x13cc: 0x8829, 0x13cd: 0x8851, 0x13ce: 0x8879, 0x13cf: 0x88a1, 0x13d0: 0x88c9, 0x13d1: 0x88f1, - 0x13d2: 0x8919, 0x13d3: 0x8941, 0x13d4: 0x8969, 0x13d5: 0x8991, 0x13d6: 0x89b9, 0x13d7: 0x89e1, - 0x13d8: 0x8a09, 0x13d9: 0x8a31, 0x13da: 0x8a59, 0x13db: 0x8a81, 0x13dc: 0x8aa9, 0x13dd: 0x8ad1, - 0x13de: 0x8afa, 0x13df: 0x8b2a, 0x13e0: 0x8b5a, 0x13e1: 0x8b8a, 0x13e2: 0x8bba, 0x13e3: 0x8bea, - 0x13e4: 0x8c19, 0x13e5: 0x8c41, 0x13e6: 0x7cc1, 0x13e7: 0x8c69, 0x13e8: 0x7c31, 0x13e9: 0x7ce9, - 0x13ea: 0x8c91, 0x13eb: 0x8cb9, 0x13ec: 0x7d89, 0x13ed: 0x8ce1, 0x13ee: 0x7db1, 0x13ef: 0x7dd9, - 0x13f0: 0x8d09, 0x13f1: 0x8d31, 0x13f2: 0x7e79, 0x13f3: 0x8d59, 0x13f4: 0x7ea1, 0x13f5: 0x7ec9, - 0x13f6: 0x8d81, 0x13f7: 0x8da9, 0x13f8: 
0x7f19, 0x13f9: 0x8dd1, 0x13fa: 0x7f41, 0x13fb: 0x7f69, - 0x13fc: 0x83f1, 0x13fd: 0x8419, 0x13fe: 0x8491, 0x13ff: 0x84b9, + 0x13c0: 0x18c1, 0x13c1: 0x18c9, 0x13c2: 0x18e1, 0x13c3: 0x18e9, 0x13c4: 0x1909, 0x13c5: 0x1911, + 0x13c6: 0x1919, 0x13c7: 0x1921, 0x13c8: 0x1929, 0x13c9: 0x1941, 0x13ca: 0x1949, 0x13cb: 0x1951, + 0x13cc: 0x1959, 0x13cd: 0x1b29, 0x13ce: 0x1971, 0x13cf: 0x1979, 0x13d0: 0x1981, 0x13d1: 0x1989, + 0x13d2: 0x19a1, 0x13d3: 0x19a9, 0x13d4: 0x19b1, 0x13d5: 0x19b9, 0x13d6: 0x1b31, 0x13d7: 0x19d1, + 0x13d8: 0x19d9, 0x13d9: 0x1b39, 0x13da: 0x19f1, 0x13db: 0x19f9, 0x13dc: 0x1a01, 0x13dd: 0x1a09, + 0x13de: 0x1b41, 0x13df: 0x1761, 0x13e0: 0x1b09, 0x13e1: 0x1789, 0x13e2: 0x1b11, 0x13e3: 0x17b9, + 0x13e4: 0x1b19, 0x13e5: 0x17d9, 0x13e6: 0x1b49, 0x13e7: 0x1841, 0x13e8: 0x1b51, 0x13e9: 0x1b59, + 0x13ea: 0x1b61, 0x13eb: 0x1921, 0x13ec: 0x1929, 0x13ed: 0x1959, 0x13ee: 0x19b9, 0x13ef: 0x1b31, + 0x13f0: 0x1a09, 0x13f1: 0x1b41, 0x13f2: 0x1b69, 0x13f3: 0x1b71, 0x13f4: 0x1b79, 0x13f5: 0x1b81, + 0x13f6: 0x1b89, 0x13f7: 0x1b91, 0x13f8: 0x1b99, 0x13f9: 0x1ba1, 0x13fa: 0x1ba9, 0x13fb: 0x1bb1, + 0x13fc: 0x1bb9, 0x13fd: 0x1bc1, 0x13fe: 0x1bc9, 0x13ff: 0x1bd1, // Block 0x50, offset 0x1400 - 0x1400: 0x84e1, 0x1401: 0x8581, 0x1402: 0x85a9, 0x1403: 0x85d1, 0x1404: 0x85f9, 0x1405: 0x8699, - 0x1406: 0x86c1, 0x1407: 0x86e9, 0x1408: 0x8df9, 0x1409: 0x8789, 0x140a: 0x8e21, 0x140b: 0x8e49, - 0x140c: 0x8879, 0x140d: 0x8e71, 0x140e: 0x88a1, 0x140f: 0x88c9, 0x1410: 0x8ad1, 0x1411: 0x8e99, - 0x1412: 0x8ec1, 0x1413: 0x8a09, 0x1414: 0x8ee9, 0x1415: 0x8a31, 0x1416: 0x8a59, 0x1417: 0x7c71, - 0x1418: 0x7c99, 0x1419: 0x8f11, 0x141a: 0x7cc1, 0x141b: 0x8f39, 0x141c: 0x7d11, 0x141d: 0x7d39, - 0x141e: 0x7d61, 0x141f: 0x7d89, 0x1420: 0x8f61, 0x1421: 0x7e01, 0x1422: 0x7e29, 0x1423: 0x7e51, - 0x1424: 0x7e79, 0x1425: 0x8f89, 0x1426: 0x7f19, 0x1427: 0x7f91, 0x1428: 0x7fb9, 0x1429: 0x7fe1, - 0x142a: 0x8009, 0x142b: 0x8031, 0x142c: 0x8081, 0x142d: 0x80a9, 0x142e: 0x80d1, 0x142f: 0x80f9, - 0x1430: 0x8121, 
0x1431: 0x8149, 0x1432: 0x8fb1, 0x1433: 0x8171, 0x1434: 0x8199, 0x1435: 0x81c1, - 0x1436: 0x81e9, 0x1437: 0x8211, 0x1438: 0x8239, 0x1439: 0x8289, 0x143a: 0x82b1, 0x143b: 0x82d9, - 0x143c: 0x8301, 0x143d: 0x8329, 0x143e: 0x8351, 0x143f: 0x8379, + 0x1400: 0x1bd9, 0x1401: 0x1be1, 0x1402: 0x1be9, 0x1403: 0x1bf1, 0x1404: 0x1bf9, 0x1405: 0x1c01, + 0x1406: 0x1c09, 0x1407: 0x1c11, 0x1408: 0x1c19, 0x1409: 0x1c21, 0x140a: 0x1c29, 0x140b: 0x1c31, + 0x140c: 0x1b59, 0x140d: 0x1c39, 0x140e: 0x1c41, 0x140f: 0x1c49, 0x1410: 0x1c51, 0x1411: 0x1b81, + 0x1412: 0x1b89, 0x1413: 0x1b91, 0x1414: 0x1b99, 0x1415: 0x1ba1, 0x1416: 0x1ba9, 0x1417: 0x1bb1, + 0x1418: 0x1bb9, 0x1419: 0x1bc1, 0x141a: 0x1bc9, 0x141b: 0x1bd1, 0x141c: 0x1bd9, 0x141d: 0x1be1, + 0x141e: 0x1be9, 0x141f: 0x1bf1, 0x1420: 0x1bf9, 0x1421: 0x1c01, 0x1422: 0x1c09, 0x1423: 0x1c11, + 0x1424: 0x1c19, 0x1425: 0x1c21, 0x1426: 0x1c29, 0x1427: 0x1c31, 0x1428: 0x1b59, 0x1429: 0x1c39, + 0x142a: 0x1c41, 0x142b: 0x1c49, 0x142c: 0x1c51, 0x142d: 0x1c21, 0x142e: 0x1c29, 0x142f: 0x1c31, + 0x1430: 0x1b59, 0x1431: 0x1b51, 0x1432: 0x1b61, 0x1433: 0x1881, 0x1434: 0x1829, 0x1435: 0x1831, + 0x1436: 0x1839, 0x1437: 0x1c21, 0x1438: 0x1c29, 0x1439: 0x1c31, 0x143a: 0x1881, 0x143b: 0x1889, + 0x143c: 0x1c59, 0x143d: 0x1c59, 0x143e: 0x0018, 0x143f: 0x0018, // Block 0x51, offset 0x1440 - 0x1440: 0x83a1, 0x1441: 0x83c9, 0x1442: 0x8441, 0x1443: 0x8469, 0x1444: 0x8509, 0x1445: 0x8531, - 0x1446: 0x8559, 0x1447: 0x8581, 0x1448: 0x85a9, 0x1449: 0x8621, 0x144a: 0x8649, 0x144b: 0x8671, - 0x144c: 0x8699, 0x144d: 0x8fd9, 0x144e: 0x8711, 0x144f: 0x8739, 0x1450: 0x8761, 0x1451: 0x8789, - 0x1452: 0x8801, 0x1453: 0x8829, 0x1454: 0x8851, 0x1455: 0x8879, 0x1456: 0x9001, 0x1457: 0x88f1, - 0x1458: 0x8919, 0x1459: 0x9029, 0x145a: 0x8991, 0x145b: 0x89b9, 0x145c: 0x89e1, 0x145d: 0x8a09, - 0x145e: 0x9051, 0x145f: 0x7cc1, 0x1460: 0x8f39, 0x1461: 0x7d89, 0x1462: 0x8f61, 0x1463: 0x7e79, - 0x1464: 0x8f89, 0x1465: 0x7f19, 0x1466: 0x9079, 0x1467: 0x8121, 0x1468: 0x90a1, 0x1469: 
0x90c9, - 0x146a: 0x90f1, 0x146b: 0x8581, 0x146c: 0x85a9, 0x146d: 0x8699, 0x146e: 0x8879, 0x146f: 0x9001, - 0x1470: 0x8a09, 0x1471: 0x9051, 0x1472: 0x9119, 0x1473: 0x9151, 0x1474: 0x9189, 0x1475: 0x91c1, - 0x1476: 0x91e9, 0x1477: 0x9211, 0x1478: 0x9239, 0x1479: 0x9261, 0x147a: 0x9289, 0x147b: 0x92b1, - 0x147c: 0x92d9, 0x147d: 0x9301, 0x147e: 0x9329, 0x147f: 0x9351, + 0x1440: 0x0040, 0x1441: 0x0040, 0x1442: 0x0040, 0x1443: 0x0040, 0x1444: 0x0040, 0x1445: 0x0040, + 0x1446: 0x0040, 0x1447: 0x0040, 0x1448: 0x0040, 0x1449: 0x0040, 0x144a: 0x0040, 0x144b: 0x0040, + 0x144c: 0x0040, 0x144d: 0x0040, 0x144e: 0x0040, 0x144f: 0x0040, 0x1450: 0x1c61, 0x1451: 0x1c69, + 0x1452: 0x1c69, 0x1453: 0x1c71, 0x1454: 0x1c79, 0x1455: 0x1c81, 0x1456: 0x1c89, 0x1457: 0x1c91, + 0x1458: 0x1c99, 0x1459: 0x1c99, 0x145a: 0x1ca1, 0x145b: 0x1ca9, 0x145c: 0x1cb1, 0x145d: 0x1cb9, + 0x145e: 0x1cc1, 0x145f: 0x1cc9, 0x1460: 0x1cc9, 0x1461: 0x1cd1, 0x1462: 0x1cd9, 0x1463: 0x1cd9, + 0x1464: 0x1ce1, 0x1465: 0x1ce1, 0x1466: 0x1ce9, 0x1467: 0x1cf1, 0x1468: 0x1cf1, 0x1469: 0x1cf9, + 0x146a: 0x1d01, 0x146b: 0x1d01, 0x146c: 0x1d09, 0x146d: 0x1d09, 0x146e: 0x1d11, 0x146f: 0x1d19, + 0x1470: 0x1d19, 0x1471: 0x1d21, 0x1472: 0x1d21, 0x1473: 0x1d29, 0x1474: 0x1d31, 0x1475: 0x1d39, + 0x1476: 0x1d41, 0x1477: 0x1d41, 0x1478: 0x1d49, 0x1479: 0x1d51, 0x147a: 0x1d59, 0x147b: 0x1d61, + 0x147c: 0x1d69, 0x147d: 0x1d69, 0x147e: 0x1d71, 0x147f: 0x1d79, // Block 0x52, offset 0x1480 - 0x1480: 0x9379, 0x1481: 0x93a1, 0x1482: 0x93c9, 0x1483: 0x93f1, 0x1484: 0x9419, 0x1485: 0x9441, - 0x1486: 0x9469, 0x1487: 0x9491, 0x1488: 0x94b9, 0x1489: 0x94e1, 0x148a: 0x9509, 0x148b: 0x9531, - 0x148c: 0x90c9, 0x148d: 0x9559, 0x148e: 0x9581, 0x148f: 0x95a9, 0x1490: 0x95d1, 0x1491: 0x91c1, - 0x1492: 0x91e9, 0x1493: 0x9211, 0x1494: 0x9239, 0x1495: 0x9261, 0x1496: 0x9289, 0x1497: 0x92b1, - 0x1498: 0x92d9, 0x1499: 0x9301, 0x149a: 0x9329, 0x149b: 0x9351, 0x149c: 0x9379, 0x149d: 0x93a1, - 0x149e: 0x93c9, 0x149f: 0x93f1, 0x14a0: 0x9419, 0x14a1: 0x9441, 
0x14a2: 0x9469, 0x14a3: 0x9491, - 0x14a4: 0x94b9, 0x14a5: 0x94e1, 0x14a6: 0x9509, 0x14a7: 0x9531, 0x14a8: 0x90c9, 0x14a9: 0x9559, - 0x14aa: 0x9581, 0x14ab: 0x95a9, 0x14ac: 0x95d1, 0x14ad: 0x94e1, 0x14ae: 0x9509, 0x14af: 0x9531, - 0x14b0: 0x90c9, 0x14b1: 0x90a1, 0x14b2: 0x90f1, 0x14b3: 0x8261, 0x14b4: 0x80a9, 0x14b5: 0x80d1, - 0x14b6: 0x80f9, 0x14b7: 0x94e1, 0x14b8: 0x9509, 0x14b9: 0x9531, 0x14ba: 0x8261, 0x14bb: 0x8289, - 0x14bc: 0x95f9, 0x14bd: 0x95f9, 0x14be: 0x0018, 0x14bf: 0x0018, + 0x1480: 0x1f29, 0x1481: 0x1f31, 0x1482: 0x1f39, 0x1483: 0x1f11, 0x1484: 0x1d39, 0x1485: 0x1ce9, + 0x1486: 0x1f41, 0x1487: 0x1f49, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040, + 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x0040, 0x1491: 0x0040, + 0x1492: 0x0040, 0x1493: 0x0040, 0x1494: 0x0040, 0x1495: 0x0040, 0x1496: 0x0040, 0x1497: 0x0040, + 0x1498: 0x0040, 0x1499: 0x0040, 0x149a: 0x0040, 0x149b: 0x0040, 0x149c: 0x0040, 0x149d: 0x0040, + 0x149e: 0x0040, 0x149f: 0x0040, 0x14a0: 0x0040, 0x14a1: 0x0040, 0x14a2: 0x0040, 0x14a3: 0x0040, + 0x14a4: 0x0040, 0x14a5: 0x0040, 0x14a6: 0x0040, 0x14a7: 0x0040, 0x14a8: 0x0040, 0x14a9: 0x0040, + 0x14aa: 0x0040, 0x14ab: 0x0040, 0x14ac: 0x0040, 0x14ad: 0x0040, 0x14ae: 0x0040, 0x14af: 0x0040, + 0x14b0: 0x1f51, 0x14b1: 0x1f59, 0x14b2: 0x1f61, 0x14b3: 0x1f69, 0x14b4: 0x1f71, 0x14b5: 0x1f79, + 0x14b6: 0x1f81, 0x14b7: 0x1f89, 0x14b8: 0x1f91, 0x14b9: 0x1f99, 0x14ba: 0x1fa2, 0x14bb: 0x1faa, + 0x14bc: 0x1fb1, 0x14bd: 0x0018, 0x14be: 0x0040, 0x14bf: 0x0040, // Block 0x53, offset 0x14c0 - 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, - 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, - 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x9621, 0x14d1: 0x9659, - 0x14d2: 0x9659, 0x14d3: 0x9691, 0x14d4: 0x96c9, 0x14d5: 0x9701, 0x14d6: 0x9739, 0x14d7: 0x9771, - 0x14d8: 0x97a9, 0x14d9: 0x97a9, 0x14da: 
0x97e1, 0x14db: 0x9819, 0x14dc: 0x9851, 0x14dd: 0x9889, - 0x14de: 0x98c1, 0x14df: 0x98f9, 0x14e0: 0x98f9, 0x14e1: 0x9931, 0x14e2: 0x9969, 0x14e3: 0x9969, - 0x14e4: 0x99a1, 0x14e5: 0x99a1, 0x14e6: 0x99d9, 0x14e7: 0x9a11, 0x14e8: 0x9a11, 0x14e9: 0x9a49, - 0x14ea: 0x9a81, 0x14eb: 0x9a81, 0x14ec: 0x9ab9, 0x14ed: 0x9ab9, 0x14ee: 0x9af1, 0x14ef: 0x9b29, - 0x14f0: 0x9b29, 0x14f1: 0x9b61, 0x14f2: 0x9b61, 0x14f3: 0x9b99, 0x14f4: 0x9bd1, 0x14f5: 0x9c09, - 0x14f6: 0x9c41, 0x14f7: 0x9c41, 0x14f8: 0x9c79, 0x14f9: 0x9cb1, 0x14fa: 0x9ce9, 0x14fb: 0x9d21, - 0x14fc: 0x9d59, 0x14fd: 0x9d59, 0x14fe: 0x9d91, 0x14ff: 0x9dc9, + 0x14c0: 0x33c0, 0x14c1: 0x33c0, 0x14c2: 0x33c0, 0x14c3: 0x33c0, 0x14c4: 0x33c0, 0x14c5: 0x33c0, + 0x14c6: 0x33c0, 0x14c7: 0x33c0, 0x14c8: 0x33c0, 0x14c9: 0x33c0, 0x14ca: 0x33c0, 0x14cb: 0x33c0, + 0x14cc: 0x33c0, 0x14cd: 0x33c0, 0x14ce: 0x33c0, 0x14cf: 0x33c0, 0x14d0: 0x1fba, 0x14d1: 0x7d8d, + 0x14d2: 0x0040, 0x14d3: 0x1fc2, 0x14d4: 0x0122, 0x14d5: 0x1fca, 0x14d6: 0x1fd2, 0x14d7: 0x7dad, + 0x14d8: 0x7dcd, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040, + 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x3308, 0x14e1: 0x3308, 0x14e2: 0x3308, 0x14e3: 0x3308, + 0x14e4: 0x3308, 0x14e5: 0x3308, 0x14e6: 0x3308, 0x14e7: 0x3308, 0x14e8: 0x3308, 0x14e9: 0x3308, + 0x14ea: 0x3308, 0x14eb: 0x3308, 0x14ec: 0x3308, 0x14ed: 0x3308, 0x14ee: 0x3308, 0x14ef: 0x3308, + 0x14f0: 0x0040, 0x14f1: 0x7ded, 0x14f2: 0x7e0d, 0x14f3: 0x1fda, 0x14f4: 0x1fda, 0x14f5: 0x072a, + 0x14f6: 0x0732, 0x14f7: 0x1fe2, 0x14f8: 0x1fea, 0x14f9: 0x7e2d, 0x14fa: 0x7e4d, 0x14fb: 0x7e6d, + 0x14fc: 0x7e2d, 0x14fd: 0x7e8d, 0x14fe: 0x7ead, 0x14ff: 0x7e8d, // Block 0x54, offset 0x1500 - 0x1500: 0xa999, 0x1501: 0xa9d1, 0x1502: 0xaa09, 0x1503: 0xa8f1, 0x1504: 0x9c09, 0x1505: 0x99d9, - 0x1506: 0xaa41, 0x1507: 0xaa79, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, - 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, - 0x1512: 0x0040, 
0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, - 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, - 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, - 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, - 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, - 0x1530: 0xaab1, 0x1531: 0xaae9, 0x1532: 0xab21, 0x1533: 0xab69, 0x1534: 0xabb1, 0x1535: 0xabf9, - 0x1536: 0xac41, 0x1537: 0xac89, 0x1538: 0xacd1, 0x1539: 0xad19, 0x153a: 0xad52, 0x153b: 0xae62, - 0x153c: 0xaee1, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + 0x1500: 0x7ecd, 0x1501: 0x7eed, 0x1502: 0x7f0d, 0x1503: 0x7eed, 0x1504: 0x7f2d, 0x1505: 0x0018, + 0x1506: 0x0018, 0x1507: 0x1ff2, 0x1508: 0x1ffa, 0x1509: 0x7f4e, 0x150a: 0x7f6e, 0x150b: 0x7f8e, + 0x150c: 0x7fae, 0x150d: 0x1fda, 0x150e: 0x1fda, 0x150f: 0x1fda, 0x1510: 0x1fba, 0x1511: 0x7fcd, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0122, 0x1515: 0x1fc2, 0x1516: 0x1fd2, 0x1517: 0x1fca, + 0x1518: 0x7fed, 0x1519: 0x072a, 0x151a: 0x0732, 0x151b: 0x1fe2, 0x151c: 0x1fea, 0x151d: 0x7ecd, + 0x151e: 0x7f2d, 0x151f: 0x2002, 0x1520: 0x200a, 0x1521: 0x2012, 0x1522: 0x071a, 0x1523: 0x2019, + 0x1524: 0x2022, 0x1525: 0x202a, 0x1526: 0x0722, 0x1527: 0x0040, 0x1528: 0x2032, 0x1529: 0x203a, + 0x152a: 0x2042, 0x152b: 0x204a, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0x800e, 0x1531: 0x2051, 0x1532: 0x802e, 0x1533: 0x0808, 0x1534: 0x804e, 0x1535: 0x0040, + 0x1536: 0x806e, 0x1537: 0x2059, 0x1538: 0x808e, 0x1539: 0x2061, 0x153a: 0x80ae, 0x153b: 0x2069, + 0x153c: 0x80ce, 0x153d: 0x2071, 0x153e: 0x80ee, 0x153f: 0x2079, // Block 0x55, offset 0x1540 - 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, - 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 
0x33c0, - 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaf2a, 0x1551: 0x7d8d, - 0x1552: 0x0040, 0x1553: 0xaf3a, 0x1554: 0x03c2, 0x1555: 0xaf4a, 0x1556: 0xaf5a, 0x1557: 0x7dad, - 0x1558: 0x7dcd, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, - 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, - 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, - 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, - 0x1570: 0x0040, 0x1571: 0x7ded, 0x1572: 0x7e0d, 0x1573: 0xaf6a, 0x1574: 0xaf6a, 0x1575: 0x1fd2, - 0x1576: 0x1fe2, 0x1577: 0xaf7a, 0x1578: 0xaf8a, 0x1579: 0x7e2d, 0x157a: 0x7e4d, 0x157b: 0x7e6d, - 0x157c: 0x7e2d, 0x157d: 0x7e8d, 0x157e: 0x7ead, 0x157f: 0x7e8d, + 0x1540: 0x2081, 0x1541: 0x2089, 0x1542: 0x2089, 0x1543: 0x2091, 0x1544: 0x2091, 0x1545: 0x2099, + 0x1546: 0x2099, 0x1547: 0x20a1, 0x1548: 0x20a1, 0x1549: 0x20a9, 0x154a: 0x20a9, 0x154b: 0x20a9, + 0x154c: 0x20a9, 0x154d: 0x20b1, 0x154e: 0x20b1, 0x154f: 0x20b9, 0x1550: 0x20b9, 0x1551: 0x20b9, + 0x1552: 0x20b9, 0x1553: 0x20c1, 0x1554: 0x20c1, 0x1555: 0x20c9, 0x1556: 0x20c9, 0x1557: 0x20c9, + 0x1558: 0x20c9, 0x1559: 0x20d1, 0x155a: 0x20d1, 0x155b: 0x20d1, 0x155c: 0x20d1, 0x155d: 0x20d9, + 0x155e: 0x20d9, 0x155f: 0x20d9, 0x1560: 0x20d9, 0x1561: 0x20e1, 0x1562: 0x20e1, 0x1563: 0x20e1, + 0x1564: 0x20e1, 0x1565: 0x20e9, 0x1566: 0x20e9, 0x1567: 0x20e9, 0x1568: 0x20e9, 0x1569: 0x20f1, + 0x156a: 0x20f1, 0x156b: 0x20f9, 0x156c: 0x20f9, 0x156d: 0x2101, 0x156e: 0x2101, 0x156f: 0x2109, + 0x1570: 0x2109, 0x1571: 0x2111, 0x1572: 0x2111, 0x1573: 0x2111, 0x1574: 0x2111, 0x1575: 0x2119, + 0x1576: 0x2119, 0x1577: 0x2119, 0x1578: 0x2119, 0x1579: 0x2121, 0x157a: 0x2121, 0x157b: 0x2121, + 0x157c: 0x2121, 0x157d: 0x2129, 0x157e: 0x2129, 0x157f: 0x2129, // Block 0x56, offset 0x1580 - 0x1580: 0x7ecd, 0x1581: 0x7eed, 0x1582: 0x7f0d, 0x1583: 0x7eed, 
0x1584: 0x7f2d, 0x1585: 0x0018, - 0x1586: 0x0018, 0x1587: 0xaf9a, 0x1588: 0xafaa, 0x1589: 0x7f4e, 0x158a: 0x7f6e, 0x158b: 0x7f8e, - 0x158c: 0x7fae, 0x158d: 0xaf6a, 0x158e: 0xaf6a, 0x158f: 0xaf6a, 0x1590: 0xaf2a, 0x1591: 0x7fcd, - 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaf3a, 0x1596: 0xaf5a, 0x1597: 0xaf4a, - 0x1598: 0x7fed, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf7a, 0x159c: 0xaf8a, 0x159d: 0x7ecd, - 0x159e: 0x7f2d, 0x159f: 0xafba, 0x15a0: 0xafca, 0x15a1: 0xafda, 0x15a2: 0x1fb2, 0x15a3: 0xafe9, - 0x15a4: 0xaffa, 0x15a5: 0xb00a, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xb01a, 0x15a9: 0xb02a, - 0x15aa: 0xb03a, 0x15ab: 0xb04a, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, - 0x15b0: 0x800e, 0x15b1: 0xb059, 0x15b2: 0x802e, 0x15b3: 0x0808, 0x15b4: 0x804e, 0x15b5: 0x0040, - 0x15b6: 0x806e, 0x15b7: 0xb081, 0x15b8: 0x808e, 0x15b9: 0xb0a9, 0x15ba: 0x80ae, 0x15bb: 0xb0d1, - 0x15bc: 0x80ce, 0x15bd: 0xb0f9, 0x15be: 0x80ee, 0x15bf: 0xb121, + 0x1580: 0x2129, 0x1581: 0x2131, 0x1582: 0x2131, 0x1583: 0x2131, 0x1584: 0x2131, 0x1585: 0x2139, + 0x1586: 0x2139, 0x1587: 0x2139, 0x1588: 0x2139, 0x1589: 0x2141, 0x158a: 0x2141, 0x158b: 0x2141, + 0x158c: 0x2141, 0x158d: 0x2149, 0x158e: 0x2149, 0x158f: 0x2149, 0x1590: 0x2149, 0x1591: 0x2151, + 0x1592: 0x2151, 0x1593: 0x2151, 0x1594: 0x2151, 0x1595: 0x2159, 0x1596: 0x2159, 0x1597: 0x2159, + 0x1598: 0x2159, 0x1599: 0x2161, 0x159a: 0x2161, 0x159b: 0x2161, 0x159c: 0x2161, 0x159d: 0x2169, + 0x159e: 0x2169, 0x159f: 0x2169, 0x15a0: 0x2169, 0x15a1: 0x2171, 0x15a2: 0x2171, 0x15a3: 0x2171, + 0x15a4: 0x2171, 0x15a5: 0x2179, 0x15a6: 0x2179, 0x15a7: 0x2179, 0x15a8: 0x2179, 0x15a9: 0x2181, + 0x15aa: 0x2181, 0x15ab: 0x2181, 0x15ac: 0x2181, 0x15ad: 0x2189, 0x15ae: 0x2189, 0x15af: 0x1701, + 0x15b0: 0x1701, 0x15b1: 0x2191, 0x15b2: 0x2191, 0x15b3: 0x2191, 0x15b4: 0x2191, 0x15b5: 0x2199, + 0x15b6: 0x2199, 0x15b7: 0x21a1, 0x15b8: 0x21a1, 0x15b9: 0x21a9, 0x15ba: 0x21a9, 0x15bb: 0x21b1, + 0x15bc: 0x21b1, 0x15bd: 0x0040, 
0x15be: 0x0040, 0x15bf: 0x03c0, // Block 0x57, offset 0x15c0 - 0x15c0: 0xb149, 0x15c1: 0xb161, 0x15c2: 0xb161, 0x15c3: 0xb179, 0x15c4: 0xb179, 0x15c5: 0xb191, - 0x15c6: 0xb191, 0x15c7: 0xb1a9, 0x15c8: 0xb1a9, 0x15c9: 0xb1c1, 0x15ca: 0xb1c1, 0x15cb: 0xb1c1, - 0x15cc: 0xb1c1, 0x15cd: 0xb1d9, 0x15ce: 0xb1d9, 0x15cf: 0xb1f1, 0x15d0: 0xb1f1, 0x15d1: 0xb1f1, - 0x15d2: 0xb1f1, 0x15d3: 0xb209, 0x15d4: 0xb209, 0x15d5: 0xb221, 0x15d6: 0xb221, 0x15d7: 0xb221, - 0x15d8: 0xb221, 0x15d9: 0xb239, 0x15da: 0xb239, 0x15db: 0xb239, 0x15dc: 0xb239, 0x15dd: 0xb251, - 0x15de: 0xb251, 0x15df: 0xb251, 0x15e0: 0xb251, 0x15e1: 0xb269, 0x15e2: 0xb269, 0x15e3: 0xb269, - 0x15e4: 0xb269, 0x15e5: 0xb281, 0x15e6: 0xb281, 0x15e7: 0xb281, 0x15e8: 0xb281, 0x15e9: 0xb299, - 0x15ea: 0xb299, 0x15eb: 0xb2b1, 0x15ec: 0xb2b1, 0x15ed: 0xb2c9, 0x15ee: 0xb2c9, 0x15ef: 0xb2e1, - 0x15f0: 0xb2e1, 0x15f1: 0xb2f9, 0x15f2: 0xb2f9, 0x15f3: 0xb2f9, 0x15f4: 0xb2f9, 0x15f5: 0xb311, - 0x15f6: 0xb311, 0x15f7: 0xb311, 0x15f8: 0xb311, 0x15f9: 0xb329, 0x15fa: 0xb329, 0x15fb: 0xb329, - 0x15fc: 0xb329, 0x15fd: 0xb341, 0x15fe: 0xb341, 0x15ff: 0xb341, + 0x15c0: 0x0040, 0x15c1: 0x1fca, 0x15c2: 0x21ba, 0x15c3: 0x2002, 0x15c4: 0x203a, 0x15c5: 0x2042, + 0x15c6: 0x200a, 0x15c7: 0x21c2, 0x15c8: 0x072a, 0x15c9: 0x0732, 0x15ca: 0x2012, 0x15cb: 0x071a, + 0x15cc: 0x1fba, 0x15cd: 0x2019, 0x15ce: 0x0961, 0x15cf: 0x21ca, 0x15d0: 0x06e1, 0x15d1: 0x0049, + 0x15d2: 0x0029, 0x15d3: 0x0031, 0x15d4: 0x06e9, 0x15d5: 0x06f1, 0x15d6: 0x06f9, 0x15d7: 0x0701, + 0x15d8: 0x0709, 0x15d9: 0x0711, 0x15da: 0x1fc2, 0x15db: 0x0122, 0x15dc: 0x2022, 0x15dd: 0x0722, + 0x15de: 0x202a, 0x15df: 0x1fd2, 0x15e0: 0x204a, 0x15e1: 0x0019, 0x15e2: 0x02e9, 0x15e3: 0x03d9, + 0x15e4: 0x02f1, 0x15e5: 0x02f9, 0x15e6: 0x03f1, 0x15e7: 0x0309, 0x15e8: 0x00a9, 0x15e9: 0x0311, + 0x15ea: 0x00b1, 0x15eb: 0x0319, 0x15ec: 0x0101, 0x15ed: 0x0321, 0x15ee: 0x0329, 0x15ef: 0x0051, + 0x15f0: 0x0339, 0x15f1: 0x0751, 0x15f2: 0x00b9, 0x15f3: 0x0089, 0x15f4: 0x0341, 0x15f5: 0x0349, + 0x15f6: 
0x0391, 0x15f7: 0x00c1, 0x15f8: 0x0109, 0x15f9: 0x00c9, 0x15fa: 0x04b1, 0x15fb: 0x1ff2, + 0x15fc: 0x2032, 0x15fd: 0x1ffa, 0x15fe: 0x21d2, 0x15ff: 0x1fda, // Block 0x58, offset 0x1600 - 0x1600: 0xb341, 0x1601: 0xb359, 0x1602: 0xb359, 0x1603: 0xb359, 0x1604: 0xb359, 0x1605: 0xb371, - 0x1606: 0xb371, 0x1607: 0xb371, 0x1608: 0xb371, 0x1609: 0xb389, 0x160a: 0xb389, 0x160b: 0xb389, - 0x160c: 0xb389, 0x160d: 0xb3a1, 0x160e: 0xb3a1, 0x160f: 0xb3a1, 0x1610: 0xb3a1, 0x1611: 0xb3b9, - 0x1612: 0xb3b9, 0x1613: 0xb3b9, 0x1614: 0xb3b9, 0x1615: 0xb3d1, 0x1616: 0xb3d1, 0x1617: 0xb3d1, - 0x1618: 0xb3d1, 0x1619: 0xb3e9, 0x161a: 0xb3e9, 0x161b: 0xb3e9, 0x161c: 0xb3e9, 0x161d: 0xb401, - 0x161e: 0xb401, 0x161f: 0xb401, 0x1620: 0xb401, 0x1621: 0xb419, 0x1622: 0xb419, 0x1623: 0xb419, - 0x1624: 0xb419, 0x1625: 0xb431, 0x1626: 0xb431, 0x1627: 0xb431, 0x1628: 0xb431, 0x1629: 0xb449, - 0x162a: 0xb449, 0x162b: 0xb449, 0x162c: 0xb449, 0x162d: 0xb461, 0x162e: 0xb461, 0x162f: 0x7b01, - 0x1630: 0x7b01, 0x1631: 0xb479, 0x1632: 0xb479, 0x1633: 0xb479, 0x1634: 0xb479, 0x1635: 0xb491, - 0x1636: 0xb491, 0x1637: 0xb4b9, 0x1638: 0xb4b9, 0x1639: 0xb4e1, 0x163a: 0xb4e1, 0x163b: 0xb509, - 0x163c: 0xb509, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + 0x1600: 0x0672, 0x1601: 0x0019, 0x1602: 0x02e9, 0x1603: 0x03d9, 0x1604: 0x02f1, 0x1605: 0x02f9, + 0x1606: 0x03f1, 0x1607: 0x0309, 0x1608: 0x00a9, 0x1609: 0x0311, 0x160a: 0x00b1, 0x160b: 0x0319, + 0x160c: 0x0101, 0x160d: 0x0321, 0x160e: 0x0329, 0x160f: 0x0051, 0x1610: 0x0339, 0x1611: 0x0751, + 0x1612: 0x00b9, 0x1613: 0x0089, 0x1614: 0x0341, 0x1615: 0x0349, 0x1616: 0x0391, 0x1617: 0x00c1, + 0x1618: 0x0109, 0x1619: 0x00c9, 0x161a: 0x04b1, 0x161b: 0x1fe2, 0x161c: 0x21da, 0x161d: 0x1fea, + 0x161e: 0x21e2, 0x161f: 0x810d, 0x1620: 0x812d, 0x1621: 0x0961, 0x1622: 0x814d, 0x1623: 0x814d, + 0x1624: 0x816d, 0x1625: 0x818d, 0x1626: 0x81ad, 0x1627: 0x81cd, 0x1628: 0x81ed, 0x1629: 0x820d, + 0x162a: 0x822d, 0x162b: 0x824d, 0x162c: 0x826d, 0x162d: 0x828d, 0x162e: 0x82ad, 
0x162f: 0x82cd, + 0x1630: 0x82ed, 0x1631: 0x830d, 0x1632: 0x832d, 0x1633: 0x834d, 0x1634: 0x836d, 0x1635: 0x838d, + 0x1636: 0x83ad, 0x1637: 0x83cd, 0x1638: 0x83ed, 0x1639: 0x840d, 0x163a: 0x842d, 0x163b: 0x844d, + 0x163c: 0x81ed, 0x163d: 0x846d, 0x163e: 0x848d, 0x163f: 0x824d, // Block 0x59, offset 0x1640 - 0x1640: 0x0040, 0x1641: 0xaf4a, 0x1642: 0xb532, 0x1643: 0xafba, 0x1644: 0xb02a, 0x1645: 0xb03a, - 0x1646: 0xafca, 0x1647: 0xb542, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xafda, 0x164b: 0x1fb2, - 0x164c: 0xaf2a, 0x164d: 0xafe9, 0x164e: 0x29d1, 0x164f: 0xb552, 0x1650: 0x1f41, 0x1651: 0x00c9, - 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, - 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaf3a, 0x165b: 0x03c2, 0x165c: 0xaffa, 0x165d: 0x1fc2, - 0x165e: 0xb00a, 0x165f: 0xaf5a, 0x1660: 0xb04a, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, - 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, - 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, - 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, - 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf9a, - 0x167c: 0xb01a, 0x167d: 0xafaa, 0x167e: 0xb562, 0x167f: 0xaf6a, + 0x1640: 0x84ad, 0x1641: 0x84cd, 0x1642: 0x84ed, 0x1643: 0x850d, 0x1644: 0x852d, 0x1645: 0x854d, + 0x1646: 0x856d, 0x1647: 0x858d, 0x1648: 0x850d, 0x1649: 0x85ad, 0x164a: 0x850d, 0x164b: 0x85cd, + 0x164c: 0x85cd, 0x164d: 0x85ed, 0x164e: 0x85ed, 0x164f: 0x860d, 0x1650: 0x854d, 0x1651: 0x862d, + 0x1652: 0x864d, 0x1653: 0x862d, 0x1654: 0x866d, 0x1655: 0x864d, 0x1656: 0x868d, 0x1657: 0x868d, + 0x1658: 0x86ad, 0x1659: 0x86ad, 0x165a: 0x86cd, 0x165b: 0x86cd, 0x165c: 0x864d, 0x165d: 0x814d, + 0x165e: 0x86ed, 0x165f: 0x870d, 0x1660: 0x0040, 0x1661: 0x872d, 0x1662: 0x874d, 0x1663: 0x876d, + 0x1664: 0x878d, 0x1665: 0x876d, 0x1666: 0x87ad, 0x1667: 
0x87cd, 0x1668: 0x87ed, 0x1669: 0x87ed, + 0x166a: 0x880d, 0x166b: 0x880d, 0x166c: 0x882d, 0x166d: 0x882d, 0x166e: 0x880d, 0x166f: 0x880d, + 0x1670: 0x884d, 0x1671: 0x886d, 0x1672: 0x888d, 0x1673: 0x88ad, 0x1674: 0x88cd, 0x1675: 0x88ed, + 0x1676: 0x88ed, 0x1677: 0x88ed, 0x1678: 0x890d, 0x1679: 0x890d, 0x167a: 0x890d, 0x167b: 0x890d, + 0x167c: 0x87ed, 0x167d: 0x87ed, 0x167e: 0x87ed, 0x167f: 0x0040, // Block 0x5a, offset 0x1680 - 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, - 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, - 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, - 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, - 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf7a, 0x169c: 0xb572, 0x169d: 0xaf8a, - 0x169e: 0xb582, 0x169f: 0x810d, 0x16a0: 0x812d, 0x16a1: 0x29d1, 0x16a2: 0x814d, 0x16a3: 0x814d, - 0x16a4: 0x816d, 0x16a5: 0x818d, 0x16a6: 0x81ad, 0x16a7: 0x81cd, 0x16a8: 0x81ed, 0x16a9: 0x820d, - 0x16aa: 0x822d, 0x16ab: 0x824d, 0x16ac: 0x826d, 0x16ad: 0x828d, 0x16ae: 0x82ad, 0x16af: 0x82cd, - 0x16b0: 0x82ed, 0x16b1: 0x830d, 0x16b2: 0x832d, 0x16b3: 0x834d, 0x16b4: 0x836d, 0x16b5: 0x838d, - 0x16b6: 0x83ad, 0x16b7: 0x83cd, 0x16b8: 0x83ed, 0x16b9: 0x840d, 0x16ba: 0x842d, 0x16bb: 0x844d, - 0x16bc: 0x81ed, 0x16bd: 0x846d, 0x16be: 0x848d, 0x16bf: 0x824d, + 0x1680: 0x0040, 0x1681: 0x0040, 0x1682: 0x874d, 0x1683: 0x872d, 0x1684: 0x892d, 0x1685: 0x872d, + 0x1686: 0x874d, 0x1687: 0x872d, 0x1688: 0x0040, 0x1689: 0x0040, 0x168a: 0x894d, 0x168b: 0x874d, + 0x168c: 0x896d, 0x168d: 0x892d, 0x168e: 0x896d, 0x168f: 0x874d, 0x1690: 0x0040, 0x1691: 0x0040, + 0x1692: 0x898d, 0x1693: 0x89ad, 0x1694: 0x88ad, 0x1695: 0x896d, 0x1696: 0x892d, 0x1697: 0x896d, + 0x1698: 0x0040, 0x1699: 0x0040, 0x169a: 0x89cd, 0x169b: 0x89ed, 0x169c: 0x89cd, 0x169d: 0x0040, + 0x169e: 0x0040, 0x169f: 0x0040, 
0x16a0: 0x21e9, 0x16a1: 0x21f1, 0x16a2: 0x21f9, 0x16a3: 0x8a0e, + 0x16a4: 0x2201, 0x16a5: 0x2209, 0x16a6: 0x8a2d, 0x16a7: 0x0040, 0x16a8: 0x8a4d, 0x16a9: 0x8a6d, + 0x16aa: 0x8a8d, 0x16ab: 0x8a6d, 0x16ac: 0x8aad, 0x16ad: 0x8acd, 0x16ae: 0x8aed, 0x16af: 0x0040, + 0x16b0: 0x0040, 0x16b1: 0x0040, 0x16b2: 0x0040, 0x16b3: 0x0040, 0x16b4: 0x0040, 0x16b5: 0x0040, + 0x16b6: 0x0040, 0x16b7: 0x0040, 0x16b8: 0x0040, 0x16b9: 0x0340, 0x16ba: 0x0340, 0x16bb: 0x0340, + 0x16bc: 0x0040, 0x16bd: 0x0040, 0x16be: 0x0040, 0x16bf: 0x0040, // Block 0x5b, offset 0x16c0 - 0x16c0: 0x84ad, 0x16c1: 0x84cd, 0x16c2: 0x84ed, 0x16c3: 0x850d, 0x16c4: 0x852d, 0x16c5: 0x854d, - 0x16c6: 0x856d, 0x16c7: 0x858d, 0x16c8: 0x850d, 0x16c9: 0x85ad, 0x16ca: 0x850d, 0x16cb: 0x85cd, - 0x16cc: 0x85cd, 0x16cd: 0x85ed, 0x16ce: 0x85ed, 0x16cf: 0x860d, 0x16d0: 0x854d, 0x16d1: 0x862d, - 0x16d2: 0x864d, 0x16d3: 0x862d, 0x16d4: 0x866d, 0x16d5: 0x864d, 0x16d6: 0x868d, 0x16d7: 0x868d, - 0x16d8: 0x86ad, 0x16d9: 0x86ad, 0x16da: 0x86cd, 0x16db: 0x86cd, 0x16dc: 0x864d, 0x16dd: 0x814d, - 0x16de: 0x86ed, 0x16df: 0x870d, 0x16e0: 0x0040, 0x16e1: 0x872d, 0x16e2: 0x874d, 0x16e3: 0x876d, - 0x16e4: 0x878d, 0x16e5: 0x876d, 0x16e6: 0x87ad, 0x16e7: 0x87cd, 0x16e8: 0x87ed, 0x16e9: 0x87ed, - 0x16ea: 0x880d, 0x16eb: 0x880d, 0x16ec: 0x882d, 0x16ed: 0x882d, 0x16ee: 0x880d, 0x16ef: 0x880d, - 0x16f0: 0x884d, 0x16f1: 0x886d, 0x16f2: 0x888d, 0x16f3: 0x88ad, 0x16f4: 0x88cd, 0x16f5: 0x88ed, - 0x16f6: 0x88ed, 0x16f7: 0x88ed, 0x16f8: 0x890d, 0x16f9: 0x890d, 0x16fa: 0x890d, 0x16fb: 0x890d, - 0x16fc: 0x87ed, 0x16fd: 0x87ed, 0x16fe: 0x87ed, 0x16ff: 0x0040, + 0x16c0: 0x0a08, 0x16c1: 0x0a08, 0x16c2: 0x0a08, 0x16c3: 0x0a08, 0x16c4: 0x0a08, 0x16c5: 0x0c08, + 0x16c6: 0x0808, 0x16c7: 0x0c08, 0x16c8: 0x0818, 0x16c9: 0x0c08, 0x16ca: 0x0c08, 0x16cb: 0x0808, + 0x16cc: 0x0808, 0x16cd: 0x0908, 0x16ce: 0x0c08, 0x16cf: 0x0c08, 0x16d0: 0x0c08, 0x16d1: 0x0c08, + 0x16d2: 0x0c08, 0x16d3: 0x0a08, 0x16d4: 0x0a08, 0x16d5: 0x0a08, 0x16d6: 0x0a08, 0x16d7: 0x0908, + 0x16d8: 
0x0a08, 0x16d9: 0x0a08, 0x16da: 0x0a08, 0x16db: 0x0a08, 0x16dc: 0x0a08, 0x16dd: 0x0c08, + 0x16de: 0x0a08, 0x16df: 0x0a08, 0x16e0: 0x0a08, 0x16e1: 0x0c08, 0x16e2: 0x0808, 0x16e3: 0x0808, + 0x16e4: 0x0c08, 0x16e5: 0x3308, 0x16e6: 0x3308, 0x16e7: 0x0040, 0x16e8: 0x0040, 0x16e9: 0x0040, + 0x16ea: 0x0040, 0x16eb: 0x0a18, 0x16ec: 0x0a18, 0x16ed: 0x0a18, 0x16ee: 0x0a18, 0x16ef: 0x0c18, + 0x16f0: 0x0818, 0x16f1: 0x0818, 0x16f2: 0x0818, 0x16f3: 0x0818, 0x16f4: 0x0818, 0x16f5: 0x0818, + 0x16f6: 0x0818, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0040, 0x16fa: 0x0040, 0x16fb: 0x0040, + 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, // Block 0x5c, offset 0x1700 - 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x874d, 0x1703: 0x872d, 0x1704: 0x892d, 0x1705: 0x872d, - 0x1706: 0x874d, 0x1707: 0x872d, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x894d, 0x170b: 0x874d, - 0x170c: 0x896d, 0x170d: 0x892d, 0x170e: 0x896d, 0x170f: 0x874d, 0x1710: 0x0040, 0x1711: 0x0040, - 0x1712: 0x898d, 0x1713: 0x89ad, 0x1714: 0x88ad, 0x1715: 0x896d, 0x1716: 0x892d, 0x1717: 0x896d, - 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x89cd, 0x171b: 0x89ed, 0x171c: 0x89cd, 0x171d: 0x0040, - 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb591, 0x1721: 0xb5a9, 0x1722: 0xb5c1, 0x1723: 0x8a0e, - 0x1724: 0xb5d9, 0x1725: 0xb5f1, 0x1726: 0x8a2d, 0x1727: 0x0040, 0x1728: 0x8a4d, 0x1729: 0x8a6d, - 0x172a: 0x8a8d, 0x172b: 0x8a6d, 0x172c: 0x8aad, 0x172d: 0x8acd, 0x172e: 0x8aed, 0x172f: 0x0040, + 0x1700: 0x0a08, 0x1701: 0x0c08, 0x1702: 0x0a08, 0x1703: 0x0c08, 0x1704: 0x0c08, 0x1705: 0x0c08, + 0x1706: 0x0a08, 0x1707: 0x0a08, 0x1708: 0x0a08, 0x1709: 0x0c08, 0x170a: 0x0a08, 0x170b: 0x0a08, + 0x170c: 0x0c08, 0x170d: 0x0a08, 0x170e: 0x0c08, 0x170f: 0x0c08, 0x1710: 0x0a08, 0x1711: 0x0c08, + 0x1712: 0x0040, 0x1713: 0x0040, 0x1714: 0x0040, 0x1715: 0x0040, 0x1716: 0x0040, 0x1717: 0x0040, + 0x1718: 0x0040, 0x1719: 0x0818, 0x171a: 0x0818, 0x171b: 0x0818, 0x171c: 0x0818, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0x0040, 
0x1721: 0x0040, 0x1722: 0x0040, 0x1723: 0x0040, + 0x1724: 0x0040, 0x1725: 0x0040, 0x1726: 0x0040, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0c18, + 0x172a: 0x0c18, 0x172b: 0x0c18, 0x172c: 0x0c18, 0x172d: 0x0a18, 0x172e: 0x0a18, 0x172f: 0x0818, 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, - 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, // Block 0x5d, offset 0x1740 - 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, - 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, - 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, - 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, - 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, - 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, - 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, - 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, - 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, - 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, - 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + 0x1740: 0x3308, 0x1741: 0x3308, 0x1742: 0x3008, 0x1743: 0x3008, 0x1744: 0x0040, 0x1745: 0x0008, + 0x1746: 0x0008, 0x1747: 0x0008, 0x1748: 0x0008, 0x1749: 0x0008, 0x174a: 0x0008, 0x174b: 0x0008, + 0x174c: 0x0008, 0x174d: 0x0040, 0x174e: 0x0040, 0x174f: 0x0008, 0x1750: 0x0008, 0x1751: 0x0040, + 0x1752: 0x0040, 0x1753: 
0x0008, 0x1754: 0x0008, 0x1755: 0x0008, 0x1756: 0x0008, 0x1757: 0x0008, + 0x1758: 0x0008, 0x1759: 0x0008, 0x175a: 0x0008, 0x175b: 0x0008, 0x175c: 0x0008, 0x175d: 0x0008, + 0x175e: 0x0008, 0x175f: 0x0008, 0x1760: 0x0008, 0x1761: 0x0008, 0x1762: 0x0008, 0x1763: 0x0008, + 0x1764: 0x0008, 0x1765: 0x0008, 0x1766: 0x0008, 0x1767: 0x0008, 0x1768: 0x0008, 0x1769: 0x0040, + 0x176a: 0x0008, 0x176b: 0x0008, 0x176c: 0x0008, 0x176d: 0x0008, 0x176e: 0x0008, 0x176f: 0x0008, + 0x1770: 0x0008, 0x1771: 0x0040, 0x1772: 0x0008, 0x1773: 0x0008, 0x1774: 0x0040, 0x1775: 0x0008, + 0x1776: 0x0008, 0x1777: 0x0008, 0x1778: 0x0008, 0x1779: 0x0008, 0x177a: 0x0040, 0x177b: 0x3308, + 0x177c: 0x3308, 0x177d: 0x0008, 0x177e: 0x3008, 0x177f: 0x3008, // Block 0x5e, offset 0x1780 - 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, - 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, - 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, - 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, - 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, - 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, - 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, - 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, - 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x1780: 0x3308, 0x1781: 0x3008, 0x1782: 0x3008, 0x1783: 0x3008, 0x1784: 0x3008, 0x1785: 0x0040, + 0x1786: 0x0040, 0x1787: 0x3008, 0x1788: 0x3008, 0x1789: 0x0040, 0x178a: 0x0040, 0x178b: 0x3008, + 0x178c: 0x3008, 0x178d: 0x3808, 0x178e: 0x0040, 0x178f: 0x0040, 0x1790: 0x0008, 0x1791: 0x0040, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 
0x1796: 0x0040, 0x1797: 0x3008, + 0x1798: 0x0040, 0x1799: 0x0040, 0x179a: 0x0040, 0x179b: 0x0040, 0x179c: 0x0040, 0x179d: 0x0008, + 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x3008, 0x17a3: 0x3008, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x3308, 0x17a7: 0x3308, 0x17a8: 0x3308, 0x17a9: 0x3308, + 0x17aa: 0x3308, 0x17ab: 0x3308, 0x17ac: 0x3308, 0x17ad: 0x0040, 0x17ae: 0x0040, 0x17af: 0x0040, + 0x17b0: 0x3308, 0x17b1: 0x3308, 0x17b2: 0x3308, 0x17b3: 0x3308, 0x17b4: 0x3308, 0x17b5: 0x0040, 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, // Block 0x5f, offset 0x17c0 - 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, - 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, - 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, - 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17c0: 0x0008, 0x17c1: 0x0008, 0x17c2: 0x0008, 0x17c3: 0x0008, 0x17c4: 0x0008, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0040, 0x17c8: 0x0040, 0x17c9: 0x0008, 0x17ca: 0x0040, 0x17cb: 0x0040, + 0x17cc: 0x0008, 0x17cd: 0x0008, 0x17ce: 0x0008, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0008, + 0x17d2: 0x0008, 0x17d3: 0x0008, 0x17d4: 0x0040, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0040, 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, - 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0008, 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, - 0x17f0: 0x0008, 
0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, - 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308, - 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + 0x17f0: 0x3008, 0x17f1: 0x3008, 0x17f2: 0x3008, 0x17f3: 0x3008, 0x17f4: 0x3008, 0x17f5: 0x3008, + 0x17f6: 0x0040, 0x17f7: 0x3008, 0x17f8: 0x3008, 0x17f9: 0x0040, 0x17fa: 0x0040, 0x17fb: 0x3308, + 0x17fc: 0x3308, 0x17fd: 0x3808, 0x17fe: 0x3b08, 0x17ff: 0x0008, // Block 0x60, offset 0x1800 - 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, - 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, - 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, - 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, - 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, - 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, - 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, - 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, - 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, - 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, - 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + 0x1800: 0x0019, 0x1801: 0x02e9, 0x1802: 0x03d9, 0x1803: 0x02f1, 0x1804: 0x02f9, 0x1805: 0x03f1, + 0x1806: 0x0309, 0x1807: 0x00a9, 0x1808: 0x0311, 0x1809: 0x00b1, 0x180a: 0x0319, 0x180b: 0x0101, + 0x180c: 0x0321, 0x180d: 0x0329, 0x180e: 0x0051, 0x180f: 0x0339, 0x1810: 0x0751, 0x1811: 0x00b9, + 0x1812: 0x0089, 0x1813: 0x0341, 0x1814: 0x0349, 0x1815: 0x0391, 0x1816: 0x00c1, 0x1817: 0x0109, + 0x1818: 0x00c9, 0x1819: 
0x04b1, 0x181a: 0x0019, 0x181b: 0x02e9, 0x181c: 0x03d9, 0x181d: 0x02f1, + 0x181e: 0x02f9, 0x181f: 0x03f1, 0x1820: 0x0309, 0x1821: 0x00a9, 0x1822: 0x0311, 0x1823: 0x00b1, + 0x1824: 0x0319, 0x1825: 0x0101, 0x1826: 0x0321, 0x1827: 0x0329, 0x1828: 0x0051, 0x1829: 0x0339, + 0x182a: 0x0751, 0x182b: 0x00b9, 0x182c: 0x0089, 0x182d: 0x0341, 0x182e: 0x0349, 0x182f: 0x0391, + 0x1830: 0x00c1, 0x1831: 0x0109, 0x1832: 0x00c9, 0x1833: 0x04b1, 0x1834: 0x0019, 0x1835: 0x02e9, + 0x1836: 0x03d9, 0x1837: 0x02f1, 0x1838: 0x02f9, 0x1839: 0x03f1, 0x183a: 0x0309, 0x183b: 0x00a9, + 0x183c: 0x0311, 0x183d: 0x00b1, 0x183e: 0x0319, 0x183f: 0x0101, // Block 0x61, offset 0x1840 - 0x1840: 0x0008, 0x1841: 0x0008, 0x1842: 0x0008, 0x1843: 0x0008, 0x1844: 0x0008, 0x1845: 0x0008, - 0x1846: 0x0008, 0x1847: 0x0040, 0x1848: 0x0040, 0x1849: 0x0008, 0x184a: 0x0040, 0x184b: 0x0040, - 0x184c: 0x0008, 0x184d: 0x0008, 0x184e: 0x0008, 0x184f: 0x0008, 0x1850: 0x0008, 0x1851: 0x0008, - 0x1852: 0x0008, 0x1853: 0x0008, 0x1854: 0x0040, 0x1855: 0x0008, 0x1856: 0x0008, 0x1857: 0x0040, - 0x1858: 0x0008, 0x1859: 0x0008, 0x185a: 0x0008, 0x185b: 0x0008, 0x185c: 0x0008, 0x185d: 0x0008, - 0x185e: 0x0008, 0x185f: 0x0008, 0x1860: 0x0008, 0x1861: 0x0008, 0x1862: 0x0008, 0x1863: 0x0008, - 0x1864: 0x0008, 0x1865: 0x0008, 0x1866: 0x0008, 0x1867: 0x0008, 0x1868: 0x0008, 0x1869: 0x0008, - 0x186a: 0x0008, 0x186b: 0x0008, 0x186c: 0x0008, 0x186d: 0x0008, 0x186e: 0x0008, 0x186f: 0x0008, - 0x1870: 0x3008, 0x1871: 0x3008, 0x1872: 0x3008, 0x1873: 0x3008, 0x1874: 0x3008, 0x1875: 0x3008, - 0x1876: 0x0040, 0x1877: 0x3008, 0x1878: 0x3008, 0x1879: 0x0040, 0x187a: 0x0040, 0x187b: 0x3308, - 0x187c: 0x3308, 0x187d: 0x3808, 0x187e: 0x3b08, 0x187f: 0x0008, + 0x1840: 0x0321, 0x1841: 0x0329, 0x1842: 0x0051, 0x1843: 0x0339, 0x1844: 0x0751, 0x1845: 0x00b9, + 0x1846: 0x0089, 0x1847: 0x0341, 0x1848: 0x0349, 0x1849: 0x0391, 0x184a: 0x00c1, 0x184b: 0x0109, + 0x184c: 0x00c9, 0x184d: 0x04b1, 0x184e: 0x0019, 0x184f: 0x02e9, 0x1850: 0x03d9, 0x1851: 0x02f1, + 
0x1852: 0x02f9, 0x1853: 0x03f1, 0x1854: 0x0309, 0x1855: 0x0040, 0x1856: 0x0311, 0x1857: 0x00b1, + 0x1858: 0x0319, 0x1859: 0x0101, 0x185a: 0x0321, 0x185b: 0x0329, 0x185c: 0x0051, 0x185d: 0x0339, + 0x185e: 0x0751, 0x185f: 0x00b9, 0x1860: 0x0089, 0x1861: 0x0341, 0x1862: 0x0349, 0x1863: 0x0391, + 0x1864: 0x00c1, 0x1865: 0x0109, 0x1866: 0x00c9, 0x1867: 0x04b1, 0x1868: 0x0019, 0x1869: 0x02e9, + 0x186a: 0x03d9, 0x186b: 0x02f1, 0x186c: 0x02f9, 0x186d: 0x03f1, 0x186e: 0x0309, 0x186f: 0x00a9, + 0x1870: 0x0311, 0x1871: 0x00b1, 0x1872: 0x0319, 0x1873: 0x0101, 0x1874: 0x0321, 0x1875: 0x0329, + 0x1876: 0x0051, 0x1877: 0x0339, 0x1878: 0x0751, 0x1879: 0x00b9, 0x187a: 0x0089, 0x187b: 0x0341, + 0x187c: 0x0349, 0x187d: 0x0391, 0x187e: 0x00c1, 0x187f: 0x0109, // Block 0x62, offset 0x1880 - 0x1880: 0x0039, 0x1881: 0x0ee9, 0x1882: 0x1159, 0x1883: 0x0ef9, 0x1884: 0x0f09, 0x1885: 0x1199, - 0x1886: 0x0f31, 0x1887: 0x0249, 0x1888: 0x0f41, 0x1889: 0x0259, 0x188a: 0x0f51, 0x188b: 0x0359, - 0x188c: 0x0f61, 0x188d: 0x0f71, 0x188e: 0x00d9, 0x188f: 0x0f99, 0x1890: 0x2039, 0x1891: 0x0269, - 0x1892: 0x01d9, 0x1893: 0x0fa9, 0x1894: 0x0fb9, 0x1895: 0x1089, 0x1896: 0x0279, 0x1897: 0x0369, - 0x1898: 0x0289, 0x1899: 0x13d1, 0x189a: 0x0039, 0x189b: 0x0ee9, 0x189c: 0x1159, 0x189d: 0x0ef9, - 0x189e: 0x0f09, 0x189f: 0x1199, 0x18a0: 0x0f31, 0x18a1: 0x0249, 0x18a2: 0x0f41, 0x18a3: 0x0259, - 0x18a4: 0x0f51, 0x18a5: 0x0359, 0x18a6: 0x0f61, 0x18a7: 0x0f71, 0x18a8: 0x00d9, 0x18a9: 0x0f99, - 0x18aa: 0x2039, 0x18ab: 0x0269, 0x18ac: 0x01d9, 0x18ad: 0x0fa9, 0x18ae: 0x0fb9, 0x18af: 0x1089, - 0x18b0: 0x0279, 0x18b1: 0x0369, 0x18b2: 0x0289, 0x18b3: 0x13d1, 0x18b4: 0x0039, 0x18b5: 0x0ee9, - 0x18b6: 0x1159, 0x18b7: 0x0ef9, 0x18b8: 0x0f09, 0x18b9: 0x1199, 0x18ba: 0x0f31, 0x18bb: 0x0249, - 0x18bc: 0x0f41, 0x18bd: 0x0259, 0x18be: 0x0f51, 0x18bf: 0x0359, + 0x1880: 0x00c9, 0x1881: 0x04b1, 0x1882: 0x0019, 0x1883: 0x02e9, 0x1884: 0x03d9, 0x1885: 0x02f1, + 0x1886: 0x02f9, 0x1887: 0x03f1, 0x1888: 0x0309, 0x1889: 0x00a9, 0x188a: 
0x0311, 0x188b: 0x00b1, + 0x188c: 0x0319, 0x188d: 0x0101, 0x188e: 0x0321, 0x188f: 0x0329, 0x1890: 0x0051, 0x1891: 0x0339, + 0x1892: 0x0751, 0x1893: 0x00b9, 0x1894: 0x0089, 0x1895: 0x0341, 0x1896: 0x0349, 0x1897: 0x0391, + 0x1898: 0x00c1, 0x1899: 0x0109, 0x189a: 0x00c9, 0x189b: 0x04b1, 0x189c: 0x0019, 0x189d: 0x0040, + 0x189e: 0x03d9, 0x189f: 0x02f1, 0x18a0: 0x0040, 0x18a1: 0x0040, 0x18a2: 0x0309, 0x18a3: 0x0040, + 0x18a4: 0x0040, 0x18a5: 0x00b1, 0x18a6: 0x0319, 0x18a7: 0x0040, 0x18a8: 0x0040, 0x18a9: 0x0329, + 0x18aa: 0x0051, 0x18ab: 0x0339, 0x18ac: 0x0751, 0x18ad: 0x0040, 0x18ae: 0x0089, 0x18af: 0x0341, + 0x18b0: 0x0349, 0x18b1: 0x0391, 0x18b2: 0x00c1, 0x18b3: 0x0109, 0x18b4: 0x00c9, 0x18b5: 0x04b1, + 0x18b6: 0x0019, 0x18b7: 0x02e9, 0x18b8: 0x03d9, 0x18b9: 0x02f1, 0x18ba: 0x0040, 0x18bb: 0x03f1, + 0x18bc: 0x0040, 0x18bd: 0x00a9, 0x18be: 0x0311, 0x18bf: 0x00b1, // Block 0x63, offset 0x18c0 - 0x18c0: 0x0f61, 0x18c1: 0x0f71, 0x18c2: 0x00d9, 0x18c3: 0x0f99, 0x18c4: 0x2039, 0x18c5: 0x0269, - 0x18c6: 0x01d9, 0x18c7: 0x0fa9, 0x18c8: 0x0fb9, 0x18c9: 0x1089, 0x18ca: 0x0279, 0x18cb: 0x0369, - 0x18cc: 0x0289, 0x18cd: 0x13d1, 0x18ce: 0x0039, 0x18cf: 0x0ee9, 0x18d0: 0x1159, 0x18d1: 0x0ef9, - 0x18d2: 0x0f09, 0x18d3: 0x1199, 0x18d4: 0x0f31, 0x18d5: 0x0040, 0x18d6: 0x0f41, 0x18d7: 0x0259, - 0x18d8: 0x0f51, 0x18d9: 0x0359, 0x18da: 0x0f61, 0x18db: 0x0f71, 0x18dc: 0x00d9, 0x18dd: 0x0f99, - 0x18de: 0x2039, 0x18df: 0x0269, 0x18e0: 0x01d9, 0x18e1: 0x0fa9, 0x18e2: 0x0fb9, 0x18e3: 0x1089, - 0x18e4: 0x0279, 0x18e5: 0x0369, 0x18e6: 0x0289, 0x18e7: 0x13d1, 0x18e8: 0x0039, 0x18e9: 0x0ee9, - 0x18ea: 0x1159, 0x18eb: 0x0ef9, 0x18ec: 0x0f09, 0x18ed: 0x1199, 0x18ee: 0x0f31, 0x18ef: 0x0249, - 0x18f0: 0x0f41, 0x18f1: 0x0259, 0x18f2: 0x0f51, 0x18f3: 0x0359, 0x18f4: 0x0f61, 0x18f5: 0x0f71, - 0x18f6: 0x00d9, 0x18f7: 0x0f99, 0x18f8: 0x2039, 0x18f9: 0x0269, 0x18fa: 0x01d9, 0x18fb: 0x0fa9, - 0x18fc: 0x0fb9, 0x18fd: 0x1089, 0x18fe: 0x0279, 0x18ff: 0x0369, + 0x18c0: 0x0319, 0x18c1: 0x0101, 0x18c2: 0x0321, 
0x18c3: 0x0329, 0x18c4: 0x0040, 0x18c5: 0x0339, + 0x18c6: 0x0751, 0x18c7: 0x00b9, 0x18c8: 0x0089, 0x18c9: 0x0341, 0x18ca: 0x0349, 0x18cb: 0x0391, + 0x18cc: 0x00c1, 0x18cd: 0x0109, 0x18ce: 0x00c9, 0x18cf: 0x04b1, 0x18d0: 0x0019, 0x18d1: 0x02e9, + 0x18d2: 0x03d9, 0x18d3: 0x02f1, 0x18d4: 0x02f9, 0x18d5: 0x03f1, 0x18d6: 0x0309, 0x18d7: 0x00a9, + 0x18d8: 0x0311, 0x18d9: 0x00b1, 0x18da: 0x0319, 0x18db: 0x0101, 0x18dc: 0x0321, 0x18dd: 0x0329, + 0x18de: 0x0051, 0x18df: 0x0339, 0x18e0: 0x0751, 0x18e1: 0x00b9, 0x18e2: 0x0089, 0x18e3: 0x0341, + 0x18e4: 0x0349, 0x18e5: 0x0391, 0x18e6: 0x00c1, 0x18e7: 0x0109, 0x18e8: 0x00c9, 0x18e9: 0x04b1, + 0x18ea: 0x0019, 0x18eb: 0x02e9, 0x18ec: 0x03d9, 0x18ed: 0x02f1, 0x18ee: 0x02f9, 0x18ef: 0x03f1, + 0x18f0: 0x0309, 0x18f1: 0x00a9, 0x18f2: 0x0311, 0x18f3: 0x00b1, 0x18f4: 0x0319, 0x18f5: 0x0101, + 0x18f6: 0x0321, 0x18f7: 0x0329, 0x18f8: 0x0051, 0x18f9: 0x0339, 0x18fa: 0x0751, 0x18fb: 0x00b9, + 0x18fc: 0x0089, 0x18fd: 0x0341, 0x18fe: 0x0349, 0x18ff: 0x0391, // Block 0x64, offset 0x1900 - 0x1900: 0x0289, 0x1901: 0x13d1, 0x1902: 0x0039, 0x1903: 0x0ee9, 0x1904: 0x1159, 0x1905: 0x0ef9, - 0x1906: 0x0f09, 0x1907: 0x1199, 0x1908: 0x0f31, 0x1909: 0x0249, 0x190a: 0x0f41, 0x190b: 0x0259, - 0x190c: 0x0f51, 0x190d: 0x0359, 0x190e: 0x0f61, 0x190f: 0x0f71, 0x1910: 0x00d9, 0x1911: 0x0f99, - 0x1912: 0x2039, 0x1913: 0x0269, 0x1914: 0x01d9, 0x1915: 0x0fa9, 0x1916: 0x0fb9, 0x1917: 0x1089, - 0x1918: 0x0279, 0x1919: 0x0369, 0x191a: 0x0289, 0x191b: 0x13d1, 0x191c: 0x0039, 0x191d: 0x0040, - 0x191e: 0x1159, 0x191f: 0x0ef9, 0x1920: 0x0040, 0x1921: 0x0040, 0x1922: 0x0f31, 0x1923: 0x0040, - 0x1924: 0x0040, 0x1925: 0x0259, 0x1926: 0x0f51, 0x1927: 0x0040, 0x1928: 0x0040, 0x1929: 0x0f71, - 0x192a: 0x00d9, 0x192b: 0x0f99, 0x192c: 0x2039, 0x192d: 0x0040, 0x192e: 0x01d9, 0x192f: 0x0fa9, - 0x1930: 0x0fb9, 0x1931: 0x1089, 0x1932: 0x0279, 0x1933: 0x0369, 0x1934: 0x0289, 0x1935: 0x13d1, - 0x1936: 0x0039, 0x1937: 0x0ee9, 0x1938: 0x1159, 0x1939: 0x0ef9, 0x193a: 0x0040, 0x193b: 
0x1199, - 0x193c: 0x0040, 0x193d: 0x0249, 0x193e: 0x0f41, 0x193f: 0x0259, + 0x1900: 0x00c1, 0x1901: 0x0109, 0x1902: 0x00c9, 0x1903: 0x04b1, 0x1904: 0x0019, 0x1905: 0x02e9, + 0x1906: 0x0040, 0x1907: 0x02f1, 0x1908: 0x02f9, 0x1909: 0x03f1, 0x190a: 0x0309, 0x190b: 0x0040, + 0x190c: 0x0040, 0x190d: 0x00b1, 0x190e: 0x0319, 0x190f: 0x0101, 0x1910: 0x0321, 0x1911: 0x0329, + 0x1912: 0x0051, 0x1913: 0x0339, 0x1914: 0x0751, 0x1915: 0x0040, 0x1916: 0x0089, 0x1917: 0x0341, + 0x1918: 0x0349, 0x1919: 0x0391, 0x191a: 0x00c1, 0x191b: 0x0109, 0x191c: 0x00c9, 0x191d: 0x0040, + 0x191e: 0x0019, 0x191f: 0x02e9, 0x1920: 0x03d9, 0x1921: 0x02f1, 0x1922: 0x02f9, 0x1923: 0x03f1, + 0x1924: 0x0309, 0x1925: 0x00a9, 0x1926: 0x0311, 0x1927: 0x00b1, 0x1928: 0x0319, 0x1929: 0x0101, + 0x192a: 0x0321, 0x192b: 0x0329, 0x192c: 0x0051, 0x192d: 0x0339, 0x192e: 0x0751, 0x192f: 0x00b9, + 0x1930: 0x0089, 0x1931: 0x0341, 0x1932: 0x0349, 0x1933: 0x0391, 0x1934: 0x00c1, 0x1935: 0x0109, + 0x1936: 0x00c9, 0x1937: 0x04b1, 0x1938: 0x0019, 0x1939: 0x02e9, 0x193a: 0x0040, 0x193b: 0x02f1, + 0x193c: 0x02f9, 0x193d: 0x03f1, 0x193e: 0x0309, 0x193f: 0x0040, // Block 0x65, offset 0x1940 - 0x1940: 0x0f51, 0x1941: 0x0359, 0x1942: 0x0f61, 0x1943: 0x0f71, 0x1944: 0x0040, 0x1945: 0x0f99, - 0x1946: 0x2039, 0x1947: 0x0269, 0x1948: 0x01d9, 0x1949: 0x0fa9, 0x194a: 0x0fb9, 0x194b: 0x1089, - 0x194c: 0x0279, 0x194d: 0x0369, 0x194e: 0x0289, 0x194f: 0x13d1, 0x1950: 0x0039, 0x1951: 0x0ee9, - 0x1952: 0x1159, 0x1953: 0x0ef9, 0x1954: 0x0f09, 0x1955: 0x1199, 0x1956: 0x0f31, 0x1957: 0x0249, - 0x1958: 0x0f41, 0x1959: 0x0259, 0x195a: 0x0f51, 0x195b: 0x0359, 0x195c: 0x0f61, 0x195d: 0x0f71, - 0x195e: 0x00d9, 0x195f: 0x0f99, 0x1960: 0x2039, 0x1961: 0x0269, 0x1962: 0x01d9, 0x1963: 0x0fa9, - 0x1964: 0x0fb9, 0x1965: 0x1089, 0x1966: 0x0279, 0x1967: 0x0369, 0x1968: 0x0289, 0x1969: 0x13d1, - 0x196a: 0x0039, 0x196b: 0x0ee9, 0x196c: 0x1159, 0x196d: 0x0ef9, 0x196e: 0x0f09, 0x196f: 0x1199, - 0x1970: 0x0f31, 0x1971: 0x0249, 0x1972: 0x0f41, 0x1973: 0x0259, 
0x1974: 0x0f51, 0x1975: 0x0359, - 0x1976: 0x0f61, 0x1977: 0x0f71, 0x1978: 0x00d9, 0x1979: 0x0f99, 0x197a: 0x2039, 0x197b: 0x0269, - 0x197c: 0x01d9, 0x197d: 0x0fa9, 0x197e: 0x0fb9, 0x197f: 0x1089, + 0x1940: 0x0311, 0x1941: 0x00b1, 0x1942: 0x0319, 0x1943: 0x0101, 0x1944: 0x0321, 0x1945: 0x0040, + 0x1946: 0x0051, 0x1947: 0x0040, 0x1948: 0x0040, 0x1949: 0x0040, 0x194a: 0x0089, 0x194b: 0x0341, + 0x194c: 0x0349, 0x194d: 0x0391, 0x194e: 0x00c1, 0x194f: 0x0109, 0x1950: 0x00c9, 0x1951: 0x0040, + 0x1952: 0x0019, 0x1953: 0x02e9, 0x1954: 0x03d9, 0x1955: 0x02f1, 0x1956: 0x02f9, 0x1957: 0x03f1, + 0x1958: 0x0309, 0x1959: 0x00a9, 0x195a: 0x0311, 0x195b: 0x00b1, 0x195c: 0x0319, 0x195d: 0x0101, + 0x195e: 0x0321, 0x195f: 0x0329, 0x1960: 0x0051, 0x1961: 0x0339, 0x1962: 0x0751, 0x1963: 0x00b9, + 0x1964: 0x0089, 0x1965: 0x0341, 0x1966: 0x0349, 0x1967: 0x0391, 0x1968: 0x00c1, 0x1969: 0x0109, + 0x196a: 0x00c9, 0x196b: 0x04b1, 0x196c: 0x0019, 0x196d: 0x02e9, 0x196e: 0x03d9, 0x196f: 0x02f1, + 0x1970: 0x02f9, 0x1971: 0x03f1, 0x1972: 0x0309, 0x1973: 0x00a9, 0x1974: 0x0311, 0x1975: 0x00b1, + 0x1976: 0x0319, 0x1977: 0x0101, 0x1978: 0x0321, 0x1979: 0x0329, 0x197a: 0x0051, 0x197b: 0x0339, + 0x197c: 0x0751, 0x197d: 0x00b9, 0x197e: 0x0089, 0x197f: 0x0341, // Block 0x66, offset 0x1980 - 0x1980: 0x0279, 0x1981: 0x0369, 0x1982: 0x0289, 0x1983: 0x13d1, 0x1984: 0x0039, 0x1985: 0x0ee9, - 0x1986: 0x0040, 0x1987: 0x0ef9, 0x1988: 0x0f09, 0x1989: 0x1199, 0x198a: 0x0f31, 0x198b: 0x0040, - 0x198c: 0x0040, 0x198d: 0x0259, 0x198e: 0x0f51, 0x198f: 0x0359, 0x1990: 0x0f61, 0x1991: 0x0f71, - 0x1992: 0x00d9, 0x1993: 0x0f99, 0x1994: 0x2039, 0x1995: 0x0040, 0x1996: 0x01d9, 0x1997: 0x0fa9, - 0x1998: 0x0fb9, 0x1999: 0x1089, 0x199a: 0x0279, 0x199b: 0x0369, 0x199c: 0x0289, 0x199d: 0x0040, - 0x199e: 0x0039, 0x199f: 0x0ee9, 0x19a0: 0x1159, 0x19a1: 0x0ef9, 0x19a2: 0x0f09, 0x19a3: 0x1199, - 0x19a4: 0x0f31, 0x19a5: 0x0249, 0x19a6: 0x0f41, 0x19a7: 0x0259, 0x19a8: 0x0f51, 0x19a9: 0x0359, - 0x19aa: 0x0f61, 0x19ab: 0x0f71, 0x19ac: 
0x00d9, 0x19ad: 0x0f99, 0x19ae: 0x2039, 0x19af: 0x0269, - 0x19b0: 0x01d9, 0x19b1: 0x0fa9, 0x19b2: 0x0fb9, 0x19b3: 0x1089, 0x19b4: 0x0279, 0x19b5: 0x0369, - 0x19b6: 0x0289, 0x19b7: 0x13d1, 0x19b8: 0x0039, 0x19b9: 0x0ee9, 0x19ba: 0x0040, 0x19bb: 0x0ef9, - 0x19bc: 0x0f09, 0x19bd: 0x1199, 0x19be: 0x0f31, 0x19bf: 0x0040, + 0x1980: 0x0349, 0x1981: 0x0391, 0x1982: 0x00c1, 0x1983: 0x0109, 0x1984: 0x00c9, 0x1985: 0x04b1, + 0x1986: 0x0019, 0x1987: 0x02e9, 0x1988: 0x03d9, 0x1989: 0x02f1, 0x198a: 0x02f9, 0x198b: 0x03f1, + 0x198c: 0x0309, 0x198d: 0x00a9, 0x198e: 0x0311, 0x198f: 0x00b1, 0x1990: 0x0319, 0x1991: 0x0101, + 0x1992: 0x0321, 0x1993: 0x0329, 0x1994: 0x0051, 0x1995: 0x0339, 0x1996: 0x0751, 0x1997: 0x00b9, + 0x1998: 0x0089, 0x1999: 0x0341, 0x199a: 0x0349, 0x199b: 0x0391, 0x199c: 0x00c1, 0x199d: 0x0109, + 0x199e: 0x00c9, 0x199f: 0x04b1, 0x19a0: 0x0019, 0x19a1: 0x02e9, 0x19a2: 0x03d9, 0x19a3: 0x02f1, + 0x19a4: 0x02f9, 0x19a5: 0x03f1, 0x19a6: 0x0309, 0x19a7: 0x00a9, 0x19a8: 0x0311, 0x19a9: 0x00b1, + 0x19aa: 0x0319, 0x19ab: 0x0101, 0x19ac: 0x0321, 0x19ad: 0x0329, 0x19ae: 0x0051, 0x19af: 0x0339, + 0x19b0: 0x0751, 0x19b1: 0x00b9, 0x19b2: 0x0089, 0x19b3: 0x0341, 0x19b4: 0x0349, 0x19b5: 0x0391, + 0x19b6: 0x00c1, 0x19b7: 0x0109, 0x19b8: 0x00c9, 0x19b9: 0x04b1, 0x19ba: 0x0019, 0x19bb: 0x02e9, + 0x19bc: 0x03d9, 0x19bd: 0x02f1, 0x19be: 0x02f9, 0x19bf: 0x03f1, // Block 0x67, offset 0x19c0 - 0x19c0: 0x0f41, 0x19c1: 0x0259, 0x19c2: 0x0f51, 0x19c3: 0x0359, 0x19c4: 0x0f61, 0x19c5: 0x0040, - 0x19c6: 0x00d9, 0x19c7: 0x0040, 0x19c8: 0x0040, 0x19c9: 0x0040, 0x19ca: 0x01d9, 0x19cb: 0x0fa9, - 0x19cc: 0x0fb9, 0x19cd: 0x1089, 0x19ce: 0x0279, 0x19cf: 0x0369, 0x19d0: 0x0289, 0x19d1: 0x0040, - 0x19d2: 0x0039, 0x19d3: 0x0ee9, 0x19d4: 0x1159, 0x19d5: 0x0ef9, 0x19d6: 0x0f09, 0x19d7: 0x1199, - 0x19d8: 0x0f31, 0x19d9: 0x0249, 0x19da: 0x0f41, 0x19db: 0x0259, 0x19dc: 0x0f51, 0x19dd: 0x0359, - 0x19de: 0x0f61, 0x19df: 0x0f71, 0x19e0: 0x00d9, 0x19e1: 0x0f99, 0x19e2: 0x2039, 0x19e3: 0x0269, - 0x19e4: 0x01d9, 
0x19e5: 0x0fa9, 0x19e6: 0x0fb9, 0x19e7: 0x1089, 0x19e8: 0x0279, 0x19e9: 0x0369, - 0x19ea: 0x0289, 0x19eb: 0x13d1, 0x19ec: 0x0039, 0x19ed: 0x0ee9, 0x19ee: 0x1159, 0x19ef: 0x0ef9, - 0x19f0: 0x0f09, 0x19f1: 0x1199, 0x19f2: 0x0f31, 0x19f3: 0x0249, 0x19f4: 0x0f41, 0x19f5: 0x0259, - 0x19f6: 0x0f51, 0x19f7: 0x0359, 0x19f8: 0x0f61, 0x19f9: 0x0f71, 0x19fa: 0x00d9, 0x19fb: 0x0f99, - 0x19fc: 0x2039, 0x19fd: 0x0269, 0x19fe: 0x01d9, 0x19ff: 0x0fa9, + 0x19c0: 0x0309, 0x19c1: 0x00a9, 0x19c2: 0x0311, 0x19c3: 0x00b1, 0x19c4: 0x0319, 0x19c5: 0x0101, + 0x19c6: 0x0321, 0x19c7: 0x0329, 0x19c8: 0x0051, 0x19c9: 0x0339, 0x19ca: 0x0751, 0x19cb: 0x00b9, + 0x19cc: 0x0089, 0x19cd: 0x0341, 0x19ce: 0x0349, 0x19cf: 0x0391, 0x19d0: 0x00c1, 0x19d1: 0x0109, + 0x19d2: 0x00c9, 0x19d3: 0x04b1, 0x19d4: 0x0019, 0x19d5: 0x02e9, 0x19d6: 0x03d9, 0x19d7: 0x02f1, + 0x19d8: 0x02f9, 0x19d9: 0x03f1, 0x19da: 0x0309, 0x19db: 0x00a9, 0x19dc: 0x0311, 0x19dd: 0x00b1, + 0x19de: 0x0319, 0x19df: 0x0101, 0x19e0: 0x0321, 0x19e1: 0x0329, 0x19e2: 0x0051, 0x19e3: 0x0339, + 0x19e4: 0x0751, 0x19e5: 0x00b9, 0x19e6: 0x0089, 0x19e7: 0x0341, 0x19e8: 0x0349, 0x19e9: 0x0391, + 0x19ea: 0x00c1, 0x19eb: 0x0109, 0x19ec: 0x00c9, 0x19ed: 0x04b1, 0x19ee: 0x0019, 0x19ef: 0x02e9, + 0x19f0: 0x03d9, 0x19f1: 0x02f1, 0x19f2: 0x02f9, 0x19f3: 0x03f1, 0x19f4: 0x0309, 0x19f5: 0x00a9, + 0x19f6: 0x0311, 0x19f7: 0x00b1, 0x19f8: 0x0319, 0x19f9: 0x0101, 0x19fa: 0x0321, 0x19fb: 0x0329, + 0x19fc: 0x0051, 0x19fd: 0x0339, 0x19fe: 0x0751, 0x19ff: 0x00b9, // Block 0x68, offset 0x1a00 - 0x1a00: 0x0fb9, 0x1a01: 0x1089, 0x1a02: 0x0279, 0x1a03: 0x0369, 0x1a04: 0x0289, 0x1a05: 0x13d1, - 0x1a06: 0x0039, 0x1a07: 0x0ee9, 0x1a08: 0x1159, 0x1a09: 0x0ef9, 0x1a0a: 0x0f09, 0x1a0b: 0x1199, - 0x1a0c: 0x0f31, 0x1a0d: 0x0249, 0x1a0e: 0x0f41, 0x1a0f: 0x0259, 0x1a10: 0x0f51, 0x1a11: 0x0359, - 0x1a12: 0x0f61, 0x1a13: 0x0f71, 0x1a14: 0x00d9, 0x1a15: 0x0f99, 0x1a16: 0x2039, 0x1a17: 0x0269, - 0x1a18: 0x01d9, 0x1a19: 0x0fa9, 0x1a1a: 0x0fb9, 0x1a1b: 0x1089, 0x1a1c: 0x0279, 0x1a1d: 
0x0369, - 0x1a1e: 0x0289, 0x1a1f: 0x13d1, 0x1a20: 0x0039, 0x1a21: 0x0ee9, 0x1a22: 0x1159, 0x1a23: 0x0ef9, - 0x1a24: 0x0f09, 0x1a25: 0x1199, 0x1a26: 0x0f31, 0x1a27: 0x0249, 0x1a28: 0x0f41, 0x1a29: 0x0259, - 0x1a2a: 0x0f51, 0x1a2b: 0x0359, 0x1a2c: 0x0f61, 0x1a2d: 0x0f71, 0x1a2e: 0x00d9, 0x1a2f: 0x0f99, - 0x1a30: 0x2039, 0x1a31: 0x0269, 0x1a32: 0x01d9, 0x1a33: 0x0fa9, 0x1a34: 0x0fb9, 0x1a35: 0x1089, - 0x1a36: 0x0279, 0x1a37: 0x0369, 0x1a38: 0x0289, 0x1a39: 0x13d1, 0x1a3a: 0x0039, 0x1a3b: 0x0ee9, - 0x1a3c: 0x1159, 0x1a3d: 0x0ef9, 0x1a3e: 0x0f09, 0x1a3f: 0x1199, + 0x1a00: 0x0089, 0x1a01: 0x0341, 0x1a02: 0x0349, 0x1a03: 0x0391, 0x1a04: 0x00c1, 0x1a05: 0x0109, + 0x1a06: 0x00c9, 0x1a07: 0x04b1, 0x1a08: 0x0019, 0x1a09: 0x02e9, 0x1a0a: 0x03d9, 0x1a0b: 0x02f1, + 0x1a0c: 0x02f9, 0x1a0d: 0x03f1, 0x1a0e: 0x0309, 0x1a0f: 0x00a9, 0x1a10: 0x0311, 0x1a11: 0x00b1, + 0x1a12: 0x0319, 0x1a13: 0x0101, 0x1a14: 0x0321, 0x1a15: 0x0329, 0x1a16: 0x0051, 0x1a17: 0x0339, + 0x1a18: 0x0751, 0x1a19: 0x00b9, 0x1a1a: 0x0089, 0x1a1b: 0x0341, 0x1a1c: 0x0349, 0x1a1d: 0x0391, + 0x1a1e: 0x00c1, 0x1a1f: 0x0109, 0x1a20: 0x00c9, 0x1a21: 0x04b1, 0x1a22: 0x0019, 0x1a23: 0x02e9, + 0x1a24: 0x03d9, 0x1a25: 0x02f1, 0x1a26: 0x02f9, 0x1a27: 0x03f1, 0x1a28: 0x0309, 0x1a29: 0x00a9, + 0x1a2a: 0x0311, 0x1a2b: 0x00b1, 0x1a2c: 0x0319, 0x1a2d: 0x0101, 0x1a2e: 0x0321, 0x1a2f: 0x0329, + 0x1a30: 0x0051, 0x1a31: 0x0339, 0x1a32: 0x0751, 0x1a33: 0x00b9, 0x1a34: 0x0089, 0x1a35: 0x0341, + 0x1a36: 0x0349, 0x1a37: 0x0391, 0x1a38: 0x00c1, 0x1a39: 0x0109, 0x1a3a: 0x00c9, 0x1a3b: 0x04b1, + 0x1a3c: 0x0019, 0x1a3d: 0x02e9, 0x1a3e: 0x03d9, 0x1a3f: 0x02f1, // Block 0x69, offset 0x1a40 - 0x1a40: 0x0f31, 0x1a41: 0x0249, 0x1a42: 0x0f41, 0x1a43: 0x0259, 0x1a44: 0x0f51, 0x1a45: 0x0359, - 0x1a46: 0x0f61, 0x1a47: 0x0f71, 0x1a48: 0x00d9, 0x1a49: 0x0f99, 0x1a4a: 0x2039, 0x1a4b: 0x0269, - 0x1a4c: 0x01d9, 0x1a4d: 0x0fa9, 0x1a4e: 0x0fb9, 0x1a4f: 0x1089, 0x1a50: 0x0279, 0x1a51: 0x0369, - 0x1a52: 0x0289, 0x1a53: 0x13d1, 0x1a54: 0x0039, 0x1a55: 0x0ee9, 
0x1a56: 0x1159, 0x1a57: 0x0ef9, - 0x1a58: 0x0f09, 0x1a59: 0x1199, 0x1a5a: 0x0f31, 0x1a5b: 0x0249, 0x1a5c: 0x0f41, 0x1a5d: 0x0259, - 0x1a5e: 0x0f51, 0x1a5f: 0x0359, 0x1a60: 0x0f61, 0x1a61: 0x0f71, 0x1a62: 0x00d9, 0x1a63: 0x0f99, - 0x1a64: 0x2039, 0x1a65: 0x0269, 0x1a66: 0x01d9, 0x1a67: 0x0fa9, 0x1a68: 0x0fb9, 0x1a69: 0x1089, - 0x1a6a: 0x0279, 0x1a6b: 0x0369, 0x1a6c: 0x0289, 0x1a6d: 0x13d1, 0x1a6e: 0x0039, 0x1a6f: 0x0ee9, - 0x1a70: 0x1159, 0x1a71: 0x0ef9, 0x1a72: 0x0f09, 0x1a73: 0x1199, 0x1a74: 0x0f31, 0x1a75: 0x0249, - 0x1a76: 0x0f41, 0x1a77: 0x0259, 0x1a78: 0x0f51, 0x1a79: 0x0359, 0x1a7a: 0x0f61, 0x1a7b: 0x0f71, - 0x1a7c: 0x00d9, 0x1a7d: 0x0f99, 0x1a7e: 0x2039, 0x1a7f: 0x0269, + 0x1a40: 0x02f9, 0x1a41: 0x03f1, 0x1a42: 0x0309, 0x1a43: 0x00a9, 0x1a44: 0x0311, 0x1a45: 0x00b1, + 0x1a46: 0x0319, 0x1a47: 0x0101, 0x1a48: 0x0321, 0x1a49: 0x0329, 0x1a4a: 0x0051, 0x1a4b: 0x0339, + 0x1a4c: 0x0751, 0x1a4d: 0x00b9, 0x1a4e: 0x0089, 0x1a4f: 0x0341, 0x1a50: 0x0349, 0x1a51: 0x0391, + 0x1a52: 0x00c1, 0x1a53: 0x0109, 0x1a54: 0x00c9, 0x1a55: 0x04b1, 0x1a56: 0x0019, 0x1a57: 0x02e9, + 0x1a58: 0x03d9, 0x1a59: 0x02f1, 0x1a5a: 0x02f9, 0x1a5b: 0x03f1, 0x1a5c: 0x0309, 0x1a5d: 0x00a9, + 0x1a5e: 0x0311, 0x1a5f: 0x00b1, 0x1a60: 0x0319, 0x1a61: 0x0101, 0x1a62: 0x0321, 0x1a63: 0x0329, + 0x1a64: 0x0051, 0x1a65: 0x0339, 0x1a66: 0x0751, 0x1a67: 0x00b9, 0x1a68: 0x0089, 0x1a69: 0x0341, + 0x1a6a: 0x0349, 0x1a6b: 0x0391, 0x1a6c: 0x00c1, 0x1a6d: 0x0109, 0x1a6e: 0x00c9, 0x1a6f: 0x04b1, + 0x1a70: 0x0019, 0x1a71: 0x02e9, 0x1a72: 0x03d9, 0x1a73: 0x02f1, 0x1a74: 0x02f9, 0x1a75: 0x03f1, + 0x1a76: 0x0309, 0x1a77: 0x00a9, 0x1a78: 0x0311, 0x1a79: 0x00b1, 0x1a7a: 0x0319, 0x1a7b: 0x0101, + 0x1a7c: 0x0321, 0x1a7d: 0x0329, 0x1a7e: 0x0051, 0x1a7f: 0x0339, // Block 0x6a, offset 0x1a80 - 0x1a80: 0x01d9, 0x1a81: 0x0fa9, 0x1a82: 0x0fb9, 0x1a83: 0x1089, 0x1a84: 0x0279, 0x1a85: 0x0369, - 0x1a86: 0x0289, 0x1a87: 0x13d1, 0x1a88: 0x0039, 0x1a89: 0x0ee9, 0x1a8a: 0x1159, 0x1a8b: 0x0ef9, - 0x1a8c: 0x0f09, 0x1a8d: 0x1199, 0x1a8e: 
0x0f31, 0x1a8f: 0x0249, 0x1a90: 0x0f41, 0x1a91: 0x0259, - 0x1a92: 0x0f51, 0x1a93: 0x0359, 0x1a94: 0x0f61, 0x1a95: 0x0f71, 0x1a96: 0x00d9, 0x1a97: 0x0f99, - 0x1a98: 0x2039, 0x1a99: 0x0269, 0x1a9a: 0x01d9, 0x1a9b: 0x0fa9, 0x1a9c: 0x0fb9, 0x1a9d: 0x1089, - 0x1a9e: 0x0279, 0x1a9f: 0x0369, 0x1aa0: 0x0289, 0x1aa1: 0x13d1, 0x1aa2: 0x0039, 0x1aa3: 0x0ee9, - 0x1aa4: 0x1159, 0x1aa5: 0x0ef9, 0x1aa6: 0x0f09, 0x1aa7: 0x1199, 0x1aa8: 0x0f31, 0x1aa9: 0x0249, - 0x1aaa: 0x0f41, 0x1aab: 0x0259, 0x1aac: 0x0f51, 0x1aad: 0x0359, 0x1aae: 0x0f61, 0x1aaf: 0x0f71, - 0x1ab0: 0x00d9, 0x1ab1: 0x0f99, 0x1ab2: 0x2039, 0x1ab3: 0x0269, 0x1ab4: 0x01d9, 0x1ab5: 0x0fa9, - 0x1ab6: 0x0fb9, 0x1ab7: 0x1089, 0x1ab8: 0x0279, 0x1ab9: 0x0369, 0x1aba: 0x0289, 0x1abb: 0x13d1, - 0x1abc: 0x0039, 0x1abd: 0x0ee9, 0x1abe: 0x1159, 0x1abf: 0x0ef9, + 0x1a80: 0x0751, 0x1a81: 0x00b9, 0x1a82: 0x0089, 0x1a83: 0x0341, 0x1a84: 0x0349, 0x1a85: 0x0391, + 0x1a86: 0x00c1, 0x1a87: 0x0109, 0x1a88: 0x00c9, 0x1a89: 0x04b1, 0x1a8a: 0x0019, 0x1a8b: 0x02e9, + 0x1a8c: 0x03d9, 0x1a8d: 0x02f1, 0x1a8e: 0x02f9, 0x1a8f: 0x03f1, 0x1a90: 0x0309, 0x1a91: 0x00a9, + 0x1a92: 0x0311, 0x1a93: 0x00b1, 0x1a94: 0x0319, 0x1a95: 0x0101, 0x1a96: 0x0321, 0x1a97: 0x0329, + 0x1a98: 0x0051, 0x1a99: 0x0339, 0x1a9a: 0x0751, 0x1a9b: 0x00b9, 0x1a9c: 0x0089, 0x1a9d: 0x0341, + 0x1a9e: 0x0349, 0x1a9f: 0x0391, 0x1aa0: 0x00c1, 0x1aa1: 0x0109, 0x1aa2: 0x00c9, 0x1aa3: 0x04b1, + 0x1aa4: 0x2279, 0x1aa5: 0x2281, 0x1aa6: 0x0040, 0x1aa7: 0x0040, 0x1aa8: 0x2289, 0x1aa9: 0x0399, + 0x1aaa: 0x03a1, 0x1aab: 0x03a9, 0x1aac: 0x2291, 0x1aad: 0x2299, 0x1aae: 0x22a1, 0x1aaf: 0x04d1, + 0x1ab0: 0x05f9, 0x1ab1: 0x22a9, 0x1ab2: 0x22b1, 0x1ab3: 0x22b9, 0x1ab4: 0x22c1, 0x1ab5: 0x22c9, + 0x1ab6: 0x22d1, 0x1ab7: 0x0799, 0x1ab8: 0x03c1, 0x1ab9: 0x04d1, 0x1aba: 0x22d9, 0x1abb: 0x22e1, + 0x1abc: 0x22e9, 0x1abd: 0x03b1, 0x1abe: 0x03b9, 0x1abf: 0x22f1, // Block 0x6b, offset 0x1ac0 - 0x1ac0: 0x0f09, 0x1ac1: 0x1199, 0x1ac2: 0x0f31, 0x1ac3: 0x0249, 0x1ac4: 0x0f41, 0x1ac5: 0x0259, - 0x1ac6: 0x0f51, 
0x1ac7: 0x0359, 0x1ac8: 0x0f61, 0x1ac9: 0x0f71, 0x1aca: 0x00d9, 0x1acb: 0x0f99, - 0x1acc: 0x2039, 0x1acd: 0x0269, 0x1ace: 0x01d9, 0x1acf: 0x0fa9, 0x1ad0: 0x0fb9, 0x1ad1: 0x1089, - 0x1ad2: 0x0279, 0x1ad3: 0x0369, 0x1ad4: 0x0289, 0x1ad5: 0x13d1, 0x1ad6: 0x0039, 0x1ad7: 0x0ee9, - 0x1ad8: 0x1159, 0x1ad9: 0x0ef9, 0x1ada: 0x0f09, 0x1adb: 0x1199, 0x1adc: 0x0f31, 0x1add: 0x0249, - 0x1ade: 0x0f41, 0x1adf: 0x0259, 0x1ae0: 0x0f51, 0x1ae1: 0x0359, 0x1ae2: 0x0f61, 0x1ae3: 0x0f71, - 0x1ae4: 0x00d9, 0x1ae5: 0x0f99, 0x1ae6: 0x2039, 0x1ae7: 0x0269, 0x1ae8: 0x01d9, 0x1ae9: 0x0fa9, - 0x1aea: 0x0fb9, 0x1aeb: 0x1089, 0x1aec: 0x0279, 0x1aed: 0x0369, 0x1aee: 0x0289, 0x1aef: 0x13d1, - 0x1af0: 0x0039, 0x1af1: 0x0ee9, 0x1af2: 0x1159, 0x1af3: 0x0ef9, 0x1af4: 0x0f09, 0x1af5: 0x1199, - 0x1af6: 0x0f31, 0x1af7: 0x0249, 0x1af8: 0x0f41, 0x1af9: 0x0259, 0x1afa: 0x0f51, 0x1afb: 0x0359, - 0x1afc: 0x0f61, 0x1afd: 0x0f71, 0x1afe: 0x00d9, 0x1aff: 0x0f99, + 0x1ac0: 0x0769, 0x1ac1: 0x22f9, 0x1ac2: 0x2289, 0x1ac3: 0x0399, 0x1ac4: 0x03a1, 0x1ac5: 0x03a9, + 0x1ac6: 0x2291, 0x1ac7: 0x2299, 0x1ac8: 0x22a1, 0x1ac9: 0x04d1, 0x1aca: 0x05f9, 0x1acb: 0x22a9, + 0x1acc: 0x22b1, 0x1acd: 0x22b9, 0x1ace: 0x22c1, 0x1acf: 0x22c9, 0x1ad0: 0x22d1, 0x1ad1: 0x0799, + 0x1ad2: 0x03c1, 0x1ad3: 0x22d9, 0x1ad4: 0x22d9, 0x1ad5: 0x22e1, 0x1ad6: 0x22e9, 0x1ad7: 0x03b1, + 0x1ad8: 0x03b9, 0x1ad9: 0x22f1, 0x1ada: 0x0769, 0x1adb: 0x2301, 0x1adc: 0x2291, 0x1add: 0x04d1, + 0x1ade: 0x22a9, 0x1adf: 0x03b1, 0x1ae0: 0x03c1, 0x1ae1: 0x0799, 0x1ae2: 0x2289, 0x1ae3: 0x0399, + 0x1ae4: 0x03a1, 0x1ae5: 0x03a9, 0x1ae6: 0x2291, 0x1ae7: 0x2299, 0x1ae8: 0x22a1, 0x1ae9: 0x04d1, + 0x1aea: 0x05f9, 0x1aeb: 0x22a9, 0x1aec: 0x22b1, 0x1aed: 0x22b9, 0x1aee: 0x22c1, 0x1aef: 0x22c9, + 0x1af0: 0x22d1, 0x1af1: 0x0799, 0x1af2: 0x03c1, 0x1af3: 0x04d1, 0x1af4: 0x22d9, 0x1af5: 0x22e1, + 0x1af6: 0x22e9, 0x1af7: 0x03b1, 0x1af8: 0x03b9, 0x1af9: 0x22f1, 0x1afa: 0x0769, 0x1afb: 0x22f9, + 0x1afc: 0x2289, 0x1afd: 0x0399, 0x1afe: 0x03a1, 0x1aff: 0x03a9, // Block 0x6c, offset 
0x1b00 - 0x1b00: 0x2039, 0x1b01: 0x0269, 0x1b02: 0x01d9, 0x1b03: 0x0fa9, 0x1b04: 0x0fb9, 0x1b05: 0x1089, - 0x1b06: 0x0279, 0x1b07: 0x0369, 0x1b08: 0x0289, 0x1b09: 0x13d1, 0x1b0a: 0x0039, 0x1b0b: 0x0ee9, - 0x1b0c: 0x1159, 0x1b0d: 0x0ef9, 0x1b0e: 0x0f09, 0x1b0f: 0x1199, 0x1b10: 0x0f31, 0x1b11: 0x0249, - 0x1b12: 0x0f41, 0x1b13: 0x0259, 0x1b14: 0x0f51, 0x1b15: 0x0359, 0x1b16: 0x0f61, 0x1b17: 0x0f71, - 0x1b18: 0x00d9, 0x1b19: 0x0f99, 0x1b1a: 0x2039, 0x1b1b: 0x0269, 0x1b1c: 0x01d9, 0x1b1d: 0x0fa9, - 0x1b1e: 0x0fb9, 0x1b1f: 0x1089, 0x1b20: 0x0279, 0x1b21: 0x0369, 0x1b22: 0x0289, 0x1b23: 0x13d1, - 0x1b24: 0xbad1, 0x1b25: 0xbae9, 0x1b26: 0x0040, 0x1b27: 0x0040, 0x1b28: 0xbb01, 0x1b29: 0x1099, - 0x1b2a: 0x10b1, 0x1b2b: 0x10c9, 0x1b2c: 0xbb19, 0x1b2d: 0xbb31, 0x1b2e: 0xbb49, 0x1b2f: 0x1429, - 0x1b30: 0x1a31, 0x1b31: 0xbb61, 0x1b32: 0xbb79, 0x1b33: 0xbb91, 0x1b34: 0xbba9, 0x1b35: 0xbbc1, - 0x1b36: 0xbbd9, 0x1b37: 0x2109, 0x1b38: 0x1111, 0x1b39: 0x1429, 0x1b3a: 0xbbf1, 0x1b3b: 0xbc09, - 0x1b3c: 0xbc21, 0x1b3d: 0x10e1, 0x1b3e: 0x10f9, 0x1b3f: 0xbc39, + 0x1b00: 0x2291, 0x1b01: 0x2299, 0x1b02: 0x22a1, 0x1b03: 0x04d1, 0x1b04: 0x05f9, 0x1b05: 0x22a9, + 0x1b06: 0x22b1, 0x1b07: 0x22b9, 0x1b08: 0x22c1, 0x1b09: 0x22c9, 0x1b0a: 0x22d1, 0x1b0b: 0x0799, + 0x1b0c: 0x03c1, 0x1b0d: 0x22d9, 0x1b0e: 0x22d9, 0x1b0f: 0x22e1, 0x1b10: 0x22e9, 0x1b11: 0x03b1, + 0x1b12: 0x03b9, 0x1b13: 0x22f1, 0x1b14: 0x0769, 0x1b15: 0x2301, 0x1b16: 0x2291, 0x1b17: 0x04d1, + 0x1b18: 0x22a9, 0x1b19: 0x03b1, 0x1b1a: 0x03c1, 0x1b1b: 0x0799, 0x1b1c: 0x2289, 0x1b1d: 0x0399, + 0x1b1e: 0x03a1, 0x1b1f: 0x03a9, 0x1b20: 0x2291, 0x1b21: 0x2299, 0x1b22: 0x22a1, 0x1b23: 0x04d1, + 0x1b24: 0x05f9, 0x1b25: 0x22a9, 0x1b26: 0x22b1, 0x1b27: 0x22b9, 0x1b28: 0x22c1, 0x1b29: 0x22c9, + 0x1b2a: 0x22d1, 0x1b2b: 0x0799, 0x1b2c: 0x03c1, 0x1b2d: 0x04d1, 0x1b2e: 0x22d9, 0x1b2f: 0x22e1, + 0x1b30: 0x22e9, 0x1b31: 0x03b1, 0x1b32: 0x03b9, 0x1b33: 0x22f1, 0x1b34: 0x0769, 0x1b35: 0x22f9, + 0x1b36: 0x2289, 0x1b37: 0x0399, 0x1b38: 0x03a1, 0x1b39: 
0x03a9, 0x1b3a: 0x2291, 0x1b3b: 0x2299, + 0x1b3c: 0x22a1, 0x1b3d: 0x04d1, 0x1b3e: 0x05f9, 0x1b3f: 0x22a9, // Block 0x6d, offset 0x1b40 - 0x1b40: 0x2079, 0x1b41: 0xbc51, 0x1b42: 0xbb01, 0x1b43: 0x1099, 0x1b44: 0x10b1, 0x1b45: 0x10c9, - 0x1b46: 0xbb19, 0x1b47: 0xbb31, 0x1b48: 0xbb49, 0x1b49: 0x1429, 0x1b4a: 0x1a31, 0x1b4b: 0xbb61, - 0x1b4c: 0xbb79, 0x1b4d: 0xbb91, 0x1b4e: 0xbba9, 0x1b4f: 0xbbc1, 0x1b50: 0xbbd9, 0x1b51: 0x2109, - 0x1b52: 0x1111, 0x1b53: 0xbbf1, 0x1b54: 0xbbf1, 0x1b55: 0xbc09, 0x1b56: 0xbc21, 0x1b57: 0x10e1, - 0x1b58: 0x10f9, 0x1b59: 0xbc39, 0x1b5a: 0x2079, 0x1b5b: 0xbc71, 0x1b5c: 0xbb19, 0x1b5d: 0x1429, - 0x1b5e: 0xbb61, 0x1b5f: 0x10e1, 0x1b60: 0x1111, 0x1b61: 0x2109, 0x1b62: 0xbb01, 0x1b63: 0x1099, - 0x1b64: 0x10b1, 0x1b65: 0x10c9, 0x1b66: 0xbb19, 0x1b67: 0xbb31, 0x1b68: 0xbb49, 0x1b69: 0x1429, - 0x1b6a: 0x1a31, 0x1b6b: 0xbb61, 0x1b6c: 0xbb79, 0x1b6d: 0xbb91, 0x1b6e: 0xbba9, 0x1b6f: 0xbbc1, - 0x1b70: 0xbbd9, 0x1b71: 0x2109, 0x1b72: 0x1111, 0x1b73: 0x1429, 0x1b74: 0xbbf1, 0x1b75: 0xbc09, - 0x1b76: 0xbc21, 0x1b77: 0x10e1, 0x1b78: 0x10f9, 0x1b79: 0xbc39, 0x1b7a: 0x2079, 0x1b7b: 0xbc51, - 0x1b7c: 0xbb01, 0x1b7d: 0x1099, 0x1b7e: 0x10b1, 0x1b7f: 0x10c9, + 0x1b40: 0x22b1, 0x1b41: 0x22b9, 0x1b42: 0x22c1, 0x1b43: 0x22c9, 0x1b44: 0x22d1, 0x1b45: 0x0799, + 0x1b46: 0x03c1, 0x1b47: 0x22d9, 0x1b48: 0x22d9, 0x1b49: 0x22e1, 0x1b4a: 0x22e9, 0x1b4b: 0x03b1, + 0x1b4c: 0x03b9, 0x1b4d: 0x22f1, 0x1b4e: 0x0769, 0x1b4f: 0x2301, 0x1b50: 0x2291, 0x1b51: 0x04d1, + 0x1b52: 0x22a9, 0x1b53: 0x03b1, 0x1b54: 0x03c1, 0x1b55: 0x0799, 0x1b56: 0x2289, 0x1b57: 0x0399, + 0x1b58: 0x03a1, 0x1b59: 0x03a9, 0x1b5a: 0x2291, 0x1b5b: 0x2299, 0x1b5c: 0x22a1, 0x1b5d: 0x04d1, + 0x1b5e: 0x05f9, 0x1b5f: 0x22a9, 0x1b60: 0x22b1, 0x1b61: 0x22b9, 0x1b62: 0x22c1, 0x1b63: 0x22c9, + 0x1b64: 0x22d1, 0x1b65: 0x0799, 0x1b66: 0x03c1, 0x1b67: 0x04d1, 0x1b68: 0x22d9, 0x1b69: 0x22e1, + 0x1b6a: 0x22e9, 0x1b6b: 0x03b1, 0x1b6c: 0x03b9, 0x1b6d: 0x22f1, 0x1b6e: 0x0769, 0x1b6f: 0x22f9, + 0x1b70: 0x2289, 0x1b71: 0x0399, 
0x1b72: 0x03a1, 0x1b73: 0x03a9, 0x1b74: 0x2291, 0x1b75: 0x2299, + 0x1b76: 0x22a1, 0x1b77: 0x04d1, 0x1b78: 0x05f9, 0x1b79: 0x22a9, 0x1b7a: 0x22b1, 0x1b7b: 0x22b9, + 0x1b7c: 0x22c1, 0x1b7d: 0x22c9, 0x1b7e: 0x22d1, 0x1b7f: 0x0799, // Block 0x6e, offset 0x1b80 - 0x1b80: 0xbb19, 0x1b81: 0xbb31, 0x1b82: 0xbb49, 0x1b83: 0x1429, 0x1b84: 0x1a31, 0x1b85: 0xbb61, - 0x1b86: 0xbb79, 0x1b87: 0xbb91, 0x1b88: 0xbba9, 0x1b89: 0xbbc1, 0x1b8a: 0xbbd9, 0x1b8b: 0x2109, - 0x1b8c: 0x1111, 0x1b8d: 0xbbf1, 0x1b8e: 0xbbf1, 0x1b8f: 0xbc09, 0x1b90: 0xbc21, 0x1b91: 0x10e1, - 0x1b92: 0x10f9, 0x1b93: 0xbc39, 0x1b94: 0x2079, 0x1b95: 0xbc71, 0x1b96: 0xbb19, 0x1b97: 0x1429, - 0x1b98: 0xbb61, 0x1b99: 0x10e1, 0x1b9a: 0x1111, 0x1b9b: 0x2109, 0x1b9c: 0xbb01, 0x1b9d: 0x1099, - 0x1b9e: 0x10b1, 0x1b9f: 0x10c9, 0x1ba0: 0xbb19, 0x1ba1: 0xbb31, 0x1ba2: 0xbb49, 0x1ba3: 0x1429, - 0x1ba4: 0x1a31, 0x1ba5: 0xbb61, 0x1ba6: 0xbb79, 0x1ba7: 0xbb91, 0x1ba8: 0xbba9, 0x1ba9: 0xbbc1, - 0x1baa: 0xbbd9, 0x1bab: 0x2109, 0x1bac: 0x1111, 0x1bad: 0x1429, 0x1bae: 0xbbf1, 0x1baf: 0xbc09, - 0x1bb0: 0xbc21, 0x1bb1: 0x10e1, 0x1bb2: 0x10f9, 0x1bb3: 0xbc39, 0x1bb4: 0x2079, 0x1bb5: 0xbc51, - 0x1bb6: 0xbb01, 0x1bb7: 0x1099, 0x1bb8: 0x10b1, 0x1bb9: 0x10c9, 0x1bba: 0xbb19, 0x1bbb: 0xbb31, - 0x1bbc: 0xbb49, 0x1bbd: 0x1429, 0x1bbe: 0x1a31, 0x1bbf: 0xbb61, + 0x1b80: 0x03c1, 0x1b81: 0x22d9, 0x1b82: 0x22d9, 0x1b83: 0x22e1, 0x1b84: 0x22e9, 0x1b85: 0x03b1, + 0x1b86: 0x03b9, 0x1b87: 0x22f1, 0x1b88: 0x0769, 0x1b89: 0x2301, 0x1b8a: 0x2291, 0x1b8b: 0x04d1, + 0x1b8c: 0x22a9, 0x1b8d: 0x03b1, 0x1b8e: 0x03c1, 0x1b8f: 0x0799, 0x1b90: 0x2289, 0x1b91: 0x0399, + 0x1b92: 0x03a1, 0x1b93: 0x03a9, 0x1b94: 0x2291, 0x1b95: 0x2299, 0x1b96: 0x22a1, 0x1b97: 0x04d1, + 0x1b98: 0x05f9, 0x1b99: 0x22a9, 0x1b9a: 0x22b1, 0x1b9b: 0x22b9, 0x1b9c: 0x22c1, 0x1b9d: 0x22c9, + 0x1b9e: 0x22d1, 0x1b9f: 0x0799, 0x1ba0: 0x03c1, 0x1ba1: 0x04d1, 0x1ba2: 0x22d9, 0x1ba3: 0x22e1, + 0x1ba4: 0x22e9, 0x1ba5: 0x03b1, 0x1ba6: 0x03b9, 0x1ba7: 0x22f1, 0x1ba8: 0x0769, 0x1ba9: 0x22f9, + 0x1baa: 
0x2289, 0x1bab: 0x0399, 0x1bac: 0x03a1, 0x1bad: 0x03a9, 0x1bae: 0x2291, 0x1baf: 0x2299, + 0x1bb0: 0x22a1, 0x1bb1: 0x04d1, 0x1bb2: 0x05f9, 0x1bb3: 0x22a9, 0x1bb4: 0x22b1, 0x1bb5: 0x22b9, + 0x1bb6: 0x22c1, 0x1bb7: 0x22c9, 0x1bb8: 0x22d1, 0x1bb9: 0x0799, 0x1bba: 0x03c1, 0x1bbb: 0x22d9, + 0x1bbc: 0x22d9, 0x1bbd: 0x22e1, 0x1bbe: 0x22e9, 0x1bbf: 0x03b1, // Block 0x6f, offset 0x1bc0 - 0x1bc0: 0xbb79, 0x1bc1: 0xbb91, 0x1bc2: 0xbba9, 0x1bc3: 0xbbc1, 0x1bc4: 0xbbd9, 0x1bc5: 0x2109, - 0x1bc6: 0x1111, 0x1bc7: 0xbbf1, 0x1bc8: 0xbbf1, 0x1bc9: 0xbc09, 0x1bca: 0xbc21, 0x1bcb: 0x10e1, - 0x1bcc: 0x10f9, 0x1bcd: 0xbc39, 0x1bce: 0x2079, 0x1bcf: 0xbc71, 0x1bd0: 0xbb19, 0x1bd1: 0x1429, - 0x1bd2: 0xbb61, 0x1bd3: 0x10e1, 0x1bd4: 0x1111, 0x1bd5: 0x2109, 0x1bd6: 0xbb01, 0x1bd7: 0x1099, - 0x1bd8: 0x10b1, 0x1bd9: 0x10c9, 0x1bda: 0xbb19, 0x1bdb: 0xbb31, 0x1bdc: 0xbb49, 0x1bdd: 0x1429, - 0x1bde: 0x1a31, 0x1bdf: 0xbb61, 0x1be0: 0xbb79, 0x1be1: 0xbb91, 0x1be2: 0xbba9, 0x1be3: 0xbbc1, - 0x1be4: 0xbbd9, 0x1be5: 0x2109, 0x1be6: 0x1111, 0x1be7: 0x1429, 0x1be8: 0xbbf1, 0x1be9: 0xbc09, - 0x1bea: 0xbc21, 0x1beb: 0x10e1, 0x1bec: 0x10f9, 0x1bed: 0xbc39, 0x1bee: 0x2079, 0x1bef: 0xbc51, - 0x1bf0: 0xbb01, 0x1bf1: 0x1099, 0x1bf2: 0x10b1, 0x1bf3: 0x10c9, 0x1bf4: 0xbb19, 0x1bf5: 0xbb31, - 0x1bf6: 0xbb49, 0x1bf7: 0x1429, 0x1bf8: 0x1a31, 0x1bf9: 0xbb61, 0x1bfa: 0xbb79, 0x1bfb: 0xbb91, - 0x1bfc: 0xbba9, 0x1bfd: 0xbbc1, 0x1bfe: 0xbbd9, 0x1bff: 0x2109, + 0x1bc0: 0x03b9, 0x1bc1: 0x22f1, 0x1bc2: 0x0769, 0x1bc3: 0x2301, 0x1bc4: 0x2291, 0x1bc5: 0x04d1, + 0x1bc6: 0x22a9, 0x1bc7: 0x03b1, 0x1bc8: 0x03c1, 0x1bc9: 0x0799, 0x1bca: 0x2309, 0x1bcb: 0x2309, + 0x1bcc: 0x0040, 0x1bcd: 0x0040, 0x1bce: 0x06e1, 0x1bcf: 0x0049, 0x1bd0: 0x0029, 0x1bd1: 0x0031, + 0x1bd2: 0x06e9, 0x1bd3: 0x06f1, 0x1bd4: 0x06f9, 0x1bd5: 0x0701, 0x1bd6: 0x0709, 0x1bd7: 0x0711, + 0x1bd8: 0x06e1, 0x1bd9: 0x0049, 0x1bda: 0x0029, 0x1bdb: 0x0031, 0x1bdc: 0x06e9, 0x1bdd: 0x06f1, + 0x1bde: 0x06f9, 0x1bdf: 0x0701, 0x1be0: 0x0709, 0x1be1: 0x0711, 0x1be2: 0x06e1, 
0x1be3: 0x0049, + 0x1be4: 0x0029, 0x1be5: 0x0031, 0x1be6: 0x06e9, 0x1be7: 0x06f1, 0x1be8: 0x06f9, 0x1be9: 0x0701, + 0x1bea: 0x0709, 0x1beb: 0x0711, 0x1bec: 0x06e1, 0x1bed: 0x0049, 0x1bee: 0x0029, 0x1bef: 0x0031, + 0x1bf0: 0x06e9, 0x1bf1: 0x06f1, 0x1bf2: 0x06f9, 0x1bf3: 0x0701, 0x1bf4: 0x0709, 0x1bf5: 0x0711, + 0x1bf6: 0x06e1, 0x1bf7: 0x0049, 0x1bf8: 0x0029, 0x1bf9: 0x0031, 0x1bfa: 0x06e9, 0x1bfb: 0x06f1, + 0x1bfc: 0x06f9, 0x1bfd: 0x0701, 0x1bfe: 0x0709, 0x1bff: 0x0711, // Block 0x70, offset 0x1c00 - 0x1c00: 0x1111, 0x1c01: 0xbbf1, 0x1c02: 0xbbf1, 0x1c03: 0xbc09, 0x1c04: 0xbc21, 0x1c05: 0x10e1, - 0x1c06: 0x10f9, 0x1c07: 0xbc39, 0x1c08: 0x2079, 0x1c09: 0xbc71, 0x1c0a: 0xbb19, 0x1c0b: 0x1429, - 0x1c0c: 0xbb61, 0x1c0d: 0x10e1, 0x1c0e: 0x1111, 0x1c0f: 0x2109, 0x1c10: 0xbb01, 0x1c11: 0x1099, - 0x1c12: 0x10b1, 0x1c13: 0x10c9, 0x1c14: 0xbb19, 0x1c15: 0xbb31, 0x1c16: 0xbb49, 0x1c17: 0x1429, - 0x1c18: 0x1a31, 0x1c19: 0xbb61, 0x1c1a: 0xbb79, 0x1c1b: 0xbb91, 0x1c1c: 0xbba9, 0x1c1d: 0xbbc1, - 0x1c1e: 0xbbd9, 0x1c1f: 0x2109, 0x1c20: 0x1111, 0x1c21: 0x1429, 0x1c22: 0xbbf1, 0x1c23: 0xbc09, - 0x1c24: 0xbc21, 0x1c25: 0x10e1, 0x1c26: 0x10f9, 0x1c27: 0xbc39, 0x1c28: 0x2079, 0x1c29: 0xbc51, - 0x1c2a: 0xbb01, 0x1c2b: 0x1099, 0x1c2c: 0x10b1, 0x1c2d: 0x10c9, 0x1c2e: 0xbb19, 0x1c2f: 0xbb31, - 0x1c30: 0xbb49, 0x1c31: 0x1429, 0x1c32: 0x1a31, 0x1c33: 0xbb61, 0x1c34: 0xbb79, 0x1c35: 0xbb91, - 0x1c36: 0xbba9, 0x1c37: 0xbbc1, 0x1c38: 0xbbd9, 0x1c39: 0x2109, 0x1c3a: 0x1111, 0x1c3b: 0xbbf1, - 0x1c3c: 0xbbf1, 0x1c3d: 0xbc09, 0x1c3e: 0xbc21, 0x1c3f: 0x10e1, + 0x1c00: 0xe115, 0x1c01: 0xe115, 0x1c02: 0xe135, 0x1c03: 0xe135, 0x1c04: 0xe115, 0x1c05: 0xe115, + 0x1c06: 0xe175, 0x1c07: 0xe175, 0x1c08: 0xe115, 0x1c09: 0xe115, 0x1c0a: 0xe135, 0x1c0b: 0xe135, + 0x1c0c: 0xe115, 0x1c0d: 0xe115, 0x1c0e: 0xe1f5, 0x1c0f: 0xe1f5, 0x1c10: 0xe115, 0x1c11: 0xe115, + 0x1c12: 0xe135, 0x1c13: 0xe135, 0x1c14: 0xe115, 0x1c15: 0xe115, 0x1c16: 0xe175, 0x1c17: 0xe175, + 0x1c18: 0xe115, 0x1c19: 0xe115, 0x1c1a: 0xe135, 0x1c1b: 
0xe135, 0x1c1c: 0xe115, 0x1c1d: 0xe115, + 0x1c1e: 0x8b3d, 0x1c1f: 0x8b3d, 0x1c20: 0x04b5, 0x1c21: 0x04b5, 0x1c22: 0x0a08, 0x1c23: 0x0a08, + 0x1c24: 0x0a08, 0x1c25: 0x0a08, 0x1c26: 0x0a08, 0x1c27: 0x0a08, 0x1c28: 0x0a08, 0x1c29: 0x0a08, + 0x1c2a: 0x0a08, 0x1c2b: 0x0a08, 0x1c2c: 0x0a08, 0x1c2d: 0x0a08, 0x1c2e: 0x0a08, 0x1c2f: 0x0a08, + 0x1c30: 0x0a08, 0x1c31: 0x0a08, 0x1c32: 0x0a08, 0x1c33: 0x0a08, 0x1c34: 0x0a08, 0x1c35: 0x0a08, + 0x1c36: 0x0a08, 0x1c37: 0x0a08, 0x1c38: 0x0a08, 0x1c39: 0x0a08, 0x1c3a: 0x0a08, 0x1c3b: 0x0a08, + 0x1c3c: 0x0a08, 0x1c3d: 0x0a08, 0x1c3e: 0x0a08, 0x1c3f: 0x0a08, // Block 0x71, offset 0x1c40 - 0x1c40: 0x10f9, 0x1c41: 0xbc39, 0x1c42: 0x2079, 0x1c43: 0xbc71, 0x1c44: 0xbb19, 0x1c45: 0x1429, - 0x1c46: 0xbb61, 0x1c47: 0x10e1, 0x1c48: 0x1111, 0x1c49: 0x2109, 0x1c4a: 0xbc91, 0x1c4b: 0xbc91, - 0x1c4c: 0x0040, 0x1c4d: 0x0040, 0x1c4e: 0x1f41, 0x1c4f: 0x00c9, 0x1c50: 0x0069, 0x1c51: 0x0079, - 0x1c52: 0x1f51, 0x1c53: 0x1f61, 0x1c54: 0x1f71, 0x1c55: 0x1f81, 0x1c56: 0x1f91, 0x1c57: 0x1fa1, - 0x1c58: 0x1f41, 0x1c59: 0x00c9, 0x1c5a: 0x0069, 0x1c5b: 0x0079, 0x1c5c: 0x1f51, 0x1c5d: 0x1f61, - 0x1c5e: 0x1f71, 0x1c5f: 0x1f81, 0x1c60: 0x1f91, 0x1c61: 0x1fa1, 0x1c62: 0x1f41, 0x1c63: 0x00c9, - 0x1c64: 0x0069, 0x1c65: 0x0079, 0x1c66: 0x1f51, 0x1c67: 0x1f61, 0x1c68: 0x1f71, 0x1c69: 0x1f81, - 0x1c6a: 0x1f91, 0x1c6b: 0x1fa1, 0x1c6c: 0x1f41, 0x1c6d: 0x00c9, 0x1c6e: 0x0069, 0x1c6f: 0x0079, - 0x1c70: 0x1f51, 0x1c71: 0x1f61, 0x1c72: 0x1f71, 0x1c73: 0x1f81, 0x1c74: 0x1f91, 0x1c75: 0x1fa1, - 0x1c76: 0x1f41, 0x1c77: 0x00c9, 0x1c78: 0x0069, 0x1c79: 0x0079, 0x1c7a: 0x1f51, 0x1c7b: 0x1f61, - 0x1c7c: 0x1f71, 0x1c7d: 0x1f81, 0x1c7e: 0x1f91, 0x1c7f: 0x1fa1, + 0x1c40: 0x20b1, 0x1c41: 0x20b9, 0x1c42: 0x20d9, 0x1c43: 0x20f1, 0x1c44: 0x0040, 0x1c45: 0x2189, + 0x1c46: 0x2109, 0x1c47: 0x20e1, 0x1c48: 0x2131, 0x1c49: 0x2191, 0x1c4a: 0x2161, 0x1c4b: 0x2169, + 0x1c4c: 0x2171, 0x1c4d: 0x2179, 0x1c4e: 0x2111, 0x1c4f: 0x2141, 0x1c50: 0x2151, 0x1c51: 0x2121, + 0x1c52: 0x2159, 0x1c53: 0x2101, 
0x1c54: 0x2119, 0x1c55: 0x20c9, 0x1c56: 0x20d1, 0x1c57: 0x20e9, + 0x1c58: 0x20f9, 0x1c59: 0x2129, 0x1c5a: 0x2139, 0x1c5b: 0x2149, 0x1c5c: 0x2311, 0x1c5d: 0x1689, + 0x1c5e: 0x2319, 0x1c5f: 0x2321, 0x1c60: 0x0040, 0x1c61: 0x20b9, 0x1c62: 0x20d9, 0x1c63: 0x0040, + 0x1c64: 0x2181, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0x20e1, 0x1c68: 0x0040, 0x1c69: 0x2191, + 0x1c6a: 0x2161, 0x1c6b: 0x2169, 0x1c6c: 0x2171, 0x1c6d: 0x2179, 0x1c6e: 0x2111, 0x1c6f: 0x2141, + 0x1c70: 0x2151, 0x1c71: 0x2121, 0x1c72: 0x2159, 0x1c73: 0x0040, 0x1c74: 0x2119, 0x1c75: 0x20c9, + 0x1c76: 0x20d1, 0x1c77: 0x20e9, 0x1c78: 0x0040, 0x1c79: 0x2129, 0x1c7a: 0x0040, 0x1c7b: 0x2149, + 0x1c7c: 0x0040, 0x1c7d: 0x0040, 0x1c7e: 0x0040, 0x1c7f: 0x0040, // Block 0x72, offset 0x1c80 - 0x1c80: 0xe115, 0x1c81: 0xe115, 0x1c82: 0xe135, 0x1c83: 0xe135, 0x1c84: 0xe115, 0x1c85: 0xe115, - 0x1c86: 0xe175, 0x1c87: 0xe175, 0x1c88: 0xe115, 0x1c89: 0xe115, 0x1c8a: 0xe135, 0x1c8b: 0xe135, - 0x1c8c: 0xe115, 0x1c8d: 0xe115, 0x1c8e: 0xe1f5, 0x1c8f: 0xe1f5, 0x1c90: 0xe115, 0x1c91: 0xe115, - 0x1c92: 0xe135, 0x1c93: 0xe135, 0x1c94: 0xe115, 0x1c95: 0xe115, 0x1c96: 0xe175, 0x1c97: 0xe175, - 0x1c98: 0xe115, 0x1c99: 0xe115, 0x1c9a: 0xe135, 0x1c9b: 0xe135, 0x1c9c: 0xe115, 0x1c9d: 0xe115, - 0x1c9e: 0x8b3d, 0x1c9f: 0x8b3d, 0x1ca0: 0x04b5, 0x1ca1: 0x04b5, 0x1ca2: 0x0a08, 0x1ca3: 0x0a08, - 0x1ca4: 0x0a08, 0x1ca5: 0x0a08, 0x1ca6: 0x0a08, 0x1ca7: 0x0a08, 0x1ca8: 0x0a08, 0x1ca9: 0x0a08, - 0x1caa: 0x0a08, 0x1cab: 0x0a08, 0x1cac: 0x0a08, 0x1cad: 0x0a08, 0x1cae: 0x0a08, 0x1caf: 0x0a08, - 0x1cb0: 0x0a08, 0x1cb1: 0x0a08, 0x1cb2: 0x0a08, 0x1cb3: 0x0a08, 0x1cb4: 0x0a08, 0x1cb5: 0x0a08, - 0x1cb6: 0x0a08, 0x1cb7: 0x0a08, 0x1cb8: 0x0a08, 0x1cb9: 0x0a08, 0x1cba: 0x0a08, 0x1cbb: 0x0a08, - 0x1cbc: 0x0a08, 0x1cbd: 0x0a08, 0x1cbe: 0x0a08, 0x1cbf: 0x0a08, + 0x1c80: 0x0040, 0x1c81: 0x0040, 0x1c82: 0x20d9, 0x1c83: 0x0040, 0x1c84: 0x0040, 0x1c85: 0x0040, + 0x1c86: 0x0040, 0x1c87: 0x20e1, 0x1c88: 0x0040, 0x1c89: 0x2191, 0x1c8a: 0x0040, 0x1c8b: 0x2169, + 0x1c8c: 
0x0040, 0x1c8d: 0x2179, 0x1c8e: 0x2111, 0x1c8f: 0x2141, 0x1c90: 0x0040, 0x1c91: 0x2121, + 0x1c92: 0x2159, 0x1c93: 0x0040, 0x1c94: 0x2119, 0x1c95: 0x0040, 0x1c96: 0x0040, 0x1c97: 0x20e9, + 0x1c98: 0x0040, 0x1c99: 0x2129, 0x1c9a: 0x0040, 0x1c9b: 0x2149, 0x1c9c: 0x0040, 0x1c9d: 0x1689, + 0x1c9e: 0x0040, 0x1c9f: 0x2321, 0x1ca0: 0x0040, 0x1ca1: 0x20b9, 0x1ca2: 0x20d9, 0x1ca3: 0x0040, + 0x1ca4: 0x2181, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0x20e1, 0x1ca8: 0x2131, 0x1ca9: 0x2191, + 0x1caa: 0x2161, 0x1cab: 0x0040, 0x1cac: 0x2171, 0x1cad: 0x2179, 0x1cae: 0x2111, 0x1caf: 0x2141, + 0x1cb0: 0x2151, 0x1cb1: 0x2121, 0x1cb2: 0x2159, 0x1cb3: 0x0040, 0x1cb4: 0x2119, 0x1cb5: 0x20c9, + 0x1cb6: 0x20d1, 0x1cb7: 0x20e9, 0x1cb8: 0x0040, 0x1cb9: 0x2129, 0x1cba: 0x2139, 0x1cbb: 0x2149, + 0x1cbc: 0x2311, 0x1cbd: 0x0040, 0x1cbe: 0x2319, 0x1cbf: 0x0040, // Block 0x73, offset 0x1cc0 - 0x1cc0: 0xb1d9, 0x1cc1: 0xb1f1, 0x1cc2: 0xb251, 0x1cc3: 0xb299, 0x1cc4: 0x0040, 0x1cc5: 0xb461, - 0x1cc6: 0xb2e1, 0x1cc7: 0xb269, 0x1cc8: 0xb359, 0x1cc9: 0xb479, 0x1cca: 0xb3e9, 0x1ccb: 0xb401, - 0x1ccc: 0xb419, 0x1ccd: 0xb431, 0x1cce: 0xb2f9, 0x1ccf: 0xb389, 0x1cd0: 0xb3b9, 0x1cd1: 0xb329, - 0x1cd2: 0xb3d1, 0x1cd3: 0xb2c9, 0x1cd4: 0xb311, 0x1cd5: 0xb221, 0x1cd6: 0xb239, 0x1cd7: 0xb281, - 0x1cd8: 0xb2b1, 0x1cd9: 0xb341, 0x1cda: 0xb371, 0x1cdb: 0xb3a1, 0x1cdc: 0xbca9, 0x1cdd: 0x7999, - 0x1cde: 0xbcc1, 0x1cdf: 0xbcd9, 0x1ce0: 0x0040, 0x1ce1: 0xb1f1, 0x1ce2: 0xb251, 0x1ce3: 0x0040, - 0x1ce4: 0xb449, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb269, 0x1ce8: 0x0040, 0x1ce9: 0xb479, - 0x1cea: 0xb3e9, 0x1ceb: 0xb401, 0x1cec: 0xb419, 0x1ced: 0xb431, 0x1cee: 0xb2f9, 0x1cef: 0xb389, - 0x1cf0: 0xb3b9, 0x1cf1: 0xb329, 0x1cf2: 0xb3d1, 0x1cf3: 0x0040, 0x1cf4: 0xb311, 0x1cf5: 0xb221, - 0x1cf6: 0xb239, 0x1cf7: 0xb281, 0x1cf8: 0x0040, 0x1cf9: 0xb341, 0x1cfa: 0x0040, 0x1cfb: 0xb3a1, + 0x1cc0: 0x20b1, 0x1cc1: 0x20b9, 0x1cc2: 0x20d9, 0x1cc3: 0x20f1, 0x1cc4: 0x2181, 0x1cc5: 0x2189, + 0x1cc6: 0x2109, 0x1cc7: 0x20e1, 0x1cc8: 0x2131, 
0x1cc9: 0x2191, 0x1cca: 0x0040, 0x1ccb: 0x2169, + 0x1ccc: 0x2171, 0x1ccd: 0x2179, 0x1cce: 0x2111, 0x1ccf: 0x2141, 0x1cd0: 0x2151, 0x1cd1: 0x2121, + 0x1cd2: 0x2159, 0x1cd3: 0x2101, 0x1cd4: 0x2119, 0x1cd5: 0x20c9, 0x1cd6: 0x20d1, 0x1cd7: 0x20e9, + 0x1cd8: 0x20f9, 0x1cd9: 0x2129, 0x1cda: 0x2139, 0x1cdb: 0x2149, 0x1cdc: 0x0040, 0x1cdd: 0x0040, + 0x1cde: 0x0040, 0x1cdf: 0x0040, 0x1ce0: 0x0040, 0x1ce1: 0x20b9, 0x1ce2: 0x20d9, 0x1ce3: 0x20f1, + 0x1ce4: 0x0040, 0x1ce5: 0x2189, 0x1ce6: 0x2109, 0x1ce7: 0x20e1, 0x1ce8: 0x2131, 0x1ce9: 0x2191, + 0x1cea: 0x0040, 0x1ceb: 0x2169, 0x1cec: 0x2171, 0x1ced: 0x2179, 0x1cee: 0x2111, 0x1cef: 0x2141, + 0x1cf0: 0x2151, 0x1cf1: 0x2121, 0x1cf2: 0x2159, 0x1cf3: 0x2101, 0x1cf4: 0x2119, 0x1cf5: 0x20c9, + 0x1cf6: 0x20d1, 0x1cf7: 0x20e9, 0x1cf8: 0x20f9, 0x1cf9: 0x2129, 0x1cfa: 0x2139, 0x1cfb: 0x2149, 0x1cfc: 0x0040, 0x1cfd: 0x0040, 0x1cfe: 0x0040, 0x1cff: 0x0040, // Block 0x74, offset 0x1d00 - 0x1d00: 0x0040, 0x1d01: 0x0040, 0x1d02: 0xb251, 0x1d03: 0x0040, 0x1d04: 0x0040, 0x1d05: 0x0040, - 0x1d06: 0x0040, 0x1d07: 0xb269, 0x1d08: 0x0040, 0x1d09: 0xb479, 0x1d0a: 0x0040, 0x1d0b: 0xb401, - 0x1d0c: 0x0040, 0x1d0d: 0xb431, 0x1d0e: 0xb2f9, 0x1d0f: 0xb389, 0x1d10: 0x0040, 0x1d11: 0xb329, - 0x1d12: 0xb3d1, 0x1d13: 0x0040, 0x1d14: 0xb311, 0x1d15: 0x0040, 0x1d16: 0x0040, 0x1d17: 0xb281, - 0x1d18: 0x0040, 0x1d19: 0xb341, 0x1d1a: 0x0040, 0x1d1b: 0xb3a1, 0x1d1c: 0x0040, 0x1d1d: 0x7999, - 0x1d1e: 0x0040, 0x1d1f: 0xbcd9, 0x1d20: 0x0040, 0x1d21: 0xb1f1, 0x1d22: 0xb251, 0x1d23: 0x0040, - 0x1d24: 0xb449, 0x1d25: 0x0040, 0x1d26: 0x0040, 0x1d27: 0xb269, 0x1d28: 0xb359, 0x1d29: 0xb479, - 0x1d2a: 0xb3e9, 0x1d2b: 0x0040, 0x1d2c: 0xb419, 0x1d2d: 0xb431, 0x1d2e: 0xb2f9, 0x1d2f: 0xb389, - 0x1d30: 0xb3b9, 0x1d31: 0xb329, 0x1d32: 0xb3d1, 0x1d33: 0x0040, 0x1d34: 0xb311, 0x1d35: 0xb221, - 0x1d36: 0xb239, 0x1d37: 0xb281, 0x1d38: 0x0040, 0x1d39: 0xb341, 0x1d3a: 0xb371, 0x1d3b: 0xb3a1, - 0x1d3c: 0xbca9, 0x1d3d: 0x0040, 0x1d3e: 0xbcc1, 0x1d3f: 0x0040, + 0x1d00: 0x0040, 0x1d01: 
0x232a, 0x1d02: 0x2332, 0x1d03: 0x233a, 0x1d04: 0x2342, 0x1d05: 0x234a, + 0x1d06: 0x2352, 0x1d07: 0x235a, 0x1d08: 0x2362, 0x1d09: 0x236a, 0x1d0a: 0x2372, 0x1d0b: 0x0018, + 0x1d0c: 0x0018, 0x1d0d: 0x0018, 0x1d0e: 0x0018, 0x1d0f: 0x0018, 0x1d10: 0x237a, 0x1d11: 0x2382, + 0x1d12: 0x238a, 0x1d13: 0x2392, 0x1d14: 0x239a, 0x1d15: 0x23a2, 0x1d16: 0x23aa, 0x1d17: 0x23b2, + 0x1d18: 0x23ba, 0x1d19: 0x23c2, 0x1d1a: 0x23ca, 0x1d1b: 0x23d2, 0x1d1c: 0x23da, 0x1d1d: 0x23e2, + 0x1d1e: 0x23ea, 0x1d1f: 0x23f2, 0x1d20: 0x23fa, 0x1d21: 0x2402, 0x1d22: 0x240a, 0x1d23: 0x2412, + 0x1d24: 0x241a, 0x1d25: 0x2422, 0x1d26: 0x242a, 0x1d27: 0x2432, 0x1d28: 0x243a, 0x1d29: 0x2442, + 0x1d2a: 0x2449, 0x1d2b: 0x03d9, 0x1d2c: 0x00b9, 0x1d2d: 0x1239, 0x1d2e: 0x2451, 0x1d2f: 0x0018, + 0x1d30: 0x0019, 0x1d31: 0x02e9, 0x1d32: 0x03d9, 0x1d33: 0x02f1, 0x1d34: 0x02f9, 0x1d35: 0x03f1, + 0x1d36: 0x0309, 0x1d37: 0x00a9, 0x1d38: 0x0311, 0x1d39: 0x00b1, 0x1d3a: 0x0319, 0x1d3b: 0x0101, + 0x1d3c: 0x0321, 0x1d3d: 0x0329, 0x1d3e: 0x0051, 0x1d3f: 0x0339, // Block 0x75, offset 0x1d40 - 0x1d40: 0xb1d9, 0x1d41: 0xb1f1, 0x1d42: 0xb251, 0x1d43: 0xb299, 0x1d44: 0xb449, 0x1d45: 0xb461, - 0x1d46: 0xb2e1, 0x1d47: 0xb269, 0x1d48: 0xb359, 0x1d49: 0xb479, 0x1d4a: 0x0040, 0x1d4b: 0xb401, - 0x1d4c: 0xb419, 0x1d4d: 0xb431, 0x1d4e: 0xb2f9, 0x1d4f: 0xb389, 0x1d50: 0xb3b9, 0x1d51: 0xb329, - 0x1d52: 0xb3d1, 0x1d53: 0xb2c9, 0x1d54: 0xb311, 0x1d55: 0xb221, 0x1d56: 0xb239, 0x1d57: 0xb281, - 0x1d58: 0xb2b1, 0x1d59: 0xb341, 0x1d5a: 0xb371, 0x1d5b: 0xb3a1, 0x1d5c: 0x0040, 0x1d5d: 0x0040, - 0x1d5e: 0x0040, 0x1d5f: 0x0040, 0x1d60: 0x0040, 0x1d61: 0xb1f1, 0x1d62: 0xb251, 0x1d63: 0xb299, - 0x1d64: 0x0040, 0x1d65: 0xb461, 0x1d66: 0xb2e1, 0x1d67: 0xb269, 0x1d68: 0xb359, 0x1d69: 0xb479, - 0x1d6a: 0x0040, 0x1d6b: 0xb401, 0x1d6c: 0xb419, 0x1d6d: 0xb431, 0x1d6e: 0xb2f9, 0x1d6f: 0xb389, - 0x1d70: 0xb3b9, 0x1d71: 0xb329, 0x1d72: 0xb3d1, 0x1d73: 0xb2c9, 0x1d74: 0xb311, 0x1d75: 0xb221, - 0x1d76: 0xb239, 0x1d77: 0xb281, 0x1d78: 0xb2b1, 0x1d79: 0xb341, 
0x1d7a: 0xb371, 0x1d7b: 0xb3a1, - 0x1d7c: 0x0040, 0x1d7d: 0x0040, 0x1d7e: 0x0040, 0x1d7f: 0x0040, + 0x1d40: 0x0751, 0x1d41: 0x00b9, 0x1d42: 0x0089, 0x1d43: 0x0341, 0x1d44: 0x0349, 0x1d45: 0x0391, + 0x1d46: 0x00c1, 0x1d47: 0x0109, 0x1d48: 0x00c9, 0x1d49: 0x04b1, 0x1d4a: 0x2459, 0x1d4b: 0x11f9, + 0x1d4c: 0x2461, 0x1d4d: 0x04d9, 0x1d4e: 0x2469, 0x1d4f: 0x2471, 0x1d50: 0x0018, 0x1d51: 0x0018, + 0x1d52: 0x0018, 0x1d53: 0x0018, 0x1d54: 0x0018, 0x1d55: 0x0018, 0x1d56: 0x0018, 0x1d57: 0x0018, + 0x1d58: 0x0018, 0x1d59: 0x0018, 0x1d5a: 0x0018, 0x1d5b: 0x0018, 0x1d5c: 0x0018, 0x1d5d: 0x0018, + 0x1d5e: 0x0018, 0x1d5f: 0x0018, 0x1d60: 0x0018, 0x1d61: 0x0018, 0x1d62: 0x0018, 0x1d63: 0x0018, + 0x1d64: 0x0018, 0x1d65: 0x0018, 0x1d66: 0x0018, 0x1d67: 0x0018, 0x1d68: 0x0018, 0x1d69: 0x0018, + 0x1d6a: 0x2479, 0x1d6b: 0x2481, 0x1d6c: 0x2489, 0x1d6d: 0x0018, 0x1d6e: 0x0018, 0x1d6f: 0x0018, + 0x1d70: 0x0018, 0x1d71: 0x0018, 0x1d72: 0x0018, 0x1d73: 0x0018, 0x1d74: 0x0018, 0x1d75: 0x0018, + 0x1d76: 0x0018, 0x1d77: 0x0018, 0x1d78: 0x0018, 0x1d79: 0x0018, 0x1d7a: 0x0018, 0x1d7b: 0x0018, + 0x1d7c: 0x0018, 0x1d7d: 0x0018, 0x1d7e: 0x0018, 0x1d7f: 0x0018, // Block 0x76, offset 0x1d80 - 0x1d80: 0x0040, 0x1d81: 0xbcf2, 0x1d82: 0xbd0a, 0x1d83: 0xbd22, 0x1d84: 0xbd3a, 0x1d85: 0xbd52, - 0x1d86: 0xbd6a, 0x1d87: 0xbd82, 0x1d88: 0xbd9a, 0x1d89: 0xbdb2, 0x1d8a: 0xbdca, 0x1d8b: 0x0018, - 0x1d8c: 0x0018, 0x1d8d: 0x0018, 0x1d8e: 0x0018, 0x1d8f: 0x0018, 0x1d90: 0xbde2, 0x1d91: 0xbe02, - 0x1d92: 0xbe22, 0x1d93: 0xbe42, 0x1d94: 0xbe62, 0x1d95: 0xbe82, 0x1d96: 0xbea2, 0x1d97: 0xbec2, - 0x1d98: 0xbee2, 0x1d99: 0xbf02, 0x1d9a: 0xbf22, 0x1d9b: 0xbf42, 0x1d9c: 0xbf62, 0x1d9d: 0xbf82, - 0x1d9e: 0xbfa2, 0x1d9f: 0xbfc2, 0x1da0: 0xbfe2, 0x1da1: 0xc002, 0x1da2: 0xc022, 0x1da3: 0xc042, - 0x1da4: 0xc062, 0x1da5: 0xc082, 0x1da6: 0xc0a2, 0x1da7: 0xc0c2, 0x1da8: 0xc0e2, 0x1da9: 0xc102, - 0x1daa: 0xc121, 0x1dab: 0x1159, 0x1dac: 0x0269, 0x1dad: 0x66a9, 0x1dae: 0xc161, 0x1daf: 0x0018, - 0x1db0: 0x0039, 0x1db1: 0x0ee9, 0x1db2: 
0x1159, 0x1db3: 0x0ef9, 0x1db4: 0x0f09, 0x1db5: 0x1199, - 0x1db6: 0x0f31, 0x1db7: 0x0249, 0x1db8: 0x0f41, 0x1db9: 0x0259, 0x1dba: 0x0f51, 0x1dbb: 0x0359, - 0x1dbc: 0x0f61, 0x1dbd: 0x0f71, 0x1dbe: 0x00d9, 0x1dbf: 0x0f99, + 0x1d80: 0x2499, 0x1d81: 0x24a1, 0x1d82: 0x24a9, 0x1d83: 0x0040, 0x1d84: 0x0040, 0x1d85: 0x0040, + 0x1d86: 0x0040, 0x1d87: 0x0040, 0x1d88: 0x0040, 0x1d89: 0x0040, 0x1d8a: 0x0040, 0x1d8b: 0x0040, + 0x1d8c: 0x0040, 0x1d8d: 0x0040, 0x1d8e: 0x0040, 0x1d8f: 0x0040, 0x1d90: 0x24b1, 0x1d91: 0x24b9, + 0x1d92: 0x24c1, 0x1d93: 0x24c9, 0x1d94: 0x24d1, 0x1d95: 0x24d9, 0x1d96: 0x24e1, 0x1d97: 0x24e9, + 0x1d98: 0x24f1, 0x1d99: 0x24f9, 0x1d9a: 0x2501, 0x1d9b: 0x2509, 0x1d9c: 0x2511, 0x1d9d: 0x2519, + 0x1d9e: 0x2521, 0x1d9f: 0x2529, 0x1da0: 0x2531, 0x1da1: 0x2539, 0x1da2: 0x2541, 0x1da3: 0x2549, + 0x1da4: 0x2551, 0x1da5: 0x2559, 0x1da6: 0x2561, 0x1da7: 0x2569, 0x1da8: 0x2571, 0x1da9: 0x2579, + 0x1daa: 0x2581, 0x1dab: 0x2589, 0x1dac: 0x2591, 0x1dad: 0x2599, 0x1dae: 0x25a1, 0x1daf: 0x25a9, + 0x1db0: 0x25b1, 0x1db1: 0x25b9, 0x1db2: 0x25c1, 0x1db3: 0x25c9, 0x1db4: 0x25d1, 0x1db5: 0x25d9, + 0x1db6: 0x25e1, 0x1db7: 0x25e9, 0x1db8: 0x25f1, 0x1db9: 0x25f9, 0x1dba: 0x2601, 0x1dbb: 0x2609, + 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040, // Block 0x77, offset 0x1dc0 - 0x1dc0: 0x2039, 0x1dc1: 0x0269, 0x1dc2: 0x01d9, 0x1dc3: 0x0fa9, 0x1dc4: 0x0fb9, 0x1dc5: 0x1089, - 0x1dc6: 0x0279, 0x1dc7: 0x0369, 0x1dc8: 0x0289, 0x1dc9: 0x13d1, 0x1dca: 0xc179, 0x1dcb: 0x65e9, - 0x1dcc: 0xc191, 0x1dcd: 0x1441, 0x1dce: 0xc1a9, 0x1dcf: 0xc1c9, 0x1dd0: 0x0018, 0x1dd1: 0x0018, - 0x1dd2: 0x0018, 0x1dd3: 0x0018, 0x1dd4: 0x0018, 0x1dd5: 0x0018, 0x1dd6: 0x0018, 0x1dd7: 0x0018, - 0x1dd8: 0x0018, 0x1dd9: 0x0018, 0x1dda: 0x0018, 0x1ddb: 0x0018, 0x1ddc: 0x0018, 0x1ddd: 0x0018, - 0x1dde: 0x0018, 0x1ddf: 0x0018, 0x1de0: 0x0018, 0x1de1: 0x0018, 0x1de2: 0x0018, 0x1de3: 0x0018, - 0x1de4: 0x0018, 0x1de5: 0x0018, 0x1de6: 0x0018, 0x1de7: 0x0018, 0x1de8: 0x0018, 0x1de9: 0x0018, - 0x1dea: 0xc1e1, 
0x1deb: 0xc1f9, 0x1dec: 0xc211, 0x1ded: 0x0018, 0x1dee: 0x0018, 0x1def: 0x0018, - 0x1df0: 0x0018, 0x1df1: 0x0018, 0x1df2: 0x0018, 0x1df3: 0x0018, 0x1df4: 0x0018, 0x1df5: 0x0018, - 0x1df6: 0x0018, 0x1df7: 0x0018, 0x1df8: 0x0018, 0x1df9: 0x0018, 0x1dfa: 0x0018, 0x1dfb: 0x0018, - 0x1dfc: 0x0018, 0x1dfd: 0x0018, 0x1dfe: 0x0018, 0x1dff: 0x0018, + 0x1dc0: 0x2669, 0x1dc1: 0x2671, 0x1dc2: 0x2679, 0x1dc3: 0x8b55, 0x1dc4: 0x2681, 0x1dc5: 0x2689, + 0x1dc6: 0x2691, 0x1dc7: 0x2699, 0x1dc8: 0x26a1, 0x1dc9: 0x26a9, 0x1dca: 0x26b1, 0x1dcb: 0x26b9, + 0x1dcc: 0x26c1, 0x1dcd: 0x8b75, 0x1dce: 0x26c9, 0x1dcf: 0x26d1, 0x1dd0: 0x26d9, 0x1dd1: 0x26e1, + 0x1dd2: 0x8b95, 0x1dd3: 0x26e9, 0x1dd4: 0x26f1, 0x1dd5: 0x2521, 0x1dd6: 0x8bb5, 0x1dd7: 0x26f9, + 0x1dd8: 0x2701, 0x1dd9: 0x2709, 0x1dda: 0x2711, 0x1ddb: 0x2719, 0x1ddc: 0x8bd5, 0x1ddd: 0x2721, + 0x1dde: 0x2729, 0x1ddf: 0x2731, 0x1de0: 0x2739, 0x1de1: 0x2741, 0x1de2: 0x25f9, 0x1de3: 0x2749, + 0x1de4: 0x2751, 0x1de5: 0x2759, 0x1de6: 0x2761, 0x1de7: 0x2769, 0x1de8: 0x2771, 0x1de9: 0x2779, + 0x1dea: 0x2781, 0x1deb: 0x2789, 0x1dec: 0x2791, 0x1ded: 0x2799, 0x1dee: 0x27a1, 0x1def: 0x27a9, + 0x1df0: 0x27b1, 0x1df1: 0x27b9, 0x1df2: 0x27b9, 0x1df3: 0x27b9, 0x1df4: 0x8bf5, 0x1df5: 0x27c1, + 0x1df6: 0x27c9, 0x1df7: 0x27d1, 0x1df8: 0x8c15, 0x1df9: 0x27d9, 0x1dfa: 0x27e1, 0x1dfb: 0x27e9, + 0x1dfc: 0x27f1, 0x1dfd: 0x27f9, 0x1dfe: 0x2801, 0x1dff: 0x2809, // Block 0x78, offset 0x1e00 - 0x1e00: 0xc241, 0x1e01: 0xc279, 0x1e02: 0xc2b1, 0x1e03: 0x0040, 0x1e04: 0x0040, 0x1e05: 0x0040, - 0x1e06: 0x0040, 0x1e07: 0x0040, 0x1e08: 0x0040, 0x1e09: 0x0040, 0x1e0a: 0x0040, 0x1e0b: 0x0040, - 0x1e0c: 0x0040, 0x1e0d: 0x0040, 0x1e0e: 0x0040, 0x1e0f: 0x0040, 0x1e10: 0xc2d1, 0x1e11: 0xc2f1, - 0x1e12: 0xc311, 0x1e13: 0xc331, 0x1e14: 0xc351, 0x1e15: 0xc371, 0x1e16: 0xc391, 0x1e17: 0xc3b1, - 0x1e18: 0xc3d1, 0x1e19: 0xc3f1, 0x1e1a: 0xc411, 0x1e1b: 0xc431, 0x1e1c: 0xc451, 0x1e1d: 0xc471, - 0x1e1e: 0xc491, 0x1e1f: 0xc4b1, 0x1e20: 0xc4d1, 0x1e21: 0xc4f1, 0x1e22: 0xc511, 0x1e23: 
0xc531, - 0x1e24: 0xc551, 0x1e25: 0xc571, 0x1e26: 0xc591, 0x1e27: 0xc5b1, 0x1e28: 0xc5d1, 0x1e29: 0xc5f1, - 0x1e2a: 0xc611, 0x1e2b: 0xc631, 0x1e2c: 0xc651, 0x1e2d: 0xc671, 0x1e2e: 0xc691, 0x1e2f: 0xc6b1, - 0x1e30: 0xc6d1, 0x1e31: 0xc6f1, 0x1e32: 0xc711, 0x1e33: 0xc731, 0x1e34: 0xc751, 0x1e35: 0xc771, - 0x1e36: 0xc791, 0x1e37: 0xc7b1, 0x1e38: 0xc7d1, 0x1e39: 0xc7f1, 0x1e3a: 0xc811, 0x1e3b: 0xc831, - 0x1e3c: 0x0040, 0x1e3d: 0x0040, 0x1e3e: 0x0040, 0x1e3f: 0x0040, + 0x1e00: 0x2811, 0x1e01: 0x2819, 0x1e02: 0x2821, 0x1e03: 0x2829, 0x1e04: 0x2831, 0x1e05: 0x2839, + 0x1e06: 0x2839, 0x1e07: 0x2841, 0x1e08: 0x2849, 0x1e09: 0x2851, 0x1e0a: 0x2859, 0x1e0b: 0x2861, + 0x1e0c: 0x2869, 0x1e0d: 0x2871, 0x1e0e: 0x2879, 0x1e0f: 0x2881, 0x1e10: 0x2889, 0x1e11: 0x2891, + 0x1e12: 0x2899, 0x1e13: 0x28a1, 0x1e14: 0x28a9, 0x1e15: 0x28b1, 0x1e16: 0x28b9, 0x1e17: 0x28c1, + 0x1e18: 0x28c9, 0x1e19: 0x8c35, 0x1e1a: 0x28d1, 0x1e1b: 0x28d9, 0x1e1c: 0x28e1, 0x1e1d: 0x24d9, + 0x1e1e: 0x28e9, 0x1e1f: 0x28f1, 0x1e20: 0x8c55, 0x1e21: 0x8c75, 0x1e22: 0x28f9, 0x1e23: 0x2901, + 0x1e24: 0x2909, 0x1e25: 0x2911, 0x1e26: 0x2919, 0x1e27: 0x2921, 0x1e28: 0x2040, 0x1e29: 0x2929, + 0x1e2a: 0x2931, 0x1e2b: 0x2931, 0x1e2c: 0x8c95, 0x1e2d: 0x2939, 0x1e2e: 0x2941, 0x1e2f: 0x2949, + 0x1e30: 0x2951, 0x1e31: 0x8cb5, 0x1e32: 0x2959, 0x1e33: 0x2961, 0x1e34: 0x2040, 0x1e35: 0x2969, + 0x1e36: 0x2971, 0x1e37: 0x2979, 0x1e38: 0x2981, 0x1e39: 0x2989, 0x1e3a: 0x2991, 0x1e3b: 0x8cd5, + 0x1e3c: 0x2999, 0x1e3d: 0x8cf5, 0x1e3e: 0x29a1, 0x1e3f: 0x29a9, // Block 0x79, offset 0x1e40 - 0x1e40: 0xcb61, 0x1e41: 0xcb81, 0x1e42: 0xcba1, 0x1e43: 0x8b55, 0x1e44: 0xcbc1, 0x1e45: 0xcbe1, - 0x1e46: 0xcc01, 0x1e47: 0xcc21, 0x1e48: 0xcc41, 0x1e49: 0xcc61, 0x1e4a: 0xcc81, 0x1e4b: 0xcca1, - 0x1e4c: 0xccc1, 0x1e4d: 0x8b75, 0x1e4e: 0xcce1, 0x1e4f: 0xcd01, 0x1e50: 0xcd21, 0x1e51: 0xcd41, - 0x1e52: 0x8b95, 0x1e53: 0xcd61, 0x1e54: 0xcd81, 0x1e55: 0xc491, 0x1e56: 0x8bb5, 0x1e57: 0xcda1, - 0x1e58: 0xcdc1, 0x1e59: 0xcde1, 0x1e5a: 0xce01, 0x1e5b: 0xce21, 
0x1e5c: 0x8bd5, 0x1e5d: 0xce41, - 0x1e5e: 0xce61, 0x1e5f: 0xce81, 0x1e60: 0xcea1, 0x1e61: 0xcec1, 0x1e62: 0xc7f1, 0x1e63: 0xcee1, - 0x1e64: 0xcf01, 0x1e65: 0xcf21, 0x1e66: 0xcf41, 0x1e67: 0xcf61, 0x1e68: 0xcf81, 0x1e69: 0xcfa1, - 0x1e6a: 0xcfc1, 0x1e6b: 0xcfe1, 0x1e6c: 0xd001, 0x1e6d: 0xd021, 0x1e6e: 0xd041, 0x1e6f: 0xd061, - 0x1e70: 0xd081, 0x1e71: 0xd0a1, 0x1e72: 0xd0a1, 0x1e73: 0xd0a1, 0x1e74: 0x8bf5, 0x1e75: 0xd0c1, - 0x1e76: 0xd0e1, 0x1e77: 0xd101, 0x1e78: 0x8c15, 0x1e79: 0xd121, 0x1e7a: 0xd141, 0x1e7b: 0xd161, - 0x1e7c: 0xd181, 0x1e7d: 0xd1a1, 0x1e7e: 0xd1c1, 0x1e7f: 0xd1e1, + 0x1e40: 0x29b1, 0x1e41: 0x29b9, 0x1e42: 0x29c1, 0x1e43: 0x29c9, 0x1e44: 0x29d1, 0x1e45: 0x29d9, + 0x1e46: 0x29e1, 0x1e47: 0x29e9, 0x1e48: 0x29f1, 0x1e49: 0x8d15, 0x1e4a: 0x29f9, 0x1e4b: 0x2a01, + 0x1e4c: 0x2a09, 0x1e4d: 0x2a11, 0x1e4e: 0x2a19, 0x1e4f: 0x8d35, 0x1e50: 0x2a21, 0x1e51: 0x8d55, + 0x1e52: 0x8d75, 0x1e53: 0x2a29, 0x1e54: 0x2a31, 0x1e55: 0x2a31, 0x1e56: 0x2a39, 0x1e57: 0x8d95, + 0x1e58: 0x8db5, 0x1e59: 0x2a41, 0x1e5a: 0x2a49, 0x1e5b: 0x2a51, 0x1e5c: 0x2a59, 0x1e5d: 0x2a61, + 0x1e5e: 0x2a69, 0x1e5f: 0x2a71, 0x1e60: 0x2a79, 0x1e61: 0x2a81, 0x1e62: 0x2a89, 0x1e63: 0x2a91, + 0x1e64: 0x8dd5, 0x1e65: 0x2a99, 0x1e66: 0x2aa1, 0x1e67: 0x2aa9, 0x1e68: 0x2ab1, 0x1e69: 0x2aa9, + 0x1e6a: 0x2ab9, 0x1e6b: 0x2ac1, 0x1e6c: 0x2ac9, 0x1e6d: 0x2ad1, 0x1e6e: 0x2ad9, 0x1e6f: 0x2ae1, + 0x1e70: 0x2ae9, 0x1e71: 0x2af1, 0x1e72: 0x2af9, 0x1e73: 0x2b01, 0x1e74: 0x2b09, 0x1e75: 0x2b11, + 0x1e76: 0x2b19, 0x1e77: 0x2b21, 0x1e78: 0x8df5, 0x1e79: 0x2b29, 0x1e7a: 0x2b31, 0x1e7b: 0x2b39, + 0x1e7c: 0x2b41, 0x1e7d: 0x2b49, 0x1e7e: 0x8e15, 0x1e7f: 0x2b51, // Block 0x7a, offset 0x1e80 - 0x1e80: 0xd201, 0x1e81: 0xd221, 0x1e82: 0xd241, 0x1e83: 0xd261, 0x1e84: 0xd281, 0x1e85: 0xd2a1, - 0x1e86: 0xd2a1, 0x1e87: 0xd2c1, 0x1e88: 0xd2e1, 0x1e89: 0xd301, 0x1e8a: 0xd321, 0x1e8b: 0xd341, - 0x1e8c: 0xd361, 0x1e8d: 0xd381, 0x1e8e: 0xd3a1, 0x1e8f: 0xd3c1, 0x1e90: 0xd3e1, 0x1e91: 0xd401, - 0x1e92: 0xd421, 0x1e93: 0xd441, 0x1e94: 
0xd461, 0x1e95: 0xd481, 0x1e96: 0xd4a1, 0x1e97: 0xd4c1, - 0x1e98: 0xd4e1, 0x1e99: 0x8c35, 0x1e9a: 0xd501, 0x1e9b: 0xd521, 0x1e9c: 0xd541, 0x1e9d: 0xc371, - 0x1e9e: 0xd561, 0x1e9f: 0xd581, 0x1ea0: 0x8c55, 0x1ea1: 0x8c75, 0x1ea2: 0xd5a1, 0x1ea3: 0xd5c1, - 0x1ea4: 0xd5e1, 0x1ea5: 0xd601, 0x1ea6: 0xd621, 0x1ea7: 0xd641, 0x1ea8: 0x2040, 0x1ea9: 0xd661, - 0x1eaa: 0xd681, 0x1eab: 0xd681, 0x1eac: 0x8c95, 0x1ead: 0xd6a1, 0x1eae: 0xd6c1, 0x1eaf: 0xd6e1, - 0x1eb0: 0xd701, 0x1eb1: 0x8cb5, 0x1eb2: 0xd721, 0x1eb3: 0xd741, 0x1eb4: 0x2040, 0x1eb5: 0xd761, - 0x1eb6: 0xd781, 0x1eb7: 0xd7a1, 0x1eb8: 0xd7c1, 0x1eb9: 0xd7e1, 0x1eba: 0xd801, 0x1ebb: 0x8cd5, - 0x1ebc: 0xd821, 0x1ebd: 0x8cf5, 0x1ebe: 0xd841, 0x1ebf: 0xd861, + 0x1e80: 0x2b59, 0x1e81: 0x2b61, 0x1e82: 0x2b69, 0x1e83: 0x2b71, 0x1e84: 0x2b79, 0x1e85: 0x2b81, + 0x1e86: 0x2b89, 0x1e87: 0x2b91, 0x1e88: 0x2b99, 0x1e89: 0x2ba1, 0x1e8a: 0x8e35, 0x1e8b: 0x2ba9, + 0x1e8c: 0x2bb1, 0x1e8d: 0x2bb9, 0x1e8e: 0x2bc1, 0x1e8f: 0x2bc9, 0x1e90: 0x2bd1, 0x1e91: 0x2bd9, + 0x1e92: 0x2be1, 0x1e93: 0x2be9, 0x1e94: 0x2bf1, 0x1e95: 0x2bf9, 0x1e96: 0x2c01, 0x1e97: 0x2c09, + 0x1e98: 0x2c11, 0x1e99: 0x2c19, 0x1e9a: 0x2c21, 0x1e9b: 0x2c29, 0x1e9c: 0x2c31, 0x1e9d: 0x8e55, + 0x1e9e: 0x2c39, 0x1e9f: 0x2c41, 0x1ea0: 0x2c49, 0x1ea1: 0x2c51, 0x1ea2: 0x2c59, 0x1ea3: 0x8e75, + 0x1ea4: 0x2c61, 0x1ea5: 0x2c69, 0x1ea6: 0x2c71, 0x1ea7: 0x2c79, 0x1ea8: 0x2c81, 0x1ea9: 0x2c89, + 0x1eaa: 0x2c91, 0x1eab: 0x2c99, 0x1eac: 0x7f0d, 0x1ead: 0x2ca1, 0x1eae: 0x2ca9, 0x1eaf: 0x2cb1, + 0x1eb0: 0x8e95, 0x1eb1: 0x2cb9, 0x1eb2: 0x2cc1, 0x1eb3: 0x2cc9, 0x1eb4: 0x2cd1, 0x1eb5: 0x2cd9, + 0x1eb6: 0x2ce1, 0x1eb7: 0x8eb5, 0x1eb8: 0x8ed5, 0x1eb9: 0x8ef5, 0x1eba: 0x2ce9, 0x1ebb: 0x8f15, + 0x1ebc: 0x2cf1, 0x1ebd: 0x2cf9, 0x1ebe: 0x2d01, 0x1ebf: 0x2d09, // Block 0x7b, offset 0x1ec0 - 0x1ec0: 0xd881, 0x1ec1: 0xd8a1, 0x1ec2: 0xd8c1, 0x1ec3: 0xd8e1, 0x1ec4: 0xd901, 0x1ec5: 0xd921, - 0x1ec6: 0xd941, 0x1ec7: 0xd961, 0x1ec8: 0xd981, 0x1ec9: 0x8d15, 0x1eca: 0xd9a1, 0x1ecb: 0xd9c1, - 0x1ecc: 0xd9e1, 
0x1ecd: 0xda01, 0x1ece: 0xda21, 0x1ecf: 0x8d35, 0x1ed0: 0xda41, 0x1ed1: 0x8d55, - 0x1ed2: 0x8d75, 0x1ed3: 0xda61, 0x1ed4: 0xda81, 0x1ed5: 0xda81, 0x1ed6: 0xdaa1, 0x1ed7: 0x8d95, - 0x1ed8: 0x8db5, 0x1ed9: 0xdac1, 0x1eda: 0xdae1, 0x1edb: 0xdb01, 0x1edc: 0xdb21, 0x1edd: 0xdb41, - 0x1ede: 0xdb61, 0x1edf: 0xdb81, 0x1ee0: 0xdba1, 0x1ee1: 0xdbc1, 0x1ee2: 0xdbe1, 0x1ee3: 0xdc01, - 0x1ee4: 0x8dd5, 0x1ee5: 0xdc21, 0x1ee6: 0xdc41, 0x1ee7: 0xdc61, 0x1ee8: 0xdc81, 0x1ee9: 0xdc61, - 0x1eea: 0xdca1, 0x1eeb: 0xdcc1, 0x1eec: 0xdce1, 0x1eed: 0xdd01, 0x1eee: 0xdd21, 0x1eef: 0xdd41, - 0x1ef0: 0xdd61, 0x1ef1: 0xdd81, 0x1ef2: 0xdda1, 0x1ef3: 0xddc1, 0x1ef4: 0xdde1, 0x1ef5: 0xde01, - 0x1ef6: 0xde21, 0x1ef7: 0xde41, 0x1ef8: 0x8df5, 0x1ef9: 0xde61, 0x1efa: 0xde81, 0x1efb: 0xdea1, - 0x1efc: 0xdec1, 0x1efd: 0xdee1, 0x1efe: 0x8e15, 0x1eff: 0xdf01, + 0x1ec0: 0x2d11, 0x1ec1: 0x2d19, 0x1ec2: 0x2d21, 0x1ec3: 0x2d29, 0x1ec4: 0x2d31, 0x1ec5: 0x2d39, + 0x1ec6: 0x8f35, 0x1ec7: 0x2d41, 0x1ec8: 0x2d49, 0x1ec9: 0x2d51, 0x1eca: 0x2d59, 0x1ecb: 0x2d61, + 0x1ecc: 0x2d69, 0x1ecd: 0x8f55, 0x1ece: 0x2d71, 0x1ecf: 0x2d79, 0x1ed0: 0x8f75, 0x1ed1: 0x8f95, + 0x1ed2: 0x2d81, 0x1ed3: 0x2d89, 0x1ed4: 0x2d91, 0x1ed5: 0x2d99, 0x1ed6: 0x2da1, 0x1ed7: 0x2da9, + 0x1ed8: 0x2db1, 0x1ed9: 0x2db9, 0x1eda: 0x2dc1, 0x1edb: 0x8fb5, 0x1edc: 0x2dc9, 0x1edd: 0x8fd5, + 0x1ede: 0x2dd1, 0x1edf: 0x2040, 0x1ee0: 0x2dd9, 0x1ee1: 0x2de1, 0x1ee2: 0x2de9, 0x1ee3: 0x8ff5, + 0x1ee4: 0x2df1, 0x1ee5: 0x2df9, 0x1ee6: 0x9015, 0x1ee7: 0x9035, 0x1ee8: 0x2e01, 0x1ee9: 0x2e09, + 0x1eea: 0x2e11, 0x1eeb: 0x2e19, 0x1eec: 0x2e21, 0x1eed: 0x2e21, 0x1eee: 0x2e29, 0x1eef: 0x2e31, + 0x1ef0: 0x2e39, 0x1ef1: 0x2e41, 0x1ef2: 0x2e49, 0x1ef3: 0x2e51, 0x1ef4: 0x2e59, 0x1ef5: 0x9055, + 0x1ef6: 0x2e61, 0x1ef7: 0x9075, 0x1ef8: 0x2e69, 0x1ef9: 0x9095, 0x1efa: 0x2e71, 0x1efb: 0x90b5, + 0x1efc: 0x90d5, 0x1efd: 0x90f5, 0x1efe: 0x2e79, 0x1eff: 0x2e81, // Block 0x7c, offset 0x1f00 - 0x1f00: 0xe601, 0x1f01: 0xe621, 0x1f02: 0xe641, 0x1f03: 0xe661, 0x1f04: 0xe681, 0x1f05: 
0xe6a1, - 0x1f06: 0x8f35, 0x1f07: 0xe6c1, 0x1f08: 0xe6e1, 0x1f09: 0xe701, 0x1f0a: 0xe721, 0x1f0b: 0xe741, - 0x1f0c: 0xe761, 0x1f0d: 0x8f55, 0x1f0e: 0xe781, 0x1f0f: 0xe7a1, 0x1f10: 0x8f75, 0x1f11: 0x8f95, - 0x1f12: 0xe7c1, 0x1f13: 0xe7e1, 0x1f14: 0xe801, 0x1f15: 0xe821, 0x1f16: 0xe841, 0x1f17: 0xe861, - 0x1f18: 0xe881, 0x1f19: 0xe8a1, 0x1f1a: 0xe8c1, 0x1f1b: 0x8fb5, 0x1f1c: 0xe8e1, 0x1f1d: 0x8fd5, - 0x1f1e: 0xe901, 0x1f1f: 0x2040, 0x1f20: 0xe921, 0x1f21: 0xe941, 0x1f22: 0xe961, 0x1f23: 0x8ff5, - 0x1f24: 0xe981, 0x1f25: 0xe9a1, 0x1f26: 0x9015, 0x1f27: 0x9035, 0x1f28: 0xe9c1, 0x1f29: 0xe9e1, - 0x1f2a: 0xea01, 0x1f2b: 0xea21, 0x1f2c: 0xea41, 0x1f2d: 0xea41, 0x1f2e: 0xea61, 0x1f2f: 0xea81, - 0x1f30: 0xeaa1, 0x1f31: 0xeac1, 0x1f32: 0xeae1, 0x1f33: 0xeb01, 0x1f34: 0xeb21, 0x1f35: 0x9055, - 0x1f36: 0xeb41, 0x1f37: 0x9075, 0x1f38: 0xeb61, 0x1f39: 0x9095, 0x1f3a: 0xeb81, 0x1f3b: 0x90b5, - 0x1f3c: 0x90d5, 0x1f3d: 0x90f5, 0x1f3e: 0xeba1, 0x1f3f: 0xebc1, + 0x1f00: 0x2e89, 0x1f01: 0x9115, 0x1f02: 0x9135, 0x1f03: 0x9155, 0x1f04: 0x9175, 0x1f05: 0x2e91, + 0x1f06: 0x2e99, 0x1f07: 0x2e99, 0x1f08: 0x2ea1, 0x1f09: 0x2ea9, 0x1f0a: 0x2eb1, 0x1f0b: 0x2eb9, + 0x1f0c: 0x2ec1, 0x1f0d: 0x9195, 0x1f0e: 0x2ec9, 0x1f0f: 0x2ed1, 0x1f10: 0x2ed9, 0x1f11: 0x2ee1, + 0x1f12: 0x91b5, 0x1f13: 0x2ee9, 0x1f14: 0x91d5, 0x1f15: 0x91f5, 0x1f16: 0x2ef1, 0x1f17: 0x2ef9, + 0x1f18: 0x2f01, 0x1f19: 0x2f09, 0x1f1a: 0x2f11, 0x1f1b: 0x2f19, 0x1f1c: 0x9215, 0x1f1d: 0x9235, + 0x1f1e: 0x9255, 0x1f1f: 0x2040, 0x1f20: 0x2f21, 0x1f21: 0x9275, 0x1f22: 0x2f29, 0x1f23: 0x2f31, + 0x1f24: 0x2f39, 0x1f25: 0x9295, 0x1f26: 0x2f41, 0x1f27: 0x2f49, 0x1f28: 0x2f51, 0x1f29: 0x2f59, + 0x1f2a: 0x2f61, 0x1f2b: 0x92b5, 0x1f2c: 0x2f69, 0x1f2d: 0x2f71, 0x1f2e: 0x2f79, 0x1f2f: 0x2f81, + 0x1f30: 0x2f89, 0x1f31: 0x2f91, 0x1f32: 0x92d5, 0x1f33: 0x92f5, 0x1f34: 0x2f99, 0x1f35: 0x9315, + 0x1f36: 0x2fa1, 0x1f37: 0x9335, 0x1f38: 0x2fa9, 0x1f39: 0x2fb1, 0x1f3a: 0x2fb9, 0x1f3b: 0x9355, + 0x1f3c: 0x9375, 0x1f3d: 0x2fc1, 0x1f3e: 0x9395, 0x1f3f: 
0x2fc9, // Block 0x7d, offset 0x1f40 - 0x1f40: 0xebe1, 0x1f41: 0x9115, 0x1f42: 0x9135, 0x1f43: 0x9155, 0x1f44: 0x9175, 0x1f45: 0xec01, - 0x1f46: 0xec21, 0x1f47: 0xec21, 0x1f48: 0xec41, 0x1f49: 0xec61, 0x1f4a: 0xec81, 0x1f4b: 0xeca1, - 0x1f4c: 0xecc1, 0x1f4d: 0x9195, 0x1f4e: 0xece1, 0x1f4f: 0xed01, 0x1f50: 0xed21, 0x1f51: 0xed41, - 0x1f52: 0x91b5, 0x1f53: 0xed61, 0x1f54: 0x91d5, 0x1f55: 0x91f5, 0x1f56: 0xed81, 0x1f57: 0xeda1, - 0x1f58: 0xedc1, 0x1f59: 0xede1, 0x1f5a: 0xee01, 0x1f5b: 0xee21, 0x1f5c: 0x9215, 0x1f5d: 0x9235, - 0x1f5e: 0x9255, 0x1f5f: 0x2040, 0x1f60: 0xee41, 0x1f61: 0x9275, 0x1f62: 0xee61, 0x1f63: 0xee81, - 0x1f64: 0xeea1, 0x1f65: 0x9295, 0x1f66: 0xeec1, 0x1f67: 0xeee1, 0x1f68: 0xef01, 0x1f69: 0xef21, - 0x1f6a: 0xef41, 0x1f6b: 0x92b5, 0x1f6c: 0xef61, 0x1f6d: 0xef81, 0x1f6e: 0xefa1, 0x1f6f: 0xefc1, - 0x1f70: 0xefe1, 0x1f71: 0xf001, 0x1f72: 0x92d5, 0x1f73: 0x92f5, 0x1f74: 0xf021, 0x1f75: 0x9315, - 0x1f76: 0xf041, 0x1f77: 0x9335, 0x1f78: 0xf061, 0x1f79: 0xf081, 0x1f7a: 0xf0a1, 0x1f7b: 0x9355, - 0x1f7c: 0x9375, 0x1f7d: 0xf0c1, 0x1f7e: 0x9395, 0x1f7f: 0xf0e1, + 0x1f40: 0x93b5, 0x1f41: 0x2fd1, 0x1f42: 0x2fd9, 0x1f43: 0x2fe1, 0x1f44: 0x2fe9, 0x1f45: 0x2ff1, + 0x1f46: 0x2ff9, 0x1f47: 0x93d5, 0x1f48: 0x93f5, 0x1f49: 0x9415, 0x1f4a: 0x9435, 0x1f4b: 0x2a29, + 0x1f4c: 0x3001, 0x1f4d: 0x3009, 0x1f4e: 0x3011, 0x1f4f: 0x3019, 0x1f50: 0x3021, 0x1f51: 0x3029, + 0x1f52: 0x3031, 0x1f53: 0x3039, 0x1f54: 0x3041, 0x1f55: 0x3049, 0x1f56: 0x3051, 0x1f57: 0x9455, + 0x1f58: 0x3059, 0x1f59: 0x3061, 0x1f5a: 0x3069, 0x1f5b: 0x3071, 0x1f5c: 0x3079, 0x1f5d: 0x3081, + 0x1f5e: 0x3089, 0x1f5f: 0x3091, 0x1f60: 0x3099, 0x1f61: 0x30a1, 0x1f62: 0x30a9, 0x1f63: 0x30b1, + 0x1f64: 0x9475, 0x1f65: 0x9495, 0x1f66: 0x94b5, 0x1f67: 0x30b9, 0x1f68: 0x30c1, 0x1f69: 0x30c9, + 0x1f6a: 0x30d1, 0x1f6b: 0x94d5, 0x1f6c: 0x30d9, 0x1f6d: 0x94f5, 0x1f6e: 0x30e1, 0x1f6f: 0x30e9, + 0x1f70: 0x9515, 0x1f71: 0x9535, 0x1f72: 0x30f1, 0x1f73: 0x30f9, 0x1f74: 0x3101, 0x1f75: 0x3109, + 0x1f76: 0x3111, 0x1f77: 0x3119, 
0x1f78: 0x3121, 0x1f79: 0x3129, 0x1f7a: 0x3131, 0x1f7b: 0x3139, + 0x1f7c: 0x3141, 0x1f7d: 0x3149, 0x1f7e: 0x3151, 0x1f7f: 0x2040, // Block 0x7e, offset 0x1f80 - 0x1f80: 0xf721, 0x1f81: 0xf741, 0x1f82: 0xf761, 0x1f83: 0xf781, 0x1f84: 0xf7a1, 0x1f85: 0x9555, - 0x1f86: 0xf7c1, 0x1f87: 0xf7e1, 0x1f88: 0xf801, 0x1f89: 0xf821, 0x1f8a: 0xf841, 0x1f8b: 0x9575, - 0x1f8c: 0x9595, 0x1f8d: 0xf861, 0x1f8e: 0xf881, 0x1f8f: 0xf8a1, 0x1f90: 0xf8c1, 0x1f91: 0xf8e1, - 0x1f92: 0xf901, 0x1f93: 0x95b5, 0x1f94: 0xf921, 0x1f95: 0xf941, 0x1f96: 0xf961, 0x1f97: 0xf981, - 0x1f98: 0x95d5, 0x1f99: 0x95f5, 0x1f9a: 0xf9a1, 0x1f9b: 0xf9c1, 0x1f9c: 0xf9e1, 0x1f9d: 0x9615, - 0x1f9e: 0xfa01, 0x1f9f: 0xfa21, 0x1fa0: 0x684d, 0x1fa1: 0x9635, 0x1fa2: 0xfa41, 0x1fa3: 0xfa61, - 0x1fa4: 0xfa81, 0x1fa5: 0x9655, 0x1fa6: 0xfaa1, 0x1fa7: 0xfac1, 0x1fa8: 0xfae1, 0x1fa9: 0xfb01, - 0x1faa: 0xfb21, 0x1fab: 0xfb41, 0x1fac: 0xfb61, 0x1fad: 0x9675, 0x1fae: 0xfb81, 0x1faf: 0xfba1, - 0x1fb0: 0xfbc1, 0x1fb1: 0x9695, 0x1fb2: 0xfbe1, 0x1fb3: 0xfc01, 0x1fb4: 0xfc21, 0x1fb5: 0xfc41, - 0x1fb6: 0x7b6d, 0x1fb7: 0x96b5, 0x1fb8: 0xfc61, 0x1fb9: 0xfc81, 0x1fba: 0xfca1, 0x1fbb: 0x96d5, - 0x1fbc: 0xfcc1, 0x1fbd: 0x96f5, 0x1fbe: 0xfce1, 0x1fbf: 0xfce1, + 0x1f80: 0x3159, 0x1f81: 0x3161, 0x1f82: 0x3169, 0x1f83: 0x3171, 0x1f84: 0x3179, 0x1f85: 0x9555, + 0x1f86: 0x3181, 0x1f87: 0x3189, 0x1f88: 0x3191, 0x1f89: 0x3199, 0x1f8a: 0x31a1, 0x1f8b: 0x9575, + 0x1f8c: 0x9595, 0x1f8d: 0x31a9, 0x1f8e: 0x31b1, 0x1f8f: 0x31b9, 0x1f90: 0x31c1, 0x1f91: 0x31c9, + 0x1f92: 0x31d1, 0x1f93: 0x95b5, 0x1f94: 0x31d9, 0x1f95: 0x31e1, 0x1f96: 0x31e9, 0x1f97: 0x31f1, + 0x1f98: 0x95d5, 0x1f99: 0x95f5, 0x1f9a: 0x31f9, 0x1f9b: 0x3201, 0x1f9c: 0x3209, 0x1f9d: 0x9615, + 0x1f9e: 0x3211, 0x1f9f: 0x3219, 0x1fa0: 0x684d, 0x1fa1: 0x9635, 0x1fa2: 0x3221, 0x1fa3: 0x3229, + 0x1fa4: 0x3231, 0x1fa5: 0x9655, 0x1fa6: 0x3239, 0x1fa7: 0x3241, 0x1fa8: 0x3249, 0x1fa9: 0x3251, + 0x1faa: 0x3259, 0x1fab: 0x3261, 0x1fac: 0x3269, 0x1fad: 0x9675, 0x1fae: 0x3271, 0x1faf: 0x3279, + 0x1fb0: 
0x3281, 0x1fb1: 0x9695, 0x1fb2: 0x3289, 0x1fb3: 0x3291, 0x1fb4: 0x3299, 0x1fb5: 0x32a1, + 0x1fb6: 0x7b6d, 0x1fb7: 0x96b5, 0x1fb8: 0x32a9, 0x1fb9: 0x32b1, 0x1fba: 0x32b9, 0x1fbb: 0x96d5, + 0x1fbc: 0x32c1, 0x1fbd: 0x96f5, 0x1fbe: 0x32c9, 0x1fbf: 0x32c9, // Block 0x7f, offset 0x1fc0 - 0x1fc0: 0xfd01, 0x1fc1: 0x9715, 0x1fc2: 0xfd21, 0x1fc3: 0xfd41, 0x1fc4: 0xfd61, 0x1fc5: 0xfd81, - 0x1fc6: 0xfda1, 0x1fc7: 0xfdc1, 0x1fc8: 0xfde1, 0x1fc9: 0x9735, 0x1fca: 0xfe01, 0x1fcb: 0xfe21, - 0x1fcc: 0xfe41, 0x1fcd: 0xfe61, 0x1fce: 0xfe81, 0x1fcf: 0xfea1, 0x1fd0: 0x9755, 0x1fd1: 0xfec1, - 0x1fd2: 0x9775, 0x1fd3: 0x9795, 0x1fd4: 0x97b5, 0x1fd5: 0xfee1, 0x1fd6: 0xff01, 0x1fd7: 0xff21, - 0x1fd8: 0xff41, 0x1fd9: 0xff61, 0x1fda: 0xff81, 0x1fdb: 0xffa1, 0x1fdc: 0xffc1, 0x1fdd: 0x97d5, + 0x1fc0: 0x32d1, 0x1fc1: 0x9715, 0x1fc2: 0x32d9, 0x1fc3: 0x32e1, 0x1fc4: 0x32e9, 0x1fc5: 0x32f1, + 0x1fc6: 0x32f9, 0x1fc7: 0x3301, 0x1fc8: 0x3309, 0x1fc9: 0x9735, 0x1fca: 0x3311, 0x1fcb: 0x3319, + 0x1fcc: 0x3321, 0x1fcd: 0x3329, 0x1fce: 0x3331, 0x1fcf: 0x3339, 0x1fd0: 0x9755, 0x1fd1: 0x3341, + 0x1fd2: 0x9775, 0x1fd3: 0x9795, 0x1fd4: 0x97b5, 0x1fd5: 0x3349, 0x1fd6: 0x3351, 0x1fd7: 0x3359, + 0x1fd8: 0x3361, 0x1fd9: 0x3369, 0x1fda: 0x3371, 0x1fdb: 0x3379, 0x1fdc: 0x3381, 0x1fdd: 0x97d5, 0x1fde: 0x0040, 0x1fdf: 0x0040, 0x1fe0: 0x0040, 0x1fe1: 0x0040, 0x1fe2: 0x0040, 0x1fe3: 0x0040, 0x1fe4: 0x0040, 0x1fe5: 0x0040, 0x1fe6: 0x0040, 0x1fe7: 0x0040, 0x1fe8: 0x0040, 0x1fe9: 0x0040, 0x1fea: 0x0040, 0x1feb: 0x0040, 0x1fec: 0x0040, 0x1fed: 0x0040, 0x1fee: 0x0040, 0x1fef: 0x0040, @@ -2134,7 +2277,7 @@ var idnaIndex = [2368]uint16{ 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, // Block 0x7, offset 0x1c0 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, - 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 
0x1cc: 0xe3, 0x1cd: 0xe4, 0x1ce: 0x3e, 0x1cf: 0x3f, 0x1d0: 0xa0, 0x1d1: 0xa0, 0x1d2: 0xa0, 0x1d3: 0xa0, 0x1d4: 0xa0, 0x1d5: 0xa0, 0x1d6: 0xa0, 0x1d7: 0xa0, 0x1d8: 0xa0, 0x1d9: 0xa0, 0x1da: 0xa0, 0x1db: 0xa0, 0x1dc: 0xa0, 0x1dd: 0xa0, 0x1de: 0xa0, 0x1df: 0xa0, 0x1e0: 0xa0, 0x1e1: 0xa0, 0x1e2: 0xa0, 0x1e3: 0xa0, 0x1e4: 0xa0, 0x1e5: 0xa0, 0x1e6: 0xa0, 0x1e7: 0xa0, @@ -2167,143 +2310,143 @@ var idnaIndex = [2368]uint16{ 0x2a0: 0xa0, 0x2a1: 0xa0, 0x2a2: 0xa0, 0x2a3: 0xa0, 0x2a4: 0xa0, 0x2a5: 0xa0, 0x2a6: 0xa0, 0x2a7: 0xa0, 0x2a8: 0xa0, 0x2a9: 0xa0, 0x2aa: 0xa0, 0x2ab: 0xa0, 0x2ac: 0xa0, 0x2ad: 0xa0, 0x2ae: 0xa0, 0x2af: 0xa0, 0x2b0: 0xa0, 0x2b1: 0xa0, 0x2b2: 0xa0, 0x2b3: 0xa0, 0x2b4: 0xa0, 0x2b5: 0xa0, 0x2b6: 0xa0, 0x2b7: 0xa0, - 0x2b8: 0xa0, 0x2b9: 0xa0, 0x2ba: 0xa0, 0x2bb: 0xa0, 0x2bc: 0xa0, 0x2bd: 0xa0, 0x2be: 0xa0, 0x2bf: 0xe3, + 0x2b8: 0xa0, 0x2b9: 0xa0, 0x2ba: 0xa0, 0x2bb: 0xa0, 0x2bc: 0xa0, 0x2bd: 0xa0, 0x2be: 0xa0, 0x2bf: 0xe5, // Block 0xb, offset 0x2c0 0x2c0: 0xa0, 0x2c1: 0xa0, 0x2c2: 0xa0, 0x2c3: 0xa0, 0x2c4: 0xa0, 0x2c5: 0xa0, 0x2c6: 0xa0, 0x2c7: 0xa0, 0x2c8: 0xa0, 0x2c9: 0xa0, 0x2ca: 0xa0, 0x2cb: 0xa0, 0x2cc: 0xa0, 0x2cd: 0xa0, 0x2ce: 0xa0, 0x2cf: 0xa0, - 0x2d0: 0xa0, 0x2d1: 0xa0, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0xa0, 0x2d5: 0xa0, 0x2d6: 0xa0, 0x2d7: 0xa0, - 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8, - 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, - 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2d0: 0xa0, 0x2d1: 0xa0, 0x2d2: 0xe6, 0x2d3: 0xe7, 0x2d4: 0xa0, 0x2d5: 0xa0, 0x2d6: 0xa0, 0x2d7: 0xa0, + 0x2d8: 0xe8, 0x2d9: 0x40, 0x2da: 0x41, 0x2db: 0xe9, 0x2dc: 0x42, 0x2dd: 0x43, 0x2de: 0x44, 0x2df: 0xea, + 0x2e0: 0xeb, 0x2e1: 0xec, 0x2e2: 0xed, 0x2e3: 0xee, 0x2e4: 0xef, 0x2e5: 0xf0, 0x2e6: 0xf1, 0x2e7: 0xf2, + 0x2e8: 0xf3, 0x2e9: 0xf4, 0x2ea: 0xf5, 0x2eb: 0xf6, 0x2ec: 0xf7, 0x2ed: 
0xf8, 0x2ee: 0xf9, 0x2ef: 0xfa, 0x2f0: 0xa0, 0x2f1: 0xa0, 0x2f2: 0xa0, 0x2f3: 0xa0, 0x2f4: 0xa0, 0x2f5: 0xa0, 0x2f6: 0xa0, 0x2f7: 0xa0, 0x2f8: 0xa0, 0x2f9: 0xa0, 0x2fa: 0xa0, 0x2fb: 0xa0, 0x2fc: 0xa0, 0x2fd: 0xa0, 0x2fe: 0xa0, 0x2ff: 0xa0, // Block 0xc, offset 0x300 0x300: 0xa0, 0x301: 0xa0, 0x302: 0xa0, 0x303: 0xa0, 0x304: 0xa0, 0x305: 0xa0, 0x306: 0xa0, 0x307: 0xa0, 0x308: 0xa0, 0x309: 0xa0, 0x30a: 0xa0, 0x30b: 0xa0, 0x30c: 0xa0, 0x30d: 0xa0, 0x30e: 0xa0, 0x30f: 0xa0, 0x310: 0xa0, 0x311: 0xa0, 0x312: 0xa0, 0x313: 0xa0, 0x314: 0xa0, 0x315: 0xa0, 0x316: 0xa0, 0x317: 0xa0, - 0x318: 0xa0, 0x319: 0xa0, 0x31a: 0xa0, 0x31b: 0xa0, 0x31c: 0xa0, 0x31d: 0xa0, 0x31e: 0xf9, 0x31f: 0xfa, + 0x318: 0xa0, 0x319: 0xa0, 0x31a: 0xa0, 0x31b: 0xa0, 0x31c: 0xa0, 0x31d: 0xa0, 0x31e: 0xfb, 0x31f: 0xfc, // Block 0xd, offset 0x340 - 0x340: 0xfb, 0x341: 0xfb, 0x342: 0xfb, 0x343: 0xfb, 0x344: 0xfb, 0x345: 0xfb, 0x346: 0xfb, 0x347: 0xfb, - 0x348: 0xfb, 0x349: 0xfb, 0x34a: 0xfb, 0x34b: 0xfb, 0x34c: 0xfb, 0x34d: 0xfb, 0x34e: 0xfb, 0x34f: 0xfb, - 0x350: 0xfb, 0x351: 0xfb, 0x352: 0xfb, 0x353: 0xfb, 0x354: 0xfb, 0x355: 0xfb, 0x356: 0xfb, 0x357: 0xfb, - 0x358: 0xfb, 0x359: 0xfb, 0x35a: 0xfb, 0x35b: 0xfb, 0x35c: 0xfb, 0x35d: 0xfb, 0x35e: 0xfb, 0x35f: 0xfb, - 0x360: 0xfb, 0x361: 0xfb, 0x362: 0xfb, 0x363: 0xfb, 0x364: 0xfb, 0x365: 0xfb, 0x366: 0xfb, 0x367: 0xfb, - 0x368: 0xfb, 0x369: 0xfb, 0x36a: 0xfb, 0x36b: 0xfb, 0x36c: 0xfb, 0x36d: 0xfb, 0x36e: 0xfb, 0x36f: 0xfb, - 0x370: 0xfb, 0x371: 0xfb, 0x372: 0xfb, 0x373: 0xfb, 0x374: 0xfb, 0x375: 0xfb, 0x376: 0xfb, 0x377: 0xfb, - 0x378: 0xfb, 0x379: 0xfb, 0x37a: 0xfb, 0x37b: 0xfb, 0x37c: 0xfb, 0x37d: 0xfb, 0x37e: 0xfb, 0x37f: 0xfb, + 0x340: 0xfd, 0x341: 0xfd, 0x342: 0xfd, 0x343: 0xfd, 0x344: 0xfd, 0x345: 0xfd, 0x346: 0xfd, 0x347: 0xfd, + 0x348: 0xfd, 0x349: 0xfd, 0x34a: 0xfd, 0x34b: 0xfd, 0x34c: 0xfd, 0x34d: 0xfd, 0x34e: 0xfd, 0x34f: 0xfd, + 0x350: 0xfd, 0x351: 0xfd, 0x352: 0xfd, 0x353: 0xfd, 0x354: 0xfd, 0x355: 0xfd, 0x356: 0xfd, 0x357: 0xfd, + 0x358: 0xfd, 
0x359: 0xfd, 0x35a: 0xfd, 0x35b: 0xfd, 0x35c: 0xfd, 0x35d: 0xfd, 0x35e: 0xfd, 0x35f: 0xfd, + 0x360: 0xfd, 0x361: 0xfd, 0x362: 0xfd, 0x363: 0xfd, 0x364: 0xfd, 0x365: 0xfd, 0x366: 0xfd, 0x367: 0xfd, + 0x368: 0xfd, 0x369: 0xfd, 0x36a: 0xfd, 0x36b: 0xfd, 0x36c: 0xfd, 0x36d: 0xfd, 0x36e: 0xfd, 0x36f: 0xfd, + 0x370: 0xfd, 0x371: 0xfd, 0x372: 0xfd, 0x373: 0xfd, 0x374: 0xfd, 0x375: 0xfd, 0x376: 0xfd, 0x377: 0xfd, + 0x378: 0xfd, 0x379: 0xfd, 0x37a: 0xfd, 0x37b: 0xfd, 0x37c: 0xfd, 0x37d: 0xfd, 0x37e: 0xfd, 0x37f: 0xfd, // Block 0xe, offset 0x380 - 0x380: 0xfb, 0x381: 0xfb, 0x382: 0xfb, 0x383: 0xfb, 0x384: 0xfb, 0x385: 0xfb, 0x386: 0xfb, 0x387: 0xfb, - 0x388: 0xfb, 0x389: 0xfb, 0x38a: 0xfb, 0x38b: 0xfb, 0x38c: 0xfb, 0x38d: 0xfb, 0x38e: 0xfb, 0x38f: 0xfb, - 0x390: 0xfb, 0x391: 0xfb, 0x392: 0xfb, 0x393: 0xfb, 0x394: 0xfb, 0x395: 0xfb, 0x396: 0xfb, 0x397: 0xfb, - 0x398: 0xfb, 0x399: 0xfb, 0x39a: 0xfb, 0x39b: 0xfb, 0x39c: 0xfb, 0x39d: 0xfb, 0x39e: 0xfb, 0x39f: 0xfb, - 0x3a0: 0xfb, 0x3a1: 0xfb, 0x3a2: 0xfb, 0x3a3: 0xfb, 0x3a4: 0xfc, 0x3a5: 0xfd, 0x3a6: 0xfe, 0x3a7: 0xff, - 0x3a8: 0x47, 0x3a9: 0x100, 0x3aa: 0x101, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, - 0x3b0: 0x102, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x103, 0x3b7: 0x52, - 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + 0x380: 0xfd, 0x381: 0xfd, 0x382: 0xfd, 0x383: 0xfd, 0x384: 0xfd, 0x385: 0xfd, 0x386: 0xfd, 0x387: 0xfd, + 0x388: 0xfd, 0x389: 0xfd, 0x38a: 0xfd, 0x38b: 0xfd, 0x38c: 0xfd, 0x38d: 0xfd, 0x38e: 0xfd, 0x38f: 0xfd, + 0x390: 0xfd, 0x391: 0xfd, 0x392: 0xfd, 0x393: 0xfd, 0x394: 0xfd, 0x395: 0xfd, 0x396: 0xfd, 0x397: 0xfd, + 0x398: 0xfd, 0x399: 0xfd, 0x39a: 0xfd, 0x39b: 0xfd, 0x39c: 0xfd, 0x39d: 0xfd, 0x39e: 0xfd, 0x39f: 0xfd, + 0x3a0: 0xfd, 0x3a1: 0xfd, 0x3a2: 0xfd, 0x3a3: 0xfd, 0x3a4: 0xfe, 0x3a5: 0xff, 0x3a6: 0x100, 0x3a7: 0x101, + 0x3a8: 0x45, 0x3a9: 0x102, 0x3aa: 0x103, 0x3ab: 0x46, 0x3ac: 0x47, 
0x3ad: 0x48, 0x3ae: 0x49, 0x3af: 0x4a, + 0x3b0: 0x104, 0x3b1: 0x4b, 0x3b2: 0x4c, 0x3b3: 0x4d, 0x3b4: 0x4e, 0x3b5: 0x4f, 0x3b6: 0x105, 0x3b7: 0x50, + 0x3b8: 0x51, 0x3b9: 0x52, 0x3ba: 0x53, 0x3bb: 0x54, 0x3bc: 0x55, 0x3bd: 0x56, 0x3be: 0x57, 0x3bf: 0x58, // Block 0xf, offset 0x3c0 - 0x3c0: 0x104, 0x3c1: 0x105, 0x3c2: 0xa0, 0x3c3: 0x106, 0x3c4: 0x107, 0x3c5: 0x9c, 0x3c6: 0x108, 0x3c7: 0x109, - 0x3c8: 0xfb, 0x3c9: 0xfb, 0x3ca: 0x10a, 0x3cb: 0x10b, 0x3cc: 0x10c, 0x3cd: 0x10d, 0x3ce: 0x10e, 0x3cf: 0x10f, - 0x3d0: 0x110, 0x3d1: 0xa0, 0x3d2: 0x111, 0x3d3: 0x112, 0x3d4: 0x113, 0x3d5: 0x114, 0x3d6: 0xfb, 0x3d7: 0xfb, - 0x3d8: 0xa0, 0x3d9: 0xa0, 0x3da: 0xa0, 0x3db: 0xa0, 0x3dc: 0x115, 0x3dd: 0x116, 0x3de: 0xfb, 0x3df: 0xfb, - 0x3e0: 0x117, 0x3e1: 0x118, 0x3e2: 0x119, 0x3e3: 0x11a, 0x3e4: 0x11b, 0x3e5: 0xfb, 0x3e6: 0x11c, 0x3e7: 0x11d, - 0x3e8: 0x11e, 0x3e9: 0x11f, 0x3ea: 0x120, 0x3eb: 0x5b, 0x3ec: 0x121, 0x3ed: 0x122, 0x3ee: 0x5c, 0x3ef: 0xfb, - 0x3f0: 0x123, 0x3f1: 0x124, 0x3f2: 0x125, 0x3f3: 0x126, 0x3f4: 0x127, 0x3f5: 0xfb, 0x3f6: 0xfb, 0x3f7: 0xfb, - 0x3f8: 0xfb, 0x3f9: 0x128, 0x3fa: 0x129, 0x3fb: 0xfb, 0x3fc: 0x12a, 0x3fd: 0x12b, 0x3fe: 0x12c, 0x3ff: 0x12d, + 0x3c0: 0x106, 0x3c1: 0x107, 0x3c2: 0xa0, 0x3c3: 0x108, 0x3c4: 0x109, 0x3c5: 0x9c, 0x3c6: 0x10a, 0x3c7: 0x10b, + 0x3c8: 0xfd, 0x3c9: 0xfd, 0x3ca: 0x10c, 0x3cb: 0x10d, 0x3cc: 0x10e, 0x3cd: 0x10f, 0x3ce: 0x110, 0x3cf: 0x111, + 0x3d0: 0x112, 0x3d1: 0xa0, 0x3d2: 0x113, 0x3d3: 0x114, 0x3d4: 0x115, 0x3d5: 0x116, 0x3d6: 0xfd, 0x3d7: 0xfd, + 0x3d8: 0xa0, 0x3d9: 0xa0, 0x3da: 0xa0, 0x3db: 0xa0, 0x3dc: 0x117, 0x3dd: 0x118, 0x3de: 0xfd, 0x3df: 0xfd, + 0x3e0: 0x119, 0x3e1: 0x11a, 0x3e2: 0x11b, 0x3e3: 0x11c, 0x3e4: 0x11d, 0x3e5: 0xfd, 0x3e6: 0x11e, 0x3e7: 0x11f, + 0x3e8: 0x120, 0x3e9: 0x121, 0x3ea: 0x122, 0x3eb: 0x59, 0x3ec: 0x123, 0x3ed: 0x124, 0x3ee: 0x5a, 0x3ef: 0xfd, + 0x3f0: 0x125, 0x3f1: 0x126, 0x3f2: 0x127, 0x3f3: 0x128, 0x3f4: 0x129, 0x3f5: 0xfd, 0x3f6: 0xfd, 0x3f7: 0xfd, + 0x3f8: 0xfd, 0x3f9: 0x12a, 0x3fa: 0x12b, 0x3fb: 
0xfd, 0x3fc: 0x12c, 0x3fd: 0x12d, 0x3fe: 0x12e, 0x3ff: 0x12f, // Block 0x10, offset 0x400 - 0x400: 0x12e, 0x401: 0x12f, 0x402: 0x130, 0x403: 0x131, 0x404: 0x132, 0x405: 0x133, 0x406: 0x134, 0x407: 0x135, - 0x408: 0x136, 0x409: 0xfb, 0x40a: 0x137, 0x40b: 0x138, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xfb, 0x40f: 0xfb, - 0x410: 0x139, 0x411: 0x13a, 0x412: 0x13b, 0x413: 0x13c, 0x414: 0xfb, 0x415: 0xfb, 0x416: 0x13d, 0x417: 0x13e, - 0x418: 0x13f, 0x419: 0x140, 0x41a: 0x141, 0x41b: 0x142, 0x41c: 0x143, 0x41d: 0xfb, 0x41e: 0xfb, 0x41f: 0xfb, - 0x420: 0x144, 0x421: 0xfb, 0x422: 0x145, 0x423: 0x146, 0x424: 0x5f, 0x425: 0x147, 0x426: 0x148, 0x427: 0x149, - 0x428: 0x14a, 0x429: 0x14b, 0x42a: 0x14c, 0x42b: 0x14d, 0x42c: 0xfb, 0x42d: 0xfb, 0x42e: 0xfb, 0x42f: 0xfb, - 0x430: 0x14e, 0x431: 0x14f, 0x432: 0x150, 0x433: 0xfb, 0x434: 0x151, 0x435: 0x152, 0x436: 0x153, 0x437: 0xfb, - 0x438: 0xfb, 0x439: 0xfb, 0x43a: 0xfb, 0x43b: 0x154, 0x43c: 0xfb, 0x43d: 0xfb, 0x43e: 0x155, 0x43f: 0x156, + 0x400: 0x130, 0x401: 0x131, 0x402: 0x132, 0x403: 0x133, 0x404: 0x134, 0x405: 0x135, 0x406: 0x136, 0x407: 0x137, + 0x408: 0x138, 0x409: 0xfd, 0x40a: 0x139, 0x40b: 0x13a, 0x40c: 0x5b, 0x40d: 0x5c, 0x40e: 0xfd, 0x40f: 0xfd, + 0x410: 0x13b, 0x411: 0x13c, 0x412: 0x13d, 0x413: 0x13e, 0x414: 0xfd, 0x415: 0xfd, 0x416: 0x13f, 0x417: 0x140, + 0x418: 0x141, 0x419: 0x142, 0x41a: 0x143, 0x41b: 0x144, 0x41c: 0x145, 0x41d: 0xfd, 0x41e: 0xfd, 0x41f: 0xfd, + 0x420: 0x146, 0x421: 0xfd, 0x422: 0x147, 0x423: 0x148, 0x424: 0x5d, 0x425: 0x149, 0x426: 0x14a, 0x427: 0x14b, + 0x428: 0x14c, 0x429: 0x14d, 0x42a: 0x14e, 0x42b: 0x14f, 0x42c: 0xfd, 0x42d: 0xfd, 0x42e: 0xfd, 0x42f: 0xfd, + 0x430: 0x150, 0x431: 0x151, 0x432: 0x152, 0x433: 0xfd, 0x434: 0x153, 0x435: 0x154, 0x436: 0x155, 0x437: 0xfd, + 0x438: 0xfd, 0x439: 0xfd, 0x43a: 0xfd, 0x43b: 0x156, 0x43c: 0xfd, 0x43d: 0xfd, 0x43e: 0x157, 0x43f: 0x158, // Block 0x11, offset 0x440 0x440: 0xa0, 0x441: 0xa0, 0x442: 0xa0, 0x443: 0xa0, 0x444: 0xa0, 0x445: 0xa0, 0x446: 0xa0, 0x447: 0xa0, 
- 0x448: 0xa0, 0x449: 0xa0, 0x44a: 0xa0, 0x44b: 0xa0, 0x44c: 0xa0, 0x44d: 0xa0, 0x44e: 0x157, 0x44f: 0xfb, - 0x450: 0x9c, 0x451: 0x158, 0x452: 0xa0, 0x453: 0xa0, 0x454: 0xa0, 0x455: 0x159, 0x456: 0xfb, 0x457: 0xfb, - 0x458: 0xfb, 0x459: 0xfb, 0x45a: 0xfb, 0x45b: 0xfb, 0x45c: 0xfb, 0x45d: 0xfb, 0x45e: 0xfb, 0x45f: 0xfb, - 0x460: 0xfb, 0x461: 0xfb, 0x462: 0xfb, 0x463: 0xfb, 0x464: 0xfb, 0x465: 0xfb, 0x466: 0xfb, 0x467: 0xfb, - 0x468: 0xfb, 0x469: 0xfb, 0x46a: 0xfb, 0x46b: 0xfb, 0x46c: 0xfb, 0x46d: 0xfb, 0x46e: 0xfb, 0x46f: 0xfb, - 0x470: 0xfb, 0x471: 0xfb, 0x472: 0xfb, 0x473: 0xfb, 0x474: 0xfb, 0x475: 0xfb, 0x476: 0xfb, 0x477: 0xfb, - 0x478: 0xfb, 0x479: 0xfb, 0x47a: 0xfb, 0x47b: 0xfb, 0x47c: 0xfb, 0x47d: 0xfb, 0x47e: 0xfb, 0x47f: 0xfb, + 0x448: 0xa0, 0x449: 0xa0, 0x44a: 0xa0, 0x44b: 0xa0, 0x44c: 0xa0, 0x44d: 0xa0, 0x44e: 0x159, 0x44f: 0xfd, + 0x450: 0x9c, 0x451: 0x15a, 0x452: 0xa0, 0x453: 0xa0, 0x454: 0xa0, 0x455: 0x15b, 0x456: 0xfd, 0x457: 0xfd, + 0x458: 0xfd, 0x459: 0xfd, 0x45a: 0xfd, 0x45b: 0xfd, 0x45c: 0xfd, 0x45d: 0xfd, 0x45e: 0xfd, 0x45f: 0xfd, + 0x460: 0xfd, 0x461: 0xfd, 0x462: 0xfd, 0x463: 0xfd, 0x464: 0xfd, 0x465: 0xfd, 0x466: 0xfd, 0x467: 0xfd, + 0x468: 0xfd, 0x469: 0xfd, 0x46a: 0xfd, 0x46b: 0xfd, 0x46c: 0xfd, 0x46d: 0xfd, 0x46e: 0xfd, 0x46f: 0xfd, + 0x470: 0xfd, 0x471: 0xfd, 0x472: 0xfd, 0x473: 0xfd, 0x474: 0xfd, 0x475: 0xfd, 0x476: 0xfd, 0x477: 0xfd, + 0x478: 0xfd, 0x479: 0xfd, 0x47a: 0xfd, 0x47b: 0xfd, 0x47c: 0xfd, 0x47d: 0xfd, 0x47e: 0xfd, 0x47f: 0xfd, // Block 0x12, offset 0x480 0x480: 0xa0, 0x481: 0xa0, 0x482: 0xa0, 0x483: 0xa0, 0x484: 0xa0, 0x485: 0xa0, 0x486: 0xa0, 0x487: 0xa0, 0x488: 0xa0, 0x489: 0xa0, 0x48a: 0xa0, 0x48b: 0xa0, 0x48c: 0xa0, 0x48d: 0xa0, 0x48e: 0xa0, 0x48f: 0xa0, - 0x490: 0x15a, 0x491: 0xfb, 0x492: 0xfb, 0x493: 0xfb, 0x494: 0xfb, 0x495: 0xfb, 0x496: 0xfb, 0x497: 0xfb, - 0x498: 0xfb, 0x499: 0xfb, 0x49a: 0xfb, 0x49b: 0xfb, 0x49c: 0xfb, 0x49d: 0xfb, 0x49e: 0xfb, 0x49f: 0xfb, - 0x4a0: 0xfb, 0x4a1: 0xfb, 0x4a2: 0xfb, 0x4a3: 0xfb, 0x4a4: 
0xfb, 0x4a5: 0xfb, 0x4a6: 0xfb, 0x4a7: 0xfb, - 0x4a8: 0xfb, 0x4a9: 0xfb, 0x4aa: 0xfb, 0x4ab: 0xfb, 0x4ac: 0xfb, 0x4ad: 0xfb, 0x4ae: 0xfb, 0x4af: 0xfb, - 0x4b0: 0xfb, 0x4b1: 0xfb, 0x4b2: 0xfb, 0x4b3: 0xfb, 0x4b4: 0xfb, 0x4b5: 0xfb, 0x4b6: 0xfb, 0x4b7: 0xfb, - 0x4b8: 0xfb, 0x4b9: 0xfb, 0x4ba: 0xfb, 0x4bb: 0xfb, 0x4bc: 0xfb, 0x4bd: 0xfb, 0x4be: 0xfb, 0x4bf: 0xfb, + 0x490: 0x15c, 0x491: 0xfd, 0x492: 0xfd, 0x493: 0xfd, 0x494: 0xfd, 0x495: 0xfd, 0x496: 0xfd, 0x497: 0xfd, + 0x498: 0xfd, 0x499: 0xfd, 0x49a: 0xfd, 0x49b: 0xfd, 0x49c: 0xfd, 0x49d: 0xfd, 0x49e: 0xfd, 0x49f: 0xfd, + 0x4a0: 0xfd, 0x4a1: 0xfd, 0x4a2: 0xfd, 0x4a3: 0xfd, 0x4a4: 0xfd, 0x4a5: 0xfd, 0x4a6: 0xfd, 0x4a7: 0xfd, + 0x4a8: 0xfd, 0x4a9: 0xfd, 0x4aa: 0xfd, 0x4ab: 0xfd, 0x4ac: 0xfd, 0x4ad: 0xfd, 0x4ae: 0xfd, 0x4af: 0xfd, + 0x4b0: 0xfd, 0x4b1: 0xfd, 0x4b2: 0xfd, 0x4b3: 0xfd, 0x4b4: 0xfd, 0x4b5: 0xfd, 0x4b6: 0xfd, 0x4b7: 0xfd, + 0x4b8: 0xfd, 0x4b9: 0xfd, 0x4ba: 0xfd, 0x4bb: 0xfd, 0x4bc: 0xfd, 0x4bd: 0xfd, 0x4be: 0xfd, 0x4bf: 0xfd, // Block 0x13, offset 0x4c0 - 0x4c0: 0xfb, 0x4c1: 0xfb, 0x4c2: 0xfb, 0x4c3: 0xfb, 0x4c4: 0xfb, 0x4c5: 0xfb, 0x4c6: 0xfb, 0x4c7: 0xfb, - 0x4c8: 0xfb, 0x4c9: 0xfb, 0x4ca: 0xfb, 0x4cb: 0xfb, 0x4cc: 0xfb, 0x4cd: 0xfb, 0x4ce: 0xfb, 0x4cf: 0xfb, + 0x4c0: 0xfd, 0x4c1: 0xfd, 0x4c2: 0xfd, 0x4c3: 0xfd, 0x4c4: 0xfd, 0x4c5: 0xfd, 0x4c6: 0xfd, 0x4c7: 0xfd, + 0x4c8: 0xfd, 0x4c9: 0xfd, 0x4ca: 0xfd, 0x4cb: 0xfd, 0x4cc: 0xfd, 0x4cd: 0xfd, 0x4ce: 0xfd, 0x4cf: 0xfd, 0x4d0: 0xa0, 0x4d1: 0xa0, 0x4d2: 0xa0, 0x4d3: 0xa0, 0x4d4: 0xa0, 0x4d5: 0xa0, 0x4d6: 0xa0, 0x4d7: 0xa0, - 0x4d8: 0xa0, 0x4d9: 0x15b, 0x4da: 0xfb, 0x4db: 0xfb, 0x4dc: 0xfb, 0x4dd: 0xfb, 0x4de: 0xfb, 0x4df: 0xfb, - 0x4e0: 0xfb, 0x4e1: 0xfb, 0x4e2: 0xfb, 0x4e3: 0xfb, 0x4e4: 0xfb, 0x4e5: 0xfb, 0x4e6: 0xfb, 0x4e7: 0xfb, - 0x4e8: 0xfb, 0x4e9: 0xfb, 0x4ea: 0xfb, 0x4eb: 0xfb, 0x4ec: 0xfb, 0x4ed: 0xfb, 0x4ee: 0xfb, 0x4ef: 0xfb, - 0x4f0: 0xfb, 0x4f1: 0xfb, 0x4f2: 0xfb, 0x4f3: 0xfb, 0x4f4: 0xfb, 0x4f5: 0xfb, 0x4f6: 0xfb, 0x4f7: 0xfb, - 0x4f8: 0xfb, 
0x4f9: 0xfb, 0x4fa: 0xfb, 0x4fb: 0xfb, 0x4fc: 0xfb, 0x4fd: 0xfb, 0x4fe: 0xfb, 0x4ff: 0xfb, + 0x4d8: 0xa0, 0x4d9: 0x15d, 0x4da: 0xfd, 0x4db: 0xfd, 0x4dc: 0xfd, 0x4dd: 0xfd, 0x4de: 0xfd, 0x4df: 0xfd, + 0x4e0: 0xfd, 0x4e1: 0xfd, 0x4e2: 0xfd, 0x4e3: 0xfd, 0x4e4: 0xfd, 0x4e5: 0xfd, 0x4e6: 0xfd, 0x4e7: 0xfd, + 0x4e8: 0xfd, 0x4e9: 0xfd, 0x4ea: 0xfd, 0x4eb: 0xfd, 0x4ec: 0xfd, 0x4ed: 0xfd, 0x4ee: 0xfd, 0x4ef: 0xfd, + 0x4f0: 0xfd, 0x4f1: 0xfd, 0x4f2: 0xfd, 0x4f3: 0xfd, 0x4f4: 0xfd, 0x4f5: 0xfd, 0x4f6: 0xfd, 0x4f7: 0xfd, + 0x4f8: 0xfd, 0x4f9: 0xfd, 0x4fa: 0xfd, 0x4fb: 0xfd, 0x4fc: 0xfd, 0x4fd: 0xfd, 0x4fe: 0xfd, 0x4ff: 0xfd, // Block 0x14, offset 0x500 - 0x500: 0xfb, 0x501: 0xfb, 0x502: 0xfb, 0x503: 0xfb, 0x504: 0xfb, 0x505: 0xfb, 0x506: 0xfb, 0x507: 0xfb, - 0x508: 0xfb, 0x509: 0xfb, 0x50a: 0xfb, 0x50b: 0xfb, 0x50c: 0xfb, 0x50d: 0xfb, 0x50e: 0xfb, 0x50f: 0xfb, - 0x510: 0xfb, 0x511: 0xfb, 0x512: 0xfb, 0x513: 0xfb, 0x514: 0xfb, 0x515: 0xfb, 0x516: 0xfb, 0x517: 0xfb, - 0x518: 0xfb, 0x519: 0xfb, 0x51a: 0xfb, 0x51b: 0xfb, 0x51c: 0xfb, 0x51d: 0xfb, 0x51e: 0xfb, 0x51f: 0xfb, + 0x500: 0xfd, 0x501: 0xfd, 0x502: 0xfd, 0x503: 0xfd, 0x504: 0xfd, 0x505: 0xfd, 0x506: 0xfd, 0x507: 0xfd, + 0x508: 0xfd, 0x509: 0xfd, 0x50a: 0xfd, 0x50b: 0xfd, 0x50c: 0xfd, 0x50d: 0xfd, 0x50e: 0xfd, 0x50f: 0xfd, + 0x510: 0xfd, 0x511: 0xfd, 0x512: 0xfd, 0x513: 0xfd, 0x514: 0xfd, 0x515: 0xfd, 0x516: 0xfd, 0x517: 0xfd, + 0x518: 0xfd, 0x519: 0xfd, 0x51a: 0xfd, 0x51b: 0xfd, 0x51c: 0xfd, 0x51d: 0xfd, 0x51e: 0xfd, 0x51f: 0xfd, 0x520: 0xa0, 0x521: 0xa0, 0x522: 0xa0, 0x523: 0xa0, 0x524: 0xa0, 0x525: 0xa0, 0x526: 0xa0, 0x527: 0xa0, - 0x528: 0x14d, 0x529: 0x15c, 0x52a: 0xfb, 0x52b: 0x15d, 0x52c: 0x15e, 0x52d: 0x15f, 0x52e: 0x160, 0x52f: 0xfb, - 0x530: 0xfb, 0x531: 0xfb, 0x532: 0xfb, 0x533: 0xfb, 0x534: 0xfb, 0x535: 0xfb, 0x536: 0xfb, 0x537: 0xfb, - 0x538: 0xfb, 0x539: 0x161, 0x53a: 0x162, 0x53b: 0xfb, 0x53c: 0xa0, 0x53d: 0x163, 0x53e: 0x164, 0x53f: 0x165, + 0x528: 0x14f, 0x529: 0x15e, 0x52a: 0xfd, 0x52b: 0x15f, 0x52c: 
0x160, 0x52d: 0x161, 0x52e: 0x162, 0x52f: 0xfd, + 0x530: 0xfd, 0x531: 0xfd, 0x532: 0xfd, 0x533: 0xfd, 0x534: 0xfd, 0x535: 0xfd, 0x536: 0xfd, 0x537: 0xfd, + 0x538: 0xfd, 0x539: 0x163, 0x53a: 0x164, 0x53b: 0xfd, 0x53c: 0xa0, 0x53d: 0x165, 0x53e: 0x166, 0x53f: 0x167, // Block 0x15, offset 0x540 0x540: 0xa0, 0x541: 0xa0, 0x542: 0xa0, 0x543: 0xa0, 0x544: 0xa0, 0x545: 0xa0, 0x546: 0xa0, 0x547: 0xa0, 0x548: 0xa0, 0x549: 0xa0, 0x54a: 0xa0, 0x54b: 0xa0, 0x54c: 0xa0, 0x54d: 0xa0, 0x54e: 0xa0, 0x54f: 0xa0, 0x550: 0xa0, 0x551: 0xa0, 0x552: 0xa0, 0x553: 0xa0, 0x554: 0xa0, 0x555: 0xa0, 0x556: 0xa0, 0x557: 0xa0, - 0x558: 0xa0, 0x559: 0xa0, 0x55a: 0xa0, 0x55b: 0xa0, 0x55c: 0xa0, 0x55d: 0xa0, 0x55e: 0xa0, 0x55f: 0x166, + 0x558: 0xa0, 0x559: 0xa0, 0x55a: 0xa0, 0x55b: 0xa0, 0x55c: 0xa0, 0x55d: 0xa0, 0x55e: 0xa0, 0x55f: 0x168, 0x560: 0xa0, 0x561: 0xa0, 0x562: 0xa0, 0x563: 0xa0, 0x564: 0xa0, 0x565: 0xa0, 0x566: 0xa0, 0x567: 0xa0, 0x568: 0xa0, 0x569: 0xa0, 0x56a: 0xa0, 0x56b: 0xa0, 0x56c: 0xa0, 0x56d: 0xa0, 0x56e: 0xa0, 0x56f: 0xa0, - 0x570: 0xa0, 0x571: 0xa0, 0x572: 0xa0, 0x573: 0x167, 0x574: 0x168, 0x575: 0xfb, 0x576: 0xfb, 0x577: 0xfb, - 0x578: 0xfb, 0x579: 0xfb, 0x57a: 0xfb, 0x57b: 0xfb, 0x57c: 0xfb, 0x57d: 0xfb, 0x57e: 0xfb, 0x57f: 0xfb, + 0x570: 0xa0, 0x571: 0xa0, 0x572: 0xa0, 0x573: 0x169, 0x574: 0x16a, 0x575: 0xfd, 0x576: 0xfd, 0x577: 0xfd, + 0x578: 0xfd, 0x579: 0xfd, 0x57a: 0xfd, 0x57b: 0xfd, 0x57c: 0xfd, 0x57d: 0xfd, 0x57e: 0xfd, 0x57f: 0xfd, // Block 0x16, offset 0x580 - 0x580: 0xa0, 0x581: 0xa0, 0x582: 0xa0, 0x583: 0xa0, 0x584: 0x169, 0x585: 0x16a, 0x586: 0xa0, 0x587: 0xa0, - 0x588: 0xa0, 0x589: 0xa0, 0x58a: 0xa0, 0x58b: 0x16b, 0x58c: 0xfb, 0x58d: 0xfb, 0x58e: 0xfb, 0x58f: 0xfb, - 0x590: 0xfb, 0x591: 0xfb, 0x592: 0xfb, 0x593: 0xfb, 0x594: 0xfb, 0x595: 0xfb, 0x596: 0xfb, 0x597: 0xfb, - 0x598: 0xfb, 0x599: 0xfb, 0x59a: 0xfb, 0x59b: 0xfb, 0x59c: 0xfb, 0x59d: 0xfb, 0x59e: 0xfb, 0x59f: 0xfb, - 0x5a0: 0xfb, 0x5a1: 0xfb, 0x5a2: 0xfb, 0x5a3: 0xfb, 0x5a4: 0xfb, 0x5a5: 0xfb, 0x5a6: 
0xfb, 0x5a7: 0xfb, - 0x5a8: 0xfb, 0x5a9: 0xfb, 0x5aa: 0xfb, 0x5ab: 0xfb, 0x5ac: 0xfb, 0x5ad: 0xfb, 0x5ae: 0xfb, 0x5af: 0xfb, - 0x5b0: 0xa0, 0x5b1: 0x16c, 0x5b2: 0x16d, 0x5b3: 0xfb, 0x5b4: 0xfb, 0x5b5: 0xfb, 0x5b6: 0xfb, 0x5b7: 0xfb, - 0x5b8: 0xfb, 0x5b9: 0xfb, 0x5ba: 0xfb, 0x5bb: 0xfb, 0x5bc: 0xfb, 0x5bd: 0xfb, 0x5be: 0xfb, 0x5bf: 0xfb, + 0x580: 0xa0, 0x581: 0xa0, 0x582: 0xa0, 0x583: 0xa0, 0x584: 0x16b, 0x585: 0x16c, 0x586: 0xa0, 0x587: 0xa0, + 0x588: 0xa0, 0x589: 0xa0, 0x58a: 0xa0, 0x58b: 0x16d, 0x58c: 0xfd, 0x58d: 0xfd, 0x58e: 0xfd, 0x58f: 0xfd, + 0x590: 0xfd, 0x591: 0xfd, 0x592: 0xfd, 0x593: 0xfd, 0x594: 0xfd, 0x595: 0xfd, 0x596: 0xfd, 0x597: 0xfd, + 0x598: 0xfd, 0x599: 0xfd, 0x59a: 0xfd, 0x59b: 0xfd, 0x59c: 0xfd, 0x59d: 0xfd, 0x59e: 0xfd, 0x59f: 0xfd, + 0x5a0: 0xfd, 0x5a1: 0xfd, 0x5a2: 0xfd, 0x5a3: 0xfd, 0x5a4: 0xfd, 0x5a5: 0xfd, 0x5a6: 0xfd, 0x5a7: 0xfd, + 0x5a8: 0xfd, 0x5a9: 0xfd, 0x5aa: 0xfd, 0x5ab: 0xfd, 0x5ac: 0xfd, 0x5ad: 0xfd, 0x5ae: 0xfd, 0x5af: 0xfd, + 0x5b0: 0xa0, 0x5b1: 0x16e, 0x5b2: 0x16f, 0x5b3: 0xfd, 0x5b4: 0xfd, 0x5b5: 0xfd, 0x5b6: 0xfd, 0x5b7: 0xfd, + 0x5b8: 0xfd, 0x5b9: 0xfd, 0x5ba: 0xfd, 0x5bb: 0xfd, 0x5bc: 0xfd, 0x5bd: 0xfd, 0x5be: 0xfd, 0x5bf: 0xfd, // Block 0x17, offset 0x5c0 - 0x5c0: 0x9c, 0x5c1: 0x9c, 0x5c2: 0x9c, 0x5c3: 0x16e, 0x5c4: 0x16f, 0x5c5: 0x170, 0x5c6: 0x171, 0x5c7: 0x172, - 0x5c8: 0x9c, 0x5c9: 0x173, 0x5ca: 0xfb, 0x5cb: 0x174, 0x5cc: 0x9c, 0x5cd: 0x175, 0x5ce: 0xfb, 0x5cf: 0xfb, - 0x5d0: 0x60, 0x5d1: 0x61, 0x5d2: 0x62, 0x5d3: 0x63, 0x5d4: 0x64, 0x5d5: 0x65, 0x5d6: 0x66, 0x5d7: 0x67, - 0x5d8: 0x68, 0x5d9: 0x69, 0x5da: 0x6a, 0x5db: 0x6b, 0x5dc: 0x6c, 0x5dd: 0x6d, 0x5de: 0x6e, 0x5df: 0x6f, + 0x5c0: 0x9c, 0x5c1: 0x9c, 0x5c2: 0x9c, 0x5c3: 0x170, 0x5c4: 0x171, 0x5c5: 0x172, 0x5c6: 0x173, 0x5c7: 0x174, + 0x5c8: 0x9c, 0x5c9: 0x175, 0x5ca: 0xfd, 0x5cb: 0x176, 0x5cc: 0x9c, 0x5cd: 0x177, 0x5ce: 0xfd, 0x5cf: 0xfd, + 0x5d0: 0x5e, 0x5d1: 0x5f, 0x5d2: 0x60, 0x5d3: 0x61, 0x5d4: 0x62, 0x5d5: 0x63, 0x5d6: 0x64, 0x5d7: 0x65, + 0x5d8: 0x66, 0x5d9: 
0x67, 0x5da: 0x68, 0x5db: 0x69, 0x5dc: 0x6a, 0x5dd: 0x6b, 0x5de: 0x6c, 0x5df: 0x6d, 0x5e0: 0x9c, 0x5e1: 0x9c, 0x5e2: 0x9c, 0x5e3: 0x9c, 0x5e4: 0x9c, 0x5e5: 0x9c, 0x5e6: 0x9c, 0x5e7: 0x9c, - 0x5e8: 0x176, 0x5e9: 0x177, 0x5ea: 0x178, 0x5eb: 0xfb, 0x5ec: 0xfb, 0x5ed: 0xfb, 0x5ee: 0xfb, 0x5ef: 0xfb, - 0x5f0: 0xfb, 0x5f1: 0xfb, 0x5f2: 0xfb, 0x5f3: 0xfb, 0x5f4: 0xfb, 0x5f5: 0xfb, 0x5f6: 0xfb, 0x5f7: 0xfb, - 0x5f8: 0xfb, 0x5f9: 0xfb, 0x5fa: 0xfb, 0x5fb: 0xfb, 0x5fc: 0xfb, 0x5fd: 0xfb, 0x5fe: 0xfb, 0x5ff: 0xfb, + 0x5e8: 0x178, 0x5e9: 0x179, 0x5ea: 0x17a, 0x5eb: 0xfd, 0x5ec: 0xfd, 0x5ed: 0xfd, 0x5ee: 0xfd, 0x5ef: 0xfd, + 0x5f0: 0xfd, 0x5f1: 0xfd, 0x5f2: 0xfd, 0x5f3: 0xfd, 0x5f4: 0xfd, 0x5f5: 0xfd, 0x5f6: 0xfd, 0x5f7: 0xfd, + 0x5f8: 0xfd, 0x5f9: 0xfd, 0x5fa: 0xfd, 0x5fb: 0xfd, 0x5fc: 0xfd, 0x5fd: 0xfd, 0x5fe: 0xfd, 0x5ff: 0xfd, // Block 0x18, offset 0x600 - 0x600: 0x179, 0x601: 0xfb, 0x602: 0xfb, 0x603: 0xfb, 0x604: 0x17a, 0x605: 0x17b, 0x606: 0xfb, 0x607: 0xfb, - 0x608: 0xfb, 0x609: 0xfb, 0x60a: 0xfb, 0x60b: 0x17c, 0x60c: 0xfb, 0x60d: 0xfb, 0x60e: 0xfb, 0x60f: 0xfb, - 0x610: 0xfb, 0x611: 0xfb, 0x612: 0xfb, 0x613: 0xfb, 0x614: 0xfb, 0x615: 0xfb, 0x616: 0xfb, 0x617: 0xfb, - 0x618: 0xfb, 0x619: 0xfb, 0x61a: 0xfb, 0x61b: 0xfb, 0x61c: 0xfb, 0x61d: 0xfb, 0x61e: 0xfb, 0x61f: 0xfb, - 0x620: 0x123, 0x621: 0x123, 0x622: 0x123, 0x623: 0x17d, 0x624: 0x70, 0x625: 0x17e, 0x626: 0xfb, 0x627: 0xfb, - 0x628: 0xfb, 0x629: 0xfb, 0x62a: 0xfb, 0x62b: 0xfb, 0x62c: 0xfb, 0x62d: 0xfb, 0x62e: 0xfb, 0x62f: 0xfb, - 0x630: 0xfb, 0x631: 0x17f, 0x632: 0x180, 0x633: 0xfb, 0x634: 0x181, 0x635: 0xfb, 0x636: 0xfb, 0x637: 0xfb, - 0x638: 0x71, 0x639: 0x72, 0x63a: 0x73, 0x63b: 0x182, 0x63c: 0xfb, 0x63d: 0xfb, 0x63e: 0xfb, 0x63f: 0xfb, + 0x600: 0x17b, 0x601: 0xfd, 0x602: 0xfd, 0x603: 0xfd, 0x604: 0x17c, 0x605: 0x17d, 0x606: 0xfd, 0x607: 0xfd, + 0x608: 0xfd, 0x609: 0xfd, 0x60a: 0xfd, 0x60b: 0x17e, 0x60c: 0xfd, 0x60d: 0xfd, 0x60e: 0xfd, 0x60f: 0xfd, + 0x610: 0xfd, 0x611: 0xfd, 0x612: 0xfd, 0x613: 0xfd, 0x614: 
0xfd, 0x615: 0xfd, 0x616: 0xfd, 0x617: 0xfd, + 0x618: 0xfd, 0x619: 0xfd, 0x61a: 0xfd, 0x61b: 0xfd, 0x61c: 0xfd, 0x61d: 0xfd, 0x61e: 0xfd, 0x61f: 0xfd, + 0x620: 0x125, 0x621: 0x125, 0x622: 0x125, 0x623: 0x17f, 0x624: 0x6e, 0x625: 0x180, 0x626: 0xfd, 0x627: 0xfd, + 0x628: 0xfd, 0x629: 0xfd, 0x62a: 0xfd, 0x62b: 0xfd, 0x62c: 0xfd, 0x62d: 0xfd, 0x62e: 0xfd, 0x62f: 0xfd, + 0x630: 0xfd, 0x631: 0x181, 0x632: 0x182, 0x633: 0xfd, 0x634: 0x183, 0x635: 0xfd, 0x636: 0xfd, 0x637: 0xfd, + 0x638: 0x6f, 0x639: 0x70, 0x63a: 0x71, 0x63b: 0x184, 0x63c: 0xfd, 0x63d: 0xfd, 0x63e: 0xfd, 0x63f: 0xfd, // Block 0x19, offset 0x640 - 0x640: 0x183, 0x641: 0x9c, 0x642: 0x184, 0x643: 0x185, 0x644: 0x74, 0x645: 0x75, 0x646: 0x186, 0x647: 0x187, - 0x648: 0x76, 0x649: 0x188, 0x64a: 0xfb, 0x64b: 0xfb, 0x64c: 0x9c, 0x64d: 0x9c, 0x64e: 0x9c, 0x64f: 0x9c, + 0x640: 0x185, 0x641: 0x9c, 0x642: 0x186, 0x643: 0x187, 0x644: 0x72, 0x645: 0x73, 0x646: 0x188, 0x647: 0x189, + 0x648: 0x74, 0x649: 0x18a, 0x64a: 0xfd, 0x64b: 0xfd, 0x64c: 0x9c, 0x64d: 0x9c, 0x64e: 0x9c, 0x64f: 0x9c, 0x650: 0x9c, 0x651: 0x9c, 0x652: 0x9c, 0x653: 0x9c, 0x654: 0x9c, 0x655: 0x9c, 0x656: 0x9c, 0x657: 0x9c, - 0x658: 0x9c, 0x659: 0x9c, 0x65a: 0x9c, 0x65b: 0x189, 0x65c: 0x9c, 0x65d: 0x18a, 0x65e: 0x9c, 0x65f: 0x18b, - 0x660: 0x18c, 0x661: 0x18d, 0x662: 0x18e, 0x663: 0xfb, 0x664: 0x9c, 0x665: 0x18f, 0x666: 0x9c, 0x667: 0x190, - 0x668: 0x9c, 0x669: 0x191, 0x66a: 0x192, 0x66b: 0x193, 0x66c: 0x9c, 0x66d: 0x9c, 0x66e: 0x194, 0x66f: 0x195, - 0x670: 0xfb, 0x671: 0xfb, 0x672: 0xfb, 0x673: 0xfb, 0x674: 0xfb, 0x675: 0xfb, 0x676: 0xfb, 0x677: 0xfb, - 0x678: 0xfb, 0x679: 0xfb, 0x67a: 0xfb, 0x67b: 0xfb, 0x67c: 0xfb, 0x67d: 0xfb, 0x67e: 0xfb, 0x67f: 0xfb, + 0x658: 0x9c, 0x659: 0x9c, 0x65a: 0x9c, 0x65b: 0x18b, 0x65c: 0x9c, 0x65d: 0x18c, 0x65e: 0x9c, 0x65f: 0x18d, + 0x660: 0x18e, 0x661: 0x18f, 0x662: 0x190, 0x663: 0xfd, 0x664: 0x9c, 0x665: 0x191, 0x666: 0x9c, 0x667: 0x192, + 0x668: 0x9c, 0x669: 0x193, 0x66a: 0x194, 0x66b: 0x195, 0x66c: 0x9c, 0x66d: 0x9c, 
0x66e: 0x196, 0x66f: 0x197, + 0x670: 0xfd, 0x671: 0xfd, 0x672: 0xfd, 0x673: 0xfd, 0x674: 0xfd, 0x675: 0xfd, 0x676: 0xfd, 0x677: 0xfd, + 0x678: 0xfd, 0x679: 0xfd, 0x67a: 0xfd, 0x67b: 0xfd, 0x67c: 0xfd, 0x67d: 0xfd, 0x67e: 0xfd, 0x67f: 0xfd, // Block 0x1a, offset 0x680 0x680: 0xa0, 0x681: 0xa0, 0x682: 0xa0, 0x683: 0xa0, 0x684: 0xa0, 0x685: 0xa0, 0x686: 0xa0, 0x687: 0xa0, 0x688: 0xa0, 0x689: 0xa0, 0x68a: 0xa0, 0x68b: 0xa0, 0x68c: 0xa0, 0x68d: 0xa0, 0x68e: 0xa0, 0x68f: 0xa0, 0x690: 0xa0, 0x691: 0xa0, 0x692: 0xa0, 0x693: 0xa0, 0x694: 0xa0, 0x695: 0xa0, 0x696: 0xa0, 0x697: 0xa0, - 0x698: 0xa0, 0x699: 0xa0, 0x69a: 0xa0, 0x69b: 0x196, 0x69c: 0xa0, 0x69d: 0xa0, 0x69e: 0xa0, 0x69f: 0xa0, + 0x698: 0xa0, 0x699: 0xa0, 0x69a: 0xa0, 0x69b: 0x198, 0x69c: 0xa0, 0x69d: 0xa0, 0x69e: 0xa0, 0x69f: 0xa0, 0x6a0: 0xa0, 0x6a1: 0xa0, 0x6a2: 0xa0, 0x6a3: 0xa0, 0x6a4: 0xa0, 0x6a5: 0xa0, 0x6a6: 0xa0, 0x6a7: 0xa0, 0x6a8: 0xa0, 0x6a9: 0xa0, 0x6aa: 0xa0, 0x6ab: 0xa0, 0x6ac: 0xa0, 0x6ad: 0xa0, 0x6ae: 0xa0, 0x6af: 0xa0, 0x6b0: 0xa0, 0x6b1: 0xa0, 0x6b2: 0xa0, 0x6b3: 0xa0, 0x6b4: 0xa0, 0x6b5: 0xa0, 0x6b6: 0xa0, 0x6b7: 0xa0, @@ -2312,8 +2455,8 @@ var idnaIndex = [2368]uint16{ 0x6c0: 0xa0, 0x6c1: 0xa0, 0x6c2: 0xa0, 0x6c3: 0xa0, 0x6c4: 0xa0, 0x6c5: 0xa0, 0x6c6: 0xa0, 0x6c7: 0xa0, 0x6c8: 0xa0, 0x6c9: 0xa0, 0x6ca: 0xa0, 0x6cb: 0xa0, 0x6cc: 0xa0, 0x6cd: 0xa0, 0x6ce: 0xa0, 0x6cf: 0xa0, 0x6d0: 0xa0, 0x6d1: 0xa0, 0x6d2: 0xa0, 0x6d3: 0xa0, 0x6d4: 0xa0, 0x6d5: 0xa0, 0x6d6: 0xa0, 0x6d7: 0xa0, - 0x6d8: 0xa0, 0x6d9: 0xa0, 0x6da: 0xa0, 0x6db: 0xa0, 0x6dc: 0x197, 0x6dd: 0xa0, 0x6de: 0xa0, 0x6df: 0xa0, - 0x6e0: 0x198, 0x6e1: 0xa0, 0x6e2: 0xa0, 0x6e3: 0xa0, 0x6e4: 0xa0, 0x6e5: 0xa0, 0x6e6: 0xa0, 0x6e7: 0xa0, + 0x6d8: 0xa0, 0x6d9: 0xa0, 0x6da: 0xa0, 0x6db: 0xa0, 0x6dc: 0x199, 0x6dd: 0xa0, 0x6de: 0xa0, 0x6df: 0xa0, + 0x6e0: 0x19a, 0x6e1: 0xa0, 0x6e2: 0xa0, 0x6e3: 0xa0, 0x6e4: 0xa0, 0x6e5: 0xa0, 0x6e6: 0xa0, 0x6e7: 0xa0, 0x6e8: 0xa0, 0x6e9: 0xa0, 0x6ea: 0xa0, 0x6eb: 0xa0, 0x6ec: 0xa0, 0x6ed: 0xa0, 0x6ee: 0xa0, 0x6ef: 
0xa0, 0x6f0: 0xa0, 0x6f1: 0xa0, 0x6f2: 0xa0, 0x6f3: 0xa0, 0x6f4: 0xa0, 0x6f5: 0xa0, 0x6f6: 0xa0, 0x6f7: 0xa0, 0x6f8: 0xa0, 0x6f9: 0xa0, 0x6fa: 0xa0, 0x6fb: 0xa0, 0x6fc: 0xa0, 0x6fd: 0xa0, 0x6fe: 0xa0, 0x6ff: 0xa0, @@ -2325,34 +2468,34 @@ var idnaIndex = [2368]uint16{ 0x720: 0xa0, 0x721: 0xa0, 0x722: 0xa0, 0x723: 0xa0, 0x724: 0xa0, 0x725: 0xa0, 0x726: 0xa0, 0x727: 0xa0, 0x728: 0xa0, 0x729: 0xa0, 0x72a: 0xa0, 0x72b: 0xa0, 0x72c: 0xa0, 0x72d: 0xa0, 0x72e: 0xa0, 0x72f: 0xa0, 0x730: 0xa0, 0x731: 0xa0, 0x732: 0xa0, 0x733: 0xa0, 0x734: 0xa0, 0x735: 0xa0, 0x736: 0xa0, 0x737: 0xa0, - 0x738: 0xa0, 0x739: 0xa0, 0x73a: 0x199, 0x73b: 0xa0, 0x73c: 0xa0, 0x73d: 0xa0, 0x73e: 0xa0, 0x73f: 0xa0, + 0x738: 0xa0, 0x739: 0xa0, 0x73a: 0x19b, 0x73b: 0xa0, 0x73c: 0xa0, 0x73d: 0xa0, 0x73e: 0xa0, 0x73f: 0xa0, // Block 0x1d, offset 0x740 0x740: 0xa0, 0x741: 0xa0, 0x742: 0xa0, 0x743: 0xa0, 0x744: 0xa0, 0x745: 0xa0, 0x746: 0xa0, 0x747: 0xa0, 0x748: 0xa0, 0x749: 0xa0, 0x74a: 0xa0, 0x74b: 0xa0, 0x74c: 0xa0, 0x74d: 0xa0, 0x74e: 0xa0, 0x74f: 0xa0, 0x750: 0xa0, 0x751: 0xa0, 0x752: 0xa0, 0x753: 0xa0, 0x754: 0xa0, 0x755: 0xa0, 0x756: 0xa0, 0x757: 0xa0, 0x758: 0xa0, 0x759: 0xa0, 0x75a: 0xa0, 0x75b: 0xa0, 0x75c: 0xa0, 0x75d: 0xa0, 0x75e: 0xa0, 0x75f: 0xa0, 0x760: 0xa0, 0x761: 0xa0, 0x762: 0xa0, 0x763: 0xa0, 0x764: 0xa0, 0x765: 0xa0, 0x766: 0xa0, 0x767: 0xa0, - 0x768: 0xa0, 0x769: 0xa0, 0x76a: 0xa0, 0x76b: 0xa0, 0x76c: 0xa0, 0x76d: 0xa0, 0x76e: 0xa0, 0x76f: 0x19a, - 0x770: 0xfb, 0x771: 0xfb, 0x772: 0xfb, 0x773: 0xfb, 0x774: 0xfb, 0x775: 0xfb, 0x776: 0xfb, 0x777: 0xfb, - 0x778: 0xfb, 0x779: 0xfb, 0x77a: 0xfb, 0x77b: 0xfb, 0x77c: 0xfb, 0x77d: 0xfb, 0x77e: 0xfb, 0x77f: 0xfb, + 0x768: 0xa0, 0x769: 0xa0, 0x76a: 0xa0, 0x76b: 0xa0, 0x76c: 0xa0, 0x76d: 0xa0, 0x76e: 0xa0, 0x76f: 0x19c, + 0x770: 0xfd, 0x771: 0xfd, 0x772: 0xfd, 0x773: 0xfd, 0x774: 0xfd, 0x775: 0xfd, 0x776: 0xfd, 0x777: 0xfd, + 0x778: 0xfd, 0x779: 0xfd, 0x77a: 0xfd, 0x77b: 0xfd, 0x77c: 0xfd, 0x77d: 0xfd, 0x77e: 0xfd, 0x77f: 0xfd, // Block 0x1e, 
offset 0x780 - 0x780: 0xfb, 0x781: 0xfb, 0x782: 0xfb, 0x783: 0xfb, 0x784: 0xfb, 0x785: 0xfb, 0x786: 0xfb, 0x787: 0xfb, - 0x788: 0xfb, 0x789: 0xfb, 0x78a: 0xfb, 0x78b: 0xfb, 0x78c: 0xfb, 0x78d: 0xfb, 0x78e: 0xfb, 0x78f: 0xfb, - 0x790: 0xfb, 0x791: 0xfb, 0x792: 0xfb, 0x793: 0xfb, 0x794: 0xfb, 0x795: 0xfb, 0x796: 0xfb, 0x797: 0xfb, - 0x798: 0xfb, 0x799: 0xfb, 0x79a: 0xfb, 0x79b: 0xfb, 0x79c: 0xfb, 0x79d: 0xfb, 0x79e: 0xfb, 0x79f: 0xfb, - 0x7a0: 0x77, 0x7a1: 0x78, 0x7a2: 0x79, 0x7a3: 0x19b, 0x7a4: 0x7a, 0x7a5: 0x7b, 0x7a6: 0x19c, 0x7a7: 0x7c, - 0x7a8: 0x7d, 0x7a9: 0xfb, 0x7aa: 0xfb, 0x7ab: 0xfb, 0x7ac: 0xfb, 0x7ad: 0xfb, 0x7ae: 0xfb, 0x7af: 0xfb, - 0x7b0: 0xfb, 0x7b1: 0xfb, 0x7b2: 0xfb, 0x7b3: 0xfb, 0x7b4: 0xfb, 0x7b5: 0xfb, 0x7b6: 0xfb, 0x7b7: 0xfb, - 0x7b8: 0xfb, 0x7b9: 0xfb, 0x7ba: 0xfb, 0x7bb: 0xfb, 0x7bc: 0xfb, 0x7bd: 0xfb, 0x7be: 0xfb, 0x7bf: 0xfb, + 0x780: 0xfd, 0x781: 0xfd, 0x782: 0xfd, 0x783: 0xfd, 0x784: 0xfd, 0x785: 0xfd, 0x786: 0xfd, 0x787: 0xfd, + 0x788: 0xfd, 0x789: 0xfd, 0x78a: 0xfd, 0x78b: 0xfd, 0x78c: 0xfd, 0x78d: 0xfd, 0x78e: 0xfd, 0x78f: 0xfd, + 0x790: 0xfd, 0x791: 0xfd, 0x792: 0xfd, 0x793: 0xfd, 0x794: 0xfd, 0x795: 0xfd, 0x796: 0xfd, 0x797: 0xfd, + 0x798: 0xfd, 0x799: 0xfd, 0x79a: 0xfd, 0x79b: 0xfd, 0x79c: 0xfd, 0x79d: 0xfd, 0x79e: 0xfd, 0x79f: 0xfd, + 0x7a0: 0x75, 0x7a1: 0x76, 0x7a2: 0x77, 0x7a3: 0x78, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x7b, 0x7a7: 0x7c, + 0x7a8: 0x7d, 0x7a9: 0xfd, 0x7aa: 0xfd, 0x7ab: 0xfd, 0x7ac: 0xfd, 0x7ad: 0xfd, 0x7ae: 0xfd, 0x7af: 0xfd, + 0x7b0: 0xfd, 0x7b1: 0xfd, 0x7b2: 0xfd, 0x7b3: 0xfd, 0x7b4: 0xfd, 0x7b5: 0xfd, 0x7b6: 0xfd, 0x7b7: 0xfd, + 0x7b8: 0xfd, 0x7b9: 0xfd, 0x7ba: 0xfd, 0x7bb: 0xfd, 0x7bc: 0xfd, 0x7bd: 0xfd, 0x7be: 0xfd, 0x7bf: 0xfd, // Block 0x1f, offset 0x7c0 0x7c0: 0xa0, 0x7c1: 0xa0, 0x7c2: 0xa0, 0x7c3: 0xa0, 0x7c4: 0xa0, 0x7c5: 0xa0, 0x7c6: 0xa0, 0x7c7: 0xa0, - 0x7c8: 0xa0, 0x7c9: 0xa0, 0x7ca: 0xa0, 0x7cb: 0xa0, 0x7cc: 0xa0, 0x7cd: 0x19d, 0x7ce: 0xfb, 0x7cf: 0xfb, - 0x7d0: 0xfb, 0x7d1: 0xfb, 0x7d2: 0xfb, 0x7d3: 
0xfb, 0x7d4: 0xfb, 0x7d5: 0xfb, 0x7d6: 0xfb, 0x7d7: 0xfb, - 0x7d8: 0xfb, 0x7d9: 0xfb, 0x7da: 0xfb, 0x7db: 0xfb, 0x7dc: 0xfb, 0x7dd: 0xfb, 0x7de: 0xfb, 0x7df: 0xfb, - 0x7e0: 0xfb, 0x7e1: 0xfb, 0x7e2: 0xfb, 0x7e3: 0xfb, 0x7e4: 0xfb, 0x7e5: 0xfb, 0x7e6: 0xfb, 0x7e7: 0xfb, - 0x7e8: 0xfb, 0x7e9: 0xfb, 0x7ea: 0xfb, 0x7eb: 0xfb, 0x7ec: 0xfb, 0x7ed: 0xfb, 0x7ee: 0xfb, 0x7ef: 0xfb, - 0x7f0: 0xfb, 0x7f1: 0xfb, 0x7f2: 0xfb, 0x7f3: 0xfb, 0x7f4: 0xfb, 0x7f5: 0xfb, 0x7f6: 0xfb, 0x7f7: 0xfb, - 0x7f8: 0xfb, 0x7f9: 0xfb, 0x7fa: 0xfb, 0x7fb: 0xfb, 0x7fc: 0xfb, 0x7fd: 0xfb, 0x7fe: 0xfb, 0x7ff: 0xfb, + 0x7c8: 0xa0, 0x7c9: 0xa0, 0x7ca: 0xa0, 0x7cb: 0xa0, 0x7cc: 0xa0, 0x7cd: 0x19d, 0x7ce: 0xfd, 0x7cf: 0xfd, + 0x7d0: 0xfd, 0x7d1: 0xfd, 0x7d2: 0xfd, 0x7d3: 0xfd, 0x7d4: 0xfd, 0x7d5: 0xfd, 0x7d6: 0xfd, 0x7d7: 0xfd, + 0x7d8: 0xfd, 0x7d9: 0xfd, 0x7da: 0xfd, 0x7db: 0xfd, 0x7dc: 0xfd, 0x7dd: 0xfd, 0x7de: 0xfd, 0x7df: 0xfd, + 0x7e0: 0xfd, 0x7e1: 0xfd, 0x7e2: 0xfd, 0x7e3: 0xfd, 0x7e4: 0xfd, 0x7e5: 0xfd, 0x7e6: 0xfd, 0x7e7: 0xfd, + 0x7e8: 0xfd, 0x7e9: 0xfd, 0x7ea: 0xfd, 0x7eb: 0xfd, 0x7ec: 0xfd, 0x7ed: 0xfd, 0x7ee: 0xfd, 0x7ef: 0xfd, + 0x7f0: 0xfd, 0x7f1: 0xfd, 0x7f2: 0xfd, 0x7f3: 0xfd, 0x7f4: 0xfd, 0x7f5: 0xfd, 0x7f6: 0xfd, 0x7f7: 0xfd, + 0x7f8: 0xfd, 0x7f9: 0xfd, 0x7fa: 0xfd, 0x7fb: 0xfd, 0x7fc: 0xfd, 0x7fd: 0xfd, 0x7fe: 0xfd, 0x7ff: 0xfd, // Block 0x20, offset 0x800 0x810: 0x0d, 0x811: 0x0e, 0x812: 0x0f, 0x813: 0x10, 0x814: 0x11, 0x815: 0x0b, 0x816: 0x12, 0x817: 0x07, 0x818: 0x13, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x14, 0x81c: 0x0b, 0x81d: 0x15, 0x81e: 0x16, 0x81f: 0x17, @@ -2370,14 +2513,14 @@ var idnaIndex = [2368]uint16{ 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, // Block 0x22, offset 0x880 - 0x880: 0x19e, 0x881: 0x19f, 0x882: 0xfb, 0x883: 0xfb, 0x884: 0x1a0, 0x885: 0x1a0, 0x886: 0x1a0, 0x887: 0x1a1, - 0x888: 0xfb, 0x889: 0xfb, 
0x88a: 0xfb, 0x88b: 0xfb, 0x88c: 0xfb, 0x88d: 0xfb, 0x88e: 0xfb, 0x88f: 0xfb, - 0x890: 0xfb, 0x891: 0xfb, 0x892: 0xfb, 0x893: 0xfb, 0x894: 0xfb, 0x895: 0xfb, 0x896: 0xfb, 0x897: 0xfb, - 0x898: 0xfb, 0x899: 0xfb, 0x89a: 0xfb, 0x89b: 0xfb, 0x89c: 0xfb, 0x89d: 0xfb, 0x89e: 0xfb, 0x89f: 0xfb, - 0x8a0: 0xfb, 0x8a1: 0xfb, 0x8a2: 0xfb, 0x8a3: 0xfb, 0x8a4: 0xfb, 0x8a5: 0xfb, 0x8a6: 0xfb, 0x8a7: 0xfb, - 0x8a8: 0xfb, 0x8a9: 0xfb, 0x8aa: 0xfb, 0x8ab: 0xfb, 0x8ac: 0xfb, 0x8ad: 0xfb, 0x8ae: 0xfb, 0x8af: 0xfb, - 0x8b0: 0xfb, 0x8b1: 0xfb, 0x8b2: 0xfb, 0x8b3: 0xfb, 0x8b4: 0xfb, 0x8b5: 0xfb, 0x8b6: 0xfb, 0x8b7: 0xfb, - 0x8b8: 0xfb, 0x8b9: 0xfb, 0x8ba: 0xfb, 0x8bb: 0xfb, 0x8bc: 0xfb, 0x8bd: 0xfb, 0x8be: 0xfb, 0x8bf: 0xfb, + 0x880: 0x19e, 0x881: 0x19f, 0x882: 0xfd, 0x883: 0xfd, 0x884: 0x1a0, 0x885: 0x1a0, 0x886: 0x1a0, 0x887: 0x1a1, + 0x888: 0xfd, 0x889: 0xfd, 0x88a: 0xfd, 0x88b: 0xfd, 0x88c: 0xfd, 0x88d: 0xfd, 0x88e: 0xfd, 0x88f: 0xfd, + 0x890: 0xfd, 0x891: 0xfd, 0x892: 0xfd, 0x893: 0xfd, 0x894: 0xfd, 0x895: 0xfd, 0x896: 0xfd, 0x897: 0xfd, + 0x898: 0xfd, 0x899: 0xfd, 0x89a: 0xfd, 0x89b: 0xfd, 0x89c: 0xfd, 0x89d: 0xfd, 0x89e: 0xfd, 0x89f: 0xfd, + 0x8a0: 0xfd, 0x8a1: 0xfd, 0x8a2: 0xfd, 0x8a3: 0xfd, 0x8a4: 0xfd, 0x8a5: 0xfd, 0x8a6: 0xfd, 0x8a7: 0xfd, + 0x8a8: 0xfd, 0x8a9: 0xfd, 0x8aa: 0xfd, 0x8ab: 0xfd, 0x8ac: 0xfd, 0x8ad: 0xfd, 0x8ae: 0xfd, 0x8af: 0xfd, + 0x8b0: 0xfd, 0x8b1: 0xfd, 0x8b2: 0xfd, 0x8b3: 0xfd, 0x8b4: 0xfd, 0x8b5: 0xfd, 0x8b6: 0xfd, 0x8b7: 0xfd, + 0x8b8: 0xfd, 0x8b9: 0xfd, 0x8ba: 0xfd, 0x8bb: 0xfd, 0x8bc: 0xfd, 0x8bd: 0xfd, 0x8be: 0xfd, 0x8bf: 0xfd, // Block 0x23, offset 0x8c0 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, @@ -2393,10 +2536,10 @@ var idnaIndex = [2368]uint16{ } // idnaSparseOffset: 292 entries, 584 bytes -var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 
0x5d, 0x62, 0x6c, 0x78, 0x85, 0x8b, 0x94, 0xa4, 0xb2, 0xbd, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x225, 0x22f, 0x23b, 0x247, 0x253, 0x25b, 0x260, 0x26d, 0x27e, 0x282, 0x28d, 0x291, 0x29a, 0x2a2, 0x2a8, 0x2ad, 0x2b0, 0x2b4, 0x2ba, 0x2be, 0x2c2, 0x2c6, 0x2cc, 0x2d4, 0x2db, 0x2e6, 0x2f0, 0x2f4, 0x2f7, 0x2fd, 0x301, 0x303, 0x306, 0x308, 0x30b, 0x315, 0x318, 0x327, 0x32b, 0x330, 0x333, 0x337, 0x33c, 0x341, 0x347, 0x358, 0x368, 0x36e, 0x372, 0x381, 0x386, 0x38e, 0x398, 0x3a3, 0x3ab, 0x3bc, 0x3c5, 0x3d5, 0x3e2, 0x3ee, 0x3f3, 0x400, 0x404, 0x409, 0x40b, 0x40d, 0x411, 0x413, 0x417, 0x420, 0x426, 0x42a, 0x43a, 0x444, 0x449, 0x44c, 0x452, 0x459, 0x45e, 0x462, 0x468, 0x46d, 0x476, 0x47b, 0x481, 0x488, 0x48f, 0x496, 0x49a, 0x49f, 0x4a2, 0x4a7, 0x4b3, 0x4b9, 0x4be, 0x4c5, 0x4cd, 0x4d2, 0x4d6, 0x4e6, 0x4ed, 0x4f1, 0x4f5, 0x4fc, 0x4fe, 0x501, 0x504, 0x508, 0x511, 0x515, 0x51d, 0x525, 0x52d, 0x539, 0x545, 0x54b, 0x554, 0x560, 0x567, 0x570, 0x57b, 0x582, 0x591, 0x59e, 0x5ab, 0x5b4, 0x5b8, 0x5c7, 0x5cf, 0x5da, 0x5e3, 0x5e9, 0x5f1, 0x5fa, 0x605, 0x608, 0x614, 0x61d, 0x620, 0x625, 0x62e, 0x633, 0x640, 0x64b, 0x654, 0x65e, 0x661, 0x66b, 0x674, 0x680, 0x68d, 0x69a, 0x6a8, 0x6af, 0x6b3, 0x6b7, 0x6ba, 0x6bf, 0x6c2, 0x6c7, 0x6ca, 0x6d1, 0x6d8, 0x6dc, 0x6e7, 0x6ea, 0x6ed, 0x6f0, 0x6f6, 0x6fc, 0x705, 0x708, 0x70b, 0x70e, 0x711, 0x718, 0x71b, 0x720, 0x72a, 0x72d, 0x731, 0x740, 0x74c, 0x750, 0x755, 0x759, 0x75e, 0x762, 0x767, 0x770, 0x77b, 0x781, 0x787, 0x78d, 0x793, 0x79c, 0x79f, 0x7a2, 0x7a6, 0x7aa, 0x7ae, 0x7b4, 0x7ba, 0x7bf, 0x7c2, 0x7d2, 0x7d9, 0x7dc, 0x7e1, 0x7e5, 0x7eb, 0x7f2, 0x7f6, 0x7fa, 0x803, 0x80a, 0x80f, 0x813, 0x821, 0x824, 0x827, 0x82b, 0x82f, 0x832, 0x842, 0x853, 0x856, 0x85b, 0x85d, 0x85f} +var idnaSparseOffset = []uint16{0x0, 
0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x85, 0x8b, 0x94, 0xa4, 0xb2, 0xbd, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x225, 0x22f, 0x23b, 0x247, 0x253, 0x25b, 0x260, 0x26d, 0x27e, 0x282, 0x28d, 0x291, 0x29a, 0x2a2, 0x2a8, 0x2ad, 0x2b0, 0x2b4, 0x2ba, 0x2be, 0x2c2, 0x2c6, 0x2cc, 0x2d4, 0x2db, 0x2e6, 0x2f0, 0x2f4, 0x2f7, 0x2fd, 0x301, 0x303, 0x306, 0x308, 0x30b, 0x315, 0x318, 0x327, 0x32b, 0x32f, 0x331, 0x33a, 0x33d, 0x341, 0x346, 0x34b, 0x351, 0x362, 0x372, 0x378, 0x37c, 0x38b, 0x390, 0x398, 0x3a2, 0x3ad, 0x3b5, 0x3c6, 0x3cf, 0x3df, 0x3ec, 0x3f8, 0x3fd, 0x40a, 0x40e, 0x413, 0x415, 0x417, 0x41b, 0x41d, 0x421, 0x42a, 0x430, 0x434, 0x444, 0x44e, 0x453, 0x456, 0x45c, 0x463, 0x468, 0x46c, 0x472, 0x477, 0x480, 0x485, 0x48b, 0x492, 0x499, 0x4a0, 0x4a4, 0x4a9, 0x4ac, 0x4b1, 0x4bd, 0x4c3, 0x4c8, 0x4cf, 0x4d7, 0x4dc, 0x4e0, 0x4f0, 0x4f7, 0x4fb, 0x4ff, 0x506, 0x508, 0x50b, 0x50e, 0x512, 0x51b, 0x51f, 0x527, 0x52f, 0x537, 0x543, 0x54f, 0x555, 0x55e, 0x56a, 0x571, 0x57a, 0x585, 0x58c, 0x59b, 0x5a8, 0x5b5, 0x5be, 0x5c2, 0x5d1, 0x5d9, 0x5e4, 0x5ed, 0x5f3, 0x5fb, 0x604, 0x60f, 0x612, 0x61e, 0x627, 0x62a, 0x62f, 0x638, 0x63d, 0x64a, 0x655, 0x65e, 0x668, 0x66b, 0x675, 0x67e, 0x68a, 0x697, 0x6a4, 0x6b2, 0x6b9, 0x6bd, 0x6c1, 0x6c4, 0x6c9, 0x6cc, 0x6d1, 0x6d4, 0x6db, 0x6e2, 0x6e6, 0x6f1, 0x6f4, 0x6f7, 0x6fa, 0x700, 0x706, 0x70f, 0x712, 0x715, 0x718, 0x71b, 0x722, 0x725, 0x72a, 0x734, 0x737, 0x73b, 0x74a, 0x756, 0x75a, 0x75f, 0x763, 0x768, 0x76c, 0x771, 0x77a, 0x785, 0x78b, 0x791, 0x797, 0x79d, 0x7a6, 0x7a9, 0x7ac, 0x7b0, 0x7b4, 0x7b8, 0x7be, 0x7c4, 0x7c9, 0x7cc, 0x7dc, 0x7e3, 0x7e6, 0x7eb, 0x7ef, 0x7f5, 0x7fc, 0x800, 0x804, 0x80d, 0x814, 0x819, 0x81d, 0x82b, 0x82e, 0x831, 0x835, 0x839, 0x83c, 0x83f, 0x844, 
0x846, 0x848} -// idnaSparseValues: 2146 entries, 8584 bytes -var idnaSparseValues = [2146]valueRange{ +// idnaSparseValues: 2123 entries, 8492 bytes +var idnaSparseValues = [2123]valueRange{ // Block 0x0, offset 0x0 {value: 0x0000, lo: 0x07}, {value: 0xe105, lo: 0x80, hi: 0x96}, @@ -2427,15 +2570,15 @@ var idnaSparseValues = [2146]valueRange{ // Block 0x2, offset 0x19 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x00a9, lo: 0xb0, hi: 0xb0}, {value: 0x037d, lo: 0xb1, hi: 0xb1}, - {value: 0x0259, lo: 0xb2, hi: 0xb2}, - {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x00b1, lo: 0xb2, hi: 0xb2}, + {value: 0x00b9, lo: 0xb3, hi: 0xb3}, {value: 0x034d, lo: 0xb4, hi: 0xb4}, {value: 0x0395, lo: 0xb5, hi: 0xb5}, {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, - {value: 0x0279, lo: 0xb7, hi: 0xb7}, - {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x00c1, lo: 0xb7, hi: 0xb7}, + {value: 0x00c9, lo: 0xb8, hi: 0xb8}, {value: 0x0008, lo: 0xb9, hi: 0xbf}, // Block 0x3, offset 0x25 {value: 0x0000, lo: 0x01}, @@ -2457,7 +2600,7 @@ var idnaSparseValues = [2146]valueRange{ // Block 0x6, offset 0x33 {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0131, lo: 0x87, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x88}, {value: 0x0018, lo: 0x89, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8c}, @@ -2643,7 +2786,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0008, lo: 0x81, hi: 0xb0}, {value: 0x3308, lo: 0xb1, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb2}, - {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x01f1, lo: 0xb3, hi: 0xb3}, {value: 0x3308, lo: 0xb4, hi: 0xb9}, {value: 0x3b08, lo: 0xba, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbe}, @@ -2666,8 +2809,8 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0961, lo: 0x9c, hi: 0x9c}, - {value: 0x0999, 
lo: 0x9d, hi: 0x9d}, + {value: 0x0201, lo: 0x9c, hi: 0x9c}, + {value: 0x0209, lo: 0x9d, hi: 0x9d}, {value: 0x0008, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, // Block 0x18, offset 0xf9 @@ -3075,13 +3218,13 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0018, lo: 0xbe, hi: 0xbf}, // Block 0x44, offset 0x260 {value: 0x0000, lo: 0x0c}, - {value: 0x0e29, lo: 0x80, hi: 0x80}, - {value: 0x0e41, lo: 0x81, hi: 0x81}, - {value: 0x0e59, lo: 0x82, hi: 0x82}, - {value: 0x0e71, lo: 0x83, hi: 0x83}, - {value: 0x0e89, lo: 0x84, hi: 0x85}, - {value: 0x0ea1, lo: 0x86, hi: 0x86}, - {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x02a9, lo: 0x80, hi: 0x80}, + {value: 0x02b1, lo: 0x81, hi: 0x81}, + {value: 0x02b9, lo: 0x82, hi: 0x82}, + {value: 0x02c1, lo: 0x83, hi: 0x83}, + {value: 0x02c9, lo: 0x84, hi: 0x85}, + {value: 0x02d1, lo: 0x86, hi: 0x86}, + {value: 0x02d9, lo: 0x87, hi: 0x87}, {value: 0x057d, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x8f}, {value: 0x059d, lo: 0x90, hi: 0xba}, @@ -3133,18 +3276,18 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x88}, - {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0851, lo: 0x89, hi: 0x89}, {value: 0x0018, lo: 0x8a, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, // Block 0x4a, offset 0x29a {value: 0x0000, lo: 0x07}, {value: 0x0018, lo: 0x80, hi: 0xab}, - {value: 0x24f1, lo: 0xac, hi: 0xac}, - {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0859, lo: 0xac, hi: 0xac}, + {value: 0x0861, lo: 0xad, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xae}, - {value: 0x2579, lo: 0xaf, hi: 0xaf}, - {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0869, lo: 0xaf, hi: 0xaf}, + {value: 0x0871, lo: 0xb0, hi: 0xb0}, {value: 0x0018, lo: 0xb1, hi: 0xbf}, // Block 0x4b, offset 0x2a2 {value: 0x0000, lo: 0x05}, @@ -3166,19 +3309,19 @@ var idnaSparseValues = [2146]valueRange{ // Block 0x4e, offset 
0x2b0 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0929, lo: 0x8c, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0xbf}, // Block 0x4f, offset 0x2b4 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0e7e, lo: 0xb4, hi: 0xb4}, - {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0932, lo: 0xb5, hi: 0xb5}, {value: 0x0e9e, lo: 0xb6, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, // Block 0x50, offset 0x2ba {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x9b}, - {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0939, lo: 0x9c, hi: 0x9c}, {value: 0x0018, lo: 0x9d, hi: 0xbf}, // Block 0x51, offset 0x2be {value: 0x0000, lo: 0x03}, @@ -3277,16 +3420,16 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x98}, {value: 0x3308, lo: 0x99, hi: 0x9a}, - {value: 0x29e2, lo: 0x9b, hi: 0x9b}, - {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x096a, lo: 0x9b, hi: 0x9b}, + {value: 0x0972, lo: 0x9c, hi: 0x9c}, {value: 0x0008, lo: 0x9d, hi: 0x9e}, - {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0979, lo: 0x9f, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa0}, {value: 0x0008, lo: 0xa1, hi: 0xbf}, // Block 0x61, offset 0x315 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xbe}, - {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + {value: 0x0981, lo: 0xbf, hi: 0xbf}, // Block 0x62, offset 0x318 {value: 0x0000, lo: 0x0e}, {value: 0x0040, lo: 0x80, hi: 0x84}, @@ -3309,46 +3452,58 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xa4, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, // Block 0x64, offset 0x32b - {value: 0x0030, lo: 0x04}, - {value: 0x2aa2, lo: 0x80, hi: 0x9d}, - {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x03}, + {value: 0x098a, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x30a2, lo: 0xa0, hi: 0xbf}, - // Block 0x65, offset 0x330 + {value: 0x0a82, lo: 0xa0, hi: 0xbf}, + 
// Block 0x65, offset 0x32f + {value: 0x0008, lo: 0x01}, + {value: 0x0d19, lo: 0x80, hi: 0xbf}, + // Block 0x66, offset 0x331 + {value: 0x0008, lo: 0x08}, + {value: 0x0f19, lo: 0x80, hi: 0xb0}, + {value: 0x4045, lo: 0xb1, hi: 0xb1}, + {value: 0x10a1, lo: 0xb2, hi: 0xb3}, + {value: 0x4065, lo: 0xb4, hi: 0xb4}, + {value: 0x10b1, lo: 0xb5, hi: 0xb7}, + {value: 0x4085, lo: 0xb8, hi: 0xb8}, + {value: 0x4085, lo: 0xb9, hi: 0xb9}, + {value: 0x10c9, lo: 0xba, hi: 0xbf}, + // Block 0x67, offset 0x33a {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x66, offset 0x333 + // Block 0x68, offset 0x33d {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x67, offset 0x337 + // Block 0x69, offset 0x341 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x68, offset 0x33c + // Block 0x6a, offset 0x346 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xbf}, - // Block 0x69, offset 0x341 + // Block 0x6b, offset 0x34b {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x0018, lo: 0xa6, hi: 0xaf}, {value: 0x3308, lo: 0xb0, hi: 0xb1}, {value: 0x0018, lo: 0xb2, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6a, offset 0x347 + // Block 0x6c, offset 0x351 {value: 0x0000, lo: 0x10}, {value: 0x0040, lo: 0x80, hi: 0x81}, {value: 0xe00d, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0x83}, {value: 0x03f5, lo: 0x84, hi: 0x84}, - {value: 0x1329, lo: 0x85, hi: 0x85}, + {value: 0x0479, lo: 0x85, hi: 0x85}, {value: 0x447d, lo: 0x86, hi: 0x86}, {value: 0xe07d, lo: 0x87, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x88}, @@ -3357,10 +3512,10 @@ var 
idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0x8b, hi: 0xb4}, {value: 0xe01d, lo: 0xb5, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xb7}, - {value: 0x2009, lo: 0xb8, hi: 0xb8}, - {value: 0x6ec1, lo: 0xb9, hi: 0xb9}, + {value: 0x0741, lo: 0xb8, hi: 0xb8}, + {value: 0x13f1, lo: 0xb9, hi: 0xb9}, {value: 0x0008, lo: 0xba, hi: 0xbf}, - // Block 0x6b, offset 0x358 + // Block 0x6d, offset 0x362 {value: 0x0000, lo: 0x0f}, {value: 0x0008, lo: 0x80, hi: 0x81}, {value: 0x3308, lo: 0x82, hi: 0x82}, @@ -3377,19 +3532,19 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xad, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x6c, offset 0x368 + // Block 0x6e, offset 0x372 {value: 0x0000, lo: 0x05}, {value: 0x0208, lo: 0x80, hi: 0xb1}, {value: 0x0108, lo: 0xb2, hi: 0xb2}, {value: 0x0008, lo: 0xb3, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6d, offset 0x36e + // Block 0x6f, offset 0x378 {value: 0x0000, lo: 0x03}, {value: 0x3008, lo: 0x80, hi: 0x81}, {value: 0x0008, lo: 0x82, hi: 0xb3}, {value: 0x3008, lo: 0xb4, hi: 0xbf}, - // Block 0x6e, offset 0x372 + // Block 0x70, offset 0x37c {value: 0x0000, lo: 0x0e}, {value: 0x3008, lo: 0x80, hi: 0x83}, {value: 0x3b08, lo: 0x84, hi: 0x84}, @@ -3405,13 +3560,13 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0018, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbe}, {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0x6f, offset 0x381 + // Block 0x71, offset 0x38b {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x3308, lo: 0xa6, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x70, offset 0x386 + // Block 0x72, offset 0x390 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x3308, lo: 0x87, hi: 0x91}, @@ -3420,7 +3575,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0x94, hi: 0x9e}, {value: 0x0018, 
lo: 0x9f, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x71, offset 0x38e + // Block 0x73, offset 0x398 {value: 0x0000, lo: 0x09}, {value: 0x3308, lo: 0x80, hi: 0x82}, {value: 0x3008, lo: 0x83, hi: 0x83}, @@ -3431,7 +3586,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3008, lo: 0xba, hi: 0xbb}, {value: 0x3308, lo: 0xbc, hi: 0xbd}, {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x72, offset 0x398 + // Block 0x74, offset 0x3a2 {value: 0x0000, lo: 0x0a}, {value: 0x3808, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8d}, @@ -3443,7 +3598,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3308, lo: 0xa5, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x73, offset 0x3a3 + // Block 0x75, offset 0x3ad {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa8}, {value: 0x3308, lo: 0xa9, hi: 0xae}, @@ -3452,7 +3607,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3008, lo: 0xb3, hi: 0xb4}, {value: 0x3308, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x74, offset 0x3ab + // Block 0x76, offset 0x3b5 {value: 0x0000, lo: 0x10}, {value: 0x0008, lo: 0x80, hi: 0x82}, {value: 0x3308, lo: 0x83, hi: 0x83}, @@ -3470,7 +3625,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3308, lo: 0xbc, hi: 0xbc}, {value: 0x3008, lo: 0xbd, hi: 0xbd}, {value: 0x0008, lo: 0xbe, hi: 0xbf}, - // Block 0x75, offset 0x3bc + // Block 0x77, offset 0x3c6 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xaf}, {value: 0x3308, lo: 0xb0, hi: 0xb0}, @@ -3480,7 +3635,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3308, lo: 0xb7, hi: 0xb8}, {value: 0x0008, lo: 0xb9, hi: 0xbd}, {value: 0x3308, lo: 0xbe, hi: 0xbf}, - // Block 0x76, offset 0x3c5 + // Block 0x78, offset 0x3cf {value: 0x0000, lo: 0x0f}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x3308, lo: 0x81, hi: 0x81}, @@ -3497,7 +3652,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3008, lo: 0xb5, hi: 0xb5}, 
{value: 0x3b08, lo: 0xb6, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x77, offset 0x3d5 + // Block 0x79, offset 0x3df {value: 0x0000, lo: 0x0c}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x86}, @@ -3511,26 +3666,26 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0008, lo: 0xa8, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x78, offset 0x3e2 + // Block 0x7a, offset 0x3ec {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9b}, {value: 0x449d, lo: 0x9c, hi: 0x9c}, {value: 0x44b5, lo: 0x9d, hi: 0x9d}, - {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0x0941, lo: 0x9e, hi: 0x9e}, {value: 0xe06d, lo: 0x9f, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa8}, - {value: 0x6ed9, lo: 0xa9, hi: 0xa9}, + {value: 0x13f9, lo: 0xa9, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x44cd, lo: 0xb0, hi: 0xbf}, - // Block 0x79, offset 0x3ee + // Block 0x7b, offset 0x3f8 {value: 0x0000, lo: 0x04}, {value: 0x44ed, lo: 0x80, hi: 0x8f}, {value: 0x450d, lo: 0x90, hi: 0x9f}, {value: 0x452d, lo: 0xa0, hi: 0xaf}, {value: 0x450d, lo: 0xb0, hi: 0xbf}, - // Block 0x7a, offset 0x3f3 + // Block 0x7c, offset 0x3fd {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0xa2}, {value: 0x3008, lo: 0xa3, hi: 0xa4}, @@ -3544,76 +3699,76 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x7b, offset 0x400 + // Block 0x7d, offset 0x40a {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x7c, offset 0x404 + // Block 0x7e, offset 0x40e {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8a}, {value: 0x0018, lo: 0x8b, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // 
Block 0x7d, offset 0x409 + // Block 0x7f, offset 0x413 {value: 0x0000, lo: 0x01}, {value: 0x0040, lo: 0x80, hi: 0xbf}, - // Block 0x7e, offset 0x40b + // Block 0x80, offset 0x415 {value: 0x0020, lo: 0x01}, {value: 0x454d, lo: 0x80, hi: 0xbf}, - // Block 0x7f, offset 0x40d + // Block 0x81, offset 0x417 {value: 0x0020, lo: 0x03}, {value: 0x4d4d, lo: 0x80, hi: 0x94}, {value: 0x4b0d, lo: 0x95, hi: 0x95}, {value: 0x4fed, lo: 0x96, hi: 0xbf}, - // Block 0x80, offset 0x411 + // Block 0x82, offset 0x41b {value: 0x0020, lo: 0x01}, {value: 0x552d, lo: 0x80, hi: 0xbf}, - // Block 0x81, offset 0x413 + // Block 0x83, offset 0x41d {value: 0x0020, lo: 0x03}, {value: 0x5d2d, lo: 0x80, hi: 0x84}, {value: 0x568d, lo: 0x85, hi: 0x85}, {value: 0x5dcd, lo: 0x86, hi: 0xbf}, - // Block 0x82, offset 0x417 + // Block 0x84, offset 0x421 {value: 0x0020, lo: 0x08}, {value: 0x6b8d, lo: 0x80, hi: 0x8f}, {value: 0x6d4d, lo: 0x90, hi: 0x90}, {value: 0x6d8d, lo: 0x91, hi: 0xab}, - {value: 0x6ef1, lo: 0xac, hi: 0xac}, + {value: 0x1401, lo: 0xac, hi: 0xac}, {value: 0x70ed, lo: 0xad, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x710d, lo: 0xb0, hi: 0xbf}, - // Block 0x83, offset 0x420 + // Block 0x85, offset 0x42a {value: 0x0020, lo: 0x05}, {value: 0x730d, lo: 0x80, hi: 0xad}, {value: 0x656d, lo: 0xae, hi: 0xae}, {value: 0x78cd, lo: 0xaf, hi: 0xb5}, {value: 0x6f8d, lo: 0xb6, hi: 0xb6}, {value: 0x79ad, lo: 0xb7, hi: 0xbf}, - // Block 0x84, offset 0x426 - {value: 0x0028, lo: 0x03}, - {value: 0x7c71, lo: 0x80, hi: 0x82}, - {value: 0x7c31, lo: 0x83, hi: 0x83}, - {value: 0x7ce9, lo: 0x84, hi: 0xbf}, - // Block 0x85, offset 0x42a - {value: 0x0038, lo: 0x0f}, - {value: 0x9e01, lo: 0x80, hi: 0x83}, - {value: 0x9ea9, lo: 0x84, hi: 0x85}, - {value: 0x9ee1, lo: 0x86, hi: 0x87}, - {value: 0x9f19, lo: 0x88, hi: 0x8f}, + // Block 0x86, offset 0x430 + {value: 0x0008, lo: 0x03}, + {value: 0x1751, lo: 0x80, hi: 0x82}, + {value: 0x1741, lo: 0x83, hi: 0x83}, + {value: 
0x1769, lo: 0x84, hi: 0xbf}, + // Block 0x87, offset 0x434 + {value: 0x0008, lo: 0x0f}, + {value: 0x1d81, lo: 0x80, hi: 0x83}, + {value: 0x1d99, lo: 0x84, hi: 0x85}, + {value: 0x1da1, lo: 0x86, hi: 0x87}, + {value: 0x1da9, lo: 0x88, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0xa0d9, lo: 0x92, hi: 0x97}, - {value: 0xa1f1, lo: 0x98, hi: 0x9c}, - {value: 0xa2d1, lo: 0x9d, hi: 0xb3}, - {value: 0x9d91, lo: 0xb4, hi: 0xb4}, - {value: 0x9e01, lo: 0xb5, hi: 0xb5}, - {value: 0xa7d9, lo: 0xb6, hi: 0xbb}, - {value: 0xa8b9, lo: 0xbc, hi: 0xbc}, - {value: 0xa849, lo: 0xbd, hi: 0xbd}, - {value: 0xa929, lo: 0xbe, hi: 0xbf}, - // Block 0x86, offset 0x43a + {value: 0x1de9, lo: 0x92, hi: 0x97}, + {value: 0x1e11, lo: 0x98, hi: 0x9c}, + {value: 0x1e31, lo: 0x9d, hi: 0xb3}, + {value: 0x1d71, lo: 0xb4, hi: 0xb4}, + {value: 0x1d81, lo: 0xb5, hi: 0xb5}, + {value: 0x1ee9, lo: 0xb6, hi: 0xbb}, + {value: 0x1f09, lo: 0xbc, hi: 0xbc}, + {value: 0x1ef9, lo: 0xbd, hi: 0xbd}, + {value: 0x1f19, lo: 0xbe, hi: 0xbf}, + // Block 0x88, offset 0x444 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8c}, @@ -3624,24 +3779,24 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0008, lo: 0xbc, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x87, offset 0x444 + // Block 0x89, offset 0x44e {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0x88, offset 0x449 + // Block 0x8a, offset 0x453 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x89, offset 0x44c + // Block 0x8b, offset 0x456 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb6}, {value: 
0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x8a, offset 0x452 + // Block 0x8c, offset 0x45c {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x8e}, {value: 0x0040, lo: 0x8f, hi: 0x8f}, @@ -3649,31 +3804,31 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0x9d, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa0}, {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x8b, offset 0x459 + // Block 0x8d, offset 0x463 {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbc}, {value: 0x3308, lo: 0xbd, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x8c, offset 0x45e + // Block 0x8e, offset 0x468 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x9c}, {value: 0x0040, lo: 0x9d, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x8d, offset 0x462 + // Block 0x8f, offset 0x46c {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x9f}, {value: 0x3308, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x8e, offset 0x468 + // Block 0x90, offset 0x472 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xac}, {value: 0x0008, lo: 0xad, hi: 0xbf}, - // Block 0x8f, offset 0x46d + // Block 0x91, offset 0x477 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x81}, @@ -3683,20 +3838,20 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0008, lo: 0x90, hi: 0xb5}, {value: 0x3308, lo: 0xb6, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x90, offset 0x476 + // Block 0x92, offset 0x480 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x91, offset 0x47b + // Block 0x93, offset 0x485 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 
0x83}, {value: 0x0040, lo: 0x84, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x92, offset 0x481 + // Block 0x94, offset 0x48b {value: 0x0000, lo: 0x06}, {value: 0xe145, lo: 0x80, hi: 0x87}, {value: 0xe1c5, lo: 0x88, hi: 0x8f}, @@ -3704,7 +3859,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x8b0d, lo: 0x98, hi: 0x9f}, {value: 0x8b25, lo: 0xa0, hi: 0xa7}, {value: 0x0008, lo: 0xa8, hi: 0xbf}, - // Block 0x93, offset 0x488 + // Block 0x95, offset 0x492 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, @@ -3712,7 +3867,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x8b25, lo: 0xb0, hi: 0xb7}, {value: 0x8b0d, lo: 0xb8, hi: 0xbf}, - // Block 0x94, offset 0x48f + // Block 0x96, offset 0x499 {value: 0x0000, lo: 0x06}, {value: 0xe145, lo: 0x80, hi: 0x87}, {value: 0xe1c5, lo: 0x88, hi: 0x8f}, @@ -3720,28 +3875,28 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0x94, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x95, offset 0x496 + // Block 0x97, offset 0x4a0 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x96, offset 0x49a + // Block 0x98, offset 0x4a4 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xae}, {value: 0x0018, lo: 0xaf, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x97, offset 0x49f + // Block 0x99, offset 0x4a9 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x98, offset 0x4a2 + // Block 0x9a, offset 0x4ac {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xbf}, - // 
Block 0x99, offset 0x4a7 + // Block 0x9b, offset 0x4b1 {value: 0x0000, lo: 0x0b}, {value: 0x0808, lo: 0x80, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x87}, @@ -3754,20 +3909,20 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0808, lo: 0xbc, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbe}, {value: 0x0808, lo: 0xbf, hi: 0xbf}, - // Block 0x9a, offset 0x4b3 + // Block 0x9c, offset 0x4bd {value: 0x0000, lo: 0x05}, {value: 0x0808, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x96}, {value: 0x0818, lo: 0x97, hi: 0x9f}, {value: 0x0808, lo: 0xa0, hi: 0xb6}, {value: 0x0818, lo: 0xb7, hi: 0xbf}, - // Block 0x9b, offset 0x4b9 + // Block 0x9d, offset 0x4c3 {value: 0x0000, lo: 0x04}, {value: 0x0808, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0xa6}, {value: 0x0818, lo: 0xa7, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x9c, offset 0x4be + // Block 0x9e, offset 0x4c8 {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0808, lo: 0xa0, hi: 0xb2}, @@ -3775,7 +3930,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0808, lo: 0xb4, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xba}, {value: 0x0818, lo: 0xbb, hi: 0xbf}, - // Block 0x9d, offset 0x4c5 + // Block 0x9f, offset 0x4cf {value: 0x0000, lo: 0x07}, {value: 0x0808, lo: 0x80, hi: 0x95}, {value: 0x0818, lo: 0x96, hi: 0x9b}, @@ -3784,18 +3939,18 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0808, lo: 0xa0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbe}, {value: 0x0818, lo: 0xbf, hi: 0xbf}, - // Block 0x9e, offset 0x4cd + // Block 0xa0, offset 0x4d7 {value: 0x0000, lo: 0x04}, {value: 0x0808, lo: 0x80, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbb}, {value: 0x0818, lo: 0xbc, hi: 0xbd}, {value: 0x0808, lo: 0xbe, hi: 0xbf}, - // Block 0x9f, offset 0x4d2 + // Block 0xa1, offset 0x4dc {value: 0x0000, lo: 0x03}, {value: 0x0818, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x91}, {value: 0x0818, lo: 0x92, hi: 0xbf}, - // Block 0xa0, offset 0x4d6 + // Block 0xa2, offset 
0x4e0 {value: 0x0000, lo: 0x0f}, {value: 0x0808, lo: 0x80, hi: 0x80}, {value: 0x3308, lo: 0x81, hi: 0x83}, @@ -3812,7 +3967,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3308, lo: 0xb8, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbe}, {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xa1, offset 0x4e6 + // Block 0xa3, offset 0x4f0 {value: 0x0000, lo: 0x06}, {value: 0x0818, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x8f}, @@ -3820,17 +3975,17 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0x99, hi: 0x9f}, {value: 0x0808, lo: 0xa0, hi: 0xbc}, {value: 0x0818, lo: 0xbd, hi: 0xbf}, - // Block 0xa2, offset 0x4ed + // Block 0xa4, offset 0x4f7 {value: 0x0000, lo: 0x03}, {value: 0x0808, lo: 0x80, hi: 0x9c}, {value: 0x0818, lo: 0x9d, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xa3, offset 0x4f1 + // Block 0xa5, offset 0x4fb {value: 0x0000, lo: 0x03}, {value: 0x0808, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb8}, {value: 0x0018, lo: 0xb9, hi: 0xbf}, - // Block 0xa4, offset 0x4f5 + // Block 0xa6, offset 0x4ff {value: 0x0000, lo: 0x06}, {value: 0x0808, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, @@ -3838,23 +3993,23 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0808, lo: 0xa0, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb7}, {value: 0x0818, lo: 0xb8, hi: 0xbf}, - // Block 0xa5, offset 0x4fc + // Block 0xa7, offset 0x506 {value: 0x0000, lo: 0x01}, {value: 0x0808, lo: 0x80, hi: 0xbf}, - // Block 0xa6, offset 0x4fe + // Block 0xa8, offset 0x508 {value: 0x0000, lo: 0x02}, {value: 0x0808, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0xa7, offset 0x501 + // Block 0xa9, offset 0x50b {value: 0x0000, lo: 0x02}, {value: 0x03dd, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xa8, offset 0x504 + // Block 0xaa, offset 0x50e {value: 0x0000, lo: 0x03}, {value: 0x0808, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb9}, {value: 0x0818, lo: 0xba, hi: 0xbf}, - // 
Block 0xa9, offset 0x508 + // Block 0xab, offset 0x512 {value: 0x0000, lo: 0x08}, {value: 0x0908, lo: 0x80, hi: 0x80}, {value: 0x0a08, lo: 0x81, hi: 0xa1}, @@ -3864,12 +4019,12 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xa8, hi: 0xaf}, {value: 0x0808, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xaa, offset 0x511 + // Block 0xac, offset 0x51b {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0818, lo: 0xa0, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xab, offset 0x515 + // Block 0xad, offset 0x51f {value: 0x0000, lo: 0x07}, {value: 0x0808, lo: 0x80, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xaa}, @@ -3878,7 +4033,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0808, lo: 0xb0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xac, offset 0x51d + // Block 0xae, offset 0x527 {value: 0x0000, lo: 0x07}, {value: 0x0808, lo: 0x80, hi: 0x9c}, {value: 0x0818, lo: 0x9d, hi: 0xa6}, @@ -3887,7 +4042,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0a08, lo: 0xb0, hi: 0xb2}, {value: 0x0c08, lo: 0xb3, hi: 0xb3}, {value: 0x0a08, lo: 0xb4, hi: 0xbf}, - // Block 0xad, offset 0x525 + // Block 0xaf, offset 0x52f {value: 0x0000, lo: 0x07}, {value: 0x0a08, lo: 0x80, hi: 0x84}, {value: 0x0808, lo: 0x85, hi: 0x85}, @@ -3896,7 +4051,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0c18, lo: 0x94, hi: 0x94}, {value: 0x0818, lo: 0x95, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xae, offset 0x52d + // Block 0xb0, offset 0x537 {value: 0x0000, lo: 0x0b}, {value: 0x0040, lo: 0x80, hi: 0xaf}, {value: 0x0a08, lo: 0xb0, hi: 0xb0}, @@ -3909,7 +4064,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0a08, lo: 0xbb, hi: 0xbc}, {value: 0x0c08, lo: 0xbd, hi: 0xbd}, {value: 0x0a08, lo: 0xbe, hi: 0xbf}, - // Block 0xaf, offset 0x539 + // Block 0xb1, offset 0x543 {value: 0x0000, lo: 0x0b}, {value: 0x0808, lo: 0x80, hi: 0x80}, 
{value: 0x0a08, lo: 0x81, hi: 0x81}, @@ -3922,14 +4077,14 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0x8c, hi: 0x9f}, {value: 0x0808, lo: 0xa0, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xb0, offset 0x545 + // Block 0xb2, offset 0x54f {value: 0x0000, lo: 0x05}, {value: 0x3008, lo: 0x80, hi: 0x80}, {value: 0x3308, lo: 0x81, hi: 0x81}, {value: 0x3008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xb7}, {value: 0x3308, lo: 0xb8, hi: 0xbf}, - // Block 0xb1, offset 0x54b + // Block 0xb3, offset 0x555 {value: 0x0000, lo: 0x08}, {value: 0x3308, lo: 0x80, hi: 0x85}, {value: 0x3b08, lo: 0x86, hi: 0x86}, @@ -3939,7 +4094,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0008, lo: 0xa6, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbe}, {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xb2, offset 0x554 + // Block 0xb4, offset 0x55e {value: 0x0000, lo: 0x0b}, {value: 0x3308, lo: 0x80, hi: 0x81}, {value: 0x3008, lo: 0x82, hi: 0x82}, @@ -3952,7 +4107,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0018, lo: 0xbb, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0xb3, offset 0x560 + // Block 0xb5, offset 0x56a {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x81}, {value: 0x0040, lo: 0x82, hi: 0x8f}, @@ -3960,7 +4115,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xa9, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xb4, offset 0x567 + // Block 0xb6, offset 0x571 {value: 0x0000, lo: 0x08}, {value: 0x3308, lo: 0x80, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xa6}, @@ -3970,7 +4125,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3b08, lo: 0xb3, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xbf}, - // Block 0xb5, offset 0x570 + // Block 0xb7, offset 0x57a {value: 0x0000, lo: 0x0a}, {value: 0x0018, lo: 0x80, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x84}, @@ -3982,7 
+4137,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0018, lo: 0xb4, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xb6, offset 0x57b + // Block 0xb8, offset 0x585 {value: 0x0000, lo: 0x06}, {value: 0x3308, lo: 0x80, hi: 0x81}, {value: 0x3008, lo: 0x82, hi: 0x82}, @@ -3990,7 +4145,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3008, lo: 0xb3, hi: 0xb5}, {value: 0x3308, lo: 0xb6, hi: 0xbe}, {value: 0x3008, lo: 0xbf, hi: 0xbf}, - // Block 0xb7, offset 0x582 + // Block 0xb9, offset 0x58c {value: 0x0000, lo: 0x0e}, {value: 0x3808, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x84}, @@ -4006,7 +4161,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xb8, offset 0x591 + // Block 0xba, offset 0x59b {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x91}, {value: 0x0040, lo: 0x92, hi: 0x92}, @@ -4020,7 +4175,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0018, lo: 0xb8, hi: 0xbd}, {value: 0x3308, lo: 0xbe, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xb9, offset 0x59e + // Block 0xbb, offset 0x5a8 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, @@ -4034,7 +4189,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0018, lo: 0xa9, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xba, offset 0x5ab + // Block 0xbc, offset 0x5b5 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x3308, lo: 0x9f, hi: 0x9f}, @@ -4044,12 +4199,12 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xbb, offset 0x5b4 + // Block 0xbd, offset 0x5be {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xb4}, {value: 0x3008, lo: 0xb5, hi: 
0xb7}, {value: 0x3308, lo: 0xb8, hi: 0xbf}, - // Block 0xbc, offset 0x5b8 + // Block 0xbe, offset 0x5c2 {value: 0x0000, lo: 0x0e}, {value: 0x3008, lo: 0x80, hi: 0x81}, {value: 0x3b08, lo: 0x82, hi: 0x82}, @@ -4065,7 +4220,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3308, lo: 0x9e, hi: 0x9e}, {value: 0x0008, lo: 0x9f, hi: 0xa1}, {value: 0x0040, lo: 0xa2, hi: 0xbf}, - // Block 0xbd, offset 0x5c7 + // Block 0xbf, offset 0x5d1 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xaf}, {value: 0x3008, lo: 0xb0, hi: 0xb2}, @@ -4074,7 +4229,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3308, lo: 0xba, hi: 0xba}, {value: 0x3008, lo: 0xbb, hi: 0xbe}, {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0xbe, offset 0x5cf + // Block 0xc0, offset 0x5d9 {value: 0x0000, lo: 0x0a}, {value: 0x3308, lo: 0x80, hi: 0x80}, {value: 0x3008, lo: 0x81, hi: 0x81}, @@ -4086,7 +4241,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xbf, offset 0x5da + // Block 0xc1, offset 0x5e4 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xae}, {value: 0x3008, lo: 0xaf, hi: 0xb1}, @@ -4096,14 +4251,14 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3308, lo: 0xbc, hi: 0xbd}, {value: 0x3008, lo: 0xbe, hi: 0xbe}, {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xc0, offset 0x5e3 + // Block 0xc2, offset 0x5ed {value: 0x0000, lo: 0x05}, {value: 0x3308, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0x9b}, {value: 0x3308, lo: 0x9c, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xc1, offset 0x5e9 + // Block 0xc3, offset 0x5f3 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xaf}, {value: 0x3008, lo: 0xb0, hi: 0xb2}, @@ -4112,7 +4267,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3308, lo: 0xbd, hi: 0xbd}, {value: 0x3008, lo: 0xbe, hi: 0xbe}, {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // 
Block 0xc2, offset 0x5f1 + // Block 0xc4, offset 0x5fb {value: 0x0000, lo: 0x08}, {value: 0x3308, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x83}, @@ -4122,7 +4277,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xc3, offset 0x5fa + // Block 0xc5, offset 0x604 {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0xaa}, {value: 0x3308, lo: 0xab, hi: 0xab}, @@ -4134,11 +4289,11 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3308, lo: 0xb7, hi: 0xb7}, {value: 0x0008, lo: 0xb8, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xc4, offset 0x605 + // Block 0xc6, offset 0x60f {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0xbf}, - // Block 0xc5, offset 0x608 + // Block 0xc7, offset 0x612 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0x9c}, @@ -4151,7 +4306,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xc6, offset 0x614 + // Block 0xc8, offset 0x61e {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xab}, {value: 0x3008, lo: 0xac, hi: 0xae}, @@ -4161,17 +4316,17 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3308, lo: 0xba, hi: 0xba}, {value: 0x0018, lo: 0xbb, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0xc7, offset 0x61d + // Block 0xc9, offset 0x627 {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x049d, lo: 0xa0, hi: 0xbf}, - // Block 0xc8, offset 0x620 + // Block 0xca, offset 0x62a {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0xc9, offset 0x625 + // Block 0xcb, offset 0x62f {value: 0x0000, lo: 0x08}, {value: 0x3008, 
lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x81}, @@ -4181,13 +4336,13 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0x87, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xca, offset 0x62e + // Block 0xcc, offset 0x638 {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xa9}, {value: 0x0008, lo: 0xaa, hi: 0xbf}, - // Block 0xcb, offset 0x633 + // Block 0xcd, offset 0x63d {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x90}, {value: 0x3008, lo: 0x91, hi: 0x93}, @@ -4201,7 +4356,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0008, lo: 0xa3, hi: 0xa3}, {value: 0x3008, lo: 0xa4, hi: 0xa4}, {value: 0x0040, lo: 0xa5, hi: 0xbf}, - // Block 0xcc, offset 0x640 + // Block 0xce, offset 0x64a {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x3308, lo: 0x81, hi: 0x8a}, @@ -4213,7 +4368,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0008, lo: 0xba, hi: 0xba}, {value: 0x3308, lo: 0xbb, hi: 0xbe}, {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0xcd, offset 0x64b + // Block 0xcf, offset 0x655 {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x3b08, lo: 0x87, hi: 0x87}, @@ -4223,7 +4378,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3008, lo: 0x97, hi: 0x98}, {value: 0x3308, lo: 0x99, hi: 0x9b}, {value: 0x0008, lo: 0x9c, hi: 0xbf}, - // Block 0xce, offset 0x654 + // Block 0xd0, offset 0x65e {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x3308, lo: 0x8a, hi: 0x96}, @@ -4234,11 +4389,11 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0008, lo: 0x9d, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0xa2}, {value: 0x0040, lo: 0xa3, hi: 0xbf}, - // Block 0xcf, offset 0x65e + // Block 0xd1, offset 0x668 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xd0, offset 
0x661 + // Block 0xd2, offset 0x66b {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, @@ -4249,7 +4404,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3308, lo: 0xb8, hi: 0xbd}, {value: 0x3008, lo: 0xbe, hi: 0xbe}, {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xd1, offset 0x66b + // Block 0xd3, offset 0x675 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x85}, @@ -4259,7 +4414,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xad, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xbf}, - // Block 0xd2, offset 0x674 + // Block 0xd4, offset 0x67e {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x91}, @@ -4272,7 +4427,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3008, lo: 0xb4, hi: 0xb4}, {value: 0x3308, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xd3, offset 0x680 + // Block 0xd5, offset 0x68a {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, @@ -4286,7 +4441,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3308, lo: 0xbc, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbe}, {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0xd4, offset 0x68d + // Block 0xd6, offset 0x697 {value: 0x0000, lo: 0x0c}, {value: 0x3308, lo: 0x80, hi: 0x83}, {value: 0x3b08, lo: 0x84, hi: 0x85}, @@ -4300,7 +4455,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0008, lo: 0xa7, hi: 0xa8}, {value: 0x0040, lo: 0xa9, hi: 0xa9}, {value: 0x0008, lo: 0xaa, hi: 0xbf}, - // Block 0xd5, offset 0x69a + // Block 0xd7, offset 0x6a4 {value: 0x0000, lo: 0x0d}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x3008, lo: 0x8a, hi: 0x8e}, @@ -4315,7 +4470,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0x99, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xbf}, - // Block 
0xd6, offset 0x6a8 + // Block 0xd8, offset 0x6b2 {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xb2}, @@ -4323,41 +4478,41 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3008, lo: 0xb5, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xd7, offset 0x6af + // Block 0xd9, offset 0x6b9 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xbf}, - // Block 0xd8, offset 0x6b3 + // Block 0xda, offset 0x6bd {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbe}, {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0xd9, offset 0x6b7 + // Block 0xdb, offset 0x6c1 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xda, offset 0x6ba + // Block 0xdc, offset 0x6c4 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xdb, offset 0x6bf + // Block 0xdd, offset 0x6c9 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0xbf}, - // Block 0xdc, offset 0x6c2 + // Block 0xde, offset 0x6cc {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0340, lo: 0xb0, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xdd, offset 0x6c7 + // Block 0xdf, offset 0x6d1 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0xbf}, - // Block 0xde, offset 0x6ca + // Block 0xe0, offset 0x6d4 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, @@ -4365,7 +4520,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xaa, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xaf}, {value: 0x0040, lo: 0xb0, 
hi: 0xbf}, - // Block 0xdf, offset 0x6d1 + // Block 0xe1, offset 0x6db {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xad}, @@ -4373,12 +4528,12 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x3308, lo: 0xb0, hi: 0xb4}, {value: 0x0018, lo: 0xb5, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xe0, offset 0x6d8 + // Block 0xe2, offset 0x6e2 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xaf}, {value: 0x3308, lo: 0xb0, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0xe1, offset 0x6dc + // Block 0xe3, offset 0x6e6 {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0018, lo: 0x84, hi: 0x85}, @@ -4390,33 +4545,33 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0008, lo: 0xa3, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0xe2, offset 0x6e7 + // Block 0xe4, offset 0x6f1 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0xe3, offset 0x6ea + // Block 0xe5, offset 0x6f4 {value: 0x0000, lo: 0x02}, {value: 0xe105, lo: 0x80, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0xe4, offset 0x6ed + // Block 0xe6, offset 0x6f7 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0xbf}, - // Block 0xe5, offset 0x6f0 + // Block 0xe7, offset 0x6fa {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8e}, {value: 0x3308, lo: 0x8f, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x90}, {value: 0x3008, lo: 0x91, hi: 0xbf}, - // Block 0xe6, offset 0x6f6 + // Block 0xe8, offset 0x700 {value: 0x0000, lo: 0x05}, {value: 0x3008, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8e}, {value: 0x3308, lo: 0x8f, hi: 0x92}, {value: 0x0008, lo: 0x93, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xe7, offset 0x6fc + // Block 0xe9, offset 0x706 {value: 0x0000, lo: 0x08}, 
{value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa1}, @@ -4426,23 +4581,23 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xa5, hi: 0xaf}, {value: 0x3008, lo: 0xb0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xe8, offset 0x705 + // Block 0xea, offset 0x70f {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0xe9, offset 0x708 + // Block 0xeb, offset 0x712 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0xea, offset 0x70b + // Block 0xec, offset 0x715 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0xeb, offset 0x70e + // Block 0xed, offset 0x718 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xec, offset 0x711 + // Block 0xee, offset 0x71b {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x92}, @@ -4450,17 +4605,17 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0008, lo: 0xa4, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xed, offset 0x718 + // Block 0xef, offset 0x722 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0xee, offset 0x71b + // Block 0xf0, offset 0x725 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0xef, offset 0x720 + // Block 0xf1, offset 0x72a {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x8f}, @@ -4471,32 +4626,32 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0018, lo: 0x9f, hi: 0x9f}, {value: 0x03c0, lo: 0xa0, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xbf}, - // Block 0xf0, offset 0x72a + // 
Block 0xf2, offset 0x734 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xf1, offset 0x72d + // Block 0xf3, offset 0x737 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xa6}, {value: 0x0040, lo: 0xa7, hi: 0xa8}, {value: 0x0018, lo: 0xa9, hi: 0xbf}, - // Block 0xf2, offset 0x731 + // Block 0xf4, offset 0x73b {value: 0x0000, lo: 0x0e}, {value: 0x0018, lo: 0x80, hi: 0x9d}, - {value: 0xb609, lo: 0x9e, hi: 0x9e}, - {value: 0xb651, lo: 0x9f, hi: 0x9f}, - {value: 0xb699, lo: 0xa0, hi: 0xa0}, - {value: 0xb701, lo: 0xa1, hi: 0xa1}, - {value: 0xb769, lo: 0xa2, hi: 0xa2}, - {value: 0xb7d1, lo: 0xa3, hi: 0xa3}, - {value: 0xb839, lo: 0xa4, hi: 0xa4}, + {value: 0x2211, lo: 0x9e, hi: 0x9e}, + {value: 0x2219, lo: 0x9f, hi: 0x9f}, + {value: 0x2221, lo: 0xa0, hi: 0xa0}, + {value: 0x2229, lo: 0xa1, hi: 0xa1}, + {value: 0x2231, lo: 0xa2, hi: 0xa2}, + {value: 0x2239, lo: 0xa3, hi: 0xa3}, + {value: 0x2241, lo: 0xa4, hi: 0xa4}, {value: 0x3018, lo: 0xa5, hi: 0xa6}, {value: 0x3318, lo: 0xa7, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xac}, {value: 0x3018, lo: 0xad, hi: 0xb2}, {value: 0x0340, lo: 0xb3, hi: 0xba}, {value: 0x3318, lo: 0xbb, hi: 0xbf}, - // Block 0xf3, offset 0x740 + // Block 0xf5, offset 0x74a {value: 0x0000, lo: 0x0b}, {value: 0x3318, lo: 0x80, hi: 0x82}, {value: 0x0018, lo: 0x83, hi: 0x84}, @@ -4504,45 +4659,45 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0018, lo: 0x8c, hi: 0xa9}, {value: 0x3318, lo: 0xaa, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xba}, - {value: 0xb8a1, lo: 0xbb, hi: 0xbb}, - {value: 0xb8e9, lo: 0xbc, hi: 0xbc}, - {value: 0xb931, lo: 0xbd, hi: 0xbd}, - {value: 0xb999, lo: 0xbe, hi: 0xbe}, - {value: 0xba01, lo: 0xbf, hi: 0xbf}, - // Block 0xf4, offset 0x74c + {value: 0x2249, lo: 0xbb, hi: 0xbb}, + {value: 0x2251, lo: 0xbc, hi: 0xbc}, + {value: 0x2259, lo: 0xbd, hi: 0xbd}, + {value: 0x2261, lo: 0xbe, hi: 0xbe}, + {value: 0x2269, lo: 0xbf, hi: 0xbf}, + // Block 0xf6, offset 
0x756 {value: 0x0000, lo: 0x03}, - {value: 0xba69, lo: 0x80, hi: 0x80}, + {value: 0x2271, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0xa8}, {value: 0x0040, lo: 0xa9, hi: 0xbf}, - // Block 0xf5, offset 0x750 + // Block 0xf7, offset 0x75a {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x81}, {value: 0x3318, lo: 0x82, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0xbf}, - // Block 0xf6, offset 0x755 + // Block 0xf8, offset 0x75f {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0xf7, offset 0x759 + // Block 0xf9, offset 0x763 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xf8, offset 0x75e + // Block 0xfa, offset 0x768 {value: 0x0000, lo: 0x03}, {value: 0x3308, lo: 0x80, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xba}, {value: 0x3308, lo: 0xbb, hi: 0xbf}, - // Block 0xf9, offset 0x762 + // Block 0xfb, offset 0x76c {value: 0x0000, lo: 0x04}, {value: 0x3308, lo: 0x80, hi: 0xac}, {value: 0x0018, lo: 0xad, hi: 0xb4}, {value: 0x3308, lo: 0xb5, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0xfa, offset 0x767 + // Block 0xfc, offset 0x771 {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x83}, {value: 0x3308, lo: 0x84, hi: 0x84}, @@ -4552,7 +4707,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xa0, hi: 0xa0}, {value: 0x3308, lo: 0xa1, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xfb, offset 0x770 + // Block 0xfd, offset 0x77a {value: 0x0000, lo: 0x0a}, {value: 0x3308, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, @@ -4564,35 +4719,35 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xa5, hi: 0xa5}, {value: 0x3308, lo: 0xa6, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xbf}, - // Block 0xfc, offset 0x77b + // Block 
0xfe, offset 0x785 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xaf}, {value: 0x3308, lo: 0xb0, hi: 0xb6}, {value: 0x0008, lo: 0xb7, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0xfd, offset 0x781 + // Block 0xff, offset 0x78b {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8d}, {value: 0x0008, lo: 0x8e, hi: 0x8e}, {value: 0x0018, lo: 0x8f, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0xfe, offset 0x787 + // Block 0x100, offset 0x791 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0xab}, {value: 0x3308, lo: 0xac, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbe}, {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0xff, offset 0x78d + // Block 0x101, offset 0x797 {value: 0x0000, lo: 0x05}, {value: 0x0808, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x86}, {value: 0x0818, lo: 0x87, hi: 0x8f}, {value: 0x3308, lo: 0x90, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0x100, offset 0x793 + // Block 0x102, offset 0x79d {value: 0x0000, lo: 0x08}, {value: 0x0a08, lo: 0x80, hi: 0x83}, {value: 0x3308, lo: 0x84, hi: 0x8a}, @@ -4602,71 +4757,71 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0x9a, hi: 0x9d}, {value: 0x0818, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0x101, offset 0x79c + // Block 0x103, offset 0x7a6 {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0xb0}, {value: 0x0818, lo: 0xb1, hi: 0xbf}, - // Block 0x102, offset 0x79f + // Block 0x104, offset 0x7a9 {value: 0x0000, lo: 0x02}, {value: 0x0818, lo: 0x80, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x103, offset 0x7a2 + // Block 0x105, offset 0x7ac {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0818, lo: 0x81, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x104, offset 0x7a6 + // Block 0x106, offset 0x7b0 {value: 0x0000, lo: 
0x03}, {value: 0x0040, lo: 0x80, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0x105, offset 0x7aa + // Block 0x107, offset 0x7b4 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x106, offset 0x7ae + // Block 0x108, offset 0x7b8 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xb0}, {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0x107, offset 0x7b4 + // Block 0x109, offset 0x7be {value: 0x0000, lo: 0x05}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x90}, {value: 0x0018, lo: 0x91, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0x108, offset 0x7ba + // Block 0x10a, offset 0x7c4 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x8f}, - {value: 0xc229, lo: 0x90, hi: 0x90}, + {value: 0x2491, lo: 0x90, hi: 0x90}, {value: 0x0018, lo: 0x91, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xbf}, - // Block 0x109, offset 0x7bf + // Block 0x10b, offset 0x7c9 {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0xa5}, {value: 0x0018, lo: 0xa6, hi: 0xbf}, - // Block 0x10a, offset 0x7c2 + // Block 0x10c, offset 0x7cc {value: 0x0000, lo: 0x0f}, - {value: 0xc851, lo: 0x80, hi: 0x80}, - {value: 0xc8a1, lo: 0x81, hi: 0x81}, - {value: 0xc8f1, lo: 0x82, hi: 0x82}, - {value: 0xc941, lo: 0x83, hi: 0x83}, - {value: 0xc991, lo: 0x84, hi: 0x84}, - {value: 0xc9e1, lo: 0x85, hi: 0x85}, - {value: 0xca31, lo: 0x86, hi: 0x86}, - {value: 0xca81, lo: 0x87, hi: 0x87}, - {value: 0xcad1, lo: 0x88, hi: 0x88}, + {value: 0x2611, lo: 0x80, hi: 0x80}, + {value: 0x2619, lo: 0x81, hi: 0x81}, + {value: 0x2621, lo: 0x82, hi: 0x82}, + {value: 0x2629, lo: 0x83, hi: 0x83}, + {value: 0x2631, lo: 0x84, hi: 0x84}, + {value: 0x2639, lo: 0x85, hi: 0x85}, + {value: 0x2641, 
lo: 0x86, hi: 0x86}, + {value: 0x2649, lo: 0x87, hi: 0x87}, + {value: 0x2651, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x8f}, - {value: 0xcb21, lo: 0x90, hi: 0x90}, - {value: 0xcb41, lo: 0x91, hi: 0x91}, + {value: 0x2659, lo: 0x90, hi: 0x90}, + {value: 0x2661, lo: 0x91, hi: 0x91}, {value: 0x0040, lo: 0x92, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa5}, {value: 0x0040, lo: 0xa6, hi: 0xbf}, - // Block 0x10b, offset 0x7d2 + // Block 0x10d, offset 0x7dc {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x97}, {value: 0x0040, lo: 0x98, hi: 0x9f}, @@ -4674,29 +4829,29 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xad, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x10c, offset 0x7d9 + // Block 0x10e, offset 0x7e3 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x10d, offset 0x7dc + // Block 0x10f, offset 0x7e6 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x98}, {value: 0x0040, lo: 0x99, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xbf}, - // Block 0x10e, offset 0x7e1 + // Block 0x110, offset 0x7eb {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x10f, offset 0x7e5 + // Block 0x111, offset 0x7ef {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0x110, offset 0x7eb + // Block 0x112, offset 0x7f5 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, @@ -4704,17 +4859,17 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0x111, offset 0x7f2 + // Block 
0x113, offset 0x7fc {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xb9}, {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0x112, offset 0x7f6 + // Block 0x114, offset 0x800 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0xbf}, - // Block 0x113, offset 0x7fa + // Block 0x115, offset 0x804 {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9f}, @@ -4724,7 +4879,7 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xb5, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x114, offset 0x803 + // Block 0x116, offset 0x80d {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8f}, @@ -4732,109 +4887,74 @@ var idnaSparseValues = [2146]valueRange{ {value: 0x0040, lo: 0xa9, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x115, offset 0x80a + // Block 0x117, offset 0x814 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0x116, offset 0x80f + // Block 0x118, offset 0x819 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x92}, {value: 0x0040, lo: 0x93, hi: 0x93}, {value: 0x0018, lo: 0x94, hi: 0xbf}, - // Block 0x117, offset 0x813 + // Block 0x119, offset 0x81d {value: 0x0000, lo: 0x0d}, {value: 0x0018, lo: 0x80, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0xaf}, - {value: 0x1f41, lo: 0xb0, hi: 0xb0}, - {value: 0x00c9, lo: 0xb1, hi: 0xb1}, - {value: 0x0069, lo: 0xb2, hi: 0xb2}, - {value: 0x0079, lo: 0xb3, hi: 0xb3}, - {value: 0x1f51, lo: 0xb4, hi: 0xb4}, - {value: 0x1f61, lo: 0xb5, hi: 0xb5}, - {value: 0x1f71, lo: 0xb6, hi: 0xb6}, - {value: 0x1f81, lo: 0xb7, hi: 0xb7}, - {value: 0x1f91, lo: 0xb8, hi: 0xb8}, - {value: 
0x1fa1, lo: 0xb9, hi: 0xb9}, + {value: 0x06e1, lo: 0xb0, hi: 0xb0}, + {value: 0x0049, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb2, hi: 0xb2}, + {value: 0x0031, lo: 0xb3, hi: 0xb3}, + {value: 0x06e9, lo: 0xb4, hi: 0xb4}, + {value: 0x06f1, lo: 0xb5, hi: 0xb5}, + {value: 0x06f9, lo: 0xb6, hi: 0xb6}, + {value: 0x0701, lo: 0xb7, hi: 0xb7}, + {value: 0x0709, lo: 0xb8, hi: 0xb8}, + {value: 0x0711, lo: 0xb9, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x118, offset 0x821 + // Block 0x11a, offset 0x82b {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0x119, offset 0x824 + // Block 0x11b, offset 0x82e {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x11a, offset 0x827 + // Block 0x11c, offset 0x831 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x11b, offset 0x82b + // Block 0x11d, offset 0x835 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa1}, {value: 0x0040, lo: 0xa2, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x11c, offset 0x82f + // Block 0x11e, offset 0x839 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xa0}, {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x11d, offset 0x832 - {value: 0x0020, lo: 0x0f}, - {value: 0xdf21, lo: 0x80, hi: 0x89}, - {value: 0x8e35, lo: 0x8a, hi: 0x8a}, - {value: 0xe061, lo: 0x8b, hi: 0x9c}, - {value: 0x8e55, lo: 0x9d, hi: 0x9d}, - {value: 0xe2a1, lo: 0x9e, hi: 0xa2}, - {value: 0x8e75, lo: 0xa3, hi: 0xa3}, - {value: 0xe341, lo: 0xa4, hi: 0xab}, - {value: 0x7f0d, lo: 0xac, hi: 0xac}, - {value: 0xe441, lo: 0xad, hi: 0xaf}, - {value: 0x8e95, lo: 0xb0, hi: 0xb0}, - {value: 0xe4a1, lo: 0xb1, hi: 0xb6}, - {value: 0x8eb5, lo: 0xb7, hi: 0xb9}, - {value: 0xe561, lo: 0xba, hi: 0xba}, - {value: 0x8f15, lo: 0xbb, hi: 0xbb}, - {value: 0xe581, lo: 0xbc, hi: 0xbf}, - // 
Block 0x11e, offset 0x842 - {value: 0x0020, lo: 0x10}, - {value: 0x93b5, lo: 0x80, hi: 0x80}, - {value: 0xf101, lo: 0x81, hi: 0x86}, - {value: 0x93d5, lo: 0x87, hi: 0x8a}, - {value: 0xda61, lo: 0x8b, hi: 0x8b}, - {value: 0xf1c1, lo: 0x8c, hi: 0x96}, - {value: 0x9455, lo: 0x97, hi: 0x97}, - {value: 0xf321, lo: 0x98, hi: 0xa3}, - {value: 0x9475, lo: 0xa4, hi: 0xa6}, - {value: 0xf4a1, lo: 0xa7, hi: 0xaa}, - {value: 0x94d5, lo: 0xab, hi: 0xab}, - {value: 0xf521, lo: 0xac, hi: 0xac}, - {value: 0x94f5, lo: 0xad, hi: 0xad}, - {value: 0xf541, lo: 0xae, hi: 0xaf}, - {value: 0x9515, lo: 0xb0, hi: 0xb1}, - {value: 0xf581, lo: 0xb2, hi: 0xbe}, - {value: 0x2040, lo: 0xbf, hi: 0xbf}, - // Block 0x11f, offset 0x853 + // Block 0x11f, offset 0x83c {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0xbf}, - // Block 0x120, offset 0x856 + // Block 0x120, offset 0x83f {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0340, lo: 0x81, hi: 0x81}, {value: 0x0040, lo: 0x82, hi: 0x9f}, {value: 0x0340, lo: 0xa0, hi: 0xbf}, - // Block 0x121, offset 0x85b + // Block 0x121, offset 0x844 {value: 0x0000, lo: 0x01}, {value: 0x0340, lo: 0x80, hi: 0xbf}, - // Block 0x122, offset 0x85d + // Block 0x122, offset 0x846 {value: 0x0000, lo: 0x01}, {value: 0x33c0, lo: 0x80, hi: 0xbf}, - // Block 0x123, offset 0x85f + // Block 0x123, offset 0x848 {value: 0x0000, lo: 0x02}, {value: 0x33c0, lo: 0x80, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, } -// Total table size 43370 bytes (42KiB); checksum: EBD909C0 +// Total table size 44953 bytes (43KiB); checksum: D51909DD diff --git a/terraform/providers/google/vendor/golang.org/x/net/idna/tables15.0.0.go b/terraform/providers/google/vendor/golang.org/x/net/idna/tables15.0.0.go new file mode 100644 index 0000000000..40033778f0 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/net/idna/tables15.0.0.go @@ -0,0 +1,5145 @@ +// Code generated by running "go generate" in 
golang.org/x/text. DO NOT EDIT. + +//go:build go1.21 +// +build go1.21 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "15.0.0" + +var mappings string = "" + // Size: 6704 bytes + " ̈a ̄23 ́ ̧1o1⁄41⁄23⁄4i̇l·ʼnsdžⱥⱦhjrwy ̆ ̇ ̊ ̨ ̃ ̋lẍ́ ι; ̈́եւاٴوٴۇٴيٴक" + + "़ख़ग़ज़ड़ढ़फ़य़ড়ঢ়য়ਲ਼ਸ਼ਖ਼ਗ਼ਜ਼ਫ਼ଡ଼ଢ଼ําໍາຫນຫມགྷཌྷདྷབྷཛྷཀྵཱཱིུྲྀྲཱྀླྀླཱ" + + "ཱྀྀྒྷྜྷྡྷྦྷྫྷྐྵвдостъѣæbdeǝgikmnȣptuɐɑəɛɜŋɔɯvβγδφχρнɒcɕðfɟɡɥɨɩɪʝɭʟɱɰɲɳ" + + "ɴɵɸʂʃƫʉʊʋʌzʐʑʒθssάέήίόύώἀιἁιἂιἃιἄιἅιἆιἇιἠιἡιἢιἣιἤιἥιἦιἧιὠιὡιὢιὣιὤιὥιὦιὧ" + + "ιὰιαιάιᾶιι ̈͂ὴιηιήιῆι ̓̀ ̓́ ̓͂ΐ ̔̀ ̔́ ̔͂ΰ ̈̀`ὼιωιώιῶι′′′′′‵‵‵‵‵!!???!!?" + + "′′′′0456789+=()rsħnoqsmtmωåאבגדπ1⁄71⁄91⁄101⁄32⁄31⁄52⁄53⁄54⁄51⁄65⁄61⁄83" + + "⁄85⁄87⁄81⁄iiivviviiiixxi0⁄3∫∫∫∫∫∮∮∮∮∮1011121314151617181920(10)(11)(12" + + ")(13)(14)(15)(16)(17)(18)(19)(20)∫∫∫∫==⫝̸ɫɽȿɀ. ゙ ゚よりコト(ᄀ)(ᄂ)(ᄃ)(ᄅ)(ᄆ)(ᄇ)" + + "(ᄉ)(ᄋ)(ᄌ)(ᄎ)(ᄏ)(ᄐ)(ᄑ)(ᄒ)(가)(나)(다)(라)(마)(바)(사)(아)(자)(차)(카)(타)(파)(하)(주)(오전" + + ")(오후)(一)(二)(三)(四)(五)(六)(七)(八)(九)(十)(月)(火)(水)(木)(金)(土)(日)(株)(有)(社)(名)(特)(" + + "財)(祝)(労)(代)(呼)(学)(監)(企)(資)(協)(祭)(休)(自)(至)21222324252627282930313233343" + + "5참고주의3637383940414243444546474849501月2月3月4月5月6月7月8月9月10月11月12月hgev令和アパート" + + "アルファアンペアアールイニングインチウォンエスクードエーカーオンスオームカイリカラットカロリーガロンガンマギガギニーキュリーギルダーキロキロ" + + "グラムキロメートルキロワットグラムグラムトンクルゼイロクローネケースコルナコーポサイクルサンチームシリングセンチセントダースデシドルトンナノ" + + "ノットハイツパーセントパーツバーレルピアストルピクルピコビルファラッドフィートブッシェルフランヘクタールペソペニヒヘルツペンスページベータポ" + + "イントボルトホンポンドホールホーンマイクロマイルマッハマルクマンションミクロンミリミリバールメガメガトンメートルヤードヤールユアンリットルリ" + + "ラルピールーブルレムレントゲンワット0点1点2点3点4点5点6点7点8点9点10点11点12点13点14点15点16点17点18点19点20" + + "点21点22点23点24点daauovpcdmiu平成昭和大正明治株式会社panamakakbmbgbkcalpfnfmgkghzmldlk" + + "lfmnmmmcmkmm2m3m∕sm∕s2rad∕srad∕s2psnsmspvnvmvkvpwnwmwkwbqcccdc∕kgdbgyhah" + + "pinkkktlmlnlxphprsrsvwbv∕ma∕m1日2日3日4日5日6日7日8日9日10日11日12日13日14日15日16日17日1" + + "8日19日20日21日22日23日24日25日26日27日28日29日30日31日ьɦɬʞʇœʍ𤋮𢡊𢡄𣏕𥉉𥳐𧻓fffiflstմնմեմիվնմ" + + "խיִײַעהכלםרתשׁשׂשּׁשּׂאַאָאּבּגּדּהּוּזּטּיּךּכּלּמּנּסּףּפּצּקּרּשּתּו" + + 
"ֹבֿכֿפֿאלٱٻپڀٺٿٹڤڦڄڃچڇڍڌڎڈژڑکگڳڱںڻۀہھےۓڭۇۆۈۋۅۉېىئائەئوئۇئۆئۈئېئىیئجئحئم" + + "ئيبجبحبخبمبىبيتجتحتختمتىتيثجثمثىثيجحجمحجحمخجخحخمسجسحسخسمصحصمضجضحضخضمطحط" + + "مظمعجعمغجغمفجفحفخفمفىفيقحقمقىقيكاكجكحكخكلكمكىكيلجلحلخلملىليمجمحمخمممىمي" + + "نجنحنخنمنىنيهجهمهىهييجيحيخيميىييذٰرٰىٰ ٌّ ٍّ َّ ُّ ِّ ّٰئرئزئنبربزبنترت" + + "زتنثرثزثنمانرنزننيريزينئخئهبهتهصخلهنههٰيهثهسهشمشهـَّـُّـِّطىطيعىعيغىغيس" + + "ىسيشىشيحىحيجىجيخىخيصىصيضىضيشجشحشخشرسرصرضراًتجمتحجتحمتخمتمجتمحتمخجمححميح" + + "مىسحجسجحسجىسمحسمجسممصححصممشحمشجيشمخشممضحىضخمطمحطممطميعجمعممعمىغممغميغمى" + + "فخمقمحقمملحملحيلحىلججلخملمحمحجمحممحيمجحمجممخجمخممجخهمجهممنحمنحىنجمنجىنم" + + "ينمىيممبخيتجيتجىتخيتخىتميتمىجميجحىجمىسخىصحيشحيضحيلجيلمييحييجييميمميقمين" + + "حيعميكمينجحمخيلجمكممجحيحجيمجيفميبحيسخينجيصلےقلےاللهاكبرمحمدصلعمرسولعليه" + + "وسلمصلىصلى الله عليه وسلمجل جلالهریال,:!?_{}[]#&*-<>\\$%@ـًـَـُـِـّـْءآ" + + "أؤإئابةتثجحخدذرزسشصضطظعغفقكلمنهويلآلألإلا\x22'/^|~¢£¬¦¥ːˑʙɓʣꭦʥʤɖɗᶑɘɞʩɤɢ" + + "ɠʛʜɧʄʪʫꞎɮʎøɶɷɺɾʀʨʦꭧʧʈⱱʏʡʢʘǀǁǂ𝅗𝅥𝅘𝅥𝅘𝅥𝅮𝅘𝅥𝅯𝅘𝅥𝅰𝅘𝅥𝅱𝅘𝅥𝅲𝆹𝅥𝆺𝅥𝆹𝅥𝅮𝆺𝅥𝅮𝆹𝅥𝅯𝆺𝅥𝅯ıȷαεζηκ" + + "λμνξοστυψ∇∂ϝабгежзиклмпруфхцчшыэюꚉәіјөүӏґѕџҫꙑұٮڡٯ0,1,2,3,4,5,6,7,8,9,(a" + + ")(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)(m)(n)(o)(p)(q)(r)(s)(t)(u)(v)(w)(x)(y" + + ")(z)〔s〕wzhvsdppvwcmcmdmrdjほかココサ手字双デ二多解天交映無料前後再新初終生販声吹演投捕一三遊左中右指走打禁空合満有月申" + + "割営配〔本〕〔三〕〔二〕〔安〕〔点〕〔打〕〔盗〕〔勝〕〔敗〕得可丽丸乁你侮侻倂偺備僧像㒞免兔兤具㒹內冗冤仌冬况凵刃㓟刻剆剷㔕勇勉勤勺包匆北卉" + + "卑博即卽卿灰及叟叫叱吆咞吸呈周咢哶唐啓啣善喙喫喳嗂圖嘆圗噑噴切壮城埴堍型堲報墬売壷夆夢奢姬娛娧姘婦㛮嬈嬾寃寘寧寳寿将尢㞁屠屮峀岍嵃嵮嵫嵼巡巢" + + "㠯巽帨帽幩㡢㡼庰庳庶廊廾舁弢㣇形彫㣣徚忍志忹悁㤺㤜悔惇慈慌慎慺憎憲憤憯懞懲懶成戛扝抱拔捐挽拼捨掃揤搢揅掩㨮摩摾撝摷㩬敏敬旣書晉㬙暑㬈㫤冒冕最" + + "暜肭䏙朗望朡杞杓㭉柺枅桒梅梎栟椔㮝楂榣槪檨櫛㰘次歔㱎歲殟殺殻汎沿泍汧洖派海流浩浸涅洴港湮㴳滋滇淹潮濆瀹瀞瀛㶖灊災灷炭煅熜爨爵牐犀犕獺王㺬玥㺸" + + "瑇瑜瑱璅瓊㼛甤甾異瘐㿼䀈直眞真睊䀹瞋䁆䂖硎碌磌䃣祖福秫䄯穀穊穏䈂篆築䈧糒䊠糨糣紀絣䌁緇縂繅䌴䍙罺羕翺者聠聰䏕育脃䐋脾媵舄辞䑫芑芋芝劳花芳芽苦" + + "若茝荣莭茣莽菧著荓菊菌菜䔫蓱蓳蔖蕤䕝䕡䕫虐虜虧虩蚩蚈蜎蛢蝹蜨蝫螆蟡蠁䗹衠衣裗裞䘵裺㒻䚾䛇誠諭變豕貫賁贛起跋趼跰軔輸邔郱鄑鄛鈸鋗鋘鉼鏹鐕開䦕閷" + + "䧦雃嶲霣䩮䩶韠䪲頋頩飢䬳餩馧駂駾䯎鬒鱀鳽䳎䳭鵧䳸麻䵖黹黾鼅鼏鼖鼻" + +var mappingIndex = []uint16{ // 1729 elements + // Entry 0 - 3F + 0x0000, 0x0000, 0x0001, 0x0004, 0x0005, 0x0008, 0x0009, 0x000a, + 0x000d, 0x0010, 0x0011, 0x0012, 0x0017, 0x001c, 0x0021, 0x0024, + 0x0027, 0x002a, 0x002b, 0x002e, 0x0031, 0x0034, 
0x0035, 0x0036, + 0x0037, 0x0038, 0x0039, 0x003c, 0x003f, 0x0042, 0x0045, 0x0048, + 0x004b, 0x004c, 0x004d, 0x0051, 0x0054, 0x0055, 0x005a, 0x005e, + 0x0062, 0x0066, 0x006a, 0x006e, 0x0074, 0x007a, 0x0080, 0x0086, + 0x008c, 0x0092, 0x0098, 0x009e, 0x00a4, 0x00aa, 0x00b0, 0x00b6, + 0x00bc, 0x00c2, 0x00c8, 0x00ce, 0x00d4, 0x00da, 0x00e0, 0x00e6, + // Entry 40 - 7F + 0x00ec, 0x00f2, 0x00f8, 0x00fe, 0x0104, 0x010a, 0x0110, 0x0116, + 0x011c, 0x0122, 0x0128, 0x012e, 0x0137, 0x013d, 0x0146, 0x014c, + 0x0152, 0x0158, 0x015e, 0x0164, 0x016a, 0x0170, 0x0172, 0x0174, + 0x0176, 0x0178, 0x017a, 0x017c, 0x017e, 0x0180, 0x0181, 0x0182, + 0x0183, 0x0185, 0x0186, 0x0187, 0x0188, 0x0189, 0x018a, 0x018c, + 0x018d, 0x018e, 0x018f, 0x0191, 0x0193, 0x0195, 0x0197, 0x0199, + 0x019b, 0x019d, 0x019f, 0x01a0, 0x01a2, 0x01a4, 0x01a6, 0x01a8, + 0x01aa, 0x01ac, 0x01ae, 0x01b0, 0x01b1, 0x01b3, 0x01b5, 0x01b6, + // Entry 80 - BF + 0x01b8, 0x01ba, 0x01bc, 0x01be, 0x01c0, 0x01c2, 0x01c4, 0x01c6, + 0x01c8, 0x01ca, 0x01cc, 0x01ce, 0x01d0, 0x01d2, 0x01d4, 0x01d6, + 0x01d8, 0x01da, 0x01dc, 0x01de, 0x01e0, 0x01e2, 0x01e4, 0x01e5, + 0x01e7, 0x01e9, 0x01eb, 0x01ed, 0x01ef, 0x01f1, 0x01f3, 0x01f5, + 0x01f7, 0x01f9, 0x01fb, 0x01fd, 0x0202, 0x0207, 0x020c, 0x0211, + 0x0216, 0x021b, 0x0220, 0x0225, 0x022a, 0x022f, 0x0234, 0x0239, + 0x023e, 0x0243, 0x0248, 0x024d, 0x0252, 0x0257, 0x025c, 0x0261, + 0x0266, 0x026b, 0x0270, 0x0275, 0x027a, 0x027e, 0x0282, 0x0287, + // Entry C0 - FF + 0x0289, 0x028e, 0x0293, 0x0297, 0x029b, 0x02a0, 0x02a5, 0x02aa, + 0x02af, 0x02b1, 0x02b6, 0x02bb, 0x02c0, 0x02c2, 0x02c7, 0x02c8, + 0x02cd, 0x02d1, 0x02d5, 0x02da, 0x02e0, 0x02e9, 0x02ef, 0x02f8, + 0x02fa, 0x02fc, 0x02fe, 0x0300, 0x030c, 0x030d, 0x030e, 0x030f, + 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317, + 0x0319, 0x031b, 0x031d, 0x031e, 0x0320, 0x0322, 0x0324, 0x0326, + 0x0328, 0x032a, 0x032c, 0x032e, 0x0330, 0x0335, 0x033a, 0x0340, + 0x0345, 0x034a, 0x034f, 0x0354, 0x0359, 0x035e, 0x0363, 0x0368, + // Entry 
100 - 13F + 0x036d, 0x0372, 0x0377, 0x037c, 0x0380, 0x0382, 0x0384, 0x0386, + 0x038a, 0x038c, 0x038e, 0x0393, 0x0399, 0x03a2, 0x03a8, 0x03b1, + 0x03b3, 0x03b5, 0x03b7, 0x03b9, 0x03bb, 0x03bd, 0x03bf, 0x03c1, + 0x03c3, 0x03c5, 0x03c7, 0x03cb, 0x03cf, 0x03d3, 0x03d7, 0x03db, + 0x03df, 0x03e3, 0x03e7, 0x03eb, 0x03ef, 0x03f3, 0x03ff, 0x0401, + 0x0406, 0x0408, 0x040a, 0x040c, 0x040e, 0x040f, 0x0413, 0x0417, + 0x041d, 0x0423, 0x0428, 0x042d, 0x0432, 0x0437, 0x043c, 0x0441, + 0x0446, 0x044b, 0x0450, 0x0455, 0x045a, 0x045f, 0x0464, 0x0469, + // Entry 140 - 17F + 0x046e, 0x0473, 0x0478, 0x047d, 0x0482, 0x0487, 0x048c, 0x0491, + 0x0496, 0x049b, 0x04a0, 0x04a5, 0x04aa, 0x04af, 0x04b4, 0x04bc, + 0x04c4, 0x04c9, 0x04ce, 0x04d3, 0x04d8, 0x04dd, 0x04e2, 0x04e7, + 0x04ec, 0x04f1, 0x04f6, 0x04fb, 0x0500, 0x0505, 0x050a, 0x050f, + 0x0514, 0x0519, 0x051e, 0x0523, 0x0528, 0x052d, 0x0532, 0x0537, + 0x053c, 0x0541, 0x0546, 0x054b, 0x0550, 0x0555, 0x055a, 0x055f, + 0x0564, 0x0569, 0x056e, 0x0573, 0x0578, 0x057a, 0x057c, 0x057e, + 0x0580, 0x0582, 0x0584, 0x0586, 0x0588, 0x058a, 0x058c, 0x058e, + // Entry 180 - 1BF + 0x0590, 0x0592, 0x0594, 0x0596, 0x059c, 0x05a2, 0x05a4, 0x05a6, + 0x05a8, 0x05aa, 0x05ac, 0x05ae, 0x05b0, 0x05b2, 0x05b4, 0x05b6, + 0x05b8, 0x05ba, 0x05bc, 0x05be, 0x05c0, 0x05c4, 0x05c8, 0x05cc, + 0x05d0, 0x05d4, 0x05d8, 0x05dc, 0x05e0, 0x05e4, 0x05e9, 0x05ee, + 0x05f3, 0x05f5, 0x05f7, 0x05fd, 0x0609, 0x0615, 0x0621, 0x062a, + 0x0636, 0x063f, 0x0648, 0x0657, 0x0663, 0x066c, 0x0675, 0x067e, + 0x068a, 0x0696, 0x069f, 0x06a8, 0x06ae, 0x06b7, 0x06c3, 0x06cf, + 0x06d5, 0x06e4, 0x06f6, 0x0705, 0x070e, 0x071d, 0x072c, 0x0738, + // Entry 1C0 - 1FF + 0x0741, 0x074a, 0x0753, 0x075f, 0x076e, 0x077a, 0x0783, 0x078c, + 0x0795, 0x079b, 0x07a1, 0x07a7, 0x07ad, 0x07b6, 0x07bf, 0x07ce, + 0x07d7, 0x07e3, 0x07f2, 0x07fb, 0x0801, 0x0807, 0x0816, 0x0822, + 0x0831, 0x083a, 0x0849, 0x084f, 0x0858, 0x0861, 0x086a, 0x0873, + 0x087c, 0x0888, 0x0891, 0x0897, 0x08a0, 0x08a9, 0x08b2, 0x08be, + 0x08c7, 
0x08d0, 0x08d9, 0x08e8, 0x08f4, 0x08fa, 0x0909, 0x090f, + 0x091b, 0x0927, 0x0930, 0x0939, 0x0942, 0x094e, 0x0954, 0x095d, + 0x0969, 0x096f, 0x097e, 0x0987, 0x098b, 0x098f, 0x0993, 0x0997, + // Entry 200 - 23F + 0x099b, 0x099f, 0x09a3, 0x09a7, 0x09ab, 0x09af, 0x09b4, 0x09b9, + 0x09be, 0x09c3, 0x09c8, 0x09cd, 0x09d2, 0x09d7, 0x09dc, 0x09e1, + 0x09e6, 0x09eb, 0x09f0, 0x09f5, 0x09fa, 0x09fc, 0x09fe, 0x0a00, + 0x0a02, 0x0a04, 0x0a06, 0x0a0c, 0x0a12, 0x0a18, 0x0a1e, 0x0a2a, + 0x0a2c, 0x0a2e, 0x0a30, 0x0a32, 0x0a34, 0x0a36, 0x0a38, 0x0a3c, + 0x0a3e, 0x0a40, 0x0a42, 0x0a44, 0x0a46, 0x0a48, 0x0a4a, 0x0a4c, + 0x0a4e, 0x0a50, 0x0a52, 0x0a54, 0x0a56, 0x0a58, 0x0a5a, 0x0a5f, + 0x0a65, 0x0a6c, 0x0a74, 0x0a76, 0x0a78, 0x0a7a, 0x0a7c, 0x0a7e, + // Entry 240 - 27F + 0x0a80, 0x0a82, 0x0a84, 0x0a86, 0x0a88, 0x0a8a, 0x0a8c, 0x0a8e, + 0x0a90, 0x0a96, 0x0a98, 0x0a9a, 0x0a9c, 0x0a9e, 0x0aa0, 0x0aa2, + 0x0aa4, 0x0aa6, 0x0aa8, 0x0aaa, 0x0aac, 0x0aae, 0x0ab0, 0x0ab2, + 0x0ab4, 0x0ab9, 0x0abe, 0x0ac2, 0x0ac6, 0x0aca, 0x0ace, 0x0ad2, + 0x0ad6, 0x0ada, 0x0ade, 0x0ae2, 0x0ae7, 0x0aec, 0x0af1, 0x0af6, + 0x0afb, 0x0b00, 0x0b05, 0x0b0a, 0x0b0f, 0x0b14, 0x0b19, 0x0b1e, + 0x0b23, 0x0b28, 0x0b2d, 0x0b32, 0x0b37, 0x0b3c, 0x0b41, 0x0b46, + 0x0b4b, 0x0b50, 0x0b52, 0x0b54, 0x0b56, 0x0b58, 0x0b5a, 0x0b5c, + // Entry 280 - 2BF + 0x0b5e, 0x0b62, 0x0b66, 0x0b6a, 0x0b6e, 0x0b72, 0x0b76, 0x0b7a, + 0x0b7c, 0x0b7e, 0x0b80, 0x0b82, 0x0b86, 0x0b8a, 0x0b8e, 0x0b92, + 0x0b96, 0x0b9a, 0x0b9e, 0x0ba0, 0x0ba2, 0x0ba4, 0x0ba6, 0x0ba8, + 0x0baa, 0x0bac, 0x0bb0, 0x0bb4, 0x0bba, 0x0bc0, 0x0bc4, 0x0bc8, + 0x0bcc, 0x0bd0, 0x0bd4, 0x0bd8, 0x0bdc, 0x0be0, 0x0be4, 0x0be8, + 0x0bec, 0x0bf0, 0x0bf4, 0x0bf8, 0x0bfc, 0x0c00, 0x0c04, 0x0c08, + 0x0c0c, 0x0c10, 0x0c14, 0x0c18, 0x0c1c, 0x0c20, 0x0c24, 0x0c28, + 0x0c2c, 0x0c30, 0x0c34, 0x0c36, 0x0c38, 0x0c3a, 0x0c3c, 0x0c3e, + // Entry 2C0 - 2FF + 0x0c40, 0x0c42, 0x0c44, 0x0c46, 0x0c48, 0x0c4a, 0x0c4c, 0x0c4e, + 0x0c50, 0x0c52, 0x0c54, 0x0c56, 0x0c58, 0x0c5a, 0x0c5c, 0x0c5e, + 0x0c60, 
0x0c62, 0x0c64, 0x0c66, 0x0c68, 0x0c6a, 0x0c6c, 0x0c6e, + 0x0c70, 0x0c72, 0x0c74, 0x0c76, 0x0c78, 0x0c7a, 0x0c7c, 0x0c7e, + 0x0c80, 0x0c82, 0x0c86, 0x0c8a, 0x0c8e, 0x0c92, 0x0c96, 0x0c9a, + 0x0c9e, 0x0ca2, 0x0ca4, 0x0ca8, 0x0cac, 0x0cb0, 0x0cb4, 0x0cb8, + 0x0cbc, 0x0cc0, 0x0cc4, 0x0cc8, 0x0ccc, 0x0cd0, 0x0cd4, 0x0cd8, + 0x0cdc, 0x0ce0, 0x0ce4, 0x0ce8, 0x0cec, 0x0cf0, 0x0cf4, 0x0cf8, + // Entry 300 - 33F + 0x0cfc, 0x0d00, 0x0d04, 0x0d08, 0x0d0c, 0x0d10, 0x0d14, 0x0d18, + 0x0d1c, 0x0d20, 0x0d24, 0x0d28, 0x0d2c, 0x0d30, 0x0d34, 0x0d38, + 0x0d3c, 0x0d40, 0x0d44, 0x0d48, 0x0d4c, 0x0d50, 0x0d54, 0x0d58, + 0x0d5c, 0x0d60, 0x0d64, 0x0d68, 0x0d6c, 0x0d70, 0x0d74, 0x0d78, + 0x0d7c, 0x0d80, 0x0d84, 0x0d88, 0x0d8c, 0x0d90, 0x0d94, 0x0d98, + 0x0d9c, 0x0da0, 0x0da4, 0x0da8, 0x0dac, 0x0db0, 0x0db4, 0x0db8, + 0x0dbc, 0x0dc0, 0x0dc4, 0x0dc8, 0x0dcc, 0x0dd0, 0x0dd4, 0x0dd8, + 0x0ddc, 0x0de0, 0x0de4, 0x0de8, 0x0dec, 0x0df0, 0x0df4, 0x0df8, + // Entry 340 - 37F + 0x0dfc, 0x0e00, 0x0e04, 0x0e08, 0x0e0c, 0x0e10, 0x0e14, 0x0e18, + 0x0e1d, 0x0e22, 0x0e27, 0x0e2c, 0x0e31, 0x0e36, 0x0e3a, 0x0e3e, + 0x0e42, 0x0e46, 0x0e4a, 0x0e4e, 0x0e52, 0x0e56, 0x0e5a, 0x0e5e, + 0x0e62, 0x0e66, 0x0e6a, 0x0e6e, 0x0e72, 0x0e76, 0x0e7a, 0x0e7e, + 0x0e82, 0x0e86, 0x0e8a, 0x0e8e, 0x0e92, 0x0e96, 0x0e9a, 0x0e9e, + 0x0ea2, 0x0ea6, 0x0eaa, 0x0eae, 0x0eb2, 0x0eb6, 0x0ebc, 0x0ec2, + 0x0ec8, 0x0ecc, 0x0ed0, 0x0ed4, 0x0ed8, 0x0edc, 0x0ee0, 0x0ee4, + 0x0ee8, 0x0eec, 0x0ef0, 0x0ef4, 0x0ef8, 0x0efc, 0x0f00, 0x0f04, + // Entry 380 - 3BF + 0x0f08, 0x0f0c, 0x0f10, 0x0f14, 0x0f18, 0x0f1c, 0x0f20, 0x0f24, + 0x0f28, 0x0f2c, 0x0f30, 0x0f34, 0x0f38, 0x0f3e, 0x0f44, 0x0f4a, + 0x0f50, 0x0f56, 0x0f5c, 0x0f62, 0x0f68, 0x0f6e, 0x0f74, 0x0f7a, + 0x0f80, 0x0f86, 0x0f8c, 0x0f92, 0x0f98, 0x0f9e, 0x0fa4, 0x0faa, + 0x0fb0, 0x0fb6, 0x0fbc, 0x0fc2, 0x0fc8, 0x0fce, 0x0fd4, 0x0fda, + 0x0fe0, 0x0fe6, 0x0fec, 0x0ff2, 0x0ff8, 0x0ffe, 0x1004, 0x100a, + 0x1010, 0x1016, 0x101c, 0x1022, 0x1028, 0x102e, 0x1034, 0x103a, + 0x1040, 0x1046, 0x104c, 
0x1052, 0x1058, 0x105e, 0x1064, 0x106a, + // Entry 3C0 - 3FF + 0x1070, 0x1076, 0x107c, 0x1082, 0x1088, 0x108e, 0x1094, 0x109a, + 0x10a0, 0x10a6, 0x10ac, 0x10b2, 0x10b8, 0x10be, 0x10c4, 0x10ca, + 0x10d0, 0x10d6, 0x10dc, 0x10e2, 0x10e8, 0x10ee, 0x10f4, 0x10fa, + 0x1100, 0x1106, 0x110c, 0x1112, 0x1118, 0x111e, 0x1124, 0x112a, + 0x1130, 0x1136, 0x113c, 0x1142, 0x1148, 0x114e, 0x1154, 0x115a, + 0x1160, 0x1166, 0x116c, 0x1172, 0x1178, 0x1180, 0x1188, 0x1190, + 0x1198, 0x11a0, 0x11a8, 0x11b0, 0x11b6, 0x11d7, 0x11e6, 0x11ee, + 0x11ef, 0x11f0, 0x11f1, 0x11f2, 0x11f3, 0x11f4, 0x11f5, 0x11f6, + // Entry 400 - 43F + 0x11f7, 0x11f8, 0x11f9, 0x11fa, 0x11fb, 0x11fc, 0x11fd, 0x11fe, + 0x11ff, 0x1200, 0x1201, 0x1205, 0x1209, 0x120d, 0x1211, 0x1215, + 0x1219, 0x121b, 0x121d, 0x121f, 0x1221, 0x1223, 0x1225, 0x1227, + 0x1229, 0x122b, 0x122d, 0x122f, 0x1231, 0x1233, 0x1235, 0x1237, + 0x1239, 0x123b, 0x123d, 0x123f, 0x1241, 0x1243, 0x1245, 0x1247, + 0x1249, 0x124b, 0x124d, 0x124f, 0x1251, 0x1253, 0x1255, 0x1257, + 0x1259, 0x125b, 0x125d, 0x125f, 0x1263, 0x1267, 0x126b, 0x126f, + 0x1270, 0x1271, 0x1272, 0x1273, 0x1274, 0x1275, 0x1277, 0x1279, + // Entry 440 - 47F + 0x127b, 0x127d, 0x127f, 0x1281, 0x1283, 0x1285, 0x1287, 0x1289, + 0x128c, 0x128e, 0x1290, 0x1292, 0x1294, 0x1297, 0x1299, 0x129b, + 0x129d, 0x129f, 0x12a1, 0x12a3, 0x12a5, 0x12a7, 0x12a9, 0x12ab, + 0x12ad, 0x12af, 0x12b2, 0x12b4, 0x12b6, 0x12b8, 0x12ba, 0x12bc, + 0x12be, 0x12c0, 0x12c2, 0x12c4, 0x12c6, 0x12c9, 0x12cb, 0x12cd, + 0x12d0, 0x12d2, 0x12d4, 0x12d6, 0x12d8, 0x12da, 0x12dc, 0x12de, + 0x12e6, 0x12ee, 0x12fa, 0x1306, 0x1312, 0x131e, 0x132a, 0x1332, + 0x133a, 0x1346, 0x1352, 0x135e, 0x136a, 0x136c, 0x136e, 0x1370, + // Entry 480 - 4BF + 0x1372, 0x1374, 0x1376, 0x1378, 0x137a, 0x137c, 0x137e, 0x1380, + 0x1382, 0x1384, 0x1386, 0x1388, 0x138a, 0x138d, 0x1390, 0x1392, + 0x1394, 0x1396, 0x1398, 0x139a, 0x139c, 0x139e, 0x13a0, 0x13a2, + 0x13a4, 0x13a6, 0x13a8, 0x13aa, 0x13ac, 0x13ae, 0x13b0, 0x13b2, + 0x13b4, 0x13b6, 0x13b8, 
0x13ba, 0x13bc, 0x13bf, 0x13c1, 0x13c3, + 0x13c5, 0x13c7, 0x13c9, 0x13cb, 0x13cd, 0x13cf, 0x13d1, 0x13d3, + 0x13d6, 0x13d8, 0x13da, 0x13dc, 0x13de, 0x13e0, 0x13e2, 0x13e4, + 0x13e6, 0x13e8, 0x13ea, 0x13ec, 0x13ee, 0x13f0, 0x13f2, 0x13f5, + // Entry 4C0 - 4FF + 0x13f8, 0x13fb, 0x13fe, 0x1401, 0x1404, 0x1407, 0x140a, 0x140d, + 0x1410, 0x1413, 0x1416, 0x1419, 0x141c, 0x141f, 0x1422, 0x1425, + 0x1428, 0x142b, 0x142e, 0x1431, 0x1434, 0x1437, 0x143a, 0x143d, + 0x1440, 0x1447, 0x1449, 0x144b, 0x144d, 0x1450, 0x1452, 0x1454, + 0x1456, 0x1458, 0x145a, 0x1460, 0x1466, 0x1469, 0x146c, 0x146f, + 0x1472, 0x1475, 0x1478, 0x147b, 0x147e, 0x1481, 0x1484, 0x1487, + 0x148a, 0x148d, 0x1490, 0x1493, 0x1496, 0x1499, 0x149c, 0x149f, + 0x14a2, 0x14a5, 0x14a8, 0x14ab, 0x14ae, 0x14b1, 0x14b4, 0x14b7, + // Entry 500 - 53F + 0x14ba, 0x14bd, 0x14c0, 0x14c3, 0x14c6, 0x14c9, 0x14cc, 0x14cf, + 0x14d2, 0x14d5, 0x14d8, 0x14db, 0x14de, 0x14e1, 0x14e4, 0x14e7, + 0x14ea, 0x14ed, 0x14f6, 0x14ff, 0x1508, 0x1511, 0x151a, 0x1523, + 0x152c, 0x1535, 0x153e, 0x1541, 0x1544, 0x1547, 0x154a, 0x154d, + 0x1550, 0x1553, 0x1556, 0x1559, 0x155c, 0x155f, 0x1562, 0x1565, + 0x1568, 0x156b, 0x156e, 0x1571, 0x1574, 0x1577, 0x157a, 0x157d, + 0x1580, 0x1583, 0x1586, 0x1589, 0x158c, 0x158f, 0x1592, 0x1595, + 0x1598, 0x159b, 0x159e, 0x15a1, 0x15a4, 0x15a7, 0x15aa, 0x15ad, + // Entry 540 - 57F + 0x15b0, 0x15b3, 0x15b6, 0x15b9, 0x15bc, 0x15bf, 0x15c2, 0x15c5, + 0x15c8, 0x15cb, 0x15ce, 0x15d1, 0x15d4, 0x15d7, 0x15da, 0x15dd, + 0x15e0, 0x15e3, 0x15e6, 0x15e9, 0x15ec, 0x15ef, 0x15f2, 0x15f5, + 0x15f8, 0x15fb, 0x15fe, 0x1601, 0x1604, 0x1607, 0x160a, 0x160d, + 0x1610, 0x1613, 0x1616, 0x1619, 0x161c, 0x161f, 0x1622, 0x1625, + 0x1628, 0x162b, 0x162e, 0x1631, 0x1634, 0x1637, 0x163a, 0x163d, + 0x1640, 0x1643, 0x1646, 0x1649, 0x164c, 0x164f, 0x1652, 0x1655, + 0x1658, 0x165b, 0x165e, 0x1661, 0x1664, 0x1667, 0x166a, 0x166d, + // Entry 580 - 5BF + 0x1670, 0x1673, 0x1676, 0x1679, 0x167c, 0x167f, 0x1682, 0x1685, + 0x1688, 0x168b, 0x168e, 
0x1691, 0x1694, 0x1697, 0x169a, 0x169d, + 0x16a0, 0x16a3, 0x16a6, 0x16a9, 0x16ac, 0x16af, 0x16b2, 0x16b5, + 0x16b8, 0x16bb, 0x16be, 0x16c1, 0x16c4, 0x16c7, 0x16ca, 0x16cd, + 0x16d0, 0x16d3, 0x16d6, 0x16d9, 0x16dc, 0x16df, 0x16e2, 0x16e5, + 0x16e8, 0x16eb, 0x16ee, 0x16f1, 0x16f4, 0x16f7, 0x16fa, 0x16fd, + 0x1700, 0x1703, 0x1706, 0x1709, 0x170c, 0x170f, 0x1712, 0x1715, + 0x1718, 0x171b, 0x171e, 0x1721, 0x1724, 0x1727, 0x172a, 0x172d, + // Entry 5C0 - 5FF + 0x1730, 0x1733, 0x1736, 0x1739, 0x173c, 0x173f, 0x1742, 0x1745, + 0x1748, 0x174b, 0x174e, 0x1751, 0x1754, 0x1757, 0x175a, 0x175d, + 0x1760, 0x1763, 0x1766, 0x1769, 0x176c, 0x176f, 0x1772, 0x1775, + 0x1778, 0x177b, 0x177e, 0x1781, 0x1784, 0x1787, 0x178a, 0x178d, + 0x1790, 0x1793, 0x1796, 0x1799, 0x179c, 0x179f, 0x17a2, 0x17a5, + 0x17a8, 0x17ab, 0x17ae, 0x17b1, 0x17b4, 0x17b7, 0x17ba, 0x17bd, + 0x17c0, 0x17c3, 0x17c6, 0x17c9, 0x17cc, 0x17cf, 0x17d2, 0x17d5, + 0x17d8, 0x17db, 0x17de, 0x17e1, 0x17e4, 0x17e7, 0x17ea, 0x17ed, + // Entry 600 - 63F + 0x17f0, 0x17f3, 0x17f6, 0x17f9, 0x17fc, 0x17ff, 0x1802, 0x1805, + 0x1808, 0x180b, 0x180e, 0x1811, 0x1814, 0x1817, 0x181a, 0x181d, + 0x1820, 0x1823, 0x1826, 0x1829, 0x182c, 0x182f, 0x1832, 0x1835, + 0x1838, 0x183b, 0x183e, 0x1841, 0x1844, 0x1847, 0x184a, 0x184d, + 0x1850, 0x1853, 0x1856, 0x1859, 0x185c, 0x185f, 0x1862, 0x1865, + 0x1868, 0x186b, 0x186e, 0x1871, 0x1874, 0x1877, 0x187a, 0x187d, + 0x1880, 0x1883, 0x1886, 0x1889, 0x188c, 0x188f, 0x1892, 0x1895, + 0x1898, 0x189b, 0x189e, 0x18a1, 0x18a4, 0x18a7, 0x18aa, 0x18ad, + // Entry 640 - 67F + 0x18b0, 0x18b3, 0x18b6, 0x18b9, 0x18bc, 0x18bf, 0x18c2, 0x18c5, + 0x18c8, 0x18cb, 0x18ce, 0x18d1, 0x18d4, 0x18d7, 0x18da, 0x18dd, + 0x18e0, 0x18e3, 0x18e6, 0x18e9, 0x18ec, 0x18ef, 0x18f2, 0x18f5, + 0x18f8, 0x18fb, 0x18fe, 0x1901, 0x1904, 0x1907, 0x190a, 0x190d, + 0x1910, 0x1913, 0x1916, 0x1919, 0x191c, 0x191f, 0x1922, 0x1925, + 0x1928, 0x192b, 0x192e, 0x1931, 0x1934, 0x1937, 0x193a, 0x193d, + 0x1940, 0x1943, 0x1946, 0x1949, 0x194c, 
0x194f, 0x1952, 0x1955, + 0x1958, 0x195b, 0x195e, 0x1961, 0x1964, 0x1967, 0x196a, 0x196d, + // Entry 680 - 6BF + 0x1970, 0x1973, 0x1976, 0x1979, 0x197c, 0x197f, 0x1982, 0x1985, + 0x1988, 0x198b, 0x198e, 0x1991, 0x1994, 0x1997, 0x199a, 0x199d, + 0x19a0, 0x19a3, 0x19a6, 0x19a9, 0x19ac, 0x19af, 0x19b2, 0x19b5, + 0x19b8, 0x19bb, 0x19be, 0x19c1, 0x19c4, 0x19c7, 0x19ca, 0x19cd, + 0x19d0, 0x19d3, 0x19d6, 0x19d9, 0x19dc, 0x19df, 0x19e2, 0x19e5, + 0x19e8, 0x19eb, 0x19ee, 0x19f1, 0x19f4, 0x19f7, 0x19fa, 0x19fd, + 0x1a00, 0x1a03, 0x1a06, 0x1a09, 0x1a0c, 0x1a0f, 0x1a12, 0x1a15, + 0x1a18, 0x1a1b, 0x1a1e, 0x1a21, 0x1a24, 0x1a27, 0x1a2a, 0x1a2d, + // Entry 6C0 - 6FF + 0x1a30, +} // Size: 3482 bytes + +var xorData string = "" + // Size: 4907 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x021\x00\x02\x01\x04\x02\x01\x02\x02\x019\x02" + + "\x03\x1c\x02\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03" + + "\xc1r\x02\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<" + + "\x03\xc1s*\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03" + + "\x83\xab\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96" + + "\xe1\xcd\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03" + + "\x9a\xec\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c" + + 
"!\x03\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03" + + "ʦ\x93\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7" + + "\x03\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca" + + "\xfa\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e" + + "\x03\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca" + + "\xe3\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99" + + "\x03\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca" + + "\xe8\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03" + + "\x0b\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06" + + "\x05\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03" + + "\x0786\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/" + + "\x03\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f" + + "\x03\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-" + + "\x03\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03" + + "\x07\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03" + + "\x07\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03" + + "\x07\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b" + + "\x0a\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03" + + "\x07\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+" + + "\x03\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03" + + "\x044\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03" + + "\x04+ \x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!" 
+ + "\x22\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04" + + "\x03\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>" + + "\x03\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03" + + "\x054\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03" + + "\x05):\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$" + + "\x1e\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226" + + "\x03\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05" + + "\x1b\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05" + + "\x03\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03" + + "\x06\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08" + + "\x03\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03" + + "\x0a6\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a" + + "\x1f\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03" + + "\x0a\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f" + + "\x02\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/" + + "\x03\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a" + + "\x00\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+" + + "\x10\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#" + + "<\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!" + + "\x00\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18." 
+ + "\x03\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15" + + "\x22\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b" + + "\x12\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05" + + "<\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x03\x0b)\x08\x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!" 
+ + "\x10\x03\x0b!0\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b" + + "\x03\x09\x1f\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14" + + "\x03\x0a\x01\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03" + + "\x08='\x03\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07" + + "\x01\x00\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03" + + "\x09\x11\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03" + + "\x0a/1\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03" + + "\x07<3\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06" + + "\x13\x00\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(" + + ";\x03\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08" + + "\x14$\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03" + + "\x0a\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19" + + "\x01\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18" + + "\x03\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03" + + "\x07\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03" + + "\x0a\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03" + + "\x0b\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03" + + "\x08\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05" + + "\x03\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11" + + "\x03\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03" + + "\x09\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a" + + ".\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + 
"\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + 
"\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + 
"\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" 
+ + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x03'\x02\x03)\x02\x03+" + + "\x02\x03/\x02\x03\x19\x02\x03\x1b\x02\x03\x1f\x03\x0d\x22\x18\x03\x0d" + + "\x22\x1a\x03\x0d\x22'\x03\x0d\x22/\x03\x0d\x223\x03\x0d\x22$\x02\x01\x1e" + + "\x03\x0f$!\x03\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08" + + "\x18\x03\x0f\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$" + + "\x03\x0e\x0d)\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d" + + "\x03\x0d. \x03\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03" + + "\x0d\x0d\x0f\x03\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03" + + "\x0c\x09:\x03\x0e\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18" + + "\x03\x0c\x1f\x1c\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03" + + "\x0b<+\x03\x0b8\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d" + + "\x22&\x03\x0b\x1a\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03" + + "\x0a!\x1a\x03\x0a!7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03" + + "\x0a\x00 \x03\x0a\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a" + + "\x1b-\x03\x09-\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091" + + "\x1f\x03\x093\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(" + + "\x16\x03\x09\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!" 
+ + "\x03\x09\x1a\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03" + + "\x08\x02*\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03" + + "\x070\x0c\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x06" + + "71\x03\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 " + + "\x1d\x03\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 31598 bytes (30.86 KiB). Checksum: d3118eda0d6b5360. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 133: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 133 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 135 blocks, 8640 entries, 17280 bytes +// The third block is the zero block. 
+var idnaValues = [8640]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 
0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x0012, 0xe9: 0x0018, + 0xea: 0x0019, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x0022, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0029, 0xf3: 0x0031, 0xf4: 0x003a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x0042, 0xf9: 0x0049, 0xfa: 0x0051, 0xfb: 0x0018, + 0xfc: 0x0059, 0xfd: 0x0061, 0xfe: 0x0069, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0071, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0079, + // Block 0x5, offset 0x140 + 0x140: 0x0079, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 
0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x0081, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x0089, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 
0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x0091, 0x1c5: 0x0091, + 0x1c6: 0x0091, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0099, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x00a1, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 
0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x00d2, 0x259: 0x00da, 0x25a: 0x00e2, 0x25b: 0x00ea, 0x25c: 0x00f2, 0x25d: 0x00fa, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0101, 0x262: 0x0089, 0x263: 0x0109, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0111, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x011a, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x0122, 0x2bf: 0x043d, + // Block 0xb, 
offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x003a, 0x2c5: 0x012a, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 
0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 
0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 
0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0818, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0139, + 0x4b6: 0x0141, 0x4b7: 0x0149, 0x4b8: 0x0151, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 
0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 
0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0c08, 0x557: 0x0c08, + 0x558: 0x0c08, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0c08, 0x571: 0x0c08, 0x572: 0x0c08, 0x573: 0x0c08, 0x574: 0x0c08, 0x575: 0x0c08, + 0x576: 0x0c08, 0x577: 0x0c08, 0x578: 0x0c08, 0x579: 0x0c08, 0x57a: 0x0c08, 0x57b: 0x0c08, + 0x57c: 0x0c08, 0x57d: 0x0c08, 0x57e: 0x0c08, 0x57f: 0x0c08, + // Block 0x16, offset 0x580 + 0x580: 0x0c08, 0x581: 0x0c08, 0x582: 0x0c08, 0x583: 0x0808, 0x584: 0x0808, 0x585: 0x0808, + 0x586: 0x0a08, 0x587: 0x0808, 0x588: 0x0818, 0x589: 0x0a08, 0x58a: 0x0a08, 0x58b: 0x0a08, + 0x58c: 0x0a08, 0x58d: 0x0a08, 0x58e: 0x0c08, 0x58f: 0x0040, 0x590: 0x0840, 0x591: 0x0840, + 0x592: 0x0040, 0x593: 0x0040, 0x594: 0x0040, 0x595: 0x0040, 0x596: 0x0040, 0x597: 0x0040, + 0x598: 0x3308, 0x599: 0x3308, 0x59a: 0x3308, 0x59b: 0x3308, 0x59c: 0x3308, 0x59d: 0x3308, + 0x59e: 0x3308, 0x59f: 0x3308, 0x5a0: 0x0a08, 0x5a1: 0x0a08, 0x5a2: 0x0a08, 0x5a3: 0x0a08, + 0x5a4: 0x0a08, 0x5a5: 0x0a08, 0x5a6: 0x0a08, 0x5a7: 0x0a08, 0x5a8: 0x0a08, 0x5a9: 0x0a08, + 0x5aa: 0x0c08, 0x5ab: 0x0c08, 0x5ac: 0x0c08, 0x5ad: 0x0808, 0x5ae: 0x0c08, 0x5af: 0x0a08, + 0x5b0: 0x0a08, 0x5b1: 0x0c08, 0x5b2: 0x0c08, 0x5b3: 0x0a08, 0x5b4: 0x0a08, 0x5b5: 0x0a08, + 0x5b6: 0x0a08, 
0x5b7: 0x0a08, 0x5b8: 0x0a08, 0x5b9: 0x0c08, 0x5ba: 0x0a08, 0x5bb: 0x0a08, + 0x5bc: 0x0a08, 0x5bd: 0x0a08, 0x5be: 0x0a08, 0x5bf: 0x0a08, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3008, 0x5c1: 0x3308, 0x5c2: 0x3308, 0x5c3: 0x3308, 0x5c4: 0x3308, 0x5c5: 0x3308, + 0x5c6: 0x3308, 0x5c7: 0x3308, 0x5c8: 0x3308, 0x5c9: 0x3008, 0x5ca: 0x3008, 0x5cb: 0x3008, + 0x5cc: 0x3008, 0x5cd: 0x3b08, 0x5ce: 0x3008, 0x5cf: 0x3008, 0x5d0: 0x0008, 0x5d1: 0x3308, + 0x5d2: 0x3308, 0x5d3: 0x3308, 0x5d4: 0x3308, 0x5d5: 0x3308, 0x5d6: 0x3308, 0x5d7: 0x3308, + 0x5d8: 0x0159, 0x5d9: 0x0161, 0x5da: 0x0169, 0x5db: 0x0171, 0x5dc: 0x0179, 0x5dd: 0x0181, + 0x5de: 0x0189, 0x5df: 0x0191, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x3308, 0x5e3: 0x3308, + 0x5e4: 0x0018, 0x5e5: 0x0018, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0008, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0018, 0x5f1: 0x0008, 0x5f2: 0x0008, 0x5f3: 0x0008, 0x5f4: 0x0008, 0x5f5: 0x0008, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0008, 0x5fb: 0x0008, + 0x5fc: 0x0008, 0x5fd: 0x0008, 0x5fe: 0x0008, 0x5ff: 0x0008, + // Block 0x18, offset 0x600 + 0x600: 0x0008, 0x601: 0x3308, 0x602: 0x3008, 0x603: 0x3008, 0x604: 0x0040, 0x605: 0x0008, + 0x606: 0x0008, 0x607: 0x0008, 0x608: 0x0008, 0x609: 0x0008, 0x60a: 0x0008, 0x60b: 0x0008, + 0x60c: 0x0008, 0x60d: 0x0040, 0x60e: 0x0040, 0x60f: 0x0008, 0x610: 0x0008, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0008, 0x614: 0x0008, 0x615: 0x0008, 0x616: 0x0008, 0x617: 0x0008, + 0x618: 0x0008, 0x619: 0x0008, 0x61a: 0x0008, 0x61b: 0x0008, 0x61c: 0x0008, 0x61d: 0x0008, + 0x61e: 0x0008, 0x61f: 0x0008, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x0008, 0x623: 0x0008, + 0x624: 0x0008, 0x625: 0x0008, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0040, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0040, 0x632: 0x0008, 0x633: 0x0040, 0x634: 0x0040, 0x635: 
0x0040, + 0x636: 0x0008, 0x637: 0x0008, 0x638: 0x0008, 0x639: 0x0008, 0x63a: 0x0040, 0x63b: 0x0040, + 0x63c: 0x3308, 0x63d: 0x0008, 0x63e: 0x3008, 0x63f: 0x3008, + // Block 0x19, offset 0x640 + 0x640: 0x3008, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3308, 0x644: 0x3308, 0x645: 0x0040, + 0x646: 0x0040, 0x647: 0x3008, 0x648: 0x3008, 0x649: 0x0040, 0x64a: 0x0040, 0x64b: 0x3008, + 0x64c: 0x3008, 0x64d: 0x3b08, 0x64e: 0x0008, 0x64f: 0x0040, 0x650: 0x0040, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0040, 0x654: 0x0040, 0x655: 0x0040, 0x656: 0x0040, 0x657: 0x3008, + 0x658: 0x0040, 0x659: 0x0040, 0x65a: 0x0040, 0x65b: 0x0040, 0x65c: 0x0199, 0x65d: 0x01a1, + 0x65e: 0x0040, 0x65f: 0x01a9, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x3308, 0x663: 0x3308, + 0x664: 0x0040, 0x665: 0x0040, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0008, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0008, 0x672: 0x0018, 0x673: 0x0018, 0x674: 0x0018, 0x675: 0x0018, + 0x676: 0x0018, 0x677: 0x0018, 0x678: 0x0018, 0x679: 0x0018, 0x67a: 0x0018, 0x67b: 0x0018, + 0x67c: 0x0008, 0x67d: 0x0018, 0x67e: 0x3308, 0x67f: 0x0040, + // Block 0x1a, offset 0x680 + 0x680: 0x0040, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x3008, 0x684: 0x0040, 0x685: 0x0008, + 0x686: 0x0008, 0x687: 0x0008, 0x688: 0x0008, 0x689: 0x0008, 0x68a: 0x0008, 0x68b: 0x0040, + 0x68c: 0x0040, 0x68d: 0x0040, 0x68e: 0x0040, 0x68f: 0x0008, 0x690: 0x0008, 0x691: 0x0040, + 0x692: 0x0040, 0x693: 0x0008, 0x694: 0x0008, 0x695: 0x0008, 0x696: 0x0008, 0x697: 0x0008, + 0x698: 0x0008, 0x699: 0x0008, 0x69a: 0x0008, 0x69b: 0x0008, 0x69c: 0x0008, 0x69d: 0x0008, + 0x69e: 0x0008, 0x69f: 0x0008, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x0008, 0x6a3: 0x0008, + 0x6a4: 0x0008, 0x6a5: 0x0008, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0040, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x0008, 0x6b1: 0x0040, 0x6b2: 0x0008, 0x6b3: 
0x01b1, 0x6b4: 0x0040, 0x6b5: 0x0008, + 0x6b6: 0x01b9, 0x6b7: 0x0040, 0x6b8: 0x0008, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x3308, 0x6bd: 0x0040, 0x6be: 0x3008, 0x6bf: 0x3008, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3008, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x0040, 0x6c4: 0x0040, 0x6c5: 0x0040, + 0x6c6: 0x0040, 0x6c7: 0x3308, 0x6c8: 0x3308, 0x6c9: 0x0040, 0x6ca: 0x0040, 0x6cb: 0x3308, + 0x6cc: 0x3308, 0x6cd: 0x3b08, 0x6ce: 0x0040, 0x6cf: 0x0040, 0x6d0: 0x0040, 0x6d1: 0x3308, + 0x6d2: 0x0040, 0x6d3: 0x0040, 0x6d4: 0x0040, 0x6d5: 0x0040, 0x6d6: 0x0040, 0x6d7: 0x0040, + 0x6d8: 0x0040, 0x6d9: 0x01c1, 0x6da: 0x01c9, 0x6db: 0x01d1, 0x6dc: 0x0008, 0x6dd: 0x0040, + 0x6de: 0x01d9, 0x6df: 0x0040, 0x6e0: 0x0040, 0x6e1: 0x0040, 0x6e2: 0x0040, 0x6e3: 0x0040, + 0x6e4: 0x0040, 0x6e5: 0x0040, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0008, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x3308, 0x6f1: 0x3308, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0008, 0x6f5: 0x3308, + 0x6f6: 0x0018, 0x6f7: 0x0040, 0x6f8: 0x0040, 0x6f9: 0x0040, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x0040, 0x6fd: 0x0040, 0x6fe: 0x0040, 0x6ff: 0x0040, + // Block 0x1c, offset 0x700 + 0x700: 0x0040, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3008, 0x704: 0x0040, 0x705: 0x0008, + 0x706: 0x0008, 0x707: 0x0008, 0x708: 0x0008, 0x709: 0x0008, 0x70a: 0x0008, 0x70b: 0x0008, + 0x70c: 0x0008, 0x70d: 0x0008, 0x70e: 0x0040, 0x70f: 0x0008, 0x710: 0x0008, 0x711: 0x0008, + 0x712: 0x0040, 0x713: 0x0008, 0x714: 0x0008, 0x715: 0x0008, 0x716: 0x0008, 0x717: 0x0008, + 0x718: 0x0008, 0x719: 0x0008, 0x71a: 0x0008, 0x71b: 0x0008, 0x71c: 0x0008, 0x71d: 0x0008, + 0x71e: 0x0008, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x0008, 0x723: 0x0008, + 0x724: 0x0008, 0x725: 0x0008, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0040, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0008, 0x731: 
0x0040, 0x732: 0x0008, 0x733: 0x0008, 0x734: 0x0040, 0x735: 0x0008, + 0x736: 0x0008, 0x737: 0x0008, 0x738: 0x0008, 0x739: 0x0008, 0x73a: 0x0040, 0x73b: 0x0040, + 0x73c: 0x3308, 0x73d: 0x0008, 0x73e: 0x3008, 0x73f: 0x3008, + // Block 0x1d, offset 0x740 + 0x740: 0x3008, 0x741: 0x3308, 0x742: 0x3308, 0x743: 0x3308, 0x744: 0x3308, 0x745: 0x3308, + 0x746: 0x0040, 0x747: 0x3308, 0x748: 0x3308, 0x749: 0x3008, 0x74a: 0x0040, 0x74b: 0x3008, + 0x74c: 0x3008, 0x74d: 0x3b08, 0x74e: 0x0040, 0x74f: 0x0040, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0040, 0x754: 0x0040, 0x755: 0x0040, 0x756: 0x0040, 0x757: 0x0040, + 0x758: 0x0040, 0x759: 0x0040, 0x75a: 0x0040, 0x75b: 0x0040, 0x75c: 0x0040, 0x75d: 0x0040, + 0x75e: 0x0040, 0x75f: 0x0040, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x3308, 0x763: 0x3308, + 0x764: 0x0040, 0x765: 0x0040, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0008, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0018, 0x771: 0x0018, 0x772: 0x0040, 0x773: 0x0040, 0x774: 0x0040, 0x775: 0x0040, + 0x776: 0x0040, 0x777: 0x0040, 0x778: 0x0040, 0x779: 0x0008, 0x77a: 0x3308, 0x77b: 0x3308, + 0x77c: 0x3308, 0x77d: 0x3308, 0x77e: 0x3308, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x0040, 0x781: 0x3308, 0x782: 0x3008, 0x783: 0x3008, 0x784: 0x0040, 0x785: 0x0008, + 0x786: 0x0008, 0x787: 0x0008, 0x788: 0x0008, 0x789: 0x0008, 0x78a: 0x0008, 0x78b: 0x0008, + 0x78c: 0x0008, 0x78d: 0x0040, 0x78e: 0x0040, 0x78f: 0x0008, 0x790: 0x0008, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0008, 0x794: 0x0008, 0x795: 0x0008, 0x796: 0x0008, 0x797: 0x0008, + 0x798: 0x0008, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0008, 0x79c: 0x0008, 0x79d: 0x0008, + 0x79e: 0x0008, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x0008, 0x7a3: 0x0008, + 0x7a4: 0x0008, 0x7a5: 0x0008, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0040, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, 
+ 0x7b0: 0x0008, 0x7b1: 0x0040, 0x7b2: 0x0008, 0x7b3: 0x0008, 0x7b4: 0x0040, 0x7b5: 0x0008, + 0x7b6: 0x0008, 0x7b7: 0x0008, 0x7b8: 0x0008, 0x7b9: 0x0008, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x3308, 0x7bd: 0x0008, 0x7be: 0x3008, 0x7bf: 0x3308, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x3008, 0x7c1: 0x3308, 0x7c2: 0x3308, 0x7c3: 0x3308, 0x7c4: 0x3308, 0x7c5: 0x0040, + 0x7c6: 0x0040, 0x7c7: 0x3008, 0x7c8: 0x3008, 0x7c9: 0x0040, 0x7ca: 0x0040, 0x7cb: 0x3008, + 0x7cc: 0x3008, 0x7cd: 0x3b08, 0x7ce: 0x0040, 0x7cf: 0x0040, 0x7d0: 0x0040, 0x7d1: 0x0040, + 0x7d2: 0x0040, 0x7d3: 0x0040, 0x7d4: 0x0040, 0x7d5: 0x3308, 0x7d6: 0x3308, 0x7d7: 0x3008, + 0x7d8: 0x0040, 0x7d9: 0x0040, 0x7da: 0x0040, 0x7db: 0x0040, 0x7dc: 0x01e1, 0x7dd: 0x01e9, + 0x7de: 0x0040, 0x7df: 0x0008, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x3308, 0x7e3: 0x3308, + 0x7e4: 0x0040, 0x7e5: 0x0040, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0018, 0x7f1: 0x0008, 0x7f2: 0x0018, 0x7f3: 0x0018, 0x7f4: 0x0018, 0x7f5: 0x0018, + 0x7f6: 0x0018, 0x7f7: 0x0018, 0x7f8: 0x0040, 0x7f9: 0x0040, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x0040, 0x7ff: 0x0040, + // Block 0x20, offset 0x800 + 0x800: 0x0040, 0x801: 0x0040, 0x802: 0x3308, 0x803: 0x0008, 0x804: 0x0040, 0x805: 0x0008, + 0x806: 0x0008, 0x807: 0x0008, 0x808: 0x0008, 0x809: 0x0008, 0x80a: 0x0008, 0x80b: 0x0040, + 0x80c: 0x0040, 0x80d: 0x0040, 0x80e: 0x0008, 0x80f: 0x0008, 0x810: 0x0008, 0x811: 0x0040, + 0x812: 0x0008, 0x813: 0x0008, 0x814: 0x0008, 0x815: 0x0008, 0x816: 0x0040, 0x817: 0x0040, + 0x818: 0x0040, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0008, 0x81d: 0x0040, + 0x81e: 0x0008, 0x81f: 0x0008, 0x820: 0x0040, 0x821: 0x0040, 0x822: 0x0040, 0x823: 0x0008, + 0x824: 0x0008, 0x825: 0x0040, 0x826: 0x0040, 0x827: 0x0040, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0040, 0x82c: 0x0040, 0x82d: 0x0040, 
0x82e: 0x0008, 0x82f: 0x0008, + 0x830: 0x0008, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0008, 0x834: 0x0008, 0x835: 0x0008, + 0x836: 0x0008, 0x837: 0x0008, 0x838: 0x0008, 0x839: 0x0008, 0x83a: 0x0040, 0x83b: 0x0040, + 0x83c: 0x0040, 0x83d: 0x0040, 0x83e: 0x3008, 0x83f: 0x3008, + // Block 0x21, offset 0x840 + 0x840: 0x3308, 0x841: 0x3008, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x3008, 0x845: 0x0040, + 0x846: 0x3308, 0x847: 0x3308, 0x848: 0x3308, 0x849: 0x0040, 0x84a: 0x3308, 0x84b: 0x3308, + 0x84c: 0x3308, 0x84d: 0x3b08, 0x84e: 0x0040, 0x84f: 0x0040, 0x850: 0x0040, 0x851: 0x0040, + 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0040, 0x855: 0x3308, 0x856: 0x3308, 0x857: 0x0040, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0040, 0x85c: 0x0040, 0x85d: 0x0008, + 0x85e: 0x0040, 0x85f: 0x0040, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x3308, 0x863: 0x3308, + 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0040, 0x871: 0x0040, 0x872: 0x0040, 0x873: 0x0040, 0x874: 0x0040, 0x875: 0x0040, + 0x876: 0x0040, 0x877: 0x0018, 0x878: 0x0018, 0x879: 0x0018, 0x87a: 0x0018, 0x87b: 0x0018, + 0x87c: 0x0018, 0x87d: 0x0018, 0x87e: 0x0018, 0x87f: 0x0018, + // Block 0x22, offset 0x880 + 0x880: 0x0008, 0x881: 0x3308, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x0018, 0x885: 0x0008, + 0x886: 0x0008, 0x887: 0x0008, 0x888: 0x0008, 0x889: 0x0008, 0x88a: 0x0008, 0x88b: 0x0008, + 0x88c: 0x0008, 0x88d: 0x0040, 0x88e: 0x0008, 0x88f: 0x0008, 0x890: 0x0008, 0x891: 0x0040, + 0x892: 0x0008, 0x893: 0x0008, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x0008, + 0x898: 0x0008, 0x899: 0x0008, 0x89a: 0x0008, 0x89b: 0x0008, 0x89c: 0x0008, 0x89d: 0x0008, + 0x89e: 0x0008, 0x89f: 0x0008, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x0008, 0x8a3: 0x0008, + 0x8a4: 0x0008, 0x8a5: 0x0008, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0040, + 0x8aa: 0x0008, 0x8ab: 0x0008, 
0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0008, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0008, 0x8b4: 0x0040, 0x8b5: 0x0008, + 0x8b6: 0x0008, 0x8b7: 0x0008, 0x8b8: 0x0008, 0x8b9: 0x0008, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x3308, 0x8bd: 0x0008, 0x8be: 0x3008, 0x8bf: 0x3308, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3008, 0x8c2: 0x3008, 0x8c3: 0x3008, 0x8c4: 0x3008, 0x8c5: 0x0040, + 0x8c6: 0x3308, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3308, 0x8cd: 0x3b08, 0x8ce: 0x0040, 0x8cf: 0x0040, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0040, 0x8d5: 0x3008, 0x8d6: 0x3008, 0x8d7: 0x0040, + 0x8d8: 0x0040, 0x8d9: 0x0040, 0x8da: 0x0040, 0x8db: 0x0040, 0x8dc: 0x0040, 0x8dd: 0x0008, + 0x8de: 0x0008, 0x8df: 0x0040, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0040, 0x8f1: 0x0008, 0x8f2: 0x0008, 0x8f3: 0x3008, 0x8f4: 0x0040, 0x8f5: 0x0040, + 0x8f6: 0x0040, 0x8f7: 0x0040, 0x8f8: 0x0040, 0x8f9: 0x0040, 0x8fa: 0x0040, 0x8fb: 0x0040, + 0x8fc: 0x0040, 0x8fd: 0x0040, 0x8fe: 0x0040, 0x8ff: 0x0040, + // Block 0x24, offset 0x900 + 0x900: 0x3008, 0x901: 0x3308, 0x902: 0x3308, 0x903: 0x3308, 0x904: 0x3308, 0x905: 0x0040, + 0x906: 0x3008, 0x907: 0x3008, 0x908: 0x3008, 0x909: 0x0040, 0x90a: 0x3008, 0x90b: 0x3008, + 0x90c: 0x3008, 0x90d: 0x3b08, 0x90e: 0x0008, 0x90f: 0x0018, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x3008, + 0x918: 0x0018, 0x919: 0x0018, 0x91a: 0x0018, 0x91b: 0x0018, 0x91c: 0x0018, 0x91d: 0x0018, + 0x91e: 0x0018, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x3308, 0x923: 0x3308, + 0x924: 0x0040, 0x925: 0x0040, 0x926: 0x0008, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0008, + 
0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0018, 0x931: 0x0018, 0x932: 0x0018, 0x933: 0x0018, 0x934: 0x0018, 0x935: 0x0018, + 0x936: 0x0018, 0x937: 0x0018, 0x938: 0x0018, 0x939: 0x0018, 0x93a: 0x0008, 0x93b: 0x0008, + 0x93c: 0x0008, 0x93d: 0x0008, 0x93e: 0x0008, 0x93f: 0x0008, + // Block 0x25, offset 0x940 + 0x940: 0x0040, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x0040, 0x944: 0x0008, 0x945: 0x0040, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0040, + 0x94c: 0x0008, 0x94d: 0x0008, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0008, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0008, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0008, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0040, 0x965: 0x0008, 0x966: 0x0040, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0008, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0008, 0x96e: 0x0008, 0x96f: 0x0008, + 0x970: 0x0008, 0x971: 0x3308, 0x972: 0x0008, 0x973: 0x01f9, 0x974: 0x3308, 0x975: 0x3308, + 0x976: 0x3308, 0x977: 0x3308, 0x978: 0x3308, 0x979: 0x3308, 0x97a: 0x3b08, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x0008, 0x97e: 0x0040, 0x97f: 0x0040, + // Block 0x26, offset 0x980 + 0x980: 0x0008, 0x981: 0x0008, 0x982: 0x0008, 0x983: 0x0211, 0x984: 0x0008, 0x985: 0x0008, + 0x986: 0x0008, 0x987: 0x0008, 0x988: 0x0040, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x0219, 0x98e: 0x0008, 0x98f: 0x0008, 0x990: 0x0008, 0x991: 0x0008, + 0x992: 0x0221, 0x993: 0x0008, 0x994: 0x0008, 0x995: 0x0008, 0x996: 0x0008, 0x997: 0x0229, + 0x998: 0x0008, 0x999: 0x0008, 0x99a: 0x0008, 0x99b: 0x0008, 0x99c: 0x0231, 0x99d: 0x0008, + 0x99e: 0x0008, 0x99f: 0x0008, 0x9a0: 0x0008, 0x9a1: 0x0008, 0x9a2: 0x0008, 0x9a3: 0x0008, + 0x9a4: 0x0008, 0x9a5: 0x0008, 0x9a6: 0x0008, 0x9a7: 0x0008, 0x9a8: 
0x0008, 0x9a9: 0x0239, + 0x9aa: 0x0008, 0x9ab: 0x0008, 0x9ac: 0x0008, 0x9ad: 0x0040, 0x9ae: 0x0040, 0x9af: 0x0040, + 0x9b0: 0x0040, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x0241, 0x9b4: 0x3308, 0x9b5: 0x0249, + 0x9b6: 0x0251, 0x9b7: 0x0259, 0x9b8: 0x0261, 0x9b9: 0x0269, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x3308, 0x9be: 0x3308, 0x9bf: 0x3008, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x3308, 0x9c1: 0x0271, 0x9c2: 0x3308, 0x9c3: 0x3308, 0x9c4: 0x3b08, 0x9c5: 0x0018, + 0x9c6: 0x3308, 0x9c7: 0x3308, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x3308, 0x9ce: 0x3308, 0x9cf: 0x3308, 0x9d0: 0x3308, 0x9d1: 0x3308, + 0x9d2: 0x3308, 0x9d3: 0x0279, 0x9d4: 0x3308, 0x9d5: 0x3308, 0x9d6: 0x3308, 0x9d7: 0x3308, + 0x9d8: 0x0040, 0x9d9: 0x3308, 0x9da: 0x3308, 0x9db: 0x3308, 0x9dc: 0x3308, 0x9dd: 0x0281, + 0x9de: 0x3308, 0x9df: 0x3308, 0x9e0: 0x3308, 0x9e1: 0x3308, 0x9e2: 0x0289, 0x9e3: 0x3308, + 0x9e4: 0x3308, 0x9e5: 0x3308, 0x9e6: 0x3308, 0x9e7: 0x0291, 0x9e8: 0x3308, 0x9e9: 0x3308, + 0x9ea: 0x3308, 0x9eb: 0x3308, 0x9ec: 0x0299, 0x9ed: 0x3308, 0x9ee: 0x3308, 0x9ef: 0x3308, + 0x9f0: 0x3308, 0x9f1: 0x3308, 0x9f2: 0x3308, 0x9f3: 0x3308, 0x9f4: 0x3308, 0x9f5: 0x3308, + 0x9f6: 0x3308, 0x9f7: 0x3308, 0x9f8: 0x3308, 0x9f9: 0x02a1, 0x9fa: 0x3308, 0x9fb: 0x3308, + 0x9fc: 0x3308, 0x9fd: 0x0040, 0x9fe: 0x0018, 0x9ff: 0x0018, + // Block 0x28, offset 0xa00 + 0xa00: 0x0008, 0xa01: 0x0008, 0xa02: 0x0008, 0xa03: 0x0008, 0xa04: 0x0008, 0xa05: 0x0008, + 0xa06: 0x0008, 0xa07: 0x0008, 0xa08: 0x0008, 0xa09: 0x0008, 0xa0a: 0x0008, 0xa0b: 0x0008, + 0xa0c: 0x0008, 0xa0d: 0x0008, 0xa0e: 0x0008, 0xa0f: 0x0008, 0xa10: 0x0008, 0xa11: 0x0008, + 0xa12: 0x0008, 0xa13: 0x0008, 0xa14: 0x0008, 0xa15: 0x0008, 0xa16: 0x0008, 0xa17: 0x0008, + 0xa18: 0x0008, 0xa19: 0x0008, 0xa1a: 0x0008, 0xa1b: 0x0008, 0xa1c: 0x0008, 0xa1d: 0x0008, + 0xa1e: 0x0008, 0xa1f: 0x0008, 0xa20: 0x0008, 0xa21: 0x0008, 0xa22: 0x0008, 0xa23: 0x0008, + 0xa24: 0x0008, 0xa25: 0x0008, 0xa26: 
0x0008, 0xa27: 0x0008, 0xa28: 0x0008, 0xa29: 0x0008, + 0xa2a: 0x0008, 0xa2b: 0x0008, 0xa2c: 0x0019, 0xa2d: 0x02e1, 0xa2e: 0x02e9, 0xa2f: 0x0008, + 0xa30: 0x02f1, 0xa31: 0x02f9, 0xa32: 0x0301, 0xa33: 0x0309, 0xa34: 0x00a9, 0xa35: 0x0311, + 0xa36: 0x00b1, 0xa37: 0x0319, 0xa38: 0x0101, 0xa39: 0x0321, 0xa3a: 0x0329, 0xa3b: 0x0008, + 0xa3c: 0x0051, 0xa3d: 0x0331, 0xa3e: 0x0339, 0xa3f: 0x00b9, + // Block 0x29, offset 0xa40 + 0xa40: 0x0341, 0xa41: 0x0349, 0xa42: 0x00c1, 0xa43: 0x0019, 0xa44: 0x0351, 0xa45: 0x0359, + 0xa46: 0x05b5, 0xa47: 0x02e9, 0xa48: 0x02f1, 0xa49: 0x02f9, 0xa4a: 0x0361, 0xa4b: 0x0369, + 0xa4c: 0x0371, 0xa4d: 0x0309, 0xa4e: 0x0008, 0xa4f: 0x0319, 0xa50: 0x0321, 0xa51: 0x0379, + 0xa52: 0x0051, 0xa53: 0x0381, 0xa54: 0x05cd, 0xa55: 0x05cd, 0xa56: 0x0339, 0xa57: 0x0341, + 0xa58: 0x0349, 0xa59: 0x05b5, 0xa5a: 0x0389, 0xa5b: 0x0391, 0xa5c: 0x05e5, 0xa5d: 0x0399, + 0xa5e: 0x03a1, 0xa5f: 0x03a9, 0xa60: 0x03b1, 0xa61: 0x03b9, 0xa62: 0x0311, 0xa63: 0x00b9, + 0xa64: 0x0349, 0xa65: 0x0391, 0xa66: 0x0399, 0xa67: 0x03a1, 0xa68: 0x03c1, 0xa69: 0x03b1, + 0xa6a: 0x03b9, 0xa6b: 0x0008, 0xa6c: 0x0008, 0xa6d: 0x0008, 0xa6e: 0x0008, 0xa6f: 0x0008, + 0xa70: 0x0008, 0xa71: 0x0008, 0xa72: 0x0008, 0xa73: 0x0008, 0xa74: 0x0008, 0xa75: 0x0008, + 0xa76: 0x0008, 0xa77: 0x0008, 0xa78: 0x03c9, 0xa79: 0x0008, 0xa7a: 0x0008, 0xa7b: 0x0008, + 0xa7c: 0x0008, 0xa7d: 0x0008, 0xa7e: 0x0008, 0xa7f: 0x0008, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0008, 0xa81: 0x0008, 0xa82: 0x0008, 0xa83: 0x0008, 0xa84: 0x0008, 0xa85: 0x0008, + 0xa86: 0x0008, 0xa87: 0x0008, 0xa88: 0x0008, 0xa89: 0x0008, 0xa8a: 0x0008, 0xa8b: 0x0008, + 0xa8c: 0x0008, 0xa8d: 0x0008, 0xa8e: 0x0008, 0xa8f: 0x0008, 0xa90: 0x0008, 0xa91: 0x0008, + 0xa92: 0x0008, 0xa93: 0x0008, 0xa94: 0x0008, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008, + 0xa98: 0x0008, 0xa99: 0x0008, 0xa9a: 0x0008, 0xa9b: 0x03d1, 0xa9c: 0x03d9, 0xa9d: 0x03e1, + 0xa9e: 0x03e9, 0xa9f: 0x0371, 0xaa0: 0x03f1, 0xaa1: 0x03f9, 0xaa2: 0x0401, 0xaa3: 0x0409, + 0xaa4: 
0x0411, 0xaa5: 0x0419, 0xaa6: 0x0421, 0xaa7: 0x05fd, 0xaa8: 0x0429, 0xaa9: 0x0431, + 0xaaa: 0xe17d, 0xaab: 0x0439, 0xaac: 0x0441, 0xaad: 0x0449, 0xaae: 0x0451, 0xaaf: 0x0459, + 0xab0: 0x0461, 0xab1: 0x0469, 0xab2: 0x0471, 0xab3: 0x0479, 0xab4: 0x0481, 0xab5: 0x0489, + 0xab6: 0x0491, 0xab7: 0x0499, 0xab8: 0x0615, 0xab9: 0x04a1, 0xaba: 0x04a9, 0xabb: 0x04b1, + 0xabc: 0x04b9, 0xabd: 0x04c1, 0xabe: 0x04c9, 0xabf: 0x04d1, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0xe00d, 0xad7: 0x0008, + 0xad8: 0xe00d, 0xad9: 0x0008, 0xada: 0xe00d, 0xadb: 0x0008, 0xadc: 0xe00d, 0xadd: 0x0008, + 0xade: 0xe00d, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0xe00d, 0xb01: 0x0008, 0xb02: 0xe00d, 0xb03: 0x0008, 0xb04: 0xe00d, 0xb05: 0x0008, + 0xb06: 0xe00d, 0xb07: 0x0008, 0xb08: 0xe00d, 0xb09: 0x0008, 0xb0a: 0xe00d, 0xb0b: 0x0008, + 0xb0c: 0xe00d, 0xb0d: 0x0008, 0xb0e: 0xe00d, 0xb0f: 0x0008, 0xb10: 0xe00d, 0xb11: 0x0008, + 0xb12: 0xe00d, 0xb13: 0x0008, 0xb14: 0xe00d, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0008, 0xb19: 0x0008, 0xb1a: 0x062d, 0xb1b: 0x064d, 0xb1c: 0x0008, 0xb1d: 0x0008, + 0xb1e: 0x04d9, 0xb1f: 0x0008, 0xb20: 0xe00d, 0xb21: 0x0008, 0xb22: 0xe00d, 
0xb23: 0x0008, + 0xb24: 0xe00d, 0xb25: 0x0008, 0xb26: 0xe00d, 0xb27: 0x0008, 0xb28: 0xe00d, 0xb29: 0x0008, + 0xb2a: 0xe00d, 0xb2b: 0x0008, 0xb2c: 0xe00d, 0xb2d: 0x0008, 0xb2e: 0xe00d, 0xb2f: 0x0008, + 0xb30: 0xe00d, 0xb31: 0x0008, 0xb32: 0xe00d, 0xb33: 0x0008, 0xb34: 0xe00d, 0xb35: 0x0008, + 0xb36: 0xe00d, 0xb37: 0x0008, 0xb38: 0xe00d, 0xb39: 0x0008, 0xb3a: 0xe00d, 0xb3b: 0x0008, + 0xb3c: 0xe00d, 0xb3d: 0x0008, 0xb3e: 0xe00d, 0xb3f: 0x0008, + // Block 0x2d, offset 0xb40 + 0xb40: 0x0008, 0xb41: 0x0008, 0xb42: 0x0008, 0xb43: 0x0008, 0xb44: 0x0008, 0xb45: 0x0008, + 0xb46: 0x0040, 0xb47: 0x0040, 0xb48: 0xe045, 0xb49: 0xe045, 0xb4a: 0xe045, 0xb4b: 0xe045, + 0xb4c: 0xe045, 0xb4d: 0xe045, 0xb4e: 0x0040, 0xb4f: 0x0040, 0xb50: 0x0008, 0xb51: 0x0008, + 0xb52: 0x0008, 0xb53: 0x0008, 0xb54: 0x0008, 0xb55: 0x0008, 0xb56: 0x0008, 0xb57: 0x0008, + 0xb58: 0x0040, 0xb59: 0xe045, 0xb5a: 0x0040, 0xb5b: 0xe045, 0xb5c: 0x0040, 0xb5d: 0xe045, + 0xb5e: 0x0040, 0xb5f: 0xe045, 0xb60: 0x0008, 0xb61: 0x0008, 0xb62: 0x0008, 0xb63: 0x0008, + 0xb64: 0x0008, 0xb65: 0x0008, 0xb66: 0x0008, 0xb67: 0x0008, 0xb68: 0xe045, 0xb69: 0xe045, + 0xb6a: 0xe045, 0xb6b: 0xe045, 0xb6c: 0xe045, 0xb6d: 0xe045, 0xb6e: 0xe045, 0xb6f: 0xe045, + 0xb70: 0x0008, 0xb71: 0x04e1, 0xb72: 0x0008, 0xb73: 0x04e9, 0xb74: 0x0008, 0xb75: 0x04f1, + 0xb76: 0x0008, 0xb77: 0x04f9, 0xb78: 0x0008, 0xb79: 0x0501, 0xb7a: 0x0008, 0xb7b: 0x0509, + 0xb7c: 0x0008, 0xb7d: 0x0511, 0xb7e: 0x0040, 0xb7f: 0x0040, + // Block 0x2e, offset 0xb80 + 0xb80: 0x0519, 0xb81: 0x0521, 0xb82: 0x0529, 0xb83: 0x0531, 0xb84: 0x0539, 0xb85: 0x0541, + 0xb86: 0x0549, 0xb87: 0x0551, 0xb88: 0x0519, 0xb89: 0x0521, 0xb8a: 0x0529, 0xb8b: 0x0531, + 0xb8c: 0x0539, 0xb8d: 0x0541, 0xb8e: 0x0549, 0xb8f: 0x0551, 0xb90: 0x0559, 0xb91: 0x0561, + 0xb92: 0x0569, 0xb93: 0x0571, 0xb94: 0x0579, 0xb95: 0x0581, 0xb96: 0x0589, 0xb97: 0x0591, + 0xb98: 0x0559, 0xb99: 0x0561, 0xb9a: 0x0569, 0xb9b: 0x0571, 0xb9c: 0x0579, 0xb9d: 0x0581, + 0xb9e: 0x0589, 0xb9f: 0x0591, 0xba0: 0x0599, 
0xba1: 0x05a1, 0xba2: 0x05a9, 0xba3: 0x05b1, + 0xba4: 0x05b9, 0xba5: 0x05c1, 0xba6: 0x05c9, 0xba7: 0x05d1, 0xba8: 0x0599, 0xba9: 0x05a1, + 0xbaa: 0x05a9, 0xbab: 0x05b1, 0xbac: 0x05b9, 0xbad: 0x05c1, 0xbae: 0x05c9, 0xbaf: 0x05d1, + 0xbb0: 0x0008, 0xbb1: 0x0008, 0xbb2: 0x05d9, 0xbb3: 0x05e1, 0xbb4: 0x05e9, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x05f1, 0xbb8: 0xe045, 0xbb9: 0xe045, 0xbba: 0x0665, 0xbbb: 0x04e1, + 0xbbc: 0x05e1, 0xbbd: 0x067e, 0xbbe: 0x05f9, 0xbbf: 0x069e, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x06be, 0xbc1: 0x0602, 0xbc2: 0x0609, 0xbc3: 0x0611, 0xbc4: 0x0619, 0xbc5: 0x0040, + 0xbc6: 0x0008, 0xbc7: 0x0621, 0xbc8: 0x06dd, 0xbc9: 0x04e9, 0xbca: 0x06f5, 0xbcb: 0x04f1, + 0xbcc: 0x0611, 0xbcd: 0x062a, 0xbce: 0x0632, 0xbcf: 0x063a, 0xbd0: 0x0008, 0xbd1: 0x0008, + 0xbd2: 0x0008, 0xbd3: 0x0641, 0xbd4: 0x0040, 0xbd5: 0x0040, 0xbd6: 0x0008, 0xbd7: 0x0008, + 0xbd8: 0xe045, 0xbd9: 0xe045, 0xbda: 0x070d, 0xbdb: 0x04f9, 0xbdc: 0x0040, 0xbdd: 0x064a, + 0xbde: 0x0652, 0xbdf: 0x065a, 0xbe0: 0x0008, 0xbe1: 0x0008, 0xbe2: 0x0008, 0xbe3: 0x0661, + 0xbe4: 0x0008, 0xbe5: 0x0008, 0xbe6: 0x0008, 0xbe7: 0x0008, 0xbe8: 0xe045, 0xbe9: 0xe045, + 0xbea: 0x0725, 0xbeb: 0x0509, 0xbec: 0xe04d, 0xbed: 0x066a, 0xbee: 0x012a, 0xbef: 0x0672, + 0xbf0: 0x0040, 0xbf1: 0x0040, 0xbf2: 0x0679, 0xbf3: 0x0681, 0xbf4: 0x0689, 0xbf5: 0x0040, + 0xbf6: 0x0008, 0xbf7: 0x0691, 0xbf8: 0x073d, 0xbf9: 0x0501, 0xbfa: 0x0515, 0xbfb: 0x0511, + 0xbfc: 0x0681, 0xbfd: 0x0756, 0xbfe: 0x0776, 0xbff: 0x0040, + // Block 0x30, offset 0xc00 + 0xc00: 0x000a, 0xc01: 0x000a, 0xc02: 0x000a, 0xc03: 0x000a, 0xc04: 0x000a, 0xc05: 0x000a, + 0xc06: 0x000a, 0xc07: 0x000a, 0xc08: 0x000a, 0xc09: 0x000a, 0xc0a: 0x000a, 0xc0b: 0x03c0, + 0xc0c: 0x0003, 0xc0d: 0x0003, 0xc0e: 0x0340, 0xc0f: 0x0b40, 0xc10: 0x0018, 0xc11: 0xe00d, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x0796, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 
0xc1f: 0x0018, 0xc20: 0x0018, 0xc21: 0x0018, 0xc22: 0x0018, 0xc23: 0x0018, + 0xc24: 0x0040, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0018, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x000a, + 0xc30: 0x0018, 0xc31: 0x0018, 0xc32: 0x0018, 0xc33: 0x0699, 0xc34: 0x06a1, 0xc35: 0x0018, + 0xc36: 0x06a9, 0xc37: 0x06b1, 0xc38: 0x0018, 0xc39: 0x0018, 0xc3a: 0x0018, 0xc3b: 0x0018, + 0xc3c: 0x06ba, 0xc3d: 0x0018, 0xc3e: 0x07b6, 0xc3f: 0x0018, + // Block 0x31, offset 0xc40 + 0xc40: 0x0018, 0xc41: 0x0018, 0xc42: 0x0018, 0xc43: 0x0018, 0xc44: 0x0018, 0xc45: 0x0018, + 0xc46: 0x0018, 0xc47: 0x06c2, 0xc48: 0x06ca, 0xc49: 0x06d2, 0xc4a: 0x0018, 0xc4b: 0x0018, + 0xc4c: 0x0018, 0xc4d: 0x0018, 0xc4e: 0x0018, 0xc4f: 0x0018, 0xc50: 0x0018, 0xc51: 0x0018, + 0xc52: 0x0018, 0xc53: 0x0018, 0xc54: 0x0018, 0xc55: 0x0018, 0xc56: 0x0018, 0xc57: 0x06d9, + 0xc58: 0x0018, 0xc59: 0x0018, 0xc5a: 0x0018, 0xc5b: 0x0018, 0xc5c: 0x0018, 0xc5d: 0x0018, + 0xc5e: 0x0018, 0xc5f: 0x000a, 0xc60: 0x03c0, 0xc61: 0x0340, 0xc62: 0x0340, 0xc63: 0x0340, + 0xc64: 0x03c0, 0xc65: 0x0040, 0xc66: 0x0040, 0xc67: 0x0040, 0xc68: 0x0040, 0xc69: 0x0040, + 0xc6a: 0x0340, 0xc6b: 0x0340, 0xc6c: 0x0340, 0xc6d: 0x0340, 0xc6e: 0x0340, 0xc6f: 0x0340, + 0xc70: 0x06e1, 0xc71: 0x0311, 0xc72: 0x0040, 0xc73: 0x0040, 0xc74: 0x06e9, 0xc75: 0x06f1, + 0xc76: 0x06f9, 0xc77: 0x0701, 0xc78: 0x0709, 0xc79: 0x0711, 0xc7a: 0x071a, 0xc7b: 0x07d5, + 0xc7c: 0x0722, 0xc7d: 0x072a, 0xc7e: 0x0732, 0xc7f: 0x0329, + // Block 0x32, offset 0xc80 + 0xc80: 0x06e1, 0xc81: 0x0049, 0xc82: 0x0029, 0xc83: 0x0031, 0xc84: 0x06e9, 0xc85: 0x06f1, + 0xc86: 0x06f9, 0xc87: 0x0701, 0xc88: 0x0709, 0xc89: 0x0711, 0xc8a: 0x071a, 0xc8b: 0x07ed, + 0xc8c: 0x0722, 0xc8d: 0x072a, 0xc8e: 0x0732, 0xc8f: 0x0040, 0xc90: 0x0019, 0xc91: 0x02f9, + 0xc92: 0x0051, 0xc93: 0x0109, 0xc94: 0x0361, 0xc95: 0x00a9, 0xc96: 0x0319, 0xc97: 0x0101, + 0xc98: 0x0321, 0xc99: 0x0329, 0xc9a: 0x0339, 0xc9b: 0x0089, 0xc9c: 0x0341, 0xc9d: 
0x0040, + 0xc9e: 0x0040, 0xc9f: 0x0040, 0xca0: 0x0018, 0xca1: 0x0018, 0xca2: 0x0018, 0xca3: 0x0018, + 0xca4: 0x0018, 0xca5: 0x0018, 0xca6: 0x0018, 0xca7: 0x0018, 0xca8: 0x0739, 0xca9: 0x0018, + 0xcaa: 0x0018, 0xcab: 0x0018, 0xcac: 0x0018, 0xcad: 0x0018, 0xcae: 0x0018, 0xcaf: 0x0018, + 0xcb0: 0x0018, 0xcb1: 0x0018, 0xcb2: 0x0018, 0xcb3: 0x0018, 0xcb4: 0x0018, 0xcb5: 0x0018, + 0xcb6: 0x0018, 0xcb7: 0x0018, 0xcb8: 0x0018, 0xcb9: 0x0018, 0xcba: 0x0018, 0xcbb: 0x0018, + 0xcbc: 0x0018, 0xcbd: 0x0018, 0xcbe: 0x0018, 0xcbf: 0x0018, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x0806, 0xcc1: 0x0826, 0xcc2: 0x03d9, 0xcc3: 0x0845, 0xcc4: 0x0018, 0xcc5: 0x0866, + 0xcc6: 0x0886, 0xcc7: 0x0369, 0xcc8: 0x0018, 0xcc9: 0x08a5, 0xcca: 0x0309, 0xccb: 0x00a9, + 0xccc: 0x00a9, 0xccd: 0x00a9, 0xcce: 0x00a9, 0xccf: 0x0741, 0xcd0: 0x0311, 0xcd1: 0x0311, + 0xcd2: 0x0101, 0xcd3: 0x0101, 0xcd4: 0x0018, 0xcd5: 0x0329, 0xcd6: 0x0749, 0xcd7: 0x0018, + 0xcd8: 0x0018, 0xcd9: 0x0339, 0xcda: 0x0751, 0xcdb: 0x00b9, 0xcdc: 0x00b9, 0xcdd: 0x00b9, + 0xcde: 0x0018, 0xcdf: 0x0018, 0xce0: 0x0759, 0xce1: 0x08c5, 0xce2: 0x0761, 0xce3: 0x0018, + 0xce4: 0x04b1, 0xce5: 0x0018, 0xce6: 0x0769, 0xce7: 0x0018, 0xce8: 0x04b1, 0xce9: 0x0018, + 0xcea: 0x0319, 0xceb: 0x0771, 0xcec: 0x02e9, 0xced: 0x03d9, 0xcee: 0x0018, 0xcef: 0x02f9, + 0xcf0: 0x02f9, 0xcf1: 0x03f1, 0xcf2: 0x0040, 0xcf3: 0x0321, 0xcf4: 0x0051, 0xcf5: 0x0779, + 0xcf6: 0x0781, 0xcf7: 0x0789, 0xcf8: 0x0791, 0xcf9: 0x0311, 0xcfa: 0x0018, 0xcfb: 0x08e5, + 0xcfc: 0x0799, 0xcfd: 0x03a1, 0xcfe: 0x03a1, 0xcff: 0x0799, + // Block 0x34, offset 0xd00 + 0xd00: 0x0905, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x02f1, + 0xd06: 0x02f1, 0xd07: 0x02f9, 0xd08: 0x0311, 0xd09: 0x00b1, 0xd0a: 0x0018, 0xd0b: 0x0018, + 0xd0c: 0x0018, 0xd0d: 0x0018, 0xd0e: 0x0008, 0xd0f: 0x0018, 0xd10: 0x07a1, 0xd11: 0x07a9, + 0xd12: 0x07b1, 0xd13: 0x07b9, 0xd14: 0x07c1, 0xd15: 0x07c9, 0xd16: 0x07d1, 0xd17: 0x07d9, + 0xd18: 0x07e1, 0xd19: 0x07e9, 0xd1a: 0x07f1, 0xd1b: 
0x07f9, 0xd1c: 0x0801, 0xd1d: 0x0809, + 0xd1e: 0x0811, 0xd1f: 0x0819, 0xd20: 0x0311, 0xd21: 0x0821, 0xd22: 0x091d, 0xd23: 0x0829, + 0xd24: 0x0391, 0xd25: 0x0831, 0xd26: 0x093d, 0xd27: 0x0839, 0xd28: 0x0841, 0xd29: 0x0109, + 0xd2a: 0x0849, 0xd2b: 0x095d, 0xd2c: 0x0101, 0xd2d: 0x03d9, 0xd2e: 0x02f1, 0xd2f: 0x0321, + 0xd30: 0x0311, 0xd31: 0x0821, 0xd32: 0x097d, 0xd33: 0x0829, 0xd34: 0x0391, 0xd35: 0x0831, + 0xd36: 0x099d, 0xd37: 0x0839, 0xd38: 0x0841, 0xd39: 0x0109, 0xd3a: 0x0849, 0xd3b: 0x09bd, + 0xd3c: 0x0101, 0xd3d: 0x03d9, 0xd3e: 0x02f1, 0xd3f: 0x0321, + // Block 0x35, offset 0xd40 + 0xd40: 0x0018, 0xd41: 0x0018, 0xd42: 0x0018, 0xd43: 0x0018, 0xd44: 0x0018, 0xd45: 0x0018, + 0xd46: 0x0018, 0xd47: 0x0018, 0xd48: 0x0018, 0xd49: 0x0018, 0xd4a: 0x0018, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0040, 0xd5d: 0x0040, + 0xd5e: 0x0040, 0xd5f: 0x0040, 0xd60: 0x0049, 0xd61: 0x0029, 0xd62: 0x0031, 0xd63: 0x06e9, + 0xd64: 0x06f1, 0xd65: 0x06f9, 0xd66: 0x0701, 0xd67: 0x0709, 0xd68: 0x0711, 0xd69: 0x0879, + 0xd6a: 0x0881, 0xd6b: 0x0889, 0xd6c: 0x0891, 0xd6d: 0x0899, 0xd6e: 0x08a1, 0xd6f: 0x08a9, + 0xd70: 0x08b1, 0xd71: 0x08b9, 0xd72: 0x08c1, 0xd73: 0x08c9, 0xd74: 0x0a1e, 0xd75: 0x0a3e, + 0xd76: 0x0a5e, 0xd77: 0x0a7e, 0xd78: 0x0a9e, 0xd79: 0x0abe, 0xd7a: 0x0ade, 0xd7b: 0x0afe, + 0xd7c: 0x0b1e, 0xd7d: 0x08d2, 0xd7e: 0x08da, 0xd7f: 0x08e2, + // Block 0x36, offset 0xd80 + 0xd80: 0x08ea, 0xd81: 0x08f2, 0xd82: 0x08fa, 0xd83: 0x0902, 0xd84: 0x090a, 0xd85: 0x0912, + 0xd86: 0x091a, 0xd87: 0x0922, 0xd88: 0x0040, 0xd89: 0x0040, 0xd8a: 0x0040, 0xd8b: 0x0040, + 0xd8c: 0x0040, 0xd8d: 0x0040, 0xd8e: 0x0040, 0xd8f: 0x0040, 0xd90: 0x0040, 0xd91: 0x0040, + 0xd92: 0x0040, 0xd93: 0x0040, 0xd94: 0x0040, 0xd95: 0x0040, 0xd96: 0x0040, 0xd97: 0x0040, + 0xd98: 0x0040, 0xd99: 
0x0040, 0xd9a: 0x0040, 0xd9b: 0x0040, 0xd9c: 0x0b3e, 0xd9d: 0x0b5e, + 0xd9e: 0x0b7e, 0xd9f: 0x0b9e, 0xda0: 0x0bbe, 0xda1: 0x0bde, 0xda2: 0x0bfe, 0xda3: 0x0c1e, + 0xda4: 0x0c3e, 0xda5: 0x0c5e, 0xda6: 0x0c7e, 0xda7: 0x0c9e, 0xda8: 0x0cbe, 0xda9: 0x0cde, + 0xdaa: 0x0cfe, 0xdab: 0x0d1e, 0xdac: 0x0d3e, 0xdad: 0x0d5e, 0xdae: 0x0d7e, 0xdaf: 0x0d9e, + 0xdb0: 0x0dbe, 0xdb1: 0x0dde, 0xdb2: 0x0dfe, 0xdb3: 0x0e1e, 0xdb4: 0x0e3e, 0xdb5: 0x0e5e, + 0xdb6: 0x0019, 0xdb7: 0x02e9, 0xdb8: 0x03d9, 0xdb9: 0x02f1, 0xdba: 0x02f9, 0xdbb: 0x03f1, + 0xdbc: 0x0309, 0xdbd: 0x00a9, 0xdbe: 0x0311, 0xdbf: 0x00b1, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0319, 0xdc1: 0x0101, 0xdc2: 0x0321, 0xdc3: 0x0329, 0xdc4: 0x0051, 0xdc5: 0x0339, + 0xdc6: 0x0751, 0xdc7: 0x00b9, 0xdc8: 0x0089, 0xdc9: 0x0341, 0xdca: 0x0349, 0xdcb: 0x0391, + 0xdcc: 0x00c1, 0xdcd: 0x0109, 0xdce: 0x00c9, 0xdcf: 0x04b1, 0xdd0: 0x0019, 0xdd1: 0x02e9, + 0xdd2: 0x03d9, 0xdd3: 0x02f1, 0xdd4: 0x02f9, 0xdd5: 0x03f1, 0xdd6: 0x0309, 0xdd7: 0x00a9, + 0xdd8: 0x0311, 0xdd9: 0x00b1, 0xdda: 0x0319, 0xddb: 0x0101, 0xddc: 0x0321, 0xddd: 0x0329, + 0xdde: 0x0051, 0xddf: 0x0339, 0xde0: 0x0751, 0xde1: 0x00b9, 0xde2: 0x0089, 0xde3: 0x0341, + 0xde4: 0x0349, 0xde5: 0x0391, 0xde6: 0x00c1, 0xde7: 0x0109, 0xde8: 0x00c9, 0xde9: 0x04b1, + 0xdea: 0x06e1, 0xdeb: 0x0018, 0xdec: 0x0018, 0xded: 0x0018, 0xdee: 0x0018, 0xdef: 0x0018, + 0xdf0: 0x0018, 0xdf1: 0x0018, 0xdf2: 0x0018, 0xdf3: 0x0018, 0xdf4: 0x0018, 0xdf5: 0x0018, + 0xdf6: 0x0018, 0xdf7: 0x0018, 0xdf8: 0x0018, 0xdf9: 0x0018, 0xdfa: 0x0018, 0xdfb: 0x0018, + 0xdfc: 0x0018, 0xdfd: 0x0018, 0xdfe: 0x0018, 0xdff: 0x0018, + // Block 0x38, offset 0xe00 + 0xe00: 0x0008, 0xe01: 0x0008, 0xe02: 0x0008, 0xe03: 0x0008, 0xe04: 0x0008, 0xe05: 0x0008, + 0xe06: 0x0008, 0xe07: 0x0008, 0xe08: 0x0008, 0xe09: 0x0008, 0xe0a: 0x0008, 0xe0b: 0x0008, + 0xe0c: 0x0008, 0xe0d: 0x0008, 0xe0e: 0x0008, 0xe0f: 0x0008, 0xe10: 0x0008, 0xe11: 0x0008, + 0xe12: 0x0008, 0xe13: 0x0008, 0xe14: 0x0008, 0xe15: 0x0008, 0xe16: 0x0008, 0xe17: 0x0008, 
+ 0xe18: 0x0008, 0xe19: 0x0008, 0xe1a: 0x0008, 0xe1b: 0x0008, 0xe1c: 0x0008, 0xe1d: 0x0008, + 0xe1e: 0x0008, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0x0941, 0xe23: 0x0ed5, + 0xe24: 0x0949, 0xe25: 0x0008, 0xe26: 0x0008, 0xe27: 0xe07d, 0xe28: 0x0008, 0xe29: 0xe01d, + 0xe2a: 0x0008, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0x0359, 0xe2e: 0x0441, 0xe2f: 0x0351, + 0xe30: 0x03d1, 0xe31: 0x0008, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0008, 0xe35: 0xe01d, + 0xe36: 0x0008, 0xe37: 0x0008, 0xe38: 0x0008, 0xe39: 0x0008, 0xe3a: 0x0008, 0xe3b: 0x0008, + 0xe3c: 0x00b1, 0xe3d: 0x0391, 0xe3e: 0x0951, 0xe3f: 0x0959, + // Block 0x39, offset 0xe40 + 0xe40: 0xe00d, 0xe41: 0x0008, 0xe42: 0xe00d, 0xe43: 0x0008, 0xe44: 0xe00d, 0xe45: 0x0008, + 0xe46: 0xe00d, 0xe47: 0x0008, 0xe48: 0xe00d, 0xe49: 0x0008, 0xe4a: 0xe00d, 0xe4b: 0x0008, + 0xe4c: 0xe00d, 0xe4d: 0x0008, 0xe4e: 0xe00d, 0xe4f: 0x0008, 0xe50: 0xe00d, 0xe51: 0x0008, + 0xe52: 0xe00d, 0xe53: 0x0008, 0xe54: 0xe00d, 0xe55: 0x0008, 0xe56: 0xe00d, 0xe57: 0x0008, + 0xe58: 0xe00d, 0xe59: 0x0008, 0xe5a: 0xe00d, 0xe5b: 0x0008, 0xe5c: 0xe00d, 0xe5d: 0x0008, + 0xe5e: 0xe00d, 0xe5f: 0x0008, 0xe60: 0xe00d, 0xe61: 0x0008, 0xe62: 0xe00d, 0xe63: 0x0008, + 0xe64: 0x0008, 0xe65: 0x0018, 0xe66: 0x0018, 0xe67: 0x0018, 0xe68: 0x0018, 0xe69: 0x0018, + 0xe6a: 0x0018, 0xe6b: 0xe03d, 0xe6c: 0x0008, 0xe6d: 0xe01d, 0xe6e: 0x0008, 0xe6f: 0x3308, + 0xe70: 0x3308, 0xe71: 0x3308, 0xe72: 0xe00d, 0xe73: 0x0008, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0018, 0xe7a: 0x0018, 0xe7b: 0x0018, + 0xe7c: 0x0018, 0xe7d: 0x0018, 0xe7e: 0x0018, 0xe7f: 0x0018, + // Block 0x3a, offset 0xe80 + 0xe80: 0x2715, 0xe81: 0x2735, 0xe82: 0x2755, 0xe83: 0x2775, 0xe84: 0x2795, 0xe85: 0x27b5, + 0xe86: 0x27d5, 0xe87: 0x27f5, 0xe88: 0x2815, 0xe89: 0x2835, 0xe8a: 0x2855, 0xe8b: 0x2875, + 0xe8c: 0x2895, 0xe8d: 0x28b5, 0xe8e: 0x28d5, 0xe8f: 0x28f5, 0xe90: 0x2915, 0xe91: 0x2935, + 0xe92: 0x2955, 0xe93: 0x2975, 0xe94: 0x2995, 0xe95: 0x29b5, 
0xe96: 0x0040, 0xe97: 0x0040, + 0xe98: 0x0040, 0xe99: 0x0040, 0xe9a: 0x0040, 0xe9b: 0x0040, 0xe9c: 0x0040, 0xe9d: 0x0040, + 0xe9e: 0x0040, 0xe9f: 0x0040, 0xea0: 0x0040, 0xea1: 0x0040, 0xea2: 0x0040, 0xea3: 0x0040, + 0xea4: 0x0040, 0xea5: 0x0040, 0xea6: 0x0040, 0xea7: 0x0040, 0xea8: 0x0040, 0xea9: 0x0040, + 0xeaa: 0x0040, 0xeab: 0x0040, 0xeac: 0x0040, 0xead: 0x0040, 0xeae: 0x0040, 0xeaf: 0x0040, + 0xeb0: 0x0040, 0xeb1: 0x0040, 0xeb2: 0x0040, 0xeb3: 0x0040, 0xeb4: 0x0040, 0xeb5: 0x0040, + 0xeb6: 0x0040, 0xeb7: 0x0040, 0xeb8: 0x0040, 0xeb9: 0x0040, 0xeba: 0x0040, 0xebb: 0x0040, + 0xebc: 0x0040, 0xebd: 0x0040, 0xebe: 0x0040, 0xebf: 0x0040, + // Block 0x3b, offset 0xec0 + 0xec0: 0x000a, 0xec1: 0x0018, 0xec2: 0x0961, 0xec3: 0x0018, 0xec4: 0x0018, 0xec5: 0x0008, + 0xec6: 0x0008, 0xec7: 0x0008, 0xec8: 0x0018, 0xec9: 0x0018, 0xeca: 0x0018, 0xecb: 0x0018, + 0xecc: 0x0018, 0xecd: 0x0018, 0xece: 0x0018, 0xecf: 0x0018, 0xed0: 0x0018, 0xed1: 0x0018, + 0xed2: 0x0018, 0xed3: 0x0018, 0xed4: 0x0018, 0xed5: 0x0018, 0xed6: 0x0018, 0xed7: 0x0018, + 0xed8: 0x0018, 0xed9: 0x0018, 0xeda: 0x0018, 0xedb: 0x0018, 0xedc: 0x0018, 0xedd: 0x0018, + 0xede: 0x0018, 0xedf: 0x0018, 0xee0: 0x0018, 0xee1: 0x0018, 0xee2: 0x0018, 0xee3: 0x0018, + 0xee4: 0x0018, 0xee5: 0x0018, 0xee6: 0x0018, 0xee7: 0x0018, 0xee8: 0x0018, 0xee9: 0x0018, + 0xeea: 0x3308, 0xeeb: 0x3308, 0xeec: 0x3308, 0xeed: 0x3308, 0xeee: 0x3018, 0xeef: 0x3018, + 0xef0: 0x0018, 0xef1: 0x0018, 0xef2: 0x0018, 0xef3: 0x0018, 0xef4: 0x0018, 0xef5: 0x0018, + 0xef6: 0xe125, 0xef7: 0x0018, 0xef8: 0x29d5, 0xef9: 0x29f5, 0xefa: 0x2a15, 0xefb: 0x0018, + 0xefc: 0x0008, 0xefd: 0x0018, 0xefe: 0x0018, 0xeff: 0x0018, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2b55, 0xf01: 0x2b75, 0xf02: 0x2b95, 0xf03: 0x2bb5, 0xf04: 0x2bd5, 0xf05: 0x2bf5, + 0xf06: 0x2bf5, 0xf07: 0x2bf5, 0xf08: 0x2c15, 0xf09: 0x2c15, 0xf0a: 0x2c15, 0xf0b: 0x2c15, + 0xf0c: 0x2c35, 0xf0d: 0x2c35, 0xf0e: 0x2c35, 0xf0f: 0x2c55, 0xf10: 0x2c75, 0xf11: 0x2c75, + 0xf12: 0x2a95, 0xf13: 0x2a95, 
0xf14: 0x2c75, 0xf15: 0x2c75, 0xf16: 0x2c95, 0xf17: 0x2c95, + 0xf18: 0x2c75, 0xf19: 0x2c75, 0xf1a: 0x2a95, 0xf1b: 0x2a95, 0xf1c: 0x2c75, 0xf1d: 0x2c75, + 0xf1e: 0x2c55, 0xf1f: 0x2c55, 0xf20: 0x2cb5, 0xf21: 0x2cb5, 0xf22: 0x2cd5, 0xf23: 0x2cd5, + 0xf24: 0x0040, 0xf25: 0x2cf5, 0xf26: 0x2d15, 0xf27: 0x2d35, 0xf28: 0x2d35, 0xf29: 0x2d55, + 0xf2a: 0x2d75, 0xf2b: 0x2d95, 0xf2c: 0x2db5, 0xf2d: 0x2dd5, 0xf2e: 0x2df5, 0xf2f: 0x2e15, + 0xf30: 0x2e35, 0xf31: 0x2e55, 0xf32: 0x2e55, 0xf33: 0x2e75, 0xf34: 0x2e95, 0xf35: 0x2e95, + 0xf36: 0x2eb5, 0xf37: 0x2ed5, 0xf38: 0x2e75, 0xf39: 0x2ef5, 0xf3a: 0x2f15, 0xf3b: 0x2ef5, + 0xf3c: 0x2e75, 0xf3d: 0x2f35, 0xf3e: 0x2f55, 0xf3f: 0x2f75, + // Block 0x3d, offset 0xf40 + 0xf40: 0x2f95, 0xf41: 0x2fb5, 0xf42: 0x2d15, 0xf43: 0x2cf5, 0xf44: 0x2fd5, 0xf45: 0x2ff5, + 0xf46: 0x3015, 0xf47: 0x3035, 0xf48: 0x3055, 0xf49: 0x3075, 0xf4a: 0x3095, 0xf4b: 0x30b5, + 0xf4c: 0x30d5, 0xf4d: 0x30f5, 0xf4e: 0x3115, 0xf4f: 0x0040, 0xf50: 0x0018, 0xf51: 0x0018, + 0xf52: 0x3135, 0xf53: 0x3155, 0xf54: 0x3175, 0xf55: 0x3195, 0xf56: 0x31b5, 0xf57: 0x31d5, + 0xf58: 0x31f5, 0xf59: 0x3215, 0xf5a: 0x3235, 0xf5b: 0x3255, 0xf5c: 0x3175, 0xf5d: 0x3275, + 0xf5e: 0x3295, 0xf5f: 0x32b5, 0xf60: 0x0008, 0xf61: 0x0008, 0xf62: 0x0008, 0xf63: 0x0008, + 0xf64: 0x0008, 0xf65: 0x0008, 0xf66: 0x0008, 0xf67: 0x0008, 0xf68: 0x0008, 0xf69: 0x0008, + 0xf6a: 0x0008, 0xf6b: 0x0008, 0xf6c: 0x0008, 0xf6d: 0x0008, 0xf6e: 0x0008, 0xf6f: 0x0008, + 0xf70: 0x0008, 0xf71: 0x0008, 0xf72: 0x0008, 0xf73: 0x0008, 0xf74: 0x0008, 0xf75: 0x0008, + 0xf76: 0x0008, 0xf77: 0x0008, 0xf78: 0x0008, 0xf79: 0x0008, 0xf7a: 0x0008, 0xf7b: 0x0008, + 0xf7c: 0x0008, 0xf7d: 0x0008, 0xf7e: 0x0008, 0xf7f: 0x0008, + // Block 0x3e, offset 0xf80 + 0xf80: 0x0b82, 0xf81: 0x0b8a, 0xf82: 0x0b92, 0xf83: 0x0b9a, 0xf84: 0x32d5, 0xf85: 0x32f5, + 0xf86: 0x3315, 0xf87: 0x3335, 0xf88: 0x0018, 0xf89: 0x0018, 0xf8a: 0x0018, 0xf8b: 0x0018, + 0xf8c: 0x0018, 0xf8d: 0x0018, 0xf8e: 0x0018, 0xf8f: 0x0018, 0xf90: 0x3355, 0xf91: 0x0ba1, + 
0xf92: 0x0ba9, 0xf93: 0x0bb1, 0xf94: 0x0bb9, 0xf95: 0x0bc1, 0xf96: 0x0bc9, 0xf97: 0x0bd1, + 0xf98: 0x0bd9, 0xf99: 0x0be1, 0xf9a: 0x0be9, 0xf9b: 0x0bf1, 0xf9c: 0x0bf9, 0xf9d: 0x0c01, + 0xf9e: 0x0c09, 0xf9f: 0x0c11, 0xfa0: 0x3375, 0xfa1: 0x3395, 0xfa2: 0x33b5, 0xfa3: 0x33d5, + 0xfa4: 0x33f5, 0xfa5: 0x33f5, 0xfa6: 0x3415, 0xfa7: 0x3435, 0xfa8: 0x3455, 0xfa9: 0x3475, + 0xfaa: 0x3495, 0xfab: 0x34b5, 0xfac: 0x34d5, 0xfad: 0x34f5, 0xfae: 0x3515, 0xfaf: 0x3535, + 0xfb0: 0x3555, 0xfb1: 0x3575, 0xfb2: 0x3595, 0xfb3: 0x35b5, 0xfb4: 0x35d5, 0xfb5: 0x35f5, + 0xfb6: 0x3615, 0xfb7: 0x3635, 0xfb8: 0x3655, 0xfb9: 0x3675, 0xfba: 0x3695, 0xfbb: 0x36b5, + 0xfbc: 0x0c19, 0xfbd: 0x0c21, 0xfbe: 0x36d5, 0xfbf: 0x0018, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x36f5, 0xfc1: 0x3715, 0xfc2: 0x3735, 0xfc3: 0x3755, 0xfc4: 0x3775, 0xfc5: 0x3795, + 0xfc6: 0x37b5, 0xfc7: 0x37d5, 0xfc8: 0x37f5, 0xfc9: 0x3815, 0xfca: 0x3835, 0xfcb: 0x3855, + 0xfcc: 0x3875, 0xfcd: 0x3895, 0xfce: 0x38b5, 0xfcf: 0x38d5, 0xfd0: 0x38f5, 0xfd1: 0x3915, + 0xfd2: 0x3935, 0xfd3: 0x3955, 0xfd4: 0x3975, 0xfd5: 0x3995, 0xfd6: 0x39b5, 0xfd7: 0x39d5, + 0xfd8: 0x39f5, 0xfd9: 0x3a15, 0xfda: 0x3a35, 0xfdb: 0x3a55, 0xfdc: 0x3a75, 0xfdd: 0x3a95, + 0xfde: 0x3ab5, 0xfdf: 0x3ad5, 0xfe0: 0x3af5, 0xfe1: 0x3b15, 0xfe2: 0x3b35, 0xfe3: 0x3b55, + 0xfe4: 0x3b75, 0xfe5: 0x3b95, 0xfe6: 0x1295, 0xfe7: 0x3bb5, 0xfe8: 0x3bd5, 0xfe9: 0x3bf5, + 0xfea: 0x3c15, 0xfeb: 0x3c35, 0xfec: 0x3c55, 0xfed: 0x3c75, 0xfee: 0x23b5, 0xfef: 0x3c95, + 0xff0: 0x3cb5, 0xff1: 0x0c29, 0xff2: 0x0c31, 0xff3: 0x0c39, 0xff4: 0x0c41, 0xff5: 0x0c49, + 0xff6: 0x0c51, 0xff7: 0x0c59, 0xff8: 0x0c61, 0xff9: 0x0c69, 0xffa: 0x0c71, 0xffb: 0x0c79, + 0xffc: 0x0c81, 0xffd: 0x0c89, 0xffe: 0x0c91, 0xfff: 0x0c99, + // Block 0x40, offset 0x1000 + 0x1000: 0x0ca1, 0x1001: 0x0ca9, 0x1002: 0x0cb1, 0x1003: 0x0cb9, 0x1004: 0x0cc1, 0x1005: 0x0cc9, + 0x1006: 0x0cd1, 0x1007: 0x0cd9, 0x1008: 0x0ce1, 0x1009: 0x0ce9, 0x100a: 0x0cf1, 0x100b: 0x0cf9, + 0x100c: 0x0d01, 0x100d: 0x3cd5, 0x100e: 0x0d09, 
0x100f: 0x3cf5, 0x1010: 0x3d15, 0x1011: 0x3d2d, + 0x1012: 0x3d45, 0x1013: 0x3d5d, 0x1014: 0x3d75, 0x1015: 0x3d75, 0x1016: 0x3d5d, 0x1017: 0x3d8d, + 0x1018: 0x07d5, 0x1019: 0x3da5, 0x101a: 0x3dbd, 0x101b: 0x3dd5, 0x101c: 0x3ded, 0x101d: 0x3e05, + 0x101e: 0x3e1d, 0x101f: 0x3e35, 0x1020: 0x3e4d, 0x1021: 0x3e65, 0x1022: 0x3e7d, 0x1023: 0x3e95, + 0x1024: 0x3ead, 0x1025: 0x3ead, 0x1026: 0x3ec5, 0x1027: 0x3ec5, 0x1028: 0x3edd, 0x1029: 0x3edd, + 0x102a: 0x3ef5, 0x102b: 0x3f0d, 0x102c: 0x3f25, 0x102d: 0x3f3d, 0x102e: 0x3f55, 0x102f: 0x3f55, + 0x1030: 0x3f6d, 0x1031: 0x3f6d, 0x1032: 0x3f6d, 0x1033: 0x3f85, 0x1034: 0x3f9d, 0x1035: 0x3fb5, + 0x1036: 0x3fcd, 0x1037: 0x3fb5, 0x1038: 0x3fe5, 0x1039: 0x3ffd, 0x103a: 0x3f85, 0x103b: 0x4015, + 0x103c: 0x402d, 0x103d: 0x402d, 0x103e: 0x402d, 0x103f: 0x0d11, + // Block 0x41, offset 0x1040 + 0x1040: 0x10f9, 0x1041: 0x1101, 0x1042: 0x40a5, 0x1043: 0x1109, 0x1044: 0x1111, 0x1045: 0x1119, + 0x1046: 0x1121, 0x1047: 0x1129, 0x1048: 0x40c5, 0x1049: 0x1131, 0x104a: 0x1139, 0x104b: 0x1141, + 0x104c: 0x40e5, 0x104d: 0x40e5, 0x104e: 0x1149, 0x104f: 0x1151, 0x1050: 0x1159, 0x1051: 0x4105, + 0x1052: 0x4125, 0x1053: 0x4145, 0x1054: 0x4165, 0x1055: 0x4185, 0x1056: 0x1161, 0x1057: 0x1169, + 0x1058: 0x1171, 0x1059: 0x1179, 0x105a: 0x1181, 0x105b: 0x41a5, 0x105c: 0x1189, 0x105d: 0x1191, + 0x105e: 0x1199, 0x105f: 0x41c5, 0x1060: 0x41e5, 0x1061: 0x11a1, 0x1062: 0x4205, 0x1063: 0x4225, + 0x1064: 0x4245, 0x1065: 0x11a9, 0x1066: 0x4265, 0x1067: 0x11b1, 0x1068: 0x11b9, 0x1069: 0x10f9, + 0x106a: 0x4285, 0x106b: 0x42a5, 0x106c: 0x42c5, 0x106d: 0x42e5, 0x106e: 0x11c1, 0x106f: 0x11c9, + 0x1070: 0x11d1, 0x1071: 0x11d9, 0x1072: 0x4305, 0x1073: 0x11e1, 0x1074: 0x11e9, 0x1075: 0x11f1, + 0x1076: 0x4325, 0x1077: 0x11f9, 0x1078: 0x1201, 0x1079: 0x11f9, 0x107a: 0x1209, 0x107b: 0x1211, + 0x107c: 0x4345, 0x107d: 0x1219, 0x107e: 0x1221, 0x107f: 0x1219, + // Block 0x42, offset 0x1080 + 0x1080: 0x4365, 0x1081: 0x4385, 0x1082: 0x0040, 0x1083: 0x1229, 0x1084: 0x1231, 0x1085: 
0x1239, + 0x1086: 0x1241, 0x1087: 0x0040, 0x1088: 0x1249, 0x1089: 0x1251, 0x108a: 0x1259, 0x108b: 0x1261, + 0x108c: 0x1269, 0x108d: 0x1271, 0x108e: 0x1199, 0x108f: 0x1279, 0x1090: 0x1281, 0x1091: 0x1289, + 0x1092: 0x43a5, 0x1093: 0x1291, 0x1094: 0x1121, 0x1095: 0x43c5, 0x1096: 0x43e5, 0x1097: 0x1299, + 0x1098: 0x0040, 0x1099: 0x4405, 0x109a: 0x12a1, 0x109b: 0x12a9, 0x109c: 0x12b1, 0x109d: 0x12b9, + 0x109e: 0x12c1, 0x109f: 0x12c9, 0x10a0: 0x12d1, 0x10a1: 0x12d9, 0x10a2: 0x12e1, 0x10a3: 0x12e9, + 0x10a4: 0x12f1, 0x10a5: 0x12f9, 0x10a6: 0x1301, 0x10a7: 0x1309, 0x10a8: 0x1311, 0x10a9: 0x1319, + 0x10aa: 0x1321, 0x10ab: 0x1329, 0x10ac: 0x1331, 0x10ad: 0x1339, 0x10ae: 0x1341, 0x10af: 0x1349, + 0x10b0: 0x1351, 0x10b1: 0x1359, 0x10b2: 0x1361, 0x10b3: 0x1369, 0x10b4: 0x1371, 0x10b5: 0x1379, + 0x10b6: 0x1381, 0x10b7: 0x1389, 0x10b8: 0x1391, 0x10b9: 0x1399, 0x10ba: 0x13a1, 0x10bb: 0x13a9, + 0x10bc: 0x13b1, 0x10bd: 0x13b9, 0x10be: 0x13c1, 0x10bf: 0x4425, + // Block 0x43, offset 0x10c0 + 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, + 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, + 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, + 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, + 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0xe00d, 0x10dd: 0x0008, + 0x10de: 0xe00d, 0x10df: 0x0008, 0x10e0: 0xe00d, 0x10e1: 0x0008, 0x10e2: 0xe00d, 0x10e3: 0x0008, + 0x10e4: 0xe00d, 0x10e5: 0x0008, 0x10e6: 0xe00d, 0x10e7: 0x0008, 0x10e8: 0xe00d, 0x10e9: 0x0008, + 0x10ea: 0xe00d, 0x10eb: 0x0008, 0x10ec: 0xe00d, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x3308, + 0x10f0: 0x3318, 0x10f1: 0x3318, 0x10f2: 0x3318, 0x10f3: 0x0018, 0x10f4: 0x3308, 0x10f5: 0x3308, + 0x10f6: 0x3308, 0x10f7: 0x3308, 0x10f8: 0x3308, 0x10f9: 0x3308, 0x10fa: 0x3308, 0x10fb: 0x3308, + 0x10fc: 0x3308, 0x10fd: 
0x3308, 0x10fe: 0x0018, 0x10ff: 0x0008, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0x02d1, 0x111d: 0x13c9, + 0x111e: 0x3308, 0x111f: 0x3308, 0x1120: 0x0008, 0x1121: 0x0008, 0x1122: 0x0008, 0x1123: 0x0008, + 0x1124: 0x0008, 0x1125: 0x0008, 0x1126: 0x0008, 0x1127: 0x0008, 0x1128: 0x0008, 0x1129: 0x0008, + 0x112a: 0x0008, 0x112b: 0x0008, 0x112c: 0x0008, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x0008, + 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0x0008, 0x1133: 0x0008, 0x1134: 0x0008, 0x1135: 0x0008, + 0x1136: 0x0008, 0x1137: 0x0008, 0x1138: 0x0008, 0x1139: 0x0008, 0x113a: 0x0008, 0x113b: 0x0008, + 0x113c: 0x0008, 0x113d: 0x0008, 0x113e: 0x0008, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0x0018, 0x1141: 0x0018, 0x1142: 0x0018, 0x1143: 0x0018, 0x1144: 0x0018, 0x1145: 0x0018, + 0x1146: 0x0018, 0x1147: 0x0018, 0x1148: 0x0018, 0x1149: 0x0018, 0x114a: 0x0018, 0x114b: 0x0018, + 0x114c: 0x0018, 0x114d: 0x0018, 0x114e: 0x0018, 0x114f: 0x0018, 0x1150: 0x0018, 0x1151: 0x0018, + 0x1152: 0x0018, 0x1153: 0x0018, 0x1154: 0x0018, 0x1155: 0x0018, 0x1156: 0x0018, 0x1157: 0x0008, + 0x1158: 0x0008, 0x1159: 0x0008, 0x115a: 0x0008, 0x115b: 0x0008, 0x115c: 0x0008, 0x115d: 0x0008, + 0x115e: 0x0008, 0x115f: 0x0008, 0x1160: 0x0018, 0x1161: 0x0018, 0x1162: 0xe00d, 0x1163: 0x0008, + 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, + 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0xe00d, 0x1173: 0x0008, 
0x1174: 0xe00d, 0x1175: 0x0008, + 0x1176: 0xe00d, 0x1177: 0x0008, 0x1178: 0xe00d, 0x1179: 0x0008, 0x117a: 0xe00d, 0x117b: 0x0008, + 0x117c: 0xe00d, 0x117d: 0x0008, 0x117e: 0xe00d, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, + 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0xe00d, 0x1189: 0x0008, 0x118a: 0xe00d, 0x118b: 0x0008, + 0x118c: 0xe00d, 0x118d: 0x0008, 0x118e: 0xe00d, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, + 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0xe00d, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, + 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, + 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0xe0fd, 0x11b1: 0x0008, 0x11b2: 0x0008, 0x11b3: 0x0008, 0x11b4: 0x0008, 0x11b5: 0x0008, + 0x11b6: 0x0008, 0x11b7: 0x0008, 0x11b8: 0x0008, 0x11b9: 0xe01d, 0x11ba: 0x0008, 0x11bb: 0xe03d, + 0x11bc: 0x0008, 0x11bd: 0x4445, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0x0008, 0x11c9: 0x0018, 0x11ca: 0x0018, 0x11cb: 0xe03d, + 0x11cc: 0x0008, 0x11cd: 0x0409, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 
0x13d1, 0x11eb: 0x0371, 0x11ec: 0x0401, 0x11ed: 0x13d9, 0x11ee: 0x0421, 0x11ef: 0x0008, + 0x11f0: 0x13e1, 0x11f1: 0x13e9, 0x11f2: 0x0429, 0x11f3: 0x4465, 0x11f4: 0xe00d, 0x11f5: 0x0008, + 0x11f6: 0xe00d, 0x11f7: 0x0008, 0x11f8: 0xe00d, 0x11f9: 0x0008, 0x11fa: 0xe00d, 0x11fb: 0x0008, + 0x11fc: 0xe00d, 0x11fd: 0x0008, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0x03f5, 0x1205: 0x0479, + 0x1206: 0x447d, 0x1207: 0xe07d, 0x1208: 0x0008, 0x1209: 0xe01d, 0x120a: 0x0008, 0x120b: 0x0040, + 0x120c: 0x0040, 0x120d: 0x0040, 0x120e: 0x0040, 0x120f: 0x0040, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0x0040, 0x1213: 0x0008, 0x1214: 0x0040, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0x0040, 0x121b: 0x0040, 0x121c: 0x0040, 0x121d: 0x0040, + 0x121e: 0x0040, 0x121f: 0x0040, 0x1220: 0x0040, 0x1221: 0x0040, 0x1222: 0x0040, 0x1223: 0x0040, + 0x1224: 0x0040, 0x1225: 0x0040, 0x1226: 0x0040, 0x1227: 0x0040, 0x1228: 0x0040, 0x1229: 0x0040, + 0x122a: 0x0040, 0x122b: 0x0040, 0x122c: 0x0040, 0x122d: 0x0040, 0x122e: 0x0040, 0x122f: 0x0040, + 0x1230: 0x0040, 0x1231: 0x0040, 0x1232: 0x03d9, 0x1233: 0x03f1, 0x1234: 0x0751, 0x1235: 0xe01d, + 0x1236: 0x0008, 0x1237: 0x0008, 0x1238: 0x0741, 0x1239: 0x13f1, 0x123a: 0x0008, 0x123b: 0x0008, + 0x123c: 0x0008, 0x123d: 0x0008, 0x123e: 0x0008, 0x123f: 0x0008, + // Block 0x49, offset 0x1240 + 0x1240: 0x650d, 0x1241: 0x652d, 0x1242: 0x654d, 0x1243: 0x656d, 0x1244: 0x658d, 0x1245: 0x65ad, + 0x1246: 0x65cd, 0x1247: 0x65ed, 0x1248: 0x660d, 0x1249: 0x662d, 0x124a: 0x664d, 0x124b: 0x666d, + 0x124c: 0x668d, 0x124d: 0x66ad, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x66cd, 0x1251: 0x0008, + 0x1252: 0x66ed, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x670d, 0x1256: 0x672d, 0x1257: 0x674d, + 0x1258: 0x676d, 0x1259: 0x678d, 0x125a: 0x67ad, 0x125b: 0x67cd, 0x125c: 0x67ed, 0x125d: 0x680d, + 0x125e: 0x682d, 0x125f: 0x0008, 0x1260: 0x684d, 
0x1261: 0x0008, 0x1262: 0x686d, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x688d, 0x1266: 0x68ad, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x68cd, 0x126b: 0x68ed, 0x126c: 0x690d, 0x126d: 0x692d, 0x126e: 0x694d, 0x126f: 0x696d, + 0x1270: 0x698d, 0x1271: 0x69ad, 0x1272: 0x69cd, 0x1273: 0x69ed, 0x1274: 0x6a0d, 0x1275: 0x6a2d, + 0x1276: 0x6a4d, 0x1277: 0x6a6d, 0x1278: 0x6a8d, 0x1279: 0x6aad, 0x127a: 0x6acd, 0x127b: 0x6aed, + 0x127c: 0x6b0d, 0x127d: 0x6b2d, 0x127e: 0x6b4d, 0x127f: 0x6b6d, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7acd, 0x1281: 0x7aed, 0x1282: 0x7b0d, 0x1283: 0x7b2d, 0x1284: 0x7b4d, 0x1285: 0x7b6d, + 0x1286: 0x7b8d, 0x1287: 0x7bad, 0x1288: 0x7bcd, 0x1289: 0x7bed, 0x128a: 0x7c0d, 0x128b: 0x7c2d, + 0x128c: 0x7c4d, 0x128d: 0x7c6d, 0x128e: 0x7c8d, 0x128f: 0x1409, 0x1290: 0x1411, 0x1291: 0x1419, + 0x1292: 0x7cad, 0x1293: 0x7ccd, 0x1294: 0x7ced, 0x1295: 0x1421, 0x1296: 0x1429, 0x1297: 0x1431, + 0x1298: 0x7d0d, 0x1299: 0x7d2d, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x1439, 0x12c1: 0x1441, 0x12c2: 0x1449, 0x12c3: 0x7d4d, 0x12c4: 0x7d6d, 0x12c5: 0x1451, + 0x12c6: 0x1451, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x1459, 0x12d4: 0x1461, 0x12d5: 0x1469, 0x12d6: 0x1471, 0x12d7: 
0x1479, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x1481, + 0x12de: 0x3308, 0x12df: 0x1489, 0x12e0: 0x1491, 0x12e1: 0x0779, 0x12e2: 0x0791, 0x12e3: 0x1499, + 0x12e4: 0x14a1, 0x12e5: 0x14a9, 0x12e6: 0x14b1, 0x12e7: 0x14b9, 0x12e8: 0x14c1, 0x12e9: 0x071a, + 0x12ea: 0x14c9, 0x12eb: 0x14d1, 0x12ec: 0x14d9, 0x12ed: 0x14e1, 0x12ee: 0x14e9, 0x12ef: 0x14f1, + 0x12f0: 0x14f9, 0x12f1: 0x1501, 0x12f2: 0x1509, 0x12f3: 0x1511, 0x12f4: 0x1519, 0x12f5: 0x1521, + 0x12f6: 0x1529, 0x12f7: 0x0040, 0x12f8: 0x1531, 0x12f9: 0x1539, 0x12fa: 0x1541, 0x12fb: 0x1549, + 0x12fc: 0x1551, 0x12fd: 0x0040, 0x12fe: 0x1559, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x1561, 0x1301: 0x1569, 0x1302: 0x0040, 0x1303: 0x1571, 0x1304: 0x1579, 0x1305: 0x0040, + 0x1306: 0x1581, 0x1307: 0x1589, 0x1308: 0x1591, 0x1309: 0x1599, 0x130a: 0x15a1, 0x130b: 0x15a9, + 0x130c: 0x15b1, 0x130d: 0x15b9, 0x130e: 0x15c1, 0x130f: 0x15c9, 0x1310: 0x15d1, 0x1311: 0x15d1, + 0x1312: 0x15d9, 0x1313: 0x15d9, 0x1314: 0x15d9, 0x1315: 0x15d9, 0x1316: 0x15e1, 0x1317: 0x15e1, + 0x1318: 0x15e1, 0x1319: 0x15e1, 0x131a: 0x15e9, 0x131b: 0x15e9, 0x131c: 0x15e9, 0x131d: 0x15e9, + 0x131e: 0x15f1, 0x131f: 0x15f1, 0x1320: 0x15f1, 0x1321: 0x15f1, 0x1322: 0x15f9, 0x1323: 0x15f9, + 0x1324: 0x15f9, 0x1325: 0x15f9, 0x1326: 0x1601, 0x1327: 0x1601, 0x1328: 0x1601, 0x1329: 0x1601, + 0x132a: 0x1609, 0x132b: 0x1609, 0x132c: 0x1609, 0x132d: 0x1609, 0x132e: 0x1611, 0x132f: 0x1611, + 0x1330: 0x1611, 0x1331: 0x1611, 0x1332: 0x1619, 0x1333: 0x1619, 0x1334: 0x1619, 0x1335: 0x1619, + 0x1336: 0x1621, 0x1337: 0x1621, 0x1338: 0x1621, 0x1339: 0x1621, 0x133a: 0x1629, 0x133b: 0x1629, + 0x133c: 0x1629, 0x133d: 0x1629, 0x133e: 0x1631, 0x133f: 0x1631, + // Block 0x4d, offset 0x1340 + 0x1340: 0x1631, 0x1341: 0x1631, 0x1342: 0x1639, 0x1343: 0x1639, 0x1344: 0x1641, 0x1345: 0x1641, + 0x1346: 0x1649, 0x1347: 0x1649, 0x1348: 0x1651, 0x1349: 0x1651, 0x134a: 0x1659, 0x134b: 0x1659, + 0x134c: 0x1661, 0x134d: 0x1661, 
0x134e: 0x1669, 0x134f: 0x1669, 0x1350: 0x1669, 0x1351: 0x1669, + 0x1352: 0x1671, 0x1353: 0x1671, 0x1354: 0x1671, 0x1355: 0x1671, 0x1356: 0x1679, 0x1357: 0x1679, + 0x1358: 0x1679, 0x1359: 0x1679, 0x135a: 0x1681, 0x135b: 0x1681, 0x135c: 0x1681, 0x135d: 0x1681, + 0x135e: 0x1689, 0x135f: 0x1689, 0x1360: 0x1691, 0x1361: 0x1691, 0x1362: 0x1691, 0x1363: 0x1691, + 0x1364: 0x1699, 0x1365: 0x1699, 0x1366: 0x16a1, 0x1367: 0x16a1, 0x1368: 0x16a1, 0x1369: 0x16a1, + 0x136a: 0x16a9, 0x136b: 0x16a9, 0x136c: 0x16a9, 0x136d: 0x16a9, 0x136e: 0x16b1, 0x136f: 0x16b1, + 0x1370: 0x16b9, 0x1371: 0x16b9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0818, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x16c1, 0x1394: 0x16c1, 0x1395: 0x16c1, 0x1396: 0x16c1, 0x1397: 0x16c9, + 0x1398: 0x16c9, 0x1399: 0x16d1, 0x139a: 0x16d1, 0x139b: 0x16d9, 0x139c: 0x16d9, 0x139d: 0x0149, + 0x139e: 0x16e1, 0x139f: 0x16e1, 0x13a0: 0x16e9, 0x13a1: 0x16e9, 0x13a2: 0x16f1, 0x13a3: 0x16f1, + 0x13a4: 0x16f9, 0x13a5: 0x16f9, 0x13a6: 0x16f9, 0x13a7: 0x16f9, 0x13a8: 0x1701, 0x13a9: 0x1701, + 0x13aa: 0x1709, 0x13ab: 0x1709, 0x13ac: 0x1711, 0x13ad: 0x1711, 0x13ae: 0x1719, 0x13af: 0x1719, + 0x13b0: 0x1721, 0x13b1: 0x1721, 0x13b2: 0x1729, 0x13b3: 0x1729, 0x13b4: 0x1731, 0x13b5: 0x1731, + 0x13b6: 0x1739, 0x13b7: 0x1739, 0x13b8: 0x1739, 0x13b9: 0x1741, 0x13ba: 0x1741, 0x13bb: 0x1741, + 0x13bc: 0x1749, 0x13bd: 0x1749, 0x13be: 0x1749, 0x13bf: 0x1749, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x1949, 0x13c1: 0x1951, 0x13c2: 0x1959, 0x13c3: 0x1961, 0x13c4: 
0x1969, 0x13c5: 0x1971, + 0x13c6: 0x1979, 0x13c7: 0x1981, 0x13c8: 0x1989, 0x13c9: 0x1991, 0x13ca: 0x1999, 0x13cb: 0x19a1, + 0x13cc: 0x19a9, 0x13cd: 0x19b1, 0x13ce: 0x19b9, 0x13cf: 0x19c1, 0x13d0: 0x19c9, 0x13d1: 0x19d1, + 0x13d2: 0x19d9, 0x13d3: 0x19e1, 0x13d4: 0x19e9, 0x13d5: 0x19f1, 0x13d6: 0x19f9, 0x13d7: 0x1a01, + 0x13d8: 0x1a09, 0x13d9: 0x1a11, 0x13da: 0x1a19, 0x13db: 0x1a21, 0x13dc: 0x1a29, 0x13dd: 0x1a31, + 0x13de: 0x1a3a, 0x13df: 0x1a42, 0x13e0: 0x1a4a, 0x13e1: 0x1a52, 0x13e2: 0x1a5a, 0x13e3: 0x1a62, + 0x13e4: 0x1a69, 0x13e5: 0x1a71, 0x13e6: 0x1761, 0x13e7: 0x1a79, 0x13e8: 0x1741, 0x13e9: 0x1769, + 0x13ea: 0x1a81, 0x13eb: 0x1a89, 0x13ec: 0x1789, 0x13ed: 0x1a91, 0x13ee: 0x1791, 0x13ef: 0x1799, + 0x13f0: 0x1a99, 0x13f1: 0x1aa1, 0x13f2: 0x17b9, 0x13f3: 0x1aa9, 0x13f4: 0x17c1, 0x13f5: 0x17c9, + 0x13f6: 0x1ab1, 0x13f7: 0x1ab9, 0x13f8: 0x17d9, 0x13f9: 0x1ac1, 0x13fa: 0x17e1, 0x13fb: 0x17e9, + 0x13fc: 0x18d1, 0x13fd: 0x18d9, 0x13fe: 0x18f1, 0x13ff: 0x18f9, + // Block 0x50, offset 0x1400 + 0x1400: 0x1901, 0x1401: 0x1921, 0x1402: 0x1929, 0x1403: 0x1931, 0x1404: 0x1939, 0x1405: 0x1959, + 0x1406: 0x1961, 0x1407: 0x1969, 0x1408: 0x1ac9, 0x1409: 0x1989, 0x140a: 0x1ad1, 0x140b: 0x1ad9, + 0x140c: 0x19b9, 0x140d: 0x1ae1, 0x140e: 0x19c1, 0x140f: 0x19c9, 0x1410: 0x1a31, 0x1411: 0x1ae9, + 0x1412: 0x1af1, 0x1413: 0x1a09, 0x1414: 0x1af9, 0x1415: 0x1a11, 0x1416: 0x1a19, 0x1417: 0x1751, + 0x1418: 0x1759, 0x1419: 0x1b01, 0x141a: 0x1761, 0x141b: 0x1b09, 0x141c: 0x1771, 0x141d: 0x1779, + 0x141e: 0x1781, 0x141f: 0x1789, 0x1420: 0x1b11, 0x1421: 0x17a1, 0x1422: 0x17a9, 0x1423: 0x17b1, + 0x1424: 0x17b9, 0x1425: 0x1b19, 0x1426: 0x17d9, 0x1427: 0x17f1, 0x1428: 0x17f9, 0x1429: 0x1801, + 0x142a: 0x1809, 0x142b: 0x1811, 0x142c: 0x1821, 0x142d: 0x1829, 0x142e: 0x1831, 0x142f: 0x1839, + 0x1430: 0x1841, 0x1431: 0x1849, 0x1432: 0x1b21, 0x1433: 0x1851, 0x1434: 0x1859, 0x1435: 0x1861, + 0x1436: 0x1869, 0x1437: 0x1871, 0x1438: 0x1879, 0x1439: 0x1889, 0x143a: 0x1891, 0x143b: 0x1899, + 0x143c: 
0x18a1, 0x143d: 0x18a9, 0x143e: 0x18b1, 0x143f: 0x18b9, + // Block 0x51, offset 0x1440 + 0x1440: 0x18c1, 0x1441: 0x18c9, 0x1442: 0x18e1, 0x1443: 0x18e9, 0x1444: 0x1909, 0x1445: 0x1911, + 0x1446: 0x1919, 0x1447: 0x1921, 0x1448: 0x1929, 0x1449: 0x1941, 0x144a: 0x1949, 0x144b: 0x1951, + 0x144c: 0x1959, 0x144d: 0x1b29, 0x144e: 0x1971, 0x144f: 0x1979, 0x1450: 0x1981, 0x1451: 0x1989, + 0x1452: 0x19a1, 0x1453: 0x19a9, 0x1454: 0x19b1, 0x1455: 0x19b9, 0x1456: 0x1b31, 0x1457: 0x19d1, + 0x1458: 0x19d9, 0x1459: 0x1b39, 0x145a: 0x19f1, 0x145b: 0x19f9, 0x145c: 0x1a01, 0x145d: 0x1a09, + 0x145e: 0x1b41, 0x145f: 0x1761, 0x1460: 0x1b09, 0x1461: 0x1789, 0x1462: 0x1b11, 0x1463: 0x17b9, + 0x1464: 0x1b19, 0x1465: 0x17d9, 0x1466: 0x1b49, 0x1467: 0x1841, 0x1468: 0x1b51, 0x1469: 0x1b59, + 0x146a: 0x1b61, 0x146b: 0x1921, 0x146c: 0x1929, 0x146d: 0x1959, 0x146e: 0x19b9, 0x146f: 0x1b31, + 0x1470: 0x1a09, 0x1471: 0x1b41, 0x1472: 0x1b69, 0x1473: 0x1b71, 0x1474: 0x1b79, 0x1475: 0x1b81, + 0x1476: 0x1b89, 0x1477: 0x1b91, 0x1478: 0x1b99, 0x1479: 0x1ba1, 0x147a: 0x1ba9, 0x147b: 0x1bb1, + 0x147c: 0x1bb9, 0x147d: 0x1bc1, 0x147e: 0x1bc9, 0x147f: 0x1bd1, + // Block 0x52, offset 0x1480 + 0x1480: 0x1bd9, 0x1481: 0x1be1, 0x1482: 0x1be9, 0x1483: 0x1bf1, 0x1484: 0x1bf9, 0x1485: 0x1c01, + 0x1486: 0x1c09, 0x1487: 0x1c11, 0x1488: 0x1c19, 0x1489: 0x1c21, 0x148a: 0x1c29, 0x148b: 0x1c31, + 0x148c: 0x1b59, 0x148d: 0x1c39, 0x148e: 0x1c41, 0x148f: 0x1c49, 0x1490: 0x1c51, 0x1491: 0x1b81, + 0x1492: 0x1b89, 0x1493: 0x1b91, 0x1494: 0x1b99, 0x1495: 0x1ba1, 0x1496: 0x1ba9, 0x1497: 0x1bb1, + 0x1498: 0x1bb9, 0x1499: 0x1bc1, 0x149a: 0x1bc9, 0x149b: 0x1bd1, 0x149c: 0x1bd9, 0x149d: 0x1be1, + 0x149e: 0x1be9, 0x149f: 0x1bf1, 0x14a0: 0x1bf9, 0x14a1: 0x1c01, 0x14a2: 0x1c09, 0x14a3: 0x1c11, + 0x14a4: 0x1c19, 0x14a5: 0x1c21, 0x14a6: 0x1c29, 0x14a7: 0x1c31, 0x14a8: 0x1b59, 0x14a9: 0x1c39, + 0x14aa: 0x1c41, 0x14ab: 0x1c49, 0x14ac: 0x1c51, 0x14ad: 0x1c21, 0x14ae: 0x1c29, 0x14af: 0x1c31, + 0x14b0: 0x1b59, 0x14b1: 0x1b51, 0x14b2: 0x1b61, 
0x14b3: 0x1881, 0x14b4: 0x1829, 0x14b5: 0x1831, + 0x14b6: 0x1839, 0x14b7: 0x1c21, 0x14b8: 0x1c29, 0x14b9: 0x1c31, 0x14ba: 0x1881, 0x14bb: 0x1889, + 0x14bc: 0x1c59, 0x14bd: 0x1c59, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0018, 0x14c1: 0x0018, 0x14c2: 0x0018, 0x14c3: 0x0018, 0x14c4: 0x0018, 0x14c5: 0x0018, + 0x14c6: 0x0018, 0x14c7: 0x0018, 0x14c8: 0x0018, 0x14c9: 0x0018, 0x14ca: 0x0018, 0x14cb: 0x0018, + 0x14cc: 0x0018, 0x14cd: 0x0018, 0x14ce: 0x0018, 0x14cf: 0x0018, 0x14d0: 0x1c61, 0x14d1: 0x1c69, + 0x14d2: 0x1c69, 0x14d3: 0x1c71, 0x14d4: 0x1c79, 0x14d5: 0x1c81, 0x14d6: 0x1c89, 0x14d7: 0x1c91, + 0x14d8: 0x1c99, 0x14d9: 0x1c99, 0x14da: 0x1ca1, 0x14db: 0x1ca9, 0x14dc: 0x1cb1, 0x14dd: 0x1cb9, + 0x14de: 0x1cc1, 0x14df: 0x1cc9, 0x14e0: 0x1cc9, 0x14e1: 0x1cd1, 0x14e2: 0x1cd9, 0x14e3: 0x1cd9, + 0x14e4: 0x1ce1, 0x14e5: 0x1ce1, 0x14e6: 0x1ce9, 0x14e7: 0x1cf1, 0x14e8: 0x1cf1, 0x14e9: 0x1cf9, + 0x14ea: 0x1d01, 0x14eb: 0x1d01, 0x14ec: 0x1d09, 0x14ed: 0x1d09, 0x14ee: 0x1d11, 0x14ef: 0x1d19, + 0x14f0: 0x1d19, 0x14f1: 0x1d21, 0x14f2: 0x1d21, 0x14f3: 0x1d29, 0x14f4: 0x1d31, 0x14f5: 0x1d39, + 0x14f6: 0x1d41, 0x14f7: 0x1d41, 0x14f8: 0x1d49, 0x14f9: 0x1d51, 0x14fa: 0x1d59, 0x14fb: 0x1d61, + 0x14fc: 0x1d69, 0x14fd: 0x1d69, 0x14fe: 0x1d71, 0x14ff: 0x1d79, + // Block 0x54, offset 0x1500 + 0x1500: 0x1f29, 0x1501: 0x1f31, 0x1502: 0x1f39, 0x1503: 0x1f11, 0x1504: 0x1d39, 0x1505: 0x1ce9, + 0x1506: 0x1f41, 0x1507: 0x1f49, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0018, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 
0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0x1f51, 0x1531: 0x1f59, 0x1532: 0x1f61, 0x1533: 0x1f69, 0x1534: 0x1f71, 0x1535: 0x1f79, + 0x1536: 0x1f81, 0x1537: 0x1f89, 0x1538: 0x1f91, 0x1539: 0x1f99, 0x153a: 0x1fa2, 0x153b: 0x1faa, + 0x153c: 0x1fb1, 0x153d: 0x0018, 0x153e: 0x0018, 0x153f: 0x0018, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0x1fba, 0x1551: 0x7d8d, + 0x1552: 0x0040, 0x1553: 0x1fc2, 0x1554: 0x0122, 0x1555: 0x1fca, 0x1556: 0x1fd2, 0x1557: 0x7dad, + 0x1558: 0x7dcd, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7ded, 0x1572: 0x7e0d, 0x1573: 0x1fda, 0x1574: 0x1fda, 0x1575: 0x072a, + 0x1576: 0x0732, 0x1577: 0x1fe2, 0x1578: 0x1fea, 0x1579: 0x7e2d, 0x157a: 0x7e4d, 0x157b: 0x7e6d, + 0x157c: 0x7e2d, 0x157d: 0x7e8d, 0x157e: 0x7ead, 0x157f: 0x7e8d, + // Block 0x56, offset 0x1580 + 0x1580: 0x7ecd, 0x1581: 0x7eed, 0x1582: 0x7f0d, 0x1583: 0x7eed, 0x1584: 0x7f2d, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0x1ff2, 0x1588: 0x1ffa, 0x1589: 0x7f4e, 0x158a: 0x7f6e, 0x158b: 0x7f8e, + 0x158c: 0x7fae, 0x158d: 0x1fda, 0x158e: 0x1fda, 0x158f: 0x1fda, 0x1590: 0x1fba, 0x1591: 0x7fcd, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x0122, 0x1595: 0x1fc2, 0x1596: 0x1fd2, 0x1597: 0x1fca, + 0x1598: 0x7fed, 0x1599: 0x072a, 0x159a: 0x0732, 0x159b: 0x1fe2, 0x159c: 0x1fea, 0x159d: 0x7ecd, + 0x159e: 0x7f2d, 0x159f: 0x2002, 
0x15a0: 0x200a, 0x15a1: 0x2012, 0x15a2: 0x071a, 0x15a3: 0x2019, + 0x15a4: 0x2022, 0x15a5: 0x202a, 0x15a6: 0x0722, 0x15a7: 0x0040, 0x15a8: 0x2032, 0x15a9: 0x203a, + 0x15aa: 0x2042, 0x15ab: 0x204a, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x800e, 0x15b1: 0x2051, 0x15b2: 0x802e, 0x15b3: 0x0808, 0x15b4: 0x804e, 0x15b5: 0x0040, + 0x15b6: 0x806e, 0x15b7: 0x2059, 0x15b8: 0x808e, 0x15b9: 0x2061, 0x15ba: 0x80ae, 0x15bb: 0x2069, + 0x15bc: 0x80ce, 0x15bd: 0x2071, 0x15be: 0x80ee, 0x15bf: 0x2079, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x2081, 0x15c1: 0x2089, 0x15c2: 0x2089, 0x15c3: 0x2091, 0x15c4: 0x2091, 0x15c5: 0x2099, + 0x15c6: 0x2099, 0x15c7: 0x20a1, 0x15c8: 0x20a1, 0x15c9: 0x20a9, 0x15ca: 0x20a9, 0x15cb: 0x20a9, + 0x15cc: 0x20a9, 0x15cd: 0x20b1, 0x15ce: 0x20b1, 0x15cf: 0x20b9, 0x15d0: 0x20b9, 0x15d1: 0x20b9, + 0x15d2: 0x20b9, 0x15d3: 0x20c1, 0x15d4: 0x20c1, 0x15d5: 0x20c9, 0x15d6: 0x20c9, 0x15d7: 0x20c9, + 0x15d8: 0x20c9, 0x15d9: 0x20d1, 0x15da: 0x20d1, 0x15db: 0x20d1, 0x15dc: 0x20d1, 0x15dd: 0x20d9, + 0x15de: 0x20d9, 0x15df: 0x20d9, 0x15e0: 0x20d9, 0x15e1: 0x20e1, 0x15e2: 0x20e1, 0x15e3: 0x20e1, + 0x15e4: 0x20e1, 0x15e5: 0x20e9, 0x15e6: 0x20e9, 0x15e7: 0x20e9, 0x15e8: 0x20e9, 0x15e9: 0x20f1, + 0x15ea: 0x20f1, 0x15eb: 0x20f9, 0x15ec: 0x20f9, 0x15ed: 0x2101, 0x15ee: 0x2101, 0x15ef: 0x2109, + 0x15f0: 0x2109, 0x15f1: 0x2111, 0x15f2: 0x2111, 0x15f3: 0x2111, 0x15f4: 0x2111, 0x15f5: 0x2119, + 0x15f6: 0x2119, 0x15f7: 0x2119, 0x15f8: 0x2119, 0x15f9: 0x2121, 0x15fa: 0x2121, 0x15fb: 0x2121, + 0x15fc: 0x2121, 0x15fd: 0x2129, 0x15fe: 0x2129, 0x15ff: 0x2129, + // Block 0x58, offset 0x1600 + 0x1600: 0x2129, 0x1601: 0x2131, 0x1602: 0x2131, 0x1603: 0x2131, 0x1604: 0x2131, 0x1605: 0x2139, + 0x1606: 0x2139, 0x1607: 0x2139, 0x1608: 0x2139, 0x1609: 0x2141, 0x160a: 0x2141, 0x160b: 0x2141, + 0x160c: 0x2141, 0x160d: 0x2149, 0x160e: 0x2149, 0x160f: 0x2149, 0x1610: 0x2149, 0x1611: 0x2151, + 0x1612: 0x2151, 0x1613: 0x2151, 0x1614: 0x2151, 0x1615: 0x2159, 0x1616: 
0x2159, 0x1617: 0x2159, + 0x1618: 0x2159, 0x1619: 0x2161, 0x161a: 0x2161, 0x161b: 0x2161, 0x161c: 0x2161, 0x161d: 0x2169, + 0x161e: 0x2169, 0x161f: 0x2169, 0x1620: 0x2169, 0x1621: 0x2171, 0x1622: 0x2171, 0x1623: 0x2171, + 0x1624: 0x2171, 0x1625: 0x2179, 0x1626: 0x2179, 0x1627: 0x2179, 0x1628: 0x2179, 0x1629: 0x2181, + 0x162a: 0x2181, 0x162b: 0x2181, 0x162c: 0x2181, 0x162d: 0x2189, 0x162e: 0x2189, 0x162f: 0x1701, + 0x1630: 0x1701, 0x1631: 0x2191, 0x1632: 0x2191, 0x1633: 0x2191, 0x1634: 0x2191, 0x1635: 0x2199, + 0x1636: 0x2199, 0x1637: 0x21a1, 0x1638: 0x21a1, 0x1639: 0x21a9, 0x163a: 0x21a9, 0x163b: 0x21b1, + 0x163c: 0x21b1, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0x1fca, 0x1642: 0x21ba, 0x1643: 0x2002, 0x1644: 0x203a, 0x1645: 0x2042, + 0x1646: 0x200a, 0x1647: 0x21c2, 0x1648: 0x072a, 0x1649: 0x0732, 0x164a: 0x2012, 0x164b: 0x071a, + 0x164c: 0x1fba, 0x164d: 0x2019, 0x164e: 0x0961, 0x164f: 0x21ca, 0x1650: 0x06e1, 0x1651: 0x0049, + 0x1652: 0x0029, 0x1653: 0x0031, 0x1654: 0x06e9, 0x1655: 0x06f1, 0x1656: 0x06f9, 0x1657: 0x0701, + 0x1658: 0x0709, 0x1659: 0x0711, 0x165a: 0x1fc2, 0x165b: 0x0122, 0x165c: 0x2022, 0x165d: 0x0722, + 0x165e: 0x202a, 0x165f: 0x1fd2, 0x1660: 0x204a, 0x1661: 0x0019, 0x1662: 0x02e9, 0x1663: 0x03d9, + 0x1664: 0x02f1, 0x1665: 0x02f9, 0x1666: 0x03f1, 0x1667: 0x0309, 0x1668: 0x00a9, 0x1669: 0x0311, + 0x166a: 0x00b1, 0x166b: 0x0319, 0x166c: 0x0101, 0x166d: 0x0321, 0x166e: 0x0329, 0x166f: 0x0051, + 0x1670: 0x0339, 0x1671: 0x0751, 0x1672: 0x00b9, 0x1673: 0x0089, 0x1674: 0x0341, 0x1675: 0x0349, + 0x1676: 0x0391, 0x1677: 0x00c1, 0x1678: 0x0109, 0x1679: 0x00c9, 0x167a: 0x04b1, 0x167b: 0x1ff2, + 0x167c: 0x2032, 0x167d: 0x1ffa, 0x167e: 0x21d2, 0x167f: 0x1fda, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0672, 0x1681: 0x0019, 0x1682: 0x02e9, 0x1683: 0x03d9, 0x1684: 0x02f1, 0x1685: 0x02f9, + 0x1686: 0x03f1, 0x1687: 0x0309, 0x1688: 0x00a9, 0x1689: 0x0311, 0x168a: 0x00b1, 0x168b: 0x0319, + 0x168c: 0x0101, 
0x168d: 0x0321, 0x168e: 0x0329, 0x168f: 0x0051, 0x1690: 0x0339, 0x1691: 0x0751, + 0x1692: 0x00b9, 0x1693: 0x0089, 0x1694: 0x0341, 0x1695: 0x0349, 0x1696: 0x0391, 0x1697: 0x00c1, + 0x1698: 0x0109, 0x1699: 0x00c9, 0x169a: 0x04b1, 0x169b: 0x1fe2, 0x169c: 0x21da, 0x169d: 0x1fea, + 0x169e: 0x21e2, 0x169f: 0x810d, 0x16a0: 0x812d, 0x16a1: 0x0961, 0x16a2: 0x814d, 0x16a3: 0x814d, + 0x16a4: 0x816d, 0x16a5: 0x818d, 0x16a6: 0x81ad, 0x16a7: 0x81cd, 0x16a8: 0x81ed, 0x16a9: 0x820d, + 0x16aa: 0x822d, 0x16ab: 0x824d, 0x16ac: 0x826d, 0x16ad: 0x828d, 0x16ae: 0x82ad, 0x16af: 0x82cd, + 0x16b0: 0x82ed, 0x16b1: 0x830d, 0x16b2: 0x832d, 0x16b3: 0x834d, 0x16b4: 0x836d, 0x16b5: 0x838d, + 0x16b6: 0x83ad, 0x16b7: 0x83cd, 0x16b8: 0x83ed, 0x16b9: 0x840d, 0x16ba: 0x842d, 0x16bb: 0x844d, + 0x16bc: 0x81ed, 0x16bd: 0x846d, 0x16be: 0x848d, 0x16bf: 0x824d, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x84ad, 0x16c1: 0x84cd, 0x16c2: 0x84ed, 0x16c3: 0x850d, 0x16c4: 0x852d, 0x16c5: 0x854d, + 0x16c6: 0x856d, 0x16c7: 0x858d, 0x16c8: 0x850d, 0x16c9: 0x85ad, 0x16ca: 0x850d, 0x16cb: 0x85cd, + 0x16cc: 0x85cd, 0x16cd: 0x85ed, 0x16ce: 0x85ed, 0x16cf: 0x860d, 0x16d0: 0x854d, 0x16d1: 0x862d, + 0x16d2: 0x864d, 0x16d3: 0x862d, 0x16d4: 0x866d, 0x16d5: 0x864d, 0x16d6: 0x868d, 0x16d7: 0x868d, + 0x16d8: 0x86ad, 0x16d9: 0x86ad, 0x16da: 0x86cd, 0x16db: 0x86cd, 0x16dc: 0x864d, 0x16dd: 0x814d, + 0x16de: 0x86ed, 0x16df: 0x870d, 0x16e0: 0x0040, 0x16e1: 0x872d, 0x16e2: 0x874d, 0x16e3: 0x876d, + 0x16e4: 0x878d, 0x16e5: 0x876d, 0x16e6: 0x87ad, 0x16e7: 0x87cd, 0x16e8: 0x87ed, 0x16e9: 0x87ed, + 0x16ea: 0x880d, 0x16eb: 0x880d, 0x16ec: 0x882d, 0x16ed: 0x882d, 0x16ee: 0x880d, 0x16ef: 0x880d, + 0x16f0: 0x884d, 0x16f1: 0x886d, 0x16f2: 0x888d, 0x16f3: 0x88ad, 0x16f4: 0x88cd, 0x16f5: 0x88ed, + 0x16f6: 0x88ed, 0x16f7: 0x88ed, 0x16f8: 0x890d, 0x16f9: 0x890d, 0x16fa: 0x890d, 0x16fb: 0x890d, + 0x16fc: 0x87ed, 0x16fd: 0x87ed, 0x16fe: 0x87ed, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x874d, 0x1703: 
0x872d, 0x1704: 0x892d, 0x1705: 0x872d, + 0x1706: 0x874d, 0x1707: 0x872d, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x894d, 0x170b: 0x874d, + 0x170c: 0x896d, 0x170d: 0x892d, 0x170e: 0x896d, 0x170f: 0x874d, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x898d, 0x1713: 0x89ad, 0x1714: 0x88ad, 0x1715: 0x896d, 0x1716: 0x892d, 0x1717: 0x896d, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x89cd, 0x171b: 0x89ed, 0x171c: 0x89cd, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0x21e9, 0x1721: 0x21f1, 0x1722: 0x21f9, 0x1723: 0x8a0e, + 0x1724: 0x2201, 0x1725: 0x2209, 0x1726: 0x8a2d, 0x1727: 0x0040, 0x1728: 0x8a4d, 0x1729: 0x8a6d, + 0x172a: 0x8a8d, 0x172b: 0x8a6d, 0x172c: 0x8aad, 0x172d: 0x8acd, 0x172e: 0x8aed, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0008, 0x1741: 0x0008, 0x1742: 0x0008, 0x1743: 0x0008, 0x1744: 0x0008, 0x1745: 0x0008, + 0x1746: 0x0008, 0x1747: 0x0008, 0x1748: 0x0008, 0x1749: 0x0008, 0x174a: 0x0008, 0x174b: 0x0008, + 0x174c: 0x0008, 0x174d: 0x0008, 0x174e: 0x0008, 0x174f: 0x0008, 0x1750: 0x0008, 0x1751: 0x0008, + 0x1752: 0x0008, 0x1753: 0x0008, 0x1754: 0x0008, 0x1755: 0x0008, 0x1756: 0x0008, 0x1757: 0x0008, + 0x1758: 0x0008, 0x1759: 0x0008, 0x175a: 0x0008, 0x175b: 0x0008, 0x175c: 0x0008, 0x175d: 0x0008, + 0x175e: 0x0008, 0x175f: 0x0008, 0x1760: 0x0008, 0x1761: 0x0008, 0x1762: 0x0008, 0x1763: 0x0008, + 0x1764: 0x0040, 0x1765: 0x0040, 0x1766: 0x0040, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0040, 0x176c: 0x0040, 0x176d: 0x0040, 0x176e: 0x0040, 0x176f: 0x0018, + 0x1770: 0x8b3d, 0x1771: 0x8b55, 0x1772: 0x8b6d, 0x1773: 0x8b55, 0x1774: 0x8b85, 0x1775: 0x8b55, + 0x1776: 0x8b6d, 0x1777: 0x8b55, 0x1778: 0x8b3d, 0x1779: 0x8b9d, 0x177a: 0x8bb5, 0x177b: 0x0040, 
+ 0x177c: 0x8bcd, 0x177d: 0x8b9d, 0x177e: 0x8bb5, 0x177f: 0x8b9d, + // Block 0x5e, offset 0x1780 + 0x1780: 0xe13d, 0x1781: 0xe14d, 0x1782: 0xe15d, 0x1783: 0xe14d, 0x1784: 0xe17d, 0x1785: 0xe14d, + 0x1786: 0xe15d, 0x1787: 0xe14d, 0x1788: 0xe13d, 0x1789: 0xe1cd, 0x178a: 0xe1dd, 0x178b: 0x0040, + 0x178c: 0xe1fd, 0x178d: 0xe1cd, 0x178e: 0xe1dd, 0x178f: 0xe1cd, 0x1790: 0xe13d, 0x1791: 0xe14d, + 0x1792: 0xe15d, 0x1793: 0x0040, 0x1794: 0xe17d, 0x1795: 0xe14d, 0x1796: 0x0040, 0x1797: 0x0008, + 0x1798: 0x0008, 0x1799: 0x0008, 0x179a: 0x0008, 0x179b: 0x0008, 0x179c: 0x0008, 0x179d: 0x0008, + 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x0040, 0x17a3: 0x0008, + 0x17a4: 0x0008, 0x17a5: 0x0008, 0x17a6: 0x0008, 0x17a7: 0x0008, 0x17a8: 0x0008, 0x17a9: 0x0008, + 0x17aa: 0x0008, 0x17ab: 0x0008, 0x17ac: 0x0008, 0x17ad: 0x0008, 0x17ae: 0x0008, 0x17af: 0x0008, + 0x17b0: 0x0008, 0x17b1: 0x0008, 0x17b2: 0x0040, 0x17b3: 0x0008, 0x17b4: 0x0008, 0x17b5: 0x0008, + 0x17b6: 0x0008, 0x17b7: 0x0008, 0x17b8: 0x0008, 0x17b9: 0x0008, 0x17ba: 0x0040, 0x17bb: 0x0008, + 0x17bc: 0x0008, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x0008, 0x17c1: 0x2211, 0x17c2: 0x2219, 0x17c3: 0x02e1, 0x17c4: 0x2221, 0x17c5: 0x2229, + 0x17c6: 0x0040, 0x17c7: 0x2231, 0x17c8: 0x2239, 0x17c9: 0x2241, 0x17ca: 0x2249, 0x17cb: 0x2251, + 0x17cc: 0x2259, 0x17cd: 0x2261, 0x17ce: 0x2269, 0x17cf: 0x2271, 0x17d0: 0x2279, 0x17d1: 0x2281, + 0x17d2: 0x2289, 0x17d3: 0x2291, 0x17d4: 0x2299, 0x17d5: 0x0741, 0x17d6: 0x22a1, 0x17d7: 0x22a9, + 0x17d8: 0x22b1, 0x17d9: 0x22b9, 0x17da: 0x22c1, 0x17db: 0x13d9, 0x17dc: 0x8be5, 0x17dd: 0x22c9, + 0x17de: 0x22d1, 0x17df: 0x8c05, 0x17e0: 0x22d9, 0x17e1: 0x8c25, 0x17e2: 0x22e1, 0x17e3: 0x22e9, + 0x17e4: 0x22f1, 0x17e5: 0x0751, 0x17e6: 0x22f9, 0x17e7: 0x8c45, 0x17e8: 0x0949, 0x17e9: 0x2301, + 0x17ea: 0x2309, 0x17eb: 0x2311, 0x17ec: 0x2319, 0x17ed: 0x2321, 0x17ee: 0x2329, 0x17ef: 0x2331, + 0x17f0: 0x2339, 0x17f1: 0x0040, 0x17f2: 
0x2341, 0x17f3: 0x2349, 0x17f4: 0x2351, 0x17f5: 0x2359, + 0x17f6: 0x2361, 0x17f7: 0x2369, 0x17f8: 0x2371, 0x17f9: 0x8c65, 0x17fa: 0x8c85, 0x17fb: 0x0040, + 0x17fc: 0x0040, 0x17fd: 0x0040, 0x17fe: 0x0040, 0x17ff: 0x0040, + // Block 0x60, offset 0x1800 + 0x1800: 0x0a08, 0x1801: 0x0a08, 0x1802: 0x0a08, 0x1803: 0x0a08, 0x1804: 0x0a08, 0x1805: 0x0c08, + 0x1806: 0x0808, 0x1807: 0x0c08, 0x1808: 0x0818, 0x1809: 0x0c08, 0x180a: 0x0c08, 0x180b: 0x0808, + 0x180c: 0x0808, 0x180d: 0x0908, 0x180e: 0x0c08, 0x180f: 0x0c08, 0x1810: 0x0c08, 0x1811: 0x0c08, + 0x1812: 0x0c08, 0x1813: 0x0a08, 0x1814: 0x0a08, 0x1815: 0x0a08, 0x1816: 0x0a08, 0x1817: 0x0908, + 0x1818: 0x0a08, 0x1819: 0x0a08, 0x181a: 0x0a08, 0x181b: 0x0a08, 0x181c: 0x0a08, 0x181d: 0x0c08, + 0x181e: 0x0a08, 0x181f: 0x0a08, 0x1820: 0x0a08, 0x1821: 0x0c08, 0x1822: 0x0808, 0x1823: 0x0808, + 0x1824: 0x0c08, 0x1825: 0x3308, 0x1826: 0x3308, 0x1827: 0x0040, 0x1828: 0x0040, 0x1829: 0x0040, + 0x182a: 0x0040, 0x182b: 0x0a18, 0x182c: 0x0a18, 0x182d: 0x0a18, 0x182e: 0x0a18, 0x182f: 0x0c18, + 0x1830: 0x0818, 0x1831: 0x0818, 0x1832: 0x0818, 0x1833: 0x0818, 0x1834: 0x0818, 0x1835: 0x0818, + 0x1836: 0x0818, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0a08, 0x1841: 0x0c08, 0x1842: 0x0a08, 0x1843: 0x0c08, 0x1844: 0x0c08, 0x1845: 0x0c08, + 0x1846: 0x0a08, 0x1847: 0x0a08, 0x1848: 0x0a08, 0x1849: 0x0c08, 0x184a: 0x0a08, 0x184b: 0x0a08, + 0x184c: 0x0c08, 0x184d: 0x0a08, 0x184e: 0x0c08, 0x184f: 0x0c08, 0x1850: 0x0a08, 0x1851: 0x0c08, + 0x1852: 0x0040, 0x1853: 0x0040, 0x1854: 0x0040, 0x1855: 0x0040, 0x1856: 0x0040, 0x1857: 0x0040, + 0x1858: 0x0040, 0x1859: 0x0818, 0x185a: 0x0818, 0x185b: 0x0818, 0x185c: 0x0818, 0x185d: 0x0040, + 0x185e: 0x0040, 0x185f: 0x0040, 0x1860: 0x0040, 0x1861: 0x0040, 0x1862: 0x0040, 0x1863: 0x0040, + 0x1864: 0x0040, 0x1865: 0x0040, 0x1866: 0x0040, 0x1867: 0x0040, 0x1868: 0x0040, 
0x1869: 0x0c18, + 0x186a: 0x0c18, 0x186b: 0x0c18, 0x186c: 0x0c18, 0x186d: 0x0a18, 0x186e: 0x0a18, 0x186f: 0x0818, + 0x1870: 0x0040, 0x1871: 0x0040, 0x1872: 0x0040, 0x1873: 0x0040, 0x1874: 0x0040, 0x1875: 0x0040, + 0x1876: 0x0040, 0x1877: 0x0040, 0x1878: 0x0040, 0x1879: 0x0040, 0x187a: 0x0040, 0x187b: 0x0040, + 0x187c: 0x0040, 0x187d: 0x0040, 0x187e: 0x0040, 0x187f: 0x0040, + // Block 0x62, offset 0x1880 + 0x1880: 0x3308, 0x1881: 0x3308, 0x1882: 0x3008, 0x1883: 0x3008, 0x1884: 0x0040, 0x1885: 0x0008, + 0x1886: 0x0008, 0x1887: 0x0008, 0x1888: 0x0008, 0x1889: 0x0008, 0x188a: 0x0008, 0x188b: 0x0008, + 0x188c: 0x0008, 0x188d: 0x0040, 0x188e: 0x0040, 0x188f: 0x0008, 0x1890: 0x0008, 0x1891: 0x0040, + 0x1892: 0x0040, 0x1893: 0x0008, 0x1894: 0x0008, 0x1895: 0x0008, 0x1896: 0x0008, 0x1897: 0x0008, + 0x1898: 0x0008, 0x1899: 0x0008, 0x189a: 0x0008, 0x189b: 0x0008, 0x189c: 0x0008, 0x189d: 0x0008, + 0x189e: 0x0008, 0x189f: 0x0008, 0x18a0: 0x0008, 0x18a1: 0x0008, 0x18a2: 0x0008, 0x18a3: 0x0008, + 0x18a4: 0x0008, 0x18a5: 0x0008, 0x18a6: 0x0008, 0x18a7: 0x0008, 0x18a8: 0x0008, 0x18a9: 0x0040, + 0x18aa: 0x0008, 0x18ab: 0x0008, 0x18ac: 0x0008, 0x18ad: 0x0008, 0x18ae: 0x0008, 0x18af: 0x0008, + 0x18b0: 0x0008, 0x18b1: 0x0040, 0x18b2: 0x0008, 0x18b3: 0x0008, 0x18b4: 0x0040, 0x18b5: 0x0008, + 0x18b6: 0x0008, 0x18b7: 0x0008, 0x18b8: 0x0008, 0x18b9: 0x0008, 0x18ba: 0x0040, 0x18bb: 0x3308, + 0x18bc: 0x3308, 0x18bd: 0x0008, 0x18be: 0x3008, 0x18bf: 0x3008, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x3308, 0x18c1: 0x3008, 0x18c2: 0x3008, 0x18c3: 0x3008, 0x18c4: 0x3008, 0x18c5: 0x0040, + 0x18c6: 0x0040, 0x18c7: 0x3008, 0x18c8: 0x3008, 0x18c9: 0x0040, 0x18ca: 0x0040, 0x18cb: 0x3008, + 0x18cc: 0x3008, 0x18cd: 0x3808, 0x18ce: 0x0040, 0x18cf: 0x0040, 0x18d0: 0x0008, 0x18d1: 0x0040, + 0x18d2: 0x0040, 0x18d3: 0x0040, 0x18d4: 0x0040, 0x18d5: 0x0040, 0x18d6: 0x0040, 0x18d7: 0x3008, + 0x18d8: 0x0040, 0x18d9: 0x0040, 0x18da: 0x0040, 0x18db: 0x0040, 0x18dc: 0x0040, 0x18dd: 0x0008, + 0x18de: 0x0008, 0x18df: 
0x0008, 0x18e0: 0x0008, 0x18e1: 0x0008, 0x18e2: 0x3008, 0x18e3: 0x3008, + 0x18e4: 0x0040, 0x18e5: 0x0040, 0x18e6: 0x3308, 0x18e7: 0x3308, 0x18e8: 0x3308, 0x18e9: 0x3308, + 0x18ea: 0x3308, 0x18eb: 0x3308, 0x18ec: 0x3308, 0x18ed: 0x0040, 0x18ee: 0x0040, 0x18ef: 0x0040, + 0x18f0: 0x3308, 0x18f1: 0x3308, 0x18f2: 0x3308, 0x18f3: 0x3308, 0x18f4: 0x3308, 0x18f5: 0x0040, + 0x18f6: 0x0040, 0x18f7: 0x0040, 0x18f8: 0x0040, 0x18f9: 0x0040, 0x18fa: 0x0040, 0x18fb: 0x0040, + 0x18fc: 0x0040, 0x18fd: 0x0040, 0x18fe: 0x0040, 0x18ff: 0x0040, + // Block 0x64, offset 0x1900 + 0x1900: 0x0008, 0x1901: 0x0008, 0x1902: 0x0008, 0x1903: 0x0008, 0x1904: 0x0008, 0x1905: 0x0008, + 0x1906: 0x0008, 0x1907: 0x0040, 0x1908: 0x0040, 0x1909: 0x0008, 0x190a: 0x0040, 0x190b: 0x0040, + 0x190c: 0x0008, 0x190d: 0x0008, 0x190e: 0x0008, 0x190f: 0x0008, 0x1910: 0x0008, 0x1911: 0x0008, + 0x1912: 0x0008, 0x1913: 0x0008, 0x1914: 0x0040, 0x1915: 0x0008, 0x1916: 0x0008, 0x1917: 0x0040, + 0x1918: 0x0008, 0x1919: 0x0008, 0x191a: 0x0008, 0x191b: 0x0008, 0x191c: 0x0008, 0x191d: 0x0008, + 0x191e: 0x0008, 0x191f: 0x0008, 0x1920: 0x0008, 0x1921: 0x0008, 0x1922: 0x0008, 0x1923: 0x0008, + 0x1924: 0x0008, 0x1925: 0x0008, 0x1926: 0x0008, 0x1927: 0x0008, 0x1928: 0x0008, 0x1929: 0x0008, + 0x192a: 0x0008, 0x192b: 0x0008, 0x192c: 0x0008, 0x192d: 0x0008, 0x192e: 0x0008, 0x192f: 0x0008, + 0x1930: 0x3008, 0x1931: 0x3008, 0x1932: 0x3008, 0x1933: 0x3008, 0x1934: 0x3008, 0x1935: 0x3008, + 0x1936: 0x0040, 0x1937: 0x3008, 0x1938: 0x3008, 0x1939: 0x0040, 0x193a: 0x0040, 0x193b: 0x3308, + 0x193c: 0x3308, 0x193d: 0x3808, 0x193e: 0x3b08, 0x193f: 0x0008, + // Block 0x65, offset 0x1940 + 0x1940: 0x0019, 0x1941: 0x02e9, 0x1942: 0x03d9, 0x1943: 0x02f1, 0x1944: 0x02f9, 0x1945: 0x03f1, + 0x1946: 0x0309, 0x1947: 0x00a9, 0x1948: 0x0311, 0x1949: 0x00b1, 0x194a: 0x0319, 0x194b: 0x0101, + 0x194c: 0x0321, 0x194d: 0x0329, 0x194e: 0x0051, 0x194f: 0x0339, 0x1950: 0x0751, 0x1951: 0x00b9, + 0x1952: 0x0089, 0x1953: 0x0341, 0x1954: 0x0349, 0x1955: 0x0391, 
0x1956: 0x00c1, 0x1957: 0x0109, + 0x1958: 0x00c9, 0x1959: 0x04b1, 0x195a: 0x0019, 0x195b: 0x02e9, 0x195c: 0x03d9, 0x195d: 0x02f1, + 0x195e: 0x02f9, 0x195f: 0x03f1, 0x1960: 0x0309, 0x1961: 0x00a9, 0x1962: 0x0311, 0x1963: 0x00b1, + 0x1964: 0x0319, 0x1965: 0x0101, 0x1966: 0x0321, 0x1967: 0x0329, 0x1968: 0x0051, 0x1969: 0x0339, + 0x196a: 0x0751, 0x196b: 0x00b9, 0x196c: 0x0089, 0x196d: 0x0341, 0x196e: 0x0349, 0x196f: 0x0391, + 0x1970: 0x00c1, 0x1971: 0x0109, 0x1972: 0x00c9, 0x1973: 0x04b1, 0x1974: 0x0019, 0x1975: 0x02e9, + 0x1976: 0x03d9, 0x1977: 0x02f1, 0x1978: 0x02f9, 0x1979: 0x03f1, 0x197a: 0x0309, 0x197b: 0x00a9, + 0x197c: 0x0311, 0x197d: 0x00b1, 0x197e: 0x0319, 0x197f: 0x0101, + // Block 0x66, offset 0x1980 + 0x1980: 0x0321, 0x1981: 0x0329, 0x1982: 0x0051, 0x1983: 0x0339, 0x1984: 0x0751, 0x1985: 0x00b9, + 0x1986: 0x0089, 0x1987: 0x0341, 0x1988: 0x0349, 0x1989: 0x0391, 0x198a: 0x00c1, 0x198b: 0x0109, + 0x198c: 0x00c9, 0x198d: 0x04b1, 0x198e: 0x0019, 0x198f: 0x02e9, 0x1990: 0x03d9, 0x1991: 0x02f1, + 0x1992: 0x02f9, 0x1993: 0x03f1, 0x1994: 0x0309, 0x1995: 0x0040, 0x1996: 0x0311, 0x1997: 0x00b1, + 0x1998: 0x0319, 0x1999: 0x0101, 0x199a: 0x0321, 0x199b: 0x0329, 0x199c: 0x0051, 0x199d: 0x0339, + 0x199e: 0x0751, 0x199f: 0x00b9, 0x19a0: 0x0089, 0x19a1: 0x0341, 0x19a2: 0x0349, 0x19a3: 0x0391, + 0x19a4: 0x00c1, 0x19a5: 0x0109, 0x19a6: 0x00c9, 0x19a7: 0x04b1, 0x19a8: 0x0019, 0x19a9: 0x02e9, + 0x19aa: 0x03d9, 0x19ab: 0x02f1, 0x19ac: 0x02f9, 0x19ad: 0x03f1, 0x19ae: 0x0309, 0x19af: 0x00a9, + 0x19b0: 0x0311, 0x19b1: 0x00b1, 0x19b2: 0x0319, 0x19b3: 0x0101, 0x19b4: 0x0321, 0x19b5: 0x0329, + 0x19b6: 0x0051, 0x19b7: 0x0339, 0x19b8: 0x0751, 0x19b9: 0x00b9, 0x19ba: 0x0089, 0x19bb: 0x0341, + 0x19bc: 0x0349, 0x19bd: 0x0391, 0x19be: 0x00c1, 0x19bf: 0x0109, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x00c9, 0x19c1: 0x04b1, 0x19c2: 0x0019, 0x19c3: 0x02e9, 0x19c4: 0x03d9, 0x19c5: 0x02f1, + 0x19c6: 0x02f9, 0x19c7: 0x03f1, 0x19c8: 0x0309, 0x19c9: 0x00a9, 0x19ca: 0x0311, 0x19cb: 0x00b1, + 0x19cc: 
0x0319, 0x19cd: 0x0101, 0x19ce: 0x0321, 0x19cf: 0x0329, 0x19d0: 0x0051, 0x19d1: 0x0339, + 0x19d2: 0x0751, 0x19d3: 0x00b9, 0x19d4: 0x0089, 0x19d5: 0x0341, 0x19d6: 0x0349, 0x19d7: 0x0391, + 0x19d8: 0x00c1, 0x19d9: 0x0109, 0x19da: 0x00c9, 0x19db: 0x04b1, 0x19dc: 0x0019, 0x19dd: 0x0040, + 0x19de: 0x03d9, 0x19df: 0x02f1, 0x19e0: 0x0040, 0x19e1: 0x0040, 0x19e2: 0x0309, 0x19e3: 0x0040, + 0x19e4: 0x0040, 0x19e5: 0x00b1, 0x19e6: 0x0319, 0x19e7: 0x0040, 0x19e8: 0x0040, 0x19e9: 0x0329, + 0x19ea: 0x0051, 0x19eb: 0x0339, 0x19ec: 0x0751, 0x19ed: 0x0040, 0x19ee: 0x0089, 0x19ef: 0x0341, + 0x19f0: 0x0349, 0x19f1: 0x0391, 0x19f2: 0x00c1, 0x19f3: 0x0109, 0x19f4: 0x00c9, 0x19f5: 0x04b1, + 0x19f6: 0x0019, 0x19f7: 0x02e9, 0x19f8: 0x03d9, 0x19f9: 0x02f1, 0x19fa: 0x0040, 0x19fb: 0x03f1, + 0x19fc: 0x0040, 0x19fd: 0x00a9, 0x19fe: 0x0311, 0x19ff: 0x00b1, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0319, 0x1a01: 0x0101, 0x1a02: 0x0321, 0x1a03: 0x0329, 0x1a04: 0x0040, 0x1a05: 0x0339, + 0x1a06: 0x0751, 0x1a07: 0x00b9, 0x1a08: 0x0089, 0x1a09: 0x0341, 0x1a0a: 0x0349, 0x1a0b: 0x0391, + 0x1a0c: 0x00c1, 0x1a0d: 0x0109, 0x1a0e: 0x00c9, 0x1a0f: 0x04b1, 0x1a10: 0x0019, 0x1a11: 0x02e9, + 0x1a12: 0x03d9, 0x1a13: 0x02f1, 0x1a14: 0x02f9, 0x1a15: 0x03f1, 0x1a16: 0x0309, 0x1a17: 0x00a9, + 0x1a18: 0x0311, 0x1a19: 0x00b1, 0x1a1a: 0x0319, 0x1a1b: 0x0101, 0x1a1c: 0x0321, 0x1a1d: 0x0329, + 0x1a1e: 0x0051, 0x1a1f: 0x0339, 0x1a20: 0x0751, 0x1a21: 0x00b9, 0x1a22: 0x0089, 0x1a23: 0x0341, + 0x1a24: 0x0349, 0x1a25: 0x0391, 0x1a26: 0x00c1, 0x1a27: 0x0109, 0x1a28: 0x00c9, 0x1a29: 0x04b1, + 0x1a2a: 0x0019, 0x1a2b: 0x02e9, 0x1a2c: 0x03d9, 0x1a2d: 0x02f1, 0x1a2e: 0x02f9, 0x1a2f: 0x03f1, + 0x1a30: 0x0309, 0x1a31: 0x00a9, 0x1a32: 0x0311, 0x1a33: 0x00b1, 0x1a34: 0x0319, 0x1a35: 0x0101, + 0x1a36: 0x0321, 0x1a37: 0x0329, 0x1a38: 0x0051, 0x1a39: 0x0339, 0x1a3a: 0x0751, 0x1a3b: 0x00b9, + 0x1a3c: 0x0089, 0x1a3d: 0x0341, 0x1a3e: 0x0349, 0x1a3f: 0x0391, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x00c1, 0x1a41: 0x0109, 0x1a42: 0x00c9, 
0x1a43: 0x04b1, 0x1a44: 0x0019, 0x1a45: 0x02e9, + 0x1a46: 0x0040, 0x1a47: 0x02f1, 0x1a48: 0x02f9, 0x1a49: 0x03f1, 0x1a4a: 0x0309, 0x1a4b: 0x0040, + 0x1a4c: 0x0040, 0x1a4d: 0x00b1, 0x1a4e: 0x0319, 0x1a4f: 0x0101, 0x1a50: 0x0321, 0x1a51: 0x0329, + 0x1a52: 0x0051, 0x1a53: 0x0339, 0x1a54: 0x0751, 0x1a55: 0x0040, 0x1a56: 0x0089, 0x1a57: 0x0341, + 0x1a58: 0x0349, 0x1a59: 0x0391, 0x1a5a: 0x00c1, 0x1a5b: 0x0109, 0x1a5c: 0x00c9, 0x1a5d: 0x0040, + 0x1a5e: 0x0019, 0x1a5f: 0x02e9, 0x1a60: 0x03d9, 0x1a61: 0x02f1, 0x1a62: 0x02f9, 0x1a63: 0x03f1, + 0x1a64: 0x0309, 0x1a65: 0x00a9, 0x1a66: 0x0311, 0x1a67: 0x00b1, 0x1a68: 0x0319, 0x1a69: 0x0101, + 0x1a6a: 0x0321, 0x1a6b: 0x0329, 0x1a6c: 0x0051, 0x1a6d: 0x0339, 0x1a6e: 0x0751, 0x1a6f: 0x00b9, + 0x1a70: 0x0089, 0x1a71: 0x0341, 0x1a72: 0x0349, 0x1a73: 0x0391, 0x1a74: 0x00c1, 0x1a75: 0x0109, + 0x1a76: 0x00c9, 0x1a77: 0x04b1, 0x1a78: 0x0019, 0x1a79: 0x02e9, 0x1a7a: 0x0040, 0x1a7b: 0x02f1, + 0x1a7c: 0x02f9, 0x1a7d: 0x03f1, 0x1a7e: 0x0309, 0x1a7f: 0x0040, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0311, 0x1a81: 0x00b1, 0x1a82: 0x0319, 0x1a83: 0x0101, 0x1a84: 0x0321, 0x1a85: 0x0040, + 0x1a86: 0x0051, 0x1a87: 0x0040, 0x1a88: 0x0040, 0x1a89: 0x0040, 0x1a8a: 0x0089, 0x1a8b: 0x0341, + 0x1a8c: 0x0349, 0x1a8d: 0x0391, 0x1a8e: 0x00c1, 0x1a8f: 0x0109, 0x1a90: 0x00c9, 0x1a91: 0x0040, + 0x1a92: 0x0019, 0x1a93: 0x02e9, 0x1a94: 0x03d9, 0x1a95: 0x02f1, 0x1a96: 0x02f9, 0x1a97: 0x03f1, + 0x1a98: 0x0309, 0x1a99: 0x00a9, 0x1a9a: 0x0311, 0x1a9b: 0x00b1, 0x1a9c: 0x0319, 0x1a9d: 0x0101, + 0x1a9e: 0x0321, 0x1a9f: 0x0329, 0x1aa0: 0x0051, 0x1aa1: 0x0339, 0x1aa2: 0x0751, 0x1aa3: 0x00b9, + 0x1aa4: 0x0089, 0x1aa5: 0x0341, 0x1aa6: 0x0349, 0x1aa7: 0x0391, 0x1aa8: 0x00c1, 0x1aa9: 0x0109, + 0x1aaa: 0x00c9, 0x1aab: 0x04b1, 0x1aac: 0x0019, 0x1aad: 0x02e9, 0x1aae: 0x03d9, 0x1aaf: 0x02f1, + 0x1ab0: 0x02f9, 0x1ab1: 0x03f1, 0x1ab2: 0x0309, 0x1ab3: 0x00a9, 0x1ab4: 0x0311, 0x1ab5: 0x00b1, + 0x1ab6: 0x0319, 0x1ab7: 0x0101, 0x1ab8: 0x0321, 0x1ab9: 0x0329, 0x1aba: 0x0051, 0x1abb: 
0x0339, + 0x1abc: 0x0751, 0x1abd: 0x00b9, 0x1abe: 0x0089, 0x1abf: 0x0341, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x0349, 0x1ac1: 0x0391, 0x1ac2: 0x00c1, 0x1ac3: 0x0109, 0x1ac4: 0x00c9, 0x1ac5: 0x04b1, + 0x1ac6: 0x0019, 0x1ac7: 0x02e9, 0x1ac8: 0x03d9, 0x1ac9: 0x02f1, 0x1aca: 0x02f9, 0x1acb: 0x03f1, + 0x1acc: 0x0309, 0x1acd: 0x00a9, 0x1ace: 0x0311, 0x1acf: 0x00b1, 0x1ad0: 0x0319, 0x1ad1: 0x0101, + 0x1ad2: 0x0321, 0x1ad3: 0x0329, 0x1ad4: 0x0051, 0x1ad5: 0x0339, 0x1ad6: 0x0751, 0x1ad7: 0x00b9, + 0x1ad8: 0x0089, 0x1ad9: 0x0341, 0x1ada: 0x0349, 0x1adb: 0x0391, 0x1adc: 0x00c1, 0x1add: 0x0109, + 0x1ade: 0x00c9, 0x1adf: 0x04b1, 0x1ae0: 0x0019, 0x1ae1: 0x02e9, 0x1ae2: 0x03d9, 0x1ae3: 0x02f1, + 0x1ae4: 0x02f9, 0x1ae5: 0x03f1, 0x1ae6: 0x0309, 0x1ae7: 0x00a9, 0x1ae8: 0x0311, 0x1ae9: 0x00b1, + 0x1aea: 0x0319, 0x1aeb: 0x0101, 0x1aec: 0x0321, 0x1aed: 0x0329, 0x1aee: 0x0051, 0x1aef: 0x0339, + 0x1af0: 0x0751, 0x1af1: 0x00b9, 0x1af2: 0x0089, 0x1af3: 0x0341, 0x1af4: 0x0349, 0x1af5: 0x0391, + 0x1af6: 0x00c1, 0x1af7: 0x0109, 0x1af8: 0x00c9, 0x1af9: 0x04b1, 0x1afa: 0x0019, 0x1afb: 0x02e9, + 0x1afc: 0x03d9, 0x1afd: 0x02f1, 0x1afe: 0x02f9, 0x1aff: 0x03f1, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x0309, 0x1b01: 0x00a9, 0x1b02: 0x0311, 0x1b03: 0x00b1, 0x1b04: 0x0319, 0x1b05: 0x0101, + 0x1b06: 0x0321, 0x1b07: 0x0329, 0x1b08: 0x0051, 0x1b09: 0x0339, 0x1b0a: 0x0751, 0x1b0b: 0x00b9, + 0x1b0c: 0x0089, 0x1b0d: 0x0341, 0x1b0e: 0x0349, 0x1b0f: 0x0391, 0x1b10: 0x00c1, 0x1b11: 0x0109, + 0x1b12: 0x00c9, 0x1b13: 0x04b1, 0x1b14: 0x0019, 0x1b15: 0x02e9, 0x1b16: 0x03d9, 0x1b17: 0x02f1, + 0x1b18: 0x02f9, 0x1b19: 0x03f1, 0x1b1a: 0x0309, 0x1b1b: 0x00a9, 0x1b1c: 0x0311, 0x1b1d: 0x00b1, + 0x1b1e: 0x0319, 0x1b1f: 0x0101, 0x1b20: 0x0321, 0x1b21: 0x0329, 0x1b22: 0x0051, 0x1b23: 0x0339, + 0x1b24: 0x0751, 0x1b25: 0x00b9, 0x1b26: 0x0089, 0x1b27: 0x0341, 0x1b28: 0x0349, 0x1b29: 0x0391, + 0x1b2a: 0x00c1, 0x1b2b: 0x0109, 0x1b2c: 0x00c9, 0x1b2d: 0x04b1, 0x1b2e: 0x0019, 0x1b2f: 0x02e9, + 0x1b30: 0x03d9, 0x1b31: 0x02f1, 
0x1b32: 0x02f9, 0x1b33: 0x03f1, 0x1b34: 0x0309, 0x1b35: 0x00a9, + 0x1b36: 0x0311, 0x1b37: 0x00b1, 0x1b38: 0x0319, 0x1b39: 0x0101, 0x1b3a: 0x0321, 0x1b3b: 0x0329, + 0x1b3c: 0x0051, 0x1b3d: 0x0339, 0x1b3e: 0x0751, 0x1b3f: 0x00b9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x0089, 0x1b41: 0x0341, 0x1b42: 0x0349, 0x1b43: 0x0391, 0x1b44: 0x00c1, 0x1b45: 0x0109, + 0x1b46: 0x00c9, 0x1b47: 0x04b1, 0x1b48: 0x0019, 0x1b49: 0x02e9, 0x1b4a: 0x03d9, 0x1b4b: 0x02f1, + 0x1b4c: 0x02f9, 0x1b4d: 0x03f1, 0x1b4e: 0x0309, 0x1b4f: 0x00a9, 0x1b50: 0x0311, 0x1b51: 0x00b1, + 0x1b52: 0x0319, 0x1b53: 0x0101, 0x1b54: 0x0321, 0x1b55: 0x0329, 0x1b56: 0x0051, 0x1b57: 0x0339, + 0x1b58: 0x0751, 0x1b59: 0x00b9, 0x1b5a: 0x0089, 0x1b5b: 0x0341, 0x1b5c: 0x0349, 0x1b5d: 0x0391, + 0x1b5e: 0x00c1, 0x1b5f: 0x0109, 0x1b60: 0x00c9, 0x1b61: 0x04b1, 0x1b62: 0x0019, 0x1b63: 0x02e9, + 0x1b64: 0x03d9, 0x1b65: 0x02f1, 0x1b66: 0x02f9, 0x1b67: 0x03f1, 0x1b68: 0x0309, 0x1b69: 0x00a9, + 0x1b6a: 0x0311, 0x1b6b: 0x00b1, 0x1b6c: 0x0319, 0x1b6d: 0x0101, 0x1b6e: 0x0321, 0x1b6f: 0x0329, + 0x1b70: 0x0051, 0x1b71: 0x0339, 0x1b72: 0x0751, 0x1b73: 0x00b9, 0x1b74: 0x0089, 0x1b75: 0x0341, + 0x1b76: 0x0349, 0x1b77: 0x0391, 0x1b78: 0x00c1, 0x1b79: 0x0109, 0x1b7a: 0x00c9, 0x1b7b: 0x04b1, + 0x1b7c: 0x0019, 0x1b7d: 0x02e9, 0x1b7e: 0x03d9, 0x1b7f: 0x02f1, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x02f9, 0x1b81: 0x03f1, 0x1b82: 0x0309, 0x1b83: 0x00a9, 0x1b84: 0x0311, 0x1b85: 0x00b1, + 0x1b86: 0x0319, 0x1b87: 0x0101, 0x1b88: 0x0321, 0x1b89: 0x0329, 0x1b8a: 0x0051, 0x1b8b: 0x0339, + 0x1b8c: 0x0751, 0x1b8d: 0x00b9, 0x1b8e: 0x0089, 0x1b8f: 0x0341, 0x1b90: 0x0349, 0x1b91: 0x0391, + 0x1b92: 0x00c1, 0x1b93: 0x0109, 0x1b94: 0x00c9, 0x1b95: 0x04b1, 0x1b96: 0x0019, 0x1b97: 0x02e9, + 0x1b98: 0x03d9, 0x1b99: 0x02f1, 0x1b9a: 0x02f9, 0x1b9b: 0x03f1, 0x1b9c: 0x0309, 0x1b9d: 0x00a9, + 0x1b9e: 0x0311, 0x1b9f: 0x00b1, 0x1ba0: 0x0319, 0x1ba1: 0x0101, 0x1ba2: 0x0321, 0x1ba3: 0x0329, + 0x1ba4: 0x0051, 0x1ba5: 0x0339, 0x1ba6: 0x0751, 0x1ba7: 0x00b9, 0x1ba8: 
0x0089, 0x1ba9: 0x0341, + 0x1baa: 0x0349, 0x1bab: 0x0391, 0x1bac: 0x00c1, 0x1bad: 0x0109, 0x1bae: 0x00c9, 0x1baf: 0x04b1, + 0x1bb0: 0x0019, 0x1bb1: 0x02e9, 0x1bb2: 0x03d9, 0x1bb3: 0x02f1, 0x1bb4: 0x02f9, 0x1bb5: 0x03f1, + 0x1bb6: 0x0309, 0x1bb7: 0x00a9, 0x1bb8: 0x0311, 0x1bb9: 0x00b1, 0x1bba: 0x0319, 0x1bbb: 0x0101, + 0x1bbc: 0x0321, 0x1bbd: 0x0329, 0x1bbe: 0x0051, 0x1bbf: 0x0339, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x0751, 0x1bc1: 0x00b9, 0x1bc2: 0x0089, 0x1bc3: 0x0341, 0x1bc4: 0x0349, 0x1bc5: 0x0391, + 0x1bc6: 0x00c1, 0x1bc7: 0x0109, 0x1bc8: 0x00c9, 0x1bc9: 0x04b1, 0x1bca: 0x0019, 0x1bcb: 0x02e9, + 0x1bcc: 0x03d9, 0x1bcd: 0x02f1, 0x1bce: 0x02f9, 0x1bcf: 0x03f1, 0x1bd0: 0x0309, 0x1bd1: 0x00a9, + 0x1bd2: 0x0311, 0x1bd3: 0x00b1, 0x1bd4: 0x0319, 0x1bd5: 0x0101, 0x1bd6: 0x0321, 0x1bd7: 0x0329, + 0x1bd8: 0x0051, 0x1bd9: 0x0339, 0x1bda: 0x0751, 0x1bdb: 0x00b9, 0x1bdc: 0x0089, 0x1bdd: 0x0341, + 0x1bde: 0x0349, 0x1bdf: 0x0391, 0x1be0: 0x00c1, 0x1be1: 0x0109, 0x1be2: 0x00c9, 0x1be3: 0x04b1, + 0x1be4: 0x23e1, 0x1be5: 0x23e9, 0x1be6: 0x0040, 0x1be7: 0x0040, 0x1be8: 0x23f1, 0x1be9: 0x0399, + 0x1bea: 0x03a1, 0x1beb: 0x03a9, 0x1bec: 0x23f9, 0x1bed: 0x2401, 0x1bee: 0x2409, 0x1bef: 0x04d1, + 0x1bf0: 0x05f9, 0x1bf1: 0x2411, 0x1bf2: 0x2419, 0x1bf3: 0x2421, 0x1bf4: 0x2429, 0x1bf5: 0x2431, + 0x1bf6: 0x2439, 0x1bf7: 0x0799, 0x1bf8: 0x03c1, 0x1bf9: 0x04d1, 0x1bfa: 0x2441, 0x1bfb: 0x2449, + 0x1bfc: 0x2451, 0x1bfd: 0x03b1, 0x1bfe: 0x03b9, 0x1bff: 0x2459, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x0769, 0x1c01: 0x2461, 0x1c02: 0x23f1, 0x1c03: 0x0399, 0x1c04: 0x03a1, 0x1c05: 0x03a9, + 0x1c06: 0x23f9, 0x1c07: 0x2401, 0x1c08: 0x2409, 0x1c09: 0x04d1, 0x1c0a: 0x05f9, 0x1c0b: 0x2411, + 0x1c0c: 0x2419, 0x1c0d: 0x2421, 0x1c0e: 0x2429, 0x1c0f: 0x2431, 0x1c10: 0x2439, 0x1c11: 0x0799, + 0x1c12: 0x03c1, 0x1c13: 0x2441, 0x1c14: 0x2441, 0x1c15: 0x2449, 0x1c16: 0x2451, 0x1c17: 0x03b1, + 0x1c18: 0x03b9, 0x1c19: 0x2459, 0x1c1a: 0x0769, 0x1c1b: 0x2469, 0x1c1c: 0x23f9, 0x1c1d: 0x04d1, + 0x1c1e: 0x2411, 
0x1c1f: 0x03b1, 0x1c20: 0x03c1, 0x1c21: 0x0799, 0x1c22: 0x23f1, 0x1c23: 0x0399, + 0x1c24: 0x03a1, 0x1c25: 0x03a9, 0x1c26: 0x23f9, 0x1c27: 0x2401, 0x1c28: 0x2409, 0x1c29: 0x04d1, + 0x1c2a: 0x05f9, 0x1c2b: 0x2411, 0x1c2c: 0x2419, 0x1c2d: 0x2421, 0x1c2e: 0x2429, 0x1c2f: 0x2431, + 0x1c30: 0x2439, 0x1c31: 0x0799, 0x1c32: 0x03c1, 0x1c33: 0x04d1, 0x1c34: 0x2441, 0x1c35: 0x2449, + 0x1c36: 0x2451, 0x1c37: 0x03b1, 0x1c38: 0x03b9, 0x1c39: 0x2459, 0x1c3a: 0x0769, 0x1c3b: 0x2461, + 0x1c3c: 0x23f1, 0x1c3d: 0x0399, 0x1c3e: 0x03a1, 0x1c3f: 0x03a9, + // Block 0x71, offset 0x1c40 + 0x1c40: 0x23f9, 0x1c41: 0x2401, 0x1c42: 0x2409, 0x1c43: 0x04d1, 0x1c44: 0x05f9, 0x1c45: 0x2411, + 0x1c46: 0x2419, 0x1c47: 0x2421, 0x1c48: 0x2429, 0x1c49: 0x2431, 0x1c4a: 0x2439, 0x1c4b: 0x0799, + 0x1c4c: 0x03c1, 0x1c4d: 0x2441, 0x1c4e: 0x2441, 0x1c4f: 0x2449, 0x1c50: 0x2451, 0x1c51: 0x03b1, + 0x1c52: 0x03b9, 0x1c53: 0x2459, 0x1c54: 0x0769, 0x1c55: 0x2469, 0x1c56: 0x23f9, 0x1c57: 0x04d1, + 0x1c58: 0x2411, 0x1c59: 0x03b1, 0x1c5a: 0x03c1, 0x1c5b: 0x0799, 0x1c5c: 0x23f1, 0x1c5d: 0x0399, + 0x1c5e: 0x03a1, 0x1c5f: 0x03a9, 0x1c60: 0x23f9, 0x1c61: 0x2401, 0x1c62: 0x2409, 0x1c63: 0x04d1, + 0x1c64: 0x05f9, 0x1c65: 0x2411, 0x1c66: 0x2419, 0x1c67: 0x2421, 0x1c68: 0x2429, 0x1c69: 0x2431, + 0x1c6a: 0x2439, 0x1c6b: 0x0799, 0x1c6c: 0x03c1, 0x1c6d: 0x04d1, 0x1c6e: 0x2441, 0x1c6f: 0x2449, + 0x1c70: 0x2451, 0x1c71: 0x03b1, 0x1c72: 0x03b9, 0x1c73: 0x2459, 0x1c74: 0x0769, 0x1c75: 0x2461, + 0x1c76: 0x23f1, 0x1c77: 0x0399, 0x1c78: 0x03a1, 0x1c79: 0x03a9, 0x1c7a: 0x23f9, 0x1c7b: 0x2401, + 0x1c7c: 0x2409, 0x1c7d: 0x04d1, 0x1c7e: 0x05f9, 0x1c7f: 0x2411, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x2419, 0x1c81: 0x2421, 0x1c82: 0x2429, 0x1c83: 0x2431, 0x1c84: 0x2439, 0x1c85: 0x0799, + 0x1c86: 0x03c1, 0x1c87: 0x2441, 0x1c88: 0x2441, 0x1c89: 0x2449, 0x1c8a: 0x2451, 0x1c8b: 0x03b1, + 0x1c8c: 0x03b9, 0x1c8d: 0x2459, 0x1c8e: 0x0769, 0x1c8f: 0x2469, 0x1c90: 0x23f9, 0x1c91: 0x04d1, + 0x1c92: 0x2411, 0x1c93: 0x03b1, 0x1c94: 0x03c1, 0x1c95: 
0x0799, 0x1c96: 0x23f1, 0x1c97: 0x0399, + 0x1c98: 0x03a1, 0x1c99: 0x03a9, 0x1c9a: 0x23f9, 0x1c9b: 0x2401, 0x1c9c: 0x2409, 0x1c9d: 0x04d1, + 0x1c9e: 0x05f9, 0x1c9f: 0x2411, 0x1ca0: 0x2419, 0x1ca1: 0x2421, 0x1ca2: 0x2429, 0x1ca3: 0x2431, + 0x1ca4: 0x2439, 0x1ca5: 0x0799, 0x1ca6: 0x03c1, 0x1ca7: 0x04d1, 0x1ca8: 0x2441, 0x1ca9: 0x2449, + 0x1caa: 0x2451, 0x1cab: 0x03b1, 0x1cac: 0x03b9, 0x1cad: 0x2459, 0x1cae: 0x0769, 0x1caf: 0x2461, + 0x1cb0: 0x23f1, 0x1cb1: 0x0399, 0x1cb2: 0x03a1, 0x1cb3: 0x03a9, 0x1cb4: 0x23f9, 0x1cb5: 0x2401, + 0x1cb6: 0x2409, 0x1cb7: 0x04d1, 0x1cb8: 0x05f9, 0x1cb9: 0x2411, 0x1cba: 0x2419, 0x1cbb: 0x2421, + 0x1cbc: 0x2429, 0x1cbd: 0x2431, 0x1cbe: 0x2439, 0x1cbf: 0x0799, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x03c1, 0x1cc1: 0x2441, 0x1cc2: 0x2441, 0x1cc3: 0x2449, 0x1cc4: 0x2451, 0x1cc5: 0x03b1, + 0x1cc6: 0x03b9, 0x1cc7: 0x2459, 0x1cc8: 0x0769, 0x1cc9: 0x2469, 0x1cca: 0x23f9, 0x1ccb: 0x04d1, + 0x1ccc: 0x2411, 0x1ccd: 0x03b1, 0x1cce: 0x03c1, 0x1ccf: 0x0799, 0x1cd0: 0x23f1, 0x1cd1: 0x0399, + 0x1cd2: 0x03a1, 0x1cd3: 0x03a9, 0x1cd4: 0x23f9, 0x1cd5: 0x2401, 0x1cd6: 0x2409, 0x1cd7: 0x04d1, + 0x1cd8: 0x05f9, 0x1cd9: 0x2411, 0x1cda: 0x2419, 0x1cdb: 0x2421, 0x1cdc: 0x2429, 0x1cdd: 0x2431, + 0x1cde: 0x2439, 0x1cdf: 0x0799, 0x1ce0: 0x03c1, 0x1ce1: 0x04d1, 0x1ce2: 0x2441, 0x1ce3: 0x2449, + 0x1ce4: 0x2451, 0x1ce5: 0x03b1, 0x1ce6: 0x03b9, 0x1ce7: 0x2459, 0x1ce8: 0x0769, 0x1ce9: 0x2461, + 0x1cea: 0x23f1, 0x1ceb: 0x0399, 0x1cec: 0x03a1, 0x1ced: 0x03a9, 0x1cee: 0x23f9, 0x1cef: 0x2401, + 0x1cf0: 0x2409, 0x1cf1: 0x04d1, 0x1cf2: 0x05f9, 0x1cf3: 0x2411, 0x1cf4: 0x2419, 0x1cf5: 0x2421, + 0x1cf6: 0x2429, 0x1cf7: 0x2431, 0x1cf8: 0x2439, 0x1cf9: 0x0799, 0x1cfa: 0x03c1, 0x1cfb: 0x2441, + 0x1cfc: 0x2441, 0x1cfd: 0x2449, 0x1cfe: 0x2451, 0x1cff: 0x03b1, + // Block 0x74, offset 0x1d00 + 0x1d00: 0x03b9, 0x1d01: 0x2459, 0x1d02: 0x0769, 0x1d03: 0x2469, 0x1d04: 0x23f9, 0x1d05: 0x04d1, + 0x1d06: 0x2411, 0x1d07: 0x03b1, 0x1d08: 0x03c1, 0x1d09: 0x0799, 0x1d0a: 0x2471, 0x1d0b: 0x2471, + 
0x1d0c: 0x0040, 0x1d0d: 0x0040, 0x1d0e: 0x06e1, 0x1d0f: 0x0049, 0x1d10: 0x0029, 0x1d11: 0x0031, + 0x1d12: 0x06e9, 0x1d13: 0x06f1, 0x1d14: 0x06f9, 0x1d15: 0x0701, 0x1d16: 0x0709, 0x1d17: 0x0711, + 0x1d18: 0x06e1, 0x1d19: 0x0049, 0x1d1a: 0x0029, 0x1d1b: 0x0031, 0x1d1c: 0x06e9, 0x1d1d: 0x06f1, + 0x1d1e: 0x06f9, 0x1d1f: 0x0701, 0x1d20: 0x0709, 0x1d21: 0x0711, 0x1d22: 0x06e1, 0x1d23: 0x0049, + 0x1d24: 0x0029, 0x1d25: 0x0031, 0x1d26: 0x06e9, 0x1d27: 0x06f1, 0x1d28: 0x06f9, 0x1d29: 0x0701, + 0x1d2a: 0x0709, 0x1d2b: 0x0711, 0x1d2c: 0x06e1, 0x1d2d: 0x0049, 0x1d2e: 0x0029, 0x1d2f: 0x0031, + 0x1d30: 0x06e9, 0x1d31: 0x06f1, 0x1d32: 0x06f9, 0x1d33: 0x0701, 0x1d34: 0x0709, 0x1d35: 0x0711, + 0x1d36: 0x06e1, 0x1d37: 0x0049, 0x1d38: 0x0029, 0x1d39: 0x0031, 0x1d3a: 0x06e9, 0x1d3b: 0x06f1, + 0x1d3c: 0x06f9, 0x1d3d: 0x0701, 0x1d3e: 0x0709, 0x1d3f: 0x0711, + // Block 0x75, offset 0x1d40 + 0x1d40: 0x3308, 0x1d41: 0x3308, 0x1d42: 0x3308, 0x1d43: 0x3308, 0x1d44: 0x3308, 0x1d45: 0x3308, + 0x1d46: 0x3308, 0x1d47: 0x0040, 0x1d48: 0x3308, 0x1d49: 0x3308, 0x1d4a: 0x3308, 0x1d4b: 0x3308, + 0x1d4c: 0x3308, 0x1d4d: 0x3308, 0x1d4e: 0x3308, 0x1d4f: 0x3308, 0x1d50: 0x3308, 0x1d51: 0x3308, + 0x1d52: 0x3308, 0x1d53: 0x3308, 0x1d54: 0x3308, 0x1d55: 0x3308, 0x1d56: 0x3308, 0x1d57: 0x3308, + 0x1d58: 0x3308, 0x1d59: 0x0040, 0x1d5a: 0x0040, 0x1d5b: 0x3308, 0x1d5c: 0x3308, 0x1d5d: 0x3308, + 0x1d5e: 0x3308, 0x1d5f: 0x3308, 0x1d60: 0x3308, 0x1d61: 0x3308, 0x1d62: 0x0040, 0x1d63: 0x3308, + 0x1d64: 0x3308, 0x1d65: 0x0040, 0x1d66: 0x3308, 0x1d67: 0x3308, 0x1d68: 0x3308, 0x1d69: 0x3308, + 0x1d6a: 0x3308, 0x1d6b: 0x0040, 0x1d6c: 0x0040, 0x1d6d: 0x0040, 0x1d6e: 0x0040, 0x1d6f: 0x0040, + 0x1d70: 0x2479, 0x1d71: 0x2481, 0x1d72: 0x02a9, 0x1d73: 0x2489, 0x1d74: 0x02b1, 0x1d75: 0x2491, + 0x1d76: 0x2499, 0x1d77: 0x24a1, 0x1d78: 0x24a9, 0x1d79: 0x24b1, 0x1d7a: 0x24b9, 0x1d7b: 0x24c1, + 0x1d7c: 0x02b9, 0x1d7d: 0x24c9, 0x1d7e: 0x24d1, 0x1d7f: 0x02c1, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x02c9, 0x1d81: 0x24d9, 0x1d82: 
0x24e1, 0x1d83: 0x24e9, 0x1d84: 0x24f1, 0x1d85: 0x24f9, + 0x1d86: 0x2501, 0x1d87: 0x2509, 0x1d88: 0x2511, 0x1d89: 0x2519, 0x1d8a: 0x2521, 0x1d8b: 0x2529, + 0x1d8c: 0x2531, 0x1d8d: 0x2539, 0x1d8e: 0x2541, 0x1d8f: 0x2549, 0x1d90: 0x2551, 0x1d91: 0x2479, + 0x1d92: 0x2481, 0x1d93: 0x02a9, 0x1d94: 0x2489, 0x1d95: 0x02b1, 0x1d96: 0x2491, 0x1d97: 0x2499, + 0x1d98: 0x24a1, 0x1d99: 0x24a9, 0x1d9a: 0x24b1, 0x1d9b: 0x24b9, 0x1d9c: 0x02b9, 0x1d9d: 0x24c9, + 0x1d9e: 0x02c1, 0x1d9f: 0x24d9, 0x1da0: 0x24e1, 0x1da1: 0x24e9, 0x1da2: 0x24f1, 0x1da3: 0x24f9, + 0x1da4: 0x2501, 0x1da5: 0x02d1, 0x1da6: 0x2509, 0x1da7: 0x2559, 0x1da8: 0x2531, 0x1da9: 0x2561, + 0x1daa: 0x2569, 0x1dab: 0x2571, 0x1dac: 0x2579, 0x1dad: 0x2581, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0040, 0x1db1: 0x0040, 0x1db2: 0x0040, 0x1db3: 0x0040, 0x1db4: 0x0040, 0x1db5: 0x0040, + 0x1db6: 0x0040, 0x1db7: 0x0040, 0x1db8: 0x0040, 0x1db9: 0x0040, 0x1dba: 0x0040, 0x1dbb: 0x0040, + 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xe115, 0x1dc1: 0xe115, 0x1dc2: 0xe135, 0x1dc3: 0xe135, 0x1dc4: 0xe115, 0x1dc5: 0xe115, + 0x1dc6: 0xe175, 0x1dc7: 0xe175, 0x1dc8: 0xe115, 0x1dc9: 0xe115, 0x1dca: 0xe135, 0x1dcb: 0xe135, + 0x1dcc: 0xe115, 0x1dcd: 0xe115, 0x1dce: 0xe1f5, 0x1dcf: 0xe1f5, 0x1dd0: 0xe115, 0x1dd1: 0xe115, + 0x1dd2: 0xe135, 0x1dd3: 0xe135, 0x1dd4: 0xe115, 0x1dd5: 0xe115, 0x1dd6: 0xe175, 0x1dd7: 0xe175, + 0x1dd8: 0xe115, 0x1dd9: 0xe115, 0x1dda: 0xe135, 0x1ddb: 0xe135, 0x1ddc: 0xe115, 0x1ddd: 0xe115, + 0x1dde: 0x8ca5, 0x1ddf: 0x8ca5, 0x1de0: 0x04b5, 0x1de1: 0x04b5, 0x1de2: 0x0a08, 0x1de3: 0x0a08, + 0x1de4: 0x0a08, 0x1de5: 0x0a08, 0x1de6: 0x0a08, 0x1de7: 0x0a08, 0x1de8: 0x0a08, 0x1de9: 0x0a08, + 0x1dea: 0x0a08, 0x1deb: 0x0a08, 0x1dec: 0x0a08, 0x1ded: 0x0a08, 0x1dee: 0x0a08, 0x1def: 0x0a08, + 0x1df0: 0x0a08, 0x1df1: 0x0a08, 0x1df2: 0x0a08, 0x1df3: 0x0a08, 0x1df4: 0x0a08, 0x1df5: 0x0a08, + 0x1df6: 0x0a08, 0x1df7: 0x0a08, 0x1df8: 0x0a08, 0x1df9: 0x0a08, 0x1dfa: 0x0a08, 
0x1dfb: 0x0a08, + 0x1dfc: 0x0a08, 0x1dfd: 0x0a08, 0x1dfe: 0x0a08, 0x1dff: 0x0a08, + // Block 0x78, offset 0x1e00 + 0x1e00: 0x20b1, 0x1e01: 0x20b9, 0x1e02: 0x20d9, 0x1e03: 0x20f1, 0x1e04: 0x0040, 0x1e05: 0x2189, + 0x1e06: 0x2109, 0x1e07: 0x20e1, 0x1e08: 0x2131, 0x1e09: 0x2191, 0x1e0a: 0x2161, 0x1e0b: 0x2169, + 0x1e0c: 0x2171, 0x1e0d: 0x2179, 0x1e0e: 0x2111, 0x1e0f: 0x2141, 0x1e10: 0x2151, 0x1e11: 0x2121, + 0x1e12: 0x2159, 0x1e13: 0x2101, 0x1e14: 0x2119, 0x1e15: 0x20c9, 0x1e16: 0x20d1, 0x1e17: 0x20e9, + 0x1e18: 0x20f9, 0x1e19: 0x2129, 0x1e1a: 0x2139, 0x1e1b: 0x2149, 0x1e1c: 0x2589, 0x1e1d: 0x1689, + 0x1e1e: 0x2591, 0x1e1f: 0x2599, 0x1e20: 0x0040, 0x1e21: 0x20b9, 0x1e22: 0x20d9, 0x1e23: 0x0040, + 0x1e24: 0x2181, 0x1e25: 0x0040, 0x1e26: 0x0040, 0x1e27: 0x20e1, 0x1e28: 0x0040, 0x1e29: 0x2191, + 0x1e2a: 0x2161, 0x1e2b: 0x2169, 0x1e2c: 0x2171, 0x1e2d: 0x2179, 0x1e2e: 0x2111, 0x1e2f: 0x2141, + 0x1e30: 0x2151, 0x1e31: 0x2121, 0x1e32: 0x2159, 0x1e33: 0x0040, 0x1e34: 0x2119, 0x1e35: 0x20c9, + 0x1e36: 0x20d1, 0x1e37: 0x20e9, 0x1e38: 0x0040, 0x1e39: 0x2129, 0x1e3a: 0x0040, 0x1e3b: 0x2149, + 0x1e3c: 0x0040, 0x1e3d: 0x0040, 0x1e3e: 0x0040, 0x1e3f: 0x0040, + // Block 0x79, offset 0x1e40 + 0x1e40: 0x0040, 0x1e41: 0x0040, 0x1e42: 0x20d9, 0x1e43: 0x0040, 0x1e44: 0x0040, 0x1e45: 0x0040, + 0x1e46: 0x0040, 0x1e47: 0x20e1, 0x1e48: 0x0040, 0x1e49: 0x2191, 0x1e4a: 0x0040, 0x1e4b: 0x2169, + 0x1e4c: 0x0040, 0x1e4d: 0x2179, 0x1e4e: 0x2111, 0x1e4f: 0x2141, 0x1e50: 0x0040, 0x1e51: 0x2121, + 0x1e52: 0x2159, 0x1e53: 0x0040, 0x1e54: 0x2119, 0x1e55: 0x0040, 0x1e56: 0x0040, 0x1e57: 0x20e9, + 0x1e58: 0x0040, 0x1e59: 0x2129, 0x1e5a: 0x0040, 0x1e5b: 0x2149, 0x1e5c: 0x0040, 0x1e5d: 0x1689, + 0x1e5e: 0x0040, 0x1e5f: 0x2599, 0x1e60: 0x0040, 0x1e61: 0x20b9, 0x1e62: 0x20d9, 0x1e63: 0x0040, + 0x1e64: 0x2181, 0x1e65: 0x0040, 0x1e66: 0x0040, 0x1e67: 0x20e1, 0x1e68: 0x2131, 0x1e69: 0x2191, + 0x1e6a: 0x2161, 0x1e6b: 0x0040, 0x1e6c: 0x2171, 0x1e6d: 0x2179, 0x1e6e: 0x2111, 0x1e6f: 0x2141, + 0x1e70: 0x2151, 0x1e71: 
0x2121, 0x1e72: 0x2159, 0x1e73: 0x0040, 0x1e74: 0x2119, 0x1e75: 0x20c9, + 0x1e76: 0x20d1, 0x1e77: 0x20e9, 0x1e78: 0x0040, 0x1e79: 0x2129, 0x1e7a: 0x2139, 0x1e7b: 0x2149, + 0x1e7c: 0x2589, 0x1e7d: 0x0040, 0x1e7e: 0x2591, 0x1e7f: 0x0040, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0x20b1, 0x1e81: 0x20b9, 0x1e82: 0x20d9, 0x1e83: 0x20f1, 0x1e84: 0x2181, 0x1e85: 0x2189, + 0x1e86: 0x2109, 0x1e87: 0x20e1, 0x1e88: 0x2131, 0x1e89: 0x2191, 0x1e8a: 0x0040, 0x1e8b: 0x2169, + 0x1e8c: 0x2171, 0x1e8d: 0x2179, 0x1e8e: 0x2111, 0x1e8f: 0x2141, 0x1e90: 0x2151, 0x1e91: 0x2121, + 0x1e92: 0x2159, 0x1e93: 0x2101, 0x1e94: 0x2119, 0x1e95: 0x20c9, 0x1e96: 0x20d1, 0x1e97: 0x20e9, + 0x1e98: 0x20f9, 0x1e99: 0x2129, 0x1e9a: 0x2139, 0x1e9b: 0x2149, 0x1e9c: 0x0040, 0x1e9d: 0x0040, + 0x1e9e: 0x0040, 0x1e9f: 0x0040, 0x1ea0: 0x0040, 0x1ea1: 0x20b9, 0x1ea2: 0x20d9, 0x1ea3: 0x20f1, + 0x1ea4: 0x0040, 0x1ea5: 0x2189, 0x1ea6: 0x2109, 0x1ea7: 0x20e1, 0x1ea8: 0x2131, 0x1ea9: 0x2191, + 0x1eaa: 0x0040, 0x1eab: 0x2169, 0x1eac: 0x2171, 0x1ead: 0x2179, 0x1eae: 0x2111, 0x1eaf: 0x2141, + 0x1eb0: 0x2151, 0x1eb1: 0x2121, 0x1eb2: 0x2159, 0x1eb3: 0x2101, 0x1eb4: 0x2119, 0x1eb5: 0x20c9, + 0x1eb6: 0x20d1, 0x1eb7: 0x20e9, 0x1eb8: 0x20f9, 0x1eb9: 0x2129, 0x1eba: 0x2139, 0x1ebb: 0x2149, + 0x1ebc: 0x0040, 0x1ebd: 0x0040, 0x1ebe: 0x0040, 0x1ebf: 0x0040, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0x0040, 0x1ec1: 0x25a2, 0x1ec2: 0x25aa, 0x1ec3: 0x25b2, 0x1ec4: 0x25ba, 0x1ec5: 0x25c2, + 0x1ec6: 0x25ca, 0x1ec7: 0x25d2, 0x1ec8: 0x25da, 0x1ec9: 0x25e2, 0x1eca: 0x25ea, 0x1ecb: 0x0018, + 0x1ecc: 0x0018, 0x1ecd: 0x0018, 0x1ece: 0x0018, 0x1ecf: 0x0018, 0x1ed0: 0x25f2, 0x1ed1: 0x25fa, + 0x1ed2: 0x2602, 0x1ed3: 0x260a, 0x1ed4: 0x2612, 0x1ed5: 0x261a, 0x1ed6: 0x2622, 0x1ed7: 0x262a, + 0x1ed8: 0x2632, 0x1ed9: 0x263a, 0x1eda: 0x2642, 0x1edb: 0x264a, 0x1edc: 0x2652, 0x1edd: 0x265a, + 0x1ede: 0x2662, 0x1edf: 0x266a, 0x1ee0: 0x2672, 0x1ee1: 0x267a, 0x1ee2: 0x2682, 0x1ee3: 0x268a, + 0x1ee4: 0x2692, 0x1ee5: 0x269a, 0x1ee6: 0x26a2, 0x1ee7: 0x26aa, 
0x1ee8: 0x26b2, 0x1ee9: 0x26ba, + 0x1eea: 0x26c1, 0x1eeb: 0x03d9, 0x1eec: 0x00b9, 0x1eed: 0x1239, 0x1eee: 0x26c9, 0x1eef: 0x0018, + 0x1ef0: 0x0019, 0x1ef1: 0x02e9, 0x1ef2: 0x03d9, 0x1ef3: 0x02f1, 0x1ef4: 0x02f9, 0x1ef5: 0x03f1, + 0x1ef6: 0x0309, 0x1ef7: 0x00a9, 0x1ef8: 0x0311, 0x1ef9: 0x00b1, 0x1efa: 0x0319, 0x1efb: 0x0101, + 0x1efc: 0x0321, 0x1efd: 0x0329, 0x1efe: 0x0051, 0x1eff: 0x0339, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0x0751, 0x1f01: 0x00b9, 0x1f02: 0x0089, 0x1f03: 0x0341, 0x1f04: 0x0349, 0x1f05: 0x0391, + 0x1f06: 0x00c1, 0x1f07: 0x0109, 0x1f08: 0x00c9, 0x1f09: 0x04b1, 0x1f0a: 0x26d1, 0x1f0b: 0x11f9, + 0x1f0c: 0x26d9, 0x1f0d: 0x04d9, 0x1f0e: 0x26e1, 0x1f0f: 0x26e9, 0x1f10: 0x0018, 0x1f11: 0x0018, + 0x1f12: 0x0018, 0x1f13: 0x0018, 0x1f14: 0x0018, 0x1f15: 0x0018, 0x1f16: 0x0018, 0x1f17: 0x0018, + 0x1f18: 0x0018, 0x1f19: 0x0018, 0x1f1a: 0x0018, 0x1f1b: 0x0018, 0x1f1c: 0x0018, 0x1f1d: 0x0018, + 0x1f1e: 0x0018, 0x1f1f: 0x0018, 0x1f20: 0x0018, 0x1f21: 0x0018, 0x1f22: 0x0018, 0x1f23: 0x0018, + 0x1f24: 0x0018, 0x1f25: 0x0018, 0x1f26: 0x0018, 0x1f27: 0x0018, 0x1f28: 0x0018, 0x1f29: 0x0018, + 0x1f2a: 0x26f1, 0x1f2b: 0x26f9, 0x1f2c: 0x2701, 0x1f2d: 0x0018, 0x1f2e: 0x0018, 0x1f2f: 0x0018, + 0x1f30: 0x0018, 0x1f31: 0x0018, 0x1f32: 0x0018, 0x1f33: 0x0018, 0x1f34: 0x0018, 0x1f35: 0x0018, + 0x1f36: 0x0018, 0x1f37: 0x0018, 0x1f38: 0x0018, 0x1f39: 0x0018, 0x1f3a: 0x0018, 0x1f3b: 0x0018, + 0x1f3c: 0x0018, 0x1f3d: 0x0018, 0x1f3e: 0x0018, 0x1f3f: 0x0018, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0x2711, 0x1f41: 0x2719, 0x1f42: 0x2721, 0x1f43: 0x0040, 0x1f44: 0x0040, 0x1f45: 0x0040, + 0x1f46: 0x0040, 0x1f47: 0x0040, 0x1f48: 0x0040, 0x1f49: 0x0040, 0x1f4a: 0x0040, 0x1f4b: 0x0040, + 0x1f4c: 0x0040, 0x1f4d: 0x0040, 0x1f4e: 0x0040, 0x1f4f: 0x0040, 0x1f50: 0x2729, 0x1f51: 0x2731, + 0x1f52: 0x2739, 0x1f53: 0x2741, 0x1f54: 0x2749, 0x1f55: 0x2751, 0x1f56: 0x2759, 0x1f57: 0x2761, + 0x1f58: 0x2769, 0x1f59: 0x2771, 0x1f5a: 0x2779, 0x1f5b: 0x2781, 0x1f5c: 0x2789, 0x1f5d: 0x2791, + 0x1f5e: 
0x2799, 0x1f5f: 0x27a1, 0x1f60: 0x27a9, 0x1f61: 0x27b1, 0x1f62: 0x27b9, 0x1f63: 0x27c1, + 0x1f64: 0x27c9, 0x1f65: 0x27d1, 0x1f66: 0x27d9, 0x1f67: 0x27e1, 0x1f68: 0x27e9, 0x1f69: 0x27f1, + 0x1f6a: 0x27f9, 0x1f6b: 0x2801, 0x1f6c: 0x2809, 0x1f6d: 0x2811, 0x1f6e: 0x2819, 0x1f6f: 0x2821, + 0x1f70: 0x2829, 0x1f71: 0x2831, 0x1f72: 0x2839, 0x1f73: 0x2841, 0x1f74: 0x2849, 0x1f75: 0x2851, + 0x1f76: 0x2859, 0x1f77: 0x2861, 0x1f78: 0x2869, 0x1f79: 0x2871, 0x1f7a: 0x2879, 0x1f7b: 0x2881, + 0x1f7c: 0x0040, 0x1f7d: 0x0040, 0x1f7e: 0x0040, 0x1f7f: 0x0040, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0x28e1, 0x1f81: 0x28e9, 0x1f82: 0x28f1, 0x1f83: 0x8cbd, 0x1f84: 0x28f9, 0x1f85: 0x2901, + 0x1f86: 0x2909, 0x1f87: 0x2911, 0x1f88: 0x2919, 0x1f89: 0x2921, 0x1f8a: 0x2929, 0x1f8b: 0x2931, + 0x1f8c: 0x2939, 0x1f8d: 0x8cdd, 0x1f8e: 0x2941, 0x1f8f: 0x2949, 0x1f90: 0x2951, 0x1f91: 0x2959, + 0x1f92: 0x8cfd, 0x1f93: 0x2961, 0x1f94: 0x2969, 0x1f95: 0x2799, 0x1f96: 0x8d1d, 0x1f97: 0x2971, + 0x1f98: 0x2979, 0x1f99: 0x2981, 0x1f9a: 0x2989, 0x1f9b: 0x2991, 0x1f9c: 0x8d3d, 0x1f9d: 0x2999, + 0x1f9e: 0x29a1, 0x1f9f: 0x29a9, 0x1fa0: 0x29b1, 0x1fa1: 0x29b9, 0x1fa2: 0x2871, 0x1fa3: 0x29c1, + 0x1fa4: 0x29c9, 0x1fa5: 0x29d1, 0x1fa6: 0x29d9, 0x1fa7: 0x29e1, 0x1fa8: 0x29e9, 0x1fa9: 0x29f1, + 0x1faa: 0x29f9, 0x1fab: 0x2a01, 0x1fac: 0x2a09, 0x1fad: 0x2a11, 0x1fae: 0x2a19, 0x1faf: 0x2a21, + 0x1fb0: 0x2a29, 0x1fb1: 0x2a31, 0x1fb2: 0x2a31, 0x1fb3: 0x2a31, 0x1fb4: 0x8d5d, 0x1fb5: 0x2a39, + 0x1fb6: 0x2a41, 0x1fb7: 0x2a49, 0x1fb8: 0x8d7d, 0x1fb9: 0x2a51, 0x1fba: 0x2a59, 0x1fbb: 0x2a61, + 0x1fbc: 0x2a69, 0x1fbd: 0x2a71, 0x1fbe: 0x2a79, 0x1fbf: 0x2a81, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0x2a89, 0x1fc1: 0x2a91, 0x1fc2: 0x2a99, 0x1fc3: 0x2aa1, 0x1fc4: 0x2aa9, 0x1fc5: 0x2ab1, + 0x1fc6: 0x2ab1, 0x1fc7: 0x2ab9, 0x1fc8: 0x2ac1, 0x1fc9: 0x2ac9, 0x1fca: 0x2ad1, 0x1fcb: 0x2ad9, + 0x1fcc: 0x2ae1, 0x1fcd: 0x2ae9, 0x1fce: 0x2af1, 0x1fcf: 0x2af9, 0x1fd0: 0x2b01, 0x1fd1: 0x2b09, + 0x1fd2: 0x2b11, 0x1fd3: 0x2b19, 0x1fd4: 0x2b21, 
0x1fd5: 0x2b29, 0x1fd6: 0x2b31, 0x1fd7: 0x2b39, + 0x1fd8: 0x2b41, 0x1fd9: 0x8d9d, 0x1fda: 0x2b49, 0x1fdb: 0x2b51, 0x1fdc: 0x2b59, 0x1fdd: 0x2751, + 0x1fde: 0x2b61, 0x1fdf: 0x2b69, 0x1fe0: 0x8dbd, 0x1fe1: 0x8ddd, 0x1fe2: 0x2b71, 0x1fe3: 0x2b79, + 0x1fe4: 0x2b81, 0x1fe5: 0x2b89, 0x1fe6: 0x2b91, 0x1fe7: 0x2b99, 0x1fe8: 0x2040, 0x1fe9: 0x2ba1, + 0x1fea: 0x2ba9, 0x1feb: 0x2ba9, 0x1fec: 0x8dfd, 0x1fed: 0x2bb1, 0x1fee: 0x2bb9, 0x1fef: 0x2bc1, + 0x1ff0: 0x2bc9, 0x1ff1: 0x8e1d, 0x1ff2: 0x2bd1, 0x1ff3: 0x2bd9, 0x1ff4: 0x2040, 0x1ff5: 0x2be1, + 0x1ff6: 0x2be9, 0x1ff7: 0x2bf1, 0x1ff8: 0x2bf9, 0x1ff9: 0x2c01, 0x1ffa: 0x2c09, 0x1ffb: 0x8e3d, + 0x1ffc: 0x2c11, 0x1ffd: 0x8e5d, 0x1ffe: 0x2c19, 0x1fff: 0x2c21, + // Block 0x80, offset 0x2000 + 0x2000: 0x2c29, 0x2001: 0x2c31, 0x2002: 0x2c39, 0x2003: 0x2c41, 0x2004: 0x2c49, 0x2005: 0x2c51, + 0x2006: 0x2c59, 0x2007: 0x2c61, 0x2008: 0x2c69, 0x2009: 0x8e7d, 0x200a: 0x2c71, 0x200b: 0x2c79, + 0x200c: 0x2c81, 0x200d: 0x2c89, 0x200e: 0x2c91, 0x200f: 0x8e9d, 0x2010: 0x2c99, 0x2011: 0x8ebd, + 0x2012: 0x8edd, 0x2013: 0x2ca1, 0x2014: 0x2ca9, 0x2015: 0x2ca9, 0x2016: 0x2cb1, 0x2017: 0x8efd, + 0x2018: 0x8f1d, 0x2019: 0x2cb9, 0x201a: 0x2cc1, 0x201b: 0x2cc9, 0x201c: 0x2cd1, 0x201d: 0x2cd9, + 0x201e: 0x2ce1, 0x201f: 0x2ce9, 0x2020: 0x2cf1, 0x2021: 0x2cf9, 0x2022: 0x2d01, 0x2023: 0x2d09, + 0x2024: 0x8f3d, 0x2025: 0x2d11, 0x2026: 0x2d19, 0x2027: 0x2d21, 0x2028: 0x2d29, 0x2029: 0x2d21, + 0x202a: 0x2d31, 0x202b: 0x2d39, 0x202c: 0x2d41, 0x202d: 0x2d49, 0x202e: 0x2d51, 0x202f: 0x2d59, + 0x2030: 0x2d61, 0x2031: 0x2d69, 0x2032: 0x2d71, 0x2033: 0x2d79, 0x2034: 0x2d81, 0x2035: 0x2d89, + 0x2036: 0x2d91, 0x2037: 0x2d99, 0x2038: 0x8f5d, 0x2039: 0x2da1, 0x203a: 0x2da9, 0x203b: 0x2db1, + 0x203c: 0x2db9, 0x203d: 0x2dc1, 0x203e: 0x8f7d, 0x203f: 0x2dc9, + // Block 0x81, offset 0x2040 + 0x2040: 0x2dd1, 0x2041: 0x2dd9, 0x2042: 0x2de1, 0x2043: 0x2de9, 0x2044: 0x2df1, 0x2045: 0x2df9, + 0x2046: 0x2e01, 0x2047: 0x2e09, 0x2048: 0x2e11, 0x2049: 0x2e19, 0x204a: 0x8f9d, 0x204b: 
0x2e21, + 0x204c: 0x2e29, 0x204d: 0x2e31, 0x204e: 0x2e39, 0x204f: 0x2e41, 0x2050: 0x2e49, 0x2051: 0x2e51, + 0x2052: 0x2e59, 0x2053: 0x2e61, 0x2054: 0x2e69, 0x2055: 0x2e71, 0x2056: 0x2e79, 0x2057: 0x2e81, + 0x2058: 0x2e89, 0x2059: 0x2e91, 0x205a: 0x2e99, 0x205b: 0x2ea1, 0x205c: 0x2ea9, 0x205d: 0x8fbd, + 0x205e: 0x2eb1, 0x205f: 0x2eb9, 0x2060: 0x2ec1, 0x2061: 0x2ec9, 0x2062: 0x2ed1, 0x2063: 0x8fdd, + 0x2064: 0x2ed9, 0x2065: 0x2ee1, 0x2066: 0x2ee9, 0x2067: 0x2ef1, 0x2068: 0x2ef9, 0x2069: 0x2f01, + 0x206a: 0x2f09, 0x206b: 0x2f11, 0x206c: 0x7f0d, 0x206d: 0x2f19, 0x206e: 0x2f21, 0x206f: 0x2f29, + 0x2070: 0x8ffd, 0x2071: 0x2f31, 0x2072: 0x2f39, 0x2073: 0x2f41, 0x2074: 0x2f49, 0x2075: 0x2f51, + 0x2076: 0x2f59, 0x2077: 0x901d, 0x2078: 0x903d, 0x2079: 0x905d, 0x207a: 0x2f61, 0x207b: 0x907d, + 0x207c: 0x2f69, 0x207d: 0x2f71, 0x207e: 0x2f79, 0x207f: 0x2f81, + // Block 0x82, offset 0x2080 + 0x2080: 0x2f89, 0x2081: 0x2f91, 0x2082: 0x2f99, 0x2083: 0x2fa1, 0x2084: 0x2fa9, 0x2085: 0x2fb1, + 0x2086: 0x909d, 0x2087: 0x2fb9, 0x2088: 0x2fc1, 0x2089: 0x2fc9, 0x208a: 0x2fd1, 0x208b: 0x2fd9, + 0x208c: 0x2fe1, 0x208d: 0x90bd, 0x208e: 0x2fe9, 0x208f: 0x2ff1, 0x2090: 0x90dd, 0x2091: 0x90fd, + 0x2092: 0x2ff9, 0x2093: 0x3001, 0x2094: 0x3009, 0x2095: 0x3011, 0x2096: 0x3019, 0x2097: 0x3021, + 0x2098: 0x3029, 0x2099: 0x3031, 0x209a: 0x3039, 0x209b: 0x911d, 0x209c: 0x3041, 0x209d: 0x913d, + 0x209e: 0x3049, 0x209f: 0x2040, 0x20a0: 0x3051, 0x20a1: 0x3059, 0x20a2: 0x3061, 0x20a3: 0x915d, + 0x20a4: 0x3069, 0x20a5: 0x3071, 0x20a6: 0x917d, 0x20a7: 0x919d, 0x20a8: 0x3079, 0x20a9: 0x3081, + 0x20aa: 0x3089, 0x20ab: 0x3091, 0x20ac: 0x3099, 0x20ad: 0x3099, 0x20ae: 0x30a1, 0x20af: 0x30a9, + 0x20b0: 0x30b1, 0x20b1: 0x30b9, 0x20b2: 0x30c1, 0x20b3: 0x30c9, 0x20b4: 0x30d1, 0x20b5: 0x91bd, + 0x20b6: 0x30d9, 0x20b7: 0x91dd, 0x20b8: 0x30e1, 0x20b9: 0x91fd, 0x20ba: 0x30e9, 0x20bb: 0x921d, + 0x20bc: 0x923d, 0x20bd: 0x925d, 0x20be: 0x30f1, 0x20bf: 0x30f9, + // Block 0x83, offset 0x20c0 + 0x20c0: 0x3101, 0x20c1: 0x927d, 
0x20c2: 0x929d, 0x20c3: 0x92bd, 0x20c4: 0x92dd, 0x20c5: 0x3109, + 0x20c6: 0x3111, 0x20c7: 0x3111, 0x20c8: 0x3119, 0x20c9: 0x3121, 0x20ca: 0x3129, 0x20cb: 0x3131, + 0x20cc: 0x3139, 0x20cd: 0x92fd, 0x20ce: 0x3141, 0x20cf: 0x3149, 0x20d0: 0x3151, 0x20d1: 0x3159, + 0x20d2: 0x931d, 0x20d3: 0x3161, 0x20d4: 0x933d, 0x20d5: 0x935d, 0x20d6: 0x3169, 0x20d7: 0x3171, + 0x20d8: 0x3179, 0x20d9: 0x3181, 0x20da: 0x3189, 0x20db: 0x3191, 0x20dc: 0x937d, 0x20dd: 0x939d, + 0x20de: 0x93bd, 0x20df: 0x2040, 0x20e0: 0x3199, 0x20e1: 0x93dd, 0x20e2: 0x31a1, 0x20e3: 0x31a9, + 0x20e4: 0x31b1, 0x20e5: 0x93fd, 0x20e6: 0x31b9, 0x20e7: 0x31c1, 0x20e8: 0x31c9, 0x20e9: 0x31d1, + 0x20ea: 0x31d9, 0x20eb: 0x941d, 0x20ec: 0x31e1, 0x20ed: 0x31e9, 0x20ee: 0x31f1, 0x20ef: 0x31f9, + 0x20f0: 0x3201, 0x20f1: 0x3209, 0x20f2: 0x943d, 0x20f3: 0x945d, 0x20f4: 0x3211, 0x20f5: 0x947d, + 0x20f6: 0x3219, 0x20f7: 0x949d, 0x20f8: 0x3221, 0x20f9: 0x3229, 0x20fa: 0x3231, 0x20fb: 0x94bd, + 0x20fc: 0x94dd, 0x20fd: 0x3239, 0x20fe: 0x94fd, 0x20ff: 0x3241, + // Block 0x84, offset 0x2100 + 0x2100: 0x951d, 0x2101: 0x3249, 0x2102: 0x3251, 0x2103: 0x3259, 0x2104: 0x3261, 0x2105: 0x3269, + 0x2106: 0x3271, 0x2107: 0x953d, 0x2108: 0x955d, 0x2109: 0x957d, 0x210a: 0x959d, 0x210b: 0x2ca1, + 0x210c: 0x3279, 0x210d: 0x3281, 0x210e: 0x3289, 0x210f: 0x3291, 0x2110: 0x3299, 0x2111: 0x32a1, + 0x2112: 0x32a9, 0x2113: 0x32b1, 0x2114: 0x32b9, 0x2115: 0x32c1, 0x2116: 0x32c9, 0x2117: 0x95bd, + 0x2118: 0x32d1, 0x2119: 0x32d9, 0x211a: 0x32e1, 0x211b: 0x32e9, 0x211c: 0x32f1, 0x211d: 0x32f9, + 0x211e: 0x3301, 0x211f: 0x3309, 0x2120: 0x3311, 0x2121: 0x3319, 0x2122: 0x3321, 0x2123: 0x3329, + 0x2124: 0x95dd, 0x2125: 0x95fd, 0x2126: 0x961d, 0x2127: 0x3331, 0x2128: 0x3339, 0x2129: 0x3341, + 0x212a: 0x3349, 0x212b: 0x963d, 0x212c: 0x3351, 0x212d: 0x965d, 0x212e: 0x3359, 0x212f: 0x3361, + 0x2130: 0x967d, 0x2131: 0x969d, 0x2132: 0x3369, 0x2133: 0x3371, 0x2134: 0x3379, 0x2135: 0x3381, + 0x2136: 0x3389, 0x2137: 0x3391, 0x2138: 0x3399, 0x2139: 0x33a1, 0x213a: 
0x33a9, 0x213b: 0x33b1, + 0x213c: 0x33b9, 0x213d: 0x33c1, 0x213e: 0x33c9, 0x213f: 0x2040, + // Block 0x85, offset 0x2140 + 0x2140: 0x33d1, 0x2141: 0x33d9, 0x2142: 0x33e1, 0x2143: 0x33e9, 0x2144: 0x33f1, 0x2145: 0x96bd, + 0x2146: 0x33f9, 0x2147: 0x3401, 0x2148: 0x3409, 0x2149: 0x3411, 0x214a: 0x3419, 0x214b: 0x96dd, + 0x214c: 0x96fd, 0x214d: 0x3421, 0x214e: 0x3429, 0x214f: 0x3431, 0x2150: 0x3439, 0x2151: 0x3441, + 0x2152: 0x3449, 0x2153: 0x971d, 0x2154: 0x3451, 0x2155: 0x3459, 0x2156: 0x3461, 0x2157: 0x3469, + 0x2158: 0x973d, 0x2159: 0x975d, 0x215a: 0x3471, 0x215b: 0x3479, 0x215c: 0x3481, 0x215d: 0x977d, + 0x215e: 0x3489, 0x215f: 0x3491, 0x2160: 0x684d, 0x2161: 0x979d, 0x2162: 0x3499, 0x2163: 0x34a1, + 0x2164: 0x34a9, 0x2165: 0x97bd, 0x2166: 0x34b1, 0x2167: 0x34b9, 0x2168: 0x34c1, 0x2169: 0x34c9, + 0x216a: 0x34d1, 0x216b: 0x34d9, 0x216c: 0x34e1, 0x216d: 0x97dd, 0x216e: 0x34e9, 0x216f: 0x34f1, + 0x2170: 0x34f9, 0x2171: 0x97fd, 0x2172: 0x3501, 0x2173: 0x3509, 0x2174: 0x3511, 0x2175: 0x3519, + 0x2176: 0x7b6d, 0x2177: 0x981d, 0x2178: 0x3521, 0x2179: 0x3529, 0x217a: 0x3531, 0x217b: 0x983d, + 0x217c: 0x3539, 0x217d: 0x985d, 0x217e: 0x3541, 0x217f: 0x3541, + // Block 0x86, offset 0x2180 + 0x2180: 0x3549, 0x2181: 0x987d, 0x2182: 0x3551, 0x2183: 0x3559, 0x2184: 0x3561, 0x2185: 0x3569, + 0x2186: 0x3571, 0x2187: 0x3579, 0x2188: 0x3581, 0x2189: 0x989d, 0x218a: 0x3589, 0x218b: 0x3591, + 0x218c: 0x3599, 0x218d: 0x35a1, 0x218e: 0x35a9, 0x218f: 0x35b1, 0x2190: 0x98bd, 0x2191: 0x35b9, + 0x2192: 0x98dd, 0x2193: 0x98fd, 0x2194: 0x991d, 0x2195: 0x35c1, 0x2196: 0x35c9, 0x2197: 0x35d1, + 0x2198: 0x35d9, 0x2199: 0x35e1, 0x219a: 0x35e9, 0x219b: 0x35f1, 0x219c: 0x35f9, 0x219d: 0x993d, + 0x219e: 0x0040, 0x219f: 0x0040, 0x21a0: 0x0040, 0x21a1: 0x0040, 0x21a2: 0x0040, 0x21a3: 0x0040, + 0x21a4: 0x0040, 0x21a5: 0x0040, 0x21a6: 0x0040, 0x21a7: 0x0040, 0x21a8: 0x0040, 0x21a9: 0x0040, + 0x21aa: 0x0040, 0x21ab: 0x0040, 0x21ac: 0x0040, 0x21ad: 0x0040, 0x21ae: 0x0040, 0x21af: 0x0040, + 0x21b0: 0x0040, 
0x21b1: 0x0040, 0x21b2: 0x0040, 0x21b3: 0x0040, 0x21b4: 0x0040, 0x21b5: 0x0040, + 0x21b6: 0x0040, 0x21b7: 0x0040, 0x21b8: 0x0040, 0x21b9: 0x0040, 0x21ba: 0x0040, 0x21bb: 0x0040, + 0x21bc: 0x0040, 0x21bd: 0x0040, 0x21be: 0x0040, 0x21bf: 0x0040, +} + +// idnaIndex: 39 blocks, 2496 entries, 4992 bytes +// Block 0 is the zero block. +var idnaIndex = [2496]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x85, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x86, 0xca: 0x87, 0xcb: 0x07, 0xcc: 0x88, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x89, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x8a, 0xd6: 0x8b, 0xd7: 0x8c, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x8d, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x8e, 0xde: 0x8f, 0xdf: 0x90, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x07, 0xea: 0x08, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x09, 0xee: 0x0a, 0xef: 0x0b, + 0xf0: 0x20, 0xf1: 0x21, 0xf2: 0x21, 0xf3: 0x23, 0xf4: 0x24, + // Block 0x4, offset 0x100 + 0x120: 0x91, 0x121: 0x13, 0x122: 0x14, 0x123: 0x92, 0x124: 0x93, 0x125: 0x15, 0x126: 0x16, 0x127: 0x17, + 0x128: 0x18, 0x129: 0x19, 0x12a: 0x1a, 0x12b: 0x1b, 0x12c: 0x1c, 0x12d: 0x1d, 0x12e: 0x1e, 0x12f: 0x94, + 0x130: 0x95, 0x131: 0x1f, 0x132: 0x20, 0x133: 0x21, 0x134: 0x96, 0x135: 0x22, 0x136: 0x97, 0x137: 0x98, + 0x138: 0x99, 0x139: 0x9a, 0x13a: 0x23, 0x13b: 0x9b, 0x13c: 0x9c, 0x13d: 0x24, 0x13e: 0x25, 0x13f: 0x9d, + // Block 0x5, offset 0x140 + 0x140: 0x9e, 0x141: 0x9f, 0x142: 0xa0, 0x143: 0xa1, 0x144: 0xa2, 0x145: 0xa3, 0x146: 0xa4, 0x147: 0xa5, + 0x148: 0xa6, 0x149: 0xa7, 0x14a: 0xa8, 0x14b: 0xa9, 0x14c: 0xaa, 0x14d: 0xab, 0x14e: 0xac, 0x14f: 0xad, + 0x150: 0xae, 0x151: 0xa6, 0x152: 0xa6, 0x153: 0xa6, 0x154: 0xa6, 0x155: 0xa6, 0x156: 0xa6, 0x157: 0xa6, + 0x158: 0xa6, 0x159: 0xaf, 0x15a: 0xb0, 0x15b: 0xb1, 0x15c: 0xb2, 0x15d: 0xb3, 0x15e: 0xb4, 0x15f: 
0xb5, + 0x160: 0xb6, 0x161: 0xb7, 0x162: 0xb8, 0x163: 0xb9, 0x164: 0xba, 0x165: 0xbb, 0x166: 0xbc, 0x167: 0xbd, + 0x168: 0xbe, 0x169: 0xbf, 0x16a: 0xc0, 0x16b: 0xc1, 0x16c: 0xc2, 0x16d: 0xc3, 0x16e: 0xc4, 0x16f: 0xc5, + 0x170: 0xc6, 0x171: 0xc7, 0x172: 0xc8, 0x173: 0xc9, 0x174: 0x26, 0x175: 0x27, 0x176: 0x28, 0x177: 0x88, + 0x178: 0x29, 0x179: 0x29, 0x17a: 0x2a, 0x17b: 0x29, 0x17c: 0xca, 0x17d: 0x2b, 0x17e: 0x2c, 0x17f: 0x2d, + // Block 0x6, offset 0x180 + 0x180: 0x2e, 0x181: 0x2f, 0x182: 0x30, 0x183: 0xcb, 0x184: 0x31, 0x185: 0x32, 0x186: 0xcc, 0x187: 0xa2, + 0x188: 0xcd, 0x189: 0xce, 0x18a: 0xa2, 0x18b: 0xa2, 0x18c: 0xcf, 0x18d: 0xa2, 0x18e: 0xa2, 0x18f: 0xa2, + 0x190: 0xd0, 0x191: 0x33, 0x192: 0x34, 0x193: 0x35, 0x194: 0xa2, 0x195: 0xa2, 0x196: 0xa2, 0x197: 0xa2, + 0x198: 0xa2, 0x199: 0xa2, 0x19a: 0xa2, 0x19b: 0xa2, 0x19c: 0xa2, 0x19d: 0xa2, 0x19e: 0xa2, 0x19f: 0xa2, + 0x1a0: 0xa2, 0x1a1: 0xa2, 0x1a2: 0xa2, 0x1a3: 0xa2, 0x1a4: 0xa2, 0x1a5: 0xa2, 0x1a6: 0xa2, 0x1a7: 0xa2, + 0x1a8: 0xd1, 0x1a9: 0xd2, 0x1aa: 0xa2, 0x1ab: 0xd3, 0x1ac: 0xa2, 0x1ad: 0xd4, 0x1ae: 0xd5, 0x1af: 0xa2, + 0x1b0: 0xd6, 0x1b1: 0x36, 0x1b2: 0x29, 0x1b3: 0x37, 0x1b4: 0xd7, 0x1b5: 0xd8, 0x1b6: 0xd9, 0x1b7: 0xda, + 0x1b8: 0xdb, 0x1b9: 0xdc, 0x1ba: 0xdd, 0x1bb: 0xde, 0x1bc: 0xdf, 0x1bd: 0xe0, 0x1be: 0xe1, 0x1bf: 0x38, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x39, 0x1c1: 0xe2, 0x1c2: 0xe3, 0x1c3: 0xe4, 0x1c4: 0xe5, 0x1c5: 0x3a, 0x1c6: 0x3b, 0x1c7: 0xe6, + 0x1c8: 0xe7, 0x1c9: 0x3c, 0x1ca: 0x3d, 0x1cb: 0x3e, 0x1cc: 0xe8, 0x1cd: 0xe9, 0x1ce: 0x3f, 0x1cf: 0x40, + 0x1d0: 0xa6, 0x1d1: 0xa6, 0x1d2: 0xa6, 0x1d3: 0xa6, 0x1d4: 0xa6, 0x1d5: 0xa6, 0x1d6: 0xa6, 0x1d7: 0xa6, + 0x1d8: 0xa6, 0x1d9: 0xa6, 0x1da: 0xa6, 0x1db: 0xa6, 0x1dc: 0xa6, 0x1dd: 0xa6, 0x1de: 0xa6, 0x1df: 0xa6, + 0x1e0: 0xa6, 0x1e1: 0xa6, 0x1e2: 0xa6, 0x1e3: 0xa6, 0x1e4: 0xa6, 0x1e5: 0xa6, 0x1e6: 0xa6, 0x1e7: 0xa6, + 0x1e8: 0xa6, 0x1e9: 0xa6, 0x1ea: 0xa6, 0x1eb: 0xa6, 0x1ec: 0xa6, 0x1ed: 0xa6, 0x1ee: 0xa6, 0x1ef: 0xa6, + 0x1f0: 0xa6, 0x1f1: 0xa6, 
0x1f2: 0xa6, 0x1f3: 0xa6, 0x1f4: 0xa6, 0x1f5: 0xa6, 0x1f6: 0xa6, 0x1f7: 0xa6, + 0x1f8: 0xa6, 0x1f9: 0xa6, 0x1fa: 0xa6, 0x1fb: 0xa6, 0x1fc: 0xa6, 0x1fd: 0xa6, 0x1fe: 0xa6, 0x1ff: 0xa6, + // Block 0x8, offset 0x200 + 0x200: 0xa6, 0x201: 0xa6, 0x202: 0xa6, 0x203: 0xa6, 0x204: 0xa6, 0x205: 0xa6, 0x206: 0xa6, 0x207: 0xa6, + 0x208: 0xa6, 0x209: 0xa6, 0x20a: 0xa6, 0x20b: 0xa6, 0x20c: 0xa6, 0x20d: 0xa6, 0x20e: 0xa6, 0x20f: 0xa6, + 0x210: 0xa6, 0x211: 0xa6, 0x212: 0xa6, 0x213: 0xa6, 0x214: 0xa6, 0x215: 0xa6, 0x216: 0xa6, 0x217: 0xa6, + 0x218: 0xa6, 0x219: 0xa6, 0x21a: 0xa6, 0x21b: 0xa6, 0x21c: 0xa6, 0x21d: 0xa6, 0x21e: 0xa6, 0x21f: 0xa6, + 0x220: 0xa6, 0x221: 0xa6, 0x222: 0xa6, 0x223: 0xa6, 0x224: 0xa6, 0x225: 0xa6, 0x226: 0xa6, 0x227: 0xa6, + 0x228: 0xa6, 0x229: 0xa6, 0x22a: 0xa6, 0x22b: 0xa6, 0x22c: 0xa6, 0x22d: 0xa6, 0x22e: 0xa6, 0x22f: 0xa6, + 0x230: 0xa6, 0x231: 0xa6, 0x232: 0xa6, 0x233: 0xa6, 0x234: 0xa6, 0x235: 0xa6, 0x236: 0xa6, 0x237: 0xa2, + 0x238: 0xa6, 0x239: 0xa6, 0x23a: 0xa6, 0x23b: 0xa6, 0x23c: 0xa6, 0x23d: 0xa6, 0x23e: 0xa6, 0x23f: 0xa6, + // Block 0x9, offset 0x240 + 0x240: 0xa6, 0x241: 0xa6, 0x242: 0xa6, 0x243: 0xa6, 0x244: 0xa6, 0x245: 0xa6, 0x246: 0xa6, 0x247: 0xa6, + 0x248: 0xa6, 0x249: 0xa6, 0x24a: 0xa6, 0x24b: 0xa6, 0x24c: 0xa6, 0x24d: 0xa6, 0x24e: 0xa6, 0x24f: 0xa6, + 0x250: 0xa6, 0x251: 0xa6, 0x252: 0xa6, 0x253: 0xa6, 0x254: 0xa6, 0x255: 0xa6, 0x256: 0xa6, 0x257: 0xa6, + 0x258: 0xa6, 0x259: 0xa6, 0x25a: 0xa6, 0x25b: 0xa6, 0x25c: 0xa6, 0x25d: 0xa6, 0x25e: 0xa6, 0x25f: 0xa6, + 0x260: 0xa6, 0x261: 0xa6, 0x262: 0xa6, 0x263: 0xa6, 0x264: 0xa6, 0x265: 0xa6, 0x266: 0xa6, 0x267: 0xa6, + 0x268: 0xa6, 0x269: 0xa6, 0x26a: 0xa6, 0x26b: 0xa6, 0x26c: 0xa6, 0x26d: 0xa6, 0x26e: 0xa6, 0x26f: 0xa6, + 0x270: 0xa6, 0x271: 0xa6, 0x272: 0xa6, 0x273: 0xa6, 0x274: 0xa6, 0x275: 0xa6, 0x276: 0xa6, 0x277: 0xa6, + 0x278: 0xa6, 0x279: 0xa6, 0x27a: 0xa6, 0x27b: 0xa6, 0x27c: 0xa6, 0x27d: 0xa6, 0x27e: 0xa6, 0x27f: 0xa6, + // Block 0xa, offset 0x280 + 0x280: 0xa6, 0x281: 0xa6, 
0x282: 0xa6, 0x283: 0xa6, 0x284: 0xa6, 0x285: 0xa6, 0x286: 0xa6, 0x287: 0xa6, + 0x288: 0xa6, 0x289: 0xa6, 0x28a: 0xa6, 0x28b: 0xa6, 0x28c: 0xa6, 0x28d: 0xa6, 0x28e: 0xa6, 0x28f: 0xa6, + 0x290: 0xa6, 0x291: 0xa6, 0x292: 0xea, 0x293: 0xeb, 0x294: 0xa6, 0x295: 0xa6, 0x296: 0xa6, 0x297: 0xa6, + 0x298: 0xec, 0x299: 0x41, 0x29a: 0x42, 0x29b: 0xed, 0x29c: 0x43, 0x29d: 0x44, 0x29e: 0x45, 0x29f: 0x46, + 0x2a0: 0xee, 0x2a1: 0xef, 0x2a2: 0xf0, 0x2a3: 0xf1, 0x2a4: 0xf2, 0x2a5: 0xf3, 0x2a6: 0xf4, 0x2a7: 0xf5, + 0x2a8: 0xf6, 0x2a9: 0xf7, 0x2aa: 0xf8, 0x2ab: 0xf9, 0x2ac: 0xfa, 0x2ad: 0xfb, 0x2ae: 0xfc, 0x2af: 0xfd, + 0x2b0: 0xa6, 0x2b1: 0xa6, 0x2b2: 0xa6, 0x2b3: 0xa6, 0x2b4: 0xa6, 0x2b5: 0xa6, 0x2b6: 0xa6, 0x2b7: 0xa6, + 0x2b8: 0xa6, 0x2b9: 0xa6, 0x2ba: 0xa6, 0x2bb: 0xa6, 0x2bc: 0xa6, 0x2bd: 0xa6, 0x2be: 0xa6, 0x2bf: 0xa6, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xa6, 0x2c1: 0xa6, 0x2c2: 0xa6, 0x2c3: 0xa6, 0x2c4: 0xa6, 0x2c5: 0xa6, 0x2c6: 0xa6, 0x2c7: 0xa6, + 0x2c8: 0xa6, 0x2c9: 0xa6, 0x2ca: 0xa6, 0x2cb: 0xa6, 0x2cc: 0xa6, 0x2cd: 0xa6, 0x2ce: 0xa6, 0x2cf: 0xa6, + 0x2d0: 0xa6, 0x2d1: 0xa6, 0x2d2: 0xa6, 0x2d3: 0xa6, 0x2d4: 0xa6, 0x2d5: 0xa6, 0x2d6: 0xa6, 0x2d7: 0xa6, + 0x2d8: 0xa6, 0x2d9: 0xa6, 0x2da: 0xa6, 0x2db: 0xa6, 0x2dc: 0xa6, 0x2dd: 0xa6, 0x2de: 0xfe, 0x2df: 0xff, + // Block 0xc, offset 0x300 + 0x300: 0x100, 0x301: 0x100, 0x302: 0x100, 0x303: 0x100, 0x304: 0x100, 0x305: 0x100, 0x306: 0x100, 0x307: 0x100, + 0x308: 0x100, 0x309: 0x100, 0x30a: 0x100, 0x30b: 0x100, 0x30c: 0x100, 0x30d: 0x100, 0x30e: 0x100, 0x30f: 0x100, + 0x310: 0x100, 0x311: 0x100, 0x312: 0x100, 0x313: 0x100, 0x314: 0x100, 0x315: 0x100, 0x316: 0x100, 0x317: 0x100, + 0x318: 0x100, 0x319: 0x100, 0x31a: 0x100, 0x31b: 0x100, 0x31c: 0x100, 0x31d: 0x100, 0x31e: 0x100, 0x31f: 0x100, + 0x320: 0x100, 0x321: 0x100, 0x322: 0x100, 0x323: 0x100, 0x324: 0x100, 0x325: 0x100, 0x326: 0x100, 0x327: 0x100, + 0x328: 0x100, 0x329: 0x100, 0x32a: 0x100, 0x32b: 0x100, 0x32c: 0x100, 0x32d: 0x100, 0x32e: 0x100, 0x32f: 0x100, + 0x330: 
0x100, 0x331: 0x100, 0x332: 0x100, 0x333: 0x100, 0x334: 0x100, 0x335: 0x100, 0x336: 0x100, 0x337: 0x100, + 0x338: 0x100, 0x339: 0x100, 0x33a: 0x100, 0x33b: 0x100, 0x33c: 0x100, 0x33d: 0x100, 0x33e: 0x100, 0x33f: 0x100, + // Block 0xd, offset 0x340 + 0x340: 0x100, 0x341: 0x100, 0x342: 0x100, 0x343: 0x100, 0x344: 0x100, 0x345: 0x100, 0x346: 0x100, 0x347: 0x100, + 0x348: 0x100, 0x349: 0x100, 0x34a: 0x100, 0x34b: 0x100, 0x34c: 0x100, 0x34d: 0x100, 0x34e: 0x100, 0x34f: 0x100, + 0x350: 0x100, 0x351: 0x100, 0x352: 0x100, 0x353: 0x100, 0x354: 0x100, 0x355: 0x100, 0x356: 0x100, 0x357: 0x100, + 0x358: 0x100, 0x359: 0x100, 0x35a: 0x100, 0x35b: 0x100, 0x35c: 0x100, 0x35d: 0x100, 0x35e: 0x100, 0x35f: 0x100, + 0x360: 0x100, 0x361: 0x100, 0x362: 0x100, 0x363: 0x100, 0x364: 0x101, 0x365: 0x102, 0x366: 0x103, 0x367: 0x104, + 0x368: 0x47, 0x369: 0x105, 0x36a: 0x106, 0x36b: 0x48, 0x36c: 0x49, 0x36d: 0x4a, 0x36e: 0x4b, 0x36f: 0x4c, + 0x370: 0x107, 0x371: 0x4d, 0x372: 0x4e, 0x373: 0x4f, 0x374: 0x50, 0x375: 0x51, 0x376: 0x108, 0x377: 0x52, + 0x378: 0x53, 0x379: 0x54, 0x37a: 0x55, 0x37b: 0x56, 0x37c: 0x57, 0x37d: 0x58, 0x37e: 0x59, 0x37f: 0x5a, + // Block 0xe, offset 0x380 + 0x380: 0x109, 0x381: 0x10a, 0x382: 0xa6, 0x383: 0x10b, 0x384: 0x10c, 0x385: 0xa2, 0x386: 0x10d, 0x387: 0x10e, + 0x388: 0x100, 0x389: 0x100, 0x38a: 0x10f, 0x38b: 0x110, 0x38c: 0x111, 0x38d: 0x112, 0x38e: 0x113, 0x38f: 0x114, + 0x390: 0x115, 0x391: 0xa6, 0x392: 0x116, 0x393: 0x117, 0x394: 0x118, 0x395: 0x5b, 0x396: 0x5c, 0x397: 0x100, + 0x398: 0xa6, 0x399: 0xa6, 0x39a: 0xa6, 0x39b: 0xa6, 0x39c: 0x119, 0x39d: 0x11a, 0x39e: 0x5d, 0x39f: 0x100, + 0x3a0: 0x11b, 0x3a1: 0x11c, 0x3a2: 0x11d, 0x3a3: 0x11e, 0x3a4: 0x11f, 0x3a5: 0x100, 0x3a6: 0x120, 0x3a7: 0x121, + 0x3a8: 0x122, 0x3a9: 0x123, 0x3aa: 0x124, 0x3ab: 0x5e, 0x3ac: 0x125, 0x3ad: 0x126, 0x3ae: 0x5f, 0x3af: 0x100, + 0x3b0: 0x127, 0x3b1: 0x128, 0x3b2: 0x129, 0x3b3: 0x12a, 0x3b4: 0x12b, 0x3b5: 0x100, 0x3b6: 0x100, 0x3b7: 0x100, + 0x3b8: 0x100, 0x3b9: 0x12c, 0x3ba: 0x12d, 
0x3bb: 0x12e, 0x3bc: 0x12f, 0x3bd: 0x130, 0x3be: 0x131, 0x3bf: 0x132, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x133, 0x3c1: 0x134, 0x3c2: 0x135, 0x3c3: 0x136, 0x3c4: 0x137, 0x3c5: 0x138, 0x3c6: 0x139, 0x3c7: 0x13a, + 0x3c8: 0x13b, 0x3c9: 0x13c, 0x3ca: 0x13d, 0x3cb: 0x13e, 0x3cc: 0x60, 0x3cd: 0x61, 0x3ce: 0x100, 0x3cf: 0x100, + 0x3d0: 0x13f, 0x3d1: 0x140, 0x3d2: 0x141, 0x3d3: 0x142, 0x3d4: 0x100, 0x3d5: 0x100, 0x3d6: 0x143, 0x3d7: 0x144, + 0x3d8: 0x145, 0x3d9: 0x146, 0x3da: 0x147, 0x3db: 0x148, 0x3dc: 0x149, 0x3dd: 0x14a, 0x3de: 0x100, 0x3df: 0x100, + 0x3e0: 0x14b, 0x3e1: 0x100, 0x3e2: 0x14c, 0x3e3: 0x14d, 0x3e4: 0x62, 0x3e5: 0x14e, 0x3e6: 0x14f, 0x3e7: 0x150, + 0x3e8: 0x151, 0x3e9: 0x152, 0x3ea: 0x153, 0x3eb: 0x154, 0x3ec: 0x155, 0x3ed: 0x100, 0x3ee: 0x100, 0x3ef: 0x100, + 0x3f0: 0x156, 0x3f1: 0x157, 0x3f2: 0x158, 0x3f3: 0x100, 0x3f4: 0x159, 0x3f5: 0x15a, 0x3f6: 0x15b, 0x3f7: 0x100, + 0x3f8: 0x100, 0x3f9: 0x100, 0x3fa: 0x100, 0x3fb: 0x15c, 0x3fc: 0x15d, 0x3fd: 0x15e, 0x3fe: 0x15f, 0x3ff: 0x160, + // Block 0x10, offset 0x400 + 0x400: 0xa6, 0x401: 0xa6, 0x402: 0xa6, 0x403: 0xa6, 0x404: 0xa6, 0x405: 0xa6, 0x406: 0xa6, 0x407: 0xa6, + 0x408: 0xa6, 0x409: 0xa6, 0x40a: 0xa6, 0x40b: 0xa6, 0x40c: 0xa6, 0x40d: 0xa6, 0x40e: 0x161, 0x40f: 0x100, + 0x410: 0xa2, 0x411: 0x162, 0x412: 0xa6, 0x413: 0xa6, 0x414: 0xa6, 0x415: 0x163, 0x416: 0x100, 0x417: 0x100, + 0x418: 0x100, 0x419: 0x100, 0x41a: 0x100, 0x41b: 0x100, 0x41c: 0x100, 0x41d: 0x100, 0x41e: 0x100, 0x41f: 0x100, + 0x420: 0x100, 0x421: 0x100, 0x422: 0x100, 0x423: 0x100, 0x424: 0x100, 0x425: 0x100, 0x426: 0x100, 0x427: 0x100, + 0x428: 0x100, 0x429: 0x100, 0x42a: 0x100, 0x42b: 0x100, 0x42c: 0x100, 0x42d: 0x100, 0x42e: 0x100, 0x42f: 0x100, + 0x430: 0x100, 0x431: 0x100, 0x432: 0x100, 0x433: 0x100, 0x434: 0x100, 0x435: 0x100, 0x436: 0x100, 0x437: 0x100, + 0x438: 0x100, 0x439: 0x100, 0x43a: 0x100, 0x43b: 0x100, 0x43c: 0x100, 0x43d: 0x100, 0x43e: 0x164, 0x43f: 0x165, + // Block 0x11, offset 0x440 + 0x440: 0xa6, 0x441: 0xa6, 0x442: 
0xa6, 0x443: 0xa6, 0x444: 0xa6, 0x445: 0xa6, 0x446: 0xa6, 0x447: 0xa6, + 0x448: 0xa6, 0x449: 0xa6, 0x44a: 0xa6, 0x44b: 0xa6, 0x44c: 0xa6, 0x44d: 0xa6, 0x44e: 0xa6, 0x44f: 0xa6, + 0x450: 0x166, 0x451: 0x167, 0x452: 0x100, 0x453: 0x100, 0x454: 0x100, 0x455: 0x100, 0x456: 0x100, 0x457: 0x100, + 0x458: 0x100, 0x459: 0x100, 0x45a: 0x100, 0x45b: 0x100, 0x45c: 0x100, 0x45d: 0x100, 0x45e: 0x100, 0x45f: 0x100, + 0x460: 0x100, 0x461: 0x100, 0x462: 0x100, 0x463: 0x100, 0x464: 0x100, 0x465: 0x100, 0x466: 0x100, 0x467: 0x100, + 0x468: 0x100, 0x469: 0x100, 0x46a: 0x100, 0x46b: 0x100, 0x46c: 0x100, 0x46d: 0x100, 0x46e: 0x100, 0x46f: 0x100, + 0x470: 0x100, 0x471: 0x100, 0x472: 0x100, 0x473: 0x100, 0x474: 0x100, 0x475: 0x100, 0x476: 0x100, 0x477: 0x100, + 0x478: 0x100, 0x479: 0x100, 0x47a: 0x100, 0x47b: 0x100, 0x47c: 0x100, 0x47d: 0x100, 0x47e: 0x100, 0x47f: 0x100, + // Block 0x12, offset 0x480 + 0x480: 0x100, 0x481: 0x100, 0x482: 0x100, 0x483: 0x100, 0x484: 0x100, 0x485: 0x100, 0x486: 0x100, 0x487: 0x100, + 0x488: 0x100, 0x489: 0x100, 0x48a: 0x100, 0x48b: 0x100, 0x48c: 0x100, 0x48d: 0x100, 0x48e: 0x100, 0x48f: 0x100, + 0x490: 0xa6, 0x491: 0xa6, 0x492: 0xa6, 0x493: 0xa6, 0x494: 0xa6, 0x495: 0xa6, 0x496: 0xa6, 0x497: 0xa6, + 0x498: 0xa6, 0x499: 0x14a, 0x49a: 0x100, 0x49b: 0x100, 0x49c: 0x100, 0x49d: 0x100, 0x49e: 0x100, 0x49f: 0x100, + 0x4a0: 0x100, 0x4a1: 0x100, 0x4a2: 0x100, 0x4a3: 0x100, 0x4a4: 0x100, 0x4a5: 0x100, 0x4a6: 0x100, 0x4a7: 0x100, + 0x4a8: 0x100, 0x4a9: 0x100, 0x4aa: 0x100, 0x4ab: 0x100, 0x4ac: 0x100, 0x4ad: 0x100, 0x4ae: 0x100, 0x4af: 0x100, + 0x4b0: 0x100, 0x4b1: 0x100, 0x4b2: 0x100, 0x4b3: 0x100, 0x4b4: 0x100, 0x4b5: 0x100, 0x4b6: 0x100, 0x4b7: 0x100, + 0x4b8: 0x100, 0x4b9: 0x100, 0x4ba: 0x100, 0x4bb: 0x100, 0x4bc: 0x100, 0x4bd: 0x100, 0x4be: 0x100, 0x4bf: 0x100, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x100, 0x4c1: 0x100, 0x4c2: 0x100, 0x4c3: 0x100, 0x4c4: 0x100, 0x4c5: 0x100, 0x4c6: 0x100, 0x4c7: 0x100, + 0x4c8: 0x100, 0x4c9: 0x100, 0x4ca: 0x100, 0x4cb: 0x100, 
0x4cc: 0x100, 0x4cd: 0x100, 0x4ce: 0x100, 0x4cf: 0x100, + 0x4d0: 0x100, 0x4d1: 0x100, 0x4d2: 0x100, 0x4d3: 0x100, 0x4d4: 0x100, 0x4d5: 0x100, 0x4d6: 0x100, 0x4d7: 0x100, + 0x4d8: 0x100, 0x4d9: 0x100, 0x4da: 0x100, 0x4db: 0x100, 0x4dc: 0x100, 0x4dd: 0x100, 0x4de: 0x100, 0x4df: 0x100, + 0x4e0: 0xa6, 0x4e1: 0xa6, 0x4e2: 0xa6, 0x4e3: 0xa6, 0x4e4: 0xa6, 0x4e5: 0xa6, 0x4e6: 0xa6, 0x4e7: 0xa6, + 0x4e8: 0x154, 0x4e9: 0x168, 0x4ea: 0x169, 0x4eb: 0x16a, 0x4ec: 0x16b, 0x4ed: 0x16c, 0x4ee: 0x16d, 0x4ef: 0x100, + 0x4f0: 0x100, 0x4f1: 0x100, 0x4f2: 0x100, 0x4f3: 0x100, 0x4f4: 0x100, 0x4f5: 0x100, 0x4f6: 0x100, 0x4f7: 0x100, + 0x4f8: 0x100, 0x4f9: 0x16e, 0x4fa: 0x16f, 0x4fb: 0x100, 0x4fc: 0xa6, 0x4fd: 0x170, 0x4fe: 0x171, 0x4ff: 0x172, + // Block 0x14, offset 0x500 + 0x500: 0xa6, 0x501: 0xa6, 0x502: 0xa6, 0x503: 0xa6, 0x504: 0xa6, 0x505: 0xa6, 0x506: 0xa6, 0x507: 0xa6, + 0x508: 0xa6, 0x509: 0xa6, 0x50a: 0xa6, 0x50b: 0xa6, 0x50c: 0xa6, 0x50d: 0xa6, 0x50e: 0xa6, 0x50f: 0xa6, + 0x510: 0xa6, 0x511: 0xa6, 0x512: 0xa6, 0x513: 0xa6, 0x514: 0xa6, 0x515: 0xa6, 0x516: 0xa6, 0x517: 0xa6, + 0x518: 0xa6, 0x519: 0xa6, 0x51a: 0xa6, 0x51b: 0xa6, 0x51c: 0xa6, 0x51d: 0xa6, 0x51e: 0xa6, 0x51f: 0x173, + 0x520: 0xa6, 0x521: 0xa6, 0x522: 0xa6, 0x523: 0xa6, 0x524: 0xa6, 0x525: 0xa6, 0x526: 0xa6, 0x527: 0xa6, + 0x528: 0xa6, 0x529: 0xa6, 0x52a: 0xa6, 0x52b: 0xa6, 0x52c: 0xa6, 0x52d: 0xa6, 0x52e: 0xa6, 0x52f: 0xa6, + 0x530: 0xa6, 0x531: 0xa6, 0x532: 0xa6, 0x533: 0x174, 0x534: 0x175, 0x535: 0x100, 0x536: 0x100, 0x537: 0x100, + 0x538: 0x100, 0x539: 0x100, 0x53a: 0x100, 0x53b: 0x100, 0x53c: 0x100, 0x53d: 0x100, 0x53e: 0x100, 0x53f: 0x100, + // Block 0x15, offset 0x540 + 0x540: 0x100, 0x541: 0x100, 0x542: 0x100, 0x543: 0x100, 0x544: 0x100, 0x545: 0x100, 0x546: 0x100, 0x547: 0x100, + 0x548: 0x100, 0x549: 0x100, 0x54a: 0x100, 0x54b: 0x100, 0x54c: 0x100, 0x54d: 0x100, 0x54e: 0x100, 0x54f: 0x100, + 0x550: 0x100, 0x551: 0x100, 0x552: 0x100, 0x553: 0x100, 0x554: 0x100, 0x555: 0x100, 0x556: 0x100, 0x557: 0x100, + 
0x558: 0x100, 0x559: 0x100, 0x55a: 0x100, 0x55b: 0x100, 0x55c: 0x100, 0x55d: 0x100, 0x55e: 0x100, 0x55f: 0x100, + 0x560: 0x100, 0x561: 0x100, 0x562: 0x100, 0x563: 0x100, 0x564: 0x100, 0x565: 0x100, 0x566: 0x100, 0x567: 0x100, + 0x568: 0x100, 0x569: 0x100, 0x56a: 0x100, 0x56b: 0x100, 0x56c: 0x100, 0x56d: 0x100, 0x56e: 0x100, 0x56f: 0x100, + 0x570: 0x100, 0x571: 0x100, 0x572: 0x100, 0x573: 0x100, 0x574: 0x100, 0x575: 0x100, 0x576: 0x100, 0x577: 0x100, + 0x578: 0x100, 0x579: 0x100, 0x57a: 0x100, 0x57b: 0x100, 0x57c: 0x100, 0x57d: 0x100, 0x57e: 0x100, 0x57f: 0x176, + // Block 0x16, offset 0x580 + 0x580: 0xa6, 0x581: 0xa6, 0x582: 0xa6, 0x583: 0xa6, 0x584: 0x177, 0x585: 0x178, 0x586: 0xa6, 0x587: 0xa6, + 0x588: 0xa6, 0x589: 0xa6, 0x58a: 0xa6, 0x58b: 0x179, 0x58c: 0x100, 0x58d: 0x100, 0x58e: 0x100, 0x58f: 0x100, + 0x590: 0x100, 0x591: 0x100, 0x592: 0x100, 0x593: 0x100, 0x594: 0x100, 0x595: 0x100, 0x596: 0x100, 0x597: 0x100, + 0x598: 0x100, 0x599: 0x100, 0x59a: 0x100, 0x59b: 0x100, 0x59c: 0x100, 0x59d: 0x100, 0x59e: 0x100, 0x59f: 0x100, + 0x5a0: 0x100, 0x5a1: 0x100, 0x5a2: 0x100, 0x5a3: 0x100, 0x5a4: 0x100, 0x5a5: 0x100, 0x5a6: 0x100, 0x5a7: 0x100, + 0x5a8: 0x100, 0x5a9: 0x100, 0x5aa: 0x100, 0x5ab: 0x100, 0x5ac: 0x100, 0x5ad: 0x100, 0x5ae: 0x100, 0x5af: 0x100, + 0x5b0: 0xa6, 0x5b1: 0x17a, 0x5b2: 0x17b, 0x5b3: 0x100, 0x5b4: 0x100, 0x5b5: 0x100, 0x5b6: 0x100, 0x5b7: 0x100, + 0x5b8: 0x100, 0x5b9: 0x100, 0x5ba: 0x100, 0x5bb: 0x100, 0x5bc: 0x100, 0x5bd: 0x100, 0x5be: 0x100, 0x5bf: 0x100, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x100, 0x5c1: 0x100, 0x5c2: 0x100, 0x5c3: 0x100, 0x5c4: 0x100, 0x5c5: 0x100, 0x5c6: 0x100, 0x5c7: 0x100, + 0x5c8: 0x100, 0x5c9: 0x100, 0x5ca: 0x100, 0x5cb: 0x100, 0x5cc: 0x100, 0x5cd: 0x100, 0x5ce: 0x100, 0x5cf: 0x100, + 0x5d0: 0x100, 0x5d1: 0x100, 0x5d2: 0x100, 0x5d3: 0x100, 0x5d4: 0x100, 0x5d5: 0x100, 0x5d6: 0x100, 0x5d7: 0x100, + 0x5d8: 0x100, 0x5d9: 0x100, 0x5da: 0x100, 0x5db: 0x100, 0x5dc: 0x100, 0x5dd: 0x100, 0x5de: 0x100, 0x5df: 0x100, + 0x5e0: 
0x100, 0x5e1: 0x100, 0x5e2: 0x100, 0x5e3: 0x100, 0x5e4: 0x100, 0x5e5: 0x100, 0x5e6: 0x100, 0x5e7: 0x100, + 0x5e8: 0x100, 0x5e9: 0x100, 0x5ea: 0x100, 0x5eb: 0x100, 0x5ec: 0x100, 0x5ed: 0x100, 0x5ee: 0x100, 0x5ef: 0x100, + 0x5f0: 0x100, 0x5f1: 0x100, 0x5f2: 0x100, 0x5f3: 0x100, 0x5f4: 0x100, 0x5f5: 0x100, 0x5f6: 0x100, 0x5f7: 0x100, + 0x5f8: 0x100, 0x5f9: 0x100, 0x5fa: 0x100, 0x5fb: 0x100, 0x5fc: 0x17c, 0x5fd: 0x17d, 0x5fe: 0xa2, 0x5ff: 0x17e, + // Block 0x18, offset 0x600 + 0x600: 0xa2, 0x601: 0xa2, 0x602: 0xa2, 0x603: 0x17f, 0x604: 0x180, 0x605: 0x181, 0x606: 0x182, 0x607: 0x183, + 0x608: 0xa2, 0x609: 0x184, 0x60a: 0x100, 0x60b: 0x185, 0x60c: 0xa2, 0x60d: 0x186, 0x60e: 0x100, 0x60f: 0x100, + 0x610: 0x63, 0x611: 0x64, 0x612: 0x65, 0x613: 0x66, 0x614: 0x67, 0x615: 0x68, 0x616: 0x69, 0x617: 0x6a, + 0x618: 0x6b, 0x619: 0x6c, 0x61a: 0x6d, 0x61b: 0x6e, 0x61c: 0x6f, 0x61d: 0x70, 0x61e: 0x71, 0x61f: 0x72, + 0x620: 0xa2, 0x621: 0xa2, 0x622: 0xa2, 0x623: 0xa2, 0x624: 0xa2, 0x625: 0xa2, 0x626: 0xa2, 0x627: 0xa2, + 0x628: 0x187, 0x629: 0x188, 0x62a: 0x189, 0x62b: 0x100, 0x62c: 0x100, 0x62d: 0x100, 0x62e: 0x100, 0x62f: 0x100, + 0x630: 0x100, 0x631: 0x100, 0x632: 0x100, 0x633: 0x100, 0x634: 0x100, 0x635: 0x100, 0x636: 0x100, 0x637: 0x100, + 0x638: 0x100, 0x639: 0x100, 0x63a: 0x100, 0x63b: 0x100, 0x63c: 0x18a, 0x63d: 0x100, 0x63e: 0x100, 0x63f: 0x100, + // Block 0x19, offset 0x640 + 0x640: 0x73, 0x641: 0x74, 0x642: 0x18b, 0x643: 0x100, 0x644: 0x18c, 0x645: 0x18d, 0x646: 0x100, 0x647: 0x100, + 0x648: 0x100, 0x649: 0x100, 0x64a: 0x18e, 0x64b: 0x18f, 0x64c: 0x100, 0x64d: 0x100, 0x64e: 0x100, 0x64f: 0x100, + 0x650: 0x100, 0x651: 0x100, 0x652: 0x100, 0x653: 0x190, 0x654: 0x100, 0x655: 0x100, 0x656: 0x100, 0x657: 0x100, + 0x658: 0x100, 0x659: 0x100, 0x65a: 0x100, 0x65b: 0x100, 0x65c: 0x100, 0x65d: 0x100, 0x65e: 0x100, 0x65f: 0x191, + 0x660: 0x127, 0x661: 0x127, 0x662: 0x127, 0x663: 0x192, 0x664: 0x75, 0x665: 0x193, 0x666: 0x100, 0x667: 0x100, + 0x668: 0x100, 0x669: 0x100, 0x66a: 0x100, 
0x66b: 0x100, 0x66c: 0x100, 0x66d: 0x100, 0x66e: 0x100, 0x66f: 0x100, + 0x670: 0x100, 0x671: 0x194, 0x672: 0x195, 0x673: 0x100, 0x674: 0x196, 0x675: 0x100, 0x676: 0x100, 0x677: 0x100, + 0x678: 0x76, 0x679: 0x77, 0x67a: 0x78, 0x67b: 0x197, 0x67c: 0x100, 0x67d: 0x100, 0x67e: 0x100, 0x67f: 0x100, + // Block 0x1a, offset 0x680 + 0x680: 0x198, 0x681: 0xa2, 0x682: 0x199, 0x683: 0x19a, 0x684: 0x79, 0x685: 0x7a, 0x686: 0x19b, 0x687: 0x19c, + 0x688: 0x7b, 0x689: 0x19d, 0x68a: 0x100, 0x68b: 0x100, 0x68c: 0xa2, 0x68d: 0xa2, 0x68e: 0xa2, 0x68f: 0xa2, + 0x690: 0xa2, 0x691: 0xa2, 0x692: 0xa2, 0x693: 0xa2, 0x694: 0xa2, 0x695: 0xa2, 0x696: 0xa2, 0x697: 0xa2, + 0x698: 0xa2, 0x699: 0xa2, 0x69a: 0xa2, 0x69b: 0x19e, 0x69c: 0xa2, 0x69d: 0x19f, 0x69e: 0xa2, 0x69f: 0x1a0, + 0x6a0: 0x1a1, 0x6a1: 0x1a2, 0x6a2: 0x1a3, 0x6a3: 0x100, 0x6a4: 0xa2, 0x6a5: 0xa2, 0x6a6: 0xa2, 0x6a7: 0xa2, + 0x6a8: 0xa2, 0x6a9: 0x1a4, 0x6aa: 0x1a5, 0x6ab: 0x1a6, 0x6ac: 0xa2, 0x6ad: 0xa2, 0x6ae: 0x1a7, 0x6af: 0x1a8, + 0x6b0: 0x100, 0x6b1: 0x100, 0x6b2: 0x100, 0x6b3: 0x100, 0x6b4: 0x100, 0x6b5: 0x100, 0x6b6: 0x100, 0x6b7: 0x100, + 0x6b8: 0x100, 0x6b9: 0x100, 0x6ba: 0x100, 0x6bb: 0x100, 0x6bc: 0x100, 0x6bd: 0x100, 0x6be: 0x100, 0x6bf: 0x100, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0xa6, 0x6c1: 0xa6, 0x6c2: 0xa6, 0x6c3: 0xa6, 0x6c4: 0xa6, 0x6c5: 0xa6, 0x6c6: 0xa6, 0x6c7: 0xa6, + 0x6c8: 0xa6, 0x6c9: 0xa6, 0x6ca: 0xa6, 0x6cb: 0xa6, 0x6cc: 0xa6, 0x6cd: 0xa6, 0x6ce: 0xa6, 0x6cf: 0xa6, + 0x6d0: 0xa6, 0x6d1: 0xa6, 0x6d2: 0xa6, 0x6d3: 0xa6, 0x6d4: 0xa6, 0x6d5: 0xa6, 0x6d6: 0xa6, 0x6d7: 0xa6, + 0x6d8: 0xa6, 0x6d9: 0xa6, 0x6da: 0xa6, 0x6db: 0x1a9, 0x6dc: 0xa6, 0x6dd: 0xa6, 0x6de: 0xa6, 0x6df: 0xa6, + 0x6e0: 0xa6, 0x6e1: 0xa6, 0x6e2: 0xa6, 0x6e3: 0xa6, 0x6e4: 0xa6, 0x6e5: 0xa6, 0x6e6: 0xa6, 0x6e7: 0xa6, + 0x6e8: 0xa6, 0x6e9: 0xa6, 0x6ea: 0xa6, 0x6eb: 0xa6, 0x6ec: 0xa6, 0x6ed: 0xa6, 0x6ee: 0xa6, 0x6ef: 0xa6, + 0x6f0: 0xa6, 0x6f1: 0xa6, 0x6f2: 0xa6, 0x6f3: 0xa6, 0x6f4: 0xa6, 0x6f5: 0xa6, 0x6f6: 0xa6, 0x6f7: 0xa6, + 0x6f8: 0xa6, 
0x6f9: 0xa6, 0x6fa: 0xa6, 0x6fb: 0xa6, 0x6fc: 0xa6, 0x6fd: 0xa6, 0x6fe: 0xa6, 0x6ff: 0xa6, + // Block 0x1c, offset 0x700 + 0x700: 0xa6, 0x701: 0xa6, 0x702: 0xa6, 0x703: 0xa6, 0x704: 0xa6, 0x705: 0xa6, 0x706: 0xa6, 0x707: 0xa6, + 0x708: 0xa6, 0x709: 0xa6, 0x70a: 0xa6, 0x70b: 0xa6, 0x70c: 0xa6, 0x70d: 0xa6, 0x70e: 0xa6, 0x70f: 0xa6, + 0x710: 0xa6, 0x711: 0xa6, 0x712: 0xa6, 0x713: 0xa6, 0x714: 0xa6, 0x715: 0xa6, 0x716: 0xa6, 0x717: 0xa6, + 0x718: 0xa6, 0x719: 0xa6, 0x71a: 0xa6, 0x71b: 0xa6, 0x71c: 0x1aa, 0x71d: 0xa6, 0x71e: 0xa6, 0x71f: 0xa6, + 0x720: 0x1ab, 0x721: 0xa6, 0x722: 0xa6, 0x723: 0xa6, 0x724: 0xa6, 0x725: 0xa6, 0x726: 0xa6, 0x727: 0xa6, + 0x728: 0xa6, 0x729: 0xa6, 0x72a: 0xa6, 0x72b: 0xa6, 0x72c: 0xa6, 0x72d: 0xa6, 0x72e: 0xa6, 0x72f: 0xa6, + 0x730: 0xa6, 0x731: 0xa6, 0x732: 0xa6, 0x733: 0xa6, 0x734: 0xa6, 0x735: 0xa6, 0x736: 0xa6, 0x737: 0xa6, + 0x738: 0xa6, 0x739: 0xa6, 0x73a: 0xa6, 0x73b: 0xa6, 0x73c: 0xa6, 0x73d: 0xa6, 0x73e: 0xa6, 0x73f: 0xa6, + // Block 0x1d, offset 0x740 + 0x740: 0xa6, 0x741: 0xa6, 0x742: 0xa6, 0x743: 0xa6, 0x744: 0xa6, 0x745: 0xa6, 0x746: 0xa6, 0x747: 0xa6, + 0x748: 0xa6, 0x749: 0xa6, 0x74a: 0xa6, 0x74b: 0xa6, 0x74c: 0xa6, 0x74d: 0xa6, 0x74e: 0xa6, 0x74f: 0xa6, + 0x750: 0xa6, 0x751: 0xa6, 0x752: 0xa6, 0x753: 0xa6, 0x754: 0xa6, 0x755: 0xa6, 0x756: 0xa6, 0x757: 0xa6, + 0x758: 0xa6, 0x759: 0xa6, 0x75a: 0xa6, 0x75b: 0xa6, 0x75c: 0xa6, 0x75d: 0xa6, 0x75e: 0xa6, 0x75f: 0xa6, + 0x760: 0xa6, 0x761: 0xa6, 0x762: 0xa6, 0x763: 0xa6, 0x764: 0xa6, 0x765: 0xa6, 0x766: 0xa6, 0x767: 0xa6, + 0x768: 0xa6, 0x769: 0xa6, 0x76a: 0xa6, 0x76b: 0xa6, 0x76c: 0xa6, 0x76d: 0xa6, 0x76e: 0xa6, 0x76f: 0xa6, + 0x770: 0xa6, 0x771: 0xa6, 0x772: 0xa6, 0x773: 0xa6, 0x774: 0xa6, 0x775: 0xa6, 0x776: 0xa6, 0x777: 0xa6, + 0x778: 0xa6, 0x779: 0xa6, 0x77a: 0x1ac, 0x77b: 0xa6, 0x77c: 0xa6, 0x77d: 0xa6, 0x77e: 0xa6, 0x77f: 0xa6, + // Block 0x1e, offset 0x780 + 0x780: 0xa6, 0x781: 0xa6, 0x782: 0xa6, 0x783: 0xa6, 0x784: 0xa6, 0x785: 0xa6, 0x786: 0xa6, 0x787: 0xa6, + 0x788: 
0xa6, 0x789: 0xa6, 0x78a: 0xa6, 0x78b: 0xa6, 0x78c: 0xa6, 0x78d: 0xa6, 0x78e: 0xa6, 0x78f: 0xa6, + 0x790: 0xa6, 0x791: 0xa6, 0x792: 0xa6, 0x793: 0xa6, 0x794: 0xa6, 0x795: 0xa6, 0x796: 0xa6, 0x797: 0xa6, + 0x798: 0xa6, 0x799: 0xa6, 0x79a: 0xa6, 0x79b: 0xa6, 0x79c: 0xa6, 0x79d: 0xa6, 0x79e: 0xa6, 0x79f: 0xa6, + 0x7a0: 0xa6, 0x7a1: 0xa6, 0x7a2: 0xa6, 0x7a3: 0xa6, 0x7a4: 0xa6, 0x7a5: 0xa6, 0x7a6: 0xa6, 0x7a7: 0xa6, + 0x7a8: 0xa6, 0x7a9: 0xa6, 0x7aa: 0xa6, 0x7ab: 0xa6, 0x7ac: 0xa6, 0x7ad: 0xa6, 0x7ae: 0xa6, 0x7af: 0x1ad, + 0x7b0: 0x100, 0x7b1: 0x100, 0x7b2: 0x100, 0x7b3: 0x100, 0x7b4: 0x100, 0x7b5: 0x100, 0x7b6: 0x100, 0x7b7: 0x100, + 0x7b8: 0x100, 0x7b9: 0x100, 0x7ba: 0x100, 0x7bb: 0x100, 0x7bc: 0x100, 0x7bd: 0x100, 0x7be: 0x100, 0x7bf: 0x100, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x100, 0x7c1: 0x100, 0x7c2: 0x100, 0x7c3: 0x100, 0x7c4: 0x100, 0x7c5: 0x100, 0x7c6: 0x100, 0x7c7: 0x100, + 0x7c8: 0x100, 0x7c9: 0x100, 0x7ca: 0x100, 0x7cb: 0x100, 0x7cc: 0x100, 0x7cd: 0x100, 0x7ce: 0x100, 0x7cf: 0x100, + 0x7d0: 0x100, 0x7d1: 0x100, 0x7d2: 0x100, 0x7d3: 0x100, 0x7d4: 0x100, 0x7d5: 0x100, 0x7d6: 0x100, 0x7d7: 0x100, + 0x7d8: 0x100, 0x7d9: 0x100, 0x7da: 0x100, 0x7db: 0x100, 0x7dc: 0x100, 0x7dd: 0x100, 0x7de: 0x100, 0x7df: 0x100, + 0x7e0: 0x7c, 0x7e1: 0x7d, 0x7e2: 0x7e, 0x7e3: 0x7f, 0x7e4: 0x80, 0x7e5: 0x81, 0x7e6: 0x82, 0x7e7: 0x83, + 0x7e8: 0x84, 0x7e9: 0x100, 0x7ea: 0x100, 0x7eb: 0x100, 0x7ec: 0x100, 0x7ed: 0x100, 0x7ee: 0x100, 0x7ef: 0x100, + 0x7f0: 0x100, 0x7f1: 0x100, 0x7f2: 0x100, 0x7f3: 0x100, 0x7f4: 0x100, 0x7f5: 0x100, 0x7f6: 0x100, 0x7f7: 0x100, + 0x7f8: 0x100, 0x7f9: 0x100, 0x7fa: 0x100, 0x7fb: 0x100, 0x7fc: 0x100, 0x7fd: 0x100, 0x7fe: 0x100, 0x7ff: 0x100, + // Block 0x20, offset 0x800 + 0x800: 0xa6, 0x801: 0xa6, 0x802: 0xa6, 0x803: 0xa6, 0x804: 0xa6, 0x805: 0xa6, 0x806: 0xa6, 0x807: 0xa6, + 0x808: 0xa6, 0x809: 0xa6, 0x80a: 0xa6, 0x80b: 0xa6, 0x80c: 0xa6, 0x80d: 0x1ae, 0x80e: 0xa6, 0x80f: 0xa6, + 0x810: 0xa6, 0x811: 0xa6, 0x812: 0xa6, 0x813: 0xa6, 0x814: 0xa6, 0x815: 
0xa6, 0x816: 0xa6, 0x817: 0xa6, + 0x818: 0xa6, 0x819: 0xa6, 0x81a: 0xa6, 0x81b: 0xa6, 0x81c: 0xa6, 0x81d: 0xa6, 0x81e: 0xa6, 0x81f: 0xa6, + 0x820: 0xa6, 0x821: 0xa6, 0x822: 0xa6, 0x823: 0xa6, 0x824: 0xa6, 0x825: 0xa6, 0x826: 0xa6, 0x827: 0xa6, + 0x828: 0xa6, 0x829: 0xa6, 0x82a: 0xa6, 0x82b: 0xa6, 0x82c: 0xa6, 0x82d: 0xa6, 0x82e: 0xa6, 0x82f: 0xa6, + 0x830: 0xa6, 0x831: 0xa6, 0x832: 0xa6, 0x833: 0xa6, 0x834: 0xa6, 0x835: 0xa6, 0x836: 0xa6, 0x837: 0xa6, + 0x838: 0xa6, 0x839: 0xa6, 0x83a: 0xa6, 0x83b: 0xa6, 0x83c: 0xa6, 0x83d: 0xa6, 0x83e: 0xa6, 0x83f: 0xa6, + // Block 0x21, offset 0x840 + 0x840: 0xa6, 0x841: 0xa6, 0x842: 0xa6, 0x843: 0xa6, 0x844: 0xa6, 0x845: 0xa6, 0x846: 0xa6, 0x847: 0xa6, + 0x848: 0xa6, 0x849: 0xa6, 0x84a: 0xa6, 0x84b: 0xa6, 0x84c: 0xa6, 0x84d: 0xa6, 0x84e: 0x1af, 0x84f: 0x100, + 0x850: 0x100, 0x851: 0x100, 0x852: 0x100, 0x853: 0x100, 0x854: 0x100, 0x855: 0x100, 0x856: 0x100, 0x857: 0x100, + 0x858: 0x100, 0x859: 0x100, 0x85a: 0x100, 0x85b: 0x100, 0x85c: 0x100, 0x85d: 0x100, 0x85e: 0x100, 0x85f: 0x100, + 0x860: 0x100, 0x861: 0x100, 0x862: 0x100, 0x863: 0x100, 0x864: 0x100, 0x865: 0x100, 0x866: 0x100, 0x867: 0x100, + 0x868: 0x100, 0x869: 0x100, 0x86a: 0x100, 0x86b: 0x100, 0x86c: 0x100, 0x86d: 0x100, 0x86e: 0x100, 0x86f: 0x100, + 0x870: 0x100, 0x871: 0x100, 0x872: 0x100, 0x873: 0x100, 0x874: 0x100, 0x875: 0x100, 0x876: 0x100, 0x877: 0x100, + 0x878: 0x100, 0x879: 0x100, 0x87a: 0x100, 0x87b: 0x100, 0x87c: 0x100, 0x87d: 0x100, 0x87e: 0x100, 0x87f: 0x100, + // Block 0x22, offset 0x880 + 0x890: 0x0c, 0x891: 0x0d, 0x892: 0x0e, 0x893: 0x0f, 0x894: 0x10, 0x895: 0x0a, 0x896: 0x11, 0x897: 0x07, + 0x898: 0x12, 0x899: 0x0a, 0x89a: 0x13, 0x89b: 0x14, 0x89c: 0x15, 0x89d: 0x16, 0x89e: 0x17, 0x89f: 0x18, + 0x8a0: 0x07, 0x8a1: 0x07, 0x8a2: 0x07, 0x8a3: 0x07, 0x8a4: 0x07, 0x8a5: 0x07, 0x8a6: 0x07, 0x8a7: 0x07, + 0x8a8: 0x07, 0x8a9: 0x07, 0x8aa: 0x19, 0x8ab: 0x1a, 0x8ac: 0x1b, 0x8ad: 0x07, 0x8ae: 0x1c, 0x8af: 0x1d, + 0x8b0: 0x07, 0x8b1: 0x1e, 0x8b2: 0x1f, 0x8b3: 0x0a, 
0x8b4: 0x0a, 0x8b5: 0x0a, 0x8b6: 0x0a, 0x8b7: 0x0a, + 0x8b8: 0x0a, 0x8b9: 0x0a, 0x8ba: 0x0a, 0x8bb: 0x0a, 0x8bc: 0x0a, 0x8bd: 0x0a, 0x8be: 0x0a, 0x8bf: 0x0a, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0a, 0x8c1: 0x0a, 0x8c2: 0x0a, 0x8c3: 0x0a, 0x8c4: 0x0a, 0x8c5: 0x0a, 0x8c6: 0x0a, 0x8c7: 0x0a, + 0x8c8: 0x0a, 0x8c9: 0x0a, 0x8ca: 0x0a, 0x8cb: 0x0a, 0x8cc: 0x0a, 0x8cd: 0x0a, 0x8ce: 0x0a, 0x8cf: 0x0a, + 0x8d0: 0x0a, 0x8d1: 0x0a, 0x8d2: 0x0a, 0x8d3: 0x0a, 0x8d4: 0x0a, 0x8d5: 0x0a, 0x8d6: 0x0a, 0x8d7: 0x0a, + 0x8d8: 0x0a, 0x8d9: 0x0a, 0x8da: 0x0a, 0x8db: 0x0a, 0x8dc: 0x0a, 0x8dd: 0x0a, 0x8de: 0x0a, 0x8df: 0x0a, + 0x8e0: 0x0a, 0x8e1: 0x0a, 0x8e2: 0x0a, 0x8e3: 0x0a, 0x8e4: 0x0a, 0x8e5: 0x0a, 0x8e6: 0x0a, 0x8e7: 0x0a, + 0x8e8: 0x0a, 0x8e9: 0x0a, 0x8ea: 0x0a, 0x8eb: 0x0a, 0x8ec: 0x0a, 0x8ed: 0x0a, 0x8ee: 0x0a, 0x8ef: 0x0a, + 0x8f0: 0x0a, 0x8f1: 0x0a, 0x8f2: 0x0a, 0x8f3: 0x0a, 0x8f4: 0x0a, 0x8f5: 0x0a, 0x8f6: 0x0a, 0x8f7: 0x0a, + 0x8f8: 0x0a, 0x8f9: 0x0a, 0x8fa: 0x0a, 0x8fb: 0x0a, 0x8fc: 0x0a, 0x8fd: 0x0a, 0x8fe: 0x0a, 0x8ff: 0x0a, + // Block 0x24, offset 0x900 + 0x900: 0x1b0, 0x901: 0x1b1, 0x902: 0x100, 0x903: 0x100, 0x904: 0x1b2, 0x905: 0x1b2, 0x906: 0x1b2, 0x907: 0x1b3, + 0x908: 0x100, 0x909: 0x100, 0x90a: 0x100, 0x90b: 0x100, 0x90c: 0x100, 0x90d: 0x100, 0x90e: 0x100, 0x90f: 0x100, + 0x910: 0x100, 0x911: 0x100, 0x912: 0x100, 0x913: 0x100, 0x914: 0x100, 0x915: 0x100, 0x916: 0x100, 0x917: 0x100, + 0x918: 0x100, 0x919: 0x100, 0x91a: 0x100, 0x91b: 0x100, 0x91c: 0x100, 0x91d: 0x100, 0x91e: 0x100, 0x91f: 0x100, + 0x920: 0x100, 0x921: 0x100, 0x922: 0x100, 0x923: 0x100, 0x924: 0x100, 0x925: 0x100, 0x926: 0x100, 0x927: 0x100, + 0x928: 0x100, 0x929: 0x100, 0x92a: 0x100, 0x92b: 0x100, 0x92c: 0x100, 0x92d: 0x100, 0x92e: 0x100, 0x92f: 0x100, + 0x930: 0x100, 0x931: 0x100, 0x932: 0x100, 0x933: 0x100, 0x934: 0x100, 0x935: 0x100, 0x936: 0x100, 0x937: 0x100, + 0x938: 0x100, 0x939: 0x100, 0x93a: 0x100, 0x93b: 0x100, 0x93c: 0x100, 0x93d: 0x100, 0x93e: 0x100, 0x93f: 0x100, + // Block 0x25, 
offset 0x940 + 0x940: 0x0a, 0x941: 0x0a, 0x942: 0x0a, 0x943: 0x0a, 0x944: 0x0a, 0x945: 0x0a, 0x946: 0x0a, 0x947: 0x0a, + 0x948: 0x0a, 0x949: 0x0a, 0x94a: 0x0a, 0x94b: 0x0a, 0x94c: 0x0a, 0x94d: 0x0a, 0x94e: 0x0a, 0x94f: 0x0a, + 0x950: 0x0a, 0x951: 0x0a, 0x952: 0x0a, 0x953: 0x0a, 0x954: 0x0a, 0x955: 0x0a, 0x956: 0x0a, 0x957: 0x0a, + 0x958: 0x0a, 0x959: 0x0a, 0x95a: 0x0a, 0x95b: 0x0a, 0x95c: 0x0a, 0x95d: 0x0a, 0x95e: 0x0a, 0x95f: 0x0a, + 0x960: 0x22, 0x961: 0x0a, 0x962: 0x0a, 0x963: 0x0a, 0x964: 0x0a, 0x965: 0x0a, 0x966: 0x0a, 0x967: 0x0a, + 0x968: 0x0a, 0x969: 0x0a, 0x96a: 0x0a, 0x96b: 0x0a, 0x96c: 0x0a, 0x96d: 0x0a, 0x96e: 0x0a, 0x96f: 0x0a, + 0x970: 0x0a, 0x971: 0x0a, 0x972: 0x0a, 0x973: 0x0a, 0x974: 0x0a, 0x975: 0x0a, 0x976: 0x0a, 0x977: 0x0a, + 0x978: 0x0a, 0x979: 0x0a, 0x97a: 0x0a, 0x97b: 0x0a, 0x97c: 0x0a, 0x97d: 0x0a, 0x97e: 0x0a, 0x97f: 0x0a, + // Block 0x26, offset 0x980 + 0x980: 0x0a, 0x981: 0x0a, 0x982: 0x0a, 0x983: 0x0a, 0x984: 0x0a, 0x985: 0x0a, 0x986: 0x0a, 0x987: 0x0a, + 0x988: 0x0a, 0x989: 0x0a, 0x98a: 0x0a, 0x98b: 0x0a, 0x98c: 0x0a, 0x98d: 0x0a, 0x98e: 0x0a, 0x98f: 0x0a, +} + +// idnaSparseOffset: 303 entries, 606 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x7e, 0x87, 0x97, 0xa6, 0xb1, 0xbe, 0xcf, 0xd9, 0xe0, 0xed, 0xfe, 0x105, 0x110, 0x11f, 0x12d, 0x137, 0x139, 0x13e, 0x141, 0x144, 0x146, 0x152, 0x15d, 0x165, 0x16b, 0x171, 0x176, 0x17b, 0x17e, 0x182, 0x188, 0x18d, 0x198, 0x1a2, 0x1a8, 0x1b9, 0x1c4, 0x1c7, 0x1cf, 0x1d2, 0x1df, 0x1e7, 0x1eb, 0x1f2, 0x1fa, 0x20a, 0x216, 0x219, 0x223, 0x22f, 0x23b, 0x247, 0x24f, 0x254, 0x261, 0x272, 0x27d, 0x282, 0x28b, 0x293, 0x299, 0x29e, 0x2a1, 0x2a5, 0x2ab, 0x2af, 0x2b3, 0x2b7, 0x2bc, 0x2c4, 0x2cb, 0x2d6, 0x2e0, 0x2e4, 0x2e7, 0x2ed, 0x2f1, 0x2f3, 0x2f6, 0x2f8, 0x2fb, 0x305, 0x308, 0x317, 0x31b, 0x31f, 0x321, 0x32a, 0x32e, 0x333, 0x338, 0x33e, 0x34e, 0x354, 0x358, 0x367, 0x36c, 0x374, 0x37e, 0x389, 0x391, 0x3a2, 0x3ab, 0x3bb, 0x3c8, 0x3d4, 
0x3d9, 0x3e6, 0x3ea, 0x3ef, 0x3f1, 0x3f3, 0x3f7, 0x3f9, 0x3fd, 0x406, 0x40c, 0x410, 0x420, 0x42a, 0x42f, 0x432, 0x438, 0x43f, 0x444, 0x448, 0x44e, 0x453, 0x45c, 0x461, 0x467, 0x46e, 0x475, 0x47c, 0x480, 0x483, 0x488, 0x494, 0x49a, 0x49f, 0x4a6, 0x4ae, 0x4b3, 0x4b7, 0x4c7, 0x4ce, 0x4d2, 0x4d6, 0x4dd, 0x4df, 0x4e2, 0x4e5, 0x4e9, 0x4f2, 0x4f6, 0x4fe, 0x501, 0x509, 0x514, 0x523, 0x52f, 0x535, 0x542, 0x54e, 0x556, 0x55f, 0x56a, 0x571, 0x580, 0x58d, 0x591, 0x59e, 0x5a7, 0x5ab, 0x5ba, 0x5c2, 0x5cd, 0x5d6, 0x5dc, 0x5e4, 0x5ed, 0x5f9, 0x5fc, 0x608, 0x60b, 0x614, 0x617, 0x61c, 0x625, 0x62a, 0x637, 0x642, 0x64b, 0x656, 0x659, 0x65c, 0x666, 0x66f, 0x67b, 0x688, 0x695, 0x6a3, 0x6aa, 0x6b5, 0x6bc, 0x6c0, 0x6c4, 0x6c7, 0x6cc, 0x6cf, 0x6d2, 0x6d6, 0x6d9, 0x6de, 0x6e5, 0x6e8, 0x6f0, 0x6f4, 0x6ff, 0x702, 0x705, 0x708, 0x70e, 0x714, 0x71d, 0x720, 0x723, 0x726, 0x72e, 0x733, 0x73c, 0x73f, 0x744, 0x74e, 0x752, 0x756, 0x759, 0x75c, 0x760, 0x76f, 0x77b, 0x77f, 0x784, 0x789, 0x78e, 0x792, 0x797, 0x7a0, 0x7a5, 0x7a9, 0x7af, 0x7b5, 0x7ba, 0x7c0, 0x7c6, 0x7d0, 0x7d6, 0x7df, 0x7e2, 0x7e5, 0x7e9, 0x7ed, 0x7f1, 0x7f7, 0x7fd, 0x802, 0x805, 0x815, 0x81c, 0x820, 0x827, 0x82b, 0x831, 0x838, 0x83f, 0x845, 0x84e, 0x852, 0x860, 0x863, 0x866, 0x86a, 0x86e, 0x871, 0x875, 0x878, 0x87d, 0x87f, 0x881} + +// idnaSparseValues: 2180 entries, 8720 bytes +var idnaSparseValues = [2180]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + 
{value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x00a9, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x00b1, lo: 0xb2, hi: 0xb2}, + {value: 0x00b9, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x00c1, lo: 0xb7, hi: 0xb7}, + {value: 0x00c9, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x06}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x6, offset 0x33 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0131, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3e + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 
0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xae}, + {value: 0x0808, lo: 0xaf, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4a + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4e + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5d + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x62 + {value: 0x0000, lo: 0x09}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbf}, + // Block 0xc, offset 0x6c + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, 
lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x78 + {value: 0x0000, lo: 0x05}, + {value: 0x0a08, lo: 0x80, hi: 0x88}, + {value: 0x0808, lo: 0x89, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xe, offset 0x7e + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0xf, offset 0x87 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x10, offset 0x97 + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 
0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x11, offset 0xa6 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbe + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x14, offset 0xcf + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + 
{value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x01f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x15, offset 0xd9 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x16, offset 0xe0 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0201, lo: 0x9c, hi: 0x9c}, + {value: 0x0209, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x17, offset 0xed + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x18, offset 0xfe + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 
0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x19, offset 0x105 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1a, offset 0x110 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1b, offset 0x11f + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1c, offset 0x12d + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, 
lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1d, offset 0x137 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1e, offset 0x139 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x1f, offset 0x13e + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x20, offset 0x141 + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x21, offset 0x144 + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x22, offset 0x146 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x23, offset 0x152 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x24, offset 0x15d + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 
0x98, hi: 0xbf}, + // Block 0x25, offset 0x165 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x16b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x27, offset 0x171 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x28, offset 0x176 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x29, offset 0x17b + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2a, offset 0x17e + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2b, offset 0x182 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2c, offset 0x188 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2d, offset 0x18d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x3808, lo: 0x95, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9e}, + 
{value: 0x0008, lo: 0x9f, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3808, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2e, offset 0x198 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x2f, offset 0x1a2 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x30, offset 0x1a8 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x31, offset 0x1b9 + {value: 0x0000, lo: 0x0a}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x33c0, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 
0x32, offset 0x1c4 + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x33, offset 0x1c7 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x34, offset 0x1cf + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x35, offset 0x1d2 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x36, offset 0x1df + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x37, offset 0x1e7 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x38, offset 0x1eb + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x39, offset 0x1f2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, 
lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1fa + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3b, offset 0x20a + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x216 + {value: 0x0000, lo: 0x02}, + {value: 0x3308, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0xbf}, + // Block 0x3d, offset 0x219 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3e, offset 0x223 + {value: 0x0000, lo: 0x0b}, + 
{value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3f, offset 0x22f + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x40, offset 0x23b + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x41, offset 0x247 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x42, offset 0x24f + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x43, offset 0x254 + {value: 0x0000, 
lo: 0x0c}, + {value: 0x02a9, lo: 0x80, hi: 0x80}, + {value: 0x02b1, lo: 0x81, hi: 0x81}, + {value: 0x02b9, lo: 0x82, hi: 0x82}, + {value: 0x02c1, lo: 0x83, hi: 0x83}, + {value: 0x02c9, lo: 0x84, hi: 0x85}, + {value: 0x02d1, lo: 0x86, hi: 0x86}, + {value: 0x02d9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x059d, lo: 0x90, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x059d, lo: 0xbd, hi: 0xbf}, + // Block 0x44, offset 0x261 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x45, offset 0x272 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x46, offset 0x27d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x47, offset 0x282 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 
0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x0851, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x48, offset 0x28b + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0859, lo: 0xac, hi: 0xac}, + {value: 0x0861, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x0869, lo: 0xaf, hi: 0xaf}, + {value: 0x0871, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x293 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4a, offset 0x299 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09dd, lo: 0xa9, hi: 0xa9}, + {value: 0x09fd, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4b, offset 0x29e + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4c, offset 0x2a1 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0929, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4d, offset 0x2a5 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e7e, lo: 0xb4, hi: 0xb4}, + {value: 0x0932, lo: 0xb5, hi: 0xb5}, + {value: 0x0e9e, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x4e, offset 0x2ab + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x0939, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x4f, offset 0x2af + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x50, offset 0x2b3 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, 
lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0xbf}, + // Block 0x51, offset 0x2b7 + {value: 0x0000, lo: 0x04}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ebd, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x52, offset 0x2bc + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x53, offset 0x2c4 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x54, offset 0x2cb + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x55, offset 0x2d6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x56, offset 0x2e0 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x57, offset 
0x2e4 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x58, offset 0x2e7 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0ef5, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2ed + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0f15, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5a, offset 0x2f1 + {value: 0x0020, lo: 0x01}, + {value: 0x0f35, lo: 0x80, hi: 0xbf}, + // Block 0x5b, offset 0x2f3 + {value: 0x0020, lo: 0x02}, + {value: 0x1735, lo: 0x80, hi: 0x8f}, + {value: 0x1915, lo: 0x90, hi: 0xbf}, + // Block 0x5c, offset 0x2f6 + {value: 0x0020, lo: 0x01}, + {value: 0x1f15, lo: 0x80, hi: 0xbf}, + // Block 0x5d, offset 0x2f8 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x5e, offset 0x2fb + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x096a, lo: 0x9b, hi: 0x9b}, + {value: 0x0972, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x0979, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x5f, offset 0x305 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x0981, lo: 0xbf, hi: 0xbf}, + // Block 0x60, offset 0x308 + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb0}, + {value: 0x2a35, lo: 0xb1, hi: 0xb1}, + {value: 0x2a55, lo: 0xb2, hi: 0xb2}, + {value: 0x2a75, lo: 0xb3, hi: 0xb3}, + {value: 0x2a95, lo: 0xb4, hi: 0xb4}, + {value: 0x2a75, lo: 0xb5, hi: 0xb5}, + {value: 0x2ab5, lo: 0xb6, hi: 0xb6}, + {value: 0x2ad5, lo: 0xb7, 
hi: 0xb7}, + {value: 0x2af5, lo: 0xb8, hi: 0xb9}, + {value: 0x2b15, lo: 0xba, hi: 0xbb}, + {value: 0x2b35, lo: 0xbc, hi: 0xbd}, + {value: 0x2b15, lo: 0xbe, hi: 0xbf}, + // Block 0x61, offset 0x317 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x62, offset 0x31b + {value: 0x0008, lo: 0x03}, + {value: 0x098a, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0a82, lo: 0xa0, hi: 0xbf}, + // Block 0x63, offset 0x31f + {value: 0x0008, lo: 0x01}, + {value: 0x0d19, lo: 0x80, hi: 0xbf}, + // Block 0x64, offset 0x321 + {value: 0x0008, lo: 0x08}, + {value: 0x0f19, lo: 0x80, hi: 0xb0}, + {value: 0x4045, lo: 0xb1, hi: 0xb1}, + {value: 0x10a1, lo: 0xb2, hi: 0xb3}, + {value: 0x4065, lo: 0xb4, hi: 0xb4}, + {value: 0x10b1, lo: 0xb5, hi: 0xb7}, + {value: 0x4085, lo: 0xb8, hi: 0xb8}, + {value: 0x4085, lo: 0xb9, hi: 0xb9}, + {value: 0x10c9, lo: 0xba, hi: 0xbf}, + // Block 0x65, offset 0x32a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x66, offset 0x32e + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x67, offset 0x333 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x68, offset 0x338 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x69, offset 0x33e + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 
0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x3b08, lo: 0xac, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6a, offset 0x34e + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x354 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6c, offset 0x358 + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x6d, offset 0x367 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x6e, offset 0x36c + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 
0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x6f, offset 0x374 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x70, offset 0x37e + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x71, offset 0x389 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x72, offset 0x391 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x73, offset 0x3a2 + {value: 
0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x74, offset 0x3ab + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3bb + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x76, offset 0x3c8 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x449d, lo: 0x9c, hi: 0x9c}, + {value: 0x44b5, lo: 0x9d, hi: 0x9d}, + {value: 0x0941, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa8}, + {value: 0x13f9, lo: 0xa9, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x44cd, lo: 0xb0, hi: 0xbf}, + // 
Block 0x77, offset 0x3d4 + {value: 0x0000, lo: 0x04}, + {value: 0x44ed, lo: 0x80, hi: 0x8f}, + {value: 0x450d, lo: 0x90, hi: 0x9f}, + {value: 0x452d, lo: 0xa0, hi: 0xaf}, + {value: 0x450d, lo: 0xb0, hi: 0xbf}, + // Block 0x78, offset 0x3d9 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x79, offset 0x3e6 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3ea + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7b, offset 0x3ef + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x7c, offset 0x3f1 + {value: 0x0020, lo: 0x01}, + {value: 0x454d, lo: 0x80, hi: 0xbf}, + // Block 0x7d, offset 0x3f3 + {value: 0x0020, lo: 0x03}, + {value: 0x4d4d, lo: 0x80, hi: 0x94}, + {value: 0x4b0d, lo: 0x95, hi: 0x95}, + {value: 0x4fed, lo: 0x96, hi: 0xbf}, + // Block 0x7e, offset 0x3f7 + {value: 0x0020, lo: 0x01}, + {value: 0x552d, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x3f9 + {value: 0x0020, lo: 0x03}, + {value: 0x5d2d, lo: 0x80, hi: 0x84}, + {value: 0x568d, lo: 0x85, hi: 0x85}, + {value: 0x5dcd, lo: 0x86, hi: 0xbf}, + // Block 0x80, offset 0x3fd + {value: 0x0020, lo: 0x08}, + {value: 0x6b8d, lo: 0x80, hi: 0x8f}, + {value: 0x6d4d, lo: 0x90, hi: 0x90}, + {value: 0x6d8d, lo: 0x91, hi: 0xab}, + {value: 0x1401, lo: 0xac, hi: 0xac}, + {value: 
0x70ed, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x710d, lo: 0xb0, hi: 0xbf}, + // Block 0x81, offset 0x406 + {value: 0x0020, lo: 0x05}, + {value: 0x730d, lo: 0x80, hi: 0xad}, + {value: 0x656d, lo: 0xae, hi: 0xae}, + {value: 0x78cd, lo: 0xaf, hi: 0xb5}, + {value: 0x6f8d, lo: 0xb6, hi: 0xb6}, + {value: 0x79ad, lo: 0xb7, hi: 0xbf}, + // Block 0x82, offset 0x40c + {value: 0x0008, lo: 0x03}, + {value: 0x1751, lo: 0x80, hi: 0x82}, + {value: 0x1741, lo: 0x83, hi: 0x83}, + {value: 0x1769, lo: 0x84, hi: 0xbf}, + // Block 0x83, offset 0x410 + {value: 0x0008, lo: 0x0f}, + {value: 0x1d81, lo: 0x80, hi: 0x83}, + {value: 0x1d99, lo: 0x84, hi: 0x85}, + {value: 0x1da1, lo: 0x86, hi: 0x87}, + {value: 0x1da9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x1de9, lo: 0x92, hi: 0x97}, + {value: 0x1e11, lo: 0x98, hi: 0x9c}, + {value: 0x1e31, lo: 0x9d, hi: 0xb3}, + {value: 0x1d71, lo: 0xb4, hi: 0xb4}, + {value: 0x1d81, lo: 0xb5, hi: 0xb5}, + {value: 0x1ee9, lo: 0xb6, hi: 0xbb}, + {value: 0x1f09, lo: 0xbc, hi: 0xbc}, + {value: 0x1ef9, lo: 0xbd, hi: 0xbd}, + {value: 0x1f19, lo: 0xbe, hi: 0xbf}, + // Block 0x84, offset 0x420 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x85, offset 0x42a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x86, offset 0x42f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x87, 
offset 0x432 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x88, offset 0x438 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x89, offset 0x43f + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8a, offset 0x444 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8b, offset 0x448 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8c, offset 0x44e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8d, offset 0x453 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x8e, offset 0x45c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8f, offset 
0x461 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x90, offset 0x467 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8b0d, lo: 0x98, hi: 0x9f}, + {value: 0x8b25, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x91, offset 0x46e + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8b25, lo: 0xb0, hi: 0xb7}, + {value: 0x8b0d, lo: 0xb8, hi: 0xbf}, + // Block 0x92, offset 0x475 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x93, offset 0x47c + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x94, offset 0x480 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x95, offset 0x483 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x96, offset 0x488 + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, 
hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x97, offset 0x494 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x49a + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x99, offset 0x49f + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9a, offset 0x4a6 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9b, offset 0x4ae + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9c, offset 0x4b3 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0x9d, offset 0x4b7 + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 
0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c7 + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0x9f, offset 0x4ce + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa0, offset 0x4d2 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa1, offset 0x4d6 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa2, offset 0x4dd + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa3, offset 0x4df + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa4, offset 0x4e2 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa5, offset 0x4e5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa6, offset 0x4e9 + {value: 0x0000, lo: 0x08}, + {value: 0x0908, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0xa1}, + {value: 0x0c08, lo: 0xa2, hi: 0xa2}, + {value: 0x0a08, lo: 0xa3, hi: 0xa3}, + {value: 0x3308, lo: 0xa4, hi: 
0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xa7, offset 0x4f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xa8, offset 0x4f6 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xac}, + {value: 0x0818, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xa9, offset 0x4fe + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbf}, + // Block 0xaa, offset 0x501 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0xa6}, + {value: 0x0808, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb3}, + {value: 0x0a08, lo: 0xb4, hi: 0xbf}, + // Block 0xab, offset 0x509 + {value: 0x0000, lo: 0x0a}, + {value: 0x0a08, lo: 0x80, hi: 0x84}, + {value: 0x0808, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x90}, + {value: 0x0a18, lo: 0x91, hi: 0x93}, + {value: 0x0c18, lo: 0x94, hi: 0x94}, + {value: 0x0818, lo: 0x95, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb3}, + {value: 0x0c08, lo: 0xb4, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xbf}, + // Block 0xac, offset 0x514 + {value: 0x0000, lo: 0x0e}, + {value: 0x0a08, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb3}, + {value: 0x0c08, lo: 0xb4, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb7}, + {value: 0x0a08, lo: 0xb8, hi: 0xb8}, 
+ {value: 0x0c08, lo: 0xb9, hi: 0xba}, + {value: 0x0a08, lo: 0xbb, hi: 0xbc}, + {value: 0x0c08, lo: 0xbd, hi: 0xbd}, + {value: 0x0a08, lo: 0xbe, hi: 0xbf}, + // Block 0xad, offset 0x523 + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0x81}, + {value: 0x0c08, lo: 0x82, hi: 0x83}, + {value: 0x0a08, lo: 0x84, hi: 0x84}, + {value: 0x0818, lo: 0x85, hi: 0x88}, + {value: 0x0c18, lo: 0x89, hi: 0x89}, + {value: 0x0a18, lo: 0x8a, hi: 0x8a}, + {value: 0x0918, lo: 0x8b, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xae, offset 0x52f + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xaf, offset 0x535 + {value: 0x0000, lo: 0x0c}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x3b08, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xb0, offset 0x542 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xb1, offset 0x54e + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, 
hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb2, offset 0x556 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xb3, offset 0x55f + {value: 0x0000, lo: 0x0a}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb4, offset 0x56a + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb5, offset 0x571 + {value: 0x0000, lo: 0x0e}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x3008, lo: 0x8e, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb6, offset 0x580 + 
{value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xb7, offset 0x58d + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0xbf}, + // Block 0xb8, offset 0x591 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb9, offset 0x59e + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xba, offset 0x5a7 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xbb, offset 0x5ab + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, 
+ {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xbc, offset 0x5ba + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xbd, offset 0x5c2 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xbe, offset 0x5cd + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbf, offset 0x5d6 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xc0, offset 0x5dc + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, 
hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc1, offset 0x5e4 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xc2, offset 0x5ed + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xc3, offset 0x5f9 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xc4, offset 0x5fc + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc5, offset 0x608 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xc6, offset 0x60b + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 
0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xc7, offset 0x614 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc8, offset 0x617 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc9, offset 0x61c + {value: 0x0000, lo: 0x08}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xca, offset 0x625 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xcb, offset 0x62a + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x99}, + {value: 0x3308, lo: 0x9a, hi: 0x9b}, + {value: 0x3008, lo: 0x9c, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x0018, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xbf}, + // Block 0xcc, offset 0x637 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, 
+ // Block 0xcd, offset 0x642 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xce, offset 0x64b + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xcf, offset 0x656 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd0, offset 0x659 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xd1, offset 0x65c + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xd2, offset 0x666 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xd3, offset 0x66f + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 
0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xd4, offset 0x67b + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xd5, offset 0x688 + {value: 0x0000, lo: 0x0c}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xd6, offset 0x695 + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x3008, lo: 0x93, hi: 0x94}, + {value: 0x3308, lo: 0x95, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x96}, + {value: 0x3b08, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xbf}, + // Block 
0xd7, offset 0x6a3 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd8, offset 0x6aa + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0xd9, offset 0x6b5 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3808, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xda, offset 0x6bc + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0xdb, offset 0x6c0 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xdc, offset 0x6c4 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xdd, offset 0x6c7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xde, offset 0x6cc + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xdf, offset 0x6cf + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 
0xbf}, + // Block 0xe0, offset 0x6d2 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xe1, offset 0x6d6 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0340, lo: 0xb0, hi: 0xbf}, + // Block 0xe2, offset 0x6d9 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0xe3, offset 0x6de + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xe4, offset 0x6e5 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xe5, offset 0x6e8 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe6, offset 0x6f0 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xe7, offset 0x6f4 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xe8, offset 0x6ff + {value: 0x0000, lo: 0x02}, + {value: 0x0008, 
lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xe9, offset 0x702 + {value: 0x0000, lo: 0x02}, + {value: 0xe105, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xea, offset 0x705 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0xeb, offset 0x708 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbf}, + // Block 0xec, offset 0x70e + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xed, offset 0x714 + {value: 0x0000, lo: 0x08}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0018, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xa3}, + {value: 0x3308, lo: 0xa4, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xee, offset 0x71d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xef, offset 0x720 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0xf0, offset 0x723 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xf1, offset 0x726 + {value: 0x0000, lo: 0x07}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xf2, offset 0x72e + {value: 
0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xf3, offset 0x733 + {value: 0x0000, lo: 0x08}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0x94}, + {value: 0x0008, lo: 0x95, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xa3}, + {value: 0x0008, lo: 0xa4, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xf4, offset 0x73c + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xf5, offset 0x73f + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xf6, offset 0x744 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xf7, offset 0x74e + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbf}, + // Block 0xf8, offset 0x752 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf9, offset 0x756 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xfa, offset 0x759 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xfb, offset 0x75c + {value: 0x0000, lo: 0x03}, + 
{value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xfc, offset 0x760 + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0x2379, lo: 0x9e, hi: 0x9e}, + {value: 0x2381, lo: 0x9f, hi: 0x9f}, + {value: 0x2389, lo: 0xa0, hi: 0xa0}, + {value: 0x2391, lo: 0xa1, hi: 0xa1}, + {value: 0x2399, lo: 0xa2, hi: 0xa2}, + {value: 0x23a1, lo: 0xa3, hi: 0xa3}, + {value: 0x23a9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xfd, offset 0x76f + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0x23b1, lo: 0xbb, hi: 0xbb}, + {value: 0x23b9, lo: 0xbc, hi: 0xbc}, + {value: 0x23c1, lo: 0xbd, hi: 0xbd}, + {value: 0x23c9, lo: 0xbe, hi: 0xbe}, + {value: 0x23d1, lo: 0xbf, hi: 0xbf}, + // Block 0xfe, offset 0x77b + {value: 0x0000, lo: 0x03}, + {value: 0x23d9, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xff, offset 0x77f + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0x100, offset 0x784 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x101, offset 0x789 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb8}, + 
{value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x102, offset 0x78e + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x103, offset 0x792 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x104, offset 0x797 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x105, offset 0x7a0 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x106, offset 0x7a5 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0x107, offset 0x7a9 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x108, offset 0x7af + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0x109, offset 0x7b5 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x3308, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0x10a, offset 0x7ba + {value: 0x0000, lo: 0x05}, + {value: 0x0008, 
lo: 0x80, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x10b, offset 0x7c0 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x10c, offset 0x7c6 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x10d, offset 0x7d0 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0x10e, offset 0x7d6 + {value: 0x0000, lo: 0x08}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0b08, lo: 0x8b, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x10f, offset 0x7df + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xb0}, + {value: 0x0818, lo: 0xb1, hi: 0xbf}, + // Block 0x110, offset 0x7e2 + {value: 0x0000, lo: 0x02}, + {value: 0x0818, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x111, offset 0x7e5 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0818, lo: 0x81, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x112, offset 0x7e9 + {value: 0x0000, lo: 0x03}, + {value: 
0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0x113, offset 0x7ed + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x114, offset 0x7f1 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x115, offset 0x7f7 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x116, offset 0x7fd + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0x2709, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0x117, offset 0x802 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0x118, offset 0x805 + {value: 0x0000, lo: 0x0f}, + {value: 0x2889, lo: 0x80, hi: 0x80}, + {value: 0x2891, lo: 0x81, hi: 0x81}, + {value: 0x2899, lo: 0x82, hi: 0x82}, + {value: 0x28a1, lo: 0x83, hi: 0x83}, + {value: 0x28a9, lo: 0x84, hi: 0x84}, + {value: 0x28b1, lo: 0x85, hi: 0x85}, + {value: 0x28b9, lo: 0x86, hi: 0x86}, + {value: 0x28c1, lo: 0x87, hi: 0x87}, + {value: 0x28c9, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x28d1, lo: 0x90, hi: 0x90}, + {value: 0x28d9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0x119, offset 0x815 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0xac}, + {value: 0x0040, lo: 0xad, 
hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x11a, offset 0x81c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x11b, offset 0x820 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x11c, offset 0x827 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x11d, offset 0x82b + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x11e, offset 0x831 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0x11f, offset 0x838 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x120, offset 0x83f + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x121, offset 0x845 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 
0x8e, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x122, offset 0x84e + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0xbf}, + // Block 0x123, offset 0x852 + {value: 0x0000, lo: 0x0d}, + {value: 0x0018, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0xaf}, + {value: 0x06e1, lo: 0xb0, hi: 0xb0}, + {value: 0x0049, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb2, hi: 0xb2}, + {value: 0x0031, lo: 0xb3, hi: 0xb3}, + {value: 0x06e9, lo: 0xb4, hi: 0xb4}, + {value: 0x06f1, lo: 0xb5, hi: 0xb5}, + {value: 0x06f9, lo: 0xb6, hi: 0xb6}, + {value: 0x0701, lo: 0xb7, hi: 0xb7}, + {value: 0x0709, lo: 0xb8, hi: 0xb8}, + {value: 0x0711, lo: 0xb9, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x124, offset 0x860 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x125, offset 0x863 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x126, offset 0x866 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x127, offset 0x86a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x128, offset 0x86e + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x129, offset 0x871 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbf}, + // Block 0x12a, offset 0x875 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, 
+ {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x12b, offset 0x878 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x12c, offset 0x87d + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x12d, offset 0x87f + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x12e, offset 0x881 + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 46723 bytes (45KiB); checksum: 4CF3143A diff --git a/terraform/providers/google/vendor/golang.org/x/net/idna/trie.go b/terraform/providers/google/vendor/golang.org/x/net/idna/trie.go index c4ef847e7a..4212741728 100644 --- a/terraform/providers/google/vendor/golang.org/x/net/idna/trie.go +++ b/terraform/providers/google/vendor/golang.org/x/net/idna/trie.go @@ -6,27 +6,6 @@ package idna -// appendMapping appends the mapping for the respective rune. isMapped must be -// true. A mapping is a categorization of a rune as defined in UTS #46. -func (c info) appendMapping(b []byte, s string) []byte { - index := int(c >> indexShift) - if c&xorBit == 0 { - s := mappings[index:] - return append(b, s[1:s[0]+1]...) - } - b = append(b, s...) - if c&inlineXOR == inlineXOR { - // TODO: support and handle two-byte inline masks - b[len(b)-1] ^= byte(index) - } else { - for p := len(b) - int(xorData[index]); p < len(b); p++ { - index++ - b[p] ^= xorData[index] - } - } - return b -} - // Sparse block handling code. 
type valueRange struct { diff --git a/terraform/providers/google/vendor/golang.org/x/net/idna/trie12.0.0.go b/terraform/providers/google/vendor/golang.org/x/net/idna/trie12.0.0.go new file mode 100644 index 0000000000..bb63f904b3 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/net/idna/trie12.0.0.go @@ -0,0 +1,31 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.16 +// +build !go1.16 + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. A mapping is a categorization of a rune as defined in UTS #46. +func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + s := mappings[index:] + return append(b, s[1:s[0]+1]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} diff --git a/terraform/providers/google/vendor/golang.org/x/net/idna/trie13.0.0.go b/terraform/providers/google/vendor/golang.org/x/net/idna/trie13.0.0.go new file mode 100644 index 0000000000..7d68a8dc13 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/net/idna/trie13.0.0.go @@ -0,0 +1,31 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.16 +// +build go1.16 + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. A mapping is a categorization of a rune as defined in UTS #46. 
+func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + p := index + return append(b, mappings[mappingIndex[p]:mappingIndex[p+1]]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} diff --git a/terraform/providers/google/vendor/golang.org/x/oauth2/README.md b/terraform/providers/google/vendor/golang.org/x/oauth2/README.md index 1473e1296d..781770c204 100644 --- a/terraform/providers/google/vendor/golang.org/x/oauth2/README.md +++ b/terraform/providers/google/vendor/golang.org/x/oauth2/README.md @@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) * [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) -## Policy for new packages +## Policy for new endpoints We no longer accept new provider-specific packages in this repo if all they do is add a single endpoint variable. If you just want to add a @@ -29,8 +29,12 @@ package. ## Report Issues / Send Patches -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. In particular: + +* Excluding trivial changes, all contributions should be connected to an existing issue. +* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. 
+* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2). diff --git a/terraform/providers/google/vendor/golang.org/x/oauth2/google/default.go b/terraform/providers/google/vendor/golang.org/x/oauth2/google/default.go index db6b19e93d..2cf71f0f93 100644 --- a/terraform/providers/google/vendor/golang.org/x/oauth2/google/default.go +++ b/terraform/providers/google/vendor/golang.org/x/oauth2/google/default.go @@ -8,17 +8,19 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" "runtime" + "time" "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/authhandler" ) +const adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + // Credentials holds Google credentials, including "Application Default Credentials". // For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials @@ -66,6 +68,14 @@ type CredentialsParams struct { // The OAuth2 TokenURL default override. This value overrides the default TokenURL, // unless explicitly specified by the credentials config file. Optional. TokenURL string + + // EarlyTokenRefresh is the amount of time before a token expires that a new + // token will be preemptively fetched. If unset the default value is 10 + // seconds. + // + // Note: This option is currently only respected when using credentials + // fetched from the GCE metadata server. + EarlyTokenRefresh time.Duration } func (params CredentialsParams) deepCopy() CredentialsParams { @@ -131,10 +141,8 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar // Second, try a well-known file. 
filename := wellKnownFile() - if creds, err := readCredentialsFile(ctx, filename, params); err == nil { - return creds, nil - } else if !os.IsNotExist(err) { - return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + if b, err := os.ReadFile(filename); err == nil { + return CredentialsFromJSONWithParams(ctx, b, params) } // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) @@ -153,13 +161,12 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar id, _ := metadata.ProjectID() return &Credentials{ ProjectID: id, - TokenSource: ComputeTokenSource("", params.Scopes...), + TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), }, nil } // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information", adcSetupURL) } // FindDefaultCredentials invokes FindDefaultCredentialsWithParams with the specified scopes. 
@@ -221,7 +228,7 @@ func wellKnownFile() string { } func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { - b, err := ioutil.ReadFile(filename) + b, err := os.ReadFile(filename) if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/golang.org/x/oauth2/google/doc.go b/terraform/providers/google/vendor/golang.org/x/oauth2/google/doc.go index 8a3349fc2c..ca717634a3 100644 --- a/terraform/providers/google/vendor/golang.org/x/oauth2/google/doc.go +++ b/terraform/providers/google/vendor/golang.org/x/oauth2/google/doc.go @@ -26,7 +26,7 @@ // // Using workload identity federation, your application can access Google Cloud // resources from Amazon Web Services (AWS), Microsoft Azure or any identity -// provider that supports OpenID Connect (OIDC). +// provider that supports OpenID Connect (OIDC) or SAML 2.0. // Traditionally, applications running outside Google Cloud have used service // account keys to access Google Cloud resources. Using identity federation, // you can allow your workload to impersonate a service account. 
@@ -36,26 +36,70 @@ // Follow the detailed instructions on how to configure Workload Identity Federation // in various platforms: // -// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws -// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure -// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc +// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws +// Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure +// OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc +// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml // // For OIDC and SAML providers, the library can retrieve tokens in three ways: // from a local file location (file-sourced credentials), from a server // (URL-sourced credentials), or from a local executable (executable-sourced // credentials). // For file-sourced credentials, a background process needs to be continuously -// refreshing the file location with a new OIDC token prior to expiration. +// refreshing the file location with a new OIDC/SAML token prior to expiration. // For tokens with one hour lifetimes, the token needs to be updated in the file // every hour. The token can be stored directly as plain text or in JSON format. // For URL-sourced credentials, a local server needs to host a GET endpoint to -// return the OIDC token. The response can be in plain text or JSON. +// return the OIDC/SAML token. The response can be in plain text or JSON. // Additional required request headers can also be specified. // For executable-sourced credentials, an application needs to be available to -// output the OIDC token and other information in a JSON format. +// output the OIDC/SAML token and other information in a JSON format. 
// For more information on how these work (and how to implement // executable-sourced credentials), please check out: -// https://cloud.google.com/iam/docs/using-workload-identity-federation#oidc +// https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration +// +// Note that this library does not perform any validation on the token_url, token_info_url, +// or service_account_impersonation_url fields of the credential configuration. +// It is not recommended to use a credential configuration that you did not generate with +// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. +// +// # Workforce Identity Federation +// +// Workforce identity federation lets you use an external identity provider (IdP) to +// authenticate and authorize a workforce—a group of users, such as employees, partners, +// and contractors—using IAM, so that the users can access Google Cloud services. +// Workforce identity federation extends Google Cloud's identity capabilities to support +// syncless, attribute-based single sign on. +// +// With workforce identity federation, your workforce can access Google Cloud resources +// using an external identity provider (IdP) that supports OpenID Connect (OIDC) or +// SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation +// Services (AD FS), Okta, and others. 
+// +// Follow the detailed instructions on how to configure Workload Identity Federation +// in various platforms: +// +// Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad +// Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta +// OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc +// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml +// +// For workforce identity federation, the library can retrieve tokens in three ways: +// from a local file location (file-sourced credentials), from a server +// (URL-sourced credentials), or from a local executable (executable-sourced +// credentials). +// For file-sourced credentials, a background process needs to be continuously +// refreshing the file location with a new OIDC/SAML token prior to expiration. +// For tokens with one hour lifetimes, the token needs to be updated in the file +// every hour. The token can be stored directly as plain text or in JSON format. +// For URL-sourced credentials, a local server needs to host a GET endpoint to +// return the OIDC/SAML token. The response can be in plain text or JSON. +// Additional required request headers can also be specified. +// For executable-sourced credentials, an application needs to be available to +// output the OIDC/SAML token and other information in a JSON format. +// For more information on how these work (and how to implement +// executable-sourced credentials), please check out: +// https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in // // Note that this library does not perform any validation on the token_url, token_info_url, // or service_account_impersonation_url fields of the credential configuration. 
@@ -86,5 +130,4 @@ // same as the one obtained from the oauth2.Config returned from ConfigFromJSON or // JWTConfigFromJSON, but the Credentials may contain additional information // that is useful is some circumstances. -// package google // import "golang.org/x/oauth2/google" diff --git a/terraform/providers/google/vendor/golang.org/x/oauth2/google/google.go b/terraform/providers/google/vendor/golang.org/x/oauth2/google/google.go index a1b629a2eb..cc1223889e 100644 --- a/terraform/providers/google/vendor/golang.org/x/oauth2/google/google.go +++ b/terraform/providers/google/vendor/golang.org/x/oauth2/google/google.go @@ -231,7 +231,11 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope}) + return computeTokenSource(account, 0, scope...) +} + +func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { + return oauth2.ReuseTokenSourceWithExpiry(nil, computeSource{account: account, scopes: scope}, earlyExpiry) } type computeSource struct { diff --git a/terraform/providers/google/vendor/golang.org/x/oauth2/internal/oauth2.go b/terraform/providers/google/vendor/golang.org/x/oauth2/internal/oauth2.go index c0ab196cf4..14989beaf4 100644 --- a/terraform/providers/google/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/terraform/providers/google/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -14,7 +14,7 @@ import ( // ParseKey converts the binary contents of a private key file // to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key +// PEM container or not. 
If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. func ParseKey(key []byte) (*rsa.PrivateKey, error) { diff --git a/terraform/providers/google/vendor/golang.org/x/oauth2/internal/token.go b/terraform/providers/google/vendor/golang.org/x/oauth2/internal/token.go index b4723fcace..58901bda53 100644 --- a/terraform/providers/google/vendor/golang.org/x/oauth2/internal/token.go +++ b/terraform/providers/google/vendor/golang.org/x/oauth2/internal/token.go @@ -55,12 +55,18 @@ type Token struct { } // tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. +// providers returning a token or error in JSON form. +// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1 type tokenJSON struct { AccessToken string `json:"access_token"` TokenType string `json:"token_type"` RefreshToken string `json:"refresh_token"` ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + // error fields + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` } func (e *tokenJSON) expiry() (t time.Time) { @@ -236,21 +242,29 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, &RetrieveError{ - Response: r, - Body: body, - } + + failureStatus := r.StatusCode < 200 || r.StatusCode > 299 + retrieveError := &RetrieveError{ + Response: r, + Body: body, + // attempt to populate error detail below } var token *Token content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) switch content { case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string vals, err := 
url.ParseQuery(string(body)) if err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse response: %v", err) } + retrieveError.ErrorCode = vals.Get("error") + retrieveError.ErrorDescription = vals.Get("error_description") + retrieveError.ErrorURI = vals.Get("error_uri") token = &Token{ AccessToken: vals.Get("access_token"), TokenType: vals.Get("token_type"), @@ -265,8 +279,14 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { default: var tj tokenJSON if err = json.Unmarshal(body, &tj); err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse json: %v", err) } + retrieveError.ErrorCode = tj.ErrorCode + retrieveError.ErrorDescription = tj.ErrorDescription + retrieveError.ErrorURI = tj.ErrorURI token = &Token{ AccessToken: tj.AccessToken, TokenType: tj.TokenType, @@ -276,17 +296,37 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } + // according to spec, servers should respond status 400 in error case + // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 + // but some unorthodox servers respond 200 in error case + if failureStatus || retrieveError.ErrorCode != "" { + return nil, retrieveError + } if token.AccessToken == "" { return nil, errors.New("oauth2: server response missing access_token") } return token, nil } +// mirrors oauth2.RetrieveError type RetrieveError struct { - Response *http.Response - Body []byte + Response *http.Response + Body []byte + ErrorCode string + ErrorDescription string + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { + s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return 
s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/terraform/providers/google/vendor/golang.org/x/oauth2/oauth2.go b/terraform/providers/google/vendor/golang.org/x/oauth2/oauth2.go index 291df5c833..9085fabe34 100644 --- a/terraform/providers/google/vendor/golang.org/x/oauth2/oauth2.go +++ b/terraform/providers/google/vendor/golang.org/x/oauth2/oauth2.go @@ -16,6 +16,7 @@ import ( "net/url" "strings" "sync" + "time" "golang.org/x/oauth2/internal" ) @@ -140,7 +141,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // // State is a token to protect the user from CSRF attacks. You must // always provide a non-empty string and validate that it matches the -// the state query parameter on your redirect callback. +// state query parameter on your redirect callback. // See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well @@ -290,6 +291,8 @@ type reuseTokenSource struct { mu sync.Mutex // guards t t *Token + + expiryDelta time.Duration } // Token returns the current token if it's still valid, else will @@ -305,6 +308,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { if err != nil { return nil, err } + t.expiryDelta = s.expiryDelta s.t = t return t, nil } @@ -379,3 +383,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { new: src, } } + +// ReuseTokenSource returns a TokenSource that acts in the same manner as the +// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// configurable. The expiration time of a token is calculated as +// t.Expiry.Add(-earlyExpiry). +func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. 
+ if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly, but set the expiryDelta to earlyExpiry, + // so the behavior matches what the user expects. + rt.expiryDelta = earlyExpiry + return rt + } + src = rt.new + } + if t != nil { + t.expiryDelta = earlyExpiry + } + return &reuseTokenSource{ + t: t, + new: src, + expiryDelta: earlyExpiry, + } +} diff --git a/terraform/providers/google/vendor/golang.org/x/oauth2/token.go b/terraform/providers/google/vendor/golang.org/x/oauth2/token.go index 822720341a..5ffce9764b 100644 --- a/terraform/providers/google/vendor/golang.org/x/oauth2/token.go +++ b/terraform/providers/google/vendor/golang.org/x/oauth2/token.go @@ -16,10 +16,10 @@ import ( "golang.org/x/oauth2/internal" ) -// expiryDelta determines how earlier a token should be considered +// defaultExpiryDelta determines how earlier a token should be considered // expired than its actual expiration time. It is used to avoid late // expirations due to client-server time mismatches. -const expiryDelta = 10 * time.Second +const defaultExpiryDelta = 10 * time.Second // Token represents the credentials used to authorize // the requests to access protected resources on the OAuth 2.0 @@ -52,6 +52,11 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. raw interface{} + + // expiryDelta is used to calculate when a token is considered + // expired, by subtracting from Expiry. If zero, defaultExpiryDelta + // is used. + expiryDelta time.Duration } // Type returns t.TokenType if non-empty, else "Bearer". 
@@ -127,6 +132,11 @@ func (t *Token) expired() bool { if t.Expiry.IsZero() { return false } + + expiryDelta := defaultExpiryDelta + if t.expiryDelta != 0 { + expiryDelta = t.expiryDelta + } return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow()) } @@ -165,14 +175,31 @@ func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) } // RetrieveError is the error returned when the token endpoint returns a -// non-2XX HTTP status code. +// non-2XX HTTP status code or populates RFC 6749's 'error' parameter. +// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 type RetrieveError struct { Response *http.Response // Body is the body that was consumed by reading Response.Body. // It may be truncated. Body []byte + // ErrorCode is RFC 6749's 'error' parameter. + ErrorCode string + // ErrorDescription is RFC 6749's 'error_description' parameter. + ErrorDescription string + // ErrorURI is RFC 6749's 'error_uri' parameter. + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { + s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/terraform/providers/google/vendor/golang.org/x/sys/cpu/endian_little.go b/terraform/providers/google/vendor/golang.org/x/sys/cpu/endian_little.go index fe545966b6..55db853efb 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/cpu/endian_little.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/cpu/endian_little.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh +//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh || wasm +// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh wasm package cpu diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ioctl.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ioctl.go deleted file mode 100644 index 7ce8dd406f..0000000000 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ioctl.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build aix || darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd hurd linux netbsd openbsd solaris - -package unix - -import ( - "unsafe" -) - -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -// IoctlSetPointerInt performs an ioctl operation which sets an -// integer value on fd, using the specified request number. The ioctl -// argument is called with a pointer to the integer value, rather than -// passing the integer value directly. 
-func IoctlSetPointerInt(fd int, req uint, value int) error { - v := int32(value) - return ioctlPtr(fd, req, unsafe.Pointer(&v)) -} - -// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. -// -// To change fd's window size, the req argument should be TIOCSWINSZ. -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { - // TODO: if we get the chance, remove the req parameter and - // hardcode TIOCSWINSZ. - return ioctlPtr(fd, req, unsafe.Pointer(value)) -} - -// IoctlSetTermios performs an ioctl on fd with a *Termios. -// -// The req value will usually be TCSETA or TIOCSETA. -func IoctlSetTermios(fd int, req uint, value *Termios) error { - // TODO: if we get the chance, remove the req parameter. - return ioctlPtr(fd, req, unsafe.Pointer(value)) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -// -// A few ioctl requests use the return value as an output parameter; -// for those, IoctlRetInt should be used instead of this function. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctlPtr(fd, req, unsafe.Pointer(&value)) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctlPtr(fd, req, unsafe.Pointer(&value)) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctlPtr(fd, req, unsafe.Pointer(&value)) - return &value, err -} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ioctl_signed.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ioctl_signed.go new file mode 100644 index 0000000000..7def9580e6 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -0,0 +1,70 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build aix || solaris +// +build aix solaris + +package unix + +import ( + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req int, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +// IoctlSetPointerInt performs an ioctl operation which sets an +// integer value on fd, using the specified request number. The ioctl +// argument is called with a pointer to the integer value, rather than +// passing the integer value directly. +func IoctlSetPointerInt(fd int, req int, value int) error { + v := int32(value) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) +} + +// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. +// +// To change fd's window size, the req argument should be TIOCSWINSZ. +func IoctlSetWinsize(fd int, req int, value *Winsize) error { + // TODO: if we get the chance, remove the req parameter and + // hardcode TIOCSWINSZ. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlSetTermios performs an ioctl on fd with a *Termios. +// +// The req value will usually be TCSETA or TIOCSETA. +func IoctlSetTermios(fd int, req int, value *Termios) error { + // TODO: if we get the chance, remove the req parameter. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. 
+func IoctlGetInt(fd int, req int) (int, error) { + var value int + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return value, err +} + +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { + var value Winsize + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} + +func IoctlGetTermios(fd int, req int) (*Termios, error) { + var value Termios + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ioctl_unsigned.go new file mode 100644 index 0000000000..649913d1ea --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -0,0 +1,70 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd +// +build darwin dragonfly freebsd hurd linux netbsd openbsd + +package unix + +import ( + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +// IoctlSetPointerInt performs an ioctl operation which sets an +// integer value on fd, using the specified request number. The ioctl +// argument is called with a pointer to the integer value, rather than +// passing the integer value directly. +func IoctlSetPointerInt(fd int, req uint, value int) error { + v := int32(value) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) +} + +// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. +// +// To change fd's window size, the req argument should be TIOCSWINSZ. 
+func IoctlSetWinsize(fd int, req uint, value *Winsize) error { + // TODO: if we get the chance, remove the req parameter and + // hardcode TIOCSWINSZ. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlSetTermios performs an ioctl on fd with a *Termios. +// +// The req value will usually be TCSETA or TIOCSETA. +func IoctlSetTermios(fd int, req uint, value *Termios) error { + // TODO: if we get the chance, remove the req parameter. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ioctl_zos.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ioctl_zos.go index 6532f09af2..cdc21bf76d 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -17,14 +17,14 @@ import ( // IoctlSetInt performs an ioctl operation which sets an integer value // on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { +func IoctlSetInt(fd int, req int, value int) error { return ioctl(fd, req, uintptr(value)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // // To change fd's window size, the req argument should be TIOCSWINSZ. 
-func IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func IoctlSetWinsize(fd int, req int, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. return ioctlPtr(fd, req, unsafe.Pointer(value)) @@ -33,7 +33,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // IoctlSetTermios performs an ioctl on fd with a *Termios. // // The req value is expected to be TCSETS, TCSETSW, or TCSETSF -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func IoctlSetTermios(fd int, req int, value *Termios) error { if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) { return ENOSYS } @@ -47,13 +47,13 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // // A few ioctl requests use the return value as an output parameter; // for those, IoctlRetInt should be used instead of this function. -func IoctlGetInt(fd int, req uint) (int, error) { +func IoctlGetInt(fd int, req int) (int, error) { var value int err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { var value Winsize err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err @@ -62,7 +62,7 @@ func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { // IoctlGetTermios performs an ioctl on fd with a *Termios. 
// // The req value is expected to be TCGETS -func IoctlGetTermios(fd int, req uint) (*Termios, error) { +func IoctlGetTermios(fd int, req int) (*Termios, error) { var value Termios if req != TCGETS { return &value, ENOSYS diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/mkall.sh b/terraform/providers/google/vendor/golang.org/x/sys/unix/mkall.sh index 8e3947c368..e6f31d374d 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/mkall.sh +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/mkall.sh @@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) $cmd docker build --tag generate:$GOOS $GOOS - $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && /bin/pwd):/build generate:$GOOS + $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS exit fi diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/mkerrors.sh b/terraform/providers/google/vendor/golang.org/x/sys/unix/mkerrors.sh index 7456d9ddde..0c4d14929a 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -66,6 +66,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -203,6 +204,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -517,10 +519,11 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ 
/^NFC_.*_(MAX)?SIZE$/ || $2 ~ /^RAW_PAYLOAD_/ || + $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || @@ -738,7 +741,8 @@ main(void) e = errors[i].num; if(i > 0 && errors[i-1].num == e) continue; - strcpy(buf, strerror(e)); + strncpy(buf, strerror(e), sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; // lowercase first letter: Bad -> bad, but STREAM -> STREAM. if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) buf[0] += a - A; @@ -757,7 +761,8 @@ main(void) e = signals[i].num; if(i > 0 && signals[i-1].num == e) continue; - strcpy(buf, strsignal(e)); + strncpy(buf, strsignal(e), sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; // lowercase first letter: Bad -> bad, but STREAM -> STREAM. if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) buf[0] += a - A; diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/mremap.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/mremap.go new file mode 100644 index 0000000000..86213c05d6 --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/mremap.go @@ -0,0 +1,40 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build linux +// +build linux + +package unix + +import "unsafe" + +type mremapMmapper struct { + mmapper + mremap func(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) +} + +func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { + if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&MREMAP_FIXED != 0 { + return nil, EINVAL + } + + pOld := &oldData[cap(oldData)-1] + m.Lock() + defer m.Unlock() + bOld := m.active[pOld] + if bOld == nil || &bOld[0] != &oldData[0] { + return nil, EINVAL + } + newAddr, errno := m.mremap(uintptr(unsafe.Pointer(&bOld[0])), uintptr(len(bOld)), uintptr(newLength), flags, 0) + if errno != nil { + return nil, errno + } + bNew := unsafe.Slice((*byte)(unsafe.Pointer(newAddr)), newLength) + pNew := &bNew[cap(bNew)-1] + if flags&MREMAP_DONTUNMAP == 0 { + delete(m.active, pOld) + } + m.active[pNew] = bNew + return bNew, nil +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix.go index d9f5544ccf..c406ae00f4 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -408,8 +408,8 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } -//sys ioctl(fd int, req uint, arg uintptr) (err error) -//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = ioctl +//sys ioctl(fd int, req int, arg uintptr) (err error) +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = ioctl // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index e92a0be163..f2871fa953 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -8,7 +8,6 @@ package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = getrlimit64 -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) = setrlimit64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek64 //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 16eed17098..75718ec0f1 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -8,7 +8,6 @@ package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) = mmap64 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_darwin.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_darwin.go index 7064d6ebab..206921504c 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -613,6 +613,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) 
(n int, err error) +//sys Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) //sys Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) @@ -622,7 +623,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Setprivexec(flag int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -676,7 +676,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { // Kqueue_from_portset_np // Kqueue_portset // Getattrlist -// Setattrlist // Getdirentriesattr // Searchfs // Delete diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 221efc26bc..d4ce988e72 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -326,7 +326,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 5bdde03e4a..afb10106f6 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -433,7 +433,6 @@ func Dup3(oldfd, newfd, flags int) error { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux.go index 9735331530..39de5f1430 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1699,12 +1699,23 @@ func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) { return ptracePoke(PTRACE_POKEUSR, PTRACE_PEEKUSR, pid, addr, data) } +// elfNT_PRSTATUS is a copy of the debug/elf.NT_PRSTATUS constant so +// x/sys/unix doesn't need to depend on debug/elf and thus +// compress/zlib, debug/dwarf, and other packages. 
+const elfNT_PRSTATUS = 1 + func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) { - return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) + var iov Iovec + iov.Base = (*byte)(unsafe.Pointer(regsout)) + iov.SetLen(int(unsafe.Sizeof(*regsout))) + return ptracePtr(PTRACE_GETREGSET, pid, uintptr(elfNT_PRSTATUS), unsafe.Pointer(&iov)) } func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) { - return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) + var iov Iovec + iov.Base = (*byte)(unsafe.Pointer(regs)) + iov.SetLen(int(unsafe.Sizeof(*regs))) + return ptracePtr(PTRACE_SETREGSET, pid, uintptr(elfNT_PRSTATUS), unsafe.Pointer(&iov)) } func PtraceSetOptions(pid int, options int) (err error) { @@ -1873,7 +1884,6 @@ func Getpgrp() (pid int) { //sys OpenTree(dfd int, fileName string, flags uint) (r int, err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) @@ -1887,6 +1897,15 @@ func Getpgrp() (pid int) { //sysnb Settimeofday(tv *Timeval) (err error) //sys Setns(fd int, nstype int) (err error) +//go:linkname syscall_prlimit syscall.prlimit +func syscall_prlimit(pid, resource int, newlimit, old *syscall.Rlimit) error + +func Prlimit(pid, resource int, newlimit, old *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. 
+ return syscall_prlimit(pid, resource, (*syscall.Rlimit)(newlimit), (*syscall.Rlimit)(old)) +} + // PrctlRetInt performs a prctl operation specified by option and further // optional arguments arg2 through arg5 depending on option. It returns a // non-negative integer that is returned by the prctl syscall. @@ -2105,11 +2124,15 @@ func writevRacedetect(iovecs []Iovec, n int) { // mmap varies by architecture; see syscall_linux_*.go. //sys munmap(addr uintptr, length uintptr) (err error) +//sys mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, +var mapper = &mremapMmapper{ + mmapper: mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, + }, + mremap: mremap, } func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { @@ -2120,6 +2143,10 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { + return mapper.Mremap(oldData, newLength, flags) +} + //sys Madvise(b []byte, advice int) (err error) //sys Mprotect(b []byte, prot int) (err error) //sys Mlock(b []byte) (err error) @@ -2412,6 +2439,21 @@ func PthreadSigmask(how int, set, oldset *Sigset_t) error { return rtSigprocmask(how, set, oldset, _C__NSIG/8) } +//sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) +//sysnb getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) + +func Getresuid() (ruid, euid, suid int) { + var r, e, s _C_int + getresuid(&r, &e, &s) + return int(r), int(e), int(s) +} + +func Getresgid() (rgid, egid, sgid int) { + var r, e, s _C_int + getresgid(&r, &e, &s) + return int(r), int(e), int(s) +} + /* * Unimplemented */ @@ -2453,7 +2495,6 @@ func PthreadSigmask(how int, set, oldset *Sigset_t) error { // MqTimedreceive // MqTimedsend // MqUnlink -// Mremap // Msgctl // Msgget // Msgrcv diff --git 
a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_386.go index ff5b5899d6..c7d9945ea1 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -97,33 +97,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { newoffset, errno := seek(fd, offset, whence) if errno != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 9b27035329..5b21fcfd75 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -46,7 +46,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) 
(n int64, err error) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 856ad1d635..da2986415a 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -171,33 +171,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint64 { return uint64(r.Uregs[15]) } func (r *PtraceRegs) SetPC(pc uint64) { r.Uregs[15] = uint32(pc) } diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 6422704bc5..a81f5742b8 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -39,7 +39,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff 
*int64, len int, flags int) (n int64, err error) @@ -143,15 +142,6 @@ func Getrlimit(resource int, rlim *Rlimit) error { return getrlimit(resource, rlim) } -// Setrlimit prefers the prlimit64 system call. See issue 38604. -func Setrlimit(resource int, rlim *Rlimit) error { - err := Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - return setrlimit(resource, rlim) -} - func (r *PtraceRegs) PC() uint64 { return r.Pc } func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 59dab510e9..69d2d7c3db 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -126,11 +126,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - return -} - func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { if tv == nil { return utimensat(dirfd, path, nil, 0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index bfef09a39e..76d564095e 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -37,7 +37,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff 
*int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index ab30250966..aae7f0ffd3 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -151,33 +151,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint64 { return r.Epc } func (r *PtraceRegs) SetPC(pc uint64) { r.Epc = pc } diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index eac1cf1acc..66eff19a32 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -159,33 +159,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - 
rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint32 { return r.Nip } func (r *PtraceRegs) SetPC(pc uint32) { r.Nip = pc } diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 4df56616b8..806aa2574d 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -34,7 +34,6 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 5f4243dea2..35851ef70b 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -38,7 +38,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys 
Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index d0a7d40668..2f89e8f5de 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -34,7 +34,6 @@ import ( //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index f5c793be26..7ca064ae76 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -31,7 +31,6 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 
e66865dccb..018d7d4782 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -340,7 +340,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -501,7 +500,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { // compat_43_osendmsg // compat_43_osethostid // compat_43_osethostname -// compat_43_osetrlimit // compat_43_osigblock // compat_43_osigsetmask // compat_43_osigstack diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 5e9de23ae3..c5f166a115 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -151,6 +151,21 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } +//sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) +//sysnb getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) + +func Getresuid() (ruid, euid, suid int) { + var r, e, s _C_int + getresuid(&r, &e, &s) + return int(r), int(e), int(s) +} + +func Getresgid() (rgid, egid, sgid int) { + var r, e, s _C_int + getresgid(&r, &e, &s) + return int(r), int(e), int(s) +} + //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL @@ -294,7 +309,6 @@ func Uname(uname *Utsname) error { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, 
euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setrtable(rtable int) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) @@ -339,8 +353,6 @@ func Uname(uname *Utsname) error { // getgid // getitimer // getlogin -// getresgid -// getresuid // getthrid // ktrace // lfs_bmapv diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_solaris.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_solaris.go index d3444b64d6..b600a289d3 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -545,24 +545,24 @@ func Minor(dev uint64) uint32 { * Expose the ioctl function */ -//sys ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) = libc.ioctl -//sys ioctlPtrRet(fd int, req uint, arg unsafe.Pointer) (ret int, err error) = libc.ioctl +//sys ioctlRet(fd int, req int, arg uintptr) (ret int, err error) = libc.ioctl +//sys ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) = libc.ioctl -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { _, err = ioctlRet(fd, req, arg) return err } -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { _, err = ioctlPtrRet(fd, req, arg) return err } -func IoctlSetTermio(fd int, req uint, value *Termio) error { +func IoctlSetTermio(fd int, req int, value *Termio) error { return ioctlPtr(fd, req, unsafe.Pointer(value)) } -func IoctlGetTermio(fd int, req uint) (*Termio, error) { +func IoctlGetTermio(fd int, req int) (*Termio, error) { var value Termio err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err @@ -665,7 +665,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Setpriority(which int, 
who int, prio int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Setuid(uid int) (err error) //sys Shutdown(s int, how int) (err error) = libsocket.shutdown @@ -1080,11 +1079,11 @@ func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags return retCl, retData, flags, nil } -func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) { +func IoctlSetIntRetInt(fd int, req int, arg int) (int, error) { return ioctlRet(fd, req, uintptr(arg)) } -func IoctlSetString(fd int, req uint, val string) error { +func IoctlSetString(fd int, req int, val string) error { bs := make([]byte, len(val)+1) copy(bs[:len(bs)-1], val) err := ioctlPtr(fd, req, unsafe.Pointer(&bs[0])) @@ -1120,7 +1119,7 @@ func (l *Lifreq) GetLifruUint() uint { return *(*uint)(unsafe.Pointer(&l.Lifru[0])) } -func IoctlLifreq(fd int, req uint, l *Lifreq) error { +func IoctlLifreq(fd int, req int, l *Lifreq) error { return ioctlPtr(fd, req, unsafe.Pointer(l)) } @@ -1131,6 +1130,6 @@ func (s *Strioctl) SetInt(i int) { s.Dp = (*int8)(unsafe.Pointer(&i)) } -func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) { +func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_unix.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_unix.go index 00f0aa3758..8e48c29ec3 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -587,3 +587,10 @@ func emptyIovecs(iov []Iovec) bool { } return true } + +// Setrlimit sets a resource limit. 
+func Setrlimit(resource int, rlim *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. + return syscall.Setrlimit(resource, (*syscall.Rlimit)(rlim)) +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b295497ae4..d3d49ec3ed 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -212,8 +212,8 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___SENDMSG_A //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) = SYS_MMAP //sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP -//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL -//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL +//sys ioctl(fd int, req int, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A //sys Chdir(path string) (err error) = SYS___CHDIR_A diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 476a1c7e77..1430076271 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1270,6 +1270,16 @@ const ( SEEK_END = 0x2 SEEK_HOLE = 0x3 SEEK_SET = 0x0 + SF_APPEND = 0x40000 + SF_ARCHIVED = 0x10000 + SF_DATALESS = 0x40000000 + SF_FIRMLINK = 0x800000 + SF_IMMUTABLE = 0x20000 + SF_NOUNLINK = 0x100000 + SF_RESTRICTED = 0x80000 + SF_SETTABLE = 0x3fff0000 + 
SF_SUPPORTED = 0x9f0000 + SF_SYNTHETIC = 0xc0000000 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1543,6 +1553,15 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UF_APPEND = 0x4 + UF_COMPRESSED = 0x20 + UF_DATAVAULT = 0x80 + UF_HIDDEN = 0x8000 + UF_IMMUTABLE = 0x2 + UF_NODUMP = 0x1 + UF_OPAQUE = 0x8 + UF_SETTABLE = 0xffff + UF_TRACKED = 0x40 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index e36f5178d6..ab044a7427 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1270,6 +1270,16 @@ const ( SEEK_END = 0x2 SEEK_HOLE = 0x3 SEEK_SET = 0x0 + SF_APPEND = 0x40000 + SF_ARCHIVED = 0x10000 + SF_DATALESS = 0x40000000 + SF_FIRMLINK = 0x800000 + SF_IMMUTABLE = 0x20000 + SF_NOUNLINK = 0x100000 + SF_RESTRICTED = 0x80000 + SF_SETTABLE = 0x3fff0000 + SF_SUPPORTED = 0x9f0000 + SF_SYNTHETIC = 0xc0000000 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1543,6 +1553,15 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UF_APPEND = 0x4 + UF_COMPRESSED = 0x20 + UF_DATAVAULT = 0x80 + UF_HIDDEN = 0x8000 + UF_IMMUTABLE = 0x2 + UF_NODUMP = 0x1 + UF_OPAQUE = 0x8 + UF_SETTABLE = 0xffff + UF_TRACKED = 0x40 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux.go index 398c37e52d..3784f402e5 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -493,6 +493,7 @@ const ( BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 + BPF_F_XDP_DEV_BOUND_ONLY = 
0x40 BPF_F_XDP_HAS_FRAGS = 0x20 BPF_H = 0x8 BPF_IMM = 0x0 @@ -826,9 +827,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2022-07-28)" + DM_VERSION_EXTRA = "-ioctl (2023-03-01)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2f + DM_VERSION_MINOR = 0x30 DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -1197,6 +1198,7 @@ const ( FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 + FAN_INFO = 0x20 FAN_MARK_ADD = 0x1 FAN_MARK_DONT_FOLLOW = 0x4 FAN_MARK_EVICTABLE = 0x200 @@ -1233,6 +1235,8 @@ const ( FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 FAN_REPORT_TID = 0x100 + FAN_RESPONSE_INFO_AUDIT_RULE = 0x1 + FAN_RESPONSE_INFO_NONE = 0x0 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 @@ -1860,6 +1864,7 @@ const ( MEMWRITEOOB64 = 0xc0184d15 MFD_ALLOW_SEALING = 0x2 MFD_CLOEXEC = 0x1 + MFD_EXEC = 0x10 MFD_HUGETLB = 0x4 MFD_HUGE_16GB = 0x88000000 MFD_HUGE_16MB = 0x60000000 @@ -1875,6 +1880,7 @@ const ( MFD_HUGE_8MB = 0x5c000000 MFD_HUGE_MASK = 0x3f MFD_HUGE_SHIFT = 0x1a + MFD_NOEXEC_SEAL = 0x8 MINIX2_SUPER_MAGIC = 0x2468 MINIX2_SUPER_MAGIC2 = 0x2478 MINIX3_SUPER_MAGIC = 0x4d5a @@ -1898,6 +1904,9 @@ const ( MOUNT_ATTR_SIZE_VER0 = 0x20 MOUNT_ATTR_STRICTATIME = 0x20 MOUNT_ATTR__ATIME = 0x70 + MREMAP_DONTUNMAP = 0x4 + MREMAP_FIXED = 0x2 + MREMAP_MAYMOVE = 0x1 MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 @@ -2204,6 +2213,7 @@ const ( PACKET_USER = 0x6 PACKET_VERSION = 0xa PACKET_VNET_HDR = 0xf + PACKET_VNET_HDR_SZ = 0x18 PARITY_CRC16_PR0 = 0x2 PARITY_CRC16_PR0_CCITT = 0x4 PARITY_CRC16_PR1 = 0x3 @@ -2221,6 +2231,7 @@ const ( PERF_ATTR_SIZE_VER5 = 0x70 PERF_ATTR_SIZE_VER6 = 0x78 PERF_ATTR_SIZE_VER7 = 0x80 + PERF_ATTR_SIZE_VER8 = 0x88 PERF_AUX_FLAG_COLLISION = 0x8 PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT = 0x0 PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW = 0x100 @@ -2361,6 +2372,7 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 
0x1 PR_FP_MODE_FRE = 0x2 + PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 PR_GET_ENDIAN = 0x13 @@ -2369,6 +2381,8 @@ const ( PR_GET_FP_MODE = 0x2e PR_GET_IO_FLUSHER = 0x3a PR_GET_KEEPCAPS = 0x7 + PR_GET_MDWE = 0x42 + PR_GET_MEMORY_MERGE = 0x44 PR_GET_NAME = 0x10 PR_GET_NO_NEW_PRIVS = 0x27 PR_GET_PDEATHSIG = 0x2 @@ -2389,6 +2403,7 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b PR_MTE_TAG_MASK = 0x7fff8 @@ -2423,6 +2438,8 @@ const ( PR_SET_FP_MODE = 0x2d PR_SET_IO_FLUSHER = 0x39 PR_SET_KEEPCAPS = 0x8 + PR_SET_MDWE = 0x41 + PR_SET_MEMORY_MERGE = 0x43 PR_SET_MM = 0x23 PR_SET_MM_ARG_END = 0x9 PR_SET_MM_ARG_START = 0x8 @@ -2506,6 +2523,7 @@ const ( PTRACE_GETSIGMASK = 0x420a PTRACE_GET_RSEQ_CONFIGURATION = 0x420f PTRACE_GET_SYSCALL_INFO = 0x420e + PTRACE_GET_SYSCALL_USER_DISPATCH_CONFIG = 0x4211 PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -2536,6 +2554,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 PTRACE_SYSCALL_INFO_ENTRY = 0x1 @@ -2967,6 +2986,7 @@ const ( SOL_TCP = 0x6 SOL_TIPC = 0x10f SOL_TLS = 0x11a + SOL_UDP = 0x11 SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 @@ -3071,7 +3091,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xd + TASKSTATS_VERSION = 0xe TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3237,6 +3257,7 @@ const ( TP_STATUS_COPY = 0x2 TP_STATUS_CSUMNOTREADY = 0x8 TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_GSO_TCP = 0x100 TP_STATUS_KERNEL = 0x0 TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 @@ -3251,6 +3272,19 @@ const ( TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 UDF_SUPER_MAGIC = 0x15013346 + UDP_CORK = 0x1 + UDP_ENCAP = 0x64 + UDP_ENCAP_ESPINUDP = 
0x2 + UDP_ENCAP_ESPINUDP_NON_IKE = 0x1 + UDP_ENCAP_GTP0 = 0x4 + UDP_ENCAP_GTP1U = 0x5 + UDP_ENCAP_L2TPINUDP = 0x3 + UDP_GRO = 0x68 + UDP_NO_CHECK6_RX = 0x66 + UDP_NO_CHECK6_TX = 0x65 + UDP_SEGMENT = 0x67 + UDP_V4_FLOW = 0x2 + UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 9d5352c3e4..12a9a1389e 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -443,6 +443,7 @@ const ( TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 TOSTOP = 0x100 + TPIDR2_MAGIC = 0x54504902 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 TUNGETDEVNETNS = 0x54e3 @@ -515,6 +516,7 @@ const ( XCASE = 0x4 XTABS = 0x1800 ZA_MAGIC = 0x54366345 + ZT_MAGIC = 0x5a544e01 _HIDIOCGRAWNAME = 0x80804804 _HIDIOCGRAWPHYS = 0x80404805 _HIDIOCGRAWUNIQ = 0x80404808 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f619252691..48984202c6 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -329,6 +329,54 @@ const ( SCM_WIFI_STATUS = 0x25 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 + SF_FP = 0x38 + SF_I0 = 0x20 + SF_I1 = 0x24 + SF_I2 = 0x28 + SF_I3 = 0x2c + SF_I4 = 0x30 + SF_I5 = 0x34 + SF_L0 = 0x0 + SF_L1 = 0x4 + SF_L2 = 0x8 + SF_L3 = 0xc + SF_L4 = 0x10 + SF_L5 = 0x14 + SF_L6 = 0x18 + SF_L7 = 0x1c + SF_PC = 0x3c + SF_RETP = 0x40 + SF_V9_FP = 0x70 + SF_V9_I0 = 0x40 + SF_V9_I1 = 0x48 + SF_V9_I2 = 0x50 + SF_V9_I3 = 0x58 + SF_V9_I4 = 0x60 + SF_V9_I5 = 0x68 + SF_V9_L0 = 0x0 + SF_V9_L1 = 0x8 + SF_V9_L2 = 0x10 + 
SF_V9_L3 = 0x18 + SF_V9_L4 = 0x20 + SF_V9_L5 = 0x28 + SF_V9_L6 = 0x30 + SF_V9_L7 = 0x38 + SF_V9_PC = 0x78 + SF_V9_RETP = 0x80 + SF_V9_XARG0 = 0x88 + SF_V9_XARG1 = 0x90 + SF_V9_XARG2 = 0x98 + SF_V9_XARG3 = 0xa0 + SF_V9_XARG4 = 0xa8 + SF_V9_XARG5 = 0xb0 + SF_V9_XXARG = 0xb8 + SF_XARG0 = 0x44 + SF_XARG1 = 0x48 + SF_XARG2 = 0x4c + SF_XARG3 = 0x50 + SF_XARG4 = 0x54 + SF_XARG5 = 0x58 + SF_XXARG = 0x5c SIOCATMARK = 0x8905 SIOCGPGRP = 0x8904 SIOCGSTAMPNS_NEW = 0x40108907 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index ef9dcd1bef..9a257219d7 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -124,7 +124,6 @@ int utime(uintptr_t, uintptr_t); unsigned long long getsystemcfg(int); int umount(uintptr_t); int getrlimit64(int, uintptr_t); -int setrlimit64(int, uintptr_t); long long lseek64(int, long long, int); uintptr_t mmap(uintptr_t, uintptr_t, int, int, int, long long); @@ -213,7 +212,7 @@ func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(arg)) if r0 == -1 && er != nil { err = er @@ -223,7 +222,7 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(uintptr(arg))) if r0 == -1 && er != nil { err = er @@ -1464,16 +1463,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - r0, er := C.setrlimit64(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Seek(fd int, offset int64, whence int) (off int64, err error) { r0, er := C.lseek64(C.int(fd), C.longlong(offset), C.int(whence)) off = int64(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index f86a945923..6de80c20cf 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -93,8 +93,8 @@ func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, e1 := callioctl(fd, int(req), arg) +func ioctl(fd int, req int, arg uintptr) (err error) { + _, e1 := callioctl(fd, req, arg) if e1 != 0 { err = errnoErr(e1) } @@ -103,8 +103,8 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { - _, e1 := callioctl_ptr(fd, int(req), arg) +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { + _, e1 := callioctl_ptr(fd, req, arg) if e1 != 0 { err = errnoErr(e1) } @@ -1422,16 +1422,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, e1 := callsetrlimit(resource, uintptr(unsafe.Pointer(rlim))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Seek(fd int, 
offset int64, whence int) (off int64, err error) { r0, e1 := calllseek(fd, offset, whence) off = int64(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index d32a84cae2..c4d50ae500 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -124,7 +124,6 @@ import ( //go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" //go:cgo_import_dynamic libc_umount umount "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.a/shr_64.o" //go:cgo_import_dynamic libc_lseek lseek "libc.a/shr_64.o" //go:cgo_import_dynamic libc_mmap64 mmap64 "libc.a/shr_64.o" @@ -242,7 +241,6 @@ import ( //go:linkname libc_getsystemcfg libc_getsystemcfg //go:linkname libc_umount libc_umount //go:linkname libc_getrlimit libc_getrlimit -//go:linkname libc_setrlimit libc_setrlimit //go:linkname libc_lseek libc_lseek //go:linkname libc_mmap64 libc_mmap64 @@ -363,7 +361,6 @@ var ( libc_getsystemcfg, libc_umount, libc_getrlimit, - libc_setrlimit, libc_lseek, libc_mmap64 syscallFunc ) @@ -1179,13 +1176,6 @@ func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { - r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_setrlimit)), 2, uintptr(resource), rlim, 0, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_lseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) return diff --git 
a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index d7d8baf819..6903d3b09e 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -123,7 +123,6 @@ int utime(uintptr_t, uintptr_t); unsigned long long getsystemcfg(int); int umount(uintptr_t); int getrlimit(int, uintptr_t); -int setrlimit(int, uintptr_t); long long lseek(int, long long, int); uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); @@ -131,6 +130,7 @@ uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); import "C" import ( "syscall" + "unsafe" ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1055,14 +1055,6 @@ func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.setrlimit(C.int(resource), C.uintptr_t(rlim))) - e1 = syscall.GetErrno() - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { r1 = uintptr(C.lseek(C.int(fd), C.longlong(offset), C.int(whence))) e1 = syscall.GetErrno() diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index a29ffdd566..4037ccf7a9 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1992,6 +1992,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, 
attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2123,20 +2148,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 95fe4c0eb9..4baaed0bc1 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -705,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 
DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -759,12 +764,6 @@ TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 2fd4590bb7..51d6f3fb25 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1992,6 +1992,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist 
setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2123,20 +2148,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index efa5b4c987..c3b82c0379 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -705,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -759,12 +764,6 @@ TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT 
libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 3b85134707..0eabac7ade 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1410,16 +1410,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 1129065624..ee313eb007 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, 
err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 55f5abfe59..4c986e448e 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index d39651c2b5..555216944a 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index ddb7408680..67a226fbf5 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 09a53a616c..f0b9ddaaa2 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 430cb24de7..7ceec233fb 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1346,16 +1346,6 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) if e1 != 0 { @@ -1878,6 +1868,17 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldaddr), uintptr(oldlength), uintptr(newlength), uintptr(flags), uintptr(newaddr), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, advice int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { @@ -2182,3 +2183,17 @@ func rtSigprocmask(how int, set *Sigset_t, oldset *Sigset_t, sigsetsize uintptr) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + RawSyscallNoError(SYS_GETRESUID, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + RawSyscallNoError(SYS_GETRESGID, 
uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index c81b0ad477..07b549cc25 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -411,16 +411,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 2206bce7f4..5f481bf83f 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -334,16 +334,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git 
a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index edf6b39f16..824cd52c7f 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -578,16 +578,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func armSyncFileRange(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_ARM_SYNC_FILE_RANGE, uintptr(fd), uintptr(flags), uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32)) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 190609f214..e77aecfe98 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -289,16 +289,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git 
a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 5f984cbb1c..961a3afb7b 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -644,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 46fc380a40..ed05005e91 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -278,16 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index cbd0d4dadb..d365b718f3 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -278,16 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 0c13d15f07..c3f1b8bbde 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -644,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index e01432aed5..a6574cf98b 100644 --- 
a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -624,16 +624,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n)) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 13c7ee7baf..f40990264f 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -349,16 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 02d0c0fd61..9dfcc29974 100644 --- 
a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -349,16 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 9fee3b1d23..0b29239583 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -269,16 +269,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 647bbfecd6..6cde32237d 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -319,16 +319,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int64(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index ada057f891..5253d65bf1 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -329,16 +329,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 8e1d9c8f66..cdb2af5ae0 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 21c6950400..9d25f76b0b 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 298168f90a..d3f8035169 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which 
int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 68b8bd492f..887188a529 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 0b0f910e1a..9ab9abf721 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -1894,20 +1916,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 087444250c..3dcacd30d7 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP 
libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresuid_trampoline_addr(SB)/4, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresgid_trampoline_addr(SB)/4, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 @@ -573,11 +583,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 48ff5de75b..915761eab7 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + 
syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -527,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -535,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -1894,20 +1918,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git 
a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 5782cd1084..2763620b01 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 @@ -573,11 +583,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 2452a641da..8e87fdf153 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -1894,20 +1916,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index cf310420c9..c922314048 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresuid_trampoline_addr(SB)/4, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getresgid_trampoline_addr(SB)/4, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 @@ -573,11 +583,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 5e35600a60..12a7a2160e 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr 
uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -1894,20 +1916,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index 484bb42e0a..a6bc32c922 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s 
+++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 @@ -573,11 +583,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index b04cef1a19..b19e8aa031 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, 
uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -1894,20 +1916,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index 55af27263a..b4e7bceabf 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL 
·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 @@ -573,11 +583,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 47a07ee0c2..fb99594c93 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -1894,20 +1916,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 4028255b0d..ca3f766009 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -189,6 +189,18 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL 
libc_getresuid(SB) + RET +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getresgid(SB) + RET +GLOBL ·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_ioctl(SB) RET @@ -687,12 +699,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - CALL libc_setrlimit(SB) - RET -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_setrtable(SB) RET diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 573378fdb9..32cbbbc52b 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -519,6 +519,28 @@ var libc_getcwd_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) { + syscall_rawSyscall(libc_getresuid_trampoline_addr, uintptr(unsafe.Pointer(ruid)), uintptr(unsafe.Pointer(euid)), uintptr(unsafe.Pointer(suid))) + return +} + +var libc_getresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresuid getresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { + syscall_rawSyscall(libc_getresgid_trampoline_addr, 
uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) + return +} + +var libc_getresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getresgid getresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -1894,20 +1916,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index e1fbd4dfa8..477a7d5b21 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -158,6 +158,16 @@ TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) +TEXT libc_getresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresuid(SB) +GLOBL ·libc_getresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresuid_trampoline_addr(SB)/8, $libc_getresuid_trampoline<>(SB) + +TEXT libc_getresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getresgid(SB) +GLOBL 
·libc_getresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getresgid_trampoline_addr(SB)/8, $libc_getresgid_trampoline<>(SB) + TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 @@ -573,11 +583,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 4873a1e5d3..609d1c598a 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -110,7 +110,6 @@ import ( //go:cgo_import_dynamic libc_setpriority setpriority "libc.so" //go:cgo_import_dynamic libc_setregid setregid "libc.so" //go:cgo_import_dynamic libc_setreuid setreuid "libc.so" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" //go:cgo_import_dynamic libc_setsid setsid "libc.so" //go:cgo_import_dynamic libc_setuid setuid "libc.so" //go:cgo_import_dynamic libc_shutdown shutdown "libsocket.so" @@ -250,7 +249,6 @@ import ( //go:linkname procSetpriority libc_setpriority //go:linkname procSetregid libc_setregid //go:linkname procSetreuid libc_setreuid -//go:linkname procSetrlimit libc_setrlimit //go:linkname procSetsid libc_setsid //go:linkname procSetuid libc_setuid //go:linkname procshutdown libc_shutdown @@ -391,7 +389,6 @@ var ( procSetpriority, procSetregid, procSetreuid, - 
procSetrlimit, procSetsid, procSetuid, procshutdown, @@ -646,7 +643,7 @@ func __minor(version int, dev uint64) (val uint) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { +func ioctlRet(fd int, req int, arg uintptr) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { @@ -657,7 +654,7 @@ func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtrRet(fd int, req uint, arg unsafe.Pointer) (ret int, err error) { +func ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { @@ -1650,16 +1647,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 07bfe2ef9a..c31681743c 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -257,7 +257,7 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) @@ -267,7 +267,7 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 7ea465204b..e6ed7d637d 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -372,6 +372,7 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index e2a64f0991..690cefc3d0 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git 
a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 34aa775219..5bffc10eac 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux.go index ca84727cfe..02e2462c8f 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1538,6 +1538,10 @@ const ( IFLA_GRO_MAX_SIZE = 0x3a IFLA_TSO_MAX_SIZE = 0x3b IFLA_TSO_MAX_SEGS = 0x3c + IFLA_ALLMULTI = 0x3d + IFLA_DEVLINK_PORT = 0x3e + IFLA_GSO_IPV4_MAX_SIZE = 0x3f + IFLA_GRO_IPV4_MAX_SIZE = 0x40 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1968,7 +1972,7 @@ const ( NFT_MSG_GETFLOWTABLE = 0x17 NFT_MSG_DELFLOWTABLE = 0x18 NFT_MSG_GETRULE_RESET = 0x19 - NFT_MSG_MAX = 0x1a + NFT_MSG_MAX = 0x21 NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 @@ -2555,6 +2559,11 @@ const ( BPF_REG_8 = 0x8 BPF_REG_9 = 0x9 BPF_REG_10 = 0xa + BPF_CGROUP_ITER_ORDER_UNSPEC = 0x0 + BPF_CGROUP_ITER_SELF_ONLY = 0x1 + BPF_CGROUP_ITER_DESCENDANTS_PRE = 0x2 + BPF_CGROUP_ITER_DESCENDANTS_POST = 0x3 + BPF_CGROUP_ITER_ANCESTORS_UP = 0x4 BPF_MAP_CREATE = 0x0 BPF_MAP_LOOKUP_ELEM = 0x1 BPF_MAP_UPDATE_ELEM = 0x2 @@ 
-2566,6 +2575,7 @@ const ( BPF_PROG_ATTACH = 0x8 BPF_PROG_DETACH = 0x9 BPF_PROG_TEST_RUN = 0xa + BPF_PROG_RUN = 0xa BPF_PROG_GET_NEXT_ID = 0xb BPF_MAP_GET_NEXT_ID = 0xc BPF_PROG_GET_FD_BY_ID = 0xd @@ -2610,6 +2620,7 @@ const ( BPF_MAP_TYPE_CPUMAP = 0x10 BPF_MAP_TYPE_XSKMAP = 0x11 BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 0x13 BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 @@ -2620,6 +2631,10 @@ const ( BPF_MAP_TYPE_STRUCT_OPS = 0x1a BPF_MAP_TYPE_RINGBUF = 0x1b BPF_MAP_TYPE_INODE_STORAGE = 0x1c + BPF_MAP_TYPE_TASK_STORAGE = 0x1d + BPF_MAP_TYPE_BLOOM_FILTER = 0x1e + BPF_MAP_TYPE_USER_RINGBUF = 0x1f + BPF_MAP_TYPE_CGRP_STORAGE = 0x20 BPF_PROG_TYPE_UNSPEC = 0x0 BPF_PROG_TYPE_SOCKET_FILTER = 0x1 BPF_PROG_TYPE_KPROBE = 0x2 @@ -2651,6 +2666,7 @@ const ( BPF_PROG_TYPE_EXT = 0x1c BPF_PROG_TYPE_LSM = 0x1d BPF_PROG_TYPE_SK_LOOKUP = 0x1e + BPF_PROG_TYPE_SYSCALL = 0x1f BPF_CGROUP_INET_INGRESS = 0x0 BPF_CGROUP_INET_EGRESS = 0x1 BPF_CGROUP_INET_SOCK_CREATE = 0x2 @@ -2689,6 +2705,12 @@ const ( BPF_XDP_CPUMAP = 0x23 BPF_SK_LOOKUP = 0x24 BPF_XDP = 0x25 + BPF_SK_SKB_VERDICT = 0x26 + BPF_SK_REUSEPORT_SELECT = 0x27 + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 0x28 + BPF_PERF_EVENT = 0x29 + BPF_TRACE_KPROBE_MULTI = 0x2a + BPF_LSM_CGROUP = 0x2b BPF_LINK_TYPE_UNSPEC = 0x0 BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 BPF_LINK_TYPE_TRACING = 0x2 @@ -2696,6 +2718,9 @@ const ( BPF_LINK_TYPE_ITER = 0x4 BPF_LINK_TYPE_NETNS = 0x5 BPF_LINK_TYPE_XDP = 0x6 + BPF_LINK_TYPE_PERF_EVENT = 0x7 + BPF_LINK_TYPE_KPROBE_MULTI = 0x8 + BPF_LINK_TYPE_STRUCT_OPS = 0x9 BPF_ANY = 0x0 BPF_NOEXIST = 0x1 BPF_EXIST = 0x2 @@ -2733,6 +2758,7 @@ const ( BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_DONT_FRAGMENT = 0x4 BPF_F_SEQ_NUMBER = 0x8 + BPF_F_TUNINFO_FLAGS = 0x10 BPF_F_INDEX_MASK = 0xffffffff BPF_F_CURRENT_CPU = 0xffffffff BPF_F_CTXLEN_MASK = 0xfffff00000000 @@ -2747,6 +2773,7 @@ const ( BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 0x40 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_F_SYSCTL_BASE_NAME = 0x1 @@ -2771,10 +2798,16 @@ const ( BPF_LWT_ENCAP_SEG6 = 0x0 BPF_LWT_ENCAP_SEG6_INLINE = 0x1 BPF_LWT_ENCAP_IP = 0x2 + BPF_F_BPRM_SECUREEXEC = 0x1 + BPF_F_BROADCAST = 0x8 + BPF_F_EXCLUDE_INGRESS = 0x10 + BPF_SKB_TSTAMP_UNSPEC = 0x0 + BPF_SKB_TSTAMP_DELIVERY_MONO = 0x1 BPF_OK = 0x0 BPF_DROP = 0x2 BPF_REDIRECT = 0x7 BPF_LWT_REROUTE = 0x80 + BPF_FLOW_DISSECTOR_CONTINUE = 0x81 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 @@ -2838,6 +2871,10 @@ const ( BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_MTU_CHK_SEGS = 0x1 + BPF_MTU_CHK_RET_SUCCESS = 0x0 + BPF_MTU_CHK_RET_FRAG_NEEDED = 0x1 + BPF_MTU_CHK_RET_SEGS_TOOBIG = 0x2 BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 BPF_FD_TYPE_TRACEPOINT = 0x1 BPF_FD_TYPE_KPROBE = 0x2 @@ -2847,6 +2884,19 @@ const ( BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1 BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2 BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4 + BPF_CORE_FIELD_BYTE_OFFSET = 0x0 + BPF_CORE_FIELD_BYTE_SIZE = 0x1 + BPF_CORE_FIELD_EXISTS = 0x2 + BPF_CORE_FIELD_SIGNED = 0x3 + BPF_CORE_FIELD_LSHIFT_U64 = 0x4 + BPF_CORE_FIELD_RSHIFT_U64 = 0x5 + BPF_CORE_TYPE_ID_LOCAL = 0x6 + BPF_CORE_TYPE_ID_TARGET = 0x7 + BPF_CORE_TYPE_EXISTS = 0x8 + BPF_CORE_TYPE_SIZE = 0x9 + BPF_CORE_ENUMVAL_EXISTS = 0xa + BPF_CORE_ENUMVAL_VALUE = 0xb + BPF_CORE_TYPE_MATCHES = 0xc ) const ( @@ -3605,7 +3655,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x26 + ETHTOOL_MSG_USER_MAX = 0x2b ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3645,7 +3695,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 
0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x26 + ETHTOOL_MSG_KERNEL_MAX = 0x2b ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3749,7 +3799,7 @@ const ( ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb ETHTOOL_A_RINGS_CQE_SIZE = 0xc ETHTOOL_A_RINGS_TX_PUSH = 0xd - ETHTOOL_A_RINGS_MAX = 0xd + ETHTOOL_A_RINGS_MAX = 0x10 ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -3787,14 +3837,14 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x19 + ETHTOOL_A_COALESCE_MAX = 0x1c ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 ETHTOOL_A_PAUSE_RX = 0x3 ETHTOOL_A_PAUSE_TX = 0x4 ETHTOOL_A_PAUSE_STATS = 0x5 - ETHTOOL_A_PAUSE_MAX = 0x5 + ETHTOOL_A_PAUSE_MAX = 0x6 ETHTOOL_A_PAUSE_STAT_UNSPEC = 0x0 ETHTOOL_A_PAUSE_STAT_PAD = 0x1 ETHTOOL_A_PAUSE_STAT_TX_FRAMES = 0x2 @@ -4444,7 +4494,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x141 + NL80211_ATTR_MAX = 0x145 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4673,7 +4723,7 @@ const ( NL80211_BAND_ATTR_HT_CAPA = 0x4 NL80211_BAND_ATTR_HT_MCS_SET = 0x3 NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 - NL80211_BAND_ATTR_MAX = 0xb + NL80211_BAND_ATTR_MAX = 0xd NL80211_BAND_ATTR_RATES = 0x2 NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 @@ -4814,7 +4864,7 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x98 + NL80211_CMD_MAX = 0x99 NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5795,6 +5845,8 @@ const ( TUN_F_TSO6 = 0x4 TUN_F_TSO_ECN = 0x8 TUN_F_UFO = 0x10 + TUN_F_USO4 = 0x20 + TUN_F_USO6 = 0x40 ) const ( @@ -5804,9 
+5856,10 @@ const ( ) const ( - VIRTIO_NET_HDR_GSO_NONE = 0x0 - VIRTIO_NET_HDR_GSO_TCPV4 = 0x1 - VIRTIO_NET_HDR_GSO_UDP = 0x3 - VIRTIO_NET_HDR_GSO_TCPV6 = 0x4 - VIRTIO_NET_HDR_GSO_ECN = 0x80 + VIRTIO_NET_HDR_GSO_NONE = 0x0 + VIRTIO_NET_HDR_GSO_TCPV4 = 0x1 + VIRTIO_NET_HDR_GSO_UDP = 0x3 + VIRTIO_NET_HDR_GSO_TCPV6 = 0x4 + VIRTIO_NET_HDR_GSO_UDP_L4 = 0x5 + VIRTIO_NET_HDR_GSO_ECN = 0x80 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 4ecc1495cd..6d8acbcc57 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -337,6 +337,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 34fddff964..59293c6884 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -350,6 +350,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 3b14a6031f..40cfa38c29 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -328,6 +328,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total 
uint64 } type cpuMask uint32 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 0517651ab3..055bc4216d 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -329,6 +329,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 3b0c518134..f28affbc60 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -330,6 +330,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index fccdf4dd0f..9d71e7ccd8 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -333,6 +333,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 500de8fc07..fd5ccd332a 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -332,6 +332,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index d0434cd2c6..7704de77a2 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -332,6 +332,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 84206ba534..df00b87571 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -333,6 +333,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index ab078cf1f5..0942840db6 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -340,6 +340,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint32 diff --git 
a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 42eb2c4cef..0348743950 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -339,6 +339,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 31304a4e8b..bad0670475 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -339,6 +339,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index c311f9612d..9ea54b7b86 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -357,6 +357,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index bba3cefac1..aa268d025c 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ 
b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -352,6 +352,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index ad8a013804..444045b6c5 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -334,6 +334,8 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 + Irq_count uint64 + Irq_delay_total uint64 } type cpuMask uint64 diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/env_windows.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/env_windows.go index 92ac05ff4e..b8ad192506 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/windows/env_windows.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,14 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := uintptr(unsafe.Pointer(block)) + blockp := unsafe.Pointer(block) for { - entry := UTF16PtrToString((*uint16)(unsafe.Pointer(blockp))) + entry := UTF16PtrToString((*uint16)(blockp)) if len(entry) == 0 { break } env = append(env, entry) - blockp += 2 * (uintptr(len(entry)) + 1) + blockp = unsafe.Add(blockp, 2*(len(entry)+1)) } return env, nil } diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/exec_windows.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/exec_windows.go index 75980fd44a..a52e0331d8 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/windows/exec_windows.go +++ 
b/terraform/providers/google/vendor/golang.org/x/sys/windows/exec_windows.go @@ -95,12 +95,17 @@ func ComposeCommandLine(args []string) string { // DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, // as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that // command lines are passed around. +// DecomposeCommandLine returns error if commandLine contains NUL. func DecomposeCommandLine(commandLine string) ([]string, error) { if len(commandLine) == 0 { return []string{}, nil } + utf16CommandLine, err := UTF16FromString(commandLine) + if err != nil { + return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine") + } var argc int32 - argv, err := CommandLineToArgv(StringToUTF16Ptr(commandLine), &argc) + argv, err := CommandLineToArgv(&utf16CommandLine[0], &argc) if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/service.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/service.go index f8deca8397..c44a1b9636 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/windows/service.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/service.go @@ -141,6 +141,12 @@ const ( SERVICE_DYNAMIC_INFORMATION_LEVEL_START_REASON = 1 ) +type ENUM_SERVICE_STATUS struct { + ServiceName *uint16 + DisplayName *uint16 + ServiceStatus SERVICE_STATUS +} + type SERVICE_STATUS struct { ServiceType uint32 CurrentState uint32 @@ -212,6 +218,10 @@ type SERVICE_FAILURE_ACTIONS struct { Actions *SC_ACTION } +type SERVICE_FAILURE_ACTIONS_FLAG struct { + FailureActionsOnNonCrashFailures int32 +} + type SC_ACTION struct { Type uint32 Delay uint32 @@ -245,3 +255,4 @@ type QUERY_SERVICE_LOCK_STATUS struct { //sys UnsubscribeServiceChangeNotifications(subscription uintptr) = sechost.UnsubscribeServiceChangeNotifications? 
//sys RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) = advapi32.RegisterServiceCtrlHandlerExW //sys QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) = advapi32.QueryServiceDynamicInformation? +//sys EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) = advapi32.EnumDependentServicesW diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/syscall_windows.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/syscall_windows.go index 3723b2c224..9645900754 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -405,7 +405,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) = version.VerQueryValueW // Process Status API (PSAPI) -//sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses +//sys enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses //sys EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) = psapi.EnumProcessModules //sys EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) = psapi.EnumProcessModulesEx //sys GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation @@ -1354,6 +1354,17 @@ func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } +func EnumProcesses(processIds []uint32, bytesReturned *uint32) error { + // EnumProcesses syscall 
expects the size parameter to be in bytes, but the code generated with mksyscall uses + // the length of the processIds slice instead. Hence, this wrapper function is added to fix the discrepancy. + var p *uint32 + if len(processIds) > 0 { + p = &processIds[0] + } + size := uint32(len(processIds) * 4) + return enumProcesses(p, size, bytesReturned) +} + func Getpid() (pid int) { return int(GetCurrentProcessId()) } func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/types_windows.go b/terraform/providers/google/vendor/golang.org/x/sys/windows/types_windows.go index 857acf1032..88e62a6385 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/windows/types_windows.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/types_windows.go @@ -2220,19 +2220,23 @@ type JOBOBJECT_BASIC_UI_RESTRICTIONS struct { } const ( - // JobObjectInformationClass + // JobObjectInformationClass for QueryInformationJobObject and SetInformationJobObject JobObjectAssociateCompletionPortInformation = 7 + JobObjectBasicAccountingInformation = 1 + JobObjectBasicAndIoAccountingInformation = 8 JobObjectBasicLimitInformation = 2 + JobObjectBasicProcessIdList = 3 JobObjectBasicUIRestrictions = 4 JobObjectCpuRateControlInformation = 15 JobObjectEndOfJobTimeInformation = 6 JobObjectExtendedLimitInformation = 9 JobObjectGroupInformation = 11 JobObjectGroupInformationEx = 14 - JobObjectLimitViolationInformation2 = 35 + JobObjectLimitViolationInformation = 13 + JobObjectLimitViolationInformation2 = 34 JobObjectNetRateControlInformation = 32 JobObjectNotificationLimitInformation = 12 - JobObjectNotificationLimitInformation2 = 34 + JobObjectNotificationLimitInformation2 = 33 JobObjectSecurityLimitInformation = 5 ) diff --git a/terraform/providers/google/vendor/golang.org/x/sys/windows/zsyscall_windows.go 
b/terraform/providers/google/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 6d2a268534..566dd3e315 100644 --- a/terraform/providers/google/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/terraform/providers/google/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -86,6 +86,7 @@ var ( procDeleteService = modadvapi32.NewProc("DeleteService") procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procEnumDependentServicesW = modadvapi32.NewProc("EnumDependentServicesW") procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") @@ -734,6 +735,14 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes return } +func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) if r1 == 0 { @@ -3507,12 +3516,8 @@ func EnumProcessModulesEx(process 
Handle, module *Handle, cb uint32, cbNeeded *u return } -func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { - var _p0 *uint32 - if len(processIds) > 0 { - _p0 = &processIds[0] - } - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) +func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) if r1 == 0 { err = errnoErr(e1) } diff --git a/terraform/providers/google/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/terraform/providers/google/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go index f248effae1..ffadb7bebd 100644 --- a/terraform/providers/google/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +++ b/terraform/providers/google/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -1,7 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -//go:build go1.16 -// +build go1.16 +//go:build go1.16 && !go1.21 +// +build go1.16,!go1.21 package bidi diff --git a/terraform/providers/google/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go b/terraform/providers/google/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go new file mode 100644 index 0000000000..92cce5802c --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go @@ -0,0 +1,2043 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.21 +// +build go1.21 + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "15.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. 
+var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 19904 bytes (19.44 KiB). Checksum: b1f201ed2debb6c8. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 259 blocks, 16576 entries, 16576 bytes +// The third block is the zero block. 
+var bidiValues = [16576]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 
0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 
0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 
0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 
0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 
0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 
0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 
0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x000c, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 
0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d, + 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d, + 0x5ea: 0x000d, 0x5eb: 0x0001, 0x5ec: 0x0001, 0x5ed: 0x0001, 0x5ee: 0x0001, 0x5ef: 0x0001, + 0x5f0: 0x000d, 0x5f1: 0x000d, 0x5f2: 0x000d, 0x5f3: 0x000d, 0x5f4: 0x000d, 0x5f5: 0x000d, + 0x5f6: 0x000d, 0x5f7: 0x000d, 0x5f8: 0x000d, 0x5f9: 0x000d, 0x5fa: 0x000d, 0x5fb: 0x000d, + 0x5fc: 0x000d, 0x5fd: 0x000d, 0x5fe: 0x000d, 0x5ff: 0x000d, + // Block 0x18, offset 0x600 + 0x600: 0x000d, 0x601: 0x000d, 0x602: 0x000d, 0x603: 0x000d, 0x604: 0x000d, 0x605: 0x000d, + 0x606: 0x000d, 0x607: 0x000d, 0x608: 0x000d, 0x609: 0x000d, 0x60a: 0x000d, 0x60b: 0x000d, + 0x60c: 0x000d, 0x60d: 0x000d, 0x60e: 0x000d, 0x60f: 0x0001, 0x610: 0x0005, 0x611: 0x0005, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x000c, 0x619: 0x000c, 0x61a: 0x000c, 0x61b: 0x000c, 0x61c: 0x000c, 0x61d: 0x000c, + 0x61e: 0x000c, 0x61f: 0x000c, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 
0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000c, 0x64b: 0x000c, + 0x64c: 0x000c, 0x64d: 0x000c, 0x64e: 0x000c, 0x64f: 0x000c, 0x650: 0x000c, 0x651: 0x000c, + 0x652: 0x000c, 0x653: 0x000c, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + 0x77e: 0x000c, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 
0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + 0x83a: 0x000c, 0x83b: 0x000c, + 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x895: 0x000c, 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, 0x944: 0x000c, + 0x97c: 0x000c, 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, + 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa00: 0x000c, 0xa01: 0x000c, + 0xa3b: 0x000c, + 0xa3c: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa81: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaca: 0x000c, + 0xad2: 0x000c, 0xad3: 0x000c, 0xad4: 0x000c, 0xad6: 0x000c, + // Block 0x2c, offset 0xb00 + 0xb31: 0x000c, 0xb34: 0x000c, 0xb35: 0x000c, + 0xb36: 0x000c, 0xb37: 0x000c, 
0xb38: 0x000c, 0xb39: 0x000c, 0xb3a: 0x000c, + 0xb3f: 0x0004, + // Block 0x2d, offset 0xb40 + 0xb47: 0x000c, 0xb48: 0x000c, 0xb49: 0x000c, 0xb4a: 0x000c, 0xb4b: 0x000c, + 0xb4c: 0x000c, 0xb4d: 0x000c, 0xb4e: 0x000c, + // Block 0x2e, offset 0xb80 + 0xbb1: 0x000c, 0xbb4: 0x000c, 0xbb5: 0x000c, + 0xbb6: 0x000c, 0xbb7: 0x000c, 0xbb8: 0x000c, 0xbb9: 0x000c, 0xbba: 0x000c, 0xbbb: 0x000c, + 0xbbc: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbc8: 0x000c, 0xbc9: 0x000c, 0xbca: 0x000c, 0xbcb: 0x000c, + 0xbcc: 0x000c, 0xbcd: 0x000c, 0xbce: 0x000c, + // Block 0x30, offset 0xc00 + 0xc18: 0x000c, 0xc19: 0x000c, + 0xc35: 0x000c, + 0xc37: 0x000c, 0xc39: 0x000c, 0xc3a: 0x003a, 0xc3b: 0x002a, + 0xc3c: 0x003a, 0xc3d: 0x002a, + // Block 0x31, offset 0xc40 + 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, 0xc7d: 0x000c, 0xc7e: 0x000c, + // Block 0x32, offset 0xc80 + 0xc80: 0x000c, 0xc81: 0x000c, 0xc82: 0x000c, 0xc83: 0x000c, 0xc84: 0x000c, + 0xc86: 0x000c, 0xc87: 0x000c, + 0xc8d: 0x000c, 0xc8e: 0x000c, 0xc8f: 0x000c, 0xc90: 0x000c, 0xc91: 0x000c, + 0xc92: 0x000c, 0xc93: 0x000c, 0xc94: 0x000c, 0xc95: 0x000c, 0xc96: 0x000c, 0xc97: 0x000c, + 0xc99: 0x000c, 0xc9a: 0x000c, 0xc9b: 0x000c, 0xc9c: 0x000c, 0xc9d: 0x000c, + 0xc9e: 0x000c, 0xc9f: 0x000c, 0xca0: 0x000c, 0xca1: 0x000c, 0xca2: 0x000c, 0xca3: 0x000c, + 0xca4: 0x000c, 0xca5: 0x000c, 0xca6: 0x000c, 0xca7: 0x000c, 0xca8: 0x000c, 0xca9: 0x000c, + 0xcaa: 0x000c, 0xcab: 0x000c, 0xcac: 0x000c, 0xcad: 0x000c, 0xcae: 0x000c, 0xcaf: 0x000c, + 0xcb0: 0x000c, 0xcb1: 0x000c, 0xcb2: 0x000c, 0xcb3: 0x000c, 0xcb4: 0x000c, 0xcb5: 0x000c, + 0xcb6: 0x000c, 0xcb7: 0x000c, 0xcb8: 0x000c, 0xcb9: 0x000c, 0xcba: 0x000c, 0xcbb: 0x000c, + 0xcbc: 0x000c, + // Block 0x33, offset 0xcc0 + 0xcc6: 0x000c, + // Block 0x34, offset 0xd00 + 0xd2d: 0x000c, 0xd2e: 0x000c, 0xd2f: 0x000c, + 0xd30: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 
0x000c, 0xd35: 0x000c, + 0xd36: 0x000c, 0xd37: 0x000c, 0xd39: 0x000c, 0xd3a: 0x000c, + 0xd3d: 0x000c, 0xd3e: 0x000c, + // Block 0x35, offset 0xd40 + 0xd58: 0x000c, 0xd59: 0x000c, + 0xd5e: 0x000c, 0xd5f: 0x000c, 0xd60: 0x000c, + 0xd71: 0x000c, 0xd72: 0x000c, 0xd73: 0x000c, 0xd74: 0x000c, + // Block 0x36, offset 0xd80 + 0xd82: 0x000c, 0xd85: 0x000c, + 0xd86: 0x000c, + 0xd8d: 0x000c, + 0xd9d: 0x000c, + // Block 0x37, offset 0xdc0 + 0xddd: 0x000c, + 0xdde: 0x000c, 0xddf: 0x000c, + // Block 0x38, offset 0xe00 + 0xe10: 0x000a, 0xe11: 0x000a, + 0xe12: 0x000a, 0xe13: 0x000a, 0xe14: 0x000a, 0xe15: 0x000a, 0xe16: 0x000a, 0xe17: 0x000a, + 0xe18: 0x000a, 0xe19: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x000a, + // Block 0x3a, offset 0xe80 + 0xe80: 0x0009, + 0xe9b: 0x007a, 0xe9c: 0x006a, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, 0xed4: 0x000c, + 0xef2: 0x000c, 0xef3: 0x000c, + // Block 0x3c, offset 0xf00 + 0xf12: 0x000c, 0xf13: 0x000c, + 0xf32: 0x000c, 0xf33: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf74: 0x000c, 0xf75: 0x000c, + 0xf77: 0x000c, 0xf78: 0x000c, 0xf79: 0x000c, 0xf7a: 0x000c, 0xf7b: 0x000c, + 0xf7c: 0x000c, 0xf7d: 0x000c, + // Block 0x3e, offset 0xf80 + 0xf86: 0x000c, 0xf89: 0x000c, 0xf8a: 0x000c, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000c, 0xf8f: 0x000c, 0xf90: 0x000c, 0xf91: 0x000c, + 0xf92: 0x000c, 0xf93: 0x000c, + 0xf9b: 0x0004, 0xf9d: 0x000c, + 0xfb0: 0x000a, 0xfb1: 0x000a, 0xfb2: 0x000a, 0xfb3: 0x000a, 0xfb4: 0x000a, 0xfb5: 0x000a, + 0xfb6: 0x000a, 0xfb7: 0x000a, 0xfb8: 0x000a, 0xfb9: 0x000a, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x000a, 0xfc1: 0x000a, 0xfc2: 0x000a, 0xfc3: 0x000a, 0xfc4: 0x000a, 0xfc5: 0x000a, + 0xfc6: 0x000a, 0xfc7: 0x000a, 0xfc8: 0x000a, 0xfc9: 0x000a, 0xfca: 0x000a, 0xfcb: 0x000c, + 0xfcc: 0x000c, 0xfcd: 0x000c, 0xfce: 0x000b, 0xfcf: 0x000c, + // Block 0x40, offset 0x1000 + 0x1005: 0x000c, + 0x1006: 0x000c, + 0x1029: 0x000c, + // Block 0x41, offset 0x1040 + 0x1060: 0x000c, 0x1061: 0x000c, 
0x1062: 0x000c, + 0x1067: 0x000c, 0x1068: 0x000c, + 0x1072: 0x000c, + 0x1079: 0x000c, 0x107a: 0x000c, 0x107b: 0x000c, + // Block 0x42, offset 0x1080 + 0x1080: 0x000a, 0x1084: 0x000a, 0x1085: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10de: 0x000a, 0x10df: 0x000a, 0x10e0: 0x000a, 0x10e1: 0x000a, 0x10e2: 0x000a, 0x10e3: 0x000a, + 0x10e4: 0x000a, 0x10e5: 0x000a, 0x10e6: 0x000a, 0x10e7: 0x000a, 0x10e8: 0x000a, 0x10e9: 0x000a, + 0x10ea: 0x000a, 0x10eb: 0x000a, 0x10ec: 0x000a, 0x10ed: 0x000a, 0x10ee: 0x000a, 0x10ef: 0x000a, + 0x10f0: 0x000a, 0x10f1: 0x000a, 0x10f2: 0x000a, 0x10f3: 0x000a, 0x10f4: 0x000a, 0x10f5: 0x000a, + 0x10f6: 0x000a, 0x10f7: 0x000a, 0x10f8: 0x000a, 0x10f9: 0x000a, 0x10fa: 0x000a, 0x10fb: 0x000a, + 0x10fc: 0x000a, 0x10fd: 0x000a, 0x10fe: 0x000a, 0x10ff: 0x000a, + // Block 0x44, offset 0x1100 + 0x1117: 0x000c, + 0x1118: 0x000c, 0x111b: 0x000c, + // Block 0x45, offset 0x1140 + 0x1156: 0x000c, + 0x1158: 0x000c, 0x1159: 0x000c, 0x115a: 0x000c, 0x115b: 0x000c, 0x115c: 0x000c, 0x115d: 0x000c, + 0x115e: 0x000c, 0x1160: 0x000c, 0x1162: 0x000c, + 0x1165: 0x000c, 0x1166: 0x000c, 0x1167: 0x000c, 0x1168: 0x000c, 0x1169: 0x000c, + 0x116a: 0x000c, 0x116b: 0x000c, 0x116c: 0x000c, + 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117f: 0x000c, + // Block 0x46, offset 0x1180 + 0x11b0: 0x000c, 0x11b1: 0x000c, 0x11b2: 0x000c, 0x11b3: 0x000c, 0x11b4: 0x000c, 0x11b5: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, 0x11bb: 0x000c, + 0x11bc: 0x000c, 0x11bd: 0x000c, 0x11be: 0x000c, 0x11bf: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x000c, 0x11c1: 0x000c, 0x11c2: 0x000c, 0x11c3: 0x000c, 0x11c4: 0x000c, 0x11c5: 0x000c, + 0x11c6: 0x000c, 0x11c7: 0x000c, 0x11c8: 0x000c, 0x11c9: 0x000c, 0x11ca: 0x000c, 0x11cb: 0x000c, + 0x11cc: 0x000c, 0x11cd: 0x000c, 0x11ce: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 
0x1201: 0x000c, 0x1202: 0x000c, 0x1203: 0x000c, + 0x1234: 0x000c, + 0x1236: 0x000c, 0x1237: 0x000c, 0x1238: 0x000c, 0x1239: 0x000c, 0x123a: 0x000c, + 0x123c: 0x000c, + // Block 0x49, offset 0x1240 + 0x1242: 0x000c, + 0x126b: 0x000c, 0x126c: 0x000c, 0x126d: 0x000c, 0x126e: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, 0x1272: 0x000c, 0x1273: 0x000c, + // Block 0x4a, offset 0x1280 + 0x1280: 0x000c, 0x1281: 0x000c, + 0x12a2: 0x000c, 0x12a3: 0x000c, + 0x12a4: 0x000c, 0x12a5: 0x000c, 0x12a8: 0x000c, 0x12a9: 0x000c, + 0x12ab: 0x000c, 0x12ac: 0x000c, 0x12ad: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12e6: 0x000c, 0x12e8: 0x000c, 0x12e9: 0x000c, + 0x12ed: 0x000c, 0x12ef: 0x000c, + 0x12f0: 0x000c, 0x12f1: 0x000c, + // Block 0x4c, offset 0x1300 + 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, + 0x1336: 0x000c, 0x1337: 0x000c, + // Block 0x4d, offset 0x1340 + 0x1350: 0x000c, 0x1351: 0x000c, + 0x1352: 0x000c, 0x1354: 0x000c, 0x1355: 0x000c, 0x1356: 0x000c, 0x1357: 0x000c, + 0x1358: 0x000c, 0x1359: 0x000c, 0x135a: 0x000c, 0x135b: 0x000c, 0x135c: 0x000c, 0x135d: 0x000c, + 0x135e: 0x000c, 0x135f: 0x000c, 0x1360: 0x000c, 0x1362: 0x000c, 0x1363: 0x000c, + 0x1364: 0x000c, 0x1365: 0x000c, 0x1366: 0x000c, 0x1367: 0x000c, 0x1368: 0x000c, + 0x136d: 0x000c, + 0x1374: 0x000c, + 0x1378: 0x000c, 0x1379: 0x000c, + // Block 0x4e, offset 0x1380 + 0x13bd: 0x000a, 0x13bf: 0x000a, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x000a, 0x13c1: 0x000a, + 0x13cd: 0x000a, 0x13ce: 0x000a, 0x13cf: 0x000a, + 0x13dd: 0x000a, + 0x13de: 0x000a, 0x13df: 0x000a, + 0x13ed: 0x000a, 0x13ee: 0x000a, 0x13ef: 0x000a, + 0x13fd: 0x000a, 0x13fe: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x0009, 0x1401: 0x0009, 0x1402: 0x0009, 0x1403: 0x0009, 0x1404: 0x0009, 0x1405: 0x0009, + 0x1406: 0x0009, 0x1407: 0x0009, 0x1408: 0x0009, 0x1409: 0x0009, 0x140a: 0x0009, 0x140b: 0x000b, + 0x140c: 0x000b, 0x140d: 0x000b, 0x140f: 0x0001, 
0x1410: 0x000a, 0x1411: 0x000a, + 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, + 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x000a, 0x1420: 0x000a, 0x1421: 0x000a, 0x1422: 0x000a, 0x1423: 0x000a, + 0x1424: 0x000a, 0x1425: 0x000a, 0x1426: 0x000a, 0x1427: 0x000a, 0x1428: 0x0009, 0x1429: 0x0007, + 0x142a: 0x000e, 0x142b: 0x000e, 0x142c: 0x000e, 0x142d: 0x000e, 0x142e: 0x000e, 0x142f: 0x0006, + 0x1430: 0x0004, 0x1431: 0x0004, 0x1432: 0x0004, 0x1433: 0x0004, 0x1434: 0x0004, 0x1435: 0x000a, + 0x1436: 0x000a, 0x1437: 0x000a, 0x1438: 0x000a, 0x1439: 0x000a, 0x143a: 0x000a, 0x143b: 0x000a, + 0x143c: 0x000a, 0x143d: 0x000a, 0x143e: 0x000a, 0x143f: 0x000a, + // Block 0x51, offset 0x1440 + 0x1440: 0x000a, 0x1441: 0x000a, 0x1442: 0x000a, 0x1443: 0x000a, 0x1444: 0x0006, 0x1445: 0x009a, + 0x1446: 0x008a, 0x1447: 0x000a, 0x1448: 0x000a, 0x1449: 0x000a, 0x144a: 0x000a, 0x144b: 0x000a, + 0x144c: 0x000a, 0x144d: 0x000a, 0x144e: 0x000a, 0x144f: 0x000a, 0x1450: 0x000a, 0x1451: 0x000a, + 0x1452: 0x000a, 0x1453: 0x000a, 0x1454: 0x000a, 0x1455: 0x000a, 0x1456: 0x000a, 0x1457: 0x000a, + 0x1458: 0x000a, 0x1459: 0x000a, 0x145a: 0x000a, 0x145b: 0x000a, 0x145c: 0x000a, 0x145d: 0x000a, + 0x145e: 0x000a, 0x145f: 0x0009, 0x1460: 0x000b, 0x1461: 0x000b, 0x1462: 0x000b, 0x1463: 0x000b, + 0x1464: 0x000b, 0x1465: 0x000b, 0x1466: 0x000e, 0x1467: 0x000e, 0x1468: 0x000e, 0x1469: 0x000e, + 0x146a: 0x000b, 0x146b: 0x000b, 0x146c: 0x000b, 0x146d: 0x000b, 0x146e: 0x000b, 0x146f: 0x000b, + 0x1470: 0x0002, 0x1474: 0x0002, 0x1475: 0x0002, + 0x1476: 0x0002, 0x1477: 0x0002, 0x1478: 0x0002, 0x1479: 0x0002, 0x147a: 0x0003, 0x147b: 0x0003, + 0x147c: 0x000a, 0x147d: 0x009a, 0x147e: 0x008a, + // Block 0x52, offset 0x1480 + 0x1480: 0x0002, 0x1481: 0x0002, 0x1482: 0x0002, 0x1483: 0x0002, 0x1484: 0x0002, 0x1485: 0x0002, + 0x1486: 0x0002, 0x1487: 0x0002, 0x1488: 0x0002, 0x1489: 0x0002, 0x148a: 
0x0003, 0x148b: 0x0003, + 0x148c: 0x000a, 0x148d: 0x009a, 0x148e: 0x008a, + 0x14a0: 0x0004, 0x14a1: 0x0004, 0x14a2: 0x0004, 0x14a3: 0x0004, + 0x14a4: 0x0004, 0x14a5: 0x0004, 0x14a6: 0x0004, 0x14a7: 0x0004, 0x14a8: 0x0004, 0x14a9: 0x0004, + 0x14aa: 0x0004, 0x14ab: 0x0004, 0x14ac: 0x0004, 0x14ad: 0x0004, 0x14ae: 0x0004, 0x14af: 0x0004, + 0x14b0: 0x0004, 0x14b1: 0x0004, 0x14b2: 0x0004, 0x14b3: 0x0004, 0x14b4: 0x0004, 0x14b5: 0x0004, + 0x14b6: 0x0004, 0x14b7: 0x0004, 0x14b8: 0x0004, 0x14b9: 0x0004, 0x14ba: 0x0004, 0x14bb: 0x0004, + 0x14bc: 0x0004, 0x14bd: 0x0004, 0x14be: 0x0004, 0x14bf: 0x0004, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0004, 0x14c1: 0x0004, 0x14c2: 0x0004, 0x14c3: 0x0004, 0x14c4: 0x0004, 0x14c5: 0x0004, + 0x14c6: 0x0004, 0x14c7: 0x0004, 0x14c8: 0x0004, 0x14c9: 0x0004, 0x14ca: 0x0004, 0x14cb: 0x0004, + 0x14cc: 0x0004, 0x14cd: 0x0004, 0x14ce: 0x0004, 0x14cf: 0x0004, 0x14d0: 0x000c, 0x14d1: 0x000c, + 0x14d2: 0x000c, 0x14d3: 0x000c, 0x14d4: 0x000c, 0x14d5: 0x000c, 0x14d6: 0x000c, 0x14d7: 0x000c, + 0x14d8: 0x000c, 0x14d9: 0x000c, 0x14da: 0x000c, 0x14db: 0x000c, 0x14dc: 0x000c, 0x14dd: 0x000c, + 0x14de: 0x000c, 0x14df: 0x000c, 0x14e0: 0x000c, 0x14e1: 0x000c, 0x14e2: 0x000c, 0x14e3: 0x000c, + 0x14e4: 0x000c, 0x14e5: 0x000c, 0x14e6: 0x000c, 0x14e7: 0x000c, 0x14e8: 0x000c, 0x14e9: 0x000c, + 0x14ea: 0x000c, 0x14eb: 0x000c, 0x14ec: 0x000c, 0x14ed: 0x000c, 0x14ee: 0x000c, 0x14ef: 0x000c, + 0x14f0: 0x000c, + // Block 0x54, offset 0x1500 + 0x1500: 0x000a, 0x1501: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a, 0x1505: 0x000a, + 0x1506: 0x000a, 0x1508: 0x000a, 0x1509: 0x000a, + 0x1514: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, + 0x1518: 0x000a, + 0x151e: 0x000a, 0x151f: 0x000a, 0x1520: 0x000a, 0x1521: 0x000a, 0x1522: 0x000a, 0x1523: 0x000a, + 0x1525: 0x000a, 0x1527: 0x000a, 0x1529: 0x000a, + 0x152e: 0x0004, + 0x153a: 0x000a, 0x153b: 0x000a, + // Block 0x55, offset 0x1540 + 0x1540: 0x000a, 0x1541: 0x000a, 0x1542: 0x000a, 0x1543: 0x000a, 0x1544: 0x000a, + 0x154a: 0x000a, 
0x154b: 0x000a, + 0x154c: 0x000a, 0x154d: 0x000a, 0x1550: 0x000a, 0x1551: 0x000a, + 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, + // Block 0x56, offset 0x1580 + 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, + 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, + 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, + 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, + 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, + 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, + 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, + 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x000a, 0x15d3: 0x000a, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 
0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x000a, 0x1609: 0x000a, 0x160a: 0x000a, 0x160b: 0x000a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x0003, 0x1613: 0x0004, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x000a, + 0x162a: 0x000a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + 0x1636: 0x000a, 0x1637: 0x000a, 0x1638: 0x000a, 0x1639: 0x000a, 0x163a: 0x000a, 0x163b: 0x000a, + 0x163c: 0x000a, 0x163d: 0x000a, 0x163e: 0x000a, 0x163f: 0x000a, + // Block 0x59, offset 0x1640 + 0x1640: 0x000a, 0x1641: 0x000a, 0x1642: 0x000a, 0x1643: 0x000a, 0x1644: 0x000a, 0x1645: 0x000a, + 0x1646: 0x000a, 0x1647: 0x000a, 0x1648: 0x003a, 0x1649: 0x002a, 0x164a: 0x003a, 0x164b: 0x002a, + 0x164c: 0x000a, 0x164d: 0x000a, 0x164e: 0x000a, 0x164f: 0x000a, 0x1650: 0x000a, 0x1651: 0x000a, + 0x1652: 0x000a, 0x1653: 0x000a, 0x1654: 0x000a, 0x1655: 0x000a, 0x1656: 0x000a, 0x1657: 0x000a, + 0x1658: 0x000a, 0x1659: 0x000a, 0x165a: 0x000a, 0x165b: 0x000a, 0x165c: 0x000a, 0x165d: 0x000a, + 0x165e: 0x000a, 0x165f: 0x000a, 0x1660: 0x000a, 0x1661: 0x000a, 0x1662: 0x000a, 0x1663: 0x000a, + 0x1664: 0x000a, 0x1665: 0x000a, 0x1666: 
0x000a, 0x1667: 0x000a, 0x1668: 0x000a, 0x1669: 0x009a, + 0x166a: 0x008a, 0x166b: 0x000a, 0x166c: 0x000a, 0x166d: 0x000a, 0x166e: 0x000a, 0x166f: 0x000a, + 0x1670: 0x000a, 0x1671: 0x000a, 0x1672: 0x000a, 0x1673: 0x000a, 0x1674: 0x000a, 0x1675: 0x000a, + // Block 0x5a, offset 0x1680 + 0x16bb: 0x000a, + 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, + 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, + 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, + 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, + 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, + 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, + 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, 0x16e7: 0x000a, 0x16e8: 0x000a, 0x16e9: 0x000a, + 0x16ea: 0x000a, 0x16eb: 0x000a, 0x16ec: 0x000a, 0x16ed: 0x000a, 0x16ee: 0x000a, 0x16ef: 0x000a, + 0x16f0: 0x000a, 0x16f1: 0x000a, 0x16f2: 0x000a, 0x16f3: 0x000a, 0x16f4: 0x000a, 0x16f5: 0x000a, + 0x16f6: 0x000a, 0x16f7: 0x000a, 0x16f8: 0x000a, 0x16f9: 0x000a, 0x16fa: 0x000a, 0x16fb: 0x000a, + 0x16fc: 0x000a, 0x16fd: 0x000a, 0x16fe: 0x000a, 0x16ff: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, 0x170b: 0x000a, + 0x170c: 0x000a, 0x170d: 0x000a, 0x170e: 0x000a, 0x170f: 0x000a, 0x1710: 0x000a, 0x1711: 0x000a, + 0x1712: 0x000a, 0x1713: 0x000a, 0x1714: 0x000a, 0x1715: 0x000a, 0x1716: 0x000a, 0x1717: 0x000a, + 0x1718: 0x000a, 0x1719: 0x000a, 0x171a: 0x000a, 0x171b: 0x000a, 0x171c: 0x000a, 0x171d: 0x000a, + 0x171e: 0x000a, 0x171f: 0x000a, 0x1720: 0x000a, 
0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x000a, 0x1749: 0x000a, 0x174a: 0x000a, + 0x1760: 0x000a, 0x1761: 0x000a, 0x1762: 0x000a, 0x1763: 0x000a, + 0x1764: 0x000a, 0x1765: 0x000a, 0x1766: 0x000a, 0x1767: 0x000a, 0x1768: 0x000a, 0x1769: 0x000a, + 0x176a: 0x000a, 0x176b: 0x000a, 0x176c: 0x000a, 0x176d: 0x000a, 0x176e: 0x000a, 0x176f: 0x000a, + 0x1770: 0x000a, 0x1771: 0x000a, 0x1772: 0x000a, 0x1773: 0x000a, 0x1774: 0x000a, 0x1775: 0x000a, + 0x1776: 0x000a, 0x1777: 0x000a, 0x1778: 0x000a, 0x1779: 0x000a, 0x177a: 0x000a, 0x177b: 0x000a, + 0x177c: 0x000a, 0x177d: 0x000a, 0x177e: 0x000a, 0x177f: 0x000a, + // Block 0x5e, offset 0x1780 + 0x1780: 0x000a, 0x1781: 0x000a, 0x1782: 0x000a, 0x1783: 0x000a, 0x1784: 0x000a, 0x1785: 0x000a, + 0x1786: 0x000a, 0x1787: 0x000a, 0x1788: 0x0002, 0x1789: 0x0002, 0x178a: 0x0002, 0x178b: 0x0002, + 0x178c: 0x0002, 0x178d: 0x0002, 0x178e: 0x0002, 0x178f: 0x0002, 0x1790: 0x0002, 0x1791: 0x0002, + 0x1792: 0x0002, 0x1793: 0x0002, 0x1794: 0x0002, 0x1795: 0x0002, 0x1796: 0x0002, 0x1797: 0x0002, + 0x1798: 0x0002, 0x1799: 0x0002, 0x179a: 0x0002, 0x179b: 0x0002, + // Block 0x5f, offset 0x17c0 + 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ec: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, + 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, + 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, + 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, + // Block 0x60, offset 0x1800 + 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, + 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, + 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 
0x000a, 0x1810: 0x000a, 0x1811: 0x000a, + 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, + 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, + 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, + 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x000a, 0x1829: 0x000a, + 0x182a: 0x000a, 0x182b: 0x000a, 0x182d: 0x000a, 0x182e: 0x000a, 0x182f: 0x000a, + 0x1830: 0x000a, 0x1831: 0x000a, 0x1832: 0x000a, 0x1833: 0x000a, 0x1834: 0x000a, 0x1835: 0x000a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x000a, + 0x1846: 0x000a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x000a, 0x1867: 0x000a, 0x1868: 0x003a, 0x1869: 0x002a, + 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a, + 0x1870: 0x003a, 0x1871: 0x002a, 0x1872: 0x003a, 0x1873: 0x002a, 0x1874: 0x003a, 0x1875: 0x002a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x000a, 0x1884: 0x000a, 0x1885: 0x009a, + 0x1886: 0x008a, 
0x1887: 0x000a, 0x1888: 0x000a, 0x1889: 0x000a, 0x188a: 0x000a, 0x188b: 0x000a, + 0x188c: 0x000a, 0x188d: 0x000a, 0x188e: 0x000a, 0x188f: 0x000a, 0x1890: 0x000a, 0x1891: 0x000a, + 0x1892: 0x000a, 0x1893: 0x000a, 0x1894: 0x000a, 0x1895: 0x000a, 0x1896: 0x000a, 0x1897: 0x000a, + 0x1898: 0x000a, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x003a, 0x18a7: 0x002a, 0x18a8: 0x003a, 0x18a9: 0x002a, + 0x18aa: 0x003a, 0x18ab: 0x002a, 0x18ac: 0x003a, 0x18ad: 0x002a, 0x18ae: 0x003a, 0x18af: 0x002a, + 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x007a, 0x18c4: 0x006a, 0x18c5: 0x009a, + 0x18c6: 0x008a, 0x18c7: 0x00ba, 0x18c8: 0x00aa, 0x18c9: 0x009a, 0x18ca: 0x008a, 0x18cb: 0x007a, + 0x18cc: 0x006a, 0x18cd: 0x00da, 0x18ce: 0x002a, 0x18cf: 0x003a, 0x18d0: 0x00ca, 0x18d1: 0x009a, + 0x18d2: 0x008a, 0x18d3: 0x007a, 0x18d4: 0x006a, 0x18d5: 0x009a, 0x18d6: 0x008a, 0x18d7: 0x00ba, + 0x18d8: 0x00aa, 0x18d9: 0x000a, 0x18da: 0x000a, 0x18db: 0x000a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, + 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x000a, 0x18fd: 0x000a, 0x18fe: 0x000a, 0x18ff: 
0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, + 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, + 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, + 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, + 0x1918: 0x003a, 0x1919: 0x002a, 0x191a: 0x003a, 0x191b: 0x002a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, 0x1934: 0x000a, 0x1935: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x003a, 0x193d: 0x002a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, 0x1956: 0x000a, 0x1957: 0x000a, + 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 
0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, + 0x197c: 0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x1989: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, 0x1997: 0x000a, + 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, + 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, + 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, 0x19b4: 0x000a, 0x19b5: 0x000a, + 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, 0x19ba: 0x000a, 0x19bb: 0x000a, + 0x19bc: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, 0x19bf: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19e5: 0x000a, 0x19e6: 0x000a, 0x19e7: 0x000a, 0x19e8: 0x000a, 0x19e9: 0x000a, + 0x19ea: 0x000a, 0x19ef: 0x000c, + 0x19f0: 0x000c, 0x19f1: 0x000c, + 0x19f9: 0x000a, 0x19fa: 0x000a, 0x19fb: 0x000a, + 0x19fc: 0x000a, 0x19fd: 0x000a, 0x19fe: 0x000a, 0x19ff: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a3f: 0x000c, + // Block 0x69, offset 0x1a40 + 0x1a60: 0x000c, 0x1a61: 0x000c, 0x1a62: 0x000c, 0x1a63: 0x000c, + 0x1a64: 0x000c, 0x1a65: 0x000c, 0x1a66: 0x000c, 0x1a67: 0x000c, 0x1a68: 0x000c, 0x1a69: 0x000c, + 0x1a6a: 0x000c, 0x1a6b: 0x000c, 0x1a6c: 0x000c, 0x1a6d: 0x000c, 0x1a6e: 0x000c, 0x1a6f: 0x000c, + 0x1a70: 0x000c, 0x1a71: 0x000c, 0x1a72: 0x000c, 0x1a73: 0x000c, 0x1a74: 0x000c, 0x1a75: 0x000c, + 0x1a76: 0x000c, 0x1a77: 0x000c, 0x1a78: 0x000c, 0x1a79: 0x000c, 
0x1a7a: 0x000c, 0x1a7b: 0x000c, + 0x1a7c: 0x000c, 0x1a7d: 0x000c, 0x1a7e: 0x000c, 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x000a, 0x1a81: 0x000a, 0x1a82: 0x000a, 0x1a83: 0x000a, 0x1a84: 0x000a, 0x1a85: 0x000a, + 0x1a86: 0x000a, 0x1a87: 0x000a, 0x1a88: 0x000a, 0x1a89: 0x000a, 0x1a8a: 0x000a, 0x1a8b: 0x000a, + 0x1a8c: 0x000a, 0x1a8d: 0x000a, 0x1a8e: 0x000a, 0x1a8f: 0x000a, 0x1a90: 0x000a, 0x1a91: 0x000a, + 0x1a92: 0x000a, 0x1a93: 0x000a, 0x1a94: 0x000a, 0x1a95: 0x000a, 0x1a96: 0x000a, 0x1a97: 0x000a, + 0x1a98: 0x000a, 0x1a99: 0x000a, 0x1a9a: 0x000a, 0x1a9b: 0x000a, 0x1a9c: 0x000a, 0x1a9d: 0x000a, + 0x1a9e: 0x000a, 0x1a9f: 0x000a, 0x1aa0: 0x000a, 0x1aa1: 0x000a, 0x1aa2: 0x003a, 0x1aa3: 0x002a, + 0x1aa4: 0x003a, 0x1aa5: 0x002a, 0x1aa6: 0x003a, 0x1aa7: 0x002a, 0x1aa8: 0x003a, 0x1aa9: 0x002a, + 0x1aaa: 0x000a, 0x1aab: 0x000a, 0x1aac: 0x000a, 0x1aad: 0x000a, 0x1aae: 0x000a, 0x1aaf: 0x000a, + 0x1ab0: 0x000a, 0x1ab1: 0x000a, 0x1ab2: 0x000a, 0x1ab3: 0x000a, 0x1ab4: 0x000a, 0x1ab5: 0x000a, + 0x1ab6: 0x000a, 0x1ab7: 0x000a, 0x1ab8: 0x000a, 0x1ab9: 0x000a, 0x1aba: 0x000a, 0x1abb: 0x000a, + 0x1abc: 0x000a, 0x1abd: 0x000a, 0x1abe: 0x000a, 0x1abf: 0x000a, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, 0x1acf: 0x000a, 0x1ad0: 0x000a, 0x1ad1: 0x000a, + 0x1ad2: 0x000a, 0x1ad3: 0x000a, 0x1ad4: 0x000a, 0x1ad5: 0x009a, 0x1ad6: 0x008a, 0x1ad7: 0x00ba, + 0x1ad8: 0x00aa, 0x1ad9: 0x009a, 0x1ada: 0x008a, 0x1adb: 0x007a, 0x1adc: 0x006a, 0x1add: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a, + 0x1b06: 0x000a, 0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 0x1b0a: 0x000a, 0x1b0b: 0x000a, + 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 
0x000a, 0x1b11: 0x000a, + 0x1b12: 0x000a, 0x1b13: 0x000a, 0x1b14: 0x000a, 0x1b15: 0x000a, 0x1b16: 0x000a, 0x1b17: 0x000a, + 0x1b18: 0x000a, 0x1b19: 0x000a, 0x1b1b: 0x000a, 0x1b1c: 0x000a, 0x1b1d: 0x000a, + 0x1b1e: 0x000a, 0x1b1f: 0x000a, 0x1b20: 0x000a, 0x1b21: 0x000a, 0x1b22: 0x000a, 0x1b23: 0x000a, + 0x1b24: 0x000a, 0x1b25: 0x000a, 0x1b26: 0x000a, 0x1b27: 0x000a, 0x1b28: 0x000a, 0x1b29: 0x000a, + 0x1b2a: 0x000a, 0x1b2b: 0x000a, 0x1b2c: 0x000a, 0x1b2d: 0x000a, 0x1b2e: 0x000a, 0x1b2f: 0x000a, + 0x1b30: 0x000a, 0x1b31: 0x000a, 0x1b32: 0x000a, 0x1b33: 0x000a, 0x1b34: 0x000a, 0x1b35: 0x000a, + 0x1b36: 0x000a, 0x1b37: 0x000a, 0x1b38: 0x000a, 0x1b39: 0x000a, 0x1b3a: 0x000a, 0x1b3b: 0x000a, + 0x1b3c: 0x000a, 0x1b3d: 0x000a, 0x1b3e: 0x000a, 0x1b3f: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5a: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 
0x1b94: 0x000a, 0x1b95: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, 0x1bb4: 0x000a, 0x1bb5: 0x000a, + 0x1bb6: 0x000a, 0x1bb7: 0x000a, 0x1bb8: 0x000a, 0x1bb9: 0x000a, 0x1bba: 0x000a, 0x1bbb: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x0009, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, + 0x1bc8: 0x003a, 0x1bc9: 0x002a, 0x1bca: 0x003a, 0x1bcb: 0x002a, + 0x1bcc: 0x003a, 0x1bcd: 0x002a, 0x1bce: 0x003a, 0x1bcf: 0x002a, 0x1bd0: 0x003a, 0x1bd1: 0x002a, + 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x003a, 0x1bd5: 0x002a, 0x1bd6: 0x003a, 0x1bd7: 0x002a, + 0x1bd8: 0x003a, 0x1bd9: 0x002a, 0x1bda: 0x003a, 0x1bdb: 0x002a, 0x1bdc: 0x000a, 0x1bdd: 0x000a, + 0x1bde: 0x000a, 0x1bdf: 0x000a, 0x1be0: 0x000a, + 0x1bea: 0x000c, 0x1beb: 0x000c, 0x1bec: 0x000c, 0x1bed: 0x000c, + 0x1bf0: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, + 0x1bfd: 0x000a, 0x1bfe: 0x000a, 0x1bff: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c19: 0x000c, 0x1c1a: 0x000c, 0x1c1b: 0x000a, 0x1c1c: 0x000a, + 0x1c20: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c7b: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x000a, 0x1c81: 0x000a, 0x1c82: 0x000a, 0x1c83: 0x000a, 0x1c84: 0x000a, 0x1c85: 0x000a, + 0x1c86: 0x000a, 0x1c87: 0x000a, 0x1c88: 0x000a, 0x1c89: 0x000a, 0x1c8a: 0x000a, 0x1c8b: 0x000a, + 0x1c8c: 0x000a, 0x1c8d: 0x000a, 0x1c8e: 0x000a, 0x1c8f: 0x000a, 0x1c90: 0x000a, 0x1c91: 0x000a, + 0x1c92: 0x000a, 0x1c93: 0x000a, 0x1c94: 0x000a, 0x1c95: 0x000a, 0x1c96: 0x000a, 0x1c97: 0x000a, + 0x1c98: 0x000a, 0x1c99: 0x000a, 0x1c9a: 0x000a, 0x1c9b: 0x000a, 0x1c9c: 0x000a, 0x1c9d: 0x000a, + 0x1c9e: 0x000a, 0x1c9f: 0x000a, 0x1ca0: 0x000a, 0x1ca1: 0x000a, 0x1ca2: 0x000a, 0x1ca3: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cdd: 0x000a, + 0x1cde: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d10: 0x000a, 0x1d11: 0x000a, + 0x1d12: 0x000a, 0x1d13: 0x000a, 0x1d14: 0x000a, 0x1d15: 0x000a, 0x1d16: 0x000a, 0x1d17: 0x000a, + 0x1d18: 0x000a, 0x1d19: 0x000a, 0x1d1a: 0x000a, 0x1d1b: 0x000a, 
0x1d1c: 0x000a, 0x1d1d: 0x000a, + 0x1d1e: 0x000a, 0x1d1f: 0x000a, + 0x1d3c: 0x000a, 0x1d3d: 0x000a, 0x1d3e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d71: 0x000a, 0x1d72: 0x000a, 0x1d73: 0x000a, 0x1d74: 0x000a, 0x1d75: 0x000a, + 0x1d76: 0x000a, 0x1d77: 0x000a, 0x1d78: 0x000a, 0x1d79: 0x000a, 0x1d7a: 0x000a, 0x1d7b: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, 0x1d7f: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1d8c: 0x000a, 0x1d8d: 0x000a, 0x1d8e: 0x000a, 0x1d8f: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1df7: 0x000a, 0x1df8: 0x000a, 0x1df9: 0x000a, 0x1dfa: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e1e: 0x000a, 0x1e1f: 0x000a, + 0x1e3f: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e50: 0x000a, 0x1e51: 0x000a, + 0x1e52: 0x000a, 0x1e53: 0x000a, 0x1e54: 0x000a, 0x1e55: 0x000a, 0x1e56: 0x000a, 0x1e57: 0x000a, + 0x1e58: 0x000a, 0x1e59: 0x000a, 0x1e5a: 0x000a, 0x1e5b: 0x000a, 0x1e5c: 0x000a, 0x1e5d: 0x000a, + 0x1e5e: 0x000a, 0x1e5f: 0x000a, 0x1e60: 0x000a, 0x1e61: 0x000a, 0x1e62: 0x000a, 0x1e63: 0x000a, + 0x1e64: 0x000a, 0x1e65: 0x000a, 0x1e66: 0x000a, 0x1e67: 0x000a, 0x1e68: 0x000a, 0x1e69: 0x000a, + 0x1e6a: 0x000a, 0x1e6b: 0x000a, 0x1e6c: 0x000a, 0x1e6d: 0x000a, 0x1e6e: 0x000a, 0x1e6f: 0x000a, + 0x1e70: 0x000a, 0x1e71: 0x000a, 0x1e72: 0x000a, 0x1e73: 0x000a, 0x1e74: 0x000a, 0x1e75: 0x000a, + 0x1e76: 0x000a, 0x1e77: 0x000a, 0x1e78: 0x000a, 0x1e79: 0x000a, 0x1e7a: 0x000a, 0x1e7b: 0x000a, + 0x1e7c: 0x000a, 0x1e7d: 0x000a, 0x1e7e: 0x000a, 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0x000a, 0x1e81: 0x000a, 0x1e82: 0x000a, 0x1e83: 0x000a, 0x1e84: 0x000a, 0x1e85: 0x000a, + 0x1e86: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ecd: 0x000a, 0x1ece: 0x000a, 0x1ecf: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f2f: 0x000c, + 0x1f30: 0x000c, 0x1f31: 0x000c, 0x1f32: 0x000c, 0x1f33: 0x000a, 0x1f34: 0x000c, 0x1f35: 0x000c, + 0x1f36: 0x000c, 0x1f37: 0x000c, 0x1f38: 0x000c, 0x1f39: 0x000c, 0x1f3a: 0x000c, 0x1f3b: 0x000c, + 0x1f3c: 0x000c, 0x1f3d: 0x000c, 
0x1f3e: 0x000a, 0x1f3f: 0x000a, + // Block 0x7d, offset 0x1f40 + 0x1f5e: 0x000c, 0x1f5f: 0x000c, + // Block 0x7e, offset 0x1f80 + 0x1fb0: 0x000c, 0x1fb1: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0x000a, 0x1fc1: 0x000a, 0x1fc2: 0x000a, 0x1fc3: 0x000a, 0x1fc4: 0x000a, 0x1fc5: 0x000a, + 0x1fc6: 0x000a, 0x1fc7: 0x000a, 0x1fc8: 0x000a, 0x1fc9: 0x000a, 0x1fca: 0x000a, 0x1fcb: 0x000a, + 0x1fcc: 0x000a, 0x1fcd: 0x000a, 0x1fce: 0x000a, 0x1fcf: 0x000a, 0x1fd0: 0x000a, 0x1fd1: 0x000a, + 0x1fd2: 0x000a, 0x1fd3: 0x000a, 0x1fd4: 0x000a, 0x1fd5: 0x000a, 0x1fd6: 0x000a, 0x1fd7: 0x000a, + 0x1fd8: 0x000a, 0x1fd9: 0x000a, 0x1fda: 0x000a, 0x1fdb: 0x000a, 0x1fdc: 0x000a, 0x1fdd: 0x000a, + 0x1fde: 0x000a, 0x1fdf: 0x000a, 0x1fe0: 0x000a, 0x1fe1: 0x000a, + // Block 0x80, offset 0x2000 + 0x2008: 0x000a, + // Block 0x81, offset 0x2040 + 0x2042: 0x000c, + 0x2046: 0x000c, 0x204b: 0x000c, + 0x2065: 0x000c, 0x2066: 0x000c, 0x2068: 0x000a, 0x2069: 0x000a, + 0x206a: 0x000a, 0x206b: 0x000a, 0x206c: 0x000c, + 0x2078: 0x0004, 0x2079: 0x0004, + // Block 0x82, offset 0x2080 + 0x20b4: 0x000a, 0x20b5: 0x000a, + 0x20b6: 0x000a, 0x20b7: 0x000a, + // Block 0x83, offset 0x20c0 + 0x20c4: 0x000c, 0x20c5: 0x000c, + 0x20e0: 0x000c, 0x20e1: 0x000c, 0x20e2: 0x000c, 0x20e3: 0x000c, + 0x20e4: 0x000c, 0x20e5: 0x000c, 0x20e6: 0x000c, 0x20e7: 0x000c, 0x20e8: 0x000c, 0x20e9: 0x000c, + 0x20ea: 0x000c, 0x20eb: 0x000c, 0x20ec: 0x000c, 0x20ed: 0x000c, 0x20ee: 0x000c, 0x20ef: 0x000c, + 0x20f0: 0x000c, 0x20f1: 0x000c, + 0x20ff: 0x000c, + // Block 0x84, offset 0x2100 + 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, + // Block 0x85, offset 0x2140 + 0x2147: 0x000c, 0x2148: 0x000c, 0x2149: 0x000c, 0x214a: 0x000c, 0x214b: 0x000c, + 0x214c: 0x000c, 0x214d: 0x000c, 0x214e: 0x000c, 0x214f: 0x000c, 0x2150: 0x000c, 0x2151: 0x000c, + // Block 0x86, offset 0x2180 + 0x2180: 0x000c, 0x2181: 0x000c, 0x2182: 0x000c, + 0x21b3: 0x000c, + 0x21b6: 0x000c, 
0x21b7: 0x000c, 0x21b8: 0x000c, 0x21b9: 0x000c, + 0x21bc: 0x000c, 0x21bd: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21e5: 0x000c, + // Block 0x88, offset 0x2200 + 0x2229: 0x000c, + 0x222a: 0x000c, 0x222b: 0x000c, 0x222c: 0x000c, 0x222d: 0x000c, 0x222e: 0x000c, + 0x2231: 0x000c, 0x2232: 0x000c, 0x2235: 0x000c, + 0x2236: 0x000c, + // Block 0x89, offset 0x2240 + 0x2243: 0x000c, + 0x224c: 0x000c, + 0x227c: 0x000c, + // Block 0x8a, offset 0x2280 + 0x22b0: 0x000c, 0x22b2: 0x000c, 0x22b3: 0x000c, 0x22b4: 0x000c, + 0x22b7: 0x000c, 0x22b8: 0x000c, + 0x22be: 0x000c, 0x22bf: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22c1: 0x000c, + 0x22ec: 0x000c, 0x22ed: 0x000c, + 0x22f6: 0x000c, + // Block 0x8c, offset 0x2300 + 0x232a: 0x000a, 0x232b: 0x000a, + // Block 0x8d, offset 0x2340 + 0x2365: 0x000c, 0x2368: 0x000c, + 0x236d: 0x000c, + // Block 0x8e, offset 0x2380 + 0x239d: 0x0001, + 0x239e: 0x000c, 0x239f: 0x0001, 0x23a0: 0x0001, 0x23a1: 0x0001, 0x23a2: 0x0001, 0x23a3: 0x0001, + 0x23a4: 0x0001, 0x23a5: 0x0001, 0x23a6: 0x0001, 0x23a7: 0x0001, 0x23a8: 0x0001, 0x23a9: 0x0003, + 0x23aa: 0x0001, 0x23ab: 0x0001, 0x23ac: 0x0001, 0x23ad: 0x0001, 0x23ae: 0x0001, 0x23af: 0x0001, + 0x23b0: 0x0001, 0x23b1: 0x0001, 0x23b2: 0x0001, 0x23b3: 0x0001, 0x23b4: 0x0001, 0x23b5: 0x0001, + 0x23b6: 0x0001, 0x23b7: 0x0001, 0x23b8: 0x0001, 0x23b9: 0x0001, 0x23ba: 0x0001, 0x23bb: 0x0001, + 0x23bc: 0x0001, 0x23bd: 0x0001, 0x23be: 0x0001, 0x23bf: 0x0001, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x0001, 0x23c1: 0x0001, 0x23c2: 0x0001, 0x23c3: 0x0001, 0x23c4: 0x0001, 0x23c5: 0x0001, + 0x23c6: 0x0001, 0x23c7: 0x0001, 0x23c8: 0x0001, 0x23c9: 0x0001, 0x23ca: 0x0001, 0x23cb: 0x0001, + 0x23cc: 0x0001, 0x23cd: 0x0001, 0x23ce: 0x0001, 0x23cf: 0x0001, 0x23d0: 0x000d, 0x23d1: 0x000d, + 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d, + 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d, + 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 
0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d, + 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d, + 0x23ea: 0x000d, 0x23eb: 0x000d, 0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d, + 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, + 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, + 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000d, 0x23ff: 0x000d, + // Block 0x90, offset 0x2400 + 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d, + 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 0x000d, + 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000d, 0x2411: 0x000d, + 0x2412: 0x000d, 0x2413: 0x000d, 0x2414: 0x000d, 0x2415: 0x000d, 0x2416: 0x000d, 0x2417: 0x000d, + 0x2418: 0x000d, 0x2419: 0x000d, 0x241a: 0x000d, 0x241b: 0x000d, 0x241c: 0x000d, 0x241d: 0x000d, + 0x241e: 0x000d, 0x241f: 0x000d, 0x2420: 0x000d, 0x2421: 0x000d, 0x2422: 0x000d, 0x2423: 0x000d, + 0x2424: 0x000d, 0x2425: 0x000d, 0x2426: 0x000d, 0x2427: 0x000d, 0x2428: 0x000d, 0x2429: 0x000d, + 0x242a: 0x000d, 0x242b: 0x000d, 0x242c: 0x000d, 0x242d: 0x000d, 0x242e: 0x000d, 0x242f: 0x000d, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, + 0x243c: 0x000d, 0x243d: 0x000d, 0x243e: 0x000a, 0x243f: 0x000a, + // Block 0x91, offset 0x2440 + 0x2440: 0x000a, 0x2441: 0x000a, 0x2442: 0x000a, 0x2443: 0x000a, 0x2444: 0x000a, 0x2445: 0x000a, + 0x2446: 0x000a, 0x2447: 0x000a, 0x2448: 0x000a, 0x2449: 0x000a, 0x244a: 0x000a, 0x244b: 0x000a, + 0x244c: 0x000a, 0x244d: 0x000a, 0x244e: 0x000a, 0x244f: 0x000a, 0x2450: 0x000d, 0x2451: 0x000d, + 0x2452: 0x000d, 0x2453: 0x000d, 0x2454: 0x000d, 0x2455: 0x000d, 0x2456: 0x000d, 
0x2457: 0x000d, + 0x2458: 0x000d, 0x2459: 0x000d, 0x245a: 0x000d, 0x245b: 0x000d, 0x245c: 0x000d, 0x245d: 0x000d, + 0x245e: 0x000d, 0x245f: 0x000d, 0x2460: 0x000d, 0x2461: 0x000d, 0x2462: 0x000d, 0x2463: 0x000d, + 0x2464: 0x000d, 0x2465: 0x000d, 0x2466: 0x000d, 0x2467: 0x000d, 0x2468: 0x000d, 0x2469: 0x000d, + 0x246a: 0x000d, 0x246b: 0x000d, 0x246c: 0x000d, 0x246d: 0x000d, 0x246e: 0x000d, 0x246f: 0x000d, + 0x2470: 0x000d, 0x2471: 0x000d, 0x2472: 0x000d, 0x2473: 0x000d, 0x2474: 0x000d, 0x2475: 0x000d, + 0x2476: 0x000d, 0x2477: 0x000d, 0x2478: 0x000d, 0x2479: 0x000d, 0x247a: 0x000d, 0x247b: 0x000d, + 0x247c: 0x000d, 0x247d: 0x000d, 0x247e: 0x000d, 0x247f: 0x000d, + // Block 0x92, offset 0x2480 + 0x2480: 0x000d, 0x2481: 0x000d, 0x2482: 0x000d, 0x2483: 0x000d, 0x2484: 0x000d, 0x2485: 0x000d, + 0x2486: 0x000d, 0x2487: 0x000d, 0x2488: 0x000d, 0x2489: 0x000d, 0x248a: 0x000d, 0x248b: 0x000d, + 0x248c: 0x000d, 0x248d: 0x000d, 0x248e: 0x000d, 0x248f: 0x000a, 0x2490: 0x000b, 0x2491: 0x000b, + 0x2492: 0x000b, 0x2493: 0x000b, 0x2494: 0x000b, 0x2495: 0x000b, 0x2496: 0x000b, 0x2497: 0x000b, + 0x2498: 0x000b, 0x2499: 0x000b, 0x249a: 0x000b, 0x249b: 0x000b, 0x249c: 0x000b, 0x249d: 0x000b, + 0x249e: 0x000b, 0x249f: 0x000b, 0x24a0: 0x000b, 0x24a1: 0x000b, 0x24a2: 0x000b, 0x24a3: 0x000b, + 0x24a4: 0x000b, 0x24a5: 0x000b, 0x24a6: 0x000b, 0x24a7: 0x000b, 0x24a8: 0x000b, 0x24a9: 0x000b, + 0x24aa: 0x000b, 0x24ab: 0x000b, 0x24ac: 0x000b, 0x24ad: 0x000b, 0x24ae: 0x000b, 0x24af: 0x000b, + 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d, + 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d, + 0x24bc: 0x000d, 0x24bd: 0x000a, 0x24be: 0x000a, 0x24bf: 0x000a, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000c, 0x24c1: 0x000c, 0x24c2: 0x000c, 0x24c3: 0x000c, 0x24c4: 0x000c, 0x24c5: 0x000c, + 0x24c6: 0x000c, 0x24c7: 0x000c, 0x24c8: 0x000c, 0x24c9: 0x000c, 0x24ca: 0x000c, 0x24cb: 0x000c, + 0x24cc: 0x000c, 0x24cd: 
0x000c, 0x24ce: 0x000c, 0x24cf: 0x000c, 0x24d0: 0x000a, 0x24d1: 0x000a, + 0x24d2: 0x000a, 0x24d3: 0x000a, 0x24d4: 0x000a, 0x24d5: 0x000a, 0x24d6: 0x000a, 0x24d7: 0x000a, + 0x24d8: 0x000a, 0x24d9: 0x000a, + 0x24e0: 0x000c, 0x24e1: 0x000c, 0x24e2: 0x000c, 0x24e3: 0x000c, + 0x24e4: 0x000c, 0x24e5: 0x000c, 0x24e6: 0x000c, 0x24e7: 0x000c, 0x24e8: 0x000c, 0x24e9: 0x000c, + 0x24ea: 0x000c, 0x24eb: 0x000c, 0x24ec: 0x000c, 0x24ed: 0x000c, 0x24ee: 0x000c, 0x24ef: 0x000c, + 0x24f0: 0x000a, 0x24f1: 0x000a, 0x24f2: 0x000a, 0x24f3: 0x000a, 0x24f4: 0x000a, 0x24f5: 0x000a, + 0x24f6: 0x000a, 0x24f7: 0x000a, 0x24f8: 0x000a, 0x24f9: 0x000a, 0x24fa: 0x000a, 0x24fb: 0x000a, + 0x24fc: 0x000a, 0x24fd: 0x000a, 0x24fe: 0x000a, 0x24ff: 0x000a, + // Block 0x94, offset 0x2500 + 0x2500: 0x000a, 0x2501: 0x000a, 0x2502: 0x000a, 0x2503: 0x000a, 0x2504: 0x000a, 0x2505: 0x000a, + 0x2506: 0x000a, 0x2507: 0x000a, 0x2508: 0x000a, 0x2509: 0x000a, 0x250a: 0x000a, 0x250b: 0x000a, + 0x250c: 0x000a, 0x250d: 0x000a, 0x250e: 0x000a, 0x250f: 0x000a, 0x2510: 0x0006, 0x2511: 0x000a, + 0x2512: 0x0006, 0x2514: 0x000a, 0x2515: 0x0006, 0x2516: 0x000a, 0x2517: 0x000a, + 0x2518: 0x000a, 0x2519: 0x009a, 0x251a: 0x008a, 0x251b: 0x007a, 0x251c: 0x006a, 0x251d: 0x009a, + 0x251e: 0x008a, 0x251f: 0x0004, 0x2520: 0x000a, 0x2521: 0x000a, 0x2522: 0x0003, 0x2523: 0x0003, + 0x2524: 0x000a, 0x2525: 0x000a, 0x2526: 0x000a, 0x2528: 0x000a, 0x2529: 0x0004, + 0x252a: 0x0004, 0x252b: 0x000a, + 0x2530: 0x000d, 0x2531: 0x000d, 0x2532: 0x000d, 0x2533: 0x000d, 0x2534: 0x000d, 0x2535: 0x000d, + 0x2536: 0x000d, 0x2537: 0x000d, 0x2538: 0x000d, 0x2539: 0x000d, 0x253a: 0x000d, 0x253b: 0x000d, + 0x253c: 0x000d, 0x253d: 0x000d, 0x253e: 0x000d, 0x253f: 0x000d, + // Block 0x95, offset 0x2540 + 0x2540: 0x000d, 0x2541: 0x000d, 0x2542: 0x000d, 0x2543: 0x000d, 0x2544: 0x000d, 0x2545: 0x000d, + 0x2546: 0x000d, 0x2547: 0x000d, 0x2548: 0x000d, 0x2549: 0x000d, 0x254a: 0x000d, 0x254b: 0x000d, + 0x254c: 0x000d, 0x254d: 0x000d, 0x254e: 0x000d, 0x254f: 
0x000d, 0x2550: 0x000d, 0x2551: 0x000d, + 0x2552: 0x000d, 0x2553: 0x000d, 0x2554: 0x000d, 0x2555: 0x000d, 0x2556: 0x000d, 0x2557: 0x000d, + 0x2558: 0x000d, 0x2559: 0x000d, 0x255a: 0x000d, 0x255b: 0x000d, 0x255c: 0x000d, 0x255d: 0x000d, + 0x255e: 0x000d, 0x255f: 0x000d, 0x2560: 0x000d, 0x2561: 0x000d, 0x2562: 0x000d, 0x2563: 0x000d, + 0x2564: 0x000d, 0x2565: 0x000d, 0x2566: 0x000d, 0x2567: 0x000d, 0x2568: 0x000d, 0x2569: 0x000d, + 0x256a: 0x000d, 0x256b: 0x000d, 0x256c: 0x000d, 0x256d: 0x000d, 0x256e: 0x000d, 0x256f: 0x000d, + 0x2570: 0x000d, 0x2571: 0x000d, 0x2572: 0x000d, 0x2573: 0x000d, 0x2574: 0x000d, 0x2575: 0x000d, + 0x2576: 0x000d, 0x2577: 0x000d, 0x2578: 0x000d, 0x2579: 0x000d, 0x257a: 0x000d, 0x257b: 0x000d, + 0x257c: 0x000d, 0x257d: 0x000d, 0x257e: 0x000d, 0x257f: 0x000b, + // Block 0x96, offset 0x2580 + 0x2581: 0x000a, 0x2582: 0x000a, 0x2583: 0x0004, 0x2584: 0x0004, 0x2585: 0x0004, + 0x2586: 0x000a, 0x2587: 0x000a, 0x2588: 0x003a, 0x2589: 0x002a, 0x258a: 0x000a, 0x258b: 0x0003, + 0x258c: 0x0006, 0x258d: 0x0003, 0x258e: 0x0006, 0x258f: 0x0006, 0x2590: 0x0002, 0x2591: 0x0002, + 0x2592: 0x0002, 0x2593: 0x0002, 0x2594: 0x0002, 0x2595: 0x0002, 0x2596: 0x0002, 0x2597: 0x0002, + 0x2598: 0x0002, 0x2599: 0x0002, 0x259a: 0x0006, 0x259b: 0x000a, 0x259c: 0x000a, 0x259d: 0x000a, + 0x259e: 0x000a, 0x259f: 0x000a, 0x25a0: 0x000a, + 0x25bb: 0x005a, + 0x25bc: 0x000a, 0x25bd: 0x004a, 0x25be: 0x000a, 0x25bf: 0x000a, + // Block 0x97, offset 0x25c0 + 0x25c0: 0x000a, + 0x25db: 0x005a, 0x25dc: 0x000a, 0x25dd: 0x004a, + 0x25de: 0x000a, 0x25df: 0x00fa, 0x25e0: 0x00ea, 0x25e1: 0x000a, 0x25e2: 0x003a, 0x25e3: 0x002a, + 0x25e4: 0x000a, 0x25e5: 0x000a, + // Block 0x98, offset 0x2600 + 0x2620: 0x0004, 0x2621: 0x0004, 0x2622: 0x000a, 0x2623: 0x000a, + 0x2624: 0x000a, 0x2625: 0x0004, 0x2626: 0x0004, 0x2628: 0x000a, 0x2629: 0x000a, + 0x262a: 0x000a, 0x262b: 0x000a, 0x262c: 0x000a, 0x262d: 0x000a, 0x262e: 0x000a, + 0x2630: 0x000b, 0x2631: 0x000b, 0x2632: 0x000b, 0x2633: 0x000b, 0x2634: 
0x000b, 0x2635: 0x000b, + 0x2636: 0x000b, 0x2637: 0x000b, 0x2638: 0x000b, 0x2639: 0x000a, 0x263a: 0x000a, 0x263b: 0x000a, + 0x263c: 0x000a, 0x263d: 0x000a, 0x263e: 0x000b, 0x263f: 0x000b, + // Block 0x99, offset 0x2640 + 0x2641: 0x000a, + // Block 0x9a, offset 0x2680 + 0x2680: 0x000a, 0x2681: 0x000a, 0x2682: 0x000a, 0x2683: 0x000a, 0x2684: 0x000a, 0x2685: 0x000a, + 0x2686: 0x000a, 0x2687: 0x000a, 0x2688: 0x000a, 0x2689: 0x000a, 0x268a: 0x000a, 0x268b: 0x000a, + 0x268c: 0x000a, 0x2690: 0x000a, 0x2691: 0x000a, + 0x2692: 0x000a, 0x2693: 0x000a, 0x2694: 0x000a, 0x2695: 0x000a, 0x2696: 0x000a, 0x2697: 0x000a, + 0x2698: 0x000a, 0x2699: 0x000a, 0x269a: 0x000a, 0x269b: 0x000a, 0x269c: 0x000a, + 0x26a0: 0x000a, + // Block 0x9b, offset 0x26c0 + 0x26fd: 0x000c, + // Block 0x9c, offset 0x2700 + 0x2720: 0x000c, 0x2721: 0x0002, 0x2722: 0x0002, 0x2723: 0x0002, + 0x2724: 0x0002, 0x2725: 0x0002, 0x2726: 0x0002, 0x2727: 0x0002, 0x2728: 0x0002, 0x2729: 0x0002, + 0x272a: 0x0002, 0x272b: 0x0002, 0x272c: 0x0002, 0x272d: 0x0002, 0x272e: 0x0002, 0x272f: 0x0002, + 0x2730: 0x0002, 0x2731: 0x0002, 0x2732: 0x0002, 0x2733: 0x0002, 0x2734: 0x0002, 0x2735: 0x0002, + 0x2736: 0x0002, 0x2737: 0x0002, 0x2738: 0x0002, 0x2739: 0x0002, 0x273a: 0x0002, 0x273b: 0x0002, + // Block 0x9d, offset 0x2740 + 0x2776: 0x000c, 0x2777: 0x000c, 0x2778: 0x000c, 0x2779: 0x000c, 0x277a: 0x000c, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x0001, 0x2782: 0x0001, 0x2783: 0x0001, 0x2784: 0x0001, 0x2785: 0x0001, + 0x2786: 0x0001, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x0001, 0x278d: 0x0001, 0x278e: 0x0001, 0x278f: 0x0001, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 
0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x0001, 0x27b9: 0x0001, 0x27ba: 0x0001, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x0001, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001, + 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x000a, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x0001, 0x27e6: 0x0001, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x0001, 0x27fa: 0x0001, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x0001, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x000c, 0x2802: 0x000c, 0x2803: 0x000c, 0x2804: 0x0001, 0x2805: 0x000c, + 0x2806: 0x000c, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x000c, 0x280d: 0x000c, 0x280e: 0x000c, 0x280f: 0x000c, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 
0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x0001, 0x2826: 0x0001, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x000c, 0x2839: 0x000c, 0x283a: 0x000c, 0x283b: 0x0001, + 0x283c: 0x0001, 0x283d: 0x0001, 0x283e: 0x0001, 0x283f: 0x000c, + // Block 0xa1, offset 0x2840 + 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, + 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, + 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, + 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, + 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, + 0x285e: 0x0001, 0x285f: 0x0001, 0x2860: 0x0001, 0x2861: 0x0001, 0x2862: 0x0001, 0x2863: 0x0001, + 0x2864: 0x0001, 0x2865: 0x000c, 0x2866: 0x000c, 0x2867: 0x0001, 0x2868: 0x0001, 0x2869: 0x0001, + 0x286a: 0x0001, 0x286b: 0x0001, 0x286c: 0x0001, 0x286d: 0x0001, 0x286e: 0x0001, 0x286f: 0x0001, + 0x2870: 0x0001, 0x2871: 0x0001, 0x2872: 0x0001, 0x2873: 0x0001, 0x2874: 0x0001, 0x2875: 0x0001, + 0x2876: 0x0001, 0x2877: 0x0001, 0x2878: 0x0001, 0x2879: 0x0001, 0x287a: 0x0001, 0x287b: 0x0001, + 0x287c: 0x0001, 0x287d: 0x0001, 0x287e: 0x0001, 0x287f: 0x0001, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 
0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0001, 0x28a1: 0x0001, 0x28a2: 0x0001, 0x28a3: 0x0001, + 0x28a4: 0x0001, 0x28a5: 0x0001, 0x28a6: 0x0001, 0x28a7: 0x0001, 0x28a8: 0x0001, 0x28a9: 0x0001, + 0x28aa: 0x0001, 0x28ab: 0x0001, 0x28ac: 0x0001, 0x28ad: 0x0001, 0x28ae: 0x0001, 0x28af: 0x0001, + 0x28b0: 0x0001, 0x28b1: 0x0001, 0x28b2: 0x0001, 0x28b3: 0x0001, 0x28b4: 0x0001, 0x28b5: 0x0001, + 0x28b6: 0x0001, 0x28b7: 0x0001, 0x28b8: 0x0001, 0x28b9: 0x000a, 0x28ba: 0x000a, 0x28bb: 0x000a, + 0x28bc: 0x000a, 0x28bd: 0x000a, 0x28be: 0x000a, 0x28bf: 0x000a, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x000d, 0x28c1: 0x000d, 0x28c2: 0x000d, 0x28c3: 0x000d, 0x28c4: 0x000d, 0x28c5: 0x000d, + 0x28c6: 0x000d, 0x28c7: 0x000d, 0x28c8: 0x000d, 0x28c9: 0x000d, 0x28ca: 0x000d, 0x28cb: 0x000d, + 0x28cc: 0x000d, 0x28cd: 0x000d, 0x28ce: 0x000d, 0x28cf: 0x000d, 0x28d0: 0x000d, 0x28d1: 0x000d, + 0x28d2: 0x000d, 0x28d3: 0x000d, 0x28d4: 0x000d, 0x28d5: 0x000d, 0x28d6: 0x000d, 0x28d7: 0x000d, + 0x28d8: 0x000d, 0x28d9: 0x000d, 0x28da: 0x000d, 0x28db: 0x000d, 0x28dc: 0x000d, 0x28dd: 0x000d, + 0x28de: 0x000d, 0x28df: 0x000d, 0x28e0: 0x000d, 0x28e1: 0x000d, 0x28e2: 0x000d, 0x28e3: 0x000d, + 0x28e4: 0x000c, 0x28e5: 0x000c, 0x28e6: 0x000c, 0x28e7: 0x000c, 0x28e8: 0x0001, 0x28e9: 0x0001, + 0x28ea: 0x0001, 0x28eb: 0x0001, 0x28ec: 0x0001, 0x28ed: 0x0001, 0x28ee: 0x0001, 0x28ef: 0x0001, + 0x28f0: 0x0005, 0x28f1: 0x0005, 0x28f2: 0x0005, 0x28f3: 0x0005, 0x28f4: 0x0005, 0x28f5: 0x0005, + 0x28f6: 0x0005, 0x28f7: 0x0005, 0x28f8: 0x0005, 0x28f9: 0x0005, 0x28fa: 0x0001, 0x28fb: 0x0001, + 0x28fc: 0x0001, 0x28fd: 0x0001, 0x28fe: 0x0001, 0x28ff: 0x0001, + // Block 0xa4, offset 0x2900 + 0x2900: 0x0001, 0x2901: 0x0001, 0x2902: 0x0001, 0x2903: 0x0001, 0x2904: 0x0001, 0x2905: 0x0001, + 0x2906: 0x0001, 0x2907: 0x0001, 0x2908: 
0x0001, 0x2909: 0x0001, 0x290a: 0x0001, 0x290b: 0x0001, + 0x290c: 0x0001, 0x290d: 0x0001, 0x290e: 0x0001, 0x290f: 0x0001, 0x2910: 0x0001, 0x2911: 0x0001, + 0x2912: 0x0001, 0x2913: 0x0001, 0x2914: 0x0001, 0x2915: 0x0001, 0x2916: 0x0001, 0x2917: 0x0001, + 0x2918: 0x0001, 0x2919: 0x0001, 0x291a: 0x0001, 0x291b: 0x0001, 0x291c: 0x0001, 0x291d: 0x0001, + 0x291e: 0x0001, 0x291f: 0x0001, 0x2920: 0x0005, 0x2921: 0x0005, 0x2922: 0x0005, 0x2923: 0x0005, + 0x2924: 0x0005, 0x2925: 0x0005, 0x2926: 0x0005, 0x2927: 0x0005, 0x2928: 0x0005, 0x2929: 0x0005, + 0x292a: 0x0005, 0x292b: 0x0005, 0x292c: 0x0005, 0x292d: 0x0005, 0x292e: 0x0005, 0x292f: 0x0005, + 0x2930: 0x0005, 0x2931: 0x0005, 0x2932: 0x0005, 0x2933: 0x0005, 0x2934: 0x0005, 0x2935: 0x0005, + 0x2936: 0x0005, 0x2937: 0x0005, 0x2938: 0x0005, 0x2939: 0x0005, 0x293a: 0x0005, 0x293b: 0x0005, + 0x293c: 0x0005, 0x293d: 0x0005, 0x293e: 0x0005, 0x293f: 0x0001, + // Block 0xa5, offset 0x2940 + 0x2940: 0x0001, 0x2941: 0x0001, 0x2942: 0x0001, 0x2943: 0x0001, 0x2944: 0x0001, 0x2945: 0x0001, + 0x2946: 0x0001, 0x2947: 0x0001, 0x2948: 0x0001, 0x2949: 0x0001, 0x294a: 0x0001, 0x294b: 0x0001, + 0x294c: 0x0001, 0x294d: 0x0001, 0x294e: 0x0001, 0x294f: 0x0001, 0x2950: 0x0001, 0x2951: 0x0001, + 0x2952: 0x0001, 0x2953: 0x0001, 0x2954: 0x0001, 0x2955: 0x0001, 0x2956: 0x0001, 0x2957: 0x0001, + 0x2958: 0x0001, 0x2959: 0x0001, 0x295a: 0x0001, 0x295b: 0x0001, 0x295c: 0x0001, 0x295d: 0x0001, + 0x295e: 0x0001, 0x295f: 0x0001, 0x2960: 0x0001, 0x2961: 0x0001, 0x2962: 0x0001, 0x2963: 0x0001, + 0x2964: 0x0001, 0x2965: 0x0001, 0x2966: 0x0001, 0x2967: 0x0001, 0x2968: 0x0001, 0x2969: 0x0001, + 0x296a: 0x0001, 0x296b: 0x000c, 0x296c: 0x000c, 0x296d: 0x0001, 0x296e: 0x0001, 0x296f: 0x0001, + 0x2970: 0x0001, 0x2971: 0x0001, 0x2972: 0x0001, 0x2973: 0x0001, 0x2974: 0x0001, 0x2975: 0x0001, + 0x2976: 0x0001, 0x2977: 0x0001, 0x2978: 0x0001, 0x2979: 0x0001, 0x297a: 0x0001, 0x297b: 0x0001, + 0x297c: 0x0001, 0x297d: 0x0001, 0x297e: 0x0001, 0x297f: 0x0001, + // Block 0xa6, 
offset 0x2980 + 0x2980: 0x0001, 0x2981: 0x0001, 0x2982: 0x0001, 0x2983: 0x0001, 0x2984: 0x0001, 0x2985: 0x0001, + 0x2986: 0x0001, 0x2987: 0x0001, 0x2988: 0x0001, 0x2989: 0x0001, 0x298a: 0x0001, 0x298b: 0x0001, + 0x298c: 0x0001, 0x298d: 0x0001, 0x298e: 0x0001, 0x298f: 0x0001, 0x2990: 0x0001, 0x2991: 0x0001, + 0x2992: 0x0001, 0x2993: 0x0001, 0x2994: 0x0001, 0x2995: 0x0001, 0x2996: 0x0001, 0x2997: 0x0001, + 0x2998: 0x0001, 0x2999: 0x0001, 0x299a: 0x0001, 0x299b: 0x0001, 0x299c: 0x0001, 0x299d: 0x0001, + 0x299e: 0x0001, 0x299f: 0x0001, 0x29a0: 0x0001, 0x29a1: 0x0001, 0x29a2: 0x0001, 0x29a3: 0x0001, + 0x29a4: 0x0001, 0x29a5: 0x0001, 0x29a6: 0x0001, 0x29a7: 0x0001, 0x29a8: 0x0001, 0x29a9: 0x0001, + 0x29aa: 0x0001, 0x29ab: 0x0001, 0x29ac: 0x0001, 0x29ad: 0x0001, 0x29ae: 0x0001, 0x29af: 0x0001, + 0x29b0: 0x0001, 0x29b1: 0x0001, 0x29b2: 0x0001, 0x29b3: 0x0001, 0x29b4: 0x0001, 0x29b5: 0x0001, + 0x29b6: 0x0001, 0x29b7: 0x0001, 0x29b8: 0x0001, 0x29b9: 0x0001, 0x29ba: 0x0001, 0x29bb: 0x0001, + 0x29bc: 0x0001, 0x29bd: 0x000c, 0x29be: 0x000c, 0x29bf: 0x000c, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x0001, 0x29c1: 0x0001, 0x29c2: 0x0001, 0x29c3: 0x0001, 0x29c4: 0x0001, 0x29c5: 0x0001, + 0x29c6: 0x0001, 0x29c7: 0x0001, 0x29c8: 0x0001, 0x29c9: 0x0001, 0x29ca: 0x0001, 0x29cb: 0x0001, + 0x29cc: 0x0001, 0x29cd: 0x0001, 0x29ce: 0x0001, 0x29cf: 0x0001, 0x29d0: 0x0001, 0x29d1: 0x0001, + 0x29d2: 0x0001, 0x29d3: 0x0001, 0x29d4: 0x0001, 0x29d5: 0x0001, 0x29d6: 0x0001, 0x29d7: 0x0001, + 0x29d8: 0x0001, 0x29d9: 0x0001, 0x29da: 0x0001, 0x29db: 0x0001, 0x29dc: 0x0001, 0x29dd: 0x0001, + 0x29de: 0x0001, 0x29df: 0x0001, 0x29e0: 0x0001, 0x29e1: 0x0001, 0x29e2: 0x0001, 0x29e3: 0x0001, + 0x29e4: 0x0001, 0x29e5: 0x0001, 0x29e6: 0x0001, 0x29e7: 0x0001, 0x29e8: 0x0001, 0x29e9: 0x0001, + 0x29ea: 0x0001, 0x29eb: 0x0001, 0x29ec: 0x0001, 0x29ed: 0x0001, 0x29ee: 0x0001, 0x29ef: 0x0001, + 0x29f0: 0x000d, 0x29f1: 0x000d, 0x29f2: 0x000d, 0x29f3: 0x000d, 0x29f4: 0x000d, 0x29f5: 0x000d, + 0x29f6: 0x000d, 0x29f7: 
0x000d, 0x29f8: 0x000d, 0x29f9: 0x000d, 0x29fa: 0x000d, 0x29fb: 0x000d, + 0x29fc: 0x000d, 0x29fd: 0x000d, 0x29fe: 0x000d, 0x29ff: 0x000d, + // Block 0xa8, offset 0x2a00 + 0x2a00: 0x000d, 0x2a01: 0x000d, 0x2a02: 0x000d, 0x2a03: 0x000d, 0x2a04: 0x000d, 0x2a05: 0x000d, + 0x2a06: 0x000c, 0x2a07: 0x000c, 0x2a08: 0x000c, 0x2a09: 0x000c, 0x2a0a: 0x000c, 0x2a0b: 0x000c, + 0x2a0c: 0x000c, 0x2a0d: 0x000c, 0x2a0e: 0x000c, 0x2a0f: 0x000c, 0x2a10: 0x000c, 0x2a11: 0x000d, + 0x2a12: 0x000d, 0x2a13: 0x000d, 0x2a14: 0x000d, 0x2a15: 0x000d, 0x2a16: 0x000d, 0x2a17: 0x000d, + 0x2a18: 0x000d, 0x2a19: 0x000d, 0x2a1a: 0x0001, 0x2a1b: 0x0001, 0x2a1c: 0x0001, 0x2a1d: 0x0001, + 0x2a1e: 0x0001, 0x2a1f: 0x0001, 0x2a20: 0x0001, 0x2a21: 0x0001, 0x2a22: 0x0001, 0x2a23: 0x0001, + 0x2a24: 0x0001, 0x2a25: 0x0001, 0x2a26: 0x0001, 0x2a27: 0x0001, 0x2a28: 0x0001, 0x2a29: 0x0001, + 0x2a2a: 0x0001, 0x2a2b: 0x0001, 0x2a2c: 0x0001, 0x2a2d: 0x0001, 0x2a2e: 0x0001, 0x2a2f: 0x0001, + 0x2a30: 0x0001, 0x2a31: 0x0001, 0x2a32: 0x0001, 0x2a33: 0x0001, 0x2a34: 0x0001, 0x2a35: 0x0001, + 0x2a36: 0x0001, 0x2a37: 0x0001, 0x2a38: 0x0001, 0x2a39: 0x0001, 0x2a3a: 0x0001, 0x2a3b: 0x0001, + 0x2a3c: 0x0001, 0x2a3d: 0x0001, 0x2a3e: 0x0001, 0x2a3f: 0x0001, + // Block 0xa9, offset 0x2a40 + 0x2a40: 0x0001, 0x2a41: 0x0001, 0x2a42: 0x000c, 0x2a43: 0x000c, 0x2a44: 0x000c, 0x2a45: 0x000c, + 0x2a46: 0x0001, 0x2a47: 0x0001, 0x2a48: 0x0001, 0x2a49: 0x0001, 0x2a4a: 0x0001, 0x2a4b: 0x0001, + 0x2a4c: 0x0001, 0x2a4d: 0x0001, 0x2a4e: 0x0001, 0x2a4f: 0x0001, 0x2a50: 0x0001, 0x2a51: 0x0001, + 0x2a52: 0x0001, 0x2a53: 0x0001, 0x2a54: 0x0001, 0x2a55: 0x0001, 0x2a56: 0x0001, 0x2a57: 0x0001, + 0x2a58: 0x0001, 0x2a59: 0x0001, 0x2a5a: 0x0001, 0x2a5b: 0x0001, 0x2a5c: 0x0001, 0x2a5d: 0x0001, + 0x2a5e: 0x0001, 0x2a5f: 0x0001, 0x2a60: 0x0001, 0x2a61: 0x0001, 0x2a62: 0x0001, 0x2a63: 0x0001, + 0x2a64: 0x0001, 0x2a65: 0x0001, 0x2a66: 0x0001, 0x2a67: 0x0001, 0x2a68: 0x0001, 0x2a69: 0x0001, + 0x2a6a: 0x0001, 0x2a6b: 0x0001, 0x2a6c: 0x0001, 0x2a6d: 0x0001, 
0x2a6e: 0x0001, 0x2a6f: 0x0001, + 0x2a70: 0x0001, 0x2a71: 0x0001, 0x2a72: 0x0001, 0x2a73: 0x0001, 0x2a74: 0x0001, 0x2a75: 0x0001, + 0x2a76: 0x0001, 0x2a77: 0x0001, 0x2a78: 0x0001, 0x2a79: 0x0001, 0x2a7a: 0x0001, 0x2a7b: 0x0001, + 0x2a7c: 0x0001, 0x2a7d: 0x0001, 0x2a7e: 0x0001, 0x2a7f: 0x0001, + // Block 0xaa, offset 0x2a80 + 0x2a81: 0x000c, + 0x2ab8: 0x000c, 0x2ab9: 0x000c, 0x2aba: 0x000c, 0x2abb: 0x000c, + 0x2abc: 0x000c, 0x2abd: 0x000c, 0x2abe: 0x000c, 0x2abf: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2ac0: 0x000c, 0x2ac1: 0x000c, 0x2ac2: 0x000c, 0x2ac3: 0x000c, 0x2ac4: 0x000c, 0x2ac5: 0x000c, + 0x2ac6: 0x000c, + 0x2ad2: 0x000a, 0x2ad3: 0x000a, 0x2ad4: 0x000a, 0x2ad5: 0x000a, 0x2ad6: 0x000a, 0x2ad7: 0x000a, + 0x2ad8: 0x000a, 0x2ad9: 0x000a, 0x2ada: 0x000a, 0x2adb: 0x000a, 0x2adc: 0x000a, 0x2add: 0x000a, + 0x2ade: 0x000a, 0x2adf: 0x000a, 0x2ae0: 0x000a, 0x2ae1: 0x000a, 0x2ae2: 0x000a, 0x2ae3: 0x000a, + 0x2ae4: 0x000a, 0x2ae5: 0x000a, + 0x2af0: 0x000c, 0x2af3: 0x000c, 0x2af4: 0x000c, + 0x2aff: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b00: 0x000c, 0x2b01: 0x000c, + 0x2b33: 0x000c, 0x2b34: 0x000c, 0x2b35: 0x000c, + 0x2b36: 0x000c, 0x2b39: 0x000c, 0x2b3a: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b40: 0x000c, 0x2b41: 0x000c, 0x2b42: 0x000c, + 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c, + 0x2b6a: 0x000c, 0x2b6b: 0x000c, 0x2b6d: 0x000c, 0x2b6e: 0x000c, 0x2b6f: 0x000c, + 0x2b70: 0x000c, 0x2b71: 0x000c, 0x2b72: 0x000c, 0x2b73: 0x000c, 0x2b74: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2bb3: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bc0: 0x000c, 0x2bc1: 0x000c, + 0x2bf6: 0x000c, 0x2bf7: 0x000c, 0x2bf8: 0x000c, 0x2bf9: 0x000c, 0x2bfa: 0x000c, 0x2bfb: 0x000c, + 0x2bfc: 0x000c, 0x2bfd: 0x000c, 0x2bfe: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c09: 0x000c, 0x2c0a: 0x000c, 0x2c0b: 0x000c, + 0x2c0c: 0x000c, 0x2c0f: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c6f: 0x000c, + 0x2c70: 0x000c, 0x2c71: 0x000c, 0x2c74: 0x000c, + 0x2c76: 0x000c, 0x2c77: 0x000c, + 0x2c7e: 
0x000c, + // Block 0xb2, offset 0x2c80 + 0x2c9f: 0x000c, 0x2ca3: 0x000c, + 0x2ca4: 0x000c, 0x2ca5: 0x000c, 0x2ca6: 0x000c, 0x2ca7: 0x000c, 0x2ca8: 0x000c, 0x2ca9: 0x000c, + 0x2caa: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cc0: 0x000c, + 0x2ce6: 0x000c, 0x2ce7: 0x000c, 0x2ce8: 0x000c, 0x2ce9: 0x000c, + 0x2cea: 0x000c, 0x2ceb: 0x000c, 0x2cec: 0x000c, + 0x2cf0: 0x000c, 0x2cf1: 0x000c, 0x2cf2: 0x000c, 0x2cf3: 0x000c, 0x2cf4: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d38: 0x000c, 0x2d39: 0x000c, 0x2d3a: 0x000c, 0x2d3b: 0x000c, + 0x2d3c: 0x000c, 0x2d3d: 0x000c, 0x2d3e: 0x000c, 0x2d3f: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d42: 0x000c, 0x2d43: 0x000c, 0x2d44: 0x000c, + 0x2d46: 0x000c, + 0x2d5e: 0x000c, + // Block 0xb6, offset 0x2d80 + 0x2db3: 0x000c, 0x2db4: 0x000c, 0x2db5: 0x000c, + 0x2db6: 0x000c, 0x2db7: 0x000c, 0x2db8: 0x000c, 0x2dba: 0x000c, + 0x2dbf: 0x000c, + // Block 0xb7, offset 0x2dc0 + 0x2dc0: 0x000c, 0x2dc2: 0x000c, 0x2dc3: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e32: 0x000c, 0x2e33: 0x000c, 0x2e34: 0x000c, 0x2e35: 0x000c, + 0x2e3c: 0x000c, 0x2e3d: 0x000c, 0x2e3f: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e40: 0x000c, + 0x2e5c: 0x000c, 0x2e5d: 0x000c, + // Block 0xba, offset 0x2e80 + 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c, + 0x2eb6: 0x000c, 0x2eb7: 0x000c, 0x2eb8: 0x000c, 0x2eb9: 0x000c, 0x2eba: 0x000c, + 0x2ebd: 0x000c, 0x2ebf: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2ec0: 0x000c, + 0x2ee0: 0x000a, 0x2ee1: 0x000a, 0x2ee2: 0x000a, 0x2ee3: 0x000a, + 0x2ee4: 0x000a, 0x2ee5: 0x000a, 0x2ee6: 0x000a, 0x2ee7: 0x000a, 0x2ee8: 0x000a, 0x2ee9: 0x000a, + 0x2eea: 0x000a, 0x2eeb: 0x000a, 0x2eec: 0x000a, + // Block 0xbc, offset 0x2f00 + 0x2f2b: 0x000c, 0x2f2d: 0x000c, + 0x2f30: 0x000c, 0x2f31: 0x000c, 0x2f32: 0x000c, 0x2f33: 0x000c, 0x2f34: 0x000c, 0x2f35: 0x000c, + 0x2f37: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f5d: 0x000c, + 0x2f5e: 0x000c, 0x2f5f: 0x000c, 0x2f62: 0x000c, 0x2f63: 0x000c, + 0x2f64: 0x000c, 0x2f65: 0x000c, 0x2f67: 0x000c, 
0x2f68: 0x000c, 0x2f69: 0x000c, + 0x2f6a: 0x000c, 0x2f6b: 0x000c, + // Block 0xbe, offset 0x2f80 + 0x2faf: 0x000c, + 0x2fb0: 0x000c, 0x2fb1: 0x000c, 0x2fb2: 0x000c, 0x2fb3: 0x000c, 0x2fb4: 0x000c, 0x2fb5: 0x000c, + 0x2fb6: 0x000c, 0x2fb7: 0x000c, 0x2fb9: 0x000c, 0x2fba: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2ffb: 0x000c, + 0x2ffc: 0x000c, 0x2ffe: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3003: 0x000c, + // Block 0xc1, offset 0x3040 + 0x3054: 0x000c, 0x3055: 0x000c, 0x3056: 0x000c, 0x3057: 0x000c, + 0x305a: 0x000c, 0x305b: 0x000c, + 0x3060: 0x000c, + // Block 0xc2, offset 0x3080 + 0x3081: 0x000c, 0x3082: 0x000c, 0x3083: 0x000c, 0x3084: 0x000c, 0x3085: 0x000c, + 0x3086: 0x000c, 0x3089: 0x000c, 0x308a: 0x000c, + 0x30b3: 0x000c, 0x30b4: 0x000c, 0x30b5: 0x000c, + 0x30b6: 0x000c, 0x30b7: 0x000c, 0x30b8: 0x000c, 0x30bb: 0x000c, + 0x30bc: 0x000c, 0x30bd: 0x000c, 0x30be: 0x000c, + // Block 0xc3, offset 0x30c0 + 0x30c7: 0x000c, + 0x30d1: 0x000c, + 0x30d2: 0x000c, 0x30d3: 0x000c, 0x30d4: 0x000c, 0x30d5: 0x000c, 0x30d6: 0x000c, + 0x30d9: 0x000c, 0x30da: 0x000c, 0x30db: 0x000c, + // Block 0xc4, offset 0x3100 + 0x310a: 0x000c, 0x310b: 0x000c, + 0x310c: 0x000c, 0x310d: 0x000c, 0x310e: 0x000c, 0x310f: 0x000c, 0x3110: 0x000c, 0x3111: 0x000c, + 0x3112: 0x000c, 0x3113: 0x000c, 0x3114: 0x000c, 0x3115: 0x000c, 0x3116: 0x000c, + 0x3118: 0x000c, 0x3119: 0x000c, + // Block 0xc5, offset 0x3140 + 0x3170: 0x000c, 0x3171: 0x000c, 0x3172: 0x000c, 0x3173: 0x000c, 0x3174: 0x000c, 0x3175: 0x000c, + 0x3176: 0x000c, 0x3178: 0x000c, 0x3179: 0x000c, 0x317a: 0x000c, 0x317b: 0x000c, + 0x317c: 0x000c, 0x317d: 0x000c, + // Block 0xc6, offset 0x3180 + 0x3192: 0x000c, 0x3193: 0x000c, 0x3194: 0x000c, 0x3195: 0x000c, 0x3196: 0x000c, 0x3197: 0x000c, + 0x3198: 0x000c, 0x3199: 0x000c, 0x319a: 0x000c, 0x319b: 0x000c, 0x319c: 0x000c, 0x319d: 0x000c, + 0x319e: 0x000c, 0x319f: 0x000c, 0x31a0: 0x000c, 0x31a1: 0x000c, 0x31a2: 0x000c, 0x31a3: 0x000c, + 0x31a4: 0x000c, 0x31a5: 0x000c, 0x31a6: 0x000c, 0x31a7: 
0x000c, + 0x31aa: 0x000c, 0x31ab: 0x000c, 0x31ac: 0x000c, 0x31ad: 0x000c, 0x31ae: 0x000c, 0x31af: 0x000c, + 0x31b0: 0x000c, 0x31b2: 0x000c, 0x31b3: 0x000c, 0x31b5: 0x000c, + 0x31b6: 0x000c, + // Block 0xc7, offset 0x31c0 + 0x31f1: 0x000c, 0x31f2: 0x000c, 0x31f3: 0x000c, 0x31f4: 0x000c, 0x31f5: 0x000c, + 0x31f6: 0x000c, 0x31fa: 0x000c, + 0x31fc: 0x000c, 0x31fd: 0x000c, 0x31ff: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3200: 0x000c, 0x3201: 0x000c, 0x3202: 0x000c, 0x3203: 0x000c, 0x3204: 0x000c, 0x3205: 0x000c, + 0x3207: 0x000c, + // Block 0xc9, offset 0x3240 + 0x3250: 0x000c, 0x3251: 0x000c, + 0x3255: 0x000c, 0x3257: 0x000c, + // Block 0xca, offset 0x3280 + 0x32b3: 0x000c, 0x32b4: 0x000c, + // Block 0xcb, offset 0x32c0 + 0x32c0: 0x000c, 0x32c1: 0x000c, + 0x32f6: 0x000c, 0x32f7: 0x000c, 0x32f8: 0x000c, 0x32f9: 0x000c, 0x32fa: 0x000c, + // Block 0xcc, offset 0x3300 + 0x3300: 0x000c, 0x3302: 0x000c, + // Block 0xcd, offset 0x3340 + 0x3355: 0x000a, 0x3356: 0x000a, 0x3357: 0x000a, + 0x3358: 0x000a, 0x3359: 0x000a, 0x335a: 0x000a, 0x335b: 0x000a, 0x335c: 0x000a, 0x335d: 0x0004, + 0x335e: 0x0004, 0x335f: 0x0004, 0x3360: 0x0004, 0x3361: 0x000a, 0x3362: 0x000a, 0x3363: 0x000a, + 0x3364: 0x000a, 0x3365: 0x000a, 0x3366: 0x000a, 0x3367: 0x000a, 0x3368: 0x000a, 0x3369: 0x000a, + 0x336a: 0x000a, 0x336b: 0x000a, 0x336c: 0x000a, 0x336d: 0x000a, 0x336e: 0x000a, 0x336f: 0x000a, + 0x3370: 0x000a, 0x3371: 0x000a, + // Block 0xce, offset 0x3380 + 0x3380: 0x000c, + 0x3387: 0x000c, 0x3388: 0x000c, 0x3389: 0x000c, 0x338a: 0x000c, 0x338b: 0x000c, + 0x338c: 0x000c, 0x338d: 0x000c, 0x338e: 0x000c, 0x338f: 0x000c, 0x3390: 0x000c, 0x3391: 0x000c, + 0x3392: 0x000c, 0x3393: 0x000c, 0x3394: 0x000c, 0x3395: 0x000c, + // Block 0xcf, offset 0x33c0 + 0x33f0: 0x000c, 0x33f1: 0x000c, 0x33f2: 0x000c, 0x33f3: 0x000c, 0x33f4: 0x000c, + // Block 0xd0, offset 0x3400 + 0x3430: 0x000c, 0x3431: 0x000c, 0x3432: 0x000c, 0x3433: 0x000c, 0x3434: 0x000c, 0x3435: 0x000c, + 0x3436: 0x000c, + // Block 0xd1, offset 
0x3440 + 0x344f: 0x000c, + // Block 0xd2, offset 0x3480 + 0x348f: 0x000c, 0x3490: 0x000c, 0x3491: 0x000c, + 0x3492: 0x000c, + // Block 0xd3, offset 0x34c0 + 0x34e2: 0x000a, + 0x34e4: 0x000c, + // Block 0xd4, offset 0x3500 + 0x351d: 0x000c, + 0x351e: 0x000c, 0x3520: 0x000b, 0x3521: 0x000b, 0x3522: 0x000b, 0x3523: 0x000b, + // Block 0xd5, offset 0x3540 + 0x3540: 0x000c, 0x3541: 0x000c, 0x3542: 0x000c, 0x3543: 0x000c, 0x3544: 0x000c, 0x3545: 0x000c, + 0x3546: 0x000c, 0x3547: 0x000c, 0x3548: 0x000c, 0x3549: 0x000c, 0x354a: 0x000c, 0x354b: 0x000c, + 0x354c: 0x000c, 0x354d: 0x000c, 0x354e: 0x000c, 0x354f: 0x000c, 0x3550: 0x000c, 0x3551: 0x000c, + 0x3552: 0x000c, 0x3553: 0x000c, 0x3554: 0x000c, 0x3555: 0x000c, 0x3556: 0x000c, 0x3557: 0x000c, + 0x3558: 0x000c, 0x3559: 0x000c, 0x355a: 0x000c, 0x355b: 0x000c, 0x355c: 0x000c, 0x355d: 0x000c, + 0x355e: 0x000c, 0x355f: 0x000c, 0x3560: 0x000c, 0x3561: 0x000c, 0x3562: 0x000c, 0x3563: 0x000c, + 0x3564: 0x000c, 0x3565: 0x000c, 0x3566: 0x000c, 0x3567: 0x000c, 0x3568: 0x000c, 0x3569: 0x000c, + 0x356a: 0x000c, 0x356b: 0x000c, 0x356c: 0x000c, 0x356d: 0x000c, + 0x3570: 0x000c, 0x3571: 0x000c, 0x3572: 0x000c, 0x3573: 0x000c, 0x3574: 0x000c, 0x3575: 0x000c, + 0x3576: 0x000c, 0x3577: 0x000c, 0x3578: 0x000c, 0x3579: 0x000c, 0x357a: 0x000c, 0x357b: 0x000c, + 0x357c: 0x000c, 0x357d: 0x000c, 0x357e: 0x000c, 0x357f: 0x000c, + // Block 0xd6, offset 0x3580 + 0x3580: 0x000c, 0x3581: 0x000c, 0x3582: 0x000c, 0x3583: 0x000c, 0x3584: 0x000c, 0x3585: 0x000c, + 0x3586: 0x000c, + // Block 0xd7, offset 0x35c0 + 0x35e7: 0x000c, 0x35e8: 0x000c, 0x35e9: 0x000c, + 0x35f3: 0x000b, 0x35f4: 0x000b, 0x35f5: 0x000b, + 0x35f6: 0x000b, 0x35f7: 0x000b, 0x35f8: 0x000b, 0x35f9: 0x000b, 0x35fa: 0x000b, 0x35fb: 0x000c, + 0x35fc: 0x000c, 0x35fd: 0x000c, 0x35fe: 0x000c, 0x35ff: 0x000c, + // Block 0xd8, offset 0x3600 + 0x3600: 0x000c, 0x3601: 0x000c, 0x3602: 0x000c, 0x3605: 0x000c, + 0x3606: 0x000c, 0x3607: 0x000c, 0x3608: 0x000c, 0x3609: 0x000c, 0x360a: 0x000c, 0x360b: 
0x000c, + 0x362a: 0x000c, 0x362b: 0x000c, 0x362c: 0x000c, 0x362d: 0x000c, + // Block 0xd9, offset 0x3640 + 0x3669: 0x000a, + 0x366a: 0x000a, + // Block 0xda, offset 0x3680 + 0x3680: 0x000a, 0x3681: 0x000a, 0x3682: 0x000c, 0x3683: 0x000c, 0x3684: 0x000c, 0x3685: 0x000a, + // Block 0xdb, offset 0x36c0 + 0x36c0: 0x000a, 0x36c1: 0x000a, 0x36c2: 0x000a, 0x36c3: 0x000a, 0x36c4: 0x000a, 0x36c5: 0x000a, + 0x36c6: 0x000a, 0x36c7: 0x000a, 0x36c8: 0x000a, 0x36c9: 0x000a, 0x36ca: 0x000a, 0x36cb: 0x000a, + 0x36cc: 0x000a, 0x36cd: 0x000a, 0x36ce: 0x000a, 0x36cf: 0x000a, 0x36d0: 0x000a, 0x36d1: 0x000a, + 0x36d2: 0x000a, 0x36d3: 0x000a, 0x36d4: 0x000a, 0x36d5: 0x000a, 0x36d6: 0x000a, + // Block 0xdc, offset 0x3700 + 0x371b: 0x000a, + // Block 0xdd, offset 0x3740 + 0x3755: 0x000a, + // Block 0xde, offset 0x3780 + 0x378f: 0x000a, + // Block 0xdf, offset 0x37c0 + 0x37c9: 0x000a, + // Block 0xe0, offset 0x3800 + 0x3803: 0x000a, + 0x380e: 0x0002, 0x380f: 0x0002, 0x3810: 0x0002, 0x3811: 0x0002, + 0x3812: 0x0002, 0x3813: 0x0002, 0x3814: 0x0002, 0x3815: 0x0002, 0x3816: 0x0002, 0x3817: 0x0002, + 0x3818: 0x0002, 0x3819: 0x0002, 0x381a: 0x0002, 0x381b: 0x0002, 0x381c: 0x0002, 0x381d: 0x0002, + 0x381e: 0x0002, 0x381f: 0x0002, 0x3820: 0x0002, 0x3821: 0x0002, 0x3822: 0x0002, 0x3823: 0x0002, + 0x3824: 0x0002, 0x3825: 0x0002, 0x3826: 0x0002, 0x3827: 0x0002, 0x3828: 0x0002, 0x3829: 0x0002, + 0x382a: 0x0002, 0x382b: 0x0002, 0x382c: 0x0002, 0x382d: 0x0002, 0x382e: 0x0002, 0x382f: 0x0002, + 0x3830: 0x0002, 0x3831: 0x0002, 0x3832: 0x0002, 0x3833: 0x0002, 0x3834: 0x0002, 0x3835: 0x0002, + 0x3836: 0x0002, 0x3837: 0x0002, 0x3838: 0x0002, 0x3839: 0x0002, 0x383a: 0x0002, 0x383b: 0x0002, + 0x383c: 0x0002, 0x383d: 0x0002, 0x383e: 0x0002, 0x383f: 0x0002, + // Block 0xe1, offset 0x3840 + 0x3840: 0x000c, 0x3841: 0x000c, 0x3842: 0x000c, 0x3843: 0x000c, 0x3844: 0x000c, 0x3845: 0x000c, + 0x3846: 0x000c, 0x3847: 0x000c, 0x3848: 0x000c, 0x3849: 0x000c, 0x384a: 0x000c, 0x384b: 0x000c, + 0x384c: 0x000c, 0x384d: 
0x000c, 0x384e: 0x000c, 0x384f: 0x000c, 0x3850: 0x000c, 0x3851: 0x000c, + 0x3852: 0x000c, 0x3853: 0x000c, 0x3854: 0x000c, 0x3855: 0x000c, 0x3856: 0x000c, 0x3857: 0x000c, + 0x3858: 0x000c, 0x3859: 0x000c, 0x385a: 0x000c, 0x385b: 0x000c, 0x385c: 0x000c, 0x385d: 0x000c, + 0x385e: 0x000c, 0x385f: 0x000c, 0x3860: 0x000c, 0x3861: 0x000c, 0x3862: 0x000c, 0x3863: 0x000c, + 0x3864: 0x000c, 0x3865: 0x000c, 0x3866: 0x000c, 0x3867: 0x000c, 0x3868: 0x000c, 0x3869: 0x000c, + 0x386a: 0x000c, 0x386b: 0x000c, 0x386c: 0x000c, 0x386d: 0x000c, 0x386e: 0x000c, 0x386f: 0x000c, + 0x3870: 0x000c, 0x3871: 0x000c, 0x3872: 0x000c, 0x3873: 0x000c, 0x3874: 0x000c, 0x3875: 0x000c, + 0x3876: 0x000c, 0x387b: 0x000c, + 0x387c: 0x000c, 0x387d: 0x000c, 0x387e: 0x000c, 0x387f: 0x000c, + // Block 0xe2, offset 0x3880 + 0x3880: 0x000c, 0x3881: 0x000c, 0x3882: 0x000c, 0x3883: 0x000c, 0x3884: 0x000c, 0x3885: 0x000c, + 0x3886: 0x000c, 0x3887: 0x000c, 0x3888: 0x000c, 0x3889: 0x000c, 0x388a: 0x000c, 0x388b: 0x000c, + 0x388c: 0x000c, 0x388d: 0x000c, 0x388e: 0x000c, 0x388f: 0x000c, 0x3890: 0x000c, 0x3891: 0x000c, + 0x3892: 0x000c, 0x3893: 0x000c, 0x3894: 0x000c, 0x3895: 0x000c, 0x3896: 0x000c, 0x3897: 0x000c, + 0x3898: 0x000c, 0x3899: 0x000c, 0x389a: 0x000c, 0x389b: 0x000c, 0x389c: 0x000c, 0x389d: 0x000c, + 0x389e: 0x000c, 0x389f: 0x000c, 0x38a0: 0x000c, 0x38a1: 0x000c, 0x38a2: 0x000c, 0x38a3: 0x000c, + 0x38a4: 0x000c, 0x38a5: 0x000c, 0x38a6: 0x000c, 0x38a7: 0x000c, 0x38a8: 0x000c, 0x38a9: 0x000c, + 0x38aa: 0x000c, 0x38ab: 0x000c, 0x38ac: 0x000c, + 0x38b5: 0x000c, + // Block 0xe3, offset 0x38c0 + 0x38c4: 0x000c, + 0x38db: 0x000c, 0x38dc: 0x000c, 0x38dd: 0x000c, + 0x38de: 0x000c, 0x38df: 0x000c, 0x38e1: 0x000c, 0x38e2: 0x000c, 0x38e3: 0x000c, + 0x38e4: 0x000c, 0x38e5: 0x000c, 0x38e6: 0x000c, 0x38e7: 0x000c, 0x38e8: 0x000c, 0x38e9: 0x000c, + 0x38ea: 0x000c, 0x38eb: 0x000c, 0x38ec: 0x000c, 0x38ed: 0x000c, 0x38ee: 0x000c, 0x38ef: 0x000c, + // Block 0xe4, offset 0x3900 + 0x3900: 0x000c, 0x3901: 0x000c, 0x3902: 
0x000c, 0x3903: 0x000c, 0x3904: 0x000c, 0x3905: 0x000c, + 0x3906: 0x000c, 0x3908: 0x000c, 0x3909: 0x000c, 0x390a: 0x000c, 0x390b: 0x000c, + 0x390c: 0x000c, 0x390d: 0x000c, 0x390e: 0x000c, 0x390f: 0x000c, 0x3910: 0x000c, 0x3911: 0x000c, + 0x3912: 0x000c, 0x3913: 0x000c, 0x3914: 0x000c, 0x3915: 0x000c, 0x3916: 0x000c, 0x3917: 0x000c, + 0x3918: 0x000c, 0x391b: 0x000c, 0x391c: 0x000c, 0x391d: 0x000c, + 0x391e: 0x000c, 0x391f: 0x000c, 0x3920: 0x000c, 0x3921: 0x000c, 0x3923: 0x000c, + 0x3924: 0x000c, 0x3926: 0x000c, 0x3927: 0x000c, 0x3928: 0x000c, 0x3929: 0x000c, + 0x392a: 0x000c, + // Block 0xe5, offset 0x3940 + 0x396e: 0x000c, + // Block 0xe6, offset 0x3980 + 0x39ac: 0x000c, 0x39ad: 0x000c, 0x39ae: 0x000c, 0x39af: 0x000c, + 0x39bf: 0x0004, + // Block 0xe7, offset 0x39c0 + 0x39ec: 0x000c, 0x39ed: 0x000c, 0x39ee: 0x000c, 0x39ef: 0x000c, + // Block 0xe8, offset 0x3a00 + 0x3a00: 0x0001, 0x3a01: 0x0001, 0x3a02: 0x0001, 0x3a03: 0x0001, 0x3a04: 0x0001, 0x3a05: 0x0001, + 0x3a06: 0x0001, 0x3a07: 0x0001, 0x3a08: 0x0001, 0x3a09: 0x0001, 0x3a0a: 0x0001, 0x3a0b: 0x0001, + 0x3a0c: 0x0001, 0x3a0d: 0x0001, 0x3a0e: 0x0001, 0x3a0f: 0x0001, 0x3a10: 0x000c, 0x3a11: 0x000c, + 0x3a12: 0x000c, 0x3a13: 0x000c, 0x3a14: 0x000c, 0x3a15: 0x000c, 0x3a16: 0x000c, 0x3a17: 0x0001, + 0x3a18: 0x0001, 0x3a19: 0x0001, 0x3a1a: 0x0001, 0x3a1b: 0x0001, 0x3a1c: 0x0001, 0x3a1d: 0x0001, + 0x3a1e: 0x0001, 0x3a1f: 0x0001, 0x3a20: 0x0001, 0x3a21: 0x0001, 0x3a22: 0x0001, 0x3a23: 0x0001, + 0x3a24: 0x0001, 0x3a25: 0x0001, 0x3a26: 0x0001, 0x3a27: 0x0001, 0x3a28: 0x0001, 0x3a29: 0x0001, + 0x3a2a: 0x0001, 0x3a2b: 0x0001, 0x3a2c: 0x0001, 0x3a2d: 0x0001, 0x3a2e: 0x0001, 0x3a2f: 0x0001, + 0x3a30: 0x0001, 0x3a31: 0x0001, 0x3a32: 0x0001, 0x3a33: 0x0001, 0x3a34: 0x0001, 0x3a35: 0x0001, + 0x3a36: 0x0001, 0x3a37: 0x0001, 0x3a38: 0x0001, 0x3a39: 0x0001, 0x3a3a: 0x0001, 0x3a3b: 0x0001, + 0x3a3c: 0x0001, 0x3a3d: 0x0001, 0x3a3e: 0x0001, 0x3a3f: 0x0001, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x0001, 0x3a41: 0x0001, 0x3a42: 
0x0001, 0x3a43: 0x0001, 0x3a44: 0x000c, 0x3a45: 0x000c, + 0x3a46: 0x000c, 0x3a47: 0x000c, 0x3a48: 0x000c, 0x3a49: 0x000c, 0x3a4a: 0x000c, 0x3a4b: 0x0001, + 0x3a4c: 0x0001, 0x3a4d: 0x0001, 0x3a4e: 0x0001, 0x3a4f: 0x0001, 0x3a50: 0x0001, 0x3a51: 0x0001, + 0x3a52: 0x0001, 0x3a53: 0x0001, 0x3a54: 0x0001, 0x3a55: 0x0001, 0x3a56: 0x0001, 0x3a57: 0x0001, + 0x3a58: 0x0001, 0x3a59: 0x0001, 0x3a5a: 0x0001, 0x3a5b: 0x0001, 0x3a5c: 0x0001, 0x3a5d: 0x0001, + 0x3a5e: 0x0001, 0x3a5f: 0x0001, 0x3a60: 0x0001, 0x3a61: 0x0001, 0x3a62: 0x0001, 0x3a63: 0x0001, + 0x3a64: 0x0001, 0x3a65: 0x0001, 0x3a66: 0x0001, 0x3a67: 0x0001, 0x3a68: 0x0001, 0x3a69: 0x0001, + 0x3a6a: 0x0001, 0x3a6b: 0x0001, 0x3a6c: 0x0001, 0x3a6d: 0x0001, 0x3a6e: 0x0001, 0x3a6f: 0x0001, + 0x3a70: 0x0001, 0x3a71: 0x0001, 0x3a72: 0x0001, 0x3a73: 0x0001, 0x3a74: 0x0001, 0x3a75: 0x0001, + 0x3a76: 0x0001, 0x3a77: 0x0001, 0x3a78: 0x0001, 0x3a79: 0x0001, 0x3a7a: 0x0001, 0x3a7b: 0x0001, + 0x3a7c: 0x0001, 0x3a7d: 0x0001, 0x3a7e: 0x0001, 0x3a7f: 0x0001, + // Block 0xea, offset 0x3a80 + 0x3a80: 0x0001, 0x3a81: 0x0001, 0x3a82: 0x0001, 0x3a83: 0x0001, 0x3a84: 0x0001, 0x3a85: 0x0001, + 0x3a86: 0x0001, 0x3a87: 0x0001, 0x3a88: 0x0001, 0x3a89: 0x0001, 0x3a8a: 0x0001, 0x3a8b: 0x0001, + 0x3a8c: 0x0001, 0x3a8d: 0x0001, 0x3a8e: 0x0001, 0x3a8f: 0x0001, 0x3a90: 0x0001, 0x3a91: 0x0001, + 0x3a92: 0x0001, 0x3a93: 0x0001, 0x3a94: 0x0001, 0x3a95: 0x0001, 0x3a96: 0x0001, 0x3a97: 0x0001, + 0x3a98: 0x0001, 0x3a99: 0x0001, 0x3a9a: 0x0001, 0x3a9b: 0x0001, 0x3a9c: 0x0001, 0x3a9d: 0x0001, + 0x3a9e: 0x0001, 0x3a9f: 0x0001, 0x3aa0: 0x0001, 0x3aa1: 0x0001, 0x3aa2: 0x0001, 0x3aa3: 0x0001, + 0x3aa4: 0x0001, 0x3aa5: 0x0001, 0x3aa6: 0x0001, 0x3aa7: 0x0001, 0x3aa8: 0x0001, 0x3aa9: 0x0001, + 0x3aaa: 0x0001, 0x3aab: 0x0001, 0x3aac: 0x0001, 0x3aad: 0x0001, 0x3aae: 0x0001, 0x3aaf: 0x0001, + 0x3ab0: 0x0001, 0x3ab1: 0x000d, 0x3ab2: 0x000d, 0x3ab3: 0x000d, 0x3ab4: 0x000d, 0x3ab5: 0x000d, + 0x3ab6: 0x000d, 0x3ab7: 0x000d, 0x3ab8: 0x000d, 0x3ab9: 0x000d, 0x3aba: 0x000d, 
0x3abb: 0x000d, + 0x3abc: 0x000d, 0x3abd: 0x000d, 0x3abe: 0x000d, 0x3abf: 0x000d, + // Block 0xeb, offset 0x3ac0 + 0x3ac0: 0x000d, 0x3ac1: 0x000d, 0x3ac2: 0x000d, 0x3ac3: 0x000d, 0x3ac4: 0x000d, 0x3ac5: 0x000d, + 0x3ac6: 0x000d, 0x3ac7: 0x000d, 0x3ac8: 0x000d, 0x3ac9: 0x000d, 0x3aca: 0x000d, 0x3acb: 0x000d, + 0x3acc: 0x000d, 0x3acd: 0x000d, 0x3ace: 0x000d, 0x3acf: 0x000d, 0x3ad0: 0x000d, 0x3ad1: 0x000d, + 0x3ad2: 0x000d, 0x3ad3: 0x000d, 0x3ad4: 0x000d, 0x3ad5: 0x000d, 0x3ad6: 0x000d, 0x3ad7: 0x000d, + 0x3ad8: 0x000d, 0x3ad9: 0x000d, 0x3ada: 0x000d, 0x3adb: 0x000d, 0x3adc: 0x000d, 0x3add: 0x000d, + 0x3ade: 0x000d, 0x3adf: 0x000d, 0x3ae0: 0x000d, 0x3ae1: 0x000d, 0x3ae2: 0x000d, 0x3ae3: 0x000d, + 0x3ae4: 0x000d, 0x3ae5: 0x000d, 0x3ae6: 0x000d, 0x3ae7: 0x000d, 0x3ae8: 0x000d, 0x3ae9: 0x000d, + 0x3aea: 0x000d, 0x3aeb: 0x000d, 0x3aec: 0x000d, 0x3aed: 0x000d, 0x3aee: 0x000d, 0x3aef: 0x000d, + 0x3af0: 0x000d, 0x3af1: 0x000d, 0x3af2: 0x000d, 0x3af3: 0x000d, 0x3af4: 0x000d, 0x3af5: 0x0001, + 0x3af6: 0x0001, 0x3af7: 0x0001, 0x3af8: 0x0001, 0x3af9: 0x0001, 0x3afa: 0x0001, 0x3afb: 0x0001, + 0x3afc: 0x0001, 0x3afd: 0x0001, 0x3afe: 0x0001, 0x3aff: 0x0001, + // Block 0xec, offset 0x3b00 + 0x3b00: 0x0001, 0x3b01: 0x000d, 0x3b02: 0x000d, 0x3b03: 0x000d, 0x3b04: 0x000d, 0x3b05: 0x000d, + 0x3b06: 0x000d, 0x3b07: 0x000d, 0x3b08: 0x000d, 0x3b09: 0x000d, 0x3b0a: 0x000d, 0x3b0b: 0x000d, + 0x3b0c: 0x000d, 0x3b0d: 0x000d, 0x3b0e: 0x000d, 0x3b0f: 0x000d, 0x3b10: 0x000d, 0x3b11: 0x000d, + 0x3b12: 0x000d, 0x3b13: 0x000d, 0x3b14: 0x000d, 0x3b15: 0x000d, 0x3b16: 0x000d, 0x3b17: 0x000d, + 0x3b18: 0x000d, 0x3b19: 0x000d, 0x3b1a: 0x000d, 0x3b1b: 0x000d, 0x3b1c: 0x000d, 0x3b1d: 0x000d, + 0x3b1e: 0x000d, 0x3b1f: 0x000d, 0x3b20: 0x000d, 0x3b21: 0x000d, 0x3b22: 0x000d, 0x3b23: 0x000d, + 0x3b24: 0x000d, 0x3b25: 0x000d, 0x3b26: 0x000d, 0x3b27: 0x000d, 0x3b28: 0x000d, 0x3b29: 0x000d, + 0x3b2a: 0x000d, 0x3b2b: 0x000d, 0x3b2c: 0x000d, 0x3b2d: 0x000d, 0x3b2e: 0x000d, 0x3b2f: 0x000d, + 0x3b30: 0x000d, 0x3b31: 
0x000d, 0x3b32: 0x000d, 0x3b33: 0x000d, 0x3b34: 0x000d, 0x3b35: 0x000d, + 0x3b36: 0x000d, 0x3b37: 0x000d, 0x3b38: 0x000d, 0x3b39: 0x000d, 0x3b3a: 0x000d, 0x3b3b: 0x000d, + 0x3b3c: 0x000d, 0x3b3d: 0x000d, 0x3b3e: 0x0001, 0x3b3f: 0x0001, + // Block 0xed, offset 0x3b40 + 0x3b40: 0x000d, 0x3b41: 0x000d, 0x3b42: 0x000d, 0x3b43: 0x000d, 0x3b44: 0x000d, 0x3b45: 0x000d, + 0x3b46: 0x000d, 0x3b47: 0x000d, 0x3b48: 0x000d, 0x3b49: 0x000d, 0x3b4a: 0x000d, 0x3b4b: 0x000d, + 0x3b4c: 0x000d, 0x3b4d: 0x000d, 0x3b4e: 0x000d, 0x3b4f: 0x000d, 0x3b50: 0x000d, 0x3b51: 0x000d, + 0x3b52: 0x000d, 0x3b53: 0x000d, 0x3b54: 0x000d, 0x3b55: 0x000d, 0x3b56: 0x000d, 0x3b57: 0x000d, + 0x3b58: 0x000d, 0x3b59: 0x000d, 0x3b5a: 0x000d, 0x3b5b: 0x000d, 0x3b5c: 0x000d, 0x3b5d: 0x000d, + 0x3b5e: 0x000d, 0x3b5f: 0x000d, 0x3b60: 0x000d, 0x3b61: 0x000d, 0x3b62: 0x000d, 0x3b63: 0x000d, + 0x3b64: 0x000d, 0x3b65: 0x000d, 0x3b66: 0x000d, 0x3b67: 0x000d, 0x3b68: 0x000d, 0x3b69: 0x000d, + 0x3b6a: 0x000d, 0x3b6b: 0x000d, 0x3b6c: 0x000d, 0x3b6d: 0x000d, 0x3b6e: 0x000d, 0x3b6f: 0x000d, + 0x3b70: 0x000a, 0x3b71: 0x000a, 0x3b72: 0x000d, 0x3b73: 0x000d, 0x3b74: 0x000d, 0x3b75: 0x000d, + 0x3b76: 0x000d, 0x3b77: 0x000d, 0x3b78: 0x000d, 0x3b79: 0x000d, 0x3b7a: 0x000d, 0x3b7b: 0x000d, + 0x3b7c: 0x000d, 0x3b7d: 0x000d, 0x3b7e: 0x000d, 0x3b7f: 0x000d, + // Block 0xee, offset 0x3b80 + 0x3b80: 0x000a, 0x3b81: 0x000a, 0x3b82: 0x000a, 0x3b83: 0x000a, 0x3b84: 0x000a, 0x3b85: 0x000a, + 0x3b86: 0x000a, 0x3b87: 0x000a, 0x3b88: 0x000a, 0x3b89: 0x000a, 0x3b8a: 0x000a, 0x3b8b: 0x000a, + 0x3b8c: 0x000a, 0x3b8d: 0x000a, 0x3b8e: 0x000a, 0x3b8f: 0x000a, 0x3b90: 0x000a, 0x3b91: 0x000a, + 0x3b92: 0x000a, 0x3b93: 0x000a, 0x3b94: 0x000a, 0x3b95: 0x000a, 0x3b96: 0x000a, 0x3b97: 0x000a, + 0x3b98: 0x000a, 0x3b99: 0x000a, 0x3b9a: 0x000a, 0x3b9b: 0x000a, 0x3b9c: 0x000a, 0x3b9d: 0x000a, + 0x3b9e: 0x000a, 0x3b9f: 0x000a, 0x3ba0: 0x000a, 0x3ba1: 0x000a, 0x3ba2: 0x000a, 0x3ba3: 0x000a, + 0x3ba4: 0x000a, 0x3ba5: 0x000a, 0x3ba6: 0x000a, 0x3ba7: 0x000a, 
0x3ba8: 0x000a, 0x3ba9: 0x000a, + 0x3baa: 0x000a, 0x3bab: 0x000a, + 0x3bb0: 0x000a, 0x3bb1: 0x000a, 0x3bb2: 0x000a, 0x3bb3: 0x000a, 0x3bb4: 0x000a, 0x3bb5: 0x000a, + 0x3bb6: 0x000a, 0x3bb7: 0x000a, 0x3bb8: 0x000a, 0x3bb9: 0x000a, 0x3bba: 0x000a, 0x3bbb: 0x000a, + 0x3bbc: 0x000a, 0x3bbd: 0x000a, 0x3bbe: 0x000a, 0x3bbf: 0x000a, + // Block 0xef, offset 0x3bc0 + 0x3bc0: 0x000a, 0x3bc1: 0x000a, 0x3bc2: 0x000a, 0x3bc3: 0x000a, 0x3bc4: 0x000a, 0x3bc5: 0x000a, + 0x3bc6: 0x000a, 0x3bc7: 0x000a, 0x3bc8: 0x000a, 0x3bc9: 0x000a, 0x3bca: 0x000a, 0x3bcb: 0x000a, + 0x3bcc: 0x000a, 0x3bcd: 0x000a, 0x3bce: 0x000a, 0x3bcf: 0x000a, 0x3bd0: 0x000a, 0x3bd1: 0x000a, + 0x3bd2: 0x000a, 0x3bd3: 0x000a, + 0x3be0: 0x000a, 0x3be1: 0x000a, 0x3be2: 0x000a, 0x3be3: 0x000a, + 0x3be4: 0x000a, 0x3be5: 0x000a, 0x3be6: 0x000a, 0x3be7: 0x000a, 0x3be8: 0x000a, 0x3be9: 0x000a, + 0x3bea: 0x000a, 0x3beb: 0x000a, 0x3bec: 0x000a, 0x3bed: 0x000a, 0x3bee: 0x000a, + 0x3bf1: 0x000a, 0x3bf2: 0x000a, 0x3bf3: 0x000a, 0x3bf4: 0x000a, 0x3bf5: 0x000a, + 0x3bf6: 0x000a, 0x3bf7: 0x000a, 0x3bf8: 0x000a, 0x3bf9: 0x000a, 0x3bfa: 0x000a, 0x3bfb: 0x000a, + 0x3bfc: 0x000a, 0x3bfd: 0x000a, 0x3bfe: 0x000a, 0x3bff: 0x000a, + // Block 0xf0, offset 0x3c00 + 0x3c01: 0x000a, 0x3c02: 0x000a, 0x3c03: 0x000a, 0x3c04: 0x000a, 0x3c05: 0x000a, + 0x3c06: 0x000a, 0x3c07: 0x000a, 0x3c08: 0x000a, 0x3c09: 0x000a, 0x3c0a: 0x000a, 0x3c0b: 0x000a, + 0x3c0c: 0x000a, 0x3c0d: 0x000a, 0x3c0e: 0x000a, 0x3c0f: 0x000a, 0x3c11: 0x000a, + 0x3c12: 0x000a, 0x3c13: 0x000a, 0x3c14: 0x000a, 0x3c15: 0x000a, 0x3c16: 0x000a, 0x3c17: 0x000a, + 0x3c18: 0x000a, 0x3c19: 0x000a, 0x3c1a: 0x000a, 0x3c1b: 0x000a, 0x3c1c: 0x000a, 0x3c1d: 0x000a, + 0x3c1e: 0x000a, 0x3c1f: 0x000a, 0x3c20: 0x000a, 0x3c21: 0x000a, 0x3c22: 0x000a, 0x3c23: 0x000a, + 0x3c24: 0x000a, 0x3c25: 0x000a, 0x3c26: 0x000a, 0x3c27: 0x000a, 0x3c28: 0x000a, 0x3c29: 0x000a, + 0x3c2a: 0x000a, 0x3c2b: 0x000a, 0x3c2c: 0x000a, 0x3c2d: 0x000a, 0x3c2e: 0x000a, 0x3c2f: 0x000a, + 0x3c30: 0x000a, 0x3c31: 0x000a, 
0x3c32: 0x000a, 0x3c33: 0x000a, 0x3c34: 0x000a, 0x3c35: 0x000a, + // Block 0xf1, offset 0x3c40 + 0x3c40: 0x0002, 0x3c41: 0x0002, 0x3c42: 0x0002, 0x3c43: 0x0002, 0x3c44: 0x0002, 0x3c45: 0x0002, + 0x3c46: 0x0002, 0x3c47: 0x0002, 0x3c48: 0x0002, 0x3c49: 0x0002, 0x3c4a: 0x0002, 0x3c4b: 0x000a, + 0x3c4c: 0x000a, 0x3c4d: 0x000a, 0x3c4e: 0x000a, 0x3c4f: 0x000a, + 0x3c6f: 0x000a, + // Block 0xf2, offset 0x3c80 + 0x3caa: 0x000a, 0x3cab: 0x000a, 0x3cac: 0x000a, 0x3cad: 0x000a, 0x3cae: 0x000a, 0x3caf: 0x000a, + // Block 0xf3, offset 0x3cc0 + 0x3ced: 0x000a, + // Block 0xf4, offset 0x3d00 + 0x3d20: 0x000a, 0x3d21: 0x000a, 0x3d22: 0x000a, 0x3d23: 0x000a, + 0x3d24: 0x000a, 0x3d25: 0x000a, + // Block 0xf5, offset 0x3d40 + 0x3d40: 0x000a, 0x3d41: 0x000a, 0x3d42: 0x000a, 0x3d43: 0x000a, 0x3d44: 0x000a, 0x3d45: 0x000a, + 0x3d46: 0x000a, 0x3d47: 0x000a, 0x3d48: 0x000a, 0x3d49: 0x000a, 0x3d4a: 0x000a, 0x3d4b: 0x000a, + 0x3d4c: 0x000a, 0x3d4d: 0x000a, 0x3d4e: 0x000a, 0x3d4f: 0x000a, 0x3d50: 0x000a, 0x3d51: 0x000a, + 0x3d52: 0x000a, 0x3d53: 0x000a, 0x3d54: 0x000a, 0x3d55: 0x000a, 0x3d56: 0x000a, 0x3d57: 0x000a, + 0x3d5c: 0x000a, 0x3d5d: 0x000a, + 0x3d5e: 0x000a, 0x3d5f: 0x000a, 0x3d60: 0x000a, 0x3d61: 0x000a, 0x3d62: 0x000a, 0x3d63: 0x000a, + 0x3d64: 0x000a, 0x3d65: 0x000a, 0x3d66: 0x000a, 0x3d67: 0x000a, 0x3d68: 0x000a, 0x3d69: 0x000a, + 0x3d6a: 0x000a, 0x3d6b: 0x000a, 0x3d6c: 0x000a, + 0x3d70: 0x000a, 0x3d71: 0x000a, 0x3d72: 0x000a, 0x3d73: 0x000a, 0x3d74: 0x000a, 0x3d75: 0x000a, + 0x3d76: 0x000a, 0x3d77: 0x000a, 0x3d78: 0x000a, 0x3d79: 0x000a, 0x3d7a: 0x000a, 0x3d7b: 0x000a, + 0x3d7c: 0x000a, + // Block 0xf6, offset 0x3d80 + 0x3d80: 0x000a, 0x3d81: 0x000a, 0x3d82: 0x000a, 0x3d83: 0x000a, 0x3d84: 0x000a, 0x3d85: 0x000a, + 0x3d86: 0x000a, 0x3d87: 0x000a, 0x3d88: 0x000a, 0x3d89: 0x000a, 0x3d8a: 0x000a, 0x3d8b: 0x000a, + 0x3d8c: 0x000a, 0x3d8d: 0x000a, 0x3d8e: 0x000a, 0x3d8f: 0x000a, 0x3d90: 0x000a, 0x3d91: 0x000a, + 0x3d92: 0x000a, 0x3d93: 0x000a, 0x3d94: 0x000a, 0x3d95: 0x000a, 0x3d96: 
0x000a, 0x3d97: 0x000a, + 0x3d98: 0x000a, 0x3d99: 0x000a, 0x3d9a: 0x000a, 0x3d9b: 0x000a, 0x3d9c: 0x000a, 0x3d9d: 0x000a, + 0x3d9e: 0x000a, 0x3d9f: 0x000a, 0x3da0: 0x000a, 0x3da1: 0x000a, 0x3da2: 0x000a, 0x3da3: 0x000a, + 0x3da4: 0x000a, 0x3da5: 0x000a, 0x3da6: 0x000a, 0x3da7: 0x000a, 0x3da8: 0x000a, 0x3da9: 0x000a, + 0x3daa: 0x000a, 0x3dab: 0x000a, 0x3dac: 0x000a, 0x3dad: 0x000a, 0x3dae: 0x000a, 0x3daf: 0x000a, + 0x3db0: 0x000a, 0x3db1: 0x000a, 0x3db2: 0x000a, 0x3db3: 0x000a, 0x3db4: 0x000a, 0x3db5: 0x000a, + 0x3db6: 0x000a, 0x3dbb: 0x000a, + 0x3dbc: 0x000a, 0x3dbd: 0x000a, 0x3dbe: 0x000a, 0x3dbf: 0x000a, + // Block 0xf7, offset 0x3dc0 + 0x3dc0: 0x000a, 0x3dc1: 0x000a, 0x3dc2: 0x000a, 0x3dc3: 0x000a, 0x3dc4: 0x000a, 0x3dc5: 0x000a, + 0x3dc6: 0x000a, 0x3dc7: 0x000a, 0x3dc8: 0x000a, 0x3dc9: 0x000a, 0x3dca: 0x000a, 0x3dcb: 0x000a, + 0x3dcc: 0x000a, 0x3dcd: 0x000a, 0x3dce: 0x000a, 0x3dcf: 0x000a, 0x3dd0: 0x000a, 0x3dd1: 0x000a, + 0x3dd2: 0x000a, 0x3dd3: 0x000a, 0x3dd4: 0x000a, 0x3dd5: 0x000a, 0x3dd6: 0x000a, 0x3dd7: 0x000a, + 0x3dd8: 0x000a, 0x3dd9: 0x000a, + 0x3de0: 0x000a, 0x3de1: 0x000a, 0x3de2: 0x000a, 0x3de3: 0x000a, + 0x3de4: 0x000a, 0x3de5: 0x000a, 0x3de6: 0x000a, 0x3de7: 0x000a, 0x3de8: 0x000a, 0x3de9: 0x000a, + 0x3dea: 0x000a, 0x3deb: 0x000a, + 0x3df0: 0x000a, + // Block 0xf8, offset 0x3e00 + 0x3e00: 0x000a, 0x3e01: 0x000a, 0x3e02: 0x000a, 0x3e03: 0x000a, 0x3e04: 0x000a, 0x3e05: 0x000a, + 0x3e06: 0x000a, 0x3e07: 0x000a, 0x3e08: 0x000a, 0x3e09: 0x000a, 0x3e0a: 0x000a, 0x3e0b: 0x000a, + 0x3e10: 0x000a, 0x3e11: 0x000a, + 0x3e12: 0x000a, 0x3e13: 0x000a, 0x3e14: 0x000a, 0x3e15: 0x000a, 0x3e16: 0x000a, 0x3e17: 0x000a, + 0x3e18: 0x000a, 0x3e19: 0x000a, 0x3e1a: 0x000a, 0x3e1b: 0x000a, 0x3e1c: 0x000a, 0x3e1d: 0x000a, + 0x3e1e: 0x000a, 0x3e1f: 0x000a, 0x3e20: 0x000a, 0x3e21: 0x000a, 0x3e22: 0x000a, 0x3e23: 0x000a, + 0x3e24: 0x000a, 0x3e25: 0x000a, 0x3e26: 0x000a, 0x3e27: 0x000a, 0x3e28: 0x000a, 0x3e29: 0x000a, + 0x3e2a: 0x000a, 0x3e2b: 0x000a, 0x3e2c: 0x000a, 0x3e2d: 
0x000a, 0x3e2e: 0x000a, 0x3e2f: 0x000a, + 0x3e30: 0x000a, 0x3e31: 0x000a, 0x3e32: 0x000a, 0x3e33: 0x000a, 0x3e34: 0x000a, 0x3e35: 0x000a, + 0x3e36: 0x000a, 0x3e37: 0x000a, 0x3e38: 0x000a, 0x3e39: 0x000a, 0x3e3a: 0x000a, 0x3e3b: 0x000a, + 0x3e3c: 0x000a, 0x3e3d: 0x000a, 0x3e3e: 0x000a, 0x3e3f: 0x000a, + // Block 0xf9, offset 0x3e40 + 0x3e40: 0x000a, 0x3e41: 0x000a, 0x3e42: 0x000a, 0x3e43: 0x000a, 0x3e44: 0x000a, 0x3e45: 0x000a, + 0x3e46: 0x000a, 0x3e47: 0x000a, + 0x3e50: 0x000a, 0x3e51: 0x000a, + 0x3e52: 0x000a, 0x3e53: 0x000a, 0x3e54: 0x000a, 0x3e55: 0x000a, 0x3e56: 0x000a, 0x3e57: 0x000a, + 0x3e58: 0x000a, 0x3e59: 0x000a, + 0x3e60: 0x000a, 0x3e61: 0x000a, 0x3e62: 0x000a, 0x3e63: 0x000a, + 0x3e64: 0x000a, 0x3e65: 0x000a, 0x3e66: 0x000a, 0x3e67: 0x000a, 0x3e68: 0x000a, 0x3e69: 0x000a, + 0x3e6a: 0x000a, 0x3e6b: 0x000a, 0x3e6c: 0x000a, 0x3e6d: 0x000a, 0x3e6e: 0x000a, 0x3e6f: 0x000a, + 0x3e70: 0x000a, 0x3e71: 0x000a, 0x3e72: 0x000a, 0x3e73: 0x000a, 0x3e74: 0x000a, 0x3e75: 0x000a, + 0x3e76: 0x000a, 0x3e77: 0x000a, 0x3e78: 0x000a, 0x3e79: 0x000a, 0x3e7a: 0x000a, 0x3e7b: 0x000a, + 0x3e7c: 0x000a, 0x3e7d: 0x000a, 0x3e7e: 0x000a, 0x3e7f: 0x000a, + // Block 0xfa, offset 0x3e80 + 0x3e80: 0x000a, 0x3e81: 0x000a, 0x3e82: 0x000a, 0x3e83: 0x000a, 0x3e84: 0x000a, 0x3e85: 0x000a, + 0x3e86: 0x000a, 0x3e87: 0x000a, + 0x3e90: 0x000a, 0x3e91: 0x000a, + 0x3e92: 0x000a, 0x3e93: 0x000a, 0x3e94: 0x000a, 0x3e95: 0x000a, 0x3e96: 0x000a, 0x3e97: 0x000a, + 0x3e98: 0x000a, 0x3e99: 0x000a, 0x3e9a: 0x000a, 0x3e9b: 0x000a, 0x3e9c: 0x000a, 0x3e9d: 0x000a, + 0x3e9e: 0x000a, 0x3e9f: 0x000a, 0x3ea0: 0x000a, 0x3ea1: 0x000a, 0x3ea2: 0x000a, 0x3ea3: 0x000a, + 0x3ea4: 0x000a, 0x3ea5: 0x000a, 0x3ea6: 0x000a, 0x3ea7: 0x000a, 0x3ea8: 0x000a, 0x3ea9: 0x000a, + 0x3eaa: 0x000a, 0x3eab: 0x000a, 0x3eac: 0x000a, 0x3ead: 0x000a, + 0x3eb0: 0x000a, 0x3eb1: 0x000a, + // Block 0xfb, offset 0x3ec0 + 0x3ec0: 0x000a, 0x3ec1: 0x000a, 0x3ec2: 0x000a, 0x3ec3: 0x000a, 0x3ec4: 0x000a, 0x3ec5: 0x000a, + 0x3ec6: 0x000a, 0x3ec7: 
0x000a, 0x3ec8: 0x000a, 0x3ec9: 0x000a, 0x3eca: 0x000a, 0x3ecb: 0x000a, + 0x3ecc: 0x000a, 0x3ecd: 0x000a, 0x3ece: 0x000a, 0x3ecf: 0x000a, 0x3ed0: 0x000a, 0x3ed1: 0x000a, + 0x3ed2: 0x000a, 0x3ed3: 0x000a, + 0x3ee0: 0x000a, 0x3ee1: 0x000a, 0x3ee2: 0x000a, 0x3ee3: 0x000a, + 0x3ee4: 0x000a, 0x3ee5: 0x000a, 0x3ee6: 0x000a, 0x3ee7: 0x000a, 0x3ee8: 0x000a, 0x3ee9: 0x000a, + 0x3eea: 0x000a, 0x3eeb: 0x000a, 0x3eec: 0x000a, 0x3eed: 0x000a, + 0x3ef0: 0x000a, 0x3ef1: 0x000a, 0x3ef2: 0x000a, 0x3ef3: 0x000a, 0x3ef4: 0x000a, 0x3ef5: 0x000a, + 0x3ef6: 0x000a, 0x3ef7: 0x000a, 0x3ef8: 0x000a, 0x3ef9: 0x000a, 0x3efa: 0x000a, 0x3efb: 0x000a, + 0x3efc: 0x000a, + // Block 0xfc, offset 0x3f00 + 0x3f00: 0x000a, 0x3f01: 0x000a, 0x3f02: 0x000a, 0x3f03: 0x000a, 0x3f04: 0x000a, 0x3f05: 0x000a, + 0x3f06: 0x000a, 0x3f07: 0x000a, 0x3f08: 0x000a, + 0x3f10: 0x000a, 0x3f11: 0x000a, + 0x3f12: 0x000a, 0x3f13: 0x000a, 0x3f14: 0x000a, 0x3f15: 0x000a, 0x3f16: 0x000a, 0x3f17: 0x000a, + 0x3f18: 0x000a, 0x3f19: 0x000a, 0x3f1a: 0x000a, 0x3f1b: 0x000a, 0x3f1c: 0x000a, 0x3f1d: 0x000a, + 0x3f1e: 0x000a, 0x3f1f: 0x000a, 0x3f20: 0x000a, 0x3f21: 0x000a, 0x3f22: 0x000a, 0x3f23: 0x000a, + 0x3f24: 0x000a, 0x3f25: 0x000a, 0x3f26: 0x000a, 0x3f27: 0x000a, 0x3f28: 0x000a, 0x3f29: 0x000a, + 0x3f2a: 0x000a, 0x3f2b: 0x000a, 0x3f2c: 0x000a, 0x3f2d: 0x000a, 0x3f2e: 0x000a, 0x3f2f: 0x000a, + 0x3f30: 0x000a, 0x3f31: 0x000a, 0x3f32: 0x000a, 0x3f33: 0x000a, 0x3f34: 0x000a, 0x3f35: 0x000a, + 0x3f36: 0x000a, 0x3f37: 0x000a, 0x3f38: 0x000a, 0x3f39: 0x000a, 0x3f3a: 0x000a, 0x3f3b: 0x000a, + 0x3f3c: 0x000a, 0x3f3d: 0x000a, 0x3f3f: 0x000a, + // Block 0xfd, offset 0x3f40 + 0x3f40: 0x000a, 0x3f41: 0x000a, 0x3f42: 0x000a, 0x3f43: 0x000a, 0x3f44: 0x000a, 0x3f45: 0x000a, + 0x3f4e: 0x000a, 0x3f4f: 0x000a, 0x3f50: 0x000a, 0x3f51: 0x000a, + 0x3f52: 0x000a, 0x3f53: 0x000a, 0x3f54: 0x000a, 0x3f55: 0x000a, 0x3f56: 0x000a, 0x3f57: 0x000a, + 0x3f58: 0x000a, 0x3f59: 0x000a, 0x3f5a: 0x000a, 0x3f5b: 0x000a, + 0x3f60: 0x000a, 0x3f61: 0x000a, 0x3f62: 
0x000a, 0x3f63: 0x000a, + 0x3f64: 0x000a, 0x3f65: 0x000a, 0x3f66: 0x000a, 0x3f67: 0x000a, 0x3f68: 0x000a, + 0x3f70: 0x000a, 0x3f71: 0x000a, 0x3f72: 0x000a, 0x3f73: 0x000a, 0x3f74: 0x000a, 0x3f75: 0x000a, + 0x3f76: 0x000a, 0x3f77: 0x000a, 0x3f78: 0x000a, + // Block 0xfe, offset 0x3f80 + 0x3f80: 0x000a, 0x3f81: 0x000a, 0x3f82: 0x000a, 0x3f83: 0x000a, 0x3f84: 0x000a, 0x3f85: 0x000a, + 0x3f86: 0x000a, 0x3f87: 0x000a, 0x3f88: 0x000a, 0x3f89: 0x000a, 0x3f8a: 0x000a, 0x3f8b: 0x000a, + 0x3f8c: 0x000a, 0x3f8d: 0x000a, 0x3f8e: 0x000a, 0x3f8f: 0x000a, 0x3f90: 0x000a, 0x3f91: 0x000a, + 0x3f92: 0x000a, 0x3f94: 0x000a, 0x3f95: 0x000a, 0x3f96: 0x000a, 0x3f97: 0x000a, + 0x3f98: 0x000a, 0x3f99: 0x000a, 0x3f9a: 0x000a, 0x3f9b: 0x000a, 0x3f9c: 0x000a, 0x3f9d: 0x000a, + 0x3f9e: 0x000a, 0x3f9f: 0x000a, 0x3fa0: 0x000a, 0x3fa1: 0x000a, 0x3fa2: 0x000a, 0x3fa3: 0x000a, + 0x3fa4: 0x000a, 0x3fa5: 0x000a, 0x3fa6: 0x000a, 0x3fa7: 0x000a, 0x3fa8: 0x000a, 0x3fa9: 0x000a, + 0x3faa: 0x000a, 0x3fab: 0x000a, 0x3fac: 0x000a, 0x3fad: 0x000a, 0x3fae: 0x000a, 0x3faf: 0x000a, + 0x3fb0: 0x000a, 0x3fb1: 0x000a, 0x3fb2: 0x000a, 0x3fb3: 0x000a, 0x3fb4: 0x000a, 0x3fb5: 0x000a, + 0x3fb6: 0x000a, 0x3fb7: 0x000a, 0x3fb8: 0x000a, 0x3fb9: 0x000a, 0x3fba: 0x000a, 0x3fbb: 0x000a, + 0x3fbc: 0x000a, 0x3fbd: 0x000a, 0x3fbe: 0x000a, 0x3fbf: 0x000a, + // Block 0xff, offset 0x3fc0 + 0x3fc0: 0x000a, 0x3fc1: 0x000a, 0x3fc2: 0x000a, 0x3fc3: 0x000a, 0x3fc4: 0x000a, 0x3fc5: 0x000a, + 0x3fc6: 0x000a, 0x3fc7: 0x000a, 0x3fc8: 0x000a, 0x3fc9: 0x000a, 0x3fca: 0x000a, + 0x3ff0: 0x0002, 0x3ff1: 0x0002, 0x3ff2: 0x0002, 0x3ff3: 0x0002, 0x3ff4: 0x0002, 0x3ff5: 0x0002, + 0x3ff6: 0x0002, 0x3ff7: 0x0002, 0x3ff8: 0x0002, 0x3ff9: 0x0002, + // Block 0x100, offset 0x4000 + 0x403e: 0x000b, 0x403f: 0x000b, + // Block 0x101, offset 0x4040 + 0x4040: 0x000b, 0x4041: 0x000b, 0x4042: 0x000b, 0x4043: 0x000b, 0x4044: 0x000b, 0x4045: 0x000b, + 0x4046: 0x000b, 0x4047: 0x000b, 0x4048: 0x000b, 0x4049: 0x000b, 0x404a: 0x000b, 0x404b: 0x000b, + 0x404c: 
0x000b, 0x404d: 0x000b, 0x404e: 0x000b, 0x404f: 0x000b, 0x4050: 0x000b, 0x4051: 0x000b, + 0x4052: 0x000b, 0x4053: 0x000b, 0x4054: 0x000b, 0x4055: 0x000b, 0x4056: 0x000b, 0x4057: 0x000b, + 0x4058: 0x000b, 0x4059: 0x000b, 0x405a: 0x000b, 0x405b: 0x000b, 0x405c: 0x000b, 0x405d: 0x000b, + 0x405e: 0x000b, 0x405f: 0x000b, 0x4060: 0x000b, 0x4061: 0x000b, 0x4062: 0x000b, 0x4063: 0x000b, + 0x4064: 0x000b, 0x4065: 0x000b, 0x4066: 0x000b, 0x4067: 0x000b, 0x4068: 0x000b, 0x4069: 0x000b, + 0x406a: 0x000b, 0x406b: 0x000b, 0x406c: 0x000b, 0x406d: 0x000b, 0x406e: 0x000b, 0x406f: 0x000b, + 0x4070: 0x000b, 0x4071: 0x000b, 0x4072: 0x000b, 0x4073: 0x000b, 0x4074: 0x000b, 0x4075: 0x000b, + 0x4076: 0x000b, 0x4077: 0x000b, 0x4078: 0x000b, 0x4079: 0x000b, 0x407a: 0x000b, 0x407b: 0x000b, + 0x407c: 0x000b, 0x407d: 0x000b, 0x407e: 0x000b, 0x407f: 0x000b, + // Block 0x102, offset 0x4080 + 0x4080: 0x000c, 0x4081: 0x000c, 0x4082: 0x000c, 0x4083: 0x000c, 0x4084: 0x000c, 0x4085: 0x000c, + 0x4086: 0x000c, 0x4087: 0x000c, 0x4088: 0x000c, 0x4089: 0x000c, 0x408a: 0x000c, 0x408b: 0x000c, + 0x408c: 0x000c, 0x408d: 0x000c, 0x408e: 0x000c, 0x408f: 0x000c, 0x4090: 0x000c, 0x4091: 0x000c, + 0x4092: 0x000c, 0x4093: 0x000c, 0x4094: 0x000c, 0x4095: 0x000c, 0x4096: 0x000c, 0x4097: 0x000c, + 0x4098: 0x000c, 0x4099: 0x000c, 0x409a: 0x000c, 0x409b: 0x000c, 0x409c: 0x000c, 0x409d: 0x000c, + 0x409e: 0x000c, 0x409f: 0x000c, 0x40a0: 0x000c, 0x40a1: 0x000c, 0x40a2: 0x000c, 0x40a3: 0x000c, + 0x40a4: 0x000c, 0x40a5: 0x000c, 0x40a6: 0x000c, 0x40a7: 0x000c, 0x40a8: 0x000c, 0x40a9: 0x000c, + 0x40aa: 0x000c, 0x40ab: 0x000c, 0x40ac: 0x000c, 0x40ad: 0x000c, 0x40ae: 0x000c, 0x40af: 0x000c, + 0x40b0: 0x000b, 0x40b1: 0x000b, 0x40b2: 0x000b, 0x40b3: 0x000b, 0x40b4: 0x000b, 0x40b5: 0x000b, + 0x40b6: 0x000b, 0x40b7: 0x000b, 0x40b8: 0x000b, 0x40b9: 0x000b, 0x40ba: 0x000b, 0x40bb: 0x000b, + 0x40bc: 0x000b, 0x40bd: 0x000b, 0x40be: 0x000b, 0x40bf: 0x000b, +} + +// bidiIndex: 26 blocks, 1664 entries, 3328 bytes +// Block 0 is the zero 
block. +var bidiIndex = [1664]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x13, 0xf1: 0x14, 0xf2: 0x14, 0xf3: 0x16, 0xf4: 0x17, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x136: 0x28, 0x137: 0x29, + 0x138: 0x2a, 0x139: 0x2b, 0x13a: 0x2c, 0x13b: 0x2d, 0x13c: 0x2e, 0x13d: 0x2f, 0x13e: 0x30, 0x13f: 0x31, + // Block 0x5, offset 0x140 + 0x140: 0x32, 0x141: 0x33, 0x142: 0x34, + 0x14d: 0x35, 0x14e: 0x36, + 0x150: 0x37, + 0x15a: 0x38, 0x15c: 0x39, 0x15d: 0x3a, 0x15e: 0x3b, 0x15f: 0x3c, + 0x160: 0x3d, 0x162: 0x3e, 0x164: 0x3f, 0x165: 0x40, 0x167: 0x41, + 0x168: 0x42, 0x169: 0x43, 0x16a: 0x44, 0x16b: 0x45, 0x16c: 0x46, 0x16d: 0x47, 0x16e: 0x48, 0x16f: 0x49, + 0x170: 0x4a, 0x173: 0x4b, 0x177: 0x05, + 0x17e: 0x4c, 0x17f: 0x4d, + // Block 0x6, offset 0x180 + 0x180: 0x4e, 0x181: 0x4f, 0x182: 0x50, 0x183: 0x51, 0x184: 0x52, 0x185: 0x53, 0x186: 0x54, 0x187: 0x55, + 0x188: 0x56, 0x189: 0x55, 0x18a: 0x55, 0x18b: 0x55, 0x18c: 0x57, 0x18d: 0x58, 0x18e: 0x59, 0x18f: 0x55, + 0x190: 0x5a, 0x191: 0x5b, 0x192: 0x5c, 0x193: 0x5d, 0x194: 0x55, 0x195: 0x55, 0x196: 0x55, 0x197: 0x55, + 0x198: 0x55, 0x199: 0x55, 0x19a: 0x5e, 0x19b: 0x55, 0x19c: 0x55, 0x19d: 0x5f, 0x19e: 0x55, 0x19f: 0x60, + 0x1a4: 0x55, 0x1a5: 0x55, 0x1a6: 0x61, 0x1a7: 0x62, + 0x1a8: 0x55, 0x1a9: 0x55, 0x1aa: 0x55, 0x1ab: 0x55, 0x1ac: 0x55, 0x1ad: 0x63, 
0x1ae: 0x64, 0x1af: 0x55, + 0x1b3: 0x65, 0x1b5: 0x66, 0x1b7: 0x67, + 0x1b8: 0x68, 0x1b9: 0x69, 0x1ba: 0x6a, 0x1bb: 0x6b, 0x1bc: 0x55, 0x1bd: 0x55, 0x1be: 0x55, 0x1bf: 0x6c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6d, 0x1c2: 0x6e, 0x1c3: 0x6f, 0x1c7: 0x70, + 0x1c8: 0x71, 0x1c9: 0x72, 0x1ca: 0x73, 0x1cb: 0x74, 0x1cd: 0x75, 0x1cf: 0x76, + // Block 0x8, offset 0x200 + 0x237: 0x55, + // Block 0x9, offset 0x240 + 0x252: 0x77, 0x253: 0x78, + 0x258: 0x79, 0x259: 0x7a, 0x25a: 0x7b, 0x25b: 0x7c, 0x25c: 0x7d, 0x25e: 0x7e, + 0x260: 0x7f, 0x261: 0x80, 0x263: 0x81, 0x264: 0x82, 0x265: 0x83, 0x266: 0x84, 0x267: 0x85, + 0x268: 0x86, 0x269: 0x87, 0x26a: 0x88, 0x26b: 0x89, 0x26d: 0x8a, 0x26f: 0x8b, + // Block 0xa, offset 0x280 + 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8e, 0x2b5: 0x8f, 0x2b6: 0x0e, 0x2b7: 0x90, + 0x2b8: 0x91, 0x2b9: 0x92, 0x2ba: 0x0e, 0x2bb: 0x93, 0x2bc: 0x94, 0x2bd: 0x95, 0x2bf: 0x96, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x97, 0x2c5: 0x55, 0x2c6: 0x98, 0x2c7: 0x99, + 0x2cb: 0x9a, 0x2cd: 0x9b, + 0x2e0: 0x9c, 0x2e1: 0x9c, 0x2e2: 0x9c, 0x2e3: 0x9c, 0x2e4: 0x9d, 0x2e5: 0x9c, 0x2e6: 0x9c, 0x2e7: 0x9c, + 0x2e8: 0x9e, 0x2e9: 0x9c, 0x2ea: 0x9c, 0x2eb: 0x9f, 0x2ec: 0xa0, 0x2ed: 0x9c, 0x2ee: 0x9c, 0x2ef: 0x9c, + 0x2f0: 0x9c, 0x2f1: 0x9c, 0x2f2: 0x9c, 0x2f3: 0x9c, 0x2f4: 0xa1, 0x2f5: 0x9c, 0x2f6: 0x9c, 0x2f7: 0x9c, + 0x2f8: 0x9c, 0x2f9: 0xa2, 0x2fa: 0xa3, 0x2fb: 0xa4, 0x2fc: 0xa5, 0x2fd: 0xa6, 0x2fe: 0xa7, 0x2ff: 0x9c, + // Block 0xc, offset 0x300 + 0x300: 0xa8, 0x301: 0xa9, 0x302: 0xaa, 0x303: 0x21, 0x304: 0xab, 0x305: 0xac, 0x306: 0xad, 0x307: 0xae, + 0x308: 0xaf, 0x309: 0x28, 0x30b: 0xb0, 0x30c: 0x26, 0x30d: 0xb1, + 0x310: 0xb2, 0x311: 0xb3, 0x312: 0xb4, 0x313: 0xb5, 0x316: 0xb6, 0x317: 0xb7, + 0x318: 0xb8, 0x319: 0xb9, 0x31a: 0xba, 0x31c: 0xbb, + 0x320: 0xbc, 0x324: 0xbd, 0x325: 0xbe, 0x327: 0xbf, + 0x328: 0xc0, 0x329: 0xc1, 0x32a: 0xc2, + 0x330: 0xc3, 0x332: 0xc4, 0x334: 0xc5, 0x335: 0xc6, 0x336: 
0xc7, + 0x33b: 0xc8, 0x33c: 0xc9, 0x33d: 0xca, 0x33f: 0xcb, + // Block 0xd, offset 0x340 + 0x351: 0xcc, + // Block 0xe, offset 0x380 + 0x3ab: 0xcd, 0x3ac: 0xce, + 0x3bd: 0xcf, 0x3be: 0xd0, 0x3bf: 0xd1, + // Block 0xf, offset 0x3c0 + 0x3f2: 0xd2, + // Block 0x10, offset 0x400 + 0x43c: 0xd3, 0x43d: 0xd4, + // Block 0x11, offset 0x440 + 0x445: 0xd5, 0x446: 0xd6, 0x447: 0xd7, + 0x448: 0x55, 0x449: 0xd8, 0x44c: 0x55, 0x44d: 0xd9, + 0x45b: 0xda, 0x45c: 0xdb, 0x45d: 0xdc, 0x45e: 0xdd, 0x45f: 0xde, + 0x468: 0xdf, 0x469: 0xe0, 0x46a: 0xe1, + // Block 0x12, offset 0x480 + 0x480: 0xe2, 0x482: 0xcf, 0x484: 0xce, + 0x48a: 0xe3, 0x48b: 0xe4, + 0x493: 0xe5, + 0x4a0: 0x9c, 0x4a1: 0x9c, 0x4a2: 0x9c, 0x4a3: 0xe6, 0x4a4: 0x9c, 0x4a5: 0xe7, 0x4a6: 0x9c, 0x4a7: 0x9c, + 0x4a8: 0x9c, 0x4a9: 0x9c, 0x4aa: 0x9c, 0x4ab: 0x9c, 0x4ac: 0x9c, 0x4ad: 0x9c, 0x4ae: 0x9c, 0x4af: 0x9c, + 0x4b0: 0x9c, 0x4b1: 0xe8, 0x4b2: 0xe9, 0x4b3: 0x9c, 0x4b4: 0xea, 0x4b5: 0x9c, 0x4b6: 0x9c, 0x4b7: 0x9c, + 0x4b8: 0x0e, 0x4b9: 0x0e, 0x4ba: 0x0e, 0x4bb: 0xeb, 0x4bc: 0x9c, 0x4bd: 0x9c, 0x4be: 0x9c, 0x4bf: 0x9c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xec, 0x4c1: 0x55, 0x4c2: 0xed, 0x4c3: 0xee, 0x4c4: 0xef, 0x4c5: 0xf0, 0x4c6: 0xf1, + 0x4c9: 0xf2, 0x4cc: 0x55, 0x4cd: 0x55, 0x4ce: 0x55, 0x4cf: 0x55, + 0x4d0: 0x55, 0x4d1: 0x55, 0x4d2: 0x55, 0x4d3: 0x55, 0x4d4: 0x55, 0x4d5: 0x55, 0x4d6: 0x55, 0x4d7: 0x55, + 0x4d8: 0x55, 0x4d9: 0x55, 0x4da: 0x55, 0x4db: 0xf3, 0x4dc: 0x55, 0x4dd: 0xf4, 0x4de: 0x55, 0x4df: 0xf5, + 0x4e0: 0xf6, 0x4e1: 0xf7, 0x4e2: 0xf8, 0x4e4: 0x55, 0x4e5: 0x55, 0x4e6: 0x55, 0x4e7: 0x55, + 0x4e8: 0x55, 0x4e9: 0xf9, 0x4ea: 0xfa, 0x4eb: 0xfb, 0x4ec: 0x55, 0x4ed: 0x55, 0x4ee: 0xfc, 0x4ef: 0xfd, + 0x4ff: 0xfe, + // Block 0x14, offset 0x500 + 0x53f: 0xfe, + // Block 0x15, offset 0x540 + 0x550: 0x09, 0x551: 0x0a, 0x553: 0x0b, 0x556: 0x0c, + 0x55b: 0x0d, 0x55c: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, + 0x57f: 0x12, + // Block 0x16, offset 0x580 + 0x58f: 0x12, + 0x59f: 0x12, + 0x5af: 0x12, + 
0x5bf: 0x12, + // Block 0x17, offset 0x5c0 + 0x5c0: 0xff, 0x5c1: 0xff, 0x5c2: 0xff, 0x5c3: 0xff, 0x5c4: 0x05, 0x5c5: 0x05, 0x5c6: 0x05, 0x5c7: 0x100, + 0x5c8: 0xff, 0x5c9: 0xff, 0x5ca: 0xff, 0x5cb: 0xff, 0x5cc: 0xff, 0x5cd: 0xff, 0x5ce: 0xff, 0x5cf: 0xff, + 0x5d0: 0xff, 0x5d1: 0xff, 0x5d2: 0xff, 0x5d3: 0xff, 0x5d4: 0xff, 0x5d5: 0xff, 0x5d6: 0xff, 0x5d7: 0xff, + 0x5d8: 0xff, 0x5d9: 0xff, 0x5da: 0xff, 0x5db: 0xff, 0x5dc: 0xff, 0x5dd: 0xff, 0x5de: 0xff, 0x5df: 0xff, + 0x5e0: 0xff, 0x5e1: 0xff, 0x5e2: 0xff, 0x5e3: 0xff, 0x5e4: 0xff, 0x5e5: 0xff, 0x5e6: 0xff, 0x5e7: 0xff, + 0x5e8: 0xff, 0x5e9: 0xff, 0x5ea: 0xff, 0x5eb: 0xff, 0x5ec: 0xff, 0x5ed: 0xff, 0x5ee: 0xff, 0x5ef: 0xff, + 0x5f0: 0xff, 0x5f1: 0xff, 0x5f2: 0xff, 0x5f3: 0xff, 0x5f4: 0xff, 0x5f5: 0xff, 0x5f6: 0xff, 0x5f7: 0xff, + 0x5f8: 0xff, 0x5f9: 0xff, 0x5fa: 0xff, 0x5fb: 0xff, 0x5fc: 0xff, 0x5fd: 0xff, 0x5fe: 0xff, 0x5ff: 0xff, + // Block 0x18, offset 0x600 + 0x60f: 0x12, + 0x61f: 0x12, + 0x620: 0x15, + 0x62f: 0x12, + 0x63f: 0x12, + // Block 0x19, offset 0x640 + 0x64f: 0x12, +} + +// Total table size 19960 bytes (19KiB); checksum: F50EF68C diff --git a/terraform/providers/google/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/terraform/providers/google/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index 9115ef257e..f65785e8ac 100644 --- a/terraform/providers/google/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/terraform/providers/google/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -1,7 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
-//go:build go1.16 -// +build go1.16 +//go:build go1.16 && !go1.21 +// +build go1.16,!go1.21 package norm diff --git a/terraform/providers/google/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go b/terraform/providers/google/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go new file mode 100644 index 0000000000..e1858b879d --- /dev/null +++ b/terraform/providers/google/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go @@ -0,0 +1,7908 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.21 +// +build go1.21 + +package norm + +import "sync" + +const ( + // Version is the Unicode edition from which the tables are derived. + Version = "15.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. 
+ MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [56]uint8{ + 0, 1, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 84, 91, 103, 107, 118, 122, 129, + 130, 132, 202, 214, 216, 218, 220, 222, + 224, 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x199A + firstCCC = 0x2DD5 + endMulti = 0x30A1 + firstLeadingCCC = 0x4AEF + firstCCCZeroExcept = 0x4BB9 + firstStarterWithNLead = 0x4BE0 + lastDecomp = 0x4BE2 + maxDecomp = 0x8000 +) + +// decomps: 19426 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 
0xC3, 0xA6, 0x42, + 0xC3, 0xB0, 0x42, 0xC3, 0xB8, 0x42, 0xC4, 0xA6, + 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, 0x42, 0xC5, + 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, 0x8E, 0x42, + 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, 0xC7, 0x80, + 0x42, 0xC7, 0x81, 0x42, 0xC7, 0x82, 0x42, 0xC8, + // Bytes 100 - 13f + 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, 0x42, + 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, 0x93, + 0x42, 0xC9, 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, + 0x96, 0x42, 0xC9, 0x97, 0x42, 0xC9, 0x98, 0x42, + 0xC9, 0x99, 0x42, 0xC9, 0x9B, 0x42, 0xC9, 0x9C, + 0x42, 0xC9, 0x9E, 0x42, 0xC9, 0x9F, 0x42, 0xC9, + 0xA0, 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA2, 0x42, + 0xC9, 0xA3, 0x42, 0xC9, 0xA4, 0x42, 0xC9, 0xA5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA7, 0x42, 0xC9, + 0xA8, 0x42, 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, + 0xC9, 0xAB, 0x42, 0xC9, 0xAC, 0x42, 0xC9, 0xAD, + 0x42, 0xC9, 0xAE, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + 0x42, 0xC9, 0xB6, 0x42, 0xC9, 0xB7, 0x42, 0xC9, + 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, 0xBA, 0x42, + // Bytes 180 - 1bf + 0xC9, 0xBB, 0x42, 0xC9, 0xBD, 0x42, 0xC9, 0xBE, + 0x42, 0xCA, 0x80, 0x42, 0xCA, 0x81, 0x42, 0xCA, + 0x82, 0x42, 0xCA, 0x83, 0x42, 0xCA, 0x84, 0x42, + 0xCA, 0x88, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x8D, 0x42, 0xCA, 0x8E, 0x42, 0xCA, 0x8F, 0x42, + 0xCA, 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, + 0x42, 0xCA, 0x95, 0x42, 0xCA, 0x98, 0x42, 0xCA, + // Bytes 1c0 - 1ff + 0x99, 0x42, 0xCA, 0x9B, 0x42, 0xCA, 0x9C, 0x42, + 0xCA, 0x9D, 0x42, 0xCA, 0x9F, 0x42, 0xCA, 0xA1, + 0x42, 0xCA, 0xA2, 0x42, 0xCA, 0xA3, 0x42, 0xCA, + 0xA4, 0x42, 0xCA, 0xA5, 0x42, 0xCA, 0xA6, 0x42, + 0xCA, 0xA7, 0x42, 0xCA, 0xA8, 0x42, 0xCA, 0xA9, + 0x42, 0xCA, 0xAA, 0x42, 0xCA, 0xAB, 0x42, 0xCA, + 0xB9, 0x42, 0xCB, 0x90, 0x42, 0xCB, 0x91, 0x42, + 0xCE, 0x91, 0x42, 0xCE, 0x92, 0x42, 0xCE, 0x93, + // Bytes 200 - 23f + 0x42, 0xCE, 0x94, 0x42, 
0xCE, 0x95, 0x42, 0xCE, + 0x96, 0x42, 0xCE, 0x97, 0x42, 0xCE, 0x98, 0x42, + 0xCE, 0x99, 0x42, 0xCE, 0x9A, 0x42, 0xCE, 0x9B, + 0x42, 0xCE, 0x9C, 0x42, 0xCE, 0x9D, 0x42, 0xCE, + 0x9E, 0x42, 0xCE, 0x9F, 0x42, 0xCE, 0xA0, 0x42, + 0xCE, 0xA1, 0x42, 0xCE, 0xA3, 0x42, 0xCE, 0xA4, + 0x42, 0xCE, 0xA5, 0x42, 0xCE, 0xA6, 0x42, 0xCE, + 0xA7, 0x42, 0xCE, 0xA8, 0x42, 0xCE, 0xA9, 0x42, + // Bytes 240 - 27f + 0xCE, 0xB1, 0x42, 0xCE, 0xB2, 0x42, 0xCE, 0xB3, + 0x42, 0xCE, 0xB4, 0x42, 0xCE, 0xB5, 0x42, 0xCE, + 0xB6, 0x42, 0xCE, 0xB7, 0x42, 0xCE, 0xB8, 0x42, + 0xCE, 0xB9, 0x42, 0xCE, 0xBA, 0x42, 0xCE, 0xBB, + 0x42, 0xCE, 0xBC, 0x42, 0xCE, 0xBD, 0x42, 0xCE, + 0xBE, 0x42, 0xCE, 0xBF, 0x42, 0xCF, 0x80, 0x42, + 0xCF, 0x81, 0x42, 0xCF, 0x82, 0x42, 0xCF, 0x83, + 0x42, 0xCF, 0x84, 0x42, 0xCF, 0x85, 0x42, 0xCF, + // Bytes 280 - 2bf + 0x86, 0x42, 0xCF, 0x87, 0x42, 0xCF, 0x88, 0x42, + 0xCF, 0x89, 0x42, 0xCF, 0x9C, 0x42, 0xCF, 0x9D, + 0x42, 0xD0, 0xB0, 0x42, 0xD0, 0xB1, 0x42, 0xD0, + 0xB2, 0x42, 0xD0, 0xB3, 0x42, 0xD0, 0xB4, 0x42, + 0xD0, 0xB5, 0x42, 0xD0, 0xB6, 0x42, 0xD0, 0xB7, + 0x42, 0xD0, 0xB8, 0x42, 0xD0, 0xBA, 0x42, 0xD0, + 0xBB, 0x42, 0xD0, 0xBC, 0x42, 0xD0, 0xBD, 0x42, + 0xD0, 0xBE, 0x42, 0xD0, 0xBF, 0x42, 0xD1, 0x80, + // Bytes 2c0 - 2ff + 0x42, 0xD1, 0x81, 0x42, 0xD1, 0x82, 0x42, 0xD1, + 0x83, 0x42, 0xD1, 0x84, 0x42, 0xD1, 0x85, 0x42, + 0xD1, 0x86, 0x42, 0xD1, 0x87, 0x42, 0xD1, 0x88, + 0x42, 0xD1, 0x8A, 0x42, 0xD1, 0x8B, 0x42, 0xD1, + 0x8C, 0x42, 0xD1, 0x8D, 0x42, 0xD1, 0x8E, 0x42, + 0xD1, 0x95, 0x42, 0xD1, 0x96, 0x42, 0xD1, 0x98, + 0x42, 0xD1, 0x9F, 0x42, 0xD2, 0x91, 0x42, 0xD2, + 0xAB, 0x42, 0xD2, 0xAF, 0x42, 0xD2, 0xB1, 0x42, + // Bytes 300 - 33f + 0xD3, 0x8F, 0x42, 0xD3, 0x99, 0x42, 0xD3, 0xA9, + 0x42, 0xD7, 0x90, 0x42, 0xD7, 0x91, 0x42, 0xD7, + 0x92, 0x42, 0xD7, 0x93, 0x42, 0xD7, 0x94, 0x42, + 0xD7, 0x9B, 0x42, 0xD7, 0x9C, 0x42, 0xD7, 0x9D, + 0x42, 0xD7, 0xA2, 0x42, 0xD7, 0xA8, 0x42, 0xD7, + 0xAA, 0x42, 0xD8, 0xA1, 0x42, 0xD8, 0xA7, 0x42, + 0xD8, 0xA8, 0x42, 0xD8, 0xA9, 0x42, 
0xD8, 0xAA, + 0x42, 0xD8, 0xAB, 0x42, 0xD8, 0xAC, 0x42, 0xD8, + // Bytes 340 - 37f + 0xAD, 0x42, 0xD8, 0xAE, 0x42, 0xD8, 0xAF, 0x42, + 0xD8, 0xB0, 0x42, 0xD8, 0xB1, 0x42, 0xD8, 0xB2, + 0x42, 0xD8, 0xB3, 0x42, 0xD8, 0xB4, 0x42, 0xD8, + 0xB5, 0x42, 0xD8, 0xB6, 0x42, 0xD8, 0xB7, 0x42, + 0xD8, 0xB8, 0x42, 0xD8, 0xB9, 0x42, 0xD8, 0xBA, + 0x42, 0xD9, 0x81, 0x42, 0xD9, 0x82, 0x42, 0xD9, + 0x83, 0x42, 0xD9, 0x84, 0x42, 0xD9, 0x85, 0x42, + 0xD9, 0x86, 0x42, 0xD9, 0x87, 0x42, 0xD9, 0x88, + // Bytes 380 - 3bf + 0x42, 0xD9, 0x89, 0x42, 0xD9, 0x8A, 0x42, 0xD9, + 0xAE, 0x42, 0xD9, 0xAF, 0x42, 0xD9, 0xB1, 0x42, + 0xD9, 0xB9, 0x42, 0xD9, 0xBA, 0x42, 0xD9, 0xBB, + 0x42, 0xD9, 0xBE, 0x42, 0xD9, 0xBF, 0x42, 0xDA, + 0x80, 0x42, 0xDA, 0x83, 0x42, 0xDA, 0x84, 0x42, + 0xDA, 0x86, 0x42, 0xDA, 0x87, 0x42, 0xDA, 0x88, + 0x42, 0xDA, 0x8C, 0x42, 0xDA, 0x8D, 0x42, 0xDA, + 0x8E, 0x42, 0xDA, 0x91, 0x42, 0xDA, 0x98, 0x42, + // Bytes 3c0 - 3ff + 0xDA, 0xA1, 0x42, 0xDA, 0xA4, 0x42, 0xDA, 0xA6, + 0x42, 0xDA, 0xA9, 0x42, 0xDA, 0xAD, 0x42, 0xDA, + 0xAF, 0x42, 0xDA, 0xB1, 0x42, 0xDA, 0xB3, 0x42, + 0xDA, 0xBA, 0x42, 0xDA, 0xBB, 0x42, 0xDA, 0xBE, + 0x42, 0xDB, 0x81, 0x42, 0xDB, 0x85, 0x42, 0xDB, + 0x86, 0x42, 0xDB, 0x87, 0x42, 0xDB, 0x88, 0x42, + 0xDB, 0x89, 0x42, 0xDB, 0x8B, 0x42, 0xDB, 0x8C, + 0x42, 0xDB, 0x90, 0x42, 0xDB, 0x92, 0x43, 0xE0, + // Bytes 400 - 43f + 0xBC, 0x8B, 0x43, 0xE1, 0x83, 0x9C, 0x43, 0xE1, + 0x84, 0x80, 0x43, 0xE1, 0x84, 0x81, 0x43, 0xE1, + 0x84, 0x82, 0x43, 0xE1, 0x84, 0x83, 0x43, 0xE1, + 0x84, 0x84, 0x43, 0xE1, 0x84, 0x85, 0x43, 0xE1, + 0x84, 0x86, 0x43, 0xE1, 0x84, 0x87, 0x43, 0xE1, + 0x84, 0x88, 0x43, 0xE1, 0x84, 0x89, 0x43, 0xE1, + 0x84, 0x8A, 0x43, 0xE1, 0x84, 0x8B, 0x43, 0xE1, + 0x84, 0x8C, 0x43, 0xE1, 0x84, 0x8D, 0x43, 0xE1, + // Bytes 440 - 47f + 0x84, 0x8E, 0x43, 0xE1, 0x84, 0x8F, 0x43, 0xE1, + 0x84, 0x90, 0x43, 0xE1, 0x84, 0x91, 0x43, 0xE1, + 0x84, 0x92, 0x43, 0xE1, 0x84, 0x94, 0x43, 0xE1, + 0x84, 0x95, 0x43, 0xE1, 0x84, 0x9A, 0x43, 0xE1, + 0x84, 0x9C, 0x43, 0xE1, 0x84, 
0x9D, 0x43, 0xE1, + 0x84, 0x9E, 0x43, 0xE1, 0x84, 0xA0, 0x43, 0xE1, + 0x84, 0xA1, 0x43, 0xE1, 0x84, 0xA2, 0x43, 0xE1, + 0x84, 0xA3, 0x43, 0xE1, 0x84, 0xA7, 0x43, 0xE1, + // Bytes 480 - 4bf + 0x84, 0xA9, 0x43, 0xE1, 0x84, 0xAB, 0x43, 0xE1, + 0x84, 0xAC, 0x43, 0xE1, 0x84, 0xAD, 0x43, 0xE1, + 0x84, 0xAE, 0x43, 0xE1, 0x84, 0xAF, 0x43, 0xE1, + 0x84, 0xB2, 0x43, 0xE1, 0x84, 0xB6, 0x43, 0xE1, + 0x85, 0x80, 0x43, 0xE1, 0x85, 0x87, 0x43, 0xE1, + 0x85, 0x8C, 0x43, 0xE1, 0x85, 0x97, 0x43, 0xE1, + 0x85, 0x98, 0x43, 0xE1, 0x85, 0x99, 0x43, 0xE1, + 0x85, 0xA0, 0x43, 0xE1, 0x86, 0x84, 0x43, 0xE1, + // Bytes 4c0 - 4ff + 0x86, 0x85, 0x43, 0xE1, 0x86, 0x88, 0x43, 0xE1, + 0x86, 0x91, 0x43, 0xE1, 0x86, 0x92, 0x43, 0xE1, + 0x86, 0x94, 0x43, 0xE1, 0x86, 0x9E, 0x43, 0xE1, + 0x86, 0xA1, 0x43, 0xE1, 0x87, 0x87, 0x43, 0xE1, + 0x87, 0x88, 0x43, 0xE1, 0x87, 0x8C, 0x43, 0xE1, + 0x87, 0x8E, 0x43, 0xE1, 0x87, 0x93, 0x43, 0xE1, + 0x87, 0x97, 0x43, 0xE1, 0x87, 0x99, 0x43, 0xE1, + 0x87, 0x9D, 0x43, 0xE1, 0x87, 0x9F, 0x43, 0xE1, + // Bytes 500 - 53f + 0x87, 0xB1, 0x43, 0xE1, 0x87, 0xB2, 0x43, 0xE1, + 0xB4, 0x82, 0x43, 0xE1, 0xB4, 0x96, 0x43, 0xE1, + 0xB4, 0x97, 0x43, 0xE1, 0xB4, 0x9C, 0x43, 0xE1, + 0xB4, 0x9D, 0x43, 0xE1, 0xB4, 0xA5, 0x43, 0xE1, + 0xB5, 0xBB, 0x43, 0xE1, 0xB6, 0x85, 0x43, 0xE1, + 0xB6, 0x91, 0x43, 0xE2, 0x80, 0x82, 0x43, 0xE2, + 0x80, 0x83, 0x43, 0xE2, 0x80, 0x90, 0x43, 0xE2, + 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43, 0xE2, + // Bytes 540 - 57f + 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43, 0xE2, + 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43, 0xE2, + 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43, 0xE2, + 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43, 0xE2, + 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43, 0xE2, + 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43, 0xE2, + 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43, 0xE2, + 0xB1, 0xB1, 0x43, 0xE2, 0xB5, 0xA1, 0x43, 0xE3, + // Bytes 580 - 5bf + 0x80, 0x81, 0x43, 0xE3, 0x80, 0x82, 0x43, 0xE3, + 0x80, 0x88, 0x43, 0xE3, 0x80, 0x89, 0x43, 0xE3, + 0x80, 0x8A, 0x43, 0xE3, 
0x80, 0x8B, 0x43, 0xE3, + 0x80, 0x8C, 0x43, 0xE3, 0x80, 0x8D, 0x43, 0xE3, + 0x80, 0x8E, 0x43, 0xE3, 0x80, 0x8F, 0x43, 0xE3, + 0x80, 0x90, 0x43, 0xE3, 0x80, 0x91, 0x43, 0xE3, + 0x80, 0x92, 0x43, 0xE3, 0x80, 0x94, 0x43, 0xE3, + 0x80, 0x95, 0x43, 0xE3, 0x80, 0x96, 0x43, 0xE3, + // Bytes 5c0 - 5ff + 0x80, 0x97, 0x43, 0xE3, 0x82, 0xA1, 0x43, 0xE3, + 0x82, 0xA2, 0x43, 0xE3, 0x82, 0xA3, 0x43, 0xE3, + 0x82, 0xA4, 0x43, 0xE3, 0x82, 0xA5, 0x43, 0xE3, + 0x82, 0xA6, 0x43, 0xE3, 0x82, 0xA7, 0x43, 0xE3, + 0x82, 0xA8, 0x43, 0xE3, 0x82, 0xA9, 0x43, 0xE3, + 0x82, 0xAA, 0x43, 0xE3, 0x82, 0xAB, 0x43, 0xE3, + 0x82, 0xAD, 0x43, 0xE3, 0x82, 0xAF, 0x43, 0xE3, + 0x82, 0xB1, 0x43, 0xE3, 0x82, 0xB3, 0x43, 0xE3, + // Bytes 600 - 63f + 0x82, 0xB5, 0x43, 0xE3, 0x82, 0xB7, 0x43, 0xE3, + 0x82, 0xB9, 0x43, 0xE3, 0x82, 0xBB, 0x43, 0xE3, + 0x82, 0xBD, 0x43, 0xE3, 0x82, 0xBF, 0x43, 0xE3, + 0x83, 0x81, 0x43, 0xE3, 0x83, 0x83, 0x43, 0xE3, + 0x83, 0x84, 0x43, 0xE3, 0x83, 0x86, 0x43, 0xE3, + 0x83, 0x88, 0x43, 0xE3, 0x83, 0x8A, 0x43, 0xE3, + 0x83, 0x8B, 0x43, 0xE3, 0x83, 0x8C, 0x43, 0xE3, + 0x83, 0x8D, 0x43, 0xE3, 0x83, 0x8E, 0x43, 0xE3, + // Bytes 640 - 67f + 0x83, 0x8F, 0x43, 0xE3, 0x83, 0x92, 0x43, 0xE3, + 0x83, 0x95, 0x43, 0xE3, 0x83, 0x98, 0x43, 0xE3, + 0x83, 0x9B, 0x43, 0xE3, 0x83, 0x9E, 0x43, 0xE3, + 0x83, 0x9F, 0x43, 0xE3, 0x83, 0xA0, 0x43, 0xE3, + 0x83, 0xA1, 0x43, 0xE3, 0x83, 0xA2, 0x43, 0xE3, + 0x83, 0xA3, 0x43, 0xE3, 0x83, 0xA4, 0x43, 0xE3, + 0x83, 0xA5, 0x43, 0xE3, 0x83, 0xA6, 0x43, 0xE3, + 0x83, 0xA7, 0x43, 0xE3, 0x83, 0xA8, 0x43, 0xE3, + // Bytes 680 - 6bf + 0x83, 0xA9, 0x43, 0xE3, 0x83, 0xAA, 0x43, 0xE3, + 0x83, 0xAB, 0x43, 0xE3, 0x83, 0xAC, 0x43, 0xE3, + 0x83, 0xAD, 0x43, 0xE3, 0x83, 0xAF, 0x43, 0xE3, + 0x83, 0xB0, 0x43, 0xE3, 0x83, 0xB1, 0x43, 0xE3, + 0x83, 0xB2, 0x43, 0xE3, 0x83, 0xB3, 0x43, 0xE3, + 0x83, 0xBB, 0x43, 0xE3, 0x83, 0xBC, 0x43, 0xE3, + 0x92, 0x9E, 0x43, 0xE3, 0x92, 0xB9, 0x43, 0xE3, + 0x92, 0xBB, 0x43, 0xE3, 0x93, 0x9F, 0x43, 0xE3, + // Bytes 6c0 - 6ff + 0x94, 0x95, 0x43, 
0xE3, 0x9B, 0xAE, 0x43, 0xE3, + 0x9B, 0xBC, 0x43, 0xE3, 0x9E, 0x81, 0x43, 0xE3, + 0xA0, 0xAF, 0x43, 0xE3, 0xA1, 0xA2, 0x43, 0xE3, + 0xA1, 0xBC, 0x43, 0xE3, 0xA3, 0x87, 0x43, 0xE3, + 0xA3, 0xA3, 0x43, 0xE3, 0xA4, 0x9C, 0x43, 0xE3, + 0xA4, 0xBA, 0x43, 0xE3, 0xA8, 0xAE, 0x43, 0xE3, + 0xA9, 0xAC, 0x43, 0xE3, 0xAB, 0xA4, 0x43, 0xE3, + 0xAC, 0x88, 0x43, 0xE3, 0xAC, 0x99, 0x43, 0xE3, + // Bytes 700 - 73f + 0xAD, 0x89, 0x43, 0xE3, 0xAE, 0x9D, 0x43, 0xE3, + 0xB0, 0x98, 0x43, 0xE3, 0xB1, 0x8E, 0x43, 0xE3, + 0xB4, 0xB3, 0x43, 0xE3, 0xB6, 0x96, 0x43, 0xE3, + 0xBA, 0xAC, 0x43, 0xE3, 0xBA, 0xB8, 0x43, 0xE3, + 0xBC, 0x9B, 0x43, 0xE3, 0xBF, 0xBC, 0x43, 0xE4, + 0x80, 0x88, 0x43, 0xE4, 0x80, 0x98, 0x43, 0xE4, + 0x80, 0xB9, 0x43, 0xE4, 0x81, 0x86, 0x43, 0xE4, + 0x82, 0x96, 0x43, 0xE4, 0x83, 0xA3, 0x43, 0xE4, + // Bytes 740 - 77f + 0x84, 0xAF, 0x43, 0xE4, 0x88, 0x82, 0x43, 0xE4, + 0x88, 0xA7, 0x43, 0xE4, 0x8A, 0xA0, 0x43, 0xE4, + 0x8C, 0x81, 0x43, 0xE4, 0x8C, 0xB4, 0x43, 0xE4, + 0x8D, 0x99, 0x43, 0xE4, 0x8F, 0x95, 0x43, 0xE4, + 0x8F, 0x99, 0x43, 0xE4, 0x90, 0x8B, 0x43, 0xE4, + 0x91, 0xAB, 0x43, 0xE4, 0x94, 0xAB, 0x43, 0xE4, + 0x95, 0x9D, 0x43, 0xE4, 0x95, 0xA1, 0x43, 0xE4, + 0x95, 0xAB, 0x43, 0xE4, 0x97, 0x97, 0x43, 0xE4, + // Bytes 780 - 7bf + 0x97, 0xB9, 0x43, 0xE4, 0x98, 0xB5, 0x43, 0xE4, + 0x9A, 0xBE, 0x43, 0xE4, 0x9B, 0x87, 0x43, 0xE4, + 0xA6, 0x95, 0x43, 0xE4, 0xA7, 0xA6, 0x43, 0xE4, + 0xA9, 0xAE, 0x43, 0xE4, 0xA9, 0xB6, 0x43, 0xE4, + 0xAA, 0xB2, 0x43, 0xE4, 0xAC, 0xB3, 0x43, 0xE4, + 0xAF, 0x8E, 0x43, 0xE4, 0xB3, 0x8E, 0x43, 0xE4, + 0xB3, 0xAD, 0x43, 0xE4, 0xB3, 0xB8, 0x43, 0xE4, + 0xB5, 0x96, 0x43, 0xE4, 0xB8, 0x80, 0x43, 0xE4, + // Bytes 7c0 - 7ff + 0xB8, 0x81, 0x43, 0xE4, 0xB8, 0x83, 0x43, 0xE4, + 0xB8, 0x89, 0x43, 0xE4, 0xB8, 0x8A, 0x43, 0xE4, + 0xB8, 0x8B, 0x43, 0xE4, 0xB8, 0x8D, 0x43, 0xE4, + 0xB8, 0x99, 0x43, 0xE4, 0xB8, 0xA6, 0x43, 0xE4, + 0xB8, 0xA8, 0x43, 0xE4, 0xB8, 0xAD, 0x43, 0xE4, + 0xB8, 0xB2, 0x43, 0xE4, 0xB8, 0xB6, 0x43, 0xE4, + 0xB8, 0xB8, 0x43, 0xE4, 0xB8, 
0xB9, 0x43, 0xE4, + 0xB8, 0xBD, 0x43, 0xE4, 0xB8, 0xBF, 0x43, 0xE4, + // Bytes 800 - 83f + 0xB9, 0x81, 0x43, 0xE4, 0xB9, 0x99, 0x43, 0xE4, + 0xB9, 0x9D, 0x43, 0xE4, 0xBA, 0x82, 0x43, 0xE4, + 0xBA, 0x85, 0x43, 0xE4, 0xBA, 0x86, 0x43, 0xE4, + 0xBA, 0x8C, 0x43, 0xE4, 0xBA, 0x94, 0x43, 0xE4, + 0xBA, 0xA0, 0x43, 0xE4, 0xBA, 0xA4, 0x43, 0xE4, + 0xBA, 0xAE, 0x43, 0xE4, 0xBA, 0xBA, 0x43, 0xE4, + 0xBB, 0x80, 0x43, 0xE4, 0xBB, 0x8C, 0x43, 0xE4, + 0xBB, 0xA4, 0x43, 0xE4, 0xBC, 0x81, 0x43, 0xE4, + // Bytes 840 - 87f + 0xBC, 0x91, 0x43, 0xE4, 0xBD, 0xA0, 0x43, 0xE4, + 0xBE, 0x80, 0x43, 0xE4, 0xBE, 0x86, 0x43, 0xE4, + 0xBE, 0x8B, 0x43, 0xE4, 0xBE, 0xAE, 0x43, 0xE4, + 0xBE, 0xBB, 0x43, 0xE4, 0xBE, 0xBF, 0x43, 0xE5, + 0x80, 0x82, 0x43, 0xE5, 0x80, 0xAB, 0x43, 0xE5, + 0x81, 0xBA, 0x43, 0xE5, 0x82, 0x99, 0x43, 0xE5, + 0x83, 0x8F, 0x43, 0xE5, 0x83, 0x9A, 0x43, 0xE5, + 0x83, 0xA7, 0x43, 0xE5, 0x84, 0xAA, 0x43, 0xE5, + // Bytes 880 - 8bf + 0x84, 0xBF, 0x43, 0xE5, 0x85, 0x80, 0x43, 0xE5, + 0x85, 0x85, 0x43, 0xE5, 0x85, 0x8D, 0x43, 0xE5, + 0x85, 0x94, 0x43, 0xE5, 0x85, 0xA4, 0x43, 0xE5, + 0x85, 0xA5, 0x43, 0xE5, 0x85, 0xA7, 0x43, 0xE5, + 0x85, 0xA8, 0x43, 0xE5, 0x85, 0xA9, 0x43, 0xE5, + 0x85, 0xAB, 0x43, 0xE5, 0x85, 0xAD, 0x43, 0xE5, + 0x85, 0xB7, 0x43, 0xE5, 0x86, 0x80, 0x43, 0xE5, + 0x86, 0x82, 0x43, 0xE5, 0x86, 0x8D, 0x43, 0xE5, + // Bytes 8c0 - 8ff + 0x86, 0x92, 0x43, 0xE5, 0x86, 0x95, 0x43, 0xE5, + 0x86, 0x96, 0x43, 0xE5, 0x86, 0x97, 0x43, 0xE5, + 0x86, 0x99, 0x43, 0xE5, 0x86, 0xA4, 0x43, 0xE5, + 0x86, 0xAB, 0x43, 0xE5, 0x86, 0xAC, 0x43, 0xE5, + 0x86, 0xB5, 0x43, 0xE5, 0x86, 0xB7, 0x43, 0xE5, + 0x87, 0x89, 0x43, 0xE5, 0x87, 0x8C, 0x43, 0xE5, + 0x87, 0x9C, 0x43, 0xE5, 0x87, 0x9E, 0x43, 0xE5, + 0x87, 0xA0, 0x43, 0xE5, 0x87, 0xB5, 0x43, 0xE5, + // Bytes 900 - 93f + 0x88, 0x80, 0x43, 0xE5, 0x88, 0x83, 0x43, 0xE5, + 0x88, 0x87, 0x43, 0xE5, 0x88, 0x97, 0x43, 0xE5, + 0x88, 0x9D, 0x43, 0xE5, 0x88, 0xA9, 0x43, 0xE5, + 0x88, 0xBA, 0x43, 0xE5, 0x88, 0xBB, 0x43, 0xE5, + 0x89, 0x86, 0x43, 0xE5, 
0x89, 0x8D, 0x43, 0xE5, + 0x89, 0xB2, 0x43, 0xE5, 0x89, 0xB7, 0x43, 0xE5, + 0x8A, 0x89, 0x43, 0xE5, 0x8A, 0x9B, 0x43, 0xE5, + 0x8A, 0xA3, 0x43, 0xE5, 0x8A, 0xB3, 0x43, 0xE5, + // Bytes 940 - 97f + 0x8A, 0xB4, 0x43, 0xE5, 0x8B, 0x87, 0x43, 0xE5, + 0x8B, 0x89, 0x43, 0xE5, 0x8B, 0x92, 0x43, 0xE5, + 0x8B, 0x9E, 0x43, 0xE5, 0x8B, 0xA4, 0x43, 0xE5, + 0x8B, 0xB5, 0x43, 0xE5, 0x8B, 0xB9, 0x43, 0xE5, + 0x8B, 0xBA, 0x43, 0xE5, 0x8C, 0x85, 0x43, 0xE5, + 0x8C, 0x86, 0x43, 0xE5, 0x8C, 0x95, 0x43, 0xE5, + 0x8C, 0x97, 0x43, 0xE5, 0x8C, 0x9A, 0x43, 0xE5, + 0x8C, 0xB8, 0x43, 0xE5, 0x8C, 0xBB, 0x43, 0xE5, + // Bytes 980 - 9bf + 0x8C, 0xBF, 0x43, 0xE5, 0x8D, 0x81, 0x43, 0xE5, + 0x8D, 0x84, 0x43, 0xE5, 0x8D, 0x85, 0x43, 0xE5, + 0x8D, 0x89, 0x43, 0xE5, 0x8D, 0x91, 0x43, 0xE5, + 0x8D, 0x94, 0x43, 0xE5, 0x8D, 0x9A, 0x43, 0xE5, + 0x8D, 0x9C, 0x43, 0xE5, 0x8D, 0xA9, 0x43, 0xE5, + 0x8D, 0xB0, 0x43, 0xE5, 0x8D, 0xB3, 0x43, 0xE5, + 0x8D, 0xB5, 0x43, 0xE5, 0x8D, 0xBD, 0x43, 0xE5, + 0x8D, 0xBF, 0x43, 0xE5, 0x8E, 0x82, 0x43, 0xE5, + // Bytes 9c0 - 9ff + 0x8E, 0xB6, 0x43, 0xE5, 0x8F, 0x83, 0x43, 0xE5, + 0x8F, 0x88, 0x43, 0xE5, 0x8F, 0x8A, 0x43, 0xE5, + 0x8F, 0x8C, 0x43, 0xE5, 0x8F, 0x9F, 0x43, 0xE5, + 0x8F, 0xA3, 0x43, 0xE5, 0x8F, 0xA5, 0x43, 0xE5, + 0x8F, 0xAB, 0x43, 0xE5, 0x8F, 0xAF, 0x43, 0xE5, + 0x8F, 0xB1, 0x43, 0xE5, 0x8F, 0xB3, 0x43, 0xE5, + 0x90, 0x86, 0x43, 0xE5, 0x90, 0x88, 0x43, 0xE5, + 0x90, 0x8D, 0x43, 0xE5, 0x90, 0x8F, 0x43, 0xE5, + // Bytes a00 - a3f + 0x90, 0x9D, 0x43, 0xE5, 0x90, 0xB8, 0x43, 0xE5, + 0x90, 0xB9, 0x43, 0xE5, 0x91, 0x82, 0x43, 0xE5, + 0x91, 0x88, 0x43, 0xE5, 0x91, 0xA8, 0x43, 0xE5, + 0x92, 0x9E, 0x43, 0xE5, 0x92, 0xA2, 0x43, 0xE5, + 0x92, 0xBD, 0x43, 0xE5, 0x93, 0xB6, 0x43, 0xE5, + 0x94, 0x90, 0x43, 0xE5, 0x95, 0x8F, 0x43, 0xE5, + 0x95, 0x93, 0x43, 0xE5, 0x95, 0x95, 0x43, 0xE5, + 0x95, 0xA3, 0x43, 0xE5, 0x96, 0x84, 0x43, 0xE5, + // Bytes a40 - a7f + 0x96, 0x87, 0x43, 0xE5, 0x96, 0x99, 0x43, 0xE5, + 0x96, 0x9D, 0x43, 0xE5, 0x96, 0xAB, 0x43, 0xE5, + 0x96, 0xB3, 0x43, 
0xE5, 0x96, 0xB6, 0x43, 0xE5, + 0x97, 0x80, 0x43, 0xE5, 0x97, 0x82, 0x43, 0xE5, + 0x97, 0xA2, 0x43, 0xE5, 0x98, 0x86, 0x43, 0xE5, + 0x99, 0x91, 0x43, 0xE5, 0x99, 0xA8, 0x43, 0xE5, + 0x99, 0xB4, 0x43, 0xE5, 0x9B, 0x97, 0x43, 0xE5, + 0x9B, 0x9B, 0x43, 0xE5, 0x9B, 0xB9, 0x43, 0xE5, + // Bytes a80 - abf + 0x9C, 0x96, 0x43, 0xE5, 0x9C, 0x97, 0x43, 0xE5, + 0x9C, 0x9F, 0x43, 0xE5, 0x9C, 0xB0, 0x43, 0xE5, + 0x9E, 0x8B, 0x43, 0xE5, 0x9F, 0x8E, 0x43, 0xE5, + 0x9F, 0xB4, 0x43, 0xE5, 0xA0, 0x8D, 0x43, 0xE5, + 0xA0, 0xB1, 0x43, 0xE5, 0xA0, 0xB2, 0x43, 0xE5, + 0xA1, 0x80, 0x43, 0xE5, 0xA1, 0x9A, 0x43, 0xE5, + 0xA1, 0x9E, 0x43, 0xE5, 0xA2, 0xA8, 0x43, 0xE5, + 0xA2, 0xAC, 0x43, 0xE5, 0xA2, 0xB3, 0x43, 0xE5, + // Bytes ac0 - aff + 0xA3, 0x98, 0x43, 0xE5, 0xA3, 0x9F, 0x43, 0xE5, + 0xA3, 0xAB, 0x43, 0xE5, 0xA3, 0xAE, 0x43, 0xE5, + 0xA3, 0xB0, 0x43, 0xE5, 0xA3, 0xB2, 0x43, 0xE5, + 0xA3, 0xB7, 0x43, 0xE5, 0xA4, 0x82, 0x43, 0xE5, + 0xA4, 0x86, 0x43, 0xE5, 0xA4, 0x8A, 0x43, 0xE5, + 0xA4, 0x95, 0x43, 0xE5, 0xA4, 0x9A, 0x43, 0xE5, + 0xA4, 0x9C, 0x43, 0xE5, 0xA4, 0xA2, 0x43, 0xE5, + 0xA4, 0xA7, 0x43, 0xE5, 0xA4, 0xA9, 0x43, 0xE5, + // Bytes b00 - b3f + 0xA5, 0x84, 0x43, 0xE5, 0xA5, 0x88, 0x43, 0xE5, + 0xA5, 0x91, 0x43, 0xE5, 0xA5, 0x94, 0x43, 0xE5, + 0xA5, 0xA2, 0x43, 0xE5, 0xA5, 0xB3, 0x43, 0xE5, + 0xA7, 0x98, 0x43, 0xE5, 0xA7, 0xAC, 0x43, 0xE5, + 0xA8, 0x9B, 0x43, 0xE5, 0xA8, 0xA7, 0x43, 0xE5, + 0xA9, 0xA2, 0x43, 0xE5, 0xA9, 0xA6, 0x43, 0xE5, + 0xAA, 0xB5, 0x43, 0xE5, 0xAC, 0x88, 0x43, 0xE5, + 0xAC, 0xA8, 0x43, 0xE5, 0xAC, 0xBE, 0x43, 0xE5, + // Bytes b40 - b7f + 0xAD, 0x90, 0x43, 0xE5, 0xAD, 0x97, 0x43, 0xE5, + 0xAD, 0xA6, 0x43, 0xE5, 0xAE, 0x80, 0x43, 0xE5, + 0xAE, 0x85, 0x43, 0xE5, 0xAE, 0x97, 0x43, 0xE5, + 0xAF, 0x83, 0x43, 0xE5, 0xAF, 0x98, 0x43, 0xE5, + 0xAF, 0xA7, 0x43, 0xE5, 0xAF, 0xAE, 0x43, 0xE5, + 0xAF, 0xB3, 0x43, 0xE5, 0xAF, 0xB8, 0x43, 0xE5, + 0xAF, 0xBF, 0x43, 0xE5, 0xB0, 0x86, 0x43, 0xE5, + 0xB0, 0x8F, 0x43, 0xE5, 0xB0, 0xA2, 0x43, 0xE5, + // Bytes b80 - bbf + 0xB0, 0xB8, 
0x43, 0xE5, 0xB0, 0xBF, 0x43, 0xE5, + 0xB1, 0xA0, 0x43, 0xE5, 0xB1, 0xA2, 0x43, 0xE5, + 0xB1, 0xA4, 0x43, 0xE5, 0xB1, 0xA5, 0x43, 0xE5, + 0xB1, 0xAE, 0x43, 0xE5, 0xB1, 0xB1, 0x43, 0xE5, + 0xB2, 0x8D, 0x43, 0xE5, 0xB3, 0x80, 0x43, 0xE5, + 0xB4, 0x99, 0x43, 0xE5, 0xB5, 0x83, 0x43, 0xE5, + 0xB5, 0x90, 0x43, 0xE5, 0xB5, 0xAB, 0x43, 0xE5, + 0xB5, 0xAE, 0x43, 0xE5, 0xB5, 0xBC, 0x43, 0xE5, + // Bytes bc0 - bff + 0xB6, 0xB2, 0x43, 0xE5, 0xB6, 0xBA, 0x43, 0xE5, + 0xB7, 0x9B, 0x43, 0xE5, 0xB7, 0xA1, 0x43, 0xE5, + 0xB7, 0xA2, 0x43, 0xE5, 0xB7, 0xA5, 0x43, 0xE5, + 0xB7, 0xA6, 0x43, 0xE5, 0xB7, 0xB1, 0x43, 0xE5, + 0xB7, 0xBD, 0x43, 0xE5, 0xB7, 0xBE, 0x43, 0xE5, + 0xB8, 0xA8, 0x43, 0xE5, 0xB8, 0xBD, 0x43, 0xE5, + 0xB9, 0xA9, 0x43, 0xE5, 0xB9, 0xB2, 0x43, 0xE5, + 0xB9, 0xB4, 0x43, 0xE5, 0xB9, 0xBA, 0x43, 0xE5, + // Bytes c00 - c3f + 0xB9, 0xBC, 0x43, 0xE5, 0xB9, 0xBF, 0x43, 0xE5, + 0xBA, 0xA6, 0x43, 0xE5, 0xBA, 0xB0, 0x43, 0xE5, + 0xBA, 0xB3, 0x43, 0xE5, 0xBA, 0xB6, 0x43, 0xE5, + 0xBB, 0x89, 0x43, 0xE5, 0xBB, 0x8A, 0x43, 0xE5, + 0xBB, 0x92, 0x43, 0xE5, 0xBB, 0x93, 0x43, 0xE5, + 0xBB, 0x99, 0x43, 0xE5, 0xBB, 0xAC, 0x43, 0xE5, + 0xBB, 0xB4, 0x43, 0xE5, 0xBB, 0xBE, 0x43, 0xE5, + 0xBC, 0x84, 0x43, 0xE5, 0xBC, 0x8B, 0x43, 0xE5, + // Bytes c40 - c7f + 0xBC, 0x93, 0x43, 0xE5, 0xBC, 0xA2, 0x43, 0xE5, + 0xBD, 0x90, 0x43, 0xE5, 0xBD, 0x93, 0x43, 0xE5, + 0xBD, 0xA1, 0x43, 0xE5, 0xBD, 0xA2, 0x43, 0xE5, + 0xBD, 0xA9, 0x43, 0xE5, 0xBD, 0xAB, 0x43, 0xE5, + 0xBD, 0xB3, 0x43, 0xE5, 0xBE, 0x8B, 0x43, 0xE5, + 0xBE, 0x8C, 0x43, 0xE5, 0xBE, 0x97, 0x43, 0xE5, + 0xBE, 0x9A, 0x43, 0xE5, 0xBE, 0xA9, 0x43, 0xE5, + 0xBE, 0xAD, 0x43, 0xE5, 0xBF, 0x83, 0x43, 0xE5, + // Bytes c80 - cbf + 0xBF, 0x8D, 0x43, 0xE5, 0xBF, 0x97, 0x43, 0xE5, + 0xBF, 0xB5, 0x43, 0xE5, 0xBF, 0xB9, 0x43, 0xE6, + 0x80, 0x92, 0x43, 0xE6, 0x80, 0x9C, 0x43, 0xE6, + 0x81, 0xB5, 0x43, 0xE6, 0x82, 0x81, 0x43, 0xE6, + 0x82, 0x94, 0x43, 0xE6, 0x83, 0x87, 0x43, 0xE6, + 0x83, 0x98, 0x43, 0xE6, 0x83, 0xA1, 0x43, 0xE6, + 0x84, 0x88, 0x43, 0xE6, 
0x85, 0x84, 0x43, 0xE6, + 0x85, 0x88, 0x43, 0xE6, 0x85, 0x8C, 0x43, 0xE6, + // Bytes cc0 - cff + 0x85, 0x8E, 0x43, 0xE6, 0x85, 0xA0, 0x43, 0xE6, + 0x85, 0xA8, 0x43, 0xE6, 0x85, 0xBA, 0x43, 0xE6, + 0x86, 0x8E, 0x43, 0xE6, 0x86, 0x90, 0x43, 0xE6, + 0x86, 0xA4, 0x43, 0xE6, 0x86, 0xAF, 0x43, 0xE6, + 0x86, 0xB2, 0x43, 0xE6, 0x87, 0x9E, 0x43, 0xE6, + 0x87, 0xB2, 0x43, 0xE6, 0x87, 0xB6, 0x43, 0xE6, + 0x88, 0x80, 0x43, 0xE6, 0x88, 0x88, 0x43, 0xE6, + 0x88, 0x90, 0x43, 0xE6, 0x88, 0x9B, 0x43, 0xE6, + // Bytes d00 - d3f + 0x88, 0xAE, 0x43, 0xE6, 0x88, 0xB4, 0x43, 0xE6, + 0x88, 0xB6, 0x43, 0xE6, 0x89, 0x8B, 0x43, 0xE6, + 0x89, 0x93, 0x43, 0xE6, 0x89, 0x9D, 0x43, 0xE6, + 0x8A, 0x95, 0x43, 0xE6, 0x8A, 0xB1, 0x43, 0xE6, + 0x8B, 0x89, 0x43, 0xE6, 0x8B, 0x8F, 0x43, 0xE6, + 0x8B, 0x93, 0x43, 0xE6, 0x8B, 0x94, 0x43, 0xE6, + 0x8B, 0xBC, 0x43, 0xE6, 0x8B, 0xBE, 0x43, 0xE6, + 0x8C, 0x87, 0x43, 0xE6, 0x8C, 0xBD, 0x43, 0xE6, + // Bytes d40 - d7f + 0x8D, 0x90, 0x43, 0xE6, 0x8D, 0x95, 0x43, 0xE6, + 0x8D, 0xA8, 0x43, 0xE6, 0x8D, 0xBB, 0x43, 0xE6, + 0x8E, 0x83, 0x43, 0xE6, 0x8E, 0xA0, 0x43, 0xE6, + 0x8E, 0xA9, 0x43, 0xE6, 0x8F, 0x84, 0x43, 0xE6, + 0x8F, 0x85, 0x43, 0xE6, 0x8F, 0xA4, 0x43, 0xE6, + 0x90, 0x9C, 0x43, 0xE6, 0x90, 0xA2, 0x43, 0xE6, + 0x91, 0x92, 0x43, 0xE6, 0x91, 0xA9, 0x43, 0xE6, + 0x91, 0xB7, 0x43, 0xE6, 0x91, 0xBE, 0x43, 0xE6, + // Bytes d80 - dbf + 0x92, 0x9A, 0x43, 0xE6, 0x92, 0x9D, 0x43, 0xE6, + 0x93, 0x84, 0x43, 0xE6, 0x94, 0xAF, 0x43, 0xE6, + 0x94, 0xB4, 0x43, 0xE6, 0x95, 0x8F, 0x43, 0xE6, + 0x95, 0x96, 0x43, 0xE6, 0x95, 0xAC, 0x43, 0xE6, + 0x95, 0xB8, 0x43, 0xE6, 0x96, 0x87, 0x43, 0xE6, + 0x96, 0x97, 0x43, 0xE6, 0x96, 0x99, 0x43, 0xE6, + 0x96, 0xA4, 0x43, 0xE6, 0x96, 0xB0, 0x43, 0xE6, + 0x96, 0xB9, 0x43, 0xE6, 0x97, 0x85, 0x43, 0xE6, + // Bytes dc0 - dff + 0x97, 0xA0, 0x43, 0xE6, 0x97, 0xA2, 0x43, 0xE6, + 0x97, 0xA3, 0x43, 0xE6, 0x97, 0xA5, 0x43, 0xE6, + 0x98, 0x93, 0x43, 0xE6, 0x98, 0xA0, 0x43, 0xE6, + 0x99, 0x89, 0x43, 0xE6, 0x99, 0xB4, 0x43, 0xE6, + 0x9A, 0x88, 0x43, 
0xE6, 0x9A, 0x91, 0x43, 0xE6, + 0x9A, 0x9C, 0x43, 0xE6, 0x9A, 0xB4, 0x43, 0xE6, + 0x9B, 0x86, 0x43, 0xE6, 0x9B, 0xB0, 0x43, 0xE6, + 0x9B, 0xB4, 0x43, 0xE6, 0x9B, 0xB8, 0x43, 0xE6, + // Bytes e00 - e3f + 0x9C, 0x80, 0x43, 0xE6, 0x9C, 0x88, 0x43, 0xE6, + 0x9C, 0x89, 0x43, 0xE6, 0x9C, 0x97, 0x43, 0xE6, + 0x9C, 0x9B, 0x43, 0xE6, 0x9C, 0xA1, 0x43, 0xE6, + 0x9C, 0xA8, 0x43, 0xE6, 0x9D, 0x8E, 0x43, 0xE6, + 0x9D, 0x93, 0x43, 0xE6, 0x9D, 0x96, 0x43, 0xE6, + 0x9D, 0x9E, 0x43, 0xE6, 0x9D, 0xBB, 0x43, 0xE6, + 0x9E, 0x85, 0x43, 0xE6, 0x9E, 0x97, 0x43, 0xE6, + 0x9F, 0xB3, 0x43, 0xE6, 0x9F, 0xBA, 0x43, 0xE6, + // Bytes e40 - e7f + 0xA0, 0x97, 0x43, 0xE6, 0xA0, 0x9F, 0x43, 0xE6, + 0xA0, 0xAA, 0x43, 0xE6, 0xA1, 0x92, 0x43, 0xE6, + 0xA2, 0x81, 0x43, 0xE6, 0xA2, 0x85, 0x43, 0xE6, + 0xA2, 0x8E, 0x43, 0xE6, 0xA2, 0xA8, 0x43, 0xE6, + 0xA4, 0x94, 0x43, 0xE6, 0xA5, 0x82, 0x43, 0xE6, + 0xA6, 0xA3, 0x43, 0xE6, 0xA7, 0xAA, 0x43, 0xE6, + 0xA8, 0x82, 0x43, 0xE6, 0xA8, 0x93, 0x43, 0xE6, + 0xAA, 0xA8, 0x43, 0xE6, 0xAB, 0x93, 0x43, 0xE6, + // Bytes e80 - ebf + 0xAB, 0x9B, 0x43, 0xE6, 0xAC, 0x84, 0x43, 0xE6, + 0xAC, 0xA0, 0x43, 0xE6, 0xAC, 0xA1, 0x43, 0xE6, + 0xAD, 0x94, 0x43, 0xE6, 0xAD, 0xA2, 0x43, 0xE6, + 0xAD, 0xA3, 0x43, 0xE6, 0xAD, 0xB2, 0x43, 0xE6, + 0xAD, 0xB7, 0x43, 0xE6, 0xAD, 0xB9, 0x43, 0xE6, + 0xAE, 0x9F, 0x43, 0xE6, 0xAE, 0xAE, 0x43, 0xE6, + 0xAE, 0xB3, 0x43, 0xE6, 0xAE, 0xBA, 0x43, 0xE6, + 0xAE, 0xBB, 0x43, 0xE6, 0xAF, 0x8B, 0x43, 0xE6, + // Bytes ec0 - eff + 0xAF, 0x8D, 0x43, 0xE6, 0xAF, 0x94, 0x43, 0xE6, + 0xAF, 0x9B, 0x43, 0xE6, 0xB0, 0x8F, 0x43, 0xE6, + 0xB0, 0x94, 0x43, 0xE6, 0xB0, 0xB4, 0x43, 0xE6, + 0xB1, 0x8E, 0x43, 0xE6, 0xB1, 0xA7, 0x43, 0xE6, + 0xB2, 0x88, 0x43, 0xE6, 0xB2, 0xBF, 0x43, 0xE6, + 0xB3, 0x8C, 0x43, 0xE6, 0xB3, 0x8D, 0x43, 0xE6, + 0xB3, 0xA5, 0x43, 0xE6, 0xB3, 0xA8, 0x43, 0xE6, + 0xB4, 0x96, 0x43, 0xE6, 0xB4, 0x9B, 0x43, 0xE6, + // Bytes f00 - f3f + 0xB4, 0x9E, 0x43, 0xE6, 0xB4, 0xB4, 0x43, 0xE6, + 0xB4, 0xBE, 0x43, 0xE6, 0xB5, 0x81, 0x43, 0xE6, + 0xB5, 0xA9, 
0x43, 0xE6, 0xB5, 0xAA, 0x43, 0xE6, + 0xB5, 0xB7, 0x43, 0xE6, 0xB5, 0xB8, 0x43, 0xE6, + 0xB6, 0x85, 0x43, 0xE6, 0xB7, 0x8B, 0x43, 0xE6, + 0xB7, 0x9A, 0x43, 0xE6, 0xB7, 0xAA, 0x43, 0xE6, + 0xB7, 0xB9, 0x43, 0xE6, 0xB8, 0x9A, 0x43, 0xE6, + 0xB8, 0xAF, 0x43, 0xE6, 0xB9, 0xAE, 0x43, 0xE6, + // Bytes f40 - f7f + 0xBA, 0x80, 0x43, 0xE6, 0xBA, 0x9C, 0x43, 0xE6, + 0xBA, 0xBA, 0x43, 0xE6, 0xBB, 0x87, 0x43, 0xE6, + 0xBB, 0x8B, 0x43, 0xE6, 0xBB, 0x91, 0x43, 0xE6, + 0xBB, 0x9B, 0x43, 0xE6, 0xBC, 0x8F, 0x43, 0xE6, + 0xBC, 0x94, 0x43, 0xE6, 0xBC, 0xA2, 0x43, 0xE6, + 0xBC, 0xA3, 0x43, 0xE6, 0xBD, 0xAE, 0x43, 0xE6, + 0xBF, 0x86, 0x43, 0xE6, 0xBF, 0xAB, 0x43, 0xE6, + 0xBF, 0xBE, 0x43, 0xE7, 0x80, 0x9B, 0x43, 0xE7, + // Bytes f80 - fbf + 0x80, 0x9E, 0x43, 0xE7, 0x80, 0xB9, 0x43, 0xE7, + 0x81, 0x8A, 0x43, 0xE7, 0x81, 0xAB, 0x43, 0xE7, + 0x81, 0xB0, 0x43, 0xE7, 0x81, 0xB7, 0x43, 0xE7, + 0x81, 0xBD, 0x43, 0xE7, 0x82, 0x99, 0x43, 0xE7, + 0x82, 0xAD, 0x43, 0xE7, 0x83, 0x88, 0x43, 0xE7, + 0x83, 0x99, 0x43, 0xE7, 0x84, 0xA1, 0x43, 0xE7, + 0x85, 0x85, 0x43, 0xE7, 0x85, 0x89, 0x43, 0xE7, + 0x85, 0xAE, 0x43, 0xE7, 0x86, 0x9C, 0x43, 0xE7, + // Bytes fc0 - fff + 0x87, 0x8E, 0x43, 0xE7, 0x87, 0x90, 0x43, 0xE7, + 0x88, 0x90, 0x43, 0xE7, 0x88, 0x9B, 0x43, 0xE7, + 0x88, 0xA8, 0x43, 0xE7, 0x88, 0xAA, 0x43, 0xE7, + 0x88, 0xAB, 0x43, 0xE7, 0x88, 0xB5, 0x43, 0xE7, + 0x88, 0xB6, 0x43, 0xE7, 0x88, 0xBB, 0x43, 0xE7, + 0x88, 0xBF, 0x43, 0xE7, 0x89, 0x87, 0x43, 0xE7, + 0x89, 0x90, 0x43, 0xE7, 0x89, 0x99, 0x43, 0xE7, + 0x89, 0x9B, 0x43, 0xE7, 0x89, 0xA2, 0x43, 0xE7, + // Bytes 1000 - 103f + 0x89, 0xB9, 0x43, 0xE7, 0x8A, 0x80, 0x43, 0xE7, + 0x8A, 0x95, 0x43, 0xE7, 0x8A, 0xAC, 0x43, 0xE7, + 0x8A, 0xAF, 0x43, 0xE7, 0x8B, 0x80, 0x43, 0xE7, + 0x8B, 0xBC, 0x43, 0xE7, 0x8C, 0xAA, 0x43, 0xE7, + 0x8D, 0xB5, 0x43, 0xE7, 0x8D, 0xBA, 0x43, 0xE7, + 0x8E, 0x84, 0x43, 0xE7, 0x8E, 0x87, 0x43, 0xE7, + 0x8E, 0x89, 0x43, 0xE7, 0x8E, 0x8B, 0x43, 0xE7, + 0x8E, 0xA5, 0x43, 0xE7, 0x8E, 0xB2, 0x43, 0xE7, + // Bytes 1040 - 107f + 
0x8F, 0x9E, 0x43, 0xE7, 0x90, 0x86, 0x43, 0xE7, + 0x90, 0x89, 0x43, 0xE7, 0x90, 0xA2, 0x43, 0xE7, + 0x91, 0x87, 0x43, 0xE7, 0x91, 0x9C, 0x43, 0xE7, + 0x91, 0xA9, 0x43, 0xE7, 0x91, 0xB1, 0x43, 0xE7, + 0x92, 0x85, 0x43, 0xE7, 0x92, 0x89, 0x43, 0xE7, + 0x92, 0x98, 0x43, 0xE7, 0x93, 0x8A, 0x43, 0xE7, + 0x93, 0x9C, 0x43, 0xE7, 0x93, 0xA6, 0x43, 0xE7, + 0x94, 0x86, 0x43, 0xE7, 0x94, 0x98, 0x43, 0xE7, + // Bytes 1080 - 10bf + 0x94, 0x9F, 0x43, 0xE7, 0x94, 0xA4, 0x43, 0xE7, + 0x94, 0xA8, 0x43, 0xE7, 0x94, 0xB0, 0x43, 0xE7, + 0x94, 0xB2, 0x43, 0xE7, 0x94, 0xB3, 0x43, 0xE7, + 0x94, 0xB7, 0x43, 0xE7, 0x94, 0xBB, 0x43, 0xE7, + 0x94, 0xBE, 0x43, 0xE7, 0x95, 0x99, 0x43, 0xE7, + 0x95, 0xA5, 0x43, 0xE7, 0x95, 0xB0, 0x43, 0xE7, + 0x96, 0x8B, 0x43, 0xE7, 0x96, 0x92, 0x43, 0xE7, + 0x97, 0xA2, 0x43, 0xE7, 0x98, 0x90, 0x43, 0xE7, + // Bytes 10c0 - 10ff + 0x98, 0x9D, 0x43, 0xE7, 0x98, 0x9F, 0x43, 0xE7, + 0x99, 0x82, 0x43, 0xE7, 0x99, 0xA9, 0x43, 0xE7, + 0x99, 0xB6, 0x43, 0xE7, 0x99, 0xBD, 0x43, 0xE7, + 0x9A, 0xAE, 0x43, 0xE7, 0x9A, 0xBF, 0x43, 0xE7, + 0x9B, 0x8A, 0x43, 0xE7, 0x9B, 0x9B, 0x43, 0xE7, + 0x9B, 0xA3, 0x43, 0xE7, 0x9B, 0xA7, 0x43, 0xE7, + 0x9B, 0xAE, 0x43, 0xE7, 0x9B, 0xB4, 0x43, 0xE7, + 0x9C, 0x81, 0x43, 0xE7, 0x9C, 0x9E, 0x43, 0xE7, + // Bytes 1100 - 113f + 0x9C, 0x9F, 0x43, 0xE7, 0x9D, 0x80, 0x43, 0xE7, + 0x9D, 0x8A, 0x43, 0xE7, 0x9E, 0x8B, 0x43, 0xE7, + 0x9E, 0xA7, 0x43, 0xE7, 0x9F, 0x9B, 0x43, 0xE7, + 0x9F, 0xA2, 0x43, 0xE7, 0x9F, 0xB3, 0x43, 0xE7, + 0xA1, 0x8E, 0x43, 0xE7, 0xA1, 0xAB, 0x43, 0xE7, + 0xA2, 0x8C, 0x43, 0xE7, 0xA2, 0x91, 0x43, 0xE7, + 0xA3, 0x8A, 0x43, 0xE7, 0xA3, 0x8C, 0x43, 0xE7, + 0xA3, 0xBB, 0x43, 0xE7, 0xA4, 0xAA, 0x43, 0xE7, + // Bytes 1140 - 117f + 0xA4, 0xBA, 0x43, 0xE7, 0xA4, 0xBC, 0x43, 0xE7, + 0xA4, 0xBE, 0x43, 0xE7, 0xA5, 0x88, 0x43, 0xE7, + 0xA5, 0x89, 0x43, 0xE7, 0xA5, 0x90, 0x43, 0xE7, + 0xA5, 0x96, 0x43, 0xE7, 0xA5, 0x9D, 0x43, 0xE7, + 0xA5, 0x9E, 0x43, 0xE7, 0xA5, 0xA5, 0x43, 0xE7, + 0xA5, 0xBF, 0x43, 0xE7, 0xA6, 0x81, 0x43, 0xE7, + 0xA6, 
0x8D, 0x43, 0xE7, 0xA6, 0x8E, 0x43, 0xE7, + 0xA6, 0x8F, 0x43, 0xE7, 0xA6, 0xAE, 0x43, 0xE7, + // Bytes 1180 - 11bf + 0xA6, 0xB8, 0x43, 0xE7, 0xA6, 0xBE, 0x43, 0xE7, + 0xA7, 0x8A, 0x43, 0xE7, 0xA7, 0x98, 0x43, 0xE7, + 0xA7, 0xAB, 0x43, 0xE7, 0xA8, 0x9C, 0x43, 0xE7, + 0xA9, 0x80, 0x43, 0xE7, 0xA9, 0x8A, 0x43, 0xE7, + 0xA9, 0x8F, 0x43, 0xE7, 0xA9, 0xB4, 0x43, 0xE7, + 0xA9, 0xBA, 0x43, 0xE7, 0xAA, 0x81, 0x43, 0xE7, + 0xAA, 0xB1, 0x43, 0xE7, 0xAB, 0x8B, 0x43, 0xE7, + 0xAB, 0xAE, 0x43, 0xE7, 0xAB, 0xB9, 0x43, 0xE7, + // Bytes 11c0 - 11ff + 0xAC, 0xA0, 0x43, 0xE7, 0xAE, 0x8F, 0x43, 0xE7, + 0xAF, 0x80, 0x43, 0xE7, 0xAF, 0x86, 0x43, 0xE7, + 0xAF, 0x89, 0x43, 0xE7, 0xB0, 0xBE, 0x43, 0xE7, + 0xB1, 0xA0, 0x43, 0xE7, 0xB1, 0xB3, 0x43, 0xE7, + 0xB1, 0xBB, 0x43, 0xE7, 0xB2, 0x92, 0x43, 0xE7, + 0xB2, 0xBE, 0x43, 0xE7, 0xB3, 0x92, 0x43, 0xE7, + 0xB3, 0x96, 0x43, 0xE7, 0xB3, 0xA3, 0x43, 0xE7, + 0xB3, 0xA7, 0x43, 0xE7, 0xB3, 0xA8, 0x43, 0xE7, + // Bytes 1200 - 123f + 0xB3, 0xB8, 0x43, 0xE7, 0xB4, 0x80, 0x43, 0xE7, + 0xB4, 0x90, 0x43, 0xE7, 0xB4, 0xA2, 0x43, 0xE7, + 0xB4, 0xAF, 0x43, 0xE7, 0xB5, 0x82, 0x43, 0xE7, + 0xB5, 0x9B, 0x43, 0xE7, 0xB5, 0xA3, 0x43, 0xE7, + 0xB6, 0xA0, 0x43, 0xE7, 0xB6, 0xBE, 0x43, 0xE7, + 0xB7, 0x87, 0x43, 0xE7, 0xB7, 0xB4, 0x43, 0xE7, + 0xB8, 0x82, 0x43, 0xE7, 0xB8, 0x89, 0x43, 0xE7, + 0xB8, 0xB7, 0x43, 0xE7, 0xB9, 0x81, 0x43, 0xE7, + // Bytes 1240 - 127f + 0xB9, 0x85, 0x43, 0xE7, 0xBC, 0xB6, 0x43, 0xE7, + 0xBC, 0xBE, 0x43, 0xE7, 0xBD, 0x91, 0x43, 0xE7, + 0xBD, 0xB2, 0x43, 0xE7, 0xBD, 0xB9, 0x43, 0xE7, + 0xBD, 0xBA, 0x43, 0xE7, 0xBE, 0x85, 0x43, 0xE7, + 0xBE, 0x8A, 0x43, 0xE7, 0xBE, 0x95, 0x43, 0xE7, + 0xBE, 0x9A, 0x43, 0xE7, 0xBE, 0xBD, 0x43, 0xE7, + 0xBF, 0xBA, 0x43, 0xE8, 0x80, 0x81, 0x43, 0xE8, + 0x80, 0x85, 0x43, 0xE8, 0x80, 0x8C, 0x43, 0xE8, + // Bytes 1280 - 12bf + 0x80, 0x92, 0x43, 0xE8, 0x80, 0xB3, 0x43, 0xE8, + 0x81, 0x86, 0x43, 0xE8, 0x81, 0xA0, 0x43, 0xE8, + 0x81, 0xAF, 0x43, 0xE8, 0x81, 0xB0, 0x43, 0xE8, + 0x81, 0xBE, 0x43, 0xE8, 0x81, 0xBF, 
0x43, 0xE8, + 0x82, 0x89, 0x43, 0xE8, 0x82, 0x8B, 0x43, 0xE8, + 0x82, 0xAD, 0x43, 0xE8, 0x82, 0xB2, 0x43, 0xE8, + 0x84, 0x83, 0x43, 0xE8, 0x84, 0xBE, 0x43, 0xE8, + 0x87, 0x98, 0x43, 0xE8, 0x87, 0xA3, 0x43, 0xE8, + // Bytes 12c0 - 12ff + 0x87, 0xA8, 0x43, 0xE8, 0x87, 0xAA, 0x43, 0xE8, + 0x87, 0xAD, 0x43, 0xE8, 0x87, 0xB3, 0x43, 0xE8, + 0x87, 0xBC, 0x43, 0xE8, 0x88, 0x81, 0x43, 0xE8, + 0x88, 0x84, 0x43, 0xE8, 0x88, 0x8C, 0x43, 0xE8, + 0x88, 0x98, 0x43, 0xE8, 0x88, 0x9B, 0x43, 0xE8, + 0x88, 0x9F, 0x43, 0xE8, 0x89, 0xAE, 0x43, 0xE8, + 0x89, 0xAF, 0x43, 0xE8, 0x89, 0xB2, 0x43, 0xE8, + 0x89, 0xB8, 0x43, 0xE8, 0x89, 0xB9, 0x43, 0xE8, + // Bytes 1300 - 133f + 0x8A, 0x8B, 0x43, 0xE8, 0x8A, 0x91, 0x43, 0xE8, + 0x8A, 0x9D, 0x43, 0xE8, 0x8A, 0xB1, 0x43, 0xE8, + 0x8A, 0xB3, 0x43, 0xE8, 0x8A, 0xBD, 0x43, 0xE8, + 0x8B, 0xA5, 0x43, 0xE8, 0x8B, 0xA6, 0x43, 0xE8, + 0x8C, 0x9D, 0x43, 0xE8, 0x8C, 0xA3, 0x43, 0xE8, + 0x8C, 0xB6, 0x43, 0xE8, 0x8D, 0x92, 0x43, 0xE8, + 0x8D, 0x93, 0x43, 0xE8, 0x8D, 0xA3, 0x43, 0xE8, + 0x8E, 0xAD, 0x43, 0xE8, 0x8E, 0xBD, 0x43, 0xE8, + // Bytes 1340 - 137f + 0x8F, 0x89, 0x43, 0xE8, 0x8F, 0x8A, 0x43, 0xE8, + 0x8F, 0x8C, 0x43, 0xE8, 0x8F, 0x9C, 0x43, 0xE8, + 0x8F, 0xA7, 0x43, 0xE8, 0x8F, 0xAF, 0x43, 0xE8, + 0x8F, 0xB1, 0x43, 0xE8, 0x90, 0xBD, 0x43, 0xE8, + 0x91, 0x89, 0x43, 0xE8, 0x91, 0x97, 0x43, 0xE8, + 0x93, 0xAE, 0x43, 0xE8, 0x93, 0xB1, 0x43, 0xE8, + 0x93, 0xB3, 0x43, 0xE8, 0x93, 0xBC, 0x43, 0xE8, + 0x94, 0x96, 0x43, 0xE8, 0x95, 0xA4, 0x43, 0xE8, + // Bytes 1380 - 13bf + 0x97, 0x8D, 0x43, 0xE8, 0x97, 0xBA, 0x43, 0xE8, + 0x98, 0x86, 0x43, 0xE8, 0x98, 0x92, 0x43, 0xE8, + 0x98, 0xAD, 0x43, 0xE8, 0x98, 0xBF, 0x43, 0xE8, + 0x99, 0x8D, 0x43, 0xE8, 0x99, 0x90, 0x43, 0xE8, + 0x99, 0x9C, 0x43, 0xE8, 0x99, 0xA7, 0x43, 0xE8, + 0x99, 0xA9, 0x43, 0xE8, 0x99, 0xAB, 0x43, 0xE8, + 0x9A, 0x88, 0x43, 0xE8, 0x9A, 0xA9, 0x43, 0xE8, + 0x9B, 0xA2, 0x43, 0xE8, 0x9C, 0x8E, 0x43, 0xE8, + // Bytes 13c0 - 13ff + 0x9C, 0xA8, 0x43, 0xE8, 0x9D, 0xAB, 0x43, 0xE8, + 0x9D, 0xB9, 0x43, 
0xE8, 0x9E, 0x86, 0x43, 0xE8, + 0x9E, 0xBA, 0x43, 0xE8, 0x9F, 0xA1, 0x43, 0xE8, + 0xA0, 0x81, 0x43, 0xE8, 0xA0, 0x9F, 0x43, 0xE8, + 0xA1, 0x80, 0x43, 0xE8, 0xA1, 0x8C, 0x43, 0xE8, + 0xA1, 0xA0, 0x43, 0xE8, 0xA1, 0xA3, 0x43, 0xE8, + 0xA3, 0x82, 0x43, 0xE8, 0xA3, 0x8F, 0x43, 0xE8, + 0xA3, 0x97, 0x43, 0xE8, 0xA3, 0x9E, 0x43, 0xE8, + // Bytes 1400 - 143f + 0xA3, 0xA1, 0x43, 0xE8, 0xA3, 0xB8, 0x43, 0xE8, + 0xA3, 0xBA, 0x43, 0xE8, 0xA4, 0x90, 0x43, 0xE8, + 0xA5, 0x81, 0x43, 0xE8, 0xA5, 0xA4, 0x43, 0xE8, + 0xA5, 0xBE, 0x43, 0xE8, 0xA6, 0x86, 0x43, 0xE8, + 0xA6, 0x8B, 0x43, 0xE8, 0xA6, 0x96, 0x43, 0xE8, + 0xA7, 0x92, 0x43, 0xE8, 0xA7, 0xA3, 0x43, 0xE8, + 0xA8, 0x80, 0x43, 0xE8, 0xAA, 0xA0, 0x43, 0xE8, + 0xAA, 0xAA, 0x43, 0xE8, 0xAA, 0xBF, 0x43, 0xE8, + // Bytes 1440 - 147f + 0xAB, 0x8B, 0x43, 0xE8, 0xAB, 0x92, 0x43, 0xE8, + 0xAB, 0x96, 0x43, 0xE8, 0xAB, 0xAD, 0x43, 0xE8, + 0xAB, 0xB8, 0x43, 0xE8, 0xAB, 0xBE, 0x43, 0xE8, + 0xAC, 0x81, 0x43, 0xE8, 0xAC, 0xB9, 0x43, 0xE8, + 0xAD, 0x98, 0x43, 0xE8, 0xAE, 0x80, 0x43, 0xE8, + 0xAE, 0x8A, 0x43, 0xE8, 0xB0, 0xB7, 0x43, 0xE8, + 0xB1, 0x86, 0x43, 0xE8, 0xB1, 0x88, 0x43, 0xE8, + 0xB1, 0x95, 0x43, 0xE8, 0xB1, 0xB8, 0x43, 0xE8, + // Bytes 1480 - 14bf + 0xB2, 0x9D, 0x43, 0xE8, 0xB2, 0xA1, 0x43, 0xE8, + 0xB2, 0xA9, 0x43, 0xE8, 0xB2, 0xAB, 0x43, 0xE8, + 0xB3, 0x81, 0x43, 0xE8, 0xB3, 0x82, 0x43, 0xE8, + 0xB3, 0x87, 0x43, 0xE8, 0xB3, 0x88, 0x43, 0xE8, + 0xB3, 0x93, 0x43, 0xE8, 0xB4, 0x88, 0x43, 0xE8, + 0xB4, 0x9B, 0x43, 0xE8, 0xB5, 0xA4, 0x43, 0xE8, + 0xB5, 0xB0, 0x43, 0xE8, 0xB5, 0xB7, 0x43, 0xE8, + 0xB6, 0xB3, 0x43, 0xE8, 0xB6, 0xBC, 0x43, 0xE8, + // Bytes 14c0 - 14ff + 0xB7, 0x8B, 0x43, 0xE8, 0xB7, 0xAF, 0x43, 0xE8, + 0xB7, 0xB0, 0x43, 0xE8, 0xBA, 0xAB, 0x43, 0xE8, + 0xBB, 0x8A, 0x43, 0xE8, 0xBB, 0x94, 0x43, 0xE8, + 0xBC, 0xA6, 0x43, 0xE8, 0xBC, 0xAA, 0x43, 0xE8, + 0xBC, 0xB8, 0x43, 0xE8, 0xBC, 0xBB, 0x43, 0xE8, + 0xBD, 0xA2, 0x43, 0xE8, 0xBE, 0x9B, 0x43, 0xE8, + 0xBE, 0x9E, 0x43, 0xE8, 0xBE, 0xB0, 0x43, 0xE8, + 0xBE, 0xB5, 0x43, 0xE8, 
0xBE, 0xB6, 0x43, 0xE9, + // Bytes 1500 - 153f + 0x80, 0xA3, 0x43, 0xE9, 0x80, 0xB8, 0x43, 0xE9, + 0x81, 0x8A, 0x43, 0xE9, 0x81, 0xA9, 0x43, 0xE9, + 0x81, 0xB2, 0x43, 0xE9, 0x81, 0xBC, 0x43, 0xE9, + 0x82, 0x8F, 0x43, 0xE9, 0x82, 0x91, 0x43, 0xE9, + 0x82, 0x94, 0x43, 0xE9, 0x83, 0x8E, 0x43, 0xE9, + 0x83, 0x9E, 0x43, 0xE9, 0x83, 0xB1, 0x43, 0xE9, + 0x83, 0xBD, 0x43, 0xE9, 0x84, 0x91, 0x43, 0xE9, + 0x84, 0x9B, 0x43, 0xE9, 0x85, 0x89, 0x43, 0xE9, + // Bytes 1540 - 157f + 0x85, 0x8D, 0x43, 0xE9, 0x85, 0xAA, 0x43, 0xE9, + 0x86, 0x99, 0x43, 0xE9, 0x86, 0xB4, 0x43, 0xE9, + 0x87, 0x86, 0x43, 0xE9, 0x87, 0x8C, 0x43, 0xE9, + 0x87, 0x8F, 0x43, 0xE9, 0x87, 0x91, 0x43, 0xE9, + 0x88, 0xB4, 0x43, 0xE9, 0x88, 0xB8, 0x43, 0xE9, + 0x89, 0xB6, 0x43, 0xE9, 0x89, 0xBC, 0x43, 0xE9, + 0x8B, 0x97, 0x43, 0xE9, 0x8B, 0x98, 0x43, 0xE9, + 0x8C, 0x84, 0x43, 0xE9, 0x8D, 0x8A, 0x43, 0xE9, + // Bytes 1580 - 15bf + 0x8F, 0xB9, 0x43, 0xE9, 0x90, 0x95, 0x43, 0xE9, + 0x95, 0xB7, 0x43, 0xE9, 0x96, 0x80, 0x43, 0xE9, + 0x96, 0x8B, 0x43, 0xE9, 0x96, 0xAD, 0x43, 0xE9, + 0x96, 0xB7, 0x43, 0xE9, 0x98, 0x9C, 0x43, 0xE9, + 0x98, 0xAE, 0x43, 0xE9, 0x99, 0x8B, 0x43, 0xE9, + 0x99, 0x8D, 0x43, 0xE9, 0x99, 0xB5, 0x43, 0xE9, + 0x99, 0xB8, 0x43, 0xE9, 0x99, 0xBC, 0x43, 0xE9, + 0x9A, 0x86, 0x43, 0xE9, 0x9A, 0xA3, 0x43, 0xE9, + // Bytes 15c0 - 15ff + 0x9A, 0xB6, 0x43, 0xE9, 0x9A, 0xB7, 0x43, 0xE9, + 0x9A, 0xB8, 0x43, 0xE9, 0x9A, 0xB9, 0x43, 0xE9, + 0x9B, 0x83, 0x43, 0xE9, 0x9B, 0xA2, 0x43, 0xE9, + 0x9B, 0xA3, 0x43, 0xE9, 0x9B, 0xA8, 0x43, 0xE9, + 0x9B, 0xB6, 0x43, 0xE9, 0x9B, 0xB7, 0x43, 0xE9, + 0x9C, 0xA3, 0x43, 0xE9, 0x9C, 0xB2, 0x43, 0xE9, + 0x9D, 0x88, 0x43, 0xE9, 0x9D, 0x91, 0x43, 0xE9, + 0x9D, 0x96, 0x43, 0xE9, 0x9D, 0x9E, 0x43, 0xE9, + // Bytes 1600 - 163f + 0x9D, 0xA2, 0x43, 0xE9, 0x9D, 0xA9, 0x43, 0xE9, + 0x9F, 0x8B, 0x43, 0xE9, 0x9F, 0x9B, 0x43, 0xE9, + 0x9F, 0xA0, 0x43, 0xE9, 0x9F, 0xAD, 0x43, 0xE9, + 0x9F, 0xB3, 0x43, 0xE9, 0x9F, 0xBF, 0x43, 0xE9, + 0xA0, 0x81, 0x43, 0xE9, 0xA0, 0x85, 0x43, 0xE9, + 0xA0, 
0x8B, 0x43, 0xE9, 0xA0, 0x98, 0x43, 0xE9, + 0xA0, 0xA9, 0x43, 0xE9, 0xA0, 0xBB, 0x43, 0xE9, + 0xA1, 0x9E, 0x43, 0xE9, 0xA2, 0xA8, 0x43, 0xE9, + // Bytes 1640 - 167f + 0xA3, 0x9B, 0x43, 0xE9, 0xA3, 0x9F, 0x43, 0xE9, + 0xA3, 0xA2, 0x43, 0xE9, 0xA3, 0xAF, 0x43, 0xE9, + 0xA3, 0xBC, 0x43, 0xE9, 0xA4, 0xA8, 0x43, 0xE9, + 0xA4, 0xA9, 0x43, 0xE9, 0xA6, 0x96, 0x43, 0xE9, + 0xA6, 0x99, 0x43, 0xE9, 0xA6, 0xA7, 0x43, 0xE9, + 0xA6, 0xAC, 0x43, 0xE9, 0xA7, 0x82, 0x43, 0xE9, + 0xA7, 0xB1, 0x43, 0xE9, 0xA7, 0xBE, 0x43, 0xE9, + 0xA9, 0xAA, 0x43, 0xE9, 0xAA, 0xA8, 0x43, 0xE9, + // Bytes 1680 - 16bf + 0xAB, 0x98, 0x43, 0xE9, 0xAB, 0x9F, 0x43, 0xE9, + 0xAC, 0x92, 0x43, 0xE9, 0xAC, 0xA5, 0x43, 0xE9, + 0xAC, 0xAF, 0x43, 0xE9, 0xAC, 0xB2, 0x43, 0xE9, + 0xAC, 0xBC, 0x43, 0xE9, 0xAD, 0x9A, 0x43, 0xE9, + 0xAD, 0xAF, 0x43, 0xE9, 0xB1, 0x80, 0x43, 0xE9, + 0xB1, 0x97, 0x43, 0xE9, 0xB3, 0xA5, 0x43, 0xE9, + 0xB3, 0xBD, 0x43, 0xE9, 0xB5, 0xA7, 0x43, 0xE9, + 0xB6, 0xB4, 0x43, 0xE9, 0xB7, 0xBA, 0x43, 0xE9, + // Bytes 16c0 - 16ff + 0xB8, 0x9E, 0x43, 0xE9, 0xB9, 0xB5, 0x43, 0xE9, + 0xB9, 0xBF, 0x43, 0xE9, 0xBA, 0x97, 0x43, 0xE9, + 0xBA, 0x9F, 0x43, 0xE9, 0xBA, 0xA5, 0x43, 0xE9, + 0xBA, 0xBB, 0x43, 0xE9, 0xBB, 0x83, 0x43, 0xE9, + 0xBB, 0x8D, 0x43, 0xE9, 0xBB, 0x8E, 0x43, 0xE9, + 0xBB, 0x91, 0x43, 0xE9, 0xBB, 0xB9, 0x43, 0xE9, + 0xBB, 0xBD, 0x43, 0xE9, 0xBB, 0xBE, 0x43, 0xE9, + 0xBC, 0x85, 0x43, 0xE9, 0xBC, 0x8E, 0x43, 0xE9, + // Bytes 1700 - 173f + 0xBC, 0x8F, 0x43, 0xE9, 0xBC, 0x93, 0x43, 0xE9, + 0xBC, 0x96, 0x43, 0xE9, 0xBC, 0xA0, 0x43, 0xE9, + 0xBC, 0xBB, 0x43, 0xE9, 0xBD, 0x83, 0x43, 0xE9, + 0xBD, 0x8A, 0x43, 0xE9, 0xBD, 0x92, 0x43, 0xE9, + 0xBE, 0x8D, 0x43, 0xE9, 0xBE, 0x8E, 0x43, 0xE9, + 0xBE, 0x9C, 0x43, 0xE9, 0xBE, 0x9F, 0x43, 0xE9, + 0xBE, 0xA0, 0x43, 0xEA, 0x99, 0x91, 0x43, 0xEA, + 0x9A, 0x89, 0x43, 0xEA, 0x9C, 0xA7, 0x43, 0xEA, + // Bytes 1740 - 177f + 0x9D, 0xAF, 0x43, 0xEA, 0x9E, 0x8E, 0x43, 0xEA, + 0xAC, 0xB7, 0x43, 0xEA, 0xAD, 0x92, 0x43, 0xEA, + 0xAD, 0xA6, 0x43, 0xEA, 0xAD, 0xA7, 
0x44, 0xF0, + 0x9D, 0xBC, 0x84, 0x44, 0xF0, 0x9D, 0xBC, 0x85, + 0x44, 0xF0, 0x9D, 0xBC, 0x86, 0x44, 0xF0, 0x9D, + 0xBC, 0x88, 0x44, 0xF0, 0x9D, 0xBC, 0x8A, 0x44, + 0xF0, 0x9D, 0xBC, 0x9E, 0x44, 0xF0, 0xA0, 0x84, + 0xA2, 0x44, 0xF0, 0xA0, 0x94, 0x9C, 0x44, 0xF0, + // Bytes 1780 - 17bf + 0xA0, 0x94, 0xA5, 0x44, 0xF0, 0xA0, 0x95, 0x8B, + 0x44, 0xF0, 0xA0, 0x98, 0xBA, 0x44, 0xF0, 0xA0, + 0xA0, 0x84, 0x44, 0xF0, 0xA0, 0xA3, 0x9E, 0x44, + 0xF0, 0xA0, 0xA8, 0xAC, 0x44, 0xF0, 0xA0, 0xAD, + 0xA3, 0x44, 0xF0, 0xA1, 0x93, 0xA4, 0x44, 0xF0, + 0xA1, 0x9A, 0xA8, 0x44, 0xF0, 0xA1, 0x9B, 0xAA, + 0x44, 0xF0, 0xA1, 0xA7, 0x88, 0x44, 0xF0, 0xA1, + 0xAC, 0x98, 0x44, 0xF0, 0xA1, 0xB4, 0x8B, 0x44, + // Bytes 17c0 - 17ff + 0xF0, 0xA1, 0xB7, 0xA4, 0x44, 0xF0, 0xA1, 0xB7, + 0xA6, 0x44, 0xF0, 0xA2, 0x86, 0x83, 0x44, 0xF0, + 0xA2, 0x86, 0x9F, 0x44, 0xF0, 0xA2, 0x8C, 0xB1, + 0x44, 0xF0, 0xA2, 0x9B, 0x94, 0x44, 0xF0, 0xA2, + 0xA1, 0x84, 0x44, 0xF0, 0xA2, 0xA1, 0x8A, 0x44, + 0xF0, 0xA2, 0xAC, 0x8C, 0x44, 0xF0, 0xA2, 0xAF, + 0xB1, 0x44, 0xF0, 0xA3, 0x80, 0x8A, 0x44, 0xF0, + 0xA3, 0x8A, 0xB8, 0x44, 0xF0, 0xA3, 0x8D, 0x9F, + // Bytes 1800 - 183f + 0x44, 0xF0, 0xA3, 0x8E, 0x93, 0x44, 0xF0, 0xA3, + 0x8E, 0x9C, 0x44, 0xF0, 0xA3, 0x8F, 0x83, 0x44, + 0xF0, 0xA3, 0x8F, 0x95, 0x44, 0xF0, 0xA3, 0x91, + 0xAD, 0x44, 0xF0, 0xA3, 0x9A, 0xA3, 0x44, 0xF0, + 0xA3, 0xA2, 0xA7, 0x44, 0xF0, 0xA3, 0xAA, 0x8D, + 0x44, 0xF0, 0xA3, 0xAB, 0xBA, 0x44, 0xF0, 0xA3, + 0xB2, 0xBC, 0x44, 0xF0, 0xA3, 0xB4, 0x9E, 0x44, + 0xF0, 0xA3, 0xBB, 0x91, 0x44, 0xF0, 0xA3, 0xBD, + // Bytes 1840 - 187f + 0x9E, 0x44, 0xF0, 0xA3, 0xBE, 0x8E, 0x44, 0xF0, + 0xA4, 0x89, 0xA3, 0x44, 0xF0, 0xA4, 0x8B, 0xAE, + 0x44, 0xF0, 0xA4, 0x8E, 0xAB, 0x44, 0xF0, 0xA4, + 0x98, 0x88, 0x44, 0xF0, 0xA4, 0x9C, 0xB5, 0x44, + 0xF0, 0xA4, 0xA0, 0x94, 0x44, 0xF0, 0xA4, 0xB0, + 0xB6, 0x44, 0xF0, 0xA4, 0xB2, 0x92, 0x44, 0xF0, + 0xA4, 0xBE, 0xA1, 0x44, 0xF0, 0xA4, 0xBE, 0xB8, + 0x44, 0xF0, 0xA5, 0x81, 0x84, 0x44, 0xF0, 0xA5, + // Bytes 1880 - 18bf + 0x83, 0xB2, 0x44, 
0xF0, 0xA5, 0x83, 0xB3, 0x44, + 0xF0, 0xA5, 0x84, 0x99, 0x44, 0xF0, 0xA5, 0x84, + 0xB3, 0x44, 0xF0, 0xA5, 0x89, 0x89, 0x44, 0xF0, + 0xA5, 0x90, 0x9D, 0x44, 0xF0, 0xA5, 0x98, 0xA6, + 0x44, 0xF0, 0xA5, 0x9A, 0x9A, 0x44, 0xF0, 0xA5, + 0x9B, 0x85, 0x44, 0xF0, 0xA5, 0xA5, 0xBC, 0x44, + 0xF0, 0xA5, 0xAA, 0xA7, 0x44, 0xF0, 0xA5, 0xAE, + 0xAB, 0x44, 0xF0, 0xA5, 0xB2, 0x80, 0x44, 0xF0, + // Bytes 18c0 - 18ff + 0xA5, 0xB3, 0x90, 0x44, 0xF0, 0xA5, 0xBE, 0x86, + 0x44, 0xF0, 0xA6, 0x87, 0x9A, 0x44, 0xF0, 0xA6, + 0x88, 0xA8, 0x44, 0xF0, 0xA6, 0x89, 0x87, 0x44, + 0xF0, 0xA6, 0x8B, 0x99, 0x44, 0xF0, 0xA6, 0x8C, + 0xBE, 0x44, 0xF0, 0xA6, 0x93, 0x9A, 0x44, 0xF0, + 0xA6, 0x94, 0xA3, 0x44, 0xF0, 0xA6, 0x96, 0xA8, + 0x44, 0xF0, 0xA6, 0x9E, 0xA7, 0x44, 0xF0, 0xA6, + 0x9E, 0xB5, 0x44, 0xF0, 0xA6, 0xAC, 0xBC, 0x44, + // Bytes 1900 - 193f + 0xF0, 0xA6, 0xB0, 0xB6, 0x44, 0xF0, 0xA6, 0xB3, + 0x95, 0x44, 0xF0, 0xA6, 0xB5, 0xAB, 0x44, 0xF0, + 0xA6, 0xBC, 0xAC, 0x44, 0xF0, 0xA6, 0xBE, 0xB1, + 0x44, 0xF0, 0xA7, 0x83, 0x92, 0x44, 0xF0, 0xA7, + 0x8F, 0x8A, 0x44, 0xF0, 0xA7, 0x99, 0xA7, 0x44, + 0xF0, 0xA7, 0xA2, 0xAE, 0x44, 0xF0, 0xA7, 0xA5, + 0xA6, 0x44, 0xF0, 0xA7, 0xB2, 0xA8, 0x44, 0xF0, + 0xA7, 0xBB, 0x93, 0x44, 0xF0, 0xA7, 0xBC, 0xAF, + // Bytes 1940 - 197f + 0x44, 0xF0, 0xA8, 0x97, 0x92, 0x44, 0xF0, 0xA8, + 0x97, 0xAD, 0x44, 0xF0, 0xA8, 0x9C, 0xAE, 0x44, + 0xF0, 0xA8, 0xAF, 0xBA, 0x44, 0xF0, 0xA8, 0xB5, + 0xB7, 0x44, 0xF0, 0xA9, 0x85, 0x85, 0x44, 0xF0, + 0xA9, 0x87, 0x9F, 0x44, 0xF0, 0xA9, 0x88, 0x9A, + 0x44, 0xF0, 0xA9, 0x90, 0x8A, 0x44, 0xF0, 0xA9, + 0x92, 0x96, 0x44, 0xF0, 0xA9, 0x96, 0xB6, 0x44, + 0xF0, 0xA9, 0xAC, 0xB0, 0x44, 0xF0, 0xAA, 0x83, + // Bytes 1980 - 19bf + 0x8E, 0x44, 0xF0, 0xAA, 0x84, 0x85, 0x44, 0xF0, + 0xAA, 0x88, 0x8E, 0x44, 0xF0, 0xAA, 0x8A, 0x91, + 0x44, 0xF0, 0xAA, 0x8E, 0x92, 0x44, 0xF0, 0xAA, + 0x98, 0x80, 0x42, 0x21, 0x21, 0x42, 0x21, 0x3F, + 0x42, 0x2E, 0x2E, 0x42, 0x30, 0x2C, 0x42, 0x30, + 0x2E, 0x42, 0x31, 0x2C, 0x42, 0x31, 0x2E, 0x42, + 0x31, 0x30, 0x42, 0x31, 
0x31, 0x42, 0x31, 0x32, + 0x42, 0x31, 0x33, 0x42, 0x31, 0x34, 0x42, 0x31, + // Bytes 19c0 - 19ff + 0x35, 0x42, 0x31, 0x36, 0x42, 0x31, 0x37, 0x42, + 0x31, 0x38, 0x42, 0x31, 0x39, 0x42, 0x32, 0x2C, + 0x42, 0x32, 0x2E, 0x42, 0x32, 0x30, 0x42, 0x32, + 0x31, 0x42, 0x32, 0x32, 0x42, 0x32, 0x33, 0x42, + 0x32, 0x34, 0x42, 0x32, 0x35, 0x42, 0x32, 0x36, + 0x42, 0x32, 0x37, 0x42, 0x32, 0x38, 0x42, 0x32, + 0x39, 0x42, 0x33, 0x2C, 0x42, 0x33, 0x2E, 0x42, + 0x33, 0x30, 0x42, 0x33, 0x31, 0x42, 0x33, 0x32, + // Bytes 1a00 - 1a3f + 0x42, 0x33, 0x33, 0x42, 0x33, 0x34, 0x42, 0x33, + 0x35, 0x42, 0x33, 0x36, 0x42, 0x33, 0x37, 0x42, + 0x33, 0x38, 0x42, 0x33, 0x39, 0x42, 0x34, 0x2C, + 0x42, 0x34, 0x2E, 0x42, 0x34, 0x30, 0x42, 0x34, + 0x31, 0x42, 0x34, 0x32, 0x42, 0x34, 0x33, 0x42, + 0x34, 0x34, 0x42, 0x34, 0x35, 0x42, 0x34, 0x36, + 0x42, 0x34, 0x37, 0x42, 0x34, 0x38, 0x42, 0x34, + 0x39, 0x42, 0x35, 0x2C, 0x42, 0x35, 0x2E, 0x42, + // Bytes 1a40 - 1a7f + 0x35, 0x30, 0x42, 0x36, 0x2C, 0x42, 0x36, 0x2E, + 0x42, 0x37, 0x2C, 0x42, 0x37, 0x2E, 0x42, 0x38, + 0x2C, 0x42, 0x38, 0x2E, 0x42, 0x39, 0x2C, 0x42, + 0x39, 0x2E, 0x42, 0x3D, 0x3D, 0x42, 0x3F, 0x21, + 0x42, 0x3F, 0x3F, 0x42, 0x41, 0x55, 0x42, 0x42, + 0x71, 0x42, 0x43, 0x44, 0x42, 0x44, 0x4A, 0x42, + 0x44, 0x5A, 0x42, 0x44, 0x7A, 0x42, 0x47, 0x42, + 0x42, 0x47, 0x79, 0x42, 0x48, 0x50, 0x42, 0x48, + // Bytes 1a80 - 1abf + 0x56, 0x42, 0x48, 0x67, 0x42, 0x48, 0x7A, 0x42, + 0x49, 0x49, 0x42, 0x49, 0x4A, 0x42, 0x49, 0x55, + 0x42, 0x49, 0x56, 0x42, 0x49, 0x58, 0x42, 0x4B, + 0x42, 0x42, 0x4B, 0x4B, 0x42, 0x4B, 0x4D, 0x42, + 0x4C, 0x4A, 0x42, 0x4C, 0x6A, 0x42, 0x4D, 0x42, + 0x42, 0x4D, 0x43, 0x42, 0x4D, 0x44, 0x42, 0x4D, + 0x52, 0x42, 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42, + 0x4E, 0x4A, 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F, + // Bytes 1ac0 - 1aff + 0x42, 0x50, 0x48, 0x42, 0x50, 0x52, 0x42, 0x50, + 0x61, 0x42, 0x52, 0x73, 0x42, 0x53, 0x44, 0x42, + 0x53, 0x4D, 0x42, 0x53, 0x53, 0x42, 0x53, 0x76, + 0x42, 0x54, 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57, + 0x43, 
0x42, 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42, + 0x58, 0x49, 0x42, 0x63, 0x63, 0x42, 0x63, 0x64, + 0x42, 0x63, 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64, + 0x61, 0x42, 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42, + // Bytes 1b00 - 1b3f + 0x64, 0x7A, 0x42, 0x65, 0x56, 0x42, 0x66, 0x66, + 0x42, 0x66, 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66, + 0x6D, 0x42, 0x68, 0x61, 0x42, 0x69, 0x69, 0x42, + 0x69, 0x6A, 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76, + 0x42, 0x69, 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B, + 0x56, 0x42, 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42, + 0x6B, 0x6C, 0x42, 0x6B, 0x6D, 0x42, 0x6B, 0x74, + 0x42, 0x6C, 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C, + // Bytes 1b40 - 1b7f + 0x6E, 0x42, 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42, + 0x6D, 0x33, 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56, + 0x42, 0x6D, 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D, + 0x67, 0x42, 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42, + 0x6D, 0x73, 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46, + 0x42, 0x6E, 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E, + 0x6A, 0x42, 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42, + 0x6F, 0x56, 0x42, 0x70, 0x41, 0x42, 0x70, 0x46, + // Bytes 1b80 - 1bbf + 0x42, 0x70, 0x56, 0x42, 0x70, 0x57, 0x42, 0x70, + 0x63, 0x42, 0x70, 0x73, 0x42, 0x73, 0x72, 0x42, + 0x73, 0x74, 0x42, 0x76, 0x69, 0x42, 0x78, 0x69, + 0x43, 0x28, 0x31, 0x29, 0x43, 0x28, 0x32, 0x29, + 0x43, 0x28, 0x33, 0x29, 0x43, 0x28, 0x34, 0x29, + 0x43, 0x28, 0x35, 0x29, 0x43, 0x28, 0x36, 0x29, + 0x43, 0x28, 0x37, 0x29, 0x43, 0x28, 0x38, 0x29, + 0x43, 0x28, 0x39, 0x29, 0x43, 0x28, 0x41, 0x29, + // Bytes 1bc0 - 1bff + 0x43, 0x28, 0x42, 0x29, 0x43, 0x28, 0x43, 0x29, + 0x43, 0x28, 0x44, 0x29, 0x43, 0x28, 0x45, 0x29, + 0x43, 0x28, 0x46, 0x29, 0x43, 0x28, 0x47, 0x29, + 0x43, 0x28, 0x48, 0x29, 0x43, 0x28, 0x49, 0x29, + 0x43, 0x28, 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29, + 0x43, 0x28, 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29, + 0x43, 0x28, 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29, + 0x43, 0x28, 0x50, 0x29, 0x43, 0x28, 0x51, 0x29, + // Bytes 1c00 - 1c3f + 0x43, 0x28, 0x52, 0x29, 0x43, 0x28, 0x53, 0x29, + 0x43, 0x28, 0x54, 0x29, 0x43, 0x28, 
0x55, 0x29, + 0x43, 0x28, 0x56, 0x29, 0x43, 0x28, 0x57, 0x29, + 0x43, 0x28, 0x58, 0x29, 0x43, 0x28, 0x59, 0x29, + 0x43, 0x28, 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29, + 0x43, 0x28, 0x62, 0x29, 0x43, 0x28, 0x63, 0x29, + 0x43, 0x28, 0x64, 0x29, 0x43, 0x28, 0x65, 0x29, + 0x43, 0x28, 0x66, 0x29, 0x43, 0x28, 0x67, 0x29, + // Bytes 1c40 - 1c7f + 0x43, 0x28, 0x68, 0x29, 0x43, 0x28, 0x69, 0x29, + 0x43, 0x28, 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29, + 0x43, 0x28, 0x6C, 0x29, 0x43, 0x28, 0x6D, 0x29, + 0x43, 0x28, 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29, + 0x43, 0x28, 0x70, 0x29, 0x43, 0x28, 0x71, 0x29, + 0x43, 0x28, 0x72, 0x29, 0x43, 0x28, 0x73, 0x29, + 0x43, 0x28, 0x74, 0x29, 0x43, 0x28, 0x75, 0x29, + 0x43, 0x28, 0x76, 0x29, 0x43, 0x28, 0x77, 0x29, + // Bytes 1c80 - 1cbf + 0x43, 0x28, 0x78, 0x29, 0x43, 0x28, 0x79, 0x29, + 0x43, 0x28, 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E, + 0x43, 0x31, 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E, + 0x43, 0x31, 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E, + 0x43, 0x31, 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E, + 0x43, 0x31, 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E, + 0x43, 0x31, 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E, + 0x43, 0x32, 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D, + // Bytes 1cc0 - 1cff + 0x43, 0x3D, 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E, + 0x43, 0x46, 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A, + 0x43, 0x47, 0x50, 0x61, 0x43, 0x49, 0x49, 0x49, + 0x43, 0x4C, 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7, + 0x43, 0x4D, 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61, + 0x43, 0x4D, 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D, + 0x43, 0x50, 0x50, 0x56, 0x43, 0x50, 0x54, 0x45, + 0x43, 0x54, 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A, + // Bytes 1d00 - 1d3f + 0x43, 0x56, 0x49, 0x49, 0x43, 0x58, 0x49, 0x49, + 0x43, 0x61, 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73, + 0x43, 0x61, 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72, + 0x43, 0x63, 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75, + 0x43, 0x63, 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32, + 0x43, 0x63, 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32, + 0x43, 0x64, 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67, + 0x43, 0x66, 0x66, 0x69, 0x43, 0x66, 0x66, 
0x6C, + // Bytes 1d40 - 1d7f + 0x43, 0x67, 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61, + 0x43, 0x69, 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A, + 0x43, 0x6B, 0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32, + 0x43, 0x6B, 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9, + 0x43, 0x6C, 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7, + 0x43, 0x6D, 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32, + 0x43, 0x6D, 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C, + 0x43, 0x72, 0x61, 0x64, 0x43, 0x76, 0x69, 0x69, + // Bytes 1d80 - 1dbf + 0x43, 0x78, 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43, + 0x43, 0xC2, 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E, + 0x43, 0xCE, 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46, + 0x43, 0xCE, 0xBC, 0x56, 0x43, 0xCE, 0xBC, 0x57, + 0x43, 0xCE, 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C, + 0x43, 0xCE, 0xBC, 0x6D, 0x43, 0xCE, 0xBC, 0x73, + 0x44, 0x28, 0x31, 0x30, 0x29, 0x44, 0x28, 0x31, + 0x31, 0x29, 0x44, 0x28, 0x31, 0x32, 0x29, 0x44, + // Bytes 1dc0 - 1dff + 0x28, 0x31, 0x33, 0x29, 0x44, 0x28, 0x31, 0x34, + 0x29, 0x44, 0x28, 0x31, 0x35, 0x29, 0x44, 0x28, + 0x31, 0x36, 0x29, 0x44, 0x28, 0x31, 0x37, 0x29, + 0x44, 0x28, 0x31, 0x38, 0x29, 0x44, 0x28, 0x31, + 0x39, 0x29, 0x44, 0x28, 0x32, 0x30, 0x29, 0x44, + 0x30, 0xE7, 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81, + 0x84, 0x44, 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31, + 0xE6, 0x9C, 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9, + // Bytes 1e00 - 1e3f + 0x44, 0x32, 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6, + 0x9C, 0x88, 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44, + 0x33, 0xE6, 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C, + 0x88, 0x44, 0x33, 0xE7, 0x82, 0xB9, 0x44, 0x34, + 0xE6, 0x97, 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88, + 0x44, 0x34, 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6, + 0x97, 0xA5, 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44, + 0x35, 0xE7, 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97, + // Bytes 1e40 - 1e7f + 0xA5, 0x44, 0x36, 0xE6, 0x9C, 0x88, 0x44, 0x36, + 0xE7, 0x82, 0xB9, 0x44, 0x37, 0xE6, 0x97, 0xA5, + 0x44, 0x37, 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7, + 0x82, 0xB9, 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44, + 0x38, 0xE6, 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82, + 0xB9, 0x44, 0x39, 0xE6, 
0x97, 0xA5, 0x44, 0x39, + 0xE6, 0x9C, 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9, + 0x44, 0x56, 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E, + // Bytes 1e80 - 1ebf + 0x6D, 0x2E, 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44, + 0x70, 0x2E, 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69, + 0x69, 0x44, 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5, + 0xB4, 0xD5, 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB, + 0x44, 0xD5, 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4, + 0xD5, 0xB6, 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44, + 0xD7, 0x90, 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9, + 0xB4, 0x44, 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8, + // Bytes 1ec0 - 1eff + 0xA8, 0xD8, 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE, + 0x44, 0xD8, 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8, + 0xD8, 0xB2, 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44, + 0xD8, 0xA8, 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9, + 0x87, 0x44, 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8, + 0xA8, 0xD9, 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC, + 0x44, 0xD8, 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA, + 0xD8, 0xAE, 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44, + // Bytes 1f00 - 1f3f + 0xD8, 0xAA, 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9, + 0x85, 0x44, 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8, + 0xAA, 0xD9, 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89, + 0x44, 0xD8, 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB, + 0xD8, 0xAC, 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44, + 0xD8, 0xAB, 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9, + 0x85, 0x44, 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8, + 0xAB, 0xD9, 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89, + // Bytes 1f40 - 1f7f + 0x44, 0xD8, 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC, + 0xD8, 0xAD, 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44, + 0xD8, 0xAC, 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9, + 0x8A, 0x44, 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8, + 0xAD, 0xD9, 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89, + 0x44, 0xD8, 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE, + 0xD8, 0xAC, 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44, + 0xD8, 0xAE, 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9, + // Bytes 1f80 - 1fbf + 0x89, 0x44, 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8, + 0xB3, 0xD8, 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD, + 0x44, 0xD8, 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3, + 0xD8, 
0xB1, 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44, + 0xD8, 0xB3, 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9, + 0x89, 0x44, 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8, + 0xB4, 0xD8, 0xAC, 0x44, 0xD8, 0xB4, 0xD8, 0xAD, + 0x44, 0xD8, 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4, + // Bytes 1fc0 - 1fff + 0xD8, 0xB1, 0x44, 0xD8, 0xB4, 0xD9, 0x85, 0x44, + 0xD8, 0xB4, 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9, + 0x89, 0x44, 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8, + 0xB5, 0xD8, 0xAD, 0x44, 0xD8, 0xB5, 0xD8, 0xAE, + 0x44, 0xD8, 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5, + 0xD9, 0x85, 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44, + 0xD8, 0xB5, 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8, + 0xAC, 0x44, 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8, + // Bytes 2000 - 203f + 0xB6, 0xD8, 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1, + 0x44, 0xD8, 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6, + 0xD9, 0x89, 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44, + 0xD8, 0xB7, 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9, + 0x85, 0x44, 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8, + 0xB7, 0xD9, 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85, + 0x44, 0xD8, 0xB9, 0xD8, 0xAC, 0x44, 0xD8, 0xB9, + 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44, + // Bytes 2040 - 207f + 0xD8, 0xB9, 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8, + 0xAC, 0x44, 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8, + 0xBA, 0xD9, 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A, + 0x44, 0xD9, 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81, + 0xD8, 0xAD, 0x44, 0xD9, 0x81, 0xD8, 0xAE, 0x44, + 0xD9, 0x81, 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9, + 0x89, 0x44, 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9, + 0x82, 0xD8, 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85, + // Bytes 2080 - 20bf + 0x44, 0xD9, 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82, + 0xD9, 0x8A, 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44, + 0xD9, 0x83, 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8, + 0xAD, 0x44, 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9, + 0x83, 0xD9, 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85, + 0x44, 0xD9, 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83, + 0xD9, 0x8A, 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44, + 0xD9, 0x84, 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8, + // Bytes 20c0 - 20ff + 0xAD, 0x44, 0xD9, 0x84, 0xD8, 0xAE, 
0x44, 0xD9, + 0x84, 0xD9, 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87, + 0x44, 0xD9, 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84, + 0xD9, 0x8A, 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44, + 0xD9, 0x85, 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8, + 0xAD, 0x44, 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9, + 0x85, 0xD9, 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89, + 0x44, 0xD9, 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86, + // Bytes 2100 - 213f + 0xD8, 0xAC, 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44, + 0xD9, 0x86, 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8, + 0xB1, 0x44, 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9, + 0x86, 0xD9, 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86, + 0x44, 0xD9, 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86, + 0xD9, 0x89, 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44, + 0xD9, 0x87, 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9, + 0x85, 0x44, 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9, + // Bytes 2140 - 217f + 0x87, 0xD9, 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4, + 0x44, 0xD9, 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A, + 0xD8, 0xAD, 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44, + 0xD9, 0x8A, 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8, + 0xB2, 0x44, 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9, + 0x8A, 0xD9, 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87, + 0x44, 0xD9, 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A, + 0xD9, 0x8A, 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44, + // Bytes 2180 - 21bf + 0xDB, 0x87, 0xD9, 0xB4, 0x45, 0x28, 0xE1, 0x84, + 0x80, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x86, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x8C, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29, + // Bytes 21c0 - 21ff + 0x45, 0x28, 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x90, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x91, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29, + 0x45, 0x28, 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28, + 0xE4, 0xB8, 0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8, + 0x89, 0x29, 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29, + 0x45, 0x28, 0xE4, 0xBA, 0x8C, 0x29, 0x45, 
0x28, + 0xE4, 0xBA, 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB, + // Bytes 2200 - 223f + 0xA3, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29, + 0x45, 0x28, 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28, + 0xE5, 0x85, 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85, + 0xAD, 0x29, 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29, + 0x45, 0x28, 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28, + 0xE5, 0x8D, 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90, + 0x8D, 0x29, 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29, + 0x45, 0x28, 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28, + // Bytes 2240 - 227f + 0xE5, 0x9C, 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD, + 0xA6, 0x29, 0x45, 0x28, 0xE6, 0x97, 0xA5, 0x29, + 0x45, 0x28, 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28, + 0xE6, 0x9C, 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C, + 0xA8, 0x29, 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29, + 0x45, 0x28, 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28, + 0xE7, 0x81, 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89, + 0xB9, 0x29, 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29, + // Bytes 2280 - 22bf + 0x45, 0x28, 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28, + 0xE7, 0xA5, 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5, + 0xAD, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29, + 0x45, 0x28, 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28, + 0xE8, 0xB2, 0xA1, 0x29, 0x45, 0x28, 0xE8, 0xB3, + 0x87, 0x29, 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29, + 0x45, 0x30, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6, + // Bytes 22c0 - 22ff + 0x9C, 0x88, 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x31, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31, + // Bytes 2300 - 233f + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x36, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 
0x37, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x38, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9, + // Bytes 2340 - 237f + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x34, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39, + 0x45, 0x32, 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6, + // Bytes 2380 - 23bf + 0x97, 0xA5, 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32, + // Bytes 23c0 - 23ff + 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33, + 0x45, 0x32, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34, + 0x45, 0x33, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81, + 0x84, 0x35, 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36, + // Bytes 2400 - 243f + 0x45, 0x35, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x41, 0xE2, 0x88, + 0x95, 0x6D, 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D, + 0x45, 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31, + 0xE2, 0x81, 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2, + 0x88, 0x95, 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88, + 0x95, 0x73, 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9, + // Bytes 2440 - 247f + 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85, + 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46, + 0xD8, 
0xAA, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAA, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, + 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC, + // Bytes 2480 - 24bf + 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, + 0xAA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8, + 0xAD, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + // Bytes 24c0 - 24ff + 0xD8, 0xAD, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAD, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, + 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8, + 0xAC, 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89, + 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, + // Bytes 2500 - 253f + 0xB3, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, + 0x8A, 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE, + 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xB5, 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8, + 0xB5, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5, + // Bytes 2540 - 257f + 0xD9, 0x84, 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9, + 0x84, 0xDB, 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46, + 0xD8, 0xB7, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, + 0xB7, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8, + // Bytes 2580 
- 25bf + 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, + 0xBA, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81, + 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x81, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84, + // Bytes 25c0 - 25ff + 0xDB, 0x92, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8, + 0xAD, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x83, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAC, 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8, + 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC, + 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, + // Bytes 2600 - 263f + 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89, + 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD9, 0x84, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, + 0x84, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC, + 0xD8, 0xAE, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A, + // Bytes 2640 - 267f + 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, + 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, + 0x85, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85, + 0xD8, 0xAE, 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE, + 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9, + 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD, + 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46, + // Bytes 2680 - 26bf + 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9, + 0x86, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x8A, 
+ 0x46, 0xD9, 0x87, 0xD9, 0x85, 0xD8, 0xAC, 0x46, + 0xD9, 0x87, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + // Bytes 26c0 - 26ff + 0x8A, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, + 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, + 0xA7, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A, + // Bytes 2700 - 273f + 0xD9, 0x94, 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD9, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9, + // Bytes 2740 - 277f + 0x94, 0xDB, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xDB, 0x90, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, + 0x95, 0x46, 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2, + 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46, + 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0, + 0xBB, 0x8D, 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD, + 0x80, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0, + // Bytes 2780 - 27bf + 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46, + 0xE0, 0xBE, 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, + 0xBE, 0x92, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0x9C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE, + // Bytes 27c0 - 27ff + 0xB7, 0x46, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x46, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46, + 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2, + 0x88, 0xAE, 0xE2, 0x88, 0xAE, 
0x46, 0xE3, 0x81, + 0xBB, 0xE3, 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88, + 0xE3, 0x82, 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xAD, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82, + 0xB3, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88, + // Bytes 2800 - 283f + 0x46, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46, + 0xE3, 0x83, 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3, + 0x83, 0x9B, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, + 0x9F, 0xE3, 0x83, 0xAA, 0x46, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3, + 0x83, 0xA0, 0x46, 0xE4, 0xBB, 0xA4, 0xE5, 0x92, + 0x8C, 0x46, 0xE5, 0xA4, 0xA7, 0xE6, 0xAD, 0xA3, + 0x46, 0xE5, 0xB9, 0xB3, 0xE6, 0x88, 0x90, 0x46, + // Bytes 2840 - 287f + 0xE6, 0x98, 0x8E, 0xE6, 0xB2, 0xBB, 0x46, 0xE6, + 0x98, 0xAD, 0xE5, 0x92, 0x8C, 0x47, 0x72, 0x61, + 0x64, 0xE2, 0x88, 0x95, 0x73, 0x47, 0xE3, 0x80, + 0x94, 0x53, 0xE3, 0x80, 0x95, 0x48, 0x28, 0xE1, + 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, + // Bytes 2880 - 28bf + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x86, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x87, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x89, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x8B, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x29, + // Bytes 28c0 - 28ff + 0x48, 0x28, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x90, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x91, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x92, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x72, 0x61, 0x64, + 0xE2, 0x88, 0x95, 0x73, 0x32, 0x48, 0xD8, 0xA7, + 0xD9, 0x83, 0xD8, 0xA8, 0xD8, 0xB1, 0x48, 0xD8, + 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87, 0x48, + // Bytes 2900 - 293f + 0xD8, 0xB1, 0xD8, 0xB3, 0xD9, 0x88, 0xD9, 0x84, + 0x48, 0xD8, 
0xB1, 0xDB, 0x8C, 0xD8, 0xA7, 0xD9, + 0x84, 0x48, 0xD8, 0xB5, 0xD9, 0x84, 0xD8, 0xB9, + 0xD9, 0x85, 0x48, 0xD8, 0xB9, 0xD9, 0x84, 0xD9, + 0x8A, 0xD9, 0x87, 0x48, 0xD9, 0x85, 0xD8, 0xAD, + 0xD9, 0x85, 0xD8, 0xAF, 0x48, 0xD9, 0x88, 0xD8, + 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x49, 0xE2, 0x80, + 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0x49, + // Bytes 2940 - 297f + 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0xE2, 0x80, + 0xB5, 0x49, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, + 0xE2, 0x88, 0xAB, 0x49, 0xE2, 0x88, 0xAE, 0xE2, + 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x49, 0xE3, 0x80, + 0x94, 0xE4, 0xB8, 0x89, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE4, 0xBA, 0x8C, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0x8B, 0x9D, + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, + // Bytes 2980 - 29bf + 0xAE, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + 0x94, 0xE6, 0x89, 0x93, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE6, 0x95, 0x97, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x9C, 0xAC, + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE7, + 0x82, 0xB9, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + 0x94, 0xE7, 0x9B, 0x97, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + // Bytes 29c0 - 29ff + 0xAB, 0x49, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82, 0xA6, 0xE3, + 0x82, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x82, + 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9, 0x49, + 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xA0, 0x49, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0xA4, + 0xE3, 0x83, 0xAA, 0x49, 0xE3, 0x82, 0xB1, 0xE3, + 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x49, 0xE3, 0x82, + // Bytes 2a00 - 2a3f + 0xB3, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x8A, 0x49, + 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x81, 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83, 0x86, 0xE3, + 0x82, 0x99, 0xE3, 0x82, 0xB7, 0x49, 0xE3, 0x83, + 0x88, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, + 0xE3, 0x83, 0x8E, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x49, 0xE3, 
0x83, 0x8F, 0xE3, 0x82, 0xA4, + // Bytes 2a40 - 2a7f + 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, + 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xB3, 0x49, + 0xE3, 0x83, 0x95, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0xB3, 0x49, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xBD, 0x49, 0xE3, 0x83, 0x98, 0xE3, + 0x83, 0xAB, 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, + 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, + // Bytes 2a80 - 2abf + 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xB3, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, + 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9E, 0xE3, + 0x83, 0x83, 0xE3, 0x83, 0x8F, 0x49, 0xE3, 0x83, + 0x9E, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xAF, 0x49, + 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAB, 0x49, 0xE3, 0x83, 0xA6, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83, 0xAF, 0xE3, + // Bytes 2ac0 - 2aff + 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE2, 0x80, + 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, + 0x80, 0xB2, 0x4C, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x4C, + 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xAB, 0xE3, 0x83, + 0x95, 0xE3, 0x82, 0xA1, 0x4C, 0xE3, 0x82, 0xA8, + 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xAB, 0xE3, 0x83, + 0xBC, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, + // Bytes 2b00 - 2b3f + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C, 0xE3, + 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x9E, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, + 0x83, 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, + 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xAD, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, + 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x8B, 0xE3, + 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x83, + // Bytes 2b40 - 2b7f + 0xA5, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xA9, 0xE3, 0x83, 0xA0, 0x4C, 0xE3, 0x82, 0xAF, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x8D, 0x4C, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0xA4, + 
0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x8F, 0xE3, + // Bytes 2b80 - 2bbf + 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x84, + 0x4C, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, + 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, + 0x95, 0xE3, 0x82, 0xA3, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBF, 0x4C, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0x8B, 0xE3, 0x83, 0x92, 0x4C, 0xE3, 0x83, 0x98, + // Bytes 2bc0 - 2bff + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xB9, 0x4C, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x88, 0x4C, 0xE3, + 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x82, 0xAF, + 0xE3, 0x83, 0xAD, 0x4C, 0xE3, 0x83, 0x9F, 0xE3, + 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, + 0x4C, 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, + // Bytes 2c00 - 2c3f + 0xAA, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAB, 0xE3, 0x83, + 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0x4C, + 0xE6, 0xA0, 0xAA, 0xE5, 0xBC, 0x8F, 0xE4, 0xBC, + 0x9A, 0xE7, 0xA4, 0xBE, 0x4E, 0x28, 0xE1, 0x84, + 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x92, 0xE1, + 0x85, 0xAE, 0x29, 0x4F, 0xD8, 0xAC, 0xD9, 0x84, + 0x20, 0xD8, 0xAC, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, + // Bytes 2c40 - 2c7f + 0x84, 0xD9, 0x87, 0x4F, 0xE3, 0x82, 0xA2, 0xE3, + 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xA2, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xA2, 0x4F, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xAD, 0xE3, 0x83, 0xAF, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xB5, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x81, 0xE3, 0x83, 0xBC, + // Bytes 2c80 - 2cbf + 0xE3, 0x83, 0xA0, 0x4F, 0xE3, 0x83, 0x8F, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAC, + 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 
0x83, 0x98, 0xE3, + 0x82, 0xAF, 0xE3, 0x82, 0xBF, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x9B, 0xE3, + 0x82, 0x9A, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x83, 0x9E, 0xE3, + 0x83, 0xB3, 0xE3, 0x82, 0xB7, 0xE3, 0x83, 0xA7, + // Bytes 2cc0 - 2cff + 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xA1, 0xE3, + 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x88, + 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xAB, 0x51, 0x28, 0xE1, 0x84, 0x8B, + 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x8C, 0xE1, 0x85, + 0xA5, 0xE1, 0x86, 0xAB, 0x29, 0x52, 0xE3, 0x82, + 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0xE3, + // Bytes 2d00 - 2d3f + 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, + 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0xE3, + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, + 0xE3, 0x83, 0xA0, 0x52, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xAD, 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, + 0xE3, 0x83, 0xA0, 0xE3, 0x83, 0x88, 0xE3, 0x83, + // Bytes 2d40 - 2d7f + 0xB3, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, + 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAD, 0x52, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x82, + 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0x52, + 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, + 0xA2, 0xE3, 0x82, 0xB9, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x95, 0xE3, 0x82, + // Bytes 2d80 - 2dbf + 0x99, 0xE3, 0x83, 0x83, 0xE3, 0x82, 0xB7, 0xE3, + 0x82, 0xA7, 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, + 0x9F, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x8F, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, + 0x52, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xB3, 0xE3, + 0x83, 0x88, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xB3, 0x61, 0xD8, 0xB5, 0xD9, 0x84, + 0xD9, 0x89, 0x20, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, + // Bytes 2dc0 - 2dff + 0x84, 0xD9, 
0x87, 0x20, 0xD8, 0xB9, 0xD9, 0x84, + 0xD9, 0x8A, 0xD9, 0x87, 0x20, 0xD9, 0x88, 0xD8, + 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x06, 0xE0, 0xA7, + 0x87, 0xE0, 0xA6, 0xBE, 0x01, 0x06, 0xE0, 0xA7, + 0x87, 0xE0, 0xA7, 0x97, 0x01, 0x06, 0xE0, 0xAD, + 0x87, 0xE0, 0xAC, 0xBE, 0x01, 0x06, 0xE0, 0xAD, + 0x87, 0xE0, 0xAD, 0x96, 0x01, 0x06, 0xE0, 0xAD, + 0x87, 0xE0, 0xAD, 0x97, 0x01, 0x06, 0xE0, 0xAE, + // Bytes 2e00 - 2e3f + 0x92, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, 0xAF, + 0x86, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, 0xAF, + 0x86, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, 0xAF, + 0x87, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, 0xB2, + 0xBF, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, 0xB3, + 0x86, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, 0xB3, + 0x86, 0xE0, 0xB3, 0x96, 0x01, 0x06, 0xE0, 0xB5, + 0x86, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, 0xB5, + // Bytes 2e40 - 2e7f + 0x86, 0xE0, 0xB5, 0x97, 0x01, 0x06, 0xE0, 0xB5, + 0x87, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, 0xB7, + 0x99, 0xE0, 0xB7, 0x9F, 0x01, 0x06, 0xE1, 0x80, + 0xA5, 0xE1, 0x80, 0xAE, 0x01, 0x06, 0xE1, 0xAC, + 0x85, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0x87, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0x89, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0x8B, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + // Bytes 2e80 - 2ebf + 0x8D, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0x91, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0xBA, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0xBC, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0xBE, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, + 0xBF, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAD, + 0x82, 0xE1, 0xAC, 0xB5, 0x01, 0x08, 0xF0, 0x91, + 0x84, 0xB1, 0xF0, 0x91, 0x84, 0xA7, 0x01, 0x08, + // Bytes 2ec0 - 2eff + 0xF0, 0x91, 0x84, 0xB2, 0xF0, 0x91, 0x84, 0xA7, + 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0, 0x91, + 0x8C, 0xBE, 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, + 0xF0, 0x91, 0x8D, 0x97, 0x01, 0x08, 0xF0, 0x91, + 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xB0, 0x01, 0x08, + 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xBA, + 0x01, 0x08, 0xF0, 
0x91, 0x92, 0xB9, 0xF0, 0x91, + 0x92, 0xBD, 0x01, 0x08, 0xF0, 0x91, 0x96, 0xB8, + // Bytes 2f00 - 2f3f + 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0, 0x91, + 0x96, 0xB9, 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, + 0xF0, 0x91, 0xA4, 0xB5, 0xF0, 0x91, 0xA4, 0xB0, + 0x01, 0x09, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, + 0xE0, 0xB3, 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, + 0xE0, 0xB7, 0x8F, 0xE0, 0xB7, 0x8A, 0x16, 0x44, + 0x44, 0x5A, 0xCC, 0x8C, 0xCD, 0x44, 0x44, 0x7A, + 0xCC, 0x8C, 0xCD, 0x44, 0x64, 0x7A, 0xCC, 0x8C, + // Bytes 2f40 - 2f7f + 0xCD, 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, + 0xCD, 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, + 0xCD, 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, + 0xB9, 0x46, 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, + // Bytes 2f80 - 2fbf + 0x01, 0x46, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, + 0x01, 0x46, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, + // Bytes 2fc0 - 2fff + 0x01, 0x46, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, + 0x01, 0x46, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, + 0x01, 0x49, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, + 0xE3, 0x82, 0x99, 0x11, 0x4C, 0xE1, 0x84, 0x8C, + 0xE1, 0x85, 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, + 0xB4, 0x01, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, + 0x99, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x11, + 0x4C, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, + // Bytes 3000 - 303f + 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x11, 0x4C, 0xE3, + 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE1, 0x84, 0x8E, + 0xE1, 0x85, 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, + 
0x80, 0xE1, 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE3, + 0x82, 0xB7, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, + // Bytes 3040 - 307f + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x4F, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xBC, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x11, + 0x4F, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, + 0x11, 0x52, 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x88, 0xE3, 0x82, 0x99, 0x11, 0x52, 0xE3, 0x83, + // Bytes 3080 - 30bf + 0x95, 0xE3, 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, + 0x83, 0x83, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, + 0x11, 0x86, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, + 0x01, 0x86, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, + 0x01, 0x03, 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, + 0xCC, 0xB8, 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, + 0x03, 0x41, 0xCC, 0x80, 0xCD, 0x03, 0x41, 0xCC, + 0x81, 0xCD, 0x03, 0x41, 0xCC, 0x83, 0xCD, 0x03, + // Bytes 30c0 - 30ff + 0x41, 0xCC, 0x84, 0xCD, 0x03, 0x41, 0xCC, 0x89, + 0xCD, 0x03, 0x41, 0xCC, 0x8C, 0xCD, 0x03, 0x41, + 0xCC, 0x8F, 0xCD, 0x03, 0x41, 0xCC, 0x91, 0xCD, + 0x03, 0x41, 0xCC, 0xA5, 0xB9, 0x03, 0x41, 0xCC, + 0xA8, 0xA9, 0x03, 0x42, 0xCC, 0x87, 0xCD, 0x03, + 0x42, 0xCC, 0xA3, 0xB9, 0x03, 0x42, 0xCC, 0xB1, + 0xB9, 0x03, 0x43, 0xCC, 0x81, 0xCD, 0x03, 0x43, + 0xCC, 0x82, 0xCD, 0x03, 0x43, 0xCC, 0x87, 0xCD, + // Bytes 3100 - 313f + 0x03, 0x43, 0xCC, 0x8C, 0xCD, 0x03, 0x44, 0xCC, + 0x87, 0xCD, 0x03, 0x44, 0xCC, 0x8C, 0xCD, 0x03, + 0x44, 0xCC, 0xA3, 0xB9, 0x03, 0x44, 0xCC, 0xA7, + 0xA9, 0x03, 0x44, 0xCC, 0xAD, 0xB9, 0x03, 0x44, + 0xCC, 0xB1, 0xB9, 0x03, 0x45, 0xCC, 0x80, 0xCD, + 0x03, 0x45, 0xCC, 0x81, 0xCD, 0x03, 0x45, 0xCC, + 0x83, 0xCD, 0x03, 0x45, 0xCC, 0x86, 0xCD, 0x03, + 0x45, 0xCC, 0x87, 0xCD, 0x03, 0x45, 0xCC, 0x88, + // Bytes 3140 - 317f + 0xCD, 0x03, 0x45, 0xCC, 0x89, 0xCD, 0x03, 0x45, + 0xCC, 0x8C, 0xCD, 0x03, 0x45, 
0xCC, 0x8F, 0xCD, + 0x03, 0x45, 0xCC, 0x91, 0xCD, 0x03, 0x45, 0xCC, + 0xA8, 0xA9, 0x03, 0x45, 0xCC, 0xAD, 0xB9, 0x03, + 0x45, 0xCC, 0xB0, 0xB9, 0x03, 0x46, 0xCC, 0x87, + 0xCD, 0x03, 0x47, 0xCC, 0x81, 0xCD, 0x03, 0x47, + 0xCC, 0x82, 0xCD, 0x03, 0x47, 0xCC, 0x84, 0xCD, + 0x03, 0x47, 0xCC, 0x86, 0xCD, 0x03, 0x47, 0xCC, + // Bytes 3180 - 31bf + 0x87, 0xCD, 0x03, 0x47, 0xCC, 0x8C, 0xCD, 0x03, + 0x47, 0xCC, 0xA7, 0xA9, 0x03, 0x48, 0xCC, 0x82, + 0xCD, 0x03, 0x48, 0xCC, 0x87, 0xCD, 0x03, 0x48, + 0xCC, 0x88, 0xCD, 0x03, 0x48, 0xCC, 0x8C, 0xCD, + 0x03, 0x48, 0xCC, 0xA3, 0xB9, 0x03, 0x48, 0xCC, + 0xA7, 0xA9, 0x03, 0x48, 0xCC, 0xAE, 0xB9, 0x03, + 0x49, 0xCC, 0x80, 0xCD, 0x03, 0x49, 0xCC, 0x81, + 0xCD, 0x03, 0x49, 0xCC, 0x82, 0xCD, 0x03, 0x49, + // Bytes 31c0 - 31ff + 0xCC, 0x83, 0xCD, 0x03, 0x49, 0xCC, 0x84, 0xCD, + 0x03, 0x49, 0xCC, 0x86, 0xCD, 0x03, 0x49, 0xCC, + 0x87, 0xCD, 0x03, 0x49, 0xCC, 0x89, 0xCD, 0x03, + 0x49, 0xCC, 0x8C, 0xCD, 0x03, 0x49, 0xCC, 0x8F, + 0xCD, 0x03, 0x49, 0xCC, 0x91, 0xCD, 0x03, 0x49, + 0xCC, 0xA3, 0xB9, 0x03, 0x49, 0xCC, 0xA8, 0xA9, + 0x03, 0x49, 0xCC, 0xB0, 0xB9, 0x03, 0x4A, 0xCC, + 0x82, 0xCD, 0x03, 0x4B, 0xCC, 0x81, 0xCD, 0x03, + // Bytes 3200 - 323f + 0x4B, 0xCC, 0x8C, 0xCD, 0x03, 0x4B, 0xCC, 0xA3, + 0xB9, 0x03, 0x4B, 0xCC, 0xA7, 0xA9, 0x03, 0x4B, + 0xCC, 0xB1, 0xB9, 0x03, 0x4C, 0xCC, 0x81, 0xCD, + 0x03, 0x4C, 0xCC, 0x8C, 0xCD, 0x03, 0x4C, 0xCC, + 0xA7, 0xA9, 0x03, 0x4C, 0xCC, 0xAD, 0xB9, 0x03, + 0x4C, 0xCC, 0xB1, 0xB9, 0x03, 0x4D, 0xCC, 0x81, + 0xCD, 0x03, 0x4D, 0xCC, 0x87, 0xCD, 0x03, 0x4D, + 0xCC, 0xA3, 0xB9, 0x03, 0x4E, 0xCC, 0x80, 0xCD, + // Bytes 3240 - 327f + 0x03, 0x4E, 0xCC, 0x81, 0xCD, 0x03, 0x4E, 0xCC, + 0x83, 0xCD, 0x03, 0x4E, 0xCC, 0x87, 0xCD, 0x03, + 0x4E, 0xCC, 0x8C, 0xCD, 0x03, 0x4E, 0xCC, 0xA3, + 0xB9, 0x03, 0x4E, 0xCC, 0xA7, 0xA9, 0x03, 0x4E, + 0xCC, 0xAD, 0xB9, 0x03, 0x4E, 0xCC, 0xB1, 0xB9, + 0x03, 0x4F, 0xCC, 0x80, 0xCD, 0x03, 0x4F, 0xCC, + 0x81, 0xCD, 0x03, 0x4F, 0xCC, 0x86, 0xCD, 0x03, + 0x4F, 0xCC, 0x89, 0xCD, 0x03, 0x4F, 
0xCC, 0x8B, + // Bytes 3280 - 32bf + 0xCD, 0x03, 0x4F, 0xCC, 0x8C, 0xCD, 0x03, 0x4F, + 0xCC, 0x8F, 0xCD, 0x03, 0x4F, 0xCC, 0x91, 0xCD, + 0x03, 0x50, 0xCC, 0x81, 0xCD, 0x03, 0x50, 0xCC, + 0x87, 0xCD, 0x03, 0x52, 0xCC, 0x81, 0xCD, 0x03, + 0x52, 0xCC, 0x87, 0xCD, 0x03, 0x52, 0xCC, 0x8C, + 0xCD, 0x03, 0x52, 0xCC, 0x8F, 0xCD, 0x03, 0x52, + 0xCC, 0x91, 0xCD, 0x03, 0x52, 0xCC, 0xA7, 0xA9, + 0x03, 0x52, 0xCC, 0xB1, 0xB9, 0x03, 0x53, 0xCC, + // Bytes 32c0 - 32ff + 0x82, 0xCD, 0x03, 0x53, 0xCC, 0x87, 0xCD, 0x03, + 0x53, 0xCC, 0xA6, 0xB9, 0x03, 0x53, 0xCC, 0xA7, + 0xA9, 0x03, 0x54, 0xCC, 0x87, 0xCD, 0x03, 0x54, + 0xCC, 0x8C, 0xCD, 0x03, 0x54, 0xCC, 0xA3, 0xB9, + 0x03, 0x54, 0xCC, 0xA6, 0xB9, 0x03, 0x54, 0xCC, + 0xA7, 0xA9, 0x03, 0x54, 0xCC, 0xAD, 0xB9, 0x03, + 0x54, 0xCC, 0xB1, 0xB9, 0x03, 0x55, 0xCC, 0x80, + 0xCD, 0x03, 0x55, 0xCC, 0x81, 0xCD, 0x03, 0x55, + // Bytes 3300 - 333f + 0xCC, 0x82, 0xCD, 0x03, 0x55, 0xCC, 0x86, 0xCD, + 0x03, 0x55, 0xCC, 0x89, 0xCD, 0x03, 0x55, 0xCC, + 0x8A, 0xCD, 0x03, 0x55, 0xCC, 0x8B, 0xCD, 0x03, + 0x55, 0xCC, 0x8C, 0xCD, 0x03, 0x55, 0xCC, 0x8F, + 0xCD, 0x03, 0x55, 0xCC, 0x91, 0xCD, 0x03, 0x55, + 0xCC, 0xA3, 0xB9, 0x03, 0x55, 0xCC, 0xA4, 0xB9, + 0x03, 0x55, 0xCC, 0xA8, 0xA9, 0x03, 0x55, 0xCC, + 0xAD, 0xB9, 0x03, 0x55, 0xCC, 0xB0, 0xB9, 0x03, + // Bytes 3340 - 337f + 0x56, 0xCC, 0x83, 0xCD, 0x03, 0x56, 0xCC, 0xA3, + 0xB9, 0x03, 0x57, 0xCC, 0x80, 0xCD, 0x03, 0x57, + 0xCC, 0x81, 0xCD, 0x03, 0x57, 0xCC, 0x82, 0xCD, + 0x03, 0x57, 0xCC, 0x87, 0xCD, 0x03, 0x57, 0xCC, + 0x88, 0xCD, 0x03, 0x57, 0xCC, 0xA3, 0xB9, 0x03, + 0x58, 0xCC, 0x87, 0xCD, 0x03, 0x58, 0xCC, 0x88, + 0xCD, 0x03, 0x59, 0xCC, 0x80, 0xCD, 0x03, 0x59, + 0xCC, 0x81, 0xCD, 0x03, 0x59, 0xCC, 0x82, 0xCD, + // Bytes 3380 - 33bf + 0x03, 0x59, 0xCC, 0x83, 0xCD, 0x03, 0x59, 0xCC, + 0x84, 0xCD, 0x03, 0x59, 0xCC, 0x87, 0xCD, 0x03, + 0x59, 0xCC, 0x88, 0xCD, 0x03, 0x59, 0xCC, 0x89, + 0xCD, 0x03, 0x59, 0xCC, 0xA3, 0xB9, 0x03, 0x5A, + 0xCC, 0x81, 0xCD, 0x03, 0x5A, 0xCC, 0x82, 0xCD, + 0x03, 0x5A, 0xCC, 
0x87, 0xCD, 0x03, 0x5A, 0xCC, + 0x8C, 0xCD, 0x03, 0x5A, 0xCC, 0xA3, 0xB9, 0x03, + 0x5A, 0xCC, 0xB1, 0xB9, 0x03, 0x61, 0xCC, 0x80, + // Bytes 33c0 - 33ff + 0xCD, 0x03, 0x61, 0xCC, 0x81, 0xCD, 0x03, 0x61, + 0xCC, 0x83, 0xCD, 0x03, 0x61, 0xCC, 0x84, 0xCD, + 0x03, 0x61, 0xCC, 0x89, 0xCD, 0x03, 0x61, 0xCC, + 0x8C, 0xCD, 0x03, 0x61, 0xCC, 0x8F, 0xCD, 0x03, + 0x61, 0xCC, 0x91, 0xCD, 0x03, 0x61, 0xCC, 0xA5, + 0xB9, 0x03, 0x61, 0xCC, 0xA8, 0xA9, 0x03, 0x62, + 0xCC, 0x87, 0xCD, 0x03, 0x62, 0xCC, 0xA3, 0xB9, + 0x03, 0x62, 0xCC, 0xB1, 0xB9, 0x03, 0x63, 0xCC, + // Bytes 3400 - 343f + 0x81, 0xCD, 0x03, 0x63, 0xCC, 0x82, 0xCD, 0x03, + 0x63, 0xCC, 0x87, 0xCD, 0x03, 0x63, 0xCC, 0x8C, + 0xCD, 0x03, 0x64, 0xCC, 0x87, 0xCD, 0x03, 0x64, + 0xCC, 0x8C, 0xCD, 0x03, 0x64, 0xCC, 0xA3, 0xB9, + 0x03, 0x64, 0xCC, 0xA7, 0xA9, 0x03, 0x64, 0xCC, + 0xAD, 0xB9, 0x03, 0x64, 0xCC, 0xB1, 0xB9, 0x03, + 0x65, 0xCC, 0x80, 0xCD, 0x03, 0x65, 0xCC, 0x81, + 0xCD, 0x03, 0x65, 0xCC, 0x83, 0xCD, 0x03, 0x65, + // Bytes 3440 - 347f + 0xCC, 0x86, 0xCD, 0x03, 0x65, 0xCC, 0x87, 0xCD, + 0x03, 0x65, 0xCC, 0x88, 0xCD, 0x03, 0x65, 0xCC, + 0x89, 0xCD, 0x03, 0x65, 0xCC, 0x8C, 0xCD, 0x03, + 0x65, 0xCC, 0x8F, 0xCD, 0x03, 0x65, 0xCC, 0x91, + 0xCD, 0x03, 0x65, 0xCC, 0xA8, 0xA9, 0x03, 0x65, + 0xCC, 0xAD, 0xB9, 0x03, 0x65, 0xCC, 0xB0, 0xB9, + 0x03, 0x66, 0xCC, 0x87, 0xCD, 0x03, 0x67, 0xCC, + 0x81, 0xCD, 0x03, 0x67, 0xCC, 0x82, 0xCD, 0x03, + // Bytes 3480 - 34bf + 0x67, 0xCC, 0x84, 0xCD, 0x03, 0x67, 0xCC, 0x86, + 0xCD, 0x03, 0x67, 0xCC, 0x87, 0xCD, 0x03, 0x67, + 0xCC, 0x8C, 0xCD, 0x03, 0x67, 0xCC, 0xA7, 0xA9, + 0x03, 0x68, 0xCC, 0x82, 0xCD, 0x03, 0x68, 0xCC, + 0x87, 0xCD, 0x03, 0x68, 0xCC, 0x88, 0xCD, 0x03, + 0x68, 0xCC, 0x8C, 0xCD, 0x03, 0x68, 0xCC, 0xA3, + 0xB9, 0x03, 0x68, 0xCC, 0xA7, 0xA9, 0x03, 0x68, + 0xCC, 0xAE, 0xB9, 0x03, 0x68, 0xCC, 0xB1, 0xB9, + // Bytes 34c0 - 34ff + 0x03, 0x69, 0xCC, 0x80, 0xCD, 0x03, 0x69, 0xCC, + 0x81, 0xCD, 0x03, 0x69, 0xCC, 0x82, 0xCD, 0x03, + 0x69, 0xCC, 0x83, 0xCD, 0x03, 0x69, 0xCC, 0x84, + 
0xCD, 0x03, 0x69, 0xCC, 0x86, 0xCD, 0x03, 0x69, + 0xCC, 0x89, 0xCD, 0x03, 0x69, 0xCC, 0x8C, 0xCD, + 0x03, 0x69, 0xCC, 0x8F, 0xCD, 0x03, 0x69, 0xCC, + 0x91, 0xCD, 0x03, 0x69, 0xCC, 0xA3, 0xB9, 0x03, + 0x69, 0xCC, 0xA8, 0xA9, 0x03, 0x69, 0xCC, 0xB0, + // Bytes 3500 - 353f + 0xB9, 0x03, 0x6A, 0xCC, 0x82, 0xCD, 0x03, 0x6A, + 0xCC, 0x8C, 0xCD, 0x03, 0x6B, 0xCC, 0x81, 0xCD, + 0x03, 0x6B, 0xCC, 0x8C, 0xCD, 0x03, 0x6B, 0xCC, + 0xA3, 0xB9, 0x03, 0x6B, 0xCC, 0xA7, 0xA9, 0x03, + 0x6B, 0xCC, 0xB1, 0xB9, 0x03, 0x6C, 0xCC, 0x81, + 0xCD, 0x03, 0x6C, 0xCC, 0x8C, 0xCD, 0x03, 0x6C, + 0xCC, 0xA7, 0xA9, 0x03, 0x6C, 0xCC, 0xAD, 0xB9, + 0x03, 0x6C, 0xCC, 0xB1, 0xB9, 0x03, 0x6D, 0xCC, + // Bytes 3540 - 357f + 0x81, 0xCD, 0x03, 0x6D, 0xCC, 0x87, 0xCD, 0x03, + 0x6D, 0xCC, 0xA3, 0xB9, 0x03, 0x6E, 0xCC, 0x80, + 0xCD, 0x03, 0x6E, 0xCC, 0x81, 0xCD, 0x03, 0x6E, + 0xCC, 0x83, 0xCD, 0x03, 0x6E, 0xCC, 0x87, 0xCD, + 0x03, 0x6E, 0xCC, 0x8C, 0xCD, 0x03, 0x6E, 0xCC, + 0xA3, 0xB9, 0x03, 0x6E, 0xCC, 0xA7, 0xA9, 0x03, + 0x6E, 0xCC, 0xAD, 0xB9, 0x03, 0x6E, 0xCC, 0xB1, + 0xB9, 0x03, 0x6F, 0xCC, 0x80, 0xCD, 0x03, 0x6F, + // Bytes 3580 - 35bf + 0xCC, 0x81, 0xCD, 0x03, 0x6F, 0xCC, 0x86, 0xCD, + 0x03, 0x6F, 0xCC, 0x89, 0xCD, 0x03, 0x6F, 0xCC, + 0x8B, 0xCD, 0x03, 0x6F, 0xCC, 0x8C, 0xCD, 0x03, + 0x6F, 0xCC, 0x8F, 0xCD, 0x03, 0x6F, 0xCC, 0x91, + 0xCD, 0x03, 0x70, 0xCC, 0x81, 0xCD, 0x03, 0x70, + 0xCC, 0x87, 0xCD, 0x03, 0x72, 0xCC, 0x81, 0xCD, + 0x03, 0x72, 0xCC, 0x87, 0xCD, 0x03, 0x72, 0xCC, + 0x8C, 0xCD, 0x03, 0x72, 0xCC, 0x8F, 0xCD, 0x03, + // Bytes 35c0 - 35ff + 0x72, 0xCC, 0x91, 0xCD, 0x03, 0x72, 0xCC, 0xA7, + 0xA9, 0x03, 0x72, 0xCC, 0xB1, 0xB9, 0x03, 0x73, + 0xCC, 0x82, 0xCD, 0x03, 0x73, 0xCC, 0x87, 0xCD, + 0x03, 0x73, 0xCC, 0xA6, 0xB9, 0x03, 0x73, 0xCC, + 0xA7, 0xA9, 0x03, 0x74, 0xCC, 0x87, 0xCD, 0x03, + 0x74, 0xCC, 0x88, 0xCD, 0x03, 0x74, 0xCC, 0x8C, + 0xCD, 0x03, 0x74, 0xCC, 0xA3, 0xB9, 0x03, 0x74, + 0xCC, 0xA6, 0xB9, 0x03, 0x74, 0xCC, 0xA7, 0xA9, + // Bytes 3600 - 363f + 0x03, 0x74, 0xCC, 0xAD, 0xB9, 
0x03, 0x74, 0xCC, + 0xB1, 0xB9, 0x03, 0x75, 0xCC, 0x80, 0xCD, 0x03, + 0x75, 0xCC, 0x81, 0xCD, 0x03, 0x75, 0xCC, 0x82, + 0xCD, 0x03, 0x75, 0xCC, 0x86, 0xCD, 0x03, 0x75, + 0xCC, 0x89, 0xCD, 0x03, 0x75, 0xCC, 0x8A, 0xCD, + 0x03, 0x75, 0xCC, 0x8B, 0xCD, 0x03, 0x75, 0xCC, + 0x8C, 0xCD, 0x03, 0x75, 0xCC, 0x8F, 0xCD, 0x03, + 0x75, 0xCC, 0x91, 0xCD, 0x03, 0x75, 0xCC, 0xA3, + // Bytes 3640 - 367f + 0xB9, 0x03, 0x75, 0xCC, 0xA4, 0xB9, 0x03, 0x75, + 0xCC, 0xA8, 0xA9, 0x03, 0x75, 0xCC, 0xAD, 0xB9, + 0x03, 0x75, 0xCC, 0xB0, 0xB9, 0x03, 0x76, 0xCC, + 0x83, 0xCD, 0x03, 0x76, 0xCC, 0xA3, 0xB9, 0x03, + 0x77, 0xCC, 0x80, 0xCD, 0x03, 0x77, 0xCC, 0x81, + 0xCD, 0x03, 0x77, 0xCC, 0x82, 0xCD, 0x03, 0x77, + 0xCC, 0x87, 0xCD, 0x03, 0x77, 0xCC, 0x88, 0xCD, + 0x03, 0x77, 0xCC, 0x8A, 0xCD, 0x03, 0x77, 0xCC, + // Bytes 3680 - 36bf + 0xA3, 0xB9, 0x03, 0x78, 0xCC, 0x87, 0xCD, 0x03, + 0x78, 0xCC, 0x88, 0xCD, 0x03, 0x79, 0xCC, 0x80, + 0xCD, 0x03, 0x79, 0xCC, 0x81, 0xCD, 0x03, 0x79, + 0xCC, 0x82, 0xCD, 0x03, 0x79, 0xCC, 0x83, 0xCD, + 0x03, 0x79, 0xCC, 0x84, 0xCD, 0x03, 0x79, 0xCC, + 0x87, 0xCD, 0x03, 0x79, 0xCC, 0x88, 0xCD, 0x03, + 0x79, 0xCC, 0x89, 0xCD, 0x03, 0x79, 0xCC, 0x8A, + 0xCD, 0x03, 0x79, 0xCC, 0xA3, 0xB9, 0x03, 0x7A, + // Bytes 36c0 - 36ff + 0xCC, 0x81, 0xCD, 0x03, 0x7A, 0xCC, 0x82, 0xCD, + 0x03, 0x7A, 0xCC, 0x87, 0xCD, 0x03, 0x7A, 0xCC, + 0x8C, 0xCD, 0x03, 0x7A, 0xCC, 0xA3, 0xB9, 0x03, + 0x7A, 0xCC, 0xB1, 0xB9, 0x04, 0xC2, 0xA8, 0xCC, + 0x80, 0xCE, 0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCE, + 0x04, 0xC2, 0xA8, 0xCD, 0x82, 0xCE, 0x04, 0xC3, + 0x86, 0xCC, 0x81, 0xCD, 0x04, 0xC3, 0x86, 0xCC, + 0x84, 0xCD, 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xCD, + // Bytes 3700 - 373f + 0x04, 0xC3, 0xA6, 0xCC, 0x81, 0xCD, 0x04, 0xC3, + 0xA6, 0xCC, 0x84, 0xCD, 0x04, 0xC3, 0xB8, 0xCC, + 0x81, 0xCD, 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xCD, + 0x04, 0xC6, 0xB7, 0xCC, 0x8C, 0xCD, 0x04, 0xCA, + 0x92, 0xCC, 0x8C, 0xCD, 0x04, 0xCE, 0x91, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0x91, 0xCC, 0x84, 0xCD, 
0x04, 0xCE, + 0x91, 0xCC, 0x86, 0xCD, 0x04, 0xCE, 0x91, 0xCD, + // Bytes 3740 - 377f + 0x85, 0xDD, 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xCD, + 0x04, 0xCE, 0x95, 0xCC, 0x81, 0xCD, 0x04, 0xCE, + 0x97, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0x97, 0xCC, + 0x81, 0xCD, 0x04, 0xCE, 0x97, 0xCD, 0x85, 0xDD, + 0x04, 0xCE, 0x99, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0x99, 0xCC, 0x81, 0xCD, 0x04, 0xCE, 0x99, 0xCC, + 0x84, 0xCD, 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xCD, + 0x04, 0xCE, 0x99, 0xCC, 0x88, 0xCD, 0x04, 0xCE, + // Bytes 3780 - 37bf + 0x9F, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0x9F, 0xCC, + 0x81, 0xCD, 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xCD, + 0x04, 0xCE, 0xA5, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0xA5, 0xCC, 0x81, 0xCD, 0x04, 0xCE, 0xA5, 0xCC, + 0x84, 0xCD, 0x04, 0xCE, 0xA5, 0xCC, 0x86, 0xCD, + 0x04, 0xCE, 0xA5, 0xCC, 0x88, 0xCD, 0x04, 0xCE, + 0xA9, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0xA9, 0xCC, + 0x81, 0xCD, 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xDD, + // Bytes 37c0 - 37ff + 0x04, 0xCE, 0xB1, 0xCC, 0x84, 0xCD, 0x04, 0xCE, + 0xB1, 0xCC, 0x86, 0xCD, 0x04, 0xCE, 0xB1, 0xCD, + 0x85, 0xDD, 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xCD, + 0x04, 0xCE, 0xB5, 0xCC, 0x81, 0xCD, 0x04, 0xCE, + 0xB7, 0xCD, 0x85, 0xDD, 0x04, 0xCE, 0xB9, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0xB9, 0xCC, 0x84, 0xCD, 0x04, 0xCE, + 0xB9, 0xCC, 0x86, 0xCD, 0x04, 0xCE, 0xB9, 0xCD, + // Bytes 3800 - 383f + 0x82, 0xCD, 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xCD, + 0x04, 0xCE, 0xBF, 0xCC, 0x81, 0xCD, 0x04, 0xCF, + 0x81, 0xCC, 0x93, 0xCD, 0x04, 0xCF, 0x81, 0xCC, + 0x94, 0xCD, 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xCD, + 0x04, 0xCF, 0x85, 0xCC, 0x81, 0xCD, 0x04, 0xCF, + 0x85, 0xCC, 0x84, 0xCD, 0x04, 0xCF, 0x85, 0xCC, + 0x86, 0xCD, 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xCD, + 0x04, 0xCF, 0x89, 0xCD, 0x85, 0xDD, 0x04, 0xCF, + // Bytes 3840 - 387f + 0x92, 0xCC, 0x81, 0xCD, 0x04, 0xCF, 0x92, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0x90, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + 0x90, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0x93, 0xCC, + 0x81, 0xCD, 0x04, 
0xD0, 0x95, 0xCC, 0x80, 0xCD, + 0x04, 0xD0, 0x95, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + 0x95, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0x96, 0xCC, + 0x86, 0xCD, 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xCD, + // Bytes 3880 - 38bf + 0x04, 0xD0, 0x97, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0x98, 0xCC, 0x80, 0xCD, 0x04, 0xD0, 0x98, 0xCC, + 0x84, 0xCD, 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xCD, + 0x04, 0xD0, 0x98, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0x9A, 0xCC, 0x81, 0xCD, 0x04, 0xD0, 0x9E, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xCD, + 0x04, 0xD0, 0xA3, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + 0xA3, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0xA3, 0xCC, + // Bytes 38c0 - 38ff + 0x8B, 0xCD, 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xAB, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0xAD, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0xB0, 0xCC, + 0x86, 0xCD, 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xB3, 0xCC, 0x81, 0xCD, 0x04, 0xD0, + 0xB5, 0xCC, 0x80, 0xCD, 0x04, 0xD0, 0xB5, 0xCC, + 0x86, 0xCD, 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xB6, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + // Bytes 3900 - 393f + 0xB6, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0xB7, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xCD, + 0x04, 0xD0, 0xB8, 0xCC, 0x84, 0xCD, 0x04, 0xD0, + 0xB8, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0xB8, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xCD, + 0x04, 0xD0, 0xBE, 0xCC, 0x88, 0xCD, 0x04, 0xD1, + 0x83, 0xCC, 0x84, 0xCD, 0x04, 0xD1, 0x83, 0xCC, + 0x86, 0xCD, 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xCD, + // Bytes 3940 - 397f + 0x04, 0xD1, 0x83, 0xCC, 0x8B, 0xCD, 0x04, 0xD1, + 0x87, 0xCC, 0x88, 0xCD, 0x04, 0xD1, 0x8B, 0xCC, + 0x88, 0xCD, 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xCD, + 0x04, 0xD1, 0x96, 0xCC, 0x88, 0xCD, 0x04, 0xD1, + 0xB4, 0xCC, 0x8F, 0xCD, 0x04, 0xD1, 0xB5, 0xCC, + 0x8F, 0xCD, 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xCD, + 0x04, 0xD3, 0x99, 0xCC, 0x88, 0xCD, 0x04, 0xD3, + 0xA8, 0xCC, 0x88, 0xCD, 0x04, 0xD3, 0xA9, 0xCC, + // Bytes 3980 - 39bf + 0x88, 0xCD, 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xCD, + 0x04, 0xD8, 0xA7, 0xD9, 0x94, 0xCD, 0x04, 0xD8, + 
0xA7, 0xD9, 0x95, 0xB9, 0x04, 0xD9, 0x88, 0xD9, + 0x94, 0xCD, 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xCD, + 0x04, 0xDB, 0x81, 0xD9, 0x94, 0xCD, 0x04, 0xDB, + 0x92, 0xD9, 0x94, 0xCD, 0x04, 0xDB, 0x95, 0xD9, + 0x94, 0xCD, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, + 0xCE, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x81, 0xCE, + // Bytes 39c0 - 39ff + 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, + 0x41, 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x41, + 0xCC, 0x86, 0xCC, 0x80, 0xCE, 0x05, 0x41, 0xCC, + 0x86, 0xCC, 0x81, 0xCE, 0x05, 0x41, 0xCC, 0x86, + 0xCC, 0x83, 0xCE, 0x05, 0x41, 0xCC, 0x86, 0xCC, + 0x89, 0xCE, 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, + 0xCE, 0x05, 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCE, + 0x05, 0x41, 0xCC, 0x8A, 0xCC, 0x81, 0xCE, 0x05, + // Bytes 3a00 - 3a3f + 0x41, 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x41, + 0xCC, 0xA3, 0xCC, 0x86, 0xCE, 0x05, 0x43, 0xCC, + 0xA7, 0xCC, 0x81, 0xCE, 0x05, 0x45, 0xCC, 0x82, + 0xCC, 0x80, 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, + 0x81, 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, + 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCE, + 0x05, 0x45, 0xCC, 0x84, 0xCC, 0x80, 0xCE, 0x05, + 0x45, 0xCC, 0x84, 0xCC, 0x81, 0xCE, 0x05, 0x45, + // Bytes 3a40 - 3a7f + 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x45, 0xCC, + 0xA7, 0xCC, 0x86, 0xCE, 0x05, 0x49, 0xCC, 0x88, + 0xCC, 0x81, 0xCE, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, + 0x84, 0xCE, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, + 0xCE, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCE, + 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, + 0x4F, 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x4F, + 0xCC, 0x83, 0xCC, 0x81, 0xCE, 0x05, 0x4F, 0xCC, + // Bytes 3a80 - 3abf + 0x83, 0xCC, 0x84, 0xCE, 0x05, 0x4F, 0xCC, 0x83, + 0xCC, 0x88, 0xCE, 0x05, 0x4F, 0xCC, 0x84, 0xCC, + 0x80, 0xCE, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, + 0xCE, 0x05, 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCE, + 0x05, 0x4F, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, + 0x4F, 0xCC, 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x4F, + 0xCC, 0x9B, 0xCC, 0x81, 0xCE, 0x05, 0x4F, 0xCC, + 0x9B, 0xCC, 0x83, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, + // 
Bytes 3ac0 - 3aff + 0xCC, 0x89, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, + 0xA3, 0xBA, 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, + 0xCE, 0x05, 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCE, + 0x05, 0x52, 0xCC, 0xA3, 0xCC, 0x84, 0xCE, 0x05, + 0x53, 0xCC, 0x81, 0xCC, 0x87, 0xCE, 0x05, 0x53, + 0xCC, 0x8C, 0xCC, 0x87, 0xCE, 0x05, 0x53, 0xCC, + 0xA3, 0xCC, 0x87, 0xCE, 0x05, 0x55, 0xCC, 0x83, + 0xCC, 0x81, 0xCE, 0x05, 0x55, 0xCC, 0x84, 0xCC, + // Bytes 3b00 - 3b3f + 0x88, 0xCE, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, + 0xCE, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCE, + 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, + 0x55, 0xCC, 0x88, 0xCC, 0x8C, 0xCE, 0x05, 0x55, + 0xCC, 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x55, 0xCC, + 0x9B, 0xCC, 0x81, 0xCE, 0x05, 0x55, 0xCC, 0x9B, + 0xCC, 0x83, 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, + 0x89, 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, + // Bytes 3b40 - 3b7f + 0xBA, 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCE, + 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, + 0x61, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x61, + 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x61, 0xCC, + 0x86, 0xCC, 0x80, 0xCE, 0x05, 0x61, 0xCC, 0x86, + 0xCC, 0x81, 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, + 0x83, 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, + 0xCE, 0x05, 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCE, + // Bytes 3b80 - 3bbf + 0x05, 0x61, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, + 0x61, 0xCC, 0x8A, 0xCC, 0x81, 0xCE, 0x05, 0x61, + 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x61, 0xCC, + 0xA3, 0xCC, 0x86, 0xCE, 0x05, 0x63, 0xCC, 0xA7, + 0xCC, 0x81, 0xCE, 0x05, 0x65, 0xCC, 0x82, 0xCC, + 0x80, 0xCE, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x81, + 0xCE, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x83, 0xCE, + 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, + // Bytes 3bc0 - 3bff + 0x65, 0xCC, 0x84, 0xCC, 0x80, 0xCE, 0x05, 0x65, + 0xCC, 0x84, 0xCC, 0x81, 0xCE, 0x05, 0x65, 0xCC, + 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x65, 0xCC, 0xA7, + 0xCC, 0x86, 0xCE, 0x05, 0x69, 0xCC, 0x88, 0xCC, + 0x81, 0xCE, 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, + 0xCE, 0x05, 0x6F, 0xCC, 0x82, 0xCC, 
0x80, 0xCE, + 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, + 0x6F, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x6F, + // Bytes 3c00 - 3c3f + 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x6F, 0xCC, + 0x83, 0xCC, 0x81, 0xCE, 0x05, 0x6F, 0xCC, 0x83, + 0xCC, 0x84, 0xCE, 0x05, 0x6F, 0xCC, 0x83, 0xCC, + 0x88, 0xCE, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, + 0xCE, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCE, + 0x05, 0x6F, 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, + 0x6F, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x6F, + 0xCC, 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x6F, 0xCC, + // Bytes 3c40 - 3c7f + 0x9B, 0xCC, 0x81, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, + 0xCC, 0x83, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, + 0x89, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, + 0xBA, 0x05, 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCE, + 0x05, 0x6F, 0xCC, 0xA8, 0xCC, 0x84, 0xCE, 0x05, + 0x72, 0xCC, 0xA3, 0xCC, 0x84, 0xCE, 0x05, 0x73, + 0xCC, 0x81, 0xCC, 0x87, 0xCE, 0x05, 0x73, 0xCC, + 0x8C, 0xCC, 0x87, 0xCE, 0x05, 0x73, 0xCC, 0xA3, + // Bytes 3c80 - 3cbf + 0xCC, 0x87, 0xCE, 0x05, 0x75, 0xCC, 0x83, 0xCC, + 0x81, 0xCE, 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, + 0xCE, 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCE, + 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x05, + 0x75, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x75, + 0xCC, 0x88, 0xCC, 0x8C, 0xCE, 0x05, 0x75, 0xCC, + 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x75, 0xCC, 0x9B, + 0xCC, 0x81, 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + // Bytes 3cc0 - 3cff + 0x83, 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, + 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, + 0x05, 0xE1, 0xBE, 0xBF, 0xCC, 0x80, 0xCE, 0x05, + 0xE1, 0xBE, 0xBF, 0xCC, 0x81, 0xCE, 0x05, 0xE1, + 0xBE, 0xBF, 0xCD, 0x82, 0xCE, 0x05, 0xE1, 0xBF, + 0xBE, 0xCC, 0x80, 0xCE, 0x05, 0xE1, 0xBF, 0xBE, + 0xCC, 0x81, 0xCE, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, + 0x82, 0xCE, 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, + // Bytes 3d00 - 3d3f + 0x05, 0x05, 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x87, 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x87, 0x92, 0xCC, 
0xB8, 0x05, 0x05, 0xE2, 0x87, + 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x88, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, + // Bytes 3d40 - 3d7f + 0x05, 0xE2, 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x88, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0x85, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3d80 - 3dbf + 0xE2, 0x89, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xB6, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xBC, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + // Bytes 3dc0 - 3dff + 0x8A, 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0x86, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x92, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0xAB, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + // Bytes 3e00 - 3e3f + 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, + 0x05, 0x06, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, + // Bytes 3e40 - 3e7f + 0xCE, 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, + 
0xCE, 0x06, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, + // Bytes 3e80 - 3ebf + 0xCE, 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, + // Bytes 3ec0 - 3eff + 0xCE, 0x06, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, + // Bytes 3f00 - 3f3f + 0xCE, 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, + // Bytes 3f40 - 3f7f + 0xDE, 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 
0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, + // Bytes 3f80 - 3fbf + 0xCE, 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, + 0xCE, 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, + // Bytes 3fc0 - 3fff + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x81, + 0xCE, 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, + 0xCE, 0x06, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, + 0xDE, 0x06, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, + // Bytes 4000 - 403f + 0xDE, 0x06, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, + 0xDE, 0x06, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, + 0xDE, 0x06, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, + 0xDE, 0x06, 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, + 0x0D, 0x06, 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, + 0x0D, 0x06, 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, + 0x0D, 0x06, 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, + 0x89, 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, + // Bytes 4040 - 407f + 0x15, 0x06, 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, + // Bytes 4080 - 40bf + 0x11, 0x06, 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xA1, 0xE3, 
0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, + // Bytes 40c0 - 40ff + 0x11, 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, + // Bytes 4100 - 413f + 0x11, 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, + // Bytes 4140 - 417f + 0x11, 0x06, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xB9, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, + // Bytes 4180 - 41bf + 0x11, 0x06, 0xE3, 0x83, 0x84, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, + // Bytes 41c0 - 41ff + 0x11, 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 
0x83, 0x98, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, + 0x11, 0x06, 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, + // Bytes 4200 - 423f + 0x11, 0x06, 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, + 0x11, 0x06, 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, + 0x11, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x93, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, + 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, + // Bytes 4240 - 427f + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x94, + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, + 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x94, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, + 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + // Bytes 4280 - 42bf + 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x93, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, + 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x94, + // Bytes 42c0 - 42ff + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, + 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x94, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, + 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, + // Bytes 4300 - 433f + 
0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x93, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, + 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x94, + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, + // Bytes 4340 - 437f + 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x94, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, + 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, + 0xDF, 0x08, 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, + // Bytes 4380 - 43bf + 0x82, 0xBA, 0x0D, 0x08, 0xF0, 0x91, 0x82, 0x9B, + 0xF0, 0x91, 0x82, 0xBA, 0x0D, 0x08, 0xF0, 0x91, + 0x82, 0xA5, 0xF0, 0x91, 0x82, 0xBA, 0x0D, 0x42, + 0xC2, 0xB4, 0x01, 0x43, 0x20, 0xCC, 0x81, 0xCD, + 0x43, 0x20, 0xCC, 0x83, 0xCD, 0x43, 0x20, 0xCC, + 0x84, 0xCD, 0x43, 0x20, 0xCC, 0x85, 0xCD, 0x43, + 0x20, 0xCC, 0x86, 0xCD, 0x43, 0x20, 0xCC, 0x87, + 0xCD, 0x43, 0x20, 0xCC, 0x88, 0xCD, 0x43, 0x20, + // Bytes 43c0 - 43ff + 0xCC, 0x8A, 0xCD, 0x43, 0x20, 0xCC, 0x8B, 0xCD, + 0x43, 0x20, 0xCC, 0x93, 0xCD, 0x43, 0x20, 0xCC, + 0x94, 0xCD, 0x43, 0x20, 0xCC, 0xA7, 0xA9, 0x43, + 0x20, 0xCC, 0xA8, 0xA9, 0x43, 0x20, 0xCC, 0xB3, + 0xB9, 0x43, 0x20, 0xCD, 0x82, 0xCD, 0x43, 0x20, + 0xCD, 0x85, 0xDD, 0x43, 0x20, 0xD9, 0x8B, 0x5D, + 0x43, 0x20, 0xD9, 0x8C, 0x61, 0x43, 0x20, 0xD9, + 0x8D, 0x65, 0x43, 0x20, 0xD9, 0x8E, 0x69, 0x43, + // Bytes 4400 - 443f + 0x20, 0xD9, 0x8F, 0x6D, 0x43, 0x20, 0xD9, 0x90, + 0x71, 0x43, 0x20, 0xD9, 0x91, 0x75, 0x43, 0x20, + 0xD9, 0x92, 0x79, 0x43, 0x41, 0xCC, 0x8A, 0xCD, + 0x43, 0x73, 0xCC, 0x87, 0xCD, 0x44, 0x20, 0xE3, + 0x82, 0x99, 0x11, 0x44, 0x20, 0xE3, 0x82, 0x9A, + 0x11, 0x44, 0xC2, 0xA8, 0xCC, 0x81, 0xCE, 0x44, + 0xCE, 
0x91, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0x95, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0x97, 0xCC, 0x81, + // Bytes 4440 - 447f + 0xCD, 0x44, 0xCE, 0x99, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0x9F, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xA5, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xA5, 0xCC, 0x88, + 0xCD, 0x44, 0xCE, 0xA9, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xB5, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xB7, 0xCC, 0x81, + 0xCD, 0x44, 0xCE, 0xB9, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0xBF, 0xCC, 0x81, 0xCD, 0x44, 0xCF, 0x85, + // Bytes 4480 - 44bf + 0xCC, 0x81, 0xCD, 0x44, 0xCF, 0x89, 0xCC, 0x81, + 0xCD, 0x44, 0xD7, 0x90, 0xD6, 0xB7, 0x35, 0x44, + 0xD7, 0x90, 0xD6, 0xB8, 0x39, 0x44, 0xD7, 0x90, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x91, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x91, 0xD6, 0xBF, 0x4D, 0x44, + 0xD7, 0x92, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x93, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x94, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x95, 0xD6, 0xB9, 0x3D, 0x44, + // Bytes 44c0 - 44ff + 0xD7, 0x95, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x96, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x98, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x99, 0xD6, 0xB4, 0x29, 0x44, + 0xD7, 0x99, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9A, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x9B, 0xD6, 0xBF, 0x4D, 0x44, + 0xD7, 0x9C, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9E, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, + // Bytes 4500 - 453f + 0x45, 0x44, 0xD7, 0xA1, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0xA3, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA4, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, + 0x4D, 0x44, 0xD7, 0xA6, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0xA7, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA8, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0xA9, 0xD7, 0x81, 0x51, 0x44, + 0xD7, 0xA9, 0xD7, 0x82, 0x55, 0x44, 0xD7, 0xAA, + // Bytes 4540 - 457f + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, + 0x35, 0x44, 0xD8, 0xA7, 0xD9, 0x8B, 0x5D, 0x44, + 0xD8, 0xA7, 0xD9, 0x93, 0xCD, 0x44, 0xD8, 0xA7, + 0xD9, 0x94, 0xCD, 0x44, 0xD8, 0xA7, 
0xD9, 0x95, + 0xB9, 0x44, 0xD8, 0xB0, 0xD9, 0xB0, 0x7D, 0x44, + 0xD8, 0xB1, 0xD9, 0xB0, 0x7D, 0x44, 0xD9, 0x80, + 0xD9, 0x8B, 0x5D, 0x44, 0xD9, 0x80, 0xD9, 0x8E, + 0x69, 0x44, 0xD9, 0x80, 0xD9, 0x8F, 0x6D, 0x44, + // Bytes 4580 - 45bf + 0xD9, 0x80, 0xD9, 0x90, 0x71, 0x44, 0xD9, 0x80, + 0xD9, 0x91, 0x75, 0x44, 0xD9, 0x80, 0xD9, 0x92, + 0x79, 0x44, 0xD9, 0x87, 0xD9, 0xB0, 0x7D, 0x44, + 0xD9, 0x88, 0xD9, 0x94, 0xCD, 0x44, 0xD9, 0x89, + 0xD9, 0xB0, 0x7D, 0x44, 0xD9, 0x8A, 0xD9, 0x94, + 0xCD, 0x44, 0xDB, 0x92, 0xD9, 0x94, 0xCD, 0x44, + 0xDB, 0x95, 0xD9, 0x94, 0xCD, 0x45, 0x20, 0xCC, + 0x88, 0xCC, 0x80, 0xCE, 0x45, 0x20, 0xCC, 0x88, + // Bytes 45c0 - 45ff + 0xCC, 0x81, 0xCE, 0x45, 0x20, 0xCC, 0x88, 0xCD, + 0x82, 0xCE, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, + 0xCE, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCE, + 0x45, 0x20, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x45, + 0x20, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x45, 0x20, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x45, 0x20, 0xCC, + 0x94, 0xCD, 0x82, 0xCE, 0x45, 0x20, 0xD9, 0x8C, + 0xD9, 0x91, 0x76, 0x45, 0x20, 0xD9, 0x8D, 0xD9, + // Bytes 4600 - 463f + 0x91, 0x76, 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, + 0x76, 0x45, 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x76, + 0x45, 0x20, 0xD9, 0x90, 0xD9, 0x91, 0x76, 0x45, + 0x20, 0xD9, 0x91, 0xD9, 0xB0, 0x7E, 0x45, 0xE2, + 0xAB, 0x9D, 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, + 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x46, 0xCF, 0x85, + 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x46, 0xD7, 0xA9, + 0xD6, 0xBC, 0xD7, 0x81, 0x52, 0x46, 0xD7, 0xA9, + // Bytes 4640 - 467f + 0xD6, 0xBC, 0xD7, 0x82, 0x56, 0x46, 0xD9, 0x80, + 0xD9, 0x8E, 0xD9, 0x91, 0x76, 0x46, 0xD9, 0x80, + 0xD9, 0x8F, 0xD9, 0x91, 0x76, 0x46, 0xD9, 0x80, + 0xD9, 0x90, 0xD9, 0x91, 0x76, 0x46, 0xE0, 0xA4, + 0x95, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0x96, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0x97, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0x9C, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + // Bytes 4680 - 46bf + 0xA1, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0xA2, 0xE0, 0xA4, 
0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0xAB, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, + 0xAF, 0xE0, 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, + 0xA1, 0xE0, 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, + 0xA2, 0xE0, 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, + 0xAF, 0xE0, 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0x96, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + // Bytes 46c0 - 46ff + 0x97, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0x9C, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0xAB, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0xB2, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, + 0xB8, 0xE0, 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xAC, + 0xA1, 0xE0, 0xAC, 0xBC, 0x0D, 0x46, 0xE0, 0xAC, + 0xA2, 0xE0, 0xAC, 0xBC, 0x0D, 0x46, 0xE0, 0xBE, + 0xB2, 0xE0, 0xBE, 0x80, 0xA1, 0x46, 0xE0, 0xBE, + // Bytes 4700 - 473f + 0xB3, 0xE0, 0xBE, 0x80, 0xA1, 0x46, 0xE3, 0x83, + 0x86, 0xE3, 0x82, 0x99, 0x11, 0x48, 0xF0, 0x9D, + 0x85, 0x97, 0xF0, 0x9D, 0x85, 0xA5, 0xB1, 0x48, + 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, + 0xB1, 0x48, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, + 0x85, 0xA5, 0xB1, 0x48, 0xF0, 0x9D, 0x86, 0xBA, + 0xF0, 0x9D, 0x85, 0xA5, 0xB1, 0x49, 0xE0, 0xBE, + 0xB2, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0xA2, + // Bytes 4740 - 477f + 0x49, 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, + 0xBE, 0x80, 0xA2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, + 0xB2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xB2, 0x4C, + 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xB0, 0xB2, 0x4C, 0xF0, 0x9D, + 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + // Bytes 4780 - 47bf + 0x85, 0xB1, 0xB2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, + 0xB2, 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xB2, 0x4C, + 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xAF, 0xB2, 0x4C, 0xF0, 0x9D, + 0x86, 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + 0x85, 0xAE, 0xB2, 0x4C, 
0xF0, 0x9D, 0x86, 0xBA, + // Bytes 47c0 - 47ff + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, + 0xB2, 0x83, 0x41, 0xCC, 0x82, 0xCD, 0x83, 0x41, + 0xCC, 0x86, 0xCD, 0x83, 0x41, 0xCC, 0x87, 0xCD, + 0x83, 0x41, 0xCC, 0x88, 0xCD, 0x83, 0x41, 0xCC, + 0x8A, 0xCD, 0x83, 0x41, 0xCC, 0xA3, 0xB9, 0x83, + 0x43, 0xCC, 0xA7, 0xA9, 0x83, 0x45, 0xCC, 0x82, + 0xCD, 0x83, 0x45, 0xCC, 0x84, 0xCD, 0x83, 0x45, + 0xCC, 0xA3, 0xB9, 0x83, 0x45, 0xCC, 0xA7, 0xA9, + // Bytes 4800 - 483f + 0x83, 0x49, 0xCC, 0x88, 0xCD, 0x83, 0x4C, 0xCC, + 0xA3, 0xB9, 0x83, 0x4F, 0xCC, 0x82, 0xCD, 0x83, + 0x4F, 0xCC, 0x83, 0xCD, 0x83, 0x4F, 0xCC, 0x84, + 0xCD, 0x83, 0x4F, 0xCC, 0x87, 0xCD, 0x83, 0x4F, + 0xCC, 0x88, 0xCD, 0x83, 0x4F, 0xCC, 0x9B, 0xB1, + 0x83, 0x4F, 0xCC, 0xA3, 0xB9, 0x83, 0x4F, 0xCC, + 0xA8, 0xA9, 0x83, 0x52, 0xCC, 0xA3, 0xB9, 0x83, + 0x53, 0xCC, 0x81, 0xCD, 0x83, 0x53, 0xCC, 0x8C, + // Bytes 4840 - 487f + 0xCD, 0x83, 0x53, 0xCC, 0xA3, 0xB9, 0x83, 0x55, + 0xCC, 0x83, 0xCD, 0x83, 0x55, 0xCC, 0x84, 0xCD, + 0x83, 0x55, 0xCC, 0x88, 0xCD, 0x83, 0x55, 0xCC, + 0x9B, 0xB1, 0x83, 0x61, 0xCC, 0x82, 0xCD, 0x83, + 0x61, 0xCC, 0x86, 0xCD, 0x83, 0x61, 0xCC, 0x87, + 0xCD, 0x83, 0x61, 0xCC, 0x88, 0xCD, 0x83, 0x61, + 0xCC, 0x8A, 0xCD, 0x83, 0x61, 0xCC, 0xA3, 0xB9, + 0x83, 0x63, 0xCC, 0xA7, 0xA9, 0x83, 0x65, 0xCC, + // Bytes 4880 - 48bf + 0x82, 0xCD, 0x83, 0x65, 0xCC, 0x84, 0xCD, 0x83, + 0x65, 0xCC, 0xA3, 0xB9, 0x83, 0x65, 0xCC, 0xA7, + 0xA9, 0x83, 0x69, 0xCC, 0x88, 0xCD, 0x83, 0x6C, + 0xCC, 0xA3, 0xB9, 0x83, 0x6F, 0xCC, 0x82, 0xCD, + 0x83, 0x6F, 0xCC, 0x83, 0xCD, 0x83, 0x6F, 0xCC, + 0x84, 0xCD, 0x83, 0x6F, 0xCC, 0x87, 0xCD, 0x83, + 0x6F, 0xCC, 0x88, 0xCD, 0x83, 0x6F, 0xCC, 0x9B, + 0xB1, 0x83, 0x6F, 0xCC, 0xA3, 0xB9, 0x83, 0x6F, + // Bytes 48c0 - 48ff + 0xCC, 0xA8, 0xA9, 0x83, 0x72, 0xCC, 0xA3, 0xB9, + 0x83, 0x73, 0xCC, 0x81, 0xCD, 0x83, 0x73, 0xCC, + 0x8C, 0xCD, 0x83, 0x73, 0xCC, 0xA3, 0xB9, 0x83, + 0x75, 0xCC, 0x83, 0xCD, 0x83, 0x75, 0xCC, 0x84, + 0xCD, 0x83, 0x75, 0xCC, 0x88, 0xCD, 0x83, 0x75, + 0xCC, 
0x9B, 0xB1, 0x84, 0xCE, 0x91, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0x95, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0x95, + // Bytes 4900 - 493f + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0x97, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0x99, + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0x9F, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0x9F, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xA9, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xA9, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x84, + // Bytes 4940 - 497f + 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x84, 0xCE, 0xB1, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB1, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x84, + 0xCE, 0xB5, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB5, + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xB7, 0xCC, 0x80, + 0xCD, 0x84, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x84, + 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB7, + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xB7, 0xCD, 0x82, + // Bytes 4980 - 49bf + 0xCD, 0x84, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x84, + 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB9, + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xBF, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0xBF, 0xCC, 0x94, 0xCD, 0x84, + 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x84, 0xCF, 0x85, + 0xCC, 0x93, 0xCD, 0x84, 0xCF, 0x85, 0xCC, 0x94, + 0xCD, 0x84, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x84, + 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x84, 0xCF, 0x89, + // Bytes 49c0 - 49ff + 0xCC, 0x93, 0xCD, 0x84, 0xCF, 0x89, 0xCC, 0x94, + 0xCD, 0x84, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x86, + 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + // Bytes 4a00 - 4a3f + 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 
0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + // Bytes 4a40 - 4a7f + 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + // Bytes 4a80 - 4abf + 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, + // Bytes 4ac0 - 4aff + 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, + 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x42, + 0xCC, 0x80, 0xCD, 0x33, 0x42, 0xCC, 0x81, 0xCD, + 0x33, 0x42, 0xCC, 0x93, 0xCD, 0x33, 0x43, 0xE1, + // Bytes 4b00 - 4b3f + 0x85, 0xA1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xA5, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xA9, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, + // Bytes 4b40 - 4b7f + 0x43, 0xE1, 0x85, 
0xAC, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB5, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, + // Bytes 4b80 - 4bbf + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, + 0x01, 0x00, 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCE, + 0x33, 0x43, 0xE3, 0x82, 0x99, 0x11, 0x04, 0x43, + // Bytes 4bc0 - 4bff + 0xE3, 0x82, 0x9A, 0x11, 0x04, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBD, 0xB2, 0xA2, 0x27, 0x46, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBD, 0xB4, 0xA6, 0x27, 0x46, + 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0xA2, 0x27, + 0x00, 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. Total size: 10798 bytes (10.54 KiB). Checksum: b5981cc85e3bd14. 
+type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 46: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 46 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 48 blocks, 3072 entries, 6144 bytes +// The third block is the zero block. +var nfcValues = [3072]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x30b0, 0xc1: 0x30b5, 0xc2: 0x47c9, 0xc3: 0x30ba, 0xc4: 0x47d8, 0xc5: 0x47dd, + 0xc6: 0xa000, 0xc7: 0x47e7, 0xc8: 0x3123, 0xc9: 0x3128, 0xca: 0x47ec, 0xcb: 0x313c, + 0xcc: 0x31af, 0xcd: 0x31b4, 0xce: 0x31b9, 0xcf: 0x4800, 0xd1: 0x3245, + 0xd2: 0x3268, 0xd3: 0x326d, 0xd4: 0x480a, 0xd5: 0x480f, 0xd6: 0x481e, + 0xd8: 0xa000, 0xd9: 0x32f4, 0xda: 0x32f9, 0xdb: 0x32fe, 0xdc: 0x4850, 0xdd: 0x3376, + 0xe0: 0x33bc, 0xe1: 0x33c1, 0xe2: 0x485a, 0xe3: 0x33c6, + 0xe4: 0x4869, 0xe5: 0x486e, 0xe6: 0xa000, 0xe7: 0x4878, 0xe8: 0x342f, 0xe9: 0x3434, + 0xea: 0x487d, 0xeb: 0x3448, 0xec: 0x34c0, 0xed: 0x34c5, 0xee: 0x34ca, 0xef: 0x4891, + 0xf1: 
0x3556, 0xf2: 0x3579, 0xf3: 0x357e, 0xf4: 0x489b, 0xf5: 0x48a0, + 0xf6: 0x48af, 0xf8: 0xa000, 0xf9: 0x360a, 0xfa: 0x360f, 0xfb: 0x3614, + 0xfc: 0x48e1, 0xfd: 0x3691, 0xff: 0x36aa, + // Block 0x4, offset 0x100 + 0x100: 0x30bf, 0x101: 0x33cb, 0x102: 0x47ce, 0x103: 0x485f, 0x104: 0x30dd, 0x105: 0x33e9, + 0x106: 0x30f1, 0x107: 0x33fd, 0x108: 0x30f6, 0x109: 0x3402, 0x10a: 0x30fb, 0x10b: 0x3407, + 0x10c: 0x3100, 0x10d: 0x340c, 0x10e: 0x310a, 0x10f: 0x3416, + 0x112: 0x47f1, 0x113: 0x4882, 0x114: 0x3132, 0x115: 0x343e, 0x116: 0x3137, 0x117: 0x3443, + 0x118: 0x3155, 0x119: 0x3461, 0x11a: 0x3146, 0x11b: 0x3452, 0x11c: 0x316e, 0x11d: 0x347a, + 0x11e: 0x3178, 0x11f: 0x3484, 0x120: 0x317d, 0x121: 0x3489, 0x122: 0x3187, 0x123: 0x3493, + 0x124: 0x318c, 0x125: 0x3498, 0x128: 0x31be, 0x129: 0x34cf, + 0x12a: 0x31c3, 0x12b: 0x34d4, 0x12c: 0x31c8, 0x12d: 0x34d9, 0x12e: 0x31eb, 0x12f: 0x34f7, + 0x130: 0x31cd, 0x134: 0x31f5, 0x135: 0x3501, + 0x136: 0x3209, 0x137: 0x351a, 0x139: 0x3213, 0x13a: 0x3524, 0x13b: 0x321d, + 0x13c: 0x352e, 0x13d: 0x3218, 0x13e: 0x3529, + // Block 0x5, offset 0x140 + 0x143: 0x3240, 0x144: 0x3551, 0x145: 0x3259, + 0x146: 0x356a, 0x147: 0x324f, 0x148: 0x3560, + 0x14c: 0x4814, 0x14d: 0x48a5, 0x14e: 0x3272, 0x14f: 0x3583, 0x150: 0x327c, 0x151: 0x358d, + 0x154: 0x329a, 0x155: 0x35ab, 0x156: 0x32b3, 0x157: 0x35c4, + 0x158: 0x32a4, 0x159: 0x35b5, 0x15a: 0x4837, 0x15b: 0x48c8, 0x15c: 0x32bd, 0x15d: 0x35ce, + 0x15e: 0x32cc, 0x15f: 0x35dd, 0x160: 0x483c, 0x161: 0x48cd, 0x162: 0x32e5, 0x163: 0x35fb, + 0x164: 0x32d6, 0x165: 0x35ec, 0x168: 0x4846, 0x169: 0x48d7, + 0x16a: 0x484b, 0x16b: 0x48dc, 0x16c: 0x3303, 0x16d: 0x3619, 0x16e: 0x330d, 0x16f: 0x3623, + 0x170: 0x3312, 0x171: 0x3628, 0x172: 0x3330, 0x173: 0x3646, 0x174: 0x3353, 0x175: 0x3669, + 0x176: 0x337b, 0x177: 0x3696, 0x178: 0x338f, 0x179: 0x339e, 0x17a: 0x36be, 0x17b: 0x33a8, + 0x17c: 0x36c8, 0x17d: 0x33ad, 0x17e: 0x36cd, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 
0x18d: 0x30c9, 0x18e: 0x33d5, 0x18f: 0x31d7, 0x190: 0x34e3, 0x191: 0x3281, + 0x192: 0x3592, 0x193: 0x3317, 0x194: 0x362d, 0x195: 0x3b10, 0x196: 0x3c9f, 0x197: 0x3b09, + 0x198: 0x3c98, 0x199: 0x3b17, 0x19a: 0x3ca6, 0x19b: 0x3b02, 0x19c: 0x3c91, + 0x19e: 0x39f1, 0x19f: 0x3b80, 0x1a0: 0x39ea, 0x1a1: 0x3b79, 0x1a2: 0x36f4, 0x1a3: 0x3706, + 0x1a6: 0x3182, 0x1a7: 0x348e, 0x1a8: 0x31ff, 0x1a9: 0x3510, + 0x1aa: 0x482d, 0x1ab: 0x48be, 0x1ac: 0x3ad1, 0x1ad: 0x3c60, 0x1ae: 0x3718, 0x1af: 0x371e, + 0x1b0: 0x3506, 0x1b4: 0x3169, 0x1b5: 0x3475, + 0x1b8: 0x323b, 0x1b9: 0x354c, 0x1ba: 0x39f8, 0x1bb: 0x3b87, + 0x1bc: 0x36ee, 0x1bd: 0x3700, 0x1be: 0x36fa, 0x1bf: 0x370c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x30ce, 0x1c1: 0x33da, 0x1c2: 0x30d3, 0x1c3: 0x33df, 0x1c4: 0x314b, 0x1c5: 0x3457, + 0x1c6: 0x3150, 0x1c7: 0x345c, 0x1c8: 0x31dc, 0x1c9: 0x34e8, 0x1ca: 0x31e1, 0x1cb: 0x34ed, + 0x1cc: 0x3286, 0x1cd: 0x3597, 0x1ce: 0x328b, 0x1cf: 0x359c, 0x1d0: 0x32a9, 0x1d1: 0x35ba, + 0x1d2: 0x32ae, 0x1d3: 0x35bf, 0x1d4: 0x331c, 0x1d5: 0x3632, 0x1d6: 0x3321, 0x1d7: 0x3637, + 0x1d8: 0x32c7, 0x1d9: 0x35d8, 0x1da: 0x32e0, 0x1db: 0x35f6, + 0x1de: 0x319b, 0x1df: 0x34a7, + 0x1e6: 0x47d3, 0x1e7: 0x4864, 0x1e8: 0x47fb, 0x1e9: 0x488c, + 0x1ea: 0x3aa0, 0x1eb: 0x3c2f, 0x1ec: 0x3a7d, 0x1ed: 0x3c0c, 0x1ee: 0x4819, 0x1ef: 0x48aa, + 0x1f0: 0x3a99, 0x1f1: 0x3c28, 0x1f2: 0x3385, 0x1f3: 0x36a0, + // Block 0x8, offset 0x200 + 0x200: 0x9933, 0x201: 0x9933, 0x202: 0x9933, 0x203: 0x9933, 0x204: 0x9933, 0x205: 0x8133, + 0x206: 0x9933, 0x207: 0x9933, 0x208: 0x9933, 0x209: 0x9933, 0x20a: 0x9933, 0x20b: 0x9933, + 0x20c: 0x9933, 0x20d: 0x8133, 0x20e: 0x8133, 0x20f: 0x9933, 0x210: 0x8133, 0x211: 0x9933, + 0x212: 0x8133, 0x213: 0x9933, 0x214: 0x9933, 0x215: 0x8134, 0x216: 0x812e, 0x217: 0x812e, + 0x218: 0x812e, 0x219: 0x812e, 0x21a: 0x8134, 0x21b: 0x992c, 0x21c: 0x812e, 0x21d: 0x812e, + 0x21e: 0x812e, 0x21f: 0x812e, 0x220: 0x812e, 0x221: 0x812a, 0x222: 0x812a, 0x223: 0x992e, + 0x224: 0x992e, 0x225: 0x992e, 0x226: 0x992e, 
0x227: 0x992a, 0x228: 0x992a, 0x229: 0x812e, + 0x22a: 0x812e, 0x22b: 0x812e, 0x22c: 0x812e, 0x22d: 0x992e, 0x22e: 0x992e, 0x22f: 0x812e, + 0x230: 0x992e, 0x231: 0x992e, 0x232: 0x812e, 0x233: 0x812e, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812e, 0x23a: 0x812e, 0x23b: 0x812e, + 0x23c: 0x812e, 0x23d: 0x8133, 0x23e: 0x8133, 0x23f: 0x8133, + // Block 0x9, offset 0x240 + 0x240: 0x4aef, 0x241: 0x4af4, 0x242: 0x9933, 0x243: 0x4af9, 0x244: 0x4bb2, 0x245: 0x9937, + 0x246: 0x8133, 0x247: 0x812e, 0x248: 0x812e, 0x249: 0x812e, 0x24a: 0x8133, 0x24b: 0x8133, + 0x24c: 0x8133, 0x24d: 0x812e, 0x24e: 0x812e, 0x250: 0x8133, 0x251: 0x8133, + 0x252: 0x8133, 0x253: 0x812e, 0x254: 0x812e, 0x255: 0x812e, 0x256: 0x812e, 0x257: 0x8133, + 0x258: 0x8134, 0x259: 0x812e, 0x25a: 0x812e, 0x25b: 0x8133, 0x25c: 0x8135, 0x25d: 0x8136, + 0x25e: 0x8136, 0x25f: 0x8135, 0x260: 0x8136, 0x261: 0x8136, 0x262: 0x8135, 0x263: 0x8133, + 0x264: 0x8133, 0x265: 0x8133, 0x266: 0x8133, 0x267: 0x8133, 0x268: 0x8133, 0x269: 0x8133, + 0x26a: 0x8133, 0x26b: 0x8133, 0x26c: 0x8133, 0x26d: 0x8133, 0x26e: 0x8133, 0x26f: 0x8133, + 0x274: 0x01ee, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x36e2, + 0x286: 0x372a, 0x287: 0x00ce, 0x288: 0x3748, 0x289: 0x3754, 0x28a: 0x3766, + 0x28c: 0x3784, 0x28e: 0x3796, 0x28f: 0x37b4, 0x290: 0x3f49, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3778, 0x2ab: 0x37a8, 0x2ac: 0x493f, 0x2ad: 0x37d8, 0x2ae: 0x4969, 0x2af: 0x37ea, + 0x2b0: 0x3fb1, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3862, 0x2c1: 0x386e, 0x2c3: 0x385c, + 0x2c6: 0xa000, 0x2c7: 0x384a, + 0x2cc: 0x389e, 0x2cd: 0x3886, 0x2ce: 0x38b0, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3892, 0x2da: 0xa000, + 0x2de: 0xa000, 
0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x3916, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x3874, 0x302: 0x38f8, + 0x310: 0x3850, 0x311: 0x38d4, + 0x312: 0x3856, 0x313: 0x38da, 0x316: 0x3868, 0x317: 0x38ec, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x396a, 0x31b: 0x3970, 0x31c: 0x387a, 0x31d: 0x38fe, + 0x31e: 0x3880, 0x31f: 0x3904, 0x322: 0x388c, 0x323: 0x3910, + 0x324: 0x3898, 0x325: 0x391c, 0x326: 0x38a4, 0x327: 0x3928, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x3976, 0x32b: 0x397c, 0x32c: 0x38ce, 0x32d: 0x3952, 0x32e: 0x38aa, 0x32f: 0x392e, + 0x330: 0x38b6, 0x331: 0x393a, 0x332: 0x38bc, 0x333: 0x3940, 0x334: 0x38c2, 0x335: 0x3946, + 0x338: 0x38c8, 0x339: 0x394c, + // Block 0xd, offset 0x340 + 0x351: 0x812e, + 0x352: 0x8133, 0x353: 0x8133, 0x354: 0x8133, 0x355: 0x8133, 0x356: 0x812e, 0x357: 0x8133, + 0x358: 0x8133, 0x359: 0x8133, 0x35a: 0x812f, 0x35b: 0x812e, 0x35c: 0x8133, 0x35d: 0x8133, + 0x35e: 0x8133, 0x35f: 0x8133, 0x360: 0x8133, 0x361: 0x8133, 0x362: 0x812e, 0x363: 0x812e, + 0x364: 0x812e, 0x365: 0x812e, 0x366: 0x812e, 0x367: 0x812e, 0x368: 0x8133, 0x369: 0x8133, + 0x36a: 0x812e, 0x36b: 0x8133, 0x36c: 0x8133, 0x36d: 0x812f, 0x36e: 0x8132, 0x36f: 0x8133, + 0x370: 0x8106, 0x371: 0x8107, 0x372: 0x8108, 0x373: 0x8109, 0x374: 0x810a, 0x375: 0x810b, + 0x376: 0x810c, 0x377: 0x810d, 0x378: 0x810e, 0x379: 0x810f, 0x37a: 0x810f, 0x37b: 0x8110, + 0x37c: 0x8111, 0x37d: 0x8112, 0x37f: 0x8113, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8117, + 0x38c: 0x8118, 0x38d: 0x8119, 0x38e: 0x811a, 0x38f: 0x811b, 0x390: 0x811c, 0x391: 0x811d, + 0x392: 0x811e, 0x393: 0x9933, 0x394: 0x9933, 0x395: 0x992e, 0x396: 0x812e, 0x397: 0x8133, + 0x398: 0x8133, 0x399: 0x8133, 0x39a: 0x8133, 0x39b: 0x8133, 0x39c: 0x812e, 0x39d: 0x8133, + 0x39e: 0x8133, 0x39f: 0x812e, + 0x3b0: 0x811f, + // Block 0xf, offset 0x3c0 + 0x3ca: 
0x8133, 0x3cb: 0x8133, + 0x3cc: 0x8133, 0x3cd: 0x8133, 0x3ce: 0x8133, 0x3cf: 0x812e, 0x3d0: 0x812e, 0x3d1: 0x812e, + 0x3d2: 0x812e, 0x3d3: 0x812e, 0x3d4: 0x8133, 0x3d5: 0x8133, 0x3d6: 0x8133, 0x3d7: 0x8133, + 0x3d8: 0x8133, 0x3d9: 0x8133, 0x3da: 0x8133, 0x3db: 0x8133, 0x3dc: 0x8133, 0x3dd: 0x8133, + 0x3de: 0x8133, 0x3df: 0x8133, 0x3e0: 0x8133, 0x3e1: 0x8133, 0x3e3: 0x812e, + 0x3e4: 0x8133, 0x3e5: 0x8133, 0x3e6: 0x812e, 0x3e7: 0x8133, 0x3e8: 0x8133, 0x3e9: 0x812e, + 0x3ea: 0x8133, 0x3eb: 0x8133, 0x3ec: 0x8133, 0x3ed: 0x812e, 0x3ee: 0x812e, 0x3ef: 0x812e, + 0x3f0: 0x8117, 0x3f1: 0x8118, 0x3f2: 0x8119, 0x3f3: 0x8133, 0x3f4: 0x8133, 0x3f5: 0x8133, + 0x3f6: 0x812e, 0x3f7: 0x8133, 0x3f8: 0x8133, 0x3f9: 0x812e, 0x3fa: 0x812e, 0x3fb: 0x8133, + 0x3fc: 0x8133, 0x3fd: 0x8133, 0x3fe: 0x8133, 0x3ff: 0x8133, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2e5d, 0x407: 0xa000, 0x408: 0x2e65, 0x409: 0xa000, 0x40a: 0x2e6d, 0x40b: 0xa000, + 0x40c: 0x2e75, 0x40d: 0xa000, 0x40e: 0x2e7d, 0x411: 0xa000, + 0x412: 0x2e85, + 0x434: 0x8103, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2e8d, + 0x43c: 0xa000, 0x43d: 0x2e95, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x8133, 0x441: 0x8133, 0x442: 0x812e, 0x443: 0x8133, 0x444: 0x8133, 0x445: 0x8133, + 0x446: 0x8133, 0x447: 0x8133, 0x448: 0x8133, 0x449: 0x8133, 0x44a: 0x812e, 0x44b: 0x8133, + 0x44c: 0x8133, 0x44d: 0x8136, 0x44e: 0x812b, 0x44f: 0x812e, 0x450: 0x812a, 0x451: 0x8133, + 0x452: 0x8133, 0x453: 0x8133, 0x454: 0x8133, 0x455: 0x8133, 0x456: 0x8133, 0x457: 0x8133, + 0x458: 0x8133, 0x459: 0x8133, 0x45a: 0x8133, 0x45b: 0x8133, 0x45c: 0x8133, 0x45d: 0x8133, + 0x45e: 0x8133, 0x45f: 0x8133, 0x460: 0x8133, 0x461: 0x8133, 0x462: 0x8133, 0x463: 0x8133, + 0x464: 0x8133, 0x465: 0x8133, 0x466: 0x8133, 0x467: 0x8133, 0x468: 0x8133, 0x469: 0x8133, + 0x46a: 0x8133, 0x46b: 0x8133, 0x46c: 0x8133, 0x46d: 0x8133, 0x46e: 0x8133, 0x46f: 0x8133, + 0x470: 0x8133, 0x471: 0x8133, 0x472: 0x8133, 0x473: 0x8133, 0x474: 0x8133, 0x475: 
0x8133, + 0x476: 0x8134, 0x477: 0x8132, 0x478: 0x8132, 0x479: 0x812e, 0x47a: 0x812d, 0x47b: 0x8133, + 0x47c: 0x8135, 0x47d: 0x812e, 0x47e: 0x8133, 0x47f: 0x812e, + // Block 0x12, offset 0x480 + 0x480: 0x30d8, 0x481: 0x33e4, 0x482: 0x30e2, 0x483: 0x33ee, 0x484: 0x30e7, 0x485: 0x33f3, + 0x486: 0x30ec, 0x487: 0x33f8, 0x488: 0x3a0d, 0x489: 0x3b9c, 0x48a: 0x3105, 0x48b: 0x3411, + 0x48c: 0x310f, 0x48d: 0x341b, 0x48e: 0x311e, 0x48f: 0x342a, 0x490: 0x3114, 0x491: 0x3420, + 0x492: 0x3119, 0x493: 0x3425, 0x494: 0x3a30, 0x495: 0x3bbf, 0x496: 0x3a37, 0x497: 0x3bc6, + 0x498: 0x315a, 0x499: 0x3466, 0x49a: 0x315f, 0x49b: 0x346b, 0x49c: 0x3a45, 0x49d: 0x3bd4, + 0x49e: 0x3164, 0x49f: 0x3470, 0x4a0: 0x3173, 0x4a1: 0x347f, 0x4a2: 0x3191, 0x4a3: 0x349d, + 0x4a4: 0x31a0, 0x4a5: 0x34ac, 0x4a6: 0x3196, 0x4a7: 0x34a2, 0x4a8: 0x31a5, 0x4a9: 0x34b1, + 0x4aa: 0x31aa, 0x4ab: 0x34b6, 0x4ac: 0x31f0, 0x4ad: 0x34fc, 0x4ae: 0x3a4c, 0x4af: 0x3bdb, + 0x4b0: 0x31fa, 0x4b1: 0x350b, 0x4b2: 0x3204, 0x4b3: 0x3515, 0x4b4: 0x320e, 0x4b5: 0x351f, + 0x4b6: 0x4805, 0x4b7: 0x4896, 0x4b8: 0x3a53, 0x4b9: 0x3be2, 0x4ba: 0x3227, 0x4bb: 0x3538, + 0x4bc: 0x3222, 0x4bd: 0x3533, 0x4be: 0x322c, 0x4bf: 0x353d, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x3231, 0x4c1: 0x3542, 0x4c2: 0x3236, 0x4c3: 0x3547, 0x4c4: 0x324a, 0x4c5: 0x355b, + 0x4c6: 0x3254, 0x4c7: 0x3565, 0x4c8: 0x3263, 0x4c9: 0x3574, 0x4ca: 0x325e, 0x4cb: 0x356f, + 0x4cc: 0x3a76, 0x4cd: 0x3c05, 0x4ce: 0x3a84, 0x4cf: 0x3c13, 0x4d0: 0x3a8b, 0x4d1: 0x3c1a, + 0x4d2: 0x3a92, 0x4d3: 0x3c21, 0x4d4: 0x3290, 0x4d5: 0x35a1, 0x4d6: 0x3295, 0x4d7: 0x35a6, + 0x4d8: 0x329f, 0x4d9: 0x35b0, 0x4da: 0x4832, 0x4db: 0x48c3, 0x4dc: 0x3ad8, 0x4dd: 0x3c67, + 0x4de: 0x32b8, 0x4df: 0x35c9, 0x4e0: 0x32c2, 0x4e1: 0x35d3, 0x4e2: 0x4841, 0x4e3: 0x48d2, + 0x4e4: 0x3adf, 0x4e5: 0x3c6e, 0x4e6: 0x3ae6, 0x4e7: 0x3c75, 0x4e8: 0x3aed, 0x4e9: 0x3c7c, + 0x4ea: 0x32d1, 0x4eb: 0x35e2, 0x4ec: 0x32db, 0x4ed: 0x35f1, 0x4ee: 0x32ef, 0x4ef: 0x3605, + 0x4f0: 0x32ea, 0x4f1: 0x3600, 0x4f2: 0x332b, 0x4f3: 
0x3641, 0x4f4: 0x333a, 0x4f5: 0x3650, + 0x4f6: 0x3335, 0x4f7: 0x364b, 0x4f8: 0x3af4, 0x4f9: 0x3c83, 0x4fa: 0x3afb, 0x4fb: 0x3c8a, + 0x4fc: 0x333f, 0x4fd: 0x3655, 0x4fe: 0x3344, 0x4ff: 0x365a, + // Block 0x14, offset 0x500 + 0x500: 0x3349, 0x501: 0x365f, 0x502: 0x334e, 0x503: 0x3664, 0x504: 0x335d, 0x505: 0x3673, + 0x506: 0x3358, 0x507: 0x366e, 0x508: 0x3362, 0x509: 0x367d, 0x50a: 0x3367, 0x50b: 0x3682, + 0x50c: 0x336c, 0x50d: 0x3687, 0x50e: 0x338a, 0x50f: 0x36a5, 0x510: 0x33a3, 0x511: 0x36c3, + 0x512: 0x33b2, 0x513: 0x36d2, 0x514: 0x33b7, 0x515: 0x36d7, 0x516: 0x34bb, 0x517: 0x35e7, + 0x518: 0x3678, 0x519: 0x36b4, 0x51b: 0x3712, + 0x520: 0x47e2, 0x521: 0x4873, 0x522: 0x30c4, 0x523: 0x33d0, + 0x524: 0x39b9, 0x525: 0x3b48, 0x526: 0x39b2, 0x527: 0x3b41, 0x528: 0x39c7, 0x529: 0x3b56, + 0x52a: 0x39c0, 0x52b: 0x3b4f, 0x52c: 0x39ff, 0x52d: 0x3b8e, 0x52e: 0x39d5, 0x52f: 0x3b64, + 0x530: 0x39ce, 0x531: 0x3b5d, 0x532: 0x39e3, 0x533: 0x3b72, 0x534: 0x39dc, 0x535: 0x3b6b, + 0x536: 0x3a06, 0x537: 0x3b95, 0x538: 0x47f6, 0x539: 0x4887, 0x53a: 0x3141, 0x53b: 0x344d, + 0x53c: 0x312d, 0x53d: 0x3439, 0x53e: 0x3a1b, 0x53f: 0x3baa, + // Block 0x15, offset 0x540 + 0x540: 0x3a14, 0x541: 0x3ba3, 0x542: 0x3a29, 0x543: 0x3bb8, 0x544: 0x3a22, 0x545: 0x3bb1, + 0x546: 0x3a3e, 0x547: 0x3bcd, 0x548: 0x31d2, 0x549: 0x34de, 0x54a: 0x31e6, 0x54b: 0x34f2, + 0x54c: 0x4828, 0x54d: 0x48b9, 0x54e: 0x3277, 0x54f: 0x3588, 0x550: 0x3a61, 0x551: 0x3bf0, + 0x552: 0x3a5a, 0x553: 0x3be9, 0x554: 0x3a6f, 0x555: 0x3bfe, 0x556: 0x3a68, 0x557: 0x3bf7, + 0x558: 0x3aca, 0x559: 0x3c59, 0x55a: 0x3aae, 0x55b: 0x3c3d, 0x55c: 0x3aa7, 0x55d: 0x3c36, + 0x55e: 0x3abc, 0x55f: 0x3c4b, 0x560: 0x3ab5, 0x561: 0x3c44, 0x562: 0x3ac3, 0x563: 0x3c52, + 0x564: 0x3326, 0x565: 0x363c, 0x566: 0x3308, 0x567: 0x361e, 0x568: 0x3b25, 0x569: 0x3cb4, + 0x56a: 0x3b1e, 0x56b: 0x3cad, 0x56c: 0x3b33, 0x56d: 0x3cc2, 0x56e: 0x3b2c, 0x56f: 0x3cbb, + 0x570: 0x3b3a, 0x571: 0x3cc9, 0x572: 0x3371, 0x573: 0x368c, 0x574: 0x3399, 0x575: 0x36b9, + 0x576: 
0x3394, 0x577: 0x36af, 0x578: 0x3380, 0x579: 0x369b, + // Block 0x16, offset 0x580 + 0x580: 0x4945, 0x581: 0x494b, 0x582: 0x4a5f, 0x583: 0x4a77, 0x584: 0x4a67, 0x585: 0x4a7f, + 0x586: 0x4a6f, 0x587: 0x4a87, 0x588: 0x48eb, 0x589: 0x48f1, 0x58a: 0x49cf, 0x58b: 0x49e7, + 0x58c: 0x49d7, 0x58d: 0x49ef, 0x58e: 0x49df, 0x58f: 0x49f7, 0x590: 0x4957, 0x591: 0x495d, + 0x592: 0x3ef9, 0x593: 0x3f09, 0x594: 0x3f01, 0x595: 0x3f11, + 0x598: 0x48f7, 0x599: 0x48fd, 0x59a: 0x3e29, 0x59b: 0x3e39, 0x59c: 0x3e31, 0x59d: 0x3e41, + 0x5a0: 0x496f, 0x5a1: 0x4975, 0x5a2: 0x4a8f, 0x5a3: 0x4aa7, + 0x5a4: 0x4a97, 0x5a5: 0x4aaf, 0x5a6: 0x4a9f, 0x5a7: 0x4ab7, 0x5a8: 0x4903, 0x5a9: 0x4909, + 0x5aa: 0x49ff, 0x5ab: 0x4a17, 0x5ac: 0x4a07, 0x5ad: 0x4a1f, 0x5ae: 0x4a0f, 0x5af: 0x4a27, + 0x5b0: 0x4987, 0x5b1: 0x498d, 0x5b2: 0x3f59, 0x5b3: 0x3f71, 0x5b4: 0x3f61, 0x5b5: 0x3f79, + 0x5b6: 0x3f69, 0x5b7: 0x3f81, 0x5b8: 0x490f, 0x5b9: 0x4915, 0x5ba: 0x3e59, 0x5bb: 0x3e71, + 0x5bc: 0x3e61, 0x5bd: 0x3e79, 0x5be: 0x3e69, 0x5bf: 0x3e81, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x4993, 0x5c1: 0x4999, 0x5c2: 0x3f89, 0x5c3: 0x3f99, 0x5c4: 0x3f91, 0x5c5: 0x3fa1, + 0x5c8: 0x491b, 0x5c9: 0x4921, 0x5ca: 0x3e89, 0x5cb: 0x3e99, + 0x5cc: 0x3e91, 0x5cd: 0x3ea1, 0x5d0: 0x49a5, 0x5d1: 0x49ab, + 0x5d2: 0x3fc1, 0x5d3: 0x3fd9, 0x5d4: 0x3fc9, 0x5d5: 0x3fe1, 0x5d6: 0x3fd1, 0x5d7: 0x3fe9, + 0x5d9: 0x4927, 0x5db: 0x3ea9, 0x5dd: 0x3eb1, + 0x5df: 0x3eb9, 0x5e0: 0x49bd, 0x5e1: 0x49c3, 0x5e2: 0x4abf, 0x5e3: 0x4ad7, + 0x5e4: 0x4ac7, 0x5e5: 0x4adf, 0x5e6: 0x4acf, 0x5e7: 0x4ae7, 0x5e8: 0x492d, 0x5e9: 0x4933, + 0x5ea: 0x4a2f, 0x5eb: 0x4a47, 0x5ec: 0x4a37, 0x5ed: 0x4a4f, 0x5ee: 0x4a3f, 0x5ef: 0x4a57, + 0x5f0: 0x4939, 0x5f1: 0x445f, 0x5f2: 0x37d2, 0x5f3: 0x4465, 0x5f4: 0x4963, 0x5f5: 0x446b, + 0x5f6: 0x37e4, 0x5f7: 0x4471, 0x5f8: 0x3802, 0x5f9: 0x4477, 0x5fa: 0x381a, 0x5fb: 0x447d, + 0x5fc: 0x49b1, 0x5fd: 0x4483, + // Block 0x18, offset 0x600 + 0x600: 0x3ee1, 0x601: 0x3ee9, 0x602: 0x42c5, 0x603: 0x42e3, 0x604: 0x42cf, 0x605: 0x42ed, + 0x606: 
0x42d9, 0x607: 0x42f7, 0x608: 0x3e19, 0x609: 0x3e21, 0x60a: 0x4211, 0x60b: 0x422f, + 0x60c: 0x421b, 0x60d: 0x4239, 0x60e: 0x4225, 0x60f: 0x4243, 0x610: 0x3f29, 0x611: 0x3f31, + 0x612: 0x4301, 0x613: 0x431f, 0x614: 0x430b, 0x615: 0x4329, 0x616: 0x4315, 0x617: 0x4333, + 0x618: 0x3e49, 0x619: 0x3e51, 0x61a: 0x424d, 0x61b: 0x426b, 0x61c: 0x4257, 0x61d: 0x4275, + 0x61e: 0x4261, 0x61f: 0x427f, 0x620: 0x4001, 0x621: 0x4009, 0x622: 0x433d, 0x623: 0x435b, + 0x624: 0x4347, 0x625: 0x4365, 0x626: 0x4351, 0x627: 0x436f, 0x628: 0x3ec1, 0x629: 0x3ec9, + 0x62a: 0x4289, 0x62b: 0x42a7, 0x62c: 0x4293, 0x62d: 0x42b1, 0x62e: 0x429d, 0x62f: 0x42bb, + 0x630: 0x37c6, 0x631: 0x37c0, 0x632: 0x3ed1, 0x633: 0x37cc, 0x634: 0x3ed9, + 0x636: 0x4951, 0x637: 0x3ef1, 0x638: 0x3736, 0x639: 0x3730, 0x63a: 0x3724, 0x63b: 0x442f, + 0x63c: 0x373c, 0x63d: 0x8100, 0x63e: 0x0257, 0x63f: 0xa100, + // Block 0x19, offset 0x640 + 0x640: 0x8100, 0x641: 0x36e8, 0x642: 0x3f19, 0x643: 0x37de, 0x644: 0x3f21, + 0x646: 0x497b, 0x647: 0x3f39, 0x648: 0x3742, 0x649: 0x4435, 0x64a: 0x374e, 0x64b: 0x443b, + 0x64c: 0x375a, 0x64d: 0x3cd0, 0x64e: 0x3cd7, 0x64f: 0x3cde, 0x650: 0x37f6, 0x651: 0x37f0, + 0x652: 0x3f41, 0x653: 0x4625, 0x656: 0x37fc, 0x657: 0x3f51, + 0x658: 0x3772, 0x659: 0x376c, 0x65a: 0x3760, 0x65b: 0x4441, 0x65d: 0x3ce5, + 0x65e: 0x3cec, 0x65f: 0x3cf3, 0x660: 0x382c, 0x661: 0x3826, 0x662: 0x3fa9, 0x663: 0x462d, + 0x664: 0x380e, 0x665: 0x3814, 0x666: 0x3832, 0x667: 0x3fb9, 0x668: 0x37a2, 0x669: 0x379c, + 0x66a: 0x3790, 0x66b: 0x444d, 0x66c: 0x378a, 0x66d: 0x36dc, 0x66e: 0x4429, 0x66f: 0x0081, + 0x672: 0x3ff1, 0x673: 0x3838, 0x674: 0x3ff9, + 0x676: 0x49c9, 0x677: 0x4011, 0x678: 0x377e, 0x679: 0x4447, 0x67a: 0x37ae, 0x67b: 0x4459, + 0x67c: 0x37ba, 0x67d: 0x4397, 0x67e: 0xa100, + // Block 0x1a, offset 0x680 + 0x681: 0x3d47, 0x683: 0xa000, 0x684: 0x3d4e, 0x685: 0xa000, + 0x687: 0x3d55, 0x688: 0xa000, 0x689: 0x3d5c, + 0x68d: 0xa000, + 0x6a0: 0x30a6, 0x6a1: 0xa000, 0x6a2: 0x3d6a, + 0x6a4: 0xa000, 0x6a5: 0xa000, + 
0x6ad: 0x3d63, 0x6ae: 0x30a1, 0x6af: 0x30ab, + 0x6b0: 0x3d71, 0x6b1: 0x3d78, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3d7f, 0x6b5: 0x3d86, + 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 0x3d8d, 0x6b9: 0x3d94, 0x6ba: 0xa000, 0x6bb: 0xa000, + 0x6bc: 0xa000, 0x6bd: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3d9b, 0x6c1: 0x3da2, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3db7, 0x6c5: 0x3dbe, + 0x6c6: 0xa000, 0x6c7: 0xa000, 0x6c8: 0x3dc5, 0x6c9: 0x3dcc, + 0x6d1: 0xa000, + 0x6d2: 0xa000, + 0x6e2: 0xa000, + 0x6e8: 0xa000, 0x6e9: 0xa000, + 0x6eb: 0xa000, 0x6ec: 0x3de1, 0x6ed: 0x3de8, 0x6ee: 0x3def, 0x6ef: 0x3df6, + 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000, + // Block 0x1c, offset 0x700 + 0x706: 0xa000, 0x70b: 0xa000, + 0x70c: 0x4049, 0x70d: 0xa000, 0x70e: 0x4051, 0x70f: 0xa000, 0x710: 0x4059, 0x711: 0xa000, + 0x712: 0x4061, 0x713: 0xa000, 0x714: 0x4069, 0x715: 0xa000, 0x716: 0x4071, 0x717: 0xa000, + 0x718: 0x4079, 0x719: 0xa000, 0x71a: 0x4081, 0x71b: 0xa000, 0x71c: 0x4089, 0x71d: 0xa000, + 0x71e: 0x4091, 0x71f: 0xa000, 0x720: 0x4099, 0x721: 0xa000, 0x722: 0x40a1, + 0x724: 0xa000, 0x725: 0x40a9, 0x726: 0xa000, 0x727: 0x40b1, 0x728: 0xa000, 0x729: 0x40b9, + 0x72f: 0xa000, + 0x730: 0x40c1, 0x731: 0x40c9, 0x732: 0xa000, 0x733: 0x40d1, 0x734: 0x40d9, 0x735: 0xa000, + 0x736: 0x40e1, 0x737: 0x40e9, 0x738: 0xa000, 0x739: 0x40f1, 0x73a: 0x40f9, 0x73b: 0xa000, + 0x73c: 0x4101, 0x73d: 0x4109, + // Block 0x1d, offset 0x740 + 0x754: 0x4041, + 0x759: 0x9904, 0x75a: 0x9904, 0x75b: 0x8100, 0x75c: 0x8100, 0x75d: 0xa000, + 0x75e: 0x4111, + 0x766: 0xa000, + 0x76b: 0xa000, 0x76c: 0x4121, 0x76d: 0xa000, 0x76e: 0x4129, 0x76f: 0xa000, + 0x770: 0x4131, 0x771: 0xa000, 0x772: 0x4139, 0x773: 0xa000, 0x774: 0x4141, 0x775: 0xa000, + 0x776: 0x4149, 0x777: 0xa000, 0x778: 0x4151, 0x779: 0xa000, 0x77a: 0x4159, 0x77b: 0xa000, + 0x77c: 0x4161, 0x77d: 0xa000, 0x77e: 0x4169, 0x77f: 0xa000, + // Block 0x1e, offset 0x780 + 0x780: 0x4171, 0x781: 0xa000, 0x782: 0x4179, 0x784: 0xa000, 0x785: 0x4181, + 
0x786: 0xa000, 0x787: 0x4189, 0x788: 0xa000, 0x789: 0x4191, + 0x78f: 0xa000, 0x790: 0x4199, 0x791: 0x41a1, + 0x792: 0xa000, 0x793: 0x41a9, 0x794: 0x41b1, 0x795: 0xa000, 0x796: 0x41b9, 0x797: 0x41c1, + 0x798: 0xa000, 0x799: 0x41c9, 0x79a: 0x41d1, 0x79b: 0xa000, 0x79c: 0x41d9, 0x79d: 0x41e1, + 0x7af: 0xa000, + 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x4119, + 0x7b7: 0x41e9, 0x7b8: 0x41f1, 0x7b9: 0x41f9, 0x7ba: 0x4201, + 0x7bd: 0xa000, 0x7be: 0x4209, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x1472, 0x7c1: 0x0df6, 0x7c2: 0x14ce, 0x7c3: 0x149a, 0x7c4: 0x0f52, 0x7c5: 0x07e6, + 0x7c6: 0x09da, 0x7c7: 0x1726, 0x7c8: 0x1726, 0x7c9: 0x0b06, 0x7ca: 0x155a, 0x7cb: 0x0a3e, + 0x7cc: 0x0b02, 0x7cd: 0x0cea, 0x7ce: 0x10ca, 0x7cf: 0x125a, 0x7d0: 0x1392, 0x7d1: 0x13ce, + 0x7d2: 0x1402, 0x7d3: 0x1516, 0x7d4: 0x0e6e, 0x7d5: 0x0efa, 0x7d6: 0x0fa6, 0x7d7: 0x103e, + 0x7d8: 0x135a, 0x7d9: 0x1542, 0x7da: 0x166e, 0x7db: 0x080a, 0x7dc: 0x09ae, 0x7dd: 0x0e82, + 0x7de: 0x0fca, 0x7df: 0x138e, 0x7e0: 0x16be, 0x7e1: 0x0bae, 0x7e2: 0x0f72, 0x7e3: 0x137e, + 0x7e4: 0x1412, 0x7e5: 0x0d1e, 0x7e6: 0x12b6, 0x7e7: 0x13da, 0x7e8: 0x0c1a, 0x7e9: 0x0e0a, + 0x7ea: 0x0f12, 0x7eb: 0x1016, 0x7ec: 0x1522, 0x7ed: 0x084a, 0x7ee: 0x08e2, 0x7ef: 0x094e, + 0x7f0: 0x0d86, 0x7f1: 0x0e7a, 0x7f2: 0x0fc6, 0x7f3: 0x10ea, 0x7f4: 0x1272, 0x7f5: 0x1386, + 0x7f6: 0x139e, 0x7f7: 0x14c2, 0x7f8: 0x15ea, 0x7f9: 0x169e, 0x7fa: 0x16ba, 0x7fb: 0x1126, + 0x7fc: 0x1166, 0x7fd: 0x121e, 0x7fe: 0x133e, 0x7ff: 0x1576, + // Block 0x20, offset 0x800 + 0x800: 0x16c6, 0x801: 0x1446, 0x802: 0x0ac2, 0x803: 0x0c36, 0x804: 0x11d6, 0x805: 0x1296, + 0x806: 0x0ffa, 0x807: 0x112e, 0x808: 0x1492, 0x809: 0x15e2, 0x80a: 0x0abe, 0x80b: 0x0b8a, + 0x80c: 0x0e72, 0x80d: 0x0f26, 0x80e: 0x0f5a, 0x80f: 0x120e, 0x810: 0x1236, 0x811: 0x15a2, + 0x812: 0x094a, 0x813: 0x12a2, 0x814: 0x08ee, 0x815: 0x08ea, 0x816: 0x1192, 0x817: 0x1222, + 0x818: 0x1356, 0x819: 0x15aa, 0x81a: 0x1462, 0x81b: 0x0d22, 0x81c: 0x0e6e, 0x81d: 0x1452, + 0x81e: 0x07f2, 0x81f: 0x0b5e, 
0x820: 0x0c8e, 0x821: 0x102a, 0x822: 0x10aa, 0x823: 0x096e, + 0x824: 0x1136, 0x825: 0x085a, 0x826: 0x0c72, 0x827: 0x07d2, 0x828: 0x0ee6, 0x829: 0x0d9e, + 0x82a: 0x120a, 0x82b: 0x09c2, 0x82c: 0x0aae, 0x82d: 0x10f6, 0x82e: 0x135e, 0x82f: 0x1436, + 0x830: 0x0eb2, 0x831: 0x14f2, 0x832: 0x0ede, 0x833: 0x0d32, 0x834: 0x1316, 0x835: 0x0d52, + 0x836: 0x10a6, 0x837: 0x0826, 0x838: 0x08a2, 0x839: 0x08e6, 0x83a: 0x0e4e, 0x83b: 0x11f6, + 0x83c: 0x12ee, 0x83d: 0x1442, 0x83e: 0x1556, 0x83f: 0x0956, + // Block 0x21, offset 0x840 + 0x840: 0x0a0a, 0x841: 0x0b12, 0x842: 0x0c2a, 0x843: 0x0dba, 0x844: 0x0f76, 0x845: 0x113a, + 0x846: 0x1592, 0x847: 0x1676, 0x848: 0x16ca, 0x849: 0x16e2, 0x84a: 0x0932, 0x84b: 0x0dee, + 0x84c: 0x0e9e, 0x84d: 0x14e6, 0x84e: 0x0bf6, 0x84f: 0x0cd2, 0x850: 0x0cee, 0x851: 0x0d7e, + 0x852: 0x0f66, 0x853: 0x0fb2, 0x854: 0x1062, 0x855: 0x1186, 0x856: 0x122a, 0x857: 0x128e, + 0x858: 0x14d6, 0x859: 0x1366, 0x85a: 0x14fe, 0x85b: 0x157a, 0x85c: 0x090a, 0x85d: 0x0936, + 0x85e: 0x0a1e, 0x85f: 0x0fa2, 0x860: 0x13ee, 0x861: 0x1436, 0x862: 0x0c16, 0x863: 0x0c86, + 0x864: 0x0d4a, 0x865: 0x0eaa, 0x866: 0x11d2, 0x867: 0x101e, 0x868: 0x0836, 0x869: 0x0a7a, + 0x86a: 0x0b5e, 0x86b: 0x0bc2, 0x86c: 0x0c92, 0x86d: 0x103a, 0x86e: 0x1056, 0x86f: 0x1266, + 0x870: 0x1286, 0x871: 0x155e, 0x872: 0x15de, 0x873: 0x15ee, 0x874: 0x162a, 0x875: 0x084e, + 0x876: 0x117a, 0x877: 0x154a, 0x878: 0x15c6, 0x879: 0x0caa, 0x87a: 0x0812, 0x87b: 0x0872, + 0x87c: 0x0b62, 0x87d: 0x0b82, 0x87e: 0x0daa, 0x87f: 0x0e6e, + // Block 0x22, offset 0x880 + 0x880: 0x0fbe, 0x881: 0x10c6, 0x882: 0x1372, 0x883: 0x1512, 0x884: 0x171e, 0x885: 0x0dde, + 0x886: 0x159e, 0x887: 0x092e, 0x888: 0x0e2a, 0x889: 0x0e36, 0x88a: 0x0f0a, 0x88b: 0x0f42, + 0x88c: 0x1046, 0x88d: 0x10a2, 0x88e: 0x1122, 0x88f: 0x1206, 0x890: 0x1636, 0x891: 0x08aa, + 0x892: 0x0cfe, 0x893: 0x15ae, 0x894: 0x0862, 0x895: 0x0ba6, 0x896: 0x0f2a, 0x897: 0x14da, + 0x898: 0x0c62, 0x899: 0x0cb2, 0x89a: 0x0e3e, 0x89b: 0x102a, 0x89c: 0x15b6, 0x89d: 0x0912, + 
0x89e: 0x09fa, 0x89f: 0x0b92, 0x8a0: 0x0dce, 0x8a1: 0x0e1a, 0x8a2: 0x0e5a, 0x8a3: 0x0eee, + 0x8a4: 0x1042, 0x8a5: 0x10b6, 0x8a6: 0x1252, 0x8a7: 0x13f2, 0x8a8: 0x13fe, 0x8a9: 0x1552, + 0x8aa: 0x15d2, 0x8ab: 0x097e, 0x8ac: 0x0f46, 0x8ad: 0x09fe, 0x8ae: 0x0fc2, 0x8af: 0x1066, + 0x8b0: 0x1382, 0x8b1: 0x15ba, 0x8b2: 0x16a6, 0x8b3: 0x16ce, 0x8b4: 0x0e32, 0x8b5: 0x0f22, + 0x8b6: 0x12be, 0x8b7: 0x11b2, 0x8b8: 0x11be, 0x8b9: 0x11e2, 0x8ba: 0x1012, 0x8bb: 0x0f9a, + 0x8bc: 0x145e, 0x8bd: 0x082e, 0x8be: 0x1326, 0x8bf: 0x0916, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0906, 0x8c1: 0x0c06, 0x8c2: 0x0d26, 0x8c3: 0x11ee, 0x8c4: 0x0b4e, 0x8c5: 0x0efe, + 0x8c6: 0x0dea, 0x8c7: 0x14e2, 0x8c8: 0x13e2, 0x8c9: 0x15a6, 0x8ca: 0x141e, 0x8cb: 0x0c22, + 0x8cc: 0x0882, 0x8cd: 0x0a56, 0x8d0: 0x0aaa, + 0x8d2: 0x0dda, 0x8d5: 0x08f2, 0x8d6: 0x101a, 0x8d7: 0x10de, + 0x8d8: 0x1142, 0x8d9: 0x115e, 0x8da: 0x1162, 0x8db: 0x1176, 0x8dc: 0x15f6, 0x8dd: 0x11e6, + 0x8de: 0x126a, 0x8e0: 0x138a, 0x8e2: 0x144e, + 0x8e5: 0x1502, 0x8e6: 0x152e, + 0x8ea: 0x164a, 0x8eb: 0x164e, 0x8ec: 0x1652, 0x8ed: 0x16b6, 0x8ee: 0x1526, 0x8ef: 0x15c2, + 0x8f0: 0x0852, 0x8f1: 0x0876, 0x8f2: 0x088a, 0x8f3: 0x0946, 0x8f4: 0x0952, 0x8f5: 0x0992, + 0x8f6: 0x0a46, 0x8f7: 0x0a62, 0x8f8: 0x0a6a, 0x8f9: 0x0aa6, 0x8fa: 0x0ab2, 0x8fb: 0x0b8e, + 0x8fc: 0x0b96, 0x8fd: 0x0c9e, 0x8fe: 0x0cc6, 0x8ff: 0x0cce, + // Block 0x24, offset 0x900 + 0x900: 0x0ce6, 0x901: 0x0d92, 0x902: 0x0dc2, 0x903: 0x0de2, 0x904: 0x0e52, 0x905: 0x0f16, + 0x906: 0x0f32, 0x907: 0x0f62, 0x908: 0x0fb6, 0x909: 0x0fd6, 0x90a: 0x104a, 0x90b: 0x112a, + 0x90c: 0x1146, 0x90d: 0x114e, 0x90e: 0x114a, 0x90f: 0x1152, 0x910: 0x1156, 0x911: 0x115a, + 0x912: 0x116e, 0x913: 0x1172, 0x914: 0x1196, 0x915: 0x11aa, 0x916: 0x11c6, 0x917: 0x122a, + 0x918: 0x1232, 0x919: 0x123a, 0x91a: 0x124e, 0x91b: 0x1276, 0x91c: 0x12c6, 0x91d: 0x12fa, + 0x91e: 0x12fa, 0x91f: 0x1362, 0x920: 0x140a, 0x921: 0x1422, 0x922: 0x1456, 0x923: 0x145a, + 0x924: 0x149e, 0x925: 0x14a2, 0x926: 0x14fa, 0x927: 0x1502, 
0x928: 0x15d6, 0x929: 0x161a, + 0x92a: 0x1632, 0x92b: 0x0c96, 0x92c: 0x184b, 0x92d: 0x12de, + 0x930: 0x07da, 0x931: 0x08de, 0x932: 0x089e, 0x933: 0x0846, 0x934: 0x0886, 0x935: 0x08b2, + 0x936: 0x0942, 0x937: 0x095e, 0x938: 0x0a46, 0x939: 0x0a32, 0x93a: 0x0a42, 0x93b: 0x0a5e, + 0x93c: 0x0aaa, 0x93d: 0x0aba, 0x93e: 0x0afe, 0x93f: 0x0b0a, + // Block 0x25, offset 0x940 + 0x940: 0x0b26, 0x941: 0x0b36, 0x942: 0x0c1e, 0x943: 0x0c26, 0x944: 0x0c56, 0x945: 0x0c76, + 0x946: 0x0ca6, 0x947: 0x0cbe, 0x948: 0x0cae, 0x949: 0x0cce, 0x94a: 0x0cc2, 0x94b: 0x0ce6, + 0x94c: 0x0d02, 0x94d: 0x0d5a, 0x94e: 0x0d66, 0x94f: 0x0d6e, 0x950: 0x0d96, 0x951: 0x0dda, + 0x952: 0x0e0a, 0x953: 0x0e0e, 0x954: 0x0e22, 0x955: 0x0ea2, 0x956: 0x0eb2, 0x957: 0x0f0a, + 0x958: 0x0f56, 0x959: 0x0f4e, 0x95a: 0x0f62, 0x95b: 0x0f7e, 0x95c: 0x0fb6, 0x95d: 0x110e, + 0x95e: 0x0fda, 0x95f: 0x100e, 0x960: 0x101a, 0x961: 0x105a, 0x962: 0x1076, 0x963: 0x109a, + 0x964: 0x10be, 0x965: 0x10c2, 0x966: 0x10de, 0x967: 0x10e2, 0x968: 0x10f2, 0x969: 0x1106, + 0x96a: 0x1102, 0x96b: 0x1132, 0x96c: 0x11ae, 0x96d: 0x11c6, 0x96e: 0x11de, 0x96f: 0x1216, + 0x970: 0x122a, 0x971: 0x1246, 0x972: 0x1276, 0x973: 0x132a, 0x974: 0x1352, 0x975: 0x13c6, + 0x976: 0x140e, 0x977: 0x141a, 0x978: 0x1422, 0x979: 0x143a, 0x97a: 0x144e, 0x97b: 0x143e, + 0x97c: 0x1456, 0x97d: 0x1452, 0x97e: 0x144a, 0x97f: 0x145a, + // Block 0x26, offset 0x980 + 0x980: 0x1466, 0x981: 0x14a2, 0x982: 0x14de, 0x983: 0x150e, 0x984: 0x1546, 0x985: 0x1566, + 0x986: 0x15b2, 0x987: 0x15d6, 0x988: 0x15f6, 0x989: 0x160a, 0x98a: 0x161a, 0x98b: 0x1626, + 0x98c: 0x1632, 0x98d: 0x1686, 0x98e: 0x1726, 0x98f: 0x17e2, 0x990: 0x17dd, 0x991: 0x180f, + 0x992: 0x0702, 0x993: 0x072a, 0x994: 0x072e, 0x995: 0x1891, 0x996: 0x18be, 0x997: 0x1936, + 0x998: 0x1712, 0x999: 0x1722, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x07f6, 0x9c1: 0x07ee, 0x9c2: 0x07fe, 0x9c3: 0x1774, 0x9c4: 0x0842, 0x9c5: 0x0852, + 0x9c6: 0x0856, 0x9c7: 0x085e, 0x9c8: 0x0866, 0x9c9: 0x086a, 0x9ca: 0x0876, 0x9cb: 0x086e, + 
0x9cc: 0x06ae, 0x9cd: 0x1788, 0x9ce: 0x088a, 0x9cf: 0x088e, 0x9d0: 0x0892, 0x9d1: 0x08ae, + 0x9d2: 0x1779, 0x9d3: 0x06b2, 0x9d4: 0x089a, 0x9d5: 0x08ba, 0x9d6: 0x1783, 0x9d7: 0x08ca, + 0x9d8: 0x08d2, 0x9d9: 0x0832, 0x9da: 0x08da, 0x9db: 0x08de, 0x9dc: 0x195e, 0x9dd: 0x08fa, + 0x9de: 0x0902, 0x9df: 0x06ba, 0x9e0: 0x091a, 0x9e1: 0x091e, 0x9e2: 0x0926, 0x9e3: 0x092a, + 0x9e4: 0x06be, 0x9e5: 0x0942, 0x9e6: 0x0946, 0x9e7: 0x0952, 0x9e8: 0x095e, 0x9e9: 0x0962, + 0x9ea: 0x0966, 0x9eb: 0x096e, 0x9ec: 0x098e, 0x9ed: 0x0992, 0x9ee: 0x099a, 0x9ef: 0x09aa, + 0x9f0: 0x09b2, 0x9f1: 0x09b6, 0x9f2: 0x09b6, 0x9f3: 0x09b6, 0x9f4: 0x1797, 0x9f5: 0x0f8e, + 0x9f6: 0x09ca, 0x9f7: 0x09d2, 0x9f8: 0x179c, 0x9f9: 0x09de, 0x9fa: 0x09e6, 0x9fb: 0x09ee, + 0x9fc: 0x0a16, 0x9fd: 0x0a02, 0x9fe: 0x0a0e, 0x9ff: 0x0a12, + // Block 0x28, offset 0xa00 + 0xa00: 0x0a1a, 0xa01: 0x0a22, 0xa02: 0x0a26, 0xa03: 0x0a2e, 0xa04: 0x0a36, 0xa05: 0x0a3a, + 0xa06: 0x0a3a, 0xa07: 0x0a42, 0xa08: 0x0a4a, 0xa09: 0x0a4e, 0xa0a: 0x0a5a, 0xa0b: 0x0a7e, + 0xa0c: 0x0a62, 0xa0d: 0x0a82, 0xa0e: 0x0a66, 0xa0f: 0x0a6e, 0xa10: 0x0906, 0xa11: 0x0aca, + 0xa12: 0x0a92, 0xa13: 0x0a96, 0xa14: 0x0a9a, 0xa15: 0x0a8e, 0xa16: 0x0aa2, 0xa17: 0x0a9e, + 0xa18: 0x0ab6, 0xa19: 0x17a1, 0xa1a: 0x0ad2, 0xa1b: 0x0ad6, 0xa1c: 0x0ade, 0xa1d: 0x0aea, + 0xa1e: 0x0af2, 0xa1f: 0x0b0e, 0xa20: 0x17a6, 0xa21: 0x17ab, 0xa22: 0x0b1a, 0xa23: 0x0b1e, + 0xa24: 0x0b22, 0xa25: 0x0b16, 0xa26: 0x0b2a, 0xa27: 0x06c2, 0xa28: 0x06c6, 0xa29: 0x0b32, + 0xa2a: 0x0b3a, 0xa2b: 0x0b3a, 0xa2c: 0x17b0, 0xa2d: 0x0b56, 0xa2e: 0x0b5a, 0xa2f: 0x0b5e, + 0xa30: 0x0b66, 0xa31: 0x17b5, 0xa32: 0x0b6e, 0xa33: 0x0b72, 0xa34: 0x0c4a, 0xa35: 0x0b7a, + 0xa36: 0x06ca, 0xa37: 0x0b86, 0xa38: 0x0b96, 0xa39: 0x0ba2, 0xa3a: 0x0b9e, 0xa3b: 0x17bf, + 0xa3c: 0x0baa, 0xa3d: 0x17c4, 0xa3e: 0x0bb6, 0xa3f: 0x0bb2, + // Block 0x29, offset 0xa40 + 0xa40: 0x0bba, 0xa41: 0x0bca, 0xa42: 0x0bce, 0xa43: 0x06ce, 0xa44: 0x0bde, 0xa45: 0x0be6, + 0xa46: 0x0bea, 0xa47: 0x0bee, 0xa48: 0x06d2, 0xa49: 0x17c9, 0xa4a: 
0x06d6, 0xa4b: 0x0c0a, + 0xa4c: 0x0c0e, 0xa4d: 0x0c12, 0xa4e: 0x0c1a, 0xa4f: 0x1990, 0xa50: 0x0c32, 0xa51: 0x17d3, + 0xa52: 0x17d3, 0xa53: 0x12d2, 0xa54: 0x0c42, 0xa55: 0x0c42, 0xa56: 0x06da, 0xa57: 0x17f6, + 0xa58: 0x18c8, 0xa59: 0x0c52, 0xa5a: 0x0c5a, 0xa5b: 0x06de, 0xa5c: 0x0c6e, 0xa5d: 0x0c7e, + 0xa5e: 0x0c82, 0xa5f: 0x0c8a, 0xa60: 0x0c9a, 0xa61: 0x06e6, 0xa62: 0x06e2, 0xa63: 0x0c9e, + 0xa64: 0x17d8, 0xa65: 0x0ca2, 0xa66: 0x0cb6, 0xa67: 0x0cba, 0xa68: 0x0cbe, 0xa69: 0x0cba, + 0xa6a: 0x0cca, 0xa6b: 0x0cce, 0xa6c: 0x0cde, 0xa6d: 0x0cd6, 0xa6e: 0x0cda, 0xa6f: 0x0ce2, + 0xa70: 0x0ce6, 0xa71: 0x0cea, 0xa72: 0x0cf6, 0xa73: 0x0cfa, 0xa74: 0x0d12, 0xa75: 0x0d1a, + 0xa76: 0x0d2a, 0xa77: 0x0d3e, 0xa78: 0x17e7, 0xa79: 0x0d3a, 0xa7a: 0x0d2e, 0xa7b: 0x0d46, + 0xa7c: 0x0d4e, 0xa7d: 0x0d62, 0xa7e: 0x17ec, 0xa7f: 0x0d6a, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0d5e, 0xa81: 0x0d56, 0xa82: 0x06ea, 0xa83: 0x0d72, 0xa84: 0x0d7a, 0xa85: 0x0d82, + 0xa86: 0x0d76, 0xa87: 0x06ee, 0xa88: 0x0d92, 0xa89: 0x0d9a, 0xa8a: 0x17f1, 0xa8b: 0x0dc6, + 0xa8c: 0x0dfa, 0xa8d: 0x0dd6, 0xa8e: 0x06fa, 0xa8f: 0x0de2, 0xa90: 0x06f6, 0xa91: 0x06f2, + 0xa92: 0x08be, 0xa93: 0x08c2, 0xa94: 0x0dfe, 0xa95: 0x0de6, 0xa96: 0x12a6, 0xa97: 0x075e, + 0xa98: 0x0e0a, 0xa99: 0x0e0e, 0xa9a: 0x0e12, 0xa9b: 0x0e26, 0xa9c: 0x0e1e, 0xa9d: 0x180a, + 0xa9e: 0x06fe, 0xa9f: 0x0e3a, 0xaa0: 0x0e2e, 0xaa1: 0x0e4a, 0xaa2: 0x0e52, 0xaa3: 0x1814, + 0xaa4: 0x0e56, 0xaa5: 0x0e42, 0xaa6: 0x0e5e, 0xaa7: 0x0702, 0xaa8: 0x0e62, 0xaa9: 0x0e66, + 0xaaa: 0x0e6a, 0xaab: 0x0e76, 0xaac: 0x1819, 0xaad: 0x0e7e, 0xaae: 0x0706, 0xaaf: 0x0e8a, + 0xab0: 0x181e, 0xab1: 0x0e8e, 0xab2: 0x070a, 0xab3: 0x0e9a, 0xab4: 0x0ea6, 0xab5: 0x0eb2, + 0xab6: 0x0eb6, 0xab7: 0x1823, 0xab8: 0x17ba, 0xab9: 0x1828, 0xaba: 0x0ed6, 0xabb: 0x182d, + 0xabc: 0x0ee2, 0xabd: 0x0eea, 0xabe: 0x0eda, 0xabf: 0x0ef6, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0f06, 0xac1: 0x0f16, 0xac2: 0x0f0a, 0xac3: 0x0f0e, 0xac4: 0x0f1a, 0xac5: 0x0f1e, + 0xac6: 0x1832, 0xac7: 0x0f02, 0xac8: 
0x0f36, 0xac9: 0x0f3a, 0xaca: 0x070e, 0xacb: 0x0f4e, + 0xacc: 0x0f4a, 0xacd: 0x1837, 0xace: 0x0f2e, 0xacf: 0x0f6a, 0xad0: 0x183c, 0xad1: 0x1841, + 0xad2: 0x0f6e, 0xad3: 0x0f82, 0xad4: 0x0f7e, 0xad5: 0x0f7a, 0xad6: 0x0712, 0xad7: 0x0f86, + 0xad8: 0x0f96, 0xad9: 0x0f92, 0xada: 0x0f9e, 0xadb: 0x177e, 0xadc: 0x0fae, 0xadd: 0x1846, + 0xade: 0x0fba, 0xadf: 0x1850, 0xae0: 0x0fce, 0xae1: 0x0fda, 0xae2: 0x0fee, 0xae3: 0x1855, + 0xae4: 0x1002, 0xae5: 0x1006, 0xae6: 0x185a, 0xae7: 0x185f, 0xae8: 0x1022, 0xae9: 0x1032, + 0xaea: 0x0716, 0xaeb: 0x1036, 0xaec: 0x071a, 0xaed: 0x071a, 0xaee: 0x104e, 0xaef: 0x1052, + 0xaf0: 0x105a, 0xaf1: 0x105e, 0xaf2: 0x106a, 0xaf3: 0x071e, 0xaf4: 0x1082, 0xaf5: 0x1864, + 0xaf6: 0x109e, 0xaf7: 0x1869, 0xaf8: 0x10aa, 0xaf9: 0x17ce, 0xafa: 0x10ba, 0xafb: 0x186e, + 0xafc: 0x1873, 0xafd: 0x1878, 0xafe: 0x0722, 0xaff: 0x0726, + // Block 0x2c, offset 0xb00 + 0xb00: 0x10f2, 0xb01: 0x1882, 0xb02: 0x187d, 0xb03: 0x1887, 0xb04: 0x188c, 0xb05: 0x10fa, + 0xb06: 0x10fe, 0xb07: 0x10fe, 0xb08: 0x1106, 0xb09: 0x072e, 0xb0a: 0x110a, 0xb0b: 0x0732, + 0xb0c: 0x0736, 0xb0d: 0x1896, 0xb0e: 0x111e, 0xb0f: 0x1126, 0xb10: 0x1132, 0xb11: 0x073a, + 0xb12: 0x189b, 0xb13: 0x1156, 0xb14: 0x18a0, 0xb15: 0x18a5, 0xb16: 0x1176, 0xb17: 0x118e, + 0xb18: 0x073e, 0xb19: 0x1196, 0xb1a: 0x119a, 0xb1b: 0x119e, 0xb1c: 0x18aa, 0xb1d: 0x18af, + 0xb1e: 0x18af, 0xb1f: 0x11b6, 0xb20: 0x0742, 0xb21: 0x18b4, 0xb22: 0x11ca, 0xb23: 0x11ce, + 0xb24: 0x0746, 0xb25: 0x18b9, 0xb26: 0x11ea, 0xb27: 0x074a, 0xb28: 0x11fa, 0xb29: 0x11f2, + 0xb2a: 0x1202, 0xb2b: 0x18c3, 0xb2c: 0x121a, 0xb2d: 0x074e, 0xb2e: 0x1226, 0xb2f: 0x122e, + 0xb30: 0x123e, 0xb31: 0x0752, 0xb32: 0x18cd, 0xb33: 0x18d2, 0xb34: 0x0756, 0xb35: 0x18d7, + 0xb36: 0x1256, 0xb37: 0x18dc, 0xb38: 0x1262, 0xb39: 0x126e, 0xb3a: 0x1276, 0xb3b: 0x18e1, + 0xb3c: 0x18e6, 0xb3d: 0x128a, 0xb3e: 0x18eb, 0xb3f: 0x1292, + // Block 0x2d, offset 0xb40 + 0xb40: 0x17fb, 0xb41: 0x075a, 0xb42: 0x12aa, 0xb43: 0x12ae, 0xb44: 0x0762, 0xb45: 0x12b2, + 0xb46: 
0x0b2e, 0xb47: 0x18f0, 0xb48: 0x18f5, 0xb49: 0x1800, 0xb4a: 0x1805, 0xb4b: 0x12d2, + 0xb4c: 0x12d6, 0xb4d: 0x14ee, 0xb4e: 0x0766, 0xb4f: 0x1302, 0xb50: 0x12fe, 0xb51: 0x1306, + 0xb52: 0x093a, 0xb53: 0x130a, 0xb54: 0x130e, 0xb55: 0x1312, 0xb56: 0x131a, 0xb57: 0x18fa, + 0xb58: 0x1316, 0xb59: 0x131e, 0xb5a: 0x1332, 0xb5b: 0x1336, 0xb5c: 0x1322, 0xb5d: 0x133a, + 0xb5e: 0x134e, 0xb5f: 0x1362, 0xb60: 0x132e, 0xb61: 0x1342, 0xb62: 0x1346, 0xb63: 0x134a, + 0xb64: 0x18ff, 0xb65: 0x1909, 0xb66: 0x1904, 0xb67: 0x076a, 0xb68: 0x136a, 0xb69: 0x136e, + 0xb6a: 0x1376, 0xb6b: 0x191d, 0xb6c: 0x137a, 0xb6d: 0x190e, 0xb6e: 0x076e, 0xb6f: 0x0772, + 0xb70: 0x1913, 0xb71: 0x1918, 0xb72: 0x0776, 0xb73: 0x139a, 0xb74: 0x139e, 0xb75: 0x13a2, + 0xb76: 0x13a6, 0xb77: 0x13b2, 0xb78: 0x13ae, 0xb79: 0x13ba, 0xb7a: 0x13b6, 0xb7b: 0x13c6, + 0xb7c: 0x13be, 0xb7d: 0x13c2, 0xb7e: 0x13ca, 0xb7f: 0x077a, + // Block 0x2e, offset 0xb80 + 0xb80: 0x13d2, 0xb81: 0x13d6, 0xb82: 0x077e, 0xb83: 0x13e6, 0xb84: 0x13ea, 0xb85: 0x1922, + 0xb86: 0x13f6, 0xb87: 0x13fa, 0xb88: 0x0782, 0xb89: 0x1406, 0xb8a: 0x06b6, 0xb8b: 0x1927, + 0xb8c: 0x192c, 0xb8d: 0x0786, 0xb8e: 0x078a, 0xb8f: 0x1432, 0xb90: 0x144a, 0xb91: 0x1466, + 0xb92: 0x1476, 0xb93: 0x1931, 0xb94: 0x148a, 0xb95: 0x148e, 0xb96: 0x14a6, 0xb97: 0x14b2, + 0xb98: 0x193b, 0xb99: 0x178d, 0xb9a: 0x14be, 0xb9b: 0x14ba, 0xb9c: 0x14c6, 0xb9d: 0x1792, + 0xb9e: 0x14d2, 0xb9f: 0x14de, 0xba0: 0x1940, 0xba1: 0x1945, 0xba2: 0x151e, 0xba3: 0x152a, + 0xba4: 0x1532, 0xba5: 0x194a, 0xba6: 0x1536, 0xba7: 0x1562, 0xba8: 0x156e, 0xba9: 0x1572, + 0xbaa: 0x156a, 0xbab: 0x157e, 0xbac: 0x1582, 0xbad: 0x194f, 0xbae: 0x158e, 0xbaf: 0x078e, + 0xbb0: 0x1596, 0xbb1: 0x1954, 0xbb2: 0x0792, 0xbb3: 0x15ce, 0xbb4: 0x0bbe, 0xbb5: 0x15e6, + 0xbb6: 0x1959, 0xbb7: 0x1963, 0xbb8: 0x0796, 0xbb9: 0x079a, 0xbba: 0x160e, 0xbbb: 0x1968, + 0xbbc: 0x079e, 0xbbd: 0x196d, 0xbbe: 0x1626, 0xbbf: 0x1626, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x162e, 0xbc1: 0x1972, 0xbc2: 0x1646, 0xbc3: 0x07a2, 0xbc4: 0x1656, 
0xbc5: 0x1662, + 0xbc6: 0x166a, 0xbc7: 0x1672, 0xbc8: 0x07a6, 0xbc9: 0x1977, 0xbca: 0x1686, 0xbcb: 0x16a2, + 0xbcc: 0x16ae, 0xbcd: 0x07aa, 0xbce: 0x07ae, 0xbcf: 0x16b2, 0xbd0: 0x197c, 0xbd1: 0x07b2, + 0xbd2: 0x1981, 0xbd3: 0x1986, 0xbd4: 0x198b, 0xbd5: 0x16d6, 0xbd6: 0x07b6, 0xbd7: 0x16ea, + 0xbd8: 0x16f2, 0xbd9: 0x16f6, 0xbda: 0x16fe, 0xbdb: 0x1706, 0xbdc: 0x170e, 0xbdd: 0x1995, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32, + 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35, + 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3b, 0x121: 0x3c, 0x122: 0x3d, 0x123: 0x0d, 0x124: 0x3e, 0x125: 0x3f, 0x126: 0x40, 0x127: 0x41, + 0x128: 0x42, 0x129: 0x43, 0x12a: 0x44, 0x12b: 0x45, 0x12c: 0x40, 0x12d: 0x46, 0x12e: 0x47, 0x12f: 0x48, + 0x130: 0x44, 0x131: 0x49, 0x132: 0x4a, 0x133: 0x4b, 0x134: 0x4c, 0x135: 0x4d, 0x137: 0x4e, + 0x138: 0x4f, 0x139: 0x50, 0x13a: 0x51, 0x13b: 0x52, 0x13c: 0x53, 0x13d: 0x54, 0x13e: 0x55, 0x13f: 0x56, + // Block 0x5, offset 0x140 + 0x140: 0x57, 0x142: 0x58, 0x144: 0x59, 0x145: 0x5a, 0x146: 0x5b, 0x147: 0x5c, + 0x14d: 0x5d, + 0x15c: 0x5e, 0x15f: 0x5f, + 0x162: 0x60, 0x164: 0x61, + 0x168: 0x62, 0x169: 0x63, 0x16a: 0x64, 0x16b: 0x65, 0x16c: 0x0e, 0x16d: 0x66, 0x16e: 0x67, 0x16f: 0x68, + 0x170: 0x69, 0x173: 0x6a, 0x177: 0x0f, + 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17, + // Block 0x6, offset 0x180 + 0x180: 0x6b, 0x183: 0x6c, 0x184: 0x6d, 0x186: 0x6e, 
0x187: 0x6f, + 0x188: 0x70, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x71, 0x18c: 0x72, + 0x1ab: 0x73, + 0x1b3: 0x74, 0x1b5: 0x75, 0x1b7: 0x76, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x77, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x78, 0x1c5: 0x79, + 0x1c9: 0x7a, 0x1cc: 0x7b, 0x1cd: 0x7c, + // Block 0x8, offset 0x200 + 0x219: 0x7d, 0x21a: 0x7e, 0x21b: 0x7f, + 0x220: 0x80, 0x223: 0x81, 0x224: 0x82, 0x225: 0x83, 0x226: 0x84, 0x227: 0x85, + 0x22a: 0x86, 0x22b: 0x87, 0x22f: 0x88, + 0x230: 0x89, 0x231: 0x8a, 0x232: 0x8b, 0x233: 0x8c, 0x234: 0x8d, 0x235: 0x8e, 0x236: 0x8f, 0x237: 0x89, + 0x238: 0x8a, 0x239: 0x8b, 0x23a: 0x8c, 0x23b: 0x8d, 0x23c: 0x8e, 0x23d: 0x8f, 0x23e: 0x89, 0x23f: 0x8a, + // Block 0x9, offset 0x240 + 0x240: 0x8b, 0x241: 0x8c, 0x242: 0x8d, 0x243: 0x8e, 0x244: 0x8f, 0x245: 0x89, 0x246: 0x8a, 0x247: 0x8b, + 0x248: 0x8c, 0x249: 0x8d, 0x24a: 0x8e, 0x24b: 0x8f, 0x24c: 0x89, 0x24d: 0x8a, 0x24e: 0x8b, 0x24f: 0x8c, + 0x250: 0x8d, 0x251: 0x8e, 0x252: 0x8f, 0x253: 0x89, 0x254: 0x8a, 0x255: 0x8b, 0x256: 0x8c, 0x257: 0x8d, + 0x258: 0x8e, 0x259: 0x8f, 0x25a: 0x89, 0x25b: 0x8a, 0x25c: 0x8b, 0x25d: 0x8c, 0x25e: 0x8d, 0x25f: 0x8e, + 0x260: 0x8f, 0x261: 0x89, 0x262: 0x8a, 0x263: 0x8b, 0x264: 0x8c, 0x265: 0x8d, 0x266: 0x8e, 0x267: 0x8f, + 0x268: 0x89, 0x269: 0x8a, 0x26a: 0x8b, 0x26b: 0x8c, 0x26c: 0x8d, 0x26d: 0x8e, 0x26e: 0x8f, 0x26f: 0x89, + 0x270: 0x8a, 0x271: 0x8b, 0x272: 0x8c, 0x273: 0x8d, 0x274: 0x8e, 0x275: 0x8f, 0x276: 0x89, 0x277: 0x8a, + 0x278: 0x8b, 0x279: 0x8c, 0x27a: 0x8d, 0x27b: 0x8e, 0x27c: 0x8f, 0x27d: 0x89, 0x27e: 0x8a, 0x27f: 0x8b, + // Block 0xa, offset 0x280 + 0x280: 0x8c, 0x281: 0x8d, 0x282: 0x8e, 0x283: 0x8f, 0x284: 0x89, 0x285: 0x8a, 0x286: 0x8b, 0x287: 0x8c, + 0x288: 0x8d, 0x289: 0x8e, 0x28a: 0x8f, 0x28b: 0x89, 0x28c: 0x8a, 0x28d: 0x8b, 0x28e: 0x8c, 0x28f: 0x8d, + 0x290: 0x8e, 0x291: 0x8f, 0x292: 0x89, 0x293: 0x8a, 0x294: 0x8b, 0x295: 0x8c, 0x296: 0x8d, 0x297: 0x8e, + 0x298: 0x8f, 0x299: 0x89, 0x29a: 0x8a, 0x29b: 0x8b, 0x29c: 0x8c, 0x29d: 0x8d, 0x29e: 
0x8e, 0x29f: 0x8f, + 0x2a0: 0x89, 0x2a1: 0x8a, 0x2a2: 0x8b, 0x2a3: 0x8c, 0x2a4: 0x8d, 0x2a5: 0x8e, 0x2a6: 0x8f, 0x2a7: 0x89, + 0x2a8: 0x8a, 0x2a9: 0x8b, 0x2aa: 0x8c, 0x2ab: 0x8d, 0x2ac: 0x8e, 0x2ad: 0x8f, 0x2ae: 0x89, 0x2af: 0x8a, + 0x2b0: 0x8b, 0x2b1: 0x8c, 0x2b2: 0x8d, 0x2b3: 0x8e, 0x2b4: 0x8f, 0x2b5: 0x89, 0x2b6: 0x8a, 0x2b7: 0x8b, + 0x2b8: 0x8c, 0x2b9: 0x8d, 0x2ba: 0x8e, 0x2bb: 0x8f, 0x2bc: 0x89, 0x2bd: 0x8a, 0x2be: 0x8b, 0x2bf: 0x8c, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8d, 0x2c1: 0x8e, 0x2c2: 0x8f, 0x2c3: 0x89, 0x2c4: 0x8a, 0x2c5: 0x8b, 0x2c6: 0x8c, 0x2c7: 0x8d, + 0x2c8: 0x8e, 0x2c9: 0x8f, 0x2ca: 0x89, 0x2cb: 0x8a, 0x2cc: 0x8b, 0x2cd: 0x8c, 0x2ce: 0x8d, 0x2cf: 0x8e, + 0x2d0: 0x8f, 0x2d1: 0x89, 0x2d2: 0x8a, 0x2d3: 0x8b, 0x2d4: 0x8c, 0x2d5: 0x8d, 0x2d6: 0x8e, 0x2d7: 0x8f, + 0x2d8: 0x89, 0x2d9: 0x8a, 0x2da: 0x8b, 0x2db: 0x8c, 0x2dc: 0x8d, 0x2dd: 0x8e, 0x2de: 0x90, + // Block 0xc, offset 0x300 + 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20, + 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x91, 0x32d: 0x92, 0x32e: 0x93, + 0x331: 0x94, 0x332: 0x95, 0x333: 0x96, 0x334: 0x97, + 0x338: 0x98, 0x339: 0x99, 0x33a: 0x9a, 0x33b: 0x9b, 0x33e: 0x9c, 0x33f: 0x9d, + // Block 0xd, offset 0x340 + 0x347: 0x9e, + 0x34b: 0x9f, 0x34d: 0xa0, + 0x368: 0xa1, 0x36b: 0xa2, + 0x374: 0xa3, + 0x37a: 0xa4, 0x37b: 0xa5, 0x37d: 0xa6, 0x37e: 0xa7, + // Block 0xe, offset 0x380 + 0x381: 0xa8, 0x382: 0xa9, 0x384: 0xaa, 0x385: 0x84, 0x387: 0xab, + 0x388: 0xac, 0x38b: 0xad, 0x38c: 0xae, 0x38d: 0xaf, + 0x391: 0xb0, 0x392: 0xb1, 0x393: 0xb2, 0x396: 0xb3, 0x397: 0xb4, + 0x398: 0x75, 0x39a: 0xb5, 0x39c: 0xb6, + 0x3a0: 0xb7, 0x3a4: 0xb8, 0x3a5: 0xb9, 0x3a7: 0xba, + 0x3a8: 0xbb, 0x3a9: 0xbc, 0x3aa: 0xbd, + 0x3b0: 0x75, 0x3b5: 0xbe, 0x3b6: 0xbf, + 0x3bd: 0xc0, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xc1, 0x3ec: 0xc2, + 0x3ff: 0xc3, + // Block 0x10, offset 0x400 + 0x432: 0xc4, + // Block 0x11, offset 0x440 + 0x445: 0xc5, 0x446: 0xc6, 0x447: 0xc7, + 0x449: 0xc8, + // Block 0x12, offset 
0x480 + 0x480: 0xc9, 0x482: 0xca, 0x484: 0xc2, + 0x48a: 0xcb, 0x48b: 0xcc, + 0x493: 0xcd, + 0x4a3: 0xce, 0x4a5: 0xcf, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xd0, + // Block 0x14, offset 0x500 + 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c, + 0x528: 0x2d, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 163 entries, 326 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x6e, 0x76, 0x7d, 0x80, 0x88, 0x8c, 0x90, 0x92, 0x94, 0x9d, 0xa1, 0xa8, 0xad, 0xb0, 0xba, 0xbd, 0xc4, 0xcc, 0xcf, 0xd1, 0xd4, 0xd6, 0xdb, 0xec, 0xf8, 0xfa, 0x100, 0x102, 0x104, 0x106, 0x108, 0x10a, 0x10c, 0x10f, 0x112, 0x114, 0x117, 0x11a, 0x11e, 0x124, 0x12b, 0x134, 0x136, 0x139, 0x13b, 0x146, 0x14a, 0x158, 0x15b, 0x161, 0x167, 0x172, 0x176, 0x178, 0x17a, 0x17c, 0x17e, 0x180, 0x186, 0x18a, 0x18c, 0x18e, 0x196, 0x19a, 0x19d, 0x19f, 0x1a1, 0x1a4, 0x1a7, 0x1a9, 0x1ab, 0x1ad, 0x1af, 0x1b5, 0x1b8, 0x1ba, 0x1c1, 0x1c7, 0x1cd, 0x1d5, 0x1db, 0x1e1, 0x1e7, 0x1eb, 0x1f9, 0x202, 0x205, 0x208, 0x20a, 0x20d, 0x20f, 0x213, 0x218, 0x21a, 0x21c, 0x221, 0x227, 0x229, 0x22b, 0x22d, 0x233, 0x236, 0x238, 0x23a, 0x23c, 0x242, 0x246, 0x24a, 0x252, 0x259, 0x25c, 0x25f, 0x261, 0x264, 0x26c, 0x270, 0x277, 0x27a, 0x280, 0x282, 0x285, 0x287, 0x28a, 0x28f, 0x291, 0x293, 0x295, 0x297, 0x299, 0x29c, 0x29e, 0x2a0, 0x2a2, 0x2a4, 0x2a6, 0x2a8, 0x2b5, 0x2bf, 0x2c1, 0x2c3, 0x2c9, 0x2cb, 0x2cd, 0x2cf, 0x2d3, 0x2d5, 0x2d8} + +// nfcSparseValues: 730 entries, 2920 bytes +var nfcSparseValues = [730]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, lo: 0x03}, + {value: 
0x4823, lo: 0xa0, hi: 0xa1}, + {value: 0x4855, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4981, lo: 0x8a, hi: 0x8a}, + {value: 0x499f, lo: 0x8b, hi: 0x8b}, + {value: 0x3808, lo: 0x8c, hi: 0x8c}, + {value: 0x3820, lo: 0x8d, hi: 0x8d}, + {value: 0x49b7, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x383e, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x38e6, lo: 0x90, hi: 0x90}, + {value: 0x38f2, lo: 0x91, hi: 0x91}, + {value: 0x38e0, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3958, lo: 0x97, hi: 0x97}, + {value: 0x3922, lo: 0x9c, hi: 0x9c}, + {value: 0x390a, lo: 0x9d, hi: 0x9d}, + {value: 0x3934, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x395e, lo: 0xb6, hi: 0xb6}, + {value: 0x3964, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8114, lo: 0x81, hi: 0x82}, + {value: 0x8133, lo: 0x84, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + {value: 0x810e, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x97}, + {value: 0x811a, lo: 0x98, hi: 0x98}, + {value: 0x811b, lo: 0x99, hi: 0x99}, + {value: 0x811c, lo: 0x9a, hi: 0x9a}, + {value: 0x3982, lo: 0xa2, hi: 0xa2}, + {value: 0x3988, lo: 0xa3, hi: 0xa3}, + {value: 
0x3994, lo: 0xa4, hi: 0xa4}, + {value: 0x398e, lo: 0xa5, hi: 0xa5}, + {value: 0x399a, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x39ac, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x39a0, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x39a6, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8133, lo: 0x96, hi: 0x9c}, + {value: 0x8133, lo: 0x9f, hi: 0xa2}, + {value: 0x812e, lo: 0xa3, hi: 0xa3}, + {value: 0x8133, lo: 0xa4, hi: 0xa4}, + {value: 0x8133, lo: 0xa7, hi: 0xa8}, + {value: 0x812e, lo: 0xaa, hi: 0xaa}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x8120, lo: 0x91, hi: 0x91}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x812e, lo: 0xb1, hi: 0xb1}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb5, hi: 0xb6}, + {value: 0x812e, lo: 0xb7, hi: 0xb9}, + {value: 0x8133, lo: 0xba, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbc}, + {value: 0x8133, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbe, hi: 0xbe}, + {value: 0x8133, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8133, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x812e, lo: 0x82, hi: 0x83}, + {value: 0x812e, lo: 0x84, hi: 0x85}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x812e, lo: 0x88, hi: 0x89}, + {value: 0x8133, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0xab, hi: 0xb1}, + {value: 0x812e, lo: 0xb2, hi: 0xb2}, + {value: 0x8133, lo: 0xb3, hi: 0xb3}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0xd, offset 0x63 + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0x96, hi: 0x99}, + {value: 0x8133, lo: 0x9b, hi: 0xa3}, + {value: 0x8133, lo: 0xa5, hi: 0xa7}, + 
{value: 0x8133, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x68 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x6a + {value: 0x0000, lo: 0x03}, + {value: 0x8133, lo: 0x98, hi: 0x98}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + {value: 0x8133, lo: 0x9c, hi: 0x9f}, + // Block 0x10, offset 0x6e + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x4019, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x4021, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x4029, lo: 0xb4, hi: 0xb4}, + {value: 0x9903, lo: 0xbc, hi: 0xbc}, + // Block 0x11, offset 0x76 + {value: 0x0008, lo: 0x06}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x91, hi: 0x91}, + {value: 0x812e, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x93, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x94}, + {value: 0x465d, lo: 0x98, hi: 0x9f}, + // Block 0x12, offset 0x7d + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x80 + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2dd5, lo: 0x8b, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x469d, lo: 0x9c, hi: 0x9d}, + {value: 0x46ad, lo: 0x9f, hi: 0x9f}, + {value: 0x8133, lo: 0xbe, hi: 0xbe}, + // Block 0x14, offset 0x88 + {value: 0x0000, lo: 0x03}, + {value: 0x46d5, lo: 0xb3, hi: 0xb3}, + {value: 0x46dd, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x8c + {value: 0x0008, lo: 0x03}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x46b5, lo: 0x99, hi: 0x9b}, + {value: 0x46cd, lo: 0x9e, hi: 0x9e}, + // Block 0x16, offset 0x90 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x17, offset 0x92 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + // Block 0x18, offset 0x94 + {value: 0x0000, lo: 0x08}, 
+ {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ded, lo: 0x88, hi: 0x88}, + {value: 0x2de5, lo: 0x8b, hi: 0x8b}, + {value: 0x2df5, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x46e5, lo: 0x9c, hi: 0x9c}, + {value: 0x46ed, lo: 0x9d, hi: 0x9d}, + // Block 0x19, offset 0x9d + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2dfd, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1a, offset 0xa1 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2e05, lo: 0x8a, hi: 0x8a}, + {value: 0x2e15, lo: 0x8b, hi: 0x8b}, + {value: 0x2e0d, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1b, offset 0xa8 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x4031, lo: 0x88, hi: 0x88}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8121, lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xad + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1d, offset 0xb0 + {value: 0x0000, lo: 0x09}, + {value: 0x2e1d, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2e25, lo: 0x87, hi: 0x87}, + {value: 0x2e2d, lo: 0x88, hi: 0x88}, + {value: 0x3091, lo: 0x8a, hi: 0x8a}, + {value: 0x2f19, lo: 0x8b, hi: 0x8b}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1e, offset 0xba + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1f, offset 0xbd + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2e35, lo: 0x8a, hi: 0x8a}, + {value: 0x2e45, lo: 0x8b, hi: 0x8b}, + {value: 0x2e3d, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x20, offset 0xc4 + 
{value: 0x6ab3, lo: 0x07}, + {value: 0x9905, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4039, lo: 0x9a, hi: 0x9a}, + {value: 0x3099, lo: 0x9c, hi: 0x9c}, + {value: 0x2f24, lo: 0x9d, hi: 0x9d}, + {value: 0x2e4d, lo: 0x9e, hi: 0x9f}, + // Block 0x21, offset 0xcc + {value: 0x0000, lo: 0x02}, + {value: 0x8123, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x22, offset 0xcf + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0x88, hi: 0x8b}, + // Block 0x23, offset 0xd1 + {value: 0x0000, lo: 0x02}, + {value: 0x8125, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x24, offset 0xd4 + {value: 0x0000, lo: 0x01}, + {value: 0x8126, lo: 0x88, hi: 0x8b}, + // Block 0x25, offset 0xd6 + {value: 0x0000, lo: 0x04}, + {value: 0x812e, lo: 0x98, hi: 0x99}, + {value: 0x812e, lo: 0xb5, hi: 0xb5}, + {value: 0x812e, lo: 0xb7, hi: 0xb7}, + {value: 0x812c, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0xdb + {value: 0x0000, lo: 0x10}, + {value: 0x2774, lo: 0x83, hi: 0x83}, + {value: 0x277b, lo: 0x8d, hi: 0x8d}, + {value: 0x2782, lo: 0x92, hi: 0x92}, + {value: 0x2789, lo: 0x97, hi: 0x97}, + {value: 0x2790, lo: 0x9c, hi: 0x9c}, + {value: 0x276d, lo: 0xa9, hi: 0xa9}, + {value: 0x8127, lo: 0xb1, hi: 0xb1}, + {value: 0x8128, lo: 0xb2, hi: 0xb2}, + {value: 0x4bc5, lo: 0xb3, hi: 0xb3}, + {value: 0x8129, lo: 0xb4, hi: 0xb4}, + {value: 0x4bce, lo: 0xb5, hi: 0xb5}, + {value: 0x46f5, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x46fd, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8128, lo: 0xba, hi: 0xbd}, + // Block 0x27, offset 0xec + {value: 0x0000, lo: 0x0b}, + {value: 0x8128, lo: 0x80, hi: 0x80}, + {value: 0x4bd7, lo: 0x81, hi: 0x81}, + {value: 0x8133, lo: 0x82, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0x86, hi: 0x87}, + {value: 0x279e, lo: 0x93, hi: 0x93}, + {value: 0x27a5, lo: 0x9d, hi: 0x9d}, + 
{value: 0x27ac, lo: 0xa2, hi: 0xa2}, + {value: 0x27b3, lo: 0xa7, hi: 0xa7}, + {value: 0x27ba, lo: 0xac, hi: 0xac}, + {value: 0x2797, lo: 0xb9, hi: 0xb9}, + // Block 0x28, offset 0xf8 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x86, hi: 0x86}, + // Block 0x29, offset 0xfa + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2e55, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x2a, offset 0x100 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + // Block 0x2b, offset 0x102 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2c, offset 0x104 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2d, offset 0x106 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2e, offset 0x108 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2f, offset 0x10a + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9d, hi: 0x9f}, + // Block 0x30, offset 0x10c + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x94, hi: 0x95}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x31, offset 0x10f + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x9d, hi: 0x9d}, + // Block 0x32, offset 0x112 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + // Block 0x33, offset 0x114 + {value: 0x0004, lo: 0x02}, + {value: 0x812f, lo: 0xb9, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbb}, + // Block 0x34, offset 0x117 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x97, hi: 0x97}, + {value: 0x812e, lo: 0x98, hi: 0x98}, + // Block 0x35, offset 0x11a + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + {value: 0x8133, lo: 0xb5, hi: 0xbc}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x11e + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 
0xb0, hi: 0xb4}, + {value: 0x812e, lo: 0xb5, hi: 0xba}, + {value: 0x8133, lo: 0xbb, hi: 0xbc}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x37, offset 0x124 + {value: 0x0000, lo: 0x06}, + {value: 0x812e, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x82}, + {value: 0x812e, lo: 0x83, hi: 0x84}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8a}, + {value: 0x8133, lo: 0x8b, hi: 0x8e}, + // Block 0x38, offset 0x12b + {value: 0x0000, lo: 0x08}, + {value: 0x2e9d, lo: 0x80, hi: 0x80}, + {value: 0x2ea5, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2ead, lo: 0x83, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xab, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xac}, + {value: 0x8133, lo: 0xad, hi: 0xb3}, + // Block 0x39, offset 0x134 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xaa, hi: 0xab}, + // Block 0x3a, offset 0x136 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xa6, hi: 0xa6}, + {value: 0x8105, lo: 0xb2, hi: 0xb3}, + // Block 0x3b, offset 0x139 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x3c, offset 0x13b + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812e, lo: 0x95, hi: 0x99}, + {value: 0x8133, lo: 0x9a, hi: 0x9b}, + {value: 0x812e, lo: 0x9c, hi: 0x9f}, + {value: 0x8133, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x8133, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb8, hi: 0xb9}, + // Block 0x3d, offset 0x146 + {value: 0x0004, lo: 0x03}, + {value: 0x052a, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3e, offset 0x14a + {value: 0x0000, lo: 0x0d}, + {value: 0x8133, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, 
hi: 0x9a}, + {value: 0x8133, lo: 0x9b, hi: 0x9c}, + {value: 0x8133, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8133, lo: 0xa7, hi: 0xa7}, + {value: 0x812e, lo: 0xa8, hi: 0xa8}, + {value: 0x8133, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xaf}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + // Block 0x3f, offset 0x158 + {value: 0x43bc, lo: 0x02}, + {value: 0x023c, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x40, offset 0x15b + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3cfa, lo: 0x9a, hi: 0x9b}, + {value: 0x3d08, lo: 0xae, hi: 0xae}, + // Block 0x41, offset 0x161 + {value: 0x000e, lo: 0x05}, + {value: 0x3d0f, lo: 0x8d, hi: 0x8e}, + {value: 0x3d16, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x42, offset 0x167 + {value: 0x62c7, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3d24, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3d2b, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3d32, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3d39, lo: 0xa4, hi: 0xa5}, + {value: 0x3d40, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x43, offset 0x172 + {value: 0x0007, lo: 0x03}, + {value: 0x3da9, lo: 0xa0, hi: 0xa1}, + {value: 0x3dd3, lo: 0xa2, hi: 0xa3}, + {value: 0x3dfd, lo: 0xaa, hi: 0xad}, + // Block 0x44, offset 0x176 + {value: 0x0004, lo: 0x01}, + {value: 0x0586, lo: 0xa9, hi: 0xaa}, + // Block 0x45, offset 0x178 + {value: 0x0000, lo: 0x01}, + {value: 0x461e, lo: 0x9c, hi: 0x9c}, + // Block 0x46, offset 0x17a + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xaf, hi: 0xb1}, + // Block 0x47, offset 0x17c + {value: 0x0000, lo: 0x01}, + {value: 
0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x48, offset 0x17e + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa0, hi: 0xbf}, + // Block 0x49, offset 0x180 + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x8134, lo: 0xac, hi: 0xac}, + {value: 0x812f, lo: 0xad, hi: 0xad}, + {value: 0x8130, lo: 0xae, hi: 0xaf}, + // Block 0x4a, offset 0x186 + {value: 0x0000, lo: 0x03}, + {value: 0x4be0, lo: 0xb3, hi: 0xb3}, + {value: 0x4be0, lo: 0xb5, hi: 0xb6}, + {value: 0x4be0, lo: 0xba, hi: 0xbf}, + // Block 0x4b, offset 0x18a + {value: 0x0000, lo: 0x01}, + {value: 0x4be0, lo: 0x8f, hi: 0xa3}, + // Block 0x4c, offset 0x18c + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4d, offset 0x18e + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4e, offset 0x196 + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4f, offset 0x19a + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + {value: 0x8133, lo: 0xb4, hi: 0xbd}, + // Block 0x50, offset 0x19d + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9e, hi: 0x9f}, + // Block 0x51, offset 0x19f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb1}, + // Block 0x52, offset 0x1a1 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xac, hi: 0xac}, + // Block 0x53, offset 0x1a4 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xa0, hi: 0xb1}, + // Block 0x54, offset 0x1a7 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xab, hi: 0xad}, + // Block 0x55, offset 0x1a9 + 
{value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x93, hi: 0x93}, + // Block 0x56, offset 0x1ab + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb3, hi: 0xb3}, + // Block 0x57, offset 0x1ad + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + // Block 0x58, offset 0x1af + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb7, hi: 0xb8}, + {value: 0x8133, lo: 0xbe, hi: 0xbf}, + // Block 0x59, offset 0x1b5 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + // Block 0x5a, offset 0x1b8 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xad, hi: 0xad}, + // Block 0x5b, offset 0x1ba + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5c, offset 0x1c1 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5d, offset 0x1c7 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5e, offset 0x1cd + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5f, offset 0x1d5 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, 
lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x60, offset 0x1db + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x61, offset 0x1e1 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x62, offset 0x1e7 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x63, offset 0x1eb + {value: 0x0006, lo: 0x0d}, + {value: 0x44d1, lo: 0x9d, hi: 0x9d}, + {value: 0x8116, lo: 0x9e, hi: 0x9e}, + {value: 0x4543, lo: 0x9f, hi: 0x9f}, + {value: 0x4531, lo: 0xaa, hi: 0xab}, + {value: 0x4635, lo: 0xac, hi: 0xac}, + {value: 0x463d, lo: 0xad, hi: 0xad}, + {value: 0x4489, lo: 0xae, hi: 0xb1}, + {value: 0x44a7, lo: 0xb2, hi: 0xb4}, + {value: 0x44bf, lo: 0xb5, hi: 0xb6}, + {value: 0x44cb, lo: 0xb8, hi: 0xb8}, + {value: 0x44d7, lo: 0xb9, hi: 0xbb}, + {value: 0x44ef, lo: 0xbc, hi: 0xbc}, + {value: 0x44f5, lo: 0xbe, hi: 0xbe}, + // Block 0x64, offset 0x1f9 + {value: 0x0006, lo: 0x08}, + {value: 0x44fb, lo: 0x80, hi: 0x81}, + {value: 0x4507, lo: 0x83, hi: 0x84}, + {value: 0x4519, lo: 0x86, hi: 0x89}, + {value: 0x453d, lo: 0x8a, hi: 0x8a}, + {value: 0x44b9, lo: 0x8b, hi: 0x8b}, + {value: 0x44a1, lo: 0x8c, hi: 0x8c}, + {value: 0x44e9, lo: 0x8d, hi: 0x8d}, + {value: 0x4513, lo: 0x8e, hi: 0x8e}, + // Block 0x65, offset 0x202 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x66, offset 0x205 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + 
{value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x67, offset 0x208 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x68, offset 0x20a + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x69, offset 0x20d + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x6a, offset 0x20f + {value: 0x0000, lo: 0x03}, + {value: 0x8133, lo: 0xa0, hi: 0xa6}, + {value: 0x812e, lo: 0xa7, hi: 0xad}, + {value: 0x8133, lo: 0xae, hi: 0xaf}, + // Block 0x6b, offset 0x213 + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6c, offset 0x218 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6d, offset 0x21a + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6e, offset 0x21c + {value: 0x0000, lo: 0x04}, + {value: 0x4be0, lo: 0x9e, hi: 0x9f}, + {value: 0x4be0, lo: 0xa3, hi: 0xa3}, + {value: 0x4be0, lo: 0xa5, hi: 0xa6}, + {value: 0x4be0, lo: 0xaa, hi: 0xaf}, + // Block 0x6f, offset 0x221 + {value: 0x0000, lo: 0x05}, + {value: 0x4be0, lo: 0x82, hi: 0x87}, + {value: 0x4be0, lo: 0x8a, hi: 0x8f}, + {value: 0x4be0, lo: 0x92, hi: 0x97}, + {value: 0x4be0, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x70, offset 0x227 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0x71, offset 0x229 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xa0, hi: 0xa0}, + // Block 0x72, offset 0x22b + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb6, hi: 0xba}, + // Block 0x73, offset 0x22d + {value: 0x002d, lo: 0x05}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + {value: 0x8133, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 
0x233 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xa5, hi: 0xa5}, + {value: 0x812e, lo: 0xa6, hi: 0xa6}, + // Block 0x75, offset 0x236 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa4, hi: 0xa7}, + // Block 0x76, offset 0x238 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + // Block 0x77, offset 0x23a + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbf}, + // Block 0x78, offset 0x23c + {value: 0x0000, lo: 0x05}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x8133, lo: 0x88, hi: 0x8a}, + {value: 0x812e, lo: 0x8b, hi: 0x8b}, + {value: 0x8133, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x8d, hi: 0x90}, + // Block 0x79, offset 0x242 + {value: 0x0005, lo: 0x03}, + {value: 0x8133, lo: 0x82, hi: 0x82}, + {value: 0x812e, lo: 0x83, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + // Block 0x7a, offset 0x246 + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xb0, hi: 0xb0}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x7b, offset 0x24a + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4379, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4383, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x438d, lo: 0xab, hi: 0xab}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x7c, offset 0x252 + {value: 0x0000, lo: 0x06}, + {value: 0x8133, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2eb5, lo: 0xae, hi: 0xae}, + {value: 0x2ebf, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8105, lo: 0xb3, hi: 0xb4}, + // Block 0x7d, offset 0x259 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x7e, offset 0x25c + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb5, hi: 0xb5}, + {value: 0x8103, lo: 0xb6, hi: 0xb6}, + // Block 0x7f, offset 0x25f + {value: 0x0002, lo: 0x01}, + {value: 0x8103, lo: 0xa9, 
hi: 0xaa}, + // Block 0x80, offset 0x261 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x81, offset 0x264 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ec9, lo: 0x8b, hi: 0x8b}, + {value: 0x2ed3, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8133, lo: 0xa6, hi: 0xac}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + // Block 0x82, offset 0x26c + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x86, hi: 0x86}, + {value: 0x8133, lo: 0x9e, hi: 0x9e}, + // Block 0x83, offset 0x270 + {value: 0x6a23, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2ee7, lo: 0xbb, hi: 0xbb}, + {value: 0x2edd, lo: 0xbc, hi: 0xbd}, + {value: 0x2ef1, lo: 0xbe, hi: 0xbe}, + // Block 0x84, offset 0x277 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x85, offset 0x27a + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2efb, lo: 0xba, hi: 0xba}, + {value: 0x2f05, lo: 0xbb, hi: 0xbb}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x86, offset 0x280 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x80, hi: 0x80}, + // Block 0x87, offset 0x282 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x88, offset 0x285 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xab, hi: 0xab}, + // Block 0x89, offset 0x287 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb9, hi: 0xb9}, + {value: 0x8103, lo: 0xba, hi: 0xba}, + // Block 0x8a, offset 0x28a + {value: 0x0000, lo: 0x04}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb5, hi: 0xb5}, + {value: 0x2f0f, lo: 0xb8, hi: 0xb8}, + {value: 
0x8105, lo: 0xbd, hi: 0xbe}, + // Block 0x8b, offset 0x28f + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x8c, offset 0x291 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + // Block 0x8d, offset 0x293 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x8e, offset 0x295 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x87, hi: 0x87}, + // Block 0x8f, offset 0x297 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x99, hi: 0x99}, + // Block 0x90, offset 0x299 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0x82, hi: 0x82}, + {value: 0x8105, lo: 0x84, hi: 0x85}, + // Block 0x91, offset 0x29c + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x97, hi: 0x97}, + // Block 0x92, offset 0x29e + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x81, hi: 0x82}, + // Block 0x93, offset 0x2a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x94, offset 0x2a2 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb6}, + // Block 0x95, offset 0x2a4 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb0, hi: 0xb1}, + // Block 0x96, offset 0x2a6 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x97, offset 0x2a8 + {value: 0x0000, lo: 0x0c}, + {value: 0x470d, lo: 0x9e, hi: 0x9e}, + {value: 0x4717, lo: 0x9f, hi: 0x9f}, + {value: 0x474b, lo: 0xa0, hi: 0xa0}, + {value: 0x4759, lo: 0xa1, hi: 0xa1}, + {value: 0x4767, lo: 0xa2, hi: 0xa2}, + {value: 0x4775, lo: 0xa3, hi: 0xa3}, + {value: 0x4783, lo: 0xa4, hi: 0xa4}, + {value: 0x812c, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8131, lo: 0xad, hi: 0xad}, + {value: 0x812c, lo: 0xae, hi: 0xb2}, + {value: 0x812e, lo: 0xbb, hi: 0xbf}, + // Block 0x98, offset 0x2b5 + {value: 0x0000, lo: 0x09}, + {value: 0x812e, lo: 0x80, hi: 0x82}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8b}, + {value: 0x8133, lo: 0xaa, hi: 0xad}, + {value: 
0x4721, lo: 0xbb, hi: 0xbb}, + {value: 0x472b, lo: 0xbc, hi: 0xbc}, + {value: 0x4791, lo: 0xbd, hi: 0xbd}, + {value: 0x47ad, lo: 0xbe, hi: 0xbe}, + {value: 0x479f, lo: 0xbf, hi: 0xbf}, + // Block 0x99, offset 0x2bf + {value: 0x0000, lo: 0x01}, + {value: 0x47bb, lo: 0x80, hi: 0x80}, + // Block 0x9a, offset 0x2c1 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x82, hi: 0x84}, + // Block 0x9b, offset 0x2c3 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0x80, hi: 0x86}, + {value: 0x8133, lo: 0x88, hi: 0x98}, + {value: 0x8133, lo: 0x9b, hi: 0xa1}, + {value: 0x8133, lo: 0xa3, hi: 0xa4}, + {value: 0x8133, lo: 0xa6, hi: 0xaa}, + // Block 0x9c, offset 0x2c9 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + // Block 0x9d, offset 0x2cb + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xae, hi: 0xae}, + // Block 0x9e, offset 0x2cd + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xac, hi: 0xaf}, + // Block 0x9f, offset 0x2cf + {value: 0x0000, lo: 0x03}, + {value: 0x8134, lo: 0xac, hi: 0xad}, + {value: 0x812e, lo: 0xae, hi: 0xae}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + // Block 0xa0, offset 0x2d3 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x90, hi: 0x96}, + // Block 0xa1, offset 0x2d5 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x84, hi: 0x89}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0xa2, offset 0x2d8 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x93, hi: 0x93}, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. 
The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfkcTrie. Total size: 19260 bytes (18.81 KiB). Checksum: 1a0bbc4c8c24da49. +type nfkcTrie struct{} + +func newNfkcTrie(i int) *nfkcTrie { + return &nfkcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 95: + return uint16(nfkcValues[n<<6+uint32(b)]) + default: + n -= 95 + return uint16(nfkcSparse.lookup(n, b)) + } +} + +// nfkcValues: 97 blocks, 6208 entries, 12416 bytes +// The third block is the zero block. 
+var nfkcValues = [6208]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x30b0, 0xc1: 0x30b5, 0xc2: 0x47c9, 0xc3: 0x30ba, 0xc4: 0x47d8, 0xc5: 0x47dd, + 0xc6: 0xa000, 0xc7: 0x47e7, 0xc8: 0x3123, 0xc9: 0x3128, 0xca: 0x47ec, 0xcb: 0x313c, + 0xcc: 0x31af, 0xcd: 0x31b4, 0xce: 0x31b9, 0xcf: 0x4800, 0xd1: 0x3245, + 0xd2: 0x3268, 0xd3: 0x326d, 0xd4: 0x480a, 0xd5: 0x480f, 0xd6: 0x481e, + 0xd8: 0xa000, 0xd9: 0x32f4, 0xda: 0x32f9, 0xdb: 0x32fe, 0xdc: 0x4850, 0xdd: 0x3376, + 0xe0: 0x33bc, 0xe1: 0x33c1, 0xe2: 0x485a, 0xe3: 0x33c6, + 0xe4: 0x4869, 0xe5: 0x486e, 0xe6: 0xa000, 0xe7: 0x4878, 0xe8: 0x342f, 0xe9: 0x3434, + 0xea: 0x487d, 0xeb: 0x3448, 0xec: 0x34c0, 0xed: 0x34c5, 0xee: 0x34ca, 0xef: 0x4891, + 0xf1: 0x3556, 0xf2: 0x3579, 0xf3: 0x357e, 0xf4: 0x489b, 0xf5: 0x48a0, + 0xf6: 0x48af, 0xf8: 0xa000, 0xf9: 0x360a, 0xfa: 0x360f, 0xfb: 0x3614, + 0xfc: 0x48e1, 0xfd: 0x3691, 0xff: 0x36aa, + // Block 0x4, offset 0x100 + 0x100: 0x30bf, 0x101: 0x33cb, 0x102: 0x47ce, 0x103: 0x485f, 0x104: 0x30dd, 0x105: 0x33e9, + 0x106: 0x30f1, 0x107: 0x33fd, 0x108: 0x30f6, 0x109: 0x3402, 0x10a: 0x30fb, 0x10b: 0x3407, + 0x10c: 0x3100, 0x10d: 0x340c, 0x10e: 0x310a, 0x10f: 
0x3416, + 0x112: 0x47f1, 0x113: 0x4882, 0x114: 0x3132, 0x115: 0x343e, 0x116: 0x3137, 0x117: 0x3443, + 0x118: 0x3155, 0x119: 0x3461, 0x11a: 0x3146, 0x11b: 0x3452, 0x11c: 0x316e, 0x11d: 0x347a, + 0x11e: 0x3178, 0x11f: 0x3484, 0x120: 0x317d, 0x121: 0x3489, 0x122: 0x3187, 0x123: 0x3493, + 0x124: 0x318c, 0x125: 0x3498, 0x128: 0x31be, 0x129: 0x34cf, + 0x12a: 0x31c3, 0x12b: 0x34d4, 0x12c: 0x31c8, 0x12d: 0x34d9, 0x12e: 0x31eb, 0x12f: 0x34f7, + 0x130: 0x31cd, 0x132: 0x1a8a, 0x133: 0x1b17, 0x134: 0x31f5, 0x135: 0x3501, + 0x136: 0x3209, 0x137: 0x351a, 0x139: 0x3213, 0x13a: 0x3524, 0x13b: 0x321d, + 0x13c: 0x352e, 0x13d: 0x3218, 0x13e: 0x3529, 0x13f: 0x1cdc, + // Block 0x5, offset 0x140 + 0x140: 0x1d64, 0x143: 0x3240, 0x144: 0x3551, 0x145: 0x3259, + 0x146: 0x356a, 0x147: 0x324f, 0x148: 0x3560, 0x149: 0x1d8c, + 0x14c: 0x4814, 0x14d: 0x48a5, 0x14e: 0x3272, 0x14f: 0x3583, 0x150: 0x327c, 0x151: 0x358d, + 0x154: 0x329a, 0x155: 0x35ab, 0x156: 0x32b3, 0x157: 0x35c4, + 0x158: 0x32a4, 0x159: 0x35b5, 0x15a: 0x4837, 0x15b: 0x48c8, 0x15c: 0x32bd, 0x15d: 0x35ce, + 0x15e: 0x32cc, 0x15f: 0x35dd, 0x160: 0x483c, 0x161: 0x48cd, 0x162: 0x32e5, 0x163: 0x35fb, + 0x164: 0x32d6, 0x165: 0x35ec, 0x168: 0x4846, 0x169: 0x48d7, + 0x16a: 0x484b, 0x16b: 0x48dc, 0x16c: 0x3303, 0x16d: 0x3619, 0x16e: 0x330d, 0x16f: 0x3623, + 0x170: 0x3312, 0x171: 0x3628, 0x172: 0x3330, 0x173: 0x3646, 0x174: 0x3353, 0x175: 0x3669, + 0x176: 0x337b, 0x177: 0x3696, 0x178: 0x338f, 0x179: 0x339e, 0x17a: 0x36be, 0x17b: 0x33a8, + 0x17c: 0x36c8, 0x17d: 0x33ad, 0x17e: 0x36cd, 0x17f: 0x00a7, + // Block 0x6, offset 0x180 + 0x184: 0x2f2f, 0x185: 0x2f35, + 0x186: 0x2f3b, 0x187: 0x1a9f, 0x188: 0x1aa2, 0x189: 0x1b38, 0x18a: 0x1ab7, 0x18b: 0x1aba, + 0x18c: 0x1b6e, 0x18d: 0x30c9, 0x18e: 0x33d5, 0x18f: 0x31d7, 0x190: 0x34e3, 0x191: 0x3281, + 0x192: 0x3592, 0x193: 0x3317, 0x194: 0x362d, 0x195: 0x3b10, 0x196: 0x3c9f, 0x197: 0x3b09, + 0x198: 0x3c98, 0x199: 0x3b17, 0x19a: 0x3ca6, 0x19b: 0x3b02, 0x19c: 0x3c91, + 0x19e: 0x39f1, 0x19f: 0x3b80, 0x1a0: 
0x39ea, 0x1a1: 0x3b79, 0x1a2: 0x36f4, 0x1a3: 0x3706, + 0x1a6: 0x3182, 0x1a7: 0x348e, 0x1a8: 0x31ff, 0x1a9: 0x3510, + 0x1aa: 0x482d, 0x1ab: 0x48be, 0x1ac: 0x3ad1, 0x1ad: 0x3c60, 0x1ae: 0x3718, 0x1af: 0x371e, + 0x1b0: 0x3506, 0x1b1: 0x1a6f, 0x1b2: 0x1a72, 0x1b3: 0x1aff, 0x1b4: 0x3169, 0x1b5: 0x3475, + 0x1b8: 0x323b, 0x1b9: 0x354c, 0x1ba: 0x39f8, 0x1bb: 0x3b87, + 0x1bc: 0x36ee, 0x1bd: 0x3700, 0x1be: 0x36fa, 0x1bf: 0x370c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x30ce, 0x1c1: 0x33da, 0x1c2: 0x30d3, 0x1c3: 0x33df, 0x1c4: 0x314b, 0x1c5: 0x3457, + 0x1c6: 0x3150, 0x1c7: 0x345c, 0x1c8: 0x31dc, 0x1c9: 0x34e8, 0x1ca: 0x31e1, 0x1cb: 0x34ed, + 0x1cc: 0x3286, 0x1cd: 0x3597, 0x1ce: 0x328b, 0x1cf: 0x359c, 0x1d0: 0x32a9, 0x1d1: 0x35ba, + 0x1d2: 0x32ae, 0x1d3: 0x35bf, 0x1d4: 0x331c, 0x1d5: 0x3632, 0x1d6: 0x3321, 0x1d7: 0x3637, + 0x1d8: 0x32c7, 0x1d9: 0x35d8, 0x1da: 0x32e0, 0x1db: 0x35f6, + 0x1de: 0x319b, 0x1df: 0x34a7, + 0x1e6: 0x47d3, 0x1e7: 0x4864, 0x1e8: 0x47fb, 0x1e9: 0x488c, + 0x1ea: 0x3aa0, 0x1eb: 0x3c2f, 0x1ec: 0x3a7d, 0x1ed: 0x3c0c, 0x1ee: 0x4819, 0x1ef: 0x48aa, + 0x1f0: 0x3a99, 0x1f1: 0x3c28, 0x1f2: 0x3385, 0x1f3: 0x36a0, + // Block 0x8, offset 0x200 + 0x200: 0x9933, 0x201: 0x9933, 0x202: 0x9933, 0x203: 0x9933, 0x204: 0x9933, 0x205: 0x8133, + 0x206: 0x9933, 0x207: 0x9933, 0x208: 0x9933, 0x209: 0x9933, 0x20a: 0x9933, 0x20b: 0x9933, + 0x20c: 0x9933, 0x20d: 0x8133, 0x20e: 0x8133, 0x20f: 0x9933, 0x210: 0x8133, 0x211: 0x9933, + 0x212: 0x8133, 0x213: 0x9933, 0x214: 0x9933, 0x215: 0x8134, 0x216: 0x812e, 0x217: 0x812e, + 0x218: 0x812e, 0x219: 0x812e, 0x21a: 0x8134, 0x21b: 0x992c, 0x21c: 0x812e, 0x21d: 0x812e, + 0x21e: 0x812e, 0x21f: 0x812e, 0x220: 0x812e, 0x221: 0x812a, 0x222: 0x812a, 0x223: 0x992e, + 0x224: 0x992e, 0x225: 0x992e, 0x226: 0x992e, 0x227: 0x992a, 0x228: 0x992a, 0x229: 0x812e, + 0x22a: 0x812e, 0x22b: 0x812e, 0x22c: 0x812e, 0x22d: 0x992e, 0x22e: 0x992e, 0x22f: 0x812e, + 0x230: 0x992e, 0x231: 0x992e, 0x232: 0x812e, 0x233: 0x812e, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 
0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812e, 0x23a: 0x812e, 0x23b: 0x812e, + 0x23c: 0x812e, 0x23d: 0x8133, 0x23e: 0x8133, 0x23f: 0x8133, + // Block 0x9, offset 0x240 + 0x240: 0x4aef, 0x241: 0x4af4, 0x242: 0x9933, 0x243: 0x4af9, 0x244: 0x4bb2, 0x245: 0x9937, + 0x246: 0x8133, 0x247: 0x812e, 0x248: 0x812e, 0x249: 0x812e, 0x24a: 0x8133, 0x24b: 0x8133, + 0x24c: 0x8133, 0x24d: 0x812e, 0x24e: 0x812e, 0x250: 0x8133, 0x251: 0x8133, + 0x252: 0x8133, 0x253: 0x812e, 0x254: 0x812e, 0x255: 0x812e, 0x256: 0x812e, 0x257: 0x8133, + 0x258: 0x8134, 0x259: 0x812e, 0x25a: 0x812e, 0x25b: 0x8133, 0x25c: 0x8135, 0x25d: 0x8136, + 0x25e: 0x8136, 0x25f: 0x8135, 0x260: 0x8136, 0x261: 0x8136, 0x262: 0x8135, 0x263: 0x8133, + 0x264: 0x8133, 0x265: 0x8133, 0x266: 0x8133, 0x267: 0x8133, 0x268: 0x8133, 0x269: 0x8133, + 0x26a: 0x8133, 0x26b: 0x8133, 0x26c: 0x8133, 0x26d: 0x8133, 0x26e: 0x8133, 0x26f: 0x8133, + 0x274: 0x01ee, + 0x27a: 0x43e6, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x439b, 0x285: 0x45bc, + 0x286: 0x372a, 0x287: 0x00ce, 0x288: 0x3748, 0x289: 0x3754, 0x28a: 0x3766, + 0x28c: 0x3784, 0x28e: 0x3796, 0x28f: 0x37b4, 0x290: 0x3f49, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3778, 0x2ab: 0x37a8, 0x2ac: 0x493f, 0x2ad: 0x37d8, 0x2ae: 0x4969, 0x2af: 0x37ea, + 0x2b0: 0x3fb1, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4981, 0x2cb: 0x499f, + 0x2cc: 0x3808, 0x2cd: 0x3820, 0x2ce: 0x49b7, 0x2d0: 0x0242, 0x2d1: 0x0254, + 0x2d2: 0x0230, 0x2d3: 0x444d, 0x2d4: 0x4453, 0x2d5: 0x027e, 0x2d6: 0x026c, + 0x2f0: 0x025a, 0x2f1: 0x026f, 0x2f2: 0x0272, 0x2f4: 0x020c, 0x2f5: 0x024b, + 0x2f9: 0x022a, + // Block 0xc, offset 0x300 + 0x300: 0x3862, 0x301: 0x386e, 0x303: 0x385c, + 0x306: 0xa000, 0x307: 0x384a, + 0x30c: 0x389e, 0x30d: 0x3886, 0x30e: 0x38b0, 0x310: 0xa000, + 0x313: 
0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3892, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x3916, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x3874, 0x342: 0x38f8, + 0x350: 0x3850, 0x351: 0x38d4, + 0x352: 0x3856, 0x353: 0x38da, 0x356: 0x3868, 0x357: 0x38ec, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x396a, 0x35b: 0x3970, 0x35c: 0x387a, 0x35d: 0x38fe, + 0x35e: 0x3880, 0x35f: 0x3904, 0x362: 0x388c, 0x363: 0x3910, + 0x364: 0x3898, 0x365: 0x391c, 0x366: 0x38a4, 0x367: 0x3928, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x3976, 0x36b: 0x397c, 0x36c: 0x38ce, 0x36d: 0x3952, 0x36e: 0x38aa, 0x36f: 0x392e, + 0x370: 0x38b6, 0x371: 0x393a, 0x372: 0x38bc, 0x373: 0x3940, 0x374: 0x38c2, 0x375: 0x3946, + 0x378: 0x38c8, 0x379: 0x394c, + // Block 0xe, offset 0x380 + 0x387: 0x1e91, + 0x391: 0x812e, + 0x392: 0x8133, 0x393: 0x8133, 0x394: 0x8133, 0x395: 0x8133, 0x396: 0x812e, 0x397: 0x8133, + 0x398: 0x8133, 0x399: 0x8133, 0x39a: 0x812f, 0x39b: 0x812e, 0x39c: 0x8133, 0x39d: 0x8133, + 0x39e: 0x8133, 0x39f: 0x8133, 0x3a0: 0x8133, 0x3a1: 0x8133, 0x3a2: 0x812e, 0x3a3: 0x812e, + 0x3a4: 0x812e, 0x3a5: 0x812e, 0x3a6: 0x812e, 0x3a7: 0x812e, 0x3a8: 0x8133, 0x3a9: 0x8133, + 0x3aa: 0x812e, 0x3ab: 0x8133, 0x3ac: 0x8133, 0x3ad: 0x812f, 0x3ae: 0x8132, 0x3af: 0x8133, + 0x3b0: 0x8106, 0x3b1: 0x8107, 0x3b2: 0x8108, 0x3b3: 0x8109, 0x3b4: 0x810a, 0x3b5: 0x810b, + 0x3b6: 0x810c, 0x3b7: 0x810d, 0x3b8: 0x810e, 0x3b9: 0x810f, 0x3ba: 0x810f, 0x3bb: 0x8110, + 0x3bc: 0x8111, 0x3bd: 0x8112, 0x3bf: 0x8113, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8117, + 0x3cc: 0x8118, 0x3cd: 0x8119, 0x3ce: 0x811a, 0x3cf: 0x811b, 0x3d0: 0x811c, 0x3d1: 0x811d, + 0x3d2: 0x811e, 0x3d3: 0x9933, 0x3d4: 0x9933, 0x3d5: 0x992e, 0x3d6: 0x812e, 0x3d7: 0x8133, + 0x3d8: 0x8133, 0x3d9: 0x8133, 0x3da: 0x8133, 
0x3db: 0x8133, 0x3dc: 0x812e, 0x3dd: 0x8133, + 0x3de: 0x8133, 0x3df: 0x812e, + 0x3f0: 0x811f, 0x3f5: 0x1eb4, + 0x3f6: 0x2143, 0x3f7: 0x217f, 0x3f8: 0x217a, + // Block 0x10, offset 0x400 + 0x40a: 0x8133, 0x40b: 0x8133, + 0x40c: 0x8133, 0x40d: 0x8133, 0x40e: 0x8133, 0x40f: 0x812e, 0x410: 0x812e, 0x411: 0x812e, + 0x412: 0x812e, 0x413: 0x812e, 0x414: 0x8133, 0x415: 0x8133, 0x416: 0x8133, 0x417: 0x8133, + 0x418: 0x8133, 0x419: 0x8133, 0x41a: 0x8133, 0x41b: 0x8133, 0x41c: 0x8133, 0x41d: 0x8133, + 0x41e: 0x8133, 0x41f: 0x8133, 0x420: 0x8133, 0x421: 0x8133, 0x423: 0x812e, + 0x424: 0x8133, 0x425: 0x8133, 0x426: 0x812e, 0x427: 0x8133, 0x428: 0x8133, 0x429: 0x812e, + 0x42a: 0x8133, 0x42b: 0x8133, 0x42c: 0x8133, 0x42d: 0x812e, 0x42e: 0x812e, 0x42f: 0x812e, + 0x430: 0x8117, 0x431: 0x8118, 0x432: 0x8119, 0x433: 0x8133, 0x434: 0x8133, 0x435: 0x8133, + 0x436: 0x812e, 0x437: 0x8133, 0x438: 0x8133, 0x439: 0x812e, 0x43a: 0x812e, 0x43b: 0x8133, + 0x43c: 0x8133, 0x43d: 0x8133, 0x43e: 0x8133, 0x43f: 0x8133, + // Block 0x11, offset 0x440 + 0x445: 0xa000, + 0x446: 0x2e5d, 0x447: 0xa000, 0x448: 0x2e65, 0x449: 0xa000, 0x44a: 0x2e6d, 0x44b: 0xa000, + 0x44c: 0x2e75, 0x44d: 0xa000, 0x44e: 0x2e7d, 0x451: 0xa000, + 0x452: 0x2e85, + 0x474: 0x8103, 0x475: 0x9900, + 0x47a: 0xa000, 0x47b: 0x2e8d, + 0x47c: 0xa000, 0x47d: 0x2e95, 0x47e: 0xa000, 0x47f: 0xa000, + // Block 0x12, offset 0x480 + 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x0104, 0x485: 0x0107, + 0x486: 0x0506, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x011f, 0x48b: 0x0122, + 0x48c: 0x0125, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e6, + 0x492: 0x009f, 0x493: 0x0110, 0x494: 0x050a, 0x495: 0x050e, 0x496: 0x00a1, 0x497: 0x00a9, + 0x498: 0x00ab, 0x499: 0x0516, 0x49a: 0x015b, 0x49b: 0x00ad, 0x49c: 0x051a, 0x49d: 0x0242, + 0x49e: 0x0245, 0x49f: 0x0248, 0x4a0: 0x027e, 0x4a1: 0x0281, 0x4a2: 0x0093, 0x4a3: 0x00a5, + 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x0242, 0x4a7: 0x0245, 0x4a8: 0x026f, 0x4a9: 
0x027e, + 0x4aa: 0x0281, + 0x4b8: 0x02b4, + // Block 0x13, offset 0x4c0 + 0x4db: 0x010a, 0x4dc: 0x0087, 0x4dd: 0x0113, + 0x4de: 0x00d7, 0x4df: 0x0125, 0x4e0: 0x008d, 0x4e1: 0x012b, 0x4e2: 0x0131, 0x4e3: 0x013d, + 0x4e4: 0x0146, 0x4e5: 0x0149, 0x4e6: 0x014c, 0x4e7: 0x051e, 0x4e8: 0x01c7, 0x4e9: 0x0155, + 0x4ea: 0x0522, 0x4eb: 0x01ca, 0x4ec: 0x0161, 0x4ed: 0x015e, 0x4ee: 0x0164, 0x4ef: 0x0167, + 0x4f0: 0x016a, 0x4f1: 0x016d, 0x4f2: 0x0176, 0x4f3: 0x018e, 0x4f4: 0x0191, 0x4f5: 0x00f2, + 0x4f6: 0x019a, 0x4f7: 0x019d, 0x4f8: 0x0512, 0x4f9: 0x01a0, 0x4fa: 0x01a3, 0x4fb: 0x00b5, + 0x4fc: 0x01af, 0x4fd: 0x01b2, 0x4fe: 0x01b5, 0x4ff: 0x0254, + // Block 0x14, offset 0x500 + 0x500: 0x8133, 0x501: 0x8133, 0x502: 0x812e, 0x503: 0x8133, 0x504: 0x8133, 0x505: 0x8133, + 0x506: 0x8133, 0x507: 0x8133, 0x508: 0x8133, 0x509: 0x8133, 0x50a: 0x812e, 0x50b: 0x8133, + 0x50c: 0x8133, 0x50d: 0x8136, 0x50e: 0x812b, 0x50f: 0x812e, 0x510: 0x812a, 0x511: 0x8133, + 0x512: 0x8133, 0x513: 0x8133, 0x514: 0x8133, 0x515: 0x8133, 0x516: 0x8133, 0x517: 0x8133, + 0x518: 0x8133, 0x519: 0x8133, 0x51a: 0x8133, 0x51b: 0x8133, 0x51c: 0x8133, 0x51d: 0x8133, + 0x51e: 0x8133, 0x51f: 0x8133, 0x520: 0x8133, 0x521: 0x8133, 0x522: 0x8133, 0x523: 0x8133, + 0x524: 0x8133, 0x525: 0x8133, 0x526: 0x8133, 0x527: 0x8133, 0x528: 0x8133, 0x529: 0x8133, + 0x52a: 0x8133, 0x52b: 0x8133, 0x52c: 0x8133, 0x52d: 0x8133, 0x52e: 0x8133, 0x52f: 0x8133, + 0x530: 0x8133, 0x531: 0x8133, 0x532: 0x8133, 0x533: 0x8133, 0x534: 0x8133, 0x535: 0x8133, + 0x536: 0x8134, 0x537: 0x8132, 0x538: 0x8132, 0x539: 0x812e, 0x53a: 0x812d, 0x53b: 0x8133, + 0x53c: 0x8135, 0x53d: 0x812e, 0x53e: 0x8133, 0x53f: 0x812e, + // Block 0x15, offset 0x540 + 0x540: 0x30d8, 0x541: 0x33e4, 0x542: 0x30e2, 0x543: 0x33ee, 0x544: 0x30e7, 0x545: 0x33f3, + 0x546: 0x30ec, 0x547: 0x33f8, 0x548: 0x3a0d, 0x549: 0x3b9c, 0x54a: 0x3105, 0x54b: 0x3411, + 0x54c: 0x310f, 0x54d: 0x341b, 0x54e: 0x311e, 0x54f: 0x342a, 0x550: 0x3114, 0x551: 0x3420, + 0x552: 0x3119, 0x553: 0x3425, 0x554: 
0x3a30, 0x555: 0x3bbf, 0x556: 0x3a37, 0x557: 0x3bc6, + 0x558: 0x315a, 0x559: 0x3466, 0x55a: 0x315f, 0x55b: 0x346b, 0x55c: 0x3a45, 0x55d: 0x3bd4, + 0x55e: 0x3164, 0x55f: 0x3470, 0x560: 0x3173, 0x561: 0x347f, 0x562: 0x3191, 0x563: 0x349d, + 0x564: 0x31a0, 0x565: 0x34ac, 0x566: 0x3196, 0x567: 0x34a2, 0x568: 0x31a5, 0x569: 0x34b1, + 0x56a: 0x31aa, 0x56b: 0x34b6, 0x56c: 0x31f0, 0x56d: 0x34fc, 0x56e: 0x3a4c, 0x56f: 0x3bdb, + 0x570: 0x31fa, 0x571: 0x350b, 0x572: 0x3204, 0x573: 0x3515, 0x574: 0x320e, 0x575: 0x351f, + 0x576: 0x4805, 0x577: 0x4896, 0x578: 0x3a53, 0x579: 0x3be2, 0x57a: 0x3227, 0x57b: 0x3538, + 0x57c: 0x3222, 0x57d: 0x3533, 0x57e: 0x322c, 0x57f: 0x353d, + // Block 0x16, offset 0x580 + 0x580: 0x3231, 0x581: 0x3542, 0x582: 0x3236, 0x583: 0x3547, 0x584: 0x324a, 0x585: 0x355b, + 0x586: 0x3254, 0x587: 0x3565, 0x588: 0x3263, 0x589: 0x3574, 0x58a: 0x325e, 0x58b: 0x356f, + 0x58c: 0x3a76, 0x58d: 0x3c05, 0x58e: 0x3a84, 0x58f: 0x3c13, 0x590: 0x3a8b, 0x591: 0x3c1a, + 0x592: 0x3a92, 0x593: 0x3c21, 0x594: 0x3290, 0x595: 0x35a1, 0x596: 0x3295, 0x597: 0x35a6, + 0x598: 0x329f, 0x599: 0x35b0, 0x59a: 0x4832, 0x59b: 0x48c3, 0x59c: 0x3ad8, 0x59d: 0x3c67, + 0x59e: 0x32b8, 0x59f: 0x35c9, 0x5a0: 0x32c2, 0x5a1: 0x35d3, 0x5a2: 0x4841, 0x5a3: 0x48d2, + 0x5a4: 0x3adf, 0x5a5: 0x3c6e, 0x5a6: 0x3ae6, 0x5a7: 0x3c75, 0x5a8: 0x3aed, 0x5a9: 0x3c7c, + 0x5aa: 0x32d1, 0x5ab: 0x35e2, 0x5ac: 0x32db, 0x5ad: 0x35f1, 0x5ae: 0x32ef, 0x5af: 0x3605, + 0x5b0: 0x32ea, 0x5b1: 0x3600, 0x5b2: 0x332b, 0x5b3: 0x3641, 0x5b4: 0x333a, 0x5b5: 0x3650, + 0x5b6: 0x3335, 0x5b7: 0x364b, 0x5b8: 0x3af4, 0x5b9: 0x3c83, 0x5ba: 0x3afb, 0x5bb: 0x3c8a, + 0x5bc: 0x333f, 0x5bd: 0x3655, 0x5be: 0x3344, 0x5bf: 0x365a, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3349, 0x5c1: 0x365f, 0x5c2: 0x334e, 0x5c3: 0x3664, 0x5c4: 0x335d, 0x5c5: 0x3673, + 0x5c6: 0x3358, 0x5c7: 0x366e, 0x5c8: 0x3362, 0x5c9: 0x367d, 0x5ca: 0x3367, 0x5cb: 0x3682, + 0x5cc: 0x336c, 0x5cd: 0x3687, 0x5ce: 0x338a, 0x5cf: 0x36a5, 0x5d0: 0x33a3, 0x5d1: 0x36c3, + 0x5d2: 
0x33b2, 0x5d3: 0x36d2, 0x5d4: 0x33b7, 0x5d5: 0x36d7, 0x5d6: 0x34bb, 0x5d7: 0x35e7, + 0x5d8: 0x3678, 0x5d9: 0x36b4, 0x5da: 0x1d10, 0x5db: 0x4418, + 0x5e0: 0x47e2, 0x5e1: 0x4873, 0x5e2: 0x30c4, 0x5e3: 0x33d0, + 0x5e4: 0x39b9, 0x5e5: 0x3b48, 0x5e6: 0x39b2, 0x5e7: 0x3b41, 0x5e8: 0x39c7, 0x5e9: 0x3b56, + 0x5ea: 0x39c0, 0x5eb: 0x3b4f, 0x5ec: 0x39ff, 0x5ed: 0x3b8e, 0x5ee: 0x39d5, 0x5ef: 0x3b64, + 0x5f0: 0x39ce, 0x5f1: 0x3b5d, 0x5f2: 0x39e3, 0x5f3: 0x3b72, 0x5f4: 0x39dc, 0x5f5: 0x3b6b, + 0x5f6: 0x3a06, 0x5f7: 0x3b95, 0x5f8: 0x47f6, 0x5f9: 0x4887, 0x5fa: 0x3141, 0x5fb: 0x344d, + 0x5fc: 0x312d, 0x5fd: 0x3439, 0x5fe: 0x3a1b, 0x5ff: 0x3baa, + // Block 0x18, offset 0x600 + 0x600: 0x3a14, 0x601: 0x3ba3, 0x602: 0x3a29, 0x603: 0x3bb8, 0x604: 0x3a22, 0x605: 0x3bb1, + 0x606: 0x3a3e, 0x607: 0x3bcd, 0x608: 0x31d2, 0x609: 0x34de, 0x60a: 0x31e6, 0x60b: 0x34f2, + 0x60c: 0x4828, 0x60d: 0x48b9, 0x60e: 0x3277, 0x60f: 0x3588, 0x610: 0x3a61, 0x611: 0x3bf0, + 0x612: 0x3a5a, 0x613: 0x3be9, 0x614: 0x3a6f, 0x615: 0x3bfe, 0x616: 0x3a68, 0x617: 0x3bf7, + 0x618: 0x3aca, 0x619: 0x3c59, 0x61a: 0x3aae, 0x61b: 0x3c3d, 0x61c: 0x3aa7, 0x61d: 0x3c36, + 0x61e: 0x3abc, 0x61f: 0x3c4b, 0x620: 0x3ab5, 0x621: 0x3c44, 0x622: 0x3ac3, 0x623: 0x3c52, + 0x624: 0x3326, 0x625: 0x363c, 0x626: 0x3308, 0x627: 0x361e, 0x628: 0x3b25, 0x629: 0x3cb4, + 0x62a: 0x3b1e, 0x62b: 0x3cad, 0x62c: 0x3b33, 0x62d: 0x3cc2, 0x62e: 0x3b2c, 0x62f: 0x3cbb, + 0x630: 0x3b3a, 0x631: 0x3cc9, 0x632: 0x3371, 0x633: 0x368c, 0x634: 0x3399, 0x635: 0x36b9, + 0x636: 0x3394, 0x637: 0x36af, 0x638: 0x3380, 0x639: 0x369b, + // Block 0x19, offset 0x640 + 0x640: 0x4945, 0x641: 0x494b, 0x642: 0x4a5f, 0x643: 0x4a77, 0x644: 0x4a67, 0x645: 0x4a7f, + 0x646: 0x4a6f, 0x647: 0x4a87, 0x648: 0x48eb, 0x649: 0x48f1, 0x64a: 0x49cf, 0x64b: 0x49e7, + 0x64c: 0x49d7, 0x64d: 0x49ef, 0x64e: 0x49df, 0x64f: 0x49f7, 0x650: 0x4957, 0x651: 0x495d, + 0x652: 0x3ef9, 0x653: 0x3f09, 0x654: 0x3f01, 0x655: 0x3f11, + 0x658: 0x48f7, 0x659: 0x48fd, 0x65a: 0x3e29, 0x65b: 0x3e39, 0x65c: 
0x3e31, 0x65d: 0x3e41, + 0x660: 0x496f, 0x661: 0x4975, 0x662: 0x4a8f, 0x663: 0x4aa7, + 0x664: 0x4a97, 0x665: 0x4aaf, 0x666: 0x4a9f, 0x667: 0x4ab7, 0x668: 0x4903, 0x669: 0x4909, + 0x66a: 0x49ff, 0x66b: 0x4a17, 0x66c: 0x4a07, 0x66d: 0x4a1f, 0x66e: 0x4a0f, 0x66f: 0x4a27, + 0x670: 0x4987, 0x671: 0x498d, 0x672: 0x3f59, 0x673: 0x3f71, 0x674: 0x3f61, 0x675: 0x3f79, + 0x676: 0x3f69, 0x677: 0x3f81, 0x678: 0x490f, 0x679: 0x4915, 0x67a: 0x3e59, 0x67b: 0x3e71, + 0x67c: 0x3e61, 0x67d: 0x3e79, 0x67e: 0x3e69, 0x67f: 0x3e81, + // Block 0x1a, offset 0x680 + 0x680: 0x4993, 0x681: 0x4999, 0x682: 0x3f89, 0x683: 0x3f99, 0x684: 0x3f91, 0x685: 0x3fa1, + 0x688: 0x491b, 0x689: 0x4921, 0x68a: 0x3e89, 0x68b: 0x3e99, + 0x68c: 0x3e91, 0x68d: 0x3ea1, 0x690: 0x49a5, 0x691: 0x49ab, + 0x692: 0x3fc1, 0x693: 0x3fd9, 0x694: 0x3fc9, 0x695: 0x3fe1, 0x696: 0x3fd1, 0x697: 0x3fe9, + 0x699: 0x4927, 0x69b: 0x3ea9, 0x69d: 0x3eb1, + 0x69f: 0x3eb9, 0x6a0: 0x49bd, 0x6a1: 0x49c3, 0x6a2: 0x4abf, 0x6a3: 0x4ad7, + 0x6a4: 0x4ac7, 0x6a5: 0x4adf, 0x6a6: 0x4acf, 0x6a7: 0x4ae7, 0x6a8: 0x492d, 0x6a9: 0x4933, + 0x6aa: 0x4a2f, 0x6ab: 0x4a47, 0x6ac: 0x4a37, 0x6ad: 0x4a4f, 0x6ae: 0x4a3f, 0x6af: 0x4a57, + 0x6b0: 0x4939, 0x6b1: 0x445f, 0x6b2: 0x37d2, 0x6b3: 0x4465, 0x6b4: 0x4963, 0x6b5: 0x446b, + 0x6b6: 0x37e4, 0x6b7: 0x4471, 0x6b8: 0x3802, 0x6b9: 0x4477, 0x6ba: 0x381a, 0x6bb: 0x447d, + 0x6bc: 0x49b1, 0x6bd: 0x4483, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3ee1, 0x6c1: 0x3ee9, 0x6c2: 0x42c5, 0x6c3: 0x42e3, 0x6c4: 0x42cf, 0x6c5: 0x42ed, + 0x6c6: 0x42d9, 0x6c7: 0x42f7, 0x6c8: 0x3e19, 0x6c9: 0x3e21, 0x6ca: 0x4211, 0x6cb: 0x422f, + 0x6cc: 0x421b, 0x6cd: 0x4239, 0x6ce: 0x4225, 0x6cf: 0x4243, 0x6d0: 0x3f29, 0x6d1: 0x3f31, + 0x6d2: 0x4301, 0x6d3: 0x431f, 0x6d4: 0x430b, 0x6d5: 0x4329, 0x6d6: 0x4315, 0x6d7: 0x4333, + 0x6d8: 0x3e49, 0x6d9: 0x3e51, 0x6da: 0x424d, 0x6db: 0x426b, 0x6dc: 0x4257, 0x6dd: 0x4275, + 0x6de: 0x4261, 0x6df: 0x427f, 0x6e0: 0x4001, 0x6e1: 0x4009, 0x6e2: 0x433d, 0x6e3: 0x435b, + 0x6e4: 0x4347, 0x6e5: 0x4365, 0x6e6: 
0x4351, 0x6e7: 0x436f, 0x6e8: 0x3ec1, 0x6e9: 0x3ec9, + 0x6ea: 0x4289, 0x6eb: 0x42a7, 0x6ec: 0x4293, 0x6ed: 0x42b1, 0x6ee: 0x429d, 0x6ef: 0x42bb, + 0x6f0: 0x37c6, 0x6f1: 0x37c0, 0x6f2: 0x3ed1, 0x6f3: 0x37cc, 0x6f4: 0x3ed9, + 0x6f6: 0x4951, 0x6f7: 0x3ef1, 0x6f8: 0x3736, 0x6f9: 0x3730, 0x6fa: 0x3724, 0x6fb: 0x442f, + 0x6fc: 0x373c, 0x6fd: 0x43c8, 0x6fe: 0x0257, 0x6ff: 0x43c8, + // Block 0x1c, offset 0x700 + 0x700: 0x43e1, 0x701: 0x45c3, 0x702: 0x3f19, 0x703: 0x37de, 0x704: 0x3f21, + 0x706: 0x497b, 0x707: 0x3f39, 0x708: 0x3742, 0x709: 0x4435, 0x70a: 0x374e, 0x70b: 0x443b, + 0x70c: 0x375a, 0x70d: 0x45ca, 0x70e: 0x45d1, 0x70f: 0x45d8, 0x710: 0x37f6, 0x711: 0x37f0, + 0x712: 0x3f41, 0x713: 0x4625, 0x716: 0x37fc, 0x717: 0x3f51, + 0x718: 0x3772, 0x719: 0x376c, 0x71a: 0x3760, 0x71b: 0x4441, 0x71d: 0x45df, + 0x71e: 0x45e6, 0x71f: 0x45ed, 0x720: 0x382c, 0x721: 0x3826, 0x722: 0x3fa9, 0x723: 0x462d, + 0x724: 0x380e, 0x725: 0x3814, 0x726: 0x3832, 0x727: 0x3fb9, 0x728: 0x37a2, 0x729: 0x379c, + 0x72a: 0x3790, 0x72b: 0x444d, 0x72c: 0x378a, 0x72d: 0x45b5, 0x72e: 0x45bc, 0x72f: 0x0081, + 0x732: 0x3ff1, 0x733: 0x3838, 0x734: 0x3ff9, + 0x736: 0x49c9, 0x737: 0x4011, 0x738: 0x377e, 0x739: 0x4447, 0x73a: 0x37ae, 0x73b: 0x4459, + 0x73c: 0x37ba, 0x73d: 0x439b, 0x73e: 0x43cd, + // Block 0x1d, offset 0x740 + 0x740: 0x1d08, 0x741: 0x1d0c, 0x742: 0x0047, 0x743: 0x1d84, 0x745: 0x1d18, + 0x746: 0x1d1c, 0x747: 0x00ef, 0x749: 0x1d88, 0x74a: 0x008f, 0x74b: 0x0051, + 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 0x74f: 0x00e0, 0x750: 0x0053, 0x751: 0x0053, + 0x752: 0x0059, 0x753: 0x0099, 0x755: 0x005d, 0x756: 0x1abd, + 0x759: 0x0061, 0x75a: 0x0063, 0x75b: 0x0065, 0x75c: 0x0065, 0x75d: 0x0065, + 0x760: 0x1acf, 0x761: 0x1cf8, 0x762: 0x1ad8, + 0x764: 0x0075, 0x766: 0x023c, 0x768: 0x0075, + 0x76a: 0x0057, 0x76b: 0x4413, 0x76c: 0x0045, 0x76d: 0x0047, 0x76f: 0x008b, + 0x770: 0x004b, 0x771: 0x004d, 0x773: 0x005b, 0x774: 0x009f, 0x775: 0x0308, + 0x776: 0x030b, 0x777: 0x030e, 0x778: 0x0311, 0x779: 0x0093, 0x77b: 
0x1cc8, + 0x77c: 0x026c, 0x77d: 0x0245, 0x77e: 0x01fd, 0x77f: 0x0224, + // Block 0x1e, offset 0x780 + 0x780: 0x055a, 0x785: 0x0049, + 0x786: 0x0089, 0x787: 0x008b, 0x788: 0x0093, 0x789: 0x0095, + 0x790: 0x235e, 0x791: 0x236a, + 0x792: 0x241e, 0x793: 0x2346, 0x794: 0x23ca, 0x795: 0x2352, 0x796: 0x23d0, 0x797: 0x23e8, + 0x798: 0x23f4, 0x799: 0x2358, 0x79a: 0x23fa, 0x79b: 0x2364, 0x79c: 0x23ee, 0x79d: 0x2400, + 0x79e: 0x2406, 0x79f: 0x1dec, 0x7a0: 0x0053, 0x7a1: 0x1a87, 0x7a2: 0x1cd4, 0x7a3: 0x1a90, + 0x7a4: 0x006d, 0x7a5: 0x1adb, 0x7a6: 0x1d00, 0x7a7: 0x1e78, 0x7a8: 0x1a93, 0x7a9: 0x0071, + 0x7aa: 0x1ae7, 0x7ab: 0x1d04, 0x7ac: 0x0059, 0x7ad: 0x0047, 0x7ae: 0x0049, 0x7af: 0x005b, + 0x7b0: 0x0093, 0x7b1: 0x1b14, 0x7b2: 0x1d48, 0x7b3: 0x1b1d, 0x7b4: 0x00ad, 0x7b5: 0x1b92, + 0x7b6: 0x1d7c, 0x7b7: 0x1e8c, 0x7b8: 0x1b20, 0x7b9: 0x00b1, 0x7ba: 0x1b95, 0x7bb: 0x1d80, + 0x7bc: 0x0099, 0x7bd: 0x0087, 0x7be: 0x0089, 0x7bf: 0x009b, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x3d47, 0x7c3: 0xa000, 0x7c4: 0x3d4e, 0x7c5: 0xa000, + 0x7c7: 0x3d55, 0x7c8: 0xa000, 0x7c9: 0x3d5c, + 0x7cd: 0xa000, + 0x7e0: 0x30a6, 0x7e1: 0xa000, 0x7e2: 0x3d6a, + 0x7e4: 0xa000, 0x7e5: 0xa000, + 0x7ed: 0x3d63, 0x7ee: 0x30a1, 0x7ef: 0x30ab, + 0x7f0: 0x3d71, 0x7f1: 0x3d78, 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0x3d7f, 0x7f5: 0x3d86, + 0x7f6: 0xa000, 0x7f7: 0xa000, 0x7f8: 0x3d8d, 0x7f9: 0x3d94, 0x7fa: 0xa000, 0x7fb: 0xa000, + 0x7fc: 0xa000, 0x7fd: 0xa000, + // Block 0x20, offset 0x800 + 0x800: 0x3d9b, 0x801: 0x3da2, 0x802: 0xa000, 0x803: 0xa000, 0x804: 0x3db7, 0x805: 0x3dbe, + 0x806: 0xa000, 0x807: 0xa000, 0x808: 0x3dc5, 0x809: 0x3dcc, + 0x811: 0xa000, + 0x812: 0xa000, + 0x822: 0xa000, + 0x828: 0xa000, 0x829: 0xa000, + 0x82b: 0xa000, 0x82c: 0x3de1, 0x82d: 0x3de8, 0x82e: 0x3def, 0x82f: 0x3df6, + 0x832: 0xa000, 0x833: 0xa000, 0x834: 0xa000, 0x835: 0xa000, + // Block 0x21, offset 0x840 + 0x860: 0x0023, 0x861: 0x0025, 0x862: 0x0027, 0x863: 0x0029, + 0x864: 0x002b, 0x865: 0x002d, 0x866: 0x002f, 0x867: 0x0031, 0x868: 
0x0033, 0x869: 0x19af, + 0x86a: 0x19b2, 0x86b: 0x19b5, 0x86c: 0x19b8, 0x86d: 0x19bb, 0x86e: 0x19be, 0x86f: 0x19c1, + 0x870: 0x19c4, 0x871: 0x19c7, 0x872: 0x19ca, 0x873: 0x19d3, 0x874: 0x1b98, 0x875: 0x1b9c, + 0x876: 0x1ba0, 0x877: 0x1ba4, 0x878: 0x1ba8, 0x879: 0x1bac, 0x87a: 0x1bb0, 0x87b: 0x1bb4, + 0x87c: 0x1bb8, 0x87d: 0x1db0, 0x87e: 0x1db5, 0x87f: 0x1dba, + // Block 0x22, offset 0x880 + 0x880: 0x1dbf, 0x881: 0x1dc4, 0x882: 0x1dc9, 0x883: 0x1dce, 0x884: 0x1dd3, 0x885: 0x1dd8, + 0x886: 0x1ddd, 0x887: 0x1de2, 0x888: 0x19ac, 0x889: 0x19d0, 0x88a: 0x19f4, 0x88b: 0x1a18, + 0x88c: 0x1a3c, 0x88d: 0x1a45, 0x88e: 0x1a4b, 0x88f: 0x1a51, 0x890: 0x1a57, 0x891: 0x1c90, + 0x892: 0x1c94, 0x893: 0x1c98, 0x894: 0x1c9c, 0x895: 0x1ca0, 0x896: 0x1ca4, 0x897: 0x1ca8, + 0x898: 0x1cac, 0x899: 0x1cb0, 0x89a: 0x1cb4, 0x89b: 0x1cb8, 0x89c: 0x1c24, 0x89d: 0x1c28, + 0x89e: 0x1c2c, 0x89f: 0x1c30, 0x8a0: 0x1c34, 0x8a1: 0x1c38, 0x8a2: 0x1c3c, 0x8a3: 0x1c40, + 0x8a4: 0x1c44, 0x8a5: 0x1c48, 0x8a6: 0x1c4c, 0x8a7: 0x1c50, 0x8a8: 0x1c54, 0x8a9: 0x1c58, + 0x8aa: 0x1c5c, 0x8ab: 0x1c60, 0x8ac: 0x1c64, 0x8ad: 0x1c68, 0x8ae: 0x1c6c, 0x8af: 0x1c70, + 0x8b0: 0x1c74, 0x8b1: 0x1c78, 0x8b2: 0x1c7c, 0x8b3: 0x1c80, 0x8b4: 0x1c84, 0x8b5: 0x1c88, + 0x8b6: 0x0043, 0x8b7: 0x0045, 0x8b8: 0x0047, 0x8b9: 0x0049, 0x8ba: 0x004b, 0x8bb: 0x004d, + 0x8bc: 0x004f, 0x8bd: 0x0051, 0x8be: 0x0053, 0x8bf: 0x0055, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x07ba, 0x8c1: 0x07de, 0x8c2: 0x07ea, 0x8c3: 0x07fa, 0x8c4: 0x0802, 0x8c5: 0x080e, + 0x8c6: 0x0816, 0x8c7: 0x081e, 0x8c8: 0x082a, 0x8c9: 0x087e, 0x8ca: 0x0896, 0x8cb: 0x08a6, + 0x8cc: 0x08b6, 0x8cd: 0x08c6, 0x8ce: 0x08d6, 0x8cf: 0x08f6, 0x8d0: 0x08fa, 0x8d1: 0x08fe, + 0x8d2: 0x0932, 0x8d3: 0x095a, 0x8d4: 0x096a, 0x8d5: 0x0972, 0x8d6: 0x0976, 0x8d7: 0x0982, + 0x8d8: 0x099e, 0x8d9: 0x09a2, 0x8da: 0x09ba, 0x8db: 0x09be, 0x8dc: 0x09c6, 0x8dd: 0x09d6, + 0x8de: 0x0a72, 0x8df: 0x0a86, 0x8e0: 0x0ac6, 0x8e1: 0x0ada, 0x8e2: 0x0ae2, 0x8e3: 0x0ae6, + 0x8e4: 0x0af6, 0x8e5: 0x0b12, 0x8e6: 
0x0b3e, 0x8e7: 0x0b4a, 0x8e8: 0x0b6a, 0x8e9: 0x0b76, + 0x8ea: 0x0b7a, 0x8eb: 0x0b7e, 0x8ec: 0x0b96, 0x8ed: 0x0b9a, 0x8ee: 0x0bc6, 0x8ef: 0x0bd2, + 0x8f0: 0x0bda, 0x8f1: 0x0be2, 0x8f2: 0x0bf2, 0x8f3: 0x0bfa, 0x8f4: 0x0c02, 0x8f5: 0x0c2e, + 0x8f6: 0x0c32, 0x8f7: 0x0c3a, 0x8f8: 0x0c3e, 0x8f9: 0x0c46, 0x8fa: 0x0c4e, 0x8fb: 0x0c5e, + 0x8fc: 0x0c7a, 0x8fd: 0x0cf2, 0x8fe: 0x0d06, 0x8ff: 0x0d0a, + // Block 0x24, offset 0x900 + 0x900: 0x0d8a, 0x901: 0x0d8e, 0x902: 0x0da2, 0x903: 0x0da6, 0x904: 0x0dae, 0x905: 0x0db6, + 0x906: 0x0dbe, 0x907: 0x0dca, 0x908: 0x0df2, 0x909: 0x0e02, 0x90a: 0x0e16, 0x90b: 0x0e86, + 0x90c: 0x0e92, 0x90d: 0x0ea2, 0x90e: 0x0eae, 0x90f: 0x0eba, 0x910: 0x0ec2, 0x911: 0x0ec6, + 0x912: 0x0eca, 0x913: 0x0ece, 0x914: 0x0ed2, 0x915: 0x0f8a, 0x916: 0x0fd2, 0x917: 0x0fde, + 0x918: 0x0fe2, 0x919: 0x0fe6, 0x91a: 0x0fea, 0x91b: 0x0ff2, 0x91c: 0x0ff6, 0x91d: 0x100a, + 0x91e: 0x1026, 0x91f: 0x102e, 0x920: 0x106e, 0x921: 0x1072, 0x922: 0x107a, 0x923: 0x107e, + 0x924: 0x1086, 0x925: 0x108a, 0x926: 0x10ae, 0x927: 0x10b2, 0x928: 0x10ce, 0x929: 0x10d2, + 0x92a: 0x10d6, 0x92b: 0x10da, 0x92c: 0x10ee, 0x92d: 0x1112, 0x92e: 0x1116, 0x92f: 0x111a, + 0x930: 0x113e, 0x931: 0x117e, 0x932: 0x1182, 0x933: 0x11a2, 0x934: 0x11b2, 0x935: 0x11ba, + 0x936: 0x11da, 0x937: 0x11fe, 0x938: 0x1242, 0x939: 0x124a, 0x93a: 0x125e, 0x93b: 0x126a, + 0x93c: 0x1272, 0x93d: 0x127a, 0x93e: 0x127e, 0x93f: 0x1282, + // Block 0x25, offset 0x940 + 0x940: 0x129a, 0x941: 0x129e, 0x942: 0x12ba, 0x943: 0x12c2, 0x944: 0x12ca, 0x945: 0x12ce, + 0x946: 0x12da, 0x947: 0x12e2, 0x948: 0x12e6, 0x949: 0x12ea, 0x94a: 0x12f2, 0x94b: 0x12f6, + 0x94c: 0x1396, 0x94d: 0x13aa, 0x94e: 0x13de, 0x94f: 0x13e2, 0x950: 0x13ea, 0x951: 0x1416, + 0x952: 0x141e, 0x953: 0x1426, 0x954: 0x142e, 0x955: 0x146a, 0x956: 0x146e, 0x957: 0x1476, + 0x958: 0x147a, 0x959: 0x147e, 0x95a: 0x14aa, 0x95b: 0x14ae, 0x95c: 0x14b6, 0x95d: 0x14ca, + 0x95e: 0x14ce, 0x95f: 0x14ea, 0x960: 0x14f2, 0x961: 0x14f6, 0x962: 0x151a, 0x963: 0x153a, + 0x964: 
0x154e, 0x965: 0x1552, 0x966: 0x155a, 0x967: 0x1586, 0x968: 0x158a, 0x969: 0x159a, + 0x96a: 0x15be, 0x96b: 0x15ca, 0x96c: 0x15da, 0x96d: 0x15f2, 0x96e: 0x15fa, 0x96f: 0x15fe, + 0x970: 0x1602, 0x971: 0x1606, 0x972: 0x1612, 0x973: 0x1616, 0x974: 0x161e, 0x975: 0x163a, + 0x976: 0x163e, 0x977: 0x1642, 0x978: 0x165a, 0x979: 0x165e, 0x97a: 0x1666, 0x97b: 0x167a, + 0x97c: 0x167e, 0x97d: 0x1682, 0x97e: 0x168a, 0x97f: 0x168e, + // Block 0x26, offset 0x980 + 0x986: 0xa000, 0x98b: 0xa000, + 0x98c: 0x4049, 0x98d: 0xa000, 0x98e: 0x4051, 0x98f: 0xa000, 0x990: 0x4059, 0x991: 0xa000, + 0x992: 0x4061, 0x993: 0xa000, 0x994: 0x4069, 0x995: 0xa000, 0x996: 0x4071, 0x997: 0xa000, + 0x998: 0x4079, 0x999: 0xa000, 0x99a: 0x4081, 0x99b: 0xa000, 0x99c: 0x4089, 0x99d: 0xa000, + 0x99e: 0x4091, 0x99f: 0xa000, 0x9a0: 0x4099, 0x9a1: 0xa000, 0x9a2: 0x40a1, + 0x9a4: 0xa000, 0x9a5: 0x40a9, 0x9a6: 0xa000, 0x9a7: 0x40b1, 0x9a8: 0xa000, 0x9a9: 0x40b9, + 0x9af: 0xa000, + 0x9b0: 0x40c1, 0x9b1: 0x40c9, 0x9b2: 0xa000, 0x9b3: 0x40d1, 0x9b4: 0x40d9, 0x9b5: 0xa000, + 0x9b6: 0x40e1, 0x9b7: 0x40e9, 0x9b8: 0xa000, 0x9b9: 0x40f1, 0x9ba: 0x40f9, 0x9bb: 0xa000, + 0x9bc: 0x4101, 0x9bd: 0x4109, + // Block 0x27, offset 0x9c0 + 0x9d4: 0x4041, + 0x9d9: 0x9904, 0x9da: 0x9904, 0x9db: 0x441d, 0x9dc: 0x4423, 0x9dd: 0xa000, + 0x9de: 0x4111, 0x9df: 0x27e4, + 0x9e6: 0xa000, + 0x9eb: 0xa000, 0x9ec: 0x4121, 0x9ed: 0xa000, 0x9ee: 0x4129, 0x9ef: 0xa000, + 0x9f0: 0x4131, 0x9f1: 0xa000, 0x9f2: 0x4139, 0x9f3: 0xa000, 0x9f4: 0x4141, 0x9f5: 0xa000, + 0x9f6: 0x4149, 0x9f7: 0xa000, 0x9f8: 0x4151, 0x9f9: 0xa000, 0x9fa: 0x4159, 0x9fb: 0xa000, + 0x9fc: 0x4161, 0x9fd: 0xa000, 0x9fe: 0x4169, 0x9ff: 0xa000, + // Block 0x28, offset 0xa00 + 0xa00: 0x4171, 0xa01: 0xa000, 0xa02: 0x4179, 0xa04: 0xa000, 0xa05: 0x4181, + 0xa06: 0xa000, 0xa07: 0x4189, 0xa08: 0xa000, 0xa09: 0x4191, + 0xa0f: 0xa000, 0xa10: 0x4199, 0xa11: 0x41a1, + 0xa12: 0xa000, 0xa13: 0x41a9, 0xa14: 0x41b1, 0xa15: 0xa000, 0xa16: 0x41b9, 0xa17: 0x41c1, + 0xa18: 0xa000, 0xa19: 0x41c9, 
0xa1a: 0x41d1, 0xa1b: 0xa000, 0xa1c: 0x41d9, 0xa1d: 0x41e1, + 0xa2f: 0xa000, + 0xa30: 0xa000, 0xa31: 0xa000, 0xa32: 0xa000, 0xa34: 0x4119, + 0xa37: 0x41e9, 0xa38: 0x41f1, 0xa39: 0x41f9, 0xa3a: 0x4201, + 0xa3d: 0xa000, 0xa3e: 0x4209, 0xa3f: 0x27f9, + // Block 0x29, offset 0xa40 + 0xa40: 0x045a, 0xa41: 0x041e, 0xa42: 0x0422, 0xa43: 0x0426, 0xa44: 0x046e, 0xa45: 0x042a, + 0xa46: 0x042e, 0xa47: 0x0432, 0xa48: 0x0436, 0xa49: 0x043a, 0xa4a: 0x043e, 0xa4b: 0x0442, + 0xa4c: 0x0446, 0xa4d: 0x044a, 0xa4e: 0x044e, 0xa4f: 0x4afe, 0xa50: 0x4b04, 0xa51: 0x4b0a, + 0xa52: 0x4b10, 0xa53: 0x4b16, 0xa54: 0x4b1c, 0xa55: 0x4b22, 0xa56: 0x4b28, 0xa57: 0x4b2e, + 0xa58: 0x4b34, 0xa59: 0x4b3a, 0xa5a: 0x4b40, 0xa5b: 0x4b46, 0xa5c: 0x4b4c, 0xa5d: 0x4b52, + 0xa5e: 0x4b58, 0xa5f: 0x4b5e, 0xa60: 0x4b64, 0xa61: 0x4b6a, 0xa62: 0x4b70, 0xa63: 0x4b76, + 0xa64: 0x04b6, 0xa65: 0x0452, 0xa66: 0x0456, 0xa67: 0x04da, 0xa68: 0x04de, 0xa69: 0x04e2, + 0xa6a: 0x04e6, 0xa6b: 0x04ea, 0xa6c: 0x04ee, 0xa6d: 0x04f2, 0xa6e: 0x045e, 0xa6f: 0x04f6, + 0xa70: 0x04fa, 0xa71: 0x0462, 0xa72: 0x0466, 0xa73: 0x046a, 0xa74: 0x0472, 0xa75: 0x0476, + 0xa76: 0x047a, 0xa77: 0x047e, 0xa78: 0x0482, 0xa79: 0x0486, 0xa7a: 0x048a, 0xa7b: 0x048e, + 0xa7c: 0x0492, 0xa7d: 0x0496, 0xa7e: 0x049a, 0xa7f: 0x049e, + // Block 0x2a, offset 0xa80 + 0xa80: 0x04a2, 0xa81: 0x04a6, 0xa82: 0x04fe, 0xa83: 0x0502, 0xa84: 0x04aa, 0xa85: 0x04ae, + 0xa86: 0x04b2, 0xa87: 0x04ba, 0xa88: 0x04be, 0xa89: 0x04c2, 0xa8a: 0x04c6, 0xa8b: 0x04ca, + 0xa8c: 0x04ce, 0xa8d: 0x04d2, 0xa8e: 0x04d6, + 0xa92: 0x07ba, 0xa93: 0x0816, 0xa94: 0x07c6, 0xa95: 0x0a76, 0xa96: 0x07ca, 0xa97: 0x07e2, + 0xa98: 0x07ce, 0xa99: 0x108e, 0xa9a: 0x0802, 0xa9b: 0x07d6, 0xa9c: 0x07be, 0xa9d: 0x0afa, + 0xa9e: 0x0a8a, 0xa9f: 0x082a, + // Block 0x2b, offset 0xac0 + 0xac0: 0x2184, 0xac1: 0x218a, 0xac2: 0x2190, 0xac3: 0x2196, 0xac4: 0x219c, 0xac5: 0x21a2, + 0xac6: 0x21a8, 0xac7: 0x21ae, 0xac8: 0x21b4, 0xac9: 0x21ba, 0xaca: 0x21c0, 0xacb: 0x21c6, + 0xacc: 0x21cc, 0xacd: 0x21d2, 0xace: 0x285d, 
0xacf: 0x2866, 0xad0: 0x286f, 0xad1: 0x2878, + 0xad2: 0x2881, 0xad3: 0x288a, 0xad4: 0x2893, 0xad5: 0x289c, 0xad6: 0x28a5, 0xad7: 0x28b7, + 0xad8: 0x28c0, 0xad9: 0x28c9, 0xada: 0x28d2, 0xadb: 0x28db, 0xadc: 0x28ae, 0xadd: 0x2ce3, + 0xade: 0x2c24, 0xae0: 0x21d8, 0xae1: 0x21f0, 0xae2: 0x21e4, 0xae3: 0x2238, + 0xae4: 0x21f6, 0xae5: 0x2214, 0xae6: 0x21de, 0xae7: 0x220e, 0xae8: 0x21ea, 0xae9: 0x2220, + 0xaea: 0x2250, 0xaeb: 0x226e, 0xaec: 0x2268, 0xaed: 0x225c, 0xaee: 0x22aa, 0xaef: 0x223e, + 0xaf0: 0x224a, 0xaf1: 0x2262, 0xaf2: 0x2256, 0xaf3: 0x2280, 0xaf4: 0x222c, 0xaf5: 0x2274, + 0xaf6: 0x229e, 0xaf7: 0x2286, 0xaf8: 0x221a, 0xaf9: 0x21fc, 0xafa: 0x2232, 0xafb: 0x2244, + 0xafc: 0x227a, 0xafd: 0x2202, 0xafe: 0x22a4, 0xaff: 0x2226, + // Block 0x2c, offset 0xb00 + 0xb00: 0x228c, 0xb01: 0x2208, 0xb02: 0x2292, 0xb03: 0x2298, 0xb04: 0x0a2a, 0xb05: 0x0bfe, + 0xb06: 0x0da2, 0xb07: 0x11c2, + 0xb10: 0x1cf4, 0xb11: 0x19d6, + 0xb12: 0x19d9, 0xb13: 0x19dc, 0xb14: 0x19df, 0xb15: 0x19e2, 0xb16: 0x19e5, 0xb17: 0x19e8, + 0xb18: 0x19eb, 0xb19: 0x19ee, 0xb1a: 0x19f7, 0xb1b: 0x19fa, 0xb1c: 0x19fd, 0xb1d: 0x1a00, + 0xb1e: 0x1a03, 0xb1f: 0x1a06, 0xb20: 0x0406, 0xb21: 0x040e, 0xb22: 0x0412, 0xb23: 0x041a, + 0xb24: 0x041e, 0xb25: 0x0422, 0xb26: 0x042a, 0xb27: 0x0432, 0xb28: 0x0436, 0xb29: 0x043e, + 0xb2a: 0x0442, 0xb2b: 0x0446, 0xb2c: 0x044a, 0xb2d: 0x044e, 0xb2e: 0x2f59, 0xb2f: 0x2f61, + 0xb30: 0x2f69, 0xb31: 0x2f71, 0xb32: 0x2f79, 0xb33: 0x2f81, 0xb34: 0x2f89, 0xb35: 0x2f91, + 0xb36: 0x2fa1, 0xb37: 0x2fa9, 0xb38: 0x2fb1, 0xb39: 0x2fb9, 0xb3a: 0x2fc1, 0xb3b: 0x2fc9, + 0xb3c: 0x3014, 0xb3d: 0x2fdc, 0xb3e: 0x2f99, + // Block 0x2d, offset 0xb40 + 0xb40: 0x07ba, 0xb41: 0x0816, 0xb42: 0x07c6, 0xb43: 0x0a76, 0xb44: 0x081a, 0xb45: 0x08aa, + 0xb46: 0x07c2, 0xb47: 0x08a6, 0xb48: 0x0806, 0xb49: 0x0982, 0xb4a: 0x0e02, 0xb4b: 0x0f8a, + 0xb4c: 0x0ed2, 0xb4d: 0x0e16, 0xb4e: 0x155a, 0xb4f: 0x0a86, 0xb50: 0x0dca, 0xb51: 0x0e46, + 0xb52: 0x0e06, 0xb53: 0x1146, 0xb54: 0x09f6, 0xb55: 0x0ffe, 0xb56: 0x1482, 
0xb57: 0x115a, + 0xb58: 0x093e, 0xb59: 0x118a, 0xb5a: 0x1096, 0xb5b: 0x0b12, 0xb5c: 0x150a, 0xb5d: 0x087a, + 0xb5e: 0x09a6, 0xb5f: 0x0ef2, 0xb60: 0x1622, 0xb61: 0x083e, 0xb62: 0x08ce, 0xb63: 0x0e96, + 0xb64: 0x07ca, 0xb65: 0x07e2, 0xb66: 0x07ce, 0xb67: 0x0bd6, 0xb68: 0x09ea, 0xb69: 0x097a, + 0xb6a: 0x0b52, 0xb6b: 0x0b46, 0xb6c: 0x10e6, 0xb6d: 0x083a, 0xb6e: 0x1496, 0xb6f: 0x0996, + 0xb70: 0x0aee, 0xb71: 0x1a09, 0xb72: 0x1a0c, 0xb73: 0x1a0f, 0xb74: 0x1a12, 0xb75: 0x1a1b, + 0xb76: 0x1a1e, 0xb77: 0x1a21, 0xb78: 0x1a24, 0xb79: 0x1a27, 0xb7a: 0x1a2a, 0xb7b: 0x1a2d, + 0xb7c: 0x1a30, 0xb7d: 0x1a33, 0xb7e: 0x1a36, 0xb7f: 0x1a3f, + // Block 0x2e, offset 0xb80 + 0xb80: 0x1df6, 0xb81: 0x1e05, 0xb82: 0x1e14, 0xb83: 0x1e23, 0xb84: 0x1e32, 0xb85: 0x1e41, + 0xb86: 0x1e50, 0xb87: 0x1e5f, 0xb88: 0x1e6e, 0xb89: 0x22bc, 0xb8a: 0x22ce, 0xb8b: 0x22e0, + 0xb8c: 0x1a81, 0xb8d: 0x1d34, 0xb8e: 0x1b02, 0xb8f: 0x1cd8, 0xb90: 0x05c6, 0xb91: 0x05ce, + 0xb92: 0x05d6, 0xb93: 0x05de, 0xb94: 0x05e6, 0xb95: 0x05ea, 0xb96: 0x05ee, 0xb97: 0x05f2, + 0xb98: 0x05f6, 0xb99: 0x05fa, 0xb9a: 0x05fe, 0xb9b: 0x0602, 0xb9c: 0x0606, 0xb9d: 0x060a, + 0xb9e: 0x060e, 0xb9f: 0x0612, 0xba0: 0x0616, 0xba1: 0x061e, 0xba2: 0x0622, 0xba3: 0x0626, + 0xba4: 0x062a, 0xba5: 0x062e, 0xba6: 0x0632, 0xba7: 0x0636, 0xba8: 0x063a, 0xba9: 0x063e, + 0xbaa: 0x0642, 0xbab: 0x0646, 0xbac: 0x064a, 0xbad: 0x064e, 0xbae: 0x0652, 0xbaf: 0x0656, + 0xbb0: 0x065a, 0xbb1: 0x065e, 0xbb2: 0x0662, 0xbb3: 0x066a, 0xbb4: 0x0672, 0xbb5: 0x067a, + 0xbb6: 0x067e, 0xbb7: 0x0682, 0xbb8: 0x0686, 0xbb9: 0x068a, 0xbba: 0x068e, 0xbbb: 0x0692, + 0xbbc: 0x0696, 0xbbd: 0x069a, 0xbbe: 0x069e, 0xbbf: 0x282a, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x2c43, 0xbc1: 0x2adf, 0xbc2: 0x2c53, 0xbc3: 0x29b7, 0xbc4: 0x3025, 0xbc5: 0x29c1, + 0xbc6: 0x29cb, 0xbc7: 0x3069, 0xbc8: 0x2aec, 0xbc9: 0x29d5, 0xbca: 0x29df, 0xbcb: 0x29e9, + 0xbcc: 0x2b13, 0xbcd: 0x2b20, 0xbce: 0x2af9, 0xbcf: 0x2b06, 0xbd0: 0x2fea, 0xbd1: 0x2b2d, + 0xbd2: 0x2b3a, 0xbd3: 0x2cf5, 0xbd4: 0x27eb, 
0xbd5: 0x2d08, 0xbd6: 0x2d1b, 0xbd7: 0x2c63, + 0xbd8: 0x2b47, 0xbd9: 0x2d2e, 0xbda: 0x2d41, 0xbdb: 0x2b54, 0xbdc: 0x29f3, 0xbdd: 0x29fd, + 0xbde: 0x2ff8, 0xbdf: 0x2b61, 0xbe0: 0x2c73, 0xbe1: 0x3036, 0xbe2: 0x2a07, 0xbe3: 0x2a11, + 0xbe4: 0x2b6e, 0xbe5: 0x2a1b, 0xbe6: 0x2a25, 0xbe7: 0x2800, 0xbe8: 0x2807, 0xbe9: 0x2a2f, + 0xbea: 0x2a39, 0xbeb: 0x2d54, 0xbec: 0x2b7b, 0xbed: 0x2c83, 0xbee: 0x2d67, 0xbef: 0x2b88, + 0xbf0: 0x2a4d, 0xbf1: 0x2a43, 0xbf2: 0x307d, 0xbf3: 0x2b95, 0xbf4: 0x2d7a, 0xbf5: 0x2a57, + 0xbf6: 0x2c93, 0xbf7: 0x2a61, 0xbf8: 0x2baf, 0xbf9: 0x2a6b, 0xbfa: 0x2bbc, 0xbfb: 0x3047, + 0xbfc: 0x2ba2, 0xbfd: 0x2ca3, 0xbfe: 0x2bc9, 0xbff: 0x280e, + // Block 0x30, offset 0xc00 + 0xc00: 0x3058, 0xc01: 0x2a75, 0xc02: 0x2a7f, 0xc03: 0x2bd6, 0xc04: 0x2a89, 0xc05: 0x2a93, + 0xc06: 0x2a9d, 0xc07: 0x2cb3, 0xc08: 0x2be3, 0xc09: 0x2815, 0xc0a: 0x2d8d, 0xc0b: 0x2fd1, + 0xc0c: 0x2cc3, 0xc0d: 0x2bf0, 0xc0e: 0x3006, 0xc0f: 0x2aa7, 0xc10: 0x2ab1, 0xc11: 0x2bfd, + 0xc12: 0x281c, 0xc13: 0x2c0a, 0xc14: 0x2cd3, 0xc15: 0x2823, 0xc16: 0x2da0, 0xc17: 0x2abb, + 0xc18: 0x1de7, 0xc19: 0x1dfb, 0xc1a: 0x1e0a, 0xc1b: 0x1e19, 0xc1c: 0x1e28, 0xc1d: 0x1e37, + 0xc1e: 0x1e46, 0xc1f: 0x1e55, 0xc20: 0x1e64, 0xc21: 0x1e73, 0xc22: 0x22c2, 0xc23: 0x22d4, + 0xc24: 0x22e6, 0xc25: 0x22f2, 0xc26: 0x22fe, 0xc27: 0x230a, 0xc28: 0x2316, 0xc29: 0x2322, + 0xc2a: 0x232e, 0xc2b: 0x233a, 0xc2c: 0x2376, 0xc2d: 0x2382, 0xc2e: 0x238e, 0xc2f: 0x239a, + 0xc30: 0x23a6, 0xc31: 0x1d44, 0xc32: 0x1af6, 0xc33: 0x1a63, 0xc34: 0x1d14, 0xc35: 0x1b77, + 0xc36: 0x1b86, 0xc37: 0x1afc, 0xc38: 0x1d2c, 0xc39: 0x1d30, 0xc3a: 0x1a8d, 0xc3b: 0x2838, + 0xc3c: 0x2846, 0xc3d: 0x2831, 0xc3e: 0x283f, 0xc3f: 0x2c17, + // Block 0x31, offset 0xc40 + 0xc40: 0x1b7a, 0xc41: 0x1b62, 0xc42: 0x1d90, 0xc43: 0x1b4a, 0xc44: 0x1b23, 0xc45: 0x1a96, + 0xc46: 0x1aa5, 0xc47: 0x1a75, 0xc48: 0x1d20, 0xc49: 0x1e82, 0xc4a: 0x1b7d, 0xc4b: 0x1b65, + 0xc4c: 0x1d94, 0xc4d: 0x1da0, 0xc4e: 0x1b56, 0xc4f: 0x1b2c, 0xc50: 0x1a84, 0xc51: 0x1d4c, + 0xc52: 0x1ce0, 
0xc53: 0x1ccc, 0xc54: 0x1cfc, 0xc55: 0x1da4, 0xc56: 0x1b59, 0xc57: 0x1af9, + 0xc58: 0x1b2f, 0xc59: 0x1b0e, 0xc5a: 0x1b71, 0xc5b: 0x1da8, 0xc5c: 0x1b5c, 0xc5d: 0x1af0, + 0xc5e: 0x1b32, 0xc5f: 0x1d6c, 0xc60: 0x1d24, 0xc61: 0x1b44, 0xc62: 0x1d54, 0xc63: 0x1d70, + 0xc64: 0x1d28, 0xc65: 0x1b47, 0xc66: 0x1d58, 0xc67: 0x2418, 0xc68: 0x242c, 0xc69: 0x1ac6, + 0xc6a: 0x1d50, 0xc6b: 0x1ce4, 0xc6c: 0x1cd0, 0xc6d: 0x1d78, 0xc6e: 0x284d, 0xc6f: 0x28e4, + 0xc70: 0x1b89, 0xc71: 0x1b74, 0xc72: 0x1dac, 0xc73: 0x1b5f, 0xc74: 0x1b80, 0xc75: 0x1b68, + 0xc76: 0x1d98, 0xc77: 0x1b4d, 0xc78: 0x1b26, 0xc79: 0x1ab1, 0xc7a: 0x1b83, 0xc7b: 0x1b6b, + 0xc7c: 0x1d9c, 0xc7d: 0x1b50, 0xc7e: 0x1b29, 0xc7f: 0x1ab4, + // Block 0x32, offset 0xc80 + 0xc80: 0x1d5c, 0xc81: 0x1ce8, 0xc82: 0x1e7d, 0xc83: 0x1a66, 0xc84: 0x1aea, 0xc85: 0x1aed, + 0xc86: 0x2425, 0xc87: 0x1cc4, 0xc88: 0x1af3, 0xc89: 0x1a78, 0xc8a: 0x1b11, 0xc8b: 0x1a7b, + 0xc8c: 0x1b1a, 0xc8d: 0x1a99, 0xc8e: 0x1a9c, 0xc8f: 0x1b35, 0xc90: 0x1b3b, 0xc91: 0x1b3e, + 0xc92: 0x1d60, 0xc93: 0x1b41, 0xc94: 0x1b53, 0xc95: 0x1d68, 0xc96: 0x1d74, 0xc97: 0x1ac0, + 0xc98: 0x1e87, 0xc99: 0x1cec, 0xc9a: 0x1ac3, 0xc9b: 0x1b8c, 0xc9c: 0x1ad5, 0xc9d: 0x1ae4, + 0xc9e: 0x2412, 0xc9f: 0x240c, 0xca0: 0x1df1, 0xca1: 0x1e00, 0xca2: 0x1e0f, 0xca3: 0x1e1e, + 0xca4: 0x1e2d, 0xca5: 0x1e3c, 0xca6: 0x1e4b, 0xca7: 0x1e5a, 0xca8: 0x1e69, 0xca9: 0x22b6, + 0xcaa: 0x22c8, 0xcab: 0x22da, 0xcac: 0x22ec, 0xcad: 0x22f8, 0xcae: 0x2304, 0xcaf: 0x2310, + 0xcb0: 0x231c, 0xcb1: 0x2328, 0xcb2: 0x2334, 0xcb3: 0x2370, 0xcb4: 0x237c, 0xcb5: 0x2388, + 0xcb6: 0x2394, 0xcb7: 0x23a0, 0xcb8: 0x23ac, 0xcb9: 0x23b2, 0xcba: 0x23b8, 0xcbb: 0x23be, + 0xcbc: 0x23c4, 0xcbd: 0x23d6, 0xcbe: 0x23dc, 0xcbf: 0x1d40, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x1472, 0xcc1: 0x0df6, 0xcc2: 0x14ce, 0xcc3: 0x149a, 0xcc4: 0x0f52, 0xcc5: 0x07e6, + 0xcc6: 0x09da, 0xcc7: 0x1726, 0xcc8: 0x1726, 0xcc9: 0x0b06, 0xcca: 0x155a, 0xccb: 0x0a3e, + 0xccc: 0x0b02, 0xccd: 0x0cea, 0xcce: 0x10ca, 0xccf: 0x125a, 0xcd0: 0x1392, 0xcd1: 
0x13ce, + 0xcd2: 0x1402, 0xcd3: 0x1516, 0xcd4: 0x0e6e, 0xcd5: 0x0efa, 0xcd6: 0x0fa6, 0xcd7: 0x103e, + 0xcd8: 0x135a, 0xcd9: 0x1542, 0xcda: 0x166e, 0xcdb: 0x080a, 0xcdc: 0x09ae, 0xcdd: 0x0e82, + 0xcde: 0x0fca, 0xcdf: 0x138e, 0xce0: 0x16be, 0xce1: 0x0bae, 0xce2: 0x0f72, 0xce3: 0x137e, + 0xce4: 0x1412, 0xce5: 0x0d1e, 0xce6: 0x12b6, 0xce7: 0x13da, 0xce8: 0x0c1a, 0xce9: 0x0e0a, + 0xcea: 0x0f12, 0xceb: 0x1016, 0xcec: 0x1522, 0xced: 0x084a, 0xcee: 0x08e2, 0xcef: 0x094e, + 0xcf0: 0x0d86, 0xcf1: 0x0e7a, 0xcf2: 0x0fc6, 0xcf3: 0x10ea, 0xcf4: 0x1272, 0xcf5: 0x1386, + 0xcf6: 0x139e, 0xcf7: 0x14c2, 0xcf8: 0x15ea, 0xcf9: 0x169e, 0xcfa: 0x16ba, 0xcfb: 0x1126, + 0xcfc: 0x1166, 0xcfd: 0x121e, 0xcfe: 0x133e, 0xcff: 0x1576, + // Block 0x34, offset 0xd00 + 0xd00: 0x16c6, 0xd01: 0x1446, 0xd02: 0x0ac2, 0xd03: 0x0c36, 0xd04: 0x11d6, 0xd05: 0x1296, + 0xd06: 0x0ffa, 0xd07: 0x112e, 0xd08: 0x1492, 0xd09: 0x15e2, 0xd0a: 0x0abe, 0xd0b: 0x0b8a, + 0xd0c: 0x0e72, 0xd0d: 0x0f26, 0xd0e: 0x0f5a, 0xd0f: 0x120e, 0xd10: 0x1236, 0xd11: 0x15a2, + 0xd12: 0x094a, 0xd13: 0x12a2, 0xd14: 0x08ee, 0xd15: 0x08ea, 0xd16: 0x1192, 0xd17: 0x1222, + 0xd18: 0x1356, 0xd19: 0x15aa, 0xd1a: 0x1462, 0xd1b: 0x0d22, 0xd1c: 0x0e6e, 0xd1d: 0x1452, + 0xd1e: 0x07f2, 0xd1f: 0x0b5e, 0xd20: 0x0c8e, 0xd21: 0x102a, 0xd22: 0x10aa, 0xd23: 0x096e, + 0xd24: 0x1136, 0xd25: 0x085a, 0xd26: 0x0c72, 0xd27: 0x07d2, 0xd28: 0x0ee6, 0xd29: 0x0d9e, + 0xd2a: 0x120a, 0xd2b: 0x09c2, 0xd2c: 0x0aae, 0xd2d: 0x10f6, 0xd2e: 0x135e, 0xd2f: 0x1436, + 0xd30: 0x0eb2, 0xd31: 0x14f2, 0xd32: 0x0ede, 0xd33: 0x0d32, 0xd34: 0x1316, 0xd35: 0x0d52, + 0xd36: 0x10a6, 0xd37: 0x0826, 0xd38: 0x08a2, 0xd39: 0x08e6, 0xd3a: 0x0e4e, 0xd3b: 0x11f6, + 0xd3c: 0x12ee, 0xd3d: 0x1442, 0xd3e: 0x1556, 0xd3f: 0x0956, + // Block 0x35, offset 0xd40 + 0xd40: 0x0a0a, 0xd41: 0x0b12, 0xd42: 0x0c2a, 0xd43: 0x0dba, 0xd44: 0x0f76, 0xd45: 0x113a, + 0xd46: 0x1592, 0xd47: 0x1676, 0xd48: 0x16ca, 0xd49: 0x16e2, 0xd4a: 0x0932, 0xd4b: 0x0dee, + 0xd4c: 0x0e9e, 0xd4d: 0x14e6, 0xd4e: 0x0bf6, 0xd4f: 
0x0cd2, 0xd50: 0x0cee, 0xd51: 0x0d7e, + 0xd52: 0x0f66, 0xd53: 0x0fb2, 0xd54: 0x1062, 0xd55: 0x1186, 0xd56: 0x122a, 0xd57: 0x128e, + 0xd58: 0x14d6, 0xd59: 0x1366, 0xd5a: 0x14fe, 0xd5b: 0x157a, 0xd5c: 0x090a, 0xd5d: 0x0936, + 0xd5e: 0x0a1e, 0xd5f: 0x0fa2, 0xd60: 0x13ee, 0xd61: 0x1436, 0xd62: 0x0c16, 0xd63: 0x0c86, + 0xd64: 0x0d4a, 0xd65: 0x0eaa, 0xd66: 0x11d2, 0xd67: 0x101e, 0xd68: 0x0836, 0xd69: 0x0a7a, + 0xd6a: 0x0b5e, 0xd6b: 0x0bc2, 0xd6c: 0x0c92, 0xd6d: 0x103a, 0xd6e: 0x1056, 0xd6f: 0x1266, + 0xd70: 0x1286, 0xd71: 0x155e, 0xd72: 0x15de, 0xd73: 0x15ee, 0xd74: 0x162a, 0xd75: 0x084e, + 0xd76: 0x117a, 0xd77: 0x154a, 0xd78: 0x15c6, 0xd79: 0x0caa, 0xd7a: 0x0812, 0xd7b: 0x0872, + 0xd7c: 0x0b62, 0xd7d: 0x0b82, 0xd7e: 0x0daa, 0xd7f: 0x0e6e, + // Block 0x36, offset 0xd80 + 0xd80: 0x0fbe, 0xd81: 0x10c6, 0xd82: 0x1372, 0xd83: 0x1512, 0xd84: 0x171e, 0xd85: 0x0dde, + 0xd86: 0x159e, 0xd87: 0x092e, 0xd88: 0x0e2a, 0xd89: 0x0e36, 0xd8a: 0x0f0a, 0xd8b: 0x0f42, + 0xd8c: 0x1046, 0xd8d: 0x10a2, 0xd8e: 0x1122, 0xd8f: 0x1206, 0xd90: 0x1636, 0xd91: 0x08aa, + 0xd92: 0x0cfe, 0xd93: 0x15ae, 0xd94: 0x0862, 0xd95: 0x0ba6, 0xd96: 0x0f2a, 0xd97: 0x14da, + 0xd98: 0x0c62, 0xd99: 0x0cb2, 0xd9a: 0x0e3e, 0xd9b: 0x102a, 0xd9c: 0x15b6, 0xd9d: 0x0912, + 0xd9e: 0x09fa, 0xd9f: 0x0b92, 0xda0: 0x0dce, 0xda1: 0x0e1a, 0xda2: 0x0e5a, 0xda3: 0x0eee, + 0xda4: 0x1042, 0xda5: 0x10b6, 0xda6: 0x1252, 0xda7: 0x13f2, 0xda8: 0x13fe, 0xda9: 0x1552, + 0xdaa: 0x15d2, 0xdab: 0x097e, 0xdac: 0x0f46, 0xdad: 0x09fe, 0xdae: 0x0fc2, 0xdaf: 0x1066, + 0xdb0: 0x1382, 0xdb1: 0x15ba, 0xdb2: 0x16a6, 0xdb3: 0x16ce, 0xdb4: 0x0e32, 0xdb5: 0x0f22, + 0xdb6: 0x12be, 0xdb7: 0x11b2, 0xdb8: 0x11be, 0xdb9: 0x11e2, 0xdba: 0x1012, 0xdbb: 0x0f9a, + 0xdbc: 0x145e, 0xdbd: 0x082e, 0xdbe: 0x1326, 0xdbf: 0x0916, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0906, 0xdc1: 0x0c06, 0xdc2: 0x0d26, 0xdc3: 0x11ee, 0xdc4: 0x0b4e, 0xdc5: 0x0efe, + 0xdc6: 0x0dea, 0xdc7: 0x14e2, 0xdc8: 0x13e2, 0xdc9: 0x15a6, 0xdca: 0x141e, 0xdcb: 0x0c22, + 0xdcc: 0x0882, 0xdcd: 
0x0a56, 0xdd0: 0x0aaa, + 0xdd2: 0x0dda, 0xdd5: 0x08f2, 0xdd6: 0x101a, 0xdd7: 0x10de, + 0xdd8: 0x1142, 0xdd9: 0x115e, 0xdda: 0x1162, 0xddb: 0x1176, 0xddc: 0x15f6, 0xddd: 0x11e6, + 0xdde: 0x126a, 0xde0: 0x138a, 0xde2: 0x144e, + 0xde5: 0x1502, 0xde6: 0x152e, + 0xdea: 0x164a, 0xdeb: 0x164e, 0xdec: 0x1652, 0xded: 0x16b6, 0xdee: 0x1526, 0xdef: 0x15c2, + 0xdf0: 0x0852, 0xdf1: 0x0876, 0xdf2: 0x088a, 0xdf3: 0x0946, 0xdf4: 0x0952, 0xdf5: 0x0992, + 0xdf6: 0x0a46, 0xdf7: 0x0a62, 0xdf8: 0x0a6a, 0xdf9: 0x0aa6, 0xdfa: 0x0ab2, 0xdfb: 0x0b8e, + 0xdfc: 0x0b96, 0xdfd: 0x0c9e, 0xdfe: 0x0cc6, 0xdff: 0x0cce, + // Block 0x38, offset 0xe00 + 0xe00: 0x0ce6, 0xe01: 0x0d92, 0xe02: 0x0dc2, 0xe03: 0x0de2, 0xe04: 0x0e52, 0xe05: 0x0f16, + 0xe06: 0x0f32, 0xe07: 0x0f62, 0xe08: 0x0fb6, 0xe09: 0x0fd6, 0xe0a: 0x104a, 0xe0b: 0x112a, + 0xe0c: 0x1146, 0xe0d: 0x114e, 0xe0e: 0x114a, 0xe0f: 0x1152, 0xe10: 0x1156, 0xe11: 0x115a, + 0xe12: 0x116e, 0xe13: 0x1172, 0xe14: 0x1196, 0xe15: 0x11aa, 0xe16: 0x11c6, 0xe17: 0x122a, + 0xe18: 0x1232, 0xe19: 0x123a, 0xe1a: 0x124e, 0xe1b: 0x1276, 0xe1c: 0x12c6, 0xe1d: 0x12fa, + 0xe1e: 0x12fa, 0xe1f: 0x1362, 0xe20: 0x140a, 0xe21: 0x1422, 0xe22: 0x1456, 0xe23: 0x145a, + 0xe24: 0x149e, 0xe25: 0x14a2, 0xe26: 0x14fa, 0xe27: 0x1502, 0xe28: 0x15d6, 0xe29: 0x161a, + 0xe2a: 0x1632, 0xe2b: 0x0c96, 0xe2c: 0x184b, 0xe2d: 0x12de, + 0xe30: 0x07da, 0xe31: 0x08de, 0xe32: 0x089e, 0xe33: 0x0846, 0xe34: 0x0886, 0xe35: 0x08b2, + 0xe36: 0x0942, 0xe37: 0x095e, 0xe38: 0x0a46, 0xe39: 0x0a32, 0xe3a: 0x0a42, 0xe3b: 0x0a5e, + 0xe3c: 0x0aaa, 0xe3d: 0x0aba, 0xe3e: 0x0afe, 0xe3f: 0x0b0a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0b26, 0xe41: 0x0b36, 0xe42: 0x0c1e, 0xe43: 0x0c26, 0xe44: 0x0c56, 0xe45: 0x0c76, + 0xe46: 0x0ca6, 0xe47: 0x0cbe, 0xe48: 0x0cae, 0xe49: 0x0cce, 0xe4a: 0x0cc2, 0xe4b: 0x0ce6, + 0xe4c: 0x0d02, 0xe4d: 0x0d5a, 0xe4e: 0x0d66, 0xe4f: 0x0d6e, 0xe50: 0x0d96, 0xe51: 0x0dda, + 0xe52: 0x0e0a, 0xe53: 0x0e0e, 0xe54: 0x0e22, 0xe55: 0x0ea2, 0xe56: 0x0eb2, 0xe57: 0x0f0a, + 0xe58: 0x0f56, 0xe59: 
0x0f4e, 0xe5a: 0x0f62, 0xe5b: 0x0f7e, 0xe5c: 0x0fb6, 0xe5d: 0x110e, + 0xe5e: 0x0fda, 0xe5f: 0x100e, 0xe60: 0x101a, 0xe61: 0x105a, 0xe62: 0x1076, 0xe63: 0x109a, + 0xe64: 0x10be, 0xe65: 0x10c2, 0xe66: 0x10de, 0xe67: 0x10e2, 0xe68: 0x10f2, 0xe69: 0x1106, + 0xe6a: 0x1102, 0xe6b: 0x1132, 0xe6c: 0x11ae, 0xe6d: 0x11c6, 0xe6e: 0x11de, 0xe6f: 0x1216, + 0xe70: 0x122a, 0xe71: 0x1246, 0xe72: 0x1276, 0xe73: 0x132a, 0xe74: 0x1352, 0xe75: 0x13c6, + 0xe76: 0x140e, 0xe77: 0x141a, 0xe78: 0x1422, 0xe79: 0x143a, 0xe7a: 0x144e, 0xe7b: 0x143e, + 0xe7c: 0x1456, 0xe7d: 0x1452, 0xe7e: 0x144a, 0xe7f: 0x145a, + // Block 0x3a, offset 0xe80 + 0xe80: 0x1466, 0xe81: 0x14a2, 0xe82: 0x14de, 0xe83: 0x150e, 0xe84: 0x1546, 0xe85: 0x1566, + 0xe86: 0x15b2, 0xe87: 0x15d6, 0xe88: 0x15f6, 0xe89: 0x160a, 0xe8a: 0x161a, 0xe8b: 0x1626, + 0xe8c: 0x1632, 0xe8d: 0x1686, 0xe8e: 0x1726, 0xe8f: 0x17e2, 0xe90: 0x17dd, 0xe91: 0x180f, + 0xe92: 0x0702, 0xe93: 0x072a, 0xe94: 0x072e, 0xe95: 0x1891, 0xe96: 0x18be, 0xe97: 0x1936, + 0xe98: 0x1712, 0xe99: 0x1722, + // Block 0x3b, offset 0xec0 + 0xec0: 0x1b05, 0xec1: 0x1b08, 0xec2: 0x1b0b, 0xec3: 0x1d38, 0xec4: 0x1d3c, 0xec5: 0x1b8f, + 0xec6: 0x1b8f, + 0xed3: 0x1ea5, 0xed4: 0x1e96, 0xed5: 0x1e9b, 0xed6: 0x1eaa, 0xed7: 0x1ea0, + 0xedd: 0x44d1, + 0xede: 0x8116, 0xedf: 0x4543, 0xee0: 0x0320, 0xee1: 0x0308, 0xee2: 0x0311, 0xee3: 0x0314, + 0xee4: 0x0317, 0xee5: 0x031a, 0xee6: 0x031d, 0xee7: 0x0323, 0xee8: 0x0326, 0xee9: 0x0017, + 0xeea: 0x4531, 0xeeb: 0x4537, 0xeec: 0x4635, 0xeed: 0x463d, 0xeee: 0x4489, 0xeef: 0x448f, + 0xef0: 0x4495, 0xef1: 0x449b, 0xef2: 0x44a7, 0xef3: 0x44ad, 0xef4: 0x44b3, 0xef5: 0x44bf, + 0xef6: 0x44c5, 0xef8: 0x44cb, 0xef9: 0x44d7, 0xefa: 0x44dd, 0xefb: 0x44e3, + 0xefc: 0x44ef, 0xefe: 0x44f5, + // Block 0x3c, offset 0xf00 + 0xf00: 0x44fb, 0xf01: 0x4501, 0xf03: 0x4507, 0xf04: 0x450d, + 0xf06: 0x4519, 0xf07: 0x451f, 0xf08: 0x4525, 0xf09: 0x452b, 0xf0a: 0x453d, 0xf0b: 0x44b9, + 0xf0c: 0x44a1, 0xf0d: 0x44e9, 0xf0e: 0x4513, 0xf0f: 0x1eaf, 0xf10: 0x038c, 0xf11: 
0x038c, + 0xf12: 0x0395, 0xf13: 0x0395, 0xf14: 0x0395, 0xf15: 0x0395, 0xf16: 0x0398, 0xf17: 0x0398, + 0xf18: 0x0398, 0xf19: 0x0398, 0xf1a: 0x039e, 0xf1b: 0x039e, 0xf1c: 0x039e, 0xf1d: 0x039e, + 0xf1e: 0x0392, 0xf1f: 0x0392, 0xf20: 0x0392, 0xf21: 0x0392, 0xf22: 0x039b, 0xf23: 0x039b, + 0xf24: 0x039b, 0xf25: 0x039b, 0xf26: 0x038f, 0xf27: 0x038f, 0xf28: 0x038f, 0xf29: 0x038f, + 0xf2a: 0x03c2, 0xf2b: 0x03c2, 0xf2c: 0x03c2, 0xf2d: 0x03c2, 0xf2e: 0x03c5, 0xf2f: 0x03c5, + 0xf30: 0x03c5, 0xf31: 0x03c5, 0xf32: 0x03a4, 0xf33: 0x03a4, 0xf34: 0x03a4, 0xf35: 0x03a4, + 0xf36: 0x03a1, 0xf37: 0x03a1, 0xf38: 0x03a1, 0xf39: 0x03a1, 0xf3a: 0x03a7, 0xf3b: 0x03a7, + 0xf3c: 0x03a7, 0xf3d: 0x03a7, 0xf3e: 0x03aa, 0xf3f: 0x03aa, + // Block 0x3d, offset 0xf40 + 0xf40: 0x03aa, 0xf41: 0x03aa, 0xf42: 0x03b3, 0xf43: 0x03b3, 0xf44: 0x03b0, 0xf45: 0x03b0, + 0xf46: 0x03b6, 0xf47: 0x03b6, 0xf48: 0x03ad, 0xf49: 0x03ad, 0xf4a: 0x03bc, 0xf4b: 0x03bc, + 0xf4c: 0x03b9, 0xf4d: 0x03b9, 0xf4e: 0x03c8, 0xf4f: 0x03c8, 0xf50: 0x03c8, 0xf51: 0x03c8, + 0xf52: 0x03ce, 0xf53: 0x03ce, 0xf54: 0x03ce, 0xf55: 0x03ce, 0xf56: 0x03d4, 0xf57: 0x03d4, + 0xf58: 0x03d4, 0xf59: 0x03d4, 0xf5a: 0x03d1, 0xf5b: 0x03d1, 0xf5c: 0x03d1, 0xf5d: 0x03d1, + 0xf5e: 0x03d7, 0xf5f: 0x03d7, 0xf60: 0x03da, 0xf61: 0x03da, 0xf62: 0x03da, 0xf63: 0x03da, + 0xf64: 0x45af, 0xf65: 0x45af, 0xf66: 0x03e0, 0xf67: 0x03e0, 0xf68: 0x03e0, 0xf69: 0x03e0, + 0xf6a: 0x03dd, 0xf6b: 0x03dd, 0xf6c: 0x03dd, 0xf6d: 0x03dd, 0xf6e: 0x03fb, 0xf6f: 0x03fb, + 0xf70: 0x45a9, 0xf71: 0x45a9, + // Block 0x3e, offset 0xf80 + 0xf93: 0x03cb, 0xf94: 0x03cb, 0xf95: 0x03cb, 0xf96: 0x03cb, 0xf97: 0x03e9, + 0xf98: 0x03e9, 0xf99: 0x03e6, 0xf9a: 0x03e6, 0xf9b: 0x03ec, 0xf9c: 0x03ec, 0xf9d: 0x217f, + 0xf9e: 0x03f2, 0xf9f: 0x03f2, 0xfa0: 0x03e3, 0xfa1: 0x03e3, 0xfa2: 0x03ef, 0xfa3: 0x03ef, + 0xfa4: 0x03f8, 0xfa5: 0x03f8, 0xfa6: 0x03f8, 0xfa7: 0x03f8, 0xfa8: 0x0380, 0xfa9: 0x0380, + 0xfaa: 0x26da, 0xfab: 0x26da, 0xfac: 0x274a, 0xfad: 0x274a, 0xfae: 0x2719, 0xfaf: 0x2719, + 0xfb0: 
0x2735, 0xfb1: 0x2735, 0xfb2: 0x272e, 0xfb3: 0x272e, 0xfb4: 0x273c, 0xfb5: 0x273c, + 0xfb6: 0x2743, 0xfb7: 0x2743, 0xfb8: 0x2743, 0xfb9: 0x2720, 0xfba: 0x2720, 0xfbb: 0x2720, + 0xfbc: 0x03f5, 0xfbd: 0x03f5, 0xfbe: 0x03f5, 0xfbf: 0x03f5, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x26e1, 0xfc1: 0x26e8, 0xfc2: 0x2704, 0xfc3: 0x2720, 0xfc4: 0x2727, 0xfc5: 0x1eb9, + 0xfc6: 0x1ebe, 0xfc7: 0x1ec3, 0xfc8: 0x1ed2, 0xfc9: 0x1ee1, 0xfca: 0x1ee6, 0xfcb: 0x1eeb, + 0xfcc: 0x1ef0, 0xfcd: 0x1ef5, 0xfce: 0x1f04, 0xfcf: 0x1f13, 0xfd0: 0x1f18, 0xfd1: 0x1f1d, + 0xfd2: 0x1f2c, 0xfd3: 0x1f3b, 0xfd4: 0x1f40, 0xfd5: 0x1f45, 0xfd6: 0x1f4a, 0xfd7: 0x1f59, + 0xfd8: 0x1f5e, 0xfd9: 0x1f6d, 0xfda: 0x1f72, 0xfdb: 0x1f77, 0xfdc: 0x1f86, 0xfdd: 0x1f8b, + 0xfde: 0x1f90, 0xfdf: 0x1f9a, 0xfe0: 0x1fd6, 0xfe1: 0x1fe5, 0xfe2: 0x1ff4, 0xfe3: 0x1ff9, + 0xfe4: 0x1ffe, 0xfe5: 0x2008, 0xfe6: 0x2017, 0xfe7: 0x201c, 0xfe8: 0x202b, 0xfe9: 0x2030, + 0xfea: 0x2035, 0xfeb: 0x2044, 0xfec: 0x2049, 0xfed: 0x2058, 0xfee: 0x205d, 0xfef: 0x2062, + 0xff0: 0x2067, 0xff1: 0x206c, 0xff2: 0x2071, 0xff3: 0x2076, 0xff4: 0x207b, 0xff5: 0x2080, + 0xff6: 0x2085, 0xff7: 0x208a, 0xff8: 0x208f, 0xff9: 0x2094, 0xffa: 0x2099, 0xffb: 0x209e, + 0xffc: 0x20a3, 0xffd: 0x20a8, 0xffe: 0x20ad, 0xfff: 0x20b7, + // Block 0x40, offset 0x1000 + 0x1000: 0x20bc, 0x1001: 0x20c1, 0x1002: 0x20c6, 0x1003: 0x20d0, 0x1004: 0x20d5, 0x1005: 0x20df, + 0x1006: 0x20e4, 0x1007: 0x20e9, 0x1008: 0x20ee, 0x1009: 0x20f3, 0x100a: 0x20f8, 0x100b: 0x20fd, + 0x100c: 0x2102, 0x100d: 0x2107, 0x100e: 0x2116, 0x100f: 0x2125, 0x1010: 0x212a, 0x1011: 0x212f, + 0x1012: 0x2134, 0x1013: 0x2139, 0x1014: 0x213e, 0x1015: 0x2148, 0x1016: 0x214d, 0x1017: 0x2152, + 0x1018: 0x2161, 0x1019: 0x2170, 0x101a: 0x2175, 0x101b: 0x4561, 0x101c: 0x4567, 0x101d: 0x459d, + 0x101e: 0x45f4, 0x101f: 0x45fb, 0x1020: 0x4602, 0x1021: 0x4609, 0x1022: 0x4610, 0x1023: 0x4617, + 0x1024: 0x26f6, 0x1025: 0x26fd, 0x1026: 0x2704, 0x1027: 0x270b, 0x1028: 0x2720, 0x1029: 0x2727, + 0x102a: 0x1ec8, 0x102b: 0x1ecd, 
0x102c: 0x1ed2, 0x102d: 0x1ed7, 0x102e: 0x1ee1, 0x102f: 0x1ee6, + 0x1030: 0x1efa, 0x1031: 0x1eff, 0x1032: 0x1f04, 0x1033: 0x1f09, 0x1034: 0x1f13, 0x1035: 0x1f18, + 0x1036: 0x1f22, 0x1037: 0x1f27, 0x1038: 0x1f2c, 0x1039: 0x1f31, 0x103a: 0x1f3b, 0x103b: 0x1f40, + 0x103c: 0x206c, 0x103d: 0x2071, 0x103e: 0x2080, 0x103f: 0x2085, + // Block 0x41, offset 0x1040 + 0x1040: 0x208a, 0x1041: 0x209e, 0x1042: 0x20a3, 0x1043: 0x20a8, 0x1044: 0x20ad, 0x1045: 0x20c6, + 0x1046: 0x20d0, 0x1047: 0x20d5, 0x1048: 0x20da, 0x1049: 0x20ee, 0x104a: 0x210c, 0x104b: 0x2111, + 0x104c: 0x2116, 0x104d: 0x211b, 0x104e: 0x2125, 0x104f: 0x212a, 0x1050: 0x459d, 0x1051: 0x2157, + 0x1052: 0x215c, 0x1053: 0x2161, 0x1054: 0x2166, 0x1055: 0x2170, 0x1056: 0x2175, 0x1057: 0x26e1, + 0x1058: 0x26e8, 0x1059: 0x26ef, 0x105a: 0x2704, 0x105b: 0x2712, 0x105c: 0x1eb9, 0x105d: 0x1ebe, + 0x105e: 0x1ec3, 0x105f: 0x1ed2, 0x1060: 0x1edc, 0x1061: 0x1eeb, 0x1062: 0x1ef0, 0x1063: 0x1ef5, + 0x1064: 0x1f04, 0x1065: 0x1f0e, 0x1066: 0x1f2c, 0x1067: 0x1f45, 0x1068: 0x1f4a, 0x1069: 0x1f59, + 0x106a: 0x1f5e, 0x106b: 0x1f6d, 0x106c: 0x1f77, 0x106d: 0x1f86, 0x106e: 0x1f8b, 0x106f: 0x1f90, + 0x1070: 0x1f9a, 0x1071: 0x1fd6, 0x1072: 0x1fdb, 0x1073: 0x1fe5, 0x1074: 0x1ff4, 0x1075: 0x1ff9, + 0x1076: 0x1ffe, 0x1077: 0x2008, 0x1078: 0x2017, 0x1079: 0x202b, 0x107a: 0x2030, 0x107b: 0x2035, + 0x107c: 0x2044, 0x107d: 0x2049, 0x107e: 0x2058, 0x107f: 0x205d, + // Block 0x42, offset 0x1080 + 0x1080: 0x2062, 0x1081: 0x2067, 0x1082: 0x2076, 0x1083: 0x207b, 0x1084: 0x208f, 0x1085: 0x2094, + 0x1086: 0x2099, 0x1087: 0x209e, 0x1088: 0x20a3, 0x1089: 0x20b7, 0x108a: 0x20bc, 0x108b: 0x20c1, + 0x108c: 0x20c6, 0x108d: 0x20cb, 0x108e: 0x20df, 0x108f: 0x20e4, 0x1090: 0x20e9, 0x1091: 0x20ee, + 0x1092: 0x20fd, 0x1093: 0x2102, 0x1094: 0x2107, 0x1095: 0x2116, 0x1096: 0x2120, 0x1097: 0x212f, + 0x1098: 0x2134, 0x1099: 0x4591, 0x109a: 0x2148, 0x109b: 0x214d, 0x109c: 0x2152, 0x109d: 0x2161, + 0x109e: 0x216b, 0x109f: 0x2704, 0x10a0: 0x2712, 0x10a1: 0x1ed2, 0x10a2: 
0x1edc, 0x10a3: 0x1f04, + 0x10a4: 0x1f0e, 0x10a5: 0x1f2c, 0x10a6: 0x1f36, 0x10a7: 0x1f9a, 0x10a8: 0x1f9f, 0x10a9: 0x1fc2, + 0x10aa: 0x1fc7, 0x10ab: 0x209e, 0x10ac: 0x20a3, 0x10ad: 0x20c6, 0x10ae: 0x2116, 0x10af: 0x2120, + 0x10b0: 0x2161, 0x10b1: 0x216b, 0x10b2: 0x4645, 0x10b3: 0x464d, 0x10b4: 0x4655, 0x10b5: 0x2021, + 0x10b6: 0x2026, 0x10b7: 0x203a, 0x10b8: 0x203f, 0x10b9: 0x204e, 0x10ba: 0x2053, 0x10bb: 0x1fa4, + 0x10bc: 0x1fa9, 0x10bd: 0x1fcc, 0x10be: 0x1fd1, 0x10bf: 0x1f63, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x1f68, 0x10c1: 0x1f4f, 0x10c2: 0x1f54, 0x10c3: 0x1f7c, 0x10c4: 0x1f81, 0x10c5: 0x1fea, + 0x10c6: 0x1fef, 0x10c7: 0x200d, 0x10c8: 0x2012, 0x10c9: 0x1fae, 0x10ca: 0x1fb3, 0x10cb: 0x1fb8, + 0x10cc: 0x1fc2, 0x10cd: 0x1fbd, 0x10ce: 0x1f95, 0x10cf: 0x1fe0, 0x10d0: 0x2003, 0x10d1: 0x2021, + 0x10d2: 0x2026, 0x10d3: 0x203a, 0x10d4: 0x203f, 0x10d5: 0x204e, 0x10d6: 0x2053, 0x10d7: 0x1fa4, + 0x10d8: 0x1fa9, 0x10d9: 0x1fcc, 0x10da: 0x1fd1, 0x10db: 0x1f63, 0x10dc: 0x1f68, 0x10dd: 0x1f4f, + 0x10de: 0x1f54, 0x10df: 0x1f7c, 0x10e0: 0x1f81, 0x10e1: 0x1fea, 0x10e2: 0x1fef, 0x10e3: 0x200d, + 0x10e4: 0x2012, 0x10e5: 0x1fae, 0x10e6: 0x1fb3, 0x10e7: 0x1fb8, 0x10e8: 0x1fc2, 0x10e9: 0x1fbd, + 0x10ea: 0x1f95, 0x10eb: 0x1fe0, 0x10ec: 0x2003, 0x10ed: 0x1fae, 0x10ee: 0x1fb3, 0x10ef: 0x1fb8, + 0x10f0: 0x1fc2, 0x10f1: 0x1f9f, 0x10f2: 0x1fc7, 0x10f3: 0x201c, 0x10f4: 0x1f86, 0x10f5: 0x1f8b, + 0x10f6: 0x1f90, 0x10f7: 0x1fae, 0x10f8: 0x1fb3, 0x10f9: 0x1fb8, 0x10fa: 0x201c, 0x10fb: 0x202b, + 0x10fc: 0x4549, 0x10fd: 0x4549, + // Block 0x44, offset 0x1100 + 0x1110: 0x2441, 0x1111: 0x2456, + 0x1112: 0x2456, 0x1113: 0x245d, 0x1114: 0x2464, 0x1115: 0x2479, 0x1116: 0x2480, 0x1117: 0x2487, + 0x1118: 0x24aa, 0x1119: 0x24aa, 0x111a: 0x24cd, 0x111b: 0x24c6, 0x111c: 0x24e2, 0x111d: 0x24d4, + 0x111e: 0x24db, 0x111f: 0x24fe, 0x1120: 0x24fe, 0x1121: 0x24f7, 0x1122: 0x2505, 0x1123: 0x2505, + 0x1124: 0x252f, 0x1125: 0x252f, 0x1126: 0x254b, 0x1127: 0x2513, 0x1128: 0x2513, 0x1129: 0x250c, + 0x112a: 
0x2521, 0x112b: 0x2521, 0x112c: 0x2528, 0x112d: 0x2528, 0x112e: 0x2552, 0x112f: 0x2560, + 0x1130: 0x2560, 0x1131: 0x2567, 0x1132: 0x2567, 0x1133: 0x256e, 0x1134: 0x2575, 0x1135: 0x257c, + 0x1136: 0x2583, 0x1137: 0x2583, 0x1138: 0x258a, 0x1139: 0x2598, 0x113a: 0x25a6, 0x113b: 0x259f, + 0x113c: 0x25ad, 0x113d: 0x25ad, 0x113e: 0x25c2, 0x113f: 0x25c9, + // Block 0x45, offset 0x1140 + 0x1140: 0x25fa, 0x1141: 0x2608, 0x1142: 0x2601, 0x1143: 0x25e5, 0x1144: 0x25e5, 0x1145: 0x260f, + 0x1146: 0x260f, 0x1147: 0x2616, 0x1148: 0x2616, 0x1149: 0x2640, 0x114a: 0x2647, 0x114b: 0x264e, + 0x114c: 0x2624, 0x114d: 0x2632, 0x114e: 0x2655, 0x114f: 0x265c, + 0x1152: 0x262b, 0x1153: 0x26b0, 0x1154: 0x26b7, 0x1155: 0x268d, 0x1156: 0x2694, 0x1157: 0x2678, + 0x1158: 0x2678, 0x1159: 0x267f, 0x115a: 0x26a9, 0x115b: 0x26a2, 0x115c: 0x26cc, 0x115d: 0x26cc, + 0x115e: 0x243a, 0x115f: 0x244f, 0x1160: 0x2448, 0x1161: 0x2472, 0x1162: 0x246b, 0x1163: 0x2495, + 0x1164: 0x248e, 0x1165: 0x24b8, 0x1166: 0x249c, 0x1167: 0x24b1, 0x1168: 0x24e9, 0x1169: 0x2536, + 0x116a: 0x251a, 0x116b: 0x2559, 0x116c: 0x25f3, 0x116d: 0x261d, 0x116e: 0x26c5, 0x116f: 0x26be, + 0x1170: 0x26d3, 0x1171: 0x266a, 0x1172: 0x25d0, 0x1173: 0x269b, 0x1174: 0x25c2, 0x1175: 0x25fa, + 0x1176: 0x2591, 0x1177: 0x25de, 0x1178: 0x2671, 0x1179: 0x2663, 0x117a: 0x25ec, 0x117b: 0x25d7, + 0x117c: 0x25ec, 0x117d: 0x2671, 0x117e: 0x24a3, 0x117f: 0x24bf, + // Block 0x46, offset 0x1180 + 0x1180: 0x2639, 0x1181: 0x25b4, 0x1182: 0x2433, 0x1183: 0x25d7, 0x1184: 0x257c, 0x1185: 0x254b, + 0x1186: 0x24f0, 0x1187: 0x2686, + 0x11b0: 0x2544, 0x11b1: 0x25bb, 0x11b2: 0x28f6, 0x11b3: 0x28ed, 0x11b4: 0x2923, 0x11b5: 0x2911, + 0x11b6: 0x28ff, 0x11b7: 0x291a, 0x11b8: 0x292c, 0x11b9: 0x253d, 0x11ba: 0x2db3, 0x11bb: 0x2c33, + 0x11bc: 0x2908, + // Block 0x47, offset 0x11c0 + 0x11d0: 0x0019, 0x11d1: 0x057e, + 0x11d2: 0x0582, 0x11d3: 0x0035, 0x11d4: 0x0037, 0x11d5: 0x0003, 0x11d6: 0x003f, 0x11d7: 0x05ba, + 0x11d8: 0x05be, 0x11d9: 0x1c8c, + 0x11e0: 0x8133, 0x11e1: 
0x8133, 0x11e2: 0x8133, 0x11e3: 0x8133, + 0x11e4: 0x8133, 0x11e5: 0x8133, 0x11e6: 0x8133, 0x11e7: 0x812e, 0x11e8: 0x812e, 0x11e9: 0x812e, + 0x11ea: 0x812e, 0x11eb: 0x812e, 0x11ec: 0x812e, 0x11ed: 0x812e, 0x11ee: 0x8133, 0x11ef: 0x8133, + 0x11f0: 0x19a0, 0x11f1: 0x053a, 0x11f2: 0x0536, 0x11f3: 0x007f, 0x11f4: 0x007f, 0x11f5: 0x0011, + 0x11f6: 0x0013, 0x11f7: 0x00b7, 0x11f8: 0x00bb, 0x11f9: 0x05b2, 0x11fa: 0x05b6, 0x11fb: 0x05a6, + 0x11fc: 0x05aa, 0x11fd: 0x058e, 0x11fe: 0x0592, 0x11ff: 0x0586, + // Block 0x48, offset 0x1200 + 0x1200: 0x058a, 0x1201: 0x0596, 0x1202: 0x059a, 0x1203: 0x059e, 0x1204: 0x05a2, + 0x1207: 0x0077, 0x1208: 0x007b, 0x1209: 0x43aa, 0x120a: 0x43aa, 0x120b: 0x43aa, + 0x120c: 0x43aa, 0x120d: 0x007f, 0x120e: 0x007f, 0x120f: 0x007f, 0x1210: 0x0019, 0x1211: 0x057e, + 0x1212: 0x001d, 0x1214: 0x0037, 0x1215: 0x0035, 0x1216: 0x003f, 0x1217: 0x0003, + 0x1218: 0x053a, 0x1219: 0x0011, 0x121a: 0x0013, 0x121b: 0x00b7, 0x121c: 0x00bb, 0x121d: 0x05b2, + 0x121e: 0x05b6, 0x121f: 0x0007, 0x1220: 0x000d, 0x1221: 0x0015, 0x1222: 0x0017, 0x1223: 0x001b, + 0x1224: 0x0039, 0x1225: 0x003d, 0x1226: 0x003b, 0x1228: 0x0079, 0x1229: 0x0009, + 0x122a: 0x000b, 0x122b: 0x0041, + 0x1230: 0x43eb, 0x1231: 0x456d, 0x1232: 0x43f0, 0x1234: 0x43f5, + 0x1236: 0x43fa, 0x1237: 0x4573, 0x1238: 0x43ff, 0x1239: 0x4579, 0x123a: 0x4404, 0x123b: 0x457f, + 0x123c: 0x4409, 0x123d: 0x4585, 0x123e: 0x440e, 0x123f: 0x458b, + // Block 0x49, offset 0x1240 + 0x1240: 0x0329, 0x1241: 0x454f, 0x1242: 0x454f, 0x1243: 0x4555, 0x1244: 0x4555, 0x1245: 0x4597, + 0x1246: 0x4597, 0x1247: 0x455b, 0x1248: 0x455b, 0x1249: 0x45a3, 0x124a: 0x45a3, 0x124b: 0x45a3, + 0x124c: 0x45a3, 0x124d: 0x032c, 0x124e: 0x032c, 0x124f: 0x032f, 0x1250: 0x032f, 0x1251: 0x032f, + 0x1252: 0x032f, 0x1253: 0x0332, 0x1254: 0x0332, 0x1255: 0x0335, 0x1256: 0x0335, 0x1257: 0x0335, + 0x1258: 0x0335, 0x1259: 0x0338, 0x125a: 0x0338, 0x125b: 0x0338, 0x125c: 0x0338, 0x125d: 0x033b, + 0x125e: 0x033b, 0x125f: 0x033b, 0x1260: 0x033b, 0x1261: 
0x033e, 0x1262: 0x033e, 0x1263: 0x033e, + 0x1264: 0x033e, 0x1265: 0x0341, 0x1266: 0x0341, 0x1267: 0x0341, 0x1268: 0x0341, 0x1269: 0x0344, + 0x126a: 0x0344, 0x126b: 0x0347, 0x126c: 0x0347, 0x126d: 0x034a, 0x126e: 0x034a, 0x126f: 0x034d, + 0x1270: 0x034d, 0x1271: 0x0350, 0x1272: 0x0350, 0x1273: 0x0350, 0x1274: 0x0350, 0x1275: 0x0353, + 0x1276: 0x0353, 0x1277: 0x0353, 0x1278: 0x0353, 0x1279: 0x0356, 0x127a: 0x0356, 0x127b: 0x0356, + 0x127c: 0x0356, 0x127d: 0x0359, 0x127e: 0x0359, 0x127f: 0x0359, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0359, 0x1281: 0x035c, 0x1282: 0x035c, 0x1283: 0x035c, 0x1284: 0x035c, 0x1285: 0x035f, + 0x1286: 0x035f, 0x1287: 0x035f, 0x1288: 0x035f, 0x1289: 0x0362, 0x128a: 0x0362, 0x128b: 0x0362, + 0x128c: 0x0362, 0x128d: 0x0365, 0x128e: 0x0365, 0x128f: 0x0365, 0x1290: 0x0365, 0x1291: 0x0368, + 0x1292: 0x0368, 0x1293: 0x0368, 0x1294: 0x0368, 0x1295: 0x036b, 0x1296: 0x036b, 0x1297: 0x036b, + 0x1298: 0x036b, 0x1299: 0x036e, 0x129a: 0x036e, 0x129b: 0x036e, 0x129c: 0x036e, 0x129d: 0x0371, + 0x129e: 0x0371, 0x129f: 0x0371, 0x12a0: 0x0371, 0x12a1: 0x0374, 0x12a2: 0x0374, 0x12a3: 0x0374, + 0x12a4: 0x0374, 0x12a5: 0x0377, 0x12a6: 0x0377, 0x12a7: 0x0377, 0x12a8: 0x0377, 0x12a9: 0x037a, + 0x12aa: 0x037a, 0x12ab: 0x037a, 0x12ac: 0x037a, 0x12ad: 0x037d, 0x12ae: 0x037d, 0x12af: 0x0380, + 0x12b0: 0x0380, 0x12b1: 0x0383, 0x12b2: 0x0383, 0x12b3: 0x0383, 0x12b4: 0x0383, 0x12b5: 0x2f41, + 0x12b6: 0x2f41, 0x12b7: 0x2f49, 0x12b8: 0x2f49, 0x12b9: 0x2f51, 0x12ba: 0x2f51, 0x12bb: 0x20b2, + 0x12bc: 0x20b2, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x0081, 0x12c1: 0x0083, 0x12c2: 0x0085, 0x12c3: 0x0087, 0x12c4: 0x0089, 0x12c5: 0x008b, + 0x12c6: 0x008d, 0x12c7: 0x008f, 0x12c8: 0x0091, 0x12c9: 0x0093, 0x12ca: 0x0095, 0x12cb: 0x0097, + 0x12cc: 0x0099, 0x12cd: 0x009b, 0x12ce: 0x009d, 0x12cf: 0x009f, 0x12d0: 0x00a1, 0x12d1: 0x00a3, + 0x12d2: 0x00a5, 0x12d3: 0x00a7, 0x12d4: 0x00a9, 0x12d5: 0x00ab, 0x12d6: 0x00ad, 0x12d7: 0x00af, + 0x12d8: 0x00b1, 0x12d9: 0x00b3, 0x12da: 0x00b5, 
0x12db: 0x00b7, 0x12dc: 0x00b9, 0x12dd: 0x00bb, + 0x12de: 0x00bd, 0x12df: 0x056e, 0x12e0: 0x0572, 0x12e1: 0x0582, 0x12e2: 0x0596, 0x12e3: 0x059a, + 0x12e4: 0x057e, 0x12e5: 0x06a6, 0x12e6: 0x069e, 0x12e7: 0x05c2, 0x12e8: 0x05ca, 0x12e9: 0x05d2, + 0x12ea: 0x05da, 0x12eb: 0x05e2, 0x12ec: 0x0666, 0x12ed: 0x066e, 0x12ee: 0x0676, 0x12ef: 0x061a, + 0x12f0: 0x06aa, 0x12f1: 0x05c6, 0x12f2: 0x05ce, 0x12f3: 0x05d6, 0x12f4: 0x05de, 0x12f5: 0x05e6, + 0x12f6: 0x05ea, 0x12f7: 0x05ee, 0x12f8: 0x05f2, 0x12f9: 0x05f6, 0x12fa: 0x05fa, 0x12fb: 0x05fe, + 0x12fc: 0x0602, 0x12fd: 0x0606, 0x12fe: 0x060a, 0x12ff: 0x060e, + // Block 0x4c, offset 0x1300 + 0x1300: 0x0612, 0x1301: 0x0616, 0x1302: 0x061e, 0x1303: 0x0622, 0x1304: 0x0626, 0x1305: 0x062a, + 0x1306: 0x062e, 0x1307: 0x0632, 0x1308: 0x0636, 0x1309: 0x063a, 0x130a: 0x063e, 0x130b: 0x0642, + 0x130c: 0x0646, 0x130d: 0x064a, 0x130e: 0x064e, 0x130f: 0x0652, 0x1310: 0x0656, 0x1311: 0x065a, + 0x1312: 0x065e, 0x1313: 0x0662, 0x1314: 0x066a, 0x1315: 0x0672, 0x1316: 0x067a, 0x1317: 0x067e, + 0x1318: 0x0682, 0x1319: 0x0686, 0x131a: 0x068a, 0x131b: 0x068e, 0x131c: 0x0692, 0x131d: 0x06a2, + 0x131e: 0x4bb9, 0x131f: 0x4bbf, 0x1320: 0x04b6, 0x1321: 0x0406, 0x1322: 0x040a, 0x1323: 0x4b7c, + 0x1324: 0x040e, 0x1325: 0x4b82, 0x1326: 0x4b88, 0x1327: 0x0412, 0x1328: 0x0416, 0x1329: 0x041a, + 0x132a: 0x4b8e, 0x132b: 0x4b94, 0x132c: 0x4b9a, 0x132d: 0x4ba0, 0x132e: 0x4ba6, 0x132f: 0x4bac, + 0x1330: 0x045a, 0x1331: 0x041e, 0x1332: 0x0422, 0x1333: 0x0426, 0x1334: 0x046e, 0x1335: 0x042a, + 0x1336: 0x042e, 0x1337: 0x0432, 0x1338: 0x0436, 0x1339: 0x043a, 0x133a: 0x043e, 0x133b: 0x0442, + 0x133c: 0x0446, 0x133d: 0x044a, 0x133e: 0x044e, + // Block 0x4d, offset 0x1340 + 0x1342: 0x4afe, 0x1343: 0x4b04, 0x1344: 0x4b0a, 0x1345: 0x4b10, + 0x1346: 0x4b16, 0x1347: 0x4b1c, 0x134a: 0x4b22, 0x134b: 0x4b28, + 0x134c: 0x4b2e, 0x134d: 0x4b34, 0x134e: 0x4b3a, 0x134f: 0x4b40, + 0x1352: 0x4b46, 0x1353: 0x4b4c, 0x1354: 0x4b52, 0x1355: 0x4b58, 0x1356: 0x4b5e, 0x1357: 0x4b64, + 
0x135a: 0x4b6a, 0x135b: 0x4b70, 0x135c: 0x4b76, + 0x1360: 0x00bf, 0x1361: 0x00c2, 0x1362: 0x00cb, 0x1363: 0x43a5, + 0x1364: 0x00c8, 0x1365: 0x00c5, 0x1366: 0x053e, 0x1368: 0x0562, 0x1369: 0x0542, + 0x136a: 0x0546, 0x136b: 0x054a, 0x136c: 0x054e, 0x136d: 0x0566, 0x136e: 0x056a, + // Block 0x4e, offset 0x1380 + 0x1381: 0x01f1, 0x1382: 0x01f4, 0x1383: 0x00d4, 0x1384: 0x01be, 0x1385: 0x010d, + 0x1387: 0x01d3, 0x1388: 0x174e, 0x1389: 0x01d9, 0x138a: 0x01d6, 0x138b: 0x0116, + 0x138c: 0x0119, 0x138d: 0x0526, 0x138e: 0x011c, 0x138f: 0x0128, 0x1390: 0x01e5, 0x1391: 0x013a, + 0x1392: 0x0134, 0x1393: 0x012e, 0x1394: 0x01c1, 0x1395: 0x00e0, 0x1396: 0x01c4, 0x1397: 0x0143, + 0x1398: 0x0194, 0x1399: 0x01e8, 0x139a: 0x01eb, 0x139b: 0x0152, 0x139c: 0x1756, 0x139d: 0x1742, + 0x139e: 0x0158, 0x139f: 0x175b, 0x13a0: 0x01a9, 0x13a1: 0x1760, 0x13a2: 0x00da, 0x13a3: 0x0170, + 0x13a4: 0x0173, 0x13a5: 0x00a3, 0x13a6: 0x017c, 0x13a7: 0x1765, 0x13a8: 0x0182, 0x13a9: 0x0185, + 0x13aa: 0x0188, 0x13ab: 0x01e2, 0x13ac: 0x01dc, 0x13ad: 0x1752, 0x13ae: 0x01df, 0x13af: 0x0197, + 0x13b0: 0x0576, 0x13b2: 0x01ac, 0x13b3: 0x01cd, 0x13b4: 0x01d0, 0x13b5: 0x01bb, + 0x13b6: 0x00f5, 0x13b7: 0x00f8, 0x13b8: 0x00fb, 0x13b9: 0x176a, 0x13ba: 0x176f, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0063, 0x13c1: 0x0065, 0x13c2: 0x0067, 0x13c3: 0x0069, 0x13c4: 0x006b, 0x13c5: 0x006d, + 0x13c6: 0x006f, 0x13c7: 0x0071, 0x13c8: 0x0073, 0x13c9: 0x0075, 0x13ca: 0x0083, 0x13cb: 0x0085, + 0x13cc: 0x0087, 0x13cd: 0x0089, 0x13ce: 0x008b, 0x13cf: 0x008d, 0x13d0: 0x008f, 0x13d1: 0x0091, + 0x13d2: 0x0093, 0x13d3: 0x0095, 0x13d4: 0x0097, 0x13d5: 0x0099, 0x13d6: 0x009b, 0x13d7: 0x009d, + 0x13d8: 0x009f, 0x13d9: 0x00a1, 0x13da: 0x00a3, 0x13db: 0x00a5, 0x13dc: 0x00a7, 0x13dd: 0x00a9, + 0x13de: 0x00ab, 0x13df: 0x00ad, 0x13e0: 0x00af, 0x13e1: 0x00b1, 0x13e2: 0x00b3, 0x13e3: 0x00b5, + 0x13e4: 0x00e3, 0x13e5: 0x0101, 0x13e8: 0x01f7, 0x13e9: 0x01fa, + 0x13ea: 0x01fd, 0x13eb: 0x0200, 0x13ec: 0x0203, 0x13ed: 0x0206, 0x13ee: 0x0209, 0x13ef: 
0x020c, + 0x13f0: 0x020f, 0x13f1: 0x0212, 0x13f2: 0x0215, 0x13f3: 0x0218, 0x13f4: 0x021b, 0x13f5: 0x021e, + 0x13f6: 0x0221, 0x13f7: 0x0224, 0x13f8: 0x0227, 0x13f9: 0x020c, 0x13fa: 0x022a, 0x13fb: 0x022d, + 0x13fc: 0x0230, 0x13fd: 0x0233, 0x13fe: 0x0236, 0x13ff: 0x0239, + // Block 0x50, offset 0x1400 + 0x1400: 0x0281, 0x1401: 0x0284, 0x1402: 0x0287, 0x1403: 0x0552, 0x1404: 0x024b, 0x1405: 0x0254, + 0x1406: 0x025a, 0x1407: 0x027e, 0x1408: 0x026f, 0x1409: 0x026c, 0x140a: 0x028a, 0x140b: 0x028d, + 0x140e: 0x0021, 0x140f: 0x0023, 0x1410: 0x0025, 0x1411: 0x0027, + 0x1412: 0x0029, 0x1413: 0x002b, 0x1414: 0x002d, 0x1415: 0x002f, 0x1416: 0x0031, 0x1417: 0x0033, + 0x1418: 0x0021, 0x1419: 0x0023, 0x141a: 0x0025, 0x141b: 0x0027, 0x141c: 0x0029, 0x141d: 0x002b, + 0x141e: 0x002d, 0x141f: 0x002f, 0x1420: 0x0031, 0x1421: 0x0033, 0x1422: 0x0021, 0x1423: 0x0023, + 0x1424: 0x0025, 0x1425: 0x0027, 0x1426: 0x0029, 0x1427: 0x002b, 0x1428: 0x002d, 0x1429: 0x002f, + 0x142a: 0x0031, 0x142b: 0x0033, 0x142c: 0x0021, 0x142d: 0x0023, 0x142e: 0x0025, 0x142f: 0x0027, + 0x1430: 0x0029, 0x1431: 0x002b, 0x1432: 0x002d, 0x1433: 0x002f, 0x1434: 0x0031, 0x1435: 0x0033, + 0x1436: 0x0021, 0x1437: 0x0023, 0x1438: 0x0025, 0x1439: 0x0027, 0x143a: 0x0029, 0x143b: 0x002b, + 0x143c: 0x002d, 0x143d: 0x002f, 0x143e: 0x0031, 0x143f: 0x0033, + // Block 0x51, offset 0x1440 + 0x1440: 0x8133, 0x1441: 0x8133, 0x1442: 0x8133, 0x1443: 0x8133, 0x1444: 0x8133, 0x1445: 0x8133, + 0x1446: 0x8133, 0x1448: 0x8133, 0x1449: 0x8133, 0x144a: 0x8133, 0x144b: 0x8133, + 0x144c: 0x8133, 0x144d: 0x8133, 0x144e: 0x8133, 0x144f: 0x8133, 0x1450: 0x8133, 0x1451: 0x8133, + 0x1452: 0x8133, 0x1453: 0x8133, 0x1454: 0x8133, 0x1455: 0x8133, 0x1456: 0x8133, 0x1457: 0x8133, + 0x1458: 0x8133, 0x145b: 0x8133, 0x145c: 0x8133, 0x145d: 0x8133, + 0x145e: 0x8133, 0x145f: 0x8133, 0x1460: 0x8133, 0x1461: 0x8133, 0x1463: 0x8133, + 0x1464: 0x8133, 0x1466: 0x8133, 0x1467: 0x8133, 0x1468: 0x8133, 0x1469: 0x8133, + 0x146a: 0x8133, + 0x1470: 0x0290, 0x1471: 
0x0293, 0x1472: 0x0296, 0x1473: 0x0299, 0x1474: 0x029c, 0x1475: 0x029f, + 0x1476: 0x02a2, 0x1477: 0x02a5, 0x1478: 0x02a8, 0x1479: 0x02ab, 0x147a: 0x02ae, 0x147b: 0x02b1, + 0x147c: 0x02b7, 0x147d: 0x02ba, 0x147e: 0x02bd, 0x147f: 0x02c0, + // Block 0x52, offset 0x1480 + 0x1480: 0x02c3, 0x1481: 0x02c6, 0x1482: 0x02c9, 0x1483: 0x02cc, 0x1484: 0x02cf, 0x1485: 0x02d2, + 0x1486: 0x02d5, 0x1487: 0x02db, 0x1488: 0x02e1, 0x1489: 0x02e4, 0x148a: 0x1736, 0x148b: 0x0302, + 0x148c: 0x02ea, 0x148d: 0x02ed, 0x148e: 0x0305, 0x148f: 0x02f9, 0x1490: 0x02ff, 0x1491: 0x0290, + 0x1492: 0x0293, 0x1493: 0x0296, 0x1494: 0x0299, 0x1495: 0x029c, 0x1496: 0x029f, 0x1497: 0x02a2, + 0x1498: 0x02a5, 0x1499: 0x02a8, 0x149a: 0x02ab, 0x149b: 0x02ae, 0x149c: 0x02b7, 0x149d: 0x02ba, + 0x149e: 0x02c0, 0x149f: 0x02c6, 0x14a0: 0x02c9, 0x14a1: 0x02cc, 0x14a2: 0x02cf, 0x14a3: 0x02d2, + 0x14a4: 0x02d5, 0x14a5: 0x02d8, 0x14a6: 0x02db, 0x14a7: 0x02f3, 0x14a8: 0x02ea, 0x14a9: 0x02e7, + 0x14aa: 0x02f0, 0x14ab: 0x02f6, 0x14ac: 0x1732, 0x14ad: 0x02fc, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x032c, 0x14c1: 0x032f, 0x14c2: 0x033b, 0x14c3: 0x0344, 0x14c5: 0x037d, + 0x14c6: 0x034d, 0x14c7: 0x033e, 0x14c8: 0x035c, 0x14c9: 0x0383, 0x14ca: 0x036e, 0x14cb: 0x0371, + 0x14cc: 0x0374, 0x14cd: 0x0377, 0x14ce: 0x0350, 0x14cf: 0x0362, 0x14d0: 0x0368, 0x14d1: 0x0356, + 0x14d2: 0x036b, 0x14d3: 0x034a, 0x14d4: 0x0353, 0x14d5: 0x0335, 0x14d6: 0x0338, 0x14d7: 0x0341, + 0x14d8: 0x0347, 0x14d9: 0x0359, 0x14da: 0x035f, 0x14db: 0x0365, 0x14dc: 0x0386, 0x14dd: 0x03d7, + 0x14de: 0x03bf, 0x14df: 0x0389, 0x14e1: 0x032f, 0x14e2: 0x033b, + 0x14e4: 0x037a, 0x14e7: 0x033e, 0x14e9: 0x0383, + 0x14ea: 0x036e, 0x14eb: 0x0371, 0x14ec: 0x0374, 0x14ed: 0x0377, 0x14ee: 0x0350, 0x14ef: 0x0362, + 0x14f0: 0x0368, 0x14f1: 0x0356, 0x14f2: 0x036b, 0x14f4: 0x0353, 0x14f5: 0x0335, + 0x14f6: 0x0338, 0x14f7: 0x0341, 0x14f9: 0x0359, 0x14fb: 0x0365, + // Block 0x54, offset 0x1500 + 0x1502: 0x033b, + 0x1507: 0x033e, 0x1509: 0x0383, 0x150b: 0x0371, + 0x150d: 
0x0377, 0x150e: 0x0350, 0x150f: 0x0362, 0x1511: 0x0356, + 0x1512: 0x036b, 0x1514: 0x0353, 0x1517: 0x0341, + 0x1519: 0x0359, 0x151b: 0x0365, 0x151d: 0x03d7, + 0x151f: 0x0389, 0x1521: 0x032f, 0x1522: 0x033b, + 0x1524: 0x037a, 0x1527: 0x033e, 0x1528: 0x035c, 0x1529: 0x0383, + 0x152a: 0x036e, 0x152c: 0x0374, 0x152d: 0x0377, 0x152e: 0x0350, 0x152f: 0x0362, + 0x1530: 0x0368, 0x1531: 0x0356, 0x1532: 0x036b, 0x1534: 0x0353, 0x1535: 0x0335, + 0x1536: 0x0338, 0x1537: 0x0341, 0x1539: 0x0359, 0x153a: 0x035f, 0x153b: 0x0365, + 0x153c: 0x0386, 0x153e: 0x03bf, + // Block 0x55, offset 0x1540 + 0x1540: 0x032c, 0x1541: 0x032f, 0x1542: 0x033b, 0x1543: 0x0344, 0x1544: 0x037a, 0x1545: 0x037d, + 0x1546: 0x034d, 0x1547: 0x033e, 0x1548: 0x035c, 0x1549: 0x0383, 0x154b: 0x0371, + 0x154c: 0x0374, 0x154d: 0x0377, 0x154e: 0x0350, 0x154f: 0x0362, 0x1550: 0x0368, 0x1551: 0x0356, + 0x1552: 0x036b, 0x1553: 0x034a, 0x1554: 0x0353, 0x1555: 0x0335, 0x1556: 0x0338, 0x1557: 0x0341, + 0x1558: 0x0347, 0x1559: 0x0359, 0x155a: 0x035f, 0x155b: 0x0365, + 0x1561: 0x032f, 0x1562: 0x033b, 0x1563: 0x0344, + 0x1565: 0x037d, 0x1566: 0x034d, 0x1567: 0x033e, 0x1568: 0x035c, 0x1569: 0x0383, + 0x156b: 0x0371, 0x156c: 0x0374, 0x156d: 0x0377, 0x156e: 0x0350, 0x156f: 0x0362, + 0x1570: 0x0368, 0x1571: 0x0356, 0x1572: 0x036b, 0x1573: 0x034a, 0x1574: 0x0353, 0x1575: 0x0335, + 0x1576: 0x0338, 0x1577: 0x0341, 0x1578: 0x0347, 0x1579: 0x0359, 0x157a: 0x035f, 0x157b: 0x0365, + // Block 0x56, offset 0x1580 + 0x1580: 0x19a6, 0x1581: 0x19a3, 0x1582: 0x19a9, 0x1583: 0x19cd, 0x1584: 0x19f1, 0x1585: 0x1a15, + 0x1586: 0x1a39, 0x1587: 0x1a42, 0x1588: 0x1a48, 0x1589: 0x1a4e, 0x158a: 0x1a54, + 0x1590: 0x1bbc, 0x1591: 0x1bc0, + 0x1592: 0x1bc4, 0x1593: 0x1bc8, 0x1594: 0x1bcc, 0x1595: 0x1bd0, 0x1596: 0x1bd4, 0x1597: 0x1bd8, + 0x1598: 0x1bdc, 0x1599: 0x1be0, 0x159a: 0x1be4, 0x159b: 0x1be8, 0x159c: 0x1bec, 0x159d: 0x1bf0, + 0x159e: 0x1bf4, 0x159f: 0x1bf8, 0x15a0: 0x1bfc, 0x15a1: 0x1c00, 0x15a2: 0x1c04, 0x15a3: 0x1c08, + 0x15a4: 0x1c0c, 0x15a5: 
0x1c10, 0x15a6: 0x1c14, 0x15a7: 0x1c18, 0x15a8: 0x1c1c, 0x15a9: 0x1c20, + 0x15aa: 0x2855, 0x15ab: 0x0047, 0x15ac: 0x0065, 0x15ad: 0x1a69, 0x15ae: 0x1ae1, + 0x15b0: 0x0043, 0x15b1: 0x0045, 0x15b2: 0x0047, 0x15b3: 0x0049, 0x15b4: 0x004b, 0x15b5: 0x004d, + 0x15b6: 0x004f, 0x15b7: 0x0051, 0x15b8: 0x0053, 0x15b9: 0x0055, 0x15ba: 0x0057, 0x15bb: 0x0059, + 0x15bc: 0x005b, 0x15bd: 0x005d, 0x15be: 0x005f, 0x15bf: 0x0061, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x27dd, 0x15c1: 0x27f2, 0x15c2: 0x05fe, + 0x15d0: 0x0d0a, 0x15d1: 0x0b42, + 0x15d2: 0x09ce, 0x15d3: 0x4705, 0x15d4: 0x0816, 0x15d5: 0x0aea, 0x15d6: 0x142a, 0x15d7: 0x0afa, + 0x15d8: 0x0822, 0x15d9: 0x0dd2, 0x15da: 0x0faa, 0x15db: 0x0daa, 0x15dc: 0x0922, 0x15dd: 0x0c66, + 0x15de: 0x08ba, 0x15df: 0x0db2, 0x15e0: 0x090e, 0x15e1: 0x1212, 0x15e2: 0x107e, 0x15e3: 0x1486, + 0x15e4: 0x0ace, 0x15e5: 0x0a06, 0x15e6: 0x0f5e, 0x15e7: 0x0d16, 0x15e8: 0x0d42, 0x15e9: 0x07ba, + 0x15ea: 0x07c6, 0x15eb: 0x1506, 0x15ec: 0x0bd6, 0x15ed: 0x07e2, 0x15ee: 0x09ea, 0x15ef: 0x0d36, + 0x15f0: 0x14ae, 0x15f1: 0x0d0e, 0x15f2: 0x116a, 0x15f3: 0x11a6, 0x15f4: 0x09f2, 0x15f5: 0x0f3e, + 0x15f6: 0x0e06, 0x15f7: 0x0e02, 0x15f8: 0x1092, 0x15f9: 0x0926, 0x15fa: 0x0a52, 0x15fb: 0x153e, + // Block 0x58, offset 0x1600 + 0x1600: 0x07f6, 0x1601: 0x07ee, 0x1602: 0x07fe, 0x1603: 0x1774, 0x1604: 0x0842, 0x1605: 0x0852, + 0x1606: 0x0856, 0x1607: 0x085e, 0x1608: 0x0866, 0x1609: 0x086a, 0x160a: 0x0876, 0x160b: 0x086e, + 0x160c: 0x06ae, 0x160d: 0x1788, 0x160e: 0x088a, 0x160f: 0x088e, 0x1610: 0x0892, 0x1611: 0x08ae, + 0x1612: 0x1779, 0x1613: 0x06b2, 0x1614: 0x089a, 0x1615: 0x08ba, 0x1616: 0x1783, 0x1617: 0x08ca, + 0x1618: 0x08d2, 0x1619: 0x0832, 0x161a: 0x08da, 0x161b: 0x08de, 0x161c: 0x195e, 0x161d: 0x08fa, + 0x161e: 0x0902, 0x161f: 0x06ba, 0x1620: 0x091a, 0x1621: 0x091e, 0x1622: 0x0926, 0x1623: 0x092a, + 0x1624: 0x06be, 0x1625: 0x0942, 0x1626: 0x0946, 0x1627: 0x0952, 0x1628: 0x095e, 0x1629: 0x0962, + 0x162a: 0x0966, 0x162b: 0x096e, 0x162c: 0x098e, 0x162d: 0x0992, 
0x162e: 0x099a, 0x162f: 0x09aa, + 0x1630: 0x09b2, 0x1631: 0x09b6, 0x1632: 0x09b6, 0x1633: 0x09b6, 0x1634: 0x1797, 0x1635: 0x0f8e, + 0x1636: 0x09ca, 0x1637: 0x09d2, 0x1638: 0x179c, 0x1639: 0x09de, 0x163a: 0x09e6, 0x163b: 0x09ee, + 0x163c: 0x0a16, 0x163d: 0x0a02, 0x163e: 0x0a0e, 0x163f: 0x0a12, + // Block 0x59, offset 0x1640 + 0x1640: 0x0a1a, 0x1641: 0x0a22, 0x1642: 0x0a26, 0x1643: 0x0a2e, 0x1644: 0x0a36, 0x1645: 0x0a3a, + 0x1646: 0x0a3a, 0x1647: 0x0a42, 0x1648: 0x0a4a, 0x1649: 0x0a4e, 0x164a: 0x0a5a, 0x164b: 0x0a7e, + 0x164c: 0x0a62, 0x164d: 0x0a82, 0x164e: 0x0a66, 0x164f: 0x0a6e, 0x1650: 0x0906, 0x1651: 0x0aca, + 0x1652: 0x0a92, 0x1653: 0x0a96, 0x1654: 0x0a9a, 0x1655: 0x0a8e, 0x1656: 0x0aa2, 0x1657: 0x0a9e, + 0x1658: 0x0ab6, 0x1659: 0x17a1, 0x165a: 0x0ad2, 0x165b: 0x0ad6, 0x165c: 0x0ade, 0x165d: 0x0aea, + 0x165e: 0x0af2, 0x165f: 0x0b0e, 0x1660: 0x17a6, 0x1661: 0x17ab, 0x1662: 0x0b1a, 0x1663: 0x0b1e, + 0x1664: 0x0b22, 0x1665: 0x0b16, 0x1666: 0x0b2a, 0x1667: 0x06c2, 0x1668: 0x06c6, 0x1669: 0x0b32, + 0x166a: 0x0b3a, 0x166b: 0x0b3a, 0x166c: 0x17b0, 0x166d: 0x0b56, 0x166e: 0x0b5a, 0x166f: 0x0b5e, + 0x1670: 0x0b66, 0x1671: 0x17b5, 0x1672: 0x0b6e, 0x1673: 0x0b72, 0x1674: 0x0c4a, 0x1675: 0x0b7a, + 0x1676: 0x06ca, 0x1677: 0x0b86, 0x1678: 0x0b96, 0x1679: 0x0ba2, 0x167a: 0x0b9e, 0x167b: 0x17bf, + 0x167c: 0x0baa, 0x167d: 0x17c4, 0x167e: 0x0bb6, 0x167f: 0x0bb2, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0bba, 0x1681: 0x0bca, 0x1682: 0x0bce, 0x1683: 0x06ce, 0x1684: 0x0bde, 0x1685: 0x0be6, + 0x1686: 0x0bea, 0x1687: 0x0bee, 0x1688: 0x06d2, 0x1689: 0x17c9, 0x168a: 0x06d6, 0x168b: 0x0c0a, + 0x168c: 0x0c0e, 0x168d: 0x0c12, 0x168e: 0x0c1a, 0x168f: 0x1990, 0x1690: 0x0c32, 0x1691: 0x17d3, + 0x1692: 0x17d3, 0x1693: 0x12d2, 0x1694: 0x0c42, 0x1695: 0x0c42, 0x1696: 0x06da, 0x1697: 0x17f6, + 0x1698: 0x18c8, 0x1699: 0x0c52, 0x169a: 0x0c5a, 0x169b: 0x06de, 0x169c: 0x0c6e, 0x169d: 0x0c7e, + 0x169e: 0x0c82, 0x169f: 0x0c8a, 0x16a0: 0x0c9a, 0x16a1: 0x06e6, 0x16a2: 0x06e2, 0x16a3: 0x0c9e, + 0x16a4: 
0x17d8, 0x16a5: 0x0ca2, 0x16a6: 0x0cb6, 0x16a7: 0x0cba, 0x16a8: 0x0cbe, 0x16a9: 0x0cba, + 0x16aa: 0x0cca, 0x16ab: 0x0cce, 0x16ac: 0x0cde, 0x16ad: 0x0cd6, 0x16ae: 0x0cda, 0x16af: 0x0ce2, + 0x16b0: 0x0ce6, 0x16b1: 0x0cea, 0x16b2: 0x0cf6, 0x16b3: 0x0cfa, 0x16b4: 0x0d12, 0x16b5: 0x0d1a, + 0x16b6: 0x0d2a, 0x16b7: 0x0d3e, 0x16b8: 0x17e7, 0x16b9: 0x0d3a, 0x16ba: 0x0d2e, 0x16bb: 0x0d46, + 0x16bc: 0x0d4e, 0x16bd: 0x0d62, 0x16be: 0x17ec, 0x16bf: 0x0d6a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x0d5e, 0x16c1: 0x0d56, 0x16c2: 0x06ea, 0x16c3: 0x0d72, 0x16c4: 0x0d7a, 0x16c5: 0x0d82, + 0x16c6: 0x0d76, 0x16c7: 0x06ee, 0x16c8: 0x0d92, 0x16c9: 0x0d9a, 0x16ca: 0x17f1, 0x16cb: 0x0dc6, + 0x16cc: 0x0dfa, 0x16cd: 0x0dd6, 0x16ce: 0x06fa, 0x16cf: 0x0de2, 0x16d0: 0x06f6, 0x16d1: 0x06f2, + 0x16d2: 0x08be, 0x16d3: 0x08c2, 0x16d4: 0x0dfe, 0x16d5: 0x0de6, 0x16d6: 0x12a6, 0x16d7: 0x075e, + 0x16d8: 0x0e0a, 0x16d9: 0x0e0e, 0x16da: 0x0e12, 0x16db: 0x0e26, 0x16dc: 0x0e1e, 0x16dd: 0x180a, + 0x16de: 0x06fe, 0x16df: 0x0e3a, 0x16e0: 0x0e2e, 0x16e1: 0x0e4a, 0x16e2: 0x0e52, 0x16e3: 0x1814, + 0x16e4: 0x0e56, 0x16e5: 0x0e42, 0x16e6: 0x0e5e, 0x16e7: 0x0702, 0x16e8: 0x0e62, 0x16e9: 0x0e66, + 0x16ea: 0x0e6a, 0x16eb: 0x0e76, 0x16ec: 0x1819, 0x16ed: 0x0e7e, 0x16ee: 0x0706, 0x16ef: 0x0e8a, + 0x16f0: 0x181e, 0x16f1: 0x0e8e, 0x16f2: 0x070a, 0x16f3: 0x0e9a, 0x16f4: 0x0ea6, 0x16f5: 0x0eb2, + 0x16f6: 0x0eb6, 0x16f7: 0x1823, 0x16f8: 0x17ba, 0x16f9: 0x1828, 0x16fa: 0x0ed6, 0x16fb: 0x182d, + 0x16fc: 0x0ee2, 0x16fd: 0x0eea, 0x16fe: 0x0eda, 0x16ff: 0x0ef6, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0f06, 0x1701: 0x0f16, 0x1702: 0x0f0a, 0x1703: 0x0f0e, 0x1704: 0x0f1a, 0x1705: 0x0f1e, + 0x1706: 0x1832, 0x1707: 0x0f02, 0x1708: 0x0f36, 0x1709: 0x0f3a, 0x170a: 0x070e, 0x170b: 0x0f4e, + 0x170c: 0x0f4a, 0x170d: 0x1837, 0x170e: 0x0f2e, 0x170f: 0x0f6a, 0x1710: 0x183c, 0x1711: 0x1841, + 0x1712: 0x0f6e, 0x1713: 0x0f82, 0x1714: 0x0f7e, 0x1715: 0x0f7a, 0x1716: 0x0712, 0x1717: 0x0f86, + 0x1718: 0x0f96, 0x1719: 0x0f92, 0x171a: 0x0f9e, 
0x171b: 0x177e, 0x171c: 0x0fae, 0x171d: 0x1846, + 0x171e: 0x0fba, 0x171f: 0x1850, 0x1720: 0x0fce, 0x1721: 0x0fda, 0x1722: 0x0fee, 0x1723: 0x1855, + 0x1724: 0x1002, 0x1725: 0x1006, 0x1726: 0x185a, 0x1727: 0x185f, 0x1728: 0x1022, 0x1729: 0x1032, + 0x172a: 0x0716, 0x172b: 0x1036, 0x172c: 0x071a, 0x172d: 0x071a, 0x172e: 0x104e, 0x172f: 0x1052, + 0x1730: 0x105a, 0x1731: 0x105e, 0x1732: 0x106a, 0x1733: 0x071e, 0x1734: 0x1082, 0x1735: 0x1864, + 0x1736: 0x109e, 0x1737: 0x1869, 0x1738: 0x10aa, 0x1739: 0x17ce, 0x173a: 0x10ba, 0x173b: 0x186e, + 0x173c: 0x1873, 0x173d: 0x1878, 0x173e: 0x0722, 0x173f: 0x0726, + // Block 0x5d, offset 0x1740 + 0x1740: 0x10f2, 0x1741: 0x1882, 0x1742: 0x187d, 0x1743: 0x1887, 0x1744: 0x188c, 0x1745: 0x10fa, + 0x1746: 0x10fe, 0x1747: 0x10fe, 0x1748: 0x1106, 0x1749: 0x072e, 0x174a: 0x110a, 0x174b: 0x0732, + 0x174c: 0x0736, 0x174d: 0x1896, 0x174e: 0x111e, 0x174f: 0x1126, 0x1750: 0x1132, 0x1751: 0x073a, + 0x1752: 0x189b, 0x1753: 0x1156, 0x1754: 0x18a0, 0x1755: 0x18a5, 0x1756: 0x1176, 0x1757: 0x118e, + 0x1758: 0x073e, 0x1759: 0x1196, 0x175a: 0x119a, 0x175b: 0x119e, 0x175c: 0x18aa, 0x175d: 0x18af, + 0x175e: 0x18af, 0x175f: 0x11b6, 0x1760: 0x0742, 0x1761: 0x18b4, 0x1762: 0x11ca, 0x1763: 0x11ce, + 0x1764: 0x0746, 0x1765: 0x18b9, 0x1766: 0x11ea, 0x1767: 0x074a, 0x1768: 0x11fa, 0x1769: 0x11f2, + 0x176a: 0x1202, 0x176b: 0x18c3, 0x176c: 0x121a, 0x176d: 0x074e, 0x176e: 0x1226, 0x176f: 0x122e, + 0x1770: 0x123e, 0x1771: 0x0752, 0x1772: 0x18cd, 0x1773: 0x18d2, 0x1774: 0x0756, 0x1775: 0x18d7, + 0x1776: 0x1256, 0x1777: 0x18dc, 0x1778: 0x1262, 0x1779: 0x126e, 0x177a: 0x1276, 0x177b: 0x18e1, + 0x177c: 0x18e6, 0x177d: 0x128a, 0x177e: 0x18eb, 0x177f: 0x1292, + // Block 0x5e, offset 0x1780 + 0x1780: 0x17fb, 0x1781: 0x075a, 0x1782: 0x12aa, 0x1783: 0x12ae, 0x1784: 0x0762, 0x1785: 0x12b2, + 0x1786: 0x0b2e, 0x1787: 0x18f0, 0x1788: 0x18f5, 0x1789: 0x1800, 0x178a: 0x1805, 0x178b: 0x12d2, + 0x178c: 0x12d6, 0x178d: 0x14ee, 0x178e: 0x0766, 0x178f: 0x1302, 0x1790: 0x12fe, 0x1791: 
0x1306, + 0x1792: 0x093a, 0x1793: 0x130a, 0x1794: 0x130e, 0x1795: 0x1312, 0x1796: 0x131a, 0x1797: 0x18fa, + 0x1798: 0x1316, 0x1799: 0x131e, 0x179a: 0x1332, 0x179b: 0x1336, 0x179c: 0x1322, 0x179d: 0x133a, + 0x179e: 0x134e, 0x179f: 0x1362, 0x17a0: 0x132e, 0x17a1: 0x1342, 0x17a2: 0x1346, 0x17a3: 0x134a, + 0x17a4: 0x18ff, 0x17a5: 0x1909, 0x17a6: 0x1904, 0x17a7: 0x076a, 0x17a8: 0x136a, 0x17a9: 0x136e, + 0x17aa: 0x1376, 0x17ab: 0x191d, 0x17ac: 0x137a, 0x17ad: 0x190e, 0x17ae: 0x076e, 0x17af: 0x0772, + 0x17b0: 0x1913, 0x17b1: 0x1918, 0x17b2: 0x0776, 0x17b3: 0x139a, 0x17b4: 0x139e, 0x17b5: 0x13a2, + 0x17b6: 0x13a6, 0x17b7: 0x13b2, 0x17b8: 0x13ae, 0x17b9: 0x13ba, 0x17ba: 0x13b6, 0x17bb: 0x13c6, + 0x17bc: 0x13be, 0x17bd: 0x13c2, 0x17be: 0x13ca, 0x17bf: 0x077a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x13d2, 0x17c1: 0x13d6, 0x17c2: 0x077e, 0x17c3: 0x13e6, 0x17c4: 0x13ea, 0x17c5: 0x1922, + 0x17c6: 0x13f6, 0x17c7: 0x13fa, 0x17c8: 0x0782, 0x17c9: 0x1406, 0x17ca: 0x06b6, 0x17cb: 0x1927, + 0x17cc: 0x192c, 0x17cd: 0x0786, 0x17ce: 0x078a, 0x17cf: 0x1432, 0x17d0: 0x144a, 0x17d1: 0x1466, + 0x17d2: 0x1476, 0x17d3: 0x1931, 0x17d4: 0x148a, 0x17d5: 0x148e, 0x17d6: 0x14a6, 0x17d7: 0x14b2, + 0x17d8: 0x193b, 0x17d9: 0x178d, 0x17da: 0x14be, 0x17db: 0x14ba, 0x17dc: 0x14c6, 0x17dd: 0x1792, + 0x17de: 0x14d2, 0x17df: 0x14de, 0x17e0: 0x1940, 0x17e1: 0x1945, 0x17e2: 0x151e, 0x17e3: 0x152a, + 0x17e4: 0x1532, 0x17e5: 0x194a, 0x17e6: 0x1536, 0x17e7: 0x1562, 0x17e8: 0x156e, 0x17e9: 0x1572, + 0x17ea: 0x156a, 0x17eb: 0x157e, 0x17ec: 0x1582, 0x17ed: 0x194f, 0x17ee: 0x158e, 0x17ef: 0x078e, + 0x17f0: 0x1596, 0x17f1: 0x1954, 0x17f2: 0x0792, 0x17f3: 0x15ce, 0x17f4: 0x0bbe, 0x17f5: 0x15e6, + 0x17f6: 0x1959, 0x17f7: 0x1963, 0x17f8: 0x0796, 0x17f9: 0x079a, 0x17fa: 0x160e, 0x17fb: 0x1968, + 0x17fc: 0x079e, 0x17fd: 0x196d, 0x17fe: 0x1626, 0x17ff: 0x1626, + // Block 0x60, offset 0x1800 + 0x1800: 0x162e, 0x1801: 0x1972, 0x1802: 0x1646, 0x1803: 0x07a2, 0x1804: 0x1656, 0x1805: 0x1662, + 0x1806: 0x166a, 0x1807: 0x1672, 
0x1808: 0x07a6, 0x1809: 0x1977, 0x180a: 0x1686, 0x180b: 0x16a2, + 0x180c: 0x16ae, 0x180d: 0x07aa, 0x180e: 0x07ae, 0x180f: 0x16b2, 0x1810: 0x197c, 0x1811: 0x07b2, + 0x1812: 0x1981, 0x1813: 0x1986, 0x1814: 0x198b, 0x1815: 0x16d6, 0x1816: 0x07b6, 0x1817: 0x16ea, + 0x1818: 0x16f2, 0x1819: 0x16f6, 0x181a: 0x16fe, 0x181b: 0x1706, 0x181c: 0x170e, 0x181d: 0x1995, +} + +// nfkcIndex: 22 blocks, 1408 entries, 2816 bytes +// Block 0 is the zero block. +var nfkcIndex = [1408]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x5f, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x60, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x61, 0xcb: 0x62, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09, + 0xd0: 0x0a, 0xd1: 0x63, 0xd2: 0x64, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x65, + 0xd8: 0x66, 0xd9: 0x0d, 0xdb: 0x67, 0xdc: 0x68, 0xdd: 0x69, 0xdf: 0x6a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x6b, 0x121: 0x6c, 0x122: 0x6d, 0x123: 0x0e, 0x124: 0x6e, 0x125: 0x6f, 0x126: 0x70, 0x127: 0x71, + 0x128: 0x72, 0x129: 0x73, 0x12a: 0x74, 0x12b: 0x75, 0x12c: 0x70, 0x12d: 0x76, 0x12e: 0x77, 0x12f: 0x78, + 0x130: 0x74, 0x131: 0x79, 0x132: 0x7a, 0x133: 0x7b, 0x134: 0x7c, 0x135: 0x7d, 0x137: 0x7e, + 0x138: 0x7f, 0x139: 0x80, 0x13a: 0x81, 0x13b: 0x82, 0x13c: 0x83, 0x13d: 0x84, 0x13e: 0x85, 0x13f: 0x86, + // Block 0x5, offset 0x140 + 0x140: 0x87, 0x142: 0x88, 0x143: 0x89, 0x144: 0x8a, 0x145: 0x8b, 0x146: 0x8c, 0x147: 0x8d, + 0x14d: 0x8e, + 0x15c: 0x8f, 0x15f: 0x90, + 0x162: 0x91, 0x164: 0x92, + 0x168: 0x93, 0x169: 0x94, 0x16a: 0x95, 0x16b: 0x96, 0x16c: 0x0f, 0x16d: 0x97, 0x16e: 0x98, 0x16f: 0x99, + 0x170: 0x9a, 0x173: 0x9b, 0x174: 0x9c, 0x175: 0x10, 0x176: 0x11, 0x177: 0x12, + 0x178: 0x13, 0x179: 0x14, 0x17a: 0x15, 0x17b: 0x16, 0x17c: 0x17, 0x17d: 0x18, 0x17e: 0x19, 0x17f: 0x1a, + // Block 0x6, offset 0x180 + 0x180: 0x9d, 0x181: 
0x9e, 0x182: 0x9f, 0x183: 0xa0, 0x184: 0x1b, 0x185: 0x1c, 0x186: 0xa1, 0x187: 0xa2, + 0x188: 0xa3, 0x189: 0x1d, 0x18a: 0x1e, 0x18b: 0xa4, 0x18c: 0xa5, + 0x191: 0x1f, 0x192: 0x20, 0x193: 0xa6, + 0x1a8: 0xa7, 0x1a9: 0xa8, 0x1ab: 0xa9, + 0x1b1: 0xaa, 0x1b3: 0xab, 0x1b5: 0xac, 0x1b7: 0xad, + 0x1ba: 0xae, 0x1bb: 0xaf, 0x1bc: 0x21, 0x1bd: 0x22, 0x1be: 0x23, 0x1bf: 0xb0, + // Block 0x7, offset 0x1c0 + 0x1c0: 0xb1, 0x1c1: 0x24, 0x1c2: 0x25, 0x1c3: 0x26, 0x1c4: 0xb2, 0x1c5: 0x27, 0x1c6: 0x28, + 0x1c8: 0x29, 0x1c9: 0x2a, 0x1ca: 0x2b, 0x1cb: 0x2c, 0x1cc: 0x2d, 0x1cd: 0x2e, 0x1ce: 0x2f, 0x1cf: 0x30, + // Block 0x8, offset 0x200 + 0x219: 0xb3, 0x21a: 0xb4, 0x21b: 0xb5, 0x21d: 0xb6, 0x21f: 0xb7, + 0x220: 0xb8, 0x223: 0xb9, 0x224: 0xba, 0x225: 0xbb, 0x226: 0xbc, 0x227: 0xbd, + 0x22a: 0xbe, 0x22b: 0xbf, 0x22d: 0xc0, 0x22f: 0xc1, + 0x230: 0xc2, 0x231: 0xc3, 0x232: 0xc4, 0x233: 0xc5, 0x234: 0xc6, 0x235: 0xc7, 0x236: 0xc8, 0x237: 0xc2, + 0x238: 0xc3, 0x239: 0xc4, 0x23a: 0xc5, 0x23b: 0xc6, 0x23c: 0xc7, 0x23d: 0xc8, 0x23e: 0xc2, 0x23f: 0xc3, + // Block 0x9, offset 0x240 + 0x240: 0xc4, 0x241: 0xc5, 0x242: 0xc6, 0x243: 0xc7, 0x244: 0xc8, 0x245: 0xc2, 0x246: 0xc3, 0x247: 0xc4, + 0x248: 0xc5, 0x249: 0xc6, 0x24a: 0xc7, 0x24b: 0xc8, 0x24c: 0xc2, 0x24d: 0xc3, 0x24e: 0xc4, 0x24f: 0xc5, + 0x250: 0xc6, 0x251: 0xc7, 0x252: 0xc8, 0x253: 0xc2, 0x254: 0xc3, 0x255: 0xc4, 0x256: 0xc5, 0x257: 0xc6, + 0x258: 0xc7, 0x259: 0xc8, 0x25a: 0xc2, 0x25b: 0xc3, 0x25c: 0xc4, 0x25d: 0xc5, 0x25e: 0xc6, 0x25f: 0xc7, + 0x260: 0xc8, 0x261: 0xc2, 0x262: 0xc3, 0x263: 0xc4, 0x264: 0xc5, 0x265: 0xc6, 0x266: 0xc7, 0x267: 0xc8, + 0x268: 0xc2, 0x269: 0xc3, 0x26a: 0xc4, 0x26b: 0xc5, 0x26c: 0xc6, 0x26d: 0xc7, 0x26e: 0xc8, 0x26f: 0xc2, + 0x270: 0xc3, 0x271: 0xc4, 0x272: 0xc5, 0x273: 0xc6, 0x274: 0xc7, 0x275: 0xc8, 0x276: 0xc2, 0x277: 0xc3, + 0x278: 0xc4, 0x279: 0xc5, 0x27a: 0xc6, 0x27b: 0xc7, 0x27c: 0xc8, 0x27d: 0xc2, 0x27e: 0xc3, 0x27f: 0xc4, + // Block 0xa, offset 0x280 + 0x280: 0xc5, 0x281: 0xc6, 0x282: 0xc7, 0x283: 0xc8, 
0x284: 0xc2, 0x285: 0xc3, 0x286: 0xc4, 0x287: 0xc5, + 0x288: 0xc6, 0x289: 0xc7, 0x28a: 0xc8, 0x28b: 0xc2, 0x28c: 0xc3, 0x28d: 0xc4, 0x28e: 0xc5, 0x28f: 0xc6, + 0x290: 0xc7, 0x291: 0xc8, 0x292: 0xc2, 0x293: 0xc3, 0x294: 0xc4, 0x295: 0xc5, 0x296: 0xc6, 0x297: 0xc7, + 0x298: 0xc8, 0x299: 0xc2, 0x29a: 0xc3, 0x29b: 0xc4, 0x29c: 0xc5, 0x29d: 0xc6, 0x29e: 0xc7, 0x29f: 0xc8, + 0x2a0: 0xc2, 0x2a1: 0xc3, 0x2a2: 0xc4, 0x2a3: 0xc5, 0x2a4: 0xc6, 0x2a5: 0xc7, 0x2a6: 0xc8, 0x2a7: 0xc2, + 0x2a8: 0xc3, 0x2a9: 0xc4, 0x2aa: 0xc5, 0x2ab: 0xc6, 0x2ac: 0xc7, 0x2ad: 0xc8, 0x2ae: 0xc2, 0x2af: 0xc3, + 0x2b0: 0xc4, 0x2b1: 0xc5, 0x2b2: 0xc6, 0x2b3: 0xc7, 0x2b4: 0xc8, 0x2b5: 0xc2, 0x2b6: 0xc3, 0x2b7: 0xc4, + 0x2b8: 0xc5, 0x2b9: 0xc6, 0x2ba: 0xc7, 0x2bb: 0xc8, 0x2bc: 0xc2, 0x2bd: 0xc3, 0x2be: 0xc4, 0x2bf: 0xc5, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xc6, 0x2c1: 0xc7, 0x2c2: 0xc8, 0x2c3: 0xc2, 0x2c4: 0xc3, 0x2c5: 0xc4, 0x2c6: 0xc5, 0x2c7: 0xc6, + 0x2c8: 0xc7, 0x2c9: 0xc8, 0x2ca: 0xc2, 0x2cb: 0xc3, 0x2cc: 0xc4, 0x2cd: 0xc5, 0x2ce: 0xc6, 0x2cf: 0xc7, + 0x2d0: 0xc8, 0x2d1: 0xc2, 0x2d2: 0xc3, 0x2d3: 0xc4, 0x2d4: 0xc5, 0x2d5: 0xc6, 0x2d6: 0xc7, 0x2d7: 0xc8, + 0x2d8: 0xc2, 0x2d9: 0xc3, 0x2da: 0xc4, 0x2db: 0xc5, 0x2dc: 0xc6, 0x2dd: 0xc7, 0x2de: 0xc9, + // Block 0xc, offset 0x300 + 0x324: 0x31, 0x325: 0x32, 0x326: 0x33, 0x327: 0x34, + 0x328: 0x35, 0x329: 0x36, 0x32a: 0x37, 0x32b: 0x38, 0x32c: 0x39, 0x32d: 0x3a, 0x32e: 0x3b, 0x32f: 0x3c, + 0x330: 0x3d, 0x331: 0x3e, 0x332: 0x3f, 0x333: 0x40, 0x334: 0x41, 0x335: 0x42, 0x336: 0x43, 0x337: 0x44, + 0x338: 0x45, 0x339: 0x46, 0x33a: 0x47, 0x33b: 0x48, 0x33c: 0xca, 0x33d: 0x49, 0x33e: 0x4a, 0x33f: 0x4b, + // Block 0xd, offset 0x340 + 0x347: 0xcb, + 0x34b: 0xcc, 0x34d: 0xcd, + 0x35e: 0x4c, + 0x368: 0xce, 0x36b: 0xcf, + 0x374: 0xd0, + 0x37a: 0xd1, 0x37b: 0xd2, 0x37d: 0xd3, 0x37e: 0xd4, + // Block 0xe, offset 0x380 + 0x381: 0xd5, 0x382: 0xd6, 0x384: 0xd7, 0x385: 0xbc, 0x387: 0xd8, + 0x388: 0xd9, 0x38b: 0xda, 0x38c: 0xdb, 0x38d: 0xdc, + 0x391: 0xdd, 0x392: 0xde, 
0x393: 0xdf, 0x396: 0xe0, 0x397: 0xe1, + 0x398: 0xe2, 0x39a: 0xe3, 0x39c: 0xe4, + 0x3a0: 0xe5, 0x3a4: 0xe6, 0x3a5: 0xe7, 0x3a7: 0xe8, + 0x3a8: 0xe9, 0x3a9: 0xea, 0x3aa: 0xeb, + 0x3b0: 0xe2, 0x3b5: 0xec, 0x3b6: 0xed, + 0x3bd: 0xee, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xef, 0x3ec: 0xf0, + 0x3ff: 0xf1, + // Block 0x10, offset 0x400 + 0x432: 0xf2, + // Block 0x11, offset 0x440 + 0x445: 0xf3, 0x446: 0xf4, 0x447: 0xf5, + 0x449: 0xf6, + 0x450: 0xf7, 0x451: 0xf8, 0x452: 0xf9, 0x453: 0xfa, 0x454: 0xfb, 0x455: 0xfc, 0x456: 0xfd, 0x457: 0xfe, + 0x458: 0xff, 0x459: 0x100, 0x45a: 0x4d, 0x45b: 0x101, 0x45c: 0x102, 0x45d: 0x103, 0x45e: 0x104, 0x45f: 0x4e, + // Block 0x12, offset 0x480 + 0x480: 0x4f, 0x481: 0x50, 0x482: 0x105, 0x484: 0xf0, + 0x48a: 0x106, 0x48b: 0x107, + 0x493: 0x108, + 0x4a3: 0x109, 0x4a5: 0x10a, + 0x4b8: 0x51, 0x4b9: 0x52, 0x4ba: 0x53, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x54, 0x4c5: 0x10b, 0x4c6: 0x10c, + 0x4c8: 0x55, 0x4c9: 0x10d, + 0x4ef: 0x10e, + // Block 0x14, offset 0x500 + 0x520: 0x56, 0x521: 0x57, 0x522: 0x58, 0x523: 0x59, 0x524: 0x5a, 0x525: 0x5b, 0x526: 0x5c, 0x527: 0x5d, + 0x528: 0x5e, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfkcSparseOffset: 176 entries, 352 bytes +var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1c, 0x26, 0x36, 0x38, 0x3d, 0x48, 0x57, 0x64, 0x6c, 0x71, 0x76, 0x78, 0x7c, 0x84, 0x8b, 0x8e, 0x96, 0x9a, 0x9e, 0xa0, 0xa2, 0xab, 0xaf, 0xb6, 0xbb, 0xbe, 0xc8, 0xcb, 0xd2, 0xda, 0xde, 0xe0, 0xe4, 0xe8, 0xee, 0xff, 0x10b, 0x10d, 0x113, 0x115, 0x117, 0x119, 0x11b, 0x11d, 0x11f, 0x121, 0x124, 0x127, 0x129, 0x12c, 0x12f, 0x133, 0x139, 0x140, 0x149, 0x14b, 0x14e, 0x150, 0x15b, 0x166, 0x174, 0x182, 0x192, 0x1a0, 0x1a7, 0x1ad, 0x1bc, 0x1c0, 0x1c2, 0x1c6, 0x1c8, 0x1cb, 0x1cd, 0x1d0, 0x1d2, 0x1d5, 0x1d7, 0x1d9, 0x1db, 0x1e7, 0x1f1, 0x1fb, 0x1fe, 0x202, 0x204, 0x206, 0x20b, 0x20e, 0x211, 0x213, 0x215, 0x217, 0x219, 0x21f, 0x222, 0x227, 
0x229, 0x230, 0x236, 0x23c, 0x244, 0x24a, 0x250, 0x256, 0x25a, 0x25c, 0x25e, 0x260, 0x262, 0x268, 0x26b, 0x26d, 0x26f, 0x271, 0x277, 0x27b, 0x27f, 0x287, 0x28e, 0x291, 0x294, 0x296, 0x299, 0x2a1, 0x2a5, 0x2ac, 0x2af, 0x2b5, 0x2b7, 0x2b9, 0x2bc, 0x2be, 0x2c1, 0x2c6, 0x2c8, 0x2ca, 0x2cc, 0x2ce, 0x2d0, 0x2d3, 0x2d5, 0x2d7, 0x2d9, 0x2db, 0x2dd, 0x2df, 0x2ec, 0x2f6, 0x2f8, 0x2fa, 0x2fe, 0x303, 0x30f, 0x314, 0x31d, 0x323, 0x328, 0x32c, 0x331, 0x335, 0x345, 0x353, 0x361, 0x36f, 0x371, 0x373, 0x375, 0x379, 0x37b, 0x37e, 0x389, 0x38b, 0x395} + +// nfkcSparseValues: 919 entries, 3676 bytes +var nfkcSparseValues = [919]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0002, lo: 0x0d}, + {value: 0x0001, lo: 0xa0, hi: 0xa0}, + {value: 0x43b9, lo: 0xa8, hi: 0xa8}, + {value: 0x0083, lo: 0xaa, hi: 0xaa}, + {value: 0x43a5, lo: 0xaf, hi: 0xaf}, + {value: 0x0025, lo: 0xb2, hi: 0xb3}, + {value: 0x439b, lo: 0xb4, hi: 0xb4}, + {value: 0x0260, lo: 0xb5, hi: 0xb5}, + {value: 0x43d2, lo: 0xb8, hi: 0xb8}, + {value: 0x0023, lo: 0xb9, hi: 0xb9}, + {value: 0x009f, lo: 0xba, hi: 0xba}, + {value: 0x234c, lo: 0xbc, hi: 0xbc}, + {value: 0x2340, lo: 0xbd, hi: 0xbd}, + {value: 0x23e2, lo: 0xbe, hi: 0xbe}, + // Block 0x1, offset 0xe + {value: 0x0091, lo: 0x03}, + {value: 0x4823, lo: 0xa0, hi: 0xa1}, + {value: 0x4855, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x12 + {value: 0x0004, lo: 0x09}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x0091, lo: 0xb0, hi: 0xb0}, + {value: 0x0140, lo: 0xb1, hi: 0xb1}, + {value: 0x0095, lo: 0xb2, hi: 0xb2}, + {value: 0x00a5, lo: 0xb3, hi: 0xb3}, + {value: 0x0179, lo: 0xb4, hi: 0xb4}, + {value: 0x017f, lo: 0xb5, hi: 0xb5}, + {value: 0x018b, lo: 0xb6, hi: 0xb6}, + {value: 0x00af, lo: 0xb7, hi: 0xb8}, + // Block 0x3, offset 0x1c + {value: 0x000a, lo: 0x09}, + {value: 0x43af, lo: 0x98, hi: 0x98}, + {value: 0x43b4, lo: 0x99, hi: 0x9a}, + {value: 0x43d7, lo: 0x9b, hi: 0x9b}, + {value: 0x43a0, lo: 0x9c, hi: 0x9c}, + {value: 
0x43c3, lo: 0x9d, hi: 0x9d}, + {value: 0x0137, lo: 0xa0, hi: 0xa0}, + {value: 0x0099, lo: 0xa1, hi: 0xa1}, + {value: 0x00a7, lo: 0xa2, hi: 0xa3}, + {value: 0x01b8, lo: 0xa4, hi: 0xa4}, + // Block 0x4, offset 0x26 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x38e6, lo: 0x90, hi: 0x90}, + {value: 0x38f2, lo: 0x91, hi: 0x91}, + {value: 0x38e0, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3958, lo: 0x97, hi: 0x97}, + {value: 0x3922, lo: 0x9c, hi: 0x9c}, + {value: 0x390a, lo: 0x9d, hi: 0x9d}, + {value: 0x3934, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x395e, lo: 0xb6, hi: 0xb6}, + {value: 0x3964, lo: 0xb7, hi: 0xb7}, + // Block 0x5, offset 0x36 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x83, hi: 0x87}, + // Block 0x6, offset 0x38 + {value: 0x0001, lo: 0x04}, + {value: 0x8114, lo: 0x81, hi: 0x82}, + {value: 0x8133, lo: 0x84, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + {value: 0x810e, lo: 0x87, hi: 0x87}, + // Block 0x7, offset 0x3d + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x97}, + {value: 0x811a, lo: 0x98, hi: 0x98}, + {value: 0x811b, lo: 0x99, hi: 0x99}, + {value: 0x811c, lo: 0x9a, hi: 0x9a}, + {value: 0x3982, lo: 0xa2, hi: 0xa2}, + {value: 0x3988, lo: 0xa3, hi: 0xa3}, + {value: 0x3994, lo: 0xa4, hi: 0xa4}, + {value: 0x398e, lo: 0xa5, hi: 0xa5}, + {value: 0x399a, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x8, offset 0x48 + {value: 0x0000, lo: 0x0e}, + {value: 0x39ac, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x39a0, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x39a6, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8133, lo: 0x96, hi: 0x9c}, + {value: 0x8133, lo: 0x9f, hi: 0xa2}, + {value: 0x812e, lo: 0xa3, hi: 0xa3}, + 
{value: 0x8133, lo: 0xa4, hi: 0xa4}, + {value: 0x8133, lo: 0xa7, hi: 0xa8}, + {value: 0x812e, lo: 0xaa, hi: 0xaa}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + // Block 0x9, offset 0x57 + {value: 0x0000, lo: 0x0c}, + {value: 0x8120, lo: 0x91, hi: 0x91}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x812e, lo: 0xb1, hi: 0xb1}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb5, hi: 0xb6}, + {value: 0x812e, lo: 0xb7, hi: 0xb9}, + {value: 0x8133, lo: 0xba, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbc}, + {value: 0x8133, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbe, hi: 0xbe}, + {value: 0x8133, lo: 0xbf, hi: 0xbf}, + // Block 0xa, offset 0x64 + {value: 0x0005, lo: 0x07}, + {value: 0x8133, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x812e, lo: 0x82, hi: 0x83}, + {value: 0x812e, lo: 0x84, hi: 0x85}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x812e, lo: 0x88, hi: 0x89}, + {value: 0x8133, lo: 0x8a, hi: 0x8a}, + // Block 0xb, offset 0x6c + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0xab, hi: 0xb1}, + {value: 0x812e, lo: 0xb2, hi: 0xb2}, + {value: 0x8133, lo: 0xb3, hi: 0xb3}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0xc, offset 0x71 + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0x96, hi: 0x99}, + {value: 0x8133, lo: 0x9b, hi: 0xa3}, + {value: 0x8133, lo: 0xa5, hi: 0xa7}, + {value: 0x8133, lo: 0xa9, hi: 0xad}, + // Block 0xd, offset 0x76 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + // Block 0xe, offset 0x78 + {value: 0x0000, lo: 0x03}, + {value: 0x8133, lo: 0x98, hi: 0x98}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + {value: 0x8133, lo: 0x9c, hi: 0x9f}, + // Block 0xf, offset 0x7c + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x4019, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x4021, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + 
{value: 0x4029, lo: 0xb4, hi: 0xb4}, + {value: 0x9903, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x84 + {value: 0x0008, lo: 0x06}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x91, hi: 0x91}, + {value: 0x812e, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x93, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x94}, + {value: 0x465d, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x8b + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x8e + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2dd5, lo: 0x8b, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x469d, lo: 0x9c, hi: 0x9d}, + {value: 0x46ad, lo: 0x9f, hi: 0x9f}, + {value: 0x8133, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x96 + {value: 0x0000, lo: 0x03}, + {value: 0x46d5, lo: 0xb3, hi: 0xb3}, + {value: 0x46dd, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0x9a + {value: 0x0008, lo: 0x03}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x46b5, lo: 0x99, hi: 0x9b}, + {value: 0x46cd, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0x9e + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0xa0 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0xa2 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ded, lo: 0x88, hi: 0x88}, + {value: 0x2de5, lo: 0x8b, hi: 0x8b}, + {value: 0x2df5, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x46e5, lo: 0x9c, hi: 0x9c}, + {value: 0x46ed, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0xab + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2dfd, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0xaf + {value: 0x0000, lo: 0x06}, + {value: 0xa000, 
lo: 0x86, hi: 0x87}, + {value: 0x2e05, lo: 0x8a, hi: 0x8a}, + {value: 0x2e15, lo: 0x8b, hi: 0x8b}, + {value: 0x2e0d, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xb6 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x4031, lo: 0x88, hi: 0x88}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8121, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xbb + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0xbe + {value: 0x0000, lo: 0x09}, + {value: 0x2e1d, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2e25, lo: 0x87, hi: 0x87}, + {value: 0x2e2d, lo: 0x88, hi: 0x88}, + {value: 0x3091, lo: 0x8a, hi: 0x8a}, + {value: 0x2f19, lo: 0x8b, hi: 0x8b}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xc8 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xcb + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2e35, lo: 0x8a, hi: 0x8a}, + {value: 0x2e45, lo: 0x8b, hi: 0x8b}, + {value: 0x2e3d, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xd2 + {value: 0x6ab3, lo: 0x07}, + {value: 0x9905, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4039, lo: 0x9a, hi: 0x9a}, + {value: 0x3099, lo: 0x9c, hi: 0x9c}, + {value: 0x2f24, lo: 0x9d, hi: 0x9d}, + {value: 0x2e4d, lo: 0x9e, hi: 0x9f}, + // Block 0x20, offset 0xda + {value: 0x0000, lo: 0x03}, + {value: 0x2751, lo: 0xb3, hi: 0xb3}, + {value: 0x8123, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xde + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0x88, hi: 
0x8b}, + // Block 0x22, offset 0xe0 + {value: 0x0000, lo: 0x03}, + {value: 0x2766, lo: 0xb3, hi: 0xb3}, + {value: 0x8125, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x23, offset 0xe4 + {value: 0x0000, lo: 0x03}, + {value: 0x8126, lo: 0x88, hi: 0x8b}, + {value: 0x2758, lo: 0x9c, hi: 0x9c}, + {value: 0x275f, lo: 0x9d, hi: 0x9d}, + // Block 0x24, offset 0xe8 + {value: 0x0000, lo: 0x05}, + {value: 0x03fe, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x98, hi: 0x99}, + {value: 0x812e, lo: 0xb5, hi: 0xb5}, + {value: 0x812e, lo: 0xb7, hi: 0xb7}, + {value: 0x812c, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xee + {value: 0x0000, lo: 0x10}, + {value: 0x2774, lo: 0x83, hi: 0x83}, + {value: 0x277b, lo: 0x8d, hi: 0x8d}, + {value: 0x2782, lo: 0x92, hi: 0x92}, + {value: 0x2789, lo: 0x97, hi: 0x97}, + {value: 0x2790, lo: 0x9c, hi: 0x9c}, + {value: 0x276d, lo: 0xa9, hi: 0xa9}, + {value: 0x8127, lo: 0xb1, hi: 0xb1}, + {value: 0x8128, lo: 0xb2, hi: 0xb2}, + {value: 0x4bc5, lo: 0xb3, hi: 0xb3}, + {value: 0x8129, lo: 0xb4, hi: 0xb4}, + {value: 0x4bce, lo: 0xb5, hi: 0xb5}, + {value: 0x46f5, lo: 0xb6, hi: 0xb6}, + {value: 0x4735, lo: 0xb7, hi: 0xb7}, + {value: 0x46fd, lo: 0xb8, hi: 0xb8}, + {value: 0x4740, lo: 0xb9, hi: 0xb9}, + {value: 0x8128, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0xff + {value: 0x0000, lo: 0x0b}, + {value: 0x8128, lo: 0x80, hi: 0x80}, + {value: 0x4bd7, lo: 0x81, hi: 0x81}, + {value: 0x8133, lo: 0x82, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0x86, hi: 0x87}, + {value: 0x279e, lo: 0x93, hi: 0x93}, + {value: 0x27a5, lo: 0x9d, hi: 0x9d}, + {value: 0x27ac, lo: 0xa2, hi: 0xa2}, + {value: 0x27b3, lo: 0xa7, hi: 0xa7}, + {value: 0x27ba, lo: 0xac, hi: 0xac}, + {value: 0x2797, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0x10b + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0x10d + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2e55, lo: 
0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0x113 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + // Block 0x2a, offset 0x115 + {value: 0x0000, lo: 0x01}, + {value: 0x0402, lo: 0xbc, hi: 0xbc}, + // Block 0x2b, offset 0x117 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2c, offset 0x119 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2d, offset 0x11b + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2e, offset 0x11d + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2f, offset 0x11f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9d, hi: 0x9f}, + // Block 0x30, offset 0x121 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x94, hi: 0x95}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x31, offset 0x124 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x9d, hi: 0x9d}, + // Block 0x32, offset 0x127 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + // Block 0x33, offset 0x129 + {value: 0x0004, lo: 0x02}, + {value: 0x812f, lo: 0xb9, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbb}, + // Block 0x34, offset 0x12c + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x97, hi: 0x97}, + {value: 0x812e, lo: 0x98, hi: 0x98}, + // Block 0x35, offset 0x12f + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + {value: 0x8133, lo: 0xb5, hi: 0xbc}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x133 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + {value: 0x812e, lo: 0xb5, hi: 0xba}, + {value: 0x8133, lo: 0xbb, hi: 0xbc}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x37, offset 0x139 + {value: 0x0000, lo: 0x06}, + {value: 0x812e, lo: 0x80, hi: 0x80}, + 
{value: 0x8133, lo: 0x81, hi: 0x82}, + {value: 0x812e, lo: 0x83, hi: 0x84}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8a}, + {value: 0x8133, lo: 0x8b, hi: 0x8e}, + // Block 0x38, offset 0x140 + {value: 0x0000, lo: 0x08}, + {value: 0x2e9d, lo: 0x80, hi: 0x80}, + {value: 0x2ea5, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2ead, lo: 0x83, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xab, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xac}, + {value: 0x8133, lo: 0xad, hi: 0xb3}, + // Block 0x39, offset 0x149 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xaa, hi: 0xab}, + // Block 0x3a, offset 0x14b + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xa6, hi: 0xa6}, + {value: 0x8105, lo: 0xb2, hi: 0xb3}, + // Block 0x3b, offset 0x14e + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x3c, offset 0x150 + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812e, lo: 0x95, hi: 0x99}, + {value: 0x8133, lo: 0x9a, hi: 0x9b}, + {value: 0x812e, lo: 0x9c, hi: 0x9f}, + {value: 0x8133, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x8133, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb8, hi: 0xb9}, + // Block 0x3d, offset 0x15b + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00ec, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00fe, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3e, offset 0x166 + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x0532, lo: 0x91, hi: 0x91}, + {value: 0x43dc, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, 
hi: 0xa4}, + {value: 0x19a0, lo: 0xa5, hi: 0xa5}, + {value: 0x1c8c, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x27c1, lo: 0xb3, hi: 0xb3}, + {value: 0x2935, lo: 0xb4, hi: 0xb4}, + {value: 0x27c8, lo: 0xb6, hi: 0xb6}, + {value: 0x293f, lo: 0xb7, hi: 0xb7}, + {value: 0x199a, lo: 0xbc, hi: 0xbc}, + {value: 0x43aa, lo: 0xbe, hi: 0xbe}, + // Block 0x3f, offset 0x174 + {value: 0x0002, lo: 0x0d}, + {value: 0x1a60, lo: 0x87, hi: 0x87}, + {value: 0x1a5d, lo: 0x88, hi: 0x88}, + {value: 0x199d, lo: 0x89, hi: 0x89}, + {value: 0x2ac5, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x055e, lo: 0xbb, hi: 0xbb}, + {value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, hi: 0xbf}, + // Block 0x40, offset 0x182 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x055e, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, + {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 0x011f, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x1ac9, lo: 0xa8, hi: 0xa8}, + // Block 0x41, offset 0x192 + {value: 0x0000, lo: 0x0d}, + {value: 0x8133, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8133, lo: 0x9b, hi: 0x9c}, + {value: 0x8133, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8133, lo: 0xa7, hi: 0xa7}, + {value: 0x812e, lo: 0xa8, hi: 0xa8}, + {value: 0x8133, 
lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xaf}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + // Block 0x42, offset 0x1a0 + {value: 0x0007, lo: 0x06}, + {value: 0x22b0, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3cfa, lo: 0x9a, hi: 0x9b}, + {value: 0x3d08, lo: 0xae, hi: 0xae}, + // Block 0x43, offset 0x1a7 + {value: 0x000e, lo: 0x05}, + {value: 0x3d0f, lo: 0x8d, hi: 0x8e}, + {value: 0x3d16, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x44, offset 0x1ad + {value: 0x017a, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3d24, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3d2b, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3d32, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3d39, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3d40, lo: 0xa6, hi: 0xa6}, + {value: 0x27cf, lo: 0xac, hi: 0xad}, + {value: 0x27d6, lo: 0xaf, hi: 0xaf}, + {value: 0x2953, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x45, offset 0x1bc + {value: 0x0007, lo: 0x03}, + {value: 0x3da9, lo: 0xa0, hi: 0xa1}, + {value: 0x3dd3, lo: 0xa2, hi: 0xa3}, + {value: 0x3dfd, lo: 0xaa, hi: 0xad}, + // Block 0x46, offset 0x1c0 + {value: 0x0004, lo: 0x01}, + {value: 0x0586, lo: 0xa9, hi: 0xaa}, + // Block 0x47, offset 0x1c2 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x48, offset 0x1c6 + {value: 0x0000, lo: 0x01}, + {value: 0x2ad2, lo: 0x8c, hi: 0x8c}, + // Block 0x49, offset 0x1c8 + {value: 0x0266, lo: 0x02}, + {value: 0x1cbc, lo: 0xb4, hi: 0xb4}, + {value: 0x1a5a, lo: 0xb5, hi: 0xb6}, + // Block 0x4a, offset 
0x1cb + {value: 0x0000, lo: 0x01}, + {value: 0x461e, lo: 0x9c, hi: 0x9c}, + // Block 0x4b, offset 0x1cd + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4c, offset 0x1d0 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xaf, hi: 0xb1}, + // Block 0x4d, offset 0x1d2 + {value: 0x0000, lo: 0x02}, + {value: 0x057a, lo: 0xaf, hi: 0xaf}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x4e, offset 0x1d5 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa0, hi: 0xbf}, + // Block 0x4f, offset 0x1d7 + {value: 0x0000, lo: 0x01}, + {value: 0x0ebe, lo: 0x9f, hi: 0x9f}, + // Block 0x50, offset 0x1d9 + {value: 0x0000, lo: 0x01}, + {value: 0x172a, lo: 0xb3, hi: 0xb3}, + // Block 0x51, offset 0x1db + {value: 0x0004, lo: 0x0b}, + {value: 0x1692, lo: 0x80, hi: 0x82}, + {value: 0x16aa, lo: 0x83, hi: 0x83}, + {value: 0x16c2, lo: 0x84, hi: 0x85}, + {value: 0x16d2, lo: 0x86, hi: 0x89}, + {value: 0x16e6, lo: 0x8a, hi: 0x8c}, + {value: 0x16fa, lo: 0x8d, hi: 0x8d}, + {value: 0x1702, lo: 0x8e, hi: 0x8e}, + {value: 0x170a, lo: 0x8f, hi: 0x90}, + {value: 0x1716, lo: 0x91, hi: 0x93}, + {value: 0x1726, lo: 0x94, hi: 0x94}, + {value: 0x172e, lo: 0x95, hi: 0x95}, + // Block 0x52, offset 0x1e7 + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x8134, lo: 0xac, hi: 0xac}, + {value: 0x812f, lo: 0xad, hi: 0xad}, + {value: 0x8130, lo: 0xae, hi: 0xae}, + {value: 0x8130, lo: 0xaf, hi: 0xaf}, + {value: 0x05ae, lo: 0xb6, hi: 0xb6}, + {value: 0x0982, lo: 0xb8, hi: 0xba}, + // Block 0x53, offset 0x1f1 + {value: 0x0006, lo: 0x09}, + {value: 0x0406, lo: 0xb1, hi: 0xb1}, + {value: 0x040a, lo: 0xb2, hi: 0xb2}, + {value: 0x4b7c, lo: 0xb3, hi: 0xb3}, + {value: 0x040e, lo: 0xb4, hi: 0xb4}, + {value: 0x4b82, lo: 0xb5, hi: 0xb6}, + {value: 0x0412, lo: 0xb7, hi: 0xb7}, + {value: 0x0416, lo: 0xb8, hi: 0xb8}, + {value: 0x041a, lo: 0xb9, 
hi: 0xb9}, + {value: 0x4b8e, lo: 0xba, hi: 0xbf}, + // Block 0x54, offset 0x1fb + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + {value: 0x8133, lo: 0xb4, hi: 0xbd}, + // Block 0x55, offset 0x1fe + {value: 0x0000, lo: 0x03}, + {value: 0x02d8, lo: 0x9c, hi: 0x9c}, + {value: 0x02de, lo: 0x9d, hi: 0x9d}, + {value: 0x8133, lo: 0x9e, hi: 0x9f}, + // Block 0x56, offset 0x202 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb1}, + // Block 0x57, offset 0x204 + {value: 0x0000, lo: 0x01}, + {value: 0x173e, lo: 0xb0, hi: 0xb0}, + // Block 0x58, offset 0x206 + {value: 0x0006, lo: 0x04}, + {value: 0x0047, lo: 0xb2, hi: 0xb3}, + {value: 0x0063, lo: 0xb4, hi: 0xb4}, + {value: 0x00dd, lo: 0xb8, hi: 0xb8}, + {value: 0x00e9, lo: 0xb9, hi: 0xb9}, + // Block 0x59, offset 0x20b + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xac, hi: 0xac}, + // Block 0x5a, offset 0x20e + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xa0, hi: 0xb1}, + // Block 0x5b, offset 0x211 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xab, hi: 0xad}, + // Block 0x5c, offset 0x213 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x93, hi: 0x93}, + // Block 0x5d, offset 0x215 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb3, hi: 0xb3}, + // Block 0x5e, offset 0x217 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + // Block 0x5f, offset 0x219 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb7, hi: 0xb8}, + {value: 0x8133, lo: 0xbe, hi: 0xbf}, + // Block 0x60, offset 0x21f + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + // Block 0x61, offset 0x222 + {value: 0x000c, lo: 0x04}, + {value: 0x173a, lo: 0x9c, hi: 0x9d}, + {value: 0x014f, lo: 0x9e, hi: 0x9e}, + {value: 0x174a, lo: 0x9f, hi: 
0x9f}, + {value: 0x01a6, lo: 0xa9, hi: 0xa9}, + // Block 0x62, offset 0x227 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xad, hi: 0xad}, + // Block 0x63, offset 0x229 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x64, offset 0x230 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x65, offset 0x236 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x66, offset 0x23c + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x67, offset 0x244 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x68, offset 0x24a + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x69, offset 0x250 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, 
+ {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x6a, offset 0x256 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x6b, offset 0x25a + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6c, offset 0x25c + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0x6d, offset 0x25e + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xa0, hi: 0xa0}, + // Block 0x6e, offset 0x260 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb6, hi: 0xba}, + // Block 0x6f, offset 0x262 + {value: 0x002d, lo: 0x05}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + {value: 0x8133, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x70, offset 0x268 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xa5, hi: 0xa5}, + {value: 0x812e, lo: 0xa6, hi: 0xa6}, + // Block 0x71, offset 0x26b + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa4, hi: 0xa7}, + // Block 0x72, offset 0x26d + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + // Block 0x73, offset 0x26f + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbf}, + // Block 0x74, offset 0x271 + {value: 0x0000, lo: 0x05}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x8133, lo: 0x88, hi: 0x8a}, + {value: 0x812e, lo: 0x8b, hi: 0x8b}, + {value: 0x8133, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x8d, hi: 0x90}, + // Block 0x75, offset 0x277 + {value: 0x0005, lo: 0x03}, + {value: 0x8133, lo: 0x82, hi: 0x82}, + {value: 0x812e, lo: 0x83, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + // Block 0x76, offset 0x27b + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xb0, hi: 0xb0}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x77, offset 0x27f + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + 
{value: 0x4379, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4383, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x438d, lo: 0xab, hi: 0xab}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x78, offset 0x287 + {value: 0x0000, lo: 0x06}, + {value: 0x8133, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2eb5, lo: 0xae, hi: 0xae}, + {value: 0x2ebf, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8105, lo: 0xb3, hi: 0xb4}, + // Block 0x79, offset 0x28e + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x7a, offset 0x291 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb5, hi: 0xb5}, + {value: 0x8103, lo: 0xb6, hi: 0xb6}, + // Block 0x7b, offset 0x294 + {value: 0x0002, lo: 0x01}, + {value: 0x8103, lo: 0xa9, hi: 0xaa}, + // Block 0x7c, offset 0x296 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7d, offset 0x299 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ec9, lo: 0x8b, hi: 0x8b}, + {value: 0x2ed3, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8133, lo: 0xa6, hi: 0xac}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + // Block 0x7e, offset 0x2a1 + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x86, hi: 0x86}, + {value: 0x8133, lo: 0x9e, hi: 0x9e}, + // Block 0x7f, offset 0x2a5 + {value: 0x6a23, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2ee7, lo: 0xbb, hi: 0xbb}, + {value: 0x2edd, lo: 0xbc, hi: 0xbd}, + {value: 0x2ef1, lo: 0xbe, hi: 0xbe}, + // Block 0x80, offset 0x2ac + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x81, offset 
0x2af + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2efb, lo: 0xba, hi: 0xba}, + {value: 0x2f05, lo: 0xbb, hi: 0xbb}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x82, offset 0x2b5 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x80, hi: 0x80}, + // Block 0x83, offset 0x2b7 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x84, offset 0x2b9 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x85, offset 0x2bc + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xab, hi: 0xab}, + // Block 0x86, offset 0x2be + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb9, hi: 0xb9}, + {value: 0x8103, lo: 0xba, hi: 0xba}, + // Block 0x87, offset 0x2c1 + {value: 0x0000, lo: 0x04}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb5, hi: 0xb5}, + {value: 0x2f0f, lo: 0xb8, hi: 0xb8}, + {value: 0x8105, lo: 0xbd, hi: 0xbe}, + // Block 0x88, offset 0x2c6 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x89, offset 0x2c8 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + // Block 0x8a, offset 0x2ca + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x8b, offset 0x2cc + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x87, hi: 0x87}, + // Block 0x8c, offset 0x2ce + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x99, hi: 0x99}, + // Block 0x8d, offset 0x2d0 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0x82, hi: 0x82}, + {value: 0x8105, lo: 0x84, hi: 0x85}, + // Block 0x8e, offset 0x2d3 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x97, hi: 0x97}, + // Block 0x8f, offset 0x2d5 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x81, hi: 0x82}, + // Block 0x90, offset 0x2d7 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x91, offset 0x2d9 + {value: 0x0000, lo: 0x01}, + 
{value: 0x8133, lo: 0xb0, hi: 0xb6}, + // Block 0x92, offset 0x2db + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb0, hi: 0xb1}, + // Block 0x93, offset 0x2dd + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x94, offset 0x2df + {value: 0x0000, lo: 0x0c}, + {value: 0x470d, lo: 0x9e, hi: 0x9e}, + {value: 0x4717, lo: 0x9f, hi: 0x9f}, + {value: 0x474b, lo: 0xa0, hi: 0xa0}, + {value: 0x4759, lo: 0xa1, hi: 0xa1}, + {value: 0x4767, lo: 0xa2, hi: 0xa2}, + {value: 0x4775, lo: 0xa3, hi: 0xa3}, + {value: 0x4783, lo: 0xa4, hi: 0xa4}, + {value: 0x812c, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8131, lo: 0xad, hi: 0xad}, + {value: 0x812c, lo: 0xae, hi: 0xb2}, + {value: 0x812e, lo: 0xbb, hi: 0xbf}, + // Block 0x95, offset 0x2ec + {value: 0x0000, lo: 0x09}, + {value: 0x812e, lo: 0x80, hi: 0x82}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8b}, + {value: 0x8133, lo: 0xaa, hi: 0xad}, + {value: 0x4721, lo: 0xbb, hi: 0xbb}, + {value: 0x472b, lo: 0xbc, hi: 0xbc}, + {value: 0x4791, lo: 0xbd, hi: 0xbd}, + {value: 0x47ad, lo: 0xbe, hi: 0xbe}, + {value: 0x479f, lo: 0xbf, hi: 0xbf}, + // Block 0x96, offset 0x2f6 + {value: 0x0000, lo: 0x01}, + {value: 0x47bb, lo: 0x80, hi: 0x80}, + // Block 0x97, offset 0x2f8 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x82, hi: 0x84}, + // Block 0x98, offset 0x2fa + {value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x99, offset 0x2fe + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x9a, offset 0x303 + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, 
hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x9b, offset 0x30f + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x9c, offset 0x314 + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x9d, offset 0x31d + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x9e, offset 0x323 + {value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x9f, offset 0x328 + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0xa0, offset 0x32c + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // Block 0xa1, offset 0x331 + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0xa2, offset 0x335 + {value: 0x0003, lo: 0x0f}, + {value: 0x023c, lo: 0x80, hi: 0x80}, + {value: 0x0556, lo: 0x81, hi: 
0x81}, + {value: 0x023f, lo: 0x82, hi: 0x9a}, + {value: 0x0552, lo: 0x9b, hi: 0x9b}, + {value: 0x024b, lo: 0x9c, hi: 0x9c}, + {value: 0x0254, lo: 0x9d, hi: 0x9d}, + {value: 0x025a, lo: 0x9e, hi: 0x9e}, + {value: 0x027e, lo: 0x9f, hi: 0x9f}, + {value: 0x026f, lo: 0xa0, hi: 0xa0}, + {value: 0x026c, lo: 0xa1, hi: 0xa1}, + {value: 0x01f7, lo: 0xa2, hi: 0xb2}, + {value: 0x020c, lo: 0xb3, hi: 0xb3}, + {value: 0x022a, lo: 0xb4, hi: 0xba}, + {value: 0x0556, lo: 0xbb, hi: 0xbb}, + {value: 0x023f, lo: 0xbc, hi: 0xbf}, + // Block 0xa3, offset 0x345 + {value: 0x0003, lo: 0x0d}, + {value: 0x024b, lo: 0x80, hi: 0x94}, + {value: 0x0552, lo: 0x95, hi: 0x95}, + {value: 0x024b, lo: 0x96, hi: 0x96}, + {value: 0x0254, lo: 0x97, hi: 0x97}, + {value: 0x025a, lo: 0x98, hi: 0x98}, + {value: 0x027e, lo: 0x99, hi: 0x99}, + {value: 0x026f, lo: 0x9a, hi: 0x9a}, + {value: 0x026c, lo: 0x9b, hi: 0x9b}, + {value: 0x01f7, lo: 0x9c, hi: 0xac}, + {value: 0x020c, lo: 0xad, hi: 0xad}, + {value: 0x022a, lo: 0xae, hi: 0xb4}, + {value: 0x0556, lo: 0xb5, hi: 0xb5}, + {value: 0x023f, lo: 0xb6, hi: 0xbf}, + // Block 0xa4, offset 0x353 + {value: 0x0003, lo: 0x0d}, + {value: 0x025d, lo: 0x80, hi: 0x8e}, + {value: 0x0552, lo: 0x8f, hi: 0x8f}, + {value: 0x024b, lo: 0x90, hi: 0x90}, + {value: 0x0254, lo: 0x91, hi: 0x91}, + {value: 0x025a, lo: 0x92, hi: 0x92}, + {value: 0x027e, lo: 0x93, hi: 0x93}, + {value: 0x026f, lo: 0x94, hi: 0x94}, + {value: 0x026c, lo: 0x95, hi: 0x95}, + {value: 0x01f7, lo: 0x96, hi: 0xa6}, + {value: 0x020c, lo: 0xa7, hi: 0xa7}, + {value: 0x022a, lo: 0xa8, hi: 0xae}, + {value: 0x0556, lo: 0xaf, hi: 0xaf}, + {value: 0x023f, lo: 0xb0, hi: 0xbf}, + // Block 0xa5, offset 0x361 + {value: 0x0003, lo: 0x0d}, + {value: 0x026f, lo: 0x80, hi: 0x88}, + {value: 0x0552, lo: 0x89, hi: 0x89}, + {value: 0x024b, lo: 0x8a, hi: 0x8a}, + {value: 0x0254, lo: 0x8b, hi: 0x8b}, + {value: 0x025a, lo: 0x8c, hi: 0x8c}, + {value: 0x027e, lo: 0x8d, hi: 0x8d}, + {value: 0x026f, lo: 0x8e, hi: 0x8e}, + {value: 0x026c, lo: 
0x8f, hi: 0x8f}, + {value: 0x01f7, lo: 0x90, hi: 0xa0}, + {value: 0x020c, lo: 0xa1, hi: 0xa1}, + {value: 0x022a, lo: 0xa2, hi: 0xa8}, + {value: 0x0556, lo: 0xa9, hi: 0xa9}, + {value: 0x023f, lo: 0xaa, hi: 0xbf}, + // Block 0xa6, offset 0x36f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + // Block 0xa7, offset 0x371 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xae, hi: 0xae}, + // Block 0xa8, offset 0x373 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xac, hi: 0xaf}, + // Block 0xa9, offset 0x375 + {value: 0x0000, lo: 0x03}, + {value: 0x8134, lo: 0xac, hi: 0xad}, + {value: 0x812e, lo: 0xae, hi: 0xae}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + // Block 0xaa, offset 0x379 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x90, hi: 0x96}, + // Block 0xab, offset 0x37b + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x84, hi: 0x89}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0xac, offset 0x37e + {value: 0x0002, lo: 0x0a}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1a7e, lo: 0x8a, hi: 0x8a}, + {value: 0x1ab1, lo: 0x8b, hi: 0x8b}, + {value: 0x1acc, lo: 0x8c, hi: 0x8c}, + {value: 0x1ad2, lo: 0x8d, hi: 0x8d}, + {value: 0x1cf0, lo: 0x8e, hi: 0x8e}, + {value: 0x1ade, lo: 0x8f, hi: 0x8f}, + {value: 0x1aa8, lo: 0xaa, hi: 0xaa}, + {value: 0x1aab, lo: 0xab, hi: 0xab}, + {value: 0x1aae, lo: 0xac, hi: 0xac}, + // Block 0xad, offset 0x389 + {value: 0x0000, lo: 0x01}, + {value: 0x1a6c, lo: 0x90, hi: 0x90}, + // Block 0xae, offset 0x38b + {value: 0x0028, lo: 0x09}, + {value: 0x2999, lo: 0x80, hi: 0x80}, + {value: 0x295d, lo: 0x81, hi: 0x81}, + {value: 0x2967, lo: 0x82, hi: 0x82}, + {value: 0x297b, lo: 0x83, hi: 0x84}, + {value: 0x2985, lo: 0x85, hi: 0x86}, + {value: 0x2971, lo: 0x87, hi: 0x87}, + {value: 0x298f, lo: 0x88, hi: 0x88}, + {value: 0x0c6a, lo: 0x90, hi: 0x90}, + {value: 0x09e2, lo: 0x91, hi: 0x91}, + // Block 0xaf, offset 0x395 + {value: 0x0002, lo: 0x01}, + {value: 0x0021, lo: 0xb0, hi: 0xb9}, +} + +// recompMap: 7528 
bytes (entries only) +var recompMap map[uint32]rune +var recompMapOnce sync.Once + +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 
0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + 
"\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." 
+ // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + 
"\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + "\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + 
"\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + 
"\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + "\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + 
"\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 
+ "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + "\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" 
+ // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 
0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + "\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + 
// 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" 
+ // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." + // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" 
+ // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + 
"\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + "\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\x7f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 
0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\x7f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 
0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + "\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + 
"\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + "\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 
0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 
0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" + // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." 
+ // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + 
"\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + // 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" 
+ // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 
0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + 
"\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + "\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + 
"\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + 
"\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + 
"0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." 
+ // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "\x195\x190\x00\x01\x198" + // 0x19351930: 0x00011938 + "" + // Total size of tables: 56KB (57068 bytes) diff --git a/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-api.json b/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-api.json index 01928639ee..ef8e429e29 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-api.json @@ -118,7 +118,13 @@ "httpMethod": "POST", "id": "appengine.apps.create", "parameterOrder": [], - "parameters": {}, + "parameters": { + "parent": { + "description": "The project and location in which the application should be created, specified in the format projects/*/locations/*", + "location": "query", + "type": "string" + } + }, "path": "v1/apps", "request": { "$ref": "Application" @@ -1600,6 +1606,40 @@ "resources": { "applications": { "methods": { + "create": { + "description": "Creates an App Engine application for a Google Cloud Platform project. Required fields: id - The ID of the target Cloud Platform project. 
location - The region (https://cloud.google.com/appengine/docs/locations) where you want the App Engine application located.For more information about App Engine applications, see Managing Projects, Applications, and Billing (https://cloud.google.com/appengine/docs/standard/python/console/).", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/applications", + "httpMethod": "POST", + "id": "appengine.projects.locations.applications.create", + "parameterOrder": [ + "projectsId", + "locationsId" + ], + "parameters": { + "locationsId": { + "description": "Part of `parent`. See documentation of `projectsId`.", + "location": "path", + "required": true, + "type": "string" + }, + "projectsId": { + "description": "Part of `parent`. The project and location in which the application should be created, specified in the format projects/*/locations/*", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectsId}/locations/{locationsId}/applications", + "request": { + "$ref": "Application" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "get": { "description": "Gets information about an application.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}", @@ -1639,6 +1679,151 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only" ] + }, + "repair": { + "description": "Recreates the required App Engine features for the specified App Engine application, for example a Cloud Storage bucket or App Engine service account. Use this method if you receive an error message about a missing feature, for example, Error retrieving the App Engine service account. If you have deleted your App Engine service account, this will not be able to recreate it. 
Instead, you should attempt to use the IAM undelete API if possible at https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/undelete?apix_params=%7B\"name\"%3A\"projects%2F-%2FserviceAccounts%2Funique_id\"%2C\"resource\"%3A%7B%7D%7D . If the deletion was recent, the numeric ID can be found in the Cloud Console Activity Log.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}:repair", + "httpMethod": "POST", + "id": "appengine.projects.locations.applications.repair", + "parameterOrder": [ + "projectsId", + "locationsId", + "applicationsId" + ], + "parameters": { + "applicationsId": { + "description": "Part of `name`. See documentation of `projectsId`.", + "location": "path", + "required": true, + "type": "string" + }, + "locationsId": { + "description": "Part of `name`. See documentation of `projectsId`.", + "location": "path", + "required": true, + "type": "string" + }, + "projectsId": { + "description": "Part of `name`. Name of the application to repair. Example: apps/myapp", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}:repair", + "request": { + "$ref": "RepairApplicationRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "services": { + "methods": { + "get": { + "description": "Gets the current configuration of the specified service.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}/services/{servicesId}", + "httpMethod": "GET", + "id": "appengine.projects.locations.applications.services.get", + "parameterOrder": [ + "projectsId", + "locationsId", + "applicationsId", + "servicesId" + ], + "parameters": { + "applicationsId": { + "description": "Part of `name`. 
See documentation of `projectsId`.", + "location": "path", + "required": true, + "type": "string" + }, + "locationsId": { + "description": "Part of `name`. See documentation of `projectsId`.", + "location": "path", + "required": true, + "type": "string" + }, + "projectsId": { + "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default.", + "location": "path", + "required": true, + "type": "string" + }, + "servicesId": { + "description": "Part of `name`. See documentation of `projectsId`.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}/services/{servicesId}", + "response": { + "$ref": "Service" + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "list": { + "description": "Lists all the services in the application.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}/services", + "httpMethod": "GET", + "id": "appengine.projects.locations.applications.services.list", + "parameterOrder": [ + "projectsId", + "locationsId", + "applicationsId" + ], + "parameters": { + "applicationsId": { + "description": "Part of `parent`. See documentation of `projectsId`.", + "location": "path", + "required": true, + "type": "string" + }, + "locationsId": { + "description": "Part of `parent`. See documentation of `projectsId`.", + "location": "path", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Maximum results to return per page.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Continuation token for fetching the next page of results.", + "location": "query", + "type": "string" + }, + "projectsId": { + "description": "Part of `parent`. 
Name of the parent Application resource. Example: apps/myapp.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}/services", + "response": { + "$ref": "ListServicesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/appengine.admin", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + } + } } } } @@ -1647,7 +1832,7 @@ } } }, - "revision": "20230306", + "revision": "20230601", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { @@ -1733,7 +1918,8 @@ "type": "string" }, "codeBucket": { - "description": "Google Cloud Storage bucket that can be used for storing files associated with this application. This bucket is associated with the application and can be used by the gcloud deployment commands.@OutputOnly", + "description": "Output only. Google Cloud Storage bucket that can be used for storing files associated with this application. This bucket is associated with the application and can be used by the gcloud deployment commands.@OutputOnly", + "readOnly": true, "type": "string" }, "databaseType": { @@ -1753,7 +1939,8 @@ "type": "string" }, "defaultBucket": { - "description": "Google Cloud Storage bucket that can be used by this application to store content.@OutputOnly", + "description": "Output only. Google Cloud Storage bucket that can be used by this application to store content.@OutputOnly", + "readOnly": true, "type": "string" }, "defaultCookieExpiration": { @@ -1762,7 +1949,8 @@ "type": "string" }, "defaultHostname": { - "description": "Hostname used to reach this application, as resolved by App Engine.@OutputOnly", + "description": "Output only. 
Hostname used to reach this application, as resolved by App Engine.@OutputOnly", + "readOnly": true, "type": "string" }, "dispatchRules": { @@ -1777,7 +1965,8 @@ "description": "The feature specific settings to be used in the application." }, "gcrDomain": { - "description": "The Google Container Registry domain used for storing managed build docker images for this application.", + "description": "Output only. The Google Container Registry domain used for storing managed build docker images for this application.", + "readOnly": true, "type": "string" }, "iap": { @@ -1792,7 +1981,8 @@ "type": "string" }, "name": { - "description": "Full path to the Application resource in the API. Example: apps/myapp.@OutputOnly", + "description": "Output only. Full path to the Application resource in the API. Example: apps/myapp.@OutputOnly", + "readOnly": true, "type": "string" }, "serviceAccount": { @@ -1870,7 +2060,7 @@ "type": "object" }, "AuthorizedDomain": { - "description": "A domain that a user has been authorized to administer. To authorize use of a domain, verify ownership via Webmaster Central (https://www.google.com/webmasters/verification/home).", + "description": "A domain that a user has been authorized to administer. To authorize use of a domain, verify ownership via Search Console (https://search.google.com/search-console/welcome).", "id": "AuthorizedDomain", "properties": { "id": { @@ -2421,7 +2611,8 @@ "type": "string" }, "oauth2ClientSecretSha256": { - "description": "Hex-encoded SHA-256 hash of the client secret.@OutputOnly", + "description": "Output only. 
Hex-encoded SHA-256 hash of the client secret.@OutputOnly", + "readOnly": true, "type": "string" } }, @@ -2769,7 +2960,7 @@ "type": "object" }, "Location": { - "description": "A resource that represents Google Cloud Platform location.", + "description": "A resource that represents a Google Cloud location.", "id": "Location", "properties": { "displayName": { diff --git a/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-gen.go b/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-gen.go index c9ac8667f3..e668ec3e91 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/appengine/v1/appengine-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "appengine:v1" const apiName = "appengine" @@ -308,11 +309,23 @@ type ProjectsLocationsService struct { func NewProjectsLocationsApplicationsService(s *APIService) *ProjectsLocationsApplicationsService { rs := &ProjectsLocationsApplicationsService{s: s} + rs.Services = NewProjectsLocationsApplicationsServicesService(s) return rs } type ProjectsLocationsApplicationsService struct { s *APIService + + Services *ProjectsLocationsApplicationsServicesService +} + +func NewProjectsLocationsApplicationsServicesService(s *APIService) *ProjectsLocationsApplicationsServicesService { + rs := &ProjectsLocationsApplicationsServicesService{s: s} + return rs +} + +type ProjectsLocationsApplicationsServicesService struct { + s *APIService } // ApiConfigHandler: Google Cloud Endpoints @@ -431,10 +444,10 @@ type Application struct { // Google Account. AuthDomain string `json:"authDomain,omitempty"` - // CodeBucket: Google Cloud Storage bucket that can be used for storing - // files associated with this application. 
This bucket is associated - // with the application and can be used by the gcloud deployment - // commands.@OutputOnly + // CodeBucket: Output only. Google Cloud Storage bucket that can be used + // for storing files associated with this application. This bucket is + // associated with the application and can be used by the gcloud + // deployment commands.@OutputOnly CodeBucket string `json:"codeBucket,omitempty"` // DatabaseType: The type of the Cloud Firestore or Cloud Datastore @@ -447,16 +460,16 @@ type Application struct { // "CLOUD_DATASTORE_COMPATIBILITY" - Cloud Firestore in Datastore Mode DatabaseType string `json:"databaseType,omitempty"` - // DefaultBucket: Google Cloud Storage bucket that can be used by this - // application to store content.@OutputOnly + // DefaultBucket: Output only. Google Cloud Storage bucket that can be + // used by this application to store content.@OutputOnly DefaultBucket string `json:"defaultBucket,omitempty"` // DefaultCookieExpiration: Cookie expiration policy for this // application. DefaultCookieExpiration string `json:"defaultCookieExpiration,omitempty"` - // DefaultHostname: Hostname used to reach this application, as resolved - // by App Engine.@OutputOnly + // DefaultHostname: Output only. Hostname used to reach this + // application, as resolved by App Engine.@OutputOnly DefaultHostname string `json:"defaultHostname,omitempty"` // DispatchRules: HTTP path dispatch rules for requests to the @@ -468,8 +481,8 @@ type Application struct { // application. FeatureSettings *FeatureSettings `json:"featureSettings,omitempty"` - // GcrDomain: The Google Container Registry domain used for storing - // managed build docker images for this application. + // GcrDomain: Output only. The Google Container Registry domain used for + // storing managed build docker images for this application. 
GcrDomain string `json:"gcrDomain,omitempty"` Iap *IdentityAwareProxy `json:"iap,omitempty"` @@ -486,8 +499,8 @@ type Application struct { // (https://cloud.google.com/appengine/docs/locations). LocationId string `json:"locationId,omitempty"` - // Name: Full path to the Application resource in the API. Example: - // apps/myapp.@OutputOnly + // Name: Output only. Full path to the Application resource in the API. + // Example: apps/myapp.@OutputOnly Name string `json:"name,omitempty"` // ServiceAccount: The service account associated with the application. @@ -617,9 +630,8 @@ func (s *AuthorizedCertificate) MarshalJSON() ([]byte, error) { } // AuthorizedDomain: A domain that a user has been authorized to -// administer. To authorize use of a domain, verify ownership via -// Webmaster Central -// (https://www.google.com/webmasters/verification/home). +// administer. To authorize use of a domain, verify ownership via Search +// Console (https://search.google.com/search-console/welcome). type AuthorizedDomain struct { // Id: Fully qualified domain name of the domain authorized for use. // Example: example.com. @@ -1683,8 +1695,8 @@ type IdentityAwareProxy struct { // returned in the oauth2_client_secret_sha256 field.@InputOnly Oauth2ClientSecret string `json:"oauth2ClientSecret,omitempty"` - // Oauth2ClientSecretSha256: Hex-encoded SHA-256 hash of the client - // secret.@OutputOnly + // Oauth2ClientSecretSha256: Output only. Hex-encoded SHA-256 hash of + // the client secret.@OutputOnly Oauth2ClientSecretSha256 string `json:"oauth2ClientSecretSha256,omitempty"` // ForceSendFields is a list of field names (e.g. "Enabled") to @@ -2262,7 +2274,7 @@ func (s *LivenessCheck) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Location: A resource that represents Google Cloud Platform location. +// Location: A resource that represents a Google Cloud location. 
type Location struct { // DisplayName: The friendly name for this location, typically a nearby // city name. For example, "Tokyo". @@ -4233,6 +4245,14 @@ func (r *AppsService) Create(application *Application) *AppsCreateCall { return c } +// Parent sets the optional parameter "parent": The project and location +// in which the application should be created, specified in the format +// projects/*/locations/* +func (c *AppsCreateCall) Parent(parent string) *AppsCreateCall { + c.urlParams_.Set("parent", parent) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -4326,7 +4346,13 @@ func (c *AppsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "httpMethod": "POST", // "id": "appengine.apps.create", // "parameterOrder": [], - // "parameters": {}, + // "parameters": { + // "parent": { + // "description": "The project and location in which the application should be created, specified in the format projects/*/locations/*", + // "location": "query", + // "type": "string" + // } + // }, // "path": "v1/apps", // "request": { // "$ref": "Application" @@ -10824,6 +10850,166 @@ func (c *AppsServicesVersionsInstancesListCall) Pages(ctx context.Context, f fun } } +// method id "appengine.projects.locations.applications.create": + +type ProjectsLocationsApplicationsCreateCall struct { + s *APIService + projectsId string + locationsId string + application *Application + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates an App Engine application for a Google Cloud Platform +// project. Required fields: id - The ID of the target Cloud Platform +// project. 
location - The region +// (https://cloud.google.com/appengine/docs/locations) where you want +// the App Engine application located.For more information about App +// Engine applications, see Managing Projects, Applications, and Billing +// (https://cloud.google.com/appengine/docs/standard/python/console/). +// +// - locationsId: Part of `parent`. See documentation of `projectsId`. +// - projectsId: Part of `parent`. The project and location in which the +// application should be created, specified in the format +// projects/*/locations/*. +func (r *ProjectsLocationsApplicationsService) Create(projectsId string, locationsId string, application *Application) *ProjectsLocationsApplicationsCreateCall { + c := &ProjectsLocationsApplicationsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectsId = projectsId + c.locationsId = locationsId + c.application = application + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsApplicationsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsApplicationsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsApplicationsCreateCall) Context(ctx context.Context) *ProjectsLocationsApplicationsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsApplicationsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsApplicationsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.application) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectsId}/locations/{locationsId}/applications") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectsId": c.projectsId, + "locationsId": c.locationsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.projects.locations.applications.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsApplicationsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an App Engine application for a Google Cloud Platform project. Required fields: id - The ID of the target Cloud Platform project. location - The region (https://cloud.google.com/appengine/docs/locations) where you want the App Engine application located.For more information about App Engine applications, see Managing Projects, Applications, and Billing (https://cloud.google.com/appengine/docs/standard/python/console/).", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/applications", + // "httpMethod": "POST", + // "id": "appengine.projects.locations.applications.create", + // "parameterOrder": [ + // "projectsId", + // "locationsId" + // ], + // "parameters": { + // "locationsId": { + // "description": "Part of `parent`. See documentation of `projectsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectsId": { + // "description": "Part of `parent`. 
The project and location in which the application should be created, specified in the format projects/*/locations/*", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectsId}/locations/{locationsId}/applications", + // "request": { + // "$ref": "Application" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "appengine.projects.locations.applications.get": type ProjectsLocationsApplicationsGetCall struct { @@ -10993,3 +11179,574 @@ func (c *ProjectsLocationsApplicationsGetCall) Do(opts ...googleapi.CallOption) // } } + +// method id "appengine.projects.locations.applications.repair": + +type ProjectsLocationsApplicationsRepairCall struct { + s *APIService + projectsId string + locationsId string + applicationsId string + repairapplicationrequest *RepairApplicationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Repair: Recreates the required App Engine features for the specified +// App Engine application, for example a Cloud Storage bucket or App +// Engine service account. Use this method if you receive an error +// message about a missing feature, for example, Error retrieving the +// App Engine service account. If you have deleted your App Engine +// service account, this will not be able to recreate it. Instead, you +// should attempt to use the IAM undelete API if possible at +// https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/undelete?apix_params=%7B"name"%3A"projects%2F-%2FserviceAccounts%2Funique_id"%2C"resource"%3A%7B%7D%7D +// . If the deletion was recent, the numeric ID can be found in the +// Cloud Console Activity Log. +// +// - applicationsId: Part of `name`. See documentation of `projectsId`. +// - locationsId: Part of `name`. See documentation of `projectsId`. +// - projectsId: Part of `name`. 
Name of the application to repair. +// Example: apps/myapp. +func (r *ProjectsLocationsApplicationsService) Repair(projectsId string, locationsId string, applicationsId string, repairapplicationrequest *RepairApplicationRequest) *ProjectsLocationsApplicationsRepairCall { + c := &ProjectsLocationsApplicationsRepairCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectsId = projectsId + c.locationsId = locationsId + c.applicationsId = applicationsId + c.repairapplicationrequest = repairapplicationrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsApplicationsRepairCall) Fields(s ...googleapi.Field) *ProjectsLocationsApplicationsRepairCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsApplicationsRepairCall) Context(ctx context.Context) *ProjectsLocationsApplicationsRepairCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsApplicationsRepairCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsApplicationsRepairCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.repairapplicationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}:repair") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectsId": c.projectsId, + "locationsId": c.locationsId, + "applicationsId": c.applicationsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.projects.locations.applications.repair" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsApplicationsRepairCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Recreates the required App Engine features for the specified App Engine application, for example a Cloud Storage bucket or App Engine service account. Use this method if you receive an error message about a missing feature, for example, Error retrieving the App Engine service account. If you have deleted your App Engine service account, this will not be able to recreate it. Instead, you should attempt to use the IAM undelete API if possible at https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/undelete?apix_params=%7B\"name\"%3A\"projects%2F-%2FserviceAccounts%2Funique_id\"%2C\"resource\"%3A%7B%7D%7D . If the deletion was recent, the numeric ID can be found in the Cloud Console Activity Log.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}:repair", + // "httpMethod": "POST", + // "id": "appengine.projects.locations.applications.repair", + // "parameterOrder": [ + // "projectsId", + // "locationsId", + // "applicationsId" + // ], + // "parameters": { + // "applicationsId": { + // "description": "Part of `name`. See documentation of `projectsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "locationsId": { + // "description": "Part of `name`. 
See documentation of `projectsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectsId": { + // "description": "Part of `name`. Name of the application to repair. Example: apps/myapp", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}:repair", + // "request": { + // "$ref": "RepairApplicationRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "appengine.projects.locations.applications.services.get": + +type ProjectsLocationsApplicationsServicesGetCall struct { + s *APIService + projectsId string + locationsId string + applicationsId string + servicesId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the current configuration of the specified service. +// +// - applicationsId: Part of `name`. See documentation of `projectsId`. +// - locationsId: Part of `name`. See documentation of `projectsId`. +// - projectsId: Part of `name`. Name of the resource requested. +// Example: apps/myapp/services/default. +// - servicesId: Part of `name`. See documentation of `projectsId`. +func (r *ProjectsLocationsApplicationsServicesService) Get(projectsId string, locationsId string, applicationsId string, servicesId string) *ProjectsLocationsApplicationsServicesGetCall { + c := &ProjectsLocationsApplicationsServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectsId = projectsId + c.locationsId = locationsId + c.applicationsId = applicationsId + c.servicesId = servicesId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsLocationsApplicationsServicesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsApplicationsServicesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsApplicationsServicesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsApplicationsServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsApplicationsServicesGetCall) Context(ctx context.Context) *ProjectsLocationsApplicationsServicesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsApplicationsServicesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsApplicationsServicesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}/services/{servicesId}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectsId": c.projectsId, + "locationsId": c.locationsId, + "applicationsId": c.applicationsId, + "servicesId": c.servicesId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.projects.locations.applications.services.get" call. +// Exactly one of *Service or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Service.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsLocationsApplicationsServicesGetCall) Do(opts ...googleapi.CallOption) (*Service, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Service{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the current configuration of the specified service.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}/services/{servicesId}", + // "httpMethod": "GET", + // "id": "appengine.projects.locations.applications.services.get", + // "parameterOrder": [ + // 
"projectsId", + // "locationsId", + // "applicationsId", + // "servicesId" + // ], + // "parameters": { + // "applicationsId": { + // "description": "Part of `name`. See documentation of `projectsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "locationsId": { + // "description": "Part of `name`. See documentation of `projectsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectsId": { + // "description": "Part of `name`. Name of the resource requested. Example: apps/myapp/services/default.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "servicesId": { + // "description": "Part of `name`. See documentation of `projectsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}/services/{servicesId}", + // "response": { + // "$ref": "Service" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "appengine.projects.locations.applications.services.list": + +type ProjectsLocationsApplicationsServicesListCall struct { + s *APIService + projectsId string + locationsId string + applicationsId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all the services in the application. +// +// - applicationsId: Part of `parent`. See documentation of +// `projectsId`. +// - locationsId: Part of `parent`. See documentation of `projectsId`. +// - projectsId: Part of `parent`. Name of the parent Application +// resource. Example: apps/myapp. 
+func (r *ProjectsLocationsApplicationsServicesService) List(projectsId string, locationsId string, applicationsId string) *ProjectsLocationsApplicationsServicesListCall { + c := &ProjectsLocationsApplicationsServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectsId = projectsId + c.locationsId = locationsId + c.applicationsId = applicationsId + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum results to +// return per page. +func (c *ProjectsLocationsApplicationsServicesListCall) PageSize(pageSize int64) *ProjectsLocationsApplicationsServicesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Continuation token +// for fetching the next page of results. +func (c *ProjectsLocationsApplicationsServicesListCall) PageToken(pageToken string) *ProjectsLocationsApplicationsServicesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsApplicationsServicesListCall) Fields(s ...googleapi.Field) *ProjectsLocationsApplicationsServicesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsApplicationsServicesListCall) IfNoneMatch(entityTag string) *ProjectsLocationsApplicationsServicesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsApplicationsServicesListCall) Context(ctx context.Context) *ProjectsLocationsApplicationsServicesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsApplicationsServicesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsApplicationsServicesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}/services") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectsId": c.projectsId, + "locationsId": c.locationsId, + "applicationsId": c.applicationsId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "appengine.projects.locations.applications.services.list" call. +// Exactly one of *ListServicesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListServicesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsApplicationsServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListServicesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all the services in the application.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}/services", + // "httpMethod": "GET", + // "id": "appengine.projects.locations.applications.services.list", + // "parameterOrder": [ + // "projectsId", + // "locationsId", + // "applicationsId" + // ], + // "parameters": { + // "applicationsId": { + // "description": "Part of `parent`. See documentation of `projectsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "locationsId": { + // "description": "Part of `parent`. 
See documentation of `projectsId`.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "Maximum results to return per page.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Continuation token for fetching the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "projectsId": { + // "description": "Part of `parent`. Name of the parent Application resource. Example: apps/myapp.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectsId}/locations/{locationsId}/applications/{applicationsId}/services", + // "response": { + // "$ref": "ListServicesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/appengine.admin", + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ProjectsLocationsApplicationsServicesListCall) Pages(ctx context.Context, f func(*ListServicesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} diff --git a/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json b/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json index 443568523f..8ca1be473b 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json @@ -1011,7 +1011,7 @@ "type": "string" }, "filter": { - "description": "If set, then only the Routines matching this filter are returned. The current supported form is either \"routine_type:\" or \"routineType:\", where is a RoutineType enum. Example: \"routineType:SCALAR_FUNCTION\".", + "description": "If set, then only the Routines matching this filter are returned. The supported format is `routineType:{RoutineType}`, where `{RoutineType}` is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`.", "location": "query", "type": "string" }, @@ -1710,7 +1710,7 @@ } } }, - "revision": "20230218", + "revision": "20230617", "rootUrl": "https://bigquery.googleapis.com/", "schemas": { "AggregateClassificationMetrics": { @@ -3105,7 +3105,7 @@ "id": "EncryptionConfiguration", "properties": { "kmsKeyName": { - "description": "[Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.", + "description": "Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. 
The BigQuery Service Account associated with your project requires access to this encryption key.", "type": "string" } }, @@ -3433,6 +3433,10 @@ }, "type": "array" }, + "fileSetSpecType": { + "description": "[Optional] Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems.", + "type": "string" + }, "googleSheetsOptions": { "$ref": "GoogleSheetsOptions", "description": "[Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS." @@ -3445,6 +3449,10 @@ "description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.", "type": "boolean" }, + "jsonOptions": { + "$ref": "JsonOptions", + "description": "Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`." + }, "maxBadRecords": { "description": "[Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. 
This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.", "format": "int32", @@ -3642,6 +3650,13 @@ "HivePartitioningOptions": { "id": "HivePartitioningOptions", "properties": { + "fields": { + "description": "[Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field.", + "items": { + "type": "string" + }, + "type": "array" + }, "mode": { "description": "[Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet.", "type": "string" @@ -4161,6 +4176,10 @@ "description": "[Optional] The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\\t\" to specify a tab separator. The default value is a comma (',').", "type": "string" }, + "fileSetSpecType": { + "description": "[Optional] Specifies how source URIs are interpreted for constructing the file set to load. 
By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems.", + "type": "string" + }, "hivePartitioningOptions": { "$ref": "HivePartitioningOptions", "description": "[Optional] Options to configure hive partitioning support." @@ -4280,6 +4299,10 @@ }, "type": "array" }, + "continuous": { + "description": "[Optional] Specifies whether the query should be executed as a continuous query. The default value is false.", + "type": "boolean" + }, "createDisposition": { "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion.", "type": "string" @@ -4902,6 +4925,16 @@ "id": "JsonObject", "type": "object" }, + "JsonOptions": { + "id": "JsonOptions", + "properties": { + "encoding": { + "description": "[Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. 
The default value is UTF-8.", + "type": "string" + } + }, + "type": "object" + }, "JsonValue": { "id": "JsonValue", "type": "any" @@ -5124,6 +5157,7 @@ "DNN_CLASSIFIER", "TENSORFLOW", "DNN_REGRESSOR", + "XGBOOST", "BOOSTED_TREE_REGRESSOR", "BOOSTED_TREE_CLASSIFIER", "ARIMA", @@ -5134,8 +5168,11 @@ "DNN_LINEAR_COMBINED_REGRESSOR", "AUTOENCODER", "ARIMA_PLUS", + "ARIMA_PLUS_XREG", "RANDOM_FOREST_REGRESSOR", - "RANDOM_FOREST_CLASSIFIER" + "RANDOM_FOREST_CLASSIFIER", + "TENSORFLOW_LITE", + "ONNX" ], "enumDescriptions": [ "", @@ -5146,6 +5183,7 @@ "DNN classifier model.", "An imported TensorFlow model.", "DNN regressor model.", + "An imported XGBoost model.", "Boosted tree regressor model.", "Boosted tree classifier model.", "ARIMA model.", @@ -5156,8 +5194,11 @@ "Wide-and-deep regressor model.", "Autoencoder model.", "New name for the ARIMA model.", - "Random Forest regressor model.", - "Random Forest classifier model." + "ARIMA with external regressors.", + "Random forest regressor model.", + "Random forest classifier model.", + "An imported TensorFlow Lite model.", + "An imported ONNX model." ], "readOnly": true, "type": "string" @@ -5171,6 +5212,11 @@ "readOnly": true, "type": "array" }, + "remoteModelInfo": { + "$ref": "RemoteModelInfo", + "description": "Output only. Remote model info", + "readOnly": true + }, "trainingRuns": { "description": "Information for all training runs in increasing order of start_time.", "items": { @@ -5216,15 +5262,15 @@ "id": "ModelReference", "properties": { "datasetId": { - "description": "[Required] The ID of the dataset containing this model.", + "description": "Required. The ID of the dataset containing this model.", "type": "string" }, "modelId": { - "description": "[Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.", + "description": "Required. The ID of the model. 
The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.", "type": "string" }, "projectId": { - "description": "[Required] The ID of the project containing this model.", + "description": "Required. The ID of the project containing this model.", "type": "string" } }, @@ -5472,6 +5518,10 @@ }, "type": "array" }, + "continuous": { + "description": "[Optional] Specifies whether the query should be executed as a continuous query. The default value is false.", + "type": "boolean" + }, "createSession": { "description": "If true, creates a new session, where session id will be a server generated random id. If false, runs query with an existing session_id passed in ConnectionProperty, otherwise runs query in non-session mode.", "type": "boolean" @@ -5777,6 +5827,46 @@ }, "type": "object" }, + "RemoteModelInfo": { + "description": "Remote Model Info", + "id": "RemoteModelInfo", + "properties": { + "connection": { + "description": "Output only. Fully qualified name of the user-provided connection object of the remote model. Format: ```\"projects/{project_id}/locations/{location_id}/connections/{connection_id}\"```", + "readOnly": true, + "type": "string" + }, + "endpoint": { + "description": "Output only. The endpoint for remote model.", + "readOnly": true, + "type": "string" + }, + "maxBatchingRows": { + "description": "Output only. Max number of rows in each batch sent to the remote service. If unset, the number of rows in each batch is set dynamically.", + "format": "int64", + "readOnly": true, + "type": "string" + }, + "remoteServiceType": { + "description": "Output only. The remote service type for remote model.", + "enum": [ + "REMOTE_SERVICE_TYPE_UNSPECIFIED", + "CLOUD_AI_TRANSLATE_V3", + "CLOUD_AI_VISION_V1", + "CLOUD_AI_NATURAL_LANGUAGE_V1" + ], + "enumDescriptions": [ + "Unspecified remote service type.", + "V3 Cloud AI Translation API. 
See more details at [Cloud Translation API] (https://cloud.google.com/translate/docs/reference/rest).", + "V1 Cloud AI Vision API See more details at [Cloud Vision API] (https://cloud.google.com/vision/docs/reference/rest).", + "V1 Cloud AI Natural Language API. See more details at [REST Resource: documents](https://cloud.google.com/natural-language/docs/reference/rest/v1/documents)." + ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "Routine": { "description": "A user-defined function or a stored procedure.", "id": "Routine", @@ -5834,13 +5924,17 @@ "LANGUAGE_UNSPECIFIED", "SQL", "JAVASCRIPT", - "PYTHON" + "PYTHON", + "JAVA", + "SCALA" ], "enumDescriptions": [ "", "SQL language.", "JavaScript language.", - "Python language." + "Python language.", + "Java language.", + "Scala language." ], "type": "string" }, @@ -5872,13 +5966,15 @@ "ROUTINE_TYPE_UNSPECIFIED", "SCALAR_FUNCTION", "PROCEDURE", - "TABLE_VALUED_FUNCTION" + "TABLE_VALUED_FUNCTION", + "AGGREGATE_FUNCTION" ], "enumDescriptions": [ "", - "Non-builtin permanent scalar function.", + "Non-built-in persistent scalar function.", "Stored procedure.", - "Non-builtin permanent TVF." + "Non-built-in persistent TVF.", + "Non-built-in persistent aggregate function." ], "type": "string" }, @@ -5897,15 +5993,15 @@ "id": "RoutineReference", "properties": { "datasetId": { - "description": "[Required] The ID of the dataset containing this routine.", + "description": "Required. The ID of the dataset containing this routine.", "type": "string" }, "projectId": { - "description": "[Required] The ID of the project containing this routine.", + "description": "Required. The ID of the project containing this routine.", "type": "string" }, "routineId": { - "description": "[Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.", + "description": "Required. The ID of the routine. 
The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.", "type": "string" } }, @@ -5965,19 +6061,19 @@ "id": "RowAccessPolicyReference", "properties": { "datasetId": { - "description": "[Required] The ID of the dataset containing this row access policy.", + "description": "Required. The ID of the dataset containing this row access policy.", "type": "string" }, "policyId": { - "description": "[Required] The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.", + "description": "Required. The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.", "type": "string" }, "projectId": { - "description": "[Required] The ID of the project containing this row access policy.", + "description": "Required. The ID of the project containing this row access policy.", "type": "string" }, "tableId": { - "description": "[Required] The ID of the table containing this row access policy.", + "description": "Required. The ID of the table containing this row access policy.", "type": "string" } }, @@ -6149,6 +6245,10 @@ }, "type": "array" }, + "mainClass": { + "description": "The fully qualified name of a class in jar_uris, for example, com.example.wordcount. Exactly one of main_class and main_jar_uri field should be set for Java/Scala language type.", + "type": "string" + }, "mainFileUri": { "description": "The main file/jar URI of the Spark application. Exactly one of the definition_body field and the main_file_uri field must be set for Python. Exactly one of main_class and main_file_uri field should be set for Java/Scala language type.", "type": "string" @@ -6157,7 +6257,7 @@ "additionalProperties": { "type": "string" }, - "description": "Configuration properties as a set of key/value pairs, which will be passed on to the Spark application. 
For more information, see [Apache Spark](https://spark.apache.org/docs/latest/index.html).", + "description": "Configuration properties as a set of key/value pairs, which will be passed on to the Spark application. For more information, see [Apache Spark](https://spark.apache.org/docs/latest/index.html) and the [procedure option list](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#procedure_option_list).", "type": "object" }, "pyFileUris": { @@ -6212,7 +6312,7 @@ "description": "The fields of this struct, in order, if type_kind = \"STRUCT\"." }, "typeKind": { - "description": "Required. The top level type of this field. Can be any standard SQL data type (e.g., \"INT64\", \"DATE\", \"ARRAY\").", + "description": "Required. The top level type of this field. Can be any GoogleSQL data type (e.g., \"INT64\", \"DATE\", \"ARRAY\").", "enum": [ "TYPE_KIND_UNSPECIFIED", "INT64", @@ -6419,62 +6519,62 @@ "$ref": "ModelDefinition", "description": "[Output-only, Beta] Present iff this table represents a ML model. Describes the training information for the model, and it is required to run 'PREDICT' queries." }, - "numBytes": { - "description": "[Output-only] The size of this table in bytes, excluding any data in the streaming buffer.", + "numActiveLogicalBytes": { + "description": "[Output-only] Number of logical bytes that are less than 90 days old.", "format": "int64", "type": "string" }, - "numLongTermBytes": { - "description": "[Output-only] The number of bytes in the table that are considered \"long-term storage\".", + "numActivePhysicalBytes": { + "description": "[Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", "format": "int64", "type": "string" }, - "numPhysicalBytes": { - "description": "[Output-only] [TrustedTester] The physical size of this table in bytes, excluding any data in the streaming buffer. 
This includes compression and storage used for time travel.", + "numBytes": { + "description": "[Output-only] The size of this table in bytes, excluding any data in the streaming buffer.", "format": "int64", "type": "string" }, - "numRows": { - "description": "[Output-only] The number of rows of data in this table, excluding any data in the streaming buffer.", - "format": "uint64", + "numLongTermBytes": { + "description": "[Output-only] The number of bytes in the table that are considered \"long-term storage\".", + "format": "int64", "type": "string" }, - "num_active_logical_bytes": { - "description": "[Output-only] Number of logical bytes that are less than 90 days old.", + "numLongTermLogicalBytes": { + "description": "[Output-only] Number of logical bytes that are more than 90 days old.", "format": "int64", "type": "string" }, - "num_active_physical_bytes": { - "description": "[Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", + "numLongTermPhysicalBytes": { + "description": "[Output-only] Number of physical bytes more than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", "format": "int64", "type": "string" }, - "num_long_term_logical_bytes": { - "description": "[Output-only] Number of logical bytes that are more than 90 days old.", + "numPartitions": { + "description": "[Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", "format": "int64", "type": "string" }, - "num_long_term_physical_bytes": { - "description": "[Output-only] Number of physical bytes more than 90 days old. 
This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", + "numPhysicalBytes": { + "description": "[Output-only] [TrustedTester] The physical size of this table in bytes, excluding any data in the streaming buffer. This includes compression and storage used for time travel.", "format": "int64", "type": "string" }, - "num_partitions": { - "description": "[Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", - "format": "int64", + "numRows": { + "description": "[Output-only] The number of rows of data in this table, excluding any data in the streaming buffer.", + "format": "uint64", "type": "string" }, - "num_time_travel_physical_bytes": { + "numTimeTravelPhysicalBytes": { "description": "[Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", "format": "int64", "type": "string" }, - "num_total_logical_bytes": { + "numTotalLogicalBytes": { "description": "[Output-only] Total number of logical bytes in the table or materialized view.", "format": "int64", "type": "string" }, - "num_total_physical_bytes": { + "numTotalPhysicalBytes": { "description": "[Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", "format": "int64", "type": "string" @@ -6504,6 +6604,10 @@ "$ref": "Streamingbuffer", "description": "[Output-only] Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer." }, + "tableConstraints": { + "$ref": "TableConstraints", + "description": "[Optional] The table constraints on the table." 
+ }, "tableReference": { "$ref": "TableReference", "description": "[Required] Reference describing the ID of this table." @@ -6532,6 +6636,64 @@ }, "type": "object" }, + "TableConstraints": { + "id": "TableConstraints", + "properties": { + "foreignKeys": { + "description": "[Optional] The foreign keys of the tables.", + "items": { + "properties": { + "columnReferences": { + "items": { + "properties": { + "referencedColumn": { + "type": "string" + }, + "referencingColumn": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "name": { + "type": "string" + }, + "referencedTable": { + "properties": { + "datasetId": { + "type": "string" + }, + "projectId": { + "type": "string" + }, + "tableId": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "primaryKey": { + "description": "[Optional] The primary key of the table.", + "properties": { + "columns": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, "TableDataInsertAllRequest": { "id": "TableDataInsertAllRequest", "properties": { @@ -6926,12 +7088,21 @@ "description": "If true, detect step changes and make data adjustment in the input time series.", "type": "boolean" }, + "approxGlobalFeatureContrib": { + "description": "Whether to use approximate feature contribution method in XGBoost model explanation for global explain.", + "type": "boolean" + }, "autoArima": { "description": "Whether to enable auto ARIMA or not.", "type": "boolean" }, "autoArimaMaxOrder": { - "description": "The max value of non-seasonal p and q.", + "description": "The max value of the sum of non-seasonal p and q.", + "format": "int64", + "type": "string" + }, + "autoArimaMinOrder": { + "description": "The min value of the sum of non-seasonal p and q.", "format": "int64", "type": "string" }, @@ -7331,6 +7502,10 @@ }, "type": "array" }, + "instanceWeightColumn": { + "description": "Name of the 
instance weight column for training data. This column isn't be used as a feature.", + "type": "string" + }, "integratedGradientsNumSteps": { "description": "Number of integral steps for the integrated gradients explain method.", "format": "int64", @@ -7493,10 +7668,6 @@ ], "type": "string" }, - "preserveInputStructs": { - "description": "Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b.", - "type": "boolean" - }, "sampledShapleyNumPaths": { "description": "Number of paths for the sampled Shapley explain method.", "format": "int64", @@ -7507,6 +7678,10 @@ "format": "double", "type": "number" }, + "tfVersion": { + "description": "Based on the selected TF version, the corresponding docker image is used to train external models.", + "type": "string" + }, "timeSeriesDataColumn": { "description": "Column to be designated as time series data for ARIMA model.", "type": "string" @@ -7566,6 +7741,10 @@ "warmStart": { "description": "Whether to train a model from the last checkpoint.", "type": "boolean" + }, + "xgboostVersion": { + "description": "User-selected XGBoost versions for training of XGBoost models.", + "type": "string" } }, "type": "object" diff --git a/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go b/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go index 90843ff7ef..5ff46738af 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go @@ -77,6 +77,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "bigquery:v2" const apiName = "bigquery" @@ -446,14 +447,24 @@ func (s *ArimaCoefficients) MarshalJSON() ([]byte, error) 
{ func (s *ArimaCoefficients) UnmarshalJSON(data []byte) error { type NoMethod ArimaCoefficients var s1 struct { - InterceptCoefficient gensupport.JSONFloat64 `json:"interceptCoefficient"` + AutoRegressiveCoefficients []gensupport.JSONFloat64 `json:"autoRegressiveCoefficients"` + InterceptCoefficient gensupport.JSONFloat64 `json:"interceptCoefficient"` + MovingAverageCoefficients []gensupport.JSONFloat64 `json:"movingAverageCoefficients"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } + s.AutoRegressiveCoefficients = make([]float64, len(s1.AutoRegressiveCoefficients)) + for i := range s1.AutoRegressiveCoefficients { + s.AutoRegressiveCoefficients[i] = float64(s1.AutoRegressiveCoefficients[i]) + } s.InterceptCoefficient = float64(s1.InterceptCoefficient) + s.MovingAverageCoefficients = make([]float64, len(s1.MovingAverageCoefficients)) + for i := range s1.MovingAverageCoefficients { + s.MovingAverageCoefficients[i] = float64(s1.MovingAverageCoefficients[i]) + } return nil } @@ -2656,6 +2667,23 @@ func (s *DoubleCandidates) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +func (s *DoubleCandidates) UnmarshalJSON(data []byte) error { + type NoMethod DoubleCandidates + var s1 struct { + Candidates []gensupport.JSONFloat64 `json:"candidates"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Candidates = make([]float64, len(s1.Candidates)) + for i := range s1.Candidates { + s.Candidates[i] = float64(s1.Candidates[i]) + } + return nil +} + // DoubleHparamSearchSpace: Search space for a double hyperparameter. type DoubleHparamSearchSpace struct { // Candidates: Candidates of the double hyperparameter. 
@@ -2735,7 +2763,7 @@ func (s *DoubleRange) UnmarshalJSON(data []byte) error { } type EncryptionConfiguration struct { - // KmsKeyName: [Optional] Describes the Cloud KMS encryption key that + // KmsKeyName: Optional. Describes the Cloud KMS encryption key that // will be used to protect destination BigQuery table. The BigQuery // Service Account associated with your project requires access to this // encryption key. @@ -3234,6 +3262,12 @@ type ExternalDataConfiguration struct { // formats. DecimalTargetTypes []string `json:"decimalTargetTypes,omitempty"` + // FileSetSpecType: [Optional] Specifies how source URIs are interpreted + // for constructing the file set to load. By default source URIs are + // expanded against the underlying storage. Other options include + // specifying manifest files. Only applicable to object storage systems. + FileSetSpecType string `json:"fileSetSpecType,omitempty"` + // GoogleSheetsOptions: [Optional] Additional options if sourceFormat is // set to GOOGLE_SHEETS. GoogleSheetsOptions *GoogleSheetsOptions `json:"googleSheetsOptions,omitempty"` @@ -3254,6 +3288,10 @@ type ExternalDataConfiguration struct { // Avro: This setting is ignored. IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"` + // JsonOptions: Additional properties to set if `sourceFormat` is set to + // `NEWLINE_DELIMITED_JSON`. + JsonOptions *JsonOptions `json:"jsonOptions,omitempty"` + // MaxBadRecords: [Optional] The maximum number of bad records that // BigQuery can ignore when reading data. If the number of bad records // exceeds this value, an invalid error is returned in the job result. @@ -3649,6 +3687,14 @@ func (s *GoogleSheetsOptions) MarshalJSON() ([]byte, error) { } type HivePartitioningOptions struct { + // Fields: [Output-only] For permanent external tables, this field is + // populated with the hive partition keys in the order they were + // inferred. 
The types of the partition keys can be deduced by checking + // the table schema (which will include the partition keys). Not every + // API will populate this field in the output. For example, Tables.Get + // will populate it, but Tables.List will not contain this field. + Fields []string `json:"fields,omitempty"` + // Mode: [Optional] When set, what mode of hive partitioning to use when // reading data. The following modes are supported. (1) AUTO: // automatically infer partition key name(s) and type(s). (2) STRINGS: @@ -3680,7 +3726,7 @@ type HivePartitioningOptions struct { // slash does not matter). SourceUriPrefix string `json:"sourceUriPrefix,omitempty"` - // ForceSendFields is a list of field names (e.g. "Mode") to + // ForceSendFields is a list of field names (e.g. "Fields") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -3688,7 +3734,7 @@ type HivePartitioningOptions struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Mode") to include in API + // NullFields is a list of field names (e.g. "Fields") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -4443,6 +4489,12 @@ type JobConfigurationLoad struct { // specify a tab separator. The default value is a comma (','). FieldDelimiter string `json:"fieldDelimiter,omitempty"` + // FileSetSpecType: [Optional] Specifies how source URIs are interpreted + // for constructing the file set to load. By default source URIs are + // expanded against the underlying storage. Other options include + // specifying manifest files. Only applicable to object storage systems. 
+ FileSetSpecType string `json:"fileSetSpecType,omitempty"` + // HivePartitioningOptions: [Optional] Options to configure hive // partitioning support. HivePartitioningOptions *HivePartitioningOptions `json:"hivePartitioningOptions,omitempty"` @@ -4631,6 +4683,10 @@ type JobConfigurationQuery struct { // ConnectionProperties: Connection properties. ConnectionProperties []*ConnectionProperty `json:"connectionProperties,omitempty"` + // Continuous: [Optional] Specifies whether the query should be executed + // as a continuous query. The default value is false. + Continuous bool `json:"continuous,omitempty"` + // CreateDisposition: [Optional] Specifies whether the job is allowed to // create new tables. The following values are supported: // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the @@ -5508,6 +5564,35 @@ func (s *JobStatus) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type JsonOptions struct { + // Encoding: [Optional] The character encoding of the data. The + // supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and + // UTF-32LE. The default value is UTF-8. + Encoding string `json:"encoding,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Encoding") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Encoding") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *JsonOptions) MarshalJSON() ([]byte, error) { + type NoMethod JsonOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type JsonValue interface{} type ListModelsResponse struct { @@ -5828,6 +5913,7 @@ type Model struct { // "DNN_CLASSIFIER" - DNN classifier model. // "TENSORFLOW" - An imported TensorFlow model. // "DNN_REGRESSOR" - DNN regressor model. + // "XGBOOST" - An imported XGBoost model. // "BOOSTED_TREE_REGRESSOR" - Boosted tree regressor model. // "BOOSTED_TREE_CLASSIFIER" - Boosted tree classifier model. // "ARIMA" - ARIMA model. @@ -5838,8 +5924,11 @@ type Model struct { // "DNN_LINEAR_COMBINED_REGRESSOR" - Wide-and-deep regressor model. // "AUTOENCODER" - Autoencoder model. // "ARIMA_PLUS" - New name for the ARIMA model. - // "RANDOM_FOREST_REGRESSOR" - Random Forest regressor model. - // "RANDOM_FOREST_CLASSIFIER" - Random Forest classifier model. + // "ARIMA_PLUS_XREG" - ARIMA with external regressors. + // "RANDOM_FOREST_REGRESSOR" - Random forest regressor model. + // "RANDOM_FOREST_CLASSIFIER" - Random forest classifier model. + // "TENSORFLOW_LITE" - An imported TensorFlow Lite model. + // "ONNX" - An imported ONNX model. ModelType string `json:"modelType,omitempty"` // OptimalTrialIds: Output only. For single-objective hyperparameter @@ -5852,6 +5941,9 @@ type Model struct { // trial_id. OptimalTrialIds googleapi.Int64s `json:"optimalTrialIds,omitempty"` + // RemoteModelInfo: Output only. Remote model info + RemoteModelInfo *RemoteModelInfo `json:"remoteModelInfo,omitempty"` + // TrainingRuns: Information for all training runs in increasing order // of start_time. 
TrainingRuns []*TrainingRun `json:"trainingRuns,omitempty"` @@ -5954,15 +6046,15 @@ func (s *ModelDefinitionModelOptions) MarshalJSON() ([]byte, error) { } type ModelReference struct { - // DatasetId: [Required] The ID of the dataset containing this model. + // DatasetId: Required. The ID of the dataset containing this model. DatasetId string `json:"datasetId,omitempty"` - // ModelId: [Required] The ID of the model. The ID must contain only + // ModelId: Required. The ID of the model. The ID must contain only // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum // length is 1,024 characters. ModelId string `json:"modelId,omitempty"` - // ProjectId: [Required] The ID of the project containing this model. + // ProjectId: Required. The ID of the project containing this model. ProjectId string `json:"projectId,omitempty"` // ForceSendFields is a list of field names (e.g. "DatasetId") to @@ -6478,6 +6570,10 @@ type QueryRequest struct { // ConnectionProperties: Connection properties. ConnectionProperties []*ConnectionProperty `json:"connectionProperties,omitempty"` + // Continuous: [Optional] Specifies whether the query should be executed + // as a continuous query. The default value is false. + Continuous bool `json:"continuous,omitempty"` + // CreateSession: If true, creates a new session, where session id will // be a server generated random id. If false, runs query with an // existing session_id passed in ConnectionProperty, otherwise runs @@ -7002,6 +7098,63 @@ func (s *RemoteFunctionOptions) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RemoteModelInfo: Remote Model Info +type RemoteModelInfo struct { + // Connection: Output only. Fully qualified name of the user-provided + // connection object of the remote model. 
Format: + // ``"projects/{project_id}/locations/{location_id}/connections/{connect + // ion_id}"`` + Connection string `json:"connection,omitempty"` + + // Endpoint: Output only. The endpoint for remote model. + Endpoint string `json:"endpoint,omitempty"` + + // MaxBatchingRows: Output only. Max number of rows in each batch sent + // to the remote service. If unset, the number of rows in each batch is + // set dynamically. + MaxBatchingRows int64 `json:"maxBatchingRows,omitempty,string"` + + // RemoteServiceType: Output only. The remote service type for remote + // model. + // + // Possible values: + // "REMOTE_SERVICE_TYPE_UNSPECIFIED" - Unspecified remote service + // type. + // "CLOUD_AI_TRANSLATE_V3" - V3 Cloud AI Translation API. See more + // details at [Cloud Translation API] + // (https://cloud.google.com/translate/docs/reference/rest). + // "CLOUD_AI_VISION_V1" - V1 Cloud AI Vision API See more details at + // [Cloud Vision API] + // (https://cloud.google.com/vision/docs/reference/rest). + // "CLOUD_AI_NATURAL_LANGUAGE_V1" - V1 Cloud AI Natural Language API. + // See more details at [REST Resource: + // documents](https://cloud.google.com/natural-language/docs/reference/re + // st/v1/documents). + RemoteServiceType string `json:"remoteServiceType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Connection") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Connection") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RemoteModelInfo) MarshalJSON() ([]byte, error) { + type NoMethod RemoteModelInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Routine: A user-defined function or a stored procedure. type Routine struct { // Arguments: Optional. @@ -7054,6 +7207,8 @@ type Routine struct { // "SQL" - SQL language. // "JAVASCRIPT" - JavaScript language. // "PYTHON" - Python language. + // "JAVA" - Java language. + // "SCALA" - Scala language. Language string `json:"language,omitempty"` // LastModifiedTime: Output only. The time when this routine was last @@ -7096,9 +7251,10 @@ type Routine struct { // // Possible values: // "ROUTINE_TYPE_UNSPECIFIED" - // "SCALAR_FUNCTION" - Non-builtin permanent scalar function. + // "SCALAR_FUNCTION" - Non-built-in persistent scalar function. // "PROCEDURE" - Stored procedure. - // "TABLE_VALUED_FUNCTION" - Non-builtin permanent TVF. + // "TABLE_VALUED_FUNCTION" - Non-built-in persistent TVF. + // "AGGREGATE_FUNCTION" - Non-built-in persistent aggregate function. RoutineType string `json:"routineType,omitempty"` // SparkOptions: Optional. Spark specific options. @@ -7140,13 +7296,13 @@ func (s *Routine) MarshalJSON() ([]byte, error) { } type RoutineReference struct { - // DatasetId: [Required] The ID of the dataset containing this routine. + // DatasetId: Required. The ID of the dataset containing this routine. DatasetId string `json:"datasetId,omitempty"` - // ProjectId: [Required] The ID of the project containing this routine. + // ProjectId: Required. The ID of the project containing this routine. ProjectId string `json:"projectId,omitempty"` - // RoutineId: [Required] The ID of the routine. 
The ID must contain only + // RoutineId: Required. The ID of the routine. The ID must contain only // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum // length is 256 characters. RoutineId string `json:"routineId,omitempty"` @@ -7256,20 +7412,20 @@ func (s *RowAccessPolicy) MarshalJSON() ([]byte, error) { } type RowAccessPolicyReference struct { - // DatasetId: [Required] The ID of the dataset containing this row - // access policy. + // DatasetId: Required. The ID of the dataset containing this row access + // policy. DatasetId string `json:"datasetId,omitempty"` - // PolicyId: [Required] The ID of the row access policy. The ID must + // PolicyId: Required. The ID of the row access policy. The ID must // contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). // The maximum length is 256 characters. PolicyId string `json:"policyId,omitempty"` - // ProjectId: [Required] The ID of the project containing this row - // access policy. + // ProjectId: Required. The ID of the project containing this row access + // policy. ProjectId string `json:"projectId,omitempty"` - // TableId: [Required] The ID of the table containing this row access + // TableId: Required. The ID of the table containing this row access // policy. TableId string `json:"tableId,omitempty"` @@ -7592,6 +7748,11 @@ type SparkOptions struct { // (https://spark.apache.org/docs/latest/index.html). JarUris []string `json:"jarUris,omitempty"` + // MainClass: The fully qualified name of a class in jar_uris, for + // example, com.example.wordcount. Exactly one of main_class and + // main_jar_uri field should be set for Java/Scala language type. + MainClass string `json:"mainClass,omitempty"` + // MainFileUri: The main file/jar URI of the Spark application. Exactly // one of the definition_body field and the main_file_uri field must be // set for Python. 
Exactly one of main_class and main_file_uri field @@ -7601,7 +7762,9 @@ type SparkOptions struct { // Properties: Configuration properties as a set of key/value pairs, // which will be passed on to the Spark application. For more // information, see Apache Spark - // (https://spark.apache.org/docs/latest/index.html). + // (https://spark.apache.org/docs/latest/index.html) and the procedure + // option list + // (https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#procedure_option_list). Properties map[string]string `json:"properties,omitempty"` // PyFileUris: Python files to be placed on the PYTHONPATH for PySpark @@ -7693,7 +7856,7 @@ type StandardSqlDataType struct { StructType *StandardSqlStructType `json:"structType,omitempty"` // TypeKind: Required. The top level type of this field. Can be any - // standard SQL data type (e.g., "INT64", "DATE", "ARRAY"). + // GoogleSQL data type (e.g., "INT64", "DATE", "ARRAY"). // // Possible values: // "TYPE_KIND_UNSPECIFIED" - Invalid type. @@ -7986,6 +8149,15 @@ type Table struct { // required to run 'PREDICT' queries. Model *ModelDefinition `json:"model,omitempty"` + // NumActiveLogicalBytes: [Output-only] Number of logical bytes that are + // less than 90 days old. + NumActiveLogicalBytes int64 `json:"numActiveLogicalBytes,omitempty,string"` + + // NumActivePhysicalBytes: [Output-only] Number of physical bytes less + // than 90 days old. This data is not kept in real time, and might be + // delayed by a few seconds to a few minutes. + NumActivePhysicalBytes int64 `json:"numActivePhysicalBytes,omitempty,string"` + // NumBytes: [Output-only] The size of this table in bytes, excluding // any data in the streaming buffer. NumBytes int64 `json:"numBytes,omitempty,string"` @@ -7994,53 +8166,44 @@ type Table struct { // are considered "long-term storage". 
NumLongTermBytes int64 `json:"numLongTermBytes,omitempty,string"` - // NumPhysicalBytes: [Output-only] [TrustedTester] The physical size of - // this table in bytes, excluding any data in the streaming buffer. This - // includes compression and storage used for time travel. - NumPhysicalBytes int64 `json:"numPhysicalBytes,omitempty,string"` - - // NumRows: [Output-only] The number of rows of data in this table, - // excluding any data in the streaming buffer. - NumRows uint64 `json:"numRows,omitempty,string"` - - // NumActiveLogicalBytes: [Output-only] Number of logical bytes that are - // less than 90 days old. - NumActiveLogicalBytes int64 `json:"num_active_logical_bytes,omitempty,string"` - - // NumActivePhysicalBytes: [Output-only] Number of physical bytes less - // than 90 days old. This data is not kept in real time, and might be - // delayed by a few seconds to a few minutes. - NumActivePhysicalBytes int64 `json:"num_active_physical_bytes,omitempty,string"` - // NumLongTermLogicalBytes: [Output-only] Number of logical bytes that // are more than 90 days old. - NumLongTermLogicalBytes int64 `json:"num_long_term_logical_bytes,omitempty,string"` + NumLongTermLogicalBytes int64 `json:"numLongTermLogicalBytes,omitempty,string"` // NumLongTermPhysicalBytes: [Output-only] Number of physical bytes more // than 90 days old. This data is not kept in real time, and might be // delayed by a few seconds to a few minutes. - NumLongTermPhysicalBytes int64 `json:"num_long_term_physical_bytes,omitempty,string"` + NumLongTermPhysicalBytes int64 `json:"numLongTermPhysicalBytes,omitempty,string"` // NumPartitions: [Output-only] The number of partitions present in the // table or materialized view. This data is not kept in real time, and // might be delayed by a few seconds to a few minutes. 
- NumPartitions int64 `json:"num_partitions,omitempty,string"` + NumPartitions int64 `json:"numPartitions,omitempty,string"` + + // NumPhysicalBytes: [Output-only] [TrustedTester] The physical size of + // this table in bytes, excluding any data in the streaming buffer. This + // includes compression and storage used for time travel. + NumPhysicalBytes int64 `json:"numPhysicalBytes,omitempty,string"` + + // NumRows: [Output-only] The number of rows of data in this table, + // excluding any data in the streaming buffer. + NumRows uint64 `json:"numRows,omitempty,string"` // NumTimeTravelPhysicalBytes: [Output-only] Number of physical bytes // used by time travel storage (deleted or changed data). This data is // not kept in real time, and might be delayed by a few seconds to a few // minutes. - NumTimeTravelPhysicalBytes int64 `json:"num_time_travel_physical_bytes,omitempty,string"` + NumTimeTravelPhysicalBytes int64 `json:"numTimeTravelPhysicalBytes,omitempty,string"` // NumTotalLogicalBytes: [Output-only] Total number of logical bytes in // the table or materialized view. - NumTotalLogicalBytes int64 `json:"num_total_logical_bytes,omitempty,string"` + NumTotalLogicalBytes int64 `json:"numTotalLogicalBytes,omitempty,string"` // NumTotalPhysicalBytes: [Output-only] The physical size of this table // in bytes. This also includes storage used for time travel. This data // is not kept in real time, and might be delayed by a few seconds to a // few minutes. - NumTotalPhysicalBytes int64 `json:"num_total_physical_bytes,omitempty,string"` + NumTotalPhysicalBytes int64 `json:"numTotalPhysicalBytes,omitempty,string"` // RangePartitioning: [TrustedTester] Range partitioning specification // for this table. Only one of timePartitioning and rangePartitioning @@ -8068,6 +8231,9 @@ type Table struct { // in the streaming buffer. StreamingBuffer *Streamingbuffer `json:"streamingBuffer,omitempty"` + // TableConstraints: [Optional] The table constraints on the table. 
+ TableConstraints *TableConstraints `json:"tableConstraints,omitempty"` + // TableReference: [Required] Reference describing the ID of this table. TableReference *TableReference `json:"tableReference,omitempty"` @@ -8142,6 +8308,153 @@ func (s *TableCell) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type TableConstraints struct { + // ForeignKeys: [Optional] The foreign keys of the tables. + ForeignKeys []*TableConstraintsForeignKeys `json:"foreignKeys,omitempty"` + + // PrimaryKey: [Optional] The primary key of the table. + PrimaryKey *TableConstraintsPrimaryKey `json:"primaryKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ForeignKeys") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ForeignKeys") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TableConstraints) MarshalJSON() ([]byte, error) { + type NoMethod TableConstraints + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TableConstraintsForeignKeys struct { + ColumnReferences []*TableConstraintsForeignKeysColumnReferences `json:"columnReferences,omitempty"` + + Name string `json:"name,omitempty"` + + ReferencedTable *TableConstraintsForeignKeysReferencedTable `json:"referencedTable,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ColumnReferences") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ColumnReferences") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TableConstraintsForeignKeys) MarshalJSON() ([]byte, error) { + type NoMethod TableConstraintsForeignKeys + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TableConstraintsForeignKeysColumnReferences struct { + ReferencedColumn string `json:"referencedColumn,omitempty"` + + ReferencingColumn string `json:"referencingColumn,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ReferencedColumn") to + // unconditionally include in API requests. 
By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ReferencedColumn") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TableConstraintsForeignKeysColumnReferences) MarshalJSON() ([]byte, error) { + type NoMethod TableConstraintsForeignKeysColumnReferences + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TableConstraintsForeignKeysReferencedTable struct { + DatasetId string `json:"datasetId,omitempty"` + + ProjectId string `json:"projectId,omitempty"` + + TableId string `json:"tableId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DatasetId") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DatasetId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TableConstraintsForeignKeysReferencedTable) MarshalJSON() ([]byte, error) { + type NoMethod TableConstraintsForeignKeysReferencedTable + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TableConstraintsPrimaryKey: [Optional] The primary key of the table. +type TableConstraintsPrimaryKey struct { + Columns []string `json:"columns,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Columns") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Columns") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TableConstraintsPrimaryKey) MarshalJSON() ([]byte, error) { + type NoMethod TableConstraintsPrimaryKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type TableDataInsertAllRequest struct { // IgnoreUnknownValues: [Optional] Accept rows that contain values that // do not match the schema. The unknown values are ignored. 
Default is @@ -8849,12 +9162,19 @@ type TrainingOptions struct { // adjustment in the input time series. AdjustStepChanges bool `json:"adjustStepChanges,omitempty"` + // ApproxGlobalFeatureContrib: Whether to use approximate feature + // contribution method in XGBoost model explanation for global explain. + ApproxGlobalFeatureContrib bool `json:"approxGlobalFeatureContrib,omitempty"` + // AutoArima: Whether to enable auto ARIMA or not. AutoArima bool `json:"autoArima,omitempty"` - // AutoArimaMaxOrder: The max value of non-seasonal p and q. + // AutoArimaMaxOrder: The max value of the sum of non-seasonal p and q. AutoArimaMaxOrder int64 `json:"autoArimaMaxOrder,omitempty,string"` + // AutoArimaMinOrder: The min value of the sum of non-seasonal p and q. + AutoArimaMinOrder int64 `json:"autoArimaMinOrder,omitempty,string"` + // BatchSize: Batch size for dnn models. BatchSize int64 `json:"batchSize,omitempty,string"` @@ -9122,6 +9442,10 @@ type TrainingOptions struct { // InputLabelColumns: Name of input label columns in training data. InputLabelColumns []string `json:"inputLabelColumns,omitempty"` + // InstanceWeightColumn: Name of the instance weight column for training + // data. This column isn't be used as a feature. + InstanceWeightColumn string `json:"instanceWeightColumn,omitempty"` + // IntegratedGradientsNumSteps: Number of integral steps for the // integrated gradients explain method. IntegratedGradientsNumSteps int64 `json:"integratedGradientsNumSteps,omitempty,string"` @@ -9243,12 +9567,6 @@ type TrainingOptions struct { // regression problem. OptimizationStrategy string `json:"optimizationStrategy,omitempty"` - // PreserveInputStructs: Whether to preserve the input structs in output - // feature names. Suppose there is a struct A with field b. When false - // (default), the output feature name is A_b. When true, the output - // feature name is A.b. 
- PreserveInputStructs bool `json:"preserveInputStructs,omitempty"` - // SampledShapleyNumPaths: Number of paths for the sampled Shapley // explain method. SampledShapleyNumPaths int64 `json:"sampledShapleyNumPaths,omitempty,string"` @@ -9257,6 +9575,10 @@ type TrainingOptions struct { // prevent overfitting for boosted tree models. Subsample float64 `json:"subsample,omitempty"` + // TfVersion: Based on the selected TF version, the corresponding docker + // image is used to train external models. + TfVersion string `json:"tfVersion,omitempty"` + // TimeSeriesDataColumn: Column to be designated as time series data for // ARIMA model. TimeSeriesDataColumn string `json:"timeSeriesDataColumn,omitempty"` @@ -9302,6 +9624,10 @@ type TrainingOptions struct { // WarmStart: Whether to train a model from the last checkpoint. WarmStart bool `json:"warmStart,omitempty"` + // XgboostVersion: User-selected XGBoost versions for training of + // XGBoost models. + XgboostVersion string `json:"xgboostVersion,omitempty"` + // ForceSendFields is a list of field names (e.g. "AdjustStepChanges") // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -13390,9 +13716,9 @@ func (r *RoutinesService) List(projectId string, datasetId string) *RoutinesList } // Filter sets the optional parameter "filter": If set, then only the -// Routines matching this filter are returned. The current supported -// form is either "routine_type:" or "routineType:", where is a -// RoutineType enum. Example: "routineType:SCALAR_FUNCTION". +// Routines matching this filter are returned. The supported format is +// `routineType:{RoutineType}`, where `{RoutineType}` is a RoutineType +// enum. For example: `routineType:SCALAR_FUNCTION`. 
func (c *RoutinesListCall) Filter(filter string) *RoutinesListCall { c.urlParams_.Set("filter", filter) return c @@ -13541,7 +13867,7 @@ func (c *RoutinesListCall) Do(opts ...googleapi.CallOption) (*ListRoutinesRespon // "type": "string" // }, // "filter": { - // "description": "If set, then only the Routines matching this filter are returned. The current supported form is either \"routine_type:\" or \"routineType:\", where is a RoutineType enum. Example: \"routineType:SCALAR_FUNCTION\".", + // "description": "If set, then only the Routines matching this filter are returned. The supported format is `routineType:{RoutineType}`, where `{RoutineType}` is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`.", // "location": "query", // "type": "string" // }, diff --git a/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-api.json b/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-api.json index f087a46906..e470c0d4f9 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-api.json @@ -229,7 +229,7 @@ "operations": { "methods": { "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", "flatPath": "v2/operations/projects/{projectsId}/operations", "httpMethod": "GET", "id": "bigtableadmin.operations.projects.operations.list", @@ -1877,6 +1877,108 @@ "https://www.googleapis.com/auth/cloud-platform" ] } + }, + "resources": { + "views": { + "methods": { + "getIamPolicy": { + "description": "Gets the access control policy for a Table resource. Returns an empty policy if the resource exists but does not have a policy set.", + "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}/views/{viewsId}:getIamPolicy", + "httpMethod": "POST", + "id": "bigtableadmin.projects.instances.tables.views.getIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+/views/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+resource}:getIamPolicy", + "request": { + "$ref": "GetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on a Table resource. 
Replaces any existing policy.", + "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}/views/{viewsId}:setIamPolicy", + "httpMethod": "POST", + "id": "bigtableadmin.projects.instances.tables.views.setIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+/views/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+resource}:setIamPolicy", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that the caller has on the specified table resource.", + "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}/views/{viewsId}:testIamPermissions", + "httpMethod": "POST", + "id": "bigtableadmin.projects.instances.tables.views.testIamPermissions", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested. 
See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+/views/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+resource}:testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } } } } @@ -1966,7 +2068,7 @@ } } }, - "revision": "20230206", + "revision": "20230622", "rootUrl": "https://bigtableadmin.googleapis.com/", "schemas": { "AppProfile": { @@ -2197,6 +2299,18 @@ }, "type": "object" }, + "ChangeStreamConfig": { + "description": "Change stream configuration.", + "id": "ChangeStreamConfig", + "properties": { + "retentionPeriod": { + "description": "How long the change stream should be retained. Change stream data older than the retention period will not be returned when reading the change stream from the table. 
Values must be at least 1 day and at most 7 days, and will be truncated to microsecond granularity.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, "CheckConsistencyRequest": { "description": "Request message for google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", "id": "CheckConsistencyRequest", @@ -2995,7 +3109,7 @@ "type": "object" }, "Location": { - "description": "A resource that represents Google Cloud Platform location.", + "description": "A resource that represents a Google Cloud location.", "id": "Location", "properties": { "displayName": { @@ -3055,6 +3169,10 @@ "description": "Request message for google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", "id": "ModifyColumnFamiliesRequest", "properties": { + "ignoreWarnings": { + "description": "If true, ignore safety checks when modifying the column families.", + "type": "boolean" + }, "modifications": { "description": "Required. Modifications to be atomically applied to the specified table's families. Entries are applied in order, meaning that earlier modifications can be masked by later ones (in the case of repeated updates to the same family, for example).", "items": { @@ -3381,6 +3499,10 @@ "description": "A collection of user data indexed by row, column, and timestamp. Each table is served using the resources of its parent cluster.", "id": "Table", "properties": { + "changeStreamConfig": { + "$ref": "ChangeStreamConfig", + "description": "If specified, enable the change stream on this table. Otherwise, the change stream is disabled and the change stream is not retained." 
+ }, "clusterStates": { "additionalProperties": { "$ref": "ClusterState" diff --git a/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-gen.go b/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-gen.go index 3a88ace8ec..06c9bb2f3f 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/bigtableadmin/v2/bigtableadmin-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "bigtableadmin:v2" const apiName = "bigtableadmin" @@ -287,11 +288,23 @@ type ProjectsInstancesClustersHotTabletsService struct { func NewProjectsInstancesTablesService(s *Service) *ProjectsInstancesTablesService { rs := &ProjectsInstancesTablesService{s: s} + rs.Views = NewProjectsInstancesTablesViewsService(s) return rs } type ProjectsInstancesTablesService struct { s *Service + + Views *ProjectsInstancesTablesViewsService +} + +func NewProjectsInstancesTablesViewsService(s *Service) *ProjectsInstancesTablesViewsService { + rs := &ProjectsInstancesTablesViewsService{s: s} + return rs +} + +type ProjectsInstancesTablesViewsService struct { + s *Service } func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { @@ -737,6 +750,39 @@ func (s *Binding) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ChangeStreamConfig: Change stream configuration. +type ChangeStreamConfig struct { + // RetentionPeriod: How long the change stream should be retained. + // Change stream data older than the retention period will not be + // returned when reading the change stream from the table. Values must + // be at least 1 day and at most 7 days, and will be truncated to + // microsecond granularity. 
+ RetentionPeriod string `json:"retentionPeriod,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RetentionPeriod") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RetentionPeriod") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ChangeStreamConfig) MarshalJSON() ([]byte, error) { + type NoMethod ChangeStreamConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // CheckConsistencyRequest: Request message for // google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency type CheckConsistencyRequest struct { @@ -2319,7 +2365,7 @@ func (s *ListTablesResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Location: A resource that represents Google Cloud Platform location. +// Location: A resource that represents a Google Cloud location. type Location struct { // DisplayName: The friendly name for this location, typically a nearby // city name. For example, "Tokyo". 
@@ -2413,13 +2459,17 @@ func (s *Modification) MarshalJSON() ([]byte, error) { // ModifyColumnFamiliesRequest: Request message for // google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies type ModifyColumnFamiliesRequest struct { + // IgnoreWarnings: If true, ignore safety checks when modifying the + // column families. + IgnoreWarnings bool `json:"ignoreWarnings,omitempty"` + // Modifications: Required. Modifications to be atomically applied to // the specified table's families. Entries are applied in order, meaning // that earlier modifications can be masked by later ones (in the case // of repeated updates to the same family, for example). Modifications []*Modification `json:"modifications,omitempty"` - // ForceSendFields is a list of field names (e.g. "Modifications") to + // ForceSendFields is a list of field names (e.g. "IgnoreWarnings") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -2427,12 +2477,13 @@ type ModifyColumnFamiliesRequest struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Modifications") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "IgnoreWarnings") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. 
It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -3097,6 +3148,11 @@ func (s *Status) MarshalJSON() ([]byte, error) { // timestamp. Each table is served using the resources of its parent // cluster. type Table struct { + // ChangeStreamConfig: If specified, enable the change stream on this + // table. Otherwise, the change stream is disabled and the change stream + // is not retained. + ChangeStreamConfig *ChangeStreamConfig `json:"changeStreamConfig,omitempty"` + // ClusterStates: Output only. Map from cluster ID to per-cluster table // state. If it could not be determined whether or not the table has // data in a particular cluster (for example, if its zone is @@ -3148,20 +3204,21 @@ type Table struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "ClusterStates") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "ChangeStreamConfig") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClusterStates") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. 
"ChangeStreamConfig") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -3993,14 +4050,7 @@ type OperationsProjectsOperationsListCall struct { // List: Lists operations that match the specified filter in the // request. If the server doesn't support this method, it returns -// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to -// override the binding to use different resource name schemes, such as -// `users/*/operations`. To override the binding, API services can add a -// binding such as "/v1/{name=users/*}/operations" to their service -// configuration. For backwards compatibility, the default name includes -// the operations collection id, however overriding users must ensure -// the name binding is the parent resource, without the operations -// collection id. +// `UNIMPLEMENTED`. // // - name: The name of the operation's parent resource. func (r *OperationsProjectsOperationsService) List(name string) *OperationsProjectsOperationsListCall { @@ -4129,7 +4179,7 @@ func (c *OperationsProjectsOperationsListCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", // "flatPath": "v2/operations/projects/{projectsId}/operations", // "httpMethod": "GET", // "id": "bigtableadmin.operations.projects.operations.list", @@ -11431,6 +11481,457 @@ func (c *ProjectsInstancesTablesUndeleteCall) Do(opts ...googleapi.CallOption) ( } +// method id "bigtableadmin.projects.instances.tables.views.getIamPolicy": + +type ProjectsInstancesTablesViewsGetIamPolicyCall struct { + s *Service + resource string + getiampolicyrequest *GetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a Table resource. +// Returns an empty policy if the resource exists but does not have a +// policy set. +// +// - resource: REQUIRED: The resource for which the policy is being +// requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. +func (r *ProjectsInstancesTablesViewsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsInstancesTablesViewsGetIamPolicyCall { + c := &ProjectsInstancesTablesViewsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.getiampolicyrequest = getiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsInstancesTablesViewsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsInstancesTablesViewsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesTablesViewsGetIamPolicyCall) Context(ctx context.Context) *ProjectsInstancesTablesViewsGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesTablesViewsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesTablesViewsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigtableadmin.projects.instances.tables.views.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesTablesViewsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a Table resource. Returns an empty policy if the resource exists but does not have a policy set.", + // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}/views/{viewsId}:getIamPolicy", + // "httpMethod": "POST", + // "id": "bigtableadmin.projects.instances.tables.views.getIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy is being requested. 
See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+/views/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+resource}:getIamPolicy", + // "request": { + // "$ref": "GetIamPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigtable.admin", + // "https://www.googleapis.com/auth/bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-bigtable.admin", + // "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigtableadmin.projects.instances.tables.views.setIamPolicy": + +type ProjectsInstancesTablesViewsSetIamPolicyCall struct { + s *Service + resource string + setiampolicyrequest *SetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on a Table resource. +// Replaces any existing policy. +// +// - resource: REQUIRED: The resource for which the policy is being +// specified. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. +func (r *ProjectsInstancesTablesViewsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsInstancesTablesViewsSetIamPolicyCall { + c := &ProjectsInstancesTablesViewsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setiampolicyrequest = setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsInstancesTablesViewsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsInstancesTablesViewsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesTablesViewsSetIamPolicyCall) Context(ctx context.Context) *ProjectsInstancesTablesViewsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesTablesViewsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesTablesViewsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigtableadmin.projects.instances.tables.views.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesTablesViewsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on a Table resource. Replaces any existing policy.", + // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}/views/{viewsId}:setIamPolicy", + // "httpMethod": "POST", + // "id": "bigtableadmin.projects.instances.tables.views.setIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy is being specified. 
See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+/views/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+resource}:setIamPolicy", + // "request": { + // "$ref": "SetIamPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigtable.admin", + // "https://www.googleapis.com/auth/bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-bigtable.admin", + // "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "bigtableadmin.projects.instances.tables.views.testIamPermissions": + +type ProjectsInstancesTablesViewsTestIamPermissionsCall struct { + s *Service + resource string + testiampermissionsrequest *TestIamPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that the caller has on the +// specified table resource. +// +// - resource: REQUIRED: The resource for which the policy detail is +// being requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. +func (r *ProjectsInstancesTablesViewsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsInstancesTablesViewsTestIamPermissionsCall { + c := &ProjectsInstancesTablesViewsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.testiampermissionsrequest = testiampermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsInstancesTablesViewsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsInstancesTablesViewsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesTablesViewsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsInstancesTablesViewsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesTablesViewsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesTablesViewsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "bigtableadmin.projects.instances.tables.views.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. 
+// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInstancesTablesViewsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that the caller has on the specified table resource.", + // "flatPath": "v2/projects/{projectsId}/instances/{instancesId}/tables/{tablesId}/views/{viewsId}:testIamPermissions", + // "httpMethod": "POST", + // "id": "bigtableadmin.projects.instances.tables.views.testIamPermissions", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy detail is being requested. 
See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/tables/[^/]+/views/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+resource}:testIamPermissions", + // "request": { + // "$ref": "TestIamPermissionsRequest" + // }, + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/bigtable.admin", + // "https://www.googleapis.com/auth/bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-bigtable.admin", + // "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "bigtableadmin.projects.locations.get": type ProjectsLocationsGetCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-api.json b/terraform/providers/google/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-api.json index 61102cd62a..173c1488d6 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-api.json @@ -521,7 +521,7 @@ } } }, - "revision": "20230130", + "revision": "20230629", "rootUrl": "https://cloudbilling.googleapis.com/", "schemas": { "AggregationInfo": { @@ -935,15 +935,18 @@ "type": "string" }, "billingEnabled": { - "description": "True if the project is associated with an open billing account, to which usage on the project is charged. False if the project is associated with a closed billing account, or no billing account at all, and therefore cannot use paid services. This field is read-only.", + "description": "Output only. True if the project is associated with an open billing account, to which usage on the project is charged. 
False if the project is associated with a closed billing account, or no billing account at all, and therefore cannot use paid services.", + "readOnly": true, "type": "boolean" }, "name": { - "description": "The resource name for the `ProjectBillingInfo`; has the form `projects/{project_id}/billingInfo`. For example, the resource name for the billing information for project `tokyo-rain-123` would be `projects/tokyo-rain-123/billingInfo`. This field is read-only.", + "description": "Output only. The resource name for the `ProjectBillingInfo`; has the form `projects/{project_id}/billingInfo`. For example, the resource name for the billing information for project `tokyo-rain-123` would be `projects/tokyo-rain-123/billingInfo`.", + "readOnly": true, "type": "string" }, "projectId": { - "description": "The ID of the project that this `ProjectBillingInfo` represents, such as `tokyo-rain-123`. This is a convenience field so that you don't need to parse the `name` field to obtain a project ID. This field is read-only.", + "description": "Output only. The ID of the project that this `ProjectBillingInfo` represents, such as `tokyo-rain-123`. 
This is a convenience field so that you don't need to parse the `name` field to obtain a project ID.", + "readOnly": true, "type": "string" } }, diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go index 4d9e271b4e..fa27ac4b88 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudbilling/v1/cloudbilling-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "cloudbilling:v1" const apiName = "cloudbilling" @@ -1091,23 +1092,22 @@ type ProjectBillingInfo struct { // `billingAccounts/012345-567890-ABCDEF`. BillingAccountName string `json:"billingAccountName,omitempty"` - // BillingEnabled: True if the project is associated with an open - // billing account, to which usage on the project is charged. False if - // the project is associated with a closed billing account, or no - // billing account at all, and therefore cannot use paid services. This - // field is read-only. + // BillingEnabled: Output only. True if the project is associated with + // an open billing account, to which usage on the project is charged. + // False if the project is associated with a closed billing account, or + // no billing account at all, and therefore cannot use paid services. BillingEnabled bool `json:"billingEnabled,omitempty"` - // Name: The resource name for the `ProjectBillingInfo`; has the form - // `projects/{project_id}/billingInfo`. For example, the resource name - // for the billing information for project `tokyo-rain-123` would be - // `projects/tokyo-rain-123/billingInfo`. This field is read-only. + // Name: Output only. 
The resource name for the `ProjectBillingInfo`; + // has the form `projects/{project_id}/billingInfo`. For example, the + // resource name for the billing information for project + // `tokyo-rain-123` would be `projects/tokyo-rain-123/billingInfo`. Name string `json:"name,omitempty"` - // ProjectId: The ID of the project that this `ProjectBillingInfo` - // represents, such as `tokyo-rain-123`. This is a convenience field so - // that you don't need to parse the `name` field to obtain a project ID. - // This field is read-only. + // ProjectId: Output only. The ID of the project that this + // `ProjectBillingInfo` represents, such as `tokyo-rain-123`. This is a + // convenience field so that you don't need to parse the `name` field to + // obtain a project ID. ProjectId string `json:"projectId,omitempty"` // ServerResponse contains the HTTP response code and headers from the diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json b/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json index 4bdbc4261b..6f4697b9d3 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json @@ -401,7 +401,7 @@ ] }, "retry": { - "description": "Creates a new build based on the specified build. This method creates a new build using the original build request, which may or may not result in an identical build. For triggered builds: * Triggered builds resolve to a precise revision; therefore a retry of a triggered build will result in a build that uses the same revision. For non-triggered builds that specify `RepoSource`: * If the original build built from the tip of a branch, the retried build will build from the tip of that branch, which may not be the same revision as the original build. 
* If the original build specified a commit sha or revision ID, the retried build will use the identical source. For builds that specify `StorageSource`: * If the original build pulled source from Google Cloud Storage without specifying the generation of the object, the new build will use the current object, which may be different from the original build source. * If the original build pulled source from Cloud Storage and specified the generation of the object, the new build will attempt to use the same object, which may or may not be available depending on the bucket's lifecycle management settings.", + "description": "Creates a new build based on the specified build. This method creates a new build using the original build request, which may or may not result in an identical build. For triggered builds: * Triggered builds resolve to a precise revision; therefore a retry of a triggered build will result in a build that uses the same revision. For non-triggered builds that specify `RepoSource`: * If the original build built from the tip of a branch, the retried build will build from the tip of that branch, which may not be the same revision as the original build. * If the original build specified a commit sha or revision ID, the retried build will use the identical source. For builds that specify `StorageSource`: * If the original build pulled source from Cloud Storage without specifying the generation of the object, the new build will use the current object, which may be different from the original build source. 
* If the original build pulled source from Cloud Storage and specified the generation of the object, the new build will attempt to use the same object, which may or may not be available depending on the bucket's lifecycle management settings.", "flatPath": "v1/projects/{projectId}/builds/{id}:retry", "httpMethod": "POST", "id": "cloudbuild.projects.builds.retry", @@ -460,6 +460,7 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "ID of the project.", "location": "query", "type": "string" @@ -486,6 +487,7 @@ ], "parameters": { "configId": { + "deprecated": true, "description": "Unique identifier of the `GitHubEnterpriseConfig`", "location": "query", "type": "string" @@ -498,6 +500,7 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "ID of the project", "location": "query", "type": "string" @@ -521,6 +524,7 @@ ], "parameters": { "configId": { + "deprecated": true, "description": "Unique identifier of the `GitHubEnterpriseConfig`", "location": "query", "type": "string" @@ -533,6 +537,7 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "ID of the project", "location": "query", "type": "string" @@ -563,6 +568,7 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "ID of the project", "location": "query", "type": "string" @@ -1046,7 +1052,7 @@ ] }, "retry": { - "description": "Creates a new build based on the specified build. This method creates a new build using the original build request, which may or may not result in an identical build. For triggered builds: * Triggered builds resolve to a precise revision; therefore a retry of a triggered build will result in a build that uses the same revision. For non-triggered builds that specify `RepoSource`: * If the original build built from the tip of a branch, the retried build will build from the tip of that branch, which may not be the same revision as the original build. 
* If the original build specified a commit sha or revision ID, the retried build will use the identical source. For builds that specify `StorageSource`: * If the original build pulled source from Google Cloud Storage without specifying the generation of the object, the new build will use the current object, which may be different from the original build source. * If the original build pulled source from Cloud Storage and specified the generation of the object, the new build will attempt to use the same object, which may or may not be available depending on the bucket's lifecycle management settings.", + "description": "Creates a new build based on the specified build. This method creates a new build using the original build request, which may or may not result in an identical build. For triggered builds: * Triggered builds resolve to a precise revision; therefore a retry of a triggered build will result in a build that uses the same revision. For non-triggered builds that specify `RepoSource`: * If the original build built from the tip of a branch, the retried build will build from the tip of that branch, which may not be the same revision as the original build. * If the original build specified a commit sha or revision ID, the retried build will use the identical source. For builds that specify `StorageSource`: * If the original build pulled source from Cloud Storage without specifying the generation of the object, the new build will use the current object, which may be different from the original build source. 
* If the original build pulled source from Cloud Storage and specified the generation of the object, the new build will attempt to use the same object, which may or may not be available depending on the bucket's lifecycle management settings.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/builds/{buildsId}:retry", "httpMethod": "POST", "id": "cloudbuild.projects.locations.builds.retry", @@ -1358,6 +1364,7 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "ID of the project.", "location": "query", "type": "string" @@ -1384,6 +1391,7 @@ ], "parameters": { "configId": { + "deprecated": true, "description": "Unique identifier of the `GitHubEnterpriseConfig`", "location": "query", "type": "string" @@ -1396,6 +1404,7 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "ID of the project", "location": "query", "type": "string" @@ -1419,6 +1428,7 @@ ], "parameters": { "configId": { + "deprecated": true, "description": "Unique identifier of the `GitHubEnterpriseConfig`", "location": "query", "type": "string" @@ -1431,6 +1441,7 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "ID of the project", "location": "query", "type": "string" @@ -1461,6 +1472,7 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "ID of the project", "location": "query", "type": "string" @@ -1738,6 +1750,12 @@ "description": "Required. ID of the `BuildTrigger` to update.", "location": "query", "type": "string" + }, + "updateMask": { + "description": "Update mask for the resource. If this is set, the server will only update the fields specified in the field mask. 
Otherwise, a full update of the mutable resource fields will be performed.", + "format": "google-fieldmask", + "location": "query", + "type": "string" } }, "path": "v1/{+resourceName}", @@ -2172,6 +2190,12 @@ "location": "path", "required": true, "type": "string" + }, + "updateMask": { + "description": "Update mask for the resource. If this is set, the server will only update the fields specified in the field mask. Otherwise, a full update of the mutable resource fields will be performed.", + "format": "google-fieldmask", + "location": "query", + "type": "string" } }, "path": "v1/projects/{projectId}/triggers/{triggerId}", @@ -2295,7 +2319,7 @@ } } }, - "revision": "20230312", + "revision": "20230626", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "ApprovalConfig": { @@ -2395,7 +2419,7 @@ "type": "array" }, "location": { - "description": "The path of an artifact in a Google Cloud Storage bucket, with the generation number. For example, `gs://mybucket/path/to/output.jar#generation`.", + "description": "The path of an artifact in a Cloud Storage bucket, with the generation number. For example, `gs://mybucket/path/to/output.jar#generation`.", "type": "string" } }, @@ -2419,6 +2443,13 @@ }, "type": "array" }, + "npmPackages": { + "description": "A list of npm packages to be uploaded to Artifact Registry upon successful completion of all build steps. Npm packages in the specified paths will be uploaded to the specified Artifact Registry repository using the builder service account's credentials. If any packages fail to be pushed, the build is marked FAILURE.", + "items": { + "$ref": "NpmPackage" + }, + "type": "array" + }, "objects": { "$ref": "ArtifactObjects", "description": "A list of objects to be uploaded to Cloud Storage upon successful completion of all build steps. Files in the workspace matching specified paths globs will be uploaded to the specified Cloud Storage location using the builder service account's credentials. 
The location and generation of the uploaded objects will be stored in the Build resource's results field. If any objects fail to be pushed, the build is marked FAILURE." @@ -2760,7 +2791,7 @@ "type": "string" }, "logsBucket": { - "description": "Google Cloud Storage bucket where logs should be written (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). Logs file names will be of the format `${logs_bucket}/log-${build_id}.txt`.", + "description": "Cloud Storage bucket where logs should be written (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)). Logs file names will be of the format `${logs_bucket}/log-${build_id}.txt`.", "type": "string" }, "name": { @@ -2972,7 +3003,7 @@ "type": "array" }, "logStreamingOption": { - "description": "Option to define build log streaming behavior to Google Cloud Storage.", + "description": "Option to define build log streaming behavior to Cloud Storage.", "enum": [ "STREAM_DEFAULT", "STREAM_ON", @@ -2980,8 +3011,8 @@ ], "enumDescriptions": [ "Service may automatically determine build log streaming behavior.", - "Build logs should be streamed to Google Cloud Storage.", - "Build logs should not be streamed to Google Cloud Storage; they will be written when the build is completed." + "Build logs should be streamed to Cloud Storage.", + "Build logs should not be streamed to Cloud Storage; they will be written when the build is completed." ], "type": "string" }, @@ -3012,14 +3043,16 @@ "N1_HIGHCPU_8", "N1_HIGHCPU_32", "E2_HIGHCPU_8", - "E2_HIGHCPU_32" + "E2_HIGHCPU_32", + "E2_MEDIUM" ], "enumDescriptions": [ "Standard machine type.", "Highcpu machine with 8 CPUs.", "Highcpu machine with 32 CPUs.", "Highcpu e2 machine with 8 CPUs.", - "Highcpu e2 machine with 32 CPUs." + "Highcpu e2 machine with 32 CPUs.", + "E2 machine with 1 CPU." 
], "type": "string" }, @@ -3052,12 +3085,14 @@ "enum": [ "NONE", "SHA256", - "MD5" + "MD5", + "SHA512" ], "enumDescriptions": [ "No hash requested.", "Use a sha256 hash.", - "Use a md5 hash." + "Use a md5 hash.", + "Use a sha512 hash." ], "type": "string" }, @@ -3687,7 +3722,7 @@ "GITLAB" ], "enumDescriptions": [ - "The default, unknown repo type.", + "The default, unknown repo type. Don't use it, instead use one of the other repo types.", "A Google Cloud Source Repositories-hosted repo.", "A GitHub-hosted repo not necessarily on \"github.com\" (i.e. GitHub Enterprise).", "A Bitbucket Server-hosted repo.", @@ -3695,6 +3730,10 @@ ], "type": "string" }, + "repository": { + "description": "The fully qualified resource name of the Repos API repository. Either URI or repository can be specified. If unspecified, the repo from which the trigger invocation originated is assumed to be the repo from which to read the specified path.", + "type": "string" + }, "revision": { "description": "The branch, tag, arbitrary ref, or SHA version of the repo to use when resolving the filename (optional). This field respects the same syntax/resolution as described here: https://git-scm.com/docs/gitrevisions If unspecified, the revision from which the trigger invocation originated is assumed to be the revision from which to read the specified path.", "type": "string" @@ -4024,7 +4063,7 @@ "GITLAB" ], "enumDescriptions": [ - "The default, unknown repo type.", + "The default, unknown repo type. Don't use it, instead use one of the other repo types.", "A Google Cloud Source Repositories-hosted repo.", "A GitHub-hosted repo not necessarily on \"github.com\" (i.e. GitHub Enterprise).", "A Bitbucket Server-hosted repo.", @@ -4032,8 +4071,31 @@ ], "type": "string" }, + "repository": { + "description": "The connected repository resource name, in the format `projects/*/locations/*/connections/*/repositories/*`. 
Either `uri` or `repository` can be specified and is required.", + "type": "string" + }, "uri": { - "description": "The URI of the repo. Either uri or repository can be specified and is required.", + "description": "The URI of the repo (e.g. https://github.com/user/repo.git). Either `uri` or `repository` can be specified and is required.", + "type": "string" + } + }, + "type": "object" + }, + "GitSource": { + "description": "Location of the source in any accessible Git repository.", + "id": "GitSource", + "properties": { + "dir": { + "description": "Directory, relative to the source root, in which to run the build. This must be a relative path. If a step's `dir` is specified and is an absolute path, this value is ignored for that step's execution.", + "type": "string" + }, + "revision": { + "description": "The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git fetch` to fetch the revision from the Git repository; therefore make sure that the string you provide for `revision` is parsable by the command. For information on string values accepted by `git fetch`, see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For information on `git fetch`, see https://git-scm.com/docs/git-fetch.", + "type": "string" + }, + "url": { + "description": "Location of the Git repo to build. This will be used as a `git remote`, see https://git-scm.com/docs/git-remote.", "type": "string" } }, @@ -4048,12 +4110,14 @@ "enum": [ "NONE", "SHA256", - "MD5" + "MD5", + "SHA512" ], "enumDescriptions": [ "No hash requested.", "Use a sha256 hash.", - "Use a md5 hash." + "Use a md5 hash.", + "Use a sha512 hash." ], "type": "string" }, @@ -4307,6 +4371,21 @@ }, "type": "object" }, + "NpmPackage": { + "description": "Npm package to upload to Artifact Registry upon successful completion of all build steps.", + "id": "NpmPackage", + "properties": { + "packagePath": { + "description": "Path to the package.json. e.g. 
workspace/path/to/package", + "type": "string" + }, + "repository": { + "description": "Artifact Registry repository, in the form \"https://$REGION-npm.pkg.dev/$PROJECT/$REPOSITORY\" Npm package in the workspace specified by path will be zipped and uploaded to Artifact Registry with this location as a prefix.", + "type": "string" + } + }, + "type": "object" + }, "Operation": { "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", @@ -4684,6 +4763,13 @@ }, "type": "array" }, + "npmPackages": { + "description": "Npm packages uploaded to Artifact Registry at the end of the build.", + "items": { + "$ref": "UploadedNpmPackage" + }, + "type": "array" + }, "numArtifacts": { "description": "Number of non-container artifacts uploaded to Cloud Storage. Only populated when artifacts are uploaded to Cloud Storage.", "format": "int64", @@ -4807,17 +4893,21 @@ "description": "Location of the source in a supported storage service.", "id": "Source", "properties": { + "gitSource": { + "$ref": "GitSource", + "description": "If provided, get the source from this Git repository." + }, "repoSource": { "$ref": "RepoSource", "description": "If provided, get the source from this location in a Cloud Source Repository." }, "storageSource": { "$ref": "StorageSource", - "description": "If provided, get the source from this location in Google Cloud Storage." + "description": "If provided, get the source from this location in Cloud Storage." }, "storageSourceManifest": { "$ref": "StorageSourceManifest", - "description": "If provided, get the source from this manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher)." + "description": "If provided, get the source from this manifest in Cloud Storage. 
This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher)." } }, "type": "object" @@ -4877,40 +4967,40 @@ "type": "object" }, "StorageSource": { - "description": "Location of the source in an archive file in Google Cloud Storage.", + "description": "Location of the source in an archive file in Cloud Storage.", "id": "StorageSource", "properties": { "bucket": { - "description": "Google Cloud Storage bucket containing the source (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).", + "description": "Cloud Storage bucket containing the source (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).", "type": "string" }, "generation": { - "description": "Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used.", + "description": "Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used.", "format": "int64", "type": "string" }, "object": { - "description": "Google Cloud Storage object containing the source. This object must be a zipped (`.zip`) or gzipped archive file (`.tar.gz`) containing source to build.", + "description": "Cloud Storage object containing the source. This object must be a zipped (`.zip`) or gzipped archive file (`.tar.gz`) containing source to build.", "type": "string" } }, "type": "object" }, "StorageSourceManifest": { - "description": "Location of the source manifest in Google Cloud Storage. This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher).", + "description": "Location of the source manifest in Cloud Storage. 
This feature is in Preview; see description [here](https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher).", "id": "StorageSourceManifest", "properties": { "bucket": { - "description": "Google Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).", + "description": "Cloud Storage bucket containing the source manifest (see [Bucket Name Requirements](https://cloud.google.com/storage/docs/bucket-naming#requirements)).", "type": "string" }, "generation": { - "description": "Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used.", + "description": "Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used.", "format": "int64", "type": "string" }, "object": { - "description": "Google Cloud Storage object containing the source manifest. This object must be a JSON file.", + "description": "Cloud Storage object containing the source manifest. This object must be a JSON file.", "type": "string" } }, @@ -5037,6 +5127,26 @@ }, "type": "object" }, + "UploadedNpmPackage": { + "description": "An npm package uploaded to Artifact Registry using the NpmPackage directive.", + "id": "UploadedNpmPackage", + "properties": { + "fileHashes": { + "$ref": "FileHashes", + "description": "Hash types and values of the npm package." + }, + "pushTiming": { + "$ref": "TimeSpan", + "description": "Output only. 
Stores timing information for pushing the specified artifact.", + "readOnly": true + }, + "uri": { + "description": "URI of the uploaded npm package.", + "type": "string" + } + }, + "type": "object" + }, "UploadedPythonPackage": { "description": "Artifact uploaded using the PythonPackage directive.", "id": "UploadedPythonPackage", diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go index 2c959b769e..5ea57d4035 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go @@ -71,6 +71,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "cloudbuild:v1" const apiName = "cloudbuild" @@ -535,8 +536,8 @@ type ArtifactResult struct { // FileHash: The file hash of the artifact. FileHash []*FileHashes `json:"fileHash,omitempty"` - // Location: The path of an artifact in a Google Cloud Storage bucket, - // with the generation number. For example, + // Location: The path of an artifact in a Cloud Storage bucket, with the + // generation number. For example, // `gs://mybucket/path/to/output.jar#generation`. Location string `json:"location,omitempty"` @@ -581,6 +582,13 @@ type Artifacts struct { // is marked FAILURE. MavenArtifacts []*MavenArtifact `json:"mavenArtifacts,omitempty"` + // NpmPackages: A list of npm packages to be uploaded to Artifact + // Registry upon successful completion of all build steps. Npm packages + // in the specified paths will be uploaded to the specified Artifact + // Registry repository using the builder service account's credentials. + // If any packages fail to be pushed, the build is marked FAILURE. 
+ NpmPackages []*NpmPackage `json:"npmPackages,omitempty"` + // Objects: A list of objects to be uploaded to Cloud Storage upon // successful completion of all build steps. Files in the workspace // matching specified paths globs will be uploaded to the specified @@ -1170,8 +1178,8 @@ type Build struct { // Console. LogUrl string `json:"logUrl,omitempty"` - // LogsBucket: Google Cloud Storage bucket where logs should be written - // (see Bucket Name Requirements + // LogsBucket: Cloud Storage bucket where logs should be written (see + // Bucket Name Requirements // (https://cloud.google.com/storage/docs/bucket-naming#requirements)). // Logs file names will be of the format // `${logs_bucket}/log-${build_id}.txt`. @@ -1403,15 +1411,14 @@ type BuildOptions struct { Env []string `json:"env,omitempty"` // LogStreamingOption: Option to define build log streaming behavior to - // Google Cloud Storage. + // Cloud Storage. // // Possible values: // "STREAM_DEFAULT" - Service may automatically determine build log // streaming behavior. - // "STREAM_ON" - Build logs should be streamed to Google Cloud - // Storage. - // "STREAM_OFF" - Build logs should not be streamed to Google Cloud - // Storage; they will be written when the build is completed. + // "STREAM_ON" - Build logs should be streamed to Cloud Storage. + // "STREAM_OFF" - Build logs should not be streamed to Cloud Storage; + // they will be written when the build is completed. LogStreamingOption string `json:"logStreamingOption,omitempty"` // Logging: Option to specify the logging mode, which determines if and @@ -1439,6 +1446,7 @@ type BuildOptions struct { // "N1_HIGHCPU_32" - Highcpu machine with 32 CPUs. // "E2_HIGHCPU_8" - Highcpu e2 machine with 8 CPUs. // "E2_HIGHCPU_32" - Highcpu e2 machine with 32 CPUs. + // "E2_MEDIUM" - E2 machine with 1 CPU. MachineType string `json:"machineType,omitempty"` // Pool: Optional. Specification for execution on a `WorkerPool`. 
See @@ -1466,6 +1474,7 @@ type BuildOptions struct { // "NONE" - No hash requested. // "SHA256" - Use a sha256 hash. // "MD5" - Use a md5 hash. + // "SHA512" - Use a sha512 hash. SourceProvenanceHash []string `json:"sourceProvenanceHash,omitempty"` // SubstitutionOption: Option to specify behavior when there is an error @@ -2394,7 +2403,8 @@ type GitFileSource struct { // RepoType: See RepoType above. // // Possible values: - // "UNKNOWN" - The default, unknown repo type. + // "UNKNOWN" - The default, unknown repo type. Don't use it, instead + // use one of the other repo types. // "CLOUD_SOURCE_REPOSITORIES" - A Google Cloud Source // Repositories-hosted repo. // "GITHUB" - A GitHub-hosted repo not necessarily on "github.com" @@ -2403,6 +2413,12 @@ type GitFileSource struct { // "GITLAB" - A GitLab-hosted repo. RepoType string `json:"repoType,omitempty"` + // Repository: The fully qualified resource name of the Repos API + // repository. Either URI or repository can be specified. If + // unspecified, the repo from which the trigger invocation originated is + // assumed to be the repo from which to read the specified path. + Repository string `json:"repository,omitempty"` + // Revision: The branch, tag, arbitrary ref, or SHA version of the repo // to use when resolving the filename (optional). This field respects // the same syntax/resolution as described here: @@ -2943,7 +2959,8 @@ type GitRepoSource struct { // RepoType: See RepoType below. // // Possible values: - // "UNKNOWN" - The default, unknown repo type. + // "UNKNOWN" - The default, unknown repo type. Don't use it, instead + // use one of the other repo types. // "CLOUD_SOURCE_REPOSITORIES" - A Google Cloud Source // Repositories-hosted repo. // "GITHUB" - A GitHub-hosted repo not necessarily on "github.com" @@ -2952,8 +2969,13 @@ type GitRepoSource struct { // "GITLAB" - A GitLab-hosted repo. RepoType string `json:"repoType,omitempty"` - // Uri: The URI of the repo. 
Either uri or repository can be specified - // and is required. + // Repository: The connected repository resource name, in the format + // `projects/*/locations/*/connections/*/repositories/*`. Either `uri` + // or `repository` can be specified and is required. + Repository string `json:"repository,omitempty"` + + // Uri: The URI of the repo (e.g. https://github.com/user/repo.git). + // Either `uri` or `repository` can be specified and is required. Uri string `json:"uri,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -2981,6 +3003,50 @@ func (s *GitRepoSource) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GitSource: Location of the source in any accessible Git repository. +type GitSource struct { + // Dir: Directory, relative to the source root, in which to run the + // build. This must be a relative path. If a step's `dir` is specified + // and is an absolute path, this value is ignored for that step's + // execution. + Dir string `json:"dir,omitempty"` + + // Revision: The revision to fetch from the Git repository such as a + // branch, a tag, a commit SHA, or any Git ref. Cloud Build uses `git + // fetch` to fetch the revision from the Git repository; therefore make + // sure that the string you provide for `revision` is parsable by the + // command. For information on string values accepted by `git fetch`, + // see https://git-scm.com/docs/gitrevisions#_specifying_revisions. For + // information on `git fetch`, see https://git-scm.com/docs/git-fetch. + Revision string `json:"revision,omitempty"` + + // Url: Location of the Git repo to build. This will be used as a `git + // remote`, see https://git-scm.com/docs/git-remote. + Url string `json:"url,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Dir") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Dir") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GitSource) MarshalJSON() ([]byte, error) { + type NoMethod GitSource + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Hash: Container message for hash values. type Hash struct { // Type: The type of hash that was performed. @@ -2989,6 +3055,7 @@ type Hash struct { // "NONE" - No hash requested. // "SHA256" - Use a sha256 hash. // "MD5" - Use a md5 hash. + // "SHA512" - Use a sha512 hash. Type string `json:"type,omitempty"` // Value: The hash value. @@ -3515,6 +3582,41 @@ func (s *NetworkConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NpmPackage: Npm package to upload to Artifact Registry upon +// successful completion of all build steps. +type NpmPackage struct { + // PackagePath: Path to the package.json. e.g. workspace/path/to/package + PackagePath string `json:"packagePath,omitempty"` + + // Repository: Artifact Registry repository, in the form + // "https://$REGION-npm.pkg.dev/$PROJECT/$REPOSITORY" Npm package in the + // workspace specified by path will be zipped and uploaded to Artifact + // Registry with this location as a prefix. 
+ Repository string `json:"repository,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PackagePath") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PackagePath") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NpmPackage) MarshalJSON() ([]byte, error) { + type NoMethod NpmPackage + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Operation: This resource represents a long-running operation that is // the result of a network API call. type Operation struct { @@ -4114,6 +4216,10 @@ type Results struct { // end of the build. MavenArtifacts []*UploadedMavenArtifact `json:"mavenArtifacts,omitempty"` + // NpmPackages: Npm packages uploaded to Artifact Registry at the end of + // the build. + NpmPackages []*UploadedNpmPackage `json:"npmPackages,omitempty"` + // NumArtifacts: Number of non-container artifacts uploaded to Cloud // Storage. Only populated when artifacts are uploaded to Cloud Storage. NumArtifacts int64 `json:"numArtifacts,omitempty,string"` @@ -4357,21 +4463,23 @@ func (s *ServiceDirectoryConfig) MarshalJSON() ([]byte, error) { // Source: Location of the source in a supported storage service. 
type Source struct { + // GitSource: If provided, get the source from this Git repository. + GitSource *GitSource `json:"gitSource,omitempty"` + // RepoSource: If provided, get the source from this location in a Cloud // Source Repository. RepoSource *RepoSource `json:"repoSource,omitempty"` // StorageSource: If provided, get the source from this location in - // Google Cloud Storage. + // Cloud Storage. StorageSource *StorageSource `json:"storageSource,omitempty"` // StorageSourceManifest: If provided, get the source from this manifest - // in Google Cloud Storage. This feature is in Preview; see description - // here + // in Cloud Storage. This feature is in Preview; see description here // (https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). StorageSourceManifest *StorageSourceManifest `json:"storageSourceManifest,omitempty"` - // ForceSendFields is a list of field names (e.g. "RepoSource") to + // ForceSendFields is a list of field names (e.g. "GitSource") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -4379,7 +4487,7 @@ type Source struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "RepoSource") to include in + // NullFields is a list of field names (e.g. "GitSource") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -4487,21 +4595,21 @@ func (s *Status) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// StorageSource: Location of the source in an archive file in Google -// Cloud Storage. 
+// StorageSource: Location of the source in an archive file in Cloud +// Storage. type StorageSource struct { - // Bucket: Google Cloud Storage bucket containing the source (see Bucket - // Name Requirements + // Bucket: Cloud Storage bucket containing the source (see Bucket Name + // Requirements // (https://cloud.google.com/storage/docs/bucket-naming#requirements)). Bucket string `json:"bucket,omitempty"` - // Generation: Google Cloud Storage generation for the object. If the + // Generation: Cloud Storage generation for the object. If the // generation is omitted, the latest generation will be used. Generation int64 `json:"generation,omitempty,string"` - // Object: Google Cloud Storage object containing the source. This - // object must be a zipped (`.zip`) or gzipped archive file (`.tar.gz`) - // containing source to build. + // Object: Cloud Storage object containing the source. This object must + // be a zipped (`.zip`) or gzipped archive file (`.tar.gz`) containing + // source to build. Object string `json:"object,omitempty"` // ForceSendFields is a list of field names (e.g. "Bucket") to @@ -4527,21 +4635,21 @@ func (s *StorageSource) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// StorageSourceManifest: Location of the source manifest in Google -// Cloud Storage. This feature is in Preview; see description here +// StorageSourceManifest: Location of the source manifest in Cloud +// Storage. This feature is in Preview; see description here // (https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/gcs-fetcher). type StorageSourceManifest struct { - // Bucket: Google Cloud Storage bucket containing the source manifest - // (see Bucket Name Requirements + // Bucket: Cloud Storage bucket containing the source manifest (see + // Bucket Name Requirements // (https://cloud.google.com/storage/docs/bucket-naming#requirements)). 
Bucket string `json:"bucket,omitempty"` - // Generation: Google Cloud Storage generation for the object. If the + // Generation: Cloud Storage generation for the object. If the // generation is omitted, the latest generation will be used. Generation int64 `json:"generation,omitempty,string"` - // Object: Google Cloud Storage object containing the source manifest. - // This object must be a JSON file. + // Object: Cloud Storage object containing the source manifest. This + // object must be a JSON file. Object string `json:"object,omitempty"` // ForceSendFields is a list of field names (e.g. "Bucket") to @@ -4784,6 +4892,42 @@ func (s *UploadedMavenArtifact) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UploadedNpmPackage: An npm package uploaded to Artifact Registry +// using the NpmPackage directive. +type UploadedNpmPackage struct { + // FileHashes: Hash types and values of the npm package. + FileHashes *FileHashes `json:"fileHashes,omitempty"` + + // PushTiming: Output only. Stores timing information for pushing the + // specified artifact. + PushTiming *TimeSpan `json:"pushTiming,omitempty"` + + // Uri: URI of the uploaded npm package. + Uri string `json:"uri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FileHashes") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FileHashes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UploadedNpmPackage) MarshalJSON() ([]byte, error) { + type NoMethod UploadedNpmPackage + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // UploadedPythonPackage: Artifact uploaded using the PythonPackage // directive. type UploadedPythonPackage struct { @@ -6528,8 +6672,8 @@ type ProjectsBuildsRetryCall struct { // the original build. * If the original build specified a commit sha or // revision ID, the retried build will use the identical source. For // builds that specify `StorageSource`: * If the original build pulled -// source from Google Cloud Storage without specifying the generation of -// the object, the new build will use the current object, which may be +// source from Cloud Storage without specifying the generation of the +// object, the new build will use the current object, which may be // different from the original build source. * If the original build // pulled source from Cloud Storage and specified the generation of the // object, the new build will attempt to use the same object, which may @@ -6638,7 +6782,7 @@ func (c *ProjectsBuildsRetryCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a new build based on the specified build. This method creates a new build using the original build request, which may or may not result in an identical build. For triggered builds: * Triggered builds resolve to a precise revision; therefore a retry of a triggered build will result in a build that uses the same revision. 
For non-triggered builds that specify `RepoSource`: * If the original build built from the tip of a branch, the retried build will build from the tip of that branch, which may not be the same revision as the original build. * If the original build specified a commit sha or revision ID, the retried build will use the identical source. For builds that specify `StorageSource`: * If the original build pulled source from Google Cloud Storage without specifying the generation of the object, the new build will use the current object, which may be different from the original build source. * If the original build pulled source from Cloud Storage and specified the generation of the object, the new build will attempt to use the same object, which may or may not be available depending on the bucket's lifecycle management settings.", + // "description": "Creates a new build based on the specified build. This method creates a new build using the original build request, which may or may not result in an identical build. For triggered builds: * Triggered builds resolve to a precise revision; therefore a retry of a triggered build will result in a build that uses the same revision. For non-triggered builds that specify `RepoSource`: * If the original build built from the tip of a branch, the retried build will build from the tip of that branch, which may not be the same revision as the original build. * If the original build specified a commit sha or revision ID, the retried build will use the identical source. For builds that specify `StorageSource`: * If the original build pulled source from Cloud Storage without specifying the generation of the object, the new build will use the current object, which may be different from the original build source. 
* If the original build pulled source from Cloud Storage and specified the generation of the object, the new build will attempt to use the same object, which may or may not be available depending on the bucket's lifecycle management settings.", // "flatPath": "v1/projects/{projectId}/builds/{id}:retry", // "httpMethod": "POST", // "id": "cloudbuild.projects.builds.retry", @@ -6826,6 +6970,7 @@ func (c *ProjectsGithubEnterpriseConfigsCreateCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "ID of the project.", // "location": "query", // "type": "string" @@ -6976,6 +7121,7 @@ func (c *ProjectsGithubEnterpriseConfigsDeleteCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "configId": { + // "deprecated": true, // "description": "Unique identifier of the `GitHubEnterpriseConfig`", // "location": "query", // "type": "string" @@ -6988,6 +7134,7 @@ func (c *ProjectsGithubEnterpriseConfigsDeleteCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "ID of the project", // "location": "query", // "type": "string" @@ -7148,6 +7295,7 @@ func (c *ProjectsGithubEnterpriseConfigsGetCall) Do(opts ...googleapi.CallOption // ], // "parameters": { // "configId": { + // "deprecated": true, // "description": "Unique identifier of the `GitHubEnterpriseConfig`", // "location": "query", // "type": "string" @@ -7160,6 +7308,7 @@ func (c *ProjectsGithubEnterpriseConfigsGetCall) Do(opts ...googleapi.CallOption // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "ID of the project", // "location": "query", // "type": "string" @@ -7319,6 +7468,7 @@ func (c *ProjectsGithubEnterpriseConfigsListCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "ID of the project", // "location": "query", // "type": "string" @@ -9645,8 +9795,8 @@ type 
ProjectsLocationsBuildsRetryCall struct { // the original build. * If the original build specified a commit sha or // revision ID, the retried build will use the identical source. For // builds that specify `StorageSource`: * If the original build pulled -// source from Google Cloud Storage without specifying the generation of -// the object, the new build will use the current object, which may be +// source from Cloud Storage without specifying the generation of the +// object, the new build will use the current object, which may be // different from the original build source. * If the original build // pulled source from Cloud Storage and specified the generation of the // object, the new build will attempt to use the same object, which may @@ -9753,7 +9903,7 @@ func (c *ProjectsLocationsBuildsRetryCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Creates a new build based on the specified build. This method creates a new build using the original build request, which may or may not result in an identical build. For triggered builds: * Triggered builds resolve to a precise revision; therefore a retry of a triggered build will result in a build that uses the same revision. For non-triggered builds that specify `RepoSource`: * If the original build built from the tip of a branch, the retried build will build from the tip of that branch, which may not be the same revision as the original build. * If the original build specified a commit sha or revision ID, the retried build will use the identical source. For builds that specify `StorageSource`: * If the original build pulled source from Google Cloud Storage without specifying the generation of the object, the new build will use the current object, which may be different from the original build source. 
* If the original build pulled source from Cloud Storage and specified the generation of the object, the new build will attempt to use the same object, which may or may not be available depending on the bucket's lifecycle management settings.", + // "description": "Creates a new build based on the specified build. This method creates a new build using the original build request, which may or may not result in an identical build. For triggered builds: * Triggered builds resolve to a precise revision; therefore a retry of a triggered build will result in a build that uses the same revision. For non-triggered builds that specify `RepoSource`: * If the original build built from the tip of a branch, the retried build will build from the tip of that branch, which may not be the same revision as the original build. * If the original build specified a commit sha or revision ID, the retried build will use the identical source. For builds that specify `StorageSource`: * If the original build pulled source from Cloud Storage without specifying the generation of the object, the new build will use the current object, which may be different from the original build source. 
* If the original build pulled source from Cloud Storage and specified the generation of the object, the new build will attempt to use the same object, which may or may not be available depending on the bucket's lifecycle management settings.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/builds/{buildsId}:retry", // "httpMethod": "POST", // "id": "cloudbuild.projects.locations.builds.retry", @@ -11214,6 +11364,7 @@ func (c *ProjectsLocationsGithubEnterpriseConfigsCreateCall) Do(opts ...googleap // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "ID of the project.", // "location": "query", // "type": "string" @@ -11364,6 +11515,7 @@ func (c *ProjectsLocationsGithubEnterpriseConfigsDeleteCall) Do(opts ...googleap // ], // "parameters": { // "configId": { + // "deprecated": true, // "description": "Unique identifier of the `GitHubEnterpriseConfig`", // "location": "query", // "type": "string" @@ -11376,6 +11528,7 @@ func (c *ProjectsLocationsGithubEnterpriseConfigsDeleteCall) Do(opts ...googleap // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "ID of the project", // "location": "query", // "type": "string" @@ -11536,6 +11689,7 @@ func (c *ProjectsLocationsGithubEnterpriseConfigsGetCall) Do(opts ...googleapi.C // ], // "parameters": { // "configId": { + // "deprecated": true, // "description": "Unique identifier of the `GitHubEnterpriseConfig`", // "location": "query", // "type": "string" @@ -11548,6 +11702,7 @@ func (c *ProjectsLocationsGithubEnterpriseConfigsGetCall) Do(opts ...googleapi.C // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "ID of the project", // "location": "query", // "type": "string" @@ -11707,6 +11862,7 @@ func (c *ProjectsLocationsGithubEnterpriseConfigsListCall) Do(opts ...googleapi. 
// "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "ID of the project", // "location": "query", // "type": "string" @@ -12911,6 +13067,15 @@ func (c *ProjectsLocationsTriggersPatchCall) TriggerId(triggerId string) *Projec return c } +// UpdateMask sets the optional parameter "updateMask": Update mask for +// the resource. If this is set, the server will only update the fields +// specified in the field mask. Otherwise, a full update of the mutable +// resource fields will be performed. +func (c *ProjectsLocationsTriggersPatchCall) UpdateMask(updateMask string) *ProjectsLocationsTriggersPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -13026,6 +13191,12 @@ func (c *ProjectsLocationsTriggersPatchCall) Do(opts ...googleapi.CallOption) (* // "description": "Required. ID of the `BuildTrigger` to update.", // "location": "query", // "type": "string" + // }, + // "updateMask": { + // "description": "Update mask for the resource. If this is set, the server will only update the fields specified in the field mask. Otherwise, a full update of the mutable resource fields will be performed.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, // "path": "v1/{+resourceName}", @@ -14934,6 +15105,15 @@ func (r *ProjectsTriggersService) Patch(projectId string, triggerId string, buil return c } +// UpdateMask sets the optional parameter "updateMask": Update mask for +// the resource. If this is set, the server will only update the fields +// specified in the field mask. Otherwise, a full update of the mutable +// resource fields will be performed. 
+func (c *ProjectsTriggersPatchCall) UpdateMask(updateMask string) *ProjectsTriggersPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -15046,6 +15226,12 @@ func (c *ProjectsTriggersPatchCall) Do(opts ...googleapi.CallOption) (*BuildTrig // "location": "path", // "required": true, // "type": "string" + // }, + // "updateMask": { + // "description": "Update mask for the resource. If this is set, the server will only update the fields specified in the field mask. Otherwise, a full update of the mutable resource fields will be performed.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, // "path": "v1/projects/{projectId}/triggers/{triggerId}", diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-api.json b/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-api.json index 61a8f2bead..2e184fa0eb 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-api.json @@ -546,7 +546,7 @@ } } }, - "revision": "20230307", + "revision": "20230608", "rootUrl": "https://cloudfunctions.googleapis.com/", "schemas": { "AuditConfig": { @@ -702,7 +702,7 @@ "type": "string" }, "entryPoint": { - "description": "The name of the function (as defined in source code) that will be executed. Defaults to the resource name suffix, if not specified. For backward compatibility, if function with given name is not found, then the system will try to use function named \"function\". 
For Node.js this is name of a function exported by the module specified in `source_location`.", + "description": "The name of the function (as defined in source code) that will be executed. Defaults to the resource name suffix (ID of the function), if not specified.", "type": "string" }, "environmentVariables": { @@ -964,6 +964,30 @@ }, "type": "object" }, + "GoogleCloudFunctionsV2LocationMetadata": { + "description": "Extra GCF specific location information.", + "id": "GoogleCloudFunctionsV2LocationMetadata", + "properties": { + "environments": { + "description": "The Cloud Function environments this location supports.", + "items": { + "enum": [ + "ENVIRONMENT_UNSPECIFIED", + "GEN_1", + "GEN_2" + ], + "enumDescriptions": [ + "Unspecified", + "Gen 1", + "Gen 2" + ], + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "GoogleCloudFunctionsV2OperationMetadata": { "description": "Represents the metadata of the long-running operation.", "id": "GoogleCloudFunctionsV2OperationMetadata", @@ -973,7 +997,7 @@ "type": "string" }, "cancelRequested": { - "description": "Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", + "description": "Identifies whether the user has requested cancellation of the operation. 
Operations that have successfully been cancelled have google.longrunning.Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "type": "boolean" }, "createTime": { @@ -1111,6 +1135,30 @@ }, "type": "object" }, + "GoogleCloudFunctionsV2alphaLocationMetadata": { + "description": "Extra GCF specific location information.", + "id": "GoogleCloudFunctionsV2alphaLocationMetadata", + "properties": { + "environments": { + "description": "The Cloud Function environments this location supports.", + "items": { + "enum": [ + "ENVIRONMENT_UNSPECIFIED", + "GEN_1", + "GEN_2" + ], + "enumDescriptions": [ + "Unspecified", + "Gen 1", + "Gen 2" + ], + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "GoogleCloudFunctionsV2alphaOperationMetadata": { "description": "Represents the metadata of the long-running operation.", "id": "GoogleCloudFunctionsV2alphaOperationMetadata", @@ -1120,7 +1168,7 @@ "type": "string" }, "cancelRequested": { - "description": "Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", + "description": "Identifies whether the user has requested cancellation of the operation. 
Operations that have successfully been cancelled have google.longrunning.Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "type": "boolean" }, "createTime": { @@ -1258,6 +1306,30 @@ }, "type": "object" }, + "GoogleCloudFunctionsV2betaLocationMetadata": { + "description": "Extra GCF specific location information.", + "id": "GoogleCloudFunctionsV2betaLocationMetadata", + "properties": { + "environments": { + "description": "The Cloud Function environments this location supports.", + "items": { + "enum": [ + "ENVIRONMENT_UNSPECIFIED", + "GEN_1", + "GEN_2" + ], + "enumDescriptions": [ + "Unspecified", + "Gen 1", + "Gen 2" + ], + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "GoogleCloudFunctionsV2betaOperationMetadata": { "description": "Represents the metadata of the long-running operation.", "id": "GoogleCloudFunctionsV2betaOperationMetadata", @@ -1267,7 +1339,7 @@ "type": "string" }, "cancelRequested": { - "description": "Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", + "description": "Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have google.longrunning.Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", "type": "boolean" }, "createTime": { @@ -1493,7 +1565,7 @@ "type": "object" }, "Location": { - "description": "A resource that represents Google Cloud Platform location.", + "description": "A resource that represents a Google Cloud location.", "id": "Location", "properties": { "displayName": { @@ -1687,7 +1759,7 @@ "type": "string" }, "version": { - "description": "Version of the secret (version number or the string 'latest'). 
It is preferrable to use `latest` version with secret volumes as secret value changes are reflected immediately.", + "description": "Version of the secret (version number or the string 'latest'). It is preferable to use `latest` version with secret volumes as secret value changes are reflected immediately.", "type": "string" } }, diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-gen.go index 1f84d2701d..a5b2c153c9 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudfunctions/v1/cloudfunctions-gen.go @@ -71,6 +71,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "cloudfunctions:v1" const apiName = "cloudfunctions" @@ -491,11 +492,8 @@ type CloudFunction struct { DockerRepository string `json:"dockerRepository,omitempty"` // EntryPoint: The name of the function (as defined in source code) that - // will be executed. Defaults to the resource name suffix, if not - // specified. For backward compatibility, if function with given name is - // not found, then the system will try to use function named "function". - // For Node.js this is name of a function exported by the module - // specified in `source_location`. + // will be executed. Defaults to the resource name suffix (ID of the + // function), if not specified. EntryPoint string `json:"entryPoint,omitempty"` // EnvironmentVariables: Environment variables that shall be available @@ -991,6 +989,40 @@ func (s *GenerateUploadUrlResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GoogleCloudFunctionsV2LocationMetadata: Extra GCF specific location +// information. 
+type GoogleCloudFunctionsV2LocationMetadata struct { + // Environments: The Cloud Function environments this location supports. + // + // Possible values: + // "ENVIRONMENT_UNSPECIFIED" - Unspecified + // "GEN_1" - Gen 1 + // "GEN_2" - Gen 2 + Environments []string `json:"environments,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Environments") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Environments") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudFunctionsV2LocationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudFunctionsV2LocationMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GoogleCloudFunctionsV2OperationMetadata: Represents the metadata of // the long-running operation. type GoogleCloudFunctionsV2OperationMetadata struct { @@ -999,8 +1031,8 @@ type GoogleCloudFunctionsV2OperationMetadata struct { // CancelRequested: Identifies whether the user has requested // cancellation of the operation. Operations that have successfully been - // cancelled have Operation.error value with a google.rpc.Status.code of - // 1, corresponding to `Code.CANCELLED`. 
+ // cancelled have google.longrunning.Operation.error value with a + // google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`. CancelRequested bool `json:"cancelRequested,omitempty"` // CreateTime: The time the operation was created. @@ -1146,6 +1178,40 @@ func (s *GoogleCloudFunctionsV2StateMessage) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GoogleCloudFunctionsV2alphaLocationMetadata: Extra GCF specific +// location information. +type GoogleCloudFunctionsV2alphaLocationMetadata struct { + // Environments: The Cloud Function environments this location supports. + // + // Possible values: + // "ENVIRONMENT_UNSPECIFIED" - Unspecified + // "GEN_1" - Gen 1 + // "GEN_2" - Gen 2 + Environments []string `json:"environments,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Environments") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Environments") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudFunctionsV2alphaLocationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudFunctionsV2alphaLocationMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GoogleCloudFunctionsV2alphaOperationMetadata: Represents the metadata // of the long-running operation. type GoogleCloudFunctionsV2alphaOperationMetadata struct { @@ -1154,8 +1220,8 @@ type GoogleCloudFunctionsV2alphaOperationMetadata struct { // CancelRequested: Identifies whether the user has requested // cancellation of the operation. Operations that have successfully been - // cancelled have Operation.error value with a google.rpc.Status.code of - // 1, corresponding to `Code.CANCELLED`. + // cancelled have google.longrunning.Operation.error value with a + // google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`. CancelRequested bool `json:"cancelRequested,omitempty"` // CreateTime: The time the operation was created. @@ -1302,6 +1368,40 @@ func (s *GoogleCloudFunctionsV2alphaStateMessage) MarshalJSON() ([]byte, error) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GoogleCloudFunctionsV2betaLocationMetadata: Extra GCF specific +// location information. +type GoogleCloudFunctionsV2betaLocationMetadata struct { + // Environments: The Cloud Function environments this location supports. + // + // Possible values: + // "ENVIRONMENT_UNSPECIFIED" - Unspecified + // "GEN_1" - Gen 1 + // "GEN_2" - Gen 2 + Environments []string `json:"environments,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Environments") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. 
+ // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Environments") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudFunctionsV2betaLocationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudFunctionsV2betaLocationMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GoogleCloudFunctionsV2betaOperationMetadata: Represents the metadata // of the long-running operation. type GoogleCloudFunctionsV2betaOperationMetadata struct { @@ -1310,8 +1410,8 @@ type GoogleCloudFunctionsV2betaOperationMetadata struct { // CancelRequested: Identifies whether the user has requested // cancellation of the operation. Operations that have successfully been - // cancelled have Operation.error value with a google.rpc.Status.code of - // 1, corresponding to `Code.CANCELLED`. + // cancelled have google.longrunning.Operation.error value with a + // google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`. CancelRequested bool `json:"cancelRequested,omitempty"` // CreateTime: The time the operation was created. @@ -1615,7 +1715,7 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Location: A resource that represents Google Cloud Platform location. +// Location: A resource that represents a Google Cloud location. type Location struct { // DisplayName: The friendly name for this location, typically a nearby // city name. For example, "Tokyo". 
@@ -1959,7 +2059,7 @@ type SecretVersion struct { Path string `json:"path,omitempty"` // Version: Version of the secret (version number or the string - // 'latest'). It is preferrable to use `latest` version with secret + // 'latest'). It is preferable to use `latest` version with secret // volumes as secret value changes are reflected immediately. Version string `json:"version,omitempty"` diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-api.json b/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-api.json index 3d7128bb75..5a491da158 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-api.json @@ -1460,6 +1460,54 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "searchDirectGroups": { + "description": "Searches direct groups of a member.", + "flatPath": "v1/groups/{groupsId}/memberships:searchDirectGroups", + "httpMethod": "GET", + "id": "cloudidentity.groups.memberships.searchDirectGroups", + "parameterOrder": [ + "parent" + ], + "parameters": { + "orderBy": { + "description": "The ordering of membership relation for the display name or email in the response. The syntax for this field can be found at https://cloud.google.com/apis/design/design_patterns#sorting_order. Example: Sort by the ascending display name: order_by=\"group_name\" or order_by=\"group_name asc\". Sort by the descending display name: order_by=\"group_name desc\". Sort by the ascending group key: order_by=\"group_key\" or order_by=\"group_key asc\". 
Sort by the descending group key: order_by=\"group_key desc\".", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "The default page size is 200 (max 1000).", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value returned from a previous list request, if any", + "location": "query", + "type": "string" + }, + "parent": { + "description": "[Resource name](https://cloud.google.com/apis/design/resource_names) of the group to search transitive memberships in. Format: groups/{group_id}, where group_id is always '-' as this API will search across all groups for a given member.", + "location": "path", + "pattern": "^groups/[^/]+$", + "required": true, + "type": "string" + }, + "query": { + "description": "Required. A CEL expression that MUST include member specification AND label(s). Users can search on label attributes of groups. CONTAINS match ('in') is supported on labels. Identity-mapped groups are uniquely identified by both a `member_key_id` and a `member_key_namespace`, which requires an additional query input: `member_key_namespace`. Example query: `member_key_id == 'member_key_id_value' \u0026\u0026 'label_value' in labels`", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/memberships:searchDirectGroups", + "response": { + "$ref": "SearchDirectGroupsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-identity.groups", + "https://www.googleapis.com/auth/cloud-identity.groups.readonly", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "searchTransitiveGroups": { "description": "Search transitive groups of a member. **Note:** This feature is only available to Google Workspace Enterprise Standard, Enterprise Plus, and Enterprise for Education; and Cloud Identity Premium accounts. If the account of the member is not one of these, a 403 (PERMISSION_DENIED) HTTP status code will be returned. 
A transitive group is any group that has a direct or indirect membership to the member. Actor must have view permissions all transitive groups.", "flatPath": "v1/groups/{groupsId}/memberships:searchTransitiveGroups", @@ -1942,7 +1990,7 @@ } } }, - "revision": "20230307", + "revision": "20230606", "rootUrl": "https://cloudidentity.googleapis.com/", "schemas": { "AddIdpCredentialOperationMetadata": { @@ -2173,10 +2221,18 @@ "description": "Resource representing the Android specific attributes of a Device.", "id": "GoogleAppsCloudidentityDevicesV1AndroidAttributes", "properties": { + "ctsProfileMatch": { + "description": "Whether the device passes Android CTS compliance.", + "type": "boolean" + }, "enabledUnknownSources": { "description": "Whether applications from unknown sources can be installed on device.", "type": "boolean" }, + "hasPotentiallyHarmfulApps": { + "description": "Whether any potentially harmful apps were detected on the device.", + "type": "boolean" + }, "ownerProfileAccount": { "description": "Whether this account is on an owner/primary profile. For phones, only true for owner profiles. Android 4+ devices can have secondary or restricted user profiles.", "type": "boolean" @@ -2200,6 +2256,14 @@ "supportsWorkProfile": { "description": "Whether device supports Android work profiles. If false, this service will not block access to corp data even if an administrator turns on the \"Enforce Work Profile\" policy.", "type": "boolean" + }, + "verifiedBoot": { + "description": "Whether Android verified boot status is GREEN.", + "type": "boolean" + }, + "verifyAppsEnabled": { + "description": "Whether Google Play Protect Verify Apps is enabled.", + "type": "boolean" } }, "type": "object" @@ -3371,6 +3435,27 @@ "readOnly": true, "type": "string" }, + "deliverySetting": { + "description": "Output only. 
Delivery setting associated with the membership.", + "enum": [ + "DELIVERY_SETTING_UNSPECIFIED", + "ALL_MAIL", + "DIGEST", + "DAILY", + "NONE", + "DISABLED" + ], + "enumDescriptions": [ + "Default. Should not be used.", + "Represents each mail should be delivered", + "Represents 1 email for every 25 messages.", + "Represents daily summary of messages.", + "Represents no delivery.", + "Represents disabled state." + ], + "readOnly": true, + "type": "string" + }, "name": { "description": "Output only. The [resource name](https://cloud.google.com/apis/design/resource_names) of the `Membership`. Shall be of the form `groups/{group}/memberships/{membership}`.", "readOnly": true, @@ -3435,6 +3520,47 @@ }, "type": "object" }, + "MembershipRelation": { + "description": "Message containing membership relation.", + "id": "MembershipRelation", + "properties": { + "description": { + "description": "An extended description to help users determine the purpose of a `Group`.", + "type": "string" + }, + "displayName": { + "description": "The display name of the `Group`.", + "type": "string" + }, + "group": { + "description": "The [resource name](https://cloud.google.com/apis/design/resource_names) of the `Group`. Shall be of the form `groups/{group_id}`.", + "type": "string" + }, + "groupKey": { + "$ref": "EntityKey", + "description": "The `EntityKey` of the `Group`." + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "One or more label entries that apply to the Group. Currently supported labels contain a key with an empty value.", + "type": "object" + }, + "membership": { + "description": "The [resource name](https://cloud.google.com/apis/design/resource_names) of the `Membership`. 
Shall be of the form `groups/{group_id}/memberships/{membership_id}`.", + "type": "string" + }, + "roles": { + "description": "The `MembershipRole`s that apply to the `Membership`.", + "items": { + "$ref": "MembershipRole" + }, + "type": "array" + } + }, + "type": "object" + }, "MembershipRole": { "description": "A membership role within the Cloud Identity Groups API. A `MembershipRole` defines the privileges granted to a `Membership`.", "id": "MembershipRole", @@ -3654,6 +3780,24 @@ }, "type": "object" }, + "SearchDirectGroupsResponse": { + "description": "The response message for MembershipsService.SearchDirectGroups.", + "id": "SearchDirectGroupsResponse", + "properties": { + "memberships": { + "description": "List of direct groups satisfying the query.", + "items": { + "$ref": "MembershipRelation" + }, + "type": "array" + }, + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no more results available for listing.", + "type": "string" + } + }, + "type": "object" + }, "SearchGroupsResponse": { "description": "The response message for GroupsService.SearchGroups.", "id": "SearchGroupsResponse", diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-gen.go index acc1acdfb9..cf77a4e777 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudidentity/v1/cloudidentity-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "cloudidentity:v1" const apiName = "cloudidentity" @@ -664,10 +665,17 @@ func (s *GetMembershipGraphResponse) MarshalJSON() ([]byte, error) { // GoogleAppsCloudidentityDevicesV1AndroidAttributes: Resource // representing the 
Android specific attributes of a Device. type GoogleAppsCloudidentityDevicesV1AndroidAttributes struct { + // CtsProfileMatch: Whether the device passes Android CTS compliance. + CtsProfileMatch bool `json:"ctsProfileMatch,omitempty"` + // EnabledUnknownSources: Whether applications from unknown sources can // be installed on device. EnabledUnknownSources bool `json:"enabledUnknownSources,omitempty"` + // HasPotentiallyHarmfulApps: Whether any potentially harmful apps were + // detected on the device. + HasPotentiallyHarmfulApps bool `json:"hasPotentiallyHarmfulApps,omitempty"` + // OwnerProfileAccount: Whether this account is on an owner/primary // profile. For phones, only true for owner profiles. Android 4+ devices // can have secondary or restricted user profiles. @@ -689,16 +697,22 @@ type GoogleAppsCloudidentityDevicesV1AndroidAttributes struct { // administrator turns on the "Enforce Work Profile" policy. SupportsWorkProfile bool `json:"supportsWorkProfile,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "EnabledUnknownSources") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // VerifiedBoot: Whether Android verified boot status is GREEN. + VerifiedBoot bool `json:"verifiedBoot,omitempty"` + + // VerifyAppsEnabled: Whether Google Play Protect Verify Apps is + // enabled. + VerifyAppsEnabled bool `json:"verifyAppsEnabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CtsProfileMatch") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "EnabledUnknownSources") to + // NullFields is a list of field names (e.g. "CtsProfileMatch") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -2516,6 +2530,18 @@ type Membership struct { // CreateTime: Output only. The time when the `Membership` was created. CreateTime string `json:"createTime,omitempty"` + // DeliverySetting: Output only. Delivery setting associated with the + // membership. + // + // Possible values: + // "DELIVERY_SETTING_UNSPECIFIED" - Default. Should not be used. + // "ALL_MAIL" - Represents each mail should be delivered + // "DIGEST" - Represents 1 email for every 25 messages. + // "DAILY" - Represents daily summary of messages. + // "NONE" - Represents no delivery. + // "DISABLED" - Represents disabled state. + DeliverySetting string `json:"deliverySetting,omitempty"` + // Name: Output only. The resource name // (https://cloud.google.com/apis/design/resource_names) of the // `Membership`. Shall be of the form @@ -2609,6 +2635,59 @@ func (s *MembershipAdjacencyList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MembershipRelation: Message containing membership relation. +type MembershipRelation struct { + // Description: An extended description to help users determine the + // purpose of a `Group`. + Description string `json:"description,omitempty"` + + // DisplayName: The display name of the `Group`. 
+ DisplayName string `json:"displayName,omitempty"` + + // Group: The resource name + // (https://cloud.google.com/apis/design/resource_names) of the `Group`. + // Shall be of the form `groups/{group_id}`. + Group string `json:"group,omitempty"` + + // GroupKey: The `EntityKey` of the `Group`. + GroupKey *EntityKey `json:"groupKey,omitempty"` + + // Labels: One or more label entries that apply to the Group. Currently + // supported labels contain a key with an empty value. + Labels map[string]string `json:"labels,omitempty"` + + // Membership: The resource name + // (https://cloud.google.com/apis/design/resource_names) of the + // `Membership`. Shall be of the form + // `groups/{group_id}/memberships/{membership_id}`. + Membership string `json:"membership,omitempty"` + + // Roles: The `MembershipRole`s that apply to the `Membership`. + Roles []*MembershipRole `json:"roles,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *MembershipRelation) MarshalJSON() ([]byte, error) { + type NoMethod MembershipRelation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // MembershipRole: A membership role within the Cloud Identity Groups // API. A `MembershipRole` defines the privileges granted to a // `Membership`. @@ -3047,6 +3126,43 @@ func (s *SamlSsoInfo) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SearchDirectGroupsResponse: The response message for +// MembershipsService.SearchDirectGroups. +type SearchDirectGroupsResponse struct { + // Memberships: List of direct groups satisfying the query. + Memberships []*MembershipRelation `json:"memberships,omitempty"` + + // NextPageToken: Token to retrieve the next page of results, or empty + // if there are no more results available for listing. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Memberships") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Memberships") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SearchDirectGroupsResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchDirectGroupsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SearchGroupsResponse: The response message for // GroupsService.SearchGroups. type SearchGroupsResponse struct { @@ -10190,6 +10306,240 @@ func (c *GroupsMembershipsModifyMembershipRolesCall) Do(opts ...googleapi.CallOp } +// method id "cloudidentity.groups.memberships.searchDirectGroups": + +type GroupsMembershipsSearchDirectGroupsCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// SearchDirectGroups: Searches direct groups of a member. +// +// - parent: Resource name +// (https://cloud.google.com/apis/design/resource_names) of the group +// to search transitive memberships in. Format: groups/{group_id}, +// where group_id is always '-' as this API will search across all +// groups for a given member. +func (r *GroupsMembershipsService) SearchDirectGroups(parent string) *GroupsMembershipsSearchDirectGroupsCall { + c := &GroupsMembershipsSearchDirectGroupsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// OrderBy sets the optional parameter "orderBy": The ordering of +// membership relation for the display name or email in the response. +// The syntax for this field can be found at +// https://cloud.google.com/apis/design/design_patterns#sorting_order. +// Example: Sort by the ascending display name: order_by="group_name" or +// order_by="group_name asc". Sort by the descending display name: +// order_by="group_name desc". Sort by the ascending group key: +// order_by="group_key" or order_by="group_key asc". Sort by the +// descending group key: order_by="group_key desc". 
+func (c *GroupsMembershipsSearchDirectGroupsCall) OrderBy(orderBy string) *GroupsMembershipsSearchDirectGroupsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": The default page +// size is 200 (max 1000). +func (c *GroupsMembershipsSearchDirectGroupsCall) PageSize(pageSize int64) *GroupsMembershipsSearchDirectGroupsCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The +// next_page_token value returned from a previous list request, if any +func (c *GroupsMembershipsSearchDirectGroupsCall) PageToken(pageToken string) *GroupsMembershipsSearchDirectGroupsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Query sets the optional parameter "query": Required. A CEL expression +// that MUST include member specification AND label(s). Users can search +// on label attributes of groups. CONTAINS match ('in') is supported on +// labels. Identity-mapped groups are uniquely identified by both a +// `member_key_id` and a `member_key_namespace`, which requires an +// additional query input: `member_key_namespace`. Example query: +// `member_key_id == 'member_key_id_value' && 'label_value' in labels` +func (c *GroupsMembershipsSearchDirectGroupsCall) Query(query string) *GroupsMembershipsSearchDirectGroupsCall { + c.urlParams_.Set("query", query) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GroupsMembershipsSearchDirectGroupsCall) Fields(s ...googleapi.Field) *GroupsMembershipsSearchDirectGroupsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GroupsMembershipsSearchDirectGroupsCall) IfNoneMatch(entityTag string) *GroupsMembershipsSearchDirectGroupsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GroupsMembershipsSearchDirectGroupsCall) Context(ctx context.Context) *GroupsMembershipsSearchDirectGroupsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GroupsMembershipsSearchDirectGroupsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GroupsMembershipsSearchDirectGroupsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/memberships:searchDirectGroups") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudidentity.groups.memberships.searchDirectGroups" call. 
+// Exactly one of *SearchDirectGroupsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *SearchDirectGroupsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GroupsMembershipsSearchDirectGroupsCall) Do(opts ...googleapi.CallOption) (*SearchDirectGroupsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SearchDirectGroupsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Searches direct groups of a member.", + // "flatPath": "v1/groups/{groupsId}/memberships:searchDirectGroups", + // "httpMethod": "GET", + // "id": "cloudidentity.groups.memberships.searchDirectGroups", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "orderBy": { + // "description": "The ordering of membership relation for the display name or email in the response. The syntax for this field can be found at https://cloud.google.com/apis/design/design_patterns#sorting_order. Example: Sort by the ascending display name: order_by=\"group_name\" or order_by=\"group_name asc\". Sort by the descending display name: order_by=\"group_name desc\". 
Sort by the ascending group key: order_by=\"group_key\" or order_by=\"group_key asc\". Sort by the descending group key: order_by=\"group_key desc\".", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "The default page size is 200 (max 1000).", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The next_page_token value returned from a previous list request, if any", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "[Resource name](https://cloud.google.com/apis/design/resource_names) of the group to search transitive memberships in. Format: groups/{group_id}, where group_id is always '-' as this API will search across all groups for a given member.", + // "location": "path", + // "pattern": "^groups/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "query": { + // "description": "Required. A CEL expression that MUST include member specification AND label(s). Users can search on label attributes of groups. CONTAINS match ('in') is supported on labels. Identity-mapped groups are uniquely identified by both a `member_key_id` and a `member_key_namespace`, which requires an additional query input: `member_key_namespace`. Example query: `member_key_id == 'member_key_id_value' \u0026\u0026 'label_value' in labels`", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/memberships:searchDirectGroups", + // "response": { + // "$ref": "SearchDirectGroupsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-identity.groups", + // "https://www.googleapis.com/auth/cloud-identity.groups.readonly", + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. 
+// The provided context supersedes any context provided to the Context method. +func (c *GroupsMembershipsSearchDirectGroupsCall) Pages(ctx context.Context, f func(*SearchDirectGroupsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "cloudidentity.groups.memberships.searchTransitiveGroups": type GroupsMembershipsSearchTransitiveGroupsCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudiot/v1/cloudiot-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudiot/v1/cloudiot-gen.go index a98334cf33..0602384d07 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudiot/v1/cloudiot-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudiot/v1/cloudiot-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "cloudiot:v1" const apiName = "cloudiot" diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json b/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json index e2ada4631b..359ac3dd4f 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json @@ -599,6 +599,32 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloudkms" ] + }, + "verifyConnectivity": { + "description": "Verifies that Cloud KMS can successfully connect to the external key manager specified by an EkmConnection. 
If there is an error connecting to the EKM, this method returns a FAILED_PRECONDITION status containing structured information as described at https://cloud.google.com/kms/docs/reference/ekm_errors.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/ekmConnections/{ekmConnectionsId}:verifyConnectivity", + "httpMethod": "GET", + "id": "cloudkms.projects.locations.ekmConnections.verifyConnectivity", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the EkmConnection to verify.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/ekmConnections/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:verifyConnectivity", + "response": { + "$ref": "VerifyConnectivityResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudkms" + ] } } }, @@ -1496,6 +1522,64 @@ "https://www.googleapis.com/auth/cloudkms" ] }, + "rawDecrypt": { + "description": "Decrypts data that was originally encrypted using a raw cryptographic mechanism. The CryptoKey.purpose must be RAW_ENCRYPT_DECRYPT.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:rawDecrypt", + "httpMethod": "POST", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.rawDecrypt", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The resource name of the CryptoKeyVersion to use for decryption.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:rawDecrypt", + "request": { + "$ref": "RawDecryptRequest" + }, + "response": { + "$ref": "RawDecryptResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudkms" + ] + }, + "rawEncrypt": { + "description": "Encrypts data using portable cryptographic primitives. Most users should choose Encrypt and Decrypt rather than their raw counterparts. The CryptoKey.purpose must be RAW_ENCRYPT_DECRYPT.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:rawEncrypt", + "httpMethod": "POST", + "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.rawEncrypt", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the CryptoKeyVersion to use for encryption.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:rawEncrypt", + "request": { + "$ref": "RawEncryptRequest" + }, + "response": { + "$ref": "RawEncryptResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudkms" + ] + }, "restore": { "description": "Restore a CryptoKeyVersion in the DESTROY_SCHEDULED state. 
Upon restoration of the CryptoKeyVersion, state will be set to DISABLED, and destroy_time will be cleared.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:restore", @@ -1737,7 +1821,7 @@ } } }, - "revision": "20230307", + "revision": "20230625", "rootUrl": "https://cloudkms.googleapis.com/", "schemas": { "AsymmetricDecryptRequest": { @@ -2076,6 +2160,7 @@ "ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT", + "RAW_ENCRYPT_DECRYPT", "MAC" ], "enumDescriptions": [ @@ -2083,6 +2168,7 @@ "CryptoKeys with this purpose may be used with Encrypt and Decrypt.", "CryptoKeys with this purpose may be used with AsymmetricSign and GetPublicKey.", "CryptoKeys with this purpose may be used with AsymmetricDecrypt and GetPublicKey.", + "CryptoKeys with this purpose may be used with RawEncrypt and RawDecrypt. This purpose is meant to be used for interoperable symmetric encryption and does not support automatic CryptoKey rotation.", "CryptoKeys with this purpose may be used with MacSign." 
], "type": "string" @@ -2108,6 +2194,8 @@ "enum": [ "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", "GOOGLE_SYMMETRIC_ENCRYPTION", + "AES_128_GCM", + "AES_256_GCM", "RSA_SIGN_PSS_2048_SHA256", "RSA_SIGN_PSS_3072_SHA256", "RSA_SIGN_PSS_4096_SHA256", @@ -2139,6 +2227,8 @@ "enumDescriptions": [ "Not specified.", "Creates symmetric encryption keys.", + "AES-GCM (Galois Counter Mode) using 128-bit keys.", + "AES-GCM (Galois Counter Mode) using 256-bit keys.", "RSASSA-PSS 2048 bit key with a SHA256 digest.", "RSASSA-PSS 3072 bit key with a SHA256 digest.", "RSASSA-PSS 4096 bit key with a SHA256 digest.", @@ -2157,9 +2247,9 @@ "RSAES-OAEP 2048 bit key with a SHA1 digest.", "RSAES-OAEP 3072 bit key with a SHA1 digest.", "RSAES-OAEP 4096 bit key with a SHA1 digest.", - "ECDSA on the NIST P-256 curve with a SHA256 digest.", - "ECDSA on the NIST P-384 curve with a SHA384 digest.", - "ECDSA on the non-NIST secp256k1 curve. This curve is only supported for HSM protection level.", + "ECDSA on the NIST P-256 curve with a SHA256 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", + "ECDSA on the NIST P-384 curve with a SHA384 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", + "ECDSA on the non-NIST secp256k1 curve. This curve is only supported for HSM protection level. 
Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "HMAC-SHA256 signing with a 256 bit key.", "HMAC-SHA1 signing with a 160 bit key.", "HMAC-SHA384 signing with a 384 bit key.", @@ -2300,6 +2390,8 @@ "enum": [ "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", "GOOGLE_SYMMETRIC_ENCRYPTION", + "AES_128_GCM", + "AES_256_GCM", "RSA_SIGN_PSS_2048_SHA256", "RSA_SIGN_PSS_3072_SHA256", "RSA_SIGN_PSS_4096_SHA256", @@ -2331,6 +2423,8 @@ "enumDescriptions": [ "Not specified.", "Creates symmetric encryption keys.", + "AES-GCM (Galois Counter Mode) using 128-bit keys.", + "AES-GCM (Galois Counter Mode) using 256-bit keys.", "RSASSA-PSS 2048 bit key with a SHA256 digest.", "RSASSA-PSS 3072 bit key with a SHA256 digest.", "RSASSA-PSS 4096 bit key with a SHA256 digest.", @@ -2349,9 +2443,9 @@ "RSAES-OAEP 2048 bit key with a SHA1 digest.", "RSAES-OAEP 3072 bit key with a SHA1 digest.", "RSAES-OAEP 4096 bit key with a SHA1 digest.", - "ECDSA on the NIST P-256 curve with a SHA256 digest.", - "ECDSA on the NIST P-384 curve with a SHA384 digest.", - "ECDSA on the non-NIST secp256k1 curve. This curve is only supported for HSM protection level.", + "ECDSA on the NIST P-256 curve with a SHA256 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", + "ECDSA on the NIST P-384 curve with a SHA384 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", + "ECDSA on the non-NIST secp256k1 curve. This curve is only supported for HSM protection level. 
Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "HMAC-SHA256 signing with a 256 bit key.", "HMAC-SHA1 signing with a 160 bit key.", "HMAC-SHA384 signing with a 384 bit key.", @@ -2707,6 +2801,8 @@ "enum": [ "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", "GOOGLE_SYMMETRIC_ENCRYPTION", + "AES_128_GCM", + "AES_256_GCM", "RSA_SIGN_PSS_2048_SHA256", "RSA_SIGN_PSS_3072_SHA256", "RSA_SIGN_PSS_4096_SHA256", @@ -2738,6 +2834,8 @@ "enumDescriptions": [ "Not specified.", "Creates symmetric encryption keys.", + "AES-GCM (Galois Counter Mode) using 128-bit keys.", + "AES-GCM (Galois Counter Mode) using 256-bit keys.", "RSASSA-PSS 2048 bit key with a SHA256 digest.", "RSASSA-PSS 3072 bit key with a SHA256 digest.", "RSASSA-PSS 4096 bit key with a SHA256 digest.", @@ -2756,9 +2854,9 @@ "RSAES-OAEP 2048 bit key with a SHA1 digest.", "RSAES-OAEP 3072 bit key with a SHA1 digest.", "RSAES-OAEP 4096 bit key with a SHA1 digest.", - "ECDSA on the NIST P-256 curve with a SHA256 digest.", - "ECDSA on the NIST P-384 curve with a SHA384 digest.", - "ECDSA on the non-NIST secp256k1 curve. This curve is only supported for HSM protection level.", + "ECDSA on the NIST P-256 curve with a SHA256 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", + "ECDSA on the NIST P-384 curve with a SHA384 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", + "ECDSA on the non-NIST secp256k1 curve. This curve is only supported for HSM protection level. 
Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "HMAC-SHA256 signing with a 256 bit key.", "HMAC-SHA1 signing with a 160 bit key.", "HMAC-SHA384 signing with a 384 bit key.", @@ -3077,7 +3175,7 @@ "type": "object" }, "Location": { - "description": "A resource that represents Google Cloud Platform location.", + "description": "A resource that represents a Google Cloud location.", "id": "Location", "properties": { "displayName": { @@ -3297,6 +3395,8 @@ "enum": [ "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", "GOOGLE_SYMMETRIC_ENCRYPTION", + "AES_128_GCM", + "AES_256_GCM", "RSA_SIGN_PSS_2048_SHA256", "RSA_SIGN_PSS_3072_SHA256", "RSA_SIGN_PSS_4096_SHA256", @@ -3328,6 +3428,8 @@ "enumDescriptions": [ "Not specified.", "Creates symmetric encryption keys.", + "AES-GCM (Galois Counter Mode) using 128-bit keys.", + "AES-GCM (Galois Counter Mode) using 256-bit keys.", "RSASSA-PSS 2048 bit key with a SHA256 digest.", "RSASSA-PSS 3072 bit key with a SHA256 digest.", "RSASSA-PSS 4096 bit key with a SHA256 digest.", @@ -3346,9 +3448,9 @@ "RSAES-OAEP 2048 bit key with a SHA1 digest.", "RSAES-OAEP 3072 bit key with a SHA1 digest.", "RSAES-OAEP 4096 bit key with a SHA1 digest.", - "ECDSA on the NIST P-256 curve with a SHA256 digest.", - "ECDSA on the NIST P-384 curve with a SHA384 digest.", - "ECDSA on the non-NIST secp256k1 curve. This curve is only supported for HSM protection level.", + "ECDSA on the NIST P-256 curve with a SHA256 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", + "ECDSA on the NIST P-384 curve with a SHA384 digest. Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", + "ECDSA on the non-NIST secp256k1 curve. This curve is only supported for HSM protection level. 
Other hash functions can also be used: https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms", "HMAC-SHA256 signing with a 256 bit key.", "HMAC-SHA1 signing with a 160 bit key.", "HMAC-SHA384 signing with a 384 bit key.", @@ -3392,6 +3494,198 @@ }, "type": "object" }, + "RawDecryptRequest": { + "description": "Request message for KeyManagementService.RawDecrypt.", + "id": "RawDecryptRequest", + "properties": { + "additionalAuthenticatedData": { + "description": "Optional. Optional data that must match the data originally supplied in RawEncryptRequest.additional_authenticated_data.", + "format": "byte", + "type": "string" + }, + "additionalAuthenticatedDataCrc32c": { + "description": "Optional. An optional CRC32C checksum of the RawDecryptRequest.additional_authenticated_data. If specified, KeyManagementService will verify the integrity of the received additional_authenticated_data using this checksum. KeyManagementService will report an error if the checksum verification fails. If you receive a checksum error, your client should verify that CRC32C(additional_authenticated_data) is equal to additional_authenticated_data_crc32c, and if so, perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type.", + "format": "int64", + "type": "string" + }, + "ciphertext": { + "description": "Required. The encrypted data originally returned in RawEncryptResponse.ciphertext.", + "format": "byte", + "type": "string" + }, + "ciphertextCrc32c": { + "description": "Optional. An optional CRC32C checksum of the RawDecryptRequest.ciphertext. 
If specified, KeyManagementService will verify the integrity of the received ciphertext using this checksum. KeyManagementService will report an error if the checksum verification fails. If you receive a checksum error, your client should verify that CRC32C(ciphertext) is equal to ciphertext_crc32c, and if so, perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type.", + "format": "int64", + "type": "string" + }, + "initializationVector": { + "description": "Required. The initialization vector (IV) used during encryption, which must match the data originally provided in RawEncryptResponse.initialization_vector.", + "format": "byte", + "type": "string" + }, + "initializationVectorCrc32c": { + "description": "Optional. An optional CRC32C checksum of the RawDecryptRequest.initialization_vector. If specified, KeyManagementService will verify the integrity of the received initialization_vector using this checksum. KeyManagementService will report an error if the checksum verification fails. If you receive a checksum error, your client should verify that CRC32C(initialization_vector) is equal to initialization_vector_crc32c, and if so, perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type.", + "format": "int64", + "type": "string" + }, + "tagLength": { + "description": "The length of the authentication tag that is appended to the end of the ciphertext. 
If unspecified (0), the default value for the key's algorithm will be used (for AES-GCM, the default value is 16).", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "RawDecryptResponse": { + "description": "Response message for KeyManagementService.RawDecrypt.", + "id": "RawDecryptResponse", + "properties": { + "plaintext": { + "description": "The decrypted data.", + "format": "byte", + "type": "string" + }, + "plaintextCrc32c": { + "description": "Integrity verification field. A CRC32C checksum of the returned RawDecryptResponse.plaintext. An integrity check of plaintext can be performed by computing the CRC32C checksum of plaintext and comparing your results to this field. Discard the response in case of non-matching checksum values, and perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: receiving this response message indicates that KeyManagementService is able to successfully decrypt the ciphertext. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type.", + "format": "int64", + "type": "string" + }, + "protectionLevel": { + "description": "The ProtectionLevel of the CryptoKeyVersion used in decryption.", + "enum": [ + "PROTECTION_LEVEL_UNSPECIFIED", + "SOFTWARE", + "HSM", + "EXTERNAL", + "EXTERNAL_VPC" + ], + "enumDescriptions": [ + "Not specified.", + "Crypto operations are performed in software.", + "Crypto operations are performed in a Hardware Security Module.", + "Crypto operations are performed by an external key manager.", + "Crypto operations are performed in an EKM-over-VPC backend." + ], + "type": "string" + }, + "verifiedAdditionalAuthenticatedDataCrc32c": { + "description": "Integrity verification field. 
A flag indicating whether RawDecryptRequest.additional_authenticated_data_crc32c was received by KeyManagementService and used for the integrity verification of additional_authenticated_data. A false value of this field indicates either that // RawDecryptRequest.additional_authenticated_data_crc32c was left unset or that it was not delivered to KeyManagementService. If you've set RawDecryptRequest.additional_authenticated_data_crc32c but this field is still false, discard the response and perform a limited number of retries.", + "type": "boolean" + }, + "verifiedCiphertextCrc32c": { + "description": "Integrity verification field. A flag indicating whether RawDecryptRequest.ciphertext_crc32c was received by KeyManagementService and used for the integrity verification of the ciphertext. A false value of this field indicates either that RawDecryptRequest.ciphertext_crc32c was left unset or that it was not delivered to KeyManagementService. If you've set RawDecryptRequest.ciphertext_crc32c but this field is still false, discard the response and perform a limited number of retries.", + "type": "boolean" + }, + "verifiedInitializationVectorCrc32c": { + "description": "Integrity verification field. A flag indicating whether RawDecryptRequest.initialization_vector_crc32c was received by KeyManagementService and used for the integrity verification of initialization_vector. A false value of this field indicates either that RawDecryptRequest.initialization_vector_crc32c was left unset or that it was not delivered to KeyManagementService. If you've set RawDecryptRequest.initialization_vector_crc32c but this field is still false, discard the response and perform a limited number of retries.", + "type": "boolean" + } + }, + "type": "object" + }, + "RawEncryptRequest": { + "description": "Request message for KeyManagementService.RawEncrypt.", + "id": "RawEncryptRequest", + "properties": { + "additionalAuthenticatedData": { + "description": "Optional. 
Optional data that, if specified, must also be provided during decryption through RawDecryptRequest.additional_authenticated_data. This field may only be used in conjunction with an algorithm that accepts additional authenticated data (for example, AES-GCM). The maximum size depends on the key version's protection_level. For SOFTWARE keys, the plaintext must be no larger than 64KiB. For HSM keys, the combined length of the plaintext and additional_authenticated_data fields must be no larger than 8KiB.", + "format": "byte", + "type": "string" + }, + "additionalAuthenticatedDataCrc32c": { + "description": "Optional. An optional CRC32C checksum of the RawEncryptRequest.additional_authenticated_data. If specified, KeyManagementService will verify the integrity of the received additional_authenticated_data using this checksum. KeyManagementService will report an error if the checksum verification fails. If you receive a checksum error, your client should verify that CRC32C(additional_authenticated_data) is equal to additional_authenticated_data_crc32c, and if so, perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type.", + "format": "int64", + "type": "string" + }, + "initializationVector": { + "description": "Optional. A customer-supplied initialization vector that will be used for encryption. If it is not provided for AES-CBC and AES-CTR, one will be generated. It will be returned in RawEncryptResponse.initialization_vector.", + "format": "byte", + "type": "string" + }, + "initializationVectorCrc32c": { + "description": "Optional. An optional CRC32C checksum of the RawEncryptRequest.initialization_vector. 
If specified, KeyManagementService will verify the integrity of the received initialization_vector using this checksum. KeyManagementService will report an error if the checksum verification fails. If you receive a checksum error, your client should verify that CRC32C(initialization_vector) is equal to initialization_vector_crc32c, and if so, perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type.", + "format": "int64", + "type": "string" + }, + "plaintext": { + "description": "Required. The data to encrypt. Must be no larger than 64KiB. The maximum size depends on the key version's protection_level. For SOFTWARE keys, the plaintext must be no larger than 64KiB. For HSM keys, the combined length of the plaintext and additional_authenticated_data fields must be no larger than 8KiB.", + "format": "byte", + "type": "string" + }, + "plaintextCrc32c": { + "description": "Optional. An optional CRC32C checksum of the RawEncryptRequest.plaintext. If specified, KeyManagementService will verify the integrity of the received plaintext using this checksum. KeyManagementService will report an error if the checksum verification fails. If you receive a checksum error, your client should verify that CRC32C(plaintext) is equal to plaintext_crc32c, and if so, perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. 
However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "RawEncryptResponse": { + "description": "Response message for KeyManagementService.RawEncrypt.", + "id": "RawEncryptResponse", + "properties": { + "ciphertext": { + "description": "The encrypted data. In the case of AES-GCM, the authentication tag is the tag_length bytes at the end of this field.", + "format": "byte", + "type": "string" + }, + "ciphertextCrc32c": { + "description": "Integrity verification field. A CRC32C checksum of the returned RawEncryptResponse.ciphertext. An integrity check of ciphertext can be performed by computing the CRC32C checksum of ciphertext and comparing your results to this field. Discard the response in case of non-matching checksum values, and perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type.", + "format": "int64", + "type": "string" + }, + "initializationVector": { + "description": "The initialization vector (IV) generated by the service during encryption. This value must be stored and provided in RawDecryptRequest.initialization_vector at decryption time.", + "format": "byte", + "type": "string" + }, + "initializationVectorCrc32c": { + "description": "Integrity verification field. A CRC32C checksum of the returned RawEncryptResponse.initialization_vector. An integrity check of initialization_vector can be performed by computing the CRC32C checksum of initialization_vector and comparing your results to this field. 
Discard the response in case of non-matching checksum values, and perform a limited number of retries. A persistent mismatch may indicate an issue in your computation of the CRC32C checksum. Note: This field is defined as int64 for reasons of compatibility across different languages. However, it is a non-negative integer, which will never exceed 2^32-1, and can be safely downconverted to uint32 in languages that support this type.", + "format": "int64", + "type": "string" + }, + "name": { + "description": "The resource name of the CryptoKeyVersion used in encryption. Check this field to verify that the intended resource was used for encryption.", + "type": "string" + }, + "protectionLevel": { + "description": "The ProtectionLevel of the CryptoKeyVersion used in encryption.", + "enum": [ + "PROTECTION_LEVEL_UNSPECIFIED", + "SOFTWARE", + "HSM", + "EXTERNAL", + "EXTERNAL_VPC" + ], + "enumDescriptions": [ + "Not specified.", + "Crypto operations are performed in software.", + "Crypto operations are performed in a Hardware Security Module.", + "Crypto operations are performed by an external key manager.", + "Crypto operations are performed in an EKM-over-VPC backend." + ], + "type": "string" + }, + "tagLength": { + "description": "The length of the authentication tag that is appended to the end of the ciphertext.", + "format": "int32", + "type": "integer" + }, + "verifiedAdditionalAuthenticatedDataCrc32c": { + "description": "Integrity verification field. A flag indicating whether RawEncryptRequest.additional_authenticated_data_crc32c was received by KeyManagementService and used for the integrity verification of additional_authenticated_data. A false value of this field indicates either that // RawEncryptRequest.additional_authenticated_data_crc32c was left unset or that it was not delivered to KeyManagementService. 
If you've set RawEncryptRequest.additional_authenticated_data_crc32c but this field is still false, discard the response and perform a limited number of retries.", + "type": "boolean" + }, + "verifiedInitializationVectorCrc32c": { + "description": "Integrity verification field. A flag indicating whether RawEncryptRequest.initialization_vector_crc32c was received by KeyManagementService and used for the integrity verification of initialization_vector. A false value of this field indicates either that RawEncryptRequest.initialization_vector_crc32c was left unset or that it was not delivered to KeyManagementService. If you've set RawEncryptRequest.initialization_vector_crc32c but this field is still false, discard the response and perform a limited number of retries.", + "type": "boolean" + }, + "verifiedPlaintextCrc32c": { + "description": "Integrity verification field. A flag indicating whether RawEncryptRequest.plaintext_crc32c was received by KeyManagementService and used for the integrity verification of the plaintext. A false value of this field indicates either that RawEncryptRequest.plaintext_crc32c was left unset or that it was not delivered to KeyManagementService. If you've set RawEncryptRequest.plaintext_crc32c but this field is still false, discard the response and perform a limited number of retries.", + "type": "boolean" + } + }, + "type": "object" + }, "RestoreCryptoKeyVersionRequest": { "description": "Request message for KeyManagementService.RestoreCryptoKeyVersion.", "id": "RestoreCryptoKeyVersionRequest", @@ -3479,6 +3773,12 @@ }, "type": "object" }, + "VerifyConnectivityResponse": { + "description": "Response message for EkmService.VerifyConnectivity.", + "id": "VerifyConnectivityResponse", + "properties": {}, + "type": "object" + }, "WrappingPublicKey": { "description": "The public key component of the wrapping key. 
For details of the type of key this public key corresponds to, see the ImportMethod.", "id": "WrappingPublicKey", diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go index 81ebe0850e..0d9c973e19 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go @@ -77,6 +77,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "cloudkms:v1" const apiName = "cloudkms" @@ -848,6 +849,10 @@ type CryptoKey struct { // AsymmetricSign and GetPublicKey. // "ASYMMETRIC_DECRYPT" - CryptoKeys with this purpose may be used // with AsymmetricDecrypt and GetPublicKey. + // "RAW_ENCRYPT_DECRYPT" - CryptoKeys with this purpose may be used + // with RawEncrypt and RawDecrypt. This purpose is meant to be used for + // interoperable symmetric encryption and does not support automatic + // CryptoKey rotation. // "MAC" - CryptoKeys with this purpose may be used with MacSign. Purpose string `json:"purpose,omitempty"` @@ -906,6 +911,8 @@ type CryptoKeyVersion struct { // Possible values: // "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED" - Not specified. // "GOOGLE_SYMMETRIC_ENCRYPTION" - Creates symmetric encryption keys. + // "AES_128_GCM" - AES-GCM (Galois Counter Mode) using 128-bit keys. + // "AES_256_GCM" - AES-GCM (Galois Counter Mode) using 256-bit keys. // "RSA_SIGN_PSS_2048_SHA256" - RSASSA-PSS 2048 bit key with a SHA256 // digest. // "RSA_SIGN_PSS_3072_SHA256" - RSASSA-PSS 3072 bit key with a SHA256 @@ -943,11 +950,15 @@ type CryptoKeyVersion struct { // "RSA_DECRYPT_OAEP_4096_SHA1" - RSAES-OAEP 4096 bit key with a SHA1 // digest. // "EC_SIGN_P256_SHA256" - ECDSA on the NIST P-256 curve with a SHA256 - // digest. + // digest. 
Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms // "EC_SIGN_P384_SHA384" - ECDSA on the NIST P-384 curve with a SHA384 - // digest. + // digest. Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms // "EC_SIGN_SECP256K1_SHA256" - ECDSA on the non-NIST secp256k1 curve. - // This curve is only supported for HSM protection level. + // This curve is only supported for HSM protection level. Other hash + // functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms // "HMAC_SHA256" - HMAC-SHA256 signing with a 256 bit key. // "HMAC_SHA1" - HMAC-SHA1 signing with a 160 bit key. // "HMAC_SHA384" - HMAC-SHA384 signing with a 384 bit key. @@ -1115,6 +1126,8 @@ type CryptoKeyVersionTemplate struct { // Possible values: // "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED" - Not specified. // "GOOGLE_SYMMETRIC_ENCRYPTION" - Creates symmetric encryption keys. + // "AES_128_GCM" - AES-GCM (Galois Counter Mode) using 128-bit keys. + // "AES_256_GCM" - AES-GCM (Galois Counter Mode) using 256-bit keys. // "RSA_SIGN_PSS_2048_SHA256" - RSASSA-PSS 2048 bit key with a SHA256 // digest. // "RSA_SIGN_PSS_3072_SHA256" - RSASSA-PSS 3072 bit key with a SHA256 @@ -1152,11 +1165,15 @@ type CryptoKeyVersionTemplate struct { // "RSA_DECRYPT_OAEP_4096_SHA1" - RSAES-OAEP 4096 bit key with a SHA1 // digest. // "EC_SIGN_P256_SHA256" - ECDSA on the NIST P-256 curve with a SHA256 - // digest. + // digest. Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms // "EC_SIGN_P384_SHA384" - ECDSA on the NIST P-384 curve with a SHA384 - // digest. + // digest. 
Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms // "EC_SIGN_SECP256K1_SHA256" - ECDSA on the non-NIST secp256k1 curve. - // This curve is only supported for HSM protection level. + // This curve is only supported for HSM protection level. Other hash + // functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms // "HMAC_SHA256" - HMAC-SHA256 signing with a 256 bit key. // "HMAC_SHA1" - HMAC-SHA1 signing with a 160 bit key. // "HMAC_SHA384" - HMAC-SHA384 signing with a 384 bit key. @@ -1861,6 +1878,8 @@ type ImportCryptoKeyVersionRequest struct { // Possible values: // "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED" - Not specified. // "GOOGLE_SYMMETRIC_ENCRYPTION" - Creates symmetric encryption keys. + // "AES_128_GCM" - AES-GCM (Galois Counter Mode) using 128-bit keys. + // "AES_256_GCM" - AES-GCM (Galois Counter Mode) using 256-bit keys. // "RSA_SIGN_PSS_2048_SHA256" - RSASSA-PSS 2048 bit key with a SHA256 // digest. // "RSA_SIGN_PSS_3072_SHA256" - RSASSA-PSS 3072 bit key with a SHA256 @@ -1898,11 +1917,15 @@ type ImportCryptoKeyVersionRequest struct { // "RSA_DECRYPT_OAEP_4096_SHA1" - RSAES-OAEP 4096 bit key with a SHA1 // digest. // "EC_SIGN_P256_SHA256" - ECDSA on the NIST P-256 curve with a SHA256 - // digest. + // digest. Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms // "EC_SIGN_P384_SHA384" - ECDSA on the NIST P-384 curve with a SHA384 - // digest. + // digest. Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms // "EC_SIGN_SECP256K1_SHA256" - ECDSA on the non-NIST secp256k1 curve. - // This curve is only supported for HSM protection level. + // This curve is only supported for HSM protection level. 
Other hash + // functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms // "HMAC_SHA256" - HMAC-SHA256 signing with a 256 bit key. // "HMAC_SHA1" - HMAC-SHA1 signing with a 160 bit key. // "HMAC_SHA384" - HMAC-SHA384 signing with a 384 bit key. @@ -2455,7 +2478,7 @@ func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Location: A resource that represents Google Cloud Platform location. +// Location: A resource that represents a Google Cloud location. type Location struct { // DisplayName: The friendly name for this location, typically a nearby // city name. For example, "Tokyo". @@ -2918,6 +2941,8 @@ type PublicKey struct { // Possible values: // "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED" - Not specified. // "GOOGLE_SYMMETRIC_ENCRYPTION" - Creates symmetric encryption keys. + // "AES_128_GCM" - AES-GCM (Galois Counter Mode) using 128-bit keys. + // "AES_256_GCM" - AES-GCM (Galois Counter Mode) using 256-bit keys. // "RSA_SIGN_PSS_2048_SHA256" - RSASSA-PSS 2048 bit key with a SHA256 // digest. // "RSA_SIGN_PSS_3072_SHA256" - RSASSA-PSS 3072 bit key with a SHA256 @@ -2955,11 +2980,15 @@ type PublicKey struct { // "RSA_DECRYPT_OAEP_4096_SHA1" - RSAES-OAEP 4096 bit key with a SHA1 // digest. // "EC_SIGN_P256_SHA256" - ECDSA on the NIST P-256 curve with a SHA256 - // digest. + // digest. Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms // "EC_SIGN_P384_SHA384" - ECDSA on the NIST P-384 curve with a SHA384 - // digest. + // digest. Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms // "EC_SIGN_SECP256K1_SHA256" - ECDSA on the non-NIST secp256k1 curve. - // This curve is only supported for HSM protection level. 
+ // This curve is only supported for HSM protection level. Other hash + // functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms // "HMAC_SHA256" - HMAC-SHA256 signing with a 256 bit key. // "HMAC_SHA1" - HMAC-SHA1 signing with a 160 bit key. // "HMAC_SHA384" - HMAC-SHA384 signing with a 384 bit key. @@ -3035,6 +3064,417 @@ func (s *PublicKey) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RawDecryptRequest: Request message for +// KeyManagementService.RawDecrypt. +type RawDecryptRequest struct { + // AdditionalAuthenticatedData: Optional. Optional data that must match + // the data originally supplied in + // RawEncryptRequest.additional_authenticated_data. + AdditionalAuthenticatedData string `json:"additionalAuthenticatedData,omitempty"` + + // AdditionalAuthenticatedDataCrc32c: Optional. An optional CRC32C + // checksum of the RawDecryptRequest.additional_authenticated_data. If + // specified, KeyManagementService will verify the integrity of the + // received additional_authenticated_data using this checksum. + // KeyManagementService will report an error if the checksum + // verification fails. If you receive a checksum error, your client + // should verify that CRC32C(additional_authenticated_data) is equal to + // additional_authenticated_data_crc32c, and if so, perform a limited + // number of retries. A persistent mismatch may indicate an issue in + // your computation of the CRC32C checksum. Note: This field is defined + // as int64 for reasons of compatibility across different languages. + // However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that + // support this type. + AdditionalAuthenticatedDataCrc32c int64 `json:"additionalAuthenticatedDataCrc32c,omitempty,string"` + + // Ciphertext: Required. 
The encrypted data originally returned in + // RawEncryptResponse.ciphertext. + Ciphertext string `json:"ciphertext,omitempty"` + + // CiphertextCrc32c: Optional. An optional CRC32C checksum of the + // RawDecryptRequest.ciphertext. If specified, KeyManagementService will + // verify the integrity of the received ciphertext using this checksum. + // KeyManagementService will report an error if the checksum + // verification fails. If you receive a checksum error, your client + // should verify that CRC32C(ciphertext) is equal to ciphertext_crc32c, + // and if so, perform a limited number of retries. A persistent mismatch + // may indicate an issue in your computation of the CRC32C checksum. + // Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, + // which will never exceed 2^32-1, and can be safely downconverted to + // uint32 in languages that support this type. + CiphertextCrc32c int64 `json:"ciphertextCrc32c,omitempty,string"` + + // InitializationVector: Required. The initialization vector (IV) used + // during encryption, which must match the data originally provided in + // RawEncryptResponse.initialization_vector. + InitializationVector string `json:"initializationVector,omitempty"` + + // InitializationVectorCrc32c: Optional. An optional CRC32C checksum of + // the RawDecryptRequest.initialization_vector. If specified, + // KeyManagementService will verify the integrity of the received + // initialization_vector using this checksum. KeyManagementService will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C(initialization_vector) is equal to + // initialization_vector_crc32c, and if so, perform a limited number of + // retries. A persistent mismatch may indicate an issue in your + // computation of the CRC32C checksum. 
Note: This field is defined as + // int64 for reasons of compatibility across different languages. + // However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that + // support this type. + InitializationVectorCrc32c int64 `json:"initializationVectorCrc32c,omitempty,string"` + + // TagLength: The length of the authentication tag that is appended to + // the end of the ciphertext. If unspecified (0), the default value for + // the key's algorithm will be used (for AES-GCM, the default value is + // 16). + TagLength int64 `json:"tagLength,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AdditionalAuthenticatedData") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "AdditionalAuthenticatedData") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RawDecryptRequest) MarshalJSON() ([]byte, error) { + type NoMethod RawDecryptRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RawDecryptResponse: Response message for +// KeyManagementService.RawDecrypt. +type RawDecryptResponse struct { + // Plaintext: The decrypted data. 
+ Plaintext string `json:"plaintext,omitempty"` + + // PlaintextCrc32c: Integrity verification field. A CRC32C checksum of + // the returned RawDecryptResponse.plaintext. An integrity check of + // plaintext can be performed by computing the CRC32C checksum of + // plaintext and comparing your results to this field. Discard the + // response in case of non-matching checksum values, and perform a + // limited number of retries. A persistent mismatch may indicate an + // issue in your computation of the CRC32C checksum. Note: receiving + // this response message indicates that KeyManagementService is able to + // successfully decrypt the ciphertext. Note: This field is defined as + // int64 for reasons of compatibility across different languages. + // However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that + // support this type. + PlaintextCrc32c int64 `json:"plaintextCrc32c,omitempty,string"` + + // ProtectionLevel: The ProtectionLevel of the CryptoKeyVersion used in + // decryption. + // + // Possible values: + // "PROTECTION_LEVEL_UNSPECIFIED" - Not specified. + // "SOFTWARE" - Crypto operations are performed in software. + // "HSM" - Crypto operations are performed in a Hardware Security + // Module. + // "EXTERNAL" - Crypto operations are performed by an external key + // manager. + // "EXTERNAL_VPC" - Crypto operations are performed in an EKM-over-VPC + // backend. + ProtectionLevel string `json:"protectionLevel,omitempty"` + + // VerifiedAdditionalAuthenticatedDataCrc32c: Integrity verification + // field. A flag indicating whether + // RawDecryptRequest.additional_authenticated_data_crc32c was received + // by KeyManagementService and used for the integrity verification of + // additional_authenticated_data. 
A false value of this field indicates + // either that // RawDecryptRequest.additional_authenticated_data_crc32c + // was left unset or that it was not delivered to KeyManagementService. + // If you've set RawDecryptRequest.additional_authenticated_data_crc32c + // but this field is still false, discard the response and perform a + // limited number of retries. + VerifiedAdditionalAuthenticatedDataCrc32c bool `json:"verifiedAdditionalAuthenticatedDataCrc32c,omitempty"` + + // VerifiedCiphertextCrc32c: Integrity verification field. A flag + // indicating whether RawDecryptRequest.ciphertext_crc32c was received + // by KeyManagementService and used for the integrity verification of + // the ciphertext. A false value of this field indicates either that + // RawDecryptRequest.ciphertext_crc32c was left unset or that it was not + // delivered to KeyManagementService. If you've set + // RawDecryptRequest.ciphertext_crc32c but this field is still false, + // discard the response and perform a limited number of retries. + VerifiedCiphertextCrc32c bool `json:"verifiedCiphertextCrc32c,omitempty"` + + // VerifiedInitializationVectorCrc32c: Integrity verification field. A + // flag indicating whether + // RawDecryptRequest.initialization_vector_crc32c was received by + // KeyManagementService and used for the integrity verification of + // initialization_vector. A false value of this field indicates either + // that RawDecryptRequest.initialization_vector_crc32c was left unset or + // that it was not delivered to KeyManagementService. If you've set + // RawDecryptRequest.initialization_vector_crc32c but this field is + // still false, discard the response and perform a limited number of + // retries. + VerifiedInitializationVectorCrc32c bool `json:"verifiedInitializationVectorCrc32c,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"Plaintext") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Plaintext") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RawDecryptResponse) MarshalJSON() ([]byte, error) { + type NoMethod RawDecryptResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RawEncryptRequest: Request message for +// KeyManagementService.RawEncrypt. +type RawEncryptRequest struct { + // AdditionalAuthenticatedData: Optional. Optional data that, if + // specified, must also be provided during decryption through + // RawDecryptRequest.additional_authenticated_data. This field may only + // be used in conjunction with an algorithm that accepts additional + // authenticated data (for example, AES-GCM). The maximum size depends + // on the key version's protection_level. For SOFTWARE keys, the + // plaintext must be no larger than 64KiB. For HSM keys, the combined + // length of the plaintext and additional_authenticated_data fields must + // be no larger than 8KiB. + AdditionalAuthenticatedData string `json:"additionalAuthenticatedData,omitempty"` + + // AdditionalAuthenticatedDataCrc32c: Optional. An optional CRC32C + // checksum of the RawEncryptRequest.additional_authenticated_data. 
If + // specified, KeyManagementService will verify the integrity of the + // received additional_authenticated_data using this checksum. + // KeyManagementService will report an error if the checksum + // verification fails. If you receive a checksum error, your client + // should verify that CRC32C(additional_authenticated_data) is equal to + // additional_authenticated_data_crc32c, and if so, perform a limited + // number of retries. A persistent mismatch may indicate an issue in + // your computation of the CRC32C checksum. Note: This field is defined + // as int64 for reasons of compatibility across different languages. + // However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that + // support this type. + AdditionalAuthenticatedDataCrc32c int64 `json:"additionalAuthenticatedDataCrc32c,omitempty,string"` + + // InitializationVector: Optional. A customer-supplied initialization + // vector that will be used for encryption. If it is not provided for + // AES-CBC and AES-CTR, one will be generated. It will be returned in + // RawEncryptResponse.initialization_vector. + InitializationVector string `json:"initializationVector,omitempty"` + + // InitializationVectorCrc32c: Optional. An optional CRC32C checksum of + // the RawEncryptRequest.initialization_vector. If specified, + // KeyManagementService will verify the integrity of the received + // initialization_vector using this checksum. KeyManagementService will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C(initialization_vector) is equal to + // initialization_vector_crc32c, and if so, perform a limited number of + // retries. A persistent mismatch may indicate an issue in your + // computation of the CRC32C checksum. Note: This field is defined as + // int64 for reasons of compatibility across different languages. 
+ // However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that + // support this type. + InitializationVectorCrc32c int64 `json:"initializationVectorCrc32c,omitempty,string"` + + // Plaintext: Required. The data to encrypt. Must be no larger than + // 64KiB. The maximum size depends on the key version's + // protection_level. For SOFTWARE keys, the plaintext must be no larger + // than 64KiB. For HSM keys, the combined length of the plaintext and + // additional_authenticated_data fields must be no larger than 8KiB. + Plaintext string `json:"plaintext,omitempty"` + + // PlaintextCrc32c: Optional. An optional CRC32C checksum of the + // RawEncryptRequest.plaintext. If specified, KeyManagementService will + // verify the integrity of the received plaintext using this checksum. + // KeyManagementService will report an error if the checksum + // verification fails. If you receive a checksum error, your client + // should verify that CRC32C(plaintext) is equal to plaintext_crc32c, + // and if so, perform a limited number of retries. A persistent mismatch + // may indicate an issue in your computation of the CRC32C checksum. + // Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, + // which will never exceed 2^32-1, and can be safely downconverted to + // uint32 in languages that support this type. + PlaintextCrc32c int64 `json:"plaintextCrc32c,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. + // "AdditionalAuthenticatedData") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "AdditionalAuthenticatedData") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RawEncryptRequest) MarshalJSON() ([]byte, error) { + type NoMethod RawEncryptRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RawEncryptResponse: Response message for +// KeyManagementService.RawEncrypt. +type RawEncryptResponse struct { + // Ciphertext: The encrypted data. In the case of AES-GCM, the + // authentication tag is the tag_length bytes at the end of this field. + Ciphertext string `json:"ciphertext,omitempty"` + + // CiphertextCrc32c: Integrity verification field. A CRC32C checksum of + // the returned RawEncryptResponse.ciphertext. An integrity check of + // ciphertext can be performed by computing the CRC32C checksum of + // ciphertext and comparing your results to this field. Discard the + // response in case of non-matching checksum values, and perform a + // limited number of retries. A persistent mismatch may indicate an + // issue in your computation of the CRC32C checksum. Note: This field is + // defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never + // exceed 2^32-1, and can be safely downconverted to uint32 in languages + // that support this type. + CiphertextCrc32c int64 `json:"ciphertextCrc32c,omitempty,string"` + + // InitializationVector: The initialization vector (IV) generated by the + // service during encryption. 
This value must be stored and provided in + // RawDecryptRequest.initialization_vector at decryption time. + InitializationVector string `json:"initializationVector,omitempty"` + + // InitializationVectorCrc32c: Integrity verification field. A CRC32C + // checksum of the returned RawEncryptResponse.initialization_vector. An + // integrity check of initialization_vector can be performed by + // computing the CRC32C checksum of initialization_vector and comparing + // your results to this field. Discard the response in case of + // non-matching checksum values, and perform a limited number of + // retries. A persistent mismatch may indicate an issue in your + // computation of the CRC32C checksum. Note: This field is defined as + // int64 for reasons of compatibility across different languages. + // However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that + // support this type. + InitializationVectorCrc32c int64 `json:"initializationVectorCrc32c,omitempty,string"` + + // Name: The resource name of the CryptoKeyVersion used in encryption. + // Check this field to verify that the intended resource was used for + // encryption. + Name string `json:"name,omitempty"` + + // ProtectionLevel: The ProtectionLevel of the CryptoKeyVersion used in + // encryption. + // + // Possible values: + // "PROTECTION_LEVEL_UNSPECIFIED" - Not specified. + // "SOFTWARE" - Crypto operations are performed in software. + // "HSM" - Crypto operations are performed in a Hardware Security + // Module. + // "EXTERNAL" - Crypto operations are performed by an external key + // manager. + // "EXTERNAL_VPC" - Crypto operations are performed in an EKM-over-VPC + // backend. + ProtectionLevel string `json:"protectionLevel,omitempty"` + + // TagLength: The length of the authentication tag that is appended to + // the end of the ciphertext. 
+ TagLength int64 `json:"tagLength,omitempty"` + + // VerifiedAdditionalAuthenticatedDataCrc32c: Integrity verification + // field. A flag indicating whether + // RawEncryptRequest.additional_authenticated_data_crc32c was received + // by KeyManagementService and used for the integrity verification of + // additional_authenticated_data. A false value of this field indicates + // either that // RawEncryptRequest.additional_authenticated_data_crc32c + // was left unset or that it was not delivered to KeyManagementService. + // If you've set RawEncryptRequest.additional_authenticated_data_crc32c + // but this field is still false, discard the response and perform a + // limited number of retries. + VerifiedAdditionalAuthenticatedDataCrc32c bool `json:"verifiedAdditionalAuthenticatedDataCrc32c,omitempty"` + + // VerifiedInitializationVectorCrc32c: Integrity verification field. A + // flag indicating whether + // RawEncryptRequest.initialization_vector_crc32c was received by + // KeyManagementService and used for the integrity verification of + // initialization_vector. A false value of this field indicates either + // that RawEncryptRequest.initialization_vector_crc32c was left unset or + // that it was not delivered to KeyManagementService. If you've set + // RawEncryptRequest.initialization_vector_crc32c but this field is + // still false, discard the response and perform a limited number of + // retries. + VerifiedInitializationVectorCrc32c bool `json:"verifiedInitializationVectorCrc32c,omitempty"` + + // VerifiedPlaintextCrc32c: Integrity verification field. A flag + // indicating whether RawEncryptRequest.plaintext_crc32c was received by + // KeyManagementService and used for the integrity verification of the + // plaintext. A false value of this field indicates either that + // RawEncryptRequest.plaintext_crc32c was left unset or that it was not + // delivered to KeyManagementService. 
If you've set + // RawEncryptRequest.plaintext_crc32c but this field is still false, + // discard the response and perform a limited number of retries. + VerifiedPlaintextCrc32c bool `json:"verifiedPlaintextCrc32c,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Ciphertext") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Ciphertext") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RawEncryptResponse) MarshalJSON() ([]byte, error) { + type NoMethod RawEncryptResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // RestoreCryptoKeyVersionRequest: Request message for // KeyManagementService.RestoreCryptoKeyVersion. type RestoreCryptoKeyVersionRequest struct { @@ -3222,6 +3662,14 @@ func (s *UpdateCryptoKeyPrimaryVersionRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// VerifyConnectivityResponse: Response message for +// EkmService.VerifyConnectivity. 
+type VerifyConnectivityResponse struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +} + // WrappingPublicKey: The public key component of the wrapping key. For // details of the type of key this public key corresponds to, see the // ImportMethod. @@ -5618,30 +6066,184 @@ func (c *ProjectsLocationsEkmConnectionsTestIamPermissionsCall) doRequest(alt st if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudkms.projects.locations.ekmConnections.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsEkmConnectionsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/ekmConnections/{ekmConnectionsId}:testIamPermissions", + // "httpMethod": "POST", + // "id": "cloudkms.projects.locations.ekmConnections.testIamPermissions", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy detail is being requested. 
See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/ekmConnections/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+resource}:testIamPermissions", + // "request": { + // "$ref": "TestIamPermissionsRequest" + // }, + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloudkms" + // ] + // } + +} + +// method id "cloudkms.projects.locations.ekmConnections.verifyConnectivity": + +type ProjectsLocationsEkmConnectionsVerifyConnectivityCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// VerifyConnectivity: Verifies that Cloud KMS can successfully connect +// to the external key manager specified by an EkmConnection. If there +// is an error connecting to the EKM, this method returns a +// FAILED_PRECONDITION status containing structured information as +// described at https://cloud.google.com/kms/docs/reference/ekm_errors. +// +// - name: The name of the EkmConnection to verify. +func (r *ProjectsLocationsEkmConnectionsService) VerifyConnectivity(name string) *ProjectsLocationsEkmConnectionsVerifyConnectivityCall { + c := &ProjectsLocationsEkmConnectionsVerifyConnectivityCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsLocationsEkmConnectionsVerifyConnectivityCall) Fields(s ...googleapi.Field) *ProjectsLocationsEkmConnectionsVerifyConnectivityCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsEkmConnectionsVerifyConnectivityCall) IfNoneMatch(entityTag string) *ProjectsLocationsEkmConnectionsVerifyConnectivityCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsEkmConnectionsVerifyConnectivityCall) Context(ctx context.Context) *ProjectsLocationsEkmConnectionsVerifyConnectivityCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsEkmConnectionsVerifyConnectivityCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsEkmConnectionsVerifyConnectivityCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:verifyConnectivity") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "cloudkms.projects.locations.ekmConnections.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Do executes the "cloudkms.projects.locations.ekmConnections.verifyConnectivity" call. +// Exactly one of *VerifyConnectivityResponse or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// *VerifyConnectivityResponse.ServerResponse.Header or (if a response // was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *ProjectsLocationsEkmConnectionsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { +func (c *ProjectsLocationsEkmConnectionsVerifyConnectivityCall) Do(opts ...googleapi.CallOption) (*VerifyConnectivityResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5660,7 +6262,7 @@ func (c *ProjectsLocationsEkmConnectionsTestIamPermissionsCall) Do(opts ...googl if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &TestIamPermissionsResponse{ + ret := &VerifyConnectivityResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5672,28 +6274,25 @@ func (c *ProjectsLocationsEkmConnectionsTestIamPermissionsCall) Do(opts ...googl } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/ekmConnections/{ekmConnectionsId}:testIamPermissions", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.ekmConnections.testIamPermissions", + // "description": "Verifies that Cloud KMS can successfully connect to the external key manager specified by an EkmConnection. 
If there is an error connecting to the EKM, this method returns a FAILED_PRECONDITION status containing structured information as described at https://cloud.google.com/kms/docs/reference/ekm_errors.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/ekmConnections/{ekmConnectionsId}:verifyConnectivity", + // "httpMethod": "GET", + // "id": "cloudkms.projects.locations.ekmConnections.verifyConnectivity", // "parameterOrder": [ - // "resource" + // "name" // ], // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", + // "name": { + // "description": "Required. The name of the EkmConnection to verify.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/ekmConnections/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v1/{+resource}:testIamPermissions", - // "request": { - // "$ref": "TestIamPermissionsRequest" - // }, + // "path": "v1/{+name}:verifyConnectivity", // "response": { - // "$ref": "TestIamPermissionsResponse" + // "$ref": "VerifyConnectivityResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -10095,6 +10694,298 @@ func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) Do(opts } +// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.rawDecrypt": + +type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawDecryptCall struct { + s *Service + name string + rawdecryptrequest *RawDecryptRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RawDecrypt: Decrypts data that was originally encrypted using a raw +// cryptographic mechanism. The CryptoKey.purpose must be +// RAW_ENCRYPT_DECRYPT. +// +// - name: The resource name of the CryptoKeyVersion to use for +// decryption. 
+func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) RawDecrypt(name string, rawdecryptrequest *RawDecryptRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawDecryptCall { + c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawDecryptCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.rawdecryptrequest = rawdecryptrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawDecryptCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawDecryptCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawDecryptCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawDecryptCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawDecryptCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawDecryptCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.rawdecryptrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:rawDecrypt") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.rawDecrypt" call. +// Exactly one of *RawDecryptResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RawDecryptResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawDecryptCall) Do(opts ...googleapi.CallOption) (*RawDecryptResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &RawDecryptResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Decrypts data that was originally encrypted using a raw cryptographic mechanism. The CryptoKey.purpose must be RAW_ENCRYPT_DECRYPT.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:rawDecrypt", + // "httpMethod": "POST", + // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.rawDecrypt", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. 
The resource name of the CryptoKeyVersion to use for decryption.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:rawDecrypt", + // "request": { + // "$ref": "RawDecryptRequest" + // }, + // "response": { + // "$ref": "RawDecryptResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloudkms" + // ] + // } + +} + +// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.rawEncrypt": + +type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawEncryptCall struct { + s *Service + name string + rawencryptrequest *RawEncryptRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RawEncrypt: Encrypts data using portable cryptographic primitives. +// Most users should choose Encrypt and Decrypt rather than their raw +// counterparts. The CryptoKey.purpose must be RAW_ENCRYPT_DECRYPT. +// +// - name: The resource name of the CryptoKeyVersion to use for +// encryption. +func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) RawEncrypt(name string, rawencryptrequest *RawEncryptRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawEncryptCall { + c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawEncryptCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.rawencryptrequest = rawencryptrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawEncryptCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawEncryptCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawEncryptCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawEncryptCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawEncryptCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawEncryptCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.rawencryptrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:rawEncrypt") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.rawEncrypt" call. +// Exactly one of *RawEncryptResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RawEncryptResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRawEncryptCall) Do(opts ...googleapi.CallOption) (*RawEncryptResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &RawEncryptResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Encrypts data using portable cryptographic primitives. Most users should choose Encrypt and Decrypt rather than their raw counterparts. 
The CryptoKey.purpose must be RAW_ENCRYPT_DECRYPT.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:rawEncrypt", + // "httpMethod": "POST", + // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.rawEncrypt", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The resource name of the CryptoKeyVersion to use for encryption.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:rawEncrypt", + // "request": { + // "$ref": "RawEncryptRequest" + // }, + // "response": { + // "$ref": "RawEncryptResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloudkms" + // ] + // } + +} + // method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.restore": type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index 2e8496259c..a94af0185f 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "cloudresourcemanager:v1" const apiName = "cloudresourcemanager" diff --git 
a/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v3/cloudresourcemanager-api.json b/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v3/cloudresourcemanager-api.json index 800e39bf5b..014114cce8 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v3/cloudresourcemanager-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v3/cloudresourcemanager-api.json @@ -1273,6 +1273,28 @@ "https://www.googleapis.com/auth/cloud-platform.read-only" ] }, + "getNamespaced": { + "description": "Retrieves a TagKey by its namespaced name. This method will return `PERMISSION_DENIED` if the key does not exist or the user does not have permission to view it.", + "flatPath": "v3/tagKeys/namespaced", + "httpMethod": "GET", + "id": "cloudresourcemanager.tagKeys.getNamespaced", + "parameterOrder": [], + "parameters": { + "name": { + "description": "Required. A namespaced tag key name in the format `{parentId}/{tagKeyShort}`, such as `42/foo` for a key with short name \"foo\" under the organization with ID 42 or `r2-d2/bar` for a key with short name \"bar\" under the project `r2-d2`.", + "location": "query", + "type": "string" + } + }, + "path": "v3/tagKeys/namespaced", + "response": { + "$ref": "TagKey" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, "list": { "description": "Lists all TagKeys for a parent resource.", "flatPath": "v3/tagKeys", @@ -1292,7 +1314,7 @@ "type": "string" }, "parent": { - "description": "Required. The resource name of the new TagKey's parent. Must be of the form `folders/{folder_id}` or `organizations/{org_id}`.", + "description": "Required. The resource name of the TagKey's parent. 
Must be of the form `organizations/{org_id}` or `projects/{project_id}` or `projects/{project_number}`", "location": "query", "type": "string" } @@ -1519,6 +1541,28 @@ "https://www.googleapis.com/auth/cloud-platform.read-only" ] }, + "getNamespaced": { + "description": "Retrieves a TagValue by its namespaced name. This method will return `PERMISSION_DENIED` if the value does not exist or the user does not have permission to view it.", + "flatPath": "v3/tagValues/namespaced", + "httpMethod": "GET", + "id": "cloudresourcemanager.tagValues.getNamespaced", + "parameterOrder": [], + "parameters": { + "name": { + "description": "Required. A namespaced tag value name in the following format: `{parentId}/{tagKeyShort}/{tagValueShort}` Examples: - `42/foo/abc` for a value with short name \"abc\" under the key with short name \"foo\" under the organization with ID 42 - `r2-d2/bar/xyz` for a value with short name \"xyz\" under the key with short name \"bar\" under the project with ID \"r2-d2\"", + "location": "query", + "type": "string" + } + }, + "path": "v3/tagValues/namespaced", + "response": { + "$ref": "TagValue" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, "list": { "description": "Lists all TagValues for a specific TagKey.", "flatPath": "v3/tagValues", @@ -1761,7 +1805,7 @@ } } }, - "revision": "20230219", + "revision": "20230514", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "AuditConfig": { @@ -1998,11 +2042,11 @@ "type": "boolean" }, "namespacedTagKey": { - "description": "The namespaced_name of the TagKey. Now only supported in the format of `{organization_id}/{tag_key_short_name}`. Other formats will be supported when we add non-org parented tags.", + "description": "The namespaced name of the TagKey. 
Can be in the form `{organization_id}/{tag_key_short_name}` or `{project_id}/{tag_key_short_name}` or `{project_number}/{tag_key_short_name}`.", "type": "string" }, "namespacedTagValue": { - "description": "Namespaced name of the TagValue. Now only supported in the format `{organization_id}/{tag_key_short_name}/{tag_value_short_name}`. Other formats will be supported when we add non-org parented tags.", + "description": "The namespaced name of the TagValue. Can be in the form `{organization_id}/{tag_key_short_name}/{tag_value_short_name}` or `{project_id}/{tag_key_short_name}/{tag_value_short_name}` or `{project_number}/{tag_key_short_name}/{tag_value_short_name}`.", "type": "string" }, "tagKey": { @@ -2010,7 +2054,7 @@ "type": "string" }, "tagKeyParentName": { - "description": "The parent name of the tag key. Must be in the format `organizations/{organization_id}`.", + "description": "The parent name of the tag key. Must be in the format `organizations/{organization_id}` or `projects/{project_number}`", "type": "string" }, "tagValue": { @@ -2751,6 +2795,10 @@ "tagValue": { "description": "The TagValue of the TagBinding. Must be of the form `tagValues/456`.", "type": "string" + }, + "tagValueNamespacedName": { + "description": "The namespaced name for the TagValue of the TagBinding. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`. For methods that support TagValue namespaced name, only one of tag_value_namespaced_name or tag_value may be filled. Requests with both fields will be rejected.", + "type": "string" } }, "type": "object" @@ -2813,7 +2861,7 @@ "type": "string" }, "parent": { - "description": "Immutable. The resource name of the new TagKey's parent. Must be of the form `organizations/{org_id}`.", + "description": "Immutable. The resource name of the TagKey's parent. A TagKey can be parented by an Organization or a Project. For a TagKey parented by an Organization, its parent must be in the form `organizations/{org_id}`. 
For a TagKey parented by a Project, its parent can be in the form `projects/{project_id}` or `projects/{project_number}`.", "type": "string" }, "purpose": { @@ -2824,7 +2872,7 @@ ], "enumDescriptions": [ "Unspecified purpose.", - "Purpose for Compute Engine firewalls. A corresponding purpose_data should be set for the network the tag is intended for. The key should be 'network' and the value should be in either of these two formats: -https://www.googleapis.com/compute/{compute_version}/projects/{project_id}/global/networks/{network_id} -{project_id}/{network_name} Examples: -https://www.googleapis.com/compute/staging_v1/projects/fail-closed-load-testing/global/networks/6992953698831725600 -fail-closed-load-testing/load-testing-network" + "Purpose for Compute Engine firewalls. A corresponding `purpose_data` should be set for the network the tag is intended for. The key should be `network` and the value should be in ## either of these two formats: `https://www.googleapis.com/compute/{compute_version}/projects/{project_id}/global/networks/{network_id}` - `{project_id}/{network_name}` ## Examples: `https://www.googleapis.com/compute/staging_v1/projects/fail-closed-load-testing/global/networks/6992953698831725600` - `fail-closed-load-testing/load-testing-network`" ], "type": "string" }, @@ -2871,7 +2919,7 @@ "type": "string" }, "namespacedName": { - "description": "Output only. Namespaced name of the TagValue. Now only supported in the format `{organization_id}/{tag_key_short_name}/{short_name}`. Other formats will be supported when we add non-org parented tags.", + "description": "Output only. The namespaced name of the TagValue. 
Can be in the form `{organization_id}/{tag_key_short_name}/{tag_value_short_name}` or `{project_id}/{tag_key_short_name}/{tag_value_short_name}` or `{project_number}/{tag_key_short_name}/{tag_value_short_name}`.", "readOnly": true, "type": "string" }, diff --git a/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v3/cloudresourcemanager-gen.go b/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v3/cloudresourcemanager-gen.go index da5689cbdb..22924f583c 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v3/cloudresourcemanager-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/cloudresourcemanager/v3/cloudresourcemanager-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "cloudresourcemanager:v3" const apiName = "cloudresourcemanager" @@ -661,15 +662,17 @@ type EffectiveTag struct { // value is directly attached to the resource, inherited will be false. Inherited bool `json:"inherited,omitempty"` - // NamespacedTagKey: The namespaced_name of the TagKey. Now only - // supported in the format of `{organization_id}/{tag_key_short_name}`. - // Other formats will be supported when we add non-org parented tags. + // NamespacedTagKey: The namespaced name of the TagKey. Can be in the + // form `{organization_id}/{tag_key_short_name}` or + // `{project_id}/{tag_key_short_name}` or + // `{project_number}/{tag_key_short_name}`. NamespacedTagKey string `json:"namespacedTagKey,omitempty"` - // NamespacedTagValue: Namespaced name of the TagValue. Now only - // supported in the format - // `{organization_id}/{tag_key_short_name}/{tag_value_short_name}`. - // Other formats will be supported when we add non-org parented tags. + // NamespacedTagValue: The namespaced name of the TagValue. 
Can be in + // the form + // `{organization_id}/{tag_key_short_name}/{tag_value_short_name}` or + // `{project_id}/{tag_key_short_name}/{tag_value_short_name}` or + // `{project_number}/{tag_key_short_name}/{tag_value_short_name}`. NamespacedTagValue string `json:"namespacedTagValue,omitempty"` // TagKey: The name of the TagKey, in the format `tagKeys/{id}`, such as @@ -677,7 +680,8 @@ type EffectiveTag struct { TagKey string `json:"tagKey,omitempty"` // TagKeyParentName: The parent name of the tag key. Must be in the - // format `organizations/{organization_id}`. + // format `organizations/{organization_id}` or + // `projects/{project_number}` TagKeyParentName string `json:"tagKeyParentName,omitempty"` // TagValue: Resource name for TagValue in the format `tagValues/456`. @@ -2098,6 +2102,14 @@ type TagBinding struct { // `tagValues/456`. TagValue string `json:"tagValue,omitempty"` + // TagValueNamespacedName: The namespaced name for the TagValue of the + // TagBinding. Must be in the format + // `{parent_id}/{tag_key_short_name}/{short_name}`. For methods that + // support TagValue namespaced name, only one of + // tag_value_namespaced_name or tag_value may be filled. Requests with + // both fields will be rejected. + TagValueNamespacedName string `json:"tagValueNamespacedName,omitempty"` + // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -2199,8 +2211,12 @@ type TagKey struct { // TagKey. NamespacedName string `json:"namespacedName,omitempty"` - // Parent: Immutable. The resource name of the new TagKey's parent. Must - // be of the form `organizations/{org_id}`. + // Parent: Immutable. The resource name of the TagKey's parent. A TagKey + // can be parented by an Organization or a Project. For a TagKey + // parented by an Organization, its parent must be in the form + // `organizations/{org_id}`. 
For a TagKey parented by a Project, its + // parent can be in the form `projects/{project_id}` or + // `projects/{project_number}`. Parent string `json:"parent,omitempty"` // Purpose: Optional. A purpose denotes that this Tag is intended for @@ -2213,15 +2229,15 @@ type TagKey struct { // Possible values: // "PURPOSE_UNSPECIFIED" - Unspecified purpose. // "GCE_FIREWALL" - Purpose for Compute Engine firewalls. A - // corresponding purpose_data should be set for the network the tag is - // intended for. The key should be 'network' and the value should be in - // either of these two formats: - // -https://www.googleapis.com/compute/{compute_version}/projects/{projec - // t_id}/global/networks/{network_id} -{project_id}/{network_name} - // Examples: - // -https://www.googleapis.com/compute/staging_v1/projects/fail-closed-lo - // ad-testing/global/networks/6992953698831725600 - // -fail-closed-load-testing/load-testing-network + // corresponding `purpose_data` should be set for the network the tag is + // intended for. The key should be `network` and the value should be in + // ## either of these two formats: + // `https://www.googleapis.com/compute/{compute_version}/projects/{projec + // t_id}/global/networks/{network_id}` - `{project_id}/{network_name}` + // ## Examples: + // `https://www.googleapis.com/compute/staging_v1/projects/fail-closed-lo + // ad-testing/global/networks/6992953698831725600` - + // `fail-closed-load-testing/load-testing-network` Purpose string `json:"purpose,omitempty"` // PurposeData: Optional. Purpose data corresponds to the policy system @@ -2286,10 +2302,11 @@ type TagValue struct { // `tagValues/456`. Name string `json:"name,omitempty"` - // NamespacedName: Output only. Namespaced name of the TagValue. Now - // only supported in the format - // `{organization_id}/{tag_key_short_name}/{short_name}`. Other formats - // will be supported when we add non-org parented tags. + // NamespacedName: Output only. The namespaced name of the TagValue. 
Can + // be in the form + // `{organization_id}/{tag_key_short_name}/{tag_value_short_name}` or + // `{project_id}/{tag_key_short_name}/{tag_value_short_name}` or + // `{project_number}/{tag_key_short_name}/{tag_value_short_name}`. NamespacedName string `json:"namespacedName,omitempty"` // Parent: Immutable. The resource name of the new TagValue's parent @@ -8862,6 +8879,154 @@ func (c *TagKeysGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err } +// method id "cloudresourcemanager.tagKeys.getNamespaced": + +type TagKeysGetNamespacedCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetNamespaced: Retrieves a TagKey by its namespaced name. This method +// will return `PERMISSION_DENIED` if the key does not exist or the user +// does not have permission to view it. +func (r *TagKeysService) GetNamespaced() *TagKeysGetNamespacedCall { + c := &TagKeysGetNamespacedCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Name sets the optional parameter "name": Required. A namespaced tag +// key name in the format `{parentId}/{tagKeyShort}`, such as `42/foo` +// for a key with short name "foo" under the organization with ID 42 or +// `r2-d2/bar` for a key with short name "bar" under the project +// `r2-d2`. +func (c *TagKeysGetNamespacedCall) Name(name string) *TagKeysGetNamespacedCall { + c.urlParams_.Set("name", name) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TagKeysGetNamespacedCall) Fields(s ...googleapi.Field) *TagKeysGetNamespacedCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TagKeysGetNamespacedCall) IfNoneMatch(entityTag string) *TagKeysGetNamespacedCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TagKeysGetNamespacedCall) Context(ctx context.Context) *TagKeysGetNamespacedCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TagKeysGetNamespacedCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TagKeysGetNamespacedCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/tagKeys/namespaced") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.tagKeys.getNamespaced" call. +// Exactly one of *TagKey or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *TagKey.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *TagKeysGetNamespacedCall) Do(opts ...googleapi.CallOption) (*TagKey, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TagKey{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a TagKey by its namespaced name. This method will return `PERMISSION_DENIED` if the key does not exist or the user does not have permission to view it.", + // "flatPath": "v3/tagKeys/namespaced", + // "httpMethod": "GET", + // "id": "cloudresourcemanager.tagKeys.getNamespaced", + // "parameterOrder": [], + // "parameters": { + // "name": { + // "description": "Required. 
A namespaced tag key name in the format `{parentId}/{tagKeyShort}`, such as `42/foo` for a key with short name \"foo\" under the organization with ID 42 or `r2-d2/bar` for a key with short name \"bar\" under the project `r2-d2`.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v3/tagKeys/namespaced", + // "response": { + // "$ref": "TagKey" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + // method id "cloudresourcemanager.tagKeys.list": type TagKeysListCall struct { @@ -8896,8 +9061,9 @@ func (c *TagKeysListCall) PageToken(pageToken string) *TagKeysListCall { } // Parent sets the optional parameter "parent": Required. The resource -// name of the new TagKey's parent. Must be of the form -// `folders/{folder_id}` or `organizations/{org_id}`. +// name of the TagKey's parent. Must be of the form +// `organizations/{org_id}` or `projects/{project_id}` or +// `projects/{project_number}` func (c *TagKeysListCall) Parent(parent string) *TagKeysListCall { c.urlParams_.Set("parent", parent) return c @@ -9017,7 +9183,7 @@ func (c *TagKeysListCall) Do(opts ...googleapi.CallOption) (*ListTagKeysResponse // "type": "string" // }, // "parent": { - // "description": "Required. The resource name of the new TagKey's parent. Must be of the form `folders/{folder_id}` or `organizations/{org_id}`.", + // "description": "Required. The resource name of the TagKey's parent. 
Must be of the form `organizations/{org_id}` or `projects/{project_id}` or `projects/{project_number}`", // "location": "query", // "type": "string" // } @@ -10128,6 +10294,156 @@ func (c *TagValuesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } +// method id "cloudresourcemanager.tagValues.getNamespaced": + +type TagValuesGetNamespacedCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetNamespaced: Retrieves a TagValue by its namespaced name. This +// method will return `PERMISSION_DENIED` if the value does not exist or +// the user does not have permission to view it. +func (r *TagValuesService) GetNamespaced() *TagValuesGetNamespacedCall { + c := &TagValuesGetNamespacedCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Name sets the optional parameter "name": Required. A namespaced tag +// value name in the following format: +// `{parentId}/{tagKeyShort}/{tagValueShort}` Examples: - `42/foo/abc` +// for a value with short name "abc" under the key with short name "foo" +// under the organization with ID 42 - `r2-d2/bar/xyz` for a value with +// short name "xyz" under the key with short name "bar" under the +// project with ID "r2-d2" +func (c *TagValuesGetNamespacedCall) Name(name string) *TagValuesGetNamespacedCall { + c.urlParams_.Set("name", name) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TagValuesGetNamespacedCall) Fields(s ...googleapi.Field) *TagValuesGetNamespacedCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TagValuesGetNamespacedCall) IfNoneMatch(entityTag string) *TagValuesGetNamespacedCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TagValuesGetNamespacedCall) Context(ctx context.Context) *TagValuesGetNamespacedCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TagValuesGetNamespacedCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TagValuesGetNamespacedCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/tagValues/namespaced") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.tagValues.getNamespaced" call. +// Exactly one of *TagValue or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *TagValue.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TagValuesGetNamespacedCall) Do(opts ...googleapi.CallOption) (*TagValue, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TagValue{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a TagValue by its namespaced name. This method will return `PERMISSION_DENIED` if the value does not exist or the user does not have permission to view it.", + // "flatPath": "v3/tagValues/namespaced", + // "httpMethod": "GET", + // "id": "cloudresourcemanager.tagValues.getNamespaced", + // "parameterOrder": [], + // "parameters": { + // "name": { + // "description": "Required. 
A namespaced tag value name in the following format: `{parentId}/{tagKeyShort}/{tagValueShort}` Examples: - `42/foo/abc` for a value with short name \"abc\" under the key with short name \"foo\" under the organization with ID 42 - `r2-d2/bar/xyz` for a value with short name \"xyz\" under the key with short name \"bar\" under the project with ID \"r2-d2\"", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v3/tagValues/namespaced", + // "response": { + // "$ref": "TagValue" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + // method id "cloudresourcemanager.tagValues.list": type TagValuesListCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-api.json b/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-api.json index bafb4fd210..38f3cfaf43 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-api.json @@ -139,6 +139,34 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "databaseFailover": { + "description": "Triggers database failover (only for highly resilient environments).", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}:databaseFailover", + "httpMethod": "POST", + "id": "composer.projects.locations.environments.databaseFailover", + "parameterOrder": [ + "environment" + ], + "parameters": { + "environment": { + "description": "Target environment: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+environment}:databaseFailover", + "request": { + "$ref": "DatabaseFailoverRequest" + }, + 
"response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "delete": { "description": "Delete an environment.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}", @@ -164,6 +192,59 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "executeAirflowCommand": { + "description": "Executes Airflow CLI command.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}:executeAirflowCommand", + "httpMethod": "POST", + "id": "composer.projects.locations.environments.executeAirflowCommand", + "parameterOrder": [ + "environment" + ], + "parameters": { + "environment": { + "description": "The resource name of the environment in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\".", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+environment}:executeAirflowCommand", + "request": { + "$ref": "ExecuteAirflowCommandRequest" + }, + "response": { + "$ref": "ExecuteAirflowCommandResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "fetchDatabaseProperties": { + "description": "Fetches database properties.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}:fetchDatabaseProperties", + "httpMethod": "GET", + "id": "composer.projects.locations.environments.fetchDatabaseProperties", + "parameterOrder": [ + "environment" + ], + "parameters": { + "environment": { + "description": "Required. 
The resource name of the environment, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+environment}:fetchDatabaseProperties", + "response": { + "$ref": "FetchDatabasePropertiesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "get": { "description": "Get an existing environment.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}", @@ -287,6 +368,34 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "pollAirflowCommand": { + "description": "Polls Airflow CLI command execution and fetches logs.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}:pollAirflowCommand", + "httpMethod": "POST", + "id": "composer.projects.locations.environments.pollAirflowCommand", + "parameterOrder": [ + "environment" + ], + "parameters": { + "environment": { + "description": "The resource name of the environment in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+environment}:pollAirflowCommand", + "request": { + "$ref": "PollAirflowCommandRequest" + }, + "response": { + "$ref": "PollAirflowCommandResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "saveSnapshot": { "description": "Creates a snapshots of a Cloud Composer environment. 
As a result of this operation, snapshot of environment's state is stored in a location specified in the SaveSnapshotRequest.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}:saveSnapshot", @@ -314,6 +423,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] + }, + "stopAirflowCommand": { + "description": "Stops Airflow CLI command execution.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}:stopAirflowCommand", + "httpMethod": "POST", + "id": "composer.projects.locations.environments.stopAirflowCommand", + "parameterOrder": [ + "environment" + ], + "parameters": { + "environment": { + "description": "The resource name of the environment in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\".", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+environment}:stopAirflowCommand", + "request": { + "$ref": "StopAirflowCommandRequest" + }, + "response": { + "$ref": "StopAirflowCommandResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] } } }, @@ -415,7 +552,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", "httpMethod": "GET", "id": "composer.projects.locations.operations.list", @@ -462,7 +599,7 @@ } } }, - "revision": "20230124", + "revision": "20230516", "rootUrl": "https://composer.googleapis.com/", "schemas": { "AllowedIpRange": { @@ -549,6 +686,18 @@ }, "type": "object" }, + "DatabaseFailoverRequest": { + "description": "Request to trigger database failover (only for highly resilient environments).", + "id": "DatabaseFailoverRequest", + "properties": {}, + "type": "object" + }, + "DatabaseFailoverResponse": { + "description": "Response for DatabaseFailoverRequest.", + "id": "DatabaseFailoverResponse", + "properties": {}, + "type": "object" + }, "Date": { "description": "Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp", "id": "Date", @@ -648,6 +797,11 @@ "description": "Configuration information for an environment.", "id": "EnvironmentConfig", "properties": { + "airflowByoidUri": { + "description": "Output only. 
The 'bring your own identity' variant of the URI of the Apache Airflow Web UI hosted within this environment, to be accessed with external identities using workforce identity federation (see [Access environments with workforce identity federation](/composer/docs/composer-2/access-environments-with-workforce-identity-federation)).", + "readOnly": true, + "type": "string" + }, "airflowUri": { "description": "Output only. The URI of the Apache Airflow Web UI hosted within this environment (see [Airflow web interface](/composer/docs/how-to/accessing/airflow-web-interface)).", "type": "string" @@ -709,6 +863,18 @@ "$ref": "RecoveryConfig", "description": "Optional. The Recovery settings configuration of an environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer." }, + "resilienceMode": { + "description": "Optional. Resilience mode of the Cloud Composer Environment. This field is supported for Cloud Composer environments in versions composer-2.2.0-airflow-*.*.* and newer.", + "enum": [ + "RESILIENCE_MODE_UNSPECIFIED", + "HIGH_RESILIENCE" + ], + "enumDescriptions": [ + "Default mode doesn't change environment parameters.", + "Enabled High Resilience mode, including Cloud SQL HA." + ], + "type": "string" + }, "softwareConfig": { "$ref": "SoftwareConfig", "description": "The configuration settings for software inside the environment." @@ -728,6 +894,86 @@ }, "type": "object" }, + "ExecuteAirflowCommandRequest": { + "description": "Execute Airflow Command request.", + "id": "ExecuteAirflowCommandRequest", + "properties": { + "command": { + "description": "Airflow command.", + "type": "string" + }, + "parameters": { + "description": "Parameters for the Airflow command/subcommand as an array of arguments. 
It may contain positional arguments like `[\"my-dag-id\"]`, key-value parameters like `[\"--foo=bar\"]` or `[\"--foo\",\"bar\"]`, or other flags like `[\"-f\"]`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "subcommand": { + "description": "Airflow subcommand.", + "type": "string" + } + }, + "type": "object" + }, + "ExecuteAirflowCommandResponse": { + "description": "Response to ExecuteAirflowCommandRequest.", + "id": "ExecuteAirflowCommandResponse", + "properties": { + "error": { + "description": "Error message. Empty if there was no error.", + "type": "string" + }, + "executionId": { + "description": "The unique ID of the command execution for polling.", + "type": "string" + }, + "pod": { + "description": "The name of the pod where the command is executed.", + "type": "string" + }, + "podNamespace": { + "description": "The namespace of the pod where the command is executed.", + "type": "string" + } + }, + "type": "object" + }, + "ExitInfo": { + "description": "Information about how a command ended.", + "id": "ExitInfo", + "properties": { + "error": { + "description": "Error message. Empty if there was no error.", + "type": "string" + }, + "exitCode": { + "description": "The exit code from the command execution.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "FetchDatabasePropertiesResponse": { + "description": "Response for FetchDatabasePropertiesRequest.", + "id": "FetchDatabasePropertiesResponse", + "properties": { + "isFailoverReplicaAvailable": { + "description": "The availability status of the failover replica. A false status indicates that the failover replica is out of sync. 
The primary instance can only fail over to the failover replica when the status is true.", + "type": "boolean" + }, + "primaryGceZone": { + "description": "The Compute Engine zone that the instance is currently serving from.", + "type": "string" + }, + "secondaryGceZone": { + "description": "The Compute Engine zone that the failover instance is currently serving from for a regional Cloud SQL instance.", + "type": "string" + } + }, + "type": "object" + }, "IPAllocationPolicy": { "description": "Configuration for controlling how IPs are allocated in the GKE cluster running the Apache Airflow software.", "id": "IPAllocationPolicy", @@ -789,6 +1035,22 @@ }, "type": "object" }, + "Line": { + "description": "Contains information about a single line from logs.", + "id": "Line", + "properties": { + "content": { + "description": "Text content of the log line.", + "type": "string" + }, + "lineNumber": { + "description": "Number of the line.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "ListEnvironmentsResponse": { "description": "The environments in a project and location.", "id": "ListEnvironmentsResponse", @@ -1048,7 +1310,8 @@ "UPDATE", "CHECK", "SAVE_SNAPSHOT", - "LOAD_SNAPSHOT" + "LOAD_SNAPSHOT", + "DATABASE_FAILOVER" ], "enumDescriptions": [ "Unused.", @@ -1057,7 +1320,8 @@ "A resource update operation.", "A resource check operation.", "Saves snapshot of the resource operation.", - "Loads snapshot of the resource operation." + "Loads snapshot of the resource operation.", + "Triggers failover of environment's Cloud SQL instance (only for highly resilient environments)." 
], "type": "string" }, @@ -1092,6 +1356,52 @@ }, "type": "object" }, + "PollAirflowCommandRequest": { + "description": "Poll Airflow Command request.", + "id": "PollAirflowCommandRequest", + "properties": { + "executionId": { + "description": "The unique ID of the command execution.", + "type": "string" + }, + "nextLineNumber": { + "description": "Line number from which new logs should be fetched.", + "format": "int32", + "type": "integer" + }, + "pod": { + "description": "The name of the pod where the command is executed.", + "type": "string" + }, + "podNamespace": { + "description": "The namespace of the pod where the command is executed.", + "type": "string" + } + }, + "type": "object" + }, + "PollAirflowCommandResponse": { + "description": "Response to PollAirflowCommandRequest.", + "id": "PollAirflowCommandResponse", + "properties": { + "exitInfo": { + "$ref": "ExitInfo", + "description": "The result exit status of the command." + }, + "output": { + "description": "Output from the command execution. It may not contain the full output and the caller may need to poll for more lines.", + "items": { + "$ref": "Line" + }, + "type": "array" + }, + "outputEnd": { + "description": "Whether the command execution has finished and there is no more output.", + "type": "boolean" + } + }, + "type": "object" + }, "PrivateClusterConfig": { "description": "Configuration options for the private GKE cluster in a Cloud Composer environment.", "id": "PrivateClusterConfig", @@ -1312,6 +1622,47 @@ }, "type": "object" }, + "StopAirflowCommandRequest": { + "description": "Stop Airflow Command request.", + "id": "StopAirflowCommandRequest", + "properties": { + "executionId": { + "description": "The unique ID of the command execution.", + "type": "string" + }, + "force": { + "description": "If true, the execution is terminated forcefully (SIGKILL). 
If false, the execution is stopped gracefully, giving it time for cleanup.", + "type": "boolean" + }, + "pod": { + "description": "The name of the pod where the command is executed.", + "type": "string" + }, + "podNamespace": { + "description": "The namespace of the pod where the command is executed.", + "type": "string" + } + }, + "type": "object" + }, + "StopAirflowCommandResponse": { + "description": "Response to StopAirflowCommandRequest.", + "id": "StopAirflowCommandResponse", + "properties": { + "isDone": { + "description": "Whether the execution is still running.", + "type": "boolean" + }, + "output": { + "description": "Output message from stopping execution request.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "WebServerConfig": { "description": "The configuration settings for the Airflow web server App Engine instance. Supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*", "id": "WebServerConfig", diff --git a/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-gen.go b/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-gen.go index d53dcbd2ed..66195ae2c4 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/composer/v1/composer-gen.go @@ -71,6 +71,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "composer:v1" const apiName = "composer" @@ -347,6 +348,15 @@ func (s *DatabaseConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DatabaseFailoverRequest: Request to trigger database failover (only +// for highly resilient environments). +type DatabaseFailoverRequest struct { +} + +// DatabaseFailoverResponse: Response for DatabaseFailoverRequest. 
+type DatabaseFailoverResponse struct { +} + // Date: Represents a whole or partial calendar date, such as a // birthday. The time of day and time zone are either specified // elsewhere or are insignificant. The date is relative to the Gregorian @@ -515,6 +525,15 @@ func (s *Environment) MarshalJSON() ([]byte, error) { // EnvironmentConfig: Configuration information for an environment. type EnvironmentConfig struct { + // AirflowByoidUri: Output only. The 'bring your own identity' variant + // of the URI of the Apache Airflow Web UI hosted within this + // environment, to be accessed with external identities using workforce + // identity federation (see Access environments with workforce identity + // federation + // (/composer/docs/composer-2/access-environments-with-workforce-identity + // -federation)). + AirflowByoidUri string `json:"airflowByoidUri,omitempty"` + // AirflowUri: Output only. The URI of the Apache Airflow Web UI hosted // within this environment (see Airflow web interface // (/composer/docs/how-to/accessing/airflow-web-interface)). @@ -587,6 +606,17 @@ type EnvironmentConfig struct { // in versions composer-2.*.*-airflow-*.*.* and newer. RecoveryConfig *RecoveryConfig `json:"recoveryConfig,omitempty"` + // ResilienceMode: Optional. Resilience mode of the Cloud Composer + // Environment. This field is supported for Cloud Composer environments + // in versions composer-2.2.0-airflow-*.*.* and newer. + // + // Possible values: + // "RESILIENCE_MODE_UNSPECIFIED" - Default mode doesn't change + // environment parameters. + // "HIGH_RESILIENCE" - Enabled High Resilience mode, including Cloud + // SQL HA. + ResilienceMode string `json:"resilienceMode,omitempty"` + // SoftwareConfig: The configuration settings for software inside the // environment. SoftwareConfig *SoftwareConfig `json:"softwareConfig,omitempty"` @@ -607,7 +637,45 @@ type EnvironmentConfig struct { // composer-2.*.*-airflow-*.*.* and newer. 
WorkloadsConfig *WorkloadsConfig `json:"workloadsConfig,omitempty"` - // ForceSendFields is a list of field names (e.g. "AirflowUri") to + // ForceSendFields is a list of field names (e.g. "AirflowByoidUri") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AirflowByoidUri") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *EnvironmentConfig) MarshalJSON() ([]byte, error) { + type NoMethod EnvironmentConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ExecuteAirflowCommandRequest: Execute Airflow Command request. +type ExecuteAirflowCommandRequest struct { + // Command: Airflow command. + Command string `json:"command,omitempty"` + + // Parameters: Parameters for the Airflow command/subcommand as an array + // of arguments. It may contain positional arguments like + // `["my-dag-id"]`, key-value parameters like `["--foo=bar"]` or + // `["--foo","bar"]`, or other flags like `["-f"]`. + Parameters []string `json:"parameters,omitempty"` + + // Subcommand: Airflow subcommand. + Subcommand string `json:"subcommand,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Command") to // unconditionally include in API requests. 
By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -615,7 +683,7 @@ type EnvironmentConfig struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AirflowUri") to include in + // NullFields is a list of field names (e.g. "Command") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -624,8 +692,127 @@ type EnvironmentConfig struct { NullFields []string `json:"-"` } -func (s *EnvironmentConfig) MarshalJSON() ([]byte, error) { - type NoMethod EnvironmentConfig +func (s *ExecuteAirflowCommandRequest) MarshalJSON() ([]byte, error) { + type NoMethod ExecuteAirflowCommandRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ExecuteAirflowCommandResponse: Response to +// ExecuteAirflowCommandRequest. +type ExecuteAirflowCommandResponse struct { + // Error: Error message. Empty if there was no error. + Error string `json:"error,omitempty"` + + // ExecutionId: The unique ID of the command execution for polling. + ExecutionId string `json:"executionId,omitempty"` + + // Pod: The name of the pod where the command is executed. + Pod string `json:"pod,omitempty"` + + // PodNamespace: The namespace of the pod where the command is executed. + PodNamespace string `json:"podNamespace,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Error") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Error") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExecuteAirflowCommandResponse) MarshalJSON() ([]byte, error) { + type NoMethod ExecuteAirflowCommandResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ExitInfo: Information about how a command ended. +type ExitInfo struct { + // Error: Error message. Empty if there was no error. + Error string `json:"error,omitempty"` + + // ExitCode: The exit code from the command execution. + ExitCode int64 `json:"exitCode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Error") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Error") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ExitInfo) MarshalJSON() ([]byte, error) { + type NoMethod ExitInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// FetchDatabasePropertiesResponse: Response for +// FetchDatabasePropertiesRequest. +type FetchDatabasePropertiesResponse struct { + // IsFailoverReplicaAvailable: The availability status of the failover + // replica. A false status indicates that the failover replica is out of + // sync. The primary instance can only fail over to the failover replica + // when the status is true. + IsFailoverReplicaAvailable bool `json:"isFailoverReplicaAvailable,omitempty"` + + // PrimaryGceZone: The Compute Engine zone that the instance is + // currently serving from. + PrimaryGceZone string `json:"primaryGceZone,omitempty"` + + // SecondaryGceZone: The Compute Engine zone that the failover instance + // is currently serving from for a regional Cloud SQL instance. + SecondaryGceZone string `json:"secondaryGceZone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "IsFailoverReplicaAvailable") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "IsFailoverReplicaAvailable") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. 
However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FetchDatabasePropertiesResponse) MarshalJSON() ([]byte, error) { + type NoMethod FetchDatabasePropertiesResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -749,6 +936,37 @@ func (s *ImageVersion) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Line: Contains information about a single line from logs. +type Line struct { + // Content: Text content of the log line. + Content string `json:"content,omitempty"` + + // LineNumber: Number of the line. + LineNumber int64 `json:"lineNumber,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Content") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Content") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *Line) MarshalJSON() ([]byte, error) { + type NoMethod Line + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ListEnvironmentsResponse: The environments in a project and location. type ListEnvironmentsResponse struct { // Environments: The list of environments returned by a @@ -1237,6 +1455,8 @@ type OperationMetadata struct { // "CHECK" - A resource check operation. // "SAVE_SNAPSHOT" - Saves snapshot of the resource operation. // "LOAD_SNAPSHOT" - Loads snapshot of the resource operation. + // "DATABASE_FAILOVER" - Triggers failover of environment's Cloud SQL + // instance (only for highly resilient environments). OperationType string `json:"operationType,omitempty"` // Resource: Output only. The resource being operated on, as a relative @@ -1281,6 +1501,83 @@ func (s *OperationMetadata) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// PollAirflowCommandRequest: Poll Airflow Command request. +type PollAirflowCommandRequest struct { + // ExecutionId: The unique ID of the command execution. + ExecutionId string `json:"executionId,omitempty"` + + // NextLineNumber: Line number from which new logs should be fetched. + NextLineNumber int64 `json:"nextLineNumber,omitempty"` + + // Pod: The name of the pod where the command is executed. + Pod string `json:"pod,omitempty"` + + // PodNamespace: The namespace of the pod where the command is executed. + PodNamespace string `json:"podNamespace,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExecutionId") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. 
+ // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExecutionId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PollAirflowCommandRequest) MarshalJSON() ([]byte, error) { + type NoMethod PollAirflowCommandRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PollAirflowCommandResponse: Response to PollAirflowCommandRequest. +type PollAirflowCommandResponse struct { + // ExitInfo: The result exit status of the command. + ExitInfo *ExitInfo `json:"exitInfo,omitempty"` + + // Output: Output from the command execution. It may not contain the + // full output and the caller may need to poll for more lines. + Output []*Line `json:"output,omitempty"` + + // OutputEnd: Whether the command execution has finished and there is no + // more output. + OutputEnd bool `json:"outputEnd,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ExitInfo") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"ExitInfo") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PollAirflowCommandResponse) MarshalJSON() ([]byte, error) { + type NoMethod PollAirflowCommandResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // PrivateClusterConfig: Configuration options for the private GKE // cluster in a Cloud Composer environment. type PrivateClusterConfig struct { @@ -1750,6 +2047,80 @@ func (s *Status) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// StopAirflowCommandRequest: Stop Airflow Command request. +type StopAirflowCommandRequest struct { + // ExecutionId: The unique ID of the command execution. + ExecutionId string `json:"executionId,omitempty"` + + // Force: If true, the execution is terminated forcefully (SIGKILL). If + // false, the execution is stopped gracefully, giving it time for + // cleanup. + Force bool `json:"force,omitempty"` + + // Pod: The name of the pod where the command is executed. + Pod string `json:"pod,omitempty"` + + // PodNamespace: The namespace of the pod where the command is executed. + PodNamespace string `json:"podNamespace,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExecutionId") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExecutionId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StopAirflowCommandRequest) MarshalJSON() ([]byte, error) { + type NoMethod StopAirflowCommandRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// StopAirflowCommandResponse: Response to StopAirflowCommandRequest. +type StopAirflowCommandResponse struct { + // IsDone: Whether the execution is still running. + IsDone bool `json:"isDone,omitempty"` + + // Output: Output message from stopping execution request. + Output []string `json:"output,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "IsDone") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IsDone") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StopAirflowCommandResponse) MarshalJSON() ([]byte, error) { + type NoMethod StopAirflowCommandResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // WebServerConfig: The configuration settings for the Airflow web // server App Engine instance. Supported for Cloud Composer environments // in versions composer-1.*.*-airflow-*.*.* @@ -2111,31 +2482,34 @@ func (c *ProjectsLocationsEnvironmentsCreateCall) Do(opts ...googleapi.CallOptio } -// method id "composer.projects.locations.environments.delete": +// method id "composer.projects.locations.environments.databaseFailover": -type ProjectsLocationsEnvironmentsDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsEnvironmentsDatabaseFailoverCall struct { + s *Service + environment string + databasefailoverrequest *DatabaseFailoverRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Delete an environment. +// DatabaseFailover: Triggers database failover (only for highly +// resilient environments). // -// - name: The environment to delete, in the form: +// - environment: Target environment: // "projects/{projectId}/locations/{locationId}/environments/{environme // ntId}". 
-func (r *ProjectsLocationsEnvironmentsService) Delete(name string) *ProjectsLocationsEnvironmentsDeleteCall { - c := &ProjectsLocationsEnvironmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name +func (r *ProjectsLocationsEnvironmentsService) DatabaseFailover(environment string, databasefailoverrequest *DatabaseFailoverRequest) *ProjectsLocationsEnvironmentsDatabaseFailoverCall { + c := &ProjectsLocationsEnvironmentsDatabaseFailoverCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.environment = environment + c.databasefailoverrequest = databasefailoverrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsLocationsEnvironmentsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsEnvironmentsDeleteCall { +func (c *ProjectsLocationsEnvironmentsDatabaseFailoverCall) Fields(s ...googleapi.Field) *ProjectsLocationsEnvironmentsDatabaseFailoverCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -2143,21 +2517,21 @@ func (c *ProjectsLocationsEnvironmentsDeleteCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsLocationsEnvironmentsDeleteCall) Context(ctx context.Context) *ProjectsLocationsEnvironmentsDeleteCall { +func (c *ProjectsLocationsEnvironmentsDatabaseFailoverCall) Context(ctx context.Context) *ProjectsLocationsEnvironmentsDatabaseFailoverCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsLocationsEnvironmentsDeleteCall) Header() http.Header { +func (c *ProjectsLocationsEnvironmentsDatabaseFailoverCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsEnvironmentsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsEnvironmentsDatabaseFailoverCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -2165,22 +2539,164 @@ func (c *ProjectsLocationsEnvironmentsDeleteCall) doRequest(alt string) (*http.R } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.databasefailoverrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+environment}:databaseFailover") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "environment": c.environment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "composer.projects.locations.environments.delete" call. +// Do executes the "composer.projects.locations.environments.databaseFailover" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsEnvironmentsDatabaseFailoverCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Triggers database failover (only for highly resilient environments).", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}:databaseFailover", + // "httpMethod": "POST", + // "id": "composer.projects.locations.environments.databaseFailover", + // "parameterOrder": [ + // "environment" + // ], + // "parameters": { + // "environment": { + // "description": "Target environment: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+environment}:databaseFailover", + // "request": { + // "$ref": "DatabaseFailoverRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "composer.projects.locations.environments.delete": + +type 
ProjectsLocationsEnvironmentsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Delete an environment. +// +// - name: The environment to delete, in the form: +// "projects/{projectId}/locations/{locationId}/environments/{environme +// ntId}". +func (r *ProjectsLocationsEnvironmentsService) Delete(name string) *ProjectsLocationsEnvironmentsDeleteCall { + c := &ProjectsLocationsEnvironmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsEnvironmentsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsEnvironmentsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsEnvironmentsDeleteCall) Context(ctx context.Context) *ProjectsLocationsEnvironmentsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsEnvironmentsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsEnvironmentsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "composer.projects.locations.environments.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at @@ -2206,7 +2722,299 @@ func (c *ProjectsLocationsEnvironmentsDeleteCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Delete an environment.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}", + // "httpMethod": "DELETE", + // "id": "composer.projects.locations.environments.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The environment to delete, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "composer.projects.locations.environments.executeAirflowCommand": + +type ProjectsLocationsEnvironmentsExecuteAirflowCommandCall struct { + s *Service + environment string + executeairflowcommandrequest *ExecuteAirflowCommandRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ExecuteAirflowCommand: Executes Airflow CLI command. +// +// - environment: The resource name of the environment in the form: +// "projects/{projectId}/locations/{locationId}/environments/{environme +// ntId}". 
+func (r *ProjectsLocationsEnvironmentsService) ExecuteAirflowCommand(environment string, executeairflowcommandrequest *ExecuteAirflowCommandRequest) *ProjectsLocationsEnvironmentsExecuteAirflowCommandCall { + c := &ProjectsLocationsEnvironmentsExecuteAirflowCommandCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.environment = environment + c.executeairflowcommandrequest = executeairflowcommandrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsEnvironmentsExecuteAirflowCommandCall) Fields(s ...googleapi.Field) *ProjectsLocationsEnvironmentsExecuteAirflowCommandCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsEnvironmentsExecuteAirflowCommandCall) Context(ctx context.Context) *ProjectsLocationsEnvironmentsExecuteAirflowCommandCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsEnvironmentsExecuteAirflowCommandCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsEnvironmentsExecuteAirflowCommandCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.executeairflowcommandrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+environment}:executeAirflowCommand") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "environment": c.environment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "composer.projects.locations.environments.executeAirflowCommand" call. +// Exactly one of *ExecuteAirflowCommandResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ExecuteAirflowCommandResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsEnvironmentsExecuteAirflowCommandCall) Do(opts ...googleapi.CallOption) (*ExecuteAirflowCommandResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ExecuteAirflowCommandResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Executes Airflow CLI command.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}:executeAirflowCommand", + // "httpMethod": "POST", + // "id": "composer.projects.locations.environments.executeAirflowCommand", + // "parameterOrder": [ + // "environment" + // ], + // "parameters": { + // "environment": { + // "description": "The resource name of the environment in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\".", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+environment}:executeAirflowCommand", + // "request": { + // "$ref": "ExecuteAirflowCommandRequest" + // }, + // "response": { + // "$ref": "ExecuteAirflowCommandResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "composer.projects.locations.environments.fetchDatabaseProperties": + +type ProjectsLocationsEnvironmentsFetchDatabasePropertiesCall struct { + s *Service + environment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// FetchDatabaseProperties: 
Fetches database properties. +// +// - environment: The resource name of the environment, in the form: +// "projects/{projectId}/locations/{locationId}/environments/{environme +// ntId}". +func (r *ProjectsLocationsEnvironmentsService) FetchDatabaseProperties(environment string) *ProjectsLocationsEnvironmentsFetchDatabasePropertiesCall { + c := &ProjectsLocationsEnvironmentsFetchDatabasePropertiesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.environment = environment + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsEnvironmentsFetchDatabasePropertiesCall) Fields(s ...googleapi.Field) *ProjectsLocationsEnvironmentsFetchDatabasePropertiesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsEnvironmentsFetchDatabasePropertiesCall) IfNoneMatch(entityTag string) *ProjectsLocationsEnvironmentsFetchDatabasePropertiesCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsEnvironmentsFetchDatabasePropertiesCall) Context(ctx context.Context) *ProjectsLocationsEnvironmentsFetchDatabasePropertiesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsEnvironmentsFetchDatabasePropertiesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsEnvironmentsFetchDatabasePropertiesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+environment}:fetchDatabaseProperties") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "environment": c.environment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "composer.projects.locations.environments.fetchDatabaseProperties" call. +// Exactly one of *FetchDatabasePropertiesResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *FetchDatabasePropertiesResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsEnvironmentsFetchDatabasePropertiesCall) Do(opts ...googleapi.CallOption) (*FetchDatabasePropertiesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &FetchDatabasePropertiesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -2218,25 +3026,25 @@ func (c *ProjectsLocationsEnvironmentsDeleteCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Delete an environment.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}", - // "httpMethod": "DELETE", - // "id": "composer.projects.locations.environments.delete", + // "description": "Fetches database properties.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}:fetchDatabaseProperties", + // "httpMethod": "GET", + // "id": "composer.projects.locations.environments.fetchDatabaseProperties", // "parameterOrder": [ - // "name" + // "environment" // ], // "parameters": { - // "name": { - // "description": "The environment to delete, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", + // "environment": { + // "description": "Required. 
The resource name of the environment, in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v1/{+name}", + // "path": "v1/{+environment}:fetchDatabaseProperties", // "response": { - // "$ref": "Operation" + // "$ref": "FetchDatabasePropertiesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -2972,6 +3780,151 @@ func (c *ProjectsLocationsEnvironmentsPatchCall) Do(opts ...googleapi.CallOption } +// method id "composer.projects.locations.environments.pollAirflowCommand": + +type ProjectsLocationsEnvironmentsPollAirflowCommandCall struct { + s *Service + environment string + pollairflowcommandrequest *PollAirflowCommandRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// PollAirflowCommand: Polls Airflow CLI command execution and fetches +// logs. +// +// - environment: The resource name of the environment in the form: +// "projects/{projectId}/locations/{locationId}/environments/{environme +// ntId}". +func (r *ProjectsLocationsEnvironmentsService) PollAirflowCommand(environment string, pollairflowcommandrequest *PollAirflowCommandRequest) *ProjectsLocationsEnvironmentsPollAirflowCommandCall { + c := &ProjectsLocationsEnvironmentsPollAirflowCommandCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.environment = environment + c.pollairflowcommandrequest = pollairflowcommandrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsLocationsEnvironmentsPollAirflowCommandCall) Fields(s ...googleapi.Field) *ProjectsLocationsEnvironmentsPollAirflowCommandCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsEnvironmentsPollAirflowCommandCall) Context(ctx context.Context) *ProjectsLocationsEnvironmentsPollAirflowCommandCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsEnvironmentsPollAirflowCommandCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsEnvironmentsPollAirflowCommandCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.pollairflowcommandrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+environment}:pollAirflowCommand") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "environment": c.environment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "composer.projects.locations.environments.pollAirflowCommand" call. 
+// Exactly one of *PollAirflowCommandResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *PollAirflowCommandResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsEnvironmentsPollAirflowCommandCall) Do(opts ...googleapi.CallOption) (*PollAirflowCommandResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &PollAirflowCommandResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Polls Airflow CLI command execution and fetches logs.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}:pollAirflowCommand", + // "httpMethod": "POST", + // "id": "composer.projects.locations.environments.pollAirflowCommand", + // "parameterOrder": [ + // "environment" + // ], + // "parameters": { + // "environment": { + // "description": "The resource name of the environment in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\"", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": 
"v1/{+environment}:pollAirflowCommand", + // "request": { + // "$ref": "PollAirflowCommandRequest" + // }, + // "response": { + // "$ref": "PollAirflowCommandResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "composer.projects.locations.environments.saveSnapshot": type ProjectsLocationsEnvironmentsSaveSnapshotCall struct { @@ -3119,6 +4072,150 @@ func (c *ProjectsLocationsEnvironmentsSaveSnapshotCall) Do(opts ...googleapi.Cal } +// method id "composer.projects.locations.environments.stopAirflowCommand": + +type ProjectsLocationsEnvironmentsStopAirflowCommandCall struct { + s *Service + environment string + stopairflowcommandrequest *StopAirflowCommandRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// StopAirflowCommand: Stops Airflow CLI command execution. +// +// - environment: The resource name of the environment in the form: +// "projects/{projectId}/locations/{locationId}/environments/{environme +// ntId}". +func (r *ProjectsLocationsEnvironmentsService) StopAirflowCommand(environment string, stopairflowcommandrequest *StopAirflowCommandRequest) *ProjectsLocationsEnvironmentsStopAirflowCommandCall { + c := &ProjectsLocationsEnvironmentsStopAirflowCommandCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.environment = environment + c.stopairflowcommandrequest = stopairflowcommandrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsEnvironmentsStopAirflowCommandCall) Fields(s ...googleapi.Field) *ProjectsLocationsEnvironmentsStopAirflowCommandCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsLocationsEnvironmentsStopAirflowCommandCall) Context(ctx context.Context) *ProjectsLocationsEnvironmentsStopAirflowCommandCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsEnvironmentsStopAirflowCommandCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsEnvironmentsStopAirflowCommandCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.stopairflowcommandrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+environment}:stopAirflowCommand") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "environment": c.environment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "composer.projects.locations.environments.stopAirflowCommand" call. +// Exactly one of *StopAirflowCommandResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *StopAirflowCommandResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *ProjectsLocationsEnvironmentsStopAirflowCommandCall) Do(opts ...googleapi.CallOption) (*StopAirflowCommandResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &StopAirflowCommandResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stops Airflow CLI command execution.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/environments/{environmentsId}:stopAirflowCommand", + // "httpMethod": "POST", + // "id": "composer.projects.locations.environments.stopAirflowCommand", + // "parameterOrder": [ + // "environment" + // ], + // "parameters": { + // "environment": { + // "description": "The resource name of the environment in the form: \"projects/{projectId}/locations/{locationId}/environments/{environmentId}\".", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/environments/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+environment}:stopAirflowCommand", + // "request": { + // "$ref": "StopAirflowCommandRequest" + // }, + // "response": { + // "$ref": "StopAirflowCommandResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "composer.projects.locations.imageVersions.list": type ProjectsLocationsImageVersionsListCall struct { @@ -3621,14 +4718,7 @@ type 
ProjectsLocationsOperationsListCall struct { // List: Lists operations that match the specified filter in the // request. If the server doesn't support this method, it returns -// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to -// override the binding to use different resource name schemes, such as -// `users/*/operations`. To override the binding, API services can add a -// binding such as "/v1/{name=users/*}/operations" to their service -// configuration. For backwards compatibility, the default name includes -// the operations collection id, however overriding users must ensure -// the name binding is the parent resource, without the operations -// collection id. +// `UNIMPLEMENTED`. // // - name: The name of the operation's parent resource. func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall { @@ -3757,7 +4847,7 @@ func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", // "httpMethod": "GET", // "id": "composer.projects.locations.operations.list", diff --git a/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-api.json b/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-api.json index b380f1a703..3c21bcacaf 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -550,6 +550,56 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "move": { + "description": "Moves the specified address resource.", + "flatPath": "projects/{project}/regions/{region}/addresses/{address}/move", + "httpMethod": "POST", + "id": "compute.addresses.move", + "parameterOrder": [ + "project", + "region", + "address" + ], + "parameters": { + "address": { + "description": "Name of the address resource to move.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Source project ID which the Address is moved from.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/addresses/{address}/move", + "request": { + "$ref": "RegionAddressesMoveRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setLabels": { "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", "flatPath": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", @@ -2211,6 +2261,48 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "bulkInsert": { + "description": "Bulk create a set of disks.", + "flatPath": "projects/{project}/zones/{zone}/disks/bulkInsert", + "httpMethod": "POST", + "id": "compute.disks.bulkInsert", + "parameterOrder": [ + "project", + "zone" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/disks/bulkInsert", + "request": { + "$ref": "BulkInsertDiskResource" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "createSnapshot": { "description": "Creates a snapshot of a specified persistent disk. For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/createSnapshot", @@ -2709,6 +2801,145 @@ "https://www.googleapis.com/auth/compute" ] }, + "startAsyncReplication": { + "description": "Starts asynchronous replication. 
Must be invoked on the primary disk.", + "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication", + "httpMethod": "POST", + "id": "compute.disks.startAsyncReplication", + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "parameters": { + "disk": { + "description": "The name of the persistent disk.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication", + "request": { + "$ref": "DisksStartAsyncReplicationRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "stopAsyncReplication": { + "description": "Stops asynchronous replication. Can be invoked either on the primary or on the secondary disk.", + "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication", + "httpMethod": "POST", + "id": "compute.disks.stopAsyncReplication", + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "parameters": { + "disk": { + "description": "The name of the persistent disk.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "stopGroupAsyncReplication": { + "description": "Stops asynchronous replication for a consistency group of disks. Can be invoked either in the primary or secondary scope.", + "flatPath": "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication", + "httpMethod": "POST", + "id": "compute.disks.stopGroupAsyncReplication", + "parameterOrder": [ + "project", + "zone" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request. This must be the zone of the primary or secondary disks in the consistency group.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication", + "request": { + "$ref": "DisksStopGroupAsyncReplicationResource" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", @@ -4497,6 +4728,48 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "move": { + "description": "Moves the specified address resource from one project to another project.", + "flatPath": "projects/{project}/global/addresses/{address}/move", + "httpMethod": "POST", + "id": "compute.globalAddresses.move", + "parameterOrder": [ + "project", + "address" + ], + "parameters": { + "address": { + "description": "Name of the address resource to move.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Source project ID which the Address is moved from.", + "location": "path", + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/addresses/{address}/move", + "request": { + "$ref": "GlobalAddressesMoveRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setLabels": { "description": "Sets the labels on a GlobalAddress. 
To learn more about labels, read the Labeling Resources documentation.", "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", @@ -10647,48 +10920,6 @@ "required": true, "type": "string" }, - "zone": { - "description": "The name of the zone for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "start": { - "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", - "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/start", - "httpMethod": "POST", - "id": "compute.instances.start", - "parameterOrder": [ - "project", - "zone", - "instance" - ], - "parameters": { - "instance": { - "description": "Name of the instance resource to start.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, "requestId": { "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", @@ -10702,7 +10933,7 @@ "type": "string" } }, - "path": "projects/{project}/zones/{zone}/instances/{instance}/start", + "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", "response": { "$ref": "Operation" }, @@ -10711,11 +10942,58 @@ "https://www.googleapis.com/auth/compute" ] }, - "startWithEncryptionKey": { + "start": { "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", - "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/start", "httpMethod": "POST", - "id": "compute.instances.startWithEncryptionKey", + "id": "compute.instances.start", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name of the instance resource to start.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/instances/{instance}/start", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "startWithEncryptionKey": { + "description": "Starts an instance that was stopped using the instances().stop method. For more information, see Restart an instance.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + "httpMethod": "POST", + "id": "compute.instances.startWithEncryptionKey", "parameterOrder": [ "project", "zone", @@ -11669,6 +11947,100 @@ } } }, + "interconnectRemoteLocations": { + "methods": { + "get": { + "description": "Returns the details for the specified interconnect remote location. 
Gets a list of available interconnect remote locations by making a list() request.", + "flatPath": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", + "httpMethod": "GET", + "id": "compute.interconnectRemoteLocations.get", + "parameterOrder": [ + "project", + "interconnectRemoteLocation" + ], + "parameters": { + "interconnectRemoteLocation": { + "description": "Name of the interconnect remote location to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", + "response": { + "$ref": "InterconnectRemoteLocation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "description": "Retrieves the list of interconnect remote locations available to the specified project.", + "flatPath": "projects/{project}/global/interconnectRemoteLocations", + "httpMethod": "GET", + "id": "compute.interconnectRemoteLocations.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/global/interconnectRemoteLocations", + "response": { + "$ref": "InterconnectRemoteLocationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "interconnects": { "methods": { "delete": { @@ -15712,6 +16084,56 @@ "https://www.googleapis.com/auth/compute" ] }, + "simulateMaintenanceEvent": { + "description": "Simulates maintenance event on specified nodes from the node group.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", + "httpMethod": "POST", + "id": "compute.nodeGroups.simulateMaintenanceEvent", + "parameterOrder": [ + "project", + "zone", + "nodeGroup" + ], + "parameters": { + "nodeGroup": { + "description": "Name of the NodeGroup resource whose nodes will go under maintenance simulation.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", + "request": { + "$ref": "NodeGroupsSimulateMaintenanceEventRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", @@ -18841,6 +19263,48 @@ "https://www.googleapis.com/auth/compute" ] }, + "bulkInsert": { + "description": "Bulk create a set of disks.", + "flatPath": "projects/{project}/regions/{region}/disks/bulkInsert", + "httpMethod": "POST", + "id": "compute.regionDisks.bulkInsert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/disks/bulkInsert", + "request": { + "$ref": "BulkInsertDiskResource" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "createSnapshot": { "description": "Creates a snapshot of a specified persistent disk. For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", "flatPath": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", @@ -19334,6 +19798,145 @@ "https://www.googleapis.com/auth/compute" ] }, + "startAsyncReplication": { + "description": "Starts asynchronous replication. 
Must be invoked on the primary disk.", + "flatPath": "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication", + "httpMethod": "POST", + "id": "compute.regionDisks.startAsyncReplication", + "parameterOrder": [ + "project", + "region", + "disk" + ], + "parameters": { + "disk": { + "description": "The name of the persistent disk.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication", + "request": { + "$ref": "RegionDisksStartAsyncReplicationRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "stopAsyncReplication": { + "description": "Stops asynchronous replication. Can be invoked either on the primary or on the secondary disk.", + "flatPath": "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication", + "httpMethod": "POST", + "id": "compute.regionDisks.stopAsyncReplication", + "parameterOrder": [ + "project", + "region", + "disk" + ], + "parameters": { + "disk": { + "description": "The name of the persistent disk.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "stopGroupAsyncReplication": { + "description": "Stops asynchronous replication for a consistency group of disks. Can be invoked either in the primary or secondary scope.", + "flatPath": "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication", + "httpMethod": "POST", + "id": "compute.regionDisks.stopGroupAsyncReplication", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request. This must be the region of the primary or secondary disks in the consistency group.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication", + "request": { + "$ref": "DisksStopGroupAsyncReplicationResource" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", "flatPath": "projects/{project}/regions/{region}/disks/{resource}/testIamPermissions", @@ -25956,6 +26559,11 @@ "minimum": "0", "type": "integer" }, + "natName": { + "description": "Name of the nat service to filter the Nat Mapping information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", + "location": "query", + "type": "string" + }, "orderBy": { "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", @@ -33123,7 +33731,7 @@ } } }, - "revision": "20230307", + "revision": "20230620", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -33240,6 +33848,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -33269,6 +33878,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -33360,6 +33970,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -33389,6 +34000,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -33462,6 +34074,7 @@ 
"INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -33491,6 +34104,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -33543,11 +34157,11 @@ "id": "AccessConfig", "properties": { "externalIpv6": { - "description": "The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", + "description": "Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", "type": "string" }, "externalIpv6PrefixLength": { - "description": "The prefix length of the external IPv6 range.", + "description": "Applies to ipv6AccessConfigs only. 
The prefix length of the external IPv6 range.", "format": "int32", "type": "integer" }, @@ -33557,11 +34171,11 @@ "type": "string" }, "name": { - "description": "The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.", + "description": "The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6.", "type": "string" }, "natIP": { - "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", + "description": "Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", "type": "string" }, "networkTier": { @@ -33589,8 +34203,7 @@ "type": "boolean" }, "type": { - "default": "ONE_TO_ONE_NAT", - "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", + "description": "The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default and only option is DIRECT_IPV6.", "enum": [ "DIRECT_IPV6", "ONE_TO_ONE_NAT" @@ -33640,7 +34253,7 @@ "type": "string" }, "ipVersion": { - "description": "The IP version that will be used by this address. Valid options are IPV4 or IPV6. 
This can only be specified for a global address.", + "description": "The IP version that will be used by this address. Valid options are IPV4 or IPV6.", "enum": [ "IPV4", "IPV6", @@ -33670,6 +34283,18 @@ "description": "[Output Only] Type of the resource. Always compute#address for addresses.", "type": "string" }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this Address, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an Address.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. 
Label values may be empty.", + "type": "object" + }, "name": { "annotations": { "required": [ @@ -33816,6 +34441,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -33845,6 +34471,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -33936,6 +34563,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -33965,6 +34593,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -34038,6 +34667,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", 
"NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -34067,6 +34697,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -34357,6 +34988,18 @@ ], "type": "string" }, + "savedState": { + "description": "For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request. (see the discard_local_ssd option on Stop/Suspend). Read-only in the api.", + "enum": [ + "DISK_SAVED_STATE_UNSPECIFIED", + "PRESERVED" + ], + "enumDescriptions": [ + "*[Default]* Disk state has not been preserved.", + "Disk state has been preserved." + ], + "type": "string" + }, "shieldedInstanceInitialState": { "$ref": "InitialStateConfig", "description": "[Output Only] shielded vm initial state stored on disk" @@ -34448,6 +35091,18 @@ "format": "int64", "type": "string" }, + "provisionedThroughput": { + "description": "Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be between 1 and 7,124.", + "format": "int64", + "type": "string" + }, + "replicaZones": { + "description": "Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone. 
You can't use this option with boot disks.", + "items": { + "type": "string" + }, + "type": "array" + }, "resourceManagerTags": { "additionalProperties": { "type": "string" @@ -34706,6 +35361,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -34735,6 +35391,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -34826,6 +35483,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -34855,6 +35513,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -34986,6 +35645,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", 
"LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -35015,6 +35675,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -35067,7 +35728,7 @@ "id": "AutoscalingPolicy", "properties": { "coolDownPeriodSec": { - "description": "The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.", + "description": "The number of seconds that your application takes to initialize on a VM instance. This is referred to as the [initialization period](/compute/docs/autoscaler#cool_down_period). Specifying an accurate initialization period improves autoscaler decisions. For example, when scaling out, the autoscaler ignores data from VMs that are still initializing because those VMs might not yet represent normal usage of your application. The default initialization period is 60 seconds. Initialization periods might vary because of numerous factors. 
We recommend that you test how long your application takes to initialize. To do this, create a VM and time your application's startup process.", "format": "int32", "type": "integer" }, @@ -35097,7 +35758,7 @@ "type": "integer" }, "mode": { - "description": "Defines operating mode for this policy.", + "description": "Defines the operating mode for this policy. The following modes are available: - OFF: Disables the autoscaler but maintains its configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM instances only. - ON: Enables all autoscaler activities according to its policy. For more information, see \"Turning off or restricting an autoscaler\"", "enum": [ "OFF", "ON", @@ -35565,6 +36226,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -35594,6 +36256,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -35806,6 +36469,13 @@ "$ref": "Duration", "description": "Specifies the default maximum duration (timeout) for streams to this service. Duration is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed. If not specified, there will be no timeout limit, i.e. the maximum duration is infinite. 
This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. This field is only allowed when the loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED." }, + "metadatas": { + "additionalProperties": { + "type": "string" + }, + "description": "Deployment metadata associated with the resource to be set by a GKE hub controller and read by the backend RCTH", + "type": "object" + }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -35962,6 +36632,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -35991,6 +36662,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -36303,6 +36975,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", 
"MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -36332,6 +37005,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -36510,6 +37184,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -36539,6 +37214,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -36855,6 +37531,17 @@ }, "type": "object" }, + "BulkInsertDiskResource": { + "description": "A transient resource used in compute.disks.bulkInsert and compute.regionDisks.bulkInsert. It is only used to process requests and is not persisted.", + "id": "BulkInsertDiskResource", + "properties": { + "sourceConsistencyGroupPolicy": { + "description": "The URL of the DiskConsistencyGroupPolicy for the group of disks to clone. 
This may be a full or partial URL, such as: - https://www.googleapis.com/compute/v1/projects/project/regions/region /resourcePolicies/resourcePolicy - projects/project/regions/region/resourcePolicies/resourcePolicy - regions/region/resourcePolicies/resourcePolicy ", + "type": "string" + } + }, + "type": "object" + }, "BulkInsertInstanceResource": { "description": "A transient resource used in compute.instances.bulkInsert and compute.regionInstances.bulkInsert . This resource is not persisted anywhere, it is used only for processing the requests.", "id": "BulkInsertInstanceResource", @@ -37095,7 +37782,7 @@ "type": "string" }, "splitSourceCommitment": { - "description": "Source commitment to be splitted into a new commitment.", + "description": "Source commitment to be split into a new commitment.", "type": "string" }, "startTimestamp": { @@ -37136,6 +37823,7 @@ "GENERAL_PURPOSE_N2", "GENERAL_PURPOSE_N2D", "GENERAL_PURPOSE_T2D", + "GRAPHICS_OPTIMIZED", "MEMORY_OPTIMIZED", "MEMORY_OPTIMIZED_M3", "TYPE_UNSPECIFIED" @@ -37152,6 +37840,7 @@ "", "", "", + "", "" ], "type": "string" @@ -37210,6 +37899,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -37239,6 +37929,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -37330,6 +38021,7 @@ 
"INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -37359,6 +38051,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -37432,6 +38125,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -37461,6 +38155,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -37709,7 +38404,7 @@ "id": "CustomerEncryptionKey", "properties": { "kmsKeyName": { - "description": "The name of the encryption key that is stored in Google Cloud KMS. 
For example: \"kmsKeyName\": \"projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key ", + "description": "The name of the encryption key that is stored in Google Cloud KMS. For example: \"kmsKeyName\": \"projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key The fully-qualifed key name may be returned for resource GET requests. For example: \"kmsKeyName\": \"projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeyVersions/1 ", "type": "string" }, "kmsKeyServiceAccount": { @@ -37802,6 +38497,17 @@ ], "type": "string" }, + "asyncPrimaryDisk": { + "$ref": "DiskAsyncReplication", + "description": "Disk asynchronously replicated into this disk." + }, + "asyncSecondaryDisks": { + "additionalProperties": { + "$ref": "DiskAsyncReplicationList" + }, + "description": "[Output Only] A list of disks this disk is asynchronously replicated to.", + "type": "object" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -37898,6 +38604,11 @@ "format": "int64", "type": "string" }, + "provisionedThroughput": { + "description": "Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be between 1 and 7,124.", + "format": "int64", + "type": "string" + }, "region": { "description": "[Output Only] URL of the region where the disk resides. Only applicable for regional resources. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" @@ -37916,6 +38627,10 @@ }, "type": "array" }, + "resourceStatus": { + "$ref": "DiskResourceStatus", + "description": "[Output Only] Status information for the disk resource." 
+ }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -37929,6 +38644,14 @@ "format": "int64", "type": "string" }, + "sourceConsistencyGroupPolicy": { + "description": "[Output Only] URL of the DiskConsistencyGroupPolicy for a secondary disk that was created using a consistency group.", + "type": "string" + }, + "sourceConsistencyGroupPolicyId": { + "description": "[Output Only] ID of the DiskConsistencyGroupPolicy for a secondary disk that was created using a consistency group.", + "type": "string" + }, "sourceDisk": { "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", "type": "string" @@ -38052,6 +38775,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -38081,6 +38805,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -38128,6 +38853,37 @@ }, "type": "object" }, + "DiskAsyncReplication": { + 
"id": "DiskAsyncReplication", + "properties": { + "consistencyGroupPolicy": { + "description": "[Output Only] URL of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", + "type": "string" + }, + "consistencyGroupPolicyId": { + "description": "[Output Only] ID of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", + "type": "string" + }, + "disk": { + "description": "The other disk asynchronously replicated to or from the current disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk ", + "type": "string" + }, + "diskId": { + "description": "[Output Only] The unique ID of the other disk asynchronously replicated to or from the current disk. This value identifies the exact disk that was used to create this replication. 
For example, if you started replicating the persistent disk from a disk that was later deleted and recreated under the same name, the disk ID would identify the exact version of the disk that was used.", + "type": "string" + } + }, + "type": "object" + }, + "DiskAsyncReplicationList": { + "id": "DiskAsyncReplicationList", + "properties": { + "asyncReplicationDisk": { + "$ref": "DiskAsyncReplication" + } + }, + "type": "object" + }, "DiskInstantiationConfig": { "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", "id": "DiskInstantiationConfig", @@ -38213,6 +38969,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -38242,6 +38999,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -38317,6 +39075,47 @@ }, "type": "object" }, + "DiskResourceStatus": { + "id": "DiskResourceStatus", + "properties": { + "asyncPrimaryDisk": { + "$ref": "DiskResourceStatusAsyncReplicationStatus" + }, + "asyncSecondaryDisks": { + "additionalProperties": { + "$ref": "DiskResourceStatusAsyncReplicationStatus" + }, + "description": "Key: disk, value: AsyncReplicationStatus message", + "type": "object" + } + }, + "type": "object" + }, + "DiskResourceStatusAsyncReplicationStatus": { + "id": 
"DiskResourceStatusAsyncReplicationStatus", + "properties": { + "state": { + "enum": [ + "ACTIVE", + "CREATED", + "STARTING", + "STATE_UNSPECIFIED", + "STOPPED", + "STOPPING" + ], + "enumDescriptions": [ + "Replication is active.", + "Secondary disk is created and is waiting for replication to start.", + "Replication is starting.", + "", + "Replication is stopped.", + "Replication is stopping." + ], + "type": "string" + } + }, + "type": "object" + }, "DiskType": { "description": "Represents a Disk Type resource. Google Compute Engine has two Disk Type resources: * [Regional](/compute/docs/reference/rest/v1/regionDiskTypes) * [Zonal](/compute/docs/reference/rest/v1/diskTypes) You can choose from a variety of disk types based on your needs. For more information, read Storage options. The diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks. The regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks.", "id": "DiskType", @@ -38423,6 +39222,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -38452,6 +39252,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -38543,6 +39344,7 @@ "INJECTED_KERNELS_DEPRECATED", 
"INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -38572,6 +39374,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -38645,6 +39448,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -38674,6 +39478,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -38784,6 +39589,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -38813,6 +39619,7 @@ "The operation involved use of an injected kernel, which is 
deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -38860,6 +39667,27 @@ }, "type": "object" }, + "DisksStartAsyncReplicationRequest": { + "id": "DisksStartAsyncReplicationRequest", + "properties": { + "asyncSecondaryDisk": { + "description": "The secondary disk to start asynchronous replication to. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", + "type": "string" + } + }, + "type": "object" + }, + "DisksStopGroupAsyncReplicationResource": { + "description": "A transient resource used in compute.disks.stopGroupAsyncReplication and compute.regionDisks.stopGroupAsyncReplication. It is only used to process requests and is not persisted.", + "id": "DisksStopGroupAsyncReplicationResource", + "properties": { + "resourcePolicy": { + "description": "The URL of the DiskConsistencyGroupPolicy for the group of disks to stop. 
This may be a full or partial URL, such as: - https://www.googleapis.com/compute/v1/projects/project/regions/region /resourcePolicies/resourcePolicy - projects/project/regions/region/resourcePolicies/resourcePolicy - regions/region/resourcePolicies/resourcePolicy ", + "type": "string" + } + }, + "type": "object" + }, "DisplayDevice": { "description": "A set of Display Device options", "id": "DisplayDevice", @@ -39032,6 +39860,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -39061,6 +39890,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -39266,6 +40096,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -39295,6 +40126,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The 
route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -39562,6 +40394,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -39591,6 +40424,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -39823,6 +40657,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -39852,6 +40687,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -39982,6 +40818,20 @@ "description": "Represents a match condition that incoming traffic is evaluated against. 
Exactly one field must be specified.", "id": "FirewallPolicyRuleMatcher", "properties": { + "destAddressGroups": { + "description": "Address groups which should be matched against the traffic destination. Maximum number of destination address groups is 10.", + "items": { + "type": "string" + }, + "type": "array" + }, + "destFqdns": { + "description": "Fully Qualified Domain Name (FQDN) which should be matched against traffic destination. Maximum number of destination fqdn allowed is 100.", + "items": { + "type": "string" + }, + "type": "array" + }, "destIpRanges": { "description": "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000.", "items": { @@ -39989,6 +40839,20 @@ }, "type": "array" }, + "destRegionCodes": { + "description": "Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex.\"US\" Maximum number of dest region codes allowed is 5000.", + "items": { + "type": "string" + }, + "type": "array" + }, + "destThreatIntelligences": { + "description": "Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination.", + "items": { + "type": "string" + }, + "type": "array" + }, "layer4Configs": { "description": "Pairs of IP protocols and ports that the rule should match.", "items": { @@ -39996,6 +40860,20 @@ }, "type": "array" }, + "srcAddressGroups": { + "description": "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10.", + "items": { + "type": "string" + }, + "type": "array" + }, + "srcFqdns": { + "description": "Fully Qualified Domain Name (FQDN) which should be matched against traffic source. Maximum number of source fqdn allowed is 100.", + "items": { + "type": "string" + }, + "type": "array" + }, "srcIpRanges": { "description": "CIDR IP address range. 
Maximum number of source CIDR IP ranges allowed is 5000.", "items": { @@ -40003,12 +40881,26 @@ }, "type": "array" }, + "srcRegionCodes": { + "description": "Region codes whose IP addresses will be used to match for source of traffic. Should be specified as 2 letter country code defined as per ISO 3166 alpha-2 country codes. ex.\"US\" Maximum number of source region codes allowed is 5000.", + "items": { + "type": "string" + }, + "type": "array" + }, "srcSecureTags": { "description": "List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256.", "items": { "$ref": "FirewallPolicyRuleSecureTag" }, "type": "array" + }, + "srcThreatIntelligences": { + "description": "Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -40110,7 +41002,11 @@ "type": "boolean" }, "allowGlobalAccess": { - "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If the field is set to TRUE, clients can access ILB from all regions. Otherwise only allows access from clients in the same region as the internal load balancer.", + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. 
Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.", + "type": "boolean" + }, + "allowPscGlobalAccess": { + "description": "This is used in PSC consumer ForwardingRule to control whether the PSC endpoint can be accessed from another region.", "type": "boolean" }, "backendService": { @@ -40207,7 +41103,7 @@ "type": "string" }, "network": { - "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. 
For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", "type": "string" }, "networkTier": { @@ -40358,6 +41254,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -40387,6 +41284,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -40478,6 +41376,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -40507,6 +41406,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -40608,6 +41508,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + 
"LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -40637,6 +41538,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -40717,6 +41619,20 @@ }, "type": "object" }, + "GlobalAddressesMoveRequest": { + "id": "GlobalAddressesMoveRequest", + "properties": { + "description": { + "description": "An optional destination address description if intended to be different from the source.", + "type": "string" + }, + "destinationAddress": { + "description": "The URL of the destination address to move to. This can be a full or partial URL. For example, the following are all valid URLs to a address: - https://www.googleapis.com/compute/v1/projects/project /global/addresses/address - projects/project/global/addresses/address Note that destination project must be different from the source project. So /global/addresses/address is not valid partial url.", + "type": "string" + } + }, + "type": "object" + }, "GlobalNetworkEndpointGroupsAttachEndpointsRequest": { "id": "GlobalNetworkEndpointGroupsAttachEndpointsRequest", "properties": { @@ -40874,13 +41790,14 @@ "id": "GuestOsFeature", "properties": { "type": { - "description": "The ID of a supported feature. To add multiple values, use commas to separate values. 
Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling guest operating system features.", + "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features.", "enum": [ "FEATURE_TYPE_UNSPECIFIED", "GVNIC", "MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", + "SEV_LIVE_MIGRATABLE", "SEV_SNP_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", @@ -40895,6 +41812,7 @@ "", "", "", + "", "" ], "type": "string" @@ -41062,7 +41980,7 @@ "type": "object" }, "HealthCheck": { - "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/v1/healthChecks) * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.HealthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", + "description": "Represents a Health Check resource. 
Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/v1/healthChecks) * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.healthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.healthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -41207,6 +42125,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -41236,6 +42155,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -41433,6 +42353,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + 
"LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -41462,6 +42383,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -41560,6 +42482,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -41589,6 +42512,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -41662,6 +42586,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -41691,6 +42616,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check 
that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -41757,7 +42683,7 @@ "type": "string" }, "healthState": { - "description": "Health state of the instance.", + "description": "Health state of the IPv4 address of the instance.", "enum": [ "HEALTHY", "UNHEALTHY" @@ -41830,10 +42756,10 @@ "UNKNOWN" ], "enumDescriptions": [ - "", - "", - "", - "" + "Endpoint is being drained.", + "Endpoint is healthy.", + "Endpoint is unhealthy.", + "Health status of the endpoint is unknown." ], "type": "string" } @@ -42143,6 +43069,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -42172,6 +43099,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -42416,6 +43344,10 @@ }, "type": "array" }, + "pathTemplateMatch": { + "description": "If specified, the route is a pattern match expression that must match the :path header once the query string is removed. 
A pattern match allows you to match - The value must be between 1 and 1024 characters - The pattern must start with a leading slash (\"/\") - There may be no more than 5 operators in pattern Precisely one of prefix_match, full_path_match, regex_match or path_template_match must be set.", + "type": "string" + }, "prefixMatch": { "description": "For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /. The value must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified.", "type": "string" @@ -42545,6 +43477,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -42574,6 +43507,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -42887,6 +43821,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -42916,6 +43851,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of 
resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -43058,6 +43994,10 @@ "format": "uint64", "type": "string" }, + "instanceEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts suspended data for an instance with a customer-managed encryption key. If you are creating a new instance, this field will encrypt the local SSD and in-memory contents of the instance during the suspend operation. If you do not provide an encryption key when creating the instance, then the local SSD and in-memory contents will be encrypted using an automatically generated key during the suspend operation." + }, "keyRevocationActionType": { "description": "KeyRevocationActionType of the instance. Supported options are \"STOP\" and \"NONE\". 
The default value is \"NONE\" if it is not specified.", "enum": [ @@ -43218,7 +44158,7 @@ "TERMINATED" ], "enumDescriptions": [ - "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", "Resources are being allocated for the instance.", "The instance is in repair.", "The instance is running.", @@ -43297,6 +44237,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -43326,6 +44267,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -43536,6 +44478,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -43565,6 +44508,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of 
resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -43656,6 +44600,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -43685,6 +44630,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -43996,6 +44942,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -44025,6 +44972,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -44080,7 +45028,7 @@ "type": "string" }, 
"initialDelaySec": { - "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600].", + "description": "The initial delay is the number of seconds that a new VM takes to initialize and run its startup script. During a VM's initial delay period, the MIG ignores unsuccessful health checks because the VM might be in the startup process. This prevents the MIG from prematurely recreating a VM. If the health check receives a healthy response during the initial delay, it indicates that the startup process is complete and the VM is ready. The value of initial delay must be between 0 and 3600 seconds. The default value is 0.", "format": "int32", "type": "integer" } @@ -44131,6 +45079,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -44160,6 +45109,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -44296,14 +45246,14 @@ ], "enumDescriptions": [ "Do not perform any action.", - "Updates applied in 
runtime, instances will not be disrupted.", - "Old instances will be deleted. New instances will be created from the target template.", - "Every instance will be restarted." + "Do not stop the instance.", + "(Default.) Replace the instance according to the replacement method option.", + "Stop the instance and start it again." ], "type": "string" }, "mostDisruptiveAllowedAction": { - "description": "Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. If the Updater determines that the minimal update action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all.", + "description": "Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to avoid restarting the VM and to limit disruption as much as possible. RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. If the Updater determines that the minimal update action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all.", "enum": [ "NONE", "REFRESH", @@ -44312,9 +45262,9 @@ ], "enumDescriptions": [ "Do not perform any action.", - "Updates applied in runtime, instances will not be disrupted.", - "Old instances will be deleted. New instances will be created from the target template.", - "Every instance will be restarted." + "Do not stop the instance.", + "(Default.) Replace the instance according to the replacement method option.", + "Stop the instance and start it again." 
], "type": "string" }, @@ -44392,7 +45342,7 @@ "type": "array" }, "minimalAction": { - "description": "The minimal action that you want to perform on each instance during the update: - REPLACE: At minimum, delete the instance and create it again. - RESTART: Stop the instance and start it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt the instance at all. By default, the minimum action is NONE. If your update requires a more disruptive action than you set with this flag, the necessary action is performed to execute the update.", + "description": "The minimal action that you want to perform on each instance during the update: - REPLACE: At minimum, delete the instance and create it again. - RESTART: Stop the instance and start it again. - REFRESH: Do not stop the instance and limit disruption as much as possible. - NONE: Do not disrupt the instance at all. By default, the minimum action is NONE. If your update requires a more disruptive action than you set with this flag, the necessary action is performed to execute the update.", "enum": [ "NONE", "REFRESH", @@ -44401,14 +45351,14 @@ ], "enumDescriptions": [ "Do not perform any action.", - "Updates applied in runtime, instances will not be disrupted.", - "Old instances will be deleted. New instances will be created from the target template.", - "Every instance will be restarted." + "Do not stop the instance.", + "(Default.) Replace the instance according to the replacement method option.", + "Stop the instance and start it again." ], "type": "string" }, "mostDisruptiveAllowedAction": { - "description": "The most disruptive action that you want to perform on each instance during the update: - REPLACE: Delete the instance and create it again. - RESTART: Stop the instance and start it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt the instance at all. By default, the most disruptive allowed action is REPLACE. 
If your update requires a more disruptive action than you set with this flag, the update request will fail.", + "description": "The most disruptive action that you want to perform on each instance during the update: - REPLACE: Delete the instance and create it again. - RESTART: Stop the instance and start it again. - REFRESH: Do not stop the instance and limit disruption as much as possible. - NONE: Do not disrupt the instance at all. By default, the most disruptive allowed action is REPLACE. If your update requires a more disruptive action than you set with this flag, the update request will fail.", "enum": [ "NONE", "REFRESH", @@ -44417,9 +45367,9 @@ ], "enumDescriptions": [ "Do not perform any action.", - "Updates applied in runtime, instances will not be disrupted.", - "Old instances will be deleted. New instances will be created from the target template.", - "Every instance will be restarted." + "Do not stop the instance.", + "(Default.) Replace the instance according to the replacement method option.", + "Stop the instance and start it again." ], "type": "string" } @@ -44444,7 +45394,7 @@ "id": "InstanceGroupManagersDeleteInstancesRequest", "properties": { "instances": { - "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have URL and can be deleted only by name. 
One cannot specify both URLs and names in a single request.", "items": { "type": "string" }, @@ -44535,6 +45485,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -44564,6 +45515,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -44664,6 +45616,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -44693,6 +45646,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -44838,6 +45792,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", 
"NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -44867,6 +45822,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -44971,6 +45927,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -45000,6 +45957,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -45109,6 +46067,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -45138,6 +46097,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a 
deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -45229,6 +46189,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -45258,6 +46219,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -45659,6 +46621,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -45688,6 +46651,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not 
assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -45779,6 +46743,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -45808,6 +46773,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -45881,6 +46847,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -45910,6 +46877,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -45986,7 +46954,7 @@ "TERMINATED" ], "enumDescriptions": [ - "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks 
etc.", + "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", "Resources are being allocated for the instance.", "The instance is in repair.", "The instance is running.", @@ -46115,6 +47083,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -46144,6 +47113,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -46303,7 +47273,7 @@ "type": "object" }, "Interconnect": { - "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the GCP network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", + "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the Google Cloud network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", "id": "Interconnect", "properties": { "adminEnabled": { @@ -46375,6 +47345,18 @@ "description": "[Output Only] Type of the resource. 
Always compute#interconnect for interconnects.", "type": "string" }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this Interconnect, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an Interconnect.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "type": "object" + }, "linkType": { "description": "Type of link requested, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.", "enum": [ @@ -46426,6 +47408,10 @@ "format": "int32", "type": "integer" }, + "remoteLocation": { + "description": "Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to.", + "type": "string" + }, "requestedLinkCount": { "description": "Target number of physical links in the link bundle, as requested by the customer.", "format": "int32", @@ -46520,6 +47506,10 @@ "description": "This field is not available.", "type": "string" }, + "configurationConstraints": { + "$ref": "InterconnectAttachmentConfigurationConstraints", + "description": "[Output Only] Constraints for this attachment, if any. 
The attachment does not work if these constraints are not met." + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -46585,7 +47575,7 @@ "type": "string" }, "ipsecInternalAddresses": { - "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool. Not currently available publicly. ", + "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. 
If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool.", "items": { "type": "string" }, @@ -46596,6 +47586,18 @@ "description": "[Output Only] Type of the resource. Always compute#interconnectAttachment for interconnect attachments.", "type": "string" }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this InterconnectAttachment, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an InterconnectAttachment.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "type": "object" + }, "mtu": { "description": "Maximum Transmission Unit (MTU), in bytes, of packets passing through this interconnect attachment. Only 1440 and 1500 are allowed. If not specified, the value will default to 1440.", "format": "int32", @@ -46639,6 +47641,10 @@ "description": "[Output Only] URL of the region where the regional interconnect attachment resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" }, + "remoteService": { + "description": "[Output Only] If the attachment is on a Cross-Cloud Interconnect connection, this field contains the interconnect's remote location service provider. 
Example values: \"Amazon Web Services\" \"Microsoft Azure\". The field is set only for attachments on Cross-Cloud Interconnect connections. Its value is copied from the InterconnectRemoteLocation remoteService field.", + "type": "string" + }, "router": { "description": "URL of the Cloud Router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the network \u0026 region within which the Cloud Router is configured.", "type": "string" @@ -46685,6 +47691,11 @@ ], "type": "string" }, + "subnetLength": { + "description": "Length of the IPv4 subnet mask. Allowed values: - 29 (default) - 30 The default value is 29, except for Cross-Cloud Interconnect connections that use an InterconnectRemoteLocation with a constraints.subnetLengthRange.min equal to 30. For example, connections that use an Azure remote location fall into this category. In these cases, the default value is 30, and requesting 29 returns an error. Where both 29 and 30 are allowed, 29 is preferred, because it gives Google Cloud Support more debugging visibility. ", + "format": "int32", + "type": "integer" + }, "type": { "description": "The type of interconnect attachment this is, which can take one of the following values: - DEDICATED: an attachment to a Dedicated Interconnect. - PARTNER: an attachment to a Partner Interconnect, created by the customer. - PARTNER_PROVIDER: an attachment to a Partner Interconnect, created by the partner. 
", "enum": [ @@ -46758,6 +47769,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -46787,6 +47799,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -46834,6 +47847,47 @@ }, "type": "object" }, + "InterconnectAttachmentConfigurationConstraints": { + "id": "InterconnectAttachmentConfigurationConstraints", + "properties": { + "bgpMd5": { + "description": "[Output Only] Whether the attachment's BGP session requires/allows/disallows BGP MD5 authentication. This can take one of the following values: MD5_OPTIONAL, MD5_REQUIRED, MD5_UNSUPPORTED. For example, a Cross-Cloud Interconnect connection to a remote cloud provider that requires BGP MD5 authentication has the interconnectRemoteLocation attachment_configuration_constraints.bgp_md5 field set to MD5_REQUIRED, and that property is propagated to the attachment. 
Similarly, if BGP MD5 is MD5_UNSUPPORTED, an error is returned if MD5 is requested.", + "enum": [ + "MD5_OPTIONAL", + "MD5_REQUIRED", + "MD5_UNSUPPORTED" + ], + "enumDescriptions": [ + "MD5_OPTIONAL: BGP MD5 authentication is supported and can optionally be configured.", + "MD5_REQUIRED: BGP MD5 authentication must be configured.", + "MD5_UNSUPPORTED: BGP MD5 authentication must not be configured" + ], + "type": "string" + }, + "bgpPeerAsnRanges": { + "description": "[Output Only] List of ASN ranges that the remote location is known to support. Formatted as an array of inclusive ranges {min: min-value, max: max-value}. For example, [{min: 123, max: 123}, {min: 64512, max: 65534}] allows the peer ASN to be 123 or anything in the range 64512-65534. This field is only advisory. Although the API accepts other ranges, these are the ranges that we recommend.", + "items": { + "$ref": "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange" + }, + "type": "array" + } + }, + "type": "object" + }, + "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange": { + "id": "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange", + "properties": { + "max": { + "format": "uint32", + "type": "integer" + }, + "min": { + "format": "uint32", + "type": "integer" + } + }, + "type": "object" + }, "InterconnectAttachmentList": { "description": "Response to the list request, and contains a list of interconnect attachments.", "id": "InterconnectAttachmentList", @@ -46878,6 +47932,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -46907,6 +47962,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number 
of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -47011,6 +48067,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -47040,6 +48097,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -47313,6 +48371,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -47342,6 +48401,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The 
route's next hop instance cannot ip forward.", @@ -47455,10 +48515,366 @@ "type": "string" }, "kind": { - "default": "compute#interconnectLocation", - "description": "[Output Only] Type of the resource. Always compute#interconnectLocation for interconnect locations.", + "default": "compute#interconnectLocation", + "description": "[Output Only] Type of the resource. Always compute#interconnectLocation for interconnect locations.", + "type": "string" + }, + "name": { + "description": "[Output Only] Name of the resource.", + "type": "string" + }, + "peeringdbFacilityId": { + "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb).", + "type": "string" + }, + "regionInfos": { + "description": "[Output Only] A list of InterconnectLocation.RegionInfo objects, that describe parameters pertaining to the relation between this InterconnectLocation and various Google Cloud regions.", + "items": { + "$ref": "InterconnectLocationRegionInfo" + }, + "type": "array" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "status": { + "description": "[Output Only] The status of this InterconnectLocation, which can take one of the following values: - CLOSED: The InterconnectLocation is closed and is unavailable for provisioning new Interconnects. - AVAILABLE: The InterconnectLocation is available for provisioning new Interconnects. ", + "enum": [ + "AVAILABLE", + "CLOSED" + ], + "enumDescriptions": [ + "The InterconnectLocation is available for provisioning new Interconnects.", + "The InterconnectLocation is closed for provisioning new Interconnects." 
+ ], + "type": "string" + }, + "supportsPzs": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" + } + }, + "type": "object" + }, + "InterconnectLocationList": { + "description": "Response to the list request, and contains a list of interconnect locations.", + "id": "InterconnectLocationList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of InterconnectLocation resources.", + "items": { + "$ref": "InterconnectLocation" + }, + "type": "array" + }, + "kind": { + "default": "compute#interconnectLocationList", + "description": "[Output Only] Type of resource. Always compute#interconnectLocationList for lists of interconnect locations.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. 
For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "InterconnectLocationRegionInfo": { + "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", + "id": "InterconnectLocationRegionInfo", + "properties": { + "expectedRttMs": { + "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", + "format": "int64", + "type": "string" + }, + "locationPresence": { + "description": "Identifies the network presence of this location.", + "enum": [ + "GLOBAL", + "LOCAL_REGION", + "LP_GLOBAL", + "LP_LOCAL_REGION" + ], + "enumDescriptions": [ + "This region is not in any common network presence with this InterconnectLocation.", + "This region shares the same regional network presence as this InterconnectLocation.", + "[Deprecated] This region is not in any common network presence with this InterconnectLocation.", + "[Deprecated] This region shares the same regional network presence as this 
InterconnectLocation." + ], + "type": "string" + }, + "region": { + "description": "URL for the region of this location.", + "type": "string" + } + }, + "type": "object" + }, + "InterconnectOutageNotification": { + "description": "Description of a planned outage on this Interconnect.", + "id": "InterconnectOutageNotification", + "properties": { + "affectedCircuits": { + "description": "If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", + "items": { + "type": "string" + }, + "type": "array" + }, + "description": { + "description": "A description about the purpose of the outage.", + "type": "string" + }, + "endTime": { + "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", + "format": "int64", + "type": "string" + }, + "issueType": { + "description": "Form this outage is expected to take, which can take one of the following values: - OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", + "enum": [ + "IT_OUTAGE", + "IT_PARTIAL_OUTAGE", + "OUTAGE", + "PARTIAL_OUTAGE" + ], + "enumDescriptions": [ + "[Deprecated] The Interconnect may be completely out of service for some or all of the specified window.", + "[Deprecated] Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth.", + "The Interconnect may be completely out of service for some or all of the specified window.", + "Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth." 
+ ], + "type": "string" + }, + "name": { + "description": "Unique identifier for this outage notification.", + "type": "string" + }, + "source": { + "description": "The party that generated this notification, which can take the following value: - GOOGLE: this notification as generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", + "enum": [ + "GOOGLE", + "NSRC_GOOGLE" + ], + "enumDescriptions": [ + "This notification was generated by Google.", + "[Deprecated] This notification was generated by Google." + ], + "type": "string" + }, + "startTime": { + "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", + "format": "int64", + "type": "string" + }, + "state": { + "description": "State of this notification, which can take one of the following values: - ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. - CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. - COMPLETED: The outage associated with this notification is complete. Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", + "enum": [ + "ACTIVE", + "CANCELLED", + "COMPLETED", + "NS_ACTIVE", + "NS_CANCELED" + ], + "enumDescriptions": [ + "This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", + "The outage associated with this notification was cancelled before the outage was due to start.", + "The outage associated with this notification is complete.", + "[Deprecated] This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", + "[Deprecated] The outage associated with this notification was canceled before the outage was due to start." 
+ ], + "type": "string" + } + }, + "type": "object" + }, + "InterconnectRemoteLocation": { + "description": "Represents a Cross-Cloud Interconnect Remote Location resource. You can use this resource to find remote location details about an Interconnect attachment (VLAN).", + "id": "InterconnectRemoteLocation", + "properties": { + "address": { + "description": "[Output Only] The postal address of the Point of Presence, each line in the address is separated by a newline character.", + "type": "string" + }, + "attachmentConfigurationConstraints": { + "$ref": "InterconnectAttachmentConfigurationConstraints", + "description": "[Output Only] Subset of fields from InterconnectAttachment's |configurationConstraints| field that apply to all attachments for this remote location." + }, + "city": { + "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\".", + "type": "string" + }, + "constraints": { + "$ref": "InterconnectRemoteLocationConstraints", + "description": "[Output Only] Constraints on the parameters for creating Cross-Cloud Interconnect and associated InterconnectAttachments." 
+ }, + "continent": { + "description": "[Output Only] Continent for this location, which can take one of the following values: - AFRICA - ASIA_PAC - EUROPE - NORTH_AMERICA - SOUTH_AMERICA ", + "enum": [ + "AFRICA", + "ASIA_PAC", + "EUROPE", + "NORTH_AMERICA", + "SOUTH_AMERICA" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "[Output Only] An optional description of the resource.", + "type": "string" + }, + "facilityProvider": { + "description": "[Output Only] The name of the provider for this facility (e.g., EQUINIX).", + "type": "string" + }, + "facilityProviderFacilityId": { + "description": "[Output Only] A provider-assigned Identifier for this facility (e.g., Ashburn-DC1).", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#interconnectRemoteLocation", + "description": "[Output Only] Type of the resource. Always compute#interconnectRemoteLocation for interconnect remote locations.", + "type": "string" + }, + "lacp": { + "description": "[Output Only] Link Aggregation Control Protocol (LACP) constraints, which can take one of the following values: LACP_SUPPORTED, LACP_UNSUPPORTED", + "enum": [ + "LACP_SUPPORTED", + "LACP_UNSUPPORTED" + ], + "enumDescriptions": [ + "LACP_SUPPORTED: LACP is supported, and enabled by default on the Cross-Cloud Interconnect.", + "LACP_UNSUPPORTED: LACP is not supported and is not be enabled on this port. GetDiagnostics shows bundleAggregationType as \"static\". GCP does not support LAGs without LACP, so requestedLinkCount must be 1." 
+ ], "type": "string" }, + "maxLagSize100Gbps": { + "description": "[Output Only] The maximum number of 100 Gbps ports supported in a link aggregation group (LAG). When linkType is 100 Gbps, requestedLinkCount cannot exceed max_lag_size_100_gbps.", + "format": "int32", + "type": "integer" + }, + "maxLagSize10Gbps": { + "description": "[Output Only] The maximum number of 10 Gbps ports supported in a link aggregation group (LAG). When linkType is 10 Gbps, requestedLinkCount cannot exceed max_lag_size_10_gbps.", + "format": "int32", + "type": "integer" + }, "name": { "description": "[Output Only] Name of the resource.", "type": "string" @@ -47467,58 +48883,106 @@ "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb).", "type": "string" }, - "regionInfos": { - "description": "[Output Only] A list of InterconnectLocation.RegionInfo objects, that describe parameters pertaining to the relation between this InterconnectLocation and various Google Cloud regions.", + "permittedConnections": { + "description": "[Output Only] Permitted connections.", "items": { - "$ref": "InterconnectLocationRegionInfo" + "$ref": "InterconnectRemoteLocationPermittedConnections" }, "type": "array" }, + "remoteService": { + "description": "[Output Only] Indicates the service provider present at the remote location. Example values: \"Amazon Web Services\", \"Microsoft Azure\".", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, "status": { - "description": "[Output Only] The status of this InterconnectLocation, which can take one of the following values: - CLOSED: The InterconnectLocation is closed and is unavailable for provisioning new Interconnects. - AVAILABLE: The InterconnectLocation is available for provisioning new Interconnects. 
", + "description": "[Output Only] The status of this InterconnectRemoteLocation, which can take one of the following values: - CLOSED: The InterconnectRemoteLocation is closed and is unavailable for provisioning new Cross-Cloud Interconnects. - AVAILABLE: The InterconnectRemoteLocation is available for provisioning new Cross-Cloud Interconnects. ", "enum": [ "AVAILABLE", "CLOSED" ], "enumDescriptions": [ - "The InterconnectLocation is available for provisioning new Interconnects.", - "The InterconnectLocation is closed for provisioning new Interconnects." + "The InterconnectRemoteLocation is available for provisioning new Cross-Cloud Interconnects.", + "The InterconnectRemoteLocation is closed for provisioning new Cross-Cloud Interconnects." + ], + "type": "string" + } + }, + "type": "object" + }, + "InterconnectRemoteLocationConstraints": { + "id": "InterconnectRemoteLocationConstraints", + "properties": { + "portPairRemoteLocation": { + "description": "[Output Only] Port pair remote location constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to individual ports, but the UI uses this field when ordering a pair of ports, to prevent users from accidentally ordering something that is incompatible with their cloud provider. 
Specifically, when ordering a redundant pair of Cross-Cloud Interconnect ports, and one of them uses a remote location with portPairMatchingRemoteLocation set to matching, the UI requires that both ports use the same remote location.", + "enum": [ + "PORT_PAIR_MATCHING_REMOTE_LOCATION", + "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" + ], + "enumDescriptions": [ + "If PORT_PAIR_MATCHING_REMOTE_LOCATION, the remote cloud provider allocates ports in pairs, and the user should choose the same remote location for both ports.", + "If PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, a user may opt to provision a redundant pair of Cross-Cloud Interconnects using two different remote locations in the same city." ], "type": "string" }, - "supportsPzs": { - "description": "[Output Only] Reserved for future use.", - "type": "boolean" + "portPairVlan": { + "description": "[Output Only] Port pair VLAN constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_VLAN, PORT_PAIR_MATCHING_VLAN", + "enum": [ + "PORT_PAIR_MATCHING_VLAN", + "PORT_PAIR_UNCONSTRAINED_VLAN" + ], + "enumDescriptions": [ + "If PORT_PAIR_MATCHING_VLAN, the Interconnect for this attachment is part of a pair of ports that should have matching VLAN allocations. This occurs with Cross-Cloud Interconnect to Azure remote locations. While GCP's API does not explicitly group pairs of ports, the UI uses this field to ensure matching VLAN ids when configuring a redundant VLAN pair.", + "PORT_PAIR_UNCONSTRAINED_VLAN means there is no constraint." + ], + "type": "string" + }, + "subnetLengthRange": { + "$ref": "InterconnectRemoteLocationConstraintsSubnetLengthRange", + "description": "[Output Only] [min-length, max-length] The minimum and maximum value (inclusive) for the IPv4 subnet length. For example, an interconnectRemoteLocation for Azure has {min: 30, max: 30} because Azure requires /30 subnets. This range specifies the values supported by both cloud providers. 
Interconnect currently supports /29 and /30 IPv4 subnet lengths. If a remote cloud has no constraint on IPv4 subnet length, the range would thus be {min: 29, max: 30}. " } }, "type": "object" }, - "InterconnectLocationList": { - "description": "Response to the list request, and contains a list of interconnect locations.", - "id": "InterconnectLocationList", + "InterconnectRemoteLocationConstraintsSubnetLengthRange": { + "id": "InterconnectRemoteLocationConstraintsSubnetLengthRange", + "properties": { + "max": { + "format": "int32", + "type": "integer" + }, + "min": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "InterconnectRemoteLocationList": { + "description": "Response to the list request, and contains a list of interconnect remote locations.", + "id": "InterconnectRemoteLocationList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of InterconnectLocation resources.", + "description": "A list of InterconnectRemoteLocation resources.", "items": { - "$ref": "InterconnectLocation" + "$ref": "InterconnectRemoteLocation" }, "type": "array" }, "kind": { - "default": "compute#interconnectLocationList", - "description": "[Output Only] Type of resource. Always compute#interconnectLocationList for lists of interconnect locations.", + "default": "compute#interconnectRemoteLocationList", + "description": "[Output Only] Type of resource. Always compute#interconnectRemoteLocationList for lists of interconnect remote locations.", "type": "string" }, "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "description": "[Output Only] This token lets you get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, "selfLink": { @@ -47541,6 +49005,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -47570,6 +49035,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -47617,111 +49083,11 @@ }, "type": "object" }, - "InterconnectLocationRegionInfo": { - "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", - "id": "InterconnectLocationRegionInfo", + "InterconnectRemoteLocationPermittedConnections": { + "id": "InterconnectRemoteLocationPermittedConnections", "properties": { - "expectedRttMs": { - "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", - "format": "int64", - "type": "string" - }, - "locationPresence": { - "description": "Identifies the 
network presence of this location.", - "enum": [ - "GLOBAL", - "LOCAL_REGION", - "LP_GLOBAL", - "LP_LOCAL_REGION" - ], - "enumDescriptions": [ - "This region is not in any common network presence with this InterconnectLocation.", - "This region shares the same regional network presence as this InterconnectLocation.", - "[Deprecated] This region is not in any common network presence with this InterconnectLocation.", - "[Deprecated] This region shares the same regional network presence as this InterconnectLocation." - ], - "type": "string" - }, - "region": { - "description": "URL for the region of this location.", - "type": "string" - } - }, - "type": "object" - }, - "InterconnectOutageNotification": { - "description": "Description of a planned outage on this Interconnect.", - "id": "InterconnectOutageNotification", - "properties": { - "affectedCircuits": { - "description": "If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", - "items": { - "type": "string" - }, - "type": "array" - }, - "description": { - "description": "A description about the purpose of the outage.", - "type": "string" - }, - "endTime": { - "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", - "format": "int64", - "type": "string" - }, - "issueType": { - "description": "Form this outage is expected to take, which can take one of the following values: - OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. 
Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", - "enum": [ - "IT_OUTAGE", - "IT_PARTIAL_OUTAGE", - "OUTAGE", - "PARTIAL_OUTAGE" - ], - "enumDescriptions": [ - "[Deprecated] The Interconnect may be completely out of service for some or all of the specified window.", - "[Deprecated] Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth.", - "The Interconnect may be completely out of service for some or all of the specified window.", - "Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth." - ], - "type": "string" - }, - "name": { - "description": "Unique identifier for this outage notification.", - "type": "string" - }, - "source": { - "description": "The party that generated this notification, which can take the following value: - GOOGLE: this notification as generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", - "enum": [ - "GOOGLE", - "NSRC_GOOGLE" - ], - "enumDescriptions": [ - "This notification was generated by Google.", - "[Deprecated] This notification was generated by Google." - ], - "type": "string" - }, - "startTime": { - "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", - "format": "int64", - "type": "string" - }, - "state": { - "description": "State of this notification, which can take one of the following values: - ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. - CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. - COMPLETED: The outage associated with this notification is complete. 
Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", - "enum": [ - "ACTIVE", - "CANCELLED", - "COMPLETED", - "NS_ACTIVE", - "NS_CANCELED" - ], - "enumDescriptions": [ - "This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", - "The outage associated with this notification was cancelled before the outage was due to start.", - "The outage associated with this notification is complete.", - "[Deprecated] This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", - "[Deprecated] The outage associated with this notification was canceled before the outage was due to start." - ], + "interconnectLocation": { + "description": "[Output Only] URL of an Interconnect location that is permitted to connect to this Interconnect remote location.", "type": "string" } }, @@ -47948,6 +49314,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -47977,6 +49344,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -48374,6 +49742,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", 
"MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -48403,6 +49772,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -48464,7 +49834,7 @@ "type": "integer" }, "guestAcceleratorType": { - "description": "The accelerator type resource name, not a full URL, e.g. 'nvidia-tesla-k80'.", + "description": "The accelerator type resource name, not a full URL, e.g. nvidia-tesla-t4.", "type": "string" } }, @@ -48604,6 +49974,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -48633,6 +50004,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -48724,6 +50096,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", 
"MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -48753,6 +50126,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -48826,6 +50200,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -48855,6 +50230,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -48971,7 +50347,7 @@ "TERMINATED" ], "enumDescriptions": [ - "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", "Resources are being allocated for the instance.", "The instance is in repair.", "The instance is running.", @@ 
-49236,7 +50612,7 @@ "type": "string" }, "gatewayIPv4": { - "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", + "description": "[Output Only] The gateway address for default routing out of the network, selected by Google Cloud.", "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", "type": "string" }, @@ -49343,7 +50719,7 @@ "type": "string" }, "fingerprint": { - "description": "[Output Only] Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", "format": "byte", "type": "string" }, @@ -49368,7 +50744,7 @@ "type": "string" }, "network": { - "description": "[Output Only] The URL of the network which the Network Attachment belongs to.", + "description": "[Output Only] The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. 
Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks.", "type": "string" }, "producerAcceptLists": { @@ -49451,6 +50827,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -49480,6 +50857,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -49532,7 +50910,7 @@ "id": "NetworkAttachmentConnectedEndpoint", "properties": { "ipAddress": { - "description": "The IP address assigned to the producer instance network interface. This value will be a range in case of Serverless.", + "description": "The IPv4 address assigned to the producer instance network interface. 
This value will be a range in case of Serverless.", "type": "string" }, "projectIdOrNum": { @@ -49540,7 +50918,7 @@ "type": "string" }, "secondaryIpCidrRanges": { - "description": "Alias IP ranges from the same subnetwork", + "description": "Alias IP ranges from the same subnetwork.", "items": { "type": "string" }, @@ -49615,6 +50993,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -49644,6 +51023,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -49717,6 +51097,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -49746,6 +51127,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop 
instance cannot ip forward.", @@ -49898,6 +51280,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -49927,6 +51310,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -50000,6 +51384,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -50029,6 +51414,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -50265,6 +51651,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ 
-50294,6 +51681,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -50437,6 +51825,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -50466,6 +51855,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -50632,6 +52022,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -50661,6 +52052,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + 
"Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -50734,6 +52126,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -50763,6 +52156,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -50919,7 +52313,7 @@ "type": "integer" }, "stackType": { - "description": "The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at instance creation and update network interface operations.", + "description": "The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. 
This field can be both set at instance creation and update network interface operations.", "enum": [ "IPV4_IPV6", "IPV4_ONLY" @@ -50981,6 +52375,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -51010,6 +52405,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -51417,6 +52813,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -51446,6 +52843,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -51569,6 +52967,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + 
"LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -51598,6 +52997,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -51820,6 +53220,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -51849,6 +53250,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -51922,6 +53324,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -51951,6 +53354,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check 
that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -52008,6 +53412,19 @@ }, "type": "object" }, + "NodeGroupsSimulateMaintenanceEventRequest": { + "id": "NodeGroupsSimulateMaintenanceEventRequest", + "properties": { + "nodes": { + "description": "Names of the nodes to go under maintenance simulation.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "NodeTemplate": { "description": "Represent a sole-tenant Node Template resource. You can use a template to define properties for nodes in a node group. For more information, read Creating node groups and instances.", "id": "NodeTemplate", @@ -52161,6 +53578,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -52190,6 +53608,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -52281,6 +53700,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", 
"LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -52310,6 +53730,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -52398,6 +53819,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -52427,6 +53849,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -52586,6 +54009,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -52615,6 +54039,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is 
associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -52706,6 +54131,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -52735,6 +54161,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -52808,6 +54235,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -52837,6 +54265,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list 
filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -52997,6 +54426,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -53026,6 +54456,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -53241,6 +54672,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -53270,6 +54702,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -53374,6 +54807,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", 
"LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -53403,6 +54837,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -53494,6 +54929,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -53523,6 +54959,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -53596,6 +55033,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -53625,6 +55063,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is 
associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -53924,6 +55363,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -53953,6 +55393,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -54092,6 +55533,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -54121,6 +55563,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list 
filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -54263,6 +55706,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -54292,6 +55736,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -54745,7 +56190,7 @@ "type": "string" }, "dnsVerificationIp": { - "description": "The IPv4 address to be used for reverse DNS verification.", + "description": "The address to be used for reverse DNS verification.", "type": "string" }, "fingerprint": { @@ -54759,7 +56204,7 @@ "type": "string" }, "ipCidrRange": { - "description": "The IPv4 address range, in CIDR format, represented by this public advertised prefix.", + "description": "The address range, in CIDR format, represented by this public advertised prefix.", "type": "string" }, "kind": { @@ -54860,6 +56305,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -54889,6 +56335,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a 
health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -54986,7 +56433,7 @@ "type": "string" }, "ipCidrRange": { - "description": "The IPv4 address range, in CIDR format, represented by this public delegated prefix.", + "description": "The IP address range, in CIDR format, represented by this public delegated prefix.", "type": "string" }, "isLiveMigration": { @@ -55097,6 +56544,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -55126,6 +56574,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -55216,6 +56665,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -55245,6 +56695,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend 
service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -55305,7 +56756,7 @@ "type": "string" }, "ipCidrRange": { - "description": "The IPv4 address range, in CIDR format, represented by this sub public delegated prefix.", + "description": "The IP address range, in CIDR format, represented by this sub public delegated prefix.", "type": "string" }, "isAddress": { @@ -55361,6 +56812,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -55390,6 +56842,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -55474,6 +56927,7 @@ "COMMITTED_NVIDIA_A100_80GB_GPUS", "COMMITTED_NVIDIA_A100_GPUS", "COMMITTED_NVIDIA_K80_GPUS", + "COMMITTED_NVIDIA_L4_GPUS", "COMMITTED_NVIDIA_P100_GPUS", "COMMITTED_NVIDIA_P4_GPUS", "COMMITTED_NVIDIA_T4_GPUS", @@ -55525,11 +56979,15 @@ "NETWORK_ATTACHMENTS", "NETWORK_ENDPOINT_GROUPS", "NETWORK_FIREWALL_POLICIES", + 
"NET_LB_SECURITY_POLICIES_PER_REGION", + "NET_LB_SECURITY_POLICY_RULES_PER_REGION", + "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION", "NODE_GROUPS", "NODE_TEMPLATES", "NVIDIA_A100_80GB_GPUS", "NVIDIA_A100_GPUS", "NVIDIA_K80_GPUS", + "NVIDIA_L4_GPUS", "NVIDIA_P100_GPUS", "NVIDIA_P100_VWS_GPUS", "NVIDIA_P4_GPUS", @@ -55544,6 +57002,7 @@ "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS", "PREEMPTIBLE_NVIDIA_A100_GPUS", "PREEMPTIBLE_NVIDIA_K80_GPUS", + "PREEMPTIBLE_NVIDIA_L4_GPUS", "PREEMPTIBLE_NVIDIA_P100_GPUS", "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS", "PREEMPTIBLE_NVIDIA_P4_GPUS", @@ -55567,6 +57026,7 @@ "ROUTES", "SECURITY_POLICIES", "SECURITY_POLICIES_PER_REGION", + "SECURITY_POLICY_ADVANCED_RULES_PER_REGION", "SECURITY_POLICY_CEVAL_RULES", "SECURITY_POLICY_RULES", "SECURITY_POLICY_RULES_PER_REGION", @@ -55624,6 +57084,7 @@ "", "", "", + "", "Guest CPUs", "", "", @@ -55715,6 +57176,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "The total number of snapshots allowed for a single project.", "", "", @@ -55868,6 +57335,20 @@ }, "type": "object" }, + "RegionAddressesMoveRequest": { + "id": "RegionAddressesMoveRequest", + "properties": { + "description": { + "description": "An optional destination address description if intended to be different from the source.", + "type": "string" + }, + "destinationAddress": { + "description": "The URL of the destination address to move to. This can be a full or partial URL. For example, the following are all valid URLs to a address: - https://www.googleapis.com/compute/v1/projects/project/regions/region /addresses/address - projects/project/regions/region/addresses/address Note that destination project must be different from the source project. 
So /regions/region/addresses/address is not valid partial url.", + "type": "string" + } + }, + "type": "object" + }, "RegionAutoscalerList": { "description": "Contains a list of autoscalers.", "id": "RegionAutoscalerList", @@ -55912,6 +57393,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -55941,6 +57423,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -56031,6 +57514,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -56060,6 +57544,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -56144,6 +57629,16 @@ }, "type": "object" }, + 
"RegionDisksStartAsyncReplicationRequest": { + "id": "RegionDisksStartAsyncReplicationRequest", + "properties": { + "asyncSecondaryDisk": { + "description": "The secondary disk to start asynchronous replication to. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", + "type": "string" + } + }, + "type": "object" + }, "RegionInstanceGroupList": { "description": "Contains a list of InstanceGroup resources.", "id": "RegionInstanceGroupList", @@ -56188,6 +57683,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -56217,6 +57713,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -56322,6 +57819,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -56351,6 +57849,7 @@ "The operation involved use of an injected kernel, which is 
deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -56455,7 +57954,7 @@ "type": "array" }, "minimalAction": { - "description": "The minimal action that you want to perform on each instance during the update: - REPLACE: At minimum, delete the instance and create it again. - RESTART: Stop the instance and start it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt the instance at all. By default, the minimum action is NONE. If your update requires a more disruptive action than you set with this flag, the necessary action is performed to execute the update.", + "description": "The minimal action that you want to perform on each instance during the update: - REPLACE: At minimum, delete the instance and create it again. - RESTART: Stop the instance and start it again. - REFRESH: Do not stop the instance and limit disruption as much as possible. - NONE: Do not disrupt the instance at all. By default, the minimum action is NONE. If your update requires a more disruptive action than you set with this flag, the necessary action is performed to execute the update.", "enum": [ "NONE", "REFRESH", @@ -56464,14 +57963,14 @@ ], "enumDescriptions": [ "Do not perform any action.", - "Updates applied in runtime, instances will not be disrupted.", - "Old instances will be deleted. New instances will be created from the target template.", - "Every instance will be restarted." + "Do not stop the instance.", + "(Default.) 
Replace the instance according to the replacement method option.", + "Stop the instance and start it again." ], "type": "string" }, "mostDisruptiveAllowedAction": { - "description": "The most disruptive action that you want to perform on each instance during the update: - REPLACE: Delete the instance and create it again. - RESTART: Stop the instance and start it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt the instance at all. By default, the most disruptive allowed action is REPLACE. If your update requires a more disruptive action than you set with this flag, the update request will fail.", + "description": "The most disruptive action that you want to perform on each instance during the update: - REPLACE: Delete the instance and create it again. - RESTART: Stop the instance and start it again. - REFRESH: Do not stop the instance and limit disruption as much as possible. - NONE: Do not disrupt the instance at all. By default, the most disruptive allowed action is REPLACE. If your update requires a more disruptive action than you set with this flag, the update request will fail.", "enum": [ "NONE", "REFRESH", @@ -56480,9 +57979,9 @@ ], "enumDescriptions": [ "Do not perform any action.", - "Updates applied in runtime, instances will not be disrupted.", - "Old instances will be deleted. New instances will be created from the target template.", - "Every instance will be restarted." + "Do not stop the instance.", + "(Default.) Replace the instance according to the replacement method option.", + "Stop the instance and start it again." 
], "type": "string" } @@ -56567,6 +58066,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -56596,6 +58096,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -56744,6 +58245,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -56773,6 +58275,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -56905,6 +58408,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -56934,6 
+58438,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -57290,6 +58795,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -57319,6 +58825,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -57409,6 +58916,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -57438,6 +58946,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource 
can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -57522,6 +59031,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -57551,6 +59061,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -57668,6 +59179,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -57697,6 +59209,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance 
cannot ip forward.", @@ -57755,6 +59268,10 @@ "description": { "type": "string" }, + "diskConsistencyGroupPolicy": { + "$ref": "ResourcePolicyDiskConsistencyGroupPolicy", + "description": "Resource policy for disk consistency groups." + }, "groupPlacementPolicy": { "$ref": "ResourcePolicyGroupPlacementPolicy", "description": "Resource policy for instances for placement configuration." @@ -57874,6 +59391,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -57903,6 +59421,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -57970,6 +59489,12 @@ }, "type": "object" }, + "ResourcePolicyDiskConsistencyGroupPolicy": { + "description": "Resource policy for disk consistency groups.", + "id": "ResourcePolicyDiskConsistencyGroupPolicy", + "properties": {}, + "type": "object" + }, "ResourcePolicyGroupPlacementPolicy": { "description": "A GroupPlacementPolicy specifies resource placement configuration. 
It specifies the failure bucket separation as well as network locality", "id": "ResourcePolicyGroupPlacementPolicy", @@ -58103,6 +59628,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -58132,6 +59658,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -58418,6 +59945,10 @@ "description": "The URL to a gateway that should handle matching packets. You can only specify the internet gateway using a full or partial valid URL: projects/ project/global/gateways/default-internet-gateway", "type": "string" }, + "nextHopHub": { + "description": "[Output Only] The full resource name of the Network Connectivity Center hub that will handle matching packets.", + "type": "string" + }, "nextHopIlb": { "description": "The URL to a forwarding rule of type loadBalancingScheme=INTERNAL that should handle matching packets or the IP address of the forwarding Rule. 
For example, the following are all valid URLs: - 10.128.0.56 - https://www.googleapis.com/compute/v1/projects/project/regions/region /forwardingRules/forwardingRule - regions/region/forwardingRules/forwardingRule ", "type": "string" @@ -58517,6 +60048,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -58546,6 +60078,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -58669,6 +60202,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -58698,6 +60232,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -58901,6 +60436,7 @@ "INJECTED_KERNELS_DEPRECATED", 
"INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -58930,6 +60466,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -59069,6 +60606,18 @@ "$ref": "RouterBgpPeerBfd", "description": "BFD configuration for the BGP peering." }, + "customLearnedIpRanges": { + "description": "A list of user-defined custom learned route IP address ranges for a BGP session.", + "items": { + "$ref": "RouterBgpPeerCustomLearnedIpRange" + }, + "type": "array" + }, + "customLearnedRoutePriority": { + "description": "The user-defined custom learned route priority for a BGP session. This value is applied to all custom learned route ranges for the session. You can choose a value from `0` to `65335`. If you don't provide a value, Google Cloud assigns a priority of `100` to the ranges.", + "format": "int32", + "type": "integer" + }, "enable": { "description": "The status of the BGP peer connection. If set to FALSE, any active session with the peer is terminated and all associated routing information is removed. If set to TRUE, the peer connection can be established with routing information. 
The default is TRUE.", "enum": [ @@ -59183,6 +60732,16 @@ }, "type": "object" }, + "RouterBgpPeerCustomLearnedIpRange": { + "id": "RouterBgpPeerCustomLearnedIpRange", + "properties": { + "range": { + "description": "The custom learned route IP address range. Must be a valid CIDR-formatted prefix. If an IP address is provided without a subnet mask, it is interpreted as, for IPv4, a `/32` singular IP address range, and, for IPv6, `/128`.", + "type": "string" + } + }, + "type": "object" + }, "RouterInterface": { "id": "RouterInterface", "properties": { @@ -59280,6 +60839,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -59309,6 +60869,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -59385,6 +60946,22 @@ "description": "Represents a Nat resource. It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided.", "id": "RouterNat", "properties": { + "autoNetworkTier": { + "description": "The network tier to use when automatically reserving IP addresses. Must be one of: PREMIUM, STANDARD. 
If not specified, PREMIUM tier will be used.", + "enum": [ + "FIXED_STANDARD", + "PREMIUM", + "STANDARD", + "STANDARD_OVERRIDES_FIXED_STANDARD" + ], + "enumDescriptions": [ + "Public internet quality with fixed bandwidth.", + "High quality, Google-grade network tier, support for all networking products.", + "Public internet quality, only limited support for other networking products.", + "(Output only) Temporary tier for FIXED_STANDARD when fixed standard tier is expired or not configured." + ], + "type": "string" + }, "drainNatIps": { "description": "A list of URLs of the IP resources to be drained. These IPs must be valid static external IPs that have been assigned to the NAT. These IPs should be used for updating/patching a NAT only.", "items": { @@ -59465,7 +61042,7 @@ "type": "array" }, "sourceSubnetworkIpRangesToNat": { - "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", + "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. 
Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then there should not be any other Router.Nat section in any Router for this network in this region.", "enum": [ "ALL_SUBNETWORKS_ALL_IP_RANGES", "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", @@ -59891,6 +61468,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -59920,6 +61498,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -60443,6 +62022,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -60472,6 +62052,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -60554,6 +62135,7 @@ 
"INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -60583,6 +62165,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -60675,6 +62258,18 @@ "description": "[Output only] Type of the resource. Always compute#securityPolicyfor security policies", "type": "string" }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this security policy, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. To see the latest fingerprint, make get() request to the security policy.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "type": "object" + }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -60727,15 +62322,15 @@ "type": "object" }, "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig": { - "description": "Configuration options for L7 DDoS detection.", + "description": "Configuration options for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig", "properties": { "enable": { - "description": "If set to true, enables CAAP for L7 DDoS detection.", + "description": "If set to true, enables CAAP for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", "type": "boolean" }, "ruleVisibility": { - "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules.", + "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. 
This field is only supported in Global Security Policies of type CLOUD_ARMOR.", "enum": [ "PREMIUM", "STANDARD" @@ -60850,6 +62445,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -60879,6 +62475,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -60930,7 +62527,7 @@ "id": "SecurityPolicyRecaptchaOptionsConfig", "properties": { "redirectSiteKey": { - "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used.", + "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. 
This field is only supported in Global Security Policies of type CLOUD_ARMOR.", "type": "string" } }, @@ -60950,7 +62547,7 @@ "id": "SecurityPolicyRule", "properties": { "action": { - "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", + "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. 
Requires rate_limit_options to be set for this. ", "type": "string" }, "description": { @@ -60959,7 +62556,7 @@ }, "headerAction": { "$ref": "SecurityPolicyRuleHttpHeaderAction", - "description": "Optional, additional actions that are performed on headers." + "description": "Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR." }, "kind": { "default": "compute#securityPolicyRule", @@ -60989,7 +62586,7 @@ }, "redirectOptions": { "$ref": "SecurityPolicyRuleRedirectOptions", - "description": "Parameters defining the redirect action. Cannot be specified for any other actions." + "description": "Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR." } }, "type": "object" @@ -61031,7 +62628,7 @@ }, "expr": { "$ref": "Expr", - "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header." + "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing `evaluateThreatIntelligence` require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies." }, "versionedExpr": { "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. 
Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", @@ -61185,17 +62782,24 @@ ], "type": "string" }, + "enforceOnKeyConfigs": { + "description": "If specified, any combination of values of enforce_on_key_type/enforce_on_key_name is treated as the key on which ratelimit threshold/action is enforced. You can specify up to 3 enforce_on_key_configs. If enforce_on_key_configs is specified, enforce_on_key must not be specified.", + "items": { + "$ref": "SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig" + }, + "type": "array" + }, "enforceOnKeyName": { "description": "Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.", "type": "string" }, "exceedAction": { - "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below.", + "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below. The `redirect` action is only supported in Global Security Policies of type CLOUD_ARMOR.", "type": "string" }, "exceedRedirectOptions": { "$ref": "SecurityPolicyRuleRedirectOptions", - "description": "Parameters defining the redirect action that is used as the exceed action. 
Cannot be specified if the exceed action is not redirect." + "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. This field is only supported in Global Security Policies of type CLOUD_ARMOR." }, "rateLimitThreshold": { "$ref": "SecurityPolicyRuleRateLimitOptionsThreshold", @@ -61204,6 +62808,40 @@ }, "type": "object" }, + "SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig": { + "id": "SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig", + "properties": { + "enforceOnKeyName": { + "description": "Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.", + "type": "string" + }, + "enforceOnKeyType": { + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. 
- HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "enum": [ + "ALL", + "HTTP_COOKIE", + "HTTP_HEADER", + "HTTP_PATH", + "IP", + "REGION_CODE", + "SNI", + "XFF_IP" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "SecurityPolicyRuleRateLimitOptionsThreshold": { "id": "SecurityPolicyRuleRateLimitOptionsThreshold", "properties": { @@ -61247,11 +62885,11 @@ "id": "SecuritySettings", "properties": { "clientTlsPolicy": { - "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.", + "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted.", "type": "string" }, "subjectAltNames": { - "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. 
This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact.", + "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode).", "items": { "type": "string" }, @@ -61328,7 +62966,7 @@ "type": "object" }, "ServiceAttachment": { - "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service. next tag = 20", + "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. 
It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service.", "id": "ServiceAttachment", "properties": { "connectedEndpoints": { @@ -61425,6 +63063,10 @@ "$ref": "Uint128", "description": "[Output Only] An 128-bit global unique ID of the PSC service attachment." }, + "reconcileConnections": { + "description": "This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. - If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . - If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. For newly created service attachment, this boolean defaults to true.", + "type": "boolean" + }, "region": { "description": "[Output Only] URL of the region where the service attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. 
It is not settable as a field in the request body.", "type": "string" @@ -61492,6 +63134,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -61521,6 +63164,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -61666,6 +63310,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -61695,6 +63340,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -61768,6 +63414,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", 
"NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -61797,6 +63444,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -62059,6 +63707,7 @@ "name": { "annotations": { "required": [ + "compute.disks.createSnapshot", "compute.snapshots.insert" ] }, @@ -62199,6 +63848,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -62228,6 +63878,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -62521,6 +64172,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -62550,6 +64202,7 @@ "The operation involved use of an injected kernel, which is 
deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -62641,6 +64294,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -62670,6 +64324,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -62817,6 +64472,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -62846,6 +64502,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the 
amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -62947,6 +64604,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -62976,6 +64634,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -63066,6 +64725,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -63095,6 +64755,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -63180,6 +64841,7 @@ 
"INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -63209,6 +64871,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -63357,6 +65020,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -63386,6 +65050,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -63499,7 +65164,7 @@ "type": "string" }, "enableFlowLogs": { - "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. 
If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", + "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", "type": "boolean" }, "externalIpv6Prefix": { @@ -63581,7 +65246,7 @@ "type": "string" }, "purpose": { - "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", + "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. 
The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", "enum": [ "INTERNAL_HTTPS_LOAD_BALANCER", "PRIVATE", @@ -63603,7 +65268,7 @@ "type": "string" }, "role": { - "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", "enum": [ "ACTIVE", "BACKUP" @@ -63703,6 +65368,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -63732,6 +65398,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -63823,6 +65490,7 @@ "INJECTED_KERNELS_DEPRECATED", 
"INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -63852,6 +65520,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -63924,7 +65593,7 @@ "type": "string" }, "enable": { - "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled.", + "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. 
Flow logging isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", "type": "boolean" }, "filterExpr": { @@ -64011,6 +65680,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -64040,6 +65710,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -64282,6 +65953,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -64311,6 +65983,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -64384,6 +66057,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + 
"LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -64413,6 +66087,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -64477,6 +66152,11 @@ "format": "byte", "type": "string" }, + "httpKeepAliveTimeoutSec": { + "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For Global external HTTP(S) load balancer, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For Global external HTTP(S) load balancer (classic), this option is not available publicly.", + "format": "int32", + "type": "integer" + }, "id": { "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", "format": "uint64", @@ -64593,6 +66273,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -64622,6 +66303,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -64695,6 +66377,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -64724,6 +66407,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -64775,7 +66459,7 @@ "id": "TargetHttpsProxiesSetCertificateMapRequest", "properties": { "certificateMap": { - "description": "URL of the Certificate Map to associate with this TargetHttpsProxy.", + "description": "URL 
of the Certificate Map to associate with this TargetHttpsProxy. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", "type": "string" } }, @@ -64823,7 +66507,7 @@ "type": "string" }, "certificateMap": { - "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", + "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", "type": "string" }, "creationTimestamp": { @@ -64839,6 +66523,11 @@ "format": "byte", "type": "string" }, + "httpKeepAliveTimeoutSec": { + "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For Global external HTTP(S) load balancer, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For Global external HTTP(S) load balancer (classic), this option is not available publicly.", + "format": "int32", + "type": "integer" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -64881,7 +66570,7 @@ "type": "string" }, "serverTlsPolicy": { - "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. 
If left blank, communications are not encrypted. Note: This field currently has no impact.", + "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted.", "type": "string" }, "sslCertificates": { @@ -64953,6 +66642,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -64982,6 +66672,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -65073,6 +66764,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -65102,6 +66794,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of 
type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -65256,6 +66949,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -65285,6 +66979,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -65376,6 +67071,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -65405,6 +67101,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing 
type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -65478,6 +67175,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -65507,6 +67205,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -65690,6 +67389,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -65719,6 +67419,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -65827,6 +67528,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + 
"LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -65856,6 +67558,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -65981,6 +67684,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -66010,6 +67714,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -66080,7 +67785,7 @@ "id": "TargetSslProxiesSetCertificateMapRequest", "properties": { "certificateMap": { - "description": "URL of the Certificate Map to associate with this TargetSslProxy.", + "description": "URL of the Certificate Map to associate with this TargetSslProxy. 
Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", "type": "string" } }, @@ -66122,7 +67827,7 @@ "id": "TargetSslProxy", "properties": { "certificateMap": { - "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", + "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", "type": "string" }, "creationTimestamp": { @@ -66226,6 +67931,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -66255,6 +67961,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -66328,6 +68035,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -66357,6 +68065,7 @@ "The operation involved use of 
an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -66541,6 +68250,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -66570,6 +68280,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -66661,6 +68372,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -66690,6 +68402,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota 
exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -66766,6 +68479,18 @@ "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", "type": "string" }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this TargetVpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a TargetVpnGateway.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. 
Label values may be empty.", + "type": "object" + }, "name": { "annotations": { "required": [ @@ -66870,6 +68595,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -66899,6 +68625,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -66990,6 +68717,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -67019,6 +68747,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -67092,6 +68821,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", 
"NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -67121,6 +68851,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -67379,6 +69110,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -67408,6 +69140,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -67596,6 +69329,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -67625,6 +69359,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a 
deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -67698,6 +69433,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -67727,6 +69463,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -67821,6 +69558,10 @@ "pathPrefixRewrite": { "description": "Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. The value must be from 1 to 1024 characters.", "type": "string" + }, + "pathTemplateRewrite": { + "description": " If specified, the pattern rewrites the URL path (based on the :path header) using the HTTP template syntax. A corresponding path_template_match must be specified. Any template variables must exist in the path_template_match field. 
- -At least one variable must be specified in the path_template_match field - You can omit variables from the rewritten URL - The * and ** operators cannot be matched unless they have a corresponding variable name - e.g. {format=*} or {var=**}. For example, a path_template_match of /static/{format=**} could be rewritten as /static/content/{format} to prefix /content to the URL. Variables can also be re-ordered in a rewrite, so that /{country}/{format}/{suffix=**} can be rewritten as /content/{format}/{country}/{suffix}. At least one non-empty routeRules[].matchRules[].path_template_match is required. Only one of path_prefix_rewrite or path_template_rewrite may be specified.", + "type": "string" } }, "type": "object" @@ -67858,7 +69599,7 @@ "type": "string" }, "purpose": { - "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", + "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. 
If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", "enum": [ "INTERNAL_HTTPS_LOAD_BALANCER", "PRIVATE", @@ -67876,7 +69617,7 @@ "type": "string" }, "role": { - "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. 
This field can be updated with a patch request.", "enum": [ "ACTIVE", "BACKUP" @@ -67971,6 +69712,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -68000,6 +69742,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -68205,6 +69948,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -68234,6 +69978,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -68415,6 +70160,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", 
"NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -68444,6 +70190,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -68535,6 +70282,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -68564,6 +70312,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -68663,7 +70412,7 @@ "type": "integer" }, "peerGatewayInterface": { - "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or GCP VPN gateway.", + "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", "format": "uint32", "type": "integer" }, @@ -68675,7 +70424,7 @@ "type": "object" 
}, "VpnGatewayStatusVpnConnection": { - "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be a external VPN gateway or GCP VPN gateway.", + "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", "id": "VpnGatewayStatusVpnConnection", "properties": { "peerExternalGateway": { @@ -68755,6 +70504,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -68784,6 +70534,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -68862,6 +70613,18 @@ "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", "type": "string" }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this VpnTunnel, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. 
To see the latest fingerprint, make a get() request to retrieve a VpnTunnel.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "type": "object" + }, "localTrafficSelector": { "description": "Local traffic selector to use when establishing the VPN tunnel with the peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges must be disjoint. Only IPv4 is supported.", "items": { @@ -69024,6 +70787,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -69053,6 +70817,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -69144,6 +70909,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -69173,6 +70939,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When 
deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -69246,6 +71013,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -69275,6 +71043,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -69424,6 +71193,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -69453,6 +71223,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp 
address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", @@ -69631,6 +71402,7 @@ "INJECTED_KERNELS_DEPRECATED", "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -69660,6 +71432,7 @@ "The operation involved use of an injected kernel, which is deprecated.", "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", "A resource depends on a missing type", "The route's nextHopIp address is not assigned to an instance on the network.", "The route's next hop instance cannot ip forward.", diff --git a/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-gen.go b/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-gen.go index c30ae0d4e1..6617549790 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "compute:v1" const apiName = "compute" @@ -172,6 +173,7 @@ func New(client *http.Client) (*Service, error) { s.Instances = NewInstancesService(s) s.InterconnectAttachments = NewInterconnectAttachmentsService(s) s.InterconnectLocations = NewInterconnectLocationsService(s) + s.InterconnectRemoteLocations = NewInterconnectRemoteLocationsService(s) s.Interconnects = NewInterconnectsService(s) s.LicenseCodes = NewLicenseCodesService(s) s.Licenses = NewLicensesService(s) @@ 
-299,6 +301,8 @@ type Service struct { InterconnectLocations *InterconnectLocationsService + InterconnectRemoteLocations *InterconnectRemoteLocationsService + Interconnects *InterconnectsService LicenseCodes *LicenseCodesService @@ -685,6 +689,15 @@ type InterconnectLocationsService struct { s *Service } +func NewInterconnectRemoteLocationsService(s *Service) *InterconnectRemoteLocationsService { + rs := &InterconnectRemoteLocationsService{s: s} + return rs +} + +type InterconnectRemoteLocationsService struct { + s *Service +} + func NewInterconnectsService(s *Service) *InterconnectsService { rs := &InterconnectsService{s: s} return rs @@ -1448,6 +1461,9 @@ type AcceleratorTypeAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -1638,6 +1654,9 @@ type AcceleratorTypeListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -1807,6 +1826,9 @@ type AcceleratorTypesScopedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -1917,32 +1939,35 @@ func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { // AccessConfig: An access configuration attached to an instance's // network interface. Only one access config per instance is supported. type AccessConfig struct { - // ExternalIpv6: The first IPv6 address of the external IPv6 range - // associated with this instance, prefix length is stored in - // externalIpv6PrefixLength in ipv6AccessConfig. To use a static - // external IP address, it must be unused and in the same region as the - // instance's zone. If not specified, Google Cloud will automatically - // assign an external IPv6 address from the instance's subnetwork. + // ExternalIpv6: Applies to ipv6AccessConfigs only. The first IPv6 + // address of the external IPv6 range associated with this instance, + // prefix length is stored in externalIpv6PrefixLength in + // ipv6AccessConfig. To use a static external IP address, it must be + // unused and in the same region as the instance's zone. If not + // specified, Google Cloud will automatically assign an external IPv6 + // address from the instance's subnetwork. ExternalIpv6 string `json:"externalIpv6,omitempty"` - // ExternalIpv6PrefixLength: The prefix length of the external IPv6 - // range. + // ExternalIpv6PrefixLength: Applies to ipv6AccessConfigs only. The + // prefix length of the external IPv6 range. ExternalIpv6PrefixLength int64 `json:"externalIpv6PrefixLength,omitempty"` // Kind: [Output Only] Type of the resource. 
Always compute#accessConfig // for access configs. Kind string `json:"kind,omitempty"` - // Name: The name of this access configuration. The default and - // recommended name is External NAT, but you can use any arbitrary - // string, such as My external IP or Network Access. + // Name: The name of this access configuration. In accessConfigs (IPv4), + // the default and recommended name is External NAT, but you can use any + // arbitrary string, such as My external IP or Network Access. In + // ipv6AccessConfigs, the recommend name is External IPv6. Name string `json:"name,omitempty"` - // NatIP: An external IP address associated with this instance. Specify - // an unused static external IP address available to the project or - // leave this field undefined to use an IP from a shared ephemeral IP - // address pool. If you specify a static external IP address, it must - // live in the same region as the zone of the instance. + // NatIP: Applies to accessConfigs (IPv4) only. An external IP address + // associated with this instance. Specify an unused static external IP + // address available to the project or leave this field undefined to use + // an IP from a shared ephemeral IP address pool. If you specify a + // static external IP address, it must live in the same region as the + // zone of the instance. NatIP string `json:"natIP,omitempty"` // NetworkTier: This signifies the networking tier used for configuring @@ -1978,12 +2003,13 @@ type AccessConfig struct { // associated. SetPublicPtr bool `json:"setPublicPtr,omitempty"` - // Type: The type of configuration. The default and only option is - // ONE_TO_ONE_NAT. + // Type: The type of configuration. In accessConfigs (IPv4), the default + // and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default + // and only option is DIRECT_IPV6. 
// // Possible values: // "DIRECT_IPV6" - // "ONE_TO_ONE_NAT" (default) + // "ONE_TO_ONE_NAT" Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "ExternalIpv6") to @@ -2042,8 +2068,7 @@ type Address struct { Id uint64 `json:"id,omitempty,string"` // IpVersion: The IP version that will be used by this address. Valid - // options are IPV4 or IPV6. This can only be specified for a global - // address. + // options are IPV4 or IPV6. // // Possible values: // "IPV4" @@ -2065,6 +2090,21 @@ type Address struct { // addresses. Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // Address, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels, otherwise the request will fail with error + // 412 conditionNotMet. To see the latest fingerprint, make a get() + // request to retrieve an Address. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels for this resource. These can only be added or modified + // by the setLabels method. Each label key/value pair must comply with + // RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -2285,6 +2325,9 @@ type AddressAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -2474,6 +2517,9 @@ type AddressListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -2641,6 +2687,9 @@ type AddressesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -3140,6 +3189,17 @@ type AttachedDisk struct { // read-write mode. Mode string `json:"mode,omitempty"` + // SavedState: For LocalSSD disks on VM Instances in STOPPED or + // SUSPENDED state, this field is set to PRESERVED if the LocalSSD data + // has been saved to a persistent location by customer request. (see the + // discard_local_ssd option on Stop/Suspend). Read-only in the api. 
+ // + // Possible values: + // "DISK_SAVED_STATE_UNSPECIFIED" - *[Default]* Disk state has not + // been preserved. + // "PRESERVED" - Disk state has been preserved. + SavedState string `json:"savedState,omitempty"` + // ShieldedInstanceInitialState: [Output Only] shielded vm initial state // stored on disk ShieldedInstanceInitialState *InitialStateConfig `json:"shieldedInstanceInitialState,omitempty"` @@ -3263,6 +3323,18 @@ type AttachedDiskInitializeParams struct { // see the Extreme persistent disk documentation. ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + // ProvisionedThroughput: Indicates how much throughput to provision for + // the disk. This sets the number of throughput mb per second that the + // disk can handle. Values must be between 1 and 7,124. + ProvisionedThroughput int64 `json:"provisionedThroughput,omitempty,string"` + + // ReplicaZones: Required for each regional disk associated with the + // instance. Specify the URLs of the zones where the disk should be + // replicated to. You must provide exactly two replica zones, and one + // zone must be the same as the instance zone. You can't use this option + // with boot disks. + ReplicaZones []string `json:"replicaZones,omitempty"` + // ResourceManagerTags: Resource manager tags to be bound to the disk. // Tag keys and values have the same definition as resource manager // tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values @@ -3681,6 +3753,9 @@ type AutoscalerAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
// "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -3870,6 +3945,9 @@ type AutoscalerListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -4153,6 +4231,9 @@ type AutoscalersScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -4262,15 +4343,17 @@ func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { // AutoscalingPolicy: Cloud Autoscaler policy. type AutoscalingPolicy struct { - // CoolDownPeriodSec: The number of seconds that the autoscaler waits - // before it starts collecting information from a new instance. This - // prevents the autoscaler from collecting information when the instance - // is initializing, during which the collected usage would not be - // reliable. The default time autoscaler waits is 60 seconds. Virtual - // machine initialization times might vary because of numerous factors. 
- // We recommend that you test how long an instance may take to - // initialize. To do this, create an instance and time the startup - // process. + // CoolDownPeriodSec: The number of seconds that your application takes + // to initialize on a VM instance. This is referred to as the + // initialization period (/compute/docs/autoscaler#cool_down_period). + // Specifying an accurate initialization period improves autoscaler + // decisions. For example, when scaling out, the autoscaler ignores data + // from VMs that are still initializing because those VMs might not yet + // represent normal usage of your application. The default + // initialization period is 60 seconds. Initialization periods might + // vary because of numerous factors. We recommend that you test how long + // your application takes to initialize. To do this, create a VM and + // time your application's startup process. CoolDownPeriodSec int64 `json:"coolDownPeriodSec,omitempty"` // CpuUtilization: Defines the CPU utilization policy that allows the @@ -4298,7 +4381,12 @@ type AutoscalingPolicy struct { // instances allowed. MinNumReplicas int64 `json:"minNumReplicas,omitempty"` - // Mode: Defines operating mode for this policy. + // Mode: Defines the operating mode for this policy. The following modes + // are available: - OFF: Disables the autoscaler but maintains its + // configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM + // instances only. - ON: Enables all autoscaler activities according to + // its policy. For more information, see "Turning off or restricting an + // autoscaler" // // Possible values: // "OFF" - Do not automatically scale the MIG in or out. The @@ -5246,6 +5334,9 @@ type BackendBucketListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -5591,6 +5682,10 @@ type BackendService struct { // loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED. MaxStreamDuration *Duration `json:"maxStreamDuration,omitempty"` + // Metadatas: Deployment metadata associated with the resource to be set + // by a GKE hub controller and read by the backend RCTH + Metadatas map[string]string `json:"metadatas,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -5835,6 +5930,9 @@ type BackendServiceAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -6505,6 +6603,9 @@ type BackendServiceListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -6906,6 +7007,9 @@ type BackendServicesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -7340,6 +7444,44 @@ func (s *Binding) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BulkInsertDiskResource: A transient resource used in +// compute.disks.bulkInsert and compute.regionDisks.bulkInsert. It is +// only used to process requests and is not persisted. +type BulkInsertDiskResource struct { + // SourceConsistencyGroupPolicy: The URL of the + // DiskConsistencyGroupPolicy for the group of disks to clone. This may + // be a full or partial URL, such as: - + // https://www.googleapis.com/compute/v1/projects/project/regions/region + // /resourcePolicies/resourcePolicy - + // projects/project/regions/region/resourcePolicies/resourcePolicy - + // regions/region/resourcePolicies/resourcePolicy + SourceConsistencyGroupPolicy string `json:"sourceConsistencyGroupPolicy,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
+ // "SourceConsistencyGroupPolicy") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "SourceConsistencyGroupPolicy") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BulkInsertDiskResource) MarshalJSON() ([]byte, error) { + type NoMethod BulkInsertDiskResource + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BulkInsertInstanceResource: A transient resource used in // compute.instances.bulkInsert and compute.regionInstances.bulkInsert . // This resource is not persisted anywhere, it is used only for @@ -7685,7 +7827,7 @@ type Commitment struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SplitSourceCommitment: Source commitment to be splitted into a new + // SplitSourceCommitment: Source commitment to be split into a new // commitment. 
SplitSourceCommitment string `json:"splitSourceCommitment,omitempty"` @@ -7726,6 +7868,7 @@ type Commitment struct { // "GENERAL_PURPOSE_N2" // "GENERAL_PURPOSE_N2D" // "GENERAL_PURPOSE_T2D" + // "GRAPHICS_OPTIMIZED" // "MEMORY_OPTIMIZED" // "MEMORY_OPTIMIZED_M3" // "TYPE_UNSPECIFIED" @@ -7843,6 +7986,9 @@ type CommitmentAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -8032,6 +8178,9 @@ type CommitmentListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -8200,6 +8349,9 @@ type CommitmentsScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. 
@@ -8595,7 +8747,10 @@ type CustomerEncryptionKey struct { // KmsKeyName: The name of the encryption key that is stored in Google // Cloud KMS. For example: "kmsKeyName": // "projects/kms_project_id/locations/region/keyRings/ - // key_region/cryptoKeys/key + // key_region/cryptoKeys/key The fully-qualifed key name may be returned + // for resource GET requests. For example: "kmsKeyName": + // "projects/kms_project_id/locations/region/keyRings/ + // key_region/cryptoKeys/key /cryptoKeyVersions/1 KmsKeyName string `json:"kmsKeyName,omitempty"` // KmsKeyServiceAccount: The service account being used for the @@ -8773,6 +8928,13 @@ type Disk struct { // "X86_64" - Machines with architecture X86_64 Architecture string `json:"architecture,omitempty"` + // AsyncPrimaryDisk: Disk asynchronously replicated into this disk. + AsyncPrimaryDisk *DiskAsyncReplication `json:"asyncPrimaryDisk,omitempty"` + + // AsyncSecondaryDisks: [Output Only] A list of disks this disk is + // asynchronously replicated to. + AsyncSecondaryDisks map[string]DiskAsyncReplicationList `json:"asyncSecondaryDisks,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -8877,6 +9039,11 @@ type Disk struct { // see the Extreme persistent disk documentation. ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + // ProvisionedThroughput: Indicates how much throughput to provision for + // the disk. This sets the number of throughput mb per second that the + // disk can handle. Values must be between 1 and 7,124. + ProvisionedThroughput int64 `json:"provisionedThroughput,omitempty,string"` + // Region: [Output Only] URL of the region where the disk resides. Only // applicable for regional resources. You must specify this field as // part of the HTTP request URL. It is not settable as a field in the @@ -8891,6 +9058,10 @@ type Disk struct { // automatic snapshot creations. 
ResourcePolicies []string `json:"resourcePolicies,omitempty"` + // ResourceStatus: [Output Only] Status information for the disk + // resource. + ResourceStatus *DiskResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -8906,6 +9077,16 @@ type Disk struct { // source. Acceptable values are 1 to 65536, inclusive. SizeGb int64 `json:"sizeGb,omitempty,string"` + // SourceConsistencyGroupPolicy: [Output Only] URL of the + // DiskConsistencyGroupPolicy for a secondary disk that was created + // using a consistency group. + SourceConsistencyGroupPolicy string `json:"sourceConsistencyGroupPolicy,omitempty"` + + // SourceConsistencyGroupPolicyId: [Output Only] ID of the + // DiskConsistencyGroupPolicy for a secondary disk that was created + // using a consistency group. + SourceConsistencyGroupPolicyId string `json:"sourceConsistencyGroupPolicyId,omitempty"` + // SourceDisk: The source disk used to create this disk. You can provide // this as a partial or full URL to the resource. For example, the // following are valid values: - @@ -9123,6 +9304,9 @@ type DiskAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. 
@@ -9230,6 +9414,86 @@ func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type DiskAsyncReplication struct { + // ConsistencyGroupPolicy: [Output Only] URL of the + // DiskConsistencyGroupPolicy if replication was started on the disk as + // a member of a group. + ConsistencyGroupPolicy string `json:"consistencyGroupPolicy,omitempty"` + + // ConsistencyGroupPolicyId: [Output Only] ID of the + // DiskConsistencyGroupPolicy if replication was started on the disk as + // a member of a group. + ConsistencyGroupPolicyId string `json:"consistencyGroupPolicyId,omitempty"` + + // Disk: The other disk asynchronously replicated to or from the current + // disk. You can provide this as a partial or full URL to the resource. + // For example, the following are valid values: - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone + // /disks/disk - projects/project/zones/zone/disks/disk - + // zones/zone/disks/disk + Disk string `json:"disk,omitempty"` + + // DiskId: [Output Only] The unique ID of the other disk asynchronously + // replicated to or from the current disk. This value identifies the + // exact disk that was used to create this replication. For example, if + // you started replicating the persistent disk from a disk that was + // later deleted and recreated under the same name, the disk ID would + // identify the exact version of the disk that was used. + DiskId string `json:"diskId,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "ConsistencyGroupPolicy") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsistencyGroupPolicy") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DiskAsyncReplication) MarshalJSON() ([]byte, error) { + type NoMethod DiskAsyncReplication + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskAsyncReplicationList struct { + AsyncReplicationDisk *DiskAsyncReplication `json:"asyncReplicationDisk,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AsyncReplicationDisk") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AsyncReplicationDisk") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *DiskAsyncReplicationList) MarshalJSON() ([]byte, error) { + type NoMethod DiskAsyncReplicationList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskInstantiationConfig: A specification of the desired way to // instantiate a disk in the instance template when its created from a // source instance. @@ -9392,6 +9656,9 @@ type DiskListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -9571,6 +9838,70 @@ func (s *DiskParams) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type DiskResourceStatus struct { + AsyncPrimaryDisk *DiskResourceStatusAsyncReplicationStatus `json:"asyncPrimaryDisk,omitempty"` + + // AsyncSecondaryDisks: Key: disk, value: AsyncReplicationStatus message + AsyncSecondaryDisks map[string]DiskResourceStatusAsyncReplicationStatus `json:"asyncSecondaryDisks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AsyncPrimaryDisk") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"AsyncPrimaryDisk") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DiskResourceStatus) MarshalJSON() ([]byte, error) { + type NoMethod DiskResourceStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskResourceStatusAsyncReplicationStatus struct { + // Possible values: + // "ACTIVE" - Replication is active. + // "CREATED" - Secondary disk is created and is waiting for + // replication to start. + // "STARTING" - Replication is starting. + // "STATE_UNSPECIFIED" + // "STOPPED" - Replication is stopped. + // "STOPPING" - Replication is stopping. + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "State") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "State") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *DiskResourceStatusAsyncReplicationStatus) MarshalJSON() ([]byte, error) { + type NoMethod DiskResourceStatusAsyncReplicationStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskType: Represents a Disk Type resource. Google Compute Engine has // two Disk Type resources: * Regional // (/compute/docs/reference/rest/v1/regionDiskTypes) * Zonal @@ -9739,6 +10070,9 @@ type DiskTypeAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -9928,6 +10262,9 @@ type DiskTypeListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -10096,6 +10433,9 @@ type DiskTypesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
// "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -10348,6 +10688,9 @@ type DisksScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -10455,6 +10798,79 @@ func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type DisksStartAsyncReplicationRequest struct { + // AsyncSecondaryDisk: The secondary disk to start asynchronous + // replication to. You can provide this as a partial or full URL to the + // resource. For example, the following are valid values: - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone + // /disks/disk - + // https://www.googleapis.com/compute/v1/projects/project/regions/region + // /disks/disk - projects/project/zones/zone/disks/disk - + // projects/project/regions/region/disks/disk - zones/zone/disks/disk - + // regions/region/disks/disk + AsyncSecondaryDisk string `json:"asyncSecondaryDisk,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AsyncSecondaryDisk") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AsyncSecondaryDisk") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DisksStartAsyncReplicationRequest) MarshalJSON() ([]byte, error) { + type NoMethod DisksStartAsyncReplicationRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DisksStopGroupAsyncReplicationResource: A transient resource used in +// compute.disks.stopGroupAsyncReplication and +// compute.regionDisks.stopGroupAsyncReplication. It is only used to +// process requests and is not persisted. +type DisksStopGroupAsyncReplicationResource struct { + // ResourcePolicy: The URL of the DiskConsistencyGroupPolicy for the + // group of disks to stop. This may be a full or partial URL, such as: - + // https://www.googleapis.com/compute/v1/projects/project/regions/region + // /resourcePolicies/resourcePolicy - + // projects/project/regions/region/resourcePolicies/resourcePolicy - + // regions/region/resourcePolicies/resourcePolicy + ResourcePolicy string `json:"resourcePolicy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ResourcePolicy") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"ResourcePolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DisksStopGroupAsyncReplicationResource) MarshalJSON() ([]byte, error) { + type NoMethod DisksStopGroupAsyncReplicationResource + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DisplayDevice: A set of Display Device options type DisplayDevice struct { // EnableDisplay: Defines whether the instance has Display enabled. @@ -10791,6 +11207,9 @@ type ExchangedPeeringRoutesListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -11200,6 +11619,9 @@ type ExternalVpnGatewayListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. 
@@ -11669,6 +12091,9 @@ type FirewallListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -12092,6 +12517,9 @@ type FirewallPolicyListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -12303,38 +12731,77 @@ func (s *FirewallPolicyRule) MarshalJSON() ([]byte, error) { // FirewallPolicyRuleMatcher: Represents a match condition that incoming // traffic is evaluated against. Exactly one field must be specified. type FirewallPolicyRuleMatcher struct { + // DestAddressGroups: Address groups which should be matched against the + // traffic destination. Maximum number of destination address groups is + // 10. + DestAddressGroups []string `json:"destAddressGroups,omitempty"` + + // DestFqdns: Fully Qualified Domain Name (FQDN) which should be matched + // against traffic destination. Maximum number of destination fqdn + // allowed is 100. + DestFqdns []string `json:"destFqdns,omitempty"` + // DestIpRanges: CIDR IP address range. Maximum number of destination // CIDR IP ranges allowed is 5000. 
DestIpRanges []string `json:"destIpRanges,omitempty"` + // DestRegionCodes: Region codes whose IP addresses will be used to + // match for destination of traffic. Should be specified as 2 letter + // country code defined as per ISO 3166 alpha-2 country codes. ex."US" + // Maximum number of dest region codes allowed is 5000. + DestRegionCodes []string `json:"destRegionCodes,omitempty"` + + // DestThreatIntelligences: Names of Network Threat Intelligence lists. + // The IPs in these lists will be matched against traffic destination. + DestThreatIntelligences []string `json:"destThreatIntelligences,omitempty"` + // Layer4Configs: Pairs of IP protocols and ports that the rule should // match. Layer4Configs []*FirewallPolicyRuleMatcherLayer4Config `json:"layer4Configs,omitempty"` + // SrcAddressGroups: Address groups which should be matched against the + // traffic source. Maximum number of source address groups is 10. + SrcAddressGroups []string `json:"srcAddressGroups,omitempty"` + + // SrcFqdns: Fully Qualified Domain Name (FQDN) which should be matched + // against traffic source. Maximum number of source fqdn allowed is 100. + SrcFqdns []string `json:"srcFqdns,omitempty"` + // SrcIpRanges: CIDR IP address range. Maximum number of source CIDR IP // ranges allowed is 5000. SrcIpRanges []string `json:"srcIpRanges,omitempty"` + // SrcRegionCodes: Region codes whose IP addresses will be used to match + // for source of traffic. Should be specified as 2 letter country code + // defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number + // of source region codes allowed is 5000. + SrcRegionCodes []string `json:"srcRegionCodes,omitempty"` + // SrcSecureTags: List of secure tag values, which should be matched at // the source of the traffic. For INGRESS rule, if all the srcSecureTag // are INEFFECTIVE, and there is no srcIpRange, this rule will be // ignored. Maximum number of source tag values allowed is 256. 
SrcSecureTags []*FirewallPolicyRuleSecureTag `json:"srcSecureTags,omitempty"` - // ForceSendFields is a list of field names (e.g. "DestIpRanges") to - // unconditionally include in API requests. By default, fields with + // SrcThreatIntelligences: Names of Network Threat Intelligence lists. + // The IPs in these lists will be matched against traffic source. + SrcThreatIntelligences []string `json:"srcThreatIntelligences,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestAddressGroups") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DestIpRanges") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "DestAddressGroups") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
NullFields []string `json:"-"` } @@ -12528,11 +12995,18 @@ type ForwardingRule struct { // AllowGlobalAccess: This field is used along with the backend_service // field for internal load balancing or with the target field for - // internal TargetInstance. If the field is set to TRUE, clients can - // access ILB from all regions. Otherwise only allows access from - // clients in the same region as the internal load balancer. + // internal TargetInstance. If set to true, clients can access the + // Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load + // Balancer from all regions. If false, only allows access from the + // local region the load balancer is located at. Note that for + // INTERNAL_MANAGED forwarding rules, this field cannot be changed after + // the forwarding rule is created. AllowGlobalAccess bool `json:"allowGlobalAccess,omitempty"` + // AllowPscGlobalAccess: This is used in PSC consumer ForwardingRule to + // control whether the PSC endpoint can be accessed from another region. + AllowPscGlobalAccess bool `json:"allowPscGlobalAccess,omitempty"` + // BackendService: Identifies the backend service to which the // forwarding rule sends traffic. Required for Internal TCP/UDP Load // Balancing and Network Load Balancing; must be omitted for all other @@ -12651,9 +13125,10 @@ type ForwardingRule struct { // Network: This field is not used for external load balancing. For // Internal TCP/UDP Load Balancing, this field identifies the network // that the load balanced IP should belong to for this Forwarding Rule. - // If this field is not specified, the default network will be used. For - // Private Service Connect forwarding rules that forward traffic to - // Google APIs, a network must be provided. + // If the subnetwork is specified, the network of the subnetwork will be + // used. If neither subnetwork nor this field is specified, the default + // network will be used. 
For Private Service Connect forwarding rules + // that forward traffic to Google APIs, a network must be provided. Network string `json:"network,omitempty"` // NetworkTier: This signifies the networking tier used for configuring @@ -12903,6 +13378,9 @@ type ForwardingRuleAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -13092,6 +13570,9 @@ type ForwardingRuleListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -13330,6 +13811,9 @@ type ForwardingRulesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. 
@@ -13511,6 +13995,43 @@ func (s *GRPCHealthCheck) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type GlobalAddressesMoveRequest struct { + // Description: An optional destination address description if intended + // to be different from the source. + Description string `json:"description,omitempty"` + + // DestinationAddress: The URL of the destination address to move to. + // This can be a full or partial URL. For example, the following are all + // valid URLs to a address: - + // https://www.googleapis.com/compute/v1/projects/project + // /global/addresses/address - projects/project/global/addresses/address + // Note that destination project must be different from the source + // project. So /global/addresses/address is not valid partial url. + DestinationAddress string `json:"destinationAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GlobalAddressesMoveRequest) MarshalJSON() ([]byte, error) { + type NoMethod GlobalAddressesMoveRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type GlobalNetworkEndpointGroupsAttachEndpointsRequest struct { // NetworkEndpoints: The list of network endpoints to be attached. NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` @@ -13800,8 +14321,8 @@ type GuestOsFeature struct { // commas to separate values. Set to one or more of the following // values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - // UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - - // SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling - // guest operating system features. + // SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see + // Enabling guest operating system features. // // Possible values: // "FEATURE_TYPE_UNSPECIFIED" @@ -13809,6 +14330,7 @@ type GuestOsFeature struct { // "MULTI_IP_SUBNET" // "SECURE_BOOT" // "SEV_CAPABLE" + // "SEV_LIVE_MIGRATABLE" // "SEV_SNP_CAPABLE" // "UEFI_COMPATIBLE" // "VIRTIO_SCSI_MULTIQUEUE" @@ -14126,12 +14648,12 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { // (/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) // load balancers must use regional health checks // (`compute.v1.regionHealthChecks`). Traffic Director must use global -// health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load +// health checks (`compute.v1.healthChecks`). Internal TCP/UDP load // balancers can use either regional or global health checks -// (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). +// (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). // External HTTP(S), TCP proxy, and SSL proxy load balancers as well as // managed instance group auto-healing must use global health checks -// (`compute.v1.HealthChecks`). 
Backend service-based network load +// (`compute.v1.healthChecks`). Backend service-based network load // balancers must use regional health checks // (`compute.v1.regionHealthChecks`). Target pool-based network load // balancers must use legacy HTTP health checks @@ -14326,6 +14848,9 @@ type HealthCheckListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -14732,6 +15257,9 @@ type HealthCheckServicesListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -14923,6 +15451,9 @@ type HealthChecksAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. 
@@ -15090,6 +15621,9 @@ type HealthChecksScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -15209,7 +15743,7 @@ type HealthStatus struct { // instance. ForwardingRuleIp string `json:"forwardingRuleIp,omitempty"` - // HealthState: Health state of the instance. + // HealthState: Health state of the IPv4 address of the instance. // // Possible values: // "HEALTHY" @@ -15294,10 +15828,10 @@ type HealthStatusForNetworkEndpoint struct { // the health checks configured. // // Possible values: - // "DRAINING" - // "HEALTHY" - // "UNHEALTHY" - // "UNKNOWN" + // "DRAINING" - Endpoint is being drained. + // "HEALTHY" - Endpoint is healthy. + // "UNHEALTHY" - Endpoint is unhealthy. + // "UNKNOWN" - Health status of the endpoint is unknown. HealthState string `json:"healthState,omitempty"` // ForceSendFields is a list of field names (e.g. "BackendService") to @@ -15919,6 +16453,9 @@ type HttpHealthCheckListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. 
@@ -16450,6 +16987,15 @@ type HttpRouteRuleMatch struct { // validateForProxyless field set to true. MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` + // PathTemplateMatch: If specified, the route is a pattern match + // expression that must match the :path header once the query string is + // removed. A pattern match allows you to match - The value must be + // between 1 and 1024 characters - The pattern must start with a leading + // slash ("/") - There may be no more than 5 operators in pattern + // Precisely one of prefix_match, full_path_match, regex_match or + // path_template_match must be set. + PathTemplateMatch string `json:"pathTemplateMatch,omitempty"` + // PrefixMatch: For satisfying the matchRule condition, the request's // path must begin with the specified prefixMatch. prefixMatch must // begin with a /. The value must be from 1 to 1024 characters. Only one @@ -16670,6 +17216,9 @@ type HttpsHealthCheckListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -17156,6 +17705,9 @@ type ImageListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
// "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -17363,6 +17915,15 @@ type Instance struct { // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` + // InstanceEncryptionKey: Encrypts suspended data for an instance with a + // customer-managed encryption key. If you are creating a new instance, + // this field will encrypt the local SSD and in-memory contents of the + // instance during the suspend operation. If you do not provide an + // encryption key when creating the instance, then the local SSD and + // in-memory contents will be encrypted using an automatically generated + // key during the suspend operation. + InstanceEncryptionKey *CustomerEncryptionKey `json:"instanceEncryptionKey,omitempty"` + // KeyRevocationActionType: KeyRevocationActionType of the instance. // Supported options are "STOP" and "NONE". The default value is "NONE" // if it is not specified. @@ -17520,9 +18081,9 @@ type Instance struct { // cycle. // // Possible values: - // "DEPROVISIONING" - The Nanny is halted and we are performing tear - // down tasks like network deprogramming, releasing quota, IP, tearing - // down disks etc. + // "DEPROVISIONING" - The instance is halted and we are performing + // tear down tasks like network deprogramming, releasing quota, IP, + // tearing down disks etc. // "PROVISIONING" - Resources are being allocated for the instance. // "REPAIRING" - The instance is in repair. // "RUNNING" - The instance is running. @@ -17668,6 +18229,9 @@ type InstanceAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
// "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -18031,6 +18595,9 @@ type InstanceGroupAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -18221,6 +18788,9 @@ type InstanceGroupListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -18664,6 +19234,9 @@ type InstanceGroupManagerAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. 
@@ -18775,13 +19348,14 @@ type InstanceGroupManagerAutoHealingPolicy struct { // HealthCheck: The URL for the health check that signals autohealing. HealthCheck string `json:"healthCheck,omitempty"` - // InitialDelaySec: The number of seconds that the managed instance - // group waits before it applies autohealing policies to new instances - // or recently recreated instances. This initial delay allows instances - // to initialize and run their startup scripts before the instance group - // determines that they are UNHEALTHY. This prevents the managed - // instance group from recreating its instances prematurely. This value - // must be from range [0, 3600]. + // InitialDelaySec: The initial delay is the number of seconds that a + // new VM takes to initialize and run its startup script. During a VM's + // initial delay period, the MIG ignores unsuccessful health checks + // because the VM might be in the startup process. This prevents the MIG + // from prematurely recreating a VM. If the health check receives a + // healthy response during the initial delay, it indicates that the + // startup process is complete and the VM is ready. The value of initial + // delay must be between 0 and 3600 seconds. The default value is 0. InitialDelaySec int64 `json:"initialDelaySec,omitempty"` // ForceSendFields is a list of field names (e.g. "HealthCheck") to @@ -18892,6 +19466,9 @@ type InstanceGroupManagerListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. 
@@ -19195,29 +19772,27 @@ type InstanceGroupManagerUpdatePolicy struct { // // Possible values: // "NONE" - Do not perform any action. - // "REFRESH" - Updates applied in runtime, instances will not be - // disrupted. - // "REPLACE" - Old instances will be deleted. New instances will be - // created from the target template. - // "RESTART" - Every instance will be restarted. + // "REFRESH" - Do not stop the instance. + // "REPLACE" - (Default.) Replace the instance according to the + // replacement method option. + // "RESTART" - Stop the instance and start it again. MinimalAction string `json:"minimalAction,omitempty"` // MostDisruptiveAllowedAction: Most disruptive action that is allowed // to be taken on an instance. You can specify either NONE to forbid any - // actions, REFRESH to allow actions that do not need instance restart, - // RESTART to allow actions that can be applied without instance - // replacing or REPLACE to allow all possible actions. If the Updater - // determines that the minimal update action needed is more disruptive - // than most disruptive allowed action you specify it will not perform - // the update at all. + // actions, REFRESH to avoid restarting the VM and to limit disruption + // as much as possible. RESTART to allow actions that can be applied + // without instance replacing or REPLACE to allow all possible actions. + // If the Updater determines that the minimal update action needed is + // more disruptive than most disruptive allowed action you specify it + // will not perform the update at all. // // Possible values: // "NONE" - Do not perform any action. - // "REFRESH" - Updates applied in runtime, instances will not be - // disrupted. - // "REPLACE" - Old instances will be deleted. New instances will be - // created from the target template. - // "RESTART" - Every instance will be restarted. + // "REFRESH" - Do not stop the instance. + // "REPLACE" - (Default.) 
Replace the instance according to the + // replacement method option. + // "RESTART" - Stop the instance and start it again. MostDisruptiveAllowedAction string `json:"mostDisruptiveAllowedAction,omitempty"` // ReplacementMethod: What action should be used to replace instances. @@ -19366,35 +19941,35 @@ type InstanceGroupManagersApplyUpdatesRequest struct { // MinimalAction: The minimal action that you want to perform on each // instance during the update: - REPLACE: At minimum, delete the // instance and create it again. - RESTART: Stop the instance and start - // it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt - // the instance at all. By default, the minimum action is NONE. If your - // update requires a more disruptive action than you set with this flag, - // the necessary action is performed to execute the update. + // it again. - REFRESH: Do not stop the instance and limit disruption as + // much as possible. - NONE: Do not disrupt the instance at all. By + // default, the minimum action is NONE. If your update requires a more + // disruptive action than you set with this flag, the necessary action + // is performed to execute the update. // // Possible values: // "NONE" - Do not perform any action. - // "REFRESH" - Updates applied in runtime, instances will not be - // disrupted. - // "REPLACE" - Old instances will be deleted. New instances will be - // created from the target template. - // "RESTART" - Every instance will be restarted. + // "REFRESH" - Do not stop the instance. + // "REPLACE" - (Default.) Replace the instance according to the + // replacement method option. + // "RESTART" - Stop the instance and start it again. MinimalAction string `json:"minimalAction,omitempty"` // MostDisruptiveAllowedAction: The most disruptive action that you want // to perform on each instance during the update: - REPLACE: Delete the // instance and create it again. - RESTART: Stop the instance and start - // it again. 
- REFRESH: Do not stop the instance. - NONE: Do not disrupt - // the instance at all. By default, the most disruptive allowed action - // is REPLACE. If your update requires a more disruptive action than you - // set with this flag, the update request will fail. + // it again. - REFRESH: Do not stop the instance and limit disruption as + // much as possible. - NONE: Do not disrupt the instance at all. By + // default, the most disruptive allowed action is REPLACE. If your + // update requires a more disruptive action than you set with this flag, + // the update request will fail. // // Possible values: // "NONE" - Do not perform any action. - // "REFRESH" - Updates applied in runtime, instances will not be - // disrupted. - // "REPLACE" - Old instances will be deleted. New instances will be - // created from the target template. - // "RESTART" - Every instance will be restarted. + // "REFRESH" - Do not stop the instance. + // "REPLACE" - (Default.) Replace the instance according to the + // replacement method option. + // "RESTART" - Stop the instance and start it again. MostDisruptiveAllowedAction string `json:"mostDisruptiveAllowedAction,omitempty"` // ForceSendFields is a list of field names (e.g. "AllInstances") to @@ -19452,7 +20027,9 @@ func (s *InstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, err type InstanceGroupManagersDeleteInstancesRequest struct { // Instances: The URLs of one or more instances to delete. This can be a // full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. + // zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have + // URL and can be deleted only by name. One cannot specify both URLs and + // names in a single request. Instances []string `json:"instances,omitempty"` // SkipInstancesOnValidationError: Specifies whether the request should @@ -19670,6 +20247,9 @@ type InstanceGroupManagersListPerInstanceConfigsRespWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -19901,6 +20481,9 @@ type InstanceGroupManagersScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -20221,6 +20804,9 @@ type InstanceGroupsListInstancesWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -20454,6 +21040,9 @@ type InstanceGroupsScopedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -20680,6 +21269,9 @@ type InstanceListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -20870,6 +21462,9 @@ type InstanceListReferrersWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -21532,6 +22127,9 @@ type InstanceTemplateAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -21722,6 +22320,9 @@ type InstanceTemplateListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -21892,6 +22493,9 @@ type InstanceTemplatesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -22010,9 +22614,9 @@ type InstanceWithNamedPorts struct { // Status: [Output Only] The status of the instance. // // Possible values: - // "DEPROVISIONING" - The Nanny is halted and we are performing tear - // down tasks like network deprogramming, releasing quota, IP, tearing - // down disks etc. 
+ // "DEPROVISIONING" - The instance is halted and we are performing + // tear down tasks like network deprogramming, releasing quota, IP, + // tearing down disks etc. // "PROVISIONING" - Resources are being allocated for the instance. // "REPAIRING" - The instance is in repair. // "RUNNING" - The instance is running. @@ -22248,6 +22852,9 @@ type InstancesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -22604,9 +23211,9 @@ func (s *Int64RangeMatch) MarshalJSON() ([]byte, error) { } // Interconnect: Represents an Interconnect resource. An Interconnect -// resource is a dedicated connection between the GCP network and your -// on-premises network. For more information, read the Dedicated -// Interconnect Overview. +// resource is a dedicated connection between the Google Cloud network +// and your on-premises network. For more information, read the +// Dedicated Interconnect Overview. type Interconnect struct { // AdminEnabled: Administrative status of the interconnect. When this is // set to true, the Interconnect is functional and can carry traffic. @@ -22671,6 +23278,21 @@ type Interconnect struct { // for interconnects. Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // Interconnect, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. 
+ // You must always provide an up-to-date fingerprint hash in order to + // update or change labels, otherwise the request will fail with error + // 412 conditionNotMet. To see the latest fingerprint, make a get() + // request to retrieve an Interconnect. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels for this resource. These can only be added or modified + // by the setLabels method. Each label key/value pair must comply with + // RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + // LinkType: Type of link requested, which can take one of the following // values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - // LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that @@ -22731,6 +23353,11 @@ type Interconnect struct { // provisioned in this interconnect. ProvisionedLinkCount int64 `json:"provisionedLinkCount,omitempty"` + // RemoteLocation: Indicates that this is a Cross-Cloud Interconnect. + // This field specifies the location outside of Google's network that + // the interconnect is connected to. + RemoteLocation string `json:"remoteLocation,omitempty"` + // RequestedLinkCount: Target number of physical links in the link // bundle, as requested by the customer. RequestedLinkCount int64 `json:"requestedLinkCount,omitempty"` @@ -22846,6 +23473,11 @@ type InterconnectAttachment struct { // CloudRouterIpv6InterfaceId: This field is not available. CloudRouterIpv6InterfaceId string `json:"cloudRouterIpv6InterfaceId,omitempty"` + // ConfigurationConstraints: [Output Only] Constraints for this + // attachment, if any. The attachment does not work if these constraints + // are not met. + ConfigurationConstraints *InterconnectAttachmentConfigurationConstraints `json:"configurationConstraints,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -22938,14 +23570,28 @@ type InterconnectAttachment struct { // attachment. If this field is not specified when creating the VLAN // attachment, then later on when creating an HA VPN gateway on this // VLAN attachment, the HA VPN gateway's IP address is allocated from - // the regional external IP address pool. Not currently available - // publicly. + // the regional external IP address pool. IpsecInternalAddresses []string `json:"ipsecInternalAddresses,omitempty"` // Kind: [Output Only] Type of the resource. Always // compute#interconnectAttachment for interconnect attachments. Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // InterconnectAttachment, which is essentially a hash of the labels set + // used for optimistic locking. The fingerprint is initially generated + // by Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels, otherwise the request will fail + // with error 412 conditionNotMet. To see the latest fingerprint, make a + // get() request to retrieve an InterconnectAttachment. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels for this resource. These can only be added or modified + // by the setLabels method. Each label key/value pair must comply with + // RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + // Mtu: Maximum Transmission Unit (MTU), in bytes, of packets passing // through this interconnect attachment. Only 1440 and 1500 are allowed. // If not specified, the value will default to 1440. @@ -23001,6 +23647,14 @@ type InterconnectAttachment struct { // body. 
Region string `json:"region,omitempty"` + // RemoteService: [Output Only] If the attachment is on a Cross-Cloud + // Interconnect connection, this field contains the interconnect's + // remote location service provider. Example values: "Amazon Web + // Services" "Microsoft Azure". The field is set only for attachments on + // Cross-Cloud Interconnect connections. Its value is copied from the + // InterconnectRemoteLocation remoteService field. + RemoteService string `json:"remoteService,omitempty"` + // Router: URL of the Cloud Router to be used for dynamic routing. This // router must be in the same region as this InterconnectAttachment. The // InterconnectAttachment will automatically connect the Interconnect to @@ -23064,6 +23718,16 @@ type InterconnectAttachment struct { // yet, because turnup is not complete. State string `json:"state,omitempty"` + // SubnetLength: Length of the IPv4 subnet mask. Allowed values: - 29 + // (default) - 30 The default value is 29, except for Cross-Cloud + // Interconnect connections that use an InterconnectRemoteLocation with + // a constraints.subnetLengthRange.min equal to 30. For example, + // connections that use an Azure remote location fall into this + // category. In these cases, the default value is 30, and requesting 29 + // returns an error. Where both 29 and 30 are allowed, 29 is preferred, + // because it gives Google Cloud Support more debugging visibility. + SubnetLength int64 `json:"subnetLength,omitempty"` + // Type: The type of interconnect attachment this is, which can take one // of the following values: - DEDICATED: an attachment to a Dedicated // Interconnect. - PARTNER: an attachment to a Partner Interconnect, @@ -23195,6 +23859,9 @@ type InterconnectAttachmentAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -23302,139 +23969,37 @@ func (s *InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectAttachmentList: Response to the list request, and -// contains a list of interconnect attachments. -type InterconnectAttachmentList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of InterconnectAttachment resources. - Items []*InterconnectAttachment `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#interconnectAttachmentList for lists of interconnect - // attachments. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *InterconnectAttachmentListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InterconnectAttachmentList) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectAttachmentList - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// InterconnectAttachmentListWarning: [Output Only] Informational -// warning message. -type InterconnectAttachmentListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. +type InterconnectAttachmentConfigurationConstraints struct { + // BgpMd5: [Output Only] Whether the attachment's BGP session + // requires/allows/disallows BGP MD5 authentication. This can take one + // of the following values: MD5_OPTIONAL, MD5_REQUIRED, MD5_UNSUPPORTED. 
+ // For example, a Cross-Cloud Interconnect connection to a remote cloud + // provider that requires BGP MD5 authentication has the + // interconnectRemoteLocation + // attachment_configuration_constraints.bgp_md5 field set to + // MD5_REQUIRED, and that property is propagated to the attachment. + // Similarly, if BGP MD5 is MD5_UNSUPPORTED, an error is returned if MD5 + // is requested. // // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV - // backend service is associated with a health check that is not of type - // HTTP/HTTPS/HTTP2. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. 
- // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's - // nextHopInstance URL refers to an instance that does not have an ipv6 - // interface on the same network as the route. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. 
- // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*InterconnectAttachmentListWarningData `json:"data,omitempty"` + // "MD5_OPTIONAL" - MD5_OPTIONAL: BGP MD5 authentication is supported + // and can optionally be configured. + // "MD5_REQUIRED" - MD5_REQUIRED: BGP MD5 authentication must be + // configured. + // "MD5_UNSUPPORTED" - MD5_UNSUPPORTED: BGP MD5 authentication must + // not be configured + BgpMd5 string `json:"bgpMd5,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // BgpPeerAsnRanges: [Output Only] List of ASN ranges that the remote + // location is known to support. Formatted as an array of inclusive + // ranges {min: min-value, max: max-value}. For example, [{min: 123, + // max: 123}, {min: 64512, max: 65534}] allows the peer ASN to be 123 or + // anything in the range 64512-65534. This field is only advisory. + // Although the API accepts other ranges, these are the ranges that we + // recommend. + BgpPeerAsnRanges []*InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange `json:"bgpPeerAsnRanges,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "BgpMd5") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -23442,7 +24007,7 @@ type InterconnectAttachmentListWarning struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API + // NullFields is a list of field names (e.g. "BgpMd5") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -23451,27 +24016,18 @@ type InterconnectAttachmentListWarning struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentListWarning) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectAttachmentListWarning +func (s *InterconnectAttachmentConfigurationConstraints) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentConfigurationConstraints raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type InterconnectAttachmentListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +type InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange struct { + Max int64 `json:"max,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + Min int64 `json:"min,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "Max") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -23479,7 +24035,7 @@ type InterconnectAttachmentListWarningData struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Key") to include in API + // NullFields is a list of field names (e.g. "Max") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -23488,67 +24044,46 @@ type InterconnectAttachmentListWarningData struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectAttachmentListWarningData +func (s *InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectAttachmentPartnerMetadata: Informational metadata about -// Partner attachments from Partners to display to customers. These -// fields are propagated from PARTNER_PROVIDER attachments to their -// corresponding PARTNER attachments. -type InterconnectAttachmentPartnerMetadata struct { - // InterconnectName: Plain text name of the Interconnect this attachment - // is connected to, as displayed in the Partner's portal. For instance - // "Chicago 1". This value may be validated to match approved Partner - // values. - InterconnectName string `json:"interconnectName,omitempty"` +// InterconnectAttachmentList: Response to the list request, and +// contains a list of interconnect attachments. +type InterconnectAttachmentList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // PartnerName: Plain text name of the Partner providing this - // attachment. This value may be validated to match approved Partner - // values. - PartnerName string `json:"partnerName,omitempty"` + // Items: A list of InterconnectAttachment resources. 
+ Items []*InterconnectAttachment `json:"items,omitempty"` - // PortalUrl: URL of the Partner's portal for this Attachment. Partners - // may customise this to be a deep link to the specific resource on the - // Partner portal. This value may be validated to match approved Partner - // values. - PortalUrl string `json:"portalUrl,omitempty"` + // Kind: [Output Only] Type of resource. Always + // compute#interconnectAttachmentList for lists of interconnect + // attachments. + Kind string `json:"kind,omitempty"` - // ForceSendFields is a list of field names (e.g. "InterconnectName") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // NullFields is a list of field names (e.g. "InterconnectName") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} + // SelfLink: [Output Only] Server-defined URL for this resource. 
+ SelfLink string `json:"selfLink,omitempty"` -func (s *InterconnectAttachmentPartnerMetadata) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectAttachmentPartnerMetadata - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Warning: [Output Only] Informational warning message. + Warning *InterconnectAttachmentListWarning `json:"warning,omitempty"` -// InterconnectAttachmentPrivateInfo: Information for an interconnect -// attachment when this belongs to an interconnect of type DEDICATED. -type InterconnectAttachmentPrivateInfo struct { - // Tag8021q: [Output Only] 802.1q encapsulation tag to be used for - // traffic between Google and the customer, going to and from this - // network and region. - Tag8021q int64 `json:"tag8021q,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Tag8021q") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -23556,8 +24091,8 @@ type InterconnectAttachmentPrivateInfo struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Tag8021q") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
@@ -23565,49 +24100,15 @@ type InterconnectAttachmentPrivateInfo struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentPrivateInfo) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectAttachmentPrivateInfo - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InterconnectAttachmentsScopedList struct { - // InterconnectAttachments: A list of interconnect attachments contained - // in this scope. - InterconnectAttachments []*InterconnectAttachment `json:"interconnectAttachments,omitempty"` - - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *InterconnectAttachmentsScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "InterconnectAttachments") to unconditionally include in API - // requests. By default, fields with empty or default values are omitted - // from API requests. However, any non-pointer, non-interface field - // appearing in ForceSendFields will be sent to the server regardless of - // whether the field is empty or not. This may be used to include empty - // fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InterconnectAttachments") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *InterconnectAttachmentsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectAttachmentsScopedList +func (s *InterconnectAttachmentList) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectAttachmentsScopedListWarning: Informational warning which -// replaces the list of addresses when the list is empty. -type InterconnectAttachmentsScopedListWarning struct { +// InterconnectAttachmentListWarning: [Output Only] Informational +// warning message. +type InterconnectAttachmentListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -23634,6 +24135,259 @@ type InterconnectAttachmentsScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. 
+ // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*InterconnectAttachmentListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. 
+ // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentPartnerMetadata: Informational metadata about +// Partner attachments from Partners to display to customers. These +// fields are propagated from PARTNER_PROVIDER attachments to their +// corresponding PARTNER attachments. +type InterconnectAttachmentPartnerMetadata struct { + // InterconnectName: Plain text name of the Interconnect this attachment + // is connected to, as displayed in the Partner's portal. For instance + // "Chicago 1". This value may be validated to match approved Partner + // values. + InterconnectName string `json:"interconnectName,omitempty"` + + // PartnerName: Plain text name of the Partner providing this + // attachment. This value may be validated to match approved Partner + // values. + PartnerName string `json:"partnerName,omitempty"` + + // PortalUrl: URL of the Partner's portal for this Attachment. Partners + // may customise this to be a deep link to the specific resource on the + // Partner portal. This value may be validated to match approved Partner + // values. + PortalUrl string `json:"portalUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InterconnectName") to + // unconditionally include in API requests. 
By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InterconnectName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentPartnerMetadata) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentPartnerMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentPrivateInfo: Information for an interconnect +// attachment when this belongs to an interconnect of type DEDICATED. +type InterconnectAttachmentPrivateInfo struct { + // Tag8021q: [Output Only] 802.1q encapsulation tag to be used for + // traffic between Google and the customer, going to and from this + // network and region. + Tag8021q int64 `json:"tag8021q,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Tag8021q") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Tag8021q") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentPrivateInfo) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentPrivateInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentsScopedList struct { + // InterconnectAttachments: A list of interconnect attachments contained + // in this scope. + InterconnectAttachments []*InterconnectAttachment `json:"interconnectAttachments,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *InterconnectAttachmentsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "InterconnectAttachments") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InterconnectAttachments") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentsScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentsScopedListWarning: Informational warning which +// replaces the list of addresses when the list is empty. +type InterconnectAttachmentsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
// "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -24125,6 +24879,9 @@ type InterconnectListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -24434,6 +25191,9 @@ type InterconnectLocationListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -24690,86 +25450,123 @@ func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectsGetDiagnosticsResponse: Response for the -// InterconnectsGetDiagnosticsRequest. -type InterconnectsGetDiagnosticsResponse struct { - Result *InterconnectDiagnostics `json:"result,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +// InterconnectRemoteLocation: Represents a Cross-Cloud Interconnect +// Remote Location resource. 
You can use this resource to find remote +// location details about an Interconnect attachment (VLAN). +type InterconnectRemoteLocation struct { + // Address: [Output Only] The postal address of the Point of Presence, + // each line in the address is separated by a newline character. + Address string `json:"address,omitempty"` - // ForceSendFields is a list of field names (e.g. "Result") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // AttachmentConfigurationConstraints: [Output Only] Subset of fields + // from InterconnectAttachment's |configurationConstraints| field that + // apply to all attachments for this remote location. + AttachmentConfigurationConstraints *InterconnectAttachmentConfigurationConstraints `json:"attachmentConfigurationConstraints,omitempty"` - // NullFields is a list of field names (e.g. "Result") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // City: [Output Only] Metropolitan area designator that indicates which + // city an interconnect is located. For example: "Chicago, IL", + // "Amsterdam, Netherlands". 
+ City string `json:"city,omitempty"` -func (s *InterconnectsGetDiagnosticsResponse) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectsGetDiagnosticsResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Constraints: [Output Only] Constraints on the parameters for creating + // Cross-Cloud Interconnect and associated InterconnectAttachments. + Constraints *InterconnectRemoteLocationConstraints `json:"constraints,omitempty"` -// License: Represents a License resource. A License represents billing -// and aggregate usage data for public and marketplace images. *Caution* -// This resource is intended for use only by third-party partners who -// are creating Cloud Marketplace images. -type License struct { - // ChargesUseFee: [Output Only] Deprecated. This field no longer - // reflects whether a license charges a usage fee. - ChargesUseFee bool `json:"chargesUseFee,omitempty"` + // Continent: [Output Only] Continent for this location, which can take + // one of the following values: - AFRICA - ASIA_PAC - EUROPE - + // NORTH_AMERICA - SOUTH_AMERICA + // + // Possible values: + // "AFRICA" + // "ASIA_PAC" + // "EUROPE" + // "NORTH_AMERICA" + // "SOUTH_AMERICA" + Continent string `json:"continent,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Description: An optional textual description of the resource; - // provided by the client when the resource is created. + // Description: [Output Only] An optional description of the resource. Description string `json:"description,omitempty"` + // FacilityProvider: [Output Only] The name of the provider for this + // facility (e.g., EQUINIX). + FacilityProvider string `json:"facilityProvider,omitempty"` + + // FacilityProviderFacilityId: [Output Only] A provider-assigned + // Identifier for this facility (e.g., Ashburn-DC1). 
+ FacilityProviderFacilityId string `json:"facilityProviderFacilityId,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of resource. Always compute#license for - // licenses. + // Kind: [Output Only] Type of the resource. Always + // compute#interconnectRemoteLocation for interconnect remote locations. Kind string `json:"kind,omitempty"` - // LicenseCode: [Output Only] The unique code used to attach this - // license to images, snapshots, and disks. - LicenseCode uint64 `json:"licenseCode,omitempty,string"` + // Lacp: [Output Only] Link Aggregation Control Protocol (LACP) + // constraints, which can take one of the following values: + // LACP_SUPPORTED, LACP_UNSUPPORTED + // + // Possible values: + // "LACP_SUPPORTED" - LACP_SUPPORTED: LACP is supported, and enabled + // by default on the Cross-Cloud Interconnect. + // "LACP_UNSUPPORTED" - LACP_UNSUPPORTED: LACP is not supported and is + // not be enabled on this port. GetDiagnostics shows + // bundleAggregationType as "static". GCP does not support LAGs without + // LACP, so requestedLinkCount must be 1. + Lacp string `json:"lacp,omitempty"` + + // MaxLagSize100Gbps: [Output Only] The maximum number of 100 Gbps ports + // supported in a link aggregation group (LAG). When linkType is 100 + // Gbps, requestedLinkCount cannot exceed max_lag_size_100_gbps. + MaxLagSize100Gbps int64 `json:"maxLagSize100Gbps,omitempty"` + + // MaxLagSize10Gbps: [Output Only] The maximum number of 10 Gbps ports + // supported in a link aggregation group (LAG). When linkType is 10 + // Gbps, requestedLinkCount cannot exceed max_lag_size_10_gbps. + MaxLagSize10Gbps int64 `json:"maxLagSize10Gbps,omitempty"` - // Name: Name of the resource. The name must be 1-63 characters long and - // comply with RFC1035. + // Name: [Output Only] Name of the resource. 
Name string `json:"name,omitempty"` - ResourceRequirements *LicenseResourceRequirements `json:"resourceRequirements,omitempty"` + // PeeringdbFacilityId: [Output Only] The peeringdb identifier for this + // facility (corresponding with a netfac type in peeringdb). + PeeringdbFacilityId string `json:"peeringdbFacilityId,omitempty"` + + // PermittedConnections: [Output Only] Permitted connections. + PermittedConnections []*InterconnectRemoteLocationPermittedConnections `json:"permittedConnections,omitempty"` + + // RemoteService: [Output Only] Indicates the service provider present + // at the remote location. Example values: "Amazon Web Services", + // "Microsoft Azure". + RemoteService string `json:"remoteService,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Transferable: If false, licenses will not be copied from the source - // resource when creating an image from a disk, disk from snapshot, or - // snapshot from disk. - Transferable bool `json:"transferable,omitempty"` + // Status: [Output Only] The status of this InterconnectRemoteLocation, + // which can take one of the following values: - CLOSED: The + // InterconnectRemoteLocation is closed and is unavailable for + // provisioning new Cross-Cloud Interconnects. - AVAILABLE: The + // InterconnectRemoteLocation is available for provisioning new + // Cross-Cloud Interconnects. + // + // Possible values: + // "AVAILABLE" - The InterconnectRemoteLocation is available for + // provisioning new Cross-Cloud Interconnects. + // "CLOSED" - The InterconnectRemoteLocation is closed for + // provisioning new Cross-Cloud Interconnects. + Status string `json:"status,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "ChargesUseFee") to + // ForceSendFields is a list of field names (e.g. 
"Address") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -24777,144 +25574,101 @@ type License struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ChargesUseFee") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *License) MarshalJSON() ([]byte, error) { - type NoMethod License +func (s *InterconnectRemoteLocation) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// LicenseCode: Represents a License Code resource. A License Code is a -// unique identifier used to represent a license resource. *Caution* -// This resource is intended for use only by third-party partners who -// are creating Cloud Marketplace images. -type LicenseCode struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: [Output Only] Description of this License Code. 
- Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. Always compute#licenseCode for - // licenses. - Kind string `json:"kind,omitempty"` - - // LicenseAlias: [Output Only] URL and description aliases of Licenses - // with the same License Code. - LicenseAlias []*LicenseCodeLicenseAlias `json:"licenseAlias,omitempty"` - - // Name: [Output Only] Name of the resource. The name is 1-20 characters - // long and must be a valid 64 bit integer. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // State: [Output Only] Current state of this License Code. +type InterconnectRemoteLocationConstraints struct { + // PortPairRemoteLocation: [Output Only] Port pair remote location + // constraints, which can take one of the following values: + // PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, + // PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to + // individual ports, but the UI uses this field when ordering a pair of + // ports, to prevent users from accidentally ordering something that is + // incompatible with their cloud provider. Specifically, when ordering a + // redundant pair of Cross-Cloud Interconnect ports, and one of them + // uses a remote location with portPairMatchingRemoteLocation set to + // matching, the UI requires that both ports use the same remote + // location. // // Possible values: - // "DISABLED" - Machines are not allowed to attach boot disks with - // this License Code. Requests to create new resources with this license - // will be rejected. - // "ENABLED" - Use is allowed for anyone with USE_READ_ONLY access to - // this License Code. - // "RESTRICTED" - Use of this license is limited to a project - // whitelist. 
- // "STATE_UNSPECIFIED" - // "TERMINATED" - Reserved state. - State string `json:"state,omitempty"` - - // Transferable: [Output Only] If true, the license will remain attached - // when creating images or snapshots from disks. Otherwise, the license - // is not transferred. - Transferable bool `json:"transferable,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // "PORT_PAIR_MATCHING_REMOTE_LOCATION" - If + // PORT_PAIR_MATCHING_REMOTE_LOCATION, the remote cloud provider + // allocates ports in pairs, and the user should choose the same remote + // location for both ports. + // "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" - If + // PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, a user may opt to provision + // a redundant pair of Cross-Cloud Interconnects using two different + // remote locations in the same city. + PortPairRemoteLocation string `json:"portPairRemoteLocation,omitempty"` + + // PortPairVlan: [Output Only] Port pair VLAN constraints, which can + // take one of the following values: PORT_PAIR_UNCONSTRAINED_VLAN, + // PORT_PAIR_MATCHING_VLAN + // + // Possible values: + // "PORT_PAIR_MATCHING_VLAN" - If PORT_PAIR_MATCHING_VLAN, the + // Interconnect for this attachment is part of a pair of ports that + // should have matching VLAN allocations. This occurs with Cross-Cloud + // Interconnect to Azure remote locations. While GCP's API does not + // explicitly group pairs of ports, the UI uses this field to ensure + // matching VLAN ids when configuring a redundant VLAN pair. + // "PORT_PAIR_UNCONSTRAINED_VLAN" - PORT_PAIR_UNCONSTRAINED_VLAN means + // there is no constraint. + PortPairVlan string `json:"portPairVlan,omitempty"` + + // SubnetLengthRange: [Output Only] [min-length, max-length] The minimum + // and maximum value (inclusive) for the IPv4 subnet length. 
For + // example, an interconnectRemoteLocation for Azure has {min: 30, max: + // 30} because Azure requires /30 subnets. This range specifies the + // values supported by both cloud providers. Interconnect currently + // supports /29 and /30 IPv4 subnet lengths. If a remote cloud has no + // constraint on IPv4 subnet length, the range would thus be {min: 29, + // max: 30}. + SubnetLengthRange *InterconnectRemoteLocationConstraintsSubnetLengthRange `json:"subnetLengthRange,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "PortPairRemoteLocation") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the + // NullFields is a list of field names (e.g. "PortPairRemoteLocation") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the // server as null. 
It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } -func (s *LicenseCode) MarshalJSON() ([]byte, error) { - type NoMethod LicenseCode - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type LicenseCodeLicenseAlias struct { - // Description: [Output Only] Description of this License Code. - Description string `json:"description,omitempty"` - - // SelfLink: [Output Only] URL of license corresponding to this License - // Code. - SelfLink string `json:"selfLink,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LicenseCodeLicenseAlias) MarshalJSON() ([]byte, error) { - type NoMethod LicenseCodeLicenseAlias +func (s *InterconnectRemoteLocationConstraints) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocationConstraints raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// LicenseResourceCommitment: Commitment for a particular license -// resource. 
-type LicenseResourceCommitment struct { - // Amount: The number of licenses purchased. - Amount int64 `json:"amount,omitempty,string"` - - // CoresPerLicense: Specifies the core range of the instance for which - // this license applies. - CoresPerLicense string `json:"coresPerLicense,omitempty"` +type InterconnectRemoteLocationConstraintsSubnetLengthRange struct { + Max int64 `json:"max,omitempty"` - // License: Any applicable license URI. - License string `json:"license,omitempty"` + Min int64 `json:"min,omitempty"` - // ForceSendFields is a list of field names (e.g. "Amount") to + // ForceSendFields is a list of field names (e.g. "Max") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -24922,7 +25676,7 @@ type LicenseResourceCommitment struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Amount") to include in API + // NullFields is a list of field names (e.g. "Max") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -24931,66 +25685,537 @@ type LicenseResourceCommitment struct { NullFields []string `json:"-"` } -func (s *LicenseResourceCommitment) MarshalJSON() ([]byte, error) { - type NoMethod LicenseResourceCommitment - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type LicenseResourceRequirements struct { - // MinGuestCpuCount: Minimum number of guest cpus required to use the - // Instance. Enforced at Instance creation and Instance start. - MinGuestCpuCount int64 `json:"minGuestCpuCount,omitempty"` - - // MinMemoryMb: Minimum memory required to use the Instance. 
Enforced at - // Instance creation and Instance start. - MinMemoryMb int64 `json:"minMemoryMb,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MinGuestCpuCount") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MinGuestCpuCount") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *LicenseResourceRequirements) MarshalJSON() ([]byte, error) { - type NoMethod LicenseResourceRequirements +func (s *InterconnectRemoteLocationConstraintsSubnetLengthRange) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocationConstraintsSubnetLengthRange raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type LicensesListResponse struct { +// InterconnectRemoteLocationList: Response to the list request, and +// contains a list of interconnect remote locations. +type InterconnectRemoteLocationList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of License resources. - Items []*License `json:"items,omitempty"` + // Items: A list of InterconnectRemoteLocation resources. 
+ Items []*InterconnectRemoteLocation `json:"items,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. + // Kind: [Output Only] Type of resource. Always + // compute#interconnectRemoteLocationList for lists of interconnect + // remote locations. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token lets you get the next page of + // results for list requests. If the number of results is larger than + // maxResults, use the nextPageToken as a value for the query parameter + // pageToken in the next list request. Subsequent list requests will + // have their own nextPageToken to continue paging through the results. NextPageToken string `json:"nextPageToken,omitempty"` // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *LicensesListResponseWarning `json:"warning,omitempty"` + Warning *InterconnectRemoteLocationListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectRemoteLocationList) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocationList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectRemoteLocationListWarning: [Output Only] Informational +// warning message. +type InterconnectRemoteLocationListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
+ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. 
+ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*InterconnectRemoteLocationListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectRemoteLocationListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocationListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectRemoteLocationListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectRemoteLocationListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocationListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectRemoteLocationPermittedConnections struct { + // InterconnectLocation: [Output Only] URL of an Interconnect location + // that is permitted to connect to this Interconnect remote location. + InterconnectLocation string `json:"interconnectLocation,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "InterconnectLocation") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InterconnectLocation") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectRemoteLocationPermittedConnections) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocationPermittedConnections + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectsGetDiagnosticsResponse: Response for the +// InterconnectsGetDiagnosticsRequest. 
+type InterconnectsGetDiagnosticsResponse struct { + Result *InterconnectDiagnostics `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectsGetDiagnosticsResponse) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectsGetDiagnosticsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// License: Represents a License resource. A License represents billing +// and aggregate usage data for public and marketplace images. *Caution* +// This resource is intended for use only by third-party partners who +// are creating Cloud Marketplace images. +type License struct { + // ChargesUseFee: [Output Only] Deprecated. This field no longer + // reflects whether a license charges a usage fee. + ChargesUseFee bool `json:"chargesUseFee,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. 
+ CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional textual description of the resource; + // provided by the client when the resource is created. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#license for + // licenses. + Kind string `json:"kind,omitempty"` + + // LicenseCode: [Output Only] The unique code used to attach this + // license to images, snapshots, and disks. + LicenseCode uint64 `json:"licenseCode,omitempty,string"` + + // Name: Name of the resource. The name must be 1-63 characters long and + // comply with RFC1035. + Name string `json:"name,omitempty"` + + ResourceRequirements *LicenseResourceRequirements `json:"resourceRequirements,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Transferable: If false, licenses will not be copied from the source + // resource when creating an image from a disk, disk from snapshot, or + // snapshot from disk. + Transferable bool `json:"transferable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ChargesUseFee") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ChargesUseFee") to include + // in API requests with the JSON null value. 
By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *License) MarshalJSON() ([]byte, error) { + type NoMethod License + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LicenseCode: Represents a License Code resource. A License Code is a +// unique identifier used to represent a license resource. *Caution* +// This resource is intended for use only by third-party partners who +// are creating Cloud Marketplace images. +type LicenseCode struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: [Output Only] Description of this License Code. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#licenseCode for + // licenses. + Kind string `json:"kind,omitempty"` + + // LicenseAlias: [Output Only] URL and description aliases of Licenses + // with the same License Code. + LicenseAlias []*LicenseCodeLicenseAlias `json:"licenseAlias,omitempty"` + + // Name: [Output Only] Name of the resource. The name is 1-20 characters + // long and must be a valid 64 bit integer. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // State: [Output Only] Current state of this License Code. + // + // Possible values: + // "DISABLED" - Machines are not allowed to attach boot disks with + // this License Code. 
Requests to create new resources with this license + // will be rejected. + // "ENABLED" - Use is allowed for anyone with USE_READ_ONLY access to + // this License Code. + // "RESTRICTED" - Use of this license is limited to a project + // whitelist. + // "STATE_UNSPECIFIED" + // "TERMINATED" - Reserved state. + State string `json:"state,omitempty"` + + // Transferable: [Output Only] If true, the license will remain attached + // when creating images or snapshots from disks. Otherwise, the license + // is not transferred. + Transferable bool `json:"transferable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *LicenseCode) MarshalJSON() ([]byte, error) { + type NoMethod LicenseCode + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LicenseCodeLicenseAlias struct { + // Description: [Output Only] Description of this License Code. 
+ Description string `json:"description,omitempty"` + + // SelfLink: [Output Only] URL of license corresponding to this License + // Code. + SelfLink string `json:"selfLink,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LicenseCodeLicenseAlias) MarshalJSON() ([]byte, error) { + type NoMethod LicenseCodeLicenseAlias + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LicenseResourceCommitment: Commitment for a particular license +// resource. +type LicenseResourceCommitment struct { + // Amount: The number of licenses purchased. + Amount int64 `json:"amount,omitempty,string"` + + // CoresPerLicense: Specifies the core range of the instance for which + // this license applies. + CoresPerLicense string `json:"coresPerLicense,omitempty"` + + // License: Any applicable license URI. + License string `json:"license,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Amount") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Amount") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LicenseResourceCommitment) MarshalJSON() ([]byte, error) { + type NoMethod LicenseResourceCommitment + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LicenseResourceRequirements struct { + // MinGuestCpuCount: Minimum number of guest cpus required to use the + // Instance. Enforced at Instance creation and Instance start. + MinGuestCpuCount int64 `json:"minGuestCpuCount,omitempty"` + + // MinMemoryMb: Minimum memory required to use the Instance. Enforced at + // Instance creation and Instance start. + MinMemoryMb int64 `json:"minMemoryMb,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MinGuestCpuCount") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MinGuestCpuCount") to + // include in API requests with the JSON null value. 
By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *LicenseResourceRequirements) MarshalJSON() ([]byte, error) { + type NoMethod LicenseResourceRequirements + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LicensesListResponse struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of License resources. + Items []*License `json:"items,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *LicensesListResponseWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -25048,6 +26273,9 @@ type LicensesListResponseWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
// "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -25729,6 +26957,9 @@ type MachineImageListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -25936,7 +27167,7 @@ type MachineTypeAccelerators struct { GuestAcceleratorCount int64 `json:"guestAcceleratorCount,omitempty"` // GuestAcceleratorType: The accelerator type resource name, not a full - // URL, e.g. 'nvidia-tesla-k80'. + // URL, e.g. nvidia-tesla-t4. GuestAcceleratorType string `json:"guestAcceleratorType,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -26077,6 +27308,9 @@ type MachineTypeAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -26266,6 +27500,9 @@ type MachineTypeListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -26434,6 +27671,9 @@ type MachineTypesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -26618,9 +27858,9 @@ type ManagedInstance struct { // is empty when the instance does not exist. // // Possible values: - // "DEPROVISIONING" - The Nanny is halted and we are performing tear - // down tasks like network deprogramming, releasing quota, IP, tearing - // down disks etc. + // "DEPROVISIONING" - The instance is halted and we are performing + // tear down tasks like network deprogramming, releasing quota, IP, + // tearing down disks etc. // "PROVISIONING" - Resources are being allocated for the instance. // "REPAIRING" - The instance is in repair. // "RUNNING" - The instance is running. @@ -27130,7 +28370,7 @@ type Network struct { FirewallPolicy string `json:"firewallPolicy,omitempty"` // GatewayIPv4: [Output Only] The gateway address for default routing - // out of the network, selected by GCP. + // out of the network, selected by Google Cloud. 
GatewayIPv4 string `json:"gatewayIPv4,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -27242,10 +28482,9 @@ type NetworkAttachment struct { // property when you create the resource. Description string `json:"description,omitempty"` - // Fingerprint: [Output Only] Fingerprint of this resource. A hash of - // the contents stored in this object. This field is used in optimistic - // locking. An up-to-date fingerprint must be provided in order to - // patch. + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. An + // up-to-date fingerprint must be provided in order to patch. Fingerprint string `json:"fingerprint,omitempty"` // Id: [Output Only] The unique identifier for the resource type. The @@ -27265,7 +28504,11 @@ type NetworkAttachment struct { Name string `json:"name,omitempty"` // Network: [Output Only] The URL of the network which the Network - // Attachment belongs to. + // Attachment belongs to. Practically it is inferred by fetching the + // network of the first subnetwork associated. Because it is required + // that all the subnetworks must be from the same network, it is assured + // that the Network Attachment belongs to the same network as all the + // subnetworks. Network string `json:"network,omitempty"` // ProducerAcceptLists: Projects that are allowed to connect to this @@ -27406,6 +28649,9 @@ type NetworkAttachmentAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
// "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -27516,7 +28762,7 @@ func (s *NetworkAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, erro // NetworkAttachmentConnectedEndpoint: [Output Only] A connection // connected to this network attachment. type NetworkAttachmentConnectedEndpoint struct { - // IpAddress: The IP address assigned to the producer instance network + // IpAddress: The IPv4 address assigned to the producer instance network // interface. This value will be a range in case of Serverless. IpAddress string `json:"ipAddress,omitempty"` @@ -27524,7 +28770,7 @@ type NetworkAttachmentConnectedEndpoint struct { // the IP was assigned. ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` - // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork + // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork. SecondaryIpCidrRanges []string `json:"secondaryIpCidrRanges,omitempty"` // Status: The status of a connected endpoint to this network @@ -27650,6 +28896,9 @@ type NetworkAttachmentListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -27819,6 +29068,9 @@ type NetworkAttachmentsScopedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -28095,6 +29347,9 @@ type NetworkEdgeSecurityServiceAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -28265,6 +29520,9 @@ type NetworkEdgeSecurityServicesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -28642,6 +29900,9 @@ type NetworkEndpointGroupAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -28974,6 +30235,9 @@ type NetworkEndpointGroupListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -29303,6 +30567,9 @@ type NetworkEndpointGroupsListNetworkEndpointsWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -29474,6 +30741,9 @@ type NetworkEndpointGroupsScopedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -29708,10 +30978,11 @@ type NetworkInterface struct { // number. It'll be empty if not specified by the users. QueueCount int64 `json:"queueCount,omitempty"` - // StackType: The stack type for this network interface to identify - // whether the IPv6 feature is enabled or not. If not specified, - // IPV4_ONLY will be used. This field can be both set at instance - // creation and update network interface operations. + // StackType: The stack type for this network interface. To assign only + // IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 + // addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This + // field can be both set at instance creation and update network + // interface operations. // // Possible values: // "IPV4_IPV6" - The network interface can have both IPv4 and IPv6 @@ -29835,6 +31106,9 @@ type NetworkListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -30506,6 +31780,9 @@ type NodeGroupAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -30737,6 +32014,9 @@ type NodeGroupListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -31095,6 +32375,9 @@ type NodeGroupsListNodesWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -31263,6 +32546,9 @@ type NodeGroupsScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
// "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -31398,6 +32684,33 @@ func (s *NodeGroupsSetNodeTemplateRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type NodeGroupsSimulateMaintenanceEventRequest struct { + // Nodes: Names of the nodes to go under maintenance simulation. + Nodes []string `json:"nodes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Nodes") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Nodes") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NodeGroupsSimulateMaintenanceEventRequest) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsSimulateMaintenanceEventRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // NodeTemplate: Represent a sole-tenant Node Template resource. You can // use a template to define properties for nodes in a node group. For // more information, read Creating node groups and instances. @@ -31593,6 +32906,9 @@ type NodeTemplateAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -31782,6 +33098,9 @@ type NodeTemplateListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -31980,6 +33299,9 @@ type NodeTemplatesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -32251,6 +33573,9 @@ type NodeTypeAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -32440,6 +33765,9 @@ type NodeTypeListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -32608,6 +33936,9 @@ type NodeTypesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -32921,6 +34252,9 @@ type NotificationEndpointListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
// "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -33304,6 +34638,9 @@ type OperationWarnings struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -33496,6 +34833,9 @@ type OperationAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -33685,6 +35025,9 @@ type OperationListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -33853,6 +35196,9 @@ type OperationsScopedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -34311,6 +35657,9 @@ type PacketMirroringAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -34579,6 +35928,9 @@ type PacketMirroringListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -34883,6 +36235,9 @@ type PacketMirroringsScopedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -35729,7 +37084,7 @@ type PublicAdvertisedPrefix struct { // property when you create the resource. Description string `json:"description,omitempty"` - // DnsVerificationIp: The IPv4 address to be used for reverse DNS + // DnsVerificationIp: The address to be used for reverse DNS // verification. DnsVerificationIp string `json:"dnsVerificationIp,omitempty"` @@ -35746,8 +37101,8 @@ type PublicAdvertisedPrefix struct { // server generates this identifier. Id uint64 `json:"id,omitempty,string"` - // IpCidrRange: The IPv4 address range, in CIDR format, represented by - // this public advertised prefix. + // IpCidrRange: The address range, in CIDR format, represented by this + // public advertised prefix. IpCidrRange string `json:"ipCidrRange,omitempty"` // Kind: [Output Only] Type of the resource. Always @@ -35904,6 +37259,9 @@ type PublicAdvertisedPrefixListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -36084,7 +37442,7 @@ type PublicDelegatedPrefix struct { // server generates this identifier. 
Id uint64 `json:"id,omitempty,string"` - // IpCidrRange: The IPv4 address range, in CIDR format, represented by + // IpCidrRange: The IP address range, in CIDR format, represented by // this public delegated prefix. IpCidrRange string `json:"ipCidrRange,omitempty"` @@ -36252,6 +37610,9 @@ type PublicDelegatedPrefixAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -36441,6 +37802,9 @@ type PublicDelegatedPrefixListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -36559,7 +37923,7 @@ type PublicDelegatedPrefixPublicDelegatedSubPrefix struct { // property when you create the resource. Description string `json:"description,omitempty"` - // IpCidrRange: The IPv4 address range, in CIDR format, represented by + // IpCidrRange: The IP address range, in CIDR format, represented by // this sub public delegated prefix. IpCidrRange string `json:"ipCidrRange,omitempty"` @@ -36669,6 +38033,9 @@ type PublicDelegatedPrefixesScopedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -36809,6 +38176,7 @@ type Quota struct { // "COMMITTED_NVIDIA_A100_80GB_GPUS" // "COMMITTED_NVIDIA_A100_GPUS" // "COMMITTED_NVIDIA_K80_GPUS" + // "COMMITTED_NVIDIA_L4_GPUS" // "COMMITTED_NVIDIA_P100_GPUS" // "COMMITTED_NVIDIA_P4_GPUS" // "COMMITTED_NVIDIA_T4_GPUS" @@ -36860,11 +38228,15 @@ type Quota struct { // "NETWORK_ATTACHMENTS" // "NETWORK_ENDPOINT_GROUPS" // "NETWORK_FIREWALL_POLICIES" + // "NET_LB_SECURITY_POLICIES_PER_REGION" + // "NET_LB_SECURITY_POLICY_RULES_PER_REGION" + // "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION" // "NODE_GROUPS" // "NODE_TEMPLATES" // "NVIDIA_A100_80GB_GPUS" // "NVIDIA_A100_GPUS" // "NVIDIA_K80_GPUS" + // "NVIDIA_L4_GPUS" // "NVIDIA_P100_GPUS" // "NVIDIA_P100_VWS_GPUS" // "NVIDIA_P4_GPUS" @@ -36879,6 +38251,7 @@ type Quota struct { // "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS" // "PREEMPTIBLE_NVIDIA_A100_GPUS" // "PREEMPTIBLE_NVIDIA_K80_GPUS" + // "PREEMPTIBLE_NVIDIA_L4_GPUS" // "PREEMPTIBLE_NVIDIA_P100_GPUS" // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" // "PREEMPTIBLE_NVIDIA_P4_GPUS" @@ -36902,6 +38275,7 @@ type Quota struct { // "ROUTES" // "SECURITY_POLICIES" // "SECURITY_POLICIES_PER_REGION" + // "SECURITY_POLICY_ADVANCED_RULES_PER_REGION" // "SECURITY_POLICY_CEVAL_RULES" // "SECURITY_POLICY_RULES" // "SECURITY_POLICY_RULES_PER_REGION" @@ -37141,6 +38515,44 @@ func (s *Region) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type RegionAddressesMoveRequest struct { + // Description: An optional 
destination address description if intended + // to be different from the source. + Description string `json:"description,omitempty"` + + // DestinationAddress: The URL of the destination address to move to. + // This can be a full or partial URL. For example, the following are all + // valid URLs to a address: - + // https://www.googleapis.com/compute/v1/projects/project/regions/region + // /addresses/address - + // projects/project/regions/region/addresses/address Note that + // destination project must be different from the source project. So + // /regions/region/addresses/address is not valid partial url. + DestinationAddress string `json:"destinationAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionAddressesMoveRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionAddressesMoveRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // RegionAutoscalerList: Contains a list of autoscalers. 
type RegionAutoscalerList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -37223,6 +38635,9 @@ type RegionAutoscalerListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -37412,6 +38827,9 @@ type RegionDiskTypeListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -37603,238 +39021,52 @@ func (s *RegionDisksResizeRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupList: Contains a list of InstanceGroup resources. -type RegionInstanceGroupList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of InstanceGroup resources. - Items []*InstanceGroup `json:"items,omitempty"` - - // Kind: The resource type. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. 
If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupList - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionInstanceGroupListWarning: [Output Only] Informational warning -// message. -type RegionInstanceGroupListWarning struct { - // Code: [Output Only] A warning code, if applicable. 
For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV - // backend service is associated with a health check that is not of type - // HTTP/HTTPS/HTTP2. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. - // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's - // nextHopInstance URL refers to an instance that does not have an ipv6 - // interface on the same network as the route. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. 
- // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Code") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. 
- // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupListWarning - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionInstanceGroupManagerDeleteInstanceConfigReq: -// RegionInstanceGroupManagers.deletePerInstanceConfigs -type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { - // Names: The list of instance names for which we want to delete - // per-instance configs on this managed instance group. - Names []string `json:"names,omitempty"` +type RegionDisksStartAsyncReplicationRequest struct { + // AsyncSecondaryDisk: The secondary disk to start asynchronous + // replication to. You can provide this as a partial or full URL to the + // resource. For example, the following are valid values: - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone + // /disks/disk - + // https://www.googleapis.com/compute/v1/projects/project/regions/region + // /disks/disk - projects/project/zones/zone/disks/disk - + // projects/project/regions/region/disks/disk - zones/zone/disks/disk - + // regions/region/disks/disk + AsyncSecondaryDisk string `json:"asyncSecondaryDisk,omitempty"` - // ForceSendFields is a list of field names (e.g. "Names") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AsyncSecondaryDisk") + // to unconditionally include in API requests. 
By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Names") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AsyncSecondaryDisk") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerDeleteInstanceConfigReq) MarshalJSON() ([]byte, error) { - type NoMethod RegionInstanceGroupManagerDeleteInstanceConfigReq +func (s *RegionDisksStartAsyncReplicationRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionDisksStartAsyncReplicationRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupManagerList: Contains a list of managed instance -// groups. -type RegionInstanceGroupManagerList struct { +// RegionInstanceGroupList: Contains a list of InstanceGroup resources. +type RegionInstanceGroupList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. 
Id string `json:"id,omitempty"` - // Items: A list of InstanceGroupManager resources. - Items []*InstanceGroupManager `json:"items,omitempty"` + // Items: A list of InstanceGroup resources. + Items []*InstanceGroup `json:"items,omitempty"` - // Kind: [Output Only] The resource type, which is always - // compute#instanceGroupManagerList for a list of managed instance - // groups that exist in th regional scope. + // Kind: The resource type. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -37849,7 +39081,232 @@ type RegionInstanceGroupManagerList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"` + Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RegionInstanceGroupListWarning: [Output Only] Informational warning +// message. +type RegionInstanceGroupListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
+ // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. 
+ Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RegionInstanceGroupManagerDeleteInstanceConfigReq: +// RegionInstanceGroupManagers.deletePerInstanceConfigs +type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { + // Names: The list of instance names for which we want to delete + // per-instance configs on this managed instance group. + Names []string `json:"names,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Names") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Names") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagerDeleteInstanceConfigReq) MarshalJSON() ([]byte, error) { + type NoMethod RegionInstanceGroupManagerDeleteInstanceConfigReq + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RegionInstanceGroupManagerList: Contains a list of managed instance +// groups. +type RegionInstanceGroupManagerList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of InstanceGroupManager resources. + Items []*InstanceGroupManager `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupManagerList for a list of managed instance + // groups that exist in th regional scope. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. 
Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -37907,6 +39364,9 @@ type RegionInstanceGroupManagerListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -38121,35 +39581,35 @@ type RegionInstanceGroupManagersApplyUpdatesRequest struct { // MinimalAction: The minimal action that you want to perform on each // instance during the update: - REPLACE: At minimum, delete the // instance and create it again. - RESTART: Stop the instance and start - // it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt - // the instance at all. By default, the minimum action is NONE. If your - // update requires a more disruptive action than you set with this flag, - // the necessary action is performed to execute the update. + // it again. - REFRESH: Do not stop the instance and limit disruption as + // much as possible. - NONE: Do not disrupt the instance at all. By + // default, the minimum action is NONE. If your update requires a more + // disruptive action than you set with this flag, the necessary action + // is performed to execute the update. 
// // Possible values: // "NONE" - Do not perform any action. - // "REFRESH" - Updates applied in runtime, instances will not be - // disrupted. - // "REPLACE" - Old instances will be deleted. New instances will be - // created from the target template. - // "RESTART" - Every instance will be restarted. + // "REFRESH" - Do not stop the instance. + // "REPLACE" - (Default.) Replace the instance according to the + // replacement method option. + // "RESTART" - Stop the instance and start it again. MinimalAction string `json:"minimalAction,omitempty"` // MostDisruptiveAllowedAction: The most disruptive action that you want // to perform on each instance during the update: - REPLACE: Delete the // instance and create it again. - RESTART: Stop the instance and start - // it again. - REFRESH: Do not stop the instance. - NONE: Do not disrupt - // the instance at all. By default, the most disruptive allowed action - // is REPLACE. If your update requires a more disruptive action than you - // set with this flag, the update request will fail. + // it again. - REFRESH: Do not stop the instance and limit disruption as + // much as possible. - NONE: Do not disrupt the instance at all. By + // default, the most disruptive allowed action is REPLACE. If your + // update requires a more disruptive action than you set with this flag, + // the update request will fail. // // Possible values: // "NONE" - Do not perform any action. - // "REFRESH" - Updates applied in runtime, instances will not be - // disrupted. - // "REPLACE" - Old instances will be deleted. New instances will be - // created from the target template. - // "RESTART" - Every instance will be restarted. + // "REFRESH" - Do not stop the instance. + // "REPLACE" - (Default.) Replace the instance according to the + // replacement method option. + // "RESTART" - Stop the instance and start it again. 
MostDisruptiveAllowedAction string `json:"mostDisruptiveAllowedAction,omitempty"` // ForceSendFields is a list of field names (e.g. "AllInstances") to @@ -38354,6 +39814,9 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -38674,6 +40137,9 @@ type RegionInstanceGroupsListInstancesWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -38937,6 +40403,9 @@ type RegionListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -39535,6 +41004,9 @@ type ReservationAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -39723,6 +41195,9 @@ type ReservationListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -39919,6 +41394,9 @@ type ReservationsScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -40163,6 +41641,9 @@ type ResourcePoliciesScopedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -40281,6 +41762,10 @@ type ResourcePolicy struct { Description string `json:"description,omitempty"` + // DiskConsistencyGroupPolicy: Resource policy for disk consistency + // groups. + DiskConsistencyGroupPolicy *ResourcePolicyDiskConsistencyGroupPolicy `json:"diskConsistencyGroupPolicy,omitempty"` + // GroupPlacementPolicy: Resource policy for instances for placement // configuration. GroupPlacementPolicy *ResourcePolicyGroupPlacementPolicy `json:"groupPlacementPolicy,omitempty"` @@ -40446,6 +41931,9 @@ type ResourcePolicyAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -40592,6 +42080,11 @@ func (s *ResourcePolicyDailyCycle) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ResourcePolicyDiskConsistencyGroupPolicy: Resource policy for disk +// consistency groups. +type ResourcePolicyDiskConsistencyGroupPolicy struct { +} + // ResourcePolicyGroupPlacementPolicy: A GroupPlacementPolicy specifies // resource placement configuration. 
It specifies the failure bucket // separation as well as network locality @@ -40838,6 +42331,9 @@ type ResourcePolicyListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -41322,6 +42818,10 @@ type Route struct { // project/global/gateways/default-internet-gateway NextHopGateway string `json:"nextHopGateway,omitempty"` + // NextHopHub: [Output Only] The full resource name of the Network + // Connectivity Center hub that will handle matching packets. + NextHopHub string `json:"nextHopHub,omitempty"` + // NextHopIlb: The URL to a forwarding rule of type // loadBalancingScheme=INTERNAL that should handle matching packets or // the IP address of the forwarding Rule. For example, the following are @@ -41451,6 +42951,9 @@ type RouteWarnings struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -41682,6 +43185,9 @@ type RouteListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -41997,6 +43503,9 @@ type RouterAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -42210,6 +43719,17 @@ type RouterBgpPeer struct { // Bfd: BFD configuration for the BGP peering. Bfd *RouterBgpPeerBfd `json:"bfd,omitempty"` + // CustomLearnedIpRanges: A list of user-defined custom learned route IP + // address ranges for a BGP session. + CustomLearnedIpRanges []*RouterBgpPeerCustomLearnedIpRange `json:"customLearnedIpRanges,omitempty"` + + // CustomLearnedRoutePriority: The user-defined custom learned route + // priority for a BGP session. This value is applied to all custom + // learned route ranges for the session. You can choose a value from `0` + // to `65335`. If you don't provide a value, Google Cloud assigns a + // priority of `100` to the ranges. + CustomLearnedRoutePriority int64 `json:"customLearnedRoutePriority,omitempty"` + // Enable: The status of the BGP peer connection. If set to FALSE, any // active session with the peer is terminated and all associated routing // information is removed. 
If set to TRUE, the peer connection can be @@ -42370,6 +43890,36 @@ func (s *RouterBgpPeerBfd) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type RouterBgpPeerCustomLearnedIpRange struct { + // Range: The custom learned route IP address range. Must be a valid + // CIDR-formatted prefix. If an IP address is provided without a subnet + // mask, it is interpreted as, for IPv4, a `/32` singular IP address + // range, and, for IPv6, `/128`. + Range string `json:"range,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Range") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Range") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouterBgpPeerCustomLearnedIpRange) MarshalJSON() ([]byte, error) { + type NoMethod RouterBgpPeerCustomLearnedIpRange + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type RouterInterface struct { // IpRange: IP address and range of the interface. The IP range must be // in the RFC3927 link-local IP address space. The value must be a @@ -42550,6 +44100,9 @@ type RouterListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -42699,6 +44252,21 @@ func (s *RouterMd5AuthenticationKey) MarshalJSON() ([]byte, error) { // that would be used for NAT. GCP would auto-allocate ephemeral IPs if // no external IPs are provided. type RouterNat struct { + // AutoNetworkTier: The network tier to use when automatically reserving + // IP addresses. Must be one of: PREMIUM, STANDARD. If not specified, + // PREMIUM tier will be used. + // + // Possible values: + // "FIXED_STANDARD" - Public internet quality with fixed bandwidth. + // "PREMIUM" - High quality, Google-grade network tier, support for + // all networking products. + // "STANDARD" - Public internet quality, only limited support for + // other networking products. + // "STANDARD_OVERRIDES_FIXED_STANDARD" - (Output only) Temporary tier + // for FIXED_STANDARD when fixed standard tier is expired or not + // configured. + AutoNetworkTier string `json:"autoNetworkTier,omitempty"` + // DrainNatIps: A list of URLs of the IP resources to be drained. These // IPs must be valid static external IPs that have been assigned to the // NAT. These IPs should be used for updating/patching a NAT only. @@ -42782,10 +44350,9 @@ type RouterNat struct { // in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list // of Subnetworks are allowed to Nat (specified in the field subnetwork // below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. 
- // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or - // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any - // other Router.Nat section in any Router for this network in this - // region. + // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then + // there should not be any other Router.Nat section in any Router for + // this network in this region. // // Possible values: // "ALL_SUBNETWORKS_ALL_IP_RANGES" - All the IP ranges in every @@ -42817,7 +44384,7 @@ type RouterNat struct { // to 30s if not set. UdpIdleTimeoutSec int64 `json:"udpIdleTimeoutSec,omitempty"` - // ForceSendFields is a list of field names (e.g. "DrainNatIps") to + // ForceSendFields is a list of field names (e.g. "AutoNetworkTier") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -42825,12 +44392,13 @@ type RouterNat struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DrainNatIps") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AutoNetworkTier") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
NullFields []string `json:"-"` } @@ -43377,6 +44945,9 @@ type RoutersScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -44092,6 +45663,9 @@ type SecurityPoliciesAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -44292,6 +45866,9 @@ type SecurityPoliciesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. 
@@ -44462,6 +46039,20 @@ type SecurityPolicy struct { // compute#securityPolicyfor security policies Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // security policy, which is essentially a hash of the labels set used + // for optimistic locking. The fingerprint is initially generated by + // Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels. To see the latest fingerprint, make + // get() request to the security policy. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels for this resource. These can only be added or modified + // by the setLabels method. Each label key/value pair must comply with + // RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -44576,13 +46167,17 @@ func (s *SecurityPolicyAdaptiveProtectionConfig) MarshalJSON() ([]byte, error) { } // SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig: -// Configuration options for L7 DDoS detection. +// Configuration options for L7 DDoS detection. This field is only +// supported in Global Security Policies of type CLOUD_ARMOR. type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { - // Enable: If set to true, enables CAAP for L7 DDoS detection. + // Enable: If set to true, enables CAAP for L7 DDoS detection. This + // field is only supported in Global Security Policies of type + // CLOUD_ARMOR. Enable bool `json:"enable,omitempty"` // RuleVisibility: Rule visibility can be one of the following: STANDARD - // - opaque rules. (default) PREMIUM - transparent rules. + // - opaque rules. 
(default) PREMIUM - transparent rules. This field is + // only supported in Global Security Policies of type CLOUD_ARMOR. // // Possible values: // "PREMIUM" @@ -44792,6 +46387,9 @@ type SecurityPolicyListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -44905,7 +46503,8 @@ type SecurityPolicyRecaptchaOptionsConfig struct { // GOOGLE_RECAPTCHA under the security policy. The specified site key // needs to be created from the reCAPTCHA API. The user is responsible // for the validity of the specified site key. If not specified, a - // Google-managed site key is used. + // Google-managed site key is used. This field is only supported in + // Global Security Policies of type CLOUD_ARMOR. RedirectSiteKey string `json:"redirectSiteKey,omitempty"` // ForceSendFields is a list of field names (e.g. "RedirectSiteKey") to @@ -44973,10 +46572,11 @@ type SecurityPolicyRule struct { // rate_limit_options to be set. - redirect: redirect to a different // target. This can either be an internal reCAPTCHA redirect, or an // external URL-based redirect via a 302 response. Parameters for this - // action can be configured via redirectOptions. - throttle: limit - // client traffic to the configured threshold. Configure parameters for - // this action in rateLimitOptions. Requires rate_limit_options to be - // set for this. + // action can be configured via redirectOptions. This action is only + // supported in Global Security Policies of type CLOUD_ARMOR. - + // throttle: limit client traffic to the configured threshold. 
Configure + // parameters for this action in rateLimitOptions. Requires + // rate_limit_options to be set for this. Action string `json:"action,omitempty"` // Description: An optional description of this resource. Provide this @@ -44984,7 +46584,8 @@ type SecurityPolicyRule struct { Description string `json:"description,omitempty"` // HeaderAction: Optional, additional actions that are performed on - // headers. + // headers. This field is only supported in Global Security Policies of + // type CLOUD_ARMOR. HeaderAction *SecurityPolicyRuleHttpHeaderAction `json:"headerAction,omitempty"` // Kind: [Output only] Type of the resource. Always @@ -45015,7 +46616,8 @@ type SecurityPolicyRule struct { RateLimitOptions *SecurityPolicyRuleRateLimitOptions `json:"rateLimitOptions,omitempty"` // RedirectOptions: Parameters defining the redirect action. Cannot be - // specified for any other actions. + // specified for any other actions. This field is only supported in + // Global Security Policies of type CLOUD_ARMOR. RedirectOptions *SecurityPolicyRuleRedirectOptions `json:"redirectOptions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -45115,7 +46717,13 @@ type SecurityPolicyRuleMatcher struct { // Expr: User defined CEVAL expression. A CEVAL expression is used to // specify match criteria such as origin.ip, source.region_code and - // contents in the request header. + // contents in the request header. Expressions containing + // `evaluateThreatIntelligence` require Cloud Armor Managed Protection + // Plus tier and are not supported in Edge Policies nor in Regional + // Policies. Expressions containing + // `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor + // Managed Protection Plus tier and are only supported in Global + // Security Policies. Expr *Expr `json:"expr,omitempty"` // VersionedExpr: Preconfigured versioned expression. 
If this field is @@ -45354,6 +46962,13 @@ type SecurityPolicyRuleRateLimitOptions struct { // "XFF_IP" EnforceOnKey string `json:"enforceOnKey,omitempty"` + // EnforceOnKeyConfigs: If specified, any combination of values of + // enforce_on_key_type/enforce_on_key_name is treated as the key on + // which ratelimit threshold/action is enforced. You can specify up to 3 + // enforce_on_key_configs. If enforce_on_key_configs is specified, + // enforce_on_key must not be specified. + EnforceOnKeyConfigs []*SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig `json:"enforceOnKeyConfigs,omitempty"` + // EnforceOnKeyName: Rate limit key name applicable only for the // following key types: HTTP_HEADER -- Name of the HTTP header whose // value is taken as the key value. HTTP_COOKIE -- Name of the HTTP @@ -45365,12 +46980,14 @@ type SecurityPolicyRuleRateLimitOptions struct { // response code, or redirect to a different endpoint. Valid options are // `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, // and 502, and `redirect`, where the redirect parameters come from - // `exceedRedirectOptions` below. + // `exceedRedirectOptions` below. The `redirect` action is only + // supported in Global Security Policies of type CLOUD_ARMOR. ExceedAction string `json:"exceedAction,omitempty"` // ExceedRedirectOptions: Parameters defining the redirect action that // is used as the exceed action. Cannot be specified if the exceed - // action is not redirect. + // action is not redirect. This field is only supported in Global + // Security Policies of type CLOUD_ARMOR. ExceedRedirectOptions *SecurityPolicyRuleRedirectOptions `json:"exceedRedirectOptions,omitempty"` // RateLimitThreshold: Threshold at which to begin ratelimiting. 
@@ -45400,6 +47017,71 @@ func (s *SecurityPolicyRuleRateLimitOptions) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { + // EnforceOnKeyName: Rate limit key name applicable only for the + // following key types: HTTP_HEADER -- Name of the HTTP header whose + // value is taken as the key value. HTTP_COOKIE -- Name of the HTTP + // cookie whose value is taken as the key value. + EnforceOnKeyName string `json:"enforceOnKeyName,omitempty"` + + // EnforceOnKeyType: Determines the key to enforce the + // rate_limit_threshold on. Possible values are: - ALL: A single rate + // limit threshold is applied to all the requests matching this rule. + // This is the default value if "enforceOnKeyConfigs" is not configured. + // - IP: The source IP address of the request is the key. Each IP has + // this limit enforced separately. - HTTP_HEADER: The value of the HTTP + // header whose name is configured under "enforceOnKeyName". The key + // value is truncated to the first 128 bytes of the header value. If no + // such header is present in the request, the key type defaults to ALL. + // - XFF_IP: The first IP address (i.e. the originating client IP + // address) specified in the list of IPs under X-Forwarded-For HTTP + // header. If no such header is present or the value is not a valid IP, + // the key defaults to the source IP address of the request i.e. key + // type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is + // configured under "enforceOnKeyName". The key value is truncated to + // the first 128 bytes of the cookie value. If no such cookie is present + // in the request, the key type defaults to ALL. - HTTP_PATH: The URL + // path of the HTTP request. The key value is truncated to the first 128 + // bytes. - SNI: Server name indication in the TLS session of the HTTPS + // request. The key value is truncated to the first 128 bytes. 
The key + // type defaults to ALL on a HTTP session. - REGION_CODE: The + // country/region from which the request originates. + // + // Possible values: + // "ALL" + // "HTTP_COOKIE" + // "HTTP_HEADER" + // "HTTP_PATH" + // "IP" + // "REGION_CODE" + // "SNI" + // "XFF_IP" + EnforceOnKeyType string `json:"enforceOnKeyType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EnforceOnKeyName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EnforceOnKeyName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type SecurityPolicyRuleRateLimitOptionsThreshold struct { // Count: Number of HTTP(S) requests for calculating the threshold. Count int64 `json:"count,omitempty"` @@ -45473,7 +47155,7 @@ type SecuritySettings struct { // should authenticate with this service's backends. clientTlsPolicy // only applies to a global BackendService with the loadBalancingScheme // set to INTERNAL_SELF_MANAGED. If left blank, communications are not - // encrypted. 
Note: This field currently has no impact. + // encrypted. ClientTlsPolicy string `json:"clientTlsPolicy,omitempty"` // SubjectAltNames: Optional. A list of Subject Alternative Names (SANs) @@ -45488,8 +47170,7 @@ type SecuritySettings struct { // Public Key Infrastructure which provisions server identities. Only // applies to a global BackendService with loadBalancingScheme set to // INTERNAL_SELF_MANAGED. Only applies when BackendService has an - // attached clientTlsPolicy with clientCertificate (mTLS mode). Note: - // This field currently has no impact. + // attached clientTlsPolicy with clientCertificate (mTLS mode). SubjectAltNames []string `json:"subjectAltNames,omitempty"` // ForceSendFields is a list of field names (e.g. "ClientTlsPolicy") to @@ -45638,7 +47319,7 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { // attachment represents a service that a producer has exposed. It // encapsulates the load balancer which fronts the service runs and a // list of NAT IP ranges that the producers uses to represent the -// consumers connecting to the service. next tag = 20 +// consumers connecting to the service. type ServiceAttachment struct { // ConnectedEndpoints: [Output Only] An array of connections for all the // consumers connected to this service attachment. @@ -45723,6 +47404,18 @@ type ServiceAttachment struct { // the PSC service attachment. PscServiceAttachmentId *Uint128 `json:"pscServiceAttachmentId,omitempty"` + // ReconcileConnections: This flag determines whether a consumer + // accept/reject list change can reconcile the statuses of existing + // ACCEPTED or REJECTED PSC endpoints. - If false, connection policy + // update will only affect existing PENDING PSC endpoints. Existing + // ACCEPTED/REJECTED endpoints will remain untouched regardless how the + // connection policy is modified . - If true, update will affect both + // PENDING and ACCEPTED/REJECTED PSC endpoints. 
For example, an ACCEPTED + // PSC endpoint will be moved to REJECTED if its project is added to the + // reject list. For newly created service attachment, this boolean + // defaults to true. + ReconcileConnections bool `json:"reconcileConnections,omitempty"` + // Region: [Output Only] URL of the region where the service attachment // resides. This field applies only to the region resource. You must // specify this field as part of the HTTP request URL. It is not @@ -45850,6 +47543,9 @@ type ServiceAttachmentAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -46121,6 +47817,9 @@ type ServiceAttachmentListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -46290,6 +47989,9 @@ type ServiceAttachmentsScopedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -46925,6 +48627,9 @@ type SnapshotListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -47403,6 +49108,9 @@ type SslCertificateAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -47592,6 +49300,9 @@ type SslCertificateListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -47855,6 +49566,9 @@ type SslCertificatesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -48049,6 +49763,9 @@ type SslPoliciesAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -48237,6 +49954,9 @@ type SslPoliciesListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -48434,6 +50154,9 @@ type SslPoliciesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -48685,6 +50408,9 @@ type SslPolicyWarnings struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -48933,8 +50659,8 @@ type Subnetwork struct { // If this field is not explicitly set, it will not appear in get // listings. If not set the default behavior is determined by the org // policy, if there is no org policy specified, then it will default to - // disabled. This field isn't supported with the purpose field set to - // INTERNAL_HTTPS_LOAD_BALANCER. + // disabled. 
This field isn't supported if the subnet purpose field is + // set to REGIONAL_MANAGED_PROXY. EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` // ExternalIpv6Prefix: The external IPv6 address range that is owned by @@ -49027,12 +50753,20 @@ type Subnetwork struct { PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` // Purpose: The purpose of the resource. This field can be either - // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with - // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created - // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If - // unspecified, the purpose defaults to PRIVATE_RFC_1918. The - // enableFlowLogs field isn't supported with the purpose field set to - // INTERNAL_HTTPS_LOAD_BALANCER. + // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or + // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for + // user-created subnets or subnets that are automatically created in + // auto mode networks. A subnet with purpose set to + // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved + // for regional Envoy-based load balancers. A subnet with purpose set to + // PRIVATE_SERVICE_CONNECT is used to publish services using Private + // Service Connect. A subnet with purpose set to + // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used + // only by regional internal HTTP(S) load balancers. Note that + // REGIONAL_MANAGED_PROXY is the preferred setting for all regional + // Envoy load balancers. If unspecified, the subnet purpose defaults to + // PRIVATE. The enableFlowLogs field isn't supported if the subnet + // purpose field is set to REGIONAL_MANAGED_PROXY. // // Possible values: // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal @@ -49051,9 +50785,9 @@ type Subnetwork struct { Region string `json:"region,omitempty"` // Role: The role of subnetwork. 
Currently, this field is only used when - // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to - // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being - // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one + // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or + // BACKUP. An ACTIVE subnetwork is one that is currently being used for + // Envoy-based load balancers in a region. A BACKUP subnetwork is one // that is ready to be promoted to ACTIVE or is currently draining. This // field can be updated with a patch request. // @@ -49210,6 +50944,9 @@ type SubnetworkAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -49399,6 +51136,9 @@ type SubnetworkListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -49528,6 +51268,8 @@ type SubnetworkLogConfig struct { // field is not explicitly set, it will not appear in get listings. 
If // not set the default behavior is determined by the org policy, if // there is no org policy specified, then it will default to disabled. + // Flow logging isn't supported if the subnet purpose field is set to + // REGIONAL_MANAGED_PROXY. Enable bool `json:"enable,omitempty"` // FilterExpr: Can only be specified if VPC flow logs for this @@ -49727,6 +51469,9 @@ type SubnetworksScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -50205,6 +51950,9 @@ type TargetGrpcProxyListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -50374,6 +52122,9 @@ type TargetHttpProxiesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
// "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -50509,6 +52260,15 @@ type TargetHttpProxy struct { // to retrieve the TargetHttpProxy. Fingerprint string `json:"fingerprint,omitempty"` + // HttpKeepAliveTimeoutSec: Specifies how long to keep a connection + // open, after completing a response, while there is no matching traffic + // (in seconds). If an HTTP keep-alive is not specified, a default value + // (610 seconds) will be used. For Global external HTTP(S) load + // balancer, the minimum allowed value is 5 seconds and the maximum + // allowed value is 1200 seconds. For Global external HTTP(S) load + // balancer (classic), this option is not available publicly. + HttpKeepAliveTimeoutSec int64 `json:"httpKeepAliveTimeoutSec,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -50714,6 +52474,9 @@ type TargetHttpProxyListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -50883,6 +52646,9 @@ type TargetHttpsProxiesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
// "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -50992,7 +52758,9 @@ func (s *TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) type TargetHttpsProxiesSetCertificateMapRequest struct { // CertificateMap: URL of the Certificate Map to associate with this - // TargetHttpsProxy. + // TargetHttpsProxy. Accepted format is + // //certificatemanager.googleapis.com/projects/{project + // }/locations/{location}/certificateMaps/{resourceName}. CertificateMap string `json:"certificateMap,omitempty"` // ForceSendFields is a list of field names (e.g. "CertificateMap") to @@ -51109,7 +52877,9 @@ type TargetHttpsProxy struct { // CertificateMap: URL of a certificate map that identifies a // certificate map associated with the given target proxy. This field // can only be set for global target proxies. If set, sslCertificates - // will be ignored. + // will be ignored. Accepted format is + // //certificatemanager.googleapis.com/projects/{project + // }/locations/{location}/certificateMaps/{resourceName}. CertificateMap string `json:"certificateMap,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -51129,6 +52899,15 @@ type TargetHttpsProxy struct { // to retrieve the TargetHttpsProxy. Fingerprint string `json:"fingerprint,omitempty"` + // HttpKeepAliveTimeoutSec: Specifies how long to keep a connection + // open, after completing a response, while there is no matching traffic + // (in seconds). If an HTTP keep-alive is not specified, a default value + // (610 seconds) will be used. For Global external HTTP(S) load + // balancer, the minimum allowed value is 5 seconds and the maximum + // allowed value is 1200 seconds. For Global external HTTP(S) load + // balancer (classic), this option is not available publicly. 
+ HttpKeepAliveTimeoutSec int64 `json:"httpKeepAliveTimeoutSec,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -51187,9 +52966,11 @@ type TargetHttpsProxy struct { // networksecurity.ServerTlsPolicy resource that describes how the proxy // should authenticate inbound traffic. serverTlsPolicy only applies to // a global TargetHttpsProxy attached to globalForwardingRules with the - // loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, - // communications are not encrypted. Note: This field currently has no - // impact. + // loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or + // EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are + // accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, + // EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy + // documentation. If left blank, communications are not encrypted. ServerTlsPolicy string `json:"serverTlsPolicy,omitempty"` // SslCertificates: URLs to SslCertificate resources that are used to @@ -51326,6 +53107,9 @@ type TargetHttpsProxyAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -51516,6 +53300,9 @@ type TargetHttpsProxyListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -51796,6 +53583,9 @@ type TargetInstanceAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -51985,6 +53775,9 @@ type TargetInstanceListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -52153,6 +53946,9 @@ type TargetInstancesScopedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -52502,6 +54298,9 @@ type TargetPoolAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -52726,6 +54525,9 @@ type TargetPoolListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -53012,6 +54814,9 @@ type TargetPoolsScopedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -53175,7 +54980,9 @@ func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) type TargetSslProxiesSetCertificateMapRequest struct { // CertificateMap: URL of the Certificate Map to associate with this - // TargetSslProxy. + // TargetSslProxy. Accepted format is + // //certificatemanager.googleapis.com/projects/{project + // }/locations/{location}/certificateMaps/{resourceName}. CertificateMap string `json:"certificateMap,omitempty"` // ForceSendFields is a list of field names (e.g. "CertificateMap") to @@ -53273,7 +55080,9 @@ type TargetSslProxy struct { // CertificateMap: URL of a certificate map that identifies a // certificate map associated with the given target proxy. This field // can only be set for global target proxies. If set, sslCertificates - // will be ignored. + // will be ignored. Accepted format is + // //certificatemanager.googleapis.com/projects/{project + // }/locations/{location}/certificateMaps/{resourceName}. CertificateMap string `json:"certificateMap,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -53438,6 +55247,9 @@ type TargetSslProxyListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
// "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -53606,6 +55418,9 @@ type TargetTcpProxiesScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -53947,6 +55762,9 @@ type TargetTcpProxyAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -54136,6 +55954,9 @@ type TargetTcpProxyListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -54268,6 +56089,21 @@ type TargetVpnGateway struct { // for target VPN gateways. 
Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // TargetVpnGateway, which is essentially a hash of the labels set used + // for optimistic locking. The fingerprint is initially generated by + // Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels, otherwise the request will fail + // with error 412 conditionNotMet. To see the latest fingerprint, make a + // get() request to retrieve a TargetVpnGateway. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels for this resource. These can only be added or modified + // by the setLabels method. Each label key/value pair must comply with + // RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -54417,6 +56253,9 @@ type TargetVpnGatewayAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -54607,6 +56446,9 @@ type TargetVpnGatewayListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -54776,6 +56618,9 @@ type TargetVpnGatewaysScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -55255,6 +57100,9 @@ type UrlMapListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -55608,6 +57456,9 @@ type UrlMapsAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. 
// "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -55775,6 +57626,9 @@ type UrlMapsScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -55980,6 +57834,22 @@ type UrlRewrite struct { // characters. PathPrefixRewrite string `json:"pathPrefixRewrite,omitempty"` + // PathTemplateRewrite: If specified, the pattern rewrites the URL path + // (based on the :path header) using the HTTP template syntax. A + // corresponding path_template_match must be specified. Any template + // variables must exist in the path_template_match field. - -At least + // one variable must be specified in the path_template_match field - You + // can omit variables from the rewritten URL - The * and ** operators + // cannot be matched unless they have a corresponding variable name - + // e.g. {format=*} or {var=**}. For example, a path_template_match of + // /static/{format=**} could be rewritten as /static/content/{format} to + // prefix /content to the URL. 
Variables can also be re-ordered in a + // rewrite, so that /{country}/{format}/{suffix=**} can be rewritten as + // /content/{format}/{country}/{suffix}. At least one non-empty + // routeRules[].matchRules[].path_template_match is required. Only one + // of path_prefix_rewrite or path_template_rewrite may be specified. + PathTemplateRewrite string `json:"pathTemplateRewrite,omitempty"` + // ForceSendFields is a list of field names (e.g. "HostRewrite") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -56033,12 +57903,20 @@ type UsableSubnetwork struct { Network string `json:"network,omitempty"` // Purpose: The purpose of the resource. This field can be either - // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with - // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created - // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If - // unspecified, the purpose defaults to PRIVATE_RFC_1918. The - // enableFlowLogs field isn't supported with the purpose field set to - // INTERNAL_HTTPS_LOAD_BALANCER. + // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or + // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for + // user-created subnets or subnets that are automatically created in + // auto mode networks. A subnet with purpose set to + // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved + // for regional Envoy-based load balancers. A subnet with purpose set to + // PRIVATE_SERVICE_CONNECT is used to publish services using Private + // Service Connect. A subnet with purpose set to + // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used + // only by regional internal HTTP(S) load balancers. Note that + // REGIONAL_MANAGED_PROXY is the preferred setting for all regional + // Envoy load balancers. If unspecified, the subnet purpose defaults to + // PRIVATE. 
The enableFlowLogs field isn't supported if the subnet + // purpose field is set to REGIONAL_MANAGED_PROXY. // // Possible values: // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal @@ -56053,9 +57931,9 @@ type UsableSubnetwork struct { Purpose string `json:"purpose,omitempty"` // Role: The role of subnetwork. Currently, this field is only used when - // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to - // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being - // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one + // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or + // BACKUP. An ACTIVE subnetwork is one that is currently being used for + // Envoy-based load balancers in a region. A BACKUP subnetwork is one // that is ready to be promoted to ACTIVE or is currently draining. This // field can be updated with a patch request. // @@ -56228,6 +58106,9 @@ type UsableSubnetworksAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -56609,6 +58490,9 @@ type VmEndpointNatMappingsListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. 
// "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -56900,6 +58784,9 @@ type VpnGatewayAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -57089,6 +58976,9 @@ type VpnGatewayListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -57284,7 +59174,7 @@ type VpnGatewayStatusTunnel struct { // PeerGatewayInterface: The peer gateway interface this VPN tunnel is // connected to, the peer gateway could either be an external VPN - // gateway or GCP VPN gateway. + // gateway or a Google Cloud VPN gateway. PeerGatewayInterface int64 `json:"peerGatewayInterface,omitempty"` // TunnelUrl: URL reference to the VPN tunnel. @@ -57317,8 +59207,8 @@ func (s *VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { // VpnGatewayStatusVpnConnection: A VPN connection contains all VPN // tunnels connected from this VpnGateway to the same peer gateway. 
The -// peer gateway could either be a external VPN gateway or GCP VPN -// gateway. +// peer gateway could either be an external VPN gateway or a Google +// Cloud VPN gateway. type VpnGatewayStatusVpnConnection struct { // PeerExternalGateway: URL reference to the peer external VPN gateways // to which the VPN tunnels in this VPN connection are connected. This @@ -57500,6 +59390,9 @@ type VpnGatewaysScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -57635,6 +59528,21 @@ type VpnTunnel struct { // VPN tunnels. Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // VpnTunnel, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels, otherwise the request will fail with error + // 412 conditionNotMet. To see the latest fingerprint, make a get() + // request to retrieve a VpnTunnel. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels for this resource. These can only be added or modified + // by the setLabels method. Each label key/value pair must comply with + // RFC1035. Label values may be empty. 
+ Labels map[string]string `json:"labels,omitempty"` + // LocalTrafficSelector: Local traffic selector to use when establishing // the VPN tunnel with the peer VPN gateway. The value should be a CIDR // formatted string, for example: 192.168.0.0/16. The ranges must be @@ -57873,6 +59781,9 @@ type VpnTunnelAggregatedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -58062,6 +59973,9 @@ type VpnTunnelListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -58229,6 +60143,9 @@ type VpnTunnelsScopedListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. 
@@ -58546,6 +60463,9 @@ type XpnHostListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -58848,6 +60768,9 @@ type ZoneListWarning struct { // HTTP/HTTPS/HTTP2. // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is // not assigned to an instance on the network. @@ -60906,6 +62829,194 @@ func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) erro } } +// method id "compute.addresses.move": + +type AddressesMoveCall struct { + s *Service + project string + region string + address string + regionaddressesmoverequest *RegionAddressesMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Move: Moves the specified address resource. +// +// - address: Name of the address resource to move. +// - project: Source project ID which the Address is moved from. +// - region: Name of the region for this request. 
+func (r *AddressesService) Move(project string, region string, address string, regionaddressesmoverequest *RegionAddressesMoveRequest) *AddressesMoveCall { + c := &AddressesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + c.regionaddressesmoverequest = regionaddressesmoverequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *AddressesMoveCall) RequestId(requestId string) *AddressesMoveCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesMoveCall) Fields(s ...googleapi.Field) *AddressesMoveCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesMoveCall) Context(ctx context.Context) *AddressesMoveCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AddressesMoveCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesMoveCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionaddressesmoverequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses/{address}/move") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.move" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Moves the specified address resource.", + // "flatPath": "projects/{project}/regions/{region}/addresses/{address}/move", + // "httpMethod": "POST", + // "id": "compute.addresses.move", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to move.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Source project ID which the Address is moved from.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/addresses/{address}/move", + // "request": { + // "$ref": "RegionAddressesMoveRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.addresses.setLabels": type AddressesSetLabelsCall struct { @@ -68120,41 +70231,27 @@ func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggrega } } -// method id "compute.disks.createSnapshot": +// method id "compute.disks.bulkInsert": -type DisksCreateSnapshotCall struct { - s *Service - project string - zone string - disk string - snapshot *Snapshot - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksBulkInsertCall struct { + s *Service + project string + zone string + bulkinsertdiskresource *BulkInsertDiskResource + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// CreateSnapshot: Creates a snapshot of a specified persistent disk. -// For regular snapshot creation, consider using snapshots.insert -// instead, as that method supports more features, such as creating -// snapshots in a project different from the source disk project. +// BulkInsert: Bulk create a set of disks. // -// - disk: Name of the persistent disk to snapshot. // - project: Project ID for this request. 
// - zone: The name of the zone for this request. -func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { - c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *DisksService) BulkInsert(project string, zone string, bulkinsertdiskresource *BulkInsertDiskResource) *DisksBulkInsertCall { + c := &DisksBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.disk = disk - c.snapshot = snapshot - return c -} - -// GuestFlush sets the optional parameter "guestFlush": [Input Only] -// Whether to attempt an application consistent snapshot by informing -// the OS to prepare for the snapshot process. -func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { - c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) + c.bulkinsertdiskresource = bulkinsertdiskresource return c } @@ -68169,7 +70266,7 @@ func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapsh // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { +func (c *DisksBulkInsertCall) RequestId(requestId string) *DisksBulkInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68177,7 +70274,7 @@ func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapsh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { +func (c *DisksBulkInsertCall) Fields(s ...googleapi.Field) *DisksBulkInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68185,21 +70282,21 @@ func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnaps // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { +func (c *DisksBulkInsertCall) Context(ctx context.Context) *DisksBulkInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksCreateSnapshotCall) Header() http.Header { +func (c *DisksBulkInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksBulkInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -68207,14 +70304,14 @@ func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertdiskresource) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls := googleapi.ResolveRelative(c.s.BasePath, 
"projects/{project}/zones/{zone}/disks/bulkInsert") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -68224,19 +70321,209 @@ func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.createSnapshot" call. +// Do executes the "compute.disks.bulkInsert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Bulk create a set of disks.", + // "flatPath": "projects/{project}/zones/{zone}/disks/bulkInsert", + // "httpMethod": "POST", + // "id": "compute.disks.bulkInsert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/bulkInsert", + // "request": { + // "$ref": "BulkInsertDiskResource" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.createSnapshot": + +type DisksCreateSnapshotCall struct { + s *Service + project string + zone string + disk string + snapshot *Snapshot + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// CreateSnapshot: Creates a snapshot of a specified persistent disk. +// For regular snapshot creation, consider using snapshots.insert +// instead, as that method supports more features, such as creating +// snapshots in a project different from the source disk project. +// +// - disk: Name of the persistent disk to snapshot. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { + c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.snapshot = snapshot + return c +} + +// GuestFlush sets the optional parameter "guestFlush": [Input Only] +// Whether to attempt an application consistent snapshot by informing +// the OS to prepare for the snapshot process. 
+func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { + c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *DisksCreateSnapshotCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.createSnapshot" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70081,38 +72368,54 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.disks.testIamPermissions": +// method id "compute.disks.startAsyncReplication": -type DisksTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksStartAsyncReplicationCall struct { + s *Service + project string + zone string + disk string + disksstartasyncreplicationrequest *DisksStartAsyncReplicationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. +// StartAsyncReplication: Starts asynchronous replication. Must be +// invoked on the primary disk. // +// - disk: The name of the persistent disk. // - project: Project ID for this request. -// - resource: Name or id of the resource for this request. // - zone: The name of the zone for this request. 
-func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { - c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *DisksService) StartAsyncReplication(project string, zone string, disk string, disksstartasyncreplicationrequest *DisksStartAsyncReplicationRequest) *DisksStartAsyncReplicationCall { + c := &DisksStartAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.disk = disk + c.disksstartasyncreplicationrequest = disksstartasyncreplicationrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *DisksStartAsyncReplicationCall) RequestId(requestId string) *DisksStartAsyncReplicationCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { +func (c *DisksStartAsyncReplicationCall) Fields(s ...googleapi.Field) *DisksStartAsyncReplicationCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70120,21 +72423,21 @@ func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIam // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { +func (c *DisksStartAsyncReplicationCall) Context(ctx context.Context) *DisksStartAsyncReplicationCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksTestIamPermissionsCall) Header() http.Header { +func (c *DisksStartAsyncReplicationCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksStartAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -70142,14 +72445,14 @@ func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksstartasyncreplicationrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, 
"projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -70157,21 +72460,21 @@ func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.disks.startAsyncReplication" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksStartAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70190,7 +72493,7 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70202,16 +72505,23 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "description": "Starts asynchronous replication. Must be invoked on the primary disk.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication", // "httpMethod": "POST", - // "id": "compute.disks.testIamPermissions", + // "id": "compute.disks.startAsyncReplication", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "disk" // ], // "parameters": { + // "disk": { + // "description": "The name of the persistent disk.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -70219,11 +72529,9 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, // "zone": { @@ -70234,55 +72542,44 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "path": "projects/{project}/zones/{zone}/disks/{disk}/startAsyncReplication", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "DisksStartAsyncReplicationRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.disks.update": +// method id "compute.disks.stopAsyncReplication": -type DisksUpdateCall struct { +type DisksStopAsyncReplicationCall struct { s *Service project string zone string disk string - disk2 *Disk urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Update: Updates the specified disk with the data included in the -// request. The update is performed only on selected fields included as -// part of update-mask. Only the following fields can be modified: -// user_license. 
+// StopAsyncReplication: Stops asynchronous replication. Can be invoked +// either on the primary or on the secondary disk. // -// - disk: The disk name for this request. +// - disk: The name of the persistent disk. // - project: Project ID for this request. // - zone: The name of the zone for this request. -func (r *DisksService) Update(project string, zone string, disk string, disk2 *Disk) *DisksUpdateCall { - c := &DisksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *DisksService) StopAsyncReplication(project string, zone string, disk string) *DisksStopAsyncReplicationCall { + c := &DisksStopAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.disk = disk - c.disk2 = disk2 - return c -} - -// Paths sets the optional parameter "paths": -func (c *DisksUpdateCall) Paths(paths ...string) *DisksUpdateCall { - c.urlParams_.SetMulti("paths", append([]string{}, paths...)) return c } @@ -70297,22 +72594,15 @@ func (c *DisksUpdateCall) Paths(paths ...string) *DisksUpdateCall { // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *DisksUpdateCall) RequestId(requestId string) *DisksUpdateCall { +func (c *DisksStopAsyncReplicationCall) RequestId(requestId string) *DisksStopAsyncReplicationCall { c.urlParams_.Set("requestId", requestId) return c } -// UpdateMask sets the optional parameter "updateMask": update_mask -// indicates fields to be updated as part of this request. -func (c *DisksUpdateCall) UpdateMask(updateMask string) *DisksUpdateCall { - c.urlParams_.Set("updateMask", updateMask) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *DisksUpdateCall) Fields(s ...googleapi.Field) *DisksUpdateCall { +func (c *DisksStopAsyncReplicationCall) Fields(s ...googleapi.Field) *DisksStopAsyncReplicationCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70320,21 +72610,21 @@ func (c *DisksUpdateCall) Fields(s ...googleapi.Field) *DisksUpdateCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksUpdateCall) Context(ctx context.Context) *DisksUpdateCall { +func (c *DisksStopAsyncReplicationCall) Context(ctx context.Context) *DisksStopAsyncReplicationCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksUpdateCall) Header() http.Header { +func (c *DisksStopAsyncReplicationCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksStopAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -70342,16 +72632,11 @@ func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -70364,14 +72649,563 @@ func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.update" call. +// Do executes the "compute.disks.stopAsyncReplication" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksStopAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stops asynchronous replication. 
Can be invoked either on the primary or on the secondary disk.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication", + // "httpMethod": "POST", + // "id": "compute.disks.stopAsyncReplication", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The name of the persistent disk.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{disk}/stopAsyncReplication", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.stopGroupAsyncReplication": + +type DisksStopGroupAsyncReplicationCall struct { + s *Service + project string + zone string + disksstopgroupasyncreplicationresource *DisksStopGroupAsyncReplicationResource + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// StopGroupAsyncReplication: Stops asynchronous replication for a +// consistency group of disks. Can be invoked either in the primary or +// secondary scope. +// +// - project: Project ID for this request. +// - zone: The name of the zone for this request. This must be the zone +// of the primary or secondary disks in the consistency group. +func (r *DisksService) StopGroupAsyncReplication(project string, zone string, disksstopgroupasyncreplicationresource *DisksStopGroupAsyncReplicationResource) *DisksStopGroupAsyncReplicationCall { + c := &DisksStopGroupAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disksstopgroupasyncreplicationresource = disksstopgroupasyncreplicationresource + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *DisksStopGroupAsyncReplicationCall) RequestId(requestId string) *DisksStopGroupAsyncReplicationCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksStopGroupAsyncReplicationCall) Fields(s ...googleapi.Field) *DisksStopGroupAsyncReplicationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksStopGroupAsyncReplicationCall) Context(ctx context.Context) *DisksStopGroupAsyncReplicationCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *DisksStopGroupAsyncReplicationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksStopGroupAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksstopgroupasyncreplicationresource) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.stopGroupAsyncReplication" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksStopGroupAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stops asynchronous replication for a consistency group of disks. Can be invoked either in the primary or secondary scope.", + // "flatPath": "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication", + // "httpMethod": "POST", + // "id": "compute.disks.stopGroupAsyncReplication", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request. This must be the zone of the primary or secondary disks in the consistency group.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/stopGroupAsyncReplication", + // "request": { + // "$ref": "DisksStopGroupAsyncReplicationResource" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.testIamPermissions": + +type DisksTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +// - zone: The name of the zone for this request. +func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { + c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.disks.testIamPermissions", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + 
// }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.disks.update": + +type DisksUpdateCall struct { + s *Service + project string + zone string + disk string + disk2 *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the specified disk with the data included in the +// request. The update is performed only on selected fields included as +// part of update-mask. Only the following fields can be modified: +// user_license. +// +// - disk: The disk name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *DisksService) Update(project string, zone string, disk string, disk2 *Disk) *DisksUpdateCall { + c := &DisksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.disk2 = disk2 + return c +} + +// Paths sets the optional parameter "paths": +func (c *DisksUpdateCall) Paths(paths ...string) *DisksUpdateCall { + c.urlParams_.SetMulti("paths", append([]string{}, paths...)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *DisksUpdateCall) RequestId(requestId string) *DisksUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// UpdateMask sets the optional parameter "updateMask": update_mask +// indicates fields to be updated as part of this request. +func (c *DisksUpdateCall) UpdateMask(updateMask string) *DisksUpdateCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksUpdateCall) Fields(s ...googleapi.Field) *DisksUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksUpdateCall) Context(ctx context.Context) *DisksUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *DisksUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78150,6 +80984,183 @@ func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList } } +// method id "compute.globalAddresses.move": + +type GlobalAddressesMoveCall struct { + s *Service + project string + address string + globaladdressesmoverequest *GlobalAddressesMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Move: Moves the specified address resource from one project to +// another project. +// +// - address: Name of the address resource to move. +// - project: Source project ID which the Address is moved from. +func (r *GlobalAddressesService) Move(project string, address string, globaladdressesmoverequest *GlobalAddressesMoveRequest) *GlobalAddressesMoveCall { + c := &GlobalAddressesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.address = address + c.globaladdressesmoverequest = globaladdressesmoverequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). 
+func (c *GlobalAddressesMoveCall) RequestId(requestId string) *GlobalAddressesMoveCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalAddressesMoveCall) Fields(s ...googleapi.Field) *GlobalAddressesMoveCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalAddressesMoveCall) Context(ctx context.Context) *GlobalAddressesMoveCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalAddressesMoveCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GlobalAddressesMoveCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globaladdressesmoverequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}/move") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.globalAddresses.move" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalAddressesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Moves the specified address resource from one project to another project.", + // "flatPath": "projects/{project}/global/addresses/{address}/move", + // "httpMethod": "POST", + // "id": "compute.globalAddresses.move", + // "parameterOrder": [ + // "project", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to move.", + // "location": "path", + // "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Source project ID which the Address is moved from.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/addresses/{address}/move", + // "request": { + // "$ref": "GlobalAddressesMoveRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.globalAddresses.setLabels": type GlobalAddressesSetLabelsCall struct { @@ -104089,164 +107100,6 @@ func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, return c } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.simulateMaintenanceEvent" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Simulates a host maintenance event on a VM. 
For more information, see Simulate a host maintenance event.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", - // "httpMethod": "POST", - // "id": "compute.instances.simulateMaintenanceEvent", - // "parameterOrder": [ - // "project", - // "zone", - // "instance" - // ], - // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.instances.start": - -type InstancesStartCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Start: Starts an instance that was stopped using the instances().stop -// method. For more information, see Restart an instance. -// -// - instance: Name of the instance resource to start. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. 
-func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { - c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - return c -} - // RequestId sets the optional parameter "requestId": An optional // request ID to identify requests. Specify a unique request ID so that // if you must retry your request, the server will know to ignore the @@ -104258,7 +107111,7 @@ func (r *InstancesService) Start(project string, zone string, instance string) * // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { +func (c *InstancesSimulateMaintenanceEventCall) RequestId(requestId string) *InstancesSimulateMaintenanceEventCall { c.urlParams_.Set("requestId", requestId) return c } @@ -104266,7 +107119,7 @@ func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { +func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -104274,21 +107127,21 @@ func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { +func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStartCall) Header() http.Header { +func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -104298,7 +107151,7 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/start") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -104313,14 +107166,193 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.start" call. +// Do executes the "compute.instances.simulateMaintenanceEvent" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Simulates a host maintenance event on a VM. 
For more information, see Simulate a host maintenance event.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "httpMethod": "POST", + // "id": "compute.instances.simulateMaintenanceEvent", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.start": + +type InstancesStartCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Start: Starts an instance that was stopped using the instances().stop +// method. For more information, see Restart an instance. +// +// - instance: Name of the instance resource to start. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { + c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. 
If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesStartCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/start") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.start" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -108151,6 +111183,449 @@ func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*Inter } } +// method id "compute.interconnectRemoteLocations.get": + +type InterconnectRemoteLocationsGetCall struct { + s *Service + project string + interconnectRemoteLocation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the details for the specified interconnect remote +// location. Gets a list of available interconnect remote locations by +// making a list() request. +// +// - interconnectRemoteLocation: Name of the interconnect remote +// location to return. +// - project: Project ID for this request. 
+func (r *InterconnectRemoteLocationsService) Get(project string, interconnectRemoteLocation string) *InterconnectRemoteLocationsGetCall { + c := &InterconnectRemoteLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnectRemoteLocation = interconnectRemoteLocation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectRemoteLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectRemoteLocationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectRemoteLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectRemoteLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectRemoteLocationsGetCall) Context(ctx context.Context) *InterconnectRemoteLocationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InterconnectRemoteLocationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectRemoteLocationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnectRemoteLocation": c.interconnectRemoteLocation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectRemoteLocations.get" call. +// Exactly one of *InterconnectRemoteLocation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectRemoteLocation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectRemoteLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectRemoteLocation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &InterconnectRemoteLocation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the details for the specified interconnect remote location. Gets a list of available interconnect remote locations by making a list() request.", + // "flatPath": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", + // "httpMethod": "GET", + // "id": "compute.interconnectRemoteLocations.get", + // "parameterOrder": [ + // "project", + // "interconnectRemoteLocation" + // ], + // "parameters": { + // "interconnectRemoteLocation": { + // "description": "Name of the interconnect remote location to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", + // "response": { + // "$ref": "InterconnectRemoteLocation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // 
"https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.interconnectRemoteLocations.list": + +type InterconnectRemoteLocationsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of interconnect remote locations available +// to the specified project. +// +// - project: Project ID for this request. +func (r *InterconnectRemoteLocationsService) List(project string) *InterconnectRemoteLocationsListCall { + c := &InterconnectRemoteLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. 
You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InterconnectRemoteLocationsListCall) Filter(filter string) *InterconnectRemoteLocationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. 
(Default: `500`) +func (c *InterconnectRemoteLocationsListCall) MaxResults(maxResults int64) *InterconnectRemoteLocationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *InterconnectRemoteLocationsListCall) OrderBy(orderBy string) *InterconnectRemoteLocationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InterconnectRemoteLocationsListCall) PageToken(pageToken string) *InterconnectRemoteLocationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *InterconnectRemoteLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectRemoteLocationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *InterconnectRemoteLocationsListCall) Fields(s ...googleapi.Field) *InterconnectRemoteLocationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectRemoteLocationsListCall) IfNoneMatch(entityTag string) *InterconnectRemoteLocationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectRemoteLocationsListCall) Context(ctx context.Context) *InterconnectRemoteLocationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectRemoteLocationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectRemoteLocationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectRemoteLocations") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectRemoteLocations.list" call. +// Exactly one of *InterconnectRemoteLocationList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectRemoteLocationList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectRemoteLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectRemoteLocationList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &InterconnectRemoteLocationList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of interconnect remote locations available to the specified project.", + // "flatPath": "projects/{project}/global/interconnectRemoteLocations", + // "httpMethod": "GET", + // "id": "compute.interconnectRemoteLocations.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A 
filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/global/interconnectRemoteLocations", + // "response": { + // "$ref": "InterconnectRemoteLocationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *InterconnectRemoteLocationsListCall) Pages(ctx context.Context, f func(*InterconnectRemoteLocationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.interconnects.delete": type InterconnectsDeleteCall struct { @@ -125309,6 +128784,196 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera } +// method id "compute.nodeGroups.simulateMaintenanceEvent": + +type NodeGroupsSimulateMaintenanceEventCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupssimulatemaintenanceeventrequest *NodeGroupsSimulateMaintenanceEventRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SimulateMaintenanceEvent: Simulates maintenance event on specified +// nodes from the node group. +// +// - nodeGroup: Name of the NodeGroup resource whose nodes will go under +// maintenance simulation. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *NodeGroupsService) SimulateMaintenanceEvent(project string, zone string, nodeGroup string, nodegroupssimulatemaintenanceeventrequest *NodeGroupsSimulateMaintenanceEventRequest) *NodeGroupsSimulateMaintenanceEventCall { + c := &NodeGroupsSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.nodeGroup = nodeGroup + c.nodegroupssimulatemaintenanceeventrequest = nodegroupssimulatemaintenanceeventrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsSimulateMaintenanceEventCall) RequestId(requestId string) *NodeGroupsSimulateMaintenanceEventCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeGroupsSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *NodeGroupsSimulateMaintenanceEventCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeGroupsSimulateMaintenanceEventCall) Context(ctx context.Context) *NodeGroupsSimulateMaintenanceEventCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *NodeGroupsSimulateMaintenanceEventCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeGroupsSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupssimulatemaintenanceeventrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeGroups.simulateMaintenanceEvent" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeGroupsSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Simulates maintenance event on specified nodes from the node group.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.simulateMaintenanceEvent", + // "parameterOrder": [ + // "project", + // "zone", + // "nodeGroup" + // ], + // "parameters": { + // "nodeGroup": { + // "description": "Name of the NodeGroup resource whose nodes will go under maintenance simulation.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", + // "request": { + // "$ref": "NodeGroupsSimulateMaintenanceEventRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.nodeGroups.testIamPermissions": type NodeGroupsTestIamPermissionsCall struct { @@ -138682,33 +142347,27 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionDisks.createSnapshot": +// method id "compute.regionDisks.bulkInsert": -type RegionDisksCreateSnapshotCall struct { - s *Service - project string - region string - disk string - snapshot *Snapshot - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksBulkInsertCall struct { + s *Service + project string + region string + bulkinsertdiskresource *BulkInsertDiskResource + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// CreateSnapshot: Creates a snapshot of a specified persistent disk. 
-// For regular snapshot creation, consider using snapshots.insert -// instead, as that method supports more features, such as creating -// snapshots in a project different from the source disk project. +// BulkInsert: Bulk create a set of disks. // -// - disk: Name of the regional persistent disk to snapshot. // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionDisksService) CreateSnapshot(project string, region string, disk string, snapshot *Snapshot) *RegionDisksCreateSnapshotCall { - c := &RegionDisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +func (r *RegionDisksService) BulkInsert(project string, region string, bulkinsertdiskresource *BulkInsertDiskResource) *RegionDisksBulkInsertCall { + c := &RegionDisksBulkInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.snapshot = snapshot + c.bulkinsertdiskresource = bulkinsertdiskresource return c } @@ -138723,7 +142382,7 @@ func (r *RegionDisksService) CreateSnapshot(project string, region string, disk // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionDisksCreateSnapshotCall) RequestId(requestId string) *RegionDisksCreateSnapshotCall { +func (c *RegionDisksBulkInsertCall) RequestId(requestId string) *RegionDisksBulkInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -138731,7 +142390,7 @@ func (c *RegionDisksCreateSnapshotCall) RequestId(requestId string) *RegionDisks // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisksCreateSnapshotCall { +func (c *RegionDisksBulkInsertCall) Fields(s ...googleapi.Field) *RegionDisksBulkInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -138739,21 +142398,21 @@ func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisk // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksCreateSnapshotCall) Context(ctx context.Context) *RegionDisksCreateSnapshotCall { +func (c *RegionDisksBulkInsertCall) Context(ctx context.Context) *RegionDisksBulkInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksCreateSnapshotCall) Header() http.Header { +func (c *RegionDisksBulkInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksBulkInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -138761,14 +142420,14 @@ func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkinsertdiskresource) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/createSnapshot") + urls := 
googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/bulkInsert") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -138778,19 +142437,18 @@ func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, e googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.createSnapshot" call. +// Do executes the "compute.regionDisks.bulkInsert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -138821,23 +142479,15 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Creates a snapshot of a specified persistent disk. 
For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", - // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", + // "description": "Bulk create a set of disks.", + // "flatPath": "projects/{project}/regions/{region}/disks/bulkInsert", // "httpMethod": "POST", - // "id": "compute.regionDisks.createSnapshot", + // "id": "compute.regionDisks.bulkInsert", // "parameterOrder": [ // "project", - // "region", - // "disk" + // "region" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk to snapshot.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -138846,7 +142496,7 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -138858,9 +142508,9 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", + // "path": "projects/{project}/regions/{region}/disks/bulkInsert", // "request": { - // "$ref": "Snapshot" + // "$ref": "BulkInsertDiskResource" // }, // "response": { // "$ref": "Operation" @@ -138873,31 +142523,33 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.regionDisks.delete": +// method id "compute.regionDisks.createSnapshot": -type RegionDisksDeleteCall struct { +type 
RegionDisksCreateSnapshotCall struct { s *Service project string region string disk string + snapshot *Snapshot urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified regional persistent disk. Deleting a -// regional disk removes all the replicas of its data permanently and is -// irreversible. However, deleting a disk does not delete any snapshots -// previously made from the disk. You must separately delete snapshots. +// CreateSnapshot: Creates a snapshot of a specified persistent disk. +// For regular snapshot creation, consider using snapshots.insert +// instead, as that method supports more features, such as creating +// snapshots in a project different from the source disk project. // -// - disk: Name of the regional persistent disk to delete. +// - disk: Name of the regional persistent disk to snapshot. // - project: Project ID for this request. // - region: Name of the region for this request. -func (r *RegionDisksService) Delete(project string, region string, disk string) *RegionDisksDeleteCall { - c := &RegionDisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionDisksService) CreateSnapshot(project string, region string, disk string, snapshot *Snapshot) *RegionDisksCreateSnapshotCall { + c := &RegionDisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.disk = disk + c.snapshot = snapshot return c } @@ -138912,7 +142564,7 @@ func (r *RegionDisksService) Delete(project string, region string, disk string) // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCall { +func (c *RegionDisksCreateSnapshotCall) RequestId(requestId string) *RegionDisksCreateSnapshotCall { c.urlParams_.Set("requestId", requestId) return c } @@ -138920,7 +142572,7 @@ func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteCall { +func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisksCreateSnapshotCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -138928,21 +142580,21 @@ func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksDeleteCall) Context(ctx context.Context) *RegionDisksDeleteCall { +func (c *RegionDisksCreateSnapshotCall) Context(ctx context.Context) *RegionDisksCreateSnapshotCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksDeleteCall) Header() http.Header { +func (c *RegionDisksCreateSnapshotCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -138950,11 +142602,16 @@ func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/createSnapshot") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -138967,14 +142624,198 @@ func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.delete" call. +// Do executes the "compute.regionDisks.createSnapshot" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a snapshot of a specified persistent disk. For regular snapshot creation, consider using snapshots.insert instead, as that method supports more features, such as creating snapshots in a project different from the source disk project.", + // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", + // "httpMethod": "POST", + // "id": "compute.regionDisks.createSnapshot", + // "parameterOrder": [ + // "project", + // "region", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the regional persistent disk to snapshot.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": 
"Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/disks/{disk}/createSnapshot", + // "request": { + // "$ref": "Snapshot" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionDisks.delete": + +type RegionDisksDeleteCall struct { + s *Service + project string + region string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified regional persistent disk. Deleting a +// regional disk removes all the replicas of its data permanently and is +// irreversible. However, deleting a disk does not delete any snapshots +// previously made from the disk. You must separately delete snapshots. +// +// - disk: Name of the regional persistent disk to delete. +// - project: Project ID for this request. +// - region: Name of the region for this request. 
+func (r *RegionDisksService) Delete(project string, region string, disk string) *RegionDisksDeleteCall { + c := &RegionDisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.disk = disk + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionDisksDeleteCall) Context(ctx context.Context) *RegionDisksDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionDisksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionDisks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -140066,9 +143907,742 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies", + // "path": "projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies", + // "request": { + // "$ref": "RegionDisksRemoveResourcePoliciesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionDisks.resize": + +type RegionDisksResizeCall struct { + s *Service + project string + region string + disk string + regiondisksresizerequest *RegionDisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Resize: Resizes the specified regional persistent disk. +// +// - disk: Name of the regional persistent disk. +// - project: The project ID for this request. +// - region: Name of the region for this request. +func (r *RegionDisksService) Resize(project string, region string, disk string, regiondisksresizerequest *RegionDisksResizeRequest) *RegionDisksResizeCall { + c := &RegionDisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.disk = disk + c.regiondisksresizerequest = regiondisksresizerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. 
If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionDisksResizeCall) Context(ctx context.Context) *RegionDisksResizeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionDisksResizeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksresizerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/resize") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionDisks.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Resizes the specified regional persistent disk.", + // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/resize", + // "httpMethod": "POST", + // "id": "compute.regionDisks.resize", + // "parameterOrder": [ + // "project", + // "region", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the regional persistent disk.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "The project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/disks/{disk}/resize", + // "request": { + // "$ref": "RegionDisksResizeRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionDisks.setIamPolicy": + +type RegionDisksSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *RegionDisksService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionDisksSetIamPolicyCall { + c := &RegionDisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *RegionDisksSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionDisksSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionDisksSetIamPolicyCall) Context(ctx context.Context) *RegionDisksSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionDisksSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionDisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionDisks.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionDisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "flatPath": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", + // "httpMethod": "POST", + // "id": "compute.regionDisks.setIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionDisks.setLabels": + +type RegionDisksSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on the target regional disk. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. 
+func (r *RegionDisksService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionDisksSetLabelsCall { + c := &RegionDisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionDisksSetLabelsCall) Context(ctx context.Context) *RegionDisksSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionDisksSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionDisks.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on the target regional disk.", + // "flatPath": "projects/{project}/regions/{region}/disks/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.regionDisks.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/disks/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionDisks.startAsyncReplication": + +type RegionDisksStartAsyncReplicationCall struct { + s *Service + project string + region string + disk string + regiondisksstartasyncreplicationrequest *RegionDisksStartAsyncReplicationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// StartAsyncReplication: Starts asynchronous replication. Must be +// invoked on the primary disk. +// +// - disk: The name of the persistent disk. +// - project: Project ID for this request. +// - region: The name of the region for this request. 
+func (r *RegionDisksService) StartAsyncReplication(project string, region string, disk string, regiondisksstartasyncreplicationrequest *RegionDisksStartAsyncReplicationRequest) *RegionDisksStartAsyncReplicationCall { + c := &RegionDisksStartAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.disk = disk + c.regiondisksstartasyncreplicationrequest = regiondisksstartasyncreplicationrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionDisksStartAsyncReplicationCall) RequestId(requestId string) *RegionDisksStartAsyncReplicationCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionDisksStartAsyncReplicationCall) Fields(s ...googleapi.Field) *RegionDisksStartAsyncReplicationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *RegionDisksStartAsyncReplicationCall) Context(ctx context.Context) *RegionDisksStartAsyncReplicationCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionDisksStartAsyncReplicationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionDisksStartAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksstartasyncreplicationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionDisks.startAsyncReplication" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *RegionDisksStartAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Starts asynchronous replication. Must be invoked on the primary disk.", + // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication", + // "httpMethod": "POST", + // "id": "compute.regionDisks.startAsyncReplication", + // "parameterOrder": [ + // "project", + // "region", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The name of the persistent disk.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify 
requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/disks/{disk}/startAsyncReplication", // "request": { - // "$ref": "RegionDisksRemoveResourcePoliciesRequest" + // "$ref": "RegionDisksStartAsyncReplicationRequest" // }, // "response": { // "$ref": "Operation" @@ -140081,30 +144655,29 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionDisks.resize": +// method id "compute.regionDisks.stopAsyncReplication": -type RegionDisksResizeCall struct { - s *Service - project string - region string - disk string - regiondisksresizerequest *RegionDisksResizeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksStopAsyncReplicationCall struct { + s *Service + project string + region string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the specified regional persistent disk. +// StopAsyncReplication: Stops asynchronous replication. Can be invoked +// either on the primary or on the secondary disk. // -// - disk: Name of the regional persistent disk. -// - project: The project ID for this request. -// - region: Name of the region for this request. 
-func (r *RegionDisksService) Resize(project string, region string, disk string, regiondisksresizerequest *RegionDisksResizeRequest) *RegionDisksResizeCall { - c := &RegionDisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - disk: The name of the persistent disk. +// - project: Project ID for this request. +// - region: The name of the region for this request. +func (r *RegionDisksService) StopAsyncReplication(project string, region string, disk string) *RegionDisksStopAsyncReplicationCall { + c := &RegionDisksStopAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.disk = disk - c.regiondisksresizerequest = regiondisksresizerequest return c } @@ -140119,7 +144692,7 @@ func (r *RegionDisksService) Resize(project string, region string, disk string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCall { +func (c *RegionDisksStopAsyncReplicationCall) RequestId(requestId string) *RegionDisksStopAsyncReplicationCall { c.urlParams_.Set("requestId", requestId) return c } @@ -140127,7 +144700,7 @@ func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeCall { +func (c *RegionDisksStopAsyncReplicationCall) Fields(s ...googleapi.Field) *RegionDisksStopAsyncReplicationCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -140135,21 +144708,21 @@ func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeC // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksResizeCall) Context(ctx context.Context) *RegionDisksResizeCall { +func (c *RegionDisksStopAsyncReplicationCall) Context(ctx context.Context) *RegionDisksStopAsyncReplicationCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksResizeCall) Header() http.Header { +func (c *RegionDisksStopAsyncReplicationCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksStopAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -140157,14 +144730,9 @@ func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksresizerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -140179,14 +144747,14 @@ func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.resize" call. +// Do executes the "compute.regionDisks.stopAsyncReplication" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksStopAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -140217,10 +144785,10 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Resizes the specified regional persistent disk.", - // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/resize", + // "description": "Stops asynchronous replication. 
Can be invoked either on the primary or on the secondary disk.", + // "flatPath": "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication", // "httpMethod": "POST", - // "id": "compute.regionDisks.resize", + // "id": "compute.regionDisks.stopAsyncReplication", // "parameterOrder": [ // "project", // "region", @@ -140228,21 +144796,21 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er // ], // "parameters": { // "disk": { - // "description": "Name of the regional persistent disk.", + // "description": "The name of the persistent disk.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, // "project": { - // "description": "The project ID for this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -140254,10 +144822,7 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{disk}/resize", - // "request": { - // "$ref": "RegionDisksResizeRequest" - // }, + // "path": "projects/{project}/regions/{region}/disks/{disk}/stopAsyncReplication", // "response": { // "$ref": "Operation" // }, @@ -140269,198 +144834,30 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.regionDisks.setIamPolicy": - -type RegionDisksSetIamPolicyCall struct { - s *Service - project string - region string - resource string - 
regionsetpolicyrequest *RegionSetPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -// -// - project: Project ID for this request. -// - region: The name of the region for this request. -// - resource: Name or id of the resource for this request. -func (r *RegionDisksService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionDisksSetIamPolicyCall { - c := &RegionDisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.regionsetpolicyrequest = regionsetpolicyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionDisksSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionDisksSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionDisksSetIamPolicyCall) Context(ctx context.Context) *RegionDisksSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionDisksSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} +// method id "compute.regionDisks.stopGroupAsyncReplication": -func (c *RegionDisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionDisks.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RegionDisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "flatPath": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", - // "httpMethod": "POST", - // "id": "compute.regionDisks.setIamPolicy", - // "parameterOrder": [ - // "project", - // "region", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/regions/{region}/disks/{resource}/setIamPolicy", - // "request": { - // "$ref": "RegionSetPolicyRequest" - // }, - // "response": { - // "$ref": "Policy" - // }, - // 
"scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionDisks.setLabels": - -type RegionDisksSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksStopGroupAsyncReplicationCall struct { + s *Service + project string + region string + disksstopgroupasyncreplicationresource *DisksStopGroupAsyncReplicationResource + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on the target regional disk. +// StopGroupAsyncReplication: Stops asynchronous replication for a +// consistency group of disks. Can be invoked either in the primary or +// secondary scope. // -// - project: Project ID for this request. -// - region: The region for this request. -// - resource: Name or id of the resource for this request. -func (r *RegionDisksService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionDisksSetLabelsCall { - c := &RegionDisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - region: The name of the region for this request. This must be the +// region of the primary or secondary disks in the consistency group. 
+func (r *RegionDisksService) StopGroupAsyncReplication(project string, region string, disksstopgroupasyncreplicationresource *DisksStopGroupAsyncReplicationResource) *RegionDisksStopGroupAsyncReplicationCall { + c := &RegionDisksStopGroupAsyncReplicationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest + c.disksstopgroupasyncreplicationresource = disksstopgroupasyncreplicationresource return c } @@ -140475,7 +144872,7 @@ func (r *RegionDisksService) SetLabels(project string, region string, resource s // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLabelsCall { +func (c *RegionDisksStopGroupAsyncReplicationCall) RequestId(requestId string) *RegionDisksStopGroupAsyncReplicationCall { c.urlParams_.Set("requestId", requestId) return c } @@ -140483,7 +144880,7 @@ func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetLabelsCall { +func (c *RegionDisksStopGroupAsyncReplicationCall) Fields(s ...googleapi.Field) *RegionDisksStopGroupAsyncReplicationCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -140491,21 +144888,21 @@ func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetL // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionDisksSetLabelsCall) Context(ctx context.Context) *RegionDisksSetLabelsCall { +func (c *RegionDisksStopGroupAsyncReplicationCall) Context(ctx context.Context) *RegionDisksStopGroupAsyncReplicationCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksSetLabelsCall) Header() http.Header { +func (c *RegionDisksStopGroupAsyncReplicationCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksStopGroupAsyncReplicationCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -140513,14 +144910,14 @@ func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksstopgroupasyncreplicationresource) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -140528,21 +144925,20 @@ func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.setLabels" call. +// Do executes the "compute.regionDisks.stopGroupAsyncReplication" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksStopGroupAsyncReplicationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -140573,14 +144969,13 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets the labels on the target regional disk.", - // "flatPath": "projects/{project}/regions/{region}/disks/{resource}/setLabels", + // "description": "Stops asynchronous replication for a consistency group of disks. 
Can be invoked either in the primary or secondary scope.", + // "flatPath": "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication", // "httpMethod": "POST", - // "id": "compute.regionDisks.setLabels", + // "id": "compute.regionDisks.stopGroupAsyncReplication", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "region" // ], // "parameters": { // "project": { @@ -140591,7 +144986,7 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "region": { - // "description": "The region for this request.", + // "description": "The name of the region for this request. This must be the region of the primary or secondary disks in the consistency group.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -140601,18 +144996,11 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "resource": { - // "description": "Name or id of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/disks/{resource}/setLabels", + // "path": "projects/{project}/regions/{region}/disks/stopGroupAsyncReplication", // "request": { - // "$ref": "RegionSetLabelsRequest" + // "$ref": "DisksStopGroupAsyncReplicationResource" // }, // "response": { // "$ref": "Operation" @@ -167365,6 +171753,15 @@ func (c *RoutersGetNatMappingInfoCall) MaxResults(maxResults int64) *RoutersGetN return c } +// NatName sets the optional parameter "natName": Name of the nat +// service to filter the Nat Mapping information. If it is omitted, all +// nats for this router will be returned. Name should conform to +// RFC1035. +func (c *RoutersGetNatMappingInfoCall) NatName(natName string) *RoutersGetNatMappingInfoCall { + c.urlParams_.Set("natName", natName) + return c +} + // OrderBy sets the optional parameter "orderBy": Sorts list results by // a certain order. By default, results are returned in alphanumerical // order based on the resource name. You can also sort results in @@ -167520,6 +171917,11 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp // "minimum": "0", // "type": "integer" // }, + // "natName": { + // "description": "Name of the nat service to filter the Nat Mapping information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", + // "location": "query", + // "type": "string" + // }, // "orderBy": { // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", diff --git a/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-api.json b/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-api.json index 2569981131..45d258e3f9 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-api.json @@ -175,11 +175,13 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for. 
This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" @@ -197,6 +199,31 @@ "resources": { "clusters": { "methods": { + "checkAutopilotCompatibility": { + "description": "Checks the cluster compatibility with Autopilot mode, and returns a list of compatibility issues.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:checkAutopilotCompatibility", + "httpMethod": "GET", + "id": "container.projects.locations.clusters.checkAutopilotCompatibility", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster) of the cluster to retrieve. Specified in the format `projects/*/locations/*/clusters/*`.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:checkAutopilotCompatibility", + "response": { + "$ref": "CheckAutopilotCompatibilityResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "completeIpRotation": { "description": "Completes master IP rotation.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:completeIpRotation", @@ -263,6 +290,7 @@ ], "parameters": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to delete. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" @@ -275,11 +303,13 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. 
The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" @@ -303,6 +333,7 @@ ], "parameters": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to retrieve. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" @@ -315,11 +346,13 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" @@ -372,11 +405,13 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones. This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" @@ -447,6 +482,7 @@ ] }, "setLocations": { + "deprecated": true, "description": "Sets the locations for a specific cluster. Deprecated. 
Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLocations", "httpMethod": "POST", @@ -796,6 +832,7 @@ ], "parameters": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" @@ -808,16 +845,19 @@ "type": "string" }, "nodePoolId": { + "deprecated": true, "description": "Deprecated. The name of the node pool to delete. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" @@ -841,6 +881,7 @@ ], "parameters": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" @@ -853,16 +894,19 @@ "type": "string" }, "nodePoolId": { + "deprecated": true, "description": "Deprecated. The name of the node pool. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. 
The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" @@ -886,6 +930,7 @@ ], "parameters": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" @@ -898,11 +943,13 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" @@ -1133,16 +1180,19 @@ "type": "string" }, "operationId": { + "deprecated": true, "description": "Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). 
This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "location": "query", "type": "string" @@ -1173,11 +1223,13 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for, or `-` for all zones. This field has been deprecated and replaced by the parent field.", "location": "query", "type": "string" @@ -1517,6 +1569,7 @@ ] }, "locations": { + "deprecated": true, "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", "httpMethod": "POST", @@ -2487,7 +2540,7 @@ } } }, - "revision": "20230222", + "revision": "20230620", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2503,6 +2556,10 @@ "description": "The accelerator type resource name. List of supported accelerators [here](https://cloud.google.com/compute/docs/gpus)", "type": "string" }, + "gpuDriverInstallationConfig": { + "$ref": "GPUDriverInstallationConfig", + "description": "The configuration for auto installation of GPU driver." 
+ }, "gpuPartitionSize": { "description": "Size of partitions to create on the GPU. Valid values are described in the NVIDIA [mig user guide](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).", "type": "string" @@ -2514,6 +2571,28 @@ }, "type": "object" }, + "AdditionalPodRangesConfig": { + "description": "AdditionalPodRangesConfig is the configuration for additional pod secondary ranges supporting the ClusterUpdate message.", + "id": "AdditionalPodRangesConfig", + "properties": { + "podRangeInfo": { + "description": "Output only. [Output only] Information for additional pod range.", + "items": { + "$ref": "RangeInfo" + }, + "readOnly": true, + "type": "array" + }, + "podRangeNames": { + "description": "Name for pod secondary ipv4 range which has the actual range defined ahead.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "AddonsConfig": { "description": "Configuration for the addons that can be automatically spun up in the cluster, enabling additional functionality.", "id": "AddonsConfig", @@ -2538,6 +2617,10 @@ "$ref": "GcpFilestoreCsiDriverConfig", "description": "Configuration for the GCP Filestore CSI driver." }, + "gcsFuseCsiDriverConfig": { + "$ref": "GcsFuseCsiDriverConfig", + "description": "Configuration for the Cloud Storage Fuse CSI driver." + }, "gkeBackupAgentConfig": { "$ref": "GkeBackupAgentConfig", "description": "Configuration for the Backup for GKE agent addon." @@ -2610,6 +2693,57 @@ "enabled": { "description": "Enable Autopilot", "type": "boolean" + }, + "workloadPolicyConfig": { + "$ref": "WorkloadPolicyConfig", + "description": "Workload policy configuration for Autopilot." 
+ } + }, + "type": "object" + }, + "AutopilotCompatibilityIssue": { + "description": "AutopilotCompatibilityIssue contains information about a specific compatibility issue with Autopilot mode.", + "id": "AutopilotCompatibilityIssue", + "properties": { + "constraintType": { + "description": "The constraint type of the issue.", + "type": "string" + }, + "description": { + "description": "The description of the issue.", + "type": "string" + }, + "documentationUrl": { + "description": "A URL to a public documnetation, which addresses resolving this issue.", + "type": "string" + }, + "incompatibilityType": { + "description": "The incompatibility type of this issue.", + "enum": [ + "UNSPECIFIED", + "INCOMPATIBILITY", + "ADDITIONAL_CONFIG_REQUIRED", + "PASSED_WITH_OPTIONAL_CONFIG" + ], + "enumDescriptions": [ + "Default value, should not be used.", + "Indicates that the issue is a known incompatibility between the cluster and Autopilot mode.", + "Indicates the issue is an incompatibility if customers take no further action to resolve.", + "Indicates the issue is not an incompatibility, but depending on the workloads business logic, there is a potential that they won't work on Autopilot." + ], + "type": "string" + }, + "lastObservation": { + "description": "The last time when this issue was observed.", + "format": "google-datetime", + "type": "string" + }, + "subjects": { + "description": "The name of the resources which are subject to this issue.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -2635,6 +2769,10 @@ "description": "The image type to use for NAP created node. 
Please see https://cloud.google.com/kubernetes-engine/docs/concepts/node-images for available image types.", "type": "string" }, + "insecureKubeletReadonlyPortEnabled": { + "description": "Enable or disable Kubelet read only port.", + "type": "boolean" + }, "management": { "$ref": "NodeManagement", "description": "Specifies the node management options for NAP created node-pools." @@ -2665,6 +2803,22 @@ }, "type": "object" }, + "BestEffortProvisioning": { + "description": "Best effort provisioning.", + "id": "BestEffortProvisioning", + "properties": { + "enabled": { + "description": "When this is enabled, cluster/node pool creations will ignore non-fatal errors like stockout to best provision as many nodes as possible right now and eventually bring up all target number of nodes", + "type": "boolean" + }, + "minProvisionNodes": { + "description": "Minimum number of nodes to be provisioned to be considered as succeeded, and the rest of nodes will be provisioned gradually and eventually when stockout issue has been resolved.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "BigQueryDestination": { "description": "Parameters for using BigQuery as the destination of resource usage export.", "id": "BigQueryDestination", @@ -2793,6 +2947,24 @@ }, "type": "object" }, + "CheckAutopilotCompatibilityResponse": { + "description": "CheckAutopilotCompatibilityResponse has a list of compatibility issues.", + "id": "CheckAutopilotCompatibilityResponse", + "properties": { + "issues": { + "description": "The list of issues for the given operation.", + "items": { + "$ref": "AutopilotCompatibilityIssue" + }, + "type": "array" + }, + "summary": { + "description": "The summary of the autopilot compatibility response.", + "type": "string" + } + }, + "type": "object" + }, "CidrBlock": { "description": "CidrBlock contains an optional name and one CIDR block.", "id": "CidrBlock", @@ -2916,6 +3088,10 @@ "description": "An optional description of this cluster.", 
"type": "string" }, + "enableK8sBetaApis": { + "$ref": "K8sBetaAPIConfig", + "description": "Beta APIs Config" + }, "enableKubernetesAlpha": { "description": "Kubernetes alpha features are enabled on this cluster. This includes alpha API groups (e.g. v1alpha1) and features that may not be production ready in the kubernetes version of the master and nodes. The cluster has no SLA for uptime and master/node upgrades are disabled. Alpha enabled clusters are automatically deleted thirty days after creation.", "type": "boolean" @@ -2936,6 +3112,10 @@ "description": "[Output only] The time the cluster will be automatically deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", "type": "string" }, + "fleet": { + "$ref": "Fleet", + "description": "Fleet information for the cluster." + }, "id": { "description": "Output only. Unique id for the cluster.", "readOnly": true, @@ -3066,7 +3246,7 @@ }, "releaseChannel": { "$ref": "ReleaseChannel", - "description": "Release channel configuration." + "description": "Release channel configuration. If left unspecified on cluster creation and a version is specified, the cluster is enrolled in the most mature release channel where the version is available (first checking STABLE, then REGULAR, and finally RAPID). Otherwise, if no release channel configuration and no version is specified, the cluster is enrolled in the REGULAR channel with its default version." }, "resourceLabels": { "additionalProperties": { @@ -3079,6 +3259,10 @@ "$ref": "ResourceUsageExportConfig", "description": "Configuration for exporting resource usages. Resource usage export is disabled when this config is unspecified." }, + "securityPostureConfig": { + "$ref": "SecurityPostureConfig", + "description": "Enable/Disable Security Posture API features for the cluster." 
+ }, "selfLink": { "description": "[Output only] Server-defined URL for the resource.", "type": "string" @@ -3183,10 +3367,33 @@ }, "type": "object" }, + "ClusterNetworkPerformanceConfig": { + "description": "Configuration of network bandwidth tiers", + "id": "ClusterNetworkPerformanceConfig", + "properties": { + "totalEgressBandwidthTier": { + "description": "Specifies the total network bandwidth tier for NodePools in the cluster.", + "enum": [ + "TIER_UNSPECIFIED", + "TIER_1" + ], + "enumDescriptions": [ + "Default value", + "Higher bandwidth, actual values based on VM size." + ], + "type": "string" + } + }, + "type": "object" + }, "ClusterUpdate": { "description": "ClusterUpdate describes an update to the cluster. Exactly one update can be applied to a cluster with each request, so at most one field can be provided.", "id": "ClusterUpdate", "properties": { + "additionalPodRangesConfig": { + "$ref": "AdditionalPodRangesConfig", + "description": "The additional pod ranges to be added to the cluster. These pod ranges can be used by node pools to allocate pod IPs." + }, "desiredAddonsConfig": { "$ref": "AddonsConfig", "description": "Configurations for the various addons available to run in the cluster." @@ -3195,6 +3402,10 @@ "$ref": "AuthenticatorGroupsConfig", "description": "The desired authenticator groups config for the cluster." }, + "desiredAutopilotWorkloadPolicyConfig": { + "$ref": "WorkloadPolicyConfig", + "description": "The desired workload policy configuration for the autopilot cluster." + }, "desiredBinaryAuthorization": { "$ref": "BinaryAuthorization", "description": "The desired configuration options for the Binary Authorization feature." @@ -3233,10 +3444,18 @@ "$ref": "DNSConfig", "description": "DNSConfig contains clusterDNS config for this cluster." 
}, + "desiredEnableFqdnNetworkPolicy": { + "description": "Enable/Disable FQDN Network Policy for the cluster.", + "type": "boolean" + }, "desiredEnablePrivateEndpoint": { "description": "Enable/Disable private endpoint for the cluster's master.", "type": "boolean" }, + "desiredFleet": { + "$ref": "Fleet", + "description": "The desired fleet configuration for the cluster." + }, "desiredGatewayApiConfig": { "$ref": "GatewayAPIConfig", "description": "The desired config of Gateway API on this cluster." @@ -3257,6 +3476,10 @@ "$ref": "IntraNodeVisibilityConfig", "description": "The desired config of Intra-node visibility." }, + "desiredK8sBetaApis": { + "$ref": "K8sBetaAPIConfig", + "description": "Desired Beta APIs to be enabled for cluster." + }, "desiredL4ilbSubsettingConfig": { "$ref": "ILBSubsettingConfig", "description": "The desired L4 Internal Load Balancer Subsetting configuration." @@ -3296,6 +3519,10 @@ "description": "The monitoring service the cluster should use to write metrics. Currently available options: * \"monitoring.googleapis.com/kubernetes\" - The Cloud Monitoring service with a Kubernetes-native resource model * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no longer available as of GKE 1.15). * `none` - No metrics will be exported from the cluster. If left as an empty string,`monitoring.googleapis.com/kubernetes` will be used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.", "type": "string" }, + "desiredNetworkPerformanceConfig": { + "$ref": "ClusterNetworkPerformanceConfig", + "description": "The desired network performance config." + }, "desiredNodePoolAutoConfigNetworkTags": { "$ref": "NetworkTags", "description": "The desired network tags that apply to all auto-provisioned node pools in autopilot clusters and node auto-provisioning enabled clusters." @@ -3348,6 +3575,10 @@ "$ref": "ResourceUsageExportConfig", "description": "The desired configuration for exporting resource usage." 
}, + "desiredSecurityPostureConfig": { + "$ref": "SecurityPostureConfig", + "description": "Enable/Disable Security Posture API features for the cluster." + }, "desiredServiceExternalIpsConfig": { "$ref": "ServiceExternalIPsConfig", "description": "ServiceExternalIPsConfig specifies the config for the use of Services with ExternalIPs field." @@ -3378,9 +3609,17 @@ "$ref": "WorkloadIdentityConfig", "description": "Configuration for Workload Identity." }, + "enableK8sBetaApis": { + "$ref": "K8sBetaAPIConfig", + "description": "Kubernetes open source beta apis enabled on the cluster. Only beta apis" + }, "etag": { "description": "The current etag of the cluster. If an etag is provided and does not match the current etag of the cluster, update will be blocked and an ABORTED error will be returned.", "type": "string" + }, + "removedAdditionalPodRangesConfig": { + "$ref": "AdditionalPodRangesConfig", + "description": "The additional pod ranges that are to be removed from the cluster. The pod ranges specified here must have been specified earlier in the 'additional_pod_ranges_config' argument." } }, "type": "object" @@ -3517,12 +3756,14 @@ "enum": [ "PROVIDER_UNSPECIFIED", "PLATFORM_DEFAULT", - "CLOUD_DNS" + "CLOUD_DNS", + "KUBE_DNS" ], "enumDescriptions": [ "Default value", "Use GKE default DNS provider(kube-dns) for DNS resolution.", - "Use CloudDNS for DNS resolution." 
+ "Use CloudDNS for DNS resolution.", + "Use KubeDNS for DNS resolution" ], "type": "string" }, @@ -3571,7 +3812,7 @@ "type": "string" }, "state": { - "description": "Denotes the state of etcd encryption.", + "description": "The desired state of etcd encryption.", "enum": [ "UNKNOWN", "ENCRYPTED", @@ -3664,6 +3905,48 @@ }, "type": "object" }, + "Fleet": { + "description": "Fleet is the fleet configuration for the cluster.", + "id": "Fleet", + "properties": { + "membership": { + "description": "[Output only] The full resource name of the registered fleet membership of the cluster, in the format `//gkehub.googleapis.com/projects/*/locations/*/memberships/*`.", + "type": "string" + }, + "preRegistered": { + "description": "[Output only] Whether the cluster has been registered through the fleet API.", + "type": "boolean" + }, + "project": { + "description": "The Fleet host project(project ID or project number) where this cluster will be registered to. This field cannot be changed after the cluster has been registered.", + "type": "string" + } + }, + "type": "object" + }, + "GPUDriverInstallationConfig": { + "description": "GPUDriverInstallationConfig specifies the version of GPU driver to be auto installed.", + "id": "GPUDriverInstallationConfig", + "properties": { + "gpuDriverVersion": { + "description": "Mode for how the GPU driver is installed.", + "enum": [ + "GPU_DRIVER_VERSION_UNSPECIFIED", + "INSTALLATION_DISABLED", + "DEFAULT", + "LATEST" + ], + "enumDescriptions": [ + "Default value is to not install any GPU driver.", + "Disable GPU driver auto installation and needs manual installation", + "\"Default\" GPU driver in COS and Ubuntu.", + "\"Latest\" GPU driver in COS." 
+ ], + "type": "string" + } + }, + "type": "object" + }, "GPUSharingConfig": { "description": "GPUSharingConfig represents the GPU sharing configuration for Hardware Accelerators.", "id": "GPUSharingConfig", @@ -3744,6 +4027,17 @@ }, "type": "object" }, + "GcsFuseCsiDriverConfig": { + "description": "Configuration for the Cloud Storage Fuse CSI driver.", + "id": "GcsFuseCsiDriverConfig", + "properties": { + "enabled": { + "description": "Whether the Cloud Storage Fuse CSI driver is enabled for this cluster.", + "type": "boolean" + } + }, + "type": "object" + }, "GetJSONWebKeysResponse": { "description": "GetJSONWebKeysResponse is a valid JSON Web Key Set as specififed in rfc 7517", "id": "GetJSONWebKeysResponse", @@ -3884,6 +4178,11 @@ "description": "Configuration for controlling how IPs are allocated in the cluster.", "id": "IPAllocationPolicy", "properties": { + "additionalPodRangesConfig": { + "$ref": "AdditionalPodRangesConfig", + "description": "Output only. [Output only] The additional pod ranges that are added to the cluster. These pod ranges can be used by new node pools to allocate pod IPs automatically. Once the range is removed it will not show up in IPAllocationPolicy.", + "readOnly": true + }, "clusterIpv4Cidr": { "description": "This field is deprecated, use cluster_ipv4_cidr_block.", "type": "string" @@ -3900,6 +4199,12 @@ "description": "Whether a new subnetwork will be created automatically for the cluster. This field is only applicable when `use_ip_aliases` is true.", "type": "boolean" }, + "defaultPodIpv4RangeUtilization": { + "description": "Output only. [Output only] The utilization of the cluster default IPv4 range for pod. 
The ratio is Usage/[Total number of IPs in the secondary range], Usage=numNodes*numZones*podIPsPerNode.", + "format": "double", + "readOnly": true, + "type": "number" + }, "ipv6AccessType": { "description": "The ipv6 access type (internal or external) when create_subnetwork is true", "enum": [ @@ -3922,6 +4227,10 @@ "description": "The IP address range of the instance IPs in this cluster. This is applicable only if `create_subnetwork` is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. `/14`) to have a range chosen with a specific netmask. Set to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range to use.", "type": "string" }, + "podCidrOverprovisionConfig": { + "$ref": "PodCIDROverprovisionConfig", + "description": "[PRIVATE FIELD] Pod CIDR size overprovisioning config for the cluster. Pod CIDR size per node depends on max_pods_per_node. By default, the value of max_pods_per_node is doubled and then rounded off to next power of 2 to get the size of pod CIDR block per node. Example: max_pods_per_node of 30 would result in 64 IPs (/26). This config can disable the doubling of IPs (we still round off to next power of 2) Example: max_pods_per_node of 30 will result in 32 IPs (/27) when overprovisioning is disabled." 
+ }, "servicesIpv4Cidr": { "description": "This field is deprecated, use services_ipv4_cidr_block.", "type": "string" @@ -4042,6 +4351,20 @@ }, "type": "object" }, + "K8sBetaAPIConfig": { + "description": "K8sBetaAPIConfig , configuration for beta APIs", + "id": "K8sBetaAPIConfig", + "properties": { + "enabledApis": { + "description": "Enabled k8s beta APIs.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "KubernetesDashboard": { "description": "Configuration for the Kubernetes Dashboard.", "id": "KubernetesDashboard", @@ -4479,6 +4802,10 @@ "$ref": "DNSConfig", "description": "DNSConfig contains clusterDNS config for this cluster." }, + "enableFqdnNetworkPolicy": { + "description": "Whether FQDN Network Policy is enabled on this cluster.", + "type": "boolean" + }, "enableIntraNodeVisibility": { "description": "Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.", "type": "boolean" @@ -4495,6 +4822,10 @@ "description": "Output only. The relative name of the Google Compute Engine network(https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the cluster is connected. Example: projects/my-project/global/networks/my-network", "type": "string" }, + "networkPerformanceConfig": { + "$ref": "ClusterNetworkPerformanceConfig", + "description": "Network bandwidth tier configuration." + }, "privateIpv6GoogleAccess": { "description": "The desired state of IPv6 connectivity to Google Services. 
By default, no private IPv6 access to or from Google Services (all access will be via IPv4)", "enum": [ @@ -4589,6 +4920,38 @@ }, "type": "object" }, + "NodeAffinity": { + "description": "Specifies the NodeAffinity key, values, and affinity operator according to [shared sole tenant node group affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity).", + "id": "NodeAffinity", + "properties": { + "key": { + "description": "Key for NodeAffinity.", + "type": "string" + }, + "operator": { + "description": "Operator for NodeAffinity.", + "enum": [ + "OPERATOR_UNSPECIFIED", + "IN", + "NOT_IN" + ], + "enumDescriptions": [ + "Invalid or unspecified affinity operator.", + "Affinity operator.", + "Anti-affinity operator." + ], + "type": "string" + }, + "values": { + "description": "Values for NodeAffinity.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "NodeConfig": { "description": "Parameters that describe the nodes in a cluster. GKE Autopilot clusters do not recognize parameters in `NodeConfig`. Use AutoprovisioningNodePoolDefaults instead.", "id": "NodeConfig", @@ -4722,6 +5085,10 @@ "$ref": "ShieldedInstanceConfig", "description": "Shielded Instance options." }, + "soleTenantConfig": { + "$ref": "SoleTenantConfig", + "description": "Parameters for node pools to be backed by shared sole tenant node groups." + }, "spot": { "description": "Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag.", "type": "boolean" @@ -4782,6 +5149,10 @@ "description": "Control the CPU management policy on the node. See https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/ The following values are allowed. * \"none\": the default, which represents the existing scheduling behavior. * \"static\": allows pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node. 
The default value is 'none' if unspecified.", "type": "string" }, + "insecureKubeletReadonlyPortEnabled": { + "description": "Enable or disable Kubelet read only port.", + "type": "boolean" + }, "podPidsLimit": { "description": "Set the Pod PID limits. See https://kubernetes.io/docs/concepts/policy/pid-limiting/#pod-pid-limits Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.", "format": "int64", @@ -4839,10 +5210,20 @@ "$ref": "NetworkPerformanceConfig", "description": "Network bandwidth tier configuration." }, + "podCidrOverprovisionConfig": { + "$ref": "PodCIDROverprovisionConfig", + "description": "[PRIVATE FIELD] Pod CIDR size overprovisioning config for the nodepool. Pod CIDR size per node depends on max_pods_per_node. By default, the value of max_pods_per_node is rounded off to next power of 2 and we then double that to get the size of pod CIDR block per node. Example: max_pods_per_node of 30 would result in 64 IPs (/26). This config can disable the doubling of IPs (we still round off to next power of 2) Example: max_pods_per_node of 30 will result in 32 IPs (/27) when overprovisioning is disabled." + }, "podIpv4CidrBlock": { "description": "The IP address range for pod IPs in this node pool. Only applicable if `create_pod_range` is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. `/14`) to have a range chosen with a specific netmask. Set to a [CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`) to pick a specific range to use. Only applicable if `ip_allocation_policy.use_ip_aliases` is true. This field cannot be changed after the node pool has been created.", "type": "string" }, + "podIpv4RangeUtilization": { + "description": "Output only. [Output only] The utilization of the IPv4 range for pod. 
The ratio is Usage/[Total number of IPs in the secondary range], Usage=numNodes*numZones*podIPsPerNode.", + "format": "double", + "readOnly": true, + "type": "number" + }, "podRange": { "description": "The ID of the secondary range for pod IPs. If `create_pod_range` is true, this ID is used for the new range. If `create_pod_range` is false, uses an existing secondary range with this ID. Only applicable if `ip_allocation_policy.use_ip_aliases` is true. This field cannot be changed after the node pool has been created.", "type": "string" @@ -4858,6 +5239,10 @@ "$ref": "NodePoolAutoscaling", "description": "Autoscaler configuration for this NodePool. Autoscaler is enabled only if a valid configuration is present." }, + "bestEffortProvisioning": { + "$ref": "BestEffortProvisioning", + "description": "Enable best effort provisioning for nodes" + }, "conditions": { "description": "Which conditions caused the current node pool state.", "items": { @@ -5158,26 +5543,28 @@ "SET_MASTER_AUTH", "SET_NODE_POOL_SIZE", "SET_NETWORK_POLICY", - "SET_MAINTENANCE_POLICY" + "SET_MAINTENANCE_POLICY", + "RESIZE_CLUSTER" ], "enumDescriptions": [ "Not set.", - "Cluster create.", - "Cluster delete.", - "A master upgrade.", - "A node upgrade.", - "Cluster repair.", - "Cluster update.", - "Node pool create.", - "Node pool delete.", - "Set node pool management.", - "Automatic node pool repair.", - "Automatic node upgrade.", - "Set labels.", - "Set/generate master auth materials", - "Set node pool size.", - "Updates network policy for a cluster.", - "Set the maintenance policy." + "The cluster is being created. The cluster should be assumed to be unusable until the operation finishes. In the event of the operation failing, the cluster will enter the ERROR state and eventually be deleted.", + "The cluster is being deleted. The cluster should be assumed to be unusable as soon as this operation starts. 
In the event of the operation failing, the cluster will enter the ERROR state and the deletion will be automatically retried until completed.", + "The cluster version is being updated. Note that this includes \"upgrades\" to the same version, which are simply a recreation. This also includes [auto-upgrades](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-upgrades#upgrading_automatically). For more details, see [documentation on cluster upgrades](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-upgrades#cluster_upgrades).", + "A node pool is being updated. Despite calling this an \"upgrade\", this includes most forms of updates to node pools. This also includes [auto-upgrades](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-upgrades). This operation sets the progress field and may be canceled. The upgrade strategy depends on [node pool configuration](https://cloud.google.com/kubernetes-engine/docs/concepts/node-pool-upgrade-strategies). The nodes are generally still usable during this operation.", + "A problem has been detected with the control plane and is being repaired. This operation type is initiated by GKE. For more details, see [documentation on repairs](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions#repairs).", + "The cluster is being updated. This is a broad category of operations and includes operations that only change metadata as well as those that must recreate the entire cluster. If the control plane must be recreated, this will cause temporary downtime for zonal clusters. Some features require recreating the nodes as well. Those will be recreated as separate operations and the update may not be completely functional until the node pools recreations finish. Node recreations will generally follow [maintenance policies](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions). Some GKE-initiated operations use this type. 
This includes certain types of auto-upgrades and incident mitigations.", + "A node pool is being created. The node pool should be assumed to be unusable until this operation finishes. In the event of an error, the node pool may be partially created. If enabled, [node autoprovisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning) may have automatically initiated such operations.", + "The node pool is being deleted. The node pool should be assumed to be unusable as soon as this operation starts.", + "The node pool's manamagent field is being updated. These operations only update metadata and may be concurrent with most other operations.", + "A problem has been detected with nodes and [they are being repaired](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-repair). This operation type is initiated by GKE, typically automatically. This operation may be concurrent with other operations and there may be multiple repairs occurring on the same node pool.", + "Unused. Automatic node upgrade uses UPGRADE_NODES.", + "Unused. Updating labels uses UPDATE_CLUSTER.", + "Unused. Updating master auth uses UPDATE_CLUSTER.", + "The node pool is being resized. With the exception of resizing to or from size zero, the node pool is generally usable during this operation.", + "Unused. Updating network policy uses UPDATE_CLUSTER.", + "Unused. Updating maintenance policy uses UPDATE_CLUSTER.", + "The control plane is being resized. This operation type is initiated by GKE. These operations are often performed preemptively to ensure that the control plane has sufficient resources and is not typically an indication of issues. For more details, see [documentation on resizes](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions#repairs)." 
], "type": "string" }, @@ -5187,7 +5574,7 @@ "readOnly": true }, "selfLink": { - "description": "Server-defined URL for the resource.", + "description": "Server-defined URI for the operation. Example: `https://container.googleapis.com/v1alpha1/projects/123/locations/us-central1/operations/operation-123`.", "type": "string" }, "startTime": { @@ -5218,7 +5605,7 @@ "type": "string" }, "targetLink": { - "description": "Server-defined URL for the target of the operation.", + "description": "Server-defined URI for the target of the operation. The format of this is a URI to the resource being modified (such as a cluster, node pool, or node). For node pool repairs, there may be multiple nodes being repaired, but only one will be the target. Examples: - ## `https://container.googleapis.com/v1/projects/123/locations/us-central1/clusters/my-cluster` ## `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/clusters/my-cluster/nodePools/my-np` `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/clusters/my-cluster/nodePools/my-np/node/my-node`", "type": "string" }, "zone": { @@ -5290,6 +5677,17 @@ }, "type": "object" }, + "PodCIDROverprovisionConfig": { + "description": "[PRIVATE FIELD] Config for pod CIDR size overprovisioning.", + "id": "PodCIDROverprovisionConfig", + "properties": { + "disable": { + "description": "Whether Pod CIDR overprovisioning is disabled. Note: Pod CIDR overprovisioning is enabled by default.", + "type": "boolean" + } + }, + "type": "object" + }, "PrivateClusterConfig": { "description": "Configuration options for private clusters.", "id": "PrivateClusterConfig", @@ -5359,6 +5757,24 @@ }, "type": "object" }, + "RangeInfo": { + "description": "RangeInfo contains the range name and the range utilization by this cluster.", + "id": "RangeInfo", + "properties": { + "rangeName": { + "description": "Output only. 
[Output only] Name of a range.", + "readOnly": true, + "type": "string" + }, + "utilization": { + "description": "Output only. [Output only] The utilization of the range.", + "format": "double", + "readOnly": true, + "type": "number" + } + }, + "type": "object" + }, "RecurringTimeWindow": { "description": "Represents an arbitrary window of time that recurs.", "id": "RecurringTimeWindow", @@ -5625,6 +6041,41 @@ }, "type": "object" }, + "SecurityPostureConfig": { + "description": "SecurityPostureConfig defines the flags needed to enable/disable features for the Security Posture API.", + "id": "SecurityPostureConfig", + "properties": { + "mode": { + "description": "Sets which mode to use for Security Posture features.", + "enum": [ + "MODE_UNSPECIFIED", + "DISABLED", + "BASIC" + ], + "enumDescriptions": [ + "Default value not specified.", + "Disables Security Posture features on the cluster.", + "Applies Security Posture features on the cluster." + ], + "type": "string" + }, + "vulnerabilityMode": { + "description": "Sets which mode to use for vulnerability scanning.", + "enum": [ + "VULNERABILITY_MODE_UNSPECIFIED", + "VULNERABILITY_DISABLED", + "VULNERABILITY_BASIC" + ], + "enumDescriptions": [ + "Default value not specified.", + "Disables vulnerability scanning on the cluster.", + "Applies basic vulnerability scanning on the cluster." 
+ ], + "type": "string" + } + }, + "type": "object" + }, "ServerConfig": { "description": "Kubernetes Engine service configuration.", "id": "ServerConfig", @@ -6068,6 +6519,20 @@ }, "type": "object" }, + "SoleTenantConfig": { + "description": "SoleTenantConfig contains the NodeAffinities to specify what shared sole tenant node groups should back the node pool.", + "id": "SoleTenantConfig", + "properties": { + "nodeAffinities": { + "description": "NodeAffinities used to match to a shared sole tenant node group.", + "items": { + "$ref": "NodeAffinity" + }, + "type": "array" + } + }, + "type": "object" + }, "StandardRolloutPolicy": { "description": "Standard rollout policy is the default policy for blue-green.", "id": "StandardRolloutPolicy", @@ -6575,7 +7040,7 @@ "enumDescriptions": [ "UNKNOWN is the zero value of the Status enum. It's not a valid status.", "UNUSED denotes that this range is unclaimed by any cluster.", - "IN_USE_SERVICE denotes that this range is claimed by a cluster for services. It cannot be used for other clusters.", + "IN_USE_SERVICE denotes that this range is claimed by cluster(s) for services. User-managed services range can be shared between clusters within the same subnetwork.", "IN_USE_SHAREABLE_POD denotes this range was created by the network admin and is currently claimed by a cluster for pods. It can only be used by other clusters as a pod range.", "IN_USE_MANAGED_POD denotes this range was created by GKE and is claimed for pods. It cannot be used for other clusters." 
], @@ -6658,6 +7123,17 @@ } }, "type": "object" + }, + "WorkloadPolicyConfig": { + "description": "WorkloadPolicyConfig is the configuration of workload policy for autopilot clusters.", + "id": "WorkloadPolicyConfig", + "properties": { + "allowNetAdmin": { + "description": "If true, workloads can use NET_ADMIN capability.", + "type": "boolean" + } + }, + "type": "object" } }, "servicePath": "", diff --git a/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-gen.go b/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-gen.go index 675d4a13ba..a6368a67be 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/container/v1/container-gen.go @@ -71,6 +71,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "container:v1" const apiName = "container" @@ -290,6 +291,10 @@ type AcceleratorConfig struct { // (https://cloud.google.com/compute/docs/gpus) AcceleratorType string `json:"acceleratorType,omitempty"` + // GpuDriverInstallationConfig: The configuration for auto installation + // of GPU driver. + GpuDriverInstallationConfig *GPUDriverInstallationConfig `json:"gpuDriverInstallationConfig,omitempty"` + // GpuPartitionSize: Size of partitions to create on the GPU. Valid // values are described in the NVIDIA mig user guide // (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning). @@ -322,6 +327,41 @@ func (s *AcceleratorConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AdditionalPodRangesConfig: AdditionalPodRangesConfig is the +// configuration for additional pod secondary ranges supporting the +// ClusterUpdate message. +type AdditionalPodRangesConfig struct { + // PodRangeInfo: Output only. 
[Output only] Information for additional + // pod range. + PodRangeInfo []*RangeInfo `json:"podRangeInfo,omitempty"` + + // PodRangeNames: Name for pod secondary ipv4 range which has the actual + // range defined ahead. + PodRangeNames []string `json:"podRangeNames,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PodRangeInfo") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PodRangeInfo") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AdditionalPodRangesConfig) MarshalJSON() ([]byte, error) { + type NoMethod AdditionalPodRangesConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AddonsConfig: Configuration for the addons that can be automatically // spun up in the cluster, enabling additional functionality. type AddonsConfig struct { @@ -346,6 +386,10 @@ type AddonsConfig struct { // driver. GcpFilestoreCsiDriverConfig *GcpFilestoreCsiDriverConfig `json:"gcpFilestoreCsiDriverConfig,omitempty"` + // GcsFuseCsiDriverConfig: Configuration for the Cloud Storage Fuse CSI + // driver. 
+ GcsFuseCsiDriverConfig *GcsFuseCsiDriverConfig `json:"gcsFuseCsiDriverConfig,omitempty"` + // GkeBackupAgentConfig: Configuration for the Backup for GKE agent // addon. GkeBackupAgentConfig *GkeBackupAgentConfig `json:"gkeBackupAgentConfig,omitempty"` @@ -508,6 +552,9 @@ type Autopilot struct { // Enabled: Enable Autopilot Enabled bool `json:"enabled,omitempty"` + // WorkloadPolicyConfig: Workload policy configuration for Autopilot. + WorkloadPolicyConfig *WorkloadPolicyConfig `json:"workloadPolicyConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enabled") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -531,6 +578,62 @@ func (s *Autopilot) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AutopilotCompatibilityIssue: AutopilotCompatibilityIssue contains +// information about a specific compatibility issue with Autopilot mode. +type AutopilotCompatibilityIssue struct { + // ConstraintType: The constraint type of the issue. + ConstraintType string `json:"constraintType,omitempty"` + + // Description: The description of the issue. + Description string `json:"description,omitempty"` + + // DocumentationUrl: A URL to a public documnetation, which addresses + // resolving this issue. + DocumentationUrl string `json:"documentationUrl,omitempty"` + + // IncompatibilityType: The incompatibility type of this issue. + // + // Possible values: + // "UNSPECIFIED" - Default value, should not be used. + // "INCOMPATIBILITY" - Indicates that the issue is a known + // incompatibility between the cluster and Autopilot mode. + // "ADDITIONAL_CONFIG_REQUIRED" - Indicates the issue is an + // incompatibility if customers take no further action to resolve. 
+ // "PASSED_WITH_OPTIONAL_CONFIG" - Indicates the issue is not an + // incompatibility, but depending on the workloads business logic, there + // is a potential that they won't work on Autopilot. + IncompatibilityType string `json:"incompatibilityType,omitempty"` + + // LastObservation: The last time when this issue was observed. + LastObservation string `json:"lastObservation,omitempty"` + + // Subjects: The name of the resources which are subject to this issue. + Subjects []string `json:"subjects,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConstraintType") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConstraintType") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AutopilotCompatibilityIssue) MarshalJSON() ([]byte, error) { + type NoMethod AutopilotCompatibilityIssue + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AutoprovisioningNodePoolDefaults: AutoprovisioningNodePoolDefaults // contains defaults for a node pool created by NAP. type AutoprovisioningNodePoolDefaults struct { @@ -558,6 +661,10 @@ type AutoprovisioningNodePoolDefaults struct { // for available image types. 
ImageType string `json:"imageType,omitempty"` + // InsecureKubeletReadonlyPortEnabled: Enable or disable Kubelet read + // only port. + InsecureKubeletReadonlyPortEnabled bool `json:"insecureKubeletReadonlyPortEnabled,omitempty"` + // Management: Specifies the node management options for NAP created // node-pools. Management *NodeManagement `json:"management,omitempty"` @@ -613,6 +720,42 @@ func (s *AutoprovisioningNodePoolDefaults) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BestEffortProvisioning: Best effort provisioning. +type BestEffortProvisioning struct { + // Enabled: When this is enabled, cluster/node pool creations will + // ignore non-fatal errors like stockout to best provision as many nodes + // as possible right now and eventually bring up all target number of + // nodes + Enabled bool `json:"enabled,omitempty"` + + // MinProvisionNodes: Minimum number of nodes to be provisioned to be + // considered as succeeded, and the rest of nodes will be provisioned + // gradually and eventually when stockout issue has been resolved. + MinProvisionNodes int64 `json:"minProvisionNodes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BestEffortProvisioning) MarshalJSON() ([]byte, error) { + type NoMethod BestEffortProvisioning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BigQueryDestination: Parameters for using BigQuery as the destination // of resource usage export. type BigQueryDestination struct { @@ -826,6 +969,43 @@ func (s *CancelOperationRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CheckAutopilotCompatibilityResponse: +// CheckAutopilotCompatibilityResponse has a list of compatibility +// issues. +type CheckAutopilotCompatibilityResponse struct { + // Issues: The list of issues for the given operation. + Issues []*AutopilotCompatibilityIssue `json:"issues,omitempty"` + + // Summary: The summary of the autopilot compatibility response. + Summary string `json:"summary,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Issues") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Issues") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CheckAutopilotCompatibilityResponse) MarshalJSON() ([]byte, error) { + type NoMethod CheckAutopilotCompatibilityResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // CidrBlock: CidrBlock contains an optional name and one CIDR block. type CidrBlock struct { // CidrBlock: cidr_block must be specified in CIDR notation. @@ -998,6 +1178,9 @@ type Cluster struct { // Description: An optional description of this cluster. Description string `json:"description,omitempty"` + // EnableK8sBetaApis: Beta APIs Config + EnableK8sBetaApis *K8sBetaAPIConfig `json:"enableK8sBetaApis,omitempty"` + // EnableKubernetesAlpha: Kubernetes alpha features are enabled on this // cluster. This includes alpha API groups (e.g. v1alpha1) and features // that may not be production ready in the kubernetes version of the @@ -1025,6 +1208,9 @@ type Cluster struct { // format. ExpireTime string `json:"expireTime,omitempty"` + // Fleet: Fleet information for the cluster. + Fleet *Fleet `json:"fleet,omitempty"` + // Id: Output only. Unique id for the cluster. Id string `json:"id,omitempty"` @@ -1190,7 +1376,12 @@ type Cluster struct { // PrivateClusterConfig: Configuration for private cluster. PrivateClusterConfig *PrivateClusterConfig `json:"privateClusterConfig,omitempty"` - // ReleaseChannel: Release channel configuration. + // ReleaseChannel: Release channel configuration. If left unspecified on + // cluster creation and a version is specified, the cluster is enrolled + // in the most mature release channel where the version is available + // (first checking STABLE, then REGULAR, and finally RAPID). Otherwise, + // if no release channel configuration and no version is specified, the + // cluster is enrolled in the REGULAR channel with its default version. 
ReleaseChannel *ReleaseChannel `json:"releaseChannel,omitempty"` // ResourceLabels: The resource labels for the cluster to use to @@ -1202,6 +1393,10 @@ type Cluster struct { // unspecified. ResourceUsageExportConfig *ResourceUsageExportConfig `json:"resourceUsageExportConfig,omitempty"` + // SecurityPostureConfig: Enable/Disable Security Posture API features + // for the cluster. + SecurityPostureConfig *SecurityPostureConfig `json:"securityPostureConfig,omitempty"` + // SelfLink: [Output only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -1348,10 +1543,51 @@ func (s *ClusterAutoscaling) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ClusterNetworkPerformanceConfig: Configuration of network bandwidth +// tiers +type ClusterNetworkPerformanceConfig struct { + // TotalEgressBandwidthTier: Specifies the total network bandwidth tier + // for NodePools in the cluster. + // + // Possible values: + // "TIER_UNSPECIFIED" - Default value + // "TIER_1" - Higher bandwidth, actual values based on VM size. + TotalEgressBandwidthTier string `json:"totalEgressBandwidthTier,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "TotalEgressBandwidthTier") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TotalEgressBandwidthTier") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. 
It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ClusterNetworkPerformanceConfig) MarshalJSON() ([]byte, error) { + type NoMethod ClusterNetworkPerformanceConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ClusterUpdate: ClusterUpdate describes an update to the cluster. // Exactly one update can be applied to a cluster with each request, so // at most one field can be provided. type ClusterUpdate struct { + // AdditionalPodRangesConfig: The additional pod ranges to be added to + // the cluster. These pod ranges can be used by node pools to allocate + // pod IPs. + AdditionalPodRangesConfig *AdditionalPodRangesConfig `json:"additionalPodRangesConfig,omitempty"` + // DesiredAddonsConfig: Configurations for the various addons available // to run in the cluster. DesiredAddonsConfig *AddonsConfig `json:"desiredAddonsConfig,omitempty"` @@ -1360,6 +1596,10 @@ type ClusterUpdate struct { // config for the cluster. DesiredAuthenticatorGroupsConfig *AuthenticatorGroupsConfig `json:"desiredAuthenticatorGroupsConfig,omitempty"` + // DesiredAutopilotWorkloadPolicyConfig: The desired workload policy + // configuration for the autopilot cluster. + DesiredAutopilotWorkloadPolicyConfig *WorkloadPolicyConfig `json:"desiredAutopilotWorkloadPolicyConfig,omitempty"` + // DesiredBinaryAuthorization: The desired configuration options for the // Binary Authorization feature. DesiredBinaryAuthorization *BinaryAuthorization `json:"desiredBinaryAuthorization,omitempty"` @@ -1395,10 +1635,17 @@ type ClusterUpdate struct { // cluster. DesiredDnsConfig *DNSConfig `json:"desiredDnsConfig,omitempty"` + // DesiredEnableFqdnNetworkPolicy: Enable/Disable FQDN Network Policy + // for the cluster. 
+ DesiredEnableFqdnNetworkPolicy bool `json:"desiredEnableFqdnNetworkPolicy,omitempty"` + // DesiredEnablePrivateEndpoint: Enable/Disable private endpoint for the // cluster's master. DesiredEnablePrivateEndpoint bool `json:"desiredEnablePrivateEndpoint,omitempty"` + // DesiredFleet: The desired fleet configuration for the cluster. + DesiredFleet *Fleet `json:"desiredFleet,omitempty"` + // DesiredGatewayApiConfig: The desired config of Gateway API on this // cluster. DesiredGatewayApiConfig *GatewayAPIConfig `json:"desiredGatewayApiConfig,omitempty"` @@ -1418,6 +1665,9 @@ type ClusterUpdate struct { // visibility. DesiredIntraNodeVisibilityConfig *IntraNodeVisibilityConfig `json:"desiredIntraNodeVisibilityConfig,omitempty"` + // DesiredK8sBetaApis: Desired Beta APIs to be enabled for cluster. + DesiredK8sBetaApis *K8sBetaAPIConfig `json:"desiredK8sBetaApis,omitempty"` + // DesiredL4ilbSubsettingConfig: The desired L4 Internal Load Balancer // Subsetting configuration. DesiredL4ilbSubsettingConfig *ILBSubsettingConfig `json:"desiredL4ilbSubsettingConfig,omitempty"` @@ -1475,6 +1725,10 @@ type ClusterUpdate struct { // versions. DesiredMonitoringService string `json:"desiredMonitoringService,omitempty"` + // DesiredNetworkPerformanceConfig: The desired network performance + // config. + DesiredNetworkPerformanceConfig *ClusterNetworkPerformanceConfig `json:"desiredNetworkPerformanceConfig,omitempty"` + // DesiredNodePoolAutoConfigNetworkTags: The desired network tags that // apply to all auto-provisioned node pools in autopilot clusters and // node auto-provisioning enabled clusters. @@ -1534,6 +1788,10 @@ type ClusterUpdate struct { // exporting resource usage. DesiredResourceUsageExportConfig *ResourceUsageExportConfig `json:"desiredResourceUsageExportConfig,omitempty"` + // DesiredSecurityPostureConfig: Enable/Disable Security Posture API + // features for the cluster. 
+ DesiredSecurityPostureConfig *SecurityPostureConfig `json:"desiredSecurityPostureConfig,omitempty"` + // DesiredServiceExternalIpsConfig: ServiceExternalIPsConfig specifies // the config for the use of Services with ExternalIPs field. DesiredServiceExternalIpsConfig *ServiceExternalIPsConfig `json:"desiredServiceExternalIpsConfig,omitempty"` @@ -1560,26 +1818,37 @@ type ClusterUpdate struct { // DesiredWorkloadIdentityConfig: Configuration for Workload Identity. DesiredWorkloadIdentityConfig *WorkloadIdentityConfig `json:"desiredWorkloadIdentityConfig,omitempty"` + // EnableK8sBetaApis: Kubernetes open source beta apis enabled on the + // cluster. Only beta apis + EnableK8sBetaApis *K8sBetaAPIConfig `json:"enableK8sBetaApis,omitempty"` + // Etag: The current etag of the cluster. If an etag is provided and // does not match the current etag of the cluster, update will be // blocked and an ABORTED error will be returned. Etag string `json:"etag,omitempty"` - // ForceSendFields is a list of field names (e.g. "DesiredAddonsConfig") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // RemovedAdditionalPodRangesConfig: The additional pod ranges that are + // to be removed from the cluster. The pod ranges specified here must + // have been specified earlier in the 'additional_pod_ranges_config' + // argument. + RemovedAdditionalPodRangesConfig *AdditionalPodRangesConfig `json:"removedAdditionalPodRangesConfig,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AdditionalPodRangesConfig") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. 
However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DesiredAddonsConfig") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. + // "AdditionalPodRangesConfig") to include in API requests with the JSON + // null value. By default, fields with empty values are omitted from API + // requests. However, any field with an empty value appearing in + // NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. NullFields []string `json:"-"` } @@ -1866,6 +2135,7 @@ type DNSConfig struct { // "PLATFORM_DEFAULT" - Use GKE default DNS provider(kube-dns) for DNS // resolution. // "CLOUD_DNS" - Use CloudDNS for DNS resolution. + // "KUBE_DNS" - Use KubeDNS for DNS resolution ClusterDns string `json:"clusterDns,omitempty"` // ClusterDnsDomain: cluster_dns_domain is the suffix used for all @@ -1952,7 +2222,7 @@ type DatabaseEncryption struct { // y KeyName string `json:"keyName,omitempty"` - // State: Denotes the state of etcd encryption. + // State: The desired state of etcd encryption. // // Possible values: // "UNKNOWN" - Should never be set @@ -2155,6 +2425,83 @@ func (s *Filter) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Fleet: Fleet is the fleet configuration for the cluster. 
+type Fleet struct { + // Membership: [Output only] The full resource name of the registered + // fleet membership of the cluster, in the format + // `//gkehub.googleapis.com/projects/*/locations/*/memberships/*`. + Membership string `json:"membership,omitempty"` + + // PreRegistered: [Output only] Whether the cluster has been registered + // through the fleet API. + PreRegistered bool `json:"preRegistered,omitempty"` + + // Project: The Fleet host project(project ID or project number) where + // this cluster will be registered to. This field cannot be changed + // after the cluster has been registered. + Project string `json:"project,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Membership") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Membership") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Fleet) MarshalJSON() ([]byte, error) { + type NoMethod Fleet + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GPUDriverInstallationConfig: GPUDriverInstallationConfig specifies +// the version of GPU driver to be auto installed. +type GPUDriverInstallationConfig struct { + // GpuDriverVersion: Mode for how the GPU driver is installed. 
+ // + // Possible values: + // "GPU_DRIVER_VERSION_UNSPECIFIED" - Default value is to not install + // any GPU driver. + // "INSTALLATION_DISABLED" - Disable GPU driver auto installation and + // needs manual installation + // "DEFAULT" - "Default" GPU driver in COS and Ubuntu. + // "LATEST" - "Latest" GPU driver in COS. + GpuDriverVersion string `json:"gpuDriverVersion,omitempty"` + + // ForceSendFields is a list of field names (e.g. "GpuDriverVersion") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "GpuDriverVersion") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GPUDriverInstallationConfig) MarshalJSON() ([]byte, error) { + type NoMethod GPUDriverInstallationConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GPUSharingConfig: GPUSharingConfig represents the GPU sharing // configuration for Hardware Accelerators. type GPUSharingConfig struct { @@ -2320,6 +2667,36 @@ func (s *GcpFilestoreCsiDriverConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GcsFuseCsiDriverConfig: Configuration for the Cloud Storage Fuse CSI +// driver. 
+type GcsFuseCsiDriverConfig struct { + // Enabled: Whether the Cloud Storage Fuse CSI driver is enabled for + // this cluster. + Enabled bool `json:"enabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GcsFuseCsiDriverConfig) MarshalJSON() ([]byte, error) { + type NoMethod GcsFuseCsiDriverConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GetJSONWebKeysResponse: GetJSONWebKeysResponse is a valid JSON Web // Key Set as specififed in rfc 7517 type GetJSONWebKeysResponse struct { @@ -2576,6 +2953,12 @@ func (s *ILBSubsettingConfig) MarshalJSON() ([]byte, error) { // IPAllocationPolicy: Configuration for controlling how IPs are // allocated in the cluster. type IPAllocationPolicy struct { + // AdditionalPodRangesConfig: Output only. [Output only] The additional + // pod ranges that are added to the cluster. These pod ranges can be + // used by new node pools to allocate pod IPs automatically. Once the + // range is removed it will not show up in IPAllocationPolicy. 
+ AdditionalPodRangesConfig *AdditionalPodRangesConfig `json:"additionalPodRangesConfig,omitempty"` + // ClusterIpv4Cidr: This field is deprecated, use // cluster_ipv4_cidr_block. ClusterIpv4Cidr string `json:"clusterIpv4Cidr,omitempty"` @@ -2604,6 +2987,12 @@ type IPAllocationPolicy struct { // `use_ip_aliases` is true. CreateSubnetwork bool `json:"createSubnetwork,omitempty"` + // DefaultPodIpv4RangeUtilization: Output only. [Output only] The + // utilization of the cluster default IPv4 range for pod. The ratio is + // Usage/[Total number of IPs in the secondary range], + // Usage=numNodes*numZones*podIPsPerNode. + DefaultPodIpv4RangeUtilization float64 `json:"defaultPodIpv4RangeUtilization,omitempty"` + // Ipv6AccessType: The ipv6 access type (internal or external) when // create_subnetwork is true // @@ -2630,6 +3019,17 @@ type IPAllocationPolicy struct { // specific range to use. NodeIpv4CidrBlock string `json:"nodeIpv4CidrBlock,omitempty"` + // PodCidrOverprovisionConfig: [PRIVATE FIELD] Pod CIDR size + // overprovisioning config for the cluster. Pod CIDR size per node + // depends on max_pods_per_node. By default, the value of + // max_pods_per_node is doubled and then rounded off to next power of 2 + // to get the size of pod CIDR block per node. Example: + // max_pods_per_node of 30 would result in 64 IPs (/26). This config can + // disable the doubling of IPs (we still round off to next power of 2) + // Example: max_pods_per_node of 30 will result in 32 IPs (/27) when + // overprovisioning is disabled. + PodCidrOverprovisionConfig *PodCIDROverprovisionConfig `json:"podCidrOverprovisionConfig,omitempty"` + // ServicesIpv4Cidr: This field is deprecated, use // services_ipv4_cidr_block. ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"` @@ -2699,21 +3099,22 @@ type IPAllocationPolicy struct { // false, then the server picks the default IP allocation mode UseRoutes bool `json:"useRoutes,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"ClusterIpv4Cidr") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AdditionalPodRangesConfig") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClusterIpv4Cidr") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. + // "AdditionalPodRangesConfig") to include in API requests with the JSON + // null value. By default, fields with empty values are omitted from API + // requests. However, any field with an empty value appearing in + // NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. 
NullFields []string `json:"-"` } @@ -2723,6 +3124,20 @@ func (s *IPAllocationPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +func (s *IPAllocationPolicy) UnmarshalJSON(data []byte) error { + type NoMethod IPAllocationPolicy + var s1 struct { + DefaultPodIpv4RangeUtilization gensupport.JSONFloat64 `json:"defaultPodIpv4RangeUtilization"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.DefaultPodIpv4RangeUtilization = float64(s1.DefaultPodIpv4RangeUtilization) + return nil +} + // IdentityServiceConfig: IdentityServiceConfig is configuration for // Identity Service which allows customers to use external identity // providers with the K8S API @@ -2834,6 +3249,34 @@ func (s *Jwk) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// K8sBetaAPIConfig: K8sBetaAPIConfig , configuration for beta APIs +type K8sBetaAPIConfig struct { + // EnabledApis: Enabled k8s beta APIs. + EnabledApis []string `json:"enabledApis,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EnabledApis") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EnabledApis") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *K8sBetaAPIConfig) MarshalJSON() ([]byte, error) { + type NoMethod K8sBetaAPIConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // KubernetesDashboard: Configuration for the Kubernetes Dashboard. type KubernetesDashboard struct { // Disabled: Whether the Kubernetes Dashboard is enabled for this @@ -3696,6 +4139,10 @@ type NetworkConfig struct { // DnsConfig: DNSConfig contains clusterDNS config for this cluster. DnsConfig *DNSConfig `json:"dnsConfig,omitempty"` + // EnableFqdnNetworkPolicy: Whether FQDN Network Policy is enabled on + // this cluster. + EnableFqdnNetworkPolicy bool `json:"enableFqdnNetworkPolicy,omitempty"` + // EnableIntraNodeVisibility: Whether Intra-node visibility is enabled // for this cluster. This makes same node pod to pod traffic visible for // VPC network. @@ -3715,6 +4162,9 @@ type NetworkConfig struct { // projects/my-project/global/networks/my-network Network string `json:"network,omitempty"` + // NetworkPerformanceConfig: Network bandwidth tier configuration. + NetworkPerformanceConfig *ClusterNetworkPerformanceConfig `json:"networkPerformanceConfig,omitempty"` + // PrivateIpv6GoogleAccess: The desired state of IPv6 connectivity to // Google Services. By default, no private IPv6 access to or from Google // Services (all access will be via IPv4) @@ -3895,6 +4345,47 @@ func (s *NetworkTags) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NodeAffinity: Specifies the NodeAffinity key, values, and affinity +// operator according to shared sole tenant node group affinities +// (https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity). +type NodeAffinity struct { + // Key: Key for NodeAffinity. + Key string `json:"key,omitempty"` + + // Operator: Operator for NodeAffinity. 
+ // + // Possible values: + // "OPERATOR_UNSPECIFIED" - Invalid or unspecified affinity operator. + // "IN" - Affinity operator. + // "NOT_IN" - Anti-affinity operator. + Operator string `json:"operator,omitempty"` + + // Values: Values for NodeAffinity. + Values []string `json:"values,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NodeAffinity) MarshalJSON() ([]byte, error) { + type NoMethod NodeAffinity + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // NodeConfig: Parameters that describe the nodes in a cluster. GKE // Autopilot clusters do not recognize parameters in `NodeConfig`. Use // AutoprovisioningNodePoolDefaults instead. @@ -4057,6 +4548,10 @@ type NodeConfig struct { // ShieldedInstanceConfig: Shielded Instance options. ShieldedInstanceConfig *ShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` + // SoleTenantConfig: Parameters for node pools to be backed by shared + // sole tenant node groups. 
+ SoleTenantConfig *SoleTenantConfig `json:"soleTenantConfig,omitempty"` + // Spot: Spot flag for enabling Spot VM, which is a rebrand of the // existing preemptible flag. Spot bool `json:"spot,omitempty"` @@ -4164,6 +4659,10 @@ type NodeKubeletConfig struct { // unspecified. CpuManagerPolicy string `json:"cpuManagerPolicy,omitempty"` + // InsecureKubeletReadonlyPortEnabled: Enable or disable Kubelet read + // only port. + InsecureKubeletReadonlyPortEnabled bool `json:"insecureKubeletReadonlyPortEnabled,omitempty"` + // PodPidsLimit: Set the Pod PID limits. See // https://kubernetes.io/docs/concepts/policy/pid-limiting/#pod-pid-limits // Controls the maximum number of processes allowed to run in a pod. The @@ -4282,6 +4781,17 @@ type NodeNetworkConfig struct { // NetworkPerformanceConfig: Network bandwidth tier configuration. NetworkPerformanceConfig *NetworkPerformanceConfig `json:"networkPerformanceConfig,omitempty"` + // PodCidrOverprovisionConfig: [PRIVATE FIELD] Pod CIDR size + // overprovisioning config for the nodepool. Pod CIDR size per node + // depends on max_pods_per_node. By default, the value of + // max_pods_per_node is rounded off to next power of 2 and we then + // double that to get the size of pod CIDR block per node. Example: + // max_pods_per_node of 30 would result in 64 IPs (/26). This config can + // disable the doubling of IPs (we still round off to next power of 2) + // Example: max_pods_per_node of 30 will result in 32 IPs (/27) when + // overprovisioning is disabled. + PodCidrOverprovisionConfig *PodCIDROverprovisionConfig `json:"podCidrOverprovisionConfig,omitempty"` + // PodIpv4CidrBlock: The IP address range for pod IPs in this node pool. // Only applicable if `create_pod_range` is true. Set to blank to have a // range chosen with the default size. Set to /netmask (e.g. `/14`) to @@ -4292,6 +4802,11 @@ type NodeNetworkConfig struct { // field cannot be changed after the node pool has been created. 
PodIpv4CidrBlock string `json:"podIpv4CidrBlock,omitempty"` + // PodIpv4RangeUtilization: Output only. [Output only] The utilization + // of the IPv4 range for pod. The ratio is Usage/[Total number of IPs in + // the secondary range], Usage=numNodes*numZones*podIPsPerNode. + PodIpv4RangeUtilization float64 `json:"podIpv4RangeUtilization,omitempty"` + // PodRange: The ID of the secondary range for pod IPs. If // `create_pod_range` is true, this ID is used for the new range. If // `create_pod_range` is false, uses an existing secondary range with @@ -4324,6 +4839,20 @@ func (s *NodeNetworkConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +func (s *NodeNetworkConfig) UnmarshalJSON(data []byte) error { + type NoMethod NodeNetworkConfig + var s1 struct { + PodIpv4RangeUtilization gensupport.JSONFloat64 `json:"podIpv4RangeUtilization"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.PodIpv4RangeUtilization = float64(s1.PodIpv4RangeUtilization) + return nil +} + // NodePool: NodePool contains the name and configuration for a // cluster's node pool. Node pools are a set of nodes (i.e. VM's), with // a common configuration and specification, under the control of the @@ -4335,6 +4864,9 @@ type NodePool struct { // is enabled only if a valid configuration is present. Autoscaling *NodePoolAutoscaling `json:"autoscaling,omitempty"` + // BestEffortProvisioning: Enable best effort provisioning for nodes + BestEffortProvisioning *BestEffortProvisioning `json:"bestEffortProvisioning,omitempty"` + // Conditions: Which conditions caused the current node pool state. Conditions []*StatusCondition `json:"conditions,omitempty"` @@ -4752,29 +5284,95 @@ type Operation struct { // // Possible values: // "TYPE_UNSPECIFIED" - Not set. - // "CREATE_CLUSTER" - Cluster create. - // "DELETE_CLUSTER" - Cluster delete. - // "UPGRADE_MASTER" - A master upgrade. 
- // "UPGRADE_NODES" - A node upgrade. - // "REPAIR_CLUSTER" - Cluster repair. - // "UPDATE_CLUSTER" - Cluster update. - // "CREATE_NODE_POOL" - Node pool create. - // "DELETE_NODE_POOL" - Node pool delete. - // "SET_NODE_POOL_MANAGEMENT" - Set node pool management. - // "AUTO_REPAIR_NODES" - Automatic node pool repair. - // "AUTO_UPGRADE_NODES" - Automatic node upgrade. - // "SET_LABELS" - Set labels. - // "SET_MASTER_AUTH" - Set/generate master auth materials - // "SET_NODE_POOL_SIZE" - Set node pool size. - // "SET_NETWORK_POLICY" - Updates network policy for a cluster. - // "SET_MAINTENANCE_POLICY" - Set the maintenance policy. + // "CREATE_CLUSTER" - The cluster is being created. The cluster should + // be assumed to be unusable until the operation finishes. In the event + // of the operation failing, the cluster will enter the ERROR state and + // eventually be deleted. + // "DELETE_CLUSTER" - The cluster is being deleted. The cluster should + // be assumed to be unusable as soon as this operation starts. In the + // event of the operation failing, the cluster will enter the ERROR + // state and the deletion will be automatically retried until completed. + // "UPGRADE_MASTER" - The cluster version is being updated. Note that + // this includes "upgrades" to the same version, which are simply a + // recreation. This also includes + // [auto-upgrades](https://cloud.google.com/kubernetes-engine/docs/concep + // ts/cluster-upgrades#upgrading_automatically). For more details, see + // [documentation on cluster + // upgrades](https://cloud.google.com/kubernetes-engine/docs/concepts/clu + // ster-upgrades#cluster_upgrades). + // "UPGRADE_NODES" - A node pool is being updated. Despite calling + // this an "upgrade", this includes most forms of updates to node pools. + // This also includes + // [auto-upgrades](https://cloud.google.com/kubernetes-engine/docs/how-to + // /node-auto-upgrades). This operation sets the progress field and may + // be canceled. 
The upgrade strategy depends on [node pool + // configuration](https://cloud.google.com/kubernetes-engine/docs/concept + // s/node-pool-upgrade-strategies). The nodes are generally still usable + // during this operation. + // "REPAIR_CLUSTER" - A problem has been detected with the control + // plane and is being repaired. This operation type is initiated by GKE. + // For more details, see [documentation on + // repairs](https://cloud.google.com/kubernetes-engine/docs/concepts/main + // tenance-windows-and-exclusions#repairs). + // "UPDATE_CLUSTER" - The cluster is being updated. This is a broad + // category of operations and includes operations that only change + // metadata as well as those that must recreate the entire cluster. If + // the control plane must be recreated, this will cause temporary + // downtime for zonal clusters. Some features require recreating the + // nodes as well. Those will be recreated as separate operations and the + // update may not be completely functional until the node pools + // recreations finish. Node recreations will generally follow + // [maintenance + // policies](https://cloud.google.com/kubernetes-engine/docs/concepts/mai + // ntenance-windows-and-exclusions). Some GKE-initiated operations use + // this type. This includes certain types of auto-upgrades and incident + // mitigations. + // "CREATE_NODE_POOL" - A node pool is being created. The node pool + // should be assumed to be unusable until this operation finishes. In + // the event of an error, the node pool may be partially created. If + // enabled, [node + // autoprovisioning](https://cloud.google.com/kubernetes-engine/docs/how- + // to/node-auto-provisioning) may have automatically initiated such + // operations. + // "DELETE_NODE_POOL" - The node pool is being deleted. The node pool + // should be assumed to be unusable as soon as this operation starts. + // "SET_NODE_POOL_MANAGEMENT" - The node pool's manamagent field is + // being updated. 
These operations only update metadata and may be + // concurrent with most other operations. + // "AUTO_REPAIR_NODES" - A problem has been detected with nodes and + // [they are being + // repaired](https://cloud.google.com/kubernetes-engine/docs/how-to/node- + // auto-repair). This operation type is initiated by GKE, typically + // automatically. This operation may be concurrent with other operations + // and there may be multiple repairs occurring on the same node pool. + // "AUTO_UPGRADE_NODES" - Unused. Automatic node upgrade uses + // UPGRADE_NODES. + // "SET_LABELS" - Unused. Updating labels uses UPDATE_CLUSTER. + // "SET_MASTER_AUTH" - Unused. Updating master auth uses + // UPDATE_CLUSTER. + // "SET_NODE_POOL_SIZE" - The node pool is being resized. With the + // exception of resizing to or from size zero, the node pool is + // generally usable during this operation. + // "SET_NETWORK_POLICY" - Unused. Updating network policy uses + // UPDATE_CLUSTER. + // "SET_MAINTENANCE_POLICY" - Unused. Updating maintenance policy uses + // UPDATE_CLUSTER. + // "RESIZE_CLUSTER" - The control plane is being resized. This + // operation type is initiated by GKE. These operations are often + // performed preemptively to ensure that the control plane has + // sufficient resources and is not typically an indication of issues. + // For more details, see [documentation on + // resizes](https://cloud.google.com/kubernetes-engine/docs/concepts/main + // tenance-windows-and-exclusions#repairs). OperationType string `json:"operationType,omitempty"` // Progress: Output only. [Output only] Progress information for an // operation. Progress *OperationProgress `json:"progress,omitempty"` - // SelfLink: Server-defined URL for the resource. + // SelfLink: Server-defined URI for the operation. Example: + // `https://container.googleapis.com/v1alpha1/projects/123/locations/us-c + // entral1/operations/operation-123`. 
SelfLink string `json:"selfLink,omitempty"` // StartTime: [Output only] The time the operation started, in RFC3339 @@ -4795,7 +5393,17 @@ type Operation struct { // description of the error. Deprecated. Use the field error instead. StatusMessage string `json:"statusMessage,omitempty"` - // TargetLink: Server-defined URL for the target of the operation. + // TargetLink: Server-defined URI for the target of the operation. The + // format of this is a URI to the resource being modified (such as a + // cluster, node pool, or node). For node pool repairs, there may be + // multiple nodes being repaired, but only one will be the target. + // Examples: - ## + // `https://container.googleapis.com/v1/projects/123/locations/us-central + // 1/clusters/my-cluster` ## + // `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/ + // clusters/my-cluster/nodePools/my-np` + // `https://container.googleapis.com/v1/projects/123/zones/us-central1-c/ + // clusters/my-cluster/nodePools/my-np/node/my-node` TargetLink string `json:"targetLink,omitempty"` // Zone: The name of the Google Compute Engine zone @@ -4917,6 +5525,36 @@ func (s *PlacementPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// PodCIDROverprovisionConfig: [PRIVATE FIELD] Config for pod CIDR size +// overprovisioning. +type PodCIDROverprovisionConfig struct { + // Disable: Whether Pod CIDR overprovisioning is disabled. Note: Pod + // CIDR overprovisioning is enabled by default. + Disable bool `json:"disable,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Disable") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Disable") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PodCIDROverprovisionConfig) MarshalJSON() ([]byte, error) { + type NoMethod PodCIDROverprovisionConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // PrivateClusterConfig: Configuration options for private clusters. type PrivateClusterConfig struct { // EnablePrivateEndpoint: Whether the master's internal IP address is @@ -5046,6 +5684,52 @@ func (s *PubSub) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RangeInfo: RangeInfo contains the range name and the range +// utilization by this cluster. +type RangeInfo struct { + // RangeName: Output only. [Output only] Name of a range. + RangeName string `json:"rangeName,omitempty"` + + // Utilization: Output only. [Output only] The utilization of the range. + Utilization float64 `json:"utilization,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RangeName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RangeName") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RangeInfo) MarshalJSON() ([]byte, error) { + type NoMethod RangeInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *RangeInfo) UnmarshalJSON(data []byte) error { + type NoMethod RangeInfo + var s1 struct { + Utilization gensupport.JSONFloat64 `json:"utilization"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Utilization = float64(s1.Utilization) + return nil +} + // RecurringTimeWindow: Represents an arbitrary window of time that // recurs. type RecurringTimeWindow struct { @@ -5506,6 +6190,50 @@ func (s *SecurityBulletinEvent) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SecurityPostureConfig: SecurityPostureConfig defines the flags needed +// to enable/disable features for the Security Posture API. +type SecurityPostureConfig struct { + // Mode: Sets which mode to use for Security Posture features. + // + // Possible values: + // "MODE_UNSPECIFIED" - Default value not specified. + // "DISABLED" - Disables Security Posture features on the cluster. + // "BASIC" - Applies Security Posture features on the cluster. + Mode string `json:"mode,omitempty"` + + // VulnerabilityMode: Sets which mode to use for vulnerability scanning. + // + // Possible values: + // "VULNERABILITY_MODE_UNSPECIFIED" - Default value not specified. + // "VULNERABILITY_DISABLED" - Disables vulnerability scanning on the + // cluster. + // "VULNERABILITY_BASIC" - Applies basic vulnerability scanning on the + // cluster. 
+ VulnerabilityMode string `json:"vulnerabilityMode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Mode") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Mode") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPostureConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPostureConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ServerConfig: Kubernetes Engine service configuration. type ServerConfig struct { // Channels: List of release channel configurations. @@ -6306,6 +7034,38 @@ func (s *ShieldedNodes) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SoleTenantConfig: SoleTenantConfig contains the NodeAffinities to +// specify what shared sole tenant node groups should back the node +// pool. +type SoleTenantConfig struct { + // NodeAffinities: NodeAffinities used to match to a shared sole tenant + // node group. + NodeAffinities []*NodeAffinity `json:"nodeAffinities,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NodeAffinities") to + // unconditionally include in API requests. 
By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NodeAffinities") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SoleTenantConfig) MarshalJSON() ([]byte, error) { + type NoMethod SoleTenantConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // StandardRolloutPolicy: Standard rollout policy is the default policy // for blue-green. type StandardRolloutPolicy struct { @@ -7153,8 +7913,8 @@ type UsableSubnetworkSecondaryRange struct { // "UNUSED" - UNUSED denotes that this range is unclaimed by any // cluster. // "IN_USE_SERVICE" - IN_USE_SERVICE denotes that this range is - // claimed by a cluster for services. It cannot be used for other - // clusters. + // claimed by cluster(s) for services. User-managed services range can + // be shared between clusters within the same subnetwork. // "IN_USE_SHAREABLE_POD" - IN_USE_SHAREABLE_POD denotes this range // was created by the network admin and is currently claimed by a // cluster for pods. 
It can only be used by other clusters as a pod @@ -7352,6 +8112,35 @@ func (s *WorkloadMetadataConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// WorkloadPolicyConfig: WorkloadPolicyConfig is the configuration of +// workload policy for autopilot clusters. +type WorkloadPolicyConfig struct { + // AllowNetAdmin: If true, workloads can use NET_ADMIN capability. + AllowNetAdmin bool `json:"allowNetAdmin,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AllowNetAdmin") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowNetAdmin") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *WorkloadPolicyConfig) MarshalJSON() ([]byte, error) { + type NoMethod WorkloadPolicyConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // method id "container.projects.aggregated.usableSubnetworks.list": type ProjectsAggregatedUsableSubnetworksListCall struct { @@ -7721,11 +8510,13 @@ func (c *ProjectsLocationsGetServerConfigCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "Deprecated. 
The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { + // "deprecated": true, // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" @@ -7742,6 +8533,156 @@ func (c *ProjectsLocationsGetServerConfigCall) Do(opts ...googleapi.CallOption) } +// method id "container.projects.locations.clusters.checkAutopilotCompatibility": + +type ProjectsLocationsClustersCheckAutopilotCompatibilityCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// CheckAutopilotCompatibility: Checks the cluster compatibility with +// Autopilot mode, and returns a list of compatibility issues. +// +// - name: The name (project, location, cluster) of the cluster to +// retrieve. Specified in the format +// `projects/*/locations/*/clusters/*`. +func (r *ProjectsLocationsClustersService) CheckAutopilotCompatibility(name string) *ProjectsLocationsClustersCheckAutopilotCompatibilityCall { + c := &ProjectsLocationsClustersCheckAutopilotCompatibilityCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersCheckAutopilotCompatibilityCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) IfNoneMatch(entityTag string) *ProjectsLocationsClustersCheckAutopilotCompatibilityCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) Context(ctx context.Context) *ProjectsLocationsClustersCheckAutopilotCompatibilityCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:checkAutopilotCompatibility") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.checkAutopilotCompatibility" call. +// Exactly one of *CheckAutopilotCompatibilityResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *CheckAutopilotCompatibilityResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsClustersCheckAutopilotCompatibilityCall) Do(opts ...googleapi.CallOption) (*CheckAutopilotCompatibilityResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &CheckAutopilotCompatibilityResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Checks the cluster compatibility with Autopilot mode, and returns a list of compatibility issues.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:checkAutopilotCompatibility", + // "httpMethod": "GET", + // "id": "container.projects.locations.clusters.checkAutopilotCompatibility", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster) of the cluster to retrieve. Specified in the format `projects/*/locations/*/clusters/*`.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:checkAutopilotCompatibility", + // "response": { + // "$ref": "CheckAutopilotCompatibilityResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "container.projects.locations.clusters.completeIpRotation": type ProjectsLocationsClustersCompleteIpRotationCall struct { @@ -8185,6 +9126,7 @@ func (c *ProjectsLocationsClustersDeleteCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "clusterId": { + // "deprecated": true, // "description": "Deprecated. 
The name of the cluster to delete. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" @@ -8197,11 +9139,13 @@ func (c *ProjectsLocationsClustersDeleteCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { + // "deprecated": true, // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" @@ -8375,6 +9319,7 @@ func (c *ProjectsLocationsClustersGetCall) Do(opts ...googleapi.CallOption) (*Cl // ], // "parameters": { // "clusterId": { + // "deprecated": true, // "description": "Deprecated. The name of the cluster to retrieve. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" @@ -8387,11 +9332,13 @@ func (c *ProjectsLocationsClustersGetCall) Do(opts ...googleapi.CallOption) (*Cl // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { + // "deprecated": true, // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" @@ -8711,11 +9658,13 @@ func (c *ProjectsLocationsClustersListCall) Do(opts ...googleapi.CallOption) (*L // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // }, // "zone": { + // "deprecated": true, // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones. This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" @@ -9138,6 +10087,7 @@ func (c *ProjectsLocationsClustersSetLocationsCall) Do(opts ...googleapi.CallOpt } return ret, nil // { + // "deprecated": true, // "description": "Sets the locations for a specific cluster. Deprecated. Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLocations", // "httpMethod": "POST", @@ -10905,6 +11855,7 @@ func (c *ProjectsLocationsClustersNodePoolsDeleteCall) Do(opts ...googleapi.Call // ], // "parameters": { // "clusterId": { + // "deprecated": true, // "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" @@ -10917,16 +11868,19 @@ func (c *ProjectsLocationsClustersNodePoolsDeleteCall) Do(opts ...googleapi.Call // "type": "string" // }, // "nodePoolId": { + // "deprecated": true, // "description": "Deprecated. The name of the node pool to delete. 
This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { + // "deprecated": true, // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" @@ -11108,6 +12062,7 @@ func (c *ProjectsLocationsClustersNodePoolsGetCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "clusterId": { + // "deprecated": true, // "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" @@ -11120,16 +12075,19 @@ func (c *ProjectsLocationsClustersNodePoolsGetCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "nodePoolId": { + // "deprecated": true, // "description": "Deprecated. The name of the node pool. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { + // "deprecated": true, // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" @@ -11303,6 +12261,7 @@ func (c *ProjectsLocationsClustersNodePoolsListCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "clusterId": { + // "deprecated": true, // "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" @@ -11315,11 +12274,13 @@ func (c *ProjectsLocationsClustersNodePoolsListCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // }, // "zone": { + // "deprecated": true, // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" @@ -12518,16 +13479,19 @@ func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "operationId": { + // "deprecated": true, // "description": "Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" // }, // "zone": { + // "deprecated": true, // "description": "Deprecated. 
The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", // "location": "query", // "type": "string" @@ -12701,11 +13665,13 @@ func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "projectId": { + // "deprecated": true, // "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" // }, // "zone": { + // "deprecated": true, // "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for, or `-` for all zones. This field has been deprecated and replaced by the parent field.", // "location": "query", // "type": "string" @@ -14247,6 +15213,7 @@ func (c *ProjectsZonesClustersLocationsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { + // "deprecated": true, // "description": "Sets the locations for a specific cluster. Deprecated. 
Use [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update) instead.", // "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", // "httpMethod": "POST", diff --git a/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json b/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json index 3f289ceae7..045edbea4c 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json @@ -188,7 +188,7 @@ "jobs": { "methods": { "aggregated": { - "description": "List the jobs of a project across all regions.", + "description": "List the jobs of a project across all regions. **Note:** This method doesn't support filtering the list of jobs by name.", "flatPath": "v1b3/projects/{projectId}/jobs:aggregated", "httpMethod": "GET", "id": "dataflow.projects.jobs.aggregated", @@ -219,7 +219,7 @@ "type": "string" }, "name": { - "description": "Optional. The job name. Optional.", + "description": "Optional. The job name.", "location": "query", "type": "string" }, @@ -241,6 +241,7 @@ "type": "string" }, "view": { + "deprecated": true, "description": "Deprecated. ListJobs always returns summaries now. Use GetJob for other JobViews.", "enum": [ "JOB_VIEW_UNKNOWN", @@ -251,7 +252,7 @@ "enumDescriptions": [ "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - "Request all information available for this job.", + "Request all information available for this job. 
When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", "Request summary info and limited job description data for steps, labels and environment." ], "location": "query", @@ -305,7 +306,7 @@ "enumDescriptions": [ "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - "Request all information available for this job.", + "Request all information available for this job. When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", "Request summary info and limited job description data for steps, labels and environment." ], "location": "query", @@ -364,7 +365,7 @@ "enumDescriptions": [ "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - "Request all information available for this job.", + "Request all information available for this job. When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. 
For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", "Request summary info and limited job description data for steps, labels and environment." ], "location": "query", @@ -428,7 +429,7 @@ ] }, "list": { - "description": "List the jobs of a project. To list the jobs of a project in a region, we recommend using `projects.locations.jobs.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To list the all jobs across all regions, use `projects.jobs.aggregated`. Using `projects.jobs.list` is not recommended, as you can only get the list of jobs that are running in `us-central1`.", + "description": "List the jobs of a project. To list the jobs of a project in a region, we recommend using `projects.locations.jobs.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To list the all jobs across all regions, use `projects.jobs.aggregated`. Using `projects.jobs.list` is not recommended, because you can only get the list of jobs that are running in `us-central1`. `projects.locations.jobs.list` and `projects.jobs.list` support filtering the list of jobs by name. Filtering by name isn't supported by `projects.jobs.aggregated`.", "flatPath": "v1b3/projects/{projectId}/jobs", "httpMethod": "GET", "id": "dataflow.projects.jobs.list", @@ -459,7 +460,7 @@ "type": "string" }, "name": { - "description": "Optional. The job name. Optional.", + "description": "Optional. The job name.", "location": "query", "type": "string" }, @@ -481,6 +482,7 @@ "type": "string" }, "view": { + "deprecated": true, "description": "Deprecated. ListJobs always returns summaries now. Use GetJob for other JobViews.", "enum": [ "JOB_VIEW_UNKNOWN", @@ -491,7 +493,7 @@ "enumDescriptions": [ "The job view to return isn't specified, or is unknown. 
Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - "Request all information available for this job.", + "Request all information available for this job. When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", "Request summary info and limited job description data for steps, labels and environment." ], "location": "query", @@ -572,6 +574,12 @@ "location": "path", "required": true, "type": "string" + }, + "updateMask": { + "description": "The list of fields to update relative to Job. If empty, only RequestedJobState will be considered for update. If the FieldMask is not empty and RequestedJobState is none/empty, The fields specified in the update mask will be the only ones considered for update. If both RequestedJobState and update_mask are specified, an error will be returned as we cannot update both state and mask.", + "format": "google-fieldmask", + "location": "query", + "type": "string" } }, "path": "v1b3/projects/{projectId}/jobs/{jobId}", @@ -957,7 +965,7 @@ "enumDescriptions": [ "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - "Request all information available for this job.", + "Request all information available for this job. 
When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", "Request summary info and limited job description data for steps, labels and environment." ], "location": "query", @@ -1018,7 +1026,7 @@ "enumDescriptions": [ "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - "Request all information available for this job.", + "Request all information available for this job. When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", "Request summary info and limited job description data for steps, labels and environment." ], "location": "query", @@ -1136,7 +1144,7 @@ ] }, "list": { - "description": "List the jobs of a project. To list the jobs of a project in a region, we recommend using `projects.locations.jobs.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To list the all jobs across all regions, use `projects.jobs.aggregated`. Using `projects.jobs.list` is not recommended, as you can only get the list of jobs that are running in `us-central1`.", + "description": "List the jobs of a project. 
To list the jobs of a project in a region, we recommend using `projects.locations.jobs.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To list the all jobs across all regions, use `projects.jobs.aggregated`. Using `projects.jobs.list` is not recommended, because you can only get the list of jobs that are running in `us-central1`. `projects.locations.jobs.list` and `projects.jobs.list` support filtering the list of jobs by name. Filtering by name isn't supported by `projects.jobs.aggregated`.", "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs", "httpMethod": "GET", "id": "dataflow.projects.locations.jobs.list", @@ -1169,7 +1177,7 @@ "type": "string" }, "name": { - "description": "Optional. The job name. Optional.", + "description": "Optional. The job name.", "location": "query", "type": "string" }, @@ -1191,6 +1199,7 @@ "type": "string" }, "view": { + "deprecated": true, "description": "Deprecated. ListJobs always returns summaries now. Use GetJob for other JobViews.", "enum": [ "JOB_VIEW_UNKNOWN", @@ -1201,7 +1210,7 @@ "enumDescriptions": [ "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - "Request all information available for this job.", + "Request all information available for this job. When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", "Request summary info and limited job description data for steps, labels and environment." 
], "location": "query", @@ -1291,6 +1300,12 @@ "location": "path", "required": true, "type": "string" + }, + "updateMask": { + "description": "The list of fields to update relative to Job. If empty, only RequestedJobState will be considered for update. If the FieldMask is not empty and RequestedJobState is none/empty, The fields specified in the update mask will be the only ones considered for update. If both RequestedJobState and update_mask are specified, an error will be returned as we cannot update both state and mask.", + "format": "google-fieldmask", + "location": "query", + "type": "string" } }, "path": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}", @@ -2199,7 +2214,7 @@ } } }, - "revision": "20230119", + "revision": "20230619", "rootUrl": "https://dataflow.googleapis.com/", "schemas": { "ApproximateProgress": { @@ -3826,6 +3841,10 @@ ], "type": "string" }, + "runtimeUpdatableParams": { + "$ref": "RuntimeUpdatableParams", + "description": "This field may ONLY be modified at runtime using the projects.jobs.update method to adjust job behavior. This field has no effect when specified at job creation." + }, "satisfiesPzs": { "description": "Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests.", "type": "boolean" @@ -4019,6 +4038,13 @@ "$ref": "SpannerIODetails" }, "type": "array" + }, + "userDisplayProperties": { + "additionalProperties": { + "type": "string" + }, + "description": "List of display properties to help UI filter jobs.", + "type": "object" } }, "type": "object" @@ -4674,6 +4700,17 @@ "description": "Optional. Additional metadata for describing this parameter.", "type": "object" }, + "enumOptions": { + "description": "Optional. The options shown when ENUM ParameterType is specified.", + "items": { + "$ref": "ParameterMetadataEnumOption" + }, + "type": "array" + }, + "groupName": { + "description": "Optional. 
Specifies a group name for this parameter to be rendered under. Group header text will be rendered exactly as specified in this field. Only considered when parent_name is NOT provided.", + "type": "string" + }, "helpText": { "description": "Required. The help text to display for the parameter.", "type": "string" @@ -4704,7 +4741,14 @@ "PUBSUB_TOPIC", "PUBSUB_SUBSCRIPTION", "BIGQUERY_TABLE", - "JAVASCRIPT_UDF_FILE" + "JAVASCRIPT_UDF_FILE", + "SERVICE_ACCOUNT", + "MACHINE_TYPE", + "KMS_KEY_NAME", + "WORKER_REGION", + "WORKER_ZONE", + "BOOLEAN", + "ENUM" ], "enumDescriptions": [ "Default input type.", @@ -4718,10 +4762,28 @@ "The parameter specifies a Pub/Sub Topic.", "The parameter specifies a Pub/Sub Subscription.", "The parameter specifies a BigQuery table.", - "The parameter specifies a JavaScript UDF in Cloud Storage." + "The parameter specifies a JavaScript UDF in Cloud Storage.", + "The parameter specifies a Service Account email.", + "The parameter specifies a Machine Type.", + "The parameter specifies a KMS Key name.", + "The parameter specifies a Worker Region.", + "The parameter specifies a Worker Zone.", + "The parameter specifies a boolean input.", + "The parameter specifies an enum input." ], "type": "string" }, + "parentName": { + "description": "Optional. Specifies the name of the parent parameter. Used in conjunction with 'parent_trigger_values' to make this parameter conditional (will only be rendered conditionally). Should be mappable to a ParameterMetadata.name field.", + "type": "string" + }, + "parentTriggerValues": { + "description": "Optional. The value(s) of the 'parent_name' parameter which will trigger this parameter to be shown. If left empty, ANY non-empty value in parent_name will trigger this parameter to be shown. Only considered when this parameter is conditional (when 'parent_name' has been provided).", + "items": { + "type": "string" + }, + "type": "array" + }, "regexes": { "description": "Optional. 
Regexes that the parameter must match.", "items": { @@ -4732,6 +4794,25 @@ }, "type": "object" }, + "ParameterMetadataEnumOption": { + "description": "ParameterMetadataEnumOption specifies the option shown in the enum form.", + "id": "ParameterMetadataEnumOption", + "properties": { + "description": { + "description": "Optional. The description to display for the enum option.", + "type": "string" + }, + "label": { + "description": "Optional. The label to display for the enum option.", + "type": "string" + }, + "value": { + "description": "Required. The value of the enum option.", + "type": "string" + } + }, + "type": "object" + }, "PartialGroupByKeyInstruction": { "description": "An instruction that does a partial group-by-key. One input and one output.", "id": "PartialGroupByKeyInstruction", @@ -4898,6 +4979,10 @@ "description": "Indicates whether the pipeline allows late-arriving data.", "type": "boolean" }, + "dynamicDestinations": { + "description": "If true, then this location represents dynamic topics.", + "type": "boolean" + }, "idLabel": { "description": "If set, contains a pubsub label from which to extract record ids. If left empty, record deduplication will be strictly best effort.", "type": "string" @@ -5170,6 +5255,23 @@ }, "type": "object" }, + "RuntimeUpdatableParams": { + "description": "Additional job parameters that can only be updated during runtime using the projects.jobs.update method. These fields have no effect when specified during job creation.", + "id": "RuntimeUpdatableParams", + "properties": { + "maxNumWorkers": { + "description": "The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs.", + "format": "int32", + "type": "integer" + }, + "minNumWorkers": { + "description": "The minimum number of workers to scale down to. 
This field is currently only supported for Streaming Engine jobs.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "SDKInfo": { "description": "SDK Information.", "id": "SDKInfo", diff --git a/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go b/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go index a7fdac3abc..95e9b30488 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "dataflow:v1b3" const apiName = "dataflow" @@ -2262,6 +2263,23 @@ func (s *FloatingPointList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +func (s *FloatingPointList) UnmarshalJSON(data []byte) error { + type NoMethod FloatingPointList + var s1 struct { + Elements []gensupport.JSONFloat64 `json:"elements"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Elements = make([]float64, len(s1.Elements)) + for i := range s1.Elements { + s.Elements[i] = float64(s1.Elements[i]) + } + return nil +} + // FloatingPointMean: A representation of a floating point mean metric // contribution. type FloatingPointMean struct { @@ -2959,6 +2977,11 @@ type Job struct { // interested. RequestedState string `json:"requestedState,omitempty"` + // RuntimeUpdatableParams: This field may ONLY be modified at runtime + // using the projects.jobs.update method to adjust job behavior. This + // field has no effect when specified at job creation. + RuntimeUpdatableParams *RuntimeUpdatableParams `json:"runtimeUpdatableParams,omitempty"` + // SatisfiesPzs: Reserved for future use. 
This field is set only in // responses from the server; it is ignored if it is set in any // requests. @@ -3228,6 +3251,10 @@ type JobMetadata struct { // Dataflow job. SpannerDetails []*SpannerIODetails `json:"spannerDetails,omitempty"` + // UserDisplayProperties: List of display properties to help UI filter + // jobs. + UserDisplayProperties map[string]string `json:"userDisplayProperties,omitempty"` + // ForceSendFields is a list of field names (e.g. "BigTableDetails") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -4321,6 +4348,16 @@ type ParameterMetadata struct { // parameter. CustomMetadata map[string]string `json:"customMetadata,omitempty"` + // EnumOptions: Optional. The options shown when ENUM ParameterType is + // specified. + EnumOptions []*ParameterMetadataEnumOption `json:"enumOptions,omitempty"` + + // GroupName: Optional. Specifies a group name for this parameter to be + // rendered under. Group header text will be rendered exactly as + // specified in this field. Only considered when parent_name is NOT + // provided. + GroupName string `json:"groupName,omitempty"` + // HelpText: Required. The help text to display for the parameter. HelpText string `json:"helpText,omitempty"` @@ -4358,8 +4395,29 @@ type ParameterMetadata struct { // "BIGQUERY_TABLE" - The parameter specifies a BigQuery table. // "JAVASCRIPT_UDF_FILE" - The parameter specifies a JavaScript UDF in // Cloud Storage. + // "SERVICE_ACCOUNT" - The parameter specifies a Service Account + // email. + // "MACHINE_TYPE" - The parameter specifies a Machine Type. + // "KMS_KEY_NAME" - The parameter specifies a KMS Key name. + // "WORKER_REGION" - The parameter specifies a Worker Region. + // "WORKER_ZONE" - The parameter specifies a Worker Zone. + // "BOOLEAN" - The parameter specifies a boolean input. + // "ENUM" - The parameter specifies an enum input. 
ParamType string `json:"paramType,omitempty"` + // ParentName: Optional. Specifies the name of the parent parameter. + // Used in conjunction with 'parent_trigger_values' to make this + // parameter conditional (will only be rendered conditionally). Should + // be mappable to a ParameterMetadata.name field. + ParentName string `json:"parentName,omitempty"` + + // ParentTriggerValues: Optional. The value(s) of the 'parent_name' + // parameter which will trigger this parameter to be shown. If left + // empty, ANY non-empty value in parent_name will trigger this parameter + // to be shown. Only considered when this parameter is conditional (when + // 'parent_name' has been provided). + ParentTriggerValues []string `json:"parentTriggerValues,omitempty"` + // Regexes: Optional. Regexes that the parameter must match. Regexes []string `json:"regexes,omitempty"` @@ -4387,6 +4445,42 @@ func (s *ParameterMetadata) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ParameterMetadataEnumOption: ParameterMetadataEnumOption specifies +// the option shown in the enum form. +type ParameterMetadataEnumOption struct { + // Description: Optional. The description to display for the enum + // option. + Description string `json:"description,omitempty"` + + // Label: Optional. The label to display for the enum option. + Label string `json:"label,omitempty"` + + // Value: Required. The value of the enum option. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ParameterMetadataEnumOption) MarshalJSON() ([]byte, error) { + type NoMethod ParameterMetadataEnumOption + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // PartialGroupByKeyInstruction: An instruction that does a partial // group-by-key. One input and one output. type PartialGroupByKeyInstruction struct { @@ -4658,6 +4752,10 @@ type PubsubLocation struct { // data. DropLateData bool `json:"dropLateData,omitempty"` + // DynamicDestinations: If true, then this location represents dynamic + // topics. + DynamicDestinations bool `json:"dynamicDestinations,omitempty"` + // IdLabel: If set, contains a pubsub label from which to extract record // ids. If left empty, record deduplication will be strictly best // effort. @@ -5101,6 +5199,41 @@ func (s *RuntimeMetadata) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RuntimeUpdatableParams: Additional job parameters that can only be +// updated during runtime using the projects.jobs.update method. These +// fields have no effect when specified during job creation. +type RuntimeUpdatableParams struct { + // MaxNumWorkers: The maximum number of workers to cap autoscaling at. + // This field is currently only supported for Streaming Engine jobs. + MaxNumWorkers int64 `json:"maxNumWorkers,omitempty"` + + // MinNumWorkers: The minimum number of workers to scale down to. 
This + // field is currently only supported for Streaming Engine jobs. + MinNumWorkers int64 `json:"minNumWorkers,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxNumWorkers") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxNumWorkers") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RuntimeUpdatableParams) MarshalJSON() ([]byte, error) { + type NoMethod RuntimeUpdatableParams + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SDKInfo: SDK Information. type SDKInfo struct { // Language: Required. The SDK Language. @@ -8591,7 +8724,8 @@ type ProjectsJobsAggregatedCall struct { header_ http.Header } -// Aggregated: List the jobs of a project across all regions. +// Aggregated: List the jobs of a project across all regions. **Note:** +// This method doesn't support filtering the list of jobs by name. // // - projectId: The project which owns the jobs. func (r *ProjectsJobsService) Aggregated(projectId string) *ProjectsJobsAggregatedCall { @@ -8636,7 +8770,7 @@ func (c *ProjectsJobsAggregatedCall) Location(location string) *ProjectsJobsAggr return c } -// Name sets the optional parameter "name": The job name. Optional. 
+// Name sets the optional parameter "name": The job name. func (c *ProjectsJobsAggregatedCall) Name(name string) *ProjectsJobsAggregatedCall { c.urlParams_.Set("name", name) return c @@ -8675,6 +8809,14 @@ func (c *ProjectsJobsAggregatedCall) PageToken(pageToken string) *ProjectsJobsAg // version details. // // "JOB_VIEW_ALL" - Request all information available for this job. +// +// When the job is in `JOB_STATE_PENDING`, the job has been created but +// is not yet running, and not all job information is available. For +// complete job information, wait until the job in is +// `JOB_STATE_RUNNING`. For more information, see +// [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ +// projects.jobs#jobstate). +// // "JOB_VIEW_DESCRIPTION" - Request summary info and limited job // // description data for steps, labels and environment. @@ -8782,7 +8924,7 @@ func (c *ProjectsJobsAggregatedCall) Do(opts ...googleapi.CallOption) (*ListJobs } return ret, nil // { - // "description": "List the jobs of a project across all regions.", + // "description": "List the jobs of a project across all regions. **Note:** This method doesn't support filtering the list of jobs by name.", // "flatPath": "v1b3/projects/{projectId}/jobs:aggregated", // "httpMethod": "GET", // "id": "dataflow.projects.jobs.aggregated", @@ -8813,7 +8955,7 @@ func (c *ProjectsJobsAggregatedCall) Do(opts ...googleapi.CallOption) (*ListJobs // "type": "string" // }, // "name": { - // "description": "Optional. The job name. Optional.", + // "description": "Optional. The job name.", // "location": "query", // "type": "string" // }, @@ -8835,6 +8977,7 @@ func (c *ProjectsJobsAggregatedCall) Do(opts ...googleapi.CallOption) (*ListJobs // "type": "string" // }, // "view": { + // "deprecated": true, // "description": "Deprecated. ListJobs always returns summaries now. 
Use GetJob for other JobViews.", // "enum": [ // "JOB_VIEW_UNKNOWN", @@ -8845,7 +8988,7 @@ func (c *ProjectsJobsAggregatedCall) Do(opts ...googleapi.CallOption) (*ListJobs // "enumDescriptions": [ // "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - // "Request all information available for this job.", + // "Request all information available for this job. When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", // "Request summary info and limited job description data for steps, labels and environment." // ], // "location": "query", @@ -8946,6 +9089,14 @@ func (c *ProjectsJobsCreateCall) ReplaceJobId(replaceJobId string) *ProjectsJobs // version details. // // "JOB_VIEW_ALL" - Request all information available for this job. +// +// When the job is in `JOB_STATE_PENDING`, the job has been created but +// is not yet running, and not all job information is available. For +// complete job information, wait until the job in is +// `JOB_STATE_RUNNING`. For more information, see +// [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ +// projects.jobs#jobstate). +// // "JOB_VIEW_DESCRIPTION" - Request summary info and limited job // // description data for steps, labels and environment. @@ -9080,7 +9231,7 @@ func (c *ProjectsJobsCreateCall) Do(opts ...googleapi.CallOption) (*Job, error) // "enumDescriptions": [ // "The job view to return isn't specified, or is unknown. 
Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - // "Request all information available for this job.", + // "Request all information available for this job. When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", // "Request summary info and limited job description data for steps, labels and environment." // ], // "location": "query", @@ -9158,6 +9309,14 @@ func (c *ProjectsJobsGetCall) Location(location string) *ProjectsJobsGetCall { // version details. // // "JOB_VIEW_ALL" - Request all information available for this job. +// +// When the job is in `JOB_STATE_PENDING`, the job has been created but +// is not yet running, and not all job information is available. For +// complete job information, wait until the job in is +// `JOB_STATE_RUNNING`. For more information, see +// [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ +// projects.jobs#jobstate). +// // "JOB_VIEW_DESCRIPTION" - Request summary info and limited job // // description data for steps, labels and environment. @@ -9303,7 +9462,7 @@ func (c *ProjectsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, error) { // "enumDescriptions": [ // "The job view to return isn't specified, or is unknown. 
Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - // "Request all information available for this job.", + // "Request all information available for this job. When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", // "Request summary info and limited job description data for steps, labels and environment." // ], // "location": "query", @@ -9533,8 +9692,10 @@ type ProjectsJobsListCall struct { // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). // To list the all jobs across all regions, use // `projects.jobs.aggregated`. Using `projects.jobs.list` is not -// recommended, as you can only get the list of jobs that are running in -// `us-central1`. +// recommended, because you can only get the list of jobs that are +// running in `us-central1`. `projects.locations.jobs.list` and +// `projects.jobs.list` support filtering the list of jobs by name. +// Filtering by name isn't supported by `projects.jobs.aggregated`. // // - projectId: The project which owns the jobs. func (r *ProjectsJobsService) List(projectId string) *ProjectsJobsListCall { @@ -9579,7 +9740,7 @@ func (c *ProjectsJobsListCall) Location(location string) *ProjectsJobsListCall { return c } -// Name sets the optional parameter "name": The job name. Optional. +// Name sets the optional parameter "name": The job name. 
func (c *ProjectsJobsListCall) Name(name string) *ProjectsJobsListCall { c.urlParams_.Set("name", name) return c @@ -9618,6 +9779,14 @@ func (c *ProjectsJobsListCall) PageToken(pageToken string) *ProjectsJobsListCall // version details. // // "JOB_VIEW_ALL" - Request all information available for this job. +// +// When the job is in `JOB_STATE_PENDING`, the job has been created but +// is not yet running, and not all job information is available. For +// complete job information, wait until the job in is +// `JOB_STATE_RUNNING`. For more information, see +// [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ +// projects.jobs#jobstate). +// // "JOB_VIEW_DESCRIPTION" - Request summary info and limited job // // description data for steps, labels and environment. @@ -9725,7 +9894,7 @@ func (c *ProjectsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsRespon } return ret, nil // { - // "description": "List the jobs of a project. To list the jobs of a project in a region, we recommend using `projects.locations.jobs.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To list the all jobs across all regions, use `projects.jobs.aggregated`. Using `projects.jobs.list` is not recommended, as you can only get the list of jobs that are running in `us-central1`.", + // "description": "List the jobs of a project. To list the jobs of a project in a region, we recommend using `projects.locations.jobs.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To list the all jobs across all regions, use `projects.jobs.aggregated`. Using `projects.jobs.list` is not recommended, because you can only get the list of jobs that are running in `us-central1`. `projects.locations.jobs.list` and `projects.jobs.list` support filtering the list of jobs by name. 
Filtering by name isn't supported by `projects.jobs.aggregated`.", // "flatPath": "v1b3/projects/{projectId}/jobs", // "httpMethod": "GET", // "id": "dataflow.projects.jobs.list", @@ -9756,7 +9925,7 @@ func (c *ProjectsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsRespon // "type": "string" // }, // "name": { - // "description": "Optional. The job name. Optional.", + // "description": "Optional. The job name.", // "location": "query", // "type": "string" // }, @@ -9778,6 +9947,7 @@ func (c *ProjectsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsRespon // "type": "string" // }, // "view": { + // "deprecated": true, // "description": "Deprecated. ListJobs always returns summaries now. Use GetJob for other JobViews.", // "enum": [ // "JOB_VIEW_UNKNOWN", @@ -9788,7 +9958,7 @@ func (c *ProjectsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJobsRespon // "enumDescriptions": [ // "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - // "Request all information available for this job.", + // "Request all information available for this job. When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", // "Request summary info and limited job description data for steps, labels and environment." 
// ], // "location": "query", @@ -10024,6 +10194,18 @@ func (c *ProjectsJobsUpdateCall) Location(location string) *ProjectsJobsUpdateCa return c } +// UpdateMask sets the optional parameter "updateMask": The list of +// fields to update relative to Job. If empty, only RequestedJobState +// will be considered for update. If the FieldMask is not empty and +// RequestedJobState is none/empty, The fields specified in the update +// mask will be the only ones considered for update. If both +// RequestedJobState and update_mask are specified, an error will be +// returned as we cannot update both state and mask. +func (c *ProjectsJobsUpdateCall) UpdateMask(updateMask string) *ProjectsJobsUpdateCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -10141,6 +10323,12 @@ func (c *ProjectsJobsUpdateCall) Do(opts ...googleapi.CallOption) (*Job, error) // "location": "path", // "required": true, // "type": "string" + // }, + // "updateMask": { + // "description": "The list of fields to update relative to Job. If empty, only RequestedJobState will be considered for update. If the FieldMask is not empty and RequestedJobState is none/empty, The fields specified in the update mask will be the only ones considered for update. If both RequestedJobState and update_mask are specified, an error will be returned as we cannot update both state and mask.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, // "path": "v1b3/projects/{projectId}/jobs/{jobId}", @@ -11475,6 +11663,14 @@ func (c *ProjectsLocationsJobsCreateCall) ReplaceJobId(replaceJobId string) *Pro // version details. // // "JOB_VIEW_ALL" - Request all information available for this job. 
+// +// When the job is in `JOB_STATE_PENDING`, the job has been created but +// is not yet running, and not all job information is available. For +// complete job information, wait until the job in is +// `JOB_STATE_RUNNING`. For more information, see +// [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ +// projects.jobs#jobstate). +// // "JOB_VIEW_DESCRIPTION" - Request summary info and limited job // // description data for steps, labels and environment. @@ -11612,7 +11808,7 @@ func (c *ProjectsLocationsJobsCreateCall) Do(opts ...googleapi.CallOption) (*Job // "enumDescriptions": [ // "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - // "Request all information available for this job.", + // "Request all information available for this job. When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", // "Request summary info and limited job description data for steps, labels and environment." // ], // "location": "query", @@ -11686,6 +11882,14 @@ func (r *ProjectsLocationsJobsService) Get(projectId string, location string, jo // version details. // // "JOB_VIEW_ALL" - Request all information available for this job. +// +// When the job is in `JOB_STATE_PENDING`, the job has been created but +// is not yet running, and not all job information is available. For +// complete job information, wait until the job in is +// `JOB_STATE_RUNNING`. 
For more information, see +// [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ +// projects.jobs#jobstate). +// // "JOB_VIEW_DESCRIPTION" - Request summary info and limited job // // description data for steps, labels and environment. @@ -11834,7 +12038,7 @@ func (c *ProjectsLocationsJobsGetCall) Do(opts ...googleapi.CallOption) (*Job, e // "enumDescriptions": [ // "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - // "Request all information available for this job.", + // "Request all information available for this job. When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", // "Request summary info and limited job description data for steps, labels and environment." // ], // "location": "query", @@ -12287,8 +12491,10 @@ type ProjectsLocationsJobsListCall struct { // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). // To list the all jobs across all regions, use // `projects.jobs.aggregated`. Using `projects.jobs.list` is not -// recommended, as you can only get the list of jobs that are running in -// `us-central1`. +// recommended, because you can only get the list of jobs that are +// running in `us-central1`. `projects.locations.jobs.list` and +// `projects.jobs.list` support filtering the list of jobs by name. +// Filtering by name isn't supported by `projects.jobs.aggregated`. 
// // - location: The [regional endpoint] // (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) @@ -12328,7 +12534,7 @@ func (c *ProjectsLocationsJobsListCall) Filter(filter string) *ProjectsLocations return c } -// Name sets the optional parameter "name": The job name. Optional. +// Name sets the optional parameter "name": The job name. func (c *ProjectsLocationsJobsListCall) Name(name string) *ProjectsLocationsJobsListCall { c.urlParams_.Set("name", name) return c @@ -12367,6 +12573,14 @@ func (c *ProjectsLocationsJobsListCall) PageToken(pageToken string) *ProjectsLoc // version details. // // "JOB_VIEW_ALL" - Request all information available for this job. +// +// When the job is in `JOB_STATE_PENDING`, the job has been created but +// is not yet running, and not all job information is available. For +// complete job information, wait until the job in is +// `JOB_STATE_RUNNING`. For more information, see +// [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ +// projects.jobs#jobstate). +// // "JOB_VIEW_DESCRIPTION" - Request summary info and limited job // // description data for steps, labels and environment. @@ -12475,7 +12689,7 @@ func (c *ProjectsLocationsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJ } return ret, nil // { - // "description": "List the jobs of a project. To list the jobs of a project in a region, we recommend using `projects.locations.jobs.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To list the all jobs across all regions, use `projects.jobs.aggregated`. Using `projects.jobs.list` is not recommended, as you can only get the list of jobs that are running in `us-central1`.", + // "description": "List the jobs of a project. To list the jobs of a project in a region, we recommend using `projects.locations.jobs.list` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). 
To list the all jobs across all regions, use `projects.jobs.aggregated`. Using `projects.jobs.list` is not recommended, because you can only get the list of jobs that are running in `us-central1`. `projects.locations.jobs.list` and `projects.jobs.list` support filtering the list of jobs by name. Filtering by name isn't supported by `projects.jobs.aggregated`.", // "flatPath": "v1b3/projects/{projectId}/locations/{location}/jobs", // "httpMethod": "GET", // "id": "dataflow.projects.locations.jobs.list", @@ -12508,7 +12722,7 @@ func (c *ProjectsLocationsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJ // "type": "string" // }, // "name": { - // "description": "Optional. The job name. Optional.", + // "description": "Optional. The job name.", // "location": "query", // "type": "string" // }, @@ -12530,6 +12744,7 @@ func (c *ProjectsLocationsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJ // "type": "string" // }, // "view": { + // "deprecated": true, // "description": "Deprecated. ListJobs always returns summaries now. Use GetJob for other JobViews.", // "enum": [ // "JOB_VIEW_UNKNOWN", @@ -12540,7 +12755,7 @@ func (c *ProjectsLocationsJobsListCall) Do(opts ...googleapi.CallOption) (*ListJ // "enumDescriptions": [ // "The job view to return isn't specified, or is unknown. Responses will contain at least the `JOB_VIEW_SUMMARY` information, and may contain additional information.", // "Request summary information only: Project ID, Job ID, job name, job type, job status, start/end time, and Cloud SDK version details.", - // "Request all information available for this job.", + // "Request all information available for this job. When the job is in `JOB_STATE_PENDING`, the job has been created but is not yet running, and not all job information is available. For complete job information, wait until the job in is `JOB_STATE_RUNNING`. 
For more information, see [JobState](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#jobstate).", // "Request summary info and limited job description data for steps, labels and environment." // ], // "location": "query", @@ -12783,6 +12998,18 @@ func (r *ProjectsLocationsJobsService) Update(projectId string, location string, return c } +// UpdateMask sets the optional parameter "updateMask": The list of +// fields to update relative to Job. If empty, only RequestedJobState +// will be considered for update. If the FieldMask is not empty and +// RequestedJobState is none/empty, The fields specified in the update +// mask will be the only ones considered for update. If both +// RequestedJobState and update_mask are specified, an error will be +// returned as we cannot update both state and mask. +func (c *ProjectsLocationsJobsUpdateCall) UpdateMask(updateMask string) *ProjectsLocationsJobsUpdateCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -12903,6 +13130,12 @@ func (c *ProjectsLocationsJobsUpdateCall) Do(opts ...googleapi.CallOption) (*Job // "location": "path", // "required": true, // "type": "string" + // }, + // "updateMask": { + // "description": "The list of fields to update relative to Job. If empty, only RequestedJobState will be considered for update. If the FieldMask is not empty and RequestedJobState is none/empty, The fields specified in the update mask will be the only ones considered for update. 
If both RequestedJobState and update_mask are specified, an error will be returned as we cannot update both state and mask.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, // "path": "v1b3/projects/{projectId}/locations/{location}/jobs/{jobId}", diff --git a/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json b/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json index a6f3e830f0..679fc02c29 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json @@ -555,7 +555,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", "httpMethod": "GET", "id": "dataproc.projects.locations.operations.list", @@ -2265,7 +2265,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. 
To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations", "httpMethod": "GET", "id": "dataproc.projects.regions.operations.list", @@ -2671,7 +2671,7 @@ } } }, - "revision": "20230220", + "revision": "20230622", "rootUrl": "https://dataproc.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -3413,9 +3413,23 @@ "description": "Optional. DEPRECATED Specifies the job on which diagnosis is to be performed. Format: projects/{project}/regions/{region}/jobs/{job}", "type": "string" }, + "jobs": { + "description": "Optional. Specifies a list of jobs on which diagnosis is to be performed. Format: projects/{project}/regions/{region}/jobs/{job}", + "items": { + "type": "string" + }, + "type": "array" + }, "yarnApplicationId": { "description": "Optional. DEPRECATED Specifies the yarn application on which diagnosis is to be performed.", "type": "string" + }, + "yarnApplicationIds": { + "description": "Optional. Specifies a list of yarn applications on which diagnosis is to be performed.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -3534,7 +3548,7 @@ "id": "ExecutionConfig", "properties": { "idleTtl": { - "description": "Optional. The duration to keep the session alive while it's idling. Passing this threshold will cause the session to be terminated. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 4 hours if not set. 
If both ttl and idle_ttl are specified, the conditions are treated as and OR: the workload will be terminated when it has been idle for idle_ttl or when the ttl has passed, whichever comes first.", + "description": "Optional. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. This field cannot be set on a batch workload. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 4 hours if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.", "format": "google-duration", "type": "string" }, @@ -3566,7 +3580,7 @@ "type": "string" }, "ttl": { - "description": "Optional. The duration after which the workload will be terminated. When the workload passes this ttl, it will be unconditionally killed without waiting for ongoing work to finish. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). If both ttl and idle_ttl are specified, the conditions are treated as and OR: the workload will be terminated when it has been idle for idle_ttl or when the ttl has passed, whichever comes first. If ttl is not specified for a session, it defaults to 24h.", + "description": "Optional. The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or runs forever without exiting). If ttl is not specified for an interactive session, it defaults to 24h. 
If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4h. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.", "format": "google-duration", "type": "string" } @@ -3733,7 +3747,7 @@ "type": "array" }, "bootDiskKmsKey": { - "description": "Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/KEY_PROJECT_ID/locations/LOCATION /keyRings/RING_NAME/cryptoKeys/KEY_NAME.", + "description": "Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}", "type": "string" }, "localSsdCount": { @@ -4555,10 +4569,11 @@ "id": "ListBatchesResponse", "properties": { "batches": { - "description": "The batches from the specified collection.", + "description": "Output only. The batches from the specified collection.", "items": { "$ref": "Batch" }, + "readOnly": true, "type": "array" }, "nextPageToken": { @@ -4675,7 +4690,7 @@ ], "type": "string" }, - "description": "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + "description": "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. 
Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG'", "type": "object" } }, @@ -4732,18 +4747,18 @@ "type": "object" }, "Metric": { - "description": "A Dataproc OSS metric.", + "description": "A Dataproc custom metric.", "id": "Metric", "properties": { "metricOverrides": { - "description": "Optional. Specify one or more available OSS metrics (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric course (for the SPARK metric source, any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE: INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics will be collected for the metric source. For example, if one or more spark:executive metrics are listed as metric overrides, other SPARK metrics will not be collected. The collection of the default metrics for other OSS metric sources is unaffected. For example, if both SPARK andd YARN metric sources are enabled, and overrides are provided for Spark metrics only, all default YARN metrics will be collected.", + "description": "Optional. 
Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric course (for the SPARK metric source (any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE: INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executive metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK andd YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected.", "items": { "type": "string" }, "type": "array" }, "metricSource": { - "description": "Required. Default metrics are collected unless metricOverrides are specified for the metric source (see Available OSS metrics (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) for more information).", + "description": "Required. A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information).", "enum": [ "METRIC_SOURCE_UNSPECIFIED", "MONITORING_AGENT_DEFAULTS", @@ -4756,7 +4771,7 @@ ], "enumDescriptions": [ "Required unspecified metric source.", - "Default monitoring agent metrics. 
If this source is enabled, Dataproc enables the monitoring agent in Compute Engine, and collects default monitoring agent metrics, which are published with an agent.googleapis.com prefix.", + "Monitoring agent metrics. If this source is enabled, Dataproc enables the monitoring agent in Compute Engine, and collects monitoring agent metrics, which are published with an agent.googleapis.com prefix.", "HDFS metric source.", "Spark metric source.", "YARN metric source.", @@ -5387,7 +5402,7 @@ "id": "ResizeNodeGroupRequest", "properties": { "gracefulDecommissionTimeout": { - "description": "Optional. Timeout for graceful YARN decomissioning. Graceful decommissioning (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) allows the removal of nodes from the Compute Engine node group without interrupting jobs in progress. This timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Only supported on Dataproc image versions 1.2 and higher.", + "description": "Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) allows the removal of nodes from the Compute Engine node group without interrupting jobs in progress. This timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. 
(see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Only supported on Dataproc image versions 1.2 and higher.", "format": "google-duration", "type": "string" }, @@ -5876,7 +5891,7 @@ "id": "SparkStandaloneAutoscalingConfig", "properties": { "gracefulDecommissionTimeout": { - "description": "Required. Timeout for Spark graceful decommissioning of spark workers. Specifies the duration to wait for spark worker to complete spark decomissioning tasks before forcefully removing workers. Only applicable to downscaling operations.Bounds: 0s, 1d.", + "description": "Required. Timeout for Spark graceful decommissioning of spark workers. Specifies the duration to wait for spark worker to complete spark decommissioning tasks before forcefully removing workers. Only applicable to downscaling operations.Bounds: 0s, 1d.", "format": "google-duration", "type": "string" }, diff --git a/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go b/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go index baa308820d..3158504ca5 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go @@ -73,6 +73,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "dataproc:v1" const apiName = "dataproc" @@ -1415,10 +1416,18 @@ type DiagnoseClusterRequest struct { // be performed. Format: projects/{project}/regions/{region}/jobs/{job} Job string `json:"job,omitempty"` + // Jobs: Optional. Specifies a list of jobs on which diagnosis is to be + // performed. Format: projects/{project}/regions/{region}/jobs/{job} + Jobs []string `json:"jobs,omitempty"` + // YarnApplicationId: Optional. DEPRECATED Specifies the yarn // application on which diagnosis is to be performed. 
YarnApplicationId string `json:"yarnApplicationId,omitempty"` + // YarnApplicationIds: Optional. Specifies a list of yarn applications + // on which diagnosis is to be performed. + YarnApplicationIds []string `json:"yarnApplicationIds,omitempty"` + // ForceSendFields is a list of field names (e.g. "DiagnosisInterval") // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -1677,14 +1686,15 @@ func (s *EnvironmentConfig) MarshalJSON() ([]byte, error) { // ExecutionConfig: Execution configuration for a workload. type ExecutionConfig struct { // IdleTtl: Optional. The duration to keep the session alive while it's - // idling. Passing this threshold will cause the session to be - // terminated. Minimum value is 10 minutes; maximum value is 14 days - // (see JSON representation of Duration + // idling. Exceeding this threshold causes the session to terminate. + // This field cannot be set on a batch workload. Minimum value is 10 + // minutes; maximum value is 14 days (see JSON representation of + // Duration // (https://developers.google.com/protocol-buffers/docs/proto3#json)). // Defaults to 4 hours if not set. If both ttl and idle_ttl are - // specified, the conditions are treated as and OR: the workload will be - // terminated when it has been idle for idle_ttl or when the ttl has - // passed, whichever comes first. + // specified for an interactive session, the conditions are treated as + // OR conditions: the workload will be terminated when it has been idle + // for idle_ttl or when ttl has been exceeded, whichever occurs first. IdleTtl string `json:"idleTtl,omitempty"` // KmsKey: Optional. The Cloud KMS key to use for encryption. @@ -1714,15 +1724,19 @@ type ExecutionConfig struct { SubnetworkUri string `json:"subnetworkUri,omitempty"` // Ttl: Optional. The duration after which the workload will be - // terminated. 
When the workload passes this ttl, it will be - // unconditionally killed without waiting for ongoing work to finish. - // Minimum value is 10 minutes; maximum value is 14 days (see JSON - // representation of Duration + // terminated. When the workload exceeds this duration, it will be + // unconditionally terminated without waiting for ongoing work to + // finish. If ttl is not specified for a batch workload, the workload + // will be allowed to run until it exits naturally (or runs forever + // without exiting). If ttl is not specified for an interactive session, + // it defaults to 24h. If ttl is not specified for a batch that uses + // 2.1+ runtime version, it defaults to 4h. Minimum value is 10 minutes; + // maximum value is 14 days (see JSON representation of Duration // (https://developers.google.com/protocol-buffers/docs/proto3#json)). - // If both ttl and idle_ttl are specified, the conditions are treated as - // and OR: the workload will be terminated when it has been idle for - // idle_ttl or when the ttl has passed, whichever comes first. If ttl is - // not specified for a session, it defaults to 24h. + // If both ttl and idle_ttl are specified (for an interactive session), + // the conditions are treated as OR conditions: the workload will be + // terminated when it has been idle for idle_ttl or when ttl has been + // exceeded, whichever occurs first. Ttl string `json:"ttl,omitempty"` // ForceSendFields is a list of field names (e.g. "IdleTtl") to @@ -2065,8 +2079,8 @@ type GkeNodeConfig struct { // (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) // used to encrypt the boot disk attached to each node in the node pool. // Specify the key using the following format: - // projects/KEY_PROJECT_ID/locations/LOCATION - // /keyRings/RING_NAME/cryptoKeys/KEY_NAME. + // projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys + // /{crypto_key} BootDiskKmsKey string `json:"bootDiskKmsKey,omitempty"` // LocalSsdCount: Optional. 
The number of local SSD disks to attach to @@ -3396,7 +3410,7 @@ func (s *ListAutoscalingPoliciesResponse) MarshalJSON() ([]byte, error) { // ListBatchesResponse: A list of batch workloads. type ListBatchesResponse struct { - // Batches: The batches from the specified collection. + // Batches: Output only. The batches from the specified collection. Batches []*Batch `json:"batches,omitempty"` // NextPageToken: A token, which can be sent as page_token to retrieve @@ -3584,8 +3598,8 @@ func (s *ListWorkflowTemplatesResponse) MarshalJSON() ([]byte, error) { // LoggingConfig: The runtime logging config of the job. type LoggingConfig struct { // DriverLogLevels: The per-package log levels for the driver. This may - // include "root" package name to configure rootLogger. Examples: - // 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // include "root" package name to configure rootLogger. Examples: - + // 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG' DriverLogLevels map[string]string `json:"driverLogLevels,omitempty"` // ForceSendFields is a list of field names (e.g. "DriverLogLevels") to @@ -3724,11 +3738,11 @@ func (s *MetastoreConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Metric: A Dataproc OSS metric. +// Metric: A Dataproc custom metric. type Metric struct { - // MetricOverrides: Optional. Specify one or more available OSS metrics - // (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) - // to collect for the metric course (for the SPARK metric source, any + // MetricOverrides: Optional. 
Specify one or more Custom metrics + // (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) + // to collect for the metric course (for the SPARK metric source (any // Spark metric // (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be // specified).Provide metrics in the following format: METRIC_SOURCE: @@ -3737,27 +3751,27 @@ type Metric struct { // spark:driver:DAGScheduler:job.allJobs // sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed // hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the - // specified overridden metrics will be collected for the metric source. - // For example, if one or more spark:executive metrics are listed as - // metric overrides, other SPARK metrics will not be collected. The - // collection of the default metrics for other OSS metric sources is - // unaffected. For example, if both SPARK andd YARN metric sources are - // enabled, and overrides are provided for Spark metrics only, all - // default YARN metrics will be collected. + // specified overridden metrics are collected for the metric source. For + // example, if one or more spark:executive metrics are listed as metric + // overrides, other SPARK metrics are not collected. The collection of + // the metrics for other enabled custom metric sources is unaffected. + // For example, if both SPARK andd YARN metric sources are enabled, and + // overrides are provided for Spark metrics only, all YARN metrics are + // collected. MetricOverrides []string `json:"metricOverrides,omitempty"` - // MetricSource: Required. Default metrics are collected unless - // metricOverrides are specified for the metric source (see Available - // OSS metrics - // (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) + // MetricSource: Required. 
A standard set of metrics is collected unless + // metricOverrides are specified for the metric source (see Custom + // metrics + // (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) // for more information). // // Possible values: // "METRIC_SOURCE_UNSPECIFIED" - Required unspecified metric source. - // "MONITORING_AGENT_DEFAULTS" - Default monitoring agent metrics. If - // this source is enabled, Dataproc enables the monitoring agent in - // Compute Engine, and collects default monitoring agent metrics, which - // are published with an agent.googleapis.com prefix. + // "MONITORING_AGENT_DEFAULTS" - Monitoring agent metrics. If this + // source is enabled, Dataproc enables the monitoring agent in Compute + // Engine, and collects monitoring agent metrics, which are published + // with an agent.googleapis.com prefix. // "HDFS" - HDFS metric source. // "SPARK" - Spark metric source. // "YARN" - YARN metric source. @@ -4764,7 +4778,7 @@ func (s *ReservationAffinity) MarshalJSON() ([]byte, error) { // ResizeNodeGroupRequest: A request to resize a node group. type ResizeNodeGroupRequest struct { // GracefulDecommissionTimeout: Optional. Timeout for graceful YARN - // decomissioning. Graceful decommissioning + // decommissioning. Graceful decommissioning // (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) // allows the removal of nodes from the Compute Engine node group // without interrupting jobs in progress. This timeout specifies how @@ -5472,9 +5486,9 @@ func (s *SparkSqlJob) MarshalJSON() ([]byte, error) { type SparkStandaloneAutoscalingConfig struct { // GracefulDecommissionTimeout: Required. Timeout for Spark graceful // decommissioning of spark workers. Specifies the duration to wait for - // spark worker to complete spark decomissioning tasks before forcefully - // removing workers. Only applicable to downscaling operations.Bounds: - // 0s, 1d. 
+ // spark worker to complete spark decommissioning tasks before + // forcefully removing workers. Only applicable to downscaling + // operations.Bounds: 0s, 1d. GracefulDecommissionTimeout string `json:"gracefulDecommissionTimeout,omitempty"` // ScaleDownFactor: Required. Fraction of required executors to remove @@ -8842,14 +8856,7 @@ type ProjectsLocationsOperationsListCall struct { // List: Lists operations that match the specified filter in the // request. If the server doesn't support this method, it returns -// UNIMPLEMENTED.NOTE: the name binding allows API services to override -// the binding to use different resource name schemes, such as -// users/*/operations. To override the binding, API services can add a -// binding such as "/v1/{name=users/*}/operations" to their service -// configuration. For backwards compatibility, the default name includes -// the operations collection id, however overriding users must ensure -// the name binding is the parent resource, without the operations -// collection id. +// UNIMPLEMENTED. // // - name: The name of the operation's parent resource. func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall { @@ -8978,7 +8985,7 @@ func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", // "httpMethod": "GET", // "id": "dataproc.projects.locations.operations.list", @@ -16938,14 +16945,7 @@ type ProjectsRegionsOperationsListCall struct { // List: Lists operations that match the specified filter in the // request. If the server doesn't support this method, it returns -// UNIMPLEMENTED.NOTE: the name binding allows API services to override -// the binding to use different resource name schemes, such as -// users/*/operations. To override the binding, API services can add a -// binding such as "/v1/{name=users/*}/operations" to their service -// configuration. For backwards compatibility, the default name includes -// the operations collection id, however overriding users must ensure -// the name binding is the parent resource, without the operations -// collection id. +// UNIMPLEMENTED. // // - name: The name of the operation's parent resource. func (r *ProjectsRegionsOperationsService) List(name string) *ProjectsRegionsOperationsListCall { @@ -17074,7 +17074,7 @@ func (c *ProjectsRegionsOperationsListCall) Do(opts ...googleapi.CallOption) (*L } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as \"/v1/{name=users/*}/operations\" to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", // "flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations", // "httpMethod": "GET", // "id": "dataproc.projects.regions.operations.list", diff --git a/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-api.json b/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-api.json index beb6e773ef..7db2154b3a 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-api.json @@ -524,7 +524,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", "httpMethod": "GET", "id": "datastream.projects.locations.operations.list", @@ -1024,6 +1024,17 @@ "name" ], "parameters": { + "cdcStrategy.specificStartPosition.mysqlLogPosition.logFile": { + "description": "The binary log file name.", + "location": "query", + "type": "string" + }, + "cdcStrategy.specificStartPosition.mysqlLogPosition.logPosition": { + "description": "The position within the binary log file. Default is head of file.", + "format": "int32", + "location": "query", + "type": "integer" + }, "force": { "description": "Optional. Update the stream without validating it.", "location": "query", @@ -1222,7 +1233,7 @@ } } }, - "revision": "20230301", + "revision": "20230611", "rootUrl": "https://datastream.googleapis.com/", "schemas": { "AvroFileFormat": { @@ -1275,7 +1286,7 @@ "type": "string" }, "state": { - "description": "Backfill job state.", + "description": "Output only. Backfill job state.", "enum": [ "STATE_UNSPECIFIED", "NOT_STARTED", @@ -1296,6 +1307,7 @@ "Backfill completed successfully.", "Backfill job failed since the table structure is currently unsupported for backfill." ], + "readOnly": true, "type": "string" }, "trigger": { @@ -1611,7 +1623,7 @@ "description": "AVRO file format configuration." }, "fileRotationInterval": { - "description": "The maximum duration for which new events are added before a file is closed and a new file is created.", + "description": "The maximum duration for which new events are added before a file is closed and a new file is created. 
Values within the range of 15-60 seconds are allowed.", "format": "google-duration", "type": "string" }, @@ -1836,7 +1848,7 @@ "type": "object" }, "Location": { - "description": "A resource that represents Google Cloud Platform location.", + "description": "A resource that represents a Google Cloud location.", "id": "Location", "properties": { "displayName": { @@ -2622,7 +2634,7 @@ "id": "SingleTargetDataset", "properties": { "datasetId": { - "description": "The dataset ID of the target dataset.", + "description": "The dataset ID of the target dataset. DatasetIds allowed characters: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#datasetreference.", "type": "string" } }, @@ -2907,7 +2919,7 @@ "type": "array" }, "state": { - "description": "Validation execution status.", + "description": "Output only. Validation execution status.", "enum": [ "STATE_UNSPECIFIED", "NOT_EXECUTED", @@ -2920,6 +2932,7 @@ "Validation failed.", "Validation passed." ], + "readOnly": true, "type": "string" } }, diff --git a/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-gen.go b/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-gen.go index 3f46e2700c..7b3ce7d6a5 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/datastream/v1/datastream-gen.go @@ -71,6 +71,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "datastream:v1" const apiName = "datastream" @@ -284,7 +285,7 @@ type BackfillJob struct { // LastStartTime: Output only. Backfill job's start time. LastStartTime string `json:"lastStartTime,omitempty"` - // State: Backfill job state. + // State: Output only. Backfill job state. // // Possible values: // "STATE_UNSPECIFIED" - Default value. 
@@ -773,7 +774,8 @@ type GcsDestinationConfig struct { AvroFileFormat *AvroFileFormat `json:"avroFileFormat,omitempty"` // FileRotationInterval: The maximum duration for which new events are - // added before a file is closed and a new file is created. + // added before a file is closed and a new file is created. Values + // within the range of 15-60 seconds are allowed. FileRotationInterval string `json:"fileRotationInterval,omitempty"` // FileRotationMb: The maximum file size to be saved in the bucket. @@ -1155,7 +1157,7 @@ func (s *ListStreamsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Location: A resource that represents Google Cloud Platform location. +// Location: A resource that represents a Google Cloud location. type Location struct { // DisplayName: The friendly name for this location, typically a nearby // city name. For example, "Tokyo". @@ -2338,7 +2340,9 @@ func (s *Route) MarshalJSON() ([]byte, error) { // SingleTargetDataset: A single target dataset to which all data will // be streamed. type SingleTargetDataset struct { - // DatasetId: The dataset ID of the target dataset. + // DatasetId: The dataset ID of the target dataset. DatasetIds allowed + // characters: + // https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#datasetreference. DatasetId string `json:"datasetId,omitempty"` // ForceSendFields is a list of field names (e.g. "DatasetId") to @@ -2752,7 +2756,7 @@ type Validation struct { // Message: Messages reflecting the validation results. Message []*ValidationMessage `json:"message,omitempty"` - // State: Validation execution status. + // State: Output only. Validation execution status. // // Possible values: // "STATE_UNSPECIFIED" - Unspecified state. @@ -4962,14 +4966,7 @@ type ProjectsLocationsOperationsListCall struct { // List: Lists operations that match the specified filter in the // request. 
If the server doesn't support this method, it returns -// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to -// override the binding to use different resource name schemes, such as -// `users/*/operations`. To override the binding, API services can add a -// binding such as "/v1/{name=users/*}/operations" to their service -// configuration. For backwards compatibility, the default name includes -// the operations collection id, however overriding users must ensure -// the name binding is the parent resource, without the operations -// collection id. +// `UNIMPLEMENTED`. // // - name: The name of the operation's parent resource. func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall { @@ -5098,7 +5095,7 @@ func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", // "httpMethod": "GET", // "id": "datastream.projects.locations.operations.list", @@ -7325,6 +7322,24 @@ func (r *ProjectsLocationsStreamsService) Patch(name string, stream *Stream) *Pr return c } +// CdcStrategySpecificStartPositionMysqlLogPositionLogFile sets the +// optional parameter +// "cdcStrategy.specificStartPosition.mysqlLogPosition.logFile": The +// binary log file name. +func (c *ProjectsLocationsStreamsPatchCall) CdcStrategySpecificStartPositionMysqlLogPositionLogFile(cdcStrategySpecificStartPositionMysqlLogPositionLogFile string) *ProjectsLocationsStreamsPatchCall { + c.urlParams_.Set("cdcStrategy.specificStartPosition.mysqlLogPosition.logFile", cdcStrategySpecificStartPositionMysqlLogPositionLogFile) + return c +} + +// CdcStrategySpecificStartPositionMysqlLogPositionLogPosition sets the +// optional parameter +// "cdcStrategy.specificStartPosition.mysqlLogPosition.logPosition": The +// position within the binary log file. Default is head of file. +func (c *ProjectsLocationsStreamsPatchCall) CdcStrategySpecificStartPositionMysqlLogPositionLogPosition(cdcStrategySpecificStartPositionMysqlLogPositionLogPosition int64) *ProjectsLocationsStreamsPatchCall { + c.urlParams_.Set("cdcStrategy.specificStartPosition.mysqlLogPosition.logPosition", fmt.Sprint(cdcStrategySpecificStartPositionMysqlLogPositionLogPosition)) + return c +} + // Force sets the optional parameter "force": Update the stream without // validating it. 
func (c *ProjectsLocationsStreamsPatchCall) Force(force bool) *ProjectsLocationsStreamsPatchCall { @@ -7467,6 +7482,17 @@ func (c *ProjectsLocationsStreamsPatchCall) Do(opts ...googleapi.CallOption) (*O // "name" // ], // "parameters": { + // "cdcStrategy.specificStartPosition.mysqlLogPosition.logFile": { + // "description": "The binary log file name.", + // "location": "query", + // "type": "string" + // }, + // "cdcStrategy.specificStartPosition.mysqlLogPosition.logPosition": { + // "description": "The position within the binary log file. Default is head of file.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "force": { // "description": "Optional. Update the stream without validating it.", // "location": "query", diff --git a/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-api.json b/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-api.json index 0a86c9ffb1..3cea8f8c8f 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-api.json @@ -1824,7 +1824,7 @@ } } }, - "revision": "20230126", + "revision": "20230330", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { @@ -3109,7 +3109,7 @@ "id": "RRSetRoutingPolicyLoadBalancerTarget", "properties": { "ipAddress": { - "description": "The frontend IP address of the", + "description": "The frontend IP address of the Load Balancer to health check.", "type": "string" }, "ipProtocol": { @@ -3130,6 +3130,7 @@ "type": "string" }, "loadBalancerType": { + "description": "The type of Load Balancer specified by this target. 
Must match the configuration of the Load Balancer located at the LoadBalancerTarget's IP address/port and region.", "enum": [ "none", "regionalL4ilb" @@ -3141,19 +3142,19 @@ "type": "string" }, "networkUrl": { - "description": "The fully qualified url of the network on which the ILB is", + "description": "The fully qualified url of the network on which the ILB is present. This should be formatted like https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}", "type": "string" }, "port": { - "description": "Load Balancer to health check. The configured port of the Load Balancer.", + "description": "The configured port of the Load Balancer.", "type": "string" }, "project": { - "description": "present. This should be formatted like https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} The project ID in which the ILB exists.", + "description": "The project ID in which the ILB exists.", "type": "string" }, "region": { - "description": "The region for regional ILBs.", + "description": "The region in which the ILB exists.", "type": "string" } }, diff --git a/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-gen.go b/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-gen.go index 19a209a8c0..4b144d7f4e 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/dns/v1/dns-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "dns:v1" const apiName = "dns" @@ -2491,7 +2492,8 @@ func (s *RRSetRoutingPolicyHealthCheckTargets) MarshalJSON() ([]byte, error) { } type RRSetRoutingPolicyLoadBalancerTarget struct { - // IpAddress: The frontend IP address of the + // IpAddress: The frontend IP address of the Load Balancer to health + // check. 
IpAddress string `json:"ipAddress,omitempty"` // Possible values: @@ -2502,25 +2504,27 @@ type RRSetRoutingPolicyLoadBalancerTarget struct { Kind string `json:"kind,omitempty"` + // LoadBalancerType: The type of Load Balancer specified by this target. + // Must match the configuration of the Load Balancer located at the + // LoadBalancerTarget's IP address/port and region. + // // Possible values: // "none" // "regionalL4ilb" LoadBalancerType string `json:"loadBalancerType,omitempty"` // NetworkUrl: The fully qualified url of the network on which the ILB - // is + // is present. This should be formatted like + // https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} NetworkUrl string `json:"networkUrl,omitempty"` - // Port: Load Balancer to health check. The configured port of the Load - // Balancer. + // Port: The configured port of the Load Balancer. Port string `json:"port,omitempty"` - // Project: present. This should be formatted like - // https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} - // The project ID in which the ILB exists. + // Project: The project ID in which the ILB exists. Project string `json:"project,omitempty"` - // Region: The region for regional ILBs. + // Region: The region in which the ILB exists. Region string `json:"region,omitempty"` // ForceSendFields is a list of field names (e.g. 
"IpAddress") to diff --git a/terraform/providers/google/vendor/google.golang.org/api/googleapi/googleapi.go b/terraform/providers/google/vendor/google.golang.org/api/googleapi/googleapi.go index b328a7976a..b5e38c6628 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/terraform/providers/google/vendor/google.golang.org/api/googleapi/googleapi.go @@ -11,7 +11,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -144,7 +143,7 @@ func CheckResponse(res *http.Response) error { if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } - slurp, err := ioutil.ReadAll(res.Body) + slurp, err := io.ReadAll(res.Body) if err == nil { jerr := new(errorReply) err = json.Unmarshal(slurp, jerr) @@ -184,7 +183,7 @@ func CheckMediaResponse(res *http.Response) error { if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } - slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) + slurp, _ := io.ReadAll(io.LimitReader(res.Body, 1<<20)) return &Error{ Code: res.StatusCode, Body: string(slurp), diff --git a/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-api.json b/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-api.json index f7f3e26bff..2213847fc5 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-api.json @@ -2969,7 +2969,7 @@ "type": "string" }, "_type": { - "description": "String of comma-delimited FHIR resource types. If provided, only resources of the specified resource type(s) are returned.", + "description": "String of comma-delimited FHIR resource types. If provided, only resources of the specified resource type(s) are returned. Specifying multiple `_type` parameters isn't supported. For example, the result of `_type=Observation\u0026_type=Encounter` is undefined. 
Use `_type=Observation,Encounter` instead.", "location": "query", "type": "string" }, @@ -3279,7 +3279,7 @@ ] }, "search": { - "description": "Searches for resources in the given FHIR store according to criteria specified as query parameters. Implements the FHIR standard search interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#search), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#search), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#search)) using the search semantics described in the FHIR Search specification ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/search.html), [STU3](http://hl7.org/implement/standards/fhir/STU3/search.html), [R4](http://hl7.org/implement/standards/fhir/R4/search.html)). Supports four methods of search defined by the specification: * `GET [base]?[parameters]` to search across all resources. * `GET [base]/[type]?[parameters]` to search resources of a specified type. * `POST [base]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method across all resources. * `POST [base]/[type]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method for the specified type. The `GET` and `POST` methods do not support compartment searches. The `POST` method does not support `application/x-www-form-urlencoded` search parameters. On success, the response body contains a JSON-encoded representation of a `Bundle` resource of type `searchset`, containing the results of the search. Errors generated by the FHIR store contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. The server's capability statement, retrieved through capabilities, indicates what search parameters are supported on each FHIR resource. 
A list of all search parameters defined by the specification can be found in the FHIR Search Parameter Registry ([STU3](http://hl7.org/implement/standards/fhir/STU3/searchparameter-registry.html), [R4](http://hl7.org/implement/standards/fhir/R4/searchparameter-registry.html)). FHIR search parameters for DSTU2 can be found on each resource's definition page. Supported search modifiers: `:missing`, `:exact`, `:contains`, `:text`, `:in`, `:not-in`, `:above`, `:below`, `:[type]`, `:not`, and `recurse` (DSTU2 and STU3) or `:iterate` (R4). Supported search result parameters: `_sort`, `_count`, `_include`, `_revinclude`, `_summary=text`, `_summary=data`, and `_elements`. The maximum number of search results returned defaults to 100, which can be overridden by the `_count` parameter up to a maximum limit of 1000. If there are additional results, the returned `Bundle` contains a link of `relation` \"next\", which has a `_page_token` parameter for an opaque pagination token that can be used to retrieve the next page. Resources with a total size larger than 5MB or a field count larger than 50,000 might not be fully searchable as the server might trim its generated search index in those cases. Note: FHIR resources are indexed asynchronously, so there might be a slight delay between the time a resource is created or changes and when the change is reflected in search results. For samples and detailed information, see [Searching for FHIR resources](https://cloud.google.com/healthcare/docs/how-tos/fhir-search) and [Advanced FHIR search features](https://cloud.google.com/healthcare/docs/how-tos/fhir-advanced-search).", + "description": "Searches for resources in the given FHIR store according to criteria specified as query parameters. 
Implements the FHIR standard search interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#search), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#search), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#search)) using the search semantics described in the FHIR Search specification ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/search.html), [STU3](http://hl7.org/implement/standards/fhir/STU3/search.html), [R4](http://hl7.org/implement/standards/fhir/R4/search.html)). Supports four methods of search defined by the specification: * `GET [base]?[parameters]` to search across all resources. * `GET [base]/[type]?[parameters]` to search resources of a specified type. * `POST [base]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method across all resources. * `POST [base]/[type]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method for the specified type. The `GET` and `POST` methods do not support compartment searches. The `POST` method does not support `application/x-www-form-urlencoded` search parameters. On success, the response body contains a JSON-encoded representation of a `Bundle` resource of type `searchset`, containing the results of the search. Errors generated by the FHIR store contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. The server's capability statement, retrieved through capabilities, indicates what search parameters are supported on each FHIR resource. A list of all search parameters defined by the specification can be found in the FHIR Search Parameter Registry ([STU3](http://hl7.org/implement/standards/fhir/STU3/searchparameter-registry.html), [R4](http://hl7.org/implement/standards/fhir/R4/searchparameter-registry.html)). 
FHIR search parameters for DSTU2 can be found on each resource's definition page. Supported search modifiers: `:missing`, `:exact`, `:contains`, `:text`, `:in`, `:not-in`, `:above`, `:below`, `:[type]`, `:not`, and `recurse` (DSTU2 and STU3) or `:iterate` (R4). Supported search result parameters: `_sort`, `_count`, `_include`, `_revinclude`, `_summary=text`, `_summary=data`, and `_elements`. The maximum number of search results returned defaults to 100, which can be overridden by the `_count` parameter up to a maximum limit of 1000. The server might return fewer resources than requested to prevent excessively large responses. If there are additional results, the returned `Bundle` contains a link of `relation` \"next\", which has a `_page_token` parameter for an opaque pagination token that can be used to retrieve the next page. Resources with a total size larger than 5MB or a field count larger than 50,000 might not be fully searchable as the server might trim its generated search index in those cases. Note: FHIR resources are indexed asynchronously, so there might be a slight delay between the time a resource is created or changes and when the change is reflected in search results. For samples and detailed information, see [Searching for FHIR resources](https://cloud.google.com/healthcare/docs/how-tos/fhir-search) and [Advanced FHIR search features](https://cloud.google.com/healthcare/docs/how-tos/fhir-advanced-search).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/_search", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.search", @@ -3307,7 +3307,7 @@ ] }, "search-type": { - "description": "Searches for resources in the given FHIR store according to criteria specified as query parameters. 
Implements the FHIR standard search interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#search), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#search), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#search)) using the search semantics described in the FHIR Search specification ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/search.html), [STU3](http://hl7.org/implement/standards/fhir/STU3/search.html), [R4](http://hl7.org/implement/standards/fhir/R4/search.html)). Supports four methods of search defined by the specification: * `GET [base]?[parameters]` to search across all resources. * `GET [base]/[type]?[parameters]` to search resources of a specified type. * `POST [base]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method across all resources. * `POST [base]/[type]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method for the specified type. The `GET` and `POST` methods do not support compartment searches. The `POST` method does not support `application/x-www-form-urlencoded` search parameters. On success, the response body contains a JSON-encoded representation of a `Bundle` resource of type `searchset`, containing the results of the search. Errors generated by the FHIR store contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. The server's capability statement, retrieved through capabilities, indicates what search parameters are supported on each FHIR resource. A list of all search parameters defined by the specification can be found in the FHIR Search Parameter Registry ([STU3](http://hl7.org/implement/standards/fhir/STU3/searchparameter-registry.html), [R4](http://hl7.org/implement/standards/fhir/R4/searchparameter-registry.html)). 
FHIR search parameters for DSTU2 can be found on each resource's definition page. Supported search modifiers: `:missing`, `:exact`, `:contains`, `:text`, `:in`, `:not-in`, `:above`, `:below`, `:[type]`, `:not`, and `recurse` (DSTU2 and STU3) or `:iterate` (R4). Supported search result parameters: `_sort`, `_count`, `_include`, `_revinclude`, `_summary=text`, `_summary=data`, and `_elements`. The maximum number of search results returned defaults to 100, which can be overridden by the `_count` parameter up to a maximum limit of 1000. If there are additional results, the returned `Bundle` contains a link of `relation` \"next\", which has a `_page_token` parameter for an opaque pagination token that can be used to retrieve the next page. Resources with a total size larger than 5MB or a field count larger than 50,000 might not be fully searchable as the server might trim its generated search index in those cases. Note: FHIR resources are indexed asynchronously, so there might be a slight delay between the time a resource is created or changes and when the change is reflected in search results. For samples and detailed information, see [Searching for FHIR resources](https://cloud.google.com/healthcare/docs/how-tos/fhir-search) and [Advanced FHIR search features](https://cloud.google.com/healthcare/docs/how-tos/fhir-advanced-search).", + "description": "Searches for resources in the given FHIR store according to criteria specified as query parameters. Implements the FHIR standard search interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#search), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#search), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#search)) using the search semantics described in the FHIR Search specification ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/search.html), [STU3](http://hl7.org/implement/standards/fhir/STU3/search.html), [R4](http://hl7.org/implement/standards/fhir/R4/search.html)). 
Supports four methods of search defined by the specification: * `GET [base]?[parameters]` to search across all resources. * `GET [base]/[type]?[parameters]` to search resources of a specified type. * `POST [base]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method across all resources. * `POST [base]/[type]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method for the specified type. The `GET` and `POST` methods do not support compartment searches. The `POST` method does not support `application/x-www-form-urlencoded` search parameters. On success, the response body contains a JSON-encoded representation of a `Bundle` resource of type `searchset`, containing the results of the search. Errors generated by the FHIR store contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. The server's capability statement, retrieved through capabilities, indicates what search parameters are supported on each FHIR resource. A list of all search parameters defined by the specification can be found in the FHIR Search Parameter Registry ([STU3](http://hl7.org/implement/standards/fhir/STU3/searchparameter-registry.html), [R4](http://hl7.org/implement/standards/fhir/R4/searchparameter-registry.html)). FHIR search parameters for DSTU2 can be found on each resource's definition page. Supported search modifiers: `:missing`, `:exact`, `:contains`, `:text`, `:in`, `:not-in`, `:above`, `:below`, `:[type]`, `:not`, and `recurse` (DSTU2 and STU3) or `:iterate` (R4). Supported search result parameters: `_sort`, `_count`, `_include`, `_revinclude`, `_summary=text`, `_summary=data`, and `_elements`. The maximum number of search results returned defaults to 100, which can be overridden by the `_count` parameter up to a maximum limit of 1000. 
The server might return fewer resources than requested to prevent excessively large responses. If there are additional results, the returned `Bundle` contains a link of `relation` \"next\", which has a `_page_token` parameter for an opaque pagination token that can be used to retrieve the next page. Resources with a total size larger than 5MB or a field count larger than 50,000 might not be fully searchable as the server might trim its generated search index in those cases. Note: FHIR resources are indexed asynchronously, so there might be a slight delay between the time a resource is created or changes and when the change is reflected in search results. For samples and detailed information, see [Searching for FHIR resources](https://cloud.google.com/healthcare/docs/how-tos/fhir-search) and [Advanced FHIR search features](https://cloud.google.com/healthcare/docs/how-tos/fhir-advanced-search).", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{resourceType}/_search", "httpMethod": "POST", "id": "healthcare.projects.locations.datasets.fhirStores.fhir.search-type", @@ -3715,7 +3715,7 @@ ], "parameters": { "parent": { - "description": "The name of the dataset this message belongs to.", + "description": "The name of the HL7v2 store this message belongs to.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+$", "required": true, @@ -3993,7 +3993,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. 
For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/operations", "httpMethod": "GET", "id": "healthcare.projects.locations.datasets.operations.list", @@ -4078,7 +4078,7 @@ } } }, - "revision": "20230207", + "revision": "20230510", "rootUrl": "https://healthcare.googleapis.com/", "schemas": { "ActivateConsentRequest": { @@ -4142,7 +4142,7 @@ "type": "array" }, "entityMentions": { - "description": "entity_mentions contains all the annotated medical entities that were mentioned in the provided document.", + "description": "The `entity_mentions` field contains all the annotated medical entities that were mentioned in the provided document.", "items": { "$ref": "EntityMention" }, @@ -4653,6 +4653,10 @@ "text": { "$ref": "TextConfig", "description": "Configures de-identification of text wherever it is found in the source_dataset." + }, + "useRegionalDataProcessing": { + "description": "Ensures in-flight data remains in the region of origin during de-identification. Using this option results in a significant reduction of throughput, and is not compatible with `LOCATION` or `ORGANIZATION_NAME` infoTypes. 
`LOCATION` must be excluded within `TextConfig`, and must also be excluded within `ImageConfig` if image redaction is required.", + "type": "boolean" } }, "type": "object" @@ -5107,6 +5111,25 @@ }, "type": "object" }, + "FhirNotificationConfig": { + "description": "Contains the configuration for FHIR notifications.", + "id": "FhirNotificationConfig", + "properties": { + "pubsubTopic": { + "description": "The [Pub/Sub](https://cloud.google.com/pubsub/docs/) topic that notifications of changes are published on. Supplied by the client. The notification is a `PubsubMessage` with the following fields: * `PubsubMessage.Data` contains the resource name. * `PubsubMessage.MessageId` is the ID of this notification. It is guaranteed to be unique within the topic. * `PubsubMessage.PublishTime` is the time when the message was published. Note that notifications are only sent if the topic is non-empty. [Topic names](https://cloud.google.com/pubsub/docs/overview#names) must be scoped to a project. The Cloud Healthcare API service account, service-@gcp-sa-healthcare.iam.gserviceaccount.com, must have publisher permissions on the given Pub/Sub topic. Not having adequate permissions causes the calls that send notifications to fail (https://cloud.google.com/healthcare-api/docs/permissions-healthcare-api-gcp-products#dicom_fhir_and_hl7v2_store_cloud_pubsub_permissions). If a notification can't be published to Pub/Sub, errors are logged to Cloud Logging. For more information, see [Viewing error logs in Cloud Logging](https://cloud.google.com/healthcare-api/docs/how-tos/logging).", + "type": "string" + }, + "sendFullResource": { + "description": "Whether to send full FHIR resource to this Pub/Sub topic.", + "type": "boolean" + }, + "sendPreviousResourceOnDelete": { + "description": "Whether to send full FHIR resource to this Pub/Sub topic for deleting FHIR resource. 
Note that setting this to true does not guarantee that all previous resources will be sent in the format of full FHIR resource. When a resource change is too large or during heavy traffic, only the resource name will be sent. Clients should always check the \"payloadType\" label from a Pub/Sub message to determine whether it needs to fetch the full previous resource as a separate operation.", + "type": "boolean" + } + }, + "type": "object" + }, "FhirStore": { "description": "Represents a FHIR store.", "id": "FhirStore", @@ -5154,7 +5177,14 @@ }, "notificationConfig": { "$ref": "NotificationConfig", - "description": "If non-empty, publish all resource modifications of this FHIR store to this destination. The Pub/Sub message attributes contain a map with a string describing the action that has triggered the notification. For example, \"action\":\"CreateResource\"." + "description": "Deprecated. Use `notification_configs` instead. If non-empty, publish all resource modifications of this FHIR store to this destination. The Pub/Sub message attributes contain a map with a string describing the action that has triggered the notification. For example, \"action\":\"CreateResource\"." + }, + "notificationConfigs": { + "description": "Specifies where and whether to send notifications upon changes to a FHIR store.", + "items": { + "$ref": "FhirNotificationConfig" + }, + "type": "array" }, "streamConfigs": { "description": "A list of streaming configs that configure the destinations of streaming export for every resource mutation in this FHIR store. Each store is allowed to have up to 10 streaming configs. After a new config is added, the next resource mutation is streamed to the new location in addition to the existing ones. When a location is removed from the list, the server stops streaming to that location. 
Before adding a new config, you must add the required [`bigquery.dataEditor`](https://cloud.google.com/bigquery/docs/access-control#bigquery.dataEditor) role to your project's **Cloud Healthcare Service Agent** [service account](https://cloud.google.com/iam/docs/service-accounts). Some lag (typically on the order of dozens of seconds) is expected before the results show up in the streaming destination.", @@ -6048,7 +6078,7 @@ "type": "object" }, "Location": { - "description": "A resource that represents Google Cloud Platform location.", + "description": "A resource that represents a Google Cloud location.", "id": "Location", "properties": { "displayName": { @@ -6465,8 +6495,8 @@ ], "enumDescriptions": [ "No schema type specified. This type is unsupported.", - "Analytics schema defined by the FHIR community. See https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md. BigQuery only allows a maximum of 10,000 columns per table. Due to this limitation, the server will not generate schemas for fields of type `Resource`, which can hold any resource type. The affected fields are `Parameters.parameter.resource`, `Bundle.entry.resource`, and `Bundle.entry.response.outcome`.", - "Analytics V2, similar to schema defined by the FHIR community, with added support for extensions with one or more occurrences and contained resources in stringified JSON. Analytics V2 uses more space in the destination table than Analytics V1." + "Analytics schema defined by the FHIR community. See https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md. BigQuery only allows a maximum of 10,000 columns per table. Due to this limitation, the server will not generate schemas for fields of type `Resource`, which can hold any resource type. The affected fields are `Parameters.parameter.resource`, `Bundle.entry.resource`, and `Bundle.entry.response.outcome`. 
Analytics schema does not gracefully handle extensions with one or more occurrences, anaytics schema also does not handle contained resource.", + "Analytics V2, similar to schema defined by the FHIR community, with added support for extensions with one or more occurrences and contained resources in stringified JSON. Analytics V2 uses more space in the destination table than Analytics V1. It is generally recommended to use Analytics V2 over Analytics." ], "type": "string" } @@ -6766,6 +6796,20 @@ "TextConfig": { "id": "TextConfig", "properties": { + "additionalTransformations": { + "description": "Transformations to apply to the detected data, overridden by `exclude_info_types`.", + "items": { + "$ref": "InfoTypeTransformation" + }, + "type": "array" + }, + "excludeInfoTypes": { + "description": "InfoTypes to skip transforming, overriding `additional_transformations`.", + "items": { + "type": "string" + }, + "type": "array" + }, "transformations": { "description": "The transformations to apply to the detected data. Deprecated. Use `additional_transformations` instead.", "items": { diff --git a/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-gen.go b/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-gen.go index 8aa9752a84..c62b625933 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/healthcare/v1/healthcare-gen.go @@ -71,6 +71,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "healthcare:v1" const apiName = "healthcare" @@ -469,8 +470,9 @@ type AnalyzeEntitiesResponse struct { // concepts or normalized mention content. 
Entities []*Entity `json:"entities,omitempty"` - // EntityMentions: entity_mentions contains all the annotated medical - // entities that were mentioned in the provided document. + // EntityMentions: The `entity_mentions` field contains all the + // annotated medical entities that were mentioned in the provided + // document. EntityMentions []*EntityMention `json:"entityMentions,omitempty"` // Relationships: relationships contains all the binary relationships @@ -1429,6 +1431,14 @@ type DeidentifyConfig struct { // the source_dataset. Text *TextConfig `json:"text,omitempty"` + // UseRegionalDataProcessing: Ensures in-flight data remains in the + // region of origin during de-identification. Using this option results + // in a significant reduction of throughput, and is not compatible with + // `LOCATION` or `ORGANIZATION_NAME` infoTypes. `LOCATION` must be + // excluded within `TextConfig`, and must also be excluded within + // `ImageConfig` if image redaction is required. + UseRegionalDataProcessing bool `json:"useRegionalDataProcessing,omitempty"` + // ForceSendFields is a list of field names (e.g. "Dicom") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -2387,6 +2397,67 @@ func (s *FhirFilter) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// FhirNotificationConfig: Contains the configuration for FHIR +// notifications. +type FhirNotificationConfig struct { + // PubsubTopic: The Pub/Sub (https://cloud.google.com/pubsub/docs/) + // topic that notifications of changes are published on. Supplied by the + // client. The notification is a `PubsubMessage` with the following + // fields: * `PubsubMessage.Data` contains the resource name. * + // `PubsubMessage.MessageId` is the ID of this notification. It is + // guaranteed to be unique within the topic. 
* + // `PubsubMessage.PublishTime` is the time when the message was + // published. Note that notifications are only sent if the topic is + // non-empty. Topic names + // (https://cloud.google.com/pubsub/docs/overview#names) must be scoped + // to a project. The Cloud Healthcare API service account, + // service-@gcp-sa-healthcare.iam.gserviceaccount.com, must have + // publisher permissions on the given Pub/Sub topic. Not having adequate + // permissions causes the calls that send notifications to fail + // (https://cloud.google.com/healthcare-api/docs/permissions-healthcare-api-gcp-products#dicom_fhir_and_hl7v2_store_cloud_pubsub_permissions). + // If a notification can't be published to Pub/Sub, errors are logged to + // Cloud Logging. For more information, see Viewing error logs in Cloud + // Logging + // (https://cloud.google.com/healthcare-api/docs/how-tos/logging). + PubsubTopic string `json:"pubsubTopic,omitempty"` + + // SendFullResource: Whether to send full FHIR resource to this Pub/Sub + // topic. + SendFullResource bool `json:"sendFullResource,omitempty"` + + // SendPreviousResourceOnDelete: Whether to send full FHIR resource to + // this Pub/Sub topic for deleting FHIR resource. Note that setting this + // to true does not guarantee that all previous resources will be sent + // in the format of full FHIR resource. When a resource change is too + // large or during heavy traffic, only the resource name will be sent. + // Clients should always check the "payloadType" label from a Pub/Sub + // message to determine whether it needs to fetch the full previous + // resource as a separate operation. + SendPreviousResourceOnDelete bool `json:"sendPreviousResourceOnDelete,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PubsubTopic") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PubsubTopic") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FhirNotificationConfig) MarshalJSON() ([]byte, error) { + type NoMethod FhirNotificationConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // FhirStore: Represents a FHIR store. type FhirStore struct { // ComplexDataTypeReferenceParsing: Enable parsing of references within @@ -2466,12 +2537,17 @@ type FhirStore struct { // }`. Name string `json:"name,omitempty"` - // NotificationConfig: If non-empty, publish all resource modifications - // of this FHIR store to this destination. The Pub/Sub message - // attributes contain a map with a string describing the action that has - // triggered the notification. For example, "action":"CreateResource". + // NotificationConfig: Deprecated. Use `notification_configs` instead. + // If non-empty, publish all resource modifications of this FHIR store + // to this destination. The Pub/Sub message attributes contain a map + // with a string describing the action that has triggered the + // notification. For example, "action":"CreateResource". NotificationConfig *NotificationConfig `json:"notificationConfig,omitempty"` + // NotificationConfigs: Specifies where and whether to send + // notifications upon changes to a FHIR store. 
+ NotificationConfigs []*FhirNotificationConfig `json:"notificationConfigs,omitempty"` + // StreamConfigs: A list of streaming configs that configure the // destinations of streaming export for every resource mutation in this // FHIR store. Each store is allowed to have up to 10 streaming configs. @@ -4364,7 +4440,7 @@ func (s *ListUserDataMappingsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Location: A resource that represents Google Cloud Platform location. +// Location: A resource that represents a Google Cloud location. type Location struct { // DisplayName: The friendly name for this location, typically a nearby // city name. For example, "Tokyo". @@ -5169,11 +5245,14 @@ type SchemaConfig struct { // this limitation, the server will not generate schemas for fields of // type `Resource`, which can hold any resource type. The affected // fields are `Parameters.parameter.resource`, `Bundle.entry.resource`, - // and `Bundle.entry.response.outcome`. + // and `Bundle.entry.response.outcome`. Analytics schema does not + // gracefully handle extensions with one or more occurrences, anaytics + // schema also does not handle contained resource. // "ANALYTICS_V2" - Analytics V2, similar to schema defined by the // FHIR community, with added support for extensions with one or more // occurrences and contained resources in stringified JSON. Analytics V2 - // uses more space in the destination table than Analytics V1. + // uses more space in the destination table than Analytics V1. It is + // generally recommended to use Analytics V2 over Analytics. SchemaType string `json:"schemaType,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -5767,25 +5846,34 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { } type TextConfig struct { + // AdditionalTransformations: Transformations to apply to the detected + // data, overridden by `exclude_info_types`. 
+ AdditionalTransformations []*InfoTypeTransformation `json:"additionalTransformations,omitempty"` + + // ExcludeInfoTypes: InfoTypes to skip transforming, overriding + // `additional_transformations`. + ExcludeInfoTypes []string `json:"excludeInfoTypes,omitempty"` + // Transformations: The transformations to apply to the detected data. // Deprecated. Use `additional_transformations` instead. Transformations []*InfoTypeTransformation `json:"transformations,omitempty"` - // ForceSendFields is a list of field names (e.g. "Transformations") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AdditionalTransformations") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Transformations") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. + // "AdditionalTransformations") to include in API requests with the JSON + // null value. 
By default, fields with empty values are omitted from API + // requests. However, any field with an empty value appearing in + // NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. NullFields []string `json:"-"` } @@ -20273,7 +20361,10 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) Since(Sin // Type sets the optional parameter "_type": String of comma-delimited // FHIR resource types. If provided, only resources of the specified -// resource type(s) are returned. +// resource type(s) are returned. Specifying multiple `_type` parameters +// isn't supported. For example, the result of +// `_type=Observation&_type=Encounter` is undefined. Use +// `_type=Observation,Encounter` instead. func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) Type(Type string) *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall { c.urlParams_.Set("_type", Type) return c @@ -20388,7 +20479,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirPatientEverythingCall) Do(opts . // "type": "string" // }, // "_type": { - // "description": "String of comma-delimited FHIR resource types. If provided, only resources of the specified resource type(s) are returned.", + // "description": "String of comma-delimited FHIR resource types. If provided, only resources of the specified resource type(s) are returned. Specifying multiple `_type` parameters isn't supported. For example, the result of `_type=Observation\u0026_type=Encounter` is undefined. Use `_type=Observation,Encounter` instead.", // "location": "query", // "type": "string" // }, @@ -21728,16 +21819,17 @@ type ProjectsLocationsDatasetsFhirStoresFhirSearchCall struct { // `_revinclude`, `_summary=text`, `_summary=data`, and `_elements`. 
The // maximum number of search results returned defaults to 100, which can // be overridden by the `_count` parameter up to a maximum limit of -// 1000. If there are additional results, the returned `Bundle` contains -// a link of `relation` "next", which has a `_page_token` parameter for -// an opaque pagination token that can be used to retrieve the next -// page. Resources with a total size larger than 5MB or a field count -// larger than 50,000 might not be fully searchable as the server might -// trim its generated search index in those cases. Note: FHIR resources -// are indexed asynchronously, so there might be a slight delay between -// the time a resource is created or changes and when the change is -// reflected in search results. For samples and detailed information, -// see Searching for FHIR resources +// 1000. The server might return fewer resources than requested to +// prevent excessively large responses. If there are additional results, +// the returned `Bundle` contains a link of `relation` "next", which has +// a `_page_token` parameter for an opaque pagination token that can be +// used to retrieve the next page. Resources with a total size larger +// than 5MB or a field count larger than 50,000 might not be fully +// searchable as the server might trim its generated search index in +// those cases. Note: FHIR resources are indexed asynchronously, so +// there might be a slight delay between the time a resource is created +// or changes and when the change is reflected in search results. For +// samples and detailed information, see Searching for FHIR resources // (https://cloud.google.com/healthcare/docs/how-tos/fhir-search) and // Advanced FHIR search features // (https://cloud.google.com/healthcare/docs/how-tos/fhir-advanced-search). @@ -21808,7 +21900,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirSearchCall) Do(opts ...googleapi gensupport.SetOptions(c.urlParams_, opts...) 
return c.doRequest("") // { - // "description": "Searches for resources in the given FHIR store according to criteria specified as query parameters. Implements the FHIR standard search interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#search), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#search), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#search)) using the search semantics described in the FHIR Search specification ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/search.html), [STU3](http://hl7.org/implement/standards/fhir/STU3/search.html), [R4](http://hl7.org/implement/standards/fhir/R4/search.html)). Supports four methods of search defined by the specification: * `GET [base]?[parameters]` to search across all resources. * `GET [base]/[type]?[parameters]` to search resources of a specified type. * `POST [base]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method across all resources. * `POST [base]/[type]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method for the specified type. The `GET` and `POST` methods do not support compartment searches. The `POST` method does not support `application/x-www-form-urlencoded` search parameters. On success, the response body contains a JSON-encoded representation of a `Bundle` resource of type `searchset`, containing the results of the search. Errors generated by the FHIR store contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. The server's capability statement, retrieved through capabilities, indicates what search parameters are supported on each FHIR resource. 
A list of all search parameters defined by the specification can be found in the FHIR Search Parameter Registry ([STU3](http://hl7.org/implement/standards/fhir/STU3/searchparameter-registry.html), [R4](http://hl7.org/implement/standards/fhir/R4/searchparameter-registry.html)). FHIR search parameters for DSTU2 can be found on each resource's definition page. Supported search modifiers: `:missing`, `:exact`, `:contains`, `:text`, `:in`, `:not-in`, `:above`, `:below`, `:[type]`, `:not`, and `recurse` (DSTU2 and STU3) or `:iterate` (R4). Supported search result parameters: `_sort`, `_count`, `_include`, `_revinclude`, `_summary=text`, `_summary=data`, and `_elements`. The maximum number of search results returned defaults to 100, which can be overridden by the `_count` parameter up to a maximum limit of 1000. If there are additional results, the returned `Bundle` contains a link of `relation` \"next\", which has a `_page_token` parameter for an opaque pagination token that can be used to retrieve the next page. Resources with a total size larger than 5MB or a field count larger than 50,000 might not be fully searchable as the server might trim its generated search index in those cases. Note: FHIR resources are indexed asynchronously, so there might be a slight delay between the time a resource is created or changes and when the change is reflected in search results. For samples and detailed information, see [Searching for FHIR resources](https://cloud.google.com/healthcare/docs/how-tos/fhir-search) and [Advanced FHIR search features](https://cloud.google.com/healthcare/docs/how-tos/fhir-advanced-search).", + // "description": "Searches for resources in the given FHIR store according to criteria specified as query parameters. 
Implements the FHIR standard search interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#search), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#search), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#search)) using the search semantics described in the FHIR Search specification ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/search.html), [STU3](http://hl7.org/implement/standards/fhir/STU3/search.html), [R4](http://hl7.org/implement/standards/fhir/R4/search.html)). Supports four methods of search defined by the specification: * `GET [base]?[parameters]` to search across all resources. * `GET [base]/[type]?[parameters]` to search resources of a specified type. * `POST [base]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method across all resources. * `POST [base]/[type]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method for the specified type. The `GET` and `POST` methods do not support compartment searches. The `POST` method does not support `application/x-www-form-urlencoded` search parameters. On success, the response body contains a JSON-encoded representation of a `Bundle` resource of type `searchset`, containing the results of the search. Errors generated by the FHIR store contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. The server's capability statement, retrieved through capabilities, indicates what search parameters are supported on each FHIR resource. A list of all search parameters defined by the specification can be found in the FHIR Search Parameter Registry ([STU3](http://hl7.org/implement/standards/fhir/STU3/searchparameter-registry.html), [R4](http://hl7.org/implement/standards/fhir/R4/searchparameter-registry.html)). 
FHIR search parameters for DSTU2 can be found on each resource's definition page. Supported search modifiers: `:missing`, `:exact`, `:contains`, `:text`, `:in`, `:not-in`, `:above`, `:below`, `:[type]`, `:not`, and `recurse` (DSTU2 and STU3) or `:iterate` (R4). Supported search result parameters: `_sort`, `_count`, `_include`, `_revinclude`, `_summary=text`, `_summary=data`, and `_elements`. The maximum number of search results returned defaults to 100, which can be overridden by the `_count` parameter up to a maximum limit of 1000. The server might return fewer resources than requested to prevent excessively large responses. If there are additional results, the returned `Bundle` contains a link of `relation` \"next\", which has a `_page_token` parameter for an opaque pagination token that can be used to retrieve the next page. Resources with a total size larger than 5MB or a field count larger than 50,000 might not be fully searchable as the server might trim its generated search index in those cases. Note: FHIR resources are indexed asynchronously, so there might be a slight delay between the time a resource is created or changes and when the change is reflected in search results. For samples and detailed information, see [Searching for FHIR resources](https://cloud.google.com/healthcare/docs/how-tos/fhir-search) and [Advanced FHIR search features](https://cloud.google.com/healthcare/docs/how-tos/fhir-advanced-search).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/_search", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.search", @@ -21890,16 +21982,17 @@ type ProjectsLocationsDatasetsFhirStoresFhirSearchTypeCall struct { // `_revinclude`, `_summary=text`, `_summary=data`, and `_elements`. The // maximum number of search results returned defaults to 100, which can // be overridden by the `_count` parameter up to a maximum limit of -// 1000. 
If there are additional results, the returned `Bundle` contains -// a link of `relation` "next", which has a `_page_token` parameter for -// an opaque pagination token that can be used to retrieve the next -// page. Resources with a total size larger than 5MB or a field count -// larger than 50,000 might not be fully searchable as the server might -// trim its generated search index in those cases. Note: FHIR resources -// are indexed asynchronously, so there might be a slight delay between -// the time a resource is created or changes and when the change is -// reflected in search results. For samples and detailed information, -// see Searching for FHIR resources +// 1000. The server might return fewer resources than requested to +// prevent excessively large responses. If there are additional results, +// the returned `Bundle` contains a link of `relation` "next", which has +// a `_page_token` parameter for an opaque pagination token that can be +// used to retrieve the next page. Resources with a total size larger +// than 5MB or a field count larger than 50,000 might not be fully +// searchable as the server might trim its generated search index in +// those cases. Note: FHIR resources are indexed asynchronously, so +// there might be a slight delay between the time a resource is created +// or changes and when the change is reflected in search results. For +// samples and detailed information, see Searching for FHIR resources // (https://cloud.google.com/healthcare/docs/how-tos/fhir-search) and // Advanced FHIR search features // (https://cloud.google.com/healthcare/docs/how-tos/fhir-advanced-search). @@ -21979,7 +22072,7 @@ func (c *ProjectsLocationsDatasetsFhirStoresFhirSearchTypeCall) Do(opts ...googl gensupport.SetOptions(c.urlParams_, opts...) return c.doRequest("") // { - // "description": "Searches for resources in the given FHIR store according to criteria specified as query parameters. 
Implements the FHIR standard search interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#search), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#search), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#search)) using the search semantics described in the FHIR Search specification ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/search.html), [STU3](http://hl7.org/implement/standards/fhir/STU3/search.html), [R4](http://hl7.org/implement/standards/fhir/R4/search.html)). Supports four methods of search defined by the specification: * `GET [base]?[parameters]` to search across all resources. * `GET [base]/[type]?[parameters]` to search resources of a specified type. * `POST [base]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method across all resources. * `POST [base]/[type]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method for the specified type. The `GET` and `POST` methods do not support compartment searches. The `POST` method does not support `application/x-www-form-urlencoded` search parameters. On success, the response body contains a JSON-encoded representation of a `Bundle` resource of type `searchset`, containing the results of the search. Errors generated by the FHIR store contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. The server's capability statement, retrieved through capabilities, indicates what search parameters are supported on each FHIR resource. A list of all search parameters defined by the specification can be found in the FHIR Search Parameter Registry ([STU3](http://hl7.org/implement/standards/fhir/STU3/searchparameter-registry.html), [R4](http://hl7.org/implement/standards/fhir/R4/searchparameter-registry.html)). 
FHIR search parameters for DSTU2 can be found on each resource's definition page. Supported search modifiers: `:missing`, `:exact`, `:contains`, `:text`, `:in`, `:not-in`, `:above`, `:below`, `:[type]`, `:not`, and `recurse` (DSTU2 and STU3) or `:iterate` (R4). Supported search result parameters: `_sort`, `_count`, `_include`, `_revinclude`, `_summary=text`, `_summary=data`, and `_elements`. The maximum number of search results returned defaults to 100, which can be overridden by the `_count` parameter up to a maximum limit of 1000. If there are additional results, the returned `Bundle` contains a link of `relation` \"next\", which has a `_page_token` parameter for an opaque pagination token that can be used to retrieve the next page. Resources with a total size larger than 5MB or a field count larger than 50,000 might not be fully searchable as the server might trim its generated search index in those cases. Note: FHIR resources are indexed asynchronously, so there might be a slight delay between the time a resource is created or changes and when the change is reflected in search results. For samples and detailed information, see [Searching for FHIR resources](https://cloud.google.com/healthcare/docs/how-tos/fhir-search) and [Advanced FHIR search features](https://cloud.google.com/healthcare/docs/how-tos/fhir-advanced-search).", + // "description": "Searches for resources in the given FHIR store according to criteria specified as query parameters. 
Implements the FHIR standard search interaction ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/http.html#search), [STU3](http://hl7.org/implement/standards/fhir/STU3/http.html#search), [R4](http://hl7.org/implement/standards/fhir/R4/http.html#search)) using the search semantics described in the FHIR Search specification ([DSTU2](http://hl7.org/implement/standards/fhir/DSTU2/search.html), [STU3](http://hl7.org/implement/standards/fhir/STU3/search.html), [R4](http://hl7.org/implement/standards/fhir/R4/search.html)). Supports four methods of search defined by the specification: * `GET [base]?[parameters]` to search across all resources. * `GET [base]/[type]?[parameters]` to search resources of a specified type. * `POST [base]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method across all resources. * `POST [base]/[type]/_search?[parameters]` as an alternate form having the same semantics as the `GET` method for the specified type. The `GET` and `POST` methods do not support compartment searches. The `POST` method does not support `application/x-www-form-urlencoded` search parameters. On success, the response body contains a JSON-encoded representation of a `Bundle` resource of type `searchset`, containing the results of the search. Errors generated by the FHIR store contain a JSON-encoded `OperationOutcome` resource describing the reason for the error. If the request cannot be mapped to a valid API method on a FHIR store, a generic GCP error might be returned instead. The server's capability statement, retrieved through capabilities, indicates what search parameters are supported on each FHIR resource. A list of all search parameters defined by the specification can be found in the FHIR Search Parameter Registry ([STU3](http://hl7.org/implement/standards/fhir/STU3/searchparameter-registry.html), [R4](http://hl7.org/implement/standards/fhir/R4/searchparameter-registry.html)). 
FHIR search parameters for DSTU2 can be found on each resource's definition page. Supported search modifiers: `:missing`, `:exact`, `:contains`, `:text`, `:in`, `:not-in`, `:above`, `:below`, `:[type]`, `:not`, and `recurse` (DSTU2 and STU3) or `:iterate` (R4). Supported search result parameters: `_sort`, `_count`, `_include`, `_revinclude`, `_summary=text`, `_summary=data`, and `_elements`. The maximum number of search results returned defaults to 100, which can be overridden by the `_count` parameter up to a maximum limit of 1000. The server might return fewer resources than requested to prevent excessively large responses. If there are additional results, the returned `Bundle` contains a link of `relation` \"next\", which has a `_page_token` parameter for an opaque pagination token that can be used to retrieve the next page. Resources with a total size larger than 5MB or a field count larger than 50,000 might not be fully searchable as the server might trim its generated search index in those cases. Note: FHIR resources are indexed asynchronously, so there might be a slight delay between the time a resource is created or changes and when the change is reflected in search results. For samples and detailed information, see [Searching for FHIR resources](https://cloud.google.com/healthcare/docs/how-tos/fhir-search) and [Advanced FHIR search features](https://cloud.google.com/healthcare/docs/how-tos/fhir-advanced-search).", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/fhirStores/{fhirStoresId}/fhir/{resourceType}/_search", // "httpMethod": "POST", // "id": "healthcare.projects.locations.datasets.fhirStores.fhir.search-type", @@ -23901,7 +23994,7 @@ type ProjectsLocationsDatasetsHl7V2StoresMessagesCreateCall struct { // topic, the adapter transmits the message when a notification is // received. // -// - parent: The name of the dataset this message belongs to. +// - parent: The name of the HL7v2 store this message belongs to. 
func (r *ProjectsLocationsDatasetsHl7V2StoresMessagesService) Create(parent string, createmessagerequest *CreateMessageRequest) *ProjectsLocationsDatasetsHl7V2StoresMessagesCreateCall { c := &ProjectsLocationsDatasetsHl7V2StoresMessagesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -24009,7 +24102,7 @@ func (c *ProjectsLocationsDatasetsHl7V2StoresMessagesCreateCall) Do(opts ...goog // ], // "parameters": { // "parent": { - // "description": "The name of the dataset this message belongs to.", + // "description": "The name of the HL7v2 store this message belongs to.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/datasets/[^/]+/hl7V2Stores/[^/]+$", // "required": true, @@ -25295,14 +25388,7 @@ type ProjectsLocationsDatasetsOperationsListCall struct { // List: Lists operations that match the specified filter in the // request. If the server doesn't support this method, it returns -// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to -// override the binding to use different resource name schemes, such as -// `users/*/operations`. To override the binding, API services can add a -// binding such as "/v1/{name=users/*}/operations" to their service -// configuration. For backwards compatibility, the default name includes -// the operations collection id, however overriding users must ensure -// the name binding is the parent resource, without the operations -// collection id. +// `UNIMPLEMENTED`. // // - name: The name of the operation's parent resource. func (r *ProjectsLocationsDatasetsOperationsService) List(name string) *ProjectsLocationsDatasetsOperationsListCall { @@ -25431,7 +25517,7 @@ func (c *ProjectsLocationsDatasetsOperationsListCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. 
NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/datasets/{datasetsId}/operations", // "httpMethod": "GET", // "id": "healthcare.projects.locations.datasets.operations.list", diff --git a/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-api.json b/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-api.json index 7982136421..533e7731c7 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-api.json @@ -646,6 +646,160 @@ }, "resources": { "keys": { + "methods": { + "create": { + "description": "Creates a new WorkforcePoolProviderKey in a WorkforcePoolProvider.", + "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/keys", + "httpMethod": "POST", + "id": "iam.locations.workforcePools.providers.keys.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The provider to create this key in.", + "location": "path", + "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+$", + "required": true, + "type": "string" + }, + "workforcePoolProviderKeyId": { + "description": "Required. The ID to use for the key, which becomes the final component of the resource name. 
This value must be 4-32 characters, and may contain the characters [a-z0-9-].", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/keys", + "request": { + "$ref": "WorkforcePoolProviderKey" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a WorkforcePoolProviderKey. You can undelete a key for 30 days. After 30 days, deletion is permanent.", + "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/keys/{keysId}", + "httpMethod": "DELETE", + "id": "iam.locations.workforcePools.providers.keys.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the key to delete.", + "location": "path", + "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+/keys/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets a WorkforcePoolProviderKey.", + "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/keys/{keysId}", + "httpMethod": "GET", + "id": "iam.locations.workforcePools.providers.keys.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the key to retrieve.", + "location": "path", + "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+/keys/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "WorkforcePoolProviderKey" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists all non-deleted WorkforcePoolProviderKeys in a WorkforcePoolProvider. 
If `show_deleted` is set to `true`, then deleted keys are also listed.", + "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/keys", + "httpMethod": "GET", + "id": "iam.locations.workforcePools.providers.keys.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of keys to return. If unspecified, all keys are returned. The maximum value is 10; values above 10 are truncated to 10.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A page token, received from a previous `ListWorkforcePoolProviderKeys` call. Provide this to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The provider resource to list encryption keys for. Format: `locations/{location}/workforcePools/{workforce_pool_id}/providers/{provider_id}`", + "location": "path", + "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+$", + "required": true, + "type": "string" + }, + "showDeleted": { + "description": "Whether to return soft-deleted keys.", + "location": "query", + "type": "boolean" + } + }, + "path": "v1/{+parent}/keys", + "response": { + "$ref": "ListWorkforcePoolProviderKeysResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "undelete": { + "description": "Undeletes a WorkforcePoolProviderKey, as long as it was deleted fewer than 30 days ago.", + "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/keys/{keysId}:undelete", + "httpMethod": "POST", + "id": "iam.locations.workforcePools.providers.keys.undelete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The name of the key to undelete.", + "location": "path", + "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+/keys/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:undelete", + "request": { + "$ref": "UndeleteWorkforcePoolProviderKeyRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, "resources": { "operations": { "methods": { @@ -1225,6 +1379,138 @@ } }, "resources": { + "namespaces": { + "resources": { + "managedIdentities": { + "resources": { + "operations": { + "methods": { + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/namespaces/{namespacesId}/managedIdentities/{managedIdentitiesId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "iam.projects.locations.workloadIdentityPools.namespaces.managedIdentities.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/namespaces/[^/]+/managedIdentities/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "workloadSources": { + "resources": { + "operations": { + "methods": { + "get": { + "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/namespaces/{namespacesId}/managedIdentities/{managedIdentitiesId}/workloadSources/{workloadSourcesId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "iam.projects.locations.workloadIdentityPools.namespaces.managedIdentities.workloadSources.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/namespaces/[^/]+/managedIdentities/[^/]+/workloadSources/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + } + }, + "operations": { + "methods": { + "get": { + "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/namespaces/{namespacesId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "iam.projects.locations.workloadIdentityPools.namespaces.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/namespaces/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "workloadSources": { + "resources": { + "operations": { + "methods": { + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/namespaces/{namespacesId}/workloadSources/{workloadSourcesId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "iam.projects.locations.workloadIdentityPools.namespaces.workloadSources.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/namespaces/[^/]+/workloadSources/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + } + }, "operations": { "methods": { "get": { @@ -1445,6 +1731,160 @@ }, "resources": { "keys": { + 
"methods": { + "create": { + "description": "Create a new WorkloadIdentityPoolProviderKey in a WorkloadIdentityPoolProvider.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}/keys", + "httpMethod": "POST", + "id": "iam.projects.locations.workloadIdentityPools.providers.keys.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The parent provider resource to create the key in.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+$", + "required": true, + "type": "string" + }, + "workloadIdentityPoolProviderKeyId": { + "description": "Required. The ID to use for the key, which becomes the final component of the resource name. This value should be 4-32 characters, and may contain the characters [a-z0-9-].", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/keys", + "request": { + "$ref": "WorkloadIdentityPoolProviderKey" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes an WorkloadIdentityPoolProviderKey. You can undelete a key for 30 days. After 30 days, deletion is permanent.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}/keys/{keysId}", + "httpMethod": "DELETE", + "id": "iam.projects.locations.workloadIdentityPools.providers.keys.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The name of the encryption key to delete.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+/keys/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets an individual WorkloadIdentityPoolProviderKey.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}/keys/{keysId}", + "httpMethod": "GET", + "id": "iam.projects.locations.workloadIdentityPools.providers.keys.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the key to retrieve.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+/keys/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "WorkloadIdentityPoolProviderKey" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists all non-deleted WorkloadIdentityPoolProviderKeys in a project. If show_deleted is set to `true`, then deleted pools are also listed.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}/keys", + "httpMethod": "GET", + "id": "iam.projects.locations.workloadIdentityPools.providers.keys.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of keys to return. If unspecified, all keys are returned. The maximum value is 10; values above 10 are truncated to 10.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A page token, received from a previous `ListWorkloadIdentityPoolProviderKeys` call. 
Provide this to retrieve the subsequent page.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent provider resource to list encryption keys for.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+$", + "required": true, + "type": "string" + }, + "showDeleted": { + "description": "Whether to return soft deleted resources as well.", + "location": "query", + "type": "boolean" + } + }, + "path": "v1/{+parent}/keys", + "response": { + "$ref": "ListWorkloadIdentityPoolProviderKeysResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "undelete": { + "description": "Undeletes an WorkloadIdentityPoolProviderKey, as long as it was deleted fewer than 30 days ago.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}/keys/{keysId}:undelete", + "httpMethod": "POST", + "id": "iam.projects.locations.workloadIdentityPools.providers.keys.undelete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the encryption key to undelete.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+/keys/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:undelete", + "request": { + "$ref": "UndeleteWorkloadIdentityPoolProviderKeyRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, "resources": { "operations": { "methods": { @@ -1976,6 +2416,7 @@ ] }, "signBlob": { + "deprecated": true, "description": "**Note:** This method is deprecated. Use the [`signBlob`](https://cloud.google.com/iam/help/rest-credentials/v1/projects.serviceAccounts/signBlob) method in the IAM Service Account Credentials API instead. 
If you currently use this method, see the [migration guide](https://cloud.google.com/iam/help/credentials/migrate-api) for instructions. Signs a blob using the system-managed private key for a ServiceAccount.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", "httpMethod": "POST", @@ -2004,6 +2445,7 @@ ] }, "signJwt": { + "deprecated": true, "description": "**Note:** This method is deprecated. Use the [`signJwt`](https://cloud.google.com/iam/help/rest-credentials/v1/projects.serviceAccounts/signJwt) method in the IAM Service Account Credentials API instead. If you currently use this method, see the [migration guide](https://cloud.google.com/iam/help/credentials/migrate-api) for instructions. Signs a JSON Web Token (JWT) using the system-managed private key for a ServiceAccount.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", "httpMethod": "POST", @@ -2441,7 +2883,7 @@ } } }, - "revision": "20230209", + "revision": "20230622", "rootUrl": "https://iam.googleapis.com/", "schemas": { "AdminAuditData": { @@ -2740,9 +3182,86 @@ "description": "Required. The client ID. Must match the audience claim of the JWT issued by the identity provider.", "type": "string" }, + "clientSecret": { + "$ref": "GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret", + "description": "The optional client secret. Required to enable Authorization Code flow for web sign-in." + }, "issuerUri": { "description": "Required. The OIDC issuer URI. Must be a valid URI using the 'https' scheme.", "type": "string" + }, + "webSsoConfig": { + "$ref": "GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig", + "description": "Required. Configuration for web single sign-on for the OIDC provider. Here, web sign-in refers to console sign-in and gcloud sign-in through the browser." 
+ } + }, + "type": "object" + }, + "GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret": { + "description": "Representation of a client secret configured for the OIDC provider.", + "id": "GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret", + "properties": { + "value": { + "$ref": "GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue", + "description": "The value of the client secret." + } + }, + "type": "object" + }, + "GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue": { + "description": "Representation of the value of the client secret.", + "id": "GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue", + "properties": { + "plainText": { + "description": "Input only. The plain text of the client secret value. For security reasons, this field is only used for input and will never be populated in any response.", + "type": "string" + }, + "thumbprint": { + "description": "Output only. A thumbprint to represent the current client secret value.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig": { + "description": "Configuration for web single sign-on for the OIDC provider.", + "id": "GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig", + "properties": { + "additionalScopes": { + "description": "Additional scopes to request for in the OIDC authentication request on top of scopes requested by default. By default, the `openid`, `profile` and `email` scopes that are supported by the identity provider are requested. Each additional scope may be at most 256 characters. A maximum of 10 additional scopes may be configured.", + "items": { + "type": "string" + }, + "type": "array" + }, + "assertionClaimsBehavior": { + "description": "Required. 
The behavior for how OIDC Claims are included in the `assertion` object used for attribute mapping and attribute condition.", + "enum": [ + "ASSERTION_CLAIMS_BEHAVIOR_UNSPECIFIED", + "MERGE_USER_INFO_OVER_ID_TOKEN_CLAIMS", + "ONLY_ID_TOKEN_CLAIMS" + ], + "enumDescriptions": [ + "No assertion claims behavior specified.", + "Merge the UserInfo Endpoint Claims with ID Token Claims, preferring UserInfo Claim Values for the same Claim Name. Only possible for flows granting an Access Token, which comprise only the Authorization Code Flow at the moment.", + "Only include ID Token Claims." + ], + "type": "string" + }, + "responseType": { + "description": "Required. The Response Type to request for in the OIDC Authorization Request for web sign-in.", + "enum": [ + "RESPONSE_TYPE_UNSPECIFIED", + "CODE", + "ID_TOKEN" + ], + "enumDescriptions": [ + "No Response Type specified.", + "The `response_type=code` selection uses the Authorization Code Flow for web sign-in. Requires a configured client secret.", + "The `response_type=id_token` selection uses the Implicit Flow for web sign-in." + ], + "type": "string" } }, "type": "object" @@ -2758,6 +3277,59 @@ }, "type": "object" }, + "KeyData": { + "description": "Represents a public key data along with its format.", + "id": "KeyData", + "properties": { + "format": { + "description": "Output only. The format of the key.", + "enum": [ + "KEY_FORMAT_UNSPECIFIED", + "RSA_X509_PEM" + ], + "enumDescriptions": [ + "No format has been specified. This is an invalid format and must not be used.", + "A RSA public key wrapped in an X.509v3 certificate ([RFC5280] ( https://www.ietf.org/rfc/rfc5280.txt)), encoded in base64, and wrapped in [public certificate label](https://datatracker.ietf.org/doc/html/rfc7468#section-5.1)." + ], + "readOnly": true, + "type": "string" + }, + "key": { + "description": "Output only. The key data. 
The format of the key is represented by the format field.", + "readOnly": true, + "type": "string" + }, + "keySpec": { + "description": "Required. The specifications for the key.", + "enum": [ + "KEY_SPEC_UNSPECIFIED", + "RSA_2048", + "RSA_3072", + "RSA_4096" + ], + "enumDescriptions": [ + "No key specification specified.", + "A 2048 bit RSA key.", + "A 3072 bit RSA key.", + "A 4096 bit RSA key." + ], + "type": "string" + }, + "notAfterTime": { + "description": "Output only. Latest timestamp when this key is valid. Attempts to use this key after this time will fail. Only present if the key data represents a X.509 certificate.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "notBeforeTime": { + "description": "Output only. Earliest timestamp when this key is valid. Attempts to use this key before this time will fail. Only present if the key data represents a X.509 certificate.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "LintPolicyRequest": { "description": "The request to lint a Cloud IAM policy object.", "id": "LintPolicyRequest", @@ -2893,6 +3465,24 @@ }, "type": "object" }, + "ListWorkforcePoolProviderKeysResponse": { + "description": "Response message for ListWorkforcePoolProviderKeys.", + "id": "ListWorkforcePoolProviderKeysResponse", + "properties": { + "nextPageToken": { + "description": "A token, which can be sent as `page_token` to retrieve the next page. 
If this field is omitted, there are no subsequent pages.", + "type": "string" + }, + "workforcePoolProviderKeys": { + "description": "A list of WorkforcePoolProviderKeys.", + "items": { + "$ref": "WorkforcePoolProviderKey" + }, + "type": "array" + } + }, + "type": "object" + }, "ListWorkforcePoolProvidersResponse": { "description": "Response message for ListWorkforcePoolProviders.", "id": "ListWorkforcePoolProvidersResponse", @@ -2929,6 +3519,24 @@ }, "type": "object" }, + "ListWorkloadIdentityPoolProviderKeysResponse": { + "description": "Response message for ListWorkloadIdentityPoolProviderKeys.", + "id": "ListWorkloadIdentityPoolProviderKeysResponse", + "properties": { + "nextPageToken": { + "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", + "type": "string" + }, + "workloadIdentityPoolProviderKeys": { + "description": "A list of WorkloadIdentityPoolProviderKey", + "items": { + "$ref": "WorkloadIdentityPoolProviderKey" + }, + "type": "array" + } + }, + "type": "object" + }, "ListWorkloadIdentityPoolProvidersResponse": { "description": "Response message for ListWorkloadIdentityPoolProviders.", "id": "ListWorkloadIdentityPoolProvidersResponse", @@ -2979,6 +3587,10 @@ "issuerUri": { "description": "Required. The OIDC issuer URL. Must be an HTTPS endpoint.", "type": "string" + }, + "jwksJson": { + "description": "Optional. OIDC JWKs in JSON String format. For details on the definition of a JWK, see https://tools.ietf.org/html/rfc7517. If not set, the `jwks_uri` from the discovery document(fetched from the .well-known path of the `issuer_uri`) will be used. Currently, RSA and EC asymmetric keys are supported. 
The JWK must use following format and include only the following fields: { \"keys\": [ { \"kty\": \"RSA/EC\", \"alg\": \"\", \"use\": \"sig\", \"kid\": \"\", \"n\": \"\", \"e\": \"\", \"x\": \"\", \"y\": \"\", \"crv\": \"\" } ] }", + "type": "string" } }, "type": "object" @@ -3295,7 +3907,7 @@ "type": "array" }, "name": { - "description": "The name of the role. When Role is used in CreateRole, the role name must not be set. When Role is used in output and other input such as UpdateRole, the role name is the complete path, e.g., roles/logging.viewer for predefined roles and organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom roles.", + "description": "The name of the role. When `Role` is used in `CreateRole`, the role name must not be set. When `Role` is used in output and other input such as `UpdateRole`, the role name is the complete path. For example, `roles/logging.viewer` for predefined roles, `organizations/{ORGANIZATION_ID}/roles/my-role` for organization-level custom roles, and `projects/{PROJECT_ID}/roles/my-role` for project-level custom roles.", "type": "string" }, "stage": { @@ -3629,6 +4241,12 @@ }, "type": "object" }, + "UndeleteWorkforcePoolProviderKeyRequest": { + "description": "Request message for UndeleteWorkforcePoolProviderKey.", + "id": "UndeleteWorkforcePoolProviderKeyRequest", + "properties": {}, + "type": "object" + }, "UndeleteWorkforcePoolProviderRequest": { "description": "Request message for UndeleteWorkforcePoolProvider.", "id": "UndeleteWorkforcePoolProviderRequest", @@ -3647,6 +4265,12 @@ "properties": {}, "type": "object" }, + "UndeleteWorkloadIdentityPoolProviderKeyRequest": { + "description": "Request message for UndeleteWorkloadIdentityPoolProviderKey.", + "id": "UndeleteWorkloadIdentityPoolProviderKeyRequest", + "properties": {}, + "type": "object" + }, "UndeleteWorkloadIdentityPoolProviderRequest": { "description": "Request message for UndeleteWorkloadIdentityPoolProvider.", "id": 
"UndeleteWorkloadIdentityPoolProviderRequest", @@ -3680,7 +4304,7 @@ "type": "string" }, "disabled": { - "description": "Whether the pool is disabled. You cannot use a disabled pool to exchange tokens, or use existing tokens to access resources. If the pool is re-enabled, existing tokens grant access again.", + "description": "Disables the workforce pool. You cannot use a disabled pool to exchange tokens, or use existing tokens to access resources. If the pool is re-enabled, existing tokens grant access again.", "type": "boolean" }, "displayName": { @@ -3739,7 +4363,7 @@ "type": "string" }, "disabled": { - "description": "Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access.", + "description": "Disables the workforce pool provider. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access.", "type": "boolean" }, "displayName": { @@ -3777,6 +4401,55 @@ }, "type": "object" }, + "WorkforcePoolProviderKey": { + "description": "Represents a public key configuration for a Workforce Pool Provider. The key can be configured in your identity provider to encrypt SAML assertions. Google holds the corresponding private key, which it uses to decrypt encrypted tokens.", + "id": "WorkforcePoolProviderKey", + "properties": { + "expireTime": { + "description": "Output only. The time after which the key will be permanently deleted and cannot be recovered. Note that the key may get purged before this time if the total limit of keys per provider is exceeded.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "keyData": { + "$ref": "KeyData", + "description": "Immutable. Public half of the asymmetric key." + }, + "name": { + "description": "Output only. The resource name of the key.", + "readOnly": true, + "type": "string" + }, + "state": { + "description": "Output only. 
The state of the key.", + "enum": [ + "STATE_UNSPECIFIED", + "ACTIVE", + "DELETED" + ], + "enumDescriptions": [ + "State unspecified.", + "The key is active.", + "The key is soft-deleted. Soft-deleted keys are permanently deleted after approximately 30 days. You can restore a soft-deleted key using UndeleteWorkforcePoolProviderKey." + ], + "readOnly": true, + "type": "string" + }, + "use": { + "description": "Required. The purpose of the key.", + "enum": [ + "KEY_USE_UNSPECIFIED", + "ENCRYPTION" + ], + "enumDescriptions": [ + "KeyUse unspecified.", + "The key is used for encryption." + ], + "type": "string" + } + }, + "type": "object" + }, "WorkloadIdentityPool": { "description": "Represents a collection of external workload identities. You can define IAM policies to grant these identities access to Google Cloud resources.", "id": "WorkloadIdentityPool", @@ -3793,6 +4466,20 @@ "description": "A display name for the pool. Cannot exceed 32 characters.", "type": "string" }, + "identityMode": { + "description": "Immutable. The identity mode of the pool.", + "enum": [ + "IDENTITY_MODE_UNSPECIFIED", + "FEDERATION_ONLY", + "TRUST_DOMAIN" + ], + "enumDescriptions": [ + "Existing pools will be in this mode. For existing worklod identity pools created through the public API, they will act as if they are set to FEDERATION_ONLY.", + "With FEDERATION_ONLY mode, providers can be created at the root level within the pool. Attribute mappings must specify a \"google.subject\" claim that specifies the identity of the federation workload. Namespace or any sub-namespace resources is not allowed with this mode.", + "With TRUST_DOMAIN mode, providers can be created at the root level within the pool. Attribute mappings must specify the \"google.namespace\" and \"google.workload_identifier\" claims that, respectively, specify the namespace and individual sub-namespace identifier for the workload. Namespaces and sub-Namespace resources are allowed." 
+ ], + "type": "string" + }, "name": { "description": "Output only. The resource name of the pool.", "readOnly": true, @@ -3883,6 +4570,55 @@ } }, "type": "object" + }, + "WorkloadIdentityPoolProviderKey": { + "description": "Represents a public key configuration for your workload identity pool provider. The key can be configured in your identity provider to encrypt the SAML assertions. Google holds the corresponding private key which it uses to decrypt encrypted tokens.", + "id": "WorkloadIdentityPoolProviderKey", + "properties": { + "expireTime": { + "description": "Output only. Time after which the key will be permanently purged and cannot be recovered. Note that the key may get purged before this timestamp if the total limit of keys per provider is crossed.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "keyData": { + "$ref": "KeyData", + "description": "Immutable. Public half of the asymmetric key." + }, + "name": { + "description": "Output only. The resource name of the key.", + "readOnly": true, + "type": "string" + }, + "state": { + "description": "Output only. The state of the key.", + "enum": [ + "STATE_UNSPECIFIED", + "ACTIVE", + "DELETED" + ], + "enumDescriptions": [ + "State unspecified.", + "The key is active.", + "The key is soft-deleted. Soft-deleted keys are permanently deleted after approximately 30 days. You can restore a soft-deleted key using UndeleteWorkloadIdentityPoolProviderKey. While a key is deleted, you cannot use it during the federation." + ], + "readOnly": true, + "type": "string" + }, + "use": { + "description": "Required. The purpose of the key.", + "enum": [ + "KEY_USE_UNSPECIFIED", + "ENCRYPTION" + ], + "enumDescriptions": [ + "The key use is not known.", + "The public key is used for encryption purposes." 
+ ], + "type": "string" + } + }, + "type": "object" } }, "servicePath": "", diff --git a/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-gen.go b/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-gen.go index d67b7c8d13..f1ea03e96d 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/iam/v1/iam-gen.go @@ -71,6 +71,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "iam:v1" const apiName = "iam" @@ -328,6 +329,7 @@ type ProjectsLocationsService struct { func NewProjectsLocationsWorkloadIdentityPoolsService(s *Service) *ProjectsLocationsWorkloadIdentityPoolsService { rs := &ProjectsLocationsWorkloadIdentityPoolsService{s: s} + rs.Namespaces = NewProjectsLocationsWorkloadIdentityPoolsNamespacesService(s) rs.Operations = NewProjectsLocationsWorkloadIdentityPoolsOperationsService(s) rs.Providers = NewProjectsLocationsWorkloadIdentityPoolsProvidersService(s) return rs @@ -336,11 +338,106 @@ func NewProjectsLocationsWorkloadIdentityPoolsService(s *Service) *ProjectsLocat type ProjectsLocationsWorkloadIdentityPoolsService struct { s *Service + Namespaces *ProjectsLocationsWorkloadIdentityPoolsNamespacesService + Operations *ProjectsLocationsWorkloadIdentityPoolsOperationsService Providers *ProjectsLocationsWorkloadIdentityPoolsProvidersService } +func NewProjectsLocationsWorkloadIdentityPoolsNamespacesService(s *Service) *ProjectsLocationsWorkloadIdentityPoolsNamespacesService { + rs := &ProjectsLocationsWorkloadIdentityPoolsNamespacesService{s: s} + rs.ManagedIdentities = NewProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesService(s) + rs.Operations = NewProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsService(s) + rs.WorkloadSources = NewProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesService(s) + 
return rs +} + +type ProjectsLocationsWorkloadIdentityPoolsNamespacesService struct { + s *Service + + ManagedIdentities *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesService + + Operations *ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsService + + WorkloadSources *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesService +} + +func NewProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesService(s *Service) *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesService { + rs := &ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesService{s: s} + rs.Operations = NewProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsService(s) + rs.WorkloadSources = NewProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesService(s) + return rs +} + +type ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesService struct { + s *Service + + Operations *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsService + + WorkloadSources *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesService +} + +func NewProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsService(s *Service) *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsService { + rs := &ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsService{s: s} + return rs +} + +type ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsService struct { + s *Service +} + +func NewProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesService(s *Service) *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesService { + rs := &ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesService{s: s} + rs.Operations = 
NewProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsService(s) + return rs +} + +type ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesService struct { + s *Service + + Operations *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsService +} + +func NewProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsService(s *Service) *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsService { + rs := &ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsService{s: s} + return rs +} + +type ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsService struct { + s *Service +} + +func NewProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsService(s *Service) *ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsService { + rs := &ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsService{s: s} + return rs +} + +type ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsService struct { + s *Service +} + +func NewProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesService(s *Service) *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesService { + rs := &ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesService{s: s} + rs.Operations = NewProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsService(s) + return rs +} + +type ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesService struct { + s *Service + + Operations *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsService +} + +func NewProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsService(s *Service) *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsService { + rs := 
&ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsService{s: s} + return rs +} + +type ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsService struct { + s *Service +} + func NewProjectsLocationsWorkloadIdentityPoolsOperationsService(s *Service) *ProjectsLocationsWorkloadIdentityPoolsOperationsService { rs := &ProjectsLocationsWorkloadIdentityPoolsOperationsService{s: s} return rs @@ -1060,10 +1157,19 @@ type GoogleIamAdminV1WorkforcePoolProviderOidc struct { // the JWT issued by the identity provider. ClientId string `json:"clientId,omitempty"` + // ClientSecret: The optional client secret. Required to enable + // Authorization Code flow for web sign-in. + ClientSecret *GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret `json:"clientSecret,omitempty"` + // IssuerUri: Required. The OIDC issuer URI. Must be a valid URI using // the 'https' scheme. IssuerUri string `json:"issuerUri,omitempty"` + // WebSsoConfig: Required. Configuration for web single sign-on for the + // OIDC provider. Here, web sign-in refers to console sign-in and gcloud + // sign-in through the browser. + WebSsoConfig *GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig `json:"webSsoConfig,omitempty"` + // ForceSendFields is a list of field names (e.g. "ClientId") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -1087,6 +1193,131 @@ func (s *GoogleIamAdminV1WorkforcePoolProviderOidc) MarshalJSON() ([]byte, error return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret: Representation +// of a client secret configured for the OIDC provider. +type GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret struct { + // Value: The value of the client secret. 
+ Value *GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Value") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Value") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret) MarshalJSON() ([]byte, error) { + type NoMethod GoogleIamAdminV1WorkforcePoolProviderOidcClientSecret + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue: +// Representation of the value of the client secret. +type GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue struct { + // PlainText: Input only. The plain text of the client secret value. For + // security reasons, this field is only used for input and will never be + // populated in any response. + PlainText string `json:"plainText,omitempty"` + + // Thumbprint: Output only. A thumbprint to represent the current client + // secret value. + Thumbprint string `json:"thumbprint,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PlainText") to + // unconditionally include in API requests. 
By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PlainText") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue) MarshalJSON() ([]byte, error) { + type NoMethod GoogleIamAdminV1WorkforcePoolProviderOidcClientSecretValue + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig: Configuration +// for web single sign-on for the OIDC provider. +type GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig struct { + // AdditionalScopes: Additional scopes to request for in the OIDC + // authentication request on top of scopes requested by default. By + // default, the `openid`, `profile` and `email` scopes that are + // supported by the identity provider are requested. Each additional + // scope may be at most 256 characters. A maximum of 10 additional + // scopes may be configured. + AdditionalScopes []string `json:"additionalScopes,omitempty"` + + // AssertionClaimsBehavior: Required. The behavior for how OIDC Claims + // are included in the `assertion` object used for attribute mapping and + // attribute condition. 
+ // + // Possible values: + // "ASSERTION_CLAIMS_BEHAVIOR_UNSPECIFIED" - No assertion claims + // behavior specified. + // "MERGE_USER_INFO_OVER_ID_TOKEN_CLAIMS" - Merge the UserInfo + // Endpoint Claims with ID Token Claims, preferring UserInfo Claim + // Values for the same Claim Name. Only possible for flows granting an + // Access Token, which comprise only the Authorization Code Flow at the + // moment. + // "ONLY_ID_TOKEN_CLAIMS" - Only include ID Token Claims. + AssertionClaimsBehavior string `json:"assertionClaimsBehavior,omitempty"` + + // ResponseType: Required. The Response Type to request for in the OIDC + // Authorization Request for web sign-in. + // + // Possible values: + // "RESPONSE_TYPE_UNSPECIFIED" - No Response Type specified. + // "CODE" - The `response_type=code` selection uses the Authorization + // Code Flow for web sign-in. Requires a configured client secret. + // "ID_TOKEN" - The `response_type=id_token` selection uses the + // Implicit Flow for web sign-in. + ResponseType string `json:"responseType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AdditionalScopes") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdditionalScopes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig) MarshalJSON() ([]byte, error) { + type NoMethod GoogleIamAdminV1WorkforcePoolProviderOidcWebSsoConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GoogleIamAdminV1WorkforcePoolProviderSaml: Represents a SAML identity // provider. type GoogleIamAdminV1WorkforcePoolProviderSaml struct { @@ -1131,6 +1362,65 @@ func (s *GoogleIamAdminV1WorkforcePoolProviderSaml) MarshalJSON() ([]byte, error return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// KeyData: Represents a public key data along with its format. +type KeyData struct { + // Format: Output only. The format of the key. + // + // Possible values: + // "KEY_FORMAT_UNSPECIFIED" - No format has been specified. This is an + // invalid format and must not be used. + // "RSA_X509_PEM" - A RSA public key wrapped in an X.509v3 certificate + // ([RFC5280] ( https://www.ietf.org/rfc/rfc5280.txt)), encoded in + // base64, and wrapped in [public certificate + // label](https://datatracker.ietf.org/doc/html/rfc7468#section-5.1). + Format string `json:"format,omitempty"` + + // Key: Output only. The key data. The format of the key is represented + // by the format field. + Key string `json:"key,omitempty"` + + // KeySpec: Required. The specifications for the key. + // + // Possible values: + // "KEY_SPEC_UNSPECIFIED" - No key specification specified. + // "RSA_2048" - A 2048 bit RSA key. + // "RSA_3072" - A 3072 bit RSA key. + // "RSA_4096" - A 4096 bit RSA key. + KeySpec string `json:"keySpec,omitempty"` + + // NotAfterTime: Output only. Latest timestamp when this key is valid. + // Attempts to use this key after this time will fail. Only present if + // the key data represents a X.509 certificate. + NotAfterTime string `json:"notAfterTime,omitempty"` + + // NotBeforeTime: Output only. Earliest timestamp when this key is + // valid. 
Attempts to use this key before this time will fail. Only + // present if the key data represents a X.509 certificate. + NotBeforeTime string `json:"notBeforeTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Format") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Format") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *KeyData) MarshalJSON() ([]byte, error) { + type NoMethod KeyData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // LintPolicyRequest: The request to lint a Cloud IAM policy object. type LintPolicyRequest struct { // Condition: google.iam.v1.Binding.condition object to be linted. @@ -1388,6 +1678,44 @@ func (s *ListServiceAccountsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ListWorkforcePoolProviderKeysResponse: Response message for +// ListWorkforcePoolProviderKeys. +type ListWorkforcePoolProviderKeysResponse struct { + // NextPageToken: A token, which can be sent as `page_token` to retrieve + // the next page. If this field is omitted, there are no subsequent + // pages. 
+ NextPageToken string `json:"nextPageToken,omitempty"` + + // WorkforcePoolProviderKeys: A list of WorkforcePoolProviderKeys. + WorkforcePoolProviderKeys []*WorkforcePoolProviderKey `json:"workforcePoolProviderKeys,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListWorkforcePoolProviderKeysResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListWorkforcePoolProviderKeysResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ListWorkforcePoolProvidersResponse: Response message for // ListWorkforcePoolProviders. type ListWorkforcePoolProvidersResponse struct { @@ -1463,6 +1791,45 @@ func (s *ListWorkforcePoolsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ListWorkloadIdentityPoolProviderKeysResponse: Response message for +// ListWorkloadIdentityPoolProviderKeys. 
+type ListWorkloadIdentityPoolProviderKeysResponse struct { + // NextPageToken: A token, which can be sent as `page_token` to retrieve + // the next page. If this field is omitted, there are no subsequent + // pages. + NextPageToken string `json:"nextPageToken,omitempty"` + + // WorkloadIdentityPoolProviderKeys: A list of + // WorkloadIdentityPoolProviderKey + WorkloadIdentityPoolProviderKeys []*WorkloadIdentityPoolProviderKey `json:"workloadIdentityPoolProviderKeys,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListWorkloadIdentityPoolProviderKeysResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListWorkloadIdentityPoolProviderKeysResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ListWorkloadIdentityPoolProvidersResponse: Response message for // ListWorkloadIdentityPoolProviders. 
type ListWorkloadIdentityPoolProvidersResponse struct { @@ -1558,6 +1925,16 @@ type Oidc struct { // IssuerUri: Required. The OIDC issuer URL. Must be an HTTPS endpoint. IssuerUri string `json:"issuerUri,omitempty"` + // JwksJson: Optional. OIDC JWKs in JSON String format. For details on + // the definition of a JWK, see https://tools.ietf.org/html/rfc7517. If + // not set, the `jwks_uri` from the discovery document(fetched from the + // .well-known path of the `issuer_uri`) will be used. Currently, RSA + // and EC asymmetric keys are supported. The JWK must use following + // format and include only the following fields: { "keys": [ { "kty": + // "RSA/EC", "alg": "", "use": "sig", "kid": "", "n": "", "e": "", "x": + // "", "y": "", "crv": "" } ] } + JwksJson string `json:"jwksJson,omitempty"` + // ForceSendFields is a list of field names (e.g. "AllowedAudiences") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -2160,12 +2537,13 @@ type Role struct { // when bound in an IAM policy. IncludedPermissions []string `json:"includedPermissions,omitempty"` - // Name: The name of the role. When Role is used in CreateRole, the role - // name must not be set. When Role is used in output and other input - // such as UpdateRole, the role name is the complete path, e.g., - // roles/logging.viewer for predefined roles and - // organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom - // roles. + // Name: The name of the role. When `Role` is used in `CreateRole`, the + // role name must not be set. When `Role` is used in output and other + // input such as `UpdateRole`, the role name is the complete path. For + // example, `roles/logging.viewer` for predefined roles, + // `organizations/{ORGANIZATION_ID}/roles/my-role` for + // organization-level custom roles, and + // `projects/{PROJECT_ID}/roles/my-role` for project-level custom roles. 
Name string `json:"name,omitempty"` // Stage: The current launch stage of the role. If the `ALPHA` launch @@ -2825,6 +3203,11 @@ func (s *UndeleteServiceAccountResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UndeleteWorkforcePoolProviderKeyRequest: Request message for +// UndeleteWorkforcePoolProviderKey. +type UndeleteWorkforcePoolProviderKeyRequest struct { +} + // UndeleteWorkforcePoolProviderRequest: Request message for // UndeleteWorkforcePoolProvider. type UndeleteWorkforcePoolProviderRequest struct { @@ -2840,6 +3223,11 @@ type UndeleteWorkforcePoolRequest struct { type UndeleteWorkforcePoolSubjectRequest struct { } +// UndeleteWorkloadIdentityPoolProviderKeyRequest: Request message for +// UndeleteWorkloadIdentityPoolProviderKey. +type UndeleteWorkloadIdentityPoolProviderKeyRequest struct { +} + // UndeleteWorkloadIdentityPoolProviderRequest: Request message for // UndeleteWorkloadIdentityPoolProvider. type UndeleteWorkloadIdentityPoolProviderRequest struct { @@ -2890,9 +3278,9 @@ type WorkforcePool struct { // 256 characters. Description string `json:"description,omitempty"` - // Disabled: Whether the pool is disabled. You cannot use a disabled - // pool to exchange tokens, or use existing tokens to access resources. - // If the pool is re-enabled, existing tokens grant access again. + // Disabled: Disables the workforce pool. You cannot use a disabled pool + // to exchange tokens, or use existing tokens to access resources. If + // the pool is re-enabled, existing tokens grant access again. Disabled bool `json:"disabled,omitempty"` // DisplayName: A user-specified display name of the pool in Google @@ -3031,9 +3419,9 @@ type WorkforcePoolProvider struct { // exceed 256 characters. Description string `json:"description,omitempty"` - // Disabled: Whether the provider is disabled. You cannot use a disabled - // provider to exchange tokens. However, existing tokens still grant - // access. 
+ // Disabled: Disables the workforce pool provider. You cannot use a + // disabled provider to exchange tokens. However, existing tokens still + // grant access. Disabled bool `json:"disabled,omitempty"` // DisplayName: A user-specified display name for the provider. Cannot @@ -3090,45 +3478,45 @@ func (s *WorkforcePoolProvider) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// WorkloadIdentityPool: Represents a collection of external workload -// identities. You can define IAM policies to grant these identities -// access to Google Cloud resources. -type WorkloadIdentityPool struct { - // Description: A description of the pool. Cannot exceed 256 characters. - Description string `json:"description,omitempty"` - - // Disabled: Whether the pool is disabled. You cannot use a disabled - // pool to exchange tokens, or use existing tokens to access resources. - // If the pool is re-enabled, existing tokens grant access again. - Disabled bool `json:"disabled,omitempty"` +// WorkforcePoolProviderKey: Represents a public key configuration for a +// Workforce Pool Provider. The key can be configured in your identity +// provider to encrypt SAML assertions. Google holds the corresponding +// private key, which it uses to decrypt encrypted tokens. +type WorkforcePoolProviderKey struct { + // ExpireTime: Output only. The time after which the key will be + // permanently deleted and cannot be recovered. Note that the key may + // get purged before this time if the total limit of keys per provider + // is exceeded. + ExpireTime string `json:"expireTime,omitempty"` - // DisplayName: A display name for the pool. Cannot exceed 32 - // characters. - DisplayName string `json:"displayName,omitempty"` + // KeyData: Immutable. Public half of the asymmetric key. + KeyData *KeyData `json:"keyData,omitempty"` - // Name: Output only. The resource name of the pool. + // Name: Output only. The resource name of the key. 
Name string `json:"name,omitempty"` - // State: Output only. The state of the pool. + // State: Output only. The state of the key. // // Possible values: // "STATE_UNSPECIFIED" - State unspecified. - // "ACTIVE" - The pool is active, and may be used in Google Cloud - // policies. - // "DELETED" - The pool is soft-deleted. Soft-deleted pools are + // "ACTIVE" - The key is active. + // "DELETED" - The key is soft-deleted. Soft-deleted keys are // permanently deleted after approximately 30 days. You can restore a - // soft-deleted pool using UndeleteWorkloadIdentityPool. You cannot - // reuse the ID of a soft-deleted pool until it is permanently deleted. - // While a pool is deleted, you cannot use it to exchange tokens, or use - // existing tokens to access resources. If the pool is undeleted, - // existing tokens grant access again. + // soft-deleted key using UndeleteWorkforcePoolProviderKey. State string `json:"state,omitempty"` + // Use: Required. The purpose of the key. + // + // Possible values: + // "KEY_USE_UNSPECIFIED" - KeyUse unspecified. + // "ENCRYPTION" - The key is used for encryption. + Use string `json:"use,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Description") to + // ForceSendFields is a list of field names (e.g. "ExpireTime") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -3136,17 +3524,97 @@ type WorkloadIdentityPool struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. 
However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "ExpireTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *WorkloadIdentityPool) MarshalJSON() ([]byte, error) { - type NoMethod WorkloadIdentityPool +func (s *WorkforcePoolProviderKey) MarshalJSON() ([]byte, error) { + type NoMethod WorkforcePoolProviderKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// WorkloadIdentityPool: Represents a collection of external workload +// identities. You can define IAM policies to grant these identities +// access to Google Cloud resources. +type WorkloadIdentityPool struct { + // Description: A description of the pool. Cannot exceed 256 characters. + Description string `json:"description,omitempty"` + + // Disabled: Whether the pool is disabled. You cannot use a disabled + // pool to exchange tokens, or use existing tokens to access resources. + // If the pool is re-enabled, existing tokens grant access again. + Disabled bool `json:"disabled,omitempty"` + + // DisplayName: A display name for the pool. Cannot exceed 32 + // characters. + DisplayName string `json:"displayName,omitempty"` + + // IdentityMode: Immutable. The identity mode of the pool. + // + // Possible values: + // "IDENTITY_MODE_UNSPECIFIED" - Existing pools will be in this mode. + // For existing worklod identity pools created through the public API, + // they will act as if they are set to FEDERATION_ONLY. 
+ // "FEDERATION_ONLY" - With FEDERATION_ONLY mode, providers can be + // created at the root level within the pool. Attribute mappings must + // specify a "google.subject" claim that specifies the identity of the + // federation workload. Namespace or any sub-namespace resources is not + // allowed with this mode. + // "TRUST_DOMAIN" - With TRUST_DOMAIN mode, providers can be created + // at the root level within the pool. Attribute mappings must specify + // the "google.namespace" and "google.workload_identifier" claims that, + // respectively, specify the namespace and individual sub-namespace + // identifier for the workload. Namespaces and sub-Namespace resources + // are allowed. + IdentityMode string `json:"identityMode,omitempty"` + + // Name: Output only. The resource name of the pool. + Name string `json:"name,omitempty"` + + // State: Output only. The state of the pool. + // + // Possible values: + // "STATE_UNSPECIFIED" - State unspecified. + // "ACTIVE" - The pool is active, and may be used in Google Cloud + // policies. + // "DELETED" - The pool is soft-deleted. Soft-deleted pools are + // permanently deleted after approximately 30 days. You can restore a + // soft-deleted pool using UndeleteWorkloadIdentityPool. You cannot + // reuse the ID of a soft-deleted pool until it is permanently deleted. + // While a pool is deleted, you cannot use it to exchange tokens, or use + // existing tokens to access resources. If the pool is undeleted, + // existing tokens grant access again. + State string `json:"state,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *WorkloadIdentityPool) MarshalJSON() ([]byte, error) { + type NoMethod WorkloadIdentityPool raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -3288,6 +3756,69 @@ func (s *WorkloadIdentityPoolProvider) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// WorkloadIdentityPoolProviderKey: Represents a public key +// configuration for your workload identity pool provider. The key can +// be configured in your identity provider to encrypt the SAML +// assertions. Google holds the corresponding private key which it uses +// to decrypt encrypted tokens. +type WorkloadIdentityPoolProviderKey struct { + // ExpireTime: Output only. Time after which the key will be permanently + // purged and cannot be recovered. Note that the key may get purged + // before this timestamp if the total limit of keys per provider is + // crossed. + ExpireTime string `json:"expireTime,omitempty"` + + // KeyData: Immutable. Public half of the asymmetric key. + KeyData *KeyData `json:"keyData,omitempty"` + + // Name: Output only. The resource name of the key. + Name string `json:"name,omitempty"` + + // State: Output only. The state of the key. 
+ // + // Possible values: + // "STATE_UNSPECIFIED" - State unspecified. + // "ACTIVE" - The key is active. + // "DELETED" - The key is soft-deleted. Soft-deleted keys are + // permanently deleted after approximately 30 days. You can restore a + // soft-deleted key using UndeleteWorkloadIdentityPoolProviderKey. While + // a key is deleted, you cannot use it during the federation. + State string `json:"state,omitempty"` + + // Use: Required. The purpose of the key. + // + // Possible values: + // "KEY_USE_UNSPECIFIED" - The key use is not known. + // "ENCRYPTION" - The public key is used for encryption purposes. + Use string `json:"use,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ExpireTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExpireTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *WorkloadIdentityPoolProviderKey) MarshalJSON() ([]byte, error) { + type NoMethod WorkloadIdentityPoolProviderKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // method id "iam.iamPolicies.lintPolicy": type IamPoliciesLintPolicyCall struct { @@ -6057,97 +6588,98 @@ func (c *LocationsWorkforcePoolsProvidersUndeleteCall) Do(opts ...googleapi.Call } -// method id "iam.locations.workforcePools.providers.keys.operations.get": +// method id "iam.locations.workforcePools.providers.keys.create": -type LocationsWorkforcePoolsProvidersKeysOperationsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type LocationsWorkforcePoolsProvidersKeysCreateCall struct { + s *Service + parent string + workforcepoolproviderkey *WorkforcePoolProviderKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this method to poll the operation result at intervals as -// recommended by the API service. +// Create: Creates a new WorkforcePoolProviderKey in a +// WorkforcePoolProvider. // -// - name: The name of the operation resource. -func (r *LocationsWorkforcePoolsProvidersKeysOperationsService) Get(name string) *LocationsWorkforcePoolsProvidersKeysOperationsGetCall { - c := &LocationsWorkforcePoolsProvidersKeysOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name +// - parent: The provider to create this key in. 
+func (r *LocationsWorkforcePoolsProvidersKeysService) Create(parent string, workforcepoolproviderkey *WorkforcePoolProviderKey) *LocationsWorkforcePoolsProvidersKeysCreateCall { + c := &LocationsWorkforcePoolsProvidersKeysCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.workforcepoolproviderkey = workforcepoolproviderkey + return c +} + +// WorkforcePoolProviderKeyId sets the optional parameter +// "workforcePoolProviderKeyId": Required. The ID to use for the key, +// which becomes the final component of the resource name. This value +// must be 4-32 characters, and may contain the characters [a-z0-9-]. +func (c *LocationsWorkforcePoolsProvidersKeysCreateCall) WorkforcePoolProviderKeyId(workforcePoolProviderKeyId string) *LocationsWorkforcePoolsProvidersKeysCreateCall { + c.urlParams_.Set("workforcePoolProviderKeyId", workforcePoolProviderKeyId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsProvidersKeysOperationsGetCall { +func (c *LocationsWorkforcePoolsProvidersKeysCreateCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsProvidersKeysCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) IfNoneMatch(entityTag string) *LocationsWorkforcePoolsProvidersKeysOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) Context(ctx context.Context) *LocationsWorkforcePoolsProvidersKeysOperationsGetCall { +func (c *LocationsWorkforcePoolsProvidersKeysCreateCall) Context(ctx context.Context) *LocationsWorkforcePoolsProvidersKeysCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) Header() http.Header { +func (c *LocationsWorkforcePoolsProvidersKeysCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsWorkforcePoolsProvidersKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.workforcepoolproviderkey) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/keys") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.locations.workforcePools.providers.keys.operations.get" call. +// Do executes the "iam.locations.workforcePools.providers.keys.create" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LocationsWorkforcePoolsProvidersKeysCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6178,23 +6710,31 @@ func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) Do(opts ...googl } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", - // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/keys/{keysId}/operations/{operationsId}", - // "httpMethod": "GET", - // "id": "iam.locations.workforcePools.providers.keys.operations.get", + // "description": "Creates a new WorkforcePoolProviderKey in a WorkforcePoolProvider.", + // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/keys", + // "httpMethod": "POST", + // "id": "iam.locations.workforcePools.providers.keys.create", // "parameterOrder": [ - // "name" + // "parent" // ], // "parameters": { - // "name": { - // "description": "The name of the operation resource.", + // "parent": { + // "description": "Required. The provider to create this key in.", // "location": "path", - // "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+/keys/[^/]+/operations/[^/]+$", + // "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+$", // "required": true, // "type": "string" + // }, + // "workforcePoolProviderKeyId": { + // "description": "Required. The ID to use for the key, which becomes the final component of the resource name. 
This value must be 4-32 characters, and may contain the characters [a-z0-9-].", + // "location": "query", + // "type": "string" // } // }, - // "path": "v1/{+name}", + // "path": "v1/{+parent}/keys", + // "request": { + // "$ref": "WorkforcePoolProviderKey" + // }, // "response": { // "$ref": "Operation" // }, @@ -6205,24 +6745,22 @@ func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) Do(opts ...googl } -// method id "iam.locations.workforcePools.providers.operations.get": +// method id "iam.locations.workforcePools.providers.keys.delete": -type LocationsWorkforcePoolsProvidersOperationsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type LocationsWorkforcePoolsProvidersKeysDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this method to poll the operation result at intervals as -// recommended by the API service. +// Delete: Deletes a WorkforcePoolProviderKey. You can undelete a key +// for 30 days. After 30 days, deletion is permanent. // -// - name: The name of the operation resource. -func (r *LocationsWorkforcePoolsProvidersOperationsService) Get(name string) *LocationsWorkforcePoolsProvidersOperationsGetCall { - c := &LocationsWorkforcePoolsProvidersOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The name of the key to delete. +func (r *LocationsWorkforcePoolsProvidersKeysService) Delete(name string) *LocationsWorkforcePoolsProvidersKeysDeleteCall { + c := &LocationsWorkforcePoolsProvidersKeysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } @@ -6230,54 +6768,41 @@ func (r *LocationsWorkforcePoolsProvidersOperationsService) Get(name string) *Lo // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsProvidersOperationsGetCall { +func (c *LocationsWorkforcePoolsProvidersKeysDeleteCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsProvidersKeysDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) IfNoneMatch(entityTag string) *LocationsWorkforcePoolsProvidersOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) Context(ctx context.Context) *LocationsWorkforcePoolsProvidersOperationsGetCall { +func (c *LocationsWorkforcePoolsProvidersKeysDeleteCall) Context(ctx context.Context) *LocationsWorkforcePoolsProvidersKeysDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) Header() http.Header { +func (c *LocationsWorkforcePoolsProvidersKeysDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsWorkforcePoolsProvidersKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -6288,14 +6813,14 @@ func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) doRequest(alt string return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.locations.workforcePools.providers.operations.get" call. +// Do executes the "iam.locations.workforcePools.providers.keys.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LocationsWorkforcePoolsProvidersKeysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6326,18 +6851,18 @@ func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", - // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/operations/{operationsId}", - // "httpMethod": "GET", - // "id": "iam.locations.workforcePools.providers.operations.get", + // "description": "Deletes a WorkforcePoolProviderKey. You can undelete a key for 30 days. After 30 days, deletion is permanent.", + // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/keys/{keysId}", + // "httpMethod": "DELETE", + // "id": "iam.locations.workforcePools.providers.keys.delete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "The name of the operation resource.", + // "description": "Required. 
The name of the key to delete.", // "location": "path", - // "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+/operations/[^/]+$", + // "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+/keys/[^/]+$", // "required": true, // "type": "string" // } @@ -6353,35 +6878,22 @@ func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) Do(opts ...googleapi } -// method id "iam.locations.workforcePools.subjects.delete": +// method id "iam.locations.workforcePools.providers.keys.get": -type LocationsWorkforcePoolsSubjectsDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LocationsWorkforcePoolsProvidersKeysGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes a WorkforcePoolSubject. Subject must not already be -// in a deleted state. A WorkforcePoolSubject is automatically created -// the first time an external credential is exchanged for a Google Cloud -// credential with a mapped `google.subject` attribute. There is no path -// to manually create WorkforcePoolSubjects. Once deleted, the -// WorkforcePoolSubject may not be used for 30 days. After 30 days, the -// WorkforcePoolSubject will be deleted forever and can be reused in -// token exchanges with Google Cloud STS. This will automatically create -// a new WorkforcePoolSubject that is independent of the previously -// deleted WorkforcePoolSubject with the same google.subject value. +// Get: Gets a WorkforcePoolProviderKey. // -// - name: The resource name of the WorkforcePoolSubject. Special -// characters, like '/' and ':', must be escaped, because all URLs -// need to conform to the "When to Escape and Unescape" section of -// RFC3986 (https://www.ietf.org/rfc/rfc2396.txt). Format: -// `locations/{location}/workforcePools/{workforce_pool_id}/subjects/{s -// ubject_id}`. 
-func (r *LocationsWorkforcePoolsSubjectsService) Delete(name string) *LocationsWorkforcePoolsSubjectsDeleteCall { - c := &LocationsWorkforcePoolsSubjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The name of the key to retrieve. +func (r *LocationsWorkforcePoolsProvidersKeysService) Get(name string) *LocationsWorkforcePoolsProvidersKeysGetCall { + c := &LocationsWorkforcePoolsProvidersKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } @@ -6389,41 +6901,54 @@ func (r *LocationsWorkforcePoolsSubjectsService) Delete(name string) *LocationsW // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LocationsWorkforcePoolsSubjectsDeleteCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsSubjectsDeleteCall { +func (c *LocationsWorkforcePoolsProvidersKeysGetCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsProvidersKeysGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LocationsWorkforcePoolsProvidersKeysGetCall) IfNoneMatch(entityTag string) *LocationsWorkforcePoolsProvidersKeysGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *LocationsWorkforcePoolsSubjectsDeleteCall) Context(ctx context.Context) *LocationsWorkforcePoolsSubjectsDeleteCall { +func (c *LocationsWorkforcePoolsProvidersKeysGetCall) Context(ctx context.Context) *LocationsWorkforcePoolsProvidersKeysGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LocationsWorkforcePoolsSubjectsDeleteCall) Header() http.Header { +func (c *LocationsWorkforcePoolsProvidersKeysGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LocationsWorkforcePoolsSubjectsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsWorkforcePoolsProvidersKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -6434,14 +6959,14 @@ func (c *LocationsWorkforcePoolsSubjectsDeleteCall) doRequest(alt string) (*http return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.locations.workforcePools.subjects.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *LocationsWorkforcePoolsSubjectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "iam.locations.workforcePools.providers.keys.get" call. +// Exactly one of *WorkforcePoolProviderKey or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *WorkforcePoolProviderKey.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *LocationsWorkforcePoolsProvidersKeysGetCall) Do(opts ...googleapi.CallOption) (*WorkforcePoolProviderKey, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6460,7 +6985,7 @@ func (c *LocationsWorkforcePoolsSubjectsDeleteCall) Do(opts ...googleapi.CallOpt if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &WorkforcePoolProviderKey{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6472,25 +6997,25 @@ func (c *LocationsWorkforcePoolsSubjectsDeleteCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Deletes a WorkforcePoolSubject. Subject must not already be in a deleted state. A WorkforcePoolSubject is automatically created the first time an external credential is exchanged for a Google Cloud credential with a mapped `google.subject` attribute. There is no path to manually create WorkforcePoolSubjects. Once deleted, the WorkforcePoolSubject may not be used for 30 days. After 30 days, the WorkforcePoolSubject will be deleted forever and can be reused in token exchanges with Google Cloud STS. 
This will automatically create a new WorkforcePoolSubject that is independent of the previously deleted WorkforcePoolSubject with the same google.subject value.", - // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/subjects/{subjectsId}", - // "httpMethod": "DELETE", - // "id": "iam.locations.workforcePools.subjects.delete", + // "description": "Gets a WorkforcePoolProviderKey.", + // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/keys/{keysId}", + // "httpMethod": "GET", + // "id": "iam.locations.workforcePools.providers.keys.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the WorkforcePoolSubject. Special characters, like '/' and ':', must be escaped, because all URLs need to conform to the \"When to Escape and Unescape\" section of [RFC3986](https://www.ietf.org/rfc/rfc2396.txt). Format: `locations/{location}/workforcePools/{workforce_pool_id}/subjects/{subject_id}`", + // "description": "Required. 
The name of the key to retrieve.", // "location": "path", - // "pattern": "^locations/[^/]+/workforcePools/[^/]+/subjects/[^/]+$", + // "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+/keys/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1/{+name}", // "response": { - // "$ref": "Operation" + // "$ref": "WorkforcePoolProviderKey" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -6499,37 +7024,243 @@ func (c *LocationsWorkforcePoolsSubjectsDeleteCall) Do(opts ...googleapi.CallOpt } -// method id "iam.locations.workforcePools.subjects.undelete": +// method id "iam.locations.workforcePools.providers.keys.list": -type LocationsWorkforcePoolsSubjectsUndeleteCall struct { - s *Service - name string - undeleteworkforcepoolsubjectrequest *UndeleteWorkforcePoolSubjectRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LocationsWorkforcePoolsProvidersKeysListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Undelete: Undeletes a WorkforcePoolSubject, as long as it was deleted -// fewer than 30 days ago. +// List: Lists all non-deleted WorkforcePoolProviderKeys in a +// WorkforcePoolProvider. If `show_deleted` is set to `true`, then +// deleted keys are also listed. // -// - name: The resource name of the WorkforcePoolSubject. Special -// characters, like '/' and ':', must be escaped, because all URLs -// need to conform to the "When to Escape and Unescape" section of -// RFC3986 (https://www.ietf.org/rfc/rfc2396.txt). Format: -// `locations/{location}/workforcePools/{workforce_pool_id}/subjects/{s -// ubject_id}`. 
-func (r *LocationsWorkforcePoolsSubjectsService) Undelete(name string, undeleteworkforcepoolsubjectrequest *UndeleteWorkforcePoolSubjectRequest) *LocationsWorkforcePoolsSubjectsUndeleteCall { - c := &LocationsWorkforcePoolsSubjectsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - parent: The provider resource to list encryption keys for. Format: +// `locations/{location}/workforcePools/{workforce_pool_id}/providers/{ +// provider_id}`. +func (r *LocationsWorkforcePoolsProvidersKeysService) List(parent string) *LocationsWorkforcePoolsProvidersKeysListCall { + c := &LocationsWorkforcePoolsProvidersKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of keys to return. If unspecified, all keys are returned. The maximum +// value is 10; values above 10 are truncated to 10. +func (c *LocationsWorkforcePoolsProvidersKeysListCall) PageSize(pageSize int64) *LocationsWorkforcePoolsProvidersKeysListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token, +// received from a previous `ListWorkforcePoolProviderKeys` call. +// Provide this to retrieve the subsequent page. +func (c *LocationsWorkforcePoolsProvidersKeysListCall) PageToken(pageToken string) *LocationsWorkforcePoolsProvidersKeysListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ShowDeleted sets the optional parameter "showDeleted": Whether to +// return soft-deleted keys. +func (c *LocationsWorkforcePoolsProvidersKeysListCall) ShowDeleted(showDeleted bool) *LocationsWorkforcePoolsProvidersKeysListCall { + c.urlParams_.Set("showDeleted", fmt.Sprint(showDeleted)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *LocationsWorkforcePoolsProvidersKeysListCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsProvidersKeysListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LocationsWorkforcePoolsProvidersKeysListCall) IfNoneMatch(entityTag string) *LocationsWorkforcePoolsProvidersKeysListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LocationsWorkforcePoolsProvidersKeysListCall) Context(ctx context.Context) *LocationsWorkforcePoolsProvidersKeysListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LocationsWorkforcePoolsProvidersKeysListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LocationsWorkforcePoolsProvidersKeysListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/keys") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.locations.workforcePools.providers.keys.list" call. +// Exactly one of *ListWorkforcePoolProviderKeysResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *ListWorkforcePoolProviderKeysResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *LocationsWorkforcePoolsProvidersKeysListCall) Do(opts ...googleapi.CallOption) (*ListWorkforcePoolProviderKeysResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListWorkforcePoolProviderKeysResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all non-deleted WorkforcePoolProviderKeys in a WorkforcePoolProvider. 
If `show_deleted` is set to `true`, then deleted keys are also listed.", + // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/keys", + // "httpMethod": "GET", + // "id": "iam.locations.workforcePools.providers.keys.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "The maximum number of keys to return. If unspecified, all keys are returned. The maximum value is 10; values above 10 are truncated to 10.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A page token, received from a previous `ListWorkforcePoolProviderKeys` call. Provide this to retrieve the subsequent page.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The provider resource to list encryption keys for. Format: `locations/{location}/workforcePools/{workforce_pool_id}/providers/{provider_id}`", + // "location": "path", + // "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "showDeleted": { + // "description": "Whether to return soft-deleted keys.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "v1/{+parent}/keys", + // "response": { + // "$ref": "ListWorkforcePoolProviderKeysResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *LocationsWorkforcePoolsProvidersKeysListCall) Pages(ctx context.Context, f func(*ListWorkforcePoolProviderKeysResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "iam.locations.workforcePools.providers.keys.undelete": + +type LocationsWorkforcePoolsProvidersKeysUndeleteCall struct { + s *Service + name string + undeleteworkforcepoolproviderkeyrequest *UndeleteWorkforcePoolProviderKeyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Undelete: Undeletes a WorkforcePoolProviderKey, as long as it was +// deleted fewer than 30 days ago. +// +// - name: The name of the key to undelete. +func (r *LocationsWorkforcePoolsProvidersKeysService) Undelete(name string, undeleteworkforcepoolproviderkeyrequest *UndeleteWorkforcePoolProviderKeyRequest) *LocationsWorkforcePoolsProvidersKeysUndeleteCall { + c := &LocationsWorkforcePoolsProvidersKeysUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.undeleteworkforcepoolsubjectrequest = undeleteworkforcepoolsubjectrequest + c.undeleteworkforcepoolproviderkeyrequest = undeleteworkforcepoolproviderkeyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsSubjectsUndeleteCall { +func (c *LocationsWorkforcePoolsProvidersKeysUndeleteCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsProvidersKeysUndeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6537,21 +7268,21 @@ func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) Fields(s ...googleapi.Fiel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) Context(ctx context.Context) *LocationsWorkforcePoolsSubjectsUndeleteCall { +func (c *LocationsWorkforcePoolsProvidersKeysUndeleteCall) Context(ctx context.Context) *LocationsWorkforcePoolsProvidersKeysUndeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) Header() http.Header { +func (c *LocationsWorkforcePoolsProvidersKeysUndeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsWorkforcePoolsProvidersKeysUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -6559,7 +7290,7 @@ func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) doRequest(alt string) (*ht } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleteworkforcepoolsubjectrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleteworkforcepoolproviderkeyrequest) if err != nil { return nil, err } 
@@ -6579,14 +7310,14 @@ func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) doRequest(alt string) (*ht return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.locations.workforcePools.subjects.undelete" call. +// Do executes the "iam.locations.workforcePools.providers.keys.undelete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LocationsWorkforcePoolsProvidersKeysUndeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6617,25 +7348,25 @@ func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Undeletes a WorkforcePoolSubject, as long as it was deleted fewer than 30 days ago.", - // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/subjects/{subjectsId}:undelete", + // "description": "Undeletes a WorkforcePoolProviderKey, as long as it was deleted fewer than 30 days ago.", + // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/keys/{keysId}:undelete", // "httpMethod": "POST", - // "id": "iam.locations.workforcePools.subjects.undelete", + // "id": "iam.locations.workforcePools.providers.keys.undelete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The resource name of the WorkforcePoolSubject. 
Special characters, like '/' and ':', must be escaped, because all URLs need to conform to the \"When to Escape and Unescape\" section of [RFC3986](https://www.ietf.org/rfc/rfc2396.txt). Format: `locations/{location}/workforcePools/{workforce_pool_id}/subjects/{subject_id}`", + // "description": "Required. The name of the key to undelete.", // "location": "path", - // "pattern": "^locations/[^/]+/workforcePools/[^/]+/subjects/[^/]+$", + // "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+/keys/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1/{+name}:undelete", // "request": { - // "$ref": "UndeleteWorkforcePoolSubjectRequest" + // "$ref": "UndeleteWorkforcePoolProviderKeyRequest" // }, // "response": { // "$ref": "Operation" @@ -6647,9 +7378,9 @@ func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) Do(opts ...googleapi.CallO } -// method id "iam.locations.workforcePools.subjects.operations.get": +// method id "iam.locations.workforcePools.providers.keys.operations.get": -type LocationsWorkforcePoolsSubjectsOperationsGetCall struct { +type LocationsWorkforcePoolsProvidersKeysOperationsGetCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -6663,8 +7394,8 @@ type LocationsWorkforcePoolsSubjectsOperationsGetCall struct { // recommended by the API service. // // - name: The name of the operation resource. 
-func (r *LocationsWorkforcePoolsSubjectsOperationsService) Get(name string) *LocationsWorkforcePoolsSubjectsOperationsGetCall { - c := &LocationsWorkforcePoolsSubjectsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *LocationsWorkforcePoolsProvidersKeysOperationsService) Get(name string) *LocationsWorkforcePoolsProvidersKeysOperationsGetCall { + c := &LocationsWorkforcePoolsProvidersKeysOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } @@ -6672,7 +7403,7 @@ func (r *LocationsWorkforcePoolsSubjectsOperationsService) Get(name string) *Loc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsSubjectsOperationsGetCall { +func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsProvidersKeysOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6682,7 +7413,7 @@ func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) Fields(s ...googleapi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) IfNoneMatch(entityTag string) *LocationsWorkforcePoolsSubjectsOperationsGetCall { +func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) IfNoneMatch(entityTag string) *LocationsWorkforcePoolsProvidersKeysOperationsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -6690,21 +7421,21 @@ func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) IfNoneMatch(entityTag // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) Context(ctx context.Context) *LocationsWorkforcePoolsSubjectsOperationsGetCall { +func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) Context(ctx context.Context) *LocationsWorkforcePoolsProvidersKeysOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) Header() http.Header { +func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -6730,14 +7461,14 @@ func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) doRequest(alt string) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.locations.workforcePools.subjects.operations.get" call. +// Do executes the "iam.locations.workforcePools.providers.keys.operations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LocationsWorkforcePoolsProvidersKeysOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6769,9 +7500,9 @@ func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) Do(opts ...googleapi. return ret, nil // { // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", - // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/subjects/{subjectsId}/operations/{operationsId}", + // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/keys/{keysId}/operations/{operationsId}", // "httpMethod": "GET", - // "id": "iam.locations.workforcePools.subjects.operations.get", + // "id": "iam.locations.workforcePools.providers.keys.operations.get", // "parameterOrder": [ // "name" // ], @@ -6779,7 +7510,7 @@ func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) Do(opts ...googleapi. // "name": { // "description": "The name of the operation resource.", // "location": "path", - // "pattern": "^locations/[^/]+/workforcePools/[^/]+/subjects/[^/]+/operations/[^/]+$", + // "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+/keys/[^/]+/operations/[^/]+$", // "required": true, // "type": "string" // } @@ -6795,109 +7526,97 @@ func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) Do(opts ...googleapi. 
} -// method id "iam.organizations.roles.create": +// method id "iam.locations.workforcePools.providers.operations.get": -type OrganizationsRolesCreateCall struct { - s *Service - parent string - createrolerequest *CreateRoleRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LocationsWorkforcePoolsProvidersOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Create: Creates a new custom Role. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. // -// - parent: The `parent` parameter's value depends on the target -// resource for the request, namely `projects` -// (https://cloud.google.com/iam/reference/rest/v1/projects.roles) or -// `organizations` -// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles). -// Each resource type's `parent` value format is described below: * -// `projects.roles.create()` -// (https://cloud.google.com/iam/reference/rest/v1/projects.roles/create): -// `projects/{PROJECT_ID}`. This method creates project-level custom -// roles -// (https://cloud.google.com/iam/docs/understanding-custom-roles). -// Example request URL: -// `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * -// `organizations.roles.create()` -// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles/create): -// `organizations/{ORGANIZATION_ID}`. This method creates -// organization-level custom roles -// (https://cloud.google.com/iam/docs/understanding-custom-roles). -// Example request URL: -// `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles -// ` Note: Wildcard (*) values are invalid; you must specify a -// complete project ID or organization ID. 
-func (r *OrganizationsRolesService) Create(parent string, createrolerequest *CreateRoleRequest) *OrganizationsRolesCreateCall { - c := &OrganizationsRolesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.createrolerequest = createrolerequest +// - name: The name of the operation resource. +func (r *LocationsWorkforcePoolsProvidersOperationsService) Get(name string) *LocationsWorkforcePoolsProvidersOperationsGetCall { + c := &LocationsWorkforcePoolsProvidersOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsRolesCreateCall) Fields(s ...googleapi.Field) *OrganizationsRolesCreateCall { +func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsProvidersOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) IfNoneMatch(entityTag string) *LocationsWorkforcePoolsProvidersOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *OrganizationsRolesCreateCall) Context(ctx context.Context) *OrganizationsRolesCreateCall { +func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) Context(ctx context.Context) *LocationsWorkforcePoolsProvidersOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsRolesCreateCall) Header() http.Header { +func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsRolesCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.createrolerequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/roles") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.organizations.roles.create" call. -// Exactly one of *Role or error will be non-nil. 
Any non-2xx status -// code is an error. Response headers are in either -// *Role.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *OrganizationsRolesCreateCall) Do(opts ...googleapi.CallOption) (*Role, error) { +// Do executes the "iam.locations.workforcePools.providers.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LocationsWorkforcePoolsProvidersOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6916,7 +7635,7 @@ func (c *OrganizationsRolesCreateCall) Do(opts ...googleapi.CallOption) (*Role, if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Role{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6928,28 +7647,25 @@ func (c *OrganizationsRolesCreateCall) Do(opts ...googleapi.CallOption) (*Role, } return ret, nil // { - // "description": "Creates a new custom Role.", - // "flatPath": "v1/organizations/{organizationsId}/roles", - // "httpMethod": "POST", - // "id": "iam.organizations.roles.create", + // "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/providers/{providersId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "iam.locations.workforcePools.providers.operations.get", // "parameterOrder": [ - // "parent" + // "name" // ], // "parameters": { - // "parent": { - // "description": "The `parent` parameter's value depends on the target resource for the request, namely [`projects`](https://cloud.google.com/iam/reference/rest/v1/projects.roles) or [`organizations`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`projects.roles.create()`](https://cloud.google.com/iam/reference/rest/v1/projects.roles/create): `projects/{PROJECT_ID}`. This method creates project-level [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.create()`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles/create): `organizations/{ORGANIZATION_ID}`. This method creates organization-level [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles). 
Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", + // "name": { + // "description": "The name of the operation resource.", // "location": "path", - // "pattern": "^organizations/[^/]+$", + // "pattern": "^locations/[^/]+/workforcePools/[^/]+/providers/[^/]+/operations/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v1/{+parent}/roles", - // "request": { - // "$ref": "CreateRoleRequest" - // }, + // "path": "v1/{+name}", // "response": { - // "$ref": "Role" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -6958,9 +7674,9 @@ func (c *OrganizationsRolesCreateCall) Do(opts ...googleapi.CallOption) (*Role, } -// method id "iam.organizations.roles.delete": +// method id "iam.locations.workforcePools.subjects.delete": -type OrganizationsRolesDeleteCall struct { +type LocationsWorkforcePoolsSubjectsDeleteCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -6968,55 +7684,33 @@ type OrganizationsRolesDeleteCall struct { header_ http.Header } -// Delete: Deletes a custom Role. When you delete a custom role, the -// following changes occur immediately: * You cannot bind a principal to -// the custom role in an IAM Policy. * Existing bindings to the custom -// role are not changed, but they have no effect. * By default, the -// response from ListRoles does not include the custom role. You have 7 -// days to undelete the custom role. After 7 days, the following changes -// occur: * The custom role is permanently deleted and cannot be -// recovered. * If an IAM policy contains a binding to the custom role, -// the binding is permanently removed. +// Delete: Deletes a WorkforcePoolSubject. Subject must not already be +// in a deleted state. 
A WorkforcePoolSubject is automatically created +// the first time an external credential is exchanged for a Google Cloud +// credential with a mapped `google.subject` attribute. There is no path +// to manually create WorkforcePoolSubjects. Once deleted, the +// WorkforcePoolSubject may not be used for 30 days. After 30 days, the +// WorkforcePoolSubject will be deleted forever and can be reused in +// token exchanges with Google Cloud STS. This will automatically create +// a new WorkforcePoolSubject that is independent of the previously +// deleted WorkforcePoolSubject with the same google.subject value. // -// - name: The `name` parameter's value depends on the target resource -// for the request, namely `projects` -// (https://cloud.google.com/iam/reference/rest/v1/projects.roles) or -// `organizations` -// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles). -// Each resource type's `name` value format is described below: * -// `projects.roles.delete()` -// (https://cloud.google.com/iam/reference/rest/v1/projects.roles/delete): -// `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes -// only custom roles -// (https://cloud.google.com/iam/docs/understanding-custom-roles) that -// have been created at the project level. Example request URL: -// `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_R -// OLE_ID}` * `organizations.roles.delete()` -// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles/delete): -// `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This -// method deletes only custom roles -// (https://cloud.google.com/iam/docs/understanding-custom-roles) that -// have been created at the organization level. Example request URL: -// `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles -// /{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must -// specify a complete project ID or organization ID. 
-func (r *OrganizationsRolesService) Delete(name string) *OrganizationsRolesDeleteCall { - c := &OrganizationsRolesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The resource name of the WorkforcePoolSubject. Special +// characters, like '/' and ':', must be escaped, because all URLs +// need to conform to the "When to Escape and Unescape" section of +// RFC3986 (https://www.ietf.org/rfc/rfc2396.txt). Format: +// `locations/{location}/workforcePools/{workforce_pool_id}/subjects/{s +// ubject_id}`. +func (r *LocationsWorkforcePoolsSubjectsService) Delete(name string) *LocationsWorkforcePoolsSubjectsDeleteCall { + c := &LocationsWorkforcePoolsSubjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } -// Etag sets the optional parameter "etag": Used to perform a consistent -// read-modify-write. -func (c *OrganizationsRolesDeleteCall) Etag(etag string) *OrganizationsRolesDeleteCall { - c.urlParams_.Set("etag", etag) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsRolesDeleteCall) Fields(s ...googleapi.Field) *OrganizationsRolesDeleteCall { +func (c *LocationsWorkforcePoolsSubjectsDeleteCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsSubjectsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7024,21 +7718,21 @@ func (c *OrganizationsRolesDeleteCall) Fields(s ...googleapi.Field) *Organizatio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *OrganizationsRolesDeleteCall) Context(ctx context.Context) *OrganizationsRolesDeleteCall { +func (c *LocationsWorkforcePoolsSubjectsDeleteCall) Context(ctx context.Context) *LocationsWorkforcePoolsSubjectsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsRolesDeleteCall) Header() http.Header { +func (c *LocationsWorkforcePoolsSubjectsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsRolesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsWorkforcePoolsSubjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -7061,14 +7755,14 @@ func (c *OrganizationsRolesDeleteCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.organizations.roles.delete" call. -// Exactly one of *Role or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Role.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *OrganizationsRolesDeleteCall) Do(opts ...googleapi.CallOption) (*Role, error) { +// Do executes the "iam.locations.workforcePools.subjects.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LocationsWorkforcePoolsSubjectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7087,7 +7781,7 @@ func (c *OrganizationsRolesDeleteCall) Do(opts ...googleapi.CallOption) (*Role, if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Role{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7099,31 +7793,25 @@ func (c *OrganizationsRolesDeleteCall) Do(opts ...googleapi.CallOption) (*Role, } return ret, nil // { - // "description": "Deletes a custom Role. When you delete a custom role, the following changes occur immediately: * You cannot bind a principal to the custom role in an IAM Policy. * Existing bindings to the custom role are not changed, but they have no effect. * By default, the response from ListRoles does not include the custom role. You have 7 days to undelete the custom role. After 7 days, the following changes occur: * The custom role is permanently deleted and cannot be recovered. * If an IAM policy contains a binding to the custom role, the binding is permanently removed.", - // "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", + // "description": "Deletes a WorkforcePoolSubject. Subject must not already be in a deleted state. A WorkforcePoolSubject is automatically created the first time an external credential is exchanged for a Google Cloud credential with a mapped `google.subject` attribute. There is no path to manually create WorkforcePoolSubjects. Once deleted, the WorkforcePoolSubject may not be used for 30 days. 
After 30 days, the WorkforcePoolSubject will be deleted forever and can be reused in token exchanges with Google Cloud STS. This will automatically create a new WorkforcePoolSubject that is independent of the previously deleted WorkforcePoolSubject with the same google.subject value.", + // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/subjects/{subjectsId}", // "httpMethod": "DELETE", - // "id": "iam.organizations.roles.delete", + // "id": "iam.locations.workforcePools.subjects.delete", // "parameterOrder": [ // "name" // ], // "parameters": { - // "etag": { - // "description": "Used to perform a consistent read-modify-write.", - // "format": "byte", - // "location": "query", - // "type": "string" - // }, // "name": { - // "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](https://cloud.google.com/iam/reference/rest/v1/projects.roles) or [`organizations`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.delete()`](https://cloud.google.com/iam/reference/rest/v1/projects.roles/delete): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.delete()`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles/delete): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the organization level. 
Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", + // "description": "Required. The resource name of the WorkforcePoolSubject. Special characters, like '/' and ':', must be escaped, because all URLs need to conform to the \"When to Escape and Unescape\" section of [RFC3986](https://www.ietf.org/rfc/rfc2396.txt). Format: `locations/{location}/workforcePools/{workforce_pool_id}/subjects/{subject_id}`", // "location": "path", - // "pattern": "^organizations/[^/]+/roles/[^/]+$", + // "pattern": "^locations/[^/]+/workforcePools/[^/]+/subjects/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1/{+name}", // "response": { - // "$ref": "Role" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -7132,106 +7820,76 @@ func (c *OrganizationsRolesDeleteCall) Do(opts ...googleapi.CallOption) (*Role, } -// method id "iam.organizations.roles.get": +// method id "iam.locations.workforcePools.subjects.undelete": -type OrganizationsRolesGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type LocationsWorkforcePoolsSubjectsUndeleteCall struct { + s *Service + name string + undeleteworkforcepoolsubjectrequest *UndeleteWorkforcePoolSubjectRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets the definition of a Role. +// Undelete: Undeletes a WorkforcePoolSubject, as long as it was deleted +// fewer than 30 days ago. 
// -// - name: The `name` parameter's value depends on the target resource -// for the request, namely `roles` -// (https://cloud.google.com/iam/reference/rest/v1/roles), `projects` -// (https://cloud.google.com/iam/reference/rest/v1/projects.roles), or -// `organizations` -// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles). -// Each resource type's `name` value format is described below: * -// `roles.get()` -// (https://cloud.google.com/iam/reference/rest/v1/roles/get): -// `roles/{ROLE_NAME}`. This method returns results from all -// predefined roles -// (https://cloud.google.com/iam/docs/understanding-roles#predefined_roles) -// in Cloud IAM. Example request URL: -// `https://iam.googleapis.com/v1/roles/{ROLE_NAME}` * -// `projects.roles.get()` -// (https://cloud.google.com/iam/reference/rest/v1/projects.roles/get): -// `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns -// only custom roles -// (https://cloud.google.com/iam/docs/understanding-custom-roles) that -// have been created at the project level. Example request URL: -// `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_R -// OLE_ID}` * `organizations.roles.get()` -// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles/get): -// `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This -// method returns only custom roles -// (https://cloud.google.com/iam/docs/understanding-custom-roles) that -// have been created at the organization level. Example request URL: -// `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles -// /{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must -// specify a complete project ID or organization ID. -func (r *OrganizationsRolesService) Get(name string) *OrganizationsRolesGetCall { - c := &OrganizationsRolesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The resource name of the WorkforcePoolSubject. 
Special +// characters, like '/' and ':', must be escaped, because all URLs +// need to conform to the "When to Escape and Unescape" section of +// RFC3986 (https://www.ietf.org/rfc/rfc2396.txt). Format: +// `locations/{location}/workforcePools/{workforce_pool_id}/subjects/{s +// ubject_id}`. +func (r *LocationsWorkforcePoolsSubjectsService) Undelete(name string, undeleteworkforcepoolsubjectrequest *UndeleteWorkforcePoolSubjectRequest) *LocationsWorkforcePoolsSubjectsUndeleteCall { + c := &LocationsWorkforcePoolsSubjectsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name + c.undeleteworkforcepoolsubjectrequest = undeleteworkforcepoolsubjectrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsRolesGetCall) Fields(s ...googleapi.Field) *OrganizationsRolesGetCall { +func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsSubjectsUndeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *OrganizationsRolesGetCall) IfNoneMatch(entityTag string) *OrganizationsRolesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *OrganizationsRolesGetCall) Context(ctx context.Context) *OrganizationsRolesGetCall { +func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) Context(ctx context.Context) *LocationsWorkforcePoolsSubjectsUndeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsRolesGetCall) Header() http.Header { +func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsRolesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleteworkforcepoolsubjectrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:undelete") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -7242,14 +7900,14 @@ func (c *OrganizationsRolesGetCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.organizations.roles.get" call. -// Exactly one of *Role or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Role.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *OrganizationsRolesGetCall) Do(opts ...googleapi.CallOption) (*Role, error) { +// Do executes the "iam.locations.workforcePools.subjects.undelete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LocationsWorkforcePoolsSubjectsUndeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7268,7 +7926,7 @@ func (c *OrganizationsRolesGetCall) Do(opts ...googleapi.CallOption) (*Role, err if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Role{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7280,25 +7938,28 @@ func (c *OrganizationsRolesGetCall) Do(opts ...googleapi.CallOption) (*Role, err } return ret, nil // { - // "description": "Gets the definition of a Role.", - // "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", - // "httpMethod": "GET", - // "id": "iam.organizations.roles.get", + // "description": "Undeletes a WorkforcePoolSubject, as long as it was deleted fewer than 30 days ago.", + // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/subjects/{subjectsId}:undelete", + // "httpMethod": "POST", + // "id": "iam.locations.workforcePools.subjects.undelete", // 
"parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "The `name` parameter's value depends on the target resource for the request, namely [`roles`](https://cloud.google.com/iam/reference/rest/v1/roles), [`projects`](https://cloud.google.com/iam/reference/rest/v1/projects.roles), or [`organizations`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`roles.get()`](https://cloud.google.com/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`. This method returns results from all [predefined roles](https://cloud.google.com/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles/{ROLE_NAME}` * [`projects.roles.get()`](https://cloud.google.com/iam/reference/rest/v1/projects.roles/get): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.get()`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles/get): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", + // "description": "Required. The resource name of the WorkforcePoolSubject. Special characters, like '/' and ':', must be escaped, because all URLs need to conform to the \"When to Escape and Unescape\" section of [RFC3986](https://www.ietf.org/rfc/rfc2396.txt). 
Format: `locations/{location}/workforcePools/{workforce_pool_id}/subjects/{subject_id}`", // "location": "path", - // "pattern": "^organizations/[^/]+/roles/[^/]+$", + // "pattern": "^locations/[^/]+/workforcePools/[^/]+/subjects/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v1/{+name}", + // "path": "v1/{+name}:undelete", + // "request": { + // "$ref": "UndeleteWorkforcePoolSubjectRequest" + // }, // "response": { - // "$ref": "Role" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -7307,99 +7968,32 @@ func (c *OrganizationsRolesGetCall) Do(opts ...googleapi.CallOption) (*Role, err } -// method id "iam.organizations.roles.list": +// method id "iam.locations.workforcePools.subjects.operations.get": -type OrganizationsRolesListCall struct { +type LocationsWorkforcePoolsSubjectsOperationsGetCall struct { s *Service - parent string + name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists every predefined Role that IAM supports, or every custom -// role that is defined for an organization or project. -// -// - parent: The `parent` parameter's value depends on the target -// resource for the request, namely `roles` -// (https://cloud.google.com/iam/reference/rest/v1/roles), `projects` -// (https://cloud.google.com/iam/reference/rest/v1/projects.roles), or -// `organizations` -// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles). -// Each resource type's `parent` value format is described below: * -// `roles.list()` -// (https://cloud.google.com/iam/reference/rest/v1/roles/list): An -// empty string. This method doesn't require a resource; it simply -// returns all predefined roles -// (https://cloud.google.com/iam/docs/understanding-roles#predefined_roles) -// in Cloud IAM. 
Example request URL: -// `https://iam.googleapis.com/v1/roles` * `projects.roles.list()` -// (https://cloud.google.com/iam/reference/rest/v1/projects.roles/list): -// `projects/{PROJECT_ID}`. This method lists all project-level custom -// roles -// (https://cloud.google.com/iam/docs/understanding-custom-roles). -// Example request URL: -// `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * -// `organizations.roles.list()` -// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles/list): -// `organizations/{ORGANIZATION_ID}`. This method lists all -// organization-level custom roles -// (https://cloud.google.com/iam/docs/understanding-custom-roles). -// Example request URL: -// `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles -// ` Note: Wildcard (*) values are invalid; you must specify a -// complete project ID or organization ID. -func (r *OrganizationsRolesService) List(parent string) *OrganizationsRolesListCall { - c := &OrganizationsRolesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// PageSize sets the optional parameter "pageSize": Optional limit on -// the number of roles to include in the response. The default is 300, -// and the maximum is 1,000. -func (c *OrganizationsRolesListCall) PageSize(pageSize int64) *OrganizationsRolesListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": Optional -// pagination token returned in an earlier ListRolesResponse. -func (c *OrganizationsRolesListCall) PageToken(pageToken string) *OrganizationsRolesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ShowDeleted sets the optional parameter "showDeleted": Include Roles -// that have been deleted. 
-func (c *OrganizationsRolesListCall) ShowDeleted(showDeleted bool) *OrganizationsRolesListCall { - c.urlParams_.Set("showDeleted", fmt.Sprint(showDeleted)) - return c -} - -// View sets the optional parameter "view": Optional view for the -// returned Role objects. When `FULL` is specified, the -// `includedPermissions` field is returned, which includes a list of all -// permissions in the role. The default value is `BASIC`, which does not -// return the `includedPermissions` field. -// -// Possible values: -// -// "BASIC" - Omits the `included_permissions` field. This is the -// -// default value. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. // -// "FULL" - Returns all fields. -func (c *OrganizationsRolesListCall) View(view string) *OrganizationsRolesListCall { - c.urlParams_.Set("view", view) +// - name: The name of the operation resource. +func (r *LocationsWorkforcePoolsSubjectsOperationsService) Get(name string) *LocationsWorkforcePoolsSubjectsOperationsGetCall { + c := &LocationsWorkforcePoolsSubjectsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsRolesListCall) Fields(s ...googleapi.Field) *OrganizationsRolesListCall { +func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) Fields(s ...googleapi.Field) *LocationsWorkforcePoolsSubjectsOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7409,7 +8003,7 @@ func (c *OrganizationsRolesListCall) Fields(s ...googleapi.Field) *Organizations // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *OrganizationsRolesListCall) IfNoneMatch(entityTag string) *OrganizationsRolesListCall { +func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) IfNoneMatch(entityTag string) *LocationsWorkforcePoolsSubjectsOperationsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -7417,21 +8011,21 @@ func (c *OrganizationsRolesListCall) IfNoneMatch(entityTag string) *Organization // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsRolesListCall) Context(ctx context.Context) *OrganizationsRolesListCall { +func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) Context(ctx context.Context) *LocationsWorkforcePoolsSubjectsOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsRolesListCall) Header() http.Header { +func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsRolesListCall) doRequest(alt string) (*http.Response, error) { +func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -7444,7 +8038,7 @@ func (c *OrganizationsRolesListCall) doRequest(alt string) (*http.Response, erro var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/roles") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -7452,19 +8046,19 @@ func (c *OrganizationsRolesListCall) doRequest(alt string) (*http.Response, erro } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.organizations.roles.list" call. -// Exactly one of *ListRolesResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListRolesResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *OrganizationsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRolesResponse, error) { +// Do executes the "iam.locations.workforcePools.subjects.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LocationsWorkforcePoolsSubjectsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7483,7 +8077,7 @@ func (c *OrganizationsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRole if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListRolesResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7495,54 +8089,25 @@ func (c *OrganizationsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRole } return ret, nil // { - // "description": "Lists every predefined Role that IAM supports, or every custom role that is defined for an organization or project.", - // "flatPath": "v1/organizations/{organizationsId}/roles", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1/locations/{locationsId}/workforcePools/{workforcePoolsId}/subjects/{subjectsId}/operations/{operationsId}", // "httpMethod": "GET", - // "id": "iam.organizations.roles.list", + // "id": "iam.locations.workforcePools.subjects.operations.get", // "parameterOrder": [ - // "parent" + // "name" // ], // "parameters": { - // "pageSize": { - // "description": "Optional limit on the number of roles to include in the response. 
The default is 300, and the maximum is 1,000.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional pagination token returned in an earlier ListRolesResponse.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "The `parent` parameter's value depends on the target resource for the request, namely [`roles`](https://cloud.google.com/iam/reference/rest/v1/roles), [`projects`](https://cloud.google.com/iam/reference/rest/v1/projects.roles), or [`organizations`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`roles.list()`](https://cloud.google.com/iam/reference/rest/v1/roles/list): An empty string. This method doesn't require a resource; it simply returns all [predefined roles](https://cloud.google.com/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles` * [`projects.roles.list()`](https://cloud.google.com/iam/reference/rest/v1/projects.roles/list): `projects/{PROJECT_ID}`. This method lists all project-level [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.list()`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles/list): `organizations/{ORGANIZATION_ID}`. This method lists all organization-level [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles). 
Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", + // "name": { + // "description": "The name of the operation resource.", // "location": "path", - // "pattern": "^organizations/[^/]+$", + // "pattern": "^locations/[^/]+/workforcePools/[^/]+/subjects/[^/]+/operations/[^/]+$", // "required": true, // "type": "string" - // }, - // "showDeleted": { - // "description": "Include Roles that have been deleted.", - // "location": "query", - // "type": "boolean" - // }, - // "view": { - // "description": "Optional view for the returned Role objects. When `FULL` is specified, the `includedPermissions` field is returned, which includes a list of all permissions in the role. The default value is `BASIC`, which does not return the `includedPermissions` field.", - // "enum": [ - // "BASIC", - // "FULL" - // ], - // "enumDescriptions": [ - // "Omits the `included_permissions` field. This is the default value.", - // "Returns all fields." - // ], - // "location": "query", - // "type": "string" // } // }, - // "path": "v1/{+parent}/roles", + // "path": "v1/{+name}", // "response": { - // "$ref": "ListRolesResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -7551,39 +8116,188 @@ func (c *OrganizationsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRole } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *OrganizationsRolesListCall) Pages(ctx context.Context, f func(*ListRolesResponse) error) error { +// method id "iam.organizations.roles.create": + +type OrganizationsRolesCreateCall struct { + s *Service + parent string + createrolerequest *CreateRoleRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new custom Role. +// +// - parent: The `parent` parameter's value depends on the target +// resource for the request, namely `projects` +// (https://cloud.google.com/iam/reference/rest/v1/projects.roles) or +// `organizations` +// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles). +// Each resource type's `parent` value format is described below: * +// `projects.roles.create()` +// (https://cloud.google.com/iam/reference/rest/v1/projects.roles/create): +// `projects/{PROJECT_ID}`. This method creates project-level custom +// roles +// (https://cloud.google.com/iam/docs/understanding-custom-roles). +// Example request URL: +// `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * +// `organizations.roles.create()` +// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles/create): +// `organizations/{ORGANIZATION_ID}`. This method creates +// organization-level custom roles +// (https://cloud.google.com/iam/docs/understanding-custom-roles). +// Example request URL: +// `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles +// ` Note: Wildcard (*) values are invalid; you must specify a +// complete project ID or organization ID. +func (r *OrganizationsRolesService) Create(parent string, createrolerequest *CreateRoleRequest) *OrganizationsRolesCreateCall { + c := &OrganizationsRolesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.createrolerequest = createrolerequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsRolesCreateCall) Fields(s ...googleapi.Field) *OrganizationsRolesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsRolesCreateCall) Context(ctx context.Context) *OrganizationsRolesCreateCall { c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsRolesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsRolesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.createrolerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/roles") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.organizations.roles.create" call. +// Exactly one of *Role or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Role.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *OrganizationsRolesCreateCall) Do(opts ...googleapi.CallOption) (*Role, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() } - c.PageToken(x.NextPageToken) + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Role{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err } + return ret, nil + // { + // "description": "Creates a new custom Role.", + // "flatPath": "v1/organizations/{organizationsId}/roles", + // "httpMethod": "POST", + // "id": "iam.organizations.roles.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The `parent` parameter's value depends on the target resource for the request, namely [`projects`](https://cloud.google.com/iam/reference/rest/v1/projects.roles) or 
[`organizations`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`projects.roles.create()`](https://cloud.google.com/iam/reference/rest/v1/projects.roles/create): `projects/{PROJECT_ID}`. This method creates project-level [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.create()`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles/create): `organizations/{ORGANIZATION_ID}`. This method creates organization-level [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", + // "location": "path", + // "pattern": "^organizations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/roles", + // "request": { + // "$ref": "CreateRoleRequest" + // }, + // "response": { + // "$ref": "Role" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + } -// method id "iam.organizations.roles.patch": +// method id "iam.organizations.roles.delete": -type OrganizationsRolesPatchCall struct { +type OrganizationsRolesDeleteCall struct { s *Service name string - role *Role urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Updates the definition of a custom Role. +// Delete: Deletes a custom Role. When you delete a custom role, the +// following changes occur immediately: * You cannot bind a principal to +// the custom role in an IAM Policy. * Existing bindings to the custom +// role are not changed, but they have no effect. * By default, the +// response from ListRoles does not include the custom role. 
You have 7 +// days to undelete the custom role. After 7 days, the following changes +// occur: * The custom role is permanently deleted and cannot be +// recovered. * If an IAM policy contains a binding to the custom role, +// the binding is permanently removed. // // - name: The `name` parameter's value depends on the target resource // for the request, namely `projects` @@ -7591,40 +8305,39 @@ type OrganizationsRolesPatchCall struct { // `organizations` // (https://cloud.google.com/iam/reference/rest/v1/organizations.roles). // Each resource type's `name` value format is described below: * -// `projects.roles.patch()` -// (https://cloud.google.com/iam/reference/rest/v1/projects.roles/patch): -// `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates +// `projects.roles.delete()` +// (https://cloud.google.com/iam/reference/rest/v1/projects.roles/delete): +// `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes // only custom roles // (https://cloud.google.com/iam/docs/understanding-custom-roles) that // have been created at the project level. Example request URL: // `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_R -// OLE_ID}` * `organizations.roles.patch()` -// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles/patch): +// OLE_ID}` * `organizations.roles.delete()` +// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles/delete): // `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This -// method updates only custom roles +// method deletes only custom roles // (https://cloud.google.com/iam/docs/understanding-custom-roles) that // have been created at the organization level. Example request URL: // `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles // /{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must // specify a complete project ID or organization ID. 
-func (r *OrganizationsRolesService) Patch(name string, role *Role) *OrganizationsRolesPatchCall { - c := &OrganizationsRolesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *OrganizationsRolesService) Delete(name string) *OrganizationsRolesDeleteCall { + c := &OrganizationsRolesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.role = role return c } -// UpdateMask sets the optional parameter "updateMask": A mask -// describing which fields in the Role have changed. -func (c *OrganizationsRolesPatchCall) UpdateMask(updateMask string) *OrganizationsRolesPatchCall { - c.urlParams_.Set("updateMask", updateMask) +// Etag sets the optional parameter "etag": Used to perform a consistent +// read-modify-write. +func (c *OrganizationsRolesDeleteCall) Etag(etag string) *OrganizationsRolesDeleteCall { + c.urlParams_.Set("etag", etag) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsRolesPatchCall) Fields(s ...googleapi.Field) *OrganizationsRolesPatchCall { +func (c *OrganizationsRolesDeleteCall) Fields(s ...googleapi.Field) *OrganizationsRolesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7632,21 +8345,21 @@ func (c *OrganizationsRolesPatchCall) Fields(s ...googleapi.Field) *Organization // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *OrganizationsRolesPatchCall) Context(ctx context.Context) *OrganizationsRolesPatchCall { +func (c *OrganizationsRolesDeleteCall) Context(ctx context.Context) *OrganizationsRolesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *OrganizationsRolesPatchCall) Header() http.Header { +func (c *OrganizationsRolesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsRolesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *OrganizationsRolesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -7654,16 +8367,11 @@ func (c *OrganizationsRolesPatchCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.role) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -7674,14 +8382,2194 @@ func (c *OrganizationsRolesPatchCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.organizations.roles.patch" call. +// Do executes the "iam.organizations.roles.delete" call. // Exactly one of *Role or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Role.ServerResponse.Header or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was // returned. 
-func (c *OrganizationsRolesPatchCall) Do(opts ...googleapi.CallOption) (*Role, error) { +func (c *OrganizationsRolesDeleteCall) Do(opts ...googleapi.CallOption) (*Role, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Role{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a custom Role. When you delete a custom role, the following changes occur immediately: * You cannot bind a principal to the custom role in an IAM Policy. * Existing bindings to the custom role are not changed, but they have no effect. * By default, the response from ListRoles does not include the custom role. You have 7 days to undelete the custom role. After 7 days, the following changes occur: * The custom role is permanently deleted and cannot be recovered. 
* If an IAM policy contains a binding to the custom role, the binding is permanently removed.", + // "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", + // "httpMethod": "DELETE", + // "id": "iam.organizations.roles.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "etag": { + // "description": "Used to perform a consistent read-modify-write.", + // "format": "byte", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](https://cloud.google.com/iam/reference/rest/v1/projects.roles) or [`organizations`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.delete()`](https://cloud.google.com/iam/reference/rest/v1/projects.roles/delete): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.delete()`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles/delete): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method deletes only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the organization level. 
Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", + // "location": "path", + // "pattern": "^organizations/[^/]+/roles/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Role" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iam.organizations.roles.get": + +type OrganizationsRolesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the definition of a Role. +// +// - name: The `name` parameter's value depends on the target resource +// for the request, namely `roles` +// (https://cloud.google.com/iam/reference/rest/v1/roles), `projects` +// (https://cloud.google.com/iam/reference/rest/v1/projects.roles), or +// `organizations` +// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles). +// Each resource type's `name` value format is described below: * +// `roles.get()` +// (https://cloud.google.com/iam/reference/rest/v1/roles/get): +// `roles/{ROLE_NAME}`. This method returns results from all +// predefined roles +// (https://cloud.google.com/iam/docs/understanding-roles#predefined_roles) +// in Cloud IAM. Example request URL: +// `https://iam.googleapis.com/v1/roles/{ROLE_NAME}` * +// `projects.roles.get()` +// (https://cloud.google.com/iam/reference/rest/v1/projects.roles/get): +// `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns +// only custom roles +// (https://cloud.google.com/iam/docs/understanding-custom-roles) that +// have been created at the project level. 
Example request URL: +// `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_R +// OLE_ID}` * `organizations.roles.get()` +// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles/get): +// `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This +// method returns only custom roles +// (https://cloud.google.com/iam/docs/understanding-custom-roles) that +// have been created at the organization level. Example request URL: +// `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles +// /{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must +// specify a complete project ID or organization ID. +func (r *OrganizationsRolesService) Get(name string) *OrganizationsRolesGetCall { + c := &OrganizationsRolesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsRolesGetCall) Fields(s ...googleapi.Field) *OrganizationsRolesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationsRolesGetCall) IfNoneMatch(entityTag string) *OrganizationsRolesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *OrganizationsRolesGetCall) Context(ctx context.Context) *OrganizationsRolesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsRolesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsRolesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.organizations.roles.get" call. +// Exactly one of *Role or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Role.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *OrganizationsRolesGetCall) Do(opts ...googleapi.CallOption) (*Role, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Role{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the definition of a Role.", + // "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", + // "httpMethod": "GET", + // "id": "iam.organizations.roles.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The `name` parameter's value depends on the target resource for the request, namely [`roles`](https://cloud.google.com/iam/reference/rest/v1/roles), [`projects`](https://cloud.google.com/iam/reference/rest/v1/projects.roles), or [`organizations`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`roles.get()`](https://cloud.google.com/iam/reference/rest/v1/roles/get): `roles/{ROLE_NAME}`. This method returns results from all [predefined roles](https://cloud.google.com/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles/{ROLE_NAME}` * [`projects.roles.get()`](https://cloud.google.com/iam/reference/rest/v1/projects.roles/get): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the project level. 
Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.get()`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles/get): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method returns only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", + // "location": "path", + // "pattern": "^organizations/[^/]+/roles/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Role" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iam.organizations.roles.list": + +type OrganizationsRolesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists every predefined Role that IAM supports, or every custom +// role that is defined for an organization or project. +// +// - parent: The `parent` parameter's value depends on the target +// resource for the request, namely `roles` +// (https://cloud.google.com/iam/reference/rest/v1/roles), `projects` +// (https://cloud.google.com/iam/reference/rest/v1/projects.roles), or +// `organizations` +// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles). +// Each resource type's `parent` value format is described below: * +// `roles.list()` +// (https://cloud.google.com/iam/reference/rest/v1/roles/list): An +// empty string. This method doesn't require a resource; it simply +// returns all predefined roles +// (https://cloud.google.com/iam/docs/understanding-roles#predefined_roles) +// in Cloud IAM. 
Example request URL: +// `https://iam.googleapis.com/v1/roles` * `projects.roles.list()` +// (https://cloud.google.com/iam/reference/rest/v1/projects.roles/list): +// `projects/{PROJECT_ID}`. This method lists all project-level custom +// roles +// (https://cloud.google.com/iam/docs/understanding-custom-roles). +// Example request URL: +// `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * +// `organizations.roles.list()` +// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles/list): +// `organizations/{ORGANIZATION_ID}`. This method lists all +// organization-level custom roles +// (https://cloud.google.com/iam/docs/understanding-custom-roles). +// Example request URL: +// `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles +// ` Note: Wildcard (*) values are invalid; you must specify a +// complete project ID or organization ID. +func (r *OrganizationsRolesService) List(parent string) *OrganizationsRolesListCall { + c := &OrganizationsRolesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": Optional limit on +// the number of roles to include in the response. The default is 300, +// and the maximum is 1,000. +func (c *OrganizationsRolesListCall) PageSize(pageSize int64) *OrganizationsRolesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Optional +// pagination token returned in an earlier ListRolesResponse. +func (c *OrganizationsRolesListCall) PageToken(pageToken string) *OrganizationsRolesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ShowDeleted sets the optional parameter "showDeleted": Include Roles +// that have been deleted. 
+func (c *OrganizationsRolesListCall) ShowDeleted(showDeleted bool) *OrganizationsRolesListCall { + c.urlParams_.Set("showDeleted", fmt.Sprint(showDeleted)) + return c +} + +// View sets the optional parameter "view": Optional view for the +// returned Role objects. When `FULL` is specified, the +// `includedPermissions` field is returned, which includes a list of all +// permissions in the role. The default value is `BASIC`, which does not +// return the `includedPermissions` field. +// +// Possible values: +// +// "BASIC" - Omits the `included_permissions` field. This is the +// +// default value. +// +// "FULL" - Returns all fields. +func (c *OrganizationsRolesListCall) View(view string) *OrganizationsRolesListCall { + c.urlParams_.Set("view", view) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsRolesListCall) Fields(s ...googleapi.Field) *OrganizationsRolesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationsRolesListCall) IfNoneMatch(entityTag string) *OrganizationsRolesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsRolesListCall) Context(ctx context.Context) *OrganizationsRolesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *OrganizationsRolesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsRolesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/roles") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.organizations.roles.list" call. +// Exactly one of *ListRolesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListRolesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OrganizationsRolesListCall) Do(opts ...googleapi.CallOption) (*ListRolesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListRolesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists every predefined Role that IAM supports, or every custom role that is defined for an organization or project.", + // "flatPath": "v1/organizations/{organizationsId}/roles", + // "httpMethod": "GET", + // "id": "iam.organizations.roles.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Optional limit on the number of roles to include in the response. The default is 300, and the maximum is 1,000.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional pagination token returned in an earlier ListRolesResponse.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The `parent` parameter's value depends on the target resource for the request, namely [`roles`](https://cloud.google.com/iam/reference/rest/v1/roles), [`projects`](https://cloud.google.com/iam/reference/rest/v1/projects.roles), or [`organizations`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles). Each resource type's `parent` value format is described below: * [`roles.list()`](https://cloud.google.com/iam/reference/rest/v1/roles/list): An empty string. 
This method doesn't require a resource; it simply returns all [predefined roles](https://cloud.google.com/iam/docs/understanding-roles#predefined_roles) in Cloud IAM. Example request URL: `https://iam.googleapis.com/v1/roles` * [`projects.roles.list()`](https://cloud.google.com/iam/reference/rest/v1/projects.roles/list): `projects/{PROJECT_ID}`. This method lists all project-level [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles` * [`organizations.roles.list()`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles/list): `organizations/{ORGANIZATION_ID}`. This method lists all organization-level [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles). Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", + // "location": "path", + // "pattern": "^organizations/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "showDeleted": { + // "description": "Include Roles that have been deleted.", + // "location": "query", + // "type": "boolean" + // }, + // "view": { + // "description": "Optional view for the returned Role objects. When `FULL` is specified, the `includedPermissions` field is returned, which includes a list of all permissions in the role. The default value is `BASIC`, which does not return the `includedPermissions` field.", + // "enum": [ + // "BASIC", + // "FULL" + // ], + // "enumDescriptions": [ + // "Omits the `included_permissions` field. This is the default value.", + // "Returns all fields." 
+ // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/roles", + // "response": { + // "$ref": "ListRolesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *OrganizationsRolesListCall) Pages(ctx context.Context, f func(*ListRolesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "iam.organizations.roles.patch": + +type OrganizationsRolesPatchCall struct { + s *Service + name string + role *Role + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the definition of a custom Role. +// +// - name: The `name` parameter's value depends on the target resource +// for the request, namely `projects` +// (https://cloud.google.com/iam/reference/rest/v1/projects.roles) or +// `organizations` +// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles). +// Each resource type's `name` value format is described below: * +// `projects.roles.patch()` +// (https://cloud.google.com/iam/reference/rest/v1/projects.roles/patch): +// `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates +// only custom roles +// (https://cloud.google.com/iam/docs/understanding-custom-roles) that +// have been created at the project level. 
Example request URL: +// `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_R +// OLE_ID}` * `organizations.roles.patch()` +// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles/patch): +// `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This +// method updates only custom roles +// (https://cloud.google.com/iam/docs/understanding-custom-roles) that +// have been created at the organization level. Example request URL: +// `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles +// /{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must +// specify a complete project ID or organization ID. +func (r *OrganizationsRolesService) Patch(name string, role *Role) *OrganizationsRolesPatchCall { + c := &OrganizationsRolesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.role = role + return c +} + +// UpdateMask sets the optional parameter "updateMask": A mask +// describing which fields in the Role have changed. +func (c *OrganizationsRolesPatchCall) UpdateMask(updateMask string) *OrganizationsRolesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsRolesPatchCall) Fields(s ...googleapi.Field) *OrganizationsRolesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsRolesPatchCall) Context(ctx context.Context) *OrganizationsRolesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *OrganizationsRolesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsRolesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.role) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.organizations.roles.patch" call. +// Exactly one of *Role or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Role.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *OrganizationsRolesPatchCall) Do(opts ...googleapi.CallOption) (*Role, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Role{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the definition of a custom Role.", + // "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", + // "httpMethod": "PATCH", + // "id": "iam.organizations.roles.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](https://cloud.google.com/iam/reference/rest/v1/projects.roles) or [`organizations`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.patch()`](https://cloud.google.com/iam/reference/rest/v1/projects.roles/patch): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.patch()`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles/patch): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. 
This method updates only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", + // "location": "path", + // "pattern": "^organizations/[^/]+/roles/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "A mask describing which fields in the Role have changed.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "request": { + // "$ref": "Role" + // }, + // "response": { + // "$ref": "Role" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iam.organizations.roles.undelete": + +type OrganizationsRolesUndeleteCall struct { + s *Service + name string + undeleterolerequest *UndeleteRoleRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Undelete: Undeletes a custom Role. +// +// - name: The `name` parameter's value depends on the target resource +// for the request, namely `projects` +// (https://cloud.google.com/iam/reference/rest/v1/projects.roles) or +// `organizations` +// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles). +// Each resource type's `name` value format is described below: * +// `projects.roles.undelete()` +// (https://cloud.google.com/iam/reference/rest/v1/projects.roles/undelete): +// `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method +// undeletes only custom roles +// (https://cloud.google.com/iam/docs/understanding-custom-roles) that +// have been created at the project level. 
Example request URL: +// `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_R +// OLE_ID}` * `organizations.roles.undelete()` +// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles/undelete): +// `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This +// method undeletes only custom roles +// (https://cloud.google.com/iam/docs/understanding-custom-roles) that +// have been created at the organization level. Example request URL: +// `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles +// /{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must +// specify a complete project ID or organization ID. +func (r *OrganizationsRolesService) Undelete(name string, undeleterolerequest *UndeleteRoleRequest) *OrganizationsRolesUndeleteCall { + c := &OrganizationsRolesUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.undeleterolerequest = undeleterolerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsRolesUndeleteCall) Fields(s ...googleapi.Field) *OrganizationsRolesUndeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsRolesUndeleteCall) Context(ctx context.Context) *OrganizationsRolesUndeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *OrganizationsRolesUndeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsRolesUndeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleterolerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:undelete") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.organizations.roles.undelete" call. +// Exactly one of *Role or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Role.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *OrganizationsRolesUndeleteCall) Do(opts ...googleapi.CallOption) (*Role, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Role{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Undeletes a custom Role.", + // "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}:undelete", + // "httpMethod": "POST", + // "id": "iam.organizations.roles.undelete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](https://cloud.google.com/iam/reference/rest/v1/projects.roles) or [`organizations`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.undelete()`](https://cloud.google.com/iam/reference/rest/v1/projects.roles/undelete): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.undelete()`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles/undelete): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. 
This method undeletes only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", + // "location": "path", + // "pattern": "^organizations/[^/]+/roles/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:undelete", + // "request": { + // "$ref": "UndeleteRoleRequest" + // }, + // "response": { + // "$ref": "Role" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iam.permissions.queryTestablePermissions": + +type PermissionsQueryTestablePermissionsCall struct { + s *Service + querytestablepermissionsrequest *QueryTestablePermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// QueryTestablePermissions: Lists every permission that you can test on +// a resource. A permission is testable if you can check whether a +// principal has that permission on the resource. +func (r *PermissionsService) QueryTestablePermissions(querytestablepermissionsrequest *QueryTestablePermissionsRequest) *PermissionsQueryTestablePermissionsCall { + c := &PermissionsQueryTestablePermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.querytestablepermissionsrequest = querytestablepermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PermissionsQueryTestablePermissionsCall) Fields(s ...googleapi.Field) *PermissionsQueryTestablePermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PermissionsQueryTestablePermissionsCall) Context(ctx context.Context) *PermissionsQueryTestablePermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PermissionsQueryTestablePermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *PermissionsQueryTestablePermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.querytestablepermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/permissions:queryTestablePermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.permissions.queryTestablePermissions" call. +// Exactly one of *QueryTestablePermissionsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *QueryTestablePermissionsResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *PermissionsQueryTestablePermissionsCall) Do(opts ...googleapi.CallOption) (*QueryTestablePermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &QueryTestablePermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists every permission that you can test on a resource. A permission is testable if you can check whether a principal has that permission on the resource.", + // "flatPath": "v1/permissions:queryTestablePermissions", + // "httpMethod": "POST", + // "id": "iam.permissions.queryTestablePermissions", + // "parameterOrder": [], + // "parameters": {}, + // "path": "v1/permissions:queryTestablePermissions", + // "request": { + // "$ref": "QueryTestablePermissionsRequest" + // }, + // "response": { + // "$ref": "QueryTestablePermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *PermissionsQueryTestablePermissionsCall) Pages(ctx context.Context, f func(*QueryTestablePermissionsResponse) error) error { + c.ctx_ = ctx + defer func(pt string) { c.querytestablepermissionsrequest.PageToken = pt }(c.querytestablepermissionsrequest.PageToken) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.querytestablepermissionsrequest.PageToken = x.NextPageToken + } +} + +// method id "iam.projects.locations.workloadIdentityPools.create": + +type ProjectsLocationsWorkloadIdentityPoolsCreateCall struct { + s *Service + parent string + workloadidentitypool *WorkloadIdentityPool + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new WorkloadIdentityPool. You cannot reuse the name +// of a deleted pool until 30 days after deletion. +// +// - parent: The parent resource to create the pool in. The only +// supported location is `global`. +func (r *ProjectsLocationsWorkloadIdentityPoolsService) Create(parent string, workloadidentitypool *WorkloadIdentityPool) *ProjectsLocationsWorkloadIdentityPoolsCreateCall { + c := &ProjectsLocationsWorkloadIdentityPoolsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.workloadidentitypool = workloadidentitypool + return c +} + +// WorkloadIdentityPoolId sets the optional parameter +// "workloadIdentityPoolId": Required. The ID to use for the pool, which +// becomes the final component of the resource name. This value should +// be 4-32 characters, and may contain the characters [a-z0-9-]. The +// prefix `gcp-` is reserved for use by Google, and may not be +// specified. 
+func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) WorkloadIdentityPoolId(workloadIdentityPoolId string) *ProjectsLocationsWorkloadIdentityPoolsCreateCall { + c.urlParams_.Set("workloadIdentityPoolId", workloadIdentityPoolId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.workloadidentitypool) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/workloadIdentityPools") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.locations.workloadIdentityPools.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new WorkloadIdentityPool. You cannot reuse the name of a deleted pool until 30 days after deletion.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools", + // "httpMethod": "POST", + // "id": "iam.projects.locations.workloadIdentityPools.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. 
The parent resource to create the pool in. The only supported location is `global`.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "workloadIdentityPoolId": { + // "description": "Required. The ID to use for the pool, which becomes the final component of the resource name. This value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/workloadIdentityPools", + // "request": { + // "$ref": "WorkloadIdentityPool" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iam.projects.locations.workloadIdentityPools.delete": + +type ProjectsLocationsWorkloadIdentityPoolsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a WorkloadIdentityPool. You cannot use a deleted pool +// to exchange external credentials for Google Cloud credentials. +// However, deletion does not revoke credentials that have already been +// issued. Credentials issued for a deleted pool do not grant access to +// resources. If the pool is undeleted, and the credentials are not +// expired, they grant access again. You can undelete a pool for 30 +// days. After 30 days, deletion is permanent. You cannot update deleted +// pools. However, you can view and list them. +// +// - name: The name of the pool to delete. 
+func (r *ProjectsLocationsWorkloadIdentityPoolsService) Delete(name string) *ProjectsLocationsWorkloadIdentityPoolsDeleteCall { + c := &ProjectsLocationsWorkloadIdentityPoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.locations.workloadIdentityPools.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a WorkloadIdentityPool. You cannot use a deleted pool to exchange external credentials for Google Cloud credentials. However, deletion does not revoke credentials that have already been issued. Credentials issued for a deleted pool do not grant access to resources. If the pool is undeleted, and the credentials are not expired, they grant access again. You can undelete a pool for 30 days. 
After 30 days, deletion is permanent. You cannot update deleted pools. However, you can view and list them.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}", + // "httpMethod": "DELETE", + // "id": "iam.projects.locations.workloadIdentityPools.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the pool to delete.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iam.projects.locations.workloadIdentityPools.get": + +type ProjectsLocationsWorkloadIdentityPoolsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets an individual WorkloadIdentityPool. +// +// - name: The name of the pool to retrieve. +func (r *ProjectsLocationsWorkloadIdentityPoolsService) Get(name string) *ProjectsLocationsWorkloadIdentityPoolsGetCall { + c := &ProjectsLocationsWorkloadIdentityPoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.locations.workloadIdentityPools.get" call. +// Exactly one of *WorkloadIdentityPool or error will be non-nil. 
Any +// non-2xx status code is an error. Response headers are in either +// *WorkloadIdentityPool.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) Do(opts ...googleapi.CallOption) (*WorkloadIdentityPool, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &WorkloadIdentityPool{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets an individual WorkloadIdentityPool.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}", + // "httpMethod": "GET", + // "id": "iam.projects.locations.workloadIdentityPools.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. 
The name of the pool to retrieve.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "WorkloadIdentityPool" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iam.projects.locations.workloadIdentityPools.list": + +type ProjectsLocationsWorkloadIdentityPoolsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all non-deleted WorkloadIdentityPools in a project. If +// `show_deleted` is set to `true`, then deleted pools are also listed. +// +// - parent: The parent resource to list pools for. +func (r *ProjectsLocationsWorkloadIdentityPoolsService) List(parent string) *ProjectsLocationsWorkloadIdentityPoolsListCall { + c := &ProjectsLocationsWorkloadIdentityPoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of pools to return. If unspecified, at most 50 pools are returned. +// The maximum value is 1000; values above are 1000 truncated to 1000. +func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) PageSize(pageSize int64) *ProjectsLocationsWorkloadIdentityPoolsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A page token, +// received from a previous `ListWorkloadIdentityPools` call. Provide +// this to retrieve the subsequent page. 
+func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) PageToken(pageToken string) *ProjectsLocationsWorkloadIdentityPoolsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ShowDeleted sets the optional parameter "showDeleted": Whether to +// return soft-deleted pools. +func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) ShowDeleted(showDeleted bool) *ProjectsLocationsWorkloadIdentityPoolsListCall { + c.urlParams_.Set("showDeleted", fmt.Sprint(showDeleted)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/workloadIdentityPools") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.locations.workloadIdentityPools.list" call. +// Exactly one of *ListWorkloadIdentityPoolsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ListWorkloadIdentityPoolsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Do(opts ...googleapi.CallOption) (*ListWorkloadIdentityPoolsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListWorkloadIdentityPoolsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all non-deleted WorkloadIdentityPools in a project. If `show_deleted` is set to `true`, then deleted pools are also listed.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools", + // "httpMethod": "GET", + // "id": "iam.projects.locations.workloadIdentityPools.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "The maximum number of pools to return. If unspecified, at most 50 pools are returned. The maximum value is 1000; values above are 1000 truncated to 1000.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A page token, received from a previous `ListWorkloadIdentityPools` call. Provide this to retrieve the subsequent page.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. 
The parent resource to list pools for.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "showDeleted": { + // "description": "Whether to return soft-deleted pools.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "v1/{+parent}/workloadIdentityPools", + // "response": { + // "$ref": "ListWorkloadIdentityPoolsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Pages(ctx context.Context, f func(*ListWorkloadIdentityPoolsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "iam.projects.locations.workloadIdentityPools.patch": + +type ProjectsLocationsWorkloadIdentityPoolsPatchCall struct { + s *Service + name string + workloadidentitypool *WorkloadIdentityPool + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an existing WorkloadIdentityPool. +// +// - name: Output only. The resource name of the pool. +func (r *ProjectsLocationsWorkloadIdentityPoolsService) Patch(name string, workloadidentitypool *WorkloadIdentityPool) *ProjectsLocationsWorkloadIdentityPoolsPatchCall { + c := &ProjectsLocationsWorkloadIdentityPoolsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.workloadidentitypool = workloadidentitypool + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. 
The +// list of fields to update. +func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsWorkloadIdentityPoolsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.workloadidentitypool) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.locations.workloadIdentityPools.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an existing WorkloadIdentityPool.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}", + // "httpMethod": "PATCH", + // "id": "iam.projects.locations.workloadIdentityPools.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Output only. 
The resource name of the pool.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Required. The list of fields to update.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "request": { + // "$ref": "WorkloadIdentityPool" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iam.projects.locations.workloadIdentityPools.undelete": + +type ProjectsLocationsWorkloadIdentityPoolsUndeleteCall struct { + s *Service + name string + undeleteworkloadidentitypoolrequest *UndeleteWorkloadIdentityPoolRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Undelete: Undeletes a WorkloadIdentityPool, as long as it was deleted +// fewer than 30 days ago. +// +// - name: The name of the pool to undelete. +func (r *ProjectsLocationsWorkloadIdentityPoolsService) Undelete(name string, undeleteworkloadidentitypoolrequest *UndeleteWorkloadIdentityPoolRequest) *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall { + c := &ProjectsLocationsWorkloadIdentityPoolsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.undeleteworkloadidentitypoolrequest = undeleteworkloadidentitypoolrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleteworkloadidentitypoolrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:undelete") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.locations.workloadIdentityPools.undelete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Undeletes a WorkloadIdentityPool, as long as it was deleted fewer than 30 days ago.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}:undelete", + // "httpMethod": "POST", + // "id": "iam.projects.locations.workloadIdentityPools.undelete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. 
The name of the pool to undelete.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:undelete", + // "request": { + // "$ref": "UndeleteWorkloadIdentityPoolRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iam.projects.locations.workloadIdentityPools.namespaces.managedIdentities.operations.get": + +type ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. +// +// - name: The name of the operation resource. +func (r *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsService) Get(name string) *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsGetCall { + c := &ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.locations.workloadIdentityPools.namespaces.managedIdentities.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/namespaces/{namespacesId}/managedIdentities/{managedIdentitiesId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "iam.projects.locations.workloadIdentityPools.namespaces.managedIdentities.operations.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the operation resource.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/namespaces/[^/]+/managedIdentities/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "iam.projects.locations.workloadIdentityPools.namespaces.managedIdentities.workloadSources.operations.get": + +type ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. +// +// - name: The name of the operation resource. 
+func (r *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsService) Get(name string) *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsGetCall { + c := &ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iam.projects.locations.workloadIdentityPools.namespaces.managedIdentities.workloadSources.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7700,7 +10588,7 @@ func (c *OrganizationsRolesPatchCall) Do(opts ...googleapi.CallOption) (*Role, e if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Role{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7712,34 +10600,25 @@ func (c *OrganizationsRolesPatchCall) Do(opts ...googleapi.CallOption) (*Role, e } return ret, nil // { - // "description": "Updates the definition of a custom Role.", - // "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", - // "httpMethod": "PATCH", - // "id": "iam.organizations.roles.patch", + // "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/namespaces/{namespacesId}/managedIdentities/{managedIdentitiesId}/workloadSources/{workloadSourcesId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "iam.projects.locations.workloadIdentityPools.namespaces.managedIdentities.workloadSources.operations.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](https://cloud.google.com/iam/reference/rest/v1/projects.roles) or [`organizations`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles). 
Each resource type's `name` value format is described below: * [`projects.roles.patch()`](https://cloud.google.com/iam/reference/rest/v1/projects.roles/patch): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.patch()`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles/patch): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method updates only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the organization level. Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", + // "description": "The name of the operation resource.", // "location": "path", - // "pattern": "^organizations/[^/]+/roles/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/namespaces/[^/]+/managedIdentities/[^/]+/workloadSources/[^/]+/operations/[^/]+$", // "required": true, // "type": "string" - // }, - // "updateMask": { - // "description": "A mask describing which fields in the Role have changed.", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" // } // }, // "path": "v1/{+name}", - // "request": { - // "$ref": "Role" - // }, // "response": { - // "$ref": "Role" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -7748,91 +10627,79 @@ func (c *OrganizationsRolesPatchCall) Do(opts ...googleapi.CallOption) (*Role, e } -// method id "iam.organizations.roles.undelete": +// method id "iam.projects.locations.workloadIdentityPools.namespaces.operations.get": -type 
OrganizationsRolesUndeleteCall struct { - s *Service - name string - undeleterolerequest *UndeleteRoleRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Undelete: Undeletes a custom Role. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. // -// - name: The `name` parameter's value depends on the target resource -// for the request, namely `projects` -// (https://cloud.google.com/iam/reference/rest/v1/projects.roles) or -// `organizations` -// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles). -// Each resource type's `name` value format is described below: * -// `projects.roles.undelete()` -// (https://cloud.google.com/iam/reference/rest/v1/projects.roles/undelete): -// `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method -// undeletes only custom roles -// (https://cloud.google.com/iam/docs/understanding-custom-roles) that -// have been created at the project level. Example request URL: -// `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_R -// OLE_ID}` * `organizations.roles.undelete()` -// (https://cloud.google.com/iam/reference/rest/v1/organizations.roles/undelete): -// `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This -// method undeletes only custom roles -// (https://cloud.google.com/iam/docs/understanding-custom-roles) that -// have been created at the organization level. Example request URL: -// `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles -// /{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must -// specify a complete project ID or organization ID. 
-func (r *OrganizationsRolesService) Undelete(name string, undeleterolerequest *UndeleteRoleRequest) *OrganizationsRolesUndeleteCall { - c := &OrganizationsRolesUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The name of the operation resource. +func (r *ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsService) Get(name string) *ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsGetCall { + c := &ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.undeleterolerequest = undeleterolerequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *OrganizationsRolesUndeleteCall) Fields(s ...googleapi.Field) *OrganizationsRolesUndeleteCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *OrganizationsRolesUndeleteCall) Context(ctx context.Context) *OrganizationsRolesUndeleteCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *OrganizationsRolesUndeleteCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *OrganizationsRolesUndeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleterolerequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:undelete") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -7843,14 +10710,14 @@ func (c *OrganizationsRolesUndeleteCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.organizations.roles.undelete" call. 
-// Exactly one of *Role or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Role.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *OrganizationsRolesUndeleteCall) Do(opts ...googleapi.CallOption) (*Role, error) { +// Do executes the "iam.projects.locations.workloadIdentityPools.namespaces.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7869,7 +10736,7 @@ func (c *OrganizationsRolesUndeleteCall) Do(opts ...googleapi.CallOption) (*Role if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Role{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7881,28 +10748,25 @@ func (c *OrganizationsRolesUndeleteCall) Do(opts ...googleapi.CallOption) (*Role } return ret, nil // { - // "description": "Undeletes a custom Role.", - // "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}:undelete", - // "httpMethod": "POST", - // "id": "iam.organizations.roles.undelete", + // "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/namespaces/{namespacesId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "iam.projects.locations.workloadIdentityPools.namespaces.operations.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "The `name` parameter's value depends on the target resource for the request, namely [`projects`](https://cloud.google.com/iam/reference/rest/v1/projects.roles) or [`organizations`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles). Each resource type's `name` value format is described below: * [`projects.roles.undelete()`](https://cloud.google.com/iam/reference/rest/v1/projects.roles/undelete): `projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the project level. Example request URL: `https://iam.googleapis.com/v1/projects/{PROJECT_ID}/roles/{CUSTOM_ROLE_ID}` * [`organizations.roles.undelete()`](https://cloud.google.com/iam/reference/rest/v1/organizations.roles/undelete): `organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}`. This method undeletes only [custom roles](https://cloud.google.com/iam/docs/understanding-custom-roles) that have been created at the organization level. 
Example request URL: `https://iam.googleapis.com/v1/organizations/{ORGANIZATION_ID}/roles/{CUSTOM_ROLE_ID}` Note: Wildcard (*) values are invalid; you must specify a complete project ID or organization ID.", + // "description": "The name of the operation resource.", // "location": "path", - // "pattern": "^organizations/[^/]+/roles/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/namespaces/[^/]+/operations/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v1/{+name}:undelete", - // "request": { - // "$ref": "UndeleteRoleRequest" - // }, + // "path": "v1/{+name}", // "response": { - // "$ref": "Role" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -7911,83 +10775,97 @@ func (c *OrganizationsRolesUndeleteCall) Do(opts ...googleapi.CallOption) (*Role } -// method id "iam.permissions.queryTestablePermissions": +// method id "iam.projects.locations.workloadIdentityPools.namespaces.workloadSources.operations.get": -type PermissionsQueryTestablePermissionsCall struct { - s *Service - querytestablepermissionsrequest *QueryTestablePermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// QueryTestablePermissions: Lists every permission that you can test on -// a resource. A permission is testable if you can check whether a -// principal has that permission on the resource. 
-func (r *PermissionsService) QueryTestablePermissions(querytestablepermissionsrequest *QueryTestablePermissionsRequest) *PermissionsQueryTestablePermissionsCall { - c := &PermissionsQueryTestablePermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.querytestablepermissionsrequest = querytestablepermissionsrequest +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. +// +// - name: The name of the operation resource. +func (r *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsService) Get(name string) *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsGetCall { + c := &ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *PermissionsQueryTestablePermissionsCall) Fields(s ...googleapi.Field) *PermissionsQueryTestablePermissionsCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *PermissionsQueryTestablePermissionsCall) Context(ctx context.Context) *PermissionsQueryTestablePermissionsCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *PermissionsQueryTestablePermissionsCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *PermissionsQueryTestablePermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.querytestablepermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := 
googleapi.ResolveRelative(c.s.BasePath, "v1/permissions:queryTestablePermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.permissions.queryTestablePermissions" call. -// Exactly one of *QueryTestablePermissionsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *QueryTestablePermissionsResponse.ServerResponse.Header or (if -// a response was returned at all) in error.(*googleapi.Error).Header. -// Use googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *PermissionsQueryTestablePermissionsCall) Do(opts ...googleapi.CallOption) (*QueryTestablePermissionsResponse, error) { +// Do executes the "iam.projects.locations.workloadIdentityPools.namespaces.workloadSources.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8006,7 +10884,7 @@ func (c *PermissionsQueryTestablePermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &QueryTestablePermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8018,18 +10896,25 @@ func (c *PermissionsQueryTestablePermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Lists every permission that you can test on a resource. A permission is testable if you can check whether a principal has that permission on the resource.", - // "flatPath": "v1/permissions:queryTestablePermissions", - // "httpMethod": "POST", - // "id": "iam.permissions.queryTestablePermissions", - // "parameterOrder": [], - // "parameters": {}, - // "path": "v1/permissions:queryTestablePermissions", - // "request": { - // "$ref": "QueryTestablePermissionsRequest" + // "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/namespaces/{namespacesId}/workloadSources/{workloadSourcesId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "iam.projects.locations.workloadIdentityPools.namespaces.workloadSources.operations.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the operation resource.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/namespaces/[^/]+/workloadSources/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } // }, + // "path": "v1/{+name}", // "response": { - // "$ref": "QueryTestablePermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -8038,122 +10923,97 @@ func (c *PermissionsQueryTestablePermissionsCall) Do(opts ...googleapi.CallOptio } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *PermissionsQueryTestablePermissionsCall) Pages(ctx context.Context, f func(*QueryTestablePermissionsResponse) error) error { - c.ctx_ = ctx - defer func(pt string) { c.querytestablepermissionsrequest.PageToken = pt }(c.querytestablepermissionsrequest.PageToken) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.querytestablepermissionsrequest.PageToken = x.NextPageToken - } -} - -// method id "iam.projects.locations.workloadIdentityPools.create": +// method id "iam.projects.locations.workloadIdentityPools.operations.get": -type ProjectsLocationsWorkloadIdentityPoolsCreateCall struct { - s *Service - parent string - workloadidentitypool *WorkloadIdentityPool - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Create: Creates a new WorkloadIdentityPool. You cannot reuse the name -// of a deleted pool until 30 days after deletion. +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. // -// - parent: The parent resource to create the pool in. The only -// supported location is `global`. -func (r *ProjectsLocationsWorkloadIdentityPoolsService) Create(parent string, workloadidentitypool *WorkloadIdentityPool) *ProjectsLocationsWorkloadIdentityPoolsCreateCall { - c := &ProjectsLocationsWorkloadIdentityPoolsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.workloadidentitypool = workloadidentitypool - return c -} - -// WorkloadIdentityPoolId sets the optional parameter -// "workloadIdentityPoolId": Required. 
The ID to use for the pool, which -// becomes the final component of the resource name. This value should -// be 4-32 characters, and may contain the characters [a-z0-9-]. The -// prefix `gcp-` is reserved for use by Google, and may not be -// specified. -func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) WorkloadIdentityPoolId(workloadIdentityPoolId string) *ProjectsLocationsWorkloadIdentityPoolsCreateCall { - c.urlParams_.Set("workloadIdentityPoolId", workloadIdentityPoolId) +// - name: The name of the operation resource. +func (r *ProjectsLocationsWorkloadIdentityPoolsOperationsService) Get(name string) *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall { + c := &ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsCreateCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsCreateCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.workloadidentitypool) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/workloadIdentityPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.locations.workloadIdentityPools.create" call. +// Do executes the "iam.projects.locations.workloadIdentityPools.operations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8184,31 +11044,23 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Creates a new WorkloadIdentityPool. You cannot reuse the name of a deleted pool until 30 days after deletion.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools", - // "httpMethod": "POST", - // "id": "iam.projects.locations.workloadIdentityPools.create", + // "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "iam.projects.locations.workloadIdentityPools.operations.get", // "parameterOrder": [ - // "parent" + // "name" // ], // "parameters": { - // "parent": { - // "description": "Required. The parent resource to create the pool in. The only supported location is `global`.", + // "name": { + // "description": "The name of the operation resource.", // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/operations/[^/]+$", // "required": true, // "type": "string" - // }, - // "workloadIdentityPoolId": { - // "description": "Required. The ID to use for the pool, which becomes the final component of the resource name. This value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified.", - // "location": "query", - // "type": "string" // } // }, - // "path": "v1/{+parent}/workloadIdentityPools", - // "request": { - // "$ref": "WorkloadIdentityPool" - // }, + // "path": "v1/{+name}", // "response": { // "$ref": "Operation" // }, @@ -8219,36 +11071,44 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsCreateCall) Do(opts ...googleapi. 
} -// method id "iam.projects.locations.workloadIdentityPools.delete": +// method id "iam.projects.locations.workloadIdentityPools.providers.create": -type ProjectsLocationsWorkloadIdentityPoolsDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall struct { + s *Service + parent string + workloadidentitypoolprovider *WorkloadIdentityPoolProvider + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes a WorkloadIdentityPool. You cannot use a deleted pool -// to exchange external credentials for Google Cloud credentials. -// However, deletion does not revoke credentials that have already been -// issued. Credentials issued for a deleted pool do not grant access to -// resources. If the pool is undeleted, and the credentials are not -// expired, they grant access again. You can undelete a pool for 30 -// days. After 30 days, deletion is permanent. You cannot update deleted -// pools. However, you can view and list them. +// Create: Creates a new WorkloadIdentityPoolProvider in a +// WorkloadIdentityPool. You cannot reuse the name of a deleted provider +// until 30 days after deletion. // -// - name: The name of the pool to delete. -func (r *ProjectsLocationsWorkloadIdentityPoolsService) Delete(name string) *ProjectsLocationsWorkloadIdentityPoolsDeleteCall { - c := &ProjectsLocationsWorkloadIdentityPoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name +// - parent: The pool to create this provider in. 
+func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) Create(parent string, workloadidentitypoolprovider *WorkloadIdentityPoolProvider) *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall { + c := &ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.workloadidentitypoolprovider = workloadidentitypoolprovider + return c +} + +// WorkloadIdentityPoolProviderId sets the optional parameter +// "workloadIdentityPoolProviderId": Required. The ID for the provider, +// which becomes the final component of the resource name. This value +// must be 4-32 characters, and may contain the characters [a-z0-9-]. +// The prefix `gcp-` is reserved for use by Google, and may not be +// specified. +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) WorkloadIdentityPoolProviderId(workloadIdentityPoolProviderId string) *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall { + c.urlParams_.Set("workloadIdentityPoolProviderId", workloadIdentityPoolProviderId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsDeleteCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8256,21 +11116,21 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) Fields(s ...googleapi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsDeleteCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -8278,29 +11138,34 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) doRequest(alt string) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.workloadidentitypoolprovider) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/providers") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.locations.workloadIdentityPools.delete" call. +// Do executes the "iam.projects.locations.workloadIdentityPools.providers.create" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8331,23 +11196,31 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Deletes a WorkloadIdentityPool. You cannot use a deleted pool to exchange external credentials for Google Cloud credentials. However, deletion does not revoke credentials that have already been issued. Credentials issued for a deleted pool do not grant access to resources. If the pool is undeleted, and the credentials are not expired, they grant access again. You can undelete a pool for 30 days. After 30 days, deletion is permanent. You cannot update deleted pools. 
However, you can view and list them.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}", - // "httpMethod": "DELETE", - // "id": "iam.projects.locations.workloadIdentityPools.delete", + // "description": "Creates a new WorkloadIdentityPoolProvider in a WorkloadIdentityPool. You cannot reuse the name of a deleted provider until 30 days after deletion.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers", + // "httpMethod": "POST", + // "id": "iam.projects.locations.workloadIdentityPools.providers.create", // "parameterOrder": [ - // "name" + // "parent" // ], // "parameters": { - // "name": { - // "description": "Required. The name of the pool to delete.", + // "parent": { + // "description": "Required. The pool to create this provider in.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+$", // "required": true, // "type": "string" + // }, + // "workloadIdentityPoolProviderId": { + // "description": "Required. The ID for the provider, which becomes the final component of the resource name. This value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified.", + // "location": "query", + // "type": "string" // } // }, - // "path": "v1/{+name}", + // "path": "v1/{+parent}/providers", + // "request": { + // "$ref": "WorkloadIdentityPoolProvider" + // }, // "response": { // "$ref": "Operation" // }, @@ -8358,22 +11231,25 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsDeleteCall) Do(opts ...googleapi. 
} -// method id "iam.projects.locations.workloadIdentityPools.get": +// method id "iam.projects.locations.workloadIdentityPools.providers.delete": -type ProjectsLocationsWorkloadIdentityPoolsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets an individual WorkloadIdentityPool. +// Delete: Deletes a WorkloadIdentityPoolProvider. Deleting a provider +// does not revoke credentials that have already been issued; they +// continue to grant access. You can undelete a provider for 30 days. +// After 30 days, deletion is permanent. You cannot update deleted +// providers. However, you can view and list them. // -// - name: The name of the pool to retrieve. -func (r *ProjectsLocationsWorkloadIdentityPoolsService) Get(name string) *ProjectsLocationsWorkloadIdentityPoolsGetCall { - c := &ProjectsLocationsWorkloadIdentityPoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The name of the provider to delete. +func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) Delete(name string) *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall { + c := &ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } @@ -8381,54 +11257,41 @@ func (r *ProjectsLocationsWorkloadIdentityPoolsService) Get(name string) *Projec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsGetCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsGetCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -8439,14 +11302,14 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) doRequest(alt string) (* return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.locations.workloadIdentityPools.get" call. -// Exactly one of *WorkloadIdentityPool or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *WorkloadIdentityPool.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) Do(opts ...googleapi.CallOption) (*WorkloadIdentityPool, error) { +// Do executes the "iam.projects.locations.workloadIdentityPools.providers.delete" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8465,7 +11328,7 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &WorkloadIdentityPool{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8477,25 +11340,25 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Gets an individual WorkloadIdentityPool.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}", - // "httpMethod": "GET", - // "id": "iam.projects.locations.workloadIdentityPools.get", + // "description": "Deletes a WorkloadIdentityPoolProvider. Deleting a provider does not revoke credentials that have already been issued; they continue to grant access. You can undelete a provider for 30 days. After 30 days, deletion is permanent. You cannot update deleted providers. However, you can view and list them.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}", + // "httpMethod": "DELETE", + // "id": "iam.projects.locations.workloadIdentityPools.providers.delete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. 
The name of the pool to retrieve.", + // "description": "Required. The name of the provider to delete.", // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1/{+name}", // "response": { - // "$ref": "WorkloadIdentityPool" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -8504,54 +11367,30 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsGetCall) Do(opts ...googleapi.Cal } -// method id "iam.projects.locations.workloadIdentityPools.list": +// method id "iam.projects.locations.workloadIdentityPools.providers.get": -type ProjectsLocationsWorkloadIdentityPoolsListCall struct { +type ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall struct { s *Service - parent string + name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Lists all non-deleted WorkloadIdentityPools in a project. If -// `show_deleted` is set to `true`, then deleted pools are also listed. +// Get: Gets an individual WorkloadIdentityPoolProvider. // -// - parent: The parent resource to list pools for. -func (r *ProjectsLocationsWorkloadIdentityPoolsService) List(parent string) *ProjectsLocationsWorkloadIdentityPoolsListCall { - c := &ProjectsLocationsWorkloadIdentityPoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// PageSize sets the optional parameter "pageSize": The maximum number -// of pools to return. If unspecified, at most 50 pools are returned. -// The maximum value is 1000; values above are 1000 truncated to 1000. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) PageSize(pageSize int64) *ProjectsLocationsWorkloadIdentityPoolsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": A page token, -// received from a previous `ListWorkloadIdentityPools` call. Provide -// this to retrieve the subsequent page. -func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) PageToken(pageToken string) *ProjectsLocationsWorkloadIdentityPoolsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ShowDeleted sets the optional parameter "showDeleted": Whether to -// return soft-deleted pools. -func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) ShowDeleted(showDeleted bool) *ProjectsLocationsWorkloadIdentityPoolsListCall { - c.urlParams_.Set("showDeleted", fmt.Sprint(showDeleted)) +// - name: The name of the provider to retrieve. +func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) Get(name string) *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall { + c := &ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsListCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8561,7 +11400,7 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Fields(s ...googleapi.F // getting updates only after the object has changed since the last // request. 
Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsListCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall { c.ifNoneMatch_ = entityTag return c } @@ -8569,21 +11408,21 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) IfNoneMatch(entityTag s // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsListCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -8596,7 +11435,7 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) doRequest(alt string) ( var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/workloadIdentityPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -8604,20 +11443,19 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) doRequest(alt string) ( } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, + "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.locations.workloadIdentityPools.list" call. -// Exactly one of *ListWorkloadIdentityPoolsResponse or error will be +// Do executes the "iam.projects.locations.workloadIdentityPools.providers.get" call. +// Exactly one of *WorkloadIdentityPoolProvider or error will be // non-nil. Any non-2xx status code is an error. Response headers are in -// either *ListWorkloadIdentityPoolsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Do(opts ...googleapi.CallOption) (*ListWorkloadIdentityPoolsResponse, error) { +// either *WorkloadIdentityPoolProvider.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) Do(opts ...googleapi.CallOption) (*WorkloadIdentityPoolProvider, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8636,7 +11474,7 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Do(opts ...googleapi.Ca if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListWorkloadIdentityPoolsResponse{ + ret := &WorkloadIdentityPoolProvider{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8648,41 +11486,25 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Lists all non-deleted WorkloadIdentityPools in a project. If `show_deleted` is set to `true`, then deleted pools are also listed.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools", + // "description": "Gets an individual WorkloadIdentityPoolProvider.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}", // "httpMethod": "GET", - // "id": "iam.projects.locations.workloadIdentityPools.list", + // "id": "iam.projects.locations.workloadIdentityPools.providers.get", // "parameterOrder": [ - // "parent" + // "name" // ], // "parameters": { - // "pageSize": { - // "description": "The maximum number of pools to return. If unspecified, at most 50 pools are returned. 
The maximum value is 1000; values above are 1000 truncated to 1000.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A page token, received from a previous `ListWorkloadIdentityPools` call. Provide this to retrieve the subsequent page.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The parent resource to list pools for.", + // "name": { + // "description": "Required. The name of the provider to retrieve.", // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+$", // "required": true, // "type": "string" - // }, - // "showDeleted": { - // "description": "Whether to return soft-deleted pools.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "v1/{+parent}/workloadIdentityPools", + // "path": "v1/{+name}", // "response": { - // "$ref": "ListWorkloadIdentityPoolsResponse" + // "$ref": "WorkloadIdentityPoolProvider" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -8691,116 +11513,123 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Do(opts ...googleapi.Ca } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsListCall) Pages(ctx context.Context, f func(*ListWorkloadIdentityPoolsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } +// method id "iam.projects.locations.workloadIdentityPools.providers.list": + +type ProjectsLocationsWorkloadIdentityPoolsProvidersListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// method id "iam.projects.locations.workloadIdentityPools.patch": +// List: Lists all non-deleted WorkloadIdentityPoolProviders in a +// WorkloadIdentityPool. If `show_deleted` is set to `true`, then +// deleted providers are also listed. +// +// - parent: The pool to list providers for. +func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) List(parent string) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { + c := &ProjectsLocationsWorkloadIdentityPoolsProvidersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} -type ProjectsLocationsWorkloadIdentityPoolsPatchCall struct { - s *Service - name string - workloadidentitypool *WorkloadIdentityPool - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// PageSize sets the optional parameter "pageSize": The maximum number +// of providers to return. If unspecified, at most 50 providers are +// returned. The maximum value is 100; values above 100 are truncated to +// 100. 
+func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) PageSize(pageSize int64) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c } -// Patch: Updates an existing WorkloadIdentityPool. -// -// - name: Output only. The resource name of the pool. -func (r *ProjectsLocationsWorkloadIdentityPoolsService) Patch(name string, workloadidentitypool *WorkloadIdentityPool) *ProjectsLocationsWorkloadIdentityPoolsPatchCall { - c := &ProjectsLocationsWorkloadIdentityPoolsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.workloadidentitypool = workloadidentitypool +// PageToken sets the optional parameter "pageToken": A page token, +// received from a previous `ListWorkloadIdentityPoolProviders` call. +// Provide this to retrieve the subsequent page. +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) PageToken(pageToken string) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { + c.urlParams_.Set("pageToken", pageToken) return c } -// UpdateMask sets the optional parameter "updateMask": Required. The -// list of fields to update. -func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsWorkloadIdentityPoolsPatchCall { - c.urlParams_.Set("updateMask", updateMask) +// ShowDeleted sets the optional parameter "showDeleted": Whether to +// return soft-deleted providers. +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) ShowDeleted(showDeleted bool) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { + c.urlParams_.Set("showDeleted", fmt.Sprint(showDeleted)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsPatchCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsPatchCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.workloadidentitypool) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/providers") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.locations.workloadIdentityPools.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "iam.projects.locations.workloadIdentityPools.providers.list" call. +// Exactly one of *ListWorkloadIdentityPoolProvidersResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *ListWorkloadIdentityPoolProvidersResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Do(opts ...googleapi.CallOption) (*ListWorkloadIdentityPoolProvidersResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8819,7 +11648,7 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) Do(opts ...googleapi.C if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &ListWorkloadIdentityPoolProvidersResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8831,34 +11660,41 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Updates an existing WorkloadIdentityPool.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}", - // "httpMethod": "PATCH", - // "id": "iam.projects.locations.workloadIdentityPools.patch", + // "description": "Lists all non-deleted WorkloadIdentityPoolProviders in a WorkloadIdentityPool. 
If `show_deleted` is set to `true`, then deleted providers are also listed.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers", + // "httpMethod": "GET", + // "id": "iam.projects.locations.workloadIdentityPools.providers.list", // "parameterOrder": [ - // "name" + // "parent" // ], // "parameters": { - // "name": { - // "description": "Output only. The resource name of the pool.", + // "pageSize": { + // "description": "The maximum number of providers to return. If unspecified, at most 50 providers are returned. The maximum value is 100; values above 100 are truncated to 100.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A page token, received from a previous `ListWorkloadIdentityPoolProviders` call. Provide this to retrieve the subsequent page.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The pool to list providers for.", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+$", // "required": true, // "type": "string" // }, - // "updateMask": { - // "description": "Required. 
The list of fields to update.", - // "format": "google-fieldmask", + // "showDeleted": { + // "description": "Whether to return soft-deleted providers.", // "location": "query", - // "type": "string" + // "type": "boolean" // } // }, - // "path": "v1/{+name}", - // "request": { - // "$ref": "WorkloadIdentityPool" - // }, + // "path": "v1/{+parent}/providers", // "response": { - // "$ref": "Operation" + // "$ref": "ListWorkloadIdentityPoolProvidersResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -8867,32 +11703,59 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsPatchCall) Do(opts ...googleapi.C } -// method id "iam.projects.locations.workloadIdentityPools.undelete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Pages(ctx context.Context, f func(*ListWorkloadIdentityPoolProvidersResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsLocationsWorkloadIdentityPoolsUndeleteCall struct { - s *Service - name string - undeleteworkloadidentitypoolrequest *UndeleteWorkloadIdentityPoolRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "iam.projects.locations.workloadIdentityPools.providers.patch": + +type ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall struct { + s *Service + name string + workloadidentitypoolprovider *WorkloadIdentityPoolProvider + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Undelete: Undeletes a WorkloadIdentityPool, as long as it was deleted -// 
fewer than 30 days ago. +// Patch: Updates an existing WorkloadIdentityPoolProvider. // -// - name: The name of the pool to undelete. -func (r *ProjectsLocationsWorkloadIdentityPoolsService) Undelete(name string, undeleteworkloadidentitypoolrequest *UndeleteWorkloadIdentityPoolRequest) *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall { - c := &ProjectsLocationsWorkloadIdentityPoolsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: Output only. The resource name of the provider. +func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) Patch(name string, workloadidentitypoolprovider *WorkloadIdentityPoolProvider) *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall { + c := &ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.undeleteworkloadidentitypoolrequest = undeleteworkloadidentitypoolrequest + c.workloadidentitypoolprovider = workloadidentitypoolprovider + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. The +// list of fields to update. +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall) UpdateMask(updateMask string) *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall { + c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8900,21 +11763,21 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) Fields(s ...googlea // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -8922,16 +11785,16 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) doRequest(alt strin } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleteworkloadidentitypoolrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.workloadidentitypoolprovider) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:undelete") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -8942,14 +11805,14 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) doRequest(alt strin return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.locations.workloadIdentityPools.undelete" call. +// Do executes the "iam.projects.locations.workloadIdentityPools.providers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8980,25 +11843,31 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) Do(opts ...googleap } return ret, nil // { - // "description": "Undeletes a WorkloadIdentityPool, as long as it was deleted fewer than 30 days ago.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}:undelete", - // "httpMethod": "POST", - // "id": "iam.projects.locations.workloadIdentityPools.undelete", + // "description": "Updates an existing WorkloadIdentityPoolProvider.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}", + // "httpMethod": "PATCH", + // "id": "iam.projects.locations.workloadIdentityPools.providers.patch", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The name of the pool to undelete.", + // "description": "Output only. The resource name of the provider.", // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+$", // "required": true, // "type": "string" + // }, + // "updateMask": { + // "description": "Required. 
The list of fields to update.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, - // "path": "v1/{+name}:undelete", + // "path": "v1/{+name}", // "request": { - // "$ref": "UndeleteWorkloadIdentityPoolRequest" + // "$ref": "WorkloadIdentityPoolProvider" // }, // "response": { // "$ref": "Operation" @@ -9010,79 +11879,71 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsUndeleteCall) Do(opts ...googleap } -// method id "iam.projects.locations.workloadIdentityPools.operations.get": +// method id "iam.projects.locations.workloadIdentityPools.providers.undelete": -type ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall struct { + s *Service + name string + undeleteworkloadidentitypoolproviderrequest *UndeleteWorkloadIdentityPoolProviderRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets the latest state of a long-running operation. Clients can -// use this method to poll the operation result at intervals as -// recommended by the API service. +// Undelete: Undeletes a WorkloadIdentityPoolProvider, as long as it was +// deleted fewer than 30 days ago. // -// - name: The name of the operation resource. -func (r *ProjectsLocationsWorkloadIdentityPoolsOperationsService) Get(name string) *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall { - c := &ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The name of the provider to undelete. 
+func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) Undelete(name string, undeleteworkloadidentitypoolproviderrequest *UndeleteWorkloadIdentityPoolProviderRequest) *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall { + c := &ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name + c.undeleteworkloadidentitypoolproviderrequest = undeleteworkloadidentitypoolproviderrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleteworkloadidentitypoolproviderrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:undelete") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -9093,14 +11954,14 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) doRequest(alt return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.locations.workloadIdentityPools.operations.get" call. +// Do executes the "iam.projects.locations.workloadIdentityPools.providers.undelete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9131,23 +11992,26 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) Do(opts ...goo } return ret, nil // { - // "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/operations/{operationsId}", - // "httpMethod": "GET", - // "id": "iam.projects.locations.workloadIdentityPools.operations.get", + // "description": "Undeletes a WorkloadIdentityPoolProvider, as long as it was deleted fewer than 30 days ago.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}:undelete", + // "httpMethod": "POST", + // "id": "iam.projects.locations.workloadIdentityPools.providers.undelete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "The name of the operation resource.", + // "description": "Required. The name of the provider to undelete.", // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/operations/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v1/{+name}", + // "path": "v1/{+name}:undelete", + // "request": { + // "$ref": "UndeleteWorkloadIdentityPoolProviderRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -9158,44 +12022,42 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsOperationsGetCall) Do(opts ...goo } -// method id "iam.projects.locations.workloadIdentityPools.providers.create": +// method id "iam.projects.locations.workloadIdentityPools.providers.keys.create": -type ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall struct { - s *Service - parent string - workloadidentitypoolprovider *WorkloadIdentityPoolProvider - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsWorkloadIdentityPoolsProvidersKeysCreateCall struct { + s *Service + parent 
string + workloadidentitypoolproviderkey *WorkloadIdentityPoolProviderKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Create: Creates a new WorkloadIdentityPoolProvider in a -// WorkloadIdentityPool. You cannot reuse the name of a deleted provider -// until 30 days after deletion. +// Create: Create a new WorkloadIdentityPoolProviderKey in a +// WorkloadIdentityPoolProvider. // -// - parent: The pool to create this provider in. -func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) Create(parent string, workloadidentitypoolprovider *WorkloadIdentityPoolProvider) *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall { - c := &ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - parent: The parent provider resource to create the key in. +func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysService) Create(parent string, workloadidentitypoolproviderkey *WorkloadIdentityPoolProviderKey) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysCreateCall { + c := &ProjectsLocationsWorkloadIdentityPoolsProvidersKeysCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent - c.workloadidentitypoolprovider = workloadidentitypoolprovider + c.workloadidentitypoolproviderkey = workloadidentitypoolproviderkey return c } -// WorkloadIdentityPoolProviderId sets the optional parameter -// "workloadIdentityPoolProviderId": Required. The ID for the provider, -// which becomes the final component of the resource name. This value -// must be 4-32 characters, and may contain the characters [a-z0-9-]. -// The prefix `gcp-` is reserved for use by Google, and may not be -// specified. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) WorkloadIdentityPoolProviderId(workloadIdentityPoolProviderId string) *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall { - c.urlParams_.Set("workloadIdentityPoolProviderId", workloadIdentityPoolProviderId) +// WorkloadIdentityPoolProviderKeyId sets the optional parameter +// "workloadIdentityPoolProviderKeyId": Required. The ID to use for the +// key, which becomes the final component of the resource name. This +// value should be 4-32 characters, and may contain the characters +// [a-z0-9-]. +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysCreateCall) WorkloadIdentityPoolProviderKeyId(workloadIdentityPoolProviderKeyId string) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysCreateCall { + c.urlParams_.Set("workloadIdentityPoolProviderKeyId", workloadIdentityPoolProviderKeyId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9203,21 +12065,21 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) Fields(s ... // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysCreateCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -9225,14 +12087,14 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) doRequest(al } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.workloadidentitypoolprovider) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.workloadidentitypoolproviderkey) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/providers") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/keys") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -9245,14 +12107,14 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) doRequest(al return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.locations.workloadIdentityPools.providers.create" call. +// Do executes the "iam.projects.locations.workloadIdentityPools.providers.keys.create" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9283,30 +12145,30 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) Do(opts ...g } return ret, nil // { - // "description": "Creates a new WorkloadIdentityPoolProvider in a WorkloadIdentityPool. 
You cannot reuse the name of a deleted provider until 30 days after deletion.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers", + // "description": "Create a new WorkloadIdentityPoolProviderKey in a WorkloadIdentityPoolProvider.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}/keys", // "httpMethod": "POST", - // "id": "iam.projects.locations.workloadIdentityPools.providers.create", + // "id": "iam.projects.locations.workloadIdentityPools.providers.keys.create", // "parameterOrder": [ // "parent" // ], // "parameters": { // "parent": { - // "description": "Required. The pool to create this provider in.", + // "description": "Required. The parent provider resource to create the key in.", // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+$", // "required": true, // "type": "string" // }, - // "workloadIdentityPoolProviderId": { - // "description": "Required. The ID for the provider, which becomes the final component of the resource name. This value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified.", + // "workloadIdentityPoolProviderKeyId": { + // "description": "Required. The ID to use for the key, which becomes the final component of the resource name. 
This value should be 4-32 characters, and may contain the characters [a-z0-9-].", // "location": "query", // "type": "string" // } // }, - // "path": "v1/{+parent}/providers", + // "path": "v1/{+parent}/keys", // "request": { - // "$ref": "WorkloadIdentityPoolProvider" + // "$ref": "WorkloadIdentityPoolProviderKey" // }, // "response": { // "$ref": "Operation" @@ -9318,9 +12180,9 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersCreateCall) Do(opts ...g } -// method id "iam.projects.locations.workloadIdentityPools.providers.delete": +// method id "iam.projects.locations.workloadIdentityPools.providers.keys.delete": -type ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall struct { +type ProjectsLocationsWorkloadIdentityPoolsProvidersKeysDeleteCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -9328,15 +12190,12 @@ type ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall struct { header_ http.Header } -// Delete: Deletes a WorkloadIdentityPoolProvider. Deleting a provider -// does not revoke credentials that have already been issued; they -// continue to grant access. You can undelete a provider for 30 days. -// After 30 days, deletion is permanent. You cannot update deleted -// providers. However, you can view and list them. +// Delete: Deletes an WorkloadIdentityPoolProviderKey. You can undelete +// a key for 30 days. After 30 days, deletion is permanent. // -// - name: The name of the provider to delete. -func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) Delete(name string) *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall { - c := &ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The name of the encryption key to delete. 
+func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysService) Delete(name string) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysDeleteCall { + c := &ProjectsLocationsWorkloadIdentityPoolsProvidersKeysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } @@ -9344,7 +12203,7 @@ func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) Delete(name str // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9352,21 +12211,21 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) Fields(s ... // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysDeleteCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -9389,14 +12248,14 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) doRequest(al return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.locations.workloadIdentityPools.providers.delete" call. +// Do executes the "iam.projects.locations.workloadIdentityPools.providers.keys.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9427,18 +12286,18 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) Do(opts ...g } return ret, nil // { - // "description": "Deletes a WorkloadIdentityPoolProvider. 
Deleting a provider does not revoke credentials that have already been issued; they continue to grant access. You can undelete a provider for 30 days. After 30 days, deletion is permanent. You cannot update deleted providers. However, you can view and list them.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}", + // "description": "Deletes an WorkloadIdentityPoolProviderKey. You can undelete a key for 30 days. After 30 days, deletion is permanent.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}/keys/{keysId}", // "httpMethod": "DELETE", - // "id": "iam.projects.locations.workloadIdentityPools.providers.delete", + // "id": "iam.projects.locations.workloadIdentityPools.providers.keys.delete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The name of the provider to delete.", + // "description": "Required. 
The name of the encryption key to delete.", // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+/keys/[^/]+$", // "required": true, // "type": "string" // } @@ -9454,9 +12313,9 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersDeleteCall) Do(opts ...g } -// method id "iam.projects.locations.workloadIdentityPools.providers.get": +// method id "iam.projects.locations.workloadIdentityPools.providers.keys.get": -type ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall struct { +type ProjectsLocationsWorkloadIdentityPoolsProvidersKeysGetCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -9465,11 +12324,11 @@ type ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall struct { header_ http.Header } -// Get: Gets an individual WorkloadIdentityPoolProvider. +// Get: Gets an individual WorkloadIdentityPoolProviderKey. // -// - name: The name of the provider to retrieve. -func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) Get(name string) *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall { - c := &ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The name of the key to retrieve. +func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysService) Get(name string) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysGetCall { + c := &ProjectsLocationsWorkloadIdentityPoolsProvidersKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } @@ -9477,7 +12336,7 @@ func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) Get(name string // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9487,7 +12346,7 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) Fields(s ...goo // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysGetCall { c.ifNoneMatch_ = entityTag return c } @@ -9495,21 +12354,21 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) IfNoneMatch(ent // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysGetCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -9535,14 +12394,14 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) doRequest(alt s return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.locations.workloadIdentityPools.providers.get" call. -// Exactly one of *WorkloadIdentityPoolProvider or error will be +// Do executes the "iam.projects.locations.workloadIdentityPools.providers.keys.get" call. +// Exactly one of *WorkloadIdentityPoolProviderKey or error will be // non-nil. Any non-2xx status code is an error. Response headers are in -// either *WorkloadIdentityPoolProvider.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was +// either *WorkloadIdentityPoolProviderKey.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) Do(opts ...googleapi.CallOption) (*WorkloadIdentityPoolProvider, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysGetCall) Do(opts ...googleapi.CallOption) (*WorkloadIdentityPoolProviderKey, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9561,7 +12420,7 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) Do(opts ...goog if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &WorkloadIdentityPoolProvider{ + ret := &WorkloadIdentityPoolProviderKey{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9573,25 +12432,25 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) Do(opts ...goog } return ret, nil // { - // "description": "Gets an individual WorkloadIdentityPoolProvider.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}", + // "description": "Gets an individual WorkloadIdentityPoolProviderKey.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}/keys/{keysId}", // "httpMethod": "GET", - // "id": "iam.projects.locations.workloadIdentityPools.providers.get", + // "id": "iam.projects.locations.workloadIdentityPools.providers.keys.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The name of the provider to retrieve.", + // "description": "Required. 
The name of the key to retrieve.", // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+/keys/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1/{+name}", // "response": { - // "$ref": "WorkloadIdentityPoolProvider" + // "$ref": "WorkloadIdentityPoolProviderKey" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -9600,9 +12459,9 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersGetCall) Do(opts ...goog } -// method id "iam.projects.locations.workloadIdentityPools.providers.list": +// method id "iam.projects.locations.workloadIdentityPools.providers.keys.list": -type ProjectsLocationsWorkloadIdentityPoolsProvidersListCall struct { +type ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall struct { s *Service parent string urlParams_ gensupport.URLParams @@ -9611,37 +12470,36 @@ type ProjectsLocationsWorkloadIdentityPoolsProvidersListCall struct { header_ http.Header } -// List: Lists all non-deleted WorkloadIdentityPoolProviders in a -// WorkloadIdentityPool. If `show_deleted` is set to `true`, then -// deleted providers are also listed. +// List: Lists all non-deleted WorkloadIdentityPoolProviderKeys in a +// project. If show_deleted is set to `true`, then deleted pools are +// also listed. // -// - parent: The pool to list providers for. -func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) List(parent string) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { - c := &ProjectsLocationsWorkloadIdentityPoolsProvidersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - parent: The parent provider resource to list encryption keys for. 
+func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysService) List(parent string) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall { + c := &ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } // PageSize sets the optional parameter "pageSize": The maximum number -// of providers to return. If unspecified, at most 50 providers are -// returned. The maximum value is 100; values above 100 are truncated to -// 100. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) PageSize(pageSize int64) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { +// of keys to return. If unspecified, all keys are returned. The maximum +// value is 10; values above 10 are truncated to 10. +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall) PageSize(pageSize int64) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A page token, -// received from a previous `ListWorkloadIdentityPoolProviders` call. +// received from a previous `ListWorkloadIdentityPoolProviderKeys` call. // Provide this to retrieve the subsequent page. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) PageToken(pageToken string) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall) PageToken(pageToken string) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall { c.urlParams_.Set("pageToken", pageToken) return c } // ShowDeleted sets the optional parameter "showDeleted": Whether to -// return soft-deleted providers. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) ShowDeleted(showDeleted bool) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { +// return soft deleted resources as well. 
+func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall) ShowDeleted(showDeleted bool) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall { c.urlParams_.Set("showDeleted", fmt.Sprint(showDeleted)) return c } @@ -9649,7 +12507,7 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) ShowDeleted(sh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9659,7 +12517,7 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Fields(s ...go // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall) IfNoneMatch(entityTag string) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall { c.ifNoneMatch_ = entityTag return c } @@ -9667,21 +12525,21 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) IfNoneMatch(en // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -9694,7 +12552,7 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) doRequest(alt var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/providers") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/keys") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -9707,16 +12565,16 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) doRequest(alt return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.locations.workloadIdentityPools.providers.list" call. -// Exactly one of *ListWorkloadIdentityPoolProvidersResponse or error +// Do executes the "iam.projects.locations.workloadIdentityPools.providers.keys.list" call. 
+// Exactly one of *ListWorkloadIdentityPoolProviderKeysResponse or error // will be non-nil. Any non-2xx status code is an error. Response // headers are in either -// *ListWorkloadIdentityPoolProvidersResponse.ServerResponse.Header or -// (if a response was returned at all) in +// *ListWorkloadIdentityPoolProviderKeysResponse.ServerResponse.Header +// or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was // returned. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Do(opts ...googleapi.CallOption) (*ListWorkloadIdentityPoolProvidersResponse, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall) Do(opts ...googleapi.CallOption) (*ListWorkloadIdentityPoolProviderKeysResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9735,7 +12593,7 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Do(opts ...goo if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ListWorkloadIdentityPoolProvidersResponse{ + ret := &ListWorkloadIdentityPoolProviderKeysResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9747,41 +12605,41 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Do(opts ...goo } return ret, nil // { - // "description": "Lists all non-deleted WorkloadIdentityPoolProviders in a WorkloadIdentityPool. If `show_deleted` is set to `true`, then deleted providers are also listed.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers", + // "description": "Lists all non-deleted WorkloadIdentityPoolProviderKeys in a project. 
If show_deleted is set to `true`, then deleted pools are also listed.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}/keys", // "httpMethod": "GET", - // "id": "iam.projects.locations.workloadIdentityPools.providers.list", + // "id": "iam.projects.locations.workloadIdentityPools.providers.keys.list", // "parameterOrder": [ // "parent" // ], // "parameters": { // "pageSize": { - // "description": "The maximum number of providers to return. If unspecified, at most 50 providers are returned. The maximum value is 100; values above 100 are truncated to 100.", + // "description": "The maximum number of keys to return. If unspecified, all keys are returned. The maximum value is 10; values above 10 are truncated to 10.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "A page token, received from a previous `ListWorkloadIdentityPoolProviders` call. Provide this to retrieve the subsequent page.", + // "description": "A page token, received from a previous `ListWorkloadIdentityPoolProviderKeys` call. Provide this to retrieve the subsequent page.", // "location": "query", // "type": "string" // }, // "parent": { - // "description": "Required. The pool to list providers for.", + // "description": "Required. 
The parent provider resource to list encryption keys for.", // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+$", // "required": true, // "type": "string" // }, // "showDeleted": { - // "description": "Whether to return soft-deleted providers.", + // "description": "Whether to return soft deleted resources as well.", // "location": "query", // "type": "boolean" // } // }, - // "path": "v1/{+parent}/providers", + // "path": "v1/{+parent}/keys", // "response": { - // "$ref": "ListWorkloadIdentityPoolProvidersResponse" + // "$ref": "ListWorkloadIdentityPoolProviderKeysResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" @@ -9793,7 +12651,7 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Do(opts ...goo // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Pages(ctx context.Context, f func(*ListWorkloadIdentityPoolProvidersResponse) error) error { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysListCall) Pages(ctx context.Context, f func(*ListWorkloadIdentityPoolProviderKeysResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -9811,187 +12669,32 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersListCall) Pages(ctx cont } } -// method id "iam.projects.locations.workloadIdentityPools.providers.patch": - -type ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall struct { - s *Service - name string - workloadidentitypoolprovider *WorkloadIdentityPoolProvider - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates an existing WorkloadIdentityPoolProvider. -// -// - name: Output only. The resource name of the provider. -func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) Patch(name string, workloadidentitypoolprovider *WorkloadIdentityPoolProvider) *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall { - c := &ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.workloadidentitypoolprovider = workloadidentitypoolprovider - return c -} - -// UpdateMask sets the optional parameter "updateMask": Required. The -// list of fields to update. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall) UpdateMask(updateMask string) *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall { - c.urlParams_.Set("updateMask", updateMask) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.workloadidentitypoolprovider) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "iam.projects.locations.workloadIdentityPools.providers.patch" call. -// Exactly one of *Operation or error will be non-nil. 
Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an existing WorkloadIdentityPoolProvider.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}", - // "httpMethod": "PATCH", - // "id": "iam.projects.locations.workloadIdentityPools.providers.patch", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Output only. The resource name of the provider.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "updateMask": { - // "description": "Required. 
The list of fields to update.", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "request": { - // "$ref": "WorkloadIdentityPoolProvider" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "iam.projects.locations.workloadIdentityPools.providers.undelete": +// method id "iam.projects.locations.workloadIdentityPools.providers.keys.undelete": -type ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall struct { - s *Service - name string - undeleteworkloadidentitypoolproviderrequest *UndeleteWorkloadIdentityPoolProviderRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsLocationsWorkloadIdentityPoolsProvidersKeysUndeleteCall struct { + s *Service + name string + undeleteworkloadidentitypoolproviderkeyrequest *UndeleteWorkloadIdentityPoolProviderKeyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Undelete: Undeletes a WorkloadIdentityPoolProvider, as long as it was -// deleted fewer than 30 days ago. +// Undelete: Undeletes an WorkloadIdentityPoolProviderKey, as long as it +// was deleted fewer than 30 days ago. // -// - name: The name of the provider to undelete. -func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersService) Undelete(name string, undeleteworkloadidentitypoolproviderrequest *UndeleteWorkloadIdentityPoolProviderRequest) *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall { - c := &ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - name: The name of the encryption key to undelete. 
+func (r *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysService) Undelete(name string, undeleteworkloadidentitypoolproviderkeyrequest *UndeleteWorkloadIdentityPoolProviderKeyRequest) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysUndeleteCall { + c := &ProjectsLocationsWorkloadIdentityPoolsProvidersKeysUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.undeleteworkloadidentitypoolproviderrequest = undeleteworkloadidentitypoolproviderrequest + c.undeleteworkloadidentitypoolproviderkeyrequest = undeleteworkloadidentitypoolproviderkeyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysUndeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysUndeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9999,21 +12702,21 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) Fields(s . // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysUndeleteCall) Context(ctx context.Context) *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysUndeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) Header() http.Header { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysUndeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -10021,7 +12724,7 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) doRequest( } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleteworkloadidentitypoolproviderrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleteworkloadidentitypoolproviderkeyrequest) if err != nil { return nil, err } @@ -10041,14 +12744,14 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) doRequest( return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "iam.projects.locations.workloadIdentityPools.providers.undelete" call. +// Do executes the "iam.projects.locations.workloadIdentityPools.providers.keys.undelete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersKeysUndeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -10079,25 +12782,25 @@ func (c *ProjectsLocationsWorkloadIdentityPoolsProvidersUndeleteCall) Do(opts .. } return ret, nil // { - // "description": "Undeletes a WorkloadIdentityPoolProvider, as long as it was deleted fewer than 30 days ago.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}:undelete", + // "description": "Undeletes an WorkloadIdentityPoolProviderKey, as long as it was deleted fewer than 30 days ago.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workloadIdentityPools/{workloadIdentityPoolsId}/providers/{providersId}/keys/{keysId}:undelete", // "httpMethod": "POST", - // "id": "iam.projects.locations.workloadIdentityPools.providers.undelete", + // "id": "iam.projects.locations.workloadIdentityPools.providers.keys.undelete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The name of the provider to undelete.", + // "description": "Required. 
The name of the encryption key to undelete.", // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+$", + // "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+/keys/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1/{+name}:undelete", // "request": { - // "$ref": "UndeleteWorkloadIdentityPoolProviderRequest" + // "$ref": "UndeleteWorkloadIdentityPoolProviderKeyRequest" // }, // "response": { // "$ref": "Operation" @@ -13109,6 +15812,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { + // "deprecated": true, // "description": "**Note:** This method is deprecated. Use the [`signBlob`](https://cloud.google.com/iam/help/rest-credentials/v1/projects.serviceAccounts/signBlob) method in the IAM Service Account Credentials API instead. If you currently use this method, see the [migration guide](https://cloud.google.com/iam/help/credentials/migrate-api) for instructions. Signs a blob using the system-managed private key for a ServiceAccount.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", // "httpMethod": "POST", @@ -13270,6 +15974,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { + // "deprecated": true, // "description": "**Note:** This method is deprecated. Use the [`signJwt`](https://cloud.google.com/iam/help/rest-credentials/v1/projects.serviceAccounts/signJwt) method in the IAM Service Account Credentials API instead. If you currently use this method, see the [migration guide](https://cloud.google.com/iam/help/credentials/migrate-api) for instructions. 
Signs a JSON Web Token (JWT) using the system-managed private key for a ServiceAccount.", // "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", // "httpMethod": "POST", diff --git a/terraform/providers/google/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/terraform/providers/google/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index 29302a1e99..0a6304d51d 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -71,6 +71,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "iamcredentials:v1" const apiName = "iamcredentials" diff --git a/terraform/providers/google/vendor/google.golang.org/api/impersonate/idtoken.go b/terraform/providers/google/vendor/google.golang.org/api/impersonate/idtoken.go index a2defff151..9000b8e0f6 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/impersonate/idtoken.go +++ b/terraform/providers/google/vendor/google.golang.org/api/impersonate/idtoken.go @@ -10,7 +10,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "time" @@ -109,7 +108,7 @@ func (i impersonatedIDTokenSource) Token() (*oauth2.Token, error) { return nil, fmt.Errorf("impersonate: unable to generate ID token: %v", err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) if err != nil { return nil, fmt.Errorf("impersonate: unable to read body: %v", err) } diff --git a/terraform/providers/google/vendor/google.golang.org/api/impersonate/impersonate.go b/terraform/providers/google/vendor/google.golang.org/api/impersonate/impersonate.go index 52c32589b7..86d5eb82d5 100644 --- 
a/terraform/providers/google/vendor/google.golang.org/api/impersonate/impersonate.go +++ b/terraform/providers/google/vendor/google.golang.org/api/impersonate/impersonate.go @@ -10,7 +10,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "time" @@ -161,7 +160,7 @@ func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) if err != nil { return nil, fmt.Errorf("impersonate: unable to read body: %v", err) } diff --git a/terraform/providers/google/vendor/google.golang.org/api/impersonate/user.go b/terraform/providers/google/vendor/google.golang.org/api/impersonate/user.go index 059deab711..c234abb899 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/impersonate/user.go +++ b/terraform/providers/google/vendor/google.golang.org/api/impersonate/user.go @@ -10,7 +10,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -123,7 +122,7 @@ func (u userTokenSource) signJWT() (string, error) { if err != nil { return "", fmt.Errorf("impersonate: unable to sign JWT: %v", err) } - body, err := ioutil.ReadAll(io.LimitReader(rawResp.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(rawResp.Body, 1<<20)) if err != nil { return "", fmt.Errorf("impersonate: unable to read body: %v", err) } @@ -148,7 +147,7 @@ func (u userTokenSource) exchangeToken(signedJWT string) (*oauth2.Token, error) if err != nil { return nil, fmt.Errorf("impersonate: unable to exchange token: %v", err) } - body, err := ioutil.ReadAll(io.LimitReader(rawResp.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(rawResp.Body, 1<<20)) if err != nil { return nil, fmt.Errorf("impersonate: unable to read body: %v", err) } diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/cba.go 
b/terraform/providers/google/vendor/google.golang.org/api/internal/cba.go new file mode 100644 index 0000000000..cecbb9ba11 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/api/internal/cba.go @@ -0,0 +1,282 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// cba.go (certificate-based access) contains utils for implementing Device Certificate +// Authentication according to https://google.aip.dev/auth/4114 and Default Credentials +// for Google Cloud Virtual Environments according to https://google.aip.dev/auth/4115. +// +// The overall logic for DCA is as follows: +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If user does not specify client certificate, we will attempt to use default +// client certificate. +// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +// client certificate is available and defaultEndpoint otherwise. +// +// Implications of the above logic: +// 1. If the user specifies a non-mTLS endpoint override but client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. If the user specifies an mTLS endpoint override but client certificate is not +// available, we will not fail-fast, but let backend throw error when connecting. +// +// If running within Google's cloud environment, and client certificate is not specified +// and not available through DCA, we will try mTLS with credentials held by +// the Secure Session Agent, which is part of Google's cloud infrastructure. +// +// We would like to avoid introducing client-side logic that parses whether the +// endpoint override is an mTLS url, since the url pattern may change at anytime. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. 
+ +// Package internal supports the options and transport packages. +package internal + +import ( + "context" + "crypto/tls" + "net" + "net/url" + "os" + "strings" + + "github.com/google/s2a-go" + "github.com/google/s2a-go/fallback" + "google.golang.org/api/internal/cert" + "google.golang.org/grpc/credentials" +) + +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" + + // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. + googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" +) + +// getClientCertificateSourceAndEndpoint is a convenience function that invokes +// getClientCertificateSource and getEndpoint sequentially and returns the client +// cert source and endpoint as a tuple. +func getClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return nil, "", err + } + endpoint, err := getEndpoint(settings, clientCertSource) + if err != nil { + return nil, "", err + } + return clientCertSource, endpoint, nil +} + +type transportConfig struct { + clientCertSource cert.Source // The client certificate source. + endpoint string // The corresponding endpoint to use based on client certificate source. + s2aAddress string // The S2A address if it can be used, otherwise an empty string. + s2aMTLSEndpoint string // The MTLS endpoint to use with S2A. +} + +func getTransportConfig(settings *DialSettings) (*transportConfig, error) { + clientCertSource, endpoint, err := getClientCertificateSourceAndEndpoint(settings) + if err != nil { + return &transportConfig{ + clientCertSource: nil, endpoint: "", s2aAddress: "", s2aMTLSEndpoint: "", + }, err + } + defaultTransportConfig := transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: "", + s2aMTLSEndpoint: "", + } + + // Check the env to determine whether to use S2A. 
+ if !isGoogleS2AEnabled() { + return &defaultTransportConfig, nil + } + + // If client cert is found, use that over S2A. + // If MTLS is not enabled for the endpoint, skip S2A. + if clientCertSource != nil || !mtlsEndpointEnabledForS2A() { + return &defaultTransportConfig, nil + } + s2aMTLSEndpoint := settings.DefaultMTLSEndpoint + // If there is endpoint override, honor it. + if settings.Endpoint != "" { + s2aMTLSEndpoint = endpoint + } + s2aAddress := GetS2AAddress() + if s2aAddress == "" { + return &defaultTransportConfig, nil + } + return &transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: s2aAddress, + s2aMTLSEndpoint: s2aMTLSEndpoint, + }, nil +} + +func isGoogleS2AEnabled() bool { + return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" +} + +// getClientCertificateSource returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as client +// error (ex. corrupt metadata file). +// +// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE +// must be set to "true" to allow certificate to be used (including user provided +// certificates). For details, see AIP-4114. +func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { + if !isClientCertificateEnabled() { + return nil, nil + } else if settings.ClientCertSource != nil { + return settings.ClientCertSource, nil + } else { + return cert.DefaultSource() + } +} + +func isClientCertificateEnabled() bool { + useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") + // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. 
+ return strings.ToLower(useClientCert) == "true" +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint". +// +// If no endpoint override is specified, we will either return the default endpoint or +// the default mTLS endpoint if a client certificate is available. +// +// You can override the default endpoint choice (mtls vs. regular) by setting the +// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// +// If the endpoint override is an address (host:port) rather than full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint. For example, WithEndpoint("myhost:8000") and +// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" +func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { + if settings.Endpoint == "" { + mtlsMode := getMTLSMode() + if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { + return settings.DefaultMTLSEndpoint, nil + } + return settings.DefaultEndpoint, nil + } + if strings.Contains(settings.Endpoint, "://") { + // User passed in a full URL path, use it verbatim. + return settings.Endpoint, nil + } + if settings.DefaultEndpoint == "" { + // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. + // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. + return settings.Endpoint, nil + } + + // Assume user-provided endpoint is host[:port], merge it with the default endpoint. + return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) +} + +func getMTLSMode() string { + mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") + if mode == "" { + mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. 
+ } + if mode == "" { + return mTLSModeAuto + } + return strings.ToLower(mode) +} + +func mergeEndpoints(baseURL, newHost string) (string, error) { + u, err := url.Parse(fixScheme(baseURL)) + if err != nil { + return "", err + } + return strings.Replace(baseURL, u.Host, newHost, 1), nil +} + +func fixScheme(baseURL string) string { + if !strings.Contains(baseURL, "://") { + return "https://" + baseURL + } + return baseURL +} + +// GetGRPCTransportConfigAndEndpoint returns an instance of credentials.TransportCredentials, and the +// corresponding endpoint to use for GRPC client. +func GetGRPCTransportConfigAndEndpoint(settings *DialSettings) (credentials.TransportCredentials, string, error) { + config, err := getTransportConfig(settings) + if err != nil { + return nil, "", err + } + + defaultTransportCreds := credentials.NewTLS(&tls.Config{ + GetClientCertificate: config.clientCertSource, + }) + if config.s2aAddress == "" { + return defaultTransportCreds, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackClientHandshakeFunc: fallbackHandshake, + } + } + + s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ + S2AAddress: config.s2aAddress, + FallbackOpts: fallbackOpts, + }) + if err != nil { + // Use default if we cannot initialize S2A client transport credentials. + return defaultTransportCreds, config.endpoint, nil + } + return s2aTransportCreds, config.s2aMTLSEndpoint, nil +} + +// GetHTTPTransportConfigAndEndpoint returns a client certificate source, a function for dialing MTLS with S2A, +// and the endpoint to use for HTTP client. 
+func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, func(context.Context, string, string) (net.Conn, error), string, error) { + config, err := getTransportConfig(settings) + if err != nil { + return nil, nil, "", err + } + + if config.s2aAddress == "" { + return config.clientCertSource, nil, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackURL, err := url.Parse(config.endpoint); err == nil { + if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackDialer: &s2a.FallbackDialer{ + Dialer: fallbackDialer, + ServerAddr: fallbackServerAddr, + }, + } + } + } + + dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ + S2AAddress: config.s2aAddress, + FallbackOpts: fallbackOpts, + }) + return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil +} + +// mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection. +var mtlsEndpointEnabledForS2A = func() bool { + // TODO(xmenxk): determine this via discovery config. 
+ return true +} diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go b/terraform/providers/google/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go index 5913cab801..afd79ffe2b 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go +++ b/terraform/providers/google/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go @@ -18,7 +18,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "os/exec" "os/user" @@ -59,7 +58,7 @@ func NewSecureConnectSource(configFilePath string) (Source, error) { configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) } - file, err := ioutil.ReadFile(configFilePath) + file, err := os.ReadFile(configFilePath) if err != nil { if errors.Is(err, os.ErrNotExist) { // Config file missing means Secure Connect is not supported. diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/creds.go b/terraform/providers/google/vendor/google.golang.org/api/internal/creds.go index 63c6609220..92b3acf6ed 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/internal/creds.go +++ b/terraform/providers/google/vendor/google.golang.org/api/internal/creds.go @@ -10,7 +10,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "net" "net/http" "os" @@ -48,7 +47,7 @@ func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, erro return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) } if ds.CredentialsFile != "" { - data, err := ioutil.ReadFile(ds.CredentialsFile) + data, err := os.ReadFile(ds.CredentialsFile) if err != nil { return nil, fmt.Errorf("cannot read credentials file: %v", err) } @@ -92,7 +91,7 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g // Determine configurations for the OAuth2 transport, which is separate from the API transport. 
// The OAuth2 transport and endpoint will be configured for mTLS if applicable. - clientCertSource, oauth2Endpoint, err := GetClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) + clientCertSource, oauth2Endpoint, err := getClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/dca.go b/terraform/providers/google/vendor/google.golang.org/api/internal/dca.go deleted file mode 100644 index 204a3fd2f3..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/api/internal/dca.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package dca contains utils for implementing Device Certificate -// Authentication according to https://google.aip.dev/auth/4114 -// -// The overall logic for DCA is as follows: -// 1. If both endpoint override and client certificate are specified, use them as is. -// 2. If user does not specify client certificate, we will attempt to use default -// client certificate. -// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if -// client certificate is available and defaultEndpoint otherwise. -// -// Implications of the above logic: -// 1. If the user specifies a non-mTLS endpoint override but client certificate is -// available, we will pass along the cert anyway and let the server decide what to do. -// 2. If the user specifies an mTLS endpoint override but client certificate is not -// available, we will not fail-fast, but let backend throw error when connecting. -// -// We would like to avoid introducing client-side logic that parses whether the -// endpoint override is an mTLS url, since the url pattern may change at anytime. -// -// This package is not intended for use by end developers. 
Use the -// google.golang.org/api/option package to configure API clients. - -// Package internal supports the options and transport packages. -package internal - -import ( - "net/url" - "os" - "strings" - - "google.golang.org/api/internal/cert" -) - -const ( - mTLSModeAlways = "always" - mTLSModeNever = "never" - mTLSModeAuto = "auto" -) - -// GetClientCertificateSourceAndEndpoint is a convenience function that invokes -// getClientCertificateSource and getEndpoint sequentially and returns the client -// cert source and endpoint as a tuple. -func GetClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { - clientCertSource, err := getClientCertificateSource(settings) - if err != nil { - return nil, "", err - } - endpoint, err := getEndpoint(settings, clientCertSource) - if err != nil { - return nil, "", err - } - return clientCertSource, endpoint, nil -} - -// getClientCertificateSource returns a default client certificate source, if -// not provided by the user. -// -// A nil default source can be returned if the source does not exist. Any exceptions -// encountered while initializing the default source will be reported as client -// error (ex. corrupt metadata file). -// -// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE -// must be set to "true" to allow certificate to be used (including user provided -// certificates). For details, see AIP-4114. -func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { - if !isClientCertificateEnabled() { - return nil, nil - } else if settings.ClientCertSource != nil { - return settings.ClientCertSource, nil - } else { - return cert.DefaultSource() - } -} - -func isClientCertificateEnabled() bool { - useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") - // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. 
- return strings.ToLower(useClientCert) == "true" -} - -// getEndpoint returns the endpoint for the service, taking into account the -// user-provided endpoint override "settings.Endpoint". -// -// If no endpoint override is specified, we will either return the default endpoint or -// the default mTLS endpoint if a client certificate is available. -// -// You can override the default endpoint choice (mtls vs. regular) by setting the -// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. -// -// If the endpoint override is an address (host:port) rather than full base -// URL (ex. https://...), then the user-provided address will be merged into -// the default endpoint. For example, WithEndpoint("myhost:8000") and -// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" -func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { - if settings.Endpoint == "" { - mtlsMode := getMTLSMode() - if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { - return settings.DefaultMTLSEndpoint, nil - } - return settings.DefaultEndpoint, nil - } - if strings.Contains(settings.Endpoint, "://") { - // User passed in a full URL path, use it verbatim. - return settings.Endpoint, nil - } - if settings.DefaultEndpoint == "" { - // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. - // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. - return settings.Endpoint, nil - } - - // Assume user-provided endpoint is host[:port], merge it with the default endpoint. - return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) -} - -func getMTLSMode() string { - mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") - if mode == "" { - mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. 
- } - if mode == "" { - return mTLSModeAuto - } - return strings.ToLower(mode) -} - -func mergeEndpoints(baseURL, newHost string) (string, error) { - u, err := url.Parse(fixScheme(baseURL)) - if err != nil { - return "", err - } - return strings.Replace(baseURL, u.Host, newHost, 1), nil -} - -func fixScheme(baseURL string) string { - if !strings.Contains(baseURL, "://") { - return "https://" + baseURL - } - return baseURL -} diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/media.go b/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/media.go index 8356e7f27b..c048a57084 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/media.go @@ -8,7 +8,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "mime" "mime/multipart" "net/http" @@ -222,8 +221,8 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB toCleanup = append(toCleanup, combined) if fb != nil && fm != nil { getBody = func() (io.ReadCloser, error) { - rb := ioutil.NopCloser(fb()) - rm := ioutil.NopCloser(fm()) + rb := io.NopCloser(fb()) + rm := io.NopCloser(fm()) var mimeBoundary string if _, params, err := mime.ParseMediaType(ctype); err == nil { mimeBoundary = params["boundary"] @@ -243,7 +242,7 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB fb := readerFunc(body) if fb != nil { getBody = func() (io.ReadCloser, error) { - rb := ioutil.NopCloser(fb()) + rb := io.NopCloser(fb()) toCleanup = append(toCleanup, rb) return rb, nil } diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/resumable.go b/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/resumable.go index f168ea6d2b..08e7aacefb 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ 
b/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -43,8 +43,8 @@ type ResumableUpload struct { // retries should happen. ChunkRetryDeadline time.Duration - // Track current request invocation ID and attempt count for retry metric - // headers. + // Track current request invocation ID and attempt count for retry metrics + // and idempotency headers. invocationID string attempts int } @@ -81,10 +81,15 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) + // TODO(b/274504690): Consider dropping gccl-invocation-id key since it + // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). baseXGoogHeader := "gl-go/" + GoVersion() + " gdcl/" + internal.Version invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", rx.invocationID, rx.attempts) req.Header.Set("X-Goog-Api-Client", strings.Join([]string{baseXGoogHeader, invocationHeader}, " ")) + // Set idempotency token header which is used by GCS uploads. + req.Header.Set("X-Goog-Gcs-Idempotency-Token", rx.invocationID) + // Google's upload endpoint uses status code 308 for a // different purpose than the "308 Permanent Redirect" // since-standardized in RFC 7238. Because of the conflict in diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/send.go b/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/send.go index 85c7bcbfdf..693a1b1aba 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/terraform/providers/google/vendor/google.golang.org/api/internal/gensupport/send.go @@ -138,9 +138,14 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r } return resp, ctx.Err() } + + // Set retry metrics and idempotency headers for GCS. 
+ // TODO(b/274504690): Consider dropping gccl-invocation-id key since it + // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") req.Header.Set("X-Goog-Api-Client", xGoogHeader) + req.Header.Set("X-Goog-Gcs-Idempotency-Token", invocationID) resp, err = client.Do(req.WithContext(ctx)) diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/impersonate/impersonate.go b/terraform/providers/google/vendor/google.golang.org/api/internal/impersonate/impersonate.go index b465bbcd12..4b2c775f21 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/internal/impersonate/impersonate.go +++ b/terraform/providers/google/vendor/google.golang.org/api/internal/impersonate/impersonate.go @@ -11,7 +11,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "time" @@ -105,7 +104,7 @@ func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) if err != nil { return nil, fmt.Errorf("impersonate: unable to read body: %v", err) } diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/s2a.go b/terraform/providers/google/vendor/google.golang.org/api/internal/s2a.go new file mode 100644 index 0000000000..c5b421f554 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/api/internal/s2a.go @@ -0,0 +1,136 @@ +// Copyright 2023 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package internal + +import ( + "encoding/json" + "log" + "sync" + "time" + + "cloud.google.com/go/compute/metadata" +) + +const configEndpointSuffix = "googleAutoMtlsConfiguration" + +// The period an MTLS config can be reused before needing refresh. +var configExpiry = time.Hour + +// GetS2AAddress returns the S2A address to be reached via plaintext connection. +func GetS2AAddress() string { + c, err := getMetadataMTLSAutoConfig().Config() + if err != nil { + return "" + } + if !c.Valid() { + return "" + } + return c.S2A.PlaintextAddress +} + +type mtlsConfigSource interface { + Config() (*mtlsConfig, error) +} + +// mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. +var ( + mdsMTLSAutoConfigSource mtlsConfigSource + once sync.Once +) + +// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. +func getMetadataMTLSAutoConfig() mtlsConfigSource { + once.Do(func() { + mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ + src: &metadataMTLSAutoConfig{}, + } + }) + return mdsMTLSAutoConfigSource +} + +// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. +// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. 
+type reuseMTLSConfigSource struct { + src mtlsConfigSource // src.Config() is called when config is expired + mu sync.Mutex // mutex guards config + config *mtlsConfig // cached config +} + +func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { + cs.mu.Lock() + defer cs.mu.Unlock() + + if cs.config.Valid() { + return cs.config, nil + } + c, err := cs.src.Config() + if err != nil { + return nil, err + } + cs.config = c + return c, nil +} + +// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource +// It has the logic to query MDS and return an mtlsConfig +type metadataMTLSAutoConfig struct{} + +var httpGetMetadataMTLSConfig = func() (string, error) { + return metadata.Get(configEndpointSuffix) +} + +func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { + resp, err := httpGetMetadataMTLSConfig() + if err != nil { + log.Printf("querying MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + var config mtlsConfig + err = json.Unmarshal([]byte(resp), &config) + if err != nil { + log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + + if config.S2A == nil { + log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) + return defaultMTLSConfig(), nil + } + + // set new expiry + config.Expiry = time.Now().Add(configExpiry) + return &config, nil +} + +func defaultMTLSConfig() *mtlsConfig { + return &mtlsConfig{ + S2A: &s2aAddresses{ + PlaintextAddress: "", + MTLSAddress: "", + }, + Expiry: time.Now().Add(configExpiry), + } +} + +// s2aAddresses contains the plaintext and/or MTLS S2A addresses. 
+type s2aAddresses struct { + // PlaintextAddress is the plaintext address to reach S2A + PlaintextAddress string `json:"plaintext_address"` + // MTLSAddress is the MTLS address to reach S2A + MTLSAddress string `json:"mtls_address"` +} + +// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. +type mtlsConfig struct { + S2A *s2aAddresses `json:"s2a"` + Expiry time.Time +} + +func (c *mtlsConfig) Valid() bool { + return c != nil && c.S2A != nil && !c.expired() +} +func (c *mtlsConfig) expired() bool { + return c.Expiry.Before(time.Now()) +} diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/settings.go b/terraform/providers/google/vendor/google.golang.org/api/internal/settings.go index 76efdb2277..3a3874df11 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/internal/settings.go +++ b/terraform/providers/google/vendor/google.golang.org/api/internal/settings.go @@ -46,6 +46,7 @@ type DialSettings struct { SkipValidation bool ImpersonationConfig *impersonate.Config EnableDirectPath bool + EnableDirectPathXds bool AllowNonDefaultServiceAccount bool // Google API system parameters. For more information please read: diff --git a/terraform/providers/google/vendor/google.golang.org/api/internal/version.go b/terraform/providers/google/vendor/google.golang.org/api/internal/version.go index 7a4f6d8982..053e4ee2b2 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/internal/version.go +++ b/terraform/providers/google/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "0.114.0" +const Version = "0.130.0" diff --git a/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-api.json b/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-api.json index c03c4adcf0..7baf442ceb 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-api.json @@ -681,7 +681,7 @@ "links": { "methods": { "create": { - "description": "Asynchronously creates linked dataset in BigQuery which makes it possible to use BugQuery to read the logs stored in the bucket. A bucket may currently only contain one link.", + "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. A log bucket may currently only contain one link.", "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/links", "httpMethod": "POST", "id": "logging.billingAccounts.locations.buckets.links.create", @@ -998,14 +998,14 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. 
The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -1184,14 +1184,14 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^billingAccounts/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. 
The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -1221,6 +1221,11 @@ "parent" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "parent": { "description": "Required. 
The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", "location": "path", @@ -1348,6 +1353,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -1388,6 +1398,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. 
The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -2268,7 +2283,7 @@ "links": { "methods": { "create": { - "description": "Asynchronously creates linked dataset in BigQuery which makes it possible to use BugQuery to read the logs stored in the bucket. A bucket may currently only contain one link.", + "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. A log bucket may currently only contain one link.", "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}/links", "httpMethod": "POST", "id": "logging.folders.locations.buckets.links.create", @@ -2585,14 +2600,14 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. 
The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -2771,14 +2786,14 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^folders/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. 
The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -2808,6 +2823,11 @@ "parent" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "parent": { "description": "Required. 
The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", "location": "path", @@ -2935,6 +2955,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -2975,6 +3000,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. 
The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -3353,7 +3383,7 @@ "links": { "methods": { "create": { - "description": "Asynchronously creates linked dataset in BigQuery which makes it possible to use BugQuery to read the logs stored in the bucket. A bucket may currently only contain one link.", + "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. A log bucket may currently only contain one link.", "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}/links", "httpMethod": "POST", "id": "logging.locations.buckets.links.create", @@ -3805,14 +3835,14 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. 
The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -4499,7 +4529,7 @@ "links": { "methods": { "create": { - "description": "Asynchronously creates linked dataset in BigQuery which makes it possible to use BugQuery to read the logs stored in the bucket. A bucket may currently only contain one link.", + "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. A log bucket may currently only contain one link.", "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}/links", "httpMethod": "POST", "id": "logging.organizations.locations.buckets.links.create", @@ -4816,14 +4846,14 @@ "type": "string" }, "parent": { - "description": "Required. 
The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -5002,14 +5032,14 @@ "type": "string" }, "parent": { - "description": "Required. 
The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^organizations/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -5039,6 +5069,11 @@ "parent" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. 
This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "parent": { "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", "location": "path", @@ -5166,6 +5201,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -5206,6 +5246,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. 
The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -5805,7 +5850,7 @@ "links": { "methods": { "create": { - "description": "Asynchronously creates linked dataset in BigQuery which makes it possible to use BugQuery to read the logs stored in the bucket. A bucket may currently only contain one link.", + "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. A log bucket may currently only contain one link.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/links", "httpMethod": "POST", "id": "logging.projects.locations.buckets.links.create", @@ -6122,14 +6167,14 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. 
The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -6308,14 +6353,14 @@ "type": "string" }, "parent": { - "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", "location": "path", "pattern": "^projects/[^/]+$", "required": true, "type": "string" }, "resourceNames": { - "description": "Optional. 
The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", "location": "query", "repeated": true, "type": "string" @@ -6503,6 +6548,11 @@ "parent" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "parent": { "description": "Required. 
The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", "location": "path", @@ -6630,6 +6680,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -6670,6 +6725,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. 
The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -6716,6 +6776,11 @@ "parent" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + "location": "query", + "type": "string" + }, "parent": { "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", "location": "path", @@ -6843,6 +6908,11 @@ "sinkName" ], "parameters": { + "customWriterIdentity": { + "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + "location": "query", + "type": "string" + }, "sinkName": { "description": "Required. 
The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", "location": "path", @@ -7007,7 +7077,7 @@ } } }, - "revision": "20230303", + "revision": "20230623", "rootUrl": "https://logging.googleapis.com/", "schemas": { "BigQueryDataset": { @@ -7293,7 +7363,7 @@ "type": "object" }, "Exponential": { - "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i). Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", + "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. 
Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i).Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", "id": "Exponential", "properties": { "growthFactor": { @@ -7345,7 +7415,7 @@ "type": "string" }, "referer": { - "description": "The referer URL of the request, as defined in HTTP/1.1 Header Field Definitions (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).", + "description": "The referer URL of the request, as defined in HTTP/1.1 Header Field Definitions (https://datatracker.ietf.org/doc/html/rfc2616#section-14.36).", "type": "string" }, "remoteIp": { @@ -7447,7 +7517,7 @@ "type": "object" }, "Linear": { - "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", + "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i).Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", "id": "Linear", "properties": { "numFiniteBuckets": { @@ -7638,7 +7708,7 @@ "id": "ListLogEntriesRequest", "properties": { "filter": { - "description": "Optional. Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. 
The maximum length of a filter is 20,000 characters.", + "description": "Optional. A filter that chooses which log entries to return. For more information, see Logging query language (https://cloud.google.com/logging/docs/view/logging-query-language).Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in resource_names. Referencing a parent resource that is not listed in resource_names will cause the filter to return no results. The maximum length of a filter is 20,000 characters.", "type": "string" }, "orderBy": { @@ -7798,7 +7868,7 @@ "type": "object" }, "Location": { - "description": "A resource that represents Google Cloud Platform location.", + "description": "A resource that represents a Google Cloud location.", "id": "Location", "properties": { "displayName": { @@ -8251,7 +8321,7 @@ "type": "string" }, "destination": { - "description": "Required. The export destination: \"storage.googleapis.com/[GCS_BUCKET]\" \"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\" \"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\" The sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. For more information, see Exporting Logs with Sinks (https://cloud.google.com/logging/docs/api/tasks/exporting-logs).", + "description": "Required. The export destination: \"storage.googleapis.com/[GCS_BUCKET]\" \"bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]\" \"pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]\" \"logging.googleapis.com/projects/[PROJECT_ID]\" The sink's writer_identity, set when the sink is created, must have permission to write to the destination or else the log entries are not exported. 
For more information, see Exporting Logs with Sinks (https://cloud.google.com/logging/docs/api/tasks/exporting-logs).", "type": "string" }, "disabled": { @@ -8809,6 +8879,11 @@ "readOnly": true, "type": "string" }, + "loggingServiceAccountId": { + "description": "Output only. The service account for the given container. Sinks use this service account as their writer_identity if no custom service account is provided.", + "readOnly": true, + "type": "string" + }, "name": { "description": "Output only. The resource name of the settings.", "readOnly": true, diff --git a/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-gen.go b/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-gen.go index 760f3a1d25..39799cc116 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/logging/v2/logging-gen.go @@ -77,6 +77,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "logging:v2" const apiName = "logging" @@ -1349,12 +1350,29 @@ func (s *Explicit) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +func (s *Explicit) UnmarshalJSON(data []byte) error { + type NoMethod Explicit + var s1 struct { + Bounds []gensupport.JSONFloat64 `json:"bounds"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Bounds = make([]float64, len(s1.Bounds)) + for i := range s1.Bounds { + s.Bounds[i] = float64(s1.Bounds[i]) + } + return nil +} + // Exponential: Specifies an exponential sequence of buckets that have a // width that is proportional to the value of the lower bound. Each // bucket represents a constant relative uncertainty on a specific value // in the bucket.There are num_finite_buckets + 2 (= N) buckets. 
Bucket // i has the following boundaries:Upper bound (0 <= i < N-1): scale * -// (growth_factor ^ i). Lower bound (1 <= i < N): scale * (growth_factor +// (growth_factor ^ i).Lower bound (1 <= i < N): scale * (growth_factor // ^ (i - 1)). type Exponential struct { // GrowthFactor: Must be greater than 1. @@ -1435,7 +1453,7 @@ type HttpRequest struct { // Referer: The referer URL of the request, as defined in HTTP/1.1 // Header Field Definitions - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html). + // (https://datatracker.ietf.org/doc/html/rfc2616#section-14.36). Referer string `json:"referer,omitempty"` // RemoteIp: The IP address (IPv4 or IPv6) of the client that issued the @@ -1587,7 +1605,7 @@ func (s *LabelDescriptor) MarshalJSON() ([]byte, error) { // constant absolute uncertainty on the specific value in the // bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has // the following boundaries:Upper bound (0 <= i < N-1): offset + (width -// * i). Lower bound (1 <= i < N): offset + (width * (i - 1)). +// * i).Lower bound (1 <= i < N): offset + (width * (i - 1)). type Linear struct { // NumFiniteBuckets: Must be greater than 0. NumFiniteBuckets int64 `json:"numFiniteBuckets,omitempty"` @@ -1918,11 +1936,14 @@ func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { // ListLogEntriesRequest: The parameters to ListLogEntries. type ListLogEntriesRequest struct { - // Filter: Optional. Only log entries that match the filter are - // returned. An empty filter matches all log entries in the resources - // listed in resource_names. Referencing a parent resource that is not - // listed in resource_names will cause the filter to return no results. - // The maximum length of a filter is 20,000 characters. + // Filter: Optional. A filter that chooses which log entries to return. 
+ // For more information, see Logging query language + // (https://cloud.google.com/logging/docs/view/logging-query-language).Only + // log entries that match the filter are returned. An empty filter + // matches all log entries in the resources listed in resource_names. + // Referencing a parent resource that is not listed in resource_names + // will cause the filter to return no results. The maximum length of a + // filter is 20,000 characters. Filter string `json:"filter,omitempty"` // OrderBy: Optional. How the results should be sorted. Presently, the @@ -2268,7 +2289,7 @@ func (s *ListViewsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Location: A resource that represents Google Cloud Platform location. +// Location: A resource that represents a Google Cloud location. type Location struct { // DisplayName: The friendly name for this location, typically a nearby // city name. For example, "Tokyo". @@ -2999,10 +3020,11 @@ type LogSink struct { // Destination: Required. The export destination: // "storage.googleapis.com/[GCS_BUCKET]" // "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]" - // "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]" The - // sink's writer_identity, set when the sink is created, must have - // permission to write to the destination or else the log entries are - // not exported. For more information, see Exporting Logs with Sinks + // "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]" + // "logging.googleapis.com/projects/[PROJECT_ID]" The sink's + // writer_identity, set when the sink is created, must have permission + // to write to the destination or else the log entries are not exported. + // For more information, see Exporting Logs with Sinks // (https://cloud.google.com/logging/docs/api/tasks/exporting-logs). 
Destination string `json:"destination,omitempty"` @@ -3918,6 +3940,11 @@ type Settings struct { // for more information. KmsServiceAccountId string `json:"kmsServiceAccountId,omitempty"` + // LoggingServiceAccountId: Output only. The service account for the + // given container. Sinks use this service account as their + // writer_identity if no custom service account is provided. + LoggingServiceAccountId string `json:"loggingServiceAccountId,omitempty"` + // Name: Output only. The resource name of the settings. Name string `json:"name,omitempty"` @@ -7192,9 +7219,9 @@ type BillingAccountsLocationsBucketsLinksCreateCall struct { header_ http.Header } -// Create: Asynchronously creates linked dataset in BigQuery which makes -// it possible to use BugQuery to read the logs stored in the bucket. A -// bucket may currently only contain one link. +// Create: Asynchronously creates a linked dataset in BigQuery which +// makes it possible to use BigQuery to read the logs stored in the log +// bucket. A log bucket may currently only contain one link. // // - parent: The full resource name of the bucket to create a link for. // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" @@ -7310,7 +7337,7 @@ func (c *BillingAccountsLocationsBucketsLinksCreateCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Asynchronously creates linked dataset in BigQuery which makes it possible to use BugQuery to read the logs stored in the bucket. A bucket may currently only contain one link.", + // "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. 
A log bucket may currently only contain one link.", // "flatPath": "v2/billingAccounts/{billingAccountsId}/locations/{locationsId}/buckets/{bucketsId}/links", // "httpMethod": "POST", // "id": "logging.billingAccounts.locations.buckets.links.create", @@ -8686,7 +8713,7 @@ type BillingAccountsLocationsBucketsViewsLogsListCall struct { // List: Lists the logs in projects, organizations, folders, or billing // accounts. Only logs that have entries are listed. // -// - parent: The resource name that owns the logs: projects/[PROJECT_ID] +// - parent: The resource name to list logs for: projects/[PROJECT_ID] // organizations/[ORGANIZATION_ID] // billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]. func (r *BillingAccountsLocationsBucketsViewsLogsService) List(parent string) *BillingAccountsLocationsBucketsViewsLogsListCall { @@ -8714,8 +8741,8 @@ func (c *BillingAccountsLocationsBucketsViewsLogsListCall) PageToken(pageToken s return c } -// ResourceNames sets the optional parameter "resourceNames": The -// resource name that owns the logs: +// ResourceNames sets the optional parameter "resourceNames": List of +// resource names to list logs for: // projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/view // s/[VIEW_ID] // organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKE @@ -8725,7 +8752,8 @@ func (c *BillingAccountsLocationsBucketsViewsLogsListCall) PageToken(pageToken s // folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/ // [VIEW_ID]To support legacy queries, it could also be: // projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] -// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID] +// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource +// name in the parent field is added to this list. 
func (c *BillingAccountsLocationsBucketsViewsLogsListCall) ResourceNames(resourceNames ...string) *BillingAccountsLocationsBucketsViewsLogsListCall { c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) return c @@ -8850,14 +8878,14 @@ func (c *BillingAccountsLocationsBucketsViewsLogsListCall) Do(opts ...googleapi. // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", // "location": "path", // "pattern": "^billingAccounts/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", // "required": true, // "type": "string" // }, // "resourceNames": { - // "description": "Optional. The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Optional. 
List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", // "location": "query", // "repeated": true, // "type": "string" @@ -9568,7 +9596,7 @@ type BillingAccountsLogsListCall struct { // List: Lists the logs in projects, organizations, folders, or billing // accounts. Only logs that have entries are listed. // -// - parent: The resource name that owns the logs: projects/[PROJECT_ID] +// - parent: The resource name to list logs for: projects/[PROJECT_ID] // organizations/[ORGANIZATION_ID] // billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]. 
func (r *BillingAccountsLogsService) List(parent string) *BillingAccountsLogsListCall { @@ -9596,8 +9624,8 @@ func (c *BillingAccountsLogsListCall) PageToken(pageToken string) *BillingAccoun return c } -// ResourceNames sets the optional parameter "resourceNames": The -// resource name that owns the logs: +// ResourceNames sets the optional parameter "resourceNames": List of +// resource names to list logs for: // projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/view // s/[VIEW_ID] // organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKE @@ -9607,7 +9635,8 @@ func (c *BillingAccountsLogsListCall) PageToken(pageToken string) *BillingAccoun // folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/ // [VIEW_ID]To support legacy queries, it could also be: // projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] -// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID] +// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource +// name in the parent field is added to this list. func (c *BillingAccountsLogsListCall) ResourceNames(resourceNames ...string) *BillingAccountsLogsListCall { c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) return c @@ -9732,14 +9761,14 @@ func (c *BillingAccountsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLog // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", // "location": "path", // "pattern": "^billingAccounts/[^/]+$", // "required": true, // "type": "string" // }, // "resourceNames": { - // "description": "Optional. 
The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", // "location": "query", // "repeated": true, // "type": "string" @@ -9808,6 +9837,17 @@ func (r *BillingAccountsSinksService) Create(parent string, logsink *LogSink) *B return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. The format must be +// serviceAccount:some@email. This field can only be specified if you +// are routing logs to a destination outside this sink's project. If not +// specified, a Logging service account will automatically be generated. 
+func (c *BillingAccountsSinksCreateCall) CustomWriterIdentity(customWriterIdentity string) *BillingAccountsSinksCreateCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": Determines the kind of IAM identity returned // as writer_identity in the new sink. If this value is omitted or set @@ -9924,6 +9964,11 @@ func (c *BillingAccountsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogS // "parent" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "parent": { // "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", // "location": "path", @@ -10478,6 +10523,17 @@ func (r *BillingAccountsSinksService) Patch(sinkNameid string, logsink *LogSink) return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. Must be of format +// serviceAccount:some@email. This can only be specified if writing to a +// destination outside the sink's project. If not specified, a p4 +// service account will automatically be generated. 
+func (c *BillingAccountsSinksPatchCall) CustomWriterIdentity(customWriterIdentity string) *BillingAccountsSinksPatchCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": See sinks.create for a description of this // field. When updating a sink, the effect of this field on the value of @@ -10607,6 +10663,11 @@ func (c *BillingAccountsSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSi // "sinkName" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "sinkName": { // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", // "location": "path", @@ -10671,6 +10732,17 @@ func (r *BillingAccountsSinksService) Update(sinkNameid string, logsink *LogSink return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. Must be of format +// serviceAccount:some@email. This can only be specified if writing to a +// destination outside the sink's project. If not specified, a p4 +// service account will automatically be generated. 
+func (c *BillingAccountsSinksUpdateCall) CustomWriterIdentity(customWriterIdentity string) *BillingAccountsSinksUpdateCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": See sinks.create for a description of this // field. When updating a sink, the effect of this field on the value of @@ -10800,6 +10872,11 @@ func (c *BillingAccountsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogS // "sinkName" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "sinkName": { // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", // "location": "path", @@ -15195,9 +15272,9 @@ type FoldersLocationsBucketsLinksCreateCall struct { header_ http.Header } -// Create: Asynchronously creates linked dataset in BigQuery which makes -// it possible to use BugQuery to read the logs stored in the bucket. A -// bucket may currently only contain one link. +// Create: Asynchronously creates a linked dataset in BigQuery which +// makes it possible to use BigQuery to read the logs stored in the log +// bucket. A log bucket may currently only contain one link. // // - parent: The full resource name of the bucket to create a link for. 
// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" @@ -15313,7 +15390,7 @@ func (c *FoldersLocationsBucketsLinksCreateCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Asynchronously creates linked dataset in BigQuery which makes it possible to use BugQuery to read the logs stored in the bucket. A bucket may currently only contain one link.", + // "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. A log bucket may currently only contain one link.", // "flatPath": "v2/folders/{foldersId}/locations/{locationsId}/buckets/{bucketsId}/links", // "httpMethod": "POST", // "id": "logging.folders.locations.buckets.links.create", @@ -16689,7 +16766,7 @@ type FoldersLocationsBucketsViewsLogsListCall struct { // List: Lists the logs in projects, organizations, folders, or billing // accounts. Only logs that have entries are listed. // -// - parent: The resource name that owns the logs: projects/[PROJECT_ID] +// - parent: The resource name to list logs for: projects/[PROJECT_ID] // organizations/[ORGANIZATION_ID] // billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]. 
func (r *FoldersLocationsBucketsViewsLogsService) List(parent string) *FoldersLocationsBucketsViewsLogsListCall { @@ -16717,8 +16794,8 @@ func (c *FoldersLocationsBucketsViewsLogsListCall) PageToken(pageToken string) * return c } -// ResourceNames sets the optional parameter "resourceNames": The -// resource name that owns the logs: +// ResourceNames sets the optional parameter "resourceNames": List of +// resource names to list logs for: // projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/view // s/[VIEW_ID] // organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKE @@ -16728,7 +16805,8 @@ func (c *FoldersLocationsBucketsViewsLogsListCall) PageToken(pageToken string) * // folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/ // [VIEW_ID]To support legacy queries, it could also be: // projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] -// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID] +// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource +// name in the parent field is added to this list. func (c *FoldersLocationsBucketsViewsLogsListCall) ResourceNames(resourceNames ...string) *FoldersLocationsBucketsViewsLogsListCall { c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) return c @@ -16853,14 +16931,14 @@ func (c *FoldersLocationsBucketsViewsLogsListCall) Do(opts ...googleapi.CallOpti // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Required. 
The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", // "location": "path", // "pattern": "^folders/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", // "required": true, // "type": "string" // }, // "resourceNames": { - // "description": "Optional. The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", // "location": "query", // "repeated": true, // "type": "string" @@ -17571,7 +17649,7 @@ type FoldersLogsListCall struct { // List: Lists the logs in projects, organizations, folders, or billing // accounts. Only logs that have entries are listed. 
// -// - parent: The resource name that owns the logs: projects/[PROJECT_ID] +// - parent: The resource name to list logs for: projects/[PROJECT_ID] // organizations/[ORGANIZATION_ID] // billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]. func (r *FoldersLogsService) List(parent string) *FoldersLogsListCall { @@ -17599,8 +17677,8 @@ func (c *FoldersLogsListCall) PageToken(pageToken string) *FoldersLogsListCall { return c } -// ResourceNames sets the optional parameter "resourceNames": The -// resource name that owns the logs: +// ResourceNames sets the optional parameter "resourceNames": List of +// resource names to list logs for: // projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/view // s/[VIEW_ID] // organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKE @@ -17610,7 +17688,8 @@ func (c *FoldersLogsListCall) PageToken(pageToken string) *FoldersLogsListCall { // folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/ // [VIEW_ID]To support legacy queries, it could also be: // projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] -// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID] +// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource +// name in the parent field is added to this list. func (c *FoldersLogsListCall) ResourceNames(resourceNames ...string) *FoldersLogsListCall { c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) return c @@ -17735,14 +17814,14 @@ func (c *FoldersLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespons // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Required. 
The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", // "location": "path", // "pattern": "^folders/[^/]+$", // "required": true, // "type": "string" // }, // "resourceNames": { - // "description": "Optional. The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", // "location": "query", // "repeated": true, // "type": "string" @@ -17811,6 +17890,17 @@ func (r *FoldersSinksService) Create(parent string, logsink *LogSink) *FoldersSi return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. The format must be +// serviceAccount:some@email. This field can only be specified if you +// are routing logs to a destination outside this sink's project. 
If not +// specified, a Logging service account will automatically be generated. +func (c *FoldersSinksCreateCall) CustomWriterIdentity(customWriterIdentity string) *FoldersSinksCreateCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": Determines the kind of IAM identity returned // as writer_identity in the new sink. If this value is omitted or set @@ -17927,6 +18017,11 @@ func (c *FoldersSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, err // "parent" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "parent": { // "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", // "location": "path", @@ -18481,6 +18576,17 @@ func (r *FoldersSinksService) Patch(sinkNameid string, logsink *LogSink) *Folder return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. Must be of format +// serviceAccount:some@email. This can only be specified if writing to a +// destination outside the sink's project. If not specified, a p4 +// service account will automatically be generated. 
+func (c *FoldersSinksPatchCall) CustomWriterIdentity(customWriterIdentity string) *FoldersSinksPatchCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": See sinks.create for a description of this // field. When updating a sink, the effect of this field on the value of @@ -18610,6 +18716,11 @@ func (c *FoldersSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink, erro // "sinkName" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "sinkName": { // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", // "location": "path", @@ -18674,6 +18785,17 @@ func (r *FoldersSinksService) Update(sinkNameid string, logsink *LogSink) *Folde return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. Must be of format +// serviceAccount:some@email. This can only be specified if writing to a +// destination outside the sink's project. If not specified, a p4 +// service account will automatically be generated. 
+func (c *FoldersSinksUpdateCall) CustomWriterIdentity(customWriterIdentity string) *FoldersSinksUpdateCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": See sinks.create for a description of this // field. When updating a sink, the effect of this field on the value of @@ -18803,6 +18925,11 @@ func (c *FoldersSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, err // "sinkName" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "sinkName": { // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", // "location": "path", @@ -20532,9 +20659,9 @@ type LocationsBucketsLinksCreateCall struct { header_ http.Header } -// Create: Asynchronously creates linked dataset in BigQuery which makes -// it possible to use BugQuery to read the logs stored in the bucket. A -// bucket may currently only contain one link. +// Create: Asynchronously creates a linked dataset in BigQuery which +// makes it possible to use BigQuery to read the logs stored in the log +// bucket. A log bucket may currently only contain one link. // // - parent: The full resource name of the bucket to create a link for. 
// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" @@ -20650,7 +20777,7 @@ func (c *LocationsBucketsLinksCreateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Asynchronously creates linked dataset in BigQuery which makes it possible to use BugQuery to read the logs stored in the bucket. A bucket may currently only contain one link.", + // "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. A log bucket may currently only contain one link.", // "flatPath": "v2/{v2Id}/{v2Id1}/locations/{locationsId}/buckets/{bucketsId}/links", // "httpMethod": "POST", // "id": "logging.locations.buckets.links.create", @@ -22682,7 +22809,7 @@ type LogsListCall struct { // List: Lists the logs in projects, organizations, folders, or billing // accounts. Only logs that have entries are listed. // -// - parent: The resource name that owns the logs: projects/[PROJECT_ID] +// - parent: The resource name to list logs for: projects/[PROJECT_ID] // organizations/[ORGANIZATION_ID] // billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]. 
func (r *LogsService) List(parent string) *LogsListCall { @@ -22710,8 +22837,8 @@ func (c *LogsListCall) PageToken(pageToken string) *LogsListCall { return c } -// ResourceNames sets the optional parameter "resourceNames": The -// resource name that owns the logs: +// ResourceNames sets the optional parameter "resourceNames": List of +// resource names to list logs for: // projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/view // s/[VIEW_ID] // organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKE @@ -22721,7 +22848,8 @@ func (c *LogsListCall) PageToken(pageToken string) *LogsListCall { // folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/ // [VIEW_ID]To support legacy queries, it could also be: // projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] -// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID] +// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource +// name in the parent field is added to this list. func (c *LogsListCall) ResourceNames(resourceNames ...string) *LogsListCall { c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) return c @@ -22846,14 +22974,14 @@ func (c *LogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsResponse, erro // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Required. The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, // "type": "string" // }, // "resourceNames": { - // "description": "Optional. 
The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", // "location": "query", // "repeated": true, // "type": "string" @@ -26269,9 +26397,9 @@ type OrganizationsLocationsBucketsLinksCreateCall struct { header_ http.Header } -// Create: Asynchronously creates linked dataset in BigQuery which makes -// it possible to use BugQuery to read the logs stored in the bucket. A -// bucket may currently only contain one link. +// Create: Asynchronously creates a linked dataset in BigQuery which +// makes it possible to use BigQuery to read the logs stored in the log +// bucket. A log bucket may currently only contain one link. // // - parent: The full resource name of the bucket to create a link for. 
// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" @@ -26387,7 +26515,7 @@ func (c *OrganizationsLocationsBucketsLinksCreateCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Asynchronously creates linked dataset in BigQuery which makes it possible to use BugQuery to read the logs stored in the bucket. A bucket may currently only contain one link.", + // "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. A log bucket may currently only contain one link.", // "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/buckets/{bucketsId}/links", // "httpMethod": "POST", // "id": "logging.organizations.locations.buckets.links.create", @@ -27763,7 +27891,7 @@ type OrganizationsLocationsBucketsViewsLogsListCall struct { // List: Lists the logs in projects, organizations, folders, or billing // accounts. Only logs that have entries are listed. // -// - parent: The resource name that owns the logs: projects/[PROJECT_ID] +// - parent: The resource name to list logs for: projects/[PROJECT_ID] // organizations/[ORGANIZATION_ID] // billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]. 
func (r *OrganizationsLocationsBucketsViewsLogsService) List(parent string) *OrganizationsLocationsBucketsViewsLogsListCall { @@ -27791,8 +27919,8 @@ func (c *OrganizationsLocationsBucketsViewsLogsListCall) PageToken(pageToken str return c } -// ResourceNames sets the optional parameter "resourceNames": The -// resource name that owns the logs: +// ResourceNames sets the optional parameter "resourceNames": List of +// resource names to list logs for: // projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/view // s/[VIEW_ID] // organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKE @@ -27802,7 +27930,8 @@ func (c *OrganizationsLocationsBucketsViewsLogsListCall) PageToken(pageToken str // folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/ // [VIEW_ID]To support legacy queries, it could also be: // projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] -// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID] +// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource +// name in the parent field is added to this list. func (c *OrganizationsLocationsBucketsViewsLogsListCall) ResourceNames(resourceNames ...string) *OrganizationsLocationsBucketsViewsLogsListCall { c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) return c @@ -27927,14 +28056,14 @@ func (c *OrganizationsLocationsBucketsViewsLogsListCall) Do(opts ...googleapi.Ca // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Required. 
The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", // "location": "path", // "pattern": "^organizations/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", // "required": true, // "type": "string" // }, // "resourceNames": { - // "description": "Optional. The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", // "location": "query", // "repeated": true, // "type": "string" @@ -28645,7 +28774,7 @@ type OrganizationsLogsListCall struct { // List: Lists the logs in projects, organizations, folders, or billing // accounts. Only logs that have entries are listed. 
// -// - parent: The resource name that owns the logs: projects/[PROJECT_ID] +// - parent: The resource name to list logs for: projects/[PROJECT_ID] // organizations/[ORGANIZATION_ID] // billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]. func (r *OrganizationsLogsService) List(parent string) *OrganizationsLogsListCall { @@ -28673,8 +28802,8 @@ func (c *OrganizationsLogsListCall) PageToken(pageToken string) *OrganizationsLo return c } -// ResourceNames sets the optional parameter "resourceNames": The -// resource name that owns the logs: +// ResourceNames sets the optional parameter "resourceNames": List of +// resource names to list logs for: // projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/view // s/[VIEW_ID] // organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKE @@ -28684,7 +28813,8 @@ func (c *OrganizationsLogsListCall) PageToken(pageToken string) *OrganizationsLo // folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/ // [VIEW_ID]To support legacy queries, it could also be: // projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] -// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID] +// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource +// name in the parent field is added to this list. func (c *OrganizationsLogsListCall) ResourceNames(resourceNames ...string) *OrganizationsLogsListCall { c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) return c @@ -28809,14 +28939,14 @@ func (c *OrganizationsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsR // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Required. 
The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, // "type": "string" // }, // "resourceNames": { - // "description": "Optional. The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", // "location": "query", // "repeated": true, // "type": "string" @@ -28885,6 +29015,17 @@ func (r *OrganizationsSinksService) Create(parent string, logsink *LogSink) *Org return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. The format must be +// serviceAccount:some@email. 
This field can only be specified if you +// are routing logs to a destination outside this sink's project. If not +// specified, a Logging service account will automatically be generated. +func (c *OrganizationsSinksCreateCall) CustomWriterIdentity(customWriterIdentity string) *OrganizationsSinksCreateCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": Determines the kind of IAM identity returned // as writer_identity in the new sink. If this value is omitted or set @@ -29001,6 +29142,11 @@ func (c *OrganizationsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSin // "parent" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "parent": { // "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", // "location": "path", @@ -29555,6 +29701,17 @@ func (r *OrganizationsSinksService) Patch(sinkNameid string, logsink *LogSink) * return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. Must be of format +// serviceAccount:some@email. This can only be specified if writing to a +// destination outside the sink's project. If not specified, a p4 +// service account will automatically be generated. 
+func (c *OrganizationsSinksPatchCall) CustomWriterIdentity(customWriterIdentity string) *OrganizationsSinksPatchCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": See sinks.create for a description of this // field. When updating a sink, the effect of this field on the value of @@ -29684,6 +29841,11 @@ func (c *OrganizationsSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink // "sinkName" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "sinkName": { // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", // "location": "path", @@ -29748,6 +29910,17 @@ func (r *OrganizationsSinksService) Update(sinkNameid string, logsink *LogSink) return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. Must be of format +// serviceAccount:some@email. This can only be specified if writing to a +// destination outside the sink's project. If not specified, a p4 +// service account will automatically be generated. 
+func (c *OrganizationsSinksUpdateCall) CustomWriterIdentity(customWriterIdentity string) *OrganizationsSinksUpdateCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": See sinks.create for a description of this // field. When updating a sink, the effect of this field on the value of @@ -29877,6 +30050,11 @@ func (c *OrganizationsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSin // "sinkName" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "sinkName": { // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", // "location": "path", @@ -32747,9 +32925,9 @@ type ProjectsLocationsBucketsLinksCreateCall struct { header_ http.Header } -// Create: Asynchronously creates linked dataset in BigQuery which makes -// it possible to use BugQuery to read the logs stored in the bucket. A -// bucket may currently only contain one link. +// Create: Asynchronously creates a linked dataset in BigQuery which +// makes it possible to use BigQuery to read the logs stored in the log +// bucket. A log bucket may currently only contain one link. // // - parent: The full resource name of the bucket to create a link for. 
// "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" @@ -32865,7 +33043,7 @@ func (c *ProjectsLocationsBucketsLinksCreateCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Asynchronously creates linked dataset in BigQuery which makes it possible to use BugQuery to read the logs stored in the bucket. A bucket may currently only contain one link.", + // "description": "Asynchronously creates a linked dataset in BigQuery which makes it possible to use BigQuery to read the logs stored in the log bucket. A log bucket may currently only contain one link.", // "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/links", // "httpMethod": "POST", // "id": "logging.projects.locations.buckets.links.create", @@ -34241,7 +34419,7 @@ type ProjectsLocationsBucketsViewsLogsListCall struct { // List: Lists the logs in projects, organizations, folders, or billing // accounts. Only logs that have entries are listed. // -// - parent: The resource name that owns the logs: projects/[PROJECT_ID] +// - parent: The resource name to list logs for: projects/[PROJECT_ID] // organizations/[ORGANIZATION_ID] // billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]. 
func (r *ProjectsLocationsBucketsViewsLogsService) List(parent string) *ProjectsLocationsBucketsViewsLogsListCall { @@ -34269,8 +34447,8 @@ func (c *ProjectsLocationsBucketsViewsLogsListCall) PageToken(pageToken string) return c } -// ResourceNames sets the optional parameter "resourceNames": The -// resource name that owns the logs: +// ResourceNames sets the optional parameter "resourceNames": List of +// resource names to list logs for: // projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/view // s/[VIEW_ID] // organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKE @@ -34280,7 +34458,8 @@ func (c *ProjectsLocationsBucketsViewsLogsListCall) PageToken(pageToken string) // folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/ // [VIEW_ID]To support legacy queries, it could also be: // projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] -// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID] +// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource +// name in the parent field is added to this list. func (c *ProjectsLocationsBucketsViewsLogsListCall) ResourceNames(resourceNames ...string) *ProjectsLocationsBucketsViewsLogsListCall { c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) return c @@ -34405,14 +34584,14 @@ func (c *ProjectsLocationsBucketsViewsLogsListCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Required. 
The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", // "location": "path", // "pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+/views/[^/]+$", // "required": true, // "type": "string" // }, // "resourceNames": { - // "description": "Optional. The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", // "location": "query", // "repeated": true, // "type": "string" @@ -35123,7 +35302,7 @@ type ProjectsLogsListCall struct { // List: Lists the logs in projects, organizations, folders, or billing // accounts. Only logs that have entries are listed. 
// -// - parent: The resource name that owns the logs: projects/[PROJECT_ID] +// - parent: The resource name to list logs for: projects/[PROJECT_ID] // organizations/[ORGANIZATION_ID] // billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]. func (r *ProjectsLogsService) List(parent string) *ProjectsLogsListCall { @@ -35151,8 +35330,8 @@ func (c *ProjectsLogsListCall) PageToken(pageToken string) *ProjectsLogsListCall return c } -// ResourceNames sets the optional parameter "resourceNames": The -// resource name that owns the logs: +// ResourceNames sets the optional parameter "resourceNames": List of +// resource names to list logs for: // projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/view // s/[VIEW_ID] // organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKE @@ -35162,7 +35341,8 @@ func (c *ProjectsLogsListCall) PageToken(pageToken string) *ProjectsLogsListCall // folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/ // [VIEW_ID]To support legacy queries, it could also be: // projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] -// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID] +// billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource +// name in the parent field is added to this list. func (c *ProjectsLogsListCall) ResourceNames(resourceNames ...string) *ProjectsLogsListCall { c.urlParams_.SetMulti("resourceNames", append([]string{}, resourceNames...)) return c @@ -35287,14 +35467,14 @@ func (c *ProjectsLogsListCall) Do(opts ...googleapi.CallOption) (*ListLogsRespon // "type": "string" // }, // "parent": { - // "description": "Required. The resource name that owns the logs: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Required. 
The resource name to list logs for: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // }, // "resourceNames": { - // "description": "Optional. The resource name that owns the logs: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]", + // "description": "Optional. List of resource names to list logs for: projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]To support legacy queries, it could also be: projects/[PROJECT_ID] organizations/[ORGANIZATION_ID] billingAccounts/[BILLING_ACCOUNT_ID] folders/[FOLDER_ID]The resource name in the parent field is added to this list.", // "location": "query", // "repeated": true, // "type": "string" @@ -36143,6 +36323,17 @@ func (r *ProjectsSinksService) Create(parent string, logsink *LogSink) *Projects return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. The format must be +// serviceAccount:some@email. 
This field can only be specified if you +// are routing logs to a destination outside this sink's project. If not +// specified, a Logging service account will automatically be generated. +func (c *ProjectsSinksCreateCall) CustomWriterIdentity(customWriterIdentity string) *ProjectsSinksCreateCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": Determines the kind of IAM identity returned // as writer_identity in the new sink. If this value is omitted or set @@ -36259,6 +36450,11 @@ func (c *ProjectsSinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, er // "parent" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "parent": { // "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", // "location": "path", @@ -36813,6 +37009,17 @@ func (r *ProjectsSinksService) Patch(sinkNameid string, logsink *LogSink) *Proje return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. Must be of format +// serviceAccount:some@email. This can only be specified if writing to a +// destination outside the sink's project. If not specified, a p4 +// service account will automatically be generated. 
+func (c *ProjectsSinksPatchCall) CustomWriterIdentity(customWriterIdentity string) *ProjectsSinksPatchCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": See sinks.create for a description of this // field. When updating a sink, the effect of this field on the value of @@ -36942,6 +37149,11 @@ func (c *ProjectsSinksPatchCall) Do(opts ...googleapi.CallOption) (*LogSink, err // "sinkName" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "sinkName": { // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", // "location": "path", @@ -37006,6 +37218,17 @@ func (r *ProjectsSinksService) Update(sinkNameid string, logsink *LogSink) *Proj return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. Must be of format +// serviceAccount:some@email. This can only be specified if writing to a +// destination outside the sink's project. If not specified, a p4 +// service account will automatically be generated. 
+func (c *ProjectsSinksUpdateCall) CustomWriterIdentity(customWriterIdentity string) *ProjectsSinksUpdateCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": See sinks.create for a description of this // field. When updating a sink, the effect of this field on the value of @@ -37135,6 +37358,11 @@ func (c *ProjectsSinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, er // "sinkName" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "sinkName": { // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", // "location": "path", @@ -37197,6 +37425,17 @@ func (r *SinksService) Create(parent string, logsink *LogSink) *SinksCreateCall return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. The format must be +// serviceAccount:some@email. This field can only be specified if you +// are routing logs to a destination outside this sink's project. If not +// specified, a Logging service account will automatically be generated. 
+func (c *SinksCreateCall) CustomWriterIdentity(customWriterIdentity string) *SinksCreateCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": Determines the kind of IAM identity returned // as writer_identity in the new sink. If this value is omitted or set @@ -37313,6 +37552,11 @@ func (c *SinksCreateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { // "parent" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. The format must be serviceAccount:some@email. This field can only be specified if you are routing logs to a destination outside this sink's project. If not specified, a Logging service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "parent": { // "description": "Required. The resource in which to create the sink: \"projects/[PROJECT_ID]\" \"organizations/[ORGANIZATION_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]\" \"folders/[FOLDER_ID]\" For examples:\"projects/my-project\" \"organizations/123456789\"", // "location": "path", @@ -37867,6 +38111,17 @@ func (r *SinksService) Update(sinkNameid string, logsink *LogSink) *SinksUpdateC return c } +// CustomWriterIdentity sets the optional parameter +// "customWriterIdentity": A service account provided by the caller that +// will be used to write the log entries. Must be of format +// serviceAccount:some@email. This can only be specified if writing to a +// destination outside the sink's project. If not specified, a p4 +// service account will automatically be generated. 
+func (c *SinksUpdateCall) CustomWriterIdentity(customWriterIdentity string) *SinksUpdateCall { + c.urlParams_.Set("customWriterIdentity", customWriterIdentity) + return c +} + // UniqueWriterIdentity sets the optional parameter // "uniqueWriterIdentity": See sinks.create for a description of this // field. When updating a sink, the effect of this field on the value of @@ -37996,6 +38251,11 @@ func (c *SinksUpdateCall) Do(opts ...googleapi.CallOption) (*LogSink, error) { // "sinkName" // ], // "parameters": { + // "customWriterIdentity": { + // "description": "Optional. A service account provided by the caller that will be used to write the log entries. Must be of format serviceAccount:some@email. This can only be specified if writing to a destination outside the sink's project. If not specified, a p4 service account will automatically be generated.", + // "location": "query", + // "type": "string" + // }, // "sinkName": { // "description": "Required. The full resource name of the sink to update, including the parent resource and the sink identifier: \"projects/[PROJECT_ID]/sinks/[SINK_ID]\" \"organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]\" \"billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]\" \"folders/[FOLDER_ID]/sinks/[SINK_ID]\" For example:\"projects/my-project/sinks/my-sink\"", // "location": "path", diff --git a/terraform/providers/google/vendor/google.golang.org/api/option/internaloption/internaloption.go b/terraform/providers/google/vendor/google.golang.org/api/option/internaloption/internaloption.go index cc7ebfe277..3b8461d1da 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/terraform/providers/google/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -67,6 +67,21 @@ func (e enableDirectPath) Apply(o *internal.DialSettings) { o.EnableDirectPath = bool(e) } +// EnableDirectPathXds returns a ClientOption that overrides the default +// DirectPath type. 
It is only valid when DirectPath is enabled. +// +// It should only be used internally by generated clients. +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func EnableDirectPathXds() option.ClientOption { + return enableDirectPathXds(true) +} + +type enableDirectPathXds bool + +func (x enableDirectPathXds) Apply(o *internal.DialSettings) { + o.EnableDirectPathXds = bool(x) +} + // AllowNonDefaultServiceAccount returns a ClientOption that overrides the default // requirement for using the default service account for DirectPath. // diff --git a/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json b/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json index ac519b603f..62204b156e 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-api.json @@ -217,6 +217,7 @@ "type": "string" }, "revisionId": { + "deprecated": true, "description": "Optional. This field is deprecated and should not be used for specifying the revision ID. The revision ID should be specified via the `name` parameter.", "location": "query", "type": "string" @@ -680,13 +681,13 @@ ], "parameters": { "pageSize": { - "description": "Maximum number of snapshots to return.", + "description": "Optional. Maximum number of snapshots to return.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "The value returned by the last `ListSnapshotsResponse`; indicates that this is a continuation of a prior `ListSnapshots` call, and that the system should return the next page of data.", + "description": "Optional. 
The value returned by the last `ListSnapshotsResponse`; indicates that this is a continuation of a prior `ListSnapshots` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, @@ -717,7 +718,7 @@ ], "parameters": { "name": { - "description": "The name of the snapshot.", + "description": "Optional. The name of the snapshot.", "location": "path", "pattern": "^projects/[^/]+/snapshots/[^/]+$", "required": true, @@ -976,13 +977,13 @@ ], "parameters": { "pageSize": { - "description": "Maximum number of subscriptions to return.", + "description": "Optional. Maximum number of subscriptions to return.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that this is a continuation of a prior `ListSubscriptions` call, and that the system should return the next page of data.", + "description": "Optional. The value returned by the last `ListSubscriptionsResponse`; indicates that this is a continuation of a prior `ListSubscriptions` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, @@ -1333,13 +1334,13 @@ ], "parameters": { "pageSize": { - "description": "Maximum number of topics to return.", + "description": "Optional. Maximum number of topics to return.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "The value returned by the last `ListTopicsResponse`; indicates that this is a continuation of a prior `ListTopics` call, and that the system should return the next page of data.", + "description": "Optional. 
The value returned by the last `ListTopicsResponse`; indicates that this is a continuation of a prior `ListTopics` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, @@ -1490,13 +1491,13 @@ ], "parameters": { "pageSize": { - "description": "Maximum number of snapshot names to return.", + "description": "Optional. Maximum number of snapshot names to return.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "The value returned by the last `ListTopicSnapshotsResponse`; indicates that this is a continuation of a prior `ListTopicSnapshots` call, and that the system should return the next page of data.", + "description": "Optional. The value returned by the last `ListTopicSnapshotsResponse`; indicates that this is a continuation of a prior `ListTopicSnapshots` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, @@ -1531,13 +1532,13 @@ ], "parameters": { "pageSize": { - "description": "Maximum number of subscription names to return.", + "description": "Optional. Maximum number of subscription names to return.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates that this is a continuation of a prior `ListTopicSubscriptions` call, and that the system should return the next page of data.", + "description": "Optional. 
The value returned by the last `ListTopicSubscriptionsResponse`; indicates that this is a continuation of a prior `ListTopicSubscriptions` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, @@ -1565,7 +1566,7 @@ } } }, - "revision": "20230218", + "revision": "20230620", "rootUrl": "https://pubsub.googleapis.com/", "schemas": { "AcknowledgeRequest": { @@ -1582,12 +1583,23 @@ }, "type": "object" }, + "AvroConfig": { + "description": "Configuration for writing message data in Avro format. Message payloads and metadata will be written to files as an Avro binary.", + "id": "AvroConfig", + "properties": { + "writeMetadata": { + "description": "Optional. When true, write the subscription name, message_id, publish_time, attributes, and ordering_key as additional fields in the output.", + "type": "boolean" + } + }, + "type": "object" + }, "BigQueryConfig": { "description": "Configuration for a BigQuery subscription.", "id": "BigQueryConfig", "properties": { "dropUnknownFields": { - "description": "When true and use_topic_schema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog.", + "description": "Optional. When true and use_topic_schema is true, any fields that are a part of the topic schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog.", "type": "boolean" }, "state": { @@ -1610,15 +1622,15 @@ "type": "string" }, "table": { - "description": "The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId}", + "description": "Optional. 
The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId}", "type": "string" }, "useTopicSchema": { - "description": "When true, use the topic's schema as the columns to write to in BigQuery, if it exists.", + "description": "Optional. When true, use the topic's schema as the columns to write to in BigQuery, if it exists.", "type": "boolean" }, "writeMetadata": { - "description": "When true, write the subscription name, message_id, publish_time, attributes, and ordering_key to additional columns in the table. The subscription name, message_id, and publish_time fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column.", + "description": "Optional. When true, write the subscription name, message_id, publish_time, attributes, and ordering_key to additional columns in the table. The subscription name, message_id, and publish_time fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column.", "type": "boolean" } }, @@ -1646,6 +1658,60 @@ }, "type": "object" }, + "CloudStorageConfig": { + "description": "Configuration for a Cloud Storage subscription.", + "id": "CloudStorageConfig", + "properties": { + "avroConfig": { + "$ref": "AvroConfig", + "description": "Optional. If set, message data will be written to Cloud Storage in Avro format." + }, + "bucket": { + "description": "Required. User-provided name for the Cloud Storage bucket. The bucket must be created by the user. The bucket name must be without any prefix like \"gs://\". See the [bucket naming requirements] (https://cloud.google.com/storage/docs/buckets#naming).", + "type": "string" + }, + "filenamePrefix": { + "description": "Optional. User-provided prefix for Cloud Storage filename. 
See the [object naming requirements](https://cloud.google.com/storage/docs/objects#naming).", + "type": "string" + }, + "filenameSuffix": { + "description": "Optional. User-provided suffix for Cloud Storage filename. See the [object naming requirements](https://cloud.google.com/storage/docs/objects#naming).", + "type": "string" + }, + "maxBytes": { + "description": "Optional. The maximum bytes that can be written to a Cloud Storage file before a new file is created. Min 1 KB, max 10 GiB. The max_bytes limit may be exceeded in cases where messages are larger than the limit.", + "format": "int64", + "type": "string" + }, + "maxDuration": { + "description": "Optional. The maximum duration that can elapse before a new Cloud Storage file is created. Min 1 minute, max 10 minutes, default 5 minutes. May not exceed the subscription's acknowledgement deadline.", + "format": "google-duration", + "type": "string" + }, + "state": { + "description": "Output only. An output-only field that indicates whether or not the subscription can receive messages.", + "enum": [ + "STATE_UNSPECIFIED", + "ACTIVE", + "PERMISSION_DENIED", + "NOT_FOUND" + ], + "enumDescriptions": [ + "Default value. This value is unused.", + "The subscription can actively send messages to Cloud Storage.", + "Cannot write to the Cloud Storage bucket because of permission denied errors.", + "Cannot write to the Cloud Storage bucket because it does not exist." + ], + "readOnly": true, + "type": "string" + }, + "textConfig": { + "$ref": "TextConfig", + "description": "Optional. If set, message data will be written to Cloud Storage in text format." + } + }, + "type": "object" + }, "CommitSchemaRequest": { "description": "Request for CommitSchema method.", "id": "CommitSchemaRequest", @@ -1665,7 +1731,7 @@ "additionalProperties": { "type": "string" }, - "description": "See [Creating and managing labels](https://cloud.google.com/pubsub/docs/labels).", + "description": "Optional. 
See [Creating and managing labels](https://cloud.google.com/pubsub/docs/labels).", "type": "object" }, "subscription": { @@ -1680,11 +1746,11 @@ "id": "DeadLetterPolicy", "properties": { "deadLetterTopic": { - "description": "The name of the topic to which dead letter messages should be published. Format is `projects/{project}/topics/{topic}`.The Cloud Pub/Sub service account associated with the enclosing subscription's parent project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have permission to Publish() to this topic. The operation will fail if the topic does not exist. Users should ensure that there is a subscription attached to this topic since messages published to a topic with no subscriptions are lost.", + "description": "Optional. The name of the topic to which dead letter messages should be published. Format is `projects/{project}/topics/{topic}`.The Pub/Sub service account associated with the enclosing subscription's parent project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have permission to Publish() to this topic. The operation will fail if the topic does not exist. Users should ensure that there is a subscription attached to this topic since messages published to a topic with no subscriptions are lost.", "type": "string" }, "maxDeliveryAttempts": { - "description": "The maximum number of delivery attempts for any message. The value must be between 5 and 100. The number of delivery attempts is defined as 1 + (the sum of number of NACKs and number of times the acknowledgement deadline has been exceeded for the message). A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that client libraries may automatically extend ack_deadlines. This field will be honored on a best effort basis. If this parameter is 0, a default value of 5 is used.", + "description": "Optional. The maximum number of delivery attempts for any message. The value must be between 5 and 100. 
The number of delivery attempts is defined as 1 + (the sum of number of NACKs and number of times the acknowledgement deadline has been exceeded for the message). A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that client libraries may automatically extend ack_deadlines. This field will be honored on a best effort basis. If this parameter is 0, a default value of 5 is used.", "format": "int32", "type": "integer" } @@ -1708,7 +1774,7 @@ "id": "ExpirationPolicy", "properties": { "ttl": { - "description": "Specifies the \"time-to-live\" duration for an associated resource. The resource expires if it is not active for a period of `ttl`. The definition of \"activity\" depends on the type of the associated resource. The minimum and maximum allowed values for `ttl` depend on the type of the associated resource, as well. If `ttl` is not set, the associated resource never expires.", + "description": "Optional. Specifies the \"time-to-live\" duration for an associated resource. The resource expires if it is not active for a period of `ttl`. The definition of \"activity\" depends on the type of the associated resource. The minimum and maximum allowed values for `ttl` depend on the type of the associated resource, as well. If `ttl` is not set, the associated resource never expires.", "format": "google-duration", "type": "string" } @@ -1779,11 +1845,11 @@ "id": "ListSnapshotsResponse", "properties": { "nextPageToken": { - "description": "If not empty, indicates that there may be more snapshot that match the request; this value should be passed in a new `ListSnapshotsRequest`.", + "description": "Optional. If not empty, indicates that there may be more snapshot that match the request; this value should be passed in a new `ListSnapshotsRequest`.", "type": "string" }, "snapshots": { - "description": "The resulting snapshots.", + "description": "Optional. 
The resulting snapshots.", "items": { "$ref": "Snapshot" }, @@ -1797,11 +1863,11 @@ "id": "ListSubscriptionsResponse", "properties": { "nextPageToken": { - "description": "If not empty, indicates that there may be more subscriptions that match the request; this value should be passed in a new `ListSubscriptionsRequest` to get more subscriptions.", + "description": "Optional. If not empty, indicates that there may be more subscriptions that match the request; this value should be passed in a new `ListSubscriptionsRequest` to get more subscriptions.", "type": "string" }, "subscriptions": { - "description": "The subscriptions that match the request.", + "description": "Optional. The subscriptions that match the request.", "items": { "$ref": "Subscription" }, @@ -1815,11 +1881,11 @@ "id": "ListTopicSnapshotsResponse", "properties": { "nextPageToken": { - "description": "If not empty, indicates that there may be more snapshots that match the request; this value should be passed in a new `ListTopicSnapshotsRequest` to get more snapshots.", + "description": "Optional. If not empty, indicates that there may be more snapshots that match the request; this value should be passed in a new `ListTopicSnapshotsRequest` to get more snapshots.", "type": "string" }, "snapshots": { - "description": "The names of the snapshots that match the request.", + "description": "Optional. The names of the snapshots that match the request.", "items": { "type": "string" }, @@ -1833,11 +1899,11 @@ "id": "ListTopicSubscriptionsResponse", "properties": { "nextPageToken": { - "description": "If not empty, indicates that there may be more subscriptions that match the request; this value should be passed in a new `ListTopicSubscriptionsRequest` to get more subscriptions.", + "description": "Optional. 
If not empty, indicates that there may be more subscriptions that match the request; this value should be passed in a new `ListTopicSubscriptionsRequest` to get more subscriptions.", "type": "string" }, "subscriptions": { - "description": "The names of subscriptions attached to the topic specified in the request.", + "description": "Optional. The names of subscriptions attached to the topic specified in the request.", "items": { "type": "string" }, @@ -1851,11 +1917,11 @@ "id": "ListTopicsResponse", "properties": { "nextPageToken": { - "description": "If not empty, indicates that there may be more topics that match the request; this value should be passed in a new `ListTopicsRequest`.", + "description": "Optional. If not empty, indicates that there may be more topics that match the request; this value should be passed in a new `ListTopicsRequest`.", "type": "string" }, "topics": { - "description": "The resulting topics.", + "description": "Optional. The resulting topics.", "items": { "$ref": "Topic" }, @@ -1869,7 +1935,7 @@ "id": "MessageStoragePolicy", "properties": { "allowedPersistenceRegions": { - "description": "A list of IDs of GCP regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed GCP regions (or running outside of GCP altogether) will be routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration.", + "description": "Optional. A list of IDs of GCP regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed GCP regions (or running outside of GCP altogether) will be routed for storage in one of the allowed regions. 
An empty list means that no regions are allowed, and is not a valid configuration.", "items": { "type": "string" }, @@ -1908,15 +1974,27 @@ }, "type": "object" }, + "NoWrapper": { + "description": "Sets the `data` field as the HTTP body for delivery.", + "id": "NoWrapper", + "properties": { + "writeMetadata": { + "description": "Optional. When true, writes the Pub/Sub message metadata to `x-goog-pubsub-:` headers of the HTTP request. Writes the Pub/Sub message attributes to `:` headers of the HTTP request.", + "type": "boolean" + } + }, + "type": "object" + }, "OidcToken": { - "description": "Contains information needed for generating an [OpenID Connect token](https://developers.google.com/identity/protocols/OpenIDConnect). [Service account email](https://cloud.google.com/iam/docs/service-accounts) used for generating the OIDC token. For more information on setting up authentication, see [Push subscriptions](https://cloud.google.com/pubsub/docs/push).", + "description": "Contains information needed for generating an [OpenID Connect token](https://developers.google.com/identity/protocols/OpenIDConnect).", "id": "OidcToken", "properties": { "audience": { - "description": "Audience to be used when generating OIDC token. The audience claim identifies the recipients that the JWT is intended for. The audience value is a single case-sensitive string. Having multiple values (array) for the audience field is not supported. More info about the OIDC JWT token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3 Note: if not specified, the Push endpoint URL will be used.", + "description": "Optional. Audience to be used when generating OIDC token. The audience claim identifies the recipients that the JWT is intended for. The audience value is a single case-sensitive string. Having multiple values (array) for the audience field is not supported. 
More info about the OIDC JWT token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3 Note: if not specified, the Push endpoint URL will be used.", "type": "string" }, "serviceAccountEmail": { + "description": "Optional. [Service account email](https://cloud.google.com/iam/docs/service-accounts) used for generating the OIDC token. For more information on setting up authentication, see [Push subscriptions](https://cloud.google.com/pubsub/docs/push).", "type": "string" } }, @@ -1965,7 +2043,7 @@ "id": "PublishResponse", "properties": { "messageIds": { - "description": "The server-assigned ID of each published message, in the same order as the messages in the request. IDs are guaranteed to be unique within the topic.", + "description": "Optional. The server-assigned ID of each published message, in the same order as the messages in the request. IDs are guaranteed to be unique within the topic.", "items": { "type": "string" }, @@ -1982,30 +2060,36 @@ "additionalProperties": { "type": "string" }, - "description": "Attributes for this message. If this field is empty, the message must contain non-empty data. This can be used to filter messages on the subscription.", + "description": "Optional. Attributes for this message. If this field is empty, the message must contain non-empty data. This can be used to filter messages on the subscription.", "type": "object" }, "data": { - "description": "The message data field. If this field is empty, the message must contain at least one attribute.", + "description": "Optional. The message data field. If this field is empty, the message must contain at least one attribute.", "format": "byte", "type": "string" }, "messageId": { - "description": "ID of this message, assigned by the server when the message is published. Guaranteed to be unique within the topic. This value may be read by a subscriber that receives a `PubsubMessage` via a `Pull` call or a push delivery. 
It must not be populated by the publisher in a `Publish` call.", + "description": "Optional. ID of this message, assigned by the server when the message is published. Guaranteed to be unique within the topic. This value may be read by a subscriber that receives a `PubsubMessage` via a `Pull` call or a push delivery. It must not be populated by the publisher in a `Publish` call.", "type": "string" }, "orderingKey": { - "description": "If non-empty, identifies related messages for which publish order should be respected. If a `Subscription` has `enable_message_ordering` set to `true`, messages published with the same non-empty `ordering_key` value will be delivered to subscribers in the order in which they are received by the Pub/Sub system. All `PubsubMessage`s published in a given `PublishRequest` must specify the same `ordering_key` value. For more information, see [ordering messages](https://cloud.google.com/pubsub/docs/ordering).", + "description": "Optional. If non-empty, identifies related messages for which publish order should be respected. If a `Subscription` has `enable_message_ordering` set to `true`, messages published with the same non-empty `ordering_key` value will be delivered to subscribers in the order in which they are received by the Pub/Sub system. All `PubsubMessage`s published in a given `PublishRequest` must specify the same `ordering_key` value. For more information, see [ordering messages](https://cloud.google.com/pubsub/docs/ordering).", "type": "string" }, "publishTime": { - "description": "The time at which the message was published, populated by the server when it receives the `Publish` call. It must not be populated by the publisher in a `Publish` call.", + "description": "Optional. The time at which the message was published, populated by the server when it receives the `Publish` call. 
It must not be populated by the publisher in a `Publish` call.", "format": "google-datetime", "type": "string" } }, "type": "object" }, + "PubsubWrapper": { + "description": "The payload to the push endpoint is in the form of the JSON representation of a PubsubMessage (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage).", + "id": "PubsubWrapper", + "properties": {}, + "type": "object" + }, "PullRequest": { "description": "Request for the `Pull` method.", "id": "PullRequest", @@ -2027,7 +2111,7 @@ "id": "PullResponse", "properties": { "receivedMessages": { - "description": "Received Pub/Sub messages. The list will be empty if there are no more messages available in the backlog, or if no messages could be returned before the request timeout. For JSON, the response can be entirely empty. The Pub/Sub system may return fewer than the `maxMessages` requested even if there are more messages available in the backlog.", + "description": "Optional. Received Pub/Sub messages. The list will be empty if there are no more messages available in the backlog, or if no messages could be returned before the request timeout. For JSON, the response can be entirely empty. The Pub/Sub system may return fewer than the `maxMessages` requested even if there are more messages available in the backlog.", "items": { "$ref": "ReceivedMessage" }, @@ -2044,15 +2128,23 @@ "additionalProperties": { "type": "string" }, - "description": "Endpoint configuration attributes that can be used to control different aspects of the message delivery. The only currently supported attribute is `x-goog-version`, which you can use to change the format of the pushed message. This attribute indicates the version of the data expected by the endpoint. This controls the shape of the pushed message (i.e., its fields and metadata). If not present during the `CreateSubscription` call, it will default to the version of the Pub/Sub API used to make such call. 
If not present in a `ModifyPushConfig` call, its value will not be changed. `GetSubscription` calls will always return a valid version, even if the subscription was created without this attribute. The only supported values for the `x-goog-version` attribute are: * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API. * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API. For example: `attributes { \"x-goog-version\": \"v1\" }`", + "description": "Optional. Endpoint configuration attributes that can be used to control different aspects of the message delivery. The only currently supported attribute is `x-goog-version`, which you can use to change the format of the pushed message. This attribute indicates the version of the data expected by the endpoint. This controls the shape of the pushed message (i.e., its fields and metadata). If not present during the `CreateSubscription` call, it will default to the version of the Pub/Sub API used to make such call. If not present in a `ModifyPushConfig` call, its value will not be changed. `GetSubscription` calls will always return a valid version, even if the subscription was created without this attribute. The only supported values for the `x-goog-version` attribute are: * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API. * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API. For example: `attributes { \"x-goog-version\": \"v1\" }`", "type": "object" }, + "noWrapper": { + "$ref": "NoWrapper", + "description": "Optional. When set, the payload to the push endpoint is not wrapped." + }, "oidcToken": { "$ref": "OidcToken", - "description": "If specified, Pub/Sub will generate and attach an OIDC JWT token as an `Authorization` header in the HTTP request for every pushed message." + "description": "Optional. If specified, Pub/Sub will generate and attach an OIDC JWT token as an `Authorization` header in the HTTP request for every pushed message." 
+ }, + "pubsubWrapper": { + "$ref": "PubsubWrapper", + "description": "Optional. When set, the payload to the push endpoint is in the form of the JSON representation of a PubsubMessage (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage)." }, "pushEndpoint": { - "description": "A URL locating the endpoint to which messages should be pushed. For example, a Webhook endpoint might use `https://example.com/push`.", + "description": "Optional. A URL locating the endpoint to which messages should be pushed. For example, a Webhook endpoint might use `https://example.com/push`.", "type": "string" } }, @@ -2063,32 +2155,32 @@ "id": "ReceivedMessage", "properties": { "ackId": { - "description": "This ID can be used to acknowledge the received message.", + "description": "Optional. This ID can be used to acknowledge the received message.", "type": "string" }, "deliveryAttempt": { - "description": "The approximate number of times that Cloud Pub/Sub has attempted to deliver the associated message to a subscriber. More precisely, this is 1 + (number of NACKs) + (number of ack_deadline exceeds) for this message. A NACK is any call to ModifyAckDeadline with a 0 deadline. An ack_deadline exceeds event is whenever a message is not acknowledged within ack_deadline. Note that ack_deadline is initially Subscription.ackDeadlineSeconds, but may get extended automatically by the client library. Upon the first delivery of a given message, `delivery_attempt` will have a value of 1. The value is calculated at best effort and is approximate. If a DeadLetterPolicy is not set on the subscription, this will be 0.", + "description": "Optional. The approximate number of times that Pub/Sub has attempted to deliver the associated message to a subscriber. More precisely, this is 1 + (number of NACKs) + (number of ack_deadline exceeds) for this message. A NACK is any call to ModifyAckDeadline with a 0 deadline. 
An ack_deadline exceeds event is whenever a message is not acknowledged within ack_deadline. Note that ack_deadline is initially Subscription.ackDeadlineSeconds, but may get extended automatically by the client library. Upon the first delivery of a given message, `delivery_attempt` will have a value of 1. The value is calculated at best effort and is approximate. If a DeadLetterPolicy is not set on the subscription, this will be 0.", "format": "int32", "type": "integer" }, "message": { "$ref": "PubsubMessage", - "description": "The message." + "description": "Optional. The message." } }, "type": "object" }, "RetryPolicy": { - "description": "A policy that specifies how Cloud Pub/Sub retries message delivery. Retry delay will be exponential based on provided minimum and maximum backoffs. https://en.wikipedia.org/wiki/Exponential_backoff. RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded events for a given message. Retry Policy is implemented on a best effort basis. At times, the delay between consecutive deliveries may not match the configuration. That is, delay can be more or less than configured backoff.", + "description": "A policy that specifies how Pub/Sub retries message delivery. Retry delay will be exponential based on provided minimum and maximum backoffs. https://en.wikipedia.org/wiki/Exponential_backoff. RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded events for a given message. Retry Policy is implemented on a best effort basis. At times, the delay between consecutive deliveries may not match the configuration. That is, delay can be more or less than configured backoff.", "id": "RetryPolicy", "properties": { "maximumBackoff": { - "description": "The maximum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 600 seconds.", + "description": "Optional. The maximum delay between consecutive deliveries of a given message. 
Value should be between 0 and 600 seconds. Defaults to 600 seconds.", "format": "google-duration", "type": "string" }, "minimumBackoff": { - "description": "The minimum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 10 seconds.", + "description": "Optional. The minimum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 10 seconds.", "format": "google-duration", "type": "string" } @@ -2151,7 +2243,7 @@ "id": "SchemaSettings", "properties": { "encoding": { - "description": "The encoding of messages validated against `schema`.", + "description": "Optional. The encoding of messages validated against `schema`.", "enum": [ "ENCODING_UNSPECIFIED", "JSON", @@ -2165,11 +2257,11 @@ "type": "string" }, "firstRevisionId": { - "description": "The minimum (inclusive) revision allowed for validating messages. If empty or not present, allow any revision to be validated against last_revision or any revision created before.", + "description": "Optional. The minimum (inclusive) revision allowed for validating messages. If empty or not present, allow any revision to be validated against last_revision or any revision created before.", "type": "string" }, "lastRevisionId": { - "description": "The maximum (inclusive) revision allowed for validating messages. If empty or not present, allow any revision to be validated against first_revision or any revision created after.", + "description": "Optional. The maximum (inclusive) revision allowed for validating messages. If empty or not present, allow any revision to be validated against first_revision or any revision created after.", "type": "string" }, "schema": { @@ -2184,11 +2276,11 @@ "id": "SeekRequest", "properties": { "snapshot": { - "description": "The snapshot to seek to. The snapshot's topic must be the same as that of the provided subscription. 
Format is `projects/{project}/snapshots/{snap}`.", + "description": "Optional. The snapshot to seek to. The snapshot's topic must be the same as that of the provided subscription. Format is `projects/{project}/snapshots/{snap}`.", "type": "string" }, "time": { - "description": "The time to seek to. Messages retained in the subscription that were published before this time are marked as acknowledged, and messages retained in the subscription that were published after this time are marked as unacknowledged. Note that this operation affects only those messages retained in the subscription (configured by the combination of `message_retention_duration` and `retain_acked_messages`). For example, if `time` corresponds to a point before the message retention window (or to a point before the system's notion of the subscription creation time), only retained messages will be marked as unacknowledged, and already-expunged messages will not be restored.", + "description": "Optional. The time to seek to. Messages retained in the subscription that were published before this time are marked as acknowledged, and messages retained in the subscription that were published after this time are marked as unacknowledged. Note that this operation affects only those messages retained in the subscription (configured by the combination of `message_retention_duration` and `retain_acked_messages`). For example, if `time` corresponds to a point before the message retention window (or to a point before the system's notion of the subscription creation time), only retained messages will be marked as unacknowledged, and already-expunged messages will not be restored.", "format": "google-datetime", "type": "string" } @@ -2217,7 +2309,7 @@ "id": "Snapshot", "properties": { "expireTime": { - "description": "The snapshot is guaranteed to exist up until this time. A newly-created snapshot expires no later than 7 days from the time of its creation. 
Its exact lifetime is determined at creation by the existing backlog in the source subscription. Specifically, the lifetime of the snapshot is `7 days - (age of oldest unacked message in the subscription)`. For example, consider a subscription whose oldest unacked message is 3 days old. If a snapshot is created from this subscription, the snapshot -- which will always capture this 3-day-old backlog as long as the snapshot exists -- will expire in 4 days. The service will refuse to create a snapshot that would expire in less than 1 hour after creation.", + "description": "Optional. The snapshot is guaranteed to exist up until this time. A newly-created snapshot expires no later than 7 days from the time of its creation. Its exact lifetime is determined at creation by the existing backlog in the source subscription. Specifically, the lifetime of the snapshot is `7 days - (age of oldest unacked message in the subscription)`. For example, consider a subscription whose oldest unacked message is 3 days old. If a snapshot is created from this subscription, the snapshot -- which will always capture this 3-day-old backlog as long as the snapshot exists -- will expire in 4 days. The service will refuse to create a snapshot that would expire in less than 1 hour after creation.", "format": "google-datetime", "type": "string" }, @@ -2225,66 +2317,70 @@ "additionalProperties": { "type": "string" }, - "description": "See [Creating and managing labels] (https://cloud.google.com/pubsub/docs/labels).", + "description": "Optional. See [Creating and managing labels] (https://cloud.google.com/pubsub/docs/labels).", "type": "object" }, "name": { - "description": "The name of the snapshot.", + "description": "Optional. The name of the snapshot.", "type": "string" }, "topic": { - "description": "The name of the topic from which this snapshot is retaining messages.", + "description": "Optional. 
The name of the topic from which this snapshot is retaining messages.", "type": "string" } }, "type": "object" }, "Subscription": { - "description": "A subscription resource. If none of `push_config` or `bigquery_config` is set, then the subscriber will pull and ack messages using API methods. At most one of these fields may be set.", + "description": "A subscription resource. If none of `push_config`, `bigquery_config`, or `cloud_storage_config` is set, then the subscriber will pull and ack messages using API methods. At most one of these fields may be set.", "id": "Subscription", "properties": { "ackDeadlineSeconds": { - "description": "The approximate amount of time (on a best-effort basis) Pub/Sub waits for the subscriber to acknowledge receipt before resending the message. In the interval after the message is delivered and before it is acknowledged, it is considered to be _outstanding_. During that time period, the message will not be redelivered (on a best-effort basis). For pull subscriptions, this value is used as the initial value for the ack deadline. To override this value for a given message, call `ModifyAckDeadline` with the corresponding `ack_id` if using non-streaming pull or send the `ack_id` in a `StreamingModifyAckDeadlineRequest` if using streaming pull. The minimum custom deadline you can specify is 10 seconds. The maximum custom deadline you can specify is 600 seconds (10 minutes). If this parameter is 0, a default value of 10 seconds is used. For push delivery, this value is also used to set the request timeout for the call to the push endpoint. If the subscriber never acknowledges the message, the Pub/Sub system will eventually redeliver the message.", + "description": "Optional. The approximate amount of time (on a best-effort basis) Pub/Sub waits for the subscriber to acknowledge receipt before resending the message. In the interval after the message is delivered and before it is acknowledged, it is considered to be _outstanding_. 
During that time period, the message will not be redelivered (on a best-effort basis). For pull subscriptions, this value is used as the initial value for the ack deadline. To override this value for a given message, call `ModifyAckDeadline` with the corresponding `ack_id` if using non-streaming pull or send the `ack_id` in a `StreamingModifyAckDeadlineRequest` if using streaming pull. The minimum custom deadline you can specify is 10 seconds. The maximum custom deadline you can specify is 600 seconds (10 minutes). If this parameter is 0, a default value of 10 seconds is used. For push delivery, this value is also used to set the request timeout for the call to the push endpoint. If the subscriber never acknowledges the message, the Pub/Sub system will eventually redeliver the message.", "format": "int32", "type": "integer" }, "bigqueryConfig": { "$ref": "BigQueryConfig", - "description": "If delivery to BigQuery is used with this subscription, this field is used to configure it." + "description": "Optional. If delivery to BigQuery is used with this subscription, this field is used to configure it." + }, + "cloudStorageConfig": { + "$ref": "CloudStorageConfig", + "description": "Optional. If delivery to Google Cloud Storage is used with this subscription, this field is used to configure it." }, "deadLetterPolicy": { "$ref": "DeadLetterPolicy", - "description": "A policy that specifies the conditions for dead lettering messages in this subscription. If dead_letter_policy is not set, dead lettering is disabled. The Cloud Pub/Sub service account associated with this subscriptions's parent project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have permission to Acknowledge() messages on this subscription." + "description": "Optional. A policy that specifies the conditions for dead lettering messages in this subscription. If dead_letter_policy is not set, dead lettering is disabled. 
The Pub/Sub service account associated with this subscriptions's parent project (i.e., service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have permission to Acknowledge() messages on this subscription." }, "detached": { - "description": "Indicates whether the subscription is detached from its topic. Detached subscriptions don't receive messages from their topic and don't retain any backlog. `Pull` and `StreamingPull` requests will return FAILED_PRECONDITION. If the subscription is a push subscription, pushes to the endpoint will not be made.", + "description": "Optional. Indicates whether the subscription is detached from its topic. Detached subscriptions don't receive messages from their topic and don't retain any backlog. `Pull` and `StreamingPull` requests will return FAILED_PRECONDITION. If the subscription is a push subscription, pushes to the endpoint will not be made.", "type": "boolean" }, "enableExactlyOnceDelivery": { - "description": "If true, Pub/Sub provides the following guarantees for the delivery of a message with a given value of `message_id` on this subscription: * The message sent to a subscriber is guaranteed not to be resent before the message's acknowledgement deadline expires. * An acknowledged message will not be resent to a subscriber. Note that subscribers may still receive multiple copies of a message when `enable_exactly_once_delivery` is true if the message was published multiple times by a publisher client. These copies are considered distinct by Pub/Sub and have distinct `message_id` values.", + "description": "Optional. If true, Pub/Sub provides the following guarantees for the delivery of a message with a given value of `message_id` on this subscription: * The message sent to a subscriber is guaranteed not to be resent before the message's acknowledgement deadline expires. * An acknowledged message will not be resent to a subscriber. 
Note that subscribers may still receive multiple copies of a message when `enable_exactly_once_delivery` is true if the message was published multiple times by a publisher client. These copies are considered distinct by Pub/Sub and have distinct `message_id` values.", "type": "boolean" }, "enableMessageOrdering": { - "description": "If true, messages published with the same `ordering_key` in `PubsubMessage` will be delivered to the subscribers in the order in which they are received by the Pub/Sub system. Otherwise, they may be delivered in any order.", + "description": "Optional. If true, messages published with the same `ordering_key` in `PubsubMessage` will be delivered to the subscribers in the order in which they are received by the Pub/Sub system. Otherwise, they may be delivered in any order.", "type": "boolean" }, "expirationPolicy": { "$ref": "ExpirationPolicy", - "description": "A policy that specifies the conditions for this subscription's expiration. A subscription is considered active as long as any connected subscriber is successfully consuming messages from the subscription or is issuing operations on the subscription. If `expiration_policy` is not set, a *default policy* with `ttl` of 31 days will be used. The minimum allowed value for `expiration_policy.ttl` is 1 day. If `expiration_policy` is set, but `expiration_policy.ttl` is not set, the subscription never expires." + "description": "Optional. A policy that specifies the conditions for this subscription's expiration. A subscription is considered active as long as any connected subscriber is successfully consuming messages from the subscription or is issuing operations on the subscription. If `expiration_policy` is not set, a *default policy* with `ttl` of 31 days will be used. The minimum allowed value for `expiration_policy.ttl` is 1 day. If `expiration_policy` is set, but `expiration_policy.ttl` is not set, the subscription never expires." 
}, "filter": { - "description": "An expression written in the Pub/Sub [filter language](https://cloud.google.com/pubsub/docs/filtering). If non-empty, then only `PubsubMessage`s whose `attributes` field matches the filter are delivered on this subscription. If empty, then no messages are filtered out.", + "description": "Optional. An expression written in the Pub/Sub [filter language](https://cloud.google.com/pubsub/docs/filtering). If non-empty, then only `PubsubMessage`s whose `attributes` field matches the filter are delivered on this subscription. If empty, then no messages are filtered out.", "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "See [Creating and managing labels](https://cloud.google.com/pubsub/docs/labels).", + "description": "Optional. See [Creating and managing labels](https://cloud.google.com/pubsub/docs/labels).", "type": "object" }, "messageRetentionDuration": { - "description": "How long to retain unacknowledged messages in the subscription's backlog, from the moment a message is published. If `retain_acked_messages` is true, then this also configures the retention of acknowledged messages, and thus configures how far back in time a `Seek` can be done. Defaults to 7 days. Cannot be more than 7 days or less than 10 minutes.", + "description": "Optional. How long to retain unacknowledged messages in the subscription's backlog, from the moment a message is published. If `retain_acked_messages` is true, then this also configures the retention of acknowledged messages, and thus configures how far back in time a `Seek` can be done. Defaults to 7 days. Cannot be more than 7 days or less than 10 minutes.", "format": "google-duration", "type": "string" }, @@ -2294,15 +2390,15 @@ }, "pushConfig": { "$ref": "PushConfig", - "description": "If push delivery is used with this subscription, this field is used to configure it." + "description": "Optional. 
If push delivery is used with this subscription, this field is used to configure it." }, "retainAckedMessages": { - "description": "Indicates whether to retain acknowledged messages. If true, then messages are not expunged from the subscription's backlog, even if they are acknowledged, until they fall out of the `message_retention_duration` window. This must be true if you would like to [`Seek` to a timestamp] (https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) in the past to replay previously-acknowledged messages.", + "description": "Optional. Indicates whether to retain acknowledged messages. If true, then messages are not expunged from the subscription's backlog, even if they are acknowledged, until they fall out of the `message_retention_duration` window. This must be true if you would like to [`Seek` to a timestamp] (https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) in the past to replay previously-acknowledged messages.", "type": "boolean" }, "retryPolicy": { "$ref": "RetryPolicy", - "description": "A policy that specifies how Pub/Sub retries message delivery for this subscription. If not set, the default retry policy is applied. This generally implies that messages will be retried as soon as possible for healthy subscribers. RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded events for a given message." + "description": "Optional. A policy that specifies how Pub/Sub retries message delivery for this subscription. If not set, the default retry policy is applied. This generally implies that messages will be retried as soon as possible for healthy subscribers. RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded events for a given message." }, "state": { "description": "Output only. 
An output-only field indicating whether or not the subscription can receive messages.", @@ -2360,41 +2456,47 @@ }, "type": "object" }, + "TextConfig": { + "description": "Configuration for writing message data in text format. Message payloads will be written to files as raw text, separated by a newline.", + "id": "TextConfig", + "properties": {}, + "type": "object" + }, "Topic": { "description": "A topic resource.", "id": "Topic", "properties": { "kmsKeyName": { - "description": "The resource name of the Cloud KMS CryptoKey to be used to protect access to messages published on this topic. The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`.", + "description": "Optional. The resource name of the Cloud KMS CryptoKey to be used to protect access to messages published on this topic. The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`.", "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "See [Creating and managing labels] (https://cloud.google.com/pubsub/docs/labels).", + "description": "Optional. See [Creating and managing labels] (https://cloud.google.com/pubsub/docs/labels).", "type": "object" }, "messageRetentionDuration": { - "description": "Indicates the minimum duration to retain a message after it is published to the topic. If this field is set, messages published to the topic in the last `message_retention_duration` are always available to subscribers. For instance, it allows any attached subscription to [seek to a timestamp](https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) that is up to `message_retention_duration` in the past. If this field is not set, message retention is controlled by settings on individual subscriptions. Cannot be more than 31 days or less than 10 minutes.", + "description": "Optional. Indicates the minimum duration to retain a message after it is published to the topic. 
If this field is set, messages published to the topic in the last `message_retention_duration` are always available to subscribers. For instance, it allows any attached subscription to [seek to a timestamp](https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) that is up to `message_retention_duration` in the past. If this field is not set, message retention is controlled by settings on individual subscriptions. Cannot be more than 31 days or less than 10 minutes.", "format": "google-duration", "type": "string" }, "messageStoragePolicy": { "$ref": "MessageStoragePolicy", - "description": "Policy constraining the set of Google Cloud Platform regions where messages published to the topic may be stored. If not present, then no constraints are in effect." + "description": "Optional. Policy constraining the set of Google Cloud Platform regions where messages published to the topic may be stored. If not present, then no constraints are in effect." }, "name": { "description": "Required. The name of the topic. It must have the format `\"projects/{project}/topics/{topic}\"`. `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `\"goog\"`.", "type": "string" }, "satisfiesPzs": { - "description": "Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests.", + "description": "Optional. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests.", "type": "boolean" }, "schemaSettings": { "$ref": "SchemaSettings", - "description": "Settings for validating messages published against a schema." + "description": "Optional. Settings for validating messages published against a schema." 
} }, "type": "object" diff --git a/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go b/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go index 2f24e25d9d..a0bc0b0113 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/pubsub/v1/pubsub-gen.go @@ -77,6 +77,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "pubsub:v1" const apiName = "pubsub" @@ -258,13 +259,46 @@ func (s *AcknowledgeRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AvroConfig: Configuration for writing message data in Avro format. +// Message payloads and metadata will be written to files as an Avro +// binary. +type AvroConfig struct { + // WriteMetadata: Optional. When true, write the subscription name, + // message_id, publish_time, attributes, and ordering_key as additional + // fields in the output. + WriteMetadata bool `json:"writeMetadata,omitempty"` + + // ForceSendFields is a list of field names (e.g. "WriteMetadata") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "WriteMetadata") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AvroConfig) MarshalJSON() ([]byte, error) { + type NoMethod AvroConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BigQueryConfig: Configuration for a BigQuery subscription. type BigQueryConfig struct { - // DropUnknownFields: When true and use_topic_schema is true, any fields - // that are a part of the topic schema that are not part of the BigQuery - // table schema are dropped when writing to BigQuery. Otherwise, the - // schemas must be kept in sync and any messages with extra fields are - // not written and remain in the subscription's backlog. + // DropUnknownFields: Optional. When true and use_topic_schema is true, + // any fields that are a part of the topic schema that are not part of + // the BigQuery table schema are dropped when writing to BigQuery. + // Otherwise, the schemas must be kept in sync and any messages with + // extra fields are not written and remain in the subscription's + // backlog. DropUnknownFields bool `json:"dropUnknownFields,omitempty"` // State: Output only. An output-only field that indicates whether or @@ -287,20 +321,20 @@ type BigQueryConfig struct { // schema mismatch. State string `json:"state,omitempty"` - // Table: The name of the table to which to write data, of the form - // {projectId}.{datasetId}.{tableId} + // Table: Optional. The name of the table to which to write data, of the + // form {projectId}.{datasetId}.{tableId} Table string `json:"table,omitempty"` - // UseTopicSchema: When true, use the topic's schema as the columns to - // write to in BigQuery, if it exists. + // UseTopicSchema: Optional. When true, use the topic's schema as the + // columns to write to in BigQuery, if it exists. 
UseTopicSchema bool `json:"useTopicSchema,omitempty"` - // WriteMetadata: When true, write the subscription name, message_id, - // publish_time, attributes, and ordering_key to additional columns in - // the table. The subscription name, message_id, and publish_time fields - // are put in their own columns while all other message properties - // (other than data) are written to a JSON object in the attributes - // column. + // WriteMetadata: Optional. When true, write the subscription name, + // message_id, publish_time, attributes, and ordering_key to additional + // columns in the table. The subscription name, message_id, and + // publish_time fields are put in their own columns while all other + // message properties (other than data) are written to a JSON object in + // the attributes column. WriteMetadata bool `json:"writeMetadata,omitempty"` // ForceSendFields is a list of field names (e.g. "DropUnknownFields") @@ -405,6 +439,80 @@ func (s *Binding) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CloudStorageConfig: Configuration for a Cloud Storage subscription. +type CloudStorageConfig struct { + // AvroConfig: Optional. If set, message data will be written to Cloud + // Storage in Avro format. + AvroConfig *AvroConfig `json:"avroConfig,omitempty"` + + // Bucket: Required. User-provided name for the Cloud Storage bucket. + // The bucket must be created by the user. The bucket name must be + // without any prefix like "gs://". See the [bucket naming requirements] + // (https://cloud.google.com/storage/docs/buckets#naming). + Bucket string `json:"bucket,omitempty"` + + // FilenamePrefix: Optional. User-provided prefix for Cloud Storage + // filename. See the object naming requirements + // (https://cloud.google.com/storage/docs/objects#naming). + FilenamePrefix string `json:"filenamePrefix,omitempty"` + + // FilenameSuffix: Optional. User-provided suffix for Cloud Storage + // filename. 
See the object naming requirements + // (https://cloud.google.com/storage/docs/objects#naming). + FilenameSuffix string `json:"filenameSuffix,omitempty"` + + // MaxBytes: Optional. The maximum bytes that can be written to a Cloud + // Storage file before a new file is created. Min 1 KB, max 10 GiB. The + // max_bytes limit may be exceeded in cases where messages are larger + // than the limit. + MaxBytes int64 `json:"maxBytes,omitempty,string"` + + // MaxDuration: Optional. The maximum duration that can elapse before a + // new Cloud Storage file is created. Min 1 minute, max 10 minutes, + // default 5 minutes. May not exceed the subscription's acknowledgement + // deadline. + MaxDuration string `json:"maxDuration,omitempty"` + + // State: Output only. An output-only field that indicates whether or + // not the subscription can receive messages. + // + // Possible values: + // "STATE_UNSPECIFIED" - Default value. This value is unused. + // "ACTIVE" - The subscription can actively send messages to Cloud + // Storage. + // "PERMISSION_DENIED" - Cannot write to the Cloud Storage bucket + // because of permission denied errors. + // "NOT_FOUND" - Cannot write to the Cloud Storage bucket because it + // does not exist. + State string `json:"state,omitempty"` + + // TextConfig: Optional. If set, message data will be written to Cloud + // Storage in text format. + TextConfig *TextConfig `json:"textConfig,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AvroConfig") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"AvroConfig") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CloudStorageConfig) MarshalJSON() ([]byte, error) { + type NoMethod CloudStorageConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // CommitSchemaRequest: Request for CommitSchema method. type CommitSchemaRequest struct { // Schema: Required. The schema revision to commit. @@ -435,7 +543,7 @@ func (s *CommitSchemaRequest) MarshalJSON() ([]byte, error) { // CreateSnapshotRequest: Request for the `CreateSnapshot` method. type CreateSnapshotRequest struct { - // Labels: See Creating and managing labels + // Labels: Optional. See Creating and managing labels // (https://cloud.google.com/pubsub/docs/labels). Labels map[string]string `json:"labels,omitempty"` @@ -477,9 +585,9 @@ func (s *CreateSnapshotRequest) MarshalJSON() ([]byte, error) { // any of the fields fails at subscription creation/updation, the // create/update subscription request will fail. type DeadLetterPolicy struct { - // DeadLetterTopic: The name of the topic to which dead letter messages - // should be published. Format is - // `projects/{project}/topics/{topic}`.The Cloud Pub/Sub service account + // DeadLetterTopic: Optional. The name of the topic to which dead letter + // messages should be published. Format is + // `projects/{project}/topics/{topic}`.The Pub/Sub service account // associated with the enclosing subscription's parent project (i.e., // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must // have permission to Publish() to this topic. 
The operation will fail @@ -488,14 +596,14 @@ type DeadLetterPolicy struct { // topic with no subscriptions are lost. DeadLetterTopic string `json:"deadLetterTopic,omitempty"` - // MaxDeliveryAttempts: The maximum number of delivery attempts for any - // message. The value must be between 5 and 100. The number of delivery - // attempts is defined as 1 + (the sum of number of NACKs and number of - // times the acknowledgement deadline has been exceeded for the - // message). A NACK is any call to ModifyAckDeadline with a 0 deadline. - // Note that client libraries may automatically extend ack_deadlines. - // This field will be honored on a best effort basis. If this parameter - // is 0, a default value of 5 is used. + // MaxDeliveryAttempts: Optional. The maximum number of delivery + // attempts for any message. The value must be between 5 and 100. The + // number of delivery attempts is defined as 1 + (the sum of number of + // NACKs and number of times the acknowledgement deadline has been + // exceeded for the message). A NACK is any call to ModifyAckDeadline + // with a 0 deadline. Note that client libraries may automatically + // extend ack_deadlines. This field will be honored on a best effort + // basis. If this parameter is 0, a default value of 5 is used. MaxDeliveryAttempts int64 `json:"maxDeliveryAttempts,omitempty"` // ForceSendFields is a list of field names (e.g. "DeadLetterTopic") to @@ -544,12 +652,12 @@ type Empty struct { // ExpirationPolicy: A policy that specifies the conditions for resource // expiration (i.e., automatic resource deletion). type ExpirationPolicy struct { - // Ttl: Specifies the "time-to-live" duration for an associated - // resource. The resource expires if it is not active for a period of - // `ttl`. The definition of "activity" depends on the type of the - // associated resource. The minimum and maximum allowed values for `ttl` - // depend on the type of the associated resource, as well. 
If `ttl` is - // not set, the associated resource never expires. + // Ttl: Optional. Specifies the "time-to-live" duration for an + // associated resource. The resource expires if it is not active for a + // period of `ttl`. The definition of "activity" depends on the type of + // the associated resource. The minimum and maximum allowed values for + // `ttl` depend on the type of the associated resource, as well. If + // `ttl` is not set, the associated resource never expires. Ttl string `json:"ttl,omitempty"` // ForceSendFields is a list of field names (e.g. "Ttl") to @@ -711,12 +819,12 @@ func (s *ListSchemasResponse) MarshalJSON() ([]byte, error) { // ListSnapshotsResponse: Response for the `ListSnapshots` method. type ListSnapshotsResponse struct { - // NextPageToken: If not empty, indicates that there may be more - // snapshot that match the request; this value should be passed in a new - // `ListSnapshotsRequest`. + // NextPageToken: Optional. If not empty, indicates that there may be + // more snapshot that match the request; this value should be passed in + // a new `ListSnapshotsRequest`. NextPageToken string `json:"nextPageToken,omitempty"` - // Snapshots: The resulting snapshots. + // Snapshots: Optional. The resulting snapshots. Snapshots []*Snapshot `json:"snapshots,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -749,12 +857,12 @@ func (s *ListSnapshotsResponse) MarshalJSON() ([]byte, error) { // ListSubscriptionsResponse: Response for the `ListSubscriptions` // method. type ListSubscriptionsResponse struct { - // NextPageToken: If not empty, indicates that there may be more - // subscriptions that match the request; this value should be passed in - // a new `ListSubscriptionsRequest` to get more subscriptions. + // NextPageToken: Optional. 
If not empty, indicates that there may be + // more subscriptions that match the request; this value should be + // passed in a new `ListSubscriptionsRequest` to get more subscriptions. NextPageToken string `json:"nextPageToken,omitempty"` - // Subscriptions: The subscriptions that match the request. + // Subscriptions: Optional. The subscriptions that match the request. Subscriptions []*Subscription `json:"subscriptions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -787,12 +895,13 @@ func (s *ListSubscriptionsResponse) MarshalJSON() ([]byte, error) { // ListTopicSnapshotsResponse: Response for the `ListTopicSnapshots` // method. type ListTopicSnapshotsResponse struct { - // NextPageToken: If not empty, indicates that there may be more - // snapshots that match the request; this value should be passed in a - // new `ListTopicSnapshotsRequest` to get more snapshots. + // NextPageToken: Optional. If not empty, indicates that there may be + // more snapshots that match the request; this value should be passed in + // a new `ListTopicSnapshotsRequest` to get more snapshots. NextPageToken string `json:"nextPageToken,omitempty"` - // Snapshots: The names of the snapshots that match the request. + // Snapshots: Optional. The names of the snapshots that match the + // request. Snapshots []string `json:"snapshots,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -825,13 +934,14 @@ func (s *ListTopicSnapshotsResponse) MarshalJSON() ([]byte, error) { // ListTopicSubscriptionsResponse: Response for the // `ListTopicSubscriptions` method. type ListTopicSubscriptionsResponse struct { - // NextPageToken: If not empty, indicates that there may be more - // subscriptions that match the request; this value should be passed in - // a new `ListTopicSubscriptionsRequest` to get more subscriptions. + // NextPageToken: Optional. 
If not empty, indicates that there may be + // more subscriptions that match the request; this value should be + // passed in a new `ListTopicSubscriptionsRequest` to get more + // subscriptions. NextPageToken string `json:"nextPageToken,omitempty"` - // Subscriptions: The names of subscriptions attached to the topic - // specified in the request. + // Subscriptions: Optional. The names of subscriptions attached to the + // topic specified in the request. Subscriptions []string `json:"subscriptions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -863,12 +973,12 @@ func (s *ListTopicSubscriptionsResponse) MarshalJSON() ([]byte, error) { // ListTopicsResponse: Response for the `ListTopics` method. type ListTopicsResponse struct { - // NextPageToken: If not empty, indicates that there may be more topics - // that match the request; this value should be passed in a new - // `ListTopicsRequest`. + // NextPageToken: Optional. If not empty, indicates that there may be + // more topics that match the request; this value should be passed in a + // new `ListTopicsRequest`. NextPageToken string `json:"nextPageToken,omitempty"` - // Topics: The resulting topics. + // Topics: Optional. The resulting topics. Topics []*Topic `json:"topics,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -901,12 +1011,12 @@ func (s *ListTopicsResponse) MarshalJSON() ([]byte, error) { // MessageStoragePolicy: A policy constraining the storage of messages // published to the topic. type MessageStoragePolicy struct { - // AllowedPersistenceRegions: A list of IDs of GCP regions where - // messages that are published to the topic may be persisted in storage. - // Messages published by publishers running in non-allowed GCP regions - // (or running outside of GCP altogether) will be routed for storage in - // one of the allowed regions. An empty list means that no regions are - // allowed, and is not a valid configuration. 
+ // AllowedPersistenceRegions: Optional. A list of IDs of GCP regions + // where messages that are published to the topic may be persisted in + // storage. Messages published by publishers running in non-allowed GCP + // regions (or running outside of GCP altogether) will be routed for + // storage in one of the allowed regions. An empty list means that no + // regions are allowed, and is not a valid configuration. AllowedPersistenceRegions []string `json:"allowedPersistenceRegions,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -1006,17 +1116,42 @@ func (s *ModifyPushConfigRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NoWrapper: Sets the `data` field as the HTTP body for delivery. +type NoWrapper struct { + // WriteMetadata: Optional. When true, writes the Pub/Sub message + // metadata to `x-goog-pubsub-:` headers of the HTTP request. Writes the + // Pub/Sub message attributes to `:` headers of the HTTP request. + WriteMetadata bool `json:"writeMetadata,omitempty"` + + // ForceSendFields is a list of field names (e.g. "WriteMetadata") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "WriteMetadata") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *NoWrapper) MarshalJSON() ([]byte, error) { + type NoMethod NoWrapper + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // OidcToken: Contains information needed for generating an OpenID // Connect token // (https://developers.google.com/identity/protocols/OpenIDConnect). -// Service account email -// (https://cloud.google.com/iam/docs/service-accounts) used for -// generating the OIDC token. For more information on setting up -// authentication, see Push subscriptions -// (https://cloud.google.com/pubsub/docs/push). type OidcToken struct { - // Audience: Audience to be used when generating OIDC token. The - // audience claim identifies the recipients that the JWT is intended + // Audience: Optional. Audience to be used when generating OIDC token. + // The audience claim identifies the recipients that the JWT is intended // for. The audience value is a single case-sensitive string. Having // multiple values (array) for the audience field is not supported. More // info about the OIDC JWT token audience here: @@ -1024,6 +1159,11 @@ type OidcToken struct { // specified, the Push endpoint URL will be used. Audience string `json:"audience,omitempty"` + // ServiceAccountEmail: Optional. Service account email + // (https://cloud.google.com/iam/docs/service-accounts) used for + // generating the OIDC token. For more information on setting up + // authentication, see Push subscriptions + // (https://cloud.google.com/pubsub/docs/push). ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` // ForceSendFields is a list of field names (e.g. "Audience") to @@ -1185,9 +1325,9 @@ func (s *PublishRequest) MarshalJSON() ([]byte, error) { // PublishResponse: Response for the `Publish` method. type PublishResponse struct { - // MessageIds: The server-assigned ID of each published message, in the - // same order as the messages in the request. 
IDs are guaranteed to be - // unique within the topic. + // MessageIds: Optional. The server-assigned ID of each published + // message, in the same order as the messages in the request. IDs are + // guaranteed to be unique within the topic. MessageIds []string `json:"messageIds,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1227,24 +1367,24 @@ func (s *PublishResponse) MarshalJSON() ([]byte, error) { // (https://cloud.google.com/pubsub/quotas) for more information about // message limits. type PubsubMessage struct { - // Attributes: Attributes for this message. If this field is empty, the - // message must contain non-empty data. This can be used to filter - // messages on the subscription. + // Attributes: Optional. Attributes for this message. If this field is + // empty, the message must contain non-empty data. This can be used to + // filter messages on the subscription. Attributes map[string]string `json:"attributes,omitempty"` - // Data: The message data field. If this field is empty, the message - // must contain at least one attribute. + // Data: Optional. The message data field. If this field is empty, the + // message must contain at least one attribute. Data string `json:"data,omitempty"` - // MessageId: ID of this message, assigned by the server when the - // message is published. Guaranteed to be unique within the topic. This - // value may be read by a subscriber that receives a `PubsubMessage` via - // a `Pull` call or a push delivery. It must not be populated by the - // publisher in a `Publish` call. + // MessageId: Optional. ID of this message, assigned by the server when + // the message is published. Guaranteed to be unique within the topic. + // This value may be read by a subscriber that receives a + // `PubsubMessage` via a `Pull` call or a push delivery. It must not be + // populated by the publisher in a `Publish` call. 
MessageId string `json:"messageId,omitempty"` - // OrderingKey: If non-empty, identifies related messages for which - // publish order should be respected. If a `Subscription` has + // OrderingKey: Optional. If non-empty, identifies related messages for + // which publish order should be respected. If a `Subscription` has // `enable_message_ordering` set to `true`, messages published with the // same non-empty `ordering_key` value will be delivered to subscribers // in the order in which they are received by the Pub/Sub system. All @@ -1253,9 +1393,9 @@ type PubsubMessage struct { // messages (https://cloud.google.com/pubsub/docs/ordering). OrderingKey string `json:"orderingKey,omitempty"` - // PublishTime: The time at which the message was published, populated - // by the server when it receives the `Publish` call. It must not be - // populated by the publisher in a `Publish` call. + // PublishTime: Optional. The time at which the message was published, + // populated by the server when it receives the `Publish` call. It must + // not be populated by the publisher in a `Publish` call. PublishTime string `json:"publishTime,omitempty"` // ForceSendFields is a list of field names (e.g. "Attributes") to @@ -1281,6 +1421,12 @@ func (s *PubsubMessage) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// PubsubWrapper: The payload to the push endpoint is in the form of the +// JSON representation of a PubsubMessage +// (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage). +type PubsubWrapper struct { +} + // PullRequest: Request for the `Pull` method. type PullRequest struct { // MaxMessages: Required. The maximum number of messages to return for @@ -1322,12 +1468,12 @@ func (s *PullRequest) MarshalJSON() ([]byte, error) { // PullResponse: Response for the `Pull` method. type PullResponse struct { - // ReceivedMessages: Received Pub/Sub messages. 
The list will be empty - // if there are no more messages available in the backlog, or if no - // messages could be returned before the request timeout. For JSON, the - // response can be entirely empty. The Pub/Sub system may return fewer - // than the `maxMessages` requested even if there are more messages - // available in the backlog. + // ReceivedMessages: Optional. Received Pub/Sub messages. The list will + // be empty if there are no more messages available in the backlog, or + // if no messages could be returned before the request timeout. For + // JSON, the response can be entirely empty. The Pub/Sub system may + // return fewer than the `maxMessages` requested even if there are more + // messages available in the backlog. ReceivedMessages []*ReceivedMessage `json:"receivedMessages,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1360,15 +1506,15 @@ func (s *PullResponse) MarshalJSON() ([]byte, error) { // PushConfig: Configuration for a push delivery endpoint. type PushConfig struct { - // Attributes: Endpoint configuration attributes that can be used to - // control different aspects of the message delivery. The only currently - // supported attribute is `x-goog-version`, which you can use to change - // the format of the pushed message. This attribute indicates the - // version of the data expected by the endpoint. This controls the shape - // of the pushed message (i.e., its fields and metadata). If not present - // during the `CreateSubscription` call, it will default to the version - // of the Pub/Sub API used to make such call. If not present in a - // `ModifyPushConfig` call, its value will not be changed. + // Attributes: Optional. Endpoint configuration attributes that can be + // used to control different aspects of the message delivery. The only + // currently supported attribute is `x-goog-version`, which you can use + // to change the format of the pushed message. 
This attribute indicates + // the version of the data expected by the endpoint. This controls the + // shape of the pushed message (i.e., its fields and metadata). If not + // present during the `CreateSubscription` call, it will default to the + // version of the Pub/Sub API used to make such call. If not present in + // a `ModifyPushConfig` call, its value will not be changed. // `GetSubscription` calls will always return a valid version, even if // the subscription was created without this attribute. The only // supported values for the `x-goog-version` attribute are: * `v1beta1`: @@ -1377,13 +1523,22 @@ type PushConfig struct { // example: `attributes { "x-goog-version": "v1" }` Attributes map[string]string `json:"attributes,omitempty"` - // OidcToken: If specified, Pub/Sub will generate and attach an OIDC JWT - // token as an `Authorization` header in the HTTP request for every - // pushed message. + // NoWrapper: Optional. When set, the payload to the push endpoint is + // not wrapped. + NoWrapper *NoWrapper `json:"noWrapper,omitempty"` + + // OidcToken: Optional. If specified, Pub/Sub will generate and attach + // an OIDC JWT token as an `Authorization` header in the HTTP request + // for every pushed message. OidcToken *OidcToken `json:"oidcToken,omitempty"` - // PushEndpoint: A URL locating the endpoint to which messages should be - // pushed. For example, a Webhook endpoint might use + // PubsubWrapper: Optional. When set, the payload to the push endpoint + // is in the form of the JSON representation of a PubsubMessage + // (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage). + PubsubWrapper *PubsubWrapper `json:"pubsubWrapper,omitempty"` + + // PushEndpoint: Optional. A URL locating the endpoint to which messages + // should be pushed. For example, a Webhook endpoint might use // `https://example.com/push`. 
PushEndpoint string `json:"pushEndpoint,omitempty"` @@ -1412,15 +1567,16 @@ func (s *PushConfig) MarshalJSON() ([]byte, error) { // ReceivedMessage: A message and its corresponding acknowledgment ID. type ReceivedMessage struct { - // AckId: This ID can be used to acknowledge the received message. + // AckId: Optional. This ID can be used to acknowledge the received + // message. AckId string `json:"ackId,omitempty"` - // DeliveryAttempt: The approximate number of times that Cloud Pub/Sub - // has attempted to deliver the associated message to a subscriber. More - // precisely, this is 1 + (number of NACKs) + (number of ack_deadline - // exceeds) for this message. A NACK is any call to ModifyAckDeadline - // with a 0 deadline. An ack_deadline exceeds event is whenever a - // message is not acknowledged within ack_deadline. Note that + // DeliveryAttempt: Optional. The approximate number of times that + // Pub/Sub has attempted to deliver the associated message to a + // subscriber. More precisely, this is 1 + (number of NACKs) + (number + // of ack_deadline exceeds) for this message. A NACK is any call to + // ModifyAckDeadline with a 0 deadline. An ack_deadline exceeds event is + // whenever a message is not acknowledged within ack_deadline. Note that // ack_deadline is initially Subscription.ackDeadlineSeconds, but may // get extended automatically by the client library. Upon the first // delivery of a given message, `delivery_attempt` will have a value of @@ -1428,7 +1584,7 @@ type ReceivedMessage struct { // DeadLetterPolicy is not set on the subscription, this will be 0. DeliveryAttempt int64 `json:"deliveryAttempt,omitempty"` - // Message: The message. + // Message: Optional. The message. Message *PubsubMessage `json:"message,omitempty"` // ForceSendFields is a list of field names (e.g. 
"AckId") to @@ -1454,9 +1610,9 @@ func (s *ReceivedMessage) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RetryPolicy: A policy that specifies how Cloud Pub/Sub retries -// message delivery. Retry delay will be exponential based on provided -// minimum and maximum backoffs. +// RetryPolicy: A policy that specifies how Pub/Sub retries message +// delivery. Retry delay will be exponential based on provided minimum +// and maximum backoffs. // https://en.wikipedia.org/wiki/Exponential_backoff. RetryPolicy will // be triggered on NACKs or acknowledgement deadline exceeded events for // a given message. Retry Policy is implemented on a best effort basis. @@ -1464,14 +1620,14 @@ func (s *ReceivedMessage) MarshalJSON() ([]byte, error) { // configuration. That is, delay can be more or less than configured // backoff. type RetryPolicy struct { - // MaximumBackoff: The maximum delay between consecutive deliveries of a - // given message. Value should be between 0 and 600 seconds. Defaults to - // 600 seconds. + // MaximumBackoff: Optional. The maximum delay between consecutive + // deliveries of a given message. Value should be between 0 and 600 + // seconds. Defaults to 600 seconds. MaximumBackoff string `json:"maximumBackoff,omitempty"` - // MinimumBackoff: The minimum delay between consecutive deliveries of a - // given message. Value should be between 0 and 600 seconds. Defaults to - // 10 seconds. + // MinimumBackoff: Optional. The minimum delay between consecutive + // deliveries of a given message. Value should be between 0 and 600 + // seconds. Defaults to 10 seconds. MinimumBackoff string `json:"minimumBackoff,omitempty"` // ForceSendFields is a list of field names (e.g. "MaximumBackoff") to @@ -1583,7 +1739,8 @@ func (s *Schema) MarshalJSON() ([]byte, error) { // SchemaSettings: Settings for validating messages published against a // schema. 
type SchemaSettings struct { - // Encoding: The encoding of messages validated against `schema`. + // Encoding: Optional. The encoding of messages validated against + // `schema`. // // Possible values: // "ENCODING_UNSPECIFIED" - Unspecified @@ -1592,14 +1749,14 @@ type SchemaSettings struct { // schema types, binary encoding may not be available. Encoding string `json:"encoding,omitempty"` - // FirstRevisionId: The minimum (inclusive) revision allowed for - // validating messages. If empty or not present, allow any revision to - // be validated against last_revision or any revision created before. + // FirstRevisionId: Optional. The minimum (inclusive) revision allowed + // for validating messages. If empty or not present, allow any revision + // to be validated against last_revision or any revision created before. FirstRevisionId string `json:"firstRevisionId,omitempty"` - // LastRevisionId: The maximum (inclusive) revision allowed for - // validating messages. If empty or not present, allow any revision to - // be validated against first_revision or any revision created after. + // LastRevisionId: Optional. The maximum (inclusive) revision allowed + // for validating messages. If empty or not present, allow any revision + // to be validated against first_revision or any revision created after. LastRevisionId string `json:"lastRevisionId,omitempty"` // Schema: Required. The name of the schema that messages published @@ -1633,22 +1790,22 @@ func (s *SchemaSettings) MarshalJSON() ([]byte, error) { // SeekRequest: Request for the `Seek` method. type SeekRequest struct { - // Snapshot: The snapshot to seek to. The snapshot's topic must be the - // same as that of the provided subscription. Format is + // Snapshot: Optional. The snapshot to seek to. The snapshot's topic + // must be the same as that of the provided subscription. Format is // `projects/{project}/snapshots/{snap}`. Snapshot string `json:"snapshot,omitempty"` - // Time: The time to seek to. 
Messages retained in the subscription that - // were published before this time are marked as acknowledged, and - // messages retained in the subscription that were published after this - // time are marked as unacknowledged. Note that this operation affects - // only those messages retained in the subscription (configured by the - // combination of `message_retention_duration` and - // `retain_acked_messages`). For example, if `time` corresponds to a - // point before the message retention window (or to a point before the - // system's notion of the subscription creation time), only retained - // messages will be marked as unacknowledged, and already-expunged - // messages will not be restored. + // Time: Optional. The time to seek to. Messages retained in the + // subscription that were published before this time are marked as + // acknowledged, and messages retained in the subscription that were + // published after this time are marked as unacknowledged. Note that + // this operation affects only those messages retained in the + // subscription (configured by the combination of + // `message_retention_duration` and `retain_acked_messages`). For + // example, if `time` corresponds to a point before the message + // retention window (or to a point before the system's notion of the + // subscription creation time), only retained messages will be marked as + // unacknowledged, and already-expunged messages will not be restored. Time string `json:"time,omitempty"` // ForceSendFields is a list of field names (e.g. "Snapshot") to @@ -1719,28 +1876,28 @@ func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { // you can set the acknowledgment state of messages in an existing // subscription to the state captured by a snapshot. type Snapshot struct { - // ExpireTime: The snapshot is guaranteed to exist up until this time. A - // newly-created snapshot expires no later than 7 days from the time of - // its creation. 
Its exact lifetime is determined at creation by the - // existing backlog in the source subscription. Specifically, the - // lifetime of the snapshot is `7 days - (age of oldest unacked message - // in the subscription)`. For example, consider a subscription whose - // oldest unacked message is 3 days old. If a snapshot is created from - // this subscription, the snapshot -- which will always capture this - // 3-day-old backlog as long as the snapshot exists -- will expire in 4 - // days. The service will refuse to create a snapshot that would expire - // in less than 1 hour after creation. + // ExpireTime: Optional. The snapshot is guaranteed to exist up until + // this time. A newly-created snapshot expires no later than 7 days from + // the time of its creation. Its exact lifetime is determined at + // creation by the existing backlog in the source subscription. + // Specifically, the lifetime of the snapshot is `7 days - (age of + // oldest unacked message in the subscription)`. For example, consider a + // subscription whose oldest unacked message is 3 days old. If a + // snapshot is created from this subscription, the snapshot -- which + // will always capture this 3-day-old backlog as long as the snapshot + // exists -- will expire in 4 days. The service will refuse to create a + // snapshot that would expire in less than 1 hour after creation. ExpireTime string `json:"expireTime,omitempty"` - // Labels: See [Creating and managing labels] + // Labels: Optional. See [Creating and managing labels] // (https://cloud.google.com/pubsub/docs/labels). Labels map[string]string `json:"labels,omitempty"` - // Name: The name of the snapshot. + // Name: Optional. The name of the snapshot. Name string `json:"name,omitempty"` - // Topic: The name of the topic from which this snapshot is retaining - // messages. + // Topic: Optional. The name of the topic from which this snapshot is + // retaining messages. 
Topic string `json:"topic,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1770,88 +1927,95 @@ func (s *Snapshot) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Subscription: A subscription resource. If none of `push_config` or -// `bigquery_config` is set, then the subscriber will pull and ack -// messages using API methods. At most one of these fields may be set. +// Subscription: A subscription resource. If none of `push_config`, +// `bigquery_config`, or `cloud_storage_config` is set, then the +// subscriber will pull and ack messages using API methods. At most one +// of these fields may be set. type Subscription struct { - // AckDeadlineSeconds: The approximate amount of time (on a best-effort - // basis) Pub/Sub waits for the subscriber to acknowledge receipt before - // resending the message. In the interval after the message is delivered - // and before it is acknowledged, it is considered to be _outstanding_. - // During that time period, the message will not be redelivered (on a - // best-effort basis). For pull subscriptions, this value is used as the - // initial value for the ack deadline. To override this value for a - // given message, call `ModifyAckDeadline` with the corresponding - // `ack_id` if using non-streaming pull or send the `ack_id` in a - // `StreamingModifyAckDeadlineRequest` if using streaming pull. The - // minimum custom deadline you can specify is 10 seconds. The maximum - // custom deadline you can specify is 600 seconds (10 minutes). If this - // parameter is 0, a default value of 10 seconds is used. For push - // delivery, this value is also used to set the request timeout for the - // call to the push endpoint. If the subscriber never acknowledges the - // message, the Pub/Sub system will eventually redeliver the message. + // AckDeadlineSeconds: Optional. 
The approximate amount of time (on a + // best-effort basis) Pub/Sub waits for the subscriber to acknowledge + // receipt before resending the message. In the interval after the + // message is delivered and before it is acknowledged, it is considered + // to be _outstanding_. During that time period, the message will not be + // redelivered (on a best-effort basis). For pull subscriptions, this + // value is used as the initial value for the ack deadline. To override + // this value for a given message, call `ModifyAckDeadline` with the + // corresponding `ack_id` if using non-streaming pull or send the + // `ack_id` in a `StreamingModifyAckDeadlineRequest` if using streaming + // pull. The minimum custom deadline you can specify is 10 seconds. The + // maximum custom deadline you can specify is 600 seconds (10 minutes). + // If this parameter is 0, a default value of 10 seconds is used. For + // push delivery, this value is also used to set the request timeout for + // the call to the push endpoint. If the subscriber never acknowledges + // the message, the Pub/Sub system will eventually redeliver the + // message. AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"` - // BigqueryConfig: If delivery to BigQuery is used with this + // BigqueryConfig: Optional. If delivery to BigQuery is used with this // subscription, this field is used to configure it. BigqueryConfig *BigQueryConfig `json:"bigqueryConfig,omitempty"` - // DeadLetterPolicy: A policy that specifies the conditions for dead - // lettering messages in this subscription. If dead_letter_policy is not - // set, dead lettering is disabled. The Cloud Pub/Sub service account - // associated with this subscriptions's parent project (i.e., + // CloudStorageConfig: Optional. If delivery to Google Cloud Storage is + // used with this subscription, this field is used to configure it. + CloudStorageConfig *CloudStorageConfig `json:"cloudStorageConfig,omitempty"` + + // DeadLetterPolicy: Optional. 
A policy that specifies the conditions + // for dead lettering messages in this subscription. If + // dead_letter_policy is not set, dead lettering is disabled. The + // Pub/Sub service account associated with this subscriptions's parent + // project (i.e., // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must // have permission to Acknowledge() messages on this subscription. DeadLetterPolicy *DeadLetterPolicy `json:"deadLetterPolicy,omitempty"` - // Detached: Indicates whether the subscription is detached from its - // topic. Detached subscriptions don't receive messages from their topic - // and don't retain any backlog. `Pull` and `StreamingPull` requests - // will return FAILED_PRECONDITION. If the subscription is a push - // subscription, pushes to the endpoint will not be made. + // Detached: Optional. Indicates whether the subscription is detached + // from its topic. Detached subscriptions don't receive messages from + // their topic and don't retain any backlog. `Pull` and `StreamingPull` + // requests will return FAILED_PRECONDITION. If the subscription is a + // push subscription, pushes to the endpoint will not be made. Detached bool `json:"detached,omitempty"` - // EnableExactlyOnceDelivery: If true, Pub/Sub provides the following - // guarantees for the delivery of a message with a given value of - // `message_id` on this subscription: * The message sent to a subscriber - // is guaranteed not to be resent before the message's acknowledgement - // deadline expires. * An acknowledged message will not be resent to a - // subscriber. Note that subscribers may still receive multiple copies - // of a message when `enable_exactly_once_delivery` is true if the - // message was published multiple times by a publisher client. These - // copies are considered distinct by Pub/Sub and have distinct - // `message_id` values. + // EnableExactlyOnceDelivery: Optional. 
If true, Pub/Sub provides the + // following guarantees for the delivery of a message with a given value + // of `message_id` on this subscription: * The message sent to a + // subscriber is guaranteed not to be resent before the message's + // acknowledgement deadline expires. * An acknowledged message will not + // be resent to a subscriber. Note that subscribers may still receive + // multiple copies of a message when `enable_exactly_once_delivery` is + // true if the message was published multiple times by a publisher + // client. These copies are considered distinct by Pub/Sub and have + // distinct `message_id` values. EnableExactlyOnceDelivery bool `json:"enableExactlyOnceDelivery,omitempty"` - // EnableMessageOrdering: If true, messages published with the same - // `ordering_key` in `PubsubMessage` will be delivered to the + // EnableMessageOrdering: Optional. If true, messages published with the + // same `ordering_key` in `PubsubMessage` will be delivered to the // subscribers in the order in which they are received by the Pub/Sub // system. Otherwise, they may be delivered in any order. EnableMessageOrdering bool `json:"enableMessageOrdering,omitempty"` - // ExpirationPolicy: A policy that specifies the conditions for this - // subscription's expiration. A subscription is considered active as - // long as any connected subscriber is successfully consuming messages - // from the subscription or is issuing operations on the subscription. - // If `expiration_policy` is not set, a *default policy* with `ttl` of - // 31 days will be used. The minimum allowed value for + // ExpirationPolicy: Optional. A policy that specifies the conditions + // for this subscription's expiration. A subscription is considered + // active as long as any connected subscriber is successfully consuming + // messages from the subscription or is issuing operations on the + // subscription. If `expiration_policy` is not set, a *default policy* + // with `ttl` of 31 days will be used. 
The minimum allowed value for // `expiration_policy.ttl` is 1 day. If `expiration_policy` is set, but // `expiration_policy.ttl` is not set, the subscription never expires. ExpirationPolicy *ExpirationPolicy `json:"expirationPolicy,omitempty"` - // Filter: An expression written in the Pub/Sub filter language - // (https://cloud.google.com/pubsub/docs/filtering). If non-empty, then - // only `PubsubMessage`s whose `attributes` field matches the filter are - // delivered on this subscription. If empty, then no messages are - // filtered out. + // Filter: Optional. An expression written in the Pub/Sub filter + // language (https://cloud.google.com/pubsub/docs/filtering). If + // non-empty, then only `PubsubMessage`s whose `attributes` field + // matches the filter are delivered on this subscription. If empty, then + // no messages are filtered out. Filter string `json:"filter,omitempty"` - // Labels: See Creating and managing labels + // Labels: Optional. See Creating and managing labels // (https://cloud.google.com/pubsub/docs/labels). Labels map[string]string `json:"labels,omitempty"` - // MessageRetentionDuration: How long to retain unacknowledged messages - // in the subscription's backlog, from the moment a message is + // MessageRetentionDuration: Optional. How long to retain unacknowledged + // messages in the subscription's backlog, from the moment a message is // published. If `retain_acked_messages` is true, then this also // configures the retention of acknowledged messages, and thus // configures how far back in time a `Seek` can be done. Defaults to 7 @@ -1866,25 +2030,25 @@ type Subscription struct { // and 255 characters in length, and it must not start with "goog". Name string `json:"name,omitempty"` - // PushConfig: If push delivery is used with this subscription, this - // field is used to configure it. + // PushConfig: Optional. If push delivery is used with this + // subscription, this field is used to configure it. 
PushConfig *PushConfig `json:"pushConfig,omitempty"` - // RetainAckedMessages: Indicates whether to retain acknowledged - // messages. If true, then messages are not expunged from the - // subscription's backlog, even if they are acknowledged, until they + // RetainAckedMessages: Optional. Indicates whether to retain + // acknowledged messages. If true, then messages are not expunged from + // the subscription's backlog, even if they are acknowledged, until they // fall out of the `message_retention_duration` window. This must be // true if you would like to [`Seek` to a timestamp] // (https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) // in the past to replay previously-acknowledged messages. RetainAckedMessages bool `json:"retainAckedMessages,omitempty"` - // RetryPolicy: A policy that specifies how Pub/Sub retries message - // delivery for this subscription. If not set, the default retry policy - // is applied. This generally implies that messages will be retried as - // soon as possible for healthy subscribers. RetryPolicy will be - // triggered on NACKs or acknowledgement deadline exceeded events for a - // given message. + // RetryPolicy: Optional. A policy that specifies how Pub/Sub retries + // message delivery for this subscription. If not set, the default retry + // policy is applied. This generally implies that messages will be + // retried as soon as possible for healthy subscribers. RetryPolicy will + // be triggered on NACKs or acknowledgement deadline exceeded events for + // a given message. RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` // State: Output only. An output-only field indicating whether or not @@ -2008,20 +2172,26 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TextConfig: Configuration for writing message data in text format. +// Message payloads will be written to files as raw text, separated by a +// newline. 
+type TextConfig struct { +} + // Topic: A topic resource. type Topic struct { - // KmsKeyName: The resource name of the Cloud KMS CryptoKey to be used - // to protect access to messages published on this topic. The expected - // format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + // KmsKeyName: Optional. The resource name of the Cloud KMS CryptoKey to + // be used to protect access to messages published on this topic. The + // expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`. KmsKeyName string `json:"kmsKeyName,omitempty"` - // Labels: See [Creating and managing labels] + // Labels: Optional. See [Creating and managing labels] // (https://cloud.google.com/pubsub/docs/labels). Labels map[string]string `json:"labels,omitempty"` - // MessageRetentionDuration: Indicates the minimum duration to retain a - // message after it is published to the topic. If this field is set, - // messages published to the topic in the last + // MessageRetentionDuration: Optional. Indicates the minimum duration to + // retain a message after it is published to the topic. If this field is + // set, messages published to the topic in the last // `message_retention_duration` are always available to subscribers. For // instance, it allows any attached subscription to seek to a timestamp // (https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) @@ -2030,9 +2200,9 @@ type Topic struct { // subscriptions. Cannot be more than 31 days or less than 10 minutes. MessageRetentionDuration string `json:"messageRetentionDuration,omitempty"` - // MessageStoragePolicy: Policy constraining the set of Google Cloud - // Platform regions where messages published to the topic may be stored. - // If not present, then no constraints are in effect. + // MessageStoragePolicy: Optional. Policy constraining the set of Google + // Cloud Platform regions where messages published to the topic may be + // stored. If not present, then no constraints are in effect. 
MessageStoragePolicy *MessageStoragePolicy `json:"messageStoragePolicy,omitempty"` // Name: Required. The name of the topic. It must have the format @@ -2043,13 +2213,13 @@ type Topic struct { // in length, and it must not start with "goog". Name string `json:"name,omitempty"` - // SatisfiesPzs: Reserved for future use. This field is set only in - // responses from the server; it is ignored if it is set in any + // SatisfiesPzs: Optional. Reserved for future use. This field is set + // only in responses from the server; it is ignored if it is set in any // requests. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` - // SchemaSettings: Settings for validating messages published against a - // schema. + // SchemaSettings: Optional. Settings for validating messages published + // against a schema. SchemaSettings *SchemaSettings `json:"schemaSettings,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2835,6 +3005,7 @@ func (c *ProjectsSchemasDeleteRevisionCall) Do(opts ...googleapi.CallOption) (*S // "type": "string" // }, // "revisionId": { + // "deprecated": true, // "description": "Optional. This field is deprecated and should not be used for specifying the revision ID. The revision ID should be specified via the `name` parameter.", // "location": "query", // "type": "string" @@ -5187,13 +5358,13 @@ func (c *ProjectsSnapshotsListCall) Do(opts ...googleapi.CallOption) (*ListSnaps // ], // "parameters": { // "pageSize": { - // "description": "Maximum number of snapshots to return.", + // "description": "Optional. Maximum number of snapshots to return.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last `ListSnapshotsResponse`; indicates that this is a continuation of a prior `ListSnapshots` call, and that the system should return the next page of data.", + // "description": "Optional. 
The value returned by the last `ListSnapshotsResponse`; indicates that this is a continuation of a prior `ListSnapshots` call, and that the system should return the next page of data.", // "location": "query", // "type": "string" // }, @@ -5255,7 +5426,7 @@ type ProjectsSnapshotsPatchCall struct { // you can set the acknowledgment state of messages in an existing // subscription to the state captured by a snapshot. // -// - name: The name of the snapshot. +// - name: Optional. The name of the snapshot. func (r *ProjectsSnapshotsService) Patch(name string, updatesnapshotrequest *UpdateSnapshotRequest) *ProjectsSnapshotsPatchCall { c := &ProjectsSnapshotsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5363,7 +5534,7 @@ func (c *ProjectsSnapshotsPatchCall) Do(opts ...googleapi.CallOption) (*Snapshot // ], // "parameters": { // "name": { - // "description": "The name of the snapshot.", + // "description": "Optional. The name of the snapshot.", // "location": "path", // "pattern": "^projects/[^/]+/snapshots/[^/]+$", // "required": true, @@ -6738,13 +6909,13 @@ func (c *ProjectsSubscriptionsListCall) Do(opts ...googleapi.CallOption) (*ListS // ], // "parameters": { // "pageSize": { - // "description": "Maximum number of subscriptions to return.", + // "description": "Optional. Maximum number of subscriptions to return.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last `ListSubscriptionsResponse`; indicates that this is a continuation of a prior `ListSubscriptions` call, and that the system should return the next page of data.", + // "description": "Optional. 
The value returned by the last `ListSubscriptionsResponse`; indicates that this is a continuation of a prior `ListSubscriptions` call, and that the system should return the next page of data.", // "location": "query", // "type": "string" // }, @@ -8587,13 +8758,13 @@ func (c *ProjectsTopicsListCall) Do(opts ...googleapi.CallOption) (*ListTopicsRe // ], // "parameters": { // "pageSize": { - // "description": "Maximum number of topics to return.", + // "description": "Optional. Maximum number of topics to return.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last `ListTopicsResponse`; indicates that this is a continuation of a prior `ListTopics` call, and that the system should return the next page of data.", + // "description": "Optional. The value returned by the last `ListTopicsResponse`; indicates that this is a continuation of a prior `ListTopics` call, and that the system should return the next page of data.", // "location": "query", // "type": "string" // }, @@ -9380,13 +9551,13 @@ func (c *ProjectsTopicsSnapshotsListCall) Do(opts ...googleapi.CallOption) (*Lis // ], // "parameters": { // "pageSize": { - // "description": "Maximum number of snapshot names to return.", + // "description": "Optional. Maximum number of snapshot names to return.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last `ListTopicSnapshotsResponse`; indicates that this is a continuation of a prior `ListTopicSnapshots` call, and that the system should return the next page of data.", + // "description": "Optional. 
The value returned by the last `ListTopicSnapshotsResponse`; indicates that this is a continuation of a prior `ListTopicSnapshots` call, and that the system should return the next page of data.", // "location": "query", // "type": "string" // }, @@ -9576,13 +9747,13 @@ func (c *ProjectsTopicsSubscriptionsListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "pageSize": { - // "description": "Maximum number of subscription names to return.", + // "description": "Optional. Maximum number of subscription names to return.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { - // "description": "The value returned by the last `ListTopicSubscriptionsResponse`; indicates that this is a continuation of a prior `ListTopicSubscriptions` call, and that the system should return the next page of data.", + // "description": "Optional. The value returned by the last `ListTopicSubscriptionsResponse`; indicates that this is a continuation of a prior `ListTopicSubscriptions` call, and that the system should return the next page of data.", // "location": "query", // "type": "string" // }, diff --git a/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-api.json b/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-api.json index edd2b85815..2bd30332af 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-api.json @@ -889,7 +889,7 @@ ], "parameters": { "allowMissing": { - "description": "If set to true, and if the Service does not exist, it will create a new one. Caller must have both create and update permissions for this call if this is set to true.", + "description": "If set to true, and if the Service does not exist, it will create a new one. 
The caller must have 'run.services.create' permissions if this is set to true and the Service does not exist.", "location": "query", "type": "boolean" }, @@ -1087,7 +1087,7 @@ } } }, - "revision": "20230305", + "revision": "20230618", "rootUrl": "https://run.googleapis.com/", "schemas": { "GoogleCloudRunV2BinaryAuthorization": { @@ -1129,13 +1129,15 @@ "EXECUTION_REASON_UNDEFINED", "JOB_STATUS_SERVICE_POLLING_ERROR", "NON_ZERO_EXIT_CODE", - "CANCELLED" + "CANCELLED", + "CANCELLING" ], "enumDescriptions": [ "Default value.", "Internal system error getting execution status. System will retry.", "A task reached its retry limit and the last attempt failed due to the user container exiting with a non-zero exit code.", - "The execution was cancelled by users." + "The execution was cancelled by users.", + "The execution is in the process of being cancelled." ], "type": "string" }, @@ -1262,18 +1264,25 @@ "type": "object" }, "GoogleCloudRunV2Container": { - "description": "A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments may be supplied by the system to the container at runtime.", + "description": "A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments can be supplied by the system to the container at runtime.", "id": "GoogleCloudRunV2Container", "properties": { "args": { - "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided.", "items": { "type": "string" }, "type": "array" }, "command": { - "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided.", + "items": { + "type": "string" + }, + "type": "array" + }, + "dependsOn": { + "description": "Container names which must start before this container.", "items": { "type": "string" }, @@ -1287,12 +1296,12 @@ "type": "array" }, "image": { - "description": "Required. Name of the container image in Dockerhub, Google Artifact Registry, or Google Container Registry. If the host is not provided, Dockerhub is assumed. More info: https://kubernetes.io/docs/concepts/containers/images", + "description": "Required. Name of the container image in Dockerhub, Google Artifact Registry, or Google Container Registry. If the host is not provided, Dockerhub is assumed.", "type": "string" }, "livenessProbe": { "$ref": "GoogleCloudRunV2Probe", - "description": "Periodic probe of container liveness. Container will be restarted if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + "description": "Periodic probe of container liveness. Container will be restarted if the probe fails." }, "name": { "description": "Name of the container specified as a DNS_LABEL (RFC 1123).", @@ -1307,11 +1316,11 @@ }, "resources": { "$ref": "GoogleCloudRunV2ResourceRequirements", - "description": "Compute Resource requirements by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources" + "description": "Compute Resource requirements by this container." }, "startupProbe": { "$ref": "GoogleCloudRunV2Probe", - "description": "Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + "description": "Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails." }, "volumeMounts": { "description": "Volume to mount into the container's filesystem.", @@ -1343,12 +1352,35 @@ }, "type": "object" }, + "GoogleCloudRunV2EmptyDirVolumeSource": { + "description": "Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs).", + "id": "GoogleCloudRunV2EmptyDirVolumeSource", + "properties": { + "medium": { + "description": "The medium on which the data is stored. Acceptable values today is only MEMORY or none. When none, the default will currently be backed by memory but could change over time. 
+optional", + "enum": [ + "MEDIUM_UNSPECIFIED", + "MEMORY" + ], + "enumDescriptions": [ + "When not specified, falls back to the default implementation which is currently in memory (this may change over time).", + "Explicitly set the EmptyDir to be in memory. Uses tmpfs." + ], + "type": "string" + }, + "sizeLimit": { + "description": "Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudRunV2EnvVar": { "description": "EnvVar represents an environment variable present in a Container.", "id": "GoogleCloudRunV2EnvVar", "properties": { "name": { - "description": "Required. Name of the environment variable. Must be a C_IDENTIFIER, and mnay not exceed 32768 characters.", + "description": "Required. Name of the environment variable. Must be a C_IDENTIFIER, and must not exceed 32768 characters.", "type": "string" }, "value": { @@ -1381,7 +1413,8 @@ "additionalProperties": { "type": "string" }, - "description": "KRM-style annotations for the resource.", + "description": "Output only. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects.", + "readOnly": true, "type": "object" }, "cancelledCount": { @@ -1448,11 +1481,12 @@ "additionalProperties": { "type": "string" }, - "description": "KRM-style labels for the resource. 
User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels", + "description": "Output only. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels", + "readOnly": true, "type": "object" }, "launchStage": { - "description": "Set the launch stage to a preview stage on write to allow use of preview features in that stage. On read, describes whether the resource uses preview features. Launch Stages are defined at [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages).", + "description": "The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are were, this field will be BETA.", "enum": [ "LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", @@ -1492,7 +1526,7 @@ "type": "string" }, "parallelism": { - "description": "Output only. Specifies the maximum desired number of tasks the execution should run at any given time. Must be \u003c= task_count. The actual number of tasks running in steady state will be less than this number when ((.spec.task_count - .status.successful) \u003c .spec.parallelism), i.e. 
when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "description": "Output only. Specifies the maximum desired number of tasks the execution should run at any given time. Must be \u003c= task_count. The actual number of tasks running in steady state will be less than this number when ((.spec.task_count - .status.successful) \u003c .spec.parallelism), i.e. when the work left to do is less than max parallelism.", "format": "int32", "readOnly": true, "type": "integer" @@ -1532,7 +1566,7 @@ "type": "integer" }, "taskCount": { - "description": "Output only. Specifies the desired number of tasks the execution should run. Setting to 1 means that parallelism is limited to 1 and the success of that task signals the success of the execution. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "description": "Output only. Specifies the desired number of tasks the execution should run. Setting to 1 means that parallelism is limited to 1 and the success of that task signals the success of the execution.", "format": "int32", "readOnly": true, "type": "integer" @@ -1585,14 +1619,14 @@ "additionalProperties": { "type": "string" }, - "description": "KRM-style annotations for the resource. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 ExecutionTemplate.", + "description": "Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. 
All system annotations in v1 now have a corresponding field in v2 ExecutionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules.", "type": "object" }, "labels": { "additionalProperties": { "type": "string" }, - "description": "KRM-style labels for the resource. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 ExecutionTemplate.", + "description": "Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 ExecutionTemplate.", "type": "object" }, "parallelism": { @@ -1674,7 +1708,7 @@ "additionalProperties": { "type": "string" }, - "description": "KRM-style annotations for the resource. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 Job. This field follows Kubernetes annotations' namespacing, limits, and rules. 
More info: https://kubernetes.io/docs/user-guide/annotations", + "description": "Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected on new resources. All system annotations in v1 now have a corresponding field in v2 Job. This field follows Kubernetes annotations' namespacing, limits, and rules.", "type": "object" }, "binaryAuthorization": { @@ -1741,7 +1775,7 @@ "additionalProperties": { "type": "string" }, - "description": "KRM-style labels for the resource. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Job.", + "description": "Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. 
All system labels in v1 now have a corresponding field in v2 Job.", "type": "object" }, "lastModifier": { @@ -1755,7 +1789,7 @@ "readOnly": true }, "launchStage": { - "description": "The launch stage as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. If no value is specified, GA is assumed.", + "description": "The launch stage as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. If no value is specified, GA is assumed. Set the launch stage to a preview stage on input to allow use of preview features in that stage. On read (or output), describes whether the resource uses preview features. For example, if ALPHA is provided as input, but only BETA and GA-level features are used, this field will be BETA on output.", "enum": [ "LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", @@ -1929,7 +1963,7 @@ "description": "HTTPGet specifies the http request to perform. Exactly one of httpGet, tcpSocket, or grpc must be specified." }, "initialDelaySeconds": { - "description": "Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "description": "Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240.", "format": "int32", "type": "integer" }, @@ -1943,7 +1977,7 @@ "description": "TCPSocket specifies an action involving a TCP port. Exactly one of httpGet, tcpSocket, or grpc must be specified." }, "timeoutSeconds": { - "description": "Number of seconds after which the probe times out. Defaults to 1 second. 
Minimum value is 1. Maximum value is 3600. Must be smaller than period_seconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "description": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than period_seconds.", "format": "int32", "type": "integer" } @@ -1962,8 +1996,12 @@ "additionalProperties": { "type": "string" }, - "description": "Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go", + "description": "Only ´memory´ and 'cpu' are supported. Notes: * The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. For more information, go to https://cloud.google.com/run/docs/configuring/cpu. * For supported 'memory' values and syntax, go to https://cloud.google.com/run/docs/configuring/memory-limits", "type": "object" + }, + "startupCpuBoost": { + "description": "Determines whether CPU should be boosted on startup of a new container instance above the requested CPU threshold, this can help reduce cold-start latency.", + "type": "boolean" } }, "type": "object" @@ -1976,7 +2014,8 @@ "additionalProperties": { "type": "string" }, - "description": "KRM-style annotations for the resource.", + "description": "Output only. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects.", + "readOnly": true, "type": "object" }, "conditions": { @@ -2064,11 +2103,12 @@ "additionalProperties": { "type": "string" }, - "description": "KRM-style labels for the resource. 
User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels", + "description": "Output only. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels.", + "readOnly": true, "type": "object" }, "launchStage": { - "description": "Set the launch stage to a preview stage on write to allow use of preview features in that stage. On read, describes whether the resource uses preview features. Launch Stages are defined at [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages).", + "description": "The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are were, this field will be BETA.", "enum": [ "LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", @@ -2135,6 +2175,10 @@ "description": "Email address of the IAM service account associated with the revision of the service. 
The service account represents the identity of the running revision, and determines what permissions the revision has.", "type": "string" }, + "sessionAffinity": { + "description": "Enable session affinity.", + "type": "boolean" + }, "timeout": { "description": "Max allowed time for an instance to respond to a request.", "format": "google-duration", @@ -2190,7 +2234,7 @@ "additionalProperties": { "type": "string" }, - "description": "KRM-style annotations for the resource. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 RevisionTemplate.", + "description": "Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules.", "type": "object" }, "containers": { @@ -2222,7 +2266,7 @@ "additionalProperties": { "type": "string" }, - "description": "KRM-style labels for the resource. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 RevisionTemplate.", + "description": "Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. 
For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 RevisionTemplate.", "type": "object" }, "maxInstanceRequestConcurrency": { @@ -2242,6 +2286,10 @@ "description": "Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account.", "type": "string" }, + "sessionAffinity": { + "description": "Enable session affinity.", + "type": "boolean" + }, "timeout": { "description": "Max allowed time for an instance to respond to a request.", "format": "google-duration", @@ -2322,7 +2370,7 @@ "additionalProperties": { "type": "string" }, - "description": "Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 Service. This field follows Kubernetes annotations' namespacing, limits, and rules. More info: https://kubernetes.io/docs/user-guide/annotations", + "description": "Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected in new resources. All system annotations in v1 now have a corresponding field in v2 Service. This field follows Kubernetes annotations' namespacing, limits, and rules.", "type": "object" }, "binaryAuthorization": { @@ -2356,6 +2404,13 @@ "readOnly": true, "type": "string" }, + "customAudiences": { + "description": "Custom audiences that can be used in the audience field of ID token for authenticated requests.", + "items": { + "type": "string" + }, + "type": "array" + }, "deleteTime": { "description": "Output only. The deletion time.", "format": "google-datetime", @@ -2403,7 +2458,7 @@ "additionalProperties": { "type": "string" }, - "description": "Map of string keys and values that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Service.", + "description": "Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. 
Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Service.", "type": "object" }, "lastModifier": { @@ -2422,7 +2477,7 @@ "type": "string" }, "launchStage": { - "description": "The launch stage as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. If no value is specified, GA is assumed.", + "description": "The launch stage as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. If no value is specified, GA is assumed. Set the launch stage to a preview stage on input to allow use of preview features in that stage. On read (or output), describes whether the resource uses preview features. For example, if ALPHA is provided as input, but only BETA and GA-level features are used, this field will be BETA on output.", "enum": [ "LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", @@ -2528,7 +2583,8 @@ "additionalProperties": { "type": "string" }, - "description": "KRM-style annotations for the resource.", + "description": "Output only. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects.", + "readOnly": true, "type": "object" }, "completionTime": { @@ -2620,7 +2676,8 @@ "additionalProperties": { "type": "string" }, - "description": "KRM-style labels for the resource. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. 
For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels", + "description": "Output only. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels", + "readOnly": true, "type": "object" }, "lastAttemptResult": { @@ -2881,13 +2938,17 @@ "$ref": "GoogleCloudRunV2CloudSqlInstance", "description": "For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run." }, + "emptyDir": { + "$ref": "GoogleCloudRunV2EmptyDirVolumeSource", + "description": "Ephemeral storage used as a shared volume." + }, "name": { "description": "Required. Volume's name.", "type": "string" }, "secret": { "$ref": "GoogleCloudRunV2SecretVolumeSource", - "description": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret" + "description": "Secret represents a secret that should populate this volume." } }, "type": "object" @@ -2916,7 +2977,7 @@ "type": "string" }, "egress": { - "description": "Traffic VPC egress settings.", + "description": "Traffic VPC egress settings. 
If not provided, it defaults to PRIVATE_RANGES_ONLY.", "enum": [ "VPC_EGRESS_UNSPECIFIED", "ALL_TRAFFIC", diff --git a/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-gen.go b/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-gen.go index 1d9a4588dc..882da887dd 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/run/v2/run-gen.go @@ -71,6 +71,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "run:v2" const apiName = "run" @@ -316,6 +317,7 @@ type GoogleCloudRunV2Condition struct { // attempt failed due to the user container exiting with a non-zero exit // code. // "CANCELLED" - The execution was cancelled by users. + // "CANCELLING" - The execution is in the process of being cancelled. ExecutionReason string `json:"executionReason,omitempty"` // LastTransitionTime: Last time the condition transitioned from one @@ -442,41 +444,29 @@ func (s *GoogleCloudRunV2Condition) MarshalJSON() ([]byte, error) { // GoogleCloudRunV2Container: A single application container. This // specifies both the container to run, the command to run in the // container and the arguments to supply to it. Note that additional -// arguments may be supplied by the system to the container at runtime. +// arguments can be supplied by the system to the container at runtime. type GoogleCloudRunV2Container struct { // Args: Arguments to the entrypoint. The docker image's CMD is used if - // this is not provided. Variable references $(VAR_NAME) are expanded - // using the container's environment. If a variable cannot be resolved, - // the reference in the input string will be unchanged. The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable - // exists or not. 
More info: - // https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // this is not provided. Args []string `json:"args,omitempty"` // Command: Entrypoint array. Not executed within a shell. The docker - // image's ENTRYPOINT is used if this is not provided. Variable - // references $(VAR_NAME) are expanded using the container's - // environment. If a variable cannot be resolved, the reference in the - // input string will be unchanged. The $(VAR_NAME) syntax can be escaped - // with a double $$, ie: $$(VAR_NAME). Escaped references will never be - // expanded, regardless of whether the variable exists or not. More - // info: - // https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // image's ENTRYPOINT is used if this is not provided. Command []string `json:"command,omitempty"` + // DependsOn: Container names which must start before this container. + DependsOn []string `json:"dependsOn,omitempty"` + // Env: List of environment variables to set in the container. Env []*GoogleCloudRunV2EnvVar `json:"env,omitempty"` // Image: Required. Name of the container image in Dockerhub, Google // Artifact Registry, or Google Container Registry. If the host is not - // provided, Dockerhub is assumed. More info: - // https://kubernetes.io/docs/concepts/containers/images + // provided, Dockerhub is assumed. Image string `json:"image,omitempty"` // LivenessProbe: Periodic probe of container liveness. Container will - // be restarted if the probe fails. More info: - // https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // be restarted if the probe fails. LivenessProbe *GoogleCloudRunV2Probe `json:"livenessProbe,omitempty"` // Name: Name of the container specified as a DNS_LABEL (RFC 1123). 
@@ -489,16 +479,13 @@ type GoogleCloudRunV2Container struct { // through the PORT environment variable for the container to listen on. Ports []*GoogleCloudRunV2ContainerPort `json:"ports,omitempty"` - // Resources: Compute Resource requirements by this container. More - // info: - // https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + // Resources: Compute Resource requirements by this container. Resources *GoogleCloudRunV2ResourceRequirements `json:"resources,omitempty"` // StartupProbe: Startup probe of application within the container. All // other probes are disabled if a startup probe is provided, until it // succeeds. Container will not be added to service endpoints if the - // probe fails. More info: - // https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // probe fails. StartupProbe *GoogleCloudRunV2Probe `json:"startupProbe,omitempty"` // VolumeMounts: Volume to mount into the container's filesystem. @@ -566,11 +553,62 @@ func (s *GoogleCloudRunV2ContainerPort) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GoogleCloudRunV2EmptyDirVolumeSource: Ephemeral storage which can be +// backed by real disks (HD, SSD), network storage or memory (i.e. +// tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral +// in the sense that when the sandbox is taken down, the data is +// destroyed with it (it does not persist across sandbox runs). +type GoogleCloudRunV2EmptyDirVolumeSource struct { + // Medium: The medium on which the data is stored. Acceptable values + // today is only MEMORY or none. When none, the default will currently + // be backed by memory but could change over time. +optional + // + // Possible values: + // "MEDIUM_UNSPECIFIED" - When not specified, falls back to the + // default implementation which is currently in memory (this may change + // over time). + // "MEMORY" - Explicitly set the EmptyDir to be in memory. 
Uses tmpfs. + Medium string `json:"medium,omitempty"` + + // SizeLimit: Limit on the storage usable by this EmptyDir volume. The + // size limit is also applicable for memory medium. The maximum usage on + // memory medium EmptyDir would be the minimum value between the + // SizeLimit specified here and the sum of memory limits of all + // containers in a pod. This field's values are of the 'Quantity' k8s + // type: + // https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. + // The default is nil which means that the limit is undefined. More + // info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir + SizeLimit string `json:"sizeLimit,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Medium") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Medium") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudRunV2EmptyDirVolumeSource) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudRunV2EmptyDirVolumeSource + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GoogleCloudRunV2EnvVar: EnvVar represents an environment variable // present in a Container. 
type GoogleCloudRunV2EnvVar struct { // Name: Required. Name of the environment variable. Must be a - // C_IDENTIFIER, and mnay not exceed 32768 characters. + // C_IDENTIFIER, and must not exceed 32768 characters. Name string `json:"name,omitempty"` // Value: Variable references $(VAR_NAME) are expanded using the @@ -642,7 +680,9 @@ func (s *GoogleCloudRunV2EnvVarSource) MarshalJSON() ([]byte, error) { // a single execution. A execution an immutable resource that references // a container image which is run to completion. type GoogleCloudRunV2Execution struct { - // Annotations: KRM-style annotations for the resource. + // Annotations: Output only. Unstructured key value map that may be set + // by external tools to store and arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. Annotations map[string]string `json:"annotations,omitempty"` // CancelledCount: Output only. The number of tasks which reached phase @@ -689,19 +729,22 @@ type GoogleCloudRunV2Execution struct { // Job: Output only. The name of the parent Job. Job string `json:"job,omitempty"` - // Labels: KRM-style labels for the resource. User-provided labels are - // shared with Google's billing system, so they can be used to filter, - // or break down billing charges by team, component, environment, state, - // etc. For more information, visit + // Labels: Output only. Unstructured key value map that can be used to + // organize and categorize objects. User-provided labels are shared with + // Google's billing system, so they can be used to filter, or break down + // billing charges by team, component, environment, state, etc. For more + // information, visit // https://cloud.google.com/resource-manager/docs/creating-managing-labels // or https://cloud.google.com/run/docs/configuring/labels Labels map[string]string `json:"labels,omitempty"` - // LaunchStage: Set the launch stage to a preview stage on write to - // allow use of preview features in that stage. 
On read, describes - // whether the resource uses preview features. Launch Stages are defined - // at Google Cloud Platform Launch Stages - // (https://cloud.google.com/terms/launch-stages). + // LaunchStage: The least stable launch stage needed to create this + // resource, as defined by Google Cloud Platform Launch Stages + // (https://cloud.google.com/terms/launch-stages). Cloud Run supports + // `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was + // used as input. For example, if ALPHA was provided as input in the + // parent resource, but only BETA and GA-level features are were, this + // field will be BETA. // // Possible values: // "LAUNCH_STAGE_UNSPECIFIED" - Do not use this default value. @@ -755,8 +798,7 @@ type GoogleCloudRunV2Execution struct { // task_count. The actual number of tasks running in steady state will // be less than this number when ((.spec.task_count - // .status.successful) < .spec.parallelism), i.e. when the work left to - // do is less than max parallelism. More info: - // https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + // do is less than max parallelism. Parallelism int64 `json:"parallelism,omitempty"` // Reconciling: Output only. Indicates whether the resource's @@ -787,8 +829,7 @@ type GoogleCloudRunV2Execution struct { // TaskCount: Output only. Specifies the desired number of tasks the // execution should run. Setting to 1 means that parallelism is limited // to 1 and the success of that task signals the success of the - // execution. More info: - // https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + // execution. TaskCount int64 `json:"taskCount,omitempty"` // Template: Output only. 
The template used to create tasks for this @@ -870,19 +911,29 @@ func (s *GoogleCloudRunV2ExecutionReference) MarshalJSON() ([]byte, error) { // GoogleCloudRunV2ExecutionTemplate: ExecutionTemplate describes the // data an execution should have when created from a template. type GoogleCloudRunV2ExecutionTemplate struct { - // Annotations: KRM-style annotations for the resource. Cloud Run API v2 - // does not support annotations with `run.googleapis.com`, + // Annotations: Unstructured key value map that may be set by external + // tools to store and arbitrary metadata. They are not queryable and + // should be preserved when modifying objects. Cloud Run API v2 does not + // support annotations with `run.googleapis.com`, // `cloud.googleapis.com`, `serving.knative.dev`, or // `autoscaling.knative.dev` namespaces, and they will be rejected. All // system annotations in v1 now have a corresponding field in v2 - // ExecutionTemplate. + // ExecutionTemplate. This field follows Kubernetes annotations' + // namespacing, limits, and rules. Annotations map[string]string `json:"annotations,omitempty"` - // Labels: KRM-style labels for the resource. Cloud Run API v2 does not - // support labels with `run.googleapis.com`, `cloud.googleapis.com`, - // `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and - // they will be rejected. All system labels in v1 now have a - // corresponding field in v2 ExecutionTemplate. + // Labels: Unstructured key value map that can be used to organize and + // categorize objects. User-provided labels are shared with Google's + // billing system, so they can be used to filter, or break down billing + // charges by team, component, environment, state, etc. For more + // information, visit + // https://cloud.google.com/resource-manager/docs/creating-managing-labels + // or https://cloud.google.com/run/docs/configuring/labels. 
Cloud Run + // API v2 does not support labels with `run.googleapis.com`, + // `cloud.googleapis.com`, `serving.knative.dev`, or + // `autoscaling.knative.dev` namespaces, and they will be rejected. All + // system labels in v1 now have a corresponding field in v2 + // ExecutionTemplate. Labels map[string]string `json:"labels,omitempty"` // Parallelism: Specifies the maximum desired number of tasks the @@ -1037,16 +1088,15 @@ func (s *GoogleCloudRunV2HTTPHeader) MarshalJSON() ([]byte, error) { // GoogleCloudRunV2Job: Job represents the configuration of a single // job, which references a container image that is run to completion. type GoogleCloudRunV2Job struct { - // Annotations: KRM-style annotations for the resource. Unstructured key - // value map that may be set by external tools to store and arbitrary - // metadata. They are not queryable and should be preserved when - // modifying objects. Cloud Run API v2 does not support annotations with - // `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, - // or `autoscaling.knative.dev` namespaces, and they will be rejected. - // All system annotations in v1 now have a corresponding field in v2 - // Job. This field follows Kubernetes annotations' namespacing, limits, - // and rules. More info: - // https://kubernetes.io/docs/user-guide/annotations + // Annotations: Unstructured key value map that may be set by external + // tools to store and arbitrary metadata. They are not queryable and + // should be preserved when modifying objects. Cloud Run API v2 does not + // support annotations with `run.googleapis.com`, + // `cloud.googleapis.com`, `serving.knative.dev`, or + // `autoscaling.knative.dev` namespaces, and they will be rejected on + // new resources. All system annotations in v1 now have a corresponding + // field in v2 Job. This field follows Kubernetes annotations' + // namespacing, limits, and rules. 
Annotations map[string]string `json:"annotations,omitempty"` // BinaryAuthorization: Settings for the Binary Authorization feature. @@ -1091,13 +1141,14 @@ type GoogleCloudRunV2Job struct { // time the user modifies the desired state. Generation int64 `json:"generation,omitempty,string"` - // Labels: KRM-style labels for the resource. User-provided labels are - // shared with Google's billing system, so they can be used to filter, - // or break down billing charges by team, component, environment, state, - // etc. For more information, visit + // Labels: Unstructured key value map that can be used to organize and + // categorize objects. User-provided labels are shared with Google's + // billing system, so they can be used to filter, or break down billing + // charges by team, component, environment, state, etc. For more + // information, visit // https://cloud.google.com/resource-manager/docs/creating-managing-labels - // or https://cloud.google.com/run/docs/configuring/labels Cloud Run API - // v2 does not support labels with `run.googleapis.com`, + // or https://cloud.google.com/run/docs/configuring/labels. Cloud Run + // API v2 does not support labels with `run.googleapis.com`, // `cloud.googleapis.com`, `serving.knative.dev`, or // `autoscaling.knative.dev` namespaces, and they will be rejected. All // system labels in v1 now have a corresponding field in v2 Job. @@ -1114,7 +1165,11 @@ type GoogleCloudRunV2Job struct { // LaunchStage: The launch stage as defined by Google Cloud Platform // Launch Stages (https://cloud.google.com/terms/launch-stages). Cloud // Run supports `ALPHA`, `BETA`, and `GA`. If no value is specified, GA - // is assumed. + // is assumed. Set the launch stage to a preview stage on input to allow + // use of preview features in that stage. On read (or output), describes + // whether the resource uses preview features. 
For example, if ALPHA is + // provided as input, but only BETA and GA-level features are used, this + // field will be BETA on output. // // Possible values: // "LAUNCH_STAGE_UNSPECIFIED" - Do not use this default value. @@ -1433,8 +1488,7 @@ type GoogleCloudRunV2Probe struct { // InitialDelaySeconds: Number of seconds after the container has // started before the probe is initiated. Defaults to 0 seconds. Minimum // value is 0. Maximum value for liveness probe is 3600. Maximum value - // for startup probe is 240. More info: - // https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // for startup probe is 240. InitialDelaySeconds int64 `json:"initialDelaySeconds,omitempty"` // PeriodSeconds: How often (in seconds) to perform the probe. Default @@ -1449,8 +1503,7 @@ type GoogleCloudRunV2Probe struct { // TimeoutSeconds: Number of seconds after which the probe times out. // Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must - // be smaller than period_seconds. More info: - // https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // be smaller than period_seconds. TimeoutSeconds int64 `json:"timeoutSeconds,omitempty"` // ForceSendFields is a list of field names (e.g. "FailureThreshold") to @@ -1484,13 +1537,19 @@ type GoogleCloudRunV2ResourceRequirements struct { // requests. CpuIdle bool `json:"cpuIdle,omitempty"` - // Limits: Only memory and CPU are supported. Note: The only supported - // values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at - // least 2Gi of memory. The values of the map is string form of the - // 'quantity' k8s type: - // https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + // Limits: Only ´memory´ and 'cpu' are supported. Notes: * The only + // supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU + // requires at least 2Gi of memory. 
For more information, go to + // https://cloud.google.com/run/docs/configuring/cpu. * For supported + // 'memory' values and syntax, go to + // https://cloud.google.com/run/docs/configuring/memory-limits Limits map[string]string `json:"limits,omitempty"` + // StartupCpuBoost: Determines whether CPU should be boosted on startup + // of a new container instance above the requested CPU threshold, this + // can help reduce cold-start latency. + StartupCpuBoost bool `json:"startupCpuBoost,omitempty"` + // ForceSendFields is a list of field names (e.g. "CpuIdle") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -1518,7 +1577,9 @@ func (s *GoogleCloudRunV2ResourceRequirements) MarshalJSON() ([]byte, error) { // and configuration. A Revision references a container image. Revisions // are only created by updates to its parent Service. type GoogleCloudRunV2Revision struct { - // Annotations: KRM-style annotations for the resource. + // Annotations: Output only. Unstructured key value map that may be set + // by external tools to store and arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. Annotations map[string]string `json:"annotations,omitempty"` // Conditions: Output only. The Condition of this Revision, containing @@ -1581,19 +1642,22 @@ type GoogleCloudRunV2Revision struct { // time the user modifies the desired state. Generation int64 `json:"generation,omitempty,string"` - // Labels: KRM-style labels for the resource. User-provided labels are - // shared with Google's billing system, so they can be used to filter, - // or break down billing charges by team, component, environment, state, - // etc. For more information, visit + // Labels: Output only. Unstructured key value map that can be used to + // organize and categorize objects. 
User-provided labels are shared with + // Google's billing system, so they can be used to filter, or break down + // billing charges by team, component, environment, state, etc. For more + // information, visit // https://cloud.google.com/resource-manager/docs/creating-managing-labels - // or https://cloud.google.com/run/docs/configuring/labels + // or https://cloud.google.com/run/docs/configuring/labels. Labels map[string]string `json:"labels,omitempty"` - // LaunchStage: Set the launch stage to a preview stage on write to - // allow use of preview features in that stage. On read, describes - // whether the resource uses preview features. Launch Stages are defined - // at Google Cloud Platform Launch Stages - // (https://cloud.google.com/terms/launch-stages). + // LaunchStage: The least stable launch stage needed to create this + // resource, as defined by Google Cloud Platform Launch Stages + // (https://cloud.google.com/terms/launch-stages). Cloud Run supports + // `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was + // used as input. For example, if ALPHA was provided as input in the + // parent resource, but only BETA and GA-level features are were, this + // field will be BETA. // // Possible values: // "LAUNCH_STAGE_UNSPECIFIED" - Do not use this default value. @@ -1667,6 +1731,9 @@ type GoogleCloudRunV2Revision struct { // revision has. ServiceAccount string `json:"serviceAccount,omitempty"` + // SessionAffinity: Enable session affinity. + SessionAffinity bool `json:"sessionAffinity,omitempty"` + // Timeout: Max allowed time for an instance to respond to a request. Timeout string `json:"timeout,omitempty"` @@ -1751,12 +1818,15 @@ func (s *GoogleCloudRunV2RevisionScaling) MarshalJSON() ([]byte, error) { // GoogleCloudRunV2RevisionTemplate: RevisionTemplate describes the data // a revision should have when created from a template. type GoogleCloudRunV2RevisionTemplate struct { - // Annotations: KRM-style annotations for the resource. 
Cloud Run API v2 - // does not support annotations with `run.googleapis.com`, + // Annotations: Unstructured key value map that may be set by external + // tools to store and arbitrary metadata. They are not queryable and + // should be preserved when modifying objects. Cloud Run API v2 does not + // support annotations with `run.googleapis.com`, // `cloud.googleapis.com`, `serving.knative.dev`, or // `autoscaling.knative.dev` namespaces, and they will be rejected. All // system annotations in v1 now have a corresponding field in v2 - // RevisionTemplate. + // RevisionTemplate. This field follows Kubernetes annotations' + // namespacing, limits, and rules. Annotations map[string]string `json:"annotations,omitempty"` // Containers: Holds the single container that defines the unit of @@ -1777,11 +1847,18 @@ type GoogleCloudRunV2RevisionTemplate struct { // "EXECUTION_ENVIRONMENT_GEN2" - Uses Second Generation environment. ExecutionEnvironment string `json:"executionEnvironment,omitempty"` - // Labels: KRM-style labels for the resource. Cloud Run API v2 does not - // support labels with `run.googleapis.com`, `cloud.googleapis.com`, - // `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and - // they will be rejected. All system labels in v1 now have a - // corresponding field in v2 RevisionTemplate. + // Labels: Unstructured key value map that can be used to organize and + // categorize objects. User-provided labels are shared with Google's + // billing system, so they can be used to filter, or break down billing + // charges by team, component, environment, state, etc. For more + // information, visit + // https://cloud.google.com/resource-manager/docs/creating-managing-labels + // or https://cloud.google.com/run/docs/configuring/labels. Cloud Run + // API v2 does not support labels with `run.googleapis.com`, + // `cloud.googleapis.com`, `serving.knative.dev`, or + // `autoscaling.knative.dev` namespaces, and they will be rejected. 
All + // system labels in v1 now have a corresponding field in v2 + // RevisionTemplate. Labels map[string]string `json:"labels,omitempty"` // MaxInstanceRequestConcurrency: Sets the maximum number of requests @@ -1802,6 +1879,9 @@ type GoogleCloudRunV2RevisionTemplate struct { // default service account. ServiceAccount string `json:"serviceAccount,omitempty"` + // SessionAffinity: Enable session affinity. + SessionAffinity bool `json:"sessionAffinity,omitempty"` + // Timeout: Max allowed time for an instance to respond to a request. Timeout string `json:"timeout,omitempty"` @@ -1975,11 +2055,10 @@ type GoogleCloudRunV2Service struct { // should be preserved when modifying objects. Cloud Run API v2 does not // support annotations with `run.googleapis.com`, // `cloud.googleapis.com`, `serving.knative.dev`, or - // `autoscaling.knative.dev` namespaces, and they will be rejected. All - // system annotations in v1 now have a corresponding field in v2 - // Service. This field follows Kubernetes annotations' namespacing, - // limits, and rules. More info: - // https://kubernetes.io/docs/user-guide/annotations + // `autoscaling.knative.dev` namespaces, and they will be rejected in + // new resources. All system annotations in v1 now have a corresponding + // field in v2 Service. This field follows Kubernetes annotations' + // namespacing, limits, and rules. Annotations map[string]string `json:"annotations,omitempty"` // BinaryAuthorization: Settings for the Binary Authorization feature. @@ -2004,6 +2083,10 @@ type GoogleCloudRunV2Service struct { // Creator: Output only. Email address of the authenticated creator. Creator string `json:"creator,omitempty"` + // CustomAudiences: Custom audiences that can be used in the audience + // field of ID token for authenticated requests. + CustomAudiences []string `json:"customAudiences,omitempty"` + // DeleteTime: Output only. The deletion time. 
DeleteTime string `json:"deleteTime,omitempty"` @@ -2038,14 +2121,14 @@ type GoogleCloudRunV2Service struct { // Cloud Load Balancer traffic is allowed. Ingress string `json:"ingress,omitempty"` - // Labels: Map of string keys and values that can be used to organize - // and categorize objects. User-provided labels are shared with Google's + // Labels: Unstructured key value map that can be used to organize and + // categorize objects. User-provided labels are shared with Google's // billing system, so they can be used to filter, or break down billing // charges by team, component, environment, state, etc. For more // information, visit // https://cloud.google.com/resource-manager/docs/creating-managing-labels - // or https://cloud.google.com/run/docs/configuring/labels Cloud Run API - // v2 does not support labels with `run.googleapis.com`, + // or https://cloud.google.com/run/docs/configuring/labels. Cloud Run + // API v2 does not support labels with `run.googleapis.com`, // `cloud.googleapis.com`, `serving.knative.dev`, or // `autoscaling.knative.dev` namespaces, and they will be rejected. All // system labels in v1 now have a corresponding field in v2 Service. @@ -2068,7 +2151,11 @@ type GoogleCloudRunV2Service struct { // LaunchStage: The launch stage as defined by Google Cloud Platform // Launch Stages (https://cloud.google.com/terms/launch-stages). Cloud // Run supports `ALPHA`, `BETA`, and `GA`. If no value is specified, GA - // is assumed. + // is assumed. Set the launch stage to a preview stage on input to allow + // use of preview features in that stage. On read (or output), describes + // whether the resource uses preview features. For example, if ALPHA is + // provided as input, but only BETA and GA-level features are used, this + // field will be BETA on output. // // Possible values: // "LAUNCH_STAGE_UNSPECIFIED" - Do not use this default value. 
@@ -2236,7 +2323,9 @@ func (s *GoogleCloudRunV2TCPSocketAction) MarshalJSON() ([]byte, error) { // GoogleCloudRunV2Task: Task represents a single run of a container to // completion. type GoogleCloudRunV2Task struct { - // Annotations: KRM-style annotations for the resource. + // Annotations: Output only. Unstructured key value map that may be set + // by external tools to store and arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. Annotations map[string]string `json:"annotations,omitempty"` // CompletionTime: Output only. Represents time when the Task was @@ -2302,10 +2391,11 @@ type GoogleCloudRunV2Task struct { // Job: Output only. The name of the parent Job. Job string `json:"job,omitempty"` - // Labels: KRM-style labels for the resource. User-provided labels are - // shared with Google's billing system, so they can be used to filter, - // or break down billing charges by team, component, environment, state, - // etc. For more information, visit + // Labels: Output only. Unstructured key value map that can be used to + // organize and categorize objects. User-provided labels are shared with + // Google's billing system, so they can be used to filter, or break down + // billing charges by team, component, environment, state, etc. For more + // information, visit // https://cloud.google.com/resource-manager/docs/creating-managing-labels // or https://cloud.google.com/run/docs/configuring/labels Labels map[string]string `json:"labels,omitempty"` @@ -2665,11 +2755,13 @@ type GoogleCloudRunV2Volume struct { // information on how to connect Cloud SQL and Cloud Run. CloudSqlInstance *GoogleCloudRunV2CloudSqlInstance `json:"cloudSqlInstance,omitempty"` + // EmptyDir: Ephemeral storage used as a shared volume. + EmptyDir *GoogleCloudRunV2EmptyDirVolumeSource `json:"emptyDir,omitempty"` + // Name: Required. Volume's name. 
Name string `json:"name,omitempty"` // Secret: Secret represents a secret that should populate this volume. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret Secret *GoogleCloudRunV2SecretVolumeSource `json:"secret,omitempty"` // ForceSendFields is a list of field names (e.g. "CloudSqlInstance") to @@ -2745,7 +2837,8 @@ type GoogleCloudRunV2VpcAccess struct { // {project} can be project id or number. Connector string `json:"connector,omitempty"` - // Egress: Traffic VPC egress settings. + // Egress: Traffic VPC egress settings. If not provided, it defaults to + // PRIVATE_RANGES_ONLY. // // Possible values: // "VPC_EGRESS_UNSPECIFIED" - Unspecified @@ -7297,8 +7390,8 @@ func (r *ProjectsLocationsServicesService) Patch(name string, googlecloudrunv2se // AllowMissing sets the optional parameter "allowMissing": If set to // true, and if the Service does not exist, it will create a new one. -// Caller must have both create and update permissions for this call if -// this is set to true. +// The caller must have 'run.services.create' permissions if this is set +// to true and the Service does not exist. func (c *ProjectsLocationsServicesPatchCall) AllowMissing(allowMissing bool) *ProjectsLocationsServicesPatchCall { c.urlParams_.Set("allowMissing", fmt.Sprint(allowMissing)) return c @@ -7412,7 +7505,7 @@ func (c *ProjectsLocationsServicesPatchCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "allowMissing": { - // "description": "If set to true, and if the Service does not exist, it will create a new one. Caller must have both create and update permissions for this call if this is set to true.", + // "description": "If set to true, and if the Service does not exist, it will create a new one. 
The caller must have 'run.services.create' permissions if this is set to true and the Service does not exist.", // "location": "query", // "type": "boolean" // }, diff --git a/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json b/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json index 1d861eb3b3..1b6a843882 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json @@ -358,6 +358,7 @@ "parameterOrder": [], "parameters": { "consumerId": { + "deprecated": true, "description": "Include services consumed by the specified consumer. The Google Service Management implementation accepts the following forms: - project:", "location": "query", "type": "string" @@ -829,7 +830,7 @@ } } }, - "revision": "20230224", + "revision": "20230609", "rootUrl": "https://servicemanagement.googleapis.com/", "schemas": { "Advice": { @@ -880,11 +881,13 @@ "description": "The source syntax of the service.", "enum": [ "SYNTAX_PROTO2", - "SYNTAX_PROTO3" + "SYNTAX_PROTO3", + "SYNTAX_EDITIONS" ], "enumDescriptions": [ "Syntax `proto2`.", - "Syntax `proto3`." + "Syntax `proto3`.", + "Syntax `editions`." ], "type": "string" }, @@ -1248,7 +1251,7 @@ "description": "Settings for Ruby client libraries." }, "version": { - "description": "Version of the API to apply these settings to.", + "description": "Version of the API to apply these settings to. This is the full protobuf package for the API, ending in the version element. Examples: \"google.cloud.speech.v1\" and \"google.spanner.admin.database.v1\".", "type": "string" } }, @@ -1549,7 +1552,7 @@ "type": "object" }, "Documentation": { - "description": "`Documentation` provides the information for describing a service. 
Example: documentation: summary: \u003e The Google Calendar API gives access to most calendar features. pages: - name: Overview content: (== include google/foo/overview.md ==) - name: Tutorial content: (== include google/foo/tutorial.md ==) subpages; - name: Java content: (== include google/foo/tutorial_java.md ==) rules: - selector: google.calendar.Calendar.Get description: \u003e ... - selector: google.calendar.Calendar.Put description: \u003e ... Documentation is provided in markdown syntax. In addition to standard markdown features, definition lists, tables and fenced code blocks are supported. Section headers can be provided and are interpreted relative to the section nesting of the context where a documentation fragment is embedded. Documentation from the IDL is merged with documentation defined via the config at normalization time, where documentation provided by config rules overrides IDL provided. A number of constructs specific to the API platform are supported in documentation text. In order to reference a proto element, the following notation can be used: [fully.qualified.proto.name][] To override the display text used for the link, this can be used: [display text][fully.qualified.proto.name] Text can be excluded from doc using the following notation: (-- internal comment --) A few directives are available in documentation. Note that directives must appear on a single line to be properly identified. The `include` directive includes a markdown file from an external source: (== include path/to/file ==) The `resource_for` directive marks a message to be the resource of a collection in REST view. If it is not specified, tools attempt to infer the resource from the operations in a collection: (== resource_for v1.shelves.books ==) The directive `suppress_warning` does not directly affect documentation and is documented together with service config validation.", + "description": "`Documentation` provides the information for describing a service. 
Example: documentation: summary: \u003e The Google Calendar API gives access to most calendar features. pages: - name: Overview content: (== include google/foo/overview.md ==) - name: Tutorial content: (== include google/foo/tutorial.md ==) subpages: - name: Java content: (== include google/foo/tutorial_java.md ==) rules: - selector: google.calendar.Calendar.Get description: \u003e ... - selector: google.calendar.Calendar.Put description: \u003e ... Documentation is provided in markdown syntax. In addition to standard markdown features, definition lists, tables and fenced code blocks are supported. Section headers can be provided and are interpreted relative to the section nesting of the context where a documentation fragment is embedded. Documentation from the IDL is merged with documentation defined via the config at normalization time, where documentation provided by config rules overrides IDL provided. A number of constructs specific to the API platform are supported in documentation text. In order to reference a proto element, the following notation can be used: [fully.qualified.proto.name][] To override the display text used for the link, this can be used: [display text][fully.qualified.proto.name] Text can be excluded from doc using the following notation: (-- internal comment --) A few directives are available in documentation. Note that directives must appear on a single line to be properly identified. The `include` directive includes a markdown file from an external source: (== include path/to/file ==) The `resource_for` directive marks a message to be the resource of a collection in REST view. 
If it is not specified, tools attempt to infer the resource from the operations in a collection: (== resource_for v1.shelves.books ==) The directive `suppress_warning` does not directly affect documentation and is documented together with service config validation.", "id": "Documentation", "properties": { "documentationRootUrl": { @@ -1574,6 +1577,13 @@ }, "type": "array" }, + "sectionOverrides": { + "description": "Specifies section and content to override boilerplate content provided by go/api-docgen. Currently overrides following sections: 1. rest.service.client_libraries", + "items": { + "$ref": "Page" + }, + "type": "array" + }, "serviceRootUrl": { "description": "Specifies the service root url if the default one (the service name from the yaml file) is not suitable. This can be seen in any fully specified service urls as well as sections that show a base that other urls are relative to.", "type": "string" @@ -1597,6 +1607,10 @@ "description": "Description of the selected proto element (e.g. a message, a method, a 'service' definition, or a field). Defaults to leading \u0026 trailing comments taken from the proto source definition of the proto element.", "type": "string" }, + "disableReplacementWords": { + "description": "String of comma or space separated case-sensitive words for which method/field name replacement will be disabled by go/api-docgen.", + "type": "string" + }, "selector": { "description": "The selector is a comma-separated list of patterns for any element such as a method, a field, an enum value. Each pattern is a qualified name of the element which may end in \"*\", indicating a wildcard. Wildcards are only allowed at the end and for a whole component of the qualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". A wildcard will match one or more components. 
To specify a default for all applicable elements, the whole pattern \"*\" is used.", "type": "string" @@ -1611,6 +1625,41 @@ "common": { "$ref": "CommonLanguageSettings", "description": "Some settings." + }, + "forcedNamespaceAliases": { + "description": "Namespaces which must be aliased in snippets due to a known (but non-generator-predictable) naming collision", + "items": { + "type": "string" + }, + "type": "array" + }, + "handwrittenSignatures": { + "description": "Method signatures (in the form \"service.method(signature)\") which are provided separately, so shouldn't be generated. Snippets *calling* these methods are still generated, however.", + "items": { + "type": "string" + }, + "type": "array" + }, + "ignoredResources": { + "description": "List of full resource types to ignore during generation. This is typically used for API-specific Location resources, which should be handled by the generator as if they were actually the common Location resources. Example entry: \"documentai.googleapis.com/Location\"", + "items": { + "type": "string" + }, + "type": "array" + }, + "renamedResources": { + "additionalProperties": { + "type": "string" + }, + "description": "Map from full resource types to the effective short name for the resource. This is used when otherwise resource named from different services would cause naming collisions. Example entry: \"datalabeling.googleapis.com/Dataset\": \"DataLabelingDataset\"", + "type": "object" + }, + "renamedServices": { + "additionalProperties": { + "type": "string" + }, + "description": "Map from original service names to renamed versions. This is used when the default generated types would cause a naming conflict. (Neither name is fully-qualified.) 
Example: Subscriber to SubscriberServiceApi.", + "type": "object" } }, "type": "object" @@ -1651,6 +1700,10 @@ "description": "Enum type definition.", "id": "Enum", "properties": { + "edition": { + "description": "The source edition string, only valid when syntax is SYNTAX_EDITIONS.", + "type": "string" + }, "enumvalue": { "description": "Enum value definitions.", "items": { @@ -1677,11 +1730,13 @@ "description": "The source syntax.", "enum": [ "SYNTAX_PROTO2", - "SYNTAX_PROTO3" + "SYNTAX_PROTO3", + "SYNTAX_EDITIONS" ], "enumDescriptions": [ "Syntax `proto2`.", - "Syntax `proto3`." + "Syntax `proto3`.", + "Syntax `editions`." ], "type": "string" } @@ -2297,11 +2352,13 @@ "description": "The source syntax of this method.", "enum": [ "SYNTAX_PROTO2", - "SYNTAX_PROTO3" + "SYNTAX_PROTO3", + "SYNTAX_EDITIONS" ], "enumDescriptions": [ "Syntax `proto2`.", - "Syntax `proto3`." + "Syntax `proto3`.", + "Syntax `editions`." ], "type": "string" } @@ -2314,7 +2371,7 @@ "properties": { "longRunning": { "$ref": "LongRunning", - "description": "Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. Example of a YAML configuration:: publishing: method_behavior: - selector: CreateAdDomain long_running: initial_poll_delay: seconds: 60 # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: seconds: 360 # 6 minutes total_poll_timeout: seconds: 54000 # 90 minutes" + "description": "Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. 
Example of a YAML configuration:: publishing: method_settings: - selector: google.cloud.speech.v2.Speech.BatchRecognize long_running: initial_poll_delay: seconds: 60 # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: seconds: 360 # 6 minutes total_poll_timeout: seconds: 54000 # 90 minutes" }, "selector": { "description": "The fully qualified name of the method, for which the options below apply. This is used to find the method to apply the options.", @@ -2491,7 +2548,7 @@ "type": "object" }, "Mixin": { - "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. 
A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", + "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. 
rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", "id": "Mixin", "properties": { "name": { @@ -2826,7 +2883,7 @@ "type": "array" }, "newIssueUri": { - "description": "Link to a place that API users can report issues. Example: https://issuetracker.google.com/issues/new?component=190865\u0026template=1161103", + "description": "Link to a *public* URI where users can report issues. Example: https://issuetracker.google.com/issues/new?component=190865\u0026template=1161103", "type": "string" }, "organization": { @@ -2836,14 +2893,20 @@ "CLOUD", "ADS", "PHOTOS", - "STREET_VIEW" + "STREET_VIEW", + "SHOPPING", + "GEO", + "GENERATIVE_AI" ], "enumDescriptions": [ "Not useful.", "Google Cloud Platform Org.", "Ads (Advertising) Org.", "Photos Org.", - "Street View Org." 
+ "Street View Org.", + "Shopping Org.", + "Geo Org.", + "Generative AI - https://developers.generativeai.google" ], "type": "string" }, @@ -3391,6 +3454,10 @@ "description": "A protocol buffer message type.", "id": "Type", "properties": { + "edition": { + "description": "The source edition string, only valid when syntax is SYNTAX_EDITIONS.", + "type": "string" + }, "fields": { "description": "The list of fields.", "items": { @@ -3424,11 +3491,13 @@ "description": "The source syntax.", "enum": [ "SYNTAX_PROTO2", - "SYNTAX_PROTO3" + "SYNTAX_PROTO3", + "SYNTAX_EDITIONS" ], "enumDescriptions": [ "Syntax `proto2`.", - "Syntax `proto3`." + "Syntax `proto3`.", + "Syntax `editions`." ], "type": "string" } diff --git a/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go b/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go index 58fc4d7c1c..566568fd9e 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "servicemanagement:v1" const apiName = "servicemanagement" @@ -273,6 +274,7 @@ type Api struct { // Possible values: // "SYNTAX_PROTO2" - Syntax `proto2`. // "SYNTAX_PROTO3" - Syntax `proto3`. + // "SYNTAX_EDITIONS" - Syntax `editions`. Syntax string `json:"syntax,omitempty"` // Version: A version string for this interface. If specified, must have @@ -1048,7 +1050,10 @@ type ClientLibrarySettings struct { // RubySettings: Settings for Ruby client libraries. RubySettings *RubySettings `json:"rubySettings,omitempty"` - // Version: Version of the API to apply these settings to. + // Version: Version of the API to apply these settings to. 
This is the + // full protobuf package for the API, ending in the version element. + // Examples: "google.cloud.speech.v1" and + // "google.spanner.admin.database.v1". Version string `json:"version,omitempty"` // ForceSendFields is a list of field names (e.g. "CppSettings") to @@ -1600,7 +1605,7 @@ func (s *Diagnostic) MarshalJSON() ([]byte, error) { // describing a service. Example: documentation: summary: > The Google // Calendar API gives access to most calendar features. pages: - name: // Overview content: (== include google/foo/overview.md ==) - name: -// Tutorial content: (== include google/foo/tutorial.md ==) subpages; - +// Tutorial content: (== include google/foo/tutorial.md ==) subpages: - // name: Java content: (== include google/foo/tutorial_java.md ==) // rules: - selector: google.calendar.Calendar.Get description: > ... - // selector: google.calendar.Calendar.Put description: > ... @@ -1646,6 +1651,11 @@ type Documentation struct { // wins" order. Rules []*DocumentationRule `json:"rules,omitempty"` + // SectionOverrides: Specifies section and content to override + // boilerplate content provided by go/api-docgen. Currently overrides + // following sections: 1. rest.service.client_libraries + SectionOverrides []*Page `json:"sectionOverrides,omitempty"` + // ServiceRootUrl: Specifies the service root url if the default one // (the service name from the yaml file) is not suitable. This can be // seen in any fully specified service urls as well as sections that @@ -1697,6 +1707,11 @@ type DocumentationRule struct { // the proto element. Description string `json:"description,omitempty"` + // DisableReplacementWords: String of comma or space separated + // case-sensitive words for which method/field name replacement will be + // disabled by go/api-docgen. + DisableReplacementWords string `json:"disableReplacementWords,omitempty"` + // Selector: The selector is a comma-separated list of patterns for any // element such as a method, a field, an enum value. 
Each pattern is a // qualified name of the element which may end in "*", indicating a @@ -1737,6 +1752,35 @@ type DotnetSettings struct { // Common: Some settings. Common *CommonLanguageSettings `json:"common,omitempty"` + // ForcedNamespaceAliases: Namespaces which must be aliased in snippets + // due to a known (but non-generator-predictable) naming collision + ForcedNamespaceAliases []string `json:"forcedNamespaceAliases,omitempty"` + + // HandwrittenSignatures: Method signatures (in the form + // "service.method(signature)") which are provided separately, so + // shouldn't be generated. Snippets *calling* these methods are still + // generated, however. + HandwrittenSignatures []string `json:"handwrittenSignatures,omitempty"` + + // IgnoredResources: List of full resource types to ignore during + // generation. This is typically used for API-specific Location + // resources, which should be handled by the generator as if they were + // actually the common Location resources. Example entry: + // "documentai.googleapis.com/Location" + IgnoredResources []string `json:"ignoredResources,omitempty"` + + // RenamedResources: Map from full resource types to the effective short + // name for the resource. This is used when otherwise resource named + // from different services would cause naming collisions. Example entry: + // "datalabeling.googleapis.com/Dataset": "DataLabelingDataset" + RenamedResources map[string]string `json:"renamedResources,omitempty"` + + // RenamedServices: Map from original service names to renamed versions. + // This is used when the default generated types would cause a naming + // conflict. (Neither name is fully-qualified.) Example: Subscriber to + // SubscriberServiceApi. + RenamedServices map[string]string `json:"renamedServices,omitempty"` + // ForceSendFields is a list of field names (e.g. "Common") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any @@ -1828,6 +1872,10 @@ func (s *Endpoint) MarshalJSON() ([]byte, error) { // Enum: Enum type definition. type Enum struct { + // Edition: The source edition string, only valid when syntax is + // SYNTAX_EDITIONS. + Edition string `json:"edition,omitempty"` + // Enumvalue: Enum value definitions. Enumvalue []*EnumValue `json:"enumvalue,omitempty"` @@ -1845,9 +1893,10 @@ type Enum struct { // Possible values: // "SYNTAX_PROTO2" - Syntax `proto2`. // "SYNTAX_PROTO3" - Syntax `proto3`. + // "SYNTAX_EDITIONS" - Syntax `editions`. Syntax string `json:"syntax,omitempty"` - // ForceSendFields is a list of field names (e.g. "Enumvalue") to + // ForceSendFields is a list of field names (e.g. "Edition") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -1855,7 +1904,7 @@ type Enum struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Enumvalue") to include in + // NullFields is a list of field names (e.g. "Edition") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -3061,6 +3110,7 @@ type Method struct { // Possible values: // "SYNTAX_PROTO2" - Syntax `proto2`. // "SYNTAX_PROTO3" - Syntax `proto3`. + // "SYNTAX_EDITIONS" - Syntax `editions`. Syntax string `json:"syntax,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to @@ -3091,10 +3141,11 @@ type MethodSettings struct { // LongRunning: Describes settings to use for long-running operations // when generating API methods for RPCs. Complements RPCs that use the // annotations in google/longrunning/operations.proto. 
Example of a YAML - // configuration:: publishing: method_behavior: - selector: - // CreateAdDomain long_running: initial_poll_delay: seconds: 60 # 1 - // minute poll_delay_multiplier: 1.5 max_poll_delay: seconds: 360 # 6 - // minutes total_poll_timeout: seconds: 54000 # 90 minutes + // configuration:: publishing: method_settings: - selector: + // google.cloud.speech.v2.Speech.BatchRecognize long_running: + // initial_poll_delay: seconds: 60 # 1 minute poll_delay_multiplier: 1.5 + // max_poll_delay: seconds: 360 # 6 minutes total_poll_timeout: seconds: + // 54000 # 90 minutes LongRunning *LongRunning `json:"longRunning,omitempty"` // Selector: The fully qualified name of the method, for which the @@ -3452,7 +3503,7 @@ func (s *MetricRule) MarshalJSON() ([]byte, error) { // The mixin construct implies that all methods in `AccessControl` are // also declared with same name and request/response types in `Storage`. // A documentation generator or annotation processor will see the -// effective `Storage.GetAcl` method after inheriting documentation and +// effective `Storage.GetAcl` method after inherting documentation and // annotations as follows: service Storage { // Get the underlying ACL // object. rpc GetAcl(GetAclRequest) returns (Acl) { option // (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how @@ -4183,7 +4234,7 @@ type Publishing struct { // methods that use the long-running operation pattern. MethodSettings []*MethodSettings `json:"methodSettings,omitempty"` - // NewIssueUri: Link to a place that API users can report issues. + // NewIssueUri: Link to a *public* URI where users can report issues. // Example: // https://issuetracker.google.com/issues/new?component=190865&template=1161103 NewIssueUri string `json:"newIssueUri,omitempty"` @@ -4196,6 +4247,10 @@ type Publishing struct { // "ADS" - Ads (Advertising) Org. // "PHOTOS" - Photos Org. // "STREET_VIEW" - Street View Org. + // "SHOPPING" - Shopping Org. + // "GEO" - Geo Org. 
+ // "GENERATIVE_AI" - Generative AI - + // https://developers.generativeai.google Organization string `json:"organization,omitempty"` // ProtoReferenceDocumentationUri: Optional link to proto reference @@ -5186,6 +5241,10 @@ func (s *TrafficPercentStrategy) MarshalJSON() ([]byte, error) { // Type: A protocol buffer message type. type Type struct { + // Edition: The source edition string, only valid when syntax is + // SYNTAX_EDITIONS. + Edition string `json:"edition,omitempty"` + // Fields: The list of fields. Fields []*Field `json:"fields,omitempty"` @@ -5207,9 +5266,10 @@ type Type struct { // Possible values: // "SYNTAX_PROTO2" - Syntax `proto2`. // "SYNTAX_PROTO3" - Syntax `proto3`. + // "SYNTAX_EDITIONS" - Syntax `editions`. Syntax string `json:"syntax,omitempty"` - // ForceSendFields is a list of field names (e.g. "Fields") to + // ForceSendFields is a list of field names (e.g. "Edition") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -5217,8 +5277,8 @@ type Type struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Fields") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Edition") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
@@ -6777,6 +6837,7 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon // "parameterOrder": [], // "parameters": { // "consumerId": { + // "deprecated": true, // "description": "Include services consumed by the specified consumer. The Google Service Management implementation accepts the following forms: - project:", // "location": "query", // "type": "string" diff --git a/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-api.json b/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-api.json index a825ab9fab..48cbbf0d3a 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-api.json @@ -865,7 +865,7 @@ } } }, - "revision": "20230315", + "revision": "20230703", "rootUrl": "https://servicenetworking.googleapis.com/", "schemas": { "AddDnsRecordSetMetadata": { @@ -1247,6 +1247,7 @@ "type": "string" }, "minDeadline": { + "deprecated": true, "description": "Deprecated, do not use.", "format": "double", "type": "number" @@ -1438,6 +1439,7 @@ "type": "array" }, "referenceDocsUri": { + "deprecated": true, "description": "Link to automatically generated reference documentation. Example: https://cloud.google.com/nodejs/docs/reference/asset/latest", "type": "string" } @@ -1765,7 +1767,7 @@ "type": "object" }, "Documentation": { - "description": "`Documentation` provides the information for describing a service. Example: documentation: summary: \u003e The Google Calendar API gives access to most calendar features. pages: - name: Overview content: (== include google/foo/overview.md ==) - name: Tutorial content: (== include google/foo/tutorial.md ==) subpages; - name: Java content: (== include google/foo/tutorial_java.md ==) rules: - selector: google.calendar.Calendar.Get description: \u003e ... 
- selector: google.calendar.Calendar.Put description: \u003e ... Documentation is provided in markdown syntax. In addition to standard markdown features, definition lists, tables and fenced code blocks are supported. Section headers can be provided and are interpreted relative to the section nesting of the context where a documentation fragment is embedded. Documentation from the IDL is merged with documentation defined via the config at normalization time, where documentation provided by config rules overrides IDL provided. A number of constructs specific to the API platform are supported in documentation text. In order to reference a proto element, the following notation can be used: [fully.qualified.proto.name][] To override the display text used for the link, this can be used: [display text][fully.qualified.proto.name] Text can be excluded from doc using the following notation: (-- internal comment --) A few directives are available in documentation. Note that directives must appear on a single line to be properly identified. The `include` directive includes a markdown file from an external source: (== include path/to/file ==) The `resource_for` directive marks a message to be the resource of a collection in REST view. If it is not specified, tools attempt to infer the resource from the operations in a collection: (== resource_for v1.shelves.books ==) The directive `suppress_warning` does not directly affect documentation and is documented together with service config validation.", + "description": "`Documentation` provides the information for describing a service. Example: documentation: summary: \u003e The Google Calendar API gives access to most calendar features. pages: - name: Overview content: (== include google/foo/overview.md ==) - name: Tutorial content: (== include google/foo/tutorial.md ==) subpages: - name: Java content: (== include google/foo/tutorial_java.md ==) rules: - selector: google.calendar.Calendar.Get description: \u003e ... 
- selector: google.calendar.Calendar.Put description: \u003e ... Documentation is provided in markdown syntax. In addition to standard markdown features, definition lists, tables and fenced code blocks are supported. Section headers can be provided and are interpreted relative to the section nesting of the context where a documentation fragment is embedded. Documentation from the IDL is merged with documentation defined via the config at normalization time, where documentation provided by config rules overrides IDL provided. A number of constructs specific to the API platform are supported in documentation text. In order to reference a proto element, the following notation can be used: [fully.qualified.proto.name][] To override the display text used for the link, this can be used: [display text][fully.qualified.proto.name] Text can be excluded from doc using the following notation: (-- internal comment --) A few directives are available in documentation. Note that directives must appear on a single line to be properly identified. The `include` directive includes a markdown file from an external source: (== include path/to/file ==) The `resource_for` directive marks a message to be the resource of a collection in REST view. If it is not specified, tools attempt to infer the resource from the operations in a collection: (== resource_for v1.shelves.books ==) The directive `suppress_warning` does not directly affect documentation and is documented together with service config validation.", "id": "Documentation", "properties": { "documentationRootUrl": { @@ -1790,6 +1792,13 @@ }, "type": "array" }, + "sectionOverrides": { + "description": "Specifies section and content to override boilerplate content provided by go/api-docgen. Currently overrides following sections: 1. 
rest.service.client_libraries", + "items": { + "$ref": "Page" + }, + "type": "array" + }, "serviceRootUrl": { "description": "Specifies the service root url if the default one (the service name from the yaml file) is not suitable. This can be seen in any fully specified service urls as well as sections that show a base that other urls are relative to.", "type": "string" @@ -1813,6 +1822,10 @@ "description": "Description of the selected proto element (e.g. a message, a method, a 'service' definition, or a field). Defaults to leading \u0026 trailing comments taken from the proto source definition of the proto element.", "type": "string" }, + "disableReplacementWords": { + "description": "String of comma or space separated case-sensitive words for which method/field name replacement will be disabled by go/api-docgen.", + "type": "string" + }, "selector": { "description": "The selector is a comma-separated list of patterns for any element such as a method, a field, an enum value. Each pattern is a qualified name of the element which may end in \"*\", indicating a wildcard. Wildcards are only allowed at the end and for a whole component of the qualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". A wildcard will match one or more components. To specify a default for all applicable elements, the whole pattern \"*\" is used.", "type": "string" @@ -1827,6 +1840,41 @@ "common": { "$ref": "CommonLanguageSettings", "description": "Some settings." + }, + "forcedNamespaceAliases": { + "description": "Namespaces which must be aliased in snippets due to a known (but non-generator-predictable) naming collision", + "items": { + "type": "string" + }, + "type": "array" + }, + "handwrittenSignatures": { + "description": "Method signatures (in the form \"service.method(signature)\") which are provided separately, so shouldn't be generated. 
Snippets *calling* these methods are still generated, however.", + "items": { + "type": "string" + }, + "type": "array" + }, + "ignoredResources": { + "description": "List of full resource types to ignore during generation. This is typically used for API-specific Location resources, which should be handled by the generator as if they were actually the common Location resources. Example entry: \"documentai.googleapis.com/Location\"", + "items": { + "type": "string" + }, + "type": "array" + }, + "renamedResources": { + "additionalProperties": { + "type": "string" + }, + "description": "Map from full resource types to the effective short name for the resource. This is used when otherwise resource named from different services would cause naming collisions. Example entry: \"datalabeling.googleapis.com/Dataset\": \"DataLabelingDataset\"", + "type": "object" + }, + "renamedServices": { + "additionalProperties": { + "type": "string" + }, + "description": "Map from original service names to renamed versions. This is used when the default generated types would cause a naming conflict. (Neither name is fully-qualified.) Example: Subscriber to SubscriberServiceApi.", + "type": "object" } }, "type": "object" @@ -1853,6 +1901,7 @@ "id": "Endpoint", "properties": { "aliases": { + "deprecated": true, "description": "Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on.", "items": { "type": "string" @@ -2464,7 +2513,7 @@ "properties": { "longRunning": { "$ref": "LongRunning", - "description": "Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. 
Example of a YAML configuration:: publishing: method_behavior: - selector: CreateAdDomain long_running: initial_poll_delay: seconds: 60 # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: seconds: 360 # 6 minutes total_poll_timeout: seconds: 54000 # 90 minutes" + "description": "Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. Example of a YAML configuration:: publishing: method_settings: - selector: google.cloud.speech.v2.Speech.BatchRecognize long_running: initial_poll_delay: seconds: 60 # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: seconds: 360 # 6 minutes total_poll_timeout: seconds: 54000 # 90 minutes" }, "selector": { "description": "The fully qualified name of the method, for which the options below apply. This is used to find the method to apply the options.", @@ -2590,6 +2639,7 @@ "type": "string" }, "launchStage": { + "deprecated": true, "description": "Deprecated. Must use the MetricDescriptor.launch_stage instead.", "enum": [ "LAUNCH_STAGE_UNSPECIFIED", @@ -2641,7 +2691,7 @@ "type": "object" }, "Mixin": { - "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", + "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. 
Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", "id": "Mixin", "properties": { "name": { @@ -2893,7 +2943,7 @@ "type": "string" }, "role": { - "description": "Required. Role to apply. Only allowlisted roles can be used at the specified granularity. 
The role must be one of the following: - 'roles/container.hostServiceAgentUser' applied on the shared VPC host project - 'roles/compute.securityAdmin' applied on the shared VPC host project", + "description": "Required. Role to apply. Only allowlisted roles can be used at the specified granularity. The role must be one of the following: - 'roles/container.hostServiceAgentUser' applied on the shared VPC host project - 'roles/compute.securityAdmin' applied on the shared VPC host project - 'roles/compute.networkAdmin' applied on the shared VPC host project - 'roles/compute.xpnAdmin' applied on the shared VPC host project - 'roles/dns.admin' applied on the shared VPC host project", "type": "string" } }, @@ -2941,7 +2991,7 @@ "type": "array" }, "newIssueUri": { - "description": "Link to a place that API users can report issues. Example: https://issuetracker.google.com/issues/new?component=190865\u0026template=1161103", + "description": "Link to a *public* URI where users can report issues. Example: https://issuetracker.google.com/issues/new?component=190865\u0026template=1161103", "type": "string" }, "organization": { @@ -2951,14 +3001,20 @@ "CLOUD", "ADS", "PHOTOS", - "STREET_VIEW" + "STREET_VIEW", + "SHOPPING", + "GEO", + "GENERATIVE_AI" ], "enumDescriptions": [ "Not useful.", "Google Cloud Platform Org.", "Ads (Advertising) Org.", "Photos Org.", - "Street View Org." 
+ "Street View Org.", + "Shopping Org.", + "Geo Org.", + "Generative AI - https://developers.generativeai.google" ], "type": "string" }, diff --git a/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-gen.go b/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-gen.go index 516ac2bc61..d23aaf6aa5 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/servicenetworking/v1/servicenetworking-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "servicenetworking:v1" const apiName = "servicenetworking" @@ -1912,7 +1913,7 @@ func (s *DnsZone) MarshalJSON() ([]byte, error) { // describing a service. Example: documentation: summary: > The Google // Calendar API gives access to most calendar features. pages: - name: // Overview content: (== include google/foo/overview.md ==) - name: -// Tutorial content: (== include google/foo/tutorial.md ==) subpages; - +// Tutorial content: (== include google/foo/tutorial.md ==) subpages: - // name: Java content: (== include google/foo/tutorial_java.md ==) // rules: - selector: google.calendar.Calendar.Get description: > ... - // selector: google.calendar.Calendar.Put description: > ... @@ -1958,6 +1959,11 @@ type Documentation struct { // wins" order. Rules []*DocumentationRule `json:"rules,omitempty"` + // SectionOverrides: Specifies section and content to override + // boilerplate content provided by go/api-docgen. Currently overrides + // following sections: 1. rest.service.client_libraries + SectionOverrides []*Page `json:"sectionOverrides,omitempty"` + // ServiceRootUrl: Specifies the service root url if the default one // (the service name from the yaml file) is not suitable. 
This can be // seen in any fully specified service urls as well as sections that @@ -2009,6 +2015,11 @@ type DocumentationRule struct { // the proto element. Description string `json:"description,omitempty"` + // DisableReplacementWords: String of comma or space separated + // case-sensitive words for which method/field name replacement will be + // disabled by go/api-docgen. + DisableReplacementWords string `json:"disableReplacementWords,omitempty"` + // Selector: The selector is a comma-separated list of patterns for any // element such as a method, a field, an enum value. Each pattern is a // qualified name of the element which may end in "*", indicating a @@ -2049,6 +2060,35 @@ type DotnetSettings struct { // Common: Some settings. Common *CommonLanguageSettings `json:"common,omitempty"` + // ForcedNamespaceAliases: Namespaces which must be aliased in snippets + // due to a known (but non-generator-predictable) naming collision + ForcedNamespaceAliases []string `json:"forcedNamespaceAliases,omitempty"` + + // HandwrittenSignatures: Method signatures (in the form + // "service.method(signature)") which are provided separately, so + // shouldn't be generated. Snippets *calling* these methods are still + // generated, however. + HandwrittenSignatures []string `json:"handwrittenSignatures,omitempty"` + + // IgnoredResources: List of full resource types to ignore during + // generation. This is typically used for API-specific Location + // resources, which should be handled by the generator as if they were + // actually the common Location resources. Example entry: + // "documentai.googleapis.com/Location" + IgnoredResources []string `json:"ignoredResources,omitempty"` + + // RenamedResources: Map from full resource types to the effective short + // name for the resource. This is used when otherwise resource named + // from different services would cause naming collisions. 
Example entry: + // "datalabeling.googleapis.com/Dataset": "DataLabelingDataset" + RenamedResources map[string]string `json:"renamedResources,omitempty"` + + // RenamedServices: Map from original service names to renamed versions. + // This is used when the default generated types would cause a naming + // conflict. (Neither name is fully-qualified.) Example: Subscriber to + // SubscriberServiceApi. + RenamedServices map[string]string `json:"renamedServices,omitempty"` + // ForceSendFields is a list of field names (e.g. "Common") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -3254,10 +3294,11 @@ type MethodSettings struct { // LongRunning: Describes settings to use for long-running operations // when generating API methods for RPCs. Complements RPCs that use the // annotations in google/longrunning/operations.proto. Example of a YAML - // configuration:: publishing: method_behavior: - selector: - // CreateAdDomain long_running: initial_poll_delay: seconds: 60 # 1 - // minute poll_delay_multiplier: 1.5 max_poll_delay: seconds: 360 # 6 - // minutes total_poll_timeout: seconds: 54000 # 90 minutes + // configuration:: publishing: method_settings: - selector: + // google.cloud.speech.v2.Speech.BatchRecognize long_running: + // initial_poll_delay: seconds: 60 # 1 minute poll_delay_multiplier: 1.5 + // max_poll_delay: seconds: 360 # 6 minutes total_poll_timeout: seconds: + // 54000 # 90 minutes LongRunning *LongRunning `json:"longRunning,omitempty"` // Selector: The fully qualified name of the method, for which the @@ -3615,7 +3656,7 @@ func (s *MetricRule) MarshalJSON() ([]byte, error) { // The mixin construct implies that all methods in `AccessControl` are // also declared with same name and request/response types in `Storage`. 
// A documentation generator or annotation processor will see the -// effective `Storage.GetAcl` method after inheriting documentation and +// effective `Storage.GetAcl` method after inherting documentation and // annotations as follows: service Storage { // Get the underlying ACL // object. rpc GetAcl(GetAclRequest) returns (Acl) { option // (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how @@ -4174,7 +4215,10 @@ type PolicyBinding struct { // the specified granularity. The role must be one of the following: - // 'roles/container.hostServiceAgentUser' applied on the shared VPC host // project - 'roles/compute.securityAdmin' applied on the shared VPC - // host project + // host project - 'roles/compute.networkAdmin' applied on the shared VPC + // host project - 'roles/compute.xpnAdmin' applied on the shared VPC + // host project - 'roles/dns.admin' applied on the shared VPC host + // project Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Member") to @@ -4236,7 +4280,7 @@ type Publishing struct { // methods that use the long-running operation pattern. MethodSettings []*MethodSettings `json:"methodSettings,omitempty"` - // NewIssueUri: Link to a place that API users can report issues. + // NewIssueUri: Link to a *public* URI where users can report issues. // Example: // https://issuetracker.google.com/issues/new?component=190865&template=1161103 NewIssueUri string `json:"newIssueUri,omitempty"` @@ -4249,6 +4293,10 @@ type Publishing struct { // "ADS" - Ads (Advertising) Org. // "PHOTOS" - Photos Org. // "STREET_VIEW" - Street View Org. + // "SHOPPING" - Shopping Org. + // "GEO" - Geo Org. 
+ // "GENERATIVE_AI" - Generative AI - + // https://developers.generativeai.google Organization string `json:"organization,omitempty"` // ProtoReferenceDocumentationUri: Optional link to proto reference diff --git a/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json b/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json index a3174d8b4f..76c4e2080a 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json @@ -426,9 +426,33 @@ } } }, - "revision": "20230309", + "revision": "20230619", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { + "AddEnableRulesMetadata": { + "description": "Metadata for the `AddEnableRules` method.", + "id": "AddEnableRulesMetadata", + "properties": {}, + "type": "object" + }, + "AddEnableRulesResponse": { + "description": "The response message of \"AddEnableRules\" method.", + "id": "AddEnableRulesResponse", + "properties": { + "addedValues": { + "description": "The values added to the parent consumer policy.", + "items": { + "type": "string" + }, + "type": "array" + }, + "parent": { + "description": "The parent consumer policy. It can be `projects/12345/consumerPolicies/default`, or `folders/12345/consumerPolicies/default`, or `organizations/12345/consumerPolicies/default`.", + "type": "string" + } + }, + "type": "object" + }, "AdminQuotaPolicy": { "description": "Quota policy created by quota administrator.", "id": "AdminQuotaPolicy", @@ -441,7 +465,7 @@ "additionalProperties": { "type": "string" }, - "description": " If this map is nonempty, then this policy applies only to specific values for dimensions defined in the limit unit. 
For example, an policy on a limit with the unit `1/{project}/{region}` could contain an entry with the key `region` and the value `us-east-1`; the policy is only applied to quota consumed in that region. This map has the following restrictions: * If `region` appears as a key, its value must be a valid Cloud region. * If `zone` appears as a key, its value must be a valid Cloud zone. * Keys other than `region` or `zone` are not valid.", + "description": " If this map is nonempty, then this policy applies only to specific values for dimensions defined in the limit unit. For example, a policy on a limit with the unit `1/{project}/{region}` could contain an entry with the key `region` and the value `us-east-1`; the policy is only applied to quota consumed in that region. This map has the following restrictions: * If `region` appears as a key, its value must be a valid Cloud region. * If `zone` appears as a key, its value must be a valid Cloud zone. * Keys other than `region` or `zone` are not valid.", "type": "object" }, "metric": { @@ -870,7 +894,7 @@ "description": "Settings for Ruby client libraries." }, "version": { - "description": "Version of the API to apply these settings to.", + "description": "Version of the API to apply these settings to. This is the full protobuf package for the API, ending in the version element. Examples: \"google.cloud.speech.v1\" and \"google.spanner.admin.database.v1\".", "type": "string" } }, @@ -904,6 +928,41 @@ }, "type": "object" }, + "ConsumerPolicy": { + "description": "Consumer Policy is a set of rules that define what services or service groups can be used for a cloud resource hierarchy.", + "id": "ConsumerPolicy", + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Annotations is an unstructured key-value map stored with a policy that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. [AIP-128](https://google.aip.dev/128#annotations)", + "type": "object" + }, + "enableRules": { + "description": "Enable rules define usable services and service groups.", + "items": { + "$ref": "EnableRule" + }, + "type": "array" + }, + "etag": { + "description": "An opaque tag indicating the current version of the policy, used for concurrency control.", + "type": "string" + }, + "name": { + "description": "Output only. The resource name of the policy. For example, We only allow consumer policy name as \"default\" for now: `projects/12345/consumerPolicies/default`, `folders/12345/consumerPolicies/default`, `organizations/12345/consumerPolicies/default`. Legacy format: `projects/12345/consumerPoly`", + "readOnly": true, + "type": "string" + }, + "updateTime": { + "description": "The last-modified time.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, "Context": { "description": "`Context` defines which contexts an API requests. Example: context: rules: - selector: \"*\" requested: - google.rpc.context.ProjectContext - google.rpc.context.OriginContext The above specifies that all methods in the API request `google.rpc.context.ProjectContext` and `google.rpc.context.OriginContext`. Available context types are defined in package `google.rpc.context`. This also provides mechanism to allowlist any protobuf message extension that can be sent in grpc metadata using “x-goog-ext--bin” and “x-goog-ext--jspb” format. 
For example, list any service specific protobuf types that can appear in grpc metadata as follows in your yaml file: Example: context: rules: - selector: \"google.example.library.v1.LibraryService.CreateBook\" allowed_request_extensions: - google.foo.v1.NewExtension allowed_response_extensions: - google.foo.v1.NewExtension You can also specify extension ID instead of fully qualified extension name here.", "id": "Context", @@ -1079,7 +1138,7 @@ "type": "object" }, "Documentation": { - "description": "`Documentation` provides the information for describing a service. Example: documentation: summary: \u003e The Google Calendar API gives access to most calendar features. pages: - name: Overview content: (== include google/foo/overview.md ==) - name: Tutorial content: (== include google/foo/tutorial.md ==) subpages; - name: Java content: (== include google/foo/tutorial_java.md ==) rules: - selector: google.calendar.Calendar.Get description: \u003e ... - selector: google.calendar.Calendar.Put description: \u003e ... Documentation is provided in markdown syntax. In addition to standard markdown features, definition lists, tables and fenced code blocks are supported. Section headers can be provided and are interpreted relative to the section nesting of the context where a documentation fragment is embedded. Documentation from the IDL is merged with documentation defined via the config at normalization time, where documentation provided by config rules overrides IDL provided. A number of constructs specific to the API platform are supported in documentation text. In order to reference a proto element, the following notation can be used: [fully.qualified.proto.name][] To override the display text used for the link, this can be used: [display text][fully.qualified.proto.name] Text can be excluded from doc using the following notation: (-- internal comment --) A few directives are available in documentation. 
Note that directives must appear on a single line to be properly identified. The `include` directive includes a markdown file from an external source: (== include path/to/file ==) The `resource_for` directive marks a message to be the resource of a collection in REST view. If it is not specified, tools attempt to infer the resource from the operations in a collection: (== resource_for v1.shelves.books ==) The directive `suppress_warning` does not directly affect documentation and is documented together with service config validation.", + "description": "`Documentation` provides the information for describing a service. Example: documentation: summary: \u003e The Google Calendar API gives access to most calendar features. pages: - name: Overview content: (== include google/foo/overview.md ==) - name: Tutorial content: (== include google/foo/tutorial.md ==) subpages: - name: Java content: (== include google/foo/tutorial_java.md ==) rules: - selector: google.calendar.Calendar.Get description: \u003e ... - selector: google.calendar.Calendar.Put description: \u003e ... Documentation is provided in markdown syntax. In addition to standard markdown features, definition lists, tables and fenced code blocks are supported. Section headers can be provided and are interpreted relative to the section nesting of the context where a documentation fragment is embedded. Documentation from the IDL is merged with documentation defined via the config at normalization time, where documentation provided by config rules overrides IDL provided. A number of constructs specific to the API platform are supported in documentation text. In order to reference a proto element, the following notation can be used: [fully.qualified.proto.name][] To override the display text used for the link, this can be used: [display text][fully.qualified.proto.name] Text can be excluded from doc using the following notation: (-- internal comment --) A few directives are available in documentation. 
Note that directives must appear on a single line to be properly identified. The `include` directive includes a markdown file from an external source: (== include path/to/file ==) The `resource_for` directive marks a message to be the resource of a collection in REST view. If it is not specified, tools attempt to infer the resource from the operations in a collection: (== resource_for v1.shelves.books ==) The directive `suppress_warning` does not directly affect documentation and is documented together with service config validation.", "id": "Documentation", "properties": { "documentationRootUrl": { @@ -1104,6 +1163,13 @@ }, "type": "array" }, + "sectionOverrides": { + "description": "Specifies section and content to override boilerplate content provided by go/api-docgen. Currently overrides following sections: 1. rest.service.client_libraries", + "items": { + "$ref": "Page" + }, + "type": "array" + }, "serviceRootUrl": { "description": "Specifies the service root url if the default one (the service name from the yaml file) is not suitable. This can be seen in any fully specified service urls as well as sections that show a base that other urls are relative to.", "type": "string" @@ -1127,6 +1193,10 @@ "description": "Description of the selected proto element (e.g. a message, a method, a 'service' definition, or a field). Defaults to leading \u0026 trailing comments taken from the proto source definition of the proto element.", "type": "string" }, + "disableReplacementWords": { + "description": "String of comma or space separated case-sensitive words for which method/field name replacement will be disabled by go/api-docgen.", + "type": "string" + }, "selector": { "description": "The selector is a comma-separated list of patterns for any element such as a method, a field, an enum value. Each pattern is a qualified name of the element which may end in \"*\", indicating a wildcard. 
Wildcards are only allowed at the end and for a whole component of the qualified name, i.e. \"foo.*\" is ok, but not \"foo.b*\" or \"foo.*.bar\". A wildcard will match one or more components. To specify a default for all applicable elements, the whole pattern \"*\" is used.", "type": "string" @@ -1141,6 +1211,41 @@ "common": { "$ref": "CommonLanguageSettings", "description": "Some settings." + }, + "forcedNamespaceAliases": { + "description": "Namespaces which must be aliased in snippets due to a known (but non-generator-predictable) naming collision", + "items": { + "type": "string" + }, + "type": "array" + }, + "handwrittenSignatures": { + "description": "Method signatures (in the form \"service.method(signature)\") which are provided separately, so shouldn't be generated. Snippets *calling* these methods are still generated, however.", + "items": { + "type": "string" + }, + "type": "array" + }, + "ignoredResources": { + "description": "List of full resource types to ignore during generation. This is typically used for API-specific Location resources, which should be handled by the generator as if they were actually the common Location resources. Example entry: \"documentai.googleapis.com/Location\"", + "items": { + "type": "string" + }, + "type": "array" + }, + "renamedResources": { + "additionalProperties": { + "type": "string" + }, + "description": "Map from full resource types to the effective short name for the resource. This is used when otherwise resource named from different services would cause naming collisions. Example entry: \"datalabeling.googleapis.com/Dataset\": \"DataLabelingDataset\"", + "type": "object" + }, + "renamedServices": { + "additionalProperties": { + "type": "string" + }, + "description": "Map from original service names to renamed versions. This is used when the default generated types would cause a naming conflict. (Neither name is fully-qualified.) 
Example: Subscriber to SubscriberServiceApi.", + "type": "object" } }, "type": "object" @@ -1166,6 +1271,50 @@ }, "type": "object" }, + "EnableRule": { + "description": "The consumer policy rule that defines usable services and service groups.", + "id": "EnableRule", + "properties": { + "enableType": { + "description": "Client and resource project enable type.", + "enum": [ + "ENABLE_TYPE_UNSPECIFIED", + "CLIENT", + "RESOURCE", + "V1_COMPATIBLE" + ], + "enumDescriptions": [ + "Unspecified enable type, which means enabled as both client and resource project.", + "Enable all clients under the CRM node specified by `ConsumerPolicy.name` to use the listed services. A client can be an API key, an OAuth client, or a service account.", + "Enable resources in the list services to be created and used under the CRM node specified by the `ConsumerPolicy.name`.", + "Activation made by Service Usage v1 API. This will be how consumers differentiate between policy changes made by v1 and v2 clients and understand what is actually possible based on those different policies." + ], + "type": "string" + }, + "groups": { + "description": "DEPRECATED: Please use field `values`. Service group should have prefix `groups/`. The names of the service groups that are enabled (Not Implemented). go/predefined-service-groups. Example: `groups/googleServices`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "services": { + "description": "DEPRECATED: Please use field `values`. Service should have prefix `services/`. The names of the services that are enabled. Example: `storage.googleapis.com`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "values": { + "description": "The names of the services or service groups that are enabled. 
Example: `services/storage.googleapis.com`, groups/googleServices`, groups/allServices`.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "EnableServiceRequest": { "description": "Request message for the `EnableService` method.", "id": "EnableServiceRequest", @@ -2094,7 +2243,7 @@ "properties": { "longRunning": { "$ref": "LongRunning", - "description": "Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. Example of a YAML configuration:: publishing: method_behavior: - selector: CreateAdDomain long_running: initial_poll_delay: seconds: 60 # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: seconds: 360 # 6 minutes total_poll_timeout: seconds: 54000 # 90 minutes" + "description": "Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. Example of a YAML configuration:: publishing: method_settings: - selector: google.cloud.speech.v2.Speech.BatchRecognize long_running: initial_poll_delay: seconds: 60 # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: seconds: 360 # 6 minutes total_poll_timeout: seconds: 54000 # 90 minutes" }, "selector": { "description": "The fully qualified name of the method, for which the options below apply. This is used to find the method to apply the options.", @@ -2271,7 +2420,7 @@ "type": "object" }, "Mixin": { - "description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. 
- Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", + "description": "Declares an API Interface to be included in this interface. 
The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", "id": "Mixin", "properties": { "name": { @@ -2543,7 +2692,7 @@ "type": "array" }, "newIssueUri": { - "description": "Link to a place that API users can report issues. Example: https://issuetracker.google.com/issues/new?component=190865\u0026template=1161103", + "description": "Link to a *public* URI where users can report issues. Example: https://issuetracker.google.com/issues/new?component=190865\u0026template=1161103", "type": "string" }, "organization": { @@ -2553,14 +2702,20 @@ "CLOUD", "ADS", "PHOTOS", - "STREET_VIEW" + "STREET_VIEW", + "SHOPPING", + "GEO", + "GENERATIVE_AI" ], "enumDescriptions": [ "Not useful.", "Google Cloud Platform Org.", "Ads (Advertising) Org.", "Photos Org.", - "Street View Org." + "Street View Org.", + "Shopping Org.", + "Geo Org.", + "Generative AI - https://developers.generativeai.google" ], "type": "string" }, @@ -2692,6 +2847,30 @@ }, "type": "object" }, + "RemoveEnableRulesMetadata": { + "description": "Metadata for the `RemoveEnableRules` method.", + "id": "RemoveEnableRulesMetadata", + "properties": {}, + "type": "object" + }, + "RemoveEnableRulesResponse": { + "description": "The response message of \"RemoveEnableRules\" method.", + "id": "RemoveEnableRulesResponse", + "properties": { + "parent": { + "description": "The parent consumer policy. 
It can be `projects/12345/consumerPolicies/default`, or `folders/12345/consumerPolicies/default`, or `organizations/12345/consumerPolicies/default`.", + "type": "string" + }, + "removedValues": { + "description": "The values removed from the parent consumer policy.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "RubySettings": { "description": "Settings for Ruby client libraries.", "id": "RubySettings", @@ -2885,6 +3064,12 @@ "properties": {}, "type": "object" }, + "UpdateConsumerPolicyLROMetadata": { + "description": "Metadata for the `UpdateConsumerPolicyLRO` method.", + "id": "UpdateConsumerPolicyLROMetadata", + "properties": {}, + "type": "object" + }, "Usage": { "description": "Configuration controlling usage of a service.", "id": "Usage", diff --git a/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go b/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go index a713d49b7d..b8b69ee2d9 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "serviceusage:v1" const apiName = "serviceusage" @@ -171,6 +172,45 @@ type ServicesService struct { s *Service } +// AddEnableRulesMetadata: Metadata for the `AddEnableRules` method. +type AddEnableRulesMetadata struct { +} + +// AddEnableRulesResponse: The response message of "AddEnableRules" +// method. +type AddEnableRulesResponse struct { + // AddedValues: The values added to the parent consumer policy. + AddedValues []string `json:"addedValues,omitempty"` + + // Parent: The parent consumer policy. 
It can be + // `projects/12345/consumerPolicies/default`, or + // `folders/12345/consumerPolicies/default`, or + // `organizations/12345/consumerPolicies/default`. + Parent string `json:"parent,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AddedValues") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AddedValues") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddEnableRulesResponse) MarshalJSON() ([]byte, error) { + type NoMethod AddEnableRulesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AdminQuotaPolicy: Quota policy created by quota administrator. type AdminQuotaPolicy struct { // Container: The cloud resource container at which the quota policy is @@ -179,7 +219,7 @@ type AdminQuotaPolicy struct { // Dimensions: If this map is nonempty, then this policy applies only // to specific values for dimensions defined in the limit unit. For - // example, an policy on a limit with the unit `1/{project}/{region}` + // example, a policy on a limit with the unit `1/{project}/{region}` // could contain an entry with the key `region` and the value // `us-east-1`; the policy is only applied to quota consumed in that // region. 
This map has the following restrictions: * If `region` @@ -997,7 +1037,10 @@ type ClientLibrarySettings struct { // RubySettings: Settings for Ruby client libraries. RubySettings *RubySettings `json:"rubySettings,omitempty"` - // Version: Version of the API to apply these settings to. + // Version: Version of the API to apply these settings to. This is the + // full protobuf package for the API, ending in the version element. + // Examples: "google.cloud.speech.v1" and + // "google.spanner.admin.database.v1". Version string `json:"version,omitempty"` // ForceSendFields is a list of field names (e.g. "CppSettings") to @@ -1065,6 +1108,58 @@ func (s *CommonLanguageSettings) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ConsumerPolicy: Consumer Policy is a set of rules that define what +// services or service groups can be used for a cloud resource +// hierarchy. +type ConsumerPolicy struct { + // Annotations: Optional. Annotations is an unstructured key-value map + // stored with a policy that may be set by external tools to store and + // retrieve arbitrary metadata. They are not queryable and should be + // preserved when modifying objects. AIP-128 + // (https://google.aip.dev/128#annotations) + Annotations map[string]string `json:"annotations,omitempty"` + + // EnableRules: Enable rules define usable services and service groups. + EnableRules []*EnableRule `json:"enableRules,omitempty"` + + // Etag: An opaque tag indicating the current version of the policy, + // used for concurrency control. + Etag string `json:"etag,omitempty"` + + // Name: Output only. The resource name of the policy. For example, We + // only allow consumer policy name as "default" for now: + // `projects/12345/consumerPolicies/default`, + // `folders/12345/consumerPolicies/default`, + // `organizations/12345/consumerPolicies/default`. 
Legacy format: + // `projects/12345/consumerPoly` + Name string `json:"name,omitempty"` + + // UpdateTime: The last-modified time. + UpdateTime string `json:"updateTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Annotations") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Annotations") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ConsumerPolicy) MarshalJSON() ([]byte, error) { + type NoMethod ConsumerPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Context: `Context` defines which contexts an API requests. Example: // context: rules: - selector: "*" requested: - // google.rpc.context.ProjectContext - google.rpc.context.OriginContext @@ -1415,7 +1510,7 @@ func (s *DisableServiceResponse) MarshalJSON() ([]byte, error) { // describing a service. Example: documentation: summary: > The Google // Calendar API gives access to most calendar features. 
pages: - name: // Overview content: (== include google/foo/overview.md ==) - name: -// Tutorial content: (== include google/foo/tutorial.md ==) subpages; - +// Tutorial content: (== include google/foo/tutorial.md ==) subpages: - // name: Java content: (== include google/foo/tutorial_java.md ==) // rules: - selector: google.calendar.Calendar.Get description: > ... - // selector: google.calendar.Calendar.Put description: > ... @@ -1461,6 +1556,11 @@ type Documentation struct { // wins" order. Rules []*DocumentationRule `json:"rules,omitempty"` + // SectionOverrides: Specifies section and content to override + // boilerplate content provided by go/api-docgen. Currently overrides + // following sections: 1. rest.service.client_libraries + SectionOverrides []*Page `json:"sectionOverrides,omitempty"` + // ServiceRootUrl: Specifies the service root url if the default one // (the service name from the yaml file) is not suitable. This can be // seen in any fully specified service urls as well as sections that @@ -1512,6 +1612,11 @@ type DocumentationRule struct { // the proto element. Description string `json:"description,omitempty"` + // DisableReplacementWords: String of comma or space separated + // case-sensitive words for which method/field name replacement will be + // disabled by go/api-docgen. + DisableReplacementWords string `json:"disableReplacementWords,omitempty"` + // Selector: The selector is a comma-separated list of patterns for any // element such as a method, a field, an enum value. Each pattern is a // qualified name of the element which may end in "*", indicating a @@ -1552,6 +1657,35 @@ type DotnetSettings struct { // Common: Some settings. 
Common *CommonLanguageSettings `json:"common,omitempty"` + // ForcedNamespaceAliases: Namespaces which must be aliased in snippets + // due to a known (but non-generator-predictable) naming collision + ForcedNamespaceAliases []string `json:"forcedNamespaceAliases,omitempty"` + + // HandwrittenSignatures: Method signatures (in the form + // "service.method(signature)") which are provided separately, so + // shouldn't be generated. Snippets *calling* these methods are still + // generated, however. + HandwrittenSignatures []string `json:"handwrittenSignatures,omitempty"` + + // IgnoredResources: List of full resource types to ignore during + // generation. This is typically used for API-specific Location + // resources, which should be handled by the generator as if they were + // actually the common Location resources. Example entry: + // "documentai.googleapis.com/Location" + IgnoredResources []string `json:"ignoredResources,omitempty"` + + // RenamedResources: Map from full resource types to the effective short + // name for the resource. This is used when otherwise resource named + // from different services would cause naming collisions. Example entry: + // "datalabeling.googleapis.com/Dataset": "DataLabelingDataset" + RenamedResources map[string]string `json:"renamedResources,omitempty"` + + // RenamedServices: Map from original service names to renamed versions. + // This is used when the default generated types would cause a naming + // conflict. (Neither name is fully-qualified.) Example: Subscriber to + // SubscriberServiceApi. + RenamedServices map[string]string `json:"renamedServices,omitempty"` + // ForceSendFields is a list of field names (e.g. "Common") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any @@ -1618,6 +1752,64 @@ func (s *EnableFailure) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// EnableRule: The consumer policy rule that defines usable services and +// service groups. +type EnableRule struct { + // EnableType: Client and resource project enable type. + // + // Possible values: + // "ENABLE_TYPE_UNSPECIFIED" - Unspecified enable type, which means + // enabled as both client and resource project. + // "CLIENT" - Enable all clients under the CRM node specified by + // `ConsumerPolicy.name` to use the listed services. A client can be an + // API key, an OAuth client, or a service account. + // "RESOURCE" - Enable resources in the list services to be created + // and used under the CRM node specified by the `ConsumerPolicy.name`. + // "V1_COMPATIBLE" - Activation made by Service Usage v1 API. This + // will be how consumers differentiate between policy changes made by v1 + // and v2 clients and understand what is actually possible based on + // those different policies. + EnableType string `json:"enableType,omitempty"` + + // Groups: DEPRECATED: Please use field `values`. Service group should + // have prefix `groups/`. The names of the service groups that are + // enabled (Not Implemented). go/predefined-service-groups. Example: + // `groups/googleServices`. + Groups []string `json:"groups,omitempty"` + + // Services: DEPRECATED: Please use field `values`. Service should have + // prefix `services/`. The names of the services that are enabled. + // Example: `storage.googleapis.com`. + Services []string `json:"services,omitempty"` + + // Values: The names of the services or service groups that are enabled. + // Example: `services/storage.googleapis.com`, groups/googleServices`, + // groups/allServices`. + Values []string `json:"values,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EnableType") to + // unconditionally include in API requests. 
By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EnableType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *EnableRule) MarshalJSON() ([]byte, error) { + type NoMethod EnableRule + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // EnableServiceRequest: Request message for the `EnableService` method. type EnableServiceRequest struct { } @@ -3163,10 +3355,11 @@ type MethodSettings struct { // LongRunning: Describes settings to use for long-running operations // when generating API methods for RPCs. Complements RPCs that use the // annotations in google/longrunning/operations.proto. 
Example of a YAML - // configuration:: publishing: method_behavior: - selector: - // CreateAdDomain long_running: initial_poll_delay: seconds: 60 # 1 - // minute poll_delay_multiplier: 1.5 max_poll_delay: seconds: 360 # 6 - // minutes total_poll_timeout: seconds: 54000 # 90 minutes + // configuration:: publishing: method_settings: - selector: + // google.cloud.speech.v2.Speech.BatchRecognize long_running: + // initial_poll_delay: seconds: 60 # 1 minute poll_delay_multiplier: 1.5 + // max_poll_delay: seconds: 360 # 6 minutes total_poll_timeout: seconds: + // 54000 # 90 minutes LongRunning *LongRunning `json:"longRunning,omitempty"` // Selector: The fully qualified name of the method, for which the @@ -3524,7 +3717,7 @@ func (s *MetricRule) MarshalJSON() ([]byte, error) { // The mixin construct implies that all methods in `AccessControl` are // also declared with same name and request/response types in `Storage`. // A documentation generator or annotation processor will see the -// effective `Storage.GetAcl` method after inheriting documentation and +// effective `Storage.GetAcl` method after inherting documentation and // annotations as follows: service Storage { // Get the underlying ACL // object. rpc GetAcl(GetAclRequest) returns (Acl) { option // (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how @@ -4089,7 +4282,7 @@ type Publishing struct { // methods that use the long-running operation pattern. MethodSettings []*MethodSettings `json:"methodSettings,omitempty"` - // NewIssueUri: Link to a place that API users can report issues. + // NewIssueUri: Link to a *public* URI where users can report issues. // Example: // https://issuetracker.google.com/issues/new?component=190865&template=1161103 NewIssueUri string `json:"newIssueUri,omitempty"` @@ -4102,6 +4295,10 @@ type Publishing struct { // "ADS" - Ads (Advertising) Org. // "PHOTOS" - Photos Org. // "STREET_VIEW" - Street View Org. + // "SHOPPING" - Shopping Org. + // "GEO" - Geo Org. 
+ // "GENERATIVE_AI" - Generative AI - + // https://developers.generativeai.google Organization string `json:"organization,omitempty"` // ProtoReferenceDocumentationUri: Optional link to proto reference @@ -4384,6 +4581,46 @@ func (s *QuotaOverride) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RemoveEnableRulesMetadata: Metadata for the `RemoveEnableRules` +// method. +type RemoveEnableRulesMetadata struct { +} + +// RemoveEnableRulesResponse: The response message of +// "RemoveEnableRules" method. +type RemoveEnableRulesResponse struct { + // Parent: The parent consumer policy. It can be + // `projects/12345/consumerPolicies/default`, or + // `folders/12345/consumerPolicies/default`, or + // `organizations/12345/consumerPolicies/default`. + Parent string `json:"parent,omitempty"` + + // RemovedValues: The values removed from the parent consumer policy. + RemovedValues []string `json:"removedValues,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Parent") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Parent") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *RemoveEnableRulesResponse) MarshalJSON() ([]byte, error) { + type NoMethod RemoveEnableRulesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // RubySettings: Settings for Ruby client libraries. type RubySettings struct { // Common: Some settings. @@ -4729,6 +4966,11 @@ func (s *Type) MarshalJSON() ([]byte, error) { type UpdateAdminQuotaPolicyMetadata struct { } +// UpdateConsumerPolicyLROMetadata: Metadata for the +// `UpdateConsumerPolicyLRO` method. +type UpdateConsumerPolicyLROMetadata struct { +} + // Usage: Configuration controlling usage of a service. type Usage struct { // ProducerNotificationChannel: The full resource name of a channel used diff --git a/terraform/providers/google/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-gen.go b/terraform/providers/google/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-gen.go index b2050620bb..6e30dabe2d 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/sourcerepo/v1/sourcerepo-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "sourcerepo:v1" const apiName = "sourcerepo" diff --git a/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-api.json b/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-api.json index 61b027f2af..aecb4cfc6e 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-api.json @@ -1452,6 +1452,41 @@ "https://www.googleapis.com/auth/spanner.admin" ] }, + "patch": { + "description": "Updates a Cloud Spanner database. 
The returned long-running operation can be used to track the progress of updating the database. If the named database does not exist, returns `NOT_FOUND`. While the operation is pending: * The database's reconciling field is set to true. * Cancelling the operation is best-effort. If the cancellation succeeds, the operation metadata's cancel_time is set, the updates are reverted, and the operation terminates with a `CANCELLED` status. * New UpdateDatabase requests will return a `FAILED_PRECONDITION` error until the pending operation is done (returns successfully or with error). * Reading the database via the API continues to give the pre-request values. Upon completion of the returned operation: * The new values are in effect and readable via the API. * The database's reconciling field becomes false. The returned long-running operation will have a name of the format `projects//instances//databases//operations/` and can be used to track the database modification. The metadata field type is UpdateDatabaseMetadata. The response field type is Database, if successful.", + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}", + "httpMethod": "PATCH", + "id": "spanner.projects.instances.databases.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the database. Values are of the form `projects//instances//databases/`, where `` is as specified in the `CREATE DATABASE` statement. This name can be passed to other API methods to identify the database.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Required. The list of fields to update. 
Currently, only `enable_drop_protection` field can be updated.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "Database" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin" + ] + }, "restore": { "description": "Create a new database by restoring from a completed backup. The new database must be in the same project and in an instance with the same instance configuration as the instance containing the backup. The returned database long-running operation has a name of the format `projects//instances//databases//operations/`, and can be used to track the progress of the operation, and to cancel it. The metadata field type is RestoreDatabaseMetadata. The response type is Database, if successful. Cancelling the returned operation will stop the restore and delete the database. There can be only one database being restored into an instance at a time. Once the restore operation completes, a new restore operation can be initiated, without waiting for the optimize operation associated with the first restore to complete.", "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases:restore", @@ -2212,6 +2247,134 @@ } } }, + "instancePartitions": { + "resources": { + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/instancePartitions/{instancePartitionsId}/operations/{operationsId}:cancel", + "httpMethod": "POST", + "id": "spanner.projects.instances.instancePartitions.operations.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be cancelled.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/instancePartitions/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:cancel", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin" + ] + }, + "delete": { + "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. 
If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/instancePartitions/{instancePartitionsId}/operations/{operationsId}", + "httpMethod": "DELETE", + "id": "spanner.projects.instances.instancePartitions.operations.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource to be deleted.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/instancePartitions/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin" + ] + }, + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/instancePartitions/{instancePartitionsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "spanner.projects.instances.instancePartitions.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/instancePartitions/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin" + ] + }, + "list": { + "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", + "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/instancePartitions/{instancePartitionsId}/operations", + "httpMethod": "GET", + "id": "spanner.projects.instances.instancePartitions.operations.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "filter": { + "description": "The standard list filter.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name of the operation's parent resource.", + "location": "path", + "pattern": "^projects/[^/]+/instances/[^/]+/instancePartitions/[^/]+/operations$", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The standard list page token.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin" + ] + } + } + } + } + }, "operations": { "methods": { "cancel": { @@ -2402,7 +2565,7 @@ } } }, - "revision": "20230310", + "revision": "20230614", "rootUrl": "https://spanner.googleapis.com/", "schemas": { "Backup": { @@ -2428,7 +2591,7 @@ ], "enumDescriptions": [ "Default value. This value will create a database with the GOOGLE_STANDARD_SQL dialect.", - "Google standard SQL.", + "GoogleSQL supported SQL.", "PostgreSQL supported SQL." ], "readOnly": true, @@ -2835,7 +2998,7 @@ ], "enumDescriptions": [ "Default value. This value will create a database with the GOOGLE_STANDARD_SQL dialect.", - "Google standard SQL.", + "GoogleSQL supported SQL.", "PostgreSQL supported SQL." ], "type": "string" @@ -2969,7 +3132,7 @@ ], "enumDescriptions": [ "Default value. 
This value will create a database with the GOOGLE_STANDARD_SQL dialect.", - "Google standard SQL.", + "GoogleSQL supported SQL.", "PostgreSQL supported SQL." ], "readOnly": true, @@ -2986,6 +3149,10 @@ "readOnly": true, "type": "string" }, + "enableDropProtection": { + "description": "Whether drop protection is enabled for this database. Defaults to false, if not set.", + "type": "boolean" + }, "encryptionConfig": { "$ref": "EncryptionConfig", "description": "Output only. For databases that are using customer managed encryption, this field contains the encryption configuration for the database. For databases that are using Google default or other types of encryption, this field is empty.", @@ -3003,6 +3170,11 @@ "description": "Required. The name of the database. Values are of the form `projects//instances//databases/`, where `` is as specified in the `CREATE DATABASE` statement. This name can be passed to other API methods to identify the database.", "type": "string" }, + "reconciling": { + "description": "Output only. If true, the database is being updated. If false, there are no ongoing update operations for the database.", + "readOnly": true, + "type": "boolean" + }, "restoreInfo": { "$ref": "RestoreInfo", "description": "Output only. Applicable only for restored databases. Contains information about the restore source.", @@ -3044,6 +3216,28 @@ }, "type": "object" }, + "DdlStatementActionInfo": { + "description": "Action information extracted from a DDL statement. This proto is used to display the brief info of the DDL statement for the operation UpdateDatabaseDdl.", + "id": "DdlStatementActionInfo", + "properties": { + "action": { + "description": "The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc. This field is a non-empty string.", + "type": "string" + }, + "entityNames": { + "description": "The entity name(s) being operated on the DDL statement. E.g. 1. For statement \"CREATE TABLE t1(...)\", `entity_names` = [\"t1\"]. 2. 
For statement \"GRANT ROLE r1, r2 ...\", `entity_names` = [\"r1\", \"r2\"]. 3. For statement \"ANALYZE\", `entity_names` = [].", + "items": { + "type": "string" + }, + "type": "array" + }, + "entityType": { + "description": "The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc. This field can be empty string for some DDL statement, e.g. for statement \"ANALYZE\", `entity_type` = \"\".", + "type": "string" + } + }, + "type": "object" + }, "Delete": { "description": "Arguments to delete operations.", "id": "Delete", @@ -3599,7 +3793,7 @@ "type": "array" }, "state": { - "description": "Output only. The current instance config state.", + "description": "Output only. The current instance config state. Applicable only for USER_MANAGED configs.", "enum": [ "STATE_UNSPECIFIED", "CREATING", @@ -5198,6 +5392,13 @@ "description": "Metadata type for the operation returned by UpdateDatabaseDdl.", "id": "UpdateDatabaseDdlMetadata", "properties": { + "actions": { + "description": "The brief action info for the DDL statements. `actions[i]` is the brief info for `statements[i]`.", + "items": { + "$ref": "DdlStatementActionInfo" + }, + "type": "array" + }, "commitTimestamps": { "description": "Reports the commit timestamps of all statements that have succeeded so far, where `commit_timestamps[i]` is the commit timestamp for the statement `statements[i]`.", "items": { @@ -5211,7 +5412,7 @@ "type": "string" }, "progress": { - "description": "The progress of the UpdateDatabaseDdl operations. Currently, only index creation statements will have a continuously updating progress. For non-index creation statements, `progress[i]` will have start time and end time populated with commit timestamp of operation, as well as a progress of 100% once the operation has completed. `progress[i]` is the operation progress for `statements[i]`.", + "description": "The progress of the UpdateDatabaseDdl operations. 
All DDL statements will have continuously updating progress, and `progress[i]` is the operation progress for `statements[i]`. Also, `progress[i]` will have start time and end time populated with commit timestamp of operation, as well as a progress of 100% once the operation has completed.", "items": { "$ref": "OperationProgress" }, @@ -5225,7 +5426,7 @@ "type": "array" }, "throttled": { - "description": "Output only. When true, indicates that the operation is throttled e.g due to resource constraints. When resources become available the operation will resume and this field will be false again.", + "description": "Output only. When true, indicates that the operation is throttled e.g. due to resource constraints. When resources become available the operation will resume and this field will be false again.", "readOnly": true, "type": "boolean" } @@ -5255,6 +5456,42 @@ }, "type": "object" }, + "UpdateDatabaseMetadata": { + "description": "Metadata type for the operation returned by UpdateDatabase.", + "id": "UpdateDatabaseMetadata", + "properties": { + "cancelTime": { + "description": "The time at which this operation was cancelled. If set, this operation is in the process of undoing itself (which is best-effort).", + "format": "google-datetime", + "type": "string" + }, + "progress": { + "$ref": "OperationProgress", + "description": "The progress of the UpdateDatabase operation." + }, + "request": { + "$ref": "UpdateDatabaseRequest", + "description": "The request for UpdateDatabase." + } + }, + "type": "object" + }, + "UpdateDatabaseRequest": { + "description": "The request for UpdateDatabase.", + "id": "UpdateDatabaseRequest", + "properties": { + "database": { + "$ref": "Database", + "description": "Required. The database to update. The `name` field of the database is of the form `projects//instances//databases/`." + }, + "updateMask": { + "description": "Required. The list of fields to update. 
Currently, only `enable_drop_protection` field can be updated.", + "format": "google-fieldmask", + "type": "string" + } + }, + "type": "object" + }, "UpdateInstanceConfigMetadata": { "description": "Metadata type for the operation returned by UpdateInstanceConfig.", "id": "UpdateInstanceConfigMetadata", diff --git a/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-gen.go b/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-gen.go index 9ac3cb0a78..e7a9cc4ecc 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/spanner/v1/spanner-gen.go @@ -77,6 +77,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "spanner:v1" const apiName = "spanner" @@ -208,6 +209,7 @@ func NewProjectsInstancesService(s *Service) *ProjectsInstancesService { rs.Backups = NewProjectsInstancesBackupsService(s) rs.DatabaseOperations = NewProjectsInstancesDatabaseOperationsService(s) rs.Databases = NewProjectsInstancesDatabasesService(s) + rs.InstancePartitions = NewProjectsInstancesInstancePartitionsService(s) rs.Operations = NewProjectsInstancesOperationsService(s) return rs } @@ -223,6 +225,8 @@ type ProjectsInstancesService struct { Databases *ProjectsInstancesDatabasesService + InstancePartitions *ProjectsInstancesInstancePartitionsService + Operations *ProjectsInstancesOperationsService } @@ -310,6 +314,27 @@ type ProjectsInstancesDatabasesSessionsService struct { s *Service } +func NewProjectsInstancesInstancePartitionsService(s *Service) *ProjectsInstancesInstancePartitionsService { + rs := &ProjectsInstancesInstancePartitionsService{s: s} + rs.Operations = NewProjectsInstancesInstancePartitionsOperationsService(s) + return rs +} + +type ProjectsInstancesInstancePartitionsService struct { + s *Service + + Operations 
*ProjectsInstancesInstancePartitionsOperationsService +} + +func NewProjectsInstancesInstancePartitionsOperationsService(s *Service) *ProjectsInstancesInstancePartitionsOperationsService { + rs := &ProjectsInstancesInstancePartitionsOperationsService{s: s} + return rs +} + +type ProjectsInstancesInstancePartitionsOperationsService struct { + s *Service +} + func NewProjectsInstancesOperationsService(s *Service) *ProjectsInstancesOperationsService { rs := &ProjectsInstancesOperationsService{s: s} return rs @@ -347,7 +372,7 @@ type Backup struct { // Possible values: // "DATABASE_DIALECT_UNSPECIFIED" - Default value. This value will // create a database with the GOOGLE_STANDARD_SQL dialect. - // "GOOGLE_STANDARD_SQL" - Google standard SQL. + // "GOOGLE_STANDARD_SQL" - GoogleSQL supported SQL. // "POSTGRESQL" - PostgreSQL supported SQL. DatabaseDialect string `json:"databaseDialect,omitempty"` @@ -1128,7 +1153,7 @@ type CreateDatabaseRequest struct { // Possible values: // "DATABASE_DIALECT_UNSPECIFIED" - Default value. This value will // create a database with the GOOGLE_STANDARD_SQL dialect. - // "GOOGLE_STANDARD_SQL" - Google standard SQL. + // "GOOGLE_STANDARD_SQL" - GoogleSQL supported SQL. // "POSTGRESQL" - PostgreSQL supported SQL. DatabaseDialect string `json:"databaseDialect,omitempty"` @@ -1376,7 +1401,7 @@ type Database struct { // Possible values: // "DATABASE_DIALECT_UNSPECIFIED" - Default value. This value will // create a database with the GOOGLE_STANDARD_SQL dialect. - // "GOOGLE_STANDARD_SQL" - Google standard SQL. + // "GOOGLE_STANDARD_SQL" - GoogleSQL supported SQL. // "POSTGRESQL" - PostgreSQL supported SQL. DatabaseDialect string `json:"databaseDialect,omitempty"` @@ -1395,6 +1420,10 @@ type Database struct { // initiate the recovery. EarliestVersionTime string `json:"earliestVersionTime,omitempty"` + // EnableDropProtection: Whether drop protection is enabled for this + // database. Defaults to false, if not set. 
+ EnableDropProtection bool `json:"enableDropProtection,omitempty"` + // EncryptionConfig: Output only. For databases that are using customer // managed encryption, this field contains the encryption configuration // for the database. For databases that are using Google default or @@ -1417,6 +1446,10 @@ type Database struct { // methods to identify the database. Name string `json:"name,omitempty"` + // Reconciling: Output only. If true, the database is being updated. If + // false, there are no ongoing update operations for the database. + Reconciling bool `json:"reconciling,omitempty"` + // RestoreInfo: Output only. Applicable only for restored databases. // Contains information about the restore source. RestoreInfo *RestoreInfo `json:"restoreInfo,omitempty"` @@ -1499,6 +1532,48 @@ func (s *DatabaseRole) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DdlStatementActionInfo: Action information extracted from a DDL +// statement. This proto is used to display the brief info of the DDL +// statement for the operation UpdateDatabaseDdl. +type DdlStatementActionInfo struct { + // Action: The action for the DDL statement, e.g. CREATE, ALTER, DROP, + // GRANT, etc. This field is a non-empty string. + Action string `json:"action,omitempty"` + + // EntityNames: The entity name(s) being operated on the DDL statement. + // E.g. 1. For statement "CREATE TABLE t1(...)", `entity_names` = + // ["t1"]. 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = + // ["r1", "r2"]. 3. For statement "ANALYZE", `entity_names` = []. + EntityNames []string `json:"entityNames,omitempty"` + + // EntityType: The entity type for the DDL statement, e.g. TABLE, INDEX, + // VIEW, etc. This field can be empty string for some DDL statement, + // e.g. for statement "ANALYZE", `entity_type` = "". + EntityType string `json:"entityType,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Action") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DdlStatementActionInfo) MarshalJSON() ([]byte, error) { + type NoMethod DdlStatementActionInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Delete: Arguments to delete operations. type Delete struct { // KeySet: Required. The primary keys of the rows within table to @@ -2471,7 +2546,8 @@ type InstanceConfig struct { // configuration and their replication properties. Replicas []*ReplicaInfo `json:"replicas,omitempty"` - // State: Output only. The current instance config state. + // State: Output only. The current instance config state. Applicable + // only for USER_MANAGED configs. // // Possible values: // "STATE_UNSPECIFIED" - Not specified. 
@@ -3381,6 +3457,23 @@ func (s *MetricMatrixRow) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +func (s *MetricMatrixRow) UnmarshalJSON(data []byte) error { + type NoMethod MetricMatrixRow + var s1 struct { + Cols []gensupport.JSONFloat64 `json:"cols"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Cols = make([]float64, len(s1.Cols)) + for i := range s1.Cols { + s.Cols[i] = float64(s1.Cols[i]) + } + return nil +} + // Mutation: A modification to one or more Cloud Spanner rows. Mutations // can be applied to a Cloud Spanner database by sending them in a // Commit call. @@ -5723,6 +5816,10 @@ func (s *Type) MarshalJSON() ([]byte, error) { // UpdateDatabaseDdlMetadata: Metadata type for the operation returned // by UpdateDatabaseDdl. type UpdateDatabaseDdlMetadata struct { + // Actions: The brief action info for the DDL statements. `actions[i]` + // is the brief info for `statements[i]`. + Actions []*DdlStatementActionInfo `json:"actions,omitempty"` + // CommitTimestamps: Reports the commit timestamps of all statements // that have succeeded so far, where `commit_timestamps[i]` is the // commit timestamp for the statement `statements[i]`. @@ -5731,13 +5828,12 @@ type UpdateDatabaseDdlMetadata struct { // Database: The database being modified. Database string `json:"database,omitempty"` - // Progress: The progress of the UpdateDatabaseDdl operations. - // Currently, only index creation statements will have a continuously - // updating progress. For non-index creation statements, `progress[i]` - // will have start time and end time populated with commit timestamp of - // operation, as well as a progress of 100% once the operation has - // completed. `progress[i]` is the operation progress for - // `statements[i]`. + // Progress: The progress of the UpdateDatabaseDdl operations. 
All DDL + // statements will have continuously updating progress, and + // `progress[i]` is the operation progress for `statements[i]`. Also, + // `progress[i]` will have start time and end time populated with commit + // timestamp of operation, as well as a progress of 100% once the + // operation has completed. Progress []*OperationProgress `json:"progress,omitempty"` // Statements: For an update this list contains all the statements. For @@ -5745,12 +5841,12 @@ type UpdateDatabaseDdlMetadata struct { Statements []string `json:"statements,omitempty"` // Throttled: Output only. When true, indicates that the operation is - // throttled e.g due to resource constraints. When resources become + // throttled e.g. due to resource constraints. When resources become // available the operation will resume and this field will be false // again. Throttled bool `json:"throttled,omitempty"` - // ForceSendFields is a list of field names (e.g. "CommitTimestamps") to + // ForceSendFields is a list of field names (e.g. "Actions") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -5758,13 +5854,12 @@ type UpdateDatabaseDdlMetadata struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CommitTimestamps") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Actions") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -5840,6 +5935,76 @@ func (s *UpdateDatabaseDdlRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UpdateDatabaseMetadata: Metadata type for the operation returned by +// UpdateDatabase. +type UpdateDatabaseMetadata struct { + // CancelTime: The time at which this operation was cancelled. If set, + // this operation is in the process of undoing itself (which is + // best-effort). + CancelTime string `json:"cancelTime,omitempty"` + + // Progress: The progress of the UpdateDatabase operation. + Progress *OperationProgress `json:"progress,omitempty"` + + // Request: The request for UpdateDatabase. + Request *UpdateDatabaseRequest `json:"request,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CancelTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CancelTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *UpdateDatabaseMetadata) MarshalJSON() ([]byte, error) { + type NoMethod UpdateDatabaseMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UpdateDatabaseRequest: The request for UpdateDatabase. +type UpdateDatabaseRequest struct { + // Database: Required. The database to update. The `name` field of the + // database is of the form `projects//instances//databases/`. + Database *Database `json:"database,omitempty"` + + // UpdateMask: Required. The list of fields to update. Currently, only + // `enable_drop_protection` field can be updated. + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Database") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Database") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UpdateDatabaseRequest) MarshalJSON() ([]byte, error) { + type NoMethod UpdateDatabaseRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // UpdateInstanceConfigMetadata: Metadata type for the operation // returned by UpdateInstanceConfig. 
type UpdateInstanceConfigMetadata struct { @@ -12929,6 +13094,183 @@ func (c *ProjectsInstancesDatabasesListCall) Pages(ctx context.Context, f func(* } } +// method id "spanner.projects.instances.databases.patch": + +type ProjectsInstancesDatabasesPatchCall struct { + s *Service + name string + database *Database + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a Cloud Spanner database. The returned long-running +// operation can be used to track the progress of updating the database. +// If the named database does not exist, returns `NOT_FOUND`. While the +// operation is pending: * The database's reconciling field is set to +// true. * Cancelling the operation is best-effort. If the cancellation +// succeeds, the operation metadata's cancel_time is set, the updates +// are reverted, and the operation terminates with a `CANCELLED` status. +// * New UpdateDatabase requests will return a `FAILED_PRECONDITION` +// error until the pending operation is done (returns successfully or +// with error). * Reading the database via the API continues to give the +// pre-request values. Upon completion of the returned operation: * The +// new values are in effect and readable via the API. * The database's +// reconciling field becomes false. The returned long-running operation +// will have a name of the format +// `projects//instances//databases//operations/` and can be used to +// track the database modification. The metadata field type is +// UpdateDatabaseMetadata. The response field type is Database, if +// successful. +// +// - name: The name of the database. Values are of the form +// `projects//instances//databases/`, where “ is as specified in the +// `CREATE DATABASE` statement. This name can be passed to other API +// methods to identify the database. 
+func (r *ProjectsInstancesDatabasesService) Patch(name string, database *Database) *ProjectsInstancesDatabasesPatchCall { + c := &ProjectsInstancesDatabasesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.database = database + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. The +// list of fields to update. Currently, only `enable_drop_protection` +// field can be updated. +func (c *ProjectsInstancesDatabasesPatchCall) UpdateMask(updateMask string) *ProjectsInstancesDatabasesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesDatabasesPatchCall) Fields(s ...googleapi.Field) *ProjectsInstancesDatabasesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesDatabasesPatchCall) Context(ctx context.Context) *ProjectsInstancesDatabasesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsInstancesDatabasesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesDatabasesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.database) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.databases.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesDatabasesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a Cloud Spanner database. The returned long-running operation can be used to track the progress of updating the database. If the named database does not exist, returns `NOT_FOUND`. While the operation is pending: * The database's reconciling field is set to true. * Cancelling the operation is best-effort. If the cancellation succeeds, the operation metadata's cancel_time is set, the updates are reverted, and the operation terminates with a `CANCELLED` status. * New UpdateDatabase requests will return a `FAILED_PRECONDITION` error until the pending operation is done (returns successfully or with error). * Reading the database via the API continues to give the pre-request values. Upon completion of the returned operation: * The new values are in effect and readable via the API. * The database's reconciling field becomes false. The returned long-running operation will have a name of the format `projects//instances//databases//operations/` and can be used to track the database modification. The metadata field type is UpdateDatabaseMetadata. 
The response field type is Database, if successful.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/databases/{databasesId}", + // "httpMethod": "PATCH", + // "id": "spanner.projects.instances.databases.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the database. Values are of the form `projects//instances//databases/`, where `` is as specified in the `CREATE DATABASE` statement. This name can be passed to other API methods to identify the database.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/databases/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Required. The list of fields to update. Currently, only `enable_drop_protection` field can be updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "request": { + // "$ref": "Database" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/spanner.admin" + // ] + // } + +} + // method id "spanner.projects.instances.databases.restore": type ProjectsInstancesDatabasesRestoreCall struct { @@ -16819,6 +17161,640 @@ func (c *ProjectsInstancesDatabasesSessionsStreamingReadCall) Do(opts ...googlea } +// method id "spanner.projects.instances.instancePartitions.operations.cancel": + +type ProjectsInstancesInstancePartitionsOperationsCancelCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Cancel: Starts asynchronous cancellation on a long-running operation. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. If the server doesn't support this method, it +// returns `google.rpc.Code.UNIMPLEMENTED`. 
Clients can use +// Operations.GetOperation or other methods to check whether the +// cancellation succeeded or whether the operation completed despite +// cancellation. On successful cancellation, the operation is not +// deleted; instead, it becomes an operation with an Operation.error +// value with a google.rpc.Status.code of 1, corresponding to +// `Code.CANCELLED`. +// +// - name: The name of the operation resource to be cancelled. +func (r *ProjectsInstancesInstancePartitionsOperationsService) Cancel(name string) *ProjectsInstancesInstancePartitionsOperationsCancelCall { + c := &ProjectsInstancesInstancePartitionsOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesInstancePartitionsOperationsCancelCall) Fields(s ...googleapi.Field) *ProjectsInstancesInstancePartitionsOperationsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesInstancePartitionsOperationsCancelCall) Context(ctx context.Context) *ProjectsInstancesInstancePartitionsOperationsCancelCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsInstancesInstancePartitionsOperationsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesInstancePartitionsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.instancePartitions.operations.cancel" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesInstancePartitionsOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/instancePartitions/{instancePartitionsId}/operations/{operationsId}:cancel", + // "httpMethod": "POST", + // "id": "spanner.projects.instances.instancePartitions.operations.cancel", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the operation resource to be cancelled.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/instancePartitions/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:cancel", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/spanner.admin" + // ] + // } + +} + +// method id "spanner.projects.instances.instancePartitions.operations.delete": + +type ProjectsInstancesInstancePartitionsOperationsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a long-running operation. This method indicates that +// the client is no longer interested in the operation result. It does +// not cancel the operation. If the server doesn't support this method, +// it returns `google.rpc.Code.UNIMPLEMENTED`. +// +// - name: The name of the operation resource to be deleted. +func (r *ProjectsInstancesInstancePartitionsOperationsService) Delete(name string) *ProjectsInstancesInstancePartitionsOperationsDeleteCall { + c := &ProjectsInstancesInstancePartitionsOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesInstancePartitionsOperationsDeleteCall) Fields(s ...googleapi.Field) *ProjectsInstancesInstancePartitionsOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesInstancePartitionsOperationsDeleteCall) Context(ctx context.Context) *ProjectsInstancesInstancePartitionsOperationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesInstancePartitionsOperationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesInstancePartitionsOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.instancePartitions.operations.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsInstancesInstancePartitionsOperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. 
If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/instancePartitions/{instancePartitionsId}/operations/{operationsId}", + // "httpMethod": "DELETE", + // "id": "spanner.projects.instances.instancePartitions.operations.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the operation resource to be deleted.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/instancePartitions/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/spanner.admin" + // ] + // } + +} + +// method id "spanner.projects.instances.instancePartitions.operations.get": + +type ProjectsInstancesInstancePartitionsOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. Clients can +// use this method to poll the operation result at intervals as +// recommended by the API service. +// +// - name: The name of the operation resource. +func (r *ProjectsInstancesInstancePartitionsOperationsService) Get(name string) *ProjectsInstancesInstancePartitionsOperationsGetCall { + c := &ProjectsInstancesInstancePartitionsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsInstancesInstancePartitionsOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsInstancesInstancePartitionsOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesInstancePartitionsOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsInstancesInstancePartitionsOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInstancesInstancePartitionsOperationsGetCall) Context(ctx context.Context) *ProjectsInstancesInstancePartitionsOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesInstancePartitionsOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesInstancePartitionsOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.instancePartitions.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsInstancesInstancePartitionsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/instancePartitions/{instancePartitionsId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "spanner.projects.instances.instancePartitions.operations.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the operation resource.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/instancePartitions/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/spanner.admin" + // ] + // } + +} + +// method id "spanner.projects.instances.instancePartitions.operations.list": + +type ProjectsInstancesInstancePartitionsOperationsListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists operations that match the specified filter in the +// request. If the server doesn't support this method, it returns +// `UNIMPLEMENTED`. +// +// - name: The name of the operation's parent resource. +func (r *ProjectsInstancesInstancePartitionsOperationsService) List(name string) *ProjectsInstancesInstancePartitionsOperationsListCall { + c := &ProjectsInstancesInstancePartitionsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Filter sets the optional parameter "filter": The standard list +// filter. 
+func (c *ProjectsInstancesInstancePartitionsOperationsListCall) Filter(filter string) *ProjectsInstancesInstancePartitionsOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The standard list +// page size. +func (c *ProjectsInstancesInstancePartitionsOperationsListCall) PageSize(pageSize int64) *ProjectsInstancesInstancePartitionsOperationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The standard list +// page token. +func (c *ProjectsInstancesInstancePartitionsOperationsListCall) PageToken(pageToken string) *ProjectsInstancesInstancePartitionsOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesInstancePartitionsOperationsListCall) Fields(s ...googleapi.Field) *ProjectsInstancesInstancePartitionsOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesInstancePartitionsOperationsListCall) IfNoneMatch(entityTag string) *ProjectsInstancesInstancePartitionsOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsInstancesInstancePartitionsOperationsListCall) Context(ctx context.Context) *ProjectsInstancesInstancePartitionsOperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesInstancePartitionsOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesInstancePartitionsOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "spanner.projects.instances.instancePartitions.operations.list" call. +// Exactly one of *ListOperationsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListOperationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *ProjectsInstancesInstancePartitionsOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListOperationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", + // "flatPath": "v1/projects/{projectsId}/instances/{instancesId}/instancePartitions/{instancePartitionsId}/operations", + // "httpMethod": "GET", + // "id": "spanner.projects.instances.instancePartitions.operations.list", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "filter": { + // "description": "The standard list filter.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "The name of the operation's parent resource.", + // "location": "path", + // "pattern": "^projects/[^/]+/instances/[^/]+/instancePartitions/[^/]+/operations$", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "The standard list page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The standard list page token.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": 
"v1/{+name}", + // "response": { + // "$ref": "ListOperationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/spanner.admin" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsInstancesInstancePartitionsOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "spanner.projects.instances.operations.cancel": type ProjectsInstancesOperationsCancelCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json b/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json index 2b38bb6fc2..7885a612e4 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json @@ -428,7 +428,7 @@ ] }, "insert": { - "description": "Inserts a resource containing information about a database inside a Cloud SQL instance.", + "description": "Inserts a resource containing information about a database inside a Cloud SQL instance. 
**Note:** You can't modify the default character set and collation.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/databases", "httpMethod": "POST", "id": "sql.databases.insert", @@ -1045,6 +1045,41 @@ "https://www.googleapis.com/auth/sqlservice.admin" ] }, + "reencrypt": { + "description": "Reencrypt CMEK instance with latest key version.", + "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/reencrypt", + "httpMethod": "POST", + "id": "sql.instances.reencrypt", + "parameterOrder": [ + "project", + "instance" + ], + "parameters": { + "instance": { + "description": "Cloud SQL instance ID. This does not include the project ID.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "ID of the project that contains the instance.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "sql/v1beta4/projects/{project}/instances/{instance}/reencrypt", + "request": { + "$ref": "InstancesReencryptRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, "resetSslConfig": { "description": "Deletes all client certificates and generates a new server SSL certificate for the instance.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/resetSslConfig", @@ -1317,6 +1352,38 @@ }, "operations": { "methods": { + "cancel": { + "description": "Cancels an instance operation that has been performed on an instance.", + "flatPath": "sql/v1beta4/projects/{project}/operations/{operation}/cancel", + "httpMethod": "POST", + "id": "sql.operations.cancel", + "parameterOrder": [ + "project", + "operation" + ], + "parameters": { + "operation": { + "description": "Instance operation ID.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID of the project that contains the instance.", + "location": 
"path", + "required": true, + "type": "string" + } + }, + "path": "sql/v1beta4/projects/{project}/operations/{operation}/cancel", + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, "get": { "description": "Retrieves an instance operation that has been performed on an instance.", "flatPath": "sql/v1beta4/projects/{project}/operations/{operation}", @@ -1428,6 +1495,38 @@ "https://www.googleapis.com/auth/sqlservice.admin" ] }, + "getLatestRecoveryTime": { + "description": "Get Latest Recovery Time for a given instance.", + "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/getLatestRecoveryTime", + "httpMethod": "GET", + "id": "sql.projects.instances.getLatestRecoveryTime", + "parameterOrder": [ + "project", + "instance" + ], + "parameters": { + "instance": { + "description": "Cloud SQL instance ID. This does not include the project ID.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID of the project that contains the instance.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "sql/v1beta4/projects/{project}/instances/{instance}/getLatestRecoveryTime", + "response": { + "$ref": "SqlInstancesGetLatestRecoveryTimeResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, "performDiskShrink": { "description": "Perform Disk Shrink on primary instance.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/performDiskShrink", @@ -2023,7 +2122,7 @@ } } }, - "revision": "20230309", + "revision": "20230627", "rootUrl": "https://sqladmin.googleapis.com/", "schemas": { "AclEntry": { @@ -2050,6 +2149,18 @@ }, "type": "object" }, + "AdvancedMachineFeatures": { + "description": "Specifies options for controlling advanced machine features.", + "id": 
"AdvancedMachineFeatures", + "properties": { + "threadsPerCore": { + "description": "The number of threads per physical core.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "ApiWarning": { "description": "An Admin API warning message.", "id": "ApiWarning", @@ -2104,7 +2215,7 @@ "type": "string" }, "pointInTimeRecoveryEnabled": { - "description": "(Postgres only) Whether point in time recovery is enabled.", + "description": "Whether point in time recovery is enabled.", "type": "boolean" }, "replicationLogArchivingEnabled": { @@ -2139,6 +2250,32 @@ }, "type": "object" }, + "BackupReencryptionConfig": { + "description": "Backup Reencryption Config", + "id": "BackupReencryptionConfig", + "properties": { + "backupLimit": { + "description": "Backup re-encryption limit", + "format": "int32", + "type": "integer" + }, + "backupType": { + "description": "Type of backups users want to re-encrypt.", + "enum": [ + "BACKUP_TYPE_UNSPECIFIED", + "AUTOMATED", + "ON_DEMAND" + ], + "enumDescriptions": [ + "Unknown backup type, will be defaulted to AUTOMATIC backup type", + "Reencrypt automatic backups", + "Reencrypt on-demand backups" + ], + "type": "string" + } + }, + "type": "object" + }, "BackupRetentionSettings": { "description": "We currently only support backup retention by specifying the number of backups we will retain.", "id": "BackupRetentionSettings", @@ -2365,6 +2502,10 @@ "description": "Timestamp, if specified, identifies the time to which the source instance is cloned.", "format": "google-datetime", "type": "string" + }, + "preferredZone": { + "description": "Optional. (Point-in-time recovery for PostgreSQL only) Clone to an instance in the specified zone. 
If no zone is specified, clone to the same zone as the source instance.", + "type": "string" } }, "type": "object" @@ -2381,6 +2522,12 @@ "SECOND_GEN", "EXTERNAL" ], + "enumDeprecated": [ + false, + true, + false, + false + ], "enumDescriptions": [ "This is an unknown backend type for instance.", "V1 speckle instance.", @@ -2407,6 +2554,7 @@ "POSTGRES_12", "POSTGRES_13", "POSTGRES_14", + "POSTGRES_15", "MYSQL_8_0", "MYSQL_8_0_18", "MYSQL_8_0_26", @@ -2416,10 +2564,57 @@ "MYSQL_8_0_30", "MYSQL_8_0_31", "MYSQL_8_0_32", + "MYSQL_8_0_33", + "MYSQL_8_0_34", + "MYSQL_8_0_35", + "MYSQL_8_0_36", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS", - "SQLSERVER_2019_WEB" + "SQLSERVER_2019_WEB", + "SQLSERVER_2022_STANDARD", + "SQLSERVER_2022_ENTERPRISE", + "SQLSERVER_2022_EXPRESS", + "SQLSERVER_2022_WEB" + ], + "enumDeprecated": [ + false, + true, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false ], "enumDescriptions": [ "This is an unknown database version.", @@ -2437,6 +2632,7 @@ "The database version is PostgreSQL 12.", "The database version is PostgreSQL 13.", "The database version is PostgreSQL 14.", + "The database version is PostgreSQL 15.", "The database version is MySQL 8.", "The database major version is MySQL 8.0 and the minor version is 18.", "The database major version is MySQL 8.0 and the minor version is 26.", @@ -2446,13 +2642,25 @@ "The database major version is MySQL 8.0 and the minor version is 30.", "The database major version is MySQL 8.0 and the minor version is 31.", "The database major version is MySQL 8.0 and the minor version is 32.", + "The database major version is MySQL 8.0 and the minor version is 33.", + "The database major version is MySQL 8.0 and the minor 
version is 34.", + "The database major version is MySQL 8.0 and the minor version is 35.", + "The database major version is MySQL 8.0 and the minor version is 36.", "The database version is SQL Server 2019 Standard.", "The database version is SQL Server 2019 Enterprise.", "The database version is SQL Server 2019 Express.", - "The database version is SQL Server 2019 Web." + "The database version is SQL Server 2019 Web.", + "The database version is SQL Server 2022 Standard.", + "The database version is SQL Server 2022 Enterprise.", + "The database version is SQL Server 2022 Express.", + "The database version is SQL Server 2022 Web." ], "type": "string" }, + "dnsName": { + "description": "The dns name of the instance.", + "type": "string" + }, "ipAddresses": { "description": "The assigned IP addresses for the instance.", "items": { @@ -2475,6 +2683,17 @@ }, "type": "object" }, + "DataCacheConfig": { + "description": "Data cache configurations.", + "id": "DataCacheConfig", + "properties": { + "dataCacheEnabled": { + "description": "Whether data cache is enabled for the instance.", + "type": "boolean" + } + }, + "type": "object" + }, "Database": { "description": "Represents a SQL database on the Cloud SQL instance.", "id": "Database", @@ -2537,10 +2756,11 @@ "id": "DatabaseInstance", "properties": { "availableMaintenanceVersions": { - "description": "List all maintenance versions applicable on the instance", + "description": "Output only. List all maintenance versions applicable on the instance", "items": { "type": "string" }, + "readOnly": true, "type": "array" }, "backendType": { @@ -2551,6 +2771,12 @@ "SECOND_GEN", "EXTERNAL" ], + "enumDeprecated": [ + false, + true, + false, + false + ], "enumDescriptions": [ "This is an unknown backend type for instance.", "V1 speckle instance.", @@ -2570,6 +2796,7 @@ "type": "string" }, "currentDiskSize": { + "deprecated": true, "description": "The current disk usage of the instance in bytes. This property has been deprecated. 
Use the \"cloudsql.googleapis.com/database/disk/bytes_used\" metric in Cloud Monitoring API instead. Please see [this announcement](https://groups.google.com/d/msg/google-cloud-sql-announce/I_7-F9EBhT0/BtvFtdFeAgAJ) for details.", "format": "int64", "type": "string" @@ -2597,6 +2824,7 @@ "POSTGRES_12", "POSTGRES_13", "POSTGRES_14", + "POSTGRES_15", "MYSQL_8_0", "MYSQL_8_0_18", "MYSQL_8_0_26", @@ -2606,10 +2834,57 @@ "MYSQL_8_0_30", "MYSQL_8_0_31", "MYSQL_8_0_32", + "MYSQL_8_0_33", + "MYSQL_8_0_34", + "MYSQL_8_0_35", + "MYSQL_8_0_36", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS", - "SQLSERVER_2019_WEB" + "SQLSERVER_2019_WEB", + "SQLSERVER_2022_STANDARD", + "SQLSERVER_2022_ENTERPRISE", + "SQLSERVER_2022_EXPRESS", + "SQLSERVER_2022_WEB" + ], + "enumDeprecated": [ + false, + true, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false ], "enumDescriptions": [ "This is an unknown database version.", @@ -2627,6 +2902,7 @@ "The database version is PostgreSQL 12.", "The database version is PostgreSQL 13.", "The database version is PostgreSQL 14.", + "The database version is PostgreSQL 15.", "The database version is MySQL 8.", "The database major version is MySQL 8.0 and the minor version is 18.", "The database major version is MySQL 8.0 and the minor version is 26.", @@ -2636,10 +2912,18 @@ "The database major version is MySQL 8.0 and the minor version is 30.", "The database major version is MySQL 8.0 and the minor version is 31.", "The database major version is MySQL 8.0 and the minor version is 32.", + "The database major version is MySQL 8.0 and the minor version is 33.", + "The database major version is MySQL 8.0 and the minor version is 34.", + "The database major version is MySQL 
8.0 and the minor version is 35.", + "The database major version is MySQL 8.0 and the minor version is 36.", "The database version is SQL Server 2019 Standard.", "The database version is SQL Server 2019 Enterprise.", "The database version is SQL Server 2019 Express.", - "The database version is SQL Server 2019 Web." + "The database version is SQL Server 2019 Web.", + "The database version is SQL Server 2022 Standard.", + "The database version is SQL Server 2022 Enterprise.", + "The database version is SQL Server 2022 Express.", + "The database version is SQL Server 2022 Web." ], "type": "string" }, @@ -2697,6 +2981,7 @@ "type": "array" }, "ipv6Address": { + "deprecated": true, "description": "The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.", "type": "string" }, @@ -2713,6 +2998,7 @@ "type": "string" }, "maxDiskSize": { + "deprecated": true, "description": "The maximum disk size of the instance in bytes.", "format": "int64", "type": "string" @@ -2792,6 +3078,16 @@ "FAILED", "ONLINE_MAINTENANCE" ], + "enumDeprecated": [ + false, + false, + false, + false, + false, + false, + false, + true + ], "enumDescriptions": [ "The state of the instance is unknown.", "The instance is running, or has been stopped by owner.", @@ -2968,6 +3264,12 @@ }, "type": "object" }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", + "id": "Empty", + "properties": {}, + "type": "object" + }, "ExportContext": { "description": "Database instance export context.", "id": "ExportContext", @@ -2975,6 +3277,29 @@ "bakExportOptions": { "description": "Options for exporting BAK files (SQL Server-only)", "properties": { + "bakType": { + "description": "Type of this bak file will be export, FULL or DIFF, SQL Server only", + "enum": [ + "BAK_TYPE_UNSPECIFIED", + "FULL", + "DIFF" + ], + "enumDescriptions": [ + "default type.", + "Full backup.", + "Differential backup." + ], + "type": "string" + }, + "copyOnly": { + "deprecated": true, + "description": "Deprecated: copy_only is deprecated. Use differential_base instead", + "type": "boolean" + }, + "differentialBase": { + "description": "Whether or not the backup can be used as a differential base copy_only backup can not be served as differential base", + "type": "boolean" + }, "stripeCount": { "description": "Option for specifying how many stripes to use for the export. 
If blank, and the value of the striped field is true, the number of stripes is automatically chosen.", "format": "int32", @@ -3133,6 +3458,7 @@ "POSTGRES_12", "POSTGRES_13", "POSTGRES_14", + "POSTGRES_15", "MYSQL_8_0", "MYSQL_8_0_18", "MYSQL_8_0_26", @@ -3142,10 +3468,57 @@ "MYSQL_8_0_30", "MYSQL_8_0_31", "MYSQL_8_0_32", + "MYSQL_8_0_33", + "MYSQL_8_0_34", + "MYSQL_8_0_35", + "MYSQL_8_0_36", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS", - "SQLSERVER_2019_WEB" + "SQLSERVER_2019_WEB", + "SQLSERVER_2022_STANDARD", + "SQLSERVER_2022_ENTERPRISE", + "SQLSERVER_2022_EXPRESS", + "SQLSERVER_2022_WEB" + ], + "enumDeprecated": [ + false, + true, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false ], "enumDescriptions": [ "This is an unknown database version.", @@ -3163,6 +3536,7 @@ "The database version is PostgreSQL 12.", "The database version is PostgreSQL 13.", "The database version is PostgreSQL 14.", + "The database version is PostgreSQL 15.", "The database version is MySQL 8.", "The database major version is MySQL 8.0 and the minor version is 18.", "The database major version is MySQL 8.0 and the minor version is 26.", @@ -3172,10 +3546,18 @@ "The database major version is MySQL 8.0 and the minor version is 30.", "The database major version is MySQL 8.0 and the minor version is 31.", "The database major version is MySQL 8.0 and the minor version is 32.", + "The database major version is MySQL 8.0 and the minor version is 33.", + "The database major version is MySQL 8.0 and the minor version is 34.", + "The database major version is MySQL 8.0 and the minor version is 35.", + "The database major version is MySQL 8.0 and the minor version is 36.", "The database version is SQL Server 
2019 Standard.", "The database version is SQL Server 2019 Enterprise.", "The database version is SQL Server 2019 Express.", - "The database version is SQL Server 2019 Web." + "The database version is SQL Server 2019 Web.", + "The database version is SQL Server 2022 Standard.", + "The database version is SQL Server 2022 Enterprise.", + "The database version is SQL Server 2022 Express.", + "The database version is SQL Server 2022 Web." ], "type": "string" }, @@ -3295,6 +3677,20 @@ "bakImportOptions": { "description": "Import parameters specific to SQL Server .BAK files", "properties": { + "bakType": { + "description": "Type of the bak content, FULL or DIFF.", + "enum": [ + "BAK_TYPE_UNSPECIFIED", + "FULL", + "DIFF" + ], + "enumDescriptions": [ + "default type.", + "Full backup.", + "Differential backup." + ], + "type": "string" + }, "encryptionOptions": { "properties": { "certPath": { @@ -3312,6 +3708,14 @@ }, "type": "object" }, + "noRecovery": { + "description": "Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server.", + "type": "boolean" + }, + "recoveryOnly": { + "description": "Whether or not the backup importing request will just bring database online without downloading Bak content only one of \"no_recovery\" and \"recovery_only\" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server.", + "type": "boolean" + }, "striped": { "description": "Whether or not the backup set being restored is striped. 
Applies only to Cloud SQL for SQL Server.", "type": "boolean" @@ -3540,6 +3944,17 @@ }, "type": "object" }, + "InstancesReencryptRequest": { + "description": "Database Instance reencrypt request.", + "id": "InstancesReencryptRequest", + "properties": { + "backupReencryptionConfig": { + "$ref": "BackupReencryptionConfig", + "description": "Configuration specific to backup re-encryption" + } + }, + "type": "object" + }, "InstancesRestoreBackupRequest": { "description": "Database instance restore backup request.", "id": "InstancesRestoreBackupRequest", @@ -3646,6 +4061,7 @@ "id": "LocationPreference", "properties": { "followGaeApplication": { + "deprecated": true, "description": "The App Engine application to follow, it must be in the same region as the Cloud SQL instance. WARNING: Changing this might restart the instance.", "type": "string" }, @@ -3887,7 +4303,49 @@ "START_EXTERNAL_SYNC", "LOG_CLEANUP", "AUTO_RESTART", - "REENCRYPT" + "REENCRYPT", + "SWITCHOVER" + ], + "enumDeprecated": [ + false, + false, + false, + false, + false, + false, + false, + true, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + true, + true, + false, + false, + false, + false, + false, + false ], "enumDescriptions": [ "Unknown operation type.", @@ -3927,7 +4385,8 @@ "Starts external sync of a Cloud SQL EM replica to an external primary instance.", "Recovers logs from an instance's old data disk.", "Performs auto-restart of an HA-enabled Cloud SQL database for auto recovery.", - "Re-encrypts CMEK instances with latest key version." + "Re-encrypts CMEK instances with latest key version.", + "Switches over to replica instance from primary." 
], "type": "string" }, @@ -4244,6 +4703,12 @@ "NEVER", "ON_DEMAND" ], + "enumDeprecated": [ + false, + false, + false, + true + ], "enumDescriptions": [ "Unknown activation plan.", "The instance is always up and running.", @@ -4256,7 +4721,12 @@ "$ref": "SqlActiveDirectoryConfig", "description": "Active Directory configuration, relevant only for Cloud SQL for SQL Server." }, + "advancedMachineFeatures": { + "$ref": "AdvancedMachineFeatures", + "description": "Specifies advance machine configuration for the instance relevant only for SQL Server." + }, "authorizedGaeApplications": { + "deprecated": true, "description": "The App Engine app IDs that can access this instance. (Deprecated) Applied to First Generation instances only.", "items": { "type": "string" @@ -4300,9 +4770,14 @@ "type": "string" }, "crashSafeReplicationEnabled": { + "deprecated": true, "description": "Configuration specific to read replica instances. Indicates whether database flags for crash-safe replication are enabled. This property was only applicable to First Generation instances.", "type": "boolean" }, + "dataCacheConfig": { + "$ref": "DataCacheConfig", + "description": "Configuration for data cache." + }, "dataDiskSizeGb": { "description": "The size of data disk, in GB. The data disk size minimum is 10GB.", "format": "int64", @@ -4316,6 +4791,12 @@ "PD_HDD", "OBSOLETE_LOCAL_SSD" ], + "enumDeprecated": [ + false, + false, + false, + true + ], "enumDescriptions": [ "This is an unknown data disk type.", "An SSD data disk.", @@ -4346,6 +4827,20 @@ }, "type": "array" }, + "edition": { + "description": "Optional. The edition of the instance.", + "enum": [ + "EDITION_UNSPECIFIED", + "ENTERPRISE", + "ENTERPRISE_PLUS" + ], + "enumDescriptions": [ + "The instance did not specify the edition.", + "The instance is an enterprise edition.", + "The instance is an Enterprise Plus edition." 
+ ], + "type": "string" + }, "insightsConfig": { "$ref": "InsightsConfig", "description": "Insights configuration, for now relevant only for Postgres." @@ -4385,6 +4880,7 @@ "type": "string" }, "replicationType": { + "deprecated": true, "description": "The type of replication this instance uses. This can be either `ASYNCHRONOUS` or `SYNCHRONOUS`. (Deprecated) This property was only applicable to First Generation instances.", "enum": [ "SQL_REPLICATION_TYPE_UNSPECIFIED", @@ -4492,7 +4988,13 @@ "BINLOG_RETENTION_SETTING", "UNSUPPORTED_STORAGE_ENGINE", "LIMITED_SUPPORT_TABLES", - "EXISTING_DATA_IN_REPLICA" + "EXISTING_DATA_IN_REPLICA", + "MISSING_OPTIONAL_PRIVILEGES", + "RISKY_BACKUP_ADMIN_PRIVILEGE", + "INSUFFICIENT_GCS_PERMISSIONS", + "INVALID_FILE_INFO", + "UNSUPPORTED_DATABASE_SETTINGS", + "MYSQL_PARALLEL_IMPORT_INSUFFICIENT_PRIVILEGE" ], "enumDescriptions": [ "", @@ -4500,7 +5002,7 @@ "", "", "", - "", + "The replication user is missing privileges that are required.", "Unsupported migration type.", "No pglogical extension installed on databases, applicable for postgres.", "pglogical node already exists on databases, applicable for postgres.", @@ -4517,13 +5019,19 @@ "SQL Server Agent is not running.", "The table definition is not support due to missing primary key or replica identity, applicable for postgres.", "The customer has a definer that will break EM setup.", - "SQL Server @@SERVERNAME does not match actual host name", + "SQL Server @@SERVERNAME does not match actual host name.", "The primary instance has been setup and will fail the setup.", "The primary instance has unsupported binary log format.", "The primary instance's binary log retention setting.", "The primary instance has tables with unsupported storage engine.", "Source has tables with limited support eg: PostgreSQL tables without primary keys.", - "The replica instance contains existing data." 
+ "The replica instance contains existing data.", + "The replication user is missing privileges that are optional.", + "Additional BACKUP_ADMIN privilege is granted to the replication user which may lock source MySQL 8 instance for DDLs during initial sync.", + "The Cloud Storage bucket is missing necessary permissions.", + "The Cloud Storage bucket has an error in the file or contains invalid file information.", + "The source instance has unsupported database settings for migration.", + "The replication user is missing parallel import specific privileges. (e.g. LOCK TABLES) for MySQL." ], "type": "string" } @@ -4538,6 +5046,10 @@ "description": "This is always `sql#getDiskShrinkConfig`.", "type": "string" }, + "message": { + "description": "Additional message to customers.", + "type": "string" + }, "minimalTargetSizeGb": { "description": "The minimum size to which a disk can be shrunk in GigaBytes.", "format": "int64", @@ -4546,6 +5058,22 @@ }, "type": "object" }, + "SqlInstancesGetLatestRecoveryTimeResponse": { + "description": "Instance get latest recovery time response.", + "id": "SqlInstancesGetLatestRecoveryTimeResponse", + "properties": { + "kind": { + "description": "This is always `sql#getLatestRecoveryTime`.", + "type": "string" + }, + "latestRecoveryTime": { + "description": "Timestamp, identifies the latest recovery time of the source instance.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, "SqlInstancesRescheduleMaintenanceRequestBody": { "description": "Reschedule options for maintenance windows.", "id": "SqlInstancesRescheduleMaintenanceRequestBody", @@ -4587,6 +5115,22 @@ "Offline external sync only dumps and loads a one-time snapshot of the primary instance's data" ], "type": "string" + }, + "syncParallelLevel": { + "description": "Optional. Parallel level for initial data sync. 
Currently only applicable for MySQL.", + "enum": [ + "EXTERNAL_SYNC_PARALLEL_LEVEL_UNSPECIFIED", + "MIN", + "OPTIMAL", + "MAX" + ], + "enumDescriptions": [ + "Unknown sync parallel level. Will be defaulted to OPTIMAL.", + "Minimal parallel level.", + "Optimal parallel level.", + "Maximum parallel level." + ], + "type": "string" } }, "type": "object" @@ -4679,6 +5223,7 @@ "id": "SqlScheduledMaintenance", "properties": { "canDefer": { + "deprecated": true, "type": "boolean" }, "canReschedule": { @@ -5082,6 +5627,7 @@ "type": "string" }, "nextPageToken": { + "deprecated": true, "description": "Unused.", "type": "string" } diff --git a/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go b/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go index bf53327d9c..b66ec18cda 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "sqladmin:v1beta4" const apiName = "sqladmin" @@ -314,6 +315,36 @@ func (s *AclEntry) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AdvancedMachineFeatures: Specifies options for controlling advanced +// machine features. +type AdvancedMachineFeatures struct { + // ThreadsPerCore: The number of threads per physical core. + ThreadsPerCore int64 `json:"threadsPerCore,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ThreadsPerCore") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ThreadsPerCore") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AdvancedMachineFeatures) MarshalJSON() ([]byte, error) { + type NoMethod AdvancedMachineFeatures + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ApiWarning: An Admin API warning message. type ApiWarning struct { // Code: Code to uniquely identify the warning type. @@ -375,8 +406,8 @@ type BackupConfiguration struct { // Location: Location of the backup Location string `json:"location,omitempty"` - // PointInTimeRecoveryEnabled: (Postgres only) Whether point in time - // recovery is enabled. + // PointInTimeRecoveryEnabled: Whether point in time recovery is + // enabled. PointInTimeRecoveryEnabled bool `json:"pointInTimeRecoveryEnabled,omitempty"` // ReplicationLogArchivingEnabled: Reserved for future use. @@ -446,6 +477,43 @@ func (s *BackupContext) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackupReencryptionConfig: Backup Reencryption Config +type BackupReencryptionConfig struct { + // BackupLimit: Backup re-encryption limit + BackupLimit int64 `json:"backupLimit,omitempty"` + + // BackupType: Type of backups users want to re-encrypt. 
+ // + // Possible values: + // "BACKUP_TYPE_UNSPECIFIED" - Unknown backup type, will be defaulted + // to AUTOMATIC backup type + // "AUTOMATED" - Reencrypt automatic backups + // "ON_DEMAND" - Reencrypt on-demand backups + BackupType string `json:"backupType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackupLimit") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackupLimit") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackupReencryptionConfig) MarshalJSON() ([]byte, error) { + type NoMethod BackupReencryptionConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BackupRetentionSettings: We currently only support backup retention // by specifying the number of backups we will retain. type BackupRetentionSettings struct { @@ -722,6 +790,11 @@ type CloneContext struct { // the source instance is cloned. PointInTime string `json:"pointInTime,omitempty"` + // PreferredZone: Optional. (Point-in-time recovery for PostgreSQL only) + // Clone to an instance in the specified zone. If no zone is specified, + // clone to the same zone as the source instance. 
+ PreferredZone string `json:"preferredZone,omitempty"` + // ForceSendFields is a list of field names (e.g. "AllocatedIpRange") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -792,6 +865,7 @@ type ConnectSettings struct { // "POSTGRES_12" - The database version is PostgreSQL 12. // "POSTGRES_13" - The database version is PostgreSQL 13. // "POSTGRES_14" - The database version is PostgreSQL 14. + // "POSTGRES_15" - The database version is PostgreSQL 15. // "MYSQL_8_0" - The database version is MySQL 8. // "MYSQL_8_0_18" - The database major version is MySQL 8.0 and the // minor version is 18. @@ -809,6 +883,14 @@ type ConnectSettings struct { // minor version is 31. // "MYSQL_8_0_32" - The database major version is MySQL 8.0 and the // minor version is 32. + // "MYSQL_8_0_33" - The database major version is MySQL 8.0 and the + // minor version is 33. + // "MYSQL_8_0_34" - The database major version is MySQL 8.0 and the + // minor version is 34. + // "MYSQL_8_0_35" - The database major version is MySQL 8.0 and the + // minor version is 35. + // "MYSQL_8_0_36" - The database major version is MySQL 8.0 and the + // minor version is 36. // "SQLSERVER_2019_STANDARD" - The database version is SQL Server 2019 // Standard. // "SQLSERVER_2019_ENTERPRISE" - The database version is SQL Server @@ -816,8 +898,18 @@ type ConnectSettings struct { // "SQLSERVER_2019_EXPRESS" - The database version is SQL Server 2019 // Express. // "SQLSERVER_2019_WEB" - The database version is SQL Server 2019 Web. + // "SQLSERVER_2022_STANDARD" - The database version is SQL Server 2022 + // Standard. + // "SQLSERVER_2022_ENTERPRISE" - The database version is SQL Server + // 2022 Enterprise. + // "SQLSERVER_2022_EXPRESS" - The database version is SQL Server 2022 + // Express. + // "SQLSERVER_2022_WEB" - The database version is SQL Server 2022 Web. 
DatabaseVersion string `json:"databaseVersion,omitempty"` + // DnsName: The dns name of the instance. + DnsName string `json:"dnsName,omitempty"` + // IpAddresses: The assigned IP addresses for the instance. IpAddresses []*IpMapping `json:"ipAddresses,omitempty"` @@ -858,6 +950,35 @@ func (s *ConnectSettings) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DataCacheConfig: Data cache configurations. +type DataCacheConfig struct { + // DataCacheEnabled: Whether data cache is enabled for the instance. + DataCacheEnabled bool `json:"dataCacheEnabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DataCacheEnabled") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DataCacheEnabled") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DataCacheConfig) MarshalJSON() ([]byte, error) { + type NoMethod DataCacheConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Database: Represents a SQL database on the Cloud SQL instance. type Database struct { // Charset: The Cloud SQL charset value. 
@@ -957,8 +1078,8 @@ func (s *DatabaseFlags) MarshalJSON() ([]byte, error) { // DatabaseInstance: A Cloud SQL instance resource. type DatabaseInstance struct { - // AvailableMaintenanceVersions: List all maintenance versions - // applicable on the instance + // AvailableMaintenanceVersions: Output only. List all maintenance + // versions applicable on the instance AvailableMaintenanceVersions []string `json:"availableMaintenanceVersions,omitempty"` // BackendType: The backend type. `SECOND_GEN`: Cloud SQL database @@ -1019,6 +1140,7 @@ type DatabaseInstance struct { // "POSTGRES_12" - The database version is PostgreSQL 12. // "POSTGRES_13" - The database version is PostgreSQL 13. // "POSTGRES_14" - The database version is PostgreSQL 14. + // "POSTGRES_15" - The database version is PostgreSQL 15. // "MYSQL_8_0" - The database version is MySQL 8. // "MYSQL_8_0_18" - The database major version is MySQL 8.0 and the // minor version is 18. @@ -1036,6 +1158,14 @@ type DatabaseInstance struct { // minor version is 31. // "MYSQL_8_0_32" - The database major version is MySQL 8.0 and the // minor version is 32. + // "MYSQL_8_0_33" - The database major version is MySQL 8.0 and the + // minor version is 33. + // "MYSQL_8_0_34" - The database major version is MySQL 8.0 and the + // minor version is 34. + // "MYSQL_8_0_35" - The database major version is MySQL 8.0 and the + // minor version is 35. + // "MYSQL_8_0_36" - The database major version is MySQL 8.0 and the + // minor version is 36. // "SQLSERVER_2019_STANDARD" - The database version is SQL Server 2019 // Standard. // "SQLSERVER_2019_ENTERPRISE" - The database version is SQL Server @@ -1043,6 +1173,13 @@ type DatabaseInstance struct { // "SQLSERVER_2019_EXPRESS" - The database version is SQL Server 2019 // Express. // "SQLSERVER_2019_WEB" - The database version is SQL Server 2019 Web. + // "SQLSERVER_2022_STANDARD" - The database version is SQL Server 2022 + // Standard. 
+ // "SQLSERVER_2022_ENTERPRISE" - The database version is SQL Server + // 2022 Enterprise. + // "SQLSERVER_2022_EXPRESS" - The database version is SQL Server 2022 + // Express. + // "SQLSERVER_2022_WEB" - The database version is SQL Server 2022 Web. DatabaseVersion string `json:"databaseVersion,omitempty"` // DiskEncryptionConfiguration: Disk encryption configuration specific @@ -1540,6 +1677,17 @@ func (s *DiskEncryptionStatus) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Empty: A generic empty message that you can re-use to avoid defining +// duplicated empty messages in your APIs. A typical example is to use +// it as the request or the response type of an API method. For +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } +type Empty struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +} + // ExportContext: Database instance export context. type ExportContext struct { // BakExportOptions: Options for exporting BAK files (SQL Server-only) @@ -1613,6 +1761,24 @@ func (s *ExportContext) MarshalJSON() ([]byte, error) { // ExportContextBakExportOptions: Options for exporting BAK files (SQL // Server-only) type ExportContextBakExportOptions struct { + // BakType: Type of this bak file will be export, FULL or DIFF, SQL + // Server only + // + // Possible values: + // "BAK_TYPE_UNSPECIFIED" - default type. + // "FULL" - Full backup. + // "DIFF" - Differential backup. + BakType string `json:"bakType,omitempty"` + + // CopyOnly: Deprecated: copy_only is deprecated. 
Use differential_base + // instead + CopyOnly bool `json:"copyOnly,omitempty"` + + // DifferentialBase: Whether or not the backup can be used as a + // differential base copy_only backup can not be served as differential + // base + DifferentialBase bool `json:"differentialBase,omitempty"` + // StripeCount: Option for specifying how many stripes to use for the // export. If blank, and the value of the striped field is true, the // number of stripes is automatically chosen. @@ -1621,7 +1787,7 @@ type ExportContextBakExportOptions struct { // Striped: Whether or not the export should be striped. Striped bool `json:"striped,omitempty"` - // ForceSendFields is a list of field names (e.g. "StripeCount") to + // ForceSendFields is a list of field names (e.g. "BakType") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -1629,10 +1795,10 @@ type ExportContextBakExportOptions struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "StripeCount") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "BakType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -1837,6 +2003,7 @@ type Flag struct { // "POSTGRES_12" - The database version is PostgreSQL 12. 
// "POSTGRES_13" - The database version is PostgreSQL 13. // "POSTGRES_14" - The database version is PostgreSQL 14. + // "POSTGRES_15" - The database version is PostgreSQL 15. // "MYSQL_8_0" - The database version is MySQL 8. // "MYSQL_8_0_18" - The database major version is MySQL 8.0 and the // minor version is 18. @@ -1854,6 +2021,14 @@ type Flag struct { // minor version is 31. // "MYSQL_8_0_32" - The database major version is MySQL 8.0 and the // minor version is 32. + // "MYSQL_8_0_33" - The database major version is MySQL 8.0 and the + // minor version is 33. + // "MYSQL_8_0_34" - The database major version is MySQL 8.0 and the + // minor version is 34. + // "MYSQL_8_0_35" - The database major version is MySQL 8.0 and the + // minor version is 35. + // "MYSQL_8_0_36" - The database major version is MySQL 8.0 and the + // minor version is 36. // "SQLSERVER_2019_STANDARD" - The database version is SQL Server 2019 // Standard. // "SQLSERVER_2019_ENTERPRISE" - The database version is SQL Server @@ -1861,6 +2036,13 @@ type Flag struct { // "SQLSERVER_2019_EXPRESS" - The database version is SQL Server 2019 // Express. // "SQLSERVER_2019_WEB" - The database version is SQL Server 2019 Web. + // "SQLSERVER_2022_STANDARD" - The database version is SQL Server 2022 + // Standard. + // "SQLSERVER_2022_ENTERPRISE" - The database version is SQL Server + // 2022 Enterprise. + // "SQLSERVER_2022_EXPRESS" - The database version is SQL Server 2022 + // Express. + // "SQLSERVER_2022_WEB" - The database version is SQL Server 2022 Web. AppliesTo []string `json:"appliesTo,omitempty"` // InBeta: Whether or not the flag is considered in beta. @@ -2099,27 +2281,44 @@ func (s *ImportContext) MarshalJSON() ([]byte, error) { // ImportContextBakImportOptions: Import parameters specific to SQL // Server .BAK files type ImportContextBakImportOptions struct { + // BakType: Type of the bak content, FULL or DIFF. + // + // Possible values: + // "BAK_TYPE_UNSPECIFIED" - default type. 
+ // "FULL" - Full backup. + // "DIFF" - Differential backup. + BakType string `json:"bakType,omitempty"` + EncryptionOptions *ImportContextBakImportOptionsEncryptionOptions `json:"encryptionOptions,omitempty"` + // NoRecovery: Whether or not the backup importing will restore database + // with NORECOVERY option Applies only to Cloud SQL for SQL Server. + NoRecovery bool `json:"noRecovery,omitempty"` + + // RecoveryOnly: Whether or not the backup importing request will just + // bring database online without downloading Bak content only one of + // "no_recovery" and "recovery_only" can be true otherwise error will + // return. Applies only to Cloud SQL for SQL Server. + RecoveryOnly bool `json:"recoveryOnly,omitempty"` + // Striped: Whether or not the backup set being restored is striped. // Applies only to Cloud SQL for SQL Server. Striped bool `json:"striped,omitempty"` - // ForceSendFields is a list of field names (e.g. "EncryptionOptions") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "BakType") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "EncryptionOptions") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. 
"BakType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -2525,6 +2724,37 @@ func (s *InstancesListServerCasResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstancesReencryptRequest: Database Instance reencrypt request. +type InstancesReencryptRequest struct { + // BackupReencryptionConfig: Configuration specific to backup + // re-encryption + BackupReencryptionConfig *BackupReencryptionConfig `json:"backupReencryptionConfig,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "BackupReencryptionConfig") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackupReencryptionConfig") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstancesReencryptRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesReencryptRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstancesRestoreBackupRequest: Database instance restore backup // request. type InstancesRestoreBackupRequest struct { @@ -3075,6 +3305,7 @@ type Operation struct { // "AUTO_RESTART" - Performs auto-restart of an HA-enabled Cloud SQL // database for auto recovery. // "REENCRYPT" - Re-encrypts CMEK instances with latest key version. + // "SWITCHOVER" - Switches over to replica instance from primary. OperationType string `json:"operationType,omitempty"` // SelfLink: The URI of this resource. @@ -3583,6 +3814,10 @@ type Settings struct { // for Cloud SQL for SQL Server. ActiveDirectoryConfig *SqlActiveDirectoryConfig `json:"activeDirectoryConfig,omitempty"` + // AdvancedMachineFeatures: Specifies advance machine configuration for + // the instance relevant only for SQL Server. + AdvancedMachineFeatures *AdvancedMachineFeatures `json:"advancedMachineFeatures,omitempty"` + // AuthorizedGaeApplications: The App Engine app IDs that can access // this instance. (Deprecated) Applied to First Generation instances // only. @@ -3632,6 +3867,9 @@ type Settings struct { // Generation instances. CrashSafeReplicationEnabled bool `json:"crashSafeReplicationEnabled,omitempty"` + // DataCacheConfig: Configuration for data cache. + DataCacheConfig *DataCacheConfig `json:"dataCacheConfig,omitempty"` + // DataDiskSizeGb: The size of data disk, in GB. The data disk size // minimum is 10GB. DataDiskSizeGb int64 `json:"dataDiskSizeGb,omitempty,string"` @@ -3663,6 +3901,14 @@ type Settings struct { // DenyMaintenancePeriods: Deny maintenance periods DenyMaintenancePeriods []*DenyMaintenancePeriod `json:"denyMaintenancePeriods,omitempty"` + // Edition: Optional. The edition of the instance. 
+ // + // Possible values: + // "EDITION_UNSPECIFIED" - The instance did not specify the edition. + // "ENTERPRISE" - The instance is an enterprise edition. + // "ENTERPRISE_PLUS" - The instance is an Enterprise Plus edition. + Edition string `json:"edition,omitempty"` + // InsightsConfig: Insights configuration, for now relevant only for // Postgres. InsightsConfig *InsightsConfig `json:"insightsConfig,omitempty"` @@ -3821,7 +4067,8 @@ type SqlExternalSyncSettingError struct { // "BINLOG_NOT_ENABLED" // "INCOMPATIBLE_DATABASE_VERSION" // "REPLICA_ALREADY_SETUP" - // "INSUFFICIENT_PRIVILEGE" + // "INSUFFICIENT_PRIVILEGE" - The replication user is missing + // privileges that are required. // "UNSUPPORTED_MIGRATION_TYPE" - Unsupported migration type. // "NO_PGLOGICAL_INSTALLED" - No pglogical extension installed on // databases, applicable for postgres. @@ -3854,7 +4101,7 @@ type SqlExternalSyncSettingError struct { // "UNSUPPORTED_DEFINER" - The customer has a definer that will break // EM setup. // "SQLSERVER_SERVERNAME_MISMATCH" - SQL Server @@SERVERNAME does not - // match actual host name + // match actual host name. // "PRIMARY_ALREADY_SETUP" - The primary instance has been setup and // will fail the setup. // "UNSUPPORTED_BINLOG_FORMAT" - The primary instance has unsupported @@ -3867,6 +4114,20 @@ type SqlExternalSyncSettingError struct { // eg: PostgreSQL tables without primary keys. // "EXISTING_DATA_IN_REPLICA" - The replica instance contains existing // data. + // "MISSING_OPTIONAL_PRIVILEGES" - The replication user is missing + // privileges that are optional. + // "RISKY_BACKUP_ADMIN_PRIVILEGE" - Additional BACKUP_ADMIN privilege + // is granted to the replication user which may lock source MySQL 8 + // instance for DDLs during initial sync. + // "INSUFFICIENT_GCS_PERMISSIONS" - The Cloud Storage bucket is + // missing necessary permissions. 
+ // "INVALID_FILE_INFO" - The Cloud Storage bucket has an error in the + // file or contains invalid file information. + // "UNSUPPORTED_DATABASE_SETTINGS" - The source instance has + // unsupported database settings for migration. + // "MYSQL_PARALLEL_IMPORT_INSUFFICIENT_PRIVILEGE" - The replication + // user is missing parallel import specific privileges. (e.g. LOCK + // TABLES) for MySQL. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Detail") to @@ -3898,6 +4159,9 @@ type SqlInstancesGetDiskShrinkConfigResponse struct { // Kind: This is always `sql#getDiskShrinkConfig`. Kind string `json:"kind,omitempty"` + // Message: Additional message to customers. + Message string `json:"message,omitempty"` + // MinimalTargetSizeGb: The minimum size to which a disk can be shrunk // in GigaBytes. MinimalTargetSizeGb int64 `json:"minimalTargetSizeGb,omitempty,string"` @@ -3929,6 +4193,43 @@ func (s *SqlInstancesGetDiskShrinkConfigResponse) MarshalJSON() ([]byte, error) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SqlInstancesGetLatestRecoveryTimeResponse: Instance get latest +// recovery time response. +type SqlInstancesGetLatestRecoveryTimeResponse struct { + // Kind: This is always `sql#getLatestRecoveryTime`. + Kind string `json:"kind,omitempty"` + + // LatestRecoveryTime: Timestamp, identifies the latest recovery time of + // the source instance. + LatestRecoveryTime string `json:"latestRecoveryTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. 
+ // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SqlInstancesGetLatestRecoveryTimeResponse) MarshalJSON() ([]byte, error) { + type NoMethod SqlInstancesGetLatestRecoveryTimeResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SqlInstancesRescheduleMaintenanceRequestBody: Reschedule options for // maintenance windows. type SqlInstancesRescheduleMaintenanceRequestBody struct { @@ -3981,6 +4282,17 @@ type SqlInstancesStartExternalSyncRequest struct { // snapshot of the primary instance's data SyncMode string `json:"syncMode,omitempty"` + // SyncParallelLevel: Optional. Parallel level for initial data sync. + // Currently only applicable for MySQL. + // + // Possible values: + // "EXTERNAL_SYNC_PARALLEL_LEVEL_UNSPECIFIED" - Unknown sync parallel + // level. Will be defaulted to OPTIMAL. + // "MIN" - Minimal parallel level. + // "OPTIMAL" - Optimal parallel level. + // "MAX" - Maximum parallel level. + SyncParallelLevel string `json:"syncParallelLevel,omitempty"` + // ForceSendFields is a list of field names (e.g. "MysqlSyncConfig") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -6179,7 +6491,8 @@ type DatabasesInsertCall struct { } // Insert: Inserts a resource containing information about a database -// inside a Cloud SQL instance. +// inside a Cloud SQL instance. 
**Note:** You can't modify the default +// character set and collation. // // - instance: Database instance ID. This does not include the project // ID. @@ -6284,7 +6597,7 @@ func (c *DatabasesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Inserts a resource containing information about a database inside a Cloud SQL instance.", + // "description": "Inserts a resource containing information about a database inside a Cloud SQL instance. **Note:** You can't modify the default character set and collation.", // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/databases", // "httpMethod": "POST", // "id": "sql.databases.insert", @@ -9008,34 +9321,35 @@ func (c *InstancesPromoteReplicaCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "sql.instances.resetSslConfig": +// method id "sql.instances.reencrypt": -type InstancesResetSslConfigCall struct { - s *Service - project string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesReencryptCall struct { + s *Service + project string + instance string + instancesreencryptrequest *InstancesReencryptRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ResetSslConfig: Deletes all client certificates and generates a new -// server SSL certificate for the instance. +// Reencrypt: Reencrypt CMEK instance with latest key version. // // - instance: Cloud SQL instance ID. This does not include the project // ID. -// - project: Project ID of the project that contains the instance. -func (r *InstancesService) ResetSslConfig(project string, instance string) *InstancesResetSslConfigCall { - c := &InstancesResetSslConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: ID of the project that contains the instance. 
+func (r *InstancesService) Reencrypt(project string, instance string, instancesreencryptrequest *InstancesReencryptRequest) *InstancesReencryptCall { + c := &InstancesReencryptCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.instance = instance + c.instancesreencryptrequest = instancesreencryptrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesResetSslConfigCall) Fields(s ...googleapi.Field) *InstancesResetSslConfigCall { +func (c *InstancesReencryptCall) Fields(s ...googleapi.Field) *InstancesReencryptCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9043,21 +9357,21 @@ func (c *InstancesResetSslConfigCall) Fields(s ...googleapi.Field) *InstancesRes // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesResetSslConfigCall) Context(ctx context.Context) *InstancesResetSslConfigCall { +func (c *InstancesReencryptCall) Context(ctx context.Context) *InstancesReencryptCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesResetSslConfigCall) Header() http.Header { +func (c *InstancesReencryptCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesResetSslConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesReencryptCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -9065,9 +9379,14 @@ func (c *InstancesResetSslConfigCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesreencryptrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "sql/v1beta4/projects/{project}/instances/{instance}/resetSslConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "sql/v1beta4/projects/{project}/instances/{instance}/reencrypt") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -9081,14 +9400,14 @@ func (c *InstancesResetSslConfigCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "sql.instances.resetSslConfig" call. +// Do executes the "sql.instances.reencrypt" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstancesResetSslConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesReencryptCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9119,10 +9438,10 @@ func (c *InstancesResetSslConfigCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes all client certificates and generates a new server SSL certificate for the instance.", - // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/resetSslConfig", + // "description": "Reencrypt CMEK instance with latest key version.", + // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/reencrypt", // "httpMethod": "POST", - // "id": "sql.instances.resetSslConfig", + // "id": "sql.instances.reencrypt", // "parameterOrder": [ // "project", // "instance" @@ -9135,13 +9454,16 @@ func (c *InstancesResetSslConfigCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // }, // "project": { - // "description": "Project ID of the project that contains the instance.", + // "description": "ID of the project that contains the instance.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "sql/v1beta4/projects/{project}/instances/{instance}/resetSslConfig", + // "path": "sql/v1beta4/projects/{project}/instances/{instance}/reencrypt", + // "request": { + // "$ref": "InstancesReencryptRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -9153,9 +9475,9 @@ func (c *InstancesResetSslConfigCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "sql.instances.restart": +// method id "sql.instances.resetSslConfig": -type InstancesRestartCall struct { +type InstancesResetSslConfigCall struct { s *Service project string instance string @@ -9164,14 +9486,14 @@ type InstancesRestartCall struct { header_ http.Header } -// 
Restart: Restarts a Cloud SQL instance. +// ResetSslConfig: Deletes all client certificates and generates a new +// server SSL certificate for the instance. // // - instance: Cloud SQL instance ID. This does not include the project // ID. -// - project: Project ID of the project that contains the instance to be -// restarted. -func (r *InstancesService) Restart(project string, instance string) *InstancesRestartCall { - c := &InstancesRestartCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID of the project that contains the instance. +func (r *InstancesService) ResetSslConfig(project string, instance string) *InstancesResetSslConfigCall { + c := &InstancesResetSslConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.instance = instance return c @@ -9180,7 +9502,7 @@ func (r *InstancesService) Restart(project string, instance string) *InstancesRe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesRestartCall) Fields(s ...googleapi.Field) *InstancesRestartCall { +func (c *InstancesResetSslConfigCall) Fields(s ...googleapi.Field) *InstancesResetSslConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9188,21 +9510,21 @@ func (c *InstancesRestartCall) Fields(s ...googleapi.Field) *InstancesRestartCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesRestartCall) Context(ctx context.Context) *InstancesRestartCall { +func (c *InstancesResetSslConfigCall) Context(ctx context.Context) *InstancesResetSslConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesRestartCall) Header() http.Header { +func (c *InstancesResetSslConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesRestartCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesResetSslConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -9212,7 +9534,7 @@ func (c *InstancesRestartCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "sql/v1beta4/projects/{project}/instances/{instance}/restart") + urls := googleapi.ResolveRelative(c.s.BasePath, "sql/v1beta4/projects/{project}/instances/{instance}/resetSslConfig") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -9226,14 +9548,14 @@ func (c *InstancesRestartCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "sql.instances.restart" call. +// Do executes the "sql.instances.resetSslConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesRestartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesResetSslConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9264,10 +9586,10 @@ func (c *InstancesRestartCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Restarts a Cloud SQL instance.", - // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/restart", + // "description": "Deletes all client certificates and generates a new server SSL certificate for the instance.", + // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/resetSslConfig", // "httpMethod": "POST", - // "id": "sql.instances.restart", + // "id": "sql.instances.resetSslConfig", // "parameterOrder": [ // "project", // "instance" @@ -9280,13 +9602,13 @@ func (c *InstancesRestartCall) Do(opts ...googleapi.CallOption) (*Operation, err // "type": "string" // }, // "project": { - // "description": "Project ID of the project that contains the instance to be restarted.", + // "description": "Project ID of the project that contains the instance.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "sql/v1beta4/projects/{project}/instances/{instance}/restart", + // "path": "sql/v1beta4/projects/{project}/instances/{instance}/resetSslConfig", // "response": { // "$ref": "Operation" // }, @@ -9298,16 +9620,161 @@ func (c *InstancesRestartCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "sql.instances.restoreBackup": +// method id "sql.instances.restart": -type InstancesRestoreBackupCall struct { - s *Service - project string - instance string - instancesrestorebackuprequest *InstancesRestoreBackupRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesRestartCall struct { + s *Service + project string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Restart: Restarts a Cloud SQL instance. +// +// - instance: Cloud SQL instance ID. 
This does not include the project +// ID. +// - project: Project ID of the project that contains the instance to be +// restarted. +func (r *InstancesService) Restart(project string, instance string) *InstancesRestartCall { + c := &InstancesRestartCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesRestartCall) Fields(s ...googleapi.Field) *InstancesRestartCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesRestartCall) Context(ctx context.Context) *InstancesRestartCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesRestartCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesRestartCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "sql/v1beta4/projects/{project}/instances/{instance}/restart") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sql.instances.restart" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesRestartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Restarts a Cloud SQL instance.", + // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/restart", + // "httpMethod": "POST", + // "id": "sql.instances.restart", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. 
This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance to be restarted.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "sql/v1beta4/projects/{project}/instances/{instance}/restart", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + +// method id "sql.instances.restoreBackup": + +type InstancesRestoreBackupCall struct { + s *Service + project string + instance string + instancesrestorebackuprequest *InstancesRestoreBackupRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } // RestoreBackup: Restores a backup of a Cloud SQL instance. Using this @@ -10205,6 +10672,150 @@ func (c *InstancesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } +// method id "sql.operations.cancel": + +type OperationsCancelCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Cancel: Cancels an instance operation that has been performed on an +// instance. +// +// - operation: Instance operation ID. +// - project: Project ID of the project that contains the instance. +func (r *OperationsService) Cancel(project string, operation string) *OperationsCancelCall { + c := &OperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *OperationsCancelCall) Fields(s ...googleapi.Field) *OperationsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OperationsCancelCall) Context(ctx context.Context) *OperationsCancelCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OperationsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "sql/v1beta4/projects/{project}/operations/{operation}/cancel") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "operation": c.operation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sql.operations.cancel" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Cancels an instance operation that has been performed on an instance.", + // "flatPath": "sql/v1beta4/projects/{project}/operations/{operation}/cancel", + // "httpMethod": "POST", + // "id": "sql.operations.cancel", + // "parameterOrder": [ + // "project", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Instance operation ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "sql/v1beta4/projects/{project}/operations/{operation}/cancel", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + // method id "sql.operations.get": type OperationsGetCall struct { @@ -10729,6 +11340,166 @@ func (c *ProjectsInstancesGetDiskShrinkConfigCall) Do(opts ...googleapi.CallOpti } +// method id "sql.projects.instances.getLatestRecoveryTime": + +type 
ProjectsInstancesGetLatestRecoveryTimeCall struct { + s *Service + project string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetLatestRecoveryTime: Get Latest Recovery Time for a given instance. +// +// - instance: Cloud SQL instance ID. This does not include the project +// ID. +// - project: Project ID of the project that contains the instance. +func (r *ProjectsInstancesService) GetLatestRecoveryTime(project string, instance string) *ProjectsInstancesGetLatestRecoveryTimeCall { + c := &ProjectsInstancesGetLatestRecoveryTimeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInstancesGetLatestRecoveryTimeCall) Fields(s ...googleapi.Field) *ProjectsInstancesGetLatestRecoveryTimeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInstancesGetLatestRecoveryTimeCall) IfNoneMatch(entityTag string) *ProjectsInstancesGetLatestRecoveryTimeCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsInstancesGetLatestRecoveryTimeCall) Context(ctx context.Context) *ProjectsInstancesGetLatestRecoveryTimeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInstancesGetLatestRecoveryTimeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInstancesGetLatestRecoveryTimeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "sql/v1beta4/projects/{project}/instances/{instance}/getLatestRecoveryTime") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sql.projects.instances.getLatestRecoveryTime" call. +// Exactly one of *SqlInstancesGetLatestRecoveryTimeResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *SqlInstancesGetLatestRecoveryTimeResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. 
+func (c *ProjectsInstancesGetLatestRecoveryTimeCall) Do(opts ...googleapi.CallOption) (*SqlInstancesGetLatestRecoveryTimeResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SqlInstancesGetLatestRecoveryTimeResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Get Latest Recovery Time for a given instance.", + // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/getLatestRecoveryTime", + // "httpMethod": "GET", + // "id": "sql.projects.instances.getLatestRecoveryTime", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Cloud SQL instance ID. 
This does not include the project ID.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "sql/v1beta4/projects/{project}/instances/{instance}/getLatestRecoveryTime", + // "response": { + // "$ref": "SqlInstancesGetLatestRecoveryTimeResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + // method id "sql.projects.instances.performDiskShrink": type ProjectsInstancesPerformDiskShrinkCall struct { diff --git a/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-api.json b/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-api.json index edebc73ad4..b0649447b7 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"34333739363230323936363635393736363430\"", + "etag": "\"383236363234373537353532383237373333\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -1311,7 +1311,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1357,7 +1357,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1399,7 +1399,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1444,7 +1444,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1493,7 +1493,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1545,7 +1545,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1588,7 +1588,7 @@ "type": "string" }, "destinationObject": { - "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the new object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1662,7 +1662,7 @@ ], "parameters": { "destinationBucket": { - "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1773,7 +1773,7 @@ "type": "string" }, "sourceObject": { - "description": "Name of the source object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1843,7 +1843,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1907,7 +1907,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1967,7 +1967,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2054,7 +2054,7 @@ "type": "string" }, "name": { - "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "query", "type": "string" }, @@ -2252,7 +2252,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2332,7 +2332,7 @@ "type": "string" }, "destinationObject": { - "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2443,7 +2443,7 @@ "type": "string" }, "sourceObject": { - "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the source object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2489,7 +2489,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2536,7 +2536,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2612,7 +2612,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -3010,7 +3010,7 @@ } } }, - "revision": "20230301", + "revision": "20230617", "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { diff --git a/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-gen.go b/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-gen.go index 2f9ceefdcd..9f04327d92 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -78,6 +78,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "storage:v1" const apiName = "storage" @@ -7259,7 +7260,8 @@ type ObjectAccessControlsDeleteCall struct { // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7374,7 +7376,7 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -7415,7 +7417,8 @@ type ObjectAccessControlsGetCall struct { // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7568,7 +7571,7 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -7607,7 +7610,8 @@ type ObjectAccessControlsInsertCall struct { // // - bucket: Name of a bucket. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). 
func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7744,7 +7748,7 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -7786,7 +7790,8 @@ type ObjectAccessControlsListCall struct { // // - bucket: Name of a bucket. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7930,7 +7935,7 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -7973,7 +7978,8 @@ type ObjectAccessControlsPatchCall struct { // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8119,7 +8125,7 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -8165,7 +8171,8 @@ type ObjectAccessControlsUpdateCall struct { // user-emailAddress, group-groupId, group-emailAddress, allUsers, or // allAuthenticatedUsers. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). 
func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8311,7 +8318,7 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -8356,7 +8363,7 @@ type ObjectsComposeCall struct { // objects. The destination object is stored in this bucket. // - destinationObject: Name of the new object. For information about // how to URL encode object names to be path safe, see Encoding URI -// Path Parts. +// Path Parts (http://cloud/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.destinationBucket = destinationBucket @@ -8539,7 +8546,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "destinationObject": { - // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -8624,7 +8631,8 @@ type ObjectsCopyCall struct { // - destinationBucket: Name of the bucket in which to store the new // object. Overrides the provided object metadata's bucket value, if // any.For information about how to URL encode object names to be path -// safe, see Encoding URI Path Parts. +// safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). // - destinationObject: Name of the new object. Required when the object // metadata is not otherwise provided. Overrides the object metadata's // name value, if any. @@ -8632,7 +8640,7 @@ type ObjectsCopyCall struct { // object. // - sourceObject: Name of the source object. For information about how // to URL encode object names to be path safe, see Encoding URI Path -// Parts. +// Parts (http://cloud/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -8893,7 +8901,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { // ], // "parameters": { // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the bucket in which to store the new object. 
Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9004,7 +9012,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9048,7 +9056,8 @@ type ObjectsDeleteCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9214,7 +9223,7 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9251,7 +9260,8 @@ type ObjectsGetCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9483,7 +9493,7 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9540,7 +9550,8 @@ type ObjectsGetIamPolicyCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). 
func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9684,7 +9695,7 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -9796,7 +9807,8 @@ func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { // Name sets the optional parameter "name": Name of the object. Required // when the object metadata is not otherwise provided. Overrides the // object metadata's name value, if any. For information about how to -// URL encode object names to be path safe, see Encoding URI Path Parts. +// URL encode object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { c.urlParams_.Set("name", name) return c @@ -10106,7 +10118,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "name": { - // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "query", // "type": "string" // }, @@ -10516,7 +10528,8 @@ type ObjectsPatchCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -10755,7 +10768,7 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -10838,12 +10851,13 @@ type ObjectsRewriteCall struct { // - destinationObject: Name of the new object. Required when the object // metadata is not otherwise provided. Overrides the object metadata's // name value, if any. For information about how to URL encode object -// names to be path safe, see Encoding URI Path Parts. +// names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). // - sourceBucket: Name of the bucket in which to find the source // object. // - sourceObject: Name of the source object. For information about how // to URL encode object names to be path safe, see Encoding URI Path -// Parts. 
+// Parts (http://cloud/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -11139,7 +11153,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, // "type": "string" // }, // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -11250,7 +11264,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, // "type": "string" // }, // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -11293,7 +11307,8 @@ type ObjectsSetIamPolicyCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. 
+// object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11430,7 +11445,7 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -11474,7 +11489,8 @@ type ObjectsTestIamPermissionsCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). // - permissions: Permissions to test. func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -11621,7 +11637,7 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" @@ -11670,7 +11686,8 @@ type ObjectsUpdateCall struct { // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// object names to be path safe, see Encoding URI Path Parts +// (http://cloud/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11909,7 +11926,7 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](http://cloud/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" diff --git a/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json b/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json index e2fc024b39..622338170b 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json +++ b/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json @@ -632,7 +632,7 @@ } } }, - "revision": "20230307", + "revision": "20230516", "rootUrl": "https://storagetransfer.googleapis.com/", "schemas": { "AgentPool": { @@ -725,6 +725,10 @@ "description": "Required. S3 Bucket name (see [Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)).", "type": "string" }, + "credentialsSecret": { + "description": "Optional. The Resource name of a secret in Secret Manager. The Azure SAS token must be stored in Secret Manager in JSON format: { \"sas_token\" : \"SAS_TOKEN\" } GoogleServiceAccount must be granted `roles/secretmanager.secretAccessor` for the resource. See [Configure access to a source: Microsoft Azure Blob Storage] (https://cloud.google.com/storage-transfer/docs/source-microsoft-azure#secret_manager) for more information. If `credentials_secret` is specified, do not specify azure_credentials. This feature is in [preview](https://cloud.google.com/terms/service-terms#1). Format: `projects/{project_number}/secrets/{secret_name}`", + "type": "string" + }, "path": { "description": "Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. 
As such, it should generally not begin with a '/'.", "type": "string" @@ -748,6 +752,10 @@ "description": "Required. The container to transfer from the Azure Storage account.", "type": "string" }, + "credentialsSecret": { + "description": "Optional. The Resource name of a secret in Secret Manager. The Azure SAS token must be stored in Secret Manager in JSON format: { \"sas_token\" : \"SAS_TOKEN\" } GoogleServiceAccount must be granted `roles/secretmanager.secretAccessor` for the resource. See [Configure access to a source: Microsoft Azure Blob Storage] (https://cloud.google.com/storage-transfer/docs/source-microsoft-azure#secret_manager) for more information. If `credentials_secret` is specified, do not specify azure_credentials. This feature is in [preview](https://cloud.google.com/terms/service-terms#1). Format: `projects/{project_number}/secrets/{secret_name}`", + "type": "string" + }, "path": { "description": "Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'.", "type": "string" @@ -1132,7 +1140,7 @@ "enumDescriptions": [ "Storage class behavior is unspecified.", "Use the destination bucket's default storage class.", - "Preserve the object's original storage class. This is only supported for transfers from Google Cloud Storage buckets.", + "Preserve the object's original storage class. This is only supported for transfers from Google Cloud Storage buckets. REGIONAL and MULTI_REGIONAL storage classes will be mapped to STANDARD to ensure they can be written to the destination bucket.", "Set the storage class to STANDARD.", "Set the storage class to NEARLINE.", "Set the storage class to COLDLINE.", @@ -1839,7 +1847,7 @@ }, "gcsIntermediateDataLocation": { "$ref": "GcsData", - "description": "Cloud Storage intermediate data location." 
+ "description": "For transfers between file systems, specifies a Cloud Storage bucket to be used as an intermediate location through which to transfer data. See [Transfer data between file systems](https://cloud.google.com/storage-transfer/docs/file-to-file) for more information." }, "httpDataSource": { "$ref": "HttpData", diff --git a/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go b/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go index c170440717..8554a03253 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go +++ b/terraform/providers/google/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-gen.go @@ -71,6 +71,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "storagetransfer:v1" const apiName = "storagetransfer" @@ -344,6 +345,18 @@ type AwsS3Data struct { // (https://docs.aws.amazon.com/AmazonS3/latest/dev/create-bucket-get-location-example.html)). BucketName string `json:"bucketName,omitempty"` + // CredentialsSecret: Optional. The Resource name of a secret in Secret + // Manager. The Azure SAS token must be stored in Secret Manager in JSON + // format: { "sas_token" : "SAS_TOKEN" } GoogleServiceAccount must be + // granted `roles/secretmanager.secretAccessor` for the resource. See + // [Configure access to a source: Microsoft Azure Blob Storage] + // (https://cloud.google.com/storage-transfer/docs/source-microsoft-azure#secret_manager) + // for more information. If `credentials_secret` is specified, do not + // specify azure_credentials. This feature is in preview + // (https://cloud.google.com/terms/service-terms#1). Format: + // `projects/{project_number}/secrets/{secret_name}` + CredentialsSecret string `json:"credentialsSecret,omitempty"` + // Path: Root path to transfer objects. 
Must be an empty string or full // path name that ends with a '/'. This field is treated as an object // prefix. As such, it should generally not begin with a '/'. @@ -401,6 +414,18 @@ type AzureBlobStorageData struct { // account. Container string `json:"container,omitempty"` + // CredentialsSecret: Optional. The Resource name of a secret in Secret + // Manager. The Azure SAS token must be stored in Secret Manager in JSON + // format: { "sas_token" : "SAS_TOKEN" } GoogleServiceAccount must be + // granted `roles/secretmanager.secretAccessor` for the resource. See + // [Configure access to a source: Microsoft Azure Blob Storage] + // (https://cloud.google.com/storage-transfer/docs/source-microsoft-azure#secret_manager) + // for more information. If `credentials_secret` is specified, do not + // specify azure_credentials. This feature is in preview + // (https://cloud.google.com/terms/service-terms#1). Format: + // `projects/{project_number}/secrets/{secret_name}` + CredentialsSecret string `json:"credentialsSecret,omitempty"` + // Path: Root path to transfer objects. Must be an empty string or full // path name that ends with a '/'. This field is treated as an object // prefix. As such, it should generally not begin with a '/'. @@ -1130,7 +1155,8 @@ type MetadataOptions struct { // bucket's default storage class. // "STORAGE_CLASS_PRESERVE" - Preserve the object's original storage // class. This is only supported for transfers from Google Cloud Storage - // buckets. + // buckets. REGIONAL and MULTI_REGIONAL storage classes will be mapped + // to STANDARD to ensure they can be written to the destination bucket. // "STORAGE_CLASS_STANDARD" - Set the storage class to STANDARD. // "STORAGE_CLASS_NEARLINE" - Set the storage class to NEARLINE. // "STORAGE_CLASS_COLDLINE" - Set the storage class to COLDLINE. @@ -2156,8 +2182,12 @@ type TransferSpec struct { // GcsDataSource: A Cloud Storage data source. 
GcsDataSource *GcsData `json:"gcsDataSource,omitempty"` - // GcsIntermediateDataLocation: Cloud Storage intermediate data - // location. + // GcsIntermediateDataLocation: For transfers between file systems, + // specifies a Cloud Storage bucket to be used as an intermediate + // location through which to transfer data. See Transfer data between + // file systems + // (https://cloud.google.com/storage-transfer/docs/file-to-file) for + // more information. GcsIntermediateDataLocation *GcsData `json:"gcsIntermediateDataLocation,omitempty"` // HttpDataSource: An HTTP URL data source. diff --git a/terraform/providers/google/vendor/google.golang.org/api/transport/grpc/dial.go b/terraform/providers/google/vendor/google.golang.org/api/transport/grpc/dial.go index 20c94fa640..e1403e08ee 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/terraform/providers/google/vendor/google.golang.org/api/transport/grpc/dial.go @@ -9,7 +9,6 @@ package grpc import ( "context" - "crypto/tls" "errors" "log" "net" @@ -22,7 +21,6 @@ import ( "google.golang.org/api/internal" "google.golang.org/api/option" "google.golang.org/grpc" - "google.golang.org/grpc/credentials" grpcgoogle "google.golang.org/grpc/credentials/google" grpcinsecure "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/oauth" @@ -122,18 +120,13 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if o.GRPCConn != nil { return o.GRPCConn, nil } - clientCertSource, endpoint, err := internal.GetClientCertificateSourceAndEndpoint(o) + transportCreds, endpoint, err := internal.GetGRPCTransportConfigAndEndpoint(o) if err != nil { return nil, err } - var transportCreds credentials.TransportCredentials if insecure { transportCreds = grpcinsecure.NewCredentials() - } else { - transportCreds = credentials.NewTLS(&tls.Config{ - GetClientCertificate: clientCertSource, - }) } // Initialize gRPC dial options with transport-level 
security options. @@ -171,7 +164,7 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C grpcOpts = append(grpcOpts, timeoutDialerOption) } // Check if google-c2p resolver is enabled for DirectPath - if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") { + if isDirectPathXdsUsed(o) { // google-c2p resolver target must not have a port number if addr, _, err := net.SplitHostPort(endpoint); err == nil { endpoint = "google-c2p:///" + addr @@ -258,6 +251,19 @@ func isDirectPathEnabled(endpoint string, o *internal.DialSettings) bool { return true } +func isDirectPathXdsUsed(o *internal.DialSettings) bool { + // Method 1: Enable DirectPath xDS by env; + if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") { + return true + } + // Method 2: Enable DirectPath xDS by option; + if o.EnableDirectPathXds { + return true + } + return false + +} + func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource, o *internal.DialSettings) bool { if ts == nil { return false diff --git a/terraform/providers/google/vendor/google.golang.org/api/transport/http/dial.go b/terraform/providers/google/vendor/google.golang.org/api/transport/http/dial.go index 403509d08f..eca0c3ba79 100644 --- a/terraform/providers/google/vendor/google.golang.org/api/transport/http/dial.go +++ b/terraform/providers/google/vendor/google.golang.org/api/transport/http/dial.go @@ -33,7 +33,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if err != nil { return nil, "", err } - clientCertSource, endpoint, err := internal.GetClientCertificateSourceAndEndpoint(settings) + clientCertSource, dialTLSContext, endpoint, err := internal.GetHTTPTransportConfigAndEndpoint(settings) if err != nil { return nil, "", err } @@ -41,7 +41,8 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if settings.HTTPClient != nil { return settings.HTTPClient, endpoint, nil } - trans, err := newTransport(ctx, 
defaultBaseTransport(ctx, clientCertSource), settings) + + trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource, dialTLSContext), settings) if err != nil { return nil, "", err } @@ -152,7 +153,7 @@ var appengineUrlfetchHook func(context.Context) http.RoundTripper // Otherwise, use a default transport, taking most defaults from // http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well. -func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) http.RoundTripper { +func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { if appengineUrlfetchHook != nil { return appengineUrlfetchHook(ctx) } @@ -171,6 +172,10 @@ func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) htt GetClientCertificate: clientCertSource, } } + if dialTLSContext != nil { + // If DialTLSContext is set, TLSClientConfig wil be ignored + trans.DialTLSContext = dialTLSContext + } configureHTTP2(trans) diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/LICENSE b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 4c91534d5a..83774fbcbe 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v3.21.12 // source: google/api/client.proto package annotations @@ -53,6 +53,12 @@ const ( ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3 // Street View Org. ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4 + // Shopping Org. + ClientLibraryOrganization_SHOPPING ClientLibraryOrganization = 5 + // Geo Org. + ClientLibraryOrganization_GEO ClientLibraryOrganization = 6 + // Generative AI - https://developers.generativeai.google + ClientLibraryOrganization_GENERATIVE_AI ClientLibraryOrganization = 7 ) // Enum value maps for ClientLibraryOrganization. @@ -63,13 +69,19 @@ var ( 2: "ADS", 3: "PHOTOS", 4: "STREET_VIEW", + 5: "SHOPPING", + 6: "GEO", + 7: "GENERATIVE_AI", } ClientLibraryOrganization_value = map[string]int32{ "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0, - "CLOUD": 1, - "ADS": 2, - "PHOTOS": 3, - "STREET_VIEW": 4, + "CLOUD": 1, + "ADS": 2, + "PHOTOS": 3, + "STREET_VIEW": 4, + "SHOPPING": 5, + "GEO": 6, + "GENERATIVE_AI": 7, } ) @@ -223,7 +235,9 @@ type ClientLibrarySettings struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Version of the API to apply these settings to. + // Version of the API to apply these settings to. This is the full protobuf + // package for the API, ending in the version element. + // Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1". Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Launch stage of this version of the API. LaunchStage api.LaunchStage `protobuf:"varint,2,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` @@ -368,7 +382,7 @@ type Publishing struct { // A list of API method settings, e.g. the behavior for methods that use the // long-running operation pattern. 
MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"` - // Link to a place that API users can report issues. Example: + // Link to a *public* URI where users can report issues. Example: // https://issuetracker.google.com/issues/new?component=190865&template=1161103 NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"` // Link to product home page. Example: @@ -392,6 +406,9 @@ type Publishing struct { // times in this list, then the last one wins. Settings from earlier // settings with the same version string are discarded. LibrarySettings []*ClientLibrarySettings `protobuf:"bytes,109,rep,name=library_settings,json=librarySettings,proto3" json:"library_settings,omitempty"` + // Optional link to proto reference documentation. Example: + // https://cloud.google.com/pubsub/lite/docs/reference/rpc + ProtoReferenceDocumentationUri string `protobuf:"bytes,110,opt,name=proto_reference_documentation_uri,json=protoReferenceDocumentationUri,proto3" json:"proto_reference_documentation_uri,omitempty"` } func (x *Publishing) Reset() { @@ -489,6 +506,13 @@ func (x *Publishing) GetLibrarySettings() []*ClientLibrarySettings { return nil } +func (x *Publishing) GetProtoReferenceDocumentationUri() string { + if x != nil { + return x.ProtoReferenceDocumentationUri + } + return "" +} + // Settings for Java client libraries. type JavaSettings struct { state protoimpl.MessageState @@ -783,6 +807,31 @@ type DotnetSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Map from original service names to renamed versions. + // This is used when the default generated types + // would cause a naming conflict. (Neither name is + // fully-qualified.) + // Example: Subscriber to SubscriberServiceApi. 
+ RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Map from full resource types to the effective short name + // for the resource. This is used when otherwise resource + // named from different services would cause naming collisions. + // Example entry: + // "datalabeling.googleapis.com/Dataset": "DataLabelingDataset" + RenamedResources map[string]string `protobuf:"bytes,3,rep,name=renamed_resources,json=renamedResources,proto3" json:"renamed_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // List of full resource types to ignore during generation. + // This is typically used for API-specific Location resources, + // which should be handled by the generator as if they were actually + // the common Location resources. + // Example entry: "documentai.googleapis.com/Location" + IgnoredResources []string `protobuf:"bytes,4,rep,name=ignored_resources,json=ignoredResources,proto3" json:"ignored_resources,omitempty"` + // Namespaces which must be aliased in snippets due to + // a known (but non-generator-predictable) naming collision + ForcedNamespaceAliases []string `protobuf:"bytes,5,rep,name=forced_namespace_aliases,json=forcedNamespaceAliases,proto3" json:"forced_namespace_aliases,omitempty"` + // Method signatures (in the form "service.method(signature)") + // which are provided separately, so shouldn't be generated. + // Snippets *calling* these methods are still generated, however. 
+ HandwrittenSignatures []string `protobuf:"bytes,6,rep,name=handwritten_signatures,json=handwrittenSignatures,proto3" json:"handwritten_signatures,omitempty"` } func (x *DotnetSettings) Reset() { @@ -824,6 +873,41 @@ func (x *DotnetSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *DotnetSettings) GetRenamedServices() map[string]string { + if x != nil { + return x.RenamedServices + } + return nil +} + +func (x *DotnetSettings) GetRenamedResources() map[string]string { + if x != nil { + return x.RenamedResources + } + return nil +} + +func (x *DotnetSettings) GetIgnoredResources() []string { + if x != nil { + return x.IgnoredResources + } + return nil +} + +func (x *DotnetSettings) GetForcedNamespaceAliases() []string { + if x != nil { + return x.ForcedNamespaceAliases + } + return nil +} + +func (x *DotnetSettings) GetHandwrittenSignatures() []string { + if x != nil { + return x.HandwrittenSignatures + } + return nil +} + // Settings for Ruby client libraries. type RubySettings struct { state protoimpl.MessageState @@ -938,8 +1022,8 @@ type MethodSettings struct { // Example of a YAML configuration:: // // publishing: - // method_behavior: - // - selector: CreateAdDomain + // method_settings: + // - selector: google.cloud.speech.v2.Speech.BatchRecognize // long_running: // initial_poll_delay: // seconds: 60 # 1 minute @@ -1025,7 +1109,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[13] + mi := &file_google_api_client_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1038,7 +1122,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[13] + mi := 
&file_google_api_client_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1252,7 +1336,7 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x22, 0xe0, 0x03, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, + 0x6e, 0x67, 0x73, 0x22, 0xab, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, @@ -1282,118 +1366,155 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, - 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, - 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, - 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 
0x69, 0x18, 0x6e, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, + 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, + 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, + 
0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x50, 0x79, 0x74, - 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, - 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, - 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 
0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, + 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, + 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, + 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, - 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 
0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, - 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, - 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, - 0x67, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, - 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, - 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, - 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, - 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, - 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, - 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, - 0x0a, 0x0e, 0x6d, 0x61, 
0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, - 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, + 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, + 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 
0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, + 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, + 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, + 
0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, + 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, + 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a, + 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, + 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, - 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0x79, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 
0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, - 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, - 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, - 0x57, 0x10, 0x04, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, - 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, - 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, - 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, - 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 
0x01, 0x28, 0x09, - 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, - 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, - 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, - 0x65, 0x73, 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, + 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, + 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, + 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, + 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x52, + 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, + 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, + 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, + 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, + 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, + 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, + 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, + 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, + 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 
0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, + 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, + 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, 0x0e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 
0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, + 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1409,7 +1530,7 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16) var file_google_api_client_proto_goTypes = []interface{}{ (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination @@ -1426,15 +1547,17 @@ var file_google_api_client_proto_goTypes = []interface{}{ (*GoSettings)(nil), // 12: google.api.GoSettings (*MethodSettings)(nil), // 13: google.api.MethodSettings nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - (*MethodSettings_LongRunning)(nil), // 15: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 16: google.api.LaunchStage - (*durationpb.Duration)(nil), // 17: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 18: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 19: google.protobuf.ServiceOptions + nil, // 15: google.api.DotnetSettings.RenamedServicesEntry + nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry + (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 18: google.api.LaunchStage + (*durationpb.Duration)(nil), // 19: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 16, // 1: 
google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings @@ -1453,20 +1576,22 @@ var file_google_api_client_proto_depIdxs = []int32{ 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 20: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 21: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 22: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 17, // 23: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 17, // 24: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 17, // 25: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 18, // 26: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 19, // 27: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 19, // 28: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 29, // [29:29] is the sub-list for method output_type - 29, // [29:29] is the sub-list for method input_type - 29, // [29:29] is the sub-list for extension type_name - 26, // [26:29] is the sub-list for extension extendee - 0, // [0:26] is the sub-list for field type_name + 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> 
google.api.DotnetSettings.RenamedServicesEntry + 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 28, // [28:31] is the sub-list for extension extendee + 0, // [0:28] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1619,7 +1744,7 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1638,7 +1763,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 14, + NumMessages: 16, NumExtensions: 3, NumServices: 0, }, diff --git 
a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index 164e0df0bf..dbe2e2d0c6 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 -// protoc v3.12.2 +// protoc-gen-go v1.26.0 +// protoc v3.21.9 // source: google/api/field_behavior.proto package annotations @@ -149,13 +149,13 @@ var ( // // Examples: // - // string name = 1 [(google.api.field_behavior) = REQUIRED]; - // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // google.protobuf.Duration ttl = 1 - // [(google.api.field_behavior) = INPUT_ONLY]; - // google.protobuf.Timestamp expire_time = 1 - // [(google.api.field_behavior) = OUTPUT_ONLY, - // (google.api.field_behavior) = IMMUTABLE]; + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; // // repeated google.api.FieldBehavior field_behavior = 1052; E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0] diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go 
b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 6f11b7c500..8a0e1c345b 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/api/http.proto package annotations @@ -270,15 +270,18 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // 1. Leaf request fields (recursive expansion nested messages in the request // message) are classified into three categories: // - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They +// are passed via the HTTP // request body. // - All other fields are passed via the URL query parameters, and the // parameter name is the field path in the request message. A repeated // field can be represented as multiple query parameters under the same // name. -// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL +// query parameter, all fields // are passed via URL path and HTTP request body. -// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all +// 3. 
If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP +// request body, all // fields are passed via URL path and URL query parameters. // // ### Path template syntax @@ -377,13 +380,15 @@ type HttpRule struct { // Selects a method to which this rule applies. // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + // Refer to [selector][google.api.DocumentationRule.selector] for syntax + // details. Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Determines the URL pattern is matched by this rules. This pattern can be // used with any of the {get|put|post|delete|patch} methods. A custom method // can be defined using the 'custom' field. // // Types that are assignable to Pattern: + // // *HttpRule_Get // *HttpRule_Put // *HttpRule_Post diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 13ea54b294..bbcc12d29c 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/api/resource.proto package annotations @@ -218,14 +218,14 @@ type ResourceDescriptor struct { // The path pattern must follow the syntax, which aligns with HTTP binding // syntax: // - // Template = Segment { "/" Segment } ; - // Segment = LITERAL | Variable ; - // Variable = "{" LITERAL "}" ; + // Template = Segment { "/" Segment } ; + // Segment = LITERAL | Variable ; + // Variable = "{" LITERAL "}" ; // // Examples: // - // - "projects/{project}/topics/{topic}" - // - "projects/{project}/knowledgeBases/{knowledge_base}" + // - "projects/{project}/topics/{topic}" + // - "projects/{project}/knowledgeBases/{knowledge_base}" // // The components in braces correspond to the IDs for each resource in the // hierarchy. It is expected that, if multiple patterns are provided, @@ -239,17 +239,17 @@ type ResourceDescriptor struct { // // Example: // - // // The InspectTemplate message originally only supported resource - // // names with organization, and project was added later. - // message InspectTemplate { - // option (google.api.resource) = { - // type: "dlp.googleapis.com/InspectTemplate" - // pattern: - // "organizations/{organization}/inspectTemplates/{inspect_template}" - // pattern: "projects/{project}/inspectTemplates/{inspect_template}" - // history: ORIGINALLY_SINGLE_PATTERN - // }; - // } + // // The InspectTemplate message originally only supported resource + // // names with organization, and project was added later. 
+ // message InspectTemplate { + // option (google.api.resource) = { + // type: "dlp.googleapis.com/InspectTemplate" + // pattern: + // "organizations/{organization}/inspectTemplates/{inspect_template}" + // pattern: "projects/{project}/inspectTemplates/{inspect_template}" + // history: ORIGINALLY_SINGLE_PATTERN + // }; + // } History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` // The plural name used in the resource name and permission names, such as // 'projects' for the resource name of 'projects/{project}' and the permission @@ -362,22 +362,22 @@ type ResourceReference struct { // // Example: // - // message Subscription { - // string topic = 2 [(google.api.resource_reference) = { - // type: "pubsub.googleapis.com/Topic" - // }]; - // } + // message Subscription { + // string topic = 2 [(google.api.resource_reference) = { + // type: "pubsub.googleapis.com/Topic" + // }]; + // } // // Occasionally, a field may reference an arbitrary resource. In this case, // APIs use the special value * in their resource reference. // // Example: // - // message GetIamPolicyRequest { - // string resource = 2 [(google.api.resource_reference) = { - // type: "*" - // }]; - // } + // message GetIamPolicyRequest { + // string resource = 2 [(google.api.resource_reference) = { + // type: "*" + // }]; + // } Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // The resource type of a child collection that the annotated field // references. 
This is useful for annotating the `parent` field that @@ -385,11 +385,11 @@ type ResourceReference struct { // // Example: // - // message ListLogEntriesRequest { - // string parent = 1 [(google.api.resource_reference) = { - // child_type: "logging.googleapis.com/LogEntry" - // }; - // } + // message ListLogEntriesRequest { + // string parent = 1 [(google.api.resource_reference) = { + // child_type: "logging.googleapis.com/LogEntry" + // }; + // } ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"` } diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go index 6707a7b1c1..9a9ae04c29 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/api/routing.proto package annotations @@ -468,46 +468,46 @@ type RoutingParameter struct { // // Example: // - // -- This is a field in the request message - // | that the header value will be extracted from. - // | - // | -- This is the key name in the - // | | routing header. - // V | - // field: "table_name" v - // path_template: "projects/*/{table_location=instances/*}/tables/*" - // ^ ^ - // | | - // In the {} brackets is the pattern that -- | - // specifies what to extract from the | - // field as a value to be sent. 
| - // | - // The string in the field must match the whole pattern -- - // before brackets, inside brackets, after brackets. + // -- This is a field in the request message + // | that the header value will be extracted from. + // | + // | -- This is the key name in the + // | | routing header. + // V | + // field: "table_name" v + // path_template: "projects/*/{table_location=instances/*}/tables/*" + // ^ ^ + // | | + // In the {} brackets is the pattern that -- | + // specifies what to extract from the | + // field as a value to be sent. | + // | + // The string in the field must match the whole pattern -- + // before brackets, inside brackets, after brackets. // // When looking at this specific example, we can see that: - // - A key-value pair with the key `table_location` - // and the value matching `instances/*` should be added - // to the x-goog-request-params routing header. - // - The value is extracted from the request message's `table_name` field - // if it matches the full pattern specified: - // `projects/*/instances/*/tables/*`. + // - A key-value pair with the key `table_location` + // and the value matching `instances/*` should be added + // to the x-goog-request-params routing header. + // - The value is extracted from the request message's `table_name` field + // if it matches the full pattern specified: + // `projects/*/instances/*/tables/*`. // // **NB:** If the `path_template` field is not provided, the key name is // equal to the field name, and the whole field should be sent as a value. // This makes the pattern for the field and the value functionally equivalent // to `**`, and the configuration // - // { - // field: "table_name" - // } + // { + // field: "table_name" + // } // // is a functionally equivalent shorthand to: // - // { - // field: "table_name" - // path_template: "{table_name=**}" - // } + // { + // field: "table_name" + // path_template: "{table_name=**}" + // } // // See Example 1 for more details. 
PathTemplate string `protobuf:"bytes,2,opt,name=path_template,json=pathTemplate,proto3" json:"path_template,omitempty"` diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go index 7107531377..454948669d 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.18.1 +// protoc v3.21.9 // source: google/api/launch_stage.proto package api diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go new file mode 100644 index 0000000000..1d3f1b5b7e --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go @@ -0,0 +1,23 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// This file, and the {{.RootMod}} import, won't actually become part of +// the resultant binary. +//go:build modhack +// +build modhack + +package api + +// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "google.golang.org/genproto/internal" diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go index 4170de70fd..b48798f0ab 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.12 // source: google/bigtable/admin/v2/bigtable_table_admin.proto package admin @@ -25,9 +25,9 @@ import ( reflect "reflect" sync "sync" + iampb "cloud.google.com/go/iam/apiv1/iampb" + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" _ "google.golang.org/genproto/googleapis/api/annotations" - v1 "google.golang.org/genproto/googleapis/iam/v1" - longrunning "google.golang.org/genproto/googleapis/longrunning" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -813,11 +813,14 @@ type UpdateTableRequest struct { // The table's `name` field is used to identify the table to update. Table *Table `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` // Required. The list of fields to update. - // A mask specifying which fields (e.g. `deletion_protection`) in the `table` + // A mask specifying which fields (e.g. 
`change_stream_config`) in the `table` // field should be updated. This mask is relative to the `table` field, not to // the request message. The wildcard (*) path is currently not supported. - // Currently UpdateTable is only supported for the following field: - // - `deletion_protection` + // Currently UpdateTable is only supported for the following fields: + // + // * `change_stream_config` + // * `change_stream_config.retention_period` + // * `deletion_protection` // // If `column_families` is set in `update_mask`, it will return an // UNIMPLEMENTED error. @@ -3361,13 +3364,13 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_goTypes = []interfa (*Snapshot)(nil), // 43: google.bigtable.admin.v2.Snapshot (*Backup)(nil), // 44: google.bigtable.admin.v2.Backup (*ColumnFamily)(nil), // 45: google.bigtable.admin.v2.ColumnFamily - (*v1.GetIamPolicyRequest)(nil), // 46: google.iam.v1.GetIamPolicyRequest - (*v1.SetIamPolicyRequest)(nil), // 47: google.iam.v1.SetIamPolicyRequest - (*v1.TestIamPermissionsRequest)(nil), // 48: google.iam.v1.TestIamPermissionsRequest - (*longrunning.Operation)(nil), // 49: google.longrunning.Operation + (*iampb.GetIamPolicyRequest)(nil), // 46: google.iam.v1.GetIamPolicyRequest + (*iampb.SetIamPolicyRequest)(nil), // 47: google.iam.v1.SetIamPolicyRequest + (*iampb.TestIamPermissionsRequest)(nil), // 48: google.iam.v1.TestIamPermissionsRequest + (*longrunningpb.Operation)(nil), // 49: google.longrunning.Operation (*emptypb.Empty)(nil), // 50: google.protobuf.Empty - (*v1.Policy)(nil), // 51: google.iam.v1.Policy - (*v1.TestIamPermissionsResponse)(nil), // 52: google.iam.v1.TestIamPermissionsResponse + (*iampb.Policy)(nil), // 51: google.iam.v1.Policy + (*iampb.TestIamPermissionsResponse)(nil), // 52: google.iam.v1.TestIamPermissionsResponse } var file_google_bigtable_admin_v2_bigtable_table_admin_proto_depIdxs = []int32{ 35, // 0: google.bigtable.admin.v2.RestoreTableMetadata.source_type:type_name -> 
google.bigtable.admin.v2.RestoreSourceType @@ -3945,17 +3948,17 @@ type BigtableTableAdminClient interface { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. - CreateTableFromSnapshot(ctx context.Context, in *CreateTableFromSnapshotRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + CreateTableFromSnapshot(ctx context.Context, in *CreateTableFromSnapshotRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Lists all tables served from a specified instance. ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) // Gets metadata information about the specified table. GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*Table, error) // Updates a specified table. - UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Permanently deletes a specified table and all of its data. DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Restores a specified table which was accidentally deleted. - UndeleteTable(ctx context.Context, in *UndeleteTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + UndeleteTable(ctx context.Context, in *UndeleteTableRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Performs a series of column family modifications on the specified table. // Either all or none of the modifications will occur before this method // returns, but data requests received prior to that point may see a table @@ -3982,7 +3985,7 @@ type BigtableTableAdminClient interface { // feature might be changed in backward-incompatible ways and is not // recommended for production use. 
It is not subject to any SLA or deprecation // policy. - SnapshotTable(ctx context.Context, in *SnapshotTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + SnapshotTable(ctx context.Context, in *SnapshotTableRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Gets metadata information about the specified snapshot. // // Note: This is a private alpha release of Cloud Bigtable snapshots. This @@ -4015,7 +4018,7 @@ type BigtableTableAdminClient interface { // [response][google.longrunning.Operation.response] field type is // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the returned operation will stop the // creation and delete the backup. - CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Gets metadata on a pending or completed Cloud Bigtable Backup. GetBackup(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error) // Updates a pending or completed Cloud Bigtable Backup. @@ -4033,16 +4036,16 @@ type BigtableTableAdminClient interface { // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The // [response][google.longrunning.Operation.response] type is // [Table][google.bigtable.admin.v2.Table], if successful. - RestoreTable(ctx context.Context, in *RestoreTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + RestoreTable(ctx context.Context, in *RestoreTableRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Gets the access control policy for a Table or Backup resource. // Returns an empty policy if the resource exists but does not have a policy // set. 
- GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) + GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Sets the access control policy on a Table or Backup resource. // Replaces any existing policy. - SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) + SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Returns permissions that the caller has on the specified Table or Backup resource. - TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) + TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) } type bigtableTableAdminClient struct { @@ -4062,8 +4065,8 @@ func (c *bigtableTableAdminClient) CreateTable(ctx context.Context, in *CreateTa return out, nil } -func (c *bigtableTableAdminClient) CreateTableFromSnapshot(ctx context.Context, in *CreateTableFromSnapshotRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { - out := new(longrunning.Operation) +func (c *bigtableTableAdminClient) CreateTableFromSnapshot(ctx context.Context, in *CreateTableFromSnapshotRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", in, out, opts...) 
if err != nil { return nil, err @@ -4089,8 +4092,8 @@ func (c *bigtableTableAdminClient) GetTable(ctx context.Context, in *GetTableReq return out, nil } -func (c *bigtableTableAdminClient) UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { - out := new(longrunning.Operation) +func (c *bigtableTableAdminClient) UpdateTable(ctx context.Context, in *UpdateTableRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", in, out, opts...) if err != nil { return nil, err @@ -4107,8 +4110,8 @@ func (c *bigtableTableAdminClient) DeleteTable(ctx context.Context, in *DeleteTa return out, nil } -func (c *bigtableTableAdminClient) UndeleteTable(ctx context.Context, in *UndeleteTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { - out := new(longrunning.Operation) +func (c *bigtableTableAdminClient) UndeleteTable(ctx context.Context, in *UndeleteTableRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", in, out, opts...) if err != nil { return nil, err @@ -4152,8 +4155,8 @@ func (c *bigtableTableAdminClient) CheckConsistency(ctx context.Context, in *Che return out, nil } -func (c *bigtableTableAdminClient) SnapshotTable(ctx context.Context, in *SnapshotTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { - out := new(longrunning.Operation) +func (c *bigtableTableAdminClient) SnapshotTable(ctx context.Context, in *SnapshotTableRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", in, out, opts...) 
if err != nil { return nil, err @@ -4188,8 +4191,8 @@ func (c *bigtableTableAdminClient) DeleteSnapshot(ctx context.Context, in *Delet return out, nil } -func (c *bigtableTableAdminClient) CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { - out := new(longrunning.Operation) +func (c *bigtableTableAdminClient) CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", in, out, opts...) if err != nil { return nil, err @@ -4233,8 +4236,8 @@ func (c *bigtableTableAdminClient) ListBackups(ctx context.Context, in *ListBack return out, nil } -func (c *bigtableTableAdminClient) RestoreTable(ctx context.Context, in *RestoreTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { - out := new(longrunning.Operation) +func (c *bigtableTableAdminClient) RestoreTable(ctx context.Context, in *RestoreTableRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", in, out, opts...) if err != nil { return nil, err @@ -4242,8 +4245,8 @@ func (c *bigtableTableAdminClient) RestoreTable(ctx context.Context, in *Restore return out, nil } -func (c *bigtableTableAdminClient) GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) { - out := new(v1.Policy) +func (c *bigtableTableAdminClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) { + out := new(iampb.Policy) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", in, out, opts...) 
if err != nil { return nil, err @@ -4251,8 +4254,8 @@ func (c *bigtableTableAdminClient) GetIamPolicy(ctx context.Context, in *v1.GetI return out, nil } -func (c *bigtableTableAdminClient) SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) { - out := new(v1.Policy) +func (c *bigtableTableAdminClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) { + out := new(iampb.Policy) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", in, out, opts...) if err != nil { return nil, err @@ -4260,8 +4263,8 @@ func (c *bigtableTableAdminClient) SetIamPolicy(ctx context.Context, in *v1.SetI return out, nil } -func (c *bigtableTableAdminClient) TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) { - out := new(v1.TestIamPermissionsResponse) +func (c *bigtableTableAdminClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) { + out := new(iampb.TestIamPermissionsResponse) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", in, out, opts...) if err != nil { return nil, err @@ -4283,17 +4286,17 @@ type BigtableTableAdminServer interface { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. - CreateTableFromSnapshot(context.Context, *CreateTableFromSnapshotRequest) (*longrunning.Operation, error) + CreateTableFromSnapshot(context.Context, *CreateTableFromSnapshotRequest) (*longrunningpb.Operation, error) // Lists all tables served from a specified instance. ListTables(context.Context, *ListTablesRequest) (*ListTablesResponse, error) // Gets metadata information about the specified table. 
GetTable(context.Context, *GetTableRequest) (*Table, error) // Updates a specified table. - UpdateTable(context.Context, *UpdateTableRequest) (*longrunning.Operation, error) + UpdateTable(context.Context, *UpdateTableRequest) (*longrunningpb.Operation, error) // Permanently deletes a specified table and all of its data. DeleteTable(context.Context, *DeleteTableRequest) (*emptypb.Empty, error) // Restores a specified table which was accidentally deleted. - UndeleteTable(context.Context, *UndeleteTableRequest) (*longrunning.Operation, error) + UndeleteTable(context.Context, *UndeleteTableRequest) (*longrunningpb.Operation, error) // Performs a series of column family modifications on the specified table. // Either all or none of the modifications will occur before this method // returns, but data requests received prior to that point may see a table @@ -4320,7 +4323,7 @@ type BigtableTableAdminServer interface { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. - SnapshotTable(context.Context, *SnapshotTableRequest) (*longrunning.Operation, error) + SnapshotTable(context.Context, *SnapshotTableRequest) (*longrunningpb.Operation, error) // Gets metadata information about the specified snapshot. // // Note: This is a private alpha release of Cloud Bigtable snapshots. This @@ -4353,7 +4356,7 @@ type BigtableTableAdminServer interface { // [response][google.longrunning.Operation.response] field type is // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the returned operation will stop the // creation and delete the backup. - CreateBackup(context.Context, *CreateBackupRequest) (*longrunning.Operation, error) + CreateBackup(context.Context, *CreateBackupRequest) (*longrunningpb.Operation, error) // Gets metadata on a pending or completed Cloud Bigtable Backup. 
GetBackup(context.Context, *GetBackupRequest) (*Backup, error) // Updates a pending or completed Cloud Bigtable Backup. @@ -4371,16 +4374,16 @@ type BigtableTableAdminServer interface { // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The // [response][google.longrunning.Operation.response] type is // [Table][google.bigtable.admin.v2.Table], if successful. - RestoreTable(context.Context, *RestoreTableRequest) (*longrunning.Operation, error) + RestoreTable(context.Context, *RestoreTableRequest) (*longrunningpb.Operation, error) // Gets the access control policy for a Table or Backup resource. // Returns an empty policy if the resource exists but does not have a policy // set. - GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) // Sets the access control policy on a Table or Backup resource. // Replaces any existing policy. - SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) // Returns permissions that the caller has on the specified Table or Backup resource. - TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) } // UnimplementedBigtableTableAdminServer can be embedded to have forward compatible implementations. 
@@ -4390,7 +4393,7 @@ type UnimplementedBigtableTableAdminServer struct { func (*UnimplementedBigtableTableAdminServer) CreateTable(context.Context, *CreateTableRequest) (*Table, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateTable not implemented") } -func (*UnimplementedBigtableTableAdminServer) CreateTableFromSnapshot(context.Context, *CreateTableFromSnapshotRequest) (*longrunning.Operation, error) { +func (*UnimplementedBigtableTableAdminServer) CreateTableFromSnapshot(context.Context, *CreateTableFromSnapshotRequest) (*longrunningpb.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateTableFromSnapshot not implemented") } func (*UnimplementedBigtableTableAdminServer) ListTables(context.Context, *ListTablesRequest) (*ListTablesResponse, error) { @@ -4399,13 +4402,13 @@ func (*UnimplementedBigtableTableAdminServer) ListTables(context.Context, *ListT func (*UnimplementedBigtableTableAdminServer) GetTable(context.Context, *GetTableRequest) (*Table, error) { return nil, status.Errorf(codes.Unimplemented, "method GetTable not implemented") } -func (*UnimplementedBigtableTableAdminServer) UpdateTable(context.Context, *UpdateTableRequest) (*longrunning.Operation, error) { +func (*UnimplementedBigtableTableAdminServer) UpdateTable(context.Context, *UpdateTableRequest) (*longrunningpb.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateTable not implemented") } func (*UnimplementedBigtableTableAdminServer) DeleteTable(context.Context, *DeleteTableRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteTable not implemented") } -func (*UnimplementedBigtableTableAdminServer) UndeleteTable(context.Context, *UndeleteTableRequest) (*longrunning.Operation, error) { +func (*UnimplementedBigtableTableAdminServer) UndeleteTable(context.Context, *UndeleteTableRequest) (*longrunningpb.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method 
UndeleteTable not implemented") } func (*UnimplementedBigtableTableAdminServer) ModifyColumnFamilies(context.Context, *ModifyColumnFamiliesRequest) (*Table, error) { @@ -4420,7 +4423,7 @@ func (*UnimplementedBigtableTableAdminServer) GenerateConsistencyToken(context.C func (*UnimplementedBigtableTableAdminServer) CheckConsistency(context.Context, *CheckConsistencyRequest) (*CheckConsistencyResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CheckConsistency not implemented") } -func (*UnimplementedBigtableTableAdminServer) SnapshotTable(context.Context, *SnapshotTableRequest) (*longrunning.Operation, error) { +func (*UnimplementedBigtableTableAdminServer) SnapshotTable(context.Context, *SnapshotTableRequest) (*longrunningpb.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method SnapshotTable not implemented") } func (*UnimplementedBigtableTableAdminServer) GetSnapshot(context.Context, *GetSnapshotRequest) (*Snapshot, error) { @@ -4432,7 +4435,7 @@ func (*UnimplementedBigtableTableAdminServer) ListSnapshots(context.Context, *Li func (*UnimplementedBigtableTableAdminServer) DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented") } -func (*UnimplementedBigtableTableAdminServer) CreateBackup(context.Context, *CreateBackupRequest) (*longrunning.Operation, error) { +func (*UnimplementedBigtableTableAdminServer) CreateBackup(context.Context, *CreateBackupRequest) (*longrunningpb.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateBackup not implemented") } func (*UnimplementedBigtableTableAdminServer) GetBackup(context.Context, *GetBackupRequest) (*Backup, error) { @@ -4447,16 +4450,16 @@ func (*UnimplementedBigtableTableAdminServer) DeleteBackup(context.Context, *Del func (*UnimplementedBigtableTableAdminServer) ListBackups(context.Context, *ListBackupsRequest) (*ListBackupsResponse, 
error) { return nil, status.Errorf(codes.Unimplemented, "method ListBackups not implemented") } -func (*UnimplementedBigtableTableAdminServer) RestoreTable(context.Context, *RestoreTableRequest) (*longrunning.Operation, error) { +func (*UnimplementedBigtableTableAdminServer) RestoreTable(context.Context, *RestoreTableRequest) (*longrunningpb.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method RestoreTable not implemented") } -func (*UnimplementedBigtableTableAdminServer) GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) { +func (*UnimplementedBigtableTableAdminServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") } -func (*UnimplementedBigtableTableAdminServer) SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) { +func (*UnimplementedBigtableTableAdminServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") } -func (*UnimplementedBigtableTableAdminServer) TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) { +func (*UnimplementedBigtableTableAdminServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") } @@ -4843,7 +4846,7 @@ func _BigtableTableAdmin_RestoreTable_Handler(srv interface{}, ctx context.Conte } func _BigtableTableAdmin_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.GetIamPolicyRequest) + in := new(iampb.GetIamPolicyRequest) if err := dec(in); err != nil { return nil, err } @@ -4855,13 +4858,13 @@ func 
_BigtableTableAdmin_GetIamPolicy_Handler(srv interface{}, ctx context.Conte FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BigtableTableAdminServer).GetIamPolicy(ctx, req.(*v1.GetIamPolicyRequest)) + return srv.(BigtableTableAdminServer).GetIamPolicy(ctx, req.(*iampb.GetIamPolicyRequest)) } return interceptor(ctx, in, info, handler) } func _BigtableTableAdmin_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.SetIamPolicyRequest) + in := new(iampb.SetIamPolicyRequest) if err := dec(in); err != nil { return nil, err } @@ -4873,13 +4876,13 @@ func _BigtableTableAdmin_SetIamPolicy_Handler(srv interface{}, ctx context.Conte FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BigtableTableAdminServer).SetIamPolicy(ctx, req.(*v1.SetIamPolicyRequest)) + return srv.(BigtableTableAdminServer).SetIamPolicy(ctx, req.(*iampb.SetIamPolicyRequest)) } return interceptor(ctx, in, info, handler) } func _BigtableTableAdmin_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.TestIamPermissionsRequest) + in := new(iampb.TestIamPermissionsRequest) if err := dec(in); err != nil { return nil, err } @@ -4891,7 +4894,7 @@ func _BigtableTableAdmin_TestIamPermissions_Handler(srv interface{}, ctx context FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BigtableTableAdminServer).TestIamPermissions(ctx, req.(*v1.TestIamPermissionsRequest)) + return srv.(BigtableTableAdminServer).TestIamPermissions(ctx, 
req.(*iampb.TestIamPermissionsRequest)) } return interceptor(ctx, in, info, handler) } diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go index edd18c4f43..78f2ca79f6 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.12 // source: google/bigtable/admin/v2/table.proto package admin @@ -136,7 +136,7 @@ func (x Table_TimestampGranularity) Number() protoreflect.EnumNumber { // Deprecated: Use Table_TimestampGranularity.Descriptor instead. func (Table_TimestampGranularity) EnumDescriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{1, 0} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{2, 0} } // Defines a view over a table's fields. @@ -202,7 +202,7 @@ func (x Table_View) Number() protoreflect.EnumNumber { // Deprecated: Use Table_View.Descriptor instead. func (Table_View) EnumDescriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{1, 1} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{2, 1} } // Table replication states. @@ -275,7 +275,7 @@ func (x Table_ClusterState_ReplicationState) Number() protoreflect.EnumNumber { // Deprecated: Use Table_ClusterState_ReplicationState.Descriptor instead. 
func (Table_ClusterState_ReplicationState) EnumDescriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{1, 0, 0} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{2, 0, 0} } // Possible encryption types for a resource. @@ -336,7 +336,7 @@ func (x EncryptionInfo_EncryptionType) Number() protoreflect.EnumNumber { // Deprecated: Use EncryptionInfo_EncryptionType.Descriptor instead. func (EncryptionInfo_EncryptionType) EnumDescriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{4, 0} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{5, 0} } // Possible states of a snapshot. @@ -391,7 +391,7 @@ func (x Snapshot_State) Number() protoreflect.EnumNumber { // Deprecated: Use Snapshot_State.Descriptor instead. func (Snapshot_State) EnumDescriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{5, 0} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{6, 0} } // Indicates the current state of the backup. @@ -445,7 +445,7 @@ func (x Backup_State) Number() protoreflect.EnumNumber { // Deprecated: Use Backup_State.Descriptor instead. func (Backup_State) EnumDescriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{6, 0} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{7, 0} } // Information about a table restore. @@ -529,6 +529,59 @@ type RestoreInfo_BackupInfo struct { func (*RestoreInfo_BackupInfo) isRestoreInfo_SourceInfo() {} +// Change stream configuration. +type ChangeStreamConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // How long the change stream should be retained. Change stream data older + // than the retention period will not be returned when reading the change + // stream from the table. 
+ // Values must be at least 1 day and at most 7 days, and will be truncated to + // microsecond granularity. + RetentionPeriod *durationpb.Duration `protobuf:"bytes,1,opt,name=retention_period,json=retentionPeriod,proto3" json:"retention_period,omitempty"` +} + +func (x *ChangeStreamConfig) Reset() { + *x = ChangeStreamConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ChangeStreamConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChangeStreamConfig) ProtoMessage() {} + +func (x *ChangeStreamConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChangeStreamConfig.ProtoReflect.Descriptor instead. +func (*ChangeStreamConfig) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{1} +} + +func (x *ChangeStreamConfig) GetRetentionPeriod() *durationpb.Duration { + if x != nil { + return x.RetentionPeriod + } + return nil +} + // A collection of user data indexed by row, column, and timestamp. // Each table is served using the resources of its parent cluster. type Table struct { @@ -557,6 +610,10 @@ type Table struct { // Output only. If this table was restored from another data source (e.g. a backup), this // field will be populated with information about the restore. RestoreInfo *RestoreInfo `protobuf:"bytes,6,opt,name=restore_info,json=restoreInfo,proto3" json:"restore_info,omitempty"` + // If specified, enable the change stream on this table. + // Otherwise, the change stream is disabled and the change stream is not + // retained. 
+ ChangeStreamConfig *ChangeStreamConfig `protobuf:"bytes,8,opt,name=change_stream_config,json=changeStreamConfig,proto3" json:"change_stream_config,omitempty"` // Set to true to make the table protected against data loss. i.e. deleting // the following resources through Admin APIs are prohibited: // - The table. @@ -570,7 +627,7 @@ type Table struct { func (x *Table) Reset() { *x = Table{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[1] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -583,7 +640,7 @@ func (x *Table) String() string { func (*Table) ProtoMessage() {} func (x *Table) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[1] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -596,7 +653,7 @@ func (x *Table) ProtoReflect() protoreflect.Message { // Deprecated: Use Table.ProtoReflect.Descriptor instead. 
func (*Table) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{1} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{2} } func (x *Table) GetName() string { @@ -634,6 +691,13 @@ func (x *Table) GetRestoreInfo() *RestoreInfo { return nil } +func (x *Table) GetChangeStreamConfig() *ChangeStreamConfig { + if x != nil { + return x.ChangeStreamConfig + } + return nil +} + func (x *Table) GetDeletionProtection() bool { if x != nil { return x.DeletionProtection @@ -659,7 +723,7 @@ type ColumnFamily struct { func (x *ColumnFamily) Reset() { *x = ColumnFamily{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[2] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -672,7 +736,7 @@ func (x *ColumnFamily) String() string { func (*ColumnFamily) ProtoMessage() {} func (x *ColumnFamily) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[2] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -685,7 +749,7 @@ func (x *ColumnFamily) ProtoReflect() protoreflect.Message { // Deprecated: Use ColumnFamily.ProtoReflect.Descriptor instead. 
func (*ColumnFamily) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{2} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{3} } func (x *ColumnFamily) GetGcRule() *GcRule { @@ -715,7 +779,7 @@ type GcRule struct { func (x *GcRule) Reset() { *x = GcRule{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[3] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -728,7 +792,7 @@ func (x *GcRule) String() string { func (*GcRule) ProtoMessage() {} func (x *GcRule) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[3] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -741,7 +805,7 @@ func (x *GcRule) ProtoReflect() protoreflect.Message { // Deprecated: Use GcRule.ProtoReflect.Descriptor instead. 
func (*GcRule) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{3} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{4} } func (m *GcRule) GetRule() isGcRule_Rule { @@ -835,7 +899,7 @@ type EncryptionInfo struct { func (x *EncryptionInfo) Reset() { *x = EncryptionInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[4] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -848,7 +912,7 @@ func (x *EncryptionInfo) String() string { func (*EncryptionInfo) ProtoMessage() {} func (x *EncryptionInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[4] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -861,7 +925,7 @@ func (x *EncryptionInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use EncryptionInfo.ProtoReflect.Descriptor instead. 
func (*EncryptionInfo) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{4} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{5} } func (x *EncryptionInfo) GetEncryptionType() EncryptionInfo_EncryptionType { @@ -923,7 +987,7 @@ type Snapshot struct { func (x *Snapshot) Reset() { *x = Snapshot{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[5] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -936,7 +1000,7 @@ func (x *Snapshot) String() string { func (*Snapshot) ProtoMessage() {} func (x *Snapshot) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[5] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -949,7 +1013,7 @@ func (x *Snapshot) ProtoReflect() protoreflect.Message { // Deprecated: Use Snapshot.ProtoReflect.Descriptor instead. 
func (*Snapshot) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{5} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{6} } func (x *Snapshot) GetName() string { @@ -1049,7 +1113,7 @@ type Backup struct { func (x *Backup) Reset() { *x = Backup{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[6] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1062,7 +1126,7 @@ func (x *Backup) String() string { func (*Backup) ProtoMessage() {} func (x *Backup) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[6] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1075,7 +1139,7 @@ func (x *Backup) ProtoReflect() protoreflect.Message { // Deprecated: Use Backup.ProtoReflect.Descriptor instead. 
func (*Backup) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{6} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{7} } func (x *Backup) GetName() string { @@ -1155,7 +1219,7 @@ type BackupInfo struct { func (x *BackupInfo) Reset() { *x = BackupInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[7] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1168,7 +1232,7 @@ func (x *BackupInfo) String() string { func (*BackupInfo) ProtoMessage() {} func (x *BackupInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[7] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1181,7 +1245,7 @@ func (x *BackupInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupInfo.ProtoReflect.Descriptor instead. 
func (*BackupInfo) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{7} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{8} } func (x *BackupInfo) GetBackup() string { @@ -1231,7 +1295,7 @@ type Table_ClusterState struct { func (x *Table_ClusterState) Reset() { *x = Table_ClusterState{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[8] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1244,7 +1308,7 @@ func (x *Table_ClusterState) String() string { func (*Table_ClusterState) ProtoMessage() {} func (x *Table_ClusterState) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[8] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1257,7 +1321,7 @@ func (x *Table_ClusterState) ProtoReflect() protoreflect.Message { // Deprecated: Use Table_ClusterState.ProtoReflect.Descriptor instead. 
func (*Table_ClusterState) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{1, 0} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{2, 0} } func (x *Table_ClusterState) GetReplicationState() Table_ClusterState_ReplicationState { @@ -1287,7 +1351,7 @@ type GcRule_Intersection struct { func (x *GcRule_Intersection) Reset() { *x = GcRule_Intersection{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[11] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1300,7 +1364,7 @@ func (x *GcRule_Intersection) String() string { func (*GcRule_Intersection) ProtoMessage() {} func (x *GcRule_Intersection) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[11] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1313,7 +1377,7 @@ func (x *GcRule_Intersection) ProtoReflect() protoreflect.Message { // Deprecated: Use GcRule_Intersection.ProtoReflect.Descriptor instead. 
func (*GcRule_Intersection) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{3, 0} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{4, 0} } func (x *GcRule_Intersection) GetRules() []*GcRule { @@ -1336,7 +1400,7 @@ type GcRule_Union struct { func (x *GcRule_Union) Reset() { *x = GcRule_Union{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[12] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1349,7 +1413,7 @@ func (x *GcRule_Union) String() string { func (*GcRule_Union) ProtoMessage() {} func (x *GcRule_Union) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[12] + mi := &file_google_bigtable_admin_v2_table_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1362,7 +1426,7 @@ func (x *GcRule_Union) ProtoReflect() protoreflect.Message { // Deprecated: Use GcRule_Union.ProtoReflect.Descriptor instead. 
func (*GcRule_Union) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{3, 1} + return file_google_bigtable_admin_v2_table_proto_rawDescGZIP(), []int{4, 1} } func (x *GcRule_Union) GetRules() []*GcRule { @@ -1399,259 +1463,271 @@ var file_google_bigtable_admin_v2_table_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x0d, 0x0a, - 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x9b, 0x0a, 0x0a, - 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0d, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x0f, 0x63, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, - 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 
0x6e, - 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x5b, 0x0a, 0x0b, 0x67, 0x72, 0x61, 0x6e, - 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, + 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x5a, 0x0a, 0x12, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x44, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0xfb, 0x0a, 0x0a, 0x05, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, + 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x43, 0x6f, 0x6c, 0x75, 
0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, + 0x6c, 0x69, 0x65, 0x73, 0x12, 0x5b, 0x0a, 0x0b, 0x67, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, + 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x42, + 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0b, 0x67, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, + 0x79, 0x12, 0x4d, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x12, 0x5e, 0x0a, 0x14, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x63, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 
0x28, 0x08, 0x52, 0x12, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x1a, 0xe8, 0x02, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x6f, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, - 0x69, 0x74, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0b, 0x67, 0x72, 0x61, 0x6e, 0x75, 0x6c, - 0x61, 0x72, 0x69, 0x74, 0x79, 0x12, 0x4d, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xe8, 0x02, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x6f, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 
0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x56, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x8e, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x56, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, - 0x8e, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6c, 
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, - 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4e, 0x49, - 0x54, 0x49, 0x41, 0x4c, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x50, - 0x4c, 0x41, 0x4e, 0x4e, 0x45, 0x44, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, - 0x43, 0x45, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x55, 0x4e, 0x50, 0x4c, 0x41, 0x4e, 0x4e, 0x45, - 0x44, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x12, - 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, - 0x41, 0x44, 0x59, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4d, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x05, - 0x1a, 0x6e, 0x0a, 0x12, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x42, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, + 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x4c, 0x41, 0x4e, 0x4e, + 0x45, 0x44, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x02, + 0x12, 0x19, 0x0a, 0x15, 0x55, 0x4e, 0x50, 0x4c, 0x41, 0x4e, 0x4e, 0x45, 0x44, 0x5f, 0x4d, 0x41, + 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x52, + 0x45, 0x41, 0x44, 0x59, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, + 0x4f, 0x50, 0x54, 0x49, 0x4d, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x1a, 0x6e, 
0x0a, 0x12, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x42, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x69, 0x0a, 0x13, + 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x14, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x12, + 0x25, 0x0a, 0x21, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x5f, 0x47, 0x52, 0x41, + 0x4e, 0x55, 0x4c, 0x41, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x49, 0x4c, 0x4c, 0x49, 0x53, + 0x10, 0x01, 0x22, 0x71, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, 0x12, 0x14, 0x0a, 0x10, 0x56, 0x49, + 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0d, 0x0a, 0x09, 
0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, + 0x0f, 0x0a, 0x0b, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x02, + 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x56, 0x49, 0x45, 0x57, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x05, 0x12, 0x08, 0x0a, 0x04, 0x46, + 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x3a, 0x5f, 0xea, 0x41, 0x5c, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x36, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x7b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x7d, 0x22, 0x49, 0x0a, 0x0c, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, + 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x39, 0x0a, 0x07, 0x67, 0x63, 0x5f, 0x72, 0x75, 0x6c, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x1a, 0x69, 0x0a, 0x13, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x06, 0x67, 0x63, 0x52, 0x75, 0x6c, + 0x65, 0x22, 0x90, 0x03, 0x0a, 0x06, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x2a, 0x0a, 0x10, + 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x53, + 0x0a, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x63, + 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x05, 0x75, 0x6e, + 0x69, 0x6f, 0x6e, 0x1a, 0x46, 0x0a, 0x0c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 
0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x63, + 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x1a, 0x3f, 0x0a, 0x05, 0x55, + 0x6e, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x42, 0x06, 0x0a, 0x04, + 0x72, 0x75, 0x6c, 0x65, 0x22, 0x8a, 0x03, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x65, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x44, + 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x58, 0x0a, 0x0f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, + 0x41, 0x03, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 
0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x0d, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x71, + 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, + 0x55, 0x4c, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, + 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4e, + 0x41, 0x47, 0x45, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, + 0x02, 0x22, 0x9a, 0x04, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x14, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, - 0x69, 0x74, 0x79, 0x12, 0x25, 0x0a, 0x21, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, - 0x5f, 0x47, 0x52, 0x41, 0x4e, 0x55, 0x4c, 0x41, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 
0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x49, - 0x4c, 0x4c, 0x49, 0x53, 0x10, 0x01, 0x22, 0x71, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, 0x12, 0x14, - 0x0a, 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x56, 0x49, - 0x45, 0x57, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x4e, - 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x05, 0x12, - 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x3a, 0x5f, 0xea, 0x41, 0x5c, 0x0a, 0x22, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x12, 0x36, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x7d, 0x22, 0x49, 0x0a, 0x0c, 0x43, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x39, 0x0a, 0x07, 0x67, 0x63, - 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x06, 0x67, - 0x63, 0x52, 0x75, 0x6c, 0x65, 0x22, 0x90, 0x03, 0x0a, 0x06, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, - 0x12, 0x2a, 0x0a, 0x10, 
0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0e, 0x6d, 0x61, - 0x78, 0x4e, 0x75, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34, 0x0a, 0x07, - 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0d, 0x64, 0x61, 0x74, 0x61, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3b, + 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x05, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x54, + 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, + 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, + 0x02, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x4f, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, + 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x7d, 0x22, 0xf4, + 0x04, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, + 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x05, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x41, - 
0x67, 0x65, 0x12, 0x53, 0x0a, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x48, 0x00, - 0x52, 0x05, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x1a, 0x46, 0x0a, 0x0c, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, + 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 
0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, + 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x09, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x56, 0x0a, + 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x1a, - 0x3f, 0x0a, 0x05, 0x55, 0x6e, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x47, 0x63, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, - 0x42, 0x06, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x22, 0x8a, 0x03, 0x0a, 0x0e, 0x45, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x65, 0x0a, 0x0f, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 
0x76, 0x32, 0x2e, - 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x44, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x58, 0x0a, 0x0f, 0x6b, 0x6d, 0x73, 0x5f, - 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x30, 0xe0, 0x41, 0x03, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0x71, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, - 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, - 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, - 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, - 0x49, 0x4f, 0x4e, 
0x10, 0x02, 0x22, 0x9a, 0x04, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x0b, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x61, - 0x74, 0x61, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 
0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, - 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, - 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, - 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, - 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x25, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x12, 0x4f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x7d, 0x22, 0xf4, 0x04, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x29, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x05, 0xe0, 0x41, 0x02, 0x52, - 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, - 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, - 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, - 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x73, 0x69, - 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x41, - 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x56, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x6e, 0x66, 0x6f, 0x42, 
0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, - 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, - 0x10, 0x02, 0x3a, 0x75, 0xea, 0x41, 0x72, 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x4b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, - 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x7d, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2a, 0x44, 0x0a, 0x11, 0x52, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, - 0x0a, 0x1f, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x10, 0x01, 0x42, - 0xfc, 0x02, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x42, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, - 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, - 0x02, 
0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, - 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, - 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, - 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x7a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, - 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, - 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, + 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, + 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x3a, 0x75, + 0xea, 0x41, 0x72, 0x0a, 0x23, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, + 0x69, 
0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x4b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x7d, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x26, 0x0a, + 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2a, 0x44, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 
0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, + 0x53, 0x54, 0x4f, 0x52, 0x45, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x10, 0x01, 0x42, 0xfc, 0x02, 0x0a, 0x1c, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0a, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, + 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, + 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7a, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 
0x6a, 0x65, 0x63, + 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, + 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, + 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -1667,7 +1743,7 @@ func file_google_bigtable_admin_v2_table_proto_rawDescGZIP() []byte { } var file_google_bigtable_admin_v2_table_proto_enumTypes = make([]protoimpl.EnumInfo, 7) -var file_google_bigtable_admin_v2_table_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_google_bigtable_admin_v2_table_proto_msgTypes = make([]protoimpl.MessageInfo, 14) var file_google_bigtable_admin_v2_table_proto_goTypes = []interface{}{ (RestoreSourceType)(0), // 0: google.bigtable.admin.v2.RestoreSourceType (Table_TimestampGranularity)(0), // 1: google.bigtable.admin.v2.Table.TimestampGranularity @@ -1677,57 +1753,60 @@ var file_google_bigtable_admin_v2_table_proto_goTypes = []interface{}{ (Snapshot_State)(0), // 5: google.bigtable.admin.v2.Snapshot.State (Backup_State)(0), // 6: google.bigtable.admin.v2.Backup.State (*RestoreInfo)(nil), // 7: google.bigtable.admin.v2.RestoreInfo - (*Table)(nil), // 8: google.bigtable.admin.v2.Table - (*ColumnFamily)(nil), // 9: google.bigtable.admin.v2.ColumnFamily - (*GcRule)(nil), // 10: google.bigtable.admin.v2.GcRule - (*EncryptionInfo)(nil), // 11: google.bigtable.admin.v2.EncryptionInfo - (*Snapshot)(nil), // 12: google.bigtable.admin.v2.Snapshot - (*Backup)(nil), // 13: google.bigtable.admin.v2.Backup - 
(*BackupInfo)(nil), // 14: google.bigtable.admin.v2.BackupInfo - (*Table_ClusterState)(nil), // 15: google.bigtable.admin.v2.Table.ClusterState - nil, // 16: google.bigtable.admin.v2.Table.ClusterStatesEntry - nil, // 17: google.bigtable.admin.v2.Table.ColumnFamiliesEntry - (*GcRule_Intersection)(nil), // 18: google.bigtable.admin.v2.GcRule.Intersection - (*GcRule_Union)(nil), // 19: google.bigtable.admin.v2.GcRule.Union - (*durationpb.Duration)(nil), // 20: google.protobuf.Duration - (*status.Status)(nil), // 21: google.rpc.Status - (*timestamppb.Timestamp)(nil), // 22: google.protobuf.Timestamp + (*ChangeStreamConfig)(nil), // 8: google.bigtable.admin.v2.ChangeStreamConfig + (*Table)(nil), // 9: google.bigtable.admin.v2.Table + (*ColumnFamily)(nil), // 10: google.bigtable.admin.v2.ColumnFamily + (*GcRule)(nil), // 11: google.bigtable.admin.v2.GcRule + (*EncryptionInfo)(nil), // 12: google.bigtable.admin.v2.EncryptionInfo + (*Snapshot)(nil), // 13: google.bigtable.admin.v2.Snapshot + (*Backup)(nil), // 14: google.bigtable.admin.v2.Backup + (*BackupInfo)(nil), // 15: google.bigtable.admin.v2.BackupInfo + (*Table_ClusterState)(nil), // 16: google.bigtable.admin.v2.Table.ClusterState + nil, // 17: google.bigtable.admin.v2.Table.ClusterStatesEntry + nil, // 18: google.bigtable.admin.v2.Table.ColumnFamiliesEntry + (*GcRule_Intersection)(nil), // 19: google.bigtable.admin.v2.GcRule.Intersection + (*GcRule_Union)(nil), // 20: google.bigtable.admin.v2.GcRule.Union + (*durationpb.Duration)(nil), // 21: google.protobuf.Duration + (*status.Status)(nil), // 22: google.rpc.Status + (*timestamppb.Timestamp)(nil), // 23: google.protobuf.Timestamp } var file_google_bigtable_admin_v2_table_proto_depIdxs = []int32{ 0, // 0: google.bigtable.admin.v2.RestoreInfo.source_type:type_name -> google.bigtable.admin.v2.RestoreSourceType - 14, // 1: google.bigtable.admin.v2.RestoreInfo.backup_info:type_name -> google.bigtable.admin.v2.BackupInfo - 16, // 2: 
google.bigtable.admin.v2.Table.cluster_states:type_name -> google.bigtable.admin.v2.Table.ClusterStatesEntry - 17, // 3: google.bigtable.admin.v2.Table.column_families:type_name -> google.bigtable.admin.v2.Table.ColumnFamiliesEntry - 1, // 4: google.bigtable.admin.v2.Table.granularity:type_name -> google.bigtable.admin.v2.Table.TimestampGranularity - 7, // 5: google.bigtable.admin.v2.Table.restore_info:type_name -> google.bigtable.admin.v2.RestoreInfo - 10, // 6: google.bigtable.admin.v2.ColumnFamily.gc_rule:type_name -> google.bigtable.admin.v2.GcRule - 20, // 7: google.bigtable.admin.v2.GcRule.max_age:type_name -> google.protobuf.Duration - 18, // 8: google.bigtable.admin.v2.GcRule.intersection:type_name -> google.bigtable.admin.v2.GcRule.Intersection - 19, // 9: google.bigtable.admin.v2.GcRule.union:type_name -> google.bigtable.admin.v2.GcRule.Union - 4, // 10: google.bigtable.admin.v2.EncryptionInfo.encryption_type:type_name -> google.bigtable.admin.v2.EncryptionInfo.EncryptionType - 21, // 11: google.bigtable.admin.v2.EncryptionInfo.encryption_status:type_name -> google.rpc.Status - 8, // 12: google.bigtable.admin.v2.Snapshot.source_table:type_name -> google.bigtable.admin.v2.Table - 22, // 13: google.bigtable.admin.v2.Snapshot.create_time:type_name -> google.protobuf.Timestamp - 22, // 14: google.bigtable.admin.v2.Snapshot.delete_time:type_name -> google.protobuf.Timestamp - 5, // 15: google.bigtable.admin.v2.Snapshot.state:type_name -> google.bigtable.admin.v2.Snapshot.State - 22, // 16: google.bigtable.admin.v2.Backup.expire_time:type_name -> google.protobuf.Timestamp - 22, // 17: google.bigtable.admin.v2.Backup.start_time:type_name -> google.protobuf.Timestamp - 22, // 18: google.bigtable.admin.v2.Backup.end_time:type_name -> google.protobuf.Timestamp - 6, // 19: google.bigtable.admin.v2.Backup.state:type_name -> google.bigtable.admin.v2.Backup.State - 11, // 20: google.bigtable.admin.v2.Backup.encryption_info:type_name -> 
google.bigtable.admin.v2.EncryptionInfo - 22, // 21: google.bigtable.admin.v2.BackupInfo.start_time:type_name -> google.protobuf.Timestamp - 22, // 22: google.bigtable.admin.v2.BackupInfo.end_time:type_name -> google.protobuf.Timestamp - 3, // 23: google.bigtable.admin.v2.Table.ClusterState.replication_state:type_name -> google.bigtable.admin.v2.Table.ClusterState.ReplicationState - 11, // 24: google.bigtable.admin.v2.Table.ClusterState.encryption_info:type_name -> google.bigtable.admin.v2.EncryptionInfo - 15, // 25: google.bigtable.admin.v2.Table.ClusterStatesEntry.value:type_name -> google.bigtable.admin.v2.Table.ClusterState - 9, // 26: google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value:type_name -> google.bigtable.admin.v2.ColumnFamily - 10, // 27: google.bigtable.admin.v2.GcRule.Intersection.rules:type_name -> google.bigtable.admin.v2.GcRule - 10, // 28: google.bigtable.admin.v2.GcRule.Union.rules:type_name -> google.bigtable.admin.v2.GcRule - 29, // [29:29] is the sub-list for method output_type - 29, // [29:29] is the sub-list for method input_type - 29, // [29:29] is the sub-list for extension type_name - 29, // [29:29] is the sub-list for extension extendee - 0, // [0:29] is the sub-list for field type_name + 15, // 1: google.bigtable.admin.v2.RestoreInfo.backup_info:type_name -> google.bigtable.admin.v2.BackupInfo + 21, // 2: google.bigtable.admin.v2.ChangeStreamConfig.retention_period:type_name -> google.protobuf.Duration + 17, // 3: google.bigtable.admin.v2.Table.cluster_states:type_name -> google.bigtable.admin.v2.Table.ClusterStatesEntry + 18, // 4: google.bigtable.admin.v2.Table.column_families:type_name -> google.bigtable.admin.v2.Table.ColumnFamiliesEntry + 1, // 5: google.bigtable.admin.v2.Table.granularity:type_name -> google.bigtable.admin.v2.Table.TimestampGranularity + 7, // 6: google.bigtable.admin.v2.Table.restore_info:type_name -> google.bigtable.admin.v2.RestoreInfo + 8, // 7: 
google.bigtable.admin.v2.Table.change_stream_config:type_name -> google.bigtable.admin.v2.ChangeStreamConfig + 11, // 8: google.bigtable.admin.v2.ColumnFamily.gc_rule:type_name -> google.bigtable.admin.v2.GcRule + 21, // 9: google.bigtable.admin.v2.GcRule.max_age:type_name -> google.protobuf.Duration + 19, // 10: google.bigtable.admin.v2.GcRule.intersection:type_name -> google.bigtable.admin.v2.GcRule.Intersection + 20, // 11: google.bigtable.admin.v2.GcRule.union:type_name -> google.bigtable.admin.v2.GcRule.Union + 4, // 12: google.bigtable.admin.v2.EncryptionInfo.encryption_type:type_name -> google.bigtable.admin.v2.EncryptionInfo.EncryptionType + 22, // 13: google.bigtable.admin.v2.EncryptionInfo.encryption_status:type_name -> google.rpc.Status + 9, // 14: google.bigtable.admin.v2.Snapshot.source_table:type_name -> google.bigtable.admin.v2.Table + 23, // 15: google.bigtable.admin.v2.Snapshot.create_time:type_name -> google.protobuf.Timestamp + 23, // 16: google.bigtable.admin.v2.Snapshot.delete_time:type_name -> google.protobuf.Timestamp + 5, // 17: google.bigtable.admin.v2.Snapshot.state:type_name -> google.bigtable.admin.v2.Snapshot.State + 23, // 18: google.bigtable.admin.v2.Backup.expire_time:type_name -> google.protobuf.Timestamp + 23, // 19: google.bigtable.admin.v2.Backup.start_time:type_name -> google.protobuf.Timestamp + 23, // 20: google.bigtable.admin.v2.Backup.end_time:type_name -> google.protobuf.Timestamp + 6, // 21: google.bigtable.admin.v2.Backup.state:type_name -> google.bigtable.admin.v2.Backup.State + 12, // 22: google.bigtable.admin.v2.Backup.encryption_info:type_name -> google.bigtable.admin.v2.EncryptionInfo + 23, // 23: google.bigtable.admin.v2.BackupInfo.start_time:type_name -> google.protobuf.Timestamp + 23, // 24: google.bigtable.admin.v2.BackupInfo.end_time:type_name -> google.protobuf.Timestamp + 3, // 25: google.bigtable.admin.v2.Table.ClusterState.replication_state:type_name -> 
google.bigtable.admin.v2.Table.ClusterState.ReplicationState + 12, // 26: google.bigtable.admin.v2.Table.ClusterState.encryption_info:type_name -> google.bigtable.admin.v2.EncryptionInfo + 16, // 27: google.bigtable.admin.v2.Table.ClusterStatesEntry.value:type_name -> google.bigtable.admin.v2.Table.ClusterState + 10, // 28: google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value:type_name -> google.bigtable.admin.v2.ColumnFamily + 11, // 29: google.bigtable.admin.v2.GcRule.Intersection.rules:type_name -> google.bigtable.admin.v2.GcRule + 11, // 30: google.bigtable.admin.v2.GcRule.Union.rules:type_name -> google.bigtable.admin.v2.GcRule + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name } func init() { file_google_bigtable_admin_v2_table_proto_init() } @@ -1749,7 +1828,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { } } file_google_bigtable_admin_v2_table_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Table); i { + switch v := v.(*ChangeStreamConfig); i { case 0: return &v.state case 1: @@ -1761,7 +1840,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { } } file_google_bigtable_admin_v2_table_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ColumnFamily); i { + switch v := v.(*Table); i { case 0: return &v.state case 1: @@ -1773,7 +1852,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { } } file_google_bigtable_admin_v2_table_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GcRule); i { + switch v := v.(*ColumnFamily); i { case 0: return &v.state case 1: @@ -1785,7 +1864,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { } } file_google_bigtable_admin_v2_table_proto_msgTypes[4].Exporter 
= func(v interface{}, i int) interface{} { - switch v := v.(*EncryptionInfo); i { + switch v := v.(*GcRule); i { case 0: return &v.state case 1: @@ -1797,7 +1876,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { } } file_google_bigtable_admin_v2_table_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Snapshot); i { + switch v := v.(*EncryptionInfo); i { case 0: return &v.state case 1: @@ -1809,7 +1888,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { } } file_google_bigtable_admin_v2_table_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Backup); i { + switch v := v.(*Snapshot); i { case 0: return &v.state case 1: @@ -1821,7 +1900,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { } } file_google_bigtable_admin_v2_table_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BackupInfo); i { + switch v := v.(*Backup); i { case 0: return &v.state case 1: @@ -1833,6 +1912,18 @@ func file_google_bigtable_admin_v2_table_proto_init() { } } file_google_bigtable_admin_v2_table_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackupInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_admin_v2_table_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Table_ClusterState); i { case 0: return &v.state @@ -1844,7 +1935,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } } - file_google_bigtable_admin_v2_table_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GcRule_Intersection); i { case 0: return &v.state @@ -1856,7 +1947,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { return nil } 
} - file_google_bigtable_admin_v2_table_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_bigtable_admin_v2_table_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GcRule_Union); i { case 0: return &v.state @@ -1872,7 +1963,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { file_google_bigtable_admin_v2_table_proto_msgTypes[0].OneofWrappers = []interface{}{ (*RestoreInfo_BackupInfo)(nil), } - file_google_bigtable_admin_v2_table_proto_msgTypes[3].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_table_proto_msgTypes[4].OneofWrappers = []interface{}{ (*GcRule_MaxNumVersions)(nil), (*GcRule_MaxAge)(nil), (*GcRule_Intersection_)(nil), @@ -1884,7 +1975,7 @@ func file_google_bigtable_admin_v2_table_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_admin_v2_table_proto_rawDesc, NumEnums: 7, - NumMessages: 13, + NumMessages: 14, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go index 226802646a..37f23cb635 100644 --- a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v3.21.12 // source: google/bigtable/v2/bigtable.proto package bigtable @@ -156,7 +156,7 @@ func (x ReadChangeStreamResponse_DataChange_Type) Number() protoreflect.EnumNumb // Deprecated: Use ReadChangeStreamResponse_DataChange_Type.Descriptor instead. func (ReadChangeStreamResponse_DataChange_Type) EnumDescriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{17, 1, 0} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{18, 1, 0} } // Request message for Bigtable.ReadRows. @@ -169,8 +169,8 @@ type ReadRowsRequest struct { // Values are of the form // `projects//instances//tables/
`. TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` - // This value specifies routing for replication. This API only accepts the - // empty value of app_profile_id. + // This value specifies routing for replication. If not specified, the + // "default" application profile will be used. AppProfileId string `protobuf:"bytes,5,opt,name=app_profile_id,json=appProfileId,proto3" json:"app_profile_id,omitempty"` // The row keys and/or ranges to read sequentially. If not specified, reads // from all rows. @@ -681,6 +681,10 @@ type MutateRowsResponse struct { // One or more results for Entries from the batch request. Entries []*MutateRowsResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + // Information about how client should limit the rate (QPS). Primirily used by + // supported official Cloud Bigtable clients. If unset, the rate limit info is + // not provided by the server. + RateLimitInfo *RateLimitInfo `protobuf:"bytes,3,opt,name=rate_limit_info,json=rateLimitInfo,proto3,oneof" json:"rate_limit_info,omitempty"` } func (x *MutateRowsResponse) Reset() { @@ -722,6 +726,84 @@ func (x *MutateRowsResponse) GetEntries() []*MutateRowsResponse_Entry { return nil } +func (x *MutateRowsResponse) GetRateLimitInfo() *RateLimitInfo { + if x != nil { + return x.RateLimitInfo + } + return nil +} + +// Information about how client should adjust the load to Bigtable. +type RateLimitInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Time that clients should wait before adjusting the target rate again. + // If clients adjust rate too frequently, the impact of the previous + // adjustment may not have been taken into account and may + // over-throttle or under-throttle. If clients adjust rate too slowly, they + // will not be responsive to load changes on server side, and may + // over-throttle or under-throttle. 
+ Period *durationpb.Duration `protobuf:"bytes,1,opt,name=period,proto3" json:"period,omitempty"` + // If it has been at least one `period` since the last load adjustment, the + // client should multiply the current load by this value to get the new target + // load. For example, if the current load is 100 and `factor` is 0.8, the new + // target load should be 80. After adjusting, the client should ignore + // `factor` until another `period` has passed. + // + // The client can measure its load using any unit that's comparable over time + // For example, QPS can be used as long as each request involves a similar + // amount of work. + Factor float64 `protobuf:"fixed64,2,opt,name=factor,proto3" json:"factor,omitempty"` +} + +func (x *RateLimitInfo) Reset() { + *x = RateLimitInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimitInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitInfo) ProtoMessage() {} + +func (x *RateLimitInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimitInfo.ProtoReflect.Descriptor instead. +func (*RateLimitInfo) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{8} +} + +func (x *RateLimitInfo) GetPeriod() *durationpb.Duration { + if x != nil { + return x.Period + } + return nil +} + +func (x *RateLimitInfo) GetFactor() float64 { + if x != nil { + return x.Factor + } + return 0 +} + // Request message for Bigtable.CheckAndMutateRow. 
type CheckAndMutateRowRequest struct { state protoimpl.MessageState @@ -760,7 +842,7 @@ type CheckAndMutateRowRequest struct { func (x *CheckAndMutateRowRequest) Reset() { *x = CheckAndMutateRowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[8] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -773,7 +855,7 @@ func (x *CheckAndMutateRowRequest) String() string { func (*CheckAndMutateRowRequest) ProtoMessage() {} func (x *CheckAndMutateRowRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[8] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -786,7 +868,7 @@ func (x *CheckAndMutateRowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CheckAndMutateRowRequest.ProtoReflect.Descriptor instead. 
func (*CheckAndMutateRowRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{8} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{9} } func (x *CheckAndMutateRowRequest) GetTableName() string { @@ -845,7 +927,7 @@ type CheckAndMutateRowResponse struct { func (x *CheckAndMutateRowResponse) Reset() { *x = CheckAndMutateRowResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[9] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -858,7 +940,7 @@ func (x *CheckAndMutateRowResponse) String() string { func (*CheckAndMutateRowResponse) ProtoMessage() {} func (x *CheckAndMutateRowResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[9] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -871,7 +953,7 @@ func (x *CheckAndMutateRowResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CheckAndMutateRowResponse.ProtoReflect.Descriptor instead. 
func (*CheckAndMutateRowResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{9} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{10} } func (x *CheckAndMutateRowResponse) GetPredicateMatched() bool { @@ -899,7 +981,7 @@ type PingAndWarmRequest struct { func (x *PingAndWarmRequest) Reset() { *x = PingAndWarmRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[10] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -912,7 +994,7 @@ func (x *PingAndWarmRequest) String() string { func (*PingAndWarmRequest) ProtoMessage() {} func (x *PingAndWarmRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[10] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -925,7 +1007,7 @@ func (x *PingAndWarmRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PingAndWarmRequest.ProtoReflect.Descriptor instead. 
func (*PingAndWarmRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{10} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{11} } func (x *PingAndWarmRequest) GetName() string { @@ -952,7 +1034,7 @@ type PingAndWarmResponse struct { func (x *PingAndWarmResponse) Reset() { *x = PingAndWarmResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[11] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -965,7 +1047,7 @@ func (x *PingAndWarmResponse) String() string { func (*PingAndWarmResponse) ProtoMessage() {} func (x *PingAndWarmResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[11] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -978,7 +1060,7 @@ func (x *PingAndWarmResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PingAndWarmResponse.ProtoReflect.Descriptor instead. func (*PingAndWarmResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{11} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{12} } // Request message for Bigtable.ReadModifyWriteRow. 
@@ -1006,7 +1088,7 @@ type ReadModifyWriteRowRequest struct { func (x *ReadModifyWriteRowRequest) Reset() { *x = ReadModifyWriteRowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[12] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1019,7 +1101,7 @@ func (x *ReadModifyWriteRowRequest) String() string { func (*ReadModifyWriteRowRequest) ProtoMessage() {} func (x *ReadModifyWriteRowRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[12] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1032,7 +1114,7 @@ func (x *ReadModifyWriteRowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadModifyWriteRowRequest.ProtoReflect.Descriptor instead. func (*ReadModifyWriteRowRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{12} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{13} } func (x *ReadModifyWriteRowRequest) GetTableName() string { @@ -1076,7 +1158,7 @@ type ReadModifyWriteRowResponse struct { func (x *ReadModifyWriteRowResponse) Reset() { *x = ReadModifyWriteRowResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[13] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1089,7 +1171,7 @@ func (x *ReadModifyWriteRowResponse) String() string { func (*ReadModifyWriteRowResponse) ProtoMessage() {} func (x *ReadModifyWriteRowResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[13] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[14] if 
protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1102,7 +1184,7 @@ func (x *ReadModifyWriteRowResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadModifyWriteRowResponse.ProtoReflect.Descriptor instead. func (*ReadModifyWriteRowResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{13} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{14} } func (x *ReadModifyWriteRowResponse) GetRow() *Row { @@ -1133,7 +1215,7 @@ type GenerateInitialChangeStreamPartitionsRequest struct { func (x *GenerateInitialChangeStreamPartitionsRequest) Reset() { *x = GenerateInitialChangeStreamPartitionsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[14] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1146,7 +1228,7 @@ func (x *GenerateInitialChangeStreamPartitionsRequest) String() string { func (*GenerateInitialChangeStreamPartitionsRequest) ProtoMessage() {} func (x *GenerateInitialChangeStreamPartitionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[14] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1159,7 +1241,7 @@ func (x *GenerateInitialChangeStreamPartitionsRequest) ProtoReflect() protorefle // Deprecated: Use GenerateInitialChangeStreamPartitionsRequest.ProtoReflect.Descriptor instead. 
func (*GenerateInitialChangeStreamPartitionsRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{14} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{15} } func (x *GenerateInitialChangeStreamPartitionsRequest) GetTableName() string { @@ -1190,7 +1272,7 @@ type GenerateInitialChangeStreamPartitionsResponse struct { func (x *GenerateInitialChangeStreamPartitionsResponse) Reset() { *x = GenerateInitialChangeStreamPartitionsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[15] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1203,7 +1285,7 @@ func (x *GenerateInitialChangeStreamPartitionsResponse) String() string { func (*GenerateInitialChangeStreamPartitionsResponse) ProtoMessage() {} func (x *GenerateInitialChangeStreamPartitionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[15] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1216,7 +1298,7 @@ func (x *GenerateInitialChangeStreamPartitionsResponse) ProtoReflect() protorefl // Deprecated: Use GenerateInitialChangeStreamPartitionsResponse.ProtoReflect.Descriptor instead. 
func (*GenerateInitialChangeStreamPartitionsResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{15} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{16} } func (x *GenerateInitialChangeStreamPartitionsResponse) GetPartition() *StreamPartition { @@ -1263,7 +1345,7 @@ type ReadChangeStreamRequest struct { func (x *ReadChangeStreamRequest) Reset() { *x = ReadChangeStreamRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[16] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1276,7 +1358,7 @@ func (x *ReadChangeStreamRequest) String() string { func (*ReadChangeStreamRequest) ProtoMessage() {} func (x *ReadChangeStreamRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[16] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1289,7 +1371,7 @@ func (x *ReadChangeStreamRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadChangeStreamRequest.ProtoReflect.Descriptor instead. 
func (*ReadChangeStreamRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{16} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{17} } func (x *ReadChangeStreamRequest) GetTableName() string { @@ -1398,7 +1480,7 @@ type ReadChangeStreamResponse struct { func (x *ReadChangeStreamResponse) Reset() { *x = ReadChangeStreamResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[17] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1411,7 +1493,7 @@ func (x *ReadChangeStreamResponse) String() string { func (*ReadChangeStreamResponse) ProtoMessage() {} func (x *ReadChangeStreamResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[17] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1424,7 +1506,7 @@ func (x *ReadChangeStreamResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadChangeStreamResponse.ProtoReflect.Descriptor instead. 
func (*ReadChangeStreamResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{17} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{18} } func (m *ReadChangeStreamResponse) GetStreamRecord() isReadChangeStreamResponse_StreamRecord { @@ -1541,7 +1623,7 @@ type ReadRowsResponse_CellChunk struct { func (x *ReadRowsResponse_CellChunk) Reset() { *x = ReadRowsResponse_CellChunk{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[18] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1554,7 +1636,7 @@ func (x *ReadRowsResponse_CellChunk) String() string { func (*ReadRowsResponse_CellChunk) ProtoMessage() {} func (x *ReadRowsResponse_CellChunk) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[18] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1677,7 +1759,7 @@ type MutateRowsRequest_Entry struct { func (x *MutateRowsRequest_Entry) Reset() { *x = MutateRowsRequest_Entry{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[19] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1690,7 +1772,7 @@ func (x *MutateRowsRequest_Entry) String() string { func (*MutateRowsRequest_Entry) ProtoMessage() {} func (x *MutateRowsRequest_Entry) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[19] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1739,7 +1821,7 @@ type 
MutateRowsResponse_Entry struct { func (x *MutateRowsResponse_Entry) Reset() { *x = MutateRowsResponse_Entry{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[20] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1752,7 +1834,7 @@ func (x *MutateRowsResponse_Entry) String() string { func (*MutateRowsResponse_Entry) ProtoMessage() {} func (x *MutateRowsResponse_Entry) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[20] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1800,7 +1882,7 @@ type ReadChangeStreamResponse_MutationChunk struct { func (x *ReadChangeStreamResponse_MutationChunk) Reset() { *x = ReadChangeStreamResponse_MutationChunk{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[21] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1813,7 +1895,7 @@ func (x *ReadChangeStreamResponse_MutationChunk) String() string { func (*ReadChangeStreamResponse_MutationChunk) ProtoMessage() {} func (x *ReadChangeStreamResponse_MutationChunk) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[21] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1826,7 +1908,7 @@ func (x *ReadChangeStreamResponse_MutationChunk) ProtoReflect() protoreflect.Mes // Deprecated: Use ReadChangeStreamResponse_MutationChunk.ProtoReflect.Descriptor instead. 
func (*ReadChangeStreamResponse_MutationChunk) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{17, 0} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{18, 0} } func (x *ReadChangeStreamResponse_MutationChunk) GetChunkInfo() *ReadChangeStreamResponse_MutationChunk_ChunkInfo { @@ -1895,7 +1977,7 @@ type ReadChangeStreamResponse_DataChange struct { func (x *ReadChangeStreamResponse_DataChange) Reset() { *x = ReadChangeStreamResponse_DataChange{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[22] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1908,7 +1990,7 @@ func (x *ReadChangeStreamResponse_DataChange) String() string { func (*ReadChangeStreamResponse_DataChange) ProtoMessage() {} func (x *ReadChangeStreamResponse_DataChange) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[22] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1921,7 +2003,7 @@ func (x *ReadChangeStreamResponse_DataChange) ProtoReflect() protoreflect.Messag // Deprecated: Use ReadChangeStreamResponse_DataChange.ProtoReflect.Descriptor instead. 
func (*ReadChangeStreamResponse_DataChange) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{17, 1} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{18, 1} } func (x *ReadChangeStreamResponse_DataChange) GetType() ReadChangeStreamResponse_DataChange_Type { @@ -2009,7 +2091,7 @@ type ReadChangeStreamResponse_Heartbeat struct { func (x *ReadChangeStreamResponse_Heartbeat) Reset() { *x = ReadChangeStreamResponse_Heartbeat{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[23] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2022,7 +2104,7 @@ func (x *ReadChangeStreamResponse_Heartbeat) String() string { func (*ReadChangeStreamResponse_Heartbeat) ProtoMessage() {} func (x *ReadChangeStreamResponse_Heartbeat) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[23] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2035,7 +2117,7 @@ func (x *ReadChangeStreamResponse_Heartbeat) ProtoReflect() protoreflect.Message // Deprecated: Use ReadChangeStreamResponse_Heartbeat.ProtoReflect.Descriptor instead. 
func (*ReadChangeStreamResponse_Heartbeat) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{17, 2} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{18, 2} } func (x *ReadChangeStreamResponse_Heartbeat) GetContinuationToken() *StreamContinuationToken { @@ -2095,7 +2177,7 @@ type ReadChangeStreamResponse_CloseStream struct { func (x *ReadChangeStreamResponse_CloseStream) Reset() { *x = ReadChangeStreamResponse_CloseStream{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[24] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2108,7 +2190,7 @@ func (x *ReadChangeStreamResponse_CloseStream) String() string { func (*ReadChangeStreamResponse_CloseStream) ProtoMessage() {} func (x *ReadChangeStreamResponse_CloseStream) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[24] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2121,7 +2203,7 @@ func (x *ReadChangeStreamResponse_CloseStream) ProtoReflect() protoreflect.Messa // Deprecated: Use ReadChangeStreamResponse_CloseStream.ProtoReflect.Descriptor instead. 
func (*ReadChangeStreamResponse_CloseStream) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{17, 3} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{18, 3} } func (x *ReadChangeStreamResponse_CloseStream) GetStatus() *status.Status { @@ -2165,7 +2247,7 @@ type ReadChangeStreamResponse_MutationChunk_ChunkInfo struct { func (x *ReadChangeStreamResponse_MutationChunk_ChunkInfo) Reset() { *x = ReadChangeStreamResponse_MutationChunk_ChunkInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[25] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2178,7 +2260,7 @@ func (x *ReadChangeStreamResponse_MutationChunk_ChunkInfo) String() string { func (*ReadChangeStreamResponse_MutationChunk_ChunkInfo) ProtoMessage() {} func (x *ReadChangeStreamResponse_MutationChunk_ChunkInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[25] + mi := &file_google_bigtable_v2_bigtable_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2191,7 +2273,7 @@ func (x *ReadChangeStreamResponse_MutationChunk_ChunkInfo) ProtoReflect() protor // Deprecated: Use ReadChangeStreamResponse_MutationChunk_ChunkInfo.ProtoReflect.Descriptor instead. 
func (*ReadChangeStreamResponse_MutationChunk_ChunkInfo) Descriptor() ([]byte, []int) { - return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{17, 0, 0} + return file_google_bigtable_v2_bigtable_proto_rawDescGZIP(), []int{18, 0, 0} } func (x *ReadChangeStreamResponse_MutationChunk_ChunkInfo) GetChunkedValueSize() int32 { @@ -2356,18 +2438,30 @@ var file_google_bigtable_v2_bigtable_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x6d, 0x75, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x12, 0x4d, 0x75, 0x74, 0x61, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x8b, 0x02, 0x0a, 0x12, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, - 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x4e, 0x0a, 0x0f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x49, 0x6e, + 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x88, 
0x01, 0x01, 0x1a, 0x49, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x22, 0xff, 0x02, 0x0a, 0x18, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, + 0x73, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x5a, 0x0a, 0x0d, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x66, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x22, 0xff, 0x02, 0x0a, 0x18, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x6e, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, @@ -2812,7 +2906,7 @@ func file_google_bigtable_v2_bigtable_proto_rawDescGZIP() []byte { } var file_google_bigtable_v2_bigtable_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_bigtable_v2_bigtable_proto_msgTypes = make([]protoimpl.MessageInfo, 26) +var file_google_bigtable_v2_bigtable_proto_msgTypes = make([]protoimpl.MessageInfo, 27) var 
file_google_bigtable_v2_bigtable_proto_goTypes = []interface{}{ (ReadRowsRequest_RequestStatsView)(0), // 0: google.bigtable.v2.ReadRowsRequest.RequestStatsView (ReadChangeStreamResponse_DataChange_Type)(0), // 1: google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type @@ -2824,100 +2918,103 @@ var file_google_bigtable_v2_bigtable_proto_goTypes = []interface{}{ (*MutateRowResponse)(nil), // 7: google.bigtable.v2.MutateRowResponse (*MutateRowsRequest)(nil), // 8: google.bigtable.v2.MutateRowsRequest (*MutateRowsResponse)(nil), // 9: google.bigtable.v2.MutateRowsResponse - (*CheckAndMutateRowRequest)(nil), // 10: google.bigtable.v2.CheckAndMutateRowRequest - (*CheckAndMutateRowResponse)(nil), // 11: google.bigtable.v2.CheckAndMutateRowResponse - (*PingAndWarmRequest)(nil), // 12: google.bigtable.v2.PingAndWarmRequest - (*PingAndWarmResponse)(nil), // 13: google.bigtable.v2.PingAndWarmResponse - (*ReadModifyWriteRowRequest)(nil), // 14: google.bigtable.v2.ReadModifyWriteRowRequest - (*ReadModifyWriteRowResponse)(nil), // 15: google.bigtable.v2.ReadModifyWriteRowResponse - (*GenerateInitialChangeStreamPartitionsRequest)(nil), // 16: google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest - (*GenerateInitialChangeStreamPartitionsResponse)(nil), // 17: google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse - (*ReadChangeStreamRequest)(nil), // 18: google.bigtable.v2.ReadChangeStreamRequest - (*ReadChangeStreamResponse)(nil), // 19: google.bigtable.v2.ReadChangeStreamResponse - (*ReadRowsResponse_CellChunk)(nil), // 20: google.bigtable.v2.ReadRowsResponse.CellChunk - (*MutateRowsRequest_Entry)(nil), // 21: google.bigtable.v2.MutateRowsRequest.Entry - (*MutateRowsResponse_Entry)(nil), // 22: google.bigtable.v2.MutateRowsResponse.Entry - (*ReadChangeStreamResponse_MutationChunk)(nil), // 23: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk - (*ReadChangeStreamResponse_DataChange)(nil), // 24: 
google.bigtable.v2.ReadChangeStreamResponse.DataChange - (*ReadChangeStreamResponse_Heartbeat)(nil), // 25: google.bigtable.v2.ReadChangeStreamResponse.Heartbeat - (*ReadChangeStreamResponse_CloseStream)(nil), // 26: google.bigtable.v2.ReadChangeStreamResponse.CloseStream - (*ReadChangeStreamResponse_MutationChunk_ChunkInfo)(nil), // 27: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo - (*RowSet)(nil), // 28: google.bigtable.v2.RowSet - (*RowFilter)(nil), // 29: google.bigtable.v2.RowFilter - (*RequestStats)(nil), // 30: google.bigtable.v2.RequestStats - (*Mutation)(nil), // 31: google.bigtable.v2.Mutation - (*ReadModifyWriteRule)(nil), // 32: google.bigtable.v2.ReadModifyWriteRule - (*Row)(nil), // 33: google.bigtable.v2.Row - (*StreamPartition)(nil), // 34: google.bigtable.v2.StreamPartition - (*timestamppb.Timestamp)(nil), // 35: google.protobuf.Timestamp - (*StreamContinuationTokens)(nil), // 36: google.bigtable.v2.StreamContinuationTokens - (*durationpb.Duration)(nil), // 37: google.protobuf.Duration - (*wrapperspb.StringValue)(nil), // 38: google.protobuf.StringValue - (*wrapperspb.BytesValue)(nil), // 39: google.protobuf.BytesValue - (*status.Status)(nil), // 40: google.rpc.Status - (*StreamContinuationToken)(nil), // 41: google.bigtable.v2.StreamContinuationToken + (*RateLimitInfo)(nil), // 10: google.bigtable.v2.RateLimitInfo + (*CheckAndMutateRowRequest)(nil), // 11: google.bigtable.v2.CheckAndMutateRowRequest + (*CheckAndMutateRowResponse)(nil), // 12: google.bigtable.v2.CheckAndMutateRowResponse + (*PingAndWarmRequest)(nil), // 13: google.bigtable.v2.PingAndWarmRequest + (*PingAndWarmResponse)(nil), // 14: google.bigtable.v2.PingAndWarmResponse + (*ReadModifyWriteRowRequest)(nil), // 15: google.bigtable.v2.ReadModifyWriteRowRequest + (*ReadModifyWriteRowResponse)(nil), // 16: google.bigtable.v2.ReadModifyWriteRowResponse + (*GenerateInitialChangeStreamPartitionsRequest)(nil), // 17: 
google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest + (*GenerateInitialChangeStreamPartitionsResponse)(nil), // 18: google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse + (*ReadChangeStreamRequest)(nil), // 19: google.bigtable.v2.ReadChangeStreamRequest + (*ReadChangeStreamResponse)(nil), // 20: google.bigtable.v2.ReadChangeStreamResponse + (*ReadRowsResponse_CellChunk)(nil), // 21: google.bigtable.v2.ReadRowsResponse.CellChunk + (*MutateRowsRequest_Entry)(nil), // 22: google.bigtable.v2.MutateRowsRequest.Entry + (*MutateRowsResponse_Entry)(nil), // 23: google.bigtable.v2.MutateRowsResponse.Entry + (*ReadChangeStreamResponse_MutationChunk)(nil), // 24: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk + (*ReadChangeStreamResponse_DataChange)(nil), // 25: google.bigtable.v2.ReadChangeStreamResponse.DataChange + (*ReadChangeStreamResponse_Heartbeat)(nil), // 26: google.bigtable.v2.ReadChangeStreamResponse.Heartbeat + (*ReadChangeStreamResponse_CloseStream)(nil), // 27: google.bigtable.v2.ReadChangeStreamResponse.CloseStream + (*ReadChangeStreamResponse_MutationChunk_ChunkInfo)(nil), // 28: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + (*RowSet)(nil), // 29: google.bigtable.v2.RowSet + (*RowFilter)(nil), // 30: google.bigtable.v2.RowFilter + (*RequestStats)(nil), // 31: google.bigtable.v2.RequestStats + (*Mutation)(nil), // 32: google.bigtable.v2.Mutation + (*durationpb.Duration)(nil), // 33: google.protobuf.Duration + (*ReadModifyWriteRule)(nil), // 34: google.bigtable.v2.ReadModifyWriteRule + (*Row)(nil), // 35: google.bigtable.v2.Row + (*StreamPartition)(nil), // 36: google.bigtable.v2.StreamPartition + (*timestamppb.Timestamp)(nil), // 37: google.protobuf.Timestamp + (*StreamContinuationTokens)(nil), // 38: google.bigtable.v2.StreamContinuationTokens + (*wrapperspb.StringValue)(nil), // 39: google.protobuf.StringValue + (*wrapperspb.BytesValue)(nil), // 40: google.protobuf.BytesValue + (*status.Status)(nil), 
// 41: google.rpc.Status + (*StreamContinuationToken)(nil), // 42: google.bigtable.v2.StreamContinuationToken } var file_google_bigtable_v2_bigtable_proto_depIdxs = []int32{ - 28, // 0: google.bigtable.v2.ReadRowsRequest.rows:type_name -> google.bigtable.v2.RowSet - 29, // 1: google.bigtable.v2.ReadRowsRequest.filter:type_name -> google.bigtable.v2.RowFilter + 29, // 0: google.bigtable.v2.ReadRowsRequest.rows:type_name -> google.bigtable.v2.RowSet + 30, // 1: google.bigtable.v2.ReadRowsRequest.filter:type_name -> google.bigtable.v2.RowFilter 0, // 2: google.bigtable.v2.ReadRowsRequest.request_stats_view:type_name -> google.bigtable.v2.ReadRowsRequest.RequestStatsView - 20, // 3: google.bigtable.v2.ReadRowsResponse.chunks:type_name -> google.bigtable.v2.ReadRowsResponse.CellChunk - 30, // 4: google.bigtable.v2.ReadRowsResponse.request_stats:type_name -> google.bigtable.v2.RequestStats - 31, // 5: google.bigtable.v2.MutateRowRequest.mutations:type_name -> google.bigtable.v2.Mutation - 21, // 6: google.bigtable.v2.MutateRowsRequest.entries:type_name -> google.bigtable.v2.MutateRowsRequest.Entry - 22, // 7: google.bigtable.v2.MutateRowsResponse.entries:type_name -> google.bigtable.v2.MutateRowsResponse.Entry - 29, // 8: google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter:type_name -> google.bigtable.v2.RowFilter - 31, // 9: google.bigtable.v2.CheckAndMutateRowRequest.true_mutations:type_name -> google.bigtable.v2.Mutation - 31, // 10: google.bigtable.v2.CheckAndMutateRowRequest.false_mutations:type_name -> google.bigtable.v2.Mutation - 32, // 11: google.bigtable.v2.ReadModifyWriteRowRequest.rules:type_name -> google.bigtable.v2.ReadModifyWriteRule - 33, // 12: google.bigtable.v2.ReadModifyWriteRowResponse.row:type_name -> google.bigtable.v2.Row - 34, // 13: google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse.partition:type_name -> google.bigtable.v2.StreamPartition - 34, // 14: google.bigtable.v2.ReadChangeStreamRequest.partition:type_name -> 
google.bigtable.v2.StreamPartition - 35, // 15: google.bigtable.v2.ReadChangeStreamRequest.start_time:type_name -> google.protobuf.Timestamp - 36, // 16: google.bigtable.v2.ReadChangeStreamRequest.continuation_tokens:type_name -> google.bigtable.v2.StreamContinuationTokens - 35, // 17: google.bigtable.v2.ReadChangeStreamRequest.end_time:type_name -> google.protobuf.Timestamp - 37, // 18: google.bigtable.v2.ReadChangeStreamRequest.heartbeat_duration:type_name -> google.protobuf.Duration - 24, // 19: google.bigtable.v2.ReadChangeStreamResponse.data_change:type_name -> google.bigtable.v2.ReadChangeStreamResponse.DataChange - 25, // 20: google.bigtable.v2.ReadChangeStreamResponse.heartbeat:type_name -> google.bigtable.v2.ReadChangeStreamResponse.Heartbeat - 26, // 21: google.bigtable.v2.ReadChangeStreamResponse.close_stream:type_name -> google.bigtable.v2.ReadChangeStreamResponse.CloseStream - 38, // 22: google.bigtable.v2.ReadRowsResponse.CellChunk.family_name:type_name -> google.protobuf.StringValue - 39, // 23: google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier:type_name -> google.protobuf.BytesValue - 31, // 24: google.bigtable.v2.MutateRowsRequest.Entry.mutations:type_name -> google.bigtable.v2.Mutation - 40, // 25: google.bigtable.v2.MutateRowsResponse.Entry.status:type_name -> google.rpc.Status - 27, // 26: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.chunk_info:type_name -> google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo - 31, // 27: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.mutation:type_name -> google.bigtable.v2.Mutation - 1, // 28: google.bigtable.v2.ReadChangeStreamResponse.DataChange.type:type_name -> google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type - 35, // 29: google.bigtable.v2.ReadChangeStreamResponse.DataChange.commit_timestamp:type_name -> google.protobuf.Timestamp - 23, // 30: google.bigtable.v2.ReadChangeStreamResponse.DataChange.chunks:type_name -> 
google.bigtable.v2.ReadChangeStreamResponse.MutationChunk - 35, // 31: google.bigtable.v2.ReadChangeStreamResponse.DataChange.estimated_low_watermark:type_name -> google.protobuf.Timestamp - 41, // 32: google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.continuation_token:type_name -> google.bigtable.v2.StreamContinuationToken - 35, // 33: google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.estimated_low_watermark:type_name -> google.protobuf.Timestamp - 40, // 34: google.bigtable.v2.ReadChangeStreamResponse.CloseStream.status:type_name -> google.rpc.Status - 41, // 35: google.bigtable.v2.ReadChangeStreamResponse.CloseStream.continuation_tokens:type_name -> google.bigtable.v2.StreamContinuationToken - 34, // 36: google.bigtable.v2.ReadChangeStreamResponse.CloseStream.new_partitions:type_name -> google.bigtable.v2.StreamPartition - 2, // 37: google.bigtable.v2.Bigtable.ReadRows:input_type -> google.bigtable.v2.ReadRowsRequest - 4, // 38: google.bigtable.v2.Bigtable.SampleRowKeys:input_type -> google.bigtable.v2.SampleRowKeysRequest - 6, // 39: google.bigtable.v2.Bigtable.MutateRow:input_type -> google.bigtable.v2.MutateRowRequest - 8, // 40: google.bigtable.v2.Bigtable.MutateRows:input_type -> google.bigtable.v2.MutateRowsRequest - 10, // 41: google.bigtable.v2.Bigtable.CheckAndMutateRow:input_type -> google.bigtable.v2.CheckAndMutateRowRequest - 12, // 42: google.bigtable.v2.Bigtable.PingAndWarm:input_type -> google.bigtable.v2.PingAndWarmRequest - 14, // 43: google.bigtable.v2.Bigtable.ReadModifyWriteRow:input_type -> google.bigtable.v2.ReadModifyWriteRowRequest - 16, // 44: google.bigtable.v2.Bigtable.GenerateInitialChangeStreamPartitions:input_type -> google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest - 18, // 45: google.bigtable.v2.Bigtable.ReadChangeStream:input_type -> google.bigtable.v2.ReadChangeStreamRequest - 3, // 46: google.bigtable.v2.Bigtable.ReadRows:output_type -> google.bigtable.v2.ReadRowsResponse - 5, // 47: 
google.bigtable.v2.Bigtable.SampleRowKeys:output_type -> google.bigtable.v2.SampleRowKeysResponse - 7, // 48: google.bigtable.v2.Bigtable.MutateRow:output_type -> google.bigtable.v2.MutateRowResponse - 9, // 49: google.bigtable.v2.Bigtable.MutateRows:output_type -> google.bigtable.v2.MutateRowsResponse - 11, // 50: google.bigtable.v2.Bigtable.CheckAndMutateRow:output_type -> google.bigtable.v2.CheckAndMutateRowResponse - 13, // 51: google.bigtable.v2.Bigtable.PingAndWarm:output_type -> google.bigtable.v2.PingAndWarmResponse - 15, // 52: google.bigtable.v2.Bigtable.ReadModifyWriteRow:output_type -> google.bigtable.v2.ReadModifyWriteRowResponse - 17, // 53: google.bigtable.v2.Bigtable.GenerateInitialChangeStreamPartitions:output_type -> google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse - 19, // 54: google.bigtable.v2.Bigtable.ReadChangeStream:output_type -> google.bigtable.v2.ReadChangeStreamResponse - 46, // [46:55] is the sub-list for method output_type - 37, // [37:46] is the sub-list for method input_type - 37, // [37:37] is the sub-list for extension type_name - 37, // [37:37] is the sub-list for extension extendee - 0, // [0:37] is the sub-list for field type_name + 21, // 3: google.bigtable.v2.ReadRowsResponse.chunks:type_name -> google.bigtable.v2.ReadRowsResponse.CellChunk + 31, // 4: google.bigtable.v2.ReadRowsResponse.request_stats:type_name -> google.bigtable.v2.RequestStats + 32, // 5: google.bigtable.v2.MutateRowRequest.mutations:type_name -> google.bigtable.v2.Mutation + 22, // 6: google.bigtable.v2.MutateRowsRequest.entries:type_name -> google.bigtable.v2.MutateRowsRequest.Entry + 23, // 7: google.bigtable.v2.MutateRowsResponse.entries:type_name -> google.bigtable.v2.MutateRowsResponse.Entry + 10, // 8: google.bigtable.v2.MutateRowsResponse.rate_limit_info:type_name -> google.bigtable.v2.RateLimitInfo + 33, // 9: google.bigtable.v2.RateLimitInfo.period:type_name -> google.protobuf.Duration + 30, // 10: 
google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter:type_name -> google.bigtable.v2.RowFilter + 32, // 11: google.bigtable.v2.CheckAndMutateRowRequest.true_mutations:type_name -> google.bigtable.v2.Mutation + 32, // 12: google.bigtable.v2.CheckAndMutateRowRequest.false_mutations:type_name -> google.bigtable.v2.Mutation + 34, // 13: google.bigtable.v2.ReadModifyWriteRowRequest.rules:type_name -> google.bigtable.v2.ReadModifyWriteRule + 35, // 14: google.bigtable.v2.ReadModifyWriteRowResponse.row:type_name -> google.bigtable.v2.Row + 36, // 15: google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse.partition:type_name -> google.bigtable.v2.StreamPartition + 36, // 16: google.bigtable.v2.ReadChangeStreamRequest.partition:type_name -> google.bigtable.v2.StreamPartition + 37, // 17: google.bigtable.v2.ReadChangeStreamRequest.start_time:type_name -> google.protobuf.Timestamp + 38, // 18: google.bigtable.v2.ReadChangeStreamRequest.continuation_tokens:type_name -> google.bigtable.v2.StreamContinuationTokens + 37, // 19: google.bigtable.v2.ReadChangeStreamRequest.end_time:type_name -> google.protobuf.Timestamp + 33, // 20: google.bigtable.v2.ReadChangeStreamRequest.heartbeat_duration:type_name -> google.protobuf.Duration + 25, // 21: google.bigtable.v2.ReadChangeStreamResponse.data_change:type_name -> google.bigtable.v2.ReadChangeStreamResponse.DataChange + 26, // 22: google.bigtable.v2.ReadChangeStreamResponse.heartbeat:type_name -> google.bigtable.v2.ReadChangeStreamResponse.Heartbeat + 27, // 23: google.bigtable.v2.ReadChangeStreamResponse.close_stream:type_name -> google.bigtable.v2.ReadChangeStreamResponse.CloseStream + 39, // 24: google.bigtable.v2.ReadRowsResponse.CellChunk.family_name:type_name -> google.protobuf.StringValue + 40, // 25: google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier:type_name -> google.protobuf.BytesValue + 32, // 26: google.bigtable.v2.MutateRowsRequest.Entry.mutations:type_name -> google.bigtable.v2.Mutation + 41, // 
27: google.bigtable.v2.MutateRowsResponse.Entry.status:type_name -> google.rpc.Status + 28, // 28: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.chunk_info:type_name -> google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo + 32, // 29: google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.mutation:type_name -> google.bigtable.v2.Mutation + 1, // 30: google.bigtable.v2.ReadChangeStreamResponse.DataChange.type:type_name -> google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type + 37, // 31: google.bigtable.v2.ReadChangeStreamResponse.DataChange.commit_timestamp:type_name -> google.protobuf.Timestamp + 24, // 32: google.bigtable.v2.ReadChangeStreamResponse.DataChange.chunks:type_name -> google.bigtable.v2.ReadChangeStreamResponse.MutationChunk + 37, // 33: google.bigtable.v2.ReadChangeStreamResponse.DataChange.estimated_low_watermark:type_name -> google.protobuf.Timestamp + 42, // 34: google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.continuation_token:type_name -> google.bigtable.v2.StreamContinuationToken + 37, // 35: google.bigtable.v2.ReadChangeStreamResponse.Heartbeat.estimated_low_watermark:type_name -> google.protobuf.Timestamp + 41, // 36: google.bigtable.v2.ReadChangeStreamResponse.CloseStream.status:type_name -> google.rpc.Status + 42, // 37: google.bigtable.v2.ReadChangeStreamResponse.CloseStream.continuation_tokens:type_name -> google.bigtable.v2.StreamContinuationToken + 36, // 38: google.bigtable.v2.ReadChangeStreamResponse.CloseStream.new_partitions:type_name -> google.bigtable.v2.StreamPartition + 2, // 39: google.bigtable.v2.Bigtable.ReadRows:input_type -> google.bigtable.v2.ReadRowsRequest + 4, // 40: google.bigtable.v2.Bigtable.SampleRowKeys:input_type -> google.bigtable.v2.SampleRowKeysRequest + 6, // 41: google.bigtable.v2.Bigtable.MutateRow:input_type -> google.bigtable.v2.MutateRowRequest + 8, // 42: google.bigtable.v2.Bigtable.MutateRows:input_type -> google.bigtable.v2.MutateRowsRequest + 11, // 43: 
google.bigtable.v2.Bigtable.CheckAndMutateRow:input_type -> google.bigtable.v2.CheckAndMutateRowRequest + 13, // 44: google.bigtable.v2.Bigtable.PingAndWarm:input_type -> google.bigtable.v2.PingAndWarmRequest + 15, // 45: google.bigtable.v2.Bigtable.ReadModifyWriteRow:input_type -> google.bigtable.v2.ReadModifyWriteRowRequest + 17, // 46: google.bigtable.v2.Bigtable.GenerateInitialChangeStreamPartitions:input_type -> google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest + 19, // 47: google.bigtable.v2.Bigtable.ReadChangeStream:input_type -> google.bigtable.v2.ReadChangeStreamRequest + 3, // 48: google.bigtable.v2.Bigtable.ReadRows:output_type -> google.bigtable.v2.ReadRowsResponse + 5, // 49: google.bigtable.v2.Bigtable.SampleRowKeys:output_type -> google.bigtable.v2.SampleRowKeysResponse + 7, // 50: google.bigtable.v2.Bigtable.MutateRow:output_type -> google.bigtable.v2.MutateRowResponse + 9, // 51: google.bigtable.v2.Bigtable.MutateRows:output_type -> google.bigtable.v2.MutateRowsResponse + 12, // 52: google.bigtable.v2.Bigtable.CheckAndMutateRow:output_type -> google.bigtable.v2.CheckAndMutateRowResponse + 14, // 53: google.bigtable.v2.Bigtable.PingAndWarm:output_type -> google.bigtable.v2.PingAndWarmResponse + 16, // 54: google.bigtable.v2.Bigtable.ReadModifyWriteRow:output_type -> google.bigtable.v2.ReadModifyWriteRowResponse + 18, // 55: google.bigtable.v2.Bigtable.GenerateInitialChangeStreamPartitions:output_type -> google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse + 20, // 56: google.bigtable.v2.Bigtable.ReadChangeStream:output_type -> google.bigtable.v2.ReadChangeStreamResponse + 48, // [48:57] is the sub-list for method output_type + 39, // [39:48] is the sub-list for method input_type + 39, // [39:39] is the sub-list for extension type_name + 39, // [39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name } func init() { file_google_bigtable_v2_bigtable_proto_init() } @@ -3025,7 +3122,7 @@ 
func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckAndMutateRowRequest); i { + switch v := v.(*RateLimitInfo); i { case 0: return &v.state case 1: @@ -3037,7 +3134,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckAndMutateRowResponse); i { + switch v := v.(*CheckAndMutateRowRequest); i { case 0: return &v.state case 1: @@ -3049,7 +3146,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PingAndWarmRequest); i { + switch v := v.(*CheckAndMutateRowResponse); i { case 0: return &v.state case 1: @@ -3061,7 +3158,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PingAndWarmResponse); i { + switch v := v.(*PingAndWarmRequest); i { case 0: return &v.state case 1: @@ -3073,7 +3170,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadModifyWriteRowRequest); i { + switch v := v.(*PingAndWarmResponse); i { case 0: return &v.state case 1: @@ -3085,7 +3182,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadModifyWriteRowResponse); i { + switch v := v.(*ReadModifyWriteRowRequest); i { case 0: return &v.state case 1: @@ -3097,7 +3194,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[14].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*GenerateInitialChangeStreamPartitionsRequest); i { + switch v := v.(*ReadModifyWriteRowResponse); i { case 0: return &v.state case 1: @@ -3109,7 +3206,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GenerateInitialChangeStreamPartitionsResponse); i { + switch v := v.(*GenerateInitialChangeStreamPartitionsRequest); i { case 0: return &v.state case 1: @@ -3121,7 +3218,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadChangeStreamRequest); i { + switch v := v.(*GenerateInitialChangeStreamPartitionsResponse); i { case 0: return &v.state case 1: @@ -3133,7 +3230,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadChangeStreamResponse); i { + switch v := v.(*ReadChangeStreamRequest); i { case 0: return &v.state case 1: @@ -3145,7 +3242,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadRowsResponse_CellChunk); i { + switch v := v.(*ReadChangeStreamResponse); i { case 0: return &v.state case 1: @@ -3157,7 +3254,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MutateRowsRequest_Entry); i { + switch v := v.(*ReadRowsResponse_CellChunk); i { case 0: return &v.state case 1: @@ -3169,7 +3266,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*MutateRowsResponse_Entry); i { + switch v := v.(*MutateRowsRequest_Entry); i { case 0: return &v.state case 1: @@ -3181,7 +3278,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadChangeStreamResponse_MutationChunk); i { + switch v := v.(*MutateRowsResponse_Entry); i { case 0: return &v.state case 1: @@ -3193,7 +3290,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadChangeStreamResponse_DataChange); i { + switch v := v.(*ReadChangeStreamResponse_MutationChunk); i { case 0: return &v.state case 1: @@ -3205,7 +3302,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadChangeStreamResponse_Heartbeat); i { + switch v := v.(*ReadChangeStreamResponse_DataChange); i { case 0: return &v.state case 1: @@ -3217,7 +3314,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadChangeStreamResponse_CloseStream); i { + switch v := v.(*ReadChangeStreamResponse_Heartbeat); i { case 0: return &v.state case 1: @@ -3229,6 +3326,18 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } file_google_bigtable_v2_bigtable_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadChangeStreamResponse_CloseStream); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_bigtable_v2_bigtable_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadChangeStreamResponse_MutationChunk_ChunkInfo); i { 
case 0: return &v.state @@ -3241,16 +3350,17 @@ func file_google_bigtable_v2_bigtable_proto_init() { } } } - file_google_bigtable_v2_bigtable_proto_msgTypes[16].OneofWrappers = []interface{}{ + file_google_bigtable_v2_bigtable_proto_msgTypes[7].OneofWrappers = []interface{}{} + file_google_bigtable_v2_bigtable_proto_msgTypes[17].OneofWrappers = []interface{}{ (*ReadChangeStreamRequest_StartTime)(nil), (*ReadChangeStreamRequest_ContinuationTokens)(nil), } - file_google_bigtable_v2_bigtable_proto_msgTypes[17].OneofWrappers = []interface{}{ + file_google_bigtable_v2_bigtable_proto_msgTypes[18].OneofWrappers = []interface{}{ (*ReadChangeStreamResponse_DataChange_)(nil), (*ReadChangeStreamResponse_Heartbeat_)(nil), (*ReadChangeStreamResponse_CloseStream_)(nil), } - file_google_bigtable_v2_bigtable_proto_msgTypes[18].OneofWrappers = []interface{}{ + file_google_bigtable_v2_bigtable_proto_msgTypes[19].OneofWrappers = []interface{}{ (*ReadRowsResponse_CellChunk_ResetRow)(nil), (*ReadRowsResponse_CellChunk_CommitRow)(nil), } @@ -3260,7 +3370,7 @@ func file_google_bigtable_v2_bigtable_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_v2_bigtable_proto_rawDesc, NumEnums: 2, - NumMessages: 26, + NumMessages: 27, NumExtensions: 0, NumServices: 1, }, diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/feature_flags.pb.go b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/feature_flags.pb.go new file mode 100644 index 0000000000..ac12aa2dbc --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/bigtable/v2/feature_flags.pb.go @@ -0,0 +1,182 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.12 +// source: google/bigtable/v2/feature_flags.proto + +package bigtable + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Feature flags supported by a client. +// This is intended to be sent as part of request metadata to assure the server +// that certain behaviors are safe to enable. This proto is meant to be +// serialized and websafe-base64 encoded under the `bigtable-features` metadata +// key. The value will remain constant for the lifetime of a client and due to +// HTTP2's HPACK compression, the request overhead will be tiny. +// This is an internal implementation detail and should not be used by endusers +// directly. +type FeatureFlags struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Notify the server that the client enables batch write flow control by + // requesting RateLimitInfo from MutateRowsResponse. 
+ MutateRowsRateLimit bool `protobuf:"varint,3,opt,name=mutate_rows_rate_limit,json=mutateRowsRateLimit,proto3" json:"mutate_rows_rate_limit,omitempty"` +} + +func (x *FeatureFlags) Reset() { + *x = FeatureFlags{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_v2_feature_flags_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureFlags) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureFlags) ProtoMessage() {} + +func (x *FeatureFlags) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_v2_feature_flags_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureFlags.ProtoReflect.Descriptor instead. +func (*FeatureFlags) Descriptor() ([]byte, []int) { + return file_google_bigtable_v2_feature_flags_proto_rawDescGZIP(), []int{0} +} + +func (x *FeatureFlags) GetMutateRowsRateLimit() bool { + if x != nil { + return x.MutateRowsRateLimit + } + return false +} + +var File_google_bigtable_v2_feature_flags_proto protoreflect.FileDescriptor + +var file_google_bigtable_v2_feature_flags_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6c, 0x61, + 0x67, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x22, 0x43, 0x0a, 0x0c, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x33, 0x0a, 0x16, + 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x08, 0x52, 0x13, 0x6d, 0x75, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x42, 0xbd, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x11, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0xaa, 0x02, 0x18, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x18, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5c, 0x56, 0x32, 0xea, 0x02, 0x1b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x56, + 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_bigtable_v2_feature_flags_proto_rawDescOnce sync.Once + file_google_bigtable_v2_feature_flags_proto_rawDescData = file_google_bigtable_v2_feature_flags_proto_rawDesc +) + +func file_google_bigtable_v2_feature_flags_proto_rawDescGZIP() []byte { + file_google_bigtable_v2_feature_flags_proto_rawDescOnce.Do(func() { + file_google_bigtable_v2_feature_flags_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_bigtable_v2_feature_flags_proto_rawDescData) + }) + return file_google_bigtable_v2_feature_flags_proto_rawDescData +} + +var file_google_bigtable_v2_feature_flags_proto_msgTypes = 
make([]protoimpl.MessageInfo, 1) +var file_google_bigtable_v2_feature_flags_proto_goTypes = []interface{}{ + (*FeatureFlags)(nil), // 0: google.bigtable.v2.FeatureFlags +} +var file_google_bigtable_v2_feature_flags_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_bigtable_v2_feature_flags_proto_init() } +func file_google_bigtable_v2_feature_flags_proto_init() { + if File_google_bigtable_v2_feature_flags_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_bigtable_v2_feature_flags_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FeatureFlags); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_bigtable_v2_feature_flags_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_bigtable_v2_feature_flags_proto_goTypes, + DependencyIndexes: file_google_bigtable_v2_feature_flags_proto_depIdxs, + MessageInfos: file_google_bigtable_v2_feature_flags_proto_msgTypes, + }.Build() + File_google_bigtable_v2_feature_flags_proto = out.File + file_google_bigtable_v2_feature_flags_proto_rawDesc = nil + file_google_bigtable_v2_feature_flags_proto_goTypes = nil + file_google_bigtable_v2_feature_flags_proto_depIdxs = nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null 
+++ b/terraform/providers/google/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/internal/doc.go b/terraform/providers/google/vendor/google.golang.org/genproto/internal/doc.go new file mode 100644 index 0000000000..90e89b4aa3 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/genproto/internal/doc.go @@ -0,0 +1,17 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// This file makes internal an importable go package +// for use with backreferences from submodules. +package internal diff --git a/terraform/providers/google/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go b/terraform/providers/google/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go deleted file mode 100644 index d10ad66533..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package field_mask aliases all exported identifiers in -// package "google.golang.org/protobuf/types/known/fieldmaskpb". -package field_mask - -import "google.golang.org/protobuf/types/known/fieldmaskpb" - -type FieldMask = fieldmaskpb.FieldMask - -var File_google_protobuf_field_mask_proto = fieldmaskpb.File_google_protobuf_field_mask_proto diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/CONTRIBUTING.md b/terraform/providers/google/vendor/google.golang.org/grpc/CONTRIBUTING.md index 52338d004c..608aa6e1ac 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/terraform/providers/google/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -20,6 +20,15 @@ How to get your contributions merged smoothly and quickly. 
both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. +- If you are searching for features to work on, issues labeled [Status: Help + Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) + is a great place to start. These issues are well-documented and usually can be + resolved with a single pull request. + +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file + and update the year. + - The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a @@ -32,14 +41,18 @@ How to get your contributions merged smoothly and quickly. - Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. -- Don't fix code style and formatting unless you are already changing that line - to address an issue. PRs with irrelevant changes won't be merged. If you do - want to fix formatting or style, do that in a separate PR. +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the + benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We expect you to be reasonably - responsive to those comments, otherwise the PR will be closed after 2-3 weeks - of inactivity. + that you'll need to address before merging. 
We'll mark it as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale` and automatically close 7 days after that if we don't hear back from + you. - Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/attributes/attributes.go b/terraform/providers/google/vendor/google.golang.org/grpc/attributes/attributes.go index 02f5dc5318..3efca45914 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/attributes/attributes.go @@ -25,6 +25,11 @@ // later release. package attributes +import ( + "fmt" + "strings" +) + // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an @@ -99,3 +104,27 @@ func (a *Attributes) Equal(o *Attributes) bool { } return true } + +// String prints the attribute map. If any key or values throughout the map +// implement fmt.Stringer, it calls that method and appends. 
+func (a *Attributes) String() string { + var sb strings.Builder + sb.WriteString("{") + first := true + for k, v := range a.m { + var key, val string + if str, ok := k.(interface{ String() string }); ok { + key = str.String() + } + if str, ok := v.(interface{ String() string }); ok { + val = str.String() + } + if !first { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) + first = false + } + sb.WriteString("}") + return sb.String() +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/authz/audit/audit_logger.go b/terraform/providers/google/vendor/google.golang.org/grpc/authz/audit/audit_logger.go new file mode 100644 index 0000000000..b9b7219703 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/authz/audit/audit_logger.go @@ -0,0 +1,127 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package audit contains interfaces for audit logging during authorization. +package audit + +import ( + "encoding/json" + "sync" +) + +// loggerBuilderRegistry holds a map of audit logger builders and a mutex +// to facilitate thread-safe reading/writing operations. 
+type loggerBuilderRegistry struct { + mu sync.Mutex + builders map[string]LoggerBuilder +} + +var ( + registry = loggerBuilderRegistry{ + builders: make(map[string]LoggerBuilder), + } +) + +// RegisterLoggerBuilder registers the builder in a global map +// using b.Name() as the key. +// +// This should only be called during initialization time (i.e. in an init() +// function). If multiple builders are registered with the same name, +// the one registered last will take effect. +func RegisterLoggerBuilder(b LoggerBuilder) { + registry.mu.Lock() + defer registry.mu.Unlock() + registry.builders[b.Name()] = b +} + +// GetLoggerBuilder returns a builder with the given name. +// It returns nil if the builder is not found in the registry. +func GetLoggerBuilder(name string) LoggerBuilder { + registry.mu.Lock() + defer registry.mu.Unlock() + return registry.builders[name] +} + +// Event contains information passed to the audit logger as part of an +// audit logging event. +type Event struct { + // FullMethodName is the full method name of the audited RPC, in the format + // of "/pkg.Service/Method". For example, "/helloworld.Greeter/SayHello". + FullMethodName string + // Principal is the identity of the caller. Currently it will only be + // available in certificate-based TLS authentication. + Principal string + // PolicyName is the authorization policy name or the xDS RBAC filter name. + PolicyName string + // MatchedRule is the matched rule or policy name in the xDS RBAC filter. + // It will be empty if there is no match. + MatchedRule string + // Authorized indicates whether the audited RPC is authorized or not. + Authorized bool +} + +// LoggerConfig represents an opaque data structure holding an audit +// logger configuration. Concrete types representing configuration of specific +// audit loggers must embed this interface to implement it. +type LoggerConfig interface { + loggerConfig() +} + +// Logger is the interface to be implemented by audit loggers. 
+// +// An audit logger is a logger instance that can be configured via the +// authorization policy API or xDS HTTP RBAC filters. When the authorization +// decision meets the condition for audit, all the configured audit loggers' +// Log() method will be invoked to log that event. +// +// TODO(lwge): Change the link to the merged gRFC once it's ready. +// Please refer to https://github.com/grpc/proposal/pull/346 for more details +// about audit logging. +type Logger interface { + // Log performs audit logging for the provided audit event. + // + // This method is invoked in the RPC path and therefore implementations + // must not block. + Log(*Event) +} + +// LoggerBuilder is the interface to be implemented by audit logger +// builders that are used at runtime to configure and instantiate audit loggers. +// +// Users who want to implement their own audit logging logic should +// implement this interface, along with the Logger interface, and register +// it by calling RegisterLoggerBuilder() at init time. +// +// TODO(lwge): Change the link to the merged gRFC once it's ready. +// Please refer to https://github.com/grpc/proposal/pull/346 for more details +// about audit logging. +type LoggerBuilder interface { + // ParseLoggerConfig parses the given JSON bytes into a structured + // logger config this builder can use to build an audit logger. + ParseLoggerConfig(config json.RawMessage) (LoggerConfig, error) + // Build builds an audit logger with the given logger config. + // This will only be called with valid configs returned from + // ParseLoggerConfig() and any runtime issues such as failing to + // create a file should be handled by the logger implementation instead of + // failing the logger instantiation. So implementers need to make sure it + // can return a logger without error at this stage. + Build(LoggerConfig) Logger + // Name returns the name of logger built by this builder. + // This is used to register and pick the builder. 
+ Name() string +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go b/terraform/providers/google/vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go new file mode 100644 index 0000000000..c4ba21fa46 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stdout defines an stdout audit logger. +package stdout + +import ( + "encoding/json" + "log" + "os" + "time" + + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/grpclog" +) + +var grpcLogger = grpclog.Component("authz-audit") + +// Name is the string to identify this logger type in the registry +const Name = "stdout_logger" + +func init() { + audit.RegisterLoggerBuilder(&loggerBuilder{ + goLogger: log.New(os.Stdout, "", 0), + }) +} + +type event struct { + FullMethodName string `json:"rpc_method"` + Principal string `json:"principal"` + PolicyName string `json:"policy_name"` + MatchedRule string `json:"matched_rule"` + Authorized bool `json:"authorized"` + Timestamp string `json:"timestamp"` // Time when the audit event is logged via Log method +} + +// logger implements the audit.logger interface by logging to standard output. 
+type logger struct { + goLogger *log.Logger +} + +// Log marshals the audit.Event to json and prints it to standard output. +func (l *logger) Log(event *audit.Event) { + jsonContainer := map[string]interface{}{ + "grpc_audit_log": convertEvent(event), + } + jsonBytes, err := json.Marshal(jsonContainer) + if err != nil { + grpcLogger.Errorf("failed to marshal AuditEvent data to JSON: %v", err) + return + } + l.goLogger.Println(string(jsonBytes)) +} + +// loggerConfig represents the configuration for the stdout logger. +// It is currently empty and implements the audit.Logger interface by embedding it. +type loggerConfig struct { + audit.LoggerConfig +} + +type loggerBuilder struct { + goLogger *log.Logger +} + +func (loggerBuilder) Name() string { + return Name +} + +// Build returns a new instance of the stdout logger. +// Passed in configuration is ignored as the stdout logger does not +// expect any configuration to be provided. +func (lb *loggerBuilder) Build(audit.LoggerConfig) audit.Logger { + return &logger{ + goLogger: lb.goLogger, + } +} + +// ParseLoggerConfig is a no-op since the stdout logger does not accept any configuration. +func (*loggerBuilder) ParseLoggerConfig(config json.RawMessage) (audit.LoggerConfig, error) { + if len(config) != 0 && string(config) != "{}" { + grpcLogger.Warningf("Stdout logger doesn't support custom configs. 
Ignoring:\n%s", string(config)) + } + return &loggerConfig{}, nil +} + +func convertEvent(auditEvent *audit.Event) *event { + return &event{ + FullMethodName: auditEvent.FullMethodName, + Principal: auditEvent.Principal, + PolicyName: auditEvent.PolicyName, + MatchedRule: auditEvent.MatchedRule, + Authorized: auditEvent.Authorized, + Timestamp: time.Now().Format(time.RFC3339Nano), + } +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/balancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/balancer.go index 09d61dd1b5..8f00523c0e 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/balancer.go @@ -286,7 +286,7 @@ type PickResult struct { // // LB policies with child policies are responsible for propagating metadata // injected by their children to the ClientConn, as part of Pick(). - Metatada metadata.MD + Metadata metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 1205aff23f..f070878bd9 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index cf1034830d..00d0954b38 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 @@ -37,6 +37,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad" +) + // LoadBalancerClient is the client API for LoadBalancer service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -54,7 +58,7 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { } func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { - stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, opts...) 
if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/balancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/balancer.go new file mode 100644 index 0000000000..076aae8c99 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/balancer.go @@ -0,0 +1,658 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rls implements the RLS LB policy. +package rls + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/buffer" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" +) + +const ( + // Name is the name of the RLS LB policy. + // + // It currently has an experimental suffix which would be removed once + // end-to-end testing of the policy is completed. + Name = internal.RLSLoadBalancingPolicyName + // Default frequency for data cache purging. 
+ periodicCachePurgeFreq = time.Minute +) + +var ( + logger = grpclog.Component("rls") + errBalancerClosed = errors.New("rls LB policy is closed") + + // Below defined vars for overriding in unit tests. + + // Default exponential backoff strategy for data cache entries. + defaultBackoffStrategy = backoff.Strategy(backoff.DefaultExponential) + // Ticker used for periodic data cache purging. + dataCachePurgeTicker = func() *time.Ticker { return time.NewTicker(periodicCachePurgeFreq) } + // We want every cache entry to live in the cache for at least this + // duration. If we encounter a cache entry whose minimum expiration time is + // in the future, we abort the LRU pass, which may temporarily leave the + // cache being too large. This is necessary to ensure that in cases where + // the cache is too small, when we receive an RLS Response, we keep the + // resulting cache entry around long enough for the pending incoming + // requests to be re-processed through the new Picker. If we didn't do this, + // then we'd risk throwing away each RLS response as we receive it, in which + // case we would fail to actually route any of our incoming requests. + minEvictDuration = 5 * time.Second + + // Following functions are no-ops in actual code, but can be overridden in + // tests to give tests visibility into exactly when certain events happen. 
+ clientConnUpdateHook = func() {} + dataCachePurgeHook = func() {} + resetBackoffHook = func() {} +) + +func init() { + balancer.Register(&rlsBB{}) +} + +type rlsBB struct{} + +func (rlsBB) Name() string { + return Name +} + +func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + lb := &rlsBalancer{ + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + cc: cc, + bopts: opts, + purgeTicker: dataCachePurgeTicker(), + dataCachePurgeHook: dataCachePurgeHook, + lbCfg: &lbConfig{}, + pendingMap: make(map[cacheKey]*backoffState), + childPolicies: make(map[string]*childPolicyWrapper), + updateCh: buffer.NewUnbounded(), + } + lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-experimental-lb %p] ", lb)) + lb.dataCache = newDataCache(maxCacheSize, lb.logger) + lb.bg = balancergroup.New(cc, opts, lb, lb.logger) + lb.bg.Start() + go lb.run() + return lb +} + +// rlsBalancer implements the RLS LB policy. +type rlsBalancer struct { + closed *grpcsync.Event // Fires when Close() is invoked. Guarded by stateMu. + done *grpcsync.Event // Fires when Close() is done. + cc balancer.ClientConn + bopts balancer.BuildOptions + purgeTicker *time.Ticker + dataCachePurgeHook func() + logger *internalgrpclog.PrefixLogger + + // If both cacheMu and stateMu need to be acquired, the former must be + // acquired first to prevent a deadlock. This order restriction is due to the + // fact that in places where we need to acquire both the locks, we always + // start off reading the cache. + + // cacheMu guards access to the data cache and pending requests map. We + // cannot use an RWMutex here since even an operation like + // dataCache.getEntry() modifies the underlying LRU, which is implemented as + // a doubly linked list. + cacheMu sync.Mutex + dataCache *dataCache // Cache of RLS data. + pendingMap map[cacheKey]*backoffState // Map of pending RLS requests. + + // stateMu guards access to all LB policy state. 
+ stateMu sync.Mutex + lbCfg *lbConfig // Most recently received service config. + childPolicyBuilder balancer.Builder // Cached child policy builder. + resolverState resolver.State // Cached resolver state. + ctrlCh *controlChannel // Control channel to the RLS server. + bg *balancergroup.BalancerGroup + childPolicies map[string]*childPolicyWrapper + defaultPolicy *childPolicyWrapper + // A reference to the most recent picker sent to gRPC as part of a state + // update is cached in this field so that we can release the reference to the + // default child policy wrapper when a new picker is created. See + // sendNewPickerLocked() for details. + lastPicker *rlsPicker + // Set during UpdateClientConnState when pushing updates to child policies. + // Prevents state updates from child policies causing new pickers to be sent + // up the channel. Cleared after all child policies have processed the + // updates sent to them, after which a new picker is sent up the channel. + inhibitPickerUpdates bool + + // Channel on which all updates are pushed. Processed in run(). + updateCh *buffer.Unbounded +} + +type resumePickerUpdates struct { + done chan struct{} +} + +// childPolicyIDAndState wraps a child policy id and its state update. +type childPolicyIDAndState struct { + id string + state balancer.State +} + +type controlChannelReady struct{} + +// run is a long-running goroutine which handles all the updates that the +// balancer wishes to handle. The appropriate updateHandler will push the update +// on to a channel that this goroutine will select on, thereby the handling of +// the update will happen asynchronously. +func (b *rlsBalancer) run() { + // We exit out of the for loop below only after `Close()` has been invoked. + // Firing the done event here will ensure that Close() returns only after + // all goroutines are done. + defer func() { b.done.Fire() }() + + // Wait for purgeDataCache() goroutine to exit before returning from here. 
+ doneCh := make(chan struct{}) + defer func() { + <-doneCh + }() + go b.purgeDataCache(doneCh) + + for { + select { + case u, ok := <-b.updateCh.Get(): + if !ok { + return + } + b.updateCh.Load() + switch update := u.(type) { + case childPolicyIDAndState: + b.handleChildPolicyStateUpdate(update.id, update.state) + case controlChannelReady: + b.logger.Infof("Resetting backoff state after control channel getting back to READY") + b.cacheMu.Lock() + updatePicker := b.dataCache.resetBackoffState(&backoffState{bs: defaultBackoffStrategy}) + b.cacheMu.Unlock() + if updatePicker { + b.sendNewPicker() + } + resetBackoffHook() + case resumePickerUpdates: + b.stateMu.Lock() + b.logger.Infof("Resuming picker updates after config propagation to child policies") + b.inhibitPickerUpdates = false + b.sendNewPickerLocked() + close(update.done) + b.stateMu.Unlock() + default: + b.logger.Errorf("Unsupported update type %T", update) + } + case <-b.closed.Done(): + return + } + } +} + +// purgeDataCache is a long-running goroutine which periodically deletes expired +// entries. An expired entry is one for which both the expiryTime and +// backoffExpiryTime are in the past. 
+func (b *rlsBalancer) purgeDataCache(doneCh chan struct{}) { + defer close(doneCh) + + for { + select { + case <-b.closed.Done(): + return + case <-b.purgeTicker.C: + b.cacheMu.Lock() + updatePicker := b.dataCache.evictExpiredEntries() + b.cacheMu.Unlock() + if updatePicker { + b.sendNewPicker() + } + b.dataCachePurgeHook() + } + } +} + +func (b *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + defer clientConnUpdateHook() + + b.stateMu.Lock() + if b.closed.HasFired() { + b.stateMu.Unlock() + b.logger.Warningf("Received service config after balancer close: %s", pretty.ToJSON(ccs.BalancerConfig)) + return errBalancerClosed + } + + newCfg := ccs.BalancerConfig.(*lbConfig) + if b.lbCfg.Equal(newCfg) { + b.stateMu.Unlock() + b.logger.Infof("New service config matches existing config") + return nil + } + + b.logger.Infof("Delaying picker updates until config is propagated to and processed by child policies") + b.inhibitPickerUpdates = true + + // When the RLS server name changes, the old control channel needs to be + // swapped out for a new one. All state associated with the throttling + // algorithm is stored on a per-control-channel basis; when we swap out + // channels, we also swap out the throttling state. + b.handleControlChannelUpdate(newCfg) + + // Any changes to child policy name or configuration needs to be handled by + // either creating new child policies or pushing updates to existing ones. + b.resolverState = ccs.ResolverState + b.handleChildPolicyConfigUpdate(newCfg, &ccs) + + // Resize the cache if the size in the config has changed. + resizeCache := newCfg.cacheSizeBytes != b.lbCfg.cacheSizeBytes + + // Update the copy of the config in the LB policy before releasing the lock. + b.lbCfg = newCfg + + // Enqueue an event which will notify us when the above update has been + // propagated to all child policies, and the child policies have all + // processed their updates, and we have sent a picker update. 
+ done := make(chan struct{}) + b.updateCh.Put(resumePickerUpdates{done: done}) + b.stateMu.Unlock() + <-done + + if resizeCache { + // If the new config changes reduces the size of the data cache, we + // might have to evict entries to get the cache size down to the newly + // specified size. + // + // And we cannot do this operation above (where we compute the + // `resizeCache` boolean) because `cacheMu` needs to be grabbed before + // `stateMu` if we are to hold both locks at the same time. + b.cacheMu.Lock() + b.dataCache.resize(newCfg.cacheSizeBytes) + b.cacheMu.Unlock() + } + return nil +} + +// handleControlChannelUpdate handles updates to service config fields which +// influence the control channel to the RLS server. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) handleControlChannelUpdate(newCfg *lbConfig) { + if newCfg.lookupService == b.lbCfg.lookupService && newCfg.lookupServiceTimeout == b.lbCfg.lookupServiceTimeout { + return + } + + // Create a new control channel and close the existing one. + b.logger.Infof("Creating control channel to RLS server at: %v", newCfg.lookupService) + backToReadyFn := func() { + b.updateCh.Put(controlChannelReady{}) + } + ctrlCh, err := newControlChannel(newCfg.lookupService, newCfg.controlChannelServiceConfig, newCfg.lookupServiceTimeout, b.bopts, backToReadyFn) + if err != nil { + // This is very uncommon and usually represents a non-transient error. + // There is not much we can do here other than wait for another update + // which might fix things. + b.logger.Errorf("Failed to create control channel to %q: %v", newCfg.lookupService, err) + return + } + if b.ctrlCh != nil { + b.ctrlCh.close() + } + b.ctrlCh = ctrlCh +} + +// handleChildPolicyConfigUpdate handles updates to service config fields which +// influence child policy configuration. +// +// Caller must hold lb.stateMu. 
+func (b *rlsBalancer) handleChildPolicyConfigUpdate(newCfg *lbConfig, ccs *balancer.ClientConnState) { + // Update child policy builder first since other steps are dependent on this. + if b.childPolicyBuilder == nil || b.childPolicyBuilder.Name() != newCfg.childPolicyName { + b.logger.Infof("Child policy changed to %q", newCfg.childPolicyName) + b.childPolicyBuilder = balancer.Get(newCfg.childPolicyName) + for _, cpw := range b.childPolicies { + // If the child policy has changed, we need to remove the old policy + // from the BalancerGroup and add a new one. The BalancerGroup takes + // care of closing the old one in this case. + b.bg.Remove(cpw.target) + b.bg.Add(cpw.target, b.childPolicyBuilder) + } + } + + configSentToDefault := false + if b.lbCfg.defaultTarget != newCfg.defaultTarget { + // If the default target has changed, create a new childPolicyWrapper for + // the new target if required. If a new wrapper is created, add it to the + // childPolicies map and the BalancerGroup. + b.logger.Infof("Default target in LB config changing from %q to %q", b.lbCfg.defaultTarget, newCfg.defaultTarget) + cpw := b.childPolicies[newCfg.defaultTarget] + if cpw == nil { + cpw = newChildPolicyWrapper(newCfg.defaultTarget) + b.childPolicies[newCfg.defaultTarget] = cpw + b.bg.Add(newCfg.defaultTarget, b.childPolicyBuilder) + b.logger.Infof("Child policy %q added to BalancerGroup", newCfg.defaultTarget) + } + if err := b.buildAndPushChildPolicyConfigs(newCfg.defaultTarget, newCfg, ccs); err != nil { + cpw.lamify(err) + } + + // If an old default exists, release its reference. If this was the last + // reference, remove the child policy from the BalancerGroup and remove the + // corresponding entry the childPolicies map. 
+ if b.defaultPolicy != nil { + if b.defaultPolicy.releaseRef() { + delete(b.childPolicies, b.lbCfg.defaultTarget) + b.bg.Remove(b.defaultPolicy.target) + } + } + b.defaultPolicy = cpw + configSentToDefault = true + } + + // No change in configuration affecting child policies. Return early. + if b.lbCfg.childPolicyName == newCfg.childPolicyName && b.lbCfg.childPolicyTargetField == newCfg.childPolicyTargetField && childPolicyConfigEqual(b.lbCfg.childPolicyConfig, newCfg.childPolicyConfig) { + return + } + + // If fields affecting child policy configuration have changed, the changes + // are pushed to the childPolicyWrapper which handles them appropriately. + for _, cpw := range b.childPolicies { + if configSentToDefault && cpw.target == newCfg.defaultTarget { + // Default target has already been taken care of. + continue + } + if err := b.buildAndPushChildPolicyConfigs(cpw.target, newCfg, ccs); err != nil { + cpw.lamify(err) + } + } +} + +// buildAndPushChildPolicyConfigs builds the final child policy configuration by +// adding the `targetField` to the base child policy configuration received in +// RLS LB policy configuration. The `targetField` is set to target and +// configuration is pushed to the child policy through the BalancerGroup. +// +// Caller must hold lb.stateMu. 
+func (b *rlsBalancer) buildAndPushChildPolicyConfigs(target string, newCfg *lbConfig, ccs *balancer.ClientConnState) error { + jsonTarget, err := json.Marshal(target) + if err != nil { + return fmt.Errorf("failed to marshal child policy target %q: %v", target, err) + } + + config := newCfg.childPolicyConfig + targetField := newCfg.childPolicyTargetField + config[targetField] = jsonTarget + jsonCfg, err := json.Marshal(config) + if err != nil { + return fmt.Errorf("failed to marshal child policy config %+v: %v", config, err) + } + + parser, _ := b.childPolicyBuilder.(balancer.ConfigParser) + parsedCfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("childPolicy config parsing failed: %v", err) + } + + state := balancer.ClientConnState{ResolverState: ccs.ResolverState, BalancerConfig: parsedCfg} + b.logger.Infof("Pushing new state to child policy %q: %+v", target, state) + if err := b.bg.UpdateClientConnState(target, state); err != nil { + b.logger.Warningf("UpdateClientConnState(%q, %+v) failed : %v", target, ccs, err) + } + return nil +} + +func (b *rlsBalancer) ResolverError(err error) { + b.bg.ResolverError(err) +} + +func (b *rlsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.bg.UpdateSubConnState(sc, state) +} + +func (b *rlsBalancer) Close() { + b.stateMu.Lock() + b.closed.Fire() + b.purgeTicker.Stop() + if b.ctrlCh != nil { + b.ctrlCh.close() + } + b.bg.Close() + b.stateMu.Unlock() + + b.cacheMu.Lock() + b.dataCache.stop() + b.cacheMu.Unlock() + + b.updateCh.Close() + + <-b.done.Done() +} + +func (b *rlsBalancer) ExitIdle() { + b.bg.ExitIdle() +} + +// sendNewPickerLocked pushes a new picker on to the channel. +// +// Note that regardless of what connectivity state is reported, the policy will +// return its own picker, and not a picker that unconditionally queues +// (typically used for IDLE or CONNECTING) or a picker that unconditionally +// fails (typically used for TRANSIENT_FAILURE). 
This is required because, +// irrespective of the connectivity state, we need to able to perform RLS +// lookups for incoming RPCs and affect the status of queued RPCs based on the +// receipt of RLS responses. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) sendNewPickerLocked() { + aggregatedState := b.aggregatedConnectivityState() + + // Acquire a separate reference for the picker. This is required to ensure + // that the wrapper held by the old picker is not closed when the default + // target changes in the config, and a new wrapper is created for the new + // default target. See handleChildPolicyConfigUpdate() for how config changes + // affecting the default target are handled. + if b.defaultPolicy != nil { + b.defaultPolicy.acquireRef() + } + picker := &rlsPicker{ + kbm: b.lbCfg.kbMap, + origEndpoint: b.bopts.Target.Endpoint(), + lb: b, + defaultPolicy: b.defaultPolicy, + ctrlCh: b.ctrlCh, + maxAge: b.lbCfg.maxAge, + staleAge: b.lbCfg.staleAge, + bg: b.bg, + } + picker.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-picker %p] ", picker)) + state := balancer.State{ + ConnectivityState: aggregatedState, + Picker: picker, + } + + if !b.inhibitPickerUpdates { + b.logger.Infof("New balancer.State: %+v", state) + b.cc.UpdateState(state) + } else { + b.logger.Infof("Delaying picker update: %+v", state) + } + + if b.lastPicker != nil { + if b.defaultPolicy != nil { + b.defaultPolicy.releaseRef() + } + } + b.lastPicker = picker +} + +func (b *rlsBalancer) sendNewPicker() { + b.stateMu.Lock() + defer b.stateMu.Unlock() + if b.closed.HasFired() { + return + } + b.sendNewPickerLocked() +} + +// The aggregated connectivity state reported is determined as follows: +// - If there is at least one child policy in state READY, the connectivity +// state is READY. +// - Otherwise, if there is at least one child policy in state CONNECTING, the +// connectivity state is CONNECTING. 
+// - Otherwise, if there is at least one child policy in state IDLE, the +// connectivity state is IDLE. +// - Otherwise, all child policies are in TRANSIENT_FAILURE, and the +// connectivity state is TRANSIENT_FAILURE. +// +// If the RLS policy has no child policies and no configured default target, +// then we will report connectivity state IDLE. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) aggregatedConnectivityState() connectivity.State { + if len(b.childPolicies) == 0 && b.lbCfg.defaultTarget == "" { + return connectivity.Idle + } + + var readyN, connectingN, idleN int + for _, cpw := range b.childPolicies { + state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) + switch state.ConnectivityState { + case connectivity.Ready: + readyN++ + case connectivity.Connecting: + connectingN++ + case connectivity.Idle: + idleN++ + } + } + + switch { + case readyN > 0: + return connectivity.Ready + case connectingN > 0: + return connectivity.Connecting + case idleN > 0: + return connectivity.Idle + default: + return connectivity.TransientFailure + } +} + +// UpdateState is a implementation of the balancergroup.BalancerStateAggregator +// interface. The actual state aggregation functionality is handled +// asynchronously. This method only pushes the state update on to channel read +// and dispatched by the run() goroutine. +func (b *rlsBalancer) UpdateState(id string, state balancer.State) { + b.updateCh.Put(childPolicyIDAndState{id: id, state: state}) +} + +// handleChildPolicyStateUpdate provides the state aggregator functionality for +// the BalancerGroup. +// +// This method is invoked by the BalancerGroup whenever a child policy sends a +// state update. 
We cache the child policy's connectivity state and picker for +// two reasons: +// - to suppress connectivity state transitions from TRANSIENT_FAILURE to states +// other than READY +// - to delegate picks to child policies +func (b *rlsBalancer) handleChildPolicyStateUpdate(id string, newState balancer.State) { + b.stateMu.Lock() + defer b.stateMu.Unlock() + + cpw := b.childPolicies[id] + if cpw == nil { + // All child policies start with an entry in the map. If ID is not in + // map, it's either been removed, or never existed. + b.logger.Warningf("Received state update %+v for missing child policy %q", newState, id) + return + } + + oldState := (*balancer.State)(atomic.LoadPointer(&cpw.state)) + if oldState.ConnectivityState == connectivity.TransientFailure && newState.ConnectivityState == connectivity.Connecting { + // Ignore state transitions from TRANSIENT_FAILURE to CONNECTING, and thus + // fail pending RPCs instead of queuing them indefinitely when all + // subChannels are failing, even if the subChannels are bouncing back and + // forth between CONNECTING and TRANSIENT_FAILURE. + return + } + atomic.StorePointer(&cpw.state, unsafe.Pointer(&newState)) + b.logger.Infof("Child policy %q has new state %+v", id, newState) + b.sendNewPickerLocked() +} + +// acquireChildPolicyReferences attempts to acquire references to +// childPolicyWrappers corresponding to the passed in targets. If there is no +// childPolicyWrapper corresponding to one of the targets, a new one is created +// and added to the BalancerGroup. +func (b *rlsBalancer) acquireChildPolicyReferences(targets []string) []*childPolicyWrapper { + b.stateMu.Lock() + var newChildPolicies []*childPolicyWrapper + for _, target := range targets { + // If the target exists in the LB policy's childPolicies map. a new + // reference is taken here and added to the new list. 
+ if cpw := b.childPolicies[target]; cpw != nil { + cpw.acquireRef() + newChildPolicies = append(newChildPolicies, cpw) + continue + } + + // If the target does not exist in the child policy map, then a new + // child policy wrapper is created and added to the new list. + cpw := newChildPolicyWrapper(target) + b.childPolicies[target] = cpw + b.bg.Add(target, b.childPolicyBuilder) + b.logger.Infof("Child policy %q added to BalancerGroup", target) + newChildPolicies = append(newChildPolicies, cpw) + if err := b.buildAndPushChildPolicyConfigs(target, b.lbCfg, &balancer.ClientConnState{ + ResolverState: b.resolverState, + }); err != nil { + cpw.lamify(err) + } + } + b.stateMu.Unlock() + return newChildPolicies +} + +// releaseChildPolicyReferences releases references to childPolicyWrappers +// corresponding to the passed in targets. If the release reference was the last +// one, the child policy is removed from the BalancerGroup. +func (b *rlsBalancer) releaseChildPolicyReferences(targets []string) { + b.stateMu.Lock() + for _, target := range targets { + if cpw := b.childPolicies[target]; cpw.releaseRef() { + delete(b.childPolicies, cpw.target) + b.bg.Remove(cpw.target) + } + } + b.stateMu.Unlock() +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/cache.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/cache.go new file mode 100644 index 0000000000..d7a6a1a436 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/cache.go @@ -0,0 +1,361 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "container/list" + "time" + + "google.golang.org/grpc/internal/backoff" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" +) + +// cacheKey represents the key used to uniquely identify an entry in the data +// cache and in the pending requests map. +type cacheKey struct { + // path is the full path of the incoming RPC request. + path string + // keys is a stringified version of the RLS request key map built using the + // RLS keyBuilder. Since maps are not a type which is comparable in Go, it + // cannot be part of the key for another map (entries in the data cache and + // pending requests map are stored in maps). + keys string +} + +// cacheEntry wraps all the data to be stored in a data cache entry. +type cacheEntry struct { + // childPolicyWrappers contains the list of child policy wrappers + // corresponding to the targets returned by the RLS server for this entry. + childPolicyWrappers []*childPolicyWrapper + // headerData is received in the RLS response and is to be sent in the + // X-Google-RLS-Data header for matching RPCs. + headerData string + // expiryTime is the absolute time at which this cache entry entry stops + // being valid. When an RLS request succeeds, this is set to the current + // time plus the max_age field from the LB policy config. + expiryTime time.Time + // staleTime is the absolute time after which this cache entry will be + // proactively refreshed if an incoming RPC matches this entry. 
When an RLS + // request succeeds, this is set to the current time plus the stale_age from + // the LB policy config. + staleTime time.Time + // earliestEvictTime is the absolute time before which this entry should not + // be evicted from the cache. When a cache entry is created, this is set to + // the current time plus a default value of 5 seconds. This is required to + // make sure that a new entry added to the cache is not evicted before the + // RLS response arrives (usually when the cache is too small). + earliestEvictTime time.Time + + // status stores the RPC status of the previous RLS request for this + // entry. Picks for entries with a non-nil value for this field are failed + // with the error stored here. + status error + // backoffState contains all backoff related state. When an RLS request + // succeeds, backoffState is reset. This state moves between the data cache + // and the pending requests map. + backoffState *backoffState + // backoffTime is the absolute time at which the backoff period for this + // entry ends. When an RLS request fails, this is set to the current time + // plus the backoff value returned by the backoffState. The backoff timer is + // also setup with this value. No new RLS requests are sent out for this + // entry until the backoff period ends. + // + // Set to zero time instant upon a successful RLS response. + backoffTime time.Time + // backoffExpiryTime is the absolute time at which an entry which has gone + // through backoff stops being valid. When an RLS request fails, this is + // set to the current time plus twice the backoff time. The cache expiry + // timer will only delete entries for which both expiryTime and + // backoffExpiryTime are in the past. + // + // Set to zero time instant upon a successful RLS response. + backoffExpiryTime time.Time + + // size stores the size of this cache entry. Used to enforce the cache size + // specified in the LB policy configuration. 
+ size int64 +} + +// backoffState wraps all backoff related state associated with a cache entry. +type backoffState struct { + // retries keeps track of the number of RLS failures, to be able to + // determine the amount of time to backoff before the next attempt. + retries int + // bs is the exponential backoff implementation which returns the amount of + // time to backoff, given the number of retries. + bs backoff.Strategy + // timer fires when the backoff period ends and incoming requests after this + // will trigger a new RLS request. + timer *time.Timer +} + +// lru is a cache implementation with a least recently used eviction policy. +// Internally it uses a doubly linked list, with the least recently used element +// at the front of the list and the most recently used element at the back of +// the list. The value stored in this cache will be of type `cacheKey`. +// +// It is not safe for concurrent access. +type lru struct { + ll *list.List + + // A map from the value stored in the lru to its underlying list element is + // maintained to have a clean API. Without this, a subset of the lru's API + // would accept/return cacheKey while another subset would accept/return + // list elements. + m map[cacheKey]*list.Element +} + +// newLRU creates a new cache with a least recently used eviction policy. +func newLRU() *lru { + return &lru{ + ll: list.New(), + m: make(map[cacheKey]*list.Element), + } +} + +func (l *lru) addEntry(key cacheKey) { + e := l.ll.PushBack(key) + l.m[key] = e +} + +func (l *lru) makeRecent(key cacheKey) { + e := l.m[key] + l.ll.MoveToBack(e) +} + +func (l *lru) removeEntry(key cacheKey) { + e := l.m[key] + l.ll.Remove(e) + delete(l.m, key) +} + +func (l *lru) getLeastRecentlyUsed() cacheKey { + e := l.ll.Front() + if e == nil { + return cacheKey{} + } + return e.Value.(cacheKey) +} + +// dataCache contains a cache of RLS data used by the LB policy to make routing +// decisions. 
+// +// The dataCache will be keyed by the request's path and keys, represented by +// the `cacheKey` type. It will maintain the cache keys in an `lru` and the +// cache data, represented by the `cacheEntry` type, in a native map. +// +// It is not safe for concurrent access. +type dataCache struct { + maxSize int64 // Maximum allowed size. + currentSize int64 // Current size. + keys *lru // Cache keys maintained in lru order. + entries map[cacheKey]*cacheEntry + logger *internalgrpclog.PrefixLogger + shutdown *grpcsync.Event +} + +func newDataCache(size int64, logger *internalgrpclog.PrefixLogger) *dataCache { + return &dataCache{ + maxSize: size, + keys: newLRU(), + entries: make(map[cacheKey]*cacheEntry), + logger: logger, + shutdown: grpcsync.NewEvent(), + } +} + +// resize changes the maximum allowed size of the data cache. +// +// The return value indicates if an entry with a valid backoff timer was +// evicted. This is important to the RLS LB policy which would send a new picker +// on the channel to re-process any RPCs queued as a result of this backoff +// timer. +func (dc *dataCache) resize(size int64) (backoffCancelled bool) { + if dc.shutdown.HasFired() { + return false + } + + backoffCancelled = false + for dc.currentSize > size { + key := dc.keys.getLeastRecentlyUsed() + entry, ok := dc.entries[key] + if !ok { + // This should never happen. + dc.logger.Errorf("cacheKey %+v not found in the cache while attempting to resize it", key) + break + } + + // When we encounter a cache entry whose minimum expiration time is in + // the future, we abort the LRU pass, which may temporarily leave the + // cache being too large. This is necessary to ensure that in cases + // where the cache is too small, when we receive an RLS Response, we + // keep the resulting cache entry around long enough for the pending + // incoming requests to be re-processed through the new Picker. 
If we + // didn't do this, then we'd risk throwing away each RLS response as we + // receive it, in which case we would fail to actually route any of our + // incoming requests. + if entry.earliestEvictTime.After(time.Now()) { + dc.logger.Warningf("cachekey %+v is too recent to be evicted. Stopping cache resizing for now", key) + break + } + + // Stop the backoff timer before evicting the entry. + if entry.backoffState != nil && entry.backoffState.timer != nil { + if entry.backoffState.timer.Stop() { + entry.backoffState.timer = nil + backoffCancelled = true + } + } + dc.deleteAndcleanup(key, entry) + } + dc.maxSize = size + return backoffCancelled +} + +// evictExpiredEntries sweeps through the cache and deletes expired entries. An +// expired entry is one for which both the `expiryTime` and `backoffExpiryTime` +// fields are in the past. +// +// The return value indicates if any expired entries were evicted. +// +// The LB policy invokes this method periodically to purge expired entries. +func (dc *dataCache) evictExpiredEntries() bool { + if dc.shutdown.HasFired() { + return false + } + + evicted := false + for key, entry := range dc.entries { + // Only evict entries for which both the data expiration time and + // backoff expiration time fields are in the past. + now := time.Now() + if entry.expiryTime.After(now) || entry.backoffExpiryTime.After(now) { + continue + } + dc.deleteAndcleanup(key, entry) + evicted = true + } + return evicted +} + +// resetBackoffState sweeps through the cache and for entries with a backoff +// state, the backoff timer is cancelled and the backoff state is reset. The +// return value indicates if any entries were mutated in this fashion. +// +// The LB policy invokes this method when the control channel moves from READY +// to TRANSIENT_FAILURE back to READY. See `monitorConnectivityState` method on +// the `controlChannel` type for more details. 
+func (dc *dataCache) resetBackoffState(newBackoffState *backoffState) bool { + if dc.shutdown.HasFired() { + return false + } + + backoffReset := false + for _, entry := range dc.entries { + if entry.backoffState == nil { + continue + } + if entry.backoffState.timer != nil { + entry.backoffState.timer.Stop() + entry.backoffState.timer = nil + } + entry.backoffState = &backoffState{bs: newBackoffState.bs} + entry.backoffTime = time.Time{} + entry.backoffExpiryTime = time.Time{} + backoffReset = true + } + return backoffReset +} + +// addEntry adds a cache entry for the given key. +// +// Return value backoffCancelled indicates if a cache entry with a valid backoff +// timer was evicted to make space for the current entry. This is important to +// the RLS LB policy which would send a new picker on the channel to re-process +// any RPCs queued as a result of this backoff timer. +// +// Return value ok indicates if entry was successfully added to the cache. +func (dc *dataCache) addEntry(key cacheKey, entry *cacheEntry) (backoffCancelled bool, ok bool) { + if dc.shutdown.HasFired() { + return false, false + } + + // Handle the extremely unlikely case that a single entry is bigger than the + // size of the cache. + if entry.size > dc.maxSize { + return false, false + } + dc.entries[key] = entry + dc.currentSize += entry.size + dc.keys.addEntry(key) + // If the new entry makes the cache go over its configured size, remove some + // old entries. + if dc.currentSize > dc.maxSize { + backoffCancelled = dc.resize(dc.maxSize) + } + return backoffCancelled, true +} + +// updateEntrySize updates the size of a cache entry and the current size of the +// data cache. An entry's size can change upon receipt of an RLS response. 
+func (dc *dataCache) updateEntrySize(entry *cacheEntry, newSize int64) { + dc.currentSize -= entry.size + entry.size = newSize + dc.currentSize += entry.size +} + +func (dc *dataCache) getEntry(key cacheKey) *cacheEntry { + if dc.shutdown.HasFired() { + return nil + } + + entry, ok := dc.entries[key] + if !ok { + return nil + } + dc.keys.makeRecent(key) + return entry +} + +func (dc *dataCache) removeEntryForTesting(key cacheKey) { + entry, ok := dc.entries[key] + if !ok { + return + } + dc.deleteAndcleanup(key, entry) +} + +// deleteAndCleanup performs actions required at the time of deleting an entry +// from the data cache. +// - the entry is removed from the map of entries +// - current size of the data cache is update +// - the key is removed from the LRU +func (dc *dataCache) deleteAndcleanup(key cacheKey, entry *cacheEntry) { + delete(dc.entries, key) + dc.currentSize -= entry.size + dc.keys.removeEntry(key) +} + +func (dc *dataCache) stop() { + for key, entry := range dc.entries { + dc.deleteAndcleanup(key, entry) + } + dc.shutdown.Fire() +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/child_policy.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/child_policy.go new file mode 100644 index 0000000000..c74184cac2 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/child_policy.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "fmt" + "sync/atomic" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +// childPolicyWrapper is a reference counted wrapper around a child policy. +// +// The LB policy maintains a map of these wrappers keyed by the target returned +// by RLS. When a target is seen for the first time, a child policy wrapper is +// created for it and the wrapper is added to the child policy map. Each entry +// in the data cache holds references to the corresponding child policy +// wrappers. The LB policy also holds a reference to the child policy wrapper +// for the default target specified in the LB Policy Configuration +// +// When a cache entry is evicted, it releases references to the child policy +// wrappers that it contains. When all references have been released, the +// wrapper is removed from the child policy map and is destroyed. +// +// The child policy wrapper also caches the connectivity state and most recent +// picker from the child policy. Once the child policy wrapper reports +// TRANSIENT_FAILURE, it will continue reporting that state until it goes READY; +// transitions from TRANSIENT_FAILURE to CONNECTING are ignored. +// +// Whenever a child policy wrapper changes its connectivity state, the LB policy +// returns a new picker to the channel, since the channel may need to re-process +// the picks for queued RPCs. +// +// It is not safe for concurrent access. +type childPolicyWrapper struct { + logger *internalgrpclog.PrefixLogger + target string // RLS target corresponding to this child policy. + refCnt int // Reference count. + + // Balancer state reported by the child policy. The RLS LB policy maintains + // these child policies in a BalancerGroup. 
The state reported by the child + // policy is pushed to the state aggregator (which is also implemented by the + // RLS LB policy) and cached here. See handleChildPolicyStateUpdate() for + // details on how the state aggregation is performed. + // + // While this field is written to by the LB policy, it is read by the picker + // at Pick time. Making this an atomic to enable the picker to read this value + // without a mutex. + state unsafe.Pointer // *balancer.State +} + +// newChildPolicyWrapper creates a child policy wrapper for the given target, +// and is initialized with one reference and starts off in CONNECTING state. +func newChildPolicyWrapper(target string) *childPolicyWrapper { + c := &childPolicyWrapper{ + target: target, + refCnt: 1, + state: unsafe.Pointer(&balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }), + } + c.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-child-policy-wrapper %s %p] ", c.target, c)) + c.logger.Infof("Created") + return c +} + +// acquireRef increments the reference count on the child policy wrapper. +func (c *childPolicyWrapper) acquireRef() { + c.refCnt++ +} + +// releaseRef decrements the reference count on the child policy wrapper. The +// return value indicates whether the released reference was the last one. +func (c *childPolicyWrapper) releaseRef() bool { + c.refCnt-- + return c.refCnt == 0 +} + +// lamify causes the child policy wrapper to return a picker which will always +// fail requests. This is used when the wrapper runs into errors when trying to +// build and parse the child policy configuration. 
+func (c *childPolicyWrapper) lamify(err error) { + c.logger.Warningf("Entering lame mode: %v", err) + atomic.StorePointer(&c.state, unsafe.Pointer(&balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + })) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/config.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/config.go new file mode 100644 index 0000000000..77b6bdcd1c --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/config.go @@ -0,0 +1,312 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "bytes" + "encoding/json" + "fmt" + "net/url" + "time" + + "github.com/golang/protobuf/ptypes" + durationpb "github.com/golang/protobuf/ptypes/duration" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/keys" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/pretty" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/protobuf/encoding/protojson" +) + +const ( + // Default max_age if not specified (or greater than this value) in the + // service config. + maxMaxAge = 5 * time.Minute + // Upper limit for cache_size since we don't fully trust the service config. 
+ maxCacheSize = 5 * 1024 * 1024 * 8 // 5MB in bytes + // Default lookup_service_timeout if not specified in the service config. + defaultLookupServiceTimeout = 10 * time.Second + // Default value for targetNameField in the child policy config during + // service config validation. + dummyChildPolicyTarget = "target_name_to_be_filled_in_later" +) + +// lbConfig is the internal representation of the RLS LB policy's config. +type lbConfig struct { + serviceconfig.LoadBalancingConfig + + cacheSizeBytes int64 // Keep this field 64-bit aligned. + kbMap keys.BuilderMap + lookupService string + lookupServiceTimeout time.Duration + maxAge time.Duration + staleAge time.Duration + defaultTarget string + + childPolicyName string + childPolicyConfig map[string]json.RawMessage + childPolicyTargetField string + controlChannelServiceConfig string +} + +func (lbCfg *lbConfig) Equal(other *lbConfig) bool { + return lbCfg.kbMap.Equal(other.kbMap) && + lbCfg.lookupService == other.lookupService && + lbCfg.lookupServiceTimeout == other.lookupServiceTimeout && + lbCfg.maxAge == other.maxAge && + lbCfg.staleAge == other.staleAge && + lbCfg.cacheSizeBytes == other.cacheSizeBytes && + lbCfg.defaultTarget == other.defaultTarget && + lbCfg.childPolicyName == other.childPolicyName && + lbCfg.childPolicyTargetField == other.childPolicyTargetField && + lbCfg.controlChannelServiceConfig == other.controlChannelServiceConfig && + childPolicyConfigEqual(lbCfg.childPolicyConfig, other.childPolicyConfig) +} + +func childPolicyConfigEqual(a, b map[string]json.RawMessage) bool { + if (b == nil) != (a == nil) { + return false + } + if len(b) != len(a) { + return false + } + for k, jsonA := range a { + jsonB, ok := b[k] + if !ok { + return false + } + if !bytes.Equal(jsonA, jsonB) { + return false + } + } + return true +} + +// This struct resembles the JSON representation of the loadBalancing config +// and makes it easier to unmarshal. 
+type lbConfigJSON struct { + RouteLookupConfig json.RawMessage + RouteLookupChannelServiceConfig json.RawMessage + ChildPolicy []map[string]json.RawMessage + ChildPolicyConfigTargetFieldName string +} + +// ParseConfig parses the JSON load balancer config provided into an +// internal form or returns an error if the config is invalid. +// +// When parsing a config update, the following validations are performed: +// - routeLookupConfig: +// - grpc_keybuilders field: +// - must have at least one entry +// - must not have two entries with the same `Name` +// - within each entry: +// - must have at least one `Name` +// - must not have a `Name` with the `service` field unset or empty +// - within each `headers` entry: +// - must not have `required_match` set +// - must not have `key` unset or empty +// - across all `headers`, `constant_keys` and `extra_keys` fields: +// - must not have the same `key` specified twice +// - no `key` must be the empty string +// - `lookup_service` field must be set and must parse as a target URI +// - if `max_age` > 5m, it should be set to 5 minutes +// - if `stale_age` > `max_age`, ignore it +// - if `stale_age` is set, then `max_age` must also be set +// - ignore `valid_targets` field +// - `cache_size_bytes` field must have a value greater than 0, and if its +// value is greater than 5M, we cap it at 5M +// +// - routeLookupChannelServiceConfig: +// - if specified, must parse as valid service config +// +// - childPolicy: +// - must find a valid child policy with a valid config +// +// - childPolicyConfigTargetFieldName: +// - must be set and non-empty +func (rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + logger.Infof("Received JSON service config: %v", pretty.ToJSON(c)) + cfgJSON := &lbConfigJSON{} + if err := json.Unmarshal(c, cfgJSON); err != nil { + return nil, fmt.Errorf("rls: json unmarshal failed for service config %+v: %v", string(c), err) + } + + m := 
protojson.UnmarshalOptions{DiscardUnknown: true} + rlsProto := &rlspb.RouteLookupConfig{} + if err := m.Unmarshal(cfgJSON.RouteLookupConfig, rlsProto); err != nil { + return nil, fmt.Errorf("rls: bad RouteLookupConfig proto %+v: %v", string(cfgJSON.RouteLookupConfig), err) + } + lbCfg, err := parseRLSProto(rlsProto) + if err != nil { + return nil, err + } + + if sc := string(cfgJSON.RouteLookupChannelServiceConfig); sc != "" { + parsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(sc) + if parsed.Err != nil { + return nil, fmt.Errorf("rls: bad control channel service config %q: %v", sc, parsed.Err) + } + lbCfg.controlChannelServiceConfig = sc + } + + if cfgJSON.ChildPolicyConfigTargetFieldName == "" { + return nil, fmt.Errorf("rls: childPolicyConfigTargetFieldName field is not set in service config %+v", string(c)) + } + name, config, err := parseChildPolicyConfigs(cfgJSON.ChildPolicy, cfgJSON.ChildPolicyConfigTargetFieldName) + if err != nil { + return nil, err + } + lbCfg.childPolicyName = name + lbCfg.childPolicyConfig = config + lbCfg.childPolicyTargetField = cfgJSON.ChildPolicyConfigTargetFieldName + return lbCfg, nil +} + +func parseRLSProto(rlsProto *rlspb.RouteLookupConfig) (*lbConfig, error) { + // Validations specified on the `grpc_keybuilders` field are performed here. + kbMap, err := keys.MakeBuilderMap(rlsProto) + if err != nil { + return nil, err + } + + // `lookup_service` field must be set and must parse as a target URI. + lookupService := rlsProto.GetLookupService() + if lookupService == "" { + return nil, fmt.Errorf("rls: empty lookup_service in route lookup config %+v", rlsProto) + } + parsedTarget, err := url.Parse(lookupService) + if err != nil { + // url.Parse() fails if scheme is missing. Retry with default scheme. 
+ parsedTarget, err = url.Parse(resolver.GetDefaultScheme() + ":///" + lookupService) + if err != nil { + return nil, fmt.Errorf("rls: invalid target URI in lookup_service %s", lookupService) + } + } + if parsedTarget.Scheme == "" { + parsedTarget.Scheme = resolver.GetDefaultScheme() + } + if resolver.Get(parsedTarget.Scheme) == nil { + return nil, fmt.Errorf("rls: unregistered scheme in lookup_service %s", lookupService) + } + + lookupServiceTimeout, err := convertDuration(rlsProto.GetLookupServiceTimeout()) + if err != nil { + return nil, fmt.Errorf("rls: failed to parse lookup_service_timeout in route lookup config %+v: %v", rlsProto, err) + } + if lookupServiceTimeout == 0 { + lookupServiceTimeout = defaultLookupServiceTimeout + } + + // Validations performed here: + // - if `max_age` > 5m, it should be set to 5 minutes + // - if `stale_age` > `max_age`, ignore it + // - if `stale_age` is set, then `max_age` must also be set + maxAge, err := convertDuration(rlsProto.GetMaxAge()) + if err != nil { + return nil, fmt.Errorf("rls: failed to parse max_age in route lookup config %+v: %v", rlsProto, err) + } + staleAge, err := convertDuration(rlsProto.GetStaleAge()) + if err != nil { + return nil, fmt.Errorf("rls: failed to parse staleAge in route lookup config %+v: %v", rlsProto, err) + } + if staleAge != 0 && maxAge == 0 { + return nil, fmt.Errorf("rls: stale_age is set, but max_age is not in route lookup config %+v", rlsProto) + } + if staleAge >= maxAge { + logger.Infof("rls: stale_age %v is not less than max_age %v, ignoring it", staleAge, maxAge) + staleAge = 0 + } + if maxAge == 0 || maxAge > maxMaxAge { + logger.Infof("rls: max_age in route lookup config is %v, using %v", maxAge, maxMaxAge) + maxAge = maxMaxAge + } + + // `cache_size_bytes` field must have a value greater than 0, and if its + // value is greater than 5M, we cap it at 5M + cacheSizeBytes := rlsProto.GetCacheSizeBytes() + if cacheSizeBytes <= 0 { + return nil, fmt.Errorf("rls: cache_size_bytes 
must be set to a non-zero value: %+v", rlsProto) + } + if cacheSizeBytes > maxCacheSize { + logger.Info("rls: cache_size_bytes %v is too large, setting it to: %v", cacheSizeBytes, maxCacheSize) + cacheSizeBytes = maxCacheSize + } + return &lbConfig{ + kbMap: kbMap, + lookupService: lookupService, + lookupServiceTimeout: lookupServiceTimeout, + maxAge: maxAge, + staleAge: staleAge, + cacheSizeBytes: cacheSizeBytes, + defaultTarget: rlsProto.GetDefaultTarget(), + }, nil +} + +// parseChildPolicyConfigs iterates through the list of child policies and picks +// the first registered policy and validates its config. +func parseChildPolicyConfigs(childPolicies []map[string]json.RawMessage, targetFieldName string) (string, map[string]json.RawMessage, error) { + for i, config := range childPolicies { + if len(config) != 1 { + return "", nil, fmt.Errorf("rls: invalid childPolicy: entry %v does not contain exactly 1 policy/config pair: %q", i, config) + } + + var name string + var rawCfg json.RawMessage + for name, rawCfg = range config { + } + builder := balancer.Get(name) + if builder == nil { + continue + } + parser, ok := builder.(balancer.ConfigParser) + if !ok { + return "", nil, fmt.Errorf("rls: childPolicy %q with config %q does not support config parsing", name, string(rawCfg)) + } + + // To validate child policy configs we do the following: + // - unmarshal the raw JSON bytes of the child policy config into a map + // - add an entry with key set to `target_field_name` and a dummy value + // - marshal the map back to JSON and parse the config using the parser + // retrieved previously + var childConfig map[string]json.RawMessage + if err := json.Unmarshal(rawCfg, &childConfig); err != nil { + return "", nil, fmt.Errorf("rls: json unmarshal failed for child policy config %q: %v", string(rawCfg), err) + } + childConfig[targetFieldName], _ = json.Marshal(dummyChildPolicyTarget) + jsonCfg, err := json.Marshal(childConfig) + if err != nil { + return "", nil, 
fmt.Errorf("rls: json marshal failed for child policy config {%+v}: %v", childConfig, err) + } + if _, err := parser.ParseConfig(jsonCfg); err != nil { + return "", nil, fmt.Errorf("rls: childPolicy config validation failed: %v", err) + } + return name, childConfig, nil + } + return "", nil, fmt.Errorf("rls: invalid childPolicy config: no supported policies found in %+v", childPolicies) +} + +func convertDuration(d *durationpb.Duration) (time.Duration, error) { + if d == nil { + return 0, nil + } + return ptypes.Duration(d) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/control_channel.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/control_channel.go new file mode 100644 index 0000000000..4acc11d90e --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/control_channel.go @@ -0,0 +1,220 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package rls + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/adaptive" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + rlsgrpc "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" +) + +var newAdaptiveThrottler = func() adaptiveThrottler { return adaptive.New() } + +type adaptiveThrottler interface { + ShouldThrottle() bool + RegisterBackendResponse(throttled bool) +} + +// controlChannel is a wrapper around the gRPC channel to the RLS server +// specified in the service config. +type controlChannel struct { + // rpcTimeout specifies the timeout for the RouteLookup RPC call. The LB + // policy receives this value in its service config. + rpcTimeout time.Duration + // backToReadyFunc is a callback to be invoked when the connectivity state + // changes from READY --> TRANSIENT_FAILURE --> READY. + backToReadyFunc func() + // throttler in an adaptive throttling implementation used to avoid + // hammering the RLS service while it is overloaded or down. + throttler adaptiveThrottler + + cc *grpc.ClientConn + client rlsgrpc.RouteLookupServiceClient + logger *internalgrpclog.PrefixLogger +} + +// newControlChannel creates a controlChannel to rlsServerName and uses +// serviceConfig, if non-empty, as the default service config for the underlying +// gRPC channel. 
+func newControlChannel(rlsServerName, serviceConfig string, rpcTimeout time.Duration, bOpts balancer.BuildOptions, backToReadyFunc func()) (*controlChannel, error) { + ctrlCh := &controlChannel{ + rpcTimeout: rpcTimeout, + backToReadyFunc: backToReadyFunc, + throttler: newAdaptiveThrottler(), + } + ctrlCh.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-control-channel %p] ", ctrlCh)) + + dopts, err := ctrlCh.dialOpts(bOpts, serviceConfig) + if err != nil { + return nil, err + } + ctrlCh.cc, err = grpc.Dial(rlsServerName, dopts...) + if err != nil { + return nil, err + } + ctrlCh.client = rlsgrpc.NewRouteLookupServiceClient(ctrlCh.cc) + ctrlCh.logger.Infof("Control channel created to RLS server at: %v", rlsServerName) + + go ctrlCh.monitorConnectivityState() + return ctrlCh, nil +} + +// dialOpts constructs the dial options for the control plane channel. +func (cc *controlChannel) dialOpts(bOpts balancer.BuildOptions, serviceConfig string) ([]grpc.DialOption, error) { + // The control plane channel will use the same authority as the parent + // channel for server authorization. This ensures that the identity of the + // RLS server and the identity of the backends is the same, so if the RLS + // config is injected by an attacker, it cannot cause leakage of private + // information contained in headers set by the application. + dopts := []grpc.DialOption{grpc.WithAuthority(bOpts.Authority)} + if bOpts.Dialer != nil { + dopts = append(dopts, grpc.WithContextDialer(bOpts.Dialer)) + } + + // The control channel will use the channel credentials from the parent + // channel, including any call creds associated with the channel creds. 
+ var credsOpt grpc.DialOption + switch { + case bOpts.DialCreds != nil: + credsOpt = grpc.WithTransportCredentials(bOpts.DialCreds.Clone()) + case bOpts.CredsBundle != nil: + // The "fallback" mode in google default credentials (which is the only + // type of credentials we expect to be used with RLS) uses TLS/ALTS + // creds for transport and uses the same call creds as that on the + // parent bundle. + bundle, err := bOpts.CredsBundle.NewWithMode(internal.CredsBundleModeFallback) + if err != nil { + return nil, err + } + credsOpt = grpc.WithCredentialsBundle(bundle) + default: + cc.logger.Warningf("no credentials available, using Insecure") + credsOpt = grpc.WithTransportCredentials(insecure.NewCredentials()) + } + dopts = append(dopts, credsOpt) + + // If the RLS LB policy's configuration specified a service config for the + // control channel, use that and disable service config fetching via the name + // resolver for the control channel. + if serviceConfig != "" { + cc.logger.Infof("Disabling service config from the name resolver and instead using: %s", serviceConfig) + dopts = append(dopts, grpc.WithDisableServiceConfig(), grpc.WithDefaultServiceConfig(serviceConfig)) + } + + return dopts, nil +} + +func (cc *controlChannel) monitorConnectivityState() { + cc.logger.Infof("Starting connectivity state monitoring goroutine") + // Since we use two mechanisms to deal with RLS server being down: + // - adaptive throttling for the channel as a whole + // - exponential backoff on a per-request basis + // we need a way to avoid double-penalizing requests by counting failures + // toward both mechanisms when the RLS server is unreachable. + // + // To accomplish this, we monitor the state of the control plane channel. If + // the state has been TRANSIENT_FAILURE since the last time it was in state + // READY, and it then transitions into state READY, we push on a channel + // which is being read by the LB policy. 
+ // + // The LB the policy will iterate through the cache to reset the backoff + // timeouts in all cache entries. Specifically, this means that it will + // reset the backoff state and cancel the pending backoff timer. Note that + // when cancelling the backoff timer, just like when the backoff timer fires + // normally, a new picker is returned to the channel, to force it to + // re-process any wait-for-ready RPCs that may still be queued if we failed + // them while we were in backoff. However, we should optimize this case by + // returning only one new picker, regardless of how many backoff timers are + // cancelled. + + // Using the background context is fine here since we check for the ClientConn + // entering SHUTDOWN and return early in that case. + ctx := context.Background() + + first := true + for { + // Wait for the control channel to become READY. + for s := cc.cc.GetState(); s != connectivity.Ready; s = cc.cc.GetState() { + if s == connectivity.Shutdown { + return + } + cc.cc.WaitForStateChange(ctx, s) + } + cc.logger.Infof("Connectivity state is READY") + + if !first { + cc.logger.Infof("Control channel back to READY") + cc.backToReadyFunc() + } + first = false + + // Wait for the control channel to move out of READY. + cc.cc.WaitForStateChange(ctx, connectivity.Ready) + if cc.cc.GetState() == connectivity.Shutdown { + return + } + cc.logger.Infof("Connectivity state is %s", cc.cc.GetState()) + } +} + +func (cc *controlChannel) close() { + cc.logger.Infof("Closing control channel") + cc.cc.Close() +} + +type lookupCallback func(targets []string, headerData string, err error) + +// lookup starts a RouteLookup RPC in a separate goroutine and returns the +// results (and error, if any) in the provided callback. +// +// The returned boolean indicates whether the request was throttled by the +// client-side adaptive throttling algorithm in which case the provided callback +// will not be invoked. 
+func (cc *controlChannel) lookup(reqKeys map[string]string, reason rlspb.RouteLookupRequest_Reason, staleHeaders string, cb lookupCallback) (throttled bool) { + if cc.throttler.ShouldThrottle() { + cc.logger.Infof("RLS request throttled by client-side adaptive throttling") + return true + } + go func() { + req := &rlspb.RouteLookupRequest{ + TargetType: "grpc", + KeyMap: reqKeys, + Reason: reason, + StaleHeaderData: staleHeaders, + } + cc.logger.Infof("Sending RLS request %+v", pretty.ToJSON(req)) + + ctx, cancel := context.WithTimeout(context.Background(), cc.rpcTimeout) + defer cancel() + resp, err := cc.client.RouteLookup(ctx, req) + cb(resp.GetTargets(), resp.GetHeaderData(), err) + }() + return false +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go new file mode 100644 index 0000000000..a3b0931b29 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package adaptive provides functionality for adaptive client-side throttling. +package adaptive + +import ( + "sync" + "time" + + "google.golang.org/grpc/internal/grpcrand" +) + +// For overriding in unittests. 
+var ( + timeNowFunc = func() time.Time { return time.Now() } + randFunc = func() float64 { return grpcrand.Float64() } +) + +const ( + defaultDuration = 30 * time.Second + defaultBins = 100 + defaultRatioForAccepts = 2.0 + defaultRequestsPadding = 8.0 +) + +// Throttler implements a client-side throttling recommendation system. All +// methods are safe for concurrent use by multiple goroutines. +// +// The throttler has the following knobs for which we will use defaults for +// now. If there is a need to make them configurable at a later point in time, +// support for the same will be added. +// - Duration: amount of recent history that will be taken into account for +// making client-side throttling decisions. A default of 30 seconds is used. +// - Bins: number of bins to be used for bucketing historical data. A default +// of 100 is used. +// - RatioForAccepts: ratio by which accepts are multiplied, typically a value +// slightly larger than 1.0. This is used to make the throttler behave as if +// the backend had accepted more requests than it actually has, which lets us +// err on the side of sending to the backend more requests than we think it +// will accept for the sake of speeding up the propagation of state. A +// default of 2.0 is used. +// - RequestsPadding: is used to decrease the (client-side) throttling +// probability in the low QPS regime (to speed up propagation of state), as +// well as to safeguard against hitting a client-side throttling probability +// of 100%. The weight of this value decreases as the number of requests in +// recent history grows. A default of 8 is used. +// +// The adaptive throttler attempts to estimate the probability that a request +// will be throttled using recent history. 
Server requests (both throttled and +// accepted) are registered with the throttler (via the RegisterBackendResponse +// method), which then recommends client-side throttling (via the +// ShouldThrottle method) with probability given by: +// (requests - RatioForAccepts * accepts) / (requests + RequestsPadding) +type Throttler struct { + ratioForAccepts float64 + requestsPadding float64 + + // Number of total accepts and throttles in the lookback period. + mu sync.Mutex + accepts *lookback + throttles *lookback +} + +// New initializes a new adaptive throttler with the default values. +func New() *Throttler { + return newWithArgs(defaultDuration, defaultBins, defaultRatioForAccepts, defaultRequestsPadding) +} + +// newWithArgs initializes a new adaptive throttler with the provided values. +// Used only in unittests. +func newWithArgs(duration time.Duration, bins int64, ratioForAccepts, requestsPadding float64) *Throttler { + return &Throttler{ + ratioForAccepts: ratioForAccepts, + requestsPadding: requestsPadding, + accepts: newLookback(bins, duration), + throttles: newLookback(bins, duration), + } +} + +// ShouldThrottle returns a probabilistic estimate of whether the server would +// throttle the next request. This should be called for every request before +// allowing it to hit the network. If the returned value is true, the request +// should be aborted immediately (as if it had been throttled by the server). 
+func (t *Throttler) ShouldThrottle() bool { + randomProbability := randFunc() + now := timeNowFunc() + + t.mu.Lock() + defer t.mu.Unlock() + + accepts, throttles := float64(t.accepts.sum(now)), float64(t.throttles.sum(now)) + requests := accepts + throttles + throttleProbability := (requests - t.ratioForAccepts*accepts) / (requests + t.requestsPadding) + if throttleProbability <= randomProbability { + return false + } + + t.throttles.add(now, 1) + return true +} + +// RegisterBackendResponse registers a response received from the backend for a +// request allowed by ShouldThrottle. This should be called for every response +// received from the backend (i.e., once for each request for which +// ShouldThrottle returned false). +func (t *Throttler) RegisterBackendResponse(throttled bool) { + now := timeNowFunc() + + t.mu.Lock() + if throttled { + t.throttles.add(now, 1) + } else { + t.accepts.add(now, 1) + } + t.mu.Unlock() +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go new file mode 100644 index 0000000000..13b316b7fa --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go @@ -0,0 +1,91 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package adaptive + +import "time" + +// lookback implements a moving sum over an int64 timeline. +type lookback struct { + bins int64 // Number of bins to use for lookback. + width time.Duration // Width of each bin. + + head int64 // Absolute bin index (time * bins / duration) of the current head bin. + total int64 // Sum over all the values in buf, within the lookback window behind head. + buf []int64 // Ring buffer for keeping track of the sum elements. +} + +// newLookback creates a new lookback for the given duration with a set number +// of bins. +func newLookback(bins int64, duration time.Duration) *lookback { + return &lookback{ + bins: bins, + width: duration / time.Duration(bins), + buf: make([]int64, bins), + } +} + +// add is used to increment the lookback sum. +func (l *lookback) add(t time.Time, v int64) { + pos := l.advance(t) + + if (l.head - pos) >= l.bins { + // Do not increment counters if pos is more than bins behind head. + return + } + l.buf[pos%l.bins] += v + l.total += v +} + +// sum returns the sum of the lookback buffer at the given time or head, +// whichever is greater. +func (l *lookback) sum(t time.Time) int64 { + l.advance(t) + return l.total +} + +// advance prepares the lookback buffer for calls to add() or sum() at time t. +// If head is greater than t then the lookback buffer will be untouched. The +// absolute bin index corresponding to t is returned. It will always be less +// than or equal to head. +func (l *lookback) advance(t time.Time) int64 { + ch := l.head // Current head bin index. + nh := t.UnixNano() / l.width.Nanoseconds() // New head bin index. + + if nh <= ch { + // Either head unchanged or clock jitter (time has moved backwards). Do + // not advance. 
+ return nh + } + + jmax := min(l.bins, nh-ch) + for j := int64(0); j < jmax; j++ { + i := (ch + j + 1) % l.bins + l.total -= l.buf[i] + l.buf[i] = 0 + } + l.head = nh + return nh +} + +func min(x int64, y int64) int64 { + if x < y { + return x + } + return y +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go new file mode 100644 index 0000000000..d010f74456 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go @@ -0,0 +1,267 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keys provides functionality required to build RLS request keys. +package keys + +import ( + "errors" + "fmt" + "sort" + "strings" + + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/metadata" +) + +// BuilderMap maps from request path to the key builder for that path. +type BuilderMap map[string]builder + +// MakeBuilderMap parses the provided RouteLookupConfig proto and returns a map +// from paths to key builders. 
+func MakeBuilderMap(cfg *rlspb.RouteLookupConfig) (BuilderMap, error) { + kbs := cfg.GetGrpcKeybuilders() + if len(kbs) == 0 { + return nil, errors.New("rls: RouteLookupConfig does not contain any GrpcKeyBuilder") + } + + bm := make(map[string]builder) + for _, kb := range kbs { + // Extract keys from `headers`, `constant_keys` and `extra_keys` fields + // and populate appropriate values in the builder struct. Also ensure + // that keys are not repeated. + var matchers []matcher + seenKeys := make(map[string]bool) + constantKeys := kb.GetConstantKeys() + for k := range kb.GetConstantKeys() { + seenKeys[k] = true + } + for _, h := range kb.GetHeaders() { + if h.GetRequiredMatch() { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig has required_match field set {%+v}", kbs) + } + key := h.GetKey() + if seenKeys[key] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q across headers, constant_keys and extra_keys {%+v}", key, kbs) + } + seenKeys[key] = true + matchers = append(matchers, matcher{key: h.GetKey(), names: h.GetNames()}) + } + if seenKeys[kb.GetExtraKeys().GetHost()] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetHost(), kbs) + } + if seenKeys[kb.GetExtraKeys().GetService()] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetService(), kbs) + } + if seenKeys[kb.GetExtraKeys().GetMethod()] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetMethod(), kbs) + } + b := builder{ + headerKeys: matchers, + constantKeys: constantKeys, + hostKey: kb.GetExtraKeys().GetHost(), + serviceKey: kb.GetExtraKeys().GetService(), + methodKey: kb.GetExtraKeys().GetMethod(), 
+ } + + // Store the builder created above in the BuilderMap based on the value + // of the `Names` field, which wraps incoming request's service and + // method. Also, ensure that there are no repeated `Names` field. + names := kb.GetNames() + if len(names) == 0 { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig does not contain any Name {%+v}", kbs) + } + for _, name := range names { + if name.GetService() == "" { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains a Name field with no Service {%+v}", kbs) + } + if strings.Contains(name.GetMethod(), `/`) { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains a method with a slash {%+v}", kbs) + } + path := "/" + name.GetService() + "/" + name.GetMethod() + if _, ok := bm[path]; ok { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated Name field {%+v}", kbs) + } + bm[path] = b + } + } + return bm, nil +} + +// KeyMap represents the RLS keys to be used for a request. +type KeyMap struct { + // Map is the representation of an RLS key as a Go map. This is used when + // an actual RLS request is to be sent out on the wire, since the + // RouteLookupRequest proto expects a Go map. + Map map[string]string + // Str is the representation of an RLS key as a string, sorted by keys. + // Since the RLS keys are part of the cache key in the request cache + // maintained by the RLS balancer, and Go maps cannot be used as keys for + // Go maps (the cache is implemented as a map), we need a stringified + // version of it. + Str string +} + +// RLSKey builds the RLS keys to be used for the given request, identified by +// the request path and the request headers stored in metadata. +func (bm BuilderMap) RLSKey(md metadata.MD, host, path string) KeyMap { + // The path passed in is of the form "/service/method". The keyBuilderMap is + // indexed with keys of the form "/service/" or "/service/method". 
The service + // that we set in the keyMap (to be sent out in the RLS request) should not + // include any slashes though. + i := strings.LastIndex(path, "/") + service, method := path[:i+1], path[i+1:] + b, ok := bm[path] + if !ok { + b, ok = bm[service] + if !ok { + return KeyMap{} + } + } + + kvMap := b.buildHeaderKeys(md) + if b.hostKey != "" { + kvMap[b.hostKey] = host + } + if b.serviceKey != "" { + kvMap[b.serviceKey] = strings.Trim(service, "/") + } + if b.methodKey != "" { + kvMap[b.methodKey] = method + } + for k, v := range b.constantKeys { + kvMap[k] = v + } + return KeyMap{Map: kvMap, Str: mapToString(kvMap)} +} + +// Equal reports whether bm and am represent equivalent BuilderMaps. +func (bm BuilderMap) Equal(am BuilderMap) bool { + if (bm == nil) != (am == nil) { + return false + } + if len(bm) != len(am) { + return false + } + + for key, bBuilder := range bm { + aBuilder, ok := am[key] + if !ok { + return false + } + if !bBuilder.Equal(aBuilder) { + return false + } + } + return true +} + +// builder provides the actual functionality of building RLS keys. +type builder struct { + headerKeys []matcher + constantKeys map[string]string + // The following keys mirror corresponding fields in `extra_keys`. + hostKey string + serviceKey string + methodKey string +} + +// Equal reports whether b and a represent equivalent key builders. +func (b builder) Equal(a builder) bool { + if len(b.headerKeys) != len(a.headerKeys) { + return false + } + // Protobuf serialization maintains the order of repeated fields. Matchers + // are specified as a repeated field inside the KeyBuilder proto. If the + // order changes, it means that the order in the protobuf changed. We report + // this case as not being equal even though the builders could possible be + // functionally equal. 
+ for i, bMatcher := range b.headerKeys { + aMatcher := a.headerKeys[i] + if !bMatcher.Equal(aMatcher) { + return false + } + } + + if len(b.constantKeys) != len(a.constantKeys) { + return false + } + for k, v := range b.constantKeys { + if a.constantKeys[k] != v { + return false + } + } + + return b.hostKey == a.hostKey && b.serviceKey == a.serviceKey && b.methodKey == a.methodKey +} + +// matcher helps extract a key from request headers based on a given name. +type matcher struct { + // The key used in the keyMap sent as part of the RLS request. + key string + // List of header names which can supply the value for this key. + names []string +} + +// Equal reports if m and are are equivalent headerKeys. +func (m matcher) Equal(a matcher) bool { + if m.key != a.key { + return false + } + if len(m.names) != len(a.names) { + return false + } + for i := 0; i < len(m.names); i++ { + if m.names[i] != a.names[i] { + return false + } + } + return true +} + +func (b builder) buildHeaderKeys(md metadata.MD) map[string]string { + kvMap := make(map[string]string) + if len(md) == 0 { + return kvMap + } + for _, m := range b.headerKeys { + for _, name := range m.names { + if vals := md.Get(name); vals != nil { + kvMap[m.key] = strings.Join(vals, ",") + break + } + } + } + return kvMap +} + +func mapToString(kv map[string]string) string { + keys := make([]string, 0, len(kv)) + for k := range kv { + keys = append(keys, k) + } + sort.Strings(keys) + var sb strings.Builder + for i, k := range keys { + if i != 0 { + fmt.Fprint(&sb, ",") + } + fmt.Fprintf(&sb, "%s=%s", k, kv[k]) + } + return sb.String() +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/picker.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/picker.go new file mode 100644 index 0000000000..c2d9727396 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/rls/picker.go @@ -0,0 +1,331 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "errors" + "fmt" + "strings" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/keys" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +var ( + errRLSThrottled = errors.New("RLS call throttled at client side") + + // Function to compute data cache entry size. + computeDataCacheEntrySize = dcEntrySize +) + +// exitIdler wraps the only method on the BalancerGroup that the picker calls. +type exitIdler interface { + ExitIdleOne(id string) +} + +// rlsPicker selects the subConn to be used for a particular RPC. It does not +// manage subConns directly and delegates to pickers provided by child policies. +type rlsPicker struct { + // The keyBuilder map used to generate RLS keys for the RPC. This is built + // by the LB policy based on the received ServiceConfig. + kbm keys.BuilderMap + // Endpoint from the user's original dial target. Used to set the `host_key` + // field in `extra_keys`. + origEndpoint string + + lb *rlsBalancer + + // The picker is given its own copy of the below fields from the RLS LB policy + // to avoid having to grab the mutex on the latter. 
+ defaultPolicy *childPolicyWrapper // Child policy for the default target. + ctrlCh *controlChannel // Control channel to the RLS server. + maxAge time.Duration // Cache max age from LB config. + staleAge time.Duration // Cache stale age from LB config. + bg exitIdler + logger *internalgrpclog.PrefixLogger +} + +// isFullMethodNameValid return true if name is of the form `/service/method`. +func isFullMethodNameValid(name string) bool { + return strings.HasPrefix(name, "/") && strings.Count(name, "/") == 2 +} + +// Pick makes the routing decision for every outbound RPC. +func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + if name := info.FullMethodName; !isFullMethodNameValid(name) { + return balancer.PickResult{}, fmt.Errorf("rls: method name %q is not of the form '/service/method", name) + } + + // Build the request's keys using the key builders from LB config. + md, _ := metadata.FromOutgoingContext(info.Ctx) + reqKeys := p.kbm.RLSKey(md, p.origEndpoint, info.FullMethodName) + + p.lb.cacheMu.Lock() + defer p.lb.cacheMu.Unlock() + + // Lookup data cache and pending request map using request path and keys. + cacheKey := cacheKey{path: info.FullMethodName, keys: reqKeys.Str} + dcEntry := p.lb.dataCache.getEntry(cacheKey) + pendingEntry := p.lb.pendingMap[cacheKey] + now := time.Now() + + switch { + // No data cache entry. No pending request. + case dcEntry == nil && pendingEntry == nil: + throttled := p.sendRouteLookupRequestLocked(cacheKey, &backoffState{bs: defaultBackoffStrategy}, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") + if throttled { + return p.useDefaultPickIfPossible(info, errRLSThrottled) + } + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + + // No data cache entry. Pending request exits. + case dcEntry == nil && pendingEntry != nil: + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + + // Data cache hit. No pending request. 
+ case dcEntry != nil && pendingEntry == nil: + if dcEntry.expiryTime.After(now) { + if !dcEntry.staleTime.IsZero() && dcEntry.staleTime.Before(now) && dcEntry.backoffTime.Before(now) { + p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_STALE, dcEntry.headerData) + } + // Delegate to child policies. + res, err := p.delegateToChildPoliciesLocked(dcEntry, info) + return res, err + } + + // We get here only if the data cache entry has expired. If entry is in + // backoff, delegate to default target or fail the pick. + if dcEntry.backoffState != nil && dcEntry.backoffTime.After(now) { + // Avoid propagating the status code received on control plane RPCs to the + // data plane which can lead to unexpected outcomes as we do not control + // the status code sent by the control plane. Propagating the status + // message received from the control plane is still fine, as it could be + // useful for debugging purposes. + st := dcEntry.status + return p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error()))) + } + + // We get here only if the entry has expired and is not in backoff. + throttled := p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") + if throttled { + return p.useDefaultPickIfPossible(info, errRLSThrottled) + } + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + + // Data cache hit. Pending request exists. + default: + if dcEntry.expiryTime.After(now) { + res, err := p.delegateToChildPoliciesLocked(dcEntry, info) + return res, err + } + // Data cache entry has expired and pending request exists. Queue pick. 
+ return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } +} + +// delegateToChildPoliciesLocked is a helper function which iterates through the +// list of child policy wrappers in a cache entry and attempts to find a child +// policy to which this RPC can be routed to. If all child policies are in +// TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily. +func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, error) { + const rlsDataHeaderName = "x-google-rls-data" + for i, cpw := range dcEntry.childPolicyWrappers { + state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) + // Delegate to the child policy if it is not in TRANSIENT_FAILURE, or if + // it is the last one (which handles the case of delegating to the last + // child picker if all child polcies are in TRANSIENT_FAILURE). + if state.ConnectivityState != connectivity.TransientFailure || i == len(dcEntry.childPolicyWrappers)-1 { + // Any header data received from the RLS server is stored in the + // cache entry and needs to be sent to the actual backend in the + // X-Google-RLS-Data header. + res, err := state.Picker.Pick(info) + if err != nil { + return res, err + } + if res.Metadata == nil { + res.Metadata = metadata.Pairs(rlsDataHeaderName, dcEntry.headerData) + } else { + res.Metadata.Append(rlsDataHeaderName, dcEntry.headerData) + } + return res, nil + } + } + // In the unlikely event that we have a cache entry with no targets, we end up + // queueing the RPC. + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +// useDefaultPickIfPossible is a helper method which delegates to the default +// target if one is configured, or fails the pick with the given error. 
+func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, error) { + if p.defaultPolicy != nil { + state := (*balancer.State)(atomic.LoadPointer(&p.defaultPolicy.state)) + return state.Picker.Pick(info) + } + return balancer.PickResult{}, errOnNoDefault +} + +// sendRouteLookupRequestLocked adds an entry to the pending request map and +// sends out an RLS request using the passed in arguments. Returns a value +// indicating if the request was throttled by the client-side adaptive +// throttler. +func (p *rlsPicker) sendRouteLookupRequestLocked(cacheKey cacheKey, bs *backoffState, reqKeys map[string]string, reason rlspb.RouteLookupRequest_Reason, staleHeaders string) bool { + if p.lb.pendingMap[cacheKey] != nil { + return false + } + + p.lb.pendingMap[cacheKey] = bs + throttled := p.ctrlCh.lookup(reqKeys, reason, staleHeaders, func(targets []string, headerData string, err error) { + p.handleRouteLookupResponse(cacheKey, targets, headerData, err) + }) + if throttled { + delete(p.lb.pendingMap, cacheKey) + } + return throttled +} + +// handleRouteLookupResponse is the callback invoked by the control channel upon +// receipt of an RLS response. Modifies the data cache and pending requests map +// and sends a new picker. +// +// Acquires the write-lock on the cache. Caller must not hold p.lb.cacheMu. +func (p *rlsPicker) handleRouteLookupResponse(cacheKey cacheKey, targets []string, headerData string, err error) { + p.logger.Infof("Received RLS response for key %+v with targets %+v, headerData %q, err: %v", cacheKey, targets, headerData, err) + + p.lb.cacheMu.Lock() + defer func() { + // Pending request map entry is unconditionally deleted since the request is + // no longer pending. + p.logger.Infof("Removing pending request entry for key %+v", cacheKey) + delete(p.lb.pendingMap, cacheKey) + p.lb.sendNewPicker() + p.lb.cacheMu.Unlock() + }() + + // Lookup the data cache entry or create a new one. 
+ dcEntry := p.lb.dataCache.getEntry(cacheKey) + if dcEntry == nil { + dcEntry = &cacheEntry{} + if _, ok := p.lb.dataCache.addEntry(cacheKey, dcEntry); !ok { + // This is a very unlikely case where we are unable to add a + // data cache entry. Log and leave. + p.logger.Warningf("Failed to add data cache entry for %+v", cacheKey) + return + } + } + + // For failed requests, the data cache entry is modified as follows: + // - status is set to error returned from the control channel + // - current backoff state is available in the pending entry + // - `retries` field is incremented and + // - backoff state is moved to the data cache + // - backoffTime is set to the time indicated by the backoff state + // - backoffExpirationTime is set to twice the backoff time + // - backoffTimer is set to fire after backoffTime + // + // When a proactive cache refresh fails, this would leave the targets and the + // expiry time from the old entry unchanged. And this mean that the old valid + // entry would be used until expiration, and a new picker would be sent upon + // backoff expiry. 
+ now := time.Now() + if err != nil { + dcEntry.status = err + pendingEntry := p.lb.pendingMap[cacheKey] + pendingEntry.retries++ + backoffTime := pendingEntry.bs.Backoff(pendingEntry.retries) + dcEntry.backoffState = pendingEntry + dcEntry.backoffTime = now.Add(backoffTime) + dcEntry.backoffExpiryTime = now.Add(2 * backoffTime) + if dcEntry.backoffState.timer != nil { + dcEntry.backoffState.timer.Stop() + } + dcEntry.backoffState.timer = time.AfterFunc(backoffTime, p.lb.sendNewPicker) + return + } + + // For successful requests, the cache entry is modified as follows: + // - childPolicyWrappers is set to point to the child policy wrappers + // associated with the targets specified in the received response + // - headerData is set to the value received in the response + // - expiryTime, stateTime and earliestEvictionTime are set + // - status is set to nil (OK status) + // - backoff state is cleared + p.setChildPolicyWrappersInCacheEntry(dcEntry, targets) + dcEntry.headerData = headerData + dcEntry.expiryTime = now.Add(p.maxAge) + if p.staleAge != 0 { + dcEntry.staleTime = now.Add(p.staleAge) + } + dcEntry.earliestEvictTime = now.Add(minEvictDuration) + dcEntry.status = nil + dcEntry.backoffState = &backoffState{bs: defaultBackoffStrategy} + dcEntry.backoffTime = time.Time{} + dcEntry.backoffExpiryTime = time.Time{} + p.lb.dataCache.updateEntrySize(dcEntry, computeDataCacheEntrySize(cacheKey, dcEntry)) +} + +// setChildPolicyWrappersInCacheEntry sets up the childPolicyWrappers field in +// the cache entry to point to the child policy wrappers for the targets +// specified in the RLS response. +// +// Caller must hold a write-lock on p.lb.cacheMu. +func (p *rlsPicker) setChildPolicyWrappersInCacheEntry(dcEntry *cacheEntry, newTargets []string) { + // If the childPolicyWrappers field is already pointing to the right targets, + // then the field's value does not need to change. 
+ targetsChanged := true + func() { + if cpws := dcEntry.childPolicyWrappers; cpws != nil { + if len(newTargets) != len(cpws) { + return + } + for i, target := range newTargets { + if cpws[i].target != target { + return + } + } + targetsChanged = false + } + }() + if !targetsChanged { + return + } + + // If the childPolicyWrappers field is not already set to the right targets, + // then it must be reset. We construct a new list of child policies and + // then swap out the old list for the new one. + newChildPolicies := p.lb.acquireChildPolicyReferences(newTargets) + oldChildPolicyTargets := make([]string, len(dcEntry.childPolicyWrappers)) + for i, cpw := range dcEntry.childPolicyWrappers { + oldChildPolicyTargets[i] = cpw.target + } + p.lb.releaseChildPolicyReferences(oldChildPolicyTargets) + dcEntry.childPolicyWrappers = newChildPolicies +} + +func dcEntrySize(key cacheKey, entry *cacheEntry) int64 { + return int64(len(key.path) + len(key.keys) + len(entry.headerData)) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go new file mode 100644 index 0000000000..a164d1bedd --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go @@ -0,0 +1,537 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package weightedroundrobin + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/weightedroundrobin/internal" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// Name is the name of the weighted round robin balancer. +const Name = "weighted_round_robin_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &wrrBalancer{ + cc: cc, + subConns: resolver.NewAddressMap(), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + scMap: make(map[balancer.SubConn]*weightedSubConn), + connectivityState: connectivity.Connecting, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + return b +} + +func (bb) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + lbCfg := &lbConfig{ + // Default values as documented in A58. 
+ OOBReportingPeriod: iserviceconfig.Duration(10 * time.Second), + BlackoutPeriod: iserviceconfig.Duration(10 * time.Second), + WeightExpirationPeriod: iserviceconfig.Duration(3 * time.Minute), + WeightUpdatePeriod: iserviceconfig.Duration(time.Second), + ErrorUtilizationPenalty: 1, + } + if err := json.Unmarshal(js, lbCfg); err != nil { + return nil, fmt.Errorf("wrr: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + + if lbCfg.ErrorUtilizationPenalty < 0 { + return nil, fmt.Errorf("wrr: errorUtilizationPenalty must be non-negative") + } + + // For easier comparisons later, ensure the OOB reporting period is unset + // (0s) when OOB reports are disabled. + if !lbCfg.EnableOOBLoadReport { + lbCfg.OOBReportingPeriod = 0 + } + + // Impose lower bound of 100ms on weightUpdatePeriod. + if !internal.AllowAnyWeightUpdatePeriod && lbCfg.WeightUpdatePeriod < iserviceconfig.Duration(100*time.Millisecond) { + lbCfg.WeightUpdatePeriod = iserviceconfig.Duration(100 * time.Millisecond) + } + + return lbCfg, nil +} + +func (bb) Name() string { + return Name +} + +// wrrBalancer implements the weighted round robin LB policy. +type wrrBalancer struct { + cc balancer.ClientConn + logger *grpclog.PrefixLogger + + // The following fields are only accessed on calls into the LB policy, and + // do not need a mutex. 
+ cfg *lbConfig // active config + subConns *resolver.AddressMap // active weightedSubConns mapped by address + scMap map[balancer.SubConn]*weightedSubConn + connectivityState connectivity.State // aggregate state + csEvltr *balancer.ConnectivityStateEvaluator + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure + stopPicker func() +} + +func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + b.logger.Infof("UpdateCCS: %v", ccs) + b.resolverErr = nil + cfg, ok := ccs.BalancerConfig.(*lbConfig) + if !ok { + return fmt.Errorf("wrr: received nil or illegal BalancerConfig (type %T): %v", ccs.BalancerConfig, ccs.BalancerConfig) + } + + b.cfg = cfg + b.updateAddresses(ccs.ResolverState.Addresses) + + if len(ccs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("resolver produced zero addresses")) // will call regeneratePicker + return balancer.ErrBadResolverState + } + + b.regeneratePicker() + + return nil +} + +func (b *wrrBalancer) updateAddresses(addrs []resolver.Address) { + addrsSet := resolver.NewAddressMap() + + // Loop through new address list and create subconns for any new addresses. + for _, addr := range addrs { + if _, ok := addrsSet.Get(addr); ok { + // Redundant address; skip. + continue + } + addrsSet.Set(addr, nil) + + var wsc *weightedSubConn + wsci, ok := b.subConns.Get(addr) + if ok { + wsc = wsci.(*weightedSubConn) + } else { + // addr is a new address (not existing in b.subConns). 
+ sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{}) + if err != nil { + b.logger.Warningf("Failed to create new SubConn for address %v: %v", addr, err) + continue + } + wsc = &weightedSubConn{ + SubConn: sc, + logger: b.logger, + connectivityState: connectivity.Idle, + // Initially, we set load reports to off, because they are not + // running upon initial weightedSubConn creation. + cfg: &lbConfig{EnableOOBLoadReport: false}, + } + b.subConns.Set(addr, wsc) + b.scMap[sc] = wsc + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) + sc.Connect() + } + // Update config for existing weightedSubConn or send update for first + // time to new one. Ensures an OOB listener is running if needed + // (and stops the existing one if applicable). + wsc.updateConfig(b.cfg) + } + + // Loop through existing subconns and remove ones that are not in addrs. + for _, addr := range b.subConns.Keys() { + if _, ok := addrsSet.Get(addr); ok { + // Existing address also in new address list; skip. + continue + } + // addr was removed by resolver. Remove. + wsci, _ := b.subConns.Get(addr) + wsc := wsci.(*weightedSubConn) + b.cc.RemoveSubConn(wsc.SubConn) + b.subConns.Delete(addr) + } +} + +func (b *wrrBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.connectivityState = connectivity.TransientFailure + } + if b.connectivityState != connectivity.TransientFailure { + // No need to update the picker since no error is being returned. 
+ return + } + b.regeneratePicker() +} + +func (b *wrrBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + wsc := b.scMap[sc] + if wsc == nil { + b.logger.Errorf("UpdateSubConnState called with an unknown SubConn: %p, %v", sc, state) + return + } + if b.logger.V(2) { + logger.Infof("UpdateSubConnState(%+v, %+v)", sc, state) + } + + cs := state.ConnectivityState + + if cs == connectivity.TransientFailure { + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + if cs == connectivity.Shutdown { + delete(b.scMap, sc) + // The subconn was removed from b.subConns when the address was removed + // in updateAddresses. + } + + oldCS := wsc.updateConnectivityState(cs) + b.connectivityState = b.csEvltr.RecordTransition(oldCS, cs) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (cs == connectivity.Ready) != (oldCS == connectivity.Ready) || + b.connectivityState == connectivity.TransientFailure { + b.regeneratePicker() + } +} + +// Close stops the balancer. It cancels any ongoing scheduler updates and +// stops any ORCA listeners. +func (b *wrrBalancer) Close() { + if b.stopPicker != nil { + b.stopPicker() + b.stopPicker = nil + } + for _, wsc := range b.scMap { + // Ensure any lingering OOB watchers are stopped. + wsc.updateConnectivityState(connectivity.Shutdown) + } +} + +// ExitIdle is ignored; we always connect to all backends. +func (b *wrrBalancer) ExitIdle() {} + +func (b *wrrBalancer) readySubConns() []*weightedSubConn { + var ret []*weightedSubConn + for _, v := range b.subConns.Values() { + wsc := v.(*weightedSubConn) + if wsc.connectivityState == connectivity.Ready { + ret = append(ret, wsc) + } + } + return ret +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. 
Must only be called if b.connectivityState is +// TransientFailure. +func (b *wrrBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +func (b *wrrBalancer) regeneratePicker() { + if b.stopPicker != nil { + b.stopPicker() + b.stopPicker = nil + } + + switch b.connectivityState { + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(b.mergeErrors()), + }) + return + case connectivity.Connecting, connectivity.Idle: + // Idle could happen very briefly if all subconns are Idle and we've + // asked them to connect but they haven't reported Connecting yet. + // Report the same as Connecting since this is temporary. + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }) + return + case connectivity.Ready: + b.connErr = nil + } + + p := &picker{ + v: grpcrand.Uint32(), // start the scheduler at a random point + cfg: b.cfg, + subConns: b.readySubConns(), + } + var ctx context.Context + ctx, b.stopPicker = context.WithCancel(context.Background()) + p.start(ctx) + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.connectivityState, + Picker: p, + }) +} + +// picker is the WRR policy's picker. It uses live-updating backend weights to +// update the scheduler periodically and ensure picks are routed proportional +// to those weights. 
+type picker struct { + scheduler unsafe.Pointer // *scheduler; accessed atomically + v uint32 // incrementing value used by the scheduler; accessed atomically + cfg *lbConfig // active config when picker created + subConns []*weightedSubConn // all READY subconns +} + +// scWeights returns a slice containing the weights from p.subConns in the same +// order as p.subConns. +func (p *picker) scWeights() []float64 { + ws := make([]float64, len(p.subConns)) + now := internal.TimeNow() + for i, wsc := range p.subConns { + ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod)) + } + return ws +} + +func (p *picker) inc() uint32 { + return atomic.AddUint32(&p.v, 1) +} + +func (p *picker) regenerateScheduler() { + s := newScheduler(p.scWeights(), p.inc) + atomic.StorePointer(&p.scheduler, unsafe.Pointer(&s)) +} + +func (p *picker) start(ctx context.Context) { + p.regenerateScheduler() + if len(p.subConns) == 1 { + // No need to regenerate weights with only one backend. + return + } + go func() { + ticker := time.NewTicker(time.Duration(p.cfg.WeightUpdatePeriod)) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + p.regenerateScheduler() + } + } + }() +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + // Read the scheduler atomically. All scheduler operations are threadsafe, + // and if the scheduler is replaced during this usage, we want to use the + // scheduler that was live when the pick started. 
+ sched := *(*scheduler)(atomic.LoadPointer(&p.scheduler)) + + pickedSC := p.subConns[sched.nextIndex()] + pr := balancer.PickResult{SubConn: pickedSC.SubConn} + if !p.cfg.EnableOOBLoadReport { + pr.Done = func(info balancer.DoneInfo) { + if load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport); ok && load != nil { + pickedSC.OnLoadReport(load) + } + } + } + return pr, nil +} + +// weightedSubConn is the wrapper of a subconn that holds the subconn and its +// weight (and other parameters relevant to computing the effective weight). +// When needed, it also tracks connectivity state, listens for metrics updates +// by implementing the orca.OOBListener interface and manages that listener. +type weightedSubConn struct { + balancer.SubConn + logger *grpclog.PrefixLogger + + // The following fields are only accessed on calls into the LB policy, and + // do not need a mutex. + connectivityState connectivity.State + stopORCAListener func() + + // The following fields are accessed asynchronously and are protected by + // mu. Note that mu may not be held when calling into the stopORCAListener + // or when registering a new listener, as those calls require the ORCA + // producer mu which is held when calling the listener, and the listener + // holds mu. 
+ mu sync.Mutex + weightVal float64 + nonEmptySince time.Time + lastUpdated time.Time + cfg *lbConfig +} + +func (w *weightedSubConn) OnLoadReport(load *v3orcapb.OrcaLoadReport) { + if w.logger.V(2) { + w.logger.Infof("Received load report for subchannel %v: %v", w.SubConn, load) + } + // Update weights of this subchannel according to the reported load + utilization := load.ApplicationUtilization + if utilization == 0 { + utilization = load.CpuUtilization + } + if utilization == 0 || load.RpsFractional == 0 { + if w.logger.V(2) { + w.logger.Infof("Ignoring empty load report for subchannel %v", w.SubConn) + } + return + } + + w.mu.Lock() + defer w.mu.Unlock() + + errorRate := load.Eps / load.RpsFractional + w.weightVal = load.RpsFractional / (utilization + errorRate*w.cfg.ErrorUtilizationPenalty) + if w.logger.V(2) { + w.logger.Infof("New weight for subchannel %v: %v", w.SubConn, w.weightVal) + } + + w.lastUpdated = internal.TimeNow() + if w.nonEmptySince == (time.Time{}) { + w.nonEmptySince = w.lastUpdated + } +} + +// updateConfig updates the parameters of the WRR policy and +// stops/starts/restarts the ORCA OOB listener. +func (w *weightedSubConn) updateConfig(cfg *lbConfig) { + w.mu.Lock() + oldCfg := w.cfg + w.cfg = cfg + w.mu.Unlock() + + newPeriod := cfg.OOBReportingPeriod + if cfg.EnableOOBLoadReport == oldCfg.EnableOOBLoadReport && + newPeriod == oldCfg.OOBReportingPeriod { + // Load reporting wasn't enabled before or after, or load reporting was + // enabled before and after, and had the same period. (Note that with + // load reporting disabled, OOBReportingPeriod is always 0.) + return + } + // (Optionally stop and) start the listener to use the new config's + // settings for OOB reporting. 
+ + if w.stopORCAListener != nil { + w.stopORCAListener() + } + if !cfg.EnableOOBLoadReport { + w.stopORCAListener = nil + return + } + if w.logger.V(2) { + w.logger.Infof("Registering ORCA listener for %v with interval %v", w.SubConn, newPeriod) + } + opts := orca.OOBListenerOptions{ReportInterval: time.Duration(newPeriod)} + w.stopORCAListener = orca.RegisterOOBListener(w.SubConn, w, opts) +} + +func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connectivity.State { + switch cs { + case connectivity.Idle: + // Always reconnect when idle. + w.SubConn.Connect() + case connectivity.Ready: + // If we transition back to READY state, reset nonEmptySince so that we + // apply the blackout period after we start receiving load data. Note + // that we cannot guarantee that we will never receive lingering + // callbacks for backend metric reports from the previous connection + // after the new connection has been established, but they should be + // masked by new backend metric reports from the new connection by the + // time the blackout period ends. + w.mu.Lock() + w.nonEmptySince = time.Time{} + w.mu.Unlock() + case connectivity.Shutdown: + if w.stopORCAListener != nil { + w.stopORCAListener() + } + } + + oldCS := w.connectivityState + + if oldCS == connectivity.TransientFailure && + (cs == connectivity.Connecting || cs == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return oldCS + } + + w.connectivityState = cs + + return oldCS +} + +// weight returns the current effective weight of the subconn, taking into +// account the parameters. Returns 0 for blacked out or expired data, which +// will cause the backend weight to be treated as the mean of the weights of +// the other backends. 
+func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration) float64 { + w.mu.Lock() + defer w.mu.Unlock() + // If the most recent update was longer ago than the expiration period, + // reset nonEmptySince so that we apply the blackout period again if we + // start getting data again in the future, and return 0. + if now.Sub(w.lastUpdated) >= weightExpirationPeriod { + w.nonEmptySince = time.Time{} + return 0 + } + // If we don't have at least blackoutPeriod worth of data, return 0. + if blackoutPeriod != 0 && (w.nonEmptySince == (time.Time{}) || now.Sub(w.nonEmptySince) < blackoutPeriod) { + return 0 + } + return w.weightVal +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/config.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/config.go new file mode 100644 index 0000000000..38f89d32fb --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/config.go @@ -0,0 +1,59 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package weightedroundrobin + +import ( + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +type lbConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // Whether to enable out-of-band utilization reporting collection from the + // endpoints. By default, per-request utilization reporting is used. + EnableOOBLoadReport bool `json:"enableOobLoadReport,omitempty"` + + // Load reporting interval to request from the server. Note that the + // server may not provide reports as frequently as the client requests. + // Used only when enable_oob_load_report is true. Default is 10 seconds. + OOBReportingPeriod iserviceconfig.Duration `json:"oobReportingPeriod,omitempty"` + + // A given endpoint must report load metrics continuously for at least this + // long before the endpoint weight will be used. This avoids churn when + // the set of endpoint addresses changes. Takes effect both immediately + // after we establish a connection to an endpoint and after + // weight_expiration_period has caused us to stop using the most recent + // load metrics. Default is 10 seconds. + BlackoutPeriod iserviceconfig.Duration `json:"blackoutPeriod,omitempty"` + + // If a given endpoint has not reported load metrics in this long, + // then we stop using the reported weight. This ensures that we do + // not continue to use very stale weights. Once we stop using a stale + // value, if we later start seeing fresh reports again, the + // blackout_period applies. Defaults to 3 minutes. + WeightExpirationPeriod iserviceconfig.Duration `json:"weightExpirationPeriod,omitempty"` + + // How often endpoint weights are recalculated. Default is 1 second. + WeightUpdatePeriod iserviceconfig.Duration `json:"weightUpdatePeriod,omitempty"` + + // The multiplier used to adjust endpoint weights with the error rate + // calculated as eps/qps. Default is 1.0. 
+ ErrorUtilizationPenalty float64 `json:"errorUtilizationPenalty,omitempty"` +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/internal/internal.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/internal/internal.go new file mode 100644 index 0000000000..7b64fbf4e5 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/internal/internal.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal allows for easier testing of the weightedroundrobin +// package. +package internal + +import ( + "time" +) + +// AllowAnyWeightUpdatePeriod permits any setting of WeightUpdatePeriod for +// testing. Normally a minimum of 100ms is applied. +var AllowAnyWeightUpdatePeriod bool + +// LBConfig allows tests to produce a JSON form of the config from the struct +// instead of using a string. 
+type LBConfig struct { + EnableOOBLoadReport *bool `json:"enableOobLoadReport,omitempty"` + OOBReportingPeriod *string `json:"oobReportingPeriod,omitempty"` + BlackoutPeriod *string `json:"blackoutPeriod,omitempty"` + WeightExpirationPeriod *string `json:"weightExpirationPeriod,omitempty"` + WeightUpdatePeriod *string `json:"weightUpdatePeriod,omitempty"` + ErrorUtilizationPenalty *float64 `json:"errorUtilizationPenalty,omitempty"` +} + +// TimeNow can be overridden by tests to return a different value for the +// current iserviceconfig. +var TimeNow = time.Now diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/logging.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/logging.go new file mode 100644 index 0000000000..43184ca9ab --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package weightedroundrobin + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[%p] " + +var logger = grpclog.Component("weighted-round-robin") + +func prefixLogger(p *wrrBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go new file mode 100644 index 0000000000..e19428112e --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go @@ -0,0 +1,138 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedroundrobin + +import ( + "math" +) + +type scheduler interface { + nextIndex() int +} + +// newScheduler uses scWeights to create a new scheduler for selecting subconns +// in a picker. It will return a round robin implementation if at least +// len(scWeights)-1 are zero or there is only a single subconn, otherwise it +// will return an Earliest Deadline First (EDF) scheduler implementation that +// selects the subchannels according to their weights. 
+func newScheduler(scWeights []float64, inc func() uint32) scheduler { + n := len(scWeights) + if n == 0 { + return nil + } + if n == 1 { + return &rrScheduler{numSCs: 1, inc: inc} + } + sum := float64(0) + numZero := 0 + max := float64(0) + for _, w := range scWeights { + sum += w + if w > max { + max = w + } + if w == 0 { + numZero++ + } + } + if numZero >= n-1 { + return &rrScheduler{numSCs: uint32(n), inc: inc} + } + unscaledMean := sum / float64(n-numZero) + scalingFactor := maxWeight / max + mean := uint16(math.Round(scalingFactor * unscaledMean)) + + weights := make([]uint16, n) + allEqual := true + for i, w := range scWeights { + if w == 0 { + // Backends with weight = 0 use the mean. + weights[i] = mean + } else { + scaledWeight := uint16(math.Round(scalingFactor * w)) + weights[i] = scaledWeight + if scaledWeight != mean { + allEqual = false + } + } + } + + if allEqual { + return &rrScheduler{numSCs: uint32(n), inc: inc} + } + + logger.Infof("using edf scheduler with weights: %v", weights) + return &edfScheduler{weights: weights, inc: inc} +} + +const maxWeight = math.MaxUint16 + +// edfScheduler implements EDF using the same algorithm as grpc-c++ here: +// +// https://github.com/grpc/grpc/blob/master/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/static_stride_scheduler.cc +type edfScheduler struct { + inc func() uint32 + weights []uint16 +} + +// Returns the index in s.weights for the picker to choose. +func (s *edfScheduler) nextIndex() int { + const offset = maxWeight / 2 + + for { + idx := uint64(s.inc()) + + // The sequence number (idx) is split in two: the lower %n gives the + // index of the backend, and the rest gives the number of times we've + // iterated through all backends. `generation` is used to + // deterministically decide whether we pick or skip the backend on this + // iteration, in proportion to the backend's weight. 
+ + backendIndex := idx % uint64(len(s.weights)) + generation := idx / uint64(len(s.weights)) + weight := uint64(s.weights[backendIndex]) + + // We pick a backend `weight` times per `maxWeight` generations. The + // multiply and modulus ~evenly spread out the picks for a given + // backend between different generations. The offset by `backendIndex` + // helps to reduce the chance of multiple consecutive non-picks: if we + // have two consecutive backends with an equal, say, 80% weight of the + // max, with no offset we would see 1/5 generations that skipped both. + // TODO(b/190488683): add test for offset efficacy. + mod := uint64(weight*generation+backendIndex*offset) % maxWeight + + if mod < maxWeight-weight { + continue + } + return int(backendIndex) + } +} + +// A simple RR scheduler to use for fallback when fewer than two backends have +// non-zero weights, or all backends have the the same weight, or when only one +// subconn exists. +type rrScheduler struct { + inc func() uint32 + numSCs uint32 +} + +func (s *rrScheduler) nextIndex() int { + idx := s.inc() + return int(idx % s.numSCs) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go index 6fc4d1910e..7567462e02 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go @@ -16,16 +16,23 @@ * */ -// Package weightedroundrobin defines a weighted roundrobin balancer. +// Package weightedroundrobin provides an implementation of the weighted round +// robin LB policy, as defined in [gRFC A58]. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. 
+// +// [gRFC A58]: https://github.com/grpc/proposal/blob/master/A58-client-side-weighted-round-robin-lb-policy.md package weightedroundrobin import ( + "fmt" + "google.golang.org/grpc/resolver" ) -// Name is the name of weighted_round_robin balancer. -const Name = "weighted_round_robin" - // attributeKey is the type used as the key to store AddrInfo in the // BalancerAttributes field of resolver.Address. type attributeKey struct{} @@ -44,11 +51,6 @@ func (a AddrInfo) Equal(o interface{}) bool { // SetAddrInfo returns a copy of addr in which the BalancerAttributes field is // updated with addrInfo. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { addr.BalancerAttributes = addr.BalancerAttributes.WithValue(attributeKey{}, addrInfo) return addr @@ -56,13 +58,12 @@ func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { // GetAddrInfo returns the AddrInfo stored in the BalancerAttributes field of // addr. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. 
func GetAddrInfo(addr resolver.Address) AddrInfo { v := addr.BalancerAttributes.Value(attributeKey{}) ai, _ := v.(AddrInfo) return ai } + +func (a AddrInfo) String() string { + return fmt.Sprintf("Weight: %d", a.Weight) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go index 37fc41c168..27279257ed 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -178,6 +178,14 @@ func (wbsa *Aggregator) ResumeStateUpdates() { } } +// NeedUpdateStateOnResume sets the UpdateStateOnResume bool to true, letting a +// picker update be sent once ResumeStateUpdates is called. +func (wbsa *Aggregator) NeedUpdateStateOnResume() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.needUpdateStateOnResume = true +} + // UpdateState is called to report a balancer state change from sub-balancer. // It's usually called by the balancer group. // diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go index 83bb7d701f..3d5acdab6a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go @@ -143,6 +143,18 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat b.targets = newConfig.Targets + // If the targets length is zero, it means we have removed all child + // policies from the balancer group and aggregator. 
+ // At the start of this UpdateClientConnState() operation, a call to + // b.stateAggregator.ResumeStateUpdates() is deferred. Thus, setting the + // needUpdateStateOnResume bool to true here will ensure a new picker is + // built as part of that deferred function. Since there are now no child + // policies, the aggregated connectivity state reported form the Aggregator + // will be TRANSIENT_FAILURE. + if len(b.targets) == 0 { + b.stateAggregator.NeedUpdateStateOnResume() + } + return nil } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/terraform/providers/google/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 0359956d36..04b9ad4116 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -25,14 +25,20 @@ import ( "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" +) + +type ccbMode int + +const ( + ccbModeActive = iota + ccbModeIdle + ccbModeClosed + ccbModeExitingIdle ) // ccBalancerWrapper sits between the ClientConn and the Balancer. @@ -49,192 +55,101 @@ import ( // It uses the gracefulswitch.Balancer internally to ensure that balancer // switches happen in a graceful manner. type ccBalancerWrapper struct { - cc *ClientConn - - // Since these fields are accessed only from handleXxx() methods which are - // synchronized by the watcher goroutine, we do not need a mutex to protect - // these fields. + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. 
+ cc *ClientConn + opts balancer.BuildOptions + + // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled in the serializer. Fields + // accessed *only* in these serializer callbacks, can therefore be accessed + // without a mutex. balancer *gracefulswitch.Balancer curBalancerName string - updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). - resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. - closed *grpcsync.Event // Indicates if close has been called. - done *grpcsync.Event // Indicates if close has completed its work. + // mu guards access to the below fields. Access to the serializer and its + // cancel function needs to be mutex protected because they are overwritten + // when the wrapper exits idle mode. + mu sync.Mutex + serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. + serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. + mode ccbMode // Tracks the current mode of the wrapper. } // newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer // is not created until the switchTo() method is invoked. func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(context.Background()) ccb := &ccBalancerWrapper{ - cc: cc, - updateCh: buffer.NewUnbounded(), - resultCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), + cc: cc, + opts: bopts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, } - go ccb.watcher() ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// The following xxxUpdate structs wrap the arguments received as part of the -// corresponding update. The watcher goroutine uses the 'type' of the update to -// invoke the appropriate handler routine to handle the update. 
- -type ccStateUpdate struct { - ccs *balancer.ClientConnState -} - -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} - -type exitIdleUpdate struct{} - -type resolverErrorUpdate struct { - err error -} - -type switchToUpdate struct { - name string -} - -type subConnUpdate struct { - acbw *acBalancerWrapper -} - -// watcher is a long-running goroutine which reads updates from a channel and -// invokes corresponding methods on the underlying balancer. It ensures that -// these methods are invoked in a synchronous fashion. It also ensures that -// these methods are invoked in the order in which the updates were received. -func (ccb *ccBalancerWrapper) watcher() { - for { - select { - case u := <-ccb.updateCh.Get(): - ccb.updateCh.Load() - if ccb.closed.HasFired() { - break - } - switch update := u.(type) { - case *ccStateUpdate: - ccb.handleClientConnStateChange(update.ccs) - case *scStateUpdate: - ccb.handleSubConnStateChange(update) - case *exitIdleUpdate: - ccb.handleExitIdle() - case *resolverErrorUpdate: - ccb.handleResolverError(update.err) - case *switchToUpdate: - ccb.handleSwitchTo(update.name) - case *subConnUpdate: - ccb.handleRemoveSubConn(update.acbw) - default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) - } - case <-ccb.closed.Done(): - } - - if ccb.closed.HasFired() { - ccb.handleClose() - return - } - } -} - // updateClientConnState is invoked by grpc to push a ClientConnState update to // the underlying balancer. -// -// Unlike other methods invoked by grpc to push updates to the underlying -// balancer, this method cannot simply push the update onto the update channel -// and return. It needs to return the error returned by the underlying balancer -// back to grpc which propagates that to the resolver. 
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) - - var res interface{} - select { - case res = <-ccb.resultCh.Get(): - ccb.resultCh.Load() - case <-ccb.closed.Done(): - // Return early if the balancer wrapper is closed while we are waiting for - // the underlying balancer to process a ClientConnState update. - return nil - } - // If the returned error is nil, attempting to type assert to error leads to - // panic. So, this needs to handled separately. - if res == nil { - return nil - } - return res.(error) -} - -// handleClientConnStateChange handles a ClientConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. -// -// If the addresses specified in the update contain addresses of type "grpclb" -// and the selected LB policy is not "grpclb", these addresses will be filtered -// out and ccs will be modified with the updated address list. -func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { - if ccb.curBalancerName != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue + ccb.mu.Lock() + errCh := make(chan error, 1) + // Here and everywhere else where Schedule() is called, it is done with the + // lock held. But the lock guards only the scheduling part. The actual + // callback is called asynchronously without the lock being held. + ok := ccb.serializer.Schedule(func(_ context.Context) { + // If the addresses specified in the update contain addresses of type + // "grpclb" and the selected LB policy is not "grpclb", these addresses + // will be filtered out and ccs will be modified with the updated + // address list. 
+ if ccb.curBalancerName != grpclbName { + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) } - addrs = append(addrs, addr) + ccs.ResolverState.Addresses = addrs } - ccs.ResolverState.Addresses = addrs + errCh <- ccb.balancer.UpdateClientConnState(*ccs) + }) + if !ok { + // If we are unable to schedule a function with the serializer, it + // indicates that it has been closed. A serializer is only closed when + // the wrapper is closed or is in idle. + ccb.mu.Unlock() + return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") } - ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) + ccb.mu.Unlock() + + // We get here only if the above call to Schedule succeeds, in which case it + // is guaranteed that the scheduled function will run. Therefore it is safe + // to block on this channel. + err := <-errCh + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + return err } // updateSubConnState is invoked by grpc to push a subConn state update to the // underlying balancer. func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. 
- if sc == nil { - return - } - ccb.updateCh.Put(&scStateUpdate{ - sc: sc, - state: s, - err: err, + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) }) -} - -// handleSubConnStateChange handles a SubConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. -func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { - ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) -} - -func (ccb *ccBalancerWrapper) exitIdle() { - ccb.updateCh.Put(&exitIdleUpdate{}) -} - -func (ccb *ccBalancerWrapper) handleExitIdle() { - if ccb.cc.GetState() != connectivity.Idle { - return - } - ccb.balancer.ExitIdle() + ccb.mu.Unlock() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.updateCh.Put(&resolverErrorUpdate{err: err}) -} - -func (ccb *ccBalancerWrapper) handleResolverError(err error) { - ccb.balancer.ResolverError(err) + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.ResolverError(err) + }) + ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -248,24 +163,27 @@ func (ccb *ccBalancerWrapper) handleResolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.updateCh.Put(&switchToUpdate{name: name}) + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // TODO: Other languages use case-sensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. 
+ if strings.EqualFold(ccb.curBalancerName, name) { + return + } + ccb.buildLoadBalancingPolicy(name) + }) + ccb.mu.Unlock() } -// handleSwitchTo handles a balancer switch update from the update channel. It -// calls the SwitchTo() method on the gracefulswitch.Balancer with a -// balancer.Builder corresponding to name. If no balancer.Builder is registered -// for the given name, it uses the default LB policy which is "pick_first". -func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { - // TODO: Other languages use case-insensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. - if strings.EqualFold(ccb.curBalancerName, name) { - return - } - - // TODO: Ensure that name is a registered LB policy when we get here. - // We currently only validate the `loadBalancingConfig` field. We need to do - // the same for the `loadBalancingPolicy` field and reject the service config - // if the specified policy is not registered. +// buildLoadBalancingPolicy performs the following: +// - retrieve a balancer builder for the given name. Use the default LB +// policy, pick_first, if no LB policy with name is found in the registry. +// - instruct the gracefulswitch balancer to switch to the above builder. This +// will actually build the new balancer. +// - update the `curBalancerName` field +// +// Must be called from a serializer callback. +func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { builder := balancer.Get(name) if builder == nil { channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) @@ -281,26 +199,114 @@ func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { ccb.curBalancerName = builder.Name() } -// handleRemoveSucConn handles a request from the underlying balancer to remove -// a subConn. -// -// See comments in RemoveSubConn() for more details. 
-func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +func (ccb *ccBalancerWrapper) close() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.closeBalancer(ccbModeClosed) } -func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() +// enterIdleMode is invoked by grpc when the channel enters idle mode upon +// expiry of idle_timeout. This call blocks until the balancer is closed. +func (ccb *ccBalancerWrapper) enterIdleMode() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") + ccb.closeBalancer(ccbModeIdle) +} + +// closeBalancer is invoked when the channel is being closed or when it enters +// idle mode upon expiry of idle_timeout. +func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { + ccb.mu.Unlock() + return + } + + ccb.mode = m + done := ccb.serializer.Done + b := ccb.balancer + ok := ccb.serializer.Schedule(func(_ context.Context) { + // Close the serializer to ensure that no more calls from gRPC are sent + // to the balancer. + ccb.serializerCancel() + // Empty the current balancer name because we don't have a balancer + // anymore and also so that we act on the next call to switchTo by + // creating a new balancer specified by the new resolver. + ccb.curBalancerName = "" + }) + if !ok { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-done + // Spawn a goroutine to close the balancer (since it may block trying to + // cleanup all allocated resources) and return early. + go b.Close() } -func (ccb *ccBalancerWrapper) handleClose() { - ccb.balancer.Close() - ccb.done.Fire() +// exitIdleMode is invoked by grpc when the channel exits idle mode either +// because of an RPC or because of an invocation of the Connect() API. 
This +// recreates the balancer that was closed previously when entering idle mode. +// +// If the channel is not in idle mode, we know for a fact that we are here as a +// result of the user calling the Connect() method on the ClientConn. In this +// case, we can simply forward the call to the underlying balancer, instructing +// it to reconnect to the backends. +func (ccb *ccBalancerWrapper) exitIdleMode() { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed { + // Request to exit idle is a no-op when wrapper is already closed. + ccb.mu.Unlock() + return + } + + if ccb.mode == ccbModeIdle { + // Recreate the serializer which was closed when we entered idle. + ctx, cancel := context.WithCancel(context.Background()) + ccb.serializer = grpcsync.NewCallbackSerializer(ctx) + ccb.serializerCancel = cancel + } + + // The ClientConn guarantees that mutual exclusion between close() and + // exitIdleMode(), and since we just created a new serializer, we can be + // sure that the below function will be scheduled. + done := make(chan struct{}) + ccb.serializer.Schedule(func(_ context.Context) { + defer close(done) + + ccb.mu.Lock() + defer ccb.mu.Unlock() + + if ccb.mode != ccbModeIdle { + ccb.balancer.ExitIdle() + return + } + + // Gracefulswitch balancer does not support a switchTo operation after + // being closed. Hence we need to create a new one here. 
+ ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + ccb.mode = ccbModeActive + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") + + }) + ccb.mu.Unlock() + + <-done +} + +func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { + ccb.mu.Lock() + defer ccb.mu.Unlock() + return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) <= 0 { + if ccb.isIdleOrClosed() { + return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + } + + if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } ac, err := ccb.cc.newAddrConn(addrs, opts) @@ -309,31 +315,35 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return nil, err } acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} - acbw.ac.mu.Lock() ac.acbw = acbw - acbw.ac.mu.Unlock() return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it - // was required to handle the RemoveSubConn() method asynchronously by pushing - // the update onto the update channel. This was done to avoid a deadlock as - // switchBalancer() was holding cc.mu when calling Close() on the old - // balancer, which would in turn call RemoveSubConn(). - // - // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this - // asynchronously is probably not required anymore since the switchTo() method - // handles the balancer switch by pushing the update onto the channel. - // TODO(easwars): Handle this inline. + if ccb.isIdleOrClosed() { + // It it safe to ignore this call when the balancer is closed or in idle + // because the ClientConn takes care of closing the connections. 
+ // + // Not returning early from here when the balancer is closed or in idle + // leads to a deadlock though, because of the following sequence of + // calls when holding cc.mu: + // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> + // ccb.RemoveAddrConn --> cc.removeAddrConn + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return } - ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) + ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + if ccb.isIdleOrClosed() { + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -342,6 +352,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + if ccb.isIdleOrClosed() { + return + } + // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is @@ -352,6 +366,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + if ccb.isIdleOrClosed() { + return + } + ccb.cc.resolveNow(o) } @@ -362,71 +380,31 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. 
type acBalancerWrapper struct { + ac *addrConn // read-only + mu sync.Mutex - ac *addrConn producers map[balancer.ProducerBuilder]*refCountedProducer } -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - if len(addrs) <= 0 { - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - return - } - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - opts := acbw.ac.scopts - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition. - acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - - if acState == connectivity.Shutdown { - return - } +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) +} - newAC, err := cc.newAddrConn(addrs, opts) - if err != nil { - channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = newAC - newAC.mu.Lock() - newAC.acbw = acbw - newAC.mu.Unlock() - if acState != connectivity.Idle { - go newAC.connect() - } - } +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.ac.updateAddrs(addrs) } func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() go acbw.ac.connect() } -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { - acbw.mu.Lock() - defer acbw.mu.Unlock() - return acbw.ac -} - -var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") - // NewStream begins a streaming RPC on the addrConn. If the addrConn is not -// ready, returns errSubConnNotReady. +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. 
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport := acbw.ac.getReadyTransport() - if transport == nil { - return nil, errSubConnNotReady + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 66d141fce7..ec2c2fa14d 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/call.go b/terraform/providers/google/vendor/google.golang.org/grpc/call.go index 9e20e4d385..e6a1dc5d75 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/call.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/call.go @@ -27,6 +27,11 @@ import ( // // All errors returned by Invoke are compatible with the status package. 
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + if err := cc.idlenessMgr.onCallBegin(); err != nil { + return err + } + defer cc.idlenessMgr.onCallEnd() + // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/clientconn.go b/terraform/providers/google/vendor/google.golang.org/grpc/clientconn.go index d607d4e9e2..95a7459b02 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/clientconn.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/clientconn.go @@ -24,7 +24,6 @@ import ( "fmt" "math" "net/url" - "reflect" "strings" "sync" "sync/atomic" @@ -69,6 +68,9 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates the the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -134,20 +136,42 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. 
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), - } + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), + } + + // We start the channel off in idle mode, but kick it out of idle at the end + // of this method, instead of waiting for the first RPC. Other gRPC + // implementations do wait for the first RPC to kick the channel out of + // idle. But doing so would be a major behavior change for our users who are + // used to seeing the channel active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, if at all we ever get to do that. 
+ cc.idlenessState = ccIdlenessStateIdle + cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + cc.exitIdleCond = sync.NewCond(&cc.mu) - for _, opt := range extraDialOptions { - opt.apply(&cc.dopts) + disableGlobalOpts := false + for _, opt := range opts { + if _, ok := opt.(*disableGlobalDialOptions); ok { + disableGlobalOpts = true + break + } + } + + if !disableGlobalOpts { + for _, opt := range globalDialOptions { + opt.apply(&cc.dopts) + } } for _, opt := range opts { @@ -163,40 +187,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - pid := cc.dopts.channelzParentID - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) - ted := &channelz.TraceEventDesc{ - Desc: "Channel created", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) - cc.csMgr.channelzID = cc.channelzID + // Register ClientConn with channelz. 
+ cc.channelzRegistration(target) - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { - return nil, errNoTransportCredsInBundle - } - transportCreds := cc.dopts.copts.TransportCredentials - if transportCreds == nil { - transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() - } - if transportCreds.Info().SecurityProtocol == "insecure" { - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } + if err := cc.validateTransportCredentials(); err != nil { + return nil, err } if cc.dopts.defaultServiceConfigRawJSON != nil { @@ -234,35 +229,19 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - scSet := false - if cc.dopts.scChan != nil { - // Try to get an initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - scSet = true - } - default: - } - } if cc.dopts.bs == nil { cc.dopts.bs = backoff.DefaultExponential } // Determine the resolver to use. - resolverBuilder, err := cc.parseTargetAndFindResolver() - if err != nil { + if err := cc.parseTargetAndFindResolver(); err != nil { return nil, err } - cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts) - if err != nil { + if err = cc.determineAuthority(); err != nil { return nil, err } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) - if cc.dopts.scChan != nil && !scSet { + if cc.dopts.scChan != nil { // Blocking wait for the initial service config. 
select { case sc, ok := <-cc.dopts.scChan: @@ -278,57 +257,224 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * go cc.scWatcher() } + // This creates the name resolver, load balancer, blocking picker etc. + if err := cc.exitIdleMode(); err != nil { + return nil, err + } + + // Configure idleness support with configured idle timeout or default idle + // timeout duration. Idleness can be explicitly disabled by the user, by + // setting the dial option to 0. + cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) + + // Return early for non-blocking dials. + if !cc.dopts.block { + return cc, nil + } + + // A blocking dial blocks until the clientConn is ready. + for { + s := cc.GetState() + if s == connectivity.Idle { + cc.Connect() + } + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } +} + +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. +func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. 
+func (cc *ClientConn) exitIdleMode() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing + } + if cc.idlenessState != ccIdlenessStateIdle { + cc.mu.Unlock() + logger.Info("ClientConn asked to exit idle mode when not in idle mode") + return nil + } + + defer func() { + // When Close() and exitIdleMode() race against each other, one of the + // following two can happen: + // - Close() wins the race and runs first. exitIdleMode() runs after, and + // sees that the ClientConn is already closed and hence returns early. + // - exitIdleMode() wins the race and runs first and recreates the balancer + // and releases the lock before recreating the resolver. If Close() runs + // in this window, it will wait for exitIdleMode to complete. + // + // We achieve this synchronization using the below condition variable. + cc.mu.Lock() + cc.idlenessState = ccIdlenessStateActive + cc.exitIdleCond.Signal() + cc.mu.Unlock() + }() + + cc.idlenessState = ccIdlenessStateExitingIdle + exitedIdle := false + if cc.blockingpicker == nil { + cc.blockingpicker = newPickerWrapper() + } else { + cc.blockingpicker.exitIdleMode() + exitedIdle = true + } + var credsClone credentials.TransportCredentials if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) + if cc.balancerWrapper == nil { + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }) + } else { + 
cc.balancerWrapper.exitIdleMode() + } + cc.firstResolveEvent = grpcsync.NewEvent() + cc.mu.Unlock() - // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) + // This needs to be called without cc.mu because this builds a new resolver + // which might update state or report error inline which needs to be handled + // by cc.updateResolverState() which also grabs cc.mu. + if err := cc.initResolverWrapper(credsClone); err != nil { + return err } + + if exitedIdle { + cc.addTraceEvent("exiting idle mode") + } + return nil +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer and any subchannels. +func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() - cc.resolverWrapper = rWrapper + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + if cc.idlenessState != ccIdlenessStateActive { + logger.Error("ClientConn asked to enter idle mode when not active") + return nil + } + + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + conns := cc.conns + cc.conns = make(map[*addrConn]struct{}) + + // TODO: Currently, we close the resolver wrapper upon entering idle mode + // and create a new one upon exiting idle mode. This means that the + // `cc.resolverWrapper` field would be overwritten everytime we exit idle + // mode. While this means that we need to hold `cc.mu` when accessing + // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should + // try to do the same for the balancer and picker wrappers too. 
+ cc.resolverWrapper.close() + cc.blockingpicker.enterIdleMode() + cc.balancerWrapper.enterIdleMode() + cc.csMgr.updateState(connectivity.Idle) + cc.idlenessState = ccIdlenessStateIdle cc.mu.Unlock() - // A blocking dial blocks until the clientConn is ready. - if cc.dopts.block { - for { - cc.Connect() - s := cc.GetState() - if s == connectivity.Ready { - break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { - return nil, err - } - return nil, ctx.Err() + go func() { + cc.addTraceEvent("entering idle mode") + for ac := range conns { + ac.tearDown(errConnIdling) + } + }() + return nil +} + +// validateTransportCredentials performs a series of checks on the configured +// transport credentials. It returns a non-nil error if any of these conditions +// are met: +// - no transport creds and no creds bundle is configured +// - both transport creds and creds bundle are configured +// - creds bundle is configured, but it lacks a transport credentials +// - insecure transport creds configured alongside call creds that require +// transport level security +// +// If none of the above conditions are met, the configured credentials are +// deemed valid and a nil error is returned. 
+func (cc *ClientConn) validateTransportCredentials() error { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing } } } + return nil +} - return cc, nil +// channelzRegistration registers the newly created ClientConn with channelz and +// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. +// A channelz trace event is emitted for ClientConn creation. If the newly +// created ClientConn is a nested one, i.e a valid parent ClientConn ID is +// specified via a dial option, the trace event is also added to the parent. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) channelzRegistration(target string) { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + cc.addTraceEvent("created") + cc.csMgr.channelzID = cc.channelzID } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -474,7 +620,9 @@ type ClientConn struct { authority string // See determineAuthority(). dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). 
balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + idlenessMgr idlenessManager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -495,11 +643,31 @@ type ClientConn struct { sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + idlenessState ccIdlenessState // Tracks idleness state of the channel. + exitIdleCond *sync.Cond // Signalled when channel exits idle. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } +// ccIdlenessState tracks the idleness state of the channel. +// +// Channels start off in `active` and move to `idle` after a period of +// inactivity. When moving back to `active` upon an incoming RPC, they +// transition through `exiting_idle`. This state is useful for synchronization +// with Close(). +// +// This state tracking is mostly for self-protection. The idlenessManager is +// expected to keep track of the state as well, and is expected not to call into +// the ClientConn unnecessarily. +type ccIdlenessState int8 + +const ( + ccIdlenessStateActive ccIdlenessState = iota + ccIdlenessStateIdle + ccIdlenessStateExitingIdle +) + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -539,7 +707,10 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.balancerWrapper.exitIdle() + cc.exitIdleMode() + // If the ClientConn was not in idle mode, we need to call ExitIdle on the + // LB policy so that connections can be created. 
+ cc.balancerWrapper.exitIdleMode() } func (cc *ClientConn) scWatcher() { @@ -708,6 +879,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub dopts: cc.dopts, czData: new(channelzData), resetBackoff: make(chan struct{}), + stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. @@ -801,9 +973,6 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - // Update connectivity state within the lock to prevent subsequent or - // concurrent calls from resetting the transport more than once. - ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() ac.resetTransport() @@ -822,58 +991,62 @@ func equalAddresses(a, b []resolver.Address) bool { return true } -// tryUpdateAddrs tries to update ac.addrs with the new addresses list. -// -// If ac is TransientFailure, it updates ac.addrs and returns true. The updated -// addresses will be picked up by retry in the next iteration after backoff. -// -// If ac is Shutdown or Idle, it updates ac.addrs and returns true. -// -// If the addresses is the same as the old list, it does nothing and returns -// true. -// -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// -// If ac is Ready, it checks whether current connected address of ac is in the -// new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. -func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { +// updateAddrs updates ac.addrs with the new addresses list and handles active +// connections or connection attempts. 
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - defer ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + + if equalAddresses(ac.addrs, addrs) { + ac.mu.Unlock() + return + } + + ac.addrs = addrs + if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { - ac.addrs = addrs - return true + // We were not connecting, so do nothing but update the addresses. + ac.mu.Unlock() + return } - if equalAddresses(ac.addrs, addrs) { - return true + if ac.state == connectivity.Ready { + // Try to find the connected address. + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if a.Equal(ac.curAddr) { + // We are connected to a valid address, so do nothing but + // update the addresses. + ac.mu.Unlock() + return + } + } } - if ac.state == connectivity.Connecting { - return false - } + // We are either connected to the wrong address or currently connecting. + // Stop the current iteration and restart. - // ac.state is Ready, try to find the connected address. - var curAddrFound bool - for _, a := range addrs { - a.ServerName = ac.cc.getServerName(a) - if reflect.DeepEqual(ac.curAddr, a) { - curAddrFound = true - break - } + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + + // We have to defer here because GracefulClose => Close => onClose, which + // requires locking ac.mu. 
+ if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil } - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) - if curAddrFound { - ac.addrs = addrs + + if len(addrs) == 0 { + ac.updateConnectivityState(connectivity.Idle, nil) } - return curAddrFound + ac.mu.Unlock() + + // Since we were connecting/connected, we should start a new connection + // attempt. + go ac.resetTransport() } // getServerName determines the serverName to be used in the connection @@ -1026,39 +1199,40 @@ func (cc *ClientConn) Close() error { cc.mu.Unlock() return ErrClientConnClosing } + + for cc.idlenessState == ccIdlenessStateExitingIdle { + cc.exitIdleCond.Wait() + } + conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) + pWrapper := cc.blockingpicker rWrapper := cc.resolverWrapper - cc.resolverWrapper = nil bWrapper := cc.balancerWrapper + idlenessMgr := cc.idlenessMgr cc.mu.Unlock() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. - cc.blockingpicker.close() + if pWrapper != nil { + pWrapper.close() + } if bWrapper != nil { bWrapper.close() } if rWrapper != nil { rWrapper.close() } + if idlenessMgr != nil { + idlenessMgr.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - ted := &channelz.TraceEventDesc{ - Desc: "Channel deleted", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being // deleted right away. 
@@ -1088,7 +1262,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State + state connectivity.State + stateChan chan struct{} // closed and recreated on every state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1102,8 +1277,15 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } + // When changing states, reset the state change channel. + close(ac.stateChan) + ac.stateChan = make(chan struct{}) ac.state = s - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + if lastErr == nil { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + } else { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) + } ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } @@ -1123,7 +1305,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { func (ac *addrConn) resetTransport() { ac.mu.Lock() - if ac.state == connectivity.Shutdown { + acCtx := ac.ctx + if acCtx.Err() != nil { ac.mu.Unlock() return } @@ -1151,15 +1334,14 @@ func (ac *addrConn) resetTransport() { ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil { + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) // After exhausting all addresses, the addrConn enters // TRANSIENT_FAILURE. - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() + if acCtx.Err() != nil { return } + ac.mu.Lock() ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. 
@@ -1174,13 +1356,13 @@ func (ac *addrConn) resetTransport() { ac.mu.Unlock() case <-b: timer.Stop() - case <-ac.ctx.Done(): + case <-acCtx.Done(): timer.Stop() return } ac.mu.Lock() - if ac.state != connectivity.Shutdown { + if acCtx.Err() == nil { ac.updateConnectivityState(connectivity.Idle, err) } ac.mu.Unlock() @@ -1195,14 +1377,13 @@ func (ac *addrConn) resetTransport() { // tryAllAddrs tries to creates a connection to the addresses, and stop when at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. -func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error { +func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() + if ctx.Err() != nil { return errConnClosing } + ac.mu.Lock() ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp @@ -1216,7 +1397,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { return nil } @@ -1233,19 +1414,20 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T // createTransport creates a connection to addr. It returns an error if the // address was not successfully connected, or updates ac appropriately with the // new transport. 
-func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { addr.ServerName = ac.cc.getServerName(addr) - hctx, hcancel := context.WithCancel(ac.ctx) + hctx, hcancel := context.WithCancel(ctx) onClose := func(r transport.GoAwayReason) { ac.mu.Lock() defer ac.mu.Unlock() // adjust params based on GoAwayReason ac.adjustParams(r) - if ac.state == connectivity.Shutdown { - // Already shut down. tearDown() already cleared the transport and - // canceled hctx via ac.ctx, and we expected this connection to be - // closed, so do nothing here. + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. return } hcancel() @@ -1264,7 +1446,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.updateConnectivityState(connectivity.Idle, nil) } - connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() copts.ChannelzParentID = ac.channelzID @@ -1281,7 +1463,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() - if ac.state == connectivity.Shutdown { + if ctx.Err() != nil { // This can happen if the subConn was removed while in `Connecting` // state. 
tearDown() would have set the state to `Shutdown`, but // would not have closed the transport since ac.transport would not @@ -1293,6 +1475,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. go newTr.Close(transport.ErrConnClosing) return nil } @@ -1400,6 +1585,29 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. If the +// addrConn is stopped first, returns an Unavailable status error. +func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } + } + return nil, status.FromContextError(ctx.Err()).Err() +} + // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct @@ -1527,6 +1735,9 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { // referenced by users. var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") +// getResolver finds the scheme in the cc's resolvers or the global registry. +// scheme should always be lowercase (typically by virtue of url.Parse() +// performing proper RFC3986 behavior). 
func (cc *ClientConn) getResolver(scheme string) resolver.Builder { for _, rb := range cc.dopts.resolvers { if scheme == rb.Scheme() { @@ -1548,7 +1759,14 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { +// parseTargetAndFindResolver parses the user's dial target and stores the +// parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) parseTargetAndFindResolver() error { channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) var rb resolver.Builder @@ -1560,7 +1778,8 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } } @@ -1575,15 +1794,16 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { parsedTarget, err = parseTarget(canonicalTarget) if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) - return nil, err + return err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } // parseTarget uses RFC 3986 semantics to parse the given target into a @@ -1606,7 +1826,15 @@ func parseTarget(target string) (resolver.Target, error) { // - user specified authority override using 
`WithAuthority` dial option // - creds' notion of server name for the authentication handshake // - endpoint from dial target of the form "scheme://[authority]/endpoint" -func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { +// +// Stores the determined authority in `cc.authority`. +// +// Returns a non-nil error if the authority returned by the transport +// credentials do not match the authority configured through the dial option. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) determineAuthority() error { + dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. One was through the transport credentials // (either in its constructor, or through the OverrideServerName() method). @@ -1623,25 +1851,58 @@ func determineAuthority(endpoint, target string, dopts dialOptions) (string, err } authorityFromDialOption := dopts.authority if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { - return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) } + endpoint := cc.parsedTarget.Endpoint() + target := cc.target switch { case authorityFromDialOption != "": - return authorityFromDialOption, nil + cc.authority = authorityFromDialOption case authorityFromCreds != "": - return authorityFromCreds, nil + cc.authority = authorityFromCreds case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): // TODO: remove when the unix resolver implements optional interface to // return channel authority. 
- return "localhost", nil + cc.authority = "localhost" case strings.HasPrefix(endpoint, ":"): - return "localhost" + endpoint, nil + cc.authority = "localhost" + endpoint default: // TODO: Define an optional interface on the resolver builder to return // the channel authority given the user's dial target. For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority. - return endpoint, nil + cc.authority = endpoint } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil +} + +// initResolverWrapper creates a ccResolverWrapper, which builds the name +// resolver. This method grabs the lock to assign the newly built resolver +// wrapper to the cc.resolverWrapper field. +func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { + rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ + target: cc.parsedTarget, + builder: cc.resolverBuilder, + bOpts: resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: creds, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + }, + channelzID: cc.channelzID, + }) + if err != nil { + return fmt.Errorf("failed to build resolver: %v", err) + } + // Resolver implementations may report state update or error inline when + // built (or right after), and this is handled in cc.updateResolverState. + // Also, an error from the resolver might lead to a re-resolution request + // from the balancer, which is handled in resolveNow() where + // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. 
+ cc.mu.Lock() + cc.resolverWrapper = rw + cc.mu.Unlock() + return nil } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/codes/code_string.go b/terraform/providers/google/vendor/google.golang.org/grpc/codes/code_string.go index 0b206a5782..934fac2b09 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/codes/code_string.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/codes/code_string.go @@ -18,7 +18,15 @@ package codes -import "strconv" +import ( + "strconv" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.CanonicalString = canonicalString +} func (c Code) String() string { switch c { @@ -60,3 +68,44 @@ func (c Code) String() string { return "Code(" + strconv.FormatInt(int64(c), 10) + ")" } } + +func canonicalString(c Code) string { + switch c { + case OK: + return "OK" + case Canceled: + return "CANCELLED" + case Unknown: + return "UNKNOWN" + case InvalidArgument: + return "INVALID_ARGUMENT" + case DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case NotFound: + return "NOT_FOUND" + case AlreadyExists: + return "ALREADY_EXISTS" + case PermissionDenied: + return "PERMISSION_DENIED" + case ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case FailedPrecondition: + return "FAILED_PRECONDITION" + case Aborted: + return "ABORTED" + case OutOfRange: + return "OUT_OF_RANGE" + case Unimplemented: + return "UNIMPLEMENTED" + case Internal: + return "INTERNAL" + case Unavailable: + return "UNAVAILABLE" + case DataLoss: + return "DATA_LOSS" + case Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go index 7b953a520e..150ae55767 100644 --- 
a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -138,7 +138,7 @@ func DefaultServerHandshakerOptions() *ServerHandshakerOptions { // and server options (server options struct does not exist now. When // caller can provide endpoints, it should be created. -// altsHandshaker is used to complete a ALTS handshaking between client and +// altsHandshaker is used to complete an ALTS handshake between client and // server. This handshaker talks to the ALTS handshaker service in the metadata // server. type altsHandshaker struct { @@ -146,6 +146,8 @@ type altsHandshaker struct { stream altsgrpc.HandshakerService_DoHandshakeClient // the connection to the peer. conn net.Conn + // a virtual connection to the ALTS handshaker service. + clientConn *grpc.ClientConn // client handshake options. clientOpts *ClientHandshakerOptions // server handshake options. @@ -154,39 +156,33 @@ type altsHandshaker struct { side core.Side } -// NewClientHandshaker creates a ALTS handshaker for GCP which contains an RPC -// stub created using the passed conn and used to talk to the ALTS Handshaker +// NewClientHandshaker creates a core.Handshaker that performs a client-side +// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. 
func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) - if err != nil { - return nil, err - } return &altsHandshaker{ - stream: stream, + stream: nil, conn: c, + clientConn: conn, clientOpts: opts, side: core.ClientSide, }, nil } -// NewServerHandshaker creates a ALTS handshaker for GCP which contains an RPC -// stub created using the passed conn and used to talk to the ALTS Handshaker +// NewServerHandshaker creates a core.Handshaker that performs a server-side +// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) - if err != nil { - return nil, err - } return &altsHandshaker{ - stream: stream, + stream: nil, conn: c, + clientConn: conn, serverOpts: opts, side: core.ServerSide, }, nil } -// ClientHandshake starts and completes a client ALTS handshaking for GCP. Once +// ClientHandshake starts and completes a client ALTS handshake for GCP. Once // done, ClientHandshake returns a secure connection. func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { if !acquire() { @@ -198,6 +194,16 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") } + // TODO(matthewstevenson88): Change unit tests to use public APIs so + // that h.stream can unconditionally be set based on h.clientConn. 
+ if h.stream == nil { + stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) + if err != nil { + return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) + } + h.stream = stream + } + // Create target identities from service account list. targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts)) for _, account := range h.clientOpts.TargetServiceAccounts { @@ -229,7 +235,7 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent return conn, authInfo, nil } -// ServerHandshake starts and completes a server ALTS handshaking for GCP. Once +// ServerHandshake starts and completes a server ALTS handshake for GCP. Once // done, ServerHandshake returns a secure connection. func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { if !acquire() { @@ -241,6 +247,16 @@ func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credent return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") } + // TODO(matthewstevenson88): Change unit tests to use public APIs so + // that h.stream can unconditionally be set based on h.clientConn. + if h.stream == nil { + stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) + if err != nil { + return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) + } + h.stream = stream + } + p := make([]byte, frameLimit) n, err := h.conn.Read(p) if err != nil { @@ -371,5 +387,7 @@ func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []b // Close terminates the Handshaker. It should be called when the caller obtains // the secure connection. 
func (h *altsHandshaker) Close() { - h.stream.CloseSend() + if h.stream != nil { + h.stream.CloseSend() + } } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go index 2de2c4affd..e1cdafb980 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -58,3 +58,21 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { } return hsConn, nil } + +// CloseForTesting closes all open connections to the handshaker service. +// +// For testing purposes only. +func CloseForTesting() error { + for _, hsConn := range hsConnMap { + if hsConn == nil { + continue + } + if err := hsConn.Close(); err != nil { + return err + } + } + + // Reset the connection map. + hsConnMap = make(map[string]*grpc.ClientConn) + return nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 1a40e17e8d..83e3bae37b 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/gcp/altscontext.proto package grpc_gcp diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 50eefa5383..0b0093328b 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/gcp/handshaker.proto package grpc_gcp diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index d3562c6d5e..39ecccf878 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/gcp/handshaker.proto package grpc_gcp @@ -35,6 +35,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + HandshakerService_DoHandshake_FullMethodName = "/grpc.gcp.HandshakerService/DoHandshake" +) + // HandshakerServiceClient is the client API for HandshakerService service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -57,7 +61,7 @@ func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceCl } func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { - stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...) + stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], HandshakerService_DoHandshake_FullMethodName, opts...) if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index b07412f185..c2e564c7de 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/gcp/transport_security_common.proto package grpc_gcp diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/dialoptions.go b/terraform/providers/google/vendor/google.golang.org/grpc/dialoptions.go index 4866da101c..15a3d5102a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/dialoptions.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/dialoptions.go @@ -38,13 +38,14 @@ import ( func init() { internal.AddGlobalDialOptions = func(opt ...DialOption) { - extraDialOptions = append(extraDialOptions, opt...) + globalDialOptions = append(globalDialOptions, opt...) } internal.ClearGlobalDialOptions = func() { - extraDialOptions = nil + globalDialOptions = nil } internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption + internal.DisableGlobalDialOptions = newDisableGlobalDialOptions } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -76,6 +77,7 @@ type dialOptions struct { defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string resolvers []resolver.Builder + idleTimeout time.Duration } // DialOption configures how we set up the connection. @@ -83,7 +85,7 @@ type DialOption interface { apply(*dialOptions) } -var extraDialOptions []DialOption +var globalDialOptions []DialOption // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. @@ -96,6 +98,16 @@ type EmptyDialOption struct{} func (EmptyDialOption) apply(*dialOptions) {} +type disableGlobalDialOptions struct{} + +func (disableGlobalDialOptions) apply(*dialOptions) {} + +// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn +// from applying the global DialOptions (set via AddGlobalDialOptions). 
+func newDisableGlobalDialOptions() DialOption { + return &disableGlobalDialOptions{} +} + // funcDialOption wraps a function that modifies dialOptions into an // implementation of the DialOption interface. type funcDialOption struct { @@ -284,6 +296,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { // WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -295,6 +310,9 @@ func WithBlock() DialOption { // the context.DeadlineExceeded error. // Implies WithBlock() // +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// // # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a @@ -437,6 +455,9 @@ func withBinaryLogger(bl binarylog.Logger) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// // # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a @@ -635,3 +656,23 @@ func WithResolvers(rs ...resolver.Builder) DialOption { o.resolvers = append(o.resolvers, rs...) }) } + +// WithIdleTimeout returns a DialOption that configures an idle timeout for the +// channel. 
If the channel is idle for the configured timeout, i.e there are no +// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode +// and as a result the name resolver and load balancer will be shut down. The +// channel will exit idle mode when the Connect() method is called or when an +// RPC is initiated. +// +// By default this feature is disabled, which can also be explicitly configured +// by passing zero to this function. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithIdleTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.idleTimeout = d + }) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 8e29a62f16..142d35f753 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/health/v1/health.proto package grpc_health_v1 diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index a332dfd7b5..a01a1b4d54 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -35,6 +35,11 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" + Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch" +) + // HealthClient is the client API for Health service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -70,7 +75,7 @@ func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) + err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -78,7 +83,7 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . } func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) + stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -166,7 +171,7 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.health.v1.Health/Check", + FullMethod: Health_Check_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/idle.go b/terraform/providers/google/vendor/google.golang.org/grpc/idle.go new file mode 100644 index 0000000000..dc3dc72f6b --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/idle.go @@ -0,0 +1,287 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type idlenessEnforcer interface { + exitIdleMode() error + enterIdleMode() error +} + +// idlenessManager defines the functionality required to track RPC activity on a +// channel. 
+type idlenessManager interface { + onCallBegin() error + onCallEnd() + close() +} + +type noopIdlenessManager struct{} + +func (noopIdlenessManager) onCallBegin() error { return nil } +func (noopIdlenessManager) onCallEnd() {} +func (noopIdlenessManager) close() {} + +// idlenessManagerImpl implements the idlenessManager interface. It uses atomic +// operations to synchronize access to shared state and a mutex to guarantee +// mutual exclusion in a critical section. +type idlenessManagerImpl struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and onCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. 
+ idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// newIdlenessManager creates a new idleness manager implementation for the +// given idle timeout. +func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { + if idleTimeout == 0 { + return noopIdlenessManager{} + } + + i := &idlenessManagerImpl{ + enforcer: enforcer, + timeout: int64(idleTimeout), + } + i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) + return i +} + +// resetIdleTimer resets the idle timer to the given duration. This method +// should only be called from the timer callback. +func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if i.timer == nil { + // Only close sets timer to nil. We are done. + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback, which means the timer has + // already fired. + i.timer.Reset(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (i *idlenessManagerImpl) handleIdleTimeout() { + if i.isClosed() { + return + } + + if atomic.LoadInt32(&i.activeCallsCount) > 0 { + i.resetIdleTimer(time.Duration(i.timeout)) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. + if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. 
+ atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) + i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) + return + } + + // This CAS operation is extremely likely to succeed given that there has + // been no activity since the last time we were here. Setting the + // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the + // channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity at the top of this method, or one was ongoing from before + // the last time we were here. In both case, reset the timer and return. + i.resetIdleTimer(time.Duration(i.timeout)) + return + } + + // Now that we've set the active calls count to -math.MaxInt32, it's time to + // actually move to idle mode. + if i.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) + i.resetIdleTimer(time.Duration(i.timeout)) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. +func (i *idlenessManagerImpl) tryEnterIdleMode() bool { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. 
+ return false + } + if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + // An very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. + return false + } + + // No new RPCs have come in since we last set the active calls count value + // -math.MaxInt32 in the timer callback. And since we have the lock, it is + // safe to enter idle mode now. + if err := i.enforcer.enterIdleMode(); err != nil { + logger.Errorf("Failed to enter idle mode: %v", err) + return false + } + + // Successfully entered idle mode. + i.actuallyIdle = true + return true +} + +// onCallBegin is invoked at the start of every RPC. +func (i *idlenessManagerImpl) onCallBegin() error { + if i.isClosed() { + return nil + } + + if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := i.exitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. + atomic.AddInt32(&i.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + return nil +} + +// exitIdleMode instructs the channel to exit idle mode. +// +// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. +func (i *idlenessManagerImpl) exitIdleMode() error { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if !i.actuallyIdle { + // This can happen in two scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and onCallBegin() noticed that the calls count is negative. 
+ // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in onCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // + // Either way, nothing to do here. + return nil + } + + if err := i.enforcer.exitIdleMode(); err != nil { + return fmt.Errorf("channel failed to exit idle mode: %v", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) + i.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) + return nil +} + +// onCallEnd is invoked at the end of every RPC. +func (i *idlenessManagerImpl) onCallEnd() { + if i.isClosed() { + return + } + + // Record the time at which the most recent call finished. + atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. + atomic.AddInt32(&i.activeCallsCount, -1) +} + +func (i *idlenessManagerImpl) isClosed() bool { + return atomic.LoadInt32(&i.closed) == 1 +} + +func (i *idlenessManagerImpl) close() { + atomic.StoreInt32(&i.closed, 1) + + i.idleMu.Lock() + i.timer.Stop() + i.timer = nil + i.idleMu.Unlock() +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/balancer/nop/nop.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/balancer/nop/nop.go new file mode 100644 index 0000000000..0c96f1b811 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/balancer/nop/nop.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package nop implements a balancer with all of its balancer operations as +// no-ops, other than returning a Transient Failure Picker on a Client Conn +// update. +package nop + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" +) + +// bal is a balancer with all of its balancer operations as no-ops, other than +// returning a Transient Failure Picker on a Client Conn update. +type bal struct { + cc balancer.ClientConn + err error +} + +// NewBalancer returns a no-op balancer. +func NewBalancer(cc balancer.ClientConn, err error) balancer.Balancer { + return &bal{ + cc: cc, + err: err, + } +} + +// UpdateClientConnState updates the bal's Client Conn with an Error Picker +// and a Connectivity State of TRANSIENT_FAILURE. +func (b *bal) UpdateClientConnState(_ balancer.ClientConnState) error { + b.cc.UpdateState(balancer.State{ + Picker: base.NewErrPicker(b.err), + ConnectivityState: connectivity.TransientFailure, + }) + return nil +} + +// ResolverError is a no-op. +func (b *bal) ResolverError(_ error) {} + +// UpdateSubConnState is a no-op. +func (b *bal) UpdateSubConnState(_ balancer.SubConn, _ balancer.SubConnState) {} + +// Close is a no-op. 
+func (b *bal) Close() {} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 809d73ccaf..755fdebc1b 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -28,8 +28,13 @@ import ( "google.golang.org/grpc/internal/grpcutil" ) -// Logger is the global binary logger. It can be used to get binary logger for -// each method. +var grpclogLogger = grpclog.Component("binarylog") + +// Logger specifies MethodLoggers for method names with a Log call that +// takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type Logger interface { GetMethodLogger(methodName string) MethodLogger } @@ -40,8 +45,6 @@ type Logger interface { // It is used to get a MethodLogger for each individual method. var binLogger Logger -var grpclogLogger = grpclog.Component("binarylog") - // SetLogger sets the binary logger. // // Only call this at init time. diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index d71e441778..6c3f632215 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -19,6 +19,7 @@ package binarylog import ( + "context" "net" "strings" "sync/atomic" @@ -48,8 +49,11 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. 
type MethodLogger interface { - Log(LogEntryConfig) + Log(context.Context, LogEntryConfig) } // TruncatingMethodLogger is a method logger that truncates headers and messages @@ -64,6 +68,9 @@ type TruncatingMethodLogger struct { } // NewTruncatingMethodLogger returns a new truncating method logger. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { return &TruncatingMethodLogger{ headerMaxLen: h, @@ -98,7 +105,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. -func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } @@ -144,6 +151,9 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (trun } // LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type LogEntryConfig interface { toProto() *binlogpb.GrpcLogEntry } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 9f6a0c1200..81c2f5fd76 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -35,6 +35,7 @@ import "sync" // internal/transport/transport.go for an example of this. type Unbounded struct { c chan interface{} + closed bool mu sync.Mutex backlog []interface{} } @@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded { // Put adds t to the unbounded buffer. 
func (b *Unbounded) Put(t interface{}) { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) == 0 { select { case b.c <- t: - b.mu.Unlock() return default: } } b.backlog = append(b.backlog, t) - b.mu.Unlock() } // Load sends the earliest buffered data, if any, onto the read channel @@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) { // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -72,7 +79,6 @@ func (b *Unbounded) Load() { default: } } - b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), @@ -80,6 +86,20 @@ func (b *Unbounded) Load() { // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. +// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed. func (b *Unbounded) Get() <-chan interface{} { return b.c } + +// Close closes the unbounded buffer. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } + b.closed = true + close(b.c) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 5ba9d94d49..80fd5c7d2a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,6 +36,10 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). 
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // PickFirstLBConfig is set if we should support configuration of the + // pick_first LB policy, which can be enabled by setting the environment + // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/observability.go index 821dd0a7c1..dd314cfb18 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/observability.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -28,9 +28,15 @@ const ( var ( // ObservabilityConfig is the json configuration for the gcp/observability // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ObservabilityConfig = os.Getenv(envObservabilityConfig) // ObservabilityConfigFile is the json configuration for the // gcp/observability specified in a file with the location specified in // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) ) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 04136882c7..02b4b6a1c1 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -61,11 +61,10 @@ var ( // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster - // and DNS cluster is enabled, which can be enabled by setting the - // environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to - // "true". + // XDSAggregateAndDNS indicates whether processing of aggregated cluster and + // DNS cluster is enabled, which can be disabled by setting the environment + // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + // to "false". XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, @@ -79,14 +78,18 @@ var ( // XDSFederation indicates whether federation support is enabled, which can // be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". 
- XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false) + XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be enabled by + // support for the RLS CLuster Specifier is enabled, which can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "true". - XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false) + // "false". + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which + // can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". + XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 82af70e96f..02224b42ca 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -63,6 +63,9 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { // Debugf does info logging at verbose level 2. func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. 
if !Logger.V(2) { return } @@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { return } InfoDepth(1, fmt.Sprintf(format, args...)) + +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (pl *PrefixLogger) V(l int) bool { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + return Logger.V(l) } // NewPrefixLogger creates a prefix logger with the given prefix. diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 517ea70642..d08e3e9076 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -72,3 +72,17 @@ func Uint64() uint64 { defer mu.Unlock() return r.Uint64() } + +// Uint32 implements rand.Uint32 on the grpcrand global source. +func Uint32() uint32 { + mu.Lock() + defer mu.Unlock() + return r.Uint32() +} + +// Shuffle implements rand.Shuffle on the grpcrand global source. +var Shuffle = func(n int, f func(int, int)) { + mu.Lock() + defer mu.Unlock() + r.Shuffle(n, f) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go new file mode 100644 index 0000000000..37b8d4117e --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/buffer" +) + +// CallbackSerializer provides a mechanism to schedule callbacks in a +// synchronized manner. It provides a FIFO guarantee on the order of execution +// of scheduled callbacks. New callbacks can be scheduled by invoking the +// Schedule() method. +// +// This type is safe for concurrent access. +type CallbackSerializer struct { + // Done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. + Done chan struct{} + + callbacks *buffer.Unbounded + closedMu sync.Mutex + closed bool +} + +// NewCallbackSerializer returns a new CallbackSerializer instance. The provided +// context will be passed to the scheduled callbacks. Users should cancel the +// provided context to shutdown the CallbackSerializer. It is guaranteed that no +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. +func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { + t := &CallbackSerializer{ + Done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } + go t.run(ctx) + return t +} + +// Schedule adds a callback to be scheduled after existing callbacks are run. +// +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. 
+// +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. +func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + t.closedMu.Lock() + defer t.closedMu.Unlock() + + if t.closed { + return false + } + t.callbacks.Put(f) + return true +} + +func (t *CallbackSerializer) run(ctx context.Context) { + var backlog []func(context.Context) + + defer close(t.Done) + for ctx.Err() == nil { + select { + case <-ctx.Done(): + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. + case callback, ok := <-t.callbacks.Get(): + if !ok { + return + } + t.callbacks.Load() + callback.(func(ctx context.Context))(ctx) + } + } + + // Fetch pending callbacks if any, and execute them before returning from + // this method and closing t.Done. + t.closedMu.Lock() + t.closed = true + backlog = t.fetchPendingCallbacks() + t.callbacks.Close() + t.closedMu.Unlock() + for _, b := range backlog { + b(ctx) + } +} + +func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { + var backlog []func(context.Context) + for { + select { + case b := <-t.callbacks.Get(): + backlog = append(backlog, b.(func(context.Context))) + t.callbacks.Load() + default: + return backlog + } + } +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/internal.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/internal.go index 0a76d9de6e..42ff39c844 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/internal.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/internal.go @@ -58,6 +58,12 @@ var ( // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. 
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + // CanonicalString returns the canonical string of the code defined here: + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + CanonicalString interface{} // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. An // xDS-enabled server invokes this method on a grpc.Server when a particular @@ -66,16 +72,35 @@ var ( // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. AddGlobalServerOptions interface{} // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalServerOptions func() // AddGlobalDialOptions adds an array of DialOption that will be effective // globally for newly created client channels. The priority will be: 1. // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. AddGlobalDialOptions interface{} // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
+ DisableGlobalDialOptions interface{} // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. @@ -86,9 +111,15 @@ var ( // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using @@ -130,6 +161,9 @@ var ( // // TODO: Remove this function once the RBAC env var is removed. UnregisterRBACHTTPFilterForTesting func() + + // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. + ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) ) // HealthChecker defines the signature of the client-side LB channel health checking function. 
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/metadata/metadata.go index b2980f8ac4..c82e608e07 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -76,33 +76,11 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { return addr } -// Validate returns an error if the input md contains invalid keys or values. -// -// If the header is not a pseudo-header, the following items are checked: -// - header names must contain one or more characters from this set [0-9 a-z _ - .]. -// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. -// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. +// Validate validates every pair in md with ValidatePair. func Validate(md metadata.MD) error { for k, vals := range md { - // pseudo-header will be ignored - if k[0] == ':' { - continue - } - // check key, for i that saving a conversion if not using for range - for i := 0; i < len(k); i++ { - r := k[i] - if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { - return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) - } - } - if strings.HasSuffix(k, "-bin") { - continue - } - // check value - for _, val := range vals { - if hasNotPrintable(val) { - return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) - } + if err := ValidatePair(k, vals...); err != nil { + return err } } return nil @@ -118,3 +96,37 @@ func hasNotPrintable(msg string) bool { } return false } + +// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : +// +// - key must contain one or more characters. 
+// - the characters in the key must be contained in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. +// - the characters in the every value must be printable (in [%x20-%x7E]). +func ValidatePair(key string, vals ...string) error { + // key should not be empty + if key == "" { + return fmt.Errorf("there is an empty key in the header") + } + // pseudo-header will be ignored + if key[0] == ':' { + return nil + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(key); i++ { + r := key[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) + } + } + if strings.HasSuffix(key, "-bin") { + return nil + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key) + } + } + return nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go index 21b6429d65..df4cd5484e 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go index 05a307092a..317a35a390 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/lookup/v1/rls_config.proto package grpc_lookup_v1 diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go index 076b966f34..2435fbc9a9 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 @@ -32,6 +32,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + RouteLookupService_RouteLookup_FullMethodName = "/grpc.lookup.v1.RouteLookupService/RouteLookup" +) + // RouteLookupServiceClient is the client API for RouteLookupService service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -50,7 +54,7 @@ func NewRouteLookupServiceClient(cc grpc.ClientConnInterface) RouteLookupService func (c *routeLookupServiceClient) RouteLookup(ctx context.Context, in *RouteLookupRequest, opts ...grpc.CallOption) (*RouteLookupResponse, error) { out := new(RouteLookupResponse) - err := c.cc.Invoke(ctx, "/grpc.lookup.v1.RouteLookupService/RouteLookup", in, out, opts...) + err := c.cc.Invoke(ctx, RouteLookupService_RouteLookup_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -96,7 +100,7 @@ func _RouteLookupService_RouteLookup_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.lookup.v1.RouteLookupService/RouteLookup", + FullMethod: RouteLookupService_RouteLookup_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RouteLookupServiceServer).RouteLookup(ctx, req.(*RouteLookupRequest)) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go new file mode 100644 index 0000000000..11d82afcc7 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. 
+ hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. + const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. + const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/balancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/balancer.go deleted file mode 100644 index 8927823d09..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/balancer.go +++ /dev/null @@ -1,372 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package testutils - -import ( - "context" - "errors" - "fmt" - "testing" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/resolver" -) - -// TestSubConnsCount is the number of TestSubConns initialized as part of -// package init. -const TestSubConnsCount = 16 - -// testingLogger wraps the logging methods from testing.T. -type testingLogger interface { - Log(args ...interface{}) - Logf(format string, args ...interface{}) -} - -// TestSubConns contains a list of SubConns to be used in tests. -var TestSubConns []*TestSubConn - -func init() { - for i := 0; i < TestSubConnsCount; i++ { - TestSubConns = append(TestSubConns, &TestSubConn{ - id: fmt.Sprintf("sc%d", i), - ConnectCh: make(chan struct{}, 1), - }) - } -} - -// TestSubConn implements the SubConn interface, to be used in tests. -type TestSubConn struct { - id string - ConnectCh chan struct{} -} - -// UpdateAddresses is a no-op. -func (tsc *TestSubConn) UpdateAddresses([]resolver.Address) {} - -// Connect is a no-op. -func (tsc *TestSubConn) Connect() { - select { - case tsc.ConnectCh <- struct{}{}: - default: - } -} - -// GetOrBuildProducer is a no-op. -func (tsc *TestSubConn) GetOrBuildProducer(balancer.ProducerBuilder) (balancer.Producer, func()) { - return nil, nil -} - -// String implements stringer to print human friendly error message. 
-func (tsc *TestSubConn) String() string { - return tsc.id -} - -// TestClientConn is a mock balancer.ClientConn used in tests. -type TestClientConn struct { - logger testingLogger - - NewSubConnAddrsCh chan []resolver.Address // the last 10 []Address to create subconn. - NewSubConnCh chan balancer.SubConn // the last 10 subconn created. - RemoveSubConnCh chan balancer.SubConn // the last 10 subconn removed. - UpdateAddressesAddrsCh chan []resolver.Address // last updated address via UpdateAddresses(). - - NewPickerCh chan balancer.Picker // the last picker updated. - NewStateCh chan connectivity.State // the last state. - ResolveNowCh chan resolver.ResolveNowOptions // the last ResolveNow(). - - subConnIdx int -} - -// NewTestClientConn creates a TestClientConn. -func NewTestClientConn(t *testing.T) *TestClientConn { - return &TestClientConn{ - logger: t, - - NewSubConnAddrsCh: make(chan []resolver.Address, 10), - NewSubConnCh: make(chan balancer.SubConn, 10), - RemoveSubConnCh: make(chan balancer.SubConn, 10), - UpdateAddressesAddrsCh: make(chan []resolver.Address, 1), - - NewPickerCh: make(chan balancer.Picker, 1), - NewStateCh: make(chan connectivity.State, 1), - ResolveNowCh: make(chan resolver.ResolveNowOptions, 1), - } -} - -// NewSubConn creates a new SubConn. -func (tcc *TestClientConn) NewSubConn(a []resolver.Address, o balancer.NewSubConnOptions) (balancer.SubConn, error) { - sc := TestSubConns[tcc.subConnIdx] - tcc.subConnIdx++ - - tcc.logger.Logf("testClientConn: NewSubConn(%v, %+v) => %s", a, o, sc) - select { - case tcc.NewSubConnAddrsCh <- a: - default: - } - - select { - case tcc.NewSubConnCh <- sc: - default: - } - - return sc, nil -} - -// RemoveSubConn removes the SubConn. -func (tcc *TestClientConn) RemoveSubConn(sc balancer.SubConn) { - tcc.logger.Logf("testClientConn: RemoveSubConn(%s)", sc) - select { - case tcc.RemoveSubConnCh <- sc: - default: - } -} - -// UpdateAddresses updates the addresses on the SubConn. 
-func (tcc *TestClientConn) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - tcc.logger.Logf("testClientConn: UpdateAddresses(%v, %+v)", sc, addrs) - select { - case tcc.UpdateAddressesAddrsCh <- addrs: - default: - } -} - -// UpdateState updates connectivity state and picker. -func (tcc *TestClientConn) UpdateState(bs balancer.State) { - tcc.logger.Logf("testClientConn: UpdateState(%v)", bs) - select { - case <-tcc.NewStateCh: - default: - } - tcc.NewStateCh <- bs.ConnectivityState - - select { - case <-tcc.NewPickerCh: - default: - } - tcc.NewPickerCh <- bs.Picker -} - -// ResolveNow panics. -func (tcc *TestClientConn) ResolveNow(o resolver.ResolveNowOptions) { - select { - case <-tcc.ResolveNowCh: - default: - } - tcc.ResolveNowCh <- o -} - -// Target panics. -func (tcc *TestClientConn) Target() string { - panic("not implemented") -} - -// WaitForErrPicker waits until an error picker is pushed to this ClientConn. -// Returns error if the provided context expires or a non-error picker is pushed -// to the ClientConn. -func (tcc *TestClientConn) WaitForErrPicker(ctx context.Context) error { - select { - case <-ctx.Done(): - return errors.New("timeout when waiting for an error picker") - case picker := <-tcc.NewPickerCh: - if _, perr := picker.Pick(balancer.PickInfo{}); perr == nil { - return fmt.Errorf("balancer returned a picker which is not an error picker") - } - } - return nil -} - -// WaitForPickerWithErr waits until an error picker is pushed to this -// ClientConn with the error matching the wanted error. Returns an error if -// the provided context expires, including the last received picker error (if -// any). 
-func (tcc *TestClientConn) WaitForPickerWithErr(ctx context.Context, want error) error { - lastErr := errors.New("received no picker") - for { - select { - case <-ctx.Done(): - return fmt.Errorf("timeout when waiting for an error picker with %v; last picker error: %v", want, lastErr) - case picker := <-tcc.NewPickerCh: - if _, lastErr = picker.Pick(balancer.PickInfo{}); lastErr != nil && lastErr.Error() == want.Error() { - return nil - } - } - } -} - -// WaitForConnectivityState waits until the state pushed to this ClientConn -// matches the wanted state. Returns an error if the provided context expires, -// including the last received state (if any). -func (tcc *TestClientConn) WaitForConnectivityState(ctx context.Context, want connectivity.State) error { - var lastState connectivity.State = -1 - for { - select { - case <-ctx.Done(): - return fmt.Errorf("timeout when waiting for state to be %s; last state: %s", want, lastState) - case s := <-tcc.NewStateCh: - if s == want { - return nil - } - lastState = s - } - } -} - -// WaitForRoundRobinPicker waits for a picker that passes IsRoundRobin. Also -// drains the matching state channel and requires it to be READY (if an entry -// is pending) to be considered. Returns an error if the provided context -// expires, including the last received error from IsRoundRobin or the picker -// (if any). 
-func (tcc *TestClientConn) WaitForRoundRobinPicker(ctx context.Context, want ...balancer.SubConn) error { - lastErr := errors.New("received no picker") - for { - select { - case <-ctx.Done(): - return fmt.Errorf("timeout when waiting for round robin picker with %v; last error: %v", want, lastErr) - case p := <-tcc.NewPickerCh: - s := connectivity.Ready - select { - case s = <-tcc.NewStateCh: - default: - } - if s != connectivity.Ready { - lastErr = fmt.Errorf("received state %v instead of ready", s) - break - } - var pickerErr error - if err := IsRoundRobin(want, func() balancer.SubConn { - sc, err := p.Pick(balancer.PickInfo{}) - if err != nil { - pickerErr = err - } else if sc.Done != nil { - sc.Done(balancer.DoneInfo{}) - } - return sc.SubConn - }); pickerErr != nil { - lastErr = pickerErr - continue - } else if err != nil { - lastErr = err - continue - } - return nil - } - } -} - -// WaitForPicker waits for a picker that results in f returning nil. If the -// context expires, returns the last error returned by f (if any). -func (tcc *TestClientConn) WaitForPicker(ctx context.Context, f func(balancer.Picker) error) error { - lastErr := errors.New("received no picker") - for { - select { - case <-ctx.Done(): - return fmt.Errorf("timeout when waiting for picker; last error: %v", lastErr) - case p := <-tcc.NewPickerCh: - if err := f(p); err != nil { - lastErr = err - continue - } - return nil - } - } -} - -// IsRoundRobin checks whether f's return value is roundrobin of elements from -// want. But it doesn't check for the order. Note that want can contain -// duplicate items, which makes it weight-round-robin. -// -// Step 1. the return values of f should form a permutation of all elements in -// want, but not necessary in the same order. E.g. 
if want is {a,a,b}, the check -// fails if f returns: -// - {a,a,a}: third a is returned before b -// - {a,b,b}: second b is returned before the second a -// -// If error is found in this step, the returned error contains only the first -// iteration until where it goes wrong. -// -// Step 2. the return values of f should be repetitions of the same permutation. -// E.g. if want is {a,a,b}, the check failes if f returns: -// - {a,b,a,b,a,a}: though it satisfies step 1, the second iteration is not -// repeating the first iteration. -// -// If error is found in this step, the returned error contains the first -// iteration + the second iteration until where it goes wrong. -func IsRoundRobin(want []balancer.SubConn, f func() balancer.SubConn) error { - wantSet := make(map[balancer.SubConn]int) // SubConn -> count, for weighted RR. - for _, sc := range want { - wantSet[sc]++ - } - - // The first iteration: makes sure f's return values form a permutation of - // elements in want. - // - // Also keep the returns values in a slice, so we can compare the order in - // the second iteration. - gotSliceFirstIteration := make([]balancer.SubConn, 0, len(want)) - for range want { - got := f() - gotSliceFirstIteration = append(gotSliceFirstIteration, got) - wantSet[got]-- - if wantSet[got] < 0 { - return fmt.Errorf("non-roundrobin want: %v, result: %v", want, gotSliceFirstIteration) - } - } - - // The second iteration should repeat the first iteration. - var gotSliceSecondIteration []balancer.SubConn - for i := 0; i < 2; i++ { - for _, w := range gotSliceFirstIteration { - g := f() - gotSliceSecondIteration = append(gotSliceSecondIteration, g) - if w != g { - return fmt.Errorf("non-roundrobin, first iter: %v, second iter: %v", gotSliceFirstIteration, gotSliceSecondIteration) - } - } - } - - return nil -} - -// SubConnFromPicker returns a function which returns a SubConn by calling the -// Pick() method of the provided picker. There is no caching of SubConns here. 
-// Every invocation of the returned function results in a new pick. -func SubConnFromPicker(p balancer.Picker) func() balancer.SubConn { - return func() balancer.SubConn { - scst, _ := p.Pick(balancer.PickInfo{}) - return scst.SubConn - } -} - -// ErrTestConstPicker is error returned by test const picker. -var ErrTestConstPicker = fmt.Errorf("const picker error") - -// TestConstPicker is a const picker for tests. -type TestConstPicker struct { - Err error - SC balancer.SubConn -} - -// Pick returns the const SubConn or the error. -func (tcp *TestConstPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - if tcp.Err != nil { - return balancer.PickResult{}, tcp.Err - } - return balancer.PickResult{SubConn: tcp.SC}, nil -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/channel.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/channel.go deleted file mode 100644 index 6a08a94a09..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/channel.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package testutils - -import ( - "context" -) - -// DefaultChanBufferSize is the default buffer size of the underlying channel. -const DefaultChanBufferSize = 1 - -// Channel wraps a generic channel and provides a timed receive operation. 
-type Channel struct { - ch chan interface{} -} - -// Send sends value on the underlying channel. -func (c *Channel) Send(value interface{}) { - c.ch <- value -} - -// SendContext sends value on the underlying channel, or returns an error if -// the context expires. -func (c *Channel) SendContext(ctx context.Context, value interface{}) error { - select { - case c.ch <- value: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// SendOrFail attempts to send value on the underlying channel. Returns true -// if successful or false if the channel was full. -func (c *Channel) SendOrFail(value interface{}) bool { - select { - case c.ch <- value: - return true - default: - return false - } -} - -// ReceiveOrFail returns the value on the underlying channel and true, or nil -// and false if the channel was empty. -func (c *Channel) ReceiveOrFail() (interface{}, bool) { - select { - case got := <-c.ch: - return got, true - default: - return nil, false - } -} - -// Receive returns the value received on the underlying channel, or the error -// returned by ctx if it is closed or cancelled. -func (c *Channel) Receive(ctx context.Context) (interface{}, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case got := <-c.ch: - return got, nil - } -} - -// Replace clears the value on the underlying channel, and sends the new value. -// -// It's expected to be used with a size-1 channel, to only keep the most -// up-to-date item. This method is inherently racy when invoked concurrently -// from multiple goroutines. -func (c *Channel) Replace(value interface{}) { - for { - select { - case c.ch <- value: - return - case <-c.ch: - } - } -} - -// NewChannel returns a new Channel. -func NewChannel() *Channel { - return NewChannelWithSize(DefaultChanBufferSize) -} - -// NewChannelWithSize returns a new Channel with a buffer of bufSize. 
-func NewChannelWithSize(bufSize int) *Channel { - return &Channel{ch: make(chan interface{}, bufSize)} -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/http_client.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/http_client.go deleted file mode 100644 index 9832bf3057..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/http_client.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package testutils - -import ( - "context" - "net/http" - "time" -) - -// DefaultHTTPRequestTimeout is the default timeout value for the amount of time -// this client waits for a response to be pushed on RespChan before it fails the -// Do() call. -const DefaultHTTPRequestTimeout = 1 * time.Second - -// FakeHTTPClient helps mock out HTTP calls made by the code under test. It -// makes HTTP requests made by the code under test available through a channel, -// and makes it possible to inject various responses. -type FakeHTTPClient struct { - // ReqChan exposes the HTTP.Request made by the code under test. - ReqChan *Channel - // RespChan is a channel on which this fake client accepts responses to be - // sent to the code under test. - RespChan *Channel - // Err, if set, is returned by Do(). 
- Err error - // RecvTimeout is the amount of the time this client waits for a response to - // be pushed on RespChan before it fails the Do() call. If this field is - // left unspecified, DefaultHTTPRequestTimeout is used. - RecvTimeout time.Duration -} - -// Do pushes req on ReqChan and returns the response available on RespChan. -func (fc *FakeHTTPClient) Do(req *http.Request) (*http.Response, error) { - fc.ReqChan.Send(req) - - timeout := fc.RecvTimeout - if timeout == 0 { - timeout = DefaultHTTPRequestTimeout - } - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - val, err := fc.RespChan.Receive(ctx) - if err != nil { - return nil, err - } - return val.(*http.Response), fc.Err -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/local_listener.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/local_listener.go deleted file mode 100644 index f831b95f41..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/local_listener.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package testutils - -import "net" - -// LocalTCPListener returns a net.Listener listening on local address and port. 
-func LocalTCPListener() (net.Listener, error) { - return net.Listen("tcp", "localhost:0") -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/marshal_any.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/marshal_any.go deleted file mode 100644 index 9ddef6de15..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/marshal_any.go +++ /dev/null @@ -1,36 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package testutils - -import ( - "fmt" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - "google.golang.org/protobuf/types/known/anypb" -) - -// MarshalAny is a convenience function to marshal protobuf messages into any -// protos. It will panic if the marshaling fails. -func MarshalAny(m proto.Message) *anypb.Any { - a, err := ptypes.MarshalAny(m) - if err != nil { - panic(fmt.Sprintf("ptypes.MarshalAny(%+v) failed: %v", m, err)) - } - return a -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/parse_url.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/parse_url.go deleted file mode 100644 index ff276e4d0c..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/parse_url.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package testutils - -import ( - "fmt" - "net/url" -) - -// MustParseURL attempts to parse the provided target using url.Parse() -// and panics if parsing fails. -func MustParseURL(target string) *url.URL { - u, err := url.Parse(target) - if err != nil { - panic(fmt.Sprintf("Error parsing target(%s): %v", target, err)) - } - return u -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/pipe_listener.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/pipe_listener.go deleted file mode 100644 index 6bd3bc0bea..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/pipe_listener.go +++ /dev/null @@ -1,96 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package testutils contains testing helpers. 
-package testutils - -import ( - "errors" - "net" - "time" -) - -var errClosed = errors.New("closed") - -type pipeAddr struct{} - -func (p pipeAddr) Network() string { return "pipe" } -func (p pipeAddr) String() string { return "pipe" } - -// PipeListener is a listener with an unbuffered pipe. Each write will complete only once the other side reads. It -// should only be created using NewPipeListener. -type PipeListener struct { - c chan chan<- net.Conn - done chan struct{} -} - -// NewPipeListener creates a new pipe listener. -func NewPipeListener() *PipeListener { - return &PipeListener{ - c: make(chan chan<- net.Conn), - done: make(chan struct{}), - } -} - -// Accept accepts a connection. -func (p *PipeListener) Accept() (net.Conn, error) { - var connChan chan<- net.Conn - select { - case <-p.done: - return nil, errClosed - case connChan = <-p.c: - select { - case <-p.done: - close(connChan) - return nil, errClosed - default: - } - } - c1, c2 := net.Pipe() - connChan <- c1 - close(connChan) - return c2, nil -} - -// Close closes the listener. -func (p *PipeListener) Close() error { - close(p.done) - return nil -} - -// Addr returns a pipe addr. -func (p *PipeListener) Addr() net.Addr { - return pipeAddr{} -} - -// Dialer dials a connection. 
-func (p *PipeListener) Dialer() func(string, time.Duration) (net.Conn, error) { - return func(string, time.Duration) (net.Conn, error) { - connChan := make(chan net.Conn) - select { - case p.c <- connChan: - case <-p.done: - return nil, errClosed - } - conn, ok := <-connChan - if !ok { - return nil, errClosed - } - return conn, nil - } -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/restartable_listener.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/restartable_listener.go deleted file mode 100644 index efe4019a08..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/restartable_listener.go +++ /dev/null @@ -1,98 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package testutils - -import ( - "net" - "sync" -) - -type tempError struct{} - -func (*tempError) Error() string { - return "restartable listener temporary error" -} -func (*tempError) Temporary() bool { - return true -} - -// RestartableListener wraps a net.Listener and supports stopping and restarting -// the latter. -type RestartableListener struct { - lis net.Listener - - mu sync.Mutex - stopped bool - conns []net.Conn -} - -// NewRestartableListener returns a new RestartableListener wrapping l. 
-func NewRestartableListener(l net.Listener) *RestartableListener { - return &RestartableListener{lis: l} -} - -// Accept waits for and returns the next connection to the listener. -// -// If the listener is currently not accepting new connections, because `Stop` -// was called on it, the connection is immediately closed after accepting -// without any bytes being sent on it. -func (l *RestartableListener) Accept() (net.Conn, error) { - conn, err := l.lis.Accept() - if err != nil { - return nil, err - } - - l.mu.Lock() - defer l.mu.Unlock() - if l.stopped { - conn.Close() - return nil, &tempError{} - } - l.conns = append(l.conns, conn) - return conn, nil -} - -// Close closes the listener. -func (l *RestartableListener) Close() error { - return l.lis.Close() -} - -// Addr returns the listener's network address. -func (l *RestartableListener) Addr() net.Addr { - return l.lis.Addr() -} - -// Stop closes existing connections on the listener and prevents new connections -// from being accepted. -func (l *RestartableListener) Stop() { - l.mu.Lock() - l.stopped = true - for _, conn := range l.conns { - conn.Close() - } - l.conns = nil - l.mu.Unlock() -} - -// Restart gets a previously stopped listener to start accepting connections. -func (l *RestartableListener) Restart() { - l.mu.Lock() - l.stopped = false - l.mu.Unlock() -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/status_equal.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/status_equal.go deleted file mode 100644 index dfd647336d..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/status_equal.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package testutils - -import ( - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/status" -) - -// StatusErrEqual returns true iff both err1 and err2 wrap status.Status errors -// and their underlying status protos are equal. -func StatusErrEqual(err1, err2 error) bool { - status1, ok := status.FromError(err1) - if !ok { - return false - } - status2, ok := status.FromError(err2) - if !ok { - return false - } - return proto.Equal(status1.Proto(), status2.Proto()) -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/wrappers.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/wrappers.go deleted file mode 100644 index c9b596d885..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/wrappers.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package testutils - -import ( - "net" - "testing" -) - -// ConnWrapper wraps a net.Conn and pushes on a channel when closed. -type ConnWrapper struct { - net.Conn - CloseCh *Channel -} - -// Close closes the connection and sends a value on the close channel. -func (cw *ConnWrapper) Close() error { - err := cw.Conn.Close() - cw.CloseCh.Replace(nil) - return err -} - -// ListenerWrapper wraps a net.Listener and the returned net.Conn. -// -// It pushes on a channel whenever it accepts a new connection. -type ListenerWrapper struct { - net.Listener - NewConnCh *Channel -} - -// Accept wraps the Listener Accept and sends the accepted connection on a -// channel. -func (l *ListenerWrapper) Accept() (net.Conn, error) { - c, err := l.Listener.Accept() - if err != nil { - return nil, err - } - closeCh := NewChannel() - conn := &ConnWrapper{Conn: c, CloseCh: closeCh} - l.NewConnCh.Send(conn) - return conn, nil -} - -// NewListenerWrapper returns a ListenerWrapper. -func NewListenerWrapper(t *testing.T, lis net.Listener) *ListenerWrapper { - if lis == nil { - var err error - lis, err = LocalTCPListener() - if err != nil { - t.Fatal(err) - } - } - - return &ListenerWrapper{ - Listener: lis, - NewConnCh: NewChannel(), - } -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/wrr.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/wrr.go deleted file mode 100644 index 6c9486329d..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/testutils/wrr.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package testutils - -import ( - "fmt" - "sync" - - "google.golang.org/grpc/internal/wrr" -) - -// testWRR is a deterministic WRR implementation. -// -// The real implementation does random WRR. testWRR makes the balancer behavior -// deterministic and easier to test. -// -// With {a: 2, b: 3}, the Next() results will be {a, a, b, b, b}. -type testWRR struct { - itemsWithWeight []struct { - item interface{} - weight int64 - } - length int - - mu sync.Mutex - idx int // The index of the item that will be picked - count int64 // The number of times the current item has been picked. -} - -// NewTestWRR return a WRR for testing. It's deterministic instead of random. 
-func NewTestWRR() wrr.WRR { - return &testWRR{} -} - -func (twrr *testWRR) Add(item interface{}, weight int64) { - twrr.itemsWithWeight = append(twrr.itemsWithWeight, struct { - item interface{} - weight int64 - }{item: item, weight: weight}) - twrr.length++ -} - -func (twrr *testWRR) Next() interface{} { - twrr.mu.Lock() - iww := twrr.itemsWithWeight[twrr.idx] - twrr.count++ - if twrr.count >= iww.weight { - twrr.idx = (twrr.idx + 1) % twrr.length - twrr.count = 0 - } - twrr.mu.Unlock() - return iww.item -} - -func (twrr *testWRR) String() string { - return fmt.Sprint(twrr.itemsWithWeight) -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 9097385e1a..be5a9c81eb 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -22,6 +22,7 @@ import ( "bytes" "errors" "fmt" + "net" "runtime" "strconv" "sync" @@ -29,6 +30,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -486,12 +488,14 @@ type loopyWriter struct { hEnc *hpack.Encoder // HPACK encoder. 
bdpEst *bdpEstimator draining bool + conn net.Conn + logger *grpclog.PrefixLogger // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -504,6 +508,8 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato hBuf: &buf, hEnc: hpack.NewEncoder(&buf), bdpEst: bdpEst, + conn: conn, + logger: logger, } return l } @@ -521,15 +527,27 @@ const minBatchSize = 1000 // 2. Stream level flow control quota available. // // In each iteration of run loop, other than processing the incoming control -// frame, loopy calls processData, which processes one node from the activeStreams linked-list. -// This results in writing of HTTP2 frames into an underlying write buffer. -// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. -// As an optimization, to increase the batch size for each flush, loopy yields the processor, once -// if the batch size is too low to give stream goroutines a chance to fill it up. +// frame, loopy calls processData, which processes one node from the +// activeStreams linked-list. This results in writing of HTTP2 frames into an +// underlying write buffer. When there's no more control frames to read from +// controlBuf, loopy flushes the write buffer. As an optimization, to increase +// the batch size for each flush, loopy yields the processor, once if the batch +// size is too low to give stream goroutines a chance to fill it up. +// +// Upon exiting, if the error causing the exit is not an I/O error, run() +// flushes and closes the underlying connection. Otherwise, the connection is +// left open to allow the I/O error to be encountered by the reader instead. 
func (l *loopyWriter) run() (err error) { - // Always flush the writer before exiting in case there are pending frames - // to be sent. - defer l.framer.writer.Flush() + defer func() { + if l.logger.V(logLevel) { + l.logger.Infof("loopyWriter exiting with error: %v", err) + } + if !isIOError(err) { + l.framer.writer.Flush() + l.conn.Close() + } + l.cbuf.finish() + }() for { it, err := l.cbuf.get(true) if err != nil { @@ -581,11 +599,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) } -func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { // Otherwise update the quota. if w.streamID == 0 { l.sendQuota += w.increment - return nil + return } // Find the stream and update it. if str, ok := l.estdStreams[w.streamID]; ok { @@ -593,10 +611,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { str.state = active l.activeStreams.enqueue(str) - return nil + return } } - return nil } func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { @@ -604,13 +621,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { } func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { - if err := l.applySettings(s.ss); err != nil { - return err - } + l.applySettings(s.ss) return l.framer.fr.WriteSettingsAck() } -func (l *loopyWriter) registerStreamHandler(h *registerStream) error { +func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ id: h.streamID, state: empty, @@ -618,15 +633,14 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error { wq: h.wq, } l.estdStreams[h.streamID] = str - return nil } func (l *loopyWriter) headerHandler(h *headerFrame) 
error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + if l.logger.V(logLevel) { + l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) } return nil } @@ -681,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - if logger.V(logLevel) { - logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + if l.logger.V(logLevel) { + l.logger.Warningf("Encountered error while encoding headers: %v", err) } } } @@ -720,10 +734,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He return nil } -func (l *loopyWriter) preprocessData(df *dataFrame) error { +func (l *loopyWriter) preprocessData(df *dataFrame) { str, ok := l.estdStreams[df.streamID] if !ok { - return nil + return } // If we got data for a stream it means that // stream was originated and the headers were sent out. @@ -732,7 +746,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error { str.state = active l.activeStreams.enqueue(str) } - return nil } func (l *loopyWriter) pingHandler(p *ping) error { @@ -743,9 +756,8 @@ func (l *loopyWriter) pingHandler(p *ping) error { } -func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { o.resp <- l.sendQuota - return nil } func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { @@ -763,6 +775,7 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { } } if l.draining && len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. 
return errors.New("finished processing active streams while in draining mode") } return nil @@ -798,6 +811,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. return errors.New("received GOAWAY with no active streams") } } @@ -816,17 +830,10 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) closeConnectionHandler() error { - // Exit loopyWriter entirely by returning an error here. This will lead to - // the transport closing the connection, and, ultimately, transport - // closure. - return ErrConnClosing -} - func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: - return l.incomingWindowUpdateHandler(i) + l.incomingWindowUpdateHandler(i) case *outgoingWindowUpdate: return l.outgoingWindowUpdateHandler(i) case *incomingSettings: @@ -836,7 +843,7 @@ func (l *loopyWriter) handle(i interface{}) error { case *headerFrame: return l.headerHandler(i) case *registerStream: - return l.registerStreamHandler(i) + l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) case *earlyAbortStream: @@ -844,21 +851,24 @@ func (l *loopyWriter) handle(i interface{}) error { case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: - return l.preprocessData(i) + l.preprocessData(i) case *ping: return l.pingHandler(i) case *goAway: return l.goAwayHandler(i) case *outFlowControlSizeRequest: - return l.outFlowControlSizeRequestHandler(i) + l.outFlowControlSizeRequestHandler(i) case closeConnection: - return l.closeConnectionHandler() + // Just return a non-I/O error and run() will flush and close the + // connection. 
+ return ErrConnClosing default: return fmt.Errorf("transport: unknown control message type %T", i) } + return nil } -func (l *loopyWriter) applySettings(ss []http2.Setting) error { +func (l *loopyWriter) applySettings(ss []http2.Setting) { for _, s := range ss { switch s.ID { case http2.SettingInitialWindowSize: @@ -877,7 +887,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error { updateHeaderTblSize(l.hEnc, s.Val) } } - return nil } // processData removes the first stream from active streams, writes out at most 16KB @@ -911,7 +920,7 @@ func (l *loopyWriter) processData() (bool, error) { return false, err } if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { - return false, nil + return false, err } } else { l.activeStreams.enqueue(str) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/handler_server.go index e6626bf96e..98f80e3fa0 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -39,6 +39,7 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -83,6 +84,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentSubtype: contentSubtype, stats: stats, } + st.logger = prefixLoggerForServerHandlerTransport(st) if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) @@ -150,13 +152,14 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats []stats.Handler + stats []stats.Handler + logger *grpclog.PrefixLogger 
} func (ht *serverHandlerTransport) Close(err error) { ht.closeOnce.Do(func() { - if logger.V(logLevel) { - logger.Infof("Closing serverHandlerTransport: %v", err) + if ht.logger.V(logLevel) { + ht.logger.Infof("Closing: %v", err) } close(ht.closedCh) }) @@ -450,7 +453,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain() { +func (ht *serverHandlerTransport) Drain(debugData string) { panic("Drain() is not implemented") } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 79ee8aea0a..326bf08480 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" @@ -145,6 +146,7 @@ type http2Client struct { bufferPool *bufferPool connectionID uint64 + logger *grpclog.PrefixLogger } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { @@ -244,7 +246,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if err := connectCtx.Err(); err != nil { // connectCtx expired before exiting the function. Hard close the connection. 
if logger.V(logLevel) { - logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) + logger.Infof("Aborting due to connect deadline expiring: %v", err) } conn.Close() } @@ -346,6 +348,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts bufferPool: newBufferPool(), onClose: onClose, } + t.logger = prefixLoggerForClientTransport(t) // Add peer information to the http2client context. t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -444,15 +447,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) - err := t.loopy.run() - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) - } - // Do not close the transport. Let reader goroutine handle it since - // there might be data in the buffers. - t.conn.Close() - t.controlBuf.finish() + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy.run() close(t.writerDone) }() return t, nil @@ -789,7 +785,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() - if t.activeStreams == nil { // Can be niled from Close(). + if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). t.mu.Unlock() return false // Don't create a stream if the transport is already closed. } @@ -866,8 +862,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, } } if transportDrainRequired { - if logger.V(logLevel) { - logger.Infof("transport: t.nextID > MaxStreamID. 
Draining") + if t.logger.V(logLevel) { + t.logger.Infof("Draining transport: t.nextID > MaxStreamID") } t.GracefulClose() } @@ -959,8 +955,8 @@ func (t *http2Client) Close(err error) { t.mu.Unlock() return } - if logger.V(logLevel) { - logger.Infof("transport: closing: %v", err) + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) } // Call t.onClose ASAP to prevent the client from attempting to create new // streams. @@ -1016,8 +1012,8 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } - if logger.V(logLevel) { - logger.Infof("transport: GracefulClose called") + if t.logger.V(logLevel) { + t.logger.Infof("GracefulClose called") } t.onClose(GoAwayInvalid) t.state = draining @@ -1181,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode) + if t.logger.V(logLevel) { + t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) } statusCode = codes.Unknown } @@ -1264,10 +1260,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Unlock() return } - if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - if logger.V(logLevel) { - logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") - } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { + // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug + // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is + // enabled by default and double the configure KEEPALIVE_TIME used for new connections + // on that channel. 
+ logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".") } id := f.LastStreamID if id > 0 && id%2 == 0 { @@ -1339,7 +1337,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutext to be held by +// It expects a lock on transport's mutex to be held by // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_server.go index bc3da70672..79e86ba088 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,7 +35,9 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" @@ -129,6 +131,8 @@ type http2Server struct { // This lock may not be taken if mu is already held. maxStreamMu sync.Mutex maxStreamID uint32 // max stream ID ever seen + + logger *grpclog.PrefixLogger } // NewServerTransport creates a http2 transport with conn and configuration @@ -267,6 +271,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } + t.logger = prefixLoggerForServerTransport(t) // Add peer information to the http2server context. 
t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -331,14 +336,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - err := t.loopy.run() - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) - } - t.conn.Close() - t.controlBuf.finish() + t.loopy.run() close(t.writerDone) }() go t.keepalive() @@ -383,7 +383,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // if false, content-type was missing or invalid isGRPC = false contentType = "" - mdata = make(map[string][]string) + mdata = make(metadata.MD, len(frame.Fields)) httpMethod string // these are set if an error is encountered while parsing the headers protocolError bool @@ -404,6 +404,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( mdata[hf.Name] = append(mdata[hf.Name], hf.Value) s.contentSubtype = contentSubtype isGRPC = true + + case "grpc-accept-encoding": + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + if hf.Value == "" { + continue + } + compressors := hf.Value + if s.clientAdvertisedCompressors != "" { + compressors = s.clientAdvertisedCompressors + "," + compressors + } + s.clientAdvertisedCompressors = compressors case "grpc-encoding": s.recvCompress = hf.Value case ":method": @@ -419,8 +430,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // "Transports must consider requests containing the Connection header // as malformed." 
- A41 case "connection": - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") + if t.logger.V(logLevel) { + t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") } protocolError = true default: @@ -430,7 +441,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( v, err := decodeMetadataHeader(hf.Name, hf.Value) if err != nil { headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) - logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) break } mdata[hf.Name] = append(mdata[hf.Name], v) @@ -444,8 +455,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // error, this takes precedence over a client not speaking gRPC. 
if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) - if logger.V(logLevel) { - logger.Errorf("transport: %v", errMsg) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ httpStatus: http.StatusBadRequest, @@ -539,9 +550,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if httpMethod != http.MethodPost { t.mu.Unlock() - errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) - if logger.V(logLevel) { - logger.Infof("transport: %v", errMsg) + errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ httpStatus: 405, @@ -557,8 +568,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( var err error if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { t.mu.Unlock() - if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) } stat, ok := status.FromError(err) if !ok { @@ -595,7 +606,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), - Header: metadata.MD(mdata).Copy(), + Header: mdata.Copy(), } sh.HandleRPC(s.ctx, inHeader) } @@ -632,8 +643,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. 
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + if t.logger.V(logLevel) { + t.logger.Warningf("Encountered http2.StreamError: %v", se) } t.mu.Lock() s := t.activeStreams[se.StreamID] @@ -676,8 +687,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. default: - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + if t.logger.V(logLevel) { + t.logger.Infof("Received unsupported frame type %T", frame) } } } @@ -936,8 +947,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - if logger.V(logLevel) { - logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if t.logger.V(logLevel) { + t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) } return false } @@ -1050,7 +1061,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } @@ -1155,18 +1166,18 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. 
- t.Drain() + t.Drain("max_idle") return } idleTimer.Reset(val) case <-ageTimer.C: - t.Drain() + t.Drain("max_age") ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: // Close the connection after grace period. - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to maximum connection age.") + if t.logger.V(logLevel) { + t.logger.Infof("Closing server transport due to maximum connection age") } t.controlBuf.put(closeConnection{}) case <-t.done: @@ -1217,8 +1228,8 @@ func (t *http2Server) Close(err error) { t.mu.Unlock() return } - if logger.V(logLevel) { - logger.Infof("transport: closing: %v", err) + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) } t.state = closing streams := t.activeStreams @@ -1226,8 +1237,8 @@ func (t *http2Server) Close(err error) { t.mu.Unlock() t.controlBuf.finish() close(t.done) - if err := t.conn.Close(); err != nil && logger.V(logLevel) { - logger.Infof("transport: error closing conn during Close: %v", err) + if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { + t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) } channelz.RemoveEntry(t.channelzID) // Cancel all active streams. @@ -1307,14 +1318,14 @@ func (t *http2Server) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Server) Drain() { +func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() if t.drainEvent != nil { return } t.drainEvent = grpcsync.NewEvent() - t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1344,9 +1355,6 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return false, err } if retErr != nil { - // Abruptly close the connection following the GoAway (via - // loopywriter). 
But flush out what's inside the buffer first. - t.framer.writer.Flush() return false, retErr } return true, nil @@ -1359,7 +1367,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. - if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http_util.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http_util.go index 2c601a864d..19cbb18f5a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -21,6 +21,7 @@ package transport import ( "bufio" "encoding/base64" + "errors" "fmt" "io" "math" @@ -37,7 +38,6 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) @@ -85,7 +85,6 @@ var ( // 504 Gateway timeout - UNAVAILABLE. http.StatusGatewayTimeout: codes.Unavailable, } - logger = grpclog.Component("transport") ) // isReservedHeader checks whether hdr belongs to HTTP2 headers @@ -330,7 +329,8 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. 
- return w.conn.Write(b) + n, err = w.conn.Write(b) + return n, toIOError(err) } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) @@ -352,10 +352,30 @@ func (w *bufWriter) Flush() error { return nil } _, w.err = w.conn.Write(w.buf[:w.offset]) + w.err = toIOError(w.err) w.offset = 0 return w.err } +type ioError struct { + error +} + +func (i ioError) Unwrap() error { + return i.error +} + +func isIOError(err error) bool { + return errors.As(err, &ioError{}) +} + +func toIOError(err error) error { + if err == nil { + return nil + } + return ioError{error: err} +} + type framer struct { writer *bufWriter fr *http2.Framer diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/logging.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/logging.go new file mode 100644 index 0000000000..42ed2b07af --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("transport") + +func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) +} + +func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) +} + +func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/transport.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/transport.go index 0ac77ea4f8..aa1c896595 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -257,6 +257,9 @@ type Stream struct { fc *inFlow wq *writeQuota + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. This is empty for the client side stream. + clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) @@ -345,8 +348,24 @@ func (s *Stream) RecvCompress() string { } // SetSendCompress sets the compression algorithm to the stream. 
-func (s *Stream) SetSendCompress(str string) { - s.sendCompress = str +func (s *Stream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SendCompress returns the send compressor name. +func (s *Stream) SendCompress() string { + return s.sendCompress +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *Stream) ClientAdvertisedCompressors() string { + return s.clientAdvertisedCompressors } // Done returns a channel which is closed when it receives the final status @@ -707,7 +726,7 @@ type ServerTransport interface { RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain() + Drain(debugData string) // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go index fd4833d3ff..01433f4122 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go @@ -241,3 +241,34 @@ func (hcm *HeaderContainsMatcher) Match(md metadata.MD) bool { func (hcm *HeaderContainsMatcher) String() string { return fmt.Sprintf("headerContains:%v%v", hcm.key, hcm.contains) } + +// HeaderStringMatcher matches on whether the header value matches against the +// StringMatcher specified. +type HeaderStringMatcher struct { + key string + stringMatcher StringMatcher + invert bool +} + +// NewHeaderStringMatcher returns a new HeaderStringMatcher. 
+func NewHeaderStringMatcher(key string, sm StringMatcher, invert bool) *HeaderStringMatcher { + return &HeaderStringMatcher{ + key: key, + stringMatcher: sm, + invert: invert, + } +} + +// Match returns whether the passed in HTTP Headers match according to the +// specified StringMatcher. +func (hsm *HeaderStringMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hsm.key) + if !ok { + return false + } + return hsm.stringMatcher.Match(v) != hsm.invert +} + +func (hsm *HeaderStringMatcher) String() string { + return fmt.Sprintf("headerString:%v:%v", hsm.key, hsm.stringMatcher) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go new file mode 100644 index 0000000000..713e39cf31 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go @@ -0,0 +1,101 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package rbac + +import ( + "encoding/json" + "fmt" + "strings" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3auditloggersstreampb "github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/authz/audit/stdout" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" +) + +func buildLogger(loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig) (audit.Logger, error) { + if loggerConfig.GetAuditLogger().GetTypedConfig() == nil { + return nil, fmt.Errorf("missing required field: TypedConfig") + } + customConfig, loggerName, err := getCustomConfig(loggerConfig.AuditLogger.TypedConfig) + if err != nil { + return nil, err + } + if loggerName == "" { + return nil, fmt.Errorf("field TypedConfig.TypeURL cannot be an empty string") + } + factory := audit.GetLoggerBuilder(loggerName) + if factory == nil { + if loggerConfig.IsOptional { + return nil, nil + } + return nil, fmt.Errorf("no builder registered for %v", loggerName) + } + auditLoggerConfig, err := factory.ParseLoggerConfig(customConfig) + if err != nil { + return nil, fmt.Errorf("custom config could not be parsed by registered factory. 
error: %v", err) + } + auditLogger := factory.Build(auditLoggerConfig) + return auditLogger, nil +} + +func getCustomConfig(config *anypb.Any) (json.RawMessage, string, error) { + any, err := config.UnmarshalNew() + if err != nil { + return nil, "", err + } + switch m := any.(type) { + case *v1xdsudpatypepb.TypedStruct: + return convertCustomConfig(m.TypeUrl, m.Value) + case *v3xdsxdstypepb.TypedStruct: + return convertCustomConfig(m.TypeUrl, m.Value) + case *v3auditloggersstreampb.StdoutAuditLog: + return convertStdoutConfig(m) + } + return nil, "", fmt.Errorf("custom config not implemented for type [%v]", config.GetTypeUrl()) +} + +func convertStdoutConfig(config *v3auditloggersstreampb.StdoutAuditLog) (json.RawMessage, string, error) { + json, err := protojson.Marshal(config) + return json, stdout.Name, err +} + +func convertCustomConfig(typeURL string, s *structpb.Struct) (json.RawMessage, string, error) { + // The gRPC policy name will be the "type name" part of the value of the + // type_url field in the TypedStruct. We get this by using the part after + // the last / character. Can assume a valid type_url from the control plane. 
+ urls := strings.Split(typeURL, "/") + if len(urls) == 0 { + return nil, "", fmt.Errorf("error converting custom audit logger %v for %v: typeURL must have a url-like format with the typeName being the value after the last /", typeURL, s) + } + name := urls[len(urls)-1] + + rawJSON := []byte("{}") + var err error + if s != nil { + rawJSON, err = json.Marshal(s) + if err != nil { + return nil, "", fmt.Errorf("error converting custom audit logger %v for %v: %v", typeURL, s, err) + } + } + return rawJSON, name, nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go index a212579c63..63237affe2 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go @@ -30,6 +30,7 @@ import ( v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" "google.golang.org/grpc" + "google.golang.org/grpc/authz/audit" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" @@ -51,10 +52,10 @@ type ChainEngine struct { // NewChainEngine returns a chain of RBAC engines, used to make authorization // decisions on incoming RPCs. Returns a non-nil error for invalid policies. 
-func NewChainEngine(policies []*v3rbacpb.RBAC) (*ChainEngine, error) { +func NewChainEngine(policies []*v3rbacpb.RBAC, policyName string) (*ChainEngine, error) { engines := make([]*engine, 0, len(policies)) for _, policy := range policies { - engine, err := newEngine(policy) + engine, err := newEngine(policy, policyName) if err != nil { return nil, err } @@ -94,13 +95,16 @@ func (cre *ChainEngine) IsAuthorized(ctx context.Context) error { switch { case engine.action == v3rbacpb.RBAC_ALLOW && !ok: cre.logRequestDetails(rpcData) + engine.doAuditLogging(rpcData, matchingPolicyName, false) return status.Errorf(codes.PermissionDenied, "incoming RPC did not match an allow policy") case engine.action == v3rbacpb.RBAC_DENY && ok: cre.logRequestDetails(rpcData) + engine.doAuditLogging(rpcData, matchingPolicyName, false) return status.Errorf(codes.PermissionDenied, "incoming RPC matched a deny policy %q", matchingPolicyName) } // Every policy in the engine list must be queried. Thus, iterate to the // next policy. + engine.doAuditLogging(rpcData, matchingPolicyName, true) } // If the incoming RPC gets through all of the engines successfully (i.e. // doesn't not match an allow or match a deny engine), the RPC is authorized @@ -110,14 +114,18 @@ func (cre *ChainEngine) IsAuthorized(ctx context.Context) error { // engine is used for matching incoming RPCs to policies. type engine struct { - policies map[string]*policyMatcher + // TODO(gtcooke94) - differentiate between `policyName`, `policies`, and `rules` + policyName string + policies map[string]*policyMatcher // action must be ALLOW or DENY. - action v3rbacpb.RBAC_Action + action v3rbacpb.RBAC_Action + auditLoggers []audit.Logger + auditCondition v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition } -// newEngine creates an RBAC Engine based on the contents of policy. Returns a +// newEngine creates an RBAC Engine based on the contents of a policy. Returns a // non-nil error if the policy is invalid. 
-func newEngine(config *v3rbacpb.RBAC) (*engine, error) { +func newEngine(config *v3rbacpb.RBAC, policyName string) (*engine, error) { a := config.GetAction() if a != v3rbacpb.RBAC_ALLOW && a != v3rbacpb.RBAC_DENY { return nil, fmt.Errorf("unsupported action %s", config.Action) @@ -131,18 +139,47 @@ func newEngine(config *v3rbacpb.RBAC) (*engine, error) { } policies[name] = matcher } + + auditLoggers, auditCondition, err := parseAuditOptions(config.GetAuditLoggingOptions()) + if err != nil { + return nil, err + } return &engine{ - policies: policies, - action: a, + policyName: policyName, + policies: policies, + action: a, + auditLoggers: auditLoggers, + auditCondition: auditCondition, }, nil } +func parseAuditOptions(opts *v3rbacpb.RBAC_AuditLoggingOptions) ([]audit.Logger, v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition, error) { + if opts == nil { + return nil, v3rbacpb.RBAC_AuditLoggingOptions_NONE, nil + } + var auditLoggers []audit.Logger + for _, logger := range opts.LoggerConfigs { + auditLogger, err := buildLogger(logger) + if err != nil { + return nil, v3rbacpb.RBAC_AuditLoggingOptions_NONE, err + } + if auditLogger == nil { + // This occurs when the audit logger is not registered but also + // marked optional. + continue + } + auditLoggers = append(auditLoggers, auditLogger) + } + return auditLoggers, opts.GetAuditCondition(), nil + +} + // findMatchingPolicy determines if an incoming RPC matches a policy. On a // successful match, it returns the name of the matching policy and a true bool // to specify that there was a matching policy found. It returns false in // the case of not finding a matching policy. -func (r *engine) findMatchingPolicy(rpcData *rpcData) (string, bool) { - for policy, matcher := range r.policies { +func (e *engine) findMatchingPolicy(rpcData *rpcData) (string, bool) { + for policy, matcher := range e.policies { if matcher.match(rpcData) { return policy, true } @@ -238,3 +275,43 @@ type rpcData struct { // handshake. 
certs []*x509.Certificate } + +func (e *engine) doAuditLogging(rpcData *rpcData, rule string, authorized bool) { + // In the RBAC world, we need to have a SPIFFE ID as the principal for this + // to be meaningful + principal := "" + if rpcData.peerInfo != nil && rpcData.peerInfo.AuthInfo != nil && rpcData.peerInfo.AuthInfo.AuthType() == "tls" { + // If AuthType = tls, then we can cast AuthInfo to TLSInfo. + tlsInfo := rpcData.peerInfo.AuthInfo.(credentials.TLSInfo) + if tlsInfo.SPIFFEID != nil { + principal = tlsInfo.SPIFFEID.String() + } + } + + //TODO(gtcooke94) check if we need to log before creating the event + event := &audit.Event{ + FullMethodName: rpcData.fullMethod, + Principal: principal, + PolicyName: e.policyName, + MatchedRule: rule, + Authorized: authorized, + } + for _, logger := range e.auditLoggers { + switch e.auditCondition { + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY: + if !authorized { + logger.Log(event) + } + case v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW: + if authorized { + logger.Log(event) + } + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW: + logger.Log(event) + } + } +} + +// This is used when converting a custom config from raw JSON to a TypedStruct. +// The TypeURL of the TypeStruct will be "grpc.authz.audit_logging/". +const typeURLPrefix = "grpc.authz.audit_logging/" diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/metadata/metadata.go b/terraform/providers/google/vendor/google.golang.org/grpc/metadata/metadata.go index fb4a88f59b..a2cdcaf12a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/metadata/metadata.go @@ -91,7 +91,11 @@ func (md MD) Len() int { // Copy returns a copy of md. func (md MD) Copy() MD { - return Join(md) + out := make(MD, len(md)) + for k, v := range md { + out[k] = copyOf(v) + } + return out } // Get obtains the values for a given key. 
@@ -171,8 +175,11 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) added := make([][]string, len(md.added)+1) copy(added, md.added) - added[len(added)-1] = make([]string, len(kv)) - copy(added[len(added)-1], kv) + kvCopy := make([]string, 0, len(kv)) + for i := 0; i < len(kv); i += 2 { + kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) + } + added[len(added)-1] = kvCopy return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/orca/call_metrics.go b/terraform/providers/google/vendor/google.golang.org/grpc/orca/call_metrics.go new file mode 100644 index 0000000000..558c7bce6a --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/orca/call_metrics.go @@ -0,0 +1,196 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package orca + +import ( + "context" + "sync" + + "google.golang.org/grpc" + grpcinternal "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca/internal" + "google.golang.org/protobuf/proto" +) + +// CallMetricsRecorder allows a service method handler to record per-RPC +// metrics. It contains all utilization-based metrics from +// ServerMetricsRecorder as well as additional request cost metrics. 
+type CallMetricsRecorder interface { + ServerMetricsRecorder + + // SetRequestCost sets the relevant server metric. + SetRequestCost(name string, val float64) + // DeleteRequestCost deletes the relevant server metric to prevent it + // from being sent. + DeleteRequestCost(name string) + + // SetNamedMetric sets the relevant server metric. + SetNamedMetric(name string, val float64) + // DeleteNamedMetric deletes the relevant server metric to prevent it + // from being sent. + DeleteNamedMetric(name string) +} + +type callMetricsRecorderCtxKey struct{} + +// CallMetricsRecorderFromContext returns the RPC-specific custom metrics +// recorder embedded in the provided RPC context. +// +// Returns nil if no custom metrics recorder is found in the provided context, +// which will be the case when custom metrics reporting is not enabled. +func CallMetricsRecorderFromContext(ctx context.Context) CallMetricsRecorder { + rw, ok := ctx.Value(callMetricsRecorderCtxKey{}).(*recorderWrapper) + if !ok { + return nil + } + return rw.recorder() +} + +// recorderWrapper is a wrapper around a CallMetricsRecorder to ensure that +// concurrent calls to CallMetricsRecorderFromContext() results in only one +// allocation of the underlying metrics recorder, while also allowing for lazy +// initialization of the recorder itself. +type recorderWrapper struct { + once sync.Once + r CallMetricsRecorder + smp ServerMetricsProvider +} + +func (rw *recorderWrapper) recorder() CallMetricsRecorder { + rw.once.Do(func() { + rw.r = newServerMetricsRecorder() + }) + return rw.r +} + +// setTrailerMetadata adds a trailer metadata entry with key being set to +// `internal.TrailerMetadataKey` and value being set to the binary-encoded +// orca.OrcaLoadReport protobuf message. +// +// This function is called from the unary and streaming interceptors defined +// above. Any errors encountered here are not propagated to the caller because +// they are ignored there. 
Hence we simply log any errors encountered here at +// warning level, and return nothing. +func (rw *recorderWrapper) setTrailerMetadata(ctx context.Context) { + var sm *ServerMetrics + if rw.smp != nil { + sm = rw.smp.ServerMetrics() + sm.merge(rw.r.ServerMetrics()) + } else { + sm = rw.r.ServerMetrics() + } + + b, err := proto.Marshal(sm.toLoadReportProto()) + if err != nil { + logger.Warningf("Failed to marshal load report: %v", err) + return + } + if err := grpc.SetTrailer(ctx, metadata.Pairs(internal.TrailerMetadataKey, string(b))); err != nil { + logger.Warningf("Failed to set trailer metadata: %v", err) + } +} + +var joinServerOptions = grpcinternal.JoinServerOptions.(func(...grpc.ServerOption) grpc.ServerOption) + +// CallMetricsServerOption returns a server option which enables the reporting +// of per-RPC custom backend metrics for unary and streaming RPCs. +// +// Server applications interested in injecting custom backend metrics should +// pass the server option returned from this function as the first argument to +// grpc.NewServer(). +// +// Subsequently, server RPC handlers can retrieve a reference to the RPC +// specific custom metrics recorder [CallMetricsRecorder] to be used, via a call +// to CallMetricsRecorderFromContext(), and inject custom metrics at any time +// during the RPC lifecycle. +// +// The injected custom metrics will be sent as part of trailer metadata, as a +// binary-encoded [ORCA LoadReport] protobuf message, with the metadata key +// being set be "endpoint-load-metrics-bin". +// +// If a non-nil ServerMetricsProvider is provided, the gRPC server will +// transmit the metrics it provides, overwritten by any per-RPC metrics given +// to the CallMetricsRecorder. A ServerMetricsProvider is typically obtained +// by calling NewServerMetricsRecorder. 
+// +// [ORCA LoadReport]: https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15 +func CallMetricsServerOption(smp ServerMetricsProvider) grpc.ServerOption { + return joinServerOptions(grpc.ChainUnaryInterceptor(unaryInt(smp)), grpc.ChainStreamInterceptor(streamInt(smp))) +} + +func unaryInt(smp ServerMetricsProvider) func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + // We don't allocate the metric recorder here. It will be allocated the + // first time the user calls CallMetricsRecorderFromContext(). + rw := &recorderWrapper{smp: smp} + ctxWithRecorder := newContextWithRecorderWrapper(ctx, rw) + + resp, err := handler(ctxWithRecorder, req) + + // It is safe to access the underlying metric recorder inside the wrapper at + // this point, as the user's RPC handler is done executing, and therefore + // there will be no more calls to CallMetricsRecorderFromContext(), which is + // where the metric recorder is lazy allocated. + if rw.r != nil { + rw.setTrailerMetadata(ctx) + } + return resp, err + } +} + +func streamInt(smp ServerMetricsProvider) func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + // We don't allocate the metric recorder here. It will be allocated the + // first time the user calls CallMetricsRecorderFromContext(). 
+ rw := &recorderWrapper{smp: smp} + ws := &wrappedStream{ + ServerStream: ss, + ctx: newContextWithRecorderWrapper(ss.Context(), rw), + } + + err := handler(srv, ws) + + // It is safe to access the underlying metric recorder inside the wrapper at + // this point, as the user's RPC handler is done executing, and therefore + // there will be no more calls to CallMetricsRecorderFromContext(), which is + // where the metric recorder is lazy allocated. + if rw.r != nil { + rw.setTrailerMetadata(ss.Context()) + } + return err + } +} + +func newContextWithRecorderWrapper(ctx context.Context, r *recorderWrapper) context.Context { + return context.WithValue(ctx, callMetricsRecorderCtxKey{}, r) +} + +// wrappedStream wraps the grpc.ServerStream received by the streaming +// interceptor. Overrides only the Context() method to return a context which +// contains a reference to the CallMetricsRecorder corresponding to this +// stream. +type wrappedStream struct { + grpc.ServerStream + ctx context.Context +} + +func (w *wrappedStream) Context() context.Context { + return w.ctx +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/orca/internal/internal.go b/terraform/providers/google/vendor/google.golang.org/grpc/orca/internal/internal.go new file mode 100644 index 0000000000..35b899d9e8 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/orca/internal/internal.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains orca-internal code, for testing purposes and to +// avoid polluting the godoc of the top-level orca package. +package internal + +import ( + "errors" + "fmt" + + ibackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// AllowAnyMinReportingInterval prevents clamping of the MinReportingInterval +// configured via ServiceOptions, to a minimum of 30s. +// +// For testing purposes only. +var AllowAnyMinReportingInterval interface{} // func(*ServiceOptions) + +// DefaultBackoffFunc is used by the producer to control its backoff behavior. +// +// For testing purposes only. +var DefaultBackoffFunc = ibackoff.DefaultExponential.Backoff + +// TrailerMetadataKey is the key in which the per-call backend metrics are +// transmitted. +const TrailerMetadataKey = "endpoint-load-metrics-bin" + +// ToLoadReport unmarshals a binary encoded [ORCA LoadReport] protobuf message +// from md and returns the corresponding struct. The load report is expected to +// be stored as the value for key "endpoint-load-metrics-bin". +// +// If no load report was found in the provided metadata, if multiple load +// reports are found, or if the load report found cannot be parsed, an error is +// returned. 
+// +// [ORCA LoadReport]: (https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15) +func ToLoadReport(md metadata.MD) (*v3orcapb.OrcaLoadReport, error) { + vs := md.Get(TrailerMetadataKey) + if len(vs) == 0 { + return nil, nil + } + if len(vs) != 1 { + return nil, errors.New("multiple orca load reports found in provided metadata") + } + ret := new(v3orcapb.OrcaLoadReport) + if err := proto.Unmarshal([]byte(vs[0]), ret); err != nil { + return nil, fmt.Errorf("failed to unmarshal load report found in metadata: %v", err) + } + return ret, nil +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/orca/orca.go b/terraform/providers/google/vendor/google.golang.org/grpc/orca/orca.go new file mode 100644 index 0000000000..771db36af1 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/orca/orca.go @@ -0,0 +1,60 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package orca implements Open Request Cost Aggregation, which is an open +// standard for request cost aggregation and reporting by backends and the +// corresponding aggregation of such reports by L7 load balancers (such as +// Envoy) on the data plane. In a proxyless world with gRPC enabled +// applications, aggregation of such reports will be done by the gRPC client. 
+// +// # Experimental +// +// Notice: All APIs is this package are EXPERIMENTAL and may be changed or +// removed in a later release. +package orca + +import ( + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca/internal" +) + +var logger = grpclog.Component("orca-backend-metrics") + +// loadParser implements the Parser interface defined in `internal/balancerload` +// package. This interface is used by the client stream to parse load reports +// sent by the server in trailer metadata. The parsed loads are then sent to +// balancers via balancer.DoneInfo. +// +// The grpc package cannot directly call toLoadReport() as that would cause an +// import cycle. Hence this roundabout method is used. +type loadParser struct{} + +func (loadParser) Parse(md metadata.MD) interface{} { + lr, err := internal.ToLoadReport(md) + if err != nil { + logger.Infof("Parse failed: %v", err) + } + if lr == nil && logger.V(2) { + logger.Infof("Missing ORCA load report data") + } + return lr +} + +func init() { + balancerload.SetParser(loadParser{}) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/orca/producer.go b/terraform/providers/google/vendor/google.golang.org/grpc/orca/producer.go new file mode 100644 index 0000000000..ce108aad65 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/orca/producer.go @@ -0,0 +1,241 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package orca + +import ( + "context" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/orca/internal" + "google.golang.org/grpc/status" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" + v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" + "google.golang.org/protobuf/types/known/durationpb" +) + +type producerBuilder struct{} + +// Build constructs and returns a producer and its cleanup function +func (*producerBuilder) Build(cci interface{}) (balancer.Producer, func()) { + p := &producer{ + client: v3orcaservicegrpc.NewOpenRcaServiceClient(cci.(grpc.ClientConnInterface)), + intervals: make(map[time.Duration]int), + listeners: make(map[OOBListener]struct{}), + backoff: internal.DefaultBackoffFunc, + } + return p, func() { + <-p.stopped + } +} + +var producerBuilderSingleton = &producerBuilder{} + +// OOBListener is used to receive out-of-band load reports as they arrive. +type OOBListener interface { + // OnLoadReport is called when a load report is received. + OnLoadReport(*v3orcapb.OrcaLoadReport) +} + +// OOBListenerOptions contains options to control how an OOBListener is called. +type OOBListenerOptions struct { + // ReportInterval specifies how often to request the server to provide a + // load report. May be provided less frequently if the server requires a + // longer interval, or may be provided more frequently if another + // subscriber requests a shorter interval. + ReportInterval time.Duration +} + +// RegisterOOBListener registers an out-of-band load report listener on sc. +// Any OOBListener may only be registered once per subchannel at a time. The +// returned stop function must be called when no longer needed. 
Do not +// register a single OOBListener more than once per SubConn. +func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOptions) (stop func()) { + pr, close := sc.GetOrBuildProducer(producerBuilderSingleton) + p := pr.(*producer) + + p.registerListener(l, opts.ReportInterval) + + // TODO: When we can register for SubConn state updates, automatically call + // stop() on SHUTDOWN. + + // If stop is called multiple times, prevent it from having any effect on + // subsequent calls. + return grpcsync.OnceFunc(func() { + p.unregisterListener(l, opts.ReportInterval) + close() + }) +} + +type producer struct { + client v3orcaservicegrpc.OpenRcaServiceClient + + // backoff is called between stream attempts to determine how long to delay + // to avoid overloading a server experiencing problems. The attempt count + // is incremented when stream errors occur and is reset when the stream + // reports a result. + backoff func(int) time.Duration + + mu sync.Mutex + intervals map[time.Duration]int // map from interval time to count of listeners requesting that time + listeners map[OOBListener]struct{} // set of registered listeners + minInterval time.Duration + stop func() // stops the current run goroutine + stopped chan struct{} // closed when the run goroutine exits +} + +// registerListener adds the listener and its requested report interval to the +// producer. +func (p *producer) registerListener(l OOBListener, interval time.Duration) { + p.mu.Lock() + defer p.mu.Unlock() + + p.listeners[l] = struct{}{} + p.intervals[interval]++ + if len(p.listeners) == 1 || interval < p.minInterval { + p.minInterval = interval + p.updateRunLocked() + } +} + +// registerListener removes the listener and its requested report interval to +// the producer. 
+func (p *producer) unregisterListener(l OOBListener, interval time.Duration) { + p.mu.Lock() + defer p.mu.Unlock() + + delete(p.listeners, l) + p.intervals[interval]-- + if p.intervals[interval] == 0 { + delete(p.intervals, interval) + + if p.minInterval == interval { + p.recomputeMinInterval() + p.updateRunLocked() + } + } +} + +// recomputeMinInterval sets p.minInterval to the minimum key's value in +// p.intervals. +func (p *producer) recomputeMinInterval() { + first := true + for interval := range p.intervals { + if first || interval < p.minInterval { + p.minInterval = interval + first = false + } + } +} + +// updateRunLocked is called whenever the run goroutine needs to be started / +// stopped / restarted due to: 1. the initial listener being registered, 2. the +// final listener being unregistered, or 3. the minimum registered interval +// changing. +func (p *producer) updateRunLocked() { + if p.stop != nil { + p.stop() + p.stop = nil + } + if len(p.listeners) > 0 { + var ctx context.Context + ctx, p.stop = context.WithCancel(context.Background()) + p.stopped = make(chan struct{}) + go p.run(ctx, p.stopped, p.minInterval) + } +} + +// run manages the ORCA OOB stream on the subchannel. +func (p *producer) run(ctx context.Context, done chan struct{}, interval time.Duration) { + defer close(done) + + backoffAttempt := 0 + backoffTimer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-backoffTimer.C: + case <-ctx.Done(): + return + } + + resetBackoff, err := p.runStream(ctx, interval) + + if resetBackoff { + backoffTimer.Reset(0) + backoffAttempt = 0 + } else { + backoffTimer.Reset(p.backoff(backoffAttempt)) + backoffAttempt++ + } + + switch { + case err == nil: + // No error was encountered; restart the stream. + case ctx.Err() != nil: + // Producer was stopped; exit immediately and without logging an + // error. + return + case status.Code(err) == codes.Unimplemented: + // Unimplemented; do not retry. 
+ logger.Error("Server doesn't support ORCA OOB load reporting protocol; not listening for load reports.") + return + case status.Code(err) == codes.Unavailable, status.Code(err) == codes.Canceled: + // TODO: these codes should ideally log an error, too, but for now + // we receive them when shutting down the ClientConn (Unavailable + // if the stream hasn't started yet, and Canceled if it happens + // mid-stream). Once we can determine the state or ensure the + // producer is stopped before the stream ends, we can log an error + // when it's not a natural shutdown. + default: + // Log all other errors. + logger.Error("Received unexpected stream error:", err) + } + } +} + +// runStream runs a single stream on the subchannel and returns the resulting +// error, if any, and whether or not the run loop should reset the backoff +// timer to zero or advance it. +func (p *producer) runStream(ctx context.Context, interval time.Duration) (resetBackoff bool, err error) { + streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := p.client.StreamCoreMetrics(streamCtx, &v3orcaservicepb.OrcaLoadReportRequest{ + ReportInterval: durationpb.New(interval), + }) + if err != nil { + return false, err + } + + for { + report, err := stream.Recv() + if err != nil { + return resetBackoff, err + } + resetBackoff = true + p.mu.Lock() + for l := range p.listeners { + l.OnLoadReport(report) + } + p.mu.Unlock() + } +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/orca/server_metrics.go b/terraform/providers/google/vendor/google.golang.org/grpc/orca/server_metrics.go new file mode 100644 index 0000000000..f2cdb9b0b2 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/orca/server_metrics.go @@ -0,0 +1,351 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package orca + +import ( + "sync" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// ServerMetrics is the data returned from a server to a client to describe the +// current state of the server and/or the cost of a request when used per-call. +type ServerMetrics struct { + CPUUtilization float64 // CPU utilization: [0, inf); unset=-1 + MemUtilization float64 // Memory utilization: [0, 1.0]; unset=-1 + AppUtilization float64 // Application utilization: [0, inf); unset=-1 + QPS float64 // queries per second: [0, inf); unset=-1 + EPS float64 // errors per second: [0, inf); unset=-1 + + // The following maps must never be nil. + + Utilization map[string]float64 // Custom fields: [0, 1.0] + RequestCost map[string]float64 // Custom fields: [0, inf); not sent OOB + NamedMetrics map[string]float64 // Custom fields: [0, inf); not sent OOB +} + +// toLoadReportProto dumps sm as an OrcaLoadReport proto. +func (sm *ServerMetrics) toLoadReportProto() *v3orcapb.OrcaLoadReport { + ret := &v3orcapb.OrcaLoadReport{ + Utilization: sm.Utilization, + RequestCost: sm.RequestCost, + NamedMetrics: sm.NamedMetrics, + } + if sm.CPUUtilization != -1 { + ret.CpuUtilization = sm.CPUUtilization + } + if sm.MemUtilization != -1 { + ret.MemUtilization = sm.MemUtilization + } + if sm.AppUtilization != -1 { + ret.ApplicationUtilization = sm.AppUtilization + } + if sm.QPS != -1 { + ret.RpsFractional = sm.QPS + } + if sm.EPS != -1 { + ret.Eps = sm.EPS + } + return ret +} + +// merge merges o into sm, overwriting any values present in both. 
+func (sm *ServerMetrics) merge(o *ServerMetrics) { + mergeMap(sm.Utilization, o.Utilization) + mergeMap(sm.RequestCost, o.RequestCost) + mergeMap(sm.NamedMetrics, o.NamedMetrics) + if o.CPUUtilization != -1 { + sm.CPUUtilization = o.CPUUtilization + } + if o.MemUtilization != -1 { + sm.MemUtilization = o.MemUtilization + } + if o.AppUtilization != -1 { + sm.AppUtilization = o.AppUtilization + } + if o.QPS != -1 { + sm.QPS = o.QPS + } + if o.EPS != -1 { + sm.EPS = o.EPS + } +} + +func mergeMap(a, b map[string]float64) { + for k, v := range b { + a[k] = v + } +} + +// ServerMetricsRecorder allows for recording and providing out of band server +// metrics. +type ServerMetricsRecorder interface { + ServerMetricsProvider + + // SetCPUUtilization sets the CPU utilization server metric. Must be + // greater than zero. + SetCPUUtilization(float64) + // DeleteCPUUtilization deletes the CPU utilization server metric to + // prevent it from being sent. + DeleteCPUUtilization() + + // SetMemoryUtilization sets the memory utilization server metric. Must be + // in the range [0, 1]. + SetMemoryUtilization(float64) + // DeleteMemoryUtilization deletes the memory utilization server metric to + // prevent it from being sent. + DeleteMemoryUtilization() + + // SetApplicationUtilization sets the application utilization server + // metric. Must be greater than zero. + SetApplicationUtilization(float64) + // DeleteApplicationUtilization deletes the application utilization server + // metric to prevent it from being sent. + DeleteApplicationUtilization() + + // SetQPS sets the Queries Per Second server metric. Must be greater than + // zero. + SetQPS(float64) + // DeleteQPS deletes the Queries Per Second server metric to prevent it + // from being sent. + DeleteQPS() + + // SetEPS sets the Errors Per Second server metric. Must be greater than + // zero. + SetEPS(float64) + // DeleteEPS deletes the Errors Per Second server metric to prevent it from + // being sent. 
+ DeleteEPS() + + // SetNamedUtilization sets the named utilization server metric for the + // name provided. val must be in the range [0, 1]. + SetNamedUtilization(name string, val float64) + // DeleteNamedUtilization deletes the named utilization server metric for + // the name provided to prevent it from being sent. + DeleteNamedUtilization(name string) +} + +type serverMetricsRecorder struct { + mu sync.Mutex // protects state + state *ServerMetrics // the current metrics +} + +// NewServerMetricsRecorder returns an in-memory store for ServerMetrics and +// allows for safe setting and retrieving of ServerMetrics. Also implements +// ServerMetricsProvider for use with NewService. +func NewServerMetricsRecorder() ServerMetricsRecorder { + return newServerMetricsRecorder() +} + +func newServerMetricsRecorder() *serverMetricsRecorder { + return &serverMetricsRecorder{ + state: &ServerMetrics{ + CPUUtilization: -1, + MemUtilization: -1, + AppUtilization: -1, + QPS: -1, + EPS: -1, + Utilization: make(map[string]float64), + RequestCost: make(map[string]float64), + NamedMetrics: make(map[string]float64), + }, + } +} + +// ServerMetrics returns a copy of the current ServerMetrics. +func (s *serverMetricsRecorder) ServerMetrics() *ServerMetrics { + s.mu.Lock() + defer s.mu.Unlock() + return &ServerMetrics{ + CPUUtilization: s.state.CPUUtilization, + MemUtilization: s.state.MemUtilization, + AppUtilization: s.state.AppUtilization, + QPS: s.state.QPS, + EPS: s.state.EPS, + Utilization: copyMap(s.state.Utilization), + RequestCost: copyMap(s.state.RequestCost), + NamedMetrics: copyMap(s.state.NamedMetrics), + } +} + +func copyMap(m map[string]float64) map[string]float64 { + ret := make(map[string]float64, len(m)) + for k, v := range m { + ret[k] = v + } + return ret +} + +// SetCPUUtilization records a measurement for the CPU utilization metric. 
+func (s *serverMetricsRecorder) SetCPUUtilization(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring CPU Utilization value out of range: %v", val) + } + return + } + s.mu.Lock() + defer s.mu.Unlock() + s.state.CPUUtilization = val +} + +// DeleteCPUUtilization deletes the relevant server metric to prevent it from +// being sent. +func (s *serverMetricsRecorder) DeleteCPUUtilization() { + s.mu.Lock() + defer s.mu.Unlock() + s.state.CPUUtilization = -1 +} + +// SetMemoryUtilization records a measurement for the memory utilization metric. +func (s *serverMetricsRecorder) SetMemoryUtilization(val float64) { + if val < 0 || val > 1 { + if logger.V(2) { + logger.Infof("Ignoring Memory Utilization value out of range: %v", val) + } + return + } + s.mu.Lock() + defer s.mu.Unlock() + s.state.MemUtilization = val +} + +// DeleteMemoryUtilization deletes the relevant server metric to prevent it +// from being sent. +func (s *serverMetricsRecorder) DeleteMemoryUtilization() { + s.mu.Lock() + defer s.mu.Unlock() + s.state.MemUtilization = -1 +} + +// SetApplicationUtilization records a measurement for a generic utilization +// metric. +func (s *serverMetricsRecorder) SetApplicationUtilization(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring Application Utilization value out of range: %v", val) + } + return + } + s.mu.Lock() + defer s.mu.Unlock() + s.state.AppUtilization = val +} + +// DeleteApplicationUtilization deletes the relevant server metric to prevent +// it from being sent. +func (s *serverMetricsRecorder) DeleteApplicationUtilization() { + s.mu.Lock() + defer s.mu.Unlock() + s.state.AppUtilization = -1 +} + +// SetQPS records a measurement for the QPS metric. 
+func (s *serverMetricsRecorder) SetQPS(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring QPS value out of range: %v", val) + } + return + } + s.mu.Lock() + defer s.mu.Unlock() + s.state.QPS = val +} + +// DeleteQPS deletes the relevant server metric to prevent it from being sent. +func (s *serverMetricsRecorder) DeleteQPS() { + s.mu.Lock() + defer s.mu.Unlock() + s.state.QPS = -1 +} + +// SetEPS records a measurement for the EPS metric. +func (s *serverMetricsRecorder) SetEPS(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring EPS value out of range: %v", val) + } + return + } + s.mu.Lock() + defer s.mu.Unlock() + s.state.EPS = val +} + +// DeleteEPS deletes the relevant server metric to prevent it from being sent. +func (s *serverMetricsRecorder) DeleteEPS() { + s.mu.Lock() + defer s.mu.Unlock() + s.state.EPS = -1 +} + +// SetNamedUtilization records a measurement for a utilization metric uniquely +// identifiable by name. +func (s *serverMetricsRecorder) SetNamedUtilization(name string, val float64) { + if val < 0 || val > 1 { + if logger.V(2) { + logger.Infof("Ignoring Named Utilization value out of range: %v", val) + } + return + } + s.mu.Lock() + defer s.mu.Unlock() + s.state.Utilization[name] = val +} + +// DeleteNamedUtilization deletes any previously recorded measurement for a +// utilization metric uniquely identifiable by name. +func (s *serverMetricsRecorder) DeleteNamedUtilization(name string) { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.state.Utilization, name) +} + +// SetRequestCost records a measurement for a utilization metric uniquely +// identifiable by name. +func (s *serverMetricsRecorder) SetRequestCost(name string, val float64) { + s.mu.Lock() + defer s.mu.Unlock() + s.state.RequestCost[name] = val +} + +// DeleteRequestCost deletes any previously recorded measurement for a +// utilization metric uniquely identifiable by name. 
+func (s *serverMetricsRecorder) DeleteRequestCost(name string) { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.state.RequestCost, name) +} + +// SetNamedMetric records a measurement for a utilization metric uniquely +// identifiable by name. +func (s *serverMetricsRecorder) SetNamedMetric(name string, val float64) { + s.mu.Lock() + defer s.mu.Unlock() + s.state.NamedMetrics[name] = val +} + +// DeleteNamedMetric deletes any previously recorded measurement for a +// utilization metric uniquely identifiable by name. +func (s *serverMetricsRecorder) DeleteNamedMetric(name string) { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.state.NamedMetrics, name) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/orca/service.go b/terraform/providers/google/vendor/google.golang.org/grpc/orca/service.go new file mode 100644 index 0000000000..7461a6b05a --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/orca/service.go @@ -0,0 +1,163 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package orca + +import ( + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + ointernal "google.golang.org/grpc/orca/internal" + "google.golang.org/grpc/status" + + v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" + v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" +) + +func init() { + ointernal.AllowAnyMinReportingInterval = func(so *ServiceOptions) { + so.allowAnyMinReportingInterval = true + } + internal.ORCAAllowAnyMinReportingInterval = ointernal.AllowAnyMinReportingInterval +} + +// minReportingInterval is the absolute minimum value supported for +// out-of-band metrics reporting from the ORCA service implementation +// provided by the orca package. +const minReportingInterval = 30 * time.Second + +// Service provides an implementation of the OpenRcaService as defined in the +// [ORCA] service protos. Instances of this type must be created via calls to +// Register() or NewService(). +// +// Server applications can use the SetXxx() and DeleteXxx() methods to record +// measurements corresponding to backend metrics, which eventually get pushed to +// clients who have initiated the StreamCoreMetrics streaming RPC. +// +// [ORCA]: https://github.com/cncf/xds/blob/main/xds/service/orca/v3/orca.proto +type Service struct { + v3orcaservicegrpc.UnimplementedOpenRcaServiceServer + + // Minimum reporting interval, as configured by the user, or the default. + minReportingInterval time.Duration + + smProvider ServerMetricsProvider +} + +// ServiceOptions contains options to configure the ORCA service implementation. +type ServiceOptions struct { + // ServerMetricsProvider is the provider to be used by the service for + // reporting OOB server metrics to clients. Typically obtained via + // NewServerMetricsRecorder. This field is required. 
+ ServerMetricsProvider ServerMetricsProvider + + // MinReportingInterval sets the lower bound for how often out-of-band + // metrics are reported on the streaming RPC initiated by the client. If + // unspecified, negative or less than the default value of 30s, the default + // is used. Clients may request a higher value as part of the + // StreamCoreMetrics streaming RPC. + MinReportingInterval time.Duration + + // Allow a minReportingInterval which is less than the default of 30s. + // Used for testing purposes only. + allowAnyMinReportingInterval bool +} + +// A ServerMetricsProvider provides ServerMetrics upon request. +type ServerMetricsProvider interface { + // ServerMetrics returns the current set of server metrics. It should + // return a read-only, immutable copy of the data that is active at the + // time of the call. + ServerMetrics() *ServerMetrics +} + +// NewService creates a new ORCA service implementation configured using the +// provided options. +func NewService(opts ServiceOptions) (*Service, error) { + // The default minimum supported reporting interval value can be overridden + // for testing purposes through the orca internal package. + if opts.ServerMetricsProvider == nil { + return nil, fmt.Errorf("ServerMetricsProvider not specified") + } + if !opts.allowAnyMinReportingInterval { + if opts.MinReportingInterval < 0 || opts.MinReportingInterval < minReportingInterval { + opts.MinReportingInterval = minReportingInterval + } + } + service := &Service{ + minReportingInterval: opts.MinReportingInterval, + smProvider: opts.ServerMetricsProvider, + } + return service, nil +} + +// Register creates a new ORCA service implementation configured using the +// provided options and registers the same on the provided grpc Server. +func Register(s *grpc.Server, opts ServiceOptions) error { + // TODO(https://github.com/cncf/xds/issues/41): replace *grpc.Server with + // grpc.ServiceRegistrar when possible. 
+ service, err := NewService(opts) + if err != nil { + return err + } + v3orcaservicegrpc.RegisterOpenRcaServiceServer(s, service) + return nil +} + +// determineReportingInterval determines the reporting interval for out-of-band +// metrics. If the reporting interval is not specified in the request, or is +// negative or is less than the configured minimum (via +// ServiceOptions.MinReportingInterval), the latter is used. Else the value from +// the incoming request is used. +func (s *Service) determineReportingInterval(req *v3orcaservicepb.OrcaLoadReportRequest) time.Duration { + if req.GetReportInterval() == nil { + return s.minReportingInterval + } + dur := req.GetReportInterval().AsDuration() + if dur < s.minReportingInterval { + logger.Warningf("Received reporting interval %q is less than configured minimum: %v. Using minimum", dur, s.minReportingInterval) + return s.minReportingInterval + } + return dur +} + +func (s *Service) sendMetricsResponse(stream v3orcaservicegrpc.OpenRcaService_StreamCoreMetricsServer) error { + return stream.Send(s.smProvider.ServerMetrics().toLoadReportProto()) +} + +// StreamCoreMetrics streams custom backend metrics injected by the server +// application. 
+func (s *Service) StreamCoreMetrics(req *v3orcaservicepb.OrcaLoadReportRequest, stream v3orcaservicegrpc.OpenRcaService_StreamCoreMetricsServer) error { + ticker := time.NewTicker(s.determineReportingInterval(req)) + defer ticker.Stop() + + for { + if err := s.sendMetricsResponse(stream); err != nil { + return err + } + // Send a response containing the currently recorded metrics + select { + case <-stream.Context().Done(): + return status.Error(codes.Canceled, "Stream has ended.") + case <-ticker.C: + } + } +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/picker_wrapper.go b/terraform/providers/google/vendor/google.golang.org/grpc/picker_wrapper.go index c525dc070f..02f9759512 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/picker_wrapper.go @@ -36,6 +36,7 @@ import ( type pickerWrapper struct { mu sync.Mutex done bool + idle bool blockingCh chan struct{} picker balancer.Picker } @@ -47,7 +48,11 @@ func newPickerWrapper() *pickerWrapper { // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done { + if pw.done || pw.idle { + // There is a small window where a picker update from the LB policy can + // race with the channel going to idle mode. If the picker is idle here, + // it is because the channel asked it to do so, and therefore it is safe + // to ignore the update from the LB policy. pw.mu.Unlock() return } @@ -63,10 +68,8 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { // - wraps the done function in the passed in result to increment the calls // failed or calls succeeded channelz counter before invoking the actual // done function. 
-func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) { - acw.mu.Lock() - ac := acw.ac - acw.mu.Unlock() +func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { + ac := acbw.ac ac.incrCallsStarted() done := result.Done result.Done = func(b balancer.DoneInfo) { @@ -152,14 +155,14 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } - acw, ok := pickResult.SubConn.(*acBalancerWrapper) + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t := acw.getAddrConn().getReadyTransport(); t != nil { + if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { - doneChannelzWrapper(acw, &pickResult) + doneChannelzWrapper(acbw, &pickResult) return t, pickResult, nil } return t, pickResult, nil @@ -187,6 +190,25 @@ func (pw *pickerWrapper) close() { close(pw.blockingCh) } +func (pw *pickerWrapper) enterIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.idle = true +} + +func (pw *pickerWrapper) exitIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.blockingCh = make(chan struct{}) + pw.idle = false +} + // dropError is a wrapper error that indicates the LB policy wishes to drop the // RPC and not retry it. 
type dropError struct { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/pickfirst.go b/terraform/providers/google/vendor/google.golang.org/grpc/pickfirst.go index fc91b4d266..abe266b021 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/pickfirst.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/pickfirst.go @@ -19,11 +19,15 @@ package grpc import ( + "encoding/json" "errors" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/serviceconfig" ) // PickFirstBalancerName is the name of the pick_first balancer. @@ -43,10 +47,28 @@ func (*pickfirstBuilder) Name() string { return PickFirstBalancerName } +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of addresses received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &pfConfig{} + if err := json.Unmarshal(js, cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + type pickfirstBalancer struct { state connectivity.State cc balancer.ClientConn subConn balancer.SubConn + cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { @@ -69,7 +91,8 @@ func (b *pickfirstBalancer) ResolverError(err error) { } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - if len(state.ResolverState.Addresses) == 0 { + addrs := state.ResolverState.Addresses + if len(addrs) == 0 { // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. 
if b.subConn != nil { @@ -82,12 +105,23 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState return balancer.ErrBadResolverState } + if state.BalancerConfig != nil { + cfg, ok := state.BalancerConfig.(*pfConfig) + if !ok { + return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } + b.cfg = cfg + } + + if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) if err != nil { if logger.V(2) { logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) @@ -119,7 +153,6 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b } return } - b.state = state.ConnectivityState if state.ConnectivityState == connectivity.Shutdown { b.subConn = nil return @@ -132,11 +165,21 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, }) case connectivity.Connecting: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. + return + } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. 
+ b.subConn.Connect() + return + } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &idlePicker{subConn: subConn}, @@ -147,6 +190,7 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b Picker: &picker{err: state.ConnectionError}, }) } + b.state = state.ConnectivityState } func (b *pickfirstBalancer) Close() { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index ee4b04caf0..d54c07676d 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha @@ -39,11 +39,14 @@ const ( ) // The message sent by the client when calling ServerReflectionInfo method. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServerReflectionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` // To use reflection service, the client should set one of the following // fields in message_request. 
The server distinguishes requests by their @@ -91,6 +94,7 @@ func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetHost() string { if x != nil { return x.Host @@ -105,6 +109,7 @@ func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_ return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileByFilename() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { return x.FileByFilename @@ -112,6 +117,7 @@ func (x *ServerReflectionRequest) GetFileByFilename() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileContainingSymbol() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { return x.FileContainingSymbol @@ -119,6 +125,7 @@ func (x *ServerReflectionRequest) GetFileContainingSymbol() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { return x.FileContainingExtension @@ -126,6 +133,7 @@ func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { return x.AllExtensionNumbersOfType @@ -133,6 +141,7 @@ func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetListServices() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { return x.ListServices @@ -146,6 +155,8 @@ type isServerReflectionRequest_MessageRequest interface { type ServerReflectionRequest_FileByFilename struct { // Find a proto file by the file name. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` } @@ -153,12 +164,16 @@ type ServerReflectionRequest_FileContainingSymbol struct { // Find the proto file that declares the given fully-qualified symbol name. // This field should be a fully-qualified symbol name // (e.g. .[.] or .). + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` } type ServerReflectionRequest_FileContainingExtension struct { // Find the proto file which defines an extension extending the given // message type with the given field number. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` } @@ -171,12 +186,16 @@ type ServerReflectionRequest_AllExtensionNumbersOfType struct { // StatusCode::UNIMPLEMENTED if it's not implemented. 
// This field should be a fully-qualified type name. The format is // . + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` } type ServerReflectionRequest_ListServices struct { // List the full names of registered services. The content will not be // checked. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` } @@ -193,14 +212,19 @@ func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRe // The type name and extension number sent by the client when requesting // file_containing_extension. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ExtensionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Fully-qualified type name. The format should be . - ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` - ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+ ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` } func (x *ExtensionRequest) Reset() { @@ -235,6 +259,7 @@ func (*ExtensionRequest) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionRequest) GetContainingType() string { if x != nil { return x.ContainingType @@ -242,6 +267,7 @@ func (x *ExtensionRequest) GetContainingType() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionRequest) GetExtensionNumber() int32 { if x != nil { return x.ExtensionNumber @@ -250,12 +276,16 @@ func (x *ExtensionRequest) GetExtensionNumber() int32 { } // The message sent by the server to answer ServerReflectionInfo method. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServerReflectionResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` // The server set one of the following fields according to the message_request // in the request. 
@@ -301,6 +331,7 @@ func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetValidHost() string { if x != nil { return x.ValidHost @@ -308,6 +339,7 @@ func (x *ServerReflectionResponse) GetValidHost() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { if x != nil { return x.OriginalRequest @@ -322,6 +354,7 @@ func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionRespon return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { return x.FileDescriptorResponse @@ -329,6 +362,7 @@ func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorRe return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { return x.AllExtensionNumbersResponse @@ -336,6 +370,7 @@ func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNu return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { return x.ListServicesResponse @@ -343,6 +378,7 @@ func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceRespons return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { return x.ErrorResponse @@ -361,21 +397,29 @@ type ServerReflectionResponse_FileDescriptorResponse struct { // FileDescriptorResponse message to encapsulate the repeated fields. // The reflection service is allowed to avoid sending FileDescriptorProtos // that were previously sent in response to earlier requests in the stream. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` } type ServerReflectionResponse_AllExtensionNumbersResponse struct { // This message is used to answer all_extension_numbers_of_type requst. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` } type ServerReflectionResponse_ListServicesResponse struct { // This message is used to answer list_services request. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` } type ServerReflectionResponse_ErrorResponse struct { // This message is used when an error occurs. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` } @@ -392,6 +436,8 @@ func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_Messag // Serialized FileDescriptorProto messages sent by the server answering // a file_by_filename, file_containing_symbol, or file_containing_extension // request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type FileDescriptorResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -400,6 +446,8 @@ type FileDescriptorResponse struct { // Serialized FileDescriptorProto messages. We avoid taking a dependency on // descriptor.proto, which uses proto2 only features, by making them opaque // bytes instead. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` } @@ -435,6 +483,7 @@ func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { if x != nil { return x.FileDescriptorProto @@ -444,6 +493,8 @@ func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { // A list of extension numbers sent by the server answering // all_extension_numbers_of_type request. 
+// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ExtensionNumberResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -451,7 +502,10 @@ type ExtensionNumberResponse struct { // Full name of the base type, including the package name. The format // is . - BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` } @@ -487,6 +541,7 @@ func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionNumberResponse) GetBaseTypeName() string { if x != nil { return x.BaseTypeName @@ -494,6 +549,7 @@ func (x *ExtensionNumberResponse) GetBaseTypeName() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { if x != nil { return x.ExtensionNumber @@ -502,6 +558,8 @@ func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { } // A list of ServiceResponse sent by the server answering list_services request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
type ListServiceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -509,6 +567,8 @@ type ListServiceResponse struct { // The information of each service may be expanded in the future, so we use // ServiceResponse message to encapsulate it. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` } @@ -544,6 +604,7 @@ func (*ListServiceResponse) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ListServiceResponse) GetService() []*ServiceResponse { if x != nil { return x.Service @@ -553,6 +614,8 @@ func (x *ListServiceResponse) GetService() []*ServiceResponse { // The information of a single service used by ListServiceResponse to answer // list_services request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServiceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -560,6 +623,8 @@ type ServiceResponse struct { // Full name of a registered service, including its package name. The format // is . + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -595,6 +660,7 @@ func (*ServiceResponse) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServiceResponse) GetName() string { if x != nil { return x.Name @@ -603,13 +669,18 @@ func (x *ServiceResponse) GetName() string { } // The error code and error message sent by the server when an error occurs. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ErrorResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // This field uses the error codes defined in grpc::StatusCode. - ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` } @@ -645,6 +716,7 @@ func (*ErrorResponse) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ErrorResponse) GetErrorCode() int32 { if x != nil { return x.ErrorCode @@ -652,6 +724,7 @@ func (x *ErrorResponse) GetErrorCode() int32 { return 0 } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ErrorResponse) GetErrorMessage() string { if x != nil { return x.ErrorMessage diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index ed54ab1378..367a029be6 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha @@ -36,6 +36,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" +) + // ServerReflectionClient is the client API for ServerReflection service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -54,7 +58,7 @@ func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClie } func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) 
if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/resolver/resolver.go b/terraform/providers/google/vendor/google.golang.org/grpc/resolver/resolver.go index 654e9ce69f..353c10b69a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,13 +22,13 @@ package resolver import ( "context" + "fmt" "net" "net/url" "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -41,8 +41,9 @@ var ( // TODO(bar) install dns resolver in init(){}. -// Register registers the resolver builder to the resolver map. b.Scheme will be -// used as the scheme registered with this builder. +// Register registers the resolver builder to the resolver map. b.Scheme will +// be used as the scheme registered with this builder. The registry is case +// sensitive, and schemes should not contain any uppercase characters. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Resolvers are @@ -123,7 +124,7 @@ type Address struct { Attributes *attributes.Attributes // BalancerAttributes contains arbitrary data about this address intended - // for consumption by the LB policy. These attribes do not affect SubConn + // for consumption by the LB policy. These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. BalancerAttributes *attributes.Attributes @@ -150,7 +151,17 @@ func (a Address) Equal(o Address) bool { // String returns JSON formatted string representation of the address. 
func (a Address) String() string { - return pretty.ToJSON(a) + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() } // BuildOptions includes additional information for the builder to create @@ -203,6 +214,15 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. + // + // If an error is returned, the resolver should try to resolve the + // target again. The resolver should use a backoff timer to prevent + // overloading the server with requests. If a resolver is certain that + // reresolving will not change the result, e.g. because it is + // a watch-based resolver, returned errors can be ignored. + // + // If the resolved State is the same as the last reported one, calling + // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. The ClientConn will notify the load balancer and begin calling @@ -280,8 +300,10 @@ type Builder interface { // gRPC dial calls Build synchronously, and fails if the returned error is // not nil. Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) - // Scheme returns the scheme supported by this resolver. - // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + // Scheme returns the scheme supported by this resolver. Scheme is defined + // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned + // string should not contain uppercase characters, as they will not match + // the parsed target's scheme as defined in RFC 3986. 
Scheme() string } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/terraform/providers/google/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 05a9d4e0ba..b408b3688f 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,11 +19,11 @@ package grpc import ( + "context" "strings" "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" @@ -31,129 +31,192 @@ import ( "google.golang.org/grpc/serviceconfig" ) +// resolverStateUpdater wraps the single method used by ccResolverWrapper to +// report a state update from the actual resolver implementation. +type resolverStateUpdater interface { + updateResolverState(s resolver.State, err error) error +} + // ccResolverWrapper is a wrapper on top of cc for resolvers. // It implements resolver.ClientConn interface. type ccResolverWrapper struct { - cc *ClientConn - resolverMu sync.Mutex - resolver resolver.Resolver - done *grpcsync.Event - curState resolver.State + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc resolverStateUpdater + channelzID *channelz.Identifier + ignoreServiceConfig bool + opts ccResolverWrapperOpts + serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. + serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). + + // All incoming (resolver --> gRPC) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled on the serializer. + // Fields accessed *only* in these serializer callbacks, can therefore be + // accessed without a mutex. 
+ curState resolver.State + + // mu guards access to the below fields. + mu sync.Mutex + closed bool + resolver resolver.Resolver // Accessed only from outgoing calls. +} - incomingMu sync.Mutex // Synchronizes all the incoming calls. +// ccResolverWrapperOpts wraps the arguments to be passed when creating a new +// ccResolverWrapper. +type ccResolverWrapperOpts struct { + target resolver.Target // User specified dial target to resolve. + builder resolver.Builder // Resolver builder to use. + bOpts resolver.BuildOptions // Resolver build options to use. + channelzID *channelz.Identifier // Channelz identifier for the channel. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { +func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { + ctx, cancel := context.WithCancel(context.Background()) ccr := &ccResolverWrapper{ - cc: cc, - done: grpcsync.NewEvent(), - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - rbo := resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - } - - var err error - // We need to hold the lock here while we assign to the ccr.resolver field - // to guard against a data race caused by the following code path, - // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up - // accessing ccr.resolver which is being assigned here. 
- ccr.resolverMu.Lock() - defer ccr.resolverMu.Unlock() - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + cc: cc, + channelzID: opts.channelzID, + ignoreServiceConfig: opts.bOpts.DisableServiceConfig, + opts: opts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + + // Cannot hold the lock at build time because the resolver can send an + // update or error inline and these incoming calls grab the lock to schedule + // a callback in the serializer. + r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) if err != nil { + cancel() return nil, err } + + // Any error reported by the resolver at build time that leads to a + // re-resolution request from the balancer is dropped by grpc until we + // return from this function. So, we don't have to handle pending resolveNow + // requests here. + ccr.mu.Lock() + ccr.resolver = r + ccr.mu.Unlock() + return ccr, nil } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.resolverMu.Lock() - if !ccr.done.HasFired() { - ccr.resolver.ResolveNow(o) + ccr.mu.Lock() + defer ccr.mu.Unlock() + + // ccr.resolver field is set only after the call to Build() returns. But in + // the process of building, the resolver may send an error update which when + // propagated to the balancer may result in a re-resolution request. + if ccr.closed || ccr.resolver == nil { + return } - ccr.resolverMu.Unlock() + ccr.resolver.ResolveNow(o) } func (ccr *ccResolverWrapper) close() { - ccr.resolverMu.Lock() - ccr.resolver.Close() - ccr.done.Fire() - ccr.resolverMu.Unlock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + return + } + + channelz.Info(logger, ccr.channelzID, "Closing the name resolver") + + // Close the serializer to ensure that no more calls from the resolver are + // handled, before actually closing the resolver. + ccr.serializerCancel() + ccr.closed = true + r := ccr.resolver + ccr.mu.Unlock() + + // Give enqueued callbacks a chance to finish. 
+ <-ccr.serializer.Done + + // Spawn a goroutine to close the resolver (since it may block trying to + // cleanup all allocated resources) and return early. + go r.Close() +} + +// serializerScheduleLocked is a convenience method to schedule a function to be +// run on the serializer while holding ccr.mu. +func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { + ccr.mu.Lock() + ccr.serializer.Schedule(f) + ccr.mu.Unlock() } +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { + errCh := make(chan error, 1) + ok := ccr.serializer.Schedule(func(context.Context) { + ccr.addChannelzTraceEvent(s) + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + errCh <- balancer.ErrBadResolverState + return + } + errCh <- nil + }) + if !ok { + // The only time when Schedule() fail to add the callback to the + // serializer is when the serializer is closed, and this happens only + // when the resolver wrapper is closed. return nil } - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - return balancer.ErrBadResolverState - } - return nil + return <-errCh } +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. 
func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) + }) } -// NewAddress is called by the resolver implementation to send addresses to gRPC. +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.serializerScheduleLocked(func(_ context.Context) { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) + }) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. 
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.cc.dopts.disableServiceConfig { - channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.ignoreServiceConfig { + channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) + }) } +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { return parseServiceConfig(scJSON) } +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. 
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string var oldSC, newSC *ServiceConfig @@ -172,5 +235,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) + channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/rpc_util.go b/terraform/providers/google/vendor/google.golang.org/grpc/rpc_util.go index cb7020ebec..2030736a30 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/rpc_util.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/rpc_util.go @@ -159,6 +159,7 @@ type callInfo struct { contentSubtype string codec baseCodec maxRetryRPCBufferSize int + onFinish []func(err error) } func defaultCallInfo() *callInfo { @@ -295,6 +296,41 @@ func (o FailFastCallOption) before(c *callInfo) error { } func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +// OnFinish returns a CallOption that configures a callback to be called when +// the call completes. The error passed to the callback is the status of the +// RPC, and may be nil. The onFinish callback provided will only be called once +// by gRPC. This is mainly used to be used by streaming interceptors, to be +// notified when the RPC completes along with information about the status of +// the RPC. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func OnFinish(onFinish func(err error)) CallOption { + return OnFinishCallOption{ + OnFinish: onFinish, + } +} + +// OnFinishCallOption is CallOption that indicates a callback to be called when +// the call completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type OnFinishCallOption struct { + OnFinish func(error) +} + +func (o OnFinishCallOption) before(c *callInfo) error { + c.onFinish = append(c.onFinish, o.OnFinish) + return nil +} + +func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} + // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default // 4MB. @@ -658,12 +694,13 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ - Client: client, - Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - SentTime: t, + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + CompressedLength: len(payload), + SentTime: t, } } @@ -684,7 +721,7 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool } type payloadInfo struct { - wireLength int // The compressed length got from wire. + compressedLength int // The compressed length got from wire. 
uncompressedBytes []byte } @@ -694,7 +731,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, err } if payInfo != nil { - payInfo.wireLength = len(d) + payInfo.compressedLength = len(d) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/server.go b/terraform/providers/google/vendor/google.golang.org/grpc/server.go index d5a6e78be4..81969e7c15 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/server.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/server.go @@ -43,8 +43,8 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -74,10 +74,10 @@ func init() { srv.drainServerTransports(addr) } internal.AddGlobalServerOptions = func(opt ...ServerOption) { - extraServerOptions = append(extraServerOptions, opt...) + globalServerOptions = append(globalServerOptions, opt...) 
} internal.ClearGlobalServerOptions = func() { - extraServerOptions = nil + globalServerOptions = nil } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption @@ -145,7 +145,7 @@ type Server struct { channelzID *channelz.Identifier czData *channelzData - serverWorkerChannels []chan *serverWorkerData + serverWorkerChannel chan *serverWorkerData } type serverOptions struct { @@ -183,7 +183,7 @@ var defaultServerOptions = serverOptions{ writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } -var extraServerOptions []ServerOption +var globalServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -560,47 +560,45 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { const serverWorkerResetThreshold = 1 << 16 // serverWorkers blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows different requests to be +// data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // // [1] https://github.com/golang/go/issues/18138 -func (s *Server) serverWorker(ch chan *serverWorkerData) { - // To make sure all server workers don't reset at the same time, choose a - // random number of iterations before resetting. 
- threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) - for completed := 0; completed < threshold; completed++ { - data, ok := <-ch +func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { + data, ok := <-s.serverWorkerChannel if !ok { return } - s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) - data.wg.Done() + s.handleSingleStream(data) } - go s.serverWorker(ch) + go s.serverWorker() } -// initServerWorkers creates worker goroutines and channels to process incoming +func (s *Server) handleSingleStream(data *serverWorkerData) { + defer data.wg.Done() + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) +} + +// initServerWorkers creates worker goroutines and a channel to process incoming // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { - s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + s.serverWorkerChannel = make(chan *serverWorkerData) for i := uint32(0); i < s.opts.numServerWorkers; i++ { - s.serverWorkerChannels[i] = make(chan *serverWorkerData) - go s.serverWorker(s.serverWorkerChannels[i]) + go s.serverWorker() } } func (s *Server) stopServerWorkers() { - for i := uint32(0); i < s.opts.numServerWorkers; i++ { - close(s.serverWorkerChannels[i]) - } + close(s.serverWorkerChannel) } // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. 
func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions - for _, o := range extraServerOptions { + for _, o := range globalServerOptions { o.apply(&opts) } for _, o := range opt { @@ -897,7 +895,7 @@ func (s *Server) drainServerTransports(addr string) { s.mu.Lock() conns := s.conns[addr] for st := range conns { - st.Drain() + st.Drain("") } s.mu.Unlock() } @@ -945,26 +943,21 @@ func (s *Server) serveStreams(st transport.ServerTransport) { defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup - var roundRobinCounter uint32 st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) if s.opts.numServerWorkers > 0 { data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + case s.serverWorkerChannel <- data: + return default: // If all stream workers are busy, fallback to the default code path. - go func() { - s.handleStream(st, stream, s.traceInfo(st, stream)) - wg.Done() - }() } - } else { - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() } + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -1053,7 +1046,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { if s.drain { // Transport added after we drained our existing conns: drain it // immediately. - st.Drain() + st.Drain("") } if s.conns[addr] == nil { @@ -1252,7 +1245,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. logEntry.PeerAddr = peer.Addr } for _, binlog := range binlogs { - binlog.Log(logEntry) + binlog.Log(ctx, logEntry) } } @@ -1263,6 +1256,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
var comp, decomp encoding.Compressor var cp Compressor var dc Decompressor + var sendCompressorName string // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. @@ -1283,12 +1277,18 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { cp = s.opts.cp - stream.SetSendCompress(cp.Type()) + sendCompressorName = cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. comp = encoding.GetCompressor(rc) if comp != nil { - stream.SetSendCompress(rc) + sendCompressorName = comp.Name() + } + } + + if sendCompressorName != "" { + if err := stream.SetSendCompress(sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } @@ -1312,11 +1312,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - WireLength: payInfo.wireLength + headerLen, - Data: d, - Length: len(d), + RecvTime: time.Now(), + Payload: v, + Length: len(d), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Data: d, }) } if len(binlogs) != 0 { @@ -1324,7 +1325,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(cm) + binlog.Log(stream.Context(), cm) } } if trInfo != nil { @@ -1357,7 +1358,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Header: h, } for _, binlog := range binlogs { - binlog.Log(sh) + binlog.Log(stream.Context(), sh) } } st := &binarylog.ServerTrailer{ @@ -1365,7 +1366,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } return appErr @@ -1375,6 +1376,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } opts := &transport.Options{Last: true} + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). @@ -1402,8 +1408,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(sh) - binlog.Log(st) + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), st) } } return err @@ -1417,8 +1423,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(sh) - binlog.Log(sm) + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), sm) } } if channelz.IsOn() { @@ -1430,17 +1436,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // TODO: Should we be logging if writing status failed here, like above? // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? 
- err = t.WriteStatus(stream, statusOK) if len(binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, } for _, binlog := range binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } - return err + return t.WriteStatus(stream, statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1574,7 +1579,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(logEntry) + binlog.Log(stream.Context(), logEntry) } } @@ -1597,12 +1602,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { ss.cp = s.opts.cp - stream.SetSendCompress(s.opts.cp.Type()) + ss.sendCompressorName = s.opts.cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. ss.comp = encoding.GetCompressor(rc) if ss.comp != nil { - stream.SetSendCompress(rc) + ss.sendCompressorName = rc + } + } + + if ss.sendCompressorName != "" { + if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } @@ -1640,16 +1651,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.SetError() ss.mu.Unlock() } - t.WriteStatus(ss.s, appStatus) if len(ss.binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } + t.WriteStatus(ss.s, appStatus) // TODO: Should we log an error from WriteStatus here and below? 
return appErr } @@ -1658,17 +1669,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } - err = t.WriteStatus(ss.s, statusOK) if len(ss.binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } - return err + return t.WriteStatus(ss.s, statusOK) } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { @@ -1846,7 +1856,7 @@ func (s *Server) GracefulStop() { if !s.drain { for _, conns := range s.conns { for st := range conns { - st.Drain() + st.Drain("graceful_stop") } } s.drain = true @@ -1935,6 +1945,60 @@ func SendHeader(ctx context.Context, md metadata.MD) error { return nil } +// SetSendCompressor sets a compressor for outbound messages from the server. +// It must not be called after any event that causes headers to be sent +// (see ServerStream.SetHeader for the complete list). Provided compressor is +// used when below conditions are met: +// +// - compressor is registered via encoding.RegisterCompressor +// - compressor name must exist in the client advertised compressor names +// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to +// get client supported compressor names. +// +// The context provided must be the context passed to the server's handler. +// It must be noted that compressor name encoding.Identity disables the +// outbound compression. +// By default, server messages will be sent using the same compressor with +// which request messages were sent. +// +// It is not safe to call SetSendCompressor concurrently with SendHeader and +// SendMsg. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func SetSendCompressor(ctx context.Context, name string) error { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return fmt.Errorf("failed to fetch the stream from the given context") + } + + if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { + return fmt.Errorf("unable to set send compressor: %w", err) + } + + return stream.SetSendCompress(name) +} + +// ClientSupportedCompressors returns compressor names advertised by the client +// via grpc-accept-encoding header. +// +// The context provided must be the context passed to the server's handler. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func ClientSupportedCompressors(ctx context.Context) ([]string, error) { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) + } + + return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil +} + // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. // @@ -1969,3 +2033,22 @@ type channelzServer struct { func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { return c.s.channelzMetric() } + +// validateSendCompressor returns an error when given compressor name cannot be +// handled by the server or the client based on the advertised compressors. 
+func validateSendCompressor(name, clientCompressors string) error { + if name == encoding.Identity { + return nil + } + + if !grpcutil.IsCompressorNameRegistered(name) { + return fmt.Errorf("compressor not registered %q", name) + } + + for _, c := range strings.Split(clientCompressors, ",") { + if c == name { + return nil // found match + } + } + return fmt.Errorf("client does not support compressor %q", name) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/service_config.go b/terraform/providers/google/vendor/google.golang.org/grpc/service_config.go index f22acace42..0df11fc098 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/service_config.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/service_config.go @@ -23,8 +23,6 @@ import ( "errors" "fmt" "reflect" - "strconv" - "strings" "time" "google.golang.org/grpc/codes" @@ -106,8 +104,8 @@ type healthCheckConfig struct { type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff string - MaxBackoff string + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -129,50 +127,6 @@ type retryThrottlingPolicy struct { TokenRatio float64 } -func parseDuration(s *string) (*time.Duration, error) { - if s == nil { - return nil, nil - } - if !strings.HasSuffix(*s, "s") { - return nil, fmt.Errorf("malformed duration %q", *s) - } - ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) - if len(ss) > 2 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. 
- hasDigits := false - var d time.Duration - if len(ss[0]) > 0 { - i, err := strconv.ParseInt(ss[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - d = time.Duration(i) * time.Second - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - f, err := strconv.ParseInt(ss[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - for i := 9; i > len(ss[1]); i-- { - f *= 10 - } - d += time.Duration(f) - hasDigits = true - } - if !hasDigits { - return nil, fmt.Errorf("malformed duration %q", *s) - } - - return &d, nil -} - type jsonName struct { Service string Method string @@ -201,7 +155,7 @@ func (j jsonName) generatePath() (string, error) { type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *string + Timeout *internalserviceconfig.Duration MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy @@ -252,15 +206,10 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { if m.Name == nil { continue } - d, err := parseDuration(m.Timeout) - if err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) - return &serviceconfig.ParseResult{Err: err} - } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: d, + Timeout: (*time.Duration)(m.Timeout), } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) @@ -312,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol if jrp == nil { return nil, nil } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || 
jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) @@ -332,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol rp := &internalserviceconfig.RetryPolicy{ MaxAttempts: jrp.MaxAttempts, - InitialBackoff: *ib, - MaxBackoff: *mb, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/stats/stats.go b/terraform/providers/google/vendor/google.golang.org/grpc/stats/stats.go index 0285dcc6a2..7a552a9b78 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/stats/stats.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/stats/stats.go @@ -67,10 +67,18 @@ type InPayload struct { Payload interface{} // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int + // RecvTime is the time when the payload is received. RecvTime time.Time } @@ -129,9 +137,15 @@ type OutPayload struct { Payload interface{} // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). 
Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int // SentTime is the time when the payload is sent. SentTime time.Time diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/status/status.go b/terraform/providers/google/vendor/google.golang.org/grpc/status/status.go index 623be39f26..53910fb7c9 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/status/status.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/status/status.go @@ -77,7 +77,9 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // // - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, the appropriate Status is returned. +// *Status`, or if err wraps a type satisfying this, the appropriate Status is +// returned. For wrapped errors, the message returned contains the entire +// err.Error() text and not just the wrapped status. // // - If err is nil, a Status is returned with codes.OK and no message. 
// @@ -88,10 +90,15 @@ func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus(), true + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { + return gs.GRPCStatus(), true + } + var gs grpcstatus + if errors.As(err, &gs) { + p := gs.GRPCStatus().Proto() + p.Message = err.Error() + return status.FromProto(p), true } return New(codes.Unknown, err.Error()), false } @@ -103,19 +110,16 @@ func Convert(err error) *Status { return s } -// Code returns the Code of the error if it is a Status error, codes.OK if err -// is nil, or codes.Unknown otherwise. +// Code returns the Code of the error if it is a Status error or if it wraps a +// Status error. If that is not the case, it returns codes.OK if err is nil, or +// codes.Unknown otherwise. func Code(err error) codes.Code { // Don't use FromError to avoid allocation of OK status. if err == nil { return codes.OK } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus().Code() - } - return codes.Unknown + + return Convert(err).Code() } // FromContextError converts a context error or wrapped context error into a diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/stream.go b/terraform/providers/google/vendor/google.golang.org/grpc/stream.go index 93231af2ac..10092685b2 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/stream.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/stream.go @@ -123,6 +123,9 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. 
SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On @@ -152,6 +155,11 @@ type ClientStream interface { // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + if err := cc.idlenessMgr.onCallBegin(); err != nil { + return nil, err + } + defer cc.idlenessMgr.onCallEnd() + // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -168,10 +176,19 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { - if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + // validate md if err := imetadata.Validate(md); err != nil { return nil, status.Error(codes.Internal, err.Error()) } + // validate added + for _, kvs := range added { + for i := 0; i < len(kvs); i += 2 { + if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } } if channelz.IsOn() { cc.incrCallsStarted() @@ -352,7 +369,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } } for _, binlog := range cs.binlogs { - binlog.Log(logEntry) + binlog.Log(cs.ctx, logEntry) } } @@ -460,7 +477,7 @@ func (a *csAttempt) newStream() error { // It is safe to overwrite the csAttempt's context here, since all state // maintained in it are local to the attempt. 
When the attempt has to be // retried, a new instance of csAttempt will be created. - if a.pickResult.Metatada != nil { + if a.pickResult.Metadata != nil { // We currently do not have a function it the metadata package which // merges given metadata with existing metadata in a context. Existing // function `AppendToOutgoingContext()` takes a variadic argument of key @@ -470,7 +487,7 @@ func (a *csAttempt) newStream() error { // in a form passable to AppendToOutgoingContext(), or create a version // of AppendToOutgoingContext() that accepts a metadata.MD. md, _ := metadata.FromOutgoingContext(a.ctx) - md = metadata.Join(md, a.pickResult.Metatada) + md = metadata.Join(md, a.pickResult.Metadata) a.ctx = metadata.NewOutgoingContext(a.ctx, md) } @@ -800,7 +817,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { } cs.serverHeaderBinlogged = true for _, binlog := range cs.binlogs { - binlog.Log(logEntry) + binlog.Log(cs.ctx, logEntry) } } return m, nil @@ -881,7 +898,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { Message: data, } for _, binlog := range cs.binlogs { - binlog.Log(cm) + binlog.Log(cs.ctx, cm) } } return err @@ -905,7 +922,7 @@ func (cs *clientStream) RecvMsg(m interface{}) error { Message: recvInfo.uncompressedBytes, } for _, binlog := range cs.binlogs { - binlog.Log(sm) + binlog.Log(cs.ctx, sm) } } if err != nil || !cs.desc.ServerStreams { @@ -926,7 +943,7 @@ func (cs *clientStream) RecvMsg(m interface{}) error { logEntry.PeerAddr = peer.Addr } for _, binlog := range cs.binlogs { - binlog.Log(logEntry) + binlog.Log(cs.ctx, logEntry) } } } @@ -953,7 +970,7 @@ func (cs *clientStream) CloseSend() error { OnClientSide: true, } for _, binlog := range cs.binlogs { - binlog.Log(chc) + binlog.Log(cs.ctx, chc) } } // We never returned an error here for reasons. 
@@ -971,6 +988,9 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true + for _, onFinish := range cs.callInfo.onFinish { + onFinish(err) + } cs.commitAttemptLocked() if cs.attempt != nil { cs.attempt.finish(err) @@ -992,7 +1012,7 @@ func (cs *clientStream) finish(err error) { OnClientSide: true, } for _, binlog := range cs.binlogs { - binlog.Log(c) + binlog.Log(cs.ctx, c) } } if err == nil { @@ -1081,9 +1101,10 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Length: len(payInfo.uncompressedBytes), }) } if channelz.IsOn() { @@ -1252,14 +1273,19 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin as.p = &parser{r: s} ac.incrCallsStarted() if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. 
go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() select { - case <-ac.ctx.Done(): + case <-acCtx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) @@ -1511,6 +1537,8 @@ type serverStream struct { comp encoding.Compressor decomp encoding.Compressor + sendCompressorName string + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo @@ -1558,7 +1586,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { } ss.serverHeaderBinlogged = true for _, binlog := range ss.binlogs { - binlog.Log(sh) + binlog.Log(ss.ctx, sh) } } return err @@ -1603,6 +1631,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } }() + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { + ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.sendCompressorName = sendCompressorsName + } + // load hdr, payload, data hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) if err != nil { @@ -1624,14 +1659,14 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } ss.serverHeaderBinlogged = true for _, binlog := range ss.binlogs { - binlog.Log(sh) + binlog.Log(ss.ctx, sh) } } sm := &binarylog.ServerMessage{ Message: data, } for _, binlog := range ss.binlogs { - binlog.Log(sm) + binlog.Log(ss.ctx, sm) } } if len(ss.statsHandler) != 0 { @@ -1679,7 +1714,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} for _, binlog := range ss.binlogs { - binlog.Log(chc) + binlog.Log(ss.ctx, chc) } } return err @@ -1695,9 +1730,10 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. 
- Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + Length: len(payInfo.uncompressedBytes), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, }) } } @@ -1706,7 +1742,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { Message: payInfo.uncompressedBytes, } for _, binlog := range ss.binlogs { - binlog.Log(cm) + binlog.Log(ss.ctx, cm) } } return nil diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/version.go b/terraform/providers/google/vendor/google.golang.org/grpc/version.go index fe552c315b..0f1f8b9b33 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/version.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.53.0" +const Version = "1.56.1" diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/vet.sh b/terraform/providers/google/vendor/google.golang.org/grpc/vet.sh index 3728aed04f..a8e4732b3d 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/vet.sh +++ b/terraform/providers/google/vendor/google.golang.org/grpc/vet.sh @@ -41,16 +41,8 @@ if [[ "$1" = "-install" ]]; then github.com/client9/misspell/cmd/misspell popd if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${TRAVIS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/travis - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 + if [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. 
PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -68,8 +60,7 @@ fi # - Check that generated proto files are up to date. if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ + make proto && git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) fi diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/csds/csds.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/csds/csds.go index 551757b800..8d03124811 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/csds/csds.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/csds/csds.go @@ -29,7 +29,6 @@ import ( "io" "sync" - "github.com/golang/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" internalgrpclog "google.golang.org/grpc/internal/grpclog" @@ -39,8 +38,6 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) @@ -126,7 +123,7 @@ func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statusp ret := &v3statuspb.ClientStatusResponse{ Config: []*v3statuspb.ClientConfig{ { - Node: nodeProtoToV3(s.xdsClient.BootstrapConfig().XDSServer.NodeProto, s.logger), + Node: s.xdsClient.BootstrapConfig().NodeProto, GenericXdsConfigs: dumpToGenericXdsConfig(dump), }, }, @@ -141,37 +138,6 @@ func (s *ClientStatusDiscoveryServer) Close() { } } -// nodeProtoToV3 converts the given proto into a 
v3.Node. n is from bootstrap -// config, it can be either v2.Node or v3.Node. -// -// If n is already a v3.Node, return it. -// If n is v2.Node, marshal and unmarshal it to v3. -// Otherwise, return nil. -// -// The default case (not v2 or v3) is nil, instead of error, because the -// resources in the response are more important than the node. The worst case is -// that the user will receive no Node info, but will still get resources. -func nodeProtoToV3(n proto.Message, logger *internalgrpclog.PrefixLogger) *v3corepb.Node { - var node *v3corepb.Node - switch nn := n.(type) { - case *v3corepb.Node: - node = nn - case *v2corepb.Node: - v2, err := proto.Marshal(nn) - if err != nil { - logger.Warningf("Failed to marshal node (%v): %v", n, err) - break - } - node = new(v3corepb.Node) - if err := proto.Unmarshal(v2, node); err != nil { - logger.Warningf("Failed to unmarshal node (%v): %v", v2, err) - } - default: - logger.Warningf("node from bootstrap is %#v, only v2.Node and v3.Node are supported", nn) - } - return node -} - func dumpToGenericXdsConfig(dump map[string]map[string]xdsresource.UpdateWithMD) []*v3statuspb.ClientConfig_GenericXdsConfig { var ret []*v3statuspb.ClientConfig_GenericXdsConfig for typeURL, updates := range dump { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go index 5bc17b03e5..20891c7a4c 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go @@ -31,20 +31,19 @@ import ( "time" "google.golang.org/grpc" - "google.golang.org/grpc/credentials/google" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/googlecloud" internalgrpclog "google.golang.org/grpc/internal/grpclog" 
"google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" - _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/structpb" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + + _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. ) const ( @@ -117,11 +116,14 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts if balancerName == "" { balancerName = tdURL } - serverConfig := &bootstrap.ServerConfig{ - ServerURI: balancerName, - Creds: grpc.WithCredentialsBundle(google.NewDefaultCredentials()), - TransportAPI: version.TransportV3, - NodeProto: newNode(<-zoneCh, <-ipv6CapableCh), + serverConfig, err := bootstrap.ServerConfigFromJSON([]byte(fmt.Sprintf(` + { + "server_uri": "%s", + "channel_creds": [{"type": "google_default"}], + "server_features": ["xds_v3", "ignore_resource_deletion"] + }`, balancerName))) + if err != nil { + return nil, fmt.Errorf("failed to build bootstrap configuration: %v", err) } config := &bootstrap.Config{ XDSServer: serverConfig, @@ -131,6 +133,7 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts XDSServer: serverConfig, }, }, + NodeProto: newNode(<-zoneCh, <-ipv6CapableCh), } // Create singleton xds client with this config. 
The xds client will be diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 4a0beab131..bcdeaf681a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -27,18 +27,16 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/internal/balancer/nop" "google.golang.org/grpc/internal/buffer" xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterresolver" - "google.golang.org/grpc/xds/internal/balancer/outlierdetection" - "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -48,7 +46,7 @@ const ( ) var ( - errBalancerClosed = errors.New("cdsBalancer is closed") + errBalancerClosed = errors.New("cds_experimental LB policy is closed") // newChildBalancer is a helper function to build a new cluster_resolver // balancer and will be overridden in unittests. @@ -76,11 +74,25 @@ type bb struct{} // Build creates a new CDS balancer with the ClientConn. 
func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(clusterresolver.Name) + if builder == nil { + // Shouldn't happen, registered through imported Cluster Resolver, + // defensive programming. + logger.Errorf("%q LB policy is needed but not registered", clusterresolver.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", clusterresolver.Name)) + } + crParser, ok := builder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported Cluster Resolver builder has this method. + logger.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name)) + } b := &cdsBalancer{ bOpts: opts, updateCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), + crParser: crParser, xdsHI: xdsinternal.NewHandshakeInfo(nil, nil), } b.logger = prefixLogger((b)) @@ -161,6 +173,7 @@ type cdsBalancer struct { logger *grpclog.PrefixLogger closed *grpcsync.Event done *grpcsync.Event + crParser balancer.ConfigParser // The certificate providers are cached here to that they can be closed when // a new provider is to be created. @@ -272,52 +285,6 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc return provider, nil } -func outlierDetectionToConfig(od *xdsresource.OutlierDetection) outlierdetection.LBConfig { // Already validated - no need to return error - if od == nil { - // "If the outlier_detection field is not set in the Cluster message, a - // "no-op" outlier_detection config will be generated, with interval set - // to the maximum possible value and all other fields unset." 
- A50 - return outlierdetection.LBConfig{ - Interval: 1<<63 - 1, - } - } - - // "if the enforcing_success_rate field is set to 0, the config - // success_rate_ejection field will be null and all success_rate_* fields - // will be ignored." - A50 - var sre *outlierdetection.SuccessRateEjection - if od.EnforcingSuccessRate != 0 { - sre = &outlierdetection.SuccessRateEjection{ - StdevFactor: od.SuccessRateStdevFactor, - EnforcementPercentage: od.EnforcingSuccessRate, - MinimumHosts: od.SuccessRateMinimumHosts, - RequestVolume: od.SuccessRateRequestVolume, - } - } - - // "If the enforcing_failure_percent field is set to 0 or null, the config - // failure_percent_ejection field will be null and all failure_percent_* - // fields will be ignored." - A50 - var fpe *outlierdetection.FailurePercentageEjection - if od.EnforcingFailurePercentage != 0 { - fpe = &outlierdetection.FailurePercentageEjection{ - Threshold: od.FailurePercentageThreshold, - EnforcementPercentage: od.EnforcingFailurePercentage, - MinimumHosts: od.FailurePercentageMinimumHosts, - RequestVolume: od.FailurePercentageRequestVolume, - } - } - - return outlierdetection.LBConfig{ - Interval: od.Interval, - BaseEjectionTime: od.BaseEjectionTime, - MaxEjectionTime: od.MaxEjectionTime, - MaxEjectionPercent: od.MaxEjectionPercent, - SuccessRateEjection: sre, - FailurePercentageEjection: fpe, - } -} - // handleWatchUpdate handles a watch update from the xDS Client. Good updates // lead to clientConn updates being invoked on the underlying cluster_resolver balancer. 
func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { @@ -327,7 +294,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { return } - b.logger.Infof("Watch update from xds-client %p, content: %+v, security config: %v", b.xdsClient, pretty.ToJSON(update.updates), pretty.ToJSON(update.securityCfg)) + b.logger.Infof("Received Cluster resource contains content: %s, security config: %s", pretty.ToJSON(update.updates), pretty.ToJSON(update.securityCfg)) // Process the security config from the received update before building the // child policy or forwarding the update to it. We do this because the child @@ -338,7 +305,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { // If the security config is invalid, for example, if the provider // instance is not found in the bootstrap config, we need to put the // channel in transient failure. - b.logger.Warningf("Invalid security config update from xds-client %p: %v", b.xdsClient, err) + b.logger.Warningf("Received Cluster resource contains invalid security config: %v", err) b.handleErrorFromUpdate(err, false) return } @@ -388,35 +355,49 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { DNSHostname: cu.DNSHostName, } default: - b.logger.Infof("unexpected cluster type %v when handling update from cluster handler", cu.ClusterType) + b.logger.Infof("Unexpected cluster type %v when handling update from cluster handler", cu.ClusterType) } if envconfig.XDSOutlierDetection { - dms[i].OutlierDetection = outlierDetectionToConfig(cu.OutlierDetection) + odJSON := cu.OutlierDetection + // "In the cds LB policy, if the outlier_detection field is not set in + // the Cluster resource, a "no-op" outlier_detection config will be + // generated in the corresponding DiscoveryMechanism config, with all + // fields unset." 
- A50 + if odJSON == nil { + // This will pick up top level defaults in Cluster Resolver + // ParseConfig, but sre and fpe will be nil still so still a + // "no-op" config. + odJSON = json.RawMessage(`{}`) + } + dms[i].OutlierDetection = odJSON } } + + // Prepare Cluster Resolver config, marshal into JSON, and then Parse it to + // get configuration to send downward to Cluster Resolver. lbCfg := &clusterresolver.LBConfig{ DiscoveryMechanisms: dms, + XDSLBPolicy: update.lbPolicy, + } + crLBCfgJSON, err := json.Marshal(lbCfg) + if err != nil { + // Shouldn't happen, since we just prepared struct. + b.logger.Errorf("cds_balancer: error marshalling prepared config: %v", lbCfg) + return } - // lbPolicy is set only when the policy is ringhash. The default (when it's - // not set) is roundrobin. And similarly, we only need to set XDSLBPolicy - // for ringhash (it also defaults to roundrobin). - if lbp := update.lbPolicy; lbp != nil { - lbCfg.XDSLBPolicy = &internalserviceconfig.BalancerConfig{ - Name: ringhash.Name, - Config: &ringhash.LBConfig{ - MinRingSize: lbp.MinimumRingSize, - MaxRingSize: lbp.MaximumRingSize, - }, - } + var sc serviceconfig.LoadBalancingConfig + if sc, err = b.crParser.ParseConfig(crLBCfgJSON); err != nil { + b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", crLBCfgJSON, err) + return } ccState := balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), - BalancerConfig: lbCfg, + BalancerConfig: sc, } if err := b.childLB.UpdateClientConnState(ccState); err != nil { - b.logger.Errorf("xds: cluster_resolver balancer.UpdateClientConnState(%+v) returned error: %v", ccState, err) + b.logger.Errorf("Encountered error when sending config {%+v} to child policy: %v", ccState, err) } } @@ -426,7 +407,10 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { func (b *cdsBalancer) run() { for { select { - case u := <-b.updateCh.Get(): + case u, ok := <-b.updateCh.Get(): + 
if !ok { + return + } b.updateCh.Load() switch update := u.(type) { case *ccUpdate: @@ -435,13 +419,13 @@ func (b *cdsBalancer) run() { // SubConn updates are passthrough and are simply handed over to // the underlying cluster_resolver balancer. if b.childLB == nil { - b.logger.Errorf("xds: received scUpdate {%+v} with no cluster_resolver balancer", update) + b.logger.Errorf("Received SubConn update with no child policy: %+v", update) break } b.childLB.UpdateSubConnState(update.subConn, update.state) case exitIdle: if b.childLB == nil { - b.logger.Errorf("xds: received ExitIdle with no child balancer") + b.logger.Errorf("Received ExitIdle with no child policy") break } // This implementation assumes the child balancer supports @@ -466,6 +450,7 @@ func (b *cdsBalancer) run() { if b.cachedIdentity != nil { b.cachedIdentity.Close() } + b.updateCh.Close() b.logger.Infof("Shutdown") b.done.Fire() return @@ -515,7 +500,7 @@ func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) { // xdsResolver. func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) error { if b.closed.HasFired() { - b.logger.Warningf("xds: received ClientConnState {%+v} after cdsBalancer was closed", state) + b.logger.Errorf("Received balancer config after close") return errBalancerClosed } @@ -526,18 +511,18 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro } b.xdsClient = c } + b.logger.Infof("Received balancer config update: %s", pretty.ToJSON(state.BalancerConfig)) - b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(state.BalancerConfig)) // The errors checked here should ideally never happen because the // ServiceConfig in this case is prepared by the xdsResolver and is not // something that is received on the wire. 
lbCfg, ok := state.BalancerConfig.(*lbConfig) if !ok { - b.logger.Warningf("xds: unexpected LoadBalancingConfig type: %T", state.BalancerConfig) + b.logger.Warningf("Received unexpected balancer config type: %T", state.BalancerConfig) return balancer.ErrBadResolverState } if lbCfg.ClusterName == "" { - b.logger.Warningf("xds: no clusterName found in LoadBalancingConfig: %+v", lbCfg) + b.logger.Warningf("Received balancer config with no cluster name") return balancer.ErrBadResolverState } b.updateCh.Put(&ccUpdate{clusterName: lbCfg.ClusterName}) @@ -547,7 +532,7 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro // ResolverError handles errors reported by the xdsResolver. func (b *cdsBalancer) ResolverError(err error) { if b.closed.HasFired() { - b.logger.Warningf("xds: received resolver error {%v} after cdsBalancer was closed", err) + b.logger.Warningf("Received resolver error after close: %v", err) return } b.updateCh.Put(&ccUpdate{err: err}) @@ -556,7 +541,7 @@ func (b *cdsBalancer) ResolverError(err error) { // UpdateSubConnState handles subConn updates from gRPC. 
func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { if b.closed.HasFired() { - b.logger.Warningf("xds: received subConn update {%v, %v} after cdsBalancer was closed", sc, state) + b.logger.Warningf("Received subConn update after close: {%v, %v}", sc, state) return } b.updateCh.Put(&scUpdate{subConn: sc, state: state}) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go index 234511a45d..aa2d9674a7 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -17,6 +17,7 @@ package cdsbalancer import ( + "encoding/json" "errors" "sync" @@ -38,13 +39,9 @@ var ( type clusterHandlerUpdate struct { // securityCfg is the Security Config from the top (root) cluster. securityCfg *xdsresource.SecurityConfig - // lbPolicy is the lb policy from the top (root) cluster. - // - // Currently, we only support roundrobin or ringhash, and since roundrobin - // does need configs, this is only set to the ringhash config, if the policy - // is ringhash. In the future, if we support more policies, we can make this - // an interface, and set it to config of the other policies. - lbPolicy *xdsresource.ClusterLBPolicyRingHash + + // lbPolicy is the the child of the cluster_impl policy, for all priorities. + lbPolicy json.RawMessage // updates is a list of ClusterUpdates from all the leaf clusters. 
updates []xdsresource.ClusterUpdate @@ -123,6 +120,7 @@ func (ch *clusterHandler) constructClusterUpdate() { case <-ch.updateChannel: default: } + ch.updateChannel <- clusterHandlerUpdate{ securityCfg: ch.createdClusters[ch.rootClusterName].clusterUpdate.SecurityCfg, lbPolicy: ch.createdClusters[ch.rootClusterName].clusterUpdate.LBPolicy, diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go index b79b941ec7..e1a18ae338 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -186,7 +186,7 @@ func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { } else { // Old is not nil, new is not nil, compare string values, if // different, stop old and start new. 
- if *b.lrsServer != *newConfig.LoadReportingServer { + if !b.lrsServer.Equal(newConfig.LoadReportingServer) { b.lrsServer = newConfig.LoadReportingServer stopOldLoadReport = true startNewLoadReport = true @@ -333,6 +333,7 @@ func (b *clusterImplBalancer) Close() { b.childLB = nil b.childState = balancer.State{} } + b.pickerUpdateCh.Close() <-b.done.Done() b.logger.Infof("Shutdown") } @@ -506,7 +507,10 @@ func (b *clusterImplBalancer) run() { defer b.done.Fire() for { select { - case update := <-b.pickerUpdateCh.Get(): + case update, ok := <-b.pickerUpdateCh.Get(): + if !ok { + return + } b.pickerUpdateCh.Load() b.mu.Lock() if b.closed.HasFired() { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go index 360fc44c9e..3f354424f2 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go @@ -160,7 +160,7 @@ func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { d.loadStore.CallFinished(lIDStr, info.Err) load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport) - if !ok { + if !ok || load == nil { return } d.loadStore.CallServerLoad(lIDStr, serverLoadCPUName, load.CpuUtilization) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go index b4a37f60c0..5eadd1ac1d 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -16,28 +16,30 @@ * */ -// Package 
clusterresolver contains EDS balancer implementation. +// Package clusterresolver contains the implementation of the +// xds_cluster_resolver_experimental LB policy which resolves endpoint addresses +// using a list of one or more discovery mechanisms. package clusterresolver import ( "encoding/json" "errors" "fmt" - "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/nop" "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/priority" - "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -62,13 +64,13 @@ type bb struct{} func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { priorityBuilder := balancer.Get(priority.Name) if priorityBuilder == nil { - logger.Errorf("priority balancer is needed but not registered") - return nil + logger.Errorf("%q LB policy is needed but not registered", priority.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", priority.Name)) } priorityConfigParser, ok := priorityBuilder.(balancer.ConfigParser) if !ok { - logger.Errorf("priority balancer builder is not a config parser") - return nil + logger.Errorf("%q LB policy does not implement a config parser", priority.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", priority.Name)) } b := &clusterResolverBalancer{ @@ -97,26 
+99,58 @@ func (bb) Name() string { return Name } -func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg LBConfig - if err := json.Unmarshal(c, &cfg); err != nil { - return nil, fmt.Errorf("unable to unmarshal balancer config %s into cluster-resolver config, error: %v", string(c), err) +func (bb) ParseConfig(j json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + odBuilder := balancer.Get(outlierdetection.Name) + if odBuilder == nil { + // Shouldn't happen, registered through imported Outlier Detection, + // defensive programming. + return nil, fmt.Errorf("%q LB policy is needed but not registered", outlierdetection.Name) } - if lbp := cfg.XDSLBPolicy; lbp != nil && !strings.EqualFold(lbp.Name, roundrobin.Name) && !strings.EqualFold(lbp.Name, ringhash.Name) { - return nil, fmt.Errorf("unsupported child policy with name %q, not one of {%q,%q}", lbp.Name, roundrobin.Name, ringhash.Name) + odParser, ok := odBuilder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported Outlier Detection builder has this method. + return nil, fmt.Errorf("%q LB policy does not implement a config parser", outlierdetection.Name) + } + + var cfg *LBConfig + if err := json.Unmarshal(j, &cfg); err != nil { + return nil, fmt.Errorf("unable to unmarshal balancer config %s into cluster-resolver config, error: %v", string(j), err) } - return &cfg, nil + + if envconfig.XDSOutlierDetection { + for i, dm := range cfg.DiscoveryMechanisms { + lbCfg, err := odParser.ParseConfig(dm.OutlierDetection) + if err != nil { + return nil, fmt.Errorf("error parsing Outlier Detection config %v: %v", dm.OutlierDetection, err) + } + odCfg, ok := lbCfg.(*outlierdetection.LBConfig) + if !ok { + // Shouldn't happen, Parser built at build time with Outlier Detection + // builder pulled from gRPC LB Registry. 
+ return nil, fmt.Errorf("odParser returned config with unexpected type %T: %v", lbCfg, lbCfg) + } + cfg.DiscoveryMechanisms[i].outlierDetection = *odCfg + } + } + if err := json.Unmarshal(cfg.XDSLBPolicy, &cfg.xdsLBPolicy); err != nil { + // This will never occur, valid configuration is emitted from the xDS + // Client. Validity is already checked in the xDS Client, however, this + // double validation is present because Unmarshalling and Validating are + // coupled into one json.Unmarshal operation). We will switch this in + // the future to two separate operations. + return nil, fmt.Errorf("error unmarshaling xDS LB Policy: %v", err) + } + return cfg, nil } -// ccUpdate wraps a clientConn update received from gRPC (pushed from the -// xdsResolver). +// ccUpdate wraps a clientConn update received from gRPC. type ccUpdate struct { state balancer.ClientConnState err error } // scUpdate wraps a subConn update received from gRPC. This is directly passed -// on to the child balancer. +// on to the child policy. type scUpdate struct { subConn balancer.SubConn state balancer.SubConnState @@ -124,10 +158,8 @@ type scUpdate struct { type exitIdle struct{} -// clusterResolverBalancer manages xdsClient and the actual EDS balancer implementation that -// does load balancing. -// -// It currently has only an clusterResolverBalancer. Later, we may add fallback. +// clusterResolverBalancer resolves endpoint addresses using a list of one or +// more discovery mechanisms. type clusterResolverBalancer struct { cc balancer.ClientConn bOpts balancer.BuildOptions @@ -150,22 +182,21 @@ type clusterResolverBalancer struct { watchUpdateReceived bool } -// handleClientConnUpdate handles a ClientConnUpdate received from gRPC. Good -// updates lead to registration of EDS and DNS watches. Updates with error lead -// to cancellation of existing watch and propagation of the same error to the -// child balancer. +// handleClientConnUpdate handles a ClientConnUpdate received from gRPC. 
+// +// A good update results in creation of endpoint resolvers for the configured +// discovery mechanisms. An update with an error results in cancellation of any +// existing endpoint resolution and propagation of the same to the child policy. func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { - // We first handle errors, if any, and then proceed with handling the - // update, only if the status quo has changed. if err := update.err; err != nil { b.handleErrorFromUpdate(err, true) return } - b.logger.Infof("Receive update from resolver, balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) + b.logger.Infof("Received new balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) cfg, _ := update.state.BalancerConfig.(*LBConfig) if cfg == nil { - b.logger.Warningf("xds: unexpected LoadBalancingConfig type: %T", update.state.BalancerConfig) + b.logger.Warningf("Ignoring unsupported balancer configuration of type: %T", update.state.BalancerConfig) return } @@ -173,23 +204,19 @@ func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { b.configRaw = update.state.ResolverState.ServiceConfig b.resourceWatcher.updateMechanisms(cfg.DiscoveryMechanisms) + // The child policy is created only after all configured discovery + // mechanisms have been successfully returned endpoints. If that is not the + // case, we return early. if !b.watchUpdateReceived { - // If update was not received, wait for it. return } - // If eds resp was received before this, the child policy was created. We - // need to generate a new balancer config and send it to the child, because - // certain fields (unrelated to EDS watch) might have changed. - if err := b.updateChildConfig(); err != nil { - b.logger.Warningf("failed to update child policy config: %v", err) - } + b.updateChildConfig() } -// handleWatchUpdate handles a watch update from the xDS Client. 
Good updates -// lead to clientConn updates being invoked on the underlying child balancer. -func (b *clusterResolverBalancer) handleWatchUpdate(update *resourceUpdate) { +// handleResourceUpdate handles a resource update or error from the resource +// resolver by propagating the same to the child LB policy. +func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) { if err := update.err; err != nil { - b.logger.Warningf("Watch error from xds-client %p: %v", b.xdsClient, err) b.handleErrorFromUpdate(err, false) return } @@ -197,84 +224,84 @@ func (b *clusterResolverBalancer) handleWatchUpdate(update *resourceUpdate) { b.watchUpdateReceived = true b.priorities = update.priorities - // A new EDS update triggers new child configs (e.g. different priorities - // for the priority balancer), and new addresses (the endpoints come from - // the EDS response). - if err := b.updateChildConfig(); err != nil { - b.logger.Warningf("failed to update child policy's balancer config: %v", err) - } + // An update from the resource resolver contains resolved endpoint addresses + // for all configured discovery mechanisms ordered by priority. This is used + // to generate configuration for the priority LB policy. + b.updateChildConfig() } -// updateChildConfig builds a balancer config from eb's cached eds resp and -// service config, and sends that to the child balancer. Note that it also -// generates the addresses, because the endpoints come from the EDS resp. +// updateChildConfig builds child policy configuration using endpoint addresses +// returned by the resource resolver and child policy configuration provided by +// parent LB policy. // -// If child balancer doesn't already exist, one will be created. -func (b *clusterResolverBalancer) updateChildConfig() error { - // Child was build when the first EDS resp was received, so we just build - // the config and addresses. +// A child policy is created if one doesn't already exist. 
The newly built +// configuration is then pushed to the child policy. +func (b *clusterResolverBalancer) updateChildConfig() { if b.child == nil { b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) } - childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, b.config.XDSLBPolicy) + childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, &b.config.xdsLBPolicy) if err != nil { - return fmt.Errorf("failed to build priority balancer config: %v", err) + b.logger.Warningf("Failed to build child policy config: %v", err) + return } childCfg, err := b.priorityConfigParser.ParseConfig(childCfgBytes) if err != nil { - return fmt.Errorf("failed to parse generated priority balancer config, this should never happen because the config is generated: %v", err) + b.logger.Warningf("Failed to parse child policy config. This should never happen because the config was generated: %v", err) + return } - b.logger.Infof("build balancer config: %v", pretty.ToJSON(childCfg)) - return b.child.UpdateClientConnState(balancer.ClientConnState{ + b.logger.Infof("Built child policy config: %v", pretty.ToJSON(childCfg)) + + if err := b.child.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: addrs, ServiceConfig: b.configRaw, Attributes: b.attrsWithClient, }, BalancerConfig: childCfg, - }) + }); err != nil { + b.logger.Warningf("Failed to push config to child policy: %v", err) + } } -// handleErrorFromUpdate handles both the error from parent ClientConn (from CDS -// balancer) and the error from xds client (from the watcher). fromParent is -// true if error is from parent ClientConn. -// -// If the error is connection error, it should be handled for fallback purposes. -// -// If the error is resource-not-found: -// - If it's from CDS balancer (shows as a resolver error), it means LDS or CDS -// resources were removed. The EDS watch should be canceled. -// - If it's from xds client, it means EDS resource were removed. 
The EDS -// watcher should keep watching. -// In both cases, the sub-balancers will be receive the error. +// handleErrorFromUpdate handles errors from the parent LB policy and endpoint +// resolvers. fromParent is true if error is from the parent LB policy. In both +// cases, the error is propagated to the child policy, if one exists. func (b *clusterResolverBalancer) handleErrorFromUpdate(err error, fromParent bool) { b.logger.Warningf("Received error: %v", err) + + // A resource-not-found error from the parent LB policy means that the LDS + // or CDS resource was removed. This should result in endpoint resolvers + // being stopped here. + // + // A resource-not-found error from the EDS endpoint resolver means that the + // EDS resource was removed. No action needs to be taken for this, and we + // should continue watching the same EDS resource. if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { - // This is an error from the parent ClientConn (can be the parent CDS - // balancer), and is a resource-not-found error. This means the resource - // (can be either LDS or CDS) was removed. Stop the EDS watch. b.resourceWatcher.stop() } + if b.child != nil { b.child.ResolverError(err) - } else { - // If eds balancer was never created, fail the RPCs with errors. - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: base.NewErrPicker(err), - }) + return } - + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) } -// run is a long-running goroutine which handles all updates from gRPC and -// xdsClient. All methods which are invoked directly by gRPC or xdsClient simply -// push an update onto a channel which is read and acted upon right here. +// run is a long-running goroutine that handles updates from gRPC and endpoint +// resolvers. 
The methods handling the individual updates simply push them onto +// a channel which is read and acted upon from here. func (b *clusterResolverBalancer) run() { for { select { - case u := <-b.updateCh.Get(): + case u, ok := <-b.updateCh.Get(): + if !ok { + return + } b.updateCh.Load() switch update := u.(type) { case *ccUpdate: @@ -283,7 +310,7 @@ func (b *clusterResolverBalancer) run() { // SubConn updates are simply handed over to the underlying // child balancer. if b.child == nil { - b.logger.Errorf("xds: received scUpdate {%+v} with no child balancer", update) + b.logger.Errorf("Received a SubConn update {%+v} with no child policy", update) break } b.child.UpdateSubConnState(update.subConn, update.state) @@ -301,9 +328,9 @@ func (b *clusterResolverBalancer) run() { } } case u := <-b.resourceWatcher.updateChannel: - b.handleWatchUpdate(u) + b.handleResourceUpdate(u) - // Close results in cancellation of the EDS watch and closing of the + // Close results in stopping the endpoint resolvers and closing the // underlying child policy and is the only way to exit this goroutine. case <-b.closed.Done(): b.resourceWatcher.stop() @@ -312,6 +339,7 @@ func (b *clusterResolverBalancer) run() { b.child.Close() b.child = nil } + b.updateCh.Close() // This is the *ONLY* point of return from this function. b.logger.Infof("Shutdown") b.done.Fire() @@ -322,12 +350,9 @@ func (b *clusterResolverBalancer) run() { // Following are methods to implement the balancer interface. -// UpdateClientConnState receives the serviceConfig (which contains the -// clusterName to watch for in CDS) and the xdsClient object from the -// xdsResolver. 
func (b *clusterResolverBalancer) UpdateClientConnState(state balancer.ClientConnState) error { if b.closed.HasFired() { - b.logger.Warningf("xds: received ClientConnState {%+v} after clusterResolverBalancer was closed", state) + b.logger.Warningf("Received update from gRPC {%+v} after close", state) return errBalancerClosed } @@ -347,7 +372,7 @@ func (b *clusterResolverBalancer) UpdateClientConnState(state balancer.ClientCon // ResolverError handles errors reported by the xdsResolver. func (b *clusterResolverBalancer) ResolverError(err error) { if b.closed.HasFired() { - b.logger.Warningf("xds: received resolver error {%v} after clusterResolverBalancer was closed", err) + b.logger.Warningf("Received resolver error {%v} after close", err) return } b.updateCh.Put(&ccUpdate{err: err}) @@ -356,7 +381,7 @@ func (b *clusterResolverBalancer) ResolverError(err error) { // UpdateSubConnState handles subConn updates from gRPC. func (b *clusterResolverBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { if b.closed.HasFired() { - b.logger.Warningf("xds: received subConn update {%v, %v} after clusterResolverBalancer was closed", sc, state) + b.logger.Warningf("Received subConn update {%v, %v} after close", sc, state) return } b.updateCh.Put(&scUpdate{subConn: sc, state: state}) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go index 2458b10677..c676088191 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go @@ -102,11 +102,13 @@ type DiscoveryMechanism struct { DNSHostname string `json:"dnsHostname,omitempty"` // OutlierDetection is the Outlier Detection LB configuration for this // priority. 
- OutlierDetection outlierdetection.LBConfig `json:"outlierDetection,omitempty"` + OutlierDetection json.RawMessage `json:"outlierDetection,omitempty"` + outlierDetection outlierdetection.LBConfig } // Equal returns whether the DiscoveryMechanism is the same with the parameter. func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { + od := &dm.outlierDetection switch { case dm.Cluster != b.Cluster: return false @@ -118,7 +120,7 @@ func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { return false case dm.DNSHostname != b.DNSHostname: return false - case !dm.OutlierDetection.EqualIgnoringChildPolicy(&b.OutlierDetection): + case !od.EqualIgnoringChildPolicy(&b.outlierDetection): return false } @@ -151,16 +153,6 @@ type LBConfig struct { DiscoveryMechanisms []DiscoveryMechanism `json:"discoveryMechanisms,omitempty"` // XDSLBPolicy specifies the policy for locality picking and endpoint picking. - // - // Note that it's not normal balancing policy, and it can only be either - // ROUND_ROBIN or RING_HASH. - // - // For ROUND_ROBIN, the policy name will be "ROUND_ROBIN", and the config - // will be empty. This sets the locality-picking policy to weighted_target - // and the endpoint-picking policy to round_robin. - // - // For RING_HASH, the policy name will be "RING_HASH", and the config will - // be lb config for the ring_hash_experimental LB Policy. ring_hash policy - // is responsible for both locality picking and endpoint picking. 
- XDSLBPolicy *internalserviceconfig.BalancerConfig `json:"xdsLbPolicy,omitempty"` + XDSLBPolicy json.RawMessage `json:"xdsLbPolicy,omitempty"` + xdsLBPolicy internalserviceconfig.BalancerConfig } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go index b76a40355c..4b83dfb2bf 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go @@ -23,9 +23,7 @@ import ( "fmt" "sort" - "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" - "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/hierarchy" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" @@ -34,7 +32,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/clusterimpl" "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/priority" - "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -63,33 +61,6 @@ type priorityConfig struct { // // The built tree of balancers (see test for the output struct). // -// If xds lb policy is ROUND_ROBIN, the children will be weighted_target for -// locality picking, and round_robin for endpoint picking. 
-// -// ┌────────┐ -// │priority│ -// └┬──────┬┘ -// │ │ -// ┌───────────▼┐ ┌▼───────────┐ -// │cluster_impl│ │cluster_impl│ -// └─┬──────────┘ └──────────┬─┘ -// │ │ -// ┌──────────────▼─┐ ┌─▼──────────────┐ -// │locality_picking│ │locality_picking│ -// └┬──────────────┬┘ └┬──────────────┬┘ -// │ │ │ │ -// ┌─▼─┐ ┌─▼─┐ ┌─▼─┐ ┌─▼─┐ -// │LRS│ │LRS│ │LRS│ │LRS│ -// └─┬─┘ └─┬─┘ └─┬─┘ └─┬─┘ -// │ │ │ │ -// ┌──────────▼─────┐ ┌─────▼──────────┐ ┌──────────▼─────┐ ┌─────▼──────────┐ -// │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ -// └────────────────┘ └────────────────┘ └────────────────┘ └────────────────┘ -// -// If xds lb policy is RING_HASH, the children will be just a ring_hash policy. -// The endpoints from all localities will be flattened to one addresses list, -// and the ring_hash policy will pick endpoints from it. -// // ┌────────┐ // │priority│ // └┬──────┬┘ @@ -99,13 +70,8 @@ type priorityConfig struct { // └──────┬─────┘ └─────┬──────┘ // │ │ // ┌──────▼─────┐ ┌─────▼──────┐ -// │ ring_hash │ │ ring_hash │ +// │xDSLBPolicy │ │xDSLBPolicy │ (Locality and Endpoint picking layer) // └────────────┘ └────────────┘ -// -// If endpointPickingPolicy is nil, roundrobin will be used. -// -// Custom locality picking policy isn't support, and weighted_target is always -// used. func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Address, error) { pc, addrs, err := buildPriorityConfig(priorities, xdsLBPolicy) if err != nil { @@ -134,7 +100,7 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi retAddrs = append(retAddrs, addrs...) 
var odCfgs map[string]*outlierdetection.LBConfig if envconfig.XDSOutlierDetection { - odCfgs = convertClusterImplMapToOutlierDetection(configs, p.mechanism.OutlierDetection) + odCfgs = convertClusterImplMapToOutlierDetection(configs, p.mechanism.outlierDetection) for n, c := range odCfgs { retConfig.Children[n] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: c}, @@ -158,7 +124,7 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi retAddrs = append(retAddrs, addrs...) var odCfg *outlierdetection.LBConfig if envconfig.XDSOutlierDetection { - odCfg = makeClusterImplOutlierDetectionChild(config, p.mechanism.OutlierDetection) + odCfg = makeClusterImplOutlierDetectionChild(config, p.mechanism.outlierDetection) retConfig.Children[name] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: odCfg}, // Not ignore re-resolution from DNS children, they will trigger @@ -284,55 +250,11 @@ func dedupSortedIntSlice(a []int) []int { return a[:i+1] } -// rrBalancerConfig is a const roundrobin config, used as child of -// weighted-roundrobin. To avoid allocating memory everytime. -var rrBalancerConfig = &internalserviceconfig.BalancerConfig{Name: roundrobin.Name} - // priorityLocalitiesToClusterImpl takes a list of localities (with the same // priority), and generates a cluster impl policy config, and a list of -// addresses. +// addresses with their path hierarchy set to [priority-name, locality-name], so +// priority and the xDS LB Policy know which child policy each address is for. 
func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) { - clusterImplCfg := &clusterimpl.LBConfig{ - Cluster: mechanism.Cluster, - EDSServiceName: mechanism.EDSServiceName, - LoadReportingServer: mechanism.LoadReportingServer, - MaxConcurrentRequests: mechanism.MaxConcurrentRequests, - DropCategories: drops, - // ChildPolicy is not set. Will be set based on xdsLBPolicy - } - - if xdsLBPolicy == nil || xdsLBPolicy.Name == roundrobin.Name { - // If lb policy is ROUND_ROBIN: - // - locality-picking policy is weighted_target - // - endpoint-picking policy is round_robin - logger.Infof("xds lb policy is %q, building config with weighted_target + round_robin", roundrobin.Name) - // Child of weighted_target is hardcoded to round_robin. - wtConfig, addrs := localitiesToWeightedTarget(localities, priorityName, rrBalancerConfig) - clusterImplCfg.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: weightedtarget.Name, Config: wtConfig} - return clusterImplCfg, addrs, nil - } - - if xdsLBPolicy.Name == ringhash.Name { - // If lb policy is RIHG_HASH, will build one ring_hash policy as child. - // The endpoints from all localities will be flattened to one addresses - // list, and the ring_hash policy will pick endpoints from it. - logger.Infof("xds lb policy is %q, building config with ring_hash", ringhash.Name) - addrs := localitiesToRingHash(localities, priorityName) - // Set child to ring_hash, note that the ring_hash config is from - // xdsLBPolicy. 
- clusterImplCfg.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: ringhash.Name, Config: xdsLBPolicy.Config} - return clusterImplCfg, addrs, nil - } - - return nil, nil, fmt.Errorf("unsupported xds LB policy %q, not one of {%q,%q}", xdsLBPolicy.Name, roundrobin.Name, ringhash.Name) -} - -// localitiesToRingHash takes a list of localities (with the same priority), and -// generates a list of addresses. -// -// The addresses have path hierarchy set to [priority-name], so priority knows -// which child policy they are for. -func localitiesToRingHash(localities []xdsresource.Locality, priorityName string) []resolver.Address { var addrs []resolver.Address for _, locality := range localities { var lw uint32 = 1 @@ -350,54 +272,29 @@ func localitiesToRingHash(localities []xdsresource.Locality, priorityName string if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown { continue } - + addr := resolver.Address{Addr: endpoint.Address} + addr = hierarchy.Set(addr, []string{priorityName, localityStr}) + addr = internal.SetLocalityID(addr, locality.ID) + // "To provide the xds_wrr_locality load balancer information about + // locality weights received from EDS, the cluster resolver will + // populate a new locality weight attribute for each address The + // attribute will have the weight (as an integer) of the locality + // the address is part of." - A52 + addr = wrrlocality.SetAddrInfo(addr, wrrlocality.AddrInfo{LocalityWeight: lw}) var ew uint32 = 1 if endpoint.Weight != 0 { ew = endpoint.Weight } - - // The weight of each endpoint is locality_weight * endpoint_weight. 
- ai := weightedroundrobin.AddrInfo{Weight: lw * ew} - addr := weightedroundrobin.SetAddrInfo(resolver.Address{Addr: endpoint.Address}, ai) - addr = hierarchy.Set(addr, []string{priorityName, localityStr}) - addr = internal.SetLocalityID(addr, locality.ID) - addrs = append(addrs, addr) - } - } - return addrs -} - -// localitiesToWeightedTarget takes a list of localities (with the same -// priority), and generates a weighted target config, and list of addresses. -// -// The addresses have path hierarchy set to [priority-name, locality-name], so -// priority and weighted target know which child policy they are for. -func localitiesToWeightedTarget(localities []xdsresource.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig) (*weightedtarget.LBConfig, []resolver.Address) { - weightedTargets := make(map[string]weightedtarget.Target) - var addrs []resolver.Address - for _, locality := range localities { - localityStr, err := locality.ID.ToString() - if err != nil { - localityStr = fmt.Sprintf("%+v", locality.ID) - } - weightedTargets[localityStr] = weightedtarget.Target{Weight: locality.Weight, ChildPolicy: childPolicy} - for _, endpoint := range locality.Endpoints { - // Filter out all "unhealthy" endpoints (unknown and healthy are - // both considered to be healthy: - // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). 
- if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown { - continue - } - - addr := resolver.Address{Addr: endpoint.Address} - if childPolicy.Name == weightedroundrobin.Name && endpoint.Weight != 0 { - ai := weightedroundrobin.AddrInfo{Weight: endpoint.Weight} - addr = weightedroundrobin.SetAddrInfo(addr, ai) - } - addr = hierarchy.Set(addr, []string{priorityName, localityStr}) - addr = internal.SetLocalityID(addr, locality.ID) + addr = weightedroundrobin.SetAddrInfo(addr, weightedroundrobin.AddrInfo{Weight: lw * ew}) addrs = append(addrs, addr) } } - return &weightedtarget.LBConfig{Targets: weightedTargets}, addrs + return &clusterimpl.LBConfig{ + Cluster: mechanism.Cluster, + EDSServiceName: mechanism.EDSServiceName, + LoadReportingServer: mechanism.LoadReportingServer, + MaxConcurrentRequests: mechanism.MaxConcurrentRequests, + DropCategories: drops, + ChildPolicy: xdsLBPolicy, + }, addrs, nil } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go index 9c2fc6e7c7..580734a021 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -21,7 +21,6 @@ package clusterresolver import ( "sync" - "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -32,9 +31,34 @@ type resourceUpdate struct { err error } -type discoveryMechanism interface { +// topLevelResolver is used by concrete endpointsResolver implementations for +// reporting updates and errors. 
The `resourceResolver` type implements this +// interface and takes appropriate actions upon receipt of updates and errors +// from underlying concrete resolvers. +type topLevelResolver interface { + onUpdate() + onError(error) +} + +// endpointsResolver wraps the functionality to resolve a given resource name to +// a set of endpoints. The mechanism used by concrete implementations depends on +// the supported discovery mechanism type. +type endpointsResolver interface { + // lastUpdate returns endpoint results from the most recent resolution. + // + // The type of the first return result is dependent on the resolver + // implementation. + // + // The second return result indicates whether the resolver was able to + // successfully resolve the resource name to endpoints. If set to false, the + // first return result is invalid and must not be used. lastUpdate() (interface{}, bool) + + // resolveNow triggers re-resolution of the resource. resolveNow() + + // stop stops resolution of the resource. Implementations must not invoke + // any methods on the topLevelResolver interface once `stop()` returns. stop() } @@ -47,14 +71,13 @@ type discoveryMechanismKey struct { name string } -// resolverMechanismTuple is needed to keep the resolver and the discovery -// mechanism together, because resolvers can be shared. And we need the -// mechanism for fields like circuit breaking, LRS etc when generating the +// discoveryMechanismAndResolver is needed to keep the resolver and the +// discovery mechanism together, because resolvers can be shared. And we need +// the mechanism for fields like circuit breaking, LRS etc when generating the // balancer config. 
-type resolverMechanismTuple struct { - dm DiscoveryMechanism - dmKey discoveryMechanismKey - r discoveryMechanism +type discoveryMechanismAndResolver struct { + dm DiscoveryMechanism + r endpointsResolver childNameGen *nameGenerator } @@ -66,14 +89,14 @@ type resourceResolver struct { // mu protects the slice and map, and content of the resolvers in the slice. mu sync.Mutex mechanisms []DiscoveryMechanism - children []resolverMechanismTuple + children []discoveryMechanismAndResolver // childrenMap's value only needs the resolver implementation (type // discoveryMechanism) and the childNameGen. The other two fields are not // used. // // TODO(cleanup): maybe we can make a new type with just the necessary // fields, and use it here instead. - childrenMap map[discoveryMechanismKey]resolverMechanismTuple + childrenMap map[discoveryMechanismKey]discoveryMechanismAndResolver // Each new discovery mechanism needs a child name generator to reuse child // policy names. But to make sure the names across discover mechanism // doesn't conflict, we need a seq ID. 
This ID is incremented for each new @@ -85,7 +108,7 @@ func newResourceResolver(parent *clusterResolverBalancer) *resourceResolver { return &resourceResolver{ parent: parent, updateChannel: make(chan *resourceUpdate, 1), - childrenMap: make(map[discoveryMechanismKey]resolverMechanismTuple), + childrenMap: make(map[discoveryMechanismKey]discoveryMechanismAndResolver), } } @@ -102,6 +125,21 @@ func equalDiscoveryMechanisms(a, b []DiscoveryMechanism) bool { return true } +func discoveryMechanismToKey(dm DiscoveryMechanism) discoveryMechanismKey { + switch dm.Type { + case DiscoveryMechanismTypeEDS: + nameToWatch := dm.EDSServiceName + if nameToWatch == "" { + nameToWatch = dm.Cluster + } + return discoveryMechanismKey{typ: dm.Type, name: nameToWatch} + case DiscoveryMechanismTypeLogicalDNS: + return discoveryMechanismKey{typ: dm.Type, name: dm.DNSHostname} + default: + return discoveryMechanismKey{} + } +} + func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { rr.mu.Lock() defer rr.mu.Unlock() @@ -109,65 +147,45 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { return } rr.mechanisms = mechanisms - rr.children = make([]resolverMechanismTuple, len(mechanisms)) + rr.children = make([]discoveryMechanismAndResolver, len(mechanisms)) newDMs := make(map[discoveryMechanismKey]bool) // Start one watch for each new discover mechanism {type+resource_name}. for i, dm := range mechanisms { + dmKey := discoveryMechanismToKey(dm) + newDMs[dmKey] = true + dmAndResolver, ok := rr.childrenMap[dmKey] + if ok { + // If this is not new, keep the fields (especially childNameGen), + // and only update the DiscoveryMechanism. + // + // Note that the same dmKey doesn't mean the same + // DiscoveryMechanism. There are fields (e.g. + // MaxConcurrentRequests) in DiscoveryMechanism that are not copied + // to dmKey, we need to keep those updated. 
+ dmAndResolver.dm = dm + rr.children[i] = dmAndResolver + continue + } + + // Create resolver for a newly seen resource. + var resolver endpointsResolver switch dm.Type { case DiscoveryMechanismTypeEDS: - // If EDSServiceName is not set, use the cluster name as EDS service - // name to watch. - nameToWatch := dm.EDSServiceName - if nameToWatch == "" { - nameToWatch = dm.Cluster - } - dmKey := discoveryMechanismKey{typ: dm.Type, name: nameToWatch} - newDMs[dmKey] = true - - r, ok := rr.childrenMap[dmKey] - if !ok { - r = resolverMechanismTuple{ - dm: dm, - dmKey: dmKey, - r: newEDSResolver(nameToWatch, rr.parent.xdsClient, rr), - childNameGen: newNameGenerator(rr.childNameGeneratorSeqID), - } - rr.childrenMap[dmKey] = r - rr.childNameGeneratorSeqID++ - } else { - // If this is not new, keep the fields (especially - // childNameGen), and only update the DiscoveryMechanism. - // - // Note that the same dmKey doesn't mean the same - // DiscoveryMechanism. There are fields (e.g. - // MaxConcurrentRequests) in DiscoveryMechanism that are not - // copied to dmKey, we need to keep those updated. - r.dm = dm - } - rr.children[i] = r + resolver = newEDSResolver(dmKey.name, rr.parent.xdsClient, rr) case DiscoveryMechanismTypeLogicalDNS: - // Name to resolve in DNS is the hostname, not the ClientConn - // target. 
- dmKey := discoveryMechanismKey{typ: dm.Type, name: dm.DNSHostname} - newDMs[dmKey] = true - - r, ok := rr.childrenMap[dmKey] - if !ok { - r = resolverMechanismTuple{ - dm: dm, - dmKey: dmKey, - r: newDNSResolver(dm.DNSHostname, rr), - childNameGen: newNameGenerator(rr.childNameGeneratorSeqID), - } - rr.childrenMap[dmKey] = r - rr.childNameGeneratorSeqID++ - } else { - r.dm = dm - } - rr.children[i] = r + resolver = newDNSResolver(dmKey.name, rr) } + dmAndResolver = discoveryMechanismAndResolver{ + dm: dm, + r: resolver, + childNameGen: newNameGenerator(rr.childNameGeneratorSeqID), + } + rr.childrenMap[dmKey] = dmAndResolver + rr.children[i] = dmAndResolver + rr.childNameGeneratorSeqID++ } + // Stop the resources that were removed. for dm, r := range rr.childrenMap { if !newDMs[dm] { @@ -177,7 +195,7 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { } // Regenerate even if there's no change in discovery mechanism, in case // priority order changed. - rr.generate() + rr.generateLocked() } // resolveNow is typically called to trigger re-resolve of DNS. The EDS @@ -199,7 +217,7 @@ func (rr *resourceResolver) stop() { // be removed entirely, but a future use case might want to reuse the // policy instead. cm := rr.childrenMap - rr.childrenMap = make(map[discoveryMechanismKey]resolverMechanismTuple) + rr.childrenMap = make(map[discoveryMechanismKey]discoveryMechanismAndResolver) rr.mechanisms = nil rr.children = nil rr.mu.Unlock() @@ -207,15 +225,28 @@ func (rr *resourceResolver) stop() { for _, r := range cm { r.r.stop() } + + // stop() is called when the LB policy is closed or when the underlying + // cluster resource is removed by the management server. In the latter case, + // an empty config update needs to be pushed to the child policy to ensure + // that a picker that fails RPCs is sent up to the channel. + // + // Resource resolver implementations are expected to not send any updates + // after they are stopped. 
Therefore, we don't have to worry about another + // write to this channel happening at the same time as this one. + select { + case <-rr.updateChannel: + default: + } + rr.updateChannel <- &resourceUpdate{} } -// generate collects all the updates from all the resolvers, and push the -// combined result into the update channel. It only pushes the update when all -// the child resolvers have received at least one update, otherwise it will -// wait. +// generateLocked collects updates from all resolvers. It pushes the combined +// result on the update channel if all child resolvers have received at least +// one update. Otherwise it returns early. // // caller must hold rr.mu. -func (rr *resourceResolver) generate() { +func (rr *resourceResolver) generateLocked() { var ret []priorityConfig for _, rDM := range rr.children { u, ok := rDM.r.lastUpdate() @@ -238,49 +269,16 @@ func (rr *resourceResolver) generate() { rr.updateChannel <- &resourceUpdate{priorities: ret} } -type edsDiscoveryMechanism struct { - cancel func() - - update xdsresource.EndpointsUpdate - updateReceived bool -} - -func (er *edsDiscoveryMechanism) lastUpdate() (interface{}, bool) { - if !er.updateReceived { - return nil, false - } - return er.update, true -} - -func (er *edsDiscoveryMechanism) resolveNow() { -} - -func (er *edsDiscoveryMechanism) stop() { - er.cancel() +func (rr *resourceResolver) onUpdate() { + rr.mu.Lock() + rr.generateLocked() + rr.mu.Unlock() } -// newEDSResolver starts the EDS watch on the given xds client. 
-func newEDSResolver(nameToWatch string, xdsc xdsclient.XDSClient, topLevelResolver *resourceResolver) *edsDiscoveryMechanism { - ret := &edsDiscoveryMechanism{} - topLevelResolver.parent.logger.Infof("EDS watch started on %v", nameToWatch) - cancel := xdsc.WatchEndpoints(nameToWatch, func(update xdsresource.EndpointsUpdate, err error) { - topLevelResolver.mu.Lock() - defer topLevelResolver.mu.Unlock() - if err != nil { - select { - case <-topLevelResolver.updateChannel: - default: - } - topLevelResolver.updateChannel <- &resourceUpdate{err: err} - return - } - ret.update = update - ret.updateReceived = true - topLevelResolver.generate() - }) - ret.cancel = func() { - topLevelResolver.parent.logger.Infof("EDS watch canceled on %v", nameToWatch) - cancel() +func (rr *resourceResolver) onError(err error) { + select { + case <-rr.updateChannel: + default: } - return ret + rr.updateChannel <- &resourceUpdate{err: err} } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go index 703b00811d..06af9cc6df 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -20,8 +20,9 @@ package clusterresolver import ( "fmt" + "net/url" + "sync" - "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -39,31 +40,54 @@ var ( // It implements resolver.ClientConn interface to work with the DNS resolver. 
type dnsDiscoveryMechanism struct { target string - topLevelResolver *resourceResolver - r resolver.Resolver + topLevelResolver topLevelResolver + dnsR resolver.Resolver + mu sync.Mutex addrs []string updateReceived bool } -func newDNSResolver(target string, topLevelResolver *resourceResolver) *dnsDiscoveryMechanism { +// newDNSResolver creates an endpoints resolver which uses a DNS resolver under +// the hood. +// +// An error in parsing the provided target string or an error in creating a DNS +// resolver means that we will never be able to resolve the provided target +// strings to endpoints. The topLevelResolver propagates address updates to the +// clusterresolver LB policy **only** after it receives updates from all its +// child resolvers. Therefore, an error here means that the topLevelResolver +// will never send address updates to the clusterresolver LB policy. +// +// Calling the onError() callback will ensure that this error is +// propagated to the child policy which eventually moves the channel to +// transient failure. +// +// The `dnsR` field is unset if we run into errors in this function. Therefore, a +// nil check is required wherever we access that field. 
+func newDNSResolver(target string, topLevelResolver topLevelResolver) *dnsDiscoveryMechanism { ret := &dnsDiscoveryMechanism{ target: target, topLevelResolver: topLevelResolver, } - r, err := newDNS(resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + target)}, ret, resolver.BuildOptions{}) + u, err := url.Parse("dns:///" + target) + if err != nil { + topLevelResolver.onError(fmt.Errorf("failed to parse dns hostname %q in clusterresolver LB policy", target)) + return ret + } + + r, err := newDNS(resolver.Target{Scheme: "dns", URL: *u}, ret, resolver.BuildOptions{}) if err != nil { - select { - case <-topLevelResolver.updateChannel: - default: - } - topLevelResolver.updateChannel <- &resourceUpdate{err: err} + topLevelResolver.onError(fmt.Errorf("failed to build DNS resolver for target %q: %v", target, err)) + return ret } - ret.r = r + ret.dnsR = r return ret } func (dr *dnsDiscoveryMechanism) lastUpdate() (interface{}, bool) { + dr.mu.Lock() + defer dr.mu.Unlock() + if !dr.updateReceived { return nil, false } @@ -71,35 +95,42 @@ func (dr *dnsDiscoveryMechanism) lastUpdate() (interface{}, bool) { } func (dr *dnsDiscoveryMechanism) resolveNow() { - dr.r.ResolveNow(resolver.ResolveNowOptions{}) + if dr.dnsR != nil { + dr.dnsR.ResolveNow(resolver.ResolveNowOptions{}) + } } +// The definition of stop() mentions that implementations must not invoke any +// methods on the topLevelResolver once the call to `stop()` returns. The +// underlying dns resolver does not send any updates to the resolver.ClientConn +// interface passed to it (implemented by dnsDiscoveryMechanism in this case) +// after its `Close()` returns. Therefore, we can guarantee that no methods of +// the topLevelResolver are invoked after we return from this method. 
func (dr *dnsDiscoveryMechanism) stop() { - dr.r.Close() + if dr.dnsR != nil { + dr.dnsR.Close() + } } // dnsDiscoveryMechanism needs to implement resolver.ClientConn interface to receive // updates from the real DNS resolver. func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { - dr.topLevelResolver.mu.Lock() - defer dr.topLevelResolver.mu.Unlock() + dr.mu.Lock() addrs := make([]string, len(state.Addresses)) for i, a := range state.Addresses { addrs[i] = a.Addr } dr.addrs = addrs dr.updateReceived = true - dr.topLevelResolver.generate() + dr.mu.Unlock() + + dr.topLevelResolver.onUpdate() return nil } func (dr *dnsDiscoveryMechanism) ReportError(err error) { - select { - case <-dr.topLevelResolver.updateChannel: - default: - } - dr.topLevelResolver.updateChannel <- &resourceUpdate{err: err} + dr.topLevelResolver.onError(err) } func (dr *dnsDiscoveryMechanism) NewAddress(addresses []resolver.Address) { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go new file mode 100644 index 0000000000..2517cf4915 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go @@ -0,0 +1,94 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "sync" + + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +type edsResourceWatcher interface { + WatchEndpoints(string, func(xdsresource.EndpointsUpdate, error)) func() +} + +type edsDiscoveryMechanism struct { + cancelWatch func() + topLevelResolver topLevelResolver + stopped *grpcsync.Event + + mu sync.Mutex + update xdsresource.EndpointsUpdate + updateReceived bool +} + +func (er *edsDiscoveryMechanism) lastUpdate() (interface{}, bool) { + er.mu.Lock() + defer er.mu.Unlock() + + if !er.updateReceived { + return nil, false + } + return er.update, true +} + +func (er *edsDiscoveryMechanism) resolveNow() { +} + +// The definition of stop() mentions that implementations must not invoke any +// methods on the topLevelResolver once the call to `stop()` returns. +func (er *edsDiscoveryMechanism) stop() { + // Canceling a watch with the xDS client can race with an xDS response + // received around the same time, and can result in the watch callback being + // invoked after the watch is canceled. Callers need to handle this race, + // and we fire the stopped event here to ensure that a watch callback + // invocation around the same time becomes a no-op. + er.stopped.Fire() + er.cancelWatch() +} + +func (er *edsDiscoveryMechanism) handleEndpointsUpdate(update xdsresource.EndpointsUpdate, err error) { + if er.stopped.HasFired() { + return + } + + if err != nil { + er.topLevelResolver.onError(err) + return + } + + er.mu.Lock() + er.update = update + er.updateReceived = true + er.mu.Unlock() + + er.topLevelResolver.onUpdate() +} + +// newEDSResolver returns an implementation of the endpointsResolver interface +// that uses EDS to resolve the given name to endpoints. 
+func newEDSResolver(nameToWatch string, watcher edsResourceWatcher, topLevelResolver topLevelResolver) *edsDiscoveryMechanism { + ret := &edsDiscoveryMechanism{ + topLevelResolver: topLevelResolver, + stopped: grpcsync.NewEvent(), + } + ret.cancelWatch = watcher.WatchEndpoints(nameToWatch, ret.handleEndpointsUpdate) + return ret +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go index 062a8e5e48..eaf4f7fc9a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go @@ -23,9 +23,9 @@ package outlierdetection import ( "encoding/json" - "errors" "fmt" "math" + "strings" "sync" "sync/atomic" "time" @@ -35,10 +35,12 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -62,13 +64,14 @@ type bb struct{} func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &outlierDetectionBalancer{ - cc: cc, - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - addrs: make(map[string]*addressInfo), - scWrappers: make(map[balancer.SubConn]*subConnWrapper), - scUpdateCh: buffer.NewUnbounded(), - pickerUpdateCh: buffer.NewUnbounded(), + cc: cc, + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + addrs: make(map[string]*addressInfo), 
+ scWrappers: make(map[balancer.SubConn]*subConnWrapper), + scUpdateCh: buffer.NewUnbounded(), + pickerUpdateCh: buffer.NewUnbounded(), + channelzParentID: bOpts.ChannelzParentID, } b.logger = prefixLogger(b) b.logger.Infof("Created") @@ -78,19 +81,27 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba } func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var lbCfg *LBConfig - if err := json.Unmarshal(s, &lbCfg); err != nil { // Validates child config if present as well. + lbCfg := &LBConfig{ + // Default top layer values as documented in A50. + Interval: iserviceconfig.Duration(10 * time.Second), + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + } + + // This unmarshalling handles underlying layers sre and fpe which have their + // own defaults for their fields if either sre or fpe are present. + if err := json.Unmarshal(s, lbCfg); err != nil { // Validates child config if present as well. return nil, fmt.Errorf("xds: unable to unmarshal LBconfig: %s, error: %v", string(s), err) } // Note: in the xds flow, these validations will never fail. The xdsclient // performs the same validations as here on the xds Outlier Detection - // resource before parsing into the internal struct which gets marshaled - // into JSON before calling this function. A50 defines two separate places - // for these validations to take place, the xdsclient and this ParseConfig - // method. "When parsing a config from JSON, if any of these requirements is - // violated, that should be treated as a parsing error." - A50 - + // resource before parsing resource into JSON which this function gets + // called with. A50 defines two separate places for these validations to + // take place, the xdsclient and this ParseConfig method. 
"When parsing a + // config from JSON, if any of these requirements is violated, that should + // be treated as a parsing error." - A50 switch { // "The google.protobuf.Duration fields interval, base_ejection_time, and // max_ejection_time must obey the restrictions in the @@ -119,10 +130,7 @@ func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, err return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold = %v; must be <= 100", lbCfg.FailurePercentageEjection.Threshold) case lbCfg.FailurePercentageEjection != nil && lbCfg.FailurePercentageEjection.EnforcementPercentage > 100: return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage = %v; must be <= 100", lbCfg.FailurePercentageEjection.EnforcementPercentage) - case lbCfg.ChildPolicy == nil: - return nil, errors.New("OutlierDetectionLoadBalancingConfig.child_policy must be present") } - return lbCfg, nil } @@ -159,10 +167,11 @@ type outlierDetectionBalancer struct { // to suppress redundant picker updates. recentPickerNoop bool - closed *grpcsync.Event - done *grpcsync.Event - cc balancer.ClientConn - logger *grpclog.PrefixLogger + closed *grpcsync.Event + done *grpcsync.Event + cc balancer.ClientConn + logger *grpclog.PrefixLogger + channelzParentID *channelz.Identifier // childMu guards calls into child (to uphold the balancer.Balancer API // guarantee of synchronous calls). 
@@ -221,9 +230,9 @@ func (b *outlierDetectionBalancer) onIntervalConfig() { for _, addrInfo := range b.addrs { addrInfo.callCounter.clear() } - interval = b.cfg.Interval + interval = time.Duration(b.cfg.Interval) } else { - interval = b.cfg.Interval - now().Sub(b.timerStartTime) + interval = time.Duration(b.cfg.Interval) - now().Sub(b.timerStartTime) if interval < 0 { interval = 0 } @@ -362,6 +371,9 @@ func (b *outlierDetectionBalancer) Close() { b.child.Close() b.childMu.Unlock() + b.scUpdateCh.Close() + b.pickerUpdateCh.Close() + b.mu.Lock() defer b.mu.Unlock() if b.intervalTimer != nil { @@ -405,13 +417,15 @@ func (wp *wrappedPicker) Pick(info balancer.PickInfo) (balancer.PickResult, erro // programming. logger.Errorf("Picked SubConn from child picker is not a SubConnWrapper") return balancer.PickResult{ - SubConn: pr.SubConn, - Done: done, + SubConn: pr.SubConn, + Done: done, + Metadata: pr.Metadata, }, nil } return balancer.PickResult{ - SubConn: scw.SubConn, - Done: done, + SubConn: scw.SubConn, + Done: done, + Metadata: pr.Metadata, }, nil } @@ -580,14 +594,14 @@ func (b *outlierDetectionBalancer) Target() string { return b.cc.Target() } -func max(x, y int64) int64 { +func max(x, y time.Duration) time.Duration { if x < y { return y } return x } -func min(x, y int64) int64 { +func min(x, y time.Duration) time.Duration { if x < y { return x } @@ -681,7 +695,10 @@ func (b *outlierDetectionBalancer) run() { defer b.done.Fire() for { select { - case update := <-b.scUpdateCh.Get(): + case update, ok := <-b.scUpdateCh.Get(): + if !ok { + return + } b.scUpdateCh.Load() if b.closed.HasFired() { // don't send SubConn updates to child after the balancer has been closed return @@ -692,7 +709,10 @@ func (b *outlierDetectionBalancer) run() { case *ejectionUpdate: b.handleEjectedUpdate(u) } - case update := <-b.pickerUpdateCh.Get(): + case update, ok := <-b.pickerUpdateCh.Get(): + if !ok { + return + } b.pickerUpdateCh.Load() if b.closed.HasFired() { // don't send picker 
updates to grpc after the balancer has been closed return @@ -739,10 +759,10 @@ func (b *outlierDetectionBalancer) intervalTimerAlgorithm() { // to uneject the address below. continue } - et := b.cfg.BaseEjectionTime.Nanoseconds() * addrInfo.ejectionTimeMultiplier - met := max(b.cfg.BaseEjectionTime.Nanoseconds(), b.cfg.MaxEjectionTime.Nanoseconds()) - curTimeAfterEt := now().After(addrInfo.latestEjectionTimestamp.Add(time.Duration(min(et, met)))) - if curTimeAfterEt { + et := time.Duration(b.cfg.BaseEjectionTime) * time.Duration(addrInfo.ejectionTimeMultiplier) + met := max(time.Duration(b.cfg.BaseEjectionTime), time.Duration(b.cfg.MaxEjectionTime)) + uet := addrInfo.latestEjectionTimestamp.Add(min(et, met)) + if now().After(uet) { b.unejectAddress(addrInfo) } } @@ -752,7 +772,7 @@ func (b *outlierDetectionBalancer) intervalTimerAlgorithm() { if b.intervalTimer != nil { b.intervalTimer.Stop() } - b.intervalTimer = afterFunc(b.cfg.Interval, b.intervalTimerAlgorithm) + b.intervalTimer = afterFunc(time.Duration(b.cfg.Interval), b.intervalTimerAlgorithm) } // addrsWithAtLeastRequestVolume returns a slice of address information of all @@ -813,7 +833,9 @@ func (b *outlierDetectionBalancer) successRateAlgorithm() { return } successRate := float64(bucket.numSuccesses) / float64(bucket.numSuccesses+bucket.numFailures) - if successRate < (mean - stddev*(float64(ejectionCfg.StdevFactor)/1000)) { + requiredSuccessRate := mean - stddev*(float64(ejectionCfg.StdevFactor)/1000) + if successRate < requiredSuccessRate { + channelz.Infof(logger, b.channelzParentID, "SuccessRate algorithm detected outlier: %s. 
Parameters: successRate=%f, mean=%f, stddev=%f, requiredSuccessRate=%f", addrInfo, successRate, mean, stddev, requiredSuccessRate) if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage { b.ejectAddress(addrInfo) } @@ -840,6 +862,7 @@ func (b *outlierDetectionBalancer) failurePercentageAlgorithm() { } failurePercentage := (float64(bucket.numFailures) / float64(bucket.numSuccesses+bucket.numFailures)) * 100 if failurePercentage > float64(b.cfg.FailurePercentageEjection.Threshold) { + channelz.Infof(logger, b.channelzParentID, "FailurePercentage algorithm detected outlier: %s, failurePercentage=%f", addrInfo, failurePercentage) if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage { b.ejectAddress(addrInfo) } @@ -854,7 +877,9 @@ func (b *outlierDetectionBalancer) ejectAddress(addrInfo *addressInfo) { addrInfo.ejectionTimeMultiplier++ for _, sbw := range addrInfo.sws { sbw.eject() + channelz.Infof(logger, b.channelzParentID, "Subchannel ejected: %s", sbw) } + } // Caller must hold b.mu. 
@@ -863,6 +888,7 @@ func (b *outlierDetectionBalancer) unejectAddress(addrInfo *addressInfo) { addrInfo.latestEjectionTimestamp = time.Time{} for _, sbw := range addrInfo.sws { sbw.uneject() + channelz.Infof(logger, b.channelzParentID, "Subchannel unejected: %s", sbw) } } @@ -887,6 +913,16 @@ type addressInfo struct { sws []*subConnWrapper } +func (a *addressInfo) String() string { + var res strings.Builder + res.WriteString("[") + for _, sw := range a.sws { + res.WriteString(sw.String()) + } + res.WriteString("]") + return res.String() +} + func newAddressInfo() *addressInfo { return &addressInfo{ callCounter: newCallCounter(), diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go index c931674ae4..196a562ed6 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go @@ -18,9 +18,10 @@ package outlierdetection import ( + "encoding/json" "time" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -54,6 +55,24 @@ type SuccessRateEjection struct { RequestVolume uint32 `json:"requestVolume,omitempty"` } +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type successRateEjection SuccessRateEjection + +// UnmarshalJSON unmarshals JSON into SuccessRateEjection. If a +// SuccessRateEjection field is not set, that field will get its default value. 
+func (sre *SuccessRateEjection) UnmarshalJSON(j []byte) error { + sre.StdevFactor = 1900 + sre.EnforcementPercentage = 100 + sre.MinimumHosts = 5 + sre.RequestVolume = 100 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*successRateEjection)(sre)) +} + // Equal returns whether the SuccessRateEjection is the same with the parameter. func (sre *SuccessRateEjection) Equal(sre2 *SuccessRateEjection) bool { if sre == nil && sre2 == nil { @@ -101,6 +120,25 @@ type FailurePercentageEjection struct { RequestVolume uint32 `json:"requestVolume,omitempty"` } +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type failurePercentageEjection FailurePercentageEjection + +// UnmarshalJSON unmarshals JSON into FailurePercentageEjection. If a +// FailurePercentageEjection field is not set, that field will get its default +// value. +func (fpe *FailurePercentageEjection) UnmarshalJSON(j []byte) error { + fpe.Threshold = 85 + fpe.EnforcementPercentage = 0 + fpe.MinimumHosts = 5 + fpe.RequestVolume = 50 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*failurePercentageEjection)(fpe)) +} + // Equal returns whether the FailurePercentageEjection is the same with the // parameter. func (fpe *FailurePercentageEjection) Equal(fpe2 *FailurePercentageEjection) bool { @@ -128,15 +166,15 @@ type LBConfig struct { // Interval is the time interval between ejection analysis sweeps. This can // result in both new ejections as well as addresses being returned to // service. Defaults to 10s. 
- Interval time.Duration `json:"interval,omitempty"` + Interval iserviceconfig.Duration `json:"interval,omitempty"` // BaseEjectionTime is the base time that a host is ejected for. The real // time is equal to the base time multiplied by the number of times the host // has been ejected and is capped by MaxEjectionTime. Defaults to 30s. - BaseEjectionTime time.Duration `json:"baseEjectionTime,omitempty"` + BaseEjectionTime iserviceconfig.Duration `json:"baseEjectionTime,omitempty"` // MaxEjectionTime is the maximum time that an address is ejected for. If // not specified, the default value (300s) or the BaseEjectionTime value is // applied, whichever is larger. - MaxEjectionTime time.Duration `json:"maxEjectionTime,omitempty"` + MaxEjectionTime iserviceconfig.Duration `json:"maxEjectionTime,omitempty"` // MaxEjectionPercent is the maximum % of an upstream cluster that can be // ejected due to outlier detection. Defaults to 10% but will eject at least // one host regardless of the value. @@ -148,7 +186,29 @@ type LBConfig struct { // algorithm. If set, failure rate ejections will be performed. FailurePercentageEjection *FailurePercentageEjection `json:"failurePercentageEjection,omitempty"` // ChildPolicy is the config for the child policy. - ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` + ChildPolicy *iserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type lbConfig LBConfig + +// UnmarshalJSON unmarshals JSON into LBConfig. If a top level LBConfig field +// (i.e. not next layer sre or fpe) is not set, that field will get its default +// value. If sre or fpe is not set, it will stay unset, otherwise it will +// unmarshal on those types populating with default values for their fields if +// needed. +func (lbc *LBConfig) UnmarshalJSON(j []byte) error { + // Default top layer values as documented in A50. 
+ lbc.Interval = iserviceconfig.Duration(10 * time.Second) + lbc.BaseEjectionTime = iserviceconfig.Duration(30 * time.Second) + lbc.MaxEjectionTime = iserviceconfig.Duration(300 * time.Second) + lbc.MaxEjectionPercent = 10 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*lbConfig)(lbc)) } // EqualIgnoringChildPolicy returns whether the LBConfig is same with the diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go index 8e25eb788b..71a996f29a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go @@ -18,6 +18,7 @@ package outlierdetection import ( + "fmt" "unsafe" "google.golang.org/grpc/balancer" @@ -66,3 +67,7 @@ func (scw *subConnWrapper) uneject() { isEjected: false, }) } + +func (scw *subConnWrapper) String() string { + return fmt.Sprintf("%+v", scw.addresses) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go index 28062c51ee..40c047d558 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go @@ -205,6 +205,7 @@ func (b *priorityBalancer) UpdateSubConnState(sc balancer.SubConn, state balance func (b *priorityBalancer) Close() { b.bg.Close() + 
b.childBalancerStateUpdate.Close() b.mu.Lock() defer b.mu.Unlock() @@ -247,7 +248,10 @@ type resumePickerUpdates struct { func (b *priorityBalancer) run() { for { select { - case u := <-b.childBalancerStateUpdate.Get(): + case u, ok := <-b.childBalancerStateUpdate.Get(): + if !ok { + return + } b.childBalancerStateUpdate.Load() // Needs to handle state update in a goroutine, because each state // update needs to start/close child policy, could result in diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go index 4763120fa6..b4afcf1001 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go @@ -35,8 +35,9 @@ type LBConfig struct { } const ( - defaultMinSize = 1024 - defaultMaxSize = 4096 + defaultMinSize = 1024 + defaultMaxSize = 4096 + ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M ) func parseConfig(c json.RawMessage) (*LBConfig, error) { @@ -44,6 +45,12 @@ func parseConfig(c json.RawMessage) (*LBConfig, error) { if err := json.Unmarshal(c, &cfg); err != nil { return nil, err } + if cfg.MinRingSize > ringHashSizeUpperBound { + return nil, fmt.Errorf("min_ring_size value of %d is greater than max supported value %d for this field", cfg.MinRingSize, ringHashSizeUpperBound) + } + if cfg.MaxRingSize > ringHashSizeUpperBound { + return nil, fmt.Errorf("max_ring_size value of %d is greater than max supported value %d for this field", cfg.MaxRingSize, ringHashSizeUpperBound) + } if cfg.MinRingSize == 0 { cfg.MinRingSize = defaultMinSize } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go index 
3e35556d8a..4d7fdb35e7 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go @@ -92,16 +92,19 @@ func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, log // // A hash is generated for each item, and later the results will be sorted // based on the hash. - var ( - idx int - targetIdx float64 - ) + var currentHashes, targetHashes float64 for _, scw := range normalizedWeights { - targetIdx += scale * scw.weight - for float64(idx) < targetIdx { - h := xxhash.Sum64String(scw.sc.addr + strconv.Itoa(idx)) - items = append(items, &ringEntry{idx: idx, hash: h, sc: scw.sc}) + targetHashes += scale * scw.weight + // This index ensures that ring entries corresponding to the same + // address hash to different values. And since this index is + // per-address, these entries hash to the same value across address + // updates. + idx := 0 + for currentHashes < targetHashes { + h := xxhash.Sum64String(scw.sc.addr + "_" + strconv.Itoa(idx)) + items = append(items, &ringEntry{hash: h, sc: scw.sc}) idx++ + currentHashes++ } } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go new file mode 100644 index 0000000000..4df2e4ed00 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go @@ -0,0 +1,201 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package wrrlocality provides an implementation of the wrr locality LB policy, +// as defined in [A52 - xDS Custom LB Policies]. +// +// [A52 - xDS Custom LB Policies]: https://github.com/grpc/proposal/blob/master/A52-xds-custom-lb-policies.md +package wrrlocality + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedtarget" + "google.golang.org/grpc/internal/grpclog" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal" +) + +// Name is the name of wrr_locality balancer. +const Name = "xds_wrr_locality_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Name() string { + return Name +} + +// LBConfig is the config for the wrr locality balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // ChildPolicy is the config for the child policy. + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +// To plumb in a different child in tests. +var weightedTargetName = weightedtarget.Name + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(weightedTargetName) + if builder == nil { + // Shouldn't happen, registered through imported weighted target, + // defensive programming. 
+ return nil + } + + // Doesn't need to intercept any balancer.ClientConn operations; pass + // through by just giving cc to child balancer. + wtb := builder.Build(cc, bOpts) + if wtb == nil { + // shouldn't happen, defensive programming. + return nil + } + wtbCfgParser, ok := builder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported weighted target builder has this method. + return nil + } + wrrL := &wrrLocalityBalancer{ + child: wtb, + childParser: wtbCfgParser, + } + + wrrL.logger = prefixLogger(wrrL) + wrrL.logger.Infof("Created") + return wrrL +} + +func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg *LBConfig + if err := json.Unmarshal(s, &lbCfg); err != nil { + return nil, fmt.Errorf("xds_wrr_locality: invalid LBConfig: %s, error: %v", string(s), err) + } + if lbCfg == nil || lbCfg.ChildPolicy == nil { + return nil, errors.New("xds_wrr_locality: invalid LBConfig: child policy field must be set") + } + return lbCfg, nil +} + +type attributeKey struct{} + +// Equal allows the values to be compared by Attributes.Equal. +func (a AddrInfo) Equal(o interface{}) bool { + oa, ok := o.(AddrInfo) + return ok && oa.LocalityWeight == a.LocalityWeight +} + +// AddrInfo is the locality weight of the locality an address is a part of. +type AddrInfo struct { + LocalityWeight uint32 +} + +// SetAddrInfo returns a copy of addr in which the BalancerAttributes field is +// updated with AddrInfo. +func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(attributeKey{}, addrInfo) + return addr +} + +func (a AddrInfo) String() string { + return fmt.Sprintf("Locality Weight: %d", a.LocalityWeight) +} + +// getAddrInfo returns the AddrInfo stored in the BalancerAttributes field of +// addr. Returns false if no AddrInfo found. 
+func getAddrInfo(addr resolver.Address) (AddrInfo, bool) { + v := addr.BalancerAttributes.Value(attributeKey{}) + ai, ok := v.(AddrInfo) + return ai, ok +} + +// wrrLocalityBalancer wraps a weighted target balancer, and builds +// configuration for the weighted target once it receives configuration +// specifying the weighted target child balancer and locality weight +// information. +type wrrLocalityBalancer struct { + // child will be a weighted target balancer, and will be built it at + // wrrLocalityBalancer build time. Other than preparing configuration, other + // balancer operations are simply pass through. + child balancer.Balancer + + childParser balancer.ConfigParser + + logger *grpclog.PrefixLogger +} + +func (b *wrrLocalityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + lbCfg, ok := s.BalancerConfig.(*LBConfig) + if !ok { + b.logger.Errorf("Received config with unexpected type %T: %v", s.BalancerConfig, s.BalancerConfig) + return balancer.ErrBadResolverState + } + + weightedTargets := make(map[string]weightedtarget.Target) + for _, addr := range s.ResolverState.Addresses { + // This get of LocalityID could potentially return a zero value. This + // shouldn't happen though (this attribute that is set actually gets + // used to build localities in the first place), and thus don't error + // out, and just build a weighted target with undefined behavior. + locality, err := internal.GetLocalityID(addr).ToString() + if err != nil { + // Should never happen. 
+ logger.Errorf("Failed to marshal LocalityID: %v, skipping this locality in weighted target") + } + ai, ok := getAddrInfo(addr) + if !ok { + return fmt.Errorf("xds_wrr_locality: missing locality weight information in address %q", addr) + } + weightedTargets[locality] = weightedtarget.Target{Weight: ai.LocalityWeight, ChildPolicy: lbCfg.ChildPolicy} + } + wtCfg := &weightedtarget.LBConfig{Targets: weightedTargets} + wtCfgJSON, err := json.Marshal(wtCfg) + if err != nil { + // Shouldn't happen. + return fmt.Errorf("xds_wrr_locality: error marshalling prepared config: %v", wtCfg) + } + var sc serviceconfig.LoadBalancingConfig + if sc, err = b.childParser.ParseConfig(wtCfgJSON); err != nil { + return fmt.Errorf("xds_wrr_locality: config generated %v is invalid: %v", wtCfgJSON, err) + } + + return b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + BalancerConfig: sc, + }) +} + +func (b *wrrLocalityBalancer) ResolverError(err error) { + b.child.ResolverError(err) +} + +func (b *wrrLocalityBalancer) UpdateSubConnState(sc balancer.SubConn, scState balancer.SubConnState) { + b.child.UpdateSubConnState(sc, scState) +} + +func (b *wrrLocalityBalancer) Close() { + b.child.Close() +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/logging.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/logging.go new file mode 100644 index 0000000000..42ccea0a92 --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package wrrlocality + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[wrrlocality-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *wrrLocalityBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go index 209283c3bf..277fcfc592 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go @@ -126,7 +126,10 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { return config{}, nil } - ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}) + // TODO(gregorycooke) - change the call chain to here so we have the filter + // name to input here instead of an empty string. It will come from here: + // https://github.com/grpc/grpc-go/blob/eff0942e95d93112921414aee758e619ec86f26f/xds/internal/xdsclient/xdsresource/unmarshal_lds.go#L199 + ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}, "") if err != nil { // "At this time, if the RBAC.action is Action.LOG then the policy will be // completely ignored, as if RBAC was not configurated." 
- A41 diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/internal.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/internal.go index 8df20a1f9c..ba6fa3d788 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/internal.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/internal.go @@ -80,3 +80,6 @@ func SetLocalityID(addr resolver.Address, l LocalityID) resolver.Address { addr.BalancerAttributes = addr.BalancerAttributes.WithValue(localityKey, l) return addr } + +// ResourceTypeMapForTesting maps TypeUrl to corresponding ResourceType. +var ResourceTypeMapForTesting map[string]interface{} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go index c6ab885fcf..9f5b2ecafe 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go @@ -66,10 +66,6 @@ type ServingModeCallback func(addr net.Addr, mode connectivity.ServingMode, err // connections. type DrainCallback func(addr net.Addr) -func prefixLogger(p *listenerWrapper) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p] ", p)) -} - // XDSClient wraps the methods on the XDSClient which are required by // the listenerWrapper. type XDSClient interface { @@ -117,7 +113,7 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru ldsUpdateCh: make(chan ldsUpdateWithError, 1), rdsUpdateCh: make(chan rdsHandlerUpdate, 1), } - lw.logger = prefixLogger(lw) + lw.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p] ", lw)) // Serve() verifies that Addr() returns a valid TCPAddr. 
So, it is safe to // ignore the error from SplitHostPort(). @@ -125,13 +121,7 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru lw.addr, lw.port, _ = net.SplitHostPort(lisAddr) lw.rdsHandler = newRDSHandler(lw.xdsC, lw.rdsUpdateCh) - - cancelWatch := lw.xdsC.WatchListener(lw.name, lw.handleListenerUpdate) - lw.logger.Infof("Watch started on resource name %v", lw.name) - lw.cancelWatch = func() { - cancelWatch() - lw.logger.Infof("Watch cancelled on resource name %v", lw.name) - } + lw.cancelWatch = lw.xdsC.WatchListener(lw.name, lw.handleListenerUpdate) go lw.run() return lw, lw.goodUpdate.Done() } @@ -270,7 +260,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { // error, `grpc.Serve()` method sleeps for a small duration and // therefore ends up blocking all connection attempts during that // time frame, which is also not ideal for an error like this. - l.logger.Warningf("connection from %s to %s failed to find any matching filter chain", conn.RemoteAddr().String(), conn.LocalAddr().String()) + l.logger.Warningf("Connection from %s to %s failed to find any matching filter chain", conn.RemoteAddr().String(), conn.LocalAddr().String()) conn.Close() continue } @@ -302,7 +292,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { // tradeoff for simplicity. vhswi, err := fc.ConstructUsableRouteConfiguration(rc) if err != nil { - l.logger.Warningf("route configuration construction: %v", err) + l.logger.Warningf("Failed to construct usable route configuration: %v", err) conn.Close() continue } @@ -388,7 +378,6 @@ func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { // continue to use the old configuration. return } - l.logger.Infof("Received update for resource %q: %+v", l.name, update.update) // Make sure that the socket address on the received Listener resource // matches the address of the net.Listener passed to us by the user. 
This diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go index 3d4f99e88d..61adf794e9 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go @@ -19,11 +19,14 @@ package xdsclient import ( "context" + "errors" "fmt" + "strings" "sync" "time" "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/xds/internal/xdsclient/transport" @@ -34,16 +37,18 @@ import ( type watchState int const ( - watchStateStarted watchState = iota - watchStateRespReceived - watchStateTimeout - watchStateCanceled + watchStateStarted watchState = iota // Watch started, request not yet set. + watchStateRequested // Request sent for resource being watched. + watchStateReceived // Response received for resource being watched. + watchStateTimeout // Watch timer expired, no response. + watchStateCanceled // Watch cancelled. ) type resourceState struct { - watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource - cache xdsresource.ResourceData // Most recent ACKed update for this resource - md xdsresource.UpdateMetadata // Metadata for the most recent update + watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource + cache xdsresource.ResourceData // Most recent ACKed update for this resource + md xdsresource.UpdateMetadata // Metadata for the most recent update + deletionIgnored bool // True if resource deletion was ignored for a prior update // Common watch state for all watchers of this resource. 
wTimer *time.Timer // Expiry timer @@ -62,7 +67,7 @@ type authority struct { serverCfg *bootstrap.ServerConfig // Server config for this authority bootstrapCfg *bootstrap.Config // Full bootstrap configuration refCount int // Reference count of watches referring to this authority - serializer *callbackSerializer // Callback serializer for invoking watch callbacks + serializer *grpcsync.CallbackSerializer // Callback serializer for invoking watch callbacks resourceTypeGetter func(string) xdsresource.Type // ResourceType registry lookup transport *transport.Transport // Underlying xDS transport to the management server watchExpiryTimeout time.Duration // Resource watch expiry timeout @@ -96,7 +101,7 @@ type authorityArgs struct { // the second case. serverCfg *bootstrap.ServerConfig bootstrapCfg *bootstrap.Config - serializer *callbackSerializer + serializer *grpcsync.CallbackSerializer resourceTypeGetter func(string) xdsresource.Type watchExpiryTimeout time.Duration logger *grpclog.PrefixLogger @@ -114,17 +119,34 @@ func newAuthority(args authorityArgs) (*authority, error) { } tr, err := transport.New(transport.Options{ - ServerCfg: *args.serverCfg, - UpdateHandler: ret.handleResourceUpdate, - StreamErrorHandler: ret.newConnectionError, - Logger: args.logger, + ServerCfg: *args.serverCfg, + OnRecvHandler: ret.handleResourceUpdate, + OnErrorHandler: ret.newConnectionError, + OnSendHandler: ret.transportOnSendHandler, + Logger: args.logger, + NodeProto: args.bootstrapCfg.NodeProto, }) if err != nil { return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err) } ret.transport = tr return ret, nil +} +// transportOnSendHandler is called by the underlying transport when it sends a +// resource request successfully. Timers are activated for resources waiting for +// a response. 
+func (a *authority) transportOnSendHandler(u *transport.ResourceSendInfo) { + rType := a.resourceTypeGetter(u.URL) + // Resource type not found is not expected under normal circumstances, since + // the resource type url passed to the transport is determined by the authority. + if rType == nil { + a.logger.Warningf("Unknown resource type url: %s.", u.URL) + return + } + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + a.startWatchTimersLocked(rType, u.ResourceNames) } func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate) error { @@ -133,10 +155,7 @@ func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate return xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource URL %v unknown in response from server", resourceUpdate.URL) } - opts := &xdsresource.DecodeOptions{ - BootstrapConfig: a.bootstrapCfg, - Logger: a.logger, - } + opts := &xdsresource.DecodeOptions{BootstrapConfig: a.bootstrapCfg} updates, md, err := decodeAllResources(opts, rType, resourceUpdate) a.updateResourceStateAndScheduleCallbacks(rType, updates, md) return err @@ -152,8 +171,34 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // Cancel the expiry timer associated with the resource once a // response is received, irrespective of whether the update is a // good one or not. - state.wTimer.Stop() - state.wState = watchStateRespReceived + // + // We check for watch states `started` and `requested` here to + // accommodate for a race which can happen in the following + // scenario: + // - When a watch is registered, it is possible that the ADS stream + // is not yet created. In this case, the request for the resource + // is not sent out immediately. An entry in the `resourceStates` + // map is created with a watch state of `started`. 
+ // - Once the stream is created, it is possible that the management + // server might respond with the requested resource before we send + // out request for the same. If we don't check for `started` here, + // and move the state to `received`, we will end up starting the + // timer when the request gets sent out. And since the mangement + // server already sent us the resource, there is a good chance + // that it will not send it again. This would eventually lead to + // the timer firing, even though we have the resource in the + // cache. + if state.wState == watchStateStarted || state.wState == watchStateRequested { + // It is OK to ignore the return value from Stop() here because + // if the timer has already fired, it means that the timer watch + // expiry callback is blocked on the same lock that we currently + // hold. Since we move the state to `received` here, the timer + // callback will be a no-op. + if state.wTimer != nil { + state.wTimer.Stop() + } + state.wState = watchStateReceived + } if uErr.err != nil { // On error, keep previous version of the resource. But update @@ -167,8 +212,12 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty } continue } - // If we get here, it means that the update is a valid one. Notify - // watchers only if this is a first time update or it is different + + if state.deletionIgnored { + state.deletionIgnored = false + a.logger.Infof("A valid update was received for resource %q of type %q after previously ignoring a deletion", name, rType.TypeName()) + } + // Notify watchers only if this is a first time update or it is different // from the one currently cached. if state.cache == nil || !state.cache.Equal(uErr.resource) { for watcher := range state.watchers { @@ -178,7 +227,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty } } // Sync cache. 
- a.logger.Debugf("Resource type %q with name %q, value %s added to cache", rType.TypeEnum().String(), name, uErr.resource.ToJSON()) + a.logger.Debugf("Resource type %q with name %q added to cache", rType.TypeName(), name) state.cache = uErr.resource // Set status to ACK, and clear error state. The metadata might be a // NACK metadata because some other resources in the same response @@ -195,7 +244,8 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // If this resource type requires that all resources be present in every // SotW response from the server, a response that does not include a // previously seen resource will be interpreted as a deletion of that - // resource. + // resource unless ignore_resource_deletion option was set in the server + // config. if !rType.AllResourcesRequiredInSotW() { return } @@ -226,7 +276,18 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty if state.md.Status == xdsresource.ServiceStatusNotExist { continue } - + // Per A53, resource deletions are ignored if the `ignore_resource_deletion` + // server feature is enabled through the bootstrap configuration. If the + // resource deletion is to be ignored, the resource is not removed from + // the cache and the corresponding OnResourceDoesNotExist() callback is + // not invoked on the watchers. + if a.serverCfg.IgnoreResourceDeletion { + if !state.deletionIgnored { + state.deletionIgnored = true + a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeName()) + } + continue + } // If resource exists in cache, but not in the new update, delete // the resource from cache, and also send a resource not found error // to indicate resource removed. 
Metadata for the resource is still @@ -284,9 +345,8 @@ func decodeAllResources(opts *xdsresource.DecodeOptions, rType xdsresource.Type, return ret, md, nil } - typeStr := rType.TypeEnum().String() md.Status = xdsresource.ServiceStatusNACKed - errRet := xdsresource.CombineErrors(typeStr, topLevelErrors, perResourceErrors) + errRet := combineErrors(rType.TypeName(), topLevelErrors, perResourceErrors) md.ErrState = &xdsresource.UpdateErrorMetadata{ Version: update.Version, Err: errRet, @@ -295,17 +355,71 @@ func decodeAllResources(opts *xdsresource.DecodeOptions, rType xdsresource.Type, return ret, md, errRet } +// startWatchTimersLocked is invoked upon transport.OnSend() callback with resources +// requested on the underlying ADS stream. This satisfies the conditions to start +// watch timers per A57 [https://github.com/grpc/proposal/blob/master/A57-xds-client-failure-mode-behavior.md#handling-resources-that-do-not-exist] +// +// Caller must hold a.resourcesMu. +func (a *authority) startWatchTimersLocked(rType xdsresource.Type, resourceNames []string) { + resourceStates := a.resources[rType] + for _, resourceName := range resourceNames { + if state, ok := resourceStates[resourceName]; ok { + if state.wState != watchStateStarted { + continue + } + state.wTimer = time.AfterFunc(a.watchExpiryTimeout, func() { + a.handleWatchTimerExpiry(rType, resourceName, state) + }) + state.wState = watchStateRequested + } + } +} + +// stopWatchTimersLocked is invoked upon connection errors to stops watch timers +// for resources that have been requested, but not yet responded to by the management +// server. +// +// Caller must hold a.resourcesMu. 
+func (a *authority) stopWatchTimersLocked() { + for _, rType := range a.resources { + for resourceName, state := range rType { + if state.wState != watchStateRequested { + continue + } + if !state.wTimer.Stop() { + // If the timer has already fired, it means that the timer watch expiry + // callback is blocked on the same lock that we currently hold. Don't change + // the watch state and instead let the watch expiry callback handle it. + a.logger.Warningf("Watch timer for resource %v already fired. Ignoring here.", resourceName) + continue + } + state.wTimer = nil + state.wState = watchStateStarted + } + } +} + // newConnectionError is called by the underlying transport when it receives a // connection error. The error will be forwarded to all the resource watchers. func (a *authority) newConnectionError(err error) { a.resourcesMu.Lock() defer a.resourcesMu.Unlock() - // For all resource types, for all resources within each resource type, and - // for all the watchers for every resource, propagate the connection error - // from the transport layer. + a.stopWatchTimersLocked() + + // We do not consider it an error if the ADS stream was closed after having received + // a response on the stream. This is because there are legitimate reasons why the server + // may need to close the stream during normal operations, such as needing to rebalance + // load or the underlying connection hitting its max connection age limit. + // See gRFC A57 for more details. + if xdsresource.ErrType(err) == xdsresource.ErrTypeStreamFailedAfterRecv { + a.logger.Warningf("Watchers not notified since ADS stream failed after having received at least one response: %v", err) + return + } + for _, rType := range a.resources { for _, state := range rType { + // Propagate the connection error from the transport layer to all watchers. 
for watcher := range state.watchers { watcher := watcher a.serializer.Schedule(func(context.Context) { @@ -332,7 +446,7 @@ func (a *authority) close() { } func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() { - a.logger.Debugf("New watch for type %q, resource name %q", rType.TypeEnum(), resourceName) + a.logger.Debugf("New watch for type %q, resource name %q", rType.TypeName(), resourceName) a.resourcesMu.Lock() defer a.resourcesMu.Unlock() @@ -349,15 +463,12 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // instruct the transport layer to send a DiscoveryRequest for the same. state := resources[resourceName] if state == nil { - a.logger.Debugf("First watch for type %q, resource name %q", rType.TypeEnum(), resourceName) + a.logger.Debugf("First watch for type %q, resource name %q", rType.TypeName(), resourceName) state = &resourceState{ watchers: make(map[xdsresource.ResourceWatcher]bool), md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, wState: watchStateStarted, } - state.wTimer = time.AfterFunc(a.watchExpiryTimeout, func() { - a.handleWatchTimerExpiry(rType, resourceName, state) - }) resources[resourceName] = state a.sendDiscoveryRequestLocked(rType, resources) } @@ -366,7 +477,7 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // If we have a cached copy of the resource, notify the new watcher. 
if state.cache != nil { - a.logger.Debugf("Resource type %q with resource name %q found in cache: %s", rType.TypeEnum(), resourceName, state.cache.ToJSON()) + a.logger.Debugf("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON()) resource := state.cache a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) }) } @@ -389,17 +500,25 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // There are no more watchers for this resource, delete the state // associated with it, and instruct the transport to send a request // which does not include this resource name. + a.logger.Debugf("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName) delete(resources, resourceName) a.sendDiscoveryRequestLocked(rType, resources) } } func (a *authority) handleWatchTimerExpiry(rType xdsresource.Type, resourceName string, state *resourceState) { - a.logger.Warningf("Watch for resource %q of type %s timed out", resourceName, rType.TypeEnum().String()) + a.logger.Warningf("Watch for resource %q of type %s timed out", resourceName, rType.TypeName()) a.resourcesMu.Lock() defer a.resourcesMu.Unlock() - if state.wState == watchStateCanceled { + switch state.wState { + case watchStateRequested: + // This is the only state where we need to handle the timer expiry by + // invoking appropriate watch callbacks. This is handled outside the switch. 
+ case watchStateCanceled: + return + default: + a.logger.Warningf("Unexpected watch state %q for resource %q.", state.wState, resourceName) return } @@ -454,3 +573,28 @@ func (a *authority) dumpResources() map[string]map[string]xdsresource.UpdateWith } return dump } + +func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { + var errStrB strings.Builder + errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) + if len(topLevelErrors) > 0 { + errStrB.WriteString("top level errors: ") + for i, err := range topLevelErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + errStrB.WriteString(err.Error()) + } + } + if len(perResourceErrors) > 0 { + var i int + for name, err := range perResourceErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + i++ + errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) + } + } + return errors.New(errStrB.String()) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/bootstrap.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/bootstrap.go index 23eff2d639..aec2fa51f3 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -28,8 +28,8 @@ import ( "os" "strings" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/google" @@ -39,17 +39,18 @@ import ( "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - - v2corepb 
"github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" ) const ( // The "server_features" field in the bootstrap file contains a list of - // features supported by the server. A value of "xds_v3" indicates that the - // server supports the v3 version of the xDS transport protocol. - serverFeaturesV3 = "xds_v3" + // features supported by the server: + // - A value of "xds_v3" indicates that the server supports the v3 version of + // the xDS transport protocol. + // - A value of "ignore_resource_deletion" indicates that the client should + // ignore deletion of Listener and Cluster resources in updates from the + // server. + serverFeaturesV3 = "xds_v3" + serverFeaturesIgnoreResourceDeletion = "ignore_resource_deletion" gRPCUserAgentName = "gRPC Go" clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" @@ -61,8 +62,6 @@ func init() { bootstrap.RegisterCredentials(&googleDefaultCredsBuilder{}) } -var gRPCVersion = fmt.Sprintf("%s %s", gRPCUserAgentName, grpc.Version) - // For overriding in unit tests. var bootstrapFileReadFunc = os.ReadFile @@ -90,30 +89,73 @@ func (d *googleDefaultCredsBuilder) Name() string { return "google_default" } +// ChannelCreds contains the credentials to be used while communicating with an +// xDS server. It is also used to dedup servers with the same server URI. +type ChannelCreds struct { + // Type contains a unique name identifying the credentials type. The only + // supported types currently are "google_default" and "insecure". + Type string + // Config contains the JSON configuration associated with the credentials. + Config json.RawMessage +} + +// Equal reports whether cc and other are considered equal. +func (cc ChannelCreds) Equal(other ChannelCreds) bool { + return cc.Type == other.Type && bytes.Equal(cc.Config, other.Config) +} + +// String returns a string representation of the credentials. 
It contains the +// type and the config (if non-nil) separated by a "-". +func (cc ChannelCreds) String() string { + if cc.Config == nil { + return cc.Type + } + + // We do not expect the Marshal call to fail since we wrote to cc.Config + // after a successful unmarshaling from JSON configuration. Therefore, + // it is safe to ignore the error here. + b, _ := json.Marshal(cc.Config) + return cc.Type + "-" + string(b) +} + // ServerConfig contains the configuration to connect to a server, including // URI, creds, and transport API version (e.g. v2 or v3). +// +// It contains unexported fields that are initialized when unmarshaled from JSON +// using either the UnmarshalJSON() method or the ServerConfigFromJSON() +// function. Hence users are strongly encouraged not to use a literal struct +// initialization to create an instance of this type, but instead unmarshal from +// JSON using one of the two available options. type ServerConfig struct { // ServerURI is the management server to connect to. // // The bootstrap file contains an ordered list of xDS servers to contact for // this authority. The first one is picked. ServerURI string - // Creds contains the credentials to be used while talking to the xDS - // server, as a grpc.DialOption. - Creds grpc.DialOption - // CredsType is the type of the creds. It will be used to dedup servers. - CredsType string - // TransportAPI indicates the API version of xDS transport protocol to use. - // This describes the xDS gRPC endpoint and version of - // DiscoveryRequest/Response used on the wire. - TransportAPI version.TransportAPI - // NodeProto contains the Node proto to be used in xDS requests. The actual - // type depends on the transport protocol version used. - // - // Note that it's specified in the bootstrap globally for all the servers, - // but we keep it in each server config so that its type (e.g. *v2pb.Node or - // *v3pb.Node) is consistent with the transport API version. 
- NodeProto proto.Message + // Creds contains the credentials to be used while communicationg with this + // xDS server. It is also used to dedup servers with the same server URI. + Creds ChannelCreds + // ServerFeatures contains a list of features supported by this xDS server. + // It is also used to dedup servers with the same server URI and creds. + ServerFeatures []string + + // As part of unmarshaling the JSON config into this struct, we ensure that + // the credentials config is valid by building an instance of the specified + // credentials and store it here as a grpc.DialOption for easy access when + // dialing this xDS server. + credsDialOption grpc.DialOption + + // IgnoreResourceDeletion controls the behavior of the xDS client when the + // server deletes a previously sent Listener or Cluster resource. If set, the + // xDS client will not invoke the watchers' OnResourceDoesNotExist() method + // when a resource is deleted, nor will it remove the existing resource value + // from its cache. + IgnoreResourceDeletion bool +} + +// CredsDialOption returns the configured credentials as a grpc dial option. +func (sc *ServerConfig) CredsDialOption() grpc.DialOption { + return sc.credsDialOption } // String returns the string representation of the ServerConfig. @@ -126,24 +168,20 @@ type ServerConfig struct { // content. It doesn't cover NodeProto because NodeProto isn't used by // federation. func (sc *ServerConfig) String() string { - var ver string - switch sc.TransportAPI { - case version.TransportV3: - ver = "xDSv3" - case version.TransportV2: - ver = "xDSv2" - } - return strings.Join([]string{sc.ServerURI, sc.CredsType, ver}, "-") + features := strings.Join(sc.ServerFeatures, "-") + return strings.Join([]string{sc.ServerURI, sc.Creds.String(), features}, "-") } // MarshalJSON marshals the ServerConfig to json. 
func (sc ServerConfig) MarshalJSON() ([]byte, error) { server := xdsServer{ - ServerURI: sc.ServerURI, - ChannelCreds: []channelCreds{{Type: sc.CredsType, Config: nil}}, + ServerURI: sc.ServerURI, + ChannelCreds: []channelCreds{{Type: sc.Creds.Type, Config: sc.Creds.Config}}, + ServerFeatures: sc.ServerFeatures, } - if sc.TransportAPI == version.TransportV3 { - server.ServerFeatures = []string{serverFeaturesV3} + server.ServerFeatures = []string{serverFeaturesV3} + if sc.IgnoreResourceDeletion { + server.ServerFeatures = append(server.ServerFeatures, serverFeaturesIgnoreResourceDeletion) } return json.Marshal(server) } @@ -154,10 +192,16 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &server); err != nil { return fmt.Errorf("xds: json.Unmarshal(data) for field ServerConfig failed during bootstrap: %v", err) } + sc.ServerURI = server.ServerURI + sc.ServerFeatures = server.ServerFeatures + for _, f := range server.ServerFeatures { + if f == serverFeaturesIgnoreResourceDeletion { + sc.IgnoreResourceDeletion = true + } + } for _, cc := range server.ChannelCreds { // We stop at the first credential type that we support. - sc.CredsType = cc.Type c := bootstrap.GetCredentials(cc.Type) if c == nil { continue @@ -166,15 +210,51 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("failed to build credentials bundle from bootstrap for %q: %v", cc.Type, err) } - sc.Creds = grpc.WithCredentialsBundle(bundle) + sc.Creds = ChannelCreds(cc) + sc.credsDialOption = grpc.WithCredentialsBundle(bundle) break } - for _, f := range server.ServerFeatures { - if f == serverFeaturesV3 { - sc.TransportAPI = version.TransportV3 + return nil +} + +// ServerConfigFromJSON creates a new ServerConfig from the given JSON +// configuration. This is the preferred way of creating a ServerConfig when +// hand-crafting the JSON configuration. 
+func ServerConfigFromJSON(data []byte) (*ServerConfig, error) { + sc := new(ServerConfig) + if err := sc.UnmarshalJSON(data); err != nil { + return nil, err + } + return sc, nil +} + +// Equal reports whether sc and other are considered equal. +func (sc *ServerConfig) Equal(other *ServerConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + case sc.ServerURI != other.ServerURI: + return false + case !sc.Creds.Equal(other.Creds): + return false + case !equalStringSlice(sc.ServerFeatures, other.ServerFeatures): + return false + } + return true +} + +func equalStringSlice(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false } } - return nil + return true } // unmarshalJSONServerConfigSlice unmarshals JSON to a slice. @@ -277,7 +357,6 @@ type Config struct { // // Defaults to "%s". ClientDefaultListenerResourceNameTemplate string - // Authorities is a map of authority name to corresponding configuration. // // This is used in the following cases: @@ -292,6 +371,9 @@ type Config struct { // In any of those cases, it is an error if the specified authority is // not present in this map. Authorities map[string]*Authority + // NodeProto contains the Node proto to be used in xDS requests. This will be + // of type *v3corepb.Node. + NodeProto *v3corepb.Node } type channelCreds struct { @@ -317,7 +399,7 @@ func bootstrapConfigFromEnvVariable() ([]byte, error) { // // Note that even if the content is invalid, we don't failover to the // file content env variable. 
- logger.Debugf("xds: using bootstrap file with name %q", fName) + logger.Debugf("Using bootstrap file with name %q", fName) return bootstrapFileReadFunc(fName) } @@ -349,7 +431,6 @@ func NewConfig() (*Config, error) { if err != nil { return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err) } - logger.Debugf("Bootstrap content: %s", data) return newConfigFromContents(data) } @@ -374,12 +455,6 @@ func newConfigFromContents(data []byte) (*Config, error) { for k, v := range jsonData { switch k { case "node": - // We unconditionally convert the JSON into a v3.Node proto. The v3 - // proto does not contain the deprecated field "build_version" from - // the v2 proto. We do not expect the bootstrap file to contain the - // "build_version" field. In any case, the unmarshal will succeed - // because we have set the `AllowUnknownFields` option on the - // unmarshaler. node = &v3corepb.Node{} if err := m.Unmarshal(bytes.NewReader(v), node); err != nil { return nil, fmt.Errorf("xds: jsonpb.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) @@ -425,7 +500,7 @@ func newConfigFromContents(data []byte) (*Config, error) { } case "client_default_listener_resource_name_template": if !envconfig.XDSFederation { - logger.Warningf("xds: bootstrap field %v is not support when Federation is disabled", k) + logger.Warningf("Bootstrap field %v is not support when Federation is disabled", k) continue } if err := json.Unmarshal(v, &config.ClientDefaultListenerResourceNameTemplate); err != nil { @@ -433,7 +508,7 @@ func newConfigFromContents(data []byte) (*Config, error) { } case "authorities": if !envconfig.XDSFederation { - logger.Warningf("xds: bootstrap field %v is not support when Federation is disabled", k) + logger.Warningf("Bootstrap field %v is not support when Federation is disabled", k) continue } if err := json.Unmarshal(v, &config.Authorities); err != nil { @@ -457,7 +532,7 @@ func newConfigFromContents(data []byte) (*Config, error) { if 
config.XDSServer.ServerURI == "" { return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"]) } - if config.XDSServer.Creds == nil { + if config.XDSServer.CredsDialOption() == nil { return nil, fmt.Errorf("xds: required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"]) } // Post-process the authorities' client listener resource template field: @@ -474,66 +549,16 @@ func newConfigFromContents(data []byte) (*Config, error) { } } - if err := config.updateNodeProto(node); err != nil { - return nil, err - } - logger.Infof("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config)) - return config, nil -} - -// updateNodeProto updates the node proto read from the bootstrap file. -// -// The input node is a v3.Node protobuf message corresponding to the JSON -// contents found in the bootstrap file. This method performs some post -// processing on it: -// 1. If the node is nil, we create an empty one here. That way, callers of this -// function can always expect that the NodeProto field is non-nil. -// 2. Some additional fields which are not expected to be set in the bootstrap -// file are populated here. -// 3. For each server config (both top level and in each authority), we set its -// node field to the v3.Node, or a v2.Node with the same content, depending on -// the server's transport API version. 
-func (c *Config) updateNodeProto(node *v3corepb.Node) error { - v3 := node - if v3 == nil { - v3 = &v3corepb.Node{} - } - v3.UserAgentName = gRPCUserAgentName - v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} - v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning, clientFeatureResourceWrapper) - - v3bytes, err := proto.Marshal(v3) - if err != nil { - return fmt.Errorf("xds: proto.Marshal(%v): %v", v3, err) - } - v2 := &v2corepb.Node{} - if err := proto.Unmarshal(v3bytes, v2); err != nil { - return fmt.Errorf("xds: proto.Unmarshal(%v): %v", v3bytes, err) - } - // BuildVersion is deprecated, and is replaced by user_agent_name and - // user_agent_version. But the management servers are still using the old - // field, so we will keep both set. - v2.BuildVersion = gRPCVersion - v2.UserAgentVersionType = &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} - - switch c.XDSServer.TransportAPI { - case version.TransportV2: - c.XDSServer.NodeProto = v2 - case version.TransportV3: - c.XDSServer.NodeProto = v3 + // Performing post-production on the node information. Some additional fields + // which are not expected to be set in the bootstrap file are populated here. 
+ if node == nil { + node = &v3corepb.Node{} } + node.UserAgentName = gRPCUserAgentName + node.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} + node.ClientFeatures = append(node.ClientFeatures, clientFeatureNoOverprovisioning, clientFeatureResourceWrapper) + config.NodeProto = node - for _, a := range c.Authorities { - if a.XDSServer == nil { - continue - } - switch a.XDSServer.TransportAPI { - case version.TransportV2: - a.XDSServer.NodeProto = v2 - case version.TransportV3: - a.XDSServer.NodeProto = v3 - } - } - - return nil + logger.Debugf("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config)) + return config, nil } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/callback_serializer.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/callback_serializer.go deleted file mode 100644 index 4c799e2163..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/callback_serializer.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package xdsclient - -import ( - "context" - - "google.golang.org/grpc/internal/buffer" -) - -// callbackSerializer provides a mechanism to schedule callbacks in a -// synchronized manner. It provides a FIFO guarantee on the order of execution -// of scheduled callbacks. 
New callbacks can be scheduled by invoking the -// Schedule() method. -// -// This type is safe for concurrent access. -type callbackSerializer struct { - callbacks *buffer.Unbounded -} - -// newCallbackSerializer returns a new callbackSerializer instance. The provided -// context will be passed to the scheduled callbacks. Users should cancel the -// provided context to shutdown the callbackSerializer. It is guaranteed that no -// callbacks will be executed once this context is canceled. -func newCallbackSerializer(ctx context.Context) *callbackSerializer { - t := &callbackSerializer{callbacks: buffer.NewUnbounded()} - go t.run(ctx) - return t -} - -// Schedule adds a callback to be scheduled after existing callbacks are run. -// -// Callbacks are expected to honor the context when performing any blocking -// operations, and should return early when the context is canceled. -func (t *callbackSerializer) Schedule(f func(ctx context.Context)) { - t.callbacks.Put(f) -} - -func (t *callbackSerializer) run(ctx context.Context) { - for ctx.Err() == nil { - select { - case <-ctx.Done(): - return - case callback := <-t.callbacks.Get(): - t.callbacks.Load() - callback.(func(ctx context.Context))(ctx) - } - } -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go index 6e380b27d5..cc39fb2e4d 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go @@ -45,6 +45,11 @@ type XDSClient interface { // instead use a resource-type-specific wrapper API provided by the relevant // resource type implementation. // + // + // During a race (e.g. an xDS response is received while the user is calling + // cancel()), there's a small window where the callback can be called after + // the watcher is canceled. 
Callers need to handle this case. + // // TODO: Once this generic client API is fully implemented and integrated, // delete the resource type specific watch APIs on this interface. WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go index 8335f9a88a..b330c19dfd 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go @@ -69,7 +69,7 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, i done: grpcsync.NewEvent(), config: config, watchExpiryTimeout: watchExpiryTimeout, - serializer: newCallbackSerializer(ctx), + serializer: grpcsync.NewCallbackSerializer(ctx), serializerClose: cancel, resourceTypes: newResourceTypeRegistry(), authorities: make(map[string]*authority), diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go index 261b6bf48f..2c05ea66f5 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go @@ -37,7 +37,7 @@ type clientImpl struct { config *bootstrap.Config logger *grpclog.PrefixLogger watchExpiryTimeout time.Duration - serializer *callbackSerializer + serializer *grpcsync.CallbackSerializer serializerClose func() resourceTypes *resourceTypeRegistry diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go 
b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go index 77c4a614a2..3c3adad534 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go @@ -48,10 +48,6 @@ func (l *listenerWatcher) OnResourceDoesNotExist() { // WatchListener uses LDS to discover information about the Listener resource // identified by resourceName. -// -// Note that during race (e.g. an xDS response is received while the user is -// calling cancel()), there's a small window where the callback can be called -// after the watcher is canceled. The caller needs to handle this case. func (c *clientImpl) WatchListener(resourceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) { watcher := &listenerWatcher{resourceName: resourceName, cb: cb} return xdsresource.WatchListener(c, resourceName, watcher) @@ -80,10 +76,6 @@ func (r *routeConfigWatcher) OnResourceDoesNotExist() { // WatchRouteConfig uses RDS to discover information about the // RouteConfiguration resource identified by resourceName. -// -// Note that during race (e.g. an xDS response is received while the user is -// calling cancel()), there's a small window where the callback can be called -// after the watcher is canceled. The caller needs to handle this case. func (c *clientImpl) WatchRouteConfig(resourceName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) { watcher := &routeConfigWatcher{resourceName: resourceName, cb: cb} return xdsresource.WatchRouteConfig(c, resourceName, watcher) @@ -115,10 +107,6 @@ func (c *clusterWatcher) OnResourceDoesNotExist() { // // WatchCluster can be called multiple times, with same or different // clusterNames. Each call will start an independent watcher for the resource. -// -// Note that during race (e.g. 
an xDS response is received while the user is -// calling cancel()), there's a small window where the callback can be called -// after the watcher is canceled. The caller needs to handle this case. func (c *clientImpl) WatchCluster(resourceName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) { watcher := &clusterWatcher{resourceName: resourceName, cb: cb} return xdsresource.WatchCluster(c, resourceName, watcher) @@ -150,10 +138,6 @@ func (c *endpointsWatcher) OnResourceDoesNotExist() { // // WatchEndpoints can be called multiple times, with same or different // clusterNames. Each call will start an independent watcher for the resource. -// -// Note that during race (e.g. an xDS response is received while the user is -// calling cancel()), there's a small window where the callback can be called -// after the watcher is canceled. The caller needs to handle this case. func (c *clientImpl) WatchEndpoints(resourceName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) { watcher := &endpointsWatcher{resourceName: resourceName, cb: cb} return xdsresource.WatchEndpoints(c, resourceName, watcher) @@ -172,12 +156,12 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, // ref-counted client sets its pointer to `nil`. And if any watch APIs are // made on such a closed client, we will get here with a `nil` receiver. 
if c == nil || c.done.HasFired() { - logger.Warningf("Watch registered for name %q of type %q, but client is closed", rType.TypeEnum().String(), resourceName) + logger.Warningf("Watch registered for name %q of type %q, but client is closed", rType.TypeName(), resourceName) return func() {} } if err := c.resourceTypes.maybeRegister(rType); err != nil { - logger.Warningf("Watch registered for name %q of type %q which is already registered", rType.TypeEnum().String(), resourceName) + logger.Warningf("Watch registered for name %q of type %q which is already registered", rType.TypeName(), resourceName) c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) return func() {} } @@ -196,7 +180,7 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, n := xdsresource.ParseName(resourceName) a, unref, err := c.findAuthority(n) if err != nil { - logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeEnum().String(), resourceName, n.Authority) + logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeName(), resourceName, n.Authority) c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) return func() {} } @@ -232,7 +216,7 @@ func (r *resourceTypeRegistry) maybeRegister(rType xdsresource.Type) error { url := rType.TypeURL() typ, ok := r.types[url] if ok && typ != rType { - return fmt.Errorf("attempt to re-register a resource type implementation for %v", rType.TypeEnum()) + return fmt.Errorf("attempt to re-register a resource type implementation for %v", rType.TypeName()) } r.types[url] = rType return nil diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go index 4c42ae4249..96db8ef513 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go +++ 
b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go @@ -94,11 +94,7 @@ func newRefCountedWithConfig(fallbackConfig *bootstrap.Config) (XDSClient, func( singletonClient = &clientRefCounted{clientImpl: c, refCount: 1} singletonClientImplCreateHook() - nodeID := "" - if node, ok := config.XDSServer.NodeProto.(interface{ GetId() string }); ok { - nodeID = node.GetId() - } - logger.Infof("xDS node ID: %s", nodeID) + logger.Infof("xDS node ID: %s", config.NodeProto.GetId()) return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go index 58a2e5dedb..89ffc4fcec 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go @@ -120,19 +120,19 @@ func (t *Transport) lrsRunner(ctx context.Context) { defer cancel() stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx) if err != nil { - t.logger.Warningf("Failed to create LRS stream: %v", err) + t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err) return false } - t.logger.Infof("Created LRS stream to server: %s", t.serverURI) + t.logger.Infof("Created LRS stream to server %q", t.serverURI) if err := t.sendFirstLoadStatsRequest(stream, node); err != nil { - t.logger.Warningf("Failed to send first LRS request: %v", err) + t.logger.Warningf("Sending first LRS request failed: %v", err) return false } clusters, interval, err := t.recvFirstLoadStatsResponse(stream) if err != nil { - t.logger.Warningf("Failed to read from LRS stream: %v", err) + t.logger.Warningf("Reading from LRS stream failed: %v", err) return false } @@ -160,7 
+160,7 @@ func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterName return } if err := t.sendLoadStatsRequest(stream, t.lrsStore.Stats(clusterNames)); err != nil { - t.logger.Warningf("Failed to write to LRS stream: %v", err) + t.logger.Warningf("Writing to LRS stream failed: %v", err) return } } @@ -168,7 +168,9 @@ func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterName func (t *Transport) sendFirstLoadStatsRequest(stream lrsStream, node *v3corepb.Node) error { req := &v3lrspb.LoadStatsRequest{Node: node} - t.logger.Debugf("Sending initial LoadStatsRequest: %s", pretty.ToJSON(req)) + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Sending initial LoadStatsRequest: %s", pretty.ToJSON(req)) + } err := stream.Send(req) if err == io.EOF { return getStreamError(stream) @@ -181,7 +183,9 @@ func (t *Transport) recvFirstLoadStatsResponse(stream lrsStream) ([]string, time if err != nil { return nil, 0, fmt.Errorf("failed to receive first LoadStatsResponse: %v", err) } - t.logger.Debugf("Received first LoadStatsResponse: %s", pretty.ToJSON(resp)) + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Received first LoadStatsResponse: %s", pretty.ToJSON(resp)) + } interval, err := ptypes.Duration(resp.GetLoadReportingInterval()) if err != nil { @@ -251,7 +255,9 @@ func (t *Transport) sendLoadStatsRequest(stream lrsStream, loads []*load.Data) e } req := &v3lrspb.LoadStatsRequest{ClusterStats: clusterStats} - t.logger.Debugf("Sending LRS loads: %s", pretty.ToJSON(req)) + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Sending LRS loads: %s", pretty.ToJSON(req)) + } err := stream.Send(req) if err == io.EOF { return getStreamError(stream) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go index 814ca5f872..86803588a7 100644 --- 
a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go @@ -45,6 +45,12 @@ import ( statuspb "google.golang.org/genproto/googleapis/rpc/status" ) +// Any per-RPC level logs which print complete request or response messages +// should be gated at this verbosity level. Other per-RPC level logs which print +// terse output should be at `INFO` and verbosity 2, which corresponds to using +// the `Debugf` method on the logger. +const perRPCVerbosityLevel = 9 + type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient // Transport provides a resource-type agnostic implementation of the xDS @@ -57,17 +63,18 @@ type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesC // protocol version. type Transport struct { // These fields are initialized at creation time and are read-only afterwards. - cc *grpc.ClientConn // ClientConn to the mangement server. - serverURI string // URI of the management server. - updateHandler UpdateHandlerFunc // Resource update handler. xDS data model layer. - adsStreamErrHandler func(error) // To report underlying stream errors. - lrsStore *load.Store // Store returned to user for pushing loads. - backoff func(int) time.Duration // Backoff after stream failures. - nodeProto *v3corepb.Node // Identifies the gRPC application. - logger *grpclog.PrefixLogger // Prefix logger for transport logs. - adsRunnerCancel context.CancelFunc // CancelFunc for the ADS goroutine. - adsRunnerDoneCh chan struct{} // To notify exit of ADS goroutine. - lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. + cc *grpc.ClientConn // ClientConn to the mangement server. + serverURI string // URI of the management server. + onRecvHandler OnRecvHandlerFunc // Resource update handler. xDS data model layer. 
+ onErrorHandler func(error) // To report underlying stream errors. + onSendHandler OnSendHandlerFunc // To report resources requested on ADS stream. + lrsStore *load.Store // Store returned to user for pushing loads. + backoff func(int) time.Duration // Backoff after stream failures. + nodeProto *v3corepb.Node // Identifies the gRPC application. + logger *grpclog.PrefixLogger // Prefix logger for transport logs. + adsRunnerCancel context.CancelFunc // CancelFunc for the ADS goroutine. + adsRunnerDoneCh chan struct{} // To notify exit of ADS goroutine. + lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. // These channels enable synchronization amongst the different goroutines // spawned by the transport, and between asynchorous events resulting from @@ -96,7 +103,7 @@ type Transport struct { lrsRefCount int // Reference count on the load store. } -// UpdateHandlerFunc is the implementation at the xDS data model layer, which +// OnRecvHandlerFunc is the implementation at the xDS data model layer, which // determines if the configuration received from the management server can be // applied locally or not. // @@ -105,7 +112,11 @@ type Transport struct { // cause the transport layer to send an ACK to the management server. A non-nil // error is returned from this function when the data model layer believes // otherwise, and this will cause the transport layer to send a NACK. -type UpdateHandlerFunc func(update ResourceUpdate) error +type OnRecvHandlerFunc func(update ResourceUpdate) error + +// OnSendHandlerFunc is the implementation at the authority, which handles state +// changes for the resource watch and stop watch timers accordingly. +type OnSendHandlerFunc func(update *ResourceSendInfo) // ResourceUpdate is a representation of the configuration update received from // the management server. 
It only contains fields which are useful to the data @@ -125,17 +136,27 @@ type Options struct { // ServerCfg contains all the configuration required to connect to the xDS // management server. ServerCfg bootstrap.ServerConfig - // UpdateHandler is the component which makes ACK/NACK decisions based on + // OnRecvHandler is the component which makes ACK/NACK decisions based on // the received resources. // // Invoked inline and implementations must not block. - UpdateHandler UpdateHandlerFunc - // StreamErrorHandler provides a way for the transport layer to report + OnRecvHandler OnRecvHandlerFunc + // OnErrorHandler provides a way for the transport layer to report // underlying stream errors. These can be bubbled all the way up to the user // of the xdsClient. // // Invoked inline and implementations must not block. - StreamErrorHandler func(error) + OnErrorHandler func(error) + // OnSendHandler provides a way for the transport layer to report underlying + // resource requests sent on the stream. However, Send() on the ADS stream will + // return successfully as long as: + // 1. there is enough flow control quota to send the message. + // 2. the message is added to the send buffer. + // However, the connection may fail after the callback is invoked and before + // the message is actually sent on the wire. This is accepted. + // + // Invoked inline and implementations must not block. + OnSendHandler func(*ResourceSendInfo) // Backoff controls the amount of time to backoff before recreating failed // ADS streams. If unspecified, a default exponential backoff implementation // is used. For more details, see: @@ -143,6 +164,9 @@ type Options struct { Backoff func(retries int) time.Duration // Logger does logging with a prefix. Logger *grpclog.PrefixLogger + // NodeProto contains the Node proto to be used in xDS requests. This will be + // of type *v3corepb.Node. + NodeProto *v3corepb.Node } // For overriding in unit tests. 
@@ -153,22 +177,19 @@ func New(opts Options) (*Transport, error) { switch { case opts.ServerCfg.ServerURI == "": return nil, errors.New("missing server URI when creating a new transport") - case opts.ServerCfg.Creds == nil: + case opts.ServerCfg.CredsDialOption() == nil: return nil, errors.New("missing credentials when creating a new transport") - case opts.UpdateHandler == nil: - return nil, errors.New("missing update handler when creating a new transport") - case opts.StreamErrorHandler == nil: - return nil, errors.New("missing stream error handler when creating a new transport") - } - - node, ok := opts.ServerCfg.NodeProto.(*v3corepb.Node) - if !ok { - return nil, fmt.Errorf("unexpected type %T for NodeProto, want %T", opts.ServerCfg.NodeProto, &v3corepb.Node{}) + case opts.OnRecvHandler == nil: + return nil, errors.New("missing OnRecv callback handler when creating a new transport") + case opts.OnErrorHandler == nil: + return nil, errors.New("missing OnError callback handler when creating a new transport") + case opts.OnSendHandler == nil: + return nil, errors.New("missing OnSend callback handler when creating a new transport") } // Dial the xDS management with the passed in credentials. dopts := []grpc.DialOption{ - opts.ServerCfg.Creds, + opts.ServerCfg.CredsDialOption(), grpc.WithKeepaliveParams(keepalive.ClientParameters{ // We decided to use these sane defaults in all languages, and // kicked the can down the road as far making these configurable. 
@@ -187,14 +208,15 @@ func New(opts Options) (*Transport, error) { boff = backoff.DefaultExponential.Backoff } ret := &Transport{ - cc: cc, - serverURI: opts.ServerCfg.ServerURI, - updateHandler: opts.UpdateHandler, - adsStreamErrHandler: opts.StreamErrorHandler, - lrsStore: load.NewStore(), - backoff: boff, - nodeProto: node, - logger: opts.Logger, + cc: cc, + serverURI: opts.ServerCfg.ServerURI, + onRecvHandler: opts.OnRecvHandler, + onErrorHandler: opts.OnErrorHandler, + onSendHandler: opts.OnSendHandler, + lrsStore: load.NewStore(), + backoff: boff, + nodeProto: opts.NodeProto, + logger: opts.Logger, adsStreamCh: make(chan adsStream, 1), adsRequestCh: buffer.NewUnbounded(), @@ -243,42 +265,55 @@ func (t *Transport) SendRequest(url string, resources []string) { func (t *Transport) newAggregatedDiscoveryServiceStream(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { // The transport retries the stream with an exponential backoff whenever the - // stream breaks. But if the channel is broken, we don't want the backoff - // logic to continuously retry the stream. Setting WaitForReady() blocks the - // stream creation until the channel is READY. - // - // TODO(easwars): Make changes required to comply with A57: - // https://github.com/grpc/proposal/blob/master/A57-xds-client-failure-mode-behavior.md - return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx, grpc.WaitForReady(true)) + // stream breaks without ever having seen a response. + return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) } -func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, resourceNames []string, resourceURL, version, nonce string, nackErr error) error { +// ResourceSendInfo wraps the names and url of resources sent to the management +// server. This is used by the `authority` type to start/stop the watch timer +// associated with every resource in the update. 
+type ResourceSendInfo struct { + ResourceNames []string + URL string +} + +func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, sendNodeProto bool, resourceNames []string, resourceURL, version, nonce string, nackErr error) error { req := &v3discoverypb.DiscoveryRequest{ - Node: t.nodeProto, TypeUrl: resourceURL, ResourceNames: resourceNames, VersionInfo: version, ResponseNonce: nonce, } + if sendNodeProto { + req.Node = t.nodeProto + } if nackErr != nil { req.ErrorDetail = &statuspb.Status{ Code: int32(codes.InvalidArgument), Message: nackErr.Error(), } } if err := stream.Send(req); err != nil { - return fmt.Errorf("sending ADS request %s failed: %v", pretty.ToJSON(req), err) + return err } - t.logger.Debugf("ADS request sent: %v", pretty.ToJSON(req)) + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("ADS request sent: %v", pretty.ToJSON(req)) + } else { + t.logger.Debugf("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) + } + t.onSendHandler(&ResourceSendInfo{URL: resourceURL, ResourceNames: resourceNames}) return nil } func (t *Transport) recvAggregatedDiscoveryServiceResponse(stream adsStream) (resources []*anypb.Any, resourceURL, version, nonce string, err error) { resp, err := stream.Recv() if err != nil { - return nil, "", "", "", fmt.Errorf("failed to read ADS response: %v", err) + return nil, "", "", "", err + } + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("ADS response received: %v", pretty.ToJSON(resp)) + } else { + t.logger.Debugf("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) } - t.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) - t.logger.Debugf("ADS response received: %v", pretty.ToJSON(resp)) return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil } @@ -290,9 +325,6 @@ func (t *Transport) adsRunner(ctx context.Context) { go 
t.send(ctx) - // TODO: start a goroutine monitoring ClientConn's connectivity state, and - // report error (and log) when stats is transient failure. - backoffAttempt := 0 backoffTimer := time.NewTimer(0) for ctx.Err() == nil { @@ -308,8 +340,8 @@ func (t *Transport) adsRunner(ctx context.Context) { resetBackoff := func() bool { stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc) if err != nil { - t.adsStreamErrHandler(err) - t.logger.Warningf("ADS stream creation failed: %v", err) + t.onErrorHandler(err) + t.logger.Warningf("Creating new ADS stream failed: %v", err) return false } t.logger.Infof("ADS stream created") @@ -342,17 +374,37 @@ func (t *Transport) adsRunner(ctx context.Context) { // there are new streams) and the appropriate request is sent out. func (t *Transport) send(ctx context.Context) { var stream adsStream + // The xDS protocol only requires that we send the node proto in the first + // discovery request on every stream. Sending the node proto in every + // request message wastes CPU resources on the client and the server. + sendNodeProto := true for { select { case <-ctx.Done(): return case stream = <-t.adsStreamCh: + // We have a new stream and we've to ensure that the node proto gets + // sent out in the first request on the stream. At this point, we + // might not have any registered watches. Setting this field to true + // here will ensure that the node proto gets sent out along with the + // discovery request when the first watch is registered. + if len(t.resources) == 0 { + sendNodeProto = true + continue + } + if !t.sendExisting(stream) { // Send failed, clear the current stream. Attempt to resend will // only be made after a new stream is created. stream = nil + continue + } + sendNodeProto = false + case u, ok := <-t.adsRequestCh.Get(): + if !ok { + // No requests will be sent after the adsRequestCh buffer is closed. 
+ return } - case u := <-t.adsRequestCh.Get(): t.adsRequestCh.Load() var ( @@ -378,11 +430,12 @@ func (t *Transport) send(ctx context.Context) { // sending response back). continue } - if err := t.sendAggregatedDiscoveryServiceRequest(stream, resources, url, version, nonce, nackErr); err != nil { - t.logger.Warningf("ADS request for {resources: %q, url: %v, version: %q, nonce: %q} failed: %v", resources, url, version, nonce, err) + if err := t.sendAggregatedDiscoveryServiceRequest(stream, sendNodeProto, resources, url, version, nonce, nackErr); err != nil { + t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, version, nonce, err) // Send failed, clear the current stream. stream = nil } + sendNodeProto = false } } } @@ -410,11 +463,14 @@ func (t *Transport) sendExisting(stream adsStream) bool { // seen by the client on the previous stream t.nonces = make(map[string]string) + // Send node proto only in the first request on the stream. 
+ sendNodeProto := true for url, resources := range t.resources { - if err := t.sendAggregatedDiscoveryServiceRequest(stream, mapToSlice(resources), url, t.versions[url], "", nil); err != nil { - t.logger.Warningf("ADS request failed: %v", err) + if err := t.sendAggregatedDiscoveryServiceRequest(stream, sendNodeProto, mapToSlice(resources), url, t.versions[url], "", nil); err != nil { + t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, t.versions[url], "", err) return false } + sendNodeProto = false } return true @@ -428,13 +484,22 @@ func (t *Transport) recv(stream adsStream) bool { for { resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream) if err != nil { - t.adsStreamErrHandler(err) - t.logger.Warningf("ADS stream is closed with error: %v", err) + // Note that we do not consider it an error if the ADS stream was closed + // after having received a response on the stream. This is because there + // are legitimate reasons why the server may need to close the stream during + // normal operations, such as needing to rebalance load or the underlying + // connection hitting its max connection age limit. + // (see [gRFC A9](https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md)). 
+ if msgReceived { + err = xdsresource.NewErrorf(xdsresource.ErrTypeStreamFailedAfterRecv, err.Error()) + } + t.onErrorHandler(err) + t.logger.Warningf("ADS stream closed: %v", err) return msgReceived } msgReceived = true - err = t.updateHandler(ResourceUpdate{ + err = t.onRecvHandler(ResourceUpdate{ Resources: resources, URL: url, Version: rVersion, @@ -456,7 +521,7 @@ func (t *Transport) recv(stream adsStream) bool { nackErr: err, }) t.mu.Unlock() - t.logger.Warningf("Sending NACK for resource type: %v, version: %v, nonce: %v, reason: %v", url, rVersion, nonce, err) + t.logger.Warningf("Sending NACK for resource type: %q, version: %q, nonce: %q, reason: %v", url, rVersion, nonce, err) continue } t.adsRequestCh.Put(&ackRequest{ @@ -465,7 +530,7 @@ func (t *Transport) recv(stream adsStream) bool { stream: stream, version: rVersion, }) - t.logger.Infof("Sending ACK for resource type: %v, version: %v, nonce: %v", url, rVersion, nonce) + t.logger.Debugf("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) } } @@ -560,6 +625,7 @@ func (t *Transport) processAckRequest(ack *ackRequest, stream grpc.ClientStream) func (t *Transport) Close() { t.adsRunnerCancel() <-t.adsRunnerDoneCh + t.adsRequestCh.Close() t.cc.Close() } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go new file mode 100644 index 0000000000..c5d5afe4eb --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go @@ -0,0 +1,234 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package converter provides converters to convert proto load balancing +// configuration, defined by the xDS API spec, to JSON load balancing +// configuration. These converters are registered by proto type in a registry, +// which gets pulled from based off proto type passed in. +package converter + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/internal/envconfig" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3clientsideweightedroundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3" + v3pickfirstpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3" + v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" + v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" + structpb "github.com/golang/protobuf/ptypes/struct" +) + +func init() { + 
xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin", convertWeightedRoundRobinProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash", convertRingHashProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst", convertPickFirstProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin", convertRoundRobinProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality", convertWRRLocalityProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/udpa.type.v1.TypedStruct", convertV1TypedStructToServiceConfig) + xdslbregistry.Register("type.googleapis.com/xds.type.v3.TypedStruct", convertV3TypedStructToServiceConfig) +} + +const ( + defaultRingHashMinSize = 1024 + defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M +) + +func convertRingHashProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + if !envconfig.XDSRingHash { + return nil, nil + } + rhProto := &v3ringhashpb.RingHash{} + if err := proto.Unmarshal(rawProto, rhProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + if rhProto.GetHashFunction() != v3ringhashpb.RingHash_XX_HASH { + return nil, fmt.Errorf("unsupported ring_hash hash function %v", rhProto.GetHashFunction()) + } + + var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize + if min := rhProto.GetMinimumRingSize(); min != nil { + minSize = min.GetValue() + } + if max := rhProto.GetMaximumRingSize(); max != nil { + maxSize = max.GetValue() + } + + rhCfg := &ringhash.LBConfig{ + MinRingSize: minSize, + MaxRingSize: maxSize, + } + + rhCfgJSON, err := json.Marshal(rhCfg) + if err != 
nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", rhCfg, err) + } + return makeBalancerConfigJSON(ringhash.Name, rhCfgJSON), nil +} + +type pfConfig struct { + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func convertPickFirstProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + if !envconfig.PickFirstLBConfig { + return nil, nil + } + pfProto := &v3pickfirstpb.PickFirst{} + if err := proto.Unmarshal(rawProto, pfProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + pfCfg := &pfConfig{ShuffleAddressList: pfProto.GetShuffleAddressList()} + js, err := json.Marshal(pfCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", pfCfg, err) + } + return makeBalancerConfigJSON(grpc.PickFirstBalancerName, js), nil +} + +func convertRoundRobinProtoToServiceConfig([]byte, int) (json.RawMessage, error) { + return makeBalancerConfigJSON(roundrobin.Name, json.RawMessage("{}")), nil +} + +type wrrLocalityLBConfig struct { + ChildPolicy json.RawMessage `json:"childPolicy,omitempty"` +} + +func convertWRRLocalityProtoToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { + wrrlProto := &v3wrrlocalitypb.WrrLocality{} + if err := proto.Unmarshal(rawProto, wrrlProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + epJSON, err := xdslbregistry.ConvertToServiceConfig(wrrlProto.GetEndpointPickingPolicy(), depth+1) + if err != nil { + return nil, fmt.Errorf("error converting endpoint picking policy: %v for %+v", err, wrrlProto) + } + wrrLCfg := wrrLocalityLBConfig{ + ChildPolicy: epJSON, + } + + lbCfgJSON, err := json.Marshal(wrrLCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", wrrLCfg, err) + } + return makeBalancerConfigJSON(wrrlocality.Name, lbCfgJSON), nil +} + +func convertWeightedRoundRobinProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + 
cswrrProto := &v3clientsideweightedroundrobinpb.ClientSideWeightedRoundRobin{} + if err := proto.Unmarshal(rawProto, cswrrProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + wrrLBCfg := &wrrLBConfig{} + // Only set fields if specified in proto. If not set, ParseConfig of the WRR + // will populate the config with defaults. + if enableOOBLoadReportCfg := cswrrProto.GetEnableOobLoadReport(); enableOOBLoadReportCfg != nil { + wrrLBCfg.EnableOOBLoadReport = enableOOBLoadReportCfg.GetValue() + } + if oobReportingPeriodCfg := cswrrProto.GetOobReportingPeriod(); oobReportingPeriodCfg != nil { + wrrLBCfg.OOBReportingPeriod = internalserviceconfig.Duration(oobReportingPeriodCfg.AsDuration()) + } + if blackoutPeriodCfg := cswrrProto.GetBlackoutPeriod(); blackoutPeriodCfg != nil { + wrrLBCfg.BlackoutPeriod = internalserviceconfig.Duration(blackoutPeriodCfg.AsDuration()) + } + if weightExpirationPeriodCfg := cswrrProto.GetBlackoutPeriod(); weightExpirationPeriodCfg != nil { + wrrLBCfg.WeightExpirationPeriod = internalserviceconfig.Duration(weightExpirationPeriodCfg.AsDuration()) + } + if weightUpdatePeriodCfg := cswrrProto.GetWeightUpdatePeriod(); weightUpdatePeriodCfg != nil { + wrrLBCfg.WeightUpdatePeriod = internalserviceconfig.Duration(weightUpdatePeriodCfg.AsDuration()) + } + if errorUtilizationPenaltyCfg := cswrrProto.GetErrorUtilizationPenalty(); errorUtilizationPenaltyCfg != nil { + wrrLBCfg.ErrorUtilizationPenalty = float64(errorUtilizationPenaltyCfg.GetValue()) + } + + lbCfgJSON, err := json.Marshal(wrrLBCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", wrrLBCfg, err) + } + return makeBalancerConfigJSON(weightedroundrobin.Name, lbCfgJSON), nil +} + +func convertV1TypedStructToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + tsProto := &v1xdsudpatypepb.TypedStruct{} + if err := proto.Unmarshal(rawProto, tsProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal 
resource: %v", err) + } + return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) +} + +func convertV3TypedStructToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + tsProto := &v3xdsxdstypepb.TypedStruct{} + if err := proto.Unmarshal(rawProto, tsProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) +} + +// convertCustomPolicy attempts to prepare json configuration for a custom lb +// proto, which specifies the gRPC balancer type and configuration. Returns the +// converted json and an error which should cause caller to error if error +// converting. If both json and error returned are nil, it means the gRPC +// Balancer registry does not contain that balancer type, and the caller should +// continue to the next policy. +func convertCustomPolicy(typeURL string, s *structpb.Struct) (json.RawMessage, error) { + // The gRPC policy name will be the "type name" part of the value of the + // type_url field in the TypedStruct. We get this by using the part after + // the last / character. Can assume a valid type_url from the control plane. + pos := strings.LastIndex(typeURL, "/") + name := typeURL[pos+1:] + + if balancer.Get(name) == nil { + return nil, nil + } + + rawJSON, err := json.Marshal(s) + if err != nil { + return nil, fmt.Errorf("error converting custom lb policy %v: %v for %+v", err, typeURL, s) + } + + // The Struct contained in the TypedStruct will be returned as-is as the + // configuration JSON object. 
+ return makeBalancerConfigJSON(name, rawJSON), nil +} + +type wrrLBConfig struct { + EnableOOBLoadReport bool `json:"enableOobLoadReport,omitempty"` + OOBReportingPeriod internalserviceconfig.Duration `json:"oobReportingPeriod,omitempty"` + BlackoutPeriod internalserviceconfig.Duration `json:"blackoutPeriod,omitempty"` + WeightExpirationPeriod internalserviceconfig.Duration `json:"weightExpirationPeriod,omitempty"` + WeightUpdatePeriod internalserviceconfig.Duration `json:"weightUpdatePeriod,omitempty"` + ErrorUtilizationPenalty float64 `json:"errorUtilizationPenalty,omitempty"` +} + +func makeBalancerConfigJSON(name string, value json.RawMessage) []byte { + return []byte(fmt.Sprintf(`[{%q: %s}]`, name, value)) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go new file mode 100644 index 0000000000..0f3d1df4db --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xdslbregistry provides a registry of converters that convert proto +// from load balancing configuration, defined by the xDS API spec, to JSON load +// balancing configuration. 
+package xdslbregistry + +import ( + "encoding/json" + "fmt" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" +) + +var ( + // m is a map from proto type to Converter. + m = make(map[string]Converter) +) + +// Register registers the converter to the map keyed on a proto type. Must be +// called at init time. Not thread safe. +func Register(protoType string, c Converter) { + m[protoType] = c +} + +// SetRegistry sets the xDS LB registry. Must be called at init time. Not thread +// safe. +func SetRegistry(registry map[string]Converter) { + m = registry +} + +// Converter converts raw proto bytes into the internal Go JSON representation +// of the proto passed. Returns the json message, and an error. If both +// returned are nil, it represents continuing to the next proto. +type Converter func([]byte, int) (json.RawMessage, error) + +// ConvertToServiceConfig converts a proto Load Balancing Policy configuration +// into a json string. Returns an error if: +// - no supported policy found +// - there is more than 16 layers of recursion in the configuration +// - a failure occurs when converting the policy +func ConvertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy, depth int) (json.RawMessage, error) { + // "Configurations that require more than 16 levels of recursion are + // considered invalid and should result in a NACK response." - A51 + if depth > 15 { + return nil, fmt.Errorf("lb policy %v exceeds max depth supported: 16 layers", lbPolicy) + } + + // "This function iterate over the list of policy messages in + // LoadBalancingPolicy, attempting to convert each one to gRPC form, + // stopping at the first supported policy." - A52 + for _, policy := range lbPolicy.GetPolicies() { + policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl() + converter := m[policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl()] + // "Any entry not in the above list is unsupported and will be skipped." 
+ // - A52 + // This includes Least Request as well, since grpc-go does not support + // the Least Request Load Balancing Policy. + if converter == nil { + continue + } + json, err := converter(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), depth) + if json == nil && err == nil { + continue + } + return json, err + } + return nil, fmt.Errorf("no supported policy found in policy list +%v", lbPolicy) +} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go index 87e6dbd119..183801c1c6 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -19,20 +19,26 @@ package xdsresource import ( "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" ) +const ( + // ClusterResourceTypeName represents the transport agnostic name for the + // cluster resource. + ClusterResourceTypeName = "ClusterResource" +) + var ( // Compile time interface checks. - _ Type = clusterResourceType{} - _ ResourceData = &ClusterResourceData{} + _ Type = clusterResourceType{} // Singleton instantiation of the resource type implementation. 
clusterType = clusterResourceType{ resourceTypeState: resourceTypeState{ - typeURL: "type.googleapis.com/envoy.config.cluster.v3.Cluster", - typeEnum: ClusterResource, + typeURL: version.V3ClusterURL, + typeName: ClusterResourceTypeName, allResourcesRequiredInSotW: true, }, } @@ -49,7 +55,7 @@ type clusterResourceType struct { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. func (clusterResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { - name, cluster, err := unmarshalClusterResource(resource, opts.Logger) + name, cluster, err := unmarshalClusterResource(resource) switch { case name == "": // Name is unset only when protobuf deserialization fails. diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go index dc1c09da08..775a8aa194 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -19,20 +19,26 @@ package xdsresource import ( "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" ) +const ( + // EndpointsResourceTypeName represents the transport agnostic name for the + // endpoint resource. + EndpointsResourceTypeName = "EndpointsResource" +) + var ( // Compile time interface checks. - _ Type = endpointsResourceType{} - _ ResourceData = &EndpointsResourceData{} + _ Type = endpointsResourceType{} // Singleton instantiation of the resource type implementation. 
endpointsType = endpointsResourceType{ resourceTypeState: resourceTypeState{ - typeURL: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", - typeEnum: EndpointsResource, + typeURL: version.V3EndpointsURL, + typeName: "EndpointsResource", allResourcesRequiredInSotW: false, }, } @@ -49,7 +55,7 @@ type endpointsResourceType struct { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. func (endpointsResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { - name, rc, err := unmarshalEndpointsResource(resource, opts.Logger) + name, rc, err := unmarshalEndpointsResource(resource) switch { case name == "": // Name is unset only when protobuf deserialization fails. diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go index 2d1b179db1..00ef931048 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go @@ -37,6 +37,9 @@ const ( // ErrorTypeResourceTypeUnsupported indicates the receipt of a message from // the management server with resources of an unsupported resource type. ErrorTypeResourceTypeUnsupported + // ErrTypeStreamFailedAfterRecv indicates an ADS stream error, after + // successful receipt of at least one message from the server. 
+ ErrTypeStreamFailedAfterRecv ) type xdsClientError struct { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go index 20cd408795..0390412fdc 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -28,7 +28,6 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" @@ -177,7 +176,6 @@ const ( // 7. Source IP address. // 8. Source port. type FilterChainManager struct { - logger *grpclog.PrefixLogger // Destination prefix is the first match criteria that we support. // Therefore, this multi-stage map is indexed on destination prefixes // specified in the match criteria. @@ -248,10 +246,9 @@ type sourcePrefixEntry struct { // // This function is only exported so that tests outside of this package can // create a FilterChainManager. -func NewFilterChainManager(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger) (*FilterChainManager, error) { +func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, error) { // Parse all the filter chains and build the internal data structures. 
fci := &FilterChainManager{ - logger: logger, dstPrefixMap: make(map[string]*destPrefixEntry), RouteConfigNames: make(map[string]bool), } @@ -305,7 +302,7 @@ func (fci *FilterChainManager) addFilterChains(fcs []*v3listenerpb.FilterChain) if fcm.GetDestinationPort().GetValue() != 0 { // Destination port is the first match criteria and we do not // support filter chains which contains this match criteria. - fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported destination_port match field", fc) + logger.Warningf("Dropping filter chain %+v since it contains unsupported destination_port match field", fc) continue } @@ -354,7 +351,7 @@ func (fci *FilterChainManager) addFilterChainsForServerNames(dstEntry *destPrefi // Filter chains specifying server names in their match criteria always fail // a match at connection time. So, these filter chains can be dropped now. if len(fc.GetFilterChainMatch().GetServerNames()) != 0 { - fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported server_names match field", fc) + logger.Warningf("Dropping filter chain %+v since it contains unsupported server_names match field", fc) return nil } @@ -367,13 +364,13 @@ func (fci *FilterChainManager) addFilterChainsForTransportProtocols(dstEntry *de case tp != "" && tp != "raw_buffer": // Only allow filter chains with transport protocol set to empty string // or "raw_buffer". - fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) return nil case tp == "" && dstEntry.rawBufferSeen: // If we have already seen filter chains with transport protocol set to // "raw_buffer", we can drop filter chains with transport protocol set // to empty string, since the former takes precedence. 
- fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) return nil case tp != "" && !dstEntry.rawBufferSeen: // This is the first "raw_buffer" that we are seeing. Set the bit and @@ -387,7 +384,7 @@ func (fci *FilterChainManager) addFilterChainsForTransportProtocols(dstEntry *de func (fci *FilterChainManager) addFilterChainsForApplicationProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { if len(fc.GetFilterChainMatch().GetApplicationProtocols()) != 0 { - fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported application_protocols match field", fc) + logger.Warningf("Dropping filter chain %+v since it contains unsupported application_protocols match field", fc) return nil } return fci.addFilterChainsForSourceType(dstEntry, fc) @@ -652,7 +649,7 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) // server-side." - A36 // Can specify v3 here, as will never get to this function // if v2. 
- routeU, err := generateRDSUpdateFromRouteConfiguration(hcm.GetRouteConfig(), nil, false) + routeU, err := generateRDSUpdateFromRouteConfiguration(hcm.GetRouteConfig()) if err != nil { return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go index 6b2fff9f6f..0aff941389 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -22,20 +22,26 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" ) +const ( + // ListenerResourceTypeName represents the transport agnostic name for the + // listener resource. + ListenerResourceTypeName = "ListenerResource" +) + var ( // Compile time interface checks. - _ Type = listenerResourceType{} - _ ResourceData = &ListenerResourceData{} + _ Type = listenerResourceType{} // Singleton instantiation of the resource type implementation. listenerType = listenerResourceType{ resourceTypeState: resourceTypeState{ - typeURL: "type.googleapis.com/envoy.config.listener.v3.Listener", - typeEnum: ListenerResource, + typeURL: version.V3ListenerURL, + typeName: ListenerResourceTypeName, allResourcesRequiredInSotW: true, }, } @@ -81,7 +87,7 @@ func listenerValidator(bc *bootstrap.Config, lis ListenerUpdate) error { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. 
func (listenerResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { - name, listener, err := unmarshalListenerResource(resource, opts.Logger) + name, listener, err := unmarshalListenerResource(resource) switch { case name == "": // Name is unset only when protobuf deserialization fails. diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/logging.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/logging.go new file mode 100644 index 0000000000..62bcb016ba --- /dev/null +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/logging.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xdsresource + +import ( + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[xds-resource] " + +var logger = internalgrpclog.NewPrefixLogger(grpclog.Component("xds"), prefix) diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go index 6a056235f3..77aa85b68e 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go @@ -59,6 +59,8 @@ func RouteToMatcher(r *Route) (*CompositeMatcher, error) { matcherT = matcher.NewHeaderRangeMatcher(h.Name, h.RangeMatch.Start, h.RangeMatch.End, invert) case h.PresentMatch != nil: matcherT = matcher.NewHeaderPresentMatcher(h.Name, *h.PresentMatch, invert) + case h.StringMatch != nil: + matcherT = matcher.NewHeaderStringMatcher(h.Name, *h.StringMatch, invert) default: return nil, fmt.Errorf("illegal route: missing header_match_specifier") } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go index 6fced7784d..f67f0ea153 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go @@ -15,14 +15,30 @@ * limitations under the License. */ +// Package xdsresource implements the xDS data model layer. +// +// Provides resource-type specific functionality to unmarshal xDS protos into +// internal data structures that contain only fields gRPC is interested in. 
+// These internal data structures are passed to components in the xDS stack +// (resolver/balancers/server) that have expressed interest in receiving +// updates to specific resources. package xdsresource import ( - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) +func init() { + internal.ResourceTypeMapForTesting = make(map[string]interface{}) + internal.ResourceTypeMapForTesting[version.V3ListenerURL] = listenerType + internal.ResourceTypeMapForTesting[version.V3RouteConfigURL] = routeConfigType + internal.ResourceTypeMapForTesting[version.V3ClusterURL] = clusterType + internal.ResourceTypeMapForTesting[version.V3EndpointsURL] = endpointsType +} + // Producer contains a single method to discover resource configuration from a // remote management server using xDS APIs. // @@ -68,14 +84,14 @@ type Type interface { // TypeURL is the xDS type URL of this resource type for v3 transport. TypeURL() string - // TypeEnum is an enumerated value for this resource type. This can be used - // for logging/debugging purposes, as well in cases where the resource type - // is to be uniquely identified but the actual functionality provided by the - // resource type is not required. + // TypeName identifies resources in a transport protocol agnostic way. This + // can be used for logging/debugging purposes, as well in cases where the + // resource type name is to be uniquely identified but the actual + // functionality provided by the resource type is not required. // - // TODO: once Type is renamed to ResourceType, rename ResourceType to - // ResourceTypeEnum. - TypeEnum() ResourceType + // TODO: once Type is renamed to ResourceType, rename TypeName to + // ResourceTypeName. 
+ TypeName() string // AllResourcesRequiredInSotW indicates whether this resource type requires // that all resources be present in every SotW response from the server. If @@ -115,8 +131,6 @@ type DecodeOptions struct { // BootstrapConfig contains the bootstrap configuration passed to the // top-level xdsClient. This contains useful data for resource validation. BootstrapConfig *bootstrap.Config - // Logger is to be used for emitting logs during the Decode operation. - Logger *grpclog.PrefixLogger } // DecodeResult is the result of a decode operation. @@ -133,7 +147,7 @@ type DecodeResult struct { // implemented here for free. type resourceTypeState struct { typeURL string - typeEnum ResourceType + typeName string allResourcesRequiredInSotW bool } @@ -141,8 +155,8 @@ func (r resourceTypeState) TypeURL() string { return r.typeURL } -func (r resourceTypeState) TypeEnum() ResourceType { - return r.typeEnum +func (r resourceTypeState) TypeName() string { + return r.typeName } func (r resourceTypeState) AllResourcesRequiredInSotW() bool { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go index 31be4d6aeb..8ce5cb2859 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -19,20 +19,26 @@ package xdsresource import ( "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" ) +const ( + // RouteConfigTypeName represents the transport agnostic name for the + // route config resource. 
+ RouteConfigTypeName = "RouteConfigResource" +) + var ( // Compile time interface checks. - _ Type = routeConfigResourceType{} - _ ResourceData = &RouteConfigResourceData{} + _ Type = routeConfigResourceType{} // Singleton instantiation of the resource type implementation. routeConfigType = routeConfigResourceType{ resourceTypeState: resourceTypeState{ - typeURL: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", - typeEnum: RouteConfigResource, + typeURL: version.V3RouteConfigURL, + typeName: "RouteConfigResource", allResourcesRequiredInSotW: false, }, } @@ -49,7 +55,7 @@ type routeConfigResourceType struct { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. func (routeConfigResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { - name, rc, err := unmarshalRouteConfigResource(resource, opts.Logger) + name, rc, err := unmarshalRouteConfigResource(resource) switch { case name == "": // Name is unset only when protobuf deserialization fails. diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go index d9c78997cf..0fb3f274ed 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go @@ -51,38 +51,38 @@ type UpdateMetadata struct { // IsListenerResource returns true if the provider URL corresponds to an xDS // Listener resource. func IsListenerResource(url string) bool { - return url == version.V2ListenerURL || url == version.V3ListenerURL + return url == version.V3ListenerURL } // IsHTTPConnManagerResource returns true if the provider URL corresponds to an xDS // HTTPConnManager resource. 
func IsHTTPConnManagerResource(url string) bool { - return url == version.V2HTTPConnManagerURL || url == version.V3HTTPConnManagerURL + return url == version.V3HTTPConnManagerURL } // IsRouteConfigResource returns true if the provider URL corresponds to an xDS // RouteConfig resource. func IsRouteConfigResource(url string) bool { - return url == version.V2RouteConfigURL || url == version.V3RouteConfigURL + return url == version.V3RouteConfigURL } // IsClusterResource returns true if the provider URL corresponds to an xDS // Cluster resource. func IsClusterResource(url string) bool { - return url == version.V2ClusterURL || url == version.V3ClusterURL + return url == version.V3ClusterURL } // IsEndpointsResource returns true if the provider URL corresponds to an xDS // Endpoints resource. func IsEndpointsResource(url string) bool { - return url == version.V2EndpointsURL || url == version.V3EndpointsURL + return url == version.V3EndpointsURL } -// unwrapResource unwraps and returns the inner resource if it's in a resource +// UnwrapResource unwraps and returns the inner resource if it's in a resource // wrapper. The original resource is returned if it's not wrapped. -func unwrapResource(r *anypb.Any) (*anypb.Any, error) { +func UnwrapResource(r *anypb.Any) (*anypb.Any, error) { url := r.GetTypeUrl() - if url != version.V2ResourceWrapperURL && url != version.V3ResourceWrapperURL { + if url != version.V3ResourceWrapperURL { // Not wrapped. return r, nil } @@ -133,89 +133,3 @@ type UpdateWithMD struct { MD UpdateMetadata Raw *anypb.Any } - -// ResourceType identifies resources in a transport protocol agnostic way. These -// will be used in transport version agnostic code, while the versioned API -// clients will map these to appropriate version URLs. -type ResourceType int - -// Version agnostic resource type constants. 
-const ( - UnknownResource ResourceType = iota - ListenerResource - HTTPConnManagerResource - RouteConfigResource - ClusterResource - EndpointsResource -) - -func (r ResourceType) String() string { - switch r { - case ListenerResource: - return "ListenerResource" - case HTTPConnManagerResource: - return "HTTPConnManagerResource" - case RouteConfigResource: - return "RouteConfigResource" - case ClusterResource: - return "ClusterResource" - case EndpointsResource: - return "EndpointsResource" - default: - return "UnknownResource" - } -} - -var v2ResourceTypeToURL = map[ResourceType]string{ - ListenerResource: version.V2ListenerURL, - HTTPConnManagerResource: version.V2HTTPConnManagerURL, - RouteConfigResource: version.V2RouteConfigURL, - ClusterResource: version.V2ClusterURL, - EndpointsResource: version.V2EndpointsURL, -} -var v3ResourceTypeToURL = map[ResourceType]string{ - ListenerResource: version.V3ListenerURL, - HTTPConnManagerResource: version.V3HTTPConnManagerURL, - RouteConfigResource: version.V3RouteConfigURL, - ClusterResource: version.V3ClusterURL, - EndpointsResource: version.V3EndpointsURL, -} - -// URL returns the transport protocol specific resource type URL. 
-func (r ResourceType) URL(v version.TransportAPI) string { - var mapping map[ResourceType]string - switch v { - case version.TransportV2: - mapping = v2ResourceTypeToURL - case version.TransportV3: - mapping = v3ResourceTypeToURL - default: - return "UnknownResource" - } - if url, ok := mapping[r]; ok { - return url - } - return "UnknownResource" -} - -var urlToResourceType = map[string]ResourceType{ - version.V2ListenerURL: ListenerResource, - version.V2RouteConfigURL: RouteConfigResource, - version.V2ClusterURL: ClusterResource, - version.V2EndpointsURL: EndpointsResource, - version.V2HTTPConnManagerURL: HTTPConnManagerResource, - version.V3ListenerURL: ListenerResource, - version.V3RouteConfigURL: RouteConfigResource, - version.V3ClusterURL: ClusterResource, - version.V3EndpointsURL: EndpointsResource, - version.V3HTTPConnManagerURL: HTTPConnManagerResource, -} - -// ResourceTypeFromURL returns the xDS resource type associated with the given -// resource type URL. -func ResourceTypeFromURL(url string) ResourceType { - if typ, ok := urlToResourceType[url]; ok { - return typ - } - return UnknownResource -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go index d459717acd..269d9ebdae 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go @@ -18,7 +18,7 @@ package xdsresource import ( - "time" + "encoding/json" "google.golang.org/protobuf/types/known/anypb" ) @@ -51,78 +51,6 @@ const ( ClusterLRSServerSelf ) -// ClusterLBPolicyRingHash represents ring_hash lb policy, and also contains its -// config. 
-type ClusterLBPolicyRingHash struct { - MinimumRingSize uint64 - MaximumRingSize uint64 -} - -// OutlierDetection is the outlier detection configuration for a cluster. -type OutlierDetection struct { - // Interval is the time interval between ejection analysis sweeps. This can - // result in both new ejections as well as addresses being returned to - // service. Defaults to 10s. - Interval time.Duration - // BaseEjectionTime is the base time that a host is ejected for. The real - // time is equal to the base time multiplied by the number of times the host - // has been ejected and is capped by MaxEjectionTime. Defaults to 30s. - BaseEjectionTime time.Duration - // MaxEjectionTime is the maximum time that an address is ejected for. If - // not specified, the default value (300s) or the BaseEjectionTime value is - // applied, whichever is larger. - MaxEjectionTime time.Duration - // MaxEjectionPercent is the maximum % of an upstream cluster that can be - // ejected due to outlier detection. Defaults to 10% but will eject at least - // one host regardless of the value. - MaxEjectionPercent uint32 - // SuccessRateStdevFactor is used to determine the ejection threshold for - // success rate outlier ejection. The ejection threshold is the difference - // between the mean success rate, and the product of this factor and the - // standard deviation of the mean success rate: mean - (stdev * - // success_rate_stdev_factor). This factor is divided by a thousand to get a - // double. That is, if the desired factor is 1.9, the runtime value should - // be 1900. Defaults to 1900. - SuccessRateStdevFactor uint32 - // EnforcingSuccessRate is the % chance that a host will be actually ejected - // when an outlier status is detected through success rate statistics. This - // setting can be used to disable ejection or to ramp it up slowly. Defaults - // to 100. 
- EnforcingSuccessRate uint32 - // SuccessRateMinimumHosts is the number of hosts in a cluster that must - // have enough request volume to detect success rate outliers. If the number - // of hosts is less than this setting, outlier detection via success rate - // statistics is not performed for any host in the cluster. Defaults to 5. - SuccessRateMinimumHosts uint32 - // SuccessRateRequestVolume is the minimum number of total requests that - // must be collected in one interval (as defined by the interval duration - // above) to include this host in success rate based outlier detection. If - // the volume is lower than this setting, outlier detection via success rate - // statistics is not performed for that host. Defaults to 100. - SuccessRateRequestVolume uint32 - // FailurePercentageThreshold is the failure percentage to use when - // determining failure percentage-based outlier detection. If the failure - // percentage of a given host is greater than or equal to this value, it - // will be ejected. Defaults to 85. - FailurePercentageThreshold uint32 - // EnforcingFailurePercentage is the % chance that a host will be actually - // ejected when an outlier status is detected through failure percentage - // statistics. This setting can be used to disable ejection or to ramp it up - // slowly. Defaults to 0. - EnforcingFailurePercentage uint32 - // FailurePercentageMinimumHosts is the minimum number of hosts in a cluster - // in order to perform failure percentage-based ejection. If the total - // number of hosts in the cluster is less than this value, failure - // percentage-based ejection will not be performed. Defaults to 5. - FailurePercentageMinimumHosts uint32 - // FailurePercentageRequestVolume is the minimum number of total requests - // that must be collected in one interval (as defined by the interval - // duration above) to perform failure percentage-based ejection for this - // host. 
If the volume is lower than this setting, failure percentage-based - // ejection will not be performed for this host. Defaults to 50. - FailurePercentageRequestVolume uint32 -} - // ClusterUpdate contains information from a received CDS response, which is of // interest to the registered CDS watcher. type ClusterUpdate struct { @@ -147,19 +75,13 @@ type ClusterUpdate struct { // a prioritized list of cluster names. PrioritizedClusterNames []string - // LBPolicy is the lb policy for this cluster. - // - // This only support round_robin and ring_hash. - // - if it's nil, the lb policy is round_robin - // - if it's not nil, the lb policy is ring_hash, the this field has the config. - // - // When we add more support policies, this can be made an interface, and - // will be set to different types based on the policy type. - LBPolicy *ClusterLBPolicyRingHash + // LBPolicy represents the locality and endpoint picking policy in JSON, + // which will be the child policy of xds_cluster_impl. + LBPolicy json.RawMessage // OutlierDetection is the outlier detection configuration for this cluster. // If nil, it means this cluster does not use the outlier detection feature. - OutlierDetection *OutlierDetection + OutlierDetection json.RawMessage // Raw is the resource from the xds response. Raw *anypb.Any diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go index 0504346c39..ad59209163 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go @@ -171,6 +171,7 @@ type HeaderMatcher struct { SuffixMatch *string RangeMatch *Int64Range PresentMatch *bool + StringMatch *matcher.StringMatcher } // Int64Range is a range for header range match. 
diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal.go deleted file mode 100644 index 28ae41e43a..0000000000 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package xdsresource contains functions to proto xds updates (unmarshal from -// proto), and types for the resource updates. -package xdsresource - -import ( - "errors" - "fmt" - "strings" - - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/protobuf/types/known/anypb" -) - -// UnmarshalOptions wraps the input parameters for `UnmarshalXxx` functions. -type UnmarshalOptions struct { - // Version is the version of the received response. - Version string - // Resources are the xDS resources resources in the received response. - Resources []*anypb.Any - // Logger is the prefix logger to be used during unmarshaling. - Logger *grpclog.PrefixLogger - // UpdateValidator is a post unmarshal validation check provided by the - // upper layer. - UpdateValidator UpdateValidatorFunc -} - -// CombineErrors TBD. 
-func CombineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { - var errStrB strings.Builder - errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) - if len(topLevelErrors) > 0 { - errStrB.WriteString("top level errors: ") - for i, err := range topLevelErrors { - if i != 0 { - errStrB.WriteString(";\n") - } - errStrB.WriteString(err.Error()) - } - } - if len(perResourceErrors) > 0 { - var i int - for name, err := range perResourceErrors { - if i != 0 { - errStrB.WriteString(";\n") - } - i++ - errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) - } - } - return errors.New(errStrB.String()) -} diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index f04939182b..9f8530111a 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -18,6 +18,7 @@ package xdsresource import ( + "encoding/json" "errors" "fmt" "net" @@ -29,20 +30,26 @@ import ( v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/golang/protobuf/proto" + "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) +// ValidateClusterAndConstructClusterUpdateForTesting 
exports the +// validateClusterAndConstructClusterUpdate function for testing purposes. +var ValidateClusterAndConstructClusterUpdateForTesting = validateClusterAndConstructClusterUpdate + // TransportSocket proto message has a `name` field which is expected to be set // to this value by the management server. const transportSocketName = "envoy.transport_sockets.tls" -func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, ClusterUpdate, error) { - r, err := unwrapResource(r) +func unmarshalClusterResource(r *anypb.Any) (string, ClusterUpdate, error) { + r, err := UnwrapResource(r) if err != nil { return "", ClusterUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) } @@ -55,7 +62,6 @@ func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (strin if err := proto.Unmarshal(r.GetValue(), cluster); err != nil { return "", ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, pretty.ToJSON(cluster)) cu, err := validateClusterAndConstructClusterUpdate(cluster) if err != nil { return cluster.GetName(), ClusterUpdate{}, err @@ -72,10 +78,11 @@ const ( ) func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { - var lbPolicy *ClusterLBPolicyRingHash + var lbPolicy json.RawMessage + var err error switch cluster.GetLbPolicy() { case v3clusterpb.Cluster_ROUND_ROBIN: - lbPolicy = nil // The default is round_robin, and there's no config to set. 
+ lbPolicy = []byte(`[{"xds_wrr_locality_experimental": {"childPolicy": [{"round_robin": {}}]}}]`) case v3clusterpb.Cluster_RING_HASH: if !envconfig.XDSRingHash { return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) @@ -88,25 +95,17 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu // defaults to 8M entries, and limited to 8M entries var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize if min := rhc.GetMinimumRingSize(); min != nil { - if min.GetValue() > ringHashSizeUpperBound { - return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash mininum ring size %v in response: %+v", min.GetValue(), cluster) - } minSize = min.GetValue() } if max := rhc.GetMaximumRingSize(); max != nil { - if max.GetValue() > ringHashSizeUpperBound { - return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash maxinum ring size %v in response: %+v", max.GetValue(), cluster) - } maxSize = max.GetValue() } - if minSize > maxSize { - return ClusterUpdate{}, fmt.Errorf("ring_hash config min size %v is greater than max %v", minSize, maxSize) - } - lbPolicy = &ClusterLBPolicyRingHash{MinimumRingSize: minSize, MaximumRingSize: maxSize} + + rhLBCfg := []byte(fmt.Sprintf("{\"minRingSize\": %d, \"maxRingSize\": %d}", minSize, maxSize)) + lbPolicy = []byte(fmt.Sprintf(`[{"ring_hash_experimental": %s}]`, rhLBCfg)) default: return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } - // Process security configuration received from the control plane iff the // corresponding environment variable is set. var sc *SecurityConfig @@ -119,7 +118,7 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu // Process outlier detection received from the control plane iff the // corresponding environment variable is set. 
- var od *OutlierDetection + var od json.RawMessage if envconfig.XDSOutlierDetection { var err error if od, err = outlierConfigFromCluster(cluster); err != nil { @@ -127,6 +126,20 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu } } + if cluster.GetLoadBalancingPolicy() != nil && envconfig.XDSCustomLBPolicy { + lbPolicy, err = xdslbregistry.ConvertToServiceConfig(cluster.GetLoadBalancingPolicy(), 0) + if err != nil { + return ClusterUpdate{}, fmt.Errorf("error converting LoadBalancingPolicy %v in response: %+v: %v", cluster.GetLoadBalancingPolicy(), cluster, err) + } + // "It will be the responsibility of the XdsClient to validate the + // converted configuration. It will do this by having the gRPC LB policy + // registry parse the configuration." - A52 + bc := &iserviceconfig.BalancerConfig{} + if err := json.Unmarshal(lbPolicy, bc); err != nil { + return ClusterUpdate{}, fmt.Errorf("JSON generated from xDS LB policy registry: %s is invalid: %v", pretty.FormatJSON(lbPolicy), err) + } + } + ret := ClusterUpdate{ ClusterName: cluster.GetName(), SecurityCfg: sc, @@ -180,6 +193,9 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } + if len(clusters.Clusters) == 0 { + return ClusterUpdate{}, fmt.Errorf("xds: aggregate cluster has empty clusters field in response: %+v", cluster) + } ret.ClusterType = ClusterTypeAggregate ret.PrioritizedClusterNames = clusters.Clusters return ret, nil @@ -474,59 +490,87 @@ func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { return nil } -// outlierConfigFromCluster extracts the relevant outlier detection -// configuration from the received cluster resource. Returns nil if no -// OutlierDetection field set in the cluster resource. 
-func outlierConfigFromCluster(cluster *v3clusterpb.Cluster) (*OutlierDetection, error) { +// idurationp takes a time.Duration and converts it to an internal duration, and +// returns a pointer to that internal duration. +func idurationp(d time.Duration) *iserviceconfig.Duration { + id := iserviceconfig.Duration(d) + return &id +} + +func uint32p(i uint32) *uint32 { + return &i +} + +// Helper types to prepare Outlier Detection JSON. Pointer types to distinguish +// between unset and a zero value. +type successRateEjection struct { + StdevFactor *uint32 `json:"stdevFactor,omitempty"` + EnforcementPercentage *uint32 `json:"enforcementPercentage,omitempty"` + MinimumHosts *uint32 `json:"minimumHosts,omitempty"` + RequestVolume *uint32 `json:"requestVolume,omitempty"` +} + +type failurePercentageEjection struct { + Threshold *uint32 `json:"threshold,omitempty"` + EnforcementPercentage *uint32 `json:"enforcementPercentage,omitempty"` + MinimumHosts *uint32 `json:"minimumHosts,omitempty"` + RequestVolume *uint32 `json:"requestVolume,omitempty"` +} + +type odLBConfig struct { + Interval *iserviceconfig.Duration `json:"interval,omitempty"` + BaseEjectionTime *iserviceconfig.Duration `json:"baseEjectionTime,omitempty"` + MaxEjectionTime *iserviceconfig.Duration `json:"maxEjectionTime,omitempty"` + MaxEjectionPercent *uint32 `json:"maxEjectionPercent,omitempty"` + SuccessRateEjection *successRateEjection `json:"successRateEjection,omitempty"` + FailurePercentageEjection *failurePercentageEjection `json:"failurePercentageEjection,omitempty"` +} + +// outlierConfigFromCluster converts the received Outlier Detection +// configuration into JSON configuration for Outlier Detection, taking into +// account xDS Defaults. Returns nil if no OutlierDetection field set in the +// cluster resource. 
+func outlierConfigFromCluster(cluster *v3clusterpb.Cluster) (json.RawMessage, error) { od := cluster.GetOutlierDetection() if od == nil { return nil, nil } - const ( - defaultInterval = 10 * time.Second - defaultBaseEjectionTime = 30 * time.Second - defaultMaxEjectionTime = 300 * time.Second - defaultMaxEjectionPercent = 10 - defaultSuccessRateStdevFactor = 1900 - defaultEnforcingSuccessRate = 100 - defaultSuccessRateMinimumHosts = 5 - defaultSuccessRateRequestVolume = 100 - defaultFailurePercentageThreshold = 85 - defaultEnforcingFailurePercentage = 0 - defaultFailurePercentageMinimumHosts = 5 - defaultFailurePercentageRequestVolume = 50 - ) + + // "The outlier_detection field of the Cluster resource should have its fields + // validated according to the rules for the corresponding LB policy config + // fields in the above "Validation" section. If any of these requirements is + // violated, the Cluster resource should be NACKed." - A50 // "The google.protobuf.Duration fields interval, base_ejection_time, and // max_ejection_time must obey the restrictions in the // google.protobuf.Duration documentation and they must have non-negative // values." 
- A50 - interval := defaultInterval + var interval *iserviceconfig.Duration if i := od.GetInterval(); i != nil { if err := i.CheckValid(); err != nil { return nil, fmt.Errorf("outlier_detection.interval is invalid with error: %v", err) } - if interval = i.AsDuration(); interval < 0 { - return nil, fmt.Errorf("outlier_detection.interval = %v; must be a valid duration and >= 0", interval) + if interval = idurationp(i.AsDuration()); *interval < 0 { + return nil, fmt.Errorf("outlier_detection.interval = %v; must be a valid duration and >= 0", *interval) } } - baseEjectionTime := defaultBaseEjectionTime + var baseEjectionTime *iserviceconfig.Duration if bet := od.GetBaseEjectionTime(); bet != nil { if err := bet.CheckValid(); err != nil { return nil, fmt.Errorf("outlier_detection.base_ejection_time is invalid with error: %v", err) } - if baseEjectionTime = bet.AsDuration(); baseEjectionTime < 0 { - return nil, fmt.Errorf("outlier_detection.base_ejection_time = %v; must be >= 0", baseEjectionTime) + if baseEjectionTime = idurationp(bet.AsDuration()); *baseEjectionTime < 0 { + return nil, fmt.Errorf("outlier_detection.base_ejection_time = %v; must be >= 0", *baseEjectionTime) } } - maxEjectionTime := defaultMaxEjectionTime + var maxEjectionTime *iserviceconfig.Duration if met := od.GetMaxEjectionTime(); met != nil { if err := met.CheckValid(); err != nil { return nil, fmt.Errorf("outlier_detection.max_ejection_time is invalid: %v", err) } - if maxEjectionTime = met.AsDuration(); maxEjectionTime < 0 { - return nil, fmt.Errorf("outlier_detection.max_ejection_time = %v; must be >= 0", maxEjectionTime) + if maxEjectionTime = idurationp(met.AsDuration()); *maxEjectionTime < 0 { + return nil, fmt.Errorf("outlier_detection.max_ejection_time = %v; must be >= 0", *maxEjectionTime) } } @@ -534,64 +578,91 @@ func outlierConfigFromCluster(cluster *v3clusterpb.Cluster) (*OutlierDetection, // failure_percentage_threshold, and enforcing_failure_percentage must have // values less than 
or equal to 100. If any of these requirements is // violated, the Cluster resource should be NACKed." - A50 - maxEjectionPercent := uint32(defaultMaxEjectionPercent) + var maxEjectionPercent *uint32 if mep := od.GetMaxEjectionPercent(); mep != nil { - if maxEjectionPercent = mep.GetValue(); maxEjectionPercent > 100 { - return nil, fmt.Errorf("outlier_detection.max_ejection_percent = %v; must be <= 100", maxEjectionPercent) + if maxEjectionPercent = uint32p(mep.GetValue()); *maxEjectionPercent > 100 { + return nil, fmt.Errorf("outlier_detection.max_ejection_percent = %v; must be <= 100", *maxEjectionPercent) } } - enforcingSuccessRate := uint32(defaultEnforcingSuccessRate) + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* fields + // will be ignored." - A50 + var enforcingSuccessRate *uint32 if esr := od.GetEnforcingSuccessRate(); esr != nil { - if enforcingSuccessRate = esr.GetValue(); enforcingSuccessRate > 100 { - return nil, fmt.Errorf("outlier_detection.enforcing_success_rate = %v; must be <= 100", enforcingSuccessRate) + if enforcingSuccessRate = uint32p(esr.GetValue()); *enforcingSuccessRate > 100 { + return nil, fmt.Errorf("outlier_detection.enforcing_success_rate = %v; must be <= 100", *enforcingSuccessRate) } } - failurePercentageThreshold := uint32(defaultFailurePercentageThreshold) + var failurePercentageThreshold *uint32 if fpt := od.GetFailurePercentageThreshold(); fpt != nil { - if failurePercentageThreshold = fpt.GetValue(); failurePercentageThreshold > 100 { - return nil, fmt.Errorf("outlier_detection.failure_percentage_threshold = %v; must be <= 100", failurePercentageThreshold) + if failurePercentageThreshold = uint32p(fpt.GetValue()); *failurePercentageThreshold > 100 { + return nil, fmt.Errorf("outlier_detection.failure_percentage_threshold = %v; must be <= 100", *failurePercentageThreshold) } } - enforcingFailurePercentage := 
uint32(defaultEnforcingFailurePercentage) + // "If the enforcing_failure_percent field is set to 0 or null, the config + // failure_percent_ejection field will be null and all failure_percent_* + // fields will be ignored." - A50 + var enforcingFailurePercentage *uint32 if efp := od.GetEnforcingFailurePercentage(); efp != nil { - if enforcingFailurePercentage = efp.GetValue(); enforcingFailurePercentage > 100 { - return nil, fmt.Errorf("outlier_detection.enforcing_failure_percentage = %v; must be <= 100", enforcingFailurePercentage) + if enforcingFailurePercentage = uint32p(efp.GetValue()); *enforcingFailurePercentage > 100 { + return nil, fmt.Errorf("outlier_detection.enforcing_failure_percentage = %v; must be <= 100", *enforcingFailurePercentage) } } - successRateStdevFactor := uint32(defaultSuccessRateStdevFactor) + var successRateStdevFactor *uint32 if srsf := od.GetSuccessRateStdevFactor(); srsf != nil { - successRateStdevFactor = srsf.GetValue() + successRateStdevFactor = uint32p(srsf.GetValue()) } - successRateMinimumHosts := uint32(defaultSuccessRateMinimumHosts) + var successRateMinimumHosts *uint32 if srmh := od.GetSuccessRateMinimumHosts(); srmh != nil { - successRateMinimumHosts = srmh.GetValue() + successRateMinimumHosts = uint32p(srmh.GetValue()) } - successRateRequestVolume := uint32(defaultSuccessRateRequestVolume) + var successRateRequestVolume *uint32 if srrv := od.GetSuccessRateRequestVolume(); srrv != nil { - successRateRequestVolume = srrv.GetValue() + successRateRequestVolume = uint32p(srrv.GetValue()) } - failurePercentageMinimumHosts := uint32(defaultFailurePercentageMinimumHosts) + var failurePercentageMinimumHosts *uint32 if fpmh := od.GetFailurePercentageMinimumHosts(); fpmh != nil { - failurePercentageMinimumHosts = fpmh.GetValue() + failurePercentageMinimumHosts = uint32p(fpmh.GetValue()) } - failurePercentageRequestVolume := uint32(defaultFailurePercentageRequestVolume) + var failurePercentageRequestVolume *uint32 if fprv := 
od.GetFailurePercentageRequestVolume(); fprv != nil { - failurePercentageRequestVolume = fprv.GetValue() - } - - return &OutlierDetection{ - Interval: interval, - BaseEjectionTime: baseEjectionTime, - MaxEjectionTime: maxEjectionTime, - MaxEjectionPercent: maxEjectionPercent, - EnforcingSuccessRate: enforcingSuccessRate, - FailurePercentageThreshold: failurePercentageThreshold, - EnforcingFailurePercentage: enforcingFailurePercentage, - SuccessRateStdevFactor: successRateStdevFactor, - SuccessRateMinimumHosts: successRateMinimumHosts, - SuccessRateRequestVolume: successRateRequestVolume, - FailurePercentageMinimumHosts: failurePercentageMinimumHosts, - FailurePercentageRequestVolume: failurePercentageRequestVolume, - }, nil + failurePercentageRequestVolume = uint32p(fprv.GetValue()) + } + + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* fields + // will be ignored." - A50 + var sre *successRateEjection + if enforcingSuccessRate == nil || *enforcingSuccessRate != 0 { + sre = &successRateEjection{ + StdevFactor: successRateStdevFactor, + EnforcementPercentage: enforcingSuccessRate, + MinimumHosts: successRateMinimumHosts, + RequestVolume: successRateRequestVolume, + } + } + + // "If the enforcing_failure_percent field is set to 0 or null, the config + // failure_percent_ejection field will be null and all failure_percent_* + // fields will be ignored." 
- A50 + var fpe *failurePercentageEjection + if enforcingFailurePercentage != nil && *enforcingFailurePercentage != 0 { + fpe = &failurePercentageEjection{ + Threshold: failurePercentageThreshold, + EnforcementPercentage: enforcingFailurePercentage, + MinimumHosts: failurePercentageMinimumHosts, + RequestVolume: failurePercentageRequestVolume, + } + } + + odLBCfg := &odLBConfig{ + Interval: interval, + BaseEjectionTime: baseEjectionTime, + MaxEjectionTime: maxEjectionTime, + MaxEjectionPercent: maxEjectionPercent, + SuccessRateEjection: sre, + FailurePercentageEjection: fpe, + } + return json.Marshal(odLBCfg) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index a1809a62fc..95333aaf61 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -27,14 +27,13 @@ import ( v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" "github.com/golang/protobuf/proto" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal" "google.golang.org/protobuf/types/known/anypb" ) -func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, EndpointsUpdate, error) { - r, err := unwrapResource(r) +func unmarshalEndpointsResource(r *anypb.Any) (string, EndpointsUpdate, error) { + r, err := UnwrapResource(r) if err != nil { return "", EndpointsUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) } @@ -47,9 +46,8 @@ func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (str if err := proto.Unmarshal(r.GetValue(), cla); err != 
nil { return "", EndpointsUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, pretty.ToJSON(cla)) - u, err := parseEDSRespProto(cla, logger) + u, err := parseEDSRespProto(cla) if err != nil { return cla.GetClusterName(), EndpointsUpdate{}, err } @@ -109,7 +107,7 @@ func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint, uniqueEndpointAddrs return endpoints, nil } -func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment, logger *grpclog.PrefixLogger) (EndpointsUpdate, error) { +func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, error) { ret := EndpointsUpdate{} for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) @@ -143,6 +141,17 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment, logger *grpclog.Pr SubZone: l.SubZone, } lidStr, _ := lid.ToString() + + // "Since an xDS configuration can place a given locality under multiple + // priorities, it is possible to see locality weight attributes with + // different values for the same locality." - A52 + // + // This is handled in the client by emitting the locality weight + // specified for the priority it is specified in. If the same locality + // has a different weight in two priorities, each priority will specify + // a locality with the locality weight specified for that priority, and + // thus the subsequent tree of balancers linked to that priority will + // use that locality weight as well. 
if localitiesWithPriority[lidStr] { return EndpointsUpdate{}, fmt.Errorf("duplicate locality %s with the same priority %v", lidStr, priority) } @@ -154,7 +163,7 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment, logger *grpclog.Pr ret.Localities = append(ret.Localities, Locality{ ID: lid, Endpoints: endpoints, - Weight: locality.GetLoadBalancingWeight().GetValue(), + Weight: weight, Priority: priority, }) } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go index 6b273e82f9..8f18b02e28 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -22,22 +22,19 @@ import ( "fmt" "strconv" - v1udpatypepb "github.com/cncf/udpa/go/udpa/type/v1" - v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" + v1udpaudpatypepb "github.com/cncf/udpa/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) -func unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, ListenerUpdate, error) { - r, err := unwrapResource(r) +func unmarshalListenerResource(r *anypb.Any) (string, ListenerUpdate, error) { + r, 
err := UnwrapResource(r) if err != nil { return "", ListenerUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) } @@ -45,15 +42,12 @@ func unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (stri if !IsListenerResource(r.GetTypeUrl()) { return "", ListenerUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) } - // TODO: Pass version.TransportAPI instead of relying upon the type URL - v2 := r.GetTypeUrl() == version.V2ListenerURL lis := &v3listenerpb.Listener{} if err := proto.Unmarshal(r.GetValue(), lis); err != nil { return "", ListenerUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, pretty.ToJSON(lis)) - lu, err := processListener(lis, logger, v2) + lu, err := processListener(lis) if err != nil { return lis.GetName(), ListenerUpdate{}, err } @@ -61,16 +55,16 @@ func unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (stri return lis.GetName(), *lu, nil } -func processListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) { +func processListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { if lis.GetApiListener() != nil { - return processClientSideListener(lis, logger, v2) + return processClientSideListener(lis) } - return processServerSideListener(lis, logger) + return processServerSideListener(lis) } // processClientSideListener checks if the provided Listener proto meets // the expected criteria. If so, it returns a non-empty routeConfigName. 
-func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) { +func processClientSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { update := &ListenerUpdate{} apiLisAny := lis.GetApiListener().GetApiListener() @@ -102,7 +96,7 @@ func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.Prefi } update.RouteConfigName = name case *v3httppb.HttpConnectionManager_RouteConfig: - routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig(), logger, v2) + routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig()) if err != nil { return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) } @@ -113,10 +107,6 @@ func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.Prefi return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", apiLis.RouteSpecifier) } - if v2 { - return update, nil - } - // The following checks and fields only apply to xDS protocol versions v3+. update.MaxStreamDuration = apiLis.GetCommonHttpProtocolOptions().GetMaxStreamDuration().AsDuration() @@ -131,16 +121,16 @@ func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.Prefi func unwrapHTTPFilterConfig(config *anypb.Any) (proto.Message, string, error) { switch { - case ptypes.Is(config, &v3cncftypepb.TypedStruct{}): + case ptypes.Is(config, &v3xdsxdstypepb.TypedStruct{}): // The real type name is inside the new TypedStruct message. - s := new(v3cncftypepb.TypedStruct) + s := new(v3xdsxdstypepb.TypedStruct) if err := ptypes.UnmarshalAny(config, s); err != nil { return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) } return s, s.GetTypeUrl(), nil - case ptypes.Is(config, &v1udpatypepb.TypedStruct{}): + case ptypes.Is(config, &v1udpaudpatypepb.TypedStruct{}): // The real type name is inside the old TypedStruct message. 
- s := new(v1udpatypepb.TypedStruct) + s := new(v1udpaudpatypepb.TypedStruct) if err := ptypes.UnmarshalAny(config, s); err != nil { return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) } @@ -257,7 +247,7 @@ func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilt return ret, nil } -func processServerSideListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger) (*ListenerUpdate, error) { +func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { if n := len(lis.ListenerFilters); n != 0 { return nil, fmt.Errorf("unsupported field 'listener_filters' contains %d entries", n) } @@ -279,7 +269,7 @@ func processServerSideListener(lis *v3listenerpb.Listener, logger *grpclog.Prefi }, } - fcMgr, err := NewFilterChainManager(lis, logger) + fcMgr, err := NewFilterChainManager(lis) if err != nil { return nil, err } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index 819e47d32f..c51a0c24b5 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -19,24 +19,24 @@ package xdsresource import ( "fmt" + "math" "regexp" "strings" "time" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" "github.com/golang/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/xds/internal/clusterspecifier" - 
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" + + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" ) -func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, RouteConfigUpdate, error) { - r, err := unwrapResource(r) +func unmarshalRouteConfigResource(r *anypb.Any) (string, RouteConfigUpdate, error) { + r, err := UnwrapResource(r) if err != nil { return "", RouteConfigUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) } @@ -48,11 +48,8 @@ func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (s if err := proto.Unmarshal(r.GetValue(), rc); err != nil { return "", RouteConfigUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v.", rc.GetName(), rc, pretty.ToJSON(rc)) - // TODO: Pass version.TransportAPI instead of relying upon the type URL - v2 := r.GetTypeUrl() == version.V2RouteConfigURL - u, err := generateRDSUpdateFromRouteConfiguration(rc, logger, v2) + u, err := generateRDSUpdateFromRouteConfiguration(rc) if err != nil { return rc.GetName(), RouteConfigUpdate{}, err } @@ -76,7 +73,7 @@ func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (s // field must be empty and whose route field must be set. Inside that route // message, the cluster field will contain the clusterName or weighted clusters // we are looking for. 
-func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, logger *grpclog.PrefixLogger, v2 bool) (RouteConfigUpdate, error) { +func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration) (RouteConfigUpdate, error) { vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) csps := make(map[string]clusterspecifier.BalancerConfig) if envconfig.XDSRLS { @@ -91,7 +88,7 @@ func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, l // ignored and not emitted by the xdsclient. var cspNames = make(map[string]bool) for _, vh := range rc.GetVirtualHosts() { - routes, cspNs, err := routesProtoToSlice(vh.Routes, csps, logger, v2) + routes, cspNs, err := routesProtoToSlice(vh.Routes, csps) if err != nil { return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) } @@ -107,13 +104,11 @@ func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, l Routes: routes, RetryConfig: rc, } - if !v2 { - cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) - if err != nil { - return RouteConfigUpdate{}, fmt.Errorf("virtual host %+v: %v", vh, err) - } - vhOut.HTTPFilterConfigOverride = cfgs + cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("virtual host %+v: %v", vh, err) } + vhOut.HTTPFilterConfigOverride = cfgs vhs = append(vhs, vhOut) } @@ -216,7 +211,7 @@ func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { return cfg, nil } -func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecifier.BalancerConfig, logger *grpclog.PrefixLogger, v2 bool) ([]*Route, map[string]bool, error) { +func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecifier.BalancerConfig) ([]*Route, map[string]bool, error) { var routesRet []*Route var cspNames = make(map[string]bool) for _, r := range routes { @@ -227,7 +222,7 @@ func 
routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif if len(match.GetQueryParameters()) != 0 { // Ignore route with query parameters. - logger.Warningf("route %+v has query parameter matchers, the route will be ignored", r) + logger.Warningf("Ignoring route %+v with query parameter matchers", r) continue } @@ -280,6 +275,12 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif header.PrefixMatch = &ht.PrefixMatch case *v3routepb.HeaderMatcher_SuffixMatch: header.SuffixMatch = &ht.SuffixMatch + case *v3routepb.HeaderMatcher_StringMatch: + sm, err := matcher.StringMatcherFromProto(ht.StringMatch) + if err != nil { + return nil, nil, fmt.Errorf("route %+v has an invalid string matcher: %v", err, ht.StringMatch) + } + header.StringMatch = &sm default: return nil, nil, fmt.Errorf("route %+v has an unrecognized header matcher: %+v", r, ht) } @@ -309,7 +310,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif // Hash Policies are only applicable for a Ring Hash LB. 
if envconfig.XDSRingHash { - hp, err := hashPoliciesProtoToSlice(action.HashPolicy, logger) + hp, err := hashPoliciesProtoToSlice(action.HashPolicy) if err != nil { return nil, nil, err } @@ -321,31 +322,23 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1} case *v3routepb.RouteAction_WeightedClusters: wcs := a.WeightedClusters - var totalWeight uint32 + var totalWeight uint64 for _, c := range wcs.Clusters { w := c.GetWeight().GetValue() if w == 0 { continue } + totalWeight += uint64(w) + if totalWeight > math.MaxUint32 { + return nil, nil, fmt.Errorf("xds: total weight of clusters exceeds MaxUint32") + } wc := WeightedCluster{Weight: w} - if !v2 { - cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) - if err != nil { - return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) - } - wc.HTTPFilterConfigOverride = cfgs + cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) + if err != nil { + return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) } + wc.HTTPFilterConfigOverride = cfgs route.WeightedClusters[c.GetName()] = wc - totalWeight += w - } - // envoy xds doc - // default TotalWeight https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto.html#envoy-v3-api-field-config-route-v3-weightedcluster-total-weight - wantTotalWeight := uint32(100) - if tw := wcs.GetTotalWeight(); tw != nil { - wantTotalWeight = tw.GetValue() - } - if totalWeight != wantTotalWeight { - return nil, nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total total weight, got: %v, expected total weight from response: %v", r, a, totalWeight, wantTotalWeight) } if totalWeight == 0 { return nil, nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) @@ -357,7 +350,6 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps 
map[string]clusterspecif // cluster_specifier: // - Can be Cluster // - Can be Weighted_clusters - // - The sum of weights must add up to the total_weight. // - Can be unset or an unsupported field. The route containing // this action will be ignored. // @@ -365,7 +357,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif // it as if it we didn't know about the cluster_specifier_plugin // at all. if !envconfig.XDSRLS { - logger.Infof("route %+v contains route_action with unsupported field: cluster_specifier_plugin, the route will be ignored", r) + logger.Warningf("Ignoring route %+v with unsupported route_action field: cluster_specifier_plugin", r) continue } if _, ok := csps[a.ClusterSpecifierPlugin]; !ok { @@ -376,13 +368,13 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif return nil, nil, fmt.Errorf("route %+v, action %+v, specifies a cluster specifier plugin %+v that is not in Route Configuration", r, a, a.ClusterSpecifierPlugin) } if csps[a.ClusterSpecifierPlugin] == nil { - logger.Infof("route %+v references optional and unsupported cluster specifier plugin %v, the route will be ignored", r, a.ClusterSpecifierPlugin) + logger.Warningf("Ignoring route %+v with optional and unsupported cluster specifier plugin %+v", r, a.ClusterSpecifierPlugin) continue } cspNames[a.ClusterSpecifierPlugin] = true route.ClusterSpecifierPlugin = a.ClusterSpecifierPlugin default: - logger.Infof("route %+v references unknown ClusterSpecifier %+v, the route will be ignored", r, a) + logger.Warningf("Ignoring route %+v with unknown ClusterSpecifier %+v", r, a) continue } @@ -412,19 +404,17 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif route.ActionType = RouteActionUnsupported } - if !v2 { - cfgs, err := processHTTPFilterOverrides(r.GetTypedPerFilterConfig()) - if err != nil { - return nil, nil, fmt.Errorf("route %+v: %v", r, err) - } - route.HTTPFilterConfigOverride = cfgs + cfgs, 
err := processHTTPFilterOverrides(r.GetTypedPerFilterConfig()) + if err != nil { + return nil, nil, fmt.Errorf("route %+v: %v", r, err) } + route.HTTPFilterConfigOverride = cfgs routesRet = append(routesRet, &route) } return routesRet, cspNames, nil } -func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logger *grpclog.PrefixLogger) ([]*HashPolicy, error) { +func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy) ([]*HashPolicy, error) { var hashPoliciesRet []*HashPolicy for _, p := range policies { policy := HashPolicy{Terminal: p.Terminal} @@ -443,12 +433,12 @@ func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logg } case *v3routepb.RouteAction_HashPolicy_FilterState_: if p.GetFilterState().GetKey() != "io.grpc.channel_id" { - logger.Infof("hash policy %+v contains an invalid key for filter state policy %q", p, p.GetFilterState().GetKey()) + logger.Warningf("Ignoring hash policy %+v with invalid key for filter state policy %q", p, p.GetFilterState().GetKey()) continue } policy.HashPolicyType = HashPolicyTypeChannelID default: - logger.Infof("hash policy %T is an unsupported hash policy", p.GetPolicySpecifier()) + logger.Warningf("Ignoring unsupported hash policy %T", p.GetPolicySpecifier()) continue } diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go index 2c4819abdd..82ad5fe52c 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go @@ -20,35 +20,11 @@ // versions. package version -// TransportAPI refers to the API version for xDS transport protocol. 
This -// describes the xDS gRPC endpoint and version of DiscoveryRequest/Response used -// on the wire. -type TransportAPI int - -const ( - // TransportV2 refers to the v2 xDS transport protocol. - TransportV2 TransportAPI = iota - // TransportV3 refers to the v3 xDS transport protocol. - TransportV3 -) - // Resource URLs. We need to be able to accept either version of the resource // regardless of the version of the transport protocol in use. const ( googleapiPrefix = "type.googleapis.com/" - V2ListenerType = "envoy.api.v2.Listener" - V2RouteConfigType = "envoy.api.v2.RouteConfiguration" - V2ClusterType = "envoy.api.v2.Cluster" - V2EndpointsType = "envoy.api.v2.ClusterLoadAssignment" - - V2ResourceWrapperURL = googleapiPrefix + "envoy.api.v2.Resource" - V2ListenerURL = googleapiPrefix + V2ListenerType - V2RouteConfigURL = googleapiPrefix + V2RouteConfigType - V2ClusterURL = googleapiPrefix + V2ClusterType - V2EndpointsURL = googleapiPrefix + V2EndpointsType - V2HTTPConnManagerURL = googleapiPrefix + "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager" - V3ListenerType = "envoy.config.listener.v3.Listener" V3RouteConfigType = "envoy.config.route.v3.RouteConfiguration" V3ClusterType = "envoy.config.cluster.v3.Cluster" diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/server.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/server.go index f7003f6cd5..55b678bb78 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/server.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/server.go @@ -61,10 +61,6 @@ var ( logger = grpclog.Component("xds") ) -func prefixLogger(p *GRPCServer) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(serverPrefix, p)) -} - // grpcServer contains methods from grpc.Server which are used by the // GRPCServer type here. This is useful for overriding in unit tests. 
type grpcServer interface { @@ -107,7 +103,7 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { gs: newGRPCServer(newOpts...), quit: grpcsync.NewEvent(), } - s.logger = prefixLogger(s) + s.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(serverPrefix, s)) s.logger.Infof("Created xds.GRPCServer") s.handleServerOptions(opts) @@ -196,7 +192,6 @@ func (s *GRPCServer) initXDSClient() error { } s.xdsC = client s.xdsClientClose = close - s.logger.Infof("Created an xdsClient") return nil } @@ -277,6 +272,7 @@ func (s *GRPCServer) Serve(lis net.Listener) error { // need to explicitly close the listener. Cancellation of the xDS watch // is handled by the listenerWrapper. lw.Close() + modeUpdateCh.Close() return nil case <-goodUpdateCh: } @@ -300,7 +296,10 @@ func (s *GRPCServer) handleServingModeChanges(updateCh *buffer.Unbounded) { select { case <-s.quit.Done(): return - case u := <-updateCh.Get(): + case u, ok := <-updateCh.Get(): + if !ok { + return + } updateCh.Load() args := u.(*modeChangeArgs) if args.mode == connectivity.ServingModeNotServing { diff --git a/terraform/providers/google/vendor/google.golang.org/grpc/xds/xds.go b/terraform/providers/google/vendor/google.golang.org/grpc/xds/xds.go index 706e11c494..bd6ed9c90f 100644 --- a/terraform/providers/google/vendor/google.golang.org/grpc/xds/xds.go +++ b/terraform/providers/google/vendor/google.golang.org/grpc/xds/xds.go @@ -36,13 +36,14 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/csds" - _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. - _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. - _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS cluster specifier plugin. Note that this does not register the RLS LB policy. - _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. 
- _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. - _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. - _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver + _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. + _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. + _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS cluster specifier plugin. Note that this does not register the RLS LB policy. + _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. + _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + _ "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter" // Register the xDS LB Registry Converters. v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) @@ -56,14 +57,14 @@ func init() { case *GRPCServer: sss, ok := ss.gs.(*grpc.Server) if !ok { - logger.Warningf("grpc server within xds.GRPCServer is not *grpc.Server, CSDS will not be registered") + logger.Warning("grpc server within xds.GRPCServer is not *grpc.Server, CSDS will not be registered") return nil, nil } grpcServer = sss default: // Returning an error would cause the top level admin.Register() to // fail. Log a warning instead. 
- logger.Warningf("server to register service on is neither a *grpc.Server or a *xds.GRPCServer, CSDS will not be registered") + logger.Error("Server to register service on is neither a *grpc.Server or a *xds.GRPCServer, CSDS will not be registered") return nil, nil } diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index d09d22e139..66b95870e9 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -106,13 +106,19 @@ func (o MarshalOptions) Format(m proto.Message) string { // MarshalOptions. Do not depend on the output being stable. It may change over // time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { - return o.marshal(m) + return o.marshal(nil, m) +} + +// MarshalAppend appends the JSON format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) } // marshal is a centralized function that all marshal operations go through. // For profiling purposes, avoid changing the name of this function or // introducing other code paths for marshal that do not go through this. 
-func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { if o.Multiline && o.Indent == "" { o.Indent = defaultIndent } @@ -120,7 +126,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { o.Resolver = protoregistry.GlobalTypes } - internalEnc, err := json.NewEncoder(o.Indent) + internalEnc, err := json.NewEncoder(b, o.Indent) if err != nil { return nil, err } @@ -128,7 +134,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { // Treat nil message interface as an empty message, // in which case the output in an empty JSON object. if m == nil { - return []byte("{}"), nil + return append(b, '{', '}'), nil } enc := encoder{internalEnc, o} diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index ebf6c65284..722a7b41df 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -101,13 +101,19 @@ func (o MarshalOptions) Format(m proto.Message) string { // MarshalOptions object. Do not depend on the output being stable. It may // change over time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { - return o.marshal(m) + return o.marshal(nil, m) +} + +// MarshalAppend appends the textproto format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) } // marshal is a centralized function that all marshal operations go through. // For profiling purposes, avoid changing the name of this function or // introducing other code paths for marshal that do not go through this. 
-func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { var delims = [2]byte{'{', '}'} if o.Multiline && o.Indent == "" { @@ -117,7 +123,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { o.Resolver = protoregistry.GlobalTypes } - internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII) + internalEnc, err := text.NewEncoder(b, o.Indent, delims, o.EmitASCII) if err != nil { return nil, err } @@ -125,7 +131,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { // Treat nil message interface as an empty message, // in which case there is nothing to output. if m == nil { - return []byte{}, nil + return b, nil } enc := encoder{internalEnc, o} diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go index fbdf348734..934f2dcb39 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -41,8 +41,10 @@ type Encoder struct { // // If indent is a non-empty string, it causes every entry for an Array or Object // to be preceded by the indent and trailed by a newline. -func NewEncoder(indent string) (*Encoder, error) { - e := &Encoder{} +func NewEncoder(buf []byte, indent string) (*Encoder, error) { + e := &Encoder{ + out: buf, + } if len(indent) > 0 { if strings.Trim(indent, " \t") != "" { return nil, errors.New("indent may only be composed of space or tab characters") @@ -176,13 +178,13 @@ func appendFloat(out []byte, n float64, bitSize int) []byte { // WriteInt writes out the given signed integer in JSON number value. func (e *Encoder) WriteInt(n int64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatInt(n, 10)...) 
+ e.out = strconv.AppendInt(e.out, n, 10) } // WriteUint writes out the given unsigned integer in JSON number value. func (e *Encoder) WriteUint(n uint64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatUint(n, 10)...) + e.out = strconv.AppendUint(e.out, n, 10) } // StartObject writes out the '{' symbol. diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go index da289ccce6..cf7aed77bc 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -53,8 +53,10 @@ type encoderState struct { // If outputASCII is true, strings will be serialized in such a way that // multi-byte UTF-8 sequences are escaped. This property ensures that the // overall output is ASCII (as opposed to UTF-8). -func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { - e := &Encoder{} +func NewEncoder(buf []byte, indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { + e := &Encoder{ + encoderState: encoderState{out: buf}, + } if len(indent) > 0 { if strings.Trim(indent, " \t") != "" { return nil, errors.New("indent may only be composed of space and tab characters") @@ -195,13 +197,13 @@ func appendFloat(out []byte, n float64, bitSize int) []byte { // WriteInt writes out the given signed integer value. func (e *Encoder) WriteInt(n int64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatInt(n, 10)...) + e.out = strconv.AppendInt(e.out, n, 10) } // WriteUint writes out the given unsigned integer value. func (e *Encoder) WriteUint(n uint64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatUint(n, 10)...) 
+ e.out = strconv.AppendUint(e.out, n, 10) } // WriteLiteral writes out the given string as a literal value without quotes. diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 5c0e8f73f4..136f1b2157 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -183,13 +183,58 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions. const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" + ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) // Field numbers for google.protobuf.ExtensionRangeOptions. const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_VerificationState_enum_fullname = "google.protobuf.ExtensionRangeOptions.VerificationState" + ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" +) + +// Names for google.protobuf.ExtensionRangeOptions.Declaration. 
+const ( + ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" + ExtensionRangeOptions_Declaration_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration" +) + +// Field names for google.protobuf.ExtensionRangeOptions.Declaration. +const ( + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. 
+const ( + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. @@ -540,6 +585,7 @@ const ( FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" FieldOptions_Retention_field_name protoreflect.Name = "retention" FieldOptions_Target_field_name protoreflect.Name = "target" + FieldOptions_Targets_field_name protoreflect.Name = "targets" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -552,6 +598,7 @@ const ( FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" + FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -567,6 +614,7 @@ const ( FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 FieldOptions_Target_field_number protoreflect.FieldNumber = 18 + FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) diff --git 
a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index 3bc710138a..e0f75fea0a 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -32,6 +32,7 @@ const ( Type_Options_field_name protoreflect.Name = "options" Type_SourceContext_field_name protoreflect.Name = "source_context" Type_Syntax_field_name protoreflect.Name = "syntax" + Type_Edition_field_name protoreflect.Name = "edition" Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" @@ -39,6 +40,7 @@ const ( Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" + Type_Edition_field_fullname protoreflect.FullName = "google.protobuf.Type.edition" ) // Field numbers for google.protobuf.Type. @@ -49,6 +51,7 @@ const ( Type_Options_field_number protoreflect.FieldNumber = 4 Type_SourceContext_field_number protoreflect.FieldNumber = 5 Type_Syntax_field_number protoreflect.FieldNumber = 6 + Type_Edition_field_number protoreflect.FieldNumber = 7 ) // Names for google.protobuf.Field. 
@@ -121,12 +124,14 @@ const ( Enum_Options_field_name protoreflect.Name = "options" Enum_SourceContext_field_name protoreflect.Name = "source_context" Enum_Syntax_field_name protoreflect.Name = "syntax" + Enum_Edition_field_name protoreflect.Name = "edition" Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" + Enum_Edition_field_fullname protoreflect.FullName = "google.protobuf.Enum.edition" ) // Field numbers for google.protobuf.Enum. @@ -136,6 +141,7 @@ const ( Enum_Options_field_number protoreflect.FieldNumber = 3 Enum_SourceContext_field_number protoreflect.FieldNumber = 4 Enum_Syntax_field_number protoreflect.FieldNumber = 5 + Enum_Edition_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.EnumValue. diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/order/order.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/order/order.go index 33745ed062..dea522e127 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/order/order.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/order/order.go @@ -33,7 +33,7 @@ var ( return !inOneof(ox) && inOneof(oy) } // Fields in disjoint oneof sets are sorted by declaration index. - if ox != nil && oy != nil && ox != oy { + if inOneof(ox) && inOneof(oy) && ox != oy { return ox.Index() < oy.Index() } // Fields sorted by field number. 
diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/version/version.go b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/version/version.go index daefe11056..0999f29d50 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 29 - Patch = 1 + Minor = 31 + Patch = 0 PreRelease = "" ) diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/proto/size.go b/terraform/providers/google/vendor/google.golang.org/protobuf/proto/size.go index 554b9c6c09..f1692b49b6 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/proto/size.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/proto/size.go @@ -73,23 +73,27 @@ func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protore } func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) { + sizeTag := protowire.SizeTag(num) + if fd.IsPacked() && list.Len() > 0 { content := 0 for i, llen := 0, list.Len(); i < llen; i++ { content += o.sizeSingular(num, fd.Kind(), list.Get(i)) } - return protowire.SizeTag(num) + protowire.SizeBytes(content) + return sizeTag + protowire.SizeBytes(content) } for i, llen := 0, list.Len(); i < llen; i++ { - size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i)) + size += sizeTag + o.sizeSingular(num, fd.Kind(), list.Get(i)) } return size } func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) { + sizeTag := protowire.SizeTag(num) + mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool { - size += protowire.SizeTag(num) + size += sizeTag size += 
protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value)) return true }) diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 54ce326df9..717b106f3d 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -363,6 +363,8 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "retention", nil) case 18: b = p.appendSingularField(b, "target", nil) + case 19: + b = p.appendRepeatedField(b, "targets", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -418,6 +420,10 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { switch (*p)[0] { case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + case 2: + b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 3: + b = p.appendSingularField(b, "verification", nil) } return b } @@ -473,3 +479,24 @@ func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { } return b } + +func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "number", nil) + case 2: + b = p.appendSingularField(b, "full_name", nil) + case 3: + b = p.appendSingularField(b, "type", nil) + case 4: + b = p.appendSingularField(b, "is_repeated", nil) + case 5: + b = p.appendSingularField(b, "reserved", nil) + case 6: + b = p.appendSingularField(b, "repeated", nil) + } + return b +} diff --git 
a/terraform/providers/google/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index dac5671db0..04c00f737c 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -48,6 +48,64 @@ import ( sync "sync" ) +// The verification state of the extension range. +type ExtensionRangeOptions_VerificationState int32 + +const ( + // All the extensions of the range must be declared. + ExtensionRangeOptions_DECLARATION ExtensionRangeOptions_VerificationState = 0 + ExtensionRangeOptions_UNVERIFIED ExtensionRangeOptions_VerificationState = 1 +) + +// Enum value maps for ExtensionRangeOptions_VerificationState. +var ( + ExtensionRangeOptions_VerificationState_name = map[int32]string{ + 0: "DECLARATION", + 1: "UNVERIFIED", + } + ExtensionRangeOptions_VerificationState_value = map[string]int32{ + "DECLARATION": 0, + "UNVERIFIED": 1, + } +) + +func (x ExtensionRangeOptions_VerificationState) Enum() *ExtensionRangeOptions_VerificationState { + p := new(ExtensionRangeOptions_VerificationState) + *p = x + return p +} + +func (x ExtensionRangeOptions_VerificationState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *ExtensionRangeOptions_VerificationState) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = ExtensionRangeOptions_VerificationState(num) + return nil +} + +// Deprecated: Use ExtensionRangeOptions_VerificationState.Descriptor instead. +func (ExtensionRangeOptions_VerificationState) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + type FieldDescriptorProto_Type int32 const ( @@ -137,11 +195,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] + return &file_google_protobuf_descriptor_proto_enumTypes[1] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -197,11 +255,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -258,11 +316,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FileOptions_OptimizeMode) Type() 
protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -288,7 +346,13 @@ type FieldOptions_CType int32 const ( // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_STRING FieldOptions_CType = 0 + // The option [ctype=CORD] may be applied to a non-repeated field of type + // "bytes". It indicates that in C++, the data should be stored in a Cord + // instead of a string. For very large strings, this may reduce memory + // fragmentation. It may also allow better performance when parsing from a + // Cord, or when parsing with aliasing enabled, as the parsed Cord may then + // alias the original buffer. FieldOptions_CORD FieldOptions_CType = 1 FieldOptions_STRING_PIECE FieldOptions_CType = 2 ) @@ -318,11 +382,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -380,11 +444,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -442,11 +506,11 @@ func (x 
FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -526,11 +590,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -588,11 +652,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -652,11 +716,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return 
file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -1015,7 +1079,21 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} + // go/protobuf-stripping-extension-declarations + // Like Metadata, but we use a repeated field to hold all extension + // declarations. This should avoid the size increases of transforming a large + // extension range into small ranges in generated binaries. + Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // The verification state of the range. + // TODO(b/278783756): flip the default to DECLARATION once all empty ranges + // are marked as UNVERIFIED. + Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` +} + +// Default values for ExtensionRangeOptions fields. 
+const ( + Default_ExtensionRangeOptions_Verification = ExtensionRangeOptions_UNVERIFIED +) func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} @@ -1056,6 +1134,20 @@ func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption return nil } +func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declaration { + if x != nil { + return x.Declaration + } + return nil +} + +func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { + if x != nil && x.Verification != nil { + return *x.Verification + } + return Default_ExtensionRangeOptions_Verification +} + // Describes a field within a message. type FieldDescriptorProto struct { state protoimpl.MessageState @@ -2046,8 +2138,10 @@ type FieldOptions struct { // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! + // options below. This option is only implemented to support use of + // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of + // type "bytes" in the open source release -- sorry, we'll try to include + // other types in a future version! Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly @@ -2111,9 +2205,11 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. 
- DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2224,6 +2320,7 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { return FieldOptions_RETENTION_UNKNOWN } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { if x != nil && x.Target != nil { return *x.Target @@ -2231,6 +2328,13 @@ func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { return FieldOptions_TARGET_TYPE_UNKNOWN } +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets + } + return nil +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2960,6 +3064,108 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 { return 0 } +type ExtensionRangeOptions_Declaration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The extension number declared within the extension range. + Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` + // The fully-qualified name of the extension field. There must be a leading + // dot in front of the full name. + FullName *string `protobuf:"bytes,2,opt,name=full_name,json=fullName" json:"full_name,omitempty"` + // The fully-qualified type name of the extension field. Unlike + // Metadata.type, Declaration.type must have a leading dot for messages + // and enums. + Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` + // Deprecated. Please use "repeated". + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` + // If true, indicates that the number is reserved in the extension range, + // and any extension field with the number will fail to compile. Set this + // when a declared extension field is deleted. + Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"` + // If true, indicates that the extension must be defined as repeated. + // Otherwise the extension must be defined as optional. 
+ Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` +} + +func (x *ExtensionRangeOptions_Declaration) Reset() { + *x = ExtensionRangeOptions_Declaration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions_Declaration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} + +func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions_Declaration.ProtoReflect.Descriptor instead. +func (*ExtensionRangeOptions_Declaration) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *ExtensionRangeOptions_Declaration) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *ExtensionRangeOptions_Declaration) GetFullName() string { + if x != nil && x.FullName != nil { + return *x.FullName + } + return "" +} + +func (x *ExtensionRangeOptions_Declaration) GetType() string { + if x != nil && x.Type != nil { + return *x.Type + } + return "" +} + +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
+func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { + if x != nil && x.IsRepeated != nil { + return *x.IsRepeated + } + return false +} + +func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { + if x != nil && x.Reserved != nil { + return *x.Reserved + } + return false +} + +func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool { + if x != nil && x.Repeated != nil { + return *x.Repeated + } + return false +} + // Range of reserved numeric values. Reserved values may not be used by // entries in the same enum. Reserved ranges may not overlap. // @@ -2978,7 +3184,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2991,7 +3197,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3038,7 +3244,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3051,7 +3257,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func 
(x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3182,7 +3388,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3195,7 +3401,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3269,7 +3475,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3282,7 +3488,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3436,264 +3642,296 @@ 
var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, - 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, - 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 
0x18, 0x05, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, + 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, + 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, + 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, + 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, + 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, + 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 
0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 
- 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, - 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, - 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, - 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, - 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, - 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, - 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, - 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, - 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, - 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, - 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, - 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 
0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, - 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, - 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, - 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, + 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 
0x6f, 0x6e, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, + 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, + 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, + 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, + 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, + 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, + 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, + 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, + 0x59, 0x54, 
0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, + 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, + 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, + 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, + 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, + 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, + 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 
0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, + 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, + 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, + 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, + 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 
0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, + 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, + 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 
0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, - 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, - 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, - 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x09, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, - 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, - 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, - 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, - 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, - 0x69, 0x6c, 
0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, - 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, - 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, - 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, - 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, - 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, - 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, - 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 
0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, - 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, - 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, - 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 
0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, - 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, - 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, - 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, - 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x3a, 0x0a, 0x0c, 0x4f, 
0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, - 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, - 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, - 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, 0x03, 0x0a, - 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, - 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, - 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, - 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 
0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, + 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, + 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, + 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, + 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, + 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, + 
0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, + 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, + 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, + 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, - 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xb7, 0x08, 0x0a, 0x0c, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, - 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 
0x52, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x12, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, + 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, + 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, + 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, + 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 
0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a, + 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, + 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, + 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, + 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, + 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, + 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, + 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 
0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, @@ -3885,98 +4123,103 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 9) -var 
file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 5: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 6: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 7: google.protobuf.MethodOptions.IdempotencyLevel - (GeneratedCodeInfo_Annotation_Semantic)(0), // 8: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 9: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 10: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 11: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 12: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 13: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 14: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 15: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 16: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 17: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 18: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 19: google.protobuf.FileOptions - (*MessageOptions)(nil), // 20: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 21: 
google.protobuf.FieldOptions - (*OneofOptions)(nil), // 22: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 23: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 24: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 25: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 26: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 27: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 28: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 29: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 30: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 31: google.protobuf.DescriptorProto.ReservedRange - (*EnumDescriptorProto_EnumReservedRange)(nil), // 32: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 33: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 34: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 35: google.protobuf.GeneratedCodeInfo.Annotation + (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel + (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 
(*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 20: google.protobuf.FileOptions + (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 36: 
google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 10, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 11, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 15, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 17, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 13, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 19, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 28, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 13, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 13, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 11, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 15, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 30, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 14, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 20, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 31, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 27, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> 
google.protobuf.FieldDescriptorProto.Label - 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 21, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 22, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 16, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 23, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 32, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 24, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 18, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 25, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 26, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 27, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 5, // 32: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 6, // 33: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType - 27, // 34: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 35: 
google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 36: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 37: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 38: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 7, // 39: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 27, // 40: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 33, // 41: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 34, // 42: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 35, // 43: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 12, // 44: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 8, // 45: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 46, // [46:46] is the sub-list for method output_type - 46, // [46:46] is the sub-list for method input_type - 46, // [46:46] is the sub-list for extension type_name - 46, // [46:46] is the sub-list for extension extendee - 0, // [0:46] is the sub-list for field type_name + 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 14, // 4: 
google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 17, // 22: 
google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 3, // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType + 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 40: 
google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 49, // [49:49] is the sub-list for method output_type + 49, // [49:49] is the sub-list for method input_type + 49, // [49:49] is the sub-list for extension type_name + 49, // [49:49] is the sub-list for extension extendee + 0, // [0:49] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4280,7 +4523,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state case 1: @@ -4292,7 +4535,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); 
i { case 0: return &v.state case 1: @@ -4304,7 +4547,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { + switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state case 1: @@ -4316,6 +4559,18 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -4333,8 +4588,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 9, - NumMessages: 27, + NumEnums: 10, + NumMessages: 28, NumExtensions: 0, NumServices: 0, }, diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index a6c7a33f33..580b232f47 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -142,39 +142,39 @@ import ( // // Example 2: Pack and unpack a message in Java. // -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// // or ... 
-// if (any.isSameTypeAs(Foo.getDefaultInstance())) { -// foo = any.unpack(Foo.getDefaultInstance()); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := anypb.New(foo) -// if err != nil { -// ... -// } -// ... -// foo := &pb.Foo{} -// if err := any.UnmarshalTo(foo); err != nil { -// ... -// } +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// // or ... +// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +// foo = any.unpack(Foo.getDefaultInstance()); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } // // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack @@ -182,8 +182,8 @@ import ( // in the type URL, for example "foo.bar.com/x/y.z" will yield type // name "y.z". // -// # JSON -// +// JSON +// ==== // The JSON representation of an `Any` value uses the regular // representation of the deserialized, embedded message, with an // additional field `@type` which contains the type URL. 
Example: diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index 9577ed593c..d2bac8b88e 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -132,7 +132,7 @@ import ( // `NullValue` is a singleton enumeration to represent the null value for the // `Value` type union. // -// The JSON representation for `NullValue` is JSON `null`. +// The JSON representation for `NullValue` is JSON `null`. type NullValue int32 const ( diff --git a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 61f69fc11b..81511a3363 100644 --- a/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/terraform/providers/google/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -167,7 +167,7 @@ import ( // [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with // the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use // the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() // ) to obtain a formatter capable of generating timestamps in this format. 
type Timestamp struct { state protoimpl.MessageState diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/.travis.yml b/terraform/providers/google/vendor/gopkg.in/yaml.v2/.travis.yml deleted file mode 100644 index 7348c50c0c..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go - -go: - - "1.4.x" - - "1.5.x" - - "1.6.x" - - "1.7.x" - - "1.8.x" - - "1.9.x" - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - "1.14.x" - - "tip" - -go_import_path: gopkg.in/yaml.v2 diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/LICENSE b/terraform/providers/google/vendor/gopkg.in/yaml.v2/LICENSE deleted file mode 100644 index 8dada3edaf..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/terraform/providers/google/vendor/gopkg.in/yaml.v2/LICENSE.libyaml deleted file mode 100644 index 8da58fbf6f..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/NOTICE b/terraform/providers/google/vendor/gopkg.in/yaml.v2/NOTICE deleted file mode 100644 index 866d74a7ad..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/NOTICE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/README.md b/terraform/providers/google/vendor/gopkg.in/yaml.v2/README.md deleted file mode 100644 index b50c6e8775..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/README.md +++ /dev/null @@ -1,133 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. 
Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. - -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! 
-b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/apic.go b/terraform/providers/google/vendor/gopkg.in/yaml.v2/apic.go deleted file mode 100644 index acf71402cf..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/apic.go +++ /dev/null @@ -1,744 +0,0 @@ -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. 
-func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -var disableLineWrapping = false - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } - if disableLineWrapping { - emitter.best_width = -1 - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. 
-func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. 
-// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. 
-func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. 
-func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. 
-// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. 
-// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. 
-// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. 
-// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. 
-// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/decode.go b/terraform/providers/google/vendor/gopkg.in/yaml.v2/decode.go deleted file mode 100644 index 129bc2a97d..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/decode.go +++ /dev/null @@ -1,815 +0,0 @@ -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "math" - "reflect" - "strconv" - "time" -) - -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - // For an alias node, alias holds the resolved alias. - alias *node - value string - implicit bool - children []*node - anchors map[string]*node -} - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. - -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node - doneInit bool -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - if len(b) == 0 { - b = []byte{'\n'} - } - yaml_parser_set_input_string(&p.parser, b) - return &p -} - -func newParserFromReader(r io.Reader) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - yaml_parser_set_input_reader(&p.parser, r) - return &p -} - -func (p *parser) init() { - if p.doneInit { - return - } - p.expect(yaml_STREAM_START_EVENT) - p.doneInit = true -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -// expect consumes an event from the event stream and -// checks that it's of the expected type. 
-func (p *parser) expect(e yaml_event_type_t) { - if p.event.typ == yaml_NO_EVENT { - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - } - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - if p.event.typ != e { - p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) - p.fail() - } - yaml_event_delete(&p.event) - p.event.typ = yaml_NO_EVENT -} - -// peek peeks at the next event in the event stream, -// puts the results into p.event and returns the event type. -func (p *parser) peek() yaml_event_type_t { - if p.event.typ != yaml_NO_EVENT { - return p.event.typ - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - return p.event.typ -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *node, anchor []byte) { - if anchor != nil { - p.doc.anchors[string(anchor)] = n - } -} - -func (p *parser) parse() *node { - p.init() - switch p.peek() { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. 
- return nil - default: - panic("attempted to parse unknown event: " + p.event.typ.String()) - } -} - -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.start_mark.line, - column: p.event.start_mark.column, - } -} - -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) - p.doc = n - p.expect(yaml_DOCUMENT_START_EVENT) - n.children = append(n.children, p.parse()) - p.expect(yaml_DOCUMENT_END_EVENT) - return n -} - -func (p *parser) alias() *node { - n := p.node(aliasNode) - n.value = string(p.event.anchor) - n.alias = p.doc.anchors[n.value] - if n.alias == nil { - failf("unknown anchor '%s' referenced", n.value) - } - p.expect(yaml_ALIAS_EVENT) - return n -} - -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit - p.anchor(n, p.event.anchor) - p.expect(yaml_SCALAR_EVENT) - return n -} - -func (p *parser) sequence() *node { - n := p.node(sequenceNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_SEQUENCE_START_EVENT) - for p.peek() != yaml_SEQUENCE_END_EVENT { - n.children = append(n.children, p.parse()) - } - p.expect(yaml_SEQUENCE_END_EVENT) - return n -} - -func (p *parser) mapping() *node { - n := p.node(mappingNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_MAPPING_START_EVENT) - for p.peek() != yaml_MAPPING_END_EVENT { - n.children = append(n.children, p.parse(), p.parse()) - } - p.expect(yaml_MAPPING_END_EVENT) - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. 
- -type decoder struct { - doc *node - aliases map[*node]bool - mapType reflect.Type - terrors []string - strict bool - - decodeCount int - aliasCount int - aliasDepth int -} - -var ( - mapItemType = reflect.TypeOf(MapItem{}) - durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - -func newDecoder(strict bool) *decoder { - d := &decoder{mapType: defaultMapType, strict: strict} - d.aliases = make(map[*node]bool) - return d -} - -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag - } - value := n.value - if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. 
-func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -const ( - // 400,000 decode operations is ~500kb of dense object declarations, or - // ~5kb of dense object declarations with 10000% alias expansion - alias_ratio_range_low = 400000 - - // 4,000,000 decode operations is ~5MB of dense object declarations, or - // ~4.5MB of dense object declarations with 10% alias expansion - alias_ratio_range_high = 4000000 - - // alias_ratio_range is the range over which we scale allowed alias ratios - alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) -) - -func allowedAliasRatio(decodeCount int) float64 { - switch { - case decodeCount <= alias_ratio_range_low: - // allow 99% to come from alias expansion for small-to-medium documents - return 0.99 - case decodeCount >= alias_ratio_range_high: - // allow 10% to come from alias expansion for very large documents - return 0.10 - default: - // scale smoothly from 99% down to 10% over the range. - // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. - // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
- return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) - } -} - -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { - d.decodeCount++ - if d.aliasDepth > 0 { - d.aliasCount++ - } - if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { - failf("document contains excessive aliasing") - } - switch n.kind { - case documentNode: - return d.document(n, out) - case aliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.kind { - case scalarNode: - good = d.scalar(n, out) - case mappingNode: - good = d.mapping(n, out) - case sequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) - } - return good -} - -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { - d.doc = n - d.unmarshal(n.children[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - if d.aliases[n] { - // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.value) - } - d.aliases[n] = true - d.aliasDepth++ - good = d.unmarshal(n.alias, out) - d.aliasDepth-- - delete(d.aliases, n) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -func (d *decoder) scalar(n *node, out reflect.Value) bool { - var tag string - var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yaml_STR_TAG - resolved = n.value - } else { - tag, resolved = resolve(n.tag, n.value) - if tag == yaml_BINARY_TAG { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - if out.Kind() == reflect.Map && !out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - return true - } - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - // We've resolved to exactly the type we want, so use that. - out.Set(resolvedv) - return true - } - // Perhaps we can use the value as a TextUnmarshaler to - // set its value. - if out.CanAddr() { - u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) - if ok { - var text []byte - if tag == yaml_BINARY_TAG { - text = []byte(resolved.(string)) - } else { - // We let any value be unmarshaled into TextUnmarshaler. - // That might be more lax than we'd like, but the - // TextUnmarshaler itself should bowl out any dubious values. 
- text = []byte(n.value) - } - err := u.UnmarshalText(text) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == yaml_BINARY_TAG { - out.SetString(resolved.(string)) - return true - } - if resolved != nil { - out.SetString(n.value) - return true - } - case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else if tag == yaml_TIMESTAMP_TAG { - // It looks like a timestamp but for backward compatibility - // reasons we set it as a string, so that code that unmarshals - // timestamp-like values into interface{} will continue to - // see a string and not a time.Time. - // TODO(v3) Drop this. - out.Set(reflect.ValueOf(n.value)) - } else { - out.Set(reflect.ValueOf(resolved)) - } - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch resolved := resolved.(type) { - case int: - if !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case int64: - if !out.OverflowInt(resolved) { - out.SetInt(resolved) - return true - } - case uint64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case float64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - return true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - 
return true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - return true - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - return true - case int64: - out.SetFloat(float64(resolved)) - return true - case uint64: - out.SetFloat(float64(resolved)) - return true - case float64: - out.SetFloat(resolved) - return true - } - case reflect.Struct: - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - out.Set(resolvedv) - return true - } - case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? - elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - return true - } - } - d.terror(n, tag, out) - return false -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - l := len(n.children) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Array: - if l != out.Len() { - failf("invalid array: want %d elements but got %d", out.Len(), l) - } - case reflect.Interface: - // No type hints. Will have to use a generic sequence. 
- iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, yaml_SEQ_TAG, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - if out.Kind() != reflect.Array { - out.Set(out.Slice(0, j)) - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) - case reflect.Map: - // okay - case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) - } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true - } - default: - d.terror(n, yaml_MAP_TAG, out) - return false - } - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - l := len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - d.setMapIndex(n.children[i+1], out, k, e) - } - } - } - d.mapType = mapType - return true -} - -func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { - if d.strict && out.MapIndex(k) != zeroValue { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) 
- return - } - out.SetMapIndex(k, v) -} - -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yaml_MAP_TAG, out) - return false - } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - item := MapItem{} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } - } - } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType - return true -} - -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - name := settableValueOf("") - l := len(n.children) - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - var doneFields []bool - if d.strict { - doneFields = make([]bool, len(sinfo.FieldsList)) - } - for i := 0; i < l; i += 2 { - ni := n.children[i] - if isMerge(ni) { - d.merge(n.children[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.strict { - if doneFields[info.Id] { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) - continue - } - doneFields[info.Id] = true - } - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = out.FieldByIndex(info.Inline) - } - d.unmarshal(n.children[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := 
reflect.New(elemType).Elem() - d.unmarshal(n.children[i+1], value) - d.setMapIndex(n.children[i+1], inlineMap, name, value) - } else if d.strict { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - if n.alias != nil && n.alias.kind != mappingNode { - failWantMap() - } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - if ni.alias != nil && ni.alias.kind != mappingNode { - failWantMap() - } - } else if ni.kind != mappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) -} diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/emitterc.go b/terraform/providers/google/vendor/gopkg.in/yaml.v2/emitterc.go deleted file mode 100644 index a1c2cc5262..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. 
-func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. 
-func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. -// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. 
-func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. 
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return 
yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. 
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if 
yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. 
-func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. 
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. 
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. 
-func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. 
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. 
-func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { 
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. -func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. 
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' 
{ - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. 
-func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if 
i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. 
-func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', 
')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < 
len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = 
put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if 
!is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if 
!yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/encode.go b/terraform/providers/google/vendor/gopkg.in/yaml.v2/encode.go deleted file mode 100644 index 0ee738e11b..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/encode.go +++ /dev/null @@ -1,390 +0,0 @@ -package yaml - -import ( - "encoding" - "fmt" - "io" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// jsonNumber is the interface of the encoding/json.Number datatype. -// Repeating the interface here avoids a dependency on encoding/json, and also -// supports other libraries like jsoniter, which use a similar datatype with -// the same interface. Detecting this interface is useful when dealing with -// structures containing json.Number, which is a string under the hood. The -// encoder should prefer the use of Int64(), Float64() and string(), in that -// order, when encoding this type. -type jsonNumber interface { - Float64() (float64, error) - Int64() (int64, error) - String() string -} - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool - // doneInit holds whether the initial stream_start_event has been - // emitted. 
- doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, w) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. - e.must(yaml_emitter_emit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yaml_document_end_event_initialize(&e.event, true) - e.emit() -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch m := iface.(type) { - case jsonNumber: - integer, err := m.Int64() - if err == nil { - // In this case the json.Number is a valid int64 - in = reflect.ValueOf(integer) - break - } - float, err := m.Float64() - if err == nil { - // In this case the json.Number is a valid float64 - in = reflect.ValueOf(float) - break - } - // fallback case - no number could be obtained - in = reflect.ValueOf(m.String()) - case time.Time, *time.Time: - // Although time.Time implements 
TextMarshaler, - // we don't want to treat it as a string for YAML - // purposes because YAML has special support for - // timestamps. - case Marshaler: - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - case encoding.TextMarshaler: - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.Type() == ptrTimeType { - e.timev(tag, in.Elem()) - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - if in.Type() == timeType { - e.timev(tag, in) - } else { - e.structv(tag, in) - } - case reflect.Slice, reflect.Array: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e 
*encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. 
-func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == yaml_BINARY_TAG { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = yaml_BINARY_TAG - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. 
- switch { - case strings.Contains(s, "\n"): - style = yaml_LITERAL_SCALAR_STYLE - case canUsePlain: - style = yaml_PLAIN_SCALAR_STYLE - default: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/parserc.go b/terraform/providers/google/vendor/gopkg.in/yaml.v2/parserc.go deleted file mode 100644 index 81d05dfe57..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/parserc.go +++ /dev/null @@ -1,1095 +0,0 @@ -package yaml - -import ( - "bytes" 
-) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). 
-func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case 
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. 
- if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else 
{ - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) - break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. 
- end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, 
start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == 
yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? -// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = 
parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? 
- } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = 
parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. -func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. 
-func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. 
-func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/readerc.go b/terraform/providers/google/vendor/gopkg.in/yaml.v2/readerc.go deleted file mode 100644 index 7c1f5fac3d..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/readerc.go +++ /dev/null @@ -1,412 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. -const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. 
- buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. 
-func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. 
- inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. 
- for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. 
- if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. 
- parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. 
- for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/resolve.go b/terraform/providers/google/vendor/gopkg.in/yaml.v2/resolve.go deleted file mode 100644 index 4120e0c916..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/resolve.go +++ /dev/null @@ -1,258 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func 
shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func resolve(tag string, in string) (rtag string, out interface{}) { - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: - return - case yaml_FLOAT_TAG: - if rtag == yaml_INT_TAG { - switch v := out.(type) { - case int64: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - case int: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - } - } - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. 
- // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. - if tag == "" || tag == yaml_TIMESTAMP_TAG { - t, ok := parseTimestamp(in) - if ok { - return yaml_TIMESTAMP_TAG, t - } - } - - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - if yamlStyleFloat.MatchString(plain) { - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - } - default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") - } - } - return yaml_STR_TAG, in -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. 
-func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. -var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. -} - -// parseTimestamp parses s as a timestamp string and -// returns the timestamp and reports whether it succeeded. -// Timestamp formats are defined at http://yaml.org/type/timestamp.html -func parseTimestamp(s string) (time.Time, bool) { - // TODO write code to check all the formats supported by - // http://yaml.org/type/timestamp.html instead of using time.Parse. - - // Quick check: all date formats start with YYYY-. 
- i := 0 - for ; i < len(s); i++ { - if c := s[i]; c < '0' || c > '9' { - break - } - } - if i != 4 || i == len(s) || s[i] != '-' { - return time.Time{}, false - } - for _, format := range allowedTimestampFormats { - if t, err := time.Parse(format, s); err == nil { - return t, true - } - } - return time.Time{}, false -} diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/scannerc.go b/terraform/providers/google/vendor/gopkg.in/yaml.v2/scannerc.go deleted file mode 100644 index 0b9bb6030a..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/scannerc.go +++ /dev/null @@ -1,2711 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. 
-// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... 
-// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. -// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. 
We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). 
However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. 
If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
-// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. 
-func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. 
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - if parser.tokens_head != len(parser.tokens) { - // If queue is non-empty, check if any potential simple key may - // occupy the head position. - head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] - if !ok { - break - } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { - return false - } else if !valid { - break - } - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. 
- if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? 
- if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? 
- // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. 
- return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { - if !simple_key.possible { - return false, true - } - - // The 1.2 specification says: - // - // "If the ? indicator is omitted, parsing needs to see past the - // implicit key to recognize it as such. To limit the amount of - // lookahead required, the “:” indicator must appear at most 1024 - // Unicode characters beyond the start of the key. In addition, the key - // is restricted to a single line." - // - if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { - // Check if the potential simple key to be removed is required. - if simple_key.required { - return false, yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - return false, true - } - return true, true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. 
- // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - } - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) - } - return true -} - -// max_flow_level limits the flow_level -const max_flow_level = 10000 - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ - possible: false, - required: false, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - }) - - // Increase the flow level. - parser.flow_level++ - if parser.flow_level > max_flow_level { - return yaml_parser_set_scanner_error(parser, - "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_flow_level)) - } - return true -} - -// Decrease the flow level. 
-func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - last := len(parser.simple_keys) - 1 - delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) - parser.simple_keys = parser.simple_keys[:last] - } - return true -} - -// max_indents limits the indents stack size -const max_indents = 10000 - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - if len(parser.indents) > max_indents { - return yaml_parser_set_scanner_error(parser, - "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_indents)) - } - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - // Loop through the indentation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. 
- token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - parser.simple_keys_by_tok = make(map[int]int) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. 
- if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. 
- yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. 
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. 
- token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { - return false - - } else if valid { - - // Create the KEY token and insert it into the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - delete(parser.simple_keys_by_tok, simple_key.token_number) - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. 
- token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. 
- if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. 
- if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. 
- *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. 
- if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. 
- length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. 
- */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. 
- *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. 
-func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. 
- if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. 
- if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. 
- for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. 
- if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. 
- if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. 
- switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. 
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. 
- if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' 
&& - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. 
- if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. 
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/sorter.go b/terraform/providers/google/vendor/gopkg.in/yaml.v2/sorter.go deleted file mode 100644 index 4c45e660a8..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/sorter.go +++ /dev/null @@ -1,113 +0,0 @@ -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - return bl - } - var ai, bi int - var an, bn int64 - if ar[i] == '0' || br[i] == '0' { - for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { - if ar[j] != '0' { - an = 1 - bn = 1 - break - } - } - } - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a 
number/bool or not. -func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. -func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/writerc.go b/terraform/providers/google/vendor/gopkg.in/yaml.v2/writerc.go deleted file mode 100644 index a2dde608cb..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/writerc.go +++ /dev/null @@ -1,26 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. 
- if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/yaml.go b/terraform/providers/google/vendor/gopkg.in/yaml.v2/yaml.go deleted file mode 100644 index 30813884c0..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/yaml.go +++ /dev/null @@ -1,478 +0,0 @@ -// Package yaml implements YAML support for the Go language. -// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" -) - -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. -type MapItem struct { - Key, Value interface{} -} - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. -type Unmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. 
-type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. -// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. -// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to a type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. -// -func Unmarshal(in []byte, out interface{}) (err error) { - return unmarshal(in, out, false) -} - -// UnmarshalStrict is like Unmarshal except that any fields that are found -// in the data that do not have corresponding struct members, or mapping -// keys that are duplicates, will result in -// an error. -func UnmarshalStrict(in []byte, out interface{}) (err error) { - return unmarshal(in, out, true) -} - -// A Decoder reads and decodes YAML values from an input stream. 
-type Decoder struct { - strict bool - parser *parser -} - -// NewDecoder returns a new decoder that reads from r. -// -// The decoder introduces its own buffering and may read -// data from r beyond the YAML values requested. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - parser: newParserFromReader(r), - } -} - -// SetStrict sets whether strict decoding behaviour is enabled when -// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. -func (dec *Decoder) SetStrict(strict bool) { - dec.strict = strict -} - -// Decode reads the next YAML-encoded value from its input -// and stores it in the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. -func (dec *Decoder) Decode(v interface{}) (err error) { - d := newDecoder(dec.strict) - defer handleErr(&err) - node := dec.parser.parse() - if node == nil { - return io.EOF - } - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(node, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -func unmarshal(in []byte, out interface{}, strict bool) (err error) { - defer handleErr(&err) - d := newDecoder(strict) - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only marshalled if they are exported (have an upper case -// first letter), and are marshalled using the field name lowercased as the -// default key. 
Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) yaml:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be excluded if IsZero returns true. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. -// -// In addition, if the key is "-", the field is ignored. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshalDoc("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -// An Encoder writes YAML values to an output stream. -type Encoder struct { - encoder *encoder -} - -// NewEncoder returns a new encoder that writes to w. -// The Encoder should be closed after use to flush all data -// to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - encoder: newEncoderWithWriter(w), - } -} - -// Encode writes the YAML encoding of v to the stream. 
-// If multiple items are encoded to the stream, the -// second and subsequent document will be preceded -// with a "---" document separator, but the first will not. -// -// See the documentation for Marshal for details about the conversion of Go -// values to YAML. -func (e *Encoder) Encode(v interface{}) (err error) { - defer handleErr(&err) - e.encoder.marshalDoc("", reflect.ValueOf(v)) - return nil -} - -// Close closes the encoder by writing any remaining data. -// It does not write a stream terminating string "...". -func (e *Encoder) Close() (err error) { - defer handleErr(&err) - e.encoder.finish() - return nil -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. 
- InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. - Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in 
struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -// IsZeroer is used to check whether an object is zero to -// determine whether it should be omitted when marshaling -// with the omitempty flag. One notable implementation -// is time.Time. 
-type IsZeroer interface { - IsZero() bool -} - -func isZero(v reflect.Value) bool { - kind := v.Kind() - if z, ok := v.Interface().(IsZeroer); ok { - if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { - return true - } - return z.IsZero() - } - switch kind { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField() - 1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} - -// FutureLineWrap globally disables line wrapping when encoding long strings. -// This is a temporary and thus deprecated method introduced to faciliate -// migration towards v3, which offers more control of line lengths on -// individual encodings, and has a default matching the behavior introduced -// by this function. -// -// The default formatting of v2 was erroneously changed in v2.3.0 and reverted -// in v2.4.0, at which point this function was introduced to help migration. -func FutureLineWrap() { - disableLineWrapping = true -} diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/yamlh.go b/terraform/providers/google/vendor/gopkg.in/yaml.v2/yamlh.go deleted file mode 100644 index f6a9c8e34b..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/yamlh.go +++ /dev/null @@ -1,739 +0,0 @@ -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. 
-type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. -} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. 
- yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. 
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). 
- value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. -) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). 
- version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. 
- yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. -type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. 
- version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
- yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. - yaml_PARSE_END_STATE // Expect nothing. 
-) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return 
"yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. 
- tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. 
- yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. 
- unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? - flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? 
- - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/terraform/providers/google/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/terraform/providers/google/vendor/gopkg.in/yaml.v2/yamlprivateh.go deleted file mode 100644 index 8110ce3c37..0000000000 --- a/terraform/providers/google/vendor/gopkg.in/yaml.v2/yamlprivateh.go +++ /dev/null @@ -1,173 +0,0 @@ -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. 
-func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. -func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. 
-func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. 
- if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/terraform/providers/google/vendor/modules.txt b/terraform/providers/google/vendor/modules.txt index fb942f51d6..4fddd64ebd 100644 --- a/terraform/providers/google/vendor/modules.txt +++ b/terraform/providers/google/vendor/modules.txt @@ -1,33 +1,33 @@ -# bitbucket.org/creachadair/stringset v0.0.11 +# bitbucket.org/creachadair/stringset v0.0.8 => bitbucket.org/creachadair/stringset v0.0.11 ## explicit; go 1.18 bitbucket.org/creachadair/stringset -# cloud.google.com/go v0.110.0 +# cloud.google.com/go v0.110.2 ## explicit; go 1.19 cloud.google.com/go cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/bigtable v1.17.0 +# cloud.google.com/go/bigtable v1.19.0 ## explicit; go 1.19 cloud.google.com/go/bigtable cloud.google.com/go/bigtable/internal cloud.google.com/go/bigtable/internal/option -# cloud.google.com/go/compute v1.18.0 +# cloud.google.com/go/compute v1.19.3 ## explicit; go 1.19 cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.3 ## explicit; go 1.19 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v0.12.0 +# cloud.google.com/go/iam v1.1.0 ## explicit; go 1.19 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb -# cloud.google.com/go/longrunning v0.4.1 +# cloud.google.com/go/longrunning v0.5.0 ## explicit; go 1.19 cloud.google.com/go/longrunning cloud.google.com/go/longrunning/autogen cloud.google.com/go/longrunning/autogen/longrunningpb -# github.com/GoogleCloudPlatform/declarative-resource-client-library v1.34.0 +# github.com/GoogleCloudPlatform/declarative-resource-client-library v1.44.0 ## explicit; go 1.19 github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl 
github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl/operations @@ -35,6 +35,7 @@ github.com/GoogleCloudPlatform/declarative-resource-client-library/services/goog github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/bigqueryreservation github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuild +github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuildv2 github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/clouddeploy github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute @@ -44,11 +45,8 @@ github.com/GoogleCloudPlatform/declarative-resource-client-library/services/goog github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/firebaserules -github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/logging -github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/monitoring github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/orgpolicy -github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise # github.com/agext/levenshtein v1.2.2 @@ -73,27 +71,23 @@ 
github.com/cespare/xxhash/v2 # github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe ## explicit; go 1.11 github.com/cncf/udpa/go/udpa/type/v1 -# github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b +# github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 ## explicit; go 1.11 github.com/cncf/xds/go/udpa/annotations github.com/cncf/xds/go/udpa/type/v1 github.com/cncf/xds/go/xds/annotations/v3 github.com/cncf/xds/go/xds/core/v3 github.com/cncf/xds/go/xds/data/orca/v3 +github.com/cncf/xds/go/xds/service/orca/v3 github.com/cncf/xds/go/xds/type/matcher/v3 github.com/cncf/xds/go/xds/type/v3 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/dnaeon/go-vcr v1.0.1 -## explicit -github.com/dnaeon/go-vcr/cassette -github.com/dnaeon/go-vcr/recorder -# github.com/envoyproxy/go-control-plane v0.10.3 -## explicit; go 1.16 +# github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f +## explicit; go 1.17 github.com/envoyproxy/go-control-plane/envoy/admin/v3 github.com/envoyproxy/go-control-plane/envoy/annotations -github.com/envoyproxy/go-control-plane/envoy/api/v2/core github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3 github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3 github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3 @@ -107,25 +101,30 @@ github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3 github.com/envoyproxy/go-control-plane/envoy/config/route/v3 github.com/envoyproxy/go-control-plane/envoy/config/tap/v3 github.com/envoyproxy/go-control-plane/envoy/config/trace/v3 +github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3 github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3 github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3 github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3 github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3 
github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3 github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3 github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3 github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3 github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3 github.com/envoyproxy/go-control-plane/envoy/service/status/v3 -github.com/envoyproxy/go-control-plane/envoy/type github.com/envoyproxy/go-control-plane/envoy/type/http/v3 -github.com/envoyproxy/go-control-plane/envoy/type/matcher github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3 github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3 github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3 github.com/envoyproxy/go-control-plane/envoy/type/v3 -# github.com/envoyproxy/protoc-gen-validate v0.9.1 -## explicit; go 1.17 +# github.com/envoyproxy/protoc-gen-validate v0.10.1 +## explicit; go 1.19 github.com/envoyproxy/protoc-gen-validate/validate # github.com/fatih/color v1.13.0 ## explicit; go 1.13 @@ -136,13 +135,15 @@ github.com/gammazero/deque # github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 ## explicit github.com/gammazero/workerpool -# github.com/golang/glog v1.0.0 -## explicit; go 1.11 +# github.com/golang/glog v1.1.0 +## explicit; go 1.18 github.com/golang/glog 
+github.com/golang/glog/internal/logsink +github.com/golang/glog/internal/stackdump # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.5.2 +# github.com/golang/protobuf v1.5.3 ## explicit; go 1.9 github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto @@ -164,14 +165,36 @@ github.com/google/go-cmp/cmp/internal/value # github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 ## explicit; go 1.17 github.com/google/go-cpy/cpy +# github.com/google/s2a-go v0.1.4 +## explicit; go 1.16 +github.com/google/s2a-go +github.com/google/s2a-go/fallback +github.com/google/s2a-go/internal/authinfo +github.com/google/s2a-go/internal/handshaker +github.com/google/s2a-go/internal/handshaker/service +github.com/google/s2a-go/internal/proto/common_go_proto +github.com/google/s2a-go/internal/proto/s2a_context_go_proto +github.com/google/s2a-go/internal/proto/s2a_go_proto +github.com/google/s2a-go/internal/proto/v2/common_go_proto +github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto +github.com/google/s2a-go/internal/proto/v2/s2a_go_proto +github.com/google/s2a-go/internal/record +github.com/google/s2a-go/internal/record/internal/aeadcrypter +github.com/google/s2a-go/internal/record/internal/halfconn +github.com/google/s2a-go/internal/tokenmanager +github.com/google/s2a-go/internal/v2 +github.com/google/s2a-go/internal/v2/certverifier +github.com/google/s2a-go/internal/v2/remotesigner +github.com/google/s2a-go/internal/v2/tlsconfigstore +github.com/google/s2a-go/stream # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.2.3 +# github.com/googleapis/enterprise-certificate-proxy v0.2.5 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.7.1 +# github.com/googleapis/gax-go/v2 
v2.11.0 ## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror @@ -251,8 +274,42 @@ github.com/hashicorp/terraform-json # github.com/hashicorp/terraform-plugin-framework v1.1.1 ## explicit; go 1.18 github.com/hashicorp/terraform-plugin-framework/attr +github.com/hashicorp/terraform-plugin-framework/attr/xattr +github.com/hashicorp/terraform-plugin-framework/datasource +github.com/hashicorp/terraform-plugin-framework/datasource/schema github.com/hashicorp/terraform-plugin-framework/diag +github.com/hashicorp/terraform-plugin-framework/internal/fromproto5 +github.com/hashicorp/terraform-plugin-framework/internal/fromproto6 +github.com/hashicorp/terraform-plugin-framework/internal/fromtftypes +github.com/hashicorp/terraform-plugin-framework/internal/fwschema +github.com/hashicorp/terraform-plugin-framework/internal/fwschema/fwxschema +github.com/hashicorp/terraform-plugin-framework/internal/fwschemadata +github.com/hashicorp/terraform-plugin-framework/internal/fwserver +github.com/hashicorp/terraform-plugin-framework/internal/logging +github.com/hashicorp/terraform-plugin-framework/internal/privatestate +github.com/hashicorp/terraform-plugin-framework/internal/proto5server +github.com/hashicorp/terraform-plugin-framework/internal/proto6server +github.com/hashicorp/terraform-plugin-framework/internal/reflect +github.com/hashicorp/terraform-plugin-framework/internal/toproto5 +github.com/hashicorp/terraform-plugin-framework/internal/toproto6 +github.com/hashicorp/terraform-plugin-framework/internal/totftypes github.com/hashicorp/terraform-plugin-framework/path +github.com/hashicorp/terraform-plugin-framework/provider +github.com/hashicorp/terraform-plugin-framework/provider/metaschema +github.com/hashicorp/terraform-plugin-framework/provider/schema +github.com/hashicorp/terraform-plugin-framework/providerserver +github.com/hashicorp/terraform-plugin-framework/resource +github.com/hashicorp/terraform-plugin-framework/resource/schema 
+github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier +github.com/hashicorp/terraform-plugin-framework/schema/validator +github.com/hashicorp/terraform-plugin-framework/tfsdk +github.com/hashicorp/terraform-plugin-framework/types +github.com/hashicorp/terraform-plugin-framework/types/basetypes +# github.com/hashicorp/terraform-plugin-framework-validators v0.9.0 +## explicit; go 1.18 +github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag +github.com/hashicorp/terraform-plugin-framework-validators/internal/schemavalidator +github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator # github.com/hashicorp/terraform-plugin-go v0.14.3 ## explicit; go 1.18 github.com/hashicorp/terraform-plugin-go/internal/logging @@ -285,7 +342,6 @@ github.com/hashicorp/terraform-plugin-mux/tf5muxserver # github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.0 ## explicit; go 1.18 github.com/hashicorp/terraform-plugin-sdk/v2/diag -github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource @@ -304,10 +360,127 @@ github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags github.com/hashicorp/terraform-plugin-sdk/v2/meta github.com/hashicorp/terraform-plugin-sdk/v2/plugin github.com/hashicorp/terraform-plugin-sdk/v2/terraform -# github.com/hashicorp/terraform-provider-google v1.20.1-0.20230327171628-0dc3bde12208 -## explicit; go 1.18 +# github.com/hashicorp/terraform-provider-google v1.20.1-0.20230718215755-3edc574a3a8f +## explicit; go 1.19 github.com/hashicorp/terraform-provider-google -github.com/hashicorp/terraform-provider-google/google +github.com/hashicorp/terraform-provider-google/google/envvar +github.com/hashicorp/terraform-provider-google/google/fwmodels +github.com/hashicorp/terraform-provider-google/google/fwprovider 
+github.com/hashicorp/terraform-provider-google/google/fwresource +github.com/hashicorp/terraform-provider-google/google/fwtransport +github.com/hashicorp/terraform-provider-google/google/provider +github.com/hashicorp/terraform-provider-google/google/services/accessapproval +github.com/hashicorp/terraform-provider-google/google/services/accesscontextmanager +github.com/hashicorp/terraform-provider-google/google/services/activedirectory +github.com/hashicorp/terraform-provider-google/google/services/alloydb +github.com/hashicorp/terraform-provider-google/google/services/apigee +github.com/hashicorp/terraform-provider-google/google/services/apikeys +github.com/hashicorp/terraform-provider-google/google/services/appengine +github.com/hashicorp/terraform-provider-google/google/services/artifactregistry +github.com/hashicorp/terraform-provider-google/google/services/assuredworkloads +github.com/hashicorp/terraform-provider-google/google/services/beyondcorp +github.com/hashicorp/terraform-provider-google/google/services/bigquery +github.com/hashicorp/terraform-provider-google/google/services/bigqueryanalyticshub +github.com/hashicorp/terraform-provider-google/google/services/bigqueryconnection +github.com/hashicorp/terraform-provider-google/google/services/bigquerydatapolicy +github.com/hashicorp/terraform-provider-google/google/services/bigquerydatatransfer +github.com/hashicorp/terraform-provider-google/google/services/bigqueryreservation +github.com/hashicorp/terraform-provider-google/google/services/bigtable +github.com/hashicorp/terraform-provider-google/google/services/billing +github.com/hashicorp/terraform-provider-google/google/services/binaryauthorization +github.com/hashicorp/terraform-provider-google/google/services/certificatemanager +github.com/hashicorp/terraform-provider-google/google/services/cloudasset +github.com/hashicorp/terraform-provider-google/google/services/cloudbuild +github.com/hashicorp/terraform-provider-google/google/services/cloudbuildv2 
+github.com/hashicorp/terraform-provider-google/google/services/clouddeploy +github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions +github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions2 +github.com/hashicorp/terraform-provider-google/google/services/cloudidentity +github.com/hashicorp/terraform-provider-google/google/services/cloudids +github.com/hashicorp/terraform-provider-google/google/services/cloudiot +github.com/hashicorp/terraform-provider-google/google/services/cloudrun +github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2 +github.com/hashicorp/terraform-provider-google/google/services/cloudscheduler +github.com/hashicorp/terraform-provider-google/google/services/cloudtasks +github.com/hashicorp/terraform-provider-google/google/services/composer +github.com/hashicorp/terraform-provider-google/google/services/compute +github.com/hashicorp/terraform-provider-google/google/services/container +github.com/hashicorp/terraform-provider-google/google/services/containeranalysis +github.com/hashicorp/terraform-provider-google/google/services/containerattached +github.com/hashicorp/terraform-provider-google/google/services/containeraws +github.com/hashicorp/terraform-provider-google/google/services/containerazure +github.com/hashicorp/terraform-provider-google/google/services/databasemigrationservice +github.com/hashicorp/terraform-provider-google/google/services/datacatalog +github.com/hashicorp/terraform-provider-google/google/services/dataflow +github.com/hashicorp/terraform-provider-google/google/services/datafusion +github.com/hashicorp/terraform-provider-google/google/services/datalossprevention +github.com/hashicorp/terraform-provider-google/google/services/dataplex +github.com/hashicorp/terraform-provider-google/google/services/dataproc +github.com/hashicorp/terraform-provider-google/google/services/dataprocmetastore +github.com/hashicorp/terraform-provider-google/google/services/datastore 
+github.com/hashicorp/terraform-provider-google/google/services/datastream +github.com/hashicorp/terraform-provider-google/google/services/deploymentmanager +github.com/hashicorp/terraform-provider-google/google/services/dialogflow +github.com/hashicorp/terraform-provider-google/google/services/dialogflowcx +github.com/hashicorp/terraform-provider-google/google/services/dns +github.com/hashicorp/terraform-provider-google/google/services/documentai +github.com/hashicorp/terraform-provider-google/google/services/essentialcontacts +github.com/hashicorp/terraform-provider-google/google/services/eventarc +github.com/hashicorp/terraform-provider-google/google/services/filestore +github.com/hashicorp/terraform-provider-google/google/services/firebaserules +github.com/hashicorp/terraform-provider-google/google/services/firestore +github.com/hashicorp/terraform-provider-google/google/services/gameservices +github.com/hashicorp/terraform-provider-google/google/services/gkebackup +github.com/hashicorp/terraform-provider-google/google/services/gkehub +github.com/hashicorp/terraform-provider-google/google/services/gkehub2 +github.com/hashicorp/terraform-provider-google/google/services/healthcare +github.com/hashicorp/terraform-provider-google/google/services/iam2 +github.com/hashicorp/terraform-provider-google/google/services/iambeta +github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool +github.com/hashicorp/terraform-provider-google/google/services/iap +github.com/hashicorp/terraform-provider-google/google/services/identityplatform +github.com/hashicorp/terraform-provider-google/google/services/kms +github.com/hashicorp/terraform-provider-google/google/services/logging +github.com/hashicorp/terraform-provider-google/google/services/looker +github.com/hashicorp/terraform-provider-google/google/services/memcache +github.com/hashicorp/terraform-provider-google/google/services/mlengine 
+github.com/hashicorp/terraform-provider-google/google/services/monitoring +github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity +github.com/hashicorp/terraform-provider-google/google/services/networkmanagement +github.com/hashicorp/terraform-provider-google/google/services/networksecurity +github.com/hashicorp/terraform-provider-google/google/services/networkservices +github.com/hashicorp/terraform-provider-google/google/services/notebooks +github.com/hashicorp/terraform-provider-google/google/services/orgpolicy +github.com/hashicorp/terraform-provider-google/google/services/osconfig +github.com/hashicorp/terraform-provider-google/google/services/oslogin +github.com/hashicorp/terraform-provider-google/google/services/privateca +github.com/hashicorp/terraform-provider-google/google/services/publicca +github.com/hashicorp/terraform-provider-google/google/services/pubsub +github.com/hashicorp/terraform-provider-google/google/services/pubsublite +github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise +github.com/hashicorp/terraform-provider-google/google/services/redis +github.com/hashicorp/terraform-provider-google/google/services/resourcemanager +github.com/hashicorp/terraform-provider-google/google/services/secretmanager +github.com/hashicorp/terraform-provider-google/google/services/securitycenter +github.com/hashicorp/terraform-provider-google/google/services/servicemanagement +github.com/hashicorp/terraform-provider-google/google/services/servicenetworking +github.com/hashicorp/terraform-provider-google/google/services/serviceusage +github.com/hashicorp/terraform-provider-google/google/services/sourcerepo +github.com/hashicorp/terraform-provider-google/google/services/spanner +github.com/hashicorp/terraform-provider-google/google/services/sql +github.com/hashicorp/terraform-provider-google/google/services/storage +github.com/hashicorp/terraform-provider-google/google/services/storagetransfer 
+github.com/hashicorp/terraform-provider-google/google/services/tags +github.com/hashicorp/terraform-provider-google/google/services/tpu +github.com/hashicorp/terraform-provider-google/google/services/vertexai +github.com/hashicorp/terraform-provider-google/google/services/vpcaccess +github.com/hashicorp/terraform-provider-google/google/services/workflows +github.com/hashicorp/terraform-provider-google/google/sweeper +github.com/hashicorp/terraform-provider-google/google/tpgdclresource +github.com/hashicorp/terraform-provider-google/google/tpgiamresource +github.com/hashicorp/terraform-provider-google/google/tpgresource +github.com/hashicorp/terraform-provider-google/google/transport +github.com/hashicorp/terraform-provider-google/google/verify github.com/hashicorp/terraform-provider-google/version # github.com/hashicorp/terraform-registry-address v0.1.0 ## explicit; go 1.14 @@ -396,25 +569,23 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 => golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 +# golang.org/x/crypto v0.11.0 => golang.org/x/crypto v0.11.0 ## explicit; go 1.17 -golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 golang.org/x/crypto/chacha20 -golang.org/x/crypto/curve25519 -golang.org/x/crypto/curve25519/internal/field -golang.org/x/crypto/ed25519 +golang.org/x/crypto/chacha20poly1305 +golang.org/x/crypto/cryptobyte +golang.org/x/crypto/cryptobyte/asn1 +golang.org/x/crypto/hkdf +golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 -golang.org/x/crypto/internal/subtle golang.org/x/crypto/openpgp golang.org/x/crypto/openpgp/armor golang.org/x/crypto/openpgp/elgamal golang.org/x/crypto/openpgp/errors golang.org/x/crypto/openpgp/packet golang.org/x/crypto/openpgp/s2k -golang.org/x/crypto/ssh -golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -# golang.org/x/net v0.8.0 => golang.org/x/net v0.8.0 +# 
golang.org/x/net v0.12.0 => golang.org/x/net v0.12.0 ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/http/httpguts @@ -423,7 +594,7 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.6.0 +# golang.org/x/oauth2 v0.9.0 ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -432,13 +603,13 @@ golang.org/x/oauth2/google/internal/externalaccount golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sys v0.6.0 +# golang.org/x/sys v0.10.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.8.0 => golang.org/x/text v0.8.0 +# golang.org/x/text v0.11.0 => golang.org/x/text v0.11.0 ## explicit; go 1.17 golang.org/x/text/secure/bidirule golang.org/x/text/transform @@ -448,7 +619,7 @@ golang.org/x/text/unicode/norm ## explicit; go 1.17 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.114.0 +# google.golang.org/api v0.130.0 ## explicit; go 1.19 google.golang.org/api/appengine/v1 google.golang.org/api/bigquery/v2 @@ -515,32 +686,42 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 +# google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc ## explicit; go 1.19 -google.golang.org/genproto/googleapis/api -google.golang.org/genproto/googleapis/api/annotations -google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/bigtable/admin/v2 google.golang.org/genproto/googleapis/bigtable/v2 google.golang.org/genproto/googleapis/iam/v1 google.golang.org/genproto/googleapis/longrunning +google.golang.org/genproto/googleapis/type/expr +google.golang.org/genproto/internal +# 
google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc +## explicit; go 1.19 +google.golang.org/genproto/googleapis/api +google.golang.org/genproto/googleapis/api/annotations +google.golang.org/genproto/googleapis/api/expr/v1alpha1 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529 +## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -google.golang.org/genproto/googleapis/type/expr -google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.53.0 +# google.golang.org/grpc v1.56.1 ## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes +google.golang.org/grpc/authz/audit +google.golang.org/grpc/authz/audit/stdout google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/rls +google.golang.org/grpc/balancer/rls/internal/adaptive +google.golang.org/grpc/balancer/rls/internal/keys google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/balancer/weightedroundrobin +google.golang.org/grpc/balancer/weightedroundrobin/internal google.golang.org/grpc/balancer/weightedtarget google.golang.org/grpc/balancer/weightedtarget/weightedaggregator google.golang.org/grpc/binarylog/grpc_binarylog_v1 @@ -569,6 +750,7 @@ google.golang.org/grpc/internal google.golang.org/grpc/internal/admin google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/balancer/gracefulswitch +google.golang.org/grpc/internal/balancer/nop google.golang.org/grpc/internal/balancergroup google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog @@ -594,7 +776,6 @@ google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig 
google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall -google.golang.org/grpc/internal/testutils google.golang.org/grpc/internal/transport google.golang.org/grpc/internal/transport/networktype google.golang.org/grpc/internal/wrr @@ -602,6 +783,8 @@ google.golang.org/grpc/internal/xds/matcher google.golang.org/grpc/internal/xds/rbac google.golang.org/grpc/keepalive google.golang.org/grpc/metadata +google.golang.org/grpc/orca +google.golang.org/grpc/orca/internal google.golang.org/grpc/peer google.golang.org/grpc/reflection google.golang.org/grpc/reflection/grpc_reflection_v1alpha @@ -624,6 +807,7 @@ google.golang.org/grpc/xds/internal/balancer/loadstore google.golang.org/grpc/xds/internal/balancer/outlierdetection google.golang.org/grpc/xds/internal/balancer/priority google.golang.org/grpc/xds/internal/balancer/ringhash +google.golang.org/grpc/xds/internal/balancer/wrrlocality google.golang.org/grpc/xds/internal/clusterspecifier google.golang.org/grpc/xds/internal/clusterspecifier/rls google.golang.org/grpc/xds/internal/httpfilter @@ -636,9 +820,11 @@ google.golang.org/grpc/xds/internal/xdsclient google.golang.org/grpc/xds/internal/xdsclient/bootstrap google.golang.org/grpc/xds/internal/xdsclient/load google.golang.org/grpc/xds/internal/xdsclient/transport +google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry +google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter google.golang.org/grpc/xds/internal/xdsclient/xdsresource google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version -# google.golang.org/protobuf v1.29.1 +# google.golang.org/protobuf v1.31.0 ## explicit; go 1.11 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -676,12 +862,7 @@ google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb -# gopkg.in/yaml.v2 v2.4.0 -## 
explicit; go 1.15 -gopkg.in/yaml.v2 -# github.com/hashicorp/go-getter => github.com/hashicorp/go-getter v1.7.0 -# golang.org/x/crypto => golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 -# golang.org/x/text => golang.org/x/text v0.8.0 -# github.com/Masterminds/goutils => github.com/Masterminds/goutils v1.1.1 -# github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.12.1 -# golang.org/x/net => golang.org/x/net v0.8.0 +# golang.org/x/crypto => golang.org/x/crypto v0.11.0 +# golang.org/x/text => golang.org/x/text v0.11.0 +# golang.org/x/net => golang.org/x/net v0.12.0 +# bitbucket.org/creachadair/stringset => bitbucket.org/creachadair/stringset v0.0.11